path: root/drivers/gpu/drm/radeon/radeon_fb.c
/*
 * Copyright © 2007 David Airlie
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
 * DEALINGS IN THE SOFTWARE.
 *
 * Authors:
 *     David Airlie
 */
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/fb.h>

#include "drmP.h"
#include "drm.h"
#include "drm_crtc.h"
#include "drm_crtc_helper.h"
#include "radeon_drm.h"
#include "radeon.h"

#include "drm_fb_helper.h"

#include <linux/vga_switcheroo.h>

/* object hierarchy -
 * this contains a helper plus a radeon framebuffer;
 * the helper holds a pointer to the radeon framebuffer base class.
 */
struct radeon_fbdev {
	struct drm_fb_helper helper;
	struct radeon_framebuffer rfb;
	struct list_head fbdev_list;
	struct radeon_device *rdev;
};

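/*
 * fbdev ops for the emulated console: drawing goes through the generic,
 * unaccelerated cfb_* helpers, while mode checks, panning, blanking and
 * the colour map are handled by the shared DRM fb helper code.
 */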
static struct fb_ops radeonfb_ops = {
	.owner = THIS_MODULE,
	.fb_check_var = drm_fb_helper_check_var,
	.fb_set_par = drm_fb_helper_set_par,
	.fb_fillrect = cfb_fillrect,
	.fb_copyarea = cfb_copyarea,
	.fb_imageblit = cfb_imageblit,
	.fb_pan_display = drm_fb_helper_pan_display,
	.fb_blank = drm_fb_helper_blank,
	.fb_setcmap = drm_fb_helper_setcmap,
};


static int radeon_align_pitch(struct radeon_device *rdev, int width, int bpp, bool tiled)
{
	int aligned = width;
	int align_large = (ASIC_IS_AVIVO(rdev)) || tiled;
	int pitch_mask = 0;

	switch (bpp / 8) {
	case 1:
		pitch_mask = align_large ? 255 : 127;
		break;
	case 2:
		pitch_mask = align_large ? 127 : 31;
		break;
	case 3:
	case 4:
		pitch_mask = align_large ? 63 : 15;
		break;
	}

	aligned += pitch_mask;
	aligned &= ~pitch_mask;
	return aligned;
}
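/*
 * Illustrative example (not from the original source): at 32 bpp on an
 * AVIVO part the mask above is 63, so a 1366 pixel wide mode is padded
 * to (1366 + 63) & ~63 = 1408 pixels; the caller then converts this
 * pixel pitch into a byte pitch.
 */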

static void radeonfb_destroy_pinned_object(struct drm_gem_object *gobj)
{
	struct radeon_bo *rbo = gobj->driver_private;
	int ret;

	ret = radeon_bo_reserve(rbo, false);
	if (likely(ret == 0)) {
		radeon_bo_kunmap(rbo);
		radeon_bo_unpin(rbo);
		radeon_bo_unreserve(rbo);
	}
	drm_gem_object_handle_unreference(gobj);
	drm_gem_object_unreference_unlocked(gobj);
}

static int radeonfb_create_pinned_object(struct radeon_fbdev *rfbdev,
					 struct drm_mode_fb_cmd *mode_cmd,
					 struct drm_gem_object **gobj_p)
{
	struct radeon_device *rdev = rfbdev->rdev;
	struct drm_gem_object *gobj = NULL;
	struct radeon_bo *rbo = NULL;
	bool fb_tiled = false; /* useful for testing */
	u32 tiling_flags = 0;
	int ret;
	int aligned_size, size;

	/* need to align pitch with crtc limits */
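	/* (bpp + 1) / 8 maps 8/16/24/32 bpp onto 1/2/3/4 bytes per pixel */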
	mode_cmd->pitch = radeon_align_pitch(rdev, mode_cmd->width, mode_cmd->bpp, fb_tiled) * ((mode_cmd->bpp + 1) / 8);

	size = mode_cmd->pitch * mode_cmd->height;
	aligned_size = ALIGN(size, PAGE_SIZE);
	ret = radeon_gem_object_create(rdev, aligned_size, 0,
				       RADEON_GEM_DOMAIN_VRAM,
				       false, true,
				       &gobj);
	if (ret) {
		printk(KERN_ERR "failed to allocate framebuffer (%d)\n",
		       aligned_size);
		return -ENOMEM;
	}
	rbo = gobj->driver_private;

	if (fb_tiled)
		tiling_flags = RADEON_TILING_MACRO;

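	/*
	 * On big-endian hosts have the GPU byte swap 16/32 bpp surface
	 * accesses so the CPU-rendered console appears with the correct
	 * byte order.
	 */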
#ifdef __BIG_ENDIAN
	switch (mode_cmd->bpp) {
	case 32:
		tiling_flags |= RADEON_TILING_SWAP_32BIT;
		break;
	case 16:
		tiling_flags |= RADEON_TILING_SWAP_16BIT;
		break;
	default:
		break;
	}
#endif

	if (tiling_flags) {
		ret = radeon_bo_set_tiling_flags(rbo,
						 tiling_flags | RADEON_TILING_SURFACE,
						 mode_cmd->pitch);
		if (ret)
			dev_err(rdev->dev, "FB failed to set tiling flags\n");
	}


	ret = radeon_bo_reserve(rbo, false);
	if (unlikely(ret != 0))
		goto out_unref;
	ret = radeon_bo_pin(rbo, RADEON_GEM_DOMAIN_VRAM, NULL);
	if (ret) {
		radeon_bo_unreserve(rbo);
		goto out_unref;
	}
	if (fb_tiled)
		radeon_bo_check_tiling(rbo, 0, 0);
	ret = radeon_bo_kmap(rbo, NULL);
	radeon_bo_unreserve(rbo);
	if (ret)
		goto out_unref;

	*gobj_p = gobj;
	return 0;
out_unref:
	radeonfb_destroy_pinned_object(gobj);
	*gobj_p = NULL;
	return ret;
}

static int radeonfb_create(struct radeon_fbdev *rfbdev,
			   struct drm_fb_helper_surface_size *sizes)
{
	struct radeon_device *rdev = rfbdev->rdev;
	struct fb_info *info;
	struct drm_framebuffer *fb = NULL;
	struct drm_mode_fb_cmd mode_cmd;
	struct drm_gem_object *gobj = NULL;
	struct radeon_bo *rbo = NULL;
	struct device *device = &rdev->pdev->dev;
	int ret;
	unsigned long tmp;

	mode_cmd.width = sizes->surface_width;
	mode_cmd.height = sizes->surface_height;

	/* avivo can't scanout real 24bpp */
	if ((sizes->surface_bpp == 24) && ASIC_IS_AVIVO(rdev))
		sizes->surface_bpp = 32;

	mode_cmd.bpp = sizes->surface_bpp;
	mode_cmd.depth = sizes->surface_depth;

	ret = radeonfb_create_pinned_object(rfbdev, &mode_cmd, &gobj);
	if (ret)
		return ret;
	rbo = gobj->driver_private;

	/* okay, we have an object; now allocate the framebuffer */
	info = framebuffer_alloc(0, device);
	if (info == NULL) {
		ret = -ENOMEM;
		goto out_unref;
	}

	info->par = rfbdev;

	radeon_framebuffer_init(rdev->ddev, &rfbdev->rfb, &mode_cmd, gobj);

	fb = &rfbdev->rfb.base;

	/* setup helper */
	rfbdev->helper.fb = fb;
	rfbdev->helper.fbdev = info;

	memset_io(rbo->kptr, 0x0, radeon_bo_size(rbo));

	strcpy(info->fix.id, "radeondrmfb");

	drm_fb_helper_fill_fix(info, fb->pitch, fb->depth);

	info->flags = FBINFO_DEFAULT | FBINFO_CAN_FORCE_OUTPUT;
	info->fbops = &radeonfb_ops;

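	/*
	 * Expose the buffer through the CPU-visible VRAM aperture: the
	 * BO's GPU offset relative to the start of VRAM is added to the
	 * PCI aperture base so fbdev can map the scanout buffer.
	 */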
	tmp = radeon_bo_gpu_offset(rbo) - rdev->mc.vram_start;
	info->fix.smem_start = rdev->mc.aper_base + tmp;
	info->fix.smem_len = radeon_bo_size(rbo);
	info->screen_base = rbo->kptr;
	info->screen_size = radeon_bo_size(rbo);

	drm_fb_helper_fill_var(info, &rfbdev->helper, sizes->fb_width, sizes->fb_height);

	/* setup aperture base/size for vesafb takeover */
	info->apertures = alloc_apertures(1);
	if (!info->apertures) {
		ret = -ENOMEM;
		goto out_unref;
	}
	info->apertures->ranges[0].base = rdev->ddev->mode_config.fb_base;
	info->apertures->ranges[0].size = rdev->mc.real_vram_size;

	info->fix.mmio_start = 0;
	info->fix.mmio_len = 0;
	info->pixmap.size = 64*1024;
	info->pixmap.buf_align = 8;
	info->pixmap.access_align = 32;
	info->pixmap.flags = FB_PIXMAP_SYSTEM;
	info->pixmap.scan_align = 1;

	if (info->screen_base == NULL) {
		ret = -ENOSPC;
		goto out_unref;
	}

	ret = fb_alloc_cmap(&info->cmap, 256, 0);
	if (ret) {
		ret = -ENOMEM;
		goto out_unref;
	}

	DRM_INFO("fb mappable at 0x%lX\n",  info->fix.smem_start);
	DRM_INFO("vram apper at 0x%lX\n",  (unsigned long)rdev->mc.aper_base);
	DRM_INFO("size %lu\n", (unsigned long)radeon_bo_size(rbo));
	DRM_INFO("fb depth is %d\n", fb->depth);
	DRM_INFO("   pitch is %d\n", fb->pitch);

	vga_switcheroo_client_fb_set(rdev->ddev->pdev, info);
	return 0;

out_unref:
	if (fb && ret) {
		drm_gem_object_unreference(gobj);
		drm_framebuffer_cleanup(fb);
		kfree(fb);
	}
	return ret;
}

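/*
 * fb_probe callback for the DRM fb helper: returns 1 when a new
 * framebuffer was created (so the helper registers it), 0 when the
 * existing one is reused, or a negative error code on failure.
 */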
static int radeon_fb_find_or_create_single(struct drm_fb_helper *helper,
					   struct drm_fb_helper_surface_size *sizes)
{
	struct radeon_fbdev *rfbdev = (struct radeon_fbdev *)helper;
	int new_fb = 0;
	int ret;

	if (!helper->fb) {
		ret = radeonfb_create(rfbdev, sizes);
		if (ret)
			return ret;
		new_fb = 1;
	}
	return new_fb;
}

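/*
 * Each non-empty comma-separated token overwrites mode_option, so the
 * last token in the option string wins.
 */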
static char *mode_option;
int radeon_parse_options(char *options)
{
	char *this_opt;

	if (!options || !*options)
		return 0;

	while ((this_opt = strsep(&options, ",")) != NULL) {
		if (!*this_opt)
			continue;
		mode_option = this_opt;
	}
	return 0;
}

void radeon_fb_output_poll_changed(struct radeon_device *rdev)
{
	drm_fb_helper_hotplug_event(&rdev->mode_info.rfbdev->helper);
}

static int radeon_fbdev_destroy(struct drm_device *dev, struct radeon_fbdev *rfbdev)
{
	struct fb_info *info;
	struct radeon_framebuffer *rfb = &rfbdev->rfb;

	if (rfbdev->helper.fbdev) {
		info = rfbdev->helper.fbdev;

		unregister_framebuffer(info);
		if (info->cmap.len)
			fb_dealloc_cmap(&info->cmap);
		framebuffer_release(info);
	}

	if (rfb->obj) {
		radeonfb_destroy_pinned_object(rfb->obj);
		rfb->obj = NULL;
	}
	drm_fb_helper_fini(&rfbdev->helper);
	drm_framebuffer_cleanup(&rfb->base);

	return 0;
}

static struct drm_fb_helper_funcs radeon_fb_helper_funcs = {
	.gamma_set = radeon_crtc_fb_gamma_set,
	.gamma_get = radeon_crtc_fb_gamma_get,
	.fb_probe = radeon_fb_find_or_create_single,
};

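/*
 * Set up fbdev emulation: allocate the radeon_fbdev wrapper, register it
 * with the DRM fb helper for every CRTC and connector, and trigger the
 * initial configuration which ends up in radeonfb_create() above.
 */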
int radeon_fbdev_init(struct radeon_device *rdev)
{
	struct radeon_fbdev *rfbdev;
	int bpp_sel = 32;
	int ret;

	/* select 8 bpp console on RN50 or 32MB cards */
	if (ASIC_IS_RN50(rdev) || rdev->mc.real_vram_size <= (32*1024*1024))
		bpp_sel = 8;

	rfbdev = kzalloc(sizeof(struct radeon_fbdev), GFP_KERNEL);
	if (!rfbdev)
		return -ENOMEM;

	rfbdev->rdev = rdev;
	rdev->mode_info.rfbdev = rfbdev;
	rfbdev->helper.funcs = &radeon_fb_helper_funcs;

	ret = drm_fb_helper_init(rdev->ddev, &rfbdev->helper,
				 rdev->num_crtc,
				 RADEONFB_CONN_LIMIT);
	if (ret) {
		kfree(rfbdev);
		return ret;
	}

	drm_fb_helper_single_add_all_connectors(&rfbdev->helper);
	drm_fb_helper_initial_config(&rfbdev->helper, bpp_sel);
	return 0;
}

void radeon_fbdev_fini(struct radeon_device *rdev)
{
	if (!rdev->mode_info.rfbdev)
		return;

	radeon_fbdev_destroy(rdev->ddev, rdev->mode_info.rfbdev);
	kfree(rdev->mode_info.rfbdev);
	rdev->mode_info.rfbdev = NULL;
}

void radeon_fbdev_set_suspend(struct radeon_device *rdev, int state)
{
	fb_set_suspend(rdev->mode_info.rfbdev->helper.fbdev, state);
}

int radeon_fbdev_total_size(struct radeon_device *rdev)
{
	struct radeon_bo *robj;
	int size = 0;

	robj = rdev->mode_info.rfbdev->rfb.obj->driver_private;
	size += radeon_bo_size(robj);
	return size;
}

bool radeon_fbdev_robj_is_fb(struct radeon_device *rdev, struct radeon_bo *robj)
{
	if (robj == rdev->mode_info.rfbdev->rfb.obj->driver_private)
		return true;
	return false;
}
-rw-r--r--arch/parisc/include/asm/pgtable.h2
-rw-r--r--arch/parisc/include/asm/unistd.h3
-rw-r--r--arch/parisc/kernel/irq.c44
-rw-r--r--arch/parisc/kernel/pdc_cons.c141
-rw-r--r--arch/parisc/kernel/ptrace.c13
-rw-r--r--arch/parisc/kernel/signal.c9
-rw-r--r--arch/parisc/kernel/sys_parisc32.c1
-rw-r--r--arch/parisc/kernel/syscall_table.S1
-rw-r--r--arch/parisc/kernel/unaligned.c3
-rw-r--r--arch/parisc/kernel/unwind.c5
-rw-r--r--arch/parisc/math-emu/Makefile2
-rw-r--r--arch/powerpc/Kconfig17
-rw-r--r--arch/powerpc/boot/div64.S3
-rw-r--r--arch/powerpc/boot/dts/mpc8610_hpcd.dts1
-rw-r--r--arch/powerpc/configs/mpc85xx_defconfig3
-rw-r--r--arch/powerpc/configs/mpc85xx_smp_defconfig3
-rw-r--r--arch/powerpc/include/asm/cputime.h12
-rw-r--r--arch/powerpc/include/asm/fsl_guts.h (renamed from arch/powerpc/include/asm/immap_86xx.h)111
-rw-r--r--arch/powerpc/include/asm/fsl_lbc.h34
-rw-r--r--arch/powerpc/include/asm/fsldma.h137
-rw-r--r--arch/powerpc/include/asm/highmem.h9
-rw-r--r--arch/powerpc/include/asm/kgdb.h1
-rw-r--r--arch/powerpc/include/asm/pgtable-ppc32.h8
-rw-r--r--arch/powerpc/include/asm/pgtable-ppc64.h2
-rw-r--r--arch/powerpc/kernel/ibmebus.c11
-rw-r--r--arch/powerpc/kernel/kgdb.c188
-rw-r--r--arch/powerpc/kernel/kvm.c2
-rw-r--r--arch/powerpc/kernel/legacy_serial.c22
-rw-r--r--arch/powerpc/kernel/prom.c12
-rw-r--r--arch/powerpc/kernel/ptrace.c66
-rw-r--r--arch/powerpc/kernel/setup_64.c5
-rw-r--r--arch/powerpc/kernel/sys_ppc32.c1
-rw-r--r--arch/powerpc/kernel/vio.c4
-rw-r--r--arch/powerpc/kvm/booke_interrupts.S2
-rw-r--r--arch/powerpc/kvm/e500.c2
-rw-r--r--arch/powerpc/kvm/powerpc.c1
-rw-r--r--arch/powerpc/kvm/timing.c2
-rw-r--r--arch/powerpc/mm/hash_utils_64.c2
-rw-r--r--arch/powerpc/mm/highmem.c37
-rw-r--r--arch/powerpc/mm/pgtable.c2
-rw-r--r--arch/powerpc/mm/tlb_low_64e.S5
-rw-r--r--arch/powerpc/mm/tlb_nohash.c2
-rw-r--r--arch/powerpc/platforms/85xx/p1022_ds.c211
-rw-r--r--arch/powerpc/platforms/cell/spufs/inode.c10
-rw-r--r--arch/powerpc/platforms/pseries/Kconfig6
-rw-r--r--arch/powerpc/platforms/pseries/eeh.c2
-rw-r--r--arch/powerpc/platforms/pseries/pci_dlpar.c2
-rw-r--r--arch/powerpc/sysdev/fsl_lbc.c244
-rw-r--r--arch/powerpc/sysdev/fsl_rio.c76
-rw-r--r--arch/s390/Kbuild6
-rw-r--r--arch/s390/Kconfig72
-rw-r--r--arch/s390/Kconfig.debug12
-rw-r--r--arch/s390/Makefile5
-rw-r--r--arch/s390/crypto/crypt_s390.h2
-rw-r--r--arch/s390/hypfs/hypfs_diag.c19
-rw-r--r--arch/s390/hypfs/inode.c8
-rw-r--r--arch/s390/include/asm/ccwdev.h12
-rw-r--r--arch/s390/include/asm/cpu.h2
-rw-r--r--arch/s390/include/asm/cputime.h10
-rw-r--r--arch/s390/include/asm/dasd.h40
-rw-r--r--arch/s390/include/asm/hugetlb.h26
-rw-r--r--arch/s390/include/asm/lowcore.h11
-rw-r--r--arch/s390/include/asm/page.h13
-rw-r--r--arch/s390/include/asm/pgalloc.h4
-rw-r--r--arch/s390/include/asm/pgtable.h62
-rw-r--r--arch/s390/include/asm/processor.h2
-rw-r--r--arch/s390/include/asm/ptrace.h3
-rw-r--r--arch/s390/include/asm/s390_ext.h2
-rw-r--r--arch/s390/include/asm/scatterlist.h2
-rw-r--r--arch/s390/include/asm/setup.h3
-rw-r--r--arch/s390/include/asm/syscall.h4
-rw-r--r--arch/s390/include/asm/sysinfo.h40
-rw-r--r--arch/s390/include/asm/system.h51
-rw-r--r--arch/s390/include/asm/tlb.h13
-rw-r--r--arch/s390/include/asm/topology.h2
-rw-r--r--arch/s390/kernel/asm-offsets.c9
-rw-r--r--arch/s390/kernel/compat_linux.c1
-rw-r--r--arch/s390/kernel/compat_ptrace.h3
-rw-r--r--arch/s390/kernel/dis.c145
-rw-r--r--arch/s390/kernel/early.c47
-rw-r--r--arch/s390/kernel/entry.S45
-rw-r--r--arch/s390/kernel/entry.h4
-rw-r--r--arch/s390/kernel/entry64.S46
-rw-r--r--arch/s390/kernel/head.S8
-rw-r--r--arch/s390/kernel/kprobes.c67
-rw-r--r--arch/s390/kernel/nmi.c10
-rw-r--r--arch/s390/kernel/process.c10
-rw-r--r--arch/s390/kernel/processor.c2
-rw-r--r--arch/s390/kernel/ptrace.c3
-rw-r--r--arch/s390/kernel/s390_ext.c9
-rw-r--r--arch/s390/kernel/setup.c25
-rw-r--r--arch/s390/kernel/smp.c5
-rw-r--r--arch/s390/kernel/sysinfo.c43
-rw-r--r--arch/s390/kernel/time.c17
-rw-r--r--arch/s390/kernel/topology.c105
-rw-r--r--arch/s390/kernel/traps.c173
-rw-r--r--arch/s390/kernel/vdso.c6
-rw-r--r--arch/s390/kernel/vdso32/clock_getres.S6
-rw-r--r--arch/s390/kernel/vdso32/clock_gettime.S4
-rw-r--r--arch/s390/kernel/vdso64/clock_getres.S6
-rw-r--r--arch/s390/kernel/vdso64/clock_gettime.S4
-rw-r--r--arch/s390/kernel/vtime.c22
-rw-r--r--arch/s390/kvm/kvm-s390.c4
-rw-r--r--arch/s390/kvm/priv.c4
-rw-r--r--arch/s390/lib/delay.c14
-rw-r--r--arch/s390/mm/Makefile2
-rw-r--r--arch/s390/mm/cmm.c7
-rw-r--r--arch/s390/mm/fault.c77
-rw-r--r--arch/s390/mm/gup.c224
-rw-r--r--arch/s390/mm/hugetlbpage.c2
-rw-r--r--arch/s390/mm/init.c52
-rw-r--r--arch/s390/mm/pgtable.c173
-rw-r--r--arch/score/Kconfig5
-rw-r--r--arch/score/include/asm/pgtable.h3
-rw-r--r--arch/score/kernel/ptrace.c7
-rw-r--r--arch/sh/Kconfig28
-rw-r--r--arch/sh/Makefile3
-rw-r--r--arch/sh/boards/Kconfig25
-rw-r--r--arch/sh/boards/Makefile4
-rw-r--r--arch/sh/boards/board-edosk7705.c78
-rw-r--r--arch/sh/boards/board-secureedge5410.c (renamed from arch/sh/boards/mach-snapgear/setup.c)38
-rw-r--r--arch/sh/boards/board-sh2007.c133
-rw-r--r--arch/sh/boards/board-sh7757lcr.c374
-rw-r--r--arch/sh/boards/mach-ap325rxa/setup.c30
-rw-r--r--arch/sh/boards/mach-cayman/irq.c16
-rw-r--r--arch/sh/boards/mach-dreamcast/irq.c17
-rw-r--r--arch/sh/boards/mach-ecovec24/setup.c104
-rw-r--r--arch/sh/boards/mach-edosk7705/Makefile5
-rw-r--r--arch/sh/boards/mach-edosk7705/io.c71
-rw-r--r--arch/sh/boards/mach-edosk7705/setup.c36
-rw-r--r--arch/sh/boards/mach-kfr2r09/setup.c30
-rw-r--r--arch/sh/boards/mach-landisk/irq.c15
-rw-r--r--arch/sh/boards/mach-microdev/io.c246
-rw-r--r--arch/sh/boards/mach-microdev/irq.c30
-rw-r--r--arch/sh/boards/mach-microdev/setup.c23
-rw-r--r--arch/sh/boards/mach-migor/setup.c60
-rw-r--r--arch/sh/boards/mach-sdk7786/Makefile5
-rw-r--r--arch/sh/boards/mach-sdk7786/gpio.c49
-rw-r--r--arch/sh/boards/mach-sdk7786/setup.c54
-rw-r--r--arch/sh/boards/mach-sdk7786/sram.c72
-rw-r--r--arch/sh/boards/mach-se/7206/Makefile2
-rw-r--r--arch/sh/boards/mach-se/7206/io.c104
-rw-r--r--arch/sh/boards/mach-se/7206/irq.c24
-rw-r--r--arch/sh/boards/mach-se/7206/setup.c15
-rw-r--r--arch/sh/boards/mach-se/7343/irq.c15
-rw-r--r--arch/sh/boards/mach-se/770x/Makefile2
-rw-r--r--arch/sh/boards/mach-se/770x/io.c156
-rw-r--r--arch/sh/boards/mach-se/770x/setup.c22
-rw-r--r--arch/sh/boards/mach-se/7722/irq.c15
-rw-r--r--arch/sh/boards/mach-se/7724/irq.c13
-rw-r--r--arch/sh/boards/mach-se/7724/setup.c102
-rw-r--r--arch/sh/boards/mach-se/7751/Makefile2
-rw-r--r--arch/sh/boards/mach-se/7751/io.c119
-rw-r--r--arch/sh/boards/mach-se/7751/setup.c18
-rw-r--r--arch/sh/boards/mach-snapgear/Makefile5
-rw-r--r--arch/sh/boards/mach-snapgear/io.c121
-rw-r--r--arch/sh/boards/mach-systemh/Makefile13
-rw-r--r--arch/sh/boards/mach-systemh/io.c158
-rw-r--r--arch/sh/boards/mach-systemh/irq.c76
-rw-r--r--arch/sh/boards/mach-systemh/setup.c57
-rw-r--r--arch/sh/boards/mach-x3proto/Makefile2
-rw-r--r--arch/sh/boards/mach-x3proto/gpio.c136
-rw-r--r--arch/sh/boards/mach-x3proto/ilsel.c18
-rw-r--r--arch/sh/boards/mach-x3proto/setup.c132
-rw-r--r--arch/sh/boot/compressed/head_32.S4
-rw-r--r--arch/sh/cchips/hd6446x/Makefile2
-rw-r--r--arch/sh/cchips/hd6446x/hd64461.c19
-rw-r--r--arch/sh/configs/ap325rxa_defconfig1
-rw-r--r--arch/sh/configs/cayman_defconfig1
-rw-r--r--arch/sh/configs/dreamcast_defconfig1
-rw-r--r--arch/sh/configs/ecovec24-romimage_defconfig1
-rw-r--r--arch/sh/configs/edosk7760_defconfig1
-rw-r--r--arch/sh/configs/espt_defconfig1
-rw-r--r--arch/sh/configs/hp6xx_defconfig1
-rw-r--r--arch/sh/configs/kfr2r09-romimage_defconfig1
-rw-r--r--arch/sh/configs/kfr2r09_defconfig1
-rw-r--r--arch/sh/configs/landisk_defconfig1
-rw-r--r--arch/sh/configs/lboxre2_defconfig1
-rw-r--r--arch/sh/configs/magicpanelr2_defconfig1
-rw-r--r--arch/sh/configs/microdev_defconfig1
-rw-r--r--arch/sh/configs/migor_defconfig1
-rw-r--r--arch/sh/configs/polaris_defconfig1
-rw-r--r--arch/sh/configs/r7780mp_defconfig1
-rw-r--r--arch/sh/configs/r7785rp_defconfig1
-rw-r--r--arch/sh/configs/rts7751r2d1_defconfig1
-rw-r--r--arch/sh/configs/rts7751r2dplus_defconfig1
-rw-r--r--arch/sh/configs/sdk7780_defconfig1
-rw-r--r--arch/sh/configs/se7343_defconfig1
-rw-r--r--arch/sh/configs/se7712_defconfig1
-rw-r--r--arch/sh/configs/se7721_defconfig1
-rw-r--r--arch/sh/configs/se7722_defconfig1
-rw-r--r--arch/sh/configs/se7724_defconfig1
-rw-r--r--arch/sh/configs/se7750_defconfig1
-rw-r--r--arch/sh/configs/se7751_defconfig1
-rw-r--r--arch/sh/configs/se7780_defconfig1
-rw-r--r--arch/sh/configs/secureedge5410_defconfig (renamed from arch/sh/configs/snapgear_defconfig)1
-rw-r--r--arch/sh/configs/sh03_defconfig1
-rw-r--r--arch/sh/configs/sh2007_defconfig212
-rw-r--r--arch/sh/configs/sh7710voipgw_defconfig1
-rw-r--r--arch/sh/configs/sh7757lcr_defconfig85
-rw-r--r--arch/sh/configs/sh7763rdp_defconfig1
-rw-r--r--arch/sh/configs/sh7785lcr_defconfig1
-rw-r--r--arch/sh/configs/shx3_defconfig1
-rw-r--r--arch/sh/configs/systemh_defconfig29
-rw-r--r--arch/sh/configs/titan_defconfig1
-rw-r--r--arch/sh/configs/ul2_defconfig1
-rw-r--r--arch/sh/drivers/dma/dma-api.c4
-rw-r--r--arch/sh/drivers/pci/Makefile1
-rw-r--r--arch/sh/drivers/pci/fixups-sdk7786.c67
-rw-r--r--arch/sh/drivers/pci/ops-sh4.c11
-rw-r--r--arch/sh/drivers/pci/ops-sh7786.c69
-rw-r--r--arch/sh/drivers/pci/pci-sh7751.c2
-rw-r--r--arch/sh/drivers/pci/pci-sh7780.c2
-rw-r--r--arch/sh/drivers/pci/pci-sh7780.h6
-rw-r--r--arch/sh/drivers/pci/pci.c43
-rw-r--r--arch/sh/drivers/pci/pcie-sh7786.c251
-rw-r--r--arch/sh/drivers/pci/pcie-sh7786.h56
-rw-r--r--arch/sh/include/asm/Kbuild2
-rw-r--r--arch/sh/include/asm/addrspace.h8
-rw-r--r--arch/sh/include/asm/cacheflush.h2
-rw-r--r--arch/sh/include/asm/elf.h27
-rw-r--r--arch/sh/include/asm/fixmap.h4
-rw-r--r--arch/sh/include/asm/gpio.h6
-rw-r--r--arch/sh/include/asm/irq.h2
-rw-r--r--arch/sh/include/asm/kprobes.h1
-rw-r--r--arch/sh/include/asm/pci.h2
-rw-r--r--arch/sh/include/asm/pgtable.h14
-rw-r--r--arch/sh/include/asm/pgtable_32.h5
-rw-r--r--arch/sh/include/asm/pgtable_64.h11
-rw-r--r--arch/sh/include/asm/processor.h1
-rw-r--r--arch/sh/include/asm/processor_32.h10
-rw-r--r--arch/sh/include/asm/processor_64.h3
-rw-r--r--arch/sh/include/asm/ptrace.h169
-rw-r--r--arch/sh/include/asm/ptrace_32.h83
-rw-r--r--arch/sh/include/asm/ptrace_64.h20
-rw-r--r--arch/sh/include/asm/sizes.h1
-rw-r--r--arch/sh/include/asm/sram.h38
-rw-r--r--arch/sh/include/asm/system.h6
-rw-r--r--arch/sh/include/asm/system_32.h51
-rw-r--r--arch/sh/include/asm/system_64.h3
-rw-r--r--arch/sh/include/asm/tlbflush.h2
-rw-r--r--arch/sh/include/asm/uncached.h40
-rw-r--r--arch/sh/include/asm/unistd_32.h27
-rw-r--r--arch/sh/include/asm/unistd_64.h5
-rw-r--r--arch/sh/include/cpu-sh3/cpu/mmu_context.h1
-rw-r--r--arch/sh/include/cpu-sh4/cpu/freq.h4
-rw-r--r--arch/sh/include/cpu-sh4/cpu/sh7724.h3
-rw-r--r--arch/sh/include/cpu-sh4/cpu/sh7757.h301
-rw-r--r--arch/sh/include/cpu-sh4/cpu/shx3.h64
-rw-r--r--arch/sh/include/mach-common/mach/edosk7705.h7
-rw-r--r--arch/sh/include/mach-common/mach/microdev.h9
-rw-r--r--arch/sh/include/mach-common/mach/secureedge5410.h (renamed from arch/sh/include/mach-common/mach/snapgear.h)22
-rw-r--r--arch/sh/include/mach-common/mach/sh2007.h117
-rw-r--r--arch/sh/include/mach-common/mach/systemh7751.h71
-rw-r--r--arch/sh/include/mach-sdk7786/mach/fpga.h26
-rw-r--r--arch/sh/include/mach-x3proto/mach/hardware.h12
-rw-r--r--arch/sh/include/mach-x3proto/mach/ilsel.h (renamed from arch/sh/include/asm/ilsel.h)0
-rw-r--r--arch/sh/kernel/Makefile8
-rw-r--r--arch/sh/kernel/clkdev.c4
-rw-r--r--arch/sh/kernel/cpu/init.c2
-rw-r--r--arch/sh/kernel/cpu/irq/imask.c14
-rw-r--r--arch/sh/kernel/cpu/irq/intc-sh5.c49
-rw-r--r--arch/sh/kernel/cpu/irq/ipr.c29
-rw-r--r--arch/sh/kernel/cpu/sh4/clock-sh4-202.c2
-rw-r--r--arch/sh/kernel/cpu/sh4/perf_event.c2
-rw-r--r--arch/sh/kernel/cpu/sh4/probe.c2
-rw-r--r--arch/sh/kernel/cpu/sh4a/Makefile5
-rw-r--r--arch/sh/kernel/cpu/sh4a/clock-sh7724.c50
-rw-r--r--arch/sh/kernel/cpu/sh4a/clock-sh7757.c199
-rw-r--r--arch/sh/kernel/cpu/sh4a/clock-shx3.c225
-rw-r--r--arch/sh/kernel/cpu/sh4a/intc-shx3.c34
-rw-r--r--arch/sh/kernel/cpu/sh4a/perf_event.c22
-rw-r--r--arch/sh/kernel/cpu/sh4a/pinmux-sh7757.c1582
-rw-r--r--arch/sh/kernel/cpu/sh4a/pinmux-shx3.c587
-rw-r--r--arch/sh/kernel/cpu/sh4a/setup-sh7722.c2
-rw-r--r--arch/sh/kernel/cpu/sh4a/setup-sh7724.c66
-rw-r--r--arch/sh/kernel/cpu/sh4a/setup-sh7757.c222
-rw-r--r--arch/sh/kernel/cpu/sh4a/setup-sh7786.c126
-rw-r--r--arch/sh/kernel/cpu/sh4a/setup-shx3.c42
-rw-r--r--arch/sh/kernel/head_32.S2
-rw-r--r--arch/sh/kernel/io_trapped.c2
-rw-r--r--arch/sh/kernel/irq.c39
-rw-r--r--arch/sh/kernel/irq_64.c16
-rw-r--r--arch/sh/kernel/kdebugfs.c16
-rw-r--r--arch/sh/kernel/kprobes.c100
-rw-r--r--arch/sh/kernel/ptrace.c33
-rw-r--r--arch/sh/kernel/ptrace_32.c72
-rw-r--r--arch/sh/kernel/ptrace_64.c113
-rw-r--r--arch/sh/kernel/reboot.c4
-rw-r--r--arch/sh/kernel/setup.c26
-rw-r--r--arch/sh/kernel/sys_sh.c2
-rw-r--r--arch/sh/kernel/syscalls_32.S22
-rw-r--r--arch/sh/kernel/syscalls_64.S3
-rw-r--r--arch/sh/kernel/traps_32.c29
-rw-r--r--arch/sh/kernel/traps_64.c11
-rw-r--r--arch/sh/kernel/vsyscall/vsyscall-trapa.S2
-rw-r--r--arch/sh/lib/Makefile2
-rw-r--r--arch/sh/math-emu/math.c3
-rw-r--r--arch/sh/mm/Kconfig6
-rw-r--r--arch/sh/mm/Makefile5
-rw-r--r--arch/sh/mm/asids-debugfs.c2
-rw-r--r--arch/sh/mm/cache-debugfs.c10
-rw-r--r--arch/sh/mm/cache-sh4.c4
-rw-r--r--arch/sh/mm/cache-sh7705.c2
-rw-r--r--arch/sh/mm/cache.c14
-rw-r--r--arch/sh/mm/consistent.c18
-rw-r--r--arch/sh/mm/gup.c273
-rw-r--r--arch/sh/mm/init.c52
-rw-r--r--arch/sh/mm/kmap.c2
-rw-r--r--arch/sh/mm/nommu.c4
-rw-r--r--arch/sh/mm/pmb.c35
-rw-r--r--arch/sh/mm/sram.c34
-rw-r--r--arch/sh/mm/tlb-debugfs.c11
-rw-r--r--arch/sh/mm/tlbflush_32.c16
-rw-r--r--arch/sh/mm/tlbflush_64.c5
-rw-r--r--arch/sh/mm/uncached.c2
-rw-r--r--arch/sh/oprofile/Makefile2
-rw-r--r--arch/sh/oprofile/backtrace.c2
-rw-r--r--arch/sh/oprofile/common.c35
-rw-r--r--arch/sh/tools/mach-types3
-rw-r--r--arch/sparc/Kconfig10
-rw-r--r--arch/sparc/include/asm/Kbuild1
-rw-r--r--arch/sparc/include/asm/floppy_32.h3
-rw-r--r--arch/sparc/include/asm/highmem.h4
-rw-r--r--arch/sparc/include/asm/io_32.h31
-rw-r--r--arch/sparc/include/asm/io_64.h31
-rw-r--r--arch/sparc/include/asm/jump_label.h2
-rw-r--r--arch/sparc/include/asm/openprom.h16
-rw-r--r--arch/sparc/include/asm/oplib_32.h44
-rw-r--r--arch/sparc/include/asm/oplib_64.h39
-rw-r--r--arch/sparc/include/asm/pci_64.h2
-rw-r--r--arch/sparc/include/asm/pgtable_32.h3
-rw-r--r--arch/sparc/include/asm/pgtable_64.h2
-rw-r--r--arch/sparc/include/asm/prom.h5
-rw-r--r--arch/sparc/kernel/auxio_32.c4
-rw-r--r--arch/sparc/kernel/btext.c4
-rw-r--r--arch/sparc/kernel/devices.c23
-rw-r--r--arch/sparc/kernel/irq_32.c4
-rw-r--r--arch/sparc/kernel/leon_kernel.c2
-rw-r--r--arch/sparc/kernel/leon_smp.c5
-rw-r--r--arch/sparc/kernel/pcic.c4
-rw-r--r--arch/sparc/kernel/prom.h6
-rw-r--r--arch/sparc/kernel/prom_common.c202
-rw-r--r--arch/sparc/kernel/ptrace_32.c57
-rw-r--r--arch/sparc/kernel/ptrace_64.c15
-rw-r--r--arch/sparc/kernel/rtrap_32.S6
-rw-r--r--arch/sparc/kernel/rtrap_64.S36
-rw-r--r--arch/sparc/kernel/setup_64.c2
-rw-r--r--arch/sparc/kernel/starfire.c2
-rw-r--r--arch/sparc/kernel/sys_sparc32.c1
-rw-r--r--arch/sparc/kernel/sys_sparc_32.c1
-rw-r--r--arch/sparc/kernel/tadpole.c2
-rw-r--r--arch/sparc/kernel/unaligned_32.c1
-rw-r--r--arch/sparc/kernel/windows.c1
-rw-r--r--arch/sparc/mm/fault_32.c12
-rw-r--r--arch/sparc/mm/highmem.c48
-rw-r--r--arch/sparc/mm/init_64.c2
-rw-r--r--arch/sparc/mm/srmmu.c8
-rw-r--r--arch/sparc/mm/sun4c.c2
-rw-r--r--arch/sparc/prom/init_32.c2
-rw-r--r--arch/sparc/prom/init_64.c4
-rw-r--r--arch/sparc/prom/memory.c3
-rw-r--r--arch/sparc/prom/misc_64.c6
-rw-r--r--arch/sparc/prom/ranges.c6
-rw-r--r--arch/sparc/prom/tree_32.c58
-rw-r--r--arch/sparc/prom/tree_64.c62
-rw-r--r--arch/tile/Kconfig37
-rw-r--r--arch/tile/Makefile19
-rw-r--r--arch/tile/include/arch/sim.h619
-rw-r--r--arch/tile/include/arch/sim_def.h548
-rw-r--r--arch/tile/include/arch/spr_def.h85
-rw-r--r--arch/tile/include/arch/spr_def_32.h39
-rw-r--r--arch/tile/include/asm/backtrace.h5
-rw-r--r--arch/tile/include/asm/cacheflush.h52
-rw-r--r--arch/tile/include/asm/compat.h15
-rw-r--r--arch/tile/include/asm/highmem.h11
-rw-r--r--arch/tile/include/asm/io.h15
-rw-r--r--arch/tile/include/asm/irqflags.h64
-rw-r--r--arch/tile/include/asm/kmap_types.h34
-rw-r--r--arch/tile/include/asm/mman.h1
-rw-r--r--arch/tile/include/asm/page.h27
-rw-r--r--arch/tile/include/asm/pci-bridge.h117
-rw-r--r--arch/tile/include/asm/pci.h107
-rw-r--r--arch/tile/include/asm/pgtable.h11
-rw-r--r--arch/tile/include/asm/processor.h21
-rw-r--r--arch/tile/include/asm/ptrace.h4
-rw-r--r--arch/tile/include/asm/stat.h3
-rw-r--r--arch/tile/include/asm/syscalls.h73
-rw-r--r--arch/tile/include/asm/system.h14
-rw-r--r--arch/tile/include/asm/traps.h4
-rw-r--r--arch/tile/include/asm/unistd.h1
-rw-r--r--arch/tile/include/hv/drv_xgbe_impl.h300
-rw-r--r--arch/tile/include/hv/drv_xgbe_intf.h615
-rw-r--r--arch/tile/include/hv/hypervisor.h30
-rw-r--r--arch/tile/include/hv/netio_errors.h122
-rw-r--r--arch/tile/include/hv/netio_intf.h2975
-rw-r--r--arch/tile/kernel/Makefile1
-rw-r--r--arch/tile/kernel/backtrace.c4
-rw-r--r--arch/tile/kernel/compat.c21
-rw-r--r--arch/tile/kernel/compat_signal.c11
-rw-r--r--arch/tile/kernel/early_printk.c2
-rw-r--r--arch/tile/kernel/entry.S34
-rw-r--r--arch/tile/kernel/hardwall.c6
-rw-r--r--arch/tile/kernel/head_32.S5
-rw-r--r--arch/tile/kernel/intvec_32.S101
-rw-r--r--arch/tile/kernel/irq.c18
-rw-r--r--arch/tile/kernel/machine_kexec.c6
-rw-r--r--arch/tile/kernel/messaging.c2
-rw-r--r--arch/tile/kernel/pci.c621
-rw-r--r--arch/tile/kernel/process.c50
-rw-r--r--arch/tile/kernel/ptrace.c108
-rw-r--r--arch/tile/kernel/reboot.c6
-rw-r--r--arch/tile/kernel/regs_32.S2
-rw-r--r--arch/tile/kernel/setup.c44
-rw-r--r--arch/tile/kernel/signal.c16
-rw-r--r--arch/tile/kernel/single_step.c73
-rw-r--r--arch/tile/kernel/smp.c4
-rw-r--r--arch/tile/kernel/smpboot.c1
-rw-r--r--arch/tile/kernel/stack.c35
-rw-r--r--arch/tile/kernel/sys.c10
-rw-r--r--arch/tile/kernel/time.c8
-rw-r--r--arch/tile/kernel/traps.c4
-rw-r--r--arch/tile/kvm/Kconfig38
-rw-r--r--arch/tile/lib/Makefile4
-rw-r--r--arch/tile/lib/atomic_32.c8
-rw-r--r--arch/tile/lib/exports.c3
-rw-r--r--arch/tile/lib/memchr_32.c35
-rw-r--r--arch/tile/lib/memcpy_32.S206
-rw-r--r--arch/tile/lib/memcpy_tile64.c11
-rw-r--r--arch/tile/lib/memmove.c (renamed from arch/tile/lib/memmove_32.c)0
-rw-r--r--arch/tile/lib/memset_32.c1
-rw-r--r--arch/tile/lib/spinlock_32.c29
-rw-r--r--arch/tile/lib/strlen_32.c2
-rw-r--r--arch/tile/mm/fault.c13
-rw-r--r--arch/tile/mm/highmem.c88
-rw-r--r--arch/tile/mm/homecache.c11
-rw-r--r--arch/tile/mm/hugetlbpage.c1
-rw-r--r--arch/tile/mm/init.c10
-rw-r--r--arch/tile/mm/pgtable.c4
-rw-r--r--arch/um/Kconfig.common2
-rw-r--r--arch/um/Kconfig.um6
-rw-r--r--arch/um/defconfig1
-rw-r--r--arch/um/drivers/line.c5
-rw-r--r--arch/um/include/asm/dma-mapping.h112
-rw-r--r--arch/um/include/asm/pgtable.h2
-rw-r--r--arch/um/include/asm/ptrace-generic.h4
-rw-r--r--arch/um/include/asm/system.h49
-rw-r--r--arch/um/kernel/dyn.lds.S14
-rw-r--r--arch/um/kernel/exec.c1
-rw-r--r--arch/um/kernel/irq.c15
-rw-r--r--arch/um/kernel/ptrace.c23
-rw-r--r--arch/um/kernel/uml.lds.S19
-rw-r--r--arch/um/os-Linux/time.c2
-rw-r--r--arch/um/sys-i386/ptrace.c4
-rw-r--r--arch/um/sys-x86_64/ptrace.c11
-rw-r--r--arch/x86/Kbuild1
-rw-r--r--arch/x86/Kconfig11
-rw-r--r--arch/x86/Makefile_32.cpu13
-rw-r--r--arch/x86/ia32/sys_ia32.c1
-rw-r--r--arch/x86/include/asm/acpi.h3
-rw-r--r--arch/x86/include/asm/apic.h10
-rw-r--r--arch/x86/include/asm/fixmap.h4
-rw-r--r--arch/x86/include/asm/highmem.h11
-rw-r--r--arch/x86/include/asm/io.h13
-rw-r--r--arch/x86/include/asm/io_apic.h1
-rw-r--r--arch/x86/include/asm/iomap.h4
-rw-r--r--arch/x86/include/asm/irq.h2
-rw-r--r--arch/x86/include/asm/msr-index.h3
-rw-r--r--arch/x86/include/asm/olpc.h2
-rw-r--r--arch/x86/include/asm/paravirt.h10
-rw-r--r--arch/x86/include/asm/pci.h33
-rw-r--r--arch/x86/include/asm/pci_x86.h1
-rw-r--r--arch/x86/include/asm/perf_event.h19
-rw-r--r--arch/x86/include/asm/pgtable_32.h14
-rw-r--r--arch/x86/include/asm/pgtable_64.h2
-rw-r--r--arch/x86/include/asm/pvclock.h1
-rw-r--r--arch/x86/include/asm/smp.h9
-rw-r--r--arch/x86/include/asm/uv/uv_hub.h25
-rw-r--r--arch/x86/include/asm/uv/uv_mmrs.h208
-rw-r--r--arch/x86/include/asm/x86_init.h9
-rw-r--r--arch/x86/include/asm/xen/hypercall.h17
-rw-r--r--arch/x86/include/asm/xen/interface.h6
-rw-r--r--arch/x86/include/asm/xen/interface_32.h5
-rw-r--r--arch/x86/include/asm/xen/interface_64.h13
-rw-r--r--arch/x86/include/asm/xen/page.h19
-rw-r--r--arch/x86/include/asm/xen/pci.h65
-rw-r--r--arch/x86/kernel/Makefile12
-rw-r--r--arch/x86/kernel/acpi/boot.c60
-rw-r--r--arch/x86/kernel/acpi/sleep.c1
-rw-r--r--arch/x86/kernel/alternative.c71
-rw-r--r--arch/x86/kernel/apic/apic.c1
-rw-r--r--arch/x86/kernel/apic/hw_nmi.c7
-rw-r--r--arch/x86/kernel/apic/io_apic.c11
-rw-r--r--arch/x86/kernel/apic/x2apic_uv_x.c61
-rw-r--r--arch/x86/kernel/cpu/cpufreq/acpi-cpufreq.c1
-rw-r--r--arch/x86/kernel/cpu/cpufreq/cpufreq-nforce2.c2
-rw-r--r--arch/x86/kernel/cpu/cpufreq/longrun.c4
-rw-r--r--arch/x86/kernel/cpu/intel_cacheinfo.c1
-rw-r--r--arch/x86/kernel/cpu/perf_event.c46
-rw-r--r--arch/x86/kernel/cpu/perf_event_amd.c4
-rw-r--r--arch/x86/kernel/cpu/perf_event_intel_ds.c216
-rw-r--r--arch/x86/kernel/cpuid.c1
-rw-r--r--arch/x86/kernel/crash_dump_32.c2
-rw-r--r--arch/x86/kernel/dumpstack_32.c6
-rw-r--r--arch/x86/kernel/dumpstack_64.c8
-rw-r--r--arch/x86/kernel/entry_32.S2
-rw-r--r--arch/x86/kernel/entry_64.S2
-rw-r--r--arch/x86/kernel/hpet.c2
-rw-r--r--arch/x86/kernel/hw_breakpoint.c4
-rw-r--r--arch/x86/kernel/irq_32.c17
-rw-r--r--arch/x86/kernel/kgdb.c15
-rw-r--r--arch/x86/kernel/microcode_amd.c2
-rw-r--r--arch/x86/kernel/mmconf-fam10h_64.c71
-rw-r--r--arch/x86/kernel/msr.c1
-rw-r--r--arch/x86/kernel/ptrace.c17
-rw-r--r--arch/x86/kernel/pvclock.c43
-rw-r--r--arch/x86/kernel/reboot.c2
-rw-r--r--arch/x86/kernel/setup.c2
-rw-r--r--arch/x86/kernel/smp.c15
-rw-r--r--arch/x86/kernel/smpboot.c3
-rw-r--r--arch/x86/kernel/x86_init.c7
-rw-r--r--arch/x86/kvm/mmu.c9
-rw-r--r--arch/x86/kvm/svm.c2
-rw-r--r--arch/x86/kvm/vmx.c19
-rw-r--r--arch/x86/kvm/x86.c16
-rw-r--r--arch/x86/mm/fault.c63
-rw-r--r--arch/x86/mm/highmem_32.c76
-rw-r--r--arch/x86/mm/init_64.c1
-rw-r--r--arch/x86/mm/iomap_32.c43
-rw-r--r--arch/x86/mm/numa_64.c7
-rw-r--r--arch/x86/mm/tlb.c7
-rw-r--r--arch/x86/oprofile/nmi_int.c6
-rw-r--r--arch/x86/oprofile/op_model_amd.c146
-rw-r--r--arch/x86/pci/Makefile1
-rw-r--r--arch/x86/pci/acpi.c103
-rw-r--r--arch/x86/pci/common.c17
-rw-r--r--arch/x86/pci/i386.c19
-rw-r--r--arch/x86/pci/irq.c11
-rw-r--r--arch/x86/pci/mmconfig-shared.c4
-rw-r--r--arch/x86/pci/xen.c429
-rw-r--r--arch/x86/platform/Makefile8
-rw-r--r--arch/x86/platform/efi/Makefile1
-rw-r--r--arch/x86/platform/efi/efi.c (renamed from arch/x86/kernel/efi.c)0
-rw-r--r--arch/x86/platform/efi/efi_32.c (renamed from arch/x86/kernel/efi_32.c)0
-rw-r--r--arch/x86/platform/efi/efi_64.c (renamed from arch/x86/kernel/efi_64.c)0
-rw-r--r--arch/x86/platform/efi/efi_stub_32.S (renamed from arch/x86/kernel/efi_stub_32.S)0
-rw-r--r--arch/x86/platform/efi/efi_stub_64.S (renamed from arch/x86/kernel/efi_stub_64.S)0
-rw-r--r--arch/x86/platform/mrst/Makefile1
-rw-r--r--arch/x86/platform/mrst/mrst.c (renamed from arch/x86/kernel/mrst.c)0
-rw-r--r--arch/x86/platform/olpc/Makefile3
-rw-r--r--arch/x86/platform/olpc/olpc-xo1.c (renamed from arch/x86/kernel/olpc-xo1.c)0
-rw-r--r--arch/x86/platform/olpc/olpc.c (renamed from arch/x86/kernel/olpc.c)0
-rw-r--r--arch/x86/platform/olpc/olpc_ofw.c (renamed from arch/x86/kernel/olpc_ofw.c)0
-rw-r--r--arch/x86/platform/scx200/Makefile2
-rw-r--r--arch/x86/platform/scx200/scx200_32.c (renamed from arch/x86/kernel/scx200_32.c)0
-rw-r--r--arch/x86/platform/sfi/Makefile1
-rw-r--r--arch/x86/platform/sfi/sfi.c (renamed from arch/x86/kernel/sfi.c)0
-rw-r--r--arch/x86/platform/uv/Makefile1
-rw-r--r--arch/x86/platform/uv/bios_uv.c (renamed from arch/x86/kernel/bios_uv.c)0
-rw-r--r--arch/x86/platform/uv/tlb_uv.c (renamed from arch/x86/kernel/tlb_uv.c)15
-rw-r--r--arch/x86/platform/uv/uv_irq.c (renamed from arch/x86/kernel/uv_irq.c)0
-rw-r--r--arch/x86/platform/uv/uv_sysfs.c (renamed from arch/x86/kernel/uv_sysfs.c)0
-rw-r--r--arch/x86/platform/uv/uv_time.c (renamed from arch/x86/kernel/uv_time.c)4
-rw-r--r--arch/x86/platform/visws/Makefile1
-rw-r--r--arch/x86/platform/visws/visws_quirks.c (renamed from arch/x86/kernel/visws_quirks.c)0
-rw-r--r--arch/x86/xen/Kconfig21
-rw-r--r--arch/x86/xen/enlighten.c45
-rw-r--r--arch/x86/xen/mmu.c628
-rw-r--r--arch/x86/xen/mmu.h1
-rw-r--r--arch/x86/xen/pci-swiotlb-xen.c4
-rw-r--r--arch/x86/xen/platform-pci-unplug.c2
-rw-r--r--arch/x86/xen/setup.c156
-rw-r--r--arch/x86/xen/smp.c32
-rw-r--r--arch/x86/xen/suspend.c1
-rw-r--r--arch/x86/xen/time.c2
-rw-r--r--arch/x86/xen/xen-ops.h5
-rw-r--r--arch/xtensa/Kconfig5
-rw-r--r--arch/xtensa/include/asm/pgtable.h3
-rw-r--r--arch/xtensa/kernel/ptrace.c14
-rw-r--r--block/blk-core.c35
-rw-r--r--block/blk-ioc.c14
-rw-r--r--block/blk-map.c2
-rw-r--r--block/blk-merge.c2
-rw-r--r--block/blk-throttle.c2
-rw-r--r--block/blk.h4
-rw-r--r--block/compat_ioctl.c5
-rw-r--r--block/elevator.c4
-rw-r--r--block/genhd.c14
-rw-r--r--block/ioctl.c8
-rw-r--r--block/scsi_ioctl.c34
-rw-r--r--crypto/async_tx/Kconfig13
-rw-r--r--crypto/async_tx/async_memcpy.c2
-rw-r--r--crypto/blkcipher.c2
-rw-r--r--crypto/pcrypt.c1
-rw-r--r--drivers/Makefile4
-rw-r--r--drivers/acpi/Kconfig13
-rw-r--r--drivers/acpi/ac.c14
-rw-r--r--drivers/acpi/acpica/Makefile5
-rw-r--r--drivers/acpi/acpica/acdebug.h2
-rw-r--r--drivers/acpi/acpica/acevents.h5
-rw-r--r--drivers/acpi/acpica/acglobal.h9
-rw-r--r--drivers/acpi/acpica/achware.h7
-rw-r--r--drivers/acpi/acpica/aclocal.h15
-rw-r--r--drivers/acpi/acpica/acmacros.h4
-rw-r--r--drivers/acpi/acpica/acnamesp.h12
-rw-r--r--drivers/acpi/acpica/acobject.h2
-rw-r--r--drivers/acpi/acpica/acutils.h56
-rw-r--r--drivers/acpi/acpica/dsmethod.c2
-rw-r--r--drivers/acpi/acpica/dswexec.c19
-rw-r--r--drivers/acpi/acpica/evevent.c41
-rw-r--r--drivers/acpi/acpica/evgpeblk.c47
-rw-r--r--drivers/acpi/acpica/evgpeinit.c31
-rw-r--r--drivers/acpi/acpica/evmisc.c2
-rw-r--r--drivers/acpi/acpica/evrgnini.c14
-rw-r--r--drivers/acpi/acpica/evxface.c19
-rw-r--r--drivers/acpi/acpica/evxfevnt.c61
-rw-r--r--drivers/acpi/acpica/evxfregn.c6
-rw-r--r--drivers/acpi/acpica/exfldio.c75
-rw-r--r--drivers/acpi/acpica/exmutex.c10
-rw-r--r--drivers/acpi/acpica/exprep.c45
-rw-r--r--drivers/acpi/acpica/exregion.c4
-rw-r--r--drivers/acpi/acpica/hwpci.c412
-rw-r--r--drivers/acpi/acpica/nsrepair2.c163
-rw-r--r--drivers/acpi/acpica/nsutils.c98
-rw-r--r--drivers/acpi/acpica/tbfadt.c4
-rw-r--r--drivers/acpi/acpica/utdebug.c7
-rw-r--r--drivers/acpi/acpica/uteval.c147
-rw-r--r--drivers/acpi/acpica/utglobal.c9
-rw-r--r--drivers/acpi/acpica/utids.c45
-rw-r--r--drivers/acpi/acpica/utinit.c4
-rw-r--r--drivers/acpi/acpica/utmath.c23
-rw-r--r--drivers/acpi/acpica/utmisc.c162
-rw-r--r--drivers/acpi/acpica/utmutex.c37
-rw-r--r--drivers/acpi/acpica/utosi.c380
-rw-r--r--drivers/acpi/acpica/utxface.c138
-rw-r--r--drivers/acpi/acpica/utxferror.c415
-rw-r--r--drivers/acpi/battery.c94
-rw-r--r--drivers/acpi/bus.c7
-rw-r--r--drivers/acpi/button.c4
-rw-r--r--drivers/acpi/debugfs.c2
-rw-r--r--drivers/acpi/dock.c7
-rw-r--r--drivers/acpi/ec.c9
-rw-r--r--drivers/acpi/fan.c139
-rw-r--r--drivers/acpi/osl.c463
-rw-r--r--drivers/acpi/pci_irq.c1
-rw-r--r--drivers/acpi/pci_link.c1
-rw-r--r--drivers/acpi/pci_root.c1
-rw-r--r--drivers/acpi/power.c167
-rw-r--r--drivers/acpi/processor_driver.c22
-rw-r--r--drivers/acpi/processor_idle.c2
-rw-r--r--drivers/acpi/processor_thermal.c178
-rw-r--r--drivers/acpi/processor_throttling.c4
-rw-r--r--drivers/acpi/sbs.c25
-rw-r--r--drivers/acpi/scan.c46
-rw-r--r--drivers/acpi/sleep.c28
-rw-r--r--drivers/acpi/sleep.h1
-rw-r--r--drivers/acpi/thermal.c436
-rw-r--r--drivers/acpi/video.c771
-rw-r--r--drivers/ata/libata-scsi.c24
-rw-r--r--drivers/ata/pata_legacy.c2
-rw-r--r--drivers/ata/pata_octeon_cf.c4
-rw-r--r--drivers/ata/sata_via.c9
-rw-r--r--drivers/atm/eni.c7
-rw-r--r--drivers/atm/solos-attrlist.c1
-rw-r--r--drivers/atm/solos-pci.c8
-rw-r--r--drivers/base/devtmpfs.c18
-rw-r--r--drivers/base/node.c14
-rw-r--r--drivers/base/platform.c1
-rw-r--r--drivers/base/power/main.c34
-rw-r--r--drivers/base/power/runtime.c4
-rw-r--r--drivers/block/amiflop.c2
-rw-r--r--drivers/block/aoe/aoeblk.c7
-rw-r--r--drivers/block/aoe/aoedev.c4
-rw-r--r--drivers/block/ataflop.c2
-rw-r--r--drivers/block/cciss.c171
-rw-r--r--drivers/block/cciss.h4
-rw-r--r--drivers/block/cciss_scsi.c8
-rw-r--r--drivers/block/drbd/drbd_actlog.c42
-rw-r--r--drivers/block/drbd/drbd_int.h52
-rw-r--r--drivers/block/drbd/drbd_main.c150
-rw-r--r--drivers/block/drbd/drbd_nl.c25
-rw-r--r--drivers/block/drbd/drbd_proc.c1
-rw-r--r--drivers/block/drbd/drbd_receiver.c218
-rw-r--r--drivers/block/drbd/drbd_req.c38
-rw-r--r--drivers/block/drbd/drbd_worker.c24
-rw-r--r--drivers/block/floppy.c4
-rw-r--r--drivers/block/loop.c12
-rw-r--r--drivers/block/rbd.c748
-rw-r--r--drivers/block/xen-blkfront.c57
-rw-r--r--drivers/block/xsysace.c3
-rw-r--r--drivers/block/z2ram.c6
-rw-r--r--drivers/bluetooth/btusb.c5
-rw-r--r--drivers/cdrom/gdrom.c76
-rw-r--r--drivers/char/Makefile44
-rw-r--r--drivers/char/agp/Makefile1
-rw-r--r--drivers/char/agp/agp.h5
-rw-r--r--drivers/char/agp/amd-k7-agp.c6
-rw-r--r--drivers/char/agp/backend.c22
-rw-r--r--drivers/char/agp/frontend.c1
-rw-r--r--drivers/char/agp/generic.c8
-rw-r--r--drivers/char/agp/intel-agp.c201
-rw-r--r--drivers/char/agp/intel-agp.h43
-rw-r--r--drivers/char/agp/intel-gtt.c1620
-rw-r--r--drivers/char/agp/parisc-agp.c5
-rw-r--r--drivers/char/amiserial.c2
-rw-r--r--drivers/char/applicom.c1
-rw-r--r--drivers/char/briq_panel.c1
-rw-r--r--drivers/char/hpet.c41
-rw-r--r--drivers/char/hvc_console.c1
-rw-r--r--drivers/char/hvc_iucv.c4
-rw-r--r--drivers/char/hvc_tile.c5
-rw-r--r--drivers/char/hvc_xen.c101
-rw-r--r--drivers/char/hw_random/core.c1
-rw-r--r--drivers/char/i8k.c7
-rw-r--r--drivers/char/ip2/Makefile2
-rw-r--r--drivers/char/ipmi/Makefile2
-rw-r--r--drivers/char/ipmi/ipmi_devintf.c4
-rw-r--r--drivers/char/ipmi/ipmi_msghandler.c4
-rw-r--r--drivers/char/ipmi/ipmi_si_intf.c44
-rw-r--r--drivers/char/istallion.c1
-rw-r--r--drivers/char/mem.c4
-rw-r--r--drivers/char/mmtimer.c60
-rw-r--r--drivers/char/mwave/Makefile4
-rw-r--r--drivers/char/mxser.c4
-rw-r--r--drivers/char/nozomi.c1
-rw-r--r--drivers/char/pcmcia/cm4000_cs.c3
-rw-r--r--drivers/char/pcmcia/ipwireless/Makefile2
-rw-r--r--drivers/char/pcmcia/synclink_cs.c1
-rw-r--r--drivers/char/ppdev.c1
-rw-r--r--drivers/char/ramoops.c30
-rw-r--r--drivers/char/rio/Makefile2
-rw-r--r--drivers/char/rocket.c5
-rw-r--r--drivers/char/serial167.c1
-rw-r--r--drivers/char/specialix.c1
-rw-r--r--drivers/char/stallion.c1
-rw-r--r--drivers/char/sx.c1
-rw-r--r--drivers/char/synclink_gt.c142
-rw-r--r--drivers/char/tpm/tpm_tis.c24
-rw-r--r--drivers/char/uv_mmtimer.c1
-rw-r--r--drivers/char/virtio_console.c37
-rw-r--r--drivers/clocksource/sh_cmt.c22
-rw-r--r--drivers/clocksource/sh_mtu2.c10
-rw-r--r--drivers/clocksource/sh_tmu.c10
-rw-r--r--drivers/connector/cn_queue.c75
-rw-r--r--drivers/connector/connector.c9
-rw-r--r--drivers/cpufreq/cpufreq.c4
-rw-r--r--drivers/cpufreq/cpufreq_ondemand.c42
-rw-r--r--drivers/crypto/hifn_795x.c4
-rw-r--r--drivers/crypto/n2_core.c2
-rw-r--r--drivers/crypto/padlock-aes.c2
-rw-r--r--drivers/dma/Kconfig35
-rw-r--r--drivers/dma/Makefile3
-rw-r--r--drivers/dma/amba-pl08x.c2167
-rw-r--r--drivers/dma/coh901318.c2
-rw-r--r--drivers/dma/dmaengine.c8
-rw-r--r--drivers/dma/fsldma.c328
-rw-r--r--drivers/dma/imx-dma.c424
-rw-r--r--drivers/dma/imx-sdma.c1392
-rw-r--r--drivers/dma/intel_mid_dma.c476
-rw-r--r--drivers/dma/intel_mid_dma_regs.h53
-rw-r--r--drivers/dma/pch_dma.c1
-rw-r--r--drivers/dma/shdma.c1
-rw-r--r--drivers/dma/ste_dma40.c1023
-rw-r--r--drivers/dma/ste_dma40_ll.c180
-rw-r--r--drivers/dma/ste_dma40_ll.h86
-rw-r--r--drivers/dma/timb_dma.c2
-rw-r--r--drivers/edac/Makefile8
-rw-r--r--drivers/edac/edac_core.h11
-rw-r--r--drivers/edac/edac_mc.c12
-rw-r--r--drivers/edac/edac_mc_sysfs.c82
-rw-r--r--drivers/edac/i7core_edac.c432
-rw-r--r--drivers/edac/mce_amd_inj.c2
-rw-r--r--drivers/firewire/Kconfig5
-rw-r--r--drivers/firewire/Makefile1
-rw-r--r--drivers/firewire/init_ohci1394_dma.c (renamed from drivers/ieee1394/init_ohci1394_dma.c)76
-rw-r--r--drivers/firewire/net.c160
-rw-r--r--drivers/firewire/ohci.c88
-rw-r--r--drivers/firewire/sbp2.c4
-rw-r--r--drivers/firmware/dmi_scan.c32
-rw-r--r--drivers/gpio/74x164.c182
-rw-r--r--drivers/gpio/Kconfig40
-rw-r--r--drivers/gpio/Makefile4
-rw-r--r--drivers/gpio/adp5588-gpio.c277
-rw-r--r--drivers/gpio/basic_mmio_gpio.c297
-rw-r--r--drivers/gpio/cs5535-gpio.c16
-rw-r--r--drivers/gpio/langwell_gpio.c89
-rw-r--r--drivers/gpio/pca953x.c13
-rw-r--r--drivers/gpio/pch_gpio.c312
-rw-r--r--drivers/gpio/stmpe-gpio.c13
-rw-r--r--drivers/gpio/timbgpio.c21
-rw-r--r--drivers/gpio/vx855_gpio.c332
-rw-r--r--drivers/gpio/wm8994-gpio.c1
-rw-r--r--drivers/gpio/xilinx_gpio.c6
-rw-r--r--drivers/gpu/Makefile2
-rw-r--r--drivers/gpu/drm/Makefile2
-rw-r--r--drivers/gpu/drm/drm_agpsupport.c40
-rw-r--r--drivers/gpu/drm/drm_context.c8
-rw-r--r--drivers/gpu/drm/drm_crtc.c3
-rw-r--r--drivers/gpu/drm/drm_crtc_helper.c18
-rw-r--r--drivers/gpu/drm/drm_debugfs.c1
-rw-r--r--drivers/gpu/drm/drm_drawable.c198
-rw-r--r--drivers/gpu/drm/drm_drv.c10
-rw-r--r--drivers/gpu/drm/drm_edid.c119
-rw-r--r--drivers/gpu/drm/drm_fb_helper.c32
-rw-r--r--drivers/gpu/drm/drm_fops.c1
-rw-r--r--drivers/gpu/drm/drm_gem.c14
-rw-r--r--drivers/gpu/drm/drm_info.c14
-rw-r--r--drivers/gpu/drm/drm_irq.c19
-rw-r--r--drivers/gpu/drm/drm_lock.c30
-rw-r--r--drivers/gpu/drm/drm_memory.c14
-rw-r--r--drivers/gpu/drm/drm_proc.c14
-rw-r--r--drivers/gpu/drm/drm_scatter.c2
-rw-r--r--drivers/gpu/drm/drm_stub.c4
-rw-r--r--drivers/gpu/drm/drm_vm.c13
-rw-r--r--drivers/gpu/drm/i810/i810_drv.c2
-rw-r--r--drivers/gpu/drm/i830/i830_drv.c2
-rw-r--r--drivers/gpu/drm/i915/Makefile4
-rw-r--r--drivers/gpu/drm/i915/dvo_ch7017.c66
-rw-r--r--drivers/gpu/drm/i915/dvo_ch7xxx.c10
-rw-r--r--drivers/gpu/drm/i915/dvo_ivch.c10
-rw-r--r--drivers/gpu/drm/i915/dvo_sil164.c10
-rw-r--r--drivers/gpu/drm/i915/dvo_tfp410.c10
-rw-r--r--drivers/gpu/drm/i915/i915_debugfs.c336
-rw-r--r--drivers/gpu/drm/i915/i915_dma.c363
-rw-r--r--drivers/gpu/drm/i915/i915_drv.c219
-rw-r--r--drivers/gpu/drm/i915/i915_drv.h274
-rw-r--r--drivers/gpu/drm/i915/i915_gem.c2516
-rw-r--r--drivers/gpu/drm/i915/i915_gem_debug.c148
-rw-r--r--drivers/gpu/drm/i915/i915_gem_evict.c72
-rw-r--r--drivers/gpu/drm/i915/i915_gem_tiling.c54
-rw-r--r--drivers/gpu/drm/i915/i915_irq.c264
-rw-r--r--drivers/gpu/drm/i915/i915_reg.h336
-rw-r--r--drivers/gpu/drm/i915/i915_suspend.c68
-rw-r--r--drivers/gpu/drm/i915/intel_acpi.c252
-rw-r--r--drivers/gpu/drm/i915/intel_bios.c234
-rw-r--r--drivers/gpu/drm/i915/intel_bios.h6
-rw-r--r--drivers/gpu/drm/i915/intel_crt.c248
-rw-r--r--drivers/gpu/drm/i915/intel_display.c2472
-rw-r--r--drivers/gpu/drm/i915/intel_dp.c563
-rw-r--r--drivers/gpu/drm/i915/intel_drv.h163
-rw-r--r--drivers/gpu/drm/i915/intel_dvo.c69
-rw-r--r--drivers/gpu/drm/i915/intel_fb.c29
-rw-r--r--drivers/gpu/drm/i915/intel_hdmi.c193
-rw-r--r--drivers/gpu/drm/i915/intel_i2c.c485
-rw-r--r--drivers/gpu/drm/i915/intel_lvds.c496
-rw-r--r--drivers/gpu/drm/i915/intel_modes.c16
-rw-r--r--drivers/gpu/drm/i915/intel_opregion.c (renamed from drivers/gpu/drm/i915/i915_opregion.c)183
-rw-r--r--drivers/gpu/drm/i915/intel_overlay.c1003
-rw-r--r--drivers/gpu/drm/i915/intel_panel.c109
-rw-r--r--drivers/gpu/drm/i915/intel_ringbuffer.c598
-rw-r--r--drivers/gpu/drm/i915/intel_ringbuffer.h84
-rw-r--r--drivers/gpu/drm/i915/intel_sdvo.c1090
-rw-r--r--drivers/gpu/drm/i915/intel_tv.c165
-rw-r--r--drivers/gpu/drm/mga/mga_drv.c2
-rw-r--r--drivers/gpu/drm/nouveau/Kconfig1
-rw-r--r--drivers/gpu/drm/nouveau/Makefile6
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_acpi.c2
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_backlight.c9
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_bios.c376
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_bios.h43
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_bo.c327
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_calc.c10
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_channel.c23
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_connector.c131
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_connector.h4
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_debugfs.c16
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_dma.c32
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_dma.h1
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_dp.c10
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_drv.c23
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_drv.h308
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_encoder.h1
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_fbcon.c6
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_fence.c323
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_gem.c40
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_grctx.h2
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_hw.c49
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_hw.h19
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_i2c.c10
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_i2c.h5
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_irq.c145
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_mem.c384
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_notifier.c9
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_object.c778
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_perf.c205
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_pm.c523
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_pm.h74
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_ramht.c306
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_ramht.h55
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_reg.h9
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_sgdma.c82
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_state.c140
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_temp.c309
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_volt.c212
-rw-r--r--drivers/gpu/drm/nouveau/nv04_crtc.c67
-rw-r--r--drivers/gpu/drm/nouveau/nv04_dac.c11
-rw-r--r--drivers/gpu/drm/nouveau/nv04_dfp.c52
-rw-r--r--drivers/gpu/drm/nouveau/nv04_fbcon.c9
-rw-r--r--drivers/gpu/drm/nouveau/nv04_fifo.c68
-rw-r--r--drivers/gpu/drm/nouveau/nv04_instmem.c140
-rw-r--r--drivers/gpu/drm/nouveau/nv04_pm.c90
-rw-r--r--drivers/gpu/drm/nouveau/nv04_tv.c10
-rw-r--r--drivers/gpu/drm/nouveau/nv10_fifo.c19
-rw-r--r--drivers/gpu/drm/nouveau/nv10_graph.c2
-rw-r--r--drivers/gpu/drm/nouveau/nv17_tv.c110
-rw-r--r--drivers/gpu/drm/nouveau/nv17_tv.h15
-rw-r--r--drivers/gpu/drm/nouveau/nv17_tv_modes.c48
-rw-r--r--drivers/gpu/drm/nouveau/nv20_graph.c506
-rw-r--r--drivers/gpu/drm/nouveau/nv40_fifo.c20
-rw-r--r--drivers/gpu/drm/nouveau/nv40_graph.c16
-rw-r--r--drivers/gpu/drm/nouveau/nv40_grctx.c6
-rw-r--r--drivers/gpu/drm/nouveau/nv50_calc.c16
-rw-r--r--drivers/gpu/drm/nouveau/nv50_crtc.c91
-rw-r--r--drivers/gpu/drm/nouveau/nv50_cursor.c2
-rw-r--r--drivers/gpu/drm/nouveau/nv50_dac.c4
-rw-r--r--drivers/gpu/drm/nouveau/nv50_display.c127
-rw-r--r--drivers/gpu/drm/nouveau/nv50_fb.c40
-rw-r--r--drivers/gpu/drm/nouveau/nv50_fbcon.c4
-rw-r--r--drivers/gpu/drm/nouveau/nv50_fifo.c291
-rw-r--r--drivers/gpu/drm/nouveau/nv50_graph.c103
-rw-r--r--drivers/gpu/drm/nouveau/nv50_grctx.c3305
-rw-r--r--drivers/gpu/drm/nouveau/nv50_instmem.c419
-rw-r--r--drivers/gpu/drm/nouveau/nv50_pm.c131
-rw-r--r--drivers/gpu/drm/nouveau/nv50_sor.c4
-rw-r--r--drivers/gpu/drm/nouveau/nva3_pm.c95
-rw-r--r--drivers/gpu/drm/nouveau/nvc0_fifo.c6
-rw-r--r--drivers/gpu/drm/nouveau/nvc0_instmem.c13
-rw-r--r--drivers/gpu/drm/nouveau/nvreg.h1
-rw-r--r--drivers/gpu/drm/r128/r128_drv.c2
-rw-r--r--drivers/gpu/drm/radeon/Makefile2
-rw-r--r--drivers/gpu/drm/radeon/atom.c1
-rw-r--r--drivers/gpu/drm/radeon/atombios_crtc.c406
-rw-r--r--drivers/gpu/drm/radeon/evergreen.c603
-rw-r--r--drivers/gpu/drm/radeon/evergreen_blit_kms.c774
-rw-r--r--drivers/gpu/drm/radeon/evergreen_blit_shaders.c348
-rw-r--r--drivers/gpu/drm/radeon/evergreen_blit_shaders.h35
-rw-r--r--drivers/gpu/drm/radeon/evergreend.h20
-rw-r--r--drivers/gpu/drm/radeon/r100.c107
-rw-r--r--drivers/gpu/drm/radeon/r100_track.h1
-rw-r--r--drivers/gpu/drm/radeon/r200.c2
-rw-r--r--drivers/gpu/drm/radeon/r300.c17
-rw-r--r--drivers/gpu/drm/radeon/r420.c16
-rw-r--r--drivers/gpu/drm/radeon/r520.c11
-rw-r--r--drivers/gpu/drm/radeon/r600.c199
-rw-r--r--drivers/gpu/drm/radeon/r600_blit_kms.c53
-rw-r--r--drivers/gpu/drm/radeon/r600_cs.c341
-rw-r--r--drivers/gpu/drm/radeon/r600_reg.h1
-rw-r--r--drivers/gpu/drm/radeon/r600d.h27
-rw-r--r--drivers/gpu/drm/radeon/radeon.h27
-rw-r--r--drivers/gpu/drm/radeon/radeon_asic.c6
-rw-r--r--drivers/gpu/drm/radeon/radeon_asic.h11
-rw-r--r--drivers/gpu/drm/radeon/radeon_atombios.c172
-rw-r--r--drivers/gpu/drm/radeon/radeon_benchmark.c4
-rw-r--r--drivers/gpu/drm/radeon/radeon_bios.c13
-rw-r--r--drivers/gpu/drm/radeon/radeon_combios.c15
-rw-r--r--drivers/gpu/drm/radeon/radeon_connectors.c133
-rw-r--r--drivers/gpu/drm/radeon/radeon_cursor.c15
-rw-r--r--drivers/gpu/drm/radeon/radeon_device.c87
-rw-r--r--drivers/gpu/drm/radeon/radeon_display.c423
-rw-r--r--drivers/gpu/drm/radeon/radeon_drv.c11
-rw-r--r--drivers/gpu/drm/radeon/radeon_encoders.c398
-rw-r--r--drivers/gpu/drm/radeon/radeon_fb.c2
-rw-r--r--drivers/gpu/drm/radeon/radeon_fence.c13
-rw-r--r--drivers/gpu/drm/radeon/radeon_gart.c4
-rw-r--r--drivers/gpu/drm/radeon/radeon_gem.c2
-rw-r--r--drivers/gpu/drm/radeon/radeon_i2c.c49
-rw-r--r--drivers/gpu/drm/radeon/radeon_irq.c4
-rw-r--r--drivers/gpu/drm/radeon/radeon_legacy_crtc.c49
-rw-r--r--drivers/gpu/drm/radeon/radeon_legacy_encoders.c2
-rw-r--r--drivers/gpu/drm/radeon/radeon_mode.h75
-rw-r--r--drivers/gpu/drm/radeon/radeon_object.c28
-rw-r--r--drivers/gpu/drm/radeon/radeon_object.h7
-rw-r--r--drivers/gpu/drm/radeon/radeon_pm.c70
-rw-r--r--drivers/gpu/drm/radeon/radeon_reg.h1
-rw-r--r--drivers/gpu/drm/radeon/radeon_ring.c18
-rw-r--r--drivers/gpu/drm/radeon/radeon_test.c4
-rw-r--r--drivers/gpu/drm/radeon/radeon_ttm.c39
-rw-r--r--drivers/gpu/drm/radeon/reg_srcs/evergreen8
-rw-r--r--drivers/gpu/drm/radeon/rs400.c17
-rw-r--r--drivers/gpu/drm/radeon/rs600.c19
-rw-r--r--drivers/gpu/drm/radeon/rs690.c15
-rw-r--r--drivers/gpu/drm/radeon/rv515.c15
-rw-r--r--drivers/gpu/drm/radeon/rv770.c42
-rw-r--r--drivers/gpu/drm/savage/savage_drv.c2
-rw-r--r--drivers/gpu/drm/sis/sis_drv.c3
-rw-r--r--drivers/gpu/drm/tdfx/tdfx_drv.c2
-rw-r--r--drivers/gpu/drm/ttm/Makefile3
-rw-r--r--drivers/gpu/drm/ttm/ttm_agp_backend.c3
-rw-r--r--drivers/gpu/drm/ttm/ttm_bo.c392
-rw-r--r--drivers/gpu/drm/ttm/ttm_bo_manager.c157
-rw-r--r--drivers/gpu/drm/ttm/ttm_bo_util.c20
-rw-r--r--drivers/gpu/drm/ttm/ttm_tt.c4
-rw-r--r--drivers/gpu/drm/via/via_dmablit.c4
-rw-r--r--drivers/gpu/drm/via/via_drv.c2
-rw-r--r--drivers/gpu/drm/vmwgfx/Makefile2
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_buffer.c84
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_drv.c130
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_drv.h40
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c30
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_fb.c13
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_gmr.c38
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_gmrid_manager.c137
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_ioctl.c3
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_kms.c205
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_ldu.c30
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_overlay.c2
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_resource.c89
-rw-r--r--drivers/gpu/stub/Kconfig16
-rw-r--r--drivers/gpu/stub/Makefile1
-rw-r--r--drivers/gpu/stub/poulsbo.c64
-rw-r--r--drivers/hid/hid-core.c6
-rw-r--r--drivers/hid/hid-egalax.c2
-rw-r--r--drivers/hid/hid-ids.h4
-rw-r--r--drivers/hid/hid-input.c127
-rw-r--r--drivers/hid/hid-tmff.c2
-rw-r--r--drivers/hid/hidraw.c1
-rw-r--r--drivers/hid/usbhid/hiddev.c1
-rw-r--r--drivers/hwmon/Kconfig140
-rw-r--r--drivers/hwmon/Makefile5
-rw-r--r--drivers/hwmon/ad7414.c6
-rw-r--r--drivers/hwmon/adt7470.c4
-rw-r--r--drivers/hwmon/adt7475.c2
-rw-r--r--drivers/hwmon/amc6821.c2
-rw-r--r--drivers/hwmon/asc7621.c4
-rw-r--r--drivers/hwmon/coretemp.c28
-rw-r--r--drivers/hwmon/gpio-fan.c558
-rw-r--r--drivers/hwmon/hp_accel.c32
-rw-r--r--drivers/hwmon/i5k_amb.c2
-rw-r--r--drivers/hwmon/it87.c210
-rw-r--r--drivers/hwmon/k8temp.c51
-rw-r--r--drivers/hwmon/lis3lv02d.c364
-rw-r--r--drivers/hwmon/lis3lv02d.h47
-rw-r--r--drivers/hwmon/lis3lv02d_i2c.c136
-rw-r--r--drivers/hwmon/lis3lv02d_spi.c5
-rw-r--r--drivers/hwmon/lm75.c51
-rw-r--r--drivers/hwmon/lm85.c36
-rw-r--r--drivers/hwmon/lm90.c1014
-rw-r--r--drivers/hwmon/lm93.c4
-rw-r--r--drivers/hwmon/lm95241.c19
-rw-r--r--drivers/hwmon/ltc4261.c314
-rw-r--r--drivers/hwmon/max6650.c2
-rw-r--r--drivers/hwmon/pcf8591.c38
-rw-r--r--drivers/hwmon/pkgtemp.c32
-rw-r--r--drivers/hwmon/s3c-hwmon.c8
-rw-r--r--drivers/hwmon/tmp421.c4
-rw-r--r--drivers/hwmon/via-cputemp.c11
-rw-r--r--drivers/hwmon/w83795.c2262
-rw-r--r--drivers/i2c/Kconfig3
-rw-r--r--drivers/i2c/algos/Kconfig14
-rw-r--r--drivers/i2c/busses/Kconfig15
-rw-r--r--drivers/i2c/busses/Makefile1
-rw-r--r--drivers/i2c/busses/i2c-i801.c343
-rw-r--r--drivers/i2c/busses/i2c-intel-mid.c1135
-rw-r--r--drivers/i2c/busses/i2c-nomadik.c36
-rw-r--r--drivers/i2c/busses/i2c-s3c2410.c10
-rw-r--r--drivers/i2c/busses/i2c-sh7760.c4
-rw-r--r--drivers/i2c/busses/i2c-sh_mobile.c23
-rw-r--r--drivers/i2c/busses/scx200_acb.c3
-rw-r--r--drivers/i2c/i2c-core.c12
-rw-r--r--drivers/i2c/i2c-mux.c1
-rw-r--r--drivers/ide/hpt366.c14
-rw-r--r--drivers/ide/ide-dma.c11
-rw-r--r--drivers/idle/intel_idle.c61
-rw-r--r--drivers/ieee1394/Kconfig182
-rw-r--r--drivers/ieee1394/Makefile18
-rw-r--r--drivers/ieee1394/config_roms.c194
-rw-r--r--drivers/ieee1394/config_roms.h19
-rw-r--r--drivers/ieee1394/csr.c843
-rw-r--r--drivers/ieee1394/csr.h99
-rw-r--r--drivers/ieee1394/csr1212.c1467
-rw-r--r--drivers/ieee1394/csr1212.h383
-rw-r--r--drivers/ieee1394/dma.c289
-rw-r--r--drivers/ieee1394/dma.h89
-rw-r--r--drivers/ieee1394/dv1394-private.h587
-rw-r--r--drivers/ieee1394/dv1394.c2584
-rw-r--r--drivers/ieee1394/dv1394.h305
-rw-r--r--drivers/ieee1394/eth1394.c1720
-rw-r--r--drivers/ieee1394/eth1394.h234
-rw-r--r--drivers/ieee1394/highlevel.c691
-rw-r--r--drivers/ieee1394/highlevel.h141
-rw-r--r--drivers/ieee1394/hosts.c249
-rw-r--r--drivers/ieee1394/hosts.h201
-rw-r--r--drivers/ieee1394/ieee1394-ioctl.h106
-rw-r--r--drivers/ieee1394/ieee1394.h220
-rw-r--r--drivers/ieee1394/ieee1394_core.c1380
-rw-r--r--drivers/ieee1394/ieee1394_core.h172
-rw-r--r--drivers/ieee1394/ieee1394_hotplug.h19
-rw-r--r--drivers/ieee1394/ieee1394_transactions.c595
-rw-r--r--drivers/ieee1394/ieee1394_transactions.h40
-rw-r--r--drivers/ieee1394/ieee1394_types.h69
-rw-r--r--drivers/ieee1394/iso.c568
-rw-r--r--drivers/ieee1394/iso.h195
-rw-r--r--drivers/ieee1394/nodemgr.c1901
-rw-r--r--drivers/ieee1394/nodemgr.h186
-rw-r--r--drivers/ieee1394/ohci1394.c3590
-rw-r--r--drivers/ieee1394/ohci1394.h453
-rw-r--r--drivers/ieee1394/pcilynx.c1554
-rw-r--r--drivers/ieee1394/pcilynx.h468
-rw-r--r--drivers/ieee1394/raw1394-private.h81
-rw-r--r--drivers/ieee1394/raw1394.c3096
-rw-r--r--drivers/ieee1394/raw1394.h191
-rw-r--r--drivers/ieee1394/sbp2.c2138
-rw-r--r--drivers/ieee1394/sbp2.h346
-rw-r--r--drivers/ieee1394/video1394.c1528
-rw-r--r--drivers/ieee1394/video1394.h67
-rw-r--r--drivers/infiniband/core/agent.c29
-rw-r--r--drivers/infiniband/core/cma.c313
-rw-r--r--drivers/infiniband/core/iwcm.c4
-rw-r--r--drivers/infiniband/core/mad.c27
-rw-r--r--drivers/infiniband/core/multicast.c23
-rw-r--r--drivers/infiniband/core/sa_query.c30
-rw-r--r--drivers/infiniband/core/sysfs.c15
-rw-r--r--drivers/infiniband/core/ucma.c92
-rw-r--r--drivers/infiniband/core/ud_header.c110
-rw-r--r--drivers/infiniband/core/user_mad.c2
-rw-r--r--drivers/infiniband/core/uverbs_cmd.c2
-rw-r--r--drivers/infiniband/core/uverbs_marshall.c4
-rw-r--r--drivers/infiniband/core/verbs.c16
-rw-r--r--drivers/infiniband/hw/amso1100/Kbuild4
-rw-r--r--drivers/infiniband/hw/amso1100/c2_intr.c4
-rw-r--r--drivers/infiniband/hw/cxgb3/Makefile6
-rw-r--r--drivers/infiniband/hw/cxgb3/cxio_hal.c1
-rw-r--r--drivers/infiniband/hw/cxgb3/cxio_wr.h16
-rw-r--r--drivers/infiniband/hw/cxgb3/iwch_cm.c4
-rw-r--r--drivers/infiniband/hw/cxgb3/iwch_ev.c17
-rw-r--r--drivers/infiniband/hw/cxgb3/iwch_provider.c24
-rw-r--r--drivers/infiniband/hw/cxgb3/iwch_qp.c25
-rw-r--r--drivers/infiniband/hw/cxgb3/iwch_user.h8
-rw-r--r--drivers/infiniband/hw/cxgb4/Makefile2
-rw-r--r--drivers/infiniband/hw/cxgb4/cm.c178
-rw-r--r--drivers/infiniband/hw/cxgb4/cq.c28
-rw-r--r--drivers/infiniband/hw/cxgb4/device.c191
-rw-r--r--drivers/infiniband/hw/cxgb4/ev.c2
-rw-r--r--drivers/infiniband/hw/cxgb4/iw_cxgb4.h68
-rw-r--r--drivers/infiniband/hw/cxgb4/mem.c11
-rw-r--r--drivers/infiniband/hw/cxgb4/provider.c44
-rw-r--r--drivers/infiniband/hw/cxgb4/qp.c283
-rw-r--r--drivers/infiniband/hw/cxgb4/resource.c62
-rw-r--r--drivers/infiniband/hw/cxgb4/t4.h44
-rw-r--r--drivers/infiniband/hw/cxgb4/user.h7
-rw-r--r--drivers/infiniband/hw/ehca/ehca_mrmw.c6
-rw-r--r--drivers/infiniband/hw/ipath/Makefile2
-rw-r--r--drivers/infiniband/hw/ipath/ipath_file_ops.c1
-rw-r--r--drivers/infiniband/hw/ipath/ipath_fs.c15
-rw-r--r--drivers/infiniband/hw/mlx4/ah.c163
-rw-r--r--drivers/infiniband/hw/mlx4/mad.c32
-rw-r--r--drivers/infiniband/hw/mlx4/main.c553
-rw-r--r--drivers/infiniband/hw/mlx4/mlx4_ib.h32
-rw-r--r--drivers/infiniband/hw/mlx4/mr.c2
-rw-r--r--drivers/infiniband/hw/mlx4/qp.c195
-rw-r--r--drivers/infiniband/hw/mthca/mthca_qp.c2
-rw-r--r--drivers/infiniband/hw/nes/nes_cm.c3
-rw-r--r--drivers/infiniband/hw/nes/nes_nic.c1
-rw-r--r--drivers/infiniband/hw/nes/nes_verbs.c16
-rw-r--r--drivers/infiniband/hw/qib/qib.h2
-rw-r--r--drivers/infiniband/hw/qib/qib_fs.c15
-rw-r--r--drivers/infiniband/hw/qib/qib_init.c1
-rw-r--r--drivers/infiniband/hw/qib/qib_pcie.c8
-rw-r--r--drivers/infiniband/hw/qib/qib_rc.c5
-rw-r--r--drivers/infiniband/hw/qib/qib_uc.c6
-rw-r--r--drivers/staging/autofs/root.c (renamed from fs/autofs/root.c)2
-rw-r--r--drivers/staging/autofs/symlink.c (renamed from fs/autofs/symlink.c)2
-rw-r--r--drivers/staging/autofs/waitq.c (renamed from fs/autofs/waitq.c)2
-rw-r--r--drivers/staging/batman-adv/CHANGELOG63
-rw-r--r--drivers/staging/batman-adv/Makefile2
-rw-r--r--drivers/staging/batman-adv/README50
-rw-r--r--drivers/staging/batman-adv/TODO9
-rw-r--r--drivers/staging/batman-adv/aggregation.c70
-rw-r--r--drivers/staging/batman-adv/bat_debugfs.c2
-rw-r--r--drivers/staging/batman-adv/bat_sysfs.c140
-rw-r--r--drivers/staging/batman-adv/bitarray.c22
-rw-r--r--drivers/staging/batman-adv/bitarray.h7
-rw-r--r--drivers/staging/batman-adv/hard-interface.c313
-rw-r--r--drivers/staging/batman-adv/hard-interface.h19
-rw-r--r--drivers/staging/batman-adv/hash.c6
-rw-r--r--drivers/staging/batman-adv/hash.h4
-rw-r--r--drivers/staging/batman-adv/icmp_socket.c73
-rw-r--r--drivers/staging/batman-adv/main.c146
-rw-r--r--drivers/staging/batman-adv/main.h30
-rw-r--r--drivers/staging/batman-adv/originator.c186
-rw-r--r--drivers/staging/batman-adv/originator.h8
-rw-r--r--drivers/staging/batman-adv/packet.h28
-rw-r--r--drivers/staging/batman-adv/routing.c448
-rw-r--r--drivers/staging/batman-adv/routing.h20
-rw-r--r--drivers/staging/batman-adv/send.c175
-rw-r--r--drivers/staging/batman-adv/send.h7
-rw-r--r--drivers/staging/batman-adv/soft-interface.c255
-rw-r--r--drivers/staging/batman-adv/soft-interface.h13
-rw-r--r--drivers/staging/batman-adv/sysfs-class-net-mesh8
-rw-r--r--drivers/staging/batman-adv/translation-table.c271
-rw-r--r--drivers/staging/batman-adv/translation-table.h30
-rw-r--r--drivers/staging/batman-adv/types.h103
-rw-r--r--drivers/staging/batman-adv/unicast.c269
-rw-r--r--drivers/staging/batman-adv/unicast.h39
-rw-r--r--drivers/staging/batman-adv/vis.c534
-rw-r--r--drivers/staging/batman-adv/vis.h27
-rw-r--r--drivers/staging/bcm/Adapter.h714
-rw-r--r--drivers/staging/bcm/Arp.c94
-rw-r--r--drivers/staging/bcm/Bcmchar.c2453
-rw-r--r--drivers/staging/bcm/Bcmnet.c264
-rw-r--r--drivers/staging/bcm/CmHost.c2441
-rw-r--r--drivers/staging/bcm/CmHost.h166
-rw-r--r--drivers/staging/bcm/DDRInit.c1302
-rw-r--r--drivers/staging/bcm/DDRInit.h9
-rw-r--r--drivers/staging/bcm/Debug.c41
-rw-r--r--drivers/staging/bcm/Debug.h297
-rw-r--r--drivers/staging/bcm/HandleControlPacket.c247
-rw-r--r--drivers/staging/bcm/HostMIBSInterface.h230
-rw-r--r--drivers/staging/bcm/HostMibs.h7
-rw-r--r--drivers/staging/bcm/IPv6Protocol.c400
-rw-r--r--drivers/staging/bcm/IPv6ProtocolHdr.h119
-rw-r--r--drivers/staging/bcm/InterfaceAdapter.h97
-rw-r--r--drivers/staging/bcm/InterfaceDld.c510
-rw-r--r--drivers/staging/bcm/InterfaceIdleMode.c318
-rw-r--r--drivers/staging/bcm/InterfaceIdleMode.h16
-rw-r--r--drivers/staging/bcm/InterfaceInit.c868
-rw-r--r--drivers/staging/bcm/InterfaceInit.h51
-rw-r--r--drivers/staging/bcm/InterfaceIsr.c203
-rw-r--r--drivers/staging/bcm/InterfaceIsr.h15
-rw-r--r--drivers/staging/bcm/InterfaceMacros.h18
-rw-r--r--drivers/staging/bcm/InterfaceMisc.c290
-rw-r--r--drivers/staging/bcm/InterfaceMisc.h45
-rw-r--r--drivers/staging/bcm/InterfaceRx.c256
-rw-r--r--drivers/staging/bcm/InterfaceRx.h7
-rw-r--r--drivers/staging/bcm/InterfaceTx.c259
-rw-r--r--drivers/staging/bcm/InterfaceTx.h13
-rw-r--r--drivers/staging/bcm/Interfacemain.h10
-rw-r--r--drivers/staging/bcm/Ioctl.h360
-rw-r--r--drivers/staging/bcm/Kconfig7
-rw-r--r--drivers/staging/bcm/LeakyBucket.c399
-rw-r--r--drivers/staging/bcm/Macros.h399
-rw-r--r--drivers/staging/bcm/Makefile12
-rw-r--r--drivers/staging/bcm/Misc.c2243
-rw-r--r--drivers/staging/bcm/Osal_Misc.c27
-rw-r--r--drivers/staging/bcm/PHSDefines.h125
-rw-r--r--drivers/staging/bcm/PHSModule.c1641
-rw-r--r--drivers/staging/bcm/PHSModule.h95
-rw-r--r--drivers/staging/bcm/Protocol.h151
-rw-r--r--drivers/staging/bcm/Prototypes.h322
-rw-r--r--drivers/staging/bcm/Qos.c892
-rw-r--r--drivers/staging/bcm/Queue.h31
-rw-r--r--drivers/staging/bcm/TODO15
-rw-r--r--drivers/staging/bcm/Transmit.c555
-rw-r--r--drivers/staging/bcm/Typedefs.h47
-rw-r--r--drivers/staging/bcm/Version.h35
-rw-r--r--drivers/staging/bcm/cntrl_SignalingInterface.h677
-rw-r--r--drivers/staging/bcm/headers.h109
-rw-r--r--drivers/staging/bcm/hostmibs.c164
-rw-r--r--drivers/staging/bcm/led_control.c1006
-rw-r--r--drivers/staging/bcm/led_control.h106
-rw-r--r--drivers/staging/bcm/nvm.c5614
-rw-r--r--drivers/staging/bcm/nvm.h489
-rw-r--r--drivers/staging/bcm/osal_misc.h49
-rw-r--r--drivers/staging/bcm/sort.c63
-rw-r--r--drivers/staging/bcm/target_params.h81
-rw-r--r--drivers/staging/bcm/vendorspecificextn.c146
-rw-r--r--drivers/staging/bcm/vendorspecificextn.h18
-rw-r--r--drivers/staging/brcm80211/Kconfig33
-rw-r--r--drivers/staging/brcm80211/Makefile76
-rw-r--r--drivers/staging/brcm80211/README96
-rw-r--r--drivers/staging/brcm80211/TODO51
-rw-r--r--drivers/staging/brcm80211/brcmfmac/Kconfig15
-rw-r--r--drivers/staging/brcm80211/brcmfmac/Makefile47
-rw-r--r--drivers/staging/brcm80211/brcmfmac/README36
-rw-r--r--drivers/staging/brcm80211/brcmfmac/bcmsdh.c632
-rw-r--r--drivers/staging/brcm80211/brcmfmac/bcmsdh_linux.c658
-rw-r--r--drivers/staging/brcm80211/brcmfmac/bcmsdh_sdmmc.c1238
-rw-r--r--drivers/staging/brcm80211/brcmfmac/bcmsdh_sdmmc_linux.c231
-rw-r--r--drivers/staging/brcm80211/brcmfmac/dhd.h468
-rw-r--r--drivers/staging/brcm80211/brcmfmac/dhd_bus.h82
-rw-r--r--drivers/staging/brcm80211/brcmfmac/dhd_cdc.c487
-rw-r--r--drivers/staging/brcm80211/brcmfmac/dhd_common.c1910
-rw-r--r--drivers/staging/brcm80211/brcmfmac/dhd_custom_gpio.c160
-rw-r--r--drivers/staging/brcm80211/brcmfmac/dhd_dbg.h103
-rw-r--r--drivers/staging/brcm80211/brcmfmac/dhd_linux.c2927
-rw-r--r--drivers/staging/brcm80211/brcmfmac/dhd_linux_sched.c26
-rw-r--r--drivers/staging/brcm80211/brcmfmac/dhd_proto.h92
-rw-r--r--drivers/staging/brcm80211/brcmfmac/dhd_sdio.c6103
-rw-r--r--drivers/staging/brcm80211/brcmfmac/dngl_stats.h32
-rw-r--r--drivers/staging/brcm80211/brcmfmac/wl_cfg80211.c4229
-rw-r--r--drivers/staging/brcm80211/brcmfmac/wl_cfg80211.h394
-rw-r--r--drivers/staging/brcm80211/brcmfmac/wl_iw.c3767
-rw-r--r--drivers/staging/brcm80211/brcmfmac/wl_iw.h149
-rw-r--r--drivers/staging/brcm80211/include/aidmp.h374
-rw-r--r--drivers/staging/brcm80211/include/bcm_rpc.h79
-rw-r--r--drivers/staging/brcm80211/include/bcm_rpc_tp.h137
-rw-r--r--drivers/staging/brcm80211/include/bcm_xdr.h60
-rw-r--r--drivers/staging/brcm80211/include/bcmcdc.h98
-rw-r--r--drivers/staging/brcm80211/include/bcmdefs.h200
-rw-r--r--drivers/staging/brcm80211/include/bcmdevs.h192
-rw-r--r--drivers/staging/brcm80211/include/bcmendian.h303
-rw-r--r--drivers/staging/brcm80211/include/bcmnvram.h172
-rw-r--r--drivers/staging/brcm80211/include/bcmotp.h44
-rw-r--r--drivers/staging/brcm80211/include/bcmsdbus.h113
-rw-r--r--drivers/staging/brcm80211/include/bcmsdh.h198
-rw-r--r--drivers/staging/brcm80211/include/bcmsdh_sdmmc.h110
-rw-r--r--drivers/staging/brcm80211/include/bcmsdpcm.h207
-rw-r--r--drivers/staging/brcm80211/include/bcmsrom.h34
-rw-r--r--drivers/staging/brcm80211/include/bcmsrom_fmt.h367
-rw-r--r--drivers/staging/brcm80211/include/bcmsrom_tbl.h583
-rw-r--r--drivers/staging/brcm80211/include/bcmutils.h502
-rw-r--r--drivers/staging/brcm80211/include/bcmwifi.h192
-rw-r--r--drivers/staging/brcm80211/include/d11.h1778
-rw-r--r--drivers/staging/brcm80211/include/dbus.h353
-rw-r--r--drivers/staging/brcm80211/include/dhdioctl.h107
-rw-r--r--drivers/staging/brcm80211/include/epivers.h44
-rw-r--r--drivers/staging/brcm80211/include/hnddma.h243
-rw-r--r--drivers/staging/brcm80211/include/hndpmu.h71
-rw-r--r--drivers/staging/brcm80211/include/hndrte_armtrap.h75
-rw-r--r--drivers/staging/brcm80211/include/hndrte_cons.h57
-rw-r--r--drivers/staging/brcm80211/include/hndsoc.h199
-rw-r--r--drivers/staging/brcm80211/include/linux_osl.h407
-rw-r--r--drivers/staging/brcm80211/include/linuxver.h38
-rw-r--r--drivers/staging/brcm80211/include/msgtrace.h67
-rw-r--r--drivers/staging/brcm80211/include/nicpci.h79
-rw-r--r--drivers/staging/brcm80211/include/osl.h59
-rw-r--r--drivers/staging/brcm80211/include/packed_section_end.h32
-rw-r--r--drivers/staging/brcm80211/include/packed_section_start.h36
-rw-r--r--drivers/staging/brcm80211/include/pci_core.h122
-rw-r--r--drivers/staging/brcm80211/include/pcicfg.h524
-rw-r--r--drivers/staging/brcm80211/include/pcie_core.h299
-rw-r--r--drivers/staging/brcm80211/include/proto/802.11.h322
-rw-r--r--drivers/staging/brcm80211/include/proto/802.1d.h37
-rw-r--r--drivers/staging/brcm80211/include/proto/bcmeth.h48
-rw-r--r--drivers/staging/brcm80211/include/proto/bcmevent.h217
-rw-r--r--drivers/staging/brcm80211/include/proto/ethernet.h110
-rw-r--r--drivers/staging/brcm80211/include/proto/wpa.h127
-rw-r--r--drivers/staging/brcm80211/include/qmath.h78
-rw-r--r--drivers/staging/brcm80211/include/rpc_osl.h33
-rw-r--r--drivers/staging/brcm80211/include/sbchipc.h1588
-rw-r--r--drivers/staging/brcm80211/include/sbconfig.h272
-rw-r--r--drivers/staging/brcm80211/include/sbhnddma.h315
-rw-r--r--drivers/staging/brcm80211/include/sbhndpio.h52
-rw-r--r--drivers/staging/brcm80211/include/sbpcmcia.h217
-rw-r--r--drivers/staging/brcm80211/include/sbsdio.h152
-rw-r--r--drivers/staging/brcm80211/include/sbsdpcmdev.h281
-rw-r--r--drivers/staging/brcm80211/include/sbsocram.h175
-rw-r--r--drivers/staging/brcm80211/include/sdio.h552
-rw-r--r--drivers/staging/brcm80211/include/sdioh.h63
-rw-r--r--drivers/staging/brcm80211/include/sdiovar.h44
-rw-r--r--drivers/staging/brcm80211/include/siutils.h377
-rw-r--r--drivers/staging/brcm80211/include/spid.h155
-rw-r--r--drivers/staging/brcm80211/include/wlioctl.h2025
-rw-r--r--drivers/staging/brcm80211/phy/phy_version.h36
-rw-r--r--drivers/staging/brcm80211/phy/wlc_phy_cmn.c3456
-rw-r--r--drivers/staging/brcm80211/phy/wlc_phy_hal.h262
-rw-r--r--drivers/staging/brcm80211/phy/wlc_phy_int.h1229
-rw-r--r--drivers/staging/brcm80211/phy/wlc_phy_lcn.c5320
-rw-r--r--drivers/staging/brcm80211/phy/wlc_phy_lcn.h119
-rw-r--r--drivers/staging/brcm80211/phy/wlc_phy_n.c29234
-rw-r--r--drivers/staging/brcm80211/phy/wlc_phy_radio.h1533
-rw-r--r--drivers/staging/brcm80211/phy/wlc_phyreg_n.h167
-rw-r--r--drivers/staging/brcm80211/phy/wlc_phytbl_lcn.c3638
-rw-r--r--drivers/staging/brcm80211/phy/wlc_phytbl_lcn.h49
-rw-r--r--drivers/staging/brcm80211/phy/wlc_phytbl_n.c10631
-rw-r--r--drivers/staging/brcm80211/phy/wlc_phytbl_n.h39
-rw-r--r--drivers/staging/brcm80211/sys/d11ucode_ext.h35
-rw-r--r--drivers/staging/brcm80211/sys/wl_dbg.h82
-rw-r--r--drivers/staging/brcm80211/sys/wl_export.h63
-rw-r--r--drivers/staging/brcm80211/sys/wl_mac80211.c2382
-rw-r--r--drivers/staging/brcm80211/sys/wl_mac80211.h161
-rw-r--r--drivers/staging/brcm80211/sys/wl_ucode.h37
-rw-r--r--drivers/staging/brcm80211/sys/wl_ucode_loader.c90
-rw-r--r--drivers/staging/brcm80211/sys/wlc_alloc.c373
-rw-r--r--drivers/staging/brcm80211/sys/wlc_alloc.h25
-rw-r--r--drivers/staging/brcm80211/sys/wlc_ampdu.c1411
-rw-r--r--drivers/staging/brcm80211/sys/wlc_ampdu.h40
-rw-r--r--drivers/staging/brcm80211/sys/wlc_antsel.c322
-rw-r--r--drivers/staging/brcm80211/sys/wlc_antsel.h28
-rw-r--r--drivers/staging/brcm80211/sys/wlc_bmac.c4206
-rw-r--r--drivers/staging/brcm80211/sys/wlc_bmac.h277
-rw-r--r--drivers/staging/brcm80211/sys/wlc_bsscfg.h152
-rw-r--r--drivers/staging/brcm80211/sys/wlc_cfg.h310
-rw-r--r--drivers/staging/brcm80211/sys/wlc_channel.c1599
-rw-r--r--drivers/staging/brcm80211/sys/wlc_channel.h159
-rw-r--r--drivers/staging/brcm80211/sys/wlc_event.c226
-rw-r--r--drivers/staging/brcm80211/sys/wlc_event.h51
-rw-r--r--drivers/staging/brcm80211/sys/wlc_key.h144
-rw-r--r--drivers/staging/brcm80211/sys/wlc_mac80211.c8675
-rw-r--r--drivers/staging/brcm80211/sys/wlc_mac80211.h1040
-rw-r--r--drivers/staging/brcm80211/sys/wlc_phy_shim.c243
-rw-r--r--drivers/staging/brcm80211/sys/wlc_phy_shim.h112
-rw-r--r--drivers/staging/brcm80211/sys/wlc_pub.h627
-rw-r--r--drivers/staging/brcm80211/sys/wlc_rate.c499
-rw-r--r--drivers/staging/brcm80211/sys/wlc_rate.h170
-rw-r--r--drivers/staging/brcm80211/sys/wlc_rpc.h527
-rw-r--r--drivers/staging/brcm80211/sys/wlc_rpctx.h71
-rw-r--r--drivers/staging/brcm80211/sys/wlc_scb.h84
-rw-r--r--drivers/staging/brcm80211/sys/wlc_stf.c593
-rw-r--r--drivers/staging/brcm80211/sys/wlc_stf.h42
-rw-r--r--drivers/staging/brcm80211/sys/wlc_types.h52
-rw-r--r--drivers/staging/brcm80211/util/aiutils.c708
-rw-r--r--drivers/staging/brcm80211/util/bcmotp.c965
-rw-r--r--drivers/staging/brcm80211/util/bcmsrom.c2076
-rw-r--r--drivers/staging/brcm80211/util/bcmutils.c1044
-rw-r--r--drivers/staging/brcm80211/util/bcmwifi.c189
-rw-r--r--drivers/staging/brcm80211/util/hnddma.c2690
-rw-r--r--drivers/staging/brcm80211/util/hndpmu.c2693
-rw-r--r--drivers/staging/brcm80211/util/linux_osl.c424
-rw-r--r--drivers/staging/brcm80211/util/nicpci.c881
-rw-r--r--drivers/staging/brcm80211/util/nvram/nvram_ro.c212
-rw-r--r--drivers/staging/brcm80211/util/qmath.c681
-rw-r--r--drivers/staging/brcm80211/util/sbutils.c585
-rw-r--r--drivers/staging/brcm80211/util/siutils.c2021
-rw-r--r--drivers/staging/brcm80211/util/siutils_priv.h32
-rw-r--r--drivers/staging/comedi/Makefile2
-rw-r--r--drivers/staging/comedi/comedi_fops.c3
-rw-r--r--drivers/staging/comedi/drivers/addi-data/APCI1710_82x54.c2
-rw-r--r--drivers/staging/comedi/drivers/addi-data/APCI1710_82x54.h2
-rw-r--r--drivers/staging/comedi/drivers/addi-data/APCI1710_Chrono.c2
-rw-r--r--drivers/staging/comedi/drivers/addi-data/APCI1710_Dig_io.c2
-rw-r--r--drivers/staging/comedi/drivers/addi-data/APCI1710_Dig_io.h2
-rw-r--r--drivers/staging/comedi/drivers/addi-data/APCI1710_INCCPT.c2
-rw-r--r--drivers/staging/comedi/drivers/addi-data/APCI1710_INCCPT.h2
-rw-r--r--drivers/staging/comedi/drivers/addi-data/APCI1710_Inp_cpt.c2
-rw-r--r--drivers/staging/comedi/drivers/addi-data/APCI1710_Inp_cpt.h2
-rw-r--r--drivers/staging/comedi/drivers/addi-data/APCI1710_Pwm.c2
-rw-r--r--drivers/staging/comedi/drivers/addi-data/APCI1710_Pwm.h2
-rw-r--r--drivers/staging/comedi/drivers/addi-data/APCI1710_Ssi.c2
-rw-r--r--drivers/staging/comedi/drivers/addi-data/APCI1710_Ssi.h2
-rw-r--r--drivers/staging/comedi/drivers/addi-data/APCI1710_Tor.c2
-rw-r--r--drivers/staging/comedi/drivers/addi-data/APCI1710_Tor.h2
-rw-r--r--drivers/staging/comedi/drivers/addi-data/APCI1710_Ttl.c2
-rw-r--r--drivers/staging/comedi/drivers/addi-data/APCI1710_Ttl.h2
-rw-r--r--drivers/staging/comedi/drivers/addi-data/addi_amcc_S5920.c2
-rw-r--r--drivers/staging/comedi/drivers/addi-data/addi_amcc_S5920.h2
-rw-r--r--drivers/staging/comedi/drivers/addi-data/addi_amcc_s5933.h2
-rw-r--r--drivers/staging/comedi/drivers/addi-data/addi_common.c2
-rw-r--r--drivers/staging/comedi/drivers/addi-data/addi_common.h2
-rw-r--r--drivers/staging/comedi/drivers/addi-data/addi_eeprom.c2
-rw-r--r--drivers/staging/comedi/drivers/addi-data/hwdrv_APCI1710.c6
-rw-r--r--drivers/staging/comedi/drivers/addi-data/hwdrv_APCI1710.h2
-rw-r--r--drivers/staging/comedi/drivers/addi-data/hwdrv_apci035.c4
-rw-r--r--drivers/staging/comedi/drivers/addi-data/hwdrv_apci035.h2
-rw-r--r--drivers/staging/comedi/drivers/addi-data/hwdrv_apci1032.c4
-rw-r--r--drivers/staging/comedi/drivers/addi-data/hwdrv_apci1032.h2
-rw-r--r--drivers/staging/comedi/drivers/addi-data/hwdrv_apci1500.c4
-rw-r--r--drivers/staging/comedi/drivers/addi-data/hwdrv_apci1500.h2
-rw-r--r--drivers/staging/comedi/drivers/addi-data/hwdrv_apci1516.c4
-rw-r--r--drivers/staging/comedi/drivers/addi-data/hwdrv_apci1516.h2
-rw-r--r--drivers/staging/comedi/drivers/addi-data/hwdrv_apci1564.c4
-rw-r--r--drivers/staging/comedi/drivers/addi-data/hwdrv_apci1564.h2
-rw-r--r--drivers/staging/comedi/drivers/addi-data/hwdrv_apci16xx.c2
-rw-r--r--drivers/staging/comedi/drivers/addi-data/hwdrv_apci2016.c4
-rw-r--r--drivers/staging/comedi/drivers/addi-data/hwdrv_apci2016.h2
-rw-r--r--drivers/staging/comedi/drivers/addi-data/hwdrv_apci2032.c4
-rw-r--r--drivers/staging/comedi/drivers/addi-data/hwdrv_apci2032.h2
-rw-r--r--drivers/staging/comedi/drivers/addi-data/hwdrv_apci2200.c4
-rw-r--r--drivers/staging/comedi/drivers/addi-data/hwdrv_apci2200.h2
-rw-r--r--drivers/staging/comedi/drivers/addi-data/hwdrv_apci3120.c2
-rw-r--r--drivers/staging/comedi/drivers/addi-data/hwdrv_apci3120.h2
-rw-r--r--drivers/staging/comedi/drivers/addi-data/hwdrv_apci3200.c4
-rw-r--r--drivers/staging/comedi/drivers/addi-data/hwdrv_apci3200.h2
-rw-r--r--drivers/staging/comedi/drivers/addi-data/hwdrv_apci3501.c4
-rw-r--r--drivers/staging/comedi/drivers/addi-data/hwdrv_apci3501.h2
-rw-r--r--drivers/staging/comedi/drivers/addi-data/hwdrv_apci3xxx.c2
-rw-r--r--drivers/staging/comedi/drivers/addi-data/hwdrv_apci3xxx.h2
-rw-r--r--drivers/staging/comedi/drivers/adl_pci6208.c5
-rw-r--r--drivers/staging/comedi/drivers/adl_pci9111.c219
-rw-r--r--drivers/staging/comedi/drivers/adl_pci9118.c42
-rw-r--r--drivers/staging/comedi/drivers/adv_pci1710.c13
-rw-r--r--drivers/staging/comedi/drivers/adv_pci1723.c5
-rw-r--r--drivers/staging/comedi/drivers/adv_pci_dio.c29
-rw-r--r--drivers/staging/comedi/drivers/aio_aio12_8.c6
-rw-r--r--drivers/staging/comedi/drivers/aio_iiro_16.c6
-rw-r--r--drivers/staging/comedi/drivers/cb_pcidas.c19
-rw-r--r--drivers/staging/comedi/drivers/cb_pcidas64.c62
-rw-r--r--drivers/staging/comedi/drivers/cb_pcidda.c15
-rw-r--r--drivers/staging/comedi/drivers/cb_pcidio.c9
-rw-r--r--drivers/staging/comedi/drivers/cb_pcimdas.c6
-rw-r--r--drivers/staging/comedi/drivers/dt2817.c14
-rw-r--r--drivers/staging/comedi/drivers/dt3000.c17
-rw-r--r--drivers/staging/comedi/drivers/dt9812.c4
-rw-r--r--drivers/staging/comedi/drivers/me4000.c29
-rw-r--r--drivers/staging/comedi/drivers/ni_labpc.c16
-rw-r--r--drivers/staging/comedi/drivers/rtd520.c7
-rw-r--r--drivers/staging/comedi/drivers/skel.c7
-rw-r--r--drivers/staging/comedi/drivers/usbdux.c10
-rw-r--r--drivers/staging/comedi/drivers/usbduxfast.c4
-rw-r--r--drivers/staging/cpia/Kconfig39
-rw-r--r--drivers/staging/cpia/Makefile5
-rw-r--r--drivers/staging/cpia/TODO8
-rw-r--r--drivers/staging/cpia/cpia.c (renamed from drivers/media/video/cpia.c)6
-rw-r--r--drivers/staging/cpia/cpia.h (renamed from drivers/media/video/cpia.h)0
-rw-r--r--drivers/staging/cpia/cpia_pp.c (renamed from drivers/media/video/cpia_pp.c)0
-rw-r--r--drivers/staging/cpia/cpia_usb.c (renamed from drivers/media/video/cpia_usb.c)0
-rw-r--r--drivers/staging/crystalhd/Makefile2
-rw-r--r--drivers/staging/crystalhd/crystalhd_lnx.c1
-rw-r--r--drivers/staging/crystalhd/crystalhd_lnx.h2
-rw-r--r--drivers/staging/cx25821/Kconfig2
-rw-r--r--drivers/staging/cx25821/Makefile10
-rw-r--r--drivers/staging/cx25821/cx25821-alsa.c2
-rw-r--r--drivers/staging/cx25821/cx25821-audio-upstream.c15
-rw-r--r--drivers/staging/cx25821/cx25821-audio-upstream.h4
-rw-r--r--drivers/staging/cx25821/cx25821-audio.h13
-rw-r--r--drivers/staging/cx25821/cx25821-core.c77
-rw-r--r--drivers/staging/cx25821/cx25821-i2c.c2
-rw-r--r--drivers/staging/cx25821/cx25821-medusa-reg.h10
-rw-r--r--drivers/staging/cx25821/cx25821-medusa-video.c8
-rw-r--r--drivers/staging/cx25821/cx25821-reg.h4
-rw-r--r--drivers/staging/cx25821/cx25821-video-upstream-ch2.c135
-rw-r--r--drivers/staging/cx25821/cx25821-video-upstream-ch2.h14
-rw-r--r--drivers/staging/cx25821/cx25821-video-upstream.c28
-rw-r--r--drivers/staging/cx25821/cx25821-video-upstream.h10
-rw-r--r--drivers/staging/cx25821/cx25821-video.c6
-rw-r--r--drivers/staging/cx25821/cx25821.h51
-rw-r--r--drivers/staging/cxt1e1/Kconfig2
-rw-r--r--drivers/staging/cxt1e1/Makefile8
-rw-r--r--drivers/staging/cxt1e1/functions.c4
-rw-r--r--drivers/staging/cxt1e1/linux.c20
-rw-r--r--drivers/staging/cxt1e1/musycc.c22
-rw-r--r--drivers/staging/cxt1e1/pmcc4_drv.c14
-rw-r--r--drivers/staging/cxt1e1/sbeproc.c13
-rw-r--r--drivers/staging/dream/Kconfig13
-rw-r--r--drivers/staging/dream/Makefile5
-rw-r--r--drivers/staging/dream/TODO13
-rw-r--r--drivers/staging/dream/camera/Kconfig46
-rw-r--r--drivers/staging/dream/camera/Makefile8
-rw-r--r--drivers/staging/dream/camera/msm_camera.c2181
-rw-r--r--drivers/staging/dream/camera/msm_io7x.c291
-rw-r--r--drivers/staging/dream/camera/msm_io8x.c320
-rw-r--r--drivers/staging/dream/camera/msm_v4l2.c798
-rw-r--r--drivers/staging/dream/camera/msm_vfe7x.c702
-rw-r--r--drivers/staging/dream/camera/msm_vfe7x.h255
-rw-r--r--drivers/staging/dream/camera/msm_vfe8x.c736
-rw-r--r--drivers/staging/dream/camera/msm_vfe8x.h895
-rw-r--r--drivers/staging/dream/camera/msm_vfe8x_proc.c4003
-rw-r--r--drivers/staging/dream/camera/msm_vfe8x_proc.h1549
-rw-r--r--drivers/staging/dream/camera/mt9d112.c762
-rw-r--r--drivers/staging/dream/camera/mt9d112.h36
-rw-r--r--drivers/staging/dream/camera/mt9d112_reg.c307
-rw-r--r--drivers/staging/dream/camera/mt9p012.h51
-rw-r--r--drivers/staging/dream/camera/mt9p012_fox.c1306
-rw-r--r--drivers/staging/dream/camera/mt9p012_reg.c573
-rw-r--r--drivers/staging/dream/camera/mt9t013.c1497
-rw-r--r--drivers/staging/dream/camera/mt9t013.h48
-rw-r--r--drivers/staging/dream/camera/mt9t013_reg.c266
-rw-r--r--drivers/staging/dream/camera/s5k3e2fx.c1307
-rw-r--r--drivers/staging/dream/camera/s5k3e2fx.h9
-rw-r--r--drivers/staging/dream/generic_gpio.c274
-rw-r--r--drivers/staging/dream/gpio_axis.c181
-rw-r--r--drivers/staging/dream/gpio_event.c224
-rw-r--r--drivers/staging/dream/gpio_input.c337
-rw-r--r--drivers/staging/dream/gpio_matrix.c399
-rw-r--r--drivers/staging/dream/gpio_output.c84
-rw-r--r--drivers/staging/dream/include/linux/android_pmem.h80
-rw-r--r--drivers/staging/dream/include/linux/gpio_event.h154
-rw-r--r--drivers/staging/dream/include/linux/msm_adsp.h84
-rw-r--r--drivers/staging/dream/include/linux/msm_audio.h115
-rw-r--r--drivers/staging/dream/include/linux/msm_rpcrouter.h47
-rw-r--r--drivers/staging/dream/include/linux/wakelock.h91
-rw-r--r--drivers/staging/dream/include/mach/camera.h279
-rw-r--r--drivers/staging/dream/include/mach/msm_adsp.h112
-rw-r--r--drivers/staging/dream/include/mach/msm_rpcrouter.h179
-rw-r--r--drivers/staging/dream/include/mach/msm_smd.h107
-rw-r--r--drivers/staging/dream/include/mach/qdsp5/qdsp5audplaycmdi.h94
-rw-r--r--drivers/staging/dream/include/mach/qdsp5/qdsp5audplaymsg.h70
-rw-r--r--drivers/staging/dream/include/mach/qdsp5/qdsp5audppcmdi.h914
-rw-r--r--drivers/staging/dream/include/mach/qdsp5/qdsp5audppmsg.h318
-rw-r--r--drivers/staging/dream/include/mach/qdsp5/qdsp5audpreproccmdi.h256
-rw-r--r--drivers/staging/dream/include/mach/qdsp5/qdsp5audpreprocmsg.h85
-rw-r--r--drivers/staging/dream/include/mach/qdsp5/qdsp5audreccmdi.h176
-rw-r--r--drivers/staging/dream/include/mach/qdsp5/qdsp5audrecmsg.h127
-rw-r--r--drivers/staging/dream/include/mach/qdsp5/qdsp5jpegcmdi.h376
-rw-r--r--drivers/staging/dream/include/mach/qdsp5/qdsp5jpegmsg.h177
-rw-r--r--drivers/staging/dream/include/mach/qdsp5/qdsp5lpmcmdi.h82
-rw-r--r--drivers/staging/dream/include/mach/qdsp5/qdsp5lpmmsg.h80
-rw-r--r--drivers/staging/dream/include/mach/qdsp5/qdsp5vdeccmdi.h235
-rw-r--r--drivers/staging/dream/include/mach/qdsp5/qdsp5vdecmsg.h107
-rw-r--r--drivers/staging/dream/include/mach/qdsp5/qdsp5venccmdi.h212
-rw-r--r--drivers/staging/dream/include/mach/qdsp5/qdsp5vfecmdi.h910
-rw-r--r--drivers/staging/dream/include/mach/qdsp5/qdsp5vfemsg.h290
-rw-r--r--drivers/staging/dream/include/media/msm_camera.h388
-rw-r--r--drivers/staging/dream/pmem.c1333
-rw-r--r--drivers/staging/dream/qdsp5/Makefile18
-rw-r--r--drivers/staging/dream/qdsp5/adsp.c1159
-rw-r--r--drivers/staging/dream/qdsp5/adsp.h369
-rw-r--r--drivers/staging/dream/qdsp5/adsp_6210.c283
-rw-r--r--drivers/staging/dream/qdsp5/adsp_6220.c284
-rw-r--r--drivers/staging/dream/qdsp5/adsp_6225.c328
-rw-r--r--drivers/staging/dream/qdsp5/adsp_driver.c643
-rw-r--r--drivers/staging/dream/qdsp5/adsp_info.c121
-rw-r--r--drivers/staging/dream/qdsp5/adsp_jpeg_patch_event.c31
-rw-r--r--drivers/staging/dream/qdsp5/adsp_jpeg_verify_cmd.c182
-rw-r--r--drivers/staging/dream/qdsp5/adsp_lpm_verify_cmd.c65
-rw-r--r--drivers/staging/dream/qdsp5/adsp_vfe_patch_event.c54
-rw-r--r--drivers/staging/dream/qdsp5/adsp_vfe_verify_cmd.c239
-rw-r--r--drivers/staging/dream/qdsp5/adsp_video_verify_cmd.c163
-rw-r--r--drivers/staging/dream/qdsp5/adsp_videoenc_verify_cmd.c235
-rw-r--r--drivers/staging/dream/qdsp5/audio_aac.c1054
-rw-r--r--drivers/staging/dream/qdsp5/audio_amrnb.c875
-rw-r--r--drivers/staging/dream/qdsp5/audio_evrc.c847
-rw-r--r--drivers/staging/dream/qdsp5/audio_in.c970
-rw-r--r--drivers/staging/dream/qdsp5/audio_mp3.c972
-rw-r--r--drivers/staging/dream/qdsp5/audio_out.c841
-rw-r--r--drivers/staging/dream/qdsp5/audio_qcelp.c858
-rw-r--r--drivers/staging/dream/qdsp5/audmgr.c314
-rw-r--r--drivers/staging/dream/qdsp5/audmgr.h215
-rw-r--r--drivers/staging/dream/qdsp5/audmgr_new.h213
-rw-r--r--drivers/staging/dream/qdsp5/audpp.c429
-rw-r--r--drivers/staging/dream/qdsp5/evlog.h134
-rw-r--r--drivers/staging/dream/qdsp5/snd.c280
-rw-r--r--drivers/staging/dream/synaptics_i2c_rmi.c649
-rw-r--r--drivers/staging/dream/synaptics_i2c_rmi.h53
-rw-r--r--drivers/staging/dt3155v4l/dt3155v4l.c8
-rw-r--r--drivers/staging/easycap/Makefile14
-rw-r--r--drivers/staging/easycap/easycap.h2
-rw-r--r--drivers/staging/et131x/Makefile2
-rw-r--r--drivers/staging/et131x/et131x_initpci.c2
-rw-r--r--drivers/staging/frontier/alphatrack.c2
-rw-r--r--drivers/staging/frontier/tranzport.c56
-rw-r--r--drivers/staging/ft1000/Kconfig22
-rw-r--r--drivers/staging/ft1000/Makefile3
-rw-r--r--drivers/staging/ft1000/TODO9
-rw-r--r--drivers/staging/ft1000/ft1000-pcmcia/Makefile3
-rw-r--r--drivers/staging/ft1000/ft1000-pcmcia/boot.h158
-rw-r--r--drivers/staging/ft1000/ft1000-pcmcia/ft1000.conf14
-rw-r--r--drivers/staging/ft1000/ft1000-pcmcia/ft1000.h409
-rw-r--r--drivers/staging/ft1000/ft1000-pcmcia/ft1000.imgbin0 -> 305770 bytes
-rw-r--r--drivers/staging/ft1000/ft1000-pcmcia/ft1000_cs.c513
-rw-r--r--drivers/staging/ft1000/ft1000-pcmcia/ft1000_cs.h1
-rw-r--r--drivers/staging/ft1000/ft1000-pcmcia/ft1000_dev.h66
-rw-r--r--drivers/staging/ft1000/ft1000-pcmcia/ft1000_dnld.c940
-rw-r--r--drivers/staging/ft1000/ft1000-pcmcia/ft1000_hw.c2294
-rw-r--r--drivers/staging/ft1000/ft1000-pcmcia/ft1000_proc.c219
-rw-r--r--drivers/staging/ft1000/ft1000-usb/Makefile3
-rw-r--r--drivers/staging/ft1000/ft1000-usb/ft1000_chdev.c935
-rw-r--r--drivers/staging/ft1000/ft1000-usb/ft1000_download.c1248
-rw-r--r--drivers/staging/ft1000/ft1000-usb/ft1000_hw.c2326
-rw-r--r--drivers/staging/ft1000/ft1000-usb/ft1000_hw.h10
-rw-r--r--drivers/staging/ft1000/ft1000-usb/ft1000_ioctl.h139
-rw-r--r--drivers/staging/ft1000/ft1000-usb/ft1000_proc.c232
-rw-r--r--drivers/staging/ft1000/ft1000-usb/ft1000_usb.c276
-rw-r--r--drivers/staging/ft1000/ft1000-usb/ft1000_usb.h608
-rw-r--r--drivers/staging/ft1000/ft1000-usb/ft3000.imgbin0 -> 280414 bytes
-rw-r--r--drivers/staging/go7007/Kconfig2
-rw-r--r--drivers/staging/go7007/Makefile10
-rw-r--r--drivers/staging/go7007/go7007-driver.c55
-rw-r--r--drivers/staging/go7007/go7007-usb.c2
-rw-r--r--drivers/staging/go7007/go7007-v4l2.c19
-rw-r--r--drivers/staging/go7007/s2250-board.c34
-rw-r--r--drivers/staging/go7007/wis-ov7640.c1
-rw-r--r--drivers/staging/go7007/wis-saa7113.c1
-rw-r--r--drivers/staging/go7007/wis-saa7115.c1
-rw-r--r--drivers/staging/go7007/wis-sony-tuner.c1
-rw-r--r--drivers/staging/go7007/wis-tw2804.c1
-rw-r--r--drivers/staging/go7007/wis-tw9903.c1
-rw-r--r--drivers/staging/go7007/wis-uda1342.c1
-rw-r--r--drivers/staging/hv/Makefile10
-rw-r--r--drivers/staging/hv/TODO2
-rw-r--r--drivers/staging/hv/blkvsc.c2
-rw-r--r--drivers/staging/hv/blkvsc_drv.c2
-rw-r--r--drivers/staging/hv/channel.c841
-rw-r--r--drivers/staging/hv/channel.h122
-rw-r--r--drivers/staging/hv/channel_interface.c152
-rw-r--r--drivers/staging/hv/channel_interface.h35
-rw-r--r--drivers/staging/hv/channel_mgmt.c342
-rw-r--r--drivers/staging/hv/channel_mgmt.h10
-rw-r--r--drivers/staging/hv/connection.c4
-rw-r--r--drivers/staging/hv/hv_utils.c15
-rw-r--r--drivers/staging/hv/netvsc.c137
-rw-r--r--drivers/staging/hv/netvsc_drv.c4
-rw-r--r--drivers/staging/hv/storvsc.c93
-rw-r--r--drivers/staging/hv/storvsc_drv.c9
-rw-r--r--drivers/staging/hv/vmbus.c35
-rw-r--r--drivers/staging/hv/vmbus.h2
-rw-r--r--drivers/staging/hv/vmbus_api.h55
-rw-r--r--drivers/staging/hv/vmbus_drv.c84
-rw-r--r--drivers/staging/hv/vmbus_private.h3
-rw-r--r--drivers/staging/iio/Documentation/generic_buffer.c318
-rw-r--r--drivers/staging/iio/Documentation/iio_utils.h396
-rw-r--r--drivers/staging/iio/Documentation/lis3l02dqbuffersimple.c238
-rw-r--r--drivers/staging/iio/Documentation/overview.txt17
-rw-r--r--drivers/staging/iio/Documentation/ring.txt6
-rw-r--r--drivers/staging/iio/Documentation/sysfs-bus-iio390
-rw-r--r--drivers/staging/iio/Documentation/sysfs-bus-iio-light64
-rw-r--r--drivers/staging/iio/Documentation/sysfs-class-iio294
-rw-r--r--drivers/staging/iio/Documentation/userspace.txt56
-rw-r--r--drivers/staging/iio/accel/accel.h172
-rw-r--r--drivers/staging/iio/accel/adis16209_core.c48
-rw-r--r--drivers/staging/iio/accel/adis16209_ring.c90
-rw-r--r--drivers/staging/iio/accel/adis16209_trigger.c2
-rw-r--r--drivers/staging/iio/accel/adis16220_core.c28
-rw-r--r--drivers/staging/iio/accel/adis16240_core.c37
-rw-r--r--drivers/staging/iio/accel/adis16240_ring.c72
-rw-r--r--drivers/staging/iio/accel/adis16240_trigger.c2
-rw-r--r--drivers/staging/iio/accel/inclinometer.h2
-rw-r--r--drivers/staging/iio/accel/lis3l02dq_core.c66
-rw-r--r--drivers/staging/iio/accel/lis3l02dq_ring.c79
-rw-r--r--drivers/staging/iio/accel/sca3000.h2
-rw-r--r--drivers/staging/iio/accel/sca3000_core.c71
-rw-r--r--drivers/staging/iio/accel/sca3000_ring.c91
-rw-r--r--drivers/staging/iio/adc/Kconfig35
-rw-r--r--drivers/staging/iio/adc/Makefile8
-rw-r--r--drivers/staging/iio/adc/ad7476.h77
-rw-r--r--drivers/staging/iio/adc/ad7476_core.c293
-rw-r--r--drivers/staging/iio/adc/ad7476_ring.c207
-rw-r--r--drivers/staging/iio/adc/ad799x.h159
-rw-r--r--drivers/staging/iio/adc/ad799x_core.c923
-rw-r--r--drivers/staging/iio/adc/ad799x_ring.c240
-rw-r--r--drivers/staging/iio/adc/adc.h19
-rw-r--r--drivers/staging/iio/adc/max1363_core.c208
-rw-r--r--drivers/staging/iio/adc/max1363_ring.c22
-rw-r--r--drivers/staging/iio/chrdev.h2
-rw-r--r--drivers/staging/iio/gyro/adis16260_core.c36
-rw-r--r--drivers/staging/iio/gyro/adis16260_ring.c68
-rw-r--r--drivers/staging/iio/gyro/adis16260_trigger.c2
-rw-r--r--drivers/staging/iio/gyro/gyro.h46
-rw-r--r--drivers/staging/iio/iio.h106
-rw-r--r--drivers/staging/iio/imu/adis16300_core.c50
-rw-r--r--drivers/staging/iio/imu/adis16300_ring.c95
-rw-r--r--drivers/staging/iio/imu/adis16300_trigger.c2
-rw-r--r--drivers/staging/iio/imu/adis16350_core.c59
-rw-r--r--drivers/staging/iio/imu/adis16350_ring.c113
-rw-r--r--drivers/staging/iio/imu/adis16350_trigger.c2
-rw-r--r--drivers/staging/iio/imu/adis16400_core.c69
-rw-r--r--drivers/staging/iio/imu/adis16400_ring.c122
-rw-r--r--drivers/staging/iio/imu/adis16400_trigger.c2
-rw-r--r--drivers/staging/iio/industrialio-core.c90
-rw-r--r--drivers/staging/iio/industrialio-ring.c61
-rw-r--r--drivers/staging/iio/light/Kconfig12
-rw-r--r--drivers/staging/iio/light/Makefile1
-rw-r--r--drivers/staging/iio/light/isl29018.c563
-rw-r--r--drivers/staging/iio/light/light.h7
-rw-r--r--drivers/staging/iio/light/tsl2563.c35
-rw-r--r--drivers/staging/iio/magnetometer/Kconfig10
-rw-r--r--drivers/staging/iio/magnetometer/Makefile1
-rw-r--r--drivers/staging/iio/magnetometer/ak8975.c558
-rw-r--r--drivers/staging/iio/magnetometer/hmc5843.c61
-rw-r--r--drivers/staging/iio/magnetometer/magnet.h12
-rw-r--r--drivers/staging/iio/ring_generic.h241
-rw-r--r--drivers/staging/iio/ring_sw.c79
-rw-r--r--drivers/staging/iio/ring_sw.h12
-rw-r--r--drivers/staging/iio/sysfs.h162
-rw-r--r--drivers/staging/iio/trigger.h6
-rw-r--r--drivers/staging/iio/trigger/iio-trig-gpio.c2
-rw-r--r--drivers/staging/iio/trigger/iio-trig-periodic-rtc.c12
-rw-r--r--drivers/staging/intel_sst/Kconfig18
-rw-r--r--drivers/staging/intel_sst/Makefile7
-rw-r--r--drivers/staging/intel_sst/TODO13
-rw-r--r--drivers/staging/intel_sst/intel_sst.c512
-rw-r--r--drivers/staging/intel_sst/intel_sst.h131
-rw-r--r--drivers/staging/intel_sst/intel_sst_app_interface.c1257
-rw-r--r--drivers/staging/intel_sst/intel_sst_common.h618
-rw-r--r--drivers/staging/intel_sst/intel_sst_drv_interface.c493
-rw-r--r--drivers/staging/intel_sst/intel_sst_dsp.c486
-rw-r--r--drivers/staging/intel_sst/intel_sst_fw_ipc.h392
-rw-r--r--drivers/staging/intel_sst/intel_sst_ioctl.h435
-rw-r--r--drivers/staging/intel_sst/intel_sst_ipc.c656
-rw-r--r--drivers/staging/intel_sst/intel_sst_pvt.c311
-rw-r--r--drivers/staging/intel_sst/intel_sst_stream.c576
-rw-r--r--drivers/staging/intel_sst/intel_sst_stream_encoded.c1275
-rw-r--r--drivers/staging/intel_sst/intelmid.c1220
-rw-r--r--drivers/staging/intel_sst/intelmid.h186
-rw-r--r--drivers/staging/intel_sst/intelmid_ctrl.c629
-rw-r--r--drivers/staging/intel_sst/intelmid_msic_control.c410
-rw-r--r--drivers/staging/intel_sst/intelmid_pvt.c174
-rw-r--r--drivers/staging/intel_sst/intelmid_snd_control.h114
-rw-r--r--drivers/staging/intel_sst/intelmid_v0_control.c771
-rw-r--r--drivers/staging/intel_sst/intelmid_v1_control.c900
-rw-r--r--drivers/staging/intel_sst/intelmid_v2_control.c1001
-rw-r--r--drivers/staging/intel_sst/jack.h10
-rw-r--r--drivers/staging/keucr/Kconfig13
-rw-r--r--drivers/staging/keucr/Makefile16
-rw-r--r--drivers/staging/keucr/TODO14
-rw-r--r--drivers/staging/keucr/common.h26
-rw-r--r--drivers/staging/keucr/init.c543
-rw-r--r--drivers/staging/keucr/init.h2066
-rw-r--r--drivers/staging/keucr/ms.c956
-rw-r--r--drivers/staging/keucr/ms.h381
-rw-r--r--drivers/staging/keucr/msscsi.c324
-rw-r--r--drivers/staging/keucr/scsiglue.c450
-rw-r--r--drivers/staging/keucr/scsiglue.h10
-rw-r--r--drivers/staging/keucr/sdscsi.c210
-rw-r--r--drivers/staging/keucr/smcommon.h33
-rw-r--r--drivers/staging/keucr/smil.h290
-rw-r--r--drivers/staging/keucr/smilecc.c201
-rw-r--r--drivers/staging/keucr/smilmain.c1852
-rw-r--r--drivers/staging/keucr/smilsub.c1661
-rw-r--r--drivers/staging/keucr/smscsi.c193
-rw-r--r--drivers/staging/keucr/transport.c783
-rw-r--r--drivers/staging/keucr/transport.h144
-rw-r--r--drivers/staging/keucr/usb.c709
-rw-r--r--drivers/staging/keucr/usb.h238
-rw-r--r--drivers/staging/line6/Kconfig67
-rw-r--r--drivers/staging/line6/Makefile2
-rw-r--r--drivers/staging/line6/audio.c16
-rw-r--r--drivers/staging/line6/audio.h7
-rw-r--r--drivers/staging/line6/capture.c239
-rw-r--r--drivers/staging/line6/capture.h25
-rw-r--r--drivers/staging/line6/config.h6
-rw-r--r--drivers/staging/line6/control.c242
-rw-r--r--drivers/staging/line6/control.h190
-rw-r--r--drivers/staging/line6/driver.c566
-rw-r--r--drivers/staging/line6/driver.h59
-rw-r--r--drivers/staging/line6/dumprequest.c60
-rw-r--r--drivers/staging/line6/dumprequest.h28
-rw-r--r--drivers/staging/line6/midi.c100
-rw-r--r--drivers/staging/line6/midi.h9
-rw-r--r--drivers/staging/line6/midibuf.c90
-rw-r--r--drivers/staging/line6/midibuf.h31
-rw-r--r--drivers/staging/line6/pcm.c319
-rw-r--r--drivers/staging/line6/pcm.h145
-rw-r--r--drivers/staging/line6/playback.c313
-rw-r--r--drivers/staging/line6/playback.h29
-rw-r--r--drivers/staging/line6/pod.c780
-rw-r--r--drivers/staging/line6/pod.h127
-rw-r--r--drivers/staging/line6/revision.h2
-rw-r--r--drivers/staging/line6/toneport.c283
-rw-r--r--drivers/staging/line6/toneport.h33
-rw-r--r--drivers/staging/line6/usbdefs.h44
-rw-r--r--drivers/staging/line6/variax.c371
-rw-r--r--drivers/staging/line6/variax.h84
-rw-r--r--drivers/staging/lirc/Kconfig2
-rw-r--r--drivers/staging/lirc/lirc_igorplugusb.c190
-rw-r--r--drivers/staging/lirc/lirc_imon.c16
-rw-r--r--drivers/staging/lirc/lirc_it87.c23
-rw-r--r--drivers/staging/lirc/lirc_ite8709.c6
-rw-r--r--drivers/staging/lirc/lirc_parallel.c62
-rw-r--r--drivers/staging/lirc/lirc_sasem.c15
-rw-r--r--drivers/staging/lirc/lirc_serial.c24
-rw-r--r--drivers/staging/lirc/lirc_sir.c24
-rw-r--r--drivers/staging/lirc/lirc_zilog.c13
-rw-r--r--drivers/staging/mrst-touchscreen/Kconfig7
-rw-r--r--drivers/staging/mrst-touchscreen/Makefile3
-rw-r--r--drivers/staging/mrst-touchscreen/TODO2
-rw-r--r--drivers/staging/mrst-touchscreen/intel-mid-touch.c864
-rw-r--r--drivers/staging/msm/mddihost.c2
-rw-r--r--drivers/staging/msm/mdp.c10
-rw-r--r--drivers/staging/msm/msm_fb.c6
-rw-r--r--drivers/staging/msm/staging-devices.c2
-rw-r--r--drivers/staging/octeon/Makefile20
-rw-r--r--drivers/staging/octeon/cvmx-fpa.c2
-rw-r--r--drivers/staging/octeon/cvmx-fpa.h2
-rw-r--r--drivers/staging/octeon/cvmx-helper-board.c19
-rw-r--r--drivers/staging/octeon/cvmx-helper-board.h29
-rw-r--r--drivers/staging/octeon/ethernet.c13
-rw-r--r--drivers/staging/olpc_dcon/Kconfig8
-rw-r--r--drivers/staging/olpc_dcon/Makefile1
-rw-r--r--drivers/staging/olpc_dcon/TODO17
-rw-r--r--drivers/staging/olpc_dcon/olpc_dcon.c866
-rw-r--r--drivers/staging/olpc_dcon/olpc_dcon.h75
-rw-r--r--drivers/staging/olpc_dcon/olpc_dcon_xo_1.c171
-rw-r--r--drivers/staging/olpc_dcon/olpc_dcon_xo_1_5.c219
-rw-r--r--drivers/staging/otus/80211core/amsdu.c129
-rw-r--r--drivers/staging/otus/80211core/cagg.c3621
-rw-r--r--drivers/staging/otus/80211core/cagg.h435
-rw-r--r--drivers/staging/otus/80211core/ccmd.c1766
-rw-r--r--drivers/staging/otus/80211core/cfunc.c1226
-rw-r--r--drivers/staging/otus/80211core/cfunc.h449
-rw-r--r--drivers/staging/otus/80211core/chb.c200
-rw-r--r--drivers/staging/otus/80211core/cic.c499
-rw-r--r--drivers/staging/otus/80211core/cinit.c1912
-rw-r--r--drivers/staging/otus/80211core/cmm.c2183
-rw-r--r--drivers/staging/otus/80211core/cmmap.c2435
-rw-r--r--drivers/staging/otus/80211core/cmmsta.c5817
-rw-r--r--drivers/staging/otus/80211core/coid.c2696
-rw-r--r--drivers/staging/otus/80211core/cprecomp.h32
-rw-r--r--drivers/staging/otus/80211core/cpsmgr.c730
-rw-r--r--drivers/staging/otus/80211core/cscanmgr.c533
-rw-r--r--drivers/staging/otus/80211core/ctkip.c599
-rw-r--r--drivers/staging/otus/80211core/ctxrx.c4115
-rw-r--r--drivers/staging/otus/80211core/cwep.c299
-rw-r--r--drivers/staging/otus/80211core/cwm.c131
-rw-r--r--drivers/staging/otus/80211core/cwm.h45
-rw-r--r--drivers/staging/otus/80211core/freqctrl.c259
-rw-r--r--drivers/staging/otus/80211core/ledmgr.c556
-rw-r--r--drivers/staging/otus/80211core/performance.c431
-rw-r--r--drivers/staging/otus/80211core/performance.h97
-rw-r--r--drivers/staging/otus/80211core/pub_usb.h102
-rw-r--r--drivers/staging/otus/80211core/pub_zfi.h820
-rw-r--r--drivers/staging/otus/80211core/pub_zfw.h93
-rw-r--r--drivers/staging/otus/80211core/queue.c304
-rw-r--r--drivers/staging/otus/80211core/queue.h37
-rw-r--r--drivers/staging/otus/80211core/ratectrl.c875
-rw-r--r--drivers/staging/otus/80211core/ratectrl.h37
-rw-r--r--drivers/staging/otus/80211core/struct.h1315
-rw-r--r--drivers/staging/otus/80211core/wlan.h595
-rw-r--r--drivers/staging/otus/Kconfig34
-rw-r--r--drivers/staging/otus/Makefile67
-rw-r--r--drivers/staging/otus/TODO8
-rw-r--r--drivers/staging/otus/apdbg.c379
-rw-r--r--drivers/staging/otus/athr_common.h141
-rw-r--r--drivers/staging/otus/hal/hpDKfwu.c832
-rw-r--r--drivers/staging/otus/hal/hpani.c721
-rw-r--r--drivers/staging/otus/hal/hpani.h419
-rw-r--r--drivers/staging/otus/hal/hpfw2.c1018
-rw-r--r--drivers/staging/otus/hal/hpfwbu.c5269
-rw-r--r--drivers/staging/otus/hal/hpfwspiu.c655
-rw-r--r--drivers/staging/otus/hal/hpfwu.c1017
-rw-r--r--drivers/staging/otus/hal/hpfwu.c.drv_ba_resend742
-rw-r--r--drivers/staging/otus/hal/hpfwu_2k.c1016
-rw-r--r--drivers/staging/otus/hal/hpfwu_BA.c874
-rw-r--r--drivers/staging/otus/hal/hpfwu_FB50_mdk.c721
-rw-r--r--drivers/staging/otus/hal/hpfwu_OTUS_RC.c715
-rw-r--r--drivers/staging/otus/hal/hpfwu_txstream.c1017
-rw-r--r--drivers/staging/otus/hal/hpfwuinit.c240
-rw-r--r--drivers/staging/otus/hal/hpmain.c4672
-rw-r--r--drivers/staging/otus/hal/hpreg.c2270
-rw-r--r--drivers/staging/otus/hal/hpreg.h524
-rw-r--r--drivers/staging/otus/hal/hprw.c1568
-rw-r--r--drivers/staging/otus/hal/hpusb.c1589
-rw-r--r--drivers/staging/otus/hal/hpusb.h437
-rw-r--r--drivers/staging/otus/hal/otus.ini414
-rw-r--r--drivers/staging/otus/ioctl.c2756
-rw-r--r--drivers/staging/otus/oal_dt.h60
-rw-r--r--drivers/staging/otus/oal_marc.h143
-rw-r--r--drivers/staging/otus/usbdrv.c1143
-rw-r--r--drivers/staging/otus/usbdrv.h245
-rw-r--r--drivers/staging/otus/wrap_buf.c111
-rw-r--r--drivers/staging/otus/wrap_dbg.c95
-rw-r--r--drivers/staging/otus/wrap_ev.c292
-rw-r--r--drivers/staging/otus/wrap_mem.c105
-rw-r--r--drivers/staging/otus/wrap_mis.c103
-rw-r--r--drivers/staging/otus/wrap_pkt.c147
-rw-r--r--drivers/staging/otus/wrap_sec.c125
-rw-r--r--drivers/staging/otus/wrap_usb.c187
-rw-r--r--drivers/staging/otus/wwrap.c1048
-rw-r--r--drivers/staging/otus/zdcompat.h45
-rw-r--r--drivers/staging/otus/zdusb.c226
-rw-r--r--drivers/staging/otus/zdusb.h47
-rw-r--r--drivers/staging/phison/phison.c2
-rw-r--r--drivers/staging/pohmelfs/config.c34
-rw-r--r--drivers/staging/pohmelfs/inode.c15
-rw-r--r--drivers/staging/quatech_usb2/quatech_usb2.c2
-rw-r--r--drivers/staging/quickstart/quickstart.c17
-rw-r--r--drivers/staging/rt2860/Makefile10
-rw-r--r--drivers/staging/rt2860/ap.h20
-rw-r--r--drivers/staging/rt2860/common/ba_action.c5
-rw-r--r--drivers/staging/rt2860/common/cmm_aes.c2
-rw-r--r--drivers/staging/rt2860/common/cmm_mac_usb.c5
-rw-r--r--drivers/staging/rt2860/common/cmm_wpa.c4
-rw-r--r--drivers/staging/rt2860/common/mlme.c7
-rw-r--r--drivers/staging/rt2860/common/rtmp_init.c20
-rw-r--r--drivers/staging/rt2860/eeprom.h4
-rw-r--r--drivers/staging/rt2860/iface/rtmp_pci.h42
-rw-r--r--drivers/staging/rt2860/iface/rtmp_usb.h110
-rw-r--r--drivers/staging/rt2860/oid.h2
-rw-r--r--drivers/staging/rt2860/rt_linux.c7
-rw-r--r--drivers/staging/rt2860/rt_linux.h3
-rw-r--r--drivers/staging/rt2860/rtmp.h26
-rw-r--r--drivers/staging/rt2860/sta/assoc.c7
-rw-r--r--drivers/staging/rt2860/sta/connect.c24
-rw-r--r--drivers/staging/rt2860/sta/rtmp_data.c5
-rw-r--r--drivers/staging/rt2860/sta/sync.c12
-rw-r--r--drivers/staging/rt2860/sta_ioctl.c11
-rw-r--r--drivers/staging/rt2860/usb_main_dev.c2
-rw-r--r--drivers/staging/rt2870/Makefile10
-rw-r--r--drivers/staging/rt2870/common/rtusb_io.c11
-rw-r--r--drivers/staging/rtl8187se/Makefile16
-rw-r--r--drivers/staging/rtl8187se/ieee80211/ieee80211.h1
-rw-r--r--drivers/staging/rtl8187se/ieee80211/ieee80211_softmac.c4
-rw-r--r--drivers/staging/rtl8187se/r8180_core.c10
-rw-r--r--drivers/staging/rtl8187se/r8180_dm.h12
-rw-r--r--drivers/staging/rtl8187se/r8180_hw.h432
-rw-r--r--drivers/staging/rtl8187se/r8180_rtl8225.h20
-rw-r--r--drivers/staging/rtl8187se/r8180_wx.c722
-rw-r--r--drivers/staging/rtl8187se/r8185b_init.c1538
-rw-r--r--drivers/staging/rtl8192e/Makefile14
-rw-r--r--drivers/staging/rtl8192e/dot11d.h8
-rw-r--r--drivers/staging/rtl8192e/ieee80211.h213
-rw-r--r--drivers/staging/rtl8192e/ieee80211/ieee80211_module.c71
-rw-r--r--drivers/staging/rtl8192e/ieee80211/ieee80211_rx.c2
-rw-r--r--drivers/staging/rtl8192e/ieee80211/ieee80211_softmac.c18
-rw-r--r--drivers/staging/rtl8192e/r8180_93cx6.c13
-rw-r--r--drivers/staging/rtl8192e/r8190_rtl8256.c2
-rw-r--r--drivers/staging/rtl8192e/r8190_rtl8256.h12
-rw-r--r--drivers/staging/rtl8192e/r8192E.h8
-rw-r--r--drivers/staging/rtl8192e/r8192E_core.c1426
-rw-r--r--drivers/staging/rtl8192e/r8192E_dm.c480
-rw-r--r--drivers/staging/rtl8192e/r8192E_dm.h2
-rw-r--r--drivers/staging/rtl8192e/r8192E_wx.c38
-rw-r--r--drivers/staging/rtl8192e/r8192E_wx.h2
-rw-r--r--drivers/staging/rtl8192e/r8192_pm.c6
-rw-r--r--drivers/staging/rtl8192e/r819xE_cmdpkt.c310
-rw-r--r--drivers/staging/rtl8192e/r819xE_cmdpkt.h2
-rw-r--r--drivers/staging/rtl8192e/r819xE_firmware.c40
-rw-r--r--drivers/staging/rtl8192e/r819xE_phy.c39
-rw-r--r--drivers/staging/rtl8192e/r819xE_phy.h50
-rw-r--r--drivers/staging/rtl8192su/Kconfig9
-rw-r--r--drivers/staging/rtl8192su/Makefile39
-rw-r--r--drivers/staging/rtl8192su/TODO21
-rw-r--r--drivers/staging/rtl8192su/authors1
-rw-r--r--drivers/staging/rtl8192su/ieee80211/Makefile30
-rw-r--r--drivers/staging/rtl8192su/ieee80211/dot11d.c224
-rw-r--r--drivers/staging/rtl8192su/ieee80211/dot11d.h111
-rw-r--r--drivers/staging/rtl8192su/ieee80211/ieee80211.h1934
-rw-r--r--drivers/staging/rtl8192su/ieee80211/ieee80211_crypt.c242
-rw-r--r--drivers/staging/rtl8192su/ieee80211/ieee80211_crypt.h86
-rw-r--r--drivers/staging/rtl8192su/ieee80211/ieee80211_crypt_ccmp.c471
-rw-r--r--drivers/staging/rtl8192su/ieee80211/ieee80211_crypt_tkip.c776
-rw-r--r--drivers/staging/rtl8192su/ieee80211/ieee80211_crypt_wep.c294
-rw-r--r--drivers/staging/rtl8192su/ieee80211/ieee80211_module.c301
-rw-r--r--drivers/staging/rtl8192su/ieee80211/ieee80211_r8192s.h449
-rw-r--r--drivers/staging/rtl8192su/ieee80211/ieee80211_rx.c2580
-rw-r--r--drivers/staging/rtl8192su/ieee80211/ieee80211_softmac.c3291
-rw-r--r--drivers/staging/rtl8192su/ieee80211/ieee80211_softmac_wx.c625
-rw-r--r--drivers/staging/rtl8192su/ieee80211/ieee80211_tx.c916
-rw-r--r--drivers/staging/rtl8192su/ieee80211/ieee80211_wx.c772
-rw-r--r--drivers/staging/rtl8192su/ieee80211/readme162
-rw-r--r--drivers/staging/rtl8192su/ieee80211/rtl819x_BA.h79
-rw-r--r--drivers/staging/rtl8192su/ieee80211/rtl819x_BAProc.c745
-rw-r--r--drivers/staging/rtl8192su/ieee80211/rtl819x_HT.h530
-rw-r--r--drivers/staging/rtl8192su/ieee80211/rtl819x_HTProc.c1725
-rw-r--r--drivers/staging/rtl8192su/ieee80211/rtl819x_Qos.h540
-rw-r--r--drivers/staging/rtl8192su/ieee80211/rtl819x_TS.h71
-rw-r--r--drivers/staging/rtl8192su/ieee80211/rtl819x_TSProc.c631
-rw-r--r--drivers/staging/rtl8192su/r8192SU_HWImg.c647
-rw-r--r--drivers/staging/rtl8192su/r8192SU_HWImg.h60
-rw-r--r--drivers/staging/rtl8192su/r8192SU_led.c2338
-rw-r--r--drivers/staging/rtl8192su/r8192SU_led.h93
-rw-r--r--drivers/staging/rtl8192su/r8192S_Efuse.c2199
-rw-r--r--drivers/staging/rtl8192su/r8192S_Efuse.h79
-rw-r--r--drivers/staging/rtl8192su/r8192S_firmware.c481
-rw-r--r--drivers/staging/rtl8192su/r8192S_firmware.h210
-rw-r--r--drivers/staging/rtl8192su/r8192S_hw.h1445
-rw-r--r--drivers/staging/rtl8192su/r8192S_phy.c3634
-rw-r--r--drivers/staging/rtl8192su/r8192S_phy.h135
-rw-r--r--drivers/staging/rtl8192su/r8192S_phyreg.h1033
-rw-r--r--drivers/staging/rtl8192su/r8192S_rtl6052.c842
-rw-r--r--drivers/staging/rtl8192su/r8192S_rtl6052.h87
-rw-r--r--drivers/staging/rtl8192su/r8192S_rtl8225.c292
-rw-r--r--drivers/staging/rtl8192su/r8192S_rtl8225.h30
-rw-r--r--drivers/staging/rtl8192su/r8192U.h1519
-rw-r--r--drivers/staging/rtl8192su/r8192U_core.c7712
-rw-r--r--drivers/staging/rtl8192su/r8192U_dm.c3982
-rw-r--r--drivers/staging/rtl8192su/r8192U_dm.h254
-rw-r--r--drivers/staging/rtl8192su/r8192U_pm.c72
-rw-r--r--drivers/staging/rtl8192su/r8192U_pm.h25
-rw-r--r--drivers/staging/rtl8192su/r8192U_wx.c1296
-rw-r--r--drivers/staging/rtl8192su/r8192U_wx.h22
-rw-r--r--drivers/staging/rtl8192su/r819xU_HTGen.h22
-rw-r--r--drivers/staging/rtl8192su/r819xU_HTType.h383
-rw-r--r--drivers/staging/rtl8192su/r819xU_cmdpkt.c512
-rw-r--r--drivers/staging/rtl8192su/r819xU_cmdpkt.h192
-rw-r--r--drivers/staging/rtl8192u/Makefile22
-rw-r--r--drivers/staging/rtl8192u/ieee80211/Makefile16
-rw-r--r--drivers/staging/rtl8192u/ieee80211/ieee80211_rx.c2
-rw-r--r--drivers/staging/rtl8192u/ieee80211/ieee80211_softmac.c2
-rw-r--r--drivers/staging/rtl8192u/r8192U_core.c25
-rw-r--r--drivers/staging/rtl8192u/r819xU_firmware.c2
-rw-r--r--drivers/staging/rtl8712/Kconfig18
-rw-r--r--drivers/staging/rtl8712/Makefile34
-rw-r--r--drivers/staging/rtl8712/TODO16
-rw-r--r--drivers/staging/rtl8712/basic_types.h23
-rw-r--r--drivers/staging/rtl8712/big_endian.h69
-rw-r--r--drivers/staging/rtl8712/drv_types.h165
-rw-r--r--drivers/staging/rtl8712/ethernet.h23
-rw-r--r--drivers/staging/rtl8712/farray.h10197
-rw-r--r--drivers/staging/rtl8712/generic.h153
-rw-r--r--drivers/staging/rtl8712/hal_init.c358
-rw-r--r--drivers/staging/rtl8712/ieee80211.c454
-rw-r--r--drivers/staging/rtl8712/ieee80211.h770
-rw-r--r--drivers/staging/rtl8712/if_ether.h116
-rw-r--r--drivers/staging/rtl8712/ip.h137
-rw-r--r--drivers/staging/rtl8712/little_endian.h69
-rw-r--r--drivers/staging/rtl8712/mlme_linux.c170
-rw-r--r--drivers/staging/rtl8712/mlme_osdep.h18
-rw-r--r--drivers/staging/rtl8712/mp_custom_oid.h274
-rw-r--r--drivers/staging/rtl8712/os_intfs.c464
-rw-r--r--drivers/staging/rtl8712/osdep_intf.h19
-rw-r--r--drivers/staging/rtl8712/osdep_service.h256
-rw-r--r--drivers/staging/rtl8712/recv_linux.c169
-rw-r--r--drivers/staging/rtl8712/recv_osdep.h27
-rw-r--r--drivers/staging/rtl8712/rtl8712_bitdef.h19
-rw-r--r--drivers/staging/rtl8712/rtl8712_cmd.c465
-rw-r--r--drivers/staging/rtl8712/rtl8712_cmd.h157
-rw-r--r--drivers/staging/rtl8712/rtl8712_cmdctrl_bitdef.h89
-rw-r--r--drivers/staging/rtl8712/rtl8712_cmdctrl_regdef.h15
-rw-r--r--drivers/staging/rtl8712/rtl8712_debugctrl_bitdef.h36
-rw-r--r--drivers/staging/rtl8712/rtl8712_debugctrl_regdef.h28
-rw-r--r--drivers/staging/rtl8712/rtl8712_edcasetting_bitdef.h52
-rw-r--r--drivers/staging/rtl8712/rtl8712_edcasetting_regdef.h18
-rw-r--r--drivers/staging/rtl8712/rtl8712_efuse.c568
-rw-r--r--drivers/staging/rtl8712/rtl8712_efuse.h43
-rw-r--r--drivers/staging/rtl8712/rtl8712_event.h73
-rw-r--r--drivers/staging/rtl8712/rtl8712_fifoctrl_bitdef.h126
-rw-r--r--drivers/staging/rtl8712/rtl8712_fifoctrl_regdef.h57
-rw-r--r--drivers/staging/rtl8712/rtl8712_gp_bitdef.h54
-rw-r--r--drivers/staging/rtl8712/rtl8712_gp_regdef.h17
-rw-r--r--drivers/staging/rtl8712/rtl8712_hal.h124
-rw-r--r--drivers/staging/rtl8712/rtl8712_interrupt_bitdef.h39
-rw-r--r--drivers/staging/rtl8712/rtl8712_io.c151
-rw-r--r--drivers/staging/rtl8712/rtl8712_led.c1815
-rw-r--r--drivers/staging/rtl8712/rtl8712_macsetting_bitdef.h28
-rw-r--r--drivers/staging/rtl8712/rtl8712_macsetting_regdef.h16
-rw-r--r--drivers/staging/rtl8712/rtl8712_powersave_bitdef.h33
-rw-r--r--drivers/staging/rtl8712/rtl8712_powersave_regdef.h20
-rw-r--r--drivers/staging/rtl8712/rtl8712_ratectrl_bitdef.h30
-rw-r--r--drivers/staging/rtl8712/rtl8712_ratectrl_regdef.h31
-rw-r--r--drivers/staging/rtl8712/rtl8712_recv.c1131
-rw-r--r--drivers/staging/rtl8712/rtl8712_recv.h128
-rw-r--r--drivers/staging/rtl8712/rtl8712_regdef.h19
-rw-r--r--drivers/staging/rtl8712/rtl8712_security_bitdef.h29
-rw-r--r--drivers/staging/rtl8712/rtl8712_spec.h110
-rw-r--r--drivers/staging/rtl8712/rtl8712_syscfg_bitdef.h145
-rw-r--r--drivers/staging/rtl8712/rtl8712_syscfg_regdef.h31
-rw-r--r--drivers/staging/rtl8712/rtl8712_timectrl_bitdef.h44
-rw-r--r--drivers/staging/rtl8712/rtl8712_timectrl_regdef.h20
-rw-r--r--drivers/staging/rtl8712/rtl8712_wmac_bitdef.h37
-rw-r--r--drivers/staging/rtl8712/rtl8712_wmac_regdef.h24
-rw-r--r--drivers/staging/rtl8712/rtl8712_xmit.c509
-rw-r--r--drivers/staging/rtl8712/rtl8712_xmit.h95
-rw-r--r--drivers/staging/rtl8712/rtl871x_byteorder.h13
-rw-r--r--drivers/staging/rtl8712/rtl871x_cmd.c926
-rw-r--r--drivers/staging/rtl8712/rtl871x_cmd.h719
-rw-r--r--drivers/staging/rtl8712/rtl871x_debug.h142
-rw-r--r--drivers/staging/rtl8712/rtl871x_eeprom.c233
-rw-r--r--drivers/staging/rtl8712/rtl871x_eeprom.h82
-rw-r--r--drivers/staging/rtl8712/rtl871x_event.h95
-rw-r--r--drivers/staging/rtl8712/rtl871x_ht.h19
-rw-r--r--drivers/staging/rtl8712/rtl871x_io.c163
-rw-r--r--drivers/staging/rtl8712/rtl871x_io.h233
-rw-r--r--drivers/staging/rtl8712/rtl871x_ioctl.h97
-rw-r--r--drivers/staging/rtl8712/rtl871x_ioctl_linux.c2246
-rw-r--r--drivers/staging/rtl8712/rtl871x_ioctl_rtl.c535
-rw-r--r--drivers/staging/rtl8712/rtl871x_ioctl_rtl.h96
-rw-r--r--drivers/staging/rtl8712/rtl871x_ioctl_set.c379
-rw-r--r--drivers/staging/rtl8712/rtl871x_ioctl_set.h24
-rw-r--r--drivers/staging/rtl8712/rtl871x_led.h99
-rw-r--r--drivers/staging/rtl8712/rtl871x_mlme.c1840
-rw-r--r--drivers/staging/rtl8712/rtl871x_mlme.h208
-rw-r--r--drivers/staging/rtl8712/rtl871x_mp.c736
-rw-r--r--drivers/staging/rtl8712/rtl871x_mp.h318
-rw-r--r--drivers/staging/rtl8712/rtl871x_mp_ioctl.c1475
-rw-r--r--drivers/staging/rtl8712/rtl871x_mp_ioctl.h457
-rw-r--r--drivers/staging/rtl8712/rtl871x_mp_phy_regdef.h1025
-rw-r--r--drivers/staging/rtl8712/rtl871x_pwrctrl.c250
-rw-r--r--drivers/staging/rtl8712/rtl871x_pwrctrl.h127
-rw-r--r--drivers/staging/rtl8712/rtl871x_recv.c693
-rw-r--r--drivers/staging/rtl8712/rtl871x_recv.h330
-rw-r--r--drivers/staging/rtl8712/rtl871x_rf.h43
-rw-r--r--drivers/staging/rtl8712/rtl871x_security.c1389
-rw-r--r--drivers/staging/rtl8712/rtl871x_security.h196
-rw-r--r--drivers/staging/rtl8712/rtl871x_sta_mgt.c299
-rw-r--r--drivers/staging/rtl8712/rtl871x_wlan_sme.h22
-rw-r--r--drivers/staging/rtl8712/rtl871x_xmit.c1052
-rw-r--r--drivers/staging/rtl8712/rtl871x_xmit.h260
-rw-r--r--drivers/staging/rtl8712/sta_info.h125
-rw-r--r--drivers/staging/rtl8712/swab.h106
-rw-r--r--drivers/staging/rtl8712/usb_halinit.c317
-rw-r--r--drivers/staging/rtl8712/usb_intf.c571
-rw-r--r--drivers/staging/rtl8712/usb_ops.c201
-rw-r--r--drivers/staging/rtl8712/usb_ops.h25
-rw-r--r--drivers/staging/rtl8712/usb_ops_linux.c529
-rw-r--r--drivers/staging/rtl8712/usb_osintf.h24
-rw-r--r--drivers/staging/rtl8712/usb_vendor_req.h33
-rw-r--r--drivers/staging/rtl8712/wifi.h622
-rw-r--r--drivers/staging/rtl8712/wlan_bssdef.h242
-rw-r--r--drivers/staging/rtl8712/xmit_linux.c182
-rw-r--r--drivers/staging/rtl8712/xmit_osdep.h38
-rw-r--r--drivers/staging/samsung-laptop/samsung-laptop.c2
-rw-r--r--drivers/staging/sbe-2t3e3/2t3e3.h894
-rw-r--r--drivers/staging/sbe-2t3e3/Kconfig13
-rw-r--r--drivers/staging/sbe-2t3e3/Makefile4
-rw-r--r--drivers/staging/sbe-2t3e3/TODO6
-rw-r--r--drivers/staging/sbe-2t3e3/cpld.c366
-rw-r--r--drivers/staging/sbe-2t3e3/ctrl.c362
-rw-r--r--drivers/staging/sbe-2t3e3/ctrl.h131
-rw-r--r--drivers/staging/sbe-2t3e3/dc.c502
-rw-r--r--drivers/staging/sbe-2t3e3/exar7250.c217
-rw-r--r--drivers/staging/sbe-2t3e3/exar7300.c182
-rw-r--r--drivers/staging/sbe-2t3e3/intr.c635
-rw-r--r--drivers/staging/sbe-2t3e3/io.c352
-rw-r--r--drivers/staging/sbe-2t3e3/main.c171
-rw-r--r--drivers/staging/sbe-2t3e3/maps.c104
-rw-r--r--drivers/staging/sbe-2t3e3/module.c210
-rw-r--r--drivers/staging/sbe-2t3e3/netdev.c142
-rw-r--r--drivers/staging/slicoss/slic.h8
-rw-r--r--drivers/staging/slicoss/slicoss.c44
-rw-r--r--drivers/staging/sm7xx/smtcfb.c13
-rw-r--r--drivers/staging/smbfs/Kconfig (renamed from fs/smbfs/Kconfig)0
-rw-r--r--drivers/staging/smbfs/Makefile (renamed from fs/smbfs/Makefile)12
-rw-r--r--drivers/staging/smbfs/TODO8
-rw-r--r--drivers/staging/smbfs/cache.c (renamed from fs/smbfs/cache.c)2
-rw-r--r--drivers/staging/smbfs/dir.c (renamed from fs/smbfs/dir.c)22
-rw-r--r--drivers/staging/smbfs/file.c (renamed from fs/smbfs/file.c)5
-rw-r--r--drivers/staging/smbfs/getopt.c (renamed from fs/smbfs/getopt.c)0
-rw-r--r--drivers/staging/smbfs/getopt.h (renamed from fs/smbfs/getopt.h)0
-rw-r--r--drivers/staging/smbfs/inode.c (renamed from fs/smbfs/inode.c)19
-rw-r--r--drivers/staging/smbfs/ioctl.c (renamed from fs/smbfs/ioctl.c)5
-rw-r--r--drivers/staging/smbfs/proc.c (renamed from fs/smbfs/proc.c)17
-rw-r--r--drivers/staging/smbfs/proto.h (renamed from fs/smbfs/proto.h)0
-rw-r--r--drivers/staging/smbfs/request.c (renamed from fs/smbfs/request.c)7
-rw-r--r--drivers/staging/smbfs/request.h (renamed from fs/smbfs/request.h)0
-rw-r--r--drivers/staging/smbfs/smb.h (renamed from include/linux/smb.h)0
-rw-r--r--drivers/staging/smbfs/smb_debug.h (renamed from fs/smbfs/smb_debug.h)0
-rw-r--r--drivers/staging/smbfs/smb_fs.h (renamed from include/linux/smb_fs.h)8
-rw-r--r--drivers/staging/smbfs/smb_fs_i.h (renamed from include/linux/smb_fs_i.h)0
-rw-r--r--drivers/staging/smbfs/smb_fs_sb.h (renamed from include/linux/smb_fs_sb.h)2
-rw-r--r--drivers/staging/smbfs/smb_mount.h (renamed from include/linux/smb_mount.h)0
-rw-r--r--drivers/staging/smbfs/smbfs.txt (renamed from Documentation/filesystems/smbfs.txt)0
-rw-r--r--drivers/staging/smbfs/smbiod.c (renamed from fs/smbfs/smbiod.c)7
-rw-r--r--drivers/staging/smbfs/smbno.h (renamed from include/linux/smbno.h)0
-rw-r--r--drivers/staging/smbfs/sock.c (renamed from fs/smbfs/sock.c)7
-rw-r--r--drivers/staging/smbfs/symlink.c (renamed from fs/smbfs/symlink.c)5
-rw-r--r--drivers/staging/solo6x10/Makefile2
-rw-r--r--drivers/staging/solo6x10/solo6010-core.c40
-rw-r--r--drivers/staging/solo6x10/solo6010-g723.c2
-rw-r--r--drivers/staging/solo6x10/solo6010-i2c.c2
-rw-r--r--drivers/staging/solo6x10/solo6010-p2m.c2
-rw-r--r--drivers/staging/solo6x10/solo6010-v4l2-enc.c4
-rw-r--r--drivers/staging/solo6x10/solo6010-v4l2.c4
-rw-r--r--drivers/staging/speakup/DefaultKeyAssignments46
-rw-r--r--drivers/staging/speakup/Kconfig195
-rw-r--r--drivers/staging/speakup/Makefile30
-rw-r--r--drivers/staging/speakup/TODO47
-rw-r--r--drivers/staging/speakup/buffers.c106
-rw-r--r--drivers/staging/speakup/devsynth.c94
-rw-r--r--drivers/staging/speakup/fakekey.c104
-rw-r--r--drivers/staging/speakup/i18n.c628
-rw-r--r--drivers/staging/speakup/i18n.h228
-rw-r--r--drivers/staging/speakup/keyhelp.c214
-rw-r--r--drivers/staging/speakup/kobjects.c1022
-rw-r--r--drivers/staging/speakup/main.c2310
-rw-r--r--drivers/staging/speakup/selection.c151
-rw-r--r--drivers/staging/speakup/serialio.c215
-rw-r--r--drivers/staging/speakup/serialio.h55
-rw-r--r--drivers/staging/speakup/speakup.h130
-rw-r--r--drivers/staging/speakup/speakup_acnt.h16
-rw-r--r--drivers/staging/speakup/speakup_acntpc.c335
-rw-r--r--drivers/staging/speakup/speakup_acntsa.c163
-rw-r--r--drivers/staging/speakup/speakup_apollo.c227
-rw-r--r--drivers/staging/speakup/speakup_audptr.c195
-rw-r--r--drivers/staging/speakup/speakup_bns.c147
-rw-r--r--drivers/staging/speakup/speakup_decext.c253
-rw-r--r--drivers/staging/speakup/speakup_decpc.c504
-rw-r--r--drivers/staging/speakup/speakup_dectlk.c322
-rw-r--r--drivers/staging/speakup/speakup_dtlk.c402
-rw-r--r--drivers/staging/speakup/speakup_dtlk.h54
-rw-r--r--drivers/staging/speakup/speakup_dummy.c148
-rw-r--r--drivers/staging/speakup/speakup_keypc.c335
-rw-r--r--drivers/staging/speakup/speakup_ltlk.c194
-rw-r--r--drivers/staging/speakup/speakup_soft.c379
-rw-r--r--drivers/staging/speakup/speakup_spkout.c165
-rw-r--r--drivers/staging/speakup/speakup_txprt.c147
-rw-r--r--drivers/staging/speakup/speakupmap.h65
-rw-r--r--drivers/staging/speakup/speakupmap.map93
-rw-r--r--drivers/staging/speakup/spk_priv.h93
-rw-r--r--drivers/staging/speakup/spk_priv_keyinfo.h110
-rw-r--r--drivers/staging/speakup/spk_types.h193
-rw-r--r--drivers/staging/speakup/spkguide.txt1575
-rw-r--r--drivers/staging/speakup/synth.c463
-rw-r--r--drivers/staging/speakup/thread.c58
-rw-r--r--drivers/staging/speakup/varhandlers.c404
-rw-r--r--drivers/staging/spectra/Kconfig2
-rw-r--r--drivers/staging/spectra/ffsport.c2
-rw-r--r--drivers/staging/spectra/flash.c2
-rw-r--r--drivers/staging/spectra/flash.h2
-rw-r--r--drivers/staging/spectra/lld_emu.c10
-rw-r--r--drivers/staging/spectra/lld_mtd.c8
-rw-r--r--drivers/staging/spectra/lld_nand.c7
-rw-r--r--drivers/staging/stradis/Kconfig7
-rw-r--r--drivers/staging/stradis/Makefile3
-rw-r--r--drivers/staging/stradis/TODO6
-rw-r--r--drivers/staging/stradis/stradis.c (renamed from drivers/media/video/stradis.c)11
-rw-r--r--drivers/staging/ti-st/Kconfig13
-rw-r--r--drivers/staging/ti-st/Makefile2
-rw-r--r--drivers/staging/ti-st/bt_drv.c2
-rw-r--r--drivers/staging/ti-st/fm.h13
-rw-r--r--drivers/staging/ti-st/st.h83
-rw-r--r--drivers/staging/ti-st/st_core.h128
-rw-r--r--drivers/staging/ti-st/st_kim.h180
-rw-r--r--drivers/staging/ti-st/st_ll.h69
-rw-r--r--drivers/staging/tidspbridge/Makefile6
-rw-r--r--drivers/staging/tidspbridge/TODO1
-rw-r--r--drivers/staging/tidspbridge/core/chnl_sm.c1
-rw-r--r--drivers/staging/tidspbridge/core/dsp-clock.c1
-rw-r--r--drivers/staging/tidspbridge/core/io_sm.c1
-rw-r--r--drivers/staging/tidspbridge/core/sync.c (renamed from drivers/staging/tidspbridge/services/sync.c)17
-rw-r--r--drivers/staging/tidspbridge/core/tiomap3430.c33
-rw-r--r--drivers/staging/tidspbridge/core/tiomap3430_pwr.c18
-rw-r--r--drivers/staging/tidspbridge/core/tiomap_io.c5
-rw-r--r--drivers/staging/tidspbridge/gen/gb.c1
-rw-r--r--drivers/staging/tidspbridge/gen/gs.c1
-rw-r--r--drivers/staging/tidspbridge/include/dspbridge/cfg.h222
-rw-r--r--drivers/staging/tidspbridge/include/dspbridge/cmm.h19
-rw-r--r--drivers/staging/tidspbridge/include/dspbridge/drv.h2
-rw-r--r--drivers/staging/tidspbridge/include/dspbridge/host_os.h19
-rw-r--r--drivers/staging/tidspbridge/include/dspbridge/mgr.h2
-rw-r--r--drivers/staging/tidspbridge/include/dspbridge/services.h50
-rw-r--r--drivers/staging/tidspbridge/include/dspbridge/wdt.h2
-rw-r--r--drivers/staging/tidspbridge/pmgr/chnl.c1
-rw-r--r--drivers/staging/tidspbridge/pmgr/cmm.c18
-rw-r--r--drivers/staging/tidspbridge/pmgr/dbll.c2
-rw-r--r--drivers/staging/tidspbridge/pmgr/dev.c69
-rw-r--r--drivers/staging/tidspbridge/pmgr/dspapi.c28
-rw-r--r--drivers/staging/tidspbridge/pmgr/io.c3
-rw-r--r--drivers/staging/tidspbridge/rmgr/dbdcd.c8
-rw-r--r--drivers/staging/tidspbridge/rmgr/drv.c59
-rw-r--r--drivers/staging/tidspbridge/rmgr/drv_interface.c27
-rw-r--r--drivers/staging/tidspbridge/rmgr/dspdrv.c12
-rw-r--r--drivers/staging/tidspbridge/rmgr/mgr.c52
-rw-r--r--drivers/staging/tidspbridge/rmgr/nldr.c17
-rw-r--r--drivers/staging/tidspbridge/rmgr/node.c10
-rw-r--r--drivers/staging/tidspbridge/rmgr/proc.c48
-rw-r--r--drivers/staging/tidspbridge/rmgr/strm.c12
-rw-r--r--drivers/staging/tidspbridge/services/cfg.c253
-rw-r--r--drivers/staging/tidspbridge/services/ntfy.c31
-rw-r--r--drivers/staging/tidspbridge/services/services.c70
-rw-r--r--drivers/staging/tm6000/Makefile10
-rw-r--r--drivers/staging/tm6000/TODO6
-rw-r--r--drivers/staging/tm6000/tm6000-alsa.c109
-rw-r--r--drivers/staging/tm6000/tm6000-cards.c41
-rw-r--r--drivers/staging/tm6000/tm6000-core.c163
-rw-r--r--drivers/staging/tm6000/tm6000-dvb.c32
-rw-r--r--drivers/staging/tm6000/tm6000-i2c.c44
-rw-r--r--drivers/staging/tm6000/tm6000-input.c34
-rw-r--r--drivers/staging/tm6000/tm6000-regs.h32
-rw-r--r--drivers/staging/tm6000/tm6000-stds.c350
-rw-r--r--drivers/staging/tm6000/tm6000-usb-isoc.h32
-rw-r--r--drivers/staging/tm6000/tm6000-video.c443
-rw-r--r--drivers/staging/tm6000/tm6000.h54
-rw-r--r--drivers/staging/udlfb/udlfb.c989
-rw-r--r--drivers/staging/udlfb/udlfb.h39
-rw-r--r--drivers/staging/udlfb/udlfb.txt144
-rw-r--r--drivers/staging/usbip/Makefile11
-rw-r--r--drivers/staging/usbip/stub_dev.c2
-rw-r--r--drivers/staging/usbip/usbip_common.c2
-rw-r--r--drivers/staging/usbip/usbip_event.c16
-rw-r--r--drivers/staging/usbip/vhci_hcd.c2
-rw-r--r--drivers/staging/vme/bridges/vme_ca91cx42.c94
-rw-r--r--drivers/staging/vme/devices/vme_user.c2
-rw-r--r--drivers/staging/vt6655/Makefile4
-rw-r--r--drivers/staging/vt6655/device_main.c3
-rw-r--r--drivers/staging/vt6655/iocmd.h4
-rw-r--r--drivers/staging/vt6655/iwctl.c6
-rw-r--r--drivers/staging/vt6655/ttype.h7
-rw-r--r--drivers/staging/vt6655/vntwifi.c6
-rw-r--r--drivers/staging/vt6656/80211mgr.h10
-rw-r--r--drivers/staging/vt6656/Makefile4
-rw-r--r--drivers/staging/vt6656/baseband.c2
-rw-r--r--drivers/staging/vt6656/bssdb.c14
-rw-r--r--drivers/staging/vt6656/card.c2
-rw-r--r--drivers/staging/vt6656/channel.c12
-rw-r--r--drivers/staging/vt6656/device.h19
-rw-r--r--drivers/staging/vt6656/dpc.c17
-rw-r--r--drivers/staging/vt6656/firmware.c6
-rw-r--r--drivers/staging/vt6656/iocmd.h4
-rw-r--r--drivers/staging/vt6656/ioctl.c2
-rw-r--r--drivers/staging/vt6656/iwctl.c20
-rw-r--r--drivers/staging/vt6656/iwctl.h3
-rw-r--r--drivers/staging/vt6656/key.c2
-rw-r--r--drivers/staging/vt6656/mac.c12
-rw-r--r--drivers/staging/vt6656/main_usb.c32
-rw-r--r--drivers/staging/vt6656/power.c6
-rw-r--r--drivers/staging/vt6656/rxtx.c26
-rw-r--r--drivers/staging/vt6656/tether.h34
-rw-r--r--drivers/staging/vt6656/usbpipe.c52
-rw-r--r--drivers/staging/vt6656/wcmd.c4
-rw-r--r--drivers/staging/vt6656/wmgr.c19
-rw-r--r--drivers/staging/vt6656/wmgr.h2
-rw-r--r--drivers/staging/vt6656/wpa.c8
-rw-r--r--drivers/staging/vt6656/wpactl.c6
-rw-r--r--drivers/staging/westbridge/Kconfig53
-rw-r--r--drivers/staging/westbridge/TODO7
-rw-r--r--drivers/staging/westbridge/astoria/Kconfig9
-rw-r--r--drivers/staging/westbridge/astoria/Makefile11
-rw-r--r--drivers/staging/westbridge/astoria/api/Makefile14
-rw-r--r--drivers/staging/westbridge/astoria/api/src/cyasdma.c1107
-rw-r--r--drivers/staging/westbridge/astoria/api/src/cyasintr.c143
-rw-r--r--drivers/staging/westbridge/astoria/api/src/cyaslep2pep.c358
-rw-r--r--drivers/staging/westbridge/astoria/api/src/cyaslowlevel.c1264
-rw-r--r--drivers/staging/westbridge/astoria/api/src/cyasmisc.c3474
-rw-r--r--drivers/staging/westbridge/astoria/api/src/cyasmtp.c1128
-rw-r--r--drivers/staging/westbridge/astoria/api/src/cyasstorage.c4104
-rw-r--r--drivers/staging/westbridge/astoria/api/src/cyasusb.c3717
-rw-r--r--drivers/staging/westbridge/astoria/arch/arm/mach-omap2/cyashalomap_kernel.c2450
-rw-r--r--drivers/staging/westbridge/astoria/arch/arm/plat-omap/include/mach/westbridge/cyashaldef.h55
-rw-r--r--drivers/staging/westbridge/astoria/arch/arm/plat-omap/include/mach/westbridge/westbridge-omap3-pnand-hal/cyashalomap_kernel.h319
-rw-r--r--drivers/staging/westbridge/astoria/arch/arm/plat-omap/include/mach/westbridge/westbridge-omap3-pnand-hal/cyasmemmap.h558
-rw-r--r--drivers/staging/westbridge/astoria/arch/arm/plat-omap/include/mach/westbridge/westbridge-omap3-pnand-hal/cyasomapdev_kernel.h72
-rw-r--r--drivers/staging/westbridge/astoria/block/Kconfig9
-rw-r--r--drivers/staging/westbridge/astoria/block/Makefile11
-rw-r--r--drivers/staging/westbridge/astoria/block/cyasblkdev_block.c1628
-rw-r--r--drivers/staging/westbridge/astoria/block/cyasblkdev_queue.c417
-rw-r--r--drivers/staging/westbridge/astoria/block/cyasblkdev_queue.h64
-rw-r--r--drivers/staging/westbridge/astoria/device/Kconfig9
-rw-r--r--drivers/staging/westbridge/astoria/device/Makefile23
-rw-r--r--drivers/staging/westbridge/astoria/device/cyandevice_export.h132
-rw-r--r--drivers/staging/westbridge/astoria/device/cyasdevice.c412
-rw-r--r--drivers/staging/westbridge/astoria/gadget/Kconfig9
-rw-r--r--drivers/staging/westbridge/astoria/gadget/Makefile11
-rw-r--r--drivers/staging/westbridge/astoria/gadget/cyasgadget.c2176
-rw-r--r--drivers/staging/westbridge/astoria/gadget/cyasgadget.h193
-rw-r--r--drivers/staging/westbridge/astoria/gadget/cyasgadget_ioctl.h99
-rw-r--r--drivers/staging/westbridge/astoria/include/linux/westbridge/cyanerr.h418
-rw-r--r--drivers/staging/westbridge/astoria/include/linux/westbridge/cyanmedia.h59
-rw-r--r--drivers/staging/westbridge/astoria/include/linux/westbridge/cyanmisc.h614
-rw-r--r--drivers/staging/westbridge/astoria/include/linux/westbridge/cyanregs.h180
-rw-r--r--drivers/staging/westbridge/astoria/include/linux/westbridge/cyansdkversion.h30
-rw-r--r--drivers/staging/westbridge/astoria/include/linux/westbridge/cyanstorage.h419
-rw-r--r--drivers/staging/westbridge/astoria/include/linux/westbridge/cyantioch.h35
-rw-r--r--drivers/staging/westbridge/astoria/include/linux/westbridge/cyantypes.h31
-rw-r--r--drivers/staging/westbridge/astoria/include/linux/westbridge/cyanusb.h619
-rw-r--r--drivers/staging/westbridge/astoria/include/linux/westbridge/cyas_cplus_end.h11
-rw-r--r--drivers/staging/westbridge/astoria/include/linux/westbridge/cyas_cplus_start.h11
-rw-r--r--drivers/staging/westbridge/astoria/include/linux/westbridge/cyascast.h35
-rw-r--r--drivers/staging/westbridge/astoria/include/linux/westbridge/cyasdevice.h1057
-rw-r--r--drivers/staging/westbridge/astoria/include/linux/westbridge/cyasdma.h375
-rw-r--r--drivers/staging/westbridge/astoria/include/linux/westbridge/cyaserr.h1094
-rw-r--r--drivers/staging/westbridge/astoria/include/linux/westbridge/cyashal.h108
-rw-r--r--drivers/staging/westbridge/astoria/include/linux/westbridge/cyashalcb.h44
-rw-r--r--drivers/staging/westbridge/astoria/include/linux/westbridge/cyashaldoc.h800
-rw-r--r--drivers/staging/westbridge/astoria/include/linux/westbridge/cyasintr.h104
-rw-r--r--drivers/staging/westbridge/astoria/include/linux/westbridge/cyaslep2pep.h36
-rw-r--r--drivers/staging/westbridge/astoria/include/linux/westbridge/cyaslowlevel.h366
-rw-r--r--drivers/staging/westbridge/astoria/include/linux/westbridge/cyasmedia.h54
-rw-r--r--drivers/staging/westbridge/astoria/include/linux/westbridge/cyasmisc.h1549
-rw-r--r--drivers/staging/westbridge/astoria/include/linux/westbridge/cyasmisc_dep.h53
-rw-r--r--drivers/staging/westbridge/astoria/include/linux/westbridge/cyasmtp.h646
-rw-r--r--drivers/staging/westbridge/astoria/include/linux/westbridge/cyasprotocol.h3838
-rw-r--r--drivers/staging/westbridge/astoria/include/linux/westbridge/cyasregs.h201
-rw-r--r--drivers/staging/westbridge/astoria/include/linux/westbridge/cyasstorage.h2759
-rw-r--r--drivers/staging/westbridge/astoria/include/linux/westbridge/cyasstorage_dep.h309
-rw-r--r--drivers/staging/westbridge/astoria/include/linux/westbridge/cyastoria.h36
-rw-r--r--drivers/staging/westbridge/astoria/include/linux/westbridge/cyastsdkversion.h30
-rw-r--r--drivers/staging/westbridge/astoria/include/linux/westbridge/cyastypes.h71
-rw-r--r--drivers/staging/westbridge/astoria/include/linux/westbridge/cyasusb.h1862
-rw-r--r--drivers/staging/westbridge/astoria/include/linux/westbridge/cyasusb_dep.h224
-rw-r--r--drivers/staging/winbond/Makefile2
-rw-r--r--drivers/staging/winbond/TODO2
-rw-r--r--drivers/staging/winbond/core.h43
-rw-r--r--drivers/staging/winbond/mac_structures.h17
-rw-r--r--drivers/staging/winbond/mds.c207
-rw-r--r--drivers/staging/winbond/mds_f.h2
-rw-r--r--drivers/staging/winbond/mds_s.h1
-rw-r--r--drivers/staging/winbond/mlme_s.h188
-rw-r--r--drivers/staging/winbond/mlmetxrx.c31
-rw-r--r--drivers/staging/winbond/mlmetxrx_f.h2
-rw-r--r--drivers/staging/winbond/phy_calibration.c20
-rw-r--r--drivers/staging/winbond/scan_s.h110
-rw-r--r--drivers/staging/winbond/sysdef.h3
-rw-r--r--drivers/staging/winbond/wb35rx.c2
-rw-r--r--drivers/staging/winbond/wb35tx.c2
-rw-r--r--drivers/staging/winbond/wbhal_f.h10
-rw-r--r--drivers/staging/winbond/wbhal_s.h37
-rw-r--r--drivers/staging/winbond/wbusb.c21
-rw-r--r--drivers/staging/wlags49_h2/Makefile10
-rw-r--r--drivers/staging/wlags49_h2/hcf.c1
-rw-r--r--drivers/staging/wlags49_h2/hcfdef.h8
-rw-r--r--drivers/staging/wlags49_h2/mdd.h2
-rw-r--r--drivers/staging/wlags49_h2/wl_main.c88
-rw-r--r--drivers/staging/wlags49_h2/wl_netdev.c5
-rw-r--r--drivers/staging/wlags49_h2/wl_util.c72
-rw-r--r--drivers/staging/wlags49_h2/wl_util.h5
-rw-r--r--drivers/staging/wlags49_h2/wl_wext.c9
-rw-r--r--drivers/staging/wlags49_h25/Makefile10
-rw-r--r--drivers/staging/wlan-ng/Makefile2
-rw-r--r--drivers/staging/wlan-ng/cfg80211.c6
-rw-r--r--drivers/staging/wlan-ng/hfa384x_usb.c2
-rw-r--r--drivers/staging/wlan-ng/p80211netdev.c2
-rw-r--r--drivers/staging/xgifb/Makefile2
-rw-r--r--drivers/staging/xgifb/TODO2
-rw-r--r--drivers/staging/xgifb/XGI_accel.h3
-rw-r--r--drivers/staging/xgifb/XGI_main.h30
-rw-r--r--drivers/staging/xgifb/XGI_main_26.c3876
-rw-r--r--drivers/staging/xgifb/vb_ext.c1737
-rw-r--r--drivers/staging/xgifb/vb_init.c5022
-rw-r--r--drivers/staging/xgifb/vb_setmode.c17041
-rw-r--r--drivers/staging/xgifb/vb_table.h620
-rw-r--r--drivers/staging/xgifb/vb_util.c192
-rw-r--r--drivers/staging/zram/Kconfig12
-rw-r--r--drivers/staging/zram/Makefile2
-rw-r--r--drivers/staging/zram/zram.txt58
-rw-r--r--drivers/staging/zram/zram_drv.c219
-rw-r--r--drivers/staging/zram/zram_drv.h57
-rw-r--r--drivers/staging/zram/zram_ioctl.h41
-rw-r--r--drivers/staging/zram/zram_sysfs.c224
-rw-r--r--drivers/tty/Makefile11
-rw-r--r--drivers/tty/n_gsm.c (renamed from drivers/char/n_gsm.c)5
-rw-r--r--drivers/tty/n_hdlc.c (renamed from drivers/char/n_hdlc.c)0
-rw-r--r--drivers/tty/n_r3964.c (renamed from drivers/char/n_r3964.c)0
-rw-r--r--drivers/tty/n_tty.c (renamed from drivers/char/n_tty.c)0
-rw-r--r--drivers/tty/pty.c (renamed from drivers/char/pty.c)0
-rw-r--r--drivers/tty/sysrq.c (renamed from drivers/char/sysrq.c)172
-rw-r--r--drivers/tty/tty_audit.c (renamed from drivers/char/tty_audit.c)38
-rw-r--r--drivers/tty/tty_buffer.c (renamed from drivers/char/tty_buffer.c)14
-rw-r--r--drivers/tty/tty_io.c (renamed from drivers/char/tty_io.c)13
-rw-r--r--drivers/tty/tty_ioctl.c (renamed from drivers/char/tty_ioctl.c)0
-rw-r--r--drivers/tty/tty_ldisc.c (renamed from drivers/char/tty_ldisc.c)51
-rw-r--r--drivers/tty/tty_mutex.c (renamed from drivers/char/tty_mutex.c)0
-rw-r--r--drivers/tty/tty_port.c (renamed from drivers/char/tty_port.c)0
-rw-r--r--drivers/tty/vt/.gitignore (renamed from drivers/char/.gitignore)0
-rw-r--r--drivers/tty/vt/Makefile34
-rw-r--r--drivers/tty/vt/consolemap.c (renamed from drivers/char/consolemap.c)0
-rw-r--r--drivers/tty/vt/cp437.uni (renamed from drivers/char/cp437.uni)0
-rw-r--r--drivers/tty/vt/defkeymap.c_shipped (renamed from drivers/char/defkeymap.c_shipped)0
-rw-r--r--drivers/tty/vt/defkeymap.map (renamed from drivers/char/defkeymap.map)0
-rw-r--r--drivers/tty/vt/keyboard.c (renamed from drivers/char/keyboard.c)31
-rw-r--r--drivers/tty/vt/selection.c (renamed from drivers/char/selection.c)0
-rw-r--r--drivers/tty/vt/vc_screen.c (renamed from drivers/char/vc_screen.c)6
-rw-r--r--drivers/tty/vt/vt.c (renamed from drivers/char/vt.c)0
-rw-r--r--drivers/tty/vt/vt_ioctl.c (renamed from drivers/char/vt_ioctl.c)11
-rw-r--r--drivers/uio/uio.c2
-rw-r--r--drivers/uio/uio_cif.c2
-rw-r--r--drivers/uio/uio_netx.c2
-rw-r--r--drivers/usb/atm/ueagle-atm.c7
-rw-r--r--drivers/usb/core/devices.c1
-rw-r--r--drivers/usb/core/devio.c8
-rw-r--r--drivers/usb/core/file.c1
-rw-r--r--drivers/usb/core/hcd.c2
-rw-r--r--drivers/usb/core/inode.c10
-rw-r--r--drivers/usb/gadget/Kconfig2
-rw-r--r--drivers/usb/gadget/atmel_usba_udc.c2
-rw-r--r--drivers/usb/gadget/f_fs.c16
-rw-r--r--drivers/usb/gadget/f_hid.c1
-rw-r--r--drivers/usb/gadget/goku_udc.h3
-rw-r--r--drivers/usb/gadget/inode.c11
-rw-r--r--drivers/usb/gadget/omap_udc.c18
-rw-r--r--drivers/usb/gadget/u_ether.c1
-rw-r--r--drivers/usb/gadget/u_serial.c54
-rw-r--r--drivers/usb/host/Kconfig32
-rw-r--r--drivers/usb/host/Makefile1
-rw-r--r--drivers/usb/host/ehci-dbg.c2
-rw-r--r--drivers/usb/host/ehci-hcd.c15
-rw-r--r--drivers/usb/host/ehci-mem.c26
-rw-r--r--drivers/usb/host/ehci-mxc.c14
-rw-r--r--drivers/usb/host/ehci-octeon.c207
-rw-r--r--drivers/usb/host/ehci-pci.c25
-rw-r--r--drivers/usb/host/ehci-sched.c21
-rw-r--r--drivers/usb/host/ehci.h2
-rw-r--r--drivers/usb/host/isp1362-hcd.c3
-rw-r--r--drivers/usb/host/octeon2-common.c185
-rw-r--r--drivers/usb/host/ohci-hcd.c5
-rw-r--r--drivers/usb/host/ohci-jz4740.c2
-rw-r--r--drivers/usb/host/ohci-octeon.c214
-rw-r--r--drivers/usb/host/r8a66597-hcd.c4
-rw-r--r--drivers/usb/host/uhci-debug.c1
-rw-r--r--drivers/usb/host/xhci-hub.c7
-rw-r--r--drivers/usb/host/xhci-mem.c168
-rw-r--r--drivers/usb/host/xhci-ring.c1
-rw-r--r--drivers/usb/host/xhci.c91
-rw-r--r--drivers/usb/host/xhci.h31
-rw-r--r--drivers/usb/image/microtek.c6
-rw-r--r--drivers/usb/misc/cypress_cy7c63.c6
-rw-r--r--drivers/usb/misc/iowarrior.c1
-rw-r--r--drivers/usb/misc/sisusbvga/sisusb.c1
-rw-r--r--drivers/usb/misc/trancevibrator.c2
-rw-r--r--drivers/usb/misc/usbled.c2
-rw-r--r--drivers/usb/misc/usbsevseg.c10
-rw-r--r--drivers/usb/misc/yurex.c1
-rw-r--r--drivers/usb/mon/mon_bin.c1
-rw-r--r--drivers/usb/mon/mon_stat.c1
-rw-r--r--drivers/usb/musb/blackfin.c80
-rw-r--r--drivers/usb/musb/musb_core.c44
-rw-r--r--drivers/usb/musb/musb_core.h2
-rw-r--r--drivers/usb/musb/musb_gadget.c165
-rw-r--r--drivers/usb/musb/musb_regs.h3
-rw-r--r--drivers/usb/musb/musbhsdma.c14
-rw-r--r--drivers/usb/otg/langwell_otg.c9
-rw-r--r--drivers/usb/otg/twl4030-usb.c13
-rw-r--r--drivers/usb/serial/ftdi_sio.c4
-rw-r--r--drivers/usb/serial/ftdi_sio_ids.h11
-rw-r--r--drivers/usb/serial/option.c2
-rw-r--r--drivers/usb/serial/usb-serial.c4
-rw-r--r--drivers/usb/storage/scsiglue.c4
-rw-r--r--drivers/usb/storage/sierra_ms.c2
-rw-r--r--drivers/usb/storage/uas.c9
-rw-r--r--drivers/uwb/Kconfig20
-rw-r--r--drivers/uwb/Makefile1
-rw-r--r--drivers/uwb/allocator.c3
-rw-r--r--drivers/uwb/i1480/Makefile1
-rw-r--r--drivers/uwb/i1480/i1480-wlp.h200
-rw-r--r--drivers/uwb/i1480/i1480u-wlp/Makefile8
-rw-r--r--drivers/uwb/i1480/i1480u-wlp/i1480u-wlp.h283
-rw-r--r--drivers/uwb/i1480/i1480u-wlp/lc.c424
-rw-r--r--drivers/uwb/i1480/i1480u-wlp/netdev.c331
-rw-r--r--drivers/uwb/i1480/i1480u-wlp/rx.c474
-rw-r--r--drivers/uwb/i1480/i1480u-wlp/sysfs.c407
-rw-r--r--drivers/uwb/i1480/i1480u-wlp/tx.c584
-rw-r--r--drivers/uwb/wlp/Makefile10
-rw-r--r--drivers/uwb/wlp/driver.c43
-rw-r--r--drivers/uwb/wlp/eda.c415
-rw-r--r--drivers/uwb/wlp/messages.c1798
-rw-r--r--drivers/uwb/wlp/sysfs.c708
-rw-r--r--drivers/uwb/wlp/txrx.c354
-rw-r--r--drivers/uwb/wlp/wlp-internal.h224
-rw-r--r--drivers/uwb/wlp/wlp-lc.c560
-rw-r--r--drivers/uwb/wlp/wss-lc.c959
-rw-r--r--drivers/vhost/net.c5
-rw-r--r--drivers/video/Kconfig5
-rw-r--r--drivers/video/aty/atyfb_base.c3
-rw-r--r--drivers/video/au1200fb.c2
-rw-r--r--drivers/video/backlight/adp8860_bl.c8
-rw-r--r--drivers/video/backlight/backlight.c12
-rw-r--r--drivers/video/backlight/l4f00242t03.c2
-rw-r--r--drivers/video/backlight/lms283gf05.c2
-rw-r--r--drivers/video/backlight/mbp_nvidia_bl.c18
-rw-r--r--drivers/video/backlight/pwm_bl.c7
-rw-r--r--drivers/video/backlight/s6e63m0.c7
-rw-r--r--drivers/video/console/vgacon.c1
-rw-r--r--drivers/video/da8xx-fb.c14
-rw-r--r--drivers/video/fbcmap.c69
-rw-r--r--drivers/video/fbmem.c60
-rw-r--r--drivers/video/gbefb.c6
-rw-r--r--drivers/video/geode/lxfb.h4
-rw-r--r--drivers/video/geode/lxfb_ops.c24
-rw-r--r--drivers/video/matrox/matroxfb_DAC1064.c5
-rw-r--r--drivers/video/matrox/matroxfb_maven.c2
-rw-r--r--drivers/video/msm/mddi.c5
-rw-r--r--drivers/video/msm/mdp.c3
-rw-r--r--drivers/video/mx3fb.c4
-rw-r--r--drivers/video/omap/blizzard.c2
-rw-r--r--drivers/video/omap/lcd_omap3beagle.c2
-rw-r--r--drivers/video/omap2/displays/Kconfig2
-rw-r--r--drivers/video/omap2/displays/panel-acx565akm.c6
-rw-r--r--drivers/video/omap2/displays/panel-generic.c6
-rw-r--r--drivers/video/omap2/displays/panel-sharp-lq043t1dg01.c6
-rw-r--r--drivers/video/omap2/displays/panel-sharp-ls037v7dw01.c6
-rw-r--r--drivers/video/omap2/displays/panel-toppoly-tdo35s.c6
-rw-r--r--drivers/video/omap2/displays/panel-tpo-td043mtea1.c6
-rw-r--r--drivers/video/omap2/dss/Makefile2
-rw-r--r--drivers/video/omap2/dss/core.c3
-rw-r--r--drivers/video/omap2/dss/dispc.c270
-rw-r--r--drivers/video/omap2/dss/dsi.c1
-rw-r--r--drivers/video/omap2/dss/dss_features.c191
-rw-r--r--drivers/video/omap2/dss/dss_features.h50
-rw-r--r--drivers/video/omap2/dss/manager.c33
-rw-r--r--drivers/video/omap2/dss/overlay.c24
-rw-r--r--drivers/video/omap2/omapfb/Kconfig2
-rw-r--r--drivers/video/omap2/omapfb/omapfb-main.c26
-rw-r--r--drivers/video/omap2/vram.c17
-rw-r--r--drivers/video/riva/rivafb-i2c.c1
-rw-r--r--drivers/video/savage/savagefb-i2c.c9
-rw-r--r--drivers/video/sh_mipi_dsi.c32
-rw-r--r--drivers/video/sh_mobile_hdmi.c704
-rw-r--r--drivers/video/sh_mobile_lcdcfb.c359
-rw-r--r--drivers/video/sh_mobile_lcdcfb.h41
-rw-r--r--drivers/video/sis/init.c685
-rw-r--r--drivers/video/sis/init.h63
-rw-r--r--drivers/video/sis/init301.c467
-rw-r--r--drivers/video/sis/init301.h43
-rw-r--r--drivers/video/sis/initextlfb.c7
-rw-r--r--drivers/video/sis/osdef.h133
-rw-r--r--drivers/video/sis/sis.h1
-rw-r--r--drivers/video/sis/sis_main.c38
-rw-r--r--drivers/video/sis/vgatypes.h11
-rw-r--r--drivers/video/sis/vstruct.h12
-rw-r--r--drivers/video/via/Makefile2
-rw-r--r--drivers/video/via/accel.c55
-rw-r--r--drivers/video/via/accel.h3
-rw-r--r--drivers/video/via/chip.h3
-rw-r--r--drivers/video/via/dvi.c189
-rw-r--r--drivers/video/via/dvi.h4
-rw-r--r--drivers/video/via/global.h1
-rw-r--r--drivers/video/via/hw.c648
-rw-r--r--drivers/video/via/hw.h53
-rw-r--r--drivers/video/via/ioctl.c2
-rw-r--r--drivers/video/via/lcd.c90
-rw-r--r--drivers/video/via/lcd.h6
-rw-r--r--drivers/video/via/lcdtbl.h591
-rw-r--r--drivers/video/via/tbl1636.c71
-rw-r--r--drivers/video/via/tbl1636.h34
-rw-r--r--drivers/video/via/via-core.c32
-rw-r--r--drivers/video/via/via_i2c.c31
-rw-r--r--drivers/video/via/viafbdev.c294
-rw-r--r--drivers/video/via/viafbdev.h7
-rw-r--r--drivers/video/via/vt1636.c121
-rw-r--r--drivers/video/xen-fbfront.c2
-rw-r--r--drivers/video/xilinxfb.c24
-rw-r--r--drivers/virtio/virtio.c6
-rw-r--r--drivers/virtio/virtio_ring.c3
-rw-r--r--drivers/w1/w1.c8
-rw-r--r--drivers/watchdog/Kconfig26
-rw-r--r--drivers/watchdog/Makefile1
-rw-r--r--drivers/watchdog/bcm63xx_wdt.c332
-rw-r--r--drivers/watchdog/f71808e_wdt.c10
-rw-r--r--drivers/watchdog/gef_wdt.c1
-rw-r--r--drivers/watchdog/iTCO_wdt.c30
-rw-r--r--drivers/watchdog/it8712f_wdt.c25
-rw-r--r--drivers/watchdog/it87_wdt.c96
-rw-r--r--drivers/watchdog/machzwd.c4
-rw-r--r--drivers/watchdog/octeon-wdt-main.c4
-rw-r--r--drivers/watchdog/omap_wdt.c42
-rw-r--r--drivers/xen/Kconfig3
-rw-r--r--drivers/xen/Makefile7
-rw-r--r--drivers/xen/balloon.c42
-rw-r--r--drivers/xen/biomerge.c13
-rw-r--r--drivers/xen/events.c687
-rw-r--r--drivers/xen/evtchn.c100
-rw-r--r--drivers/xen/manage.c1
-rw-r--r--drivers/xen/pci.c117
-rw-r--r--drivers/xen/xenbus/xenbus_client.c2
-rw-r--r--drivers/xen/xenbus/xenbus_probe.c33
-rw-r--r--drivers/xen/xenfs/Makefile3
-rw-r--r--drivers/xen/xenfs/privcmd.c400
-rw-r--r--drivers/xen/xenfs/super.c65
-rw-r--r--drivers/xen/xenfs/xenfs.h3
-rw-r--r--drivers/xen/xenfs/xenstored.c68
-rw-r--r--drivers/zorro/proc.c1
-rw-r--r--firmware/ihex2fw.c17
-rw-r--r--fs/9p/Kconfig13
-rw-r--r--fs/9p/Makefile1
-rw-r--r--fs/9p/acl.c392
-rw-r--r--fs/9p/acl.h49
-rw-r--r--fs/9p/fid.c1
-rw-r--r--fs/9p/v9fs.c22
-rw-r--r--fs/9p/v9fs.h10
-rw-r--r--fs/9p/v9fs_vfs.h4
-rw-r--r--fs/9p/vfs_addr.c30
-rw-r--r--fs/9p/vfs_dir.c4
-rw-r--r--fs/9p/vfs_file.c265
-rw-r--r--fs/9p/vfs_inode.c258
-rw-r--r--fs/9p/vfs_super.c36
-rw-r--r--fs/9p/xattr.c52
-rw-r--r--fs/9p/xattr.h6
-rw-r--r--fs/Kconfig9
-rw-r--r--fs/Kconfig.binfmt4
-rw-r--r--fs/Makefile7
-rw-r--r--fs/adfs/super.c9
-rw-r--r--fs/affs/file.c4
-rw-r--r--fs/affs/inode.c2
-rw-r--r--fs/affs/super.c9
-rw-r--r--fs/afs/dir.c2
-rw-r--r--fs/afs/super.c19
-rw-r--r--fs/afs/write.c19
-rw-r--r--fs/aio.c14
-rw-r--r--fs/anon_inodes.c16
-rw-r--r--fs/autofs4/init.c8
-rw-r--r--fs/autofs4/inode.c1
-rw-r--r--fs/autofs4/root.c12
-rw-r--r--fs/befs/linuxvfs.c11
-rw-r--r--fs/bfs/dir.c2
-rw-r--r--fs/bfs/inode.c8
-rw-r--r--fs/binfmt_misc.c9
-rw-r--r--fs/bio.c23
-rw-r--r--fs/block_dev.c43
-rw-r--r--fs/btrfs/compression.c17
-rw-r--r--fs/btrfs/ctree.c57
-rw-r--r--fs/btrfs/ctree.h100
-rw-r--r--fs/btrfs/dir-item.c2
-rw-r--r--fs/btrfs/disk-io.c70
-rw-r--r--fs/btrfs/export.c76
-rw-r--r--fs/btrfs/extent-tree.c694
-rw-r--r--fs/btrfs/extent_io.c245
-rw-r--r--fs/btrfs/extent_io.h7
-rw-r--r--fs/btrfs/extent_map.c4
-rw-r--r--fs/btrfs/file.c7
-rw-r--r--fs/btrfs/free-space-cache.c751
-rw-r--r--fs/btrfs/free-space-cache.h18
-rw-r--r--fs/btrfs/inode.c500
-rw-r--r--fs/btrfs/ioctl.c429
-rw-r--r--fs/btrfs/ioctl.h13
-rw-r--r--fs/btrfs/ordered-data.c69
-rw-r--r--fs/btrfs/ordered-data.h3
-rw-r--r--fs/btrfs/relocation.c109
-rw-r--r--fs/btrfs/root-tree.c2
-rw-r--r--fs/btrfs/super.c98
-rw-r--r--fs/btrfs/transaction.c239
-rw-r--r--fs/btrfs/transaction.h8
-rw-r--r--fs/btrfs/tree-defrag.c2
-rw-r--r--fs/btrfs/tree-log.c38
-rw-r--r--fs/btrfs/volumes.c7
-rw-r--r--fs/btrfs/xattr.c2
-rw-r--r--fs/btrfs/zlib.c5
-rw-r--r--fs/buffer.c29
-rw-r--r--fs/ceph/addr.c15
-rw-r--r--fs/ceph/caps.c17
-rw-r--r--fs/ceph/dir.c16
-rw-r--r--fs/ceph/file.c52
-rw-r--r--fs/ceph/inode.c50
-rw-r--r--fs/ceph/mds_client.c8
-rw-r--r--fs/ceph/mds_client.h2
-rw-r--r--fs/ceph/super.c50
-rw-r--r--fs/ceph/super.h4
-rw-r--r--fs/cifs/Kconfig11
-rw-r--r--fs/cifs/TODO2
-rw-r--r--fs/cifs/cifs_fs_sb.h6
-rw-r--r--fs/cifs/cifsacl.c48
-rw-r--r--fs/cifs/cifsencrypt.c427
-rw-r--r--fs/cifs/cifsfs.c23
-rw-r--r--fs/cifs/cifsfs.h2
-rw-r--r--fs/cifs/cifsglob.h58
-rw-r--r--fs/cifs/cifspdu.h13
-rw-r--r--fs/cifs/cifsproto.h21
-rw-r--r--fs/cifs/cifssmb.c4
-rw-r--r--fs/cifs/connect.c251
-rw-r--r--fs/cifs/dns_resolve.c2
-rw-r--r--fs/cifs/file.c143
-rw-r--r--fs/cifs/fscache.c12
-rw-r--r--fs/cifs/inode.c52
-rw-r--r--fs/cifs/ioctl.c16
-rw-r--r--fs/cifs/misc.c27
-rw-r--r--fs/cifs/readdir.c29
-rw-r--r--fs/cifs/sess.c166
-rw-r--r--fs/cifs/transport.c6
-rw-r--r--fs/cifs/xattr.c55
-rw-r--r--fs/coda/cache.c17
-rw-r--r--fs/coda/cnode.c19
-rw-r--r--fs/coda/dir.c157
-rw-r--r--fs/coda/file.c31
-rw-r--r--fs/coda/inode.c69
-rw-r--r--fs/coda/pioctl.c22
-rw-r--r--fs/coda/psdev.c41
-rw-r--r--fs/coda/symlink.c3
-rw-r--r--fs/coda/upcall.c89
-rw-r--r--fs/compat.c74
-rw-r--r--fs/compat_ioctl.c30
-rw-r--r--fs/configfs/inode.c1
-rw-r--r--fs/configfs/mount.c8
-rw-r--r--fs/cramfs/inode.c9
-rw-r--r--fs/dcache.c277
-rw-r--r--fs/debugfs/inode.c9
-rw-r--r--fs/devpts/inode.c32
-rw-r--r--fs/direct-io.c2
-rw-r--r--fs/ecryptfs/ecryptfs_kernel.h1
-rw-r--r--fs/ecryptfs/inode.c11
-rw-r--r--fs/ecryptfs/keystore.c45
-rw-r--r--fs/ecryptfs/main.c20
-rw-r--r--fs/ecryptfs/super.c3
-rw-r--r--fs/efs/super.c8
-rw-r--r--fs/eventpoll.c35
-rw-r--r--fs/exec.c209
-rw-r--r--fs/exofs/dir.c4
-rw-r--r--fs/exofs/file.c6
-rw-r--r--fs/exofs/inode.c64
-rw-r--r--fs/exofs/namei.c2
-rw-r--r--fs/exofs/super.c10
-rw-r--r--fs/exportfs/expfs.c17
-rw-r--r--fs/ext2/balloc.c3
-rw-r--r--fs/ext2/dir.c2
-rw-r--r--fs/ext2/ext2.h1
-rw-r--r--fs/ext2/inode.c11
-rw-r--r--fs/ext2/namei.c2
-rw-r--r--fs/ext2/super.c12
-rw-r--r--fs/ext2/xattr.c2
-rw-r--r--fs/ext3/balloc.c17
-rw-r--r--fs/ext3/ialloc.c11
-rw-r--r--fs/ext3/inode.c24
-rw-r--r--fs/ext3/namei.c2
-rw-r--r--fs/ext3/resize.c13
-rw-r--r--fs/ext3/super.c50
-rw-r--r--fs/ext4/Makefile2
-rw-r--r--fs/ext4/balloc.c5
-rw-r--r--fs/ext4/block_validity.c7
-rw-r--r--fs/ext4/dir.c2
-rw-r--r--fs/ext4/ext4.h112
-rw-r--r--fs/ext4/ext4_extents.h65
-rw-r--r--fs/ext4/extents.c368
-rw-r--r--fs/ext4/file.c44
-rw-r--r--fs/ext4/fsync.c83
-rw-r--r--fs/ext4/ialloc.c135
-rw-r--r--fs/ext4/inode.c599
-rw-r--r--fs/ext4/ioctl.c24
-rw-r--r--fs/ext4/mballoc.c553
-rw-r--r--fs/ext4/migrate.c2
-rw-r--r--fs/ext4/move_extent.c22
-rw-r--r--fs/ext4/namei.c65
-rw-r--r--fs/ext4/page-io.c431
-rw-r--r--fs/ext4/resize.c52
-rw-r--r--fs/ext4/super.c620
-rw-r--r--fs/ext4/xattr.c4
-rw-r--r--fs/ext4/xattr.h10
-rw-r--r--fs/fat/namei_msdos.c9
-rw-r--r--fs/fat/namei_vfat.c9
-rw-r--r--fs/fcntl.c62
-rw-r--r--fs/file_table.c17
-rw-r--r--fs/freevxfs/vxfs_inode.c1
-rw-r--r--fs/freevxfs/vxfs_super.c9
-rw-r--r--fs/fs-writeback.c144
-rw-r--r--fs/fuse/control.c11
-rw-r--r--fs/fuse/dev.c19
-rw-r--r--fs/fuse/file.c10
-rw-r--r--fs/fuse/inode.c17
-rw-r--r--fs/gfs2/aops.c3
-rw-r--r--fs/gfs2/export.c46
-rw-r--r--fs/gfs2/glock.c21
-rw-r--r--fs/gfs2/inode.c152
-rw-r--r--fs/gfs2/inode.h4
-rw-r--r--fs/gfs2/meta_io.c2
-rw-r--r--fs/gfs2/ops_fstype.c52
-rw-r--r--fs/gfs2/ops_inode.c8
-rw-r--r--fs/gfs2/quota.c15
-rw-r--r--fs/gfs2/rgrp.c91
-rw-r--r--fs/gfs2/super.c1
-rw-r--r--fs/hfs/hfs_fs.h13
-rw-r--r--fs/hfs/inode.c2
-rw-r--r--fs/hfs/mdb.c4
-rw-r--r--fs/hfs/super.c10
-rw-r--r--fs/hfsplus/dir.c6
-rw-r--r--fs/hfsplus/inode.c2
-rw-r--r--fs/hfsplus/ioctl.c2
-rw-r--r--fs/hfsplus/super.c10
-rw-r--r--fs/hostfs/hostfs.h3
-rw-r--r--fs/hostfs/hostfs_kern.c10
-rw-r--r--fs/hostfs/hostfs_user.c14
-rw-r--r--fs/hpfs/buffer.c4
-rw-r--r--fs/hpfs/hpfs_fn.h2
-rw-r--r--fs/hpfs/super.c11
-rw-r--r--fs/hppfs/hppfs.c8
-rw-r--r--fs/hugetlbfs/inode.c27
-rw-r--r--fs/inode.c527
-rw-r--r--fs/internal.h7
-rw-r--r--fs/ioctl.c1
-rw-r--r--fs/ioprio.c13
-rw-r--r--fs/isofs/inode.c66
-rw-r--r--fs/jbd/checkpoint.c4
-rw-r--r--fs/jbd/commit.c8
-rw-r--r--fs/jbd/journal.c44
-rw-r--r--fs/jbd/recovery.c2
-rw-r--r--fs/jbd/transaction.c6
-rw-r--r--fs/jbd2/checkpoint.c10
-rw-r--r--fs/jbd2/commit.c12
-rw-r--r--fs/jbd2/journal.c30
-rw-r--r--fs/jbd2/transaction.c1
-rw-r--r--fs/jffs2/build.c2
-rw-r--r--fs/jffs2/compr.c6
-rw-r--r--fs/jffs2/compr.h4
-rw-r--r--fs/jffs2/compr_lzo.c4
-rw-r--r--fs/jffs2/compr_rtime.c6
-rw-r--r--fs/jffs2/compr_rubin.c11
-rw-r--r--fs/jffs2/compr_zlib.c6
-rw-r--r--fs/jffs2/dir.c7
-rw-r--r--fs/jffs2/erase.c2
-rw-r--r--fs/jffs2/fs.c22
-rw-r--r--fs/jffs2/gc.c7
-rw-r--r--fs/jffs2/jffs2_fs_sb.h1
-rw-r--r--fs/jffs2/nodelist.c8
-rw-r--r--fs/jffs2/nodelist.h3
-rw-r--r--fs/jffs2/scan.c12
-rw-r--r--fs/jffs2/super.c9
-rw-r--r--fs/jfs/jfs_imap.c2
-rw-r--r--fs/jfs/jfs_txnmgr.c2
-rw-r--r--fs/jfs/namei.c2
-rw-r--r--fs/jfs/super.c9
-rw-r--r--fs/libfs.c22
-rw-r--r--fs/lockd/clntlock.c16
-rw-r--r--fs/lockd/clntproc.c14
-rw-r--r--fs/lockd/host.c12
-rw-r--r--fs/lockd/mon.c1
-rw-r--r--fs/lockd/svc.c13
-rw-r--r--fs/lockd/svc4proc.c3
-rw-r--r--fs/lockd/svclock.c38
-rw-r--r--fs/lockd/svcproc.c3
-rw-r--r--fs/lockd/svcsubs.c9
-rw-r--r--fs/locks.c149
-rw-r--r--fs/logfs/dev_bdev.c15
-rw-r--r--fs/logfs/dev_mtd.c18
-rw-r--r--fs/logfs/dir.c2
-rw-r--r--fs/logfs/logfs.h22
-rw-r--r--fs/logfs/super.c77
-rw-r--r--fs/minix/inode.c9
-rw-r--r--fs/minix/namei.c2
-rw-r--r--fs/namei.c18
-rw-r--r--fs/namespace.c3
-rw-r--r--fs/ncpfs/dir.c1
-rw-r--r--fs/ncpfs/file.c1
-rw-r--r--fs/ncpfs/inode.c9
-rw-r--r--fs/ncpfs/ioctl.c1
-rw-r--r--fs/nfs/Kconfig20
-rw-r--r--fs/nfs/Makefile4
-rw-r--r--fs/nfs/callback.c5
-rw-r--r--fs/nfs/callback_proc.c8
-rw-r--r--fs/nfs/client.c28
-rw-r--r--fs/nfs/delegation.c1
-rw-r--r--fs/nfs/dir.c1059
-rw-r--r--fs/nfs/direct.c4
-rw-r--r--fs/nfs/dns_resolve.c6
-rw-r--r--fs/nfs/file.c87
-rw-r--r--fs/nfs/getroot.c3
-rw-r--r--fs/nfs/idmap.c211
-rw-r--r--fs/nfs/inode.c39
-rw-r--r--fs/nfs/internal.h21
-rw-r--r--fs/nfs/mount_clnt.c4
-rw-r--r--fs/nfs/nfs2xdr.c111
-rw-r--r--fs/nfs/nfs3proc.c62
-rw-r--r--fs/nfs/nfs3xdr.c202
-rw-r--r--fs/nfs/nfs4_fs.h4
-rw-r--r--fs/nfs/nfs4filelayout.c280
-rw-r--r--fs/nfs/nfs4filelayout.h94
-rw-r--r--fs/nfs/nfs4filelayoutdev.c448
-rw-r--r--fs/nfs/nfs4proc.c501
-rw-r--r--fs/nfs/nfs4state.c42
-rw-r--r--fs/nfs/nfs4xdr.c706
-rw-r--r--fs/nfs/nfsroot.c568
-rw-r--r--fs/nfs/pagelist.c8
-rw-r--r--fs/nfs/pnfs.c783
-rw-r--r--fs/nfs/pnfs.h189
-rw-r--r--fs/nfs/proc.c35
-rw-r--r--fs/nfs/read.c4
-rw-r--r--fs/nfs/super.c177
-rw-r--r--fs/nfs/sysctl.c2
-rw-r--r--fs/nfs/unlink.c259
-rw-r--r--fs/nfs/write.c22
-rw-r--r--fs/nfsd/Kconfig13
-rw-r--r--fs/nfsd/export.c73
-rw-r--r--fs/nfsd/nfs4callback.c245
-rw-r--r--fs/nfsd/nfs4idmap.c105
-rw-r--r--fs/nfsd/nfs4proc.c7
-rw-r--r--fs/nfsd/nfs4state.c555
-rw-r--r--fs/nfsd/nfs4xdr.c18
-rw-r--r--fs/nfsd/nfsctl.c34
-rw-r--r--fs/nfsd/nfsd.h2
-rw-r--r--fs/nfsd/nfssvc.c5
-rw-r--r--fs/nfsd/state.h52
-rw-r--r--fs/nfsd/vfs.c16
-rw-r--r--fs/nilfs2/dat.c2
-rw-r--r--fs/nilfs2/ioctl.c4
-rw-r--r--fs/nilfs2/namei.c2
-rw-r--r--fs/nilfs2/segment.c2
-rw-r--r--fs/nilfs2/super.c16
-rw-r--r--fs/notify/Kconfig2
-rw-r--r--fs/notify/fanotify/fanotify.c27
-rw-r--r--fs/notify/fanotify/fanotify_user.c98
-rw-r--r--fs/notify/fsnotify.c68
-rw-r--r--fs/notify/inode_mark.c11
-rw-r--r--fs/notify/inotify/inotify_user.c2
-rw-r--r--fs/notify/vfsmount_mark.c6
-rw-r--r--fs/ntfs/super.c28
-rw-r--r--fs/ocfs2/aops.c19
-rw-r--r--fs/ocfs2/aops.h3
-rw-r--r--fs/ocfs2/cluster/heartbeat.c14
-rw-r--r--fs/ocfs2/dcache.c1
-rw-r--r--fs/ocfs2/dlm/dlmdomain.c2
-rw-r--r--fs/ocfs2/dlmfs/dlmfs.c10
-rw-r--r--fs/ocfs2/file.c9
-rw-r--r--fs/ocfs2/namei.c2
-rw-r--r--fs/ocfs2/ocfs2.h6
-rw-r--r--fs/ocfs2/stack_user.c2
-rw-r--r--fs/ocfs2/super.c12
-rw-r--r--fs/omfs/inode.c9
-rw-r--r--fs/open.c6
-rw-r--r--fs/openpromfs/inode.c8
-rw-r--r--fs/partitions/check.c12
-rw-r--r--fs/pipe.c25
-rw-r--r--fs/proc/Kconfig4
-rw-r--r--fs/proc/base.c113
-rw-r--r--fs/proc/inode.c1
-rw-r--r--fs/proc/proc_sysctl.c2
-rw-r--r--fs/proc/root.c16
-rw-r--r--fs/proc/softirqs.c4
-rw-r--r--fs/proc/stat.c14
-rw-r--r--fs/proc/task_mmu.c9
-rw-r--r--fs/qnx4/inode.c9
-rw-r--r--fs/quota/Kconfig4
-rw-r--r--fs/quota/dquot.c30
-rw-r--r--fs/ramfs/inode.c18
-rw-r--r--fs/read_write.c91
-rw-r--r--fs/reiserfs/inode.c27
-rw-r--r--fs/reiserfs/ioctl.c14
-rw-r--r--fs/reiserfs/journal.c1
-rw-r--r--fs/reiserfs/namei.c2
-rw-r--r--fs/reiserfs/super.c10
-rw-r--r--fs/reiserfs/xattr.c7
-rw-r--r--fs/reiserfs/xattr_acl.c6
-rw-r--r--fs/romfs/super.c17
-rw-r--r--fs/select.c6
-rw-r--r--fs/seq_file.c2
-rw-r--r--fs/signalfd.c10
-rw-r--r--fs/splice.c24
-rw-r--r--fs/squashfs/super.c10
-rw-r--r--fs/squashfs/xattr.c9
-rw-r--r--fs/squashfs/xattr.h4
-rw-r--r--fs/squashfs/xattr_id.c1
-rw-r--r--fs/super.c119
-rw-r--r--fs/sysfs/mount.c32
-rw-r--r--fs/sysv/namei.c2
-rw-r--r--fs/sysv/super.c17
-rw-r--r--fs/ubifs/dir.c2
-rw-r--r--fs/ubifs/super.c13
-rw-r--r--fs/udf/namei.c2
-rw-r--r--fs/udf/super.c9
-rw-r--r--fs/ufs/namei.c2
-rw-r--r--fs/ufs/super.c8
-rw-r--r--fs/xfs/Kconfig1
-rw-r--r--fs/xfs/linux-2.6/xfs_aops.c104
-rw-r--r--fs/xfs/linux-2.6/xfs_buf.c38
-rw-r--r--fs/xfs/linux-2.6/xfs_ioctl.c2
-rw-r--r--fs/xfs/linux-2.6/xfs_iops.c7
-rw-r--r--fs/xfs/linux-2.6/xfs_super.c17
-rw-r--r--fs/xfs/linux-2.6/xfs_sync.c1
-rw-r--r--fs/xfs/xfs_bmap.c85
-rw-r--r--fs/xfs/xfs_bmap.h5
-rw-r--r--fs/xfs/xfs_dfrag.c13
-rw-r--r--fs/xfs/xfs_error.c3
-rw-r--r--fs/xfs/xfs_error.h5
-rw-r--r--fs/xfs/xfs_filestream.c8
-rw-r--r--fs/xfs/xfs_inode.h2
-rw-r--r--fs/xfs/xfs_inode_item.c31
-rw-r--r--fs/xfs/xfs_mount.c1
-rw-r--r--fs/xfs/xfs_quota.h20
-rw-r--r--include/acpi/acpi_bus.h14
-rw-r--r--include/acpi/acpi_drivers.h2
-rw-r--r--include/acpi/acpiosxf.h14
-rw-r--r--include/acpi/acpixf.h11
-rw-r--r--include/acpi/actypes.h30
-rw-r--r--include/acpi/platform/acenv.h6
-rw-r--r--include/acpi/platform/acgcc.h2
-rw-r--r--include/acpi/platform/aclinux.h7
-rw-r--r--include/asm-generic/audit_change_attr.h4
-rw-r--r--include/asm-generic/cputime.h6
-rw-r--r--include/asm-generic/gpio.h4
-rw-r--r--include/asm-generic/pgtable.h2
-rw-r--r--include/asm-generic/stat.h14
-rw-r--r--include/asm-generic/vmlinux.lds.h6
-rw-r--r--include/drm/drmP.h45
-rw-r--r--include/drm/drm_crtc.h4
-rw-r--r--include/drm/drm_crtc_helper.h8
-rw-r--r--include/drm/drm_dp_helper.h3
-rw-r--r--include/drm/i915_drm.h8
-rw-r--r--include/drm/intel-gtt.h18
-rw-r--r--include/drm/nouveau_drm.h7
-rw-r--r--include/drm/ttm/ttm_bo_api.h7
-rw-r--r--include/drm/ttm/ttm_bo_driver.h100
-rw-r--r--include/drm/vmwgfx_drm.h1
-rw-r--r--include/linux/Kbuild6
-rw-r--r--include/linux/acpi.h11
-rw-r--r--include/linux/amba/pl08x.h222
-rw-r--r--include/linux/atomic.h37
-rw-r--r--include/linux/audit.h9
-rw-r--r--include/linux/backing-dev.h3
-rw-r--r--include/linux/basic_mmio_gpio.h20
-rw-r--r--include/linux/bfin_mac.h29
-rw-r--r--include/linux/binfmts.h5
-rw-r--r--include/linux/bio.h4
-rw-r--r--include/linux/blk_types.h6
-rw-r--r--include/linux/blkdev.h12
-rw-r--r--include/linux/buffer_head.h1
-rw-r--r--include/linux/ceph/libceph.h3
-rw-r--r--include/linux/ceph/messenger.h1
-rw-r--r--include/linux/ceph/osd_client.h7
-rw-r--r--include/linux/cgroup.h4
-rw-r--r--include/linux/coda_fs_i.h13
-rw-r--r--include/linux/coda_linux.h6
-rw-r--r--include/linux/coda_psdev.h4
-rw-r--r--include/linux/completion.h10
-rw-r--r--include/linux/connector.h8
-rw-r--r--include/linux/cpu.h5
-rw-r--r--include/linux/davinci_emac.h16
-rw-r--r--include/linux/dccp.h4
-rw-r--r--include/linux/dmaengine.h60
-rw-r--r--include/linux/dmar.h17
-rw-r--r--include/linux/drbd.h2
-rw-r--r--include/linux/elevator.h2
-rw-r--r--include/linux/fanotify.h36
-rw-r--r--include/linux/fb.h7
-rw-r--r--include/linux/fs.h91
-rw-r--r--include/linux/fsl-diu-fb.h1
-rw-r--r--include/linux/fsnotify.h38
-rw-r--r--include/linux/fsnotify_backend.h26
-rw-r--r--include/linux/gameport.h4
-rw-r--r--include/linux/genhd.h1
-rw-r--r--include/linux/gfp.h109
-rw-r--r--include/linux/gpio-fan.h36
-rw-r--r--include/linux/gpio_keys.h2
-rw-r--r--include/linux/hardirq.h12
-rw-r--r--include/linux/highmem.h75
-rw-r--r--include/linux/hugetlb.h17
-rw-r--r--include/linux/hw_breakpoint.h4
-rw-r--r--include/linux/i2c-id.h22
-rw-r--r--include/linux/i2c.h4
-rw-r--r--include/linux/i2c/adp5588.h36
-rw-r--r--include/linux/i2c/apds990x.h79
-rw-r--r--include/linux/i2c/bh1770glc.h53
-rw-r--r--include/linux/i2c/twl.h83
-rw-r--r--include/linux/idr.h1
-rw-r--r--include/linux/if_vlan.h25
-rw-r--r--include/linux/init_task.h4
-rw-r--r--include/linux/input.h86
-rw-r--r--include/linux/input/bu21013.h44
-rw-r--r--include/linux/intel_mid_dma.h16
-rw-r--r--include/linux/interrupt.h2
-rw-r--r--include/linux/io-mapping.h14
-rw-r--r--include/linux/iocontext.h1
-rw-r--r--include/linux/ioport.h1
-rw-r--r--include/linux/irq.h5
-rw-r--r--include/linux/irqnr.h2
-rw-r--r--include/linux/jbd2.h2
-rw-r--r--include/linux/jump_label.h7
-rw-r--r--include/linux/kernel.h272
-rw-r--r--include/linux/kernel_stat.h18
-rw-r--r--include/linux/kfifo.h28
-rw-r--r--include/linux/kgdb.h13
-rw-r--r--include/linux/leds-lp5521.h47
-rw-r--r--include/linux/leds-lp5523.h (renamed from sound/soc/omap/omap-mcpdm.h)34
-rw-r--r--include/linux/leds.h47
-rw-r--r--include/linux/libata.h2
-rw-r--r--include/linux/lis3lv02d.h55
-rw-r--r--include/linux/list.h6
-rw-r--r--include/linux/lockd/lockd.h1
-rw-r--r--include/linux/magic.h1
-rw-r--r--include/linux/marvell_phy.h3
-rw-r--r--include/linux/math64.h12
-rw-r--r--include/linux/memory_hotplug.h10
-rw-r--r--include/linux/mfd/88pm860x.h2
-rw-r--r--include/linux/mfd/ab8500.h28
-rw-r--r--include/linux/mfd/abx500.h4
-rw-r--r--include/linux/mfd/core.h3
-rw-r--r--include/linux/mfd/max8998-private.h129
-rw-r--r--include/linux/mfd/max8998.h23
-rw-r--r--include/linux/mfd/mc13783.h239
-rw-r--r--include/linux/mfd/mc13xxx.h154
-rw-r--r--include/linux/mfd/pcf50633/core.h7
-rw-r--r--include/linux/mfd/sh_mobile_sdhi.h2
-rw-r--r--include/linux/mfd/stmpe.h6
-rw-r--r--include/linux/mfd/tmio.h6
-rw-r--r--include/linux/mfd/tps6586x.h31
-rw-r--r--include/linux/mfd/wm831x/core.h12
-rw-r--r--include/linux/mfd/wm8350/audio.h3
-rw-r--r--include/linux/migrate.h16
-rw-r--r--include/linux/mlx4/cmd.h2
-rw-r--r--include/linux/mlx4/device.h35
-rw-r--r--include/linux/mlx4/driver.h9
-rw-r--r--include/linux/mlx4/qp.h9
-rw-r--r--include/linux/mm.h29
-rw-r--r--include/linux/mm_types.h2
-rw-r--r--include/linux/mmc/card.h6
-rw-r--r--include/linux/mmc/core.h2
-rw-r--r--include/linux/mmc/host.h49
-rw-r--r--include/linux/mmc/mmc.h10
-rw-r--r--include/linux/mmc/sdhci-pltfm.h (renamed from include/linux/sdhci-pltfm.h)2
-rw-r--r--include/linux/mmc/sdhci.h144
-rw-r--r--include/linux/mmc/sh_mmcif.h19
-rw-r--r--include/linux/mmu_notifier.h2
-rw-r--r--include/linux/mmzone.h10
-rw-r--r--include/linux/module.h2
-rw-r--r--include/linux/moduleparam.h4
-rw-r--r--include/linux/mtd/bbm.h4
-rw-r--r--include/linux/mtd/cfi.h1
-rw-r--r--include/linux/mtd/fsmc.h181
-rw-r--r--include/linux/mtd/inftl.h14
-rw-r--r--include/linux/mtd/mtd.h15
-rw-r--r--include/linux/mtd/nand.h426
-rw-r--r--include/linux/mtd/partitions.h7
-rw-r--r--include/linux/mtd/super.h5
-rw-r--r--include/linux/net.h2
-rw-r--r--include/linux/netdevice.h23
-rw-r--r--include/linux/netfilter.h2
-rw-r--r--include/linux/nfs4.h65
-rw-r--r--include/linux/nfs_fs.h25
-rw-r--r--include/linux/nfs_fs_sb.h4
-rw-r--r--include/linux/nfs_idmap.h31
-rw-r--r--include/linux/nfs_mount.h3
-rw-r--r--include/linux/nfs_xdr.h129
-rw-r--r--include/linux/node.h5
-rw-r--r--include/linux/of_device.h13
-rw-r--r--include/linux/of_fdt.h2
-rw-r--r--include/linux/of_irq.h4
-rw-r--r--include/linux/of_pdt.h45
-rw-r--r--include/linux/page-flags.h2
-rw-r--r--include/linux/page_cgroup.h7
-rw-r--r--include/linux/pageblock-flags.h5
-rw-r--r--include/linux/pagemap.h13
-rw-r--r--include/linux/pci.h5
-rw-r--r--include/linux/pci_ids.h38
-rw-r--r--include/linux/pci_regs.h6
-rw-r--r--include/linux/percpu-defs.h12
-rw-r--r--include/linux/percpu_counter.h10
-rw-r--r--include/linux/perf_event.h40
-rw-r--r--include/linux/phy.h12
-rw-r--r--include/linux/pipe_fs_i.h1
-rw-r--r--include/linux/poll.h2
-rw-r--r--include/linux/power_supply.h6
-rw-r--r--include/linux/printk.h248
-rw-r--r--include/linux/ptrace.h12
-rw-r--r--include/linux/pwm_backlight.h1
-rw-r--r--include/linux/radix-tree.h39
-rw-r--r--include/linux/ramfs.h4
-rw-r--r--include/linux/ramoops.h15
-rw-r--r--include/linux/ratelimit.h2
-rw-r--r--include/linux/regulator/lp3972.h48
-rw-r--r--include/linux/regulator/machine.h5
-rw-r--r--include/linux/regulator/max8952.h135
-rw-r--r--include/linux/reiserfs_fs.h3
-rw-r--r--include/linux/resource.h1
-rw-r--r--include/linux/ring_buffer.h12
-rw-r--r--include/linux/rio.h17
-rw-r--r--include/linux/rio_ids.h2
-rw-r--r--include/linux/rio_regs.h18
-rw-r--r--include/linux/rmap.h30
-rw-r--r--include/linux/rtnetlink.h2
-rw-r--r--include/linux/sched.h23
-rw-r--r--include/linux/security.h9
-rw-r--r--include/linux/semaphore.h6
-rw-r--r--include/linux/serial_core.h3
-rw-r--r--include/linux/serio.h10
-rw-r--r--include/linux/sfi.h24
-rw-r--r--include/linux/sh_clk.h55
-rw-r--r--include/linux/sh_intc.h19
-rw-r--r--include/linux/sh_pfc.h1
-rw-r--r--include/linux/sh_timer.h1
-rw-r--r--include/linux/signalfd.h3
-rw-r--r--include/linux/smp.h19
-rw-r--r--include/linux/smp_lock.h3
-rw-r--r--include/linux/socket.h2
-rw-r--r--include/linux/spi/74x164.h11
-rw-r--r--include/linux/spi/spi.h3
-rw-r--r--include/linux/sunrpc/auth.h4
-rw-r--r--include/linux/sunrpc/cache.h37
-rw-r--r--include/linux/sunrpc/clnt.h2
-rw-r--r--include/linux/sunrpc/gss_spkm3.h55
-rw-r--r--include/linux/sunrpc/stats.h23
-rw-r--r--include/linux/sunrpc/svc_xprt.h42
-rw-r--r--include/linux/sunrpc/svcauth.h17
-rw-r--r--include/linux/sunrpc/xdr.h9
-rw-r--r--include/linux/sunrpc/xprt.h4
-rw-r--r--include/linux/swap.h10
-rw-r--r--include/linux/synclink.h5
-rw-r--r--include/linux/syscalls.h3
-rw-r--r--include/linux/ti_wilink_st.h400
-rw-r--r--include/linux/tracehook.h2
-rw-r--r--include/linux/tty.h12
-rw-r--r--include/linux/types.h20
-rw-r--r--include/linux/uio_driver.h2
-rw-r--r--include/linux/usb.h6
-rw-r--r--include/linux/usb/musb.h2
-rw-r--r--include/linux/via-core.h4
-rw-r--r--include/linux/videodev2.h12
-rw-r--r--include/linux/videotext.h125
-rw-r--r--include/linux/virtio_9p.h1
-rw-r--r--include/linux/vmalloc.h4
-rw-r--r--include/linux/wlp.h736
-rw-r--r--include/linux/workqueue.h6
-rw-r--r--include/linux/writeback.h8
-rw-r--r--include/media/ir-core.h41
-rw-r--r--include/media/ir-kbd-i2c.h10
-rw-r--r--include/media/lirc_dev.h6
-rw-r--r--include/media/omap1_camera.h35
-rw-r--r--include/media/rc-map.h21
-rw-r--r--include/media/s3c_fimc.h60
-rw-r--r--include/media/sh_vou.h1
-rw-r--r--include/media/soc_camera.h9
-rw-r--r--include/media/sr030pc30.h21
-rw-r--r--include/media/v4l2-chip-ident.h8
-rw-r--r--include/media/v4l2-common.h26
-rw-r--r--include/media/v4l2-dev.h8
-rw-r--r--include/media/v4l2-device.h57
-rw-r--r--include/media/v4l2-i2c-drv.h80
-rw-r--r--include/media/v4l2-mediabus.h8
-rw-r--r--include/media/v4l2-subdev.h24
-rw-r--r--include/media/videobuf-core.h19
-rw-r--r--include/media/videobuf-dma-contig.h3
-rw-r--r--include/media/videobuf-dma-sg.h3
-rw-r--r--include/media/videobuf-vmalloc.h3
-rw-r--r--include/media/wm8775.h3
-rw-r--r--include/mtd/mtd-abi.h16
-rw-r--r--include/mtd/mtd-user.h2
-rw-r--r--include/net/9p/9p.h54
-rw-r--r--include/net/9p/client.h4
-rw-r--r--include/net/af_unix.h2
-rw-r--r--include/net/caif/caif_dev.h4
-rw-r--r--include/net/caif/caif_shm.h26
-rw-r--r--include/net/caif/caif_spi.h2
-rw-r--r--include/net/caif/cfcnfg.h8
-rw-r--r--include/net/cfg80211.h2
-rw-r--r--include/net/dn.h2
-rw-r--r--include/net/dst.h2
-rw-r--r--include/net/dst_ops.h1
-rw-r--r--include/net/fib_rules.h2
-rw-r--r--include/net/garp.h2
-rw-r--r--include/net/inetpeer.h2
-rw-r--r--include/net/ip.h4
-rw-r--r--include/net/ip6_tunnel.h2
-rw-r--r--include/net/ip_fib.h2
-rw-r--r--include/net/ipip.h6
-rw-r--r--include/net/neighbour.h2
-rw-r--r--include/net/net_namespace.h2
-rw-r--r--include/net/netlink.h2
-rw-r--r--include/net/protocol.h4
-rw-r--r--include/net/sock.h6
-rw-r--r--include/net/tcp.h6
-rw-r--r--include/net/udp.h4
-rw-r--r--include/net/xfrm.h4
-rw-r--r--include/rdma/ib_addr.h134
-rw-r--r--include/rdma/ib_pack.h39
-rw-r--r--include/rdma/ib_user_verbs.h3
-rw-r--r--include/rdma/ib_verbs.h11
-rw-r--r--include/scsi/libfc.h5
-rw-r--r--include/scsi/libiscsi.h3
-rw-r--r--include/scsi/libsas.h3
-rw-r--r--include/scsi/osd_initiator.h16
-rw-r--r--include/scsi/osd_protocol.h42
-rw-r--r--include/scsi/osd_types.h5
-rw-r--r--include/scsi/scsi_host.h23
-rw-r--r--include/scsi/srp.h38
-rw-r--r--include/sound/core.h2
-rw-r--r--include/sound/emu10k1.h2
-rw-r--r--include/sound/jack.h5
-rw-r--r--include/sound/max98088.h50
-rw-r--r--include/sound/pcm.h1
-rw-r--r--include/sound/sh_fsi.h9
-rw-r--r--include/sound/soc-dai.h98
-rw-r--r--include/sound/soc-dapm.h18
-rw-r--r--include/sound/soc-of-simple.h25
-rw-r--r--include/sound/soc.h245
-rw-r--r--include/sound/tlv.h4
-rw-r--r--include/sound/tlv320aic3x.h43
-rw-r--r--include/sound/wm8962.h32
-rw-r--r--include/trace/events/ext4.h482
-rw-r--r--include/trace/events/irq.h54
-rw-r--r--include/trace/events/jbd2.h78
-rw-r--r--include/trace/events/vmscan.h44
-rw-r--r--include/trace/events/writeback.h37
-rw-r--r--include/video/da8xx-fb.h1
-rw-r--r--include/video/sh_mobile_hdmi.h16
-rw-r--r--include/video/sh_mobile_lcdc.h5
-rw-r--r--include/xen/Kbuild1
-rw-r--r--include/xen/events.h31
-rw-r--r--include/xen/interface/features.h3
-rw-r--r--include/xen/interface/io/pciif.h112
-rw-r--r--include/xen/interface/io/xenbus.h8
-rw-r--r--include/xen/interface/memory.h42
-rw-r--r--include/xen/interface/physdev.h77
-rw-r--r--include/xen/page.h7
-rw-r--r--include/xen/privcmd.h77
-rw-r--r--include/xen/xen-ops.h5
-rw-r--r--init/Kconfig130
-rw-r--r--init/do_mounts.c16
-rw-r--r--init/do_mounts_md.c2
-rw-r--r--init/do_mounts_rd.c4
-rw-r--r--init/initramfs.c14
-rw-r--r--init/main.c1
-rw-r--r--init/noinitramfs.c6
-rw-r--r--ipc/compat.c6
-rw-r--r--ipc/compat_mq.c5
-rw-r--r--ipc/mqueue.c11
-rw-r--r--ipc/shm.c64
-rw-r--r--kernel/audit.c67
-rw-r--r--kernel/audit.h5
-rw-r--r--kernel/audit_tree.c9
-rw-r--r--kernel/audit_watch.c4
-rw-r--r--kernel/auditfilter.c12
-rw-r--r--kernel/auditsc.c16
-rw-r--r--kernel/cgroup.c141
-rw-r--r--kernel/cgroup_freezer.c72
-rw-r--r--kernel/cpuset.c13
-rw-r--r--kernel/cred.c4
-rw-r--r--kernel/debug/debug_core.c16
-rw-r--r--kernel/debug/kdb/kdb_main.c69
-rw-r--r--kernel/exit.c22
-rw-r--r--kernel/fork.c17
-rw-r--r--kernel/futex.c5
-rw-r--r--kernel/futex_compat.c3
-rw-r--r--kernel/hw_breakpoint.c3
-rw-r--r--kernel/irq/irqdesc.c15
-rw-r--r--kernel/irq/manage.c4
-rw-r--r--kernel/irq/proc.c2
-rw-r--r--kernel/irq_work.c4
-rw-r--r--kernel/jump_label.c77
-rw-r--r--kernel/kexec.c2
-rw-r--r--kernel/kprobes.c33
-rw-r--r--kernel/latencytop.c17
-rw-r--r--kernel/module.c14
-rw-r--r--kernel/ns_cgroup.c8
-rw-r--r--kernel/perf_event.c229
-rw-r--r--kernel/pm_qos_params.c4
-rw-r--r--kernel/posix-cpu-timers.c12
-rw-r--r--kernel/power/Kconfig4
-rw-r--r--kernel/power/hibernate.c22
-rw-r--r--kernel/power/snapshot.c18
-rw-r--r--kernel/power/suspend.c5
-rw-r--r--kernel/power/swap.c59
-rw-r--r--kernel/power/user.c2
-rw-r--r--kernel/printk.c30
-rw-r--r--kernel/ptrace.c36
-rw-r--r--kernel/range.c2
-rw-r--r--kernel/relay.c15
-rw-r--r--kernel/resource.c153
-rw-r--r--kernel/sched.c47
-rw-r--r--kernel/sched_fair.c73
-rw-r--r--kernel/sched_stats.h20
-rw-r--r--kernel/sched_stoptask.c4
-rw-r--r--kernel/signal.c5
-rw-r--r--kernel/smp.c8
-rw-r--r--kernel/softirq.c16
-rw-r--r--kernel/stop_machine.c6
-rw-r--r--kernel/sysctl.c23
-rw-r--r--kernel/taskstats.c172
-rw-r--r--kernel/trace/Kconfig2
-rw-r--r--kernel/trace/blktrace.c4
-rw-r--r--kernel/trace/ring_buffer.c335
-rw-r--r--kernel/trace/trace.c28
-rw-r--r--kernel/trace/trace_kprobe.c1
-rw-r--r--kernel/tsacct.c10
-rw-r--r--kernel/user.c1
-rw-r--r--kernel/wait.c6
-rw-r--r--kernel/watchdog.c2
-rw-r--r--kernel/workqueue.c6
-rw-r--r--lib/Kconfig.debug32
-rw-r--r--lib/bitmap.c3
-rw-r--r--lib/debug_locks.c2
-rw-r--r--lib/div64.c52
-rw-r--r--lib/idr.c65
-rw-r--r--lib/list_sort.c172
-rw-r--r--lib/parser.c7
-rw-r--r--lib/percpu_counter.c55
-rw-r--r--lib/radix-tree.c83
-rw-r--r--lib/vsprintf.c19
-rw-r--r--mm/backing-dev.c74
-rw-r--r--mm/dmapool.c2
-rw-r--r--mm/filemap.c73
-rw-r--r--mm/highmem.c66
-rw-r--r--mm/hugetlb.c241
-rw-r--r--mm/internal.h2
-rw-r--r--mm/ksm.c7
-rw-r--r--mm/maccess.c2
-rw-r--r--mm/memcontrol.c488
-rw-r--r--mm/memory-failure.c184
-rw-r--r--mm/memory.c35
-rw-r--r--mm/memory_hotplug.c79
-rw-r--r--mm/mempolicy.c20
-rw-r--r--mm/migrate.c249
-rw-r--r--mm/mmap.c2
-rw-r--r--mm/mprotect.c2
-rw-r--r--mm/mremap.c4
-rw-r--r--mm/nommu.c52
-rw-r--r--mm/oom_kill.c33
-rw-r--r--mm/page-writeback.c31
-rw-r--r--mm/page_alloc.c132
-rw-r--r--mm/page_isolation.c3
-rw-r--r--mm/pagewalk.c5
-rw-r--r--mm/rmap.c37
-rw-r--r--mm/shmem.c17
-rw-r--r--mm/slab.c2
-rw-r--r--mm/slub.c7
-rw-r--r--mm/swap.c1
-rw-r--r--mm/swapfile.c49
-rw-r--r--mm/vmalloc.c84
-rw-r--r--mm/vmscan.c218
-rw-r--r--mm/vmstat.c44
-rw-r--r--net/802/garp.c18
-rw-r--r--net/802/stp.c4
-rw-r--r--net/8021q/vlan.c6
-rw-r--r--net/9p/client.c178
-rw-r--r--net/9p/protocol.c5
-rw-r--r--net/9p/trans_virtio.c76
-rw-r--r--net/ax25/af_ax25.c2
-rw-r--r--net/bluetooth/hci_event.c6
-rw-r--r--net/bluetooth/hidp/Kconfig2
-rw-r--r--net/bluetooth/l2cap.c8
-rw-r--r--net/bluetooth/rfcomm/core.c13
-rw-r--r--net/caif/caif_config_util.c13
-rw-r--r--net/caif/caif_dev.c2
-rw-r--r--net/caif/caif_socket.c45
-rw-r--r--net/caif/cfcnfg.c17
-rw-r--r--net/caif/cfctrl.c3
-rw-r--r--net/caif/cfdbgl.c14
-rw-r--r--net/caif/cfrfml.c2
-rw-r--r--net/can/bcm.c2
-rw-r--r--net/ceph/Makefile22
-rw-r--r--net/ceph/buffer.c2
-rw-r--r--net/ceph/messenger.c13
-rw-r--r--net/ceph/osd_client.c25
-rw-r--r--net/ceph/pagevec.c3
-rw-r--r--net/compat.c10
-rw-r--r--net/core/dev.c40
-rw-r--r--net/core/dst.c1
-rw-r--r--net/core/fib_rules.c21
-rw-r--r--net/core/filter.c70
-rw-r--r--net/core/iovec.c20
-rw-r--r--net/core/net-sysfs.c26
-rw-r--r--net/core/net_namespace.c4
-rw-r--r--net/core/pktgen.c41
-rw-r--r--net/core/request_sock.c4
-rw-r--r--net/core/rtnetlink.c9
-rw-r--r--net/core/sock.c16
-rw-r--r--net/core/sysctl_net_core.c3
-rw-r--r--net/dccp/ccid.h34
-rw-r--r--net/dccp/ccids/ccid2.c23
-rw-r--r--net/dccp/ccids/ccid2.h5
-rw-r--r--net/dccp/ccids/ccid3.c12
-rw-r--r--net/dccp/dccp.h5
-rw-r--r--net/dccp/input.c3
-rw-r--r--net/dccp/output.c209
-rw-r--r--net/dccp/proto.c21
-rw-r--r--net/dccp/timer.c27
-rw-r--r--net/decnet/af_decnet.c4
-rw-r--r--net/decnet/sysctl_net_decnet.c4
-rw-r--r--net/econet/af_econet.c91
-rw-r--r--net/ipv4/fib_frontend.c2
-rw-r--r--net/ipv4/fib_hash.c54
-rw-r--r--net/ipv4/fib_lookup.h5
-rw-r--r--net/ipv4/fib_trie.c7
-rw-r--r--net/ipv4/gre.c5
-rw-r--r--net/ipv4/icmp.c3
-rw-r--r--net/ipv4/igmp.c4
-rw-r--r--net/ipv4/inet_diag.c27
-rw-r--r--net/ipv4/inet_hashtables.c3
-rw-r--r--net/ipv4/inetpeer.c138
-rw-r--r--net/ipv4/ip_gre.c7
-rw-r--r--net/ipv4/ip_sockglue.c10
-rw-r--r--net/ipv4/ipip.c1
-rw-r--r--net/ipv4/netfilter/arp_tables.c1
-rw-r--r--net/ipv4/netfilter/ip_tables.c1
-rw-r--r--net/ipv4/netfilter/nf_nat_core.c40
-rw-r--r--net/ipv4/proc.c8
-rw-r--r--net/ipv4/protocol.c8
-rw-r--r--net/ipv4/route.c75
-rw-r--r--net/ipv4/sysctl_net_ipv4.c11
-rw-r--r--net/ipv4/tcp.c6
-rw-r--r--net/ipv4/tcp_input.c11
-rw-r--r--net/ipv4/tcp_ipv4.c12
-rw-r--r--net/ipv4/tunnel4.c29
-rw-r--r--net/ipv4/udp.c6
-rw-r--r--net/ipv6/addrconf.c68
-rw-r--r--net/ipv6/ip6_tunnel.c2
-rw-r--r--net/ipv6/ipv6_sockglue.c4
-rw-r--r--net/ipv6/netfilter/Kconfig5
-rw-r--r--net/ipv6/netfilter/Makefile5
-rw-r--r--net/ipv6/netfilter/ip6_tables.c1
-rw-r--r--net/ipv6/netfilter/nf_conntrack_reasm.c7
-rw-r--r--net/ipv6/proc.c4
-rw-r--r--net/ipv6/protocol.c8
-rw-r--r--net/ipv6/raw.c2
-rw-r--r--net/ipv6/reassembly.c2
-rw-r--r--net/ipv6/route.c8
-rw-r--r--net/ipv6/sit.c1
-rw-r--r--net/ipv6/tunnel6.c24
-rw-r--r--net/ipv6/udp.c2
-rw-r--r--net/irda/af_irda.c1
-rw-r--r--net/irda/irnet/irnet_ppp.c1
-rw-r--r--net/irda/irttp.c30
-rw-r--r--net/iucv/iucv.c3
-rw-r--r--net/l2tp/l2tp_core.c53
-rw-r--r--net/l2tp/l2tp_core.h33
-rw-r--r--net/l2tp/l2tp_debugfs.c2
-rw-r--r--net/l2tp/l2tp_ip.c2
-rw-r--r--net/mac80211/Kconfig2
-rw-r--r--net/mac80211/debugfs_key.c6
-rw-r--r--net/mac80211/ibss.c1
-rw-r--r--net/mac80211/iface.c6
-rw-r--r--net/mac80211/main.c13
-rw-r--r--net/mac80211/rate.c3
-rw-r--r--net/netfilter/Kconfig2
-rw-r--r--net/netfilter/ipvs/Kconfig1
-rw-r--r--net/netfilter/nf_conntrack_core.c3
-rw-r--r--net/netfilter/nf_conntrack_proto.c6
-rw-r--r--net/netfilter/xt_TPROXY.c10
-rw-r--r--net/netfilter/xt_socket.c19
-rw-r--r--net/netlink/af_netlink.c65
-rw-r--r--net/packet/af_packet.c7
-rw-r--r--net/rds/loop.c4
-rw-r--r--net/rds/message.c7
-rw-r--r--net/rds/rdma.c128
-rw-r--r--net/rds/send.c4
-rw-r--r--net/rds/tcp.c6
-rw-r--r--net/sched/cls_basic.c4
-rw-r--r--net/sched/cls_cgroup.c2
-rw-r--r--net/sched/em_text.c3
-rw-r--r--net/sctp/protocol.c2
-rw-r--r--net/sctp/socket.c4
-rw-r--r--net/sctp/sysctl.c4
-rw-r--r--net/socket.c20
-rw-r--r--net/sunrpc/Kconfig19
-rw-r--r--net/sunrpc/auth.c4
-rw-r--r--net/sunrpc/auth_generic.c2
-rw-r--r--net/sunrpc/auth_gss/Makefile5
-rw-r--r--net/sunrpc/auth_gss/gss_krb5_mech.c2
-rw-r--r--net/sunrpc/auth_gss/gss_spkm3_mech.c247
-rw-r--r--net/sunrpc/auth_gss/gss_spkm3_seal.c186
-rw-r--r--net/sunrpc/auth_gss/gss_spkm3_token.c267
-rw-r--r--net/sunrpc/auth_gss/gss_spkm3_unseal.c127
-rw-r--r--net/sunrpc/auth_gss/svcauth_gss.c51
-rw-r--r--net/sunrpc/cache.c288
-rw-r--r--net/sunrpc/clnt.c27
-rw-r--r--net/sunrpc/netns.h19
-rw-r--r--net/sunrpc/rpc_pipe.c19
-rw-r--r--net/sunrpc/rpcb_clnt.c60
-rw-r--r--net/sunrpc/sched.c2
-rw-r--r--net/sunrpc/stats.c47
-rw-r--r--net/sunrpc/sunrpc_syms.c58
-rw-r--r--net/sunrpc/svc.c3
-rw-r--r--net/sunrpc/svc_xprt.c60
-rw-r--r--net/sunrpc/svcauth_unix.c194
-rw-r--r--net/sunrpc/svcsock.c27
-rw-r--r--net/sunrpc/xdr.c61
-rw-r--r--net/sunrpc/xprt.c39
-rw-r--r--net/sunrpc/xprtrdma/svc_rdma.c11
-rw-r--r--net/sunrpc/xprtrdma/svc_rdma_recvfrom.c19
-rw-r--r--net/sunrpc/xprtrdma/svc_rdma_sendto.c82
-rw-r--r--net/sunrpc/xprtrdma/svc_rdma_transport.c49
-rw-r--r--net/sunrpc/xprtrdma/transport.c25
-rw-r--r--net/sunrpc/xprtsock.c358
-rw-r--r--net/tipc/socket.c1
-rw-r--r--net/unix/af_unix.c51
-rw-r--r--net/unix/garbage.c9
-rw-r--r--net/wireless/chan.c54
-rw-r--r--net/wireless/nl80211.c4
-rw-r--r--net/wireless/reg.c2
-rw-r--r--net/x25/x25_facilities.c20
-rw-r--r--net/x25/x25_in.c2
-rw-r--r--net/xfrm/xfrm_hash.c2
-rw-r--r--samples/Kconfig7
-rw-r--r--samples/Makefile2
-rw-r--r--samples/kdb/Makefile1
-rw-r--r--samples/kdb/kdb_hello.c60
-rw-r--r--scripts/Makefile.clean2
-rw-r--r--scripts/Makefile.lib4
-rw-r--r--scripts/basic/docproc.c5
-rwxr-xr-xscripts/checkpatch.pl158
-rwxr-xr-xscripts/coccicheck46
-rw-r--r--scripts/coccinelle/api/alloc/drop_kmalloc_cast.cocci (renamed from scripts/coccinelle/alloc/drop_kmalloc_cast.cocci)0
-rw-r--r--scripts/coccinelle/api/alloc/kzalloc-simple.cocci (renamed from scripts/coccinelle/alloc/kzalloc-simple.cocci)6
-rw-r--r--scripts/coccinelle/api/err_cast.cocci (renamed from scripts/coccinelle/err_cast.cocci)0
-rw-r--r--scripts/coccinelle/api/kstrdup.cocci39
-rw-r--r--scripts/coccinelle/api/memdup.cocci40
-rw-r--r--scripts/coccinelle/api/memdup_user.cocci35
-rw-r--r--scripts/coccinelle/api/resource_size.cocci (renamed from scripts/coccinelle/resource_size.cocci)0
-rw-r--r--scripts/coccinelle/free/kfree.cocci117
-rw-r--r--scripts/coccinelle/iterators/fen.cocci64
-rw-r--r--scripts/coccinelle/iterators/itnull.cocci58
-rw-r--r--scripts/coccinelle/iterators/list_entry_update.cocci62
-rw-r--r--scripts/coccinelle/locks/call_kern.cocci74
-rw-r--r--scripts/coccinelle/locks/double_lock.cocci92
-rw-r--r--scripts/coccinelle/locks/flags.cocci80
-rw-r--r--scripts/coccinelle/locks/mini_lock.cocci95
-rw-r--r--scripts/coccinelle/misc/doubleinit.cocci53
-rw-r--r--scripts/coccinelle/misc/ifcol.cocci48
-rw-r--r--scripts/coccinelle/null/deref_null.cocci (renamed from scripts/coccinelle/deref_null.cocci)0
-rw-r--r--scripts/coccinelle/null/eno.cocci20
-rw-r--r--scripts/coccinelle/null/kmerr.cocci72
-rw-r--r--scripts/coccinelle/tests/doublebitand.cocci54
-rw-r--r--scripts/coccinelle/tests/doubletest.cocci40
-rwxr-xr-xscripts/extract-ikconfig41
-rwxr-xr-xscripts/get_maintainer.pl1162
-rw-r--r--scripts/gfp-translate7
-rw-r--r--scripts/kallsyms.c8
-rw-r--r--scripts/kconfig/Makefile87
-rw-r--r--scripts/kconfig/conf.c15
-rw-r--r--scripts/kconfig/confdata.c126
-rw-r--r--scripts/kconfig/expr.h3
-rw-r--r--scripts/kconfig/gconf.c20
-rw-r--r--scripts/kconfig/gconf.glade1
-rw-r--r--scripts/kconfig/kxgettext.c15
-rw-r--r--scripts/kconfig/lex.zconf.c_shipped7
-rw-r--r--scripts/kconfig/lkc.h9
-rw-r--r--scripts/kconfig/lkc_proto.h3
-rw-r--r--scripts/kconfig/lxdialog/check-lxdialog.sh2
-rw-r--r--scripts/kconfig/mconf.c64
-rw-r--r--scripts/kconfig/menu.c17
-rw-r--r--scripts/kconfig/nconf.c487
-rw-r--r--scripts/kconfig/nconf.gui.c22
-rw-r--r--scripts/kconfig/nconf.h3
-rw-r--r--scripts/kconfig/qconf.cc174
-rw-r--r--scripts/kconfig/qconf.h76
-rw-r--r--scripts/kconfig/streamline_config.pl43
-rw-r--r--scripts/kconfig/symbol.c51
-rw-r--r--scripts/kconfig/util.c7
-rw-r--r--scripts/kconfig/zconf.gperf1
-rw-r--r--scripts/kconfig/zconf.hash.c_shipped122
-rw-r--r--scripts/kconfig/zconf.l7
-rw-r--r--scripts/kconfig/zconf.tab.c_shipped603
-rw-r--r--scripts/kconfig/zconf.y37
-rwxr-xr-xscripts/kernel-doc12
-rw-r--r--scripts/mod/modpost.c5
-rwxr-xr-xscripts/namespace.pl147
-rw-r--r--scripts/package/builddeb4
-rwxr-xr-xscripts/package/mkspec2
-rw-r--r--scripts/recordmcount.c44
-rw-r--r--scripts/recordmcount.h86
-rwxr-xr-xscripts/setlocalversion6
-rw-r--r--security/Kconfig12
-rw-r--r--security/apparmor/lsm.c6
-rw-r--r--security/apparmor/path.c2
-rw-r--r--security/apparmor/policy.c2
-rw-r--r--security/capability.c5
-rw-r--r--security/commoncap.c19
-rw-r--r--security/inode.c9
-rw-r--r--security/integrity/ima/ima.h18
-rw-r--r--security/integrity/ima/ima_api.c2
-rw-r--r--security/integrity/ima/ima_iint.c158
-rw-r--r--security/integrity/ima/ima_main.c184
-rw-r--r--security/keys/process_keys.c2
-rw-r--r--security/security.c14
-rw-r--r--security/selinux/hooks.c6
-rw-r--r--security/selinux/selinuxfs.c10
-rw-r--r--security/smack/smack_lsm.c8
-rw-r--r--security/smack/smackfs.c12
-rw-r--r--security/tomoyo/realpath.c2
-rw-r--r--sound/atmel/abdac.c4
-rw-r--r--sound/core/info.c1
-rw-r--r--sound/core/init.c9
-rw-r--r--sound/core/oss/mixer_oss.c34
-rw-r--r--sound/core/oss/pcm_oss.c19
-rw-r--r--sound/core/pcm.c3
-rw-r--r--sound/core/pcm_lib.c16
-rw-r--r--sound/core/pcm_native.c5
-rw-r--r--sound/core/sound.c1
-rw-r--r--sound/drivers/Kconfig19
-rw-r--r--sound/drivers/Makefile2
-rw-r--r--sound/drivers/aloop.c1258
-rw-r--r--sound/drivers/virmidi.c2
-rw-r--r--sound/i2c/other/ak4xxx-adda.c2
-rw-r--r--sound/isa/Kconfig36
-rw-r--r--sound/isa/Makefile4
-rw-r--r--sound/isa/ad1816a/ad1816a.c2
-rw-r--r--sound/isa/azt2320.c2
-rw-r--r--sound/isa/galaxy/Makefile10
-rw-r--r--sound/isa/galaxy/azt1605.c91
-rw-r--r--sound/isa/galaxy/azt2316.c111
-rw-r--r--sound/isa/galaxy/galaxy.c652
-rw-r--r--sound/isa/gus/gusmax.c4
-rw-r--r--sound/isa/sb/sb8.c2
-rw-r--r--sound/isa/sgalaxy.c369
-rw-r--r--sound/oss/Kconfig8
-rw-r--r--sound/oss/Makefile1
-rw-r--r--sound/oss/au1550_ac97.c48
-rw-r--r--sound/oss/dev_table.c6
-rw-r--r--sound/oss/dmasound/dmasound_core.c41
-rw-r--r--sound/oss/midibuf.c4
-rw-r--r--sound/oss/msnd_pinnacle.c15
-rw-r--r--sound/oss/pss.c6
-rw-r--r--sound/oss/sb_ess.c1
-rw-r--r--sound/oss/sequencer.c4
-rw-r--r--sound/oss/sh_dac_audio.c325
-rw-r--r--sound/oss/soundcard.c43
-rw-r--r--sound/oss/swarm_cs4297a.c20
-rw-r--r--sound/oss/vwsnd.c30
-rw-r--r--sound/pci/Kconfig17
-rw-r--r--sound/pci/asihpi/hpi6000.c2
-rw-r--r--sound/pci/asihpi/hpi6205.c2
-rw-r--r--sound/pci/asihpi/hpicmn.c12
-rw-r--r--sound/pci/asihpi/hpioctl.c2
-rw-r--r--sound/pci/au88x0/au88x0_mixer.c2
-rw-r--r--sound/pci/azt3328.c26
-rw-r--r--sound/pci/ca0106/ca0106.h5
-rw-r--r--sound/pci/ca0106/ca0106_main.c136
-rw-r--r--sound/pci/ca0106/ca0106_mixer.c93
-rw-r--r--sound/pci/cs46xx/dsp_spos.c33
-rw-r--r--sound/pci/ctxfi/ctpcm.c16
-rw-r--r--sound/pci/emu10k1/emumpu401.c2
-rw-r--r--sound/pci/hda/Kconfig39
-rw-r--r--sound/pci/hda/Makefile15
-rw-r--r--sound/pci/hda/hda_codec.c271
-rw-r--r--sound/pci/hda/hda_codec.h13
-rw-r--r--sound/pci/hda/hda_eld.c7
-rw-r--r--sound/pci/hda/hda_generic.c41
-rw-r--r--sound/pci/hda/hda_intel.c101
-rw-r--r--sound/pci/hda/hda_local.h51
-rw-r--r--sound/pci/hda/patch_analog.c48
-rw-r--r--sound/pci/hda/patch_atihdmi.c224
-rw-r--r--sound/pci/hda/patch_ca0110.c10
-rw-r--r--sound/pci/hda/patch_cirrus.c95
-rw-r--r--sound/pci/hda/patch_conexant.c653
-rw-r--r--sound/pci/hda/patch_hdmi.c797
-rw-r--r--sound/pci/hda/patch_intelhdmi.c220
-rw-r--r--sound/pci/hda/patch_nvhdmi.c608
-rw-r--r--sound/pci/hda/patch_realtek.c989
-rw-r--r--sound/pci/hda/patch_sigmatel.c475
-rw-r--r--sound/pci/hda/patch_via.c587
-rw-r--r--sound/pci/ice1712/delta.c10
-rw-r--r--sound/pci/ice1712/delta.h4
-rw-r--r--sound/pci/ice1712/pontis.c6
-rw-r--r--sound/pci/ice1712/prodigy192.c2
-rw-r--r--sound/pci/intel8x0.c6
-rw-r--r--sound/pci/lx6464es/lx6464es.c4
-rw-r--r--sound/pci/lx6464es/lx6464es.h2
-rw-r--r--sound/pci/lx6464es/lx_core.c2
-rw-r--r--sound/pci/mixart/mixart_hwdep.h10
-rw-r--r--sound/pci/oxygen/oxygen.c4
-rw-r--r--sound/pci/oxygen/oxygen.h1
-rw-r--r--sound/pci/oxygen/oxygen_lib.c55
-rw-r--r--sound/pci/oxygen/oxygen_mixer.c5
-rw-r--r--sound/pci/oxygen/oxygen_pcm.c12
-rw-r--r--sound/pci/oxygen/oxygen_regs.h10
-rw-r--r--sound/pci/oxygen/virtuoso.c5
-rw-r--r--sound/pci/oxygen/xonar_cs43xx.c8
-rw-r--r--sound/pci/oxygen/xonar_pcm179x.c29
-rw-r--r--sound/pci/oxygen/xonar_wm87x6.c121
-rw-r--r--sound/pci/rme96.c8
-rw-r--r--sound/pci/rme9652/hdsp.c8
-rw-r--r--sound/ppc/pmac.c12
-rw-r--r--sound/ppc/tumbler.c2
-rw-r--r--sound/sh/aica.c2
-rw-r--r--sound/soc/atmel/Kconfig5
-rw-r--r--sound/soc/atmel/atmel-pcm.c59
-rw-r--r--sound/soc/atmel/atmel-pcm.h3
-rw-r--r--sound/soc/atmel/atmel_ssc_dai.c148
-rw-r--r--sound/soc/atmel/atmel_ssc_dai.h3
-rw-r--r--sound/soc/atmel/playpaq_wm8510.c65
-rw-r--r--sound/soc/atmel/sam9g20_wm8731.c68
-rw-r--r--sound/soc/atmel/snd-soc-afeb9260.c36
-rw-r--r--sound/soc/au1x/db1200.c39
-rw-r--r--sound/soc/au1x/dbdma2.c95
-rw-r--r--sound/soc/au1x/psc-ac97.c71
-rw-r--r--sound/soc/au1x/psc-i2s.c53
-rw-r--r--sound/soc/au1x/psc.h10
-rw-r--r--sound/soc/blackfin/bf5xx-ac97-pcm.c43
-rw-r--r--sound/soc/blackfin/bf5xx-ac97-pcm.h3
-rw-r--r--sound/soc/blackfin/bf5xx-ac97.c41
-rw-r--r--sound/soc/blackfin/bf5xx-ac97.h2
-rw-r--r--sound/soc/blackfin/bf5xx-ad1836.c23
-rw-r--r--sound/soc/blackfin/bf5xx-ad193x.c23
-rw-r--r--sound/soc/blackfin/bf5xx-ad1980.c19
-rw-r--r--sound/soc/blackfin/bf5xx-ad73311.c22
-rw-r--r--sound/soc/blackfin/bf5xx-i2s-pcm.c44
-rw-r--r--sound/soc/blackfin/bf5xx-i2s-pcm.h3
-rw-r--r--sound/soc/blackfin/bf5xx-i2s.c45
-rw-r--r--sound/soc/blackfin/bf5xx-i2s.h14
-rw-r--r--sound/soc/blackfin/bf5xx-ssm2602.c38
-rw-r--r--sound/soc/blackfin/bf5xx-tdm-pcm.c43
-rw-r--r--sound/soc/blackfin/bf5xx-tdm-pcm.h3
-rw-r--r--sound/soc/blackfin/bf5xx-tdm.c15
-rw-r--r--sound/soc/blackfin/bf5xx-tdm.h2
-rw-r--r--sound/soc/codecs/88pm860x-codec.c1486
-rw-r--r--sound/soc/codecs/88pm860x-codec.h97
-rw-r--r--sound/soc/codecs/Kconfig27
-rw-r--r--sound/soc/codecs/Makefile12
-rw-r--r--sound/soc/codecs/ac97.c125
-rw-r--r--sound/soc/codecs/ac97.h19
-rw-r--r--sound/soc/codecs/ad1836.c191
-rw-r--r--sound/soc/codecs/ad1836.h2
-rw-r--r--sound/soc/codecs/ad193x.c217
-rw-r--r--sound/soc/codecs/ad193x.h3
-rw-r--r--sound/soc/codecs/ad1980.c113
-rw-r--r--sound/soc/codecs/ad1980.h3
-rw-r--r--sound/soc/codecs/ad73311.c66
-rw-r--r--sound/soc/codecs/ad73311.h2
-rw-r--r--sound/soc/codecs/ads117x.c72
-rw-r--r--sound/soc/codecs/ads117x.h4
-rw-r--r--sound/soc/codecs/ak4104.c149
-rw-r--r--sound/soc/codecs/ak4104.h7
-rw-r--r--sound/soc/codecs/ak4535.c236
-rw-r--r--sound/soc/codecs/ak4535.h8
-rw-r--r--sound/soc/codecs/ak4642.c235
-rw-r--r--sound/soc/codecs/ak4642.h20
-rw-r--r--sound/soc/codecs/ak4671.c140
-rw-r--r--sound/soc/codecs/ak4671.h3
-rw-r--r--sound/soc/codecs/cq93vc.c132
-rw-r--r--sound/soc/codecs/cq93vc.h29
-rw-r--r--sound/soc/codecs/cs4270.c394
-rw-r--r--sound/soc/codecs/cs4270.h28
-rw-r--r--sound/soc/codecs/cs42l51.c295
-rw-r--r--sound/soc/codecs/cs42l51.h2
-rw-r--r--sound/soc/codecs/cx20442.c173
-rw-r--r--sound/soc/codecs/cx20442.h2
-rw-r--r--sound/soc/codecs/da7210.c163
-rw-r--r--sound/soc/codecs/da7210.h24
-rw-r--r--sound/soc/codecs/jz4740.c116
-rw-r--r--sound/soc/codecs/jz4740.h20
-rw-r--r--sound/soc/codecs/max98088.c2109
-rw-r--r--sound/soc/codecs/max98088.h193
-rw-r--r--sound/soc/codecs/pcm3008.c92
-rw-r--r--sound/soc/codecs/pcm3008.h3
-rw-r--r--sound/soc/codecs/spdif_transciever.c102
-rw-r--r--sound/soc/codecs/spdif_transciever.h18
-rw-r--r--sound/soc/codecs/ssm2602.c218
-rw-r--r--sound/soc/codecs/ssm2602.h3
-rw-r--r--sound/soc/codecs/stac9766.c119
-rw-r--r--sound/soc/codecs/stac9766.h4
-rw-r--r--sound/soc/codecs/tlv320aic23.c182
-rw-r--r--sound/soc/codecs/tlv320aic23.h3
-rw-r--r--sound/soc/codecs/tlv320aic26.c180
-rw-r--r--sound/soc/codecs/tlv320aic26.h3
-rw-r--r--sound/soc/codecs/tlv320aic3x.c1236
-rw-r--r--sound/soc/codecs/tlv320aic3x.h100
-rw-r--r--sound/soc/codecs/tlv320dac33.c308
-rw-r--r--sound/soc/codecs/tlv320dac33.h3
-rw-r--r--sound/soc/codecs/tpa6130a2.c38
-rw-r--r--sound/soc/codecs/twl4030.c228
-rw-r--r--sound/soc/codecs/twl4030.h55
-rw-r--r--sound/soc/codecs/twl6040.c170
-rw-r--r--sound/soc/codecs/twl6040.h3
-rw-r--r--sound/soc/codecs/uda134x.c155
-rw-r--r--sound/soc/codecs/uda134x.h3
-rw-r--r--sound/soc/codecs/uda1380.c331
-rw-r--r--sound/soc/codecs/uda1380.h3
-rw-r--r--sound/soc/codecs/wl1273.c528
-rw-r--r--sound/soc/codecs/wl1273.h101
-rw-r--r--sound/soc/codecs/wm2000.h3
-rw-r--r--sound/soc/codecs/wm8350.c240
-rw-r--r--sound/soc/codecs/wm8350.h3
-rw-r--r--sound/soc/codecs/wm8400.c181
-rw-r--r--sound/soc/codecs/wm8400.h3
-rw-r--r--sound/soc/codecs/wm8510.c288
-rw-r--r--sound/soc/codecs/wm8510.h3
-rw-r--r--sound/soc/codecs/wm8523.c178
-rw-r--r--sound/soc/codecs/wm8523.h3
-rw-r--r--sound/soc/codecs/wm8580.c321
-rw-r--r--sound/soc/codecs/wm8580.h17
-rw-r--r--sound/soc/codecs/wm8711.c203
-rw-r--r--sound/soc/codecs/wm8711.h3
-rw-r--r--sound/soc/codecs/wm8727.c106
-rw-r--r--sound/soc/codecs/wm8727.h21
-rw-r--r--sound/soc/codecs/wm8728.c289
-rw-r--r--sound/soc/codecs/wm8728.h9
-rw-r--r--sound/soc/codecs/wm8731.c247
-rw-r--r--sound/soc/codecs/wm8731.h7
-rw-r--r--sound/soc/codecs/wm8741.c378
-rw-r--r--sound/soc/codecs/wm8741.h3
-rw-r--r--sound/soc/codecs/wm8750.c264
-rw-r--r--sound/soc/codecs/wm8750.h9
-rw-r--r--sound/soc/codecs/wm8753.c404
-rw-r--r--sound/soc/codecs/wm8753.h3
-rw-r--r--sound/soc/codecs/wm8776.c249
-rw-r--r--sound/soc/codecs/wm8776.h3
-rw-r--r--sound/soc/codecs/wm8804.c833
-rw-r--r--sound/soc/codecs/wm8804.h61
-rw-r--r--sound/soc/codecs/wm8900.c250
-rw-r--r--sound/soc/codecs/wm8900.h3
-rw-r--r--sound/soc/codecs/wm8903.c265
-rw-r--r--sound/soc/codecs/wm8903.h3
-rw-r--r--sound/soc/codecs/wm8904.c210
-rw-r--r--sound/soc/codecs/wm8904.h3
-rw-r--r--sound/soc/codecs/wm8940.c199
-rw-r--r--sound/soc/codecs/wm8940.h2
-rw-r--r--sound/soc/codecs/wm8955.c181
-rw-r--r--sound/soc/codecs/wm8955.h3
-rw-r--r--sound/soc/codecs/wm8960.c209
-rw-r--r--sound/soc/codecs/wm8960.h3
-rw-r--r--sound/soc/codecs/wm8961.c241
-rw-r--r--sound/soc/codecs/wm8961.h3
-rw-r--r--sound/soc/codecs/wm8962.c3980
-rw-r--r--sound/soc/codecs/wm8962.h3780
-rw-r--r--sound/soc/codecs/wm8971.c247
-rw-r--r--sound/soc/codecs/wm8971.h8
-rw-r--r--sound/soc/codecs/wm8974.c167
-rw-r--r--sound/soc/codecs/wm8974.h3
-rw-r--r--sound/soc/codecs/wm8978.c190
-rw-r--r--sound/soc/codecs/wm8978.h3
-rw-r--r--sound/soc/codecs/wm8985.c1192
-rw-r--r--sound/soc/codecs/wm8985.h1045
-rw-r--r--sound/soc/codecs/wm8988.c261
-rw-r--r--sound/soc/codecs/wm8988.h3
-rw-r--r--sound/soc/codecs/wm8990.c223
-rw-r--r--sound/soc/codecs/wm8990.h8
-rw-r--r--sound/soc/codecs/wm8993.c304
-rw-r--r--sound/soc/codecs/wm8993.h3
-rw-r--r--sound/soc/codecs/wm8994.c3412
-rw-r--r--sound/soc/codecs/wm8994.h3
-rw-r--r--sound/soc/codecs/wm9081.c208
-rw-r--r--sound/soc/codecs/wm9081.h3
-rw-r--r--sound/soc/codecs/wm9090.c185
-rw-r--r--sound/soc/codecs/wm9090.h2
-rw-r--r--sound/soc/codecs/wm9705.c116
-rw-r--r--sound/soc/codecs/wm9705.h3
-rw-r--r--sound/soc/codecs/wm9712.c124
-rw-r--r--sound/soc/codecs/wm9712.h3
-rw-r--r--sound/soc/codecs/wm9713.c131
-rw-r--r--sound/soc/codecs/wm9713.h3
-rw-r--r--sound/soc/codecs/wm_hubs.c2
-rw-r--r--sound/soc/davinci/davinci-evm.c137
-rw-r--r--sound/soc/davinci/davinci-i2s.c57
-rw-r--r--sound/soc/davinci/davinci-i2s.h2
-rw-r--r--sound/soc/davinci/davinci-mcasp.c41
-rw-r--r--sound/soc/davinci/davinci-mcasp.h2
-rw-r--r--sound/soc/davinci/davinci-pcm.c45
-rw-r--r--sound/soc/davinci/davinci-pcm.h3
-rw-r--r--sound/soc/davinci/davinci-sffsdr.c29
-rw-r--r--sound/soc/davinci/davinci-vcif.c35
-rw-r--r--sound/soc/davinci/davinci-vcif.h28
-rw-r--r--sound/soc/ep93xx/Kconfig16
-rw-r--r--sound/soc/ep93xx/Makefile4
-rw-r--r--sound/soc/ep93xx/ep93xx-ac97.c468
-rw-r--r--sound/soc/ep93xx/ep93xx-i2s.c34
-rw-r--r--sound/soc/ep93xx/ep93xx-i2s.h18
-rw-r--r--sound/soc/ep93xx/ep93xx-pcm.c37
-rw-r--r--sound/soc/ep93xx/ep93xx-pcm.h2
-rw-r--r--sound/soc/ep93xx/simone.c91
-rw-r--r--sound/soc/ep93xx/snappercl15.c24
-rw-r--r--sound/soc/fsl/Kconfig27
-rw-r--r--sound/soc/fsl/Makefile11
-rw-r--r--sound/soc/fsl/efika-audio-fabric.c21
-rw-r--r--sound/soc/fsl/fsl_dma.c458
-rw-r--r--sound/soc/fsl/fsl_dma.h20
-rw-r--r--sound/soc/fsl/fsl_ssi.c298
-rw-r--r--sound/soc/fsl/fsl_ssi.h26
-rw-r--r--sound/soc/fsl/mpc5200_dma.c65
-rw-r--r--sound/soc/fsl/mpc5200_dma.h5
-rw-r--r--sound/soc/fsl/mpc5200_psc_ac97.c34
-rw-r--r--sound/soc/fsl/mpc5200_psc_ac97.h2
-rw-r--r--sound/soc/fsl/mpc5200_psc_i2s.c21
-rw-r--r--sound/soc/fsl/mpc8610_hpcd.c661
-rw-r--r--sound/soc/fsl/p1022_ds.c592
-rw-r--r--sound/soc/fsl/pcm030-audio-fabric.c23
-rw-r--r--sound/soc/fsl/soc-of-simple.c172
-rw-r--r--sound/soc/imx/Kconfig16
-rw-r--r--sound/soc/imx/Makefile10
-rw-r--r--sound/soc/imx/eukrea-tlv320.c20
-rw-r--r--sound/soc/imx/imx-pcm-dma-mx2.c260
-rw-r--r--sound/soc/imx/imx-pcm-fiq.c68
-rw-r--r--sound/soc/imx/imx-ssi.c169
-rw-r--r--sound/soc/imx/imx-ssi.h11
-rw-r--r--sound/soc/imx/phycore-ac97.c44
-rw-r--r--sound/soc/imx/wm1133-ev1.c27
-rw-r--r--sound/soc/jz4740/jz4740-i2s.c104
-rw-r--r--sound/soc/jz4740/jz4740-i2s.h2
-rw-r--r--sound/soc/jz4740/jz4740-pcm.c18
-rw-r--r--sound/soc/jz4740/jz4740-pcm.h2
-rw-r--r--sound/soc/jz4740/qi_lb60.c25
-rw-r--r--sound/soc/kirkwood/kirkwood-dma.c69
-rw-r--r--sound/soc/kirkwood/kirkwood-dma.h17
-rw-r--r--sound/soc/kirkwood/kirkwood-i2s.c55
-rw-r--r--sound/soc/kirkwood/kirkwood-i2s.h17
-rw-r--r--sound/soc/kirkwood/kirkwood-openrd.c24
-rw-r--r--sound/soc/nuc900/nuc900-ac97.c30
-rw-r--r--sound/soc/nuc900/nuc900-audio.c16
-rw-r--r--sound/soc/nuc900/nuc900-audio.h2
-rw-r--r--sound/soc/nuc900/nuc900-pcm.c47
-rw-r--r--sound/soc/omap/Kconfig2
-rw-r--r--sound/soc/omap/am3517evm.c29
-rw-r--r--sound/soc/omap/ams-delta.c98
-rw-r--r--sound/soc/omap/igep0020.c26
-rw-r--r--sound/soc/omap/mcpdm.c19
-rw-r--r--sound/soc/omap/mcpdm.h2
-rw-r--r--sound/soc/omap/n810.c42
-rw-r--r--sound/soc/omap/omap-mcbsp.c252
-rw-r--r--sound/soc/omap/omap-mcbsp.h2
-rw-r--r--sound/soc/omap/omap-mcpdm.c72
-rw-r--r--sound/soc/omap/omap-pcm.c47
-rw-r--r--sound/soc/omap/omap-pcm.h2
-rw-r--r--sound/soc/omap/omap2evm.c29
-rw-r--r--sound/soc/omap/omap3beagle.c27
-rw-r--r--sound/soc/omap/omap3evm.c34
-rw-r--r--sound/soc/omap/omap3pandora.c37
-rw-r--r--sound/soc/omap/osk5912.c35
-rw-r--r--sound/soc/omap/overo.c22
-rw-r--r--sound/soc/omap/rx51.c41
-rw-r--r--sound/soc/omap/sdp3430.c60
-rw-r--r--sound/soc/omap/sdp4430.c27
-rw-r--r--sound/soc/omap/zoom2.c68
-rw-r--r--sound/soc/pxa/Kconfig19
-rw-r--r--sound/soc/pxa/Makefile4
-rw-r--r--sound/soc/pxa/corgi.c33
-rw-r--r--sound/soc/pxa/e740_wm9705.c29
-rw-r--r--sound/soc/pxa/e750_wm9705.c26
-rw-r--r--sound/soc/pxa/e800_wm9712.c26
-rw-r--r--sound/soc/pxa/em-x270.c22
-rw-r--r--sound/soc/pxa/imote2.c20
-rw-r--r--sound/soc/pxa/magician.c39
-rw-r--r--sound/soc/pxa/mioa701_wm9713.c33
-rw-r--r--sound/soc/pxa/palm27x.c27
-rw-r--r--sound/soc/pxa/poodle.c34
-rw-r--r--sound/soc/pxa/pxa-ssp.c174
-rw-r--r--sound/soc/pxa/pxa-ssp.h2
-rw-r--r--sound/soc/pxa/pxa2xx-ac97.c46
-rw-r--r--sound/soc/pxa/pxa2xx-ac97.h2
-rw-r--r--sound/soc/pxa/pxa2xx-i2s.c91
-rw-r--r--sound/soc/pxa/pxa2xx-i2s.h2
-rw-r--r--sound/soc/pxa/pxa2xx-pcm.c46
-rw-r--r--sound/soc/pxa/pxa2xx-pcm.h19
-rw-r--r--sound/soc/pxa/raumfeld.c114
-rw-r--r--sound/soc/pxa/saarb.c200
-rw-r--r--sound/soc/pxa/spitz.c31
-rw-r--r--sound/soc/pxa/tavorevb3.c200
-rw-r--r--sound/soc/pxa/tosa.c32
-rw-r--r--sound/soc/pxa/z2.c26
-rw-r--r--sound/soc/pxa/zylonite.c40
-rw-r--r--sound/soc/s3c24xx/Kconfig38
-rw-r--r--sound/soc/s3c24xx/Makefile10
-rw-r--r--sound/soc/s3c24xx/aquila_wm8994.c295
-rw-r--r--sound/soc/s3c24xx/goni_wm8994.c298
-rw-r--r--sound/soc/s3c24xx/jive_wm8750.c23
-rw-r--r--sound/soc/s3c24xx/ln2440sbc_alc650.c17
-rw-r--r--sound/soc/s3c24xx/neo1973_gta02_wm8753.c58
-rw-r--r--sound/soc/s3c24xx/neo1973_wm8753.c37
-rw-r--r--sound/soc/s3c24xx/rx1950_uda1380.c319
-rw-r--r--sound/soc/s3c24xx/s3c-ac97.c36
-rw-r--r--sound/soc/s3c24xx/s3c-ac97.h2
-rw-r--r--sound/soc/s3c24xx/s3c-dma.c46
-rw-r--r--sound/soc/s3c24xx/s3c-dma.h1
-rw-r--r--sound/soc/s3c24xx/s3c-i2s-v2.c50
-rw-r--r--sound/soc/s3c24xx/s3c-i2s-v2.h13
-rw-r--r--sound/soc/s3c24xx/s3c-pcm.c54
-rw-r--r--sound/soc/s3c24xx/s3c-pcm.h3
-rw-r--r--sound/soc/s3c24xx/s3c2412-i2s.c54
-rw-r--r--sound/soc/s3c24xx/s3c2412-i2s.h2
-rw-r--r--sound/soc/s3c24xx/s3c24xx-i2s.c40
-rw-r--r--sound/soc/s3c24xx/s3c24xx-i2s.h2
-rw-r--r--sound/soc/s3c24xx/s3c24xx_simtec.c15
-rw-r--r--sound/soc/s3c24xx/s3c24xx_simtec.h4
-rw-r--r--sound/soc/s3c24xx/s3c24xx_simtec_hermes.c25
-rw-r--r--sound/soc/s3c24xx/s3c24xx_simtec_tlv320aic23.c21
-rw-r--r--sound/soc/s3c24xx/s3c24xx_uda134x.c21
-rw-r--r--sound/soc/s3c24xx/s3c64xx-i2s-v4.c135
-rw-r--r--sound/soc/s3c24xx/s3c64xx-i2s.c206
-rw-r--r--sound/soc/s3c24xx/s3c64xx-i2s.h3
-rw-r--r--sound/soc/s3c24xx/smartq_wm8987.c15
-rw-r--r--sound/soc/s3c24xx/smdk2443_wm9710.c17
-rw-r--r--sound/soc/s3c24xx/smdk64xx_wm8580.c68
-rw-r--r--sound/soc/s3c24xx/smdk_spdif.c223
-rw-r--r--sound/soc/s3c24xx/smdk_wm9713.c42
-rw-r--r--sound/soc/s3c24xx/spdif.c501
-rw-r--r--sound/soc/s3c24xx/spdif.h19
-rw-r--r--sound/soc/s6000/s6000-i2s.c58
-rw-r--r--sound/soc/s6000/s6000-i2s.h2
-rw-r--r--sound/soc/s6000/s6000-pcm.c102
-rw-r--r--sound/soc/s6000/s6000-pcm.h2
-rw-r--r--sound/soc/s6000/s6105-ipcam.c31
-rw-r--r--sound/soc/sh/Kconfig11
-rw-r--r--sound/soc/sh/Makefile2
-rw-r--r--sound/soc/sh/dma-sh7760.c53
-rw-r--r--sound/soc/sh/fsi-ak4642.c31
-rw-r--r--sound/soc/sh/fsi-da7210.c24
-rw-r--r--sound/soc/sh/fsi-hdmi.c60
-rw-r--r--sound/soc/sh/fsi.c627
-rw-r--r--sound/soc/sh/hac.c46
-rw-r--r--sound/soc/sh/migor.c29
-rw-r--r--sound/soc/sh/sh7760-ac97.c29
-rw-r--r--sound/soc/sh/siu.h6
-rw-r--r--sound/soc/sh/siu_dai.c97
-rw-r--r--sound/soc/sh/siu_pcm.c34
-rw-r--r--sound/soc/sh/ssi.c55
-rw-r--r--sound/soc/soc-cache.c198
-rw-r--r--sound/soc/soc-core.c1823
-rw-r--r--sound/soc/soc-dapm.c92
-rw-r--r--sound/soc/soc-jack.c21
-rw-r--r--sound/soc/txx9/txx9aclc-ac97.c55
-rw-r--r--sound/soc/txx9/txx9aclc-generic.c24
-rw-r--r--sound/soc/txx9/txx9aclc.c141
-rw-r--r--sound/soc/txx9/txx9aclc.h13
-rw-r--r--sound/sound_core.c1
-rw-r--r--sound/spi/at73c213.c2
-rw-r--r--sound/synth/emux/emux_hwdep.c3
-rw-r--r--sound/usb/Kconfig2
-rw-r--r--sound/usb/caiaq/audio.c175
-rw-r--r--sound/usb/caiaq/control.c208
-rw-r--r--sound/usb/caiaq/device.c10
-rw-r--r--sound/usb/caiaq/device.h6
-rw-r--r--sound/usb/caiaq/input.c248
-rw-r--r--sound/usb/card.c31
-rw-r--r--sound/usb/card.h2
-rw-r--r--sound/usb/endpoint.c2
-rw-r--r--sound/usb/helper.c17
-rw-r--r--sound/usb/midi.c16
-rw-r--r--sound/usb/mixer.c9
-rw-r--r--sound/usb/mixer_quirks.c14
-rw-r--r--sound/usb/pcm.c10
-rw-r--r--sound/usb/proc.c7
-rw-r--r--sound/usb/quirks-table.h184
-rw-r--r--sound/usb/quirks.c2
-rw-r--r--sound/usb/urb.c172
-rw-r--r--sound/usb/usbaudio.h2
-rw-r--r--sound/usb/usx2y/usx2yhwdeppcm.c6
-rw-r--r--tools/perf/Documentation/perf-list.txt17
-rw-r--r--tools/perf/Documentation/perf-probe.txt18
-rw-r--r--tools/perf/Documentation/perf-record.txt4
-rw-r--r--tools/perf/Documentation/perf-trace.txt57
-rw-r--r--tools/perf/builtin-probe.c78
-rw-r--r--tools/perf/builtin-record.c39
-rw-r--r--tools/perf/builtin-top.c12
-rw-r--r--tools/perf/builtin-trace.c216
-rw-r--r--tools/perf/scripts/perl/bin/failed-syscalls-record2
-rw-r--r--tools/perf/scripts/perl/bin/failed-syscalls-report2
-rw-r--r--tools/perf/scripts/perl/bin/rw-by-file-record2
-rw-r--r--tools/perf/scripts/perl/bin/rw-by-file-report2
-rw-r--r--tools/perf/scripts/perl/bin/rw-by-pid-record2
-rw-r--r--tools/perf/scripts/perl/bin/rw-by-pid-report2
-rw-r--r--tools/perf/scripts/perl/bin/rwtop-record2
-rw-r--r--tools/perf/scripts/perl/bin/rwtop-report2
-rw-r--r--tools/perf/scripts/perl/bin/wakeup-latency-record2
-rw-r--r--tools/perf/scripts/perl/bin/wakeup-latency-report2
-rw-r--r--tools/perf/scripts/perl/bin/workqueue-stats-record2
-rw-r--r--tools/perf/scripts/perl/bin/workqueue-stats-report2
-rw-r--r--tools/perf/scripts/python/Perf-Trace-Util/lib/Perf/Trace/Util.py58
-rw-r--r--tools/perf/scripts/python/bin/failed-syscalls-by-pid-record2
-rw-r--r--tools/perf/scripts/python/bin/failed-syscalls-by-pid-report2
-rw-r--r--tools/perf/scripts/python/bin/futex-contention-record2
-rw-r--r--tools/perf/scripts/python/bin/futex-contention-report4
-rw-r--r--tools/perf/scripts/python/bin/netdev-times-record2
-rw-r--r--tools/perf/scripts/python/bin/netdev-times-report2
-rw-r--r--tools/perf/scripts/python/bin/sched-migration-record2
-rw-r--r--tools/perf/scripts/python/bin/sched-migration-report2
-rw-r--r--tools/perf/scripts/python/bin/sctop-record2
-rw-r--r--tools/perf/scripts/python/bin/sctop-report2
-rw-r--r--tools/perf/scripts/python/bin/syscall-counts-by-pid-record2
-rw-r--r--tools/perf/scripts/python/bin/syscall-counts-by-pid-report2
-rw-r--r--tools/perf/scripts/python/bin/syscall-counts-record2
-rw-r--r--tools/perf/scripts/python/bin/syscall-counts-report2
-rw-r--r--tools/perf/scripts/python/failed-syscalls-by-pid.py21
-rw-r--r--tools/perf/scripts/python/futex-contention.py50
-rw-r--r--tools/perf/scripts/python/sctop.py9
-rw-r--r--tools/perf/scripts/python/syscall-counts-by-pid.py21
-rw-r--r--tools/perf/scripts/python/syscall-counts.py5
-rw-r--r--tools/perf/util/debug.c4
-rw-r--r--tools/perf/util/debug.h2
-rw-r--r--tools/perf/util/header.c11
-rw-r--r--tools/perf/util/map.h10
-rw-r--r--tools/perf/util/probe-event.c189
-rw-r--r--tools/perf/util/probe-event.h16
-rw-r--r--tools/perf/util/probe-finder.c645
-rw-r--r--tools/perf/util/probe-finder.h31
-rw-r--r--tools/perf/util/symbol.c63
-rw-r--r--tools/perf/util/ui/browser.c1
-rw-r--r--tools/perf/util/ui/util.c5
-rw-r--r--usr/Makefile6
-rw-r--r--usr/initramfs_data.S22
-rw-r--r--usr/initramfs_data.bz2.S29
-rw-r--r--usr/initramfs_data.gz.S29
-rw-r--r--usr/initramfs_data.lzma.S29
-rw-r--r--usr/initramfs_data.lzo.S29
6828 files changed, 694481 insertions, 331450 deletions
diff --git a/Documentation/ABI/obsolete/dv1394 b/Documentation/ABI/obsolete/dv1394
deleted file mode 100644
index 2ee36864ca10..000000000000
--- a/Documentation/ABI/obsolete/dv1394
+++ /dev/null
@@ -1,9 +0,0 @@
-What: dv1394 (a.k.a. "OHCI-DV I/O support" for FireWire)
-Contact: linux1394-devel@lists.sourceforge.net
-Description:
- New application development should use raw1394 + userspace libraries
- instead, notably libiec61883 which is functionally equivalent.
-
-Users:
- ffmpeg/libavformat (used by a variety of media players)
- dvgrab v1.x (replaced by dvgrab2 on top of raw1394 and resp. libraries)
diff --git a/Documentation/ABI/obsolete/proc-pid-oom_adj b/Documentation/ABI/obsolete/proc-pid-oom_adj
new file mode 100644
index 000000000000..cf63f264ce0f
--- /dev/null
+++ b/Documentation/ABI/obsolete/proc-pid-oom_adj
@@ -0,0 +1,22 @@
+What: /proc/<pid>/oom_adj
+When: August 2012
+Why: /proc/<pid>/oom_adj allows userspace to influence the oom killer's
+ badness heuristic used to determine which task to kill when the kernel
+ is out of memory.
+
+ The badness heuristic has been rewritten since the introduction of
+ this tunable, so its meaning is now deprecated. The value was
+ implemented as a bitshift on a score generated by the badness()
+ function that did not have any precise units of measure. With the
+ rewrite, the score is given as a proportion of available memory to the
+ task allocating pages, so a bitshift, which grows the score
+ exponentially, is impossible to tune with fine granularity.
+
+ A much more powerful interface, /proc/<pid>/oom_score_adj, was
+ introduced with the oom killer rewrite that allows users to increase or
+ decrease the badness() score linearly. This interface will replace
+ /proc/<pid>/oom_adj.
+
+ A warning will be emitted to the kernel log if an application uses this
+ deprecated interface. After it is printed once, future warnings will be
+ suppressed until the kernel is rebooted.
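
For illustration only (not part of the patch above): a minimal userspace sketch of how a process might use the newer /proc/<pid>/oom_score_adj interface to lower its own kill priority. The -500 value is just an example; lowering the adjustment generally requires CAP_SYS_RESOURCE.

#include <stdio.h>
#include <stdlib.h>

int main(void)
{
	/* -1000 disables OOM killing for this task, +1000 makes it the
	 * preferred victim; -500 is an illustrative middle value. */
	FILE *f = fopen("/proc/self/oom_score_adj", "w");

	if (!f) {
		perror("oom_score_adj");
		return EXIT_FAILURE;
	}
	fprintf(f, "%d\n", -500);
	fclose(f);
	return EXIT_SUCCESS;
}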
diff --git a/Documentation/ABI/removed/dv1394 b/Documentation/ABI/removed/dv1394
new file mode 100644
index 000000000000..c2310b6676f4
--- /dev/null
+++ b/Documentation/ABI/removed/dv1394
@@ -0,0 +1,14 @@
+What: dv1394 (a.k.a. "OHCI-DV I/O support" for FireWire)
+Date: May 2010 (scheduled), finally removed in kernel v2.6.37
+Contact: linux1394-devel@lists.sourceforge.net
+Description:
+ /dev/dv1394/* were character device files, one for each FireWire
+ controller and for NTSC and PAL respectively, from which DV data
+ could be received by read() or transmitted by write(). A few
+ ioctl()s allowed limited control.
+ This special-purpose interface has been superseded by libraw1394 +
+ libiec61883 which are functionally equivalent, support HDV, and
+ transparently work on top of the newer firewire kernel drivers.
+
+Users:
+ ffmpeg/libavformat (if configured for DV1394)
diff --git a/Documentation/ABI/removed/raw1394 b/Documentation/ABI/removed/raw1394
new file mode 100644
index 000000000000..490aa1efc4ae
--- /dev/null
+++ b/Documentation/ABI/removed/raw1394
@@ -0,0 +1,15 @@
+What: raw1394 (a.k.a. "Raw IEEE1394 I/O support" for FireWire)
+Date: May 2010 (scheduled), finally removed in kernel v2.6.37
+Contact: linux1394-devel@lists.sourceforge.net
+Description:
+ /dev/raw1394 was a character device file that allowed low-level
+ access to FireWire buses. Its major drawbacks were its inability
+ to implement sensible device security policies, and its low level
+ of abstraction that required userspace clients to duplicate much
+ of the kernel's ieee1394 core functionality.
+ Replaced by /dev/fw*, i.e. the <linux/firewire-cdev.h> ABI of
+ firewire-core.
+
+Users:
+ libraw1394 (works with firewire-cdev too, transparent to library ABI
+ users)
diff --git a/Documentation/ABI/removed/raw1394_legacy_isochronous b/Documentation/ABI/removed/raw1394_legacy_isochronous
deleted file mode 100644
index 1b629622d883..000000000000
--- a/Documentation/ABI/removed/raw1394_legacy_isochronous
+++ /dev/null
@@ -1,16 +0,0 @@
-What: legacy isochronous ABI of raw1394 (1st generation iso ABI)
-Date: June 2007 (scheduled), removed in kernel v2.6.23
-Contact: linux1394-devel@lists.sourceforge.net
-Description:
- The two request types RAW1394_REQ_ISO_SEND, RAW1394_REQ_ISO_LISTEN have
- been deprecated for quite some time. They are very inefficient as they
- come with high interrupt load and several layers of callbacks for each
- packet. Because of these deficiencies, the video1394 and dv1394 drivers
- and the 3rd-generation isochronous ABI in raw1394 (rawiso) were created.
-
-Users:
- libraw1394 users via the long deprecated API raw1394_iso_write,
- raw1394_start_iso_write, raw1394_start_iso_rcv, raw1394_stop_iso_rcv
-
- libdc1394, which optionally uses these old libraw1394 calls
- alternatively to the more efficient video1394 ABI
diff --git a/Documentation/ABI/removed/video1394 b/Documentation/ABI/removed/video1394
new file mode 100644
index 000000000000..c39c25aee77b
--- /dev/null
+++ b/Documentation/ABI/removed/video1394
@@ -0,0 +1,16 @@
+What: video1394 (a.k.a. "OHCI-1394 Video support" for FireWire)
+Date: May 2010 (scheduled), finally removed in kernel v2.6.37
+Contact: linux1394-devel@lists.sourceforge.net
+Description:
+ /dev/video1394/* were character device files, one for each FireWire
+ controller, which were used for isochronous I/O. It was added as an
+ alternative to raw1394's isochronous I/O functionality which had
+ performance issues in its first generation. Any video1394 user had
+ to use raw1394 + libraw1394 too because video1394 did not provide
+ asynchronous I/O for device discovery and configuration.
+ Replaced by /dev/fw*, i.e. the <linux/firewire-cdev.h> ABI of
+ firewire-core.
+
+Users:
+ libdc1394 (works with firewire-cdev too, transparent to library ABI
+ users)
diff --git a/Documentation/ABI/testing/sysfs-block-zram b/Documentation/ABI/testing/sysfs-block-zram
new file mode 100644
index 000000000000..c8b3b48ec62c
--- /dev/null
+++ b/Documentation/ABI/testing/sysfs-block-zram
@@ -0,0 +1,99 @@
+What: /sys/block/zram<id>/disksize
+Date: August 2010
+Contact: Nitin Gupta <ngupta@vflare.org>
+Description:
+ The disksize file is read-write and specifies the disk size
+ which represents the limit on the *uncompressed* worth of data
+ that can be stored in this disk.
+
+What: /sys/block/zram<id>/initstate
+Date: August 2010
+Contact: Nitin Gupta <ngupta@vflare.org>
+Description:
+ The initstate file is read-only and shows the initialization
+ state of the device.
+
+What: /sys/block/zram<id>/reset
+Date: August 2010
+Contact: Nitin Gupta <ngupta@vflare.org>
+Description:
+ The reset file is write-only and allows resetting the
+ device. The reset operation frees all the memory associated
+ with this device.
+
+What: /sys/block/zram<id>/num_reads
+Date: August 2010
+Contact: Nitin Gupta <ngupta@vflare.org>
+Description:
+ The num_reads file is read-only and specifies the number of
+ reads (failed or successful) done on this device.
+
+What: /sys/block/zram<id>/num_writes
+Date: August 2010
+Contact: Nitin Gupta <ngupta@vflare.org>
+Description:
+ The num_writes file is read-only and specifies the number of
+ writes (failed or successful) done on this device.
+
+What: /sys/block/zram<id>/invalid_io
+Date: August 2010
+Contact: Nitin Gupta <ngupta@vflare.org>
+Description:
+ The invalid_io file is read-only and specifies the number of
+ non-page-size-aligned I/O requests issued to this device.
+
+What: /sys/block/zram<id>/notify_free
+Date: August 2010
+Contact: Nitin Gupta <ngupta@vflare.org>
+Description:
+ The notify_free file is read-only and specifies the number of
+ swap slot free notifications received by this device. These
+ notifications are sent to a swap block device when a swap slot
+ is freed. This statistic is applicable only when this disk is
+ being used as a swap disk.
+
+What: /sys/block/zram<id>/discard
+Date: August 2010
+Contact: Nitin Gupta <ngupta@vflare.org>
+Description:
+ The discard file is read-only and specifies the number of
+ discard requests received by this device. These requests
+ provide information to the block device regarding blocks which are
+ no longer used by the filesystem.
+
+What: /sys/block/zram<id>/zero_pages
+Date: August 2010
+Contact: Nitin Gupta <ngupta@vflare.org>
+Description:
+ The zero_pages file is read-only and specifies the number of
+ zero-filled pages written to this disk. No memory is allocated for
+ such pages.
+
+What: /sys/block/zram<id>/orig_data_size
+Date: August 2010
+Contact: Nitin Gupta <ngupta@vflare.org>
+Description:
+ The orig_data_size file is read-only and specifies uncompressed
+ size of data stored in this disk. This excludes zero-filled
+ pages (zero_pages) since no memory is allocated for them.
+ Unit: bytes
+
+What: /sys/block/zram<id>/compr_data_size
+Date: August 2010
+Contact: Nitin Gupta <ngupta@vflare.org>
+Description:
+ The compr_data_size file is read-only and specifies compressed
+ size of data stored in this disk. So, compression ratio can be
+ calculated using orig_data_size and this statistic.
+ Unit: bytes
+
+What: /sys/block/zram<id>/mem_used_total
+Date: August 2010
+Contact: Nitin Gupta <ngupta@vflare.org>
+Description:
+ The mem_used_total file is read-only and specifies the amount
+ of memory, including allocator fragmentation and metadata
+ overhead, allocated for this disk. So, allocator space
+ efficiency can be calculated using compr_data_size and this
+ statistic.
+ Unit: bytes
\ No newline at end of file
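
As a rough illustration of the attributes documented above (not part of the patch): a small userspace sketch that sets the uncompressed capacity of zram0 via disksize and then reads back mem_used_total. The zram0 instance and the 256 MiB size are assumptions.

#include <stdio.h>

int main(void)
{
	unsigned long long mem_used = 0;
	FILE *f = fopen("/sys/block/zram0/disksize", "w");

	if (!f)
		return 1;
	/* limit on the *uncompressed* data the device may hold */
	fprintf(f, "%llu\n", 256ULL << 20);
	fclose(f);

	f = fopen("/sys/block/zram0/mem_used_total", "r");
	if (!f)
		return 1;
	if (fscanf(f, "%llu", &mem_used) == 1)
		printf("zram0 memory usage: %llu bytes\n", mem_used);
	fclose(f);
	return 0;
}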
diff --git a/Documentation/ABI/testing/sysfs-bus-rbd b/Documentation/ABI/testing/sysfs-bus-rbd
new file mode 100644
index 000000000000..90a87e2a572b
--- /dev/null
+++ b/Documentation/ABI/testing/sysfs-bus-rbd
@@ -0,0 +1,83 @@
+What: /sys/bus/rbd/
+Date: November 2010
+Contact: Yehuda Sadeh <yehuda@hq.newdream.net>,
+ Sage Weil <sage@newdream.net>
+Description:
+
+Used for adding and removing rbd block devices.
+
+Usage: <mon ip addr> <options> <pool name> <rbd image name> [snap name]
+
+ $ echo "192.168.0.1 name=admin rbd foo" > /sys/bus/rbd/add
+
+The snapshot name can be "-" or omitted to map the image read/write. A <dev-id>
+will be assigned for any registered block device. If a snapshot is used, it will
+be mapped read-only.
+
+Removal of a device:
+
+ $ echo <dev-id> > /sys/bus/rbd/remove
+
+Entries under /sys/bus/rbd/devices/<dev-id>/
+--------------------------------------------
+
+client_id
+
+ The ceph unique client id that was assigned for this specific session.
+
+major
+
+ The block device major number.
+
+name
+
+ The name of the rbd image.
+
+pool
+
+ The pool where this rbd image resides. The pool-name pair is unique
+ per rados system.
+
+size
+
+ The size (in bytes) of the mapped block device.
+
+refresh
+
+ Writing to this file will reread the image header data and set
+ all relevant data structures accordingly.
+
+current_snap
+
+ The current snapshot for which the device is mapped.
+
+create_snap
+
+ Create a snapshot:
+
+ $ echo <snap-name> > /sys/bus/rbd/devices/<dev-id>/snap_create
+
+rollback_snap
+
+ Rolls back data to the specified snapshot. This goes over the entire
+ list of rados blocks and sends a rollback command to each.
+
+ $ echo <snap-name> > /sys/bus/rbd/devices/<dev-id>/snap_rollback
+
+snap_*
+
+ A directory for each snapshot
+
+
+Entries under /sys/bus/rbd/devices/<dev-id>/snap_<snap-name>
+-------------------------------------------------------------
+
+id
+
+ The rados internal snapshot id assigned for this snapshot
+
+size
+
+ The size of the image when this snapshot was taken.
+
+
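
A hypothetical sketch of the add interface shown above (not part of the patch); the monitor address, credentials, pool and image names are placeholders carried over from the echo example.

#include <stdio.h>

int main(void)
{
	/* same string as the echo example: <mon ip addr> <options> <pool name> <rbd image name> */
	const char *spec = "192.168.0.1 name=admin rbd foo";
	FILE *f = fopen("/sys/bus/rbd/add", "w");

	if (!f)
		return 1;
	fprintf(f, "%s\n", spec);
	fclose(f);
	/* on success a new /sys/bus/rbd/devices/<dev-id>/ directory appears */
	return 0;
}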
diff --git a/Documentation/ABI/testing/sysfs-devices-system-ibm-rtl b/Documentation/ABI/testing/sysfs-devices-system-ibm-rtl
new file mode 100644
index 000000000000..b82deeaec314
--- /dev/null
+++ b/Documentation/ABI/testing/sysfs-devices-system-ibm-rtl
@@ -0,0 +1,22 @@
+What: state
+Date: Sep 2010
+KernelVersion: 2.6.37
+Contact: Vernon Mauery <vernux@us.ibm.com>
+Description: The state file provides a means to switch in and
+ out of Premium Real-Time Mode (PRTM), as well as the
+ ability to query the current state.
+ 0 => PRTM off
+ 1 => PRTM enabled
+Users: The ibm-prtm userspace daemon uses this interface.
+
+
+What: version
+Date: Sep 2010
+KernelVersion: 2.6.37
+Contact: Vernon Mauery <vernux@us.ibm.com>
+Description: The version file provides a means by which to query
+ the RTL table version that lives in the Extended
+ BIOS Data Area (EBDA).
+Users: The ibm-prtm userspace daemon uses this interface.
+
+
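
Purely as an illustration (not part of the patch): a sketch that reads and then enables the PRTM state attribute described above. The /sys/devices/system/ibm_rtl/ path is an assumption inferred from this entry's file name.

#include <stdio.h>

int main(void)
{
	const char *path = "/sys/devices/system/ibm_rtl/state"; /* assumed location */
	int state = -1;
	FILE *f = fopen(path, "r");

	if (f) {
		if (fscanf(f, "%d", &state) != 1)
			state = -1;
		fclose(f);
	}
	printf("PRTM currently %s\n", state == 1 ? "enabled" : "off");

	f = fopen(path, "w");
	if (f) {
		fputs("1\n", f);	/* 1 => enable PRTM, 0 => turn it off */
		fclose(f);
	}
	return 0;
}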
diff --git a/Documentation/ABI/testing/sysfs-platform-asus-laptop b/Documentation/ABI/testing/sysfs-platform-asus-laptop
index 1d775390e856..41ff8ae4dee0 100644
--- a/Documentation/ABI/testing/sysfs-platform-asus-laptop
+++ b/Documentation/ABI/testing/sysfs-platform-asus-laptop
@@ -47,6 +47,20 @@ Date: January 2007
KernelVersion: 2.6.20
Contact: "Corentin Chary" <corentincj@iksaif.net>
Description:
- Control the bluetooth device. 1 means on, 0 means off.
+ Control the wlan device. 1 means on, 0 means off.
This may control the led, the device or both.
Users: Lapsus
+
+What: /sys/devices/platform/asus_laptop/wimax
+Date: October 2010
+KernelVersion: 2.6.37
+Contact: "Corentin Chary" <corentincj@iksaif.net>
+Description:
+ Control the wimax device. 1 means on, 0 means off.
+
+What: /sys/devices/platform/asus_laptop/wwan
+Date: October 2010
+KernelVersion: 2.6.37
+Contact: "Corentin Chary" <corentincj@iksaif.net>
+Description:
+ Control the wwan (3G) device. 1 means on, 0 means off.
diff --git a/Documentation/ABI/testing/sysfs-platform-eeepc-wmi b/Documentation/ABI/testing/sysfs-platform-eeepc-wmi
new file mode 100644
index 000000000000..e4b5fef5fadd
--- /dev/null
+++ b/Documentation/ABI/testing/sysfs-platform-eeepc-wmi
@@ -0,0 +1,10 @@
+What: /sys/devices/platform/eeepc-wmi/cpufv
+Date: Oct 2010
+KernelVersion: 2.6.37
+Contact: "Corentin Chary" <corentincj@iksaif.net>
+Description:
+ Change CPU clock configuration (write-only).
+ There are three available clock configurations:
+ * 0 -> Super Performance Mode
+ * 1 -> High Performance Mode
+ * 2 -> Power Saving Mode
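
For illustration (not part of the patch): writing one of the three documented values to the write-only cpufv attribute selects the clock configuration; the choice of mode 2 below is arbitrary.

#include <stdio.h>

int main(void)
{
	FILE *f = fopen("/sys/devices/platform/eeepc-wmi/cpufv", "w");

	if (!f) {
		perror("cpufv");
		return 1;
	}
	fputs("2\n", f);	/* 0 = Super Performance, 1 = High Performance, 2 = Power Saving */
	fclose(f);
	return 0;
}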
diff --git a/Documentation/DocBook/device-drivers.tmpl b/Documentation/DocBook/device-drivers.tmpl
index feca0758391e..22edcbb9ddaf 100644
--- a/Documentation/DocBook/device-drivers.tmpl
+++ b/Documentation/DocBook/device-drivers.tmpl
@@ -51,8 +51,13 @@
<sect1><title>Delaying, scheduling, and timer routines</title>
!Iinclude/linux/sched.h
!Ekernel/sched.c
+!Iinclude/linux/completion.h
!Ekernel/timer.c
</sect1>
+ <sect1><title>Wait queues and Wake events</title>
+!Iinclude/linux/wait.h
+!Ekernel/wait.c
+ </sect1>
<sect1><title>High-resolution timers</title>
!Iinclude/linux/ktime.h
!Iinclude/linux/hrtimer.h
diff --git a/Documentation/DocBook/kernel-api.tmpl b/Documentation/DocBook/kernel-api.tmpl
index 6b4e07f28b69..7160652a8736 100644
--- a/Documentation/DocBook/kernel-api.tmpl
+++ b/Documentation/DocBook/kernel-api.tmpl
@@ -93,6 +93,12 @@ X!Ilib/string.c
!Elib/crc32.c
!Elib/crc-ccitt.c
</sect1>
+
+ <sect1 id="idr"><title>idr/ida Functions</title>
+!Pinclude/linux/idr.h idr sync
+!Plib/idr.c IDA description
+!Elib/idr.c
+ </sect1>
</chapter>
<chapter id="mm">
diff --git a/Documentation/DocBook/kgdb.tmpl b/Documentation/DocBook/kgdb.tmpl
index 490d862c5f0d..d71b57fcf116 100644
--- a/Documentation/DocBook/kgdb.tmpl
+++ b/Documentation/DocBook/kgdb.tmpl
@@ -710,7 +710,18 @@ Task Addr Pid Parent [*] cpu State Thread Command
<listitem><para>A simple shell</para></listitem>
<listitem><para>The kdb core command set</para></listitem>
<listitem><para>A registration API to register additional kdb shell commands.</para>
- <para>A good example of a self-contained kdb module is the "ftdump" command for dumping the ftrace buffer. See: kernel/trace/trace_kdb.c</para></listitem>
+ <itemizedlist>
+ <listitem><para>A good example of a self-contained kdb module
+ is the "ftdump" command for dumping the ftrace buffer. See:
+ kernel/trace/trace_kdb.c</para></listitem>
+ <listitem><para>For an example of how to dynamically register
+ a new kdb command you can build the kdb_hello.ko kernel module
+ from samples/kdb/kdb_hello.c. To build this example you can
+ set CONFIG_SAMPLES=y and CONFIG_SAMPLE_KDB=m in your kernel
+ config. Later run "modprobe kdb_hello" and the next time you
+ enter the kdb shell, you can run the "hello"
+ command.</para></listitem>
+ </itemizedlist></listitem>
<listitem><para>The implementation for kdb_printf() which
emits messages directly to I/O drivers, bypassing the kernel
log.</para></listitem>
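
To make the registration API mentioned above concrete, here is a sketch along the lines of samples/kdb/kdb_hello.c (not part of this patch); the command name and messages are illustrative.

#include <linux/kdb.h>
#include <linux/module.h>
#include <linux/init.h>

static int kdb_hello_cmd(int argc, const char **argv)
{
	if (argc > 1)
		return KDB_ARGCOUNT;
	if (argc)
		kdb_printf("Hello %s.\n", argv[1]);
	else
		kdb_printf("Hello world!\n");
	return 0;
}

static int __init kdb_hello_init(void)
{
	/* registers "hello" so it shows up in the kdb shell's command set */
	kdb_register("hello", kdb_hello_cmd, "[string]",
		     "Say Hello World or Hello [string]", 0);
	return 0;
}

static void __exit kdb_hello_exit(void)
{
	kdb_unregister("hello");
}

module_init(kdb_hello_init);
module_exit(kdb_hello_exit);
MODULE_LICENSE("GPL");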
diff --git a/Documentation/DocBook/media-entities.tmpl b/Documentation/DocBook/media-entities.tmpl
index 6ae97157b1c7..be34dcbe0d90 100644
--- a/Documentation/DocBook/media-entities.tmpl
+++ b/Documentation/DocBook/media-entities.tmpl
@@ -250,6 +250,9 @@
<!ENTITY sub-yuv422p SYSTEM "v4l/pixfmt-yuv422p.xml">
<!ENTITY sub-yuyv SYSTEM "v4l/pixfmt-yuyv.xml">
<!ENTITY sub-yvyu SYSTEM "v4l/pixfmt-yvyu.xml">
+<!ENTITY sub-srggb10 SYSTEM "v4l/pixfmt-srggb10.xml">
+<!ENTITY sub-srggb8 SYSTEM "v4l/pixfmt-srggb8.xml">
+<!ENTITY sub-y10 SYSTEM "v4l/pixfmt-y10.xml">
<!ENTITY sub-pixfmt SYSTEM "v4l/pixfmt.xml">
<!ENTITY sub-cropcap SYSTEM "v4l/vidioc-cropcap.xml">
<!ENTITY sub-dbg-g-register SYSTEM "v4l/vidioc-dbg-g-register.xml">
@@ -347,6 +350,9 @@
<!ENTITY yuv422p SYSTEM "v4l/pixfmt-yuv422p.xml">
<!ENTITY yuyv SYSTEM "v4l/pixfmt-yuyv.xml">
<!ENTITY yvyu SYSTEM "v4l/pixfmt-yvyu.xml">
+<!ENTITY srggb10 SYSTEM "v4l/pixfmt-srggb10.xml">
+<!ENTITY srggb8 SYSTEM "v4l/pixfmt-srggb8.xml">
+<!ENTITY y10 SYSTEM "v4l/pixfmt-y10.xml">
<!ENTITY cropcap SYSTEM "v4l/vidioc-cropcap.xml">
<!ENTITY dbg-g-register SYSTEM "v4l/vidioc-dbg-g-register.xml">
<!ENTITY encoder-cmd SYSTEM "v4l/vidioc-encoder-cmd.xml">
diff --git a/Documentation/DocBook/sh.tmpl b/Documentation/DocBook/sh.tmpl
index d858d92cf6d9..4a38f604fa66 100644
--- a/Documentation/DocBook/sh.tmpl
+++ b/Documentation/DocBook/sh.tmpl
@@ -79,10 +79,6 @@
</sect2>
</sect1>
</chapter>
- <chapter id="clk">
- <title>Clock Framework Extensions</title>
-!Iinclude/linux/sh_clk.h
- </chapter>
<chapter id="mach">
<title>Machine Specific Interfaces</title>
<sect1 id="dreamcast">
diff --git a/Documentation/DocBook/uio-howto.tmpl b/Documentation/DocBook/uio-howto.tmpl
index 4d4ce0e61e42..b4665b9c40b0 100644
--- a/Documentation/DocBook/uio-howto.tmpl
+++ b/Documentation/DocBook/uio-howto.tmpl
@@ -16,7 +16,7 @@
</orgname>
<address>
- <email>hjk@linutronix.de</email>
+ <email>hjk@hansjkoch.de</email>
</address>
</affiliation>
</author>
@@ -114,7 +114,7 @@ GPL version 2.
<para>If you know of any translations for this document, or you are
interested in translating it, please email me
-<email>hjk@linutronix.de</email>.
+<email>hjk@hansjkoch.de</email>.
</para>
</sect1>
@@ -171,7 +171,7 @@ interested in translating it, please email me
<title>Feedback</title>
<para>Find something wrong with this document? (Or perhaps something
right?) I would love to hear from you. Please email me at
- <email>hjk@linutronix.de</email>.</para>
+ <email>hjk@hansjkoch.de</email>.</para>
</sect1>
</chapter>
diff --git a/Documentation/DocBook/v4l/compat.xml b/Documentation/DocBook/v4l/compat.xml
index 54447f0d0784..c9ce61d981f5 100644
--- a/Documentation/DocBook/v4l/compat.xml
+++ b/Documentation/DocBook/v4l/compat.xml
@@ -21,11 +21,15 @@ API.</para>
<title>Opening and Closing Devices</title>
<para>For compatibility reasons the character device file names
-recommended for V4L2 video capture, overlay, radio, teletext and raw
+recommended for V4L2 video capture, overlay, radio and raw
vbi capture devices did not change from those used by V4L. They are
listed in <xref linkend="devices" /> and below in <xref
linkend="v4l-dev" />.</para>
+ <para>The teletext devices (minor range 192-223) have been removed in
+V4L2 and no longer exist. There is no hardware available anymore for handling
+pure teletext. Instead raw or sliced VBI is used.</para>
+
<para>The V4L <filename>videodev</filename> module automatically
assigns minor numbers to drivers in load order, depending on the
registered device type. We recommend that V4L2 drivers by default
@@ -66,13 +70,6 @@ not compatible with V4L or V4L2.</para> </footnote>,
<entry>64-127</entry>
</row>
<row>
- <entry>Teletext decoder</entry>
- <entry><para><filename>/dev/vtx</filename>,
-<filename>/dev/vtx0</filename> to
-<filename>/dev/vtx31</filename></para></entry>
- <entry>192-223</entry>
- </row>
- <row>
<entry>Raw VBI capture</entry>
<entry><para><filename>/dev/vbi</filename>,
<filename>/dev/vbi0</filename> to
@@ -2345,6 +2342,17 @@ more information.</para>
</listitem>
</orderedlist>
</section>
+ <section>
+ <title>V4L2 in Linux 2.6.37</title>
+ <orderedlist>
+ <listitem>
+ <para>Remove the vtx (videotext/teletext) API. This API was no longer
+used and no hardware exists to verify the API. Nor were any userspace applications found
+that used it. It was originally scheduled for removal in 2.6.35.
+ </para>
+ </listitem>
+ </orderedlist>
+ </section>
<section id="other">
<title>Relation of V4L2 to other Linux multimedia APIs</title>
diff --git a/Documentation/DocBook/v4l/controls.xml b/Documentation/DocBook/v4l/controls.xml
index 8408caaee276..2fae3e87ce73 100644
--- a/Documentation/DocBook/v4l/controls.xml
+++ b/Documentation/DocBook/v4l/controls.xml
@@ -312,10 +312,17 @@ minimum value disables backlight compensation.</entry>
information and bits 24-31 must be zero.</entry>
</row>
<row>
+ <entry><constant>V4L2_CID_ILLUMINATORS_1</constant>
+ <constant>V4L2_CID_ILLUMINATORS_2</constant></entry>
+ <entry>boolean</entry>
+ <entry>Switch on or off the illuminator 1 or 2 of the device
+ (usually a microscope).</entry>
+ </row>
+ <row>
<entry><constant>V4L2_CID_LASTP1</constant></entry>
<entry></entry>
<entry>End of the predefined control IDs (currently
-<constant>V4L2_CID_BG_COLOR</constant> + 1).</entry>
+<constant>V4L2_CID_ILLUMINATORS_2</constant> + 1).</entry>
</row>
<row>
<entry><constant>V4L2_CID_PRIVATE_BASE</constant></entry>
@@ -357,9 +364,6 @@ enumerate_menu (void)
querymenu.index++) {
if (0 == ioctl (fd, &VIDIOC-QUERYMENU;, &amp;querymenu)) {
printf (" %s\n", querymenu.name);
- } else {
- perror ("VIDIOC_QUERYMENU");
- exit (EXIT_FAILURE);
}
}
}
diff --git a/Documentation/DocBook/v4l/dev-rds.xml b/Documentation/DocBook/v4l/dev-rds.xml
index 0869d701b1e5..360d2737e649 100644
--- a/Documentation/DocBook/v4l/dev-rds.xml
+++ b/Documentation/DocBook/v4l/dev-rds.xml
@@ -3,15 +3,16 @@
<para>The Radio Data System transmits supplementary
information in binary format, for example the station name or travel
information, on an inaudible audio subcarrier of a radio program. This
-interface is aimed at devices capable of receiving and decoding RDS
+interface is aimed at devices capable of receiving and/or transmitting RDS
information.</para>
<para>For more information see the core RDS standard <xref linkend="en50067" />
and the RBDS standard <xref linkend="nrsc4" />.</para>
<para>Note that the RBDS standard as is used in the USA is almost identical
-to the RDS standard. Any RDS decoder can also handle RBDS. Only some of the fields
-have slightly different meanings. See the RBDS standard for more information.</para>
+to the RDS standard. Any RDS decoder/encoder can also handle RBDS. Only some of the
+fields have slightly different meanings. See the RBDS standard for more
+information.</para>
<para>The RBDS standard also specifies support for MMBS (Modified Mobile Search).
This is a proprietary format which seems to be discontinued. The RDS interface does not
@@ -21,16 +22,25 @@ be needed, then please contact the linux-media mailing list: &v4l-ml;.</para>
<section>
<title>Querying Capabilities</title>
- <para>Devices supporting the RDS capturing API
-set the <constant>V4L2_CAP_RDS_CAPTURE</constant> flag in
+ <para>Devices supporting the RDS capturing API set
+the <constant>V4L2_CAP_RDS_CAPTURE</constant> flag in
the <structfield>capabilities</structfield> field of &v4l2-capability;
-returned by the &VIDIOC-QUERYCAP; ioctl.
-Any tuner that supports RDS will set the
-<constant>V4L2_TUNER_CAP_RDS</constant> flag in the <structfield>capability</structfield>
-field of &v4l2-tuner;.
-Whether an RDS signal is present can be detected by looking at
-the <structfield>rxsubchans</structfield> field of &v4l2-tuner;: the
-<constant>V4L2_TUNER_SUB_RDS</constant> will be set if RDS data was detected.</para>
+returned by the &VIDIOC-QUERYCAP; ioctl. Any tuner that supports RDS
+will set the <constant>V4L2_TUNER_CAP_RDS</constant> flag in
+the <structfield>capability</structfield> field of &v4l2-tuner;. If
+the driver only passes RDS blocks without interpreting the data
+the <constant>V4L2_TUNER_SUB_RDS_BLOCK_IO</constant> flag has to be
+set, see <link linkend="reading-rds-data">Reading RDS data</link>.
+For future use the
+flag <constant>V4L2_TUNER_SUB_RDS_CONTROLS</constant> has also been
+defined. However, a driver for a radio tuner with this capability does
+not yet exist, so if you are planning to write such a driver you
+should discuss this on the linux-media mailing list: &v4l-ml;.</para>
+
+ <para> Whether an RDS signal is present can be detected by looking
+at the <structfield>rxsubchans</structfield> field of &v4l2-tuner;:
+the <constant>V4L2_TUNER_SUB_RDS</constant> will be set if RDS data
+was detected.</para>
<para>Devices supporting the RDS output API
set the <constant>V4L2_CAP_RDS_OUTPUT</constant> flag in
@@ -40,16 +50,31 @@ Any modulator that supports RDS will set the
<constant>V4L2_TUNER_CAP_RDS</constant> flag in the <structfield>capability</structfield>
field of &v4l2-modulator;.
In order to enable the RDS transmission one must set the <constant>V4L2_TUNER_SUB_RDS</constant>
-bit in the <structfield>txsubchans</structfield> field of &v4l2-modulator;.</para>
-
+bit in the <structfield>txsubchans</structfield> field of &v4l2-modulator;.
+If the driver only passes RDS blocks without interpreting the data
+the <constant>V4L2_TUNER_SUB_RDS_BLOCK_IO</constant> flag has to be set. If the
+tuner is capable of handling RDS entities like program identification codes and radio
+text, the flag <constant>V4L2_TUNER_SUB_RDS_CONTROLS</constant> should be set,
+see <link linkend="writing-rds-data">Writing RDS data</link> and
+<link linkend="fm-tx-controls">FM Transmitter Control Reference</link>.</para>
</section>
- <section>
+ <section id="reading-rds-data">
<title>Reading RDS data</title>
<para>RDS data can be read from the radio device
-with the &func-read; function. The data is packed in groups of three bytes,
+with the &func-read; function. The data is packed in groups of three bytes.</para>
+ </section>
+
+ <section id="writing-rds-data">
+ <title>Writing RDS data</title>
+
+ <para>RDS data can be written to the radio device
+with the &func-write; function. The data is packed in groups of three bytes,
as follows:</para>
+ </section>
+
+ <section>
<table frame="none" pgwide="1" id="v4l2-rds-data">
<title>struct
<structname>v4l2_rds_data</structname></title>
@@ -111,48 +136,57 @@ as follows:</para>
<tbody valign="top">
<row>
<entry>V4L2_RDS_BLOCK_MSK</entry>
+ <entry> </entry>
<entry>7</entry>
<entry>Mask for bits 0-2 to get the block ID.</entry>
</row>
<row>
<entry>V4L2_RDS_BLOCK_A</entry>
+ <entry> </entry>
<entry>0</entry>
<entry>Block A.</entry>
</row>
<row>
<entry>V4L2_RDS_BLOCK_B</entry>
+ <entry> </entry>
<entry>1</entry>
<entry>Block B.</entry>
</row>
<row>
<entry>V4L2_RDS_BLOCK_C</entry>
+ <entry> </entry>
<entry>2</entry>
<entry>Block C.</entry>
</row>
<row>
<entry>V4L2_RDS_BLOCK_D</entry>
+ <entry> </entry>
<entry>3</entry>
<entry>Block D.</entry>
</row>
<row>
<entry>V4L2_RDS_BLOCK_C_ALT</entry>
+ <entry> </entry>
<entry>4</entry>
<entry>Block C'.</entry>
</row>
<row>
<entry>V4L2_RDS_BLOCK_INVALID</entry>
+ <entry>read-only</entry>
<entry>7</entry>
<entry>An invalid block.</entry>
</row>
<row>
<entry>V4L2_RDS_BLOCK_CORRECTED</entry>
+ <entry>read-only</entry>
<entry>0x40</entry>
<entry>A bit error was detected but corrected.</entry>
</row>
<row>
<entry>V4L2_RDS_BLOCK_ERROR</entry>
+ <entry>read-only</entry>
<entry>0x80</entry>
- <entry>An incorrectable error occurred.</entry>
+ <entry>An uncorrectable error occurred.</entry>
</row>
</tbody>
</tgroup>
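
To illustrate the block layout described above (not part of the patch): a userspace sketch that reads raw 3-byte RDS blocks from an assumed /dev/radio0 node and skips invalid or uncorrectable blocks.

#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>
#include <linux/videodev2.h>

int main(void)
{
	struct v4l2_rds_data blk;
	int fd = open("/dev/radio0", O_RDONLY);

	if (fd < 0) {
		perror("open");
		return 1;
	}
	/* each read() transfers whole v4l2_rds_data blocks (lsb, msb, block) */
	while (read(fd, &blk, sizeof(blk)) == sizeof(blk)) {
		int id = blk.block & V4L2_RDS_BLOCK_MSK;

		if (id == V4L2_RDS_BLOCK_INVALID ||
		    (blk.block & V4L2_RDS_BLOCK_ERROR))
			continue;	/* drop unusable blocks */
		printf("block id %d: 0x%02x%02x\n", id, blk.msb, blk.lsb);
	}
	close(fd);
	return 0;
}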
diff --git a/Documentation/DocBook/v4l/dev-teletext.xml b/Documentation/DocBook/v4l/dev-teletext.xml
index 76184e8ed618..414b1cfff9f4 100644
--- a/Documentation/DocBook/v4l/dev-teletext.xml
+++ b/Documentation/DocBook/v4l/dev-teletext.xml
@@ -1,35 +1,32 @@
<title>Teletext Interface</title>
- <para>This interface aims at devices receiving and demodulating
+ <para>This interface was aimed at devices receiving and demodulating
Teletext data [<xref linkend="ets300706" />, <xref linkend="itu653" />], evaluating the
Teletext packages and storing formatted pages in cache memory. Such
devices are usually implemented as microcontrollers with serial
-interface (I<superscript>2</superscript>C) and can be found on older
+interface (I<superscript>2</superscript>C) and could be found on old
TV cards, dedicated Teletext decoding cards and home-brew devices
connected to the PC parallel port.</para>
- <para>The Teletext API was designed by Martin Buck. It is defined in
+ <para>The Teletext API was designed by Martin Buck. It was defined in
the kernel header file <filename>linux/videotext.h</filename>, the
specification is available from <ulink url="ftp://ftp.gwdg.de/pub/linux/misc/videotext/">
ftp://ftp.gwdg.de/pub/linux/misc/videotext/</ulink>. (Videotext is the name of
-the German public television Teletext service.) Conventional character
-device file names are <filename>/dev/vtx</filename> and
-<filename>/dev/vttuner</filename>, with device number 83, 0 and 83, 16
-respectively. A similar interface exists for the Philips SAA5249
-Teletext decoder [specification?] with character device file names
-<filename>/dev/tlkN</filename>, device number 102, N.</para>
+the German public television Teletext service.)</para>
<para>Eventually the Teletext API was integrated into the V4L API
with character device file names <filename>/dev/vtx0</filename> to
<filename>/dev/vtx31</filename>, device major number 81, minor numbers
-192 to 223. For reference the V4L Teletext API specification is
-reproduced here in full: "Teletext interfaces talk the existing VTX
-API." Teletext devices with major number 83 and 102 will be removed in
-Linux 2.6.</para>
+192 to 223.</para>
- <para>There are no plans to replace the Teletext API or to integrate
-it into V4L2. Please write to the linux-media mailing list: &v4l-ml;
-when the need arises.</para>
+ <para>However, teletext decoders were quickly replaced by more
+generic VBI demodulators and those dedicated teletext decoders no longer exist.
+For many years the vtx devices were still around, even though nobody used
+them. So the decision was made to finally remove support for the Teletext API in
+kernel 2.6.37.</para>
+
+ <para>Modern devices all use the <link linkend="raw-vbi">raw</link> or
+<link linkend="sliced">sliced</link> VBI API.</para>
<!--
Local Variables:
diff --git a/Documentation/DocBook/v4l/pixfmt-packed-rgb.xml b/Documentation/DocBook/v4l/pixfmt-packed-rgb.xml
index 26e879231088..4db272b8a0d3 100644
--- a/Documentation/DocBook/v4l/pixfmt-packed-rgb.xml
+++ b/Documentation/DocBook/v4l/pixfmt-packed-rgb.xml
@@ -739,7 +739,7 @@ defined in error. Drivers may interpret them as in <xref
<entry>b<subscript>1</subscript></entry>
<entry>b<subscript>0</subscript></entry>
</row>
- <row id="V4L2-PIX-FMT-BGR666">
+ <row><!-- id="V4L2-PIX-FMT-BGR666" -->
<entry><constant>V4L2_PIX_FMT_BGR666</constant></entry>
<entry>'BGRH'</entry>
<entry></entry>
diff --git a/Documentation/DocBook/v4l/pixfmt-srggb10.xml b/Documentation/DocBook/v4l/pixfmt-srggb10.xml
new file mode 100644
index 000000000000..7b274092e60c
--- /dev/null
+++ b/Documentation/DocBook/v4l/pixfmt-srggb10.xml
@@ -0,0 +1,90 @@
+ <refentry>
+ <refmeta>
+ <refentrytitle>V4L2_PIX_FMT_SRGGB10 ('RG10'),
+ V4L2_PIX_FMT_SGRBG10 ('BA10'),
+ V4L2_PIX_FMT_SGBRG10 ('GB10'),
+ V4L2_PIX_FMT_SBGGR10 ('BG10'),
+ </refentrytitle>
+ &manvol;
+ </refmeta>
+ <refnamediv>
+ <refname id="V4L2-PIX-FMT-SRGGB10"><constant>V4L2_PIX_FMT_SRGGB10</constant></refname>
+ <refname id="V4L2-PIX-FMT-SGRBG10"><constant>V4L2_PIX_FMT_SGRBG10</constant></refname>
+ <refname id="V4L2-PIX-FMT-SGBRG10"><constant>V4L2_PIX_FMT_SGBRG10</constant></refname>
+ <refname id="V4L2-PIX-FMT-SBGGR10"><constant>V4L2_PIX_FMT_SBGGR10</constant></refname>
+ <refpurpose>10-bit Bayer formats expanded to 16 bits</refpurpose>
+ </refnamediv>
+ <refsect1>
+ <title>Description</title>
+
+ <para>The following four pixel formats are raw sRGB / Bayer formats with
+10 bits per colour. Each colour component is stored in a 16-bit word, with 6
+unused high bits filled with zeros. Each n-pixel row contains n/2 green samples
+and n/2 blue or red samples, with alternating red and blue rows. Bytes are
+stored in memory in little endian order. They are conventionally described
+as GRGR... BGBG..., RGRG... GBGB..., etc. Below is an example of one of these
+formats:</para>
+
+ <example>
+ <title><constant>V4L2_PIX_FMT_SBGGR10</constant> 4 &times; 4
+pixel image</title>
+
+ <formalpara>
+ <title>Byte Order.</title>
+	<para>Each cell is one byte; the high 6 bits of each high byte are 0.
+ <informaltable frame="none">
+ <tgroup cols="5" align="center">
+ <colspec align="left" colwidth="2*" />
+ <tbody valign="top">
+ <row>
+ <entry>start&nbsp;+&nbsp;0:</entry>
+ <entry>B<subscript>00low</subscript></entry>
+ <entry>B<subscript>00high</subscript></entry>
+ <entry>G<subscript>01low</subscript></entry>
+ <entry>G<subscript>01high</subscript></entry>
+ <entry>B<subscript>02low</subscript></entry>
+ <entry>B<subscript>02high</subscript></entry>
+ <entry>G<subscript>03low</subscript></entry>
+ <entry>G<subscript>03high</subscript></entry>
+ </row>
+ <row>
+ <entry>start&nbsp;+&nbsp;8:</entry>
+ <entry>G<subscript>10low</subscript></entry>
+ <entry>G<subscript>10high</subscript></entry>
+ <entry>R<subscript>11low</subscript></entry>
+ <entry>R<subscript>11high</subscript></entry>
+ <entry>G<subscript>12low</subscript></entry>
+ <entry>G<subscript>12high</subscript></entry>
+ <entry>R<subscript>13low</subscript></entry>
+ <entry>R<subscript>13high</subscript></entry>
+ </row>
+ <row>
+ <entry>start&nbsp;+&nbsp;16:</entry>
+ <entry>B<subscript>20low</subscript></entry>
+ <entry>B<subscript>20high</subscript></entry>
+ <entry>G<subscript>21low</subscript></entry>
+ <entry>G<subscript>21high</subscript></entry>
+ <entry>B<subscript>22low</subscript></entry>
+ <entry>B<subscript>22high</subscript></entry>
+ <entry>G<subscript>23low</subscript></entry>
+ <entry>G<subscript>23high</subscript></entry>
+ </row>
+ <row>
+ <entry>start&nbsp;+&nbsp;24:</entry>
+ <entry>G<subscript>30low</subscript></entry>
+ <entry>G<subscript>30high</subscript></entry>
+ <entry>R<subscript>31low</subscript></entry>
+ <entry>R<subscript>31high</subscript></entry>
+ <entry>G<subscript>32low</subscript></entry>
+ <entry>G<subscript>32high</subscript></entry>
+ <entry>R<subscript>33low</subscript></entry>
+ <entry>R<subscript>33high</subscript></entry>
+ </row>
+ </tbody>
+ </tgroup>
+ </informaltable>
+ </para>
+ </formalpara>
+ </example>
+ </refsect1>
+</refentry>
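
Since each 10-bit sample above occupies a little-endian 16-bit word whose top 6 bits are zero, unpacking is a two-byte combine. A small sketch, illustrative only:

#include <stdint.h>

/* Reassemble one 10-bit Bayer sample from its low/high byte pair. */
static inline uint16_t bayer10_sample(const uint8_t *p)
{
	return (uint16_t)p[0] | ((uint16_t)p[1] << 8);	/* high 6 bits are 0 */
}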
diff --git a/Documentation/DocBook/v4l/pixfmt-srggb8.xml b/Documentation/DocBook/v4l/pixfmt-srggb8.xml
new file mode 100644
index 000000000000..2570e3be3cf1
--- /dev/null
+++ b/Documentation/DocBook/v4l/pixfmt-srggb8.xml
@@ -0,0 +1,67 @@
+ <refentry id="V4L2-PIX-FMT-SRGGB8">
+ <refmeta>
+ <refentrytitle>V4L2_PIX_FMT_SRGGB8 ('RGGB')</refentrytitle>
+ &manvol;
+ </refmeta>
+ <refnamediv>
+ <refname><constant>V4L2_PIX_FMT_SRGGB8</constant></refname>
+ <refpurpose>Bayer RGB format</refpurpose>
+ </refnamediv>
+ <refsect1>
+ <title>Description</title>
+
+ <para>This is commonly the native format of digital cameras,
+reflecting the arrangement of sensors on the CCD device. Only one red,
+green or blue value is given for each pixel. Missing components must
+be interpolated from neighbouring pixels. From left to right the first
+row consists of a red and green value, the second row of a green and
+blue value. This scheme repeats to the right and down for every two
+columns and rows.</para>
+
+ <example>
+ <title><constant>V4L2_PIX_FMT_SRGGB8</constant> 4 &times; 4
+pixel image</title>
+
+ <formalpara>
+ <title>Byte Order.</title>
+ <para>Each cell is one byte.
+ <informaltable frame="none">
+ <tgroup cols="5" align="center">
+ <colspec align="left" colwidth="2*" />
+ <tbody valign="top">
+ <row>
+ <entry>start&nbsp;+&nbsp;0:</entry>
+ <entry>R<subscript>00</subscript></entry>
+ <entry>G<subscript>01</subscript></entry>
+ <entry>R<subscript>02</subscript></entry>
+ <entry>G<subscript>03</subscript></entry>
+ </row>
+ <row>
+ <entry>start&nbsp;+&nbsp;4:</entry>
+ <entry>G<subscript>10</subscript></entry>
+ <entry>B<subscript>11</subscript></entry>
+ <entry>G<subscript>12</subscript></entry>
+ <entry>B<subscript>13</subscript></entry>
+ </row>
+ <row>
+ <entry>start&nbsp;+&nbsp;8:</entry>
+ <entry>R<subscript>20</subscript></entry>
+ <entry>G<subscript>21</subscript></entry>
+ <entry>R<subscript>22</subscript></entry>
+ <entry>G<subscript>23</subscript></entry>
+ </row>
+ <row>
+ <entry>start&nbsp;+&nbsp;12:</entry>
+ <entry>G<subscript>30</subscript></entry>
+ <entry>B<subscript>31</subscript></entry>
+ <entry>G<subscript>32</subscript></entry>
+ <entry>B<subscript>33</subscript></entry>
+ </row>
+ </tbody>
+ </tgroup>
+ </informaltable>
+ </para>
+ </formalpara>
+ </example>
+ </refsect1>
+ </refentry>
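
The RGRG.../GBGB... layout described above makes the colour of a given sample a function of row and column parity alone. A sketch, illustrative only:

/* Colour component carried by the V4L2_PIX_FMT_SRGGB8 byte at (row, col). */
static char srggb8_component(unsigned int row, unsigned int col)
{
	if (row % 2 == 0)
		return (col % 2 == 0) ? 'R' : 'G';
	return (col % 2 == 0) ? 'G' : 'B';
}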
diff --git a/Documentation/DocBook/v4l/pixfmt-y10.xml b/Documentation/DocBook/v4l/pixfmt-y10.xml
new file mode 100644
index 000000000000..d065043db8d8
--- /dev/null
+++ b/Documentation/DocBook/v4l/pixfmt-y10.xml
@@ -0,0 +1,79 @@
+<refentry id="V4L2-PIX-FMT-Y10">
+ <refmeta>
+ <refentrytitle>V4L2_PIX_FMT_Y10 ('Y10 ')</refentrytitle>
+ &manvol;
+ </refmeta>
+ <refnamediv>
+ <refname><constant>V4L2_PIX_FMT_Y10</constant></refname>
+ <refpurpose>Grey-scale image</refpurpose>
+ </refnamediv>
+ <refsect1>
+ <title>Description</title>
+
+ <para>This is a grey-scale image with a depth of 10 bits per pixel. Pixels
+are stored in 16-bit words with unused high bits padded with 0. The least
+significant byte is stored at lower memory addresses (little-endian).</para>
+
+ <example>
+ <title><constant>V4L2_PIX_FMT_Y10</constant> 4 &times; 4
+pixel image</title>
+
+ <formalpara>
+ <title>Byte Order.</title>
+ <para>Each cell is one byte.
+ <informaltable frame="none">
+ <tgroup cols="9" align="center">
+ <colspec align="left" colwidth="2*" />
+ <tbody valign="top">
+ <row>
+ <entry>start&nbsp;+&nbsp;0:</entry>
+ <entry>Y'<subscript>00low</subscript></entry>
+ <entry>Y'<subscript>00high</subscript></entry>
+ <entry>Y'<subscript>01low</subscript></entry>
+ <entry>Y'<subscript>01high</subscript></entry>
+ <entry>Y'<subscript>02low</subscript></entry>
+ <entry>Y'<subscript>02high</subscript></entry>
+ <entry>Y'<subscript>03low</subscript></entry>
+ <entry>Y'<subscript>03high</subscript></entry>
+ </row>
+ <row>
+ <entry>start&nbsp;+&nbsp;8:</entry>
+ <entry>Y'<subscript>10low</subscript></entry>
+ <entry>Y'<subscript>10high</subscript></entry>
+ <entry>Y'<subscript>11low</subscript></entry>
+ <entry>Y'<subscript>11high</subscript></entry>
+ <entry>Y'<subscript>12low</subscript></entry>
+ <entry>Y'<subscript>12high</subscript></entry>
+ <entry>Y'<subscript>13low</subscript></entry>
+ <entry>Y'<subscript>13high</subscript></entry>
+ </row>
+ <row>
+ <entry>start&nbsp;+&nbsp;16:</entry>
+ <entry>Y'<subscript>20low</subscript></entry>
+ <entry>Y'<subscript>20high</subscript></entry>
+ <entry>Y'<subscript>21low</subscript></entry>
+ <entry>Y'<subscript>21high</subscript></entry>
+ <entry>Y'<subscript>22low</subscript></entry>
+ <entry>Y'<subscript>22high</subscript></entry>
+ <entry>Y'<subscript>23low</subscript></entry>
+ <entry>Y'<subscript>23high</subscript></entry>
+ </row>
+ <row>
+ <entry>start&nbsp;+&nbsp;24:</entry>
+ <entry>Y'<subscript>30low</subscript></entry>
+ <entry>Y'<subscript>30high</subscript></entry>
+ <entry>Y'<subscript>31low</subscript></entry>
+ <entry>Y'<subscript>31high</subscript></entry>
+ <entry>Y'<subscript>32low</subscript></entry>
+ <entry>Y'<subscript>32high</subscript></entry>
+ <entry>Y'<subscript>33low</subscript></entry>
+ <entry>Y'<subscript>33high</subscript></entry>
+ </row>
+ </tbody>
+ </tgroup>
+ </informaltable>
+ </para>
+ </formalpara>
+ </example>
+ </refsect1>
+</refentry>
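
As with the 10-bit Bayer formats, a Y10 sample is a little-endian 16-bit word with the top 6 bits zero, so dropping the two least significant bits yields ordinary 8-bit grey. A sketch, illustrative only:

#include <stdint.h>

static void y10_to_grey8(const uint8_t *src, uint8_t *dst, unsigned int pixels)
{
	while (pixels--) {
		uint16_t y = (uint16_t)src[0] | ((uint16_t)src[1] << 8);

		*dst++ = (uint8_t)(y >> 2);	/* 10-bit -> 8-bit */
		src += 2;
	}
}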
diff --git a/Documentation/DocBook/v4l/pixfmt.xml b/Documentation/DocBook/v4l/pixfmt.xml
index c4ad0a8e42dc..d7c467187095 100644
--- a/Documentation/DocBook/v4l/pixfmt.xml
+++ b/Documentation/DocBook/v4l/pixfmt.xml
@@ -566,7 +566,9 @@ access the palette, this must be done with ioctls of the Linux framebuffer API.<
&sub-sbggr8;
&sub-sgbrg8;
&sub-sgrbg8;
+ &sub-srggb8;
&sub-sbggr16;
+ &sub-srggb10;
</section>
<section id="yuv-formats">
@@ -589,6 +591,7 @@ information.</para>
&sub-packed-yuv;
&sub-grey;
+ &sub-y10;
&sub-y16;
&sub-yuyv;
&sub-uyvy;
@@ -685,6 +688,11 @@ http://www.ivtvdriver.org/</ulink></para><para>The format is documented in the
kernel sources in the file <filename>Documentation/video4linux/cx2341x/README.hm12</filename>
</para></entry>
</row>
+ <row id="V4L2-PIX-FMT-CPIA1">
+ <entry><constant>V4L2_PIX_FMT_CPIA1</constant></entry>
+ <entry>'CPIA'</entry>
+ <entry>YUV format used by the gspca cpia1 driver.</entry>
+ </row>
<row id="V4L2-PIX-FMT-SPCA501">
<entry><constant>V4L2_PIX_FMT_SPCA501</constant></entry>
<entry>'S501'</entry>
@@ -705,11 +713,6 @@ kernel sources in the file <filename>Documentation/video4linux/cx2341x/README.hm
<entry>'S561'</entry>
<entry>Compressed GBRG Bayer format used by the gspca driver.</entry>
</row>
- <row id="V4L2-PIX-FMT-SGRBG10">
- <entry><constant>V4L2_PIX_FMT_SGRBG10</constant></entry>
- <entry>'DA10'</entry>
- <entry>10 bit raw Bayer, expanded to 16 bits.</entry>
- </row>
<row id="V4L2-PIX-FMT-SGRBG10DPCM8">
<entry><constant>V4L2_PIX_FMT_SGRBG10DPCM8</constant></entry>
<entry>'DB10'</entry>
@@ -770,6 +773,11 @@ kernel sources in the file <filename>Documentation/video4linux/cx2341x/README.hm
<entry>'S920'</entry>
<entry>YUV 4:2:0 format of the gspca sn9c20x driver.</entry>
</row>
+ <row id="V4L2-PIX-FMT-SN9C2028">
+ <entry><constant>V4L2_PIX_FMT_SN9C2028</constant></entry>
+ <entry>'SONX'</entry>
+ <entry>Compressed GBRG bayer format of the gspca sn9c2028 driver.</entry>
+ </row>
<row id="V4L2-PIX-FMT-STV0680">
<entry><constant>V4L2_PIX_FMT_STV0680</constant></entry>
<entry>'S680'</entry>
@@ -787,6 +795,20 @@ http://www.thedirks.org/winnov/</ulink></para></entry>
<entry>'TM60'</entry>
<entry><para>Used by Trident tm6000</para></entry>
</row>
+ <row id="V4L2-PIX-FMT-CIT-YYVYUY">
+ <entry><constant>V4L2_PIX_FMT_CIT_YYVYUY</constant></entry>
+ <entry>'CITV'</entry>
+	    <entry><para>Used by xirlink CIT, found in IBM webcams.</para>
+	    <para>Uses one line of Y then one line of VYUY</para>
+ </entry>
+ </row>
+ <row id="V4L2-PIX-FMT-KONICA420">
+ <entry><constant>V4L2_PIX_FMT_KONICA420</constant></entry>
+ <entry>'KONI'</entry>
+ <entry><para>Used by Konica webcams.</para>
+ <para>YUV420 planar in blocks of 256 pixels.</para>
+ </entry>
+ </row>
<row id="V4L2-PIX-FMT-YYUV">
<entry><constant>V4L2_PIX_FMT_YYUV</constant></entry>
<entry>'YYUV'</entry>
diff --git a/Documentation/DocBook/v4l/v4l2.xml b/Documentation/DocBook/v4l/v4l2.xml
index 7c3c098d5d08..839e93e875ae 100644
--- a/Documentation/DocBook/v4l/v4l2.xml
+++ b/Documentation/DocBook/v4l/v4l2.xml
@@ -99,6 +99,7 @@ Remote Controller chapter.</contrib>
<year>2007</year>
<year>2008</year>
<year>2009</year>
+ <year>2010</year>
<holder>Bill Dirks, Michael H. Schimek, Hans Verkuil, Martin
Rubli, Andy Walls, Muralidharan Karicheri, Mauro Carvalho Chehab</holder>
</copyright>
@@ -110,10 +111,17 @@ Rubli, Andy Walls, Muralidharan Karicheri, Mauro Carvalho Chehab</holder>
<!-- Put document revisions here, newest first. -->
<!-- API revisions (changes and additions of defines, enums,
structs, ioctls) must be noted in more detail in the history chapter
-(compat.sgml), along with the possible impact on existing drivers and
+(compat.xml), along with the possible impact on existing drivers and
applications. -->
<revision>
+ <revnumber>2.6.37</revnumber>
+ <date>2010-08-06</date>
+ <authorinitials>hv</authorinitials>
+ <revremark>Removed obsolete vtx (videotext) API.</revremark>
+ </revision>
+
+ <revision>
<revnumber>2.6.33</revnumber>
<date>2009-12-03</date>
<authorinitials>mk</authorinitials>
diff --git a/Documentation/DocBook/v4l/videodev2.h.xml b/Documentation/DocBook/v4l/videodev2.h.xml
index 865b06d9e679..325b23b6964c 100644
--- a/Documentation/DocBook/v4l/videodev2.h.xml
+++ b/Documentation/DocBook/v4l/videodev2.h.xml
@@ -154,23 +154,13 @@ enum <link linkend="v4l2-buf-type">v4l2_buf_type</link> {
V4L2_BUF_TYPE_VBI_OUTPUT = 5,
V4L2_BUF_TYPE_SLICED_VBI_CAPTURE = 6,
V4L2_BUF_TYPE_SLICED_VBI_OUTPUT = 7,
-#if 1 /*KEEP*/
+#if 1
/* Experimental */
V4L2_BUF_TYPE_VIDEO_OUTPUT_OVERLAY = 8,
#endif
V4L2_BUF_TYPE_PRIVATE = 0x80,
};
-enum <link linkend="v4l2-ctrl-type">v4l2_ctrl_type</link> {
- V4L2_CTRL_TYPE_INTEGER = 1,
- V4L2_CTRL_TYPE_BOOLEAN = 2,
- V4L2_CTRL_TYPE_MENU = 3,
- V4L2_CTRL_TYPE_BUTTON = 4,
- V4L2_CTRL_TYPE_INTEGER64 = 5,
- V4L2_CTRL_TYPE_CTRL_CLASS = 6,
- V4L2_CTRL_TYPE_STRING = 7,
-};
-
enum <link linkend="v4l2-tuner-type">v4l2_tuner_type</link> {
V4L2_TUNER_RADIO = 1,
V4L2_TUNER_ANALOG_TV = 2,
@@ -288,6 +278,7 @@ struct <link linkend="v4l2-pix-format">v4l2_pix_format</link> {
#define <link linkend="V4L2-PIX-FMT-RGB565">V4L2_PIX_FMT_RGB565</link> v4l2_fourcc('R', 'G', 'B', 'P') /* 16 RGB-5-6-5 */
#define <link linkend="V4L2-PIX-FMT-RGB555X">V4L2_PIX_FMT_RGB555X</link> v4l2_fourcc('R', 'G', 'B', 'Q') /* 16 RGB-5-5-5 BE */
#define <link linkend="V4L2-PIX-FMT-RGB565X">V4L2_PIX_FMT_RGB565X</link> v4l2_fourcc('R', 'G', 'B', 'R') /* 16 RGB-5-6-5 BE */
+#define <link linkend="V4L2-PIX-FMT-BGR666">V4L2_PIX_FMT_BGR666</link> v4l2_fourcc('B', 'G', 'R', 'H') /* 18 BGR-6-6-6 */
#define <link linkend="V4L2-PIX-FMT-BGR24">V4L2_PIX_FMT_BGR24</link> v4l2_fourcc('B', 'G', 'R', '3') /* 24 BGR-8-8-8 */
#define <link linkend="V4L2-PIX-FMT-RGB24">V4L2_PIX_FMT_RGB24</link> v4l2_fourcc('R', 'G', 'B', '3') /* 24 RGB-8-8-8 */
#define <link linkend="V4L2-PIX-FMT-BGR32">V4L2_PIX_FMT_BGR32</link> v4l2_fourcc('B', 'G', 'R', '4') /* 32 BGR-8-8-8-8 */
@@ -295,6 +286,9 @@ struct <link linkend="v4l2-pix-format">v4l2_pix_format</link> {
/* Grey formats */
#define <link linkend="V4L2-PIX-FMT-GREY">V4L2_PIX_FMT_GREY</link> v4l2_fourcc('G', 'R', 'E', 'Y') /* 8 Greyscale */
+#define <link linkend="V4L2-PIX-FMT-Y4">V4L2_PIX_FMT_Y4</link> v4l2_fourcc('Y', '0', '4', ' ') /* 4 Greyscale */
+#define <link linkend="V4L2-PIX-FMT-Y6">V4L2_PIX_FMT_Y6</link> v4l2_fourcc('Y', '0', '6', ' ') /* 6 Greyscale */
+#define <link linkend="V4L2-PIX-FMT-Y10">V4L2_PIX_FMT_Y10</link> v4l2_fourcc('Y', '1', '0', ' ') /* 10 Greyscale */
#define <link linkend="V4L2-PIX-FMT-Y16">V4L2_PIX_FMT_Y16</link> v4l2_fourcc('Y', '1', '6', ' ') /* 16 Greyscale */
/* Palette formats */
@@ -330,7 +324,11 @@ struct <link linkend="v4l2-pix-format">v4l2_pix_format</link> {
#define <link linkend="V4L2-PIX-FMT-SBGGR8">V4L2_PIX_FMT_SBGGR8</link> v4l2_fourcc('B', 'A', '8', '1') /* 8 BGBG.. GRGR.. */
#define <link linkend="V4L2-PIX-FMT-SGBRG8">V4L2_PIX_FMT_SGBRG8</link> v4l2_fourcc('G', 'B', 'R', 'G') /* 8 GBGB.. RGRG.. */
#define <link linkend="V4L2-PIX-FMT-SGRBG8">V4L2_PIX_FMT_SGRBG8</link> v4l2_fourcc('G', 'R', 'B', 'G') /* 8 GRGR.. BGBG.. */
-#define <link linkend="V4L2-PIX-FMT-SGRBG10">V4L2_PIX_FMT_SGRBG10</link> v4l2_fourcc('B', 'A', '1', '0') /* 10bit raw bayer */
+#define <link linkend="V4L2-PIX-FMT-SRGGB8">V4L2_PIX_FMT_SRGGB8</link> v4l2_fourcc('R', 'G', 'G', 'B') /* 8 RGRG.. GBGB.. */
+#define <link linkend="V4L2-PIX-FMT-SBGGR10">V4L2_PIX_FMT_SBGGR10</link> v4l2_fourcc('B', 'G', '1', '0') /* 10 BGBG.. GRGR.. */
+#define <link linkend="V4L2-PIX-FMT-SGBRG10">V4L2_PIX_FMT_SGBRG10</link> v4l2_fourcc('G', 'B', '1', '0') /* 10 GBGB.. RGRG.. */
+#define <link linkend="V4L2-PIX-FMT-SGRBG10">V4L2_PIX_FMT_SGRBG10</link> v4l2_fourcc('B', 'A', '1', '0') /* 10 GRGR.. BGBG.. */
+#define <link linkend="V4L2-PIX-FMT-SRGGB10">V4L2_PIX_FMT_SRGGB10</link> v4l2_fourcc('R', 'G', '1', '0') /* 10 RGRG.. GBGB.. */
/* 10bit raw bayer DPCM compressed to 8 bits */
#define <link linkend="V4L2-PIX-FMT-SGRBG10DPCM8">V4L2_PIX_FMT_SGRBG10DPCM8</link> v4l2_fourcc('B', 'D', '1', '0')
/*
@@ -346,6 +344,7 @@ struct <link linkend="v4l2-pix-format">v4l2_pix_format</link> {
#define <link linkend="V4L2-PIX-FMT-MPEG">V4L2_PIX_FMT_MPEG</link> v4l2_fourcc('M', 'P', 'E', 'G') /* MPEG-1/2/4 */
/* Vendor-specific formats */
+#define <link linkend="V4L2-PIX-FMT-CPIA1">V4L2_PIX_FMT_CPIA1</link> v4l2_fourcc('C', 'P', 'I', 'A') /* cpia1 YUV */
#define <link linkend="V4L2-PIX-FMT-WNVA">V4L2_PIX_FMT_WNVA</link> v4l2_fourcc('W', 'N', 'V', 'A') /* Winnov hw compress */
#define <link linkend="V4L2-PIX-FMT-SN9C10X">V4L2_PIX_FMT_SN9C10X</link> v4l2_fourcc('S', '9', '1', '0') /* SN9C10x compression */
#define <link linkend="V4L2-PIX-FMT-SN9C20X-I420">V4L2_PIX_FMT_SN9C20X_I420</link> v4l2_fourcc('S', '9', '2', '0') /* SN9C20x YUV 4:2:0 */
@@ -358,12 +357,15 @@ struct <link linkend="v4l2-pix-format">v4l2_pix_format</link> {
#define <link linkend="V4L2-PIX-FMT-SPCA561">V4L2_PIX_FMT_SPCA561</link> v4l2_fourcc('S', '5', '6', '1') /* compressed GBRG bayer */
#define <link linkend="V4L2-PIX-FMT-PAC207">V4L2_PIX_FMT_PAC207</link> v4l2_fourcc('P', '2', '0', '7') /* compressed BGGR bayer */
#define <link linkend="V4L2-PIX-FMT-MR97310A">V4L2_PIX_FMT_MR97310A</link> v4l2_fourcc('M', '3', '1', '0') /* compressed BGGR bayer */
+#define <link linkend="V4L2-PIX-FMT-SN9C2028">V4L2_PIX_FMT_SN9C2028</link> v4l2_fourcc('S', 'O', 'N', 'X') /* compressed GBRG bayer */
#define <link linkend="V4L2-PIX-FMT-SQ905C">V4L2_PIX_FMT_SQ905C</link> v4l2_fourcc('9', '0', '5', 'C') /* compressed RGGB bayer */
#define <link linkend="V4L2-PIX-FMT-PJPG">V4L2_PIX_FMT_PJPG</link> v4l2_fourcc('P', 'J', 'P', 'G') /* Pixart 73xx JPEG */
#define <link linkend="V4L2-PIX-FMT-OV511">V4L2_PIX_FMT_OV511</link> v4l2_fourcc('O', '5', '1', '1') /* ov511 JPEG */
#define <link linkend="V4L2-PIX-FMT-OV518">V4L2_PIX_FMT_OV518</link> v4l2_fourcc('O', '5', '1', '8') /* ov518 JPEG */
-#define <link linkend="V4L2-PIX-FMT-TM6000">V4L2_PIX_FMT_TM6000</link> v4l2_fourcc('T', 'M', '6', '0') /* tm5600/tm60x0 */
#define <link linkend="V4L2-PIX-FMT-STV0680">V4L2_PIX_FMT_STV0680</link> v4l2_fourcc('S', '6', '8', '0') /* stv0680 bayer */
+#define <link linkend="V4L2-PIX-FMT-TM6000">V4L2_PIX_FMT_TM6000</link> v4l2_fourcc('T', 'M', '6', '0') /* tm5600/tm60x0 */
+#define <link linkend="V4L2-PIX-FMT-CIT-YYVYUY">V4L2_PIX_FMT_CIT_YYVYUY</link> v4l2_fourcc('C', 'I', 'T', 'V') /* one line of Y then 1 line of VYUY */
+#define <link linkend="V4L2-PIX-FMT-KONICA420">V4L2_PIX_FMT_KONICA420</link> v4l2_fourcc('K', 'O', 'N', 'I') /* YUV420 planar in blocks of 256 pixels */
/*
* F O R M A T E N U M E R A T I O N
@@ -380,7 +382,7 @@ struct <link linkend="v4l2-fmtdesc">v4l2_fmtdesc</link> {
#define V4L2_FMT_FLAG_COMPRESSED 0x0001
#define V4L2_FMT_FLAG_EMULATED 0x0002
-#if 1 /*KEEP*/
+#if 1
/* Experimental Frame Size and frame rate enumeration */
/*
* F R A M E S I Z E E N U M E R A T I O N
@@ -544,6 +546,8 @@ struct <link linkend="v4l2-buffer">v4l2_buffer</link> {
#define V4L2_BUF_FLAG_KEYFRAME 0x0008 /* Image is a keyframe (I-frame) */
#define V4L2_BUF_FLAG_PFRAME 0x0010 /* Image is a P-frame */
#define V4L2_BUF_FLAG_BFRAME 0x0020 /* Image is a B-frame */
+/* Buffer is ready, but the data contained within is corrupted. */
+#define V4L2_BUF_FLAG_ERROR 0x0040
#define V4L2_BUF_FLAG_TIMECODE 0x0100 /* timecode field is valid */
#define V4L2_BUF_FLAG_INPUT 0x0200 /* input field is valid */
@@ -934,6 +938,16 @@ struct <link linkend="v4l2-ext-controls">v4l2_ext_controls</link> {
#define V4L2_CTRL_ID2CLASS(id) ((id) &amp; 0x0fff0000UL)
#define V4L2_CTRL_DRIVER_PRIV(id) (((id) &amp; 0xffff) &gt;= 0x1000)
+enum <link linkend="v4l2-ctrl-type">v4l2_ctrl_type</link> {
+ V4L2_CTRL_TYPE_INTEGER = 1,
+ V4L2_CTRL_TYPE_BOOLEAN = 2,
+ V4L2_CTRL_TYPE_MENU = 3,
+ V4L2_CTRL_TYPE_BUTTON = 4,
+ V4L2_CTRL_TYPE_INTEGER64 = 5,
+ V4L2_CTRL_TYPE_CTRL_CLASS = 6,
+ V4L2_CTRL_TYPE_STRING = 7,
+};
+
/* Used in the VIDIOC_QUERYCTRL ioctl for querying controls */
struct <link linkend="v4l2-queryctrl">v4l2_queryctrl</link> {
__u32 id;
@@ -1018,21 +1032,27 @@ enum <link linkend="v4l2-colorfx">v4l2_colorfx</link> {
V4L2_COLORFX_NONE = 0,
V4L2_COLORFX_BW = 1,
V4L2_COLORFX_SEPIA = 2,
- V4L2_COLORFX_NEGATIVE = 3,
- V4L2_COLORFX_EMBOSS = 4,
- V4L2_COLORFX_SKETCH = 5,
- V4L2_COLORFX_SKY_BLUE = 6,
+ V4L2_COLORFX_NEGATIVE = 3,
+ V4L2_COLORFX_EMBOSS = 4,
+ V4L2_COLORFX_SKETCH = 5,
+ V4L2_COLORFX_SKY_BLUE = 6,
V4L2_COLORFX_GRASS_GREEN = 7,
V4L2_COLORFX_SKIN_WHITEN = 8,
- V4L2_COLORFX_VIVID = 9.
+ V4L2_COLORFX_VIVID = 9,
};
#define V4L2_CID_AUTOBRIGHTNESS (V4L2_CID_BASE+32)
#define V4L2_CID_BAND_STOP_FILTER (V4L2_CID_BASE+33)
#define V4L2_CID_ROTATE (V4L2_CID_BASE+34)
#define V4L2_CID_BG_COLOR (V4L2_CID_BASE+35)
+
+#define V4L2_CID_CHROMA_GAIN (V4L2_CID_BASE+36)
+
+#define V4L2_CID_ILLUMINATORS_1 (V4L2_CID_BASE+37)
+#define V4L2_CID_ILLUMINATORS_2 (V4L2_CID_BASE+38)
+
/* last CID + 1 */
-#define V4L2_CID_LASTP1 (V4L2_CID_BASE+36)
+#define V4L2_CID_LASTP1 (V4L2_CID_BASE+39)
/* MPEG-class control IDs defined by V4L2 */
#define V4L2_CID_MPEG_BASE (V4L2_CTRL_CLASS_MPEG | 0x900)
@@ -1349,6 +1369,8 @@ struct <link linkend="v4l2-modulator">v4l2_modulator</link> {
#define V4L2_TUNER_CAP_SAP 0x0020
#define V4L2_TUNER_CAP_LANG1 0x0040
#define V4L2_TUNER_CAP_RDS 0x0080
+#define V4L2_TUNER_CAP_RDS_BLOCK_IO 0x0100
+#define V4L2_TUNER_CAP_RDS_CONTROLS 0x0200
/* Flags for the 'rxsubchans' field */
#define V4L2_TUNER_SUB_MONO 0x0001
@@ -1378,7 +1400,8 @@ struct <link linkend="v4l2-hw-freq-seek">v4l2_hw_freq_seek</link> {
enum <link linkend="v4l2-tuner-type">v4l2_tuner_type</link> type;
__u32 seek_upward;
__u32 wrap_around;
- __u32 reserved[8];
+ __u32 spacing;
+ __u32 reserved[7];
};
/*
@@ -1433,7 +1456,7 @@ struct <link linkend="v4l2-audioout">v4l2_audioout</link> {
*
* NOTE: EXPERIMENTAL API
*/
-#if 1 /*KEEP*/
+#if 1
#define V4L2_ENC_IDX_FRAME_I (0)
#define V4L2_ENC_IDX_FRAME_P (1)
#define V4L2_ENC_IDX_FRAME_B (2)
@@ -1626,6 +1649,38 @@ struct <link linkend="v4l2-streamparm">v4l2_streamparm</link> {
};
/*
+ * E V E N T S
+ */
+
+#define V4L2_EVENT_ALL 0
+#define V4L2_EVENT_VSYNC 1
+#define V4L2_EVENT_EOS 2
+#define V4L2_EVENT_PRIVATE_START 0x08000000
+
+/* Payload for V4L2_EVENT_VSYNC */
+struct <link linkend="v4l2-event-vsync">v4l2_event_vsync</link> {
+ /* Can be V4L2_FIELD_ANY, _NONE, _TOP or _BOTTOM */
+ __u8 field;
+} __attribute__ ((packed));
+
+struct <link linkend="v4l2-event">v4l2_event</link> {
+ __u32 type;
+ union {
+ struct <link linkend="v4l2-event-vsync">v4l2_event_vsync</link> vsync;
+ __u8 data[64];
+ } u;
+ __u32 pending;
+ __u32 sequence;
+ struct timespec timestamp;
+ __u32 reserved[9];
+};
+
+struct <link linkend="v4l2-event-subscription">v4l2_event_subscription</link> {
+ __u32 type;
+ __u32 reserved[7];
+};
+
+/*
* A D V A N C E D D E B U G G I N G
*
* NOTE: EXPERIMENTAL API, NEVER RELY ON THIS IN APPLICATIONS!
@@ -1720,7 +1775,7 @@ struct <link linkend="v4l2-dbg-chip-ident">v4l2_dbg_chip_ident</link> {
#define VIDIOC_G_EXT_CTRLS _IOWR('V', 71, struct <link linkend="v4l2-ext-controls">v4l2_ext_controls</link>)
#define VIDIOC_S_EXT_CTRLS _IOWR('V', 72, struct <link linkend="v4l2-ext-controls">v4l2_ext_controls</link>)
#define VIDIOC_TRY_EXT_CTRLS _IOWR('V', 73, struct <link linkend="v4l2-ext-controls">v4l2_ext_controls</link>)
-#if 1 /*KEEP*/
+#if 1
#define VIDIOC_ENUM_FRAMESIZES _IOWR('V', 74, struct <link linkend="v4l2-frmsizeenum">v4l2_frmsizeenum</link>)
#define VIDIOC_ENUM_FRAMEINTERVALS _IOWR('V', 75, struct <link linkend="v4l2-frmivalenum">v4l2_frmivalenum</link>)
#define VIDIOC_G_ENC_INDEX _IOR('V', 76, struct <link linkend="v4l2-enc-idx">v4l2_enc_idx</link>)
@@ -1728,7 +1783,7 @@ struct <link linkend="v4l2-dbg-chip-ident">v4l2_dbg_chip_ident</link> {
#define VIDIOC_TRY_ENCODER_CMD _IOWR('V', 78, struct <link linkend="v4l2-encoder-cmd">v4l2_encoder_cmd</link>)
#endif
-#if 1 /*KEEP*/
+#if 1
/* Experimental, meant for debugging, testing and internal use.
Only implemented if CONFIG_VIDEO_ADV_DEBUG is defined.
You must be root to use these ioctls. Never use these in applications! */
@@ -1747,6 +1802,9 @@ struct <link linkend="v4l2-dbg-chip-ident">v4l2_dbg_chip_ident</link> {
#define VIDIOC_QUERY_DV_PRESET _IOR('V', 86, struct <link linkend="v4l2-dv-preset">v4l2_dv_preset</link>)
#define VIDIOC_S_DV_TIMINGS _IOWR('V', 87, struct <link linkend="v4l2-dv-timings">v4l2_dv_timings</link>)
#define VIDIOC_G_DV_TIMINGS _IOWR('V', 88, struct <link linkend="v4l2-dv-timings">v4l2_dv_timings</link>)
+#define VIDIOC_DQEVENT _IOR('V', 89, struct <link linkend="v4l2-event">v4l2_event</link>)
+#define VIDIOC_SUBSCRIBE_EVENT _IOW('V', 90, struct <link linkend="v4l2-event-subscription">v4l2_event_subscription</link>)
+#define VIDIOC_UNSUBSCRIBE_EVENT _IOW('V', 91, struct <link linkend="v4l2-event-subscription">v4l2_event_subscription</link>)
/* Reminder: when adding new ioctls please add support for them to
drivers/media/video/v4l2-compat-ioctl32.c as well! */
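
For the event interface added above, a minimal user-space sketch (not part of this patch) of subscribing to and dequeuing an end-of-stream event, assuming a videodev2.h that already carries these definitions; a real application would normally wait with poll() for an exception condition before calling VIDIOC_DQEVENT:

#include <string.h>
#include <sys/ioctl.h>
#include <linux/videodev2.h>

static int wait_for_eos(int fd)
{
	struct v4l2_event_subscription sub;
	struct v4l2_event ev;

	memset(&sub, 0, sizeof(sub));	/* also clears the reserved array */
	sub.type = V4L2_EVENT_EOS;
	if (ioctl(fd, VIDIOC_SUBSCRIBE_EVENT, &sub) < 0)
		return -1;
	if (ioctl(fd, VIDIOC_DQEVENT, &ev) < 0)	/* poll() first in real code */
		return -1;
	return ev.type == V4L2_EVENT_EOS ? 0 : 1;
}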
diff --git a/Documentation/DocBook/v4l/vidioc-g-dv-preset.xml b/Documentation/DocBook/v4l/vidioc-g-dv-preset.xml
index 3c6784e132f3..d733721a7519 100644
--- a/Documentation/DocBook/v4l/vidioc-g-dv-preset.xml
+++ b/Documentation/DocBook/v4l/vidioc-g-dv-preset.xml
@@ -16,8 +16,7 @@
<funcdef>int <function>ioctl</function></funcdef>
<paramdef>int <parameter>fd</parameter></paramdef>
<paramdef>int <parameter>request</parameter></paramdef>
- <paramdef>&v4l2-dv-preset;
-*<parameter>argp</parameter></paramdef>
+ <paramdef>struct v4l2_dv_preset *<parameter>argp</parameter></paramdef>
</funcprototype>
</funcsynopsis>
</refsynopsisdiv>
diff --git a/Documentation/DocBook/v4l/vidioc-g-dv-timings.xml b/Documentation/DocBook/v4l/vidioc-g-dv-timings.xml
index ecc19576bb8f..d5ec6abf0ce2 100644
--- a/Documentation/DocBook/v4l/vidioc-g-dv-timings.xml
+++ b/Documentation/DocBook/v4l/vidioc-g-dv-timings.xml
@@ -16,8 +16,7 @@
<funcdef>int <function>ioctl</function></funcdef>
<paramdef>int <parameter>fd</parameter></paramdef>
<paramdef>int <parameter>request</parameter></paramdef>
- <paramdef>&v4l2-dv-timings;
-*<parameter>argp</parameter></paramdef>
+ <paramdef>struct v4l2_dv_timings *<parameter>argp</parameter></paramdef>
</funcprototype>
</funcsynopsis>
</refsynopsisdiv>
diff --git a/Documentation/DocBook/v4l/vidioc-query-dv-preset.xml b/Documentation/DocBook/v4l/vidioc-query-dv-preset.xml
index 402229ee06f6..d272f7ab91b8 100644
--- a/Documentation/DocBook/v4l/vidioc-query-dv-preset.xml
+++ b/Documentation/DocBook/v4l/vidioc-query-dv-preset.xml
@@ -16,7 +16,7 @@ input</refpurpose>
<funcdef>int <function>ioctl</function></funcdef>
<paramdef>int <parameter>fd</parameter></paramdef>
<paramdef>int <parameter>request</parameter></paramdef>
- <paramdef>&v4l2-dv-preset; *<parameter>argp</parameter></paramdef>
+ <paramdef>struct v4l2_dv_preset *<parameter>argp</parameter></paramdef>
</funcprototype>
</funcsynopsis>
</refsynopsisdiv>
diff --git a/Documentation/DocBook/v4l/vidioc-querycap.xml b/Documentation/DocBook/v4l/vidioc-querycap.xml
index 6ab7e25b31b6..d499da93a450 100644
--- a/Documentation/DocBook/v4l/vidioc-querycap.xml
+++ b/Documentation/DocBook/v4l/vidioc-querycap.xml
@@ -184,7 +184,7 @@ data.</entry>
<row>
<entry><constant>V4L2_CAP_RDS_CAPTURE</constant></entry>
<entry>0x00000100</entry>
- <entry>The device supports the <link linkend="rds">RDS</link> interface.</entry>
+ <entry>The device supports the <link linkend="rds">RDS</link> capture interface.</entry>
</row>
<row>
<entry><constant>V4L2_CAP_VIDEO_OUTPUT_OVERLAY</constant></entry>
@@ -206,6 +206,11 @@ driver capabilities.</para></footnote></entry>
hardware frequency seeking.</entry>
</row>
<row>
+ <entry><constant>V4L2_CAP_RDS_OUTPUT</constant></entry>
+ <entry>0x00000800</entry>
+ <entry>The device supports the <link linkend="rds">RDS</link> output interface.</entry>
+ </row>
+ <row>
<entry><constant>V4L2_CAP_TUNER</constant></entry>
<entry>0x00010000</entry>
<entry>The device has some sort of tuner to
diff --git a/Documentation/DocBook/v4l/vidioc-queryctrl.xml b/Documentation/DocBook/v4l/vidioc-queryctrl.xml
index 8e0e055ac934..0d5e8283cf32 100644
--- a/Documentation/DocBook/v4l/vidioc-queryctrl.xml
+++ b/Documentation/DocBook/v4l/vidioc-queryctrl.xml
@@ -103,8 +103,12 @@ structure. The driver fills the rest of the structure or returns an
<structfield>index</structfield> is invalid. Menu items are enumerated
by calling <constant>VIDIOC_QUERYMENU</constant> with successive
<structfield>index</structfield> values from &v4l2-queryctrl;
-<structfield>minimum</structfield> (0) to
-<structfield>maximum</structfield>, inclusive.</para>
+<structfield>minimum</structfield> to
+<structfield>maximum</structfield>, inclusive. Note that it is possible
+for <constant>VIDIOC_QUERYMENU</constant> to return an &EINVAL; for some
+indices between <structfield>minimum</structfield> and <structfield>maximum</structfield>.
+In that case that particular menu item is not supported by this driver. Also note that
+the <structfield>minimum</structfield> value is not necessarily 0.</para>
<para>See also the examples in <xref linkend="control" />.</para>
@@ -139,7 +143,7 @@ string. This information is intended for the user.</entry>
<entry><structfield>minimum</structfield></entry>
<entry>Minimum value, inclusive. This field gives a lower
bound for <constant>V4L2_CTRL_TYPE_INTEGER</constant> controls and the
-lowest valid index (always 0) for <constant>V4L2_CTRL_TYPE_MENU</constant> controls.
+lowest valid index for <constant>V4L2_CTRL_TYPE_MENU</constant> controls.
For <constant>V4L2_CTRL_TYPE_STRING</constant> controls the minimum value
gives the minimum length of the string. This length <emphasis>does not include the terminating
zero</emphasis>. It may not be valid for any other type of control, including
@@ -279,7 +283,7 @@ values which are actually different on the hardware.</entry>
</row>
<row>
<entry><constant>V4L2_CTRL_TYPE_MENU</constant></entry>
- <entry>0</entry>
+ <entry>&ge; 0</entry>
<entry>1</entry>
<entry>N-1</entry>
<entry>The control has a menu of N choices. The names of
@@ -405,8 +409,10 @@ writing a value will cause the device to carry out a given action
<term><errorcode>EINVAL</errorcode></term>
<listitem>
<para>The &v4l2-queryctrl; <structfield>id</structfield>
-is invalid. The &v4l2-querymenu; <structfield>id</structfield> or
-<structfield>index</structfield> is invalid.</para>
+is invalid. The &v4l2-querymenu; <structfield>id</structfield> is
+invalid or <structfield>index</structfield> is out of range (less than
+<structfield>minimum</structfield> or greater than <structfield>maximum</structfield>)
+or this particular menu item is not supported by the driver.</para>
</listitem>
</varlistentry>
<varlistentry>
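
The enumeration rule described above (indices run from minimum to maximum and individual items may fail with EINVAL) translates into a simple loop; a sketch, illustrative only:

#include <stdio.h>
#include <string.h>
#include <sys/ioctl.h>
#include <linux/videodev2.h>

static void print_menu_items(int fd, const struct v4l2_queryctrl *qc)
{
	struct v4l2_querymenu qm;
	__s32 i;

	for (i = qc->minimum; i <= qc->maximum; i++) {
		memset(&qm, 0, sizeof(qm));
		qm.id = qc->id;
		qm.index = i;
		if (ioctl(fd, VIDIOC_QUERYMENU, &qm) < 0)
			continue;	/* this particular item is not supported */
		printf("%d: %s\n", i, (const char *)qm.name);
	}
}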
diff --git a/Documentation/DocBook/v4l/vidioc-s-hw-freq-seek.xml b/Documentation/DocBook/v4l/vidioc-s-hw-freq-seek.xml
index 14b3ec7ed75b..c30dcc4232c0 100644
--- a/Documentation/DocBook/v4l/vidioc-s-hw-freq-seek.xml
+++ b/Documentation/DocBook/v4l/vidioc-s-hw-freq-seek.xml
@@ -51,7 +51,8 @@
<para>Start a hardware frequency seek from the current frequency.
To do this applications initialize the <structfield>tuner</structfield>,
-<structfield>type</structfield>, <structfield>seek_upward</structfield> and
+<structfield>type</structfield>, <structfield>seek_upward</structfield>,
+<structfield>spacing</structfield> and
<structfield>wrap_around</structfield> fields, and zero out the
<structfield>reserved</structfield> array of a &v4l2-hw-freq-seek; and
call the <constant>VIDIOC_S_HW_FREQ_SEEK</constant> ioctl with a pointer
@@ -89,7 +90,12 @@ field and the &v4l2-tuner; <structfield>index</structfield> field.</entry>
</row>
<row>
<entry>__u32</entry>
- <entry><structfield>reserved</structfield>[8]</entry>
+ <entry><structfield>spacing</structfield></entry>
+ <entry>If non-zero, defines the hardware seek resolution in Hz. The driver selects the nearest value that is supported by the device. If spacing is zero a reasonable default value is used.</entry>
+ </row>
+ <row>
+ <entry>__u32</entry>
+ <entry><structfield>reserved</structfield>[7]</entry>
<entry>Reserved for future extensions. Drivers and
applications must set the array to zero.</entry>
</row>
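
A short sketch (not part of this patch) of filling the structure as the text above describes, including the new spacing field; the 100 kHz value is purely illustrative:

#include <string.h>
#include <sys/ioctl.h>
#include <linux/videodev2.h>

static int seek_up(int fd)
{
	struct v4l2_hw_freq_seek seek;

	memset(&seek, 0, sizeof(seek));	/* zeroes the reserved array too */
	seek.tuner = 0;
	seek.type = V4L2_TUNER_RADIO;
	seek.seek_upward = 1;
	seek.wrap_around = 1;
	seek.spacing = 100000;	/* seek resolution in Hz (illustrative) */
	return ioctl(fd, VIDIOC_S_HW_FREQ_SEEK, &seek);
}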
diff --git a/Documentation/accounting/getdelays.c b/Documentation/accounting/getdelays.c
index 6e25c2659e0a..a2976a6de033 100644
--- a/Documentation/accounting/getdelays.c
+++ b/Documentation/accounting/getdelays.c
@@ -21,6 +21,7 @@
#include <sys/types.h>
#include <sys/stat.h>
#include <sys/socket.h>
+#include <sys/wait.h>
#include <signal.h>
#include <linux/genetlink.h>
@@ -266,11 +267,13 @@ int main(int argc, char *argv[])
int containerset = 0;
char containerpath[1024];
int cfd = 0;
+ int forking = 0;
+ sigset_t sigset;
struct msgtemplate msg;
- while (1) {
- c = getopt(argc, argv, "qdiw:r:m:t:p:vlC:");
+ while (!forking) {
+ c = getopt(argc, argv, "qdiw:r:m:t:p:vlC:c:");
if (c < 0)
break;
@@ -319,6 +322,28 @@ int main(int argc, char *argv[])
err(1, "Invalid pid\n");
cmd_type = TASKSTATS_CMD_ATTR_PID;
break;
+ case 'c':
+
+ /* Block SIGCHLD for sigwait() later */
+ if (sigemptyset(&sigset) == -1)
+ err(1, "Failed to empty sigset");
+ if (sigaddset(&sigset, SIGCHLD))
+ err(1, "Failed to set sigchld in sigset");
+ sigprocmask(SIG_BLOCK, &sigset, NULL);
+
+ /* fork/exec a child */
+ tid = fork();
+ if (tid < 0)
+ err(1, "Fork failed\n");
+ if (tid == 0)
+ if (execvp(argv[optind - 1],
+ &argv[optind - 1]) < 0)
+ exit(-1);
+
+ /* Set the command type and avoid further processing */
+ cmd_type = TASKSTATS_CMD_ATTR_PID;
+ forking = 1;
+ break;
case 'v':
printf("debug on\n");
dbg = 1;
@@ -370,6 +395,15 @@ int main(int argc, char *argv[])
goto err;
}
+ /*
+ * If we forked a child, wait for it to exit. Cannot use waitpid()
+ * as all the delicious data would be reaped as part of the wait
+ */
+ if (tid && forking) {
+ int sig_received;
+ sigwait(&sigset, &sig_received);
+ }
+
if (tid) {
rc = send_cmd(nl_sd, id, mypid, TASKSTATS_CMD_GET,
cmd_type, &tid, sizeof(__u32));
diff --git a/Documentation/arm/OMAP/DSS b/Documentation/arm/OMAP/DSS
index 0af0e9eed5d6..888ae7b83ae4 100644
--- a/Documentation/arm/OMAP/DSS
+++ b/Documentation/arm/OMAP/DSS
@@ -255,9 +255,10 @@ framebuffer parameters.
Kernel boot arguments
---------------------
-vram=<size>
- - Amount of total VRAM to preallocate. For example, "10M". omapfb
- allocates memory for framebuffers from VRAM.
+vram=<size>[,<physaddr>]
+ - Amount of total VRAM to preallocate and optionally a physical start
+ memory address. For example, "10M". omapfb allocates memory for
+ framebuffers from VRAM.
omapfb.mode=<display>:<mode>[,...]
- Default video mode for specified displays. For example,
diff --git a/Documentation/block/switching-sched.txt b/Documentation/block/switching-sched.txt
index d5af3f630814..71cfbdc0f74d 100644
--- a/Documentation/block/switching-sched.txt
+++ b/Documentation/block/switching-sched.txt
@@ -16,7 +16,7 @@ you can do so by typing:
As of the Linux 2.6.10 kernel, it is now possible to change the
IO scheduler for a given block device on the fly (thus making it possible,
for instance, to set the CFQ scheduler for the system default, but
-set a specific device to use the anticipatory or noop schedulers - which
+set a specific device to use the deadline or noop schedulers - which
can improve that device's throughput).
To set a specific scheduler, simply do this:
@@ -31,7 +31,7 @@ a "cat /sys/block/DEV/queue/scheduler" - the list of valid names
will be displayed, with the currently selected scheduler in brackets:
# cat /sys/block/hda/queue/scheduler
-noop anticipatory deadline [cfq]
-# echo anticipatory > /sys/block/hda/queue/scheduler
+noop deadline [cfq]
+# echo deadline > /sys/block/hda/queue/scheduler
# cat /sys/block/hda/queue/scheduler
-noop [anticipatory] deadline cfq
+noop [deadline] cfq
diff --git a/Documentation/cgroups/cgroups.txt b/Documentation/cgroups/cgroups.txt
index b34823ff1646..190018b0c649 100644
--- a/Documentation/cgroups/cgroups.txt
+++ b/Documentation/cgroups/cgroups.txt
@@ -18,7 +18,8 @@ CONTENTS:
1.2 Why are cgroups needed ?
1.3 How are cgroups implemented ?
1.4 What does notify_on_release do ?
- 1.5 How do I use cgroups ?
+ 1.5 What does clone_children do ?
+ 1.6 How do I use cgroups ?
2. Usage Examples and Syntax
2.1 Basic Usage
2.2 Attaching processes
@@ -293,7 +294,16 @@ notify_on_release in the root cgroup at system boot is disabled
value of their parents notify_on_release setting. The default value of
a cgroup hierarchy's release_agent path is empty.
-1.5 How do I use cgroups ?
+1.5 What does clone_children do ?
+---------------------------------
+
+If the clone_children flag is enabled (1) in a cgroup, then all
+cgroups created beneath will call the post_clone callbacks for each
+subsystem of the newly created cgroup. Usually when this callback is
+implemented for a subsystem, it copies the values of the parent
+subsystem; this is the case for the cpuset.
+
+1.6 How do I use cgroups ?
--------------------------
To start a new job that is to be contained within a cgroup, using
diff --git a/Documentation/coccinelle.txt b/Documentation/coccinelle.txt
index cd2b02837066..4a276ea7001c 100644
--- a/Documentation/coccinelle.txt
+++ b/Documentation/coccinelle.txt
@@ -24,6 +24,9 @@ of many distributions, e.g. :
You can get the latest version released from the Coccinelle homepage at
http://coccinelle.lip6.fr/
+Information and tips about Coccinelle are also provided on the wiki
+pages at http://cocci.ekstranet.diku.dk/wiki/doku.php
+
Once you have it, run the following command:
./configure
@@ -41,20 +44,22 @@ A Coccinelle-specific target is defined in the top level
Makefile. This target is named 'coccicheck' and calls the 'coccicheck'
front-end in the 'scripts' directory.
-Four modes are defined: report, patch, context, and org. The mode to
+Four modes are defined: patch, report, context, and org. The mode to
use is specified by setting the MODE variable with 'MODE=<mode>'.
+'patch' proposes a fix, when possible.
+
'report' generates a list in the following format:
file:line:column-column: message
-'patch' proposes a fix, when possible.
-
'context' highlights lines of interest and their context in a
diff-like style. Lines of interest are indicated with '-'.
'org' generates a report in the Org mode format of Emacs.
-Note that not all semantic patches implement all modes.
+Note that not all semantic patches implement all modes. For easy use
+of Coccinelle, the default mode is "chain" which tries the previous
+modes in the order above until one succeeds.
To make a report for every semantic patch, run the following command:
@@ -68,9 +73,9 @@ To produce patches, run:
The coccicheck target applies every semantic patch available in the
-subdirectories of 'scripts/coccinelle' to the entire Linux kernel.
+sub-directories of 'scripts/coccinelle' to the entire Linux kernel.
-For each semantic patch, a changelog message is proposed. It gives a
+For each semantic patch, a commit message is proposed. It gives a
description of the problem being checked by the semantic patch, and
includes a reference to Coccinelle.
@@ -93,12 +98,35 @@ or
make coccicheck COCCI=<my_SP.cocci> MODE=report
+ Using Coccinelle on (modified) files
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+To apply Coccinelle on a file basis, instead of a directory basis, the
+following command may be used:
+
+ make C=1 CHECK="scripts/coccicheck"
+
+To check only newly edited code, use the value 2 for the C flag, i.e.
+
+ make C=2 CHECK="scripts/coccicheck"
+
+This runs every semantic patch in scripts/coccinelle by default. The
+COCCI variable may additionally be used to only apply a single
+semantic patch as shown in the previous section.
+
+The "chain" mode is the default. You can select another one with the
+MODE variable explained above.
+
+In this mode, there is no information about semantic patches
+displayed, and no commit message proposed.
+
+
Proposing new semantic patches
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
New semantic patches can be proposed and submitted by kernel
developers. For sake of clarity, they should be organized in the
-subdirectories of 'scripts/coccinelle/'.
+sub-directories of 'scripts/coccinelle/'.
Detailed description of the 'report' mode
@@ -111,7 +139,7 @@ Example:
Running
- make coccicheck MODE=report COCCI=scripts/coccinelle/err_cast.cocci
+ make coccicheck MODE=report COCCI=scripts/coccinelle/api/err_cast.cocci
will execute the following part of the SmPL script.
@@ -149,7 +177,7 @@ identified.
Example:
Running
- make coccicheck MODE=patch COCCI=scripts/coccinelle/err_cast.cocci
+ make coccicheck MODE=patch COCCI=scripts/coccinelle/api/err_cast.cocci
will execute the following part of the SmPL script.
@@ -193,7 +221,7 @@ NOTE: The diff-like output generated is NOT an applicable patch. The
Example:
Running
- make coccicheck MODE=context COCCI=scripts/coccinelle/err_cast.cocci
+ make coccicheck MODE=context COCCI=scripts/coccinelle/api/err_cast.cocci
will execute the following part of the SmPL script.
@@ -228,7 +256,7 @@ diff -u -p /home/user/linux/crypto/ctr.c /tmp/nothing
Example:
Running
- make coccicheck MODE=org COCCI=scripts/coccinelle/err_cast.cocci
+ make coccicheck MODE=org COCCI=scripts/coccinelle/api/err_cast.cocci
will execute the following part of the SmPL script.
diff --git a/Documentation/development-process/2.Process b/Documentation/development-process/2.Process
index 97726eba6102..911a45186340 100644
--- a/Documentation/development-process/2.Process
+++ b/Documentation/development-process/2.Process
@@ -154,7 +154,7 @@ The stages that a patch goes through are, generally:
inclusion, it should be accepted by a relevant subsystem maintainer -
though this acceptance is not a guarantee that the patch will make it
all the way to the mainline. The patch will show up in the maintainer's
- subsystem tree and into the staging trees (described below). When the
+ subsystem tree and into the -next trees (described below). When the
process works, this step leads to more extensive review of the patch and
the discovery of any problems resulting from the integration of this
patch with work being done by others.
@@ -236,7 +236,7 @@ finding the right maintainer. Sending patches directly to Linus is not
normally the right way to go.
-2.4: STAGING TREES
+2.4: NEXT TREES
The chain of subsystem trees guides the flow of patches into the kernel,
but it also raises an interesting question: what if somebody wants to look
@@ -250,7 +250,7 @@ changes land in the mainline kernel. One could pull changes from all of
the interesting subsystem trees, but that would be a big and error-prone
job.
-The answer comes in the form of staging trees, where subsystem trees are
+The answer comes in the form of -next trees, where subsystem trees are
collected for testing and review. The older of these trees, maintained by
Andrew Morton, is called "-mm" (for memory management, which is how it got
started). The -mm tree integrates patches from a long list of subsystem
@@ -275,7 +275,7 @@ directory at:
Use of the MMOTM tree is likely to be a frustrating experience, though;
there is a definite chance that it will not even compile.
-The other staging tree, started more recently, is linux-next, maintained by
+The other -next tree, started more recently, is linux-next, maintained by
Stephen Rothwell. The linux-next tree is, by design, a snapshot of what
the mainline is expected to look like after the next merge window closes.
Linux-next trees are announced on the linux-kernel and linux-next mailing
@@ -303,12 +303,25 @@ volatility of linux-next tends to make it a difficult development target.
See http://lwn.net/Articles/289013/ for more information on this topic, and
stay tuned; much is still in flux where linux-next is involved.
-Besides the mmotm and linux-next trees, the kernel source tree now contains
-the drivers/staging/ directory and many sub-directories for drivers or
-filesystems that are on their way to being added to the kernel tree
-proper, but they remain in drivers/staging/ while they still need more
-work.
-
+2.4.1: STAGING TREES
+
+The kernel source tree now contains the drivers/staging/ directory, where
+many sub-directories for drivers or filesystems that are on their way to
+being added to the kernel tree live. They remain in drivers/staging while
+they still need more work; once complete, they can be moved into the
+kernel proper. This is a way to keep track of drivers that aren't
+up to Linux kernel coding or quality standards, but people may want to use
+them and track development.
+
+Greg Kroah-Hartman currently (as of 2.6.36) maintains the staging tree.
+Drivers that still need work are sent to him, with each driver having
+its own subdirectory in drivers/staging/. Along with the driver source
+files, a TODO file should be present in the directory as well. The TODO
+file lists the pending work that the driver needs for acceptance into
+the kernel proper, as well as a list of people that should be Cc'd for any
+patches to the driver. Staging drivers that don't currently build should
+have their config entries depend upon CONFIG_BROKEN. Once they can
+be successfully built without outside patches, CONFIG_BROKEN can be removed.
2.5: TOOLS
diff --git a/Documentation/devices.txt b/Documentation/devices.txt
index c58abf1ccc71..eccffe715229 100644
--- a/Documentation/devices.txt
+++ b/Documentation/devices.txt
@@ -1496,9 +1496,6 @@ Your cooperation is appreciated.
64 = /dev/radio0 Radio device
...
127 = /dev/radio63 Radio device
- 192 = /dev/vtx0 Teletext device
- ...
- 223 = /dev/vtx31 Teletext device
224 = /dev/vbi0 Vertical blank interrupt
...
255 = /dev/vbi31 Vertical blank interrupt
@@ -2520,6 +2517,12 @@ Your cooperation is appreciated.
8 = /dev/mmcblk1 Second SD/MMC card
...
+ The start of next SD/MMC card can be configured with
+ CONFIG_MMC_BLOCK_MINORS, or overridden at boot/modprobe
+ time using the mmcblk.perdev_minors option. That would
+ bump the offset between each card to be the configured
+ value instead of the default 8.
+
179 char CCube DVXChip-based PCI products
0 = /dev/dvxirq0 First DVX device
1 = /dev/dvxirq1 Second DVX device
diff --git a/Documentation/driver-model/interface.txt b/Documentation/driver-model/interface.txt
deleted file mode 100644
index c66912bfe866..000000000000
--- a/Documentation/driver-model/interface.txt
+++ /dev/null
@@ -1,129 +0,0 @@
-
-Device Interfaces
-
-Introduction
-~~~~~~~~~~~~
-
-Device interfaces are the logical interfaces of device classes that correlate
-directly to userspace interfaces, like device nodes.
-
-Each device class may have multiple interfaces through which you can
-access the same device. An input device may support the mouse interface,
-the 'evdev' interface, and the touchscreen interface. A SCSI disk would
-support the disk interface, the SCSI generic interface, and possibly a raw
-device interface.
-
-Device interfaces are registered with the class they belong to. As devices
-are added to the class, they are added to each interface registered with
-the class. The interface is responsible for determining whether the device
-supports the interface or not.
-
-
-Programming Interface
-~~~~~~~~~~~~~~~~~~~~~
-
-struct device_interface {
- char * name;
- rwlock_t lock;
- u32 devnum;
- struct device_class * devclass;
-
- struct list_head node;
- struct driver_dir_entry dir;
-
- int (*add_device)(struct device *);
- int (*add_device)(struct intf_data *);
-};
-
-int interface_register(struct device_interface *);
-void interface_unregister(struct device_interface *);
-
-
-An interface must specify the device class it belongs to. It is added
-to that class's list of interfaces on registration.
-
-
-Interfaces can be added to a device class at any time. Whenever it is
-added, each device in the class is passed to the interface's
-add_device callback. When an interface is removed, each device is
-removed from the interface.
-
-
-Devices
-~~~~~~~
-Once a device is added to a device class, it is added to each
-interface that is registered with the device class. The class
-is expected to place a class-specific data structure in
-struct device::class_data. The interface can use that (along with
-other fields of struct device) to determine whether or not the driver
-and/or device support that particular interface.
-
-
-Data
-~~~~
-
-struct intf_data {
- struct list_head node;
- struct device_interface * intf;
- struct device * dev;
- u32 intf_num;
-};
-
-int interface_add_data(struct interface_data *);
-
-The interface is responsible for allocating and initializing a struct
-intf_data and calling interface_add_data() to add it to the device's list
-of interfaces it belongs to. This list will be iterated over when the device
-is removed from the class (instead of all possible interfaces for a class).
-This structure should probably be embedded in whatever per-device data
-structure the interface is allocating anyway.
-
-Devices are enumerated within the interface. This happens in interface_add_data()
-and the enumerated value is stored in the struct intf_data for that device.
-
-sysfs
-~~~~~
-Each interface is given a directory in the directory of the device
-class it belongs to:
-
-Interfaces get a directory in the class's directory as well:
-
- class/
- `-- input
- |-- devices
- |-- drivers
- |-- mouse
- `-- evdev
-
-When a device is added to the interface, a symlink is created that points
-to the device's directory in the physical hierarchy:
-
- class/
- `-- input
- |-- devices
- | `-- 1 -> ../../../root/pci0/00:1f.0/usb_bus/00:1f.2-1:0/
- |-- drivers
- | `-- usb:usb_mouse -> ../../../bus/drivers/usb_mouse/
- |-- mouse
- | `-- 1 -> ../../../root/pci0/00:1f.0/usb_bus/00:1f.2-1:0/
- `-- evdev
- `-- 1 -> ../../../root/pci0/00:1f.0/usb_bus/00:1f.2-1:0/
-
-
-Future Plans
-~~~~~~~~~~~~
-A device interface is correlated directly with a userspace interface
-for a device, specifically a device node. For instance, a SCSI disk
-exposes at least two interfaces to userspace: the standard SCSI disk
-interface and the SCSI generic interface. It might also export a raw
-device interface.
-
-Many interfaces have a major number associated with them and each
-device gets a minor number. Or, multiple interfaces might share one
-major number, and each will receive a range of minor numbers (like in
-the case of input devices).
-
-These major and minor numbers could be stored in the interface
-structure. Major and minor allocations could happen when the interface
-is registered with the class, or via a helper function.
-
diff --git a/Documentation/dvb/get_dvb_firmware b/Documentation/dvb/get_dvb_firmware
index 350959f4e41b..59690de8ebfe 100644
--- a/Documentation/dvb/get_dvb_firmware
+++ b/Documentation/dvb/get_dvb_firmware
@@ -26,7 +26,8 @@ use IO::Handle;
"dec3000s", "vp7041", "dibusb", "nxt2002", "nxt2004",
"or51211", "or51132_qam", "or51132_vsb", "bluebird",
"opera1", "cx231xx", "cx18", "cx23885", "pvrusb2", "mpc718",
- "af9015", "ngene", "az6027");
+ "af9015", "ngene", "az6027", "lme2510_lg", "lme2510c_s7395",
+ "lme2510c_s7395_old");
# Check args
syntax() if (scalar(@ARGV) != 1);
@@ -584,6 +585,49 @@ sub az6027{
$firmware;
}
+
+sub lme2510_lg {
+ my $sourcefile = "LMEBDA_DVBS.sys";
+ my $hash = "fc6017ad01e79890a97ec53bea157ed2";
+ my $outfile = "dvb-usb-lme2510-lg.fw";
+ my $hasho = "caa065d5fdbd2c09ad57b399bbf55cad";
+
+ checkstandard();
+
+ verify($sourcefile, $hash);
+ extract($sourcefile, 4168, 3841, $outfile);
+ verify($outfile, $hasho);
+ $outfile;
+}
+
+sub lme2510c_s7395 {
+ my $sourcefile = "US2A0D.sys";
+ my $hash = "b0155a8083fb822a3bd47bc360e74601";
+ my $outfile = "dvb-usb-lme2510c-s7395.fw";
+ my $hasho = "3a3cf1aeebd17b6ddc04cebe131e94cf";
+
+ checkstandard();
+
+ verify($sourcefile, $hash);
+ extract($sourcefile, 37248, 3720, $outfile);
+ verify($outfile, $hasho);
+ $outfile;
+}
+
+sub lme2510c_s7395_old {
+ my $sourcefile = "LMEBDA_DVBS7395C.sys";
+ my $hash = "7572ae0eb9cdf91baabd7c0ba9e09b31";
+ my $outfile = "dvb-usb-lme2510c-s7395.fw";
+ my $hasho = "90430c5b435eb5c6f88fd44a9d950674";
+
+ checkstandard();
+
+ verify($sourcefile, $hash);
+ extract($sourcefile, 4208, 3881, $outfile);
+ verify($outfile, $hasho);
+ $outfile;
+}
+
# ---------------------------------------------------------------
# Utilities
diff --git a/Documentation/dvb/lmedm04.txt b/Documentation/dvb/lmedm04.txt
new file mode 100644
index 000000000000..e175784b89bf
--- /dev/null
+++ b/Documentation/dvb/lmedm04.txt
@@ -0,0 +1,58 @@
+To extract firmware for the DM04/QQBOX you need to copy the
+following file(s) to this directory.
+
+for DM04+/QQBOX LME2510C (Sharp 7395 Tuner)
+-------------------------------------------
+
+The Sharp 7395 driver can be found in windows/system32/driver
+
+US2A0D.sys (dated 17 Mar 2009)
+
+
+and run
+./get_dvb_firmware lme2510c_s7395
+
+ will produce
+ dvb-usb-lme2510c-s7395.fw
+
+An alternative but older firmware can be found on the driver
+disk DVB-S_EN_3.5A in BDADriver/driver
+
+LMEBDA_DVBS7395C.sys (dated 18 Jan 2008)
+
+and run
+./get_dvb_firmware lme2510c_s7395_old
+
+ will produce
+ dvb-usb-lme2510c-s7395.fw
+
+--------------------------------------------------------------------
+
+The LG firmware can be found on the driver
+disk DM04+_5.1A[LG] in BDADriver/driver
+
+for DM04 LME2510 (LG Tuner)
+---------------------------
+
+LMEBDA_DVBS.sys (dated 13 Nov 2007)
+
+and run
+./get_dvb_firmware lme2510_lg
+
+ will produce
+ dvb-usb-lme2510-lg.fw
+
+
+Other LG firmware can be extracted manually from US280D.sys
+only found in windows/system32/driver.
+
+dd if=US280D.sys ibs=1 skip=42616 count=3668 of=dvb-usb-lme2510-lg.fw
+
+for DM04 LME2510C (LG Tuner)
+---------------------------
+
+dd if=US280D.sys ibs=1 skip=35200 count=3850 of=dvb-usb-lme2510c-lg.fw
+
+---------------------------------------------------------------------
+
+Copy the firmware file(s) to /lib/firmware
diff --git a/Documentation/edac.txt b/Documentation/edac.txt
index 0b875e8da969..9ee774de57cd 100644
--- a/Documentation/edac.txt
+++ b/Documentation/edac.txt
@@ -196,7 +196,7 @@ csrow3.
The representation of the above is reflected in the directory tree
in EDAC's sysfs interface. Starting in directory
/sys/devices/system/edac/mc each memory controller will be represented
-by its own 'mcX' directory, where 'X" is the index of the MC.
+by its own 'mcX' directory, where 'X' is the index of the MC.
..../edac/mc/
@@ -207,7 +207,7 @@ by its own 'mcX' directory, where 'X" is the index of the MC.
....
Under each 'mcX' directory each 'csrowX' is again represented by a
-'csrowX', where 'X" is the csrow index:
+'csrowX', where 'X' is the csrow index:
.../mc/mc0/
@@ -232,7 +232,7 @@ EDAC control and attribute files.
In 'mcX' directories are EDAC control and attribute files for
-this 'X" instance of the memory controllers:
+this 'X' instance of the memory controllers:
Counter reset control file:
@@ -343,7 +343,7 @@ Sdram memory scrubbing rate:
'csrowX' DIRECTORIES
In the 'csrowX' directories are EDAC control and attribute files for
-this 'X" instance of csrow:
+this 'X' instance of csrow:
Total Uncorrectable Errors count attribute file:
diff --git a/Documentation/fb/00-INDEX b/Documentation/fb/00-INDEX
index a618fd99c9f0..30a70542e823 100644
--- a/Documentation/fb/00-INDEX
+++ b/Documentation/fb/00-INDEX
@@ -4,33 +4,41 @@ please mail me.
Geert Uytterhoeven <geert@linux-m68k.org>
00-INDEX
- - this file
+ - this file.
arkfb.txt
- info on the fbdev driver for ARK Logic chips.
aty128fb.txt
- info on the ATI Rage128 frame buffer driver.
cirrusfb.txt
- info on the driver for Cirrus Logic chipsets.
+cmap_xfbdev.txt
+ - an introduction to fbdev's cmap structures.
deferred_io.txt
- an introduction to deferred IO.
+efifb.txt
+ - info on the EFI platform driver for Intel based Apple computers.
+ep93xx-fb.txt
+ - info on the driver for EP93xx LCD controller.
fbcon.txt
- intro to and usage guide for the framebuffer console (fbcon).
framebuffer.txt
- introduction to frame buffer devices.
-imacfb.txt
- - info on the generic EFI platform driver for Intel based Macs.
+gxfb.txt
+ - info on the framebuffer driver for AMD Geode GX2 based processors.
intel810.txt
- documentation for the Intel 810/815 framebuffer driver.
intelfb.txt
- docs for Intel 830M/845G/852GM/855GM/865G/915G/945G fb driver.
internals.txt
- quick overview of frame buffer device internals.
+lxfb.txt
+ - info on the framebuffer driver for AMD Geode LX based processors.
matroxfb.txt
- info on the Matrox framebuffer driver for Alpha, Intel and PPC.
+metronomefb.txt
+ - info on the driver for the Metronome display controller.
modedb.txt
- info on the video mode database.
-matroxfb.txt
- - info on the Matrox frame buffer driver.
pvr2fb.txt
- info on the PowerVR 2 frame buffer driver.
pxafb.txt
@@ -39,13 +47,23 @@ s3fb.txt
- info on the fbdev driver for S3 Trio/Virge chips.
sa1100fb.txt
- information about the driver for the SA-1100 LCD controller.
+sh7760fb.txt
+ - info on the SH7760/SH7763 integrated LCDC Framebuffer driver.
sisfb.txt
- info on the framebuffer device driver for various SiS chips.
sstfb.txt
- info on the frame buffer driver for 3dfx' Voodoo Graphics boards.
tgafb.txt
- - info on the TGA (DECChip 21030) frame buffer driver
+ - info on the TGA (DECChip 21030) frame buffer driver.
+tridentfb.txt
+ - info on the framebuffer driver for some Trident chip based cards.
+uvesafb.txt
+ - info on the userspace VESA (VBE2+ compliant) frame buffer device.
vesafb.txt
- - info on the VESA frame buffer device
+ - info on the VESA frame buffer device.
+viafb.modes
+ - list of modes for VIA Integration Graphic Chip.
+viafb.txt
+ - info on the VIA Integration Graphic Chip console framebuffer driver.
vt8623fb.txt
- info on the fb driver for the graphics core in VIA VT8623 chipsets.
diff --git a/Documentation/fb/viafb.txt b/Documentation/fb/viafb.txt
index f3e046a6a987..1a2e8aa3fbb1 100644
--- a/Documentation/fb/viafb.txt
+++ b/Documentation/fb/viafb.txt
@@ -197,6 +197,54 @@ Notes:
example,
# fbset -depth 16
+
+[Configure viafb via /proc]
+---------------------------
+ The following files exist in /proc/viafb
+
+ supported_output_devices
+
+ This read-only file contains a full ',' separated list of all
+ output devices that could be available on your platform. It is likely
+ that not all of those have a connector on your hardware but it should
+ provide a good starting point to figure out which of those names match
+ a real connector.
+ Example:
+ # cat /proc/viafb/supported_output_devices
+
+ iga1/output_devices
+ iga2/output_devices
+
+ These two files are readable and writable. iga1 and iga2 are the two
+ independent units that produce the screen image. Those images can be
+ forwarded to one or more output devices. Reading those files is a way
+ to query which output devices are currently used by an iga.
+ Example:
+ # cat /proc/viafb/iga1/output_devices
+ If there are no output devices printed the output of this iga is lost.
+ This can happen for example if only one (the other) iga is used.
+ Writing to these files allows adjusting the output devices during
+ runtime. One can add new devices, remove existing ones or switch
+ between igas. Essentially you can write a ',' separated list of device
+ names (or a single one) in the same format as the output to those
+ files. You can add a '+' or '-' as a prefix allowing simple addition
+ and removal of devices. So a prefix '+' adds the devices from your list
+ to the already existing ones, '-' removes the listed devices from the
+ existing ones and if no prefix is given it replaces all existing ones
+ with the listed ones. If you remove devices they are expected to turn
+ off. If you add devices that are already part of the other iga they are
+ removed there and added to the new one.
+ Examples:
+ Add CRT as output device to iga1
+ # echo +CRT > /proc/viafb/iga1/output_devices
+
+ Remove (turn off) DVP1 and LVDS1 as output devices of iga2
+ # echo -DVP1,LVDS1 > /proc/viafb/iga2/output_devices
+
+ Replace all iga1 output devices by CRT
+ # echo CRT > /proc/viafb/iga1/output_devices
+
+
[Bootup with viafb]:
--------------------
Add the following line to your grub.conf:
diff --git a/Documentation/feature-removal-schedule.txt b/Documentation/feature-removal-schedule.txt
index 9961f1564d22..6c2f55e05f13 100644
--- a/Documentation/feature-removal-schedule.txt
+++ b/Documentation/feature-removal-schedule.txt
@@ -98,7 +98,7 @@ Who: Pavel Machek <pavel@ucw.cz>
---------------------------
What: Video4Linux API 1 ioctls and from Video devices.
-When: July 2009
+When: kernel 2.6.38
Files: include/linux/videodev.h
Check: include/linux/videodev.h
Why: V4L1 API was replaced by V4L2 API during migration from 2.4 to 2.6
@@ -116,6 +116,21 @@ Who: Mauro Carvalho Chehab <mchehab@infradead.org>
---------------------------
+What: Video4Linux obsolete drivers using V4L1 API
+When: kernel 2.6.38
+Files: drivers/staging/cpia/* drivers/staging/stradis/*
+Check: drivers/staging/cpia/cpia.c drivers/staging/stradis/stradis.c
+Why: There are some drivers still using the V4L1 API, despite all the efforts
+ we've made to migrate them. Those drivers are for obsolete hardware that the
+ old maintainers no longer care about (or no longer have the hardware for), and
+ for which no other developer could find any hardware to buy. They probably have
+ no practical usage today, and people with such old hardware could probably keep
+ using an older version of the kernel. Those drivers will be moved to staging on
+ 2.6.37 and, if nobody cares enough to port and test them with the V4L2 API,
+ they'll be removed on 2.6.38.
+Who: Mauro Carvalho Chehab <mchehab@infradead.org>
+
+---------------------------
+
What: sys_sysctl
When: September 2010
Option: CONFIG_SYSCTL_SYSCALL
@@ -470,29 +485,6 @@ When: April 2011
Why: Superseded by xt_CT
Who: Netfilter developer team <netfilter-devel@vger.kernel.org>
----------------------------
-
-What: video4linux /dev/vtx teletext API support
-When: 2.6.35
-Files: drivers/media/video/saa5246a.c drivers/media/video/saa5249.c
- include/linux/videotext.h
-Why: The vtx device nodes have been superseded by vbi device nodes
- for many years. No applications exist that use the vtx support.
- Of the two i2c drivers that actually support this API the saa5249
- has been impossible to use for a year now and no known hardware
- that supports this device exists. The saa5246a is theoretically
- supported by the old mxb boards, but it never actually worked.
-
- In summary: there is no hardware that can use this API and there
- are no applications actually implementing this API.
-
- The vtx support still reserves minors 192-223 and we would really
- like to reuse those for upcoming new functionality. In the unlikely
- event that new hardware appears that wants to use the functionality
- provided by the vtx API, then that functionality should be build
- around the sliced VBI API instead.
-Who: Hans Verkuil <hverkuil@xs4all.nl>
-
----------------------------
What: IRQF_DISABLED
@@ -502,16 +494,6 @@ Who: Thomas Gleixner <tglx@linutronix.de>
----------------------------
-What: old ieee1394 subsystem (CONFIG_IEEE1394)
-When: 2.6.37
-Files: drivers/ieee1394/ except init_ohci1394_dma.c
-Why: superseded by drivers/firewire/ (CONFIG_FIREWIRE) which offers more
- features, better performance, and better security, all with smaller
- and more modern code base
-Who: Stefan Richter <stefanr@s5r6.in-berlin.de>
-
-----------------------------
-
What: The acpi_sleep=s4_nonvs command line option
When: 2.6.37
Files: arch/x86/kernel/acpi/sleep.c
@@ -536,6 +518,23 @@ Who: FUJITA Tomonori <fujita.tomonori@lab.ntt.co.jp>
----------------------------
+What: namespace cgroup (ns_cgroup)
+When: 2.6.38
+Why: The ns_cgroup leads to some problems:
+ * cgroup creation is out-of-control
+ * cgroup name can conflict when pids are looping
+ * it is not possible to have a single process handling
+ a lot of namespaces without falling into exponential creation time
+ * we may want to create a namespace without creating a cgroup
+
+ The ns_cgroup is replaced by a compatibility flag 'clone_children',
+ where a newly created cgroup will copy the parent cgroup values.
+ Userspace has to manually create a cgroup and add a task to
+ the 'tasks' file.
+Who: Daniel Lezcano <daniel.lezcano@free.fr>
+
+----------------------------
+
What: iwlwifi disable_hw_scan module parameters
When: 2.6.40
+Why: Hardware scan is the preferred method for iwlwifi devices for
@@ -545,3 +544,23 @@ Why: Hareware scan is the prefer method for iwlwifi devices for
Who: Wey-Yi Guy <wey-yi.w.guy@intel.com>
----------------------------
+
+What: access to nfsd auth cache through sys_nfsservctl or '.' files
+ in the 'nfsd' filesystem.
+When: 2.6.40
+Why: This is a legacy interface which has been replaced by a more
+ dynamic cache. Continuing to maintain this interface is an
+ unnecessary burden.
+Who: NeilBrown <neilb@suse.de>
+
+----------------------------
+
+What: i2c_adapter.id
+When: June 2011
+Why: This field is deprecated. I2C device drivers shouldn't change their
+ behavior based on the underlying I2C adapter. Instead, the I2C
+ adapter driver should instantiate the I2C devices and provide the
+ needed platform-specific information.
+Who: Jean Delvare <khali@linux-fr.org>
+
+----------------------------
diff --git a/Documentation/filesystems/00-INDEX b/Documentation/filesystems/00-INDEX
index 4303614b5add..8c624a18f67d 100644
--- a/Documentation/filesystems/00-INDEX
+++ b/Documentation/filesystems/00-INDEX
@@ -96,8 +96,6 @@ seq_file.txt
- how to use the seq_file API
sharedsubtree.txt
- a description of shared subtrees for namespaces.
-smbfs.txt
- - info on using filesystems with the SMB protocol (Win 3.11 and NT).
spufs.txt
- info and mount options for the SPU filesystem used on Cell.
sysfs-pci.txt
diff --git a/Documentation/filesystems/9p.txt b/Documentation/filesystems/9p.txt
index f9765e8cf086..b22abba78fed 100644
--- a/Documentation/filesystems/9p.txt
+++ b/Documentation/filesystems/9p.txt
@@ -111,7 +111,7 @@ OPTIONS
This can be used to share devices/named pipes/sockets between
hosts. This functionality will be expanded in later versions.
- access there are three access modes.
+ access there are four access modes.
user = if a user tries to access a file on v9fs
filesystem for the first time, v9fs sends an
attach command (Tattach) for that user.
@@ -120,6 +120,8 @@ OPTIONS
the files on the mounted filesystem
any = v9fs does single attach and performs all
operations as one user
+ client = ACL based access check on the 9p client
+ side for access validation
cachetag cache tag to use the specified persistent cache.
cache tags for existing cache sessions can be listed at
diff --git a/Documentation/filesystems/Locking b/Documentation/filesystems/Locking
index 2db4283efa8d..a91f30890011 100644
--- a/Documentation/filesystems/Locking
+++ b/Documentation/filesystems/Locking
@@ -322,7 +322,6 @@ fl_release_private: yes yes
prototypes:
int (*fl_compare_owner)(struct file_lock *, struct file_lock *);
void (*fl_notify)(struct file_lock *); /* unblock callback */
- void (*fl_copy_lock)(struct file_lock *, struct file_lock *);
void (*fl_release_private)(struct file_lock *);
void (*fl_break)(struct file_lock *); /* break_lease callback */
@@ -330,7 +329,6 @@ locking rules:
BKL may block
fl_compare_owner: yes no
fl_notify: yes no
-fl_copy_lock: yes no
fl_release_private: yes yes
fl_break: yes no
@@ -349,21 +347,36 @@ call this method upon the IO completion.
--------------------------- block_device_operations -----------------------
prototypes:
- int (*open) (struct inode *, struct file *);
- int (*release) (struct inode *, struct file *);
- int (*ioctl) (struct inode *, struct file *, unsigned, unsigned long);
+ int (*open) (struct block_device *, fmode_t);
+ int (*release) (struct gendisk *, fmode_t);
+ int (*ioctl) (struct block_device *, fmode_t, unsigned, unsigned long);
+ int (*compat_ioctl) (struct block_device *, fmode_t, unsigned, unsigned long);
+ int (*direct_access) (struct block_device *, sector_t, void **, unsigned long *);
int (*media_changed) (struct gendisk *);
+ void (*unlock_native_capacity) (struct gendisk *);
int (*revalidate_disk) (struct gendisk *);
+ int (*getgeo)(struct block_device *, struct hd_geometry *);
+ void (*swap_slot_free_notify) (struct block_device *, unsigned long);
locking rules:
- BKL bd_sem
-open: yes yes
-release: yes yes
-ioctl: yes no
+ BKL bd_mutex
+open: no yes
+release: no yes
+ioctl: no no
+compat_ioctl: no no
+direct_access: no no
media_changed: no no
+unlock_native_capacity: no no
revalidate_disk: no no
+getgeo: no no
+swap_slot_free_notify: no no (see below)
+
+media_changed, unlock_native_capacity and revalidate_disk are called only from
+check_disk_change().
+
+swap_slot_free_notify is called with swap_lock and sometimes the page lock
+held.
-The last two are called only from check_disk_change().
--------------------------- file_operations -------------------------------
prototypes:
diff --git a/Documentation/filesystems/configfs/configfs_example_explicit.c b/Documentation/filesystems/configfs/configfs_example_explicit.c
index d428cc9f07f3..fd53869f5633 100644
--- a/Documentation/filesystems/configfs/configfs_example_explicit.c
+++ b/Documentation/filesystems/configfs/configfs_example_explicit.c
@@ -89,7 +89,7 @@ static ssize_t childless_storeme_write(struct childless *childless,
char *p = (char *) page;
tmp = simple_strtoul(p, &p, 10);
- if (!p || (*p && (*p != '\n')))
+ if ((*p != '\0') && (*p != '\n'))
return -EINVAL;
if (tmp > INT_MAX)
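
The hunk above tightens the check on the character following the parsed
number; a userspace sketch of the same validation pattern (the helper name
and test values are made up for illustration) looks like this:

  #include <limits.h>
  #include <stdio.h>
  #include <stdlib.h>

  /* Accept a decimal number optionally followed by a single newline,
   * mirroring the corrected check in childless_storeme_write(). */
  static int parse_store_value(const char *page, unsigned long *out)
  {
  	char *p;
  	unsigned long tmp = strtoul(page, &p, 10);

  	if ((*p != '\0') && (*p != '\n'))
  		return -1;	/* trailing garbage */
  	if (tmp > INT_MAX)
  		return -1;
  	*out = tmp;
  	return 0;
  }

  int main(void)
  {
  	unsigned long v;
  	printf("%d\n", parse_store_value("42\n", &v));	/* 0: accepted */
  	printf("%d\n", parse_store_value("42x", &v));	/* -1: rejected */
  	return 0;
  }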
diff --git a/Documentation/filesystems/ext4.txt b/Documentation/filesystems/ext4.txt
index e1def1786e50..6ab9442d7eeb 100644
--- a/Documentation/filesystems/ext4.txt
+++ b/Documentation/filesystems/ext4.txt
@@ -353,6 +353,20 @@ noauto_da_alloc replacing existing files via patterns such as
system crashes before the delayed allocation
blocks are forced to disk.
+noinit_itable Do not initialize any uninitialized inode table
+ blocks in the background. This feature may be
+ used by installation CD's so that the install
+ process can complete as quickly as possible; the
+ inode table initialization process would then be
+ deferred until the next time the file system
+ is unmounted.
+
+init_itable=n The lazy itable init code will wait n times the
+ number of milliseconds it took to zero out the
+ previous block group's inode table. This
+ minimizes the impact on system performance
+ while the file system's inode table is being initialized.
+
discard Controls whether ext4 should issue discard/TRIM
nodiscard(*) commands to the underlying block device when
blocks are freed. This is useful for SSD devices
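
For the new noinit_itable/init_itable options above, a minimal C sketch of
how an installer might pass them at mount time (the device and mount point
are assumptions for the example; the same option string works with
"mount -o" from a shell):

  #include <stdio.h>
  #include <sys/mount.h>

  int main(void)
  {
  	/* Defer inode table zeroing so the install finishes faster. */
  	if (mount("/dev/sdb1", "/mnt/target", "ext4", 0,
  		  "noinit_itable") != 0) {
  		perror("mount");
  		return 1;
  	}
  	return 0;
  }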
diff --git a/Documentation/filesystems/nfs/00-INDEX b/Documentation/filesystems/nfs/00-INDEX
index 2f68cd688769..a57e12411d2a 100644
--- a/Documentation/filesystems/nfs/00-INDEX
+++ b/Documentation/filesystems/nfs/00-INDEX
@@ -12,5 +12,9 @@ nfs-rdma.txt
- how to install and setup the Linux NFS/RDMA client and server software
nfsroot.txt
- short guide on setting up a diskless box with NFS root filesystem.
+pnfs.txt
+ - short explanation of some of the internals of the pnfs client code
rpc-cache.txt
- introduction to the caching mechanisms in the sunrpc layer.
+idmapper.txt
+ - information for configuring request-keys to be used by idmapper
diff --git a/Documentation/filesystems/nfs/idmapper.txt b/Documentation/filesystems/nfs/idmapper.txt
new file mode 100644
index 000000000000..b9b4192ea8b5
--- /dev/null
+++ b/Documentation/filesystems/nfs/idmapper.txt
@@ -0,0 +1,67 @@
+
+=========
+ID Mapper
+=========
+Id mapper is used by NFS to translate user and group ids into names, and to
+translate user and group names into ids. Part of this translation involves
+performing an upcall to userspace to request the information. Id mapper will
+use request-key to perform this upcall and cache the result. The program
+/usr/sbin/nfs.idmap should be called by request-key, and will perform the
+translation and initialize a key with the resulting information.
+
+ NFS_USE_NEW_IDMAPPER must be selected when configuring the kernel to use this
+ feature.
+
+===========
+Configuring
+===========
+The file /etc/request-key.conf will need to be modified so /sbin/request-key can
+direct the upcall. The following line should be added:
+
+#OP TYPE DESCRIPTION CALLOUT INFO PROGRAM ARG1 ARG2 ARG3 ...
+#====== ======= =============== =============== ===============================
+create id_resolver * * /usr/sbin/nfs.idmap %k %d 600
+
+This will direct all id_resolver requests to the program /usr/sbin/nfs.idmap.
+The last parameter, 600, defines how many seconds into the future the key will
+expire. This parameter is optional for /usr/sbin/nfs.idmap. When the timeout
+is not specified, nfs.idmap will default to 600 seconds.
+
+Id mapper uses four key descriptions:
+ uid: Find the UID for the given user
+ gid: Find the GID for the given group
+ user: Find the user name for the given UID
+ group: Find the group name for the given GID
+
+You can handle any of these individually, rather than using the generic upcall
+program. If you would like to use your own program for a uid lookup then you
+would edit your request-key.conf so it looks similar to this:
+
+#OP TYPE DESCRIPTION CALLOUT INFO PROGRAM ARG1 ARG2 ARG3 ...
+#====== ======= =============== =============== ===============================
+create id_resolver uid:* * /some/other/program %k %d 600
+create id_resolver * * /usr/sbin/nfs.idmap %k %d 600
+
+Notice that the new line was added above the line for the generic program.
+request-key will find the first matching line and corresponding program. In
+this case, /some/other/program will handle all uid lookups and
+/usr/sbin/nfs.idmap will handle gid, user, and group lookups.
+
+See <file:Documentation/keys-request-keys.txt> for more information about the
+request-key function.
+
+
+=========
+nfs.idmap
+=========
+nfs.idmap is designed to be called by request-key, and should not be run "by
+hand". This program takes two arguments, a serialized key and a key
+description. The serialized key is first converted into a key_serial_t, and
+then passed as an argument to keyctl_instantiate (both are part of keyutils.h).
+
+The actual lookups are performed by functions found in nfsidmap.h. nfs.idmap
+determines the correct function to call by looking at the first part of the
+description string. For example, a uid lookup description will appear as
+"uid:user@domain".
+
+nfs.idmap will return 0 if the key was instantiated, and non-zero otherwise.
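
To illustrate what a custom callout such as /some/other/program could do,
here is a minimal C sketch built on libkeyutils; the hard-coded uid and the
use of KEY_SPEC_REQUESTOR_KEYRING are assumptions following the usual
request-key callout convention, not details taken from the text above:

  /* Build with: cc -o my_idmap my_idmap.c -lkeyutils
   * request-key passes the key serial (%k) and description (%d) as args. */
  #include <keyutils.h>
  #include <stdio.h>
  #include <stdlib.h>
  #include <string.h>

  int main(int argc, char *argv[])
  {
  	if (argc < 3)
  		return 1;

  	key_serial_t key = (key_serial_t)strtol(argv[1], NULL, 10);
  	const char *desc = argv[2];		/* e.g. "uid:user@domain" */

  	if (strncmp(desc, "uid:", 4) != 0)
  		return 1;

  	/* Placeholder lookup: a real program would resolve the name,
  	 * e.g. via getpwnam() or a directory service. */
  	const char *answer = "1000";

  	/* Instantiate the key so the kernel can cache the result. */
  	if (keyctl_instantiate(key, answer, strlen(answer),
  			       KEY_SPEC_REQUESTOR_KEYRING) < 0) {
  		perror("keyctl_instantiate");
  		return 1;
  	}
  	return 0;
  }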
diff --git a/Documentation/filesystems/nfs/nfsroot.txt b/Documentation/filesystems/nfs/nfsroot.txt
index f2430a7974e1..90c71c6f0d00 100644
--- a/Documentation/filesystems/nfs/nfsroot.txt
+++ b/Documentation/filesystems/nfs/nfsroot.txt
@@ -159,6 +159,28 @@ ip=<client-ip>:<server-ip>:<gw-ip>:<netmask>:<hostname>:<device>:<autoconf>
Default: any
+nfsrootdebug
+
+ This parameter enables debugging messages to appear in the kernel
+ log at boot time so that administrators can verify that the correct
+ NFS mount options, server address, and root path are passed to the
+ NFS client.
+
+
+rdinit=<executable file>
+
+ To specify which file contains the program that starts system
+ initialization, administrators can use this command line parameter.
+ The default value of this parameter is "/init". If the specified
+ file exists and the kernel can execute it, root filesystem related
+ kernel command line parameters, including `nfsroot=', are ignored.
+
+ A description of the process of mounting the root file system can be
+ found in:
+
+ Documentation/early-userspace/README
+
+
3.) Boot Loader
diff --git a/Documentation/filesystems/nfs/pnfs.txt b/Documentation/filesystems/nfs/pnfs.txt
new file mode 100644
index 000000000000..bc0b9cfe095b
--- /dev/null
+++ b/Documentation/filesystems/nfs/pnfs.txt
@@ -0,0 +1,48 @@
+Reference counting in pnfs:
+==========================
+
+There are several inter-related caches. We have layouts which can
+reference multiple devices, each of which can reference multiple data servers.
+Each data server can be referenced by multiple devices. Each device
+can be referenced by multiple layouts. To keep all of this straight,
+we need to reference count.
+
+
+struct pnfs_layout_hdr
+----------------------
+The on-the-wire command LAYOUTGET corresponds to struct
+pnfs_layout_segment, usually referred to by the variable name lseg.
+Each nfs_inode may hold a pointer to a cache of these layout
+segments in nfsi->layout, of type struct pnfs_layout_hdr.
+
+We reference the header for the inode pointing to it, across each
+outstanding RPC call that references it (LAYOUTGET, LAYOUTRETURN,
+LAYOUTCOMMIT), and for each lseg held within.
+
+Each header is also (when non-empty) put on a list associated with
+struct nfs_client (cl_layouts). Being put on this list does not bump
+the reference count, as the layout is kept around by the lseg that
+keeps it in the list.
+
+deviceid_cache
+--------------
+lsegs reference device ids, which are resolved per nfs_client and
+layout driver type. The device ids are held in a RCU cache (struct
+nfs4_deviceid_cache). The cache itself is referenced across each
+mount. The entries (struct nfs4_deviceid) themselves are held across
+the lifetime of each lseg referencing them.
+
+RCU is used because the deviceid is basically a write once, read many
+data structure. The hlist size of 32 buckets needs better
+justification, but seems reasonable given that we can have multiple
+deviceid's per filesystem, and multiple filesystems per nfs_client.
+
+The hash code is copied from the nfsd code base. A discussion of
+hashing and variations of this algorithm can be found at:
+http://groups.google.com/group/comp.lang.c/browse_thread/thread/9522965e2b8d3809
+
+data server cache
+-----------------
+file driver devices refer to data servers, which are kept in a module
+level cache. Its reference is held over the lifetime of the deviceid
+pointing to it.
diff --git a/Documentation/filesystems/proc.txt b/Documentation/filesystems/proc.txt
index a6aca8740883..e73df2722ff3 100644
--- a/Documentation/filesystems/proc.txt
+++ b/Documentation/filesystems/proc.txt
@@ -136,6 +136,7 @@ Table 1-1: Process specific entries in /proc
statm Process memory status information
status Process status in human readable form
wchan If CONFIG_KALLSYMS is set, a pre-decoded wchan
+ pagemap Page table
stack Report full stack trace, enable via CONFIG_STACKTRACE
smaps a extension based on maps, showing the memory consumption of
each mapping
@@ -370,17 +371,24 @@ Shared_Dirty: 0 kB
Private_Clean: 0 kB
Private_Dirty: 0 kB
Referenced: 892 kB
+Anonymous: 0 kB
Swap: 0 kB
KernelPageSize: 4 kB
MMUPageSize: 4 kB
-The first of these lines shows the same information as is displayed for the
-mapping in /proc/PID/maps. The remaining lines show the size of the mapping,
-the amount of the mapping that is currently resident in RAM, the "proportional
-set size” (divide each shared page by the number of processes sharing it), the
-number of clean and dirty shared pages in the mapping, and the number of clean
-and dirty private pages in the mapping. The "Referenced" indicates the amount
-of memory currently marked as referenced or accessed.
+The first of these lines shows the same information as is displayed for the
+mapping in /proc/PID/maps. The remaining lines show the size of the mapping
+(size), the amount of the mapping that is currently resident in RAM (RSS), the
+process' proportional share of this mapping (PSS), the number of clean and
+dirty private pages in the mapping. Note that even a page which is part of a
+MAP_SHARED mapping, but has only a single pte mapped, i.e. is currently used
+by only one process, is accounted as private and not as shared. "Referenced"
+indicates the amount of memory currently marked as referenced or accessed.
+"Anonymous" shows the amount of memory that does not belong to any file. Even
+a mapping associated with a file may contain anonymous pages: when a page in a
+MAP_PRIVATE mapping is modified, the file page is replaced by a private
+anonymous copy.
+"Swap" shows how much would-be-anonymous memory is also used, but out on
+swap.
This file is only present if the CONFIG_MMU kernel configuration option is
enabled.
@@ -397,6 +405,9 @@ To clear the bits for the file mapped pages associated with the process
> echo 3 > /proc/PID/clear_refs
Any other value written to /proc/PID/clear_refs will have no effect.
+The /proc/pid/pagemap gives the PFN, which can be used to find the pageflags
+using /proc/kpageflags and the number of times a page is mapped using
+/proc/kpagecount. For a detailed explanation, see Documentation/vm/pagemap.txt.
1.2 Kernel data
---------------
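
As a rough sketch of the pagemap lookup mentioned above (assuming the 64-bit
entry layout from Documentation/vm/pagemap.txt: bit 63 = present,
bits 0-54 = PFN), a process can find the PFN behind one of its own pages
like this:

  #include <fcntl.h>
  #include <stdint.h>
  #include <stdio.h>
  #include <stdlib.h>
  #include <unistd.h>

  int main(void)
  {
  	long page_size = sysconf(_SC_PAGESIZE);
  	char *buf = malloc(page_size);

  	buf[0] = 1;			/* touch the page so it is present */

  	int fd = open("/proc/self/pagemap", O_RDONLY);
  	if (fd < 0)
  		return 1;

  	uint64_t entry;
  	off_t off = ((uintptr_t)buf / page_size) * sizeof(entry);
  	if (pread(fd, &entry, sizeof(entry), off) != sizeof(entry))
  		return 1;

  	if (entry & (1ULL << 63))	/* bit 63: page present */
  		printf("PFN %#llx\n",
  		       (unsigned long long)(entry & ((1ULL << 55) - 1)));
  	else
  		printf("page not present\n");

  	close(fd);
  	return 0;
  }

The PFN can then be used as an index (times 8 bytes) into /proc/kpagecount
and /proc/kpageflags, which are typically readable only by root.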
diff --git a/Documentation/filesystems/sharedsubtree.txt b/Documentation/filesystems/sharedsubtree.txt
index fc0e39af43c3..4ede421c9687 100644
--- a/Documentation/filesystems/sharedsubtree.txt
+++ b/Documentation/filesystems/sharedsubtree.txt
@@ -62,10 +62,10 @@ replicas continue to be exactly same.
# mount /dev/sd0 /tmp/a
#ls /tmp/a
- t1 t2 t2
+ t1 t2 t3
#ls /mnt/a
- t1 t2 t2
+ t1 t2 t3
Note that the mount has propagated to the mount at /mnt as well.
diff --git a/Documentation/filesystems/vfs.txt b/Documentation/filesystems/vfs.txt
index ed7e5efc06d8..55c28b79d8dc 100644
--- a/Documentation/filesystems/vfs.txt
+++ b/Documentation/filesystems/vfs.txt
@@ -660,11 +660,10 @@ struct address_space_operations {
releasepage: releasepage is called on PagePrivate pages to indicate
that the page should be freed if possible. ->releasepage
should remove any private data from the page and clear the
- PagePrivate flag. It may also remove the page from the
- address_space. If this fails for some reason, it may indicate
- failure with a 0 return value.
- This is used in two distinct though related cases. The first
- is when the VM finds a clean page with no active users and
+ PagePrivate flag. If releasepage() fails for some reason, it must
+ indicate failure with a 0 return value.
+ releasepage() is used in two distinct though related cases. The
+ first is when the VM finds a clean page with no active users and
wants to make it a free page. If ->releasepage succeeds, the
page will be removed from the address_space and become free.
diff --git a/Documentation/filesystems/xfs-delayed-logging-design.txt b/Documentation/filesystems/xfs-delayed-logging-design.txt
index 96d0df28bed3..7445bf335dae 100644
--- a/Documentation/filesystems/xfs-delayed-logging-design.txt
+++ b/Documentation/filesystems/xfs-delayed-logging-design.txt
@@ -794,17 +794,6 @@ designed.
Roadmap:
-2.6.37 Remove experimental tag from mount option
- => should be roughly 6 months after initial merge
- => enough time to:
- => gain confidence and fix problems reported by early
- adopters (a.k.a. guinea pigs)
- => address worst performance regressions and undesired
- behaviours
- => start tuning/optimising code for parallelism
- => start tuning/optimising algorithms consuming
- excessive CPU time
-
2.6.39 Switch default mount option to use delayed logging
=> should be roughly 12 months after initial merge
=> enough time to shake out remaining problems before next round of
diff --git a/Documentation/gpio.txt b/Documentation/gpio.txt
index 9633da01ff46..792faa3c06cf 100644
--- a/Documentation/gpio.txt
+++ b/Documentation/gpio.txt
@@ -617,6 +617,16 @@ and have the following read/write attributes:
is configured as an output, this value may be written;
any nonzero value is treated as high.
+ If the pin can be configured as interrupt-generating
+ and if it has been configured to generate interrupts (see the
+ description of "edge"), you can poll(2) on that file and
+ poll(2) will return whenever the interrupt was triggered. If
+ you use poll(2), set the events POLLPRI and POLLERR. If you
+ use select(2), set the file descriptor in exceptfds. After
+ poll(2) returns, either lseek(2) to the beginning of the sysfs
+ file and read the new value or close the file and re-open it
+ to read the value.
+
"edge" ... reads as either "none", "rising", "falling", or
"both". Write these strings to select the signal edge(s)
that will make poll(2) on the "value" file return.
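
A minimal C sketch of the poll(2) sequence just described for gpio.txt (the
GPIO number is an assumption, and the pin must already be exported with its
"edge" attribute set):

  #include <fcntl.h>
  #include <poll.h>
  #include <stdio.h>
  #include <unistd.h>

  int main(void)
  {
  	char buf[8];
  	int fd = open("/sys/class/gpio/gpio23/value", O_RDONLY);

  	if (fd < 0)
  		return 1;
  	read(fd, buf, sizeof(buf) - 1);		/* consume the initial state */

  	struct pollfd pfd = { .fd = fd, .events = POLLPRI | POLLERR };
  	while (poll(&pfd, 1, -1) > 0) {
  		lseek(fd, 0, SEEK_SET);		/* rewind before re-reading */
  		ssize_t n = read(fd, buf, sizeof(buf) - 1);
  		if (n > 0) {
  			buf[n] = '\0';
  			printf("value: %s", buf);
  		}
  	}
  	close(fd);
  	return 0;
  }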
diff --git a/Documentation/hwmon/it87 b/Documentation/hwmon/it87
index 8d08bf0d38ed..38425f0f2645 100644
--- a/Documentation/hwmon/it87
+++ b/Documentation/hwmon/it87
@@ -22,6 +22,10 @@ Supported chips:
Prefix: 'it8720'
Addresses scanned: from Super I/O config space (8 I/O ports)
Datasheet: Not publicly available
+ * IT8721F/IT8758E
+ Prefix: 'it8721'
+ Addresses scanned: from Super I/O config space (8 I/O ports)
+ Datasheet: Not publicly available
* SiS950 [clone of IT8705F]
Prefix: 'it87'
Addresses scanned: from Super I/O config space (8 I/O ports)
@@ -67,7 +71,7 @@ Description
-----------
This driver implements support for the IT8705F, IT8712F, IT8716F,
-IT8718F, IT8720F, IT8726F and SiS950 chips.
+IT8718F, IT8720F, IT8721F, IT8726F, IT8758E and SiS950 chips.
These chips are 'Super I/O chips', supporting floppy disks, infrared ports,
joysticks and other miscellaneous stuff. For hardware monitoring, they
@@ -86,14 +90,15 @@ the driver won't notice and report changes in the VID value. The two
upper VID bits share their pins with voltage inputs (in5 and in6) so you
can't have both on a given board.
-The IT8716F, IT8718F, IT8720F and later IT8712F revisions have support for
-2 additional fans. The additional fans are supported by the driver.
+The IT8716F, IT8718F, IT8720F, IT8721F/IT8758E and later IT8712F revisions
+have support for 2 additional fans. The additional fans are supported by the
+driver.
-The IT8716F, IT8718F and IT8720F, and late IT8712F and IT8705F also have
-optional 16-bit tachometer counters for fans 1 to 3. This is better (no more
-fan clock divider mess) but not compatible with the older chips and
-revisions. The 16-bit tachometer mode is enabled by the driver when one
-of the above chips is detected.
+The IT8716F, IT8718F, IT8720F and IT8721F/IT8758E, and late IT8712F and
+IT8705F also have optional 16-bit tachometer counters for fans 1 to 3. This
+is better (no more fan clock divider mess) but not compatible with the older
+chips and revisions. The 16-bit tachometer mode is enabled by the driver when
+one of the above chips is detected.
The IT8726F is just bit enhanced IT8716F with additional hardware
for AMD power sequencing. Therefore the chip will appear as IT8716F
@@ -115,7 +120,12 @@ alarm is triggered if the voltage has crossed a programmable minimum or
maximum limit. Note that minimum in this case always means 'closest to
zero'; this is important for negative voltage measurements. All voltage
inputs can measure voltages between 0 and 4.08 volts, with a resolution of
-0.016 volt. The battery voltage in8 does not have limit registers.
+0.016 volt (except IT8721F/IT8758E: 0.012 volt.) The battery voltage in8 does
+not have limit registers.
+
+On the IT8721F/IT8758E, some voltage inputs are internal and scaled inside
+the chip (in7, in8 and optionally in3). The driver handles this transparently
+so user-space doesn't have to care.
The VID lines (IT8712F/IT8716F/IT8718F/IT8720F) encode the core voltage value:
the voltage level your processor should work with. This is hardcoded by
diff --git a/Documentation/hwmon/lm85 b/Documentation/hwmon/lm85
index b98e0e0d1910..239258a63c81 100644
--- a/Documentation/hwmon/lm85
+++ b/Documentation/hwmon/lm85
@@ -14,6 +14,10 @@ Supported chips:
Prefix: 'adt7463'
Addresses scanned: I2C 0x2c, 0x2d, 0x2e
Datasheet: http://www.onsemi.com/PowerSolutions/product.do?id=ADT7463
+ * Analog Devices ADT7468
+ Prefix: 'adt7468'
+ Addresses scanned: I2C 0x2c, 0x2d, 0x2e
+ Datasheet: http://www.onsemi.com/PowerSolutions/product.do?id=ADT7468
* SMSC EMC6D100, SMSC EMC6D101
Prefix: 'emc6d100'
Addresses scanned: I2C 0x2c, 0x2d, 0x2e
@@ -34,7 +38,7 @@ Description
-----------
This driver implements support for the National Semiconductor LM85 and
-compatible chips including the Analog Devices ADM1027, ADT7463 and
+compatible chips including the Analog Devices ADM1027, ADT7463, ADT7468 and
SMSC EMC6D10x chips family.
The LM85 uses the 2-wire interface compatible with the SMBUS 2.0
@@ -87,14 +91,22 @@ To smooth the response of fans to changes in temperature, the LM85 has an
optional filter for smoothing temperatures. The ADM1027 has the same
config option but uses it to rate limit the changes to fan speed instead.
-The ADM1027 and ADT7463 have a 10-bit ADC and can therefore measure
-temperatures with 0.25 degC resolution. They also provide an offset to the
-temperature readings that is automatically applied during measurement.
-This offset can be used to zero out any errors due to traces and placement.
-The documentation says that the offset is in 0.25 degC steps, but in
-initial testing of the ADM1027 it was 1.00 degC steps. Analog Devices has
-confirmed this "bug". The ADT7463 is reported to work as described in the
-documentation. The current lm85 driver does not show the offset register.
+The ADM1027, ADT7463 and ADT7468 have a 10-bit ADC and can therefore
+measure temperatures with 0.25 degC resolution. They also provide an offset
+to the temperature readings that is automatically applied during
+measurement. This offset can be used to zero out any errors due to traces
+and placement. The documentation says that the offset is in 0.25 degC
+steps, but in initial testing of the ADM1027 it was 1.00 degC steps. Analog
+Devices has confirmed this "bug". The ADT7463 is reported to work as
+described in the documentation. The current lm85 driver does not show the
+offset register.
+
+The ADT7468 has a high-frequency PWM mode, where all PWM outputs are
+driven by a 22.5 kHz clock. This is a global mode, not per-PWM output,
+which means that setting any PWM frequency above 11.3 kHz will switch
+all 3 PWM outputs to a 22.5 kHz frequency. Conversely, setting any PWM
+frequency below 11.3 kHz will switch all 3 PWM outputs to a frequency
+between 10 and 100 Hz, which can then be tuned separately.
See the vendor datasheets for more information. There is application note
from National (AN-1260) with some additional information about the LM85.
@@ -125,17 +137,17 @@ datasheet for a complete description of the differences. Other than
identifying the chip, the driver behaves no differently with regard to
these two chips. The LM85B is recommended for new designs.
-The ADM1027 and ADT7463 chips have an optional SMBALERT output that can be
-used to signal the chipset in case a limit is exceeded or the temperature
-sensors fail. Individual sensor interrupts can be masked so they won't
-trigger SMBALERT. The SMBALERT output if configured replaces one of the other
-functions (PWM2 or IN0). This functionality is not implemented in current
-driver.
+The ADM1027, ADT7463 and ADT7468 chips have an optional SMBALERT output
+that can be used to signal the chipset in case a limit is exceeded or the
+temperature sensors fail. Individual sensor interrupts can be masked so
+they won't trigger SMBALERT. The SMBALERT output if configured replaces one
+of the other functions (PWM2 or IN0). This functionality is not implemented
+in current driver.
-The ADT7463 also has an optional THERM output/input which can be connected
-to the processor PROC_HOT output. If available, the autofan control
-dynamic Tmin feature can be enabled to keep the system temperature within
-spec (just?!) with the least possible fan noise.
+The ADT7463 and ADT7468 also have an optional THERM output/input which can
+be connected to the processor PROC_HOT output. If available, the autofan
+control dynamic Tmin feature can be enabled to keep the system temperature
+within spec (just?!) with the least possible fan noise.
Configuration Notes
-------------------
@@ -201,8 +213,8 @@ the temperatures to compensate for systemic errors in the
measurements. These features are not currently supported by the lm85
driver.
-In addition to the ADM1027 features, the ADT7463 also has Tmin control
-and THERM asserted counts. Automatic Tmin control acts to adjust the
-Tmin value to maintain the measured temperature sensor at a specified
-temperature. There isn't much documentation on this feature in the
-ADT7463 data sheet. This is not supported by current driver.
+In addition to the ADM1027 features, the ADT7463 and ADT7468 also have
+Tmin control and THERM asserted counts. Automatic Tmin control acts to
+adjust the Tmin value to maintain the measured temperature sensor at a
+specified temperature. There isn't much documentation on this feature in
+the ADT7463 data sheet. This is not supported by current driver.
diff --git a/Documentation/hwmon/lm90 b/Documentation/hwmon/lm90
index 6a03dd4bcc94..fa475c0a48a3 100644
--- a/Documentation/hwmon/lm90
+++ b/Documentation/hwmon/lm90
@@ -63,8 +63,8 @@ Supported chips:
Datasheet: Publicly available at the Maxim website
http://www.maxim-ic.com/quick_view2.cfm/qv_pk/2578
* Maxim MAX6659
- Prefix: 'max6657'
- Addresses scanned: I2C 0x4c, 0x4d (unsupported 0x4e)
+ Prefix: 'max6659'
+ Addresses scanned: I2C 0x4c, 0x4d, 0x4e
Datasheet: Publicly available at the Maxim website
http://www.maxim-ic.com/quick_view2.cfm/qv_pk/2578
* Maxim MAX6680
@@ -84,6 +84,21 @@ Supported chips:
Addresses scanned: I2C 0x4c
Datasheet: Publicly available at the Maxim website
http://www.maxim-ic.com/quick_view2.cfm/qv_pk/3500
+ * Maxim MAX6695
+ Prefix: 'max6695'
+ Addresses scanned: I2C 0x18
+ Datasheet: Publicly available at the Maxim website
+ http://www.maxim-ic.com/datasheet/index.mvp/id/4199
+ * Maxim MAX6696
+ Prefix: 'max6695'
+ Addresses scanned: I2C 0x18, 0x19, 0x1a, 0x29, 0x2a, 0x2b,
+ 0x4c, 0x4d and 0x4e
+ Datasheet: Publicly available at the Maxim website
+ http://www.maxim-ic.com/datasheet/index.mvp/id/4199
+ * Winbond/Nuvoton W83L771W/G
+ Prefix: 'w83l771'
+ Addresses scanned: I2C 0x4c
+ Datasheet: No longer available
* Winbond/Nuvoton W83L771AWG/ASG
Prefix: 'w83l771'
Addresses scanned: I2C 0x4c
@@ -101,10 +116,11 @@ well as the temperature of up to one external diode. It is compatible
with many other devices, many of which are supported by this driver.
Note that there is no easy way to differentiate between the MAX6657,
-MAX6658 and MAX6659 variants. The extra address and features of the
-MAX6659 are not supported by this driver. The MAX6680 and MAX6681 only
-differ in their pinout, therefore they obviously can't (and don't need to)
-be distinguished.
+MAX6658 and MAX6659 variants. The extra features of the MAX6659 are only
+supported by this driver if the chip is located at address 0x4d or 0x4e,
+or if the chip type is explicitly selected as max6659.
+The MAX6680 and MAX6681 only differ in their pinout, therefore they obviously
+can't (and don't need to) be distinguished.
The specificity of this family of chipsets over the ADM1021/LM84
family is that it features critical limits with hysteresis, and an
@@ -151,11 +167,21 @@ MAX6680 and MAX6681:
* Selectable address
* Remote sensor type selection
+MAX6695 and MAX6696:
+ * Better local resolution
+ * Selectable address (max6696)
+ * Second critical temperature limit
+ * Two remote sensors
+
+W83L771W/G
+ * The G variant is lead-free, otherwise similar to the W.
+ * Filter and alert configuration register at 0xBF
+ * Moving average (depending on conversion rate)
+
W83L771AWG/ASG
+ * Successor of the W83L771W/G, same features.
* The AWG and ASG variants only differ in package format.
- * Filter and alert configuration register at 0xBF
* Diode ideality factor configuration (remote sensor) at 0xE3
- * Moving average (depending on conversion rate)
All temperature values are given in degrees Celsius. Resolution
is 1.0 degree for the local temperature, 0.125 degree for the remote
diff --git a/Documentation/hwmon/lm93 b/Documentation/hwmon/lm93
index ac711f357faf..7a10616d0b44 100644
--- a/Documentation/hwmon/lm93
+++ b/Documentation/hwmon/lm93
@@ -11,7 +11,7 @@ Authors:
Mark M. Hoffman <mhoffman@lightlink.com>
Ported to 2.6 by Eric J. Bowersox <ericb@aspsys.com>
Adapted to 2.6.20 by Carsten Emde <ce@osadl.org>
- Modified for mainline integration by Hans J. Koch <hjk@linutronix.de>
+ Modified for mainline integration by Hans J. Koch <hjk@hansjkoch.de>
Module Parameters
-----------------
diff --git a/Documentation/hwmon/ltc4261 b/Documentation/hwmon/ltc4261
new file mode 100644
index 000000000000..eba2e2c4b94d
--- /dev/null
+++ b/Documentation/hwmon/ltc4261
@@ -0,0 +1,63 @@
+Kernel driver ltc4261
+=====================
+
+Supported chips:
+ * Linear Technology LTC4261
+ Prefix: 'ltc4261'
+ Addresses scanned: -
+ Datasheet:
+ http://cds.linear.com/docs/Datasheet/42612fb.pdf
+
+Author: Guenter Roeck <guenter.roeck@ericsson.com>
+
+
+Description
+-----------
+
+The LTC4261/LTC4261-2 negative voltage Hot Swap controllers allow a board
+to be safely inserted and removed from a live backplane.
+
+
+Usage Notes
+-----------
+
+This driver does not probe for LTC4261 devices, since there is no register
+which can be safely used to identify the chip. You will have to instantiate
+the devices explicitly.
+
+Example: the following will load the driver for an LTC4261 at address 0x10
+on I2C bus #1:
+$ modprobe ltc4261
+$ echo ltc4261 0x10 > /sys/bus/i2c/devices/i2c-1/new_device
+
+
+Sysfs entries
+-------------
+
+Voltage readings provided by this driver are reported as obtained from the ADC
+registers. If a set of voltage divider resistors is installed, calculate the
+real voltage by multiplying the reported value with (R1+R2)/R2, where R1 is the
+value of the divider resistor against the measured voltage and R2 is the value
+of the divider resistor against Ground.
+
+Current reading provided by this driver is reported as obtained from the ADC
+Current Sense register. The reported value assumes that a 1 mOhm sense resistor
+is installed. If a different sense resistor is installed, calculate the real
+current by dividing the reported value by the sense resistor value in mOhm.
+
+The chip has two voltage sensors, but only one set of voltage alarm status bits.
+In many designs, those alarms are associated with the ADIN2 sensor, due to
+the proximity of the ADIN2 pin to the OV pin. ADIN2 is, however, not available
+on all chip variants. To ensure that the alarm condition is reported to the user,
+report it with both voltage sensors.
+
+in1_input ADIN2 voltage (mV)
+in1_min_alarm ADIN/ADIN2 Undervoltage alarm
+in1_max_alarm ADIN/ADIN2 Overvoltage alarm
+
+in2_input ADIN voltage (mV)
+in2_min_alarm ADIN/ADIN2 Undervoltage alarm
+in2_max_alarm ADIN/ADIN2 Overvoltage alarm
+
+curr1_input SENSE current (mA)
+curr1_alarm SENSE overcurrent alarm
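
The scaling rules described above amount to a couple of one-line
calculations; here is a small C sketch (the divider and sense-resistor
values are board-specific assumptions):

  #include <stdio.h>

  /* R1 is the divider resistor against the measured voltage,
   * R2 the divider resistor against ground, as described above. */
  static double real_voltage_mv(double reported_mv, double r1, double r2)
  {
  	return reported_mv * (r1 + r2) / r2;
  }

  /* The driver assumes a 1 mOhm sense resistor; divide by the real value. */
  static double real_current_ma(double reported_ma, double sense_mohm)
  {
  	return reported_ma / sense_mohm;
  }

  int main(void)
  {
  	printf("in1:   %.1f mV\n", real_voltage_mv(430.0, 100000.0, 10000.0));
  	printf("curr1: %.1f mA\n", real_current_ma(500.0, 2.0));
  	return 0;
  }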
diff --git a/Documentation/hwmon/max6650 b/Documentation/hwmon/max6650
index 8be7beb9e3e8..c565650fcfc6 100644
--- a/Documentation/hwmon/max6650
+++ b/Documentation/hwmon/max6650
@@ -8,7 +8,7 @@ Supported chips:
Datasheet: http://pdfserv.maxim-ic.com/en/ds/MAX6650-MAX6651.pdf
Authors:
- Hans J. Koch <hjk@linutronix.de>
+ Hans J. Koch <hjk@hansjkoch.de>
John Morris <john.morris@spirentcom.com>
Claus Gindhart <claus.gindhart@kontron.com>
diff --git a/Documentation/hwmon/pcf8591 b/Documentation/hwmon/pcf8591
index e76a7892f68e..ac020b3bb7b3 100644
--- a/Documentation/hwmon/pcf8591
+++ b/Documentation/hwmon/pcf8591
@@ -4,7 +4,7 @@ Kernel driver pcf8591
Supported chips:
* Philips/NXP PCF8591
Prefix: 'pcf8591'
- Addresses scanned: I2C 0x48 - 0x4f
+ Addresses scanned: none
Datasheet: Publicly available at the NXP website
http://www.nxp.com/pip/PCF8591_6.html
@@ -58,18 +58,16 @@ Module parameters
Accessing PCF8591 via /sys interface
-------------------------------------
-! Be careful !
-The PCF8591 is plainly impossible to detect! Stupid chip.
-So every chip with address in the interval [0x48..0x4f] is
-detected as PCF8591. If you have other chips in this address
-range, the workaround is to load this module after the one
-for your others chips.
+The PCF8591 is plainly impossible to detect! Thus the driver won't even
+try. You have to explicitly instantiate the device at the relevant
+address (in the interval [0x48..0x4f]) either through platform data, or
+using the sysfs interface. See Documentation/i2c/instantiating-devices
+for details.
-On detection (i.e. insmod, modprobe et al.), directories are being
-created for each detected PCF8591:
+Directories are being created for each instantiated PCF8591:
/sys/bus/i2c/devices/<0>-<1>/
-where <0> is the bus the chip was detected on (e. g. i2c-0)
+where <0> is the bus the chip is connected to (e. g. i2c-0)
and <1> the chip address ([48..4f])
Inside these directories, there are such files:
diff --git a/Documentation/hwmon/sysfs-interface b/Documentation/hwmon/sysfs-interface
index 48ceabedf55d..645699010551 100644
--- a/Documentation/hwmon/sysfs-interface
+++ b/Documentation/hwmon/sysfs-interface
@@ -309,6 +309,20 @@ temp[1-*]_crit_hyst
from the critical value.
RW
+temp[1-*]_emergency
+ Temperature emergency max value, for chips supporting more than
+ two upper temperature limits. Must be equal or greater than
+ corresponding temp_crit values.
+ Unit: millidegree Celsius
+ RW
+
+temp[1-*]_emergency_hyst
+ Temperature hysteresis value for emergency limit.
+ Unit: millidegree Celsius
+ Must be reported as an absolute temperature, NOT a delta
+ from the emergency value.
+ RW
+
temp[1-*]_lcrit Temperature critical min value, typically lower than
corresponding temp_min values.
Unit: millidegree Celsius
@@ -505,6 +519,7 @@ fan[1-*]_max_alarm
temp[1-*]_min_alarm
temp[1-*]_max_alarm
temp[1-*]_crit_alarm
+temp[1-*]_emergency_alarm
Limit alarm
0: no alarm
1: alarm
diff --git a/Documentation/i2c/busses/i2c-i801 b/Documentation/i2c/busses/i2c-i801
index e307914a3eda..93fe76e56522 100644
--- a/Documentation/i2c/busses/i2c-i801
+++ b/Documentation/i2c/busses/i2c-i801
@@ -15,10 +15,14 @@ Supported adapters:
* Intel 82801I (ICH9)
* Intel EP80579 (Tolapai)
* Intel 82801JI (ICH10)
- * Intel 3400/5 Series (PCH)
+ * Intel 5/3400 Series (PCH)
* Intel Cougar Point (PCH)
+ * Intel Patsburg (PCH)
Datasheets: Publicly available at the Intel website
+On Intel Patsburg and later chipsets, both the normal host SMBus controller
+and the additional 'Integrated Device Function' controllers are supported.
+
Authors:
Mark Studebaker <mdsxyz123@yahoo.com>
Jean Delvare <khali@linux-fr.org>
diff --git a/Documentation/ioctl/ioctl-number.txt b/Documentation/ioctl/ioctl-number.txt
index 33223ff121d8..63ffd78824d8 100644
--- a/Documentation/ioctl/ioctl-number.txt
+++ b/Documentation/ioctl/ioctl-number.txt
@@ -259,7 +259,7 @@ Code Seq#(hex) Include File Comments
't' 00-7F linux/if_ppp.h
't' 80-8F linux/isdn_ppp.h
't' 90 linux/toshiba.h
-'u' 00-1F linux/smb_fs.h
+'u' 00-1F linux/smb_fs.h gone
'v' all linux/videodev.h conflict!
'v' 00-1F linux/ext2_fs.h conflict!
'v' 00-1F linux/fs.h conflict!
@@ -278,7 +278,6 @@ Code Seq#(hex) Include File Comments
<mailto:oe@port.de>
'z' 10-4F drivers/s390/crypto/zcrypt_api.h conflict!
0x80 00-1F linux/fb.h
-0x81 00-1F linux/videotext.h
0x88 00-3F media/ovcamchip.h
0x89 00-06 arch/x86/include/asm/sockios.h
0x89 0B-DF linux/sockios.h
diff --git a/Documentation/kbuild/kconfig-language.txt b/Documentation/kbuild/kconfig-language.txt
index b472e4e0ba67..2fe93ca7c77c 100644
--- a/Documentation/kbuild/kconfig-language.txt
+++ b/Documentation/kbuild/kconfig-language.txt
@@ -322,7 +322,8 @@ mainmenu:
"mainmenu" <prompt>
This sets the config program's title bar if the config program chooses
-to use it.
+to use it. It should be placed at the top of the configuration, before any
+other statement.
Kconfig hints
diff --git a/Documentation/kbuild/makefiles.txt b/Documentation/kbuild/makefiles.txt
index c787ae512120..0ef00bd6e54d 100644
--- a/Documentation/kbuild/makefiles.txt
+++ b/Documentation/kbuild/makefiles.txt
@@ -776,6 +776,13 @@ This will delete the directory debian, including all subdirectories.
Kbuild will assume the directories to be in the same relative path as the
Makefile if no absolute path is specified (path does not start with '/').
+To exclude certain files from make clean, use the $(no-clean-files) variable.
+This is only a special case used in the top level Kbuild file:
+
+ Example:
+ #Kbuild
+ no-clean-files := $(bounds-file) $(offsets-file)
+
Usually kbuild descends down in subdirectories due to "obj-* := dir/",
but in the architecture makefiles where the kbuild infrastructure
is not sufficient this sometimes needs to be explicit.
diff --git a/Documentation/kbuild/modules.txt b/Documentation/kbuild/modules.txt
index 0767cf69c69e..3fb39e0116b4 100644
--- a/Documentation/kbuild/modules.txt
+++ b/Documentation/kbuild/modules.txt
@@ -1,215 +1,185 @@
+Building External Modules
-In this document you will find information about:
-- how to build external modules
-- how to make your module use the kbuild infrastructure
-- how kbuild will install a kernel
-- how to install modules in a non-standard location
+This document describes how to build an out-of-tree kernel module.
=== Table of Contents
=== 1 Introduction
- === 2 How to build external modules
- --- 2.1 Building external modules
- --- 2.2 Available targets
- --- 2.3 Available options
- --- 2.4 Preparing the kernel tree for module build
- --- 2.5 Building separate files for a module
- === 3. Example commands
- === 4. Creating a kbuild file for an external module
- === 5. Include files
- --- 5.1 How to include files from the kernel include dir
- --- 5.2 External modules using an include/ dir
- --- 5.3 External modules using several directories
- === 6. Module installation
- --- 6.1 INSTALL_MOD_PATH
- --- 6.2 INSTALL_MOD_DIR
- === 7. Module versioning & Module.symvers
- --- 7.1 Symbols from the kernel (vmlinux + modules)
- --- 7.2 Symbols and external modules
- --- 7.3 Symbols from another external module
- === 8. Tips & Tricks
- --- 8.1 Testing for CONFIG_FOO_BAR
+ === 2 How to Build External Modules
+ --- 2.1 Command Syntax
+ --- 2.2 Options
+ --- 2.3 Targets
+ --- 2.4 Building Separate Files
+ === 3. Creating a Kbuild File for an External Module
+ --- 3.1 Shared Makefile
+ --- 3.2 Separate Kbuild file and Makefile
+ --- 3.3 Binary Blobs
+ --- 3.4 Building Multiple Modules
+ === 4. Include Files
+ --- 4.1 Kernel Includes
+ --- 4.2 Single Subdirectory
+ --- 4.3 Several Subdirectories
+ === 5. Module Installation
+ --- 5.1 INSTALL_MOD_PATH
+ --- 5.2 INSTALL_MOD_DIR
+ === 6. Module Versioning
+ --- 6.1 Symbols From the Kernel (vmlinux + modules)
+ --- 6.2 Symbols and External Modules
+ --- 6.3 Symbols From Another External Module
+ === 7. Tips & Tricks
+ --- 7.1 Testing for CONFIG_FOO_BAR
=== 1. Introduction
-kbuild includes functionality for building modules both
-within the kernel source tree and outside the kernel source tree.
-The latter is usually referred to as external or "out-of-tree"
-modules and is used both during development and for modules that
-are not planned to be included in the kernel tree.
+"kbuild" is the build system used by the Linux kernel. Modules must use
+kbuild to stay compatible with changes in the build infrastructure and
+to pick up the right flags to "gcc." Functionality for building modules
+both in-tree and out-of-tree is provided. The method for building
+either is similar, and all modules are initially developed and built
+out-of-tree.
-What is covered within this file is mainly information to authors
-of modules. The author of an external module should supply
-a makefile that hides most of the complexity, so one only has to type
-'make' to build the module. A complete example will be presented in
-chapter 4, "Creating a kbuild file for an external module".
+Covered in this document is information aimed at developers interested
+in building out-of-tree (or "external") modules. The author of an
+external module should supply a makefile that hides most of the
+complexity, so one only has to type "make" to build the module. This is
+easily accomplished, and a complete example will be presented in
+section 3.
-=== 2. How to build external modules
+=== 2. How to Build External Modules
-kbuild offers functionality to build external modules, with the
-prerequisite that there is a pre-built kernel available with full source.
-A subset of the targets available when building the kernel is available
-when building an external module.
+To build external modules, you must have a prebuilt kernel available
+that contains the configuration and header files used in the build.
+Also, the kernel must have been built with modules enabled. If you are
+using a distribution kernel, there will be a package for the kernel you
+are running provided by your distribution.
---- 2.1 Building external modules
+An alternative is to use the "make" target "modules_prepare." This will
+make sure the kernel contains the information required. The target
+exists solely as a simple way to prepare a kernel source tree for
+building external modules.
- Use the following command to build an external module:
+NOTE: "modules_prepare" will not build Module.symvers even if
+CONFIG_MODVERSIONS is set; therefore, a full kernel build needs to be
+executed to make module versioning work.
- make -C <path-to-kernel> M=`pwd`
+--- 2.1 Command Syntax
- For the running kernel use:
+ The command to build an external module is:
- make -C /lib/modules/`uname -r`/build M=`pwd`
+ $ make -C <path_to_kernel_src> M=$PWD
- For the above command to succeed, the kernel must have been
- built with modules enabled.
+ The kbuild system knows that an external module is being built
+ due to the "M=<dir>" option given in the command.
- To install the modules that were just built:
+ To build against the running kernel use:
- make -C <path-to-kernel> M=`pwd` modules_install
+ $ make -C /lib/modules/`uname -r`/build M=$PWD
- More complex examples will be shown later, the above should
- be enough to get you started.
+ Then to install the module(s) just built, add the target
+ "modules_install" to the command:
---- 2.2 Available targets
+ $ make -C /lib/modules/`uname -r`/build M=$PWD modules_install
- $KDIR refers to the path to the kernel source top-level directory
+--- 2.2 Options
- make -C $KDIR M=`pwd`
- Will build the module(s) located in current directory.
- All output files will be located in the same directory
- as the module source.
- No attempts are made to update the kernel source, and it is
- a precondition that a successful make has been executed
- for the kernel.
+ ($KDIR refers to the path of the kernel source directory.)
- make -C $KDIR M=`pwd` modules
- The modules target is implied when no target is given.
- Same functionality as if no target was specified.
- See description above.
+ make -C $KDIR M=$PWD
- make -C $KDIR M=`pwd` modules_install
- Install the external module(s).
- Installation default is in /lib/modules/<kernel-version>/extra,
- but may be prefixed with INSTALL_MOD_PATH - see separate
- chapter.
+ -C $KDIR
+ The directory where the kernel source is located.
+ "make" will actually change to the specified directory
+ when executing and will change back when finished.
- make -C $KDIR M=`pwd` clean
- Remove all generated files for the module - the kernel
- source directory is not modified.
+ M=$PWD
+ Informs kbuild that an external module is being built.
+ The value given to "M" is the absolute path of the
+ directory where the external module (kbuild file) is
+ located.
- make -C $KDIR M=`pwd` help
- help will list the available target when building external
- modules.
+--- 2.3 Targets
---- 2.3 Available options:
+ When building an external module, only a subset of the "make"
+ targets are available.
- $KDIR refers to the path to the kernel source top-level directory
+ make -C $KDIR M=$PWD [target]
- make -C $KDIR
- Used to specify where to find the kernel source.
- '$KDIR' represent the directory where the kernel source is.
- Make will actually change directory to the specified directory
- when executed but change back when finished.
+ The default will build the module(s) located in the current
+ directory, so a target does not need to be specified. All
+ output files will also be generated in this directory. No
+ attempts are made to update the kernel source, and it is a
+ precondition that a successful "make" has been executed for the
+ kernel.
- make -C $KDIR M=`pwd`
- M= is used to tell kbuild that an external module is
- being built.
- The option given to M= is the directory where the external
- module (kbuild file) is located.
- When an external module is being built only a subset of the
- usual targets are available.
+ modules
+ The default target for external modules. It has the
+ same functionality as if no target was specified. See
+ description above.
- make -C $KDIR SUBDIRS=`pwd`
- Same as M=. The SUBDIRS= syntax is kept for backwards
- compatibility.
+ modules_install
+ Install the external module(s). The default location is
+ /lib/modules/<kernel_release>/extra/, but a prefix may
+ be added with INSTALL_MOD_PATH (discussed in section 5).
---- 2.4 Preparing the kernel tree for module build
+ clean
+ Remove all generated files in the module directory only.
- To make sure the kernel contains the information required to
- build external modules the target 'modules_prepare' must be used.
- 'modules_prepare' exists solely as a simple way to prepare
- a kernel source tree for building external modules.
- Note: modules_prepare will not build Module.symvers even if
- CONFIG_MODVERSIONS is set. Therefore a full kernel build
- needs to be executed to make module versioning work.
+ help
+ List the available targets for external modules.
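+
+ Putting the above together, a typical cycle might look like this
+ (a sketch; the install step usually needs root privileges):
+
+ $ make -C /lib/modules/`uname -r`/build M=$PWD modules
+ $ sudo make -C /lib/modules/`uname -r`/build M=$PWD modules_install
+ $ make -C /lib/modules/`uname -r`/build M=$PWD clean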
---- 2.5 Building separate files for a module
- It is possible to build single files which are part of a module.
- This works equally well for the kernel, a module and even for
- external modules.
- Examples (module foo.ko, consist of bar.o, baz.o):
- make -C $KDIR M=`pwd` bar.lst
- make -C $KDIR M=`pwd` bar.o
- make -C $KDIR M=`pwd` foo.ko
- make -C $KDIR M=`pwd` /
-
-
-=== 3. Example commands
-
-This example shows the actual commands to be executed when building
-an external module for the currently running kernel.
-In the example below, the distribution is supposed to use the
-facility to locate output files for a kernel compile in a different
-directory than the kernel source - but the examples will also work
-when the source and the output files are mixed in the same directory.
+--- 2.4 Building Separate Files
-# Kernel source
-/lib/modules/<kernel-version>/source -> /usr/src/linux-<version>
-
-# Output from kernel compile
-/lib/modules/<kernel-version>/build -> /usr/src/linux-<version>-up
-
-Change to the directory where the kbuild file is located and execute
-the following commands to build the module:
+ It is possible to build single files that are part of a module.
+ This works equally well for the kernel, a module, and even for
+ external modules.
- cd /home/user/src/module
- make -C /usr/src/`uname -r`/source \
- O=/lib/modules/`uname-r`/build \
- M=`pwd`
+ Example (the module foo.ko consists of bar.o and baz.o):
+ make -C $KDIR M=$PWD bar.lst
+ make -C $KDIR M=$PWD baz.o
+ make -C $KDIR M=$PWD foo.ko
+ make -C $KDIR M=$PWD /
-Then, to install the module use the following command:
- make -C /usr/src/`uname -r`/source \
- O=/lib/modules/`uname-r`/build \
- M=`pwd` \
- modules_install
+=== 3. Creating a Kbuild File for an External Module
-If you look closely you will see that this is the same command as
-listed before - with the directories spelled out.
+In the last section we saw the command to build a module for the
+running kernel. The module is not actually built, however, because a
+build file is required. Contained in this file will be the name of
+the module(s) being built, along with the list of requisite source
+files. The file may be as simple as a single line:
-The above are rather long commands, and the following chapter
-lists a few tricks to make it all easier.
+ obj-m := <module_name>.o
+The kbuild system will build <module_name>.o from <module_name>.c,
+and, after linking, will result in the kernel module <module_name>.ko.
+The above line can be put in either a "Kbuild" file or a "Makefile."
+When the module is built from multiple sources, an additional line is
+needed listing the files:
-=== 4. Creating a kbuild file for an external module
+ <module_name>-y := <src1>.o <src2>.o ...
-kbuild is the build system for the kernel, and external modules
-must use kbuild to stay compatible with changes in the build system
-and to pick up the right flags to gcc etc.
+NOTE: Further documentation describing the syntax used by kbuild is
+located in Documentation/kbuild/makefiles.txt.
-The kbuild file used as input shall follow the syntax described
-in Documentation/kbuild/makefiles.txt. This chapter will introduce a few
-more tricks to be used when dealing with external modules.
+The examples below demonstrate how to create a build file for the
+module 8123.ko, which is built from the following files:
-In the following a Makefile will be created for a module with the
-following files:
8123_if.c
8123_if.h
8123_pci.c
8123_bin.o_shipped <= Binary blob
---- 4.1 Shared Makefile for module and kernel
+--- 3.1 Shared Makefile
- An external module always includes a wrapper Makefile supporting
- building the module using 'make' with no arguments.
- The Makefile provided will most likely include additional
- functionality such as test targets etc. and this part shall
- be filtered away from kbuild since it may impact kbuild if
- name clashes occurs.
+ An external module always includes a wrapper makefile that
+ supports building the module using "make" with no arguments.
+ This target is not used by kbuild; it is only for convenience.
+ Additional functionality, such as test targets, can be included
+ but should be filtered out from kbuild due to possible name
+ clashes.
Example 1:
--> filename: Makefile
@@ -219,11 +189,11 @@ following files:
8123-y := 8123_if.o 8123_pci.o 8123_bin.o
else
- # Normal Makefile
+ # normal makefile
+ KDIR ?= /lib/modules/`uname -r`/build
- KERNELDIR := /lib/modules/`uname -r`/build
- all::
- $(MAKE) -C $(KERNELDIR) M=`pwd` $@
+ default:
+ $(MAKE) -C $(KDIR) M=$$PWD
# Module specific targets
genbin:
@@ -231,15 +201,20 @@ following files:
endif
- In example 1, the check for KERNELRELEASE is used to separate
- the two parts of the Makefile. kbuild will only see the two
- assignments whereas make will see everything except the two
- kbuild assignments.
+ The check for KERNELRELEASE is used to separate the two parts
+ of the makefile. In the example, kbuild will only see the two
+ assignments, whereas "make" will see everything except these
+ two assignments. This is due to two passes made on the file:
+ the first pass is by the "make" instance run on the command
+ line; the second pass is by the kbuild system, which is
+ initiated by the parameterized "make" in the default target.
+
+--- 3.2 Separate Kbuild File and Makefile
- In recent versions of the kernel, kbuild will look for a file named
- Kbuild and as second option look for a file named Makefile.
- Utilising the Kbuild file makes us split up the Makefile in example 1
- into two files as shown in example 2:
+ In newer versions of the kernel, kbuild will first look for a
+ file named "Kbuild," and only if that is not found, will it
+ then look for a makefile. Utilizing a "Kbuild" file allows us
+ to split up the makefile from example 1 into two files:
Example 2:
--> filename: Kbuild
@@ -247,20 +222,21 @@ following files:
8123-y := 8123_if.o 8123_pci.o 8123_bin.o
--> filename: Makefile
- KERNELDIR := /lib/modules/`uname -r`/build
- all::
- $(MAKE) -C $(KERNELDIR) M=`pwd` $@
+ KDIR ?= /lib/modules/`uname -r`/build
+
+ default:
+ $(MAKE) -C $(KDIR) M=$$PWD
# Module specific targets
genbin:
echo "X" > 8123_bin.o_shipped
+ The split in example 2 is questionable due to the simplicity of
+ each file; however, some external modules use makefiles
+ consisting of several hundred lines, and here it really pays
+ off to separate the kbuild part from the rest.
- In example 2, we are down to two fairly simple files and for simple
- files as used in this example the split is questionable. But some
- external modules use Makefiles of several hundred lines and here it
- really pays off to separate the kbuild part from the rest.
- Example 3 shows a backward compatible version.
+ The next example shows a backward compatible version.
Example 3:
--> filename: Kbuild
@@ -269,13 +245,15 @@ following files:
--> filename: Makefile
ifneq ($(KERNELRELEASE),)
+ # kbuild part of makefile
include Kbuild
+
else
- # Normal Makefile
+ # normal makefile
+ KDIR ?= /lib/modules/`uname -r`/build
- KERNELDIR := /lib/modules/`uname -r`/build
- all::
- $(MAKE) -C $(KERNELDIR) M=`pwd` $@
+ default:
+ $(MAKE) -C $(KDIR) M=$$PWD
# Module specific targets
genbin:
@@ -283,260 +261,271 @@ following files:
endif
- The trick here is to include the Kbuild file from Makefile, so
- if an older version of kbuild picks up the Makefile, the Kbuild
- file will be included.
+ Here the "Kbuild" file is included from the makefile. This
+ allows an older version of kbuild, which only knows of
+ makefiles, to be used when the "make" and kbuild parts are
+ split into separate files.
---- 4.2 Binary blobs included in a module
+--- 3.3 Binary Blobs
- Some external modules needs to include a .o as a blob. kbuild
- has support for this, but requires the blob file to be named
- <filename>_shipped. In our example the blob is named
- 8123_bin.o_shipped and when the kbuild rules kick in the file
- 8123_bin.o is created as a simple copy off the 8213_bin.o_shipped file
- with the _shipped part stripped of the filename.
- This allows the 8123_bin.o filename to be used in the assignment to
- the module.
+ Some external modules need to include an object file as a blob.
+ kbuild has support for this, but requires the blob file to be
+ named <filename>_shipped. When the kbuild rules kick in, a copy
+ of <filename>_shipped is created with _shipped stripped off,
+ giving us <filename>. This shortened filename can be used in
+ the assignment to the module.
+
+ Throughout this section, 8123_bin.o_shipped has been used to
+ build the kernel module 8123.ko; it has been included as
+ 8123_bin.o.
- Example 4:
- obj-m := 8123.o
8123-y := 8123_if.o 8123_pci.o 8123_bin.o
- In example 4, there is no distinction between the ordinary .c/.h files
- and the binary file. But kbuild will pick up different rules to create
- the .o file.
+ Although there is no distinction between the ordinary source
+ files and the binary file, kbuild will pick up different rules
+ when creating the object file for the module.
+
+--- 3.4 Building Multiple Modules
+ kbuild supports building multiple modules with a single build
+ file. For example, if you wanted to build two modules, foo.ko
+ and bar.ko, the kbuild lines would be:
-=== 5. Include files
+ obj-m := foo.o bar.o
+ foo-y := <foo_srcs>
+ bar-y := <bar_srcs>
-Include files are a necessity when a .c file uses something from other .c
-files (not strictly in the sense of C, but if good programming practice is
-used). Any module that consists of more than one .c file will have a .h file
-for one of the .c files.
+ It is that simple!
-- If the .h file only describes a module internal interface, then the .h file
- shall be placed in the same directory as the .c files.
-- If the .h files describe an interface used by other parts of the kernel
- located in different directories, the .h files shall be located in
- include/linux/ or other include/ directories as appropriate.
-One exception for this rule is larger subsystems that have their own directory
-under include/ such as include/scsi. Another exception is arch-specific
-.h files which are located under include/asm-$(ARCH)/*.
+=== 4. Include Files
-External modules have a tendency to locate include files in a separate include/
-directory and therefore need to deal with this in their kbuild file.
+Within the kernel, header files are kept in standard locations
+according to the following rule:
---- 5.1 How to include files from the kernel include dir
+ * If the header file only describes the internal interface of a
+ module, then the file is placed in the same directory as the
+ source files.
+ * If the header file describes an interface used by other parts
+ of the kernel that are located in different directories, then
+ the file is placed in include/linux/.
- When a module needs to include a file from include/linux/, then one
- just uses:
+ NOTE: There are two notable exceptions to this rule: larger
+ subsystems have their own directory under include/, such as
+ include/scsi; and architecture specific headers are located
+ under arch/$(ARCH)/include/.
- #include <linux/modules.h>
+--- 4.1 Kernel Includes
- kbuild will make sure to add options to gcc so the relevant
- directories are searched.
- Likewise for .h files placed in the same directory as the .c file.
+ To include a header file located under include/linux/, simply
+ use:
- #include "8123_if.h"
+ #include <linux/module.h>
- will do the job.
+ kbuild will add options to "gcc" so the relevant directories
+ are searched.
---- 5.2 External modules using an include/ dir
+--- 4.2 Single Subdirectory
- External modules often locate their .h files in a separate include/
- directory although this is not usual kernel style. When an external
- module uses an include/ dir then kbuild needs to be told so.
- The trick here is to use either EXTRA_CFLAGS (take effect for all .c
- files) or CFLAGS_$F.o (take effect only for a single file).
+ External modules tend to place header files in a separate
+ include/ directory where their source is located, although this
+ is not the usual kernel style. To inform kbuild of the
+ directory, use either ccflags-y or CFLAGS_<filename>.o.
- In our example, if we move 8123_if.h to a subdirectory named include/
- the resulting Kbuild file would look like:
+ Using the example from section 3, if we moved 8123_if.h to a
+ subdirectory named include, the resulting kbuild file would
+ look like:
--> filename: Kbuild
- obj-m := 8123.o
+ obj-m := 8123.o
- EXTRA_CFLAGS := -Iinclude
+ ccflags-y := -Iinclude
8123-y := 8123_if.o 8123_pci.o 8123_bin.o
- Note that in the assignment there is no space between -I and the path.
- This is a kbuild limitation: there must be no space present.
+ Note that in the assignment there is no space between -I and
+ the path. This is a limitation of kbuild: there must be no
+ space present.
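+
+ As a sketch, the per-file variant mentioned above would look like
+ this (it only affects the compilation of the named object):
+
+ CFLAGS_8123_if.o := -Iinclude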
---- 5.3 External modules using several directories
-
- If an external module does not follow the usual kernel style, but
- decides to spread files over several directories, then kbuild can
- handle this too.
+--- 4.3 Several Subdirectories
+ kbuild can handle files that are spread over several directories.
Consider the following example:
- |
- +- src/complex_main.c
- | +- hal/hardwareif.c
- | +- hal/include/hardwareif.h
- +- include/complex.h
-
- To build a single module named complex.ko, we then need the following
+ .
+ |__ src
+ | |__ complex_main.c
+ | |__ hal
+ | |__ hardwareif.c
+ | |__ include
+ | |__ hardwareif.h
+ |__ include
+ |__ complex.h
+
+ To build the module complex.ko, we then need the following
kbuild file:
- Kbuild:
+ --> filename: Kbuild
obj-m := complex.o
complex-y := src/complex_main.o
complex-y += src/hal/hardwareif.o
- EXTRA_CFLAGS := -I$(src)/include
- EXTRA_CFLAGS += -I$(src)src/hal/include
+ ccflags-y := -I$(src)/include
+ ccflags-y += -I$(src)/src/hal/include
+ As you can see, kbuild knows how to handle object files located
+ in other directories. The trick is to specify the directory
+ relative to the kbuild file's location. That being said, this
+ is NOT recommended practice.
- kbuild knows how to handle .o files located in another directory -
- although this is NOT recommended practice. The syntax is to specify
- the directory relative to the directory where the Kbuild file is
- located.
+ For the header files, kbuild must be explicitly told where to
+ look. When kbuild executes, the current directory is always the
+ root of the kernel tree (the argument to "-C") and therefore an
+ absolute path is needed. $(src) provides the absolute path by
+ pointing to the directory where the currently executing kbuild
+ file is located.
- To find the .h files, we have to explicitly tell kbuild where to look
- for the .h files. When kbuild executes, the current directory is always
- the root of the kernel tree (argument to -C) and therefore we have to
- tell kbuild how to find the .h files using absolute paths.
- $(src) will specify the absolute path to the directory where the
- Kbuild file are located when being build as an external module.
- Therefore -I$(src)/ is used to point out the directory of the Kbuild
- file and any additional path are just appended.
-=== 6. Module installation
+=== 5. Module Installation
-Modules which are included in the kernel are installed in the directory:
+Modules which are included in the kernel are installed in the
+directory:
- /lib/modules/$(KERNELRELEASE)/kernel
+ /lib/modules/$(KERNELRELEASE)/kernel/
-External modules are installed in the directory:
+And external modules are installed in:
- /lib/modules/$(KERNELRELEASE)/extra
+ /lib/modules/$(KERNELRELEASE)/extra/
---- 6.1 INSTALL_MOD_PATH
+--- 5.1 INSTALL_MOD_PATH
- Above are the default directories, but as always, some level of
- customization is possible. One can prefix the path using the variable
- INSTALL_MOD_PATH:
+ Above are the default directories but as always some level of
+ customization is possible. A prefix can be added to the
+ installation path using the variable INSTALL_MOD_PATH:
$ make INSTALL_MOD_PATH=/frodo modules_install
- => Install dir: /frodo/lib/modules/$(KERNELRELEASE)/kernel
-
- INSTALL_MOD_PATH may be set as an ordinary shell variable or as in the
- example above, can be specified on the command line when calling make.
- INSTALL_MOD_PATH has effect both when installing modules included in
- the kernel as well as when installing external modules.
+ => Install dir: /frodo/lib/modules/$(KERNELRELEASE)/kernel/
---- 6.2 INSTALL_MOD_DIR
+ INSTALL_MOD_PATH may be set as an ordinary shell variable or,
+ as shown above, can be specified on the command line when
+ calling "make." This has effect when installing both in-tree
+ and out-of-tree modules.
- When installing external modules they are by default installed to a
- directory under /lib/modules/$(KERNELRELEASE)/extra, but one may wish
- to locate modules for a specific functionality in a separate
- directory. For this purpose, one can use INSTALL_MOD_DIR to specify an
- alternative name to 'extra'.
+--- 5.2 INSTALL_MOD_DIR
- $ make INSTALL_MOD_DIR=gandalf -C KERNELDIR \
- M=`pwd` modules_install
- => Install dir: /lib/modules/$(KERNELRELEASE)/gandalf
+ External modules are by default installed to a directory under
+ /lib/modules/$(KERNELRELEASE)/extra/, but you may wish to
+ locate modules for a specific functionality in a separate
+ directory. For this purpose, use INSTALL_MOD_DIR to specify an
+ alternative name to "extra."
+ $ make INSTALL_MOD_DIR=gandalf -C $KDIR \
+ M=$PWD modules_install
+ => Install dir: /lib/modules/$(KERNELRELEASE)/gandalf/
-=== 7. Module versioning & Module.symvers
-Module versioning is enabled by the CONFIG_MODVERSIONS tag.
+=== 6. Module Versioning
-Module versioning is used as a simple ABI consistency check. The Module
-versioning creates a CRC value of the full prototype for an exported symbol and
-when a module is loaded/used then the CRC values contained in the kernel are
-compared with similar values in the module. If they are not equal, then the
-kernel refuses to load the module.
+Module versioning is enabled by the CONFIG_MODVERSIONS tag, and is used
+as a simple ABI consistency check. A CRC value of the full prototype
+for an exported symbol is created. When a module is loaded/used, the
+CRC values contained in the kernel are compared with similar values in
+the module; if they are not equal, the kernel refuses to load the
+module.
-Module.symvers contains a list of all exported symbols from a kernel build.
+Module.symvers contains a list of all exported symbols from a kernel
+build.
---- 7.1 Symbols from the kernel (vmlinux + modules)
+--- 6.1 Symbols From the Kernel (vmlinux + modules)
- During a kernel build, a file named Module.symvers will be generated.
- Module.symvers contains all exported symbols from the kernel and
- compiled modules. For each symbols, the corresponding CRC value
- is stored too.
+ During a kernel build, a file named Module.symvers will be
+ generated. Module.symvers contains all exported symbols from
+ the kernel and compiled modules. For each symbol, the
+ corresponding CRC value is also stored.
The syntax of the Module.symvers file is:
- <CRC> <Symbol> <module>
- Sample:
+ <CRC> <Symbol> <module>
+
0x2d036834 scsi_remove_host drivers/scsi/scsi_mod
- For a kernel build without CONFIG_MODVERSIONS enabled, the crc
- would read: 0x00000000
+ For a kernel build without CONFIG_MODVERSIONS enabled, the CRC
+ would read 0x00000000.
Module.symvers serves two purposes:
- 1) It lists all exported symbols both from vmlinux and all modules
- 2) It lists the CRC if CONFIG_MODVERSIONS is enabled
-
---- 7.2 Symbols and external modules
-
- When building an external module, the build system needs access to
- the symbols from the kernel to check if all external symbols are
- defined. This is done in the MODPOST step and to obtain all
- symbols, modpost reads Module.symvers from the kernel.
- If a Module.symvers file is present in the directory where
- the external module is being built, this file will be read too.
- During the MODPOST step, a new Module.symvers file will be written
- containing all exported symbols that were not defined in the kernel.
-
---- 7.3 Symbols from another external module
-
- Sometimes, an external module uses exported symbols from another
- external module. Kbuild needs to have full knowledge on all symbols
- to avoid spitting out warnings about undefined symbols.
- Three solutions exist to let kbuild know all symbols of more than
- one external module.
- The method with a top-level kbuild file is recommended but may be
- impractical in certain situations.
-
- Use a top-level Kbuild file
- If you have two modules: 'foo' and 'bar', and 'foo' needs
- symbols from 'bar', then one can use a common top-level kbuild
- file so both modules are compiled in same build.
-
- Consider following directory layout:
- ./foo/ <= contains the foo module
- ./bar/ <= contains the bar module
- The top-level Kbuild file would then look like:
-
- #./Kbuild: (this file may also be named Makefile)
+ 1) It lists all exported symbols from vmlinux and all modules.
+ 2) It lists the CRC if CONFIG_MODVERSIONS is enabled.
+
+--- 6.2 Symbols and External Modules
+
+ When building an external module, the build system needs access
+ to the symbols from the kernel to check if all external symbols
+ are defined. This is done in the MODPOST step. modpost obtains
+ the symbols by reading Module.symvers from the kernel source
+ tree. If a Module.symvers file is present in the directory
+ where the external module is being built, this file will be
+ read too. During the MODPOST step, a new Module.symvers file
+ will be written containing all exported symbols that were not
+ defined in the kernel.
+
+--- 6.3 Symbols From Another External Module
+
+ Sometimes, an external module uses exported symbols from
+ another external module. kbuild needs to have full knowledge of
+ all symbols to avoid spitting out warnings about undefined
+ symbols. Three solutions exist for this situation.
+
+ NOTE: The method with a top-level kbuild file is recommended
+ but may be impractical in certain situations.
+
+ Use a top-level kbuild file
+ If you have two modules, foo.ko and bar.ko, where
+ foo.ko needs symbols from bar.ko, you can use a
+ common top-level kbuild file so both modules are
+ compiled in the same build. Consider the following
+ directory layout:
+
+ ./foo/ <= contains foo.ko
+ ./bar/ <= contains bar.ko
+
+ The top-level kbuild file would then look like:
+
+ #./Kbuild (or ./Makefile):
obj-y := foo/ bar/
- Executing:
- make -C $KDIR M=`pwd`
+ And executing
+
+ $ make -C $KDIR M=$PWD
- will then do the expected and compile both modules with full
- knowledge on symbols from both modules.
+ will then do the expected and compile both modules with
+ full knowledge of symbols from either module.
Use an extra Module.symvers file
- When an external module is built, a Module.symvers file is
- generated containing all exported symbols which are not
- defined in the kernel.
- To get access to symbols from module 'bar', one can copy the
- Module.symvers file from the compilation of the 'bar' module
- to the directory where the 'foo' module is built.
- During the module build, kbuild will read the Module.symvers
- file in the directory of the external module and when the
- build is finished, a new Module.symvers file is created
- containing the sum of all symbols defined and not part of the
- kernel.
-
- Use make variable KBUILD_EXTRA_SYMBOLS in the Makefile
- If it is impractical to copy Module.symvers from another
- module, you can assign a space separated list of files to
- KBUILD_EXTRA_SYMBOLS in your Makfile. These files will be
- loaded by modpost during the initialisation of its symbol
- tables.
-
-=== 8. Tips & Tricks
-
---- 8.1 Testing for CONFIG_FOO_BAR
-
- Modules often need to check for certain CONFIG_ options to decide if
- a specific feature shall be included in the module. When kbuild is used
- this is done by referencing the CONFIG_ variable directly.
+ When an external module is built, a Module.symvers file
+ is generated containing all exported symbols which are
+ not defined in the kernel. To get access to symbols
+ from bar.ko, copy the Module.symvers file from the
+ compilation of bar.ko to the directory where foo.ko is
+ built. During the module build, kbuild will read the
+ Module.symvers file in the directory of the external
+ module, and when the build is finished, a new
+ Module.symvers file is created containing the sum of
+ all symbols defined and not part of the kernel.
+
+ Use "make" variable KBUILD_EXTRA_SYMBOLS
+ If it is impractical to copy Module.symvers from
+ another module, you can assign a space separated list
+ of files to KBUILD_EXTRA_SYMBOLS in your build file.
+ These files will be loaded by modpost during the
+ initialization of its symbol tables.
+
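+ A minimal sketch of the last approach, assuming bar.ko was built
+ in /home/user/src/bar (a hypothetical path), is a single line in
+ foo's build file:
+
+ KBUILD_EXTRA_SYMBOLS := /home/user/src/bar/Module.symvers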
+
+=== 7. Tips & Tricks
+
+--- 7.1 Testing for CONFIG_FOO_BAR
+
+ Modules often need to check for certain CONFIG_ options to
+ decide if a specific feature is included in the module. In
+ kbuild this is done by referencing the CONFIG_ variable
+ directly.
#fs/ext2/Makefile
obj-$(CONFIG_EXT2_FS) += ext2.o
@@ -544,9 +533,9 @@ Module.symvers contains a list of all exported symbols from a kernel build.
ext2-y := balloc.o bitmap.o dir.o
ext2-$(CONFIG_EXT2_FS_XATTR) += xattr.o
- External modules have traditionally used grep to check for specific
- CONFIG_ settings directly in .config. This usage is broken.
- As introduced before, external modules shall use kbuild when building
- and therefore can use the same methods as in-kernel modules when
- testing for CONFIG_ definitions.
+ External modules have traditionally used "grep" to check for
+ specific CONFIG_ settings directly in .config. This usage is
+ broken. As introduced before, external modules should use
+ kbuild for building and can therefore use the same methods as
+ in-tree modules when testing for CONFIG_ definitions.
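+
+ As a sketch, an external module can make one of its objects
+ conditional in exactly the same way (CONFIG_PCI is used here only
+ as an illustrative option):
+
+ obj-m := 8123.o
+ 8123-y := 8123_if.o 8123_bin.o
+ 8123-$(CONFIG_PCI) += 8123_pci.o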
diff --git a/Documentation/kernel-parameters.txt b/Documentation/kernel-parameters.txt
index 0b6815504e6d..cdd2a6e8a3b7 100644
--- a/Documentation/kernel-parameters.txt
+++ b/Documentation/kernel-parameters.txt
@@ -706,7 +706,7 @@ and is between 256 and 4096 characters. It is defined in the file
arch/x86/kernel/cpu/cpufreq/elanfreq.c.
elevator= [IOSCHED]
- Format: {"anticipatory" | "cfq" | "deadline" | "noop"}
+ Format: {"cfq" | "deadline" | "noop"}
See Documentation/block/as-iosched.txt and
Documentation/block/deadline-iosched.txt for details.
@@ -1541,12 +1541,15 @@ and is between 256 and 4096 characters. It is defined in the file
1 to enable accounting
Default value is 0.
- nfsaddrs= [NFS]
+ nfsaddrs= [NFS] Deprecated. Use ip= instead.
See Documentation/filesystems/nfs/nfsroot.txt.
nfsroot= [NFS] nfs root filesystem for disk-less boxes.
See Documentation/filesystems/nfs/nfsroot.txt.
+ nfsrootdebug [NFS] enable nfsroot debugging messages.
+ See Documentation/filesystems/nfs/nfsroot.txt.
+
nfs.callback_tcpport=
[NFS] set the TCP port on which the NFSv4 callback
channel should listen.
@@ -2172,6 +2175,11 @@ and is between 256 and 4096 characters. It is defined in the file
reset_devices [KNL] Force drivers to reset the underlying device
during initialization.
+ resource_alloc_from_bottom
+ Allocate new resources from the beginning of available
+ space, not the end. If you need to use this, please
+ report a bug.
+
resume= [SWSUSP]
Specify the partition device for software suspend
@@ -2377,6 +2385,11 @@ and is between 256 and 4096 characters. It is defined in the file
improve throughput, but will also increase the
amount of memory reserved for use by the client.
+ swapaccount[=0|1]
+ [KNL] Enable accounting of swap in memory resource
+ controller if no parameter or 1 is given or disable
+ it if 0 is given (See Documentation/cgroups/memory.txt)
+
swiotlb= [IA-64] Number of I/O TLB slabs
switches= [HW,M68k]
@@ -2438,7 +2451,7 @@ and is between 256 and 4096 characters. It is defined in the file
topology informations if the hardware supports these.
The scheduler will make use of these informations and
e.g. base its process migration decisions on it.
- Default is off.
+ Default is on.
tp720= [HW,PS2]
diff --git a/Documentation/leds-class.txt b/Documentation/leds-class.txt
index 8fd5ca2ae32d..58b266bd1846 100644
--- a/Documentation/leds-class.txt
+++ b/Documentation/leds-class.txt
@@ -60,15 +60,18 @@ Hardware accelerated blink of LEDs
Some LEDs can be programmed to blink without any CPU interaction. To
support this feature, a LED driver can optionally implement the
-blink_set() function (see <linux/leds.h>). If implemented, triggers can
-attempt to use it before falling back to software timers. The blink_set()
-function should return 0 if the blink setting is supported, or -EINVAL
-otherwise, which means that LED blinking will be handled by software.
-
-The blink_set() function should choose a user friendly blinking
-value if it is called with *delay_on==0 && *delay_off==0 parameters. In
-this case the driver should give back the chosen value through delay_on
-and delay_off parameters to the leds subsystem.
+blink_set() function (see <linux/leds.h>). To set an LED to blinking,
+however, it is better to use the API function led_blink_set(),
+as it will check and implement software fallback if necessary.
+
+To turn off blinking again, use the API function led_brightness_set()
+as that will not just set the LED brightness but also stop any software
+timers that may have been required for blinking.
+
+The blink_set() function should choose a user friendly blinking value
+if it is called with *delay_on==0 && *delay_off==0 parameters. In this
+case the driver should give back the chosen value through delay_on and
+delay_off parameters to the leds subsystem.
Setting the brightness to zero with brightness_set() callback function
should completely turn off the LED and cancel the previously programmed
diff --git a/Documentation/leds/leds-lp5521.txt b/Documentation/leds/leds-lp5521.txt
new file mode 100644
index 000000000000..c4d8d151e0fe
--- /dev/null
+++ b/Documentation/leds/leds-lp5521.txt
@@ -0,0 +1,88 @@
+Kernel driver for lp5521
+========================
+
+* National Semiconductor LP5521 led driver chip
+* Datasheet: http://www.national.com/pf/LP/LP5521.html
+
+Authors: Mathias Nyman, Yuri Zaporozhets, Samu Onkalo
+Contact: Samu Onkalo (samu.p.onkalo-at-nokia.com)
+
+Description
+-----------
+
+LP5521 can drive up to 3 channels. Leds can be controlled directly via
+the led class control interface. Channels have generic names:
+lp5521:channelx, where x is 0 .. 2
+
+All three channels can also be controlled using the engine micro programs.
+More details of the instructions can be found in the public data sheet.
+
+Control interface for the engines:
+x is 1 .. 3
+enginex_mode : disabled, load, run
+enginex_load : store program (visible only in engine load mode)
+
+Example (start to blink the channel 2 led):
+cd /sys/class/leds/lp5521:channel2/device
+echo "load" > engine3_mode
+echo "037f4d0003ff6000" > engine3_load
+echo "run" > engine3_mode
+
+stop the engine:
+echo "disabled" > engine3_mode
+
+sysfs contains a selftest entry.
+The test communicates with the chip and checks that
+the clock mode is automatically set to the requested one.
+
+Each channel has its own led current settings.
+/sys/class/leds/lp5521:channel0/led_current - RW
+/sys/class/leds/lp5521:channel0/max_current - RO
+Format: 10x mA, i.e. 10 means 1.0 mA
+
+example platform data:
+
+Note: chan_nr can have values between 0 and 2.
+
+static struct lp5521_led_config lp5521_led_config[] = {
+ {
+ .chan_nr = 0,
+ .led_current = 50,
+ .max_current = 130,
+ }, {
+ .chan_nr = 1,
+ .led_current = 0,
+ .max_current = 130,
+ }, {
+ .chan_nr = 2,
+ .led_current = 0,
+ .max_current = 130,
+ }
+};
+
+static int lp5521_setup(void)
+{
+ /* setup HW resources */
+}
+
+static void lp5521_release(void)
+{
+ /* Release HW resources */
+}
+
+static void lp5521_enable(bool state)
+{
+ /* Control of chip enable signal */
+}
+
+static struct lp5521_platform_data lp5521_platform_data = {
+ .led_config = lp5521_led_config,
+ .num_channels = ARRAY_SIZE(lp5521_led_config),
+ .clock_mode = LP5521_CLOCK_EXT,
+ .setup_resources = lp5521_setup,
+ .release_resources = lp5521_release,
+ .enable = lp5521_enable,
+};
+
+If the current is set to 0 in the platform data, that channel is
+disabled and it is not visible in the sysfs.
diff --git a/Documentation/leds/leds-lp5523.txt b/Documentation/leds/leds-lp5523.txt
new file mode 100644
index 000000000000..fad2feb8b7ce
--- /dev/null
+++ b/Documentation/leds/leds-lp5523.txt
@@ -0,0 +1,83 @@
+Kernel driver for lp5523
+========================
+
+* National Semiconductor LP5523 led driver chip
+* Datasheet: http://www.national.com/pf/LP/LP5523.html
+
+Authors: Mathias Nyman, Yuri Zaporozhets, Samu Onkalo
+Contact: Samu Onkalo (samu.p.onkalo-at-nokia.com)
+
+Description
+-----------
+LP5523 can drive up to 9 channels. Leds can be controlled directly via
+the led class control interface. Channels have generic names:
+lp5523:channelx where x is 0...8
+
+The chip provides 3 engines. Each engine can control channels without
+interaction from the main CPU. Details of the micro engine code can be found
+in the public data sheet. Leds can be muxed to different channels.
+
+Control interface for the engines:
+x is 1 .. 3
+enginex_mode : disabled, load, run
+enginex_load : microcode load (visible only in load mode)
+enginex_leds : led mux control (visible only in load mode)
+
+cd /sys/class/leds/lp5523:channel2/device
+echo "load" > engine3_mode
+echo "9d80400004ff05ff437f0000" > engine3_load
+echo "111111111" > engine3_leds
+echo "run" > engine3_mode
+
+sysfs contains a selftest entry. It measures each channel
+voltage level and checks if it looks reasonable. If the level is too high,
+the led is missing; if the level is too low, there is a short circuit.
+
+The selftest always uses the current from the platform data.
+
+Each channel contains led current settings.
+/sys/class/leds/lp5523:channel2/led_current - RW
+/sys/class/leds/lp5523:channel2/max_current - RO
+Format: 10x mA, i.e. 10 means 1.0 mA
+
+Example platform data:
+
+Note - chan_nr can have values between 0 and 8.
+
+static struct lp5523_led_config lp5523_led_config[] = {
+ {
+ .chan_nr = 0,
+ .led_current = 50,
+ .max_current = 130,
+ },
+...
+ }, {
+ .chan_nr = 8,
+ .led_current = 50,
+ .max_current = 130,
+ }
+};
+
+static int lp5523_setup(void)
+{
+ /* Setup HW resources */
+}
+
+static void lp5523_release(void)
+{
+ /* Release HW resources */
+}
+
+static void lp5523_enable(bool state)
+{
+ /* Control chip enable signal */
+}
+
+static struct lp5523_platform_data lp5523_platform_data = {
+ .led_config = lp5523_led_config,
+ .num_channels = ARRAY_SIZE(lp5523_led_config),
+ .clock_mode = LP5523_CLOCK_EXT,
+ .setup_resources = lp5523_setup,
+ .release_resources = lp5523_release,
+ .enable = lp5523_enable,
+};
diff --git a/Documentation/misc-devices/apds990x.txt b/Documentation/misc-devices/apds990x.txt
new file mode 100644
index 000000000000..d5408cade32f
--- /dev/null
+++ b/Documentation/misc-devices/apds990x.txt
@@ -0,0 +1,111 @@
+Kernel driver apds990x
+======================
+
+Supported chips:
+Avago APDS990X
+
+Data sheet:
+Not freely available
+
+Author:
+Samu Onkalo <samu.p.onkalo@nokia.com>
+
+Description
+-----------
+
+APDS990x is a combined ambient light and proximity sensor. ALS and proximity
+functionality are highly connected. ALS measurement path must be running
+while the proximity functionality is enabled.
+
+ALS produces raw measurement values for two channels: Clear channel
+(infrared + visible light) and IR only. However, threshold comparisons happen
+using the clear channel only. The lux value and the threshold level on the HW
+might vary considerably depending on the spectrum of the light source.
+
+The driver makes the necessary conversions in both directions so that the
+user handles only lux values. The lux value is calculated using information
+from both channels. The HW threshold level is calculated from the given lux
+value to match the current type of lighting. Sometimes inaccuracy of the
+estimates leads to a false interrupt, but that does no harm.
+
+The ALS has 4 different gain steps. The driver automatically selects a
+suitable gain step. After each measurement, the reliability of the results
+is estimated and a new measurement is triggered if necessary.
+
+Platform data can provide tuned values to the conversion formulas if
+values are known. Otherwise plain sensor default values are used.
+
+The proximity side is a little bit simpler. There is no need for complex conversions.
+It produces directly usable values.
+
+Driver controls chip operational state using pm_runtime framework.
+Voltage regulators are controlled based on chip operational state.
+
+SYSFS
+-----
+
+
+chip_id
+ RO - shows detected chip type and version
+
+power_state
+ RW - enable / disable chip. Uses counting logic
+ 1 enables the chip
+ 0 disables the chip
+lux0_input
+ RO - measured lux value
+ sysfs_notify called when threshold interrupt occurs
+
+lux0_sensor_range
+ RO - lux0_input max value. Never actually reached, since the sensor
+ tends to saturate well before that. The real max value varies
+ depending on the light spectrum etc.
+
+lux0_rate
+ RW - measurement rate in Hz
+
+lux0_rate_avail
+ RO - supported measurement rates
+
+lux0_calibscale
+ RW - calibration value. Set to neutral value by default.
+ Output results are multiplied with calibscale / calibscale_default
+ value.
+
+lux0_calibscale_default
+ RO - neutral calibration value
+
+lux0_thresh_above_value
+ RW - HI level threshold value. All results above the value
+ trigger an interrupt. 65535 (i.e. sensor_range) disables the above
+ interrupt.
+
+lux0_thresh_below_value
+ RW - LO level threshold value. All results below the value
+ trigger an interrupt. 0 disables the below interrupt.
+
+prox0_raw
+ RO - measured proximity value
+ sysfs_notify called when threshold interrupt occurs
+
+prox0_sensor_range
+ RO - prox0_raw max value (1023)
+
+prox0_raw_en
+ RW - enable / disable proximity - uses counting logic
+ 1 enables the proximity
+ 0 disables the proximity
+
+prox0_reporting_mode
+ RW - trigger / periodic. In "trigger" mode the driver reports one of two
+ possible values: 0 or the prox0_sensor_range value. 0 means no
+ proximity, 1023 means proximity. This causes a minimal number of
+ interrupts. In "periodic" mode the driver reports all values above
+ prox0_thresh_above. This causes more interrupts, but it can give a
+ _rough_ estimate of the distance.
+
+prox0_reporting_mode_avail
+ RO - accepted values to prox0_reporting_mode (trigger, periodic)
+
+prox0_thresh_above_value
+ RW - threshold level which triggers proximity events.
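+
+Example (a sketch; the exact sysfs path depends on the I2C bus and
+address the chip was probed on, somewhere under /sys/bus/i2c/devices/):
+cd /sys/bus/i2c/devices/<bus>-<addr>
+echo 1 > power_state
+cat lux0_input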
diff --git a/Documentation/misc-devices/bh1770glc.txt b/Documentation/misc-devices/bh1770glc.txt
new file mode 100644
index 000000000000..7d64c014dc70
--- /dev/null
+++ b/Documentation/misc-devices/bh1770glc.txt
@@ -0,0 +1,116 @@
+Kernel driver bh1770glc
+=======================
+
+Supported chips:
+ROHM BH1770GLC
+OSRAM SFH7770
+
+Data sheet:
+Not freely available
+
+Author:
+Samu Onkalo <samu.p.onkalo@nokia.com>
+
+Description
+-----------
+BH1770GLC and SFH7770 are combined ambient light and proximity sensors.
+The ALS and proximity parts operate on their own, but they share a common
+I2C interface and interrupt logic. In principle they can run independently,
+but the ALS results are used to estimate the reliability of the proximity sensor.
+
+ALS produces 16 bit lux values. The chip contains interrupt logic to produce
+low and high threshold interrupts.
+
+The proximity part contains an IR-led driver for up to 3 IR leds. The chip
+measures the amount of reflected IR light and produces a proximity result
+with 8-bit resolution. The driver supports only one channel and uses the ALS
+results to estimate the reliability of the proximity results. Thus the ALS is
+always running while proximity detection is needed.
+
+The driver uses threshold interrupts to avoid polling the values. A
+proximity-low interrupt doesn't exist in the chip, so it is simulated using
+delayed work. As long as proximity-above-threshold interrupts keep arriving,
+the delayed work is pushed forward. When the proximity level goes below the
+threshold value, there is no interrupt and the delayed work finally runs.
+This is handled as a no-proximity indication.
+
+Chip state is controlled via runtime pm framework when enabled in config.
+
+The calibscale factor is used to hide differences between the chips. By
+default it is set to a neutral state, meaning a factor of 1.00. To get proper
+values, a calibrated light source is needed as a reference. The calibscale
+factor is set so that the measurement produces about the expected lux value.
+
+SYSFS
+-----
+
+chip_id
+ RO - shows detected chip type and version
+
+power_state
+ RW - enable / disable chip. Uses counting logic
+ 1 enables the chip
+ 0 disables the chip
+
+lux0_input
+ RO - measured lux value
+ sysfs_notify called when threshold interrupt occurs
+
+lux0_sensor_range
+ RO - lux0_input max value
+
+lux0_rate
+ RW - measurement rate in Hz
+
+lux0_rate_avail
+ RO - supported measurement rates
+
+lux0_thresh_above_value
+ RW - HI level threshold value. All results above the value
+ trigger an interrupt. 65535 (i.e. sensor_range) disables the above
+ interrupt.
+
+lux0_thresh_below_value
+ RW - LO level threshold value. All results below the value
+ trigger an interrupt. 0 disables the below interrupt.
+
+lux0_calibscale
+ RW - calibration value. Set to neutral value by default.
+ Output results are multiplied with calibscale / calibscale_default
+ value.
+
+lux0_calibscale_default
+ RO - neutral calibration value
+
+prox0_raw
+ RO - measured proximity value
+ sysfs_notify called when threshold interrupt occurs
+
+prox0_sensor_range
+ RO - prox0_raw max value
+
+prox0_raw_en
+ RW - enable / disable proximity - uses counting logic
+ 1 enables the proximity
+ 0 disables the proximity
+
+prox0_thresh_above_count
+ RW - number of proximity interrupts needed before triggering the event
+
+prox0_rate_above
+ RW - Measurement rate (in Hz) when the level is above threshold
+ i.e. when proximity on has been reported.
+
+prox0_rate_below
+ RW - Measurement rate (in Hz) when the level is below threshold
+ i.e. when proximity off has been reported.
+
+prox0_rate_avail
+ RO - Supported proximity measurement rates in Hz
+
+prox0_thresh_above0_value
+ RW - threshold level which triggers proximity events.
+ Filtered by persistence filter (prox0_thresh_above_count)
+
+prox0_thresh_above1_value
+ RW - threshold level which triggers an event immediately
diff --git a/Documentation/networking/ip-sysctl.txt b/Documentation/networking/ip-sysctl.txt
index c7165f4cb792..3c5e465296e1 100644
--- a/Documentation/networking/ip-sysctl.txt
+++ b/Documentation/networking/ip-sysctl.txt
@@ -20,6 +20,15 @@ ip_no_pmtu_disc - BOOLEAN
min_pmtu - INTEGER
default 562 - minimum discovered Path MTU
+route/max_size - INTEGER
+ Maximum number of routes allowed in the kernel. Increase
+ this when using large numbers of interfaces and/or routes.
+
+neigh/default/gc_thresh3 - INTEGER
+ Maximum number of neighbor entries allowed. Increase this
+ when using large numbers of interfaces and when communicating
+ with large numbers of directly-connected peers.
+
mtu_expires - INTEGER
Time, in seconds, that cached PMTU information is kept.
@@ -135,6 +144,7 @@ tcp_adv_win_scale - INTEGER
Count buffering overhead as bytes/2^tcp_adv_win_scale
(if tcp_adv_win_scale > 0) or bytes-bytes/2^(-tcp_adv_win_scale),
if it is <= 0.
+ Possible values are [-31, 31], inclusive.
Default: 2
tcp_allowed_congestion_control - STRING
diff --git a/Documentation/networking/phy.txt b/Documentation/networking/phy.txt
index 88bb71b46da4..9eb1ba52013d 100644
--- a/Documentation/networking/phy.txt
+++ b/Documentation/networking/phy.txt
@@ -177,18 +177,6 @@ Doing it all yourself
A convenience function to print out the PHY status neatly.
- int phy_clear_interrupt(struct phy_device *phydev);
- int phy_config_interrupt(struct phy_device *phydev, u32 interrupts);
-
- Clear the PHY's interrupt, and configure which ones are allowed,
- respectively. Currently only supports all on, or all off.
-
- int phy_enable_interrupts(struct phy_device *phydev);
- int phy_disable_interrupts(struct phy_device *phydev);
-
- Functions which enable/disable PHY interrupts, clearing them
- before and after, respectively.
-
int phy_start_interrupts(struct phy_device *phydev);
int phy_stop_interrupts(struct phy_device *phydev);
@@ -213,12 +201,6 @@ Doing it all yourself
Fills the phydev structure with up-to-date information about the current
settings in the PHY.
- void phy_sanitize_settings(struct phy_device *phydev)
-
- Resolves differences between currently desired settings, and
- supported settings for the given PHY device. Does not make
- the changes in the hardware, though.
-
int phy_ethtool_sset(struct phy_device *phydev, struct ethtool_cmd *cmd);
int phy_ethtool_gset(struct phy_device *phydev, struct ethtool_cmd *cmd);
diff --git a/Documentation/power/opp.txt b/Documentation/power/opp.txt
index 44d87ad3cea9..cd445582d1f8 100644
--- a/Documentation/power/opp.txt
+++ b/Documentation/power/opp.txt
@@ -37,6 +37,9 @@ Typical usage of the OPP library is as follows:
SoC framework -> modifies on required cases certain OPPs -> OPP layer
-> queries to search/retrieve information ->
+Architectures that provide a SoC framework for OPP should select ARCH_HAS_OPP
+to make the OPP layer available.
+
OPP layer expects each domain to be represented by a unique device pointer. SoC
framework registers a set of initial OPPs per device with the OPP layer. This
list is expected to be an optimally small number typically around 5 per device.
diff --git a/Documentation/rbtree.txt b/Documentation/rbtree.txt
index 221f38be98f4..19f8278c3854 100644
--- a/Documentation/rbtree.txt
+++ b/Documentation/rbtree.txt
@@ -21,8 +21,8 @@ three rotations, respectively, to balance the tree), with slightly slower
To quote Linux Weekly News:
There are a number of red-black trees in use in the kernel.
- The anticipatory, deadline, and CFQ I/O schedulers all employ
- rbtrees to track requests; the packet CD/DVD driver does the same.
+ The deadline and CFQ I/O schedulers employ rbtrees to
+ track requests; the packet CD/DVD driver does the same.
The high-resolution timer code uses an rbtree to organize outstanding
timer requests. The ext3 filesystem tracks directory entries in a
red-black tree. Virtual memory areas (VMAs) are tracked with red-black
diff --git a/Documentation/scsi/ChangeLog.megaraid_sas b/Documentation/scsi/ChangeLog.megaraid_sas
index 30023568805e..00301ed9c371 100644
--- a/Documentation/scsi/ChangeLog.megaraid_sas
+++ b/Documentation/scsi/ChangeLog.megaraid_sas
@@ -1,3 +1,50 @@
+1 Release Date : Thur. May 03, 2010 09:12:45 PST 2009 -
+ (emaild-id:megaraidlinux@lsi.com)
+ Bo Yang
+
+2 Current Version : 00.00.04.31-rc1
+3 Older Version : 00.00.04.17.1-rc1
+
+1. Add the Online Controller Reset (OCR) to the Driver.
+ OCR is the new feature for megaraid_sas driver which
+ will allow the fw to do the chip reset which will not
+ affect the OS behaviour.
+
+ To add the OCR support, the driver needs to do:
+ a). reset the controller chips -- Xscale and Gen2 which
+ will change the function calls and add the reset function
+ related to these two chips.
+
+ b). during the reset, driver will store the pending cmds
+ which not returned by FW to driver's pending queue. Driver
+ will re-issue those pending cmds again to FW after the OCR
+ finished.
+
+ c). In driver's timeout routine, driver will report to
+ OS as reset. Also driver's queue routine will block the
+ cmds until the OCR finished.
+
+ d). in Driver's ISR routine, if driver get the FW state as
+ state change, FW in Failure status and FW support online controller
+ reset (OCR), driver will start to do the controller reset.
+
+ e). In driver's IOCTL routine, the application cmds will wait for the
+ OCR to finish, then issue the cmds to FW.
+
+ f). Before driver kill adapter, driver will do last chance of
+ OCR to see if driver can bring back the FW.
+
+2. Add the support update flag to the driver to tell LSI megaraid_sas
+ application which driver will support the device update. So application
+ will not need to do the device update after application add/del the device
+ from the system.
+3. In the driver's timeout routine, the driver will do up to three resets if
+ the FW is in a failed state. The driver will kill the adapter if it can't
+ bring back the FW after these three resets.
+4. Add the input parameter max_sectors to 1MB support to our GEN2 controller.
+ Customers can use the input parameter max_sectors to add 1MB support to the GEN2
+ controller.
+
1 Release Date : Thur. Oct 29, 2009 09:12:45 PST 2009 -
(emaild-id:megaraidlinux@lsi.com)
Bo Yang
diff --git a/Documentation/sh/clk.txt b/Documentation/sh/clk.txt
deleted file mode 100644
index 114b595cfa97..000000000000
--- a/Documentation/sh/clk.txt
+++ /dev/null
@@ -1,32 +0,0 @@
-Clock framework on SuperH architecture
-
-The framework on SH extends existing API by the function clk_set_rate_ex,
-which prototype is as follows:
-
- clk_set_rate_ex (struct clk *clk, unsigned long rate, int algo_id)
-
-The algo_id parameter is used to specify algorithm used to recalculate clocks,
-adjanced to clock, specified as first argument. It is assumed that algo_id==0
-means no changes to adjanced clock
-
-Internally, the clk_set_rate_ex forwards request to clk->ops->set_rate method,
-if it is present in ops structure. The method should set the clock rate and adjust
-all needed clocks according to the passed algo_id.
-Exact values for algo_id are machine-dependent. For the sh7722, the following
-values are defined:
-
- NO_CHANGE = 0,
- IUS_N1_N1, /* I:U = N:1, U:Sh = N:1 */
- IUS_322, /* I:U:Sh = 3:2:2 */
- IUS_522, /* I:U:Sh = 5:2:2 */
- IUS_N11, /* I:U:Sh = N:1:1 */
- SB_N1, /* Sh:B = N:1 */
- SB3_N1, /* Sh:B3 = N:1 */
- SB3_32, /* Sh:B3 = 3:2 */
- SB3_43, /* Sh:B3 = 4:3 */
- SB3_54, /* Sh:B3 = 5:4 */
- BP_N1, /* B:P = N:1 */
- IP_N1 /* I:P = N:1 */
-
-Each of these constants means relation between clocks that can be set via the FRQCR
-register
diff --git a/Documentation/sound/alsa/ALSA-Configuration.txt b/Documentation/sound/alsa/ALSA-Configuration.txt
index 7f4dcebda9c6..d0eb696d32e8 100644
--- a/Documentation/sound/alsa/ALSA-Configuration.txt
+++ b/Documentation/sound/alsa/ALSA-Configuration.txt
@@ -300,6 +300,74 @@ Prior to version 0.9.0rc4 options had a 'snd_' prefix. This was removed.
control correctly. If you have problems regarding this, try
another ALSA compliant mixer (alsamixer works).
+ Module snd-azt1605
+ ------------------
+
+ Module for Aztech Sound Galaxy soundcards based on the Aztech AZT1605
+ chipset.
+
+ port - port # for BASE (0x220,0x240,0x260,0x280)
+ wss_port - port # for WSS (0x530,0x604,0xe80,0xf40)
+ irq - IRQ # for WSS (7,9,10,11)
+ dma1 - DMA # for WSS playback (0,1,3)
+ dma2 - DMA # for WSS capture (0,1), -1 = disabled (default)
+ mpu_port - port # for MPU-401 UART (0x300,0x330), -1 = disabled (default)
+ mpu_irq - IRQ # for MPU-401 UART (3,5,7,9), -1 = disabled (default)
+ fm_port - port # for OPL3 (0x388), -1 = disabled (default)
+
+ This module supports multiple cards. It does not support autoprobe: port,
+ wss_port, irq and dma1 have to be specified. The other values are
+ optional.
+
+ "port" needs to match the BASE ADDRESS jumper on the card (0x220 or 0x240)
+ or the value stored in the card's EEPROM for cards that have an EEPROM and
+ their "CONFIG MODE" jumper set to "EEPROM SETTING". The other values can
+ be chosen freely from the options enumerated above.
+
+ If dma2 is specified and different from dma1, the card will operate in
+ full-duplex mode. When dma1=3, dma2=0 is the only valid value and the
+ only way to enable capture, since only channels 0 and 1 are available
+ for capture.
+
+ Generic settings are "port=0x220 wss_port=0x530 irq=10 dma1=1 dma2=0
+ mpu_port=0x330 mpu_irq=9 fm_port=0x388".
+
+ Whatever IRQ and DMA channels you pick, be sure to reserve them for
+ legacy ISA in your BIOS.
+
+ Module snd-azt2316
+ ------------------
+
+ Module for Aztech Sound Galaxy soundcards based on the Aztech AZT2316
+ chipset.
+
+ port - port # for BASE (0x220,0x240,0x260,0x280)
+ wss_port - port # for WSS (0x530,0x604,0xe80,0xf40)
+ irq - IRQ # for WSS (7,9,10,11)
+ dma1 - DMA # for WSS playback (0,1,3)
+ dma2 - DMA # for WSS capture (0,1), -1 = disabled (default)
+ mpu_port - port # for MPU-401 UART (0x300,0x330), -1 = disabled (default)
+ mpu_irq - IRQ # for MPU-401 UART (5,7,9,10), -1 = disabled (default)
+ fm_port - port # for OPL3 (0x388), -1 = disabled (default)
+
+ This module supports multiple cards. It does not support autoprobe: port,
+ wss_port, irq and dma1 have to be specified. The other values are
+ optional.
+
+ "port" needs to match the BASE ADDRESS jumper on the card (0x220 or 0x240)
+ or the value stored in the card's EEPROM for cards that have an EEPROM and
+ their "CONFIG MODE" jumper set to "EEPROM SETTING". The other values can
+ be chosen freely from the options enumerated above.
+
+ If dma2 is specified and different from dma1, the card will operate in
+ full-duplex mode. When dma1=3, dma2=0 is the only valid value and the
+ only way to enable capture, since only channels 0 and 1 are available
+ for capture.
+
+ Generic settings are "port=0x220 wss_port=0x530 irq=10 dma1=1 dma2=0
+ mpu_port=0x330 mpu_irq=9 fm_port=0x388".
+
+ Whatever IRQ and DMA channels you pick, be sure to reserve them for
+ legacy ISA in your BIOS.
+
Module snd-aw2
--------------
@@ -1641,20 +1709,6 @@ Prior to version 0.9.0rc4 options had a 'snd_' prefix. This was removed.
This card is also known as Audio Excel DSP 16 or Zoltrix AV302.
- Module snd-sgalaxy
- ------------------
-
- Module for Aztech Sound Galaxy sound card.
-
- sbport - Port # for SB16 interface (0x220,0x240)
- wssport - Port # for WSS interface (0x530,0xe80,0xf40,0x604)
- irq - IRQ # (7,9,10,11)
- dma1 - DMA #
-
- This module supports multiple cards.
-
- The power-management is supported.
-
Module snd-sscape
-----------------
diff --git a/Documentation/sound/alsa/HD-Audio.txt b/Documentation/sound/alsa/HD-Audio.txt
index 278cc2122ea0..c82beb007634 100644
--- a/Documentation/sound/alsa/HD-Audio.txt
+++ b/Documentation/sound/alsa/HD-Audio.txt
@@ -57,9 +57,11 @@ dead. However, this detection isn't perfect on some devices. In such
a case, you can change the default method via `position_fix` option.
`position_fix=1` means to use LPIB method explicitly.
-`position_fix=2` means to use the position-buffer. 0 is the default
-value, the automatic check and fallback to LPIB as described in the
-above. If you get a problem of repeated sounds, this option might
+`position_fix=2` means to use the position-buffer.
+`position_fix=3` means to use a combination of both methods, needed
+for some VIA and ATI controllers. 0 is the default value for all other
+controllers and means the automatic check and fallback to LPIB described
+above. If you hear repeated sounds, this option might
help.
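+
+For example, to force the combined method explicitly when the automatic
+choice does not work out, a modprobe option line such as the following
+could be used (illustrative, not a general recommendation):
+
+    options snd-hda-intel position_fix=3
+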
In addition to that, every controller is known to be broken regarding
diff --git a/Documentation/sysctl/kernel.txt b/Documentation/sysctl/kernel.txt
index 3894eaa23486..209e1584c3dc 100644
--- a/Documentation/sysctl/kernel.txt
+++ b/Documentation/sysctl/kernel.txt
@@ -28,6 +28,7 @@ show up in /proc/sys/kernel:
- core_uses_pid
- ctrl-alt-del
- dentry-state
+- dmesg_restrict
- domainname
- hostname
- hotplug
@@ -213,6 +214,19 @@ to decide what to do with it.
==============================================================
+dmesg_restrict:
+
+This toggle indicates whether unprivileged users are prevented from using
+dmesg(8) to view messages from the kernel's log buffer. When
+dmesg_restrict is set to (0) there are no restrictions. When
+dmesg_restrict is set to (1), users must have CAP_SYS_ADMIN to use
+dmesg(8).
+
+The kernel config option CONFIG_SECURITY_DMESG_RESTRICT sets the default
+value of dmesg_restrict.
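+
+For example, the restriction can be enabled at runtime with (illustrative
+commands; any sysctl front-end works just as well):
+
+    # echo 1 > /proc/sys/kernel/dmesg_restrict
+    # sysctl kernel.dmesg_restrict
+    kernel.dmesg_restrict = 1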
+
+==============================================================
+
domainname & hostname:
These files can be used to set the NIS/YP domainname and the
diff --git a/Documentation/sysctl/vm.txt b/Documentation/sysctl/vm.txt
index b606c2c4dd37..30289fab86eb 100644
--- a/Documentation/sysctl/vm.txt
+++ b/Documentation/sysctl/vm.txt
@@ -80,8 +80,10 @@ dirty_background_bytes
Contains the amount of dirty memory at which the pdflush background writeback
daemon will start writeback.
-If dirty_background_bytes is written, dirty_background_ratio becomes a function
-of its value (dirty_background_bytes / the amount of dirtyable system memory).
+Note: dirty_background_bytes is the counterpart of dirty_background_ratio. Only
+one of them may be specified at a time. When one sysctl is written it is
+immediately taken into account to evaluate the dirty memory limits and the
+other appears as 0 when read.
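+
+For example (illustrative shell session; 104857600 bytes = 100MB):
+
+    # echo 104857600 > /proc/sys/vm/dirty_background_bytes
+    # cat /proc/sys/vm/dirty_background_ratio
+    0
+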
==============================================================
@@ -97,8 +99,10 @@ dirty_bytes
Contains the amount of dirty memory at which a process generating disk writes
will itself start writeback.
-If dirty_bytes is written, dirty_ratio becomes a function of its value
-(dirty_bytes / the amount of dirtyable system memory).
+Note: dirty_bytes is the counterpart of dirty_ratio. Only one of them may be
+specified at a time. When one sysctl is written it is immediately taken into
+account to evaluate the dirty memory limits and the other appears as 0 when
+read.
Note: the minimum value allowed for dirty_bytes is two pages (in bytes); any
value lower than this limit will be ignored and the old configuration will be
diff --git a/Documentation/sysrq.txt b/Documentation/sysrq.txt
index 5c17196c8fe9..312e3754e8c5 100644
--- a/Documentation/sysrq.txt
+++ b/Documentation/sysrq.txt
@@ -75,7 +75,7 @@ On all - write a character to /proc/sysrq-trigger. e.g.:
'f' - Will call oom_kill to kill a memory hog process.
-'g' - Used by kgdb on ppc and sh platforms.
+'g' - Used by kgdb (kernel debugger)
'h' - Will display help (actually any other key than those listed
here will display help. but 'h' is easy to remember :-)
@@ -110,12 +110,15 @@ On all - write a character to /proc/sysrq-trigger. e.g.:
'u' - Will attempt to remount all mounted filesystems read-only.
-'v' - Dumps Voyager SMP processor info to your console.
+'v' - Forcefully restores framebuffer console
+'v' - Causes ETM buffer dump [ARM-specific]
'w' - Dumps tasks that are in uninterruptable (blocked) state.
'x' - Used by xmon interface on ppc/powerpc platforms.
+'y' - Show global CPU Registers [SPARC-64 specific]
+
'z' - Dump the ftrace buffer
'0'-'9' - Sets the console log level, controlling which kernel messages
diff --git a/Documentation/timers/hpet_example.c b/Documentation/timers/hpet_example.c
index 4bfafb7bc4c5..9a3e7012c190 100644
--- a/Documentation/timers/hpet_example.c
+++ b/Documentation/timers/hpet_example.c
@@ -97,6 +97,33 @@ hpet_open_close(int argc, const char **argv)
void
hpet_info(int argc, const char **argv)
{
+ struct hpet_info info;
+ int fd;
+
+ if (argc != 1) {
+ fprintf(stderr, "hpet_info: device-name\n");
+ return;
+ }
+
+ fd = open(argv[0], O_RDONLY);
+ if (fd < 0) {
+ fprintf(stderr, "hpet_info: open of %s failed\n", argv[0]);
+ return;
+ }
+
+ if (ioctl(fd, HPET_INFO, &info) < 0) {
+ fprintf(stderr, "hpet_info: failed to get info\n");
+ goto out;
+ }
+
+ fprintf(stderr, "hpet_info: hi_irqfreq 0x%lx hi_flags 0x%lx ",
+ info.hi_ireqfreq, info.hi_flags);
+ fprintf(stderr, "hi_hpet %d hi_timer %d\n",
+ info.hi_hpet, info.hi_timer);
+
+out:
+ close(fd);
+ return;
}
void
diff --git a/Documentation/trace/postprocess/trace-vmscan-postprocess.pl b/Documentation/trace/postprocess/trace-vmscan-postprocess.pl
index 1b55146d1c8d..b3e73ddb1567 100644
--- a/Documentation/trace/postprocess/trace-vmscan-postprocess.pl
+++ b/Documentation/trace/postprocess/trace-vmscan-postprocess.pl
@@ -46,7 +46,7 @@ use constant HIGH_KSWAPD_LATENCY => 20;
use constant HIGH_KSWAPD_REWAKEUP => 21;
use constant HIGH_NR_SCANNED => 22;
use constant HIGH_NR_TAKEN => 23;
-use constant HIGH_NR_RECLAIM => 24;
+use constant HIGH_NR_RECLAIMED => 24;
use constant HIGH_NR_CONTIG_DIRTY => 25;
my %perprocesspid;
@@ -58,11 +58,13 @@ my $opt_read_procstat;
my $total_wakeup_kswapd;
my ($total_direct_reclaim, $total_direct_nr_scanned);
my ($total_direct_latency, $total_kswapd_latency);
+my ($total_direct_nr_reclaimed);
my ($total_direct_writepage_file_sync, $total_direct_writepage_file_async);
my ($total_direct_writepage_anon_sync, $total_direct_writepage_anon_async);
my ($total_kswapd_nr_scanned, $total_kswapd_wake);
my ($total_kswapd_writepage_file_sync, $total_kswapd_writepage_file_async);
my ($total_kswapd_writepage_anon_sync, $total_kswapd_writepage_anon_async);
+my ($total_kswapd_nr_reclaimed);
# Catch sigint and exit on request
my $sigint_report = 0;
@@ -104,7 +106,7 @@ my $regex_kswapd_wake_default = 'nid=([0-9]*) order=([0-9]*)';
my $regex_kswapd_sleep_default = 'nid=([0-9]*)';
my $regex_wakeup_kswapd_default = 'nid=([0-9]*) zid=([0-9]*) order=([0-9]*)';
my $regex_lru_isolate_default = 'isolate_mode=([0-9]*) order=([0-9]*) nr_requested=([0-9]*) nr_scanned=([0-9]*) nr_taken=([0-9]*) contig_taken=([0-9]*) contig_dirty=([0-9]*) contig_failed=([0-9]*)';
-my $regex_lru_shrink_inactive_default = 'lru=([A-Z_]*) nr_scanned=([0-9]*) nr_reclaimed=([0-9]*) priority=([0-9]*)';
+my $regex_lru_shrink_inactive_default = 'nid=([0-9]*) zid=([0-9]*) nr_scanned=([0-9]*) nr_reclaimed=([0-9]*) priority=([0-9]*) flags=([A-Z_|]*)';
my $regex_lru_shrink_active_default = 'lru=([A-Z_]*) nr_scanned=([0-9]*) nr_rotated=([0-9]*) priority=([0-9]*)';
my $regex_writepage_default = 'page=([0-9a-f]*) pfn=([0-9]*) flags=([A-Z_|]*)';
@@ -203,8 +205,8 @@ $regex_lru_shrink_inactive = generate_traceevent_regex(
"vmscan/mm_vmscan_lru_shrink_inactive",
$regex_lru_shrink_inactive_default,
"nid", "zid",
- "lru",
- "nr_scanned", "nr_reclaimed", "priority");
+ "nr_scanned", "nr_reclaimed", "priority",
+ "flags");
$regex_lru_shrink_active = generate_traceevent_regex(
"vmscan/mm_vmscan_lru_shrink_active",
$regex_lru_shrink_active_default,
@@ -375,6 +377,16 @@ EVENT_PROCESS:
my $nr_contig_dirty = $7;
$perprocesspid{$process_pid}->{HIGH_NR_SCANNED} += $nr_scanned;
$perprocesspid{$process_pid}->{HIGH_NR_CONTIG_DIRTY} += $nr_contig_dirty;
+ } elsif ($tracepoint eq "mm_vmscan_lru_shrink_inactive") {
+ $details = $5;
+ if ($details !~ /$regex_lru_shrink_inactive/o) {
+ print "WARNING: Failed to parse mm_vmscan_lru_shrink_inactive as expected\n";
+ print " $details\n";
+ print " $regex_lru_shrink_inactive/o\n";
+ next;
+ }
+ my $nr_reclaimed = $4;
+ $perprocesspid{$process_pid}->{HIGH_NR_RECLAIMED} += $nr_reclaimed;
} elsif ($tracepoint eq "mm_vmscan_writepage") {
$details = $5;
if ($details !~ /$regex_writepage/o) {
@@ -464,8 +476,8 @@ sub dump_stats {
# Print out process activity
printf("\n");
- printf("%-" . $max_strlen . "s %8s %10s %8s %8s %8s %8s %8s\n", "Process", "Direct", "Wokeup", "Pages", "Pages", "Pages", "Time");
- printf("%-" . $max_strlen . "s %8s %10s %8s %8s %8s %8s %8s\n", "details", "Rclms", "Kswapd", "Scanned", "Sync-IO", "ASync-IO", "Stalled");
+ printf("%-" . $max_strlen . "s %8s %10s %8s %8s %8s %8s %8s %8s\n", "Process", "Direct", "Wokeup", "Pages", "Pages", "Pages", "Pages", "Time");
+ printf("%-" . $max_strlen . "s %8s %10s %8s %8s %8s %8s %8s %8s\n", "details", "Rclms", "Kswapd", "Scanned", "Rclmed", "Sync-IO", "ASync-IO", "Stalled");
foreach $process_pid (keys %stats) {
if (!$stats{$process_pid}->{MM_VMSCAN_DIRECT_RECLAIM_BEGIN}) {
@@ -475,6 +487,7 @@ sub dump_stats {
$total_direct_reclaim += $stats{$process_pid}->{MM_VMSCAN_DIRECT_RECLAIM_BEGIN};
$total_wakeup_kswapd += $stats{$process_pid}->{MM_VMSCAN_WAKEUP_KSWAPD};
$total_direct_nr_scanned += $stats{$process_pid}->{HIGH_NR_SCANNED};
+ $total_direct_nr_reclaimed += $stats{$process_pid}->{HIGH_NR_RECLAIMED};
$total_direct_writepage_file_sync += $stats{$process_pid}->{MM_VMSCAN_WRITEPAGE_FILE_SYNC};
$total_direct_writepage_anon_sync += $stats{$process_pid}->{MM_VMSCAN_WRITEPAGE_ANON_SYNC};
$total_direct_writepage_file_async += $stats{$process_pid}->{MM_VMSCAN_WRITEPAGE_FILE_ASYNC};
@@ -489,11 +502,12 @@ sub dump_stats {
$index++;
}
- printf("%-" . $max_strlen . "s %8d %10d %8u %8u %8u %8.3f",
+ printf("%-" . $max_strlen . "s %8d %10d %8u %8u %8u %8u %8.3f",
$process_pid,
$stats{$process_pid}->{MM_VMSCAN_DIRECT_RECLAIM_BEGIN},
$stats{$process_pid}->{MM_VMSCAN_WAKEUP_KSWAPD},
$stats{$process_pid}->{HIGH_NR_SCANNED},
+ $stats{$process_pid}->{HIGH_NR_RECLAIMED},
$stats{$process_pid}->{MM_VMSCAN_WRITEPAGE_FILE_SYNC} + $stats{$process_pid}->{MM_VMSCAN_WRITEPAGE_ANON_SYNC},
$stats{$process_pid}->{MM_VMSCAN_WRITEPAGE_FILE_ASYNC} + $stats{$process_pid}->{MM_VMSCAN_WRITEPAGE_ANON_ASYNC},
$this_reclaim_delay / 1000);
@@ -529,8 +543,8 @@ sub dump_stats {
# Print out kswapd activity
printf("\n");
- printf("%-" . $max_strlen . "s %8s %10s %8s %8s %8s %8s\n", "Kswapd", "Kswapd", "Order", "Pages", "Pages", "Pages");
- printf("%-" . $max_strlen . "s %8s %10s %8s %8s %8s %8s\n", "Instance", "Wakeups", "Re-wakeup", "Scanned", "Sync-IO", "ASync-IO");
+ printf("%-" . $max_strlen . "s %8s %10s %8s %8s %8s %8s\n", "Kswapd", "Kswapd", "Order", "Pages", "Pages", "Pages", "Pages");
+ printf("%-" . $max_strlen . "s %8s %10s %8s %8s %8s %8s\n", "Instance", "Wakeups", "Re-wakeup", "Scanned", "Rclmed", "Sync-IO", "ASync-IO");
foreach $process_pid (keys %stats) {
if (!$stats{$process_pid}->{MM_VMSCAN_KSWAPD_WAKE}) {
@@ -539,16 +553,18 @@ sub dump_stats {
$total_kswapd_wake += $stats{$process_pid}->{MM_VMSCAN_KSWAPD_WAKE};
$total_kswapd_nr_scanned += $stats{$process_pid}->{HIGH_NR_SCANNED};
+ $total_kswapd_nr_reclaimed += $stats{$process_pid}->{HIGH_NR_RECLAIMED};
$total_kswapd_writepage_file_sync += $stats{$process_pid}->{MM_VMSCAN_WRITEPAGE_FILE_SYNC};
$total_kswapd_writepage_anon_sync += $stats{$process_pid}->{MM_VMSCAN_WRITEPAGE_ANON_SYNC};
$total_kswapd_writepage_file_async += $stats{$process_pid}->{MM_VMSCAN_WRITEPAGE_FILE_ASYNC};
$total_kswapd_writepage_anon_async += $stats{$process_pid}->{MM_VMSCAN_WRITEPAGE_ANON_ASYNC};
- printf("%-" . $max_strlen . "s %8d %10d %8u %8i %8u",
+ printf("%-" . $max_strlen . "s %8d %10d %8u %8u %8i %8u",
$process_pid,
$stats{$process_pid}->{MM_VMSCAN_KSWAPD_WAKE},
$stats{$process_pid}->{HIGH_KSWAPD_REWAKEUP},
$stats{$process_pid}->{HIGH_NR_SCANNED},
+ $stats{$process_pid}->{HIGH_NR_RECLAIMED},
$stats{$process_pid}->{MM_VMSCAN_WRITEPAGE_FILE_SYNC} + $stats{$process_pid}->{MM_VMSCAN_WRITEPAGE_ANON_SYNC},
$stats{$process_pid}->{MM_VMSCAN_WRITEPAGE_FILE_ASYNC} + $stats{$process_pid}->{MM_VMSCAN_WRITEPAGE_ANON_ASYNC});
@@ -579,6 +595,7 @@ sub dump_stats {
print "\nSummary\n";
print "Direct reclaims: $total_direct_reclaim\n";
print "Direct reclaim pages scanned: $total_direct_nr_scanned\n";
+ print "Direct reclaim pages reclaimed: $total_direct_nr_reclaimed\n";
print "Direct reclaim write file sync I/O: $total_direct_writepage_file_sync\n";
print "Direct reclaim write anon sync I/O: $total_direct_writepage_anon_sync\n";
print "Direct reclaim write file async I/O: $total_direct_writepage_file_async\n";
@@ -588,6 +605,7 @@ sub dump_stats {
print "\n";
print "Kswapd wakeups: $total_kswapd_wake\n";
print "Kswapd pages scanned: $total_kswapd_nr_scanned\n";
+ print "Kswapd pages reclaimed: $total_kswapd_nr_reclaimed\n";
print "Kswapd reclaim write file sync I/O: $total_kswapd_writepage_file_sync\n";
print "Kswapd reclaim write anon sync I/O: $total_kswapd_writepage_anon_sync\n";
print "Kswapd reclaim write file async I/O: $total_kswapd_writepage_file_async\n";
@@ -612,6 +630,7 @@ sub aggregate_perprocesspid() {
$perprocess{$process}->{MM_VMSCAN_WAKEUP_KSWAPD} += $perprocesspid{$process_pid}->{MM_VMSCAN_WAKEUP_KSWAPD};
$perprocess{$process}->{HIGH_KSWAPD_REWAKEUP} += $perprocesspid{$process_pid}->{HIGH_KSWAPD_REWAKEUP};
$perprocess{$process}->{HIGH_NR_SCANNED} += $perprocesspid{$process_pid}->{HIGH_NR_SCANNED};
+ $perprocess{$process}->{HIGH_NR_RECLAIMED} += $perprocesspid{$process_pid}->{HIGH_NR_RECLAIMED};
$perprocess{$process}->{MM_VMSCAN_WRITEPAGE_FILE_SYNC} += $perprocesspid{$process_pid}->{MM_VMSCAN_WRITEPAGE_FILE_SYNC};
$perprocess{$process}->{MM_VMSCAN_WRITEPAGE_ANON_SYNC} += $perprocesspid{$process_pid}->{MM_VMSCAN_WRITEPAGE_ANON_SYNC};
$perprocess{$process}->{MM_VMSCAN_WRITEPAGE_FILE_ASYNC} += $perprocesspid{$process_pid}->{MM_VMSCAN_WRITEPAGE_FILE_ASYNC};
diff --git a/Documentation/video4linux/CARDLIST.cx88 b/Documentation/video4linux/CARDLIST.cx88
index f2510541373b..42517d9121de 100644
--- a/Documentation/video4linux/CARDLIST.cx88
+++ b/Documentation/video4linux/CARDLIST.cx88
@@ -83,3 +83,4 @@
82 -> WinFast DTV2000 H rev. J [107d:6f2b]
83 -> Prof 7301 DVB-S/S2 [b034:3034]
84 -> Samsung SMT 7020 DVB-S [18ac:dc00,18ac:dccd]
+ 85 -> Twinhan VP-1027 DVB-S [1822:0023]
diff --git a/Documentation/video4linux/CARDLIST.em28xx b/Documentation/video4linux/CARDLIST.em28xx
index 5c568757c301..ac2616a62fc3 100644
--- a/Documentation/video4linux/CARDLIST.em28xx
+++ b/Documentation/video4linux/CARDLIST.em28xx
@@ -31,6 +31,7 @@
30 -> Videology 20K14XUSB USB2.0 (em2820/em2840)
31 -> Usbgear VD204v9 (em2821)
32 -> Supercomp USB 2.0 TV (em2821)
+ 33 -> Elgato Video Capture (em2860) [0fd9:0033]
34 -> Terratec Cinergy A Hybrid XS (em2860) [0ccd:004f]
35 -> Typhoon DVD Maker (em2860)
36 -> NetGMBH Cam (em2860)
@@ -45,7 +46,7 @@
45 -> Pinnacle PCTV DVB-T (em2870)
46 -> Compro, VideoMate U3 (em2870) [185b:2870]
47 -> KWorld DVB-T 305U (em2880) [eb1a:e305]
- 48 -> KWorld DVB-T 310U (em2880) [eb1a:e310]
+ 48 -> KWorld DVB-T 310U (em2880)
49 -> MSI DigiVox A/D (em2880) [eb1a:e310]
50 -> MSI DigiVox A/D II (em2880) [eb1a:e320]
51 -> Terratec Hybrid XS Secam (em2880) [0ccd:004c]
diff --git a/Documentation/video4linux/CARDLIST.saa7134 b/Documentation/video4linux/CARDLIST.saa7134
index 4000c29fcfb6..8d9afc7d8014 100644
--- a/Documentation/video4linux/CARDLIST.saa7134
+++ b/Documentation/video4linux/CARDLIST.saa7134
@@ -126,7 +126,7 @@
125 -> Beholder BeholdTV 409 [0000:4090]
126 -> Beholder BeholdTV 505 FM [5ace:5050]
127 -> Beholder BeholdTV 507 FM / BeholdTV 509 FM [5ace:5070,5ace:5090]
-128 -> Beholder BeholdTV Columbus TVFM [0000:5201]
+128 -> Beholder BeholdTV Columbus TV/FM [0000:5201]
129 -> Beholder BeholdTV 607 FM [5ace:6070]
130 -> Beholder BeholdTV M6 [5ace:6190]
131 -> Twinhan Hybrid DTV-DVB 3056 PCI [1822:0022]
diff --git a/Documentation/video4linux/bttv/MAKEDEV b/Documentation/video4linux/bttv/MAKEDEV
index 9d112f7fd5f7..093c0cd18042 100644
--- a/Documentation/video4linux/bttv/MAKEDEV
+++ b/Documentation/video4linux/bttv/MAKEDEV
@@ -19,7 +19,6 @@ function makedev () {
echo "*** new device names ***"
makedev video 0
makedev radio 64
-makedev vtx 192
makedev vbi 224
#echo "*** old device names (for compatibility only) ***"
diff --git a/Documentation/video4linux/gspca.txt b/Documentation/video4linux/gspca.txt
index 56ba7bba7168..6a562eeeb4cd 100644
--- a/Documentation/video4linux/gspca.txt
+++ b/Documentation/video4linux/gspca.txt
@@ -302,12 +302,14 @@ sonixj 0c45:60fb Surfer NoName
sonixj 0c45:60fc LG-LIC300
sonixj 0c45:60fe Microdia Audio
sonixj 0c45:6100 PC Camera (SN9C128)
+sonixj 0c45:6102 PC Camera (SN9C128)
sonixj 0c45:610a PC Camera (SN9C128)
sonixj 0c45:610b PC Camera (SN9C128)
sonixj 0c45:610c PC Camera (SN9C128)
sonixj 0c45:610e PC Camera (SN9C128)
sonixj 0c45:6128 Microdia/Sonix SNP325
sonixj 0c45:612a Avant Camera
+sonixj 0c45:612b Speed-Link REFLECT2
sonixj 0c45:612c Typhoon Rasy Cam 1.3MPix
sonixj 0c45:6130 Sonix Pccam
sonixj 0c45:6138 Sn9c120 Mo4000
diff --git a/Documentation/video4linux/v4l2-framework.txt b/Documentation/video4linux/v4l2-framework.txt
index e831aaca66f8..f22f35c271f3 100644
--- a/Documentation/video4linux/v4l2-framework.txt
+++ b/Documentation/video4linux/v4l2-framework.txt
@@ -44,8 +44,8 @@ All drivers have the following structure:
2) A way of initializing and commanding sub-devices (if any).
-3) Creating V4L2 device nodes (/dev/videoX, /dev/vbiX, /dev/radioX and
- /dev/vtxX) and keeping track of device-node specific data.
+3) Creating V4L2 device nodes (/dev/videoX, /dev/vbiX and /dev/radioX)
+ and keeping track of device-node specific data.
4) Filehandle-specific structs containing per-filehandle data;
@@ -192,6 +192,11 @@ You also need a way to go from the low-level struct to v4l2_subdev. For the
common i2c_client struct the i2c_set_clientdata() call is used to store a
v4l2_subdev pointer, for other busses you may have to use other methods.
+Bridges might also need to store their own per-subdev private data, such as
+a pointer to bridge-specific state for each sub-device. The v4l2_subdev
+structure provides host private data for that purpose, which can be accessed
+with
+v4l2_get_subdev_hostdata() and v4l2_set_subdev_hostdata().
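+
+A minimal sketch of that pattern (the bridge state structure is an invented
+example, not something defined by the framework):
+
+	v4l2_set_subdev_hostdata(sd, bridge_state);
+	/* ... later, when only the sub-device pointer is at hand ... */
+	struct mybridge_state *state = v4l2_get_subdev_hostdata(sd);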
+
From the bridge driver perspective you load the sub-device module and somehow
obtain the v4l2_subdev pointer. For i2c devices this is easy: you call
i2c_get_clientdata(). For other busses something similar needs to be done.
@@ -448,6 +453,10 @@ You should also set these fields:
- ioctl_ops: if you use the v4l2_ioctl_ops to simplify ioctl maintenance
(highly recommended to use this and it might become compulsory in the
future!), then set this to your v4l2_ioctl_ops struct.
+- lock: leave to NULL if you want to do all the locking in the driver.
+ Otherwise you give it a pointer to a struct mutex, and before any
+ of the v4l2_file_operations is called this lock will be taken by the
+ core and released afterwards.
- parent: you only set this if v4l2_device was registered with NULL as
the parent device struct. This only happens in cases where one hardware
device has multiple PCI devices that all share the same v4l2_device core.
@@ -464,6 +473,22 @@ If you use v4l2_ioctl_ops, then you should set either .unlocked_ioctl or
The v4l2_file_operations struct is a subset of file_operations. The main
difference is that the inode argument is omitted since it is never used.
+v4l2_file_operations and locking
+--------------------------------
+
+You can set a pointer to a mutex in struct video_device. Usually this
+will be either a top-level mutex or a mutex per device node. If you want
+finer-grained locking then you have to set it to NULL and do your own locking.
+
+If a lock is specified then all file operations will be serialized on that
+lock. If you use videobuf then you must pass the same lock to the videobuf
+queue initialization function: if videobuf has to wait for a frame to arrive, then
+it will temporarily unlock the lock and relock it afterwards. If your driver
+also waits in the code, then you should do the same to allow other processes
+to access the device node while the first process is waiting for something.
+
+The implementation of a hotplug disconnect should also take the lock before
+calling v4l2_device_disconnect.
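+
+A rough sketch of the per-device-node variant described above (the mutex
+name is illustrative, error handling omitted):
+
+	static DEFINE_MUTEX(mydev_lock);
+
+	vdev->lock = &mydev_lock;	/* core takes this around file ops */
+	/* hand the very same mutex to the videobuf queue init function so
+	   videobuf can drop and retake it while waiting for a frame */
+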
video_device registration
-------------------------
@@ -483,7 +508,6 @@ types exist:
VFL_TYPE_GRABBER: videoX for video input/output devices
VFL_TYPE_VBI: vbiX for vertical blank data (i.e. closed captions, teletext)
VFL_TYPE_RADIO: radioX for radio tuners
-VFL_TYPE_VTX: vtxX for teletext devices (deprecated, don't use)
The last argument gives you a certain amount of control over the device
device node number used (i.e. the X in videoX). Normally you will pass -1
@@ -547,9 +571,8 @@ from /dev).
After video_unregister_device() returns no new opens can be done. However,
in the case of USB devices some application might still have one of these
-device nodes open. So after the unregister all file operations will return
-an error as well, except for the ioctl and unlocked_ioctl file operations:
-those will still be passed on since some buffer ioctls may still be needed.
+device nodes open. So after the unregister all file operations (except
+release, of course) will return an error as well.
When the last user of the video device node exits, then the vdev->release()
callback is called and you can do the final cleanup there.
diff --git a/Documentation/vm/highmem.txt b/Documentation/vm/highmem.txt
new file mode 100644
index 000000000000..4324d24ffacd
--- /dev/null
+++ b/Documentation/vm/highmem.txt
@@ -0,0 +1,162 @@
+
+ ====================
+ HIGH MEMORY HANDLING
+ ====================
+
+By: Peter Zijlstra <a.p.zijlstra@chello.nl>
+
+Contents:
+
+ (*) What is high memory?
+
+ (*) Temporary virtual mappings.
+
+ (*) Using kmap_atomic.
+
+ (*) Cost of temporary mappings.
+
+ (*) i386 PAE.
+
+
+====================
+WHAT IS HIGH MEMORY?
+====================
+
+High memory (highmem) is used when the size of physical memory approaches or
+exceeds the maximum size of virtual memory. At that point it becomes
+impossible for the kernel to keep all of the available physical memory mapped
+at all times. This means the kernel needs to start using temporary mappings of
+the pieces of physical memory that it wants to access.
+
+The part of (physical) memory not covered by a permanent mapping is what we
+refer to as 'highmem'. There are various architecture dependent constraints on
+where exactly that border lies.
+
+In the i386 arch, for example, we choose to map the kernel into every process's
+VM space so that we don't have to pay the full TLB invalidation costs for
+kernel entry/exit. This means the available virtual memory space (4GiB on
+i386) has to be divided between user and kernel space.
+
+The traditional split for architectures using this approach is 3:1, 3GiB for
+userspace and the top 1GiB for kernel space:
+
+ +--------+ 0xffffffff
+ | Kernel |
+ +--------+ 0xc0000000
+ | |
+ | User |
+ | |
+ +--------+ 0x00000000
+
+This means that the kernel can at most map 1GiB of physical memory at any one
+time, but because we need virtual address space for other things - including
+temporary maps to access the rest of the physical memory - the actual direct
+map will typically be less (usually around ~896MiB).
+
+Other architectures that have mm context tagged TLBs can have separate kernel
+and user maps. Some hardware (like some ARMs), however, has limited virtual
+space when it uses mm context tags.
+
+
+==========================
+TEMPORARY VIRTUAL MAPPINGS
+==========================
+
+The kernel contains several ways of creating temporary mappings:
+
+ (*) vmap(). This can be used to make a long duration mapping of multiple
+ physical pages into a contiguous virtual space. It needs global
+ synchronization to unmap.
+
+ (*) kmap(). This permits a short duration mapping of a single page. It needs
+ global synchronization, but is amortized somewhat. It is also prone to
+ deadlocks when used in a nested fashion, and so it is not recommended for
+ new code.
+
+ (*) kmap_atomic(). This permits a very short duration mapping of a single
+ page. Since the mapping is restricted to the CPU that issued it, it
+ performs well, but the issuing task is therefore required to stay on that
+ CPU until it has finished, lest some other task displace its mappings.
+
+ kmap_atomic() may also be used by interrupt contexts, since it does not
+ sleep and the caller may not sleep until after kunmap_atomic() is called.
+
+ It may be assumed that k[un]map_atomic() won't fail.
+
+
+=================
+USING KMAP_ATOMIC
+=================
+
+When and where to use kmap_atomic() is straightforward. It is used when code
+wants to access the contents of a page that might be allocated from high memory
+(see __GFP_HIGHMEM), for example a page in the pagecache. The API has two
+functions, and they can be used in a manner similar to the following:
+
+ /* Find the page of interest. */
+ struct page *page = find_get_page(mapping, offset);
+
+ /* Gain access to the contents of that page. */
+ void *vaddr = kmap_atomic(page);
+
+ /* Do something to the contents of that page. */
+ memset(vaddr, 0, PAGE_SIZE);
+
+ /* Unmap that page. */
+ kunmap_atomic(vaddr);
+
+Note that the kunmap_atomic() call takes the result of the kmap_atomic() call,
+not the argument.
+
+If you need to map two pages because you want to copy from one page to
+another you need to keep the kmap_atomic calls strictly nested, like:
+
+ vaddr1 = kmap_atomic(page1);
+ vaddr2 = kmap_atomic(page2);
+
+ memcpy(vaddr1, vaddr2, PAGE_SIZE);
+
+ kunmap_atomic(vaddr2);
+ kunmap_atomic(vaddr1);
+
+
+==========================
+COST OF TEMPORARY MAPPINGS
+==========================
+
+The cost of creating temporary mappings can be quite high. The arch has to
+manipulate the kernel's page tables, the data TLB and/or the MMU's registers.
+
+If CONFIG_HIGHMEM is not set, then the kernel will try and create a mapping
+simply with a bit of arithmetic that will convert the page struct address into
+a pointer to the page contents rather than juggling mappings about. In such a
+case, the unmap operation may be a null operation.
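+
+Roughly speaking, in that configuration the mapping step degenerates into
+something like the following (a sketch, not the exact implementation):
+
+	/* no highmem: the page is permanently mapped, so just compute its
+	   virtual address in the direct map */
+	void *vaddr = page_address(page);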
+
+If CONFIG_MMU is not set, then there can be no temporary mappings and no
+highmem. In such a case, the arithmetic approach will also be used.
+
+
+========
+i386 PAE
+========
+
+The i386 arch, under some circumstances, will permit you to stick up to 64GiB
+of RAM into your 32-bit machine. This has a number of consequences:
+
+ (*) Linux needs a page-frame structure for each page in the system and the
+ pageframes need to live in the permanent mapping, which means:
+
+ (*) you can have 896M/sizeof(struct page) page-frames at most; with struct
+ page being 32 bytes that ends up being something in the order of 112G
+ worth of pages (see the arithmetic below); the kernel, however, needs
+ to store more than just page-frames in that memory...
+
+ (*) PAE makes your page tables larger - which slows the system down as more
+ data has to be accessed to traverse the page tables in TLB fills and the like. One
+ advantage is that PAE has more PTE bits and can provide advanced features
+ like NX and PAT.
+
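+As a rough sanity check of that arithmetic (assuming 4K pages and a 32-byte
+struct page):
+
+	896M / 32 bytes per struct page  =  28M page frames
+	28M page frames * 4K per page    = 112G of RAM described
+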
+The general recommendation is that you don't use more than 8GiB on a 32-bit
+machine - although more might work for you and your workload, you're pretty
+much on your own - don't expect kernel developers to really care much if things
+come apart.
diff --git a/Kbuild b/Kbuild
index 431f7ca2404c..2114113ceca2 100644
--- a/Kbuild
+++ b/Kbuild
@@ -53,7 +53,7 @@ targets += arch/$(SRCARCH)/kernel/asm-offsets.s
# Default sed regexp - multiline due to syntax constraints
define sed-y
"/^->/{s:->#\(.*\):/* \1 */:; \
- s:^->\([^ ]*\) [\$$#]*\([-0-9]*\) \(.*\):#define \1 (\2) /* \3 */:; \
+ s:^->\([^ ]*\) [\$$#]*\([-0-9]*\) \(.*\):#define \1 \2 /* \3 */:; \
s:^->\([^ ]*\) [\$$#]*\([^ ]*\) \(.*\):#define \1 \2 /* \3 */:; \
s:->::; p;}"
endef
@@ -95,5 +95,5 @@ PHONY += missing-syscalls
missing-syscalls: scripts/checksyscalls.sh FORCE
$(call cmd,syscalls)
-# Delete all targets during make clean
-clean-files := $(addprefix $(objtree)/,$(filter-out $(bounds-file) $(offsets-file),$(targets)))
+# Keep these two files during make clean
+no-clean-files := $(bounds-file) $(offsets-file)
diff --git a/Kconfig b/Kconfig
new file mode 100644
index 000000000000..c13f48d65898
--- /dev/null
+++ b/Kconfig
@@ -0,0 +1,11 @@
+#
+# For a description of the syntax of this configuration file,
+# see Documentation/kbuild/kconfig-language.txt.
+#
+mainmenu "Linux/$ARCH $KERNELVERSION Kernel Configuration"
+
+config SRCARCH
+ string
+ option env="SRCARCH"
+
+source "arch/$SRCARCH/Kconfig"
diff --git a/MAINTAINERS b/MAINTAINERS
index 69aa8fe060b3..1a1c27b9c557 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -161,7 +161,7 @@ M: Greg Kroah-Hartman <gregkh@suse.de>
L: linux-serial@vger.kernel.org
W: http://serial.sourceforge.net
S: Maintained
-T: quilt kernel.org/pub/linux/kernel/people/gregkh/gregkh-2.6/
+T: git git://git.kernel.org/pub/scm/linux/kernel/git/gregkh/tty-2.6.git
F: drivers/serial/8250*
F: include/linux/serial_8250.h
@@ -243,21 +243,6 @@ F: drivers/pnp/pnpacpi/
F: include/linux/acpi.h
F: include/acpi/
-ACPI BATTERY DRIVERS
-M: Alexey Starikovskiy <astarikovskiy@suse.de>
-L: linux-acpi@vger.kernel.org
-W: http://www.lesswatts.org/projects/acpi/
-S: Supported
-F: drivers/acpi/battery.c
-F: drivers/acpi/*sbs*
-
-ACPI EC DRIVER
-M: Alexey Starikovskiy <astarikovskiy@suse.de>
-L: linux-acpi@vger.kernel.org
-W: http://www.lesswatts.org/projects/acpi/
-S: Supported
-F: drivers/acpi/ec.c
-
ACPI FAN DRIVER
M: Zhang Rui <rui.zhang@intel.com>
L: linux-acpi@vger.kernel.org
@@ -447,7 +432,7 @@ AMS (Apple Motion Sensor) DRIVER
M: Stelian Pop <stelian@popies.net>
M: Michael Hanselmann <linux-kernel@hansmi.ch>
S: Supported
-F: drivers/hwmon/ams/
+F: drivers/macintosh/ams/
AMSO1100 RNIC DRIVER
M: Tom Tucker <tom@opengridcomputing.com>
@@ -657,7 +642,7 @@ ARM/FARADAY FA526 PORT
M: Hans Ulli Kroll <ulli.kroll@googlemail.com>
L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
S: Maintained
-T: git://git.berlios.de/gemini-board
+T: git git://git.berlios.de/gemini-board
F: arch/arm/mm/*-fa*
ARM/FOOTBRIDGE ARCHITECTURE
@@ -672,7 +657,7 @@ ARM/FREESCALE IMX / MXC ARM ARCHITECTURE
M: Sascha Hauer <kernel@pengutronix.de>
L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
S: Maintained
-T: git://git.pengutronix.de/git/imx/linux-2.6.git
+T: git git://git.pengutronix.de/git/imx/linux-2.6.git
F: arch/arm/mach-mx*/
F: arch/arm/plat-mxc/
@@ -710,8 +695,7 @@ ARM/INCOME PXA270 SUPPORT
M: Marek Vasut <marek.vasut@gmail.com>
L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
S: Maintained
-F: arch/arm/mach-pxa/income.c
-F: arch/arm/mach-pxa/include/mach-pxa/income.h
+F: arch/arm/mach-pxa/colibri-pxa270-income.c
ARM/INTEL IOP32X ARM ARCHITECTURE
M: Lennert Buytenhek <kernel@wantstofly.org>
@@ -758,13 +742,7 @@ L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
S: Maintained
F: arch/arm/mach-ixp4xx/
-ARM/INTEL RESEARCH IMOTE 2 MACHINE SUPPORT
-M: Jonathan Cameron <jic23@cam.ac.uk>
-L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
-S: Maintained
-F: arch/arm/mach-pxa/imote2.c
-
-ARM/INTEL RESEARCH STARGATE 2 MACHINE SUPPORT
+ARM/INTEL RESEARCH IMOTE/STARGATE 2 MACHINE SUPPORT
M: Jonathan Cameron <jic23@cam.ac.uk>
L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
S: Maintained
@@ -929,40 +907,20 @@ W: http://www.fluff.org/ben/linux/
S: Maintained
F: arch/arm/mach-s3c2410/
-ARM/S3C2440 ARM ARCHITECTURE
+ARM/S3C244x ARM ARCHITECTURE
M: Ben Dooks <ben-linux@fluff.org>
L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
W: http://www.fluff.org/ben/linux/
S: Maintained
F: arch/arm/mach-s3c2440/
-
-ARM/S3C2442 ARM ARCHITECTURE
-M: Ben Dooks <ben-linux@fluff.org>
-L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
-W: http://www.fluff.org/ben/linux/
-S: Maintained
-F: arch/arm/mach-s3c2442/
-
-ARM/S3C2443 ARM ARCHITECTURE
-M: Ben Dooks <ben-linux@fluff.org>
-L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
-W: http://www.fluff.org/ben/linux/
-S: Maintained
F: arch/arm/mach-s3c2443/
-ARM/S3C6400 ARM ARCHITECTURE
-M: Ben Dooks <ben-linux@fluff.org>
-L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
-W: http://www.fluff.org/ben/linux/
-S: Maintained
-F: arch/arm/mach-s3c6400/
-
-ARM/S3C6410 ARM ARCHITECTURE
+ARM/S3C64xx ARM ARCHITECTURE
M: Ben Dooks <ben-linux@fluff.org>
L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
W: http://www.fluff.org/ben/linux/
S: Maintained
-F: arch/arm/mach-s3c6410/
+F: arch/arm/mach-s3c64xx/
ARM/S5P ARM ARCHITECTURES
M: Kukjin Kim <kgene.kim@samsung.com>
@@ -987,7 +945,7 @@ M: Magnus Damm <magnus.damm@gmail.com>
L: linux-sh@vger.kernel.org
W: http://oss.renesas.com
Q: http://patchwork.kernel.org/project/linux-sh/list/
-T: git git://git.kernel.org/pub/scm/linux/kernel/git/lethal/genesis-2.6.git
+T: git git://git.kernel.org/pub/scm/linux/kernel/git/lethal/sh-2.6.git rmobile-latest
S: Supported
F: arch/arm/mach-shmobile/
F: drivers/sh/
@@ -1401,7 +1359,7 @@ F: include/net/bluetooth/
BONDING DRIVER
M: Jay Vosburgh <fubar@us.ibm.com>
-L: bonding-devel@lists.sourceforge.net
+L: netdev@vger.kernel.org
W: http://sourceforge.net/projects/bonding/
S: Supported
F: drivers/net/bonding/
@@ -1433,6 +1391,14 @@ L: netdev@vger.kernel.org
S: Supported
F: drivers/net/tg3.*
+BROADCOM BRCM80211 IEEE802.11n WIRELESS DRIVER
+M: Brett Rudley <brudley@broadcom.com>
+M: Henry Ptasinski <henryp@broadcom.com>
+M: Nohee Ko <noheek@broadcom.com>
+L: linux-wireless@vger.kernel.org
+S: Supported
+F: drivers/staging/brcm80211/
+
BROCADE BFA FC SCSI DRIVER
M: Jing Huang <huangj@brocade.com>
L: linux-scsi@vger.kernel.org
@@ -1562,9 +1528,8 @@ F: net/ceph
F: include/linux/ceph
CERTIFIED WIRELESS USB (WUSB) SUBSYSTEM:
-M: David Vrabel <david.vrabel@csr.com>
L: linux-usb@vger.kernel.org
-S: Supported
+S: Orphan
F: Documentation/usb/WUSB-Design-overview.txt
F: Documentation/usb/wusb-cbaf
F: drivers/usb/host/hwa-hc.c
@@ -1648,7 +1613,7 @@ F: drivers/platform/x86/classmate-laptop.c
COCCINELLE/Semantic Patches (SmPL)
M: Julia Lawall <julia@diku.dk>
M: Gilles Muller <Gilles.Muller@lip6.fr>
-M: Nicolas Palix <npalix@diku.dk>
+M: Nicolas Palix <npalix.work@gmail.com>
L: cocci@diku.dk (moderated for non-subscribers)
W: http://coccinelle.lip6.fr/
S: Supported
@@ -1792,6 +1757,7 @@ L: linux-cris-kernel@axis.com
W: http://developer.axis.com
S: Maintained
F: arch/cris/
+F: drivers/serial/crisv10.*
CRYPTO API
M: Herbert Xu <herbert@gondor.apana.org.au>
@@ -1863,6 +1829,13 @@ W: http://www.chelsio.com
S: Supported
F: drivers/net/cxgb4vf/
+STMMAC ETHERNET DRIVER
+M: Giuseppe Cavallaro <peppe.cavallaro@st.com>
+L: netdev@vger.kernel.org
+W: http://www.stlinux.com
+S: Supported
+F: drivers/net/stmmac/
+
CYBERPRO FB DRIVER
M: Russell King <linux@arm.linux.org.uk>
L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
@@ -2042,6 +2015,7 @@ F: drivers/hwmon/dme1737.c
DOCBOOK FOR DOCUMENTATION
M: Randy Dunlap <rdunlap@xenotime.net>
S: Maintained
+F: scripts/kernel-doc
DOCKING STATION DRIVER
M: Shaohua Li <shaohua.li@intel.com>
@@ -2052,6 +2026,7 @@ F: drivers/acpi/dock.c
DOCUMENTATION
M: Randy Dunlap <rdunlap@xenotime.net>
L: linux-doc@vger.kernel.org
+T: quilt oss.oracle.com/~rdunlap/kernel-doc-patches/current/
S: Maintained
F: Documentation/
@@ -2085,7 +2060,7 @@ F: Documentation/blockdev/drbd/
DRIVER CORE, KOBJECTS, DEBUGFS AND SYSFS
M: Greg Kroah-Hartman <gregkh@suse.de>
-T: quilt kernel.org/pub/linux/kernel/people/gregkh/gregkh-2.6/
+T: git git://git.kernel.org/pub/scm/linux/kernel/git/gregkh/driver-core-2.6.git
S: Supported
F: Documentation/kobject.txt
F: drivers/base/
@@ -2103,6 +2078,15 @@ S: Maintained
F: drivers/gpu/drm/
F: include/drm/
+INTEL DRM DRIVERS (excluding Poulsbo, Moorestown and derivative chipsets)
+M: Chris Wilson <chris@chris-wilson.co.uk>
+L: intel-gfx@lists.freedesktop.org (subscribers-only)
+L: dri-devel@lists.freedesktop.org
+T: git git://git.kernel.org/pub/scm/linux/kernel/git/ickle/drm-intel.git
+S: Supported
+F: drivers/gpu/drm/i915
+F: include/drm/i915*
+
DSCC4 DRIVER
M: Francois Romieu <romieu@fr.zoreil.com>
L: netdev@vger.kernel.org
@@ -2460,9 +2444,12 @@ F: drivers/net/wan/sdla.c
FRAMEBUFFER LAYER
L: linux-fbdev@vger.kernel.org
W: http://linux-fbdev.sourceforge.net/
+Q: http://patchwork.kernel.org/project/linux-fbdev/list/
+T: git git://git.kernel.org/pub/scm/linux/kernel/git/lethal/fbdev-2.6.git
S: Orphan
F: Documentation/fb/
-F: drivers/video/fb*
+F: drivers/video/
+F: include/video/
F: include/linux/fb.h
FREESCALE DMA DRIVER
@@ -2646,10 +2633,10 @@ F: drivers/net/greth*
HARD DRIVE ACTIVE PROTECTION SYSTEM (HDAPS) DRIVER
M: Frank Seidel <frank@f-seidel.de>
-L: lm-sensors@lm-sensors.org
+L: platform-driver-x86@vger.kernel.org
W: http://www.kernel.org/pub/linux/kernel/people/fseidel/hdaps/
S: Maintained
-F: drivers/hwmon/hdaps.c
+F: drivers/platform/x86/hdaps.c
HWPOISON MEMORY FAILURE HANDLING
M: Andi Kleen <andi@firstfloor.org>
@@ -3016,7 +3003,7 @@ M: Roland Dreier <rolandd@cisco.com>
M: Sean Hefty <sean.hefty@intel.com>
M: Hal Rosenstock <hal.rosenstock@gmail.com>
L: linux-rdma@vger.kernel.org
-W: http://www.openib.org/
+W: http://www.openfabrics.org/
Q: http://patchwork.kernel.org/project/linux-rdma/list/
T: git git://git.kernel.org/pub/scm/linux/kernel/git/roland/infiniband.git
S: Supported
@@ -3417,8 +3404,8 @@ F: Documentation/kdump/
KERNEL AUTOMOUNTER (AUTOFS)
M: "H. Peter Anvin" <hpa@zytor.com>
L: autofs@linux.kernel.org
-S: Odd Fixes
-F: fs/autofs/
+S: Obsolete
+F: drivers/staging/autofs/
KERNEL AUTOMOUNTER v4 (AUTOFS4)
M: Ian Kent <raven@themaw.net>
@@ -3766,6 +3753,13 @@ L: linux-scsi@vger.kernel.org
S: Maintained
F: drivers/scsi/sym53c8xx_2/
+LTC4261 HARDWARE MONITOR DRIVER
+M: Guenter Roeck <linux@roeck-us.net>
+L: lm-sensors@lm-sensors.org
+S: Maintained
+F: Documentation/hwmon/ltc4261
+F: drivers/hwmon/ltc4261.c
+
LTP (Linux Test Project)
M: Rishikesh K Rajak <risrajak@linux.vnet.ibm.com>
M: Garrett Cooper <yanegomi@gmail.com>
@@ -3861,7 +3855,7 @@ F: drivers/net/wireless/mwl8k.c
MARVELL SOC MMC/SD/SDIO CONTROLLER DRIVER
M: Nicolas Pitre <nico@fluxnic.net>
S: Odd Fixes
-F: drivers/mmc/host/mvsdio.*
+F: drivers/mmc/host/mvsdio.*
MARVELL YUKON / SYSKONNECT DRIVER
M: Mirko Lindner <mlindner@syskonnect.de>
@@ -4070,9 +4064,8 @@ F: drivers/scsi/NCR_D700.*
NETEFFECT IWARP RNIC DRIVER (IW_NES)
M: Faisal Latif <faisal.latif@intel.com>
-M: Chien Tung <chien.tin.tung@intel.com>
L: linux-rdma@vger.kernel.org
-W: http://www.neteffect.com
+W: http://www.intel.com/Products/Server/Adapters/Server-Cluster/Server-Cluster-overview.htm
S: Supported
F: drivers/infiniband/hw/nes/
@@ -4475,7 +4468,7 @@ L: platform-driver-x86@vger.kernel.org
S: Maintained
F: drivers/platform/x86/panasonic-laptop.c
-PANASONIC MN10300/AM33 PORT
+PANASONIC MN10300/AM33/AM34 PORT
M: David Howells <dhowells@redhat.com>
M: Koichi Yasutake <yasutake.koichi@jp.panasonic.com>
L: linux-am33-list@redhat.com (moderated for non-subscribers)
@@ -4952,7 +4945,7 @@ RCUTORTURE MODULE
M: Josh Triplett <josh@freedesktop.org>
M: "Paul E. McKenney" <paulmck@linux.vnet.ibm.com>
S: Supported
-T: git://git.kernel.org/pub/scm/linux/kernel/git/paulmck/linux-2.6-rcu.git
+T: git git://git.kernel.org/pub/scm/linux/kernel/git/paulmck/linux-2.6-rcu.git
F: Documentation/RCU/torture.txt
F: kernel/rcutorture.c
@@ -4977,7 +4970,7 @@ M: Dipankar Sarma <dipankar@in.ibm.com>
M: "Paul E. McKenney" <paulmck@linux.vnet.ibm.com>
W: http://www.rdrop.com/users/paulmck/rclock/
S: Supported
-T: git://git.kernel.org/pub/scm/linux/kernel/git/paulmck/linux-2.6-rcu.git
+T: git git://git.kernel.org/pub/scm/linux/kernel/git/paulmck/linux-2.6-rcu.git
F: Documentation/RCU/
F: include/linux/rcu*
F: include/linux/srcu*
@@ -5167,6 +5160,16 @@ W: http://www.kernel.dk
S: Maintained
F: drivers/scsi/sr*
+SCSI RDMA PROTOCOL (SRP) INITIATOR
+M: David Dillow <dillowda@ornl.gov>
+L: linux-rdma@vger.kernel.org
+S: Supported
+W: http://www.openfabrics.org
+Q: http://patchwork.kernel.org/project/linux-rdma/list/
+T: git git://git.kernel.org/pub/scm/linux/kernel/git/dad/srp-initiator.git
+F: drivers/infiniband/ulp/srp/
+F: include/scsi/srp.h
+
SCSI SG DRIVER
M: Doug Gilbert <dgilbert@interlog.com>
L: linux-scsi@vger.kernel.org
@@ -5389,8 +5392,8 @@ F: drivers/*/*s3c2410*
F: drivers/*/*/*s3c2410*
TI DAVINCI MACHINE SUPPORT
-P: Kevin Hilman
-M: davinci-linux-open-source@linux.davincidsp.com
+M: Kevin Hilman <khilman@deeprootsystems.com>
+L: davinci-linux-open-source@linux.davincidsp.com (subscribers-only)
Q: http://patchwork.kernel.org/project/linux-davinci/list/
S: Supported
F: arch/arm/mach-davinci
@@ -5684,7 +5687,7 @@ S: Maintained
STAGING SUBSYSTEM
M: Greg Kroah-Hartman <gregkh@suse.de>
-T: quilt kernel.org/pub/linux/kernel/people/gregkh/gregkh-2.6/
+T: git git://git.kernel.org/pub/scm/linux/kernel/git/gregkh/staging-2.6.git
L: devel@driverdev.osuosl.org
S: Maintained
F: drivers/staging/
@@ -5713,7 +5716,7 @@ M: Paul Mundt <lethal@linux-sh.org>
L: linux-sh@vger.kernel.org
W: http://www.linux-sh.org
Q: http://patchwork.kernel.org/project/linux-sh/list/
-T: git git://git.kernel.org/pub/scm/linux/kernel/git/lethal/sh-2.6.git
+T: git git://git.kernel.org/pub/scm/linux/kernel/git/lethal/sh-2.6.git sh-latest
S: Supported
F: Documentation/sh/
F: arch/sh/
@@ -5835,6 +5838,8 @@ M: Chris Metcalf <cmetcalf@tilera.com>
W: http://www.tilera.com/scm/
S: Supported
F: arch/tile/
+F: drivers/char/hvc_tile.c
+F: drivers/net/tile/
TLAN NETWORK DRIVER
M: Samuel Chessman <chessman@tux.org>
@@ -5918,7 +5923,7 @@ S: Maintained
TTY LAYER
M: Greg Kroah-Hartman <gregkh@suse.de>
S: Maintained
-T: quilt kernel.org/pub/linux/kernel/people/gregkh/gregkh-2.6/
+T: git git://git.kernel.org/pub/scm/linux/kernel/git/gregkh/tty-2.6.git
F: drivers/char/tty_*
F: drivers/serial/serial_core.c
F: include/linux/serial_core.h
@@ -5991,13 +5996,9 @@ F: Documentation/filesystems/ufs.txt
F: fs/ufs/
ULTRA-WIDEBAND (UWB) SUBSYSTEM:
-M: David Vrabel <david.vrabel@csr.com>
L: linux-usb@vger.kernel.org
-S: Supported
+S: Orphan
F: drivers/uwb/
-X: drivers/uwb/wlp/
-X: drivers/uwb/i1480/i1480u-wlp/
-X: drivers/uwb/i1480/i1480-wlp.h
F: include/linux/uwb.h
F: include/linux/uwb/
@@ -6139,13 +6140,6 @@ L: linux-usb@vger.kernel.org
S: Maintained
F: drivers/usb/serial/option.c
-USB OV511 DRIVER
-M: Mark McClelland <mmcclell@bigfoot.com>
-L: linux-usb@vger.kernel.org
-W: http://alpha.dyndns.org/ov511/
-S: Maintained
-F: drivers/media/video/ov511.*
-
USB PEGASUS DRIVER
M: Petko Manolov <petkan@users.sourceforge.net>
L: linux-usb@vger.kernel.org
@@ -6252,7 +6246,7 @@ USB SUBSYSTEM
M: Greg Kroah-Hartman <gregkh@suse.de>
L: linux-usb@vger.kernel.org
W: http://www.linux-usb.org
-T: quilt kernel.org/pub/linux/kernel/people/gregkh/gregkh-2.6/
+T: git git://git.kernel.org/pub/scm/linux/kernel/git/gregkh/usb-2.6.git
S: Supported
F: Documentation/usb/
F: drivers/net/usb/
@@ -6306,16 +6300,6 @@ S: Supported
F: drivers/usb/host/xhci*
F: drivers/usb/host/pci-quirks*
-USB ZC0301 DRIVER
-M: Luca Risolia <luca.risolia@studio.unibo.it>
-L: linux-usb@vger.kernel.org
-L: linux-media@vger.kernel.org
-T: git git://git.kernel.org/pub/scm/linux/kernel/git/mchehab/linux-2.6.git
-W: http://www.linux-projects.org
-S: Maintained
-F: Documentation/video4linux/zc0301.txt
-F: drivers/media/video/zc0301/
-
USB ZD1201 DRIVER
L: linux-wireless@vger.kernel.org
W: http://linux-lc100020.sourceforge.net
@@ -6495,6 +6479,12 @@ S: Maintained
F: Documentation/hwmon/w83793
F: drivers/hwmon/w83793.c
+W83795 HARDWARE MONITORING DRIVER
+M: Jean Delvare <khali@linux-fr.org>
+L: lm-sensors@lm-sensors.org
+S: Maintained
+F: drivers/hwmon/w83795.c
+
W83L51xD SD/MMC CARD INTERFACE DRIVER
M: Pierre Ossman <pierre@ossman.eu>
S: Maintained
@@ -6533,15 +6523,6 @@ F: include/linux/wimax/debug.h
F: include/net/wimax.h
F: net/wimax/
-WIMEDIA LLC PROTOCOL (WLP) SUBSYSTEM
-M: David Vrabel <david.vrabel@csr.com>
-L: netdev@vger.kernel.org
-S: Maintained
-F: include/linux/wlp.h
-F: drivers/uwb/wlp/
-F: drivers/uwb/i1480/i1480u-wlp/
-F: drivers/uwb/i1480/i1480-wlp.h
-
WISTRON LAPTOP BUTTON DRIVER
M: Miloslav Trmac <mitr@volny.cz>
S: Maintained
@@ -6628,11 +6609,25 @@ T: git git://git.kernel.org/pub/scm/linux/kernel/git/mjg59/platform-drivers-x86.
S: Maintained
F: drivers/platform/x86
+XEN PCI SUBSYSTEM
+M: Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>
+L: xen-devel@lists.xensource.com (moderated for non-subscribers)
+S: Supported
+F: arch/x86/pci/*xen*
+F: drivers/pci/*xen*
+
+XEN SWIOTLB SUBSYSTEM
+M: Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>
+L: xen-devel@lists.xensource.com (moderated for non-subscribers)
+S: Supported
+F: arch/x86/xen/*swiotlb*
+F: drivers/xen/*swiotlb*
+
XEN HYPERVISOR INTERFACE
-M: Jeremy Fitzhardinge <jeremy@xensource.com>
-M: Chris Wright <chrisw@sous-sol.org>
+M: Jeremy Fitzhardinge <jeremy.fitzhardinge@citrix.com>
+M: Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>
+L: xen-devel@lists.xensource.com (moderated for non-subscribers)
L: virtualization@lists.osdl.org
-L: xen-devel@lists.xensource.com
S: Supported
F: arch/x86/xen/
F: drivers/*/xen-*front.c
diff --git a/Makefile b/Makefile
index 3e438055a92c..3d94974542ea 100644
--- a/Makefile
+++ b/Makefile
@@ -1,7 +1,7 @@
VERSION = 2
PATCHLEVEL = 6
-SUBLEVEL = 36
-EXTRAVERSION =
+SUBLEVEL = 37
+EXTRAVERSION = -rc5
NAME = Flesh-Eating Bats with Fangs
# *DOCUMENTATION*
@@ -204,6 +204,9 @@ ifeq ($(ARCH),x86_64)
endif
# Additional ARCH settings for sparc
+ifeq ($(ARCH),sparc32)
+ SRCARCH := sparc
+endif
ifeq ($(ARCH),sparc64)
SRCARCH := sparc
endif
@@ -1137,21 +1140,13 @@ MRPROPER_FILES += .config .config.old .version .old_version \
#
clean: rm-dirs := $(CLEAN_DIRS)
clean: rm-files := $(CLEAN_FILES)
-clean-dirs := $(addprefix _clean_,$(srctree) $(vmlinux-alldirs) Documentation)
+clean-dirs := $(addprefix _clean_, . $(vmlinux-alldirs) Documentation)
PHONY += $(clean-dirs) clean archclean
$(clean-dirs):
$(Q)$(MAKE) $(clean)=$(patsubst _clean_%,%,$@)
-clean: archclean $(clean-dirs)
- $(call cmd,rmdirs)
- $(call cmd,rmfiles)
- @find . $(RCS_FIND_IGNORE) \
- \( -name '*.[oas]' -o -name '*.ko' -o -name '.*.cmd' \
- -o -name '.*.d' -o -name '.*.tmp' -o -name '*.mod.c' \
- -o -name '*.symtypes' -o -name 'modules.order' \
- -o -name modules.builtin -o -name '.tmp_*.o.*' \
- -o -name '*.gcno' \) -type f -print | xargs rm -f
+clean: archclean
# mrproper - Delete all generated files, including .config
#
@@ -1352,16 +1347,7 @@ $(clean-dirs):
$(Q)$(MAKE) $(clean)=$(patsubst _clean_%,%,$@)
clean: rm-dirs := $(MODVERDIR)
-clean: rm-files := $(KBUILD_EXTMOD)/Module.symvers \
- $(KBUILD_EXTMOD)/modules.order \
- $(KBUILD_EXTMOD)/modules.builtin
-clean: $(clean-dirs)
- $(call cmd,rmdirs)
- $(call cmd,rmfiles)
- @find $(KBUILD_EXTMOD) $(RCS_FIND_IGNORE) \
- \( -name '*.[oas]' -o -name '*.ko' -o -name '.*.cmd' \
- -o -name '.*.d' -o -name '.*.tmp' -o -name '*.mod.c' \
- -o -name '*.gcno' \) -type f -print | xargs rm -f
+clean: rm-files := $(KBUILD_EXTMOD)/Module.symvers
help:
@echo ' Building external modules.'
@@ -1378,6 +1364,16 @@ prepare: ;
scripts: ;
endif # KBUILD_EXTMOD
+clean: $(clean-dirs)
+ $(call cmd,rmdirs)
+ $(call cmd,rmfiles)
+ @find $(or $(KBUILD_EXTMOD), .) $(RCS_FIND_IGNORE) \
+ \( -name '*.[oas]' -o -name '*.ko' -o -name '.*.cmd' \
+ -o -name '.*.d' -o -name '.*.tmp' -o -name '*.mod.c' \
+ -o -name '*.symtypes' -o -name 'modules.order' \
+ -o -name modules.builtin -o -name '.tmp_*.o.*' \
+ -o -name '*.gcno' \) -type f -print | xargs rm -f
+
# Generate tags for editors
# ---------------------------------------------------------------------------
quiet_cmd_tags = GEN $@
diff --git a/README b/README
index 737838fe73cc..1b81d2836873 100644
--- a/README
+++ b/README
@@ -166,6 +166,7 @@ CONFIGURING the kernel:
- Alternate configuration commands are:
"make config" Plain text interface.
"make menuconfig" Text based color menus, radiolists & dialogs.
+ "make nconfig" Enhanced text based color menus.
"make xconfig" X windows (Qt) based configuration tool.
"make gconfig" X windows (Gtk) based configuration tool.
"make oldconfig" Default all questions based on the contents of
diff --git a/arch/Kconfig b/arch/Kconfig
index 53d7f619a1b9..8bf0fa652eb6 100644
--- a/arch/Kconfig
+++ b/arch/Kconfig
@@ -42,6 +42,20 @@ config KPROBES
for kernel debugging, non-intrusive instrumentation and testing.
If in doubt, say "N".
+config JUMP_LABEL
+ bool "Optimize trace point call sites"
+ depends on HAVE_ARCH_JUMP_LABEL
+ help
+ If it is detected that the compiler has support for "asm goto",
+ the kernel will compile trace point locations with just a
+ nop instruction. When trace points are enabled, the nop will
+ be converted to a jump to the trace function. This technique
+ lowers overhead and stress on the branch prediction of the
+ processor.
+
+ On i386, options added to the compiler flags may increase
+ the size of the kernel slightly.
+
config OPTPROBES
def_bool y
depends on KPROBES && HAVE_OPTPROBES
diff --git a/arch/alpha/Kconfig b/arch/alpha/Kconfig
index d04ccd73af45..943fe6930f77 100644
--- a/arch/alpha/Kconfig
+++ b/arch/alpha/Kconfig
@@ -1,7 +1,3 @@
-#
-# For a description of the syntax of this configuration file,
-# see Documentation/kbuild/kconfig-language.txt.
-#
config ALPHA
bool
default y
@@ -55,6 +51,9 @@ config ZONE_DMA
bool
default y
+config ARCH_DMA_ADDR_T_64BIT
+ def_bool y
+
config NEED_DMA_MAP_STATE
def_bool y
diff --git a/arch/alpha/include/asm/core_mcpcia.h b/arch/alpha/include/asm/core_mcpcia.h
index 21ac53383b37..9f67a056b461 100644
--- a/arch/alpha/include/asm/core_mcpcia.h
+++ b/arch/alpha/include/asm/core_mcpcia.h
@@ -247,7 +247,7 @@ struct el_MCPCIA_uncorrected_frame_mcheck {
#define vip volatile int __force *
#define vuip volatile unsigned int __force *
-#ifdef MCPCIA_ONE_HAE_WINDOW
+#ifndef MCPCIA_ONE_HAE_WINDOW
#define MCPCIA_FROB_MMIO \
if (__mcpcia_is_mmio(hose)) { \
set_hae(hose & 0xffffffff); \
diff --git a/arch/alpha/include/asm/core_t2.h b/arch/alpha/include/asm/core_t2.h
index 471c07292e0b..91b46801b290 100644
--- a/arch/alpha/include/asm/core_t2.h
+++ b/arch/alpha/include/asm/core_t2.h
@@ -1,6 +1,9 @@
#ifndef __ALPHA_T2__H__
#define __ALPHA_T2__H__
+/* Fit everything into one 128MB HAE window. */
+#define T2_ONE_HAE_WINDOW 1
+
#include <linux/types.h>
#include <linux/spinlock.h>
#include <asm/compiler.h>
@@ -19,7 +22,7 @@
*
*/
-#define T2_MEM_R1_MASK 0x07ffffff /* Mem sparse region 1 mask is 26 bits */
+#define T2_MEM_R1_MASK 0x07ffffff /* Mem sparse region 1 mask is 27 bits */
/* GAMMA-SABLE is a SABLE with EV5-based CPUs */
/* All LYNX machines, EV4 or EV5, use the GAMMA bias also */
@@ -85,7 +88,9 @@
#define T2_DIR (IDENT_ADDR + GAMMA_BIAS + 0x38e0004a0UL)
#define T2_ICE (IDENT_ADDR + GAMMA_BIAS + 0x38e0004c0UL)
+#ifndef T2_ONE_HAE_WINDOW
#define T2_HAE_ADDRESS T2_HAE_1
+#endif
/* T2 CSRs are in the non-cachable primary IO space from 3.8000.0000 to
3.8fff.ffff
@@ -429,13 +434,15 @@ extern inline void t2_outl(u32 b, unsigned long addr)
*
*/
+#ifdef T2_ONE_HAE_WINDOW
+#define t2_set_hae
+#else
#define t2_set_hae { \
- msb = addr >> 27; \
+ unsigned long msb = addr >> 27; \
addr &= T2_MEM_R1_MASK; \
set_hae(msb); \
}
-
-extern raw_spinlock_t t2_hae_lock;
+#endif
/*
* NOTE: take T2_DENSE_MEM off in each readX/writeX routine, since
@@ -446,28 +453,22 @@ extern raw_spinlock_t t2_hae_lock;
__EXTERN_INLINE u8 t2_readb(const volatile void __iomem *xaddr)
{
unsigned long addr = (unsigned long) xaddr - T2_DENSE_MEM;
- unsigned long result, msb;
- unsigned long flags;
- raw_spin_lock_irqsave(&t2_hae_lock, flags);
+ unsigned long result;
t2_set_hae;
result = *(vip) ((addr << 5) + T2_SPARSE_MEM + 0x00);
- raw_spin_unlock_irqrestore(&t2_hae_lock, flags);
return __kernel_extbl(result, addr & 3);
}
__EXTERN_INLINE u16 t2_readw(const volatile void __iomem *xaddr)
{
unsigned long addr = (unsigned long) xaddr - T2_DENSE_MEM;
- unsigned long result, msb;
- unsigned long flags;
- raw_spin_lock_irqsave(&t2_hae_lock, flags);
+ unsigned long result;
t2_set_hae;
result = *(vuip) ((addr << 5) + T2_SPARSE_MEM + 0x08);
- raw_spin_unlock_irqrestore(&t2_hae_lock, flags);
return __kernel_extwl(result, addr & 3);
}
@@ -478,59 +479,47 @@ __EXTERN_INLINE u16 t2_readw(const volatile void __iomem *xaddr)
__EXTERN_INLINE u32 t2_readl(const volatile void __iomem *xaddr)
{
unsigned long addr = (unsigned long) xaddr - T2_DENSE_MEM;
- unsigned long result, msb;
- unsigned long flags;
- raw_spin_lock_irqsave(&t2_hae_lock, flags);
+ unsigned long result;
t2_set_hae;
result = *(vuip) ((addr << 5) + T2_SPARSE_MEM + 0x18);
- raw_spin_unlock_irqrestore(&t2_hae_lock, flags);
return result & 0xffffffffUL;
}
__EXTERN_INLINE u64 t2_readq(const volatile void __iomem *xaddr)
{
unsigned long addr = (unsigned long) xaddr - T2_DENSE_MEM;
- unsigned long r0, r1, work, msb;
- unsigned long flags;
- raw_spin_lock_irqsave(&t2_hae_lock, flags);
+ unsigned long r0, r1, work;
t2_set_hae;
work = (addr << 5) + T2_SPARSE_MEM + 0x18;
r0 = *(vuip)(work);
r1 = *(vuip)(work + (4 << 5));
- raw_spin_unlock_irqrestore(&t2_hae_lock, flags);
return r1 << 32 | r0;
}
__EXTERN_INLINE void t2_writeb(u8 b, volatile void __iomem *xaddr)
{
unsigned long addr = (unsigned long) xaddr - T2_DENSE_MEM;
- unsigned long msb, w;
- unsigned long flags;
- raw_spin_lock_irqsave(&t2_hae_lock, flags);
+ unsigned long w;
t2_set_hae;
w = __kernel_insbl(b, addr & 3);
*(vuip) ((addr << 5) + T2_SPARSE_MEM + 0x00) = w;
- raw_spin_unlock_irqrestore(&t2_hae_lock, flags);
}
__EXTERN_INLINE void t2_writew(u16 b, volatile void __iomem *xaddr)
{
unsigned long addr = (unsigned long) xaddr - T2_DENSE_MEM;
- unsigned long msb, w;
- unsigned long flags;
- raw_spin_lock_irqsave(&t2_hae_lock, flags);
+ unsigned long w;
t2_set_hae;
w = __kernel_inswl(b, addr & 3);
*(vuip) ((addr << 5) + T2_SPARSE_MEM + 0x08) = w;
- raw_spin_unlock_irqrestore(&t2_hae_lock, flags);
}
/*
@@ -540,29 +529,22 @@ __EXTERN_INLINE void t2_writew(u16 b, volatile void __iomem *xaddr)
__EXTERN_INLINE void t2_writel(u32 b, volatile void __iomem *xaddr)
{
unsigned long addr = (unsigned long) xaddr - T2_DENSE_MEM;
- unsigned long msb;
- unsigned long flags;
- raw_spin_lock_irqsave(&t2_hae_lock, flags);
t2_set_hae;
*(vuip) ((addr << 5) + T2_SPARSE_MEM + 0x18) = b;
- raw_spin_unlock_irqrestore(&t2_hae_lock, flags);
}
__EXTERN_INLINE void t2_writeq(u64 b, volatile void __iomem *xaddr)
{
unsigned long addr = (unsigned long) xaddr - T2_DENSE_MEM;
- unsigned long msb, work;
- unsigned long flags;
- raw_spin_lock_irqsave(&t2_hae_lock, flags);
+ unsigned long work;
t2_set_hae;
work = (addr << 5) + T2_SPARSE_MEM + 0x18;
*(vuip)work = b;
*(vuip)(work + (4 << 5)) = b >> 32;
- raw_spin_unlock_irqrestore(&t2_hae_lock, flags);
}
__EXTERN_INLINE void __iomem *t2_ioportmap(unsigned long addr)
diff --git a/arch/alpha/include/asm/pgtable.h b/arch/alpha/include/asm/pgtable.h
index 71a243294142..de98a732683d 100644
--- a/arch/alpha/include/asm/pgtable.h
+++ b/arch/alpha/include/asm/pgtable.h
@@ -318,9 +318,7 @@ extern inline pte_t * pte_offset_kernel(pmd_t * dir, unsigned long address)
}
#define pte_offset_map(dir,addr) pte_offset_kernel((dir),(addr))
-#define pte_offset_map_nested(dir,addr) pte_offset_kernel((dir),(addr))
#define pte_unmap(pte) do { } while (0)
-#define pte_unmap_nested(pte) do { } while (0)
extern pgd_t swapper_pg_dir[1024];
diff --git a/arch/alpha/kernel/core_t2.c b/arch/alpha/kernel/core_t2.c
index e6d90568b65d..2f770e994289 100644
--- a/arch/alpha/kernel/core_t2.c
+++ b/arch/alpha/kernel/core_t2.c
@@ -74,8 +74,6 @@
# define DBG(args)
#endif
-DEFINE_RAW_SPINLOCK(t2_hae_lock);
-
static volatile unsigned int t2_mcheck_any_expected;
static volatile unsigned int t2_mcheck_last_taken;
@@ -406,6 +404,7 @@ void __init
t2_init_arch(void)
{
struct pci_controller *hose;
+ struct resource *hae_mem;
unsigned long temp;
unsigned int i;
@@ -433,7 +432,13 @@ t2_init_arch(void)
*/
pci_isa_hose = hose = alloc_pci_controller();
hose->io_space = &ioport_resource;
- hose->mem_space = &iomem_resource;
+ hae_mem = alloc_resource();
+ hae_mem->start = 0;
+ hae_mem->end = T2_MEM_R1_MASK;
+ hae_mem->name = pci_hae0_name;
+ if (request_resource(&iomem_resource, hae_mem) < 0)
+ printk(KERN_ERR "Failed to request HAE_MEM\n");
+ hose->mem_space = hae_mem;
hose->index = 0;
hose->sparse_mem_base = T2_SPARSE_MEM - IDENT_ADDR;
diff --git a/arch/alpha/kernel/machvec_impl.h b/arch/alpha/kernel/machvec_impl.h
index 512685f78097..7fa62488bd16 100644
--- a/arch/alpha/kernel/machvec_impl.h
+++ b/arch/alpha/kernel/machvec_impl.h
@@ -25,6 +25,9 @@
#ifdef MCPCIA_ONE_HAE_WINDOW
#define MCPCIA_HAE_ADDRESS (&alpha_mv.hae_cache)
#endif
+#ifdef T2_ONE_HAE_WINDOW
+#define T2_HAE_ADDRESS (&alpha_mv.hae_cache)
+#endif
/* Only a few systems don't define IACK_SC, handling all interrupts through
the SRM console. But splitting out that one case from IO() below
diff --git a/arch/alpha/kernel/pci_iommu.c b/arch/alpha/kernel/pci_iommu.c
index d1dbd9acd1df..022c2748fa41 100644
--- a/arch/alpha/kernel/pci_iommu.c
+++ b/arch/alpha/kernel/pci_iommu.c
@@ -223,7 +223,7 @@ iommu_arena_free(struct pci_iommu_arena *arena, long ofs, long n)
*/
static int pci_dac_dma_supported(struct pci_dev *dev, u64 mask)
{
- dma64_addr_t dac_offset = alpha_mv.pci_dac_offset;
+ dma_addr_t dac_offset = alpha_mv.pci_dac_offset;
int ok = 1;
/* If this is not set, the machine doesn't support DAC at all. */
@@ -756,7 +756,7 @@ static void alpha_pci_unmap_sg(struct device *dev, struct scatterlist *sg,
spin_lock_irqsave(&arena->lock, flags);
for (end = sg + nents; sg < end; ++sg) {
- dma64_addr_t addr;
+ dma_addr_t addr;
size_t size;
long npages, ofs;
dma_addr_t tend;
diff --git a/arch/alpha/kernel/ptrace.c b/arch/alpha/kernel/ptrace.c
index baa903602f6a..e2af5eb59bb4 100644
--- a/arch/alpha/kernel/ptrace.c
+++ b/arch/alpha/kernel/ptrace.c
@@ -269,7 +269,8 @@ void ptrace_disable(struct task_struct *child)
user_disable_single_step(child);
}
-long arch_ptrace(struct task_struct *child, long request, long addr, long data)
+long arch_ptrace(struct task_struct *child, long request,
+ unsigned long addr, unsigned long data)
{
unsigned long tmp;
size_t copied;
@@ -292,7 +293,7 @@ long arch_ptrace(struct task_struct *child, long request, long addr, long data)
case PTRACE_PEEKUSR:
force_successful_syscall_return();
ret = get_reg(child, addr);
- DBG(DBG_MEM, ("peek $%ld->%#lx\n", addr, ret));
+ DBG(DBG_MEM, ("peek $%lu->%#lx\n", addr, ret));
break;
/* When I and D space are separate, this will have to be fixed. */
@@ -302,7 +303,7 @@ long arch_ptrace(struct task_struct *child, long request, long addr, long data)
break;
case PTRACE_POKEUSR: /* write the specified register */
- DBG(DBG_MEM, ("poke $%ld<-%#lx\n", addr, data));
+ DBG(DBG_MEM, ("poke $%lu<-%#lx\n", addr, data));
ret = put_reg(child, addr, data);
break;
default:
diff --git a/arch/arm/Kconfig b/arch/arm/Kconfig
index b64e465ac49c..f1d9297b1050 100644
--- a/arch/arm/Kconfig
+++ b/arch/arm/Kconfig
@@ -1,10 +1,3 @@
-#
-# For a description of the syntax of this configuration file,
-# see Documentation/kbuild/kconfig-language.txt.
-#
-
-mainmenu "Linux Kernel Configuration"
-
config ARM
bool
default y
@@ -13,10 +6,10 @@ config ARM
select HAVE_MEMBLOCK
select RTC_LIB
select SYS_SUPPORTS_APM_EMULATION
- select GENERIC_ATOMIC64 if (!CPU_32v6K)
+ select GENERIC_ATOMIC64 if (!CPU_32v6K || !AEABI)
select HAVE_OPROFILE if (HAVE_PERF_EVENTS)
select HAVE_ARCH_KGDB
- select HAVE_KPROBES if (!XIP_KERNEL)
+ select HAVE_KPROBES if (!XIP_KERNEL && !THUMB2_KERNEL)
select HAVE_KRETPROBES if (HAVE_KPROBES)
select HAVE_FUNCTION_TRACER if (!XIP_KERNEL)
select HAVE_FTRACE_MCOUNT_RECORD if (!XIP_KERNEL)
@@ -573,6 +566,7 @@ config ARCH_TEGRA
select HAVE_CLK
select COMMON_CLKDEV
select ARCH_HAS_BARRIERS if CACHE_L2X0
+ select ARCH_HAS_CPUFREQ
help
This enables support for NVIDIA Tegra based systems (Tegra APX,
Tegra 6xx and Tegra 2 series).
@@ -652,7 +646,7 @@ config ARCH_S3C2410
select ARCH_HAS_CPUFREQ
select HAVE_CLK
select ARCH_USES_GETTIMEOFFSET
- select HAVE_S3C2410_I2C
+ select HAVE_S3C2410_I2C if I2C
help
Samsung S3C2410X CPU based systems, such as the Simtec Electronics
BAST (<http://www.simtec.co.uk/products/EB110ITX/>), the IPAQ 1940 or
@@ -682,8 +676,8 @@ config ARCH_S3C64XX
select S3C_DEV_NAND
select USB_ARCH_HAS_OHCI
select SAMSUNG_GPIOLIB_4BIT
- select HAVE_S3C2410_I2C
- select HAVE_S3C2410_WATCHDOG
+ select HAVE_S3C2410_I2C if I2C
+ select HAVE_S3C2410_WATCHDOG if WATCHDOG
help
Samsung S3C64XX series based systems
@@ -692,10 +686,10 @@ config ARCH_S5P64X0
select CPU_V6
select GENERIC_GPIO
select HAVE_CLK
- select HAVE_S3C2410_WATCHDOG
+ select HAVE_S3C2410_WATCHDOG if WATCHDOG
select ARCH_USES_GETTIMEOFFSET
- select HAVE_S3C2410_I2C
- select HAVE_S3C_RTC
+ select HAVE_S3C2410_I2C if I2C
+ select HAVE_S3C_RTC if RTC_CLASS
help
Samsung S5P64X0 CPU based systems, such as the Samsung SMDK6440,
SMDK6450.
@@ -706,7 +700,7 @@ config ARCH_S5P6442
select GENERIC_GPIO
select HAVE_CLK
select ARCH_USES_GETTIMEOFFSET
- select HAVE_S3C2410_WATCHDOG
+ select HAVE_S3C2410_WATCHDOG if WATCHDOG
help
Samsung S5P6442 CPU based systems
@@ -717,31 +711,37 @@ config ARCH_S5PC100
select CPU_V7
select ARM_L1_CACHE_SHIFT_6
select ARCH_USES_GETTIMEOFFSET
- select HAVE_S3C2410_I2C
- select HAVE_S3C_RTC
- select HAVE_S3C2410_WATCHDOG
+ select HAVE_S3C2410_I2C if I2C
+ select HAVE_S3C_RTC if RTC_CLASS
+ select HAVE_S3C2410_WATCHDOG if WATCHDOG
help
Samsung S5PC100 series based systems
config ARCH_S5PV210
bool "Samsung S5PV210/S5PC110"
select CPU_V7
+ select ARCH_SPARSEMEM_ENABLE
select GENERIC_GPIO
select HAVE_CLK
select ARM_L1_CACHE_SHIFT_6
+ select ARCH_HAS_CPUFREQ
select ARCH_USES_GETTIMEOFFSET
- select HAVE_S3C2410_I2C
- select HAVE_S3C_RTC
- select HAVE_S3C2410_WATCHDOG
+ select HAVE_S3C2410_I2C if I2C
+ select HAVE_S3C_RTC if RTC_CLASS
+ select HAVE_S3C2410_WATCHDOG if WATCHDOG
help
Samsung S5PV210/S5PC110 series based systems
config ARCH_S5PV310
bool "Samsung S5PV310/S5PC210"
select CPU_V7
+ select ARCH_SPARSEMEM_ENABLE
select GENERIC_GPIO
select HAVE_CLK
select GENERIC_CLOCKEVENTS
+ select HAVE_S3C_RTC if RTC_CLASS
+ select HAVE_S3C2410_I2C if I2C
+ select HAVE_S3C2410_WATCHDOG if WATCHDOG
help
Samsung S5PV310 series based systems
@@ -831,7 +831,7 @@ config ARCH_OMAP
select GENERIC_CLOCKEVENTS
select ARCH_HAS_HOLES_MEMORYMODEL
help
- Support for TI's OMAP platform (OMAP1 and OMAP2).
+ Support for TI's OMAP platform (OMAP1/2/3/4).
config PLAT_SPEAR
bool "ST SPEAr"
@@ -1668,6 +1668,12 @@ if ARCH_HAS_CPUFREQ
source "drivers/cpufreq/Kconfig"
+config CPU_FREQ_IMX
+ tristate "CPUfreq driver for i.MX CPUs"
+ depends on ARCH_MXC && CPU_FREQ
+ help
+ This enables the CPUfreq driver for i.MX CPUs.
+
config CPU_FREQ_SA1100
bool
diff --git a/arch/arm/boot/Makefile b/arch/arm/boot/Makefile
index 4a590f4113e2..4d26f2c52a75 100644
--- a/arch/arm/boot/Makefile
+++ b/arch/arm/boot/Makefile
@@ -70,12 +70,7 @@ else
$(obj)/uImage: LOADADDR=$(ZRELADDR)
endif
-ifeq ($(CONFIG_THUMB2_KERNEL),y)
-# Set bit 0 to 1 so that "mov pc, rx" switches to Thumb-2 mode
-$(obj)/uImage: STARTADDR=$(shell echo $(LOADADDR) | sed -e "s/.$$/1/")
-else
$(obj)/uImage: STARTADDR=$(LOADADDR)
-endif
$(obj)/uImage: $(obj)/zImage FORCE
$(call if_changed,uimage)
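The Makefile rule removed above rewrote the last digit of LOADADDR so that, per its own comment, the entry address had bit 0 set and a "mov pc, rx" style jump would land in Thumb-2 state. A tiny illustration of that interworking convention follows; the address is an arbitrary example rather than a value from the patch, and the head.S hunk further down makes the trick unnecessary by always entering in ARM state and branching to Thumb inside the image.

#include <stdint.h>
#include <stdio.h>

/* Interworking convention behind the removed comment: a branch target with
 * bit 0 set selects Thumb state, bit 0 clear selects ARM. Setting bit 0 is
 * the effect the old sed rule aimed for; the value is illustrative only. */
static uint32_t thumb_entry(uint32_t arm_entry)
{
        return arm_entry | 1u;
}

int main(void)
{
        printf("0x%08x\n", thumb_entry(0x60008000u));
        return 0;
}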
diff --git a/arch/arm/boot/bootp/init.S b/arch/arm/boot/bootp/init.S
index 8b0de41c3dcb..78b508075161 100644
--- a/arch/arm/boot/bootp/init.S
+++ b/arch/arm/boot/bootp/init.S
@@ -73,6 +73,8 @@ move: ldmia r4!, {r7 - r10} @ move 32-bytes at a time
.size _start, . - _start
+ .align
+
.type data,#object
data: .word initrd_start @ source initrd address
.word initrd_phys @ destination initrd address
diff --git a/arch/arm/boot/compressed/head.S b/arch/arm/boot/compressed/head.S
index 6825c34646d4..7193884ed8b0 100644
--- a/arch/arm/boot/compressed/head.S
+++ b/arch/arm/boot/compressed/head.S
@@ -125,9 +125,13 @@ wait: mrc p14, 0, pc, c0, c1, 0
* sort out different calling conventions
*/
.align
+ .arm @ Always enter in ARM state
start:
.type start,#function
- .rept 8
+ THUMB( adr r12, BSYM(1f) )
+ THUMB( bx r12 )
+ THUMB( .rept 6 )
+ ARM( .rept 8 )
mov r0, r0
.endr
@@ -135,6 +139,7 @@ start:
.word 0x016f2818 @ Magic numbers to help the loader
.word start @ absolute load/run zImage address
.word _edata @ zImage end address
+ THUMB( .thumb )
1: mov r7, r1 @ save architecture ID
mov r8, r2 @ save atags pointer
@@ -174,7 +179,8 @@ not_angel:
ldr sp, [r0, #28]
#ifdef CONFIG_AUTO_ZRELADDR
@ determine final kernel image address
- and r4, pc, #0xf8000000
+ mov r4, pc
+ and r4, r4, #0xf8000000
add r4, r4, #TEXT_OFFSET
#else
ldr r4, =zreladdr
@@ -445,7 +451,8 @@ __setup_mmu: sub r3, r4, #16384 @ Page directory size
*/
mov r1, #0x1e
orr r1, r1, #3 << 10
- mov r2, pc, lsr #20
+ mov r2, pc
+ mov r2, r2, lsr #20
orr r1, r1, r2, lsl #20
add r0, r3, r2, lsl #2
str r1, [r0], #4
@@ -1084,6 +1091,6 @@ memdump: mov r12, r0
reloc_end:
.align
- .section ".stack", "w"
+ .section ".stack", "aw", %nobits
user_stack: .space 4096
user_stack_end:
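Two of the head.S hunks above split "and r4, pc, #0xf8000000" and "mov r2, pc, lsr #20" into a plain "mov rN, pc" followed by the operation, avoiding pc-as-operand forms that a Thumb-2 build of the decompressor cannot use. The CONFIG_AUTO_ZRELADDR arithmetic itself is just: round the current pc down to a 128 MiB boundary and add TEXT_OFFSET. A small sketch of that calculation; the TEXT_OFFSET value below is an assumed, typical one and is not read from the patch.

#include <stdint.h>
#include <stdio.h>

/* AUTO_ZRELADDR idea from the hunk above: the final kernel address is the
 * current pc rounded down to a 128 MiB boundary, plus TEXT_OFFSET. */
#define AUTO_ZRELADDR_MASK 0xf8000000u   /* mask used in the patch */
#define TEXT_OFFSET        0x00008000u   /* illustrative value only */

static uint32_t auto_zreladdr(uint32_t pc)
{
        return (pc & AUTO_ZRELADDR_MASK) + TEXT_OFFSET;
}

int main(void)
{
        /* e.g. a zImage executing around 0x60f00000 relocates the kernel
         * to 0x60008000 under the assumptions above */
        printf("0x%08x\n", auto_zreladdr(0x60f00000u));
        return 0;
}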
diff --git a/arch/arm/boot/compressed/vmlinux.lds.in b/arch/arm/boot/compressed/vmlinux.lds.in
index d08168941bd6..366a924019ac 100644
--- a/arch/arm/boot/compressed/vmlinux.lds.in
+++ b/arch/arm/boot/compressed/vmlinux.lds.in
@@ -57,7 +57,7 @@ SECTIONS
.bss : { *(.bss) }
_end = .;
- .stack (NOLOAD) : { *(.stack) }
+ .stack : { *(.stack) }
.stab 0 : { *(.stab) }
.stabstr 0 : { *(.stabstr) }
diff --git a/arch/arm/common/gic.c b/arch/arm/common/gic.c
index ada6359160eb..e6388dcd8cfa 100644
--- a/arch/arm/common/gic.c
+++ b/arch/arm/common/gic.c
@@ -146,9 +146,15 @@ static int gic_set_cpu(unsigned int irq, const struct cpumask *mask_val)
unsigned int shift = (irq % 4) * 8;
unsigned int cpu = cpumask_first(mask_val);
u32 val;
+ struct irq_desc *desc;
spin_lock(&irq_controller_lock);
- irq_desc[irq].node = cpu;
+ desc = irq_to_desc(irq);
+ if (desc == NULL) {
+ spin_unlock(&irq_controller_lock);
+ return -EINVAL;
+ }
+ desc->node = cpu;
val = readl(reg) & ~(0xff << shift);
val |= 1 << (cpu + shift);
writel(val, reg);
@@ -210,7 +216,7 @@ void __init gic_cascade_irq(unsigned int gic_nr, unsigned int irq)
void __init gic_dist_init(unsigned int gic_nr, void __iomem *base,
unsigned int irq_start)
{
- unsigned int max_irq, i;
+ unsigned int gic_irqs, irq_limit, i;
u32 cpumask = 1 << smp_processor_id();
if (gic_nr >= MAX_GIC_NR)
@@ -226,46 +232,49 @@ void __init gic_dist_init(unsigned int gic_nr, void __iomem *base,
/*
* Find out how many interrupts are supported.
- */
- max_irq = readl(base + GIC_DIST_CTR) & 0x1f;
- max_irq = (max_irq + 1) * 32;
-
- /*
* The GIC only supports up to 1020 interrupt sources.
- * Limit this to either the architected maximum, or the
- * platform maximum.
*/
- if (max_irq > max(1020, NR_IRQS))
- max_irq = max(1020, NR_IRQS);
+ gic_irqs = readl(base + GIC_DIST_CTR) & 0x1f;
+ gic_irqs = (gic_irqs + 1) * 32;
+ if (gic_irqs > 1020)
+ gic_irqs = 1020;
/*
* Set all global interrupts to be level triggered, active low.
*/
- for (i = 32; i < max_irq; i += 16)
+ for (i = 32; i < gic_irqs; i += 16)
writel(0, base + GIC_DIST_CONFIG + i * 4 / 16);
/*
* Set all global interrupts to this CPU only.
*/
- for (i = 32; i < max_irq; i += 4)
+ for (i = 32; i < gic_irqs; i += 4)
writel(cpumask, base + GIC_DIST_TARGET + i * 4 / 4);
/*
- * Set priority on all interrupts.
+ * Set priority on all global interrupts.
*/
- for (i = 0; i < max_irq; i += 4)
+ for (i = 32; i < gic_irqs; i += 4)
writel(0xa0a0a0a0, base + GIC_DIST_PRI + i * 4 / 4);
/*
- * Disable all interrupts.
+ * Disable all interrupts. Leave the PPI and SGIs alone
+ * as these enables are banked registers.
*/
- for (i = 0; i < max_irq; i += 32)
+ for (i = 32; i < gic_irqs; i += 32)
writel(0xffffffff, base + GIC_DIST_ENABLE_CLEAR + i * 4 / 32);
/*
+ * Limit number of interrupts registered to the platform maximum
+ */
+ irq_limit = gic_data[gic_nr].irq_offset + gic_irqs;
+ if (WARN_ON(irq_limit > NR_IRQS))
+ irq_limit = NR_IRQS;
+
+ /*
* Setup the Linux IRQ subsystem.
*/
- for (i = irq_start; i < gic_data[gic_nr].irq_offset + max_irq; i++) {
+ for (i = irq_start; i < irq_limit; i++) {
set_irq_chip(i, &gic_chip);
set_irq_chip_data(i, &gic_data[gic_nr]);
set_irq_handler(i, handle_level_irq);
@@ -277,11 +286,30 @@ void __init gic_dist_init(unsigned int gic_nr, void __iomem *base,
void __cpuinit gic_cpu_init(unsigned int gic_nr, void __iomem *base)
{
+ void __iomem *dist_base;
+ int i;
+
if (gic_nr >= MAX_GIC_NR)
BUG();
+ dist_base = gic_data[gic_nr].dist_base;
+ BUG_ON(!dist_base);
+
gic_data[gic_nr].cpu_base = base;
+ /*
+ * Deal with the banked PPI and SGI interrupts - disable all
+ * PPI interrupts, ensure all SGI interrupts are enabled.
+ */
+ writel(0xffff0000, dist_base + GIC_DIST_ENABLE_CLEAR);
+ writel(0x0000ffff, dist_base + GIC_DIST_ENABLE_SET);
+
+ /*
+ * Set priority on PPI and SGI interrupts
+ */
+ for (i = 0; i < 32; i += 4)
+ writel(0xa0a0a0a0, dist_base + GIC_DIST_PRI + i * 4 / 4);
+
writel(0xf0, base + GIC_CPU_PRIMASK);
writel(1, base + GIC_CPU_CTRL);
}
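The gic_dist_init() rework above separates what the hardware implements from what the platform can register: the low five bits of GIC_DIST_CTR give (N + 1) * 32 implemented interrupt lines, the architecture caps usable IDs at 1020, and NR_IRQS then clamps how many are hooked into the Linux IRQ layer, while the banked per-CPU PPIs and SGIs are left to gic_cpu_init(). The arithmetic in isolation, with an assumed register value and an assumed NR_IRQS standing in for the kernel's compile-time constant:

#include <stdint.h>
#include <stdio.h>

/* Mirrors the gic_dist_init() computation above: GIC_DIST_CTR[4:0]
 * (ITLinesNumber) encodes (N + 1) * 32 implemented lines, capped at the
 * architectural maximum of 1020. NR_IRQS is an assumed platform value. */
enum { NR_IRQS = 256 };

static unsigned int gic_supported_irqs(uint32_t dist_ctr)
{
        unsigned int gic_irqs = ((dist_ctr & 0x1f) + 1) * 32;

        if (gic_irqs > 1020)
                gic_irqs = 1020;
        return gic_irqs;
}

static unsigned int gic_irq_limit(unsigned int irq_offset, unsigned int gic_irqs)
{
        unsigned int limit = irq_offset + gic_irqs;

        /* the real code emits WARN_ON() before clamping to NR_IRQS */
        return limit > NR_IRQS ? NR_IRQS : limit;
}

int main(void)
{
        uint32_t dist_ctr = 0x0000fc27;  /* made-up register value, ITLinesNumber = 7 */
        unsigned int gic_irqs = gic_supported_irqs(dist_ctr);

        printf("lines implemented: %u, registered up to: %u\n",
               gic_irqs, gic_irq_limit(32, gic_irqs));
        return 0;
}

With these assumptions the demo reports 256 implemented lines and a registration limit of 256, i.e. the platform ceiling, not the hardware one, wins, which is the point of the irq_limit clamp added in the hunk.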
diff --git a/arch/arm/configs/at91rm9200_defconfig b/arch/arm/configs/at91rm9200_defconfig
new file mode 100644
index 000000000000..38cb7c985426
--- /dev/null
+++ b/arch/arm/configs/at91rm9200_defconfig
@@ -0,0 +1,341 @@
+CONFIG_EXPERIMENTAL=y
+# CONFIG_LOCALVERSION_AUTO is not set
+# CONFIG_SWAP is not set
+CONFIG_SYSVIPC=y
+CONFIG_IKCONFIG=y
+CONFIG_IKCONFIG_PROC=y
+CONFIG_LOG_BUF_SHIFT=14
+CONFIG_SYSFS_DEPRECATED_V2=y
+CONFIG_BLK_DEV_INITRD=y
+CONFIG_MODULES=y
+CONFIG_MODULE_FORCE_LOAD=y
+CONFIG_MODULE_UNLOAD=y
+CONFIG_MODVERSIONS=y
+CONFIG_MODULE_SRCVERSION_ALL=y
+# CONFIG_BLK_DEV_BSG is not set
+# CONFIG_IOSCHED_CFQ is not set
+CONFIG_ARCH_AT91=y
+CONFIG_MACH_ONEARM=y
+CONFIG_ARCH_AT91RM9200DK=y
+CONFIG_MACH_AT91RM9200EK=y
+CONFIG_MACH_CSB337=y
+CONFIG_MACH_CSB637=y
+CONFIG_MACH_CARMEVA=y
+CONFIG_MACH_ATEB9200=y
+CONFIG_MACH_KB9200=y
+CONFIG_MACH_PICOTUX2XX=y
+CONFIG_MACH_KAFA=y
+CONFIG_MACH_ECBAT91=y
+CONFIG_MACH_YL9200=y
+CONFIG_MACH_CPUAT91=y
+CONFIG_MACH_ECO920=y
+CONFIG_MTD_AT91_DATAFLASH_CARD=y
+CONFIG_AT91_PROGRAMMABLE_CLOCKS=y
+CONFIG_AT91_TIMER_HZ=100
+# CONFIG_ARM_THUMB is not set
+CONFIG_PCCARD=y
+CONFIG_AT91_CF=y
+CONFIG_NO_HZ=y
+CONFIG_HIGH_RES_TIMERS=y
+CONFIG_PREEMPT=y
+CONFIG_AEABI=y
+CONFIG_LEDS=y
+CONFIG_LEDS_CPU=y
+CONFIG_ZBOOT_ROM_TEXT=0x10000000
+CONFIG_ZBOOT_ROM_BSS=0x20040000
+CONFIG_KEXEC=y
+CONFIG_FPE_NWFPE=y
+CONFIG_BINFMT_MISC=y
+CONFIG_NET=y
+CONFIG_PACKET=y
+CONFIG_UNIX=y
+CONFIG_XFRM_USER=m
+CONFIG_INET=y
+CONFIG_IP_MULTICAST=y
+CONFIG_IP_PNP=y
+CONFIG_IP_PNP_DHCP=y
+CONFIG_IP_PNP_BOOTP=y
+CONFIG_NET_IPIP=m
+CONFIG_NET_IPGRE=m
+CONFIG_INET_AH=m
+CONFIG_INET_ESP=m
+CONFIG_INET_IPCOMP=m
+CONFIG_INET_XFRM_MODE_TRANSPORT=m
+CONFIG_INET_XFRM_MODE_TUNNEL=m
+CONFIG_INET_XFRM_MODE_BEET=m
+CONFIG_IPV6_PRIVACY=y
+CONFIG_IPV6_ROUTER_PREF=y
+CONFIG_IPV6_ROUTE_INFO=y
+CONFIG_INET6_AH=m
+CONFIG_INET6_ESP=m
+CONFIG_INET6_IPCOMP=m
+CONFIG_IPV6_MIP6=m
+CONFIG_INET6_XFRM_MODE_ROUTEOPTIMIZATION=m
+CONFIG_IPV6_TUNNEL=m
+CONFIG_BRIDGE=m
+CONFIG_VLAN_8021Q=m
+CONFIG_BT=m
+CONFIG_BT_L2CAP=m
+CONFIG_BT_SCO=m
+CONFIG_BT_RFCOMM=m
+CONFIG_BT_RFCOMM_TTY=y
+CONFIG_BT_BNEP=m
+CONFIG_BT_BNEP_MC_FILTER=y
+CONFIG_BT_BNEP_PROTO_FILTER=y
+CONFIG_BT_HIDP=m
+CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
+CONFIG_MTD=y
+CONFIG_MTD_CONCAT=y
+CONFIG_MTD_PARTITIONS=y
+CONFIG_MTD_CMDLINE_PARTS=y
+CONFIG_MTD_AFS_PARTS=y
+CONFIG_MTD_CHAR=y
+CONFIG_MTD_BLOCK=y
+CONFIG_MTD_CFI=y
+CONFIG_MTD_JEDECPROBE=y
+CONFIG_MTD_CFI_INTELEXT=y
+CONFIG_MTD_CFI_AMDSTD=y
+CONFIG_MTD_COMPLEX_MAPPINGS=y
+CONFIG_MTD_PHYSMAP=y
+CONFIG_MTD_PLATRAM=y
+CONFIG_MTD_DATAFLASH=y
+CONFIG_MTD_NAND=y
+CONFIG_MTD_NAND_ATMEL=y
+CONFIG_MTD_NAND_PLATFORM=y
+CONFIG_MTD_UBI=y
+CONFIG_MTD_UBI_GLUEBI=y
+CONFIG_BLK_DEV_LOOP=y
+CONFIG_BLK_DEV_NBD=y
+CONFIG_BLK_DEV_RAM=y
+CONFIG_BLK_DEV_RAM_SIZE=8192
+CONFIG_ATMEL_TCLIB=y
+CONFIG_EEPROM_LEGACY=m
+CONFIG_SCSI=y
+CONFIG_BLK_DEV_SD=y
+CONFIG_BLK_DEV_SR=m
+CONFIG_BLK_DEV_SR_VENDOR=y
+CONFIG_CHR_DEV_SG=m
+CONFIG_SCSI_MULTI_LUN=y
+# CONFIG_SCSI_LOWLEVEL is not set
+CONFIG_NETDEVICES=y
+CONFIG_TUN=m
+CONFIG_PHYLIB=y
+CONFIG_DAVICOM_PHY=y
+CONFIG_SMSC_PHY=y
+CONFIG_MICREL_PHY=y
+CONFIG_NET_ETHERNET=y
+CONFIG_ARM_AT91_ETHER=y
+# CONFIG_NETDEV_1000 is not set
+# CONFIG_NETDEV_10000 is not set
+CONFIG_USB_CATC=m
+CONFIG_USB_KAWETH=m
+CONFIG_USB_PEGASUS=m
+CONFIG_USB_RTL8150=m
+CONFIG_USB_USBNET=m
+CONFIG_USB_NET_DM9601=m
+CONFIG_USB_NET_GL620A=m
+CONFIG_USB_NET_PLUSB=m
+CONFIG_USB_NET_RNDIS_HOST=m
+CONFIG_USB_ALI_M5632=y
+CONFIG_USB_AN2720=y
+CONFIG_USB_EPSON2888=y
+CONFIG_PPP=y
+CONFIG_PPP_MULTILINK=y
+CONFIG_PPP_FILTER=y
+CONFIG_PPP_ASYNC=y
+CONFIG_PPP_DEFLATE=y
+CONFIG_PPP_BSDCOMP=y
+CONFIG_PPP_MPPE=m
+CONFIG_PPPOE=m
+CONFIG_SLIP=m
+CONFIG_SLIP_COMPRESSED=y
+CONFIG_SLIP_SMART=y
+CONFIG_SLIP_MODE_SLIP6=y
+# CONFIG_INPUT_MOUSEDEV_PSAUX is not set
+CONFIG_INPUT_MOUSEDEV_SCREEN_X=640
+CONFIG_INPUT_MOUSEDEV_SCREEN_Y=480
+CONFIG_INPUT_EVDEV=y
+CONFIG_KEYBOARD_GPIO=y
+# CONFIG_INPUT_MOUSE is not set
+CONFIG_INPUT_TOUCHSCREEN=y
+CONFIG_SERIAL_ATMEL=y
+CONFIG_SERIAL_ATMEL_CONSOLE=y
+CONFIG_LEGACY_PTY_COUNT=32
+CONFIG_HW_RANDOM=y
+CONFIG_I2C=y
+CONFIG_I2C_CHARDEV=y
+CONFIG_I2C_GPIO=y
+CONFIG_SPI=y
+CONFIG_SPI_ATMEL=y
+CONFIG_SPI_BITBANG=y
+CONFIG_GPIO_SYSFS=y
+CONFIG_HWMON=m
+CONFIG_SENSORS_ADM1021=m
+CONFIG_SENSORS_ADM1025=m
+CONFIG_SENSORS_ADM1026=m
+CONFIG_SENSORS_ADM1029=m
+CONFIG_SENSORS_ADM1031=m
+CONFIG_SENSORS_ADM9240=m
+CONFIG_SENSORS_DS1621=m
+CONFIG_SENSORS_GL518SM=m
+CONFIG_SENSORS_GL520SM=m
+CONFIG_SENSORS_IT87=m
+CONFIG_SENSORS_LM63=m
+CONFIG_SENSORS_LM73=m
+CONFIG_SENSORS_LM75=m
+CONFIG_SENSORS_LM77=m
+CONFIG_SENSORS_LM78=m
+CONFIG_SENSORS_LM80=m
+CONFIG_SENSORS_LM83=m
+CONFIG_SENSORS_LM85=m
+CONFIG_SENSORS_LM87=m
+CONFIG_SENSORS_LM90=m
+CONFIG_SENSORS_LM92=m
+CONFIG_SENSORS_MAX1619=m
+CONFIG_SENSORS_PCF8591=m
+CONFIG_SENSORS_SMSC47B397=m
+CONFIG_SENSORS_W83781D=m
+CONFIG_SENSORS_W83791D=m
+CONFIG_SENSORS_W83792D=m
+CONFIG_SENSORS_W83793=m
+CONFIG_SENSORS_W83L785TS=m
+CONFIG_WATCHDOG=y
+CONFIG_WATCHDOG_NOWAYOUT=y
+CONFIG_AT91RM9200_WATCHDOG=y
+CONFIG_FB=y
+CONFIG_FB_MODE_HELPERS=y
+CONFIG_FB_TILEBLITTING=y
+CONFIG_FB_S1D13XXX=y
+CONFIG_BACKLIGHT_LCD_SUPPORT=y
+CONFIG_LCD_CLASS_DEVICE=y
+CONFIG_BACKLIGHT_CLASS_DEVICE=y
+# CONFIG_BACKLIGHT_GENERIC is not set
+CONFIG_DISPLAY_SUPPORT=y
+CONFIG_FRAMEBUFFER_CONSOLE=y
+CONFIG_FONTS=y
+CONFIG_FONT_MINI_4x6=y
+CONFIG_LOGO=y
+# CONFIG_LOGO_LINUX_MONO is not set
+# CONFIG_LOGO_LINUX_VGA16 is not set
+CONFIG_USB=y
+CONFIG_USB_DEVICEFS=y
+# CONFIG_USB_DEVICE_CLASS is not set
+CONFIG_USB_MON=y
+CONFIG_USB_OHCI_HCD=y
+CONFIG_USB_ACM=m
+CONFIG_USB_PRINTER=m
+CONFIG_USB_STORAGE=y
+CONFIG_USB_SERIAL=y
+CONFIG_USB_SERIAL_CONSOLE=y
+CONFIG_USB_SERIAL_GENERIC=y
+CONFIG_USB_SERIAL_FTDI_SIO=y
+CONFIG_USB_SERIAL_KEYSPAN=y
+CONFIG_USB_SERIAL_KEYSPAN_MPR=y
+CONFIG_USB_SERIAL_KEYSPAN_USA28=y
+CONFIG_USB_SERIAL_KEYSPAN_USA28X=y
+CONFIG_USB_SERIAL_KEYSPAN_USA28XA=y
+CONFIG_USB_SERIAL_KEYSPAN_USA28XB=y
+CONFIG_USB_SERIAL_KEYSPAN_USA19=y
+CONFIG_USB_SERIAL_KEYSPAN_USA18X=y
+CONFIG_USB_SERIAL_KEYSPAN_USA19W=y
+CONFIG_USB_SERIAL_KEYSPAN_USA19QW=y
+CONFIG_USB_SERIAL_KEYSPAN_USA19QI=y
+CONFIG_USB_SERIAL_KEYSPAN_USA49W=y
+CONFIG_USB_SERIAL_KEYSPAN_USA49WLC=y
+CONFIG_USB_SERIAL_MCT_U232=y
+CONFIG_USB_SERIAL_PL2303=y
+CONFIG_USB_GADGET=y
+CONFIG_USB_ETH=m
+CONFIG_USB_MASS_STORAGE=m
+CONFIG_MMC=y
+CONFIG_MMC_AT91=y
+CONFIG_NEW_LEDS=y
+CONFIG_LEDS_CLASS=y
+CONFIG_LEDS_GPIO=y
+CONFIG_LEDS_TRIGGERS=y
+CONFIG_LEDS_TRIGGER_TIMER=y
+CONFIG_LEDS_TRIGGER_HEARTBEAT=y
+CONFIG_LEDS_TRIGGER_GPIO=y
+CONFIG_LEDS_TRIGGER_DEFAULT_ON=y
+CONFIG_RTC_CLASS=y
+# CONFIG_RTC_HCTOSYS is not set
+CONFIG_RTC_DRV_DS1307=y
+CONFIG_RTC_DRV_PCF8563=y
+CONFIG_RTC_DRV_AT91RM9200=y
+CONFIG_EXT2_FS=y
+CONFIG_EXT2_FS_XATTR=y
+CONFIG_EXT3_FS=y
+# CONFIG_EXT3_FS_XATTR is not set
+CONFIG_REISERFS_FS=y
+CONFIG_AUTOFS4_FS=y
+CONFIG_ISO9660_FS=y
+CONFIG_JOLIET=y
+CONFIG_ZISOFS=y
+CONFIG_UDF_FS=y
+CONFIG_MSDOS_FS=y
+CONFIG_VFAT_FS=y
+CONFIG_NTFS_FS=m
+CONFIG_TMPFS=y
+CONFIG_CONFIGFS_FS=y
+CONFIG_JFFS2_FS=y
+CONFIG_JFFS2_SUMMARY=y
+CONFIG_JFFS2_COMPRESSION_OPTIONS=y
+CONFIG_JFFS2_LZO=y
+CONFIG_JFFS2_RUBIN=y
+CONFIG_CRAMFS=y
+CONFIG_MINIX_FS=y
+CONFIG_NFS_FS=y
+CONFIG_NFS_V3=y
+CONFIG_NFS_V3_ACL=y
+CONFIG_NFS_V4=y
+CONFIG_ROOT_NFS=y
+CONFIG_NFSD=y
+CONFIG_SMB_FS=m
+CONFIG_CIFS=m
+CONFIG_PARTITION_ADVANCED=y
+CONFIG_MAC_PARTITION=y
+CONFIG_NLS_CODEPAGE_437=y
+CONFIG_NLS_CODEPAGE_737=m
+CONFIG_NLS_CODEPAGE_775=m
+CONFIG_NLS_CODEPAGE_850=m
+CONFIG_NLS_CODEPAGE_852=m
+CONFIG_NLS_CODEPAGE_855=m
+CONFIG_NLS_CODEPAGE_857=m
+CONFIG_NLS_CODEPAGE_860=m
+CONFIG_NLS_CODEPAGE_861=m
+CONFIG_NLS_CODEPAGE_862=m
+CONFIG_NLS_CODEPAGE_863=m
+CONFIG_NLS_CODEPAGE_864=m
+CONFIG_NLS_CODEPAGE_865=m
+CONFIG_NLS_CODEPAGE_866=m
+CONFIG_NLS_CODEPAGE_869=m
+CONFIG_NLS_CODEPAGE_936=m
+CONFIG_NLS_CODEPAGE_950=m
+CONFIG_NLS_CODEPAGE_932=m
+CONFIG_NLS_CODEPAGE_949=m
+CONFIG_NLS_CODEPAGE_874=m
+CONFIG_NLS_ISO8859_8=m
+CONFIG_NLS_CODEPAGE_1250=m
+CONFIG_NLS_CODEPAGE_1251=m
+CONFIG_NLS_ASCII=m
+CONFIG_NLS_ISO8859_1=y
+CONFIG_NLS_ISO8859_2=m
+CONFIG_NLS_ISO8859_3=m
+CONFIG_NLS_ISO8859_4=m
+CONFIG_NLS_ISO8859_5=m
+CONFIG_NLS_ISO8859_6=m
+CONFIG_NLS_ISO8859_7=m
+CONFIG_NLS_ISO8859_9=m
+CONFIG_NLS_ISO8859_13=m
+CONFIG_NLS_ISO8859_14=m
+CONFIG_NLS_ISO8859_15=m
+CONFIG_NLS_KOI8_R=m
+CONFIG_NLS_KOI8_U=m
+CONFIG_NLS_UTF8=y
+CONFIG_MAGIC_SYSRQ=y
+CONFIG_DEBUG_FS=y
+CONFIG_DEBUG_KERNEL=y
+# CONFIG_RCU_CPU_STALL_DETECTOR is not set
+# CONFIG_FTRACE is not set
+CONFIG_CRYPTO_PCBC=y
+CONFIG_CRYPTO_SHA1=y
diff --git a/arch/arm/configs/at91rm9200dk_defconfig b/arch/arm/configs/at91rm9200dk_defconfig
deleted file mode 100644
index 4438e64f3bfb..000000000000
--- a/arch/arm/configs/at91rm9200dk_defconfig
+++ /dev/null
@@ -1,72 +0,0 @@
-CONFIG_EXPERIMENTAL=y
-# CONFIG_SWAP is not set
-CONFIG_SYSVIPC=y
-CONFIG_LOG_BUF_SHIFT=14
-CONFIG_BLK_DEV_INITRD=y
-CONFIG_MODULES=y
-CONFIG_MODULE_UNLOAD=y
-# CONFIG_IOSCHED_DEADLINE is not set
-# CONFIG_IOSCHED_CFQ is not set
-CONFIG_ARCH_AT91=y
-CONFIG_ARCH_AT91RM9200DK=y
-CONFIG_MACH_ECO920=y
-CONFIG_AT91_PROGRAMMABLE_CLOCKS=y
-# CONFIG_ARM_THUMB is not set
-CONFIG_PCCARD=y
-CONFIG_AT91_CF=y
-CONFIG_LEDS=y
-CONFIG_ZBOOT_ROM_TEXT=0x0
-CONFIG_ZBOOT_ROM_BSS=0x0
-CONFIG_CMDLINE="mem=32M console=ttyS0,115200 initrd=0x20410000,3145728 root=/dev/ram0 rw"
-CONFIG_FPE_NWFPE=y
-CONFIG_NET=y
-CONFIG_PACKET=y
-CONFIG_UNIX=y
-CONFIG_INET=y
-CONFIG_IP_PNP=y
-CONFIG_IP_PNP_BOOTP=y
-# CONFIG_IPV6 is not set
-CONFIG_MTD=y
-CONFIG_MTD_PARTITIONS=y
-CONFIG_MTD_CMDLINE_PARTS=y
-CONFIG_MTD_CHAR=y
-CONFIG_MTD_BLOCK=y
-CONFIG_MTD_CFI=y
-CONFIG_MTD_JEDECPROBE=y
-CONFIG_MTD_CFI_AMDSTD=y
-CONFIG_MTD_PHYSMAP=y
-CONFIG_BLK_DEV_RAM=y
-CONFIG_BLK_DEV_RAM_SIZE=8192
-CONFIG_NETDEVICES=y
-CONFIG_NET_ETHERNET=y
-CONFIG_ARM_AT91_ETHER=y
-# CONFIG_INPUT_MOUSEDEV_PSAUX is not set
-# CONFIG_INPUT_KEYBOARD is not set
-# CONFIG_INPUT_MOUSE is not set
-# CONFIG_SERIO is not set
-CONFIG_SERIAL_ATMEL=y
-CONFIG_SERIAL_ATMEL_CONSOLE=y
-CONFIG_I2C=y
-CONFIG_I2C_CHARDEV=y
-CONFIG_I2C_GPIO=y
-CONFIG_WATCHDOG=y
-CONFIG_WATCHDOG_NOWAYOUT=y
-CONFIG_AT91RM9200_WATCHDOG=y
-# CONFIG_VGA_CONSOLE is not set
-# CONFIG_USB_HID is not set
-CONFIG_USB=y
-CONFIG_USB_DEBUG=y
-CONFIG_USB_DEVICEFS=y
-CONFIG_USB_MON=y
-CONFIG_USB_OHCI_HCD=y
-CONFIG_USB_GADGET=y
-CONFIG_MMC=y
-CONFIG_RTC_CLASS=y
-CONFIG_RTC_DRV_AT91RM9200=y
-CONFIG_EXT2_FS=y
-CONFIG_INOTIFY=y
-CONFIG_TMPFS=y
-CONFIG_CRAMFS=y
-CONFIG_DEBUG_KERNEL=y
-CONFIG_DEBUG_USER=y
-CONFIG_DEBUG_LL=y
diff --git a/arch/arm/configs/at91rm9200ek_defconfig b/arch/arm/configs/at91rm9200ek_defconfig
deleted file mode 100644
index ccd517c64bc7..000000000000
--- a/arch/arm/configs/at91rm9200ek_defconfig
+++ /dev/null
@@ -1,73 +0,0 @@
-CONFIG_EXPERIMENTAL=y
-# CONFIG_LOCALVERSION_AUTO is not set
-# CONFIG_SWAP is not set
-CONFIG_SYSVIPC=y
-CONFIG_LOG_BUF_SHIFT=14
-CONFIG_BLK_DEV_INITRD=y
-CONFIG_MODULES=y
-CONFIG_MODULE_UNLOAD=y
-# CONFIG_IOSCHED_DEADLINE is not set
-# CONFIG_IOSCHED_CFQ is not set
-CONFIG_ARCH_AT91=y
-CONFIG_MACH_AT91RM9200EK=y
-CONFIG_AT91_PROGRAMMABLE_CLOCKS=y
-# CONFIG_ARM_THUMB is not set
-CONFIG_LEDS=y
-CONFIG_LEDS_CPU=y
-CONFIG_ZBOOT_ROM_TEXT=0x0
-CONFIG_ZBOOT_ROM_BSS=0x0
-CONFIG_CMDLINE="mem=32M console=ttyS0,115200 initrd=0x20410000,3145728 root=/dev/ram0 rw"
-CONFIG_FPE_NWFPE=y
-CONFIG_NET=y
-CONFIG_PACKET=y
-CONFIG_UNIX=y
-CONFIG_INET=y
-CONFIG_IP_PNP=y
-CONFIG_IP_PNP_BOOTP=y
-# CONFIG_IPV6 is not set
-CONFIG_MTD=y
-CONFIG_MTD_PARTITIONS=y
-CONFIG_MTD_CMDLINE_PARTS=y
-CONFIG_MTD_CHAR=y
-CONFIG_MTD_BLOCK=y
-CONFIG_MTD_CFI=y
-CONFIG_MTD_JEDECPROBE=y
-CONFIG_MTD_CFI_AMDSTD=y
-CONFIG_MTD_PHYSMAP=y
-CONFIG_BLK_DEV_RAM=y
-CONFIG_BLK_DEV_RAM_SIZE=8192
-CONFIG_NETDEVICES=y
-CONFIG_NET_ETHERNET=y
-CONFIG_ARM_AT91_ETHER=y
-# CONFIG_INPUT_MOUSEDEV_PSAUX is not set
-# CONFIG_INPUT_KEYBOARD is not set
-# CONFIG_INPUT_MOUSE is not set
-# CONFIG_SERIO is not set
-CONFIG_SERIAL_ATMEL=y
-CONFIG_SERIAL_ATMEL_CONSOLE=y
-CONFIG_I2C=y
-CONFIG_I2C_CHARDEV=y
-CONFIG_I2C_GPIO=y
-CONFIG_WATCHDOG=y
-CONFIG_WATCHDOG_NOWAYOUT=y
-CONFIG_AT91RM9200_WATCHDOG=y
-CONFIG_FB=y
-CONFIG_FB_S1D13XXX=y
-# CONFIG_VGA_CONSOLE is not set
-# CONFIG_USB_HID is not set
-CONFIG_USB=y
-CONFIG_USB_DEBUG=y
-CONFIG_USB_DEVICEFS=y
-CONFIG_USB_MON=y
-CONFIG_USB_OHCI_HCD=y
-CONFIG_USB_GADGET=y
-CONFIG_MMC=y
-CONFIG_RTC_CLASS=y
-CONFIG_RTC_DRV_AT91RM9200=y
-CONFIG_EXT2_FS=y
-CONFIG_INOTIFY=y
-CONFIG_TMPFS=y
-CONFIG_CRAMFS=y
-CONFIG_DEBUG_KERNEL=y
-CONFIG_DEBUG_USER=y
-CONFIG_DEBUG_LL=y
diff --git a/arch/arm/configs/ateb9200_defconfig b/arch/arm/configs/ateb9200_defconfig
deleted file mode 100644
index 1b0e9a1689bb..000000000000
--- a/arch/arm/configs/ateb9200_defconfig
+++ /dev/null
@@ -1,131 +0,0 @@
-CONFIG_EXPERIMENTAL=y
-CONFIG_SYSVIPC=y
-CONFIG_LOG_BUF_SHIFT=14
-CONFIG_EMBEDDED=y
-CONFIG_SLAB=y
-CONFIG_PROFILING=y
-CONFIG_OPROFILE=m
-CONFIG_MODULES=y
-CONFIG_MODULE_UNLOAD=y
-CONFIG_ARCH_AT91=y
-CONFIG_MACH_ATEB9200=y
-CONFIG_PCCARD=m
-CONFIG_AT91_CF=m
-CONFIG_PREEMPT=y
-CONFIG_ZBOOT_ROM_TEXT=0x0
-CONFIG_ZBOOT_ROM_BSS=0x0
-CONFIG_FPE_NWFPE=y
-CONFIG_PM=y
-CONFIG_NET=y
-CONFIG_PACKET=y
-CONFIG_UNIX=y
-CONFIG_NET_KEY=y
-CONFIG_INET=y
-# CONFIG_IPV6 is not set
-CONFIG_BRIDGE=m
-CONFIG_VLAN_8021Q=m
-CONFIG_MTD=y
-CONFIG_MTD_PARTITIONS=y
-CONFIG_MTD_CMDLINE_PARTS=y
-CONFIG_MTD_CHAR=y
-CONFIG_MTD_BLOCK_RO=y
-CONFIG_BLK_DEV_LOOP=m
-CONFIG_BLK_DEV_NBD=m
-CONFIG_SCSI=m
-CONFIG_BLK_DEV_SD=m
-CONFIG_BLK_DEV_SR=m
-CONFIG_BLK_DEV_SR_VENDOR=y
-CONFIG_CHR_DEV_SG=m
-CONFIG_SCSI_MULTI_LUN=y
-CONFIG_NETDEVICES=y
-CONFIG_DUMMY=m
-CONFIG_TUN=m
-CONFIG_PHYLIB=y
-CONFIG_DAVICOM_PHY=y
-CONFIG_NET_ETHERNET=y
-CONFIG_ARM_AT91_ETHER=y
-CONFIG_USB_USBNET=y
-CONFIG_USB_NET_GL620A=y
-CONFIG_USB_NET_PLUSB=y
-CONFIG_USB_NET_RNDIS_HOST=y
-CONFIG_USB_ALI_M5632=y
-CONFIG_USB_AN2720=y
-CONFIG_USB_EPSON2888=y
-CONFIG_PPP=m
-CONFIG_PPP_ASYNC=m
-CONFIG_PPP_SYNC_TTY=m
-CONFIG_PPP_DEFLATE=m
-CONFIG_PPP_BSDCOMP=m
-CONFIG_PPPOE=m
-CONFIG_SERIAL_ATMEL=y
-CONFIG_SERIAL_ATMEL_CONSOLE=y
-CONFIG_I2C=m
-CONFIG_I2C_CHARDEV=m
-CONFIG_I2C_GPIO=m
-# CONFIG_VGA_CONSOLE is not set
-CONFIG_SOUND=y
-CONFIG_USB_HID=m
-CONFIG_HID_PID=y
-CONFIG_USB_HIDDEV=y
-CONFIG_USB=y
-CONFIG_USB_DEVICEFS=y
-CONFIG_USB_MON=y
-CONFIG_USB_OHCI_HCD=y
-CONFIG_USB_ACM=m
-CONFIG_USB_PRINTER=m
-CONFIG_USB_STORAGE=m
-CONFIG_USB_STORAGE_DATAFAB=m
-CONFIG_USB_STORAGE_FREECOM=m
-CONFIG_USB_STORAGE_USBAT=m
-CONFIG_USB_STORAGE_SDDR09=m
-CONFIG_USB_STORAGE_SDDR55=m
-CONFIG_USB_STORAGE_JUMPSHOT=m
-CONFIG_USB_SERIAL=m
-CONFIG_USB_SERIAL_GENERIC=y
-CONFIG_USB_SERIAL_FTDI_SIO=m
-CONFIG_USB_SERIAL_PL2303=m
-CONFIG_USB_GADGET=m
-CONFIG_USB_ETH=m
-CONFIG_USB_GADGETFS=m
-CONFIG_USB_FILE_STORAGE=m
-CONFIG_USB_G_SERIAL=m
-CONFIG_MMC=m
-CONFIG_MMC_DEBUG=y
-CONFIG_RTC_CLASS=y
-# CONFIG_RTC_HCTOSYS is not set
-CONFIG_RTC_DRV_AT91RM9200=y
-CONFIG_EXT2_FS=m
-CONFIG_EXT3_FS=m
-CONFIG_REISERFS_FS=m
-CONFIG_INOTIFY=y
-CONFIG_ISO9660_FS=m
-CONFIG_JOLIET=y
-CONFIG_ZISOFS=y
-CONFIG_UDF_FS=m
-CONFIG_MSDOS_FS=m
-CONFIG_VFAT_FS=m
-CONFIG_NTFS_FS=m
-CONFIG_NTFS_RW=y
-CONFIG_TMPFS=y
-CONFIG_CRAMFS=y
-CONFIG_NFS_FS=m
-CONFIG_NFS_V3=y
-CONFIG_NFS_V3_ACL=y
-CONFIG_NFS_V4=y
-CONFIG_NFSD=m
-CONFIG_NFSD_V4=y
-CONFIG_PARTITION_ADVANCED=y
-CONFIG_MAC_PARTITION=y
-CONFIG_BSD_DISKLABEL=y
-CONFIG_MINIX_SUBPARTITION=y
-CONFIG_SOLARIS_X86_PARTITION=y
-CONFIG_UNIXWARE_DISKLABEL=y
-CONFIG_NLS_CODEPAGE_932=m
-CONFIG_NLS_ASCII=m
-CONFIG_NLS_ISO8859_15=m
-CONFIG_NLS_UTF8=m
-CONFIG_CRYPTO_MD5=y
-CONFIG_CRYPTO_MICHAEL_MIC=m
-CONFIG_CRYPTO_ARC4=m
-CONFIG_CRC16=m
-CONFIG_LIBCRC32C=m
diff --git a/arch/arm/configs/carmeva_defconfig b/arch/arm/configs/carmeva_defconfig
deleted file mode 100644
index ac64dbd8a49c..000000000000
--- a/arch/arm/configs/carmeva_defconfig
+++ /dev/null
@@ -1,47 +0,0 @@
-CONFIG_EXPERIMENTAL=y
-CONFIG_LOG_BUF_SHIFT=14
-CONFIG_BLK_DEV_INITRD=y
-CONFIG_EMBEDDED=y
-# CONFIG_HOTPLUG is not set
-CONFIG_MODULES=y
-CONFIG_MODULE_UNLOAD=y
-CONFIG_MODULE_FORCE_UNLOAD=y
-CONFIG_ARCH_AT91=y
-CONFIG_MACH_CARMEVA=y
-CONFIG_ZBOOT_ROM_TEXT=0x0
-CONFIG_ZBOOT_ROM_BSS=0x0
-CONFIG_FPE_NWFPE=y
-CONFIG_NET=y
-CONFIG_UNIX=y
-CONFIG_INET=y
-CONFIG_IP_MULTICAST=y
-CONFIG_IP_PNP=y
-# CONFIG_IPV6 is not set
-CONFIG_MTD=y
-CONFIG_MTD_PARTITIONS=y
-CONFIG_MTD_CMDLINE_PARTS=y
-CONFIG_MTD_CHAR=y
-CONFIG_MTD_BLOCK=y
-CONFIG_BLK_DEV_RAM=y
-CONFIG_NETDEVICES=y
-CONFIG_NET_ETHERNET=y
-CONFIG_ARM_AT91_ETHER=y
-# CONFIG_INPUT_MOUSEDEV is not set
-# CONFIG_INPUT_KEYBOARD is not set
-# CONFIG_INPUT_MOUSE is not set
-CONFIG_SERIO=m
-CONFIG_SERIAL_ATMEL=y
-CONFIG_SERIAL_ATMEL_CONSOLE=y
-# CONFIG_VGA_CONSOLE is not set
-CONFIG_MMC=m
-CONFIG_MMC_DEBUG=y
-CONFIG_EXT2_FS=y
-CONFIG_EXT2_FS_XATTR=y
-# CONFIG_DNOTIFY is not set
-CONFIG_JFFS2_FS=y
-CONFIG_JFFS2_COMPRESSION_OPTIONS=y
-CONFIG_NFS_FS=y
-CONFIG_NFS_V3=y
-CONFIG_NFS_V4=y
-CONFIG_ROOT_NFS=y
-CONFIG_NFSD=y
diff --git a/arch/arm/configs/cpuat91_defconfig b/arch/arm/configs/cpuat91_defconfig
deleted file mode 100644
index 022aeb55b676..000000000000
--- a/arch/arm/configs/cpuat91_defconfig
+++ /dev/null
@@ -1,112 +0,0 @@
-CONFIG_EXPERIMENTAL=y
-# CONFIG_LOCALVERSION_AUTO is not set
-# CONFIG_SWAP is not set
-CONFIG_SYSVIPC=y
-CONFIG_LOG_BUF_SHIFT=14
-CONFIG_SYSFS_DEPRECATED_V2=y
-CONFIG_MODULES=y
-CONFIG_MODULE_UNLOAD=y
-# CONFIG_BLK_DEV_BSG is not set
-# CONFIG_IOSCHED_CFQ is not set
-CONFIG_ARCH_AT91=y
-CONFIG_MACH_CPUAT91=y
-CONFIG_AT91_TIMER_HZ=100
-# CONFIG_ARM_THUMB is not set
-CONFIG_PREEMPT=y
-CONFIG_ZBOOT_ROM_TEXT=0x0
-CONFIG_ZBOOT_ROM_BSS=0x0
-CONFIG_NET=y
-CONFIG_PACKET=y
-CONFIG_UNIX=y
-CONFIG_INET=y
-CONFIG_IP_PNP=y
-# CONFIG_INET_XFRM_MODE_TRANSPORT is not set
-# CONFIG_INET_XFRM_MODE_TUNNEL is not set
-# CONFIG_INET_XFRM_MODE_BEET is not set
-# CONFIG_IPV6 is not set
-CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
-CONFIG_MTD=y
-CONFIG_MTD_PARTITIONS=y
-CONFIG_MTD_CMDLINE_PARTS=y
-CONFIG_MTD_CHAR=y
-CONFIG_MTD_BLOCK=y
-CONFIG_MTD_CFI=y
-CONFIG_MTD_CFI_INTELEXT=y
-CONFIG_MTD_PHYSMAP=y
-CONFIG_MTD_PLATRAM=y
-CONFIG_BLK_DEV_LOOP=y
-CONFIG_BLK_DEV_NBD=y
-CONFIG_BLK_DEV_RAM=y
-# CONFIG_MISC_DEVICES is not set
-CONFIG_SCSI=y
-CONFIG_BLK_DEV_SD=y
-CONFIG_SCSI_MULTI_LUN=y
-# CONFIG_SCSI_LOWLEVEL is not set
-CONFIG_NETDEVICES=y
-CONFIG_PHYLIB=y
-CONFIG_NET_ETHERNET=y
-CONFIG_ARM_AT91_ETHER=y
-# CONFIG_NETDEV_1000 is not set
-# CONFIG_NETDEV_10000 is not set
-CONFIG_PPP=y
-CONFIG_PPP_ASYNC=y
-CONFIG_PPP_DEFLATE=y
-CONFIG_PPP_BSDCOMP=y
-# CONFIG_INPUT_MOUSEDEV_PSAUX is not set
-# CONFIG_INPUT_KEYBOARD is not set
-# CONFIG_INPUT_MOUSE is not set
-# CONFIG_SERIO is not set
-CONFIG_SERIAL_ATMEL=y
-CONFIG_SERIAL_ATMEL_CONSOLE=y
-CONFIG_LEGACY_PTY_COUNT=32
-# CONFIG_HW_RANDOM is not set
-CONFIG_I2C=y
-CONFIG_I2C_CHARDEV=y
-CONFIG_I2C_GPIO=y
-CONFIG_GPIO_SYSFS=y
-# CONFIG_HWMON is not set
-CONFIG_WATCHDOG=y
-CONFIG_WATCHDOG_NOWAYOUT=y
-CONFIG_AT91RM9200_WATCHDOG=y
-# CONFIG_VGA_CONSOLE is not set
-# CONFIG_HID_SUPPORT is not set
-CONFIG_USB=y
-# CONFIG_USB_DEVICE_CLASS is not set
-CONFIG_USB_OHCI_HCD=y
-CONFIG_USB_STORAGE=y
-CONFIG_USB_GADGET=y
-CONFIG_USB_ETH=m
-CONFIG_MMC=y
-CONFIG_MMC_AT91=m
-CONFIG_NEW_LEDS=y
-CONFIG_LEDS_CLASS=y
-CONFIG_LEDS_GPIO=y
-CONFIG_LEDS_TRIGGERS=y
-CONFIG_LEDS_TRIGGER_TIMER=y
-CONFIG_LEDS_TRIGGER_HEARTBEAT=y
-CONFIG_LEDS_TRIGGER_GPIO=y
-CONFIG_LEDS_TRIGGER_DEFAULT_ON=y
-CONFIG_RTC_CLASS=y
-# CONFIG_RTC_HCTOSYS is not set
-CONFIG_RTC_DRV_DS1307=y
-CONFIG_RTC_DRV_PCF8563=y
-CONFIG_EXT2_FS=y
-CONFIG_EXT3_FS=y
-# CONFIG_EXT3_FS_XATTR is not set
-CONFIG_INOTIFY=y
-CONFIG_AUTOFS4_FS=y
-CONFIG_MSDOS_FS=y
-CONFIG_VFAT_FS=y
-CONFIG_TMPFS=y
-CONFIG_JFFS2_FS=y
-CONFIG_JFFS2_SUMMARY=y
-CONFIG_CRAMFS=y
-CONFIG_MINIX_FS=y
-CONFIG_NFS_FS=y
-CONFIG_NFS_V3=y
-CONFIG_ROOT_NFS=y
-CONFIG_PARTITION_ADVANCED=y
-CONFIG_NLS_CODEPAGE_437=y
-CONFIG_NLS_ISO8859_1=y
-CONFIG_NLS_UTF8=y
-# CONFIG_RCU_CPU_STALL_DETECTOR is not set
diff --git a/arch/arm/configs/csb337_defconfig b/arch/arm/configs/csb337_defconfig
deleted file mode 100644
index a24c448840c4..000000000000
--- a/arch/arm/configs/csb337_defconfig
+++ /dev/null
@@ -1,104 +0,0 @@
-CONFIG_EXPERIMENTAL=y
-# CONFIG_SWAP is not set
-CONFIG_SYSVIPC=y
-CONFIG_LOG_BUF_SHIFT=14
-CONFIG_BLK_DEV_INITRD=y
-CONFIG_MODULES=y
-CONFIG_MODULE_UNLOAD=y
-# CONFIG_BLK_DEV_BSG is not set
-CONFIG_ARCH_AT91=y
-CONFIG_MACH_CSB337=y
-CONFIG_AT91_PROGRAMMABLE_CLOCKS=y
-# CONFIG_ARM_THUMB is not set
-CONFIG_PCCARD=y
-CONFIG_AT91_CF=y
-CONFIG_LEDS=y
-CONFIG_LEDS_CPU=y
-CONFIG_ZBOOT_ROM_TEXT=0x0
-CONFIG_ZBOOT_ROM_BSS=0x0
-CONFIG_CMDLINE="mem=32M console=ttyS0,38400 initrd=0x20410000,3145728 root=/dev/ram0 rw"
-CONFIG_FPE_NWFPE=y
-CONFIG_NET=y
-CONFIG_PACKET=y
-CONFIG_UNIX=y
-CONFIG_INET=y
-CONFIG_IP_PNP=y
-CONFIG_IP_PNP_DHCP=y
-CONFIG_IP_PNP_BOOTP=y
-# CONFIG_INET_LRO is not set
-# CONFIG_IPV6 is not set
-CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
-CONFIG_MTD=y
-CONFIG_MTD_PARTITIONS=y
-CONFIG_MTD_CMDLINE_PARTS=y
-CONFIG_MTD_CHAR=y
-CONFIG_MTD_BLOCK=y
-CONFIG_MTD_CFI=y
-CONFIG_MTD_CFI_INTELEXT=y
-CONFIG_MTD_PHYSMAP=y
-CONFIG_BLK_DEV_LOOP=y
-CONFIG_BLK_DEV_RAM=y
-CONFIG_BLK_DEV_RAM_SIZE=8192
-CONFIG_ATMEL_SSC=y
-CONFIG_SCSI=y
-CONFIG_NETDEVICES=y
-CONFIG_NET_ETHERNET=y
-CONFIG_ARM_AT91_ETHER=y
-# CONFIG_INPUT_MOUSEDEV_PSAUX is not set
-# CONFIG_INPUT_KEYBOARD is not set
-# CONFIG_INPUT_MOUSE is not set
-# CONFIG_SERIO is not set
-CONFIG_SERIAL_ATMEL=y
-CONFIG_SERIAL_ATMEL_CONSOLE=y
-# CONFIG_HW_RANDOM is not set
-CONFIG_I2C=y
-CONFIG_I2C_CHARDEV=y
-CONFIG_I2C_GPIO=y
-# CONFIG_HWMON is not set
-CONFIG_WATCHDOG=y
-CONFIG_WATCHDOG_NOWAYOUT=y
-CONFIG_AT91RM9200_WATCHDOG=y
-# CONFIG_VGA_CONSOLE is not set
-# CONFIG_USB_HID is not set
-CONFIG_USB=y
-CONFIG_USB_DEBUG=y
-CONFIG_USB_DEVICEFS=y
-CONFIG_USB_MON=y
-CONFIG_USB_OHCI_HCD=y
-CONFIG_USB_STORAGE=y
-CONFIG_USB_SERIAL=y
-CONFIG_USB_SERIAL_CONSOLE=y
-CONFIG_USB_SERIAL_GENERIC=y
-CONFIG_USB_SERIAL_FTDI_SIO=y
-CONFIG_USB_SERIAL_KEYSPAN=y
-CONFIG_USB_SERIAL_KEYSPAN_MPR=y
-CONFIG_USB_SERIAL_KEYSPAN_USA28=y
-CONFIG_USB_SERIAL_KEYSPAN_USA28X=y
-CONFIG_USB_SERIAL_KEYSPAN_USA28XA=y
-CONFIG_USB_SERIAL_KEYSPAN_USA28XB=y
-CONFIG_USB_SERIAL_KEYSPAN_USA19=y
-CONFIG_USB_SERIAL_KEYSPAN_USA18X=y
-CONFIG_USB_SERIAL_KEYSPAN_USA19W=y
-CONFIG_USB_SERIAL_KEYSPAN_USA19QW=y
-CONFIG_USB_SERIAL_KEYSPAN_USA19QI=y
-CONFIG_USB_SERIAL_KEYSPAN_USA49W=y
-CONFIG_USB_SERIAL_KEYSPAN_USA49WLC=y
-CONFIG_USB_SERIAL_MCT_U232=y
-CONFIG_USB_GADGET=y
-CONFIG_MMC=y
-CONFIG_RTC_CLASS=y
-CONFIG_RTC_HCTOSYS_DEVICE="rtc1"
-# CONFIG_RTC_INTF_SYSFS is not set
-CONFIG_RTC_DRV_DS1307=y
-CONFIG_RTC_DRV_AT91RM9200=y
-CONFIG_EXT2_FS=y
-CONFIG_INOTIFY=y
-CONFIG_TMPFS=y
-CONFIG_CRAMFS=y
-CONFIG_NFS_FS=y
-CONFIG_NFS_V3=y
-CONFIG_NFS_V4=y
-CONFIG_ROOT_NFS=y
-CONFIG_DEBUG_KERNEL=y
-CONFIG_DEBUG_USER=y
-CONFIG_DEBUG_LL=y
diff --git a/arch/arm/configs/csb637_defconfig b/arch/arm/configs/csb637_defconfig
deleted file mode 100644
index 98552adac5fb..000000000000
--- a/arch/arm/configs/csb637_defconfig
+++ /dev/null
@@ -1,98 +0,0 @@
-CONFIG_EXPERIMENTAL=y
-# CONFIG_SWAP is not set
-CONFIG_SYSVIPC=y
-CONFIG_LOG_BUF_SHIFT=14
-CONFIG_SYSFS_DEPRECATED_V2=y
-CONFIG_BLK_DEV_INITRD=y
-CONFIG_MODULES=y
-CONFIG_MODULE_UNLOAD=y
-# CONFIG_BLK_DEV_BSG is not set
-CONFIG_ARCH_AT91=y
-CONFIG_MACH_CSB637=y
-CONFIG_AT91_PROGRAMMABLE_CLOCKS=y
-# CONFIG_ARM_THUMB is not set
-CONFIG_PCCARD=y
-CONFIG_AT91_CF=y
-CONFIG_LEDS=y
-CONFIG_LEDS_CPU=y
-CONFIG_ZBOOT_ROM_TEXT=0x0
-CONFIG_ZBOOT_ROM_BSS=0x0
-CONFIG_CMDLINE="mem=32M console=ttyS0,38400 initrd=0x20410000,3145728 root=/dev/ram0 rw"
-CONFIG_FPE_NWFPE=y
-CONFIG_NET=y
-CONFIG_PACKET=y
-CONFIG_UNIX=y
-CONFIG_INET=y
-CONFIG_IP_PNP=y
-CONFIG_IP_PNP_DHCP=y
-CONFIG_IP_PNP_BOOTP=y
-# CONFIG_INET_LRO is not set
-# CONFIG_IPV6 is not set
-CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
-CONFIG_MTD=y
-CONFIG_MTD_PARTITIONS=y
-CONFIG_MTD_CMDLINE_PARTS=y
-CONFIG_MTD_CHAR=y
-CONFIG_MTD_BLOCK=y
-CONFIG_MTD_CFI=y
-CONFIG_MTD_CFI_INTELEXT=y
-CONFIG_MTD_PHYSMAP=y
-CONFIG_BLK_DEV_LOOP=y
-CONFIG_BLK_DEV_RAM=y
-CONFIG_BLK_DEV_RAM_SIZE=8192
-CONFIG_SCSI=y
-CONFIG_NETDEVICES=y
-CONFIG_NET_ETHERNET=y
-CONFIG_ARM_AT91_ETHER=y
-# CONFIG_INPUT_KEYBOARD is not set
-# CONFIG_INPUT_MOUSE is not set
-# CONFIG_SERIO is not set
-CONFIG_SERIAL_ATMEL=y
-CONFIG_SERIAL_ATMEL_CONSOLE=y
-CONFIG_I2C=y
-CONFIG_I2C_CHARDEV=y
-CONFIG_WATCHDOG=y
-CONFIG_WATCHDOG_NOWAYOUT=y
-CONFIG_AT91RM9200_WATCHDOG=y
-# CONFIG_VGA_CONSOLE is not set
-# CONFIG_USB_HID is not set
-CONFIG_USB=y
-CONFIG_USB_DEBUG=y
-CONFIG_USB_DEVICEFS=y
-CONFIG_USB_MON=y
-CONFIG_USB_OHCI_HCD=y
-CONFIG_USB_STORAGE=y
-CONFIG_USB_SERIAL=y
-CONFIG_USB_SERIAL_CONSOLE=y
-CONFIG_USB_SERIAL_GENERIC=y
-CONFIG_USB_SERIAL_FTDI_SIO=y
-CONFIG_USB_SERIAL_KEYSPAN=y
-CONFIG_USB_SERIAL_KEYSPAN_MPR=y
-CONFIG_USB_SERIAL_KEYSPAN_USA28=y
-CONFIG_USB_SERIAL_KEYSPAN_USA28X=y
-CONFIG_USB_SERIAL_KEYSPAN_USA28XA=y
-CONFIG_USB_SERIAL_KEYSPAN_USA28XB=y
-CONFIG_USB_SERIAL_KEYSPAN_USA19=y
-CONFIG_USB_SERIAL_KEYSPAN_USA18X=y
-CONFIG_USB_SERIAL_KEYSPAN_USA19W=y
-CONFIG_USB_SERIAL_KEYSPAN_USA19QW=y
-CONFIG_USB_SERIAL_KEYSPAN_USA19QI=y
-CONFIG_USB_SERIAL_KEYSPAN_USA49W=y
-CONFIG_USB_SERIAL_KEYSPAN_USA49WLC=y
-CONFIG_USB_SERIAL_MCT_U232=y
-CONFIG_NEW_LEDS=y
-CONFIG_LEDS_CLASS=y
-CONFIG_LEDS_GPIO=y
-CONFIG_LEDS_TRIGGERS=y
-CONFIG_LEDS_TRIGGER_HEARTBEAT=y
-CONFIG_EXT2_FS=y
-CONFIG_INOTIFY=y
-CONFIG_TMPFS=y
-CONFIG_CRAMFS=y
-CONFIG_NFS_FS=y
-CONFIG_NFS_V3=y
-CONFIG_NFS_V4=y
-CONFIG_ROOT_NFS=y
-CONFIG_DEBUG_KERNEL=y
-CONFIG_DEBUG_USER=y
-CONFIG_DEBUG_LL=y
diff --git a/arch/arm/configs/da8xx_omapl_defconfig b/arch/arm/configs/da8xx_omapl_defconfig
index ba6670556f78..cdc40c4b8c48 100644
--- a/arch/arm/configs/da8xx_omapl_defconfig
+++ b/arch/arm/configs/da8xx_omapl_defconfig
@@ -17,6 +17,8 @@ CONFIG_MODVERSIONS=y
CONFIG_ARCH_DAVINCI=y
CONFIG_ARCH_DAVINCI_DA830=y
CONFIG_ARCH_DAVINCI_DA850=y
+CONFIG_MACH_MITYOMAPL138=y
+CONFIG_MACH_OMAPL138_HAWKBOARD=y
CONFIG_DAVINCI_RESET_CLOCKS=y
CONFIG_NO_HZ=y
CONFIG_HIGH_RES_TIMERS=y
@@ -79,6 +81,7 @@ CONFIG_I2C_DAVINCI=y
# CONFIG_HWMON is not set
CONFIG_WATCHDOG=y
CONFIG_REGULATOR=y
+CONFIG_REGULATOR_DUMMY=y
CONFIG_REGULATOR_TPS6507X=y
CONFIG_FB=y
CONFIG_FB_DA8XX=y
diff --git a/arch/arm/configs/ecbat91_defconfig b/arch/arm/configs/ecbat91_defconfig
deleted file mode 100644
index 6bb6abdcea8c..000000000000
--- a/arch/arm/configs/ecbat91_defconfig
+++ /dev/null
@@ -1,99 +0,0 @@
-CONFIG_EXPERIMENTAL=y
-CONFIG_SYSVIPC=y
-CONFIG_IKCONFIG=y
-CONFIG_IKCONFIG_PROC=y
-CONFIG_LOG_BUF_SHIFT=14
-CONFIG_SLAB=y
-CONFIG_MODULES=y
-CONFIG_MODULE_UNLOAD=y
-# CONFIG_IOSCHED_DEADLINE is not set
-# CONFIG_IOSCHED_CFQ is not set
-CONFIG_ARCH_AT91=y
-CONFIG_MACH_ECBAT91=y
-CONFIG_AT91_PROGRAMMABLE_CLOCKS=y
-CONFIG_PCCARD=y
-CONFIG_AT91_CF=y
-CONFIG_PREEMPT=y
-CONFIG_LEDS=y
-CONFIG_LEDS_CPU=y
-CONFIG_ZBOOT_ROM_TEXT=0x0
-CONFIG_ZBOOT_ROM_BSS=0x0
-CONFIG_CMDLINE="rootfstype=reiserfs root=/dev/mmcblk0p1 console=ttyS0,115200n8 rootdelay=1"
-CONFIG_FPE_NWFPE=y
-CONFIG_NET=y
-CONFIG_PACKET=y
-CONFIG_UNIX=y
-CONFIG_INET=y
-CONFIG_IP_PNP=y
-CONFIG_IP_PNP_DHCP=y
-# CONFIG_IPV6 is not set
-CONFIG_CFG80211=y
-CONFIG_MAC80211=y
-# CONFIG_STANDALONE is not set
-# CONFIG_PREVENT_FIRMWARE_BUILD is not set
-CONFIG_MTD=y
-CONFIG_MTD_PARTITIONS=y
-CONFIG_MTD_CMDLINE_PARTS=y
-CONFIG_MTD_AFS_PARTS=y
-CONFIG_MTD_CHAR=y
-CONFIG_MTD_BLOCK=y
-CONFIG_MTD_DATAFLASH=y
-CONFIG_BLK_DEV_LOOP=y
-CONFIG_SCSI=y
-CONFIG_BLK_DEV_SD=y
-CONFIG_CHR_DEV_SG=y
-CONFIG_NETDEVICES=y
-CONFIG_NET_ETHERNET=y
-CONFIG_ARM_AT91_ETHER=y
-# CONFIG_NETDEV_1000 is not set
-# CONFIG_NETDEV_10000 is not set
-CONFIG_PPP=y
-CONFIG_PPP_MULTILINK=y
-CONFIG_PPP_FILTER=y
-CONFIG_PPP_ASYNC=y
-# CONFIG_INPUT_MOUSEDEV_PSAUX is not set
-# CONFIG_INPUT_KEYBOARD is not set
-# CONFIG_INPUT_MOUSE is not set
-# CONFIG_SERIO is not set
-CONFIG_SERIAL_ATMEL=y
-CONFIG_SERIAL_ATMEL_CONSOLE=y
-CONFIG_HW_RANDOM=y
-CONFIG_I2C=y
-CONFIG_I2C_CHARDEV=y
-CONFIG_SPI=y
-CONFIG_SPI_BITBANG=y
-CONFIG_WATCHDOG=y
-CONFIG_WATCHDOG_NOWAYOUT=y
-# CONFIG_VGA_CONSOLE is not set
-# CONFIG_USB_HID is not set
-CONFIG_USB=y
-CONFIG_USB_DEVICEFS=y
-# CONFIG_USB_DEVICE_CLASS is not set
-CONFIG_USB_OHCI_HCD=y
-CONFIG_USB_PRINTER=y
-CONFIG_USB_STORAGE=y
-CONFIG_USB_GADGET=y
-CONFIG_MMC=y
-CONFIG_MMC_DEBUG=y
-CONFIG_MMC_AT91=m
-CONFIG_NEW_LEDS=y
-CONFIG_LEDS_CLASS=y
-CONFIG_RTC_CLASS=y
-# CONFIG_RTC_HCTOSYS is not set
-CONFIG_RTC_DRV_AT91RM9200=y
-CONFIG_EXT2_FS=y
-CONFIG_EXT3_FS=y
-CONFIG_REISERFS_FS=y
-CONFIG_INOTIFY=y
-CONFIG_TMPFS=y
-CONFIG_CONFIGFS_FS=y
-CONFIG_CRAMFS=y
-CONFIG_NFS_FS=y
-CONFIG_NFS_V3=y
-CONFIG_NFS_V3_ACL=y
-CONFIG_NFS_V4=y
-CONFIG_ROOT_NFS=y
-CONFIG_PARTITION_ADVANCED=y
-CONFIG_DEBUG_USER=y
-CONFIG_CRYPTO_PCBC=y
-CONFIG_CRYPTO_SHA1=y
diff --git a/arch/arm/configs/kafa_defconfig b/arch/arm/configs/kafa_defconfig
deleted file mode 100644
index 896dbe00dc6e..000000000000
--- a/arch/arm/configs/kafa_defconfig
+++ /dev/null
@@ -1,61 +0,0 @@
-CONFIG_EXPERIMENTAL=y
-# CONFIG_LOCALVERSION_AUTO is not set
-# CONFIG_SWAP is not set
-CONFIG_SYSVIPC=y
-CONFIG_LOG_BUF_SHIFT=14
-CONFIG_SLAB=y
-CONFIG_MODULES=y
-CONFIG_MODULE_UNLOAD=y
-# CONFIG_IOSCHED_CFQ is not set
-CONFIG_ARCH_AT91=y
-CONFIG_MACH_KAFA=y
-# CONFIG_ARM_THUMB is not set
-CONFIG_PREEMPT=y
-CONFIG_LEDS=y
-CONFIG_LEDS_CPU=y
-CONFIG_ZBOOT_ROM_TEXT=0x0
-CONFIG_ZBOOT_ROM_BSS=0x0
-CONFIG_CMDLINE="mem=32M console=ttyS0,115200 initrd=0x20800000,10M root=/dev/ram0 rw"
-CONFIG_FPE_NWFPE=y
-CONFIG_BINFMT_MISC=y
-CONFIG_NET=y
-CONFIG_PACKET=y
-CONFIG_UNIX=y
-CONFIG_INET=y
-# CONFIG_INET_DIAG is not set
-# CONFIG_IPV6 is not set
-CONFIG_MTD=y
-CONFIG_MTD_PARTITIONS=y
-CONFIG_MTD_CHAR=y
-CONFIG_MTD_BLOCK_RO=y
-CONFIG_NETDEVICES=y
-CONFIG_PHYLIB=y
-CONFIG_DAVICOM_PHY=y
-CONFIG_NET_ETHERNET=y
-CONFIG_ARM_AT91_ETHER=y
-# CONFIG_INPUT_MOUSEDEV_PSAUX is not set
-# CONFIG_INPUT_KEYBOARD is not set
-# CONFIG_INPUT_MOUSE is not set
-# CONFIG_SERIO is not set
-CONFIG_SERIAL_ATMEL=y
-CONFIG_SERIAL_ATMEL_CONSOLE=y
-CONFIG_LEGACY_PTY_COUNT=32
-CONFIG_I2C=y
-CONFIG_I2C_CHARDEV=y
-CONFIG_I2C_GPIO=y
-# CONFIG_HWMON is not set
-CONFIG_WATCHDOG=y
-CONFIG_WATCHDOG_NOWAYOUT=y
-CONFIG_AT91RM9200_WATCHDOG=y
-# CONFIG_VGA_CONSOLE is not set
-CONFIG_RTC_CLASS=y
-# CONFIG_RTC_HCTOSYS is not set
-CONFIG_RTC_DRV_AT91RM9200=y
-CONFIG_EXT3_FS=y
-# CONFIG_EXT3_FS_XATTR is not set
-CONFIG_TMPFS=y
-CONFIG_CRAMFS=y
-CONFIG_NFS_FS=m
-CONFIG_NFS_V3=y
-CONFIG_CRYPTO_MD5=y
-CONFIG_CRYPTO_DES=y
diff --git a/arch/arm/configs/kb9202_defconfig b/arch/arm/configs/kb9202_defconfig
deleted file mode 100644
index 9f906a85f5c2..000000000000
--- a/arch/arm/configs/kb9202_defconfig
+++ /dev/null
@@ -1,127 +0,0 @@
-CONFIG_EXPERIMENTAL=y
-# CONFIG_SWAP is not set
-CONFIG_SYSVIPC=y
-CONFIG_POSIX_MQUEUE=y
-CONFIG_BSD_PROCESS_ACCT=y
-CONFIG_AUDIT=y
-CONFIG_IKCONFIG=y
-CONFIG_IKCONFIG_PROC=y
-CONFIG_BLK_DEV_INITRD=y
-CONFIG_KALLSYMS_EXTRA_PASS=y
-CONFIG_MODULES=y
-CONFIG_MODULE_UNLOAD=y
-CONFIG_MODVERSIONS=y
-CONFIG_MODULE_SRCVERSION_ALL=y
-# CONFIG_BLK_DEV_BSG is not set
-# CONFIG_IOSCHED_DEADLINE is not set
-CONFIG_ARCH_AT91=y
-CONFIG_MACH_KB9200=y
-CONFIG_AT91_PROGRAMMABLE_CLOCKS=y
-CONFIG_NO_HZ=y
-CONFIG_HIGH_RES_TIMERS=y
-CONFIG_PREEMPT=y
-CONFIG_AEABI=y
-CONFIG_ZBOOT_ROM_TEXT=0x10000000
-CONFIG_ZBOOT_ROM_BSS=0x20040000
-CONFIG_CMDLINE="noinitrd root=/dev/mtdblock0 rootfstype=jffs2 mem=64M"
-CONFIG_KEXEC=y
-CONFIG_FPE_NWFPE=y
-CONFIG_BINFMT_MISC=y
-CONFIG_NET=y
-CONFIG_PACKET=y
-CONFIG_UNIX=y
-CONFIG_INET=y
-CONFIG_IP_PNP=y
-CONFIG_IP_PNP_DHCP=y
-CONFIG_IP_PNP_BOOTP=y
-# CONFIG_INET_XFRM_MODE_TRANSPORT is not set
-# CONFIG_INET_XFRM_MODE_TUNNEL is not set
-# CONFIG_INET_XFRM_MODE_BEET is not set
-# CONFIG_INET_LRO is not set
-# CONFIG_INET_DIAG is not set
-# CONFIG_IPV6 is not set
-CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
-# CONFIG_FIRMWARE_IN_KERNEL is not set
-CONFIG_MTD=y
-CONFIG_MTD_CONCAT=y
-CONFIG_MTD_PARTITIONS=y
-CONFIG_MTD_CMDLINE_PARTS=y
-CONFIG_MTD_CHAR=y
-CONFIG_MTD_BLOCK=y
-CONFIG_MTD_CFI=y
-CONFIG_MTD_CFI_INTELEXT=y
-CONFIG_MTD_COMPLEX_MAPPINGS=y
-CONFIG_MTD_PHYSMAP=y
-CONFIG_MTD_NAND=y
-CONFIG_MTD_NAND_ATMEL=y
-CONFIG_MTD_UBI=y
-CONFIG_MTD_UBI_GLUEBI=y
-CONFIG_BLK_DEV_LOOP=y
-CONFIG_BLK_DEV_RAM=y
-CONFIG_BLK_DEV_RAM_SIZE=16384
-CONFIG_ATMEL_TCLIB=y
-CONFIG_ATMEL_SSC=y
-CONFIG_SCSI=y
-CONFIG_BLK_DEV_SD=y
-CONFIG_CHR_DEV_SG=y
-CONFIG_SCSI_MULTI_LUN=y
-CONFIG_SCSI_CONSTANTS=y
-CONFIG_SCSI_LOGGING=y
-CONFIG_SCSI_SPI_ATTRS=m
-# CONFIG_SCSI_LOWLEVEL is not set
-CONFIG_NETDEVICES=y
-CONFIG_NET_ETHERNET=y
-CONFIG_ARM_AT91_ETHER=y
-# CONFIG_NETDEV_1000 is not set
-# CONFIG_NETDEV_10000 is not set
-# CONFIG_INPUT_MOUSEDEV_PSAUX is not set
-# CONFIG_INPUT_KEYBOARD is not set
-# CONFIG_INPUT_MOUSE is not set
-# CONFIG_SERIO is not set
-CONFIG_SERIAL_ATMEL=y
-CONFIG_SERIAL_ATMEL_CONSOLE=y
-# CONFIG_LEGACY_PTYS is not set
-# CONFIG_HW_RANDOM is not set
-# CONFIG_HWMON is not set
-CONFIG_WATCHDOG=y
-CONFIG_AT91RM9200_WATCHDOG=y
-CONFIG_FB=y
-CONFIG_FB_MODE_HELPERS=y
-CONFIG_FB_TILEBLITTING=y
-CONFIG_BACKLIGHT_LCD_SUPPORT=y
-# CONFIG_LCD_CLASS_DEVICE is not set
-CONFIG_BACKLIGHT_CLASS_DEVICE=y
-# CONFIG_BACKLIGHT_GENERIC is not set
-# CONFIG_VGA_CONSOLE is not set
-CONFIG_FRAMEBUFFER_CONSOLE=y
-CONFIG_FONTS=y
-CONFIG_FONT_MINI_4x6=y
-# CONFIG_HID_SUPPORT is not set
-CONFIG_USB=y
-CONFIG_USB_DEVICEFS=y
-CONFIG_USB_OHCI_HCD=y
-CONFIG_USB_STORAGE=y
-CONFIG_USB_LIBUSUAL=y
-CONFIG_MMC=y
-CONFIG_MMC_AT91=m
-CONFIG_RTC_CLASS=y
-CONFIG_RTC_DRV_AT91RM9200=y
-CONFIG_EXT2_FS=y
-CONFIG_EXT3_FS=y
-# CONFIG_DNOTIFY is not set
-CONFIG_INOTIFY=y
-CONFIG_VFAT_FS=y
-CONFIG_TMPFS=y
-CONFIG_CONFIGFS_FS=y
-CONFIG_JFFS2_FS=y
-CONFIG_NFS_FS=y
-CONFIG_NFS_V3=y
-CONFIG_ROOT_NFS=y
-CONFIG_NLS_CODEPAGE_437=y
-CONFIG_NLS_UTF8=y
-CONFIG_MAGIC_SYSRQ=y
-CONFIG_DEBUG_FS=y
-CONFIG_DEBUG_KERNEL=y
-# CONFIG_SCHED_DEBUG is not set
-# CONFIG_DEBUG_PREEMPT is not set
-# CONFIG_RCU_CPU_STALL_DETECTOR is not set
diff --git a/arch/arm/configs/mx51_defconfig b/arch/arm/configs/mx51_defconfig
index 163cfee7644c..5c7a87260fab 100644
--- a/arch/arm/configs/mx51_defconfig
+++ b/arch/arm/configs/mx51_defconfig
@@ -82,6 +82,7 @@ CONFIG_FEC=y
CONFIG_INPUT_FF_MEMLESS=m
# CONFIG_INPUT_MOUSEDEV_PSAUX is not set
CONFIG_INPUT_EVDEV=y
+CONFIG_KEYBOARD_GPIO=y
CONFIG_INPUT_EVBUG=m
CONFIG_MOUSE_PS2=m
CONFIG_MOUSE_PS2_ELANTECH=y
diff --git a/arch/arm/configs/n8x0_defconfig b/arch/arm/configs/n8x0_defconfig
deleted file mode 100644
index 56aebb69411d..000000000000
--- a/arch/arm/configs/n8x0_defconfig
+++ /dev/null
@@ -1,94 +0,0 @@
-CONFIG_EXPERIMENTAL=y
-CONFIG_SYSVIPC=y
-CONFIG_LOG_BUF_SHIFT=14
-CONFIG_BLK_DEV_INITRD=y
-CONFIG_MODULES=y
-CONFIG_MODULE_UNLOAD=y
-# CONFIG_LBDAF is not set
-# CONFIG_BLK_DEV_BSG is not set
-# CONFIG_IOSCHED_DEADLINE is not set
-CONFIG_ARCH_OMAP=y
-CONFIG_ARCH_OMAP2=y
-CONFIG_OMAP_RESET_CLOCKS=y
-# CONFIG_OMAP_MUX is not set
-# CONFIG_OMAP_MCBSP is not set
-CONFIG_OMAP_MBOX_FWK=y
-CONFIG_OMAP_32K_TIMER=y
-CONFIG_ARCH_OMAP2420=y
-CONFIG_MACH_NOKIA_N8X0=y
-CONFIG_AEABI=y
-CONFIG_LEDS=y
-CONFIG_ZBOOT_ROM_TEXT=0x10C08000
-CONFIG_ZBOOT_ROM_BSS=0x10200000
-CONFIG_CMDLINE="root=/dev/mmcblk0p2 console=ttyS2,115200n8 debug earlyprintk rootwait"
-CONFIG_FPE_NWFPE=y
-CONFIG_VFP=y
-CONFIG_PM=y
-CONFIG_PM_RUNTIME=y
-CONFIG_NET=y
-CONFIG_UNIX=y
-CONFIG_INET=y
-# CONFIG_INET_XFRM_MODE_TRANSPORT is not set
-# CONFIG_INET_XFRM_MODE_TUNNEL is not set
-# CONFIG_INET_XFRM_MODE_BEET is not set
-# CONFIG_INET_LRO is not set
-# CONFIG_IPV6 is not set
-CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
-CONFIG_MTD=y
-CONFIG_MTD_CMDLINE_PARTS=y
-CONFIG_MTD_ONENAND=y
-CONFIG_MTD_ONENAND_OMAP2=y
-CONFIG_MTD_ONENAND_OTP=y
-CONFIG_BLK_DEV_RAM=y
-# CONFIG_MISC_DEVICES is not set
-# CONFIG_INPUT_MOUSEDEV_PSAUX is not set
-# CONFIG_INPUT_KEYBOARD is not set
-# CONFIG_INPUT_MOUSE is not set
-CONFIG_SERIAL_8250=y
-CONFIG_SERIAL_8250_CONSOLE=y
-# CONFIG_LEGACY_PTYS is not set
-# CONFIG_HW_RANDOM is not set
-CONFIG_I2C=y
-# CONFIG_I2C_COMPAT is not set
-# CONFIG_I2C_HELPER_AUTO is not set
-CONFIG_I2C_OMAP=y
-CONFIG_SPI=y
-CONFIG_SPI_OMAP24XX=y
-# CONFIG_HWMON is not set
-CONFIG_MENELAUS=y
-CONFIG_REGULATOR=y
-# CONFIG_VGA_CONSOLE is not set
-# CONFIG_HID_SUPPORT is not set
-CONFIG_USB=y
-CONFIG_USB_DEBUG=y
-CONFIG_USB_ANNOUNCE_NEW_DEVICES=y
-CONFIG_USB_DEVICEFS=y
-CONFIG_USB_SUSPEND=y
-# CONFIG_USB_OTG_WHITELIST is not set
-CONFIG_USB_MUSB_HDRC=y
-CONFIG_USB_MUSB_OTG=y
-CONFIG_USB_GADGET_MUSB_HDRC=y
-# CONFIG_MUSB_PIO_ONLY is not set
-CONFIG_USB_MUSB_DEBUG=y
-CONFIG_USB_GADGET=y
-CONFIG_USB_GADGET_DEBUG=y
-CONFIG_USB_GADGET_DEBUG_FILES=y
-CONFIG_USB_ETH=m
-CONFIG_USB_ETH_EEM=y
-CONFIG_MMC=y
-CONFIG_MMC_OMAP=y
-CONFIG_EXT3_FS=y
-CONFIG_INOTIFY=y
-CONFIG_VFAT_FS=y
-CONFIG_TMPFS=y
-CONFIG_JFFS2_FS=y
-CONFIG_JFFS2_SUMMARY=y
-CONFIG_JFFS2_COMPRESSION_OPTIONS=y
-CONFIG_JFFS2_LZO=y
-CONFIG_PRINTK_TIME=y
-CONFIG_DEBUG_KERNEL=y
-CONFIG_DEBUG_INFO=y
-# CONFIG_RCU_CPU_STALL_DETECTOR is not set
-CONFIG_DEBUG_USER=y
-CONFIG_DEBUG_ERRORS=y
-CONFIG_CRC_CCITT=y
diff --git a/arch/arm/configs/omap3_defconfig b/arch/arm/configs/omap2plus_defconfig
index 5db9a6be2054..ccedde1371c3 100644
--- a/arch/arm/configs/omap3_defconfig
+++ b/arch/arm/configs/omap2plus_defconfig
@@ -53,18 +53,18 @@ CONFIG_MACH_SBC3530=y
CONFIG_MACH_OMAP_3630SDP=y
CONFIG_MACH_OMAP_4430SDP=y
CONFIG_ARM_THUMBEE=y
+CONFIG_ARM_L1_CACHE_SHIFT=5
+CONFIG_ARM_ERRATA_411920=y
CONFIG_NO_HZ=y
CONFIG_HIGH_RES_TIMERS=y
+CONFIG_SMP=y
+# CONFIG_LOCAL_TIMERS is not set
CONFIG_AEABI=y
CONFIG_LEDS=y
CONFIG_ZBOOT_ROM_TEXT=0x0
CONFIG_ZBOOT_ROM_BSS=0x0
-CONFIG_CMDLINE="root=/dev/mmcblk0p2 rootwait console=ttyS2,115200"
+CONFIG_CMDLINE="root=/dev/mmcblk0p2 rootwait console=ttyO2,115200"
CONFIG_KEXEC=y
-CONFIG_CPU_FREQ=y
-CONFIG_CPU_FREQ_STAT_DETAILS=y
-CONFIG_CPU_FREQ_GOV_USERSPACE=y
-CONFIG_CPU_FREQ_GOV_ONDEMAND=y
CONFIG_FPE_NWFPE=y
CONFIG_VFP=y
CONFIG_NEON=y
@@ -87,23 +87,23 @@ CONFIG_IP_PNP_RARP=y
# CONFIG_INET_LRO is not set
# CONFIG_IPV6 is not set
CONFIG_NETFILTER=y
-CONFIG_BT=y
-CONFIG_BT_L2CAP=y
-CONFIG_BT_SCO=y
+CONFIG_BT=m
+CONFIG_BT_L2CAP=m
+CONFIG_BT_SCO=m
CONFIG_BT_RFCOMM=y
CONFIG_BT_RFCOMM_TTY=y
-CONFIG_BT_BNEP=y
+CONFIG_BT_BNEP=m
CONFIG_BT_BNEP_MC_FILTER=y
CONFIG_BT_BNEP_PROTO_FILTER=y
-CONFIG_BT_HIDP=y
-CONFIG_BT_HCIUART=y
+CONFIG_BT_HIDP=m
+CONFIG_BT_HCIUART=m
CONFIG_BT_HCIUART_H4=y
CONFIG_BT_HCIUART_BCSP=y
CONFIG_BT_HCIUART_LL=y
-CONFIG_BT_HCIBCM203X=y
-CONFIG_BT_HCIBPA10X=y
-CONFIG_CFG80211=y
-CONFIG_MAC80211=y
+CONFIG_BT_HCIBCM203X=m
+CONFIG_BT_HCIBPA10X=m
+CONFIG_CFG80211=m
+CONFIG_MAC80211=m
CONFIG_MAC80211_RC_PID=y
CONFIG_MAC80211_RC_DEFAULT_PID=y
CONFIG_MAC80211_LEDS=y
@@ -137,9 +137,11 @@ CONFIG_SMSC_PHY=y
CONFIG_NET_ETHERNET=y
CONFIG_SMC91X=y
CONFIG_SMSC911X=y
-CONFIG_LIBERTAS=y
-CONFIG_LIBERTAS_USB=y
-CONFIG_LIBERTAS_SDIO=y
+CONFIG_KS8851=y
+CONFIG_KS8851_MLL=y
+CONFIG_LIBERTAS=m
+CONFIG_LIBERTAS_USB=m
+CONFIG_LIBERTAS_SDIO=m
CONFIG_LIBERTAS_DEBUG=y
CONFIG_USB_USBNET=y
CONFIG_USB_ALI_M5632=y
@@ -201,8 +203,8 @@ CONFIG_FONTS=y
CONFIG_FONT_8x8=y
CONFIG_FONT_8x16=y
CONFIG_LOGO=y
-CONFIG_SOUND=y
-CONFIG_SND=y
+CONFIG_SOUND=m
+CONFIG_SND=m
CONFIG_SND_MIXER_OSS=y
CONFIG_SND_PCM_OSS=y
CONFIG_SND_VERBOSE_PRINTK=y
@@ -218,9 +220,9 @@ CONFIG_USB_DEVICEFS=y
CONFIG_USB_SUSPEND=y
# CONFIG_USB_OTG_WHITELIST is not set
CONFIG_USB_MON=y
-CONFIG_USB_MUSB_HDRC=y
-CONFIG_USB_MUSB_OTG=y
-CONFIG_USB_GADGET_MUSB_HDRC=y
+# CONFIG_USB_MUSB_HDRC is not set
+# CONFIG_USB_MUSB_OTG is not set
+# CONFIG_USB_GADGET_MUSB_HDRC is not set
CONFIG_USB_MUSB_DEBUG=y
CONFIG_USB_WDM=y
CONFIG_USB_STORAGE=y
@@ -276,12 +278,11 @@ CONFIG_DEBUG_KERNEL=y
CONFIG_SCHEDSTATS=y
CONFIG_TIMER_STATS=y
CONFIG_PROVE_LOCKING=y
-CONFIG_LOCK_STAT=y
+# CONFIG_LOCK_STAT is not set
CONFIG_DEBUG_SPINLOCK_SLEEP=y
# CONFIG_DEBUG_BUGVERBOSE is not set
CONFIG_DEBUG_INFO=y
# CONFIG_RCU_CPU_STALL_DETECTOR is not set
-CONFIG_DEBUG_LL=y
CONFIG_SECURITY=y
CONFIG_CRYPTO_MICHAEL_MIC=y
# CONFIG_CRYPTO_ANSI_CPRNG is not set
diff --git a/arch/arm/configs/omap_4430sdp_defconfig b/arch/arm/configs/omap_4430sdp_defconfig
deleted file mode 100644
index 14c1e18c648f..000000000000
--- a/arch/arm/configs/omap_4430sdp_defconfig
+++ /dev/null
@@ -1,125 +0,0 @@
-CONFIG_EXPERIMENTAL=y
-CONFIG_SYSVIPC=y
-CONFIG_BSD_PROCESS_ACCT=y
-CONFIG_LOG_BUF_SHIFT=14
-CONFIG_BLK_DEV_INITRD=y
-CONFIG_EMBEDDED=y
-# CONFIG_SYSCTL_SYSCALL is not set
-# CONFIG_ELF_CORE is not set
-CONFIG_MODULES=y
-CONFIG_MODULE_UNLOAD=y
-CONFIG_MODVERSIONS=y
-CONFIG_MODULE_SRCVERSION_ALL=y
-# CONFIG_BLK_DEV_BSG is not set
-CONFIG_ARCH_OMAP=y
-CONFIG_ARCH_OMAP4=y
-# CONFIG_ARCH_OMAP2PLUS_TYPICAL is not set
-# CONFIG_ARCH_OMAP2 is not set
-# CONFIG_ARCH_OMAP3 is not set
-# CONFIG_OMAP_MUX is not set
-CONFIG_OMAP_32K_TIMER=y
-CONFIG_OMAP_DM_TIMER=y
-CONFIG_MACH_OMAP_4430SDP=y
-# CONFIG_ARM_THUMB is not set
-CONFIG_PL310_ERRATA_588369=y
-CONFIG_SMP=y
-CONFIG_NR_CPUS=2
-# CONFIG_LOCAL_TIMERS is not set
-CONFIG_PREEMPT=y
-CONFIG_AEABI=y
-CONFIG_ZBOOT_ROM_TEXT=0x0
-CONFIG_ZBOOT_ROM_BSS=0x0
-CONFIG_CMDLINE="root=/dev/ram0 rw mem=128M console=ttyS2,115200n8 initrd=0x81600000,20M ramdisk_size=20480"
-CONFIG_VFP=y
-CONFIG_NEON=y
-CONFIG_BINFMT_MISC=y
-CONFIG_NET=y
-CONFIG_PACKET=y
-CONFIG_INET=y
-CONFIG_IP_PNP=y
-CONFIG_IP_PNP_DHCP=y
-CONFIG_IP_PNP_BOOTP=y
-CONFIG_IP_PNP_RARP=y
-# CONFIG_IPV6 is not set
-# CONFIG_WIRELESS is not set
-CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
-# CONFIG_FW_LOADER is not set
-CONFIG_BLK_DEV_LOOP=y
-CONFIG_BLK_DEV_RAM=y
-CONFIG_BLK_DEV_RAM_SIZE=16384
-# CONFIG_MISC_DEVICES is not set
-CONFIG_NETDEVICES=y
-CONFIG_NET_ETHERNET=y
-CONFIG_KS8851=y
-# CONFIG_NETDEV_1000 is not set
-# CONFIG_NETDEV_10000 is not set
-# CONFIG_WLAN is not set
-# CONFIG_INPUT_MOUSEDEV is not set
-CONFIG_INPUT_EVDEV=y
-# CONFIG_INPUT_KEYBOARD is not set
-# CONFIG_INPUT_MOUSE is not set
-# CONFIG_SERIO is not set
-CONFIG_SERIAL_8250=y
-CONFIG_SERIAL_8250_CONSOLE=y
-CONFIG_SERIAL_8250_NR_UARTS=32
-CONFIG_SERIAL_8250_EXTENDED=y
-CONFIG_SERIAL_8250_MANY_PORTS=y
-CONFIG_SERIAL_8250_SHARE_IRQ=y
-CONFIG_SERIAL_8250_DETECT_IRQ=y
-CONFIG_SERIAL_8250_RSA=y
-# CONFIG_LEGACY_PTYS is not set
-CONFIG_HW_RANDOM=y
-CONFIG_I2C=y
-CONFIG_I2C_CHARDEV=y
-CONFIG_I2C_OMAP=y
-CONFIG_SPI=y
-CONFIG_SPI_OMAP24XX=y
-# CONFIG_HWMON is not set
-CONFIG_WATCHDOG=y
-CONFIG_OMAP_WATCHDOG=y
-CONFIG_TWL4030_CORE=y
-CONFIG_REGULATOR=y
-CONFIG_REGULATOR_TWL4030=y
-# CONFIG_VGA_CONSOLE is not set
-# CONFIG_HID_SUPPORT is not set
-# CONFIG_USB_SUPPORT is not set
-CONFIG_MMC=y
-CONFIG_MMC_OMAP_HS=y
-CONFIG_RTC_CLASS=y
-CONFIG_RTC_DRV_TWL4030=y
-CONFIG_EXT2_FS=y
-CONFIG_EXT3_FS=y
-# CONFIG_EXT3_FS_XATTR is not set
-CONFIG_INOTIFY=y
-CONFIG_QUOTA=y
-CONFIG_QFMT_V2=y
-CONFIG_MSDOS_FS=y
-CONFIG_VFAT_FS=y
-CONFIG_TMPFS=y
-CONFIG_NFS_FS=y
-CONFIG_NFS_V3=y
-CONFIG_NFS_V3_ACL=y
-CONFIG_NFS_V4=y
-CONFIG_ROOT_NFS=y
-CONFIG_PARTITION_ADVANCED=y
-CONFIG_NLS_CODEPAGE_437=y
-CONFIG_NLS_ISO8859_1=y
-# CONFIG_ENABLE_WARN_DEPRECATED is not set
-# CONFIG_ENABLE_MUST_CHECK is not set
-CONFIG_MAGIC_SYSRQ=y
-CONFIG_DEBUG_KERNEL=y
-# CONFIG_DETECT_SOFTLOCKUP is not set
-CONFIG_DETECT_HUNG_TASK=y
-# CONFIG_SCHED_DEBUG is not set
-# CONFIG_DEBUG_PREEMPT is not set
-# CONFIG_DEBUG_BUGVERBOSE is not set
-CONFIG_DEBUG_INFO=y
-# CONFIG_RCU_CPU_STALL_DETECTOR is not set
-# CONFIG_FTRACE is not set
-# CONFIG_ARM_UNWIND is not set
-CONFIG_CRYPTO_ECB=m
-CONFIG_CRYPTO_PCBC=m
-# CONFIG_CRYPTO_ANSI_CPRNG is not set
-CONFIG_CRC_CCITT=y
-CONFIG_CRC_T10DIF=y
-CONFIG_LIBCRC32C=y
diff --git a/arch/arm/configs/omap_generic_2420_defconfig b/arch/arm/configs/omap_generic_2420_defconfig
deleted file mode 100644
index ac08e51180dd..000000000000
--- a/arch/arm/configs/omap_generic_2420_defconfig
+++ /dev/null
@@ -1,37 +0,0 @@
-CONFIG_EXPERIMENTAL=y
-CONFIG_SYSVIPC=y
-CONFIG_LOG_BUF_SHIFT=14
-CONFIG_BLK_DEV_INITRD=y
-CONFIG_MODULES=y
-CONFIG_MODULE_UNLOAD=y
-# CONFIG_BLK_DEV_BSG is not set
-CONFIG_ARCH_OMAP=y
-CONFIG_ARCH_OMAP2=y
-# CONFIG_OMAP_MUX is not set
-CONFIG_MACH_OMAP_GENERIC=y
-CONFIG_ARCH_OMAP2420=y
-CONFIG_LEDS=y
-CONFIG_ZBOOT_ROM_TEXT=0x10C08000
-CONFIG_ZBOOT_ROM_BSS=0x10200000
-CONFIG_FPE_NWFPE=y
-CONFIG_BLK_DEV_RAM=y
-CONFIG_INPUT_EVDEV=y
-# CONFIG_INPUT_KEYBOARD is not set
-# CONFIG_INPUT_MOUSE is not set
-CONFIG_SERIAL_8250=y
-CONFIG_SERIAL_8250_CONSOLE=y
-# CONFIG_LEGACY_PTYS is not set
-CONFIG_WATCHDOG=y
-CONFIG_WATCHDOG_NOWAYOUT=y
-CONFIG_VIDEO_OUTPUT_CONTROL=m
-# CONFIG_VGA_CONSOLE is not set
-CONFIG_EXT2_FS=y
-CONFIG_EXT2_FS_XATTR=y
-CONFIG_INOTIFY=y
-CONFIG_ROMFS_FS=y
-CONFIG_DEBUG_KERNEL=y
-CONFIG_DEBUG_INFO=y
-CONFIG_DEBUG_USER=y
-CONFIG_DEBUG_ERRORS=y
-CONFIG_DEBUG_LL=y
-CONFIG_CRC_CCITT=y
diff --git a/arch/arm/configs/onearm_defconfig b/arch/arm/configs/onearm_defconfig
deleted file mode 100644
index 1579857aeeaa..000000000000
--- a/arch/arm/configs/onearm_defconfig
+++ /dev/null
@@ -1,80 +0,0 @@
-CONFIG_EXPERIMENTAL=y
-# CONFIG_SWAP is not set
-CONFIG_SYSVIPC=y
-CONFIG_LOG_BUF_SHIFT=14
-CONFIG_BLK_DEV_INITRD=y
-CONFIG_EMBEDDED=y
-CONFIG_SLAB=y
-CONFIG_MODULES=y
-CONFIG_MODULE_UNLOAD=y
-# CONFIG_IOSCHED_DEADLINE is not set
-# CONFIG_IOSCHED_CFQ is not set
-CONFIG_ARCH_AT91=y
-CONFIG_MACH_ONEARM=y
-CONFIG_AT91_PROGRAMMABLE_CLOCKS=y
-# CONFIG_ARM_THUMB is not set
-CONFIG_PCCARD=y
-CONFIG_AT91_CF=y
-CONFIG_LEDS=y
-CONFIG_ZBOOT_ROM_TEXT=0x0
-CONFIG_ZBOOT_ROM_BSS=0x0
-CONFIG_CMDLINE="console=ttyS0,115200 root=/dev/nfs ip=bootp mem=64M"
-CONFIG_FPE_NWFPE=y
-CONFIG_NET=y
-CONFIG_PACKET=y
-CONFIG_UNIX=y
-CONFIG_INET=y
-CONFIG_IP_PNP=y
-CONFIG_IP_PNP_BOOTP=y
-CONFIG_IPV6=y
-# CONFIG_INET6_XFRM_MODE_TRANSPORT is not set
-# CONFIG_INET6_XFRM_MODE_TUNNEL is not set
-# CONFIG_INET6_XFRM_MODE_BEET is not set
-# CONFIG_IPV6_SIT is not set
-CONFIG_MTD=y
-CONFIG_MTD_PARTITIONS=y
-CONFIG_MTD_CMDLINE_PARTS=y
-CONFIG_MTD_CHAR=y
-CONFIG_MTD_BLOCK=y
-CONFIG_MTD_CFI=y
-CONFIG_MTD_JEDECPROBE=y
-CONFIG_MTD_CFI_AMDSTD=y
-CONFIG_MTD_PHYSMAP=y
-CONFIG_BLK_DEV_NBD=y
-CONFIG_BLK_DEV_RAM=y
-CONFIG_BLK_DEV_RAM_SIZE=8192
-CONFIG_NETDEVICES=y
-CONFIG_NET_ETHERNET=y
-CONFIG_ARM_AT91_ETHER=y
-# CONFIG_INPUT_MOUSEDEV_PSAUX is not set
-# CONFIG_INPUT_KEYBOARD is not set
-# CONFIG_INPUT_MOUSE is not set
-# CONFIG_SERIO is not set
-# CONFIG_VT is not set
-CONFIG_SERIAL_ATMEL=y
-CONFIG_SERIAL_ATMEL_CONSOLE=y
-# CONFIG_HW_RANDOM is not set
-CONFIG_I2C=y
-CONFIG_I2C_CHARDEV=y
-CONFIG_WATCHDOG=y
-CONFIG_WATCHDOG_NOWAYOUT=y
-CONFIG_AT91RM9200_WATCHDOG=y
-# CONFIG_USB_HID is not set
-CONFIG_USB=y
-CONFIG_USB_DEBUG=y
-CONFIG_USB_DEVICEFS=y
-CONFIG_USB_MON=y
-CONFIG_USB_OHCI_HCD=y
-CONFIG_USB_GADGET=y
-CONFIG_MMC=y
-CONFIG_EXT2_FS=y
-CONFIG_INOTIFY=y
-CONFIG_TMPFS=y
-CONFIG_CRAMFS=y
-CONFIG_NFS_FS=y
-CONFIG_NFS_V3=y
-CONFIG_NFS_V3_ACL=y
-CONFIG_ROOT_NFS=y
-CONFIG_DEBUG_KERNEL=y
-CONFIG_DEBUG_USER=y
-CONFIG_DEBUG_LL=y
diff --git a/arch/arm/configs/pcontrol_g20_defconfig b/arch/arm/configs/pcontrol_g20_defconfig
new file mode 100644
index 000000000000..b42ee62c4d77
--- /dev/null
+++ b/arch/arm/configs/pcontrol_g20_defconfig
@@ -0,0 +1,175 @@
+CONFIG_EXPERIMENTAL=y
+CONFIG_CROSS_COMPILE="/opt/arm-2010q1/bin/arm-none-linux-gnueabi-"
+# CONFIG_LOCALVERSION_AUTO is not set
+# CONFIG_SWAP is not set
+CONFIG_SYSVIPC=y
+CONFIG_POSIX_MQUEUE=y
+CONFIG_TREE_PREEMPT_RCU=y
+CONFIG_IKCONFIG=y
+CONFIG_IKCONFIG_PROC=y
+CONFIG_LOG_BUF_SHIFT=14
+CONFIG_NAMESPACES=y
+CONFIG_BLK_DEV_INITRD=y
+CONFIG_EMBEDDED=y
+# CONFIG_SYSCTL_SYSCALL is not set
+# CONFIG_KALLSYMS is not set
+# CONFIG_VM_EVENT_COUNTERS is not set
+# CONFIG_COMPAT_BRK is not set
+CONFIG_SLAB=y
+CONFIG_MODULES=y
+CONFIG_MODULE_UNLOAD=y
+# CONFIG_LBDAF is not set
+# CONFIG_BLK_DEV_BSG is not set
+CONFIG_DEFAULT_DEADLINE=y
+CONFIG_ARCH_AT91=y
+CONFIG_ARCH_AT91SAM9G20=y
+CONFIG_MACH_PCONTROL_G20=y
+CONFIG_AT91_PROGRAMMABLE_CLOCKS=y
+CONFIG_NO_HZ=y
+CONFIG_HIGH_RES_TIMERS=y
+CONFIG_PREEMPT=y
+CONFIG_AEABI=y
+# CONFIG_OABI_COMPAT is not set
+CONFIG_ZBOOT_ROM_TEXT=0x0
+CONFIG_ZBOOT_ROM_BSS=0x0
+CONFIG_CMDLINE="console=ttyS0,115200 mem=128M mtdparts=atmel_nand:128k(bootstrap)ro,256k(uboot)ro,128k(env1)ro,128k(env2)ro,2M(linux),-(root) root=/dev/mmcblk0p1 rootwait rw"
+CONFIG_VFP=y
+CONFIG_BINFMT_MISC=y
+CONFIG_NET=y
+CONFIG_PACKET=y
+CONFIG_UNIX=y
+CONFIG_INET=y
+# CONFIG_INET_XFRM_MODE_TRANSPORT is not set
+# CONFIG_INET_XFRM_MODE_TUNNEL is not set
+# CONFIG_INET_XFRM_MODE_BEET is not set
+# CONFIG_INET_LRO is not set
+# CONFIG_IPV6 is not set
+CONFIG_VLAN_8021Q=y
+# CONFIG_WIRELESS is not set
+CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
+# CONFIG_FW_LOADER is not set
+CONFIG_MTD=y
+CONFIG_MTD_PARTITIONS=y
+CONFIG_MTD_CMDLINE_PARTS=y
+CONFIG_MTD_CHAR=y
+CONFIG_MTD_BLOCK=y
+CONFIG_MTD_COMPLEX_MAPPINGS=y
+CONFIG_MTD_PHRAM=m
+CONFIG_MTD_NAND=y
+CONFIG_MTD_NAND_ATMEL=y
+CONFIG_BLK_DEV_LOOP=y
+CONFIG_BLK_DEV_RAM=y
+CONFIG_BLK_DEV_RAM_SIZE=8192
+CONFIG_ATMEL_TCLIB=y
+CONFIG_EEPROM_AT24=m
+CONFIG_SCSI=m
+# CONFIG_SCSI_PROC_FS is not set
+CONFIG_BLK_DEV_SD=m
+CONFIG_SCSI_MULTI_LUN=y
+# CONFIG_SCSI_LOWLEVEL is not set
+CONFIG_NETDEVICES=y
+CONFIG_MACVLAN=m
+CONFIG_TUN=m
+CONFIG_SMSC_PHY=m
+CONFIG_BROADCOM_PHY=m
+CONFIG_NET_ETHERNET=y
+CONFIG_MII=y
+CONFIG_MACB=y
+CONFIG_SMSC911X=m
+# CONFIG_NETDEV_1000 is not set
+# CONFIG_NETDEV_10000 is not set
+# CONFIG_WLAN is not set
+CONFIG_PPP=m
+CONFIG_PPP_ASYNC=m
+CONFIG_PPP_DEFLATE=m
+CONFIG_PPP_MPPE=m
+CONFIG_INPUT_POLLDEV=y
+CONFIG_INPUT_SPARSEKMAP=y
+# CONFIG_INPUT_MOUSEDEV is not set
+CONFIG_INPUT_EVDEV=m
+CONFIG_INPUT_EVBUG=m
+# CONFIG_KEYBOARD_ATKBD is not set
+CONFIG_KEYBOARD_GPIO=m
+CONFIG_KEYBOARD_MATRIX=m
+# CONFIG_INPUT_MOUSE is not set
+CONFIG_INPUT_TOUCHSCREEN=y
+CONFIG_INPUT_MISC=y
+CONFIG_INPUT_UINPUT=m
+CONFIG_INPUT_GPIO_ROTARY_ENCODER=m
+# CONFIG_SERIO is not set
+# CONFIG_DEVKMEM is not set
+CONFIG_SERIAL_ATMEL=y
+CONFIG_SERIAL_ATMEL_CONSOLE=y
+CONFIG_SERIAL_MAX3100=m
+# CONFIG_LEGACY_PTYS is not set
+# CONFIG_HW_RANDOM is not set
+CONFIG_R3964=m
+CONFIG_I2C=m
+CONFIG_I2C_CHARDEV=m
+# CONFIG_I2C_HELPER_AUTO is not set
+CONFIG_I2C_GPIO=m
+CONFIG_SPI=y
+CONFIG_SPI_ATMEL=m
+CONFIG_SPI_SPIDEV=m
+CONFIG_GPIO_SYSFS=y
+CONFIG_W1=m
+CONFIG_W1_MASTER_GPIO=m
+CONFIG_W1_SLAVE_DS2431=m
+# CONFIG_HWMON is not set
+CONFIG_WATCHDOG=y
+CONFIG_AT91SAM9X_WATCHDOG=y
+# CONFIG_MFD_SUPPORT is not set
+# CONFIG_HID_SUPPORT is not set
+CONFIG_USB=y
+# CONFIG_USB_DEVICE_CLASS is not set
+CONFIG_USB_OHCI_HCD=y
+CONFIG_USB_STORAGE=m
+CONFIG_USB_LIBUSUAL=y
+CONFIG_USB_SERIAL=m
+CONFIG_USB_SERIAL_GENERIC=y
+CONFIG_USB_SERIAL_FTDI_SIO=m
+CONFIG_USB_SERIAL_PL2303=m
+CONFIG_USB_GADGET=y
+CONFIG_USB_ZERO=m
+CONFIG_USB_ETH=m
+CONFIG_USB_FILE_STORAGE=m
+CONFIG_USB_G_SERIAL=m
+CONFIG_USB_G_HID=m
+CONFIG_MMC=y
+CONFIG_MMC_UNSAFE_RESUME=y
+CONFIG_MMC_ATMELMCI=y
+CONFIG_NEW_LEDS=y
+CONFIG_LEDS_CLASS=y
+CONFIG_LEDS_GPIO=y
+CONFIG_LEDS_TRIGGERS=y
+CONFIG_LEDS_TRIGGER_TIMER=y
+CONFIG_LEDS_TRIGGER_HEARTBEAT=y
+CONFIG_LEDS_TRIGGER_DEFAULT_ON=y
+CONFIG_RTC_CLASS=y
+CONFIG_RTC_DRV_AT91SAM9=y
+CONFIG_AUXDISPLAY=y
+CONFIG_UIO=y
+CONFIG_UIO_PDRV=y
+CONFIG_STAGING=y
+# CONFIG_STAGING_EXCLUDE_BUILD is not set
+CONFIG_IIO=y
+CONFIG_EXT2_FS=y
+CONFIG_EXT3_FS=y
+# CONFIG_EXT3_FS_XATTR is not set
+CONFIG_VFAT_FS=y
+CONFIG_TMPFS=y
+CONFIG_JFFS2_FS=y
+CONFIG_NFS_FS=y
+CONFIG_NFS_V3=y
+CONFIG_NFS_V4=y
+CONFIG_PARTITION_ADVANCED=y
+CONFIG_NLS_CODEPAGE_437=y
+CONFIG_NLS_CODEPAGE_850=y
+CONFIG_NLS_ISO8859_1=y
+CONFIG_NLS_ISO8859_15=y
+CONFIG_NLS_UTF8=y
+# CONFIG_RCU_CPU_STALL_DETECTOR is not set
+CONFIG_CRYPTO=y
+CONFIG_CRYPTO_ANSI_CPRNG=y
+# CONFIG_CRYPTO_HW is not set
+CONFIG_CRC_CCITT=y
diff --git a/arch/arm/configs/picotux200_defconfig b/arch/arm/configs/picotux200_defconfig
deleted file mode 100644
index 4c9afa478d57..000000000000
--- a/arch/arm/configs/picotux200_defconfig
+++ /dev/null
@@ -1,242 +0,0 @@
-CONFIG_EXPERIMENTAL=y
-CONFIG_SYSVIPC=y
-CONFIG_IKCONFIG=m
-CONFIG_IKCONFIG_PROC=y
-CONFIG_LOG_BUF_SHIFT=14
-CONFIG_EMBEDDED=y
-# CONFIG_KALLSYMS is not set
-CONFIG_SLAB=y
-CONFIG_MODULES=y
-CONFIG_MODULE_UNLOAD=y
-# CONFIG_IOSCHED_DEADLINE is not set
-# CONFIG_IOSCHED_CFQ is not set
-CONFIG_ARCH_AT91=y
-CONFIG_MACH_PICOTUX2XX=y
-CONFIG_AT91_PROGRAMMABLE_CLOCKS=y
-CONFIG_AEABI=y
-CONFIG_ZBOOT_ROM_TEXT=0x0
-CONFIG_ZBOOT_ROM_BSS=0x0
-CONFIG_KEXEC=y
-CONFIG_FPE_NWFPE=y
-CONFIG_BINFMT_MISC=m
-CONFIG_NET=y
-CONFIG_PACKET=m
-CONFIG_UNIX=y
-CONFIG_XFRM_USER=m
-CONFIG_INET=y
-CONFIG_IP_PNP=y
-CONFIG_IP_PNP_BOOTP=y
-CONFIG_NET_IPIP=m
-CONFIG_NET_IPGRE=m
-CONFIG_INET_AH=m
-CONFIG_INET_ESP=m
-CONFIG_INET_IPCOMP=m
-CONFIG_INET_XFRM_MODE_TRANSPORT=m
-CONFIG_INET_XFRM_MODE_TUNNEL=m
-CONFIG_INET_XFRM_MODE_BEET=m
-CONFIG_INET_DIAG=m
-CONFIG_IPV6_PRIVACY=y
-CONFIG_IPV6_ROUTER_PREF=y
-CONFIG_IPV6_ROUTE_INFO=y
-CONFIG_INET6_AH=m
-CONFIG_INET6_ESP=m
-CONFIG_INET6_IPCOMP=m
-CONFIG_IPV6_MIP6=m
-CONFIG_INET6_XFRM_MODE_ROUTEOPTIMIZATION=m
-CONFIG_IPV6_TUNNEL=m
-CONFIG_BRIDGE=m
-CONFIG_VLAN_8021Q=m
-CONFIG_BT=m
-CONFIG_BT_L2CAP=m
-CONFIG_BT_SCO=m
-CONFIG_BT_RFCOMM=m
-CONFIG_BT_RFCOMM_TTY=y
-CONFIG_BT_BNEP=m
-CONFIG_BT_BNEP_MC_FILTER=y
-CONFIG_BT_BNEP_PROTO_FILTER=y
-CONFIG_BT_HIDP=m
-CONFIG_FW_LOADER=m
-CONFIG_MTD=y
-CONFIG_MTD_PARTITIONS=y
-CONFIG_MTD_CMDLINE_PARTS=y
-CONFIG_MTD_CHAR=y
-CONFIG_MTD_BLOCK=y
-CONFIG_MTD_CFI=y
-CONFIG_MTD_CFI_AMDSTD=y
-CONFIG_MTD_PHYSMAP=y
-CONFIG_BLK_DEV_LOOP=m
-CONFIG_EEPROM_LEGACY=m
-CONFIG_SCSI=m
-CONFIG_BLK_DEV_SD=m
-CONFIG_BLK_DEV_SR=m
-CONFIG_BLK_DEV_SR_VENDOR=y
-CONFIG_CHR_DEV_SG=m
-CONFIG_NETDEVICES=y
-CONFIG_TUN=m
-CONFIG_NET_ETHERNET=y
-CONFIG_ARM_AT91_ETHER=y
-CONFIG_USB_CATC=m
-CONFIG_USB_KAWETH=m
-CONFIG_USB_PEGASUS=m
-CONFIG_USB_RTL8150=m
-CONFIG_USB_USBNET=m
-CONFIG_USB_NET_DM9601=m
-CONFIG_USB_NET_GL620A=m
-CONFIG_USB_NET_PLUSB=m
-CONFIG_USB_NET_MCS7830=m
-CONFIG_USB_NET_RNDIS_HOST=m
-CONFIG_USB_ALI_M5632=y
-CONFIG_USB_AN2720=y
-CONFIG_USB_EPSON2888=y
-CONFIG_USB_KC2190=y
-CONFIG_PPP=m
-CONFIG_PPP_FILTER=y
-CONFIG_PPP_ASYNC=m
-CONFIG_PPP_DEFLATE=m
-CONFIG_PPP_BSDCOMP=m
-CONFIG_PPP_MPPE=m
-CONFIG_PPPOE=m
-CONFIG_SLIP=m
-CONFIG_SLIP_COMPRESSED=y
-CONFIG_SLIP_SMART=y
-CONFIG_SLIP_MODE_SLIP6=y
-# CONFIG_INPUT_MOUSEDEV is not set
-# CONFIG_INPUT_KEYBOARD is not set
-# CONFIG_INPUT_MOUSE is not set
-# CONFIG_SERIO is not set
-# CONFIG_VT is not set
-CONFIG_SERIAL_ATMEL=y
-CONFIG_SERIAL_ATMEL_CONSOLE=y
-# CONFIG_LEGACY_PTYS is not set
-CONFIG_I2C=m
-CONFIG_I2C_CHARDEV=m
-CONFIG_I2C_GPIO=m
-CONFIG_HWMON=m
-CONFIG_SENSORS_ADM1021=m
-CONFIG_SENSORS_ADM1025=m
-CONFIG_SENSORS_ADM1026=m
-CONFIG_SENSORS_ADM1029=m
-CONFIG_SENSORS_ADM1031=m
-CONFIG_SENSORS_ADM9240=m
-CONFIG_SENSORS_DS1621=m
-CONFIG_SENSORS_GL518SM=m
-CONFIG_SENSORS_GL520SM=m
-CONFIG_SENSORS_IT87=m
-CONFIG_SENSORS_LM63=m
-CONFIG_SENSORS_LM75=m
-CONFIG_SENSORS_LM77=m
-CONFIG_SENSORS_LM78=m
-CONFIG_SENSORS_LM80=m
-CONFIG_SENSORS_LM83=m
-CONFIG_SENSORS_LM85=m
-CONFIG_SENSORS_LM87=m
-CONFIG_SENSORS_LM90=m
-CONFIG_SENSORS_LM92=m
-CONFIG_SENSORS_MAX1619=m
-CONFIG_SENSORS_PCF8591=m
-CONFIG_SENSORS_SMSC47B397=m
-CONFIG_SENSORS_W83781D=m
-CONFIG_SENSORS_W83791D=m
-CONFIG_SENSORS_W83792D=m
-CONFIG_SENSORS_W83793=m
-CONFIG_SENSORS_W83L785TS=m
-CONFIG_WATCHDOG=y
-CONFIG_WATCHDOG_NOWAYOUT=y
-CONFIG_AT91RM9200_WATCHDOG=m
-CONFIG_HID=m
-CONFIG_USB=m
-CONFIG_USB_DEVICEFS=y
-CONFIG_USB_OHCI_HCD=m
-CONFIG_USB_ACM=m
-CONFIG_USB_PRINTER=m
-CONFIG_USB_STORAGE=m
-CONFIG_USB_SERIAL=m
-CONFIG_USB_SERIAL_GENERIC=y
-CONFIG_USB_SERIAL_PL2303=m
-CONFIG_MMC=m
-CONFIG_MMC_AT91=m
-CONFIG_RTC_CLASS=m
-CONFIG_RTC_DRV_AT91RM9200=m
-CONFIG_EXT2_FS=m
-CONFIG_EXT3_FS=m
-# CONFIG_EXT3_FS_XATTR is not set
-CONFIG_INOTIFY=y
-CONFIG_ISO9660_FS=m
-CONFIG_JOLIET=y
-CONFIG_UDF_FS=m
-CONFIG_MSDOS_FS=m
-CONFIG_VFAT_FS=m
-CONFIG_NTFS_FS=m
-CONFIG_TMPFS=y
-CONFIG_JFFS2_FS=y
-CONFIG_JFFS2_SUMMARY=y
-CONFIG_JFFS2_COMPRESSION_OPTIONS=y
-CONFIG_NFS_FS=m
-CONFIG_SMB_FS=m
-CONFIG_CIFS=m
-CONFIG_PARTITION_ADVANCED=y
-CONFIG_AMIGA_PARTITION=y
-CONFIG_NLS_DEFAULT="utf-8"
-CONFIG_NLS_CODEPAGE_437=m
-CONFIG_NLS_CODEPAGE_737=m
-CONFIG_NLS_CODEPAGE_775=m
-CONFIG_NLS_CODEPAGE_850=m
-CONFIG_NLS_CODEPAGE_852=m
-CONFIG_NLS_CODEPAGE_855=m
-CONFIG_NLS_CODEPAGE_857=m
-CONFIG_NLS_CODEPAGE_860=m
-CONFIG_NLS_CODEPAGE_861=m
-CONFIG_NLS_CODEPAGE_862=m
-CONFIG_NLS_CODEPAGE_863=m
-CONFIG_NLS_CODEPAGE_864=m
-CONFIG_NLS_CODEPAGE_865=m
-CONFIG_NLS_CODEPAGE_866=m
-CONFIG_NLS_CODEPAGE_869=m
-CONFIG_NLS_CODEPAGE_936=m
-CONFIG_NLS_CODEPAGE_950=m
-CONFIG_NLS_CODEPAGE_932=m
-CONFIG_NLS_CODEPAGE_949=m
-CONFIG_NLS_CODEPAGE_874=m
-CONFIG_NLS_ISO8859_8=m
-CONFIG_NLS_CODEPAGE_1250=m
-CONFIG_NLS_CODEPAGE_1251=m
-CONFIG_NLS_ASCII=m
-CONFIG_NLS_ISO8859_1=m
-CONFIG_NLS_ISO8859_2=m
-CONFIG_NLS_ISO8859_3=m
-CONFIG_NLS_ISO8859_4=m
-CONFIG_NLS_ISO8859_5=m
-CONFIG_NLS_ISO8859_6=m
-CONFIG_NLS_ISO8859_7=m
-CONFIG_NLS_ISO8859_9=m
-CONFIG_NLS_ISO8859_13=m
-CONFIG_NLS_ISO8859_14=m
-CONFIG_NLS_ISO8859_15=m
-CONFIG_NLS_KOI8_R=m
-CONFIG_NLS_KOI8_U=m
-CONFIG_NLS_UTF8=m
-CONFIG_DEBUG_KERNEL=y
-# CONFIG_DEBUG_BUGVERBOSE is not set
-CONFIG_DEBUG_LL=y
-CONFIG_CRYPTO_NULL=m
-CONFIG_CRYPTO_TEST=m
-CONFIG_CRYPTO_LRW=m
-CONFIG_CRYPTO_PCBC=m
-CONFIG_CRYPTO_XCBC=m
-CONFIG_CRYPTO_MD4=m
-CONFIG_CRYPTO_MICHAEL_MIC=m
-CONFIG_CRYPTO_SHA256=m
-CONFIG_CRYPTO_SHA512=m
-CONFIG_CRYPTO_TGR192=m
-CONFIG_CRYPTO_WP512=m
-CONFIG_CRYPTO_ANUBIS=m
-CONFIG_CRYPTO_BLOWFISH=m
-CONFIG_CRYPTO_CAMELLIA=m
-CONFIG_CRYPTO_CAST5=m
-CONFIG_CRYPTO_CAST6=m
-CONFIG_CRYPTO_FCRYPT=m
-CONFIG_CRYPTO_KHAZAD=m
-CONFIG_CRYPTO_SERPENT=m
-CONFIG_CRYPTO_TEA=m
-CONFIG_CRYPTO_TWOFISH=m
-CONFIG_LIBCRC32C=m
diff --git a/arch/arm/configs/yl9200_defconfig b/arch/arm/configs/yl9200_defconfig
deleted file mode 100644
index 30c537f61089..000000000000
--- a/arch/arm/configs/yl9200_defconfig
+++ /dev/null
@@ -1,137 +0,0 @@
-# CONFIG_SWAP is not set
-CONFIG_SYSVIPC=y
-CONFIG_LOG_BUF_SHIFT=14
-CONFIG_BLK_DEV_INITRD=y
-# CONFIG_CC_OPTIMIZE_FOR_SIZE is not set
-CONFIG_MODULES=y
-CONFIG_MODULE_UNLOAD=y
-# CONFIG_IOSCHED_DEADLINE is not set
-# CONFIG_IOSCHED_CFQ is not set
-CONFIG_ARCH_AT91=y
-CONFIG_ARCH_AT91RM9200DK=y
-CONFIG_MACH_YL9200=y
-# CONFIG_ARM_THUMB is not set
-CONFIG_ZBOOT_ROM_TEXT=0x0
-CONFIG_ZBOOT_ROM_BSS=0x0
-CONFIG_CMDLINE="mem=32M console=ttyS0,115200 initrd=0x20410000,3145728 root=/dev/ram0 rw"
-CONFIG_FPE_NWFPE=y
-CONFIG_NET=y
-CONFIG_PACKET=y
-CONFIG_UNIX=y
-CONFIG_INET=y
-CONFIG_IP_PNP=y
-CONFIG_IP_PNP_DHCP=y
-# CONFIG_INET_XFRM_MODE_TRANSPORT is not set
-# CONFIG_INET_XFRM_MODE_TUNNEL is not set
-# CONFIG_INET_XFRM_MODE_BEET is not set
-# CONFIG_INET_LRO is not set
-# CONFIG_INET_DIAG is not set
-# CONFIG_IPV6 is not set
-CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
-CONFIG_MTD=y
-CONFIG_MTD_CONCAT=y
-CONFIG_MTD_PARTITIONS=y
-CONFIG_MTD_CMDLINE_PARTS=y
-CONFIG_MTD_CHAR=y
-CONFIG_MTD_BLOCK=y
-CONFIG_MTD_CFI=y
-CONFIG_MTD_JEDECPROBE=y
-CONFIG_MTD_CFI_INTELEXT=y
-CONFIG_MTD_COMPLEX_MAPPINGS=y
-CONFIG_MTD_PHYSMAP=y
-CONFIG_MTD_PLATRAM=y
-CONFIG_MTD_NAND=y
-CONFIG_MTD_NAND_ATMEL=y
-CONFIG_MTD_NAND_PLATFORM=y
-CONFIG_BLK_DEV_LOOP=y
-CONFIG_BLK_DEV_RAM=y
-CONFIG_BLK_DEV_RAM_COUNT=3
-CONFIG_BLK_DEV_RAM_SIZE=8192
-# CONFIG_MISC_DEVICES is not set
-CONFIG_BLK_DEV_SD=y
-CONFIG_ATA=y
-CONFIG_NETDEVICES=y
-CONFIG_PHYLIB=y
-CONFIG_DAVICOM_PHY=y
-CONFIG_NET_ETHERNET=y
-CONFIG_ARM_AT91_ETHER=y
-# CONFIG_NETDEV_1000 is not set
-# CONFIG_NETDEV_10000 is not set
-# CONFIG_INPUT_MOUSEDEV_PSAUX is not set
-CONFIG_INPUT_MOUSEDEV_SCREEN_X=640
-CONFIG_INPUT_MOUSEDEV_SCREEN_Y=480
-CONFIG_INPUT_EVDEV=y
-# CONFIG_KEYBOARD_ATKBD is not set
-CONFIG_KEYBOARD_GPIO=y
-CONFIG_INPUT_TOUCHSCREEN=y
-CONFIG_TOUCHSCREEN_ADS7846=y
-# CONFIG_SERIO_SERPORT is not set
-CONFIG_SERIAL_ATMEL=y
-CONFIG_SERIAL_ATMEL_CONSOLE=y
-# CONFIG_HW_RANDOM is not set
-CONFIG_I2C=y
-CONFIG_SPI=y
-CONFIG_SPI_DEBUG=y
-CONFIG_SPI_ATMEL=y
-CONFIG_FB=y
-CONFIG_BACKLIGHT_LCD_SUPPORT=y
-CONFIG_LCD_CLASS_DEVICE=y
-CONFIG_BACKLIGHT_CLASS_DEVICE=y
-CONFIG_DISPLAY_SUPPORT=y
-# CONFIG_VGA_CONSOLE is not set
-CONFIG_LOGO=y
-# CONFIG_LOGO_LINUX_MONO is not set
-# CONFIG_LOGO_LINUX_VGA16 is not set
-CONFIG_USB=y
-CONFIG_USB_DEBUG=y
-CONFIG_USB_DEVICEFS=y
-# CONFIG_USB_DEVICE_CLASS is not set
-CONFIG_USB_MON=y
-CONFIG_USB_OHCI_HCD=y
-CONFIG_USB_STORAGE=y
-CONFIG_USB_GADGET=y
-CONFIG_USB_GADGET_M66592=y
-CONFIG_USB_FILE_STORAGE=m
-CONFIG_MMC=y
-CONFIG_MMC_DEBUG=y
-# CONFIG_MMC_BLOCK_BOUNCE is not set
-CONFIG_MMC_AT91=m
-CONFIG_NEW_LEDS=y
-CONFIG_LEDS_CLASS=y
-CONFIG_LEDS_GPIO=y
-CONFIG_LEDS_TRIGGERS=y
-CONFIG_LEDS_TRIGGER_TIMER=y
-CONFIG_LEDS_TRIGGER_HEARTBEAT=y
-CONFIG_RTC_CLASS=y
-CONFIG_RTC_DRV_AT91RM9200=y
-CONFIG_EXT2_FS=y
-CONFIG_EXT2_FS_XATTR=y
-CONFIG_EXT3_FS=y
-CONFIG_REISERFS_FS=y
-CONFIG_INOTIFY=y
-CONFIG_ISO9660_FS=y
-CONFIG_JOLIET=y
-CONFIG_ZISOFS=y
-CONFIG_UDF_FS=y
-CONFIG_MSDOS_FS=y
-CONFIG_VFAT_FS=y
-CONFIG_TMPFS=y
-CONFIG_JFFS2_FS=y
-CONFIG_JFFS2_FS_DEBUG=1
-CONFIG_JFFS2_COMPRESSION_OPTIONS=y
-CONFIG_JFFS2_RUBIN=y
-CONFIG_CRAMFS=y
-CONFIG_PARTITION_ADVANCED=y
-CONFIG_MAC_PARTITION=y
-CONFIG_NLS_CODEPAGE_437=y
-CONFIG_NLS_ISO8859_1=y
-# CONFIG_ENABLE_MUST_CHECK is not set
-CONFIG_DEBUG_FS=y
-CONFIG_DEBUG_KERNEL=y
-CONFIG_SLUB_DEBUG_ON=y
-CONFIG_DEBUG_KOBJECT=y
-CONFIG_DEBUG_INFO=y
-CONFIG_DEBUG_LIST=y
-CONFIG_DEBUG_USER=y
-CONFIG_DEBUG_ERRORS=y
-CONFIG_DEBUG_LL=y
diff --git a/arch/arm/include/asm/assembler.h b/arch/arm/include/asm/assembler.h
index 062b58c029ab..749bb6622404 100644
--- a/arch/arm/include/asm/assembler.h
+++ b/arch/arm/include/asm/assembler.h
@@ -238,7 +238,7 @@
@ Slightly optimised to avoid incrementing the pointer twice
usraccoff \instr, \reg, \ptr, \inc, 0, \cond, \abort
.if \rept == 2
- usraccoff \instr, \reg, \ptr, \inc, 4, \cond, \abort
+ usraccoff \instr, \reg, \ptr, \inc, \inc, \cond, \abort
.endif
add\cond \ptr, #\rept * \inc
diff --git a/arch/arm/include/asm/hardware/cache-l2x0.h b/arch/arm/include/asm/hardware/cache-l2x0.h
index 6bcba48800fe..cc42d5fdee17 100644
--- a/arch/arm/include/asm/hardware/cache-l2x0.h
+++ b/arch/arm/include/asm/hardware/cache-l2x0.h
@@ -21,9 +21,6 @@
#define __ASM_ARM_HARDWARE_L2X0_H
#define L2X0_CACHE_ID 0x000
-#define L2X0_CACHE_ID_PART_MASK (0xf << 6)
-#define L2X0_CACHE_ID_PART_L210 (1 << 6)
-#define L2X0_CACHE_ID_PART_L310 (3 << 6)
#define L2X0_CACHE_TYPE 0x004
#define L2X0_CTRL 0x100
#define L2X0_AUX_CTRL 0x104
@@ -53,6 +50,16 @@
#define L2X0_LINE_DATA 0xF10
#define L2X0_LINE_TAG 0xF30
#define L2X0_DEBUG_CTRL 0xF40
+#define L2X0_PREFETCH_CTRL 0xF60
+#define L2X0_POWER_CTRL 0xF80
+#define L2X0_DYNAMIC_CLK_GATING_EN (1 << 1)
+#define L2X0_STNDBY_MODE_EN (1 << 0)
+
+/* Registers shifts and masks */
+#define L2X0_CACHE_ID_PART_MASK (0xf << 6)
+#define L2X0_CACHE_ID_PART_L210 (1 << 6)
+#define L2X0_CACHE_ID_PART_L310 (3 << 6)
+#define L2X0_AUX_CTRL_WAY_SIZE_MASK (0x3 << 17)
#ifndef __ASSEMBLY__
extern void __init l2x0_init(void __iomem *base, __u32 aux_val, __u32 aux_mask);
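The part-number macros are regrouped under the new "Registers shifts and masks" block rather than removed; a consumer still decodes the L2X0_CACHE_ID register with them. A minimal sketch of that check, assuming a mapped controller base (this is not the actual cache-l2x0.c change):

/* sketch: tell an L310 (PL310) apart from an L210 via the part field */
static bool l2x0_is_pl310(void __iomem *base)
{
	u32 id = readl(base + L2X0_CACHE_ID);

	return (id & L2X0_CACHE_ID_PART_MASK) == L2X0_CACHE_ID_PART_L310;
}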
diff --git a/arch/arm/include/asm/hardware/it8152.h b/arch/arm/include/asm/hardware/it8152.h
index 6700c7fc7ebd..21fa272301f8 100644
--- a/arch/arm/include/asm/hardware/it8152.h
+++ b/arch/arm/include/asm/hardware/it8152.h
@@ -75,7 +75,7 @@ extern unsigned long it8152_base_address;
IT8152_PD_IRQ(1) USB (USBR)
IT8152_PD_IRQ(0) Audio controller (ACR)
*/
-#define IT8152_IRQ(x) (IRQ_BOARD_END + (x))
+#define IT8152_IRQ(x) (IRQ_BOARD_START + (x))
/* IRQ-sources in 3 groups - local devices, LPC (serial), and external PCI */
#define IT8152_LD_IRQ_COUNT 9
diff --git a/arch/arm/include/asm/highmem.h b/arch/arm/include/asm/highmem.h
index 5aff58126602..1fc684e70ab6 100644
--- a/arch/arm/include/asm/highmem.h
+++ b/arch/arm/include/asm/highmem.h
@@ -35,9 +35,9 @@ extern void kunmap_high_l1_vipt(struct page *page, pte_t saved_pte);
#ifdef CONFIG_HIGHMEM
extern void *kmap(struct page *page);
extern void kunmap(struct page *page);
-extern void *kmap_atomic(struct page *page, enum km_type type);
-extern void kunmap_atomic_notypecheck(void *kvaddr, enum km_type type);
-extern void *kmap_atomic_pfn(unsigned long pfn, enum km_type type);
+extern void *__kmap_atomic(struct page *page);
+extern void __kunmap_atomic(void *kvaddr);
+extern void *kmap_atomic_pfn(unsigned long pfn);
extern struct page *kmap_atomic_to_page(const void *ptr);
#endif
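The km_type argument disappears from the atomic kmap entry points; callers map with a bare kmap_atomic(page) and unmap by the returned address (the pgtable.h hunk below is converted the same way). A hedged sketch of the new calling convention:

/* sketch: touching a highmem page with the type-less atomic kmap API */
static void zero_highpage_sketch(struct page *page)
{
	void *vaddr = kmap_atomic(page);	/* no KM_USER0 slot argument any more */

	memset(vaddr, 0, PAGE_SIZE);
	kunmap_atomic(vaddr);			/* unmap by address, not by slot */
}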
diff --git a/arch/arm/include/asm/kgdb.h b/arch/arm/include/asm/kgdb.h
index 08265993227f..48066ce9ea34 100644
--- a/arch/arm/include/asm/kgdb.h
+++ b/arch/arm/include/asm/kgdb.h
@@ -70,7 +70,8 @@ extern int kgdb_fault_expected;
#define _GP_REGS 16
#define _FP_REGS 8
#define _EXTRA_REGS 2
-#define DBG_MAX_REG_NUM (_GP_REGS + (_FP_REGS * 3) + _EXTRA_REGS)
+#define GDB_MAX_REGS (_GP_REGS + (_FP_REGS * 3) + _EXTRA_REGS)
+#define DBG_MAX_REG_NUM (_GP_REGS + _FP_REGS + _EXTRA_REGS)
#define KGDB_MAX_NO_CPUS 1
#define BUFMAX 400
@@ -93,7 +94,7 @@ extern int kgdb_fault_expected;
#define _SPT 13
#define _LR 14
#define _PC 15
-#define _CPSR (DBG_MAX_REG_NUM - 1)
+#define _CPSR (GDB_MAX_REGS - 1)
/*
* So that we can denote the end of a frame for tracing,
diff --git a/arch/arm/include/asm/memblock.h b/arch/arm/include/asm/memblock.h
index fdbc43b2e6c0..b8da2e415e4e 100644
--- a/arch/arm/include/asm/memblock.h
+++ b/arch/arm/include/asm/memblock.h
@@ -1,13 +1,6 @@
#ifndef _ASM_ARM_MEMBLOCK_H
#define _ASM_ARM_MEMBLOCK_H
-#ifdef CONFIG_MMU
-extern phys_addr_t lowmem_end_addr;
-#define MEMBLOCK_REAL_LIMIT lowmem_end_addr
-#else
-#define MEMBLOCK_REAL_LIMIT 0
-#endif
-
struct meminfo;
struct machine_desc;
diff --git a/arch/arm/include/asm/mmu.h b/arch/arm/include/asm/mmu.h
index 68870c776671..b4ffe9d5b526 100644
--- a/arch/arm/include/asm/mmu.h
+++ b/arch/arm/include/asm/mmu.h
@@ -13,6 +13,10 @@ typedef struct {
#ifdef CONFIG_CPU_HAS_ASID
#define ASID(mm) ((mm)->context.id & 255)
+
+/* init_mm.context.id_lock should be initialized. */
+#define INIT_MM_CONTEXT(name) \
+ .context.id_lock = __SPIN_LOCK_UNLOCKED(name.context.id_lock),
#else
#define ASID(mm) (0)
#endif
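INIT_MM_CONTEXT() lets the architecture splice an extra initializer into the statically defined init_mm, here pre-initializing context.id_lock. It is presumably consumed by the generic definition roughly like this (a sketch of the mm/init-mm.c layout of the time, not part of this patch):

struct mm_struct init_mm = {
	.mm_rb		= RB_ROOT,
	.pgd		= swapper_pg_dir,
	.mm_users	= ATOMIC_INIT(2),
	.mm_count	= ATOMIC_INIT(1),
	.mmap_sem	= __RWSEM_INITIALIZER(init_mm.mmap_sem),
	.page_table_lock = __SPIN_LOCK_UNLOCKED(init_mm.page_table_lock),
	.mmlist		= LIST_HEAD_INIT(init_mm.mmlist),
	INIT_MM_CONTEXT(init_mm)	/* expands to the id_lock initializer on ARM */
};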
diff --git a/arch/arm/include/asm/outercache.h b/arch/arm/include/asm/outercache.h
index 25f76bae57ab..fc1900925275 100644
--- a/arch/arm/include/asm/outercache.h
+++ b/arch/arm/include/asm/outercache.h
@@ -25,6 +25,9 @@ struct outer_cache_fns {
void (*inv_range)(unsigned long, unsigned long);
void (*clean_range)(unsigned long, unsigned long);
void (*flush_range)(unsigned long, unsigned long);
+ void (*flush_all)(void);
+ void (*inv_all)(void);
+ void (*disable)(void);
#ifdef CONFIG_OUTER_CACHE_SYNC
void (*sync)(void);
#endif
@@ -50,6 +53,24 @@ static inline void outer_flush_range(unsigned long start, unsigned long end)
outer_cache.flush_range(start, end);
}
+static inline void outer_flush_all(void)
+{
+ if (outer_cache.flush_all)
+ outer_cache.flush_all();
+}
+
+static inline void outer_inv_all(void)
+{
+ if (outer_cache.inv_all)
+ outer_cache.inv_all();
+}
+
+static inline void outer_disable(void)
+{
+ if (outer_cache.disable)
+ outer_cache.disable();
+}
+
#else
static inline void outer_inv_range(unsigned long start, unsigned long end)
@@ -58,6 +79,9 @@ static inline void outer_clean_range(unsigned long start, unsigned long end)
{ }
static inline void outer_flush_range(unsigned long start, unsigned long end)
{ }
+static inline void outer_flush_all(void) { }
+static inline void outer_inv_all(void) { }
+static inline void outer_disable(void) { }
#endif
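The new hooks follow the same guarded-callback pattern as the existing range operations: a platform without an outer cache leaves them NULL and the wrappers degrade to no-ops. A minimal sketch of how an L2 driver might populate them (the names are illustrative, not from this patch):

/* sketch: an outer-cache driver fills in only the hooks it supports */
static void my_l2_flush_all(void)
{
	/* e.g. write the flush-by-way register and wait for completion */
}

static void my_l2_inv_all(void)
{
	/* e.g. write the invalidate-by-way register and wait for completion */
}

static void __init my_l2_register(void)
{
	outer_cache.flush_all = my_l2_flush_all;
	outer_cache.inv_all   = my_l2_inv_all;
	/* .disable left NULL: outer_disable() then does nothing */
}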
diff --git a/arch/arm/include/asm/pgtable.h b/arch/arm/include/asm/pgtable.h
index a9672e8406a3..53d1d5deb111 100644
--- a/arch/arm/include/asm/pgtable.h
+++ b/arch/arm/include/asm/pgtable.h
@@ -263,17 +263,15 @@ extern struct page *empty_zero_page;
#define pte_page(pte) (pfn_to_page(pte_pfn(pte)))
#define pte_offset_kernel(dir,addr) (pmd_page_vaddr(*(dir)) + __pte_index(addr))
-#define pte_offset_map(dir,addr) (__pte_map(dir, KM_PTE0) + __pte_index(addr))
-#define pte_offset_map_nested(dir,addr) (__pte_map(dir, KM_PTE1) + __pte_index(addr))
-#define pte_unmap(pte) __pte_unmap(pte, KM_PTE0)
-#define pte_unmap_nested(pte) __pte_unmap(pte, KM_PTE1)
+#define pte_offset_map(dir,addr) (__pte_map(dir) + __pte_index(addr))
+#define pte_unmap(pte) __pte_unmap(pte)
#ifndef CONFIG_HIGHPTE
-#define __pte_map(dir,km) pmd_page_vaddr(*(dir))
-#define __pte_unmap(pte,km) do { } while (0)
+#define __pte_map(dir) pmd_page_vaddr(*(dir))
+#define __pte_unmap(pte) do { } while (0)
#else
-#define __pte_map(dir,km) ((pte_t *)kmap_atomic(pmd_page(*(dir)), km) + PTRS_PER_PTE)
-#define __pte_unmap(pte,km) kunmap_atomic((pte - PTRS_PER_PTE), km)
+#define __pte_map(dir) ((pte_t *)kmap_atomic(pmd_page(*(dir))) + PTRS_PER_PTE)
+#define __pte_unmap(pte) kunmap_atomic((pte - PTRS_PER_PTE))
#endif
#define set_pte_ext(ptep,pte,ext) cpu_set_pte_ext(ptep,pte,ext)
@@ -376,6 +374,9 @@ static inline pte_t *pmd_page_vaddr(pmd_t pmd)
#define pmd_page(pmd) pfn_to_page(__phys_to_pfn(pmd_val(pmd)))
+/* we don't need complex calculations here as the pmd is folded into the pgd */
+#define pmd_addr_end(addr,end) (end)
+
/*
* Conversion functions: convert a page and protection to a page entry,
* and a page entry and page directory to the page they refer to.
diff --git a/arch/arm/kernel/entry-armv.S b/arch/arm/kernel/entry-armv.S
index c09e3573c5de..bb96a7d4bbf5 100644
--- a/arch/arm/kernel/entry-armv.S
+++ b/arch/arm/kernel/entry-armv.S
@@ -911,7 +911,7 @@ __kuser_cmpxchg: @ 0xffff0fc0
* A special ghost syscall is used for that (see traps.c).
*/
stmfd sp!, {r7, lr}
- ldr r7, =1f @ it's 20 bits
+ ldr r7, 1f @ it's 20 bits
swi __ARM_NR_cmpxchg
ldmfd sp!, {r7, pc}
1: .word __ARM_NR_cmpxchg
diff --git a/arch/arm/kernel/head.S b/arch/arm/kernel/head.S
index dd6b369ac69c..6bd82d25683c 100644
--- a/arch/arm/kernel/head.S
+++ b/arch/arm/kernel/head.S
@@ -85,9 +85,11 @@ ENTRY(stext)
mrc p15, 0, r9, c0, c0 @ get processor id
bl __lookup_processor_type @ r5=procinfo r9=cpuid
movs r10, r5 @ invalid processor (r5=0)?
+ THUMB( it eq ) @ force fixup-able long branch encoding
beq __error_p @ yes, error 'p'
bl __lookup_machine_type @ r5=machinfo
movs r8, r5 @ invalid machine (r5=0)?
+ THUMB( it eq ) @ force fixup-able long branch encoding
beq __error_a @ yes, error 'a'
bl __vet_atags
#ifdef CONFIG_SMP_ON_UP
@@ -262,6 +264,7 @@ __create_page_tables:
mov pc, lr
ENDPROC(__create_page_tables)
.ltorg
+ .align
__enable_mmu_loc:
.long .
.long __enable_mmu
@@ -282,6 +285,7 @@ ENTRY(secondary_startup)
bl __lookup_processor_type
movs r10, r5 @ invalid processor?
moveq r0, #'p' @ yes, error 'p'
+ THUMB( it eq ) @ force fixup-able long branch encoding
beq __error_p
/*
@@ -308,6 +312,8 @@ ENTRY(__secondary_switched)
b secondary_start_kernel
ENDPROC(__secondary_switched)
+ .align
+
.type __secondary_data, %object
__secondary_data:
.long .
@@ -413,6 +419,7 @@ __fixup_smp_on_up:
mov pc, lr
ENDPROC(__fixup_smp)
+ .align
1: .word .
.word __smpalt_begin
.word __smpalt_end
diff --git a/arch/arm/kernel/hw_breakpoint.c b/arch/arm/kernel/hw_breakpoint.c
index 54593b0c241b..21e3a4ab3b8c 100644
--- a/arch/arm/kernel/hw_breakpoint.c
+++ b/arch/arm/kernel/hw_breakpoint.c
@@ -748,8 +748,7 @@ static int hw_breakpoint_pending(unsigned long addr, unsigned int fsr,
breakpoint_handler(addr, regs);
break;
case ARM_ENTRY_ASYNC_WATCHPOINT:
- WARN_ON("Asynchronous watchpoint exception taken. "
- "Debugging results may be unreliable");
+ WARN(1, "Asynchronous watchpoint exception taken. Debugging results may be unreliable\n");
case ARM_ENTRY_SYNC_WATCHPOINT:
watchpoint_handler(addr, regs);
break;
diff --git a/arch/arm/kernel/kgdb.c b/arch/arm/kernel/kgdb.c
index d6e8b4d2e60d..778c2f7024ff 100644
--- a/arch/arm/kernel/kgdb.c
+++ b/arch/arm/kernel/kgdb.c
@@ -79,7 +79,7 @@ sleeping_thread_to_gdb_regs(unsigned long *gdb_regs, struct task_struct *task)
return;
/* Initialize to zero */
- for (regno = 0; regno < DBG_MAX_REG_NUM; regno++)
+ for (regno = 0; regno < GDB_MAX_REGS; regno++)
gdb_regs[regno] = 0;
/* Otherwise, we have only some registers from switch_to() */
diff --git a/arch/arm/kernel/machine_kexec.c b/arch/arm/kernel/machine_kexec.c
index 1fc74cbd1a19..3a8fd5140d7a 100644
--- a/arch/arm/kernel/machine_kexec.c
+++ b/arch/arm/kernel/machine_kexec.c
@@ -78,7 +78,10 @@ void machine_kexec(struct kimage *image)
local_fiq_disable();
setup_mm_for_reboot(0); /* mode is not used, so just pass 0*/
flush_cache_all();
+ outer_flush_all();
+ outer_disable();
cpu_proc_fin();
+ outer_inv_all();
flush_cache_all();
cpu_reset(reboot_code_buffer_phys);
}
diff --git a/arch/arm/kernel/perf_event.c b/arch/arm/kernel/perf_event.c
index 49643b1467e6..07a50357492a 100644
--- a/arch/arm/kernel/perf_event.c
+++ b/arch/arm/kernel/perf_event.c
@@ -1749,7 +1749,7 @@ static inline int armv7_pmnc_has_overflowed(unsigned long pmnc)
static inline int armv7_pmnc_counter_has_overflowed(unsigned long pmnc,
enum armv7_counters counter)
{
- int ret;
+ int ret = 0;
if (counter == ARMV7_CYCLE_COUNTER)
ret = pmnc & ARMV7_FLAG_C;
diff --git a/arch/arm/kernel/ptrace.c b/arch/arm/kernel/ptrace.c
index e0cb6370ed14..3e97483abcf0 100644
--- a/arch/arm/kernel/ptrace.c
+++ b/arch/arm/kernel/ptrace.c
@@ -1075,13 +1075,15 @@ out:
}
#endif
-long arch_ptrace(struct task_struct *child, long request, long addr, long data)
+long arch_ptrace(struct task_struct *child, long request,
+ unsigned long addr, unsigned long data)
{
int ret;
+ unsigned long __user *datap = (unsigned long __user *) data;
switch (request) {
case PTRACE_PEEKUSR:
- ret = ptrace_read_user(child, addr, (unsigned long __user *)data);
+ ret = ptrace_read_user(child, addr, datap);
break;
case PTRACE_POKEUSR:
@@ -1089,34 +1091,34 @@ long arch_ptrace(struct task_struct *child, long request, long addr, long data)
break;
case PTRACE_GETREGS:
- ret = ptrace_getregs(child, (void __user *)data);
+ ret = ptrace_getregs(child, datap);
break;
case PTRACE_SETREGS:
- ret = ptrace_setregs(child, (void __user *)data);
+ ret = ptrace_setregs(child, datap);
break;
case PTRACE_GETFPREGS:
- ret = ptrace_getfpregs(child, (void __user *)data);
+ ret = ptrace_getfpregs(child, datap);
break;
case PTRACE_SETFPREGS:
- ret = ptrace_setfpregs(child, (void __user *)data);
+ ret = ptrace_setfpregs(child, datap);
break;
#ifdef CONFIG_IWMMXT
case PTRACE_GETWMMXREGS:
- ret = ptrace_getwmmxregs(child, (void __user *)data);
+ ret = ptrace_getwmmxregs(child, datap);
break;
case PTRACE_SETWMMXREGS:
- ret = ptrace_setwmmxregs(child, (void __user *)data);
+ ret = ptrace_setwmmxregs(child, datap);
break;
#endif
case PTRACE_GET_THREAD_AREA:
ret = put_user(task_thread_info(child)->tp_value,
- (unsigned long __user *) data);
+ datap);
break;
case PTRACE_SET_SYSCALL:
@@ -1126,21 +1128,21 @@ long arch_ptrace(struct task_struct *child, long request, long addr, long data)
#ifdef CONFIG_CRUNCH
case PTRACE_GETCRUNCHREGS:
- ret = ptrace_getcrunchregs(child, (void __user *)data);
+ ret = ptrace_getcrunchregs(child, datap);
break;
case PTRACE_SETCRUNCHREGS:
- ret = ptrace_setcrunchregs(child, (void __user *)data);
+ ret = ptrace_setcrunchregs(child, datap);
break;
#endif
#ifdef CONFIG_VFP
case PTRACE_GETVFPREGS:
- ret = ptrace_getvfpregs(child, (void __user *)data);
+ ret = ptrace_getvfpregs(child, datap);
break;
case PTRACE_SETVFPREGS:
- ret = ptrace_setvfpregs(child, (void __user *)data);
+ ret = ptrace_setvfpregs(child, datap);
break;
#endif
diff --git a/arch/arm/kernel/relocate_kernel.S b/arch/arm/kernel/relocate_kernel.S
index fd26f8d65151..9cf4cbf8f95b 100644
--- a/arch/arm/kernel/relocate_kernel.S
+++ b/arch/arm/kernel/relocate_kernel.S
@@ -59,6 +59,8 @@ relocate_new_kernel:
ldr r2,kexec_boot_atags
mov pc,lr
+ .align
+
.globl kexec_start_address
kexec_start_address:
.long 0x0
diff --git a/arch/arm/kernel/stacktrace.c b/arch/arm/kernel/stacktrace.c
index 20b7411e47fd..c2e112e1a05f 100644
--- a/arch/arm/kernel/stacktrace.c
+++ b/arch/arm/kernel/stacktrace.c
@@ -28,7 +28,7 @@ int notrace unwind_frame(struct stackframe *frame)
/* only go to a higher address on the stack */
low = frame->sp;
- high = ALIGN(low, THREAD_SIZE) + THREAD_SIZE;
+ high = ALIGN(low, THREAD_SIZE);
/* check current frame pointer is within bounds */
if (fp < (low + 12) || fp + 4 >= high)
diff --git a/arch/arm/kernel/traps.c b/arch/arm/kernel/traps.c
index cda78d59aa31..446aee97436f 100644
--- a/arch/arm/kernel/traps.c
+++ b/arch/arm/kernel/traps.c
@@ -53,10 +53,7 @@ static void dump_mem(const char *, const char *, unsigned long, unsigned long);
void dump_backtrace_entry(unsigned long where, unsigned long from, unsigned long frame)
{
#ifdef CONFIG_KALLSYMS
- char sym1[KSYM_SYMBOL_LEN], sym2[KSYM_SYMBOL_LEN];
- sprint_symbol(sym1, where);
- sprint_symbol(sym2, from);
- printk("[<%08lx>] (%s) from [<%08lx>] (%s)\n", where, sym1, from, sym2);
+ printk("[<%08lx>] (%pS) from [<%08lx>] (%pS)\n", where, (void *)where, from, (void *)from);
#else
printk("Function entered at [<%08lx>] from [<%08lx>]\n", where, from);
#endif
diff --git a/arch/arm/kernel/unwind.c b/arch/arm/kernel/unwind.c
index 2a161765f6d5..d2cb0b3c9872 100644
--- a/arch/arm/kernel/unwind.c
+++ b/arch/arm/kernel/unwind.c
@@ -279,7 +279,7 @@ int unwind_frame(struct stackframe *frame)
/* only go to a higher address on the stack */
low = frame->sp;
- high = ALIGN(low, THREAD_SIZE) + THREAD_SIZE;
+ high = ALIGN(low, THREAD_SIZE);
pr_debug("%s(pc = %08lx lr = %08lx sp = %08lx)\n", __func__,
frame->pc, frame->lr, frame->sp);
diff --git a/arch/arm/kernel/vmlinux.lds.S b/arch/arm/kernel/vmlinux.lds.S
index 1953e3d21abf..cead8893b46b 100644
--- a/arch/arm/kernel/vmlinux.lds.S
+++ b/arch/arm/kernel/vmlinux.lds.S
@@ -113,6 +113,7 @@ SECTIONS
*(.rodata.*)
*(.glue_7)
*(.glue_7t)
+ . = ALIGN(4);
*(.got) /* Global offset table */
ARM_CPU_KEEP(PROC_INFO)
}
diff --git a/arch/arm/lib/findbit.S b/arch/arm/lib/findbit.S
index 1e4cbd4e7be9..64f6bc1a9132 100644
--- a/arch/arm/lib/findbit.S
+++ b/arch/arm/lib/findbit.S
@@ -174,8 +174,8 @@ ENDPROC(_find_next_bit_be)
*/
.L_found:
#if __LINUX_ARM_ARCH__ >= 5
- rsb r1, r3, #0
- and r3, r3, r1
+ rsb r0, r3, #0
+ and r3, r3, r0
clz r3, r3
rsb r3, r3, #31
add r0, r2, r3
@@ -190,5 +190,7 @@ ENDPROC(_find_next_bit_be)
addeq r2, r2, #1
mov r0, r2
#endif
+ cmp r1, r0 @ Clamp to maxbit
+ movlo r0, r1
mov pc, lr
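The added compare clamps the result so that a bit located beyond the caller-supplied limit is reported as "not found": the returned index never exceeds maxbit. Expressed in C, the post-condition is roughly (sketch only):

/* sketch of the contract the clamp enforces on the return value */
static unsigned long clamp_found_bit(unsigned long found, unsigned long maxbit)
{
	return found < maxbit ? found : maxbit;	/* never report past the limit */
}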
diff --git a/arch/arm/mach-aaec2000/include/mach/vmalloc.h b/arch/arm/mach-aaec2000/include/mach/vmalloc.h
index cff4e0a996ce..a6299e8321bd 100644
--- a/arch/arm/mach-aaec2000/include/mach/vmalloc.h
+++ b/arch/arm/mach-aaec2000/include/mach/vmalloc.h
@@ -11,6 +11,6 @@
#ifndef __ASM_ARCH_VMALLOC_H
#define __ASM_ARCH_VMALLOC_H
-#define VMALLOC_END 0xd0000000
+#define VMALLOC_END 0xd0000000UL
#endif /* __ASM_ARCH_VMALLOC_H */
diff --git a/arch/arm/mach-at91/Kconfig b/arch/arm/mach-at91/Kconfig
index abed4d15a7fd..c015b684b4fe 100644
--- a/arch/arm/mach-at91/Kconfig
+++ b/arch/arm/mach-at91/Kconfig
@@ -375,6 +375,12 @@ config MACH_STAMP9G20
evaluation board.
<http://www.taskit.de/en/>
+config MACH_PCONTROL_G20
+ bool "PControl G20 CPU module"
+ help
+ Select this if you are using taskit's Stamp9G20 CPU module on this
+ carrier board, being the decentralized unit of a building automation
+ system, featuring nvram, eth-switch, iso-rs485, display and io.
endif
if (ARCH_AT91SAM9260 || ARCH_AT91SAM9G20)
diff --git a/arch/arm/mach-at91/Makefile b/arch/arm/mach-at91/Makefile
index 412b3a471a4b..62d686f0b426 100644
--- a/arch/arm/mach-at91/Makefile
+++ b/arch/arm/mach-at91/Makefile
@@ -11,12 +11,12 @@ obj-$(CONFIG_AT91_PMC_UNIT) += clock.o
# CPU-specific support
obj-$(CONFIG_ARCH_AT91RM9200) += at91rm9200.o at91rm9200_time.o at91rm9200_devices.o
-obj-$(CONFIG_ARCH_AT91SAM9260) += at91sam9260.o at91sam926x_time.o at91sam9260_devices.o sam9_smc.o
-obj-$(CONFIG_ARCH_AT91SAM9261) += at91sam9261.o at91sam926x_time.o at91sam9261_devices.o sam9_smc.o
-obj-$(CONFIG_ARCH_AT91SAM9G10) += at91sam9261.o at91sam926x_time.o at91sam9261_devices.o sam9_smc.o
-obj-$(CONFIG_ARCH_AT91SAM9263) += at91sam9263.o at91sam926x_time.o at91sam9263_devices.o sam9_smc.o
-obj-$(CONFIG_ARCH_AT91SAM9RL) += at91sam9rl.o at91sam926x_time.o at91sam9rl_devices.o sam9_smc.o
-obj-$(CONFIG_ARCH_AT91SAM9G20) += at91sam9260.o at91sam926x_time.o at91sam9260_devices.o sam9_smc.o
+obj-$(CONFIG_ARCH_AT91SAM9260) += at91sam9260.o at91sam926x_time.o at91sam9260_devices.o sam9_smc.o at91sam9_alt_reset.o
+obj-$(CONFIG_ARCH_AT91SAM9261) += at91sam9261.o at91sam926x_time.o at91sam9261_devices.o sam9_smc.o at91sam9_alt_reset.o
+obj-$(CONFIG_ARCH_AT91SAM9G10) += at91sam9261.o at91sam926x_time.o at91sam9261_devices.o sam9_smc.o at91sam9_alt_reset.o
+obj-$(CONFIG_ARCH_AT91SAM9263) += at91sam9263.o at91sam926x_time.o at91sam9263_devices.o sam9_smc.o at91sam9_alt_reset.o
+obj-$(CONFIG_ARCH_AT91SAM9RL) += at91sam9rl.o at91sam926x_time.o at91sam9rl_devices.o sam9_smc.o at91sam9_alt_reset.o
+obj-$(CONFIG_ARCH_AT91SAM9G20) += at91sam9260.o at91sam926x_time.o at91sam9260_devices.o sam9_smc.o at91sam9_alt_reset.o
obj-$(CONFIG_ARCH_AT91SAM9G45) += at91sam9g45.o at91sam926x_time.o at91sam9g45_devices.o sam9_smc.o
obj-$(CONFIG_ARCH_AT91CAP9) += at91cap9.o at91sam926x_time.o at91cap9_devices.o sam9_smc.o
obj-$(CONFIG_ARCH_AT572D940HF) += at572d940hf.o at91sam926x_time.o at572d940hf_devices.o sam9_smc.o
@@ -24,8 +24,8 @@ obj-$(CONFIG_ARCH_AT91X40) += at91x40.o at91x40_time.o
# AT91RM9200 board-specific support
obj-$(CONFIG_MACH_ONEARM) += board-1arm.o
-obj-$(CONFIG_ARCH_AT91RM9200DK) += board-dk.o
-obj-$(CONFIG_MACH_AT91RM9200EK) += board-ek.o
+obj-$(CONFIG_ARCH_AT91RM9200DK) += board-rm9200dk.o
+obj-$(CONFIG_MACH_AT91RM9200EK) += board-rm9200ek.o
obj-$(CONFIG_MACH_CSB337) += board-csb337.o
obj-$(CONFIG_MACH_CSB637) += board-csb637.o
obj-$(CONFIG_MACH_CARMEVA) += board-carmeva.o
@@ -65,6 +65,7 @@ obj-$(CONFIG_MACH_AT91SAM9G20EK) += board-sam9g20ek.o
obj-$(CONFIG_MACH_CPU9G20) += board-cpu9krea.o
obj-$(CONFIG_MACH_STAMP9G20) += board-stamp9g20.o
obj-$(CONFIG_MACH_PORTUXG20) += board-stamp9g20.o
+obj-$(CONFIG_MACH_PCONTROL_G20) += board-pcontrol-g20.o
# AT91SAM9260/AT91SAM9G20 board-specific support
obj-$(CONFIG_MACH_SNAPPER_9260) += board-snapper9260.o
diff --git a/arch/arm/mach-at91/at91rm9200_devices.c b/arch/arm/mach-at91/at91rm9200_devices.c
index 9338825cfcd7..7b539228e0ef 100644
--- a/arch/arm/mach-at91/at91rm9200_devices.c
+++ b/arch/arm/mach-at91/at91rm9200_devices.c
@@ -1106,51 +1106,6 @@ static inline void configure_usart3_pins(unsigned pins)
static struct platform_device *__initdata at91_uarts[ATMEL_MAX_UART]; /* the UARTs to use */
struct platform_device *atmel_default_console_device; /* the serial console device */
-void __init __deprecated at91_init_serial(struct at91_uart_config *config)
-{
- int i;
-
- /* Fill in list of supported UARTs */
- for (i = 0; i < config->nr_tty; i++) {
- switch (config->tty_map[i]) {
- case 0:
- configure_usart0_pins(ATMEL_UART_CTS | ATMEL_UART_RTS);
- at91_uarts[i] = &at91rm9200_uart0_device;
- at91_clock_associate("usart0_clk", &at91rm9200_uart0_device.dev, "usart");
- break;
- case 1:
- configure_usart1_pins(ATMEL_UART_CTS | ATMEL_UART_RTS | ATMEL_UART_DSR | ATMEL_UART_DTR | ATMEL_UART_DCD | ATMEL_UART_RI);
- at91_uarts[i] = &at91rm9200_uart1_device;
- at91_clock_associate("usart1_clk", &at91rm9200_uart1_device.dev, "usart");
- break;
- case 2:
- configure_usart2_pins(0);
- at91_uarts[i] = &at91rm9200_uart2_device;
- at91_clock_associate("usart2_clk", &at91rm9200_uart2_device.dev, "usart");
- break;
- case 3:
- configure_usart3_pins(0);
- at91_uarts[i] = &at91rm9200_uart3_device;
- at91_clock_associate("usart3_clk", &at91rm9200_uart3_device.dev, "usart");
- break;
- case 4:
- configure_dbgu_pins();
- at91_uarts[i] = &at91rm9200_dbgu_device;
- at91_clock_associate("mck", &at91rm9200_dbgu_device.dev, "usart");
- break;
- default:
- continue;
- }
- at91_uarts[i]->id = i; /* update ID number to mapped ID */
- }
-
- /* Set serial console device */
- if (config->console_tty < ATMEL_MAX_UART)
- atmel_default_console_device = at91_uarts[config->console_tty];
- if (!atmel_default_console_device)
- printk(KERN_INFO "AT91: No default serial console defined.\n");
-}
-
void __init at91_register_uart(unsigned id, unsigned portnr, unsigned pins)
{
struct platform_device *pdev;
diff --git a/arch/arm/mach-at91/at91sam9260.c b/arch/arm/mach-at91/at91sam9260.c
index 0894f1077be7..195208b30024 100644
--- a/arch/arm/mach-at91/at91sam9260.c
+++ b/arch/arm/mach-at91/at91sam9260.c
@@ -279,11 +279,6 @@ static struct at91_gpio_bank at91sam9260_gpio[] = {
}
};
-static void at91sam9260_reset(void)
-{
- at91_sys_write(AT91_RSTC_CR, AT91_RSTC_KEY | AT91_RSTC_PROCRST | AT91_RSTC_PERRST);
-}
-
static void at91sam9260_poweroff(void)
{
at91_sys_write(AT91_SHDW_CR, AT91_SHDW_KEY | AT91_SHDW_SHDW);
@@ -327,7 +322,7 @@ void __init at91sam9260_initialize(unsigned long main_clock)
else
iotable_init(at91sam9260_sram_desc, ARRAY_SIZE(at91sam9260_sram_desc));
- at91_arch_reset = at91sam9260_reset;
+ at91_arch_reset = at91sam9_alt_reset;
pm_power_off = at91sam9260_poweroff;
at91_extern_irq = (1 << AT91SAM9260_ID_IRQ0) | (1 << AT91SAM9260_ID_IRQ1)
| (1 << AT91SAM9260_ID_IRQ2);
diff --git a/arch/arm/mach-at91/at91sam9261.c b/arch/arm/mach-at91/at91sam9261.c
index 4ecf37996c77..fcad88668504 100644
--- a/arch/arm/mach-at91/at91sam9261.c
+++ b/arch/arm/mach-at91/at91sam9261.c
@@ -257,11 +257,6 @@ static struct at91_gpio_bank at91sam9261_gpio[] = {
}
};
-static void at91sam9261_reset(void)
-{
- at91_sys_write(AT91_RSTC_CR, AT91_RSTC_KEY | AT91_RSTC_PROCRST | AT91_RSTC_PERRST);
-}
-
static void at91sam9261_poweroff(void)
{
at91_sys_write(AT91_SHDW_CR, AT91_SHDW_KEY | AT91_SHDW_SHDW);
@@ -283,7 +278,7 @@ void __init at91sam9261_initialize(unsigned long main_clock)
iotable_init(at91sam9261_sram_desc, ARRAY_SIZE(at91sam9261_sram_desc));
- at91_arch_reset = at91sam9261_reset;
+ at91_arch_reset = at91sam9_alt_reset;
pm_power_off = at91sam9261_poweroff;
at91_extern_irq = (1 << AT91SAM9261_ID_IRQ0) | (1 << AT91SAM9261_ID_IRQ1)
| (1 << AT91SAM9261_ID_IRQ2);
diff --git a/arch/arm/mach-at91/at91sam9263.c b/arch/arm/mach-at91/at91sam9263.c
index 942792d630d8..249f900954d8 100644
--- a/arch/arm/mach-at91/at91sam9263.c
+++ b/arch/arm/mach-at91/at91sam9263.c
@@ -269,11 +269,6 @@ static struct at91_gpio_bank at91sam9263_gpio[] = {
}
};
-static void at91sam9263_reset(void)
-{
- at91_sys_write(AT91_RSTC_CR, AT91_RSTC_KEY | AT91_RSTC_PROCRST | AT91_RSTC_PERRST);
-}
-
static void at91sam9263_poweroff(void)
{
at91_sys_write(AT91_SHDW_CR, AT91_SHDW_KEY | AT91_SHDW_SHDW);
@@ -289,7 +284,7 @@ void __init at91sam9263_initialize(unsigned long main_clock)
/* Map peripherals */
iotable_init(at91sam9263_io_desc, ARRAY_SIZE(at91sam9263_io_desc));
- at91_arch_reset = at91sam9263_reset;
+ at91_arch_reset = at91sam9_alt_reset;
pm_power_off = at91sam9263_poweroff;
at91_extern_irq = (1 << AT91SAM9263_ID_IRQ0) | (1 << AT91SAM9263_ID_IRQ1);
diff --git a/arch/arm/mach-at91/at91sam9_alt_reset.S b/arch/arm/mach-at91/at91sam9_alt_reset.S
new file mode 100644
index 000000000000..e0256deb91fb
--- /dev/null
+++ b/arch/arm/mach-at91/at91sam9_alt_reset.S
@@ -0,0 +1,48 @@
+/*
+ * reset AT91SAM9G20 as per errata
+ *
+ * (C) BitBox Ltd 2010
+ *
+ * unless the SDRAM is cleanly shut down before we hit the
+ * reset register it can be left driving the data bus and
+ * killing the chance of a subsequent boot from NAND
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#include <linux/linkage.h>
+#include <asm/system.h>
+#include <mach/hardware.h>
+#include <mach/at91sam9_sdramc.h>
+#include <mach/at91_rstc.h>
+
+ .arm
+
+ .globl at91sam9_alt_reset
+
+at91sam9_alt_reset: mrc p15, 0, r0, c1, c0, 0
+ orr r0, r0, #CR_I
+ mcr p15, 0, r0, c1, c0, 0 @ enable I-cache
+
+ ldr r0, .at91_va_base_sdramc @ preload constants
+ ldr r1, .at91_va_base_rstc_cr
+
+ mov r2, #1
+ mov r3, #AT91_SDRAMC_LPCB_POWER_DOWN
+ ldr r4, =AT91_RSTC_KEY | AT91_RSTC_PERRST | AT91_RSTC_PROCRST
+
+ .balign 32 @ align to cache line
+
+ str r2, [r0, #AT91_SDRAMC_TR] @ disable SDRAM access
+ str r3, [r0, #AT91_SDRAMC_LPR] @ power down SDRAM
+ str r4, [r1] @ reset processor
+
+ b .
+
+.at91_va_base_sdramc:
+ .word AT91_VA_BASE_SYS + AT91_SDRAMC0
+.at91_va_base_rstc_cr:
+ .word AT91_VA_BASE_SYS + AT91_RSTC_CR
diff --git a/arch/arm/mach-at91/at91sam9g45_devices.c b/arch/arm/mach-at91/at91sam9g45_devices.c
index 1276babf84d5..1e8f275c17f6 100644
--- a/arch/arm/mach-at91/at91sam9g45_devices.c
+++ b/arch/arm/mach-at91/at91sam9g45_devices.c
@@ -15,6 +15,7 @@
#include <linux/dma-mapping.h>
#include <linux/platform_device.h>
#include <linux/i2c-gpio.h>
+#include <linux/atmel-mci.h>
#include <linux/fb.h>
#include <video/atmel_lcdc.h>
@@ -25,6 +26,7 @@
#include <mach/at91sam9g45_matrix.h>
#include <mach/at91sam9_smc.h>
#include <mach/at_hdmac.h>
+#include <mach/atmel-mci.h>
#include "generic.h"
@@ -350,6 +352,169 @@ void __init at91_add_device_eth(struct at91_eth_data *data) {}
/* --------------------------------------------------------------------
+ * MMC / SD
+ * -------------------------------------------------------------------- */
+
+#if defined(CONFIG_MMC_ATMELMCI) || defined(CONFIG_MMC_ATMELMCI_MODULE)
+static u64 mmc_dmamask = DMA_BIT_MASK(32);
+static struct mci_platform_data mmc0_data, mmc1_data;
+
+static struct resource mmc0_resources[] = {
+ [0] = {
+ .start = AT91SAM9G45_BASE_MCI0,
+ .end = AT91SAM9G45_BASE_MCI0 + SZ_16K - 1,
+ .flags = IORESOURCE_MEM,
+ },
+ [1] = {
+ .start = AT91SAM9G45_ID_MCI0,
+ .end = AT91SAM9G45_ID_MCI0,
+ .flags = IORESOURCE_IRQ,
+ },
+};
+
+static struct platform_device at91sam9g45_mmc0_device = {
+ .name = "atmel_mci",
+ .id = 0,
+ .dev = {
+ .dma_mask = &mmc_dmamask,
+ .coherent_dma_mask = DMA_BIT_MASK(32),
+ .platform_data = &mmc0_data,
+ },
+ .resource = mmc0_resources,
+ .num_resources = ARRAY_SIZE(mmc0_resources),
+};
+
+static struct resource mmc1_resources[] = {
+ [0] = {
+ .start = AT91SAM9G45_BASE_MCI1,
+ .end = AT91SAM9G45_BASE_MCI1 + SZ_16K - 1,
+ .flags = IORESOURCE_MEM,
+ },
+ [1] = {
+ .start = AT91SAM9G45_ID_MCI1,
+ .end = AT91SAM9G45_ID_MCI1,
+ .flags = IORESOURCE_IRQ,
+ },
+};
+
+static struct platform_device at91sam9g45_mmc1_device = {
+ .name = "atmel_mci",
+ .id = 1,
+ .dev = {
+ .dma_mask = &mmc_dmamask,
+ .coherent_dma_mask = DMA_BIT_MASK(32),
+ .platform_data = &mmc1_data,
+ },
+ .resource = mmc1_resources,
+ .num_resources = ARRAY_SIZE(mmc1_resources),
+};
+
+/* Consider only one slot: slot 0 */
+void __init at91_add_device_mci(short mmc_id, struct mci_platform_data *data)
+{
+
+ if (!data)
+ return;
+
+ /* Must have at least one usable slot */
+ if (!data->slot[0].bus_width)
+ return;
+
+#if defined(CONFIG_AT_HDMAC) || defined(CONFIG_AT_HDMAC_MODULE)
+ {
+ struct at_dma_slave *atslave;
+ struct mci_dma_data *alt_atslave;
+
+ alt_atslave = kzalloc(sizeof(struct mci_dma_data), GFP_KERNEL);
+ atslave = &alt_atslave->sdata;
+
+ /* DMA slave channel configuration */
+ atslave->dma_dev = &at_hdmac_device.dev;
+ atslave->reg_width = AT_DMA_SLAVE_WIDTH_32BIT;
+ atslave->cfg = ATC_FIFOCFG_HALFFIFO
+ | ATC_SRC_H2SEL_HW | ATC_DST_H2SEL_HW;
+ atslave->ctrla = ATC_SCSIZE_16 | ATC_DCSIZE_16;
+ if (mmc_id == 0) /* MCI0 */
+ atslave->cfg |= ATC_SRC_PER(AT_DMA_ID_MCI0)
+ | ATC_DST_PER(AT_DMA_ID_MCI0);
+
+ else /* MCI1 */
+ atslave->cfg |= ATC_SRC_PER(AT_DMA_ID_MCI1)
+ | ATC_DST_PER(AT_DMA_ID_MCI1);
+
+ data->dma_slave = alt_atslave;
+ }
+#endif
+
+
+ /* input/irq */
+ if (data->slot[0].detect_pin) {
+ at91_set_gpio_input(data->slot[0].detect_pin, 1);
+ at91_set_deglitch(data->slot[0].detect_pin, 1);
+ }
+ if (data->slot[0].wp_pin)
+ at91_set_gpio_input(data->slot[0].wp_pin, 1);
+
+ if (mmc_id == 0) { /* MCI0 */
+
+ /* CLK */
+ at91_set_A_periph(AT91_PIN_PA0, 0);
+
+ /* CMD */
+ at91_set_A_periph(AT91_PIN_PA1, 1);
+
+ /* DAT0, maybe DAT1..DAT3 and maybe DAT4..DAT7 */
+ at91_set_A_periph(AT91_PIN_PA2, 1);
+ if (data->slot[0].bus_width == 4) {
+ at91_set_A_periph(AT91_PIN_PA3, 1);
+ at91_set_A_periph(AT91_PIN_PA4, 1);
+ at91_set_A_periph(AT91_PIN_PA5, 1);
+ if (data->slot[0].bus_width == 8) {
+ at91_set_A_periph(AT91_PIN_PA6, 1);
+ at91_set_A_periph(AT91_PIN_PA7, 1);
+ at91_set_A_periph(AT91_PIN_PA8, 1);
+ at91_set_A_periph(AT91_PIN_PA9, 1);
+ }
+ }
+
+ mmc0_data = *data;
+ at91_clock_associate("mci0_clk", &at91sam9g45_mmc0_device.dev, "mci_clk");
+ platform_device_register(&at91sam9g45_mmc0_device);
+
+ } else { /* MCI1 */
+
+ /* CLK */
+ at91_set_A_periph(AT91_PIN_PA31, 0);
+
+ /* CMD */
+ at91_set_A_periph(AT91_PIN_PA22, 1);
+
+ /* DAT0, maybe DAT1..DAT3 and maybe DAT4..DAT7 */
+ at91_set_A_periph(AT91_PIN_PA23, 1);
+ if (data->slot[0].bus_width == 4) {
+ at91_set_A_periph(AT91_PIN_PA24, 1);
+ at91_set_A_periph(AT91_PIN_PA25, 1);
+ at91_set_A_periph(AT91_PIN_PA26, 1);
+ if (data->slot[0].bus_width == 8) {
+ at91_set_A_periph(AT91_PIN_PA27, 1);
+ at91_set_A_periph(AT91_PIN_PA28, 1);
+ at91_set_A_periph(AT91_PIN_PA29, 1);
+ at91_set_A_periph(AT91_PIN_PA30, 1);
+ }
+ }
+
+ mmc1_data = *data;
+ at91_clock_associate("mci1_clk", &at91sam9g45_mmc1_device.dev, "mci_clk");
+ platform_device_register(&at91sam9g45_mmc1_device);
+
+ }
+}
+#else
+void __init at91_add_device_mci(short mmc_id, struct mci_platform_data *data) {}
+#endif
+
+
+/* --------------------------------------------------------------------
* NAND / SmartMedia
* -------------------------------------------------------------------- */
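The at91_add_device_mci() helper added above expects board code to select the controller by id and pass a mci_platform_data describing slot 0; the PControl G20 board added later in this patch does exactly that. A minimal board-side sketch:

/* sketch: hook up MCI0 with a 4-bit slot and no detect/write-protect pins */
static struct mci_platform_data __initdata board_mmc_data = {
	.slot[0] = {
		.bus_width	= 4,
	},
};

static void __init board_add_mmc(void)
{
	at91_add_device_mci(0, &board_mmc_data);
}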
diff --git a/arch/arm/mach-at91/at91sam9rl.c b/arch/arm/mach-at91/at91sam9rl.c
index 211c5c14a1e6..6a9d24e5ed8e 100644
--- a/arch/arm/mach-at91/at91sam9rl.c
+++ b/arch/arm/mach-at91/at91sam9rl.c
@@ -242,11 +242,6 @@ static struct at91_gpio_bank at91sam9rl_gpio[] = {
}
};
-static void at91sam9rl_reset(void)
-{
- at91_sys_write(AT91_RSTC_CR, AT91_RSTC_KEY | AT91_RSTC_PROCRST | AT91_RSTC_PERRST);
-}
-
static void at91sam9rl_poweroff(void)
{
at91_sys_write(AT91_SHDW_CR, AT91_SHDW_KEY | AT91_SHDW_SHDW);
@@ -281,7 +276,7 @@ void __init at91sam9rl_initialize(unsigned long main_clock)
/* Map SRAM */
iotable_init(at91sam9rl_sram_desc, ARRAY_SIZE(at91sam9rl_sram_desc));
- at91_arch_reset = at91sam9rl_reset;
+ at91_arch_reset = at91sam9_alt_reset;
pm_power_off = at91sam9rl_poweroff;
at91_extern_irq = (1 << AT91SAM9RL_ID_IRQ0);
diff --git a/arch/arm/mach-at91/board-1arm.c b/arch/arm/mach-at91/board-1arm.c
index 46bdc82d3fbf..8a3fc84847c1 100644
--- a/arch/arm/mach-at91/board-1arm.c
+++ b/arch/arm/mach-at91/board-1arm.c
@@ -39,24 +39,24 @@
#include "generic.h"
-/*
- * Serial port configuration.
- * 0 .. 3 = USART0 .. USART3
- * 4 = DBGU
- */
-static struct at91_uart_config __initdata onearm_uart_config = {
- .console_tty = 0, /* ttyS0 */
- .nr_tty = 3,
- .tty_map = { 4, 0, 1, -1, -1 }, /* ttyS0, ..., ttyS4 */
-};
-
static void __init onearm_map_io(void)
{
/* Initialize processor: 18.432 MHz crystal */
at91rm9200_initialize(18432000, AT91RM9200_PQFP);
- /* Setup the serial ports and console */
- at91_init_serial(&onearm_uart_config);
+ /* DBGU on ttyS0. (Rx & Tx only) */
+ at91_register_uart(0, 0, 0);
+
+ /* USART0 on ttyS1 (Rx, Tx, CTS, RTS) */
+ at91_register_uart(AT91RM9200_ID_US0, 1, ATMEL_UART_CTS | ATMEL_UART_RTS);
+
+ /* USART1 on ttyS2 (Rx, Tx, CTS, RTS, DTR, DSR, DCD, RI) */
+ at91_register_uart(AT91RM9200_ID_US1, 2, ATMEL_UART_CTS | ATMEL_UART_RTS
+ | ATMEL_UART_DTR | ATMEL_UART_DSR | ATMEL_UART_DCD
+ | ATMEL_UART_RI);
+
+ /* set serial console to ttyS0 (ie, DBGU) */
+ at91_set_serial_console(0);
}
static void __init onearm_init_irq(void)
diff --git a/arch/arm/mach-at91/board-kafa.c b/arch/arm/mach-at91/board-kafa.c
index c0ce79d431a0..d2e1f4ec1fcc 100644
--- a/arch/arm/mach-at91/board-kafa.c
+++ b/arch/arm/mach-at91/board-kafa.c
@@ -39,17 +39,6 @@
#include "generic.h"
-/*
- * Serial port configuration.
- * 0 .. 3 = USART0 .. USART3
- * 4 = DBGU
- */
-static struct at91_uart_config __initdata kafa_uart_config = {
- .console_tty = 0, /* ttyS0 */
- .nr_tty = 2,
- .tty_map = { 4, 0, -1, -1, -1 } /* ttyS0, ..., ttyS4 */
-};
-
static void __init kafa_map_io(void)
{
/* Initialize processor: 18.432 MHz crystal */
@@ -58,8 +47,14 @@ static void __init kafa_map_io(void)
/* Set up the LEDs */
at91_init_leds(AT91_PIN_PB4, AT91_PIN_PB4);
- /* Setup the serial ports and console */
- at91_init_serial(&kafa_uart_config);
+ /* DBGU on ttyS0. (Rx & Tx only) */
+ at91_register_uart(0, 0, 0);
+
+ /* USART0 on ttyS1 (Rx, Tx, CTS, RTS) */
+ at91_register_uart(AT91RM9200_ID_US0, 1, ATMEL_UART_CTS | ATMEL_UART_RTS);
+
+ /* set serial console to ttyS0 (ie, DBGU) */
+ at91_set_serial_console(0);
}
static void __init kafa_init_irq(void)
diff --git a/arch/arm/mach-at91/board-pcontrol-g20.c b/arch/arm/mach-at91/board-pcontrol-g20.c
new file mode 100644
index 000000000000..bba5a560e02b
--- /dev/null
+++ b/arch/arm/mach-at91/board-pcontrol-g20.c
@@ -0,0 +1,322 @@
+/*
+ * Copyright (C) 2010 Christian Glindkamp <christian.glindkamp@taskit.de>
+ * taskit GmbH
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ */
+/*
+ * copied and adjusted from board-stamp9g20.c
+ * by Peter Gsellmann <pgsellmann@portner-elektronik.at>
+ */
+
+#include <linux/mm.h>
+#include <linux/platform_device.h>
+#include <linux/gpio.h>
+#include <linux/w1-gpio.h>
+
+#include <asm/mach-types.h>
+#include <asm/mach/arch.h>
+
+#include <mach/board.h>
+#include <mach/at91sam9_smc.h>
+
+#include "sam9_smc.h"
+#include "generic.h"
+
+
+static void __init pcontrol_g20_map_io(void)
+{
+ /* Initialize processor: 18.432 MHz crystal */
+ at91sam9260_initialize(18432000);
+
+ /* DBGU on ttyS0 (Rx, Tx only, TTL level -> JTAG connector X7 pins 17,19) */
+ at91_register_uart(0, 0, 0);
+
+ /* USART0 on ttyS1. (Rx, Tx, CTS, RTS) piggyback A2 */
+ at91_register_uart(AT91SAM9260_ID_US0, 1, ATMEL_UART_CTS
+ | ATMEL_UART_RTS);
+
+ /* USART1 on ttyS2. (Rx, Tx, CTS, RTS) isolated RS485 X5 */
+ at91_register_uart(AT91SAM9260_ID_US1, 2, ATMEL_UART_CTS
+ | ATMEL_UART_RTS);
+
+ /* USART2 on ttyS3. (Rx, Tx) 9bit-Bus Multidrop-mode X4 */
+ at91_register_uart(AT91SAM9260_ID_US4, 3, 0);
+
+ /* set serial console to ttyS0 (ie, DBGU) */
+ at91_set_serial_console(0);
+}
+
+
+static void __init init_irq(void)
+{
+ at91sam9260_init_interrupts(NULL);
+}
+
+
+/*
+ * NAND flash 512MiB 1,8V 8-bit, sector size 128 KiB
+ */
+static struct atmel_nand_data __initdata nand_data = {
+ .ale = 21,
+ .cle = 22,
+ .rdy_pin = AT91_PIN_PC13,
+ .enable_pin = AT91_PIN_PC14,
+};
+
+/*
+ * Bus timings; unit = 7.57ns
+ */
+static struct sam9_smc_config __initdata nand_smc_config = {
+ .ncs_read_setup = 0,
+ .nrd_setup = 2,
+ .ncs_write_setup = 0,
+ .nwe_setup = 2,
+
+ .ncs_read_pulse = 4,
+ .nrd_pulse = 4,
+ .ncs_write_pulse = 4,
+ .nwe_pulse = 4,
+
+ .read_cycle = 7,
+ .write_cycle = 7,
+
+ .mode = AT91_SMC_READMODE | AT91_SMC_WRITEMODE
+ | AT91_SMC_EXNWMODE_DISABLE | AT91_SMC_DBW_8,
+ .tdf_cycles = 3,
+};
+
+static struct sam9_smc_config __initdata pcontrol_smc_config[2] = { {
+ .ncs_read_setup = 16,
+ .nrd_setup = 18,
+ .ncs_write_setup = 16,
+ .nwe_setup = 18,
+
+ .ncs_read_pulse = 63,
+ .nrd_pulse = 55,
+ .ncs_write_pulse = 63,
+ .nwe_pulse = 55,
+
+ .read_cycle = 127,
+ .write_cycle = 127,
+
+ .mode = AT91_SMC_READMODE | AT91_SMC_WRITEMODE
+ | AT91_SMC_EXNWMODE_DISABLE | AT91_SMC_BAT_SELECT
+ | AT91_SMC_DBW_8 | AT91_SMC_PS_4
+ | AT91_SMC_TDFMODE,
+ .tdf_cycles = 3,
+}, {
+ .ncs_read_setup = 0,
+ .nrd_setup = 0,
+ .ncs_write_setup = 0,
+ .nwe_setup = 1,
+
+ .ncs_read_pulse = 8,
+ .nrd_pulse = 8,
+ .ncs_write_pulse = 5,
+ .nwe_pulse = 4,
+
+ .read_cycle = 8,
+ .write_cycle = 7,
+
+ .mode = AT91_SMC_READMODE | AT91_SMC_WRITEMODE
+ | AT91_SMC_EXNWMODE_DISABLE | AT91_SMC_BAT_SELECT
+ | AT91_SMC_DBW_16 | AT91_SMC_PS_8
+ | AT91_SMC_TDFMODE,
+ .tdf_cycles = 1,
+} };
+
+static void __init add_device_nand(void)
+{
+ /* configure chip-select 3 (NAND) */
+ sam9_smc_configure(3, &nand_smc_config);
+ at91_add_device_nand(&nand_data);
+}
+
+
+static void __init add_device_pcontrol(void)
+{
+ /* configure chip-select 4 (IO compatible to 8051 X4 ) */
+ sam9_smc_configure(4, &pcontrol_smc_config[0]);
+ /* configure chip-select 7 (FerroRAM 256KiBx16bit MR2A16A D4 ) */
+ sam9_smc_configure(7, &pcontrol_smc_config[1]);
+}
+
+
+/*
+ * MCI (SD/MMC)
+ * det_pin, wp_pin and vcc_pin are not connected
+ */
+#if defined(CONFIG_MMC_ATMELMCI) || defined(CONFIG_MMC_ATMELMCI_MODULE)
+static struct mci_platform_data __initdata mmc_data = {
+ .slot[0] = {
+ .bus_width = 4,
+ },
+};
+#else
+static struct at91_mmc_data __initdata mmc_data = {
+ .wire4 = 1,
+};
+#endif
+
+
+/*
+ * USB Host port
+ */
+static struct at91_usbh_data __initdata usbh_data = {
+ .ports = 2,
+};
+
+
+/*
+ * USB Device port
+ */
+static struct at91_udc_data __initdata pcontrol_g20_udc_data = {
+ .vbus_pin = AT91_PIN_PA22, /* Detect +5V bus voltage */
+ .pullup_pin = AT91_PIN_PA4, /* K-state, active low */
+};
+
+
+/*
+ * MACB Ethernet device
+ */
+static struct at91_eth_data __initdata macb_data = {
+ .phy_irq_pin = AT91_PIN_PA28,
+ .is_rmii = 1,
+};
+
+
+/*
+ * I2C devices: eeprom and phy/switch
+ */
+static struct i2c_board_info __initdata pcontrol_g20_i2c_devices[] = {
+{ /* D7 address width=2, 8KiB */
+ I2C_BOARD_INFO("24c64", 0x50)
+}, { /* D8 address width=1, 1 byte has 32 bits! */
+ I2C_BOARD_INFO("lan9303", 0x0a)
+}, };
+
+
+/*
+ * LEDs
+ */
+static struct gpio_led pcontrol_g20_leds[] = {
+ {
+ .name = "LED1", /* red H5 */
+ .gpio = AT91_PIN_PB18,
+ .active_low = 1,
+ .default_trigger = "none", /* supervisor */
+ }, {
+ .name = "LED2", /* yellow H7 */
+ .gpio = AT91_PIN_PB19,
+ .active_low = 1,
+ .default_trigger = "mmc0", /* SD-card activity */
+ }, {
+ .name = "LED3", /* green H2 */
+ .gpio = AT91_PIN_PB20,
+ .active_low = 1,
+ .default_trigger = "heartbeat", /* blinky */
+ }, {
+ .name = "LED4", /* red H3 */
+ .gpio = AT91_PIN_PC6,
+ .active_low = 1,
+ .default_trigger = "none", /* connection lost */
+ }, {
+ .name = "LED5", /* yellow H6 */
+ .gpio = AT91_PIN_PC7,
+ .active_low = 1,
+ .default_trigger = "none", /* unsent data */
+ }, {
+ .name = "LED6", /* green H1 */
+ .gpio = AT91_PIN_PC9,
+ .active_low = 1,
+ .default_trigger = "none", /* snafu */
+ }
+};
+
+
+/*
+ * SPI devices
+ */
+static struct spi_board_info pcontrol_g20_spi_devices[] = {
+ {
+ .modalias = "spidev", /* HMI port X4 */
+ .chip_select = 1,
+ .max_speed_hz = 50 * 1000 * 1000,
+ .bus_num = 0,
+ }, {
+ .modalias = "spidev", /* piggyback A2 */
+ .chip_select = 0,
+ .max_speed_hz = 50 * 1000 * 1000,
+ .bus_num = 1,
+ },
+};
+
+
+/*
+ * Dallas 1-Wire DS2431
+ */
+static struct w1_gpio_platform_data w1_gpio_pdata = {
+ .pin = AT91_PIN_PA29,
+ .is_open_drain = 1,
+};
+
+static struct platform_device w1_device = {
+ .name = "w1-gpio",
+ .id = -1,
+ .dev.platform_data = &w1_gpio_pdata,
+};
+
+static void add_wire1(void)
+{
+ at91_set_GPIO_periph(w1_gpio_pdata.pin, 1);
+ at91_set_multi_drive(w1_gpio_pdata.pin, 1);
+ platform_device_register(&w1_device);
+}
+
+
+static void __init pcontrol_g20_board_init(void)
+{
+ at91_add_device_serial();
+ add_device_nand();
+#if defined(CONFIG_MMC_ATMELMCI) || defined(CONFIG_MMC_ATMELMCI_MODULE)
+ at91_add_device_mci(0, &mmc_data);
+#else
+ at91_add_device_mmc(0, &mmc_data);
+#endif
+ at91_add_device_usbh(&usbh_data);
+ at91_add_device_eth(&macb_data);
+ at91_add_device_i2c(pcontrol_g20_i2c_devices,
+ ARRAY_SIZE(pcontrol_g20_i2c_devices));
+ add_wire1();
+ add_device_pcontrol();
+ at91_add_device_spi(pcontrol_g20_spi_devices,
+ ARRAY_SIZE(pcontrol_g20_spi_devices));
+ at91_add_device_udc(&pcontrol_g20_udc_data);
+ at91_gpio_leds(pcontrol_g20_leds,
+ ARRAY_SIZE(pcontrol_g20_leds));
+ /* piggyback A2 */
+ at91_set_gpio_output(AT91_PIN_PB31, 1);
+}
+
+
+MACHINE_START(PCONTROL_G20, "PControl G20")
+ /* Maintainer: pgsellmann@portner-elektronik.at */
+ .boot_params = AT91_SDRAM_BASE + 0x100,
+ .timer = &at91sam926x_timer,
+ .map_io = pcontrol_g20_map_io,
+ .init_irq = init_irq,
+ .init_machine = pcontrol_g20_board_init,
+MACHINE_END
diff --git a/arch/arm/mach-at91/board-picotux200.c b/arch/arm/mach-at91/board-picotux200.c
index 9d833bbc592d..55dad3a46547 100644
--- a/arch/arm/mach-at91/board-picotux200.c
+++ b/arch/arm/mach-at91/board-picotux200.c
@@ -43,24 +43,21 @@
#include "generic.h"
-/*
- * Serial port configuration.
- * 0 .. 3 = USART0 .. USART3
- * 4 = DBGU
- */
-static struct at91_uart_config __initdata picotux200_uart_config = {
- .console_tty = 0, /* ttyS0 */
- .nr_tty = 2,
- .tty_map = { 4, 1, -1, -1, -1 } /* ttyS0, ..., ttyS4 */
-};
-
static void __init picotux200_map_io(void)
{
/* Initialize processor: 18.432 MHz crystal */
at91rm9200_initialize(18432000, AT91RM9200_BGA);
- /* Setup the serial ports and console */
- at91_init_serial(&picotux200_uart_config);
+ /* DBGU on ttyS0. (Rx & Tx only) */
+ at91_register_uart(0, 0, 0);
+
+ /* USART1 on ttyS1. (Rx, Tx, CTS, RTS, DTR, DSR, DCD, RI) */
+ at91_register_uart(AT91RM9200_ID_US1, 1, ATMEL_UART_CTS | ATMEL_UART_RTS
+ | ATMEL_UART_DTR | ATMEL_UART_DSR | ATMEL_UART_DCD
+ | ATMEL_UART_RI);
+
+ /* set serial console to ttyS0 (ie, DBGU) */
+ at91_set_serial_console(0);
}
static void __init picotux200_init_irq(void)
@@ -77,11 +74,6 @@ static struct at91_usbh_data __initdata picotux200_usbh_data = {
.ports = 1,
};
-// static struct at91_udc_data __initdata picotux200_udc_data = {
-// .vbus_pin = AT91_PIN_PD4,
-// .pullup_pin = AT91_PIN_PD5,
-// };
-
static struct at91_mmc_data __initdata picotux200_mmc_data = {
.det_pin = AT91_PIN_PB27,
.slot_b = 0,
@@ -89,21 +81,6 @@ static struct at91_mmc_data __initdata picotux200_mmc_data = {
.wp_pin = AT91_PIN_PA17,
};
-// static struct spi_board_info picotux200_spi_devices[] = {
-// { /* DataFlash chip */
-// .modalias = "mtd_dataflash",
-// .chip_select = 0,
-// .max_speed_hz = 15 * 1000 * 1000,
-// },
-// #ifdef CONFIG_MTD_AT91_DATAFLASH_CARD
-// { /* DataFlash card */
-// .modalias = "mtd_dataflash",
-// .chip_select = 3,
-// .max_speed_hz = 15 * 1000 * 1000,
-// },
-// #endif
-// };
-
#define PICOTUX200_FLASH_BASE AT91_CHIPSELECT_0
#define PICOTUX200_FLASH_SIZE SZ_4M
@@ -135,21 +112,11 @@ static void __init picotux200_board_init(void)
at91_add_device_eth(&picotux200_eth_data);
/* USB Host */
at91_add_device_usbh(&picotux200_usbh_data);
- /* USB Device */
- // at91_add_device_udc(&picotux200_udc_data);
- // at91_set_multi_drive(picotux200_udc_data.pullup_pin, 1); /* pullup_pin is connected to reset */
/* I2C */
at91_add_device_i2c(NULL, 0);
- /* SPI */
- // at91_add_device_spi(picotux200_spi_devices, ARRAY_SIZE(picotux200_spi_devices));
-#ifdef CONFIG_MTD_AT91_DATAFLASH_CARD
- /* DataFlash card */
- at91_set_gpio_output(AT91_PIN_PB22, 0);
-#else
/* MMC */
at91_set_gpio_output(AT91_PIN_PB22, 1); /* this MMC card slot can optionally use SPI signaling (CS3). */
at91_add_device_mmc(0, &picotux200_mmc_data);
-#endif
/* NOR Flash */
platform_device_register(&picotux200_flash);
}
diff --git a/arch/arm/mach-at91/board-dk.c b/arch/arm/mach-at91/board-rm9200dk.c
index e14f0e165680..4c1047c8200d 100644
--- a/arch/arm/mach-at91/board-dk.c
+++ b/arch/arm/mach-at91/board-rm9200dk.c
@@ -1,5 +1,5 @@
/*
- * linux/arch/arm/mach-at91/board-dk.c
+ * linux/arch/arm/mach-at91/board-rm9200dk.c
*
* Copyright (C) 2005 SAN People
*
@@ -91,10 +91,12 @@ static struct at91_cf_data __initdata dk_cf_data = {
// .vcc_pin = ... always powered
};
+#ifndef CONFIG_MTD_AT91_DATAFLASH_CARD
static struct at91_mmc_data __initdata dk_mmc_data = {
.slot_b = 0,
.wire4 = 1,
};
+#endif
static struct spi_board_info dk_spi_devices[] = {
{ /* DataFlash chip */
diff --git a/arch/arm/mach-at91/board-ek.c b/arch/arm/mach-at91/board-rm9200ek.c
index 56e92c4bbc2a..9df1be8818c0 100644
--- a/arch/arm/mach-at91/board-ek.c
+++ b/arch/arm/mach-at91/board-rm9200ek.c
@@ -1,5 +1,5 @@
/*
- * linux/arch/arm/mach-at91/board-ek.c
+ * linux/arch/arm/mach-at91/board-rm9200ek.c
*
* Copyright (C) 2005 SAN People
*
@@ -84,12 +84,14 @@ static struct at91_udc_data __initdata ek_udc_data = {
.pullup_pin = AT91_PIN_PD5,
};
+#ifndef CONFIG_MTD_AT91_DATAFLASH_CARD
static struct at91_mmc_data __initdata ek_mmc_data = {
.det_pin = AT91_PIN_PB27,
.slot_b = 0,
.wire4 = 1,
.wp_pin = AT91_PIN_PA17,
};
+#endif
static struct spi_board_info ek_spi_devices[] = {
{ /* DataFlash chip */
diff --git a/arch/arm/mach-at91/board-sam9m10g45ek.c b/arch/arm/mach-at91/board-sam9m10g45ek.c
index 7913984f6de9..86ff4b52db32 100644
--- a/arch/arm/mach-at91/board-sam9m10g45ek.c
+++ b/arch/arm/mach-at91/board-sam9m10g45ek.c
@@ -24,7 +24,9 @@
#include <linux/input.h>
#include <linux/leds.h>
#include <linux/clk.h>
+#include <linux/atmel-mci.h>
+#include <mach/hardware.h>
#include <video/atmel_lcdc.h>
#include <asm/setup.h>
@@ -98,6 +100,25 @@ static struct spi_board_info ek_spi_devices[] = {
/*
+ * MCI (SD/MMC)
+ */
+static struct mci_platform_data __initdata mci0_data = {
+ .slot[0] = {
+ .bus_width = 4,
+ .detect_pin = AT91_PIN_PD10,
+ },
+};
+
+static struct mci_platform_data __initdata mci1_data = {
+ .slot[0] = {
+ .bus_width = 4,
+ .detect_pin = AT91_PIN_PD11,
+ .wp_pin = AT91_PIN_PD29,
+ },
+};
+
+
+/*
* MACB Ethernet device
*/
static struct at91_eth_data __initdata ek_macb_data = {
@@ -380,6 +401,9 @@ static void __init ek_board_init(void)
at91_add_device_usba(&ek_usba_udc_data);
/* SPI */
at91_add_device_spi(ek_spi_devices, ARRAY_SIZE(ek_spi_devices));
+ /* MMC */
+ at91_add_device_mci(0, &mci0_data);
+ at91_add_device_mci(1, &mci1_data);
/* Ethernet */
at91_add_device_eth(&ek_macb_data);
/* NAND */
diff --git a/arch/arm/mach-at91/board-yl-9200.c b/arch/arm/mach-at91/board-yl-9200.c
index 89df00a9d2f7..e0f0080eb639 100644
--- a/arch/arm/mach-at91/board-yl-9200.c
+++ b/arch/arm/mach-at91/board-yl-9200.c
@@ -387,7 +387,7 @@ static struct spi_board_info yl9200_spi_devices[] = {
* EPSON S1D13806 FB (discontinued chip)
* EPSON S1D13506 FB
*/
-#if defined(CONFIG_FB_S1D135XX) || defined(CONFIG_FB_S1D13XXX_MODULE)
+#if defined(CONFIG_FB_S1D13XXX) || defined(CONFIG_FB_S1D13XXX_MODULE)
#include <video/s1d13xxxfb.h>
diff --git a/arch/arm/mach-at91/generic.h b/arch/arm/mach-at91/generic.h
index 65c3dc5ba0d0..0c66deb2db39 100644
--- a/arch/arm/mach-at91/generic.h
+++ b/arch/arm/mach-at91/generic.h
@@ -46,6 +46,9 @@ extern void __init at91_clock_associate(const char *id, struct device *dev, cons
extern void at91_irq_suspend(void);
extern void at91_irq_resume(void);
+/* reset */
+extern void at91sam9_alt_reset(void);
+
/* GPIO */
#define AT91RM9200_PQFP 3 /* AT91RM9200 PQFP package has 3 banks */
#define AT91RM9200_BGA 4 /* AT91RM9200 BGA package has 4 banks */
diff --git a/arch/arm/mach-at91/include/mach/board.h b/arch/arm/mach-at91/include/mach/board.h
index 58528aa9c8a8..2b499eb343a1 100644
--- a/arch/arm/mach-at91/include/mach/board.h
+++ b/arch/arm/mach-at91/include/mach/board.h
@@ -137,13 +137,7 @@ extern void __init at91_add_device_spi(struct spi_board_info *devices, int nr_de
extern void __init at91_register_uart(unsigned id, unsigned portnr, unsigned pins);
extern void __init at91_set_serial_console(unsigned portnr);
-struct at91_uart_config {
- unsigned short console_tty; /* tty number of serial console */
- unsigned short nr_tty; /* number of serial tty's */
- short tty_map[]; /* map UART to tty number */
-};
extern struct platform_device *atmel_default_console_device;
-extern void __init __deprecated at91_init_serial(struct at91_uart_config *config);
struct atmel_uart_data {
short use_dma_tx; /* use transmit DMA? */
diff --git a/arch/arm/mach-at91/pm.c b/arch/arm/mach-at91/pm.c
index 615668986480..dafbacc25eb1 100644
--- a/arch/arm/mach-at91/pm.c
+++ b/arch/arm/mach-at91/pm.c
@@ -258,16 +258,23 @@ static int at91_pm_enter(suspend_state_t state)
* NOTE: the Wait-for-Interrupt instruction needs to be
* in icache so no SDRAM accesses are needed until the
* wakeup IRQ occurs and self-refresh is terminated.
+ * For ARM 926 based chips, this requirement is weaker
+ * as at91sam9 can access a RAM in self-refresh mode.
*/
- asm("b 1f; .align 5; 1:");
- asm("mcr p15, 0, r0, c7, c10, 4"); /* drain write buffer */
+ asm volatile ( "mov r0, #0\n\t"
+ "b 1f\n\t"
+ ".align 5\n\t"
+ "1: mcr p15, 0, r0, c7, c10, 4\n\t"
+ : /* no output */
+ : /* no input */
+ : "r0");
saved_lpr = sdram_selfrefresh_enable();
- asm("mcr p15, 0, r0, c7, c0, 4"); /* wait for interrupt */
+ wait_for_interrupt_enable();
sdram_selfrefresh_disable(saved_lpr);
break;
case PM_SUSPEND_ON:
- asm("mcr p15, 0, r0, c7, c0, 4"); /* wait for interrupt */
+ cpu_do_idle();
break;
default:
diff --git a/arch/arm/mach-at91/pm.h b/arch/arm/mach-at91/pm.h
index 8c87d0c1b8f8..ce9a20699111 100644
--- a/arch/arm/mach-at91/pm.h
+++ b/arch/arm/mach-at91/pm.h
@@ -21,6 +21,8 @@ static inline u32 sdram_selfrefresh_enable(void)
}
#define sdram_selfrefresh_disable(saved_lpr) at91_sys_write(AT91_SDRAMC_LPR, saved_lpr)
+#define wait_for_interrupt_enable() asm volatile ("mcr p15, 0, %0, c7, c0, 4" \
+ : : "r" (0))
#elif defined(CONFIG_ARCH_AT91CAP9)
#include <mach/at91cap9_ddrsdr.h>
@@ -38,6 +40,7 @@ static inline u32 sdram_selfrefresh_enable(void)
}
#define sdram_selfrefresh_disable(saved_lpr) at91_ramc_write(0, AT91_DDRSDRC_LPR, saved_lpr)
+#define wait_for_interrupt_enable() cpu_do_idle()
#elif defined(CONFIG_ARCH_AT91SAM9G45)
#include <mach/at91sam9_ddrsdr.h>
@@ -74,6 +77,7 @@ static inline u32 sdram_selfrefresh_enable(void)
at91_ramc_write(0, AT91_DDRSDRC_LPR, saved_lpr0); \
at91_ramc_write(1, AT91_DDRSDRC_LPR, saved_lpr1); \
} while (0)
+#define wait_for_interrupt_enable() cpu_do_idle()
#else
#include <mach/at91sam9_sdramc.h>
@@ -98,5 +102,6 @@ static inline u32 sdram_selfrefresh_enable(void)
}
#define sdram_selfrefresh_disable(saved_lpr) at91_ramc_write(0, AT91_SDRAMC_LPR, saved_lpr)
+#define wait_for_interrupt_enable() cpu_do_idle()
#endif
diff --git a/arch/arm/mach-at91/pm_slowclock.S b/arch/arm/mach-at91/pm_slowclock.S
index b6b00a1f6125..f7922a436172 100644
--- a/arch/arm/mach-at91/pm_slowclock.S
+++ b/arch/arm/mach-at91/pm_slowclock.S
@@ -124,6 +124,7 @@ ENTRY(at91_slow_clock)
ldr r5, .at91_va_base_ramc1
/* Drain write buffer */
+ mov r0, #0
mcr p15, 0, r0, c7, c10, 4
#ifdef CONFIG_ARCH_AT91RM9200
diff --git a/arch/arm/mach-bcmring/include/mach/vmalloc.h b/arch/arm/mach-bcmring/include/mach/vmalloc.h
index 3db3a09fd398..7397bd7817d9 100644
--- a/arch/arm/mach-bcmring/include/mach/vmalloc.h
+++ b/arch/arm/mach-bcmring/include/mach/vmalloc.h
@@ -22,4 +22,4 @@
* 0xe0000000 to 0xefffffff. This gives us 256 MB of vm space and handles
* larger physical memory designs better.
*/
-#define VMALLOC_END 0xf0000000
+#define VMALLOC_END 0xf0000000UL
diff --git a/arch/arm/mach-clps711x/include/mach/vmalloc.h b/arch/arm/mach-clps711x/include/mach/vmalloc.h
index 30b3a287ed88..467b96137e47 100644
--- a/arch/arm/mach-clps711x/include/mach/vmalloc.h
+++ b/arch/arm/mach-clps711x/include/mach/vmalloc.h
@@ -17,4 +17,4 @@
* along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*/
-#define VMALLOC_END 0xd0000000
+#define VMALLOC_END 0xd0000000UL
diff --git a/arch/arm/mach-cns3xxx/pcie.c b/arch/arm/mach-cns3xxx/pcie.c
index 38088c36936c..78defd71a829 100644
--- a/arch/arm/mach-cns3xxx/pcie.c
+++ b/arch/arm/mach-cns3xxx/pcie.c
@@ -369,7 +369,7 @@ static int __init cns3xxx_pcie_init(void)
{
int i;
- hook_fault_code(16 + 6, cns3xxx_pcie_abort_handler, SIGBUS,
+ hook_fault_code(16 + 6, cns3xxx_pcie_abort_handler, SIGBUS, 0,
"imprecise external abort");
for (i = 0; i < ARRAY_SIZE(cns3xxx_pcie); i++) {
diff --git a/arch/arm/mach-davinci/Kconfig b/arch/arm/mach-davinci/Kconfig
index 71f90f864748..b77b860b36d7 100644
--- a/arch/arm/mach-davinci/Kconfig
+++ b/arch/arm/mach-davinci/Kconfig
@@ -20,23 +20,23 @@ config ARCH_DAVINCI_DM644x
select ARCH_DAVINCI_DMx
config ARCH_DAVINCI_DM355
- bool "DaVinci 355 based system"
+ bool "DaVinci 355 based system"
select AINTC
select ARCH_DAVINCI_DMx
config ARCH_DAVINCI_DM646x
- bool "DaVinci 646x based system"
+ bool "DaVinci 646x based system"
select AINTC
select ARCH_DAVINCI_DMx
config ARCH_DAVINCI_DA830
- bool "DA830/OMAP-L137 based system"
+ bool "DA830/OMAP-L137/AM17x based system"
select CP_INTC
select ARCH_DAVINCI_DA8XX
select CPU_DCACHE_WRITETHROUGH # needed on silicon revs 1.0, 1.1
config ARCH_DAVINCI_DA850
- bool "DA850/OMAP-L138 based system"
+ bool "DA850/OMAP-L138/AM18x based system"
select CP_INTC
select ARCH_DAVINCI_DA8XX
select ARCH_HAS_CPUFREQ
@@ -115,21 +115,21 @@ config MACH_DAVINCI_DM365_EVM
for development is a DM365 EVM
config MACH_DAVINCI_DA830_EVM
- bool "TI DA830/OMAP-L137 Reference Platform"
+ bool "TI DA830/OMAP-L137/AM17x Reference Platform"
default ARCH_DAVINCI_DA830
depends on ARCH_DAVINCI_DA830
select GPIO_PCF857X
help
- Say Y here to select the TI DA830/OMAP-L137 Evaluation Module.
+ Say Y here to select the TI DA830/OMAP-L137/AM17x Evaluation Module.
choice
- prompt "Select DA830/OMAP-L137 UI board peripheral"
+ prompt "Select DA830/OMAP-L137/AM17x UI board peripheral"
depends on MACH_DAVINCI_DA830_EVM
help
- The presence of UI card on the DA830/OMAP-L137 EVM is detected
- automatically based on successful probe of the I2C based GPIO
- expander on that board. This option selected in this menu has
- an effect only in case of a successful UI card detection.
+ The presence of UI card on the DA830/OMAP-L137/AM17x EVM is
+ detected automatically based on successful probe of the I2C
+ based GPIO expander on that board. This option selected in this
+ menu has an effect only in case of a successful UI card detection.
config DA830_UI_LCD
bool "LCD"
@@ -140,23 +140,23 @@ config DA830_UI_LCD
config DA830_UI_NAND
bool "NAND flash"
help
- Say Y here to use the NAND flash. Do not forget to setup
+ Say Y here to use the NAND flash. Do not forget to set up
the switch correctly.
endchoice
config MACH_DAVINCI_DA850_EVM
- bool "TI DA850/OMAP-L138 Reference Platform"
+ bool "TI DA850/OMAP-L138/AM18x Reference Platform"
default ARCH_DAVINCI_DA850
depends on ARCH_DAVINCI_DA850
select GPIO_PCA953X
help
- Say Y here to select the TI DA850/OMAP-L138 Evaluation Module.
+ Say Y here to select the TI DA850/OMAP-L138/AM18x Evaluation Module.
choice
prompt "Select peripherals connected to expander on UI board"
depends on MACH_DAVINCI_DA850_EVM
help
- The presence of User Interface (UI) card on the DA850/OMAP-L138
+ The presence of User Interface (UI) card on the DA850/OMAP-L138/AM18x
EVM is detected automatically based on successful probe of the I2C
based GPIO expander on that card. This option selected in this
menu has an effect only in case of a successful UI card detection.
@@ -165,13 +165,13 @@ config DA850_UI_NONE
bool "No peripheral is enabled"
help
Say Y if you do not want to enable any of the peripherals connected
- to TCA6416 expander on DA850/OMAP-L138 EVM UI card
+ to TCA6416 expander on DA850/OMAP-L138/AM18x EVM UI card
config DA850_UI_RMII
bool "RMII Ethernet PHY"
help
- Say Y if you want to use the RMII PHY on the DA850/OMAP-L138 EVM.
- This PHY is found on the UI daughter card that is supplied with
+ Say Y if you want to use the RMII PHY on the DA850/OMAP-L138/AM18x
+ EVM. This PHY is found on the UI daughter card that is supplied with
the EVM.
NOTE: Please take care while choosing this option, MII PHY will
not be functional if RMII mode is selected.
@@ -185,6 +185,22 @@ config MACH_TNETV107X
help
Say Y here to select the TI TNETV107X Evaluation Module.
+config MACH_MITYOMAPL138
+ bool "Critical Link MityDSP-L138/MityARM-1808 SoM"
+ depends on ARCH_DAVINCI_DA850
+ help
+ Say Y here to select the Critical Link MityDSP-L138/MityARM-1808
+ System on Module. Information on this SoM may be found at
+ http://www.mitydsp.com
+
+config MACH_OMAPL138_HAWKBOARD
+ bool "TI AM1808 / OMAPL-138 Hawkboard platform"
+ depends on ARCH_DAVINCI_DA850
+ help
+ Say Y here to select the TI AM1808 / OMAPL-138 Hawkboard platform.
+ Information on this board may be found at
+ http://www.hawkboard.org/
+
config DAVINCI_MUX
bool "DAVINCI multiplexing support"
depends on ARCH_DAVINCI
@@ -195,20 +211,20 @@ config DAVINCI_MUX
say Y.
config DAVINCI_MUX_DEBUG
- bool "Multiplexing debug output"
- depends on DAVINCI_MUX
- help
- Makes the multiplexing functions print out a lot of debug info.
- This is useful if you want to find out the correct values of the
- multiplexing registers.
+ bool "Multiplexing debug output"
+ depends on DAVINCI_MUX
+ help
+ Makes the multiplexing functions print out a lot of debug info.
+ This is useful if you want to find out the correct values of the
+ multiplexing registers.
config DAVINCI_MUX_WARNINGS
- bool "Warn about pins the bootloader didn't set up"
- depends on DAVINCI_MUX
- help
- Choose Y here to warn whenever driver initialization logic needs
- to change the pin multiplexing setup. When there are no warnings
- printed, it's safe to deselect DAVINCI_MUX for your product.
+ bool "Warn about pins the bootloader didn't set up"
+ depends on DAVINCI_MUX
+ help
+ Choose Y here to warn whenever driver initialization logic needs
+ to change the pin multiplexing setup. When there are no warnings
+ printed, it's safe to deselect DAVINCI_MUX for your product.
config DAVINCI_RESET_CLOCKS
bool "Reset unused clocks during boot"
diff --git a/arch/arm/mach-davinci/Makefile b/arch/arm/mach-davinci/Makefile
index eab4c0fd667a..0b87a1ca2bb3 100644
--- a/arch/arm/mach-davinci/Makefile
+++ b/arch/arm/mach-davinci/Makefile
@@ -5,7 +5,7 @@
# Common objects
obj-y := time.o clock.o serial.o io.o psc.o \
- gpio.o dma.o usb.o common.o sram.o
+ gpio.o dma.o usb.o common.o sram.o aemif.o
obj-$(CONFIG_DAVINCI_MUX) += mux.o
@@ -33,6 +33,8 @@ obj-$(CONFIG_MACH_DAVINCI_DM365_EVM) += board-dm365-evm.o
obj-$(CONFIG_MACH_DAVINCI_DA830_EVM) += board-da830-evm.o
obj-$(CONFIG_MACH_DAVINCI_DA850_EVM) += board-da850-evm.o
obj-$(CONFIG_MACH_TNETV107X) += board-tnetv107x-evm.o
+obj-$(CONFIG_MACH_MITYOMAPL138) += board-mityomapl138.o
+obj-$(CONFIG_MACH_OMAPL138_HAWKBOARD) += board-omapl138-hawk.o
# Power Management
obj-$(CONFIG_CPU_FREQ) += cpufreq.o
diff --git a/arch/arm/mach-davinci/aemif.c b/arch/arm/mach-davinci/aemif.c
new file mode 100644
index 000000000000..9c3f500fc12f
--- /dev/null
+++ b/arch/arm/mach-davinci/aemif.c
@@ -0,0 +1,133 @@
+/*
+ * AEMIF support for DaVinci SoCs
+ *
+ * Copyright (C) 2010 Texas Instruments Incorporated. http://www.ti.com/
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/kernel.h>
+#include <linux/io.h>
+#include <linux/err.h>
+#include <linux/clk.h>
+#include <linux/module.h>
+#include <linux/time.h>
+
+#include <mach/aemif.h>
+
+/* Timing value configuration */
+
+#define TA(x) ((x) << 2)
+#define RHOLD(x) ((x) << 4)
+#define RSTROBE(x) ((x) << 7)
+#define RSETUP(x) ((x) << 13)
+#define WHOLD(x) ((x) << 17)
+#define WSTROBE(x) ((x) << 20)
+#define WSETUP(x) ((x) << 26)
+
+#define TA_MAX 0x3
+#define RHOLD_MAX 0x7
+#define RSTROBE_MAX 0x3f
+#define RSETUP_MAX 0xf
+#define WHOLD_MAX 0x7
+#define WSTROBE_MAX 0x3f
+#define WSETUP_MAX 0xf
+
+#define TIMING_MASK (TA(TA_MAX) | \
+ RHOLD(RHOLD_MAX) | \
+ RSTROBE(RSTROBE_MAX) | \
+ RSETUP(RSETUP_MAX) | \
+ WHOLD(WHOLD_MAX) | \
+ WSTROBE(WSTROBE_MAX) | \
+ WSETUP(WSETUP_MAX))
+
+/*
+ * aemif_calc_rate - calculate timing data.
+ * @wanted: The cycle time needed in nanoseconds.
+ * @clk: The input clock rate in kHz.
+ * @max: The maximum divider value that can be programmed.
+ *
+ * On success, returns the calculated timing value minus 1 for easy
+ * programming into AEMIF timing registers, else negative errno.
+ */
+static int aemif_calc_rate(int wanted, unsigned long clk, int max)
+{
+ int result;
+
+ result = DIV_ROUND_UP((wanted * clk), NSEC_PER_MSEC) - 1;
+
+ pr_debug("%s: result %d from %ld, %d\n", __func__, result, clk, wanted);
+
+ /* It is generally OK to have a more relaxed timing than requested... */
+ if (result < 0)
+ result = 0;
+
+ /* ... But configuring tighter timings is not an option. */
+ else if (result > max)
+ result = -EINVAL;
+
+ return result;
+}
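
[Editor's illustration, not part of the patch] One worked case of the arithmetic above, assuming a 100 MHz AEMIF clock purely for the sake of the numbers:

/*
 * Illustrative only: with clk = 100000 kHz (100 MHz) and wanted = 40 ns,
 *   result = DIV_ROUND_UP(40 * 100000, NSEC_PER_MSEC) - 1
 *          = DIV_ROUND_UP(4000000, 1000000) - 1
 *          = 4 - 1 = 3
 * so the register field is programmed with 3, i.e. 4 clock cycles,
 * which is exactly 40 ns at 100 MHz.
 */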
+
+/**
+ * davinci_aemif_setup_timing - setup timing values for a given AEMIF interface
+ * @t: timing values to be programmed
+ * @base: The virtual base address of the AEMIF interface
+ * @cs: chip-select to program the timing values for
+ *
+ * This function programs the given timing values (in real clock) into the
+ * AEMIF registers taking the AEMIF clock into account.
+ *
+ * This function does not use any locking while programming the AEMIF
+ * because it is expected that there is only one user of a given
+ * chip-select.
+ *
+ * Returns 0 on success, else negative errno.
+ */
+int davinci_aemif_setup_timing(struct davinci_aemif_timing *t,
+ void __iomem *base, unsigned cs)
+{
+ unsigned set, val;
+ unsigned ta, rhold, rstrobe, rsetup, whold, wstrobe, wsetup;
+ unsigned offset = A1CR_OFFSET + cs * 4;
+ struct clk *aemif_clk;
+ unsigned long clkrate;
+
+ if (!t)
+ return 0; /* Nothing to do */
+
+ aemif_clk = clk_get(NULL, "aemif");
+ if (IS_ERR(aemif_clk))
+ return PTR_ERR(aemif_clk);
+
+ clkrate = clk_get_rate(aemif_clk);
+
+ clkrate /= 1000; /* turn clock into kHz for ease of use */
+
+ ta = aemif_calc_rate(t->ta, clkrate, TA_MAX);
+ rhold = aemif_calc_rate(t->rhold, clkrate, RHOLD_MAX);
+ rstrobe = aemif_calc_rate(t->rstrobe, clkrate, RSTROBE_MAX);
+ rsetup = aemif_calc_rate(t->rsetup, clkrate, RSETUP_MAX);
+ whold = aemif_calc_rate(t->whold, clkrate, WHOLD_MAX);
+ wstrobe = aemif_calc_rate(t->wstrobe, clkrate, WSTROBE_MAX);
+ wsetup = aemif_calc_rate(t->wsetup, clkrate, WSETUP_MAX);
+
+ if (ta < 0 || rhold < 0 || rstrobe < 0 || rsetup < 0 ||
+ whold < 0 || wstrobe < 0 || wsetup < 0) {
+ pr_err("%s: cannot get suitable timings\n", __func__);
+ return -EINVAL;
+ }
+
+ set = TA(ta) | RHOLD(rhold) | RSTROBE(rstrobe) | RSETUP(rsetup) |
+ WHOLD(whold) | WSTROBE(wstrobe) | WSETUP(wsetup);
+
+ val = __raw_readl(base + offset);
+ val &= ~TIMING_MASK;
+ val |= set;
+ __raw_writel(val, base + offset);
+
+ return 0;
+}
+EXPORT_SYMBOL(davinci_aemif_setup_timing);
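
[Editor's illustration, not part of the patch] A minimal usage sketch; it mirrors the board hunks later in this patch (values are nanoseconds, the conversion to cycles happens in this file), and the note about the NAND driver making the final call is a reading of the series rather than something shown in this hunk:

/* Usage sketch: a board file supplies timings in nanoseconds through
 * davinci_nand_pdata, e.g.
 *
 *	static struct davinci_aemif_timing example_nand_timing = {
 *		.wsetup  = 24,
 *		.wstrobe = 21,
 *		.whold   = 14,
 *		.rsetup  = 19,
 *		.rstrobe = 50,
 *		.rhold   = 0,
 *		.ta      = 20,
 *	};
 *	...
 *	some_nand_pdata.timing = &example_nand_timing;
 *
 * davinci_aemif_setup_timing() is then presumably invoked with the
 * AEMIF control base and chip-select when the NAND device is set up.
 */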
diff --git a/arch/arm/mach-davinci/board-da830-evm.c b/arch/arm/mach-davinci/board-da830-evm.c
index 7f3cdbfc0fbb..b52a3a1abd94 100644
--- a/arch/arm/mach-davinci/board-da830-evm.c
+++ b/arch/arm/mach-davinci/board-da830-evm.c
@@ -29,10 +29,9 @@
#include <mach/nand.h>
#include <mach/da8xx.h>
#include <mach/usb.h>
+#include <mach/aemif.h>
-#define DA830_EVM_PHY_MASK 0x0
-#define DA830_EVM_MDIO_FREQUENCY 2200000 /* PHY bus frequency */
-
+#define DA830_EVM_PHY_ID ""
/*
* USB1 VBUS is controlled by GPIO1[15], over-current is reported on GPIO2[4].
*/
@@ -360,6 +359,16 @@ static struct nand_bbt_descr da830_evm_nand_bbt_mirror_descr = {
.pattern = da830_evm_nand_mirror_pattern
};
+static struct davinci_aemif_timing da830_evm_nandflash_timing = {
+ .wsetup = 24,
+ .wstrobe = 21,
+ .whold = 14,
+ .rsetup = 19,
+ .rstrobe = 50,
+ .rhold = 0,
+ .ta = 20,
+};
+
static struct davinci_nand_pdata da830_evm_nand_pdata = {
.parts = da830_evm_nand_partitions,
.nr_parts = ARRAY_SIZE(da830_evm_nand_partitions),
@@ -368,6 +377,7 @@ static struct davinci_nand_pdata da830_evm_nand_pdata = {
.options = NAND_USE_FLASH_BBT,
.bbt_td = &da830_evm_nand_bbt_main_descr,
.bbt_md = &da830_evm_nand_bbt_mirror_descr,
+ .timing = &da830_evm_nandflash_timing,
};
static struct resource da830_evm_nand_resources[] = {
@@ -546,9 +556,8 @@ static __init void da830_evm_init(void)
da830_evm_usb_init();
- soc_info->emac_pdata->phy_mask = DA830_EVM_PHY_MASK;
- soc_info->emac_pdata->mdio_max_freq = DA830_EVM_MDIO_FREQUENCY;
soc_info->emac_pdata->rmii_en = 1;
+ soc_info->emac_pdata->phy_id = DA830_EVM_PHY_ID;
ret = davinci_cfg_reg_list(da830_cpgmac_pins);
if (ret)
@@ -586,6 +595,9 @@ static __init void da830_evm_init(void)
#ifdef CONFIG_SERIAL_8250_CONSOLE
static int __init da830_evm_console_init(void)
{
+ if (!machine_is_davinci_da830_evm())
+ return 0;
+
return add_preferred_console("ttyS", 2, "115200");
}
console_initcall(da830_evm_console_init);
@@ -596,7 +608,7 @@ static void __init da830_evm_map_io(void)
da830_init();
}
-MACHINE_START(DAVINCI_DA830_EVM, "DaVinci DA830/OMAP-L137 EVM")
+MACHINE_START(DAVINCI_DA830_EVM, "DaVinci DA830/OMAP-L137/AM17x EVM")
.boot_params = (DA8XX_DDR_BASE + 0x100),
.map_io = da830_evm_map_io,
.init_irq = cp_intc_init,
diff --git a/arch/arm/mach-davinci/board-da850-evm.c b/arch/arm/mach-davinci/board-da850-evm.c
index b26f5cbfce3e..c6e11c682e4c 100644
--- a/arch/arm/mach-davinci/board-da850-evm.c
+++ b/arch/arm/mach-davinci/board-da850-evm.c
@@ -26,7 +26,6 @@
#include <linux/mtd/physmap.h>
#include <linux/regulator/machine.h>
#include <linux/regulator/tps6507x.h>
-#include <linux/mfd/tps6507x.h>
#include <linux/input/tps6507x-ts.h>
#include <asm/mach-types.h>
@@ -36,10 +35,9 @@
#include <mach/da8xx.h>
#include <mach/nand.h>
#include <mach/mux.h>
+#include <mach/aemif.h>
-#define DA850_EVM_PHY_MASK 0x1
-#define DA850_EVM_MDIO_FREQUENCY 2200000 /* PHY bus frequency */
-
+#define DA850_EVM_PHY_ID "0:00"
#define DA850_LCD_PWR_PIN GPIO_TO_PIN(2, 8)
#define DA850_LCD_BL_PIN GPIO_TO_PIN(2, 15)
@@ -110,7 +108,7 @@ static struct platform_device da850_pm_device = {
* to boot, using TI's tools to install the secondary boot loader
* (UBL) and U-Boot.
*/
-struct mtd_partition da850_evm_nandflash_partition[] = {
+static struct mtd_partition da850_evm_nandflash_partition[] = {
{
.name = "u-boot env",
.offset = 0,
@@ -143,12 +141,23 @@ struct mtd_partition da850_evm_nandflash_partition[] = {
},
};
+static struct davinci_aemif_timing da850_evm_nandflash_timing = {
+ .wsetup = 24,
+ .wstrobe = 21,
+ .whold = 14,
+ .rsetup = 19,
+ .rstrobe = 50,
+ .rhold = 0,
+ .ta = 20,
+};
+
static struct davinci_nand_pdata da850_evm_nandflash_data = {
.parts = da850_evm_nandflash_partition,
.nr_parts = ARRAY_SIZE(da850_evm_nandflash_partition),
.ecc_mode = NAND_ECC_HW,
.ecc_bits = 4,
.options = NAND_USE_FLASH_BBT,
+ .timing = &da850_evm_nandflash_timing,
};
static struct resource da850_evm_nandflash_resource[] = {
@@ -196,6 +205,30 @@ static void __init da850_evm_init_nor(void)
iounmap(aemif_addr);
}
+static const short da850_evm_nand_pins[] = {
+ DA850_EMA_D_0, DA850_EMA_D_1, DA850_EMA_D_2, DA850_EMA_D_3,
+ DA850_EMA_D_4, DA850_EMA_D_5, DA850_EMA_D_6, DA850_EMA_D_7,
+ DA850_EMA_A_1, DA850_EMA_A_2, DA850_NEMA_CS_3, DA850_NEMA_CS_4,
+ DA850_NEMA_WE, DA850_NEMA_OE,
+ -1
+};
+
+static const short da850_evm_nor_pins[] = {
+ DA850_EMA_BA_1, DA850_EMA_CLK, DA850_EMA_WAIT_1, DA850_NEMA_CS_2,
+ DA850_NEMA_WE, DA850_NEMA_OE, DA850_EMA_D_0, DA850_EMA_D_1,
+ DA850_EMA_D_2, DA850_EMA_D_3, DA850_EMA_D_4, DA850_EMA_D_5,
+ DA850_EMA_D_6, DA850_EMA_D_7, DA850_EMA_D_8, DA850_EMA_D_9,
+ DA850_EMA_D_10, DA850_EMA_D_11, DA850_EMA_D_12, DA850_EMA_D_13,
+ DA850_EMA_D_14, DA850_EMA_D_15, DA850_EMA_A_0, DA850_EMA_A_1,
+ DA850_EMA_A_2, DA850_EMA_A_3, DA850_EMA_A_4, DA850_EMA_A_5,
+ DA850_EMA_A_6, DA850_EMA_A_7, DA850_EMA_A_8, DA850_EMA_A_9,
+ DA850_EMA_A_10, DA850_EMA_A_11, DA850_EMA_A_12, DA850_EMA_A_13,
+ DA850_EMA_A_14, DA850_EMA_A_15, DA850_EMA_A_16, DA850_EMA_A_17,
+ DA850_EMA_A_18, DA850_EMA_A_19, DA850_EMA_A_20, DA850_EMA_A_21,
+ DA850_EMA_A_22, DA850_EMA_A_23,
+ -1
+};
+
static u32 ui_card_detected;
#if defined(CONFIG_MMC_DAVINCI) || \
@@ -205,17 +238,17 @@ static u32 ui_card_detected;
#define HAS_MMC 0
#endif
-static __init void da850_evm_setup_nor_nand(void)
+static inline void da850_evm_setup_nor_nand(void)
{
int ret = 0;
if (ui_card_detected & !HAS_MMC) {
- ret = davinci_cfg_reg_list(da850_nand_pins);
+ ret = davinci_cfg_reg_list(da850_evm_nand_pins);
if (ret)
pr_warning("da850_evm_init: nand mux setup failed: "
"%d\n", ret);
- ret = davinci_cfg_reg_list(da850_nor_pins);
+ ret = davinci_cfg_reg_list(da850_evm_nor_pins);
if (ret)
pr_warning("da850_evm_init: nor mux setup failed: %d\n",
ret);
@@ -406,7 +439,7 @@ static int da850_lcd_hw_init(void)
/* TPS65070 voltage regulator support */
/* 3.3V */
-struct regulator_consumer_supply tps65070_dcdc1_consumers[] = {
+static struct regulator_consumer_supply tps65070_dcdc1_consumers[] = {
{
.supply = "usb0_vdda33",
},
@@ -416,7 +449,7 @@ struct regulator_consumer_supply tps65070_dcdc1_consumers[] = {
};
/* 3.3V or 1.8V */
-struct regulator_consumer_supply tps65070_dcdc2_consumers[] = {
+static struct regulator_consumer_supply tps65070_dcdc2_consumers[] = {
{
.supply = "dvdd3318_a",
},
@@ -429,14 +462,14 @@ struct regulator_consumer_supply tps65070_dcdc2_consumers[] = {
};
/* 1.2V */
-struct regulator_consumer_supply tps65070_dcdc3_consumers[] = {
+static struct regulator_consumer_supply tps65070_dcdc3_consumers[] = {
{
.supply = "cvdd",
},
};
/* 1.8V LDO */
-struct regulator_consumer_supply tps65070_ldo1_consumers[] = {
+static struct regulator_consumer_supply tps65070_ldo1_consumers[] = {
{
.supply = "sata_vddr",
},
@@ -452,7 +485,7 @@ struct regulator_consumer_supply tps65070_ldo1_consumers[] = {
};
/* 1.2V LDO */
-struct regulator_consumer_supply tps65070_ldo2_consumers[] = {
+static struct regulator_consumer_supply tps65070_ldo2_consumers[] = {
{
.supply = "sata_vdd",
},
@@ -475,7 +508,7 @@ static struct tps6507x_reg_platform_data tps6507x_platform_data = {
.defdcdc_default = true,
};
-struct regulator_init_data tps65070_regulator_data[] = {
+static struct regulator_init_data tps65070_regulator_data[] = {
/* dcdc1 */
{
.constraints = {
@@ -576,6 +609,23 @@ static const short da850_evm_lcdc_pins[] = {
-1
};
+static const short da850_evm_mii_pins[] = {
+ DA850_MII_TXEN, DA850_MII_TXCLK, DA850_MII_COL, DA850_MII_TXD_3,
+ DA850_MII_TXD_2, DA850_MII_TXD_1, DA850_MII_TXD_0, DA850_MII_RXER,
+ DA850_MII_CRS, DA850_MII_RXCLK, DA850_MII_RXDV, DA850_MII_RXD_3,
+ DA850_MII_RXD_2, DA850_MII_RXD_1, DA850_MII_RXD_0, DA850_MDIO_CLK,
+ DA850_MDIO_D,
+ -1
+};
+
+static const short da850_evm_rmii_pins[] = {
+ DA850_RMII_TXD_0, DA850_RMII_TXD_1, DA850_RMII_TXEN,
+ DA850_RMII_CRS_DV, DA850_RMII_RXD_0, DA850_RMII_RXD_1,
+ DA850_RMII_RXER, DA850_RMII_MHZ_50_CLK, DA850_MDIO_CLK,
+ DA850_MDIO_D,
+ -1
+};
+
static int __init da850_evm_config_emac(void)
{
void __iomem *cfg_chip3_base;
@@ -593,12 +643,12 @@ static int __init da850_evm_config_emac(void)
if (rmii_en) {
val |= BIT(8);
- ret = davinci_cfg_reg_list(da850_rmii_pins);
+ ret = davinci_cfg_reg_list(da850_evm_rmii_pins);
pr_info("EMAC: RMII PHY configured, MII PHY will not be"
" functional\n");
} else {
val &= ~BIT(8);
- ret = davinci_cfg_reg_list(da850_cpgmac_pins);
+ ret = davinci_cfg_reg_list(da850_evm_mii_pins);
pr_info("EMAC: MII PHY configured, RMII PHY will not be"
" functional\n");
}
@@ -625,8 +675,7 @@ static int __init da850_evm_config_emac(void)
/* Enable/Disable MII MDIO clock */
gpio_direction_output(DA850_MII_MDIO_CLKEN_PIN, rmii_en);
- soc_info->emac_pdata->phy_mask = DA850_EVM_PHY_MASK;
- soc_info->emac_pdata->mdio_max_freq = DA850_EVM_MDIO_FREQUENCY;
+ soc_info->emac_pdata->phy_id = DA850_EVM_PHY_ID;
ret = da8xx_register_emac();
if (ret)
@@ -787,7 +836,7 @@ static __init void da850_evm_init(void)
if (ret)
pr_warning("da850_evm_init: rtc setup failed: %d\n", ret);
- ret = da850_register_cpufreq();
+ ret = da850_register_cpufreq("pll0_sysclk3");
if (ret)
pr_warning("da850_evm_init: cpufreq registration failed: %d\n",
ret);
@@ -806,6 +855,9 @@ static __init void da850_evm_init(void)
#ifdef CONFIG_SERIAL_8250_CONSOLE
static int __init da850_evm_console_init(void)
{
+ if (!machine_is_davinci_da850_evm())
+ return 0;
+
return add_preferred_console("ttyS", 2, "115200");
}
console_initcall(da850_evm_console_init);
@@ -816,7 +868,7 @@ static void __init da850_evm_map_io(void)
da850_init();
}
-MACHINE_START(DAVINCI_DA850_EVM, "DaVinci DA850/OMAP-L138 EVM")
+MACHINE_START(DAVINCI_DA850_EVM, "DaVinci DA850/OMAP-L138/AM18x EVM")
.boot_params = (DA8XX_DDR_BASE + 0x100),
.map_io = da850_evm_map_io,
.init_irq = cp_intc_init,
diff --git a/arch/arm/mach-davinci/board-dm365-evm.c b/arch/arm/mach-davinci/board-dm365-evm.c
index 944a0cbaf5cb..c67f684ee3e5 100644
--- a/arch/arm/mach-davinci/board-dm365-evm.c
+++ b/arch/arm/mach-davinci/board-dm365-evm.c
@@ -54,9 +54,7 @@ static inline int have_tvp7002(void)
return 0;
}
-#define DM365_EVM_PHY_MASK (0x2)
-#define DM365_EVM_MDIO_FREQUENCY (2200000) /* PHY bus frequency */
-
+#define DM365_EVM_PHY_ID "0:01"
/*
* A MAX-II CPLD is used for various board control functions.
*/
@@ -175,7 +173,9 @@ static struct at24_platform_data eeprom_info = {
.context = (void *)0x7f00,
};
-static struct snd_platform_data dm365_evm_snd_data;
+static struct snd_platform_data dm365_evm_snd_data = {
+ .asp_chan_q = EVENTQ_3,
+};
static struct i2c_board_info i2c_info[] = {
{
@@ -533,8 +533,7 @@ fail:
/* ... and ENET ... */
dm365evm_emac_configure();
- soc_info->emac_pdata->phy_mask = DM365_EVM_PHY_MASK;
- soc_info->emac_pdata->mdio_max_freq = DM365_EVM_MDIO_FREQUENCY;
+ soc_info->emac_pdata->phy_id = DM365_EVM_PHY_ID;
resets &= ~BIT(3);
/* ... and AIC33 */
diff --git a/arch/arm/mach-davinci/board-dm644x-evm.c b/arch/arm/mach-davinci/board-dm644x-evm.c
index d59fba15ba8d..0ca90b834586 100644
--- a/arch/arm/mach-davinci/board-dm644x-evm.c
+++ b/arch/arm/mach-davinci/board-dm644x-evm.c
@@ -37,10 +37,9 @@
#include <mach/nand.h>
#include <mach/mmc.h>
#include <mach/usb.h>
+#include <mach/aemif.h>
-#define DM644X_EVM_PHY_MASK (0x2)
-#define DM644X_EVM_MDIO_FREQUENCY (2200000) /* PHY bus frequency */
-
+#define DM644X_EVM_PHY_ID "0:01"
#define LXT971_PHY_ID (0x001378e2)
#define LXT971_PHY_MASK (0xfffffff0)
@@ -137,11 +136,22 @@ static struct mtd_partition davinci_evm_nandflash_partition[] = {
*/
};
+static struct davinci_aemif_timing davinci_evm_nandflash_timing = {
+ .wsetup = 20,
+ .wstrobe = 40,
+ .whold = 20,
+ .rsetup = 10,
+ .rstrobe = 40,
+ .rhold = 10,
+ .ta = 40,
+};
+
static struct davinci_nand_pdata davinci_evm_nandflash_data = {
.parts = davinci_evm_nandflash_partition,
.nr_parts = ARRAY_SIZE(davinci_evm_nandflash_partition),
.ecc_mode = NAND_ECC_HW,
.options = NAND_USE_FLASH_BBT,
+ .timing = &davinci_evm_nandflash_timing,
};
static struct resource davinci_evm_nandflash_resource[] = {
@@ -695,9 +705,7 @@ static __init void davinci_evm_init(void)
davinci_serial_init(&uart_config);
dm644x_init_asp(&dm644x_evm_snd_data);
- soc_info->emac_pdata->phy_mask = DM644X_EVM_PHY_MASK;
- soc_info->emac_pdata->mdio_max_freq = DM644X_EVM_MDIO_FREQUENCY;
-
+ soc_info->emac_pdata->phy_id = DM644X_EVM_PHY_ID;
/* Register the fixup for PHY on DaVinci */
phy_register_fixup_for_uid(LXT971_PHY_ID, LXT971_PHY_MASK,
davinci_phy_fixup);
diff --git a/arch/arm/mach-davinci/board-dm646x-evm.c b/arch/arm/mach-davinci/board-dm646x-evm.c
index 6890488fb92b..f6ac9ba74878 100644
--- a/arch/arm/mach-davinci/board-dm646x-evm.c
+++ b/arch/arm/mach-davinci/board-dm646x-evm.c
@@ -42,6 +42,7 @@
#include <mach/nand.h>
#include <mach/clock.h>
#include <mach/cdce949.h>
+#include <mach/aemif.h>
#include "clock.h"
@@ -71,6 +72,16 @@ static struct mtd_partition davinci_nand_partitions[] = {
}
};
+static struct davinci_aemif_timing dm6467tevm_nandflash_timing = {
+ .wsetup = 29,
+ .wstrobe = 24,
+ .whold = 14,
+ .rsetup = 19,
+ .rstrobe = 33,
+ .rhold = 0,
+ .ta = 29,
+};
+
static struct davinci_nand_pdata davinci_nand_data = {
.mask_cle = 0x80000,
.mask_ale = 0x40000,
@@ -718,9 +729,7 @@ static struct davinci_uart_config uart_config __initdata = {
.enabled_uarts = (1 << 0),
};
-#define DM646X_EVM_PHY_MASK (0x2)
-#define DM646X_EVM_MDIO_FREQUENCY (2200000) /* PHY bus frequency */
-
+#define DM646X_EVM_PHY_ID "0:01"
/*
* The following EDMA channels/slots are not being used by drivers (for
* example: Timer, GPIO, UART events etc) on dm646x, hence they are being
@@ -763,6 +772,9 @@ static __init void evm_init(void)
dm646x_init_mcasp0(&dm646x_evm_snd_data[0]);
dm646x_init_mcasp1(&dm646x_evm_snd_data[1]);
+ if (machine_is_davinci_dm6467tevm())
+ davinci_nand_data.timing = &dm6467tevm_nandflash_timing;
+
platform_device_register(&davinci_nand_device);
dm646x_init_edma(dm646x_edma_rsv);
@@ -770,8 +782,7 @@ static __init void evm_init(void)
if (HAS_ATA)
davinci_init_ide();
- soc_info->emac_pdata->phy_mask = DM646X_EVM_PHY_MASK;
- soc_info->emac_pdata->mdio_max_freq = DM646X_EVM_MDIO_FREQUENCY;
+ soc_info->emac_pdata->phy_id = DM646X_EVM_PHY_ID;
}
#define DM646X_EVM_REF_FREQ 27000000
diff --git a/arch/arm/mach-davinci/board-mityomapl138.c b/arch/arm/mach-davinci/board-mityomapl138.c
new file mode 100644
index 000000000000..0bb5f0ce4fdc
--- /dev/null
+++ b/arch/arm/mach-davinci/board-mityomapl138.c
@@ -0,0 +1,422 @@
+/*
+ * Critical Link MityOMAP-L138 SoM
+ *
+ * Copyright (C) 2010 Critical Link LLC - http://www.criticallink.com
+ *
+ * This file is licensed under the terms of the GNU General Public License
+ * version 2. This program is licensed "as is" without any warranty of
+ * any kind, whether express or implied.
+ */
+
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/console.h>
+#include <linux/platform_device.h>
+#include <linux/mtd/partitions.h>
+#include <linux/regulator/machine.h>
+#include <linux/i2c.h>
+#include <linux/i2c/at24.h>
+#include <linux/etherdevice.h>
+
+#include <asm/mach-types.h>
+#include <asm/mach/arch.h>
+#include <mach/common.h>
+#include <mach/cp_intc.h>
+#include <mach/da8xx.h>
+#include <mach/nand.h>
+#include <mach/mux.h>
+
+#define MITYOMAPL138_PHY_ID "0:03"
+
+#define FACTORY_CONFIG_MAGIC 0x012C0138
+#define FACTORY_CONFIG_VERSION 0x00010001
+
+/* Data Held in On-Board I2C device */
+struct factory_config {
+ u32 magic;
+ u32 version;
+ u8 mac[6];
+ u32 fpga_type;
+ u32 spare;
+ u32 serialnumber;
+ char partnum[32];
+};
+
+static struct factory_config factory_config;
+
+static void read_factory_config(struct memory_accessor *a, void *context)
+{
+ int ret;
+ struct davinci_soc_info *soc_info = &davinci_soc_info;
+
+ ret = a->read(a, (char *)&factory_config, 0, sizeof(factory_config));
+ if (ret != sizeof(struct factory_config)) {
+ pr_warning("MityOMAPL138: Read Factory Config Failed: %d\n",
+ ret);
+ return;
+ }
+
+ if (factory_config.magic != FACTORY_CONFIG_MAGIC) {
+ pr_warning("MityOMAPL138: Factory Config Magic Wrong (%X)\n",
+ factory_config.magic);
+ return;
+ }
+
+ if (factory_config.version != FACTORY_CONFIG_VERSION) {
+ pr_warning("MityOMAPL138: Factory Config Version Wrong (%X)\n",
+ factory_config.version);
+ return;
+ }
+
+ pr_info("MityOMAPL138: Found MAC = %pM\n", factory_config.mac);
+ pr_info("MityOMAPL138: Part Number = %s\n", factory_config.partnum);
+ if (is_valid_ether_addr(factory_config.mac))
+ memcpy(soc_info->emac_pdata->mac_addr,
+ factory_config.mac, ETH_ALEN);
+ else
+ pr_warning("MityOMAPL138: Invalid MAC found "
+ "in factory config block\n");
+}
+
+static struct at24_platform_data mityomapl138_fd_chip = {
+ .byte_len = 256,
+ .page_size = 8,
+ .flags = AT24_FLAG_READONLY | AT24_FLAG_IRUGO,
+ .setup = read_factory_config,
+ .context = NULL,
+};
+
+static struct davinci_i2c_platform_data mityomap_i2c_0_pdata = {
+ .bus_freq = 100, /* kHz */
+ .bus_delay = 0, /* usec */
+};
+
+/* TPS65023 voltage regulator support */
+/* 1.2V Core */
+static struct regulator_consumer_supply tps65023_dcdc1_consumers[] = {
+ {
+ .supply = "cvdd",
+ },
+};
+
+/* 1.8V */
+static struct regulator_consumer_supply tps65023_dcdc2_consumers[] = {
+ {
+ .supply = "usb0_vdda18",
+ },
+ {
+ .supply = "usb1_vdda18",
+ },
+ {
+ .supply = "ddr_dvdd18",
+ },
+ {
+ .supply = "sata_vddr",
+ },
+};
+
+/* 1.2V */
+static struct regulator_consumer_supply tps65023_dcdc3_consumers[] = {
+ {
+ .supply = "sata_vdd",
+ },
+ {
+ .supply = "usb_cvdd",
+ },
+ {
+ .supply = "pll0_vdda",
+ },
+ {
+ .supply = "pll1_vdda",
+ },
+};
+
+/* 1.8V Aux LDO, not used */
+static struct regulator_consumer_supply tps65023_ldo1_consumers[] = {
+ {
+ .supply = "1.8v_aux",
+ },
+};
+
+/* FPGA VCC Aux (2.5 or 3.3) LDO */
+static struct regulator_consumer_supply tps65023_ldo2_consumers[] = {
+ {
+ .supply = "vccaux",
+ },
+};
+
+static struct regulator_init_data tps65023_regulator_data[] = {
+ /* dcdc1 */
+ {
+ .constraints = {
+ .min_uV = 1150000,
+ .max_uV = 1350000,
+ .valid_ops_mask = REGULATOR_CHANGE_VOLTAGE |
+ REGULATOR_CHANGE_STATUS,
+ .boot_on = 1,
+ },
+ .num_consumer_supplies = ARRAY_SIZE(tps65023_dcdc1_consumers),
+ .consumer_supplies = tps65023_dcdc1_consumers,
+ },
+ /* dcdc2 */
+ {
+ .constraints = {
+ .min_uV = 1800000,
+ .max_uV = 1800000,
+ .valid_ops_mask = REGULATOR_CHANGE_STATUS,
+ .boot_on = 1,
+ },
+ .num_consumer_supplies = ARRAY_SIZE(tps65023_dcdc2_consumers),
+ .consumer_supplies = tps65023_dcdc2_consumers,
+ },
+ /* dcdc3 */
+ {
+ .constraints = {
+ .min_uV = 1200000,
+ .max_uV = 1200000,
+ .valid_ops_mask = REGULATOR_CHANGE_STATUS,
+ .boot_on = 1,
+ },
+ .num_consumer_supplies = ARRAY_SIZE(tps65023_dcdc3_consumers),
+ .consumer_supplies = tps65023_dcdc3_consumers,
+ },
+ /* ldo1 */
+ {
+ .constraints = {
+ .min_uV = 1800000,
+ .max_uV = 1800000,
+ .valid_ops_mask = REGULATOR_CHANGE_STATUS,
+ .boot_on = 1,
+ },
+ .num_consumer_supplies = ARRAY_SIZE(tps65023_ldo1_consumers),
+ .consumer_supplies = tps65023_ldo1_consumers,
+ },
+ /* ldo2 */
+ {
+ .constraints = {
+ .min_uV = 2500000,
+ .max_uV = 3300000,
+ .valid_ops_mask = REGULATOR_CHANGE_VOLTAGE |
+ REGULATOR_CHANGE_STATUS,
+ .boot_on = 1,
+ },
+ .num_consumer_supplies = ARRAY_SIZE(tps65023_ldo2_consumers),
+ .consumer_supplies = tps65023_ldo2_consumers,
+ },
+};
+
+static struct i2c_board_info __initdata mityomap_tps65023_info[] = {
+ {
+ I2C_BOARD_INFO("tps65023", 0x48),
+ .platform_data = &tps65023_regulator_data[0],
+ },
+ {
+ I2C_BOARD_INFO("24c02", 0x50),
+ .platform_data = &mityomapl138_fd_chip,
+ },
+};
+
+static int __init pmic_tps65023_init(void)
+{
+ return i2c_register_board_info(1, mityomap_tps65023_info,
+ ARRAY_SIZE(mityomap_tps65023_info));
+}
+
+/*
+ * MityDSP-L138 includes a 256 MByte large-page NAND flash
+ * (128K blocks).
+ */
+static struct mtd_partition mityomapl138_nandflash_partition[] = {
+ {
+ .name = "rootfs",
+ .offset = 0,
+ .size = SZ_128M,
+ .mask_flags = 0, /* MTD_WRITEABLE, */
+ },
+ {
+ .name = "homefs",
+ .offset = MTDPART_OFS_APPEND,
+ .size = MTDPART_SIZ_FULL,
+ .mask_flags = 0,
+ },
+};
+
+static struct davinci_nand_pdata mityomapl138_nandflash_data = {
+ .parts = mityomapl138_nandflash_partition,
+ .nr_parts = ARRAY_SIZE(mityomapl138_nandflash_partition),
+ .ecc_mode = NAND_ECC_HW,
+ .options = NAND_USE_FLASH_BBT | NAND_BUSWIDTH_16,
+ .ecc_bits = 1, /* 4 bit mode is not supported with 16 bit NAND */
+};
+
+static struct resource mityomapl138_nandflash_resource[] = {
+ {
+ .start = DA8XX_AEMIF_CS3_BASE,
+ .end = DA8XX_AEMIF_CS3_BASE + SZ_512K + 2 * SZ_1K - 1,
+ .flags = IORESOURCE_MEM,
+ },
+ {
+ .start = DA8XX_AEMIF_CTL_BASE,
+ .end = DA8XX_AEMIF_CTL_BASE + SZ_32K - 1,
+ .flags = IORESOURCE_MEM,
+ },
+};
+
+static struct platform_device mityomapl138_nandflash_device = {
+ .name = "davinci_nand",
+ .id = 0,
+ .dev = {
+ .platform_data = &mityomapl138_nandflash_data,
+ },
+ .num_resources = ARRAY_SIZE(mityomapl138_nandflash_resource),
+ .resource = mityomapl138_nandflash_resource,
+};
+
+static struct platform_device *mityomapl138_devices[] __initdata = {
+ &mityomapl138_nandflash_device,
+};
+
+static void __init mityomapl138_setup_nand(void)
+{
+ platform_add_devices(mityomapl138_devices,
+ ARRAY_SIZE(mityomapl138_devices));
+}
+
+static struct davinci_uart_config mityomapl138_uart_config __initdata = {
+ .enabled_uarts = 0x7,
+};
+
+static const short mityomap_mii_pins[] = {
+ DA850_MII_TXEN, DA850_MII_TXCLK, DA850_MII_COL, DA850_MII_TXD_3,
+ DA850_MII_TXD_2, DA850_MII_TXD_1, DA850_MII_TXD_0, DA850_MII_RXER,
+ DA850_MII_CRS, DA850_MII_RXCLK, DA850_MII_RXDV, DA850_MII_RXD_3,
+ DA850_MII_RXD_2, DA850_MII_RXD_1, DA850_MII_RXD_0, DA850_MDIO_CLK,
+ DA850_MDIO_D,
+ -1
+};
+
+static const short mityomap_rmii_pins[] = {
+ DA850_RMII_TXD_0, DA850_RMII_TXD_1, DA850_RMII_TXEN,
+ DA850_RMII_CRS_DV, DA850_RMII_RXD_0, DA850_RMII_RXD_1,
+ DA850_RMII_RXER, DA850_RMII_MHZ_50_CLK, DA850_MDIO_CLK,
+ DA850_MDIO_D,
+ -1
+};
+
+static void __init mityomapl138_config_emac(void)
+{
+ void __iomem *cfg_chip3_base;
+ int ret;
+ u32 val;
+ struct davinci_soc_info *soc_info = &davinci_soc_info;
+
+ soc_info->emac_pdata->rmii_en = 0; /* hardcoded for now */
+
+ cfg_chip3_base = DA8XX_SYSCFG0_VIRT(DA8XX_CFGCHIP3_REG);
+ val = __raw_readl(cfg_chip3_base);
+
+ if (soc_info->emac_pdata->rmii_en) {
+ val |= BIT(8);
+ ret = davinci_cfg_reg_list(mityomap_rmii_pins);
+ pr_info("RMII PHY configured\n");
+ } else {
+ val &= ~BIT(8);
+ ret = davinci_cfg_reg_list(mityomap_mii_pins);
+ pr_info("MII PHY configured\n");
+ }
+
+ if (ret) {
+ pr_warning("mii/rmii mux setup failed: %d\n", ret);
+ return;
+ }
+
+ /* configure the CFGCHIP3 register for RMII or MII */
+ __raw_writel(val, cfg_chip3_base);
+
+ soc_info->emac_pdata->phy_id = MITYOMAPL138_PHY_ID;
+
+ ret = da8xx_register_emac();
+ if (ret)
+ pr_warning("emac registration failed: %d\n", ret);
+}
+
+static struct davinci_pm_config da850_pm_pdata = {
+ .sleepcount = 128,
+};
+
+static struct platform_device da850_pm_device = {
+ .name = "pm-davinci",
+ .dev = {
+ .platform_data = &da850_pm_pdata,
+ },
+ .id = -1,
+};
+
+static void __init mityomapl138_init(void)
+{
+ int ret;
+
+ /* for now, no special EDMA channels are reserved */
+ ret = da850_register_edma(NULL);
+ if (ret)
+ pr_warning("edma registration failed: %d\n", ret);
+
+ ret = da8xx_register_watchdog();
+ if (ret)
+ pr_warning("watchdog registration failed: %d\n", ret);
+
+ davinci_serial_init(&mityomapl138_uart_config);
+
+ ret = da8xx_register_i2c(0, &mityomap_i2c_0_pdata);
+ if (ret)
+ pr_warning("i2c0 registration failed: %d\n", ret);
+
+ ret = pmic_tps65023_init();
+ if (ret)
+ pr_warning("TPS65023 PMIC init failed: %d\n", ret);
+
+ mityomapl138_setup_nand();
+
+ mityomapl138_config_emac();
+
+ ret = da8xx_register_rtc();
+ if (ret)
+ pr_warning("rtc setup failed: %d\n", ret);
+
+ ret = da850_register_cpufreq("pll0_sysclk3");
+ if (ret)
+ pr_warning("cpufreq registration failed: %d\n", ret);
+
+ ret = da8xx_register_cpuidle();
+ if (ret)
+ pr_warning("cpuidle registration failed: %d\n", ret);
+
+ ret = da850_register_pm(&da850_pm_device);
+ if (ret)
+ pr_warning("da850_evm_init: suspend registration failed: %d\n",
+ ret);
+}
+
+#ifdef CONFIG_SERIAL_8250_CONSOLE
+static int __init mityomapl138_console_init(void)
+{
+ if (!machine_is_mityomapl138())
+ return 0;
+
+ return add_preferred_console("ttyS", 1, "115200");
+}
+console_initcall(mityomapl138_console_init);
+#endif
+
+static void __init mityomapl138_map_io(void)
+{
+ da850_init();
+}
+
+MACHINE_START(MITYOMAPL138, "MityDSP-L138/MityARM-1808")
+ .boot_params = (DA8XX_DDR_BASE + 0x100),
+ .map_io = mityomapl138_map_io,
+ .init_irq = cp_intc_init,
+ .timer = &davinci_timer,
+ .init_machine = mityomapl138_init,
+MACHINE_END
diff --git a/arch/arm/mach-davinci/board-neuros-osd2.c b/arch/arm/mach-davinci/board-neuros-osd2.c
index a4def889275c..6c389ff1020e 100644
--- a/arch/arm/mach-davinci/board-neuros-osd2.c
+++ b/arch/arm/mach-davinci/board-neuros-osd2.c
@@ -39,9 +39,7 @@
#include <mach/mmc.h>
#include <mach/usb.h>
-#define NEUROS_OSD2_PHY_MASK 0x2
-#define NEUROS_OSD2_MDIO_FREQUENCY 2200000 /* PHY bus frequency */
-
+#define NEUROS_OSD2_PHY_ID "0:01"
#define LXT971_PHY_ID 0x001378e2
#define LXT971_PHY_MASK 0xfffffff0
@@ -252,8 +250,7 @@ static __init void davinci_ntosd2_init(void)
davinci_serial_init(&uart_config);
dm644x_init_asp(&dm644x_ntosd2_snd_data);
- soc_info->emac_pdata->phy_mask = NEUROS_OSD2_PHY_MASK;
- soc_info->emac_pdata->mdio_max_freq = NEUROS_OSD2_MDIO_FREQUENCY;
+ soc_info->emac_pdata->phy_id = NEUROS_OSD2_PHY_ID;
davinci_setup_usb(1000, 8);
/*
diff --git a/arch/arm/mach-davinci/board-omapl138-hawk.c b/arch/arm/mach-davinci/board-omapl138-hawk.c
new file mode 100644
index 000000000000..0b8dbdb79fe0
--- /dev/null
+++ b/arch/arm/mach-davinci/board-omapl138-hawk.c
@@ -0,0 +1,62 @@
+/*
+ * Hawkboard.org based on TI's OMAP-L138 Platform
+ *
+ * Initial code: Syed Mohammed Khasim
+ *
+ * Copyright (C) 2009 Texas Instruments Incorporated - http://www.ti.com
+ *
+ * This file is licensed under the terms of the GNU General Public License
+ * version 2. This program is licensed "as is" without any warranty of
+ * any kind, whether express or implied.
+ */
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/console.h>
+#include <linux/gpio.h>
+
+#include <asm/mach-types.h>
+#include <asm/mach/arch.h>
+
+#include <mach/cp_intc.h>
+#include <mach/da8xx.h>
+
+static struct davinci_uart_config omapl138_hawk_uart_config __initdata = {
+ .enabled_uarts = 0x7,
+};
+
+static __init void omapl138_hawk_init(void)
+{
+ int ret;
+
+ davinci_serial_init(&omapl138_hawk_uart_config);
+
+ ret = da8xx_register_watchdog();
+ if (ret)
+ pr_warning("omapl138_hawk_init: "
+ "watchdog registration failed: %d\n",
+ ret);
+}
+
+#ifdef CONFIG_SERIAL_8250_CONSOLE
+static int __init omapl138_hawk_console_init(void)
+{
+ if (!machine_is_omapl138_hawkboard())
+ return 0;
+
+ return add_preferred_console("ttyS", 2, "115200");
+}
+console_initcall(omapl138_hawk_console_init);
+#endif
+
+static void __init omapl138_hawk_map_io(void)
+{
+ da850_init();
+}
+
+MACHINE_START(OMAPL138_HAWKBOARD, "AM18x/OMAP-L138 Hawkboard")
+ .boot_params = (DA8XX_DDR_BASE + 0x100),
+ .map_io = omapl138_hawk_map_io,
+ .init_irq = cp_intc_init,
+ .timer = &davinci_timer,
+ .init_machine = omapl138_hawk_init,
+MACHINE_END
diff --git a/arch/arm/mach-davinci/board-sffsdr.c b/arch/arm/mach-davinci/board-sffsdr.c
index 9bdf8aafcc84..61ac96d8f00d 100644
--- a/arch/arm/mach-davinci/board-sffsdr.c
+++ b/arch/arm/mach-davinci/board-sffsdr.c
@@ -42,9 +42,7 @@
#include <mach/mux.h>
#include <mach/usb.h>
-#define SFFSDR_PHY_MASK (0x2)
-#define SFFSDR_MDIO_FREQUENCY (2200000) /* PHY bus frequency */
-
+#define SFFSDR_PHY_ID "0:01"
static struct mtd_partition davinci_sffsdr_nandflash_partition[] = {
/* U-Boot Environment: Block 0
* UBL: Block 1
@@ -143,8 +141,7 @@ static __init void davinci_sffsdr_init(void)
ARRAY_SIZE(davinci_sffsdr_devices));
sffsdr_init_i2c();
davinci_serial_init(&uart_config);
- soc_info->emac_pdata->phy_mask = SFFSDR_PHY_MASK;
- soc_info->emac_pdata->mdio_max_freq = SFFSDR_MDIO_FREQUENCY;
+ soc_info->emac_pdata->phy_id = SFFSDR_PHY_ID;
davinci_setup_usb(0, 0); /* We support only peripheral mode. */
/* mux VLYNQ pins */
diff --git a/arch/arm/mach-davinci/board-tnetv107x-evm.c b/arch/arm/mach-davinci/board-tnetv107x-evm.c
index b4de35b78904..a6db85460227 100644
--- a/arch/arm/mach-davinci/board-tnetv107x-evm.c
+++ b/arch/arm/mach-davinci/board-tnetv107x-evm.c
@@ -23,6 +23,9 @@
#include <linux/ratelimit.h>
#include <linux/mtd/mtd.h>
#include <linux/mtd/partitions.h>
+#include <linux/input.h>
+#include <linux/input/matrix_keypad.h>
+
#include <asm/mach/arch.h>
#include <asm/mach-types.h>
@@ -141,10 +144,63 @@ static struct davinci_uart_config serial_config __initconst = {
.enabled_uarts = BIT(1),
};
+static const uint32_t keymap[] = {
+ KEY(0, 0, KEY_NUMERIC_1),
+ KEY(0, 1, KEY_NUMERIC_2),
+ KEY(0, 2, KEY_NUMERIC_3),
+ KEY(0, 3, KEY_FN_F1),
+ KEY(0, 4, KEY_MENU),
+
+ KEY(1, 0, KEY_NUMERIC_4),
+ KEY(1, 1, KEY_NUMERIC_5),
+ KEY(1, 2, KEY_NUMERIC_6),
+ KEY(1, 3, KEY_UP),
+ KEY(1, 4, KEY_FN_F2),
+
+ KEY(2, 0, KEY_NUMERIC_7),
+ KEY(2, 1, KEY_NUMERIC_8),
+ KEY(2, 2, KEY_NUMERIC_9),
+ KEY(2, 3, KEY_LEFT),
+ KEY(2, 4, KEY_ENTER),
+
+ KEY(3, 0, KEY_NUMERIC_STAR),
+ KEY(3, 1, KEY_NUMERIC_0),
+ KEY(3, 2, KEY_NUMERIC_POUND),
+ KEY(3, 3, KEY_DOWN),
+ KEY(3, 4, KEY_RIGHT),
+
+ KEY(4, 0, KEY_FN_F3),
+ KEY(4, 1, KEY_FN_F4),
+ KEY(4, 2, KEY_MUTE),
+ KEY(4, 3, KEY_HOME),
+ KEY(4, 4, KEY_BACK),
+
+ KEY(5, 0, KEY_VOLUMEDOWN),
+ KEY(5, 1, KEY_VOLUMEUP),
+ KEY(5, 2, KEY_F1),
+ KEY(5, 3, KEY_F2),
+ KEY(5, 4, KEY_F3),
+};
+
+static const struct matrix_keymap_data keymap_data = {
+ .keymap = keymap,
+ .keymap_size = ARRAY_SIZE(keymap),
+};
+
+static struct matrix_keypad_platform_data keypad_config = {
+ .keymap_data = &keymap_data,
+ .num_row_gpios = 6,
+ .num_col_gpios = 5,
+ .debounce_ms = 0, /* minimum */
+ .active_low = 0, /* pull up realization */
+ .no_autorepeat = 0,
+};
+
static struct tnetv107x_device_info evm_device_info __initconst = {
.serial_config = &serial_config,
.mmc_config[1] = &mmc_config, /* controller 1 */
.nand_config[0] = &nand_config, /* chip select 0 */
+ .keypad_config = &keypad_config,
};
static __init void tnetv107x_evm_board_init(void)
diff --git a/arch/arm/mach-davinci/clock.c b/arch/arm/mach-davinci/clock.c
index 054c303caead..01ba080433db 100644
--- a/arch/arm/mach-davinci/clock.c
+++ b/arch/arm/mach-davinci/clock.c
@@ -236,7 +236,7 @@ static int __init clk_disable_unused(void)
if (!davinci_psc_is_clk_active(ck->gpsc, ck->lpsc))
continue;
- pr_info("Clocks: disable unused %s\n", ck->name);
+ pr_debug("Clocks: disable unused %s\n", ck->name);
davinci_psc_config(psc_domain(ck), ck->gpsc, ck->lpsc,
(ck->flags & PSC_SWRSTDISABLE) ?
@@ -287,6 +287,79 @@ static unsigned long clk_sysclk_recalc(struct clk *clk)
return rate;
}
+int davinci_set_sysclk_rate(struct clk *clk, unsigned long rate)
+{
+ unsigned v;
+ struct pll_data *pll;
+ unsigned long input;
+ unsigned ratio = 0;
+
+ /* If this is the PLL base clock, wrong function to call */
+ if (clk->pll_data)
+ return -EINVAL;
+
+ /* There must be a parent... */
+ if (WARN_ON(!clk->parent))
+ return -EINVAL;
+
+ /* ... the parent must be a PLL... */
+ if (WARN_ON(!clk->parent->pll_data))
+ return -EINVAL;
+
+ /* ... and this clock must have a divider. */
+ if (WARN_ON(!clk->div_reg))
+ return -EINVAL;
+
+ pll = clk->parent->pll_data;
+
+ input = clk->parent->rate;
+
+ /* If pre-PLL, source clock is before the multiplier and divider(s) */
+ if (clk->flags & PRE_PLL)
+ input = pll->input_rate;
+
+ if (input > rate) {
+ /*
+ * Can afford to provide an output little higher than requested
+ * only if maximum rate supported by hardware on this sysclk
+ * is known.
+ */
+ if (clk->maxrate) {
+ ratio = DIV_ROUND_CLOSEST(input, rate);
+ if (input / ratio > clk->maxrate)
+ ratio = 0;
+ }
+
+ if (ratio == 0)
+ ratio = DIV_ROUND_UP(input, rate);
+
+ ratio--;
+ }
+
+ if (ratio > PLLDIV_RATIO_MASK)
+ return -EINVAL;
+
+ do {
+ v = __raw_readl(pll->base + PLLSTAT);
+ } while (v & PLLSTAT_GOSTAT);
+
+ v = __raw_readl(pll->base + clk->div_reg);
+ v &= ~PLLDIV_RATIO_MASK;
+ v |= ratio | PLLDIV_EN;
+ __raw_writel(v, pll->base + clk->div_reg);
+
+ v = __raw_readl(pll->base + PLLCMD);
+ v |= PLLCMD_GOSET;
+ __raw_writel(v, pll->base + PLLCMD);
+
+ do {
+ v = __raw_readl(pll->base + PLLSTAT);
+ } while (v & PLLSTAT_GOSTAT);
+
+ return 0;
+}
+EXPORT_SYMBOL(davinci_set_sysclk_rate);
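
[Editor's illustration, not part of the patch] A short sketch of how this hook is wired up; it simply restates what the da850.c hunk further down in this patch does, so treat it as illustration rather than additional API:

/* A SYSCLK opts in by setting .set_rate, after which clk_set_rate()
 * on that clock ends up reprogramming its PLLDIV ratio here:
 *
 *	static struct clk pll0_sysclk3 = {
 *		.name     = "pll0_sysclk3",
 *		.parent   = &pll0_clk,
 *		.flags    = CLK_PLL,
 *		.div_reg  = PLLDIV3,
 *		.set_rate = davinci_set_sysclk_rate,
 *		.maxrate  = 100000000,
 *	};
 */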
+
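davinci_set_sysclk_rate() above picks the PLLDIV ratio in two steps: when the sysclk's hardware maximum is known it tries the closest divider, accepting an output slightly above the request as long as it stays under maxrate; otherwise it rounds the divider up so the output never exceeds the request. The register field then stores the divider minus one. A small user-space sketch of just that arithmetic, with example rates chosen to match the 100 MHz pll0_sysclk3 limit set later in this patch:

#include <stdio.h>

#define DIV_ROUND_CLOSEST(x, d) (((x) + (d) / 2) / (d))
#define DIV_ROUND_UP(x, d)      (((x) + (d) - 1) / (d))

/* Mirror of the ratio selection in davinci_set_sysclk_rate();
 * maxrate == 0 means the hardware limit for this sysclk is unknown. */
static unsigned pick_ratio(unsigned long input, unsigned long rate,
			   unsigned long maxrate)
{
	unsigned ratio = 0;

	if (input <= rate)
		return 0;		/* divide-by-1, register value 0 */

	if (maxrate) {
		ratio = DIV_ROUND_CLOSEST(input, rate);
		if (input / ratio > maxrate)
			ratio = 0;
	}
	if (ratio == 0)
		ratio = DIV_ROUND_UP(input, rate);

	return ratio - 1;		/* PLLDIVn holds divider minus one */
}

int main(void)
{
	/* 300 MHz PLL output, 100 MHz requested, 100 MHz hardware limit
	 * (the pll0_sysclk3 case): divider 3, register field 2. */
	printf("ratio field = %u\n",
	       pick_ratio(300000000UL, 100000000UL, 100000000UL));
	return 0;
}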
static unsigned long clk_leafclk_recalc(struct clk *clk)
{
if (WARN_ON(!clk->parent))
diff --git a/arch/arm/mach-davinci/clock.h b/arch/arm/mach-davinci/clock.h
index 01e36483ac3d..11099980b58b 100644
--- a/arch/arm/mach-davinci/clock.h
+++ b/arch/arm/mach-davinci/clock.h
@@ -70,6 +70,9 @@
#include <linux/list.h>
#include <asm/clkdev.h>
+#define PLLSTAT_GOSTAT BIT(0)
+#define PLLCMD_GOSET BIT(0)
+
struct pll_data {
u32 phys_base;
void __iomem *base;
@@ -86,6 +89,7 @@ struct clk {
struct module *owner;
const char *name;
unsigned long rate;
+ unsigned long maxrate; /* H/W supported max rate */
u8 usecount;
u8 lpsc;
u8 gpsc;
@@ -118,6 +122,7 @@ struct clk {
int davinci_clk_init(struct clk_lookup *clocks);
int davinci_set_pllrate(struct pll_data *pll, unsigned int prediv,
unsigned int mult, unsigned int postdiv);
+int davinci_set_sysclk_rate(struct clk *clk, unsigned long rate);
extern struct platform_device davinci_wdt_device;
extern void davinci_watchdog_reset(struct platform_device *);
diff --git a/arch/arm/mach-davinci/cpufreq.c b/arch/arm/mach-davinci/cpufreq.c
index d3fa6de1e20f..343de73161fa 100644
--- a/arch/arm/mach-davinci/cpufreq.c
+++ b/arch/arm/mach-davinci/cpufreq.c
@@ -34,6 +34,8 @@
struct davinci_cpufreq {
struct device *dev;
struct clk *armclk;
+ struct clk *asyncclk;
+ unsigned long asyncrate;
};
static struct davinci_cpufreq cpufreq;
@@ -104,15 +106,27 @@ static int davinci_target(struct cpufreq_policy *policy,
cpufreq_notify_transition(&freqs, CPUFREQ_PRECHANGE);
/* if moving to higher frequency, up the voltage beforehand */
- if (pdata->set_voltage && freqs.new > freqs.old)
- pdata->set_voltage(idx);
+ if (pdata->set_voltage && freqs.new > freqs.old) {
+ ret = pdata->set_voltage(idx);
+ if (ret)
+ goto out;
+ }
ret = clk_set_rate(armclk, idx);
+ if (ret)
+ goto out;
+
+ if (cpufreq.asyncclk) {
+ ret = clk_set_rate(cpufreq.asyncclk, cpufreq.asyncrate);
+ if (ret)
+ goto out;
+ }
/* if moving to lower freq, lower the voltage after lowering freq */
if (pdata->set_voltage && freqs.new < freqs.old)
pdata->set_voltage(idx);
+out:
cpufreq_notify_transition(&freqs, CPUFREQ_POSTCHANGE);
return ret;
@@ -185,6 +199,7 @@ static struct cpufreq_driver davinci_driver = {
static int __init davinci_cpufreq_probe(struct platform_device *pdev)
{
struct davinci_cpufreq_config *pdata = pdev->dev.platform_data;
+ struct clk *asyncclk;
if (!pdata)
return -EINVAL;
@@ -199,6 +214,12 @@ static int __init davinci_cpufreq_probe(struct platform_device *pdev)
return PTR_ERR(cpufreq.armclk);
}
+ asyncclk = clk_get(cpufreq.dev, "async");
+ if (!IS_ERR(asyncclk)) {
+ cpufreq.asyncclk = asyncclk;
+ cpufreq.asyncrate = clk_get_rate(asyncclk);
+ }
+
return cpufreq_register_driver(&davinci_driver);
}
@@ -206,6 +227,9 @@ static int __exit davinci_cpufreq_remove(struct platform_device *pdev)
{
clk_put(cpufreq.armclk);
+ if (cpufreq.asyncclk)
+ clk_put(cpufreq.asyncclk);
+
return cpufreq_unregister_driver(&davinci_driver);
}
diff --git a/arch/arm/mach-davinci/da850.c b/arch/arm/mach-davinci/da850.c
index 68ed58a48252..63916b902760 100644
--- a/arch/arm/mach-davinci/da850.c
+++ b/arch/arm/mach-davinci/da850.c
@@ -86,6 +86,8 @@ static struct clk pll0_sysclk3 = {
.parent = &pll0_clk,
.flags = CLK_PLL,
.div_reg = PLLDIV3,
+ .set_rate = davinci_set_sysclk_rate,
+ .maxrate = 100000000,
};
static struct clk pll0_sysclk4 = {
@@ -323,12 +325,19 @@ static struct clk lcdc_clk = {
.gpsc = 1,
};
-static struct clk mmcsd_clk = {
- .name = "mmcsd",
+static struct clk mmcsd0_clk = {
+ .name = "mmcsd0",
.parent = &pll0_sysclk2,
.lpsc = DA8XX_LPSC0_MMC_SD,
};
+static struct clk mmcsd1_clk = {
+ .name = "mmcsd1",
+ .parent = &pll0_sysclk2,
+ .lpsc = DA850_LPSC1_MMC_SD1,
+ .gpsc = 1,
+};
+
static struct clk aemif_clk = {
.name = "aemif",
.parent = &pll0_sysclk3,
@@ -375,7 +384,8 @@ static struct clk_lookup da850_clks[] = {
CLK("davinci_emac.1", NULL, &emac_clk),
CLK("davinci-mcasp.0", NULL, &mcasp_clk),
CLK("da8xx_lcdc.0", NULL, &lcdc_clk),
- CLK("davinci_mmc.0", NULL, &mmcsd_clk),
+ CLK("davinci_mmc.0", NULL, &mmcsd0_clk),
+ CLK("davinci_mmc.1", NULL, &mmcsd1_clk),
CLK(NULL, "aemif", &aemif_clk),
CLK(NULL, NULL, NULL),
};
@@ -572,15 +582,9 @@ const short da850_cpgmac_pins[] __initdata = {
DA850_MII_TXD_2, DA850_MII_TXD_1, DA850_MII_TXD_0, DA850_MII_RXER,
DA850_MII_CRS, DA850_MII_RXCLK, DA850_MII_RXDV, DA850_MII_RXD_3,
DA850_MII_RXD_2, DA850_MII_RXD_1, DA850_MII_RXD_0, DA850_MDIO_CLK,
- DA850_MDIO_D,
- -1
-};
-
-const short da850_rmii_pins[] __initdata = {
- DA850_RMII_TXD_0, DA850_RMII_TXD_1, DA850_RMII_TXEN,
- DA850_RMII_CRS_DV, DA850_RMII_RXD_0, DA850_RMII_RXD_1,
- DA850_RMII_RXER, DA850_RMII_MHZ_50_CLK, DA850_MDIO_CLK,
- DA850_MDIO_D,
+ DA850_MDIO_D, DA850_RMII_TXD_0, DA850_RMII_TXD_1, DA850_RMII_TXEN,
+ DA850_RMII_CRS_DV, DA850_RMII_RXD_0, DA850_RMII_RXD_1, DA850_RMII_RXER,
+ DA850_RMII_MHZ_50_CLK,
-1
};
@@ -607,27 +611,19 @@ const short da850_mmcsd0_pins[] __initdata = {
-1
};
-const short da850_nand_pins[] __initdata = {
- DA850_EMA_D_7, DA850_EMA_D_6, DA850_EMA_D_5, DA850_EMA_D_4,
- DA850_EMA_D_3, DA850_EMA_D_2, DA850_EMA_D_1, DA850_EMA_D_0,
- DA850_EMA_A_1, DA850_EMA_A_2, DA850_NEMA_CS_3, DA850_NEMA_CS_4,
- DA850_NEMA_WE, DA850_NEMA_OE,
- -1
-};
-
-const short da850_nor_pins[] __initdata = {
+const short da850_emif25_pins[] __initdata = {
DA850_EMA_BA_1, DA850_EMA_CLK, DA850_EMA_WAIT_1, DA850_NEMA_CS_2,
- DA850_NEMA_WE, DA850_NEMA_OE, DA850_EMA_D_0, DA850_EMA_D_1,
- DA850_EMA_D_2, DA850_EMA_D_3, DA850_EMA_D_4, DA850_EMA_D_5,
- DA850_EMA_D_6, DA850_EMA_D_7, DA850_EMA_D_8, DA850_EMA_D_9,
- DA850_EMA_D_10, DA850_EMA_D_11, DA850_EMA_D_12, DA850_EMA_D_13,
- DA850_EMA_D_14, DA850_EMA_D_15, DA850_EMA_A_0, DA850_EMA_A_1,
- DA850_EMA_A_2, DA850_EMA_A_3, DA850_EMA_A_4, DA850_EMA_A_5,
- DA850_EMA_A_6, DA850_EMA_A_7, DA850_EMA_A_8, DA850_EMA_A_9,
- DA850_EMA_A_10, DA850_EMA_A_11, DA850_EMA_A_12, DA850_EMA_A_13,
- DA850_EMA_A_14, DA850_EMA_A_15, DA850_EMA_A_16, DA850_EMA_A_17,
- DA850_EMA_A_18, DA850_EMA_A_19, DA850_EMA_A_20, DA850_EMA_A_21,
- DA850_EMA_A_22, DA850_EMA_A_23,
+ DA850_NEMA_CS_3, DA850_NEMA_CS_4, DA850_NEMA_WE, DA850_NEMA_OE,
+ DA850_EMA_D_0, DA850_EMA_D_1, DA850_EMA_D_2, DA850_EMA_D_3,
+ DA850_EMA_D_4, DA850_EMA_D_5, DA850_EMA_D_6, DA850_EMA_D_7,
+ DA850_EMA_D_8, DA850_EMA_D_9, DA850_EMA_D_10, DA850_EMA_D_11,
+ DA850_EMA_D_12, DA850_EMA_D_13, DA850_EMA_D_14, DA850_EMA_D_15,
+ DA850_EMA_A_0, DA850_EMA_A_1, DA850_EMA_A_2, DA850_EMA_A_3,
+ DA850_EMA_A_4, DA850_EMA_A_5, DA850_EMA_A_6, DA850_EMA_A_7,
+ DA850_EMA_A_8, DA850_EMA_A_9, DA850_EMA_A_10, DA850_EMA_A_11,
+ DA850_EMA_A_12, DA850_EMA_A_13, DA850_EMA_A_14, DA850_EMA_A_15,
+ DA850_EMA_A_16, DA850_EMA_A_17, DA850_EMA_A_18, DA850_EMA_A_19,
+ DA850_EMA_A_20, DA850_EMA_A_21, DA850_EMA_A_22, DA850_EMA_A_23,
-1
};
@@ -851,7 +847,7 @@ static const struct da850_opp da850_opp_300 = {
.prediv = 1,
.mult = 25,
.postdiv = 2,
- .cvdd_min = 1140000,
+ .cvdd_min = 1200000,
.cvdd_max = 1320000,
};
@@ -860,7 +856,7 @@ static const struct da850_opp da850_opp_200 = {
.prediv = 1,
.mult = 25,
.postdiv = 3,
- .cvdd_min = 1050000,
+ .cvdd_min = 1100000,
.cvdd_max = 1160000,
};
@@ -869,7 +865,7 @@ static const struct da850_opp da850_opp_96 = {
.prediv = 1,
.mult = 20,
.postdiv = 5,
- .cvdd_min = 950000,
+ .cvdd_min = 1000000,
.cvdd_max = 1050000,
};
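Each da850_opp_* entry above describes an operating point as PLL settings rather than as a frequency: output = ref * mult / (prediv * postdiv). A quick stand-alone check of the three tables, assuming the 24 MHz OSCIN reference commonly used on DA850 boards (the reference rate is an assumption; it is not part of this patch):

#include <stdio.h>

struct da850_opp_sketch {
	const char *name;
	unsigned prediv, mult, postdiv;
};

int main(void)
{
	/* Values copied from the da850_opp_300/200/96 tables in this patch. */
	const struct da850_opp_sketch opps[] = {
		{ "opp_300", 1, 25, 2 },
		{ "opp_200", 1, 25, 3 },
		{ "opp_96",  1, 20, 5 },
	};
	const unsigned long ref = 24000000UL;	/* assumed OSCIN rate */

	for (unsigned i = 0; i < sizeof(opps) / sizeof(opps[0]); i++)
		printf("%s -> %lu Hz\n", opps[i].name,
		       ref * opps[i].mult / opps[i].prediv / opps[i].postdiv);
	return 0;
}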
@@ -929,10 +925,16 @@ static struct platform_device da850_cpufreq_device = {
.dev = {
.platform_data = &cpufreq_info,
},
+ .id = -1,
};
-int __init da850_register_cpufreq(void)
+int __init da850_register_cpufreq(char *async_clk)
{
+ /* cpufreq driver can help keep an "async" clock constant */
+ if (async_clk)
+ clk_add_alias("async", da850_cpufreq_device.name,
+ async_clk, NULL);
+
return platform_device_register(&da850_cpufreq_device);
}
@@ -983,7 +985,7 @@ static int da850_set_pll0rate(struct clk *clk, unsigned long index)
return 0;
}
#else
-int __init da850_register_cpufreq(void)
+int __init da850_register_cpufreq(char *async_clk)
{
return 0;
}
diff --git a/arch/arm/mach-davinci/devices-da8xx.c b/arch/arm/mach-davinci/devices-da8xx.c
index 52bc7b1c6ca3..9eec63070e0c 100644
--- a/arch/arm/mach-davinci/devices-da8xx.c
+++ b/arch/arm/mach-davinci/devices-da8xx.c
@@ -24,6 +24,7 @@
#include "clock.h"
#define DA8XX_TPCC_BASE 0x01c00000
+#define DA850_MMCSD1_BASE 0x01e1b000
#define DA850_TPCC1_BASE 0x01e30000
#define DA8XX_TPTC0_BASE 0x01c08000
#define DA8XX_TPTC1_BASE 0x01c08400
@@ -41,7 +42,6 @@
#define DA8XX_EMAC_CTRL_REG_OFFSET 0x3000
#define DA8XX_EMAC_MOD_REG_OFFSET 0x2000
#define DA8XX_EMAC_RAM_OFFSET 0x0000
-#define DA8XX_MDIO_REG_OFFSET 0x4000
#define DA8XX_EMAC_CTRL_RAM_SIZE SZ_8K
void __iomem *da8xx_syscfg0_base;
@@ -351,7 +351,7 @@ int __init da8xx_register_watchdog(void)
static struct resource da8xx_emac_resources[] = {
{
.start = DA8XX_EMAC_CPPI_PORT_BASE,
- .end = DA8XX_EMAC_CPPI_PORT_BASE + 0x5000 - 1,
+ .end = DA8XX_EMAC_CPPI_PORT_BASE + SZ_16K - 1,
.flags = IORESOURCE_MEM,
},
{
@@ -380,7 +380,6 @@ struct emac_platform_data da8xx_emac_pdata = {
.ctrl_reg_offset = DA8XX_EMAC_CTRL_REG_OFFSET,
.ctrl_mod_reg_offset = DA8XX_EMAC_MOD_REG_OFFSET,
.ctrl_ram_offset = DA8XX_EMAC_RAM_OFFSET,
- .mdio_reg_offset = DA8XX_MDIO_REG_OFFSET,
.ctrl_ram_size = DA8XX_EMAC_CTRL_RAM_SIZE,
.version = EMAC_VERSION_2,
};
@@ -395,9 +394,34 @@ static struct platform_device da8xx_emac_device = {
.resource = da8xx_emac_resources,
};
+static struct resource da8xx_mdio_resources[] = {
+ {
+ .start = DA8XX_EMAC_MDIO_BASE,
+ .end = DA8XX_EMAC_MDIO_BASE + SZ_4K - 1,
+ .flags = IORESOURCE_MEM,
+ },
+};
+
+static struct platform_device da8xx_mdio_device = {
+ .name = "davinci_mdio",
+ .id = 0,
+ .num_resources = ARRAY_SIZE(da8xx_mdio_resources),
+ .resource = da8xx_mdio_resources,
+};
+
int __init da8xx_register_emac(void)
{
- return platform_device_register(&da8xx_emac_device);
+ int ret;
+
+ ret = platform_device_register(&da8xx_mdio_device);
+ if (ret < 0)
+ return ret;
+ ret = platform_device_register(&da8xx_emac_device);
+ if (ret < 0)
+ return ret;
+ ret = clk_add_alias(NULL, dev_name(&da8xx_mdio_device.dev),
+ NULL, &da8xx_emac_device.dev);
+ return ret;
}
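da8xx_register_emac() now registers two devices: the MDIO block becomes its own 4K davinci_mdio device while the EMAC window shrinks to 16K, and clk_add_alias() lets the MDIO device borrow the EMAC clock since the MDIO registers have no power domain of their own. A stand-alone sketch of the resulting, non-overlapping windows, assuming the MDIO block sits 0x4000 above the EMAC base as the DM365/DM644x/DM646x headers later in this patch define (the base address used here is purely illustrative):

#include <stdio.h>

#define SZ_4K   0x1000
#define SZ_16K  0x4000

int main(void)
{
	/* Illustrative base only; the real DA8XX_EMAC_CPPI_PORT_BASE comes
	 * from the SoC header and is not shown in this hunk. */
	unsigned long emac_base = 0x01e20000;
	unsigned long mdio_base = emac_base + 0x4000;	/* assumed MDIO offset */

	printf("emac: 0x%08lx-0x%08lx\n", emac_base, emac_base + SZ_16K - 1);
	printf("mdio: 0x%08lx-0x%08lx\n", mdio_base, mdio_base + SZ_4K - 1);
	return 0;
}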
static struct resource da830_mcasp1_resources[] = {
@@ -566,6 +590,44 @@ int __init da8xx_register_mmcsd0(struct davinci_mmc_config *config)
return platform_device_register(&da8xx_mmcsd0_device);
}
+#ifdef CONFIG_ARCH_DAVINCI_DA850
+static struct resource da850_mmcsd1_resources[] = {
+ { /* registers */
+ .start = DA850_MMCSD1_BASE,
+ .end = DA850_MMCSD1_BASE + SZ_4K - 1,
+ .flags = IORESOURCE_MEM,
+ },
+ { /* interrupt */
+ .start = IRQ_DA850_MMCSDINT0_1,
+ .end = IRQ_DA850_MMCSDINT0_1,
+ .flags = IORESOURCE_IRQ,
+ },
+ { /* DMA RX */
+ .start = EDMA_CTLR_CHAN(1, 28),
+ .end = EDMA_CTLR_CHAN(1, 28),
+ .flags = IORESOURCE_DMA,
+ },
+ { /* DMA TX */
+ .start = EDMA_CTLR_CHAN(1, 29),
+ .end = EDMA_CTLR_CHAN(1, 29),
+ .flags = IORESOURCE_DMA,
+ },
+};
+
+static struct platform_device da850_mmcsd1_device = {
+ .name = "davinci_mmc",
+ .id = 1,
+ .num_resources = ARRAY_SIZE(da850_mmcsd1_resources),
+ .resource = da850_mmcsd1_resources,
+};
+
+int __init da850_register_mmcsd1(struct davinci_mmc_config *config)
+{
+ da850_mmcsd1_device.dev.platform_data = config;
+ return platform_device_register(&da850_mmcsd1_device);
+}
+#endif
+
static struct resource da8xx_rtc_resources[] = {
{
.start = DA8XX_RTC_BASE,
diff --git a/arch/arm/mach-davinci/devices-tnetv107x.c b/arch/arm/mach-davinci/devices-tnetv107x.c
index 2718a3a90dff..c9a86d8130d1 100644
--- a/arch/arm/mach-davinci/devices-tnetv107x.c
+++ b/arch/arm/mach-davinci/devices-tnetv107x.c
@@ -31,8 +31,10 @@
#define TNETV107X_TPTC0_BASE 0x01c10000
#define TNETV107X_TPTC1_BASE 0x01c10400
#define TNETV107X_WDOG_BASE 0x08086700
+#define TNETV107X_TSC_BASE 0x08088500
#define TNETV107X_SDIO0_BASE 0x08088700
#define TNETV107X_SDIO1_BASE 0x08088800
+#define TNETV107X_KEYPAD_BASE 0x08088a00
#define TNETV107X_ASYNC_EMIF_CNTRL_BASE 0x08200000
#define TNETV107X_ASYNC_EMIF_DATA_CE0_BASE 0x30000000
#define TNETV107X_ASYNC_EMIF_DATA_CE1_BASE 0x40000000
@@ -298,12 +300,55 @@ static int __init nand_init(int chipsel, struct davinci_nand_pdata *data)
return platform_device_register(pdev);
}
+static struct resource keypad_resources[] = {
+ {
+ .start = TNETV107X_KEYPAD_BASE,
+ .end = TNETV107X_KEYPAD_BASE + 0xff,
+ .flags = IORESOURCE_MEM,
+ },
+ {
+ .start = IRQ_TNETV107X_KEYPAD,
+ .flags = IORESOURCE_IRQ,
+ .name = "press",
+ },
+ {
+ .start = IRQ_TNETV107X_KEYPAD_FREE,
+ .flags = IORESOURCE_IRQ,
+ .name = "release",
+ },
+};
+
+static struct platform_device keypad_device = {
+ .name = "tnetv107x-keypad",
+ .num_resources = ARRAY_SIZE(keypad_resources),
+ .resource = keypad_resources,
+};
+
+static struct resource tsc_resources[] = {
+ {
+ .start = TNETV107X_TSC_BASE,
+ .end = TNETV107X_TSC_BASE + 0xff,
+ .flags = IORESOURCE_MEM,
+ },
+ {
+ .start = IRQ_TNETV107X_TSC,
+ .flags = IORESOURCE_IRQ,
+ },
+};
+
+static struct platform_device tsc_device = {
+ .name = "tnetv107x-ts",
+ .num_resources = ARRAY_SIZE(tsc_resources),
+ .resource = tsc_resources,
+};
+
void __init tnetv107x_devices_init(struct tnetv107x_device_info *info)
{
int i;
platform_device_register(&edma_device);
platform_device_register(&tnetv107x_wdt_device);
+ platform_device_register(&tsc_device);
if (info->serial_config)
davinci_serial_init(info->serial_config);
@@ -317,4 +362,9 @@ void __init tnetv107x_devices_init(struct tnetv107x_device_info *info)
for (i = 0; i < 4; i++)
if (info->nand_config[i])
nand_init(i, info->nand_config[i]);
+
+ if (info->keypad_config) {
+ keypad_device.dev.platform_data = info->keypad_config;
+ platform_device_register(&keypad_device);
+ }
}
diff --git a/arch/arm/mach-davinci/devices.c b/arch/arm/mach-davinci/devices.c
index 8b7201e4c79c..22ebc64bc9d9 100644
--- a/arch/arm/mach-davinci/devices.c
+++ b/arch/arm/mach-davinci/devices.c
@@ -213,7 +213,7 @@ void __init davinci_setup_mmc(int module, struct davinci_mmc_config *config)
IO_ADDRESS(DAVINCI_SYSTEM_MODULE_BASE + 0x7c);
/* Configure pull down control */
- __raw_writel((__raw_readl(pupdctl1) & ~0x400),
+ __raw_writel((__raw_readl(pupdctl1) & ~0xfc0),
pupdctl1);
mmcsd1_resources[0].start = DM365_MMCSD1_BASE;
@@ -295,6 +295,18 @@ static void davinci_init_wdt(void)
/*-------------------------------------------------------------------------*/
+struct platform_device davinci_pcm_device = {
+ .name = "davinci-pcm-audio",
+ .id = -1,
+};
+
+static void davinci_init_pcm(void)
+{
+ platform_device_register(&davinci_pcm_device);
+}
+
+/*-------------------------------------------------------------------------*/
+
struct davinci_timer_instance davinci_timer_instance[2] = {
{
.base = DAVINCI_TIMER0_BASE,
@@ -315,6 +327,7 @@ static int __init davinci_init_devices(void)
/* please keep these calls, and their implementations above,
* in alphabetical order so they're easier to sort through.
*/
+ davinci_init_pcm();
davinci_init_wdt();
return 0;
diff --git a/arch/arm/mach-davinci/dm355.c b/arch/arm/mach-davinci/dm355.c
index 9be261beae7d..2652af124acd 100644
--- a/arch/arm/mach-davinci/dm355.c
+++ b/arch/arm/mach-davinci/dm355.c
@@ -359,8 +359,8 @@ static struct clk_lookup dm355_clks[] = {
CLK(NULL, "uart1", &uart1_clk),
CLK(NULL, "uart2", &uart2_clk),
CLK("i2c_davinci.1", NULL, &i2c_clk),
- CLK("davinci-asp.0", NULL, &asp0_clk),
- CLK("davinci-asp.1", NULL, &asp1_clk),
+ CLK("davinci-mcbsp.0", NULL, &asp0_clk),
+ CLK("davinci-mcbsp.1", NULL, &asp1_clk),
CLK("davinci_mmc.0", NULL, &mmcsd0_clk),
CLK("davinci_mmc.1", NULL, &mmcsd1_clk),
CLK("spi_davinci.0", NULL, &spi0_clk),
@@ -664,7 +664,7 @@ static struct resource dm355_asp1_resources[] = {
};
static struct platform_device dm355_asp1_device = {
- .name = "davinci-asp",
+ .name = "davinci-mcbsp",
.id = 1,
.num_resources = ARRAY_SIZE(dm355_asp1_resources),
.resource = dm355_asp1_resources,
diff --git a/arch/arm/mach-davinci/dm365.c b/arch/arm/mach-davinci/dm365.c
index 7781e35daec3..c466d710d3c1 100644
--- a/arch/arm/mach-davinci/dm365.c
+++ b/arch/arm/mach-davinci/dm365.c
@@ -459,7 +459,7 @@ static struct clk_lookup dm365_clks[] = {
CLK(NULL, "usb", &usb_clk),
CLK("davinci_emac.1", NULL, &emac_clk),
CLK("davinci_voicecodec", NULL, &voicecodec_clk),
- CLK("davinci-asp.0", NULL, &asp0_clk),
+ CLK("davinci-mcbsp", NULL, &asp0_clk),
CLK(NULL, "rto", &rto_clk),
CLK(NULL, "mjcp", &mjcp_clk),
CLK(NULL, NULL, NULL),
@@ -691,7 +691,6 @@ static struct emac_platform_data dm365_emac_pdata = {
.ctrl_reg_offset = DM365_EMAC_CNTRL_OFFSET,
.ctrl_mod_reg_offset = DM365_EMAC_CNTRL_MOD_OFFSET,
.ctrl_ram_offset = DM365_EMAC_CNTRL_RAM_OFFSET,
- .mdio_reg_offset = DM365_EMAC_MDIO_OFFSET,
.ctrl_ram_size = DM365_EMAC_CNTRL_RAM_SIZE,
.version = EMAC_VERSION_2,
};
@@ -699,7 +698,7 @@ static struct emac_platform_data dm365_emac_pdata = {
static struct resource dm365_emac_resources[] = {
{
.start = DM365_EMAC_BASE,
- .end = DM365_EMAC_BASE + 0x47ff,
+ .end = DM365_EMAC_BASE + SZ_16K - 1,
.flags = IORESOURCE_MEM,
},
{
@@ -734,6 +733,21 @@ static struct platform_device dm365_emac_device = {
.resource = dm365_emac_resources,
};
+static struct resource dm365_mdio_resources[] = {
+ {
+ .start = DM365_EMAC_MDIO_BASE,
+ .end = DM365_EMAC_MDIO_BASE + SZ_4K - 1,
+ .flags = IORESOURCE_MEM,
+ },
+};
+
+static struct platform_device dm365_mdio_device = {
+ .name = "davinci_mdio",
+ .id = 0,
+ .num_resources = ARRAY_SIZE(dm365_mdio_resources),
+ .resource = dm365_mdio_resources,
+};
+
static u8 dm365_default_priorities[DAVINCI_N_AINTC_IRQ] = {
[IRQ_VDINT0] = 2,
[IRQ_VDINT1] = 6,
@@ -908,8 +922,8 @@ static struct resource dm365_asp_resources[] = {
};
static struct platform_device dm365_asp_device = {
- .name = "davinci-asp",
- .id = 0,
+ .name = "davinci-mcbsp",
+ .id = -1,
.num_resources = ARRAY_SIZE(dm365_asp_resources),
.resource = dm365_asp_resources,
};
@@ -1219,7 +1233,12 @@ static int __init dm365_init_devices(void)
davinci_cfg_reg(DM365_INT_EDMA_CC);
platform_device_register(&dm365_edma_device);
+
+ platform_device_register(&dm365_mdio_device);
platform_device_register(&dm365_emac_device);
+ clk_add_alias(NULL, dev_name(&dm365_mdio_device.dev),
+ NULL, &dm365_emac_device.dev);
+
/* Add isif clock alias */
clk_add_alias("master", dm365_isif_dev.name, "vpss_master", NULL);
platform_device_register(&dm365_vpss_device);
diff --git a/arch/arm/mach-davinci/dm644x.c b/arch/arm/mach-davinci/dm644x.c
index 5e5b0a7831fb..9a2376b3137c 100644
--- a/arch/arm/mach-davinci/dm644x.c
+++ b/arch/arm/mach-davinci/dm644x.c
@@ -302,7 +302,7 @@ static struct clk_lookup dm644x_clks[] = {
CLK("davinci_emac.1", NULL, &emac_clk),
CLK("i2c_davinci.1", NULL, &i2c_clk),
CLK("palm_bk3710", NULL, &ide_clk),
- CLK("davinci-asp", NULL, &asp_clk),
+ CLK("davinci-mcbsp", NULL, &asp_clk),
CLK("davinci_mmc.0", NULL, &mmcsd_clk),
CLK(NULL, "spi", &spi_clk),
CLK(NULL, "gpio", &gpio_clk),
@@ -322,7 +322,6 @@ static struct emac_platform_data dm644x_emac_pdata = {
.ctrl_reg_offset = DM644X_EMAC_CNTRL_OFFSET,
.ctrl_mod_reg_offset = DM644X_EMAC_CNTRL_MOD_OFFSET,
.ctrl_ram_offset = DM644X_EMAC_CNTRL_RAM_OFFSET,
- .mdio_reg_offset = DM644X_EMAC_MDIO_OFFSET,
.ctrl_ram_size = DM644X_EMAC_CNTRL_RAM_SIZE,
.version = EMAC_VERSION_1,
};
@@ -330,7 +329,7 @@ static struct emac_platform_data dm644x_emac_pdata = {
static struct resource dm644x_emac_resources[] = {
{
.start = DM644X_EMAC_BASE,
- .end = DM644X_EMAC_BASE + 0x47ff,
+ .end = DM644X_EMAC_BASE + SZ_16K - 1,
.flags = IORESOURCE_MEM,
},
{
@@ -350,6 +349,21 @@ static struct platform_device dm644x_emac_device = {
.resource = dm644x_emac_resources,
};
+static struct resource dm644x_mdio_resources[] = {
+ {
+ .start = DM644X_EMAC_MDIO_BASE,
+ .end = DM644X_EMAC_MDIO_BASE + SZ_4K - 1,
+ .flags = IORESOURCE_MEM,
+ },
+};
+
+static struct platform_device dm644x_mdio_device = {
+ .name = "davinci_mdio",
+ .id = 0,
+ .num_resources = ARRAY_SIZE(dm644x_mdio_resources),
+ .resource = dm644x_mdio_resources,
+};
+
/*
* Device specific mux setup
*
@@ -566,7 +580,7 @@ static struct resource dm644x_asp_resources[] = {
};
static struct platform_device dm644x_asp_device = {
- .name = "davinci-asp",
+ .name = "davinci-mcbsp",
.id = -1,
.num_resources = ARRAY_SIZE(dm644x_asp_resources),
.resource = dm644x_asp_resources,
@@ -776,7 +790,12 @@ static int __init dm644x_init_devices(void)
clk_add_alias("master", dm644x_ccdc_dev.name, "vpss_master", NULL);
clk_add_alias("slave", dm644x_ccdc_dev.name, "vpss_slave", NULL);
platform_device_register(&dm644x_edma_device);
+
+ platform_device_register(&dm644x_mdio_device);
platform_device_register(&dm644x_emac_device);
+ clk_add_alias(NULL, dev_name(&dm644x_mdio_device.dev),
+ NULL, &dm644x_emac_device.dev);
+
platform_device_register(&dm644x_vpss_device);
platform_device_register(&dm644x_ccdc_dev);
platform_device_register(&vpfe_capture_dev);
diff --git a/arch/arm/mach-davinci/dm646x.c b/arch/arm/mach-davinci/dm646x.c
index 26e8a9c7f50b..1e0f809644bb 100644
--- a/arch/arm/mach-davinci/dm646x.c
+++ b/arch/arm/mach-davinci/dm646x.c
@@ -358,7 +358,6 @@ static struct emac_platform_data dm646x_emac_pdata = {
.ctrl_reg_offset = DM646X_EMAC_CNTRL_OFFSET,
.ctrl_mod_reg_offset = DM646X_EMAC_CNTRL_MOD_OFFSET,
.ctrl_ram_offset = DM646X_EMAC_CNTRL_RAM_OFFSET,
- .mdio_reg_offset = DM646X_EMAC_MDIO_OFFSET,
.ctrl_ram_size = DM646X_EMAC_CNTRL_RAM_SIZE,
.version = EMAC_VERSION_2,
};
@@ -366,7 +365,7 @@ static struct emac_platform_data dm646x_emac_pdata = {
static struct resource dm646x_emac_resources[] = {
{
.start = DM646X_EMAC_BASE,
- .end = DM646X_EMAC_BASE + 0x47ff,
+ .end = DM646X_EMAC_BASE + SZ_16K - 1,
.flags = IORESOURCE_MEM,
},
{
@@ -401,6 +400,21 @@ static struct platform_device dm646x_emac_device = {
.resource = dm646x_emac_resources,
};
+static struct resource dm646x_mdio_resources[] = {
+ {
+ .start = DM646X_EMAC_MDIO_BASE,
+ .end = DM646X_EMAC_MDIO_BASE + SZ_4K - 1,
+ .flags = IORESOURCE_MEM,
+ },
+};
+
+static struct platform_device dm646x_mdio_device = {
+ .name = "davinci_mdio",
+ .id = 0,
+ .num_resources = ARRAY_SIZE(dm646x_mdio_resources),
+ .resource = dm646x_mdio_resources,
+};
+
/*
* Device specific mux setup
*
@@ -896,7 +910,11 @@ static int __init dm646x_init_devices(void)
if (!cpu_is_davinci_dm646x())
return 0;
+ platform_device_register(&dm646x_mdio_device);
platform_device_register(&dm646x_emac_device);
+ clk_add_alias(NULL, dev_name(&dm646x_mdio_device.dev),
+ NULL, &dm646x_emac_device.dev);
+
return 0;
}
postcore_initcall(dm646x_init_devices);
diff --git a/arch/arm/mach-davinci/dma.c b/arch/arm/mach-davinci/dma.c
index 2ede598b77dd..6b9669869c46 100644
--- a/arch/arm/mach-davinci/dma.c
+++ b/arch/arm/mach-davinci/dma.c
@@ -354,10 +354,12 @@ static int irq2ctlr(int irq)
static irqreturn_t dma_irq_handler(int irq, void *data)
{
int i;
- unsigned ctlr;
+ int ctlr;
unsigned int cnt = 0;
ctlr = irq2ctlr(irq);
+ if (ctlr < 0)
+ return IRQ_NONE;
dev_dbg(data, "dma_irq_handler\n");
@@ -408,10 +410,12 @@ static irqreturn_t dma_irq_handler(int irq, void *data)
static irqreturn_t dma_ccerr_handler(int irq, void *data)
{
int i;
- unsigned ctlr;
+ int ctlr;
unsigned int cnt = 0;
ctlr = irq2ctlr(irq);
+ if (ctlr < 0)
+ return IRQ_NONE;
dev_dbg(data, "dma_ccerr_handler\n");
diff --git a/arch/arm/mach-davinci/include/mach/aemif.h b/arch/arm/mach-davinci/include/mach/aemif.h
new file mode 100644
index 000000000000..05b293443097
--- /dev/null
+++ b/arch/arm/mach-davinci/include/mach/aemif.h
@@ -0,0 +1,36 @@
+/*
+ * TI DaVinci AEMIF support
+ *
+ * Copyright 2010 (C) Texas Instruments, Inc. http://www.ti.com/
+ *
+ * This file is licensed under the terms of the GNU General Public License
+ * version 2. This program is licensed "as is" without any warranty of any
+ * kind, whether express or implied.
+ */
+#ifndef _MACH_DAVINCI_AEMIF_H
+#define _MACH_DAVINCI_AEMIF_H
+
+#define NRCSR_OFFSET 0x00
+#define AWCCR_OFFSET 0x04
+#define A1CR_OFFSET 0x10
+
+#define ACR_ASIZE_MASK 0x3
+#define ACR_EW_MASK BIT(30)
+#define ACR_SS_MASK BIT(31)
+
+/* All timings in nanoseconds */
+struct davinci_aemif_timing {
+ u8 wsetup;
+ u8 wstrobe;
+ u8 whold;
+
+ u8 rsetup;
+ u8 rstrobe;
+ u8 rhold;
+
+ u8 ta;
+};
+
+int davinci_aemif_setup_timing(struct davinci_aemif_timing *t,
+ void __iomem *base, unsigned cs);
+#endif
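davinci_aemif_setup_timing() (implemented elsewhere in this series) has to convert each nanosecond field above into a count of AEMIF clock cycles before programming the chip-select config register. A hedged sketch of that ns-to-cycles arithmetic, rounding up so the programmed timing is never shorter than requested; the real conversion may also adjust for a cycles-minus-one register encoding, which is not shown in this header, so only the rounding is sketched here:

#include <stdio.h>

/* Round a timing in nanoseconds up to a whole number of AEMIF clock
 * cycles; clk_hz is the AEMIF module clock rate. */
static unsigned ns_to_cycles(unsigned ns, unsigned long clk_hz)
{
	unsigned long long ticks = (unsigned long long)ns * clk_hz;

	return (unsigned)((ticks + 999999999ULL) / 1000000000ULL);
}

int main(void)
{
	/* Example: a 30 ns write strobe with a 100 MHz AEMIF clock. */
	printf("wstrobe = %u cycles\n", ns_to_cycles(30, 100000000UL));
	return 0;
}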
diff --git a/arch/arm/mach-davinci/include/mach/da8xx.h b/arch/arm/mach-davinci/include/mach/da8xx.h
index 3c07059f526e..4247b3f53b33 100644
--- a/arch/arm/mach-davinci/include/mach/da8xx.h
+++ b/arch/arm/mach-davinci/include/mach/da8xx.h
@@ -76,9 +76,10 @@ int da8xx_register_usb11(struct da8xx_ohci_root_hub *pdata);
int da8xx_register_emac(void);
int da8xx_register_lcdc(struct da8xx_lcdc_platform_data *pdata);
int da8xx_register_mmcsd0(struct davinci_mmc_config *config);
+int da850_register_mmcsd1(struct davinci_mmc_config *config);
void __init da8xx_register_mcasp(int id, struct snd_platform_data *pdata);
int da8xx_register_rtc(void);
-int da850_register_cpufreq(void);
+int da850_register_cpufreq(char *async_clk);
int da8xx_register_cpuidle(void);
void __iomem * __init da8xx_get_mem_ctlr(void);
int da850_register_pm(struct platform_device *pdev);
@@ -121,11 +122,9 @@ extern const short da850_uart2_pins[];
extern const short da850_i2c0_pins[];
extern const short da850_i2c1_pins[];
extern const short da850_cpgmac_pins[];
-extern const short da850_rmii_pins[];
extern const short da850_mcasp_pins[];
extern const short da850_lcdcntl_pins[];
extern const short da850_mmcsd0_pins[];
-extern const short da850_nand_pins[];
-extern const short da850_nor_pins[];
+extern const short da850_emif25_pins[];
#endif /* __ASM_ARCH_DAVINCI_DA8XX_H */
diff --git a/arch/arm/mach-davinci/include/mach/dm365.h b/arch/arm/mach-davinci/include/mach/dm365.h
index ea5df3b49ec4..2563bf4e93a1 100644
--- a/arch/arm/mach-davinci/include/mach/dm365.h
+++ b/arch/arm/mach-davinci/include/mach/dm365.h
@@ -21,10 +21,10 @@
#include <media/davinci/vpfe_capture.h>
#define DM365_EMAC_BASE (0x01D07000)
+#define DM365_EMAC_MDIO_BASE (DM365_EMAC_BASE + 0x4000)
#define DM365_EMAC_CNTRL_OFFSET (0x0000)
#define DM365_EMAC_CNTRL_MOD_OFFSET (0x3000)
#define DM365_EMAC_CNTRL_RAM_OFFSET (0x1000)
-#define DM365_EMAC_MDIO_OFFSET (0x4000)
#define DM365_EMAC_CNTRL_RAM_SIZE (0x2000)
/* Base of key scan register bank */
diff --git a/arch/arm/mach-davinci/include/mach/dm644x.h b/arch/arm/mach-davinci/include/mach/dm644x.h
index 6fca568a0fd2..5a1b26d4e68b 100644
--- a/arch/arm/mach-davinci/include/mach/dm644x.h
+++ b/arch/arm/mach-davinci/include/mach/dm644x.h
@@ -28,10 +28,10 @@
#include <media/davinci/vpfe_capture.h>
#define DM644X_EMAC_BASE (0x01C80000)
+#define DM644X_EMAC_MDIO_BASE (DM644X_EMAC_BASE + 0x4000)
#define DM644X_EMAC_CNTRL_OFFSET (0x0000)
#define DM644X_EMAC_CNTRL_MOD_OFFSET (0x1000)
#define DM644X_EMAC_CNTRL_RAM_OFFSET (0x2000)
-#define DM644X_EMAC_MDIO_OFFSET (0x4000)
#define DM644X_EMAC_CNTRL_RAM_SIZE (0x2000)
#define DM644X_ASYNC_EMIF_CONTROL_BASE 0x01E00000
diff --git a/arch/arm/mach-davinci/include/mach/dm646x.h b/arch/arm/mach-davinci/include/mach/dm646x.h
index 0a27ee9a70e1..7a27f3f13913 100644
--- a/arch/arm/mach-davinci/include/mach/dm646x.h
+++ b/arch/arm/mach-davinci/include/mach/dm646x.h
@@ -19,10 +19,10 @@
#include <linux/davinci_emac.h>
#define DM646X_EMAC_BASE (0x01C80000)
+#define DM646X_EMAC_MDIO_BASE (DM646X_EMAC_BASE + 0x4000)
#define DM646X_EMAC_CNTRL_OFFSET (0x0000)
#define DM646X_EMAC_CNTRL_MOD_OFFSET (0x1000)
#define DM646X_EMAC_CNTRL_RAM_OFFSET (0x2000)
-#define DM646X_EMAC_MDIO_OFFSET (0x4000)
#define DM646X_EMAC_CNTRL_RAM_SIZE (0x2000)
#define DM646X_ASYNC_EMIF_CONTROL_BASE 0x20008000
diff --git a/arch/arm/mach-davinci/include/mach/nand.h b/arch/arm/mach-davinci/include/mach/nand.h
index b2ad8090bd10..025151049f05 100644
--- a/arch/arm/mach-davinci/include/mach/nand.h
+++ b/arch/arm/mach-davinci/include/mach/nand.h
@@ -30,9 +30,6 @@
#include <linux/mtd/nand.h>
-#define NRCSR_OFFSET 0x00
-#define AWCCR_OFFSET 0x04
-#define A1CR_OFFSET 0x10
#define NANDFCR_OFFSET 0x60
#define NANDFSR_OFFSET 0x64
#define NANDF1ECC_OFFSET 0x70
@@ -83,6 +80,9 @@ struct davinci_nand_pdata { /* platform_data */
/* Main and mirror bbt descriptor overrides */
struct nand_bbt_descr *bbt_td;
struct nand_bbt_descr *bbt_md;
+
+ /* Access timings */
+ struct davinci_aemif_timing *timing;
};
#endif /* __ARCH_ARM_DAVINCI_NAND_H */
diff --git a/arch/arm/mach-davinci/include/mach/psc.h b/arch/arm/mach-davinci/include/mach/psc.h
index 983da6e4554c..62b0858f68ca 100644
--- a/arch/arm/mach-davinci/include/mach/psc.h
+++ b/arch/arm/mach-davinci/include/mach/psc.h
@@ -172,6 +172,7 @@
#define DA8XX_LPSC1_UART2 13
#define DA8XX_LPSC1_LCDC 16
#define DA8XX_LPSC1_PWM 17
+#define DA850_LPSC1_MMC_SD1 18
#define DA8XX_LPSC1_ECAP 20
#define DA830_LPSC1_EQEP 21
#define DA850_LPSC1_TPTC2 21
diff --git a/arch/arm/mach-davinci/include/mach/tnetv107x.h b/arch/arm/mach-davinci/include/mach/tnetv107x.h
index c72064733123..5a681d880dcb 100644
--- a/arch/arm/mach-davinci/include/mach/tnetv107x.h
+++ b/arch/arm/mach-davinci/include/mach/tnetv107x.h
@@ -33,6 +33,8 @@
#ifndef __ASSEMBLY__
#include <linux/serial_8250.h>
+#include <linux/input/matrix_keypad.h>
+
#include <mach/mmc.h>
#include <mach/nand.h>
#include <mach/serial.h>
@@ -41,6 +43,7 @@ struct tnetv107x_device_info {
struct davinci_uart_config *serial_config;
struct davinci_mmc_config *mmc_config[2]; /* 2 controllers */
struct davinci_nand_pdata *nand_config[4]; /* 4 chipsels */
+ struct matrix_keypad_platform_data *keypad_config;
};
extern struct platform_device tnetv107x_wdt_device;
diff --git a/arch/arm/mach-davinci/include/mach/uncompress.h b/arch/arm/mach-davinci/include/mach/uncompress.h
index 15a6192ad6eb..47723e8d75a4 100644
--- a/arch/arm/mach-davinci/include/mach/uncompress.h
+++ b/arch/arm/mach-davinci/include/mach/uncompress.h
@@ -88,6 +88,8 @@ static inline void __arch_decomp_setup(unsigned long arch_id)
/* DA8xx boards */
DEBUG_LL_DA8XX(davinci_da830_evm, 2);
DEBUG_LL_DA8XX(davinci_da850_evm, 2);
+ DEBUG_LL_DA8XX(mityomapl138, 1);
+ DEBUG_LL_DA8XX(omapl138_hawkboard, 2);
/* TNETV107x boards */
DEBUG_LL_TNETV107X(tnetv107x, 1);
diff --git a/arch/arm/mach-davinci/tnetv107x.c b/arch/arm/mach-davinci/tnetv107x.c
index 864e60482c53..daeae06430b9 100644
--- a/arch/arm/mach-davinci/tnetv107x.c
+++ b/arch/arm/mach-davinci/tnetv107x.c
@@ -104,7 +104,7 @@ static u32 pll_ext_freq[] = {
};
/* PSC control registers */
-static u32 psc_regs[] __initconst = { TNETV107X_PSC_BASE };
+static u32 psc_regs[] = { TNETV107X_PSC_BASE };
/* Host map for interrupt controller */
static u32 intc_host_map[] = { 0x01010000, 0x01010101, -1 };
@@ -581,7 +581,14 @@ static struct davinci_id ids[] = {
.part_no = 0xb8a1,
.manufacturer = 0x017,
.cpu_id = DAVINCI_CPU_ID_TNETV107X,
- .name = "tnetv107x rev1.0",
+ .name = "tnetv107x rev 1.0",
+ },
+ {
+ .variant = 0x1,
+ .part_no = 0xb8a1,
+ .manufacturer = 0x017,
+ .cpu_id = DAVINCI_CPU_ID_TNETV107X,
+ .name = "tnetv107x rev 1.1/1.2",
},
};
diff --git a/arch/arm/mach-ebsa110/include/mach/vmalloc.h b/arch/arm/mach-ebsa110/include/mach/vmalloc.h
index 60bde56fba4c..ea141b7a3e03 100644
--- a/arch/arm/mach-ebsa110/include/mach/vmalloc.h
+++ b/arch/arm/mach-ebsa110/include/mach/vmalloc.h
@@ -7,4 +7,4 @@
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*/
-#define VMALLOC_END 0xdf000000
+#define VMALLOC_END 0xdf000000UL
diff --git a/arch/arm/mach-ep93xx/clock.c b/arch/arm/mach-ep93xx/clock.c
index 4566bd1c8660..ef06c66a6f16 100644
--- a/arch/arm/mach-ep93xx/clock.c
+++ b/arch/arm/mach-ep93xx/clock.c
@@ -358,8 +358,7 @@ static int calc_clk_div(struct clk *clk, unsigned long rate,
int i, found = 0, __div = 0, __pdiv = 0;
/* Don't exceed the maximum rate */
- max_rate = max(max(clk_pll1.rate / 4, clk_pll2.rate / 4),
- clk_xtali.rate / 4);
+ max_rate = max3(clk_pll1.rate / 4, clk_pll2.rate / 4, clk_xtali.rate / 4);
rate = min(rate, max_rate);
/*
diff --git a/arch/arm/mach-ep93xx/core.c b/arch/arm/mach-ep93xx/core.c
index 4cb55d3902ff..ffdf87be2958 100644
--- a/arch/arm/mach-ep93xx/core.c
+++ b/arch/arm/mach-ep93xx/core.c
@@ -776,9 +776,15 @@ static struct platform_device ep93xx_i2s_device = {
.resource = ep93xx_i2s_resource,
};
+static struct platform_device ep93xx_pcm_device = {
+ .name = "ep93xx-pcm-audio",
+ .id = -1,
+};
+
void __init ep93xx_register_i2s(void)
{
platform_device_register(&ep93xx_i2s_device);
+ platform_device_register(&ep93xx_pcm_device);
}
#define EP93XX_SYSCON_DEVCFG_I2S_MASK (EP93XX_SYSCON_DEVCFG_I2SONSSP | \
@@ -826,6 +832,40 @@ void ep93xx_i2s_release(void)
}
EXPORT_SYMBOL(ep93xx_i2s_release);
+/*************************************************************************
+ * EP93xx AC97 audio peripheral handling
+ *************************************************************************/
+static struct resource ep93xx_ac97_resources[] = {
+ {
+ .start = EP93XX_AAC_PHYS_BASE,
+ .end = EP93XX_AAC_PHYS_BASE + 0xb0 - 1,
+ .flags = IORESOURCE_MEM,
+ },
+ {
+ .start = IRQ_EP93XX_AACINTR,
+ .end = IRQ_EP93XX_AACINTR,
+ .flags = IORESOURCE_IRQ,
+ },
+};
+
+static struct platform_device ep93xx_ac97_device = {
+ .name = "ep93xx-ac97",
+ .id = -1,
+ .num_resources = ARRAY_SIZE(ep93xx_ac97_resources),
+ .resource = ep93xx_ac97_resources,
+};
+
+void __init ep93xx_register_ac97(void)
+{
+ /*
+ * Make sure that the AC97 pins are not used by I2S.
+ */
+ ep93xx_devcfg_clear_bits(EP93XX_SYSCON_DEVCFG_I2SONAC97);
+
+ platform_device_register(&ep93xx_ac97_device);
+ platform_device_register(&ep93xx_pcm_device);
+}
+
extern void ep93xx_gpio_init(void);
void __init ep93xx_init_devices(void)
diff --git a/arch/arm/mach-ep93xx/include/mach/dma.h b/arch/arm/mach-ep93xx/include/mach/dma.h
index 3a5961d3f3b1..5e31b2b25da9 100644
--- a/arch/arm/mach-ep93xx/include/mach/dma.h
+++ b/arch/arm/mach-ep93xx/include/mach/dma.h
@@ -1,5 +1,13 @@
-/*
- * arch/arm/mach-ep93xx/include/mach/dma.h
+/**
+ * DOC: EP93xx DMA M2P memory to peripheral and peripheral to memory engine
+ *
+ * The EP93xx DMA M2P subsystem handles DMA transfers between memory and
+ * peripherals. DMA M2P channels are available for audio, UARTs and IrDA.
+ * See chapter 10 of the EP93xx users guide for full details on the DMA M2P
+ * engine.
+ *
+ * See sound/soc/ep93xx/ep93xx-pcm.c for an example use of the DMA M2P code.
+ *
*/
#ifndef __ASM_ARCH_DMA_H
@@ -8,12 +16,34 @@
#include <linux/list.h>
#include <linux/types.h>
+/**
+ * struct ep93xx_dma_buffer - Information about a buffer to be transferred
+ * using the DMA M2P engine
+ *
+ * @list: Entry in DMA buffer list
+ * @bus_addr: Physical address of the buffer
+ * @size: Size of the buffer in bytes
+ */
struct ep93xx_dma_buffer {
struct list_head list;
u32 bus_addr;
u16 size;
};
+/**
+ * struct ep93xx_dma_m2p_client - Information about a DMA M2P client
+ *
+ * @name: Unique name for this client
+ * @flags: Client flags
+ * @cookie: User data to pass to callback functions
+ * @buffer_started: Non NULL function to call when a transfer is started.
+ * The arguments are the user data cookie and the DMA
+ * buffer which is starting.
+ * @buffer_finished: Non NULL function to call when a transfer is completed.
+ * The arguments are the user data cookie, the DMA buffer
+ * which has completed, and a boolean flag indicating if
+ * the transfer had an error.
+ */
struct ep93xx_dma_m2p_client {
char *name;
u8 flags;
@@ -24,10 +54,11 @@ struct ep93xx_dma_m2p_client {
struct ep93xx_dma_buffer *buf,
int bytes, int error);
- /* Internal to the DMA code. */
+ /* private: Internal use only */
void *channel;
};
+/* DMA M2P ports */
#define EP93XX_DMA_M2P_PORT_I2S1 0x00
#define EP93XX_DMA_M2P_PORT_I2S2 0x01
#define EP93XX_DMA_M2P_PORT_AAC1 0x02
@@ -39,18 +70,80 @@ struct ep93xx_dma_m2p_client {
#define EP93XX_DMA_M2P_PORT_UART3 0x08
#define EP93XX_DMA_M2P_PORT_IRDA 0x09
#define EP93XX_DMA_M2P_PORT_MASK 0x0f
-#define EP93XX_DMA_M2P_TX 0x00
-#define EP93XX_DMA_M2P_RX 0x10
-#define EP93XX_DMA_M2P_ABORT_ON_ERROR 0x20
-#define EP93XX_DMA_M2P_IGNORE_ERROR 0x40
-#define EP93XX_DMA_M2P_ERROR_MASK 0x60
-int ep93xx_dma_m2p_client_register(struct ep93xx_dma_m2p_client *m2p);
+/* DMA M2P client flags */
+#define EP93XX_DMA_M2P_TX 0x00 /* Memory to peripheral */
+#define EP93XX_DMA_M2P_RX 0x10 /* Peripheral to memory */
+
+/*
+ * DMA M2P client error handling flags. See the EP93xx users guide
+ * documentation on the DMA M2P CONTROL register for more details
+ */
+#define EP93XX_DMA_M2P_ABORT_ON_ERROR 0x20 /* Abort on peripheral error */
+#define EP93XX_DMA_M2P_IGNORE_ERROR 0x40 /* Ignore peripheral errors */
+#define EP93XX_DMA_M2P_ERROR_MASK 0x60 /* Mask of error bits */
+
+/**
+ * ep93xx_dma_m2p_client_register - Register a client with the DMA M2P
+ * subsystem
+ *
+ * @m2p: Client information to register
+ * returns 0 on success
+ *
+ * The DMA M2P subsystem allocates a channel and an interrupt line for the DMA
+ * client
+ */
+int ep93xx_dma_m2p_client_register(struct ep93xx_dma_m2p_client *m2p);
+
+/**
+ * ep93xx_dma_m2p_client_unregister - Unregister a client from the DMA M2P
+ * subsystem
+ *
+ * @m2p: Client to unregister
+ *
+ * Any transfers currently in progress will be completed in hardware, but
+ * ignored in software.
+ */
void ep93xx_dma_m2p_client_unregister(struct ep93xx_dma_m2p_client *m2p);
+
+/**
+ * ep93xx_dma_m2p_submit - Submit a DMA M2P transfer
+ *
+ * @m2p: DMA Client to submit the transfer on
+ * @buf: DMA Buffer to submit
+ *
+ * If the current or next transfer positions are free on the M2P client then
+ * the transfer is started immediately. If not, the transfer is added to the
+ * list of pending transfers. This function must not be called from the
+ * buffer_finished callback for an M2P channel.
+ *
+ */
void ep93xx_dma_m2p_submit(struct ep93xx_dma_m2p_client *m2p,
struct ep93xx_dma_buffer *buf);
+
+/**
+ * ep93xx_dma_m2p_submit_recursive - Put a DMA transfer on the pending list
+ * for an M2P channel
+ *
+ * @m2p: DMA Client to submit the transfer on
+ * @buf: DMA Buffer to submit
+ *
+ * This function must only be called from the buffer_finished callback for an
+ * M2P channel. It is commonly used to add the next transfer in a chained list
+ * of DMA transfers.
+ */
void ep93xx_dma_m2p_submit_recursive(struct ep93xx_dma_m2p_client *m2p,
struct ep93xx_dma_buffer *buf);
+
+/**
+ * ep93xx_dma_m2p_flush - Flush all pending transfers on a DMA M2P client
+ *
+ * @m2p: DMA client to flush transfers on
+ *
+ * Any transfers currently in progress will be completed in hardware, but
+ * ignored in software.
+ *
+ */
void ep93xx_dma_m2p_flush(struct ep93xx_dma_m2p_client *m2p);
#endif /* __ASM_ARCH_DMA_H */
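The client flags documented above pack three things into one byte: the M2P port number in the low nibble, the transfer direction in bit 4, and the error policy in bits 5-6. A stand-alone decoder using the same mask values from this header, just to make the layout concrete:

#include <stdio.h>

#define EP93XX_DMA_M2P_PORT_MASK	0x0f
#define EP93XX_DMA_M2P_RX		0x10
#define EP93XX_DMA_M2P_ABORT_ON_ERROR	0x20
#define EP93XX_DMA_M2P_ERROR_MASK	0x60
#define EP93XX_DMA_M2P_PORT_AAC1	0x02

static void decode(unsigned char flags)
{
	printf("port=%u dir=%s errors=0x%02x\n",
	       flags & EP93XX_DMA_M2P_PORT_MASK,
	       (flags & EP93XX_DMA_M2P_RX) ? "peripheral->memory"
					   : "memory->peripheral",
	       flags & EP93XX_DMA_M2P_ERROR_MASK);
}

int main(void)
{
	/* The sort of flags an AC97/I2S PCM driver might pass for playback. */
	decode(EP93XX_DMA_M2P_PORT_AAC1 | EP93XX_DMA_M2P_ABORT_ON_ERROR);
	return 0;
}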
diff --git a/arch/arm/mach-ep93xx/include/mach/ep93xx-regs.h b/arch/arm/mach-ep93xx/include/mach/ep93xx-regs.h
index c54b3e56ba63..9ac4d1055097 100644
--- a/arch/arm/mach-ep93xx/include/mach/ep93xx-regs.h
+++ b/arch/arm/mach-ep93xx/include/mach/ep93xx-regs.h
@@ -105,6 +105,7 @@
#define EP93XX_GPIO_B_INT_STATUS EP93XX_GPIO_REG(0xbc)
#define EP93XX_GPIO_EEDRIVE EP93XX_GPIO_REG(0xc8)
+#define EP93XX_AAC_PHYS_BASE EP93XX_APB_PHYS(0x00080000)
#define EP93XX_AAC_BASE EP93XX_APB_IOMEM(0x00080000)
#define EP93XX_SPI_PHYS_BASE EP93XX_APB_PHYS(0x000a0000)
diff --git a/arch/arm/mach-ep93xx/include/mach/platform.h b/arch/arm/mach-ep93xx/include/mach/platform.h
index 3330b36d79e6..50660455b1d8 100644
--- a/arch/arm/mach-ep93xx/include/mach/platform.h
+++ b/arch/arm/mach-ep93xx/include/mach/platform.h
@@ -61,6 +61,7 @@ void ep93xx_keypad_release_gpio(struct platform_device *pdev);
void ep93xx_register_i2s(void);
int ep93xx_i2s_acquire(unsigned i2s_pins, unsigned i2s_config);
void ep93xx_i2s_release(void);
+void ep93xx_register_ac97(void);
void ep93xx_init_devices(void);
extern struct sys_timer ep93xx_timer;
diff --git a/arch/arm/mach-ep93xx/simone.c b/arch/arm/mach-ep93xx/simone.c
index f22ce8db7947..d96dc1c5da20 100644
--- a/arch/arm/mach-ep93xx/simone.c
+++ b/arch/arm/mach-ep93xx/simone.c
@@ -61,6 +61,7 @@ static void __init simone_init_machine(void)
ep93xx_register_fb(&simone_fb_info);
ep93xx_register_i2c(&simone_i2c_gpio_data, simone_i2c_board_info,
ARRAY_SIZE(simone_i2c_board_info));
+ ep93xx_register_ac97();
}
MACHINE_START(SIM_ONE, "Simplemachines Sim.One Board")
diff --git a/arch/arm/mach-footbridge/include/mach/vmalloc.h b/arch/arm/mach-footbridge/include/mach/vmalloc.h
index 0ffbb7c85e59..40ba78e5782b 100644
--- a/arch/arm/mach-footbridge/include/mach/vmalloc.h
+++ b/arch/arm/mach-footbridge/include/mach/vmalloc.h
@@ -7,4 +7,4 @@
*/
-#define VMALLOC_END 0xf0000000
+#define VMALLOC_END 0xf0000000UL
diff --git a/arch/arm/mach-h720x/include/mach/vmalloc.h b/arch/arm/mach-h720x/include/mach/vmalloc.h
index a45915b88756..8520b4a4d4e6 100644
--- a/arch/arm/mach-h720x/include/mach/vmalloc.h
+++ b/arch/arm/mach-h720x/include/mach/vmalloc.h
@@ -5,6 +5,6 @@
#ifndef __ARCH_ARM_VMALLOC_H
#define __ARCH_ARM_VMALLOC_H
-#define VMALLOC_END 0xd0000000
+#define VMALLOC_END 0xd0000000UL
#endif
diff --git a/arch/arm/mach-imx/eukrea_mbimx27-baseboard.c b/arch/arm/mach-imx/eukrea_mbimx27-baseboard.c
index 026263c665ca..7e1e9dc2c8fc 100644
--- a/arch/arm/mach-imx/eukrea_mbimx27-baseboard.c
+++ b/arch/arm/mach-imx/eukrea_mbimx27-baseboard.c
@@ -250,9 +250,6 @@ static const struct imxuart_platform_data uart_pdata __initconst = {
.flags = IMXUART_HAVE_RTSCTS,
};
-#if defined(CONFIG_TOUCHSCREEN_ADS7846) \
- || defined(CONFIG_TOUCHSCREEN_ADS7846_MODULE)
-
#define ADS7846_PENDOWN (GPIO_PORTD | 25)
static void ads7846_dev_init(void)
@@ -273,9 +270,7 @@ static struct ads7846_platform_data ads7846_config __initdata = {
.get_pendown_state = ads7846_get_pendown_state,
.keep_vref_on = 1,
};
-#endif
-#if defined(CONFIG_SPI_IMX) || defined(CONFIG_SPI_IMX_MODULE)
static struct spi_board_info eukrea_mbimx27_spi_board_info[] __initdata = {
[0] = {
.modalias = "ads7846",
@@ -294,7 +289,6 @@ static const struct spi_imx_master eukrea_mbimx27_spi0_data __initconst = {
.chipselect = eukrea_mbimx27_spi_cs,
.num_chipselect = ARRAY_SIZE(eukrea_mbimx27_spi_cs),
};
-#endif
static struct i2c_board_info eukrea_mbimx27_i2c_devices[] = {
{
diff --git a/arch/arm/mach-imx/include/mach/dma-v1.h b/arch/arm/mach-imx/include/mach/dma-v1.h
index 287431cc13e5..ac6fd713828a 100644
--- a/arch/arm/mach-imx/include/mach/dma-v1.h
+++ b/arch/arm/mach-imx/include/mach/dma-v1.h
@@ -27,6 +27,8 @@
#define imx_has_dma_v1() (cpu_is_mx1() || cpu_is_mx21() || cpu_is_mx27())
+#include <mach/dma.h>
+
#define IMX_DMA_CHANNELS 16
#define DMA_MODE_READ 0
@@ -96,12 +98,6 @@ int imx_dma_request(int channel, const char *name);
void imx_dma_free(int channel);
-enum imx_dma_prio {
- DMA_PRIO_HIGH = 0,
- DMA_PRIO_MEDIUM = 1,
- DMA_PRIO_LOW = 2
-};
-
int imx_dma_request_by_prio(const char *name, enum imx_dma_prio prio);
#endif /* __MACH_DMA_V1_H__ */
diff --git a/arch/arm/mach-imx/mach-mx27_3ds.c b/arch/arm/mach-imx/mach-mx27_3ds.c
index b8bbd31aa850..84a5ba03f1ba 100644
--- a/arch/arm/mach-imx/mach-mx27_3ds.c
+++ b/arch/arm/mach-imx/mach-mx27_3ds.c
@@ -23,16 +23,20 @@
#include <linux/platform_device.h>
#include <linux/gpio.h>
#include <linux/input/matrix_keypad.h>
+#include <linux/irq.h>
#include <asm/mach-types.h>
#include <asm/mach/arch.h>
#include <asm/mach/time.h>
#include <mach/hardware.h>
#include <mach/common.h>
#include <mach/iomux-mx27.h>
+#include <mach/mmc.h>
#include "devices-imx27.h"
#include "devices.h"
+#define SD1_EN_GPIO (GPIO_PORTB + 25)
+
static const int mx27pdk_pins[] __initconst = {
/* UART1 */
PE12_PF_UART1_TXD,
@@ -58,6 +62,14 @@ static const int mx27pdk_pins[] __initconst = {
PD15_AOUT_FEC_COL,
PD16_AIN_FEC_TX_ER,
PF23_AIN_FEC_TX_EN,
+ /* SDHC1 */
+ PE18_PF_SD1_D0,
+ PE19_PF_SD1_D1,
+ PE20_PF_SD1_D2,
+ PE21_PF_SD1_D3,
+ PE22_PF_SD1_CMD,
+ PE23_PF_SD1_CLK,
+ SD1_EN_GPIO | GPIO_GPIO | GPIO_OUT,
};
static const struct imxuart_platform_data uart_pdata __initconst = {
@@ -85,13 +97,39 @@ static struct matrix_keymap_data mx27_3ds_keymap_data = {
.keymap_size = ARRAY_SIZE(mx27_3ds_keymap),
};
+static int mx27_3ds_sdhc1_init(struct device *dev, irq_handler_t detect_irq,
+ void *data)
+{
+ return request_irq(IRQ_GPIOB(26), detect_irq, IRQF_TRIGGER_FALLING |
+ IRQF_TRIGGER_RISING, "sdhc1-card-detect", data);
+}
+
+static void mx27_3ds_sdhc1_exit(struct device *dev, void *data)
+{
+ free_irq(IRQ_GPIOB(26), data);
+}
+
+static struct imxmmc_platform_data sdhc1_pdata = {
+ .init = mx27_3ds_sdhc1_init,
+ .exit = mx27_3ds_sdhc1_exit,
+};
+
+static void mx27_3ds_sdhc1_enable_level_translator(void)
+{
+ /* Turn on TXB0108 OE pin */
+ gpio_request(SD1_EN_GPIO, "sd1_enable");
+ gpio_direction_output(SD1_EN_GPIO, 1);
+}
+
static void __init mx27pdk_init(void)
{
mxc_gpio_setup_multiple_pins(mx27pdk_pins, ARRAY_SIZE(mx27pdk_pins),
"mx27pdk");
+ mx27_3ds_sdhc1_enable_level_translator();
imx27_add_imx_uart0(&uart_pdata);
imx27_add_fec(NULL);
mxc_register_device(&imx_kpp_device, &mx27_3ds_keymap_data);
+ mxc_register_device(&mxc_sdhc_device0, &sdhc1_pdata);
}
static void __init mx27pdk_timer_init(void)
diff --git a/arch/arm/mach-integrator/include/mach/vmalloc.h b/arch/arm/mach-integrator/include/mach/vmalloc.h
index e056e7cf5645..2f5a2bafb11f 100644
--- a/arch/arm/mach-integrator/include/mach/vmalloc.h
+++ b/arch/arm/mach-integrator/include/mach/vmalloc.h
@@ -17,4 +17,4 @@
* along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*/
-#define VMALLOC_END 0xd0000000
+#define VMALLOC_END 0xd0000000UL
diff --git a/arch/arm/mach-ixp2000/core.c b/arch/arm/mach-ixp2000/core.c
index babb22597163..e24e3d05397f 100644
--- a/arch/arm/mach-ixp2000/core.c
+++ b/arch/arm/mach-ixp2000/core.c
@@ -197,7 +197,7 @@ unsigned long ixp2000_gettimeoffset (void)
return offset / ticks_per_usec;
}
-static int ixp2000_timer_interrupt(int irq, void *dev_id)
+static irqreturn_t ixp2000_timer_interrupt(int irq, void *dev_id)
{
/* clear timer 1 */
ixp2000_reg_wrb(IXP2000_T1_CLR, 1);
diff --git a/arch/arm/mach-kirkwood/common.c b/arch/arm/mach-kirkwood/common.c
index 1c82d4290dad..3688123b5ad8 100644
--- a/arch/arm/mach-kirkwood/common.c
+++ b/arch/arm/mach-kirkwood/common.c
@@ -854,10 +854,9 @@ int __init kirkwood_find_tclk(void)
kirkwood_pcie_id(&dev, &rev);
- if ((dev == MV88F6281_DEV_ID && (rev == MV88F6281_REV_A0 ||
- rev == MV88F6281_REV_A1)) ||
- (dev == MV88F6282_DEV_ID))
- return 200000000;
+ if (dev == MV88F6281_DEV_ID || dev == MV88F6282_DEV_ID)
+ if (((readl(SAMPLE_AT_RESET) >> 21) & 1) == 0)
+ return 200000000;
return 166666667;
}
@@ -903,10 +902,16 @@ static struct platform_device kirkwood_i2s_device = {
},
};
+static struct platform_device kirkwood_pcm_device = {
+ .name = "kirkwood-pcm-audio",
+ .id = -1,
+};
+
void __init kirkwood_audio_init(void)
{
kirkwood_clk_ctrl |= CGC_AUDIO;
platform_device_register(&kirkwood_i2s_device);
+ platform_device_register(&kirkwood_pcm_device);
}
/*****************************************************************************
diff --git a/arch/arm/mach-kirkwood/d2net_v2-setup.c b/arch/arm/mach-kirkwood/d2net_v2-setup.c
index 4aa86e4a152c..a31c9499ab36 100644
--- a/arch/arm/mach-kirkwood/d2net_v2-setup.c
+++ b/arch/arm/mach-kirkwood/d2net_v2-setup.c
@@ -225,5 +225,5 @@ MACHINE_START(D2NET_V2, "LaCie d2 Network v2")
.init_machine = d2net_v2_init,
.map_io = kirkwood_map_io,
.init_irq = kirkwood_init_irq,
- .timer = &lacie_v2_timer,
+ .timer = &kirkwood_timer,
MACHINE_END
diff --git a/arch/arm/mach-kirkwood/lacie_v2-common.c b/arch/arm/mach-kirkwood/lacie_v2-common.c
index d3ea1b6c8a02..285edab776e9 100644
--- a/arch/arm/mach-kirkwood/lacie_v2-common.c
+++ b/arch/arm/mach-kirkwood/lacie_v2-common.c
@@ -111,17 +111,3 @@ void __init lacie_v2_hdd_power_init(int hdd_num)
pr_err("Failed to power up HDD%d\n", i + 1);
}
}
-
-/*****************************************************************************
- * Timer
- ****************************************************************************/
-
-static void lacie_v2_timer_init(void)
-{
- kirkwood_tclk = 166666667;
- orion_time_init(IRQ_KIRKWOOD_BRIDGE, kirkwood_tclk);
-}
-
-struct sys_timer lacie_v2_timer = {
- .init = lacie_v2_timer_init,
-};
diff --git a/arch/arm/mach-kirkwood/lacie_v2-common.h b/arch/arm/mach-kirkwood/lacie_v2-common.h
index af521315b87b..fc64f578536e 100644
--- a/arch/arm/mach-kirkwood/lacie_v2-common.h
+++ b/arch/arm/mach-kirkwood/lacie_v2-common.h
@@ -13,6 +13,4 @@ void lacie_v2_register_flash(void);
void lacie_v2_register_i2c_devices(void);
void lacie_v2_hdd_power_init(int hdd_num);
-extern struct sys_timer lacie_v2_timer;
-
#endif
diff --git a/arch/arm/mach-kirkwood/mpp.c b/arch/arm/mach-kirkwood/mpp.c
index 065187d177c6..27901f702feb 100644
--- a/arch/arm/mach-kirkwood/mpp.c
+++ b/arch/arm/mach-kirkwood/mpp.c
@@ -59,7 +59,7 @@ void __init kirkwood_mpp_conf(unsigned int *mpp_list)
}
printk("\n");
- while (*mpp_list) {
+ for ( ; *mpp_list; mpp_list++) {
unsigned int num = MPP_NUM(*mpp_list);
unsigned int sel = MPP_SEL(*mpp_list);
int shift, gpio_mode;
@@ -88,8 +88,6 @@ void __init kirkwood_mpp_conf(unsigned int *mpp_list)
if (sel != 0)
gpio_mode = 0;
orion_gpio_set_valid(num, gpio_mode);
-
- mpp_list++;
}
printk(KERN_DEBUG " final MPP regs:");
diff --git a/arch/arm/mach-kirkwood/netspace_v2-setup.c b/arch/arm/mach-kirkwood/netspace_v2-setup.c
index 5e286441b8f4..65ee21fd2f3b 100644
--- a/arch/arm/mach-kirkwood/netspace_v2-setup.c
+++ b/arch/arm/mach-kirkwood/netspace_v2-setup.c
@@ -30,6 +30,7 @@
#include <linux/gpio.h>
#include <linux/gpio_keys.h>
#include <linux/leds.h>
+#include <linux/gpio-fan.h>
#include <asm/mach-types.h>
#include <asm/mach/arch.h>
#include <mach/kirkwood.h>
@@ -137,6 +138,46 @@ static struct platform_device netspace_v2_leds = {
};
/*****************************************************************************
+ * GPIO fan
+ ****************************************************************************/
+
+/* Designed for fan 40x40x16: ADDA AD0412LB-D50 6000rpm@12v */
+static struct gpio_fan_speed netspace_max_v2_fan_speed[] = {
+ { 0, 0 },
+ { 1500, 15 },
+ { 1700, 14 },
+ { 1800, 13 },
+ { 2100, 12 },
+ { 3100, 11 },
+ { 3300, 10 },
+ { 4300, 9 },
+ { 5500, 8 },
+};
+
+static unsigned netspace_max_v2_fan_ctrl[] = { 22, 7, 33, 23 };
+
+static struct gpio_fan_alarm netspace_max_v2_fan_alarm = {
+ .gpio = 25,
+ .active_low = 1,
+};
+
+static struct gpio_fan_platform_data netspace_max_v2_fan_data = {
+ .num_ctrl = ARRAY_SIZE(netspace_max_v2_fan_ctrl),
+ .ctrl = netspace_max_v2_fan_ctrl,
+ .alarm = &netspace_max_v2_fan_alarm,
+ .num_speed = ARRAY_SIZE(netspace_max_v2_fan_speed),
+ .speed = netspace_max_v2_fan_speed,
+};
+
+static struct platform_device netspace_max_v2_gpio_fan = {
+ .name = "gpio-fan",
+ .id = -1,
+ .dev = {
+ .platform_data = &netspace_max_v2_fan_data,
+ },
+};
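Each entry in netspace_max_v2_fan_speed pairs an RPM with the value that gpio-fan drives onto the four control GPIOs listed in netspace_max_v2_fan_ctrl. A stand-alone sketch of picking the table entry closest to a requested speed over the same data; the matching policy shown here is a simplification, not necessarily the gpio-fan driver's exact rule:

#include <stdio.h>
#include <stdlib.h>

struct speed { unsigned rpm; unsigned ctrl_val; };

int main(void)
{
	/* Same RPM/control pairs as netspace_max_v2_fan_speed above. */
	const struct speed tab[] = {
		{ 0, 0 }, { 1500, 15 }, { 1700, 14 }, { 1800, 13 },
		{ 2100, 12 }, { 3100, 11 }, { 3300, 10 }, { 4300, 9 },
		{ 5500, 8 },
	};
	unsigned want = 2000, best = 0;

	for (unsigned i = 1; i < sizeof(tab) / sizeof(tab[0]); i++)
		if (abs((int)tab[i].rpm - (int)want) <
		    abs((int)tab[best].rpm - (int)want))
			best = i;

	printf("%u rpm -> entry %u (ctrl value %u)\n",
	       want, best, tab[best].ctrl_val);
	return 0;
}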
+
+/*****************************************************************************
* General Setup
****************************************************************************/
@@ -205,6 +246,8 @@ static void __init netspace_v2_init(void)
platform_device_register(&netspace_v2_leds);
platform_device_register(&netspace_v2_gpio_leds);
platform_device_register(&netspace_v2_gpio_buttons);
+ if (machine_is_netspace_max_v2())
+ platform_device_register(&netspace_max_v2_gpio_fan);
if (gpio_request(NETSPACE_V2_GPIO_POWER_OFF, "power-off") == 0 &&
gpio_direction_output(NETSPACE_V2_GPIO_POWER_OFF, 0) == 0)
@@ -219,7 +262,7 @@ MACHINE_START(NETSPACE_V2, "LaCie Network Space v2")
.init_machine = netspace_v2_init,
.map_io = kirkwood_map_io,
.init_irq = kirkwood_init_irq,
- .timer = &lacie_v2_timer,
+ .timer = &kirkwood_timer,
MACHINE_END
#endif
@@ -229,7 +272,7 @@ MACHINE_START(INETSPACE_V2, "LaCie Internet Space v2")
.init_machine = netspace_v2_init,
.map_io = kirkwood_map_io,
.init_irq = kirkwood_init_irq,
- .timer = &lacie_v2_timer,
+ .timer = &kirkwood_timer,
MACHINE_END
#endif
@@ -239,6 +282,6 @@ MACHINE_START(NETSPACE_MAX_V2, "LaCie Network Space Max v2")
.init_machine = netspace_v2_init,
.map_io = kirkwood_map_io,
.init_irq = kirkwood_init_irq,
- .timer = &lacie_v2_timer,
+ .timer = &kirkwood_timer,
MACHINE_END
#endif
diff --git a/arch/arm/mach-kirkwood/netxbig_v2-setup.c b/arch/arm/mach-kirkwood/netxbig_v2-setup.c
index a1b45d501aef..93afd3c8bfd8 100644
--- a/arch/arm/mach-kirkwood/netxbig_v2-setup.c
+++ b/arch/arm/mach-kirkwood/netxbig_v2-setup.c
@@ -403,7 +403,7 @@ MACHINE_START(NET2BIG_V2, "LaCie 2Big Network v2")
.init_machine = netxbig_v2_init,
.map_io = kirkwood_map_io,
.init_irq = kirkwood_init_irq,
- .timer = &lacie_v2_timer,
+ .timer = &kirkwood_timer,
MACHINE_END
#endif
@@ -413,6 +413,6 @@ MACHINE_START(NET5BIG_V2, "LaCie 5Big Network v2")
.init_machine = netxbig_v2_init,
.map_io = kirkwood_map_io,
.init_irq = kirkwood_init_irq,
- .timer = &lacie_v2_timer,
+ .timer = &kirkwood_timer,
MACHINE_END
#endif
diff --git a/arch/arm/mach-kirkwood/ts41x-setup.c b/arch/arm/mach-kirkwood/ts41x-setup.c
index 8be09a0ce4ac..3587a281d993 100644
--- a/arch/arm/mach-kirkwood/ts41x-setup.c
+++ b/arch/arm/mach-kirkwood/ts41x-setup.c
@@ -27,6 +27,10 @@
#include "mpp.h"
#include "tsx1x-common.h"
+/* for the PCIe reset workaround */
+#include <plat/pcie.h>
+
+
#define QNAP_TS41X_JUMPER_JP1 45
static struct i2c_board_info __initdata qnap_ts41x_i2c_rtc = {
@@ -140,8 +144,16 @@ static void __init qnap_ts41x_init(void)
static int __init ts41x_pci_init(void)
{
- if (machine_is_ts41x())
+ if (machine_is_ts41x()) {
+ /*
+ * Without this explicit reset, the PCIe SATA controller
+ * (Marvell 88sx7042/sata_mv) is known to stop working
+ * after a few minutes.
+ */
+ orion_pcie_reset((void __iomem *)PCIE_VIRT_BASE);
+
kirkwood_pcie_init(KW_PCIE0);
+ }
return 0;
}
diff --git a/arch/arm/mach-mmp/include/mach/cputype.h b/arch/arm/mach-mmp/include/mach/cputype.h
index f43a68b213f1..8a3b56dfd35d 100644
--- a/arch/arm/mach-mmp/include/mach/cputype.h
+++ b/arch/arm/mach-mmp/include/mach/cputype.h
@@ -46,7 +46,8 @@ static inline int cpu_is_pxa910(void)
#ifdef CONFIG_CPU_MMP2
static inline int cpu_is_mmp2(void)
{
- return (((cpu_readid_id() >> 8) & 0xff) == 0x58);
+ return (((read_cpuid_id() >> 8) & 0xff) == 0x58);
+}
#else
#define cpu_is_mmp2() (0)
#endif
diff --git a/arch/arm/mach-msm/Kconfig b/arch/arm/mach-msm/Kconfig
index 3115a29dec4e..dbbcfeb919db 100644
--- a/arch/arm/mach-msm/Kconfig
+++ b/arch/arm/mach-msm/Kconfig
@@ -6,6 +6,7 @@ choice
config ARCH_MSM7X00A
bool "MSM7x00A / MSM7x01A"
+ select MACH_TROUT if !MACH_HALIBUT
select ARCH_MSM_ARM11
select MSM_SMD
select MSM_SMD_PKG3
@@ -15,34 +16,34 @@ config ARCH_MSM7X00A
config ARCH_MSM7X30
bool "MSM7x30"
+ select MACH_MSM7X30_SURF # if !
select ARCH_MSM_SCORPION
select MSM_SMD
select MSM_VIC
select CPU_V7
- select MSM_REMOTE_SPINLOCK_DEKKERS
select MSM_GPIOMUX
select MSM_PROC_COMM
select HAS_MSM_DEBUG_UART_PHYS
config ARCH_QSD8X50
bool "QSD8X50"
+ select MACH_QSD8X50_SURF if !MACH_QSD8X50A_ST1_5
select ARCH_MSM_SCORPION
select MSM_SMD
select MSM_VIC
select CPU_V7
- select MSM_REMOTE_SPINLOCK_LDREX
select MSM_GPIOMUX
select MSM_PROC_COMM
select HAS_MSM_DEBUG_UART_PHYS
config ARCH_MSM8X60
bool "MSM8X60"
+ select MACH_MSM8X60_SURF if (!MACH_MSM8X60_RUMI3 && !MACH_MSM8X60_SIM \
+ && !MACH_MSM8X60_FFA)
select ARM_GIC
select CPU_V7
select MSM_V2_TLMM
select MSM_GPIOMUX
- select MACH_MSM8X60_SURF if (!MACH_MSM8X60_RUMI3 && !MACH_MSM8X60_SIM \
- && !MACH_MSM8X60_FFA)
endchoice
diff --git a/arch/arm/mach-msm/board-halibut.c b/arch/arm/mach-msm/board-halibut.c
index 59edecbe126c..75dabb16c802 100644
--- a/arch/arm/mach-msm/board-halibut.c
+++ b/arch/arm/mach-msm/board-halibut.c
@@ -83,7 +83,6 @@ static void __init halibut_fixup(struct machine_desc *desc, struct tag *tags,
{
mi->nr_banks=1;
mi->bank[0].start = PHYS_OFFSET;
- mi->bank[0].node = PHYS_TO_NID(PHYS_OFFSET);
mi->bank[0].size = (101*1024*1024);
}
diff --git a/arch/arm/mach-msm/include/mach/debug-macro.S b/arch/arm/mach-msm/include/mach/debug-macro.S
index fbd5d90dcc8c..646b99ebc773 100644
--- a/arch/arm/mach-msm/include/mach/debug-macro.S
+++ b/arch/arm/mach-msm/include/mach/debug-macro.S
@@ -19,7 +19,7 @@
#include <mach/hardware.h>
#include <mach/msm_iomap.h>
-#ifdef CONFIG_HAS_MSM_DEBUG_UART_PHYS
+#if defined(CONFIG_HAS_MSM_DEBUG_UART_PHYS) && !defined(CONFIG_MSM_DEBUG_UART_NONE)
.macro addruart, rp, rv
ldr \rp, =MSM_DEBUG_UART_PHYS
ldr \rv, =MSM_DEBUG_UART_BASE
@@ -36,7 +36,18 @@
tst \rd, #0x04
beq 1001b
.endm
+#else
+ .macro addruart, rp, rv
+ mov \rv, #0xff000000
+ orr \rv, \rv, #0x00f00000
+ .endm
- .macro busyuart,rd,rx
+ .macro senduart,rd,rx
+ .endm
+
+ .macro waituart,rd,rx
.endm
#endif
+
+ .macro busyuart,rd,rx
+ .endm
diff --git a/arch/arm/mach-msm/include/mach/vmalloc.h b/arch/arm/mach-msm/include/mach/vmalloc.h
index 31a32ad062dc..d138448eff16 100644
--- a/arch/arm/mach-msm/include/mach/vmalloc.h
+++ b/arch/arm/mach-msm/include/mach/vmalloc.h
@@ -16,7 +16,7 @@
#ifndef __ASM_ARCH_MSM_VMALLOC_H
#define __ASM_ARCH_MSM_VMALLOC_H
-#define VMALLOC_END 0xd0000000
+#define VMALLOC_END 0xd0000000UL
#endif
diff --git a/arch/arm/mach-msm/iommu_dev.c b/arch/arm/mach-msm/iommu_dev.c
index c33ae786c41f..9019cee2907b 100644
--- a/arch/arm/mach-msm/iommu_dev.c
+++ b/arch/arm/mach-msm/iommu_dev.c
@@ -128,7 +128,7 @@ static void msm_iommu_reset(void __iomem *base)
static int msm_iommu_probe(struct platform_device *pdev)
{
- struct resource *r;
+ struct resource *r, *r2;
struct clk *iommu_clk;
struct msm_iommu_drvdata *drvdata;
struct msm_iommu_dev *iommu_dev = pdev->dev.platform_data;
@@ -183,27 +183,27 @@ static int msm_iommu_probe(struct platform_device *pdev)
len = r->end - r->start + 1;
- r = request_mem_region(r->start, len, r->name);
- if (!r) {
+ r2 = request_mem_region(r->start, len, r->name);
+ if (!r2) {
pr_err("Could not request memory region: "
"start=%p, len=%d\n", (void *) r->start, len);
ret = -EBUSY;
goto fail;
}
- regs_base = ioremap(r->start, len);
+ regs_base = ioremap(r2->start, len);
if (!regs_base) {
pr_err("Could not ioremap: start=%p, len=%d\n",
- (void *) r->start, len);
+ (void *) r2->start, len);
ret = -EBUSY;
- goto fail;
+ goto fail_mem;
}
irq = platform_get_irq_byname(pdev, "secure_irq");
if (irq < 0) {
ret = -ENODEV;
- goto fail;
+ goto fail_io;
}
mb();
@@ -211,14 +211,14 @@ static int msm_iommu_probe(struct platform_device *pdev)
if (GET_IDR(regs_base) == 0) {
pr_err("Invalid IDR value detected\n");
ret = -ENODEV;
- goto fail;
+ goto fail_io;
}
ret = request_irq(irq, msm_iommu_fault_handler, 0,
"msm_iommu_secure_irpt_handler", drvdata);
if (ret) {
pr_err("Request IRQ %d failed with ret=%d\n", irq, ret);
- goto fail;
+ goto fail_io;
}
msm_iommu_reset(regs_base);
@@ -237,6 +237,10 @@ static int msm_iommu_probe(struct platform_device *pdev)
return 0;
+fail_io:
+ iounmap(regs_base);
+fail_mem:
+ release_mem_region(r->start, len);
fail:
kfree(drvdata);
return ret;
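
The msm_iommu_probe() hunk above is a textbook example of the kernel's goto-unwind error handling: each acquired resource gets its own exit label, and a failure jumps to the label that releases only what has already been obtained. A minimal standalone sketch of the same pattern, with illustrative names rather than the driver's own:

/*
 * Sketch of the goto-unwind idiom used in msm_iommu_probe() above:
 * every step that can fail jumps to the label that undoes only the
 * steps already completed.  Names and values are illustrative.
 */
#include <linux/io.h>
#include <linux/ioport.h>
#include <linux/interrupt.h>

static int example_probe_unwind(resource_size_t start, resource_size_t len,
				int irq, irq_handler_t handler, void *ctx)
{
	void __iomem *regs;
	int ret;

	if (!request_mem_region(start, len, "example"))
		return -EBUSY;

	regs = ioremap(start, len);
	if (!regs) {
		ret = -EBUSY;
		goto fail_mem;		/* only the region was claimed */
	}

	ret = request_irq(irq, handler, 0, "example", ctx);
	if (ret)
		goto fail_io;		/* unmap, then release the region */

	return 0;			/* success: resources stay held */

fail_io:
	iounmap(regs);
fail_mem:
	release_mem_region(start, len);
	return ret;
}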
diff --git a/arch/arm/mach-msm/timer.c b/arch/arm/mach-msm/timer.c
index 7689848ec680..950100f19d07 100644
--- a/arch/arm/mach-msm/timer.c
+++ b/arch/arm/mach-msm/timer.c
@@ -137,7 +137,7 @@ static struct msm_clock msm_clocks[] = {
.rating = 200,
.read = msm_gpt_read,
.mask = CLOCKSOURCE_MASK(32),
- .shift = 24,
+ .shift = 17,
.flags = CLOCK_SOURCE_IS_CONTINUOUS,
},
.irq = {
diff --git a/arch/arm/mach-mv78xx0/mpp.c b/arch/arm/mach-mv78xx0/mpp.c
index 354ac514eb89..84db2dfc475c 100644
--- a/arch/arm/mach-mv78xx0/mpp.c
+++ b/arch/arm/mach-mv78xx0/mpp.c
@@ -54,7 +54,7 @@ void __init mv78xx0_mpp_conf(unsigned int *mpp_list)
}
printk("\n");
- while (*mpp_list) {
+ for ( ; *mpp_list; mpp_list++) {
unsigned int num = MPP_NUM(*mpp_list);
unsigned int sel = MPP_SEL(*mpp_list);
int shift, gpio_mode;
@@ -83,8 +83,6 @@ void __init mv78xx0_mpp_conf(unsigned int *mpp_list)
if (sel != 0)
gpio_mode = 0;
orion_gpio_set_valid(num, gpio_mode);
-
- mpp_list++;
}
printk(KERN_DEBUG " final MPP regs:");
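
Rewriting the walk as a for loop is more than cosmetic: with the increment at the tail of a while body, any early continue (for instance on an out-of-range MPP number) skips it and the loop never advances. Hoisting the increment into the for header makes it unconditional. A hedged fragment of the shape involved; MPP_MAX and the bound check are assumed, since they are not visible in the hunk:

	for ( ; *mpp_list; mpp_list++) {
		unsigned int num = MPP_NUM(*mpp_list);

		if (num > MPP_MAX)	/* assumed validity check */
			continue;	/* still advances, unlike the old while form */

		/* ... decode MPP_SEL() and program the control register ... */
	}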
diff --git a/arch/arm/mach-mx25/Kconfig b/arch/arm/mach-mx25/Kconfig
index aa57e35ce3cd..38ca09a5df9d 100644
--- a/arch/arm/mach-mx25/Kconfig
+++ b/arch/arm/mach-mx25/Kconfig
@@ -6,6 +6,7 @@ config MACH_MX25_3DS
bool "Support MX25PDK (3DS) Platform"
select IMX_HAVE_PLATFORM_IMX_UART
select IMX_HAVE_PLATFORM_MXC_NAND
+ select IMX_HAVE_PLATFORM_ESDHC
config MACH_EUKREA_CPUIMX25
bool "Support Eukrea CPUIMX25 Platform"
diff --git a/arch/arm/mach-mx25/devices-imx25.h b/arch/arm/mach-mx25/devices-imx25.h
index 93afa10b13cf..d94d282fa676 100644
--- a/arch/arm/mach-mx25/devices-imx25.h
+++ b/arch/arm/mach-mx25/devices-imx25.h
@@ -42,9 +42,9 @@ extern const struct imx_mxc_nand_data imx25_mxc_nand_data __initconst;
#define imx25_add_mxc_nand(pdata) \
imx_add_mxc_nand(&imx25_mxc_nand_data, pdata)
-extern const struct imx_spi_imx_data imx25_spi_imx_data[] __initconst;
+extern const struct imx_spi_imx_data imx25_cspi_data[] __initconst;
#define imx25_add_spi_imx(id, pdata) \
- imx_add_spi_imx(&imx25_spi_imx_data[id], pdata)
+ imx_add_spi_imx(&imx25_cspi_data[id], pdata)
#define imx25_add_spi_imx0(pdata) imx25_add_spi_imx(0, pdata)
#define imx25_add_spi_imx1(pdata) imx25_add_spi_imx(1, pdata)
#define imx25_add_spi_imx2(pdata) imx25_add_spi_imx(2, pdata)
diff --git a/arch/arm/mach-mx25/mach-mx25_3ds.c b/arch/arm/mach-mx25/mach-mx25_3ds.c
index 80805107a73e..f8be1eb0c062 100644
--- a/arch/arm/mach-mx25/mach-mx25_3ds.c
+++ b/arch/arm/mach-mx25/mach-mx25_3ds.c
@@ -96,6 +96,14 @@ static struct pad_desc mx25pdk_pads[] = {
MX25_PAD_KPP_COL1__KPP_COL1,
MX25_PAD_KPP_COL2__KPP_COL2,
MX25_PAD_KPP_COL3__KPP_COL3,
+
+ /* SD1 */
+ MX25_PAD_SD1_CMD__SD1_CMD,
+ MX25_PAD_SD1_CLK__SD1_CLK,
+ MX25_PAD_SD1_DATA0__SD1_DATA0,
+ MX25_PAD_SD1_DATA1__SD1_DATA1,
+ MX25_PAD_SD1_DATA2__SD1_DATA2,
+ MX25_PAD_SD1_DATA3__SD1_DATA3,
};
static const struct fec_platform_data mx25_fec_pdata __initconst = {
@@ -193,6 +201,8 @@ static void __init mx25pdk_init(void)
mx25pdk_fec_reset();
imx25_add_fec(&mx25_fec_pdata);
mxc_register_device(&mx25_kpp_device, &mx25pdk_keymap_data);
+
+ imx25_add_esdhc(0, NULL);
}
static void __init mx25pdk_timer_init(void)
diff --git a/arch/arm/mach-mx3/Kconfig b/arch/arm/mach-mx3/Kconfig
index 096fd33f8ab9..5000ac1f93e3 100644
--- a/arch/arm/mach-mx3/Kconfig
+++ b/arch/arm/mach-mx3/Kconfig
@@ -143,8 +143,10 @@ config MACH_ARMADILLO5X0
config MACH_MX35_3DS
bool "Support MX35PDK platform"
select ARCH_MX35
+ select MXC_DEBUG_BOARD
select IMX_HAVE_PLATFORM_IMX_UART
select IMX_HAVE_PLATFORM_MXC_NAND
+ select IMX_HAVE_PLATFORM_ESDHC
default n
help
Include support for MX35PDK platform. This includes specific
diff --git a/arch/arm/mach-mx3/devices.c b/arch/arm/mach-mx3/devices.c
index f4dff11aaee7..d4da9496089a 100644
--- a/arch/arm/mach-mx3/devices.c
+++ b/arch/arm/mach-mx3/devices.c
@@ -72,24 +72,24 @@ struct platform_device mxc_w1_master_device = {
#ifdef CONFIG_ARCH_MX31
static struct resource mxcsdhc0_resources[] = {
{
- .start = MMC_SDHC1_BASE_ADDR,
- .end = MMC_SDHC1_BASE_ADDR + SZ_16K - 1,
+ .start = MX31_MMC_SDHC1_BASE_ADDR,
+ .end = MX31_MMC_SDHC1_BASE_ADDR + SZ_16K - 1,
.flags = IORESOURCE_MEM,
}, {
- .start = MXC_INT_MMC_SDHC1,
- .end = MXC_INT_MMC_SDHC1,
+ .start = MX31_INT_MMC_SDHC1,
+ .end = MX31_INT_MMC_SDHC1,
.flags = IORESOURCE_IRQ,
},
};
static struct resource mxcsdhc1_resources[] = {
{
- .start = MMC_SDHC2_BASE_ADDR,
- .end = MMC_SDHC2_BASE_ADDR + SZ_16K - 1,
+ .start = MX31_MMC_SDHC2_BASE_ADDR,
+ .end = MX31_MMC_SDHC2_BASE_ADDR + SZ_16K - 1,
.flags = IORESOURCE_MEM,
}, {
- .start = MXC_INT_MMC_SDHC2,
- .end = MXC_INT_MMC_SDHC2,
+ .start = MX31_INT_MMC_SDHC2,
+ .end = MX31_INT_MMC_SDHC2,
.flags = IORESOURCE_IRQ,
},
};
diff --git a/arch/arm/mach-mx3/mach-mx31_3ds.c b/arch/arm/mach-mx3/mach-mx31_3ds.c
index 5c1d0e86c91e..0ad9e7821082 100644
--- a/arch/arm/mach-mx3/mach-mx31_3ds.c
+++ b/arch/arm/mach-mx3/mach-mx31_3ds.c
@@ -38,39 +38,9 @@
#include "devices-imx31.h"
#include "devices.h"
-/* Definitions for components on the Debug board */
-
-/* Base address of CPLD controller on the Debug board */
-#define DEBUG_BASE_ADDRESS CS5_IO_ADDRESS(MX3x_CS5_BASE_ADDR)
-
-/* LAN9217 ethernet base address */
-#define LAN9217_BASE_ADDR MX3x_CS5_BASE_ADDR
-
-/* CPLD config and interrupt base address */
-#define CPLD_ADDR (DEBUG_BASE_ADDRESS + 0x20000)
-
-/* status, interrupt */
-#define CPLD_INT_STATUS_REG (CPLD_ADDR + 0x10)
-#define CPLD_INT_MASK_REG (CPLD_ADDR + 0x38)
-#define CPLD_INT_RESET_REG (CPLD_ADDR + 0x20)
-/* magic word for debug CPLD */
-#define CPLD_MAGIC_NUMBER1_REG (CPLD_ADDR + 0x40)
-#define CPLD_MAGIC_NUMBER2_REG (CPLD_ADDR + 0x48)
-/* CPLD code version */
-#define CPLD_CODE_VER_REG (CPLD_ADDR + 0x50)
-/* magic word for debug CPLD */
-#define CPLD_MAGIC_NUMBER3_REG (CPLD_ADDR + 0x58)
-
/* CPLD IRQ line for external uart, external ethernet etc */
#define EXPIO_PARENT_INT IOMUX_TO_IRQ(MX31_PIN_GPIO1_1)
-#define MXC_EXP_IO_BASE (MXC_BOARD_IRQ_START)
-#define MXC_IRQ_TO_EXPIO(irq) ((irq) - MXC_EXP_IO_BASE)
-
-#define EXPIO_INT_ENET (MXC_EXP_IO_BASE + 0)
-
-#define MXC_MAX_EXP_IO_LINES 16
-
/*
* This file contains the board-specific initialization routines.
*/
@@ -272,7 +242,7 @@ static void __init mxc_board_init(void)
imx31_add_imx_uart0(&uart_pdata);
imx31_add_mxc_nand(&mx31_3ds_nand_board_info);
- imx31_add_spi_imx0(&spi1_pdata);
+ imx31_add_spi_imx1(&spi1_pdata);
spi_register_board_info(mx31_3ds_spi_devs,
ARRAY_SIZE(mx31_3ds_spi_devs));
@@ -281,9 +251,9 @@ static void __init mxc_board_init(void)
mx31_3ds_usbotg_init();
mxc_register_device(&mxc_otg_udc_device, &usbotg_pdata);
- if (!mxc_expio_init(CS5_BASE_ADDR, EXPIO_PARENT_INT))
- printk(KERN_WARNING "Init of the debugboard failed, all "
- "devices on the board are unusable.\n");
+ if (mxc_expio_init(MX31_CS5_BASE_ADDR, EXPIO_PARENT_INT))
+ printk(KERN_WARNING "Init of the debug board failed, all "
+ "devices on the debug board are unusable.\n");
}
static void __init mx31_3ds_timer_init(void)
diff --git a/arch/arm/mach-mx3/mach-mx35_3ds.c b/arch/arm/mach-mx3/mach-mx35_3ds.c
index 05f628d90725..b66a75aa2e88 100644
--- a/arch/arm/mach-mx3/mach-mx35_3ds.c
+++ b/arch/arm/mach-mx3/mach-mx35_3ds.c
@@ -38,11 +38,15 @@
#include <mach/hardware.h>
#include <mach/common.h>
#include <mach/iomux-mx35.h>
+#include <mach/irqs.h>
+#include <mach/3ds_debugboard.h>
#include <mach/mxc_ehci.h>
#include "devices-imx35.h"
#include "devices.h"
+#define EXPIO_PARENT_INT (MXC_INTERNAL_IRQS + GPIO_PORTA + 1)
+
static const struct imxuart_platform_data uart_pdata __initconst = {
.flags = IMXUART_HAVE_RTSCTS,
};
@@ -108,6 +112,13 @@ static struct pad_desc mx35pdk_pads[] = {
/* USBH1 */
MX35_PAD_I2C2_CLK__USB_TOP_USBH2_PWR,
MX35_PAD_I2C2_DAT__USB_TOP_USBH2_OC,
+ /* SDCARD */
+ MX35_PAD_SD1_CMD__ESDHC1_CMD,
+ MX35_PAD_SD1_CLK__ESDHC1_CLK,
+ MX35_PAD_SD1_DATA0__ESDHC1_DAT0,
+ MX35_PAD_SD1_DATA1__ESDHC1_DAT1,
+ MX35_PAD_SD1_DATA2__ESDHC1_DAT2,
+ MX35_PAD_SD1_DATA3__ESDHC1_DAT3,
};
/* OTG config */
@@ -140,6 +151,11 @@ static void __init mxc_board_init(void)
mxc_register_device(&mxc_usbh1, &usb_host_pdata);
imx35_add_mxc_nand(&mx35pdk_nand_board_info);
+ imx35_add_esdhc(0, NULL);
+
+ if (mxc_expio_init(MX35_CS5_BASE_ADDR, EXPIO_PARENT_INT))
+ pr_warn("Init of the debugboard failed, all "
+ "devices on the debugboard are unusable.\n");
}
static void __init mx35pdk_timer_init(void)
diff --git a/arch/arm/mach-mx3/mach-pcm037.c b/arch/arm/mach-mx3/mach-pcm037.c
index 86e86c1300d5..2ff3f661a48e 100644
--- a/arch/arm/mach-mx3/mach-pcm037.c
+++ b/arch/arm/mach-mx3/mach-pcm037.c
@@ -311,7 +311,6 @@ static struct soc_camera_link iclink_mt9v022 = {
.bus_id = 0, /* Must match with the camera ID */
.board_info = &pcm037_i2c_camera[1],
.i2c_adapter_id = 2,
- .module_name = "mt9v022",
};
static struct soc_camera_link iclink_mt9t031 = {
@@ -319,7 +318,6 @@ static struct soc_camera_link iclink_mt9t031 = {
.power = pcm037_camera_power,
.board_info = &pcm037_i2c_camera[0],
.i2c_adapter_id = 2,
- .module_name = "mt9t031",
};
static struct i2c_board_info pcm037_i2c_devices[] = {
diff --git a/arch/arm/mach-mx3/mach-pcm037_eet.c b/arch/arm/mach-mx3/mach-pcm037_eet.c
index 99e0894e07db..fda56545d2fd 100644
--- a/arch/arm/mach-mx3/mach-pcm037_eet.c
+++ b/arch/arm/mach-mx3/mach-pcm037_eet.c
@@ -14,6 +14,7 @@
#include <mach/common.h>
#include <mach/iomux-mx3.h>
+#include <mach/spi.h>
#include <asm/mach-types.h>
@@ -59,14 +60,12 @@ static struct spi_board_info pcm037_spi_dev[] = {
};
/* Platform Data for MXC CSPI */
-#if defined(CONFIG_SPI_IMX) || defined(CONFIG_SPI_IMX_MODULE)
static int pcm037_spi1_cs[] = {MXC_SPI_CS(1), IOMUX_TO_GPIO(MX31_PIN_KEY_COL7)};
static const struct spi_imx_master pcm037_spi1_pdata __initconst = {
.chipselect = pcm037_spi1_cs,
.num_chipselect = ARRAY_SIZE(pcm037_spi1_cs),
};
-#endif
/* GPIO-keys input device */
static struct gpio_keys_button pcm037_gpio_keys[] = {
@@ -171,7 +170,7 @@ static struct platform_device pcm037_gpio_keys_device = {
},
};
-static int eet_init_devices(void)
+static int __init eet_init_devices(void)
{
if (!machine_is_pcm037() || pcm037_variant() != PCM037_EET)
return 0;
diff --git a/arch/arm/mach-mx3/mx31moboard-marxbot.c b/arch/arm/mach-mx3/mx31moboard-marxbot.c
index 0551eb39d97e..18069cb7d068 100644
--- a/arch/arm/mach-mx3/mx31moboard-marxbot.c
+++ b/arch/arm/mach-mx3/mx31moboard-marxbot.c
@@ -179,7 +179,6 @@ static struct soc_camera_link base_iclink = {
.reset = marxbot_basecam_reset,
.board_info = &marxbot_i2c_devices[0],
.i2c_adapter_id = 0,
- .module_name = "mt9t031",
};
static struct platform_device marxbot_camera[] = {
diff --git a/arch/arm/mach-mx3/mx31moboard-smartbot.c b/arch/arm/mach-mx3/mx31moboard-smartbot.c
index 417757e78c65..04760a53005a 100644
--- a/arch/arm/mach-mx3/mx31moboard-smartbot.c
+++ b/arch/arm/mach-mx3/mx31moboard-smartbot.c
@@ -88,7 +88,6 @@ static struct soc_camera_link base_iclink = {
.reset = smartbot_cam_reset,
.board_info = &smartbot_i2c_devices[0],
.i2c_adapter_id = 0,
- .module_name = "mt9t031",
};
static struct platform_device smartbot_camera[] = {
diff --git a/arch/arm/mach-mx5/Kconfig b/arch/arm/mach-mx5/Kconfig
index a2df9ac37996..3ec910a7a182 100644
--- a/arch/arm/mach-mx5/Kconfig
+++ b/arch/arm/mach-mx5/Kconfig
@@ -6,6 +6,7 @@ config ARCH_MX51
select MXC_TZIC
select ARCH_MXC_IOMUX_V3
select ARCH_MXC_AUDMUX_V2
+ select ARCH_HAS_CPUFREQ
comment "MX5 platforms:"
@@ -13,6 +14,7 @@ config MACH_MX51_BABBAGE
bool "Support MX51 BABBAGE platforms"
select IMX_HAVE_PLATFORM_IMX_I2C
select IMX_HAVE_PLATFORM_IMX_UART
+ select IMX_HAVE_PLATFORM_ESDHC
help
Include support for MX51 Babbage platform, also known as MX51EVK in
u-boot. This includes specific configurations for the board and its
diff --git a/arch/arm/mach-mx5/Makefile b/arch/arm/mach-mx5/Makefile
index 1769c161a60d..462f177eddfe 100644
--- a/arch/arm/mach-mx5/Makefile
+++ b/arch/arm/mach-mx5/Makefile
@@ -5,6 +5,7 @@
# Object file lists.
obj-y := cpu.o mm.o clock-mx51.o devices.o
+obj-$(CONFIG_CPU_FREQ_IMX) += cpu_op-mx51.o
obj-$(CONFIG_MACH_MX51_BABBAGE) += board-mx51_babbage.o
obj-$(CONFIG_MACH_MX51_3DS) += board-mx51_3ds.o
obj-$(CONFIG_MACH_EUKREA_CPUIMX51) += board-cpuimx51.o
diff --git a/arch/arm/mach-mx5/board-mx51_babbage.c b/arch/arm/mach-mx5/board-mx51_babbage.c
index 0821fe9b3b27..acbe30df2e69 100644
--- a/arch/arm/mach-mx5/board-mx51_babbage.c
+++ b/arch/arm/mach-mx5/board-mx51_babbage.c
@@ -1,5 +1,5 @@
/*
- * Copyright 2009 Freescale Semiconductor, Inc. All Rights Reserved.
+ * Copyright 2009-2010 Freescale Semiconductor, Inc. All Rights Reserved.
* Copyright (C) 2009-2010 Amit Kucheria <amit.kucheria@canonical.com>
*
* The code contained herein is licensed under the GNU General Public
@@ -18,6 +18,8 @@
#include <linux/io.h>
#include <linux/fsl_devices.h>
#include <linux/fec.h>
+#include <linux/gpio_keys.h>
+#include <linux/input.h>
#include <mach/common.h>
#include <mach/hardware.h>
@@ -32,11 +34,13 @@
#include "devices-imx51.h"
#include "devices.h"
+#include "cpu_op-mx51.h"
#define BABBAGE_USB_HUB_RESET (0*32 + 7) /* GPIO_1_7 */
#define BABBAGE_USBH1_STP (0*32 + 27) /* GPIO_1_27 */
#define BABBAGE_PHY_RESET (1*32 + 5) /* GPIO_2_5 */
#define BABBAGE_FEC_PHY_RESET (1*32 + 14) /* GPIO_2_14 */
+#define BABBAGE_POWER_KEY (1*32 + 21) /* GPIO_2_21 */
/* USB_CTRL_1 */
#define MX51_USB_CTRL_1_OFFSET 0x10
@@ -46,6 +50,21 @@
#define MX51_USB_PLL_DIV_19_2_MHZ 0x01
#define MX51_USB_PLL_DIV_24_MHZ 0x02
+static struct gpio_keys_button babbage_buttons[] = {
+ {
+ .gpio = BABBAGE_POWER_KEY,
+ .code = BTN_0,
+ .desc = "PWR",
+ .active_low = 1,
+ .wakeup = 1,
+ },
+};
+
+static const struct gpio_keys_platform_data imx_button_data __initconst = {
+ .buttons = babbage_buttons,
+ .nbuttons = ARRAY_SIZE(babbage_buttons),
+};
+
static struct pad_desc mx51babbage_pads[] = {
/* UART1 */
MX51_PAD_UART1_RXD__UART1_RXD,
@@ -112,6 +131,22 @@ static struct pad_desc mx51babbage_pads[] = {
/* FEC PHY reset line */
MX51_PAD_EIM_A20__GPIO_2_14,
+
+ /* SD 1 */
+ MX51_PAD_SD1_CMD__SD1_CMD,
+ MX51_PAD_SD1_CLK__SD1_CLK,
+ MX51_PAD_SD1_DATA0__SD1_DATA0,
+ MX51_PAD_SD1_DATA1__SD1_DATA1,
+ MX51_PAD_SD1_DATA2__SD1_DATA2,
+ MX51_PAD_SD1_DATA3__SD1_DATA3,
+
+ /* SD 2 */
+ MX51_PAD_SD2_CMD__SD2_CMD,
+ MX51_PAD_SD2_CLK__SD2_CLK,
+ MX51_PAD_SD2_DATA0__SD2_DATA0,
+ MX51_PAD_SD2_DATA1__SD2_DATA1,
+ MX51_PAD_SD2_DATA2__SD2_DATA2,
+ MX51_PAD_SD2_DATA3__SD2_DATA3,
};
/* Serial ports */
@@ -281,13 +316,22 @@ __setup("otg_mode=", babbage_otg_mode);
static void __init mxc_board_init(void)
{
struct pad_desc usbh1stp = MX51_PAD_USBH1_STP__USBH1_STP;
+ struct pad_desc power_key = MX51_PAD_EIM_A27__GPIO_2_21;
+#if defined(CONFIG_CPU_FREQ_IMX)
+ get_cpu_op = mx51_get_cpu_op;
+#endif
mxc_iomux_v3_setup_multiple_pads(mx51babbage_pads,
ARRAY_SIZE(mx51babbage_pads));
mxc_init_imx_uart();
babbage_fec_reset();
imx51_add_fec(NULL);
+ /* Set the PAD settings for the pwr key. */
+ power_key.pad_ctrl = MX51_GPIO_PAD_CTRL_2;
+ mxc_iomux_v3_setup_pad(&power_key);
+ imx51_add_gpio_keys(&imx_button_data);
+
imx51_add_imx_i2c(0, &babbage_i2c_data);
imx51_add_imx_i2c(1, &babbage_i2c_data);
mxc_register_device(&mxc_hsi2c_device, &babbage_hsi2c_data);
@@ -304,6 +348,9 @@ static void __init mxc_board_init(void)
/* setback USBH1_STP to be function */
mxc_iomux_v3_setup_pad(&usbh1stp);
babbage_usbhub_reset();
+
+ imx51_add_esdhc(0, NULL);
+ imx51_add_esdhc(1, NULL);
}
static void __init mx51_babbage_timer_init(void)
diff --git a/arch/arm/mach-mx5/clock-mx51.c b/arch/arm/mach-mx5/clock-mx51.c
index f2aae92cf0e2..8ac36d882927 100644
--- a/arch/arm/mach-mx5/clock-mx51.c
+++ b/arch/arm/mach-mx5/clock-mx51.c
@@ -362,7 +362,7 @@ static int _clk_lp_apm_set_parent(struct clk *clk, struct clk *parent)
return 0;
}
-static unsigned long clk_arm_get_rate(struct clk *clk)
+static unsigned long clk_cpu_get_rate(struct clk *clk)
{
u32 cacrr, div;
unsigned long parent_rate;
@@ -374,6 +374,22 @@ static unsigned long clk_arm_get_rate(struct clk *clk)
return parent_rate / div;
}
+static int clk_cpu_set_rate(struct clk *clk, unsigned long rate)
+{
+ u32 reg, cpu_podf;
+ unsigned long parent_rate;
+
+ parent_rate = clk_get_rate(clk->parent);
+ cpu_podf = parent_rate / rate - 1;
+ /* use post divider to change freq */
+ reg = __raw_readl(MXC_CCM_CACRR);
+ reg &= ~MXC_CCM_CACRR_ARM_PODF_MASK;
+ reg |= cpu_podf << MXC_CCM_CACRR_ARM_PODF_OFFSET;
+ __raw_writel(reg, MXC_CCM_CACRR);
+
+ return 0;
+}
+
static int _clk_periph_apm_set_parent(struct clk *clk, struct clk *parent)
{
u32 reg, mux;
@@ -736,7 +752,8 @@ static struct clk periph_apm_clk = {
static struct clk cpu_clk = {
.parent = &pll1_sw_clk,
- .get_rate = clk_arm_get_rate,
+ .get_rate = clk_cpu_get_rate,
+ .set_rate = clk_cpu_set_rate,
};
static struct clk ahb_clk = {
@@ -1064,6 +1081,7 @@ static struct clk_lookup lookups[] = {
_REGISTER_CLOCK("imx51-cspi.0", NULL, cspi_clk)
_REGISTER_CLOCK("sdhci-esdhc-imx.0", NULL, esdhc1_clk)
_REGISTER_CLOCK("sdhci-esdhc-imx.1", NULL, esdhc2_clk)
+ _REGISTER_CLOCK(NULL, "cpu_clk", cpu_clk)
};
static void clk_tree_init(void)
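
Because cpu_clk now carries a .set_rate hook and is registered in the clkdev table under the "cpu_clk" connection id, frequency changes can go through the ordinary clk API. A usage sketch, not taken from any in-tree driver:

#include <linux/clk.h>
#include <linux/err.h>

/* Illustrative only: switch the CPU to one of the rates listed in
 * mx51_cpu_op[] below (160 MHz or 800 MHz). */
static int example_scale_cpu(unsigned long rate_hz)
{
	struct clk *cpu = clk_get(NULL, "cpu_clk");	/* matches the new lookup */
	int ret;

	if (IS_ERR(cpu))
		return PTR_ERR(cpu);

	ret = clk_set_rate(cpu, rate_hz);	/* reaches clk_cpu_set_rate() via the clk framework */
	clk_put(cpu);
	return ret;
}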
diff --git a/arch/arm/mach-mx5/cpu_op-mx51.c b/arch/arm/mach-mx5/cpu_op-mx51.c
new file mode 100644
index 000000000000..9d34c3d4c024
--- /dev/null
+++ b/arch/arm/mach-mx5/cpu_op-mx51.c
@@ -0,0 +1,29 @@
+/*
+ * Copyright (C) 2010 Freescale Semiconductor, Inc. All Rights Reserved.
+ */
+
+/*
+ * The code contained herein is licensed under the GNU General Public
+ * License. You may obtain a copy of the GNU General Public License
+ * Version 2 or later at the following locations:
+ *
+ * http://www.opensource.org/licenses/gpl-license.html
+ * http://www.gnu.org/copyleft/gpl.html
+ */
+
+#include <linux/types.h>
+#include <mach/hardware.h>
+#include <linux/kernel.h>
+
+static struct cpu_op mx51_cpu_op[] = {
+ {
+ .cpu_rate = 160000000,},
+ {
+ .cpu_rate = 800000000,},
+};
+
+struct cpu_op *mx51_get_cpu_op(int *op)
+{
+ *op = ARRAY_SIZE(mx51_cpu_op);
+ return mx51_cpu_op;
+}
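
mx51_get_cpu_op() becomes the board's operating-point provider once the Babbage init code assigns it to the get_cpu_op hook (see the board file above). A hypothetical consumer is sketched below; the header providing the hook declaration and the pr_info loop are assumptions, not code from this series:

#include <linux/kernel.h>
#include <mach/hardware.h>	/* struct cpu_op and the get_cpu_op hook (assumed location) */

/* Hypothetical consumer: enumerate the rates a cpufreq-style driver
 * could offer.  Not the actual Freescale cpufreq code. */
static void example_dump_cpu_ops(void)
{
	int i, nr;
	struct cpu_op *op = get_cpu_op(&nr);

	for (i = 0; i < nr; i++)
		pr_info("CPU operating point %d: %lu Hz\n",
			i, (unsigned long)op[i].cpu_rate);
}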
diff --git a/arch/arm/mach-mx5/cpu_op-mx51.h b/arch/arm/mach-mx5/cpu_op-mx51.h
new file mode 100644
index 000000000000..97477fecb469
--- /dev/null
+++ b/arch/arm/mach-mx5/cpu_op-mx51.h
@@ -0,0 +1,14 @@
+/*
+ * Copyright (C) 2010 Freescale Semiconductor, Inc. All Rights Reserved.
+ */
+
+/*
+ * The code contained herein is licensed under the GNU General Public
+ * License. You may obtain a copy of the GNU General Public License
+ * Version 2 or later at the following locations:
+ *
+ * http://www.opensource.org/licenses/gpl-license.html
+ * http://www.gnu.org/copyleft/gpl.html
+ */
+
+extern struct cpu_op *mx51_get_cpu_op(int *op);
diff --git a/arch/arm/mach-mx5/devices-imx51.h b/arch/arm/mach-mx5/devices-imx51.h
index 5cc910e60538..8c50cb5d05f5 100644
--- a/arch/arm/mach-mx5/devices-imx51.h
+++ b/arch/arm/mach-mx5/devices-imx51.h
@@ -13,6 +13,8 @@ extern const struct imx_fec_data imx51_fec_data __initconst;
#define imx51_add_fec(pdata) \
imx_add_fec(&imx51_fec_data, pdata)
+#define imx51_add_gpio_keys(pdata) imx_add_gpio_keys(pdata)
+
extern const struct imx_imx_i2c_data imx51_imx_i2c_data[] __initconst;
#define imx51_add_imx_i2c(id, pdata) \
imx_add_imx_i2c(&imx51_imx_i2c_data[id], pdata)
diff --git a/arch/arm/mach-netx/include/mach/vmalloc.h b/arch/arm/mach-netx/include/mach/vmalloc.h
index 7cca3574308f..871f1ef7bff5 100644
--- a/arch/arm/mach-netx/include/mach/vmalloc.h
+++ b/arch/arm/mach-netx/include/mach/vmalloc.h
@@ -16,4 +16,4 @@
* along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*/
-#define VMALLOC_END 0xd0000000
+#define VMALLOC_END 0xd0000000UL
diff --git a/arch/arm/mach-omap1/Makefile b/arch/arm/mach-omap1/Makefile
index facfaeb1ae5c..9a304d854e33 100644
--- a/arch/arm/mach-omap1/Makefile
+++ b/arch/arm/mach-omap1/Makefile
@@ -12,7 +12,7 @@ obj-$(CONFIG_OMAP_MPU_TIMER) += time.o
obj-$(CONFIG_OMAP_32K_TIMER) += timer32k.o
# Power Management
-obj-$(CONFIG_PM) += pm.o sleep.o
+obj-$(CONFIG_PM) += pm.o sleep.o pm_bus.o
# DSP
obj-$(CONFIG_OMAP_MBOX_FWK) += mailbox_mach.o
diff --git a/arch/arm/mach-omap1/board-ams-delta.c b/arch/arm/mach-omap1/board-ams-delta.c
index 73c86392fcd3..1d4163b9f0b7 100644
--- a/arch/arm/mach-omap1/board-ams-delta.c
+++ b/arch/arm/mach-omap1/board-ams-delta.c
@@ -16,9 +16,12 @@
#include <linux/init.h>
#include <linux/input.h>
#include <linux/interrupt.h>
+#include <linux/leds.h>
#include <linux/platform_device.h>
#include <linux/serial_8250.h>
+#include <media/soc_camera.h>
+
#include <asm/serial.h>
#include <mach/hardware.h>
#include <asm/mach-types.h>
@@ -32,6 +35,7 @@
#include <plat/usb.h>
#include <plat/board.h>
#include <plat/common.h>
+#include <mach/camera.h>
#include <mach/ams-delta-fiq.h>
@@ -213,10 +217,56 @@ static struct platform_device ams_delta_led_device = {
.id = -1
};
+static struct i2c_board_info ams_delta_camera_board_info[] = {
+ {
+ I2C_BOARD_INFO("ov6650", 0x60),
+ },
+};
+
+#ifdef CONFIG_LEDS_TRIGGERS
+DEFINE_LED_TRIGGER(ams_delta_camera_led_trigger);
+
+static int ams_delta_camera_power(struct device *dev, int power)
+{
+ /*
+ * turn on camera LED
+ */
+ if (power)
+ led_trigger_event(ams_delta_camera_led_trigger, LED_FULL);
+ else
+ led_trigger_event(ams_delta_camera_led_trigger, LED_OFF);
+ return 0;
+}
+#else
+#define ams_delta_camera_power NULL
+#endif
+
+static struct soc_camera_link __initdata ams_delta_iclink = {
+ .bus_id = 0, /* OMAP1 SoC camera bus */
+ .i2c_adapter_id = 1,
+ .board_info = &ams_delta_camera_board_info[0],
+ .module_name = "ov6650",
+ .power = ams_delta_camera_power,
+};
+
+static struct platform_device ams_delta_camera_device = {
+ .name = "soc-camera-pdrv",
+ .id = 0,
+ .dev = {
+ .platform_data = &ams_delta_iclink,
+ },
+};
+
+static struct omap1_cam_platform_data ams_delta_camera_platform_data = {
+ .camexclk_khz = 12000, /* default 12MHz clock, no extra DPLL */
+ .lclk_khz_max = 1334, /* results in 5fps CIF, 10fps QCIF */
+};
+
static struct platform_device *ams_delta_devices[] __initdata = {
&ams_delta_kp_device,
&ams_delta_lcd_device,
&ams_delta_led_device,
+ &ams_delta_camera_device,
};
static void __init ams_delta_init(void)
@@ -225,6 +275,20 @@ static void __init ams_delta_init(void)
omap_cfg_reg(UART1_TX);
omap_cfg_reg(UART1_RTS);
+ /* parallel camera interface */
+ omap_cfg_reg(H19_1610_CAM_EXCLK);
+ omap_cfg_reg(J15_1610_CAM_LCLK);
+ omap_cfg_reg(L18_1610_CAM_VS);
+ omap_cfg_reg(L15_1610_CAM_HS);
+ omap_cfg_reg(L19_1610_CAM_D0);
+ omap_cfg_reg(K14_1610_CAM_D1);
+ omap_cfg_reg(K15_1610_CAM_D2);
+ omap_cfg_reg(K19_1610_CAM_D3);
+ omap_cfg_reg(K18_1610_CAM_D4);
+ omap_cfg_reg(J14_1610_CAM_D5);
+ omap_cfg_reg(J19_1610_CAM_D6);
+ omap_cfg_reg(J18_1610_CAM_D7);
+
iotable_init(ams_delta_io_desc, ARRAY_SIZE(ams_delta_io_desc));
omap_board_config = ams_delta_config;
@@ -236,6 +300,11 @@ static void __init ams_delta_init(void)
ams_delta_latch2_write(~0, 0);
omap1_usb_init(&ams_delta_usb_config);
+ omap1_set_camera_info(&ams_delta_camera_platform_data);
+#ifdef CONFIG_LEDS_TRIGGERS
+ led_trigger_register_simple("ams_delta_camera",
+ &ams_delta_camera_led_trigger);
+#endif
platform_add_devices(ams_delta_devices, ARRAY_SIZE(ams_delta_devices));
#ifdef CONFIG_AMS_DELTA_FIQ
diff --git a/arch/arm/mach-omap1/board-h2-mmc.c b/arch/arm/mach-omap1/board-h2-mmc.c
index b30c4990744d..f2fc43d8382b 100644
--- a/arch/arm/mach-omap1/board-h2-mmc.c
+++ b/arch/arm/mach-omap1/board-h2-mmc.c
@@ -58,8 +58,7 @@ static struct omap_mmc_platform_data mmc1_data = {
.dma_mask = 0xffffffff,
.slots[0] = {
.set_power = mmc_set_power,
- .ocr_mask = MMC_VDD_28_29 | MMC_VDD_30_31 |
- MMC_VDD_32_33 | MMC_VDD_33_34,
+ .ocr_mask = MMC_VDD_32_33 | MMC_VDD_33_34,
.name = "mmcblk",
},
};
diff --git a/arch/arm/mach-omap1/board-h3-mmc.c b/arch/arm/mach-omap1/board-h3-mmc.c
index 54b0f063e263..2098525e7cc5 100644
--- a/arch/arm/mach-omap1/board-h3-mmc.c
+++ b/arch/arm/mach-omap1/board-h3-mmc.c
@@ -40,8 +40,7 @@ static struct omap_mmc_platform_data mmc1_data = {
.dma_mask = 0xffffffff,
.slots[0] = {
.set_power = mmc_set_power,
- .ocr_mask = MMC_VDD_28_29 | MMC_VDD_30_31 |
- MMC_VDD_32_33 | MMC_VDD_33_34,
+ .ocr_mask = MMC_VDD_32_33 | MMC_VDD_33_34,
.name = "mmcblk",
},
};
diff --git a/arch/arm/mach-omap1/board-htcherald.c b/arch/arm/mach-omap1/board-htcherald.c
index 86afb2952225..071af3e47789 100644
--- a/arch/arm/mach-omap1/board-htcherald.c
+++ b/arch/arm/mach-omap1/board-htcherald.c
@@ -30,6 +30,13 @@
#include <linux/input.h>
#include <linux/io.h>
#include <linux/gpio.h>
+#include <linux/gpio_keys.h>
+#include <linux/i2c.h>
+#include <linux/i2c-gpio.h>
+#include <linux/htcpld.h>
+#include <linux/leds.h>
+#include <linux/spi/spi.h>
+#include <linux/spi/ads7846.h>
#include <asm/mach-types.h>
#include <asm/mach/arch.h>
@@ -39,6 +46,7 @@
#include <plat/board.h>
#include <plat/keypad.h>
#include <plat/usb.h>
+#include <plat/mmc.h>
#include <mach/irqs.h>
@@ -52,13 +60,123 @@
#define OMAP_LCDC_CTRL_LCD_EN (1 << 0)
#define OMAP_LCDC_STAT_DONE (1 << 0)
-static struct omap_lcd_config htcherald_lcd_config __initdata = {
- .ctrl_name = "internal",
-};
+/* GPIO definitions for the power button and keyboard slide switch */
+#define HTCHERALD_GPIO_POWER 139
+#define HTCHERALD_GPIO_SLIDE 174
+#define HTCHERALD_GIRQ_BTNS 141
-static struct omap_board_config_kernel htcherald_config[] __initdata = {
- { OMAP_TAG_LCD, &htcherald_lcd_config },
-};
+/* GPIO definitions for the touchscreen */
+#define HTCHERALD_GPIO_TS 76
+
+/* HTCPLD definitions */
+
+/*
+ * CPLD Logic
+ *
+ * Chip 3 - 0x03
+ *
+ * Function 7 6 5 4 3 2 1 0
+ * ------------------------------------
+ * DPAD light x x x x x x x 1
+ * SoundDev x x x x 1 x x x
+ * Screen white 1 x x x x x x x
+ * MMC power on x x x x x 1 x x
+ * Happy times (n) 0 x x x x 1 x x
+ *
+ * Chip 4 - 0x04
+ *
+ * Function 7 6 5 4 3 2 1 0
+ * ------------------------------------
+ * Keyboard light x x x x x x x 1
+ * LCD Bright (4) x x x x x 1 1 x
+ * LCD Bright (3) x x x x x 0 1 x
+ * LCD Bright (2) x x x x x 1 0 x
+ * LCD Bright (1) x x x x x 0 0 x
+ * LCD Off x x x x 0 x x x
+ * LCD image (fb) 1 x x x x x x x
+ * LCD image (white) 0 x x x x x x x
+ * Caps lock LED x x 1 x x x x x
+ *
+ * Chip 5 - 0x05
+ *
+ * Function 7 6 5 4 3 2 1 0
+ * ------------------------------------
+ * Red (solid) x x x x x 1 x x
+ * Red (flash) x x x x x x 1 x
+ * Green (GSM flash) x x x x 1 x x x
+ * Green (GSM solid) x x x 1 x x x x
+ * Green (wifi flash) x x 1 x x x x x
+ * Blue (bt flash) x 1 x x x x x x
+ * DPAD Int Enable 1 x x x x x x 0
+ *
+ * (Combinations of the above can be made for different colors.)
+ * The direction pad interrupt enable must be set each time the
+ * interrupt is handled.
+ *
+ * Chip 6 - 0x06
+ *
+ * Function 7 6 5 4 3 2 1 0
+ * ------------------------------------
+ * Vibrator x x x x 1 x x x
+ * Alt LED x x x 1 x x x x
+ * Screen white 1 x x x x x x x
+ * Screen white x x 1 x x x x x
+ * Screen white x 0 x x x x x x
+ * Enable kbd dpad x x x x x x 0 x
+ * Happy Times 0 1 0 x x x 0 x
+ */
+
+/*
+ * HTCPLD GPIO lines start 16 after OMAP_MAX_GPIO_LINES to account
+ * for the 16 MPUIO lines.
+ */
+#define HTCPLD_GPIO_START_OFFSET (OMAP_MAX_GPIO_LINES + 16)
+#define HTCPLD_IRQ(chip, offset) (OMAP_IRQ_END + 8 * (chip) + (offset))
+#define HTCPLD_BASE(chip, offset) \
+ (HTCPLD_GPIO_START_OFFSET + 8 * (chip) + (offset))
+
+#define HTCPLD_GPIO_LED_DPAD HTCPLD_BASE(0, 0)
+#define HTCPLD_GPIO_LED_KBD HTCPLD_BASE(1, 0)
+#define HTCPLD_GPIO_LED_CAPS HTCPLD_BASE(1, 5)
+#define HTCPLD_GPIO_LED_RED_FLASH HTCPLD_BASE(2, 1)
+#define HTCPLD_GPIO_LED_RED_SOLID HTCPLD_BASE(2, 2)
+#define HTCPLD_GPIO_LED_GREEN_FLASH HTCPLD_BASE(2, 3)
+#define HTCPLD_GPIO_LED_GREEN_SOLID HTCPLD_BASE(2, 4)
+#define HTCPLD_GPIO_LED_WIFI HTCPLD_BASE(2, 5)
+#define HTCPLD_GPIO_LED_BT HTCPLD_BASE(2, 6)
+#define HTCPLD_GPIO_LED_VIBRATE HTCPLD_BASE(3, 3)
+#define HTCPLD_GPIO_LED_ALT HTCPLD_BASE(3, 4)
+
+#define HTCPLD_GPIO_RIGHT_KBD HTCPLD_BASE(6, 7)
+#define HTCPLD_GPIO_UP_KBD HTCPLD_BASE(6, 6)
+#define HTCPLD_GPIO_LEFT_KBD HTCPLD_BASE(6, 5)
+#define HTCPLD_GPIO_DOWN_KBD HTCPLD_BASE(6, 4)
+
+#define HTCPLD_GPIO_RIGHT_DPAD HTCPLD_BASE(7, 7)
+#define HTCPLD_GPIO_UP_DPAD HTCPLD_BASE(7, 6)
+#define HTCPLD_GPIO_LEFT_DPAD HTCPLD_BASE(7, 5)
+#define HTCPLD_GPIO_DOWN_DPAD HTCPLD_BASE(7, 4)
+#define HTCPLD_GPIO_ENTER_DPAD HTCPLD_BASE(7, 3)
+
+/*
+ * The htcpld chip requires a gpio write to a specific line
+ * to re-enable interrupts after one has occurred.
+ */
+#define HTCPLD_GPIO_INT_RESET_HI HTCPLD_BASE(2, 7)
+#define HTCPLD_GPIO_INT_RESET_LO HTCPLD_BASE(2, 0)
+
+/* Chip 5 */
+#define HTCPLD_IRQ_RIGHT_KBD HTCPLD_IRQ(0, 7)
+#define HTCPLD_IRQ_UP_KBD HTCPLD_IRQ(0, 6)
+#define HTCPLD_IRQ_LEFT_KBD HTCPLD_IRQ(0, 5)
+#define HTCPLD_IRQ_DOWN_KBD HTCPLD_IRQ(0, 4)
+
+/* Chip 6 */
+#define HTCPLD_IRQ_RIGHT_DPAD HTCPLD_IRQ(1, 7)
+#define HTCPLD_IRQ_UP_DPAD HTCPLD_IRQ(1, 6)
+#define HTCPLD_IRQ_LEFT_DPAD HTCPLD_IRQ(1, 5)
+#define HTCPLD_IRQ_DOWN_DPAD HTCPLD_IRQ(1, 4)
+#define HTCPLD_IRQ_ENTER_DPAD HTCPLD_IRQ(1, 3)
/* Keyboard definition */
@@ -140,6 +258,129 @@ static struct platform_device kp_device = {
.resource = kp_resources,
};
+/* GPIO buttons for keyboard slide and power button */
+static struct gpio_keys_button herald_gpio_keys_table[] = {
+ {BTN_0, HTCHERALD_GPIO_POWER, 1, "POWER", EV_KEY, 1, 20},
+ {SW_LID, HTCHERALD_GPIO_SLIDE, 0, "SLIDE", EV_SW, 1, 20},
+
+ {KEY_LEFT, HTCPLD_GPIO_LEFT_KBD, 1, "LEFT", EV_KEY, 1, 20},
+ {KEY_RIGHT, HTCPLD_GPIO_RIGHT_KBD, 1, "RIGHT", EV_KEY, 1, 20},
+ {KEY_UP, HTCPLD_GPIO_UP_KBD, 1, "UP", EV_KEY, 1, 20},
+ {KEY_DOWN, HTCPLD_GPIO_DOWN_KBD, 1, "DOWN", EV_KEY, 1, 20},
+
+ {KEY_LEFT, HTCPLD_GPIO_LEFT_DPAD, 1, "DLEFT", EV_KEY, 1, 20},
+ {KEY_RIGHT, HTCPLD_GPIO_RIGHT_DPAD, 1, "DRIGHT", EV_KEY, 1, 20},
+ {KEY_UP, HTCPLD_GPIO_UP_DPAD, 1, "DUP", EV_KEY, 1, 20},
+ {KEY_DOWN, HTCPLD_GPIO_DOWN_DPAD, 1, "DDOWN", EV_KEY, 1, 20},
+ {KEY_ENTER, HTCPLD_GPIO_ENTER_DPAD, 1, "DENTER", EV_KEY, 1, 20},
+};
+
+static struct gpio_keys_platform_data herald_gpio_keys_data = {
+ .buttons = herald_gpio_keys_table,
+ .nbuttons = ARRAY_SIZE(herald_gpio_keys_table),
+ .rep = 1,
+};
+
+static struct platform_device herald_gpiokeys_device = {
+ .name = "gpio-keys",
+ .id = -1,
+ .dev = {
+ .platform_data = &herald_gpio_keys_data,
+ },
+};
+
+/* LEDs for the Herald. These connect to the HTCPLD GPIO device. */
+static struct gpio_led gpio_leds[] = {
+ {"dpad", NULL, HTCPLD_GPIO_LED_DPAD, 0, 0, LEDS_GPIO_DEFSTATE_OFF},
+ {"kbd", NULL, HTCPLD_GPIO_LED_KBD, 0, 0, LEDS_GPIO_DEFSTATE_OFF},
+ {"vibrate", NULL, HTCPLD_GPIO_LED_VIBRATE, 0, 0, LEDS_GPIO_DEFSTATE_OFF},
+ {"green_solid", NULL, HTCPLD_GPIO_LED_GREEN_SOLID, 0, 0, LEDS_GPIO_DEFSTATE_OFF},
+ {"green_flash", NULL, HTCPLD_GPIO_LED_GREEN_FLASH, 0, 0, LEDS_GPIO_DEFSTATE_OFF},
+ {"red_solid", "mmc0", HTCPLD_GPIO_LED_RED_SOLID, 0, 0, LEDS_GPIO_DEFSTATE_OFF},
+ {"red_flash", NULL, HTCPLD_GPIO_LED_RED_FLASH, 0, 0, LEDS_GPIO_DEFSTATE_OFF},
+ {"wifi", NULL, HTCPLD_GPIO_LED_WIFI, 0, 0, LEDS_GPIO_DEFSTATE_OFF},
+ {"bt", NULL, HTCPLD_GPIO_LED_BT, 0, 0, LEDS_GPIO_DEFSTATE_OFF},
+ {"caps", NULL, HTCPLD_GPIO_LED_CAPS, 0, 0, LEDS_GPIO_DEFSTATE_OFF},
+ {"alt", NULL, HTCPLD_GPIO_LED_ALT, 0, 0, LEDS_GPIO_DEFSTATE_OFF},
+};
+
+static struct gpio_led_platform_data gpio_leds_data = {
+ .leds = gpio_leds,
+ .num_leds = ARRAY_SIZE(gpio_leds),
+};
+
+static struct platform_device gpio_leds_device = {
+ .name = "leds-gpio",
+ .id = 0,
+ .dev = {
+ .platform_data = &gpio_leds_data,
+ },
+};
+
+/* HTC PLD chips */
+
+static struct resource htcpld_resources[] = {
+ [0] = {
+ .start = OMAP_GPIO_IRQ(HTCHERALD_GIRQ_BTNS),
+ .end = OMAP_GPIO_IRQ(HTCHERALD_GIRQ_BTNS),
+ .flags = IORESOURCE_IRQ,
+ },
+};
+
+struct htcpld_chip_platform_data htcpld_chips[] = {
+ [0] = {
+ .addr = 0x03,
+ .reset = 0x04,
+ .num_gpios = 8,
+ .gpio_out_base = HTCPLD_BASE(0, 0),
+ .gpio_in_base = HTCPLD_BASE(4, 0),
+ },
+ [1] = {
+ .addr = 0x04,
+ .reset = 0x8e,
+ .num_gpios = 8,
+ .gpio_out_base = HTCPLD_BASE(1, 0),
+ .gpio_in_base = HTCPLD_BASE(5, 0),
+ },
+ [2] = {
+ .addr = 0x05,
+ .reset = 0x80,
+ .num_gpios = 8,
+ .gpio_out_base = HTCPLD_BASE(2, 0),
+ .gpio_in_base = HTCPLD_BASE(6, 0),
+ .irq_base = HTCPLD_IRQ(0, 0),
+ .num_irqs = 8,
+ },
+ [3] = {
+ .addr = 0x06,
+ .reset = 0x40,
+ .num_gpios = 8,
+ .gpio_out_base = HTCPLD_BASE(3, 0),
+ .gpio_in_base = HTCPLD_BASE(7, 0),
+ .irq_base = HTCPLD_IRQ(1, 0),
+ .num_irqs = 8,
+ },
+};
+
+struct htcpld_core_platform_data htcpld_pfdata = {
+ .int_reset_gpio_hi = HTCPLD_GPIO_INT_RESET_HI,
+ .int_reset_gpio_lo = HTCPLD_GPIO_INT_RESET_LO,
+ .i2c_adapter_id = 1,
+
+ .chip = htcpld_chips,
+ .num_chip = ARRAY_SIZE(htcpld_chips),
+};
+
+static struct platform_device htcpld_device = {
+ .name = "i2c-htcpld",
+ .id = -1,
+ .resource = htcpld_resources,
+ .num_resources = ARRAY_SIZE(htcpld_resources),
+ .dev = {
+ .platform_data = &htcpld_pfdata,
+ },
+};
+
/* USB Device */
static struct omap_usb_config htcherald_usb_config __initdata = {
.otg = 0,
@@ -150,14 +391,71 @@ static struct omap_usb_config htcherald_usb_config __initdata = {
};
/* LCD Device resources */
+static struct omap_lcd_config htcherald_lcd_config __initdata = {
+ .ctrl_name = "internal",
+};
+
+static struct omap_board_config_kernel htcherald_config[] __initdata = {
+ { OMAP_TAG_LCD, &htcherald_lcd_config },
+};
+
static struct platform_device lcd_device = {
.name = "lcd_htcherald",
.id = -1,
};
+/* MMC Card */
+#if defined(CONFIG_MMC_OMAP) || defined(CONFIG_MMC_OMAP_MODULE)
+static struct omap_mmc_platform_data htc_mmc1_data = {
+ .nr_slots = 1,
+ .switch_slot = NULL,
+ .slots[0] = {
+ .ocr_mask = MMC_VDD_32_33 | MMC_VDD_33_34,
+ .name = "mmcblk",
+ .nomux = 1,
+ .wires = 4,
+ .switch_pin = -1,
+ },
+};
+
+static struct omap_mmc_platform_data *htc_mmc_data[1];
+#endif
+
+
+/* Platform devices for the Herald */
static struct platform_device *devices[] __initdata = {
&kp_device,
&lcd_device,
+ &htcpld_device,
+ &gpio_leds_device,
+ &herald_gpiokeys_device,
+};
+
+/*
+ * Touchscreen
+ */
+static const struct ads7846_platform_data htcherald_ts_platform_data = {
+ .model = 7846,
+ .keep_vref_on = 1,
+ .x_plate_ohms = 496,
+ .gpio_pendown = HTCHERALD_GPIO_TS,
+ .pressure_max = 100000,
+ .pressure_min = 5000,
+ .x_min = 528,
+ .x_max = 3760,
+ .y_min = 624,
+ .y_max = 3760,
+};
+
+static struct spi_board_info __initdata htcherald_spi_board_info[] = {
+ {
+ .modalias = "ads7846",
+ .platform_data = &htcherald_ts_platform_data,
+ .irq = OMAP_GPIO_IRQ(HTCHERALD_GPIO_TS),
+ .max_speed_hz = 2500000,
+ .bus_num = 2,
+ .chip_select = 1,
+ }
};
/*
@@ -278,6 +576,7 @@ static void __init htcherald_init(void)
{
printk(KERN_INFO "HTC Herald init.\n");
+ /* Do board initialization before we register all the devices */
omap_gpio_init();
omap_board_config = htcherald_config;
@@ -288,6 +587,16 @@ static void __init htcherald_init(void)
htcherald_usb_enable();
omap1_usb_init(&htcherald_usb_config);
+
+ spi_register_board_info(htcherald_spi_board_info,
+ ARRAY_SIZE(htcherald_spi_board_info));
+
+ omap_register_i2c_bus(1, 100, NULL, 0);
+
+#if defined(CONFIG_MMC_OMAP) || defined(CONFIG_MMC_OMAP_MODULE)
+ htc_mmc_data[0] = &htc_mmc1_data;
+ omap1_init_mmc(htc_mmc_data, 1);
+#endif
}
static void __init htcherald_init_irq(void)
diff --git a/arch/arm/mach-omap1/board-sx1-mmc.c b/arch/arm/mach-omap1/board-sx1-mmc.c
index 5b33ae8141bc..e8ddd86e3fda 100644
--- a/arch/arm/mach-omap1/board-sx1-mmc.c
+++ b/arch/arm/mach-omap1/board-sx1-mmc.c
@@ -44,8 +44,7 @@ static struct omap_mmc_platform_data mmc1_data = {
.nr_slots = 1,
.slots[0] = {
.set_power = mmc_set_power,
- .ocr_mask = MMC_VDD_28_29 | MMC_VDD_30_31 |
- MMC_VDD_32_33 | MMC_VDD_33_34,
+ .ocr_mask = MMC_VDD_32_33 | MMC_VDD_33_34,
.name = "mmcblk",
},
};
diff --git a/arch/arm/mach-omap1/devices.c b/arch/arm/mach-omap1/devices.c
index aa0725608fb1..e7f9ee63dce5 100644
--- a/arch/arm/mach-omap1/devices.c
+++ b/arch/arm/mach-omap1/devices.c
@@ -9,6 +9,7 @@
* (at your option) any later version.
*/
+#include <linux/dma-mapping.h>
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/init.h>
@@ -25,6 +26,7 @@
#include <mach/gpio.h>
#include <plat/mmc.h>
#include <plat/omap7xx.h>
+#include <plat/mcbsp.h>
/*-------------------------------------------------------------------------*/
@@ -191,10 +193,76 @@ static inline void omap_init_spi100k(void)
}
#endif
+
+#define OMAP1_CAMERA_BASE 0xfffb6800
+#define OMAP1_CAMERA_IOSIZE 0x1c
+
+static struct resource omap1_camera_resources[] = {
+ [0] = {
+ .start = OMAP1_CAMERA_BASE,
+ .end = OMAP1_CAMERA_BASE + OMAP1_CAMERA_IOSIZE - 1,
+ .flags = IORESOURCE_MEM,
+ },
+ [1] = {
+ .start = INT_CAMERA,
+ .flags = IORESOURCE_IRQ,
+ },
+};
+
+static u64 omap1_camera_dma_mask = DMA_BIT_MASK(32);
+
+static struct platform_device omap1_camera_device = {
+ .name = "omap1-camera",
+ .id = 0, /* This is used to put cameras on this interface */
+ .dev = {
+ .dma_mask = &omap1_camera_dma_mask,
+ .coherent_dma_mask = DMA_BIT_MASK(32),
+ },
+ .num_resources = ARRAY_SIZE(omap1_camera_resources),
+ .resource = omap1_camera_resources,
+};
+
+void __init omap1_camera_init(void *info)
+{
+ struct platform_device *dev = &omap1_camera_device;
+ int ret;
+
+ dev->dev.platform_data = info;
+
+ ret = platform_device_register(dev);
+ if (ret)
+ dev_err(&dev->dev, "unable to register device: %d\n", ret);
+}
+
+
/*-------------------------------------------------------------------------*/
static inline void omap_init_sti(void) {}
+#if defined(CONFIG_SND_SOC) || defined(CONFIG_SND_SOC_MODULE)
+
+static struct platform_device omap_pcm = {
+ .name = "omap-pcm-audio",
+ .id = -1,
+};
+
+OMAP_MCBSP_PLATFORM_DEVICE(1);
+OMAP_MCBSP_PLATFORM_DEVICE(2);
+OMAP_MCBSP_PLATFORM_DEVICE(3);
+
+static void omap_init_audio(void)
+{
+ platform_device_register(&omap_mcbsp1);
+ platform_device_register(&omap_mcbsp2);
+ if (!cpu_is_omap7xx())
+ platform_device_register(&omap_mcbsp3);
+ platform_device_register(&omap_pcm);
+}
+
+#else
+static inline void omap_init_audio(void) {}
+#endif
+
/*-------------------------------------------------------------------------*/
/*
@@ -227,8 +295,35 @@ static int __init omap1_init_devices(void)
omap_init_rtc();
omap_init_spi100k();
omap_init_sti();
+ omap_init_audio();
return 0;
}
arch_initcall(omap1_init_devices);
+#if defined(CONFIG_OMAP_WATCHDOG) || defined(CONFIG_OMAP_WATCHDOG_MODULE)
+
+static struct resource wdt_resources[] = {
+ {
+ .start = 0xfffeb000,
+ .end = 0xfffeb07F,
+ .flags = IORESOURCE_MEM,
+ },
+};
+
+static struct platform_device omap_wdt_device = {
+ .name = "omap_wdt",
+ .id = -1,
+ .num_resources = ARRAY_SIZE(wdt_resources),
+ .resource = wdt_resources,
+};
+
+static int __init omap_init_wdt(void)
+{
+ if (!cpu_is_omap16xx())
+ return -ENODEV;
+
+ return platform_device_register(&omap_wdt_device);
+}
+subsys_initcall(omap_init_wdt);
+#endif
diff --git a/arch/arm/mach-omap1/include/mach/camera.h b/arch/arm/mach-omap1/include/mach/camera.h
new file mode 100644
index 000000000000..847d00f0bb0a
--- /dev/null
+++ b/arch/arm/mach-omap1/include/mach/camera.h
@@ -0,0 +1,13 @@
+#ifndef __ASM_ARCH_CAMERA_H_
+#define __ASM_ARCH_CAMERA_H_
+
+#include <media/omap1_camera.h>
+
+void omap1_camera_init(void *);
+
+static inline void omap1_set_camera_info(struct omap1_cam_platform_data *info)
+{
+ omap1_camera_init(info);
+}
+
+#endif /* __ASM_ARCH_CAMERA_H_ */
diff --git a/arch/arm/mach-omap1/include/mach/vmalloc.h b/arch/arm/mach-omap1/include/mach/vmalloc.h
index b001f67d695b..22ec4a479577 100644
--- a/arch/arm/mach-omap1/include/mach/vmalloc.h
+++ b/arch/arm/mach-omap1/include/mach/vmalloc.h
@@ -17,4 +17,4 @@
* along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*/
-#define VMALLOC_END 0xd8000000
+#define VMALLOC_END 0xd8000000UL
diff --git a/arch/arm/mach-omap1/pm_bus.c b/arch/arm/mach-omap1/pm_bus.c
new file mode 100644
index 000000000000..8b66392be745
--- /dev/null
+++ b/arch/arm/mach-omap1/pm_bus.c
@@ -0,0 +1,98 @@
+/*
+ * Runtime PM support code for OMAP1
+ *
+ * Author: Kevin Hilman, Deep Root Systems, LLC
+ *
+ * Copyright (C) 2010 Texas Instruments, Inc.
+ *
+ * This file is licensed under the terms of the GNU General Public
+ * License version 2. This program is licensed "as is" without any
+ * warranty of any kind, whether express or implied.
+ */
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/io.h>
+#include <linux/pm_runtime.h>
+#include <linux/platform_device.h>
+#include <linux/mutex.h>
+#include <linux/clk.h>
+#include <linux/err.h>
+
+#include <plat/omap_device.h>
+#include <plat/omap-pm.h>
+
+#ifdef CONFIG_PM_RUNTIME
+static int omap1_pm_runtime_suspend(struct device *dev)
+{
+ struct clk *iclk, *fclk;
+ int ret = 0;
+
+ dev_dbg(dev, "%s\n", __func__);
+
+ ret = pm_generic_runtime_suspend(dev);
+
+ fclk = clk_get(dev, "fck");
+ if (!IS_ERR(fclk)) {
+ clk_disable(fclk);
+ clk_put(fclk);
+ }
+
+ iclk = clk_get(dev, "ick");
+ if (!IS_ERR(iclk)) {
+ clk_disable(iclk);
+ clk_put(iclk);
+ }
+
+ return 0;
+};
+
+static int omap1_pm_runtime_resume(struct device *dev)
+{
+ int ret = 0;
+ struct clk *iclk, *fclk;
+
+ dev_dbg(dev, "%s\n", __func__);
+
+ iclk = clk_get(dev, "ick");
+ if (!IS_ERR(iclk)) {
+ clk_enable(iclk);
+ clk_put(iclk);
+ }
+
+ fclk = clk_get(dev, "fck");
+ if (!IS_ERR(fclk)) {
+ clk_enable(fclk);
+ clk_put(fclk);
+ }
+
+ return pm_generic_runtime_resume(dev);
+};
+
+static int __init omap1_pm_runtime_init(void)
+{
+ const struct dev_pm_ops *pm;
+ struct dev_pm_ops *omap_pm;
+
+ pm = platform_bus_get_pm_ops();
+ if (!pm) {
+ pr_err("%s: unable to get dev_pm_ops from platform_bus\n",
+ __func__);
+ return -ENODEV;
+ }
+
+ omap_pm = kmemdup(pm, sizeof(struct dev_pm_ops), GFP_KERNEL);
+ if (!omap_pm) {
+ pr_err("%s: unable to alloc memory for new dev_pm_ops\n",
+ __func__);
+ return -ENOMEM;
+ }
+
+ omap_pm->runtime_suspend = omap1_pm_runtime_suspend;
+ omap_pm->runtime_resume = omap1_pm_runtime_resume;
+
+ platform_bus_set_pm_ops(omap_pm);
+
+ return 0;
+}
+core_initcall(omap1_pm_runtime_init);
+#endif /* CONFIG_PM_RUNTIME */
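
With these bus-level runtime PM callbacks installed, an OMAP1 driver no longer has to manage its "ick"/"fck" clocks by hand; plain runtime PM calls are enough to have them gated while the device is idle. A hedged driver fragment showing the intended usage (the device and the register access are placeholders):

#include <linux/platform_device.h>
#include <linux/pm_runtime.h>

/* Placeholder fragment: pm_runtime_enable() is assumed to have been
 * called in probe().  get_sync resumes the device, which re-enables
 * its clocks through omap1_pm_runtime_resume(); put_sync lets them
 * be disabled again once the device goes idle. */
static int example_touch_hw(struct platform_device *pdev)
{
	int ret;

	ret = pm_runtime_get_sync(&pdev->dev);
	if (ret < 0) {
		pm_runtime_put_noidle(&pdev->dev);
		return ret;
	}

	/* ... access the device registers here ... */

	pm_runtime_put_sync(&pdev->dev);
	return 0;
}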
diff --git a/arch/arm/mach-omap2/Kconfig b/arch/arm/mach-omap2/Kconfig
index b48bacf0a7aa..ab784bfde908 100644
--- a/arch/arm/mach-omap2/Kconfig
+++ b/arch/arm/mach-omap2/Kconfig
@@ -11,9 +11,8 @@ config ARCH_OMAP2PLUS_TYPICAL
select PM_RUNTIME
select VFP
select NEON if ARCH_OMAP3 || ARCH_OMAP4
- select SERIAL_8250
- select SERIAL_CORE_CONSOLE
- select SERIAL_8250_CONSOLE
+ select SERIAL_OMAP
+ select SERIAL_OMAP_CONSOLE
select I2C
select I2C_OMAP
select MFD
@@ -35,7 +34,7 @@ config ARCH_OMAP3
default y
select CPU_V7
select USB_ARCH_HAS_EHCI
- select ARM_L1_CACHE_SHIFT_6
+ select ARM_L1_CACHE_SHIFT_6 if !ARCH_OMAP4
config ARCH_OMAP4
bool "TI OMAP4"
@@ -43,6 +42,8 @@ config ARCH_OMAP4
depends on ARCH_OMAP2PLUS
select CPU_V7
select ARM_GIC
+ select PL310_ERRATA_588369
+ select ARM_ERRATA_720789
comment "OMAP Core Type"
depends on ARCH_OMAP2
@@ -99,20 +100,20 @@ config MACH_OMAP2_TUSB6010
config MACH_OMAP_H4
bool "OMAP 2420 H4 board"
- depends on ARCH_OMAP2
+ depends on ARCH_OMAP2420
default y
select OMAP_PACKAGE_ZAF
select OMAP_DEBUG_DEVICES
config MACH_OMAP_APOLLON
bool "OMAP 2420 Apollon board"
- depends on ARCH_OMAP2
+ depends on ARCH_OMAP2420
default y
select OMAP_PACKAGE_ZAC
config MACH_OMAP_2430SDP
bool "OMAP 2430 SDP board"
- depends on ARCH_OMAP2
+ depends on ARCH_OMAP2430
default y
select OMAP_PACKAGE_ZAC
@@ -135,6 +136,26 @@ config MACH_OMAP_LDP
default y
select OMAP_PACKAGE_CBB
+config MACH_OMAP3530_LV_SOM
+ bool "OMAP3 Logic 3530 LV SOM board"
+ depends on ARCH_OMAP3
+ select OMAP_PACKAGE_CBB
+ default y
+ help
+ Support for the LogicPD OMAP3530 SOM Development kit
+ for full description please see the products webpage at
+ http://www.logicpd.com/products/development-kits/texas-instruments-zoom%E2%84%A2-omap35x-development-kit
+
+config MACH_OMAP3_TORPEDO
+ bool "OMAP3 Logic 35x Torpedo board"
+ depends on ARCH_OMAP3
+ select OMAP_PACKAGE_CBB
+ default y
+ help
+ Support for the LogicPD OMAP35x Torpedo Development kit
+ for full description please see the products webpage at
+ http://www.logicpd.com/products/development-kits/zoom-omap35x-torpedo-development-kit
+
config MACH_OVERO
bool "Gumstix Overo board"
depends on ARCH_OMAP3
@@ -200,12 +221,18 @@ config MACH_OMAP_ZOOM2
depends on ARCH_OMAP3
default y
select OMAP_PACKAGE_CBB
+ select SERIAL_8250
+ select SERIAL_CORE_CONSOLE
+ select SERIAL_8250_CONSOLE
config MACH_OMAP_ZOOM3
bool "OMAP3630 Zoom3 board"
depends on ARCH_OMAP3
default y
select OMAP_PACKAGE_CBP
+ select SERIAL_8250
+ select SERIAL_CORE_CONSOLE
+ select SERIAL_8250_CONSOLE
config MACH_CM_T35
bool "CompuLab CM-T35 module"
@@ -214,12 +241,25 @@ config MACH_CM_T35
select OMAP_PACKAGE_CUS
select OMAP_MUX
+config MACH_CM_T3517
+ bool "CompuLab CM-T3517 module"
+ depends on ARCH_OMAP3
+ default y
+ select OMAP_PACKAGE_CBB
+ select OMAP_MUX
+
config MACH_IGEP0020
bool "IGEP v2 board"
depends on ARCH_OMAP3
default y
select OMAP_PACKAGE_CBB
+config MACH_IGEP0030
+ bool "IGEP OMAP3 module"
+ depends on ARCH_OMAP3
+ default y
+ select OMAP_PACKAGE_CBB
+
config MACH_SBC3530
bool "OMAP3 SBC STALKER board"
depends on ARCH_OMAP3
diff --git a/arch/arm/mach-omap2/Makefile b/arch/arm/mach-omap2/Makefile
index 88d3a1e920f5..60e51bcf53bd 100644
--- a/arch/arm/mach-omap2/Makefile
+++ b/arch/arm/mach-omap2/Makefile
@@ -3,9 +3,10 @@
#
# Common support
-obj-y := id.o io.o control.o mux.o devices.o serial.o gpmc.o timer-gp.o pm.o
+obj-y := id.o io.o control.o mux.o devices.o serial.o gpmc.o timer-gp.o pm.o \
+ common.o
-omap-2-3-common = irq.o sdrc.o
+omap-2-3-common = irq.o sdrc.o prm2xxx_3xxx.o
hwmod-common = omap_hwmod.o \
omap_hwmod_common_data.o
prcm-common = prcm.o powerdomain.o
@@ -15,7 +16,7 @@ clock-common = clock.o clock_common_data.o \
obj-$(CONFIG_ARCH_OMAP2) += $(omap-2-3-common) $(prcm-common) $(hwmod-common)
obj-$(CONFIG_ARCH_OMAP3) += $(omap-2-3-common) $(prcm-common) $(hwmod-common)
-obj-$(CONFIG_ARCH_OMAP4) += $(prcm-common) $(hwmod-common)
+obj-$(CONFIG_ARCH_OMAP4) += $(prcm-common) prm44xx.o $(hwmod-common)
obj-$(CONFIG_OMAP_MCBSP) += mcbsp.o
@@ -49,14 +50,18 @@ obj-$(CONFIG_ARCH_OMAP2) += sdrc2xxx.o
# Power Management
ifeq ($(CONFIG_PM),y)
obj-$(CONFIG_ARCH_OMAP2) += pm24xx.o
-obj-$(CONFIG_ARCH_OMAP2) += sleep24xx.o
-obj-$(CONFIG_ARCH_OMAP3) += pm34xx.o sleep34xx.o cpuidle34xx.o
-obj-$(CONFIG_ARCH_OMAP4) += pm44xx.o
+obj-$(CONFIG_ARCH_OMAP2) += sleep24xx.o pm_bus.o
+obj-$(CONFIG_ARCH_OMAP3) += pm34xx.o sleep34xx.o cpuidle34xx.o pm_bus.o
+obj-$(CONFIG_ARCH_OMAP4) += pm44xx.o pm_bus.o
obj-$(CONFIG_PM_DEBUG) += pm-debug.o
AFLAGS_sleep24xx.o :=-Wa,-march=armv6
AFLAGS_sleep34xx.o :=-Wa,-march=armv7-a
+ifeq ($(CONFIG_PM_VERBOSE),y)
+CFLAGS_pm_bus.o += -DDEBUG
+endif
+
endif
# PRCM
@@ -87,6 +92,7 @@ obj-$(CONFIG_ARCH_OMAP2430) += opp2430_data.o
obj-$(CONFIG_ARCH_OMAP2420) += omap_hwmod_2420_data.o
obj-$(CONFIG_ARCH_OMAP2430) += omap_hwmod_2430_data.o
obj-$(CONFIG_ARCH_OMAP3) += omap_hwmod_3xxx_data.o
+obj-$(CONFIG_ARCH_OMAP4) += omap_hwmod_44xx_data.o
# EMU peripherals
obj-$(CONFIG_OMAP3_EMU) += emu.o
@@ -102,6 +108,10 @@ obj-y += $(iommu-m) $(iommu-y)
i2c-omap-$(CONFIG_I2C_OMAP) := i2c.o
obj-y += $(i2c-omap-m) $(i2c-omap-y)
+ifneq ($(CONFIG_TIDSPBRIDGE),)
+obj-y += dsp.o
+endif
+
# Specific board support
obj-$(CONFIG_MACH_OMAP_GENERIC) += board-generic.o
obj-$(CONFIG_MACH_OMAP_H4) += board-h4.o
@@ -115,6 +125,10 @@ obj-$(CONFIG_MACH_DEVKIT8000) += board-devkit8000.o \
obj-$(CONFIG_MACH_OMAP_LDP) += board-ldp.o \
board-flash.o \
hsmmc.o
+obj-$(CONFIG_MACH_OMAP3530_LV_SOM) += board-omap3logic.o \
+ hsmmc.o
+obj-$(CONFIG_MACH_OMAP3_TORPEDO) += board-omap3logic.o \
+ hsmmc.o
obj-$(CONFIG_MACH_OVERO) += board-overo.o \
hsmmc.o
obj-$(CONFIG_MACH_OMAP3EVM) += board-omap3evm.o \
@@ -146,8 +160,11 @@ obj-$(CONFIG_MACH_OMAP_3630SDP) += board-3630sdp.o \
hsmmc.o
obj-$(CONFIG_MACH_CM_T35) += board-cm-t35.o \
hsmmc.o
+obj-$(CONFIG_MACH_CM_T3517) += board-cm-t3517.o
obj-$(CONFIG_MACH_IGEP0020) += board-igep0020.o \
hsmmc.o
+obj-$(CONFIG_MACH_IGEP0030) += board-igep0030.o \
+ hsmmc.o
obj-$(CONFIG_MACH_OMAP3_TOUCHBOOK) += board-omap3touchbook.o \
hsmmc.o
obj-$(CONFIG_MACH_OMAP_4430SDP) += board-4430sdp.o \
@@ -174,3 +191,6 @@ obj-y += $(nand-m) $(nand-y)
smc91x-$(CONFIG_SMC91X) := gpmc-smc91x.o
obj-y += $(smc91x-m) $(smc91x-y)
+
+smsc911x-$(CONFIG_SMSC911X) := gpmc-smsc911x.o
+obj-y += $(smsc911x-m) $(smsc911x-y)
diff --git a/arch/arm/mach-omap2/board-2430sdp.c b/arch/arm/mach-omap2/board-2430sdp.c
index b857ce484510..b527f8d187ad 100644
--- a/arch/arm/mach-omap2/board-2430sdp.c
+++ b/arch/arm/mach-omap2/board-2430sdp.c
@@ -19,6 +19,7 @@
#include <linux/mtd/mtd.h>
#include <linux/mtd/partitions.h>
#include <linux/mtd/physmap.h>
+#include <linux/mmc/host.h>
#include <linux/delay.h>
#include <linux/i2c/twl.h>
#include <linux/err.h>
@@ -190,7 +191,7 @@ static int __init omap2430_i2c_init(void)
static struct omap2_hsmmc_info mmc[] __initdata = {
{
.mmc = 1,
- .wires = 4,
+ .caps = MMC_CAP_4_BIT_DATA,
.gpio_cd = -EINVAL,
.gpio_wp = -EINVAL,
.ext_clock = 1,
diff --git a/arch/arm/mach-omap2/board-3430sdp.c b/arch/arm/mach-omap2/board-3430sdp.c
index a5b095cf2adc..4e3742c512b8 100644
--- a/arch/arm/mach-omap2/board-3430sdp.c
+++ b/arch/arm/mach-omap2/board-3430sdp.c
@@ -24,6 +24,7 @@
#include <linux/regulator/machine.h>
#include <linux/io.h>
#include <linux/gpio.h>
+#include <linux/mmc/host.h>
#include <mach/hardware.h>
#include <asm/mach-types.h>
@@ -38,15 +39,14 @@
#include <plat/gpmc.h>
#include <plat/display.h>
-#include <plat/control.h>
#include <plat/gpmc-smc91x.h>
-#include <mach/board-flash.h>
-
+#include "board-flash.h"
#include "mux.h"
#include "sdram-qimonda-hyb18m512160af-6.h"
#include "hsmmc.h"
#include "pm.h"
+#include "control.h"
#define CONFIG_DISABLE_HFCLK 1
@@ -76,7 +76,7 @@ static struct cpuidle_params omap3_cpuidle_params_table[] = {
{1, 10000, 30000, 300000},
};
-static int board_keymap[] = {
+static uint32_t board_keymap[] = {
KEY(0, 0, KEY_LEFT),
KEY(0, 1, KEY_RIGHT),
KEY(0, 2, KEY_A),
@@ -353,12 +353,12 @@ static struct omap2_hsmmc_info mmc[] = {
/* 8 bits (default) requires S6.3 == ON,
* so the SIM card isn't used; else 4 bits.
*/
- .wires = 8,
+ .caps = MMC_CAP_4_BIT_DATA | MMC_CAP_8_BIT_DATA,
.gpio_wp = 4,
},
{
.mmc = 2,
- .wires = 8,
+ .caps = MMC_CAP_4_BIT_DATA | MMC_CAP_8_BIT_DATA,
.gpio_wp = 7,
},
{} /* Terminator */
diff --git a/arch/arm/mach-omap2/board-3630sdp.c b/arch/arm/mach-omap2/board-3630sdp.c
index fd27ac0860b0..bbcf580fa097 100644
--- a/arch/arm/mach-omap2/board-3630sdp.c
+++ b/arch/arm/mach-omap2/board-3630sdp.c
@@ -21,8 +21,8 @@
#include <plat/usb.h>
#include <mach/board-zoom.h>
-#include <mach/board-flash.h>
+#include "board-flash.h"
#include "mux.h"
#include "sdram-hynix-h8mbx00u0mer-0em.h"
@@ -208,7 +208,6 @@ static struct flash_partitions sdp_flash_partitions[] = {
static void __init omap_sdp_init(void)
{
omap3_mux_init(board_mux, OMAP_PACKAGE_CBP);
- omap_serial_init();
zoom_peripherals_init();
board_smc91x_init();
board_flash_init(sdp_flash_partitions, chip_sel_sdp);
diff --git a/arch/arm/mach-omap2/board-4430sdp.c b/arch/arm/mach-omap2/board-4430sdp.c
index 0b6a65f3a10a..df5a425a49d1 100644
--- a/arch/arm/mach-omap2/board-4430sdp.c
+++ b/arch/arm/mach-omap2/board-4430sdp.c
@@ -20,6 +20,7 @@
#include <linux/usb/otg.h>
#include <linux/spi/spi.h>
#include <linux/i2c/twl.h>
+#include <linux/gpio_keys.h>
#include <linux/regulator/machine.h>
#include <linux/leds.h>
@@ -31,15 +32,18 @@
#include <plat/board.h>
#include <plat/common.h>
-#include <plat/control.h>
-#include <plat/timer-gp.h>
#include <plat/usb.h>
#include <plat/mmc.h>
+
#include "hsmmc.h"
+#include "timer-gp.h"
+#include "control.h"
#define ETH_KS8851_IRQ 34
#define ETH_KS8851_POWER_ON 48
#define ETH_KS8851_QUART 138
+#define OMAP4_SFH7741_SENSOR_OUTPUT_GPIO 184
+#define OMAP4_SFH7741_ENABLE_GPIO 188
static struct gpio_led sdp4430_gpio_leds[] = {
{
@@ -77,11 +81,47 @@ static struct gpio_led sdp4430_gpio_leds[] = {
};
+static struct gpio_keys_button sdp4430_gpio_keys[] = {
+ {
+ .desc = "Proximity Sensor",
+ .type = EV_SW,
+ .code = SW_FRONT_PROXIMITY,
+ .gpio = OMAP4_SFH7741_SENSOR_OUTPUT_GPIO,
+ .active_low = 0,
+ }
+};
+
static struct gpio_led_platform_data sdp4430_led_data = {
.leds = sdp4430_gpio_leds,
.num_leds = ARRAY_SIZE(sdp4430_gpio_leds),
};
+static int omap_prox_activate(struct device *dev)
+{
+ gpio_set_value(OMAP4_SFH7741_ENABLE_GPIO , 1);
+ return 0;
+}
+
+static void omap_prox_deactivate(struct device *dev)
+{
+ gpio_set_value(OMAP4_SFH7741_ENABLE_GPIO , 0);
+}
+
+static struct gpio_keys_platform_data sdp4430_gpio_keys_data = {
+ .buttons = sdp4430_gpio_keys,
+ .nbuttons = ARRAY_SIZE(sdp4430_gpio_keys),
+ .enable = omap_prox_activate,
+ .disable = omap_prox_deactivate,
+};
+
+static struct platform_device sdp4430_gpio_keys_device = {
+ .name = "gpio-keys",
+ .id = -1,
+ .dev = {
+ .platform_data = &sdp4430_gpio_keys_data,
+ },
+};
+
static struct platform_device sdp4430_leds_gpio = {
.name = "leds-gpio",
.id = -1,
@@ -161,6 +201,7 @@ static struct platform_device sdp4430_lcd_device = {
static struct platform_device *sdp4430_devices[] __initdata = {
&sdp4430_lcd_device,
+ &sdp4430_gpio_keys_device,
&sdp4430_leds_gpio,
};
@@ -193,15 +234,16 @@ static struct omap_musb_board_data musb_board_data = {
static struct omap2_hsmmc_info mmc[] = {
{
.mmc = 1,
- .wires = 8,
+ .caps = MMC_CAP_4_BIT_DATA | MMC_CAP_8_BIT_DATA,
.gpio_wp = -EINVAL,
},
{
.mmc = 2,
- .wires = 8,
+ .caps = MMC_CAP_4_BIT_DATA | MMC_CAP_8_BIT_DATA,
.gpio_cd = -EINVAL,
.gpio_wp = -EINVAL,
.nonremovable = true,
+ .ocr_mask = MMC_VDD_29_30,
},
{} /* Terminator */
};
@@ -227,16 +269,27 @@ static int omap4_twl6030_hsmmc_late_init(struct device *dev)
struct omap_mmc_platform_data *pdata = dev->platform_data;
/* Setting MMC1 Card detect Irq */
- if (pdev->id == 0)
+ if (pdev->id == 0) {
+ ret = twl6030_mmc_card_detect_config();
+ if (ret)
+ pr_err("Failed configuring MMC1 card detect\n");
pdata->slots[0].card_detect_irq = TWL6030_IRQ_BASE +
MMCDETECT_INTR_OFFSET;
+ pdata->slots[0].card_detect = twl6030_mmc_card_detect;
+ }
return ret;
}
static __init void omap4_twl6030_hsmmc_set_late_init(struct device *dev)
{
- struct omap_mmc_platform_data *pdata = dev->platform_data;
+ struct omap_mmc_platform_data *pdata;
+ /* dev can be null if CONFIG_MMC_OMAP_HS is not set */
+ if (!dev) {
+ pr_err("Failed %s\n", __func__);
+ return;
+ }
+ pdata = dev->platform_data;
pdata->init = omap4_twl6030_hsmmc_late_init;
}
@@ -412,6 +465,11 @@ static struct i2c_board_info __initdata sdp4430_i2c_3_boardinfo[] = {
I2C_BOARD_INFO("tmp105", 0x48),
},
};
+static struct i2c_board_info __initdata sdp4430_i2c_4_boardinfo[] = {
+ {
+ I2C_BOARD_INFO("hmc5843", 0x1e),
+ },
+};
static int __init omap4_i2c_init(void)
{
/*
@@ -423,14 +481,36 @@ static int __init omap4_i2c_init(void)
omap_register_i2c_bus(2, 400, NULL, 0);
omap_register_i2c_bus(3, 400, sdp4430_i2c_3_boardinfo,
ARRAY_SIZE(sdp4430_i2c_3_boardinfo));
- omap_register_i2c_bus(4, 400, NULL, 0);
+ omap_register_i2c_bus(4, 400, sdp4430_i2c_4_boardinfo,
+ ARRAY_SIZE(sdp4430_i2c_4_boardinfo));
return 0;
}
+
+static void __init omap_sfh7741prox_init(void)
+{
+ int error;
+
+ error = gpio_request(OMAP4_SFH7741_ENABLE_GPIO, "sfh7741");
+ if (error < 0) {
+ pr_err("%s:failed to request GPIO %d, error %d\n",
+ __func__, OMAP4_SFH7741_ENABLE_GPIO, error);
+ return;
+ }
+
+ error = gpio_direction_output(OMAP4_SFH7741_ENABLE_GPIO , 0);
+ if (error < 0) {
+ pr_err("%s: GPIO configuration failed: GPIO %d,error %d\n",
+ __func__, OMAP4_SFH7741_ENABLE_GPIO, error);
+ gpio_free(OMAP4_SFH7741_ENABLE_GPIO);
+ }
+}
+
static void __init omap_4430sdp_init(void)
{
int status;
omap4_i2c_init();
+ omap_sfh7741prox_init();
platform_add_devices(sdp4430_devices, ARRAY_SIZE(sdp4430_devices));
omap_serial_init();
omap4_twl6030_hsmmc_init(mmc);
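The hunks above are the first in this series to replace the old .wires bus-width field of struct omap2_hsmmc_info with capability flags from <linux/mmc/host.h>; the same substitution recurs in the board files below. A minimal sketch of the mapping the conversions follow, assuming only the 4- and 8-line slot configurations used by these boards (the helper name is illustrative, not part of the patch):

#include <linux/mmc/host.h>

/* Sketch: map the old data-line count to MMC_CAP_* flags.  An 8-line slot
 * can also run in 4-bit mode, hence both flags; a 1-line slot needs none. */
static inline unsigned int hsmmc_wires_to_caps(int wires)
{
	switch (wires) {
	case 8:
		return MMC_CAP_4_BIT_DATA | MMC_CAP_8_BIT_DATA;
	case 4:
		return MMC_CAP_4_BIT_DATA;
	default:
		return 0;
	}
}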
diff --git a/arch/arm/mach-omap2/board-am3517evm.c b/arch/arm/mach-omap2/board-am3517evm.c
index d547036aff3f..07399505312b 100644
--- a/arch/arm/mach-omap2/board-am3517evm.c
+++ b/arch/arm/mach-omap2/board-am3517evm.c
@@ -18,6 +18,7 @@
#include <linux/kernel.h>
#include <linux/init.h>
+#include <linux/clk.h>
#include <linux/platform_device.h>
#include <linux/gpio.h>
#include <linux/i2c/pca953x.h>
@@ -32,25 +33,43 @@
#include <plat/board.h>
#include <plat/common.h>
-#include <plat/control.h>
#include <plat/usb.h>
#include <plat/display.h>
#include "mux.h"
+#include "control.h"
-#define AM35XX_EVM_PHY_MASK (0xF)
#define AM35XX_EVM_MDIO_FREQUENCY (1000000)
+static struct mdio_platform_data am3517_evm_mdio_pdata = {
+ .bus_freq = AM35XX_EVM_MDIO_FREQUENCY,
+};
+
+static struct resource am3517_mdio_resources[] = {
+ {
+ .start = AM35XX_IPSS_EMAC_BASE + AM35XX_EMAC_MDIO_OFFSET,
+ .end = AM35XX_IPSS_EMAC_BASE + AM35XX_EMAC_MDIO_OFFSET +
+ SZ_4K - 1,
+ .flags = IORESOURCE_MEM,
+ },
+};
+
+static struct platform_device am3517_mdio_device = {
+ .name = "davinci_mdio",
+ .id = 0,
+ .num_resources = ARRAY_SIZE(am3517_mdio_resources),
+ .resource = am3517_mdio_resources,
+ .dev.platform_data = &am3517_evm_mdio_pdata,
+};
+
static struct emac_platform_data am3517_evm_emac_pdata = {
- .phy_mask = AM35XX_EVM_PHY_MASK,
- .mdio_max_freq = AM35XX_EVM_MDIO_FREQUENCY,
.rmii_en = 1,
};
static struct resource am3517_emac_resources[] = {
{
.start = AM35XX_IPSS_EMAC_BASE,
- .end = AM35XX_IPSS_EMAC_BASE + 0x3FFFF,
+ .end = AM35XX_IPSS_EMAC_BASE + 0x2FFFF,
.flags = IORESOURCE_MEM,
},
{
@@ -106,14 +125,13 @@ static void am3517_disable_ethernet_int(void)
regval = omap_ctrl_readl(AM35XX_CONTROL_LVL_INTR_CLEAR);
}
-void am3517_evm_ethernet_init(struct emac_platform_data *pdata)
+static void am3517_evm_ethernet_init(struct emac_platform_data *pdata)
{
unsigned int regval;
pdata->ctrl_reg_offset = AM35XX_EMAC_CNTRL_OFFSET;
pdata->ctrl_mod_reg_offset = AM35XX_EMAC_CNTRL_MOD_OFFSET;
pdata->ctrl_ram_offset = AM35XX_EMAC_CNTRL_RAM_OFFSET;
- pdata->mdio_reg_offset = AM35XX_EMAC_MDIO_OFFSET;
pdata->ctrl_ram_size = AM35XX_EMAC_CNTRL_RAM_SIZE;
pdata->version = EMAC_VERSION_2;
pdata->hw_ram_addr = AM35XX_EMAC_HW_RAM_ADDR;
@@ -121,6 +139,9 @@ void am3517_evm_ethernet_init(struct emac_platform_data *pdata)
pdata->interrupt_disable = am3517_disable_ethernet_int;
am3517_emac_device.dev.platform_data = pdata;
platform_device_register(&am3517_emac_device);
+ platform_device_register(&am3517_mdio_device);
+ clk_add_alias(NULL, dev_name(&am3517_mdio_device.dev),
+ NULL, &am3517_emac_device.dev);
regval = omap_ctrl_readl(AM35XX_CONTROL_IP_SW_RESET);
regval = regval & (~(AM35XX_CPGMACSS_SW_RST));
@@ -139,7 +160,6 @@ void am3517_evm_ethernet_init(struct emac_platform_data *pdata)
static struct i2c_board_info __initdata am3517evm_i2c1_boardinfo[] = {
{
I2C_BOARD_INFO("s35390a", 0x30),
- .type = "s35390a",
},
};
@@ -347,7 +367,7 @@ static struct omap_dss_board_info am3517_evm_dss_data = {
.default_device = &am3517_evm_lcd_device,
};
-struct platform_device am3517_evm_dss_device = {
+static struct platform_device am3517_evm_dss_device = {
.name = "omapdss",
.id = -1,
.dev = {
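The MDIO controller is split out of the EMAC into its own davinci_mdio platform device above, and clk_add_alias() is used so the new device can reuse the clock already bound to the EMAC instead of gaining a clock node of its own. A rough sketch of what that single call achieves (the wrapper function name and the exact headers are illustrative; clk_add_alias() itself comes from the ARM clkdev support):

#include <linux/clk.h>
#include <linux/device.h>
#include <linux/platform_device.h>

/* Sketch: register a clkdev alias so that clk_get(&mdio->dev, NULL)
 * resolves to the same struct clk that clk_get(&emac->dev, NULL) would
 * return, matching the call made in am3517_evm_ethernet_init() above. */
static void am3517_share_emac_clock(struct platform_device *mdio,
				    struct platform_device *emac)
{
	clk_add_alias(NULL, dev_name(&mdio->dev), NULL, &emac->dev);
}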
diff --git a/arch/arm/mach-omap2/board-apollon.c b/arch/arm/mach-omap2/board-apollon.c
index 68f07f5f441a..2c6db1aaeb29 100644
--- a/arch/arm/mach-omap2/board-apollon.c
+++ b/arch/arm/mach-omap2/board-apollon.c
@@ -39,9 +39,9 @@
#include <plat/board.h>
#include <plat/common.h>
#include <plat/gpmc.h>
-#include <plat/control.h>
#include "mux.h"
+#include "control.h"
/* LED & Switch macros */
#define LED0_GPIO13 13
diff --git a/arch/arm/mach-omap2/board-cm-t35.c b/arch/arm/mach-omap2/board-cm-t35.c
index 934d9380c372..63f764e2af3f 100644
--- a/arch/arm/mach-omap2/board-cm-t35.c
+++ b/arch/arm/mach-omap2/board-cm-t35.c
@@ -31,6 +31,7 @@
#include <linux/i2c/at24.h>
#include <linux/i2c/twl.h>
#include <linux/regulator/machine.h>
+#include <linux/mmc/host.h>
#include <linux/spi/spi.h>
#include <linux/spi/tdo24m.h>
@@ -237,8 +238,6 @@ static inline void cm_t35_init_nand(void) {}
defined(CONFIG_TOUCHSCREEN_ADS7846_MODULE)
#include <linux/spi/ads7846.h>
-#include <plat/mcspi.h>
-
static struct omap2_mcspi_device_config ads7846_mcspi_config = {
.turbo_mode = 0,
.single_channel = 1, /* 0: slave, 1: master */
@@ -558,7 +557,7 @@ static struct twl4030_usb_data cm_t35_usb_data = {
.usb_mode = T2_USB_MODE_ULPI,
};
-static int cm_t35_keymap[] = {
+static uint32_t cm_t35_keymap[] = {
KEY(0, 0, KEY_A), KEY(0, 1, KEY_B), KEY(0, 2, KEY_LEFT),
KEY(1, 0, KEY_UP), KEY(1, 1, KEY_ENTER), KEY(1, 2, KEY_DOWN),
KEY(2, 0, KEY_RIGHT), KEY(2, 1, KEY_C), KEY(2, 2, KEY_D),
@@ -579,14 +578,14 @@ static struct twl4030_keypad_data cm_t35_kp_data = {
static struct omap2_hsmmc_info mmc[] = {
{
.mmc = 1,
- .wires = 4,
+ .caps = MMC_CAP_4_BIT_DATA,
.gpio_cd = -EINVAL,
.gpio_wp = -EINVAL,
},
{
.mmc = 2,
- .wires = 4,
+ .caps = MMC_CAP_4_BIT_DATA,
.transceiver = 1,
.gpio_cd = -EINVAL,
.gpio_wp = -EINVAL,
diff --git a/arch/arm/mach-omap2/board-cm-t3517.c b/arch/arm/mach-omap2/board-cm-t3517.c
new file mode 100644
index 000000000000..1dd303e9a267
--- /dev/null
+++ b/arch/arm/mach-omap2/board-cm-t3517.c
@@ -0,0 +1,292 @@
+/*
+ * linux/arch/arm/mach-omap2/board-cm-t3517.c
+ *
+ * Support for the CompuLab CM-T3517 modules
+ *
+ * Copyright (C) 2010 CompuLab, Ltd.
+ * Author: Igor Grinberg <grinberg@compulab.co.il>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
+ * 02110-1301 USA
+ *
+ */
+
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/platform_device.h>
+#include <linux/delay.h>
+#include <linux/gpio.h>
+#include <linux/leds.h>
+#include <linux/rtc-v3020.h>
+#include <linux/mtd/mtd.h>
+#include <linux/mtd/nand.h>
+#include <linux/mtd/partitions.h>
+#include <linux/can/platform/ti_hecc.h>
+
+#include <asm/mach-types.h>
+#include <asm/mach/arch.h>
+#include <asm/mach/map.h>
+
+#include <plat/board.h>
+#include <plat/common.h>
+#include <plat/usb.h>
+#include <plat/nand.h>
+#include <plat/gpmc.h>
+
+#include <mach/am35xx.h>
+
+#include "mux.h"
+#include "control.h"
+
+#if defined(CONFIG_LEDS_GPIO) || defined(CONFIG_LEDS_GPIO_MODULE)
+static struct gpio_led cm_t3517_leds[] = {
+ [0] = {
+ .gpio = 186,
+ .name = "cm-t3517:green",
+ .default_trigger = "heartbeat",
+ .active_low = 0,
+ },
+};
+
+static struct gpio_led_platform_data cm_t3517_led_pdata = {
+ .num_leds = ARRAY_SIZE(cm_t3517_leds),
+ .leds = cm_t3517_leds,
+};
+
+static struct platform_device cm_t3517_led_device = {
+ .name = "leds-gpio",
+ .id = -1,
+ .dev = {
+ .platform_data = &cm_t3517_led_pdata,
+ },
+};
+
+static void __init cm_t3517_init_leds(void)
+{
+ platform_device_register(&cm_t3517_led_device);
+}
+#else
+static inline void cm_t3517_init_leds(void) {}
+#endif
+
+#if defined(CONFIG_CAN_TI_HECC) || defined(CONFIG_CAN_TI_HECC_MODULE)
+static struct resource cm_t3517_hecc_resources[] = {
+ {
+ .start = AM35XX_IPSS_HECC_BASE,
+ .end = AM35XX_IPSS_HECC_BASE + SZ_16K - 1,
+ .flags = IORESOURCE_MEM,
+ },
+ {
+ .start = INT_35XX_HECC0_IRQ,
+ .end = INT_35XX_HECC0_IRQ,
+ .flags = IORESOURCE_IRQ,
+ },
+};
+
+static struct ti_hecc_platform_data cm_t3517_hecc_pdata = {
+ .scc_hecc_offset = AM35XX_HECC_SCC_HECC_OFFSET,
+ .scc_ram_offset = AM35XX_HECC_SCC_RAM_OFFSET,
+ .hecc_ram_offset = AM35XX_HECC_RAM_OFFSET,
+ .mbx_offset = AM35XX_HECC_MBOX_OFFSET,
+ .int_line = AM35XX_HECC_INT_LINE,
+ .version = AM35XX_HECC_VERSION,
+};
+
+static struct platform_device cm_t3517_hecc_device = {
+ .name = "ti_hecc",
+ .id = 1,
+ .num_resources = ARRAY_SIZE(cm_t3517_hecc_resources),
+ .resource = cm_t3517_hecc_resources,
+ .dev = {
+ .platform_data = &cm_t3517_hecc_pdata,
+ },
+};
+
+static void cm_t3517_init_hecc(void)
+{
+ platform_device_register(&cm_t3517_hecc_device);
+}
+#else
+static inline void cm_t3517_init_hecc(void) {}
+#endif
+
+#if defined(CONFIG_RTC_DRV_V3020) || defined(CONFIG_RTC_DRV_V3020_MODULE)
+#define RTC_IO_GPIO (153)
+#define RTC_WR_GPIO (154)
+#define RTC_RD_GPIO (160)
+#define RTC_CS_GPIO (163)
+
+struct v3020_platform_data cm_t3517_v3020_pdata = {
+ .use_gpio = 1,
+ .gpio_cs = RTC_CS_GPIO,
+ .gpio_wr = RTC_WR_GPIO,
+ .gpio_rd = RTC_RD_GPIO,
+ .gpio_io = RTC_IO_GPIO,
+};
+
+static struct platform_device cm_t3517_rtc_device = {
+ .name = "v3020",
+ .id = -1,
+ .dev = {
+ .platform_data = &cm_t3517_v3020_pdata,
+ }
+};
+
+static void __init cm_t3517_init_rtc(void)
+{
+ platform_device_register(&cm_t3517_rtc_device);
+}
+#else
+static inline void cm_t3517_init_rtc(void) {}
+#endif
+
+#if defined(CONFIG_USB_EHCI_HCD) || defined(CONFIG_USB_EHCI_HCD_MODULE)
+#define HSUSB1_RESET_GPIO (146)
+#define HSUSB2_RESET_GPIO (147)
+#define USB_HUB_RESET_GPIO (152)
+
+static struct ehci_hcd_omap_platform_data cm_t3517_ehci_pdata __initdata = {
+ .port_mode[0] = EHCI_HCD_OMAP_MODE_PHY,
+ .port_mode[1] = EHCI_HCD_OMAP_MODE_PHY,
+ .port_mode[2] = EHCI_HCD_OMAP_MODE_UNKNOWN,
+
+ .phy_reset = true,
+ .reset_gpio_port[0] = HSUSB1_RESET_GPIO,
+ .reset_gpio_port[1] = HSUSB2_RESET_GPIO,
+ .reset_gpio_port[2] = -EINVAL,
+};
+
+static int cm_t3517_init_usbh(void)
+{
+ int err;
+
+ err = gpio_request(USB_HUB_RESET_GPIO, "usb hub rst");
+ if (err) {
+ pr_err("CM-T3517: usb hub rst gpio request failed: %d\n", err);
+ } else {
+ gpio_direction_output(USB_HUB_RESET_GPIO, 0);
+ udelay(10);
+ gpio_set_value(USB_HUB_RESET_GPIO, 1);
+ msleep(1);
+ }
+
+ usb_ehci_init(&cm_t3517_ehci_pdata);
+
+ return 0;
+}
+#else
+static inline int cm_t3517_init_usbh(void)
+{
+ return 0;
+}
+#endif
+
+#if defined(CONFIG_MTD_NAND_OMAP2) || defined(CONFIG_MTD_NAND_OMAP2_MODULE)
+#define NAND_BLOCK_SIZE SZ_128K
+
+static struct mtd_partition cm_t3517_nand_partitions[] = {
+ {
+ .name = "xloader",
+ .offset = 0, /* Offset = 0x00000 */
+ .size = 4 * NAND_BLOCK_SIZE,
+ .mask_flags = MTD_WRITEABLE
+ },
+ {
+ .name = "uboot",
+ .offset = MTDPART_OFS_APPEND, /* Offset = 0x80000 */
+ .size = 15 * NAND_BLOCK_SIZE,
+ },
+ {
+ .name = "uboot environment",
+ .offset = MTDPART_OFS_APPEND, /* Offset = 0x260000 */
+ .size = 2 * NAND_BLOCK_SIZE,
+ },
+ {
+ .name = "linux",
+ .offset = MTDPART_OFS_APPEND, /* Offset = 0x2A0000 */
+ .size = 32 * NAND_BLOCK_SIZE,
+ },
+ {
+ .name = "rootfs",
+ .offset = MTDPART_OFS_APPEND, /* Offset = 0x6A0000 */
+ .size = MTDPART_SIZ_FULL,
+ },
+};
+
+static struct omap_nand_platform_data cm_t3517_nand_data = {
+ .parts = cm_t3517_nand_partitions,
+ .nr_parts = ARRAY_SIZE(cm_t3517_nand_partitions),
+ .dma_channel = -1, /* disable DMA in OMAP NAND driver */
+ .cs = 0,
+};
+
+static void __init cm_t3517_init_nand(void)
+{
+ if (gpmc_nand_init(&cm_t3517_nand_data) < 0)
+ pr_err("CM-T3517: NAND initialization failed\n");
+}
+#else
+static inline void cm_t3517_init_nand(void) {}
+#endif
+
+static struct omap_board_config_kernel cm_t3517_config[] __initdata = {
+};
+
+static void __init cm_t3517_init_irq(void)
+{
+ omap_board_config = cm_t3517_config;
+ omap_board_config_size = ARRAY_SIZE(cm_t3517_config);
+
+ omap2_init_common_hw(NULL, NULL);
+ omap_init_irq();
+ omap_gpio_init();
+}
+
+static struct omap_board_mux board_mux[] __initdata = {
+ /* GPIO186 - Green LED */
+ OMAP3_MUX(SYS_CLKOUT2, OMAP_MUX_MODE4 | OMAP_PIN_OUTPUT),
+ /* RTC GPIOs: IO, WR#, RD#, CS# */
+ OMAP3_MUX(MCBSP4_DR, OMAP_MUX_MODE4 | OMAP_PIN_INPUT),
+ OMAP3_MUX(MCBSP4_DX, OMAP_MUX_MODE4 | OMAP_PIN_INPUT),
+ OMAP3_MUX(MCBSP_CLKS, OMAP_MUX_MODE4 | OMAP_PIN_INPUT),
+ OMAP3_MUX(UART3_CTS_RCTX, OMAP_MUX_MODE4 | OMAP_PIN_INPUT),
+ /* HSUSB1 RESET */
+ OMAP3_MUX(UART2_TX, OMAP_MUX_MODE4 | OMAP_PIN_OUTPUT),
+ /* HSUSB2 RESET */
+ OMAP3_MUX(UART2_RX, OMAP_MUX_MODE4 | OMAP_PIN_OUTPUT),
+ /* CM-T3517 USB HUB nRESET */
+ OMAP3_MUX(MCBSP4_CLKX, OMAP_MUX_MODE4 | OMAP_PIN_OUTPUT),
+
+ { .reg_offset = OMAP_MUX_TERMINATOR },
+};
+
+static void __init cm_t3517_init(void)
+{
+ omap3_mux_init(board_mux, OMAP_PACKAGE_CBB);
+ omap_serial_init();
+ cm_t3517_init_leds();
+ cm_t3517_init_nand();
+ cm_t3517_init_rtc();
+ cm_t3517_init_usbh();
+ cm_t3517_init_hecc();
+}
+
+MACHINE_START(CM_T3517, "Compulab CM-T3517")
+ .boot_params = 0x80000100,
+ .map_io = omap3_map_io,
+ .reserve = omap_reserve,
+ .init_irq = cm_t3517_init_irq,
+ .init_machine = cm_t3517_init,
+ .timer = &omap_timer,
+MACHINE_END
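For reference, the offsets quoted in the NAND partition comments above follow from the 128 KiB erase-block size and the use of MTDPART_OFS_APPEND. A worked check of the layout (the macro and enum names are illustrative only, not part of the patch):

/* Sketch: cumulative offsets implied by the partition sizes above,
 * with NAND_BLOCK_SIZE = SZ_128K = 0x20000. */
#define CM_T3517_NAND_BLOCK 0x20000

enum cm_t3517_nand_offset {
	CM_T3517_OFF_XLOADER = 0,
	CM_T3517_OFF_UBOOT   =  4 * CM_T3517_NAND_BLOCK,                       /* 0x080000 */
	CM_T3517_OFF_ENV     = CM_T3517_OFF_UBOOT  + 15 * CM_T3517_NAND_BLOCK, /* 0x260000 */
	CM_T3517_OFF_LINUX   = CM_T3517_OFF_ENV    +  2 * CM_T3517_NAND_BLOCK, /* 0x2a0000 */
	CM_T3517_OFF_ROOTFS  = CM_T3517_OFF_LINUX  + 32 * CM_T3517_NAND_BLOCK, /* 0x6a0000 */
};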
diff --git a/arch/arm/mach-omap2/board-devkit8000.c b/arch/arm/mach-omap2/board-devkit8000.c
index 2205c20a4cdb..53ac762518bd 100644
--- a/arch/arm/mach-omap2/board-devkit8000.c
+++ b/arch/arm/mach-omap2/board-devkit8000.c
@@ -28,6 +28,7 @@
#include <linux/mtd/mtd.h>
#include <linux/mtd/partitions.h>
#include <linux/mtd/nand.h>
+#include <linux/mmc/host.h>
#include <linux/regulator/machine.h>
#include <linux/i2c/twl.h>
@@ -44,7 +45,6 @@
#include <plat/gpmc.h>
#include <plat/nand.h>
#include <plat/usb.h>
-#include <plat/timer-gp.h>
#include <plat/display.h>
#include <plat/mcspi.h>
@@ -58,6 +58,7 @@
#include "mux.h"
#include "hsmmc.h"
+#include "timer-gp.h"
#define NAND_BLOCK_SIZE SZ_128K
@@ -105,7 +106,7 @@ static struct omap_nand_platform_data devkit8000_nand_data = {
static struct omap2_hsmmc_info mmc[] = {
{
.mmc = 1,
- .wires = 8,
+ .caps = MMC_CAP_4_BIT_DATA | MMC_CAP_8_BIT_DATA,
.gpio_wp = 29,
},
{} /* Terminator */
@@ -198,7 +199,7 @@ static struct platform_device devkit8000_dss_device = {
static struct regulator_consumer_supply devkit8000_vdda_dac_supply =
REGULATOR_SUPPLY("vdda_dac", "omapdss");
-static int board_keymap[] = {
+static uint32_t board_keymap[] = {
KEY(0, 0, KEY_1),
KEY(1, 0, KEY_2),
KEY(2, 0, KEY_3),
@@ -241,9 +242,6 @@ static int devkit8000_twl_gpio_setup(struct device *dev,
mmc[0].gpio_cd = gpio + 0;
omap2_hsmmc_init(mmc);
- /* link regulators to MMC adapters */
- devkit8000_vmmc1_supply.dev = mmc[0].dev;
-
/* TWL4030_GPIO_MAX + 1 == ledB, PMU_STAT (out, active low LED) */
gpio_leds[2].gpio = gpio + TWL4030_GPIO_MAX + 1;
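Devkit8000 above, like the IGEP and Beagle boards later in the series, defers omap2_hsmmc_init() to the .setup hook of struct twl4030_gpio_platform_data, because the MMC card-detect line sits on the TWL4030 GPIO expander and its GPIO number is only known once that driver probes. A stripped-down sketch of the contract; board_mmc_info stands in for the board's omap2_hsmmc_info array and is not part of the patch:

/* Sketch: .setup runs after the twl4030 GPIO chip is registered; 'gpio'
 * is the first GPIO number of the expander, so expander line N is gpio + N. */
static int board_twl_gpio_setup(struct device *dev,
				unsigned gpio, unsigned ngpio)
{
	board_mmc_info[0].gpio_cd = gpio + 0;	/* expander GPIO_0 = MMC1 CD */
	omap2_hsmmc_init(board_mmc_info);
	return 0;
}

static struct twl4030_gpio_platform_data board_twl_gpio_data = {
	.gpio_base = OMAP_MAX_GPIO_LINES,	/* expander numbers follow the SoC GPIOs */
	.irq_base  = TWL4030_GPIO_IRQ_BASE,
	.irq_end   = TWL4030_GPIO_IRQ_END,
	.setup     = board_twl_gpio_setup,
};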
diff --git a/arch/arm/mach-omap2/board-flash.c b/arch/arm/mach-omap2/board-flash.c
index ac834aa7abf6..fd38c05bb47f 100644
--- a/arch/arm/mach-omap2/board-flash.c
+++ b/arch/arm/mach-omap2/board-flash.c
@@ -21,7 +21,8 @@
#include <plat/nand.h>
#include <plat/onenand.h>
#include <plat/tc.h>
-#include <mach/board-flash.h>
+
+#include "board-flash.h"
#define REG_FPGA_REV 0x10
#define REG_FPGA_DIP_SWITCH_INPUT2 0x60
diff --git a/arch/arm/mach-omap2/include/mach/board-flash.h b/arch/arm/mach-omap2/board-flash.h
index b2242ae2bb6f..69befe00dd2f 100644
--- a/arch/arm/mach-omap2/include/mach/board-flash.h
+++ b/arch/arm/mach-omap2/board-flash.h
@@ -26,3 +26,5 @@ struct flash_partitions {
extern void board_flash_init(struct flash_partitions [],
char chip_sel[][GPMC_CS_NUM]);
+extern void board_nand_init(struct mtd_partition *nand_parts,
+ u8 nr_parts, u8 cs);
diff --git a/arch/arm/mach-omap2/board-generic.c b/arch/arm/mach-omap2/board-generic.c
index 69064b1c6a75..b1c2c9a11c38 100644
--- a/arch/arm/mach-omap2/board-generic.c
+++ b/arch/arm/mach-omap2/board-generic.c
@@ -48,10 +48,22 @@ static void __init omap_generic_init(void)
static void __init omap_generic_map_io(void)
{
- omap2_set_globals_242x(); /* should be 242x, 243x, or 343x */
- omap242x_map_common_io();
+ if (cpu_is_omap242x()) {
+ omap2_set_globals_242x();
+ omap242x_map_common_io();
+ } else if (cpu_is_omap243x()) {
+ omap2_set_globals_243x();
+ omap243x_map_common_io();
+ } else if (cpu_is_omap34xx()) {
+ omap2_set_globals_3xxx();
+ omap34xx_map_common_io();
+ } else if (cpu_is_omap44xx()) {
+ omap2_set_globals_443x();
+ omap44xx_map_common_io();
+ }
}
+/* XXX This machine entry name should be updated */
MACHINE_START(OMAP_GENERIC, "Generic OMAP24xx")
/* Maintainer: Paul Mundt <paul.mundt@nokia.com> */
.boot_params = 0x80000100,
diff --git a/arch/arm/mach-omap2/board-h4.c b/arch/arm/mach-omap2/board-h4.c
index cc39fc866524..929993b4bf26 100644
--- a/arch/arm/mach-omap2/board-h4.c
+++ b/arch/arm/mach-omap2/board-h4.c
@@ -31,7 +31,6 @@
#include <asm/mach/arch.h>
#include <asm/mach/map.h>
-#include <plat/control.h>
#include <mach/gpio.h>
#include <plat/usb.h>
#include <plat/board.h>
@@ -42,6 +41,7 @@
#include <plat/gpmc.h>
#include "mux.h"
+#include "control.h"
#define H4_FLASH_CS 0
#define H4_SMC91X_CS 1
diff --git a/arch/arm/mach-omap2/board-igep0020.c b/arch/arm/mach-omap2/board-igep0020.c
index b62a68ba069b..5e035a58b809 100644
--- a/arch/arm/mach-omap2/board-igep0020.c
+++ b/arch/arm/mach-omap2/board-igep0020.c
@@ -20,6 +20,7 @@
#include <linux/regulator/machine.h>
#include <linux/i2c/twl.h>
+#include <linux/mmc/host.h>
#include <asm/mach-types.h>
#include <asm/mach/arch.h>
@@ -38,12 +39,61 @@
#define IGEP2_SMSC911X_CS 5
#define IGEP2_SMSC911X_GPIO 176
#define IGEP2_GPIO_USBH_NRESET 24
-#define IGEP2_GPIO_LED0_GREEN 26
-#define IGEP2_GPIO_LED0_RED 27
-#define IGEP2_GPIO_LED1_RED 28
-#define IGEP2_GPIO_DVI_PUP 170
-#define IGEP2_GPIO_WIFI_NPD 94
-#define IGEP2_GPIO_WIFI_NRESET 95
+#define IGEP2_GPIO_LED0_GREEN 26
+#define IGEP2_GPIO_LED0_RED 27
+#define IGEP2_GPIO_LED1_RED 28
+#define IGEP2_GPIO_DVI_PUP 170
+
+#define IGEP2_RB_GPIO_WIFI_NPD 94
+#define IGEP2_RB_GPIO_WIFI_NRESET 95
+#define IGEP2_RB_GPIO_BT_NRESET 137
+#define IGEP2_RC_GPIO_WIFI_NPD 138
+#define IGEP2_RC_GPIO_WIFI_NRESET 139
+#define IGEP2_RC_GPIO_BT_NRESET 137
+
+/*
+ * IGEP2 Hardware Revision Table
+ *
+ * --------------------------------------------------------------------------
+ * | Id. | Hw Rev. | HW0 (28) | WIFI_NPD | WIFI_NRESET | BT_NRESET |
+ * --------------------------------------------------------------------------
+ * | 0 | B | high | gpio94 | gpio95 | - |
+ * | 0 | B/C (B-compatible) | high | gpio94 | gpio95 | gpio137 |
+ * | 1 | C | low | gpio138 | gpio139 | gpio137 |
+ * --------------------------------------------------------------------------
+ */
+
+#define IGEP2_BOARD_HWREV_B 0
+#define IGEP2_BOARD_HWREV_C 1
+
+static u8 hwrev;
+
+static void __init igep2_get_revision(void)
+{
+ u8 ret;
+
+ omap_mux_init_gpio(IGEP2_GPIO_LED1_RED, OMAP_PIN_INPUT);
+
+ if ((gpio_request(IGEP2_GPIO_LED1_RED, "GPIO_HW0_REV") == 0) &&
+ (gpio_direction_input(IGEP2_GPIO_LED1_RED) == 0)) {
+ ret = gpio_get_value(IGEP2_GPIO_LED1_RED);
+ if (ret == 0) {
+ pr_info("IGEP2: Hardware Revision C (B-NON compatible)\n");
+ hwrev = IGEP2_BOARD_HWREV_C;
+ } else if (ret == 1) {
+ pr_info("IGEP2: Hardware Revision B/C (B compatible)\n");
+ hwrev = IGEP2_BOARD_HWREV_B;
+ } else {
+ pr_err("IGEP2: Unknown Hardware Revision\n");
+ hwrev = -1;
+ }
+ } else {
+ pr_warning("IGEP2: Could not obtain gpio GPIO_HW0_REV\n");
+ pr_err("IGEP2: Unknown Hardware Revision\n");
+ }
+
+ gpio_free(IGEP2_GPIO_LED1_RED);
+}
#if defined(CONFIG_MTD_ONENAND_OMAP2) || \
defined(CONFIG_MTD_ONENAND_OMAP2_MODULE)
@@ -107,7 +157,7 @@ static struct platform_device igep2_onenand_device = {
},
};
-void __init igep2_flash_init(void)
+static void __init igep2_flash_init(void)
{
u8 cs = 0;
u8 onenandcs = GPMC_CS_NUM + 1;
@@ -141,7 +191,7 @@ void __init igep2_flash_init(void)
}
#else
-void __init igep2_flash_init(void) {}
+static void __init igep2_flash_init(void) {}
#endif
#if defined(CONFIG_SMSC911X) || defined(CONFIG_SMSC911X_MODULE)
@@ -211,10 +261,6 @@ static struct regulator_consumer_supply igep2_vmmc1_supply = {
.supply = "vmmc",
};
-static struct regulator_consumer_supply igep2_vmmc2_supply = {
- .supply = "vmmc",
-};
-
/* VMMC1 for OMAP VDD_MMC1 (i/o) and MMC1 card */
static struct regulator_init_data igep2_vmmc1 = {
.constraints = {
@@ -230,37 +276,95 @@ static struct regulator_init_data igep2_vmmc1 = {
.consumer_supplies = &igep2_vmmc1_supply,
};
-/* VMMC2 for OMAP VDD_MMC2 (i/o) and MMC2 WIFI */
-static struct regulator_init_data igep2_vmmc2 = {
- .constraints = {
- .min_uV = 1850000,
- .max_uV = 3150000,
- .valid_modes_mask = REGULATOR_MODE_NORMAL
- | REGULATOR_MODE_STANDBY,
- .valid_ops_mask = REGULATOR_CHANGE_VOLTAGE
- | REGULATOR_CHANGE_MODE
- | REGULATOR_CHANGE_STATUS,
- },
- .num_consumer_supplies = 1,
- .consumer_supplies = &igep2_vmmc2_supply,
-};
-
static struct omap2_hsmmc_info mmc[] = {
{
.mmc = 1,
- .wires = 4,
+ .caps = MMC_CAP_4_BIT_DATA,
.gpio_cd = -EINVAL,
.gpio_wp = -EINVAL,
},
+#if defined(CONFIG_LIBERTAS_SDIO) || defined(CONFIG_LIBERTAS_SDIO_MODULE)
{
.mmc = 2,
- .wires = 4,
+ .caps = MMC_CAP_4_BIT_DATA,
.gpio_cd = -EINVAL,
.gpio_wp = -EINVAL,
},
+#endif
{} /* Terminator */
};
+#if defined(CONFIG_LEDS_GPIO) || defined(CONFIG_LEDS_GPIO_MODULE)
+#include <linux/leds.h>
+
+static struct gpio_led igep2_gpio_leds[] = {
+ [0] = {
+ .name = "gpio-led:red:d0",
+ .gpio = IGEP2_GPIO_LED0_RED,
+ .default_trigger = "default-off"
+ },
+ [1] = {
+ .name = "gpio-led:green:d0",
+ .gpio = IGEP2_GPIO_LED0_GREEN,
+ .default_trigger = "default-off",
+ },
+ [2] = {
+ .name = "gpio-led:red:d1",
+ .gpio = IGEP2_GPIO_LED1_RED,
+ .default_trigger = "default-off",
+ },
+ [3] = {
+ .name = "gpio-led:green:d1",
+ .default_trigger = "heartbeat",
+ .gpio = -EINVAL, /* gets replaced */
+ },
+};
+
+static struct gpio_led_platform_data igep2_led_pdata = {
+ .leds = igep2_gpio_leds,
+ .num_leds = ARRAY_SIZE(igep2_gpio_leds),
+};
+
+static struct platform_device igep2_led_device = {
+ .name = "leds-gpio",
+ .id = -1,
+ .dev = {
+ .platform_data = &igep2_led_pdata,
+ },
+};
+
+static void __init igep2_leds_init(void)
+{
+ platform_device_register(&igep2_led_device);
+}
+
+#else
+static inline void igep2_leds_init(void)
+{
+ if ((gpio_request(IGEP2_GPIO_LED0_RED, "gpio-led:red:d0") == 0) &&
+ (gpio_direction_output(IGEP2_GPIO_LED0_RED, 1) == 0)) {
+ gpio_export(IGEP2_GPIO_LED0_RED, 0);
+ gpio_set_value(IGEP2_GPIO_LED0_RED, 0);
+ } else
+ pr_warning("IGEP v2: Could not obtain gpio GPIO_LED0_RED\n");
+
+ if ((gpio_request(IGEP2_GPIO_LED0_GREEN, "gpio-led:green:d0") == 0) &&
+ (gpio_direction_output(IGEP2_GPIO_LED0_GREEN, 1) == 0)) {
+ gpio_export(IGEP2_GPIO_LED0_GREEN, 0);
+ gpio_set_value(IGEP2_GPIO_LED0_GREEN, 0);
+ } else
+ pr_warning("IGEP v2: Could not obtain gpio GPIO_LED0_GREEN\n");
+
+ if ((gpio_request(IGEP2_GPIO_LED1_RED, "gpio-led:red:d1") == 0) &&
+ (gpio_direction_output(IGEP2_GPIO_LED1_RED, 1) == 0)) {
+ gpio_export(IGEP2_GPIO_LED1_RED, 0);
+ gpio_set_value(IGEP2_GPIO_LED1_RED, 0);
+ } else
+ pr_warning("IGEP v2: Could not obtain gpio GPIO_LED1_RED\n");
+
+}
+#endif
+
static int igep2_twl_gpio_setup(struct device *dev,
unsigned gpio, unsigned ngpio)
{
@@ -268,20 +372,48 @@ static int igep2_twl_gpio_setup(struct device *dev,
mmc[0].gpio_cd = gpio + 0;
omap2_hsmmc_init(mmc);
- /* link regulators to MMC adapters ... we "know" the
+ /*
+ * link regulators to MMC adapters ... we "know" the
* regulators will be set up only *after* we return.
- */
+ */
igep2_vmmc1_supply.dev = mmc[0].dev;
- igep2_vmmc2_supply.dev = mmc[1].dev;
+
+ /*
+ * REVISIT: need ehci-omap hooks for external VBUS
+ * power switch and overcurrent detect
+ */
+ if ((gpio_request(gpio + 1, "GPIO_EHCI_NOC") < 0) ||
+ (gpio_direction_input(gpio + 1) < 0))
+ pr_err("IGEP2: Could not obtain gpio for EHCI NOC");
+
+ /*
+ * TWL4030_GPIO_MAX + 0 == ledA, GPIO_USBH_CPEN
+ * (out, active low)
+ */
+ if ((gpio_request(gpio + TWL4030_GPIO_MAX, "GPIO_USBH_CPEN") < 0) ||
+ (gpio_direction_output(gpio + TWL4030_GPIO_MAX, 0) < 0))
+ pr_err("IGEP2: Could not obtain gpio for USBH_CPEN");
+
+ /* TWL4030_GPIO_MAX + 1 == ledB (out, active low LED) */
+#if !defined(CONFIG_LEDS_GPIO) && !defined(CONFIG_LEDS_GPIO_MODULE)
+ if ((gpio_request(gpio+TWL4030_GPIO_MAX+1, "gpio-led:green:d1") == 0)
+ && (gpio_direction_output(gpio + TWL4030_GPIO_MAX + 1, 1) == 0)) {
+ gpio_export(gpio + TWL4030_GPIO_MAX + 1, 0);
+ gpio_set_value(gpio + TWL4030_GPIO_MAX + 1, 0);
+ } else
+ pr_warning("IGEP v2: Could not obtain gpio GPIO_LED1_GREEN\n");
+#else
+ igep2_gpio_leds[3].gpio = gpio + TWL4030_GPIO_MAX + 1;
+#endif
return 0;
};
-static struct twl4030_gpio_platform_data igep2_gpio_data = {
+static struct twl4030_gpio_platform_data igep2_twl4030_gpio_pdata = {
.gpio_base = OMAP_MAX_GPIO_LINES,
.irq_base = TWL4030_GPIO_IRQ_BASE,
.irq_end = TWL4030_GPIO_IRQ_END,
- .use_leds = false,
+ .use_leds = true,
.setup = igep2_twl_gpio_setup,
};
@@ -355,47 +487,6 @@ static void __init igep2_display_init(void)
pr_err("IGEP v2: Could not obtain gpio GPIO_DVI_PUP\n");
}
-#if defined(CONFIG_LEDS_GPIO) || defined(CONFIG_LEDS_GPIO_MODULE)
-#include <linux/leds.h>
-
-static struct gpio_led igep2_gpio_leds[] = {
- {
- .name = "led0:red",
- .gpio = IGEP2_GPIO_LED0_RED,
- },
- {
- .name = "led0:green",
- .default_trigger = "heartbeat",
- .gpio = IGEP2_GPIO_LED0_GREEN,
- },
- {
- .name = "led1:red",
- .gpio = IGEP2_GPIO_LED1_RED,
- },
-};
-
-static struct gpio_led_platform_data igep2_led_pdata = {
- .leds = igep2_gpio_leds,
- .num_leds = ARRAY_SIZE(igep2_gpio_leds),
-};
-
-static struct platform_device igep2_led_device = {
- .name = "leds-gpio",
- .id = -1,
- .dev = {
- .platform_data = &igep2_led_pdata,
- },
-};
-
-static void __init igep2_init_led(void)
-{
- platform_device_register(&igep2_led_device);
-}
-
-#else
-static inline void igep2_init_led(void) {}
-#endif
-
static struct platform_device *igep2_devices[] __initdata = {
&igep2_dss_device,
};
@@ -425,14 +516,13 @@ static struct twl4030_platform_data igep2_twldata = {
/* platform_data for children goes here */
.usb = &igep2_usb_data,
.codec = &igep2_codec_data,
- .gpio = &igep2_gpio_data,
+ .gpio = &igep2_twl4030_gpio_pdata,
.vmmc1 = &igep2_vmmc1,
- .vmmc2 = &igep2_vmmc2,
.vpll2 = &igep2_vpll2,
};
-static struct i2c_board_info __initdata igep2_i2c_boardinfo[] = {
+static struct i2c_board_info __initdata igep2_i2c1_boardinfo[] = {
{
I2C_BOARD_INFO("twl4030", 0x48),
.flags = I2C_CLIENT_WAKE,
@@ -441,14 +531,29 @@ static struct i2c_board_info __initdata igep2_i2c_boardinfo[] = {
},
};
-static int __init igep2_i2c_init(void)
+static struct i2c_board_info __initdata igep2_i2c3_boardinfo[] = {
+ {
+ I2C_BOARD_INFO("eeprom", 0x50),
+ },
+};
+
+static void __init igep2_i2c_init(void)
{
- omap_register_i2c_bus(1, 2600, igep2_i2c_boardinfo,
- ARRAY_SIZE(igep2_i2c_boardinfo));
- /* Bus 3 is attached to the DVI port where devices like the pico DLP
- * projector don't work reliably with 400kHz */
- omap_register_i2c_bus(3, 100, NULL, 0);
- return 0;
+ int ret;
+
+ ret = omap_register_i2c_bus(1, 2600, igep2_i2c1_boardinfo,
+ ARRAY_SIZE(igep2_i2c1_boardinfo));
+ if (ret)
+ pr_warning("IGEP2: Could not register I2C1 bus (%d)\n", ret);
+
+ /*
+ * Bus 3 is attached to the DVI port where devices like the pico DLP
+ * projector don't work reliably with 400kHz
+ */
+ ret = omap_register_i2c_bus(3, 100, igep2_i2c3_boardinfo,
+ ARRAY_SIZE(igep2_i2c3_boardinfo));
+ if (ret)
+ pr_warning("IGEP2: Could not register I2C3 bus (%d)\n", ret);
}
static struct omap_musb_board_data musb_board_data = {
@@ -476,9 +581,57 @@ static struct omap_board_mux board_mux[] __initdata = {
#define board_mux NULL
#endif
+#if defined(CONFIG_LIBERTAS_SDIO) || defined(CONFIG_LIBERTAS_SDIO_MODULE)
+
+static void __init igep2_wlan_bt_init(void)
+{
+ unsigned npd, wreset, btreset;
+
+ /* GPIO's for WLAN-BT combo depends on hardware revision */
+ if (hwrev == IGEP2_BOARD_HWREV_B) {
+ npd = IGEP2_RB_GPIO_WIFI_NPD;
+ wreset = IGEP2_RB_GPIO_WIFI_NRESET;
+ btreset = IGEP2_RB_GPIO_BT_NRESET;
+ } else if (hwrev == IGEP2_BOARD_HWREV_C) {
+ npd = IGEP2_RC_GPIO_WIFI_NPD;
+ wreset = IGEP2_RC_GPIO_WIFI_NRESET;
+ btreset = IGEP2_RC_GPIO_BT_NRESET;
+ } else
+ return;
+
+ /* Set GPIO's for WLAN-BT combo module */
+ if ((gpio_request(npd, "GPIO_WIFI_NPD") == 0) &&
+ (gpio_direction_output(npd, 1) == 0)) {
+ gpio_export(npd, 0);
+ } else
+ pr_warning("IGEP2: Could not obtain gpio GPIO_WIFI_NPD\n");
+
+ if ((gpio_request(wreset, "GPIO_WIFI_NRESET") == 0) &&
+ (gpio_direction_output(wreset, 1) == 0)) {
+ gpio_export(wreset, 0);
+ gpio_set_value(wreset, 0);
+ udelay(10);
+ gpio_set_value(wreset, 1);
+ } else
+ pr_warning("IGEP2: Could not obtain gpio GPIO_WIFI_NRESET\n");
+
+ if ((gpio_request(btreset, "GPIO_BT_NRESET") == 0) &&
+ (gpio_direction_output(btreset, 1) == 0)) {
+ gpio_export(btreset, 0);
+ } else
+ pr_warning("IGEP2: Could not obtain gpio GPIO_BT_NRESET\n");
+}
+#else
+static inline void __init igep2_wlan_bt_init(void) { }
+#endif
+
static void __init igep2_init(void)
{
omap3_mux_init(board_mux, OMAP_PACKAGE_CBB);
+
+ /* Get IGEP2 hardware revision */
+ igep2_get_revision();
+ /* Register I2C busses and drivers */
igep2_i2c_init();
platform_add_devices(igep2_devices, ARRAY_SIZE(igep2_devices));
omap_serial_init();
@@ -486,50 +639,16 @@ static void __init igep2_init(void)
usb_ehci_init(&ehci_pdata);
igep2_flash_init();
- igep2_init_led();
+ igep2_leds_init();
igep2_display_init();
igep2_init_smsc911x();
- /* GPIO userspace leds */
-#if !defined(CONFIG_LEDS_GPIO) && !defined(CONFIG_LEDS_GPIO_MODULE)
- if ((gpio_request(IGEP2_GPIO_LED0_RED, "led0:red") == 0) &&
- (gpio_direction_output(IGEP2_GPIO_LED0_RED, 1) == 0)) {
- gpio_export(IGEP2_GPIO_LED0_RED, 0);
- gpio_set_value(IGEP2_GPIO_LED0_RED, 0);
- } else
- pr_warning("IGEP v2: Could not obtain gpio GPIO_LED0_RED\n");
-
- if ((gpio_request(IGEP2_GPIO_LED0_GREEN, "led0:green") == 0) &&
- (gpio_direction_output(IGEP2_GPIO_LED0_GREEN, 1) == 0)) {
- gpio_export(IGEP2_GPIO_LED0_GREEN, 0);
- gpio_set_value(IGEP2_GPIO_LED0_GREEN, 0);
- } else
- pr_warning("IGEP v2: Could not obtain gpio GPIO_LED0_GREEN\n");
-
- if ((gpio_request(IGEP2_GPIO_LED1_RED, "led1:red") == 0) &&
- (gpio_direction_output(IGEP2_GPIO_LED1_RED, 1) == 0)) {
- gpio_export(IGEP2_GPIO_LED1_RED, 0);
- gpio_set_value(IGEP2_GPIO_LED1_RED, 0);
- } else
- pr_warning("IGEP v2: Could not obtain gpio GPIO_LED1_RED\n");
-#endif
-
- /* GPIO W-LAN + Bluetooth combo module */
- if ((gpio_request(IGEP2_GPIO_WIFI_NPD, "GPIO_WIFI_NPD") == 0) &&
- (gpio_direction_output(IGEP2_GPIO_WIFI_NPD, 1) == 0)) {
- gpio_export(IGEP2_GPIO_WIFI_NPD, 0);
-/* gpio_set_value(IGEP2_GPIO_WIFI_NPD, 0); */
- } else
- pr_warning("IGEP v2: Could not obtain gpio GPIO_WIFI_NPD\n");
+ /*
+ * WLAN-BT combo module from MuRata which has a Marvell WLAN
+ * (88W8686) + CSR Bluetooth chipset. Uses SDIO interface.
+ */
+ igep2_wlan_bt_init();
- if ((gpio_request(IGEP2_GPIO_WIFI_NRESET, "GPIO_WIFI_NRESET") == 0) &&
- (gpio_direction_output(IGEP2_GPIO_WIFI_NRESET, 1) == 0)) {
- gpio_export(IGEP2_GPIO_WIFI_NRESET, 0);
- gpio_set_value(IGEP2_GPIO_WIFI_NRESET, 0);
- udelay(10);
- gpio_set_value(IGEP2_GPIO_WIFI_NRESET, 1);
- } else
- pr_warning("IGEP v2: Could not obtain gpio GPIO_WIFI_NRESET\n");
}
MACHINE_START(IGEP0020, "IGEP v2 board")
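The hardware-revision table at the top of this file is what igep2_wlan_bt_init() keys off when it picks the WLAN/BT GPIO set. The same selection written as a lookup table, purely as a sketch (the struct and array below are not part of the patch):

/* Sketch: WLAN/BT GPIOs per hardware revision, mirroring the revision
 * table and the IGEP2_R{B,C}_GPIO_* defines above. */
struct igep2_wlan_bt_gpios {
	unsigned npd;
	unsigned wreset;
	unsigned btreset;
};

static const struct igep2_wlan_bt_gpios igep2_rev_gpios[] = {
	[IGEP2_BOARD_HWREV_B] = { .npd =  94, .wreset =  95, .btreset = 137 },
	[IGEP2_BOARD_HWREV_C] = { .npd = 138, .wreset = 139, .btreset = 137 },
};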
diff --git a/arch/arm/mach-omap2/board-igep0030.c b/arch/arm/mach-omap2/board-igep0030.c
new file mode 100644
index 000000000000..22b0b253e16b
--- /dev/null
+++ b/arch/arm/mach-omap2/board-igep0030.c
@@ -0,0 +1,400 @@
+/*
+ * Copyright (C) 2010 - ISEE 2007 SL
+ *
+ * Modified from mach-omap2/board-generic.c
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/platform_device.h>
+#include <linux/delay.h>
+#include <linux/err.h>
+#include <linux/clk.h>
+#include <linux/io.h>
+#include <linux/gpio.h>
+#include <linux/interrupt.h>
+
+#include <linux/regulator/machine.h>
+#include <linux/i2c/twl.h>
+#include <linux/mmc/host.h>
+
+#include <asm/mach-types.h>
+#include <asm/mach/arch.h>
+
+#include <plat/board.h>
+#include <plat/common.h>
+#include <plat/gpmc.h>
+#include <plat/usb.h>
+#include <plat/onenand.h>
+
+#include "mux.h"
+#include "hsmmc.h"
+#include "sdram-numonyx-m65kxxxxam.h"
+
+#define IGEP3_GPIO_LED0_GREEN 54
+#define IGEP3_GPIO_LED0_RED 53
+#define IGEP3_GPIO_LED1_RED 16
+
+#define IGEP3_GPIO_WIFI_NPD 138
+#define IGEP3_GPIO_WIFI_NRESET 139
+#define IGEP3_GPIO_BT_NRESET 137
+
+#define IGEP3_GPIO_USBH_NRESET 115
+
+
+#if defined(CONFIG_MTD_ONENAND_OMAP2) || \
+ defined(CONFIG_MTD_ONENAND_OMAP2_MODULE)
+
+#define ONENAND_MAP 0x20000000
+
+/*
+ * x2 Flash built-in COMBO POP MEMORY
+ * Since the device is equipped with two DataRAMs and a two-plane NAND
+ * Flash memory array, these two components enable simultaneous programming
+ * of 4KiB. Plane1 has only even blocks such as block0, block2, block4,
+ * while Plane2 has only odd blocks such as block1, block3, block5.
+ * So MTD regards it as 4KiB page size and 256KiB block size 64*(2*2048)
+ */
+
+static struct mtd_partition igep3_onenand_partitions[] = {
+ {
+ .name = "X-Loader",
+ .offset = 0,
+ .size = 2 * (64*(2*2048))
+ },
+ {
+ .name = "U-Boot",
+ .offset = MTDPART_OFS_APPEND,
+ .size = 6 * (64*(2*2048)),
+ },
+ {
+ .name = "Environment",
+ .offset = MTDPART_OFS_APPEND,
+ .size = 2 * (64*(2*2048)),
+ },
+ {
+ .name = "Kernel",
+ .offset = MTDPART_OFS_APPEND,
+ .size = 12 * (64*(2*2048)),
+ },
+ {
+ .name = "File System",
+ .offset = MTDPART_OFS_APPEND,
+ .size = MTDPART_SIZ_FULL,
+ },
+};
+
+static struct omap_onenand_platform_data igep3_onenand_pdata = {
+ .parts = igep3_onenand_partitions,
+ .nr_parts = ARRAY_SIZE(igep3_onenand_partitions),
+ .onenand_setup = NULL,
+ .dma_channel = -1, /* disable DMA in OMAP OneNAND driver */
+};
+
+static struct platform_device igep3_onenand_device = {
+ .name = "omap2-onenand",
+ .id = -1,
+ .dev = {
+ .platform_data = &igep3_onenand_pdata,
+ },
+};
+
+void __init igep3_flash_init(void)
+{
+ u8 cs = 0;
+ u8 onenandcs = GPMC_CS_NUM + 1;
+
+ for (cs = 0; cs < GPMC_CS_NUM; cs++) {
+ u32 ret;
+ ret = gpmc_cs_read_reg(cs, GPMC_CS_CONFIG1);
+
+ /* Check if NAND/oneNAND is configured */
+ if ((ret & 0xC00) == 0x800)
+ /* NAND found */
+ pr_err("IGEP3: Unsupported NAND found\n");
+ else {
+ ret = gpmc_cs_read_reg(cs, GPMC_CS_CONFIG7);
+
+ if ((ret & 0x3F) == (ONENAND_MAP >> 24))
+ /* OneNAND found */
+ onenandcs = cs;
+ }
+ }
+
+ if (onenandcs > GPMC_CS_NUM) {
+ pr_err("IGEP3: Unable to find configuration in GPMC\n");
+ return;
+ }
+
+ igep3_onenand_pdata.cs = onenandcs;
+
+ if (platform_device_register(&igep3_onenand_device) < 0)
+ pr_err("IGEP3: Unable to register OneNAND device\n");
+}
+
+#else
+void __init igep3_flash_init(void) {}
+#endif
+
+static struct regulator_consumer_supply igep3_vmmc1_supply = {
+ .supply = "vmmc",
+};
+
+/* VMMC1 for OMAP VDD_MMC1 (i/o) and MMC1 card */
+static struct regulator_init_data igep3_vmmc1 = {
+ .constraints = {
+ .min_uV = 1850000,
+ .max_uV = 3150000,
+ .valid_modes_mask = REGULATOR_MODE_NORMAL
+ | REGULATOR_MODE_STANDBY,
+ .valid_ops_mask = REGULATOR_CHANGE_VOLTAGE
+ | REGULATOR_CHANGE_MODE
+ | REGULATOR_CHANGE_STATUS,
+ },
+ .num_consumer_supplies = 1,
+ .consumer_supplies = &igep3_vmmc1_supply,
+};
+
+static struct omap2_hsmmc_info mmc[] = {
+ [0] = {
+ .mmc = 1,
+ .caps = MMC_CAP_4_BIT_DATA,
+ .gpio_cd = -EINVAL,
+ .gpio_wp = -EINVAL,
+ },
+#if defined(CONFIG_LIBERTAS_SDIO) || defined(CONFIG_LIBERTAS_SDIO_MODULE)
+ [1] = {
+ .mmc = 2,
+ .caps = MMC_CAP_4_BIT_DATA,
+ .gpio_cd = -EINVAL,
+ .gpio_wp = -EINVAL,
+ },
+#endif
+ {} /* Terminator */
+};
+
+#if defined(CONFIG_LEDS_GPIO) || defined(CONFIG_LEDS_GPIO_MODULE)
+#include <linux/leds.h>
+
+static struct gpio_led igep3_gpio_leds[] = {
+ [0] = {
+ .name = "gpio-led:red:d0",
+ .gpio = IGEP3_GPIO_LED0_RED,
+ .default_trigger = "default-off"
+ },
+ [1] = {
+ .name = "gpio-led:green:d0",
+ .gpio = IGEP3_GPIO_LED0_GREEN,
+ .default_trigger = "default-off",
+ },
+ [2] = {
+ .name = "gpio-led:red:d1",
+ .gpio = IGEP3_GPIO_LED1_RED,
+ .default_trigger = "default-off",
+ },
+ [3] = {
+ .name = "gpio-led:green:d1",
+ .default_trigger = "heartbeat",
+ .gpio = -EINVAL, /* gets replaced */
+ },
+};
+
+static struct gpio_led_platform_data igep3_led_pdata = {
+ .leds = igep3_gpio_leds,
+ .num_leds = ARRAY_SIZE(igep3_gpio_leds),
+};
+
+static struct platform_device igep3_led_device = {
+ .name = "leds-gpio",
+ .id = -1,
+ .dev = {
+ .platform_data = &igep3_led_pdata,
+ },
+};
+
+static void __init igep3_leds_init(void)
+{
+ platform_device_register(&igep3_led_device);
+}
+
+#else
+static inline void igep3_leds_init(void)
+{
+ if ((gpio_request(IGEP3_GPIO_LED0_RED, "gpio-led:red:d0") == 0) &&
+ (gpio_direction_output(IGEP3_GPIO_LED0_RED, 1) == 0)) {
+ gpio_export(IGEP3_GPIO_LED0_RED, 0);
+ gpio_set_value(IGEP3_GPIO_LED0_RED, 1);
+ } else
+ pr_warning("IGEP3: Could not obtain gpio GPIO_LED0_RED\n");
+
+ if ((gpio_request(IGEP3_GPIO_LED0_GREEN, "gpio-led:green:d0") == 0) &&
+ (gpio_direction_output(IGEP3_GPIO_LED0_GREEN, 1) == 0)) {
+ gpio_export(IGEP3_GPIO_LED0_GREEN, 0);
+ gpio_set_value(IGEP3_GPIO_LED0_GREEN, 1);
+ } else
+ pr_warning("IGEP3: Could not obtain gpio GPIO_LED0_GREEN\n");
+
+ if ((gpio_request(IGEP3_GPIO_LED1_RED, "gpio-led:red:d1") == 0) &&
+ (gpio_direction_output(IGEP3_GPIO_LED1_RED, 1) == 0)) {
+ gpio_export(IGEP3_GPIO_LED1_RED, 0);
+ gpio_set_value(IGEP3_GPIO_LED1_RED, 1);
+ } else
+ pr_warning("IGEP3: Could not obtain gpio GPIO_LED1_RED\n");
+}
+#endif
+
+static int igep3_twl4030_gpio_setup(struct device *dev,
+ unsigned gpio, unsigned ngpio)
+{
+ /* gpio + 0 is "mmc0_cd" (input/IRQ) */
+ mmc[0].gpio_cd = gpio + 0;
+ omap2_hsmmc_init(mmc);
+
+ /*
+ * link regulators to MMC adapters ... we "know" the
+ * regulators will be set up only *after* we return.
+ */
+ igep3_vmmc1_supply.dev = mmc[0].dev;
+
+ /* TWL4030_GPIO_MAX + 1 == ledB (out, active low LED) */
+#if !defined(CONFIG_LEDS_GPIO) && !defined(CONFIG_LEDS_GPIO_MODULE)
+ if ((gpio_request(gpio+TWL4030_GPIO_MAX+1, "gpio-led:green:d1") == 0)
+ && (gpio_direction_output(gpio + TWL4030_GPIO_MAX + 1, 1) == 0)) {
+ gpio_export(gpio + TWL4030_GPIO_MAX + 1, 0);
+ gpio_set_value(gpio + TWL4030_GPIO_MAX + 1, 0);
+ } else
+ pr_warning("IGEP3: Could not obtain gpio GPIO_LED1_GREEN\n");
+#else
+ igep3_gpio_leds[3].gpio = gpio + TWL4030_GPIO_MAX + 1;
+#endif
+
+ return 0;
+};
+
+static struct twl4030_gpio_platform_data igep3_twl4030_gpio_pdata = {
+ .gpio_base = OMAP_MAX_GPIO_LINES,
+ .irq_base = TWL4030_GPIO_IRQ_BASE,
+ .irq_end = TWL4030_GPIO_IRQ_END,
+ .use_leds = true,
+ .setup = igep3_twl4030_gpio_setup,
+};
+
+static struct twl4030_usb_data igep3_twl4030_usb_data = {
+ .usb_mode = T2_USB_MODE_ULPI,
+};
+
+static void __init igep3_init_irq(void)
+{
+ omap2_init_common_hw(m65kxxxxam_sdrc_params, m65kxxxxam_sdrc_params);
+ omap_init_irq();
+ omap_gpio_init();
+}
+
+static struct twl4030_platform_data igep3_twl4030_pdata = {
+ .irq_base = TWL4030_IRQ_BASE,
+ .irq_end = TWL4030_IRQ_END,
+
+ /* platform_data for children goes here */
+ .usb = &igep3_twl4030_usb_data,
+ .gpio = &igep3_twl4030_gpio_pdata,
+ .vmmc1 = &igep3_vmmc1,
+};
+
+static struct i2c_board_info __initdata igep3_i2c_boardinfo[] = {
+ {
+ I2C_BOARD_INFO("twl4030", 0x48),
+ .flags = I2C_CLIENT_WAKE,
+ .irq = INT_34XX_SYS_NIRQ,
+ .platform_data = &igep3_twl4030_pdata,
+ },
+};
+
+static int __init igep3_i2c_init(void)
+{
+ omap_register_i2c_bus(1, 2600, igep3_i2c_boardinfo,
+ ARRAY_SIZE(igep3_i2c_boardinfo));
+
+ return 0;
+}
+
+static struct omap_musb_board_data musb_board_data = {
+ .interface_type = MUSB_INTERFACE_ULPI,
+ .mode = MUSB_OTG,
+ .power = 100,
+};
+
+#if defined(CONFIG_LIBERTAS_SDIO) || defined(CONFIG_LIBERTAS_SDIO_MODULE)
+
+static void __init igep3_wifi_bt_init(void)
+{
+ /* Configure MUX values for W-LAN + Bluetooth GPIO's */
+ omap_mux_init_gpio(IGEP3_GPIO_WIFI_NPD, OMAP_PIN_OUTPUT);
+ omap_mux_init_gpio(IGEP3_GPIO_WIFI_NRESET, OMAP_PIN_OUTPUT);
+ omap_mux_init_gpio(IGEP3_GPIO_BT_NRESET, OMAP_PIN_OUTPUT);
+
+ /* Set GPIO's for W-LAN + Bluetooth combo module */
+ if ((gpio_request(IGEP3_GPIO_WIFI_NPD, "GPIO_WIFI_NPD") == 0) &&
+ (gpio_direction_output(IGEP3_GPIO_WIFI_NPD, 1) == 0)) {
+ gpio_export(IGEP3_GPIO_WIFI_NPD, 0);
+ } else
+ pr_warning("IGEP3: Could not obtain gpio GPIO_WIFI_NPD\n");
+
+ if ((gpio_request(IGEP3_GPIO_WIFI_NRESET, "GPIO_WIFI_NRESET") == 0) &&
+ (gpio_direction_output(IGEP3_GPIO_WIFI_NRESET, 1) == 0)) {
+ gpio_export(IGEP3_GPIO_WIFI_NRESET, 0);
+ gpio_set_value(IGEP3_GPIO_WIFI_NRESET, 0);
+ udelay(10);
+ gpio_set_value(IGEP3_GPIO_WIFI_NRESET, 1);
+ } else
+ pr_warning("IGEP3: Could not obtain gpio GPIO_WIFI_NRESET\n");
+
+ if ((gpio_request(IGEP3_GPIO_BT_NRESET, "GPIO_BT_NRESET") == 0) &&
+ (gpio_direction_output(IGEP3_GPIO_BT_NRESET, 1) == 0)) {
+ gpio_export(IGEP3_GPIO_BT_NRESET, 0);
+ } else
+ pr_warning("IGEP3: Could not obtain gpio GPIO_BT_NRESET\n");
+}
+#else
+void __init igep3_wifi_bt_init(void) {}
+#endif
+
+#ifdef CONFIG_OMAP_MUX
+static struct omap_board_mux board_mux[] __initdata = {
+ { .reg_offset = OMAP_MUX_TERMINATOR },
+};
+#else
+#define board_mux NULL
+#endif
+
+static void __init igep3_init(void)
+{
+ omap3_mux_init(board_mux, OMAP_PACKAGE_CBB);
+
+ /* Register I2C busses and drivers */
+ igep3_i2c_init();
+
+ omap_serial_init();
+ usb_musb_init(&musb_board_data);
+
+ igep3_flash_init();
+ igep3_leds_init();
+
+ /*
+ * WLAN-BT combo module from MuRata which has a Marvell WLAN
+ * (88W8686) + CSR Bluetooth chipset. Uses SDIO interface.
+ */
+ igep3_wifi_bt_init();
+
+}
+
+MACHINE_START(IGEP0030, "IGEP OMAP3 module")
+ .boot_params = 0x80000100,
+ .map_io = omap3_map_io,
+ .init_irq = igep3_init_irq,
+ .init_machine = igep3_init,
+ .timer = &omap_timer,
+MACHINE_END
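The 64*(2*2048) factor used for the OneNAND partition sizes above is one erase block as MTD sees it on this dual-plane part. A short worked check (macro names are illustrative, not part of the patch):

#define IGEP3_ONENAND_PAGE	(2 * 2048)			/* 4 KiB: one 2 KiB page per plane, programmed together */
#define IGEP3_ONENAND_BLOCK	(64 * IGEP3_ONENAND_PAGE)	/* 64 pages = 256 KiB erase block */

/* Resulting partition sizes:
 *   X-Loader      2 blocks = 512 KiB
 *   U-Boot        6 blocks = 1.5 MiB
 *   Environment   2 blocks = 512 KiB
 *   Kernel       12 blocks = 3 MiB
 *   File System  remainder of the device
 */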
diff --git a/arch/arm/mach-omap2/board-ldp.c b/arch/arm/mach-omap2/board-ldp.c
index f28fd77bceb3..001fd9713f39 100644
--- a/arch/arm/mach-omap2/board-ldp.c
+++ b/arch/arm/mach-omap2/board-ldp.c
@@ -27,6 +27,7 @@
#include <linux/i2c/twl.h>
#include <linux/io.h>
#include <linux/smsc911x.h>
+#include <linux/mmc/host.h>
#include <mach/hardware.h>
#include <asm/mach-types.h>
@@ -41,11 +42,12 @@
#include <mach/board-zoom.h>
#include <asm/delay.h>
-#include <plat/control.h>
#include <plat/usb.h>
+#include "board-flash.h"
#include "mux.h"
#include "hsmmc.h"
+#include "control.h"
#define LDP_SMSC911X_CS 1
#define LDP_SMSC911X_GPIO 152
@@ -82,7 +84,7 @@ static struct platform_device ldp_smsc911x_device = {
},
};
-static int board_keymap[] = {
+static uint32_t board_keymap[] = {
KEY(0, 0, KEY_1),
KEY(1, 0, KEY_2),
KEY(2, 0, KEY_3),
@@ -362,7 +364,7 @@ static int __init omap_i2c_init(void)
static struct omap2_hsmmc_info mmc[] __initdata = {
{
.mmc = 1,
- .wires = 4,
+ .caps = MMC_CAP_4_BIT_DATA,
.gpio_cd = -EINVAL,
.gpio_wp = -EINVAL,
},
diff --git a/arch/arm/mach-omap2/board-n8x0.c b/arch/arm/mach-omap2/board-n8x0.c
index 3f7966873507..e823c7042ab3 100644
--- a/arch/arm/mach-omap2/board-n8x0.c
+++ b/arch/arm/mach-omap2/board-n8x0.c
@@ -20,6 +20,7 @@
#include <linux/i2c.h>
#include <linux/spi/spi.h>
#include <linux/usb/musb.h>
+#include <sound/tlv320aic3x.h>
#include <asm/mach/arch.h>
#include <asm/mach-types.h>
@@ -383,15 +384,6 @@ static void n8x0_mmc_callback(void *data, u8 card_mask)
omap_mmc_notify_cover_event(mmc_device, index, *openp);
}
-void n8x0_mmc_slot1_cover_handler(void *arg, int closed_state)
-{
- if (mmc_device == NULL)
- return;
-
- slot1_cover_open = !closed_state;
- omap_mmc_notify_cover_event(mmc_device, 0, closed_state);
-}
-
static int n8x0_mmc_late_init(struct device *dev)
{
int r, bit, *openp;
@@ -511,7 +503,7 @@ static struct omap_mmc_platform_data mmc1_data = {
static struct omap_mmc_platform_data *mmc_data[OMAP24XX_NR_MMC];
-void __init n8x0_mmc_init(void)
+static void __init n8x0_mmc_init(void)
{
int err;
@@ -560,11 +552,6 @@ void __init n8x0_mmc_init(void)
void __init n8x0_mmc_init(void)
{
}
-
-void n8x0_mmc_slot1_cover_handler(void *arg, int state)
-{
-}
-
#endif /* CONFIG_MMC_OMAP */
#ifdef CONFIG_MENELAUS
@@ -614,29 +601,35 @@ static int n8x0_menelaus_late_init(struct device *dev)
return 0;
}
-static struct i2c_board_info __initdata n8x0_i2c_board_info_1[] = {
+#else
+static int n8x0_menelaus_late_init(struct device *dev)
+{
+ return 0;
+}
+#endif
+
+static struct menelaus_platform_data n8x0_menelaus_platform_data __initdata = {
+ .late_init = n8x0_menelaus_late_init,
+};
+
+static struct i2c_board_info n8x0_i2c_board_info_1[] __initdata = {
{
I2C_BOARD_INFO("menelaus", 0x72),
.irq = INT_24XX_SYS_NIRQ,
+ .platform_data = &n8x0_menelaus_platform_data,
},
};
-static struct menelaus_platform_data n8x0_menelaus_platform_data = {
- .late_init = n8x0_menelaus_late_init,
+static struct aic3x_pdata n810_aic33_data __initdata = {
+ .gpio_reset = 118,
};
-static void __init n8x0_menelaus_init(void)
-{
- n8x0_i2c_board_info_1[0].platform_data = &n8x0_menelaus_platform_data;
- omap_register_i2c_bus(1, 400, n8x0_i2c_board_info_1,
- ARRAY_SIZE(n8x0_i2c_board_info_1));
-}
-
-#else
-static inline void __init n8x0_menelaus_init(void)
-{
-}
-#endif
+static struct i2c_board_info n810_i2c_board_info_2[] __initdata = {
+ {
+ I2C_BOARD_INFO("tlv320aic3x", 0x18),
+ .platform_data = &n810_aic33_data,
+ },
+};
static void __init n8x0_map_io(void)
{
@@ -653,6 +646,11 @@ static void __init n8x0_init_irq(void)
#ifdef CONFIG_OMAP_MUX
static struct omap_board_mux board_mux[] __initdata = {
+ /* I2S codec port pins for McBSP block */
+ OMAP2420_MUX(EAC_AC_SCLK, OMAP_MUX_MODE1 | OMAP_PIN_INPUT),
+ OMAP2420_MUX(EAC_AC_FS, OMAP_MUX_MODE1 | OMAP_PIN_INPUT),
+ OMAP2420_MUX(EAC_AC_DIN, OMAP_MUX_MODE1 | OMAP_PIN_INPUT),
+ OMAP2420_MUX(EAC_AC_DOUT, OMAP_MUX_MODE1 | OMAP_PIN_OUTPUT),
{ .reg_offset = OMAP_MUX_TERMINATOR },
};
#else
@@ -665,9 +663,14 @@ static void __init n8x0_init_machine(void)
/* FIXME: add n810 spi devices */
spi_register_board_info(n800_spi_board_info,
ARRAY_SIZE(n800_spi_board_info));
+ omap_register_i2c_bus(1, 400, n8x0_i2c_board_info_1,
+ ARRAY_SIZE(n8x0_i2c_board_info_1));
+ omap_register_i2c_bus(2, 400, NULL, 0);
+ if (machine_is_nokia_n810())
+ i2c_register_board_info(2, n810_i2c_board_info_2,
+ ARRAY_SIZE(n810_i2c_board_info_2));
omap_serial_init();
- n8x0_menelaus_init();
n8x0_onenand_init();
n8x0_mmc_init();
n8x0_usb_init();
diff --git a/arch/arm/mach-omap2/board-omap3beagle.c b/arch/arm/mach-omap2/board-omap3beagle.c
index 9d9f5b881ee8..14f42240ae79 100644
--- a/arch/arm/mach-omap2/board-omap3beagle.c
+++ b/arch/arm/mach-omap2/board-omap3beagle.c
@@ -27,6 +27,7 @@
#include <linux/mtd/mtd.h>
#include <linux/mtd/partitions.h>
#include <linux/mtd/nand.h>
+#include <linux/mmc/host.h>
#include <linux/regulator/machine.h>
#include <linux/i2c/twl.h>
@@ -43,13 +44,100 @@
#include <plat/gpmc.h>
#include <plat/nand.h>
#include <plat/usb.h>
-#include <plat/timer-gp.h>
#include "mux.h"
#include "hsmmc.h"
+#include "timer-gp.h"
#define NAND_BLOCK_SIZE SZ_128K
+/*
+ * OMAP3 Beagle revision
+ * Run time detection of Beagle revision is done by reading GPIO.
+ * GPIO ID -
+ * AXBX = GPIO173, GPIO172, GPIO171: 1 1 1
+ * C1_3 = GPIO173, GPIO172, GPIO171: 1 1 0
+ * C4 = GPIO173, GPIO172, GPIO171: 1 0 1
+ * XM = GPIO173, GPIO172, GPIO171: 0 0 0
+ */
+enum {
+ OMAP3BEAGLE_BOARD_UNKN = 0,
+ OMAP3BEAGLE_BOARD_AXBX,
+ OMAP3BEAGLE_BOARD_C1_3,
+ OMAP3BEAGLE_BOARD_C4,
+ OMAP3BEAGLE_BOARD_XM,
+};
+
+static u8 omap3_beagle_version;
+
+static u8 omap3_beagle_get_rev(void)
+{
+ return omap3_beagle_version;
+}
+
+static void __init omap3_beagle_init_rev(void)
+{
+ int ret;
+ u16 beagle_rev = 0;
+
+ omap_mux_init_gpio(171, OMAP_PIN_INPUT_PULLUP);
+ omap_mux_init_gpio(172, OMAP_PIN_INPUT_PULLUP);
+ omap_mux_init_gpio(173, OMAP_PIN_INPUT_PULLUP);
+
+ ret = gpio_request(171, "rev_id_0");
+ if (ret < 0)
+ goto fail0;
+
+ ret = gpio_request(172, "rev_id_1");
+ if (ret < 0)
+ goto fail1;
+
+ ret = gpio_request(173, "rev_id_2");
+ if (ret < 0)
+ goto fail2;
+
+ gpio_direction_input(171);
+ gpio_direction_input(172);
+ gpio_direction_input(173);
+
+ beagle_rev = gpio_get_value(171) | (gpio_get_value(172) << 1)
+ | (gpio_get_value(173) << 2);
+
+ switch (beagle_rev) {
+ case 7:
+ printk(KERN_INFO "OMAP3 Beagle Rev: Ax/Bx\n");
+ omap3_beagle_version = OMAP3BEAGLE_BOARD_AXBX;
+ break;
+ case 6:
+ printk(KERN_INFO "OMAP3 Beagle Rev: C1/C2/C3\n");
+ omap3_beagle_version = OMAP3BEAGLE_BOARD_C1_3;
+ break;
+ case 5:
+ printk(KERN_INFO "OMAP3 Beagle Rev: C4\n");
+ omap3_beagle_version = OMAP3BEAGLE_BOARD_C4;
+ break;
+ case 0:
+ printk(KERN_INFO "OMAP3 Beagle Rev: xM\n");
+ omap3_beagle_version = OMAP3BEAGLE_BOARD_XM;
+ break;
+ default:
+ printk(KERN_INFO "OMAP3 Beagle Rev: unknown %hd\n", beagle_rev);
+ omap3_beagle_version = OMAP3BEAGLE_BOARD_UNKN;
+ }
+
+ return;
+
+fail2:
+ gpio_free(172);
+fail1:
+ gpio_free(171);
+fail0:
+ printk(KERN_ERR "Unable to get revision detection GPIO pins\n");
+ omap3_beagle_version = OMAP3BEAGLE_BOARD_UNKN;
+
+ return;
+}
+
static struct mtd_partition omap3beagle_nand_partitions[] = {
/* All the partition sizes are listed in terms of NAND block size */
{
@@ -166,7 +254,7 @@ static void __init beagle_display_init(void)
static struct omap2_hsmmc_info mmc[] = {
{
.mmc = 1,
- .wires = 8,
+ .caps = MMC_CAP_4_BIT_DATA | MMC_CAP_8_BIT_DATA,
.gpio_wp = 29,
},
{} /* Terminator */
@@ -185,7 +273,10 @@ static struct gpio_led gpio_leds[];
static int beagle_twl_gpio_setup(struct device *dev,
unsigned gpio, unsigned ngpio)
{
- if (system_rev >= 0x20 && system_rev <= 0x34301000) {
+ if (omap3_beagle_get_rev() == OMAP3BEAGLE_BOARD_XM) {
+ mmc[0].gpio_wp = -EINVAL;
+ } else if ((omap3_beagle_get_rev() == OMAP3BEAGLE_BOARD_C1_3) ||
+ (omap3_beagle_get_rev() == OMAP3BEAGLE_BOARD_C4)) {
omap_mux_init_gpio(23, OMAP_PIN_INPUT);
mmc[0].gpio_wp = 23;
} else {
@@ -322,13 +413,19 @@ static struct i2c_board_info __initdata beagle_i2c_boardinfo[] = {
},
};
+static struct i2c_board_info __initdata beagle_i2c_eeprom[] = {
+ {
+ I2C_BOARD_INFO("eeprom", 0x50),
+ },
+};
+
static int __init omap3_beagle_i2c_init(void)
{
omap_register_i2c_bus(1, 2600, beagle_i2c_boardinfo,
ARRAY_SIZE(beagle_i2c_boardinfo));
/* Bus 3 is attached to the DVI port where devices like the pico DLP
* projector don't work reliably with 400kHz */
- omap_register_i2c_bus(3, 100, NULL, 0);
+ omap_register_i2c_bus(3, 100, beagle_i2c_eeprom, ARRAY_SIZE(beagle_i2c_eeprom));
return 0;
}
@@ -464,6 +561,7 @@ static struct omap_musb_board_data musb_board_data = {
static void __init omap3_beagle_init(void)
{
omap3_mux_init(board_mux, OMAP_PACKAGE_CBB);
+ omap3_beagle_init_rev();
omap3_beagle_i2c_init();
platform_add_devices(omap3_beagle_devices,
ARRAY_SIZE(omap3_beagle_devices));
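The Beagle revision detection above packs three strap GPIOs into a small integer, and the switch cases follow directly from the table in the comment. A one-function sketch of the packing (helper name is illustrative, not part of the patch):

#include <linux/types.h>

/* Sketch: GPIO171 is bit 0, GPIO172 bit 1, GPIO173 bit 2.  With values
 * listed as (gpio171, gpio172, gpio173): Ax/Bx (1,1,1) -> 7,
 * C1-C3 (0,1,1) -> 6, C4 (1,0,1) -> 5, xM (0,0,0) -> 0. */
static inline u16 beagle_pack_rev(int gpio171, int gpio172, int gpio173)
{
	return gpio171 | (gpio172 << 1) | (gpio173 << 2);
}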
diff --git a/arch/arm/mach-omap2/board-omap3evm.c b/arch/arm/mach-omap2/board-omap3evm.c
index 8936e4fba334..b04365c6bb10 100644
--- a/arch/arm/mach-omap2/board-omap3evm.c
+++ b/arch/arm/mach-omap2/board-omap3evm.c
@@ -31,6 +31,7 @@
#include <linux/smsc911x.h>
#include <linux/regulator/machine.h>
+#include <linux/mmc/host.h>
#include <mach/hardware.h>
#include <asm/mach-types.h>
@@ -370,7 +371,7 @@ static struct regulator_init_data omap3evm_vsim = {
static struct omap2_hsmmc_info mmc[] = {
{
.mmc = 1,
- .wires = 4,
+ .caps = MMC_CAP_4_BIT_DATA,
.gpio_cd = -EINVAL,
.gpio_wp = 63,
},
@@ -446,7 +447,7 @@ static struct twl4030_usb_data omap3evm_usb_data = {
.usb_mode = T2_USB_MODE_ULPI,
};
-static int board_keymap[] = {
+static uint32_t board_keymap[] = {
KEY(0, 0, KEY_LEFT),
KEY(0, 1, KEY_DOWN),
KEY(0, 2, KEY_ENTER),
@@ -584,7 +585,7 @@ static int ads7846_get_pendown_state(void)
return !gpio_get_value(OMAP3_EVM_TS_GPIO);
}
-struct ads7846_platform_data ads7846_config = {
+static struct ads7846_platform_data ads7846_config = {
.x_max = 0x0fff,
.y_max = 0x0fff,
.x_plate_ohms = 180,
@@ -603,7 +604,7 @@ static struct omap2_mcspi_device_config ads7846_mcspi_config = {
.single_channel = 1, /* 0: slave, 1: master */
};
-struct spi_board_info omap3evm_spi_board_info[] = {
+static struct spi_board_info omap3evm_spi_board_info[] = {
[0] = {
.modalias = "ads7846",
.bus_num = 1,
diff --git a/arch/arm/mach-omap2/board-omap3logic.c b/arch/arm/mach-omap2/board-omap3logic.c
new file mode 100644
index 000000000000..5f7d2c1e7ef5
--- /dev/null
+++ b/arch/arm/mach-omap2/board-omap3logic.c
@@ -0,0 +1,241 @@
+/*
+ * linux/arch/arm/mach-omap2/board-omap3logic.c
+ *
+ * Copyright (C) 2010 Li-Pro.Net
+ * Stephan Linz <linz@li-pro.net>
+ *
+ * Copyright (C) 2010 Logic Product Development, Inc.
+ * Peter Barada <peter.barada@logicpd.com>
+ *
+ * Modified from Beagle, EVM, and RX51
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/platform_device.h>
+#include <linux/delay.h>
+#include <linux/err.h>
+#include <linux/clk.h>
+#include <linux/io.h>
+#include <linux/gpio.h>
+
+#include <linux/regulator/machine.h>
+
+#include <linux/i2c/twl.h>
+#include <linux/mmc/host.h>
+
+#include <mach/hardware.h>
+#include <asm/mach-types.h>
+#include <asm/mach/arch.h>
+#include <asm/mach/map.h>
+
+#include "mux.h"
+#include "hsmmc.h"
+#include "timer-gp.h"
+#include "control.h"
+
+#include <plat/mux.h>
+#include <plat/board.h>
+#include <plat/common.h>
+#include <plat/gpmc-smsc911x.h>
+#include <plat/gpmc.h>
+#include <plat/sdrc.h>
+
+#define OMAP3LOGIC_SMSC911X_CS 1
+
+#define OMAP3530_LV_SOM_MMC_GPIO_CD 110
+#define OMAP3530_LV_SOM_MMC_GPIO_WP 126
+#define OMAP3530_LV_SOM_SMSC911X_GPIO_IRQ 152
+
+#define OMAP3_TORPEDO_MMC_GPIO_CD 127
+#define OMAP3_TORPEDO_SMSC911X_GPIO_IRQ 129
+
+static struct regulator_consumer_supply omap3logic_vmmc1_supply = {
+ .supply = "vmmc",
+};
+
+/* VMMC1 for MMC1 pins CMD, CLK, DAT0..DAT3 (20 mA, plus card == max 220 mA) */
+static struct regulator_init_data omap3logic_vmmc1 = {
+ .constraints = {
+ .name = "VMMC1",
+ .min_uV = 1850000,
+ .max_uV = 3150000,
+ .valid_modes_mask = REGULATOR_MODE_NORMAL
+ | REGULATOR_MODE_STANDBY,
+ .valid_ops_mask = REGULATOR_CHANGE_VOLTAGE
+ | REGULATOR_CHANGE_MODE
+ | REGULATOR_CHANGE_STATUS,
+ },
+ .num_consumer_supplies = 1,
+ .consumer_supplies = &omap3logic_vmmc1_supply,
+};
+
+static struct twl4030_gpio_platform_data omap3logic_gpio_data = {
+ .gpio_base = OMAP_MAX_GPIO_LINES,
+ .irq_base = TWL4030_GPIO_IRQ_BASE,
+ .irq_end = TWL4030_GPIO_IRQ_END,
+ .use_leds = true,
+ .pullups = BIT(1),
+ .pulldowns = BIT(2) | BIT(6) | BIT(7) | BIT(8)
+ | BIT(13) | BIT(15) | BIT(16) | BIT(17),
+};
+
+static struct twl4030_platform_data omap3logic_twldata = {
+ .irq_base = TWL4030_IRQ_BASE,
+ .irq_end = TWL4030_IRQ_END,
+
+ /* platform_data for children goes here */
+ .gpio = &omap3logic_gpio_data,
+ .vmmc1 = &omap3logic_vmmc1,
+};
+
+static struct i2c_board_info __initdata omap3logic_i2c_boardinfo[] = {
+ {
+ I2C_BOARD_INFO("twl4030", 0x48),
+ .flags = I2C_CLIENT_WAKE,
+ .irq = INT_34XX_SYS_NIRQ,
+ .platform_data = &omap3logic_twldata,
+ },
+};
+
+static int __init omap3logic_i2c_init(void)
+{
+ omap_register_i2c_bus(1, 2600, omap3logic_i2c_boardinfo,
+ ARRAY_SIZE(omap3logic_i2c_boardinfo));
+ return 0;
+}
+
+static struct omap2_hsmmc_info __initdata board_mmc_info[] = {
+ {
+ .name = "external",
+ .mmc = 1,
+ .caps = MMC_CAP_4_BIT_DATA,
+ .gpio_cd = -EINVAL,
+ .gpio_wp = -EINVAL,
+ },
+ {} /* Terminator */
+};
+
+static void __init board_mmc_init(void)
+{
+ if (machine_is_omap3530_lv_som()) {
+ /* OMAP3530 LV SOM board */
+ board_mmc_info[0].gpio_cd = OMAP3530_LV_SOM_MMC_GPIO_CD;
+ board_mmc_info[0].gpio_wp = OMAP3530_LV_SOM_MMC_GPIO_WP;
+ omap_mux_init_signal("gpio_110", OMAP_PIN_OUTPUT);
+ omap_mux_init_signal("gpio_126", OMAP_PIN_OUTPUT);
+ } else if (machine_is_omap3_torpedo()) {
+ /* OMAP3 Torpedo board */
+ board_mmc_info[0].gpio_cd = OMAP3_TORPEDO_MMC_GPIO_CD;
+ omap_mux_init_signal("gpio_127", OMAP_PIN_OUTPUT);
+ } else {
+ /* unsupported board */
+ printk(KERN_ERR "%s(): unknown machine type\n", __func__);
+ return;
+ }
+
+ omap2_hsmmc_init(board_mmc_info);
+ /* link regulators to MMC adapters */
+ omap3logic_vmmc1_supply.dev = board_mmc_info[0].dev;
+}
+
+static struct omap_smsc911x_platform_data __initdata board_smsc911x_data = {
+ .cs = OMAP3LOGIC_SMSC911X_CS,
+ .gpio_irq = -EINVAL,
+ .gpio_reset = -EINVAL,
+ .flags = IORESOURCE_IRQ_LOWLEVEL,
+};
+
+/* TODO/FIXME (comment by Peter Barada, LogicPD):
+ * Fix the PBIAS voltage for Torpedo MMC1 pins that
+ * are used for other needs (IRQs, etc). */
+static void omap3torpedo_fix_pbias_voltage(void)
+{
+ u16 control_pbias_offset = OMAP343X_CONTROL_PBIAS_LITE;
+ u32 reg;
+
+ if (machine_is_omap3_torpedo())
+ {
+ /* Set the bias for the pin */
+ reg = omap_ctrl_readl(control_pbias_offset);
+
+ reg &= ~OMAP343X_PBIASLITEPWRDNZ1;
+ omap_ctrl_writel(reg, control_pbias_offset);
+
+ /* 100ms delay required for PBIAS configuration */
+ msleep(100);
+
+ reg |= OMAP343X_PBIASLITEVMODE1;
+ reg |= OMAP343X_PBIASLITEPWRDNZ1;
+ omap_ctrl_writel(reg | 0x300, control_pbias_offset);
+ }
+}
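+/* The sequence above powers the PBIAS cell down (PWRDNZ cleared), waits
+ * the required 100 ms, then re-enables it with the voltage-mode bit set. */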
+
+static inline void __init board_smsc911x_init(void)
+{
+ if (machine_is_omap3530_lv_som()) {
+ /* OMAP3530 LV SOM board */
+ board_smsc911x_data.gpio_irq =
+ OMAP3530_LV_SOM_SMSC911X_GPIO_IRQ;
+ omap_mux_init_signal("gpio_152", OMAP_PIN_INPUT);
+ } else if (machine_is_omap3_torpedo()) {
+ /* OMAP3 Torpedo board */
+ board_smsc911x_data.gpio_irq = OMAP3_TORPEDO_SMSC911X_GPIO_IRQ;
+ omap_mux_init_signal("gpio_129", OMAP_PIN_INPUT);
+ } else {
+ /* unsupported board */
+ printk(KERN_ERR "%s(): unknown machine type\n", __func__);
+ return;
+ }
+
+ gpmc_smsc911x_init(&board_smsc911x_data);
+}
+
+static void __init omap3logic_init_irq(void)
+{
+ omap2_init_common_hw(NULL, NULL);
+ omap_init_irq();
+ omap_gpio_init();
+}
+
+#ifdef CONFIG_OMAP_MUX
+static struct omap_board_mux board_mux[] __initdata = {
+ { .reg_offset = OMAP_MUX_TERMINATOR },
+};
+#else
+#define board_mux NULL
+#endif
+
+static void __init omap3logic_init(void)
+{
+ omap3_mux_init(board_mux, OMAP_PACKAGE_CBB);
+ omap3torpedo_fix_pbias_voltage();
+ omap3logic_i2c_init();
+ omap_serial_init();
+ board_mmc_init();
+ board_smsc911x_init();
+
+ /* Ensure SDRC pins are mux'd for self-refresh */
+ omap_mux_init_signal("sdrc_cke0", OMAP_PIN_OUTPUT);
+ omap_mux_init_signal("sdrc_cke1", OMAP_PIN_OUTPUT);
+}
+
+MACHINE_START(OMAP3_TORPEDO, "Logic OMAP3 Torpedo board")
+ .boot_params = 0x80000100,
+ .map_io = omap3_map_io,
+ .init_irq = omap3logic_init_irq,
+ .init_machine = omap3logic_init,
+ .timer = &omap_timer,
+MACHINE_END
+
+MACHINE_START(OMAP3530_LV_SOM, "OMAP Logic 3530 LV SOM board")
+ .boot_params = 0x80000100,
+ .map_io = omap3_map_io,
+ .init_irq = omap3logic_init_irq,
+ .init_machine = omap3logic_init,
+ .timer = &omap_timer,
+MACHINE_END
diff --git a/arch/arm/mach-omap2/board-omap3pandora.c b/arch/arm/mach-omap2/board-omap3pandora.c
index 41d6f549070c..89ed1be2d62e 100644
--- a/arch/arm/mach-omap2/board-omap3pandora.c
+++ b/arch/arm/mach-omap2/board-omap3pandora.c
@@ -32,7 +32,9 @@
#include <linux/input.h>
#include <linux/input/matrix_keypad.h>
#include <linux/gpio_keys.h>
+#include <linux/mmc/host.h>
#include <linux/mmc/card.h>
+#include <linux/regulator/fixed.h>
#include <asm/mach-types.h>
#include <asm/mach/arch.h>
@@ -276,14 +278,14 @@ static void pandora_wl1251_init_card(struct mmc_card *card)
static struct omap2_hsmmc_info omap3pandora_mmc[] = {
{
.mmc = 1,
- .wires = 4,
+ .caps = MMC_CAP_4_BIT_DATA,
.gpio_cd = -EINVAL,
.gpio_wp = 126,
.ext_clock = 0,
},
{
.mmc = 2,
- .wires = 4,
+ .caps = MMC_CAP_4_BIT_DATA,
.gpio_cd = -EINVAL,
.gpio_wp = 127,
.ext_clock = 1,
@@ -291,7 +293,7 @@ static struct omap2_hsmmc_info omap3pandora_mmc[] = {
},
{
.mmc = 3,
- .wires = 4,
+ .caps = MMC_CAP_4_BIT_DATA,
.gpio_cd = -EINVAL,
.gpio_wp = -EINVAL,
.init_card = pandora_wl1251_init_card,
@@ -344,6 +346,9 @@ static struct regulator_consumer_supply pandora_vmmc1_supply =
static struct regulator_consumer_supply pandora_vmmc2_supply =
REGULATOR_SUPPLY("vmmc", "mmci-omap-hs.1");
+static struct regulator_consumer_supply pandora_vmmc3_supply =
+ REGULATOR_SUPPLY("vmmc", "mmci-omap-hs.2");
+
static struct regulator_consumer_supply pandora_vdda_dac_supply =
REGULATOR_SUPPLY("vdda_dac", "omapdss");
@@ -488,6 +493,33 @@ static struct regulator_init_data pandora_vsim = {
.consumer_supplies = &pandora_adac_supply,
};
+/* Fixed regulator internal to Wifi module */
+static struct regulator_init_data pandora_vmmc3 = {
+ .constraints = {
+ .valid_ops_mask = REGULATOR_CHANGE_STATUS,
+ },
+ .num_consumer_supplies = 1,
+ .consumer_supplies = &pandora_vmmc3_supply,
+};
+
+static struct fixed_voltage_config pandora_vwlan = {
+ .supply_name = "vwlan",
+ .microvolts = 1800000, /* 1.8V */
+ .gpio = PANDORA_WIFI_NRESET_GPIO,
+ .startup_delay = 50000, /* 50ms */
+ .enable_high = 1,
+ .enabled_at_boot = 0,
+ .init_data = &pandora_vmmc3,
+};
+
+static struct platform_device pandora_vwlan_device = {
+ .name = "reg-fixed-voltage",
+ .id = 1,
+ .dev = {
+ .platform_data = &pandora_vwlan,
+ },
+};
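+/* The WL1251 nreset line (PANDORA_WIFI_NRESET_GPIO) is now driven through
+ * this fixed regulator, so the manual gpio_request/gpio_direction_output
+ * handling is dropped from pandora_wl1251_init() below. */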
+
static struct twl4030_usb_data omap3pandora_usb_data = {
.usb_mode = T2_USB_MODE_ULPI,
};
@@ -501,6 +533,8 @@ static struct twl4030_codec_data omap3pandora_codec_data = {
.audio = &omap3pandora_audio_data,
};
+static struct twl4030_bci_platform_data pandora_bci_data;
+
static struct twl4030_platform_data omap3pandora_twldata = {
.irq_base = TWL4030_IRQ_BASE,
.irq_end = TWL4030_IRQ_END,
@@ -516,6 +550,7 @@ static struct twl4030_platform_data omap3pandora_twldata = {
.vaux4 = &pandora_vaux4,
.vsim = &pandora_vsim,
.keypad = &pandora_kp_data,
+ .bci = &pandora_bci_data,
};
static struct i2c_board_info __initdata omap3pandora_i2c_boardinfo[] = {
@@ -644,19 +679,8 @@ static void pandora_wl1251_init(void)
if (pandora_wl1251_pdata.irq < 0)
goto fail_irq;
- ret = gpio_request(PANDORA_WIFI_NRESET_GPIO, "wl1251 nreset");
- if (ret < 0)
- goto fail_irq;
-
- /* start powered so that it probes with MMC subsystem */
- ret = gpio_direction_output(PANDORA_WIFI_NRESET_GPIO, 1);
- if (ret < 0)
- goto fail_nreset;
-
return;
-fail_nreset:
- gpio_free(PANDORA_WIFI_NRESET_GPIO);
fail_irq:
gpio_free(PANDORA_WIFI_IRQ_GPIO);
fail:
@@ -668,6 +692,7 @@ static struct platform_device *omap3pandora_devices[] __initdata = {
&pandora_keys_gpio,
&pandora_dss_device,
&pandora_wl1251_data,
+ &pandora_vwlan_device,
};
static const struct ehci_hcd_omap_platform_data ehci_pdata __initconst = {
diff --git a/arch/arm/mach-omap2/board-omap3stalker.c b/arch/arm/mach-omap2/board-omap3stalker.c
index bc5ac83bd4cf..f25272125413 100644
--- a/arch/arm/mach-omap2/board-omap3stalker.c
+++ b/arch/arm/mach-omap2/board-omap3stalker.c
@@ -26,6 +26,7 @@
#include <linux/regulator/machine.h>
#include <linux/i2c/twl.h>
+#include <linux/mmc/host.h>
#include <mach/hardware.h>
#include <asm/mach-types.h>
@@ -38,7 +39,6 @@
#include <plat/gpmc.h>
#include <plat/nand.h>
#include <plat/usb.h>
-#include <plat/timer-gp.h>
#include <plat/display.h>
#include <plat/mcspi.h>
@@ -52,6 +52,7 @@
#include "sdram-micron-mt46h32m32lf-6.h"
#include "mux.h"
#include "hsmmc.h"
+#include "timer-gp.h"
#if defined(CONFIG_SMSC911X) || defined(CONFIG_SMSC911X_MODULE)
#define OMAP3STALKER_ETHR_START 0x2c000000
@@ -275,7 +276,7 @@ static struct regulator_init_data omap3stalker_vsim = {
static struct omap2_hsmmc_info mmc[] = {
{
.mmc = 1,
- .wires = 4,
+ .caps = MMC_CAP_4_BIT_DATA,
.gpio_cd = -EINVAL,
.gpio_wp = 23,
},
@@ -389,7 +390,7 @@ static struct twl4030_usb_data omap3stalker_usb_data = {
.usb_mode = T2_USB_MODE_ULPI,
};
-static int board_keymap[] = {
+static uint32_t board_keymap[] = {
KEY(0, 0, KEY_LEFT),
KEY(0, 1, KEY_DOWN),
KEY(0, 2, KEY_ENTER),
@@ -564,7 +565,7 @@ static struct omap2_mcspi_device_config ads7846_mcspi_config = {
.single_channel = 1, /* 0: slave, 1: master */
};
-struct spi_board_info omap3stalker_spi_board_info[] = {
+static struct spi_board_info omap3stalker_spi_board_info[] = {
[0] = {
.modalias = "ads7846",
.bus_num = 1,
diff --git a/arch/arm/mach-omap2/board-omap3touchbook.c b/arch/arm/mach-omap2/board-omap3touchbook.c
index 0e99ce584dbf..41104bb8774c 100644
--- a/arch/arm/mach-omap2/board-omap3touchbook.c
+++ b/arch/arm/mach-omap2/board-omap3touchbook.c
@@ -27,6 +27,7 @@
#include <linux/mtd/mtd.h>
#include <linux/mtd/partitions.h>
#include <linux/mtd/nand.h>
+#include <linux/mmc/host.h>
#include <plat/mcspi.h>
#include <linux/spi/spi.h>
@@ -47,10 +48,10 @@
#include <plat/gpmc.h>
#include <plat/nand.h>
#include <plat/usb.h>
-#include <plat/timer-gp.h>
#include "mux.h"
#include "hsmmc.h"
+#include "timer-gp.h"
#include <asm/setup.h>
@@ -61,7 +62,7 @@
#define TB_BL_PWM_TIMER 9
#define TB_KILL_POWER_GPIO 168
-unsigned long touchbook_revision;
+static unsigned long touchbook_revision;
static struct mtd_partition omap3touchbook_nand_partitions[] = {
/* All the partition sizes are listed in terms of NAND block size */
@@ -108,7 +109,7 @@ static struct omap_nand_platform_data omap3touchbook_nand_data = {
static struct omap2_hsmmc_info mmc[] = {
{
.mmc = 1,
- .wires = 8,
+ .caps = MMC_CAP_4_BIT_DATA | MMC_CAP_8_BIT_DATA,
.gpio_wp = 29,
},
{} /* Terminator */
diff --git a/arch/arm/mach-omap2/board-omap4panda.c b/arch/arm/mach-omap2/board-omap4panda.c
index db69bcadf4c7..1ecd0a6cefb7 100644
--- a/arch/arm/mach-omap2/board-omap4panda.c
+++ b/arch/arm/mach-omap2/board-omap4panda.c
@@ -20,6 +20,7 @@
#include <linux/init.h>
#include <linux/platform_device.h>
#include <linux/io.h>
+#include <linux/leds.h>
#include <linux/gpio.h>
#include <linux/usb/otg.h>
#include <linux/i2c/twl.h>
@@ -33,12 +34,45 @@
#include <plat/board.h>
#include <plat/common.h>
-#include <plat/control.h>
-#include <plat/timer-gp.h>
#include <plat/usb.h>
#include <plat/mmc.h>
+#include "timer-gp.h"
+
#include "hsmmc.h"
+#include "control.h"
+
+#define GPIO_HUB_POWER 1
+#define GPIO_HUB_NRESET 62
+
+static struct gpio_led gpio_leds[] = {
+ {
+ .name = "pandaboard::status1",
+ .default_trigger = "heartbeat",
+ .gpio = 7,
+ },
+ {
+ .name = "pandaboard::status2",
+ .default_trigger = "mmc0",
+ .gpio = 8,
+ },
+};
+static struct gpio_led_platform_data gpio_led_info = {
+ .leds = gpio_leds,
+ .num_leds = ARRAY_SIZE(gpio_leds),
+};
+
+static struct platform_device leds_gpio = {
+ .name = "leds-gpio",
+ .id = -1,
+ .dev = {
+ .platform_data = &gpio_led_info,
+ },
+};
+
+static struct platform_device *panda_devices[] __initdata = {
+ &leds_gpio,
+};
static void __init omap4_panda_init_irq(void)
{
@@ -47,6 +81,56 @@ static void __init omap4_panda_init_irq(void)
omap_gpio_init();
}
+static const struct ehci_hcd_omap_platform_data ehci_pdata __initconst = {
+ .port_mode[0] = EHCI_HCD_OMAP_MODE_PHY,
+ .port_mode[1] = EHCI_HCD_OMAP_MODE_UNKNOWN,
+ .port_mode[2] = EHCI_HCD_OMAP_MODE_UNKNOWN,
+ .phy_reset = false,
+ .reset_gpio_port[0] = -EINVAL,
+ .reset_gpio_port[1] = -EINVAL,
+ .reset_gpio_port[2] = -EINVAL
+};
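+/* Only the first EHCI port is configured (PHY mode); with phy_reset false,
+ * the hub power and reset GPIOs are toggled by hand in omap4_ehci_init(). */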
+
+static void __init omap4_ehci_init(void)
+{
+ int ret;
+
+
+ /* disable the power to the usb hub prior to init */
+ ret = gpio_request(GPIO_HUB_POWER, "hub_power");
+ if (ret) {
+ pr_err("Cannot request GPIO %d\n", GPIO_HUB_POWER);
+ goto error1;
+ }
+ gpio_export(GPIO_HUB_POWER, 0);
+ gpio_direction_output(GPIO_HUB_POWER, 0);
+ gpio_set_value(GPIO_HUB_POWER, 0);
+
+ /* reset phy+hub */
+ ret = gpio_request(GPIO_HUB_NRESET, "hub_nreset");
+ if (ret) {
+ pr_err("Cannot request GPIO %d\n", GPIO_HUB_NRESET);
+ goto error2;
+ }
+ gpio_export(GPIO_HUB_NRESET, 0);
+ gpio_direction_output(GPIO_HUB_NRESET, 0);
+ gpio_set_value(GPIO_HUB_NRESET, 0);
+ gpio_set_value(GPIO_HUB_NRESET, 1);
+
+ usb_ehci_init(&ehci_pdata);
+
+ /* enable power to hub */
+ gpio_set_value(GPIO_HUB_POWER, 1);
+ return;
+
+error2:
+ gpio_free(GPIO_HUB_POWER);
+error1:
+ pr_err("Unable to initialize EHCI power/reset\n");
+ return;
+
+}
+
static struct omap_musb_board_data musb_board_data = {
.interface_type = MUSB_INTERFACE_UTMI,
.mode = MUSB_PERIPHERAL,
@@ -56,7 +140,7 @@ static struct omap_musb_board_data musb_board_data = {
static struct omap2_hsmmc_info mmc[] = {
{
.mmc = 1,
- .wires = 8,
+ .caps = MMC_CAP_4_BIT_DATA | MMC_CAP_8_BIT_DATA,
.gpio_wp = -EINVAL,
},
{} /* Terminator */
@@ -67,10 +151,6 @@ static struct regulator_consumer_supply omap4_panda_vmmc_supply[] = {
.supply = "vmmc",
.dev_name = "mmci-omap-hs.0",
},
- {
- .supply = "vmmc",
- .dev_name = "mmci-omap-hs.1",
- },
};
static int omap4_twl6030_hsmmc_late_init(struct device *dev)
@@ -80,16 +160,32 @@ static int omap4_twl6030_hsmmc_late_init(struct device *dev)
struct platform_device, dev);
struct omap_mmc_platform_data *pdata = dev->platform_data;
+ if (!pdata) {
+ dev_err(dev, "%s: NULL platform data\n", __func__);
+ return -EINVAL;
+ }
/* Setting MMC1 Card detect Irq */
- if (pdev->id == 0)
- pdata->slots[0].card_detect_irq = TWL6030_IRQ_BASE +
- MMCDETECT_INTR_OFFSET;
+ if (pdev->id == 0) {
+ ret = twl6030_mmc_card_detect_config();
+ if (ret)
+ dev_err(dev, "%s: Error card detect config(%d)\n",
+ __func__, ret);
+ else
+ pdata->slots[0].card_detect = twl6030_mmc_card_detect;
+ }
return ret;
}
static __init void omap4_twl6030_hsmmc_set_late_init(struct device *dev)
{
- struct omap_mmc_platform_data *pdata = dev->platform_data;
+ struct omap_mmc_platform_data *pdata;
+
+ /* dev can be null if CONFIG_MMC_OMAP_HS is not set */
+ if (!dev) {
+ pr_err("Failed omap4_twl6030_hsmmc_set_late_init\n");
+ return;
+ }
+ pdata = dev->platform_data;
pdata->init = omap4_twl6030_hsmmc_late_init;
}
@@ -156,7 +252,7 @@ static struct regulator_init_data omap4_panda_vmmc = {
| REGULATOR_CHANGE_MODE
| REGULATOR_CHANGE_STATUS,
},
- .num_consumer_supplies = 2,
+ .num_consumer_supplies = 1,
.consumer_supplies = omap4_panda_vmmc_supply,
};
@@ -274,13 +370,13 @@ static int __init omap4_panda_i2c_init(void)
}
static void __init omap4_panda_init(void)
{
- int status;
-
omap4_panda_i2c_init();
+ platform_add_devices(panda_devices, ARRAY_SIZE(panda_devices));
omap_serial_init();
omap4_twl6030_hsmmc_init(mmc);
/* OMAP4 Panda uses internal transceiver so register nop transceiver */
usb_nop_xceiv_register();
+ omap4_ehci_init();
/* FIXME: allow multi-omap to boot until musb is updated for omap4 */
if (!cpu_is_omap44xx())
usb_musb_init(&musb_board_data);
diff --git a/arch/arm/mach-omap2/board-overo.c b/arch/arm/mach-omap2/board-overo.c
index 5e528ca015a1..7053bc0b46db 100644
--- a/arch/arm/mach-omap2/board-overo.c
+++ b/arch/arm/mach-omap2/board-overo.c
@@ -32,6 +32,7 @@
#include <linux/mtd/mtd.h>
#include <linux/mtd/nand.h>
#include <linux/mtd/partitions.h>
+#include <linux/mmc/host.h>
#include <asm/mach-types.h>
#include <asm/mach/arch.h>
@@ -303,13 +304,13 @@ static void __init overo_flash_init(void)
static struct omap2_hsmmc_info mmc[] = {
{
.mmc = 1,
- .wires = 4,
+ .caps = MMC_CAP_4_BIT_DATA,
.gpio_cd = -EINVAL,
.gpio_wp = -EINVAL,
},
{
.mmc = 2,
- .wires = 4,
+ .caps = MMC_CAP_4_BIT_DATA,
.gpio_cd = -EINVAL,
.gpio_wp = -EINVAL,
.transceiver = true,
diff --git a/arch/arm/mach-omap2/board-rx51-peripherals.c b/arch/arm/mach-omap2/board-rx51-peripherals.c
index ce28a851dcd3..3fec4d62a91a 100644
--- a/arch/arm/mach-omap2/board-rx51-peripherals.c
+++ b/arch/arm/mach-omap2/board-rx51-peripherals.c
@@ -23,6 +23,7 @@
#include <linux/gpio.h>
#include <linux/gpio_keys.h>
#include <linux/mmc/host.h>
+#include <sound/tlv320aic3x.h>
#include <plat/mcspi.h>
#include <plat/board.h>
@@ -32,6 +33,8 @@
#include <plat/onenand.h>
#include <plat/gpmc-smc91x.h>
+#include <mach/board-rx51.h>
+
#include <sound/tlv320aic3x.h>
#include <sound/tpa6130a2-plat.h>
@@ -104,6 +107,10 @@ static struct spi_board_info rx51_peripherals_spi_board_info[] __initdata = {
},
};
+static struct platform_device rx51_charger_device = {
+ .name = "isp1704_charger",
+};
+
#if defined(CONFIG_KEYBOARD_GPIO) || defined(CONFIG_KEYBOARD_GPIO_MODULE)
#define RX51_GPIO_CAMERA_LENS_COVER 110
@@ -184,7 +191,7 @@ static void __init rx51_add_gpio_keys(void)
}
#endif /* CONFIG_KEYBOARD_GPIO || CONFIG_KEYBOARD_GPIO_MODULE */
-static int board_keymap[] = {
+static uint32_t board_keymap[] = {
/*
 * Note that KEY(x, 8, KEY_XXX) entries represent "entire row
* connected to the ground" matrix state.
@@ -302,7 +309,7 @@ static struct omap2_hsmmc_info mmc[] __initdata = {
{
.name = "external",
.mmc = 1,
- .wires = 4,
+ .caps = MMC_CAP_4_BIT_DATA,
.cover_only = true,
.gpio_cd = 160,
.gpio_wp = -EINVAL,
@@ -311,7 +318,8 @@ static struct omap2_hsmmc_info mmc[] __initdata = {
{
.name = "internal",
.mmc = 2,
- .wires = 8, /* See also rx51_mmc2_remux */
+ .caps = MMC_CAP_4_BIT_DATA | MMC_CAP_8_BIT_DATA,
+ /* See also rx51_mmc2_remux */
.gpio_cd = -EINVAL,
.gpio_wp = -EINVAL,
.nonremovable = true,
@@ -689,7 +697,6 @@ static struct twl4030_power_data rx51_t2scripts_data __initdata = {
};
-
static struct twl4030_platform_data rx51_twldata __initdata = {
.irq_base = TWL4030_IRQ_BASE,
.irq_end = TWL4030_IRQ_END,
@@ -710,10 +717,6 @@ static struct twl4030_platform_data rx51_twldata __initdata = {
.vio = &rx51_vio,
};
-static struct aic3x_pdata rx51_aic3x_data __initdata = {
- .gpio_reset = 60,
-};
-
static struct tpa6130a2_platform_data rx51_tpa6130a2_data __initdata = {
.id = TPA6130A2,
.power_gpio = 98,
@@ -728,6 +731,17 @@ static struct i2c_board_info __initdata rx51_peripherals_i2c_board_info_1[] = {
},
};
+/* Audio setup data */
+static struct aic3x_setup_data rx51_aic34_setup = {
+ .gpio_func[0] = AIC3X_GPIO1_FUNC_DISABLED,
+ .gpio_func[1] = AIC3X_GPIO2_FUNC_DIGITAL_MIC_INPUT,
+};
+
+static struct aic3x_pdata rx51_aic3x_data = {
+ .setup = &rx51_aic34_setup,
+ .gpio_reset = 60,
+};
+
static struct i2c_board_info __initdata rx51_peripherals_i2c_board_info_2[] = {
{
I2C_BOARD_INFO("tlv320aic3x", 0x18),
@@ -909,5 +923,6 @@ void __init rx51_peripherals_init(void)
spi_register_board_info(rx51_peripherals_spi_board_info,
ARRAY_SIZE(rx51_peripherals_spi_board_info));
omap2_hsmmc_init(mmc);
+ platform_device_register(&rx51_charger_device);
}
diff --git a/arch/arm/mach-omap2/board-rx51-sdram.c b/arch/arm/mach-omap2/board-rx51-sdram.c
index f392844195d2..a43b2c5c838b 100644
--- a/arch/arm/mach-omap2/board-rx51-sdram.c
+++ b/arch/arm/mach-omap2/board-rx51-sdram.c
@@ -43,7 +43,7 @@ struct sdram_timings {
u32 tWTR;
};
-struct omap_sdrc_params rx51_sdrc_params[4];
+static struct omap_sdrc_params rx51_sdrc_params[4];
static const struct sdram_timings rx51_timings[] = {
{
diff --git a/arch/arm/mach-omap2/board-rx51-video.c b/arch/arm/mach-omap2/board-rx51-video.c
index 5a1005ba9815..85503fed4e13 100644
--- a/arch/arm/mach-omap2/board-rx51-video.c
+++ b/arch/arm/mach-omap2/board-rx51-video.c
@@ -20,6 +20,8 @@
#include <plat/vram.h>
#include <plat/mcspi.h>
+#include <mach/board-rx51.h>
+
#include "mux.h"
#define RX51_LCD_RESET_GPIO 90
diff --git a/arch/arm/mach-omap2/board-zoom-debugboard.c b/arch/arm/mach-omap2/board-zoom-debugboard.c
index 1d7f827b0408..007ebdc6c993 100644
--- a/arch/arm/mach-omap2/board-zoom-debugboard.c
+++ b/arch/arm/mach-omap2/board-zoom-debugboard.c
@@ -16,6 +16,8 @@
#include <plat/gpmc.h>
+#include <mach/board-zoom.h>
+
#define ZOOM_SMSC911X_CS 7
#define ZOOM_SMSC911X_GPIO 158
#define ZOOM_QUADUART_CS 3
diff --git a/arch/arm/mach-omap2/board-zoom-peripherals.c b/arch/arm/mach-omap2/board-zoom-peripherals.c
index 189a6d1600b2..86c9b2102952 100644
--- a/arch/arm/mach-omap2/board-zoom-peripherals.c
+++ b/arch/arm/mach-omap2/board-zoom-peripherals.c
@@ -18,6 +18,7 @@
#include <linux/regulator/machine.h>
#include <linux/regulator/fixed.h>
#include <linux/wl12xx.h>
+#include <linux/mmc/host.h>
#include <asm/mach-types.h>
#include <asm/mach/arch.h>
@@ -26,6 +27,8 @@
#include <plat/common.h>
#include <plat/usb.h>
+#include <mach/board-zoom.h>
+
#include "mux.h"
#include "hsmmc.h"
@@ -33,7 +36,7 @@
#define OMAP_ZOOM_WLAN_IRQ_GPIO (162)
/* Zoom2 has Qwerty keyboard*/
-static int board_keymap[] = {
+static uint32_t board_keymap[] = {
KEY(0, 0, KEY_E),
KEY(0, 1, KEY_R),
KEY(0, 2, KEY_T),
@@ -197,14 +200,14 @@ static struct omap2_hsmmc_info mmc[] __initdata = {
{
.name = "external",
.mmc = 1,
- .wires = 4,
+ .caps = MMC_CAP_4_BIT_DATA,
.gpio_wp = -EINVAL,
.power_saving = true,
},
{
.name = "internal",
.mmc = 2,
- .wires = 8,
+ .caps = MMC_CAP_4_BIT_DATA | MMC_CAP_8_BIT_DATA,
.gpio_cd = -EINVAL,
.gpio_wp = -EINVAL,
.nonremovable = true,
@@ -238,6 +241,11 @@ static int zoom_twl_gpio_setup(struct device *dev,
return 0;
}
+/* EXTMUTE callback function */
+void zoom2_set_hs_extmute(int mute)
+{
+ gpio_set_value(ZOOM2_HEADSET_EXTMUTE_GPIO, mute);
+}
static int zoom_batt_table[] = {
/* 0 C*/
@@ -307,6 +315,11 @@ static struct i2c_board_info __initdata zoom_i2c_boardinfo[] = {
static int __init omap_i2c_init(void)
{
+ if (machine_is_omap_zoom2()) {
+ zoom_audio_data.ramp_delay_value = 3; /* 161 ms */
+ zoom_audio_data.hs_extmute = 1;
+ zoom_audio_data.set_hs_extmute = zoom2_set_hs_extmute;
+ }
omap_register_i2c_bus(1, 2400, zoom_i2c_boardinfo,
ARRAY_SIZE(zoom_i2c_boardinfo));
omap_register_i2c_bus(2, 400, NULL, 0);
@@ -336,4 +349,5 @@ void __init zoom_peripherals_init(void)
platform_device_register(&omap_vwlan_device);
usb_musb_init(&musb_board_data);
enable_board_wakeup_source();
+ omap_serial_init();
}
diff --git a/arch/arm/mach-omap2/board-zoom2.c b/arch/arm/mach-omap2/board-zoom2.c
index 24bbd0def64f..2992a9f3a585 100644
--- a/arch/arm/mach-omap2/board-zoom2.c
+++ b/arch/arm/mach-omap2/board-zoom2.c
@@ -14,6 +14,7 @@
#include <linux/platform_device.h>
#include <linux/input.h>
#include <linux/gpio.h>
+#include <linux/i2c/twl.h>
#include <asm/mach-types.h>
#include <asm/mach/arch.h>
@@ -23,6 +24,7 @@
#include <mach/board-zoom.h>
+#include "board-flash.h"
#include "mux.h"
#include "sdram-micron-mt46h32m32lf-6.h"
@@ -34,41 +36,6 @@ static void __init omap_zoom2_init_irq(void)
omap_gpio_init();
}
-/* REVISIT: These audio entries can be removed once MFD code is merged */
-#if 0
-
-static struct twl4030_madc_platform_data zoom2_madc_data = {
- .irq_line = 1,
-};
-
-static struct twl4030_codec_audio_data zoom2_audio_data = {
- .audio_mclk = 26000000,
-};
-
-static struct twl4030_codec_data zoom2_codec_data = {
- .audio_mclk = 26000000,
- .audio = &zoom2_audio_data,
-};
-
-static struct twl4030_platform_data zoom2_twldata = {
- .irq_base = TWL4030_IRQ_BASE,
- .irq_end = TWL4030_IRQ_END,
-
- /* platform_data for children goes here */
- .bci = &zoom2_bci_data,
- .madc = &zoom2_madc_data,
- .usb = &zoom2_usb_data,
- .gpio = &zoom2_gpio_data,
- .keypad = &zoom2_kp_twl4030_data,
- .codec = &zoom2_codec_data,
- .vmmc1 = &zoom2_vmmc1,
- .vmmc2 = &zoom2_vmmc2,
- .vsim = &zoom2_vsim,
-
-};
-
-#endif
-
#ifdef CONFIG_OMAP_MUX
static struct omap_board_mux board_mux[] __initdata = {
/* WLAN IRQ - GPIO 162 */
diff --git a/arch/arm/mach-omap2/board-zoom3.c b/arch/arm/mach-omap2/board-zoom3.c
index b2bb3ff971ac..5adde12c0395 100644
--- a/arch/arm/mach-omap2/board-zoom3.c
+++ b/arch/arm/mach-omap2/board-zoom3.c
@@ -22,6 +22,7 @@
#include <plat/board.h>
#include <plat/usb.h>
+#include "board-flash.h"
#include "mux.h"
#include "sdram-hynix-h8mbx00u0mer-0em.h"
diff --git a/arch/arm/mach-omap2/clock.c b/arch/arm/mach-omap2/clock.c
index 605f531783a8..b5babf5440e4 100644
--- a/arch/arm/mach-omap2/clock.c
+++ b/arch/arm/mach-omap2/clock.c
@@ -395,7 +395,7 @@ void omap2_clk_disable_unused(struct clk *clk)
if ((regval32 & (1 << clk->enable_bit)) == v)
return;
- printk(KERN_DEBUG "Disabling unused clock \"%s\"\n", clk->name);
+ pr_debug("Disabling unused clock \"%s\"\n", clk->name);
if (cpu_is_omap34xx()) {
omap2_clk_enable(clk);
omap2_clk_disable(clk);
diff --git a/arch/arm/mach-omap2/clock2420_data.c b/arch/arm/mach-omap2/clock2420_data.c
index 5f2066a6ba74..21f856252ad8 100644
--- a/arch/arm/mach-omap2/clock2420_data.c
+++ b/arch/arm/mach-omap2/clock2420_data.c
@@ -27,6 +27,7 @@
#include "prm-regbits-24xx.h"
#include "cm-regbits-24xx.h"
#include "sdrc.h"
+#include "control.h"
#define OMAP_CM_REGADDR OMAP2420_CM_REGADDR
@@ -89,6 +90,12 @@ static struct clk alt_ck = { /* Typical 54M or 48M, may not exist */
.clkdm_name = "wkup_clkdm",
};
+/* Optional external clock input for McBSP CLKS */
+static struct clk mcbsp_clks = {
+ .name = "mcbsp_clks",
+ .ops = &clkops_null,
+};
+
/*
* Analog domain root source clocks
*/
@@ -1135,14 +1142,34 @@ static struct clk mcbsp1_ick = {
.recalc = &followparent_recalc,
};
+static const struct clksel_rate common_mcbsp_96m_rates[] = {
+ { .div = 1, .val = 0, .flags = RATE_IN_24XX },
+ { .div = 0 }
+};
+
+static const struct clksel_rate common_mcbsp_mcbsp_rates[] = {
+ { .div = 1, .val = 1, .flags = RATE_IN_24XX },
+ { .div = 0 }
+};
+
+static const struct clksel mcbsp_fck_clksel[] = {
+ { .parent = &func_96m_ck, .rates = common_mcbsp_96m_rates },
+ { .parent = &mcbsp_clks, .rates = common_mcbsp_mcbsp_rates },
+ { .parent = NULL }
+};
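+/* McBSP1/2 functional clocks can now be re-parented between the internal
+ * 96 MHz clock (func_96m_ck) and the external CLKS pad (mcbsp_clks),
+ * selected via CONTROL_DEVCONF0. */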
+
static struct clk mcbsp1_fck = {
.name = "mcbsp1_fck",
.ops = &clkops_omap2_dflt_wait,
.parent = &func_96m_ck,
+ .init = &omap2_init_clksel_parent,
.clkdm_name = "core_l4_clkdm",
.enable_reg = OMAP_CM_REGADDR(CORE_MOD, CM_FCLKEN1),
.enable_bit = OMAP24XX_EN_MCBSP1_SHIFT,
- .recalc = &followparent_recalc,
+ .clksel_reg = OMAP242X_CTRL_REGADDR(OMAP2_CONTROL_DEVCONF0),
+ .clksel_mask = OMAP2_MCBSP1_CLKS_MASK,
+ .clksel = mcbsp_fck_clksel,
+ .recalc = &omap2_clksel_recalc,
};
static struct clk mcbsp2_ick = {
@@ -1159,10 +1186,14 @@ static struct clk mcbsp2_fck = {
.name = "mcbsp2_fck",
.ops = &clkops_omap2_dflt_wait,
.parent = &func_96m_ck,
+ .init = &omap2_init_clksel_parent,
.clkdm_name = "core_l4_clkdm",
.enable_reg = OMAP_CM_REGADDR(CORE_MOD, CM_FCLKEN1),
.enable_bit = OMAP24XX_EN_MCBSP2_SHIFT,
- .recalc = &followparent_recalc,
+ .clksel_reg = OMAP242X_CTRL_REGADDR(OMAP2_CONTROL_DEVCONF0),
+ .clksel_mask = OMAP2_MCBSP2_CLKS_MASK,
+ .clksel = mcbsp_fck_clksel,
+ .recalc = &omap2_clksel_recalc,
};
static struct clk mcspi1_ick = {
@@ -1721,6 +1752,9 @@ static struct omap_clk omap2420_clks[] = {
CLK(NULL, "osc_ck", &osc_ck, CK_242X),
CLK(NULL, "sys_ck", &sys_ck, CK_242X),
CLK(NULL, "alt_ck", &alt_ck, CK_242X),
+ CLK("omap-mcbsp.1", "pad_fck", &mcbsp_clks, CK_242X),
+ CLK("omap-mcbsp.2", "pad_fck", &mcbsp_clks, CK_242X),
+ CLK(NULL, "mcbsp_clks", &mcbsp_clks, CK_242X),
/* internal analog sources */
CLK(NULL, "dpll_ck", &dpll_ck, CK_242X),
CLK(NULL, "apll96_ck", &apll96_ck, CK_242X),
@@ -1728,6 +1762,8 @@ static struct omap_clk omap2420_clks[] = {
/* internal prcm root sources */
CLK(NULL, "func_54m_ck", &func_54m_ck, CK_242X),
CLK(NULL, "core_ck", &core_ck, CK_242X),
+ CLK("omap-mcbsp.1", "prcm_fck", &func_96m_ck, CK_242X),
+ CLK("omap-mcbsp.2", "prcm_fck", &func_96m_ck, CK_242X),
CLK(NULL, "func_96m_ck", &func_96m_ck, CK_242X),
CLK(NULL, "func_48m_ck", &func_48m_ck, CK_242X),
CLK(NULL, "func_12m_ck", &func_12m_ck, CK_242X),
diff --git a/arch/arm/mach-omap2/clock2430_data.c b/arch/arm/mach-omap2/clock2430_data.c
index 701a1716019e..e32afcbdfb88 100644
--- a/arch/arm/mach-omap2/clock2430_data.c
+++ b/arch/arm/mach-omap2/clock2430_data.c
@@ -27,6 +27,7 @@
#include "prm-regbits-24xx.h"
#include "cm-regbits-24xx.h"
#include "sdrc.h"
+#include "control.h"
#define OMAP_CM_REGADDR OMAP2430_CM_REGADDR
@@ -89,6 +90,12 @@ static struct clk alt_ck = { /* Typical 54M or 48M, may not exist */
.clkdm_name = "wkup_clkdm",
};
+/* Optional external clock input for McBSP CLKS */
+static struct clk mcbsp_clks = {
+ .name = "mcbsp_clks",
+ .ops = &clkops_null,
+};
+
/*
* Analog domain root source clocks
*/
@@ -1123,14 +1130,34 @@ static struct clk mcbsp1_ick = {
.recalc = &followparent_recalc,
};
+static const struct clksel_rate common_mcbsp_96m_rates[] = {
+ { .div = 1, .val = 0, .flags = RATE_IN_24XX },
+ { .div = 0 }
+};
+
+static const struct clksel_rate common_mcbsp_mcbsp_rates[] = {
+ { .div = 1, .val = 1, .flags = RATE_IN_24XX },
+ { .div = 0 }
+};
+
+static const struct clksel mcbsp_fck_clksel[] = {
+ { .parent = &func_96m_ck, .rates = common_mcbsp_96m_rates },
+ { .parent = &mcbsp_clks, .rates = common_mcbsp_mcbsp_rates },
+ { .parent = NULL }
+};
+
static struct clk mcbsp1_fck = {
.name = "mcbsp1_fck",
.ops = &clkops_omap2_dflt_wait,
.parent = &func_96m_ck,
+ .init = &omap2_init_clksel_parent,
.clkdm_name = "core_l4_clkdm",
.enable_reg = OMAP_CM_REGADDR(CORE_MOD, CM_FCLKEN1),
.enable_bit = OMAP24XX_EN_MCBSP1_SHIFT,
- .recalc = &followparent_recalc,
+ .clksel_reg = OMAP243X_CTRL_REGADDR(OMAP2_CONTROL_DEVCONF0),
+ .clksel_mask = OMAP2_MCBSP1_CLKS_MASK,
+ .clksel = mcbsp_fck_clksel,
+ .recalc = &omap2_clksel_recalc,
};
static struct clk mcbsp2_ick = {
@@ -1147,10 +1174,14 @@ static struct clk mcbsp2_fck = {
.name = "mcbsp2_fck",
.ops = &clkops_omap2_dflt_wait,
.parent = &func_96m_ck,
+ .init = &omap2_init_clksel_parent,
.clkdm_name = "core_l4_clkdm",
.enable_reg = OMAP_CM_REGADDR(CORE_MOD, CM_FCLKEN1),
.enable_bit = OMAP24XX_EN_MCBSP2_SHIFT,
- .recalc = &followparent_recalc,
+ .clksel_reg = OMAP243X_CTRL_REGADDR(OMAP2_CONTROL_DEVCONF0),
+ .clksel_mask = OMAP2_MCBSP2_CLKS_MASK,
+ .clksel = mcbsp_fck_clksel,
+ .recalc = &omap2_clksel_recalc,
};
static struct clk mcbsp3_ick = {
@@ -1167,10 +1198,14 @@ static struct clk mcbsp3_fck = {
.name = "mcbsp3_fck",
.ops = &clkops_omap2_dflt_wait,
.parent = &func_96m_ck,
+ .init = &omap2_init_clksel_parent,
.clkdm_name = "core_l4_clkdm",
.enable_reg = OMAP_CM_REGADDR(CORE_MOD, OMAP24XX_CM_FCLKEN2),
.enable_bit = OMAP2430_EN_MCBSP3_SHIFT,
- .recalc = &followparent_recalc,
+ .clksel_reg = OMAP243X_CTRL_REGADDR(OMAP243X_CONTROL_DEVCONF1),
+ .clksel_mask = OMAP2_MCBSP3_CLKS_MASK,
+ .clksel = mcbsp_fck_clksel,
+ .recalc = &omap2_clksel_recalc,
};
static struct clk mcbsp4_ick = {
@@ -1187,10 +1222,14 @@ static struct clk mcbsp4_fck = {
.name = "mcbsp4_fck",
.ops = &clkops_omap2_dflt_wait,
.parent = &func_96m_ck,
+ .init = &omap2_init_clksel_parent,
.clkdm_name = "core_l4_clkdm",
.enable_reg = OMAP_CM_REGADDR(CORE_MOD, OMAP24XX_CM_FCLKEN2),
.enable_bit = OMAP2430_EN_MCBSP4_SHIFT,
- .recalc = &followparent_recalc,
+ .clksel_reg = OMAP243X_CTRL_REGADDR(OMAP243X_CONTROL_DEVCONF1),
+ .clksel_mask = OMAP2_MCBSP4_CLKS_MASK,
+ .clksel = mcbsp_fck_clksel,
+ .recalc = &omap2_clksel_recalc,
};
static struct clk mcbsp5_ick = {
@@ -1207,10 +1246,14 @@ static struct clk mcbsp5_fck = {
.name = "mcbsp5_fck",
.ops = &clkops_omap2_dflt_wait,
.parent = &func_96m_ck,
+ .init = &omap2_init_clksel_parent,
.clkdm_name = "core_l4_clkdm",
.enable_reg = OMAP_CM_REGADDR(CORE_MOD, OMAP24XX_CM_FCLKEN2),
.enable_bit = OMAP2430_EN_MCBSP5_SHIFT,
- .recalc = &followparent_recalc,
+ .clksel_reg = OMAP243X_CTRL_REGADDR(OMAP243X_CONTROL_DEVCONF1),
+ .clksel_mask = OMAP2_MCBSP5_CLKS_MASK,
+ .clksel = mcbsp_fck_clksel,
+ .recalc = &omap2_clksel_recalc,
};
static struct clk mcspi1_ick = {
@@ -1808,6 +1851,12 @@ static struct omap_clk omap2430_clks[] = {
CLK(NULL, "osc_ck", &osc_ck, CK_243X),
CLK(NULL, "sys_ck", &sys_ck, CK_243X),
CLK(NULL, "alt_ck", &alt_ck, CK_243X),
+ CLK("omap-mcbsp.1", "pad_fck", &mcbsp_clks, CK_243X),
+ CLK("omap-mcbsp.2", "pad_fck", &mcbsp_clks, CK_243X),
+ CLK("omap-mcbsp.3", "pad_fck", &mcbsp_clks, CK_243X),
+ CLK("omap-mcbsp.4", "pad_fck", &mcbsp_clks, CK_243X),
+ CLK("omap-mcbsp.5", "pad_fck", &mcbsp_clks, CK_243X),
+ CLK(NULL, "mcbsp_clks", &mcbsp_clks, CK_243X),
/* internal analog sources */
CLK(NULL, "dpll_ck", &dpll_ck, CK_243X),
CLK(NULL, "apll96_ck", &apll96_ck, CK_243X),
@@ -1815,6 +1864,11 @@ static struct omap_clk omap2430_clks[] = {
/* internal prcm root sources */
CLK(NULL, "func_54m_ck", &func_54m_ck, CK_243X),
CLK(NULL, "core_ck", &core_ck, CK_243X),
+ CLK("omap-mcbsp.1", "prcm_fck", &func_96m_ck, CK_243X),
+ CLK("omap-mcbsp.2", "prcm_fck", &func_96m_ck, CK_243X),
+ CLK("omap-mcbsp.3", "prcm_fck", &func_96m_ck, CK_243X),
+ CLK("omap-mcbsp.4", "prcm_fck", &func_96m_ck, CK_243X),
+ CLK("omap-mcbsp.5", "prcm_fck", &func_96m_ck, CK_243X),
CLK(NULL, "func_96m_ck", &func_96m_ck, CK_243X),
CLK(NULL, "func_48m_ck", &func_48m_ck, CK_243X),
CLK(NULL, "func_12m_ck", &func_12m_ck, CK_243X),
diff --git a/arch/arm/mach-omap2/clock3xxx_data.c b/arch/arm/mach-omap2/clock3xxx_data.c
index c73906d17458..d85ecd5aebfd 100644
--- a/arch/arm/mach-omap2/clock3xxx_data.c
+++ b/arch/arm/mach-omap2/clock3xxx_data.c
@@ -20,7 +20,6 @@
#include <linux/clk.h>
#include <linux/list.h>
-#include <plat/control.h>
#include <plat/clkdev_omap.h>
#include "clock.h"
@@ -33,6 +32,7 @@
#include "cm-regbits-34xx.h"
#include "prm.h"
#include "prm-regbits-34xx.h"
+#include "control.h"
/*
* clocks
@@ -2465,6 +2465,16 @@ static struct clk uart3_fck = {
.recalc = &followparent_recalc,
};
+static struct clk uart4_fck = {
+ .name = "uart4_fck",
+ .ops = &clkops_omap2_dflt_wait,
+ .parent = &per_48m_fck,
+ .enable_reg = OMAP_CM_REGADDR(OMAP3430_PER_MOD, CM_FCLKEN),
+ .enable_bit = OMAP3630_EN_UART4_SHIFT,
+ .clkdm_name = "per_clkdm",
+ .recalc = &followparent_recalc,
+};
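+/* UART4 exists only on OMAP36xx; uart4_fck and uart4_ick are registered
+ * with the CK_36XX flag in the clock table below. */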
+
static struct clk gpt2_fck = {
.name = "gpt2_fck",
.ops = &clkops_omap2_dflt_wait,
@@ -2715,6 +2725,16 @@ static struct clk uart3_ick = {
.recalc = &followparent_recalc,
};
+static struct clk uart4_ick = {
+ .name = "uart4_ick",
+ .ops = &clkops_omap2_dflt_wait,
+ .parent = &per_l4_ick,
+ .enable_reg = OMAP_CM_REGADDR(OMAP3430_PER_MOD, CM_ICLKEN),
+ .enable_bit = OMAP3630_EN_UART4_SHIFT,
+ .clkdm_name = "per_clkdm",
+ .recalc = &followparent_recalc,
+};
+
static struct clk gpt9_ick = {
.name = "gpt9_ick",
.ops = &clkops_omap2_dflt_wait,
@@ -3188,6 +3208,11 @@ static struct omap_clk omap3xxx_clks[] = {
CLK(NULL, "osc_sys_ck", &osc_sys_ck, CK_3XXX),
CLK(NULL, "sys_ck", &sys_ck, CK_3XXX),
CLK(NULL, "sys_altclk", &sys_altclk, CK_3XXX),
+ CLK("omap-mcbsp.1", "pad_fck", &mcbsp_clks, CK_3XXX),
+ CLK("omap-mcbsp.2", "pad_fck", &mcbsp_clks, CK_3XXX),
+ CLK("omap-mcbsp.3", "pad_fck", &mcbsp_clks, CK_3XXX),
+ CLK("omap-mcbsp.4", "pad_fck", &mcbsp_clks, CK_3XXX),
+ CLK("omap-mcbsp.5", "pad_fck", &mcbsp_clks, CK_3XXX),
CLK(NULL, "mcbsp_clks", &mcbsp_clks, CK_3XXX),
CLK(NULL, "sys_clkout1", &sys_clkout1, CK_3XXX),
CLK(NULL, "dpll1_ck", &dpll1_ck, CK_3XXX),
@@ -3253,6 +3278,8 @@ static struct omap_clk omap3xxx_clks[] = {
CLK(NULL, "cpefuse_fck", &cpefuse_fck, CK_3430ES2 | CK_AM35XX),
CLK(NULL, "ts_fck", &ts_fck, CK_3430ES2 | CK_AM35XX),
CLK(NULL, "usbtll_fck", &usbtll_fck, CK_3430ES2 | CK_AM35XX),
+ CLK("omap-mcbsp.1", "prcm_fck", &core_96m_fck, CK_3XXX),
+ CLK("omap-mcbsp.5", "prcm_fck", &core_96m_fck, CK_3XXX),
CLK(NULL, "core_96m_fck", &core_96m_fck, CK_3XXX),
CLK("mmci-omap-hs.2", "fck", &mmchs3_fck, CK_3430ES2 | CK_AM35XX),
CLK("mmci-omap-hs.1", "fck", &mmchs2_fck, CK_3XXX),
@@ -3346,9 +3373,13 @@ static struct omap_clk omap3xxx_clks[] = {
CLK(NULL, "omap_32ksync_ick", &omap_32ksync_ick, CK_3XXX),
CLK(NULL, "gpt12_ick", &gpt12_ick, CK_3XXX),
CLK(NULL, "gpt1_ick", &gpt1_ick, CK_3XXX),
+ CLK("omap-mcbsp.2", "prcm_fck", &per_96m_fck, CK_3XXX),
+ CLK("omap-mcbsp.3", "prcm_fck", &per_96m_fck, CK_3XXX),
+ CLK("omap-mcbsp.4", "prcm_fck", &per_96m_fck, CK_3XXX),
CLK(NULL, "per_96m_fck", &per_96m_fck, CK_3XXX),
CLK(NULL, "per_48m_fck", &per_48m_fck, CK_3XXX),
CLK(NULL, "uart3_fck", &uart3_fck, CK_3XXX),
+ CLK(NULL, "uart4_fck", &uart4_fck, CK_36XX),
CLK(NULL, "gpt2_fck", &gpt2_fck, CK_3XXX),
CLK(NULL, "gpt3_fck", &gpt3_fck, CK_3XXX),
CLK(NULL, "gpt4_fck", &gpt4_fck, CK_3XXX),
@@ -3372,6 +3403,7 @@ static struct omap_clk omap3xxx_clks[] = {
CLK(NULL, "gpio2_ick", &gpio2_ick, CK_3XXX),
CLK(NULL, "wdt3_ick", &wdt3_ick, CK_3XXX),
CLK(NULL, "uart3_ick", &uart3_ick, CK_3XXX),
+ CLK(NULL, "uart4_ick", &uart4_ick, CK_36XX),
CLK(NULL, "gpt9_ick", &gpt9_ick, CK_3XXX),
CLK(NULL, "gpt8_ick", &gpt8_ick, CK_3XXX),
CLK(NULL, "gpt7_ick", &gpt7_ick, CK_3XXX),
diff --git a/arch/arm/mach-omap2/clock44xx_data.c b/arch/arm/mach-omap2/clock44xx_data.c
index e10db7a90cb2..1599836ba3d9 100644
--- a/arch/arm/mach-omap2/clock44xx_data.c
+++ b/arch/arm/mach-omap2/clock44xx_data.c
@@ -17,13 +17,15 @@
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
+ *
+ * XXX Some of the ES1 clocks have been removed/changed; once support
+ * is added for discriminating clocks by ES level, these should be added back
+ * in.
*/
#include <linux/kernel.h>
#include <linux/list.h>
#include <linux/clk.h>
-
-#include <plat/control.h>
#include <plat/clkdev_omap.h>
#include "clock.h"
@@ -32,6 +34,7 @@
#include "cm-regbits-44xx.h"
#include "prm.h"
#include "prm-regbits-44xx.h"
+#include "control.h"
/* Root clocks */
@@ -175,21 +178,27 @@ static struct clk sys_clkin_ck = {
.recalc = &omap2_clksel_recalc,
};
+static struct clk tie_low_clock_ck = {
+ .name = "tie_low_clock_ck",
+ .rate = 0,
+ .ops = &clkops_null,
+};
+
static struct clk utmi_phy_clkout_ck = {
.name = "utmi_phy_clkout_ck",
- .rate = 12000000,
+ .rate = 60000000,
.ops = &clkops_null,
};
static struct clk xclk60mhsp1_ck = {
.name = "xclk60mhsp1_ck",
- .rate = 12000000,
+ .rate = 60000000,
.ops = &clkops_null,
};
static struct clk xclk60mhsp2_ck = {
.name = "xclk60mhsp2_ck",
- .rate = 12000000,
+ .rate = 60000000,
.ops = &clkops_null,
};
@@ -201,39 +210,23 @@ static struct clk xclk60motg_ck = {
/* Module clocks and DPLL outputs */
-static const struct clksel_rate div2_1to2_rates[] = {
- { .div = 1, .val = 0, .flags = RATE_IN_4430 },
- { .div = 2, .val = 1, .flags = RATE_IN_4430 },
- { .div = 0 },
-};
-
-static const struct clksel dpll_sys_ref_clk_div[] = {
- { .parent = &sys_clkin_ck, .rates = div2_1to2_rates },
+static const struct clksel abe_dpll_bypass_clk_mux_sel[] = {
+ { .parent = &sys_clkin_ck, .rates = div_1_0_rates },
+ { .parent = &sys_32k_ck, .rates = div_1_1_rates },
{ .parent = NULL },
};
-static struct clk dpll_sys_ref_clk = {
- .name = "dpll_sys_ref_clk",
+static struct clk abe_dpll_bypass_clk_mux_ck = {
+ .name = "abe_dpll_bypass_clk_mux_ck",
.parent = &sys_clkin_ck,
- .clksel = dpll_sys_ref_clk_div,
- .clksel_reg = OMAP4430_CM_DPLL_SYS_REF_CLKSEL,
- .clksel_mask = OMAP4430_CLKSEL_0_0_MASK,
.ops = &clkops_null,
- .recalc = &omap2_clksel_recalc,
- .round_rate = &omap2_clksel_round_rate,
- .set_rate = &omap2_clksel_set_rate,
-};
-
-static const struct clksel abe_dpll_refclk_mux_sel[] = {
- { .parent = &dpll_sys_ref_clk, .rates = div_1_0_rates },
- { .parent = &sys_32k_ck, .rates = div_1_1_rates },
- { .parent = NULL },
+ .recalc = &followparent_recalc,
};
static struct clk abe_dpll_refclk_mux_ck = {
.name = "abe_dpll_refclk_mux_ck",
- .parent = &dpll_sys_ref_clk,
- .clksel = abe_dpll_refclk_mux_sel,
+ .parent = &sys_clkin_ck,
+ .clksel = abe_dpll_bypass_clk_mux_sel,
.init = &omap2_init_clksel_parent,
.clksel_reg = OMAP4430_CM_ABE_PLL_REF_CLKSEL,
.clksel_mask = OMAP4430_CLKSEL_0_0_MASK,
@@ -244,7 +237,7 @@ static struct clk abe_dpll_refclk_mux_ck = {
/* DPLL_ABE */
static struct dpll_data dpll_abe_dd = {
.mult_div1_reg = OMAP4430_CM_CLKSEL_DPLL_ABE,
- .clk_bypass = &sys_clkin_ck,
+ .clk_bypass = &abe_dpll_bypass_clk_mux_ck,
.clk_ref = &abe_dpll_refclk_mux_ck,
.control_reg = OMAP4430_CM_CLKMODE_DPLL_ABE,
.modes = (1 << DPLL_LOW_POWER_BYPASS) | (1 << DPLL_LOCKED),
@@ -310,6 +303,12 @@ static struct clk abe_clk = {
.set_rate = &omap2_clksel_set_rate,
};
+static const struct clksel_rate div2_1to2_rates[] = {
+ { .div = 1, .val = 0, .flags = RATE_IN_4430 },
+ { .div = 2, .val = 1, .flags = RATE_IN_4430 },
+ { .div = 0 },
+};
+
static const struct clksel aess_fclk_div[] = {
{ .parent = &abe_clk, .rates = div2_1to2_rates },
{ .parent = NULL },
@@ -380,14 +379,14 @@ static struct clk dpll_abe_m3_ck = {
};
static const struct clksel core_hsd_byp_clk_mux_sel[] = {
- { .parent = &dpll_sys_ref_clk, .rates = div_1_0_rates },
+ { .parent = &sys_clkin_ck, .rates = div_1_0_rates },
{ .parent = &dpll_abe_m3_ck, .rates = div_1_1_rates },
{ .parent = NULL },
};
static struct clk core_hsd_byp_clk_mux_ck = {
.name = "core_hsd_byp_clk_mux_ck",
- .parent = &dpll_sys_ref_clk,
+ .parent = &sys_clkin_ck,
.clksel = core_hsd_byp_clk_mux_sel,
.init = &omap2_init_clksel_parent,
.clksel_reg = OMAP4430_CM_CLKSEL_DPLL_CORE,
@@ -400,7 +399,7 @@ static struct clk core_hsd_byp_clk_mux_ck = {
static struct dpll_data dpll_core_dd = {
.mult_div1_reg = OMAP4430_CM_CLKSEL_DPLL_CORE,
.clk_bypass = &core_hsd_byp_clk_mux_ck,
- .clk_ref = &dpll_sys_ref_clk,
+ .clk_ref = &sys_clkin_ck,
.control_reg = OMAP4430_CM_CLKMODE_DPLL_CORE,
.modes = (1 << DPLL_LOW_POWER_BYPASS) | (1 << DPLL_LOCKED),
.autoidle_reg = OMAP4430_CM_AUTOIDLE_DPLL_CORE,
@@ -418,7 +417,7 @@ static struct dpll_data dpll_core_dd = {
static struct clk dpll_core_ck = {
.name = "dpll_core_ck",
- .parent = &dpll_sys_ref_clk,
+ .parent = &sys_clkin_ck,
.dpll_data = &dpll_core_dd,
.init = &omap2_init_dpll_parent,
.ops = &clkops_null,
@@ -596,14 +595,14 @@ static struct clk dpll_core_m7_ck = {
};
static const struct clksel iva_hsd_byp_clk_mux_sel[] = {
- { .parent = &dpll_sys_ref_clk, .rates = div_1_0_rates },
+ { .parent = &sys_clkin_ck, .rates = div_1_0_rates },
{ .parent = &div_iva_hs_clk, .rates = div_1_1_rates },
{ .parent = NULL },
};
static struct clk iva_hsd_byp_clk_mux_ck = {
.name = "iva_hsd_byp_clk_mux_ck",
- .parent = &dpll_sys_ref_clk,
+ .parent = &sys_clkin_ck,
.ops = &clkops_null,
.recalc = &followparent_recalc,
};
@@ -612,7 +611,7 @@ static struct clk iva_hsd_byp_clk_mux_ck = {
static struct dpll_data dpll_iva_dd = {
.mult_div1_reg = OMAP4430_CM_CLKSEL_DPLL_IVA,
.clk_bypass = &iva_hsd_byp_clk_mux_ck,
- .clk_ref = &dpll_sys_ref_clk,
+ .clk_ref = &sys_clkin_ck,
.control_reg = OMAP4430_CM_CLKMODE_DPLL_IVA,
.modes = (1 << DPLL_LOW_POWER_BYPASS) | (1 << DPLL_LOCKED),
.autoidle_reg = OMAP4430_CM_AUTOIDLE_DPLL_IVA,
@@ -630,7 +629,7 @@ static struct dpll_data dpll_iva_dd = {
static struct clk dpll_iva_ck = {
.name = "dpll_iva_ck",
- .parent = &dpll_sys_ref_clk,
+ .parent = &sys_clkin_ck,
.dpll_data = &dpll_iva_dd,
.init = &omap2_init_dpll_parent,
.ops = &clkops_omap3_noncore_dpll_ops,
@@ -672,7 +671,7 @@ static struct clk dpll_iva_m5_ck = {
static struct dpll_data dpll_mpu_dd = {
.mult_div1_reg = OMAP4430_CM_CLKSEL_DPLL_MPU,
.clk_bypass = &div_mpu_hs_clk,
- .clk_ref = &dpll_sys_ref_clk,
+ .clk_ref = &sys_clkin_ck,
.control_reg = OMAP4430_CM_CLKMODE_DPLL_MPU,
.modes = (1 << DPLL_LOW_POWER_BYPASS) | (1 << DPLL_LOCKED),
.autoidle_reg = OMAP4430_CM_AUTOIDLE_DPLL_MPU,
@@ -690,7 +689,7 @@ static struct dpll_data dpll_mpu_dd = {
static struct clk dpll_mpu_ck = {
.name = "dpll_mpu_ck",
- .parent = &dpll_sys_ref_clk,
+ .parent = &sys_clkin_ck,
.dpll_data = &dpll_mpu_dd,
.init = &omap2_init_dpll_parent,
.ops = &clkops_omap3_noncore_dpll_ops,
@@ -724,14 +723,14 @@ static struct clk per_hs_clk_div_ck = {
};
static const struct clksel per_hsd_byp_clk_mux_sel[] = {
- { .parent = &dpll_sys_ref_clk, .rates = div_1_0_rates },
+ { .parent = &sys_clkin_ck, .rates = div_1_0_rates },
{ .parent = &per_hs_clk_div_ck, .rates = div_1_1_rates },
{ .parent = NULL },
};
static struct clk per_hsd_byp_clk_mux_ck = {
.name = "per_hsd_byp_clk_mux_ck",
- .parent = &dpll_sys_ref_clk,
+ .parent = &sys_clkin_ck,
.clksel = per_hsd_byp_clk_mux_sel,
.init = &omap2_init_clksel_parent,
.clksel_reg = OMAP4430_CM_CLKSEL_DPLL_PER,
@@ -744,7 +743,7 @@ static struct clk per_hsd_byp_clk_mux_ck = {
static struct dpll_data dpll_per_dd = {
.mult_div1_reg = OMAP4430_CM_CLKSEL_DPLL_PER,
.clk_bypass = &per_hsd_byp_clk_mux_ck,
- .clk_ref = &dpll_sys_ref_clk,
+ .clk_ref = &sys_clkin_ck,
.control_reg = OMAP4430_CM_CLKMODE_DPLL_PER,
.modes = (1 << DPLL_LOW_POWER_BYPASS) | (1 << DPLL_LOCKED),
.autoidle_reg = OMAP4430_CM_AUTOIDLE_DPLL_PER,
@@ -762,7 +761,7 @@ static struct dpll_data dpll_per_dd = {
static struct clk dpll_per_ck = {
.name = "dpll_per_ck",
- .parent = &dpll_sys_ref_clk,
+ .parent = &sys_clkin_ck,
.dpll_data = &dpll_per_dd,
.init = &omap2_init_dpll_parent,
.ops = &clkops_omap3_noncore_dpll_ops,
@@ -858,8 +857,8 @@ static struct clk dpll_per_m7_ck = {
/* DPLL_UNIPRO */
static struct dpll_data dpll_unipro_dd = {
.mult_div1_reg = OMAP4430_CM_CLKSEL_DPLL_UNIPRO,
- .clk_bypass = &dpll_sys_ref_clk,
- .clk_ref = &dpll_sys_ref_clk,
+ .clk_bypass = &sys_clkin_ck,
+ .clk_ref = &sys_clkin_ck,
.control_reg = OMAP4430_CM_CLKMODE_DPLL_UNIPRO,
.modes = (1 << DPLL_LOW_POWER_BYPASS) | (1 << DPLL_LOCKED),
.autoidle_reg = OMAP4430_CM_AUTOIDLE_DPLL_UNIPRO,
@@ -877,7 +876,7 @@ static struct dpll_data dpll_unipro_dd = {
static struct clk dpll_unipro_ck = {
.name = "dpll_unipro_ck",
- .parent = &dpll_sys_ref_clk,
+ .parent = &sys_clkin_ck,
.dpll_data = &dpll_unipro_dd,
.init = &omap2_init_dpll_parent,
.ops = &clkops_omap3_noncore_dpll_ops,
@@ -914,7 +913,8 @@ static struct clk usb_hs_clk_div_ck = {
static struct dpll_data dpll_usb_dd = {
.mult_div1_reg = OMAP4430_CM_CLKSEL_DPLL_USB,
.clk_bypass = &usb_hs_clk_div_ck,
- .clk_ref = &dpll_sys_ref_clk,
+ .flags = DPLL_J_TYPE | DPLL_NO_DCO_SEL,
+ .clk_ref = &sys_clkin_ck,
.control_reg = OMAP4430_CM_CLKMODE_DPLL_USB,
.modes = (1 << DPLL_LOW_POWER_BYPASS) | (1 << DPLL_LOCKED),
.autoidle_reg = OMAP4430_CM_AUTOIDLE_DPLL_USB,
@@ -927,13 +927,12 @@ static struct dpll_data dpll_usb_dd = {
.max_multiplier = OMAP4430_MAX_DPLL_MULT,
.max_divider = OMAP4430_MAX_DPLL_DIV,
.min_divider = 1,
- .flags = DPLL_J_TYPE | DPLL_NO_DCO_SEL
};
static struct clk dpll_usb_ck = {
.name = "dpll_usb_ck",
- .parent = &dpll_sys_ref_clk,
+ .parent = &sys_clkin_ck,
.dpll_data = &dpll_usb_dd,
.init = &omap2_init_dpll_parent,
.ops = &clkops_omap3_noncore_dpll_ops,
@@ -1222,7 +1221,7 @@ static struct clk per_abe_24m_fclk = {
static const struct clksel pmd_stm_clock_mux_sel[] = {
{ .parent = &sys_clkin_ck, .rates = div_1_0_rates },
{ .parent = &dpll_core_m6_ck, .rates = div_1_1_rates },
- { .parent = &dpll_per_m7_ck, .rates = div_1_2_rates },
+ { .parent = &tie_low_clock_ck, .rates = div_1_2_rates },
{ .parent = NULL },
};
@@ -1240,10 +1239,15 @@ static struct clk pmd_trace_clk_mux_ck = {
.recalc = &followparent_recalc,
};
+static const struct clksel syc_clk_div_div[] = {
+ { .parent = &sys_clkin_ck, .rates = div2_1to2_rates },
+ { .parent = NULL },
+};
+
static struct clk syc_clk_div_ck = {
.name = "syc_clk_div_ck",
.parent = &sys_clkin_ck,
- .clksel = dpll_sys_ref_clk_div,
+ .clksel = syc_clk_div_div,
.clksel_reg = OMAP4430_CM_ABE_DSS_SYS_CLKSEL,
.clksel_mask = OMAP4430_CLKSEL_0_0_MASK,
.ops = &clkops_null,
@@ -1284,13 +1288,13 @@ static struct clk aess_fck = {
.recalc = &followparent_recalc,
};
-static struct clk cust_efuse_fck = {
- .name = "cust_efuse_fck",
+static struct clk bandgap_fclk = {
+ .name = "bandgap_fclk",
.ops = &clkops_omap2_dflt,
- .enable_reg = OMAP4430_CM_CEFUSE_CEFUSE_CLKCTRL,
- .enable_bit = OMAP4430_MODULEMODE_SWCTRL,
- .clkdm_name = "l4_cefuse_clkdm",
- .parent = &sys_clkin_ck,
+ .enable_reg = OMAP4430_CM_WKUP_BANDGAP_CLKCTRL,
+ .enable_bit = OMAP4430_OPTFCLKEN_BGAP_32K_SHIFT,
+ .clkdm_name = "l4_wkup_clkdm",
+ .parent = &sys_32k_ck,
.recalc = &followparent_recalc,
};
@@ -1344,6 +1348,56 @@ static struct clk dmic_fck = {
.clkdm_name = "abe_clkdm",
};
+static struct clk dsp_fck = {
+ .name = "dsp_fck",
+ .ops = &clkops_omap2_dflt,
+ .enable_reg = OMAP4430_CM_TESLA_TESLA_CLKCTRL,
+ .enable_bit = OMAP4430_MODULEMODE_HWCTRL,
+ .clkdm_name = "tesla_clkdm",
+ .parent = &dpll_iva_m4_ck,
+ .recalc = &followparent_recalc,
+};
+
+static struct clk dss_sys_clk = {
+ .name = "dss_sys_clk",
+ .ops = &clkops_omap2_dflt,
+ .enable_reg = OMAP4430_CM_DSS_DSS_CLKCTRL,
+ .enable_bit = OMAP4430_OPTFCLKEN_SYS_CLK_SHIFT,
+ .clkdm_name = "l3_dss_clkdm",
+ .parent = &syc_clk_div_ck,
+ .recalc = &followparent_recalc,
+};
+
+static struct clk dss_tv_clk = {
+ .name = "dss_tv_clk",
+ .ops = &clkops_omap2_dflt,
+ .enable_reg = OMAP4430_CM_DSS_DSS_CLKCTRL,
+ .enable_bit = OMAP4430_OPTFCLKEN_TV_CLK_SHIFT,
+ .clkdm_name = "l3_dss_clkdm",
+ .parent = &extalt_clkin_ck,
+ .recalc = &followparent_recalc,
+};
+
+static struct clk dss_dss_clk = {
+ .name = "dss_dss_clk",
+ .ops = &clkops_omap2_dflt,
+ .enable_reg = OMAP4430_CM_DSS_DSS_CLKCTRL,
+ .enable_bit = OMAP4430_OPTFCLKEN_DSSCLK_SHIFT,
+ .clkdm_name = "l3_dss_clkdm",
+ .parent = &dpll_per_m5_ck,
+ .recalc = &followparent_recalc,
+};
+
+static struct clk dss_48mhz_clk = {
+ .name = "dss_48mhz_clk",
+ .ops = &clkops_omap2_dflt,
+ .enable_reg = OMAP4430_CM_DSS_DSS_CLKCTRL,
+ .enable_bit = OMAP4430_OPTFCLKEN_48MHZ_CLK_SHIFT,
+ .clkdm_name = "l3_dss_clkdm",
+ .parent = &func_48mc_fclk,
+ .recalc = &followparent_recalc,
+};
+
static struct clk dss_fck = {
.name = "dss_fck",
.ops = &clkops_omap2_dflt,
@@ -1354,18 +1408,18 @@ static struct clk dss_fck = {
.recalc = &followparent_recalc,
};
-static struct clk ducati_ick = {
- .name = "ducati_ick",
+static struct clk efuse_ctrl_cust_fck = {
+ .name = "efuse_ctrl_cust_fck",
.ops = &clkops_omap2_dflt,
- .enable_reg = OMAP4430_CM_DUCATI_DUCATI_CLKCTRL,
- .enable_bit = OMAP4430_MODULEMODE_HWCTRL,
- .clkdm_name = "ducati_clkdm",
- .parent = &ducati_clk_mux_ck,
+ .enable_reg = OMAP4430_CM_CEFUSE_CEFUSE_CLKCTRL,
+ .enable_bit = OMAP4430_MODULEMODE_SWCTRL,
+ .clkdm_name = "l4_cefuse_clkdm",
+ .parent = &sys_clkin_ck,
.recalc = &followparent_recalc,
};
-static struct clk emif1_ick = {
- .name = "emif1_ick",
+static struct clk emif1_fck = {
+ .name = "emif1_fck",
.ops = &clkops_omap2_dflt,
.enable_reg = OMAP4430_CM_MEMIF_EMIF_1_CLKCTRL,
.enable_bit = OMAP4430_MODULEMODE_HWCTRL,
@@ -1375,8 +1429,8 @@ static struct clk emif1_ick = {
.recalc = &followparent_recalc,
};
-static struct clk emif2_ick = {
- .name = "emif2_ick",
+static struct clk emif2_fck = {
+ .name = "emif2_fck",
.ops = &clkops_omap2_dflt,
.enable_reg = OMAP4430_CM_MEMIF_EMIF_2_CLKCTRL,
.enable_bit = OMAP4430_MODULEMODE_HWCTRL,
@@ -1407,42 +1461,24 @@ static struct clk fdif_fck = {
.clkdm_name = "iss_clkdm",
};
-static const struct clksel per_sgx_fclk_div[] = {
- { .parent = &dpll_per_m2x2_ck, .rates = div3_1to4_rates },
- { .parent = NULL },
-};
-
-static struct clk per_sgx_fclk = {
- .name = "per_sgx_fclk",
- .parent = &dpll_per_m2x2_ck,
- .clksel = per_sgx_fclk_div,
- .clksel_reg = OMAP4430_CM_GFX_GFX_CLKCTRL,
- .clksel_mask = OMAP4430_CLKSEL_PER_192M_MASK,
- .ops = &clkops_null,
- .recalc = &omap2_clksel_recalc,
- .round_rate = &omap2_clksel_round_rate,
- .set_rate = &omap2_clksel_set_rate,
-};
-
-static const struct clksel sgx_clk_mux_sel[] = {
- { .parent = &dpll_core_m7_ck, .rates = div_1_0_rates },
- { .parent = &per_sgx_fclk, .rates = div_1_1_rates },
- { .parent = NULL },
+static struct clk fpka_fck = {
+ .name = "fpka_fck",
+ .ops = &clkops_omap2_dflt,
+ .enable_reg = OMAP4430_CM_L4SEC_PKAEIP29_CLKCTRL,
+ .enable_bit = OMAP4430_MODULEMODE_SWCTRL,
+ .clkdm_name = "l4_secure_clkdm",
+ .parent = &l4_div_ck,
+ .recalc = &followparent_recalc,
};
-/* Merged sgx_clk_mux into gfx */
-static struct clk gfx_fck = {
- .name = "gfx_fck",
- .parent = &dpll_core_m7_ck,
- .clksel = sgx_clk_mux_sel,
- .init = &omap2_init_clksel_parent,
- .clksel_reg = OMAP4430_CM_GFX_GFX_CLKCTRL,
- .clksel_mask = OMAP4430_CLKSEL_SGX_FCLK_MASK,
+static struct clk gpio1_dbclk = {
+ .name = "gpio1_dbclk",
.ops = &clkops_omap2_dflt,
- .recalc = &omap2_clksel_recalc,
- .enable_reg = OMAP4430_CM_GFX_GFX_CLKCTRL,
- .enable_bit = OMAP4430_MODULEMODE_SWCTRL,
- .clkdm_name = "l3_gfx_clkdm",
+ .enable_reg = OMAP4430_CM_WKUP_GPIO1_CLKCTRL,
+ .enable_bit = OMAP4430_OPTFCLKEN_DBCLK_SHIFT,
+ .clkdm_name = "l4_wkup_clkdm",
+ .parent = &sys_32k_ck,
+ .recalc = &followparent_recalc,
};
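+/* Each GPIO bank gains an optional 32 kHz debounce clock (OPTFCLKEN_DBCLK)
+ * parented on sys_32k_ck; gpio2..gpio6 below follow the same pattern in the
+ * l4_per domain. */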
static struct clk gpio1_ick = {
@@ -1455,6 +1491,16 @@ static struct clk gpio1_ick = {
.recalc = &followparent_recalc,
};
+static struct clk gpio2_dbclk = {
+ .name = "gpio2_dbclk",
+ .ops = &clkops_omap2_dflt,
+ .enable_reg = OMAP4430_CM_L4PER_GPIO2_CLKCTRL,
+ .enable_bit = OMAP4430_OPTFCLKEN_DBCLK_SHIFT,
+ .clkdm_name = "l4_per_clkdm",
+ .parent = &sys_32k_ck,
+ .recalc = &followparent_recalc,
+};
+
static struct clk gpio2_ick = {
.name = "gpio2_ick",
.ops = &clkops_omap2_dflt,
@@ -1465,6 +1511,16 @@ static struct clk gpio2_ick = {
.recalc = &followparent_recalc,
};
+static struct clk gpio3_dbclk = {
+ .name = "gpio3_dbclk",
+ .ops = &clkops_omap2_dflt,
+ .enable_reg = OMAP4430_CM_L4PER_GPIO3_CLKCTRL,
+ .enable_bit = OMAP4430_OPTFCLKEN_DBCLK_SHIFT,
+ .clkdm_name = "l4_per_clkdm",
+ .parent = &sys_32k_ck,
+ .recalc = &followparent_recalc,
+};
+
static struct clk gpio3_ick = {
.name = "gpio3_ick",
.ops = &clkops_omap2_dflt,
@@ -1475,6 +1531,16 @@ static struct clk gpio3_ick = {
.recalc = &followparent_recalc,
};
+static struct clk gpio4_dbclk = {
+ .name = "gpio4_dbclk",
+ .ops = &clkops_omap2_dflt,
+ .enable_reg = OMAP4430_CM_L4PER_GPIO4_CLKCTRL,
+ .enable_bit = OMAP4430_OPTFCLKEN_DBCLK_SHIFT,
+ .clkdm_name = "l4_per_clkdm",
+ .parent = &sys_32k_ck,
+ .recalc = &followparent_recalc,
+};
+
static struct clk gpio4_ick = {
.name = "gpio4_ick",
.ops = &clkops_omap2_dflt,
@@ -1485,6 +1551,16 @@ static struct clk gpio4_ick = {
.recalc = &followparent_recalc,
};
+static struct clk gpio5_dbclk = {
+ .name = "gpio5_dbclk",
+ .ops = &clkops_omap2_dflt,
+ .enable_reg = OMAP4430_CM_L4PER_GPIO5_CLKCTRL,
+ .enable_bit = OMAP4430_OPTFCLKEN_DBCLK_SHIFT,
+ .clkdm_name = "l4_per_clkdm",
+ .parent = &sys_32k_ck,
+ .recalc = &followparent_recalc,
+};
+
static struct clk gpio5_ick = {
.name = "gpio5_ick",
.ops = &clkops_omap2_dflt,
@@ -1495,6 +1571,16 @@ static struct clk gpio5_ick = {
.recalc = &followparent_recalc,
};
+static struct clk gpio6_dbclk = {
+ .name = "gpio6_dbclk",
+ .ops = &clkops_omap2_dflt,
+ .enable_reg = OMAP4430_CM_L4PER_GPIO6_CLKCTRL,
+ .enable_bit = OMAP4430_OPTFCLKEN_DBCLK_SHIFT,
+ .clkdm_name = "l4_per_clkdm",
+ .parent = &sys_32k_ck,
+ .recalc = &followparent_recalc,
+};
+
static struct clk gpio6_ick = {
.name = "gpio6_ick",
.ops = &clkops_omap2_dflt,
@@ -1515,214 +1601,25 @@ static struct clk gpmc_ick = {
.recalc = &followparent_recalc,
};
-static const struct clksel dmt1_clk_mux_sel[] = {
- { .parent = &sys_clkin_ck, .rates = div_1_0_rates },
- { .parent = &sys_32k_ck, .rates = div_1_1_rates },
- { .parent = NULL },
-};
-
-/*
- * Merged dmt1_clk_mux into gptimer1
- * gptimer1 renamed temporarily into gpt1 to match OMAP3 convention
- */
-static struct clk gpt1_fck = {
- .name = "gpt1_fck",
- .parent = &sys_clkin_ck,
- .clksel = dmt1_clk_mux_sel,
- .init = &omap2_init_clksel_parent,
- .clksel_reg = OMAP4430_CM_WKUP_TIMER1_CLKCTRL,
- .clksel_mask = OMAP4430_CLKSEL_MASK,
- .ops = &clkops_omap2_dflt,
- .recalc = &omap2_clksel_recalc,
- .enable_reg = OMAP4430_CM_WKUP_TIMER1_CLKCTRL,
- .enable_bit = OMAP4430_MODULEMODE_SWCTRL,
- .clkdm_name = "l4_wkup_clkdm",
-};
-
-/*
- * Merged cm2_dm10_mux into gptimer10
- * gptimer10 renamed temporarily into gpt10 to match OMAP3 convention
- */
-static struct clk gpt10_fck = {
- .name = "gpt10_fck",
- .parent = &sys_clkin_ck,
- .clksel = dmt1_clk_mux_sel,
- .init = &omap2_init_clksel_parent,
- .clksel_reg = OMAP4430_CM_L4PER_DMTIMER10_CLKCTRL,
- .clksel_mask = OMAP4430_CLKSEL_MASK,
- .ops = &clkops_omap2_dflt,
- .recalc = &omap2_clksel_recalc,
- .enable_reg = OMAP4430_CM_L4PER_DMTIMER10_CLKCTRL,
- .enable_bit = OMAP4430_MODULEMODE_SWCTRL,
- .clkdm_name = "l4_per_clkdm",
-};
-
-/*
- * Merged cm2_dm11_mux into gptimer11
- * gptimer11 renamed temporarily into gpt11 to match OMAP3 convention
- */
-static struct clk gpt11_fck = {
- .name = "gpt11_fck",
- .parent = &sys_clkin_ck,
- .clksel = dmt1_clk_mux_sel,
- .init = &omap2_init_clksel_parent,
- .clksel_reg = OMAP4430_CM_L4PER_DMTIMER11_CLKCTRL,
- .clksel_mask = OMAP4430_CLKSEL_MASK,
- .ops = &clkops_omap2_dflt,
- .recalc = &omap2_clksel_recalc,
- .enable_reg = OMAP4430_CM_L4PER_DMTIMER11_CLKCTRL,
- .enable_bit = OMAP4430_MODULEMODE_SWCTRL,
- .clkdm_name = "l4_per_clkdm",
-};
-
-/*
- * Merged cm2_dm2_mux into gptimer2
- * gptimer2 renamed temporarily into gpt2 to match OMAP3 convention
- */
-static struct clk gpt2_fck = {
- .name = "gpt2_fck",
- .parent = &sys_clkin_ck,
- .clksel = dmt1_clk_mux_sel,
- .init = &omap2_init_clksel_parent,
- .clksel_reg = OMAP4430_CM_L4PER_DMTIMER2_CLKCTRL,
- .clksel_mask = OMAP4430_CLKSEL_MASK,
- .ops = &clkops_omap2_dflt,
- .recalc = &omap2_clksel_recalc,
- .enable_reg = OMAP4430_CM_L4PER_DMTIMER2_CLKCTRL,
- .enable_bit = OMAP4430_MODULEMODE_SWCTRL,
- .clkdm_name = "l4_per_clkdm",
-};
-
-/*
- * Merged cm2_dm3_mux into gptimer3
- * gptimer3 renamed temporarily into gpt3 to match OMAP3 convention
- */
-static struct clk gpt3_fck = {
- .name = "gpt3_fck",
- .parent = &sys_clkin_ck,
- .clksel = dmt1_clk_mux_sel,
- .init = &omap2_init_clksel_parent,
- .clksel_reg = OMAP4430_CM_L4PER_DMTIMER3_CLKCTRL,
- .clksel_mask = OMAP4430_CLKSEL_MASK,
- .ops = &clkops_omap2_dflt,
- .recalc = &omap2_clksel_recalc,
- .enable_reg = OMAP4430_CM_L4PER_DMTIMER3_CLKCTRL,
- .enable_bit = OMAP4430_MODULEMODE_SWCTRL,
- .clkdm_name = "l4_per_clkdm",
-};
-
-/*
- * Merged cm2_dm4_mux into gptimer4
- * gptimer4 renamed temporarily into gpt4 to match OMAP3 convention
- */
-static struct clk gpt4_fck = {
- .name = "gpt4_fck",
- .parent = &sys_clkin_ck,
- .clksel = dmt1_clk_mux_sel,
- .init = &omap2_init_clksel_parent,
- .clksel_reg = OMAP4430_CM_L4PER_DMTIMER4_CLKCTRL,
- .clksel_mask = OMAP4430_CLKSEL_MASK,
- .ops = &clkops_omap2_dflt,
- .recalc = &omap2_clksel_recalc,
- .enable_reg = OMAP4430_CM_L4PER_DMTIMER4_CLKCTRL,
- .enable_bit = OMAP4430_MODULEMODE_SWCTRL,
- .clkdm_name = "l4_per_clkdm",
-};
-
-static const struct clksel timer5_sync_mux_sel[] = {
- { .parent = &syc_clk_div_ck, .rates = div_1_0_rates },
- { .parent = &sys_32k_ck, .rates = div_1_1_rates },
+static const struct clksel sgx_clk_mux_sel[] = {
+ { .parent = &dpll_core_m7_ck, .rates = div_1_0_rates },
+ { .parent = &dpll_per_m7_ck, .rates = div_1_1_rates },
{ .parent = NULL },
};
-/*
- * Merged timer5_sync_mux into gptimer5
- * gptimer5 renamed temporarily into gpt5 to match OMAP3 convention
- */
-static struct clk gpt5_fck = {
- .name = "gpt5_fck",
- .parent = &syc_clk_div_ck,
- .clksel = timer5_sync_mux_sel,
- .init = &omap2_init_clksel_parent,
- .clksel_reg = OMAP4430_CM1_ABE_TIMER5_CLKCTRL,
- .clksel_mask = OMAP4430_CLKSEL_MASK,
- .ops = &clkops_omap2_dflt,
- .recalc = &omap2_clksel_recalc,
- .enable_reg = OMAP4430_CM1_ABE_TIMER5_CLKCTRL,
- .enable_bit = OMAP4430_MODULEMODE_SWCTRL,
- .clkdm_name = "abe_clkdm",
-};
-
-/*
- * Merged timer6_sync_mux into gptimer6
- * gptimer6 renamed temporarily into gpt6 to match OMAP3 convention
- */
-static struct clk gpt6_fck = {
- .name = "gpt6_fck",
- .parent = &syc_clk_div_ck,
- .clksel = timer5_sync_mux_sel,
- .init = &omap2_init_clksel_parent,
- .clksel_reg = OMAP4430_CM1_ABE_TIMER6_CLKCTRL,
- .clksel_mask = OMAP4430_CLKSEL_MASK,
- .ops = &clkops_omap2_dflt,
- .recalc = &omap2_clksel_recalc,
- .enable_reg = OMAP4430_CM1_ABE_TIMER6_CLKCTRL,
- .enable_bit = OMAP4430_MODULEMODE_SWCTRL,
- .clkdm_name = "abe_clkdm",
-};
-
-/*
- * Merged timer7_sync_mux into gptimer7
- * gptimer7 renamed temporarily into gpt7 to match OMAP3 convention
- */
-static struct clk gpt7_fck = {
- .name = "gpt7_fck",
- .parent = &syc_clk_div_ck,
- .clksel = timer5_sync_mux_sel,
- .init = &omap2_init_clksel_parent,
- .clksel_reg = OMAP4430_CM1_ABE_TIMER7_CLKCTRL,
- .clksel_mask = OMAP4430_CLKSEL_MASK,
- .ops = &clkops_omap2_dflt,
- .recalc = &omap2_clksel_recalc,
- .enable_reg = OMAP4430_CM1_ABE_TIMER7_CLKCTRL,
- .enable_bit = OMAP4430_MODULEMODE_SWCTRL,
- .clkdm_name = "abe_clkdm",
-};
-
-/*
- * Merged timer8_sync_mux into gptimer8
- * gptimer8 renamed temporarily into gpt8 to match OMAP3 convention
- */
-static struct clk gpt8_fck = {
- .name = "gpt8_fck",
- .parent = &syc_clk_div_ck,
- .clksel = timer5_sync_mux_sel,
- .init = &omap2_init_clksel_parent,
- .clksel_reg = OMAP4430_CM1_ABE_TIMER8_CLKCTRL,
- .clksel_mask = OMAP4430_CLKSEL_MASK,
- .ops = &clkops_omap2_dflt,
- .recalc = &omap2_clksel_recalc,
- .enable_reg = OMAP4430_CM1_ABE_TIMER8_CLKCTRL,
- .enable_bit = OMAP4430_MODULEMODE_SWCTRL,
- .clkdm_name = "abe_clkdm",
-};
-
-/*
- * Merged cm2_dm9_mux into gptimer9
- * gptimer9 renamed temporarily into gpt9 to match OMAP3 convention
- */
-static struct clk gpt9_fck = {
- .name = "gpt9_fck",
- .parent = &sys_clkin_ck,
- .clksel = dmt1_clk_mux_sel,
+/* Merged sgx_clk_mux into gpu */
+static struct clk gpu_fck = {
+ .name = "gpu_fck",
+ .parent = &dpll_core_m7_ck,
+ .clksel = sgx_clk_mux_sel,
.init = &omap2_init_clksel_parent,
- .clksel_reg = OMAP4430_CM_L4PER_DMTIMER9_CLKCTRL,
- .clksel_mask = OMAP4430_CLKSEL_MASK,
+ .clksel_reg = OMAP4430_CM_GFX_GFX_CLKCTRL,
+ .clksel_mask = OMAP4430_CLKSEL_SGX_FCLK_MASK,
.ops = &clkops_omap2_dflt,
.recalc = &omap2_clksel_recalc,
- .enable_reg = OMAP4430_CM_L4PER_DMTIMER9_CLKCTRL,
+ .enable_reg = OMAP4430_CM_GFX_GFX_CLKCTRL,
.enable_bit = OMAP4430_MODULEMODE_SWCTRL,
- .clkdm_name = "l4_per_clkdm",
+ .clkdm_name = "l3_gfx_clkdm",
};

static struct clk hdq1w_fck = {
@@ -1735,11 +1632,16 @@ static struct clk hdq1w_fck = {
.recalc = &followparent_recalc,
};
+static const struct clksel hsi_fclk_div[] = {
+ { .parent = &dpll_per_m2x2_ck, .rates = div3_1to4_rates },
+ { .parent = NULL },
+};
+
/* Merged hsi_fclk into hsi */
-static struct clk hsi_ick = {
- .name = "hsi_ick",
+static struct clk hsi_fck = {
+ .name = "hsi_fck",
.parent = &dpll_per_m2x2_ck,
- .clksel = per_sgx_fclk_div,
+ .clksel = hsi_fclk_div,
.clksel_reg = OMAP4430_CM_L3INIT_HSI_CLKCTRL,
.clksel_mask = OMAP4430_CLKSEL_24_25_MASK,
.ops = &clkops_omap2_dflt,
@@ -1791,6 +1693,26 @@ static struct clk i2c4_fck = {
.recalc = &followparent_recalc,
};
+static struct clk ipu_fck = {
+ .name = "ipu_fck",
+ .ops = &clkops_omap2_dflt,
+ .enable_reg = OMAP4430_CM_DUCATI_DUCATI_CLKCTRL,
+ .enable_bit = OMAP4430_MODULEMODE_HWCTRL,
+ .clkdm_name = "ducati_clkdm",
+ .parent = &ducati_clk_mux_ck,
+ .recalc = &followparent_recalc,
+};
+
+static struct clk iss_ctrlclk = {
+ .name = "iss_ctrlclk",
+ .ops = &clkops_omap2_dflt,
+ .enable_reg = OMAP4430_CM_CAM_ISS_CLKCTRL,
+ .enable_bit = OMAP4430_OPTFCLKEN_CTRLCLK_SHIFT,
+ .clkdm_name = "iss_clkdm",
+ .parent = &func_96m_fclk,
+ .recalc = &followparent_recalc,
+};
+
static struct clk iss_fck = {
.name = "iss_fck",
.ops = &clkops_omap2_dflt,
@@ -1801,8 +1723,8 @@ static struct clk iss_fck = {
.recalc = &followparent_recalc,
};
-static struct clk ivahd_ick = {
- .name = "ivahd_ick",
+static struct clk iva_fck = {
+ .name = "iva_fck",
.ops = &clkops_omap2_dflt,
.enable_reg = OMAP4430_CM_IVAHD_IVAHD_CLKCTRL,
.enable_bit = OMAP4430_MODULEMODE_HWCTRL,
@@ -1811,8 +1733,8 @@ static struct clk ivahd_ick = {
.recalc = &followparent_recalc,
};
-static struct clk keyboard_fck = {
- .name = "keyboard_fck",
+static struct clk kbd_fck = {
+ .name = "kbd_fck",
.ops = &clkops_omap2_dflt,
.enable_reg = OMAP4430_CM_WKUP_KEYBOARD_CLKCTRL,
.enable_bit = OMAP4430_MODULEMODE_SWCTRL,
@@ -1821,8 +1743,8 @@ static struct clk keyboard_fck = {
.recalc = &followparent_recalc,
};
-static struct clk l3_instr_interconnect_ick = {
- .name = "l3_instr_interconnect_ick",
+static struct clk l3_instr_ick = {
+ .name = "l3_instr_ick",
.ops = &clkops_omap2_dflt,
.enable_reg = OMAP4430_CM_L3INSTR_L3_INSTR_CLKCTRL,
.enable_bit = OMAP4430_MODULEMODE_HWCTRL,
@@ -1831,8 +1753,8 @@ static struct clk l3_instr_interconnect_ick = {
.recalc = &followparent_recalc,
};
-static struct clk l3_interconnect_3_ick = {
- .name = "l3_interconnect_3_ick",
+static struct clk l3_main_3_ick = {
+ .name = "l3_main_3_ick",
.ops = &clkops_omap2_dflt,
.enable_reg = OMAP4430_CM_L3INSTR_L3_3_CLKCTRL,
.enable_bit = OMAP4430_MODULEMODE_HWCTRL,
@@ -2005,6 +1927,16 @@ static struct clk mcbsp4_fck = {
.clkdm_name = "l4_per_clkdm",
};
+static struct clk mcpdm_fck = {
+ .name = "mcpdm_fck",
+ .ops = &clkops_omap2_dflt,
+ .enable_reg = OMAP4430_CM1_ABE_PDM_CLKCTRL,
+ .enable_bit = OMAP4430_MODULEMODE_SWCTRL,
+ .clkdm_name = "abe_clkdm",
+ .parent = &pad_clks_ck,
+ .recalc = &followparent_recalc,
+};
+
static struct clk mcspi1_fck = {
.name = "mcspi1_fck",
.ops = &clkops_omap2_dflt,
@@ -2105,33 +2037,33 @@ static struct clk mmc5_fck = {
.recalc = &followparent_recalc,
};
-static struct clk ocp_wp1_ick = {
- .name = "ocp_wp1_ick",
+static struct clk ocp2scp_usb_phy_phy_48m = {
+ .name = "ocp2scp_usb_phy_phy_48m",
.ops = &clkops_omap2_dflt,
- .enable_reg = OMAP4430_CM_L3INSTR_OCP_WP1_CLKCTRL,
- .enable_bit = OMAP4430_MODULEMODE_HWCTRL,
- .clkdm_name = "l3_instr_clkdm",
- .parent = &l3_div_ck,
+ .enable_reg = OMAP4430_CM_L3INIT_USBPHYOCP2SCP_CLKCTRL,
+ .enable_bit = OMAP4430_OPTFCLKEN_PHY_48M_SHIFT,
+ .clkdm_name = "l3_init_clkdm",
+ .parent = &func_48m_fclk,
.recalc = &followparent_recalc,
};
-static struct clk pdm_fck = {
- .name = "pdm_fck",
+static struct clk ocp2scp_usb_phy_ick = {
+ .name = "ocp2scp_usb_phy_ick",
.ops = &clkops_omap2_dflt,
- .enable_reg = OMAP4430_CM1_ABE_PDM_CLKCTRL,
- .enable_bit = OMAP4430_MODULEMODE_SWCTRL,
- .clkdm_name = "abe_clkdm",
- .parent = &pad_clks_ck,
+ .enable_reg = OMAP4430_CM_L3INIT_USBPHYOCP2SCP_CLKCTRL,
+ .enable_bit = OMAP4430_MODULEMODE_HWCTRL,
+ .clkdm_name = "l3_init_clkdm",
+ .parent = &l4_div_ck,
.recalc = &followparent_recalc,
};
-static struct clk pkaeip29_fck = {
- .name = "pkaeip29_fck",
+static struct clk ocp_wp_noc_ick = {
+ .name = "ocp_wp_noc_ick",
.ops = &clkops_omap2_dflt,
- .enable_reg = OMAP4430_CM_L4SEC_PKAEIP29_CLKCTRL,
- .enable_bit = OMAP4430_MODULEMODE_SWCTRL,
- .clkdm_name = "l4_secure_clkdm",
- .parent = &l4_div_ck,
+ .enable_reg = OMAP4430_CM_L3INSTR_OCP_WP1_CLKCTRL,
+ .enable_bit = OMAP4430_MODULEMODE_HWCTRL,
+ .clkdm_name = "l3_instr_clkdm",
+ .parent = &l3_div_ck,
.recalc = &followparent_recalc,
};
@@ -2145,8 +2077,8 @@ static struct clk rng_ick = {
.recalc = &followparent_recalc,
};
-static struct clk sha2md51_fck = {
- .name = "sha2md51_fck",
+static struct clk sha2md5_fck = {
+ .name = "sha2md5_fck",
.ops = &clkops_omap2_dflt,
.enable_reg = OMAP4430_CM_L4SEC_SHA2MD51_CLKCTRL,
.enable_bit = OMAP4430_MODULEMODE_SWCTRL,
@@ -2155,8 +2087,8 @@ static struct clk sha2md51_fck = {
.recalc = &followparent_recalc,
};
-static struct clk sl2_ick = {
- .name = "sl2_ick",
+static struct clk sl2if_ick = {
+ .name = "sl2if_ick",
.ops = &clkops_omap2_dflt,
.enable_reg = OMAP4430_CM_IVAHD_SL2_CLKCTRL,
.enable_bit = OMAP4430_MODULEMODE_HWCTRL,
@@ -2165,6 +2097,46 @@ static struct clk sl2_ick = {
.recalc = &followparent_recalc,
};
+static struct clk slimbus1_fclk_1 = {
+ .name = "slimbus1_fclk_1",
+ .ops = &clkops_omap2_dflt,
+ .enable_reg = OMAP4430_CM1_ABE_SLIMBUS_CLKCTRL,
+ .enable_bit = OMAP4430_OPTFCLKEN_FCLK1_SHIFT,
+ .clkdm_name = "abe_clkdm",
+ .parent = &func_24m_clk,
+ .recalc = &followparent_recalc,
+};
+
+static struct clk slimbus1_fclk_0 = {
+ .name = "slimbus1_fclk_0",
+ .ops = &clkops_omap2_dflt,
+ .enable_reg = OMAP4430_CM1_ABE_SLIMBUS_CLKCTRL,
+ .enable_bit = OMAP4430_OPTFCLKEN_FCLK0_SHIFT,
+ .clkdm_name = "abe_clkdm",
+ .parent = &abe_24m_fclk,
+ .recalc = &followparent_recalc,
+};
+
+static struct clk slimbus1_fclk_2 = {
+ .name = "slimbus1_fclk_2",
+ .ops = &clkops_omap2_dflt,
+ .enable_reg = OMAP4430_CM1_ABE_SLIMBUS_CLKCTRL,
+ .enable_bit = OMAP4430_OPTFCLKEN_FCLK2_SHIFT,
+ .clkdm_name = "abe_clkdm",
+ .parent = &pad_clks_ck,
+ .recalc = &followparent_recalc,
+};
+
+static struct clk slimbus1_slimbus_clk = {
+ .name = "slimbus1_slimbus_clk",
+ .ops = &clkops_omap2_dflt,
+ .enable_reg = OMAP4430_CM1_ABE_SLIMBUS_CLKCTRL,
+ .enable_bit = OMAP4430_OPTFCLKEN_SLIMBUS_CLK_11_11_SHIFT,
+ .clkdm_name = "abe_clkdm",
+ .parent = &slimbus_clk,
+ .recalc = &followparent_recalc,
+};
+
static struct clk slimbus1_fck = {
.name = "slimbus1_fck",
.ops = &clkops_omap2_dflt,
@@ -2175,6 +2147,36 @@ static struct clk slimbus1_fck = {
.recalc = &followparent_recalc,
};
+static struct clk slimbus2_fclk_1 = {
+ .name = "slimbus2_fclk_1",
+ .ops = &clkops_omap2_dflt,
+ .enable_reg = OMAP4430_CM_L4PER_SLIMBUS2_CLKCTRL,
+ .enable_bit = OMAP4430_OPTFCLKEN_PERABE24M_GFCLK_SHIFT,
+ .clkdm_name = "l4_per_clkdm",
+ .parent = &per_abe_24m_fclk,
+ .recalc = &followparent_recalc,
+};
+
+static struct clk slimbus2_fclk_0 = {
+ .name = "slimbus2_fclk_0",
+ .ops = &clkops_omap2_dflt,
+ .enable_reg = OMAP4430_CM_L4PER_SLIMBUS2_CLKCTRL,
+ .enable_bit = OMAP4430_OPTFCLKEN_PER24MC_GFCLK_SHIFT,
+ .clkdm_name = "l4_per_clkdm",
+ .parent = &func_24mc_fclk,
+ .recalc = &followparent_recalc,
+};
+
+static struct clk slimbus2_slimbus_clk = {
+ .name = "slimbus2_slimbus_clk",
+ .ops = &clkops_omap2_dflt,
+ .enable_reg = OMAP4430_CM_L4PER_SLIMBUS2_CLKCTRL,
+ .enable_bit = OMAP4430_OPTFCLKEN_SLIMBUS_CLK_SHIFT,
+ .clkdm_name = "l4_per_clkdm",
+ .parent = &pad_slimbus_core_clks_ck,
+ .recalc = &followparent_recalc,
+};
+
static struct clk slimbus2_fck = {
.name = "slimbus2_fck",
.ops = &clkops_omap2_dflt,
@@ -2185,8 +2187,8 @@ static struct clk slimbus2_fck = {
.recalc = &followparent_recalc,
};
-static struct clk sr_core_fck = {
- .name = "sr_core_fck",
+static struct clk smartreflex_core_fck = {
+ .name = "smartreflex_core_fck",
.ops = &clkops_omap2_dflt,
.enable_reg = OMAP4430_CM_ALWON_SR_CORE_CLKCTRL,
.enable_bit = OMAP4430_MODULEMODE_SWCTRL,
@@ -2195,8 +2197,8 @@ static struct clk sr_core_fck = {
.recalc = &followparent_recalc,
};
-static struct clk sr_iva_fck = {
- .name = "sr_iva_fck",
+static struct clk smartreflex_iva_fck = {
+ .name = "smartreflex_iva_fck",
.ops = &clkops_omap2_dflt,
.enable_reg = OMAP4430_CM_ALWON_SR_IVA_CLKCTRL,
.enable_bit = OMAP4430_MODULEMODE_SWCTRL,
@@ -2205,8 +2207,8 @@ static struct clk sr_iva_fck = {
.recalc = &followparent_recalc,
};
-static struct clk sr_mpu_fck = {
- .name = "sr_mpu_fck",
+static struct clk smartreflex_mpu_fck = {
+ .name = "smartreflex_mpu_fck",
.ops = &clkops_omap2_dflt,
.enable_reg = OMAP4430_CM_ALWON_SR_MPU_CLKCTRL,
.enable_bit = OMAP4430_MODULEMODE_SWCTRL,
@@ -2215,14 +2217,175 @@ static struct clk sr_mpu_fck = {
.recalc = &followparent_recalc,
};
-static struct clk tesla_ick = {
- .name = "tesla_ick",
+/* Merged dmt1_clk_mux into timer1 */
+static struct clk timer1_fck = {
+ .name = "timer1_fck",
+ .parent = &sys_clkin_ck,
+ .clksel = abe_dpll_bypass_clk_mux_sel,
+ .init = &omap2_init_clksel_parent,
+ .clksel_reg = OMAP4430_CM_WKUP_TIMER1_CLKCTRL,
+ .clksel_mask = OMAP4430_CLKSEL_MASK,
.ops = &clkops_omap2_dflt,
- .enable_reg = OMAP4430_CM_TESLA_TESLA_CLKCTRL,
- .enable_bit = OMAP4430_MODULEMODE_HWCTRL,
- .clkdm_name = "tesla_clkdm",
- .parent = &dpll_iva_m4_ck,
- .recalc = &followparent_recalc,
+ .recalc = &omap2_clksel_recalc,
+ .enable_reg = OMAP4430_CM_WKUP_TIMER1_CLKCTRL,
+ .enable_bit = OMAP4430_MODULEMODE_SWCTRL,
+ .clkdm_name = "l4_wkup_clkdm",
+};
+
+/* Merged cm2_dm10_mux into timer10 */
+static struct clk timer10_fck = {
+ .name = "timer10_fck",
+ .parent = &sys_clkin_ck,
+ .clksel = abe_dpll_bypass_clk_mux_sel,
+ .init = &omap2_init_clksel_parent,
+ .clksel_reg = OMAP4430_CM_L4PER_DMTIMER10_CLKCTRL,
+ .clksel_mask = OMAP4430_CLKSEL_MASK,
+ .ops = &clkops_omap2_dflt,
+ .recalc = &omap2_clksel_recalc,
+ .enable_reg = OMAP4430_CM_L4PER_DMTIMER10_CLKCTRL,
+ .enable_bit = OMAP4430_MODULEMODE_SWCTRL,
+ .clkdm_name = "l4_per_clkdm",
+};
+
+/* Merged cm2_dm11_mux into timer11 */
+static struct clk timer11_fck = {
+ .name = "timer11_fck",
+ .parent = &sys_clkin_ck,
+ .clksel = abe_dpll_bypass_clk_mux_sel,
+ .init = &omap2_init_clksel_parent,
+ .clksel_reg = OMAP4430_CM_L4PER_DMTIMER11_CLKCTRL,
+ .clksel_mask = OMAP4430_CLKSEL_MASK,
+ .ops = &clkops_omap2_dflt,
+ .recalc = &omap2_clksel_recalc,
+ .enable_reg = OMAP4430_CM_L4PER_DMTIMER11_CLKCTRL,
+ .enable_bit = OMAP4430_MODULEMODE_SWCTRL,
+ .clkdm_name = "l4_per_clkdm",
+};
+
+/* Merged cm2_dm2_mux into timer2 */
+static struct clk timer2_fck = {
+ .name = "timer2_fck",
+ .parent = &sys_clkin_ck,
+ .clksel = abe_dpll_bypass_clk_mux_sel,
+ .init = &omap2_init_clksel_parent,
+ .clksel_reg = OMAP4430_CM_L4PER_DMTIMER2_CLKCTRL,
+ .clksel_mask = OMAP4430_CLKSEL_MASK,
+ .ops = &clkops_omap2_dflt,
+ .recalc = &omap2_clksel_recalc,
+ .enable_reg = OMAP4430_CM_L4PER_DMTIMER2_CLKCTRL,
+ .enable_bit = OMAP4430_MODULEMODE_SWCTRL,
+ .clkdm_name = "l4_per_clkdm",
+};
+
+/* Merged cm2_dm3_mux into timer3 */
+static struct clk timer3_fck = {
+ .name = "timer3_fck",
+ .parent = &sys_clkin_ck,
+ .clksel = abe_dpll_bypass_clk_mux_sel,
+ .init = &omap2_init_clksel_parent,
+ .clksel_reg = OMAP4430_CM_L4PER_DMTIMER3_CLKCTRL,
+ .clksel_mask = OMAP4430_CLKSEL_MASK,
+ .ops = &clkops_omap2_dflt,
+ .recalc = &omap2_clksel_recalc,
+ .enable_reg = OMAP4430_CM_L4PER_DMTIMER3_CLKCTRL,
+ .enable_bit = OMAP4430_MODULEMODE_SWCTRL,
+ .clkdm_name = "l4_per_clkdm",
+};
+
+/* Merged cm2_dm4_mux into timer4 */
+static struct clk timer4_fck = {
+ .name = "timer4_fck",
+ .parent = &sys_clkin_ck,
+ .clksel = abe_dpll_bypass_clk_mux_sel,
+ .init = &omap2_init_clksel_parent,
+ .clksel_reg = OMAP4430_CM_L4PER_DMTIMER4_CLKCTRL,
+ .clksel_mask = OMAP4430_CLKSEL_MASK,
+ .ops = &clkops_omap2_dflt,
+ .recalc = &omap2_clksel_recalc,
+ .enable_reg = OMAP4430_CM_L4PER_DMTIMER4_CLKCTRL,
+ .enable_bit = OMAP4430_MODULEMODE_SWCTRL,
+ .clkdm_name = "l4_per_clkdm",
+};
+
+static const struct clksel timer5_sync_mux_sel[] = {
+ { .parent = &syc_clk_div_ck, .rates = div_1_0_rates },
+ { .parent = &sys_32k_ck, .rates = div_1_1_rates },
+ { .parent = NULL },
+};
+
+/* Merged timer5_sync_mux into timer5 */
+static struct clk timer5_fck = {
+ .name = "timer5_fck",
+ .parent = &syc_clk_div_ck,
+ .clksel = timer5_sync_mux_sel,
+ .init = &omap2_init_clksel_parent,
+ .clksel_reg = OMAP4430_CM1_ABE_TIMER5_CLKCTRL,
+ .clksel_mask = OMAP4430_CLKSEL_MASK,
+ .ops = &clkops_omap2_dflt,
+ .recalc = &omap2_clksel_recalc,
+ .enable_reg = OMAP4430_CM1_ABE_TIMER5_CLKCTRL,
+ .enable_bit = OMAP4430_MODULEMODE_SWCTRL,
+ .clkdm_name = "abe_clkdm",
+};
+
+/* Merged timer6_sync_mux into timer6 */
+static struct clk timer6_fck = {
+ .name = "timer6_fck",
+ .parent = &syc_clk_div_ck,
+ .clksel = timer5_sync_mux_sel,
+ .init = &omap2_init_clksel_parent,
+ .clksel_reg = OMAP4430_CM1_ABE_TIMER6_CLKCTRL,
+ .clksel_mask = OMAP4430_CLKSEL_MASK,
+ .ops = &clkops_omap2_dflt,
+ .recalc = &omap2_clksel_recalc,
+ .enable_reg = OMAP4430_CM1_ABE_TIMER6_CLKCTRL,
+ .enable_bit = OMAP4430_MODULEMODE_SWCTRL,
+ .clkdm_name = "abe_clkdm",
+};
+
+/* Merged timer7_sync_mux into timer7 */
+static struct clk timer7_fck = {
+ .name = "timer7_fck",
+ .parent = &syc_clk_div_ck,
+ .clksel = timer5_sync_mux_sel,
+ .init = &omap2_init_clksel_parent,
+ .clksel_reg = OMAP4430_CM1_ABE_TIMER7_CLKCTRL,
+ .clksel_mask = OMAP4430_CLKSEL_MASK,
+ .ops = &clkops_omap2_dflt,
+ .recalc = &omap2_clksel_recalc,
+ .enable_reg = OMAP4430_CM1_ABE_TIMER7_CLKCTRL,
+ .enable_bit = OMAP4430_MODULEMODE_SWCTRL,
+ .clkdm_name = "abe_clkdm",
+};
+
+/* Merged timer8_sync_mux into timer8 */
+static struct clk timer8_fck = {
+ .name = "timer8_fck",
+ .parent = &syc_clk_div_ck,
+ .clksel = timer5_sync_mux_sel,
+ .init = &omap2_init_clksel_parent,
+ .clksel_reg = OMAP4430_CM1_ABE_TIMER8_CLKCTRL,
+ .clksel_mask = OMAP4430_CLKSEL_MASK,
+ .ops = &clkops_omap2_dflt,
+ .recalc = &omap2_clksel_recalc,
+ .enable_reg = OMAP4430_CM1_ABE_TIMER8_CLKCTRL,
+ .enable_bit = OMAP4430_MODULEMODE_SWCTRL,
+ .clkdm_name = "abe_clkdm",
+};
+
+/* Merged cm2_dm9_mux into timer9 */
+static struct clk timer9_fck = {
+ .name = "timer9_fck",
+ .parent = &sys_clkin_ck,
+ .clksel = abe_dpll_bypass_clk_mux_sel,
+ .init = &omap2_init_clksel_parent,
+ .clksel_reg = OMAP4430_CM_L4PER_DMTIMER9_CLKCTRL,
+ .clksel_mask = OMAP4430_CLKSEL_MASK,
+ .ops = &clkops_omap2_dflt,
+ .recalc = &omap2_clksel_recalc,
+ .enable_reg = OMAP4430_CM_L4PER_DMTIMER9_CLKCTRL,
+ .enable_bit = OMAP4430_MODULEMODE_SWCTRL,
+ .clkdm_name = "l4_per_clkdm",
};

static struct clk uart1_fck = {
@@ -2265,105 +2428,148 @@ static struct clk uart4_fck = {
.recalc = &followparent_recalc,
};
-static struct clk unipro1_fck = {
- .name = "unipro1_fck",
+static struct clk usb_host_fs_fck = {
+ .name = "usb_host_fs_fck",
.ops = &clkops_omap2_dflt,
- .enable_reg = OMAP4430_CM_L3INIT_UNIPRO1_CLKCTRL,
+ .enable_reg = OMAP4430_CM_L3INIT_USB_HOST_FS_CLKCTRL,
.enable_bit = OMAP4430_MODULEMODE_SWCTRL,
.clkdm_name = "l3_init_clkdm",
- .parent = &func_96m_fclk,
+ .parent = &func_48mc_fclk,
.recalc = &followparent_recalc,
};
-static struct clk usb_host_fck = {
- .name = "usb_host_fck",
+static struct clk usb_host_hs_utmi_p3_clk = {
+ .name = "usb_host_hs_utmi_p3_clk",
.ops = &clkops_omap2_dflt,
.enable_reg = OMAP4430_CM_L3INIT_USB_HOST_CLKCTRL,
- .enable_bit = OMAP4430_MODULEMODE_SWCTRL,
+ .enable_bit = OMAP4430_OPTFCLKEN_UTMI_P3_CLK_SHIFT,
.clkdm_name = "l3_init_clkdm",
.parent = &init_60m_fclk,
.recalc = &followparent_recalc,
};
-static struct clk usb_host_fs_fck = {
- .name = "usb_host_fs_fck",
+static struct clk usb_host_hs_hsic60m_p1_clk = {
+ .name = "usb_host_hs_hsic60m_p1_clk",
.ops = &clkops_omap2_dflt,
- .enable_reg = OMAP4430_CM_L3INIT_USB_HOST_FS_CLKCTRL,
- .enable_bit = OMAP4430_MODULEMODE_SWCTRL,
+ .enable_reg = OMAP4430_CM_L3INIT_USB_HOST_CLKCTRL,
+ .enable_bit = OMAP4430_OPTFCLKEN_HSIC60M_P1_CLK_SHIFT,
.clkdm_name = "l3_init_clkdm",
- .parent = &func_48mc_fclk,
+ .parent = &init_60m_fclk,
.recalc = &followparent_recalc,
};
-static struct clk usb_otg_ick = {
- .name = "usb_otg_ick",
+static struct clk usb_host_hs_hsic60m_p2_clk = {
+ .name = "usb_host_hs_hsic60m_p2_clk",
.ops = &clkops_omap2_dflt,
- .enable_reg = OMAP4430_CM_L3INIT_USB_OTG_CLKCTRL,
- .enable_bit = OMAP4430_MODULEMODE_HWCTRL,
+ .enable_reg = OMAP4430_CM_L3INIT_USB_HOST_CLKCTRL,
+ .enable_bit = OMAP4430_OPTFCLKEN_HSIC60M_P2_CLK_SHIFT,
.clkdm_name = "l3_init_clkdm",
- .parent = &l3_div_ck,
+ .parent = &init_60m_fclk,
.recalc = &followparent_recalc,
};
-static struct clk usb_tll_ick = {
- .name = "usb_tll_ick",
+static const struct clksel utmi_p1_gfclk_sel[] = {
+ { .parent = &init_60m_fclk, .rates = div_1_0_rates },
+ { .parent = &xclk60mhsp1_ck, .rates = div_1_1_rates },
+ { .parent = NULL },
+};
+
+static struct clk utmi_p1_gfclk = {
+ .name = "utmi_p1_gfclk",
+ .parent = &init_60m_fclk,
+ .clksel = utmi_p1_gfclk_sel,
+ .init = &omap2_init_clksel_parent,
+ .clksel_reg = OMAP4430_CM_L3INIT_USB_HOST_CLKCTRL,
+ .clksel_mask = OMAP4430_CLKSEL_UTMI_P1_MASK,
+ .ops = &clkops_null,
+ .recalc = &omap2_clksel_recalc,
+};
+
+static struct clk usb_host_hs_utmi_p1_clk = {
+ .name = "usb_host_hs_utmi_p1_clk",
.ops = &clkops_omap2_dflt,
- .enable_reg = OMAP4430_CM_L3INIT_USB_TLL_CLKCTRL,
- .enable_bit = OMAP4430_MODULEMODE_HWCTRL,
+ .enable_reg = OMAP4430_CM_L3INIT_USB_HOST_CLKCTRL,
+ .enable_bit = OMAP4430_OPTFCLKEN_UTMI_P1_CLK_SHIFT,
.clkdm_name = "l3_init_clkdm",
- .parent = &l4_div_ck,
+ .parent = &utmi_p1_gfclk,
.recalc = &followparent_recalc,
};
-static struct clk usbphyocp2scp_ick = {
- .name = "usbphyocp2scp_ick",
+static const struct clksel utmi_p2_gfclk_sel[] = {
+ { .parent = &init_60m_fclk, .rates = div_1_0_rates },
+ { .parent = &xclk60mhsp2_ck, .rates = div_1_1_rates },
+ { .parent = NULL },
+};
+
+static struct clk utmi_p2_gfclk = {
+ .name = "utmi_p2_gfclk",
+ .parent = &init_60m_fclk,
+ .clksel = utmi_p2_gfclk_sel,
+ .init = &omap2_init_clksel_parent,
+ .clksel_reg = OMAP4430_CM_L3INIT_USB_HOST_CLKCTRL,
+ .clksel_mask = OMAP4430_CLKSEL_UTMI_P2_MASK,
+ .ops = &clkops_null,
+ .recalc = &omap2_clksel_recalc,
+};
+
+static struct clk usb_host_hs_utmi_p2_clk = {
+ .name = "usb_host_hs_utmi_p2_clk",
.ops = &clkops_omap2_dflt,
- .enable_reg = OMAP4430_CM_L3INIT_USBPHYOCP2SCP_CLKCTRL,
- .enable_bit = OMAP4430_MODULEMODE_HWCTRL,
+ .enable_reg = OMAP4430_CM_L3INIT_USB_HOST_CLKCTRL,
+ .enable_bit = OMAP4430_OPTFCLKEN_UTMI_P2_CLK_SHIFT,
.clkdm_name = "l3_init_clkdm",
- .parent = &l4_div_ck,
+ .parent = &utmi_p2_gfclk,
.recalc = &followparent_recalc,
};
-static struct clk usim_fck = {
- .name = "usim_fck",
+static struct clk usb_host_hs_hsic480m_p1_clk = {
+ .name = "usb_host_hs_hsic480m_p1_clk",
.ops = &clkops_omap2_dflt,
- .enable_reg = OMAP4430_CM_WKUP_USIM_CLKCTRL,
- .enable_bit = OMAP4430_MODULEMODE_SWCTRL,
- .clkdm_name = "l4_wkup_clkdm",
- .parent = &sys_32k_ck,
+ .enable_reg = OMAP4430_CM_L3INIT_USB_HOST_CLKCTRL,
+ .enable_bit = OMAP4430_OPTFCLKEN_HSIC480M_P1_CLK_SHIFT,
+ .clkdm_name = "l3_init_clkdm",
+ .parent = &dpll_usb_m2_ck,
.recalc = &followparent_recalc,
};
-static struct clk wdt2_fck = {
- .name = "wdt2_fck",
+static struct clk usb_host_hs_hsic480m_p2_clk = {
+ .name = "usb_host_hs_hsic480m_p2_clk",
.ops = &clkops_omap2_dflt,
- .enable_reg = OMAP4430_CM_WKUP_WDT2_CLKCTRL,
- .enable_bit = OMAP4430_MODULEMODE_SWCTRL,
- .clkdm_name = "l4_wkup_clkdm",
- .parent = &sys_32k_ck,
+ .enable_reg = OMAP4430_CM_L3INIT_USB_HOST_CLKCTRL,
+ .enable_bit = OMAP4430_OPTFCLKEN_HSIC480M_P2_CLK_SHIFT,
+ .clkdm_name = "l3_init_clkdm",
+ .parent = &dpll_usb_m2_ck,
.recalc = &followparent_recalc,
};
-static struct clk wdt3_fck = {
- .name = "wdt3_fck",
+static struct clk usb_host_hs_func48mclk = {
+ .name = "usb_host_hs_func48mclk",
.ops = &clkops_omap2_dflt,
- .enable_reg = OMAP4430_CM1_ABE_WDT3_CLKCTRL,
+ .enable_reg = OMAP4430_CM_L3INIT_USB_HOST_CLKCTRL,
+ .enable_bit = OMAP4430_OPTFCLKEN_FUNC48MCLK_SHIFT,
+ .clkdm_name = "l3_init_clkdm",
+ .parent = &func_48mc_fclk,
+ .recalc = &followparent_recalc,
+};
+
+static struct clk usb_host_hs_fck = {
+ .name = "usb_host_hs_fck",
+ .ops = &clkops_omap2_dflt,
+ .enable_reg = OMAP4430_CM_L3INIT_USB_HOST_CLKCTRL,
.enable_bit = OMAP4430_MODULEMODE_SWCTRL,
- .clkdm_name = "abe_clkdm",
- .parent = &sys_32k_ck,
+ .clkdm_name = "l3_init_clkdm",
+ .parent = &init_60m_fclk,
.recalc = &followparent_recalc,
};
-/* Remaining optional clocks */
static const struct clksel otg_60m_gfclk_sel[] = {
{ .parent = &utmi_phy_clkout_ck, .rates = div_1_0_rates },
{ .parent = &xclk60motg_ck, .rates = div_1_1_rates },
{ .parent = NULL },
};
-static struct clk otg_60m_gfclk_ck = {
- .name = "otg_60m_gfclk_ck",
+static struct clk otg_60m_gfclk = {
+ .name = "otg_60m_gfclk",
.parent = &utmi_phy_clkout_ck,
.clksel = otg_60m_gfclk_sel,
.init = &omap2_init_clksel_parent,
@@ -2373,38 +2579,74 @@ static struct clk otg_60m_gfclk_ck = {
.recalc = &omap2_clksel_recalc,
};
-static const struct clksel stm_clk_div_div[] = {
- { .parent = &pmd_stm_clock_mux_ck, .rates = div3_1to4_rates },
- { .parent = NULL },
+static struct clk usb_otg_hs_xclk = {
+ .name = "usb_otg_hs_xclk",
+ .ops = &clkops_omap2_dflt,
+ .enable_reg = OMAP4430_CM_L3INIT_USB_OTG_CLKCTRL,
+ .enable_bit = OMAP4430_OPTFCLKEN_XCLK_SHIFT,
+ .clkdm_name = "l3_init_clkdm",
+ .parent = &otg_60m_gfclk,
+ .recalc = &followparent_recalc,
};
-static struct clk stm_clk_div_ck = {
- .name = "stm_clk_div_ck",
- .parent = &pmd_stm_clock_mux_ck,
- .clksel = stm_clk_div_div,
- .clksel_reg = OMAP4430_CM_EMU_DEBUGSS_CLKCTRL,
- .clksel_mask = OMAP4430_CLKSEL_PMD_STM_CLK_MASK,
- .ops = &clkops_null,
- .recalc = &omap2_clksel_recalc,
- .round_rate = &omap2_clksel_round_rate,
- .set_rate = &omap2_clksel_set_rate,
+static struct clk usb_otg_hs_ick = {
+ .name = "usb_otg_hs_ick",
+ .ops = &clkops_omap2_dflt,
+ .enable_reg = OMAP4430_CM_L3INIT_USB_OTG_CLKCTRL,
+ .enable_bit = OMAP4430_MODULEMODE_HWCTRL,
+ .clkdm_name = "l3_init_clkdm",
+ .parent = &l3_div_ck,
+ .recalc = &followparent_recalc,
};
-static const struct clksel trace_clk_div_div[] = {
- { .parent = &pmd_trace_clk_mux_ck, .rates = div3_1to4_rates },
- { .parent = NULL },
+static struct clk usb_phy_cm_clk32k = {
+ .name = "usb_phy_cm_clk32k",
+ .ops = &clkops_omap2_dflt,
+ .enable_reg = OMAP4430_CM_ALWON_USBPHY_CLKCTRL,
+ .enable_bit = OMAP4430_OPTFCLKEN_CLK32K_SHIFT,
+ .clkdm_name = "l4_ao_clkdm",
+ .parent = &sys_32k_ck,
+ .recalc = &followparent_recalc,
};
-static struct clk trace_clk_div_ck = {
- .name = "trace_clk_div_ck",
- .parent = &pmd_trace_clk_mux_ck,
- .clksel = trace_clk_div_div,
- .clksel_reg = OMAP4430_CM_EMU_DEBUGSS_CLKCTRL,
- .clksel_mask = OMAP4430_CLKSEL_PMD_TRACE_CLK_MASK,
- .ops = &clkops_null,
- .recalc = &omap2_clksel_recalc,
- .round_rate = &omap2_clksel_round_rate,
- .set_rate = &omap2_clksel_set_rate,
+static struct clk usb_tll_hs_usb_ch2_clk = {
+ .name = "usb_tll_hs_usb_ch2_clk",
+ .ops = &clkops_omap2_dflt,
+ .enable_reg = OMAP4430_CM_L3INIT_USB_TLL_CLKCTRL,
+ .enable_bit = OMAP4430_OPTFCLKEN_USB_CH2_CLK_SHIFT,
+ .clkdm_name = "l3_init_clkdm",
+ .parent = &init_60m_fclk,
+ .recalc = &followparent_recalc,
+};
+
+static struct clk usb_tll_hs_usb_ch0_clk = {
+ .name = "usb_tll_hs_usb_ch0_clk",
+ .ops = &clkops_omap2_dflt,
+ .enable_reg = OMAP4430_CM_L3INIT_USB_TLL_CLKCTRL,
+ .enable_bit = OMAP4430_OPTFCLKEN_USB_CH0_CLK_SHIFT,
+ .clkdm_name = "l3_init_clkdm",
+ .parent = &init_60m_fclk,
+ .recalc = &followparent_recalc,
+};
+
+static struct clk usb_tll_hs_usb_ch1_clk = {
+ .name = "usb_tll_hs_usb_ch1_clk",
+ .ops = &clkops_omap2_dflt,
+ .enable_reg = OMAP4430_CM_L3INIT_USB_TLL_CLKCTRL,
+ .enable_bit = OMAP4430_OPTFCLKEN_USB_CH1_CLK_SHIFT,
+ .clkdm_name = "l3_init_clkdm",
+ .parent = &init_60m_fclk,
+ .recalc = &followparent_recalc,
+};
+
+static struct clk usb_tll_hs_ick = {
+ .name = "usb_tll_hs_ick",
+ .ops = &clkops_omap2_dflt,
+ .enable_reg = OMAP4430_CM_L3INIT_USB_TLL_CLKCTRL,
+ .enable_bit = OMAP4430_MODULEMODE_HWCTRL,
+ .clkdm_name = "l3_init_clkdm",
+ .parent = &l4_div_ck,
+ .recalc = &followparent_recalc,
};
static const struct clksel_rate div2_14to18_rates[] = {
@@ -2418,8 +2660,8 @@ static const struct clksel usim_fclk_div[] = {
{ .parent = NULL },
};
-static struct clk usim_fclk = {
- .name = "usim_fclk",
+static struct clk usim_ck = {
+ .name = "usim_ck",
.parent = &dpll_per_m4_ck,
.clksel = usim_fclk_div,
.clksel_reg = OMAP4430_CM_WKUP_USIM_CLKCTRL,
@@ -2430,38 +2672,79 @@ static struct clk usim_fclk = {
.set_rate = &omap2_clksel_set_rate,
};
-static const struct clksel utmi_p1_gfclk_sel[] = {
- { .parent = &init_60m_fclk, .rates = div_1_0_rates },
- { .parent = &xclk60mhsp1_ck, .rates = div_1_1_rates },
+static struct clk usim_fclk = {
+ .name = "usim_fclk",
+ .ops = &clkops_omap2_dflt,
+ .enable_reg = OMAP4430_CM_WKUP_USIM_CLKCTRL,
+ .enable_bit = OMAP4430_OPTFCLKEN_FCLK_SHIFT,
+ .clkdm_name = "l4_wkup_clkdm",
+ .parent = &usim_ck,
+ .recalc = &followparent_recalc,
+};
+
+static struct clk usim_fck = {
+ .name = "usim_fck",
+ .ops = &clkops_omap2_dflt,
+ .enable_reg = OMAP4430_CM_WKUP_USIM_CLKCTRL,
+ .enable_bit = OMAP4430_MODULEMODE_HWCTRL,
+ .clkdm_name = "l4_wkup_clkdm",
+ .parent = &sys_32k_ck,
+ .recalc = &followparent_recalc,
+};
+
+static struct clk wd_timer2_fck = {
+ .name = "wd_timer2_fck",
+ .ops = &clkops_omap2_dflt,
+ .enable_reg = OMAP4430_CM_WKUP_WDT2_CLKCTRL,
+ .enable_bit = OMAP4430_MODULEMODE_SWCTRL,
+ .clkdm_name = "l4_wkup_clkdm",
+ .parent = &sys_32k_ck,
+ .recalc = &followparent_recalc,
+};
+
+static struct clk wd_timer3_fck = {
+ .name = "wd_timer3_fck",
+ .ops = &clkops_omap2_dflt,
+ .enable_reg = OMAP4430_CM1_ABE_WDT3_CLKCTRL,
+ .enable_bit = OMAP4430_MODULEMODE_SWCTRL,
+ .clkdm_name = "abe_clkdm",
+ .parent = &sys_32k_ck,
+ .recalc = &followparent_recalc,
+};
+
+/* Remaining optional clocks */
+static const struct clksel stm_clk_div_div[] = {
+ { .parent = &pmd_stm_clock_mux_ck, .rates = div3_1to4_rates },
{ .parent = NULL },
};
-static struct clk utmi_p1_gfclk_ck = {
- .name = "utmi_p1_gfclk_ck",
- .parent = &init_60m_fclk,
- .clksel = utmi_p1_gfclk_sel,
- .init = &omap2_init_clksel_parent,
- .clksel_reg = OMAP4430_CM_L3INIT_USB_HOST_CLKCTRL,
- .clksel_mask = OMAP4430_CLKSEL_UTMI_P1_MASK,
+static struct clk stm_clk_div_ck = {
+ .name = "stm_clk_div_ck",
+ .parent = &pmd_stm_clock_mux_ck,
+ .clksel = stm_clk_div_div,
+ .clksel_reg = OMAP4430_CM_EMU_DEBUGSS_CLKCTRL,
+ .clksel_mask = OMAP4430_CLKSEL_PMD_STM_CLK_MASK,
.ops = &clkops_null,
.recalc = &omap2_clksel_recalc,
+ .round_rate = &omap2_clksel_round_rate,
+ .set_rate = &omap2_clksel_set_rate,
};
-static const struct clksel utmi_p2_gfclk_sel[] = {
- { .parent = &init_60m_fclk, .rates = div_1_0_rates },
- { .parent = &xclk60mhsp2_ck, .rates = div_1_1_rates },
+static const struct clksel trace_clk_div_div[] = {
+ { .parent = &pmd_trace_clk_mux_ck, .rates = div3_1to4_rates },
{ .parent = NULL },
};
-static struct clk utmi_p2_gfclk_ck = {
- .name = "utmi_p2_gfclk_ck",
- .parent = &init_60m_fclk,
- .clksel = utmi_p2_gfclk_sel,
- .init = &omap2_init_clksel_parent,
- .clksel_reg = OMAP4430_CM_L3INIT_USB_HOST_CLKCTRL,
- .clksel_mask = OMAP4430_CLKSEL_UTMI_P2_MASK,
+static struct clk trace_clk_div_ck = {
+ .name = "trace_clk_div_ck",
+ .parent = &pmd_trace_clk_mux_ck,
+ .clksel = trace_clk_div_div,
+ .clksel_reg = OMAP4430_CM_EMU_DEBUGSS_CLKCTRL,
+ .clksel_mask = OMAP4430_CLKSEL_PMD_TRACE_CLK_MASK,
.ops = &clkops_null,
.recalc = &omap2_clksel_recalc,
+ .round_rate = &omap2_clksel_round_rate,
+ .set_rate = &omap2_clksel_set_rate,
};

/*
@@ -2483,11 +2766,12 @@ static struct omap_clk omap44xx_clks[] = {
CLK(NULL, "virt_27000000_ck", &virt_27000000_ck, CK_443X),
CLK(NULL, "virt_38400000_ck", &virt_38400000_ck, CK_443X),
CLK(NULL, "sys_clkin_ck", &sys_clkin_ck, CK_443X),
+ CLK(NULL, "tie_low_clock_ck", &tie_low_clock_ck, CK_443X),
CLK(NULL, "utmi_phy_clkout_ck", &utmi_phy_clkout_ck, CK_443X),
CLK(NULL, "xclk60mhsp1_ck", &xclk60mhsp1_ck, CK_443X),
CLK(NULL, "xclk60mhsp2_ck", &xclk60mhsp2_ck, CK_443X),
CLK(NULL, "xclk60motg_ck", &xclk60motg_ck, CK_443X),
- CLK(NULL, "dpll_sys_ref_clk", &dpll_sys_ref_clk, CK_443X),
+ CLK(NULL, "abe_dpll_bypass_clk_mux_ck", &abe_dpll_bypass_clk_mux_ck, CK_443X),
CLK(NULL, "abe_dpll_refclk_mux_ck", &abe_dpll_refclk_mux_ck, CK_443X),
CLK(NULL, "dpll_abe_ck", &dpll_abe_ck, CK_443X),
CLK(NULL, "dpll_abe_m2x2_ck", &dpll_abe_m2x2_ck, CK_443X),
@@ -2557,46 +2841,48 @@ static struct omap_clk omap44xx_clks[] = {
CLK(NULL, "aes1_fck", &aes1_fck, CK_443X),
CLK(NULL, "aes2_fck", &aes2_fck, CK_443X),
CLK(NULL, "aess_fck", &aess_fck, CK_443X),
- CLK(NULL, "cust_efuse_fck", &cust_efuse_fck, CK_443X),
+ CLK(NULL, "bandgap_fclk", &bandgap_fclk, CK_443X),
CLK(NULL, "des3des_fck", &des3des_fck, CK_443X),
CLK(NULL, "dmic_sync_mux_ck", &dmic_sync_mux_ck, CK_443X),
CLK(NULL, "dmic_fck", &dmic_fck, CK_443X),
+ CLK(NULL, "dsp_fck", &dsp_fck, CK_443X),
+ CLK(NULL, "dss_sys_clk", &dss_sys_clk, CK_443X),
+ CLK(NULL, "dss_tv_clk", &dss_tv_clk, CK_443X),
+ CLK(NULL, "dss_dss_clk", &dss_dss_clk, CK_443X),
+ CLK(NULL, "dss_48mhz_clk", &dss_48mhz_clk, CK_443X),
CLK(NULL, "dss_fck", &dss_fck, CK_443X),
- CLK(NULL, "ducati_ick", &ducati_ick, CK_443X),
- CLK(NULL, "emif1_ick", &emif1_ick, CK_443X),
- CLK(NULL, "emif2_ick", &emif2_ick, CK_443X),
+ CLK(NULL, "efuse_ctrl_cust_fck", &efuse_ctrl_cust_fck, CK_443X),
+ CLK(NULL, "emif1_fck", &emif1_fck, CK_443X),
+ CLK(NULL, "emif2_fck", &emif2_fck, CK_443X),
CLK(NULL, "fdif_fck", &fdif_fck, CK_443X),
- CLK(NULL, "per_sgx_fclk", &per_sgx_fclk, CK_443X),
- CLK(NULL, "gfx_fck", &gfx_fck, CK_443X),
+ CLK(NULL, "fpka_fck", &fpka_fck, CK_443X),
+ CLK(NULL, "gpio1_dbck", &gpio1_dbclk, CK_443X),
CLK(NULL, "gpio1_ick", &gpio1_ick, CK_443X),
+ CLK(NULL, "gpio2_dbck", &gpio2_dbclk, CK_443X),
CLK(NULL, "gpio2_ick", &gpio2_ick, CK_443X),
+ CLK(NULL, "gpio3_dbck", &gpio3_dbclk, CK_443X),
CLK(NULL, "gpio3_ick", &gpio3_ick, CK_443X),
+ CLK(NULL, "gpio4_dbck", &gpio4_dbclk, CK_443X),
CLK(NULL, "gpio4_ick", &gpio4_ick, CK_443X),
+ CLK(NULL, "gpio5_dbck", &gpio5_dbclk, CK_443X),
CLK(NULL, "gpio5_ick", &gpio5_ick, CK_443X),
+ CLK(NULL, "gpio6_dbck", &gpio6_dbclk, CK_443X),
CLK(NULL, "gpio6_ick", &gpio6_ick, CK_443X),
CLK(NULL, "gpmc_ick", &gpmc_ick, CK_443X),
- CLK(NULL, "gpt1_fck", &gpt1_fck, CK_443X),
- CLK(NULL, "gpt10_fck", &gpt10_fck, CK_443X),
- CLK(NULL, "gpt11_fck", &gpt11_fck, CK_443X),
- CLK(NULL, "gpt2_fck", &gpt2_fck, CK_443X),
- CLK(NULL, "gpt3_fck", &gpt3_fck, CK_443X),
- CLK(NULL, "gpt4_fck", &gpt4_fck, CK_443X),
- CLK(NULL, "gpt5_fck", &gpt5_fck, CK_443X),
- CLK(NULL, "gpt6_fck", &gpt6_fck, CK_443X),
- CLK(NULL, "gpt7_fck", &gpt7_fck, CK_443X),
- CLK(NULL, "gpt8_fck", &gpt8_fck, CK_443X),
- CLK(NULL, "gpt9_fck", &gpt9_fck, CK_443X),
+ CLK(NULL, "gpu_fck", &gpu_fck, CK_443X),
CLK("omap2_hdq.0", "fck", &hdq1w_fck, CK_443X),
- CLK(NULL, "hsi_ick", &hsi_ick, CK_443X),
+ CLK(NULL, "hsi_fck", &hsi_fck, CK_443X),
CLK("i2c_omap.1", "fck", &i2c1_fck, CK_443X),
CLK("i2c_omap.2", "fck", &i2c2_fck, CK_443X),
CLK("i2c_omap.3", "fck", &i2c3_fck, CK_443X),
CLK("i2c_omap.4", "fck", &i2c4_fck, CK_443X),
+ CLK(NULL, "ipu_fck", &ipu_fck, CK_443X),
+ CLK(NULL, "iss_ctrlclk", &iss_ctrlclk, CK_443X),
CLK(NULL, "iss_fck", &iss_fck, CK_443X),
- CLK(NULL, "ivahd_ick", &ivahd_ick, CK_443X),
- CLK(NULL, "keyboard_fck", &keyboard_fck, CK_443X),
- CLK(NULL, "l3_instr_interconnect_ick", &l3_instr_interconnect_ick, CK_443X),
- CLK(NULL, "l3_interconnect_3_ick", &l3_interconnect_3_ick, CK_443X),
+ CLK(NULL, "iva_fck", &iva_fck, CK_443X),
+ CLK(NULL, "kbd_fck", &kbd_fck, CK_443X),
+ CLK(NULL, "l3_instr_ick", &l3_instr_ick, CK_443X),
+ CLK(NULL, "l3_main_3_ick", &l3_main_3_ick, CK_443X),
CLK(NULL, "mcasp_sync_mux_ck", &mcasp_sync_mux_ck, CK_443X),
CLK(NULL, "mcasp_fck", &mcasp_fck, CK_443X),
CLK(NULL, "mcbsp1_sync_mux_ck", &mcbsp1_sync_mux_ck, CK_443X),
@@ -2607,6 +2893,7 @@ static struct omap_clk omap44xx_clks[] = {
CLK("omap-mcbsp.3", "fck", &mcbsp3_fck, CK_443X),
CLK(NULL, "mcbsp4_sync_mux_ck", &mcbsp4_sync_mux_ck, CK_443X),
CLK("omap-mcbsp.4", "fck", &mcbsp4_fck, CK_443X),
+ CLK(NULL, "mcpdm_fck", &mcpdm_fck, CK_443X),
CLK("omap2_mcspi.1", "fck", &mcspi1_fck, CK_443X),
CLK("omap2_mcspi.2", "fck", &mcspi2_fck, CK_443X),
CLK("omap2_mcspi.3", "fck", &mcspi3_fck, CK_443X),
@@ -2616,43 +2903,66 @@ static struct omap_clk omap44xx_clks[] = {
CLK("mmci-omap-hs.2", "fck", &mmc3_fck, CK_443X),
CLK("mmci-omap-hs.3", "fck", &mmc4_fck, CK_443X),
CLK("mmci-omap-hs.4", "fck", &mmc5_fck, CK_443X),
- CLK(NULL, "ocp_wp1_ick", &ocp_wp1_ick, CK_443X),
- CLK(NULL, "pdm_fck", &pdm_fck, CK_443X),
- CLK(NULL, "pkaeip29_fck", &pkaeip29_fck, CK_443X),
+ CLK(NULL, "ocp2scp_usb_phy_phy_48m", &ocp2scp_usb_phy_phy_48m, CK_443X),
+ CLK(NULL, "ocp2scp_usb_phy_ick", &ocp2scp_usb_phy_ick, CK_443X),
+ CLK(NULL, "ocp_wp_noc_ick", &ocp_wp_noc_ick, CK_443X),
CLK("omap_rng", "ick", &rng_ick, CK_443X),
- CLK(NULL, "sha2md51_fck", &sha2md51_fck, CK_443X),
- CLK(NULL, "sl2_ick", &sl2_ick, CK_443X),
+ CLK(NULL, "sha2md5_fck", &sha2md5_fck, CK_443X),
+ CLK(NULL, "sl2if_ick", &sl2if_ick, CK_443X),
+ CLK(NULL, "slimbus1_fclk_1", &slimbus1_fclk_1, CK_443X),
+ CLK(NULL, "slimbus1_fclk_0", &slimbus1_fclk_0, CK_443X),
+ CLK(NULL, "slimbus1_fclk_2", &slimbus1_fclk_2, CK_443X),
+ CLK(NULL, "slimbus1_slimbus_clk", &slimbus1_slimbus_clk, CK_443X),
CLK(NULL, "slimbus1_fck", &slimbus1_fck, CK_443X),
+ CLK(NULL, "slimbus2_fclk_1", &slimbus2_fclk_1, CK_443X),
+ CLK(NULL, "slimbus2_fclk_0", &slimbus2_fclk_0, CK_443X),
+ CLK(NULL, "slimbus2_slimbus_clk", &slimbus2_slimbus_clk, CK_443X),
CLK(NULL, "slimbus2_fck", &slimbus2_fck, CK_443X),
- CLK(NULL, "sr_core_fck", &sr_core_fck, CK_443X),
- CLK(NULL, "sr_iva_fck", &sr_iva_fck, CK_443X),
- CLK(NULL, "sr_mpu_fck", &sr_mpu_fck, CK_443X),
- CLK(NULL, "tesla_ick", &tesla_ick, CK_443X),
+ CLK(NULL, "smartreflex_core_fck", &smartreflex_core_fck, CK_443X),
+ CLK(NULL, "smartreflex_iva_fck", &smartreflex_iva_fck, CK_443X),
+ CLK(NULL, "smartreflex_mpu_fck", &smartreflex_mpu_fck, CK_443X),
+ CLK(NULL, "gpt1_fck", &timer1_fck, CK_443X),
+ CLK(NULL, "gpt10_fck", &timer10_fck, CK_443X),
+ CLK(NULL, "gpt11_fck", &timer11_fck, CK_443X),
+ CLK(NULL, "gpt2_fck", &timer2_fck, CK_443X),
+ CLK(NULL, "gpt3_fck", &timer3_fck, CK_443X),
+ CLK(NULL, "gpt4_fck", &timer4_fck, CK_443X),
+ CLK(NULL, "gpt5_fck", &timer5_fck, CK_443X),
+ CLK(NULL, "gpt6_fck", &timer6_fck, CK_443X),
+ CLK(NULL, "gpt7_fck", &timer7_fck, CK_443X),
+ CLK(NULL, "gpt8_fck", &timer8_fck, CK_443X),
+ CLK(NULL, "gpt9_fck", &timer9_fck, CK_443X),
CLK(NULL, "uart1_fck", &uart1_fck, CK_443X),
CLK(NULL, "uart2_fck", &uart2_fck, CK_443X),
CLK(NULL, "uart3_fck", &uart3_fck, CK_443X),
CLK(NULL, "uart4_fck", &uart4_fck, CK_443X),
- CLK(NULL, "unipro1_fck", &unipro1_fck, CK_443X),
- CLK(NULL, "usb_host_fck", &usb_host_fck, CK_443X),
CLK(NULL, "usb_host_fs_fck", &usb_host_fs_fck, CK_443X),
- CLK("musb_hdrc", "ick", &usb_otg_ick, CK_443X),
- CLK(NULL, "usb_tll_ick", &usb_tll_ick, CK_443X),
- CLK(NULL, "usbphyocp2scp_ick", &usbphyocp2scp_ick, CK_443X),
+ CLK(NULL, "usb_host_hs_utmi_p3_clk", &usb_host_hs_utmi_p3_clk, CK_443X),
+ CLK(NULL, "usb_host_hs_hsic60m_p1_clk", &usb_host_hs_hsic60m_p1_clk, CK_443X),
+ CLK(NULL, "usb_host_hs_hsic60m_p2_clk", &usb_host_hs_hsic60m_p2_clk, CK_443X),
+ CLK(NULL, "utmi_p1_gfclk", &utmi_p1_gfclk, CK_443X),
+ CLK(NULL, "usb_host_hs_utmi_p1_clk", &usb_host_hs_utmi_p1_clk, CK_443X),
+ CLK(NULL, "utmi_p2_gfclk", &utmi_p2_gfclk, CK_443X),
+ CLK(NULL, "usb_host_hs_utmi_p2_clk", &usb_host_hs_utmi_p2_clk, CK_443X),
+ CLK(NULL, "usb_host_hs_hsic480m_p1_clk", &usb_host_hs_hsic480m_p1_clk, CK_443X),
+ CLK(NULL, "usb_host_hs_hsic480m_p2_clk", &usb_host_hs_hsic480m_p2_clk, CK_443X),
+ CLK(NULL, "usb_host_hs_func48mclk", &usb_host_hs_func48mclk, CK_443X),
+ CLK(NULL, "usb_host_hs_fck", &usb_host_hs_fck, CK_443X),
+ CLK(NULL, "otg_60m_gfclk", &otg_60m_gfclk, CK_443X),
+ CLK(NULL, "usb_otg_hs_xclk", &usb_otg_hs_xclk, CK_443X),
+ CLK("musb_hdrc", "ick", &usb_otg_hs_ick, CK_443X),
+ CLK(NULL, "usb_phy_cm_clk32k", &usb_phy_cm_clk32k, CK_443X),
+ CLK(NULL, "usb_tll_hs_usb_ch2_clk", &usb_tll_hs_usb_ch2_clk, CK_443X),
+ CLK(NULL, "usb_tll_hs_usb_ch0_clk", &usb_tll_hs_usb_ch0_clk, CK_443X),
+ CLK(NULL, "usb_tll_hs_usb_ch1_clk", &usb_tll_hs_usb_ch1_clk, CK_443X),
+ CLK(NULL, "usb_tll_hs_ick", &usb_tll_hs_ick, CK_443X),
+ CLK(NULL, "usim_ck", &usim_ck, CK_443X),
+ CLK(NULL, "usim_fclk", &usim_fclk, CK_443X),
CLK(NULL, "usim_fck", &usim_fck, CK_443X),
- CLK("omap_wdt", "fck", &wdt2_fck, CK_443X),
- CLK(NULL, "wdt3_fck", &wdt3_fck, CK_443X),
- CLK(NULL, "otg_60m_gfclk_ck", &otg_60m_gfclk_ck, CK_443X),
+ CLK("omap_wdt", "fck", &wd_timer2_fck, CK_443X),
+ CLK(NULL, "wd_timer3_fck", &wd_timer3_fck, CK_443X),
CLK(NULL, "stm_clk_div_ck", &stm_clk_div_ck, CK_443X),
CLK(NULL, "trace_clk_div_ck", &trace_clk_div_ck, CK_443X),
- CLK(NULL, "usim_fclk", &usim_fclk, CK_443X),
- CLK(NULL, "utmi_p1_gfclk_ck", &utmi_p1_gfclk_ck, CK_443X),
- CLK(NULL, "utmi_p2_gfclk_ck", &utmi_p2_gfclk_ck, CK_443X),
- CLK(NULL, "gpio1_dbck", &dummy_ck, CK_443X),
- CLK(NULL, "gpio2_dbck", &dummy_ck, CK_443X),
- CLK(NULL, "gpio3_dbck", &dummy_ck, CK_443X),
- CLK(NULL, "gpio4_dbck", &dummy_ck, CK_443X),
- CLK(NULL, "gpio5_dbck", &dummy_ck, CK_443X),
- CLK(NULL, "gpio6_dbck", &dummy_ck, CK_443X),
CLK(NULL, "gpmc_ck", &dummy_ck, CK_443X),
CLK(NULL, "gpt1_ick", &dummy_ck, CK_443X),
CLK(NULL, "gpt2_ick", &dummy_ck, CK_443X),
@@ -2669,19 +2979,19 @@ static struct omap_clk omap44xx_clks[] = {
CLK("i2c_omap.2", "ick", &dummy_ck, CK_443X),
CLK("i2c_omap.3", "ick", &dummy_ck, CK_443X),
CLK("i2c_omap.4", "ick", &dummy_ck, CK_443X),
+ CLK("mmci-omap-hs.0", "ick", &dummy_ck, CK_443X),
+ CLK("mmci-omap-hs.1", "ick", &dummy_ck, CK_443X),
+ CLK("mmci-omap-hs.2", "ick", &dummy_ck, CK_443X),
+ CLK("mmci-omap-hs.3", "ick", &dummy_ck, CK_443X),
+ CLK("mmci-omap-hs.4", "ick", &dummy_ck, CK_443X),
CLK("omap-mcbsp.1", "ick", &dummy_ck, CK_443X),
CLK("omap-mcbsp.2", "ick", &dummy_ck, CK_443X),
CLK("omap-mcbsp.3", "ick", &dummy_ck, CK_443X),
CLK("omap-mcbsp.4", "ick", &dummy_ck, CK_443X),
- CLK("omap2_mcspi.1", "ick", &dummy_ck, CK_443X),
- CLK("omap2_mcspi.2", "ick", &dummy_ck, CK_443X),
- CLK("omap2_mcspi.3", "ick", &dummy_ck, CK_443X),
- CLK("omap2_mcspi.4", "ick", &dummy_ck, CK_443X),
- CLK("mmci-omap-hs.0", "ick", &dummy_ck, CK_443X),
- CLK("mmci-omap-hs.1", "ick", &dummy_ck, CK_443X),
- CLK("mmci-omap-hs.2", "ick", &dummy_ck, CK_443X),
- CLK("mmci-omap-hs.3", "ick", &dummy_ck, CK_443X),
- CLK("mmci-omap-hs.4", "ick", &dummy_ck, CK_443X),
+ CLK("omap2_mcspi.1", "ick", &dummy_ck, CK_443X),
+ CLK("omap2_mcspi.2", "ick", &dummy_ck, CK_443X),
+ CLK("omap2_mcspi.3", "ick", &dummy_ck, CK_443X),
+ CLK("omap2_mcspi.4", "ick", &dummy_ck, CK_443X),
CLK(NULL, "uart1_ick", &dummy_ck, CK_443X),
CLK(NULL, "uart2_ick", &dummy_ck, CK_443X),
CLK(NULL, "uart3_ick", &dummy_ck, CK_443X),
diff --git a/arch/arm/mach-omap2/clockdomain.c b/arch/arm/mach-omap2/clockdomain.c
index 5d80cb897489..6fb61b1a0d46 100644
--- a/arch/arm/mach-omap2/clockdomain.c
+++ b/arch/arm/mach-omap2/clockdomain.c
@@ -258,97 +258,6 @@ static void _omap2_clkdm_set_hwsup(struct clockdomain *clkdm, int enable)
}

-/**
- * _init_wkdep_usecount - initialize wkdep usecounts to match hardware
- * @clkdm: clockdomain to initialize wkdep usecounts
- *
- * Initialize the wakeup dependency usecount variables for clockdomain @clkdm.
- * If a wakeup dependency is present in the hardware, the usecount will be
- * set to 1; otherwise, it will be set to 0. Software should clear all
- * software wakeup dependencies prior to calling this function if it wishes
- * to ensure that all usecounts start at 0. No return value.
- */
-static void _init_wkdep_usecount(struct clockdomain *clkdm)
-{
- u32 v;
- struct clkdm_dep *cd;
-
- if (!clkdm->wkdep_srcs)
- return;
-
- for (cd = clkdm->wkdep_srcs; cd->clkdm_name; cd++) {
- if (!omap_chip_is(cd->omap_chip))
- continue;
-
- if (!cd->clkdm && cd->clkdm_name)
- cd->clkdm = _clkdm_lookup(cd->clkdm_name);
-
- if (!cd->clkdm) {
- WARN(!cd->clkdm, "clockdomain: %s: wkdep clkdm %s not "
- "found\n", clkdm->name, cd->clkdm_name);
- continue;
- }
-
- v = prm_read_mod_bits_shift(clkdm->pwrdm.ptr->prcm_offs,
- PM_WKDEP,
- (1 << cd->clkdm->dep_bit));
-
- if (v)
- pr_debug("clockdomain: %s: wakeup dependency already "
- "set to wake up when %s wakes\n",
- clkdm->name, cd->clkdm->name);
-
- atomic_set(&cd->wkdep_usecount, (v) ? 1 : 0);
- }
-}
-
-/**
- * _init_sleepdep_usecount - initialize sleepdep usecounts to match hardware
- * @clkdm: clockdomain to initialize sleepdep usecounts
- *
- * Initialize the sleep dependency usecount variables for clockdomain @clkdm.
- * If a sleep dependency is present in the hardware, the usecount will be
- * set to 1; otherwise, it will be set to 0. Software should clear all
- * software sleep dependencies prior to calling this function if it wishes
- * to ensure that all usecounts start at 0. No return value.
- */
-static void _init_sleepdep_usecount(struct clockdomain *clkdm)
-{
- u32 v;
- struct clkdm_dep *cd;
-
- if (!cpu_is_omap34xx())
- return;
-
- if (!clkdm->sleepdep_srcs)
- return;
-
- for (cd = clkdm->sleepdep_srcs; cd->clkdm_name; cd++) {
- if (!omap_chip_is(cd->omap_chip))
- continue;
-
- if (!cd->clkdm && cd->clkdm_name)
- cd->clkdm = _clkdm_lookup(cd->clkdm_name);
-
- if (!cd->clkdm) {
- WARN(!cd->clkdm, "clockdomain: %s: sleepdep clkdm %s "
- "not found\n", clkdm->name, cd->clkdm_name);
- continue;
- }
-
- v = prm_read_mod_bits_shift(clkdm->pwrdm.ptr->prcm_offs,
- OMAP3430_CM_SLEEPDEP,
- (1 << cd->clkdm->dep_bit));
-
- if (v)
- pr_debug("clockdomain: %s: sleep dependency already "
- "set to prevent from idling until %s "
- "idles\n", clkdm->name, cd->clkdm->name);
-
- atomic_set(&cd->sleepdep_usecount, (v) ? 1 : 0);
- }
-};
-
/* Public functions */

/**
@@ -379,12 +288,17 @@ void clkdm_init(struct clockdomain **clkdms,
_autodep_lookup(autodep);
/*
- * Ensure that the *dep_usecount registers reflect the current
- * state of the PRCM.
+ * Put all clockdomains into software-supervised mode; PM code
+ * should later enable hardware-supervised mode as appropriate
*/
list_for_each_entry(clkdm, &clkdm_list, node) {
- _init_wkdep_usecount(clkdm);
- _init_sleepdep_usecount(clkdm);
+ if (clkdm->flags & CLKDM_CAN_FORCE_WAKEUP)
+ omap2_clkdm_wakeup(clkdm);
+ else if (clkdm->flags & CLKDM_CAN_DISABLE_AUTO)
+ omap2_clkdm_deny_idle(clkdm);
+
+ clkdm_clear_all_wkdeps(clkdm);
+ clkdm_clear_all_sleepdeps(clkdm);
}
}
@@ -592,6 +506,9 @@ int clkdm_clear_all_wkdeps(struct clockdomain *clkdm)
if (!omap_chip_is(cd->omap_chip))
continue;
+ if (!cd->clkdm && cd->clkdm_name)
+ cd->clkdm = _clkdm_lookup(cd->clkdm_name);
+
/* PRM accesses are slow, so minimize them */
mask |= 1 << cd->clkdm->dep_bit;
atomic_set(&cd->wkdep_usecount, 0);
@@ -752,6 +669,9 @@ int clkdm_clear_all_sleepdeps(struct clockdomain *clkdm)
if (!omap_chip_is(cd->omap_chip))
continue;
+ if (!cd->clkdm && cd->clkdm_name)
+ cd->clkdm = _clkdm_lookup(cd->clkdm_name);
+
/* PRM accesses are slow, so minimize them */
mask |= 1 << cd->clkdm->dep_bit;
atomic_set(&cd->sleepdep_usecount, 0);
diff --git a/arch/arm/mach-omap2/cm-regbits-34xx.h b/arch/arm/mach-omap2/cm-regbits-34xx.h
index fe82b79d5f3b..4f959a7d881c 100644
--- a/arch/arm/mach-omap2/cm-regbits-34xx.h
+++ b/arch/arm/mach-omap2/cm-regbits-34xx.h
@@ -649,6 +649,8 @@
#define OMAP3430_ST_MCBSP2_MASK (1 << 0)
/* CM_AUTOIDLE_PER */
+#define OMAP3630_AUTO_UART4_MASK (1 << 18)
+#define OMAP3630_AUTO_UART4_SHIFT 18
#define OMAP3430_AUTO_GPIO6_MASK (1 << 17)
#define OMAP3430_AUTO_GPIO6_SHIFT 17
#define OMAP3430_AUTO_GPIO5_MASK (1 << 16)
diff --git a/arch/arm/mach-omap2/cm-regbits-44xx.h b/arch/arm/mach-omap2/cm-regbits-44xx.h
index ac8458e43252..0b72be433776 100644
--- a/arch/arm/mach-omap2/cm-regbits-44xx.h
+++ b/arch/arm/mach-omap2/cm-regbits-44xx.h
@@ -1,8 +1,8 @@
/*
* OMAP44xx Clock Management register bits
*
- * Copyright (C) 2009 Texas Instruments, Inc.
- * Copyright (C) 2009 Nokia Corporation
+ * Copyright (C) 2009-2010 Texas Instruments, Inc.
+ * Copyright (C) 2009-2010 Nokia Corporation
*
* Paul Walmsley (paul@pwsan.com)
* Rajendra Nayak (rnayak@ti.com)
@@ -25,453 +25,459 @@
#include "cm.h"
-/* Used by CM_L3_1_DYNAMICDEP, CM_MPU_DYNAMICDEP, CM_TESLA_DYNAMICDEP */
+/*
+ * Used by CM_L3_1_DYNAMICDEP, CM_L3_1_DYNAMICDEP_RESTORE, CM_MPU_DYNAMICDEP,
+ * CM_TESLA_DYNAMICDEP
+ */
#define OMAP4430_ABE_DYNDEP_SHIFT 3
-#define OMAP4430_ABE_DYNDEP_MASK BITFIELD(3, 3)
+#define OMAP4430_ABE_DYNDEP_MASK (1 << 3)
/*
- * Used by CM_D2D_STATICDEP, CM_DUCATI_STATICDEP, CM_SDMA_STATICDEP,
- * CM_L3INIT_STATICDEP, CM_SDMA_STATICDEP_RESTORE, CM_MPU_STATICDEP,
- * CM_TESLA_STATICDEP
+ * Used by CM_D2D_STATICDEP, CM_D2D_STATICDEP_RESTORE, CM_DUCATI_STATICDEP,
+ * CM_L3INIT_STATICDEP, CM_MPU_STATICDEP, CM_SDMA_STATICDEP,
+ * CM_SDMA_STATICDEP_RESTORE, CM_TESLA_STATICDEP
*/
#define OMAP4430_ABE_STATDEP_SHIFT 3
-#define OMAP4430_ABE_STATDEP_MASK BITFIELD(3, 3)
+#define OMAP4430_ABE_STATDEP_MASK (1 << 3)
-/* Used by CM_L4CFG_DYNAMICDEP */
+/* Used by CM_L4CFG_DYNAMICDEP, CM_L4CFG_DYNAMICDEP_RESTORE */
#define OMAP4430_ALWONCORE_DYNDEP_SHIFT 16
-#define OMAP4430_ALWONCORE_DYNDEP_MASK BITFIELD(16, 16)
+#define OMAP4430_ALWONCORE_DYNDEP_MASK (1 << 16)
/* Used by CM_DUCATI_STATICDEP, CM_MPU_STATICDEP, CM_TESLA_STATICDEP */
#define OMAP4430_ALWONCORE_STATDEP_SHIFT 16
-#define OMAP4430_ALWONCORE_STATDEP_MASK BITFIELD(16, 16)
+#define OMAP4430_ALWONCORE_STATDEP_MASK (1 << 16)
/*
- * Used by CM_AUTOIDLE_DPLL_PER, CM_AUTOIDLE_DPLL_UNIPRO, CM_AUTOIDLE_DPLL_USB,
- * CM_AUTOIDLE_DPLL_CORE_RESTORE, CM_AUTOIDLE_DPLL_ABE, CM_AUTOIDLE_DPLL_CORE,
- * CM_AUTOIDLE_DPLL_DDRPHY, CM_AUTOIDLE_DPLL_IVA, CM_AUTOIDLE_DPLL_MPU
+ * Used by CM_AUTOIDLE_DPLL_ABE, CM_AUTOIDLE_DPLL_CORE,
+ * CM_AUTOIDLE_DPLL_CORE_RESTORE, CM_AUTOIDLE_DPLL_DDRPHY,
+ * CM_AUTOIDLE_DPLL_IVA, CM_AUTOIDLE_DPLL_MPU, CM_AUTOIDLE_DPLL_PER,
+ * CM_AUTOIDLE_DPLL_UNIPRO, CM_AUTOIDLE_DPLL_USB
*/
#define OMAP4430_AUTO_DPLL_MODE_SHIFT 0
-#define OMAP4430_AUTO_DPLL_MODE_MASK BITFIELD(0, 2)
+#define OMAP4430_AUTO_DPLL_MODE_MASK (0x7 << 0)
-/* Used by CM_L4CFG_DYNAMICDEP */
+/* Used by CM_L4CFG_DYNAMICDEP, CM_L4CFG_DYNAMICDEP_RESTORE */
#define OMAP4430_CEFUSE_DYNDEP_SHIFT 17
-#define OMAP4430_CEFUSE_DYNDEP_MASK BITFIELD(17, 17)
+#define OMAP4430_CEFUSE_DYNDEP_MASK (1 << 17)
/* Used by CM_DUCATI_STATICDEP, CM_MPU_STATICDEP, CM_TESLA_STATICDEP */
#define OMAP4430_CEFUSE_STATDEP_SHIFT 17
-#define OMAP4430_CEFUSE_STATDEP_MASK BITFIELD(17, 17)
+#define OMAP4430_CEFUSE_STATDEP_MASK (1 << 17)
/* Used by CM1_ABE_CLKSTCTRL */
#define OMAP4430_CLKACTIVITY_ABE_24M_GFCLK_SHIFT 13
-#define OMAP4430_CLKACTIVITY_ABE_24M_GFCLK_MASK BITFIELD(13, 13)
+#define OMAP4430_CLKACTIVITY_ABE_24M_GFCLK_MASK (1 << 13)
/* Used by CM1_ABE_CLKSTCTRL */
#define OMAP4430_CLKACTIVITY_ABE_ALWON_32K_CLK_SHIFT 12
-#define OMAP4430_CLKACTIVITY_ABE_ALWON_32K_CLK_MASK BITFIELD(12, 12)
+#define OMAP4430_CLKACTIVITY_ABE_ALWON_32K_CLK_MASK (1 << 12)
/* Used by CM_WKUP_CLKSTCTRL */
#define OMAP4430_CLKACTIVITY_ABE_LP_CLK_SHIFT 9
-#define OMAP4430_CLKACTIVITY_ABE_LP_CLK_MASK BITFIELD(9, 9)
+#define OMAP4430_CLKACTIVITY_ABE_LP_CLK_MASK (1 << 9)
/* Used by CM1_ABE_CLKSTCTRL */
#define OMAP4430_CLKACTIVITY_ABE_SYSCLK_SHIFT 11
-#define OMAP4430_CLKACTIVITY_ABE_SYSCLK_MASK BITFIELD(11, 11)
+#define OMAP4430_CLKACTIVITY_ABE_SYSCLK_MASK (1 << 11)
/* Used by CM1_ABE_CLKSTCTRL */
#define OMAP4430_CLKACTIVITY_ABE_X2_CLK_SHIFT 8
-#define OMAP4430_CLKACTIVITY_ABE_X2_CLK_MASK BITFIELD(8, 8)
+#define OMAP4430_CLKACTIVITY_ABE_X2_CLK_MASK (1 << 8)
/* Used by CM_MEMIF_CLKSTCTRL, CM_MEMIF_CLKSTCTRL_RESTORE */
#define OMAP4430_CLKACTIVITY_ASYNC_DLL_CLK_SHIFT 11
-#define OMAP4430_CLKACTIVITY_ASYNC_DLL_CLK_MASK BITFIELD(11, 11)
+#define OMAP4430_CLKACTIVITY_ASYNC_DLL_CLK_MASK (1 << 11)
/* Used by CM_MEMIF_CLKSTCTRL, CM_MEMIF_CLKSTCTRL_RESTORE */
#define OMAP4430_CLKACTIVITY_ASYNC_PHY1_CLK_SHIFT 12
-#define OMAP4430_CLKACTIVITY_ASYNC_PHY1_CLK_MASK BITFIELD(12, 12)
+#define OMAP4430_CLKACTIVITY_ASYNC_PHY1_CLK_MASK (1 << 12)
/* Used by CM_MEMIF_CLKSTCTRL, CM_MEMIF_CLKSTCTRL_RESTORE */
#define OMAP4430_CLKACTIVITY_ASYNC_PHY2_CLK_SHIFT 13
-#define OMAP4430_CLKACTIVITY_ASYNC_PHY2_CLK_MASK BITFIELD(13, 13)
+#define OMAP4430_CLKACTIVITY_ASYNC_PHY2_CLK_MASK (1 << 13)
/* Used by CM_CAM_CLKSTCTRL */
#define OMAP4430_CLKACTIVITY_CAM_PHY_CTRL_GCLK_SHIFT 9
-#define OMAP4430_CLKACTIVITY_CAM_PHY_CTRL_GCLK_MASK BITFIELD(9, 9)
+#define OMAP4430_CLKACTIVITY_CAM_PHY_CTRL_GCLK_MASK (1 << 9)
+
+/* Used by CM_ALWON_CLKSTCTRL */
+#define OMAP4430_CLKACTIVITY_CORE_ALWON_32K_GFCLK_SHIFT 12
+#define OMAP4430_CLKACTIVITY_CORE_ALWON_32K_GFCLK_MASK (1 << 12)
/* Used by CM_EMU_CLKSTCTRL */
#define OMAP4430_CLKACTIVITY_CORE_DPLL_EMU_CLK_SHIFT 9
-#define OMAP4430_CLKACTIVITY_CORE_DPLL_EMU_CLK_MASK BITFIELD(9, 9)
+#define OMAP4430_CLKACTIVITY_CORE_DPLL_EMU_CLK_MASK (1 << 9)
/* Used by CM_CEFUSE_CLKSTCTRL */
#define OMAP4430_CLKACTIVITY_CUST_EFUSE_SYS_CLK_SHIFT 9
-#define OMAP4430_CLKACTIVITY_CUST_EFUSE_SYS_CLK_MASK BITFIELD(9, 9)
+#define OMAP4430_CLKACTIVITY_CUST_EFUSE_SYS_CLK_MASK (1 << 9)
/* Used by CM_MEMIF_CLKSTCTRL, CM_MEMIF_CLKSTCTRL_RESTORE */
#define OMAP4430_CLKACTIVITY_DLL_CLK_SHIFT 9
-#define OMAP4430_CLKACTIVITY_DLL_CLK_MASK BITFIELD(9, 9)
+#define OMAP4430_CLKACTIVITY_DLL_CLK_MASK (1 << 9)
/* Used by CM_L4PER_CLKSTCTRL, CM_L4PER_CLKSTCTRL_RESTORE */
#define OMAP4430_CLKACTIVITY_DMT10_GFCLK_SHIFT 9
-#define OMAP4430_CLKACTIVITY_DMT10_GFCLK_MASK BITFIELD(9, 9)
+#define OMAP4430_CLKACTIVITY_DMT10_GFCLK_MASK (1 << 9)
/* Used by CM_L4PER_CLKSTCTRL, CM_L4PER_CLKSTCTRL_RESTORE */
#define OMAP4430_CLKACTIVITY_DMT11_GFCLK_SHIFT 10
-#define OMAP4430_CLKACTIVITY_DMT11_GFCLK_MASK BITFIELD(10, 10)
+#define OMAP4430_CLKACTIVITY_DMT11_GFCLK_MASK (1 << 10)
/* Used by CM_L4PER_CLKSTCTRL, CM_L4PER_CLKSTCTRL_RESTORE */
#define OMAP4430_CLKACTIVITY_DMT2_GFCLK_SHIFT 11
-#define OMAP4430_CLKACTIVITY_DMT2_GFCLK_MASK BITFIELD(11, 11)
+#define OMAP4430_CLKACTIVITY_DMT2_GFCLK_MASK (1 << 11)
/* Used by CM_L4PER_CLKSTCTRL, CM_L4PER_CLKSTCTRL_RESTORE */
#define OMAP4430_CLKACTIVITY_DMT3_GFCLK_SHIFT 12
-#define OMAP4430_CLKACTIVITY_DMT3_GFCLK_MASK BITFIELD(12, 12)
+#define OMAP4430_CLKACTIVITY_DMT3_GFCLK_MASK (1 << 12)
/* Used by CM_L4PER_CLKSTCTRL, CM_L4PER_CLKSTCTRL_RESTORE */
#define OMAP4430_CLKACTIVITY_DMT4_GFCLK_SHIFT 13
-#define OMAP4430_CLKACTIVITY_DMT4_GFCLK_MASK BITFIELD(13, 13)
+#define OMAP4430_CLKACTIVITY_DMT4_GFCLK_MASK (1 << 13)
/* Used by CM_L4PER_CLKSTCTRL, CM_L4PER_CLKSTCTRL_RESTORE */
#define OMAP4430_CLKACTIVITY_DMT9_GFCLK_SHIFT 14
-#define OMAP4430_CLKACTIVITY_DMT9_GFCLK_MASK BITFIELD(14, 14)
+#define OMAP4430_CLKACTIVITY_DMT9_GFCLK_MASK (1 << 14)
/* Used by CM_DSS_CLKSTCTRL */
#define OMAP4430_CLKACTIVITY_DSS_ALWON_SYS_CLK_SHIFT 10
-#define OMAP4430_CLKACTIVITY_DSS_ALWON_SYS_CLK_MASK BITFIELD(10, 10)
+#define OMAP4430_CLKACTIVITY_DSS_ALWON_SYS_CLK_MASK (1 << 10)
/* Used by CM_DSS_CLKSTCTRL */
#define OMAP4430_CLKACTIVITY_DSS_FCLK_SHIFT 9
-#define OMAP4430_CLKACTIVITY_DSS_FCLK_MASK BITFIELD(9, 9)
+#define OMAP4430_CLKACTIVITY_DSS_FCLK_MASK (1 << 9)
/* Used by CM_DUCATI_CLKSTCTRL */
#define OMAP4430_CLKACTIVITY_DUCATI_GCLK_SHIFT 8
-#define OMAP4430_CLKACTIVITY_DUCATI_GCLK_MASK BITFIELD(8, 8)
-
-/* Used by CM_L3INIT_CLKSTCTRL, CM_L3INIT_CLKSTCTRL_RESTORE */
-#define OMAP4430_CLKACTIVITY_EMAC_50MHZ_CLK_SHIFT 10
-#define OMAP4430_CLKACTIVITY_EMAC_50MHZ_CLK_MASK BITFIELD(10, 10)
+#define OMAP4430_CLKACTIVITY_DUCATI_GCLK_MASK (1 << 8)
/* Used by CM_EMU_CLKSTCTRL */
#define OMAP4430_CLKACTIVITY_EMU_SYS_CLK_SHIFT 8
-#define OMAP4430_CLKACTIVITY_EMU_SYS_CLK_MASK BITFIELD(8, 8)
+#define OMAP4430_CLKACTIVITY_EMU_SYS_CLK_MASK (1 << 8)
/* Used by CM_CAM_CLKSTCTRL */
#define OMAP4430_CLKACTIVITY_FDIF_GFCLK_SHIFT 10
-#define OMAP4430_CLKACTIVITY_FDIF_GFCLK_MASK BITFIELD(10, 10)
+#define OMAP4430_CLKACTIVITY_FDIF_GFCLK_MASK (1 << 10)
/* Used by CM_L4PER_CLKSTCTRL, CM_L4PER_CLKSTCTRL_RESTORE */
#define OMAP4430_CLKACTIVITY_FUNC_12M_GFCLK_SHIFT 15
-#define OMAP4430_CLKACTIVITY_FUNC_12M_GFCLK_MASK BITFIELD(15, 15)
+#define OMAP4430_CLKACTIVITY_FUNC_12M_GFCLK_MASK (1 << 15)
/* Used by CM1_ABE_CLKSTCTRL */
#define OMAP4430_CLKACTIVITY_FUNC_24M_GFCLK_SHIFT 10
-#define OMAP4430_CLKACTIVITY_FUNC_24M_GFCLK_MASK BITFIELD(10, 10)
+#define OMAP4430_CLKACTIVITY_FUNC_24M_GFCLK_MASK (1 << 10)
/* Used by CM_DSS_CLKSTCTRL */
#define OMAP4430_CLKACTIVITY_HDMI_PHY_48MHZ_GFCLK_SHIFT 11
-#define OMAP4430_CLKACTIVITY_HDMI_PHY_48MHZ_GFCLK_MASK BITFIELD(11, 11)
+#define OMAP4430_CLKACTIVITY_HDMI_PHY_48MHZ_GFCLK_MASK (1 << 11)
/* Used by CM_L3INIT_CLKSTCTRL, CM_L3INIT_CLKSTCTRL_RESTORE */
#define OMAP4430_CLKACTIVITY_HSIC_P1_480M_GFCLK_SHIFT 20
-#define OMAP4430_CLKACTIVITY_HSIC_P1_480M_GFCLK_MASK BITFIELD(20, 20)
+#define OMAP4430_CLKACTIVITY_HSIC_P1_480M_GFCLK_MASK (1 << 20)
/* Used by CM_L3INIT_CLKSTCTRL, CM_L3INIT_CLKSTCTRL_RESTORE */
#define OMAP4430_CLKACTIVITY_HSIC_P1_GFCLK_SHIFT 26
-#define OMAP4430_CLKACTIVITY_HSIC_P1_GFCLK_MASK BITFIELD(26, 26)
+#define OMAP4430_CLKACTIVITY_HSIC_P1_GFCLK_MASK (1 << 26)
/* Used by CM_L3INIT_CLKSTCTRL, CM_L3INIT_CLKSTCTRL_RESTORE */
#define OMAP4430_CLKACTIVITY_HSIC_P2_480M_GFCLK_SHIFT 21
-#define OMAP4430_CLKACTIVITY_HSIC_P2_480M_GFCLK_MASK BITFIELD(21, 21)
+#define OMAP4430_CLKACTIVITY_HSIC_P2_480M_GFCLK_MASK (1 << 21)
/* Used by CM_L3INIT_CLKSTCTRL, CM_L3INIT_CLKSTCTRL_RESTORE */
#define OMAP4430_CLKACTIVITY_HSIC_P2_GFCLK_SHIFT 27
-#define OMAP4430_CLKACTIVITY_HSIC_P2_GFCLK_MASK BITFIELD(27, 27)
-
-/* Used by CM_L3INIT_CLKSTCTRL */
-#define OMAP4430_CLKACTIVITY_INIT_32K_GFCLK_SHIFT 31
-#define OMAP4430_CLKACTIVITY_INIT_32K_GFCLK_MASK BITFIELD(31, 31)
+#define OMAP4430_CLKACTIVITY_HSIC_P2_GFCLK_MASK (1 << 27)
/* Used by CM_L3INIT_CLKSTCTRL, CM_L3INIT_CLKSTCTRL_RESTORE */
#define OMAP4430_CLKACTIVITY_INIT_48MC_GFCLK_SHIFT 13
-#define OMAP4430_CLKACTIVITY_INIT_48MC_GFCLK_MASK BITFIELD(13, 13)
+#define OMAP4430_CLKACTIVITY_INIT_48MC_GFCLK_MASK (1 << 13)
/* Used by CM_L3INIT_CLKSTCTRL, CM_L3INIT_CLKSTCTRL_RESTORE */
#define OMAP4430_CLKACTIVITY_INIT_48M_GFCLK_SHIFT 12
-#define OMAP4430_CLKACTIVITY_INIT_48M_GFCLK_MASK BITFIELD(12, 12)
+#define OMAP4430_CLKACTIVITY_INIT_48M_GFCLK_MASK (1 << 12)
/* Used by CM_L3INIT_CLKSTCTRL, CM_L3INIT_CLKSTCTRL_RESTORE */
#define OMAP4430_CLKACTIVITY_INIT_60M_P1_GFCLK_SHIFT 28
-#define OMAP4430_CLKACTIVITY_INIT_60M_P1_GFCLK_MASK BITFIELD(28, 28)
+#define OMAP4430_CLKACTIVITY_INIT_60M_P1_GFCLK_MASK (1 << 28)
/* Used by CM_L3INIT_CLKSTCTRL, CM_L3INIT_CLKSTCTRL_RESTORE */
#define OMAP4430_CLKACTIVITY_INIT_60M_P2_GFCLK_SHIFT 29
-#define OMAP4430_CLKACTIVITY_INIT_60M_P2_GFCLK_MASK BITFIELD(29, 29)
+#define OMAP4430_CLKACTIVITY_INIT_60M_P2_GFCLK_MASK (1 << 29)
/* Used by CM_L3INIT_CLKSTCTRL, CM_L3INIT_CLKSTCTRL_RESTORE */
#define OMAP4430_CLKACTIVITY_INIT_96M_GFCLK_SHIFT 11
-#define OMAP4430_CLKACTIVITY_INIT_96M_GFCLK_MASK BITFIELD(11, 11)
+#define OMAP4430_CLKACTIVITY_INIT_96M_GFCLK_MASK (1 << 11)
/* Used by CM_L3INIT_CLKSTCTRL, CM_L3INIT_CLKSTCTRL_RESTORE */
#define OMAP4430_CLKACTIVITY_INIT_HSI_GFCLK_SHIFT 16
-#define OMAP4430_CLKACTIVITY_INIT_HSI_GFCLK_MASK BITFIELD(16, 16)
+#define OMAP4430_CLKACTIVITY_INIT_HSI_GFCLK_MASK (1 << 16)
/* Used by CM_L3INIT_CLKSTCTRL, CM_L3INIT_CLKSTCTRL_RESTORE */
#define OMAP4430_CLKACTIVITY_INIT_HSMMC1_GFCLK_SHIFT 17
-#define OMAP4430_CLKACTIVITY_INIT_HSMMC1_GFCLK_MASK BITFIELD(17, 17)
+#define OMAP4430_CLKACTIVITY_INIT_HSMMC1_GFCLK_MASK (1 << 17)
/* Used by CM_L3INIT_CLKSTCTRL, CM_L3INIT_CLKSTCTRL_RESTORE */
#define OMAP4430_CLKACTIVITY_INIT_HSMMC2_GFCLK_SHIFT 18
-#define OMAP4430_CLKACTIVITY_INIT_HSMMC2_GFCLK_MASK BITFIELD(18, 18)
+#define OMAP4430_CLKACTIVITY_INIT_HSMMC2_GFCLK_MASK (1 << 18)
/* Used by CM_L3INIT_CLKSTCTRL, CM_L3INIT_CLKSTCTRL_RESTORE */
#define OMAP4430_CLKACTIVITY_INIT_HSMMC6_GFCLK_SHIFT 19
-#define OMAP4430_CLKACTIVITY_INIT_HSMMC6_GFCLK_MASK BITFIELD(19, 19)
+#define OMAP4430_CLKACTIVITY_INIT_HSMMC6_GFCLK_MASK (1 << 19)
/* Used by CM_CAM_CLKSTCTRL */
#define OMAP4430_CLKACTIVITY_ISS_GCLK_SHIFT 8
-#define OMAP4430_CLKACTIVITY_ISS_GCLK_MASK BITFIELD(8, 8)
+#define OMAP4430_CLKACTIVITY_ISS_GCLK_MASK (1 << 8)
/* Used by CM_IVAHD_CLKSTCTRL */
#define OMAP4430_CLKACTIVITY_IVAHD_ROOT_CLK_SHIFT 8
-#define OMAP4430_CLKACTIVITY_IVAHD_ROOT_CLK_MASK BITFIELD(8, 8)
+#define OMAP4430_CLKACTIVITY_IVAHD_ROOT_CLK_MASK (1 << 8)
-/* Used by CM_L3INIT_CLKSTCTRL, CM_L3INIT_CLKSTCTRL_RESTORE */
-#define OMAP4430_CLKACTIVITY_L3INIT_DPLL_ALWON_CLK_SHIFT 14
-#define OMAP4430_CLKACTIVITY_L3INIT_DPLL_ALWON_CLK_MASK BITFIELD(14, 14)
+/* Used by CM_D2D_CLKSTCTRL */
+#define OMAP4430_CLKACTIVITY_L3X2_D2D_GICLK_SHIFT 10
+#define OMAP4430_CLKACTIVITY_L3X2_D2D_GICLK_MASK (1 << 10)
/* Used by CM_L3_1_CLKSTCTRL, CM_L3_1_CLKSTCTRL_RESTORE */
#define OMAP4430_CLKACTIVITY_L3_1_GICLK_SHIFT 8
-#define OMAP4430_CLKACTIVITY_L3_1_GICLK_MASK BITFIELD(8, 8)
+#define OMAP4430_CLKACTIVITY_L3_1_GICLK_MASK (1 << 8)
/* Used by CM_L3_2_CLKSTCTRL, CM_L3_2_CLKSTCTRL_RESTORE */
#define OMAP4430_CLKACTIVITY_L3_2_GICLK_SHIFT 8
-#define OMAP4430_CLKACTIVITY_L3_2_GICLK_MASK BITFIELD(8, 8)
+#define OMAP4430_CLKACTIVITY_L3_2_GICLK_MASK (1 << 8)
/* Used by CM_D2D_CLKSTCTRL */
#define OMAP4430_CLKACTIVITY_L3_D2D_GICLK_SHIFT 8
-#define OMAP4430_CLKACTIVITY_L3_D2D_GICLK_MASK BITFIELD(8, 8)
+#define OMAP4430_CLKACTIVITY_L3_D2D_GICLK_MASK (1 << 8)
/* Used by CM_SDMA_CLKSTCTRL */
#define OMAP4430_CLKACTIVITY_L3_DMA_GICLK_SHIFT 8
-#define OMAP4430_CLKACTIVITY_L3_DMA_GICLK_MASK BITFIELD(8, 8)
+#define OMAP4430_CLKACTIVITY_L3_DMA_GICLK_MASK (1 << 8)
/* Used by CM_DSS_CLKSTCTRL */
#define OMAP4430_CLKACTIVITY_L3_DSS_GICLK_SHIFT 8
-#define OMAP4430_CLKACTIVITY_L3_DSS_GICLK_MASK BITFIELD(8, 8)
+#define OMAP4430_CLKACTIVITY_L3_DSS_GICLK_MASK (1 << 8)
/* Used by CM_MEMIF_CLKSTCTRL, CM_MEMIF_CLKSTCTRL_RESTORE */
#define OMAP4430_CLKACTIVITY_L3_EMIF_GICLK_SHIFT 8
-#define OMAP4430_CLKACTIVITY_L3_EMIF_GICLK_MASK BITFIELD(8, 8)
+#define OMAP4430_CLKACTIVITY_L3_EMIF_GICLK_MASK (1 << 8)
/* Used by CM_GFX_CLKSTCTRL */
#define OMAP4430_CLKACTIVITY_L3_GFX_GICLK_SHIFT 8
-#define OMAP4430_CLKACTIVITY_L3_GFX_GICLK_MASK BITFIELD(8, 8)
+#define OMAP4430_CLKACTIVITY_L3_GFX_GICLK_MASK (1 << 8)
/* Used by CM_L3INIT_CLKSTCTRL, CM_L3INIT_CLKSTCTRL_RESTORE */
#define OMAP4430_CLKACTIVITY_L3_INIT_GICLK_SHIFT 8
-#define OMAP4430_CLKACTIVITY_L3_INIT_GICLK_MASK BITFIELD(8, 8)
+#define OMAP4430_CLKACTIVITY_L3_INIT_GICLK_MASK (1 << 8)
/* Used by CM_L3INSTR_CLKSTCTRL */
#define OMAP4430_CLKACTIVITY_L3_INSTR_GICLK_SHIFT 8
-#define OMAP4430_CLKACTIVITY_L3_INSTR_GICLK_MASK BITFIELD(8, 8)
+#define OMAP4430_CLKACTIVITY_L3_INSTR_GICLK_MASK (1 << 8)
/* Used by CM_L4SEC_CLKSTCTRL */
#define OMAP4430_CLKACTIVITY_L3_SECURE_GICLK_SHIFT 8
-#define OMAP4430_CLKACTIVITY_L3_SECURE_GICLK_MASK BITFIELD(8, 8)
+#define OMAP4430_CLKACTIVITY_L3_SECURE_GICLK_MASK (1 << 8)
/* Used by CM_ALWON_CLKSTCTRL */
#define OMAP4430_CLKACTIVITY_L4_AO_ICLK_SHIFT 8
-#define OMAP4430_CLKACTIVITY_L4_AO_ICLK_MASK BITFIELD(8, 8)
+#define OMAP4430_CLKACTIVITY_L4_AO_ICLK_MASK (1 << 8)
/* Used by CM_CEFUSE_CLKSTCTRL */
#define OMAP4430_CLKACTIVITY_L4_CEFUSE_GICLK_SHIFT 8
-#define OMAP4430_CLKACTIVITY_L4_CEFUSE_GICLK_MASK BITFIELD(8, 8)
+#define OMAP4430_CLKACTIVITY_L4_CEFUSE_GICLK_MASK (1 << 8)
/* Used by CM_L4CFG_CLKSTCTRL, CM_L4CFG_CLKSTCTRL_RESTORE */
#define OMAP4430_CLKACTIVITY_L4_CFG_GICLK_SHIFT 8
-#define OMAP4430_CLKACTIVITY_L4_CFG_GICLK_MASK BITFIELD(8, 8)
+#define OMAP4430_CLKACTIVITY_L4_CFG_GICLK_MASK (1 << 8)
/* Used by CM_D2D_CLKSTCTRL */
#define OMAP4430_CLKACTIVITY_L4_D2D_GICLK_SHIFT 9
-#define OMAP4430_CLKACTIVITY_L4_D2D_GICLK_MASK BITFIELD(9, 9)
+#define OMAP4430_CLKACTIVITY_L4_D2D_GICLK_MASK (1 << 9)
/* Used by CM_L3INIT_CLKSTCTRL, CM_L3INIT_CLKSTCTRL_RESTORE */
#define OMAP4430_CLKACTIVITY_L4_INIT_GICLK_SHIFT 9
-#define OMAP4430_CLKACTIVITY_L4_INIT_GICLK_MASK BITFIELD(9, 9)
+#define OMAP4430_CLKACTIVITY_L4_INIT_GICLK_MASK (1 << 9)
/* Used by CM_L4PER_CLKSTCTRL, CM_L4PER_CLKSTCTRL_RESTORE */
#define OMAP4430_CLKACTIVITY_L4_PER_GICLK_SHIFT 8
-#define OMAP4430_CLKACTIVITY_L4_PER_GICLK_MASK BITFIELD(8, 8)
+#define OMAP4430_CLKACTIVITY_L4_PER_GICLK_MASK (1 << 8)
/* Used by CM_L4SEC_CLKSTCTRL */
#define OMAP4430_CLKACTIVITY_L4_SECURE_GICLK_SHIFT 9
-#define OMAP4430_CLKACTIVITY_L4_SECURE_GICLK_MASK BITFIELD(9, 9)
+#define OMAP4430_CLKACTIVITY_L4_SECURE_GICLK_MASK (1 << 9)
/* Used by CM_WKUP_CLKSTCTRL */
#define OMAP4430_CLKACTIVITY_L4_WKUP_GICLK_SHIFT 12
-#define OMAP4430_CLKACTIVITY_L4_WKUP_GICLK_MASK BITFIELD(12, 12)
+#define OMAP4430_CLKACTIVITY_L4_WKUP_GICLK_MASK (1 << 12)
/* Used by CM_MPU_CLKSTCTRL, CM_MPU_CLKSTCTRL_RESTORE */
#define OMAP4430_CLKACTIVITY_MPU_DPLL_CLK_SHIFT 8
-#define OMAP4430_CLKACTIVITY_MPU_DPLL_CLK_MASK BITFIELD(8, 8)
+#define OMAP4430_CLKACTIVITY_MPU_DPLL_CLK_MASK (1 << 8)
/* Used by CM1_ABE_CLKSTCTRL */
#define OMAP4430_CLKACTIVITY_OCP_ABE_GICLK_SHIFT 9
-#define OMAP4430_CLKACTIVITY_OCP_ABE_GICLK_MASK BITFIELD(9, 9)
+#define OMAP4430_CLKACTIVITY_OCP_ABE_GICLK_MASK (1 << 9)
/* Used by CM_L4PER_CLKSTCTRL, CM_L4PER_CLKSTCTRL_RESTORE */
#define OMAP4430_CLKACTIVITY_PER_24MC_GFCLK_SHIFT 16
-#define OMAP4430_CLKACTIVITY_PER_24MC_GFCLK_MASK BITFIELD(16, 16)
+#define OMAP4430_CLKACTIVITY_PER_24MC_GFCLK_MASK (1 << 16)
/* Used by CM_L4PER_CLKSTCTRL, CM_L4PER_CLKSTCTRL_RESTORE */
#define OMAP4430_CLKACTIVITY_PER_32K_GFCLK_SHIFT 17
-#define OMAP4430_CLKACTIVITY_PER_32K_GFCLK_MASK BITFIELD(17, 17)
+#define OMAP4430_CLKACTIVITY_PER_32K_GFCLK_MASK (1 << 17)
/* Used by CM_L4PER_CLKSTCTRL, CM_L4PER_CLKSTCTRL_RESTORE */
#define OMAP4430_CLKACTIVITY_PER_48M_GFCLK_SHIFT 18
-#define OMAP4430_CLKACTIVITY_PER_48M_GFCLK_MASK BITFIELD(18, 18)
+#define OMAP4430_CLKACTIVITY_PER_48M_GFCLK_MASK (1 << 18)
/* Used by CM_L4PER_CLKSTCTRL, CM_L4PER_CLKSTCTRL_RESTORE */
#define OMAP4430_CLKACTIVITY_PER_96M_GFCLK_SHIFT 19
-#define OMAP4430_CLKACTIVITY_PER_96M_GFCLK_MASK BITFIELD(19, 19)
+#define OMAP4430_CLKACTIVITY_PER_96M_GFCLK_MASK (1 << 19)
/* Used by CM_L4PER_CLKSTCTRL, CM_L4PER_CLKSTCTRL_RESTORE */
#define OMAP4430_CLKACTIVITY_PER_ABE_24M_GFCLK_SHIFT 25
-#define OMAP4430_CLKACTIVITY_PER_ABE_24M_GFCLK_MASK BITFIELD(25, 25)
-
-/* Used by CM_EMU_CLKSTCTRL */
-#define OMAP4430_CLKACTIVITY_PER_DPLL_EMU_CLK_SHIFT 10
-#define OMAP4430_CLKACTIVITY_PER_DPLL_EMU_CLK_MASK BITFIELD(10, 10)
+#define OMAP4430_CLKACTIVITY_PER_ABE_24M_GFCLK_MASK (1 << 25)
/* Used by CM_L4PER_CLKSTCTRL, CM_L4PER_CLKSTCTRL_RESTORE */
#define OMAP4430_CLKACTIVITY_PER_MCASP2_GFCLK_SHIFT 20
-#define OMAP4430_CLKACTIVITY_PER_MCASP2_GFCLK_MASK BITFIELD(20, 20)
+#define OMAP4430_CLKACTIVITY_PER_MCASP2_GFCLK_MASK (1 << 20)
/* Used by CM_L4PER_CLKSTCTRL, CM_L4PER_CLKSTCTRL_RESTORE */
#define OMAP4430_CLKACTIVITY_PER_MCASP3_GFCLK_SHIFT 21
-#define OMAP4430_CLKACTIVITY_PER_MCASP3_GFCLK_MASK BITFIELD(21, 21)
+#define OMAP4430_CLKACTIVITY_PER_MCASP3_GFCLK_MASK (1 << 21)
/* Used by CM_L4PER_CLKSTCTRL, CM_L4PER_CLKSTCTRL_RESTORE */
#define OMAP4430_CLKACTIVITY_PER_MCBSP4_GFCLK_SHIFT 22
-#define OMAP4430_CLKACTIVITY_PER_MCBSP4_GFCLK_MASK BITFIELD(22, 22)
+#define OMAP4430_CLKACTIVITY_PER_MCBSP4_GFCLK_MASK (1 << 22)
/* Used by CM_L4PER_CLKSTCTRL, CM_L4PER_CLKSTCTRL_RESTORE */
#define OMAP4430_CLKACTIVITY_PER_SYS_GFCLK_SHIFT 24
-#define OMAP4430_CLKACTIVITY_PER_SYS_GFCLK_MASK BITFIELD(24, 24)
+#define OMAP4430_CLKACTIVITY_PER_SYS_GFCLK_MASK (1 << 24)
/* Used by CM_MEMIF_CLKSTCTRL, CM_MEMIF_CLKSTCTRL_RESTORE */
#define OMAP4430_CLKACTIVITY_PHY_ROOT_CLK_SHIFT 10
-#define OMAP4430_CLKACTIVITY_PHY_ROOT_CLK_MASK BITFIELD(10, 10)
+#define OMAP4430_CLKACTIVITY_PHY_ROOT_CLK_MASK (1 << 10)
/* Used by CM_GFX_CLKSTCTRL */
#define OMAP4430_CLKACTIVITY_SGX_GFCLK_SHIFT 9
-#define OMAP4430_CLKACTIVITY_SGX_GFCLK_MASK BITFIELD(9, 9)
+#define OMAP4430_CLKACTIVITY_SGX_GFCLK_MASK (1 << 9)
/* Used by CM_ALWON_CLKSTCTRL */
#define OMAP4430_CLKACTIVITY_SR_CORE_SYSCLK_SHIFT 11
-#define OMAP4430_CLKACTIVITY_SR_CORE_SYSCLK_MASK BITFIELD(11, 11)
+#define OMAP4430_CLKACTIVITY_SR_CORE_SYSCLK_MASK (1 << 11)
/* Used by CM_ALWON_CLKSTCTRL */
#define OMAP4430_CLKACTIVITY_SR_IVA_SYSCLK_SHIFT 10
-#define OMAP4430_CLKACTIVITY_SR_IVA_SYSCLK_MASK BITFIELD(10, 10)
+#define OMAP4430_CLKACTIVITY_SR_IVA_SYSCLK_MASK (1 << 10)
/* Used by CM_ALWON_CLKSTCTRL */
#define OMAP4430_CLKACTIVITY_SR_MPU_SYSCLK_SHIFT 9
-#define OMAP4430_CLKACTIVITY_SR_MPU_SYSCLK_MASK BITFIELD(9, 9)
+#define OMAP4430_CLKACTIVITY_SR_MPU_SYSCLK_MASK (1 << 9)
/* Used by CM_WKUP_CLKSTCTRL */
#define OMAP4430_CLKACTIVITY_SYS_CLK_SHIFT 8
-#define OMAP4430_CLKACTIVITY_SYS_CLK_MASK BITFIELD(8, 8)
+#define OMAP4430_CLKACTIVITY_SYS_CLK_MASK (1 << 8)
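For orientation (editorial aside, not part of the patch): the single-bit CLKACTIVITY fields above now pair each shift with an explicit (1 << n) mask, so a status bit can be tested directly. A minimal, hypothetical sketch — readl() is the standard kernel MMIO accessor, cm_wkup_clkstctrl_va is a placeholder for a mapped register address, and only the OMAP4430_CLKACTIVITY_SYS_CLK_* macros come from this header:

	/* Hypothetical check: is SYS_CLK reported active in CM_WKUP_CLKSTCTRL? */
	u32 val = readl(cm_wkup_clkstctrl_va);	/* placeholder iomem address */
	bool sys_clk_active = (val & OMAP4430_CLKACTIVITY_SYS_CLK_MASK) != 0;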
/* Used by CM_TESLA_CLKSTCTRL */
#define OMAP4430_CLKACTIVITY_TESLA_ROOT_CLK_SHIFT 8
-#define OMAP4430_CLKACTIVITY_TESLA_ROOT_CLK_MASK BITFIELD(8, 8)
+#define OMAP4430_CLKACTIVITY_TESLA_ROOT_CLK_MASK (1 << 8)
/* Used by CM_L3INIT_CLKSTCTRL, CM_L3INIT_CLKSTCTRL_RESTORE */
#define OMAP4430_CLKACTIVITY_TLL_CH0_GFCLK_SHIFT 22
-#define OMAP4430_CLKACTIVITY_TLL_CH0_GFCLK_MASK BITFIELD(22, 22)
+#define OMAP4430_CLKACTIVITY_TLL_CH0_GFCLK_MASK (1 << 22)
/* Used by CM_L3INIT_CLKSTCTRL, CM_L3INIT_CLKSTCTRL_RESTORE */
#define OMAP4430_CLKACTIVITY_TLL_CH1_GFCLK_SHIFT 23
-#define OMAP4430_CLKACTIVITY_TLL_CH1_GFCLK_MASK BITFIELD(23, 23)
+#define OMAP4430_CLKACTIVITY_TLL_CH1_GFCLK_MASK (1 << 23)
/* Used by CM_L3INIT_CLKSTCTRL, CM_L3INIT_CLKSTCTRL_RESTORE */
#define OMAP4430_CLKACTIVITY_TLL_CH2_GFCLK_SHIFT 24
-#define OMAP4430_CLKACTIVITY_TLL_CH2_GFCLK_MASK BITFIELD(24, 24)
+#define OMAP4430_CLKACTIVITY_TLL_CH2_GFCLK_MASK (1 << 24)
+
+/* Used by CM_L3INIT_CLKSTCTRL, CM_L3INIT_CLKSTCTRL_RESTORE */
+#define OMAP4430_CLKACTIVITY_UNIPRO_DPLL_CLK_SHIFT 10
+#define OMAP4430_CLKACTIVITY_UNIPRO_DPLL_CLK_MASK (1 << 10)
+
+/* Used by CM_L3INIT_CLKSTCTRL, CM_L3INIT_CLKSTCTRL_RESTORE */
+#define OMAP4430_CLKACTIVITY_USB_DPLL_CLK_SHIFT 14
+#define OMAP4430_CLKACTIVITY_USB_DPLL_CLK_MASK (1 << 14)
/* Used by CM_L3INIT_CLKSTCTRL, CM_L3INIT_CLKSTCTRL_RESTORE */
#define OMAP4430_CLKACTIVITY_USB_DPLL_HS_CLK_SHIFT 15
-#define OMAP4430_CLKACTIVITY_USB_DPLL_HS_CLK_MASK BITFIELD(15, 15)
+#define OMAP4430_CLKACTIVITY_USB_DPLL_HS_CLK_MASK (1 << 15)
/* Used by CM_WKUP_CLKSTCTRL */
#define OMAP4430_CLKACTIVITY_USIM_GFCLK_SHIFT 10
-#define OMAP4430_CLKACTIVITY_USIM_GFCLK_MASK BITFIELD(10, 10)
+#define OMAP4430_CLKACTIVITY_USIM_GFCLK_MASK (1 << 10)
/* Used by CM_L3INIT_CLKSTCTRL, CM_L3INIT_CLKSTCTRL_RESTORE */
#define OMAP4430_CLKACTIVITY_UTMI_P3_GFCLK_SHIFT 30
-#define OMAP4430_CLKACTIVITY_UTMI_P3_GFCLK_MASK BITFIELD(30, 30)
+#define OMAP4430_CLKACTIVITY_UTMI_P3_GFCLK_MASK (1 << 30)
/* Used by CM_L3INIT_CLKSTCTRL, CM_L3INIT_CLKSTCTRL_RESTORE */
#define OMAP4430_CLKACTIVITY_UTMI_ROOT_GFCLK_SHIFT 25
-#define OMAP4430_CLKACTIVITY_UTMI_ROOT_GFCLK_MASK BITFIELD(25, 25)
+#define OMAP4430_CLKACTIVITY_UTMI_ROOT_GFCLK_MASK (1 << 25)
/* Used by CM_WKUP_CLKSTCTRL */
#define OMAP4430_CLKACTIVITY_WKUP_32K_GFCLK_SHIFT 11
-#define OMAP4430_CLKACTIVITY_WKUP_32K_GFCLK_MASK BITFIELD(11, 11)
+#define OMAP4430_CLKACTIVITY_WKUP_32K_GFCLK_MASK (1 << 11)
/*
- * Used by CM_WKUP_TIMER1_CLKCTRL, CM_L4PER_DMTIMER10_CLKCTRL,
+ * Used by CM1_ABE_TIMER5_CLKCTRL, CM1_ABE_TIMER6_CLKCTRL,
+ * CM1_ABE_TIMER7_CLKCTRL, CM1_ABE_TIMER8_CLKCTRL, CM_L3INIT_MMC1_CLKCTRL,
+ * CM_L3INIT_MMC2_CLKCTRL, CM_L3INIT_MMC6_CLKCTRL, CM_L4PER_DMTIMER10_CLKCTRL,
* CM_L4PER_DMTIMER11_CLKCTRL, CM_L4PER_DMTIMER2_CLKCTRL,
* CM_L4PER_DMTIMER3_CLKCTRL, CM_L4PER_DMTIMER4_CLKCTRL,
* CM_L4PER_DMTIMER9_CLKCTRL, CM_L4PER_MCASP2_CLKCTRL, CM_L4PER_MCASP3_CLKCTRL,
- * CM_L3INIT_MMC1_CLKCTRL, CM_L3INIT_MMC2_CLKCTRL, CM_L3INIT_MMC6_CLKCTRL,
- * CM1_ABE_TIMER5_CLKCTRL, CM1_ABE_TIMER6_CLKCTRL, CM1_ABE_TIMER7_CLKCTRL,
- * CM1_ABE_TIMER8_CLKCTRL
+ * CM_WKUP_TIMER1_CLKCTRL
*/
#define OMAP4430_CLKSEL_SHIFT 24
-#define OMAP4430_CLKSEL_MASK BITFIELD(24, 24)
+#define OMAP4430_CLKSEL_MASK (1 << 24)
/*
* Renamed from CLKSEL Used by CM_ABE_DSS_SYS_CLKSEL, CM_ABE_PLL_REF_CLKSEL,
- * CM_DPLL_SYS_REF_CLKSEL, CM_L4_WKUP_CLKSEL, CM_CLKSEL_DUCATI_ISS_ROOT,
- * CM_CLKSEL_USB_60MHZ
+ * CM_L4_WKUP_CLKSEL, CM_CLKSEL_DUCATI_ISS_ROOT, CM_CLKSEL_USB_60MHZ
*/
#define OMAP4430_CLKSEL_0_0_SHIFT 0
-#define OMAP4430_CLKSEL_0_0_MASK BITFIELD(0, 0)
+#define OMAP4430_CLKSEL_0_0_MASK (1 << 0)
/* Renamed from CLKSEL Used by CM_BYPCLK_DPLL_IVA, CM_BYPCLK_DPLL_MPU */
#define OMAP4430_CLKSEL_0_1_SHIFT 0
-#define OMAP4430_CLKSEL_0_1_MASK BITFIELD(0, 1)
+#define OMAP4430_CLKSEL_0_1_MASK (0x3 << 0)
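Editorial aside (not part of the patch): every replacement in this hunk follows the same mechanical rule — BITFIELD(low, high) denoted a contiguous mask over bits low..high inclusive, which the new literals spell out as ((1 << (high - low + 1)) - 1) << low. A restatement of that equivalence, using only values visible in the surrounding lines:

	/* width = high - low + 1;  mask = ((1u << width) - 1) << low        */
	/* BITFIELD(14, 14) -> ((1u << 1)  - 1) << 14 == (1 << 14)           */
	/* BITFIELD(0, 1)   -> ((1u << 2)  - 1) << 0  == (0x3 << 0)          */
	/* BITFIELD(8, 18)  -> ((1u << 11) - 1) << 8  == (0x7ff << 8), as in
	 *                     OMAP4430_CORE_DPLL_EMU_MULT_MASK further down */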
/* Renamed from CLKSEL Used by CM_L3INIT_HSI_CLKCTRL */
#define OMAP4430_CLKSEL_24_25_SHIFT 24
-#define OMAP4430_CLKSEL_24_25_MASK BITFIELD(24, 25)
+#define OMAP4430_CLKSEL_24_25_MASK (0x3 << 24)
/* Used by CM_L3INIT_USB_OTG_CLKCTRL */
#define OMAP4430_CLKSEL_60M_SHIFT 24
-#define OMAP4430_CLKSEL_60M_MASK BITFIELD(24, 24)
+#define OMAP4430_CLKSEL_60M_MASK (1 << 24)
/* Used by CM1_ABE_AESS_CLKCTRL */
#define OMAP4430_CLKSEL_AESS_FCLK_SHIFT 24
-#define OMAP4430_CLKSEL_AESS_FCLK_MASK BITFIELD(24, 24)
+#define OMAP4430_CLKSEL_AESS_FCLK_MASK (1 << 24)
-/* Used by CM_CLKSEL_CORE_RESTORE, CM_CLKSEL_CORE */
+/* Used by CM_CLKSEL_CORE, CM_CLKSEL_CORE_RESTORE */
#define OMAP4430_CLKSEL_CORE_SHIFT 0
-#define OMAP4430_CLKSEL_CORE_MASK BITFIELD(0, 0)
+#define OMAP4430_CLKSEL_CORE_MASK (1 << 0)
-/* Renamed from CLKSEL_CORE Used by CM_SHADOW_FREQ_CONFIG2 */
+/*
+ * Renamed from CLKSEL_CORE Used by CM_SHADOW_FREQ_CONFIG2_RESTORE,
+ * CM_SHADOW_FREQ_CONFIG2
+ */
#define OMAP4430_CLKSEL_CORE_1_1_SHIFT 1
-#define OMAP4430_CLKSEL_CORE_1_1_MASK BITFIELD(1, 1)
+#define OMAP4430_CLKSEL_CORE_1_1_MASK (1 << 1)
/* Used by CM_WKUP_USIM_CLKCTRL */
#define OMAP4430_CLKSEL_DIV_SHIFT 24
-#define OMAP4430_CLKSEL_DIV_MASK BITFIELD(24, 24)
+#define OMAP4430_CLKSEL_DIV_MASK (1 << 24)
/* Used by CM_CAM_FDIF_CLKCTRL */
#define OMAP4430_CLKSEL_FCLK_SHIFT 24
-#define OMAP4430_CLKSEL_FCLK_MASK BITFIELD(24, 25)
+#define OMAP4430_CLKSEL_FCLK_MASK (0x3 << 24)
/* Used by CM_L4PER_MCBSP4_CLKCTRL */
#define OMAP4430_CLKSEL_INTERNAL_SOURCE_SHIFT 25
-#define OMAP4430_CLKSEL_INTERNAL_SOURCE_MASK BITFIELD(25, 25)
+#define OMAP4430_CLKSEL_INTERNAL_SOURCE_MASK (1 << 25)
/*
* Renamed from CLKSEL_INTERNAL_SOURCE Used by CM1_ABE_DMIC_CLKCTRL,
@@ -479,836 +485,869 @@
* CM1_ABE_MCBSP3_CLKCTRL
*/
#define OMAP4430_CLKSEL_INTERNAL_SOURCE_CM1_ABE_DMIC_SHIFT 26
-#define OMAP4430_CLKSEL_INTERNAL_SOURCE_CM1_ABE_DMIC_MASK BITFIELD(26, 27)
+#define OMAP4430_CLKSEL_INTERNAL_SOURCE_CM1_ABE_DMIC_MASK (0x3 << 26)
-/* Used by CM_CLKSEL_CORE_RESTORE, CM_CLKSEL_CORE */
+/* Used by CM_CLKSEL_CORE, CM_CLKSEL_CORE_RESTORE */
#define OMAP4430_CLKSEL_L3_SHIFT 4
-#define OMAP4430_CLKSEL_L3_MASK BITFIELD(4, 4)
+#define OMAP4430_CLKSEL_L3_MASK (1 << 4)
-/* Renamed from CLKSEL_L3 Used by CM_SHADOW_FREQ_CONFIG2 */
+/*
+ * Renamed from CLKSEL_L3 Used by CM_SHADOW_FREQ_CONFIG2_RESTORE,
+ * CM_SHADOW_FREQ_CONFIG2
+ */
#define OMAP4430_CLKSEL_L3_SHADOW_SHIFT 2
-#define OMAP4430_CLKSEL_L3_SHADOW_MASK BITFIELD(2, 2)
+#define OMAP4430_CLKSEL_L3_SHADOW_MASK (1 << 2)
-/* Used by CM_CLKSEL_CORE_RESTORE, CM_CLKSEL_CORE */
+/* Used by CM_CLKSEL_CORE, CM_CLKSEL_CORE_RESTORE */
#define OMAP4430_CLKSEL_L4_SHIFT 8
-#define OMAP4430_CLKSEL_L4_MASK BITFIELD(8, 8)
+#define OMAP4430_CLKSEL_L4_MASK (1 << 8)
/* Used by CM_CLKSEL_ABE */
#define OMAP4430_CLKSEL_OPP_SHIFT 0
-#define OMAP4430_CLKSEL_OPP_MASK BITFIELD(0, 1)
-
-/* Used by CM_GFX_GFX_CLKCTRL */
-#define OMAP4430_CLKSEL_PER_192M_SHIFT 25
-#define OMAP4430_CLKSEL_PER_192M_MASK BITFIELD(25, 26)
+#define OMAP4430_CLKSEL_OPP_MASK (0x3 << 0)
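For context (editorial aside, not part of the patch): drivers consume these SHIFT/MASK pairs as a plain read-modify-write of the register field. A hypothetical sketch — readl()/writel() are the standard kernel MMIO accessors, cm_clksel_abe_va and new_opp are placeholders, and only the OMAP4430_CLKSEL_OPP_* macros come from this header:

	/* Read the two-bit OPP field out of CM_CLKSEL_ABE ... */
	u32 v = readl(cm_clksel_abe_va);	/* placeholder iomem address */
	u32 opp = (v & OMAP4430_CLKSEL_OPP_MASK) >> OMAP4430_CLKSEL_OPP_SHIFT;

	/* ... then write a new value back into the same field. */
	v &= ~OMAP4430_CLKSEL_OPP_MASK;
	v |= (new_opp << OMAP4430_CLKSEL_OPP_SHIFT) & OMAP4430_CLKSEL_OPP_MASK;
	writel(v, cm_clksel_abe_va);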
/* Used by CM_EMU_DEBUGSS_CLKCTRL */
#define OMAP4430_CLKSEL_PMD_STM_CLK_SHIFT 27
-#define OMAP4430_CLKSEL_PMD_STM_CLK_MASK BITFIELD(27, 29)
+#define OMAP4430_CLKSEL_PMD_STM_CLK_MASK (0x7 << 27)
/* Used by CM_EMU_DEBUGSS_CLKCTRL */
#define OMAP4430_CLKSEL_PMD_TRACE_CLK_SHIFT 24
-#define OMAP4430_CLKSEL_PMD_TRACE_CLK_MASK BITFIELD(24, 26)
+#define OMAP4430_CLKSEL_PMD_TRACE_CLK_MASK (0x7 << 24)
/* Used by CM_GFX_GFX_CLKCTRL */
#define OMAP4430_CLKSEL_SGX_FCLK_SHIFT 24
-#define OMAP4430_CLKSEL_SGX_FCLK_MASK BITFIELD(24, 24)
+#define OMAP4430_CLKSEL_SGX_FCLK_MASK (1 << 24)
/*
* Used by CM1_ABE_DMIC_CLKCTRL, CM1_ABE_MCASP_CLKCTRL, CM1_ABE_MCBSP1_CLKCTRL,
* CM1_ABE_MCBSP2_CLKCTRL, CM1_ABE_MCBSP3_CLKCTRL
*/
#define OMAP4430_CLKSEL_SOURCE_SHIFT 24
-#define OMAP4430_CLKSEL_SOURCE_MASK BITFIELD(24, 25)
+#define OMAP4430_CLKSEL_SOURCE_MASK (0x3 << 24)
/* Renamed from CLKSEL_SOURCE Used by CM_L4PER_MCBSP4_CLKCTRL */
#define OMAP4430_CLKSEL_SOURCE_24_24_SHIFT 24
-#define OMAP4430_CLKSEL_SOURCE_24_24_MASK BITFIELD(24, 24)
+#define OMAP4430_CLKSEL_SOURCE_24_24_MASK (1 << 24)
/* Used by CM_L3INIT_USB_HOST_CLKCTRL, CM_L3INIT_USB_HOST_CLKCTRL_RESTORE */
#define OMAP4430_CLKSEL_UTMI_P1_SHIFT 24
-#define OMAP4430_CLKSEL_UTMI_P1_MASK BITFIELD(24, 24)
+#define OMAP4430_CLKSEL_UTMI_P1_MASK (1 << 24)
/* Used by CM_L3INIT_USB_HOST_CLKCTRL, CM_L3INIT_USB_HOST_CLKCTRL_RESTORE */
#define OMAP4430_CLKSEL_UTMI_P2_SHIFT 25
-#define OMAP4430_CLKSEL_UTMI_P2_MASK BITFIELD(25, 25)
+#define OMAP4430_CLKSEL_UTMI_P2_MASK (1 << 25)
/*
- * Used by CM_WKUP_CLKSTCTRL, CM_EMU_CLKSTCTRL, CM_D2D_CLKSTCTRL,
- * CM_DUCATI_CLKSTCTRL, CM_L3INSTR_CLKSTCTRL, CM_L3_1_CLKSTCTRL,
- * CM_L3_2_CLKSTCTRL, CM_L4CFG_CLKSTCTRL, CM_MEMIF_CLKSTCTRL,
- * CM_SDMA_CLKSTCTRL, CM_GFX_CLKSTCTRL, CM_L4PER_CLKSTCTRL, CM_L4SEC_CLKSTCTRL,
- * CM_L3INIT_CLKSTCTRL, CM_CAM_CLKSTCTRL, CM_CEFUSE_CLKSTCTRL,
- * CM_L3INIT_CLKSTCTRL_RESTORE, CM_L3_1_CLKSTCTRL_RESTORE,
- * CM_L3_2_CLKSTCTRL_RESTORE, CM_L4CFG_CLKSTCTRL_RESTORE,
- * CM_L4PER_CLKSTCTRL_RESTORE, CM_MEMIF_CLKSTCTRL_RESTORE, CM_ALWON_CLKSTCTRL,
- * CM_IVAHD_CLKSTCTRL, CM_DSS_CLKSTCTRL, CM_MPU_CLKSTCTRL, CM_TESLA_CLKSTCTRL,
- * CM1_ABE_CLKSTCTRL, CM_MPU_CLKSTCTRL_RESTORE
+ * Used by CM1_ABE_CLKSTCTRL, CM_ALWON_CLKSTCTRL, CM_CAM_CLKSTCTRL,
+ * CM_CEFUSE_CLKSTCTRL, CM_D2D_CLKSTCTRL, CM_DSS_CLKSTCTRL,
+ * CM_DUCATI_CLKSTCTRL, CM_EMU_CLKSTCTRL, CM_GFX_CLKSTCTRL, CM_IVAHD_CLKSTCTRL,
+ * CM_L3INIT_CLKSTCTRL, CM_L3INIT_CLKSTCTRL_RESTORE, CM_L3INSTR_CLKSTCTRL,
+ * CM_L3_1_CLKSTCTRL, CM_L3_1_CLKSTCTRL_RESTORE, CM_L3_2_CLKSTCTRL,
+ * CM_L3_2_CLKSTCTRL_RESTORE, CM_L4CFG_CLKSTCTRL, CM_L4CFG_CLKSTCTRL_RESTORE,
+ * CM_L4PER_CLKSTCTRL, CM_L4PER_CLKSTCTRL_RESTORE, CM_L4SEC_CLKSTCTRL,
+ * CM_MEMIF_CLKSTCTRL, CM_MEMIF_CLKSTCTRL_RESTORE, CM_MPU_CLKSTCTRL,
+ * CM_MPU_CLKSTCTRL_RESTORE, CM_SDMA_CLKSTCTRL, CM_TESLA_CLKSTCTRL,
+ * CM_WKUP_CLKSTCTRL
*/
#define OMAP4430_CLKTRCTRL_SHIFT 0
-#define OMAP4430_CLKTRCTRL_MASK BITFIELD(0, 1)
+#define OMAP4430_CLKTRCTRL_MASK (0x3 << 0)
/* Used by CM_EMU_OVERRIDE_DPLL_CORE */
#define OMAP4430_CORE_DPLL_EMU_DIV_SHIFT 0
-#define OMAP4430_CORE_DPLL_EMU_DIV_MASK BITFIELD(0, 6)
+#define OMAP4430_CORE_DPLL_EMU_DIV_MASK (0x7f << 0)
/* Used by CM_EMU_OVERRIDE_DPLL_CORE */
#define OMAP4430_CORE_DPLL_EMU_MULT_SHIFT 8
-#define OMAP4430_CORE_DPLL_EMU_MULT_MASK BITFIELD(8, 18)
+#define OMAP4430_CORE_DPLL_EMU_MULT_MASK (0x7ff << 8)
+
+/* Used by REVISION_CM1, REVISION_CM2 */
+#define OMAP4430_CUSTOM_SHIFT 6
+#define OMAP4430_CUSTOM_MASK (0x3 << 6)
-/* Used by CM_L3_2_DYNAMICDEP, CM_L4CFG_DYNAMICDEP */
+/*
+ * Used by CM_L3_2_DYNAMICDEP, CM_L3_2_DYNAMICDEP_RESTORE, CM_L4CFG_DYNAMICDEP,
+ * CM_L4CFG_DYNAMICDEP_RESTORE
+ */
#define OMAP4430_D2D_DYNDEP_SHIFT 18
-#define OMAP4430_D2D_DYNDEP_MASK BITFIELD(18, 18)
+#define OMAP4430_D2D_DYNDEP_MASK (1 << 18)
/* Used by CM_MPU_STATICDEP */
#define OMAP4430_D2D_STATDEP_SHIFT 18
-#define OMAP4430_D2D_STATDEP_MASK BITFIELD(18, 18)
+#define OMAP4430_D2D_STATDEP_MASK (1 << 18)
/*
- * Used by CM_SSC_DELTAMSTEP_DPLL_PER, CM_SSC_DELTAMSTEP_DPLL_UNIPRO,
- * CM_SSC_DELTAMSTEP_DPLL_USB, CM_SSC_DELTAMSTEP_DPLL_CORE_RESTORE,
- * CM_SSC_DELTAMSTEP_DPLL_ABE, CM_SSC_DELTAMSTEP_DPLL_CORE,
- * CM_SSC_DELTAMSTEP_DPLL_DDRPHY, CM_SSC_DELTAMSTEP_DPLL_IVA,
- * CM_SSC_DELTAMSTEP_DPLL_MPU
+ * Used by CM_SSC_DELTAMSTEP_DPLL_ABE, CM_SSC_DELTAMSTEP_DPLL_CORE,
+ * CM_SSC_DELTAMSTEP_DPLL_CORE_RESTORE, CM_SSC_DELTAMSTEP_DPLL_DDRPHY,
+ * CM_SSC_DELTAMSTEP_DPLL_IVA, CM_SSC_DELTAMSTEP_DPLL_MPU,
+ * CM_SSC_DELTAMSTEP_DPLL_PER, CM_SSC_DELTAMSTEP_DPLL_UNIPRO,
+ * CM_SSC_DELTAMSTEP_DPLL_USB
*/
#define OMAP4430_DELTAMSTEP_SHIFT 0
-#define OMAP4430_DELTAMSTEP_MASK BITFIELD(0, 19)
+#define OMAP4430_DELTAMSTEP_MASK (0xfffff << 0)
-/* Used by CM_SHADOW_FREQ_CONFIG1_RESTORE, CM_SHADOW_FREQ_CONFIG1 */
+/* Used by CM_SHADOW_FREQ_CONFIG1, CM_SHADOW_FREQ_CONFIG1_RESTORE */
#define OMAP4430_DLL_OVERRIDE_SHIFT 2
-#define OMAP4430_DLL_OVERRIDE_MASK BITFIELD(2, 2)
+#define OMAP4430_DLL_OVERRIDE_MASK (1 << 2)
/* Renamed from DLL_OVERRIDE Used by CM_DLL_CTRL */
#define OMAP4430_DLL_OVERRIDE_0_0_SHIFT 0
-#define OMAP4430_DLL_OVERRIDE_0_0_MASK BITFIELD(0, 0)
+#define OMAP4430_DLL_OVERRIDE_0_0_MASK (1 << 0)
-/* Used by CM_SHADOW_FREQ_CONFIG1_RESTORE, CM_SHADOW_FREQ_CONFIG1 */
+/* Used by CM_SHADOW_FREQ_CONFIG1, CM_SHADOW_FREQ_CONFIG1_RESTORE */
#define OMAP4430_DLL_RESET_SHIFT 3
-#define OMAP4430_DLL_RESET_MASK BITFIELD(3, 3)
+#define OMAP4430_DLL_RESET_MASK (1 << 3)
/*
- * Used by CM_CLKSEL_DPLL_PER, CM_CLKSEL_DPLL_UNIPRO, CM_CLKSEL_DPLL_USB,
- * CM_CLKSEL_DPLL_CORE_RESTORE, CM_CLKSEL_DPLL_ABE, CM_CLKSEL_DPLL_CORE,
- * CM_CLKSEL_DPLL_DDRPHY, CM_CLKSEL_DPLL_IVA, CM_CLKSEL_DPLL_MPU
+ * Used by CM_CLKSEL_DPLL_ABE, CM_CLKSEL_DPLL_CORE,
+ * CM_CLKSEL_DPLL_CORE_RESTORE, CM_CLKSEL_DPLL_DDRPHY, CM_CLKSEL_DPLL_IVA,
+ * CM_CLKSEL_DPLL_MPU, CM_CLKSEL_DPLL_PER, CM_CLKSEL_DPLL_UNIPRO,
+ * CM_CLKSEL_DPLL_USB
*/
#define OMAP4430_DPLL_BYP_CLKSEL_SHIFT 23
-#define OMAP4430_DPLL_BYP_CLKSEL_MASK BITFIELD(23, 23)
+#define OMAP4430_DPLL_BYP_CLKSEL_MASK (1 << 23)
/* Used by CM_CLKDCOLDO_DPLL_USB */
#define OMAP4430_DPLL_CLKDCOLDO_GATE_CTRL_SHIFT 8
-#define OMAP4430_DPLL_CLKDCOLDO_GATE_CTRL_MASK BITFIELD(8, 8)
+#define OMAP4430_DPLL_CLKDCOLDO_GATE_CTRL_MASK (1 << 8)
-/* Used by CM_CLKSEL_DPLL_CORE_RESTORE, CM_CLKSEL_DPLL_CORE */
+/* Used by CM_CLKSEL_DPLL_CORE, CM_CLKSEL_DPLL_CORE_RESTORE */
#define OMAP4430_DPLL_CLKOUTHIF_CLKSEL_SHIFT 20
-#define OMAP4430_DPLL_CLKOUTHIF_CLKSEL_MASK BITFIELD(20, 20)
+#define OMAP4430_DPLL_CLKOUTHIF_CLKSEL_MASK (1 << 20)
/*
- * Used by CM_DIV_M3_DPLL_PER, CM_DIV_M3_DPLL_CORE_RESTORE, CM_DIV_M3_DPLL_ABE,
- * CM_DIV_M3_DPLL_CORE
+ * Used by CM_DIV_M3_DPLL_ABE, CM_DIV_M3_DPLL_CORE,
+ * CM_DIV_M3_DPLL_CORE_RESTORE, CM_DIV_M3_DPLL_PER
*/
#define OMAP4430_DPLL_CLKOUTHIF_DIV_SHIFT 0
-#define OMAP4430_DPLL_CLKOUTHIF_DIV_MASK BITFIELD(0, 4)
+#define OMAP4430_DPLL_CLKOUTHIF_DIV_MASK (0x1f << 0)
/*
- * Used by CM_DIV_M3_DPLL_PER, CM_DIV_M3_DPLL_CORE_RESTORE, CM_DIV_M3_DPLL_ABE,
- * CM_DIV_M3_DPLL_CORE
+ * Used by CM_DIV_M3_DPLL_ABE, CM_DIV_M3_DPLL_CORE,
+ * CM_DIV_M3_DPLL_CORE_RESTORE, CM_DIV_M3_DPLL_PER
*/
#define OMAP4430_DPLL_CLKOUTHIF_DIVCHACK_SHIFT 5
-#define OMAP4430_DPLL_CLKOUTHIF_DIVCHACK_MASK BITFIELD(5, 5)
+#define OMAP4430_DPLL_CLKOUTHIF_DIVCHACK_MASK (1 << 5)
/*
- * Used by CM_DIV_M3_DPLL_PER, CM_DIV_M3_DPLL_CORE_RESTORE, CM_DIV_M3_DPLL_ABE,
- * CM_DIV_M3_DPLL_CORE
+ * Used by CM_DIV_M3_DPLL_ABE, CM_DIV_M3_DPLL_CORE,
+ * CM_DIV_M3_DPLL_CORE_RESTORE, CM_DIV_M3_DPLL_PER
*/
#define OMAP4430_DPLL_CLKOUTHIF_GATE_CTRL_SHIFT 8
-#define OMAP4430_DPLL_CLKOUTHIF_GATE_CTRL_MASK BITFIELD(8, 8)
+#define OMAP4430_DPLL_CLKOUTHIF_GATE_CTRL_MASK (1 << 8)
-/* Used by CM_DIV_M2_DPLL_PER, CM_DIV_M2_DPLL_UNIPRO, CM_DIV_M2_DPLL_ABE */
+/* Used by CM_DIV_M2_DPLL_ABE, CM_DIV_M2_DPLL_PER, CM_DIV_M2_DPLL_UNIPRO */
#define OMAP4430_DPLL_CLKOUTX2_GATE_CTRL_SHIFT 10
-#define OMAP4430_DPLL_CLKOUTX2_GATE_CTRL_MASK BITFIELD(10, 10)
+#define OMAP4430_DPLL_CLKOUTX2_GATE_CTRL_MASK (1 << 10)
/*
- * Used by CM_DIV_M2_DPLL_PER, CM_DIV_M2_DPLL_UNIPRO,
- * CM_DIV_M2_DPLL_CORE_RESTORE, CM_DIV_M2_DPLL_ABE, CM_DIV_M2_DPLL_CORE,
- * CM_DIV_M2_DPLL_DDRPHY, CM_DIV_M2_DPLL_MPU
+ * Used by CM_DIV_M2_DPLL_ABE, CM_DIV_M2_DPLL_CORE,
+ * CM_DIV_M2_DPLL_CORE_RESTORE, CM_DIV_M2_DPLL_DDRPHY, CM_DIV_M2_DPLL_MPU,
+ * CM_DIV_M2_DPLL_PER, CM_DIV_M2_DPLL_UNIPRO
*/
#define OMAP4430_DPLL_CLKOUT_DIV_SHIFT 0
-#define OMAP4430_DPLL_CLKOUT_DIV_MASK BITFIELD(0, 4)
+#define OMAP4430_DPLL_CLKOUT_DIV_MASK (0x1f << 0)
/* Renamed from DPLL_CLKOUT_DIV Used by CM_DIV_M2_DPLL_USB */
#define OMAP4430_DPLL_CLKOUT_DIV_0_6_SHIFT 0
-#define OMAP4430_DPLL_CLKOUT_DIV_0_6_MASK BITFIELD(0, 6)
+#define OMAP4430_DPLL_CLKOUT_DIV_0_6_MASK (0x7f << 0)
/*
- * Used by CM_DIV_M2_DPLL_PER, CM_DIV_M2_DPLL_UNIPRO,
- * CM_DIV_M2_DPLL_CORE_RESTORE, CM_DIV_M2_DPLL_ABE, CM_DIV_M2_DPLL_CORE,
- * CM_DIV_M2_DPLL_DDRPHY, CM_DIV_M2_DPLL_MPU
+ * Used by CM_DIV_M2_DPLL_ABE, CM_DIV_M2_DPLL_CORE,
+ * CM_DIV_M2_DPLL_CORE_RESTORE, CM_DIV_M2_DPLL_DDRPHY, CM_DIV_M2_DPLL_MPU,
+ * CM_DIV_M2_DPLL_PER, CM_DIV_M2_DPLL_UNIPRO
*/
#define OMAP4430_DPLL_CLKOUT_DIVCHACK_SHIFT 5
-#define OMAP4430_DPLL_CLKOUT_DIVCHACK_MASK BITFIELD(5, 5)
+#define OMAP4430_DPLL_CLKOUT_DIVCHACK_MASK (1 << 5)
/* Renamed from DPLL_CLKOUT_DIVCHACK Used by CM_DIV_M2_DPLL_USB */
#define OMAP4430_DPLL_CLKOUT_DIVCHACK_M2_USB_SHIFT 7
-#define OMAP4430_DPLL_CLKOUT_DIVCHACK_M2_USB_MASK BITFIELD(7, 7)
+#define OMAP4430_DPLL_CLKOUT_DIVCHACK_M2_USB_MASK (1 << 7)
/*
- * Used by CM_DIV_M2_DPLL_PER, CM_DIV_M2_DPLL_USB, CM_DIV_M2_DPLL_CORE_RESTORE,
- * CM_DIV_M2_DPLL_ABE, CM_DIV_M2_DPLL_CORE, CM_DIV_M2_DPLL_DDRPHY,
- * CM_DIV_M2_DPLL_MPU
+ * Used by CM_DIV_M2_DPLL_ABE, CM_DIV_M2_DPLL_CORE,
+ * CM_DIV_M2_DPLL_CORE_RESTORE, CM_DIV_M2_DPLL_DDRPHY, CM_DIV_M2_DPLL_MPU,
+ * CM_DIV_M2_DPLL_PER, CM_DIV_M2_DPLL_USB
*/
#define OMAP4430_DPLL_CLKOUT_GATE_CTRL_SHIFT 8
-#define OMAP4430_DPLL_CLKOUT_GATE_CTRL_MASK BITFIELD(8, 8)
+#define OMAP4430_DPLL_CLKOUT_GATE_CTRL_MASK (1 << 8)
-/* Used by CM_SHADOW_FREQ_CONFIG1_RESTORE, CM_SHADOW_FREQ_CONFIG1 */
+/* Used by CM_SHADOW_FREQ_CONFIG1, CM_SHADOW_FREQ_CONFIG1_RESTORE */
#define OMAP4430_DPLL_CORE_DPLL_EN_SHIFT 8
-#define OMAP4430_DPLL_CORE_DPLL_EN_MASK BITFIELD(8, 10)
+#define OMAP4430_DPLL_CORE_DPLL_EN_MASK (0x7 << 8)
-/* Used by CM_SHADOW_FREQ_CONFIG1_RESTORE, CM_SHADOW_FREQ_CONFIG1 */
+/* Used by CM_SHADOW_FREQ_CONFIG1, CM_SHADOW_FREQ_CONFIG1_RESTORE */
#define OMAP4430_DPLL_CORE_M2_DIV_SHIFT 11
-#define OMAP4430_DPLL_CORE_M2_DIV_MASK BITFIELD(11, 15)
+#define OMAP4430_DPLL_CORE_M2_DIV_MASK (0x1f << 11)
-/* Used by CM_SHADOW_FREQ_CONFIG2 */
+/* Used by CM_SHADOW_FREQ_CONFIG2, CM_SHADOW_FREQ_CONFIG2_RESTORE */
#define OMAP4430_DPLL_CORE_M5_DIV_SHIFT 3
-#define OMAP4430_DPLL_CORE_M5_DIV_MASK BITFIELD(3, 7)
-
-/* Used by CM_SHADOW_FREQ_CONFIG1_RESTORE, CM_SHADOW_FREQ_CONFIG1 */
-#define OMAP4430_DPLL_CORE_SYS_REF_CLKSEL_SHIFT 1
-#define OMAP4430_DPLL_CORE_SYS_REF_CLKSEL_MASK BITFIELD(1, 1)
+#define OMAP4430_DPLL_CORE_M5_DIV_MASK (0x1f << 3)
/*
- * Used by CM_CLKSEL_DPLL_PER, CM_CLKSEL_DPLL_UNIPRO,
- * CM_CLKSEL_DPLL_CORE_RESTORE, CM_CLKSEL_DPLL_ABE, CM_CLKSEL_DPLL_CORE,
- * CM_CLKSEL_DPLL_DDRPHY, CM_CLKSEL_DPLL_IVA, CM_CLKSEL_DPLL_MPU
+ * Used by CM_CLKSEL_DPLL_ABE, CM_CLKSEL_DPLL_CORE,
+ * CM_CLKSEL_DPLL_CORE_RESTORE, CM_CLKSEL_DPLL_DDRPHY, CM_CLKSEL_DPLL_IVA,
+ * CM_CLKSEL_DPLL_MPU, CM_CLKSEL_DPLL_PER, CM_CLKSEL_DPLL_UNIPRO
*/
#define OMAP4430_DPLL_DIV_SHIFT 0
-#define OMAP4430_DPLL_DIV_MASK BITFIELD(0, 6)
+#define OMAP4430_DPLL_DIV_MASK (0x7f << 0)
/* Renamed from DPLL_DIV Used by CM_CLKSEL_DPLL_USB */
#define OMAP4430_DPLL_DIV_0_7_SHIFT 0
-#define OMAP4430_DPLL_DIV_0_7_MASK BITFIELD(0, 7)
+#define OMAP4430_DPLL_DIV_0_7_MASK (0xff << 0)
/*
- * Used by CM_CLKMODE_DPLL_PER, CM_CLKMODE_DPLL_USB,
- * CM_CLKMODE_DPLL_CORE_RESTORE, CM_CLKMODE_DPLL_ABE, CM_CLKMODE_DPLL_CORE,
- * CM_CLKMODE_DPLL_DDRPHY, CM_CLKMODE_DPLL_IVA, CM_CLKMODE_DPLL_MPU
+ * Used by CM_CLKMODE_DPLL_ABE, CM_CLKMODE_DPLL_CORE,
+ * CM_CLKMODE_DPLL_CORE_RESTORE, CM_CLKMODE_DPLL_DDRPHY, CM_CLKMODE_DPLL_IVA,
+ * CM_CLKMODE_DPLL_MPU, CM_CLKMODE_DPLL_PER
*/
#define OMAP4430_DPLL_DRIFTGUARD_EN_SHIFT 8
-#define OMAP4430_DPLL_DRIFTGUARD_EN_MASK BITFIELD(8, 8)
+#define OMAP4430_DPLL_DRIFTGUARD_EN_MASK (1 << 8)
/* Renamed from DPLL_DRIFTGUARD_EN Used by CM_CLKMODE_DPLL_UNIPRO */
#define OMAP4430_DPLL_DRIFTGUARD_EN_3_3_SHIFT 3
-#define OMAP4430_DPLL_DRIFTGUARD_EN_3_3_MASK BITFIELD(3, 3)
+#define OMAP4430_DPLL_DRIFTGUARD_EN_3_3_MASK (1 << 3)
/*
- * Used by CM_CLKMODE_DPLL_PER, CM_CLKMODE_DPLL_UNIPRO, CM_CLKMODE_DPLL_USB,
- * CM_CLKMODE_DPLL_CORE_RESTORE, CM_CLKMODE_DPLL_ABE, CM_CLKMODE_DPLL_CORE,
- * CM_CLKMODE_DPLL_DDRPHY, CM_CLKMODE_DPLL_IVA, CM_CLKMODE_DPLL_MPU
+ * Used by CM_CLKMODE_DPLL_ABE, CM_CLKMODE_DPLL_CORE,
+ * CM_CLKMODE_DPLL_CORE_RESTORE, CM_CLKMODE_DPLL_DDRPHY, CM_CLKMODE_DPLL_IVA,
+ * CM_CLKMODE_DPLL_MPU, CM_CLKMODE_DPLL_PER, CM_CLKMODE_DPLL_UNIPRO,
+ * CM_CLKMODE_DPLL_USB
*/
#define OMAP4430_DPLL_EN_SHIFT 0
-#define OMAP4430_DPLL_EN_MASK BITFIELD(0, 2)
+#define OMAP4430_DPLL_EN_MASK (0x7 << 0)
/*
- * Used by CM_CLKMODE_DPLL_PER, CM_CLKMODE_DPLL_UNIPRO,
- * CM_CLKMODE_DPLL_CORE_RESTORE, CM_CLKMODE_DPLL_ABE, CM_CLKMODE_DPLL_CORE,
- * CM_CLKMODE_DPLL_DDRPHY, CM_CLKMODE_DPLL_IVA, CM_CLKMODE_DPLL_MPU
+ * Used by CM_CLKMODE_DPLL_ABE, CM_CLKMODE_DPLL_CORE,
+ * CM_CLKMODE_DPLL_CORE_RESTORE, CM_CLKMODE_DPLL_DDRPHY, CM_CLKMODE_DPLL_IVA,
+ * CM_CLKMODE_DPLL_MPU, CM_CLKMODE_DPLL_PER, CM_CLKMODE_DPLL_UNIPRO
*/
#define OMAP4430_DPLL_LPMODE_EN_SHIFT 10
-#define OMAP4430_DPLL_LPMODE_EN_MASK BITFIELD(10, 10)
+#define OMAP4430_DPLL_LPMODE_EN_MASK (1 << 10)
/*
- * Used by CM_CLKSEL_DPLL_PER, CM_CLKSEL_DPLL_UNIPRO,
- * CM_CLKSEL_DPLL_CORE_RESTORE, CM_CLKSEL_DPLL_ABE, CM_CLKSEL_DPLL_CORE,
- * CM_CLKSEL_DPLL_DDRPHY, CM_CLKSEL_DPLL_IVA, CM_CLKSEL_DPLL_MPU
+ * Used by CM_CLKSEL_DPLL_ABE, CM_CLKSEL_DPLL_CORE,
+ * CM_CLKSEL_DPLL_CORE_RESTORE, CM_CLKSEL_DPLL_DDRPHY, CM_CLKSEL_DPLL_IVA,
+ * CM_CLKSEL_DPLL_MPU, CM_CLKSEL_DPLL_PER, CM_CLKSEL_DPLL_UNIPRO
*/
#define OMAP4430_DPLL_MULT_SHIFT 8
-#define OMAP4430_DPLL_MULT_MASK BITFIELD(8, 18)
+#define OMAP4430_DPLL_MULT_MASK (0x7ff << 8)
/* Renamed from DPLL_MULT Used by CM_CLKSEL_DPLL_USB */
#define OMAP4430_DPLL_MULT_USB_SHIFT 8
-#define OMAP4430_DPLL_MULT_USB_MASK BITFIELD(8, 19)
+#define OMAP4430_DPLL_MULT_USB_MASK (0xfff << 8)
/*
- * Used by CM_CLKMODE_DPLL_PER, CM_CLKMODE_DPLL_UNIPRO,
- * CM_CLKMODE_DPLL_CORE_RESTORE, CM_CLKMODE_DPLL_ABE, CM_CLKMODE_DPLL_CORE,
- * CM_CLKMODE_DPLL_DDRPHY, CM_CLKMODE_DPLL_IVA, CM_CLKMODE_DPLL_MPU
+ * Used by CM_CLKMODE_DPLL_ABE, CM_CLKMODE_DPLL_CORE,
+ * CM_CLKMODE_DPLL_CORE_RESTORE, CM_CLKMODE_DPLL_DDRPHY, CM_CLKMODE_DPLL_IVA,
+ * CM_CLKMODE_DPLL_MPU, CM_CLKMODE_DPLL_PER, CM_CLKMODE_DPLL_UNIPRO
*/
#define OMAP4430_DPLL_REGM4XEN_SHIFT 11
-#define OMAP4430_DPLL_REGM4XEN_MASK BITFIELD(11, 11)
+#define OMAP4430_DPLL_REGM4XEN_MASK (1 << 11)
/* Used by CM_CLKSEL_DPLL_USB */
#define OMAP4430_DPLL_SD_DIV_SHIFT 24
-#define OMAP4430_DPLL_SD_DIV_MASK BITFIELD(24, 31)
+#define OMAP4430_DPLL_SD_DIV_MASK (0xff << 24)
/*
- * Used by CM_CLKMODE_DPLL_PER, CM_CLKMODE_DPLL_UNIPRO, CM_CLKMODE_DPLL_USB,
- * CM_CLKMODE_DPLL_CORE_RESTORE, CM_CLKMODE_DPLL_ABE, CM_CLKMODE_DPLL_CORE,
- * CM_CLKMODE_DPLL_DDRPHY, CM_CLKMODE_DPLL_IVA, CM_CLKMODE_DPLL_MPU
+ * Used by CM_CLKMODE_DPLL_ABE, CM_CLKMODE_DPLL_CORE,
+ * CM_CLKMODE_DPLL_CORE_RESTORE, CM_CLKMODE_DPLL_DDRPHY, CM_CLKMODE_DPLL_IVA,
+ * CM_CLKMODE_DPLL_MPU, CM_CLKMODE_DPLL_PER, CM_CLKMODE_DPLL_UNIPRO,
+ * CM_CLKMODE_DPLL_USB
*/
#define OMAP4430_DPLL_SSC_ACK_SHIFT 13
-#define OMAP4430_DPLL_SSC_ACK_MASK BITFIELD(13, 13)
+#define OMAP4430_DPLL_SSC_ACK_MASK (1 << 13)
/*
- * Used by CM_CLKMODE_DPLL_PER, CM_CLKMODE_DPLL_UNIPRO, CM_CLKMODE_DPLL_USB,
- * CM_CLKMODE_DPLL_CORE_RESTORE, CM_CLKMODE_DPLL_ABE, CM_CLKMODE_DPLL_CORE,
- * CM_CLKMODE_DPLL_DDRPHY, CM_CLKMODE_DPLL_IVA, CM_CLKMODE_DPLL_MPU
+ * Used by CM_CLKMODE_DPLL_ABE, CM_CLKMODE_DPLL_CORE,
+ * CM_CLKMODE_DPLL_CORE_RESTORE, CM_CLKMODE_DPLL_DDRPHY, CM_CLKMODE_DPLL_IVA,
+ * CM_CLKMODE_DPLL_MPU, CM_CLKMODE_DPLL_PER, CM_CLKMODE_DPLL_UNIPRO,
+ * CM_CLKMODE_DPLL_USB
*/
#define OMAP4430_DPLL_SSC_DOWNSPREAD_SHIFT 14
-#define OMAP4430_DPLL_SSC_DOWNSPREAD_MASK BITFIELD(14, 14)
+#define OMAP4430_DPLL_SSC_DOWNSPREAD_MASK (1 << 14)
/*
- * Used by CM_CLKMODE_DPLL_PER, CM_CLKMODE_DPLL_UNIPRO, CM_CLKMODE_DPLL_USB,
- * CM_CLKMODE_DPLL_CORE_RESTORE, CM_CLKMODE_DPLL_ABE, CM_CLKMODE_DPLL_CORE,
- * CM_CLKMODE_DPLL_DDRPHY, CM_CLKMODE_DPLL_IVA, CM_CLKMODE_DPLL_MPU
+ * Used by CM_CLKMODE_DPLL_ABE, CM_CLKMODE_DPLL_CORE,
+ * CM_CLKMODE_DPLL_CORE_RESTORE, CM_CLKMODE_DPLL_DDRPHY, CM_CLKMODE_DPLL_IVA,
+ * CM_CLKMODE_DPLL_MPU, CM_CLKMODE_DPLL_PER, CM_CLKMODE_DPLL_UNIPRO,
+ * CM_CLKMODE_DPLL_USB
*/
#define OMAP4430_DPLL_SSC_EN_SHIFT 12
-#define OMAP4430_DPLL_SSC_EN_MASK BITFIELD(12, 12)
+#define OMAP4430_DPLL_SSC_EN_MASK (1 << 12)
-/* Used by CM_L3_2_DYNAMICDEP, CM_L4CFG_DYNAMICDEP, CM_L4PER_DYNAMICDEP */
+/*
+ * Used by CM_L3_2_DYNAMICDEP, CM_L3_2_DYNAMICDEP_RESTORE, CM_L4CFG_DYNAMICDEP,
+ * CM_L4CFG_DYNAMICDEP_RESTORE, CM_L4PER_DYNAMICDEP, CM_L4PER_DYNAMICDEP_RESTORE
+ */
#define OMAP4430_DSS_DYNDEP_SHIFT 8
-#define OMAP4430_DSS_DYNDEP_MASK BITFIELD(8, 8)
+#define OMAP4430_DSS_DYNDEP_MASK (1 << 8)
/*
- * Used by CM_DUCATI_STATICDEP, CM_SDMA_STATICDEP, CM_SDMA_STATICDEP_RESTORE,
- * CM_MPU_STATICDEP
+ * Used by CM_DUCATI_STATICDEP, CM_MPU_STATICDEP, CM_SDMA_STATICDEP,
+ * CM_SDMA_STATICDEP_RESTORE
*/
#define OMAP4430_DSS_STATDEP_SHIFT 8
-#define OMAP4430_DSS_STATDEP_MASK BITFIELD(8, 8)
+#define OMAP4430_DSS_STATDEP_MASK (1 << 8)
-/* Used by CM_L3_2_DYNAMICDEP */
+/* Used by CM_L3_2_DYNAMICDEP, CM_L3_2_DYNAMICDEP_RESTORE */
#define OMAP4430_DUCATI_DYNDEP_SHIFT 0
-#define OMAP4430_DUCATI_DYNDEP_MASK BITFIELD(0, 0)
+#define OMAP4430_DUCATI_DYNDEP_MASK (1 << 0)
-/* Used by CM_SDMA_STATICDEP, CM_SDMA_STATICDEP_RESTORE, CM_MPU_STATICDEP */
+/* Used by CM_MPU_STATICDEP, CM_SDMA_STATICDEP, CM_SDMA_STATICDEP_RESTORE */
#define OMAP4430_DUCATI_STATDEP_SHIFT 0
-#define OMAP4430_DUCATI_STATDEP_MASK BITFIELD(0, 0)
+#define OMAP4430_DUCATI_STATDEP_MASK (1 << 0)
-/* Used by CM_SHADOW_FREQ_CONFIG1_RESTORE, CM_SHADOW_FREQ_CONFIG1 */
+/* Used by CM_SHADOW_FREQ_CONFIG1, CM_SHADOW_FREQ_CONFIG1_RESTORE */
#define OMAP4430_FREQ_UPDATE_SHIFT 0
-#define OMAP4430_FREQ_UPDATE_MASK BITFIELD(0, 0)
+#define OMAP4430_FREQ_UPDATE_MASK (1 << 0)
+
+/* Used by REVISION_CM1, REVISION_CM2 */
+#define OMAP4430_FUNC_SHIFT 16
+#define OMAP4430_FUNC_MASK (0xfff << 16)
-/* Used by CM_L3_2_DYNAMICDEP */
+/* Used by CM_L3_2_DYNAMICDEP, CM_L3_2_DYNAMICDEP_RESTORE */
#define OMAP4430_GFX_DYNDEP_SHIFT 10
-#define OMAP4430_GFX_DYNDEP_MASK BITFIELD(10, 10)
+#define OMAP4430_GFX_DYNDEP_MASK (1 << 10)
/* Used by CM_DUCATI_STATICDEP, CM_MPU_STATICDEP */
#define OMAP4430_GFX_STATDEP_SHIFT 10
-#define OMAP4430_GFX_STATDEP_MASK BITFIELD(10, 10)
+#define OMAP4430_GFX_STATDEP_MASK (1 << 10)
-/* Used by CM_SHADOW_FREQ_CONFIG2 */
+/* Used by CM_SHADOW_FREQ_CONFIG2, CM_SHADOW_FREQ_CONFIG2_RESTORE */
#define OMAP4430_GPMC_FREQ_UPDATE_SHIFT 0
-#define OMAP4430_GPMC_FREQ_UPDATE_MASK BITFIELD(0, 0)
+#define OMAP4430_GPMC_FREQ_UPDATE_MASK (1 << 0)
/*
- * Used by CM_DIV_M4_DPLL_PER, CM_DIV_M4_DPLL_CORE_RESTORE,
- * CM_DIV_M4_DPLL_CORE, CM_DIV_M4_DPLL_DDRPHY, CM_DIV_M4_DPLL_IVA
+ * Used by CM_DIV_M4_DPLL_CORE, CM_DIV_M4_DPLL_CORE_RESTORE,
+ * CM_DIV_M4_DPLL_DDRPHY, CM_DIV_M4_DPLL_IVA, CM_DIV_M4_DPLL_PER
*/
#define OMAP4430_HSDIVIDER_CLKOUT1_DIV_SHIFT 0
-#define OMAP4430_HSDIVIDER_CLKOUT1_DIV_MASK BITFIELD(0, 4)
+#define OMAP4430_HSDIVIDER_CLKOUT1_DIV_MASK (0x1f << 0)
/*
- * Used by CM_DIV_M4_DPLL_PER, CM_DIV_M4_DPLL_CORE_RESTORE,
- * CM_DIV_M4_DPLL_CORE, CM_DIV_M4_DPLL_DDRPHY, CM_DIV_M4_DPLL_IVA
+ * Used by CM_DIV_M4_DPLL_CORE, CM_DIV_M4_DPLL_CORE_RESTORE,
+ * CM_DIV_M4_DPLL_DDRPHY, CM_DIV_M4_DPLL_IVA, CM_DIV_M4_DPLL_PER
*/
#define OMAP4430_HSDIVIDER_CLKOUT1_DIVCHACK_SHIFT 5
-#define OMAP4430_HSDIVIDER_CLKOUT1_DIVCHACK_MASK BITFIELD(5, 5)
+#define OMAP4430_HSDIVIDER_CLKOUT1_DIVCHACK_MASK (1 << 5)
/*
- * Used by CM_DIV_M4_DPLL_PER, CM_DIV_M4_DPLL_CORE_RESTORE,
- * CM_DIV_M4_DPLL_CORE, CM_DIV_M4_DPLL_DDRPHY, CM_DIV_M4_DPLL_IVA
+ * Used by CM_DIV_M4_DPLL_CORE, CM_DIV_M4_DPLL_CORE_RESTORE,
+ * CM_DIV_M4_DPLL_DDRPHY, CM_DIV_M4_DPLL_IVA, CM_DIV_M4_DPLL_PER
*/
#define OMAP4430_HSDIVIDER_CLKOUT1_GATE_CTRL_SHIFT 8
-#define OMAP4430_HSDIVIDER_CLKOUT1_GATE_CTRL_MASK BITFIELD(8, 8)
+#define OMAP4430_HSDIVIDER_CLKOUT1_GATE_CTRL_MASK (1 << 8)
/*
- * Used by CM_DIV_M4_DPLL_PER, CM_DIV_M4_DPLL_CORE_RESTORE,
- * CM_DIV_M4_DPLL_CORE, CM_DIV_M4_DPLL_DDRPHY, CM_DIV_M4_DPLL_IVA
+ * Used by CM_DIV_M4_DPLL_CORE, CM_DIV_M4_DPLL_CORE_RESTORE,
+ * CM_DIV_M4_DPLL_DDRPHY, CM_DIV_M4_DPLL_IVA, CM_DIV_M4_DPLL_PER
*/
#define OMAP4430_HSDIVIDER_CLKOUT1_PWDN_SHIFT 12
-#define OMAP4430_HSDIVIDER_CLKOUT1_PWDN_MASK BITFIELD(12, 12)
+#define OMAP4430_HSDIVIDER_CLKOUT1_PWDN_MASK (1 << 12)
/*
- * Used by CM_DIV_M5_DPLL_PER, CM_DIV_M5_DPLL_CORE_RESTORE,
- * CM_DIV_M5_DPLL_CORE, CM_DIV_M5_DPLL_DDRPHY, CM_DIV_M5_DPLL_IVA
+ * Used by CM_DIV_M5_DPLL_CORE, CM_DIV_M5_DPLL_CORE_RESTORE,
+ * CM_DIV_M5_DPLL_DDRPHY, CM_DIV_M5_DPLL_IVA, CM_DIV_M5_DPLL_PER
*/
#define OMAP4430_HSDIVIDER_CLKOUT2_DIV_SHIFT 0
-#define OMAP4430_HSDIVIDER_CLKOUT2_DIV_MASK BITFIELD(0, 4)
+#define OMAP4430_HSDIVIDER_CLKOUT2_DIV_MASK (0x1f << 0)
/*
- * Used by CM_DIV_M5_DPLL_PER, CM_DIV_M5_DPLL_CORE_RESTORE,
- * CM_DIV_M5_DPLL_CORE, CM_DIV_M5_DPLL_DDRPHY, CM_DIV_M5_DPLL_IVA
+ * Used by CM_DIV_M5_DPLL_CORE, CM_DIV_M5_DPLL_CORE_RESTORE,
+ * CM_DIV_M5_DPLL_DDRPHY, CM_DIV_M5_DPLL_IVA, CM_DIV_M5_DPLL_PER
*/
#define OMAP4430_HSDIVIDER_CLKOUT2_DIVCHACK_SHIFT 5
-#define OMAP4430_HSDIVIDER_CLKOUT2_DIVCHACK_MASK BITFIELD(5, 5)
+#define OMAP4430_HSDIVIDER_CLKOUT2_DIVCHACK_MASK (1 << 5)
/*
- * Used by CM_DIV_M5_DPLL_PER, CM_DIV_M5_DPLL_CORE_RESTORE,
- * CM_DIV_M5_DPLL_CORE, CM_DIV_M5_DPLL_DDRPHY, CM_DIV_M5_DPLL_IVA
+ * Used by CM_DIV_M5_DPLL_CORE, CM_DIV_M5_DPLL_CORE_RESTORE,
+ * CM_DIV_M5_DPLL_DDRPHY, CM_DIV_M5_DPLL_IVA, CM_DIV_M5_DPLL_PER
*/
#define OMAP4430_HSDIVIDER_CLKOUT2_GATE_CTRL_SHIFT 8
-#define OMAP4430_HSDIVIDER_CLKOUT2_GATE_CTRL_MASK BITFIELD(8, 8)
+#define OMAP4430_HSDIVIDER_CLKOUT2_GATE_CTRL_MASK (1 << 8)
/*
- * Used by CM_DIV_M5_DPLL_PER, CM_DIV_M5_DPLL_CORE_RESTORE,
- * CM_DIV_M5_DPLL_CORE, CM_DIV_M5_DPLL_DDRPHY, CM_DIV_M5_DPLL_IVA
+ * Used by CM_DIV_M5_DPLL_CORE, CM_DIV_M5_DPLL_CORE_RESTORE,
+ * CM_DIV_M5_DPLL_DDRPHY, CM_DIV_M5_DPLL_IVA, CM_DIV_M5_DPLL_PER
*/
#define OMAP4430_HSDIVIDER_CLKOUT2_PWDN_SHIFT 12
-#define OMAP4430_HSDIVIDER_CLKOUT2_PWDN_MASK BITFIELD(12, 12)
+#define OMAP4430_HSDIVIDER_CLKOUT2_PWDN_MASK (1 << 12)
/*
- * Used by CM_DIV_M6_DPLL_PER, CM_DIV_M6_DPLL_CORE_RESTORE,
- * CM_DIV_M6_DPLL_CORE, CM_DIV_M6_DPLL_DDRPHY
+ * Used by CM_DIV_M6_DPLL_CORE, CM_DIV_M6_DPLL_CORE_RESTORE,
+ * CM_DIV_M6_DPLL_DDRPHY, CM_DIV_M6_DPLL_PER
*/
#define OMAP4430_HSDIVIDER_CLKOUT3_DIV_SHIFT 0
-#define OMAP4430_HSDIVIDER_CLKOUT3_DIV_MASK BITFIELD(0, 4)
+#define OMAP4430_HSDIVIDER_CLKOUT3_DIV_MASK (0x1f << 0)
/*
- * Used by CM_DIV_M6_DPLL_PER, CM_DIV_M6_DPLL_CORE_RESTORE,
- * CM_DIV_M6_DPLL_CORE, CM_DIV_M6_DPLL_DDRPHY
+ * Used by CM_DIV_M6_DPLL_CORE, CM_DIV_M6_DPLL_CORE_RESTORE,
+ * CM_DIV_M6_DPLL_DDRPHY, CM_DIV_M6_DPLL_PER
*/
#define OMAP4430_HSDIVIDER_CLKOUT3_DIVCHACK_SHIFT 5
-#define OMAP4430_HSDIVIDER_CLKOUT3_DIVCHACK_MASK BITFIELD(5, 5)
+#define OMAP4430_HSDIVIDER_CLKOUT3_DIVCHACK_MASK (1 << 5)
/*
- * Used by CM_DIV_M6_DPLL_PER, CM_DIV_M6_DPLL_CORE_RESTORE,
- * CM_DIV_M6_DPLL_CORE, CM_DIV_M6_DPLL_DDRPHY
+ * Used by CM_DIV_M6_DPLL_CORE, CM_DIV_M6_DPLL_CORE_RESTORE,
+ * CM_DIV_M6_DPLL_DDRPHY, CM_DIV_M6_DPLL_PER
*/
#define OMAP4430_HSDIVIDER_CLKOUT3_GATE_CTRL_SHIFT 8
-#define OMAP4430_HSDIVIDER_CLKOUT3_GATE_CTRL_MASK BITFIELD(8, 8)
+#define OMAP4430_HSDIVIDER_CLKOUT3_GATE_CTRL_MASK (1 << 8)
/*
- * Used by CM_DIV_M6_DPLL_PER, CM_DIV_M6_DPLL_CORE_RESTORE,
- * CM_DIV_M6_DPLL_CORE, CM_DIV_M6_DPLL_DDRPHY
+ * Used by CM_DIV_M6_DPLL_CORE, CM_DIV_M6_DPLL_CORE_RESTORE,
+ * CM_DIV_M6_DPLL_DDRPHY, CM_DIV_M6_DPLL_PER
*/
#define OMAP4430_HSDIVIDER_CLKOUT3_PWDN_SHIFT 12
-#define OMAP4430_HSDIVIDER_CLKOUT3_PWDN_MASK BITFIELD(12, 12)
+#define OMAP4430_HSDIVIDER_CLKOUT3_PWDN_MASK (1 << 12)
/*
- * Used by CM_DIV_M7_DPLL_PER, CM_DIV_M7_DPLL_CORE_RESTORE,
- * CM_DIV_M7_DPLL_CORE
+ * Used by CM_DIV_M7_DPLL_CORE, CM_DIV_M7_DPLL_CORE_RESTORE,
+ * CM_DIV_M7_DPLL_PER
*/
#define OMAP4430_HSDIVIDER_CLKOUT4_DIV_SHIFT 0
-#define OMAP4430_HSDIVIDER_CLKOUT4_DIV_MASK BITFIELD(0, 4)
+#define OMAP4430_HSDIVIDER_CLKOUT4_DIV_MASK (0x1f << 0)
/*
- * Used by CM_DIV_M7_DPLL_PER, CM_DIV_M7_DPLL_CORE_RESTORE,
- * CM_DIV_M7_DPLL_CORE
+ * Used by CM_DIV_M7_DPLL_CORE, CM_DIV_M7_DPLL_CORE_RESTORE,
+ * CM_DIV_M7_DPLL_PER
*/
#define OMAP4430_HSDIVIDER_CLKOUT4_DIVCHACK_SHIFT 5
-#define OMAP4430_HSDIVIDER_CLKOUT4_DIVCHACK_MASK BITFIELD(5, 5)
+#define OMAP4430_HSDIVIDER_CLKOUT4_DIVCHACK_MASK (1 << 5)
/*
- * Used by CM_DIV_M7_DPLL_PER, CM_DIV_M7_DPLL_CORE_RESTORE,
- * CM_DIV_M7_DPLL_CORE
+ * Used by CM_DIV_M7_DPLL_CORE, CM_DIV_M7_DPLL_CORE_RESTORE,
+ * CM_DIV_M7_DPLL_PER
*/
#define OMAP4430_HSDIVIDER_CLKOUT4_GATE_CTRL_SHIFT 8
-#define OMAP4430_HSDIVIDER_CLKOUT4_GATE_CTRL_MASK BITFIELD(8, 8)
+#define OMAP4430_HSDIVIDER_CLKOUT4_GATE_CTRL_MASK (1 << 8)
/*
- * Used by CM_DIV_M7_DPLL_PER, CM_DIV_M7_DPLL_CORE_RESTORE,
- * CM_DIV_M7_DPLL_CORE
+ * Used by CM_DIV_M7_DPLL_CORE, CM_DIV_M7_DPLL_CORE_RESTORE,
+ * CM_DIV_M7_DPLL_PER
*/
#define OMAP4430_HSDIVIDER_CLKOUT4_PWDN_SHIFT 12
-#define OMAP4430_HSDIVIDER_CLKOUT4_PWDN_MASK BITFIELD(12, 12)
-
-/*
- * Used by PRM_PRM_PROFILING_CLKCTRL, CM_WKUP_GPIO1_CLKCTRL,
- * CM_WKUP_KEYBOARD_CLKCTRL, CM_WKUP_L4WKUP_CLKCTRL, CM_WKUP_RTC_CLKCTRL,
- * CM_WKUP_SARRAM_CLKCTRL, CM_WKUP_SYNCTIMER_CLKCTRL, CM_WKUP_TIMER12_CLKCTRL,
- * CM_WKUP_TIMER1_CLKCTRL, CM_WKUP_USIM_CLKCTRL, CM_WKUP_WDT1_CLKCTRL,
- * CM_WKUP_WDT2_CLKCTRL, CM_EMU_DEBUGSS_CLKCTRL, CM_D2D_MODEM_ICR_CLKCTRL,
- * CM_D2D_SAD2D_CLKCTRL, CM_D2D_SAD2D_FW_CLKCTRL, CM_DUCATI_DUCATI_CLKCTRL,
- * CM_L3INSTR_L3_3_CLKCTRL, CM_L3INSTR_L3_INSTR_CLKCTRL,
- * CM_L3INSTR_OCP_WP1_CLKCTRL, CM_L3_1_L3_1_CLKCTRL, CM_L3_2_GPMC_CLKCTRL,
- * CM_L3_2_L3_2_CLKCTRL, CM_L3_2_OCMC_RAM_CLKCTRL, CM_L4CFG_HW_SEM_CLKCTRL,
- * CM_L4CFG_L4_CFG_CLKCTRL, CM_L4CFG_MAILBOX_CLKCTRL, CM_L4CFG_SAR_ROM_CLKCTRL,
- * CM_MEMIF_DMM_CLKCTRL, CM_MEMIF_EMIF_1_CLKCTRL, CM_MEMIF_EMIF_2_CLKCTRL,
- * CM_MEMIF_EMIF_FW_CLKCTRL, CM_MEMIF_EMIF_H1_CLKCTRL,
- * CM_MEMIF_EMIF_H2_CLKCTRL, CM_SDMA_SDMA_CLKCTRL, CM_GFX_GFX_CLKCTRL,
- * CM_L4PER_ADC_CLKCTRL, CM_L4PER_DMTIMER10_CLKCTRL,
- * CM_L4PER_DMTIMER11_CLKCTRL, CM_L4PER_DMTIMER2_CLKCTRL,
- * CM_L4PER_DMTIMER3_CLKCTRL, CM_L4PER_DMTIMER4_CLKCTRL,
- * CM_L4PER_DMTIMER9_CLKCTRL, CM_L4PER_ELM_CLKCTRL, CM_L4PER_GPIO2_CLKCTRL,
- * CM_L4PER_GPIO3_CLKCTRL, CM_L4PER_GPIO4_CLKCTRL, CM_L4PER_GPIO5_CLKCTRL,
- * CM_L4PER_GPIO6_CLKCTRL, CM_L4PER_HDQ1W_CLKCTRL, CM_L4PER_HECC1_CLKCTRL,
- * CM_L4PER_HECC2_CLKCTRL, CM_L4PER_I2C1_CLKCTRL, CM_L4PER_I2C2_CLKCTRL,
- * CM_L4PER_I2C3_CLKCTRL, CM_L4PER_I2C4_CLKCTRL, CM_L4PER_I2C5_CLKCTRL,
- * CM_L4PER_L4PER_CLKCTRL, CM_L4PER_MCASP2_CLKCTRL, CM_L4PER_MCASP3_CLKCTRL,
- * CM_L4PER_MCBSP4_CLKCTRL, CM_L4PER_MCSPI1_CLKCTRL, CM_L4PER_MCSPI2_CLKCTRL,
- * CM_L4PER_MCSPI3_CLKCTRL, CM_L4PER_MCSPI4_CLKCTRL, CM_L4PER_MGATE_CLKCTRL,
- * CM_L4PER_MMCSD3_CLKCTRL, CM_L4PER_MMCSD4_CLKCTRL, CM_L4PER_MMCSD5_CLKCTRL,
- * CM_L4PER_MSPROHG_CLKCTRL, CM_L4PER_SLIMBUS2_CLKCTRL, CM_L4PER_UART1_CLKCTRL,
- * CM_L4PER_UART2_CLKCTRL, CM_L4PER_UART3_CLKCTRL, CM_L4PER_UART4_CLKCTRL,
- * CM_L4SEC_AES1_CLKCTRL, CM_L4SEC_AES2_CLKCTRL, CM_L4SEC_CRYPTODMA_CLKCTRL,
- * CM_L4SEC_DES3DES_CLKCTRL, CM_L4SEC_PKAEIP29_CLKCTRL, CM_L4SEC_RNG_CLKCTRL,
- * CM_L4SEC_SHA2MD51_CLKCTRL, CM_L3INIT_CCPTX_CLKCTRL, CM_L3INIT_EMAC_CLKCTRL,
+#define OMAP4430_HSDIVIDER_CLKOUT4_PWDN_MASK (1 << 12)
+
+/*
+ * Used by CM1_ABE_AESS_CLKCTRL, CM1_ABE_DMIC_CLKCTRL, CM1_ABE_L4ABE_CLKCTRL,
+ * CM1_ABE_MCASP_CLKCTRL, CM1_ABE_MCBSP1_CLKCTRL, CM1_ABE_MCBSP2_CLKCTRL,
+ * CM1_ABE_MCBSP3_CLKCTRL, CM1_ABE_PDM_CLKCTRL, CM1_ABE_SLIMBUS_CLKCTRL,
+ * CM1_ABE_TIMER5_CLKCTRL, CM1_ABE_TIMER6_CLKCTRL, CM1_ABE_TIMER7_CLKCTRL,
+ * CM1_ABE_TIMER8_CLKCTRL, CM1_ABE_WDT3_CLKCTRL, CM_ALWON_MDMINTC_CLKCTRL,
+ * CM_ALWON_SR_CORE_CLKCTRL, CM_ALWON_SR_IVA_CLKCTRL, CM_ALWON_SR_MPU_CLKCTRL,
+ * CM_CAM_FDIF_CLKCTRL, CM_CAM_ISS_CLKCTRL, CM_CEFUSE_CEFUSE_CLKCTRL,
+ * CM_CM1_PROFILING_CLKCTRL, CM_CM1_PROFILING_CLKCTRL_RESTORE,
+ * CM_CM2_PROFILING_CLKCTRL, CM_CM2_PROFILING_CLKCTRL_RESTORE,
+ * CM_D2D_MODEM_ICR_CLKCTRL, CM_D2D_SAD2D_CLKCTRL, CM_D2D_SAD2D_FW_CLKCTRL,
+ * CM_DSS_DEISS_CLKCTRL, CM_DSS_DSS_CLKCTRL, CM_DUCATI_DUCATI_CLKCTRL,
+ * CM_EMU_DEBUGSS_CLKCTRL, CM_GFX_GFX_CLKCTRL, CM_IVAHD_IVAHD_CLKCTRL,
+ * CM_IVAHD_SL2_CLKCTRL, CM_L3INIT_CCPTX_CLKCTRL, CM_L3INIT_EMAC_CLKCTRL,
* CM_L3INIT_HSI_CLKCTRL, CM_L3INIT_MMC1_CLKCTRL, CM_L3INIT_MMC2_CLKCTRL,
* CM_L3INIT_MMC6_CLKCTRL, CM_L3INIT_P1500_CLKCTRL, CM_L3INIT_PCIESS_CLKCTRL,
* CM_L3INIT_SATA_CLKCTRL, CM_L3INIT_TPPSS_CLKCTRL, CM_L3INIT_UNIPRO1_CLKCTRL,
* CM_L3INIT_USBPHYOCP2SCP_CLKCTRL, CM_L3INIT_USB_HOST_CLKCTRL,
- * CM_L3INIT_USB_HOST_FS_CLKCTRL, CM_L3INIT_USB_OTG_CLKCTRL,
- * CM_L3INIT_USB_TLL_CLKCTRL, CM_L3INIT_XHPI_CLKCTRL, CM_CAM_FDIF_CLKCTRL,
- * CM_CAM_ISS_CLKCTRL, CM_CEFUSE_CEFUSE_CLKCTRL,
- * CM_L3INIT_USB_HOST_CLKCTRL_RESTORE, CM_L3INIT_USB_TLL_CLKCTRL_RESTORE,
- * CM_L3INSTR_L3_3_CLKCTRL_RESTORE, CM_L3INSTR_L3_INSTR_CLKCTRL_RESTORE,
- * CM_L3INSTR_OCP_WP1_CLKCTRL_RESTORE, CM_L4PER_GPIO2_CLKCTRL_RESTORE,
- * CM_L4PER_GPIO3_CLKCTRL_RESTORE, CM_L4PER_GPIO4_CLKCTRL_RESTORE,
- * CM_L4PER_GPIO5_CLKCTRL_RESTORE, CM_L4PER_GPIO6_CLKCTRL_RESTORE,
- * CM_ALWON_MDMINTC_CLKCTRL, CM_ALWON_SR_CORE_CLKCTRL, CM_ALWON_SR_IVA_CLKCTRL,
- * CM_ALWON_SR_MPU_CLKCTRL, CM_IVAHD_IVAHD_CLKCTRL, CM_IVAHD_SL2_CLKCTRL,
- * CM_DSS_DEISS_CLKCTRL, CM_DSS_DSS_CLKCTRL, CM_CM2_PROFILING_CLKCTRL,
- * CM_MPU_MPU_CLKCTRL, CM_TESLA_TESLA_CLKCTRL, CM1_ABE_AESS_CLKCTRL,
- * CM1_ABE_DMIC_CLKCTRL, CM1_ABE_L4ABE_CLKCTRL, CM1_ABE_MCASP_CLKCTRL,
- * CM1_ABE_MCBSP1_CLKCTRL, CM1_ABE_MCBSP2_CLKCTRL, CM1_ABE_MCBSP3_CLKCTRL,
- * CM1_ABE_PDM_CLKCTRL, CM1_ABE_SLIMBUS_CLKCTRL, CM1_ABE_TIMER5_CLKCTRL,
- * CM1_ABE_TIMER6_CLKCTRL, CM1_ABE_TIMER7_CLKCTRL, CM1_ABE_TIMER8_CLKCTRL,
- * CM1_ABE_WDT3_CLKCTRL, CM_CM1_PROFILING_CLKCTRL
+ * CM_L3INIT_USB_HOST_CLKCTRL_RESTORE, CM_L3INIT_USB_HOST_FS_CLKCTRL,
+ * CM_L3INIT_USB_OTG_CLKCTRL, CM_L3INIT_USB_TLL_CLKCTRL,
+ * CM_L3INIT_USB_TLL_CLKCTRL_RESTORE, CM_L3INIT_XHPI_CLKCTRL,
+ * CM_L3INSTR_L3_3_CLKCTRL, CM_L3INSTR_L3_3_CLKCTRL_RESTORE,
+ * CM_L3INSTR_L3_INSTR_CLKCTRL, CM_L3INSTR_L3_INSTR_CLKCTRL_RESTORE,
+ * CM_L3INSTR_OCP_WP1_CLKCTRL, CM_L3INSTR_OCP_WP1_CLKCTRL_RESTORE,
+ * CM_L3_1_L3_1_CLKCTRL, CM_L3_2_GPMC_CLKCTRL, CM_L3_2_L3_2_CLKCTRL,
+ * CM_L3_2_OCMC_RAM_CLKCTRL, CM_L4CFG_HW_SEM_CLKCTRL, CM_L4CFG_L4_CFG_CLKCTRL,
+ * CM_L4CFG_MAILBOX_CLKCTRL, CM_L4CFG_SAR_ROM_CLKCTRL, CM_L4PER_ADC_CLKCTRL,
+ * CM_L4PER_DMTIMER10_CLKCTRL, CM_L4PER_DMTIMER11_CLKCTRL,
+ * CM_L4PER_DMTIMER2_CLKCTRL, CM_L4PER_DMTIMER3_CLKCTRL,
+ * CM_L4PER_DMTIMER4_CLKCTRL, CM_L4PER_DMTIMER9_CLKCTRL, CM_L4PER_ELM_CLKCTRL,
+ * CM_L4PER_GPIO2_CLKCTRL, CM_L4PER_GPIO2_CLKCTRL_RESTORE,
+ * CM_L4PER_GPIO3_CLKCTRL, CM_L4PER_GPIO3_CLKCTRL_RESTORE,
+ * CM_L4PER_GPIO4_CLKCTRL, CM_L4PER_GPIO4_CLKCTRL_RESTORE,
+ * CM_L4PER_GPIO5_CLKCTRL, CM_L4PER_GPIO5_CLKCTRL_RESTORE,
+ * CM_L4PER_GPIO6_CLKCTRL, CM_L4PER_GPIO6_CLKCTRL_RESTORE,
+ * CM_L4PER_HDQ1W_CLKCTRL, CM_L4PER_HECC1_CLKCTRL, CM_L4PER_HECC2_CLKCTRL,
+ * CM_L4PER_I2C1_CLKCTRL, CM_L4PER_I2C2_CLKCTRL, CM_L4PER_I2C3_CLKCTRL,
+ * CM_L4PER_I2C4_CLKCTRL, CM_L4PER_I2C5_CLKCTRL, CM_L4PER_L4PER_CLKCTRL,
+ * CM_L4PER_MCASP2_CLKCTRL, CM_L4PER_MCASP3_CLKCTRL, CM_L4PER_MCBSP4_CLKCTRL,
+ * CM_L4PER_MCSPI1_CLKCTRL, CM_L4PER_MCSPI2_CLKCTRL, CM_L4PER_MCSPI3_CLKCTRL,
+ * CM_L4PER_MCSPI4_CLKCTRL, CM_L4PER_MGATE_CLKCTRL, CM_L4PER_MMCSD3_CLKCTRL,
+ * CM_L4PER_MMCSD4_CLKCTRL, CM_L4PER_MMCSD5_CLKCTRL, CM_L4PER_MSPROHG_CLKCTRL,
+ * CM_L4PER_SLIMBUS2_CLKCTRL, CM_L4PER_UART1_CLKCTRL, CM_L4PER_UART2_CLKCTRL,
+ * CM_L4PER_UART3_CLKCTRL, CM_L4PER_UART4_CLKCTRL, CM_L4SEC_AES1_CLKCTRL,
+ * CM_L4SEC_AES2_CLKCTRL, CM_L4SEC_CRYPTODMA_CLKCTRL, CM_L4SEC_DES3DES_CLKCTRL,
+ * CM_L4SEC_PKAEIP29_CLKCTRL, CM_L4SEC_RNG_CLKCTRL, CM_L4SEC_SHA2MD51_CLKCTRL,
+ * CM_MEMIF_DMM_CLKCTRL, CM_MEMIF_EMIF_1_CLKCTRL, CM_MEMIF_EMIF_2_CLKCTRL,
+ * CM_MEMIF_EMIF_FW_CLKCTRL, CM_MEMIF_EMIF_H1_CLKCTRL,
+ * CM_MEMIF_EMIF_H2_CLKCTRL, CM_MPU_MPU_CLKCTRL, CM_SDMA_SDMA_CLKCTRL,
+ * CM_TESLA_TESLA_CLKCTRL, CM_WKUP_GPIO1_CLKCTRL, CM_WKUP_KEYBOARD_CLKCTRL,
+ * CM_WKUP_L4WKUP_CLKCTRL, CM_WKUP_RTC_CLKCTRL, CM_WKUP_SARRAM_CLKCTRL,
+ * CM_WKUP_SYNCTIMER_CLKCTRL, CM_WKUP_TIMER12_CLKCTRL, CM_WKUP_TIMER1_CLKCTRL,
+ * CM_WKUP_USIM_CLKCTRL, CM_WKUP_WDT1_CLKCTRL, CM_WKUP_WDT2_CLKCTRL
*/
#define OMAP4430_IDLEST_SHIFT 16
-#define OMAP4430_IDLEST_MASK BITFIELD(16, 17)
+#define OMAP4430_IDLEST_MASK (0x3 << 16)
-/* Used by CM_DUCATI_DYNAMICDEP, CM_L3_2_DYNAMICDEP, CM_L4CFG_DYNAMICDEP */
+/*
+ * Used by CM_DUCATI_DYNAMICDEP, CM_L3_2_DYNAMICDEP,
+ * CM_L3_2_DYNAMICDEP_RESTORE, CM_L4CFG_DYNAMICDEP, CM_L4CFG_DYNAMICDEP_RESTORE
+ */
#define OMAP4430_ISS_DYNDEP_SHIFT 9
-#define OMAP4430_ISS_DYNDEP_MASK BITFIELD(9, 9)
+#define OMAP4430_ISS_DYNDEP_MASK (1 << 9)
/*
- * Used by CM_DUCATI_STATICDEP, CM_SDMA_STATICDEP, CM_SDMA_STATICDEP_RESTORE,
- * CM_MPU_STATICDEP, CM_TESLA_STATICDEP
+ * Used by CM_DUCATI_STATICDEP, CM_MPU_STATICDEP, CM_SDMA_STATICDEP,
+ * CM_SDMA_STATICDEP_RESTORE, CM_TESLA_STATICDEP
*/
#define OMAP4430_ISS_STATDEP_SHIFT 9
-#define OMAP4430_ISS_STATDEP_MASK BITFIELD(9, 9)
+#define OMAP4430_ISS_STATDEP_MASK (1 << 9)
-/* Used by CM_L3_2_DYNAMICDEP, CM_TESLA_DYNAMICDEP */
+/* Used by CM_L3_2_DYNAMICDEP, CM_L3_2_DYNAMICDEP_RESTORE, CM_TESLA_DYNAMICDEP */
#define OMAP4430_IVAHD_DYNDEP_SHIFT 2
-#define OMAP4430_IVAHD_DYNDEP_MASK BITFIELD(2, 2)
+#define OMAP4430_IVAHD_DYNDEP_MASK (1 << 2)
/*
- * Used by CM_D2D_STATICDEP, CM_DUCATI_STATICDEP, CM_SDMA_STATICDEP,
- * CM_GFX_STATICDEP, CM_L3INIT_STATICDEP, CM_CAM_STATICDEP,
- * CM_SDMA_STATICDEP_RESTORE, CM_DSS_STATICDEP, CM_MPU_STATICDEP,
- * CM_TESLA_STATICDEP
+ * Used by CM_CAM_STATICDEP, CM_D2D_STATICDEP, CM_D2D_STATICDEP_RESTORE,
+ * CM_DSS_STATICDEP, CM_DUCATI_STATICDEP, CM_GFX_STATICDEP,
+ * CM_L3INIT_STATICDEP, CM_MPU_STATICDEP, CM_SDMA_STATICDEP,
+ * CM_SDMA_STATICDEP_RESTORE, CM_TESLA_STATICDEP
*/
#define OMAP4430_IVAHD_STATDEP_SHIFT 2
-#define OMAP4430_IVAHD_STATDEP_MASK BITFIELD(2, 2)
+#define OMAP4430_IVAHD_STATDEP_MASK (1 << 2)
-/* Used by CM_L3_2_DYNAMICDEP, CM_L4CFG_DYNAMICDEP, CM_L4PER_DYNAMICDEP */
+/*
+ * Used by CM_L3_2_DYNAMICDEP, CM_L3_2_DYNAMICDEP_RESTORE, CM_L4CFG_DYNAMICDEP,
+ * CM_L4CFG_DYNAMICDEP_RESTORE, CM_L4PER_DYNAMICDEP, CM_L4PER_DYNAMICDEP_RESTORE
+ */
#define OMAP4430_L3INIT_DYNDEP_SHIFT 7
-#define OMAP4430_L3INIT_DYNDEP_MASK BITFIELD(7, 7)
+#define OMAP4430_L3INIT_DYNDEP_MASK (1 << 7)
/*
- * Used by CM_D2D_STATICDEP, CM_DUCATI_STATICDEP, CM_SDMA_STATICDEP,
- * CM_SDMA_STATICDEP_RESTORE, CM_MPU_STATICDEP, CM_TESLA_STATICDEP
+ * Used by CM_D2D_STATICDEP, CM_D2D_STATICDEP_RESTORE, CM_DUCATI_STATICDEP,
+ * CM_MPU_STATICDEP, CM_SDMA_STATICDEP, CM_SDMA_STATICDEP_RESTORE,
+ * CM_TESLA_STATICDEP
*/
#define OMAP4430_L3INIT_STATDEP_SHIFT 7
-#define OMAP4430_L3INIT_STATDEP_MASK BITFIELD(7, 7)
+#define OMAP4430_L3INIT_STATDEP_MASK (1 << 7)
/*
- * Used by CM_L3_2_DYNAMICDEP, CM_L4CFG_DYNAMICDEP, CM_L3INIT_DYNAMICDEP,
- * CM_DSS_DYNAMICDEP, CM_MPU_DYNAMICDEP, CM_TESLA_DYNAMICDEP
+ * Used by CM_DSS_DYNAMICDEP, CM_L3INIT_DYNAMICDEP, CM_L3_2_DYNAMICDEP,
+ * CM_L3_2_DYNAMICDEP_RESTORE, CM_L4CFG_DYNAMICDEP,
+ * CM_L4CFG_DYNAMICDEP_RESTORE, CM_MPU_DYNAMICDEP, CM_TESLA_DYNAMICDEP
*/
#define OMAP4430_L3_1_DYNDEP_SHIFT 5
-#define OMAP4430_L3_1_DYNDEP_MASK BITFIELD(5, 5)
+#define OMAP4430_L3_1_DYNDEP_MASK (1 << 5)
/*
- * Used by CM_D2D_STATICDEP, CM_DUCATI_STATICDEP, CM_SDMA_STATICDEP,
- * CM_GFX_STATICDEP, CM_L4SEC_STATICDEP, CM_L3INIT_STATICDEP, CM_CAM_STATICDEP,
- * CM_SDMA_STATICDEP_RESTORE, CM_IVAHD_STATICDEP, CM_DSS_STATICDEP,
- * CM_MPU_STATICDEP, CM_TESLA_STATICDEP
+ * Used by CM_CAM_STATICDEP, CM_D2D_STATICDEP, CM_D2D_STATICDEP_RESTORE,
+ * CM_DSS_STATICDEP, CM_DUCATI_STATICDEP, CM_GFX_STATICDEP, CM_IVAHD_STATICDEP,
+ * CM_L3INIT_STATICDEP, CM_L4SEC_STATICDEP, CM_MPU_STATICDEP,
+ * CM_SDMA_STATICDEP, CM_SDMA_STATICDEP_RESTORE, CM_TESLA_STATICDEP
*/
#define OMAP4430_L3_1_STATDEP_SHIFT 5
-#define OMAP4430_L3_1_STATDEP_MASK BITFIELD(5, 5)
+#define OMAP4430_L3_1_STATDEP_MASK (1 << 5)
/*
- * Used by CM_EMU_DYNAMICDEP, CM_D2D_DYNAMICDEP, CM_DUCATI_DYNAMICDEP,
- * CM_L3_1_DYNAMICDEP, CM_L4CFG_DYNAMICDEP, CM_SDMA_DYNAMICDEP,
- * CM_GFX_DYNAMICDEP, CM_L4SEC_DYNAMICDEP, CM_L3INIT_DYNAMICDEP,
- * CM_CAM_DYNAMICDEP, CM_IVAHD_DYNAMICDEP
+ * Used by CM_CAM_DYNAMICDEP, CM_D2D_DYNAMICDEP, CM_D2D_DYNAMICDEP_RESTORE,
+ * CM_DUCATI_DYNAMICDEP, CM_EMU_DYNAMICDEP, CM_GFX_DYNAMICDEP,
+ * CM_IVAHD_DYNAMICDEP, CM_L3INIT_DYNAMICDEP, CM_L3_1_DYNAMICDEP,
+ * CM_L3_1_DYNAMICDEP_RESTORE, CM_L4CFG_DYNAMICDEP,
+ * CM_L4CFG_DYNAMICDEP_RESTORE, CM_L4SEC_DYNAMICDEP, CM_SDMA_DYNAMICDEP
*/
#define OMAP4430_L3_2_DYNDEP_SHIFT 6
-#define OMAP4430_L3_2_DYNDEP_MASK BITFIELD(6, 6)
+#define OMAP4430_L3_2_DYNDEP_MASK (1 << 6)
/*
- * Used by CM_D2D_STATICDEP, CM_DUCATI_STATICDEP, CM_SDMA_STATICDEP,
- * CM_GFX_STATICDEP, CM_L4SEC_STATICDEP, CM_L3INIT_STATICDEP, CM_CAM_STATICDEP,
- * CM_SDMA_STATICDEP_RESTORE, CM_IVAHD_STATICDEP, CM_DSS_STATICDEP,
- * CM_MPU_STATICDEP, CM_TESLA_STATICDEP
+ * Used by CM_CAM_STATICDEP, CM_D2D_STATICDEP, CM_D2D_STATICDEP_RESTORE,
+ * CM_DSS_STATICDEP, CM_DUCATI_STATICDEP, CM_GFX_STATICDEP, CM_IVAHD_STATICDEP,
+ * CM_L3INIT_STATICDEP, CM_L4SEC_STATICDEP, CM_MPU_STATICDEP,
+ * CM_SDMA_STATICDEP, CM_SDMA_STATICDEP_RESTORE, CM_TESLA_STATICDEP
*/
#define OMAP4430_L3_2_STATDEP_SHIFT 6
-#define OMAP4430_L3_2_STATDEP_MASK BITFIELD(6, 6)
+#define OMAP4430_L3_2_STATDEP_MASK (1 << 6)
-/* Used by CM_L3_1_DYNAMICDEP */
+/* Used by CM_L3_1_DYNAMICDEP, CM_L3_1_DYNAMICDEP_RESTORE */
#define OMAP4430_L4CFG_DYNDEP_SHIFT 12
-#define OMAP4430_L4CFG_DYNDEP_MASK BITFIELD(12, 12)
+#define OMAP4430_L4CFG_DYNDEP_MASK (1 << 12)
/*
- * Used by CM_D2D_STATICDEP, CM_DUCATI_STATICDEP, CM_SDMA_STATICDEP,
- * CM_L3INIT_STATICDEP, CM_SDMA_STATICDEP_RESTORE, CM_MPU_STATICDEP,
- * CM_TESLA_STATICDEP
+ * Used by CM_D2D_STATICDEP, CM_D2D_STATICDEP_RESTORE, CM_DUCATI_STATICDEP,
+ * CM_L3INIT_STATICDEP, CM_MPU_STATICDEP, CM_SDMA_STATICDEP,
+ * CM_SDMA_STATICDEP_RESTORE, CM_TESLA_STATICDEP
*/
#define OMAP4430_L4CFG_STATDEP_SHIFT 12
-#define OMAP4430_L4CFG_STATDEP_MASK BITFIELD(12, 12)
+#define OMAP4430_L4CFG_STATDEP_MASK (1 << 12)
-/* Used by CM_L3_2_DYNAMICDEP */
+/* Used by CM_L3_2_DYNAMICDEP, CM_L3_2_DYNAMICDEP_RESTORE */
#define OMAP4430_L4PER_DYNDEP_SHIFT 13
-#define OMAP4430_L4PER_DYNDEP_MASK BITFIELD(13, 13)
+#define OMAP4430_L4PER_DYNDEP_MASK (1 << 13)
/*
- * Used by CM_D2D_STATICDEP, CM_DUCATI_STATICDEP, CM_SDMA_STATICDEP,
- * CM_L4SEC_STATICDEP, CM_L3INIT_STATICDEP, CM_SDMA_STATICDEP_RESTORE,
- * CM_MPU_STATICDEP, CM_TESLA_STATICDEP
+ * Used by CM_D2D_STATICDEP, CM_D2D_STATICDEP_RESTORE, CM_DUCATI_STATICDEP,
+ * CM_L3INIT_STATICDEP, CM_L4SEC_STATICDEP, CM_MPU_STATICDEP,
+ * CM_SDMA_STATICDEP, CM_SDMA_STATICDEP_RESTORE, CM_TESLA_STATICDEP
*/
#define OMAP4430_L4PER_STATDEP_SHIFT 13
-#define OMAP4430_L4PER_STATDEP_MASK BITFIELD(13, 13)
+#define OMAP4430_L4PER_STATDEP_MASK (1 << 13)
-/* Used by CM_L3_2_DYNAMICDEP, CM_L4PER_DYNAMICDEP */
+/*
+ * Used by CM_L3_2_DYNAMICDEP, CM_L3_2_DYNAMICDEP_RESTORE, CM_L4PER_DYNAMICDEP,
+ * CM_L4PER_DYNAMICDEP_RESTORE
+ */
#define OMAP4430_L4SEC_DYNDEP_SHIFT 14
-#define OMAP4430_L4SEC_DYNDEP_MASK BITFIELD(14, 14)
+#define OMAP4430_L4SEC_DYNDEP_MASK (1 << 14)
/*
- * Used by CM_DUCATI_STATICDEP, CM_SDMA_STATICDEP, CM_L3INIT_STATICDEP,
- * CM_SDMA_STATICDEP_RESTORE, CM_MPU_STATICDEP
+ * Used by CM_DUCATI_STATICDEP, CM_L3INIT_STATICDEP, CM_MPU_STATICDEP,
+ * CM_SDMA_STATICDEP, CM_SDMA_STATICDEP_RESTORE
*/
#define OMAP4430_L4SEC_STATDEP_SHIFT 14
-#define OMAP4430_L4SEC_STATDEP_MASK BITFIELD(14, 14)
+#define OMAP4430_L4SEC_STATDEP_MASK (1 << 14)
-/* Used by CM_L4CFG_DYNAMICDEP */
+/* Used by CM_L4CFG_DYNAMICDEP, CM_L4CFG_DYNAMICDEP_RESTORE */
#define OMAP4430_L4WKUP_DYNDEP_SHIFT 15
-#define OMAP4430_L4WKUP_DYNDEP_MASK BITFIELD(15, 15)
+#define OMAP4430_L4WKUP_DYNDEP_MASK (1 << 15)
/*
- * Used by CM_DUCATI_STATICDEP, CM_SDMA_STATICDEP, CM_L3INIT_STATICDEP,
- * CM_SDMA_STATICDEP_RESTORE, CM_MPU_STATICDEP, CM_TESLA_STATICDEP
+ * Used by CM_DUCATI_STATICDEP, CM_L3INIT_STATICDEP, CM_MPU_STATICDEP,
+ * CM_SDMA_STATICDEP, CM_SDMA_STATICDEP_RESTORE, CM_TESLA_STATICDEP
*/
#define OMAP4430_L4WKUP_STATDEP_SHIFT 15
-#define OMAP4430_L4WKUP_STATDEP_MASK BITFIELD(15, 15)
+#define OMAP4430_L4WKUP_STATDEP_MASK (1 << 15)
/*
- * Used by CM_D2D_DYNAMICDEP, CM_L3_1_DYNAMICDEP, CM_L4CFG_DYNAMICDEP,
- * CM_MPU_DYNAMICDEP
+ * Used by CM_D2D_DYNAMICDEP, CM_D2D_DYNAMICDEP_RESTORE, CM_L3_1_DYNAMICDEP,
+ * CM_L3_1_DYNAMICDEP_RESTORE, CM_L4CFG_DYNAMICDEP,
+ * CM_L4CFG_DYNAMICDEP_RESTORE, CM_MPU_DYNAMICDEP
*/
#define OMAP4430_MEMIF_DYNDEP_SHIFT 4
-#define OMAP4430_MEMIF_DYNDEP_MASK BITFIELD(4, 4)
+#define OMAP4430_MEMIF_DYNDEP_MASK (1 << 4)
/*
- * Used by CM_D2D_STATICDEP, CM_DUCATI_STATICDEP, CM_SDMA_STATICDEP,
- * CM_GFX_STATICDEP, CM_L4SEC_STATICDEP, CM_L3INIT_STATICDEP, CM_CAM_STATICDEP,
- * CM_SDMA_STATICDEP_RESTORE, CM_IVAHD_STATICDEP, CM_DSS_STATICDEP,
- * CM_MPU_STATICDEP, CM_TESLA_STATICDEP
+ * Used by CM_CAM_STATICDEP, CM_D2D_STATICDEP, CM_D2D_STATICDEP_RESTORE,
+ * CM_DSS_STATICDEP, CM_DUCATI_STATICDEP, CM_GFX_STATICDEP, CM_IVAHD_STATICDEP,
+ * CM_L3INIT_STATICDEP, CM_L4SEC_STATICDEP, CM_MPU_STATICDEP,
+ * CM_SDMA_STATICDEP, CM_SDMA_STATICDEP_RESTORE, CM_TESLA_STATICDEP
*/
#define OMAP4430_MEMIF_STATDEP_SHIFT 4
-#define OMAP4430_MEMIF_STATDEP_MASK BITFIELD(4, 4)
+#define OMAP4430_MEMIF_STATDEP_MASK (1 << 4)
/*
- * Used by CM_SSC_MODFREQDIV_DPLL_PER, CM_SSC_MODFREQDIV_DPLL_UNIPRO,
- * CM_SSC_MODFREQDIV_DPLL_USB, CM_SSC_MODFREQDIV_DPLL_CORE_RESTORE,
- * CM_SSC_MODFREQDIV_DPLL_ABE, CM_SSC_MODFREQDIV_DPLL_CORE,
- * CM_SSC_MODFREQDIV_DPLL_DDRPHY, CM_SSC_MODFREQDIV_DPLL_IVA,
- * CM_SSC_MODFREQDIV_DPLL_MPU
+ * Used by CM_SSC_MODFREQDIV_DPLL_ABE, CM_SSC_MODFREQDIV_DPLL_CORE,
+ * CM_SSC_MODFREQDIV_DPLL_CORE_RESTORE, CM_SSC_MODFREQDIV_DPLL_DDRPHY,
+ * CM_SSC_MODFREQDIV_DPLL_IVA, CM_SSC_MODFREQDIV_DPLL_MPU,
+ * CM_SSC_MODFREQDIV_DPLL_PER, CM_SSC_MODFREQDIV_DPLL_UNIPRO,
+ * CM_SSC_MODFREQDIV_DPLL_USB
*/
#define OMAP4430_MODFREQDIV_EXPONENT_SHIFT 8
-#define OMAP4430_MODFREQDIV_EXPONENT_MASK BITFIELD(8, 10)
+#define OMAP4430_MODFREQDIV_EXPONENT_MASK (0x7 << 8)
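The BITFIELD(low, high) helper being removed throughout this header built a mask covering bits low..high; the new literal (value << shift) form spells out the same constant. Below is a minimal sketch of the equivalence and of the usual extract/insert idiom. The BITFIELD definition and the two helpers are reconstructed for illustration only and are not part of the patch.

#include <linux/types.h>

/* Presumed definition of the old helper, inferred from its uses above */
#define BITFIELD(low, high)	(((1 << ((high) - (low) + 1)) - 1) << (low))

/* e.g. BITFIELD(8, 10) == (0x7 << 8), BITFIELD(5, 5) == (1 << 5) */

/* Extract a field given its _MASK/_SHIFT pair */
static inline u32 field_get(u32 reg, u32 mask, u32 shift)
{
	return (reg & mask) >> shift;
}

/* Insert a field value without touching the other bits */
static inline u32 field_set(u32 reg, u32 mask, u32 shift, u32 val)
{
	return (reg & ~mask) | ((val << shift) & mask);
}

So reading the exponent out of a CM_SSC_MODFREQDIV_DPLL_* value would be field_get(v, OMAP4430_MODFREQDIV_EXPONENT_MASK, OMAP4430_MODFREQDIV_EXPONENT_SHIFT).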
/*
- * Used by CM_SSC_MODFREQDIV_DPLL_PER, CM_SSC_MODFREQDIV_DPLL_UNIPRO,
- * CM_SSC_MODFREQDIV_DPLL_USB, CM_SSC_MODFREQDIV_DPLL_CORE_RESTORE,
- * CM_SSC_MODFREQDIV_DPLL_ABE, CM_SSC_MODFREQDIV_DPLL_CORE,
- * CM_SSC_MODFREQDIV_DPLL_DDRPHY, CM_SSC_MODFREQDIV_DPLL_IVA,
- * CM_SSC_MODFREQDIV_DPLL_MPU
+ * Used by CM_SSC_MODFREQDIV_DPLL_ABE, CM_SSC_MODFREQDIV_DPLL_CORE,
+ * CM_SSC_MODFREQDIV_DPLL_CORE_RESTORE, CM_SSC_MODFREQDIV_DPLL_DDRPHY,
+ * CM_SSC_MODFREQDIV_DPLL_IVA, CM_SSC_MODFREQDIV_DPLL_MPU,
+ * CM_SSC_MODFREQDIV_DPLL_PER, CM_SSC_MODFREQDIV_DPLL_UNIPRO,
+ * CM_SSC_MODFREQDIV_DPLL_USB
*/
#define OMAP4430_MODFREQDIV_MANTISSA_SHIFT 0
-#define OMAP4430_MODFREQDIV_MANTISSA_MASK BITFIELD(0, 6)
-
-/*
- * Used by PRM_PRM_PROFILING_CLKCTRL, CM_WKUP_GPIO1_CLKCTRL,
- * CM_WKUP_KEYBOARD_CLKCTRL, CM_WKUP_L4WKUP_CLKCTRL, CM_WKUP_RTC_CLKCTRL,
- * CM_WKUP_SARRAM_CLKCTRL, CM_WKUP_SYNCTIMER_CLKCTRL, CM_WKUP_TIMER12_CLKCTRL,
- * CM_WKUP_TIMER1_CLKCTRL, CM_WKUP_USIM_CLKCTRL, CM_WKUP_WDT1_CLKCTRL,
- * CM_WKUP_WDT2_CLKCTRL, CM_EMU_DEBUGSS_CLKCTRL, CM_D2D_MODEM_ICR_CLKCTRL,
- * CM_D2D_SAD2D_CLKCTRL, CM_D2D_SAD2D_FW_CLKCTRL, CM_DUCATI_DUCATI_CLKCTRL,
- * CM_L3INSTR_L3_3_CLKCTRL, CM_L3INSTR_L3_INSTR_CLKCTRL,
- * CM_L3INSTR_OCP_WP1_CLKCTRL, CM_L3_1_L3_1_CLKCTRL, CM_L3_2_GPMC_CLKCTRL,
- * CM_L3_2_L3_2_CLKCTRL, CM_L3_2_OCMC_RAM_CLKCTRL, CM_L4CFG_HW_SEM_CLKCTRL,
- * CM_L4CFG_L4_CFG_CLKCTRL, CM_L4CFG_MAILBOX_CLKCTRL, CM_L4CFG_SAR_ROM_CLKCTRL,
- * CM_MEMIF_DMM_CLKCTRL, CM_MEMIF_EMIF_1_CLKCTRL, CM_MEMIF_EMIF_2_CLKCTRL,
- * CM_MEMIF_EMIF_FW_CLKCTRL, CM_MEMIF_EMIF_H1_CLKCTRL,
- * CM_MEMIF_EMIF_H2_CLKCTRL, CM_SDMA_SDMA_CLKCTRL, CM_GFX_GFX_CLKCTRL,
- * CM_L4PER_ADC_CLKCTRL, CM_L4PER_DMTIMER10_CLKCTRL,
- * CM_L4PER_DMTIMER11_CLKCTRL, CM_L4PER_DMTIMER2_CLKCTRL,
- * CM_L4PER_DMTIMER3_CLKCTRL, CM_L4PER_DMTIMER4_CLKCTRL,
- * CM_L4PER_DMTIMER9_CLKCTRL, CM_L4PER_ELM_CLKCTRL, CM_L4PER_GPIO2_CLKCTRL,
- * CM_L4PER_GPIO3_CLKCTRL, CM_L4PER_GPIO4_CLKCTRL, CM_L4PER_GPIO5_CLKCTRL,
- * CM_L4PER_GPIO6_CLKCTRL, CM_L4PER_HDQ1W_CLKCTRL, CM_L4PER_HECC1_CLKCTRL,
- * CM_L4PER_HECC2_CLKCTRL, CM_L4PER_I2C1_CLKCTRL, CM_L4PER_I2C2_CLKCTRL,
- * CM_L4PER_I2C3_CLKCTRL, CM_L4PER_I2C4_CLKCTRL, CM_L4PER_I2C5_CLKCTRL,
- * CM_L4PER_L4PER_CLKCTRL, CM_L4PER_MCASP2_CLKCTRL, CM_L4PER_MCASP3_CLKCTRL,
- * CM_L4PER_MCBSP4_CLKCTRL, CM_L4PER_MCSPI1_CLKCTRL, CM_L4PER_MCSPI2_CLKCTRL,
- * CM_L4PER_MCSPI3_CLKCTRL, CM_L4PER_MCSPI4_CLKCTRL, CM_L4PER_MGATE_CLKCTRL,
- * CM_L4PER_MMCSD3_CLKCTRL, CM_L4PER_MMCSD4_CLKCTRL, CM_L4PER_MMCSD5_CLKCTRL,
- * CM_L4PER_MSPROHG_CLKCTRL, CM_L4PER_SLIMBUS2_CLKCTRL, CM_L4PER_UART1_CLKCTRL,
- * CM_L4PER_UART2_CLKCTRL, CM_L4PER_UART3_CLKCTRL, CM_L4PER_UART4_CLKCTRL,
- * CM_L4SEC_AES1_CLKCTRL, CM_L4SEC_AES2_CLKCTRL, CM_L4SEC_CRYPTODMA_CLKCTRL,
- * CM_L4SEC_DES3DES_CLKCTRL, CM_L4SEC_PKAEIP29_CLKCTRL, CM_L4SEC_RNG_CLKCTRL,
- * CM_L4SEC_SHA2MD51_CLKCTRL, CM_L3INIT_CCPTX_CLKCTRL, CM_L3INIT_EMAC_CLKCTRL,
+#define OMAP4430_MODFREQDIV_MANTISSA_MASK (0x7f << 0)
+
+/*
+ * Used by CM1_ABE_AESS_CLKCTRL, CM1_ABE_DMIC_CLKCTRL, CM1_ABE_L4ABE_CLKCTRL,
+ * CM1_ABE_MCASP_CLKCTRL, CM1_ABE_MCBSP1_CLKCTRL, CM1_ABE_MCBSP2_CLKCTRL,
+ * CM1_ABE_MCBSP3_CLKCTRL, CM1_ABE_PDM_CLKCTRL, CM1_ABE_SLIMBUS_CLKCTRL,
+ * CM1_ABE_TIMER5_CLKCTRL, CM1_ABE_TIMER6_CLKCTRL, CM1_ABE_TIMER7_CLKCTRL,
+ * CM1_ABE_TIMER8_CLKCTRL, CM1_ABE_WDT3_CLKCTRL, CM_ALWON_MDMINTC_CLKCTRL,
+ * CM_ALWON_SR_CORE_CLKCTRL, CM_ALWON_SR_IVA_CLKCTRL, CM_ALWON_SR_MPU_CLKCTRL,
+ * CM_CAM_FDIF_CLKCTRL, CM_CAM_ISS_CLKCTRL, CM_CEFUSE_CEFUSE_CLKCTRL,
+ * CM_CM1_PROFILING_CLKCTRL, CM_CM1_PROFILING_CLKCTRL_RESTORE,
+ * CM_CM2_PROFILING_CLKCTRL, CM_CM2_PROFILING_CLKCTRL_RESTORE,
+ * CM_D2D_MODEM_ICR_CLKCTRL, CM_D2D_SAD2D_CLKCTRL, CM_D2D_SAD2D_FW_CLKCTRL,
+ * CM_DSS_DEISS_CLKCTRL, CM_DSS_DSS_CLKCTRL, CM_DUCATI_DUCATI_CLKCTRL,
+ * CM_EMU_DEBUGSS_CLKCTRL, CM_GFX_GFX_CLKCTRL, CM_IVAHD_IVAHD_CLKCTRL,
+ * CM_IVAHD_SL2_CLKCTRL, CM_L3INIT_CCPTX_CLKCTRL, CM_L3INIT_EMAC_CLKCTRL,
* CM_L3INIT_HSI_CLKCTRL, CM_L3INIT_MMC1_CLKCTRL, CM_L3INIT_MMC2_CLKCTRL,
* CM_L3INIT_MMC6_CLKCTRL, CM_L3INIT_P1500_CLKCTRL, CM_L3INIT_PCIESS_CLKCTRL,
* CM_L3INIT_SATA_CLKCTRL, CM_L3INIT_TPPSS_CLKCTRL, CM_L3INIT_UNIPRO1_CLKCTRL,
* CM_L3INIT_USBPHYOCP2SCP_CLKCTRL, CM_L3INIT_USB_HOST_CLKCTRL,
- * CM_L3INIT_USB_HOST_FS_CLKCTRL, CM_L3INIT_USB_OTG_CLKCTRL,
- * CM_L3INIT_USB_TLL_CLKCTRL, CM_L3INIT_XHPI_CLKCTRL, CM_CAM_FDIF_CLKCTRL,
- * CM_CAM_ISS_CLKCTRL, CM_CEFUSE_CEFUSE_CLKCTRL,
- * CM_L3INIT_USB_HOST_CLKCTRL_RESTORE, CM_L3INIT_USB_TLL_CLKCTRL_RESTORE,
- * CM_L3INSTR_L3_3_CLKCTRL_RESTORE, CM_L3INSTR_L3_INSTR_CLKCTRL_RESTORE,
- * CM_L3INSTR_OCP_WP1_CLKCTRL_RESTORE, CM_L4PER_GPIO2_CLKCTRL_RESTORE,
- * CM_L4PER_GPIO3_CLKCTRL_RESTORE, CM_L4PER_GPIO4_CLKCTRL_RESTORE,
- * CM_L4PER_GPIO5_CLKCTRL_RESTORE, CM_L4PER_GPIO6_CLKCTRL_RESTORE,
- * CM_ALWON_MDMINTC_CLKCTRL, CM_ALWON_SR_CORE_CLKCTRL, CM_ALWON_SR_IVA_CLKCTRL,
- * CM_ALWON_SR_MPU_CLKCTRL, CM_IVAHD_IVAHD_CLKCTRL, CM_IVAHD_SL2_CLKCTRL,
- * CM_DSS_DEISS_CLKCTRL, CM_DSS_DSS_CLKCTRL, CM_CM2_PROFILING_CLKCTRL,
- * CM_MPU_MPU_CLKCTRL, CM_TESLA_TESLA_CLKCTRL, CM1_ABE_AESS_CLKCTRL,
- * CM1_ABE_DMIC_CLKCTRL, CM1_ABE_L4ABE_CLKCTRL, CM1_ABE_MCASP_CLKCTRL,
- * CM1_ABE_MCBSP1_CLKCTRL, CM1_ABE_MCBSP2_CLKCTRL, CM1_ABE_MCBSP3_CLKCTRL,
- * CM1_ABE_PDM_CLKCTRL, CM1_ABE_SLIMBUS_CLKCTRL, CM1_ABE_TIMER5_CLKCTRL,
- * CM1_ABE_TIMER6_CLKCTRL, CM1_ABE_TIMER7_CLKCTRL, CM1_ABE_TIMER8_CLKCTRL,
- * CM1_ABE_WDT3_CLKCTRL, CM_CM1_PROFILING_CLKCTRL
+ * CM_L3INIT_USB_HOST_CLKCTRL_RESTORE, CM_L3INIT_USB_HOST_FS_CLKCTRL,
+ * CM_L3INIT_USB_OTG_CLKCTRL, CM_L3INIT_USB_TLL_CLKCTRL,
+ * CM_L3INIT_USB_TLL_CLKCTRL_RESTORE, CM_L3INIT_XHPI_CLKCTRL,
+ * CM_L3INSTR_L3_3_CLKCTRL, CM_L3INSTR_L3_3_CLKCTRL_RESTORE,
+ * CM_L3INSTR_L3_INSTR_CLKCTRL, CM_L3INSTR_L3_INSTR_CLKCTRL_RESTORE,
+ * CM_L3INSTR_OCP_WP1_CLKCTRL, CM_L3INSTR_OCP_WP1_CLKCTRL_RESTORE,
+ * CM_L3_1_L3_1_CLKCTRL, CM_L3_2_GPMC_CLKCTRL, CM_L3_2_L3_2_CLKCTRL,
+ * CM_L3_2_OCMC_RAM_CLKCTRL, CM_L4CFG_HW_SEM_CLKCTRL, CM_L4CFG_L4_CFG_CLKCTRL,
+ * CM_L4CFG_MAILBOX_CLKCTRL, CM_L4CFG_SAR_ROM_CLKCTRL, CM_L4PER_ADC_CLKCTRL,
+ * CM_L4PER_DMTIMER10_CLKCTRL, CM_L4PER_DMTIMER11_CLKCTRL,
+ * CM_L4PER_DMTIMER2_CLKCTRL, CM_L4PER_DMTIMER3_CLKCTRL,
+ * CM_L4PER_DMTIMER4_CLKCTRL, CM_L4PER_DMTIMER9_CLKCTRL, CM_L4PER_ELM_CLKCTRL,
+ * CM_L4PER_GPIO2_CLKCTRL, CM_L4PER_GPIO2_CLKCTRL_RESTORE,
+ * CM_L4PER_GPIO3_CLKCTRL, CM_L4PER_GPIO3_CLKCTRL_RESTORE,
+ * CM_L4PER_GPIO4_CLKCTRL, CM_L4PER_GPIO4_CLKCTRL_RESTORE,
+ * CM_L4PER_GPIO5_CLKCTRL, CM_L4PER_GPIO5_CLKCTRL_RESTORE,
+ * CM_L4PER_GPIO6_CLKCTRL, CM_L4PER_GPIO6_CLKCTRL_RESTORE,
+ * CM_L4PER_HDQ1W_CLKCTRL, CM_L4PER_HECC1_CLKCTRL, CM_L4PER_HECC2_CLKCTRL,
+ * CM_L4PER_I2C1_CLKCTRL, CM_L4PER_I2C2_CLKCTRL, CM_L4PER_I2C3_CLKCTRL,
+ * CM_L4PER_I2C4_CLKCTRL, CM_L4PER_I2C5_CLKCTRL, CM_L4PER_L4PER_CLKCTRL,
+ * CM_L4PER_MCASP2_CLKCTRL, CM_L4PER_MCASP3_CLKCTRL, CM_L4PER_MCBSP4_CLKCTRL,
+ * CM_L4PER_MCSPI1_CLKCTRL, CM_L4PER_MCSPI2_CLKCTRL, CM_L4PER_MCSPI3_CLKCTRL,
+ * CM_L4PER_MCSPI4_CLKCTRL, CM_L4PER_MGATE_CLKCTRL, CM_L4PER_MMCSD3_CLKCTRL,
+ * CM_L4PER_MMCSD4_CLKCTRL, CM_L4PER_MMCSD5_CLKCTRL, CM_L4PER_MSPROHG_CLKCTRL,
+ * CM_L4PER_SLIMBUS2_CLKCTRL, CM_L4PER_UART1_CLKCTRL, CM_L4PER_UART2_CLKCTRL,
+ * CM_L4PER_UART3_CLKCTRL, CM_L4PER_UART4_CLKCTRL, CM_L4SEC_AES1_CLKCTRL,
+ * CM_L4SEC_AES2_CLKCTRL, CM_L4SEC_CRYPTODMA_CLKCTRL, CM_L4SEC_DES3DES_CLKCTRL,
+ * CM_L4SEC_PKAEIP29_CLKCTRL, CM_L4SEC_RNG_CLKCTRL, CM_L4SEC_SHA2MD51_CLKCTRL,
+ * CM_MEMIF_DMM_CLKCTRL, CM_MEMIF_EMIF_1_CLKCTRL, CM_MEMIF_EMIF_2_CLKCTRL,
+ * CM_MEMIF_EMIF_FW_CLKCTRL, CM_MEMIF_EMIF_H1_CLKCTRL,
+ * CM_MEMIF_EMIF_H2_CLKCTRL, CM_MPU_MPU_CLKCTRL, CM_SDMA_SDMA_CLKCTRL,
+ * CM_TESLA_TESLA_CLKCTRL, CM_WKUP_GPIO1_CLKCTRL, CM_WKUP_KEYBOARD_CLKCTRL,
+ * CM_WKUP_L4WKUP_CLKCTRL, CM_WKUP_RTC_CLKCTRL, CM_WKUP_SARRAM_CLKCTRL,
+ * CM_WKUP_SYNCTIMER_CLKCTRL, CM_WKUP_TIMER12_CLKCTRL, CM_WKUP_TIMER1_CLKCTRL,
+ * CM_WKUP_USIM_CLKCTRL, CM_WKUP_WDT1_CLKCTRL, CM_WKUP_WDT2_CLKCTRL
*/
#define OMAP4430_MODULEMODE_SHIFT 0
-#define OMAP4430_MODULEMODE_MASK BITFIELD(0, 1)
+#define OMAP4430_MODULEMODE_MASK (0x3 << 0)
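MODULEMODE is the field software programs to enable or disable a module, so a read-modify-write on the relevant CM_*_CLKCTRL register is the typical consumer of this pair. The sketch below is illustrative: the helper name and the "explicitly enabled" value 0x2 follow common OMAP4 PRCM usage and are assumptions, not something this header defines.

#include <linux/io.h>
#include <linux/types.h>

static void cm_module_enable_sketch(void __iomem *clkctrl_reg)
{
	u32 v = __raw_readl(clkctrl_reg);

	v &= ~OMAP4430_MODULEMODE_MASK;
	v |= 0x2 << OMAP4430_MODULEMODE_SHIFT;	/* assumed "explicitly enabled" */
	__raw_writel(v, clkctrl_reg);
}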
/* Used by CM_DSS_DSS_CLKCTRL */
#define OMAP4430_OPTFCLKEN_48MHZ_CLK_SHIFT 9
-#define OMAP4430_OPTFCLKEN_48MHZ_CLK_MASK BITFIELD(9, 9)
+#define OMAP4430_OPTFCLKEN_48MHZ_CLK_MASK (1 << 9)
/* Used by CM_WKUP_BANDGAP_CLKCTRL */
#define OMAP4430_OPTFCLKEN_BGAP_32K_SHIFT 8
-#define OMAP4430_OPTFCLKEN_BGAP_32K_MASK BITFIELD(8, 8)
+#define OMAP4430_OPTFCLKEN_BGAP_32K_MASK (1 << 8)
-/* Used by CM_L3INIT_USBPHYOCP2SCP_CLKCTRL */
-#define OMAP4430_OPTFCLKEN_CLK32K_SHIFT 9
-#define OMAP4430_OPTFCLKEN_CLK32K_MASK BITFIELD(9, 9)
+/* Used by CM_ALWON_USBPHY_CLKCTRL */
+#define OMAP4430_OPTFCLKEN_CLK32K_SHIFT 8
+#define OMAP4430_OPTFCLKEN_CLK32K_MASK (1 << 8)
/* Used by CM_CAM_ISS_CLKCTRL */
#define OMAP4430_OPTFCLKEN_CTRLCLK_SHIFT 8
-#define OMAP4430_OPTFCLKEN_CTRLCLK_MASK BITFIELD(8, 8)
+#define OMAP4430_OPTFCLKEN_CTRLCLK_MASK (1 << 8)
/*
- * Used by CM_WKUP_GPIO1_CLKCTRL, CM_L4PER_GPIO2_CLKCTRL,
- * CM_L4PER_GPIO3_CLKCTRL, CM_L4PER_GPIO4_CLKCTRL, CM_L4PER_GPIO5_CLKCTRL,
- * CM_L4PER_GPIO6_CLKCTRL, CM_L4PER_GPIO2_CLKCTRL_RESTORE,
- * CM_L4PER_GPIO3_CLKCTRL_RESTORE, CM_L4PER_GPIO4_CLKCTRL_RESTORE,
- * CM_L4PER_GPIO5_CLKCTRL_RESTORE, CM_L4PER_GPIO6_CLKCTRL_RESTORE
+ * Used by CM_L4PER_GPIO2_CLKCTRL, CM_L4PER_GPIO2_CLKCTRL_RESTORE,
+ * CM_L4PER_GPIO3_CLKCTRL, CM_L4PER_GPIO3_CLKCTRL_RESTORE,
+ * CM_L4PER_GPIO4_CLKCTRL, CM_L4PER_GPIO4_CLKCTRL_RESTORE,
+ * CM_L4PER_GPIO5_CLKCTRL, CM_L4PER_GPIO5_CLKCTRL_RESTORE,
+ * CM_L4PER_GPIO6_CLKCTRL, CM_L4PER_GPIO6_CLKCTRL_RESTORE, CM_WKUP_GPIO1_CLKCTRL
*/
#define OMAP4430_OPTFCLKEN_DBCLK_SHIFT 8
-#define OMAP4430_OPTFCLKEN_DBCLK_MASK BITFIELD(8, 8)
+#define OMAP4430_OPTFCLKEN_DBCLK_MASK (1 << 8)
/* Used by CM_MEMIF_DLL_CLKCTRL, CM_MEMIF_DLL_H_CLKCTRL */
#define OMAP4430_OPTFCLKEN_DLL_CLK_SHIFT 8
-#define OMAP4430_OPTFCLKEN_DLL_CLK_MASK BITFIELD(8, 8)
+#define OMAP4430_OPTFCLKEN_DLL_CLK_MASK (1 << 8)
/* Used by CM_DSS_DSS_CLKCTRL */
#define OMAP4430_OPTFCLKEN_DSSCLK_SHIFT 8
-#define OMAP4430_OPTFCLKEN_DSSCLK_MASK BITFIELD(8, 8)
+#define OMAP4430_OPTFCLKEN_DSSCLK_MASK (1 << 8)
+
+/* Used by CM_WKUP_USIM_CLKCTRL */
+#define OMAP4430_OPTFCLKEN_FCLK_SHIFT 8
+#define OMAP4430_OPTFCLKEN_FCLK_MASK (1 << 8)
/* Used by CM1_ABE_SLIMBUS_CLKCTRL */
#define OMAP4430_OPTFCLKEN_FCLK0_SHIFT 8
-#define OMAP4430_OPTFCLKEN_FCLK0_MASK BITFIELD(8, 8)
+#define OMAP4430_OPTFCLKEN_FCLK0_MASK (1 << 8)
/* Used by CM1_ABE_SLIMBUS_CLKCTRL */
#define OMAP4430_OPTFCLKEN_FCLK1_SHIFT 9
-#define OMAP4430_OPTFCLKEN_FCLK1_MASK BITFIELD(9, 9)
+#define OMAP4430_OPTFCLKEN_FCLK1_MASK (1 << 9)
/* Used by CM1_ABE_SLIMBUS_CLKCTRL */
#define OMAP4430_OPTFCLKEN_FCLK2_SHIFT 10
-#define OMAP4430_OPTFCLKEN_FCLK2_MASK BITFIELD(10, 10)
+#define OMAP4430_OPTFCLKEN_FCLK2_MASK (1 << 10)
/* Used by CM_L3INIT_USB_HOST_CLKCTRL, CM_L3INIT_USB_HOST_CLKCTRL_RESTORE */
#define OMAP4430_OPTFCLKEN_FUNC48MCLK_SHIFT 15
-#define OMAP4430_OPTFCLKEN_FUNC48MCLK_MASK BITFIELD(15, 15)
+#define OMAP4430_OPTFCLKEN_FUNC48MCLK_MASK (1 << 15)
/* Used by CM_L3INIT_USB_HOST_CLKCTRL, CM_L3INIT_USB_HOST_CLKCTRL_RESTORE */
#define OMAP4430_OPTFCLKEN_HSIC480M_P1_CLK_SHIFT 13
-#define OMAP4430_OPTFCLKEN_HSIC480M_P1_CLK_MASK BITFIELD(13, 13)
+#define OMAP4430_OPTFCLKEN_HSIC480M_P1_CLK_MASK (1 << 13)
/* Used by CM_L3INIT_USB_HOST_CLKCTRL, CM_L3INIT_USB_HOST_CLKCTRL_RESTORE */
#define OMAP4430_OPTFCLKEN_HSIC480M_P2_CLK_SHIFT 14
-#define OMAP4430_OPTFCLKEN_HSIC480M_P2_CLK_MASK BITFIELD(14, 14)
+#define OMAP4430_OPTFCLKEN_HSIC480M_P2_CLK_MASK (1 << 14)
/* Used by CM_L3INIT_USB_HOST_CLKCTRL, CM_L3INIT_USB_HOST_CLKCTRL_RESTORE */
#define OMAP4430_OPTFCLKEN_HSIC60M_P1_CLK_SHIFT 11
-#define OMAP4430_OPTFCLKEN_HSIC60M_P1_CLK_MASK BITFIELD(11, 11)
+#define OMAP4430_OPTFCLKEN_HSIC60M_P1_CLK_MASK (1 << 11)
/* Used by CM_L3INIT_USB_HOST_CLKCTRL, CM_L3INIT_USB_HOST_CLKCTRL_RESTORE */
#define OMAP4430_OPTFCLKEN_HSIC60M_P2_CLK_SHIFT 12
-#define OMAP4430_OPTFCLKEN_HSIC60M_P2_CLK_MASK BITFIELD(12, 12)
+#define OMAP4430_OPTFCLKEN_HSIC60M_P2_CLK_MASK (1 << 12)
/* Used by CM_L4PER_SLIMBUS2_CLKCTRL */
#define OMAP4430_OPTFCLKEN_PER24MC_GFCLK_SHIFT 8
-#define OMAP4430_OPTFCLKEN_PER24MC_GFCLK_MASK BITFIELD(8, 8)
+#define OMAP4430_OPTFCLKEN_PER24MC_GFCLK_MASK (1 << 8)
/* Used by CM_L4PER_SLIMBUS2_CLKCTRL */
#define OMAP4430_OPTFCLKEN_PERABE24M_GFCLK_SHIFT 9
-#define OMAP4430_OPTFCLKEN_PERABE24M_GFCLK_MASK BITFIELD(9, 9)
+#define OMAP4430_OPTFCLKEN_PERABE24M_GFCLK_MASK (1 << 9)
/* Used by CM_L3INIT_USBPHYOCP2SCP_CLKCTRL */
#define OMAP4430_OPTFCLKEN_PHY_48M_SHIFT 8
-#define OMAP4430_OPTFCLKEN_PHY_48M_MASK BITFIELD(8, 8)
+#define OMAP4430_OPTFCLKEN_PHY_48M_MASK (1 << 8)
/* Used by CM_L4PER_SLIMBUS2_CLKCTRL */
#define OMAP4430_OPTFCLKEN_SLIMBUS_CLK_SHIFT 10
-#define OMAP4430_OPTFCLKEN_SLIMBUS_CLK_MASK BITFIELD(10, 10)
+#define OMAP4430_OPTFCLKEN_SLIMBUS_CLK_MASK (1 << 10)
/* Renamed from OPTFCLKEN_SLIMBUS_CLK Used by CM1_ABE_SLIMBUS_CLKCTRL */
#define OMAP4430_OPTFCLKEN_SLIMBUS_CLK_11_11_SHIFT 11
-#define OMAP4430_OPTFCLKEN_SLIMBUS_CLK_11_11_MASK BITFIELD(11, 11)
+#define OMAP4430_OPTFCLKEN_SLIMBUS_CLK_11_11_MASK (1 << 11)
/* Used by CM_DSS_DSS_CLKCTRL */
#define OMAP4430_OPTFCLKEN_SYS_CLK_SHIFT 10
-#define OMAP4430_OPTFCLKEN_SYS_CLK_MASK BITFIELD(10, 10)
+#define OMAP4430_OPTFCLKEN_SYS_CLK_MASK (1 << 10)
/* Used by CM_DSS_DSS_CLKCTRL */
#define OMAP4430_OPTFCLKEN_TV_CLK_SHIFT 11
-#define OMAP4430_OPTFCLKEN_TV_CLK_MASK BITFIELD(11, 11)
+#define OMAP4430_OPTFCLKEN_TV_CLK_MASK (1 << 11)
/* Used by CM_L3INIT_UNIPRO1_CLKCTRL */
#define OMAP4430_OPTFCLKEN_TXPHYCLK_SHIFT 8
-#define OMAP4430_OPTFCLKEN_TXPHYCLK_MASK BITFIELD(8, 8)
+#define OMAP4430_OPTFCLKEN_TXPHYCLK_MASK (1 << 8)
/* Used by CM_L3INIT_USB_TLL_CLKCTRL, CM_L3INIT_USB_TLL_CLKCTRL_RESTORE */
#define OMAP4430_OPTFCLKEN_USB_CH0_CLK_SHIFT 8
-#define OMAP4430_OPTFCLKEN_USB_CH0_CLK_MASK BITFIELD(8, 8)
+#define OMAP4430_OPTFCLKEN_USB_CH0_CLK_MASK (1 << 8)
/* Used by CM_L3INIT_USB_TLL_CLKCTRL, CM_L3INIT_USB_TLL_CLKCTRL_RESTORE */
#define OMAP4430_OPTFCLKEN_USB_CH1_CLK_SHIFT 9
-#define OMAP4430_OPTFCLKEN_USB_CH1_CLK_MASK BITFIELD(9, 9)
+#define OMAP4430_OPTFCLKEN_USB_CH1_CLK_MASK (1 << 9)
/* Used by CM_L3INIT_USB_TLL_CLKCTRL, CM_L3INIT_USB_TLL_CLKCTRL_RESTORE */
#define OMAP4430_OPTFCLKEN_USB_CH2_CLK_SHIFT 10
-#define OMAP4430_OPTFCLKEN_USB_CH2_CLK_MASK BITFIELD(10, 10)
+#define OMAP4430_OPTFCLKEN_USB_CH2_CLK_MASK (1 << 10)
/* Used by CM_L3INIT_USB_HOST_CLKCTRL, CM_L3INIT_USB_HOST_CLKCTRL_RESTORE */
#define OMAP4430_OPTFCLKEN_UTMI_P1_CLK_SHIFT 8
-#define OMAP4430_OPTFCLKEN_UTMI_P1_CLK_MASK BITFIELD(8, 8)
+#define OMAP4430_OPTFCLKEN_UTMI_P1_CLK_MASK (1 << 8)
/* Used by CM_L3INIT_USB_HOST_CLKCTRL, CM_L3INIT_USB_HOST_CLKCTRL_RESTORE */
#define OMAP4430_OPTFCLKEN_UTMI_P2_CLK_SHIFT 9
-#define OMAP4430_OPTFCLKEN_UTMI_P2_CLK_MASK BITFIELD(9, 9)
+#define OMAP4430_OPTFCLKEN_UTMI_P2_CLK_MASK (1 << 9)
/* Used by CM_L3INIT_USB_HOST_CLKCTRL, CM_L3INIT_USB_HOST_CLKCTRL_RESTORE */
#define OMAP4430_OPTFCLKEN_UTMI_P3_CLK_SHIFT 10
-#define OMAP4430_OPTFCLKEN_UTMI_P3_CLK_MASK BITFIELD(10, 10)
+#define OMAP4430_OPTFCLKEN_UTMI_P3_CLK_MASK (1 << 10)
/* Used by CM_L3INIT_USB_OTG_CLKCTRL */
#define OMAP4430_OPTFCLKEN_XCLK_SHIFT 8
-#define OMAP4430_OPTFCLKEN_XCLK_MASK BITFIELD(8, 8)
+#define OMAP4430_OPTFCLKEN_XCLK_MASK (1 << 8)
-/* Used by CM_EMU_OVERRIDE_DPLL_PER, CM_EMU_OVERRIDE_DPLL_CORE */
+/* Used by CM_EMU_OVERRIDE_DPLL_CORE */
#define OMAP4430_OVERRIDE_ENABLE_SHIFT 19
-#define OMAP4430_OVERRIDE_ENABLE_MASK BITFIELD(19, 19)
+#define OMAP4430_OVERRIDE_ENABLE_MASK (1 << 19)
/* Used by CM_CLKSEL_ABE */
#define OMAP4430_PAD_CLKS_GATE_SHIFT 8
-#define OMAP4430_PAD_CLKS_GATE_MASK BITFIELD(8, 8)
+#define OMAP4430_PAD_CLKS_GATE_MASK (1 << 8)
/* Used by CM_CORE_DVFS_CURRENT, CM_IVA_DVFS_CURRENT */
#define OMAP4430_PERF_CURRENT_SHIFT 0
-#define OMAP4430_PERF_CURRENT_MASK BITFIELD(0, 7)
+#define OMAP4430_PERF_CURRENT_MASK (0xff << 0)
/*
* Used by CM_CORE_DVFS_PERF1, CM_CORE_DVFS_PERF2, CM_CORE_DVFS_PERF3,
@@ -1316,159 +1355,173 @@
* CM_IVA_DVFS_PERF_TESLA
*/
#define OMAP4430_PERF_REQ_SHIFT 0
-#define OMAP4430_PERF_REQ_MASK BITFIELD(0, 7)
-
-/* Used by CM_EMU_OVERRIDE_DPLL_PER */
-#define OMAP4430_PER_DPLL_EMU_DIV_SHIFT 0
-#define OMAP4430_PER_DPLL_EMU_DIV_MASK BITFIELD(0, 6)
-
-/* Used by CM_EMU_OVERRIDE_DPLL_PER */
-#define OMAP4430_PER_DPLL_EMU_MULT_SHIFT 8
-#define OMAP4430_PER_DPLL_EMU_MULT_MASK BITFIELD(8, 18)
+#define OMAP4430_PERF_REQ_MASK (0xff << 0)
/* Used by CM_RESTORE_ST */
#define OMAP4430_PHASE1_COMPLETED_SHIFT 0
-#define OMAP4430_PHASE1_COMPLETED_MASK BITFIELD(0, 0)
+#define OMAP4430_PHASE1_COMPLETED_MASK (1 << 0)
/* Used by CM_RESTORE_ST */
#define OMAP4430_PHASE2A_COMPLETED_SHIFT 1
-#define OMAP4430_PHASE2A_COMPLETED_MASK BITFIELD(1, 1)
+#define OMAP4430_PHASE2A_COMPLETED_MASK (1 << 1)
/* Used by CM_RESTORE_ST */
#define OMAP4430_PHASE2B_COMPLETED_SHIFT 2
-#define OMAP4430_PHASE2B_COMPLETED_MASK BITFIELD(2, 2)
+#define OMAP4430_PHASE2B_COMPLETED_MASK (1 << 2)
/* Used by CM_EMU_DEBUGSS_CLKCTRL */
#define OMAP4430_PMD_STM_MUX_CTRL_SHIFT 20
-#define OMAP4430_PMD_STM_MUX_CTRL_MASK BITFIELD(20, 21)
+#define OMAP4430_PMD_STM_MUX_CTRL_MASK (0x3 << 20)
/* Used by CM_EMU_DEBUGSS_CLKCTRL */
#define OMAP4430_PMD_TRACE_MUX_CTRL_SHIFT 22
-#define OMAP4430_PMD_TRACE_MUX_CTRL_MASK BITFIELD(22, 23)
+#define OMAP4430_PMD_TRACE_MUX_CTRL_MASK (0x3 << 22)
-/* Used by CM_DYN_DEP_PRESCAL */
+/* Used by CM_DYN_DEP_PRESCAL, CM_DYN_DEP_PRESCAL_RESTORE */
#define OMAP4430_PRESCAL_SHIFT 0
-#define OMAP4430_PRESCAL_MASK BITFIELD(0, 5)
+#define OMAP4430_PRESCAL_MASK (0x3f << 0)
-/* Used by REVISION_CM2, REVISION_CM1 */
-#define OMAP4430_REV_SHIFT 0
-#define OMAP4430_REV_MASK BITFIELD(0, 7)
+/* Used by REVISION_CM1, REVISION_CM2 */
+#define OMAP4430_R_RTL_SHIFT 11
+#define OMAP4430_R_RTL_MASK (0x1f << 11)
/*
- * Used by CM_L3INIT_USB_HOST_CLKCTRL, CM_L3INIT_USB_TLL_CLKCTRL,
- * CM_L3INIT_USB_HOST_CLKCTRL_RESTORE, CM_L3INIT_USB_TLL_CLKCTRL_RESTORE
+ * Used by CM_L3INIT_USB_HOST_CLKCTRL, CM_L3INIT_USB_HOST_CLKCTRL_RESTORE,
+ * CM_L3INIT_USB_TLL_CLKCTRL, CM_L3INIT_USB_TLL_CLKCTRL_RESTORE
*/
#define OMAP4430_SAR_MODE_SHIFT 4
-#define OMAP4430_SAR_MODE_MASK BITFIELD(4, 4)
+#define OMAP4430_SAR_MODE_MASK (1 << 4)
/* Used by CM_SCALE_FCLK */
#define OMAP4430_SCALE_FCLK_SHIFT 0
-#define OMAP4430_SCALE_FCLK_MASK BITFIELD(0, 0)
+#define OMAP4430_SCALE_FCLK_MASK (1 << 0)
+
+/* Used by REVISION_CM1, REVISION_CM2 */
+#define OMAP4430_SCHEME_SHIFT 30
+#define OMAP4430_SCHEME_MASK (0x3 << 30)
-/* Used by CM_L4CFG_DYNAMICDEP */
+/* Used by CM_L4CFG_DYNAMICDEP, CM_L4CFG_DYNAMICDEP_RESTORE */
#define OMAP4430_SDMA_DYNDEP_SHIFT 11
-#define OMAP4430_SDMA_DYNDEP_MASK BITFIELD(11, 11)
+#define OMAP4430_SDMA_DYNDEP_MASK (1 << 11)
/* Used by CM_DUCATI_STATICDEP, CM_MPU_STATICDEP */
#define OMAP4430_SDMA_STATDEP_SHIFT 11
-#define OMAP4430_SDMA_STATDEP_MASK BITFIELD(11, 11)
+#define OMAP4430_SDMA_STATDEP_MASK (1 << 11)
/* Used by CM_CLKSEL_ABE */
#define OMAP4430_SLIMBUS_CLK_GATE_SHIFT 10
-#define OMAP4430_SLIMBUS_CLK_GATE_MASK BITFIELD(10, 10)
+#define OMAP4430_SLIMBUS_CLK_GATE_MASK (1 << 10)
/*
- * Used by CM_EMU_DEBUGSS_CLKCTRL, CM_D2D_SAD2D_CLKCTRL,
- * CM_DUCATI_DUCATI_CLKCTRL, CM_SDMA_SDMA_CLKCTRL, CM_GFX_GFX_CLKCTRL,
- * CM_L4SEC_CRYPTODMA_CLKCTRL, CM_L3INIT_CCPTX_CLKCTRL, CM_L3INIT_EMAC_CLKCTRL,
+ * Used by CM1_ABE_AESS_CLKCTRL, CM_CAM_FDIF_CLKCTRL, CM_CAM_ISS_CLKCTRL,
+ * CM_D2D_SAD2D_CLKCTRL, CM_DSS_DEISS_CLKCTRL, CM_DSS_DSS_CLKCTRL,
+ * CM_DUCATI_DUCATI_CLKCTRL, CM_EMU_DEBUGSS_CLKCTRL, CM_GFX_GFX_CLKCTRL,
+ * CM_IVAHD_IVAHD_CLKCTRL, CM_L3INIT_CCPTX_CLKCTRL, CM_L3INIT_EMAC_CLKCTRL,
* CM_L3INIT_HSI_CLKCTRL, CM_L3INIT_MMC1_CLKCTRL, CM_L3INIT_MMC2_CLKCTRL,
* CM_L3INIT_MMC6_CLKCTRL, CM_L3INIT_P1500_CLKCTRL, CM_L3INIT_PCIESS_CLKCTRL,
* CM_L3INIT_SATA_CLKCTRL, CM_L3INIT_TPPSS_CLKCTRL, CM_L3INIT_UNIPRO1_CLKCTRL,
- * CM_L3INIT_USB_HOST_CLKCTRL, CM_L3INIT_USB_HOST_FS_CLKCTRL,
- * CM_L3INIT_USB_OTG_CLKCTRL, CM_L3INIT_XHPI_CLKCTRL, CM_CAM_FDIF_CLKCTRL,
- * CM_CAM_ISS_CLKCTRL, CM_L3INIT_USB_HOST_CLKCTRL_RESTORE,
- * CM_IVAHD_IVAHD_CLKCTRL, CM_DSS_DEISS_CLKCTRL, CM_DSS_DSS_CLKCTRL,
- * CM_MPU_MPU_CLKCTRL, CM_TESLA_TESLA_CLKCTRL, CM1_ABE_AESS_CLKCTRL
+ * CM_L3INIT_USB_HOST_CLKCTRL, CM_L3INIT_USB_HOST_CLKCTRL_RESTORE,
+ * CM_L3INIT_USB_HOST_FS_CLKCTRL, CM_L3INIT_USB_OTG_CLKCTRL,
+ * CM_L3INIT_XHPI_CLKCTRL, CM_L4SEC_CRYPTODMA_CLKCTRL, CM_MPU_MPU_CLKCTRL,
+ * CM_SDMA_SDMA_CLKCTRL, CM_TESLA_TESLA_CLKCTRL
*/
#define OMAP4430_STBYST_SHIFT 18
-#define OMAP4430_STBYST_MASK BITFIELD(18, 18)
+#define OMAP4430_STBYST_MASK (1 << 18)
/*
- * Used by CM_IDLEST_DPLL_PER, CM_IDLEST_DPLL_UNIPRO, CM_IDLEST_DPLL_USB,
- * CM_IDLEST_DPLL_ABE, CM_IDLEST_DPLL_CORE, CM_IDLEST_DPLL_DDRPHY,
- * CM_IDLEST_DPLL_IVA, CM_IDLEST_DPLL_MPU
+ * Used by CM_IDLEST_DPLL_ABE, CM_IDLEST_DPLL_CORE, CM_IDLEST_DPLL_DDRPHY,
+ * CM_IDLEST_DPLL_IVA, CM_IDLEST_DPLL_MPU, CM_IDLEST_DPLL_PER,
+ * CM_IDLEST_DPLL_UNIPRO, CM_IDLEST_DPLL_USB
*/
#define OMAP4430_ST_DPLL_CLK_SHIFT 0
-#define OMAP4430_ST_DPLL_CLK_MASK BITFIELD(0, 0)
+#define OMAP4430_ST_DPLL_CLK_MASK (1 << 0)
/* Used by CM_CLKDCOLDO_DPLL_USB */
#define OMAP4430_ST_DPLL_CLKDCOLDO_SHIFT 9
-#define OMAP4430_ST_DPLL_CLKDCOLDO_MASK BITFIELD(9, 9)
+#define OMAP4430_ST_DPLL_CLKDCOLDO_MASK (1 << 9)
/*
- * Used by CM_DIV_M2_DPLL_PER, CM_DIV_M2_DPLL_USB, CM_DIV_M2_DPLL_CORE_RESTORE,
- * CM_DIV_M2_DPLL_ABE, CM_DIV_M2_DPLL_CORE, CM_DIV_M2_DPLL_DDRPHY,
- * CM_DIV_M2_DPLL_MPU
+ * Used by CM_DIV_M2_DPLL_ABE, CM_DIV_M2_DPLL_CORE,
+ * CM_DIV_M2_DPLL_CORE_RESTORE, CM_DIV_M2_DPLL_DDRPHY, CM_DIV_M2_DPLL_MPU,
+ * CM_DIV_M2_DPLL_PER, CM_DIV_M2_DPLL_USB
*/
#define OMAP4430_ST_DPLL_CLKOUT_SHIFT 9
-#define OMAP4430_ST_DPLL_CLKOUT_MASK BITFIELD(9, 9)
+#define OMAP4430_ST_DPLL_CLKOUT_MASK (1 << 9)
/*
- * Used by CM_DIV_M3_DPLL_PER, CM_DIV_M3_DPLL_CORE_RESTORE, CM_DIV_M3_DPLL_ABE,
- * CM_DIV_M3_DPLL_CORE
+ * Used by CM_DIV_M3_DPLL_ABE, CM_DIV_M3_DPLL_CORE,
+ * CM_DIV_M3_DPLL_CORE_RESTORE, CM_DIV_M3_DPLL_PER
*/
#define OMAP4430_ST_DPLL_CLKOUTHIF_SHIFT 9
-#define OMAP4430_ST_DPLL_CLKOUTHIF_MASK BITFIELD(9, 9)
+#define OMAP4430_ST_DPLL_CLKOUTHIF_MASK (1 << 9)
-/* Used by CM_DIV_M2_DPLL_PER, CM_DIV_M2_DPLL_UNIPRO, CM_DIV_M2_DPLL_ABE */
+/* Used by CM_DIV_M2_DPLL_ABE, CM_DIV_M2_DPLL_PER, CM_DIV_M2_DPLL_UNIPRO */
#define OMAP4430_ST_DPLL_CLKOUTX2_SHIFT 11
-#define OMAP4430_ST_DPLL_CLKOUTX2_MASK BITFIELD(11, 11)
+#define OMAP4430_ST_DPLL_CLKOUTX2_MASK (1 << 11)
/*
- * Used by CM_DIV_M4_DPLL_PER, CM_DIV_M4_DPLL_CORE_RESTORE,
- * CM_DIV_M4_DPLL_CORE, CM_DIV_M4_DPLL_DDRPHY, CM_DIV_M4_DPLL_IVA
+ * Used by CM_DIV_M4_DPLL_CORE, CM_DIV_M4_DPLL_CORE_RESTORE,
+ * CM_DIV_M4_DPLL_DDRPHY, CM_DIV_M4_DPLL_IVA, CM_DIV_M4_DPLL_PER
*/
#define OMAP4430_ST_HSDIVIDER_CLKOUT1_SHIFT 9
-#define OMAP4430_ST_HSDIVIDER_CLKOUT1_MASK BITFIELD(9, 9)
+#define OMAP4430_ST_HSDIVIDER_CLKOUT1_MASK (1 << 9)
/*
- * Used by CM_DIV_M5_DPLL_PER, CM_DIV_M5_DPLL_CORE_RESTORE,
- * CM_DIV_M5_DPLL_CORE, CM_DIV_M5_DPLL_DDRPHY, CM_DIV_M5_DPLL_IVA
+ * Used by CM_DIV_M5_DPLL_CORE, CM_DIV_M5_DPLL_CORE_RESTORE,
+ * CM_DIV_M5_DPLL_DDRPHY, CM_DIV_M5_DPLL_IVA, CM_DIV_M5_DPLL_PER
*/
#define OMAP4430_ST_HSDIVIDER_CLKOUT2_SHIFT 9
-#define OMAP4430_ST_HSDIVIDER_CLKOUT2_MASK BITFIELD(9, 9)
+#define OMAP4430_ST_HSDIVIDER_CLKOUT2_MASK (1 << 9)
/*
- * Used by CM_DIV_M6_DPLL_PER, CM_DIV_M6_DPLL_CORE_RESTORE,
- * CM_DIV_M6_DPLL_CORE, CM_DIV_M6_DPLL_DDRPHY
+ * Used by CM_DIV_M6_DPLL_CORE, CM_DIV_M6_DPLL_CORE_RESTORE,
+ * CM_DIV_M6_DPLL_DDRPHY, CM_DIV_M6_DPLL_PER
*/
#define OMAP4430_ST_HSDIVIDER_CLKOUT3_SHIFT 9
-#define OMAP4430_ST_HSDIVIDER_CLKOUT3_MASK BITFIELD(9, 9)
+#define OMAP4430_ST_HSDIVIDER_CLKOUT3_MASK (1 << 9)
/*
- * Used by CM_DIV_M7_DPLL_PER, CM_DIV_M7_DPLL_CORE_RESTORE,
- * CM_DIV_M7_DPLL_CORE
+ * Used by CM_DIV_M7_DPLL_CORE, CM_DIV_M7_DPLL_CORE_RESTORE,
+ * CM_DIV_M7_DPLL_PER
*/
#define OMAP4430_ST_HSDIVIDER_CLKOUT4_SHIFT 9
-#define OMAP4430_ST_HSDIVIDER_CLKOUT4_MASK BITFIELD(9, 9)
+#define OMAP4430_ST_HSDIVIDER_CLKOUT4_MASK (1 << 9)
+
+/*
+ * Used by CM_IDLEST_DPLL_ABE, CM_IDLEST_DPLL_CORE, CM_IDLEST_DPLL_DDRPHY,
+ * CM_IDLEST_DPLL_IVA, CM_IDLEST_DPLL_MPU, CM_IDLEST_DPLL_PER,
+ * CM_IDLEST_DPLL_UNIPRO, CM_IDLEST_DPLL_USB
+ */
+#define OMAP4430_ST_MN_BYPASS_SHIFT 8
+#define OMAP4430_ST_MN_BYPASS_MASK (1 << 8)
/* Used by CM_SYS_CLKSEL */
#define OMAP4430_SYS_CLKSEL_SHIFT 0
-#define OMAP4430_SYS_CLKSEL_MASK BITFIELD(0, 2)
+#define OMAP4430_SYS_CLKSEL_MASK (0x7 << 0)
-/* Used by CM_L4CFG_DYNAMICDEP */
+/* Used by CM_L4CFG_DYNAMICDEP, CM_L4CFG_DYNAMICDEP_RESTORE */
#define OMAP4430_TESLA_DYNDEP_SHIFT 1
-#define OMAP4430_TESLA_DYNDEP_MASK BITFIELD(1, 1)
+#define OMAP4430_TESLA_DYNDEP_MASK (1 << 1)
/* Used by CM_DUCATI_STATICDEP, CM_MPU_STATICDEP */
#define OMAP4430_TESLA_STATDEP_SHIFT 1
-#define OMAP4430_TESLA_STATDEP_MASK BITFIELD(1, 1)
+#define OMAP4430_TESLA_STATDEP_MASK (1 << 1)
/*
- * Used by CM_EMU_DYNAMICDEP, CM_D2D_DYNAMICDEP, CM_DUCATI_DYNAMICDEP,
- * CM_L3_1_DYNAMICDEP, CM_L3_2_DYNAMICDEP, CM_L4CFG_DYNAMICDEP,
- * CM_L4PER_DYNAMICDEP, CM_MPU_DYNAMICDEP, CM_TESLA_DYNAMICDEP
+ * Used by CM_D2D_DYNAMICDEP, CM_D2D_DYNAMICDEP_RESTORE, CM_DUCATI_DYNAMICDEP,
+ * CM_EMU_DYNAMICDEP, CM_L3_1_DYNAMICDEP, CM_L3_1_DYNAMICDEP_RESTORE,
+ * CM_L3_2_DYNAMICDEP, CM_L3_2_DYNAMICDEP_RESTORE, CM_L4CFG_DYNAMICDEP,
+ * CM_L4CFG_DYNAMICDEP_RESTORE, CM_L4PER_DYNAMICDEP,
+ * CM_L4PER_DYNAMICDEP_RESTORE, CM_MPU_DYNAMICDEP, CM_TESLA_DYNAMICDEP
*/
#define OMAP4430_WINDOWSIZE_SHIFT 24
-#define OMAP4430_WINDOWSIZE_MASK BITFIELD(24, 27)
+#define OMAP4430_WINDOWSIZE_MASK (0xf << 24)
+
+/* Used by REVISION_CM1, REVISION_CM2 */
+#define OMAP4430_X_MAJOR_SHIFT 8
+#define OMAP4430_X_MAJOR_MASK (0x7 << 8)
+
+/* Used by REVISION_CM1, REVISION_CM2 */
+#define OMAP4430_Y_MINOR_SHIFT 0
+#define OMAP4430_Y_MINOR_MASK (0x3f << 0)
#endif
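The SCHEME, X_MAJOR, Y_MINOR and R_RTL fields added above describe the IP revision layout of REVISION_CM1/REVISION_CM2. A small illustrative decoder, assuming only the masks defined in this header (the struct and helper below are not part of the file):

#include <linux/types.h>

struct omap4_ip_revision {
	u8 scheme;
	u8 x_major;
	u8 y_minor;
	u8 r_rtl;
};

static inline struct omap4_ip_revision omap4_decode_revision(u32 rev)
{
	struct omap4_ip_revision r = {
		.scheme  = (rev & OMAP4430_SCHEME_MASK)  >> OMAP4430_SCHEME_SHIFT,
		.x_major = (rev & OMAP4430_X_MAJOR_MASK) >> OMAP4430_X_MAJOR_SHIFT,
		.y_minor = (rev & OMAP4430_Y_MINOR_MASK) >> OMAP4430_Y_MINOR_SHIFT,
		.r_rtl   = (rev & OMAP4430_R_RTL_MASK)   >> OMAP4430_R_RTL_SHIFT,
	};

	return r;
}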
diff --git a/arch/arm/mach-omap2/cm44xx.h b/arch/arm/mach-omap2/cm44xx.h
index 336d94889e5b..3c35a87cb90c 100644
--- a/arch/arm/mach-omap2/cm44xx.h
+++ b/arch/arm/mach-omap2/cm44xx.h
@@ -195,6 +195,42 @@
#define OMAP4_CM1_ABE_WDT3_CLKCTRL_OFFSET 0x0088
#define OMAP4430_CM1_ABE_WDT3_CLKCTRL OMAP44XX_CM1_REGADDR(OMAP4430_CM1_ABE_MOD, 0x0088)
+/* CM1.RESTORE_CM1 register offsets */
+#define OMAP4_CM_CLKSEL_CORE_RESTORE_OFFSET 0x0000
+#define OMAP4430_CM_CLKSEL_CORE_RESTORE OMAP44XX_CM1_REGADDR(OMAP4430_CM1_RESTORE_MOD, 0x0000)
+#define OMAP4_CM_DIV_M2_DPLL_CORE_RESTORE_OFFSET 0x0004
+#define OMAP4430_CM_DIV_M2_DPLL_CORE_RESTORE OMAP44XX_CM1_REGADDR(OMAP4430_CM1_RESTORE_MOD, 0x0004)
+#define OMAP4_CM_DIV_M3_DPLL_CORE_RESTORE_OFFSET 0x0008
+#define OMAP4430_CM_DIV_M3_DPLL_CORE_RESTORE OMAP44XX_CM1_REGADDR(OMAP4430_CM1_RESTORE_MOD, 0x0008)
+#define OMAP4_CM_DIV_M4_DPLL_CORE_RESTORE_OFFSET 0x000c
+#define OMAP4430_CM_DIV_M4_DPLL_CORE_RESTORE OMAP44XX_CM1_REGADDR(OMAP4430_CM1_RESTORE_MOD, 0x000c)
+#define OMAP4_CM_DIV_M5_DPLL_CORE_RESTORE_OFFSET 0x0010
+#define OMAP4430_CM_DIV_M5_DPLL_CORE_RESTORE OMAP44XX_CM1_REGADDR(OMAP4430_CM1_RESTORE_MOD, 0x0010)
+#define OMAP4_CM_DIV_M6_DPLL_CORE_RESTORE_OFFSET 0x0014
+#define OMAP4430_CM_DIV_M6_DPLL_CORE_RESTORE OMAP44XX_CM1_REGADDR(OMAP4430_CM1_RESTORE_MOD, 0x0014)
+#define OMAP4_CM_DIV_M7_DPLL_CORE_RESTORE_OFFSET 0x0018
+#define OMAP4430_CM_DIV_M7_DPLL_CORE_RESTORE OMAP44XX_CM1_REGADDR(OMAP4430_CM1_RESTORE_MOD, 0x0018)
+#define OMAP4_CM_CLKSEL_DPLL_CORE_RESTORE_OFFSET 0x001c
+#define OMAP4430_CM_CLKSEL_DPLL_CORE_RESTORE OMAP44XX_CM1_REGADDR(OMAP4430_CM1_RESTORE_MOD, 0x001c)
+#define OMAP4_CM_SSC_DELTAMSTEP_DPLL_CORE_RESTORE_OFFSET 0x0020
+#define OMAP4430_CM_SSC_DELTAMSTEP_DPLL_CORE_RESTORE OMAP44XX_CM1_REGADDR(OMAP4430_CM1_RESTORE_MOD, 0x0020)
+#define OMAP4_CM_SSC_MODFREQDIV_DPLL_CORE_RESTORE_OFFSET 0x0024
+#define OMAP4430_CM_SSC_MODFREQDIV_DPLL_CORE_RESTORE OMAP44XX_CM1_REGADDR(OMAP4430_CM1_RESTORE_MOD, 0x0024)
+#define OMAP4_CM_CLKMODE_DPLL_CORE_RESTORE_OFFSET 0x0028
+#define OMAP4430_CM_CLKMODE_DPLL_CORE_RESTORE OMAP44XX_CM1_REGADDR(OMAP4430_CM1_RESTORE_MOD, 0x0028)
+#define OMAP4_CM_SHADOW_FREQ_CONFIG2_RESTORE_OFFSET 0x002c
+#define OMAP4430_CM_SHADOW_FREQ_CONFIG2_RESTORE OMAP44XX_CM1_REGADDR(OMAP4430_CM1_RESTORE_MOD, 0x002c)
+#define OMAP4_CM_SHADOW_FREQ_CONFIG1_RESTORE_OFFSET 0x0030
+#define OMAP4430_CM_SHADOW_FREQ_CONFIG1_RESTORE OMAP44XX_CM1_REGADDR(OMAP4430_CM1_RESTORE_MOD, 0x0030)
+#define OMAP4_CM_AUTOIDLE_DPLL_CORE_RESTORE_OFFSET 0x0034
+#define OMAP4430_CM_AUTOIDLE_DPLL_CORE_RESTORE OMAP44XX_CM1_REGADDR(OMAP4430_CM1_RESTORE_MOD, 0x0034)
+#define OMAP4_CM_MPU_CLKSTCTRL_RESTORE_OFFSET 0x0038
+#define OMAP4430_CM_MPU_CLKSTCTRL_RESTORE OMAP44XX_CM1_REGADDR(OMAP4430_CM1_RESTORE_MOD, 0x0038)
+#define OMAP4_CM_CM1_PROFILING_CLKCTRL_RESTORE_OFFSET 0x003c
+#define OMAP4430_CM_CM1_PROFILING_CLKCTRL_RESTORE OMAP44XX_CM1_REGADDR(OMAP4430_CM1_RESTORE_MOD, 0x003c)
+#define OMAP4_CM_DYN_DEP_PRESCAL_RESTORE_OFFSET 0x0040
+#define OMAP4430_CM_DYN_DEP_PRESCAL_RESTORE OMAP44XX_CM1_REGADDR(OMAP4430_CM1_RESTORE_MOD, 0x0040)
+
/* CM2 */
/* CM2.OCP_SOCKET_CM2 register offsets */
@@ -252,8 +288,6 @@
#define OMAP4430_CM_SSC_DELTAMSTEP_DPLL_PER OMAP44XX_CM2_REGADDR(OMAP4430_CM2_CKGEN_MOD, 0x0068)
#define OMAP4_CM_SSC_MODFREQDIV_DPLL_PER_OFFSET 0x006c
#define OMAP4430_CM_SSC_MODFREQDIV_DPLL_PER OMAP44XX_CM2_REGADDR(OMAP4430_CM2_CKGEN_MOD, 0x006c)
-#define OMAP4_CM_EMU_OVERRIDE_DPLL_PER_OFFSET 0x0070
-#define OMAP4430_CM_EMU_OVERRIDE_DPLL_PER OMAP44XX_CM2_REGADDR(OMAP4430_CM2_CKGEN_MOD, 0x0070)
#define OMAP4_CM_CLKMODE_DPLL_USB_OFFSET 0x0080
#define OMAP4430_CM_CLKMODE_DPLL_USB OMAP44XX_CM2_REGADDR(OMAP4430_CM2_CKGEN_MOD, 0x0080)
#define OMAP4_CM_IDLEST_DPLL_USB_OFFSET 0x0084
@@ -296,6 +330,8 @@
#define OMAP4430_CM_ALWON_SR_IVA_CLKCTRL OMAP44XX_CM2_REGADDR(OMAP4430_CM2_ALWAYS_ON_MOD, 0x0030)
#define OMAP4_CM_ALWON_SR_CORE_CLKCTRL_OFFSET 0x0038
#define OMAP4430_CM_ALWON_SR_CORE_CLKCTRL OMAP44XX_CM2_REGADDR(OMAP4430_CM2_ALWAYS_ON_MOD, 0x0038)
+#define OMAP4_CM_ALWON_USBPHY_CLKCTRL_OFFSET 0x0040
+#define OMAP4430_CM_ALWON_USBPHY_CLKCTRL OMAP44XX_CM2_REGADDR(OMAP4430_CM2_ALWAYS_ON_MOD, 0x0040)
/* CM2.CORE_CM2 register offsets */
#define OMAP4_CM_L3_1_CLKSTCTRL_OFFSET 0x0000
@@ -578,4 +614,54 @@
#define OMAP4430_CM_CEFUSE_CLKSTCTRL OMAP44XX_CM2_REGADDR(OMAP4430_CM2_CEFUSE_MOD, 0x0000)
#define OMAP4_CM_CEFUSE_CEFUSE_CLKCTRL_OFFSET 0x0020
#define OMAP4430_CM_CEFUSE_CEFUSE_CLKCTRL OMAP44XX_CM2_REGADDR(OMAP4430_CM2_CEFUSE_MOD, 0x0020)
+
+/* CM2.RESTORE_CM2 register offsets */
+#define OMAP4_CM_L3_1_CLKSTCTRL_RESTORE_OFFSET 0x0000
+#define OMAP4430_CM_L3_1_CLKSTCTRL_RESTORE OMAP44XX_CM2_REGADDR(OMAP4430_CM2_RESTORE_MOD, 0x0000)
+#define OMAP4_CM_L3_2_CLKSTCTRL_RESTORE_OFFSET 0x0004
+#define OMAP4430_CM_L3_2_CLKSTCTRL_RESTORE OMAP44XX_CM2_REGADDR(OMAP4430_CM2_RESTORE_MOD, 0x0004)
+#define OMAP4_CM_L4CFG_CLKSTCTRL_RESTORE_OFFSET 0x0008
+#define OMAP4430_CM_L4CFG_CLKSTCTRL_RESTORE OMAP44XX_CM2_REGADDR(OMAP4430_CM2_RESTORE_MOD, 0x0008)
+#define OMAP4_CM_MEMIF_CLKSTCTRL_RESTORE_OFFSET 0x000c
+#define OMAP4430_CM_MEMIF_CLKSTCTRL_RESTORE OMAP44XX_CM2_REGADDR(OMAP4430_CM2_RESTORE_MOD, 0x000c)
+#define OMAP4_CM_L4PER_CLKSTCTRL_RESTORE_OFFSET 0x0010
+#define OMAP4430_CM_L4PER_CLKSTCTRL_RESTORE OMAP44XX_CM2_REGADDR(OMAP4430_CM2_RESTORE_MOD, 0x0010)
+#define OMAP4_CM_L3INIT_CLKSTCTRL_RESTORE_OFFSET 0x0014
+#define OMAP4430_CM_L3INIT_CLKSTCTRL_RESTORE OMAP44XX_CM2_REGADDR(OMAP4430_CM2_RESTORE_MOD, 0x0014)
+#define OMAP4_CM_L3INSTR_L3_3_CLKCTRL_RESTORE_OFFSET 0x0018
+#define OMAP4430_CM_L3INSTR_L3_3_CLKCTRL_RESTORE OMAP44XX_CM2_REGADDR(OMAP4430_CM2_RESTORE_MOD, 0x0018)
+#define OMAP4_CM_L3INSTR_L3_INSTR_CLKCTRL_RESTORE_OFFSET 0x001c
+#define OMAP4430_CM_L3INSTR_L3_INSTR_CLKCTRL_RESTORE OMAP44XX_CM2_REGADDR(OMAP4430_CM2_RESTORE_MOD, 0x001c)
+#define OMAP4_CM_L3INSTR_OCP_WP1_CLKCTRL_RESTORE_OFFSET 0x0020
+#define OMAP4430_CM_L3INSTR_OCP_WP1_CLKCTRL_RESTORE OMAP44XX_CM2_REGADDR(OMAP4430_CM2_RESTORE_MOD, 0x0020)
+#define OMAP4_CM_CM2_PROFILING_CLKCTRL_RESTORE_OFFSET 0x0024
+#define OMAP4430_CM_CM2_PROFILING_CLKCTRL_RESTORE OMAP44XX_CM2_REGADDR(OMAP4430_CM2_RESTORE_MOD, 0x0024)
+#define OMAP4_CM_D2D_STATICDEP_RESTORE_OFFSET 0x0028
+#define OMAP4430_CM_D2D_STATICDEP_RESTORE OMAP44XX_CM2_REGADDR(OMAP4430_CM2_RESTORE_MOD, 0x0028)
+#define OMAP4_CM_L3_1_DYNAMICDEP_RESTORE_OFFSET 0x002c
+#define OMAP4430_CM_L3_1_DYNAMICDEP_RESTORE OMAP44XX_CM2_REGADDR(OMAP4430_CM2_RESTORE_MOD, 0x002c)
+#define OMAP4_CM_L3_2_DYNAMICDEP_RESTORE_OFFSET 0x0030
+#define OMAP4430_CM_L3_2_DYNAMICDEP_RESTORE OMAP44XX_CM2_REGADDR(OMAP4430_CM2_RESTORE_MOD, 0x0030)
+#define OMAP4_CM_D2D_DYNAMICDEP_RESTORE_OFFSET 0x0034
+#define OMAP4430_CM_D2D_DYNAMICDEP_RESTORE OMAP44XX_CM2_REGADDR(OMAP4430_CM2_RESTORE_MOD, 0x0034)
+#define OMAP4_CM_L4CFG_DYNAMICDEP_RESTORE_OFFSET 0x0038
+#define OMAP4430_CM_L4CFG_DYNAMICDEP_RESTORE OMAP44XX_CM2_REGADDR(OMAP4430_CM2_RESTORE_MOD, 0x0038)
+#define OMAP4_CM_L4PER_DYNAMICDEP_RESTORE_OFFSET 0x003c
+#define OMAP4430_CM_L4PER_DYNAMICDEP_RESTORE OMAP44XX_CM2_REGADDR(OMAP4430_CM2_RESTORE_MOD, 0x003c)
+#define OMAP4_CM_L4PER_GPIO2_CLKCTRL_RESTORE_OFFSET 0x0040
+#define OMAP4430_CM_L4PER_GPIO2_CLKCTRL_RESTORE OMAP44XX_CM2_REGADDR(OMAP4430_CM2_RESTORE_MOD, 0x0040)
+#define OMAP4_CM_L4PER_GPIO3_CLKCTRL_RESTORE_OFFSET 0x0044
+#define OMAP4430_CM_L4PER_GPIO3_CLKCTRL_RESTORE OMAP44XX_CM2_REGADDR(OMAP4430_CM2_RESTORE_MOD, 0x0044)
+#define OMAP4_CM_L4PER_GPIO4_CLKCTRL_RESTORE_OFFSET 0x0048
+#define OMAP4430_CM_L4PER_GPIO4_CLKCTRL_RESTORE OMAP44XX_CM2_REGADDR(OMAP4430_CM2_RESTORE_MOD, 0x0048)
+#define OMAP4_CM_L4PER_GPIO5_CLKCTRL_RESTORE_OFFSET 0x004c
+#define OMAP4430_CM_L4PER_GPIO5_CLKCTRL_RESTORE OMAP44XX_CM2_REGADDR(OMAP4430_CM2_RESTORE_MOD, 0x004c)
+#define OMAP4_CM_L4PER_GPIO6_CLKCTRL_RESTORE_OFFSET 0x0050
+#define OMAP4430_CM_L4PER_GPIO6_CLKCTRL_RESTORE OMAP44XX_CM2_REGADDR(OMAP4430_CM2_RESTORE_MOD, 0x0050)
+#define OMAP4_CM_L3INIT_USB_HOST_CLKCTRL_RESTORE_OFFSET 0x0054
+#define OMAP4430_CM_L3INIT_USB_HOST_CLKCTRL_RESTORE OMAP44XX_CM2_REGADDR(OMAP4430_CM2_RESTORE_MOD, 0x0054)
+#define OMAP4_CM_L3INIT_USB_TLL_CLKCTRL_RESTORE_OFFSET 0x0058
+#define OMAP4430_CM_L3INIT_USB_TLL_CLKCTRL_RESTORE OMAP44XX_CM2_REGADDR(OMAP4430_CM2_RESTORE_MOD, 0x0058)
+#define OMAP4_CM_SDMA_STATICDEP_RESTORE_OFFSET 0x005c
+#define OMAP4430_CM_SDMA_STATICDEP_RESTORE OMAP44XX_CM2_REGADDR(OMAP4430_CM2_RESTORE_MOD, 0x005c)
#endif
diff --git a/arch/arm/mach-omap2/cm4xxx.c b/arch/arm/mach-omap2/cm4xxx.c
index b101091e95d6..f8a660a1a4a6 100644
--- a/arch/arm/mach-omap2/cm4xxx.c
+++ b/arch/arm/mach-omap2/cm4xxx.c
@@ -43,7 +43,6 @@
* using separate functional clock
* 0x3 disabled: Module is disabled and cannot be accessed
*
- * TODO: Need to handle module accessible in idle state
*/
int omap4_cm_wait_module_ready(void __iomem *clkctrl_reg)
{
@@ -52,9 +51,11 @@ int omap4_cm_wait_module_ready(void __iomem *clkctrl_reg)
if (!clkctrl_reg)
return 0;
- omap_test_timeout(((__raw_readl(clkctrl_reg) &
- OMAP4430_IDLEST_MASK) == 0),
- MAX_MODULE_READY_TIME, i);
+ omap_test_timeout((
+ ((__raw_readl(clkctrl_reg) & OMAP4430_IDLEST_MASK) == 0) ||
+ (((__raw_readl(clkctrl_reg) & OMAP4430_IDLEST_MASK) >>
+ OMAP4430_IDLEST_SHIFT) == 0x2)),
+ MAX_MODULE_READY_TIME, i);
return (i < MAX_MODULE_READY_TIME) ? 0 : -EBUSY;
}
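For clarity, the condition the reworked poll above waits for can be restated as a predicate: IDLEST == 0x0 (module fully functional) or IDLEST == 0x2 (module in idle but still usable, e.g. when it runs from a separate functional clock, per the mode list in the comment above). The helper below is an illustrative restatement, not code from the patch.

#include <linux/types.h>

static bool omap4_module_is_ready_sketch(u32 clkctrl)
{
	u32 idlest = (clkctrl & OMAP4430_IDLEST_MASK) >> OMAP4430_IDLEST_SHIFT;

	return idlest == 0x0 || idlest == 0x2;
}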
diff --git a/arch/arm/mach-omap2/common.c b/arch/arm/mach-omap2/common.c
new file mode 100644
index 000000000000..778929f7e92d
--- /dev/null
+++ b/arch/arm/mach-omap2/common.c
@@ -0,0 +1,135 @@
+/*
+ * linux/arch/arm/mach-omap2/common.c
+ *
+ * Code common to all OMAP2+ machines.
+ *
+ * Copyright (C) 2009 Texas Instruments
+ * Copyright (C) 2010 Nokia Corporation
+ * Tony Lindgren <tony@atomide.com>
+ * Added OMAP4 support - Santosh Shilimkar <santosh.shilimkar@ti.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/clk.h>
+#include <linux/io.h>
+
+#include <plat/common.h>
+#include <plat/board.h>
+#include <plat/mux.h>
+
+#include <plat/clock.h>
+
+#include "sdrc.h"
+#include "control.h"
+
+/* Global address base setup code */
+
+#if defined(CONFIG_ARCH_OMAP2) || defined(CONFIG_ARCH_OMAP3)
+
+static void __init __omap2_set_globals(struct omap_globals *omap2_globals)
+{
+ omap2_set_globals_tap(omap2_globals);
+ omap2_set_globals_sdrc(omap2_globals);
+ omap2_set_globals_control(omap2_globals);
+ omap2_set_globals_prcm(omap2_globals);
+}
+
+#endif
+
+#if defined(CONFIG_ARCH_OMAP2420)
+
+static struct omap_globals omap242x_globals = {
+ .class = OMAP242X_CLASS,
+ .tap = OMAP2_L4_IO_ADDRESS(0x48014000),
+ .sdrc = OMAP2420_SDRC_BASE,
+ .sms = OMAP2420_SMS_BASE,
+ .ctrl = OMAP242X_CTRL_BASE,
+ .prm = OMAP2420_PRM_BASE,
+ .cm = OMAP2420_CM_BASE,
+ .uart1_phys = OMAP2_UART1_BASE,
+ .uart2_phys = OMAP2_UART2_BASE,
+ .uart3_phys = OMAP2_UART3_BASE,
+};
+
+void __init omap2_set_globals_242x(void)
+{
+ __omap2_set_globals(&omap242x_globals);
+}
+#endif
+
+#if defined(CONFIG_ARCH_OMAP2430)
+
+static struct omap_globals omap243x_globals = {
+ .class = OMAP243X_CLASS,
+ .tap = OMAP2_L4_IO_ADDRESS(0x4900a000),
+ .sdrc = OMAP243X_SDRC_BASE,
+ .sms = OMAP243X_SMS_BASE,
+ .ctrl = OMAP243X_CTRL_BASE,
+ .prm = OMAP2430_PRM_BASE,
+ .cm = OMAP2430_CM_BASE,
+ .uart1_phys = OMAP2_UART1_BASE,
+ .uart2_phys = OMAP2_UART2_BASE,
+ .uart3_phys = OMAP2_UART3_BASE,
+};
+
+void __init omap2_set_globals_243x(void)
+{
+ __omap2_set_globals(&omap243x_globals);
+}
+#endif
+
+#if defined(CONFIG_ARCH_OMAP3)
+
+static struct omap_globals omap3_globals = {
+ .class = OMAP343X_CLASS,
+ .tap = OMAP2_L4_IO_ADDRESS(0x4830A000),
+ .sdrc = OMAP343X_SDRC_BASE,
+ .sms = OMAP343X_SMS_BASE,
+ .ctrl = OMAP343X_CTRL_BASE,
+ .prm = OMAP3430_PRM_BASE,
+ .cm = OMAP3430_CM_BASE,
+ .uart1_phys = OMAP3_UART1_BASE,
+ .uart2_phys = OMAP3_UART2_BASE,
+ .uart3_phys = OMAP3_UART3_BASE,
+ .uart4_phys = OMAP3_UART4_BASE, /* Only on 3630 */
+};
+
+void __init omap2_set_globals_3xxx(void)
+{
+ __omap2_set_globals(&omap3_globals);
+}
+
+void __init omap3_map_io(void)
+{
+ omap2_set_globals_3xxx();
+ omap34xx_map_common_io();
+}
+#endif
+
+#if defined(CONFIG_ARCH_OMAP4)
+static struct omap_globals omap4_globals = {
+ .class = OMAP443X_CLASS,
+ .tap = OMAP2_L4_IO_ADDRESS(OMAP443X_SCM_BASE),
+ .ctrl = OMAP443X_SCM_BASE,
+ .ctrl_pad = OMAP443X_CTRL_BASE,
+ .prm = OMAP4430_PRM_BASE,
+ .cm = OMAP4430_CM_BASE,
+ .cm2 = OMAP4430_CM2_BASE,
+ .uart1_phys = OMAP4_UART1_BASE,
+ .uart2_phys = OMAP4_UART2_BASE,
+ .uart3_phys = OMAP4_UART3_BASE,
+ .uart4_phys = OMAP4_UART4_BASE,
+};
+
+void __init omap2_set_globals_443x(void)
+{
+ omap2_set_globals_tap(&omap4_globals);
+ omap2_set_globals_control(&omap4_globals);
+ omap2_set_globals_prcm(&omap4_globals);
+}
+#endif
+
diff --git a/arch/arm/mach-omap2/control.c b/arch/arm/mach-omap2/control.c
index a8d20eef2306..1fa3294b6048 100644
--- a/arch/arm/mach-omap2/control.c
+++ b/arch/arm/mach-omap2/control.c
@@ -16,15 +16,18 @@
#include <linux/io.h>
#include <plat/common.h>
-#include <plat/control.h>
#include <plat/sdrc.h>
+
#include "cm-regbits-34xx.h"
#include "prm-regbits-34xx.h"
#include "cm.h"
#include "prm.h"
#include "sdrc.h"
+#include "pm.h"
+#include "control.h"
static void __iomem *omap2_ctrl_base;
+static void __iomem *omap4_ctrl_pad_base;
#if defined(CONFIG_ARCH_OMAP3) && defined(CONFIG_PM)
struct omap3_scratchpad {
@@ -137,6 +140,7 @@ static struct omap3_control_regs control_context;
#endif /* CONFIG_ARCH_OMAP3 && CONFIG_PM */
#define OMAP_CTRL_REGADDR(reg) (omap2_ctrl_base + (reg))
+#define OMAP4_CTRL_PAD_REGADDR(reg) (omap4_ctrl_pad_base + (reg))
void __init omap2_set_globals_control(struct omap_globals *omap2_globals)
{
@@ -145,6 +149,12 @@ void __init omap2_set_globals_control(struct omap_globals *omap2_globals)
omap2_ctrl_base = ioremap(omap2_globals->ctrl, SZ_4K);
WARN_ON(!omap2_ctrl_base);
}
+
+ /* Static mapping, never released */
+ if (omap2_globals->ctrl_pad) {
+ omap4_ctrl_pad_base = ioremap(omap2_globals->ctrl_pad, SZ_4K);
+ WARN_ON(!omap4_ctrl_pad_base);
+ }
}
void __iomem *omap_ctrl_base_get(void)
@@ -182,6 +192,23 @@ void omap_ctrl_writel(u32 val, u16 offset)
__raw_writel(val, OMAP_CTRL_REGADDR(offset));
}
+/*
+ * On OMAP4, the pad control registers are not addressable from the
+ * control core base, so the common omap_ctrl_read/write APIs break
+ * there. Hence, export separate APIs to manage the OMAP4 pad control
+ * registers. These APIs work only on OMAP4.
+ */
+
+u32 omap4_ctrl_pad_readl(u16 offset)
+{
+ return __raw_readl(OMAP4_CTRL_PAD_REGADDR(offset));
+}
+
+void omap4_ctrl_pad_writel(u32 val, u16 offset)
+{
+ __raw_writel(val, OMAP4_CTRL_PAD_REGADDR(offset));
+}
+
#if defined(CONFIG_ARCH_OMAP3) && defined(CONFIG_PM)
/*
* Clears the scratchpad contents in case of cold boot-
@@ -190,7 +217,7 @@ void omap_ctrl_writel(u32 val, u16 offset)
void omap3_clear_scratchpad_contents(void)
{
u32 max_offset = OMAP343X_SCRATCHPAD_ROM_OFFSET;
- u32 *v_addr;
+ void __iomem *v_addr;
u32 offset = 0;
v_addr = OMAP2_L4_IO_ADDRESS(OMAP343X_SCRATCHPAD_ROM);
if (prm_read_mod_reg(OMAP3430_GR_MOD, OMAP3_PRM_RSTST_OFFSET) &
@@ -206,7 +233,7 @@ void omap3_clear_scratchpad_contents(void)
/* Populate the scratchpad structure with restore structure */
void omap3_save_scratchpad_contents(void)
{
- void * __iomem scratchpad_address;
+ void __iomem *scratchpad_address;
u32 arm_context_addr;
struct omap3_scratchpad scratchpad_contents;
struct omap3_scratchpad_prcm_block prcm_block_contents;
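A short usage sketch for the omap4_ctrl_pad_readl()/omap4_ctrl_pad_writel() accessors added above; the offset and bit values passed in are placeholders rather than real pad-register definitions:

#include <linux/types.h>

static void omap4_pad_update_sketch(u16 offset, u32 set_bits)
{
	u32 v;

	v = omap4_ctrl_pad_readl(offset);
	v |= set_bits;
	omap4_ctrl_pad_writel(v, offset);
}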
diff --git a/arch/arm/plat-omap/include/plat/control.h b/arch/arm/mach-omap2/control.h
index 131bf405c2f6..b6c6b7c450b3 100644
--- a/arch/arm/plat-omap/include/plat/control.h
+++ b/arch/arm/mach-omap2/control.h
@@ -1,10 +1,10 @@
/*
- * arch/arm/plat-omap/include/mach/control.h
+ * arch/arm/mach-omap2/control.h
*
* OMAP2/3/4 System Control Module definitions
*
- * Copyright (C) 2007-2009 Texas Instruments, Inc.
- * Copyright (C) 2007-2008 Nokia Corporation
+ * Copyright (C) 2007-2010 Texas Instruments, Inc.
+ * Copyright (C) 2007-2008, 2010 Nokia Corporation
*
* Written by Paul Walmsley
*
@@ -13,10 +13,14 @@
* the Free Software Foundation.
*/
-#ifndef __ASM_ARCH_CONTROL_H
-#define __ASM_ARCH_CONTROL_H
+#ifndef __ARCH_ARM_MACH_OMAP2_CONTROL_H
+#define __ARCH_ARM_MACH_OMAP2_CONTROL_H
#include <mach/io.h>
+#include <mach/ctrl_module_core_44xx.h>
+#include <mach/ctrl_module_wkup_44xx.h>
+#include <mach/ctrl_module_pad_core_44xx.h>
+#include <mach/ctrl_module_pad_wkup_44xx.h>
#ifndef __ASSEMBLY__
#define OMAP242X_CTRL_REGADDR(reg) \
@@ -204,12 +208,6 @@
#define OMAP3_PADCONF_SAD2D_MSTANDBY 0x250
#define OMAP3_PADCONF_SAD2D_IDLEACK 0x254
-/* 44xx control status register offset */
-#define OMAP44XX_CONTROL_STATUS 0x2c4
-
-/* 44xx-only CONTROL_GENERAL register offsets */
-#define OMAP44XX_CONTROL_MMC1 0x628
-#define OMAP44XX_CONTROL_PBIAS_LITE 0x600
/*
* REVISIT: This list of registers is not comprehensive - there are more
* that should be added.
@@ -225,6 +223,8 @@
#define OMAP2_MMCSDIO1ADPCLKISEL (1 << 24) /* MMC1 loop back clock */
#define OMAP24XX_USBSTANDBYCTRL (1 << 15)
#define OMAP2_MCBSP2_CLKS_MASK (1 << 6)
+#define OMAP2_MCBSP1_FSR_MASK (1 << 4)
+#define OMAP2_MCBSP1_CLKR_MASK (1 << 3)
#define OMAP2_MCBSP1_CLKS_MASK (1 << 2)
/* CONTROL_DEVCONF1 bits */
@@ -255,23 +255,6 @@
#define OMAP2_PBIASLITEPWRDNZ0 (1 << 1)
#define OMAP2_PBIASLITEVMODE0 (1 << 0)
-/* CONTROL_PBIAS_LITE bits for OMAP4 */
-#define OMAP4_MMC1_PWRDNZ (1 << 26)
-#define OMAP4_MMC1_PBIASLITE_HIZ_MODE (1 << 25)
-#define OMAP4_MMC1_PBIASLITE_SUPPLY_HI_OUT (1 << 24)
-#define OMAP4_MMC1_PBIASLITE_VMODE_ERROR (1 << 23)
-#define OMAP4_MMC1_PBIASLITE_PWRDNZ (1 << 22)
-#define OMAP4_MMC1_PBIASLITE_VMODE (1 << 21)
-#define OMAP4_USBC1_ICUSB_PWRDNZ (1 << 20)
-
-#define OMAP4_CONTROL_SDMMC1_PUSTRENGTHGRP0 (1 << 31)
-#define OMAP4_CONTROL_SDMMC1_PUSTRENGTHGRP1 (1 << 30)
-#define OMAP4_CONTROL_SDMMC1_PUSTRENGTHGRP2 (1 << 29)
-#define OMAP4_CONTROL_SDMMC1_PUSTRENGTHGRP3 (1 << 28)
-#define OMAP4_CONTROL_SDMMC1_DR0_SPEEDCTRL (1 << 27)
-#define OMAP4_CONTROL_SDMMC1_DR1_SPEEDCTRL (1 << 26)
-#define OMAP4_CONTROL_SDMMC1_DR2_SPEEDCTRL (1 << 25)
-
/* CONTROL_PROG_IO1 bits */
#define OMAP3630_PRG_SDMMC1_SPEEDCTRL (1 << 20)
@@ -338,12 +321,12 @@
#define FEAT_L2CACHE_256KB 3
#define OMAP3_ISP_SHIFT 5
-#define OMAP3_ISP_MASK (1<< OMAP3_ISP_SHIFT)
+#define OMAP3_ISP_MASK (1 << OMAP3_ISP_SHIFT)
#define FEAT_ISP 0
#define FEAT_ISP_NONE 1
#define OMAP3_NEON_SHIFT 4
-#define OMAP3_NEON_MASK (1<< OMAP3_NEON_SHIFT)
+#define OMAP3_NEON_MASK (1 << OMAP3_NEON_SHIFT)
#define FEAT_NEON 0
#define FEAT_NEON_NONE 1
@@ -354,9 +337,11 @@ extern void __iomem *omap_ctrl_base_get(void);
extern u8 omap_ctrl_readb(u16 offset);
extern u16 omap_ctrl_readw(u16 offset);
extern u32 omap_ctrl_readl(u16 offset);
+extern u32 omap4_ctrl_pad_readl(u16 offset);
extern void omap_ctrl_writeb(u8 val, u16 offset);
extern void omap_ctrl_writew(u16 val, u16 offset);
extern void omap_ctrl_writel(u32 val, u16 offset);
+extern void omap4_ctrl_pad_writel(u32 val, u16 offset);
extern void omap3_save_scratchpad_contents(void);
extern void omap3_clear_scratchpad_contents(void);
@@ -371,11 +356,13 @@ extern void omap3_control_restore_context(void);
#define omap_ctrl_readb(x) 0
#define omap_ctrl_readw(x) 0
#define omap_ctrl_readl(x) 0
+#define omap4_ctrl_pad_readl(x) 0
#define omap_ctrl_writeb(x, y) WARN_ON(1)
#define omap_ctrl_writew(x, y) WARN_ON(1)
#define omap_ctrl_writel(x, y) WARN_ON(1)
+#define omap4_ctrl_pad_writel(x, y) WARN_ON(1)
#endif
#endif /* __ASSEMBLY__ */
-#endif /* __ASM_ARCH_CONTROL_H */
+#endif /* __ARCH_ARM_MACH_OMAP2_CONTROL_H */
diff --git a/arch/arm/mach-omap2/cpuidle34xx.c b/arch/arm/mach-omap2/cpuidle34xx.c
index 3d3d035db9af..0d50b45d041c 100644
--- a/arch/arm/mach-omap2/cpuidle34xx.c
+++ b/arch/arm/mach-omap2/cpuidle34xx.c
@@ -29,10 +29,10 @@
#include <plat/irqs.h>
#include <plat/powerdomain.h>
#include <plat/clockdomain.h>
-#include <plat/control.h>
#include <plat/serial.h>
#include "pm.h"
+#include "control.h"
#ifdef CONFIG_CPU_IDLE
@@ -60,7 +60,8 @@ struct omap3_processor_cx {
struct omap3_processor_cx omap3_power_states[OMAP3_MAX_STATES];
struct omap3_processor_cx current_cx_state;
-struct powerdomain *mpu_pd, *core_pd;
+struct powerdomain *mpu_pd, *core_pd, *per_pd;
+struct powerdomain *cam_pd;
/*
* The latencies/thresholds for various C states have
@@ -233,14 +234,60 @@ static int omap3_enter_idle_bm(struct cpuidle_device *dev,
struct cpuidle_state *state)
{
struct cpuidle_state *new_state = next_valid_state(dev, state);
+ u32 core_next_state, per_next_state = 0, per_saved_state = 0;
+ u32 cam_state;
+ struct omap3_processor_cx *cx;
+ int ret;
if ((state->flags & CPUIDLE_FLAG_CHECK_BM) && omap3_idle_bm_check()) {
BUG_ON(!dev->safe_state);
new_state = dev->safe_state;
+ goto select_state;
}
+ cx = cpuidle_get_statedata(state);
+ core_next_state = cx->core_state;
+
+ /*
+ * FIXME: we currently manage device-specific idle states
+ * for PER and CORE in combination with CPU-specific
+ * idle states. This is wrong, and device-specific
+ * idle management needs to be separated out into
+ * its own code.
+ */
+
+ /*
+ * Prevent idle completely if CAM is active.
+ * CAM does not have wakeup capability in OMAP3.
+ */
+ cam_state = pwrdm_read_pwrst(cam_pd);
+ if (cam_state == PWRDM_POWER_ON) {
+ new_state = dev->safe_state;
+ goto select_state;
+ }
+
+ /*
+ * Prevent PER off if CORE is not in retention or off as this
+ * would disable PER wakeups completely.
+ */
+ per_next_state = per_saved_state = pwrdm_read_next_pwrst(per_pd);
+ if ((per_next_state == PWRDM_POWER_OFF) &&
+ (core_next_state > PWRDM_POWER_RET))
+ per_next_state = PWRDM_POWER_RET;
+
+ /* Are we changing PER target state? */
+ if (per_next_state != per_saved_state)
+ pwrdm_set_next_pwrst(per_pd, per_next_state);
+
+select_state:
dev->last_state = new_state;
- return omap3_enter_idle(dev, new_state);
+ ret = omap3_enter_idle(dev, new_state);
+
+ /* Restore original PER state if it was modified */
+ if (per_next_state != per_saved_state)
+ pwrdm_set_next_pwrst(per_pd, per_saved_state);
+
+ return ret;
}
DEFINE_PER_CPU(struct cpuidle_device, omap3_idle_dev);
@@ -328,7 +375,8 @@ void omap_init_power_states(void)
cpuidle_params_table[OMAP3_STATE_C2].threshold;
omap3_power_states[OMAP3_STATE_C2].mpu_state = PWRDM_POWER_ON;
omap3_power_states[OMAP3_STATE_C2].core_state = PWRDM_POWER_ON;
- omap3_power_states[OMAP3_STATE_C2].flags = CPUIDLE_FLAG_TIME_VALID;
+ omap3_power_states[OMAP3_STATE_C2].flags = CPUIDLE_FLAG_TIME_VALID |
+ CPUIDLE_FLAG_CHECK_BM;
/* C3 . MPU CSWR + Core inactive */
omap3_power_states[OMAP3_STATE_C3].valid =
@@ -426,6 +474,8 @@ int __init omap3_idle_init(void)
mpu_pd = pwrdm_lookup("mpu_pwrdm");
core_pd = pwrdm_lookup("core_pwrdm");
+ per_pd = pwrdm_lookup("per_pwrdm");
+ cam_pd = pwrdm_lookup("cam_pwrdm");
omap_init_power_states();
cpuidle_register_driver(&omap3_idle_driver);
diff --git a/arch/arm/mach-omap2/devices.c b/arch/arm/mach-omap2/devices.c
index b27e7cbb3f29..5a0c148e23bc 100644
--- a/arch/arm/mach-omap2/devices.c
+++ b/arch/arm/mach-omap2/devices.c
@@ -9,12 +9,12 @@
* (at your option) any later version.
*/
-#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/platform_device.h>
#include <linux/io.h>
#include <linux/clk.h>
+#include <linux/err.h>
#include <mach/hardware.h>
#include <mach/irqs.h>
@@ -22,14 +22,17 @@
#include <asm/mach/map.h>
#include <asm/pmu.h>
-#include <plat/control.h>
#include <plat/tc.h>
#include <plat/board.h>
+#include <plat/mcbsp.h>
#include <mach/gpio.h>
#include <plat/mmc.h>
#include <plat/dma.h>
+#include <plat/omap_hwmod.h>
+#include <plat/omap_device.h>
#include "mux.h"
+#include "control.h"
#if defined(CONFIG_VIDEO_OMAP2) || defined(CONFIG_VIDEO_OMAP2_MODULE)
@@ -235,6 +238,43 @@ static inline void omap_init_mbox(void) { }
static inline void omap_init_sti(void) {}
+#if defined(CONFIG_SND_SOC) || defined(CONFIG_SND_SOC_MODULE)
+
+static struct platform_device omap_pcm = {
+ .name = "omap-pcm-audio",
+ .id = -1,
+};
+
+/*
+ * OMAP2420 has 2 McBSP ports
+ * OMAP2430 has 5 McBSP ports
+ * OMAP3 has 5 McBSP ports
+ * OMAP4 has 4 McBSP ports
+ */
+OMAP_MCBSP_PLATFORM_DEVICE(1);
+OMAP_MCBSP_PLATFORM_DEVICE(2);
+OMAP_MCBSP_PLATFORM_DEVICE(3);
+OMAP_MCBSP_PLATFORM_DEVICE(4);
+OMAP_MCBSP_PLATFORM_DEVICE(5);
+
+static void omap_init_audio(void)
+{
+ platform_device_register(&omap_mcbsp1);
+ platform_device_register(&omap_mcbsp2);
+ if (cpu_is_omap243x() || cpu_is_omap34xx() || cpu_is_omap44xx()) {
+ platform_device_register(&omap_mcbsp3);
+ platform_device_register(&omap_mcbsp4);
+ }
+ if (cpu_is_omap243x() || cpu_is_omap34xx())
+ platform_device_register(&omap_mcbsp5);
+
+ platform_device_register(&omap_pcm);
+}
+
+#else
+static inline void omap_init_audio(void) {}
+#endif
+
#if defined(CONFIG_SPI_OMAP24XX) || defined(CONFIG_SPI_OMAP24XX_MODULE)
#include <plat/mcspi.h>
@@ -500,7 +540,7 @@ static inline void omap_init_sham(void) { }
#if defined(CONFIG_CRYPTO_DEV_OMAP_AES) || defined(CONFIG_CRYPTO_DEV_OMAP_AES_MODULE)
-#ifdef CONFIG_ARCH_OMAP24XX
+#ifdef CONFIG_ARCH_OMAP2
static struct resource omap2_aes_resources[] = {
{
.start = OMAP24XX_SEC_AES_BASE,
@@ -522,7 +562,7 @@ static int omap2_aes_resources_sz = ARRAY_SIZE(omap2_aes_resources);
#define omap2_aes_resources_sz 0
#endif
-#ifdef CONFIG_ARCH_OMAP34XX
+#ifdef CONFIG_ARCH_OMAP3
static struct resource omap3_aes_resources[] = {
{
.start = OMAP34XX_SEC_AES_BASE,
@@ -694,7 +734,7 @@ static inline void omap2_mmc_mux(struct omap_mmc_platform_data *mmc_controller,
omap_mux_init_signal("sdmmc_dat0", 0);
omap_mux_init_signal("sdmmc_dat_dir0", 0);
omap_mux_init_signal("sdmmc_cmd_dir", 0);
- if (mmc_controller->slots[0].wires == 4) {
+ if (mmc_controller->slots[0].caps & MMC_CAP_4_BIT_DATA) {
omap_mux_init_signal("sdmmc_dat1", 0);
omap_mux_init_signal("sdmmc_dat2", 0);
omap_mux_init_signal("sdmmc_dat3", 0);
@@ -722,8 +762,8 @@ static inline void omap2_mmc_mux(struct omap_mmc_platform_data *mmc_controller,
OMAP_PIN_INPUT_PULLUP);
omap_mux_init_signal("sdmmc1_dat0",
OMAP_PIN_INPUT_PULLUP);
- if (mmc_controller->slots[0].wires == 4 ||
- mmc_controller->slots[0].wires == 8) {
+ if (mmc_controller->slots[0].caps &
+ (MMC_CAP_4_BIT_DATA | MMC_CAP_8_BIT_DATA)) {
omap_mux_init_signal("sdmmc1_dat1",
OMAP_PIN_INPUT_PULLUP);
omap_mux_init_signal("sdmmc1_dat2",
@@ -731,7 +771,8 @@ static inline void omap2_mmc_mux(struct omap_mmc_platform_data *mmc_controller,
omap_mux_init_signal("sdmmc1_dat3",
OMAP_PIN_INPUT_PULLUP);
}
- if (mmc_controller->slots[0].wires == 8) {
+ if (mmc_controller->slots[0].caps &
+ MMC_CAP_8_BIT_DATA) {
omap_mux_init_signal("sdmmc1_dat4",
OMAP_PIN_INPUT_PULLUP);
omap_mux_init_signal("sdmmc1_dat5",
@@ -755,8 +796,8 @@ static inline void omap2_mmc_mux(struct omap_mmc_platform_data *mmc_controller,
* For 8 wire configurations, Lines DAT4, 5, 6 and 7 need to be muxed
* in the board-*.c files
*/
- if (mmc_controller->slots[0].wires == 4 ||
- mmc_controller->slots[0].wires == 8) {
+ if (mmc_controller->slots[0].caps &
+ (MMC_CAP_4_BIT_DATA | MMC_CAP_8_BIT_DATA)) {
omap_mux_init_signal("sdmmc2_dat1",
OMAP_PIN_INPUT_PULLUP);
omap_mux_init_signal("sdmmc2_dat2",
@@ -764,7 +805,8 @@ static inline void omap2_mmc_mux(struct omap_mmc_platform_data *mmc_controller,
omap_mux_init_signal("sdmmc2_dat3",
OMAP_PIN_INPUT_PULLUP);
}
- if (mmc_controller->slots[0].wires == 8) {
+ if (mmc_controller->slots[0].caps &
+ MMC_CAP_8_BIT_DATA) {
omap_mux_init_signal("sdmmc2_dat4.sdmmc2_dat4",
OMAP_PIN_INPUT_PULLUP);
omap_mux_init_signal("sdmmc2_dat5.sdmmc2_dat5",
@@ -815,13 +857,13 @@ void __init omap2_init_mmc(struct omap_mmc_platform_data **mmc_data,
case 3:
if (!cpu_is_omap44xx())
return;
- base = OMAP4_MMC4_BASE + OMAP4_MMC_REG_OFFSET;
+ base = OMAP4_MMC4_BASE;
irq = OMAP44XX_IRQ_MMC4;
break;
case 4:
if (!cpu_is_omap44xx())
return;
- base = OMAP4_MMC5_BASE + OMAP4_MMC_REG_OFFSET;
+ base = OMAP4_MMC5_BASE;
irq = OMAP44XX_IRQ_MMC5;
break;
default:
@@ -832,10 +874,8 @@ void __init omap2_init_mmc(struct omap_mmc_platform_data **mmc_data,
size = OMAP2420_MMC_SIZE;
name = "mmci-omap";
} else if (cpu_is_omap44xx()) {
- if (i < 3) {
- base += OMAP4_MMC_REG_OFFSET;
+ if (i < 3)
irq += OMAP44XX_IRQ_GIC_START;
- }
size = OMAP4_HSMMC_SIZE;
name = "mmci-omap-hs";
} else {
@@ -911,12 +951,74 @@ static inline void omap_init_vout(void) {}
/*-------------------------------------------------------------------------*/
+/*
+ * In order to avoid any assumptions from the bootloader regarding WDT
+ * settings, the WDT module is reset during init. This also enables the
+ * watchdog timer, so the watchdog must be disabled again after the WDT
+ * reset during init. Otherwise the system would reboot as per the default
+ * watchdog timer register settings.
+ */
+#define OMAP_WDT_WPS (0x34)
+#define OMAP_WDT_SPR (0x48)
+
+static int omap2_disable_wdt(struct omap_hwmod *oh, void *unused)
+{
+ void __iomem *base;
+ int ret;
+
+ if (!oh) {
+ pr_err("%s: Could not look up wdtimer_hwmod\n", __func__);
+ return -EINVAL;
+ }
+
+ base = omap_hwmod_get_mpu_rt_va(oh);
+ if (!base) {
+ pr_err("%s: Could not get the base address for %s\n",
+ oh->name, __func__);
+ return -EINVAL;
+ }
+
+ /* Enable the clocks before accessing the WDT registers */
+ ret = omap_hwmod_enable(oh);
+ if (ret) {
+ pr_err("%s: Could not enable clocks for %s\n",
+ oh->name, __func__);
+ return ret;
+ }
+
+ /* sequence required to disable watchdog */
+ __raw_writel(0xAAAA, base + OMAP_WDT_SPR);
+ while (__raw_readl(base + OMAP_WDT_WPS) & 0x10)
+ cpu_relax();
+
+ __raw_writel(0x5555, base + OMAP_WDT_SPR);
+ while (__raw_readl(base + OMAP_WDT_WPS) & 0x10)
+ cpu_relax();
+
+ ret = omap_hwmod_idle(oh);
+ if (ret)
+ pr_err("%s: Could not disable clocks for %s\n",
+ oh->name, __func__);
+
+ return ret;
+}
+
+static void __init omap_disable_wdt(void)
+{
+ if (cpu_class_is_omap2())
+ omap_hwmod_for_each_by_class("wd_timer",
+ omap2_disable_wdt, NULL);
+ return;
+}
+
static int __init omap2_init_devices(void)
{
/* please keep these calls, and their implementations above,
* in alphabetical order so they're easier to sort through.
*/
+ omap_disable_wdt();
omap_hsmmc_reset();
+ omap_init_audio();
omap_init_camera();
omap_init_mbox();
omap_init_mcspi();
@@ -930,3 +1032,39 @@ static int __init omap2_init_devices(void)
return 0;
}
arch_initcall(omap2_init_devices);
+
+#if defined(CONFIG_OMAP_WATCHDOG) || defined(CONFIG_OMAP_WATCHDOG_MODULE)
+struct omap_device_pm_latency omap_wdt_latency[] = {
+ [0] = {
+ .deactivate_func = omap_device_idle_hwmods,
+ .activate_func = omap_device_enable_hwmods,
+ .flags = OMAP_DEVICE_LATENCY_AUTO_ADJUST,
+ },
+};
+
+static int __init omap_init_wdt(void)
+{
+ int id = -1;
+ struct omap_device *od;
+ struct omap_hwmod *oh;
+ char *oh_name = "wd_timer2";
+ char *dev_name = "omap_wdt";
+
+ if (!cpu_class_is_omap2())
+ return 0;
+
+ oh = omap_hwmod_lookup(oh_name);
+ if (!oh) {
+ pr_err("Could not look up wd_timer%d hwmod\n", id);
+ return -EINVAL;
+ }
+
+ od = omap_device_build(dev_name, id, oh, NULL, 0,
+ omap_wdt_latency,
+ ARRAY_SIZE(omap_wdt_latency), 0);
+ WARN(IS_ERR(od), "Can't build omap_device for %s:%s.\n",
+ dev_name, oh->name);
+ return 0;
+}
+subsys_initcall(omap_init_wdt);
+#endif
diff --git a/arch/arm/mach-omap2/dsp.c b/arch/arm/mach-omap2/dsp.c
new file mode 100644
index 000000000000..6feeeae6c21b
--- /dev/null
+++ b/arch/arm/mach-omap2/dsp.c
@@ -0,0 +1,85 @@
+/*
+ * TI's OMAP DSP platform device registration
+ *
+ * Copyright (C) 2005-2006 Texas Instruments, Inc.
+ * Copyright (C) 2009 Nokia Corporation
+ *
+ * Written by Hiroshi DOYU <Hiroshi.DOYU@nokia.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/platform_device.h>
+#include "prm.h"
+#include "cm.h"
+#ifdef CONFIG_BRIDGE_DVFS
+#include <plat/omap-pm.h>
+#endif
+
+#include <plat/dsp.h>
+
+extern phys_addr_t omap_dsp_get_mempool_base(void);
+
+static struct platform_device *omap_dsp_pdev;
+
+static struct omap_dsp_platform_data omap_dsp_pdata __initdata = {
+#ifdef CONFIG_BRIDGE_DVFS
+ .dsp_set_min_opp = omap_pm_dsp_set_min_opp,
+ .dsp_get_opp = omap_pm_dsp_get_opp,
+ .cpu_set_freq = omap_pm_cpu_set_freq,
+ .cpu_get_freq = omap_pm_cpu_get_freq,
+#endif
+ .dsp_prm_read = prm_read_mod_reg,
+ .dsp_prm_write = prm_write_mod_reg,
+ .dsp_prm_rmw_bits = prm_rmw_mod_reg_bits,
+ .dsp_cm_read = cm_read_mod_reg,
+ .dsp_cm_write = cm_write_mod_reg,
+ .dsp_cm_rmw_bits = cm_rmw_mod_reg_bits,
+};
+
+static int __init omap_dsp_init(void)
+{
+ struct platform_device *pdev;
+ int err = -ENOMEM;
+ struct omap_dsp_platform_data *pdata = &omap_dsp_pdata;
+
+ pdata->phys_mempool_base = omap_dsp_get_mempool_base();
+
+ if (pdata->phys_mempool_base) {
+ pdata->phys_mempool_size = CONFIG_TIDSPBRIDGE_MEMPOOL_SIZE;
+ pr_info("%s: %x bytes @ %x\n", __func__,
+ pdata->phys_mempool_size, pdata->phys_mempool_base);
+ }
+
+ pdev = platform_device_alloc("omap-dsp", -1);
+ if (!pdev)
+ goto err_out;
+
+ err = platform_device_add_data(pdev, pdata, sizeof(*pdata));
+ if (err)
+ goto err_out;
+
+ err = platform_device_add(pdev);
+ if (err)
+ goto err_out;
+
+ omap_dsp_pdev = pdev;
+ return 0;
+
+err_out:
+ platform_device_put(pdev);
+ return err;
+}
+module_init(omap_dsp_init);
+
+static void __exit omap_dsp_exit(void)
+{
+ platform_device_unregister(omap_dsp_pdev);
+}
+module_exit(omap_dsp_exit);
+
+MODULE_AUTHOR("Hiroshi DOYU");
+MODULE_DESCRIPTION("TI's OMAP DSP platform device registration");
+MODULE_LICENSE("GPL");
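
Note: platform_device_add_data() above copies the omap_dsp_platform_data into the newly created device, so whichever driver binds to the "omap-dsp" device can read it back through its platform_data pointer. A minimal, hypothetical driver-side sketch, not part of this patch (the probe name and the use of the callbacks are illustrative only):

#include <linux/platform_device.h>
#include <plat/dsp.h>

/* Hypothetical consumer of the "omap-dsp" device registered above. */
static int omap_dsp_drv_probe(struct platform_device *pdev)
{
	struct omap_dsp_platform_data *pdata = pdev->dev.platform_data;

	if (!pdata)
		return -ENODEV;

	/* pdata->dsp_prm_read()/pdata->dsp_cm_write() etc. are now usable */
	return 0;
}
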
diff --git a/arch/arm/mach-omap2/gpmc-smsc911x.c b/arch/arm/mach-omap2/gpmc-smsc911x.c
new file mode 100644
index 000000000000..703f150dd01d
--- /dev/null
+++ b/arch/arm/mach-omap2/gpmc-smsc911x.c
@@ -0,0 +1,113 @@
+/*
+ * linux/arch/arm/mach-omap2/gpmc-smsc911x.c
+ *
+ * Copyright (C) 2009 Li-Pro.Net
+ * Stephan Linz <linz@li-pro.net>
+ *
+ * Modified from linux/arch/arm/mach-omap2/gpmc-smc91x.c
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/kernel.h>
+#include <linux/platform_device.h>
+#include <linux/gpio.h>
+#include <linux/delay.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/smsc911x.h>
+
+#include <plat/board.h>
+#include <plat/gpmc.h>
+#include <plat/gpmc-smsc911x.h>
+
+static struct omap_smsc911x_platform_data *gpmc_cfg;
+
+static struct resource gpmc_smsc911x_resources[] = {
+ [0] = {
+ .flags = IORESOURCE_MEM,
+ },
+ [1] = {
+ .flags = IORESOURCE_IRQ,
+ },
+};
+
+static struct smsc911x_platform_config gpmc_smsc911x_config = {
+ .phy_interface = PHY_INTERFACE_MODE_MII,
+ .irq_polarity = SMSC911X_IRQ_POLARITY_ACTIVE_LOW,
+ .irq_type = SMSC911X_IRQ_TYPE_OPEN_DRAIN,
+ .flags = SMSC911X_USE_16BIT,
+};
+
+static struct platform_device gpmc_smsc911x_device = {
+ .name = "smsc911x",
+ .id = -1,
+ .num_resources = ARRAY_SIZE(gpmc_smsc911x_resources),
+ .resource = gpmc_smsc911x_resources,
+ .dev = {
+ .platform_data = &gpmc_smsc911x_config,
+ },
+};
+
+/*
+ * Initialize smsc911x device connected to the GPMC. Note that we
+ * assume that pin multiplexing is done in the board-*.c file,
+ * or in the bootloader.
+ */
+void __init gpmc_smsc911x_init(struct omap_smsc911x_platform_data *board_data)
+{
+ unsigned long cs_mem_base;
+ int ret;
+
+ gpmc_cfg = board_data;
+
+ if (gpmc_cs_request(gpmc_cfg->cs, SZ_16M, &cs_mem_base) < 0) {
+ printk(KERN_ERR "Failed to request GPMC mem for smsc911x\n");
+ return;
+ }
+
+ gpmc_smsc911x_resources[0].start = cs_mem_base + 0x0;
+ gpmc_smsc911x_resources[0].end = cs_mem_base + 0xff;
+
+ if (gpio_request(gpmc_cfg->gpio_irq, "smsc911x irq") < 0) {
+ printk(KERN_ERR "Failed to request GPIO%d for smsc911x IRQ\n",
+ gpmc_cfg->gpio_irq);
+ goto free1;
+ }
+
+ gpio_direction_input(gpmc_cfg->gpio_irq);
+ gpmc_smsc911x_resources[1].start = gpio_to_irq(gpmc_cfg->gpio_irq);
+ gpmc_smsc911x_resources[1].flags |=
+ (gpmc_cfg->flags & IRQF_TRIGGER_MASK);
+
+ if (gpio_is_valid(gpmc_cfg->gpio_reset)) {
+ ret = gpio_request(gpmc_cfg->gpio_reset, "smsc911x reset");
+ if (ret) {
+ printk(KERN_ERR "Failed to request GPIO%d for smsc911x reset\n",
+ gpmc_cfg->gpio_reset);
+ goto free2;
+ }
+
+ gpio_direction_output(gpmc_cfg->gpio_reset, 1);
+ gpio_set_value(gpmc_cfg->gpio_reset, 0);
+ msleep(100);
+ gpio_set_value(gpmc_cfg->gpio_reset, 1);
+ }
+
+ if (platform_device_register(&gpmc_smsc911x_device) < 0) {
+ printk(KERN_ERR "Unable to register smsc911x device\n");
+ gpio_free(gpmc_cfg->gpio_reset);
+ goto free2;
+ }
+
+ return;
+
+free2:
+ gpio_free(gpmc_cfg->gpio_irq);
+free1:
+ gpmc_cs_free(gpmc_cfg->cs);
+
+ printk(KERN_ERR "Could not initialize smsc911x\n");
+}
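
Note: a typical caller is a board-*.c file that fills in an omap_smsc911x_platform_data and invokes gpmc_smsc911x_init() from its peripheral-init code. A minimal sketch under that assumption; the chip-select and GPIO numbers below are placeholders, not taken from this patch:

#include <linux/init.h>
#include <linux/interrupt.h>
#include <plat/gpmc-smsc911x.h>

/* Placeholder wiring; real boards supply their own CS and GPIO numbers. */
static struct omap_smsc911x_platform_data board_smsc911x_cfg = {
	.cs		= 5,
	.gpio_irq	= 176,
	.gpio_reset	= 64,
	.flags		= IRQF_TRIGGER_LOW,
};

static void __init board_ethernet_init(void)
{
	gpmc_smsc911x_init(&board_smsc911x_cfg);
}
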
diff --git a/arch/arm/mach-omap2/hsmmc.c b/arch/arm/mach-omap2/hsmmc.c
index c8f647b6205e..34272e4863fd 100644
--- a/arch/arm/mach-omap2/hsmmc.c
+++ b/arch/arm/mach-omap2/hsmmc.c
@@ -14,11 +14,11 @@
#include <linux/string.h>
#include <linux/delay.h>
#include <mach/hardware.h>
-#include <plat/control.h>
#include <plat/mmc.h>
#include <plat/omap-pm.h>
#include "hsmmc.h"
+#include "control.h"
#if defined(CONFIG_MMC_OMAP_HS) || defined(CONFIG_MMC_OMAP_HS_MODULE)
@@ -135,10 +135,11 @@ static void omap4_hsmmc1_before_set_reg(struct device *dev, int slot,
*
* FIXME handle VMMC1A as needed ...
*/
- reg = omap_ctrl_readl(control_pbias_offset);
- reg &= ~(OMAP4_MMC1_PBIASLITE_PWRDNZ | OMAP4_MMC1_PWRDNZ |
- OMAP4_USBC1_ICUSB_PWRDNZ);
- omap_ctrl_writel(reg, control_pbias_offset);
+ reg = omap4_ctrl_pad_readl(control_pbias_offset);
+ reg &= ~(OMAP4_MMC1_PBIASLITE_PWRDNZ_MASK |
+ OMAP4_MMC1_PWRDNZ_MASK |
+ OMAP4_USBC1_ICUSB_PWRDNZ_MASK);
+ omap4_ctrl_pad_writel(reg, control_pbias_offset);
}
static void omap4_hsmmc1_after_set_reg(struct device *dev, int slot,
@@ -147,30 +148,33 @@ static void omap4_hsmmc1_after_set_reg(struct device *dev, int slot,
u32 reg;
if (power_on) {
- reg = omap_ctrl_readl(control_pbias_offset);
- reg |= OMAP4_MMC1_PBIASLITE_PWRDNZ;
+ reg = omap4_ctrl_pad_readl(control_pbias_offset);
+ reg |= OMAP4_MMC1_PBIASLITE_PWRDNZ_MASK;
if ((1 << vdd) <= MMC_VDD_165_195)
- reg &= ~OMAP4_MMC1_PBIASLITE_VMODE;
+ reg &= ~OMAP4_MMC1_PBIASLITE_VMODE_MASK;
else
- reg |= OMAP4_MMC1_PBIASLITE_VMODE;
- reg |= (OMAP4_MMC1_PBIASLITE_PWRDNZ | OMAP4_MMC1_PWRDNZ |
- OMAP4_USBC1_ICUSB_PWRDNZ);
- omap_ctrl_writel(reg, control_pbias_offset);
+ reg |= OMAP4_MMC1_PBIASLITE_VMODE_MASK;
+ reg |= (OMAP4_MMC1_PBIASLITE_PWRDNZ_MASK |
+ OMAP4_MMC1_PWRDNZ_MASK |
+ OMAP4_USBC1_ICUSB_PWRDNZ_MASK);
+ omap4_ctrl_pad_writel(reg, control_pbias_offset);
/* 4 microsec delay for comparator to generate an error*/
udelay(4);
- reg = omap_ctrl_readl(control_pbias_offset);
- if (reg & OMAP4_MMC1_PBIASLITE_VMODE_ERROR) {
+ reg = omap4_ctrl_pad_readl(control_pbias_offset);
+ if (reg & OMAP4_MMC1_PBIASLITE_VMODE_ERROR_MASK) {
pr_err("Pbias Voltage is not same as LDO\n");
/* Caution : On VMODE_ERROR Power Down MMC IO */
- reg &= ~(OMAP4_MMC1_PWRDNZ | OMAP4_USBC1_ICUSB_PWRDNZ);
- omap_ctrl_writel(reg, control_pbias_offset);
+ reg &= ~(OMAP4_MMC1_PWRDNZ_MASK |
+ OMAP4_USBC1_ICUSB_PWRDNZ_MASK);
+ omap4_ctrl_pad_writel(reg, control_pbias_offset);
}
} else {
- reg = omap_ctrl_readl(control_pbias_offset);
- reg |= (OMAP4_MMC1_PBIASLITE_PWRDNZ |
- OMAP4_MMC1_PBIASLITE_VMODE | OMAP4_MMC1_PWRDNZ |
- OMAP4_USBC1_ICUSB_PWRDNZ);
- omap_ctrl_writel(reg, control_pbias_offset);
+ reg = omap4_ctrl_pad_readl(control_pbias_offset);
+ reg |= (OMAP4_MMC1_PBIASLITE_PWRDNZ_MASK |
+ OMAP4_MMC1_PWRDNZ_MASK |
+ OMAP4_MMC1_PBIASLITE_VMODE_MASK |
+ OMAP4_USBC1_ICUSB_PWRDNZ_MASK);
+ omap4_ctrl_pad_writel(reg, control_pbias_offset);
}
}
@@ -218,17 +222,18 @@ void __init omap2_hsmmc_init(struct omap2_hsmmc_info *controllers)
control_devconf1_offset = OMAP343X_CONTROL_DEVCONF1;
}
} else {
- control_pbias_offset = OMAP44XX_CONTROL_PBIAS_LITE;
- control_mmc1 = OMAP44XX_CONTROL_MMC1;
- reg = omap_ctrl_readl(control_mmc1);
- reg |= (OMAP4_CONTROL_SDMMC1_PUSTRENGTHGRP0 |
- OMAP4_CONTROL_SDMMC1_PUSTRENGTHGRP1);
- reg &= ~(OMAP4_CONTROL_SDMMC1_PUSTRENGTHGRP2 |
- OMAP4_CONTROL_SDMMC1_PUSTRENGTHGRP3);
- reg |= (OMAP4_CONTROL_SDMMC1_DR0_SPEEDCTRL |
- OMAP4_CONTROL_SDMMC1_DR1_SPEEDCTRL |
- OMAP4_CONTROL_SDMMC1_DR2_SPEEDCTRL);
- omap_ctrl_writel(reg, control_mmc1);
+ control_pbias_offset =
+ OMAP4_CTRL_MODULE_PAD_CORE_CONTROL_PBIASLITE;
+ control_mmc1 = OMAP4_CTRL_MODULE_PAD_CORE_CONTROL_MMC1;
+ reg = omap4_ctrl_pad_readl(control_mmc1);
+ reg |= (OMAP4_SDMMC1_PUSTRENGTH_GRP0_MASK |
+ OMAP4_SDMMC1_PUSTRENGTH_GRP1_MASK);
+ reg &= ~(OMAP4_SDMMC1_PUSTRENGTH_GRP2_MASK |
+ OMAP4_SDMMC1_PUSTRENGTH_GRP3_MASK);
+ reg |= (OMAP4_USBC1_DR0_SPEEDCTRL_MASK |
+ OMAP4_SDMMC1_DR1_SPEEDCTRL_MASK |
+ OMAP4_SDMMC1_DR2_SPEEDCTRL_MASK);
+ omap4_ctrl_pad_writel(reg, control_mmc1);
}
for (c = controllers; c->mmc; c++) {
@@ -258,9 +263,13 @@ void __init omap2_hsmmc_init(struct omap2_hsmmc_info *controllers)
"mmc%islot%i", c->mmc, 1);
mmc->slots[0].name = hc->name;
mmc->nr_slots = 1;
- mmc->slots[0].wires = c->wires;
+ mmc->slots[0].caps = c->caps;
mmc->slots[0].internal_clock = !c->ext_clock;
mmc->dma_mask = 0xffffffff;
+ if (cpu_is_omap44xx())
+ mmc->reg_offset = OMAP4_MMC_REG_OFFSET;
+ else
+ mmc->reg_offset = 0;
mmc->get_context_loss_count = hsmmc_get_context_loss;
@@ -298,6 +307,9 @@ void __init omap2_hsmmc_init(struct omap2_hsmmc_info *controllers)
else
mmc->slots[0].features |= HSMMC_HAS_PBIAS;
+ if (cpu_is_omap44xx() && (omap_rev() > OMAP4430_REV_ES1_0))
+ mmc->slots[0].features |= HSMMC_HAS_UPDATED_RESET;
+
switch (c->mmc) {
case 1:
if (mmc->slots[0].features & HSMMC_HAS_PBIAS) {
@@ -316,16 +328,20 @@ void __init omap2_hsmmc_init(struct omap2_hsmmc_info *controllers)
}
/* Omap3630 HSMMC1 supports only 4-bit */
- if (cpu_is_omap3630() && c->wires > 4) {
- c->wires = 4;
- mmc->slots[0].wires = c->wires;
+ if (cpu_is_omap3630() &&
+ (c->caps & MMC_CAP_8_BIT_DATA)) {
+ c->caps &= ~MMC_CAP_8_BIT_DATA;
+ c->caps |= MMC_CAP_4_BIT_DATA;
+ mmc->slots[0].caps = c->caps;
}
break;
case 2:
if (c->ext_clock)
c->transceiver = 1;
- if (c->transceiver && c->wires > 4)
- c->wires = 4;
+ if (c->transceiver && (c->caps & MMC_CAP_8_BIT_DATA)) {
+ c->caps &= ~MMC_CAP_8_BIT_DATA;
+ c->caps |= MMC_CAP_4_BIT_DATA;
+ }
/* FALLTHROUGH */
case 3:
if (mmc->slots[0].features & HSMMC_HAS_PBIAS) {
diff --git a/arch/arm/mach-omap2/hsmmc.h b/arch/arm/mach-omap2/hsmmc.h
index 0f8a2e6ee284..f119348827d4 100644
--- a/arch/arm/mach-omap2/hsmmc.h
+++ b/arch/arm/mach-omap2/hsmmc.h
@@ -10,7 +10,8 @@ struct mmc_card;
struct omap2_hsmmc_info {
u8 mmc; /* controller 1/2/3 */
- u8 wires; /* 1/4/8 wires */
+ u32 caps; /* 4/8 wires and any additional host
+ * capabilities OR'd (ref. linux/mmc/host.h) */
bool transceiver; /* MMC-2 option */
bool ext_clock; /* use external pin for input clock */
bool cover_only; /* No card detect - just cover switch */
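
Note: with the wires field replaced by a caps bitmask, board files express the bus width through MMC_CAP_* flags from linux/mmc/host.h instead of a wire count. A minimal, illustrative controller table under that assumption, using only the fields shown in this header; the table is terminated by an entry with .mmc == 0, which the loop in omap2_hsmmc_init() above relies on:

#include <linux/init.h>
#include <linux/mmc/host.h>
#include "hsmmc.h"

/* Illustrative board table: 4-bit bus on controller 1 (formerly .wires = 4). */
static struct omap2_hsmmc_info board_mmc_info[] __initdata = {
	{
		.mmc	= 1,
		.caps	= MMC_CAP_4_BIT_DATA,
	},
	{}	/* terminator: .mmc == 0 ends the loop in omap2_hsmmc_init() */
};

static void __init board_mmc_init(void)
{
	omap2_hsmmc_init(board_mmc_info);
}
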
diff --git a/arch/arm/mach-omap2/id.c b/arch/arm/mach-omap2/id.c
index 9a879f959509..5f9086c65e48 100644
--- a/arch/arm/mach-omap2/id.c
+++ b/arch/arm/mach-omap2/id.c
@@ -22,11 +22,12 @@
#include <asm/cputype.h>
#include <plat/common.h>
-#include <plat/control.h>
#include <plat/cpu.h>
#include <mach/id.h>
+#include "control.h"
+
static struct omap_chip_id omap_chip;
static unsigned int omap_revision;
@@ -60,7 +61,7 @@ int omap_type(void)
} else if (cpu_is_omap34xx()) {
val = omap_ctrl_readl(OMAP343X_CONTROL_STATUS);
} else if (cpu_is_omap44xx()) {
- val = omap_ctrl_readl(OMAP44XX_CONTROL_STATUS);
+ val = omap_ctrl_readl(OMAP4_CTRL_MODULE_CORE_STATUS);
} else {
pr_err("Cannot detect omap type!\n");
goto out;
@@ -298,7 +299,6 @@ static void __init omap4_check_revision(void)
u32 idcode;
u16 hawkeye;
u8 rev;
- char *rev_name = "ES1.0";
/*
* The IC rev detection is done with hawkeye and rev.
@@ -309,14 +309,39 @@ static void __init omap4_check_revision(void)
hawkeye = (idcode >> 12) & 0xffff;
rev = (idcode >> 28) & 0xff;
- if ((hawkeye == 0xb852) && (rev == 0x0)) {
- omap_revision = OMAP4430_REV_ES1_0;
- omap_chip.oc |= CHIP_IS_OMAP4430ES1;
- pr_info("OMAP%04x %s\n", omap_rev() >> 16, rev_name);
- return;
+ /*
+ * A few initial ES2.0 samples have the same IDCODE as ES1.0.
+ * Use the ARM CPUID register to detect the correct ES version.
+ */
+ if (!rev) {
+ idcode = read_cpuid(CPUID_ID);
+ rev = (idcode & 0xf) - 1;
+ }
+
+ switch (hawkeye) {
+ case 0xb852:
+ switch (rev) {
+ case 0:
+ omap_revision = OMAP4430_REV_ES1_0;
+ omap_chip.oc |= CHIP_IS_OMAP4430ES1;
+ break;
+ case 1:
+ omap_revision = OMAP4430_REV_ES2_0;
+ omap_chip.oc |= CHIP_IS_OMAP4430ES2;
+ break;
+ default:
+ omap_revision = OMAP4430_REV_ES2_0;
+ omap_chip.oc |= CHIP_IS_OMAP4430ES2;
+ }
+ break;
+ default:
+ /* Unknown: default to the latest known silicon rev */
+ omap_revision = OMAP4430_REV_ES2_0;
+ omap_chip.oc |= CHIP_IS_OMAP4430ES2;
}
- pr_err("Unknown OMAP4 CPU id\n");
+ pr_info("OMAP%04x ES%d.0\n",
+ omap_rev() >> 16, ((omap_rev() >> 12) & 0xf) + 1);
}
#define OMAP3_SHOW_FEATURE(feat) \
@@ -361,30 +386,54 @@ static void __init omap3_cpuinfo(void)
strcpy(cpu_name, "OMAP3503");
}
- switch (rev) {
- case OMAP_REVBITS_00:
- strcpy(cpu_rev, "1.0");
- break;
- case OMAP_REVBITS_01:
- strcpy(cpu_rev, "1.1");
- break;
- case OMAP_REVBITS_02:
- strcpy(cpu_rev, "1.2");
- break;
- case OMAP_REVBITS_10:
- strcpy(cpu_rev, "2.0");
- break;
- case OMAP_REVBITS_20:
- strcpy(cpu_rev, "2.1");
- break;
- case OMAP_REVBITS_30:
- strcpy(cpu_rev, "3.0");
- break;
- case OMAP_REVBITS_40:
- /* FALLTHROUGH */
- default:
- /* Use the latest known revision as default */
- strcpy(cpu_rev, "3.1");
+ if (cpu_is_omap3630()) {
+ switch (rev) {
+ case OMAP_REVBITS_00:
+ strcpy(cpu_rev, "1.0");
+ break;
+ case OMAP_REVBITS_01:
+ strcpy(cpu_rev, "1.1");
+ break;
+ case OMAP_REVBITS_02:
+ /* FALLTHROUGH */
+ default:
+ /* Use the latest known revision as default */
+ strcpy(cpu_rev, "1.2");
+ }
+ } else if (cpu_is_omap3505() || cpu_is_omap3517()) {
+ switch (rev) {
+ case OMAP_REVBITS_00:
+ strcpy(cpu_rev, "1.0");
+ break;
+ case OMAP_REVBITS_01:
+ /* FALLTHROUGH */
+ default:
+ /* Use the latest known revision as default */
+ strcpy(cpu_rev, "1.1");
+ }
+ } else {
+ switch (rev) {
+ case OMAP_REVBITS_00:
+ strcpy(cpu_rev, "1.0");
+ break;
+ case OMAP_REVBITS_01:
+ strcpy(cpu_rev, "2.0");
+ break;
+ case OMAP_REVBITS_02:
+ strcpy(cpu_rev, "2.1");
+ break;
+ case OMAP_REVBITS_03:
+ strcpy(cpu_rev, "3.0");
+ break;
+ case OMAP_REVBITS_04:
+ strcpy(cpu_rev, "3.1");
+ break;
+ case OMAP_REVBITS_05:
+ /* FALLTHROUGH */
+ default:
+ /* Use the latest known revision as default */
+ strcpy(cpu_rev, "3.1.2");
+ }
}
/* Print verbose information */
diff --git a/arch/arm/mach-omap2/include/mach/board-rx51.h b/arch/arm/mach-omap2/include/mach/board-rx51.h
new file mode 100644
index 000000000000..b76f49e7eed5
--- /dev/null
+++ b/arch/arm/mach-omap2/include/mach/board-rx51.h
@@ -0,0 +1,11 @@
+/*
+ * Defines for rx51 boards
+ */
+
+#ifndef _OMAP_BOARD_RX51_H
+#define _OMAP_BOARD_RX51_H
+
+extern void __init rx51_peripherals_init(void);
+extern void __init rx51_video_mem_init(void);
+
+#endif
diff --git a/arch/arm/mach-omap2/include/mach/board-zoom.h b/arch/arm/mach-omap2/include/mach/board-zoom.h
index 3af69d2c3dcd..f93ca3928c3b 100644
--- a/arch/arm/mach-omap2/include/mach/board-zoom.h
+++ b/arch/arm/mach-omap2/include/mach/board-zoom.h
@@ -1,11 +1,9 @@
/*
* Defines for zoom boards
*/
-#include <linux/mtd/mtd.h>
-#include <linux/mtd/partitions.h>
-
#define ZOOM_NAND_CS 0
-extern void __init board_nand_init(struct mtd_partition *, u8 nr_parts, u8 cs);
extern int __init zoom_debugboard_init(void);
extern void __init zoom_peripherals_init(void);
+
+#define ZOOM2_HEADSET_EXTMUTE_GPIO 153
diff --git a/arch/arm/mach-omap2/include/mach/ctrl_module_core_44xx.h b/arch/arm/mach-omap2/include/mach/ctrl_module_core_44xx.h
new file mode 100644
index 000000000000..2f7ac70a20d8
--- /dev/null
+++ b/arch/arm/mach-omap2/include/mach/ctrl_module_core_44xx.h
@@ -0,0 +1,391 @@
+/*
+ * OMAP44xx CTRL_MODULE_CORE registers and bitfields
+ *
+ * Copyright (C) 2009-2010 Texas Instruments, Inc.
+ *
+ * Benoit Cousson (b-cousson@ti.com)
+ * Santosh Shilimkar (santosh.shilimkar@ti.com)
+ *
+ * This file is automatically generated from the OMAP hardware databases.
+ * We respectfully ask that any modifications to this file be coordinated
+ * with the public linux-omap@vger.kernel.org mailing list and the
+ * authors above to ensure that the autogeneration scripts are kept
+ * up-to-date with the file contents.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef __ARCH_ARM_MACH_OMAP2_CTRL_MODULE_CORE_44XX_H
+#define __ARCH_ARM_MACH_OMAP2_CTRL_MODULE_CORE_44XX_H
+
+
+/* Base address */
+#define OMAP4_CTRL_MODULE_CORE 0x4a002000
+
+/* Registers offset */
+#define OMAP4_CTRL_MODULE_CORE_IP_REVISION 0x0000
+#define OMAP4_CTRL_MODULE_CORE_IP_HWINFO 0x0004
+#define OMAP4_CTRL_MODULE_CORE_IP_SYSCONFIG 0x0010
+#define OMAP4_CTRL_MODULE_CORE_STD_FUSE_DIE_ID_0 0x0200
+#define OMAP4_CTRL_MODULE_CORE_ID_CODE 0x0204
+#define OMAP4_CTRL_MODULE_CORE_STD_FUSE_DIE_ID_1 0x0208
+#define OMAP4_CTRL_MODULE_CORE_STD_FUSE_DIE_ID_2 0x020c
+#define OMAP4_CTRL_MODULE_CORE_STD_FUSE_DIE_ID_3 0x0210
+#define OMAP4_CTRL_MODULE_CORE_STD_FUSE_PROD_ID_0 0x0214
+#define OMAP4_CTRL_MODULE_CORE_STD_FUSE_PROD_ID_1 0x0218
+#define OMAP4_CTRL_MODULE_CORE_STD_FUSE_USB_CONF 0x021c
+#define OMAP4_CTRL_MODULE_CORE_STD_FUSE_OPP_VDD_WKUP 0x0228
+#define OMAP4_CTRL_MODULE_CORE_STD_FUSE_OPP_BGAP 0x0260
+#define OMAP4_CTRL_MODULE_CORE_STD_FUSE_OPP_DPLL_0 0x0264
+#define OMAP4_CTRL_MODULE_CORE_STD_FUSE_OPP_DPLL_1 0x0268
+#define OMAP4_CTRL_MODULE_CORE_STATUS 0x02c4
+#define OMAP4_CTRL_MODULE_CORE_DEV_CONF 0x0300
+#define OMAP4_CTRL_MODULE_CORE_LDOVBB_IVA_VOLTAGE_CTRL 0x0314
+#define OMAP4_CTRL_MODULE_CORE_LDOVBB_MPU_VOLTAGE_CTRL 0x0318
+#define OMAP4_CTRL_MODULE_CORE_LDOSRAM_IVA_VOLTAGE_CTRL 0x0320
+#define OMAP4_CTRL_MODULE_CORE_LDOSRAM_MPU_VOLTAGE_CTRL 0x0324
+#define OMAP4_CTRL_MODULE_CORE_LDOSRAM_CORE_VOLTAGE_CTRL 0x0328
+#define OMAP4_CTRL_MODULE_CORE_TEMP_SENSOR 0x032c
+#define OMAP4_CTRL_MODULE_CORE_DPLL_NWELL_TRIM_0 0x0330
+#define OMAP4_CTRL_MODULE_CORE_DPLL_NWELL_TRIM_1 0x0334
+#define OMAP4_CTRL_MODULE_CORE_USBOTGHS_CONTROL 0x033c
+#define OMAP4_CTRL_MODULE_CORE_DSS_CONTROL 0x0340
+#define OMAP4_CTRL_MODULE_CORE_HWOBS_CONTROL 0x0350
+#define OMAP4_CTRL_MODULE_CORE_DEBOBS_FINAL_MUX_SEL 0x0400
+#define OMAP4_CTRL_MODULE_CORE_DEBOBS_MMR_MPU 0x0408
+#define OMAP4_CTRL_MODULE_CORE_CONF_SDMA_REQ_SEL0 0x042c
+#define OMAP4_CTRL_MODULE_CORE_CONF_SDMA_REQ_SEL1 0x0430
+#define OMAP4_CTRL_MODULE_CORE_CONF_SDMA_REQ_SEL2 0x0434
+#define OMAP4_CTRL_MODULE_CORE_CONF_SDMA_REQ_SEL3 0x0438
+#define OMAP4_CTRL_MODULE_CORE_CONF_CLK_SEL0 0x0440
+#define OMAP4_CTRL_MODULE_CORE_CONF_CLK_SEL1 0x0444
+#define OMAP4_CTRL_MODULE_CORE_CONF_CLK_SEL2 0x0448
+#define OMAP4_CTRL_MODULE_CORE_CONF_DPLL_FREQLOCK_SEL 0x044c
+#define OMAP4_CTRL_MODULE_CORE_CONF_DPLL_TINITZ_SEL 0x0450
+#define OMAP4_CTRL_MODULE_CORE_CONF_DPLL_PHASELOCK_SEL 0x0454
+#define OMAP4_CTRL_MODULE_CORE_CONF_DEBUG_SEL_TST_0 0x0480
+#define OMAP4_CTRL_MODULE_CORE_CONF_DEBUG_SEL_TST_1 0x0484
+#define OMAP4_CTRL_MODULE_CORE_CONF_DEBUG_SEL_TST_2 0x0488
+#define OMAP4_CTRL_MODULE_CORE_CONF_DEBUG_SEL_TST_3 0x048c
+#define OMAP4_CTRL_MODULE_CORE_CONF_DEBUG_SEL_TST_4 0x0490
+#define OMAP4_CTRL_MODULE_CORE_CONF_DEBUG_SEL_TST_5 0x0494
+#define OMAP4_CTRL_MODULE_CORE_CONF_DEBUG_SEL_TST_6 0x0498
+#define OMAP4_CTRL_MODULE_CORE_CONF_DEBUG_SEL_TST_7 0x049c
+#define OMAP4_CTRL_MODULE_CORE_CONF_DEBUG_SEL_TST_8 0x04a0
+#define OMAP4_CTRL_MODULE_CORE_CONF_DEBUG_SEL_TST_9 0x04a4
+#define OMAP4_CTRL_MODULE_CORE_CONF_DEBUG_SEL_TST_10 0x04a8
+#define OMAP4_CTRL_MODULE_CORE_CONF_DEBUG_SEL_TST_11 0x04ac
+#define OMAP4_CTRL_MODULE_CORE_CONF_DEBUG_SEL_TST_12 0x04b0
+#define OMAP4_CTRL_MODULE_CORE_CONF_DEBUG_SEL_TST_13 0x04b4
+#define OMAP4_CTRL_MODULE_CORE_CONF_DEBUG_SEL_TST_14 0x04b8
+#define OMAP4_CTRL_MODULE_CORE_CONF_DEBUG_SEL_TST_15 0x04bc
+#define OMAP4_CTRL_MODULE_CORE_CONF_DEBUG_SEL_TST_16 0x04c0
+#define OMAP4_CTRL_MODULE_CORE_CONF_DEBUG_SEL_TST_17 0x04c4
+#define OMAP4_CTRL_MODULE_CORE_CONF_DEBUG_SEL_TST_18 0x04c8
+#define OMAP4_CTRL_MODULE_CORE_CONF_DEBUG_SEL_TST_19 0x04cc
+#define OMAP4_CTRL_MODULE_CORE_CONF_DEBUG_SEL_TST_20 0x04d0
+#define OMAP4_CTRL_MODULE_CORE_CONF_DEBUG_SEL_TST_21 0x04d4
+#define OMAP4_CTRL_MODULE_CORE_CONF_DEBUG_SEL_TST_22 0x04d8
+#define OMAP4_CTRL_MODULE_CORE_CONF_DEBUG_SEL_TST_23 0x04dc
+#define OMAP4_CTRL_MODULE_CORE_CONF_DEBUG_SEL_TST_24 0x04e0
+#define OMAP4_CTRL_MODULE_CORE_CONF_DEBUG_SEL_TST_25 0x04e4
+#define OMAP4_CTRL_MODULE_CORE_CONF_DEBUG_SEL_TST_26 0x04e8
+#define OMAP4_CTRL_MODULE_CORE_CONF_DEBUG_SEL_TST_27 0x04ec
+#define OMAP4_CTRL_MODULE_CORE_CONF_DEBUG_SEL_TST_28 0x04f0
+#define OMAP4_CTRL_MODULE_CORE_CONF_DEBUG_SEL_TST_29 0x04f4
+#define OMAP4_CTRL_MODULE_CORE_CONF_DEBUG_SEL_TST_30 0x04f8
+#define OMAP4_CTRL_MODULE_CORE_CONF_DEBUG_SEL_TST_31 0x04fc
+
+/* Registers shifts and masks */
+
+/* IP_REVISION */
+#define OMAP4_IP_REV_SCHEME_SHIFT 30
+#define OMAP4_IP_REV_SCHEME_MASK (0x3 << 30)
+#define OMAP4_IP_REV_FUNC_SHIFT 16
+#define OMAP4_IP_REV_FUNC_MASK (0xfff << 16)
+#define OMAP4_IP_REV_RTL_SHIFT 11
+#define OMAP4_IP_REV_RTL_MASK (0x1f << 11)
+#define OMAP4_IP_REV_MAJOR_SHIFT 8
+#define OMAP4_IP_REV_MAJOR_MASK (0x7 << 8)
+#define OMAP4_IP_REV_CUSTOM_SHIFT 6
+#define OMAP4_IP_REV_CUSTOM_MASK (0x3 << 6)
+#define OMAP4_IP_REV_MINOR_SHIFT 0
+#define OMAP4_IP_REV_MINOR_MASK (0x3f << 0)
+
+/* IP_HWINFO */
+#define OMAP4_IP_HWINFO_SHIFT 0
+#define OMAP4_IP_HWINFO_MASK (0xffffffff << 0)
+
+/* IP_SYSCONFIG */
+#define OMAP4_IP_SYSCONFIG_IDLEMODE_SHIFT 2
+#define OMAP4_IP_SYSCONFIG_IDLEMODE_MASK (0x3 << 2)
+
+/* STD_FUSE_DIE_ID_0 */
+#define OMAP4_STD_FUSE_DIE_ID_0_SHIFT 0
+#define OMAP4_STD_FUSE_DIE_ID_0_MASK (0xffffffff << 0)
+
+/* ID_CODE */
+#define OMAP4_STD_FUSE_IDCODE_SHIFT 0
+#define OMAP4_STD_FUSE_IDCODE_MASK (0xffffffff << 0)
+
+/* STD_FUSE_DIE_ID_1 */
+#define OMAP4_STD_FUSE_DIE_ID_1_SHIFT 0
+#define OMAP4_STD_FUSE_DIE_ID_1_MASK (0xffffffff << 0)
+
+/* STD_FUSE_DIE_ID_2 */
+#define OMAP4_STD_FUSE_DIE_ID_2_SHIFT 0
+#define OMAP4_STD_FUSE_DIE_ID_2_MASK (0xffffffff << 0)
+
+/* STD_FUSE_DIE_ID_3 */
+#define OMAP4_STD_FUSE_DIE_ID_3_SHIFT 0
+#define OMAP4_STD_FUSE_DIE_ID_3_MASK (0xffffffff << 0)
+
+/* STD_FUSE_PROD_ID_0 */
+#define OMAP4_STD_FUSE_PROD_ID_0_SHIFT 0
+#define OMAP4_STD_FUSE_PROD_ID_0_MASK (0xffffffff << 0)
+
+/* STD_FUSE_PROD_ID_1 */
+#define OMAP4_STD_FUSE_PROD_ID_1_SHIFT 0
+#define OMAP4_STD_FUSE_PROD_ID_1_MASK (0xffffffff << 0)
+
+/* STD_FUSE_USB_CONF */
+#define OMAP4_USB_PROD_ID_SHIFT 16
+#define OMAP4_USB_PROD_ID_MASK (0xffff << 16)
+#define OMAP4_USB_VENDOR_ID_SHIFT 0
+#define OMAP4_USB_VENDOR_ID_MASK (0xffff << 0)
+
+/* STD_FUSE_OPP_VDD_WKUP */
+#define OMAP4_STD_FUSE_OPP_VDD_WKUP_SHIFT 0
+#define OMAP4_STD_FUSE_OPP_VDD_WKUP_MASK (0xffffffff << 0)
+
+/* STD_FUSE_OPP_BGAP */
+#define OMAP4_STD_FUSE_OPP_BGAP_SHIFT 0
+#define OMAP4_STD_FUSE_OPP_BGAP_MASK (0xffffffff << 0)
+
+/* STD_FUSE_OPP_DPLL_0 */
+#define OMAP4_STD_FUSE_OPP_DPLL_0_SHIFT 0
+#define OMAP4_STD_FUSE_OPP_DPLL_0_MASK (0xffffffff << 0)
+
+/* STD_FUSE_OPP_DPLL_1 */
+#define OMAP4_STD_FUSE_OPP_DPLL_1_SHIFT 0
+#define OMAP4_STD_FUSE_OPP_DPLL_1_MASK (0xffffffff << 0)
+
+/* STATUS */
+#define OMAP4_ATTILA_CONF_SHIFT 11
+#define OMAP4_ATTILA_CONF_MASK (0x3 << 11)
+#define OMAP4_DEVICE_TYPE_SHIFT 8
+#define OMAP4_DEVICE_TYPE_MASK (0x7 << 8)
+#define OMAP4_SYS_BOOT_SHIFT 0
+#define OMAP4_SYS_BOOT_MASK (0xff << 0)
+
+/* DEV_CONF */
+#define OMAP4_DEV_CONF_SHIFT 1
+#define OMAP4_DEV_CONF_MASK (0x7fffffff << 1)
+#define OMAP4_USBPHY_PD_SHIFT 0
+#define OMAP4_USBPHY_PD_MASK (1 << 0)
+
+/* LDOVBB_IVA_VOLTAGE_CTRL */
+#define OMAP4_LDOVBBIVA_RBB_MUX_CTRL_SHIFT 26
+#define OMAP4_LDOVBBIVA_RBB_MUX_CTRL_MASK (1 << 26)
+#define OMAP4_LDOVBBIVA_RBB_VSET_IN_SHIFT 21
+#define OMAP4_LDOVBBIVA_RBB_VSET_IN_MASK (0x1f << 21)
+#define OMAP4_LDOVBBIVA_RBB_VSET_OUT_SHIFT 16
+#define OMAP4_LDOVBBIVA_RBB_VSET_OUT_MASK (0x1f << 16)
+#define OMAP4_LDOVBBIVA_FBB_MUX_CTRL_SHIFT 10
+#define OMAP4_LDOVBBIVA_FBB_MUX_CTRL_MASK (1 << 10)
+#define OMAP4_LDOVBBIVA_FBB_VSET_IN_SHIFT 5
+#define OMAP4_LDOVBBIVA_FBB_VSET_IN_MASK (0x1f << 5)
+#define OMAP4_LDOVBBIVA_FBB_VSET_OUT_SHIFT 0
+#define OMAP4_LDOVBBIVA_FBB_VSET_OUT_MASK (0x1f << 0)
+
+/* LDOVBB_MPU_VOLTAGE_CTRL */
+#define OMAP4_LDOVBBMPU_RBB_MUX_CTRL_SHIFT 26
+#define OMAP4_LDOVBBMPU_RBB_MUX_CTRL_MASK (1 << 26)
+#define OMAP4_LDOVBBMPU_RBB_VSET_IN_SHIFT 21
+#define OMAP4_LDOVBBMPU_RBB_VSET_IN_MASK (0x1f << 21)
+#define OMAP4_LDOVBBMPU_RBB_VSET_OUT_SHIFT 16
+#define OMAP4_LDOVBBMPU_RBB_VSET_OUT_MASK (0x1f << 16)
+#define OMAP4_LDOVBBMPU_FBB_MUX_CTRL_SHIFT 10
+#define OMAP4_LDOVBBMPU_FBB_MUX_CTRL_MASK (1 << 10)
+#define OMAP4_LDOVBBMPU_FBB_VSET_IN_SHIFT 5
+#define OMAP4_LDOVBBMPU_FBB_VSET_IN_MASK (0x1f << 5)
+#define OMAP4_LDOVBBMPU_FBB_VSET_OUT_SHIFT 0
+#define OMAP4_LDOVBBMPU_FBB_VSET_OUT_MASK (0x1f << 0)
+
+/* LDOSRAM_IVA_VOLTAGE_CTRL */
+#define OMAP4_LDOSRAMIVA_RETMODE_MUX_CTRL_SHIFT 26
+#define OMAP4_LDOSRAMIVA_RETMODE_MUX_CTRL_MASK (1 << 26)
+#define OMAP4_LDOSRAMIVA_RETMODE_VSET_IN_SHIFT 21
+#define OMAP4_LDOSRAMIVA_RETMODE_VSET_IN_MASK (0x1f << 21)
+#define OMAP4_LDOSRAMIVA_RETMODE_VSET_OUT_SHIFT 16
+#define OMAP4_LDOSRAMIVA_RETMODE_VSET_OUT_MASK (0x1f << 16)
+#define OMAP4_LDOSRAMIVA_ACTMODE_MUX_CTRL_SHIFT 10
+#define OMAP4_LDOSRAMIVA_ACTMODE_MUX_CTRL_MASK (1 << 10)
+#define OMAP4_LDOSRAMIVA_ACTMODE_VSET_IN_SHIFT 5
+#define OMAP4_LDOSRAMIVA_ACTMODE_VSET_IN_MASK (0x1f << 5)
+#define OMAP4_LDOSRAMIVA_ACTMODE_VSET_OUT_SHIFT 0
+#define OMAP4_LDOSRAMIVA_ACTMODE_VSET_OUT_MASK (0x1f << 0)
+
+/* LDOSRAM_MPU_VOLTAGE_CTRL */
+#define OMAP4_LDOSRAMMPU_RETMODE_MUX_CTRL_SHIFT 26
+#define OMAP4_LDOSRAMMPU_RETMODE_MUX_CTRL_MASK (1 << 26)
+#define OMAP4_LDOSRAMMPU_RETMODE_VSET_IN_SHIFT 21
+#define OMAP4_LDOSRAMMPU_RETMODE_VSET_IN_MASK (0x1f << 21)
+#define OMAP4_LDOSRAMMPU_RETMODE_VSET_OUT_SHIFT 16
+#define OMAP4_LDOSRAMMPU_RETMODE_VSET_OUT_MASK (0x1f << 16)
+#define OMAP4_LDOSRAMMPU_ACTMODE_MUX_CTRL_SHIFT 10
+#define OMAP4_LDOSRAMMPU_ACTMODE_MUX_CTRL_MASK (1 << 10)
+#define OMAP4_LDOSRAMMPU_ACTMODE_VSET_IN_SHIFT 5
+#define OMAP4_LDOSRAMMPU_ACTMODE_VSET_IN_MASK (0x1f << 5)
+#define OMAP4_LDOSRAMMPU_ACTMODE_VSET_OUT_SHIFT 0
+#define OMAP4_LDOSRAMMPU_ACTMODE_VSET_OUT_MASK (0x1f << 0)
+
+/* LDOSRAM_CORE_VOLTAGE_CTRL */
+#define OMAP4_LDOSRAMCORE_RETMODE_MUX_CTRL_SHIFT 26
+#define OMAP4_LDOSRAMCORE_RETMODE_MUX_CTRL_MASK (1 << 26)
+#define OMAP4_LDOSRAMCORE_RETMODE_VSET_IN_SHIFT 21
+#define OMAP4_LDOSRAMCORE_RETMODE_VSET_IN_MASK (0x1f << 21)
+#define OMAP4_LDOSRAMCORE_RETMODE_VSET_OUT_SHIFT 16
+#define OMAP4_LDOSRAMCORE_RETMODE_VSET_OUT_MASK (0x1f << 16)
+#define OMAP4_LDOSRAMCORE_ACTMODE_MUX_CTRL_SHIFT 10
+#define OMAP4_LDOSRAMCORE_ACTMODE_MUX_CTRL_MASK (1 << 10)
+#define OMAP4_LDOSRAMCORE_ACTMODE_VSET_IN_SHIFT 5
+#define OMAP4_LDOSRAMCORE_ACTMODE_VSET_IN_MASK (0x1f << 5)
+#define OMAP4_LDOSRAMCORE_ACTMODE_VSET_OUT_SHIFT 0
+#define OMAP4_LDOSRAMCORE_ACTMODE_VSET_OUT_MASK (0x1f << 0)
+
+/* TEMP_SENSOR */
+#define OMAP4_BGAP_TEMPSOFF_SHIFT 12
+#define OMAP4_BGAP_TEMPSOFF_MASK (1 << 12)
+#define OMAP4_BGAP_TSHUT_SHIFT 11
+#define OMAP4_BGAP_TSHUT_MASK (1 << 11)
+#define OMAP4_BGAP_TEMP_SENSOR_CONTCONV_SHIFT 10
+#define OMAP4_BGAP_TEMP_SENSOR_CONTCONV_MASK (1 << 10)
+#define OMAP4_BGAP_TEMP_SENSOR_SOC_SHIFT 9
+#define OMAP4_BGAP_TEMP_SENSOR_SOC_MASK (1 << 9)
+#define OMAP4_BGAP_TEMP_SENSOR_EOCZ_SHIFT 8
+#define OMAP4_BGAP_TEMP_SENSOR_EOCZ_MASK (1 << 8)
+#define OMAP4_BGAP_TEMP_SENSOR_DTEMP_SHIFT 0
+#define OMAP4_BGAP_TEMP_SENSOR_DTEMP_MASK (0xff << 0)
+
+/* DPLL_NWELL_TRIM_0 */
+#define OMAP4_DPLL_ABE_NWELL_TRIM_MUX_CTRL_SHIFT 29
+#define OMAP4_DPLL_ABE_NWELL_TRIM_MUX_CTRL_MASK (1 << 29)
+#define OMAP4_DPLL_ABE_NWELL_TRIM_SHIFT 24
+#define OMAP4_DPLL_ABE_NWELL_TRIM_MASK (0x1f << 24)
+#define OMAP4_DPLL_PER_NWELL_TRIM_MUX_CTRL_SHIFT 23
+#define OMAP4_DPLL_PER_NWELL_TRIM_MUX_CTRL_MASK (1 << 23)
+#define OMAP4_DPLL_PER_NWELL_TRIM_SHIFT 18
+#define OMAP4_DPLL_PER_NWELL_TRIM_MASK (0x1f << 18)
+#define OMAP4_DPLL_CORE_NWELL_TRIM_MUX_CTRL_SHIFT 17
+#define OMAP4_DPLL_CORE_NWELL_TRIM_MUX_CTRL_MASK (1 << 17)
+#define OMAP4_DPLL_CORE_NWELL_TRIM_SHIFT 12
+#define OMAP4_DPLL_CORE_NWELL_TRIM_MASK (0x1f << 12)
+#define OMAP4_DPLL_IVA_NWELL_TRIM_MUX_CTRL_SHIFT 11
+#define OMAP4_DPLL_IVA_NWELL_TRIM_MUX_CTRL_MASK (1 << 11)
+#define OMAP4_DPLL_IVA_NWELL_TRIM_SHIFT 6
+#define OMAP4_DPLL_IVA_NWELL_TRIM_MASK (0x1f << 6)
+#define OMAP4_DPLL_MPU_NWELL_TRIM_MUX_CTRL_SHIFT 5
+#define OMAP4_DPLL_MPU_NWELL_TRIM_MUX_CTRL_MASK (1 << 5)
+#define OMAP4_DPLL_MPU_NWELL_TRIM_SHIFT 0
+#define OMAP4_DPLL_MPU_NWELL_TRIM_MASK (0x1f << 0)
+
+/* DPLL_NWELL_TRIM_1 */
+#define OMAP4_DPLL_UNIPRO_NWELL_TRIM_MUX_CTRL_SHIFT 29
+#define OMAP4_DPLL_UNIPRO_NWELL_TRIM_MUX_CTRL_MASK (1 << 29)
+#define OMAP4_DPLL_UNIPRO_NWELL_TRIM_SHIFT 24
+#define OMAP4_DPLL_UNIPRO_NWELL_TRIM_MASK (0x1f << 24)
+#define OMAP4_DPLL_USB_NWELL_TRIM_MUX_CTRL_SHIFT 23
+#define OMAP4_DPLL_USB_NWELL_TRIM_MUX_CTRL_MASK (1 << 23)
+#define OMAP4_DPLL_USB_NWELL_TRIM_SHIFT 18
+#define OMAP4_DPLL_USB_NWELL_TRIM_MASK (0x1f << 18)
+#define OMAP4_DPLL_HDMI_NWELL_TRIM_MUX_CTRL_SHIFT 17
+#define OMAP4_DPLL_HDMI_NWELL_TRIM_MUX_CTRL_MASK (1 << 17)
+#define OMAP4_DPLL_HDMI_NWELL_TRIM_SHIFT 12
+#define OMAP4_DPLL_HDMI_NWELL_TRIM_MASK (0x1f << 12)
+#define OMAP4_DPLL_DSI2_NWELL_TRIM_MUX_CTRL_SHIFT 11
+#define OMAP4_DPLL_DSI2_NWELL_TRIM_MUX_CTRL_MASK (1 << 11)
+#define OMAP4_DPLL_DSI2_NWELL_TRIM_SHIFT 6
+#define OMAP4_DPLL_DSI2_NWELL_TRIM_MASK (0x1f << 6)
+#define OMAP4_DPLL_DSI1_NWELL_TRIM_MUX_CTRL_SHIFT 5
+#define OMAP4_DPLL_DSI1_NWELL_TRIM_MUX_CTRL_MASK (1 << 5)
+#define OMAP4_DPLL_DSI1_NWELL_TRIM_SHIFT 0
+#define OMAP4_DPLL_DSI1_NWELL_TRIM_MASK (0x1f << 0)
+
+/* USBOTGHS_CONTROL */
+#define OMAP4_DISCHRGVBUS_SHIFT 8
+#define OMAP4_DISCHRGVBUS_MASK (1 << 8)
+#define OMAP4_CHRGVBUS_SHIFT 7
+#define OMAP4_CHRGVBUS_MASK (1 << 7)
+#define OMAP4_DRVVBUS_SHIFT 6
+#define OMAP4_DRVVBUS_MASK (1 << 6)
+#define OMAP4_IDPULLUP_SHIFT 5
+#define OMAP4_IDPULLUP_MASK (1 << 5)
+#define OMAP4_IDDIG_SHIFT 4
+#define OMAP4_IDDIG_MASK (1 << 4)
+#define OMAP4_SESSEND_SHIFT 3
+#define OMAP4_SESSEND_MASK (1 << 3)
+#define OMAP4_VBUSVALID_SHIFT 2
+#define OMAP4_VBUSVALID_MASK (1 << 2)
+#define OMAP4_BVALID_SHIFT 1
+#define OMAP4_BVALID_MASK (1 << 1)
+#define OMAP4_AVALID_SHIFT 0
+#define OMAP4_AVALID_MASK (1 << 0)
+
+/* DSS_CONTROL */
+#define OMAP4_DSS_MUX6_SELECT_SHIFT 0
+#define OMAP4_DSS_MUX6_SELECT_MASK (1 << 0)
+
+/* HWOBS_CONTROL */
+#define OMAP4_HWOBS_CLKDIV_SEL_SHIFT 3
+#define OMAP4_HWOBS_CLKDIV_SEL_MASK (0x1f << 3)
+#define OMAP4_HWOBS_ALL_ZERO_MODE_SHIFT 2
+#define OMAP4_HWOBS_ALL_ZERO_MODE_MASK (1 << 2)
+#define OMAP4_HWOBS_ALL_ONE_MODE_SHIFT 1
+#define OMAP4_HWOBS_ALL_ONE_MODE_MASK (1 << 1)
+#define OMAP4_HWOBS_MACRO_ENABLE_SHIFT 0
+#define OMAP4_HWOBS_MACRO_ENABLE_MASK (1 << 0)
+
+/* DEBOBS_FINAL_MUX_SEL */
+#define OMAP4_SELECT_SHIFT 0
+#define OMAP4_SELECT_MASK (0xffffffff << 0)
+
+/* DEBOBS_MMR_MPU */
+#define OMAP4_SELECT_DEBOBS_MMR_MPU_SHIFT 0
+#define OMAP4_SELECT_DEBOBS_MMR_MPU_MASK (0xf << 0)
+
+/* CONF_SDMA_REQ_SEL0 */
+#define OMAP4_MULT_SHIFT 0
+#define OMAP4_MULT_MASK (0x7f << 0)
+
+/* CONF_CLK_SEL0 */
+#define OMAP4_MULT_CONF_CLK_SEL0_SHIFT 0
+#define OMAP4_MULT_CONF_CLK_SEL0_MASK (0x7 << 0)
+
+/* CONF_CLK_SEL1 */
+#define OMAP4_MULT_CONF_CLK_SEL1_SHIFT 0
+#define OMAP4_MULT_CONF_CLK_SEL1_MASK (0x7 << 0)
+
+/* CONF_CLK_SEL2 */
+#define OMAP4_MULT_CONF_CLK_SEL2_SHIFT 0
+#define OMAP4_MULT_CONF_CLK_SEL2_MASK (0x7 << 0)
+
+/* CONF_DPLL_FREQLOCK_SEL */
+#define OMAP4_MULT_CONF_DPLL_FREQLOCK_SEL_SHIFT 0
+#define OMAP4_MULT_CONF_DPLL_FREQLOCK_SEL_MASK (0x7 << 0)
+
+/* CONF_DPLL_TINITZ_SEL */
+#define OMAP4_MULT_CONF_DPLL_TINITZ_SEL_SHIFT 0
+#define OMAP4_MULT_CONF_DPLL_TINITZ_SEL_MASK (0x7 << 0)
+
+/* CONF_DPLL_PHASELOCK_SEL */
+#define OMAP4_MULT_CONF_DPLL_PHASELOCK_SEL_SHIFT 0
+#define OMAP4_MULT_CONF_DPLL_PHASELOCK_SEL_MASK (0x7 << 0)
+
+/* CONF_DEBUG_SEL_TST_0 */
+#define OMAP4_MODE_SHIFT 0
+#define OMAP4_MODE_MASK (0xf << 0)
+
+#endif
diff --git a/arch/arm/mach-omap2/include/mach/ctrl_module_pad_core_44xx.h b/arch/arm/mach-omap2/include/mach/ctrl_module_pad_core_44xx.h
new file mode 100644
index 000000000000..c88420de1151
--- /dev/null
+++ b/arch/arm/mach-omap2/include/mach/ctrl_module_pad_core_44xx.h
@@ -0,0 +1,1409 @@
+/*
+ * OMAP44xx CTRL_MODULE_PAD_CORE registers and bitfields
+ *
+ * Copyright (C) 2009-2010 Texas Instruments, Inc.
+ *
+ * Benoit Cousson (b-cousson@ti.com)
+ * Santosh Shilimkar (santosh.shilimkar@ti.com)
+ *
+ * This file is automatically generated from the OMAP hardware databases.
+ * We respectfully ask that any modifications to this file be coordinated
+ * with the public linux-omap@vger.kernel.org mailing list and the
+ * authors above to ensure that the autogeneration scripts are kept
+ * up-to-date with the file contents.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef __ARCH_ARM_MACH_OMAP2_CTRL_MODULE_PAD_CORE_44XX_H
+#define __ARCH_ARM_MACH_OMAP2_CTRL_MODULE_PAD_CORE_44XX_H
+
+
+/* Base address */
+#define OMAP4_CTRL_MODULE_PAD_CORE 0x4a100000
+
+/* Registers offset */
+#define OMAP4_CTRL_MODULE_PAD_CORE_IP_REVISION 0x0000
+#define OMAP4_CTRL_MODULE_PAD_CORE_IP_HWINFO 0x0004
+#define OMAP4_CTRL_MODULE_PAD_CORE_IP_SYSCONFIG 0x0010
+#define OMAP4_CTRL_MODULE_PAD_CORE_PADCONF_WAKEUPEVENT_0 0x01d8
+#define OMAP4_CTRL_MODULE_PAD_CORE_PADCONF_WAKEUPEVENT_1 0x01dc
+#define OMAP4_CTRL_MODULE_PAD_CORE_PADCONF_WAKEUPEVENT_2 0x01e0
+#define OMAP4_CTRL_MODULE_PAD_CORE_PADCONF_WAKEUPEVENT_3 0x01e4
+#define OMAP4_CTRL_MODULE_PAD_CORE_PADCONF_WAKEUPEVENT_4 0x01e8
+#define OMAP4_CTRL_MODULE_PAD_CORE_PADCONF_WAKEUPEVENT_5 0x01ec
+#define OMAP4_CTRL_MODULE_PAD_CORE_PADCONF_WAKEUPEVENT_6 0x01f0
+#define OMAP4_CTRL_MODULE_PAD_CORE_CONTROL_PADCONF_GLOBAL 0x05a0
+#define OMAP4_CTRL_MODULE_PAD_CORE_CONTROL_PADCONF_MODE 0x05a4
+#define OMAP4_CTRL_MODULE_PAD_CORE_CONTROL_SMART1IO_PADCONF_0 0x05a8
+#define OMAP4_CTRL_MODULE_PAD_CORE_CONTROL_SMART1IO_PADCONF_1 0x05ac
+#define OMAP4_CTRL_MODULE_PAD_CORE_CONTROL_SMART2IO_PADCONF_0 0x05b0
+#define OMAP4_CTRL_MODULE_PAD_CORE_CONTROL_SMART2IO_PADCONF_1 0x05b4
+#define OMAP4_CTRL_MODULE_PAD_CORE_CONTROL_SMART3IO_PADCONF_0 0x05b8
+#define OMAP4_CTRL_MODULE_PAD_CORE_CONTROL_SMART3IO_PADCONF_1 0x05bc
+#define OMAP4_CTRL_MODULE_PAD_CORE_CONTROL_SMART3IO_PADCONF_2 0x05c0
+#define OMAP4_CTRL_MODULE_PAD_CORE_CONTROL_USBB_HSIC 0x05c4
+#define OMAP4_CTRL_MODULE_PAD_CORE_CONTROL_SLIMBUS 0x05c8
+#define OMAP4_CTRL_MODULE_PAD_CORE_CONTROL_PBIASLITE 0x0600
+#define OMAP4_CTRL_MODULE_PAD_CORE_CONTROL_I2C_0 0x0604
+#define OMAP4_CTRL_MODULE_PAD_CORE_CONTROL_CAMERA_RX 0x0608
+#define OMAP4_CTRL_MODULE_PAD_CORE_CONTROL_AVDAC 0x060c
+#define OMAP4_CTRL_MODULE_PAD_CORE_CONTROL_HDMI_TX_PHY 0x0610
+#define OMAP4_CTRL_MODULE_PAD_CORE_CONTROL_MMC2 0x0614
+#define OMAP4_CTRL_MODULE_PAD_CORE_CONTROL_DSIPHY 0x0618
+#define OMAP4_CTRL_MODULE_PAD_CORE_CONTROL_MCBSPLP 0x061c
+#define OMAP4_CTRL_MODULE_PAD_CORE_CONTROL_USB2PHYCORE 0x0620
+#define OMAP4_CTRL_MODULE_PAD_CORE_CONTROL_I2C_1 0x0624
+#define OMAP4_CTRL_MODULE_PAD_CORE_CONTROL_MMC1 0x0628
+#define OMAP4_CTRL_MODULE_PAD_CORE_CONTROL_HSI 0x062c
+#define OMAP4_CTRL_MODULE_PAD_CORE_CONTROL_USB 0x0630
+#define OMAP4_CTRL_MODULE_PAD_CORE_CONTROL_HDQ 0x0634
+#define OMAP4_CTRL_MODULE_PAD_CORE_CONTROL_LPDDR2IO1_0 0x0638
+#define OMAP4_CTRL_MODULE_PAD_CORE_CONTROL_LPDDR2IO1_1 0x063c
+#define OMAP4_CTRL_MODULE_PAD_CORE_CONTROL_LPDDR2IO1_2 0x0640
+#define OMAP4_CTRL_MODULE_PAD_CORE_CONTROL_LPDDR2IO1_3 0x0644
+#define OMAP4_CTRL_MODULE_PAD_CORE_CONTROL_LPDDR2IO2_0 0x0648
+#define OMAP4_CTRL_MODULE_PAD_CORE_CONTROL_LPDDR2IO2_1 0x064c
+#define OMAP4_CTRL_MODULE_PAD_CORE_CONTROL_LPDDR2IO2_2 0x0650
+#define OMAP4_CTRL_MODULE_PAD_CORE_CONTROL_LPDDR2IO2_3 0x0654
+#define OMAP4_CTRL_MODULE_PAD_CORE_CONTROL_BUS_HOLD 0x0658
+#define OMAP4_CTRL_MODULE_PAD_CORE_CONTROL_C2C 0x065c
+#define OMAP4_CTRL_MODULE_PAD_CORE_CORE_CONTROL_SPARE_RW 0x0660
+#define OMAP4_CTRL_MODULE_PAD_CORE_CORE_CONTROL_SPARE_R 0x0664
+#define OMAP4_CTRL_MODULE_PAD_CORE_CORE_CONTROL_SPARE_R_C0 0x0668
+#define OMAP4_CTRL_MODULE_PAD_CORE_CONTROL_EFUSE_1 0x0700
+#define OMAP4_CTRL_MODULE_PAD_CORE_CONTROL_EFUSE_2 0x0704
+#define OMAP4_CTRL_MODULE_PAD_CORE_CONTROL_EFUSE_3 0x0708
+#define OMAP4_CTRL_MODULE_PAD_CORE_CONTROL_EFUSE_4 0x070c
+
+/* Registers shifts and masks */
+
+/* IP_REVISION */
+#define OMAP4_IP_REV_SCHEME_SHIFT 30
+#define OMAP4_IP_REV_SCHEME_MASK (0x3 << 30)
+#define OMAP4_IP_REV_FUNC_SHIFT 16
+#define OMAP4_IP_REV_FUNC_MASK (0xfff << 16)
+#define OMAP4_IP_REV_RTL_SHIFT 11
+#define OMAP4_IP_REV_RTL_MASK (0x1f << 11)
+#define OMAP4_IP_REV_MAJOR_SHIFT 8
+#define OMAP4_IP_REV_MAJOR_MASK (0x7 << 8)
+#define OMAP4_IP_REV_CUSTOM_SHIFT 6
+#define OMAP4_IP_REV_CUSTOM_MASK (0x3 << 6)
+#define OMAP4_IP_REV_MINOR_SHIFT 0
+#define OMAP4_IP_REV_MINOR_MASK (0x3f << 0)
+
+/* IP_HWINFO */
+#define OMAP4_IP_HWINFO_SHIFT 0
+#define OMAP4_IP_HWINFO_MASK (0xffffffff << 0)
+
+/* IP_SYSCONFIG */
+#define OMAP4_IP_SYSCONFIG_IDLEMODE_SHIFT 2
+#define OMAP4_IP_SYSCONFIG_IDLEMODE_MASK (0x3 << 2)
+
+/* PADCONF_WAKEUPEVENT_0 */
+#define OMAP4_GPMC_CLK_DUPLICATEWAKEUPEVENT_SHIFT 31
+#define OMAP4_GPMC_CLK_DUPLICATEWAKEUPEVENT_MASK (1 << 31)
+#define OMAP4_GPMC_NWP_DUPLICATEWAKEUPEVENT_SHIFT 30
+#define OMAP4_GPMC_NWP_DUPLICATEWAKEUPEVENT_MASK (1 << 30)
+#define OMAP4_GPMC_NCS3_DUPLICATEWAKEUPEVENT_SHIFT 29
+#define OMAP4_GPMC_NCS3_DUPLICATEWAKEUPEVENT_MASK (1 << 29)
+#define OMAP4_GPMC_NCS2_DUPLICATEWAKEUPEVENT_SHIFT 28
+#define OMAP4_GPMC_NCS2_DUPLICATEWAKEUPEVENT_MASK (1 << 28)
+#define OMAP4_GPMC_NCS1_DUPLICATEWAKEUPEVENT_SHIFT 27
+#define OMAP4_GPMC_NCS1_DUPLICATEWAKEUPEVENT_MASK (1 << 27)
+#define OMAP4_GPMC_NCS0_DUPLICATEWAKEUPEVENT_SHIFT 26
+#define OMAP4_GPMC_NCS0_DUPLICATEWAKEUPEVENT_MASK (1 << 26)
+#define OMAP4_GPMC_A25_DUPLICATEWAKEUPEVENT_SHIFT 25
+#define OMAP4_GPMC_A25_DUPLICATEWAKEUPEVENT_MASK (1 << 25)
+#define OMAP4_GPMC_A24_DUPLICATEWAKEUPEVENT_SHIFT 24
+#define OMAP4_GPMC_A24_DUPLICATEWAKEUPEVENT_MASK (1 << 24)
+#define OMAP4_GPMC_A23_DUPLICATEWAKEUPEVENT_SHIFT 23
+#define OMAP4_GPMC_A23_DUPLICATEWAKEUPEVENT_MASK (1 << 23)
+#define OMAP4_GPMC_A22_DUPLICATEWAKEUPEVENT_SHIFT 22
+#define OMAP4_GPMC_A22_DUPLICATEWAKEUPEVENT_MASK (1 << 22)
+#define OMAP4_GPMC_A21_DUPLICATEWAKEUPEVENT_SHIFT 21
+#define OMAP4_GPMC_A21_DUPLICATEWAKEUPEVENT_MASK (1 << 21)
+#define OMAP4_GPMC_A20_DUPLICATEWAKEUPEVENT_SHIFT 20
+#define OMAP4_GPMC_A20_DUPLICATEWAKEUPEVENT_MASK (1 << 20)
+#define OMAP4_GPMC_A19_DUPLICATEWAKEUPEVENT_SHIFT 19
+#define OMAP4_GPMC_A19_DUPLICATEWAKEUPEVENT_MASK (1 << 19)
+#define OMAP4_GPMC_A18_DUPLICATEWAKEUPEVENT_SHIFT 18
+#define OMAP4_GPMC_A18_DUPLICATEWAKEUPEVENT_MASK (1 << 18)
+#define OMAP4_GPMC_A17_DUPLICATEWAKEUPEVENT_SHIFT 17
+#define OMAP4_GPMC_A17_DUPLICATEWAKEUPEVENT_MASK (1 << 17)
+#define OMAP4_GPMC_A16_DUPLICATEWAKEUPEVENT_SHIFT 16
+#define OMAP4_GPMC_A16_DUPLICATEWAKEUPEVENT_MASK (1 << 16)
+#define OMAP4_GPMC_AD15_DUPLICATEWAKEUPEVENT_SHIFT 15
+#define OMAP4_GPMC_AD15_DUPLICATEWAKEUPEVENT_MASK (1 << 15)
+#define OMAP4_GPMC_AD14_DUPLICATEWAKEUPEVENT_SHIFT 14
+#define OMAP4_GPMC_AD14_DUPLICATEWAKEUPEVENT_MASK (1 << 14)
+#define OMAP4_GPMC_AD13_DUPLICATEWAKEUPEVENT_SHIFT 13
+#define OMAP4_GPMC_AD13_DUPLICATEWAKEUPEVENT_MASK (1 << 13)
+#define OMAP4_GPMC_AD12_DUPLICATEWAKEUPEVENT_SHIFT 12
+#define OMAP4_GPMC_AD12_DUPLICATEWAKEUPEVENT_MASK (1 << 12)
+#define OMAP4_GPMC_AD11_DUPLICATEWAKEUPEVENT_SHIFT 11
+#define OMAP4_GPMC_AD11_DUPLICATEWAKEUPEVENT_MASK (1 << 11)
+#define OMAP4_GPMC_AD10_DUPLICATEWAKEUPEVENT_SHIFT 10
+#define OMAP4_GPMC_AD10_DUPLICATEWAKEUPEVENT_MASK (1 << 10)
+#define OMAP4_GPMC_AD9_DUPLICATEWAKEUPEVENT_SHIFT 9
+#define OMAP4_GPMC_AD9_DUPLICATEWAKEUPEVENT_MASK (1 << 9)
+#define OMAP4_GPMC_AD8_DUPLICATEWAKEUPEVENT_SHIFT 8
+#define OMAP4_GPMC_AD8_DUPLICATEWAKEUPEVENT_MASK (1 << 8)
+#define OMAP4_GPMC_AD7_DUPLICATEWAKEUPEVENT_SHIFT 7
+#define OMAP4_GPMC_AD7_DUPLICATEWAKEUPEVENT_MASK (1 << 7)
+#define OMAP4_GPMC_AD6_DUPLICATEWAKEUPEVENT_SHIFT 6
+#define OMAP4_GPMC_AD6_DUPLICATEWAKEUPEVENT_MASK (1 << 6)
+#define OMAP4_GPMC_AD5_DUPLICATEWAKEUPEVENT_SHIFT 5
+#define OMAP4_GPMC_AD5_DUPLICATEWAKEUPEVENT_MASK (1 << 5)
+#define OMAP4_GPMC_AD4_DUPLICATEWAKEUPEVENT_SHIFT 4
+#define OMAP4_GPMC_AD4_DUPLICATEWAKEUPEVENT_MASK (1 << 4)
+#define OMAP4_GPMC_AD3_DUPLICATEWAKEUPEVENT_SHIFT 3
+#define OMAP4_GPMC_AD3_DUPLICATEWAKEUPEVENT_MASK (1 << 3)
+#define OMAP4_GPMC_AD2_DUPLICATEWAKEUPEVENT_SHIFT 2
+#define OMAP4_GPMC_AD2_DUPLICATEWAKEUPEVENT_MASK (1 << 2)
+#define OMAP4_GPMC_AD1_DUPLICATEWAKEUPEVENT_SHIFT 1
+#define OMAP4_GPMC_AD1_DUPLICATEWAKEUPEVENT_MASK (1 << 1)
+#define OMAP4_GPMC_AD0_DUPLICATEWAKEUPEVENT_SHIFT 0
+#define OMAP4_GPMC_AD0_DUPLICATEWAKEUPEVENT_MASK (1 << 0)
+
+/* PADCONF_WAKEUPEVENT_1 */
+#define OMAP4_CAM_STROBE_DUPLICATEWAKEUPEVENT_SHIFT 31
+#define OMAP4_CAM_STROBE_DUPLICATEWAKEUPEVENT_MASK (1 << 31)
+#define OMAP4_CAM_SHUTTER_DUPLICATEWAKEUPEVENT_SHIFT 30
+#define OMAP4_CAM_SHUTTER_DUPLICATEWAKEUPEVENT_MASK (1 << 30)
+#define OMAP4_CSI22_DY1_DUPLICATEWAKEUPEVENT_SHIFT 29
+#define OMAP4_CSI22_DY1_DUPLICATEWAKEUPEVENT_MASK (1 << 29)
+#define OMAP4_CSI22_DX1_DUPLICATEWAKEUPEVENT_SHIFT 28
+#define OMAP4_CSI22_DX1_DUPLICATEWAKEUPEVENT_MASK (1 << 28)
+#define OMAP4_CSI22_DY0_DUPLICATEWAKEUPEVENT_SHIFT 27
+#define OMAP4_CSI22_DY0_DUPLICATEWAKEUPEVENT_MASK (1 << 27)
+#define OMAP4_CSI22_DX0_DUPLICATEWAKEUPEVENT_SHIFT 26
+#define OMAP4_CSI22_DX0_DUPLICATEWAKEUPEVENT_MASK (1 << 26)
+#define OMAP4_CSI21_DY4_DUPLICATEWAKEUPEVENT_SHIFT 25
+#define OMAP4_CSI21_DY4_DUPLICATEWAKEUPEVENT_MASK (1 << 25)
+#define OMAP4_CSI21_DX4_DUPLICATEWAKEUPEVENT_SHIFT 24
+#define OMAP4_CSI21_DX4_DUPLICATEWAKEUPEVENT_MASK (1 << 24)
+#define OMAP4_CSI21_DY3_DUPLICATEWAKEUPEVENT_SHIFT 23
+#define OMAP4_CSI21_DY3_DUPLICATEWAKEUPEVENT_MASK (1 << 23)
+#define OMAP4_CSI21_DX3_DUPLICATEWAKEUPEVENT_SHIFT 22
+#define OMAP4_CSI21_DX3_DUPLICATEWAKEUPEVENT_MASK (1 << 22)
+#define OMAP4_CSI21_DY2_DUPLICATEWAKEUPEVENT_SHIFT 21
+#define OMAP4_CSI21_DY2_DUPLICATEWAKEUPEVENT_MASK (1 << 21)
+#define OMAP4_CSI21_DX2_DUPLICATEWAKEUPEVENT_SHIFT 20
+#define OMAP4_CSI21_DX2_DUPLICATEWAKEUPEVENT_MASK (1 << 20)
+#define OMAP4_CSI21_DY1_DUPLICATEWAKEUPEVENT_SHIFT 19
+#define OMAP4_CSI21_DY1_DUPLICATEWAKEUPEVENT_MASK (1 << 19)
+#define OMAP4_CSI21_DX1_DUPLICATEWAKEUPEVENT_SHIFT 18
+#define OMAP4_CSI21_DX1_DUPLICATEWAKEUPEVENT_MASK (1 << 18)
+#define OMAP4_CSI21_DY0_DUPLICATEWAKEUPEVENT_SHIFT 17
+#define OMAP4_CSI21_DY0_DUPLICATEWAKEUPEVENT_MASK (1 << 17)
+#define OMAP4_CSI21_DX0_DUPLICATEWAKEUPEVENT_SHIFT 16
+#define OMAP4_CSI21_DX0_DUPLICATEWAKEUPEVENT_MASK (1 << 16)
+#define OMAP4_HDMI_DDC_SDA_DUPLICATEWAKEUPEVENT_SHIFT 15
+#define OMAP4_HDMI_DDC_SDA_DUPLICATEWAKEUPEVENT_MASK (1 << 15)
+#define OMAP4_HDMI_DDC_SCL_DUPLICATEWAKEUPEVENT_SHIFT 14
+#define OMAP4_HDMI_DDC_SCL_DUPLICATEWAKEUPEVENT_MASK (1 << 14)
+#define OMAP4_HDMI_CEC_DUPLICATEWAKEUPEVENT_SHIFT 13
+#define OMAP4_HDMI_CEC_DUPLICATEWAKEUPEVENT_MASK (1 << 13)
+#define OMAP4_HDMI_HPD_DUPLICATEWAKEUPEVENT_SHIFT 12
+#define OMAP4_HDMI_HPD_DUPLICATEWAKEUPEVENT_MASK (1 << 12)
+#define OMAP4_C2C_DATA15_DUPLICATEWAKEUPEVENT_SHIFT 11
+#define OMAP4_C2C_DATA15_DUPLICATEWAKEUPEVENT_MASK (1 << 11)
+#define OMAP4_C2C_DATA14_DUPLICATEWAKEUPEVENT_SHIFT 10
+#define OMAP4_C2C_DATA14_DUPLICATEWAKEUPEVENT_MASK (1 << 10)
+#define OMAP4_C2C_DATA13_DUPLICATEWAKEUPEVENT_SHIFT 9
+#define OMAP4_C2C_DATA13_DUPLICATEWAKEUPEVENT_MASK (1 << 9)
+#define OMAP4_C2C_DATA12_DUPLICATEWAKEUPEVENT_SHIFT 8
+#define OMAP4_C2C_DATA12_DUPLICATEWAKEUPEVENT_MASK (1 << 8)
+#define OMAP4_C2C_DATA11_DUPLICATEWAKEUPEVENT_SHIFT 7
+#define OMAP4_C2C_DATA11_DUPLICATEWAKEUPEVENT_MASK (1 << 7)
+#define OMAP4_GPMC_WAIT1_DUPLICATEWAKEUPEVENT_SHIFT 6
+#define OMAP4_GPMC_WAIT1_DUPLICATEWAKEUPEVENT_MASK (1 << 6)
+#define OMAP4_GPMC_WAIT0_DUPLICATEWAKEUPEVENT_SHIFT 5
+#define OMAP4_GPMC_WAIT0_DUPLICATEWAKEUPEVENT_MASK (1 << 5)
+#define OMAP4_GPMC_NBE1_DUPLICATEWAKEUPEVENT_SHIFT 4
+#define OMAP4_GPMC_NBE1_DUPLICATEWAKEUPEVENT_MASK (1 << 4)
+#define OMAP4_GPMC_NBE0_CLE_DUPLICATEWAKEUPEVENT_SHIFT 3
+#define OMAP4_GPMC_NBE0_CLE_DUPLICATEWAKEUPEVENT_MASK (1 << 3)
+#define OMAP4_GPMC_NWE_DUPLICATEWAKEUPEVENT_SHIFT 2
+#define OMAP4_GPMC_NWE_DUPLICATEWAKEUPEVENT_MASK (1 << 2)
+#define OMAP4_GPMC_NOE_DUPLICATEWAKEUPEVENT_SHIFT 1
+#define OMAP4_GPMC_NOE_DUPLICATEWAKEUPEVENT_MASK (1 << 1)
+#define OMAP4_GPMC_NADV_ALE_DUPLICATEWAKEUPEVENT_SHIFT 0
+#define OMAP4_GPMC_NADV_ALE_DUPLICATEWAKEUPEVENT_MASK (1 << 0)
+
+/* PADCONF_WAKEUPEVENT_2 */
+#define OMAP4_ABE_MCBSP1_CLKX_DUPLICATEWAKEUPEVENT_SHIFT 31
+#define OMAP4_ABE_MCBSP1_CLKX_DUPLICATEWAKEUPEVENT_MASK (1 << 31)
+#define OMAP4_ABE_MCBSP2_FSX_DUPLICATEWAKEUPEVENT_SHIFT 30
+#define OMAP4_ABE_MCBSP2_FSX_DUPLICATEWAKEUPEVENT_MASK (1 << 30)
+#define OMAP4_ABE_MCBSP2_DX_DUPLICATEWAKEUPEVENT_SHIFT 29
+#define OMAP4_ABE_MCBSP2_DX_DUPLICATEWAKEUPEVENT_MASK (1 << 29)
+#define OMAP4_ABE_MCBSP2_DR_DUPLICATEWAKEUPEVENT_SHIFT 28
+#define OMAP4_ABE_MCBSP2_DR_DUPLICATEWAKEUPEVENT_MASK (1 << 28)
+#define OMAP4_ABE_MCBSP2_CLKX_DUPLICATEWAKEUPEVENT_SHIFT 27
+#define OMAP4_ABE_MCBSP2_CLKX_DUPLICATEWAKEUPEVENT_MASK (1 << 27)
+#define OMAP4_SDMMC1_DAT7_DUPLICATEWAKEUPEVENT_SHIFT 26
+#define OMAP4_SDMMC1_DAT7_DUPLICATEWAKEUPEVENT_MASK (1 << 26)
+#define OMAP4_SDMMC1_DAT6_DUPLICATEWAKEUPEVENT_SHIFT 25
+#define OMAP4_SDMMC1_DAT6_DUPLICATEWAKEUPEVENT_MASK (1 << 25)
+#define OMAP4_SDMMC1_DAT5_DUPLICATEWAKEUPEVENT_SHIFT 24
+#define OMAP4_SDMMC1_DAT5_DUPLICATEWAKEUPEVENT_MASK (1 << 24)
+#define OMAP4_SDMMC1_DAT4_DUPLICATEWAKEUPEVENT_SHIFT 23
+#define OMAP4_SDMMC1_DAT4_DUPLICATEWAKEUPEVENT_MASK (1 << 23)
+#define OMAP4_SDMMC1_DAT3_DUPLICATEWAKEUPEVENT_SHIFT 22
+#define OMAP4_SDMMC1_DAT3_DUPLICATEWAKEUPEVENT_MASK (1 << 22)
+#define OMAP4_SDMMC1_DAT2_DUPLICATEWAKEUPEVENT_SHIFT 21
+#define OMAP4_SDMMC1_DAT2_DUPLICATEWAKEUPEVENT_MASK (1 << 21)
+#define OMAP4_SDMMC1_DAT1_DUPLICATEWAKEUPEVENT_SHIFT 20
+#define OMAP4_SDMMC1_DAT1_DUPLICATEWAKEUPEVENT_MASK (1 << 20)
+#define OMAP4_SDMMC1_DAT0_DUPLICATEWAKEUPEVENT_SHIFT 19
+#define OMAP4_SDMMC1_DAT0_DUPLICATEWAKEUPEVENT_MASK (1 << 19)
+#define OMAP4_SDMMC1_CMD_DUPLICATEWAKEUPEVENT_SHIFT 18
+#define OMAP4_SDMMC1_CMD_DUPLICATEWAKEUPEVENT_MASK (1 << 18)
+#define OMAP4_SDMMC1_CLK_DUPLICATEWAKEUPEVENT_SHIFT 17
+#define OMAP4_SDMMC1_CLK_DUPLICATEWAKEUPEVENT_MASK (1 << 17)
+#define OMAP4_USBC1_ICUSB_DM_DUPLICATEWAKEUPEVENT_SHIFT 16
+#define OMAP4_USBC1_ICUSB_DM_DUPLICATEWAKEUPEVENT_MASK (1 << 16)
+#define OMAP4_USBC1_ICUSB_DP_DUPLICATEWAKEUPEVENT_SHIFT 15
+#define OMAP4_USBC1_ICUSB_DP_DUPLICATEWAKEUPEVENT_MASK (1 << 15)
+#define OMAP4_USBB1_HSIC_STROBE_DUPLICATEWAKEUPEVENT_SHIFT 14
+#define OMAP4_USBB1_HSIC_STROBE_DUPLICATEWAKEUPEVENT_MASK (1 << 14)
+#define OMAP4_USBB1_HSIC_DATA_DUPLICATEWAKEUPEVENT_SHIFT 13
+#define OMAP4_USBB1_HSIC_DATA_DUPLICATEWAKEUPEVENT_MASK (1 << 13)
+#define OMAP4_USBB1_ULPITLL_DAT7_DUPLICATEWAKEUPEVENT_SHIFT 12
+#define OMAP4_USBB1_ULPITLL_DAT7_DUPLICATEWAKEUPEVENT_MASK (1 << 12)
+#define OMAP4_USBB1_ULPITLL_DAT6_DUPLICATEWAKEUPEVENT_SHIFT 11
+#define OMAP4_USBB1_ULPITLL_DAT6_DUPLICATEWAKEUPEVENT_MASK (1 << 11)
+#define OMAP4_USBB1_ULPITLL_DAT5_DUPLICATEWAKEUPEVENT_SHIFT 10
+#define OMAP4_USBB1_ULPITLL_DAT5_DUPLICATEWAKEUPEVENT_MASK (1 << 10)
+#define OMAP4_USBB1_ULPITLL_DAT4_DUPLICATEWAKEUPEVENT_SHIFT 9
+#define OMAP4_USBB1_ULPITLL_DAT4_DUPLICATEWAKEUPEVENT_MASK (1 << 9)
+#define OMAP4_USBB1_ULPITLL_DAT3_DUPLICATEWAKEUPEVENT_SHIFT 8
+#define OMAP4_USBB1_ULPITLL_DAT3_DUPLICATEWAKEUPEVENT_MASK (1 << 8)
+#define OMAP4_USBB1_ULPITLL_DAT2_DUPLICATEWAKEUPEVENT_SHIFT 7
+#define OMAP4_USBB1_ULPITLL_DAT2_DUPLICATEWAKEUPEVENT_MASK (1 << 7)
+#define OMAP4_USBB1_ULPITLL_DAT1_DUPLICATEWAKEUPEVENT_SHIFT 6
+#define OMAP4_USBB1_ULPITLL_DAT1_DUPLICATEWAKEUPEVENT_MASK (1 << 6)
+#define OMAP4_USBB1_ULPITLL_DAT0_DUPLICATEWAKEUPEVENT_SHIFT 5
+#define OMAP4_USBB1_ULPITLL_DAT0_DUPLICATEWAKEUPEVENT_MASK (1 << 5)
+#define OMAP4_USBB1_ULPITLL_NXT_DUPLICATEWAKEUPEVENT_SHIFT 4
+#define OMAP4_USBB1_ULPITLL_NXT_DUPLICATEWAKEUPEVENT_MASK (1 << 4)
+#define OMAP4_USBB1_ULPITLL_DIR_DUPLICATEWAKEUPEVENT_SHIFT 3
+#define OMAP4_USBB1_ULPITLL_DIR_DUPLICATEWAKEUPEVENT_MASK (1 << 3)
+#define OMAP4_USBB1_ULPITLL_STP_DUPLICATEWAKEUPEVENT_SHIFT 2
+#define OMAP4_USBB1_ULPITLL_STP_DUPLICATEWAKEUPEVENT_MASK (1 << 2)
+#define OMAP4_USBB1_ULPITLL_CLK_DUPLICATEWAKEUPEVENT_SHIFT 1
+#define OMAP4_USBB1_ULPITLL_CLK_DUPLICATEWAKEUPEVENT_MASK (1 << 1)
+#define OMAP4_CAM_GLOBALRESET_DUPLICATEWAKEUPEVENT_SHIFT 0
+#define OMAP4_CAM_GLOBALRESET_DUPLICATEWAKEUPEVENT_MASK (1 << 0)
+
+/* PADCONF_WAKEUPEVENT_3 */
+#define OMAP4_MCSPI1_CS3_DUPLICATEWAKEUPEVENT_SHIFT 31
+#define OMAP4_MCSPI1_CS3_DUPLICATEWAKEUPEVENT_MASK (1 << 31)
+#define OMAP4_MCSPI1_CS2_DUPLICATEWAKEUPEVENT_SHIFT 30
+#define OMAP4_MCSPI1_CS2_DUPLICATEWAKEUPEVENT_MASK (1 << 30)
+#define OMAP4_MCSPI1_CS1_DUPLICATEWAKEUPEVENT_SHIFT 29
+#define OMAP4_MCSPI1_CS1_DUPLICATEWAKEUPEVENT_MASK (1 << 29)
+#define OMAP4_MCSPI1_CS0_DUPLICATEWAKEUPEVENT_SHIFT 28
+#define OMAP4_MCSPI1_CS0_DUPLICATEWAKEUPEVENT_MASK (1 << 28)
+#define OMAP4_MCSPI1_SIMO_DUPLICATEWAKEUPEVENT_SHIFT 27
+#define OMAP4_MCSPI1_SIMO_DUPLICATEWAKEUPEVENT_MASK (1 << 27)
+#define OMAP4_MCSPI1_SOMI_DUPLICATEWAKEUPEVENT_SHIFT 26
+#define OMAP4_MCSPI1_SOMI_DUPLICATEWAKEUPEVENT_MASK (1 << 26)
+#define OMAP4_MCSPI1_CLK_DUPLICATEWAKEUPEVENT_SHIFT 25
+#define OMAP4_MCSPI1_CLK_DUPLICATEWAKEUPEVENT_MASK (1 << 25)
+#define OMAP4_I2C4_SDA_DUPLICATEWAKEUPEVENT_SHIFT 24
+#define OMAP4_I2C4_SDA_DUPLICATEWAKEUPEVENT_MASK (1 << 24)
+#define OMAP4_I2C4_SCL_DUPLICATEWAKEUPEVENT_SHIFT 23
+#define OMAP4_I2C4_SCL_DUPLICATEWAKEUPEVENT_MASK (1 << 23)
+#define OMAP4_I2C3_SDA_DUPLICATEWAKEUPEVENT_SHIFT 22
+#define OMAP4_I2C3_SDA_DUPLICATEWAKEUPEVENT_MASK (1 << 22)
+#define OMAP4_I2C3_SCL_DUPLICATEWAKEUPEVENT_SHIFT 21
+#define OMAP4_I2C3_SCL_DUPLICATEWAKEUPEVENT_MASK (1 << 21)
+#define OMAP4_I2C2_SDA_DUPLICATEWAKEUPEVENT_SHIFT 20
+#define OMAP4_I2C2_SDA_DUPLICATEWAKEUPEVENT_MASK (1 << 20)
+#define OMAP4_I2C2_SCL_DUPLICATEWAKEUPEVENT_SHIFT 19
+#define OMAP4_I2C2_SCL_DUPLICATEWAKEUPEVENT_MASK (1 << 19)
+#define OMAP4_I2C1_SDA_DUPLICATEWAKEUPEVENT_SHIFT 18
+#define OMAP4_I2C1_SDA_DUPLICATEWAKEUPEVENT_MASK (1 << 18)
+#define OMAP4_I2C1_SCL_DUPLICATEWAKEUPEVENT_SHIFT 17
+#define OMAP4_I2C1_SCL_DUPLICATEWAKEUPEVENT_MASK (1 << 17)
+#define OMAP4_HDQ_SIO_DUPLICATEWAKEUPEVENT_SHIFT 16
+#define OMAP4_HDQ_SIO_DUPLICATEWAKEUPEVENT_MASK (1 << 16)
+#define OMAP4_UART2_TX_DUPLICATEWAKEUPEVENT_SHIFT 15
+#define OMAP4_UART2_TX_DUPLICATEWAKEUPEVENT_MASK (1 << 15)
+#define OMAP4_UART2_RX_DUPLICATEWAKEUPEVENT_SHIFT 14
+#define OMAP4_UART2_RX_DUPLICATEWAKEUPEVENT_MASK (1 << 14)
+#define OMAP4_UART2_RTS_DUPLICATEWAKEUPEVENT_SHIFT 13
+#define OMAP4_UART2_RTS_DUPLICATEWAKEUPEVENT_MASK (1 << 13)
+#define OMAP4_UART2_CTS_DUPLICATEWAKEUPEVENT_SHIFT 12
+#define OMAP4_UART2_CTS_DUPLICATEWAKEUPEVENT_MASK (1 << 12)
+#define OMAP4_ABE_DMIC_DIN3_DUPLICATEWAKEUPEVENT_SHIFT 11
+#define OMAP4_ABE_DMIC_DIN3_DUPLICATEWAKEUPEVENT_MASK (1 << 11)
+#define OMAP4_ABE_DMIC_DIN2_DUPLICATEWAKEUPEVENT_SHIFT 10
+#define OMAP4_ABE_DMIC_DIN2_DUPLICATEWAKEUPEVENT_MASK (1 << 10)
+#define OMAP4_ABE_DMIC_DIN1_DUPLICATEWAKEUPEVENT_SHIFT 9
+#define OMAP4_ABE_DMIC_DIN1_DUPLICATEWAKEUPEVENT_MASK (1 << 9)
+#define OMAP4_ABE_DMIC_CLK1_DUPLICATEWAKEUPEVENT_SHIFT 8
+#define OMAP4_ABE_DMIC_CLK1_DUPLICATEWAKEUPEVENT_MASK (1 << 8)
+#define OMAP4_ABE_CLKS_DUPLICATEWAKEUPEVENT_SHIFT 7
+#define OMAP4_ABE_CLKS_DUPLICATEWAKEUPEVENT_MASK (1 << 7)
+#define OMAP4_ABE_PDM_LB_CLK_DUPLICATEWAKEUPEVENT_SHIFT 6
+#define OMAP4_ABE_PDM_LB_CLK_DUPLICATEWAKEUPEVENT_MASK (1 << 6)
+#define OMAP4_ABE_PDM_FRAME_DUPLICATEWAKEUPEVENT_SHIFT 5
+#define OMAP4_ABE_PDM_FRAME_DUPLICATEWAKEUPEVENT_MASK (1 << 5)
+#define OMAP4_ABE_PDM_DL_DATA_DUPLICATEWAKEUPEVENT_SHIFT 4
+#define OMAP4_ABE_PDM_DL_DATA_DUPLICATEWAKEUPEVENT_MASK (1 << 4)
+#define OMAP4_ABE_PDM_UL_DATA_DUPLICATEWAKEUPEVENT_SHIFT 3
+#define OMAP4_ABE_PDM_UL_DATA_DUPLICATEWAKEUPEVENT_MASK (1 << 3)
+#define OMAP4_ABE_MCBSP1_FSX_DUPLICATEWAKEUPEVENT_SHIFT 2
+#define OMAP4_ABE_MCBSP1_FSX_DUPLICATEWAKEUPEVENT_MASK (1 << 2)
+#define OMAP4_ABE_MCBSP1_DX_DUPLICATEWAKEUPEVENT_SHIFT 1
+#define OMAP4_ABE_MCBSP1_DX_DUPLICATEWAKEUPEVENT_MASK (1 << 1)
+#define OMAP4_ABE_MCBSP1_DR_DUPLICATEWAKEUPEVENT_SHIFT 0
+#define OMAP4_ABE_MCBSP1_DR_DUPLICATEWAKEUPEVENT_MASK (1 << 0)
+
+/* PADCONF_WAKEUPEVENT_4 */
+#define OMAP4_UNIPRO_TY0_DUPLICATEWAKEUPEVENT_SHIFT 31
+#define OMAP4_UNIPRO_TY0_DUPLICATEWAKEUPEVENT_MASK (1 << 31)
+#define OMAP4_UNIPRO_TX0_DUPLICATEWAKEUPEVENT_SHIFT 30
+#define OMAP4_UNIPRO_TX0_DUPLICATEWAKEUPEVENT_MASK (1 << 30)
+#define OMAP4_USBB2_HSIC_STROBE_DUPLICATEWAKEUPEVENT_SHIFT 29
+#define OMAP4_USBB2_HSIC_STROBE_DUPLICATEWAKEUPEVENT_MASK (1 << 29)
+#define OMAP4_USBB2_HSIC_DATA_DUPLICATEWAKEUPEVENT_SHIFT 28
+#define OMAP4_USBB2_HSIC_DATA_DUPLICATEWAKEUPEVENT_MASK (1 << 28)
+#define OMAP4_USBB2_ULPITLL_DAT7_DUPLICATEWAKEUPEVENT_SHIFT 27
+#define OMAP4_USBB2_ULPITLL_DAT7_DUPLICATEWAKEUPEVENT_MASK (1 << 27)
+#define OMAP4_USBB2_ULPITLL_DAT6_DUPLICATEWAKEUPEVENT_SHIFT 26
+#define OMAP4_USBB2_ULPITLL_DAT6_DUPLICATEWAKEUPEVENT_MASK (1 << 26)
+#define OMAP4_USBB2_ULPITLL_DAT5_DUPLICATEWAKEUPEVENT_SHIFT 25
+#define OMAP4_USBB2_ULPITLL_DAT5_DUPLICATEWAKEUPEVENT_MASK (1 << 25)
+#define OMAP4_USBB2_ULPITLL_DAT4_DUPLICATEWAKEUPEVENT_SHIFT 24
+#define OMAP4_USBB2_ULPITLL_DAT4_DUPLICATEWAKEUPEVENT_MASK (1 << 24)
+#define OMAP4_USBB2_ULPITLL_DAT3_DUPLICATEWAKEUPEVENT_SHIFT 23
+#define OMAP4_USBB2_ULPITLL_DAT3_DUPLICATEWAKEUPEVENT_MASK (1 << 23)
+#define OMAP4_USBB2_ULPITLL_DAT2_DUPLICATEWAKEUPEVENT_SHIFT 22
+#define OMAP4_USBB2_ULPITLL_DAT2_DUPLICATEWAKEUPEVENT_MASK (1 << 22)
+#define OMAP4_USBB2_ULPITLL_DAT1_DUPLICATEWAKEUPEVENT_SHIFT 21
+#define OMAP4_USBB2_ULPITLL_DAT1_DUPLICATEWAKEUPEVENT_MASK (1 << 21)
+#define OMAP4_USBB2_ULPITLL_DAT0_DUPLICATEWAKEUPEVENT_SHIFT 20
+#define OMAP4_USBB2_ULPITLL_DAT0_DUPLICATEWAKEUPEVENT_MASK (1 << 20)
+#define OMAP4_USBB2_ULPITLL_NXT_DUPLICATEWAKEUPEVENT_SHIFT 19
+#define OMAP4_USBB2_ULPITLL_NXT_DUPLICATEWAKEUPEVENT_MASK (1 << 19)
+#define OMAP4_USBB2_ULPITLL_DIR_DUPLICATEWAKEUPEVENT_SHIFT 18
+#define OMAP4_USBB2_ULPITLL_DIR_DUPLICATEWAKEUPEVENT_MASK (1 << 18)
+#define OMAP4_USBB2_ULPITLL_STP_DUPLICATEWAKEUPEVENT_SHIFT 17
+#define OMAP4_USBB2_ULPITLL_STP_DUPLICATEWAKEUPEVENT_MASK (1 << 17)
+#define OMAP4_USBB2_ULPITLL_CLK_DUPLICATEWAKEUPEVENT_SHIFT 16
+#define OMAP4_USBB2_ULPITLL_CLK_DUPLICATEWAKEUPEVENT_MASK (1 << 16)
+#define OMAP4_UART4_TX_DUPLICATEWAKEUPEVENT_SHIFT 15
+#define OMAP4_UART4_TX_DUPLICATEWAKEUPEVENT_MASK (1 << 15)
+#define OMAP4_UART4_RX_DUPLICATEWAKEUPEVENT_SHIFT 14
+#define OMAP4_UART4_RX_DUPLICATEWAKEUPEVENT_MASK (1 << 14)
+#define OMAP4_MCSPI4_CS0_DUPLICATEWAKEUPEVENT_SHIFT 13
+#define OMAP4_MCSPI4_CS0_DUPLICATEWAKEUPEVENT_MASK (1 << 13)
+#define OMAP4_MCSPI4_SOMI_DUPLICATEWAKEUPEVENT_SHIFT 12
+#define OMAP4_MCSPI4_SOMI_DUPLICATEWAKEUPEVENT_MASK (1 << 12)
+#define OMAP4_MCSPI4_SIMO_DUPLICATEWAKEUPEVENT_SHIFT 11
+#define OMAP4_MCSPI4_SIMO_DUPLICATEWAKEUPEVENT_MASK (1 << 11)
+#define OMAP4_MCSPI4_CLK_DUPLICATEWAKEUPEVENT_SHIFT 10
+#define OMAP4_MCSPI4_CLK_DUPLICATEWAKEUPEVENT_MASK (1 << 10)
+#define OMAP4_SDMMC5_DAT3_DUPLICATEWAKEUPEVENT_SHIFT 9
+#define OMAP4_SDMMC5_DAT3_DUPLICATEWAKEUPEVENT_MASK (1 << 9)
+#define OMAP4_SDMMC5_DAT2_DUPLICATEWAKEUPEVENT_SHIFT 8
+#define OMAP4_SDMMC5_DAT2_DUPLICATEWAKEUPEVENT_MASK (1 << 8)
+#define OMAP4_SDMMC5_DAT1_DUPLICATEWAKEUPEVENT_SHIFT 7
+#define OMAP4_SDMMC5_DAT1_DUPLICATEWAKEUPEVENT_MASK (1 << 7)
+#define OMAP4_SDMMC5_DAT0_DUPLICATEWAKEUPEVENT_SHIFT 6
+#define OMAP4_SDMMC5_DAT0_DUPLICATEWAKEUPEVENT_MASK (1 << 6)
+#define OMAP4_SDMMC5_CMD_DUPLICATEWAKEUPEVENT_SHIFT 5
+#define OMAP4_SDMMC5_CMD_DUPLICATEWAKEUPEVENT_MASK (1 << 5)
+#define OMAP4_SDMMC5_CLK_DUPLICATEWAKEUPEVENT_SHIFT 4
+#define OMAP4_SDMMC5_CLK_DUPLICATEWAKEUPEVENT_MASK (1 << 4)
+#define OMAP4_UART3_TX_IRTX_DUPLICATEWAKEUPEVENT_SHIFT 3
+#define OMAP4_UART3_TX_IRTX_DUPLICATEWAKEUPEVENT_MASK (1 << 3)
+#define OMAP4_UART3_RX_IRRX_DUPLICATEWAKEUPEVENT_SHIFT 2
+#define OMAP4_UART3_RX_IRRX_DUPLICATEWAKEUPEVENT_MASK (1 << 2)
+#define OMAP4_UART3_RTS_SD_DUPLICATEWAKEUPEVENT_SHIFT 1
+#define OMAP4_UART3_RTS_SD_DUPLICATEWAKEUPEVENT_MASK (1 << 1)
+#define OMAP4_UART3_CTS_RCTX_DUPLICATEWAKEUPEVENT_SHIFT 0
+#define OMAP4_UART3_CTS_RCTX_DUPLICATEWAKEUPEVENT_MASK (1 << 0)
+
+/* PADCONF_WAKEUPEVENT_5 */
+#define OMAP4_DPM_EMU11_DUPLICATEWAKEUPEVENT_SHIFT 31
+#define OMAP4_DPM_EMU11_DUPLICATEWAKEUPEVENT_MASK (1 << 31)
+#define OMAP4_DPM_EMU10_DUPLICATEWAKEUPEVENT_SHIFT 30
+#define OMAP4_DPM_EMU10_DUPLICATEWAKEUPEVENT_MASK (1 << 30)
+#define OMAP4_DPM_EMU9_DUPLICATEWAKEUPEVENT_SHIFT 29
+#define OMAP4_DPM_EMU9_DUPLICATEWAKEUPEVENT_MASK (1 << 29)
+#define OMAP4_DPM_EMU8_DUPLICATEWAKEUPEVENT_SHIFT 28
+#define OMAP4_DPM_EMU8_DUPLICATEWAKEUPEVENT_MASK (1 << 28)
+#define OMAP4_DPM_EMU7_DUPLICATEWAKEUPEVENT_SHIFT 27
+#define OMAP4_DPM_EMU7_DUPLICATEWAKEUPEVENT_MASK (1 << 27)
+#define OMAP4_DPM_EMU6_DUPLICATEWAKEUPEVENT_SHIFT 26
+#define OMAP4_DPM_EMU6_DUPLICATEWAKEUPEVENT_MASK (1 << 26)
+#define OMAP4_DPM_EMU5_DUPLICATEWAKEUPEVENT_SHIFT 25
+#define OMAP4_DPM_EMU5_DUPLICATEWAKEUPEVENT_MASK (1 << 25)
+#define OMAP4_DPM_EMU4_DUPLICATEWAKEUPEVENT_SHIFT 24
+#define OMAP4_DPM_EMU4_DUPLICATEWAKEUPEVENT_MASK (1 << 24)
+#define OMAP4_DPM_EMU3_DUPLICATEWAKEUPEVENT_SHIFT 23
+#define OMAP4_DPM_EMU3_DUPLICATEWAKEUPEVENT_MASK (1 << 23)
+#define OMAP4_DPM_EMU2_DUPLICATEWAKEUPEVENT_SHIFT 22
+#define OMAP4_DPM_EMU2_DUPLICATEWAKEUPEVENT_MASK (1 << 22)
+#define OMAP4_DPM_EMU1_DUPLICATEWAKEUPEVENT_SHIFT 21
+#define OMAP4_DPM_EMU1_DUPLICATEWAKEUPEVENT_MASK (1 << 21)
+#define OMAP4_DPM_EMU0_DUPLICATEWAKEUPEVENT_SHIFT 20
+#define OMAP4_DPM_EMU0_DUPLICATEWAKEUPEVENT_MASK (1 << 20)
+#define OMAP4_SYS_BOOT5_DUPLICATEWAKEUPEVENT_SHIFT 19
+#define OMAP4_SYS_BOOT5_DUPLICATEWAKEUPEVENT_MASK (1 << 19)
+#define OMAP4_SYS_BOOT4_DUPLICATEWAKEUPEVENT_SHIFT 18
+#define OMAP4_SYS_BOOT4_DUPLICATEWAKEUPEVENT_MASK (1 << 18)
+#define OMAP4_SYS_BOOT3_DUPLICATEWAKEUPEVENT_SHIFT 17
+#define OMAP4_SYS_BOOT3_DUPLICATEWAKEUPEVENT_MASK (1 << 17)
+#define OMAP4_SYS_BOOT2_DUPLICATEWAKEUPEVENT_SHIFT 16
+#define OMAP4_SYS_BOOT2_DUPLICATEWAKEUPEVENT_MASK (1 << 16)
+#define OMAP4_SYS_BOOT1_DUPLICATEWAKEUPEVENT_SHIFT 15
+#define OMAP4_SYS_BOOT1_DUPLICATEWAKEUPEVENT_MASK (1 << 15)
+#define OMAP4_SYS_BOOT0_DUPLICATEWAKEUPEVENT_SHIFT 14
+#define OMAP4_SYS_BOOT0_DUPLICATEWAKEUPEVENT_MASK (1 << 14)
+#define OMAP4_SYS_NIRQ2_DUPLICATEWAKEUPEVENT_SHIFT 13
+#define OMAP4_SYS_NIRQ2_DUPLICATEWAKEUPEVENT_MASK (1 << 13)
+#define OMAP4_SYS_NIRQ1_DUPLICATEWAKEUPEVENT_SHIFT 12
+#define OMAP4_SYS_NIRQ1_DUPLICATEWAKEUPEVENT_MASK (1 << 12)
+#define OMAP4_FREF_CLK2_OUT_DUPLICATEWAKEUPEVENT_SHIFT 11
+#define OMAP4_FREF_CLK2_OUT_DUPLICATEWAKEUPEVENT_MASK (1 << 11)
+#define OMAP4_FREF_CLK1_OUT_DUPLICATEWAKEUPEVENT_SHIFT 10
+#define OMAP4_FREF_CLK1_OUT_DUPLICATEWAKEUPEVENT_MASK (1 << 10)
+#define OMAP4_UNIPRO_RY2_DUPLICATEWAKEUPEVENT_SHIFT 9
+#define OMAP4_UNIPRO_RY2_DUPLICATEWAKEUPEVENT_MASK (1 << 9)
+#define OMAP4_UNIPRO_RX2_DUPLICATEWAKEUPEVENT_SHIFT 8
+#define OMAP4_UNIPRO_RX2_DUPLICATEWAKEUPEVENT_MASK (1 << 8)
+#define OMAP4_UNIPRO_RY1_DUPLICATEWAKEUPEVENT_SHIFT 7
+#define OMAP4_UNIPRO_RY1_DUPLICATEWAKEUPEVENT_MASK (1 << 7)
+#define OMAP4_UNIPRO_RX1_DUPLICATEWAKEUPEVENT_SHIFT 6
+#define OMAP4_UNIPRO_RX1_DUPLICATEWAKEUPEVENT_MASK (1 << 6)
+#define OMAP4_UNIPRO_RY0_DUPLICATEWAKEUPEVENT_SHIFT 5
+#define OMAP4_UNIPRO_RY0_DUPLICATEWAKEUPEVENT_MASK (1 << 5)
+#define OMAP4_UNIPRO_RX0_DUPLICATEWAKEUPEVENT_SHIFT 4
+#define OMAP4_UNIPRO_RX0_DUPLICATEWAKEUPEVENT_MASK (1 << 4)
+#define OMAP4_UNIPRO_TY2_DUPLICATEWAKEUPEVENT_SHIFT 3
+#define OMAP4_UNIPRO_TY2_DUPLICATEWAKEUPEVENT_MASK (1 << 3)
+#define OMAP4_UNIPRO_TX2_DUPLICATEWAKEUPEVENT_SHIFT 2
+#define OMAP4_UNIPRO_TX2_DUPLICATEWAKEUPEVENT_MASK (1 << 2)
+#define OMAP4_UNIPRO_TY1_DUPLICATEWAKEUPEVENT_SHIFT 1
+#define OMAP4_UNIPRO_TY1_DUPLICATEWAKEUPEVENT_MASK (1 << 1)
+#define OMAP4_UNIPRO_TX1_DUPLICATEWAKEUPEVENT_SHIFT 0
+#define OMAP4_UNIPRO_TX1_DUPLICATEWAKEUPEVENT_MASK (1 << 0)
+
+/* PADCONF_WAKEUPEVENT_6 */
+#define OMAP4_DPM_EMU19_DUPLICATEWAKEUPEVENT_SHIFT 7
+#define OMAP4_DPM_EMU19_DUPLICATEWAKEUPEVENT_MASK (1 << 7)
+#define OMAP4_DPM_EMU18_DUPLICATEWAKEUPEVENT_SHIFT 6
+#define OMAP4_DPM_EMU18_DUPLICATEWAKEUPEVENT_MASK (1 << 6)
+#define OMAP4_DPM_EMU17_DUPLICATEWAKEUPEVENT_SHIFT 5
+#define OMAP4_DPM_EMU17_DUPLICATEWAKEUPEVENT_MASK (1 << 5)
+#define OMAP4_DPM_EMU16_DUPLICATEWAKEUPEVENT_SHIFT 4
+#define OMAP4_DPM_EMU16_DUPLICATEWAKEUPEVENT_MASK (1 << 4)
+#define OMAP4_DPM_EMU15_DUPLICATEWAKEUPEVENT_SHIFT 3
+#define OMAP4_DPM_EMU15_DUPLICATEWAKEUPEVENT_MASK (1 << 3)
+#define OMAP4_DPM_EMU14_DUPLICATEWAKEUPEVENT_SHIFT 2
+#define OMAP4_DPM_EMU14_DUPLICATEWAKEUPEVENT_MASK (1 << 2)
+#define OMAP4_DPM_EMU13_DUPLICATEWAKEUPEVENT_SHIFT 1
+#define OMAP4_DPM_EMU13_DUPLICATEWAKEUPEVENT_MASK (1 << 1)
+#define OMAP4_DPM_EMU12_DUPLICATEWAKEUPEVENT_SHIFT 0
+#define OMAP4_DPM_EMU12_DUPLICATEWAKEUPEVENT_MASK (1 << 0)
+
+/* CONTROL_PADCONF_GLOBAL */
+#define OMAP4_FORCE_OFFMODE_EN_SHIFT 31
+#define OMAP4_FORCE_OFFMODE_EN_MASK (1 << 31)
+
+/* CONTROL_PADCONF_MODE */
+#define OMAP4_VDDS_DV_BANK0_SHIFT 31
+#define OMAP4_VDDS_DV_BANK0_MASK (1 << 31)
+#define OMAP4_VDDS_DV_BANK1_SHIFT 30
+#define OMAP4_VDDS_DV_BANK1_MASK (1 << 30)
+#define OMAP4_VDDS_DV_BANK3_SHIFT 29
+#define OMAP4_VDDS_DV_BANK3_MASK (1 << 29)
+#define OMAP4_VDDS_DV_BANK4_SHIFT 28
+#define OMAP4_VDDS_DV_BANK4_MASK (1 << 28)
+#define OMAP4_VDDS_DV_BANK5_SHIFT 27
+#define OMAP4_VDDS_DV_BANK5_MASK (1 << 27)
+#define OMAP4_VDDS_DV_BANK6_SHIFT 26
+#define OMAP4_VDDS_DV_BANK6_MASK (1 << 26)
+#define OMAP4_VDDS_DV_C2C_SHIFT 25
+#define OMAP4_VDDS_DV_C2C_MASK (1 << 25)
+#define OMAP4_VDDS_DV_CAM_SHIFT 24
+#define OMAP4_VDDS_DV_CAM_MASK (1 << 24)
+#define OMAP4_VDDS_DV_GPMC_SHIFT 23
+#define OMAP4_VDDS_DV_GPMC_MASK (1 << 23)
+#define OMAP4_VDDS_DV_SDMMC2_SHIFT 22
+#define OMAP4_VDDS_DV_SDMMC2_MASK (1 << 22)
+
+/* CONTROL_SMART1IO_PADCONF_0 */
+#define OMAP4_ABE_DR0_SC_SHIFT 30
+#define OMAP4_ABE_DR0_SC_MASK (0x3 << 30)
+#define OMAP4_CAM_DR0_SC_SHIFT 28
+#define OMAP4_CAM_DR0_SC_MASK (0x3 << 28)
+#define OMAP4_FREF_DR2_SC_SHIFT 26
+#define OMAP4_FREF_DR2_SC_MASK (0x3 << 26)
+#define OMAP4_FREF_DR3_SC_SHIFT 24
+#define OMAP4_FREF_DR3_SC_MASK (0x3 << 24)
+#define OMAP4_GPIO_DR8_SC_SHIFT 22
+#define OMAP4_GPIO_DR8_SC_MASK (0x3 << 22)
+#define OMAP4_GPIO_DR9_SC_SHIFT 20
+#define OMAP4_GPIO_DR9_SC_MASK (0x3 << 20)
+#define OMAP4_GPMC_DR2_SC_SHIFT 18
+#define OMAP4_GPMC_DR2_SC_MASK (0x3 << 18)
+#define OMAP4_GPMC_DR3_SC_SHIFT 16
+#define OMAP4_GPMC_DR3_SC_MASK (0x3 << 16)
+#define OMAP4_GPMC_DR6_SC_SHIFT 14
+#define OMAP4_GPMC_DR6_SC_MASK (0x3 << 14)
+#define OMAP4_HDMI_DR0_SC_SHIFT 12
+#define OMAP4_HDMI_DR0_SC_MASK (0x3 << 12)
+#define OMAP4_MCSPI1_DR0_SC_SHIFT 10
+#define OMAP4_MCSPI1_DR0_SC_MASK (0x3 << 10)
+#define OMAP4_UART1_DR0_SC_SHIFT 8
+#define OMAP4_UART1_DR0_SC_MASK (0x3 << 8)
+#define OMAP4_UART3_DR0_SC_SHIFT 6
+#define OMAP4_UART3_DR0_SC_MASK (0x3 << 6)
+#define OMAP4_UART3_DR1_SC_SHIFT 4
+#define OMAP4_UART3_DR1_SC_MASK (0x3 << 4)
+#define OMAP4_UNIPRO_DR0_SC_SHIFT 2
+#define OMAP4_UNIPRO_DR0_SC_MASK (0x3 << 2)
+#define OMAP4_UNIPRO_DR1_SC_SHIFT 0
+#define OMAP4_UNIPRO_DR1_SC_MASK (0x3 << 0)
+
+/* CONTROL_SMART1IO_PADCONF_1 */
+#define OMAP4_ABE_DR0_LB_SHIFT 30
+#define OMAP4_ABE_DR0_LB_MASK (0x3 << 30)
+#define OMAP4_CAM_DR0_LB_SHIFT 28
+#define OMAP4_CAM_DR0_LB_MASK (0x3 << 28)
+#define OMAP4_FREF_DR2_LB_SHIFT 26
+#define OMAP4_FREF_DR2_LB_MASK (0x3 << 26)
+#define OMAP4_FREF_DR3_LB_SHIFT 24
+#define OMAP4_FREF_DR3_LB_MASK (0x3 << 24)
+#define OMAP4_GPIO_DR8_LB_SHIFT 22
+#define OMAP4_GPIO_DR8_LB_MASK (0x3 << 22)
+#define OMAP4_GPIO_DR9_LB_SHIFT 20
+#define OMAP4_GPIO_DR9_LB_MASK (0x3 << 20)
+#define OMAP4_GPMC_DR2_LB_SHIFT 18
+#define OMAP4_GPMC_DR2_LB_MASK (0x3 << 18)
+#define OMAP4_GPMC_DR3_LB_SHIFT 16
+#define OMAP4_GPMC_DR3_LB_MASK (0x3 << 16)
+#define OMAP4_GPMC_DR6_LB_SHIFT 14
+#define OMAP4_GPMC_DR6_LB_MASK (0x3 << 14)
+#define OMAP4_HDMI_DR0_LB_SHIFT 12
+#define OMAP4_HDMI_DR0_LB_MASK (0x3 << 12)
+#define OMAP4_MCSPI1_DR0_LB_SHIFT 10
+#define OMAP4_MCSPI1_DR0_LB_MASK (0x3 << 10)
+#define OMAP4_UART1_DR0_LB_SHIFT 8
+#define OMAP4_UART1_DR0_LB_MASK (0x3 << 8)
+#define OMAP4_UART3_DR0_LB_SHIFT 6
+#define OMAP4_UART3_DR0_LB_MASK (0x3 << 6)
+#define OMAP4_UART3_DR1_LB_SHIFT 4
+#define OMAP4_UART3_DR1_LB_MASK (0x3 << 4)
+#define OMAP4_UNIPRO_DR0_LB_SHIFT 2
+#define OMAP4_UNIPRO_DR0_LB_MASK (0x3 << 2)
+#define OMAP4_UNIPRO_DR1_LB_SHIFT 0
+#define OMAP4_UNIPRO_DR1_LB_MASK (0x3 << 0)
+
+/* CONTROL_SMART2IO_PADCONF_0 */
+#define OMAP4_C2C_DR0_LB_SHIFT 31
+#define OMAP4_C2C_DR0_LB_MASK (1 << 31)
+#define OMAP4_DPM_DR1_LB_SHIFT 30
+#define OMAP4_DPM_DR1_LB_MASK (1 << 30)
+#define OMAP4_DPM_DR2_LB_SHIFT 29
+#define OMAP4_DPM_DR2_LB_MASK (1 << 29)
+#define OMAP4_DPM_DR3_LB_SHIFT 28
+#define OMAP4_DPM_DR3_LB_MASK (1 << 28)
+#define OMAP4_GPIO_DR0_LB_SHIFT 27
+#define OMAP4_GPIO_DR0_LB_MASK (1 << 27)
+#define OMAP4_GPIO_DR1_LB_SHIFT 26
+#define OMAP4_GPIO_DR1_LB_MASK (1 << 26)
+#define OMAP4_GPIO_DR10_LB_SHIFT 25
+#define OMAP4_GPIO_DR10_LB_MASK (1 << 25)
+#define OMAP4_GPIO_DR2_LB_SHIFT 24
+#define OMAP4_GPIO_DR2_LB_MASK (1 << 24)
+#define OMAP4_GPMC_DR0_LB_SHIFT 23
+#define OMAP4_GPMC_DR0_LB_MASK (1 << 23)
+#define OMAP4_GPMC_DR1_LB_SHIFT 22
+#define OMAP4_GPMC_DR1_LB_MASK (1 << 22)
+#define OMAP4_GPMC_DR4_LB_SHIFT 21
+#define OMAP4_GPMC_DR4_LB_MASK (1 << 21)
+#define OMAP4_GPMC_DR5_LB_SHIFT 20
+#define OMAP4_GPMC_DR5_LB_MASK (1 << 20)
+#define OMAP4_GPMC_DR7_LB_SHIFT 19
+#define OMAP4_GPMC_DR7_LB_MASK (1 << 19)
+#define OMAP4_HSI2_DR0_LB_SHIFT 18
+#define OMAP4_HSI2_DR0_LB_MASK (1 << 18)
+#define OMAP4_HSI2_DR1_LB_SHIFT 17
+#define OMAP4_HSI2_DR1_LB_MASK (1 << 17)
+#define OMAP4_HSI2_DR2_LB_SHIFT 16
+#define OMAP4_HSI2_DR2_LB_MASK (1 << 16)
+#define OMAP4_KPD_DR0_LB_SHIFT 15
+#define OMAP4_KPD_DR0_LB_MASK (1 << 15)
+#define OMAP4_KPD_DR1_LB_SHIFT 14
+#define OMAP4_KPD_DR1_LB_MASK (1 << 14)
+#define OMAP4_PDM_DR0_LB_SHIFT 13
+#define OMAP4_PDM_DR0_LB_MASK (1 << 13)
+#define OMAP4_SDMMC2_DR0_LB_SHIFT 12
+#define OMAP4_SDMMC2_DR0_LB_MASK (1 << 12)
+#define OMAP4_SDMMC3_DR0_LB_SHIFT 11
+#define OMAP4_SDMMC3_DR0_LB_MASK (1 << 11)
+#define OMAP4_SDMMC4_DR0_LB_SHIFT 10
+#define OMAP4_SDMMC4_DR0_LB_MASK (1 << 10)
+#define OMAP4_SDMMC4_DR1_LB_SHIFT 9
+#define OMAP4_SDMMC4_DR1_LB_MASK (1 << 9)
+#define OMAP4_SPI3_DR0_LB_SHIFT 8
+#define OMAP4_SPI3_DR0_LB_MASK (1 << 8)
+#define OMAP4_SPI3_DR1_LB_SHIFT 7
+#define OMAP4_SPI3_DR1_LB_MASK (1 << 7)
+#define OMAP4_UART3_DR2_LB_SHIFT 6
+#define OMAP4_UART3_DR2_LB_MASK (1 << 6)
+#define OMAP4_UART3_DR3_LB_SHIFT 5
+#define OMAP4_UART3_DR3_LB_MASK (1 << 5)
+#define OMAP4_UART3_DR4_LB_SHIFT 4
+#define OMAP4_UART3_DR4_LB_MASK (1 << 4)
+#define OMAP4_UART3_DR5_LB_SHIFT 3
+#define OMAP4_UART3_DR5_LB_MASK (1 << 3)
+#define OMAP4_USBA0_DR1_LB_SHIFT 2
+#define OMAP4_USBA0_DR1_LB_MASK (1 << 2)
+#define OMAP4_USBA_DR2_LB_SHIFT 1
+#define OMAP4_USBA_DR2_LB_MASK (1 << 1)
+
+/* CONTROL_SMART2IO_PADCONF_1 */
+#define OMAP4_USBB1_DR0_LB_SHIFT 31
+#define OMAP4_USBB1_DR0_LB_MASK (1 << 31)
+#define OMAP4_USBB2_DR0_LB_SHIFT 30
+#define OMAP4_USBB2_DR0_LB_MASK (1 << 30)
+#define OMAP4_USBA0_DR0_LB_SHIFT 29
+#define OMAP4_USBA0_DR0_LB_MASK (1 << 29)
+
+/* CONTROL_SMART3IO_PADCONF_0 */
+#define OMAP4_DMIC_DR0_MB_SHIFT 30
+#define OMAP4_DMIC_DR0_MB_MASK (0x3 << 30)
+#define OMAP4_GPIO_DR3_MB_SHIFT 28
+#define OMAP4_GPIO_DR3_MB_MASK (0x3 << 28)
+#define OMAP4_GPIO_DR4_MB_SHIFT 26
+#define OMAP4_GPIO_DR4_MB_MASK (0x3 << 26)
+#define OMAP4_GPIO_DR5_MB_SHIFT 24
+#define OMAP4_GPIO_DR5_MB_MASK (0x3 << 24)
+#define OMAP4_GPIO_DR6_MB_SHIFT 22
+#define OMAP4_GPIO_DR6_MB_MASK (0x3 << 22)
+#define OMAP4_HSI_DR1_MB_SHIFT 20
+#define OMAP4_HSI_DR1_MB_MASK (0x3 << 20)
+#define OMAP4_HSI_DR2_MB_SHIFT 18
+#define OMAP4_HSI_DR2_MB_MASK (0x3 << 18)
+#define OMAP4_HSI_DR3_MB_SHIFT 16
+#define OMAP4_HSI_DR3_MB_MASK (0x3 << 16)
+#define OMAP4_MCBSP2_DR0_MB_SHIFT 14
+#define OMAP4_MCBSP2_DR0_MB_MASK (0x3 << 14)
+#define OMAP4_MCSPI4_DR0_MB_SHIFT 12
+#define OMAP4_MCSPI4_DR0_MB_MASK (0x3 << 12)
+#define OMAP4_MCSPI4_DR1_MB_SHIFT 10
+#define OMAP4_MCSPI4_DR1_MB_MASK (0x3 << 10)
+#define OMAP4_SDMMC3_DR0_MB_SHIFT 8
+#define OMAP4_SDMMC3_DR0_MB_MASK (0x3 << 8)
+#define OMAP4_SPI2_DR0_MB_SHIFT 0
+#define OMAP4_SPI2_DR0_MB_MASK (0x3 << 0)
+
+/* CONTROL_SMART3IO_PADCONF_1 */
+#define OMAP4_SPI2_DR1_MB_SHIFT 30
+#define OMAP4_SPI2_DR1_MB_MASK (0x3 << 30)
+#define OMAP4_SPI2_DR2_MB_SHIFT 28
+#define OMAP4_SPI2_DR2_MB_MASK (0x3 << 28)
+#define OMAP4_UART2_DR0_MB_SHIFT 26
+#define OMAP4_UART2_DR0_MB_MASK (0x3 << 26)
+#define OMAP4_UART2_DR1_MB_SHIFT 24
+#define OMAP4_UART2_DR1_MB_MASK (0x3 << 24)
+#define OMAP4_UART4_DR0_MB_SHIFT 22
+#define OMAP4_UART4_DR0_MB_MASK (0x3 << 22)
+#define OMAP4_HSI_DR0_MB_SHIFT 20
+#define OMAP4_HSI_DR0_MB_MASK (0x3 << 20)
+
+/* CONTROL_SMART3IO_PADCONF_2 */
+#define OMAP4_DMIC_DR0_LB_SHIFT 31
+#define OMAP4_DMIC_DR0_LB_MASK (1 << 31)
+#define OMAP4_GPIO_DR3_LB_SHIFT 30
+#define OMAP4_GPIO_DR3_LB_MASK (1 << 30)
+#define OMAP4_GPIO_DR4_LB_SHIFT 29
+#define OMAP4_GPIO_DR4_LB_MASK (1 << 29)
+#define OMAP4_GPIO_DR5_LB_SHIFT 28
+#define OMAP4_GPIO_DR5_LB_MASK (1 << 28)
+#define OMAP4_GPIO_DR6_LB_SHIFT 27
+#define OMAP4_GPIO_DR6_LB_MASK (1 << 27)
+#define OMAP4_HSI_DR1_LB_SHIFT 26
+#define OMAP4_HSI_DR1_LB_MASK (1 << 26)
+#define OMAP4_HSI_DR2_LB_SHIFT 25
+#define OMAP4_HSI_DR2_LB_MASK (1 << 25)
+#define OMAP4_HSI_DR3_LB_SHIFT 24
+#define OMAP4_HSI_DR3_LB_MASK (1 << 24)
+#define OMAP4_MCBSP2_DR0_LB_SHIFT 23
+#define OMAP4_MCBSP2_DR0_LB_MASK (1 << 23)
+#define OMAP4_MCSPI4_DR0_LB_SHIFT 22
+#define OMAP4_MCSPI4_DR0_LB_MASK (1 << 22)
+#define OMAP4_MCSPI4_DR1_LB_SHIFT 21
+#define OMAP4_MCSPI4_DR1_LB_MASK (1 << 21)
+#define OMAP4_SLIMBUS2_DR0_LB_SHIFT 18
+#define OMAP4_SLIMBUS2_DR0_LB_MASK (1 << 18)
+#define OMAP4_SPI2_DR0_LB_SHIFT 16
+#define OMAP4_SPI2_DR0_LB_MASK (1 << 16)
+#define OMAP4_SPI2_DR1_LB_SHIFT 15
+#define OMAP4_SPI2_DR1_LB_MASK (1 << 15)
+#define OMAP4_SPI2_DR2_LB_SHIFT 14
+#define OMAP4_SPI2_DR2_LB_MASK (1 << 14)
+#define OMAP4_UART2_DR0_LB_SHIFT 13
+#define OMAP4_UART2_DR0_LB_MASK (1 << 13)
+#define OMAP4_UART2_DR1_LB_SHIFT 12
+#define OMAP4_UART2_DR1_LB_MASK (1 << 12)
+#define OMAP4_UART4_DR0_LB_SHIFT 11
+#define OMAP4_UART4_DR0_LB_MASK (1 << 11)
+#define OMAP4_HSI_DR0_LB_SHIFT 10
+#define OMAP4_HSI_DR0_LB_MASK (1 << 10)
+
+/* CONTROL_USBB_HSIC */
+#define OMAP4_USBB2_DR1_SR_SHIFT 30
+#define OMAP4_USBB2_DR1_SR_MASK (0x3 << 30)
+#define OMAP4_USBB2_DR1_I_SHIFT 27
+#define OMAP4_USBB2_DR1_I_MASK (0x7 << 27)
+#define OMAP4_USBB1_DR1_SR_SHIFT 25
+#define OMAP4_USBB1_DR1_SR_MASK (0x3 << 25)
+#define OMAP4_USBB1_DR1_I_SHIFT 22
+#define OMAP4_USBB1_DR1_I_MASK (0x7 << 22)
+#define OMAP4_USBB1_HSIC_DATA_WD_SHIFT 20
+#define OMAP4_USBB1_HSIC_DATA_WD_MASK (0x3 << 20)
+#define OMAP4_USBB1_HSIC_STROBE_WD_SHIFT 18
+#define OMAP4_USBB1_HSIC_STROBE_WD_MASK (0x3 << 18)
+#define OMAP4_USBB2_HSIC_DATA_WD_SHIFT 16
+#define OMAP4_USBB2_HSIC_DATA_WD_MASK (0x3 << 16)
+#define OMAP4_USBB2_HSIC_STROBE_WD_SHIFT 14
+#define OMAP4_USBB2_HSIC_STROBE_WD_MASK (0x3 << 14)
+#define OMAP4_USBB1_HSIC_DATA_OFFMODE_WD_ENABLE_SHIFT 13
+#define OMAP4_USBB1_HSIC_DATA_OFFMODE_WD_ENABLE_MASK (1 << 13)
+#define OMAP4_USBB1_HSIC_DATA_OFFMODE_WD_SHIFT 11
+#define OMAP4_USBB1_HSIC_DATA_OFFMODE_WD_MASK (0x3 << 11)
+#define OMAP4_USBB1_HSIC_STROBE_OFFMODE_WD_ENABLE_SHIFT 10
+#define OMAP4_USBB1_HSIC_STROBE_OFFMODE_WD_ENABLE_MASK (1 << 10)
+#define OMAP4_USBB1_HSIC_STROBE_OFFMODE_WD_SHIFT 8
+#define OMAP4_USBB1_HSIC_STROBE_OFFMODE_WD_MASK (0x3 << 8)
+#define OMAP4_USBB2_HSIC_DATA_OFFMODE_WD_ENABLE_SHIFT 7
+#define OMAP4_USBB2_HSIC_DATA_OFFMODE_WD_ENABLE_MASK (1 << 7)
+#define OMAP4_USBB2_HSIC_DATA_OFFMODE_WD_SHIFT 5
+#define OMAP4_USBB2_HSIC_DATA_OFFMODE_WD_MASK (0x3 << 5)
+#define OMAP4_USBB2_HSIC_STROBE_OFFMODE_WD_ENABLE_SHIFT 4
+#define OMAP4_USBB2_HSIC_STROBE_OFFMODE_WD_ENABLE_MASK (1 << 4)
+#define OMAP4_USBB2_HSIC_STROBE_OFFMODE_WD_SHIFT 2
+#define OMAP4_USBB2_HSIC_STROBE_OFFMODE_WD_MASK (0x3 << 2)
+
+/* CONTROL_SLIMBUS */
+#define OMAP4_SLIMBUS1_DR0_MB_SHIFT 30
+#define OMAP4_SLIMBUS1_DR0_MB_MASK (0x3 << 30)
+#define OMAP4_SLIMBUS1_DR1_MB_SHIFT 28
+#define OMAP4_SLIMBUS1_DR1_MB_MASK (0x3 << 28)
+#define OMAP4_SLIMBUS2_DR0_MB_SHIFT 26
+#define OMAP4_SLIMBUS2_DR0_MB_MASK (0x3 << 26)
+#define OMAP4_SLIMBUS2_DR1_MB_SHIFT 24
+#define OMAP4_SLIMBUS2_DR1_MB_MASK (0x3 << 24)
+#define OMAP4_SLIMBUS2_DR2_MB_SHIFT 22
+#define OMAP4_SLIMBUS2_DR2_MB_MASK (0x3 << 22)
+#define OMAP4_SLIMBUS2_DR3_MB_SHIFT 20
+#define OMAP4_SLIMBUS2_DR3_MB_MASK (0x3 << 20)
+#define OMAP4_SLIMBUS1_DR0_LB_SHIFT 19
+#define OMAP4_SLIMBUS1_DR0_LB_MASK (1 << 19)
+#define OMAP4_SLIMBUS2_DR1_LB_SHIFT 18
+#define OMAP4_SLIMBUS2_DR1_LB_MASK (1 << 18)
+
+/* CONTROL_PBIASLITE */
+#define OMAP4_USIM_PBIASLITE_HIZ_MODE_SHIFT 31
+#define OMAP4_USIM_PBIASLITE_HIZ_MODE_MASK (1 << 31)
+#define OMAP4_USIM_PBIASLITE_SUPPLY_HI_OUT_SHIFT 30
+#define OMAP4_USIM_PBIASLITE_SUPPLY_HI_OUT_MASK (1 << 30)
+#define OMAP4_USIM_PBIASLITE_VMODE_ERROR_SHIFT 29
+#define OMAP4_USIM_PBIASLITE_VMODE_ERROR_MASK (1 << 29)
+#define OMAP4_USIM_PBIASLITE_PWRDNZ_SHIFT 28
+#define OMAP4_USIM_PBIASLITE_PWRDNZ_MASK (1 << 28)
+#define OMAP4_USIM_PBIASLITE_VMODE_SHIFT 27
+#define OMAP4_USIM_PBIASLITE_VMODE_MASK (1 << 27)
+#define OMAP4_MMC1_PWRDNZ_SHIFT 26
+#define OMAP4_MMC1_PWRDNZ_MASK (1 << 26)
+#define OMAP4_MMC1_PBIASLITE_HIZ_MODE_SHIFT 25
+#define OMAP4_MMC1_PBIASLITE_HIZ_MODE_MASK (1 << 25)
+#define OMAP4_MMC1_PBIASLITE_SUPPLY_HI_OUT_SHIFT 24
+#define OMAP4_MMC1_PBIASLITE_SUPPLY_HI_OUT_MASK (1 << 24)
+#define OMAP4_MMC1_PBIASLITE_VMODE_ERROR_SHIFT 23
+#define OMAP4_MMC1_PBIASLITE_VMODE_ERROR_MASK (1 << 23)
+#define OMAP4_MMC1_PBIASLITE_PWRDNZ_SHIFT 22
+#define OMAP4_MMC1_PBIASLITE_PWRDNZ_MASK (1 << 22)
+#define OMAP4_MMC1_PBIASLITE_VMODE_SHIFT 21
+#define OMAP4_MMC1_PBIASLITE_VMODE_MASK (1 << 21)
+#define OMAP4_USBC1_ICUSB_PWRDNZ_SHIFT 20
+#define OMAP4_USBC1_ICUSB_PWRDNZ_MASK (1 << 20)
+
+/* CONTROL_I2C_0 */
+#define OMAP4_I2C4_SDA_GLFENB_SHIFT 31
+#define OMAP4_I2C4_SDA_GLFENB_MASK (1 << 31)
+#define OMAP4_I2C4_SDA_LOAD_BITS_SHIFT 29
+#define OMAP4_I2C4_SDA_LOAD_BITS_MASK (0x3 << 29)
+#define OMAP4_I2C4_SDA_PULLUPRESX_SHIFT 28
+#define OMAP4_I2C4_SDA_PULLUPRESX_MASK (1 << 28)
+#define OMAP4_I2C3_SDA_GLFENB_SHIFT 27
+#define OMAP4_I2C3_SDA_GLFENB_MASK (1 << 27)
+#define OMAP4_I2C3_SDA_LOAD_BITS_SHIFT 25
+#define OMAP4_I2C3_SDA_LOAD_BITS_MASK (0x3 << 25)
+#define OMAP4_I2C3_SDA_PULLUPRESX_SHIFT 24
+#define OMAP4_I2C3_SDA_PULLUPRESX_MASK (1 << 24)
+#define OMAP4_I2C2_SDA_GLFENB_SHIFT 23
+#define OMAP4_I2C2_SDA_GLFENB_MASK (1 << 23)
+#define OMAP4_I2C2_SDA_LOAD_BITS_SHIFT 21
+#define OMAP4_I2C2_SDA_LOAD_BITS_MASK (0x3 << 21)
+#define OMAP4_I2C2_SDA_PULLUPRESX_SHIFT 20
+#define OMAP4_I2C2_SDA_PULLUPRESX_MASK (1 << 20)
+#define OMAP4_I2C1_SDA_GLFENB_SHIFT 19
+#define OMAP4_I2C1_SDA_GLFENB_MASK (1 << 19)
+#define OMAP4_I2C1_SDA_LOAD_BITS_SHIFT 17
+#define OMAP4_I2C1_SDA_LOAD_BITS_MASK (0x3 << 17)
+#define OMAP4_I2C1_SDA_PULLUPRESX_SHIFT 16
+#define OMAP4_I2C1_SDA_PULLUPRESX_MASK (1 << 16)
+#define OMAP4_I2C4_SCL_GLFENB_SHIFT 15
+#define OMAP4_I2C4_SCL_GLFENB_MASK (1 << 15)
+#define OMAP4_I2C4_SCL_LOAD_BITS_SHIFT 13
+#define OMAP4_I2C4_SCL_LOAD_BITS_MASK (0x3 << 13)
+#define OMAP4_I2C4_SCL_PULLUPRESX_SHIFT 12
+#define OMAP4_I2C4_SCL_PULLUPRESX_MASK (1 << 12)
+#define OMAP4_I2C3_SCL_GLFENB_SHIFT 11
+#define OMAP4_I2C3_SCL_GLFENB_MASK (1 << 11)
+#define OMAP4_I2C3_SCL_LOAD_BITS_SHIFT 9
+#define OMAP4_I2C3_SCL_LOAD_BITS_MASK (0x3 << 9)
+#define OMAP4_I2C3_SCL_PULLUPRESX_SHIFT 8
+#define OMAP4_I2C3_SCL_PULLUPRESX_MASK (1 << 8)
+#define OMAP4_I2C2_SCL_GLFENB_SHIFT 7
+#define OMAP4_I2C2_SCL_GLFENB_MASK (1 << 7)
+#define OMAP4_I2C2_SCL_LOAD_BITS_SHIFT 5
+#define OMAP4_I2C2_SCL_LOAD_BITS_MASK (0x3 << 5)
+#define OMAP4_I2C2_SCL_PULLUPRESX_SHIFT 4
+#define OMAP4_I2C2_SCL_PULLUPRESX_MASK (1 << 4)
+#define OMAP4_I2C1_SCL_GLFENB_SHIFT 3
+#define OMAP4_I2C1_SCL_GLFENB_MASK (1 << 3)
+#define OMAP4_I2C1_SCL_LOAD_BITS_SHIFT 1
+#define OMAP4_I2C1_SCL_LOAD_BITS_MASK (0x3 << 1)
+#define OMAP4_I2C1_SCL_PULLUPRESX_SHIFT 0
+#define OMAP4_I2C1_SCL_PULLUPRESX_MASK (1 << 0)
+
+/* CONTROL_CAMERA_RX */
+#define OMAP4_CAMERARX_UNIPRO_CTRLCLKEN_SHIFT 31
+#define OMAP4_CAMERARX_UNIPRO_CTRLCLKEN_MASK (1 << 31)
+#define OMAP4_CAMERARX_CSI22_LANEENABLE_SHIFT 29
+#define OMAP4_CAMERARX_CSI22_LANEENABLE_MASK (0x3 << 29)
+#define OMAP4_CAMERARX_CSI21_LANEENABLE_SHIFT 24
+#define OMAP4_CAMERARX_CSI21_LANEENABLE_MASK (0x1f << 24)
+#define OMAP4_CAMERARX_UNIPRO_CAMMODE_SHIFT 22
+#define OMAP4_CAMERARX_UNIPRO_CAMMODE_MASK (0x3 << 22)
+#define OMAP4_CAMERARX_CSI22_CTRLCLKEN_SHIFT 21
+#define OMAP4_CAMERARX_CSI22_CTRLCLKEN_MASK (1 << 21)
+#define OMAP4_CAMERARX_CSI22_CAMMODE_SHIFT 19
+#define OMAP4_CAMERARX_CSI22_CAMMODE_MASK (0x3 << 19)
+#define OMAP4_CAMERARX_CSI21_CTRLCLKEN_SHIFT 18
+#define OMAP4_CAMERARX_CSI21_CTRLCLKEN_MASK (1 << 18)
+#define OMAP4_CAMERARX_CSI21_CAMMODE_SHIFT 16
+#define OMAP4_CAMERARX_CSI21_CAMMODE_MASK (0x3 << 16)
+
+/* CONTROL_AVDAC */
+#define OMAP4_AVDAC_ACEN_SHIFT 31
+#define OMAP4_AVDAC_ACEN_MASK (1 << 31)
+#define OMAP4_AVDAC_TVOUTBYPASS_SHIFT 30
+#define OMAP4_AVDAC_TVOUTBYPASS_MASK (1 << 30)
+#define OMAP4_AVDAC_INPUTINV_SHIFT 29
+#define OMAP4_AVDAC_INPUTINV_MASK (1 << 29)
+#define OMAP4_AVDAC_CTL_SHIFT 13
+#define OMAP4_AVDAC_CTL_MASK (0xffff << 13)
+#define OMAP4_AVDAC_CTL_WR_ACK_SHIFT 12
+#define OMAP4_AVDAC_CTL_WR_ACK_MASK (1 << 12)
+
+/* CONTROL_HDMI_TX_PHY */
+#define OMAP4_HDMITXPHY_PADORDER_SHIFT 31
+#define OMAP4_HDMITXPHY_PADORDER_MASK (1 << 31)
+#define OMAP4_HDMITXPHY_TXVALID_SHIFT 30
+#define OMAP4_HDMITXPHY_TXVALID_MASK (1 << 30)
+#define OMAP4_HDMITXPHY_ENBYPASSCLK_SHIFT 29
+#define OMAP4_HDMITXPHY_ENBYPASSCLK_MASK (1 << 29)
+#define OMAP4_HDMITXPHY_PD_PULLUPDET_SHIFT 28
+#define OMAP4_HDMITXPHY_PD_PULLUPDET_MASK (1 << 28)
+
+/* CONTROL_MMC2 */
+#define OMAP4_MMC2_FEEDBACK_CLK_SEL_SHIFT 31
+#define OMAP4_MMC2_FEEDBACK_CLK_SEL_MASK (1 << 31)
+
+/* CONTROL_DSIPHY */
+#define OMAP4_DSI2_LANEENABLE_SHIFT 29
+#define OMAP4_DSI2_LANEENABLE_MASK (0x7 << 29)
+#define OMAP4_DSI1_LANEENABLE_SHIFT 24
+#define OMAP4_DSI1_LANEENABLE_MASK (0x1f << 24)
+#define OMAP4_DSI1_PIPD_SHIFT 19
+#define OMAP4_DSI1_PIPD_MASK (0x1f << 19)
+#define OMAP4_DSI2_PIPD_SHIFT 14
+#define OMAP4_DSI2_PIPD_MASK (0x1f << 14)
+
+/* CONTROL_MCBSPLP */
+#define OMAP4_ALBCTRLRX_FSX_SHIFT 31
+#define OMAP4_ALBCTRLRX_FSX_MASK (1 << 31)
+#define OMAP4_ALBCTRLRX_CLKX_SHIFT 30
+#define OMAP4_ALBCTRLRX_CLKX_MASK (1 << 30)
+#define OMAP4_ABE_MCBSP1_DR_EN_SHIFT 29
+#define OMAP4_ABE_MCBSP1_DR_EN_MASK (1 << 29)
+
+/* CONTROL_USB2PHYCORE */
+#define OMAP4_USB2PHY_AUTORESUME_EN_SHIFT 31
+#define OMAP4_USB2PHY_AUTORESUME_EN_MASK (1 << 31)
+#define OMAP4_USB2PHY_DISCHGDET_SHIFT 30
+#define OMAP4_USB2PHY_DISCHGDET_MASK (1 << 30)
+#define OMAP4_USB2PHY_GPIOMODE_SHIFT 29
+#define OMAP4_USB2PHY_GPIOMODE_MASK (1 << 29)
+#define OMAP4_USB2PHY_CHG_DET_EXT_CTL_SHIFT 28
+#define OMAP4_USB2PHY_CHG_DET_EXT_CTL_MASK (1 << 28)
+#define OMAP4_USB2PHY_RDM_PD_CHGDET_EN_SHIFT 27
+#define OMAP4_USB2PHY_RDM_PD_CHGDET_EN_MASK (1 << 27)
+#define OMAP4_USB2PHY_RDP_PU_CHGDET_EN_SHIFT 26
+#define OMAP4_USB2PHY_RDP_PU_CHGDET_EN_MASK (1 << 26)
+#define OMAP4_USB2PHY_CHG_VSRC_EN_SHIFT 25
+#define OMAP4_USB2PHY_CHG_VSRC_EN_MASK (1 << 25)
+#define OMAP4_USB2PHY_CHG_ISINK_EN_SHIFT 24
+#define OMAP4_USB2PHY_CHG_ISINK_EN_MASK (1 << 24)
+#define OMAP4_USB2PHY_CHG_DET_STATUS_SHIFT 21
+#define OMAP4_USB2PHY_CHG_DET_STATUS_MASK (0x7 << 21)
+#define OMAP4_USB2PHY_CHG_DET_DM_COMP_SHIFT 20
+#define OMAP4_USB2PHY_CHG_DET_DM_COMP_MASK (1 << 20)
+#define OMAP4_USB2PHY_CHG_DET_DP_COMP_SHIFT 19
+#define OMAP4_USB2PHY_CHG_DET_DP_COMP_MASK (1 << 19)
+#define OMAP4_USB2PHY_DATADET_SHIFT 18
+#define OMAP4_USB2PHY_DATADET_MASK (1 << 18)
+#define OMAP4_USB2PHY_SINKONDP_SHIFT 17
+#define OMAP4_USB2PHY_SINKONDP_MASK (1 << 17)
+#define OMAP4_USB2PHY_SRCONDM_SHIFT 16
+#define OMAP4_USB2PHY_SRCONDM_MASK (1 << 16)
+#define OMAP4_USB2PHY_RESTARTCHGDET_SHIFT 15
+#define OMAP4_USB2PHY_RESTARTCHGDET_MASK (1 << 15)
+#define OMAP4_USB2PHY_CHGDETDONE_SHIFT 14
+#define OMAP4_USB2PHY_CHGDETDONE_MASK (1 << 14)
+#define OMAP4_USB2PHY_CHGDETECTED_SHIFT 13
+#define OMAP4_USB2PHY_CHGDETECTED_MASK (1 << 13)
+#define OMAP4_USB2PHY_MCPCPUEN_SHIFT 12
+#define OMAP4_USB2PHY_MCPCPUEN_MASK (1 << 12)
+#define OMAP4_USB2PHY_MCPCMODEEN_SHIFT 11
+#define OMAP4_USB2PHY_MCPCMODEEN_MASK (1 << 11)
+#define OMAP4_USB2PHY_RESETDONEMCLK_SHIFT 10
+#define OMAP4_USB2PHY_RESETDONEMCLK_MASK (1 << 10)
+#define OMAP4_USB2PHY_UTMIRESETDONE_SHIFT 9
+#define OMAP4_USB2PHY_UTMIRESETDONE_MASK (1 << 9)
+#define OMAP4_USB2PHY_TXBITSTUFFENABLE_SHIFT 8
+#define OMAP4_USB2PHY_TXBITSTUFFENABLE_MASK (1 << 8)
+#define OMAP4_USB2PHY_DATAPOLARITYN_SHIFT 7
+#define OMAP4_USB2PHY_DATAPOLARITYN_MASK (1 << 7)
+#define OMAP4_USBDPLL_FREQLOCK_SHIFT 6
+#define OMAP4_USBDPLL_FREQLOCK_MASK (1 << 6)
+#define OMAP4_USB2PHY_RESETDONETCLK_SHIFT 5
+#define OMAP4_USB2PHY_RESETDONETCLK_MASK (1 << 5)
+
+/* CONTROL_I2C_1 */
+#define OMAP4_HDMI_DDC_SDA_GLFENB_SHIFT 31
+#define OMAP4_HDMI_DDC_SDA_GLFENB_MASK (1 << 31)
+#define OMAP4_HDMI_DDC_SDA_LOAD_BITS_SHIFT 29
+#define OMAP4_HDMI_DDC_SDA_LOAD_BITS_MASK (0x3 << 29)
+#define OMAP4_HDMI_DDC_SDA_PULLUPRESX_SHIFT 28
+#define OMAP4_HDMI_DDC_SDA_PULLUPRESX_MASK (1 << 28)
+#define OMAP4_HDMI_DDC_SCL_GLFENB_SHIFT 27
+#define OMAP4_HDMI_DDC_SCL_GLFENB_MASK (1 << 27)
+#define OMAP4_HDMI_DDC_SCL_LOAD_BITS_SHIFT 25
+#define OMAP4_HDMI_DDC_SCL_LOAD_BITS_MASK (0x3 << 25)
+#define OMAP4_HDMI_DDC_SCL_PULLUPRESX_SHIFT 24
+#define OMAP4_HDMI_DDC_SCL_PULLUPRESX_MASK (1 << 24)
+#define OMAP4_HDMI_DDC_SDA_HSMODE_SHIFT 23
+#define OMAP4_HDMI_DDC_SDA_HSMODE_MASK (1 << 23)
+#define OMAP4_HDMI_DDC_SDA_NMODE_SHIFT 22
+#define OMAP4_HDMI_DDC_SDA_NMODE_MASK (1 << 22)
+#define OMAP4_HDMI_DDC_SCL_HSMODE_SHIFT 21
+#define OMAP4_HDMI_DDC_SCL_HSMODE_MASK (1 << 21)
+#define OMAP4_HDMI_DDC_SCL_NMODE_SHIFT 20
+#define OMAP4_HDMI_DDC_SCL_NMODE_MASK (1 << 20)
+
+/* CONTROL_MMC1 */
+#define OMAP4_SDMMC1_PUSTRENGTH_GRP0_SHIFT 31
+#define OMAP4_SDMMC1_PUSTRENGTH_GRP0_MASK (1 << 31)
+#define OMAP4_SDMMC1_PUSTRENGTH_GRP1_SHIFT 30
+#define OMAP4_SDMMC1_PUSTRENGTH_GRP1_MASK (1 << 30)
+#define OMAP4_SDMMC1_PUSTRENGTH_GRP2_SHIFT 29
+#define OMAP4_SDMMC1_PUSTRENGTH_GRP2_MASK (1 << 29)
+#define OMAP4_SDMMC1_PUSTRENGTH_GRP3_SHIFT 28
+#define OMAP4_SDMMC1_PUSTRENGTH_GRP3_MASK (1 << 28)
+#define OMAP4_SDMMC1_DR0_SPEEDCTRL_SHIFT 27
+#define OMAP4_SDMMC1_DR0_SPEEDCTRL_MASK (1 << 27)
+#define OMAP4_SDMMC1_DR1_SPEEDCTRL_SHIFT 26
+#define OMAP4_SDMMC1_DR1_SPEEDCTRL_MASK (1 << 26)
+#define OMAP4_SDMMC1_DR2_SPEEDCTRL_SHIFT 25
+#define OMAP4_SDMMC1_DR2_SPEEDCTRL_MASK (1 << 25)
+#define OMAP4_USBC1_DR0_SPEEDCTRL_SHIFT 24
+#define OMAP4_USBC1_DR0_SPEEDCTRL_MASK (1 << 24)
+#define OMAP4_USB_FD_CDEN_SHIFT 23
+#define OMAP4_USB_FD_CDEN_MASK (1 << 23)
+#define OMAP4_USBC1_ICUSB_DP_PDDIS_SHIFT 22
+#define OMAP4_USBC1_ICUSB_DP_PDDIS_MASK (1 << 22)
+#define OMAP4_USBC1_ICUSB_DM_PDDIS_SHIFT 21
+#define OMAP4_USBC1_ICUSB_DM_PDDIS_MASK (1 << 21)
+
+/* CONTROL_HSI */
+#define OMAP4_HSI1_CALLOOP_SEL_SHIFT 31
+#define OMAP4_HSI1_CALLOOP_SEL_MASK (1 << 31)
+#define OMAP4_HSI1_CALMUX_SEL_SHIFT 30
+#define OMAP4_HSI1_CALMUX_SEL_MASK (1 << 30)
+#define OMAP4_HSI2_CALLOOP_SEL_SHIFT 29
+#define OMAP4_HSI2_CALLOOP_SEL_MASK (1 << 29)
+#define OMAP4_HSI2_CALMUX_SEL_SHIFT 28
+#define OMAP4_HSI2_CALMUX_SEL_MASK (1 << 28)
+
+/* CONTROL_USB */
+#define OMAP4_CARKIT_USBA0_ULPIPHY_DAT0_AUTO_EN_SHIFT 31
+#define OMAP4_CARKIT_USBA0_ULPIPHY_DAT0_AUTO_EN_MASK (1 << 31)
+#define OMAP4_CARKIT_USBA0_ULPIPHY_DAT1_AUTO_EN_SHIFT 30
+#define OMAP4_CARKIT_USBA0_ULPIPHY_DAT1_AUTO_EN_MASK (1 << 30)
+
+/* CONTROL_HDQ */
+#define OMAP4_HDQ_SIO_PWRDNZ_SHIFT 31
+#define OMAP4_HDQ_SIO_PWRDNZ_MASK (1 << 31)
+
+/* CONTROL_LPDDR2IO1_0 */
+#define OMAP4_LPDDR2IO1_GR4_SR_SHIFT 30
+#define OMAP4_LPDDR2IO1_GR4_SR_MASK (0x3 << 30)
+#define OMAP4_LPDDR2IO1_GR4_I_SHIFT 27
+#define OMAP4_LPDDR2IO1_GR4_I_MASK (0x7 << 27)
+#define OMAP4_LPDDR2IO1_GR4_WD_SHIFT 25
+#define OMAP4_LPDDR2IO1_GR4_WD_MASK (0x3 << 25)
+#define OMAP4_LPDDR2IO1_GR3_SR_SHIFT 22
+#define OMAP4_LPDDR2IO1_GR3_SR_MASK (0x3 << 22)
+#define OMAP4_LPDDR2IO1_GR3_I_SHIFT 19
+#define OMAP4_LPDDR2IO1_GR3_I_MASK (0x7 << 19)
+#define OMAP4_LPDDR2IO1_GR3_WD_SHIFT 17
+#define OMAP4_LPDDR2IO1_GR3_WD_MASK (0x3 << 17)
+#define OMAP4_LPDDR2IO1_GR2_SR_SHIFT 14
+#define OMAP4_LPDDR2IO1_GR2_SR_MASK (0x3 << 14)
+#define OMAP4_LPDDR2IO1_GR2_I_SHIFT 11
+#define OMAP4_LPDDR2IO1_GR2_I_MASK (0x7 << 11)
+#define OMAP4_LPDDR2IO1_GR2_WD_SHIFT 9
+#define OMAP4_LPDDR2IO1_GR2_WD_MASK (0x3 << 9)
+#define OMAP4_LPDDR2IO1_GR1_SR_SHIFT 6
+#define OMAP4_LPDDR2IO1_GR1_SR_MASK (0x3 << 6)
+#define OMAP4_LPDDR2IO1_GR1_I_SHIFT 3
+#define OMAP4_LPDDR2IO1_GR1_I_MASK (0x7 << 3)
+#define OMAP4_LPDDR2IO1_GR1_WD_SHIFT 1
+#define OMAP4_LPDDR2IO1_GR1_WD_MASK (0x3 << 1)
+
+/* CONTROL_LPDDR2IO1_1 */
+#define OMAP4_LPDDR2IO1_GR8_SR_SHIFT 30
+#define OMAP4_LPDDR2IO1_GR8_SR_MASK (0x3 << 30)
+#define OMAP4_LPDDR2IO1_GR8_I_SHIFT 27
+#define OMAP4_LPDDR2IO1_GR8_I_MASK (0x7 << 27)
+#define OMAP4_LPDDR2IO1_GR8_WD_SHIFT 25
+#define OMAP4_LPDDR2IO1_GR8_WD_MASK (0x3 << 25)
+#define OMAP4_LPDDR2IO1_GR7_SR_SHIFT 22
+#define OMAP4_LPDDR2IO1_GR7_SR_MASK (0x3 << 22)
+#define OMAP4_LPDDR2IO1_GR7_I_SHIFT 19
+#define OMAP4_LPDDR2IO1_GR7_I_MASK (0x7 << 19)
+#define OMAP4_LPDDR2IO1_GR7_WD_SHIFT 17
+#define OMAP4_LPDDR2IO1_GR7_WD_MASK (0x3 << 17)
+#define OMAP4_LPDDR2IO1_GR6_SR_SHIFT 14
+#define OMAP4_LPDDR2IO1_GR6_SR_MASK (0x3 << 14)
+#define OMAP4_LPDDR2IO1_GR6_I_SHIFT 11
+#define OMAP4_LPDDR2IO1_GR6_I_MASK (0x7 << 11)
+#define OMAP4_LPDDR2IO1_GR6_WD_SHIFT 9
+#define OMAP4_LPDDR2IO1_GR6_WD_MASK (0x3 << 9)
+#define OMAP4_LPDDR2IO1_GR5_SR_SHIFT 6
+#define OMAP4_LPDDR2IO1_GR5_SR_MASK (0x3 << 6)
+#define OMAP4_LPDDR2IO1_GR5_I_SHIFT 3
+#define OMAP4_LPDDR2IO1_GR5_I_MASK (0x7 << 3)
+#define OMAP4_LPDDR2IO1_GR5_WD_SHIFT 1
+#define OMAP4_LPDDR2IO1_GR5_WD_MASK (0x3 << 1)
+
+/* CONTROL_LPDDR2IO1_2 */
+#define OMAP4_LPDDR2IO1_GR11_SR_SHIFT 30
+#define OMAP4_LPDDR2IO1_GR11_SR_MASK (0x3 << 30)
+#define OMAP4_LPDDR2IO1_GR11_I_SHIFT 27
+#define OMAP4_LPDDR2IO1_GR11_I_MASK (0x7 << 27)
+#define OMAP4_LPDDR2IO1_GR11_WD_SHIFT 25
+#define OMAP4_LPDDR2IO1_GR11_WD_MASK (0x3 << 25)
+#define OMAP4_LPDDR2IO1_GR10_SR_SHIFT 22
+#define OMAP4_LPDDR2IO1_GR10_SR_MASK (0x3 << 22)
+#define OMAP4_LPDDR2IO1_GR10_I_SHIFT 19
+#define OMAP4_LPDDR2IO1_GR10_I_MASK (0x7 << 19)
+#define OMAP4_LPDDR2IO1_GR10_WD_SHIFT 17
+#define OMAP4_LPDDR2IO1_GR10_WD_MASK (0x3 << 17)
+#define OMAP4_LPDDR2IO1_GR9_SR_SHIFT 14
+#define OMAP4_LPDDR2IO1_GR9_SR_MASK (0x3 << 14)
+#define OMAP4_LPDDR2IO1_GR9_I_SHIFT 11
+#define OMAP4_LPDDR2IO1_GR9_I_MASK (0x7 << 11)
+#define OMAP4_LPDDR2IO1_GR9_WD_SHIFT 9
+#define OMAP4_LPDDR2IO1_GR9_WD_MASK (0x3 << 9)
+
+/* CONTROL_LPDDR2IO1_3 */
+#define OMAP4_LPDDR21_VREF_CA_CCAP0_SHIFT 31
+#define OMAP4_LPDDR21_VREF_CA_CCAP0_MASK (1 << 31)
+#define OMAP4_LPDDR21_VREF_CA_CCAP1_SHIFT 30
+#define OMAP4_LPDDR21_VREF_CA_CCAP1_MASK (1 << 30)
+#define OMAP4_LPDDR21_VREF_CA_INT_CCAP0_SHIFT 29
+#define OMAP4_LPDDR21_VREF_CA_INT_CCAP0_MASK (1 << 29)
+#define OMAP4_LPDDR21_VREF_CA_INT_CCAP1_SHIFT 28
+#define OMAP4_LPDDR21_VREF_CA_INT_CCAP1_MASK (1 << 28)
+#define OMAP4_LPDDR21_VREF_CA_INT_TAP0_SHIFT 27
+#define OMAP4_LPDDR21_VREF_CA_INT_TAP0_MASK (1 << 27)
+#define OMAP4_LPDDR21_VREF_CA_INT_TAP1_SHIFT 26
+#define OMAP4_LPDDR21_VREF_CA_INT_TAP1_MASK (1 << 26)
+#define OMAP4_LPDDR21_VREF_CA_TAP0_SHIFT 25
+#define OMAP4_LPDDR21_VREF_CA_TAP0_MASK (1 << 25)
+#define OMAP4_LPDDR21_VREF_CA_TAP1_SHIFT 24
+#define OMAP4_LPDDR21_VREF_CA_TAP1_MASK (1 << 24)
+#define OMAP4_LPDDR21_VREF_DQ0_INT_CCAP0_SHIFT 23
+#define OMAP4_LPDDR21_VREF_DQ0_INT_CCAP0_MASK (1 << 23)
+#define OMAP4_LPDDR21_VREF_DQ0_INT_CCAP1_SHIFT 22
+#define OMAP4_LPDDR21_VREF_DQ0_INT_CCAP1_MASK (1 << 22)
+#define OMAP4_LPDDR21_VREF_DQ0_INT_TAP0_SHIFT 21
+#define OMAP4_LPDDR21_VREF_DQ0_INT_TAP0_MASK (1 << 21)
+#define OMAP4_LPDDR21_VREF_DQ0_INT_TAP1_SHIFT 20
+#define OMAP4_LPDDR21_VREF_DQ0_INT_TAP1_MASK (1 << 20)
+#define OMAP4_LPDDR21_VREF_DQ1_INT_CCAP0_SHIFT 19
+#define OMAP4_LPDDR21_VREF_DQ1_INT_CCAP0_MASK (1 << 19)
+#define OMAP4_LPDDR21_VREF_DQ1_INT_CCAP1_SHIFT 18
+#define OMAP4_LPDDR21_VREF_DQ1_INT_CCAP1_MASK (1 << 18)
+#define OMAP4_LPDDR21_VREF_DQ1_INT_TAP0_SHIFT 17
+#define OMAP4_LPDDR21_VREF_DQ1_INT_TAP0_MASK (1 << 17)
+#define OMAP4_LPDDR21_VREF_DQ1_INT_TAP1_SHIFT 16
+#define OMAP4_LPDDR21_VREF_DQ1_INT_TAP1_MASK (1 << 16)
+#define OMAP4_LPDDR21_VREF_DQ_CCAP0_SHIFT 15
+#define OMAP4_LPDDR21_VREF_DQ_CCAP0_MASK (1 << 15)
+#define OMAP4_LPDDR21_VREF_DQ_CCAP1_SHIFT 14
+#define OMAP4_LPDDR21_VREF_DQ_CCAP1_MASK (1 << 14)
+#define OMAP4_LPDDR21_VREF_DQ_TAP0_SHIFT 13
+#define OMAP4_LPDDR21_VREF_DQ_TAP0_MASK (1 << 13)
+#define OMAP4_LPDDR21_VREF_DQ_TAP1_SHIFT 12
+#define OMAP4_LPDDR21_VREF_DQ_TAP1_MASK (1 << 12)
+
+/* CONTROL_LPDDR2IO2_0 */
+#define OMAP4_LPDDR2IO2_GR4_SR_SHIFT 30
+#define OMAP4_LPDDR2IO2_GR4_SR_MASK (0x3 << 30)
+#define OMAP4_LPDDR2IO2_GR4_I_SHIFT 27
+#define OMAP4_LPDDR2IO2_GR4_I_MASK (0x7 << 27)
+#define OMAP4_LPDDR2IO2_GR4_WD_SHIFT 25
+#define OMAP4_LPDDR2IO2_GR4_WD_MASK (0x3 << 25)
+#define OMAP4_LPDDR2IO2_GR3_SR_SHIFT 22
+#define OMAP4_LPDDR2IO2_GR3_SR_MASK (0x3 << 22)
+#define OMAP4_LPDDR2IO2_GR3_I_SHIFT 19
+#define OMAP4_LPDDR2IO2_GR3_I_MASK (0x7 << 19)
+#define OMAP4_LPDDR2IO2_GR3_WD_SHIFT 17
+#define OMAP4_LPDDR2IO2_GR3_WD_MASK (0x3 << 17)
+#define OMAP4_LPDDR2IO2_GR2_SR_SHIFT 14
+#define OMAP4_LPDDR2IO2_GR2_SR_MASK (0x3 << 14)
+#define OMAP4_LPDDR2IO2_GR2_I_SHIFT 11
+#define OMAP4_LPDDR2IO2_GR2_I_MASK (0x7 << 11)
+#define OMAP4_LPDDR2IO2_GR2_WD_SHIFT 9
+#define OMAP4_LPDDR2IO2_GR2_WD_MASK (0x3 << 9)
+#define OMAP4_LPDDR2IO2_GR1_SR_SHIFT 6
+#define OMAP4_LPDDR2IO2_GR1_SR_MASK (0x3 << 6)
+#define OMAP4_LPDDR2IO2_GR1_I_SHIFT 3
+#define OMAP4_LPDDR2IO2_GR1_I_MASK (0x7 << 3)
+#define OMAP4_LPDDR2IO2_GR1_WD_SHIFT 1
+#define OMAP4_LPDDR2IO2_GR1_WD_MASK (0x3 << 1)
+
+/* CONTROL_LPDDR2IO2_1 */
+#define OMAP4_LPDDR2IO2_GR8_SR_SHIFT 30
+#define OMAP4_LPDDR2IO2_GR8_SR_MASK (0x3 << 30)
+#define OMAP4_LPDDR2IO2_GR8_I_SHIFT 27
+#define OMAP4_LPDDR2IO2_GR8_I_MASK (0x7 << 27)
+#define OMAP4_LPDDR2IO2_GR8_WD_SHIFT 25
+#define OMAP4_LPDDR2IO2_GR8_WD_MASK (0x3 << 25)
+#define OMAP4_LPDDR2IO2_GR7_SR_SHIFT 22
+#define OMAP4_LPDDR2IO2_GR7_SR_MASK (0x3 << 22)
+#define OMAP4_LPDDR2IO2_GR7_I_SHIFT 19
+#define OMAP4_LPDDR2IO2_GR7_I_MASK (0x7 << 19)
+#define OMAP4_LPDDR2IO2_GR7_WD_SHIFT 17
+#define OMAP4_LPDDR2IO2_GR7_WD_MASK (0x3 << 17)
+#define OMAP4_LPDDR2IO2_GR6_SR_SHIFT 14
+#define OMAP4_LPDDR2IO2_GR6_SR_MASK (0x3 << 14)
+#define OMAP4_LPDDR2IO2_GR6_I_SHIFT 11
+#define OMAP4_LPDDR2IO2_GR6_I_MASK (0x7 << 11)
+#define OMAP4_LPDDR2IO2_GR6_WD_SHIFT 9
+#define OMAP4_LPDDR2IO2_GR6_WD_MASK (0x3 << 9)
+#define OMAP4_LPDDR2IO2_GR5_SR_SHIFT 6
+#define OMAP4_LPDDR2IO2_GR5_SR_MASK (0x3 << 6)
+#define OMAP4_LPDDR2IO2_GR5_I_SHIFT 3
+#define OMAP4_LPDDR2IO2_GR5_I_MASK (0x7 << 3)
+#define OMAP4_LPDDR2IO2_GR5_WD_SHIFT 1
+#define OMAP4_LPDDR2IO2_GR5_WD_MASK (0x3 << 1)
+
+/* CONTROL_LPDDR2IO2_2 */
+#define OMAP4_LPDDR2IO2_GR11_SR_SHIFT 30
+#define OMAP4_LPDDR2IO2_GR11_SR_MASK (0x3 << 30)
+#define OMAP4_LPDDR2IO2_GR11_I_SHIFT 27
+#define OMAP4_LPDDR2IO2_GR11_I_MASK (0x7 << 27)
+#define OMAP4_LPDDR2IO2_GR11_WD_SHIFT 25
+#define OMAP4_LPDDR2IO2_GR11_WD_MASK (0x3 << 25)
+#define OMAP4_LPDDR2IO2_GR10_SR_SHIFT 22
+#define OMAP4_LPDDR2IO2_GR10_SR_MASK (0x3 << 22)
+#define OMAP4_LPDDR2IO2_GR10_I_SHIFT 19
+#define OMAP4_LPDDR2IO2_GR10_I_MASK (0x7 << 19)
+#define OMAP4_LPDDR2IO2_GR10_WD_SHIFT 17
+#define OMAP4_LPDDR2IO2_GR10_WD_MASK (0x3 << 17)
+#define OMAP4_LPDDR2IO2_GR9_SR_SHIFT 14
+#define OMAP4_LPDDR2IO2_GR9_SR_MASK (0x3 << 14)
+#define OMAP4_LPDDR2IO2_GR9_I_SHIFT 11
+#define OMAP4_LPDDR2IO2_GR9_I_MASK (0x7 << 11)
+#define OMAP4_LPDDR2IO2_GR9_WD_SHIFT 9
+#define OMAP4_LPDDR2IO2_GR9_WD_MASK (0x3 << 9)
+
+/* CONTROL_LPDDR2IO2_3 */
+#define OMAP4_LPDDR22_VREF_CA_CCAP0_SHIFT 31
+#define OMAP4_LPDDR22_VREF_CA_CCAP0_MASK (1 << 31)
+#define OMAP4_LPDDR22_VREF_CA_CCAP1_SHIFT 30
+#define OMAP4_LPDDR22_VREF_CA_CCAP1_MASK (1 << 30)
+#define OMAP4_LPDDR22_VREF_CA_INT_CCAP0_SHIFT 29
+#define OMAP4_LPDDR22_VREF_CA_INT_CCAP0_MASK (1 << 29)
+#define OMAP4_LPDDR22_VREF_CA_INT_CCAP1_SHIFT 28
+#define OMAP4_LPDDR22_VREF_CA_INT_CCAP1_MASK (1 << 28)
+#define OMAP4_LPDDR22_VREF_CA_INT_TAP0_SHIFT 27
+#define OMAP4_LPDDR22_VREF_CA_INT_TAP0_MASK (1 << 27)
+#define OMAP4_LPDDR22_VREF_CA_INT_TAP1_SHIFT 26
+#define OMAP4_LPDDR22_VREF_CA_INT_TAP1_MASK (1 << 26)
+#define OMAP4_LPDDR22_VREF_CA_TAP0_SHIFT 25
+#define OMAP4_LPDDR22_VREF_CA_TAP0_MASK (1 << 25)
+#define OMAP4_LPDDR22_VREF_CA_TAP1_SHIFT 24
+#define OMAP4_LPDDR22_VREF_CA_TAP1_MASK (1 << 24)
+#define OMAP4_LPDDR22_VREF_DQ0_INT_CCAP0_SHIFT 23
+#define OMAP4_LPDDR22_VREF_DQ0_INT_CCAP0_MASK (1 << 23)
+#define OMAP4_LPDDR22_VREF_DQ0_INT_CCAP1_SHIFT 22
+#define OMAP4_LPDDR22_VREF_DQ0_INT_CCAP1_MASK (1 << 22)
+#define OMAP4_LPDDR22_VREF_DQ0_INT_TAP0_SHIFT 21
+#define OMAP4_LPDDR22_VREF_DQ0_INT_TAP0_MASK (1 << 21)
+#define OMAP4_LPDDR22_VREF_DQ0_INT_TAP1_SHIFT 20
+#define OMAP4_LPDDR22_VREF_DQ0_INT_TAP1_MASK (1 << 20)
+#define OMAP4_LPDDR22_VREF_DQ1_INT_CCAP0_SHIFT 19
+#define OMAP4_LPDDR22_VREF_DQ1_INT_CCAP0_MASK (1 << 19)
+#define OMAP4_LPDDR22_VREF_DQ1_INT_CCAP1_SHIFT 18
+#define OMAP4_LPDDR22_VREF_DQ1_INT_CCAP1_MASK (1 << 18)
+#define OMAP4_LPDDR22_VREF_DQ1_INT_TAP0_SHIFT 17
+#define OMAP4_LPDDR22_VREF_DQ1_INT_TAP0_MASK (1 << 17)
+#define OMAP4_LPDDR22_VREF_DQ1_INT_TAP1_SHIFT 16
+#define OMAP4_LPDDR22_VREF_DQ1_INT_TAP1_MASK (1 << 16)
+#define OMAP4_LPDDR22_VREF_DQ_CCAP0_SHIFT 15
+#define OMAP4_LPDDR22_VREF_DQ_CCAP0_MASK (1 << 15)
+#define OMAP4_LPDDR22_VREF_DQ_CCAP1_SHIFT 14
+#define OMAP4_LPDDR22_VREF_DQ_CCAP1_MASK (1 << 14)
+#define OMAP4_LPDDR22_VREF_DQ_TAP0_SHIFT 13
+#define OMAP4_LPDDR22_VREF_DQ_TAP0_MASK (1 << 13)
+#define OMAP4_LPDDR22_VREF_DQ_TAP1_SHIFT 12
+#define OMAP4_LPDDR22_VREF_DQ_TAP1_MASK (1 << 12)
+
+/* CONTROL_BUS_HOLD */
+#define OMAP4_ABE_DMIC_DIN3_EN_SHIFT 31
+#define OMAP4_ABE_DMIC_DIN3_EN_MASK (1 << 31)
+#define OMAP4_MCSPI1_CS3_EN_SHIFT 30
+#define OMAP4_MCSPI1_CS3_EN_MASK (1 << 30)
+
+/* CONTROL_C2C */
+#define OMAP4_MIRROR_MODE_EN_SHIFT 31
+#define OMAP4_MIRROR_MODE_EN_MASK (1 << 31)
+#define OMAP4_C2C_SPARE_SHIFT 24
+#define OMAP4_C2C_SPARE_MASK (0x7f << 24)
+
+/* CORE_CONTROL_SPARE_RW */
+#define OMAP4_CORE_CONTROL_SPARE_RW_SHIFT 0
+#define OMAP4_CORE_CONTROL_SPARE_RW_MASK (0xffffffff << 0)
+
+/* CORE_CONTROL_SPARE_R */
+#define OMAP4_CORE_CONTROL_SPARE_R_SHIFT 0
+#define OMAP4_CORE_CONTROL_SPARE_R_MASK (0xffffffff << 0)
+
+/* CORE_CONTROL_SPARE_R_C0 */
+#define OMAP4_CORE_CONTROL_SPARE_R_C0_SHIFT 31
+#define OMAP4_CORE_CONTROL_SPARE_R_C0_MASK (1 << 31)
+#define OMAP4_CORE_CONTROL_SPARE_R_C1_SHIFT 30
+#define OMAP4_CORE_CONTROL_SPARE_R_C1_MASK (1 << 30)
+#define OMAP4_CORE_CONTROL_SPARE_R_C2_SHIFT 29
+#define OMAP4_CORE_CONTROL_SPARE_R_C2_MASK (1 << 29)
+#define OMAP4_CORE_CONTROL_SPARE_R_C3_SHIFT 28
+#define OMAP4_CORE_CONTROL_SPARE_R_C3_MASK (1 << 28)
+#define OMAP4_CORE_CONTROL_SPARE_R_C4_SHIFT 27
+#define OMAP4_CORE_CONTROL_SPARE_R_C4_MASK (1 << 27)
+#define OMAP4_CORE_CONTROL_SPARE_R_C5_SHIFT 26
+#define OMAP4_CORE_CONTROL_SPARE_R_C5_MASK (1 << 26)
+#define OMAP4_CORE_CONTROL_SPARE_R_C6_SHIFT 25
+#define OMAP4_CORE_CONTROL_SPARE_R_C6_MASK (1 << 25)
+#define OMAP4_CORE_CONTROL_SPARE_R_C7_SHIFT 24
+#define OMAP4_CORE_CONTROL_SPARE_R_C7_MASK (1 << 24)
+
+/* CONTROL_EFUSE_1 */
+#define OMAP4_AVDAC_TRIM_BYTE3_SHIFT 24
+#define OMAP4_AVDAC_TRIM_BYTE3_MASK (0x7f << 24)
+#define OMAP4_AVDAC_TRIM_BYTE2_SHIFT 16
+#define OMAP4_AVDAC_TRIM_BYTE2_MASK (0xff << 16)
+#define OMAP4_AVDAC_TRIM_BYTE1_SHIFT 8
+#define OMAP4_AVDAC_TRIM_BYTE1_MASK (0xff << 8)
+#define OMAP4_AVDAC_TRIM_BYTE0_SHIFT 0
+#define OMAP4_AVDAC_TRIM_BYTE0_MASK (0xff << 0)
+
+/* CONTROL_EFUSE_2 */
+#define OMAP4_EFUSE_SMART2TEST_P0_SHIFT 31
+#define OMAP4_EFUSE_SMART2TEST_P0_MASK (1 << 31)
+#define OMAP4_EFUSE_SMART2TEST_P1_SHIFT 30
+#define OMAP4_EFUSE_SMART2TEST_P1_MASK (1 << 30)
+#define OMAP4_EFUSE_SMART2TEST_P2_SHIFT 29
+#define OMAP4_EFUSE_SMART2TEST_P2_MASK (1 << 29)
+#define OMAP4_EFUSE_SMART2TEST_P3_SHIFT 28
+#define OMAP4_EFUSE_SMART2TEST_P3_MASK (1 << 28)
+#define OMAP4_EFUSE_SMART2TEST_N0_SHIFT 27
+#define OMAP4_EFUSE_SMART2TEST_N0_MASK (1 << 27)
+#define OMAP4_EFUSE_SMART2TEST_N1_SHIFT 26
+#define OMAP4_EFUSE_SMART2TEST_N1_MASK (1 << 26)
+#define OMAP4_EFUSE_SMART2TEST_N2_SHIFT 25
+#define OMAP4_EFUSE_SMART2TEST_N2_MASK (1 << 25)
+#define OMAP4_EFUSE_SMART2TEST_N3_SHIFT 24
+#define OMAP4_EFUSE_SMART2TEST_N3_MASK (1 << 24)
+#define OMAP4_LPDDR2_PTV_N1_SHIFT 23
+#define OMAP4_LPDDR2_PTV_N1_MASK (1 << 23)
+#define OMAP4_LPDDR2_PTV_N2_SHIFT 22
+#define OMAP4_LPDDR2_PTV_N2_MASK (1 << 22)
+#define OMAP4_LPDDR2_PTV_N3_SHIFT 21
+#define OMAP4_LPDDR2_PTV_N3_MASK (1 << 21)
+#define OMAP4_LPDDR2_PTV_N4_SHIFT 20
+#define OMAP4_LPDDR2_PTV_N4_MASK (1 << 20)
+#define OMAP4_LPDDR2_PTV_N5_SHIFT 19
+#define OMAP4_LPDDR2_PTV_N5_MASK (1 << 19)
+#define OMAP4_LPDDR2_PTV_P1_SHIFT 18
+#define OMAP4_LPDDR2_PTV_P1_MASK (1 << 18)
+#define OMAP4_LPDDR2_PTV_P2_SHIFT 17
+#define OMAP4_LPDDR2_PTV_P2_MASK (1 << 17)
+#define OMAP4_LPDDR2_PTV_P3_SHIFT 16
+#define OMAP4_LPDDR2_PTV_P3_MASK (1 << 16)
+#define OMAP4_LPDDR2_PTV_P4_SHIFT 15
+#define OMAP4_LPDDR2_PTV_P4_MASK (1 << 15)
+#define OMAP4_LPDDR2_PTV_P5_SHIFT 14
+#define OMAP4_LPDDR2_PTV_P5_MASK (1 << 14)
+
+/* CONTROL_EFUSE_3 */
+#define OMAP4_STD_FUSE_SPARE_1_SHIFT 24
+#define OMAP4_STD_FUSE_SPARE_1_MASK (0xff << 24)
+#define OMAP4_STD_FUSE_SPARE_2_SHIFT 16
+#define OMAP4_STD_FUSE_SPARE_2_MASK (0xff << 16)
+#define OMAP4_STD_FUSE_SPARE_3_SHIFT 8
+#define OMAP4_STD_FUSE_SPARE_3_MASK (0xff << 8)
+#define OMAP4_STD_FUSE_SPARE_4_SHIFT 0
+#define OMAP4_STD_FUSE_SPARE_4_MASK (0xff << 0)
+
+/* CONTROL_EFUSE_4 */
+#define OMAP4_STD_FUSE_SPARE_5_SHIFT 24
+#define OMAP4_STD_FUSE_SPARE_5_MASK (0xff << 24)
+#define OMAP4_STD_FUSE_SPARE_6_SHIFT 16
+#define OMAP4_STD_FUSE_SPARE_6_MASK (0xff << 16)
+#define OMAP4_STD_FUSE_SPARE_7_SHIFT 8
+#define OMAP4_STD_FUSE_SPARE_7_MASK (0xff << 8)
+#define OMAP4_STD_FUSE_SPARE_8_SHIFT 0
+#define OMAP4_STD_FUSE_SPARE_8_MASK (0xff << 0)
+
+#endif
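The _SHIFT/_MASK pairs defined above are intended to be used together for read-modify-write accesses to the control module pad registers. The helper below is a minimal sketch, illustrative only and not part of the patch; the caller is assumed to supply the ioremap()ed base and the register offset, and the PBIASLITE field named in the comment is just one example taken from the definitions above.

#include <linux/io.h>
#include <linux/types.h>

/*
 * Illustrative only: clear the bitfield described by a *_MASK/*_SHIFT
 * pair and insert a new value, e.g.
 *
 *	omap4_ctrl_field_update(base, offset,
 *				OMAP4_MMC1_PBIASLITE_VMODE_MASK,
 *				OMAP4_MMC1_PBIASLITE_VMODE_SHIFT, 1);
 */
static u32 omap4_ctrl_field_update(void __iomem *base, unsigned int offset,
				   u32 mask, unsigned int shift, u32 field)
{
	u32 v = readl(base + offset);

	v &= ~mask;			/* clear the old field */
	v |= (field << shift) & mask;	/* insert the new value */
	writel(v, base + offset);

	return v;
}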
diff --git a/arch/arm/mach-omap2/include/mach/ctrl_module_pad_wkup_44xx.h b/arch/arm/mach-omap2/include/mach/ctrl_module_pad_wkup_44xx.h
new file mode 100644
index 000000000000..17c9b37042c0
--- /dev/null
+++ b/arch/arm/mach-omap2/include/mach/ctrl_module_pad_wkup_44xx.h
@@ -0,0 +1,236 @@
+/*
+ * OMAP44xx CTRL_MODULE_PAD_WKUP registers and bitfields
+ *
+ * Copyright (C) 2009-2010 Texas Instruments, Inc.
+ *
+ * Benoit Cousson (b-cousson@ti.com)
+ * Santosh Shilimkar (santosh.shilimkar@ti.com)
+ *
+ * This file is automatically generated from the OMAP hardware databases.
+ * We respectfully ask that any modifications to this file be coordinated
+ * with the public linux-omap@vger.kernel.org mailing list and the
+ * authors above to ensure that the autogeneration scripts are kept
+ * up-to-date with the file contents.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef __ARCH_ARM_MACH_OMAP2_CTRL_MODULE_PAD_WKUP_44XX_H
+#define __ARCH_ARM_MACH_OMAP2_CTRL_MODULE_PAD_WKUP_44XX_H
+
+
+/* Base address */
+#define OMAP4_CTRL_MODULE_PAD_WKUP 0x4a31e000
+
+/* Registers offset */
+#define OMAP4_CTRL_MODULE_PAD_WKUP_IP_REVISION 0x0000
+#define OMAP4_CTRL_MODULE_PAD_WKUP_IP_HWINFO 0x0004
+#define OMAP4_CTRL_MODULE_PAD_WKUP_IP_SYSCONFIG 0x0010
+#define OMAP4_CTRL_MODULE_PAD_WKUP_PADCONF_WAKEUPEVENT_0 0x007c
+#define OMAP4_CTRL_MODULE_PAD_WKUP_CONTROL_SMART1NOPMIO_PADCONF_0 0x05a0
+#define OMAP4_CTRL_MODULE_PAD_WKUP_CONTROL_SMART1NOPMIO_PADCONF_1 0x05a4
+#define OMAP4_CTRL_MODULE_PAD_WKUP_CONTROL_PADCONF_MODE 0x05a8
+#define OMAP4_CTRL_MODULE_PAD_WKUP_CONTROL_XTAL_OSCILLATOR 0x05ac
+#define OMAP4_CTRL_MODULE_PAD_WKUP_CONTROL_USIMIO 0x0600
+#define OMAP4_CTRL_MODULE_PAD_WKUP_CONTROL_I2C_2 0x0604
+#define OMAP4_CTRL_MODULE_PAD_WKUP_CONTROL_JTAG 0x0608
+#define OMAP4_CTRL_MODULE_PAD_WKUP_CONTROL_SYS 0x060c
+#define OMAP4_CTRL_MODULE_PAD_WKUP_WKUP_CONTROL_SPARE_RW 0x0614
+#define OMAP4_CTRL_MODULE_PAD_WKUP_WKUP_CONTROL_SPARE_R 0x0618
+#define OMAP4_CTRL_MODULE_PAD_WKUP_WKUP_CONTROL_SPARE_R_C0 0x061c
+
+/* Registers shifts and masks */
+
+/* IP_REVISION */
+#define OMAP4_IP_REV_SCHEME_SHIFT 30
+#define OMAP4_IP_REV_SCHEME_MASK (0x3 << 30)
+#define OMAP4_IP_REV_FUNC_SHIFT 16
+#define OMAP4_IP_REV_FUNC_MASK (0xfff << 16)
+#define OMAP4_IP_REV_RTL_SHIFT 11
+#define OMAP4_IP_REV_RTL_MASK (0x1f << 11)
+#define OMAP4_IP_REV_MAJOR_SHIFT 8
+#define OMAP4_IP_REV_MAJOR_MASK (0x7 << 8)
+#define OMAP4_IP_REV_CUSTOM_SHIFT 6
+#define OMAP4_IP_REV_CUSTOM_MASK (0x3 << 6)
+#define OMAP4_IP_REV_MINOR_SHIFT 0
+#define OMAP4_IP_REV_MINOR_MASK (0x3f << 0)
+
+/* IP_HWINFO */
+#define OMAP4_IP_HWINFO_SHIFT 0
+#define OMAP4_IP_HWINFO_MASK (0xffffffff << 0)
+
+/* IP_SYSCONFIG */
+#define OMAP4_IP_SYSCONFIG_IDLEMODE_SHIFT 2
+#define OMAP4_IP_SYSCONFIG_IDLEMODE_MASK (0x3 << 2)
+
+/* PADCONF_WAKEUPEVENT_0 */
+#define OMAP4_JTAG_TDO_DUPLICATEWAKEUPEVENT_SHIFT 24
+#define OMAP4_JTAG_TDO_DUPLICATEWAKEUPEVENT_MASK (1 << 24)
+#define OMAP4_JTAG_TDI_DUPLICATEWAKEUPEVENT_SHIFT 23
+#define OMAP4_JTAG_TDI_DUPLICATEWAKEUPEVENT_MASK (1 << 23)
+#define OMAP4_JTAG_TMS_TMSC_DUPLICATEWAKEUPEVENT_SHIFT 22
+#define OMAP4_JTAG_TMS_TMSC_DUPLICATEWAKEUPEVENT_MASK (1 << 22)
+#define OMAP4_JTAG_RTCK_DUPLICATEWAKEUPEVENT_SHIFT 21
+#define OMAP4_JTAG_RTCK_DUPLICATEWAKEUPEVENT_MASK (1 << 21)
+#define OMAP4_JTAG_TCK_DUPLICATEWAKEUPEVENT_SHIFT 20
+#define OMAP4_JTAG_TCK_DUPLICATEWAKEUPEVENT_MASK (1 << 20)
+#define OMAP4_JTAG_NTRST_DUPLICATEWAKEUPEVENT_SHIFT 19
+#define OMAP4_JTAG_NTRST_DUPLICATEWAKEUPEVENT_MASK (1 << 19)
+#define OMAP4_SYS_BOOT7_DUPLICATEWAKEUPEVENT_SHIFT 18
+#define OMAP4_SYS_BOOT7_DUPLICATEWAKEUPEVENT_MASK (1 << 18)
+#define OMAP4_SYS_BOOT6_DUPLICATEWAKEUPEVENT_SHIFT 17
+#define OMAP4_SYS_BOOT6_DUPLICATEWAKEUPEVENT_MASK (1 << 17)
+#define OMAP4_SYS_PWRON_RESET_OUT_DUPLICATEWAKEUPEVENT_SHIFT 16
+#define OMAP4_SYS_PWRON_RESET_OUT_DUPLICATEWAKEUPEVENT_MASK (1 << 16)
+#define OMAP4_SYS_PWR_REQ_DUPLICATEWAKEUPEVENT_SHIFT 15
+#define OMAP4_SYS_PWR_REQ_DUPLICATEWAKEUPEVENT_MASK (1 << 15)
+#define OMAP4_SYS_NRESWARM_DUPLICATEWAKEUPEVENT_SHIFT 14
+#define OMAP4_SYS_NRESWARM_DUPLICATEWAKEUPEVENT_MASK (1 << 14)
+#define OMAP4_SYS_32K_DUPLICATEWAKEUPEVENT_SHIFT 13
+#define OMAP4_SYS_32K_DUPLICATEWAKEUPEVENT_MASK (1 << 13)
+#define OMAP4_FREF_CLK4_OUT_DUPLICATEWAKEUPEVENT_SHIFT 12
+#define OMAP4_FREF_CLK4_OUT_DUPLICATEWAKEUPEVENT_MASK (1 << 12)
+#define OMAP4_FREF_CLK4_REQ_DUPLICATEWAKEUPEVENT_SHIFT 11
+#define OMAP4_FREF_CLK4_REQ_DUPLICATEWAKEUPEVENT_MASK (1 << 11)
+#define OMAP4_FREF_CLK3_OUT_DUPLICATEWAKEUPEVENT_SHIFT 10
+#define OMAP4_FREF_CLK3_OUT_DUPLICATEWAKEUPEVENT_MASK (1 << 10)
+#define OMAP4_FREF_CLK3_REQ_DUPLICATEWAKEUPEVENT_SHIFT 9
+#define OMAP4_FREF_CLK3_REQ_DUPLICATEWAKEUPEVENT_MASK (1 << 9)
+#define OMAP4_FREF_CLK0_OUT_DUPLICATEWAKEUPEVENT_SHIFT 8
+#define OMAP4_FREF_CLK0_OUT_DUPLICATEWAKEUPEVENT_MASK (1 << 8)
+#define OMAP4_FREF_CLK_IOREQ_DUPLICATEWAKEUPEVENT_SHIFT 7
+#define OMAP4_FREF_CLK_IOREQ_DUPLICATEWAKEUPEVENT_MASK (1 << 7)
+#define OMAP4_SR_SDA_DUPLICATEWAKEUPEVENT_SHIFT 6
+#define OMAP4_SR_SDA_DUPLICATEWAKEUPEVENT_MASK (1 << 6)
+#define OMAP4_SR_SCL_DUPLICATEWAKEUPEVENT_SHIFT 5
+#define OMAP4_SR_SCL_DUPLICATEWAKEUPEVENT_MASK (1 << 5)
+#define OMAP4_SIM_PWRCTRL_DUPLICATEWAKEUPEVENT_SHIFT 4
+#define OMAP4_SIM_PWRCTRL_DUPLICATEWAKEUPEVENT_MASK (1 << 4)
+#define OMAP4_SIM_CD_DUPLICATEWAKEUPEVENT_SHIFT 3
+#define OMAP4_SIM_CD_DUPLICATEWAKEUPEVENT_MASK (1 << 3)
+#define OMAP4_SIM_RESET_DUPLICATEWAKEUPEVENT_SHIFT 2
+#define OMAP4_SIM_RESET_DUPLICATEWAKEUPEVENT_MASK (1 << 2)
+#define OMAP4_SIM_CLK_DUPLICATEWAKEUPEVENT_SHIFT 1
+#define OMAP4_SIM_CLK_DUPLICATEWAKEUPEVENT_MASK (1 << 1)
+#define OMAP4_SIM_IO_DUPLICATEWAKEUPEVENT_SHIFT 0
+#define OMAP4_SIM_IO_DUPLICATEWAKEUPEVENT_MASK (1 << 0)
+
+/* CONTROL_SMART1NOPMIO_PADCONF_0 */
+#define OMAP4_FREF_DR0_SC_SHIFT 30
+#define OMAP4_FREF_DR0_SC_MASK (0x3 << 30)
+#define OMAP4_FREF_DR1_SC_SHIFT 28
+#define OMAP4_FREF_DR1_SC_MASK (0x3 << 28)
+#define OMAP4_FREF_DR4_SC_SHIFT 26
+#define OMAP4_FREF_DR4_SC_MASK (0x3 << 26)
+#define OMAP4_FREF_DR5_SC_SHIFT 24
+#define OMAP4_FREF_DR5_SC_MASK (0x3 << 24)
+#define OMAP4_FREF_DR6_SC_SHIFT 22
+#define OMAP4_FREF_DR6_SC_MASK (0x3 << 22)
+#define OMAP4_FREF_DR7_SC_SHIFT 20
+#define OMAP4_FREF_DR7_SC_MASK (0x3 << 20)
+#define OMAP4_GPIO_DR7_SC_SHIFT 18
+#define OMAP4_GPIO_DR7_SC_MASK (0x3 << 18)
+#define OMAP4_DPM_DR0_SC_SHIFT 14
+#define OMAP4_DPM_DR0_SC_MASK (0x3 << 14)
+#define OMAP4_SIM_DR0_SC_SHIFT 12
+#define OMAP4_SIM_DR0_SC_MASK (0x3 << 12)
+
+/* CONTROL_SMART1NOPMIO_PADCONF_1 */
+#define OMAP4_FREF_DR0_LB_SHIFT 30
+#define OMAP4_FREF_DR0_LB_MASK (0x3 << 30)
+#define OMAP4_FREF_DR1_LB_SHIFT 28
+#define OMAP4_FREF_DR1_LB_MASK (0x3 << 28)
+#define OMAP4_FREF_DR4_LB_SHIFT 26
+#define OMAP4_FREF_DR4_LB_MASK (0x3 << 26)
+#define OMAP4_FREF_DR5_LB_SHIFT 24
+#define OMAP4_FREF_DR5_LB_MASK (0x3 << 24)
+#define OMAP4_FREF_DR6_LB_SHIFT 22
+#define OMAP4_FREF_DR6_LB_MASK (0x3 << 22)
+#define OMAP4_FREF_DR7_LB_SHIFT 20
+#define OMAP4_FREF_DR7_LB_MASK (0x3 << 20)
+#define OMAP4_GPIO_DR7_LB_SHIFT 18
+#define OMAP4_GPIO_DR7_LB_MASK (0x3 << 18)
+#define OMAP4_DPM_DR0_LB_SHIFT 14
+#define OMAP4_DPM_DR0_LB_MASK (0x3 << 14)
+#define OMAP4_SIM_DR0_LB_SHIFT 12
+#define OMAP4_SIM_DR0_LB_MASK (0x3 << 12)
+
+/* CONTROL_PADCONF_MODE */
+#define OMAP4_VDDS_DV_FREF_SHIFT 31
+#define OMAP4_VDDS_DV_FREF_MASK (1 << 31)
+#define OMAP4_VDDS_DV_BANK2_SHIFT 30
+#define OMAP4_VDDS_DV_BANK2_MASK (1 << 30)
+
+/* CONTROL_XTAL_OSCILLATOR */
+#define OMAP4_OSCILLATOR_BOOST_SHIFT 31
+#define OMAP4_OSCILLATOR_BOOST_MASK (1 << 31)
+#define OMAP4_OSCILLATOR_OS_OUT_SHIFT 30
+#define OMAP4_OSCILLATOR_OS_OUT_MASK (1 << 30)
+
+/* CONTROL_USIMIO */
+#define OMAP4_PAD_USIM_CLK_LOW_SHIFT 31
+#define OMAP4_PAD_USIM_CLK_LOW_MASK (1 << 31)
+#define OMAP4_PAD_USIM_RST_LOW_SHIFT 29
+#define OMAP4_PAD_USIM_RST_LOW_MASK (1 << 29)
+#define OMAP4_USIM_PWRDNZ_SHIFT 28
+#define OMAP4_USIM_PWRDNZ_MASK (1 << 28)
+
+/* CONTROL_I2C_2 */
+#define OMAP4_SR_SDA_GLFENB_SHIFT 31
+#define OMAP4_SR_SDA_GLFENB_MASK (1 << 31)
+#define OMAP4_SR_SDA_LOAD_BITS_SHIFT 29
+#define OMAP4_SR_SDA_LOAD_BITS_MASK (0x3 << 29)
+#define OMAP4_SR_SDA_PULLUPRESX_SHIFT 28
+#define OMAP4_SR_SDA_PULLUPRESX_MASK (1 << 28)
+#define OMAP4_SR_SCL_GLFENB_SHIFT 27
+#define OMAP4_SR_SCL_GLFENB_MASK (1 << 27)
+#define OMAP4_SR_SCL_LOAD_BITS_SHIFT 25
+#define OMAP4_SR_SCL_LOAD_BITS_MASK (0x3 << 25)
+#define OMAP4_SR_SCL_PULLUPRESX_SHIFT 24
+#define OMAP4_SR_SCL_PULLUPRESX_MASK (1 << 24)
+
+/* CONTROL_JTAG */
+#define OMAP4_JTAG_NTRST_EN_SHIFT 31
+#define OMAP4_JTAG_NTRST_EN_MASK (1 << 31)
+#define OMAP4_JTAG_TCK_EN_SHIFT 30
+#define OMAP4_JTAG_TCK_EN_MASK (1 << 30)
+#define OMAP4_JTAG_RTCK_EN_SHIFT 29
+#define OMAP4_JTAG_RTCK_EN_MASK (1 << 29)
+#define OMAP4_JTAG_TDI_EN_SHIFT 28
+#define OMAP4_JTAG_TDI_EN_MASK (1 << 28)
+#define OMAP4_JTAG_TDO_EN_SHIFT 27
+#define OMAP4_JTAG_TDO_EN_MASK (1 << 27)
+
+/* CONTROL_SYS */
+#define OMAP4_SYS_NRESWARM_PIPU_SHIFT 31
+#define OMAP4_SYS_NRESWARM_PIPU_MASK (1 << 31)
+
+/* WKUP_CONTROL_SPARE_RW */
+#define OMAP4_WKUP_CONTROL_SPARE_RW_SHIFT 0
+#define OMAP4_WKUP_CONTROL_SPARE_RW_MASK (0xffffffff << 0)
+
+/* WKUP_CONTROL_SPARE_R */
+#define OMAP4_WKUP_CONTROL_SPARE_R_SHIFT 0
+#define OMAP4_WKUP_CONTROL_SPARE_R_MASK (0xffffffff << 0)
+
+/* WKUP_CONTROL_SPARE_R_C0 */
+#define OMAP4_WKUP_CONTROL_SPARE_R_C0_SHIFT 31
+#define OMAP4_WKUP_CONTROL_SPARE_R_C0_MASK (1 << 31)
+#define OMAP4_WKUP_CONTROL_SPARE_R_C1_SHIFT 30
+#define OMAP4_WKUP_CONTROL_SPARE_R_C1_MASK (1 << 30)
+#define OMAP4_WKUP_CONTROL_SPARE_R_C2_SHIFT 29
+#define OMAP4_WKUP_CONTROL_SPARE_R_C2_MASK (1 << 29)
+#define OMAP4_WKUP_CONTROL_SPARE_R_C3_SHIFT 28
+#define OMAP4_WKUP_CONTROL_SPARE_R_C3_MASK (1 << 28)
+#define OMAP4_WKUP_CONTROL_SPARE_R_C4_SHIFT 27
+#define OMAP4_WKUP_CONTROL_SPARE_R_C4_MASK (1 << 27)
+#define OMAP4_WKUP_CONTROL_SPARE_R_C5_SHIFT 26
+#define OMAP4_WKUP_CONTROL_SPARE_R_C5_MASK (1 << 26)
+#define OMAP4_WKUP_CONTROL_SPARE_R_C6_SHIFT 25
+#define OMAP4_WKUP_CONTROL_SPARE_R_C6_MASK (1 << 25)
+#define OMAP4_WKUP_CONTROL_SPARE_R_C7_SHIFT 24
+#define OMAP4_WKUP_CONTROL_SPARE_R_C7_MASK (1 << 24)
+
+#endif
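As a usage illustration (again not part of the patch), the wakeup-event status bits above can be tested by reading PADCONF_WAKEUPEVENT_0 from the pad wakeup control module. The ioremap() of the fixed base address, the mapping size and the helper name below are assumptions made only for this sketch; only the macro names come from the header just added.

#include <linux/io.h>
#include <linux/types.h>
#include <mach/ctrl_module_pad_wkup_44xx.h>

/* Illustrative only: did the sys_pwr_req pad latch a wakeup event? */
static bool omap4_sys_pwr_req_wakeup_pending(void)
{
	void __iomem *base;
	u32 v;
	bool pending;

	base = ioremap(OMAP4_CTRL_MODULE_PAD_WKUP, 0x1000);
	if (!base)
		return false;

	v = readl(base + OMAP4_CTRL_MODULE_PAD_WKUP_PADCONF_WAKEUPEVENT_0);
	pending = v & OMAP4_SYS_PWR_REQ_DUPLICATEWAKEUPEVENT_MASK;

	iounmap(base);

	return pending;
}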
diff --git a/arch/arm/mach-omap2/include/mach/ctrl_module_wkup_44xx.h b/arch/arm/mach-omap2/include/mach/ctrl_module_wkup_44xx.h
new file mode 100644
index 000000000000..a0af9baec3f7
--- /dev/null
+++ b/arch/arm/mach-omap2/include/mach/ctrl_module_wkup_44xx.h
@@ -0,0 +1,92 @@
+/*
+ * OMAP44xx CTRL_MODULE_WKUP registers and bitfields
+ *
+ * Copyright (C) 2009-2010 Texas Instruments, Inc.
+ *
+ * Benoit Cousson (b-cousson@ti.com)
+ * Santosh Shilimkar (santosh.shilimkar@ti.com)
+ *
+ * This file is automatically generated from the OMAP hardware databases.
+ * We respectfully ask that any modifications to this file be coordinated
+ * with the public linux-omap@vger.kernel.org mailing list and the
+ * authors above to ensure that the autogeneration scripts are kept
+ * up-to-date with the file contents.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef __ARCH_ARM_MACH_OMAP2_CTRL_MODULE_WKUP_44XX_H
+#define __ARCH_ARM_MACH_OMAP2_CTRL_MODULE_WKUP_44XX_H
+
+
+/* Base address */
+#define OMAP4_CTRL_MODULE_WKUP 0x4a30c000
+
+/* Registers offset */
+#define OMAP4_CTRL_MODULE_WKUP_IP_REVISION 0x0000
+#define OMAP4_CTRL_MODULE_WKUP_IP_HWINFO 0x0004
+#define OMAP4_CTRL_MODULE_WKUP_IP_SYSCONFIG 0x0010
+#define OMAP4_CTRL_MODULE_WKUP_CONF_DEBUG_SEL_TST_0 0x0460
+#define OMAP4_CTRL_MODULE_WKUP_CONF_DEBUG_SEL_TST_1 0x0464
+#define OMAP4_CTRL_MODULE_WKUP_CONF_DEBUG_SEL_TST_2 0x0468
+#define OMAP4_CTRL_MODULE_WKUP_CONF_DEBUG_SEL_TST_3 0x046c
+#define OMAP4_CTRL_MODULE_WKUP_CONF_DEBUG_SEL_TST_4 0x0470
+#define OMAP4_CTRL_MODULE_WKUP_CONF_DEBUG_SEL_TST_5 0x0474
+#define OMAP4_CTRL_MODULE_WKUP_CONF_DEBUG_SEL_TST_6 0x0478
+#define OMAP4_CTRL_MODULE_WKUP_CONF_DEBUG_SEL_TST_7 0x047c
+#define OMAP4_CTRL_MODULE_WKUP_CONF_DEBUG_SEL_TST_8 0x0480
+#define OMAP4_CTRL_MODULE_WKUP_CONF_DEBUG_SEL_TST_9 0x0484
+#define OMAP4_CTRL_MODULE_WKUP_CONF_DEBUG_SEL_TST_10 0x0488
+#define OMAP4_CTRL_MODULE_WKUP_CONF_DEBUG_SEL_TST_11 0x048c
+#define OMAP4_CTRL_MODULE_WKUP_CONF_DEBUG_SEL_TST_12 0x0490
+#define OMAP4_CTRL_MODULE_WKUP_CONF_DEBUG_SEL_TST_13 0x0494
+#define OMAP4_CTRL_MODULE_WKUP_CONF_DEBUG_SEL_TST_14 0x0498
+#define OMAP4_CTRL_MODULE_WKUP_CONF_DEBUG_SEL_TST_15 0x049c
+#define OMAP4_CTRL_MODULE_WKUP_CONF_DEBUG_SEL_TST_16 0x04a0
+#define OMAP4_CTRL_MODULE_WKUP_CONF_DEBUG_SEL_TST_17 0x04a4
+#define OMAP4_CTRL_MODULE_WKUP_CONF_DEBUG_SEL_TST_18 0x04a8
+#define OMAP4_CTRL_MODULE_WKUP_CONF_DEBUG_SEL_TST_19 0x04ac
+#define OMAP4_CTRL_MODULE_WKUP_CONF_DEBUG_SEL_TST_20 0x04b0
+#define OMAP4_CTRL_MODULE_WKUP_CONF_DEBUG_SEL_TST_21 0x04b4
+#define OMAP4_CTRL_MODULE_WKUP_CONF_DEBUG_SEL_TST_22 0x04b8
+#define OMAP4_CTRL_MODULE_WKUP_CONF_DEBUG_SEL_TST_23 0x04bc
+#define OMAP4_CTRL_MODULE_WKUP_CONF_DEBUG_SEL_TST_24 0x04c0
+#define OMAP4_CTRL_MODULE_WKUP_CONF_DEBUG_SEL_TST_25 0x04c4
+#define OMAP4_CTRL_MODULE_WKUP_CONF_DEBUG_SEL_TST_26 0x04c8
+#define OMAP4_CTRL_MODULE_WKUP_CONF_DEBUG_SEL_TST_27 0x04cc
+#define OMAP4_CTRL_MODULE_WKUP_CONF_DEBUG_SEL_TST_28 0x04d0
+#define OMAP4_CTRL_MODULE_WKUP_CONF_DEBUG_SEL_TST_29 0x04d4
+#define OMAP4_CTRL_MODULE_WKUP_CONF_DEBUG_SEL_TST_30 0x04d8
+#define OMAP4_CTRL_MODULE_WKUP_CONF_DEBUG_SEL_TST_31 0x04dc
+
+/* Registers shifts and masks */
+
+/* IP_REVISION */
+#define OMAP4_IP_REV_SCHEME_SHIFT 30
+#define OMAP4_IP_REV_SCHEME_MASK (0x3 << 30)
+#define OMAP4_IP_REV_FUNC_SHIFT 16
+#define OMAP4_IP_REV_FUNC_MASK (0xfff << 16)
+#define OMAP4_IP_REV_RTL_SHIFT 11
+#define OMAP4_IP_REV_RTL_MASK (0x1f << 11)
+#define OMAP4_IP_REV_MAJOR_SHIFT 8
+#define OMAP4_IP_REV_MAJOR_MASK (0x7 << 8)
+#define OMAP4_IP_REV_CUSTOM_SHIFT 6
+#define OMAP4_IP_REV_CUSTOM_MASK (0x3 << 6)
+#define OMAP4_IP_REV_MINOR_SHIFT 0
+#define OMAP4_IP_REV_MINOR_MASK (0x3f << 0)
+
+/* IP_HWINFO */
+#define OMAP4_IP_HWINFO_SHIFT 0
+#define OMAP4_IP_HWINFO_MASK (0xffffffff << 0)
+
+/* IP_SYSCONFIG */
+#define OMAP4_IP_SYSCONFIG_IDLEMODE_SHIFT 2
+#define OMAP4_IP_SYSCONFIG_IDLEMODE_MASK (0x3 << 2)
+
+/* CONF_DEBUG_SEL_TST_0 */
+#define OMAP4_WKUP_MODE_SHIFT 0
+#define OMAP4_WKUP_MODE_MASK (1 << 0)
+
+#endif
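
For orientation, the autogenerated shift/mask pairs above are meant to be used together: mask the register value, then shift the field down. A minimal sketch, assuming the caller has already ioremap()'d OMAP4_CTRL_MODULE_WKUP, shown only to illustrate the convention (not part of this series):

#include <linux/io.h>
#include <linux/kernel.h>
#include <mach/ctrl_module_wkup_44xx.h>

/* Sketch: decode the CTRL_MODULE_WKUP IP_REVISION fields (assumed mapping) */
static void omap4_ctrl_wkup_print_rev(void __iomem *base)
{
	u32 rev = __raw_readl(base + OMAP4_CTRL_MODULE_WKUP_IP_REVISION);
	u32 major = (rev & OMAP4_IP_REV_MAJOR_MASK) >> OMAP4_IP_REV_MAJOR_SHIFT;
	u32 minor = (rev & OMAP4_IP_REV_MINOR_MASK) >> OMAP4_IP_REV_MINOR_SHIFT;

	pr_info("CTRL_MODULE_WKUP IP rev %u.%u\n", major, minor);
}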
diff --git a/arch/arm/mach-omap2/include/mach/vmalloc.h b/arch/arm/mach-omap2/include/mach/vmalloc.h
index 4da31e997efe..866319947760 100644
--- a/arch/arm/mach-omap2/include/mach/vmalloc.h
+++ b/arch/arm/mach-omap2/include/mach/vmalloc.h
@@ -17,4 +17,4 @@
* along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*/
-#define VMALLOC_END 0xf8000000
+#define VMALLOC_END 0xf8000000UL
diff --git a/arch/arm/mach-omap2/io.c b/arch/arm/mach-omap2/io.c
index b9ea70bce563..40562ddd3ee4 100644
--- a/arch/arm/mach-omap2/io.c
+++ b/arch/arm/mach-omap2/io.c
@@ -36,6 +36,7 @@
#include "clock2xxx.h"
#include "clock3xxx.h"
#include "clock44xx.h"
+#include "io.h"
#include <plat/omap-pm.h>
#include <plat/powerdomain.h>
@@ -323,6 +324,9 @@ void __init omap2_init_common_hw(struct omap_sdrc_params *sdrc_cs0,
omap2430_hwmod_init();
else if (cpu_is_omap34xx())
omap3xxx_hwmod_init();
+ else if (cpu_is_omap44xx())
+ omap44xx_hwmod_init();
+
/* The OPP tables have to be registered before a clk init */
omap_pm_if_early_init(mpu_opps, dsp_opps, l3_opps);
@@ -342,9 +346,7 @@ void __init omap2_init_common_hw(struct omap_sdrc_params *sdrc_cs0,
#ifndef CONFIG_PM_RUNTIME
skip_setup_idle = 1;
#endif
- if (cpu_is_omap24xx() || cpu_is_omap34xx()) /* FIXME: OMAP4 */
- omap_hwmod_late_init(skip_setup_idle);
-
+ omap_hwmod_late_init(skip_setup_idle);
if (cpu_is_omap24xx() || cpu_is_omap34xx()) {
omap2_sdrc_init(sdrc_cs0, sdrc_cs1);
_omap2_init_reprogram_sdrc();
diff --git a/arch/arm/mach-omap2/io.h b/arch/arm/mach-omap2/io.h
new file mode 100644
index 000000000000..fd230c6cded5
--- /dev/null
+++ b/arch/arm/mach-omap2/io.h
@@ -0,0 +1,7 @@
+
+#ifndef __MACH_OMAP2_IO_H__
+#define __MACH_OMAP2_IO_H__
+
+extern int __init omap_sram_init(void);
+
+#endif /* __MACH_OMAP2_IO_H__ */
diff --git a/arch/arm/mach-omap2/irq.c b/arch/arm/mach-omap2/irq.c
index 26aeef560aa3..32eeabe9d2ab 100644
--- a/arch/arm/mach-omap2/irq.c
+++ b/arch/arm/mach-omap2/irq.c
@@ -47,7 +47,6 @@ static struct omap_irq_bank {
} __attribute__ ((aligned(4))) irq_banks[] = {
{
/* MPU INTC */
- .base_reg = 0,
.nr_irqs = 96,
},
};
diff --git a/arch/arm/mach-omap2/mailbox.c b/arch/arm/mach-omap2/mailbox.c
index 42dbfa46e656..40ddecab93a9 100644
--- a/arch/arm/mach-omap2/mailbox.c
+++ b/arch/arm/mach-omap2/mailbox.c
@@ -181,7 +181,7 @@ static int omap2_mbox_fifo_full(struct omap_mbox *mbox)
static void omap2_mbox_enable_irq(struct omap_mbox *mbox,
omap_mbox_type_t irq)
{
- struct omap_mbox2_priv *p = (struct omap_mbox2_priv *)mbox->priv;
+ struct omap_mbox2_priv *p = mbox->priv;
u32 l, bit = (irq == IRQ_TX) ? p->notfull_bit : p->newmsg_bit;
l = mbox_read_reg(p->irqenable);
@@ -192,7 +192,7 @@ static void omap2_mbox_enable_irq(struct omap_mbox *mbox,
static void omap2_mbox_disable_irq(struct omap_mbox *mbox,
omap_mbox_type_t irq)
{
- struct omap_mbox2_priv *p = (struct omap_mbox2_priv *)mbox->priv;
+ struct omap_mbox2_priv *p = mbox->priv;
u32 l, bit = (irq == IRQ_TX) ? p->notfull_bit : p->newmsg_bit;
l = mbox_read_reg(p->irqdisable);
l &= ~bit;
@@ -202,7 +202,7 @@ static void omap2_mbox_disable_irq(struct omap_mbox *mbox,
static void omap2_mbox_ack_irq(struct omap_mbox *mbox,
omap_mbox_type_t irq)
{
- struct omap_mbox2_priv *p = (struct omap_mbox2_priv *)mbox->priv;
+ struct omap_mbox2_priv *p = mbox->priv;
u32 bit = (irq == IRQ_TX) ? p->notfull_bit : p->newmsg_bit;
mbox_write_reg(bit, p->irqstatus);
@@ -214,7 +214,7 @@ static void omap2_mbox_ack_irq(struct omap_mbox *mbox,
static int omap2_mbox_is_irq(struct omap_mbox *mbox,
omap_mbox_type_t irq)
{
- struct omap_mbox2_priv *p = (struct omap_mbox2_priv *)mbox->priv;
+ struct omap_mbox2_priv *p = mbox->priv;
u32 bit = (irq == IRQ_TX) ? p->notfull_bit : p->newmsg_bit;
u32 enable = mbox_read_reg(p->irqenable);
u32 status = mbox_read_reg(p->irqstatus);
diff --git a/arch/arm/mach-omap2/mcbsp.c b/arch/arm/mach-omap2/mcbsp.c
index 467aae245781..f9c9df5b5ff1 100644
--- a/arch/arm/mach-omap2/mcbsp.c
+++ b/arch/arm/mach-omap2/mcbsp.c
@@ -23,29 +23,86 @@
#include <plat/cpu.h>
#include <plat/mcbsp.h>
-#include "mux.h"
+#include "control.h"
-static void omap2_mcbsp2_mux_setup(void)
+
+/* McBSP internal signal muxing functions */
+
+void omap2_mcbsp1_mux_clkr_src(u8 mux)
{
- omap_mux_init_signal("eac_ac_sclk.mcbsp2_clkx", OMAP_PULL_ENA);
- omap_mux_init_signal("eac_ac_fs.mcbsp2_fsx", OMAP_PULL_ENA);
- omap_mux_init_signal("eac_ac_din.mcbsp2_dr", OMAP_PULL_ENA);
- omap_mux_init_signal("eac_ac_dout.mcbsp2_dx", OMAP_PULL_ENA);
- omap_mux_init_gpio(117, OMAP_PULL_ENA);
- /*
- * TODO: Need to add MUX settings for OMAP 2430 SDP
- */
+ u32 v;
+
+ v = omap_ctrl_readl(OMAP2_CONTROL_DEVCONF0);
+ if (mux == CLKR_SRC_CLKR)
+ v &= ~OMAP2_MCBSP1_CLKR_MASK;
+ else if (mux == CLKR_SRC_CLKX)
+ v |= OMAP2_MCBSP1_CLKR_MASK;
+ omap_ctrl_writel(v, OMAP2_CONTROL_DEVCONF0);
}
+EXPORT_SYMBOL(omap2_mcbsp1_mux_clkr_src);
-static void omap2_mcbsp_request(unsigned int id)
+void omap2_mcbsp1_mux_fsr_src(u8 mux)
{
- if (cpu_is_omap2420() && (id == OMAP_MCBSP2))
- omap2_mcbsp2_mux_setup();
+ u32 v;
+
+ v = omap_ctrl_readl(OMAP2_CONTROL_DEVCONF0);
+ if (mux == FSR_SRC_FSR)
+ v &= ~OMAP2_MCBSP1_FSR_MASK;
+ else if (mux == FSR_SRC_FSX)
+ v |= OMAP2_MCBSP1_FSR_MASK;
+ omap_ctrl_writel(v, OMAP2_CONTROL_DEVCONF0);
}
+EXPORT_SYMBOL(omap2_mcbsp1_mux_fsr_src);
-static struct omap_mcbsp_ops omap2_mcbsp_ops = {
- .request = omap2_mcbsp_request,
-};
+/* McBSP CLKS source switching function */
+
+int omap2_mcbsp_set_clks_src(u8 id, u8 fck_src_id)
+{
+ struct omap_mcbsp *mcbsp;
+ struct clk *fck_src;
+ char *fck_src_name;
+ int r;
+
+ if (!omap_mcbsp_check_valid_id(id)) {
+ pr_err("%s: Invalid id (%d)\n", __func__, id + 1);
+ return -EINVAL;
+ }
+ mcbsp = id_to_mcbsp_ptr(id);
+
+ if (fck_src_id == MCBSP_CLKS_PAD_SRC)
+ fck_src_name = "pad_fck";
+ else if (fck_src_id == MCBSP_CLKS_PRCM_SRC)
+ fck_src_name = "prcm_fck";
+ else
+ return -EINVAL;
+
+ fck_src = clk_get(mcbsp->dev, fck_src_name);
+ if (IS_ERR_OR_NULL(fck_src)) {
+ pr_err("omap-mcbsp: %s: could not clk_get() %s\n", "clks",
+ fck_src_name);
+ return -EINVAL;
+ }
+
+ clk_disable(mcbsp->fclk);
+
+ r = clk_set_parent(mcbsp->fclk, fck_src);
+ if (IS_ERR_VALUE(r)) {
+ pr_err("omap-mcbsp: %s: could not clk_set_parent() to %s\n",
+ "clks", fck_src_name);
+ clk_put(fck_src);
+ return -EINVAL;
+ }
+
+ clk_enable(mcbsp->fclk);
+
+ clk_put(fck_src);
+
+ return 0;
+}
+EXPORT_SYMBOL(omap2_mcbsp_set_clks_src);
+
+
+/* Platform data */
#ifdef CONFIG_ARCH_OMAP2420
static struct omap_mcbsp_platform_data omap2420_mcbsp_pdata[] = {
@@ -55,7 +112,6 @@ static struct omap_mcbsp_platform_data omap2420_mcbsp_pdata[] = {
.dma_tx_sync = OMAP24XX_DMA_MCBSP1_TX,
.rx_irq = INT_24XX_MCBSP1_IRQ_RX,
.tx_irq = INT_24XX_MCBSP1_IRQ_TX,
- .ops = &omap2_mcbsp_ops,
},
{
.phys_base = OMAP24XX_MCBSP2_BASE,
@@ -63,7 +119,6 @@ static struct omap_mcbsp_platform_data omap2420_mcbsp_pdata[] = {
.dma_tx_sync = OMAP24XX_DMA_MCBSP2_TX,
.rx_irq = INT_24XX_MCBSP2_IRQ_RX,
.tx_irq = INT_24XX_MCBSP2_IRQ_TX,
- .ops = &omap2_mcbsp_ops,
},
};
#define OMAP2420_MCBSP_PDATA_SZ ARRAY_SIZE(omap2420_mcbsp_pdata)
@@ -82,7 +137,6 @@ static struct omap_mcbsp_platform_data omap2430_mcbsp_pdata[] = {
.dma_tx_sync = OMAP24XX_DMA_MCBSP1_TX,
.rx_irq = INT_24XX_MCBSP1_IRQ_RX,
.tx_irq = INT_24XX_MCBSP1_IRQ_TX,
- .ops = &omap2_mcbsp_ops,
},
{
.phys_base = OMAP24XX_MCBSP2_BASE,
@@ -90,7 +144,6 @@ static struct omap_mcbsp_platform_data omap2430_mcbsp_pdata[] = {
.dma_tx_sync = OMAP24XX_DMA_MCBSP2_TX,
.rx_irq = INT_24XX_MCBSP2_IRQ_RX,
.tx_irq = INT_24XX_MCBSP2_IRQ_TX,
- .ops = &omap2_mcbsp_ops,
},
{
.phys_base = OMAP2430_MCBSP3_BASE,
@@ -98,7 +151,6 @@ static struct omap_mcbsp_platform_data omap2430_mcbsp_pdata[] = {
.dma_tx_sync = OMAP24XX_DMA_MCBSP3_TX,
.rx_irq = INT_24XX_MCBSP3_IRQ_RX,
.tx_irq = INT_24XX_MCBSP3_IRQ_TX,
- .ops = &omap2_mcbsp_ops,
},
{
.phys_base = OMAP2430_MCBSP4_BASE,
@@ -106,7 +158,6 @@ static struct omap_mcbsp_platform_data omap2430_mcbsp_pdata[] = {
.dma_tx_sync = OMAP24XX_DMA_MCBSP4_TX,
.rx_irq = INT_24XX_MCBSP4_IRQ_RX,
.tx_irq = INT_24XX_MCBSP4_IRQ_TX,
- .ops = &omap2_mcbsp_ops,
},
{
.phys_base = OMAP2430_MCBSP5_BASE,
@@ -114,7 +165,6 @@ static struct omap_mcbsp_platform_data omap2430_mcbsp_pdata[] = {
.dma_tx_sync = OMAP24XX_DMA_MCBSP5_TX,
.rx_irq = INT_24XX_MCBSP5_IRQ_RX,
.tx_irq = INT_24XX_MCBSP5_IRQ_TX,
- .ops = &omap2_mcbsp_ops,
},
};
#define OMAP2430_MCBSP_PDATA_SZ ARRAY_SIZE(omap2430_mcbsp_pdata)
@@ -133,7 +183,6 @@ static struct omap_mcbsp_platform_data omap34xx_mcbsp_pdata[] = {
.dma_tx_sync = OMAP24XX_DMA_MCBSP1_TX,
.rx_irq = INT_24XX_MCBSP1_IRQ_RX,
.tx_irq = INT_24XX_MCBSP1_IRQ_TX,
- .ops = &omap2_mcbsp_ops,
.buffer_size = 0x80, /* The FIFO has 128 locations */
},
{
@@ -143,7 +192,6 @@ static struct omap_mcbsp_platform_data omap34xx_mcbsp_pdata[] = {
.dma_tx_sync = OMAP24XX_DMA_MCBSP2_TX,
.rx_irq = INT_24XX_MCBSP2_IRQ_RX,
.tx_irq = INT_24XX_MCBSP2_IRQ_TX,
- .ops = &omap2_mcbsp_ops,
.buffer_size = 0x500, /* The FIFO has 1024 + 256 locations */
},
{
@@ -153,7 +201,6 @@ static struct omap_mcbsp_platform_data omap34xx_mcbsp_pdata[] = {
.dma_tx_sync = OMAP24XX_DMA_MCBSP3_TX,
.rx_irq = INT_24XX_MCBSP3_IRQ_RX,
.tx_irq = INT_24XX_MCBSP3_IRQ_TX,
- .ops = &omap2_mcbsp_ops,
.buffer_size = 0x80, /* The FIFO has 128 locations */
},
{
@@ -162,7 +209,6 @@ static struct omap_mcbsp_platform_data omap34xx_mcbsp_pdata[] = {
.dma_tx_sync = OMAP24XX_DMA_MCBSP4_TX,
.rx_irq = INT_24XX_MCBSP4_IRQ_RX,
.tx_irq = INT_24XX_MCBSP4_IRQ_TX,
- .ops = &omap2_mcbsp_ops,
.buffer_size = 0x80, /* The FIFO has 128 locations */
},
{
@@ -171,7 +217,6 @@ static struct omap_mcbsp_platform_data omap34xx_mcbsp_pdata[] = {
.dma_tx_sync = OMAP24XX_DMA_MCBSP5_TX,
.rx_irq = INT_24XX_MCBSP5_IRQ_RX,
.tx_irq = INT_24XX_MCBSP5_IRQ_TX,
- .ops = &omap2_mcbsp_ops,
.buffer_size = 0x80, /* The FIFO has 128 locations */
},
};
@@ -189,28 +234,24 @@ static struct omap_mcbsp_platform_data omap44xx_mcbsp_pdata[] = {
.dma_rx_sync = OMAP44XX_DMA_MCBSP1_RX,
.dma_tx_sync = OMAP44XX_DMA_MCBSP1_TX,
.tx_irq = OMAP44XX_IRQ_MCBSP1,
- .ops = &omap2_mcbsp_ops,
},
{
.phys_base = OMAP44XX_MCBSP2_BASE,
.dma_rx_sync = OMAP44XX_DMA_MCBSP2_RX,
.dma_tx_sync = OMAP44XX_DMA_MCBSP2_TX,
.tx_irq = OMAP44XX_IRQ_MCBSP2,
- .ops = &omap2_mcbsp_ops,
},
{
.phys_base = OMAP44XX_MCBSP3_BASE,
.dma_rx_sync = OMAP44XX_DMA_MCBSP3_RX,
.dma_tx_sync = OMAP44XX_DMA_MCBSP3_TX,
.tx_irq = OMAP44XX_IRQ_MCBSP3,
- .ops = &omap2_mcbsp_ops,
},
{
.phys_base = OMAP44XX_MCBSP4_BASE,
.dma_rx_sync = OMAP44XX_DMA_MCBSP4_RX,
.dma_tx_sync = OMAP44XX_DMA_MCBSP4_TX,
.tx_irq = OMAP44XX_IRQ_MCBSP4,
- .ops = &omap2_mcbsp_ops,
},
};
#define OMAP44XX_MCBSP_PDATA_SZ ARRAY_SIZE(omap44xx_mcbsp_pdata)
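
The three exported helpers above replace the old ->request() hook: board or machine code now calls them explicitly. A rough usage sketch (illustrative only; the zero-based McBSP id and the CLKR_SRC_*/FSR_SRC_*/MCBSP_CLKS_* constants are assumed to come from <plat/mcbsp.h>):

#include <linux/kernel.h>
#include <plat/mcbsp.h>

static void example_mcbsp1_clock_setup(void)
{
	/* Drive CLKR/FSR internally from the CLKX/FSX pins */
	omap2_mcbsp1_mux_clkr_src(CLKR_SRC_CLKX);
	omap2_mcbsp1_mux_fsr_src(FSR_SRC_FSX);

	/* Feed McBSP1 (id 0) from the PRCM functional clock source */
	if (omap2_mcbsp_set_clks_src(0, MCBSP_CLKS_PRCM_SRC))
		pr_err("example: could not switch McBSP1 CLKS source\n");
}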
diff --git a/arch/arm/mach-omap2/mux.c b/arch/arm/mach-omap2/mux.c
index ab403b2ed26b..074536ae401f 100644
--- a/arch/arm/mach-omap2/mux.c
+++ b/arch/arm/mach-omap2/mux.c
@@ -23,12 +23,11 @@
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*
*/
-#include <linux/module.h>
+#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/io.h>
-#include <linux/slab.h>
-#include <linux/spinlock.h>
#include <linux/list.h>
+#include <linux/slab.h>
#include <linux/ctype.h>
#include <linux/debugfs.h>
#include <linux/seq_file.h>
@@ -36,8 +35,7 @@
#include <asm/system.h>
-#include <plat/control.h>
-
+#include "control.h"
#include "mux.h"
#define OMAP_MUX_BASE_OFFSET 0x30 /* Offset from CTRL_BASE */
@@ -87,7 +85,7 @@ static char *omap_mux_options;
int __init omap_mux_init_gpio(int gpio, int val)
{
struct omap_mux_entry *e;
- struct omap_mux *gpio_mux;
+ struct omap_mux *gpio_mux = NULL;
u16 old_mode;
u16 mux_mode;
int found = 0;
@@ -127,17 +125,16 @@ int __init omap_mux_init_gpio(int gpio, int val)
return 0;
}
-int __init omap_mux_init_signal(char *muxname, int val)
+int __init omap_mux_init_signal(const char *muxname, int val)
{
struct omap_mux_entry *e;
- char *m0_name = NULL, *mode_name = NULL;
- int found = 0;
+ const char *mode_name;
+ int found = 0, mode0_len = 0;
mode_name = strchr(muxname, '.');
if (mode_name) {
- *mode_name = '\0';
+ mode0_len = strlen(muxname) - strlen(mode_name);
mode_name++;
- m0_name = muxname;
} else {
mode_name = muxname;
}
@@ -147,9 +144,11 @@ int __init omap_mux_init_signal(char *muxname, int val)
char *m0_entry = m->muxnames[0];
int i;
- if (m0_name && strcmp(m0_name, m0_entry))
+ /* First check for full name in mode0.muxmode format */
+ if (mode0_len && strncmp(muxname, m0_entry, mode0_len))
continue;
+ /* Then check for muxmode only */
for (i = 0; i < OMAP_MUX_NR_MODES; i++) {
char *mode_cur = m->muxnames[i];
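
With muxname now const, omap_mux_init_signal() can no longer NUL-terminate the mode0 part in place; it compares a length-limited prefix instead. The same idea in isolation (hypothetical helper, shown only to make the matching rule explicit):

#include <linux/string.h>

static int example_mode0_matches(const char *muxname, const char *m0_entry)
{
	const char *mode_name = strchr(muxname, '.');
	int mode0_len = mode_name ? mode_name - muxname : 0;

	/* As in omap_mux_init_signal(): no mode0 part matches any entry,
	 * otherwise only the first mode0_len characters are compared. */
	return !mode0_len || !strncmp(muxname, m0_entry, mode0_len);
}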
diff --git a/arch/arm/mach-omap2/mux.h b/arch/arm/mach-omap2/mux.h
index a8e040c2c7e9..350c04f27383 100644
--- a/arch/arm/mach-omap2/mux.h
+++ b/arch/arm/mach-omap2/mux.h
@@ -120,7 +120,7 @@ int omap_mux_init_gpio(int gpio, int val);
* @muxname: Mux name in mode0_name.signal_name format
* @val: Options for the mux register value
*/
-int omap_mux_init_signal(char *muxname, int val);
+int omap_mux_init_signal(const char *muxname, int val);
#else
diff --git a/arch/arm/mach-omap2/mux2420.c b/arch/arm/mach-omap2/mux2420.c
index fdb04a7eb8aa..414af5434456 100644
--- a/arch/arm/mach-omap2/mux2420.c
+++ b/arch/arm/mach-omap2/mux2420.c
@@ -507,7 +507,7 @@ static struct omap_mux __initdata omap2420_muxmodes[] = {
* Balls for 447-pin POP package
*/
#ifdef CONFIG_DEBUG_FS
-struct omap_ball __initdata omap2420_pop_ball[] = {
+static struct omap_ball __initdata omap2420_pop_ball[] = {
_OMAP2420_BALLENTRY(CAM_D0, "y4", NULL),
_OMAP2420_BALLENTRY(CAM_D1, "y3", NULL),
_OMAP2420_BALLENTRY(CAM_D2, "u7", NULL),
diff --git a/arch/arm/mach-omap2/mux2430.c b/arch/arm/mach-omap2/mux2430.c
index 7dcaaa8af32a..84d2c5a7ecd7 100644
--- a/arch/arm/mach-omap2/mux2430.c
+++ b/arch/arm/mach-omap2/mux2430.c
@@ -586,7 +586,7 @@ static struct omap_mux __initdata omap2430_muxmodes[] = {
* 447-pin s-PBGA Package, 0.00mm Ball Pitch (Bottom)
*/
#ifdef CONFIG_DEBUG_FS
-struct omap_ball __initdata omap2430_pop_ball[] = {
+static struct omap_ball __initdata omap2430_pop_ball[] = {
_OMAP2430_BALLENTRY(CAM_D0, "t8", NULL),
_OMAP2430_BALLENTRY(CAM_D1, "t4", NULL),
_OMAP2430_BALLENTRY(CAM_D10, "r4", NULL),
diff --git a/arch/arm/mach-omap2/mux34xx.c b/arch/arm/mach-omap2/mux34xx.c
index f64d7eea3451..574e54ea3ab7 100644
--- a/arch/arm/mach-omap2/mux34xx.c
+++ b/arch/arm/mach-omap2/mux34xx.c
@@ -931,7 +931,7 @@ struct omap_ball __initdata omap3_cbc_ball[] = {
* Signals different on CUS package compared to superset
*/
#if defined(CONFIG_OMAP_MUX) && defined(CONFIG_OMAP_PACKAGE_CUS)
-struct omap_mux __initdata omap3_cus_subset[] = {
+static struct omap_mux __initdata omap3_cus_subset[] = {
_OMAP3_MUXENTRY(CAM_D10, 109,
"cam_d10", NULL, NULL, NULL,
"gpio_109", NULL, NULL, "safe_mode"),
@@ -1077,7 +1077,7 @@ struct omap_mux __initdata omap3_cus_subset[] = {
*/
#if defined(CONFIG_OMAP_MUX) && defined(CONFIG_DEBUG_FS) \
&& defined(CONFIG_OMAP_PACKAGE_CUS)
-struct omap_ball __initdata omap3_cus_ball[] = {
+static struct omap_ball __initdata omap3_cus_ball[] = {
_OMAP3_BALLENTRY(CAM_D0, "ab18", NULL),
_OMAP3_BALLENTRY(CAM_D1, "ac18", NULL),
_OMAP3_BALLENTRY(CAM_D10, "f21", NULL),
@@ -1269,7 +1269,7 @@ struct omap_ball __initdata omap3_cus_ball[] = {
 * Signals different on CBB package compared to superset
*/
#if defined(CONFIG_OMAP_MUX) && defined(CONFIG_OMAP_PACKAGE_CBB)
-struct omap_mux __initdata omap3_cbb_subset[] = {
+static struct omap_mux __initdata omap3_cbb_subset[] = {
_OMAP3_MUXENTRY(CAM_D10, 109,
"cam_d10", NULL, NULL, NULL,
"gpio_109", NULL, NULL, "safe_mode"),
@@ -1390,7 +1390,7 @@ struct omap_mux __initdata omap3_cbb_subset[] = {
*/
#if defined(CONFIG_OMAP_MUX) && defined(CONFIG_DEBUG_FS) \
&& defined(CONFIG_OMAP_PACKAGE_CBB)
-struct omap_ball __initdata omap3_cbb_ball[] = {
+static struct omap_ball __initdata omap3_cbb_ball[] = {
_OMAP3_BALLENTRY(CAM_D0, "ag17", NULL),
_OMAP3_BALLENTRY(CAM_D1, "ah17", NULL),
_OMAP3_BALLENTRY(CAM_D10, "b25", NULL),
@@ -1600,7 +1600,7 @@ struct omap_ball __initdata omap3_cbb_ball[] = {
 * Signals different on 36XX CBP package compared to 34XX CBC package
*/
#if defined(CONFIG_OMAP_MUX) && defined(CONFIG_OMAP_PACKAGE_CBP)
-struct omap_mux __initdata omap36xx_cbp_subset[] = {
+static struct omap_mux __initdata omap36xx_cbp_subset[] = {
_OMAP3_MUXENTRY(CAM_D0, 99,
"cam_d0", NULL, "csi2_dx2", NULL,
"gpio_99", NULL, NULL, "safe_mode"),
@@ -1818,7 +1818,7 @@ struct omap_mux __initdata omap36xx_cbp_subset[] = {
*/
#if defined(CONFIG_OMAP_MUX) && defined(CONFIG_DEBUG_FS) \
&& defined (CONFIG_OMAP_PACKAGE_CBP)
-struct omap_ball __initdata omap36xx_cbp_ball[] = {
+static struct omap_ball __initdata omap36xx_cbp_ball[] = {
_OMAP3_BALLENTRY(CAM_D0, "ag17", NULL),
_OMAP3_BALLENTRY(CAM_D1, "ah17", NULL),
_OMAP3_BALLENTRY(CAM_D10, "b25", NULL),
diff --git a/arch/arm/mach-omap2/omap4-common.c b/arch/arm/mach-omap2/omap4-common.c
index 13dc9794dcc2..2f895553e6a8 100644
--- a/arch/arm/mach-omap2/omap4-common.c
+++ b/arch/arm/mach-omap2/omap4-common.c
@@ -44,6 +44,13 @@ void __init gic_init_irq(void)
}
#ifdef CONFIG_CACHE_L2X0
+
+static void omap4_l2x0_disable(void)
+{
+ /* Disable PL310 L2 Cache controller */
+ omap_smc1(0x102, 0x0);
+}
+
static int __init omap_l2_cache_init(void)
{
/*
@@ -61,10 +68,20 @@ static int __init omap_l2_cache_init(void)
omap_smc1(0x102, 0x1);
/*
- * 32KB way size, 16-way associativity,
- * parity disabled
+ * 16-way associativity, parity disabled
+ * Way size - 32KB (es1.0)
+ * Way size - 64KB (es2.0 +)
*/
- l2x0_init(l2cache_base, 0x0e050000, 0xc0000fff);
+ if (omap_rev() == OMAP4430_REV_ES1_0)
+ l2x0_init(l2cache_base, 0x0e050000, 0xc0000fff);
+ else
+ l2x0_init(l2cache_base, 0x0e070000, 0xc0000fff);
+
+ /*
+	 * Override default outer_cache.disable with an OMAP4
+ * specific one
+ */
+ outer_cache.disable = omap4_l2x0_disable;
return 0;
}
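
For reference, the third argument to l2x0_init() is a mask of AUX_CTRL bits to preserve; everything cleared in the mask is forced to the value in the second argument. Roughly what the generic l2x0 code of this era does with the ES2.0+ values used above (a paraphrase under that assumption, not the actual cache-l2x0.c source):

#include <linux/io.h>
#include <asm/hardware/cache-l2x0.h>

static void example_combine_aux(void __iomem *l2cache_base)
{
	u32 aux = readl(l2cache_base + L2X0_AUX_CTRL);

	aux &= 0xc0000fff;	/* aux_mask: keep bits 31:30 and 11:0 */
	aux |= 0x0e070000;	/* aux_val: 16-way, 64KB way size (es2.0+) */
	writel(aux, l2cache_base + L2X0_AUX_CTRL);
}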
diff --git a/arch/arm/mach-omap2/omap_hwmod.c b/arch/arm/mach-omap2/omap_hwmod.c
index cb911d7d1a3c..5a30658444d0 100644
--- a/arch/arm/mach-omap2/omap_hwmod.c
+++ b/arch/arm/mach-omap2/omap_hwmod.c
@@ -13,10 +13,102 @@
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*
- * This code manages "OMAP modules" (on-chip devices) and their
- * integration with Linux device driver and bus code.
- *
- * References:
+ * Introduction
+ * ------------
+ * One way to view an OMAP SoC is as a collection of largely unrelated
+ * IP blocks connected by interconnects. The IP blocks include
+ * devices such as ARM processors, audio serial interfaces, UARTs,
+ * etc. Some of these devices, like the DSP, are created by TI;
+ * others, like the SGX, largely originate from external vendors. In
+ * TI's documentation, on-chip devices are referred to as "OMAP
+ * modules." Some of these IP blocks are identical across several
+ * OMAP versions. Others are revised frequently.
+ *
+ * These OMAP modules are tied together by various interconnects.
+ * Most of the address and data flow between modules is via OCP-based
+ * interconnects such as the L3 and L4 buses; but there are other
+ * interconnects that distribute the hardware clock tree, handle idle
+ * and reset signaling, supply power, and connect the modules to
+ * various pads or balls on the OMAP package.
+ *
+ * OMAP hwmod provides a consistent way to describe the on-chip
+ * hardware blocks and their integration into the rest of the chip.
+ * This description can be automatically generated from the TI
+ * hardware database. OMAP hwmod provides a standard, consistent API
+ * to reset, enable, idle, and disable these hardware blocks. And
+ * hwmod provides a way for other core code, such as the Linux device
+ * code or the OMAP power management and address space mapping code,
+ * to query the hardware database.
+ *
+ * Using hwmod
+ * -----------
+ * Drivers won't call hwmod functions directly. That is done by the
+ * omap_device code, and on rare occasions, by custom integration code
+ * in arch/arm/ *omap*. The omap_device code includes functions to
+ * build a struct platform_device using omap_hwmod data, and that is
+ * currently how hwmod data is communicated to drivers and to the
+ * Linux driver model. Most drivers will call omap_hwmod functions only
+ * indirectly, via pm_runtime*() functions.
+ *
+ * From a layering perspective, here is where the OMAP hwmod code
+ * fits into the kernel software stack:
+ *
+ * +-------------------------------+
+ * | Device driver code |
+ * | (e.g., drivers/) |
+ * +-------------------------------+
+ * | Linux driver model |
+ * | (platform_device / |
+ * | platform_driver data/code) |
+ * +-------------------------------+
+ * | OMAP core-driver integration |
+ * |(arch/arm/mach-omap2/devices.c)|
+ * +-------------------------------+
+ * | omap_device code |
+ * | (../plat-omap/omap_device.c) |
+ * +-------------------------------+
+ * ----> | omap_hwmod code/data | <-----
+ * | (../mach-omap2/omap_hwmod*) |
+ * +-------------------------------+
+ * | OMAP clock/PRCM/register fns |
+ * | (__raw_{read,write}l, clk*) |
+ * +-------------------------------+
+ *
+ * Device drivers should not contain any OMAP-specific code or data in
+ * them. They should only contain code to operate the IP block that
+ * the driver is responsible for. This is because these IP blocks can
+ * also appear in other SoCs, either from TI (such as DaVinci) or from
+ * other manufacturers; and drivers should be reusable across other
+ * platforms.
+ *
+ * The OMAP hwmod code also will attempt to reset and idle all on-chip
+ * devices upon boot. The goal here is for the kernel to be
+ * completely self-reliant and independent from bootloaders. This is
+ * to ensure a repeatable configuration, both to ensure consistent
+ * runtime behavior, and to make it easier for others to reproduce
+ * bugs.
+ *
+ * OMAP module activity states
+ * ---------------------------
+ * The hwmod code considers modules to be in one of several activity
+ * states. IP blocks start out in an UNKNOWN state, then once they
+ * are registered via the hwmod code, proceed to the REGISTERED state.
+ * Once their clock names are resolved to clock pointers, the module
+ * enters the CLKS_INITED state; and finally, once the module has been
+ * reset and the integration registers programmed, the INITIALIZED state
+ * is entered. The hwmod code will then place the module into either
+ * the IDLE state to save power, or in the case of a critical system
+ * module, the ENABLED state.
+ *
+ * OMAP core integration code can then call omap_hwmod*() functions
+ * directly to move the module between the IDLE, ENABLED, and DISABLED
+ * states, as needed. This is done during both the PM idle loop, and
+ * in the OMAP core integration code's implementation of the PM runtime
+ * functions.
+ *
+ * References
+ * ----------
+ * This is a partial list.
* - OMAP2420 Multimedia Processor Silicon Revision 2.1.1, 2.2 (SWPU064)
* - OMAP2430 Multimedia Device POP Silicon Revision 2.1 (SWPU090)
* - OMAP34xx Multimedia Device Silicon Revision 3.1 (SWPU108)
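
To make the layering above concrete, here is a hedged sketch of how OMAP core integration code (not a device driver) might walk one module through the state machine just described, using the public API exported by this file (omap_hwmod_lookup(), omap_hwmod_enable(), omap_hwmod_idle()); drivers themselves would only reach this path indirectly via omap_device and pm_runtime:

#include <linux/errno.h>
#include <plat/omap_hwmod.h>

static int example_cycle_uart1(void)
{
	struct omap_hwmod *oh = omap_hwmod_lookup("uart1");

	if (!oh)
		return -ENODEV;

	omap_hwmod_enable(oh);		/* IDLE/DISABLED -> ENABLED */
	/* ... program the module's registers here ... */
	omap_hwmod_idle(oh);		/* ENABLED -> IDLE */

	return 0;
}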
@@ -50,11 +142,13 @@
#include <plat/powerdomain.h>
#include <plat/clock.h>
#include <plat/omap_hwmod.h>
+#include <plat/prcm.h>
#include "cm.h"
+#include "prm.h"
-/* Maximum microseconds to wait for OMAP module to reset */
-#define MAX_MODULE_RESET_WAIT 10000
+/* Maximum microseconds to wait for OMAP module to softreset */
+#define MAX_MODULE_SOFTRESET_WAIT 10000
/* Name of the OMAP hwmod for the MPU */
#define MPU_INITIATOR_NAME "mpu"
@@ -90,7 +184,7 @@ static int _update_sysc_cache(struct omap_hwmod *oh)
/* XXX ensure module interface clock is up */
- oh->_sysc_cache = omap_hwmod_readl(oh, oh->class->sysc->sysc_offs);
+ oh->_sysc_cache = omap_hwmod_read(oh, oh->class->sysc->sysc_offs);
if (!(oh->class->sysc->sysc_flags & SYSC_NO_CACHE))
oh->_int_flags |= _HWMOD_SYSCONFIG_LOADED;
@@ -117,7 +211,7 @@ static void _write_sysconfig(u32 v, struct omap_hwmod *oh)
if (oh->_sysc_cache != v) {
oh->_sysc_cache = v;
- omap_hwmod_writel(v, oh, oh->class->sysc->sysc_offs);
+ omap_hwmod_write(v, oh, oh->class->sysc->sysc_offs);
}
}
@@ -544,6 +638,36 @@ static int _disable_clocks(struct omap_hwmod *oh)
return 0;
}
+static void _enable_optional_clocks(struct omap_hwmod *oh)
+{
+ struct omap_hwmod_opt_clk *oc;
+ int i;
+
+ pr_debug("omap_hwmod: %s: enabling optional clocks\n", oh->name);
+
+ for (i = oh->opt_clks_cnt, oc = oh->opt_clks; i > 0; i--, oc++)
+ if (oc->_clk) {
+ pr_debug("omap_hwmod: enable %s:%s\n", oc->role,
+ oc->_clk->name);
+ clk_enable(oc->_clk);
+ }
+}
+
+static void _disable_optional_clocks(struct omap_hwmod *oh)
+{
+ struct omap_hwmod_opt_clk *oc;
+ int i;
+
+ pr_debug("omap_hwmod: %s: disabling optional clocks\n", oh->name);
+
+ for (i = oh->opt_clks_cnt, oc = oh->opt_clks; i > 0; i--, oc++)
+ if (oc->_clk) {
+ pr_debug("omap_hwmod: disable %s:%s\n", oc->role,
+ oc->_clk->name);
+ clk_disable(oc->_clk);
+ }
+}
+
/**
* _find_mpu_port_index - find hwmod OCP slave port ID intended for MPU use
* @oh: struct omap_hwmod *
@@ -622,7 +746,7 @@ static void __iomem *_find_mpu_rt_base(struct omap_hwmod *oh, u8 index)
}
/**
- * _sysc_enable - try to bring a module out of idle via OCP_SYSCONFIG
+ * _enable_sysc - try to bring a module out of idle via OCP_SYSCONFIG
* @oh: struct omap_hwmod *
*
* If module is marked as SWSUP_SIDLE, force the module out of slave
@@ -630,7 +754,7 @@ static void __iomem *_find_mpu_rt_base(struct omap_hwmod *oh, u8 index)
* as SWSUP_MSUSPEND, force the module out of master standby;
* otherwise, configure it for smart-standby. No return value.
*/
-static void _sysc_enable(struct omap_hwmod *oh)
+static void _enable_sysc(struct omap_hwmod *oh)
{
u8 idlemode, sf;
u32 v;
@@ -653,14 +777,6 @@ static void _sysc_enable(struct omap_hwmod *oh)
_set_master_standbymode(oh, idlemode, &v);
}
- if (sf & SYSC_HAS_AUTOIDLE) {
- idlemode = (oh->flags & HWMOD_NO_OCP_AUTOIDLE) ?
- 0 : 1;
- _set_module_autoidle(oh, idlemode, &v);
- }
-
- /* XXX OCP ENAWAKEUP bit? */
-
/*
* XXX The clock framework should handle this, by
* calling into this code. But this must wait until the
@@ -671,10 +787,25 @@ static void _sysc_enable(struct omap_hwmod *oh)
_set_clockactivity(oh, oh->class->sysc->clockact, &v);
_write_sysconfig(v, oh);
+
+ /* If slave is in SMARTIDLE, also enable wakeup */
+ if ((sf & SYSC_HAS_SIDLEMODE) && !(oh->flags & HWMOD_SWSUP_SIDLE))
+ _enable_wakeup(oh);
+
+ /*
+ * Set the autoidle bit only after setting the smartidle bit
+ * Setting this will not have any impact on the other modules.
+ */
+ if (sf & SYSC_HAS_AUTOIDLE) {
+ idlemode = (oh->flags & HWMOD_NO_OCP_AUTOIDLE) ?
+ 0 : 1;
+ _set_module_autoidle(oh, idlemode, &v);
+ _write_sysconfig(v, oh);
+ }
}
/**
- * _sysc_idle - try to put a module into idle via OCP_SYSCONFIG
+ * _idle_sysc - try to put a module into idle via OCP_SYSCONFIG
* @oh: struct omap_hwmod *
*
* If module is marked as SWSUP_SIDLE, force the module into slave
@@ -682,7 +813,7 @@ static void _sysc_enable(struct omap_hwmod *oh)
* as SWSUP_MSUSPEND, force the module into master standby; otherwise,
* configure it for smart-standby. No return value.
*/
-static void _sysc_idle(struct omap_hwmod *oh)
+static void _idle_sysc(struct omap_hwmod *oh)
{
u8 idlemode, sf;
u32 v;
@@ -709,13 +840,13 @@ static void _sysc_idle(struct omap_hwmod *oh)
}
/**
- * _sysc_shutdown - force a module into idle via OCP_SYSCONFIG
+ * _shutdown_sysc - force a module into idle via OCP_SYSCONFIG
* @oh: struct omap_hwmod *
*
* Force the module into slave idle and master suspend. No return
* value.
*/
-static void _sysc_shutdown(struct omap_hwmod *oh)
+static void _shutdown_sysc(struct omap_hwmod *oh)
{
u32 v;
u8 sf;
@@ -767,10 +898,10 @@ static struct omap_hwmod *_lookup(const char *name)
* @data: not used; pass NULL
*
* Called by omap_hwmod_late_init() (after omap2_clk_init()).
- * Resolves all clock names embedded in the hwmod. Must be called
- * with omap_hwmod_mutex held. Returns -EINVAL if the omap_hwmod
- * has not yet been registered or if the clocks have already been
- * initialized, 0 on success, or a non-zero error on failure.
+ * Resolves all clock names embedded in the hwmod. Returns -EINVAL if
+ * the omap_hwmod has not yet been registered or if the clocks have
+ * already been initialized, 0 on success, or a non-zero error on
+ * failure.
*/
static int _init_clocks(struct omap_hwmod *oh, void *data)
{
@@ -834,56 +965,202 @@ static int _wait_target_ready(struct omap_hwmod *oh)
}
/**
+ * _lookup_hardreset - return the register bit shift for this hwmod/reset line
+ * @oh: struct omap_hwmod *
+ * @name: name of the reset line in the context of this hwmod
+ *
+ * Return the bit position of the reset line that matches the
+ * input name. Return -ENOENT if not found.
+ */
+static u8 _lookup_hardreset(struct omap_hwmod *oh, const char *name)
+{
+ int i;
+
+ for (i = 0; i < oh->rst_lines_cnt; i++) {
+ const char *rst_line = oh->rst_lines[i].name;
+ if (!strcmp(rst_line, name)) {
+ u8 shift = oh->rst_lines[i].rst_shift;
+ pr_debug("omap_hwmod: %s: _lookup_hardreset: %s: %d\n",
+ oh->name, rst_line, shift);
+
+ return shift;
+ }
+ }
+
+ return -ENOENT;
+}
+
+/**
+ * _assert_hardreset - assert the HW reset line of submodules
+ * contained in the hwmod module.
+ * @oh: struct omap_hwmod *
+ * @name: name of the reset line to lookup and assert
+ *
+ * Some IP blocks, like the dsp, ipu or iva, contain processors that require
+ * an HW reset line to be asserted / deasserted in order to fully enable
+ * the IP.
+ */
+static int _assert_hardreset(struct omap_hwmod *oh, const char *name)
+{
+ u8 shift;
+
+ if (!oh)
+ return -EINVAL;
+
+ shift = _lookup_hardreset(oh, name);
+ if (IS_ERR_VALUE(shift))
+ return shift;
+
+ if (cpu_is_omap24xx() || cpu_is_omap34xx())
+ return omap2_prm_assert_hardreset(oh->prcm.omap2.module_offs,
+ shift);
+ else if (cpu_is_omap44xx())
+ return omap4_prm_assert_hardreset(oh->prcm.omap4.rstctrl_reg,
+ shift);
+ else
+ return -EINVAL;
+}
+
+/**
+ * _deassert_hardreset - deassert the HW reset line of submodules contained
+ * in the hwmod module.
+ * @oh: struct omap_hwmod *
+ * @name: name of the reset line to look up and deassert
+ *
+ * Some IP blocks, like the dsp, ipu or iva, contain processors that require
+ * an HW reset line to be asserted / deasserted in order to fully enable
+ * the IP.
+ */
+static int _deassert_hardreset(struct omap_hwmod *oh, const char *name)
+{
+ u8 shift;
+ int r;
+
+ if (!oh)
+ return -EINVAL;
+
+ shift = _lookup_hardreset(oh, name);
+ if (IS_ERR_VALUE(shift))
+ return shift;
+
+ if (cpu_is_omap24xx() || cpu_is_omap34xx())
+ r = omap2_prm_deassert_hardreset(oh->prcm.omap2.module_offs,
+ shift);
+ else if (cpu_is_omap44xx())
+ r = omap4_prm_deassert_hardreset(oh->prcm.omap4.rstctrl_reg,
+ shift);
+ else
+ return -EINVAL;
+
+ if (r == -EBUSY)
+ pr_warning("omap_hwmod: %s: failed to hardreset\n", oh->name);
+
+ return r;
+}
+
+/**
+ * _read_hardreset - read the HW reset line state of submodules
+ * contained in the hwmod module
+ * @oh: struct omap_hwmod *
+ * @name: name of the reset line to look up and read
+ *
+ * Return the state of the reset line.
+ */
+static int _read_hardreset(struct omap_hwmod *oh, const char *name)
+{
+ u8 shift;
+
+ if (!oh)
+ return -EINVAL;
+
+ shift = _lookup_hardreset(oh, name);
+ if (IS_ERR_VALUE(shift))
+ return shift;
+
+ if (cpu_is_omap24xx() || cpu_is_omap34xx()) {
+ return omap2_prm_is_hardreset_asserted(oh->prcm.omap2.module_offs,
+ shift);
+ } else if (cpu_is_omap44xx()) {
+ return omap4_prm_is_hardreset_asserted(oh->prcm.omap4.rstctrl_reg,
+ shift);
+ } else {
+ return -EINVAL;
+ }
+}
+
+/**
* _reset - reset an omap_hwmod
* @oh: struct omap_hwmod *
*
* Resets an omap_hwmod @oh via the OCP_SYSCONFIG bit. hwmod must be
- * enabled for this to work. Must be called with omap_hwmod_mutex
- * held. Returns -EINVAL if the hwmod cannot be reset this way or if
- * the hwmod is in the wrong state, -ETIMEDOUT if the module did not
- * reset in time, or 0 upon success.
+ * enabled for this to work. Returns -EINVAL if the hwmod cannot be
+ * reset this way or if the hwmod is in the wrong state, -ETIMEDOUT if
+ * the module did not reset in time, or 0 upon success.
+ *
+ * In OMAP3 a specific SYSSTATUS register is used to get the reset status.
+ * Starting in OMAP4, some IPs do not have a SYSSTATUS register and instead
+ * use the SYSCONFIG softreset bit to provide the status.
+ *
+ * Note that some IP blocks, like McBSP, have a reset control but no reset status.
*/
static int _reset(struct omap_hwmod *oh)
{
- u32 r, v;
+ u32 v;
int c = 0;
+ int ret = 0;
if (!oh->class->sysc ||
- !(oh->class->sysc->sysc_flags & SYSC_HAS_SOFTRESET) ||
- (oh->class->sysc->sysc_flags & SYSS_MISSING))
+ !(oh->class->sysc->sysc_flags & SYSC_HAS_SOFTRESET))
return -EINVAL;
/* clocks must be on for this operation */
if (oh->_state != _HWMOD_STATE_ENABLED) {
- WARN(1, "omap_hwmod: %s: reset can only be entered from "
- "enabled state\n", oh->name);
+ pr_warning("omap_hwmod: %s: reset can only be entered from "
+ "enabled state\n", oh->name);
return -EINVAL;
}
+	/* For some modules, all optional clocks need to be enabled as well */
+ if (oh->flags & HWMOD_CONTROL_OPT_CLKS_IN_RESET)
+ _enable_optional_clocks(oh);
+
pr_debug("omap_hwmod: %s: resetting\n", oh->name);
v = oh->_sysc_cache;
- r = _set_softreset(oh, &v);
- if (r)
- return r;
+ ret = _set_softreset(oh, &v);
+ if (ret)
+ goto dis_opt_clks;
_write_sysconfig(v, oh);
- omap_test_timeout((omap_hwmod_readl(oh, oh->class->sysc->syss_offs) &
- SYSS_RESETDONE_MASK),
- MAX_MODULE_RESET_WAIT, c);
-
- if (c == MAX_MODULE_RESET_WAIT)
- WARN(1, "omap_hwmod: %s: failed to reset in %d usec\n",
- oh->name, MAX_MODULE_RESET_WAIT);
+ if (oh->class->sysc->sysc_flags & SYSS_HAS_RESET_STATUS)
+ omap_test_timeout((omap_hwmod_read(oh,
+ oh->class->sysc->syss_offs)
+ & SYSS_RESETDONE_MASK),
+ MAX_MODULE_SOFTRESET_WAIT, c);
+ else if (oh->class->sysc->sysc_flags & SYSC_HAS_RESET_STATUS)
+ omap_test_timeout(!(omap_hwmod_read(oh,
+ oh->class->sysc->sysc_offs)
+ & SYSC_TYPE2_SOFTRESET_MASK),
+ MAX_MODULE_SOFTRESET_WAIT, c);
+
+ if (c == MAX_MODULE_SOFTRESET_WAIT)
+ pr_warning("omap_hwmod: %s: softreset failed (waited %d usec)\n",
+ oh->name, MAX_MODULE_SOFTRESET_WAIT);
else
- pr_debug("omap_hwmod: %s: reset in %d usec\n", oh->name, c);
+ pr_debug("omap_hwmod: %s: softreset in %d usec\n", oh->name, c);
/*
* XXX add _HWMOD_STATE_WEDGED for modules that don't come back from
* _wait_target_ready() or _reset()
*/
- return (c == MAX_MODULE_RESET_WAIT) ? -ETIMEDOUT : 0;
+ ret = (c == MAX_MODULE_SOFTRESET_WAIT) ? -ETIMEDOUT : 0;
+
+dis_opt_clks:
+ if (oh->flags & HWMOD_CONTROL_OPT_CLKS_IN_RESET)
+ _disable_optional_clocks(oh);
+
+ return ret;
}
/**
@@ -891,9 +1168,11 @@ static int _reset(struct omap_hwmod *oh)
* @oh: struct omap_hwmod *
*
* Enables an omap_hwmod @oh such that the MPU can access the hwmod's
- * register target. Must be called with omap_hwmod_mutex held.
- * Returns -EINVAL if the hwmod is in the wrong state or passes along
- * the return value of _wait_target_ready().
+ * register target. (This function has a full name --
+ * _omap_hwmod_enable() rather than simply _enable() -- because it is
+ * currently required by the pm34xx.c idle loop.) Returns -EINVAL if
+ * the hwmod is in the wrong state or passes along the return value of
+ * _wait_target_ready().
*/
int _omap_hwmod_enable(struct omap_hwmod *oh)
{
@@ -909,6 +1188,15 @@ int _omap_hwmod_enable(struct omap_hwmod *oh)
pr_debug("omap_hwmod: %s: enabling\n", oh->name);
+ /*
+	 * If an IP contains only one HW reset line, then de-assert it in order
+	 * to allow the clocks to be enabled. Otherwise the PRCM will return an
+	 * Intransition status and the init will fail.
+ */
+ if ((oh->_state == _HWMOD_STATE_INITIALIZED ||
+ oh->_state == _HWMOD_STATE_DISABLED) && oh->rst_lines_cnt == 1)
+ _deassert_hardreset(oh, oh->rst_lines[0].name);
+
/* XXX mux balls */
_add_initiator_dep(oh, mpu_oh);
@@ -922,7 +1210,7 @@ int _omap_hwmod_enable(struct omap_hwmod *oh)
if (oh->class->sysc) {
if (!(oh->_int_flags & _HWMOD_SYSCONFIG_LOADED))
_update_sysc_cache(oh);
- _sysc_enable(oh);
+ _enable_sysc(oh);
}
} else {
pr_debug("omap_hwmod: %s: _wait_target_ready: %d\n",
@@ -933,12 +1221,14 @@ int _omap_hwmod_enable(struct omap_hwmod *oh)
}
/**
- * _idle - idle an omap_hwmod
+ * _omap_hwmod_idle - idle an omap_hwmod
* @oh: struct omap_hwmod *
*
* Idles an omap_hwmod @oh. This should be called once the hwmod has
- * no further work. Returns -EINVAL if the hwmod is in the wrong
- * state or returns 0.
+ * no further work. (This function has a full name --
+ * _omap_hwmod_idle() rather than simply _idle() -- because it is
+ * currently required by the pm34xx.c idle loop.) Returns -EINVAL if
+ * the hwmod is in the wrong state or returns 0.
*/
int _omap_hwmod_idle(struct omap_hwmod *oh)
{
@@ -951,7 +1241,7 @@ int _omap_hwmod_idle(struct omap_hwmod *oh)
pr_debug("omap_hwmod: %s: idling\n", oh->name);
if (oh->class->sysc)
- _sysc_idle(oh);
+ _idle_sysc(oh);
_del_initiator_dep(oh, mpu_oh);
_disable_clocks(oh);
@@ -981,10 +1271,21 @@ static int _shutdown(struct omap_hwmod *oh)
pr_debug("omap_hwmod: %s: disabling\n", oh->name);
if (oh->class->sysc)
- _sysc_shutdown(oh);
- _del_initiator_dep(oh, mpu_oh);
- /* XXX what about the other system initiators here? DMA, tesla, d2d */
- _disable_clocks(oh);
+ _shutdown_sysc(oh);
+
+ /*
+ * If an IP contains only one HW reset line, then assert it
+ * before disabling the clocks and shutting down the IP.
+ */
+ if (oh->rst_lines_cnt == 1)
+ _assert_hardreset(oh, oh->rst_lines[0].name);
+
+ /* clocks and deps are already disabled in idle */
+ if (oh->_state == _HWMOD_STATE_ENABLED) {
+ _del_initiator_dep(oh, mpu_oh);
+ /* XXX what about the other system initiators here? dma, dsp */
+ _disable_clocks(oh);
+ }
/* XXX Should this code also force-disable the optional clocks? */
/* XXX mux any associated balls to safe mode */
@@ -1000,11 +1301,10 @@ static int _shutdown(struct omap_hwmod *oh)
* @skip_setup_idle_p: do not idle hwmods at the end of the fn if 1
*
* Writes the CLOCKACTIVITY bits @clockact to the hwmod @oh
- * OCP_SYSCONFIG register. Must be called with omap_hwmod_mutex held.
- * @skip_setup_idle is intended to be used on a system that will not
- * call omap_hwmod_enable() to enable devices (e.g., a system without
- * PM runtime). Returns -EINVAL if the hwmod is in the wrong state or
- * returns 0.
+ * OCP_SYSCONFIG register. @skip_setup_idle is intended to be used on
+ * a system that will not call omap_hwmod_enable() to enable devices
+ * (e.g., a system without PM runtime). Returns -EINVAL if the hwmod
+ * is in the wrong state or returns 0.
*/
static int _setup(struct omap_hwmod *oh, void *data)
{
@@ -1034,8 +1334,19 @@ static int _setup(struct omap_hwmod *oh, void *data)
}
}
+ mutex_init(&oh->_mutex);
oh->_state = _HWMOD_STATE_INITIALIZED;
+ /*
+	 * In the case of a hwmod with a hardreset line that should not be
+	 * de-asserted at boot time, we have to keep the module
+ * initialized, because we cannot enable it properly with the
+ * reset asserted. Exit without warning because that behavior is
+ * expected.
+ */
+ if ((oh->flags & HWMOD_INIT_NO_RESET) && oh->rst_lines_cnt == 1)
+ return 0;
+
r = _omap_hwmod_enable(oh);
if (r) {
pr_warning("omap_hwmod: %s: cannot be enabled (%d)\n",
@@ -1044,16 +1355,16 @@ static int _setup(struct omap_hwmod *oh, void *data)
}
if (!(oh->flags & HWMOD_INIT_NO_RESET)) {
+ _reset(oh);
+
/*
- * XXX Do the OCP_SYSCONFIG bits need to be
- * reprogrammed after a reset? If not, then this can
- * be removed. If they do, then probably the
- * _omap_hwmod_enable() function should be split to avoid the
- * rewrite of the OCP_SYSCONFIG register.
+ * OCP_SYSCONFIG bits need to be reprogrammed after a softreset.
+ * The _omap_hwmod_enable() function should be split to
+ * avoid the rewrite of the OCP_SYSCONFIG register.
*/
if (oh->class->sysc) {
_update_sysc_cache(oh);
- _sysc_enable(oh);
+ _enable_sysc(oh);
}
}
@@ -1067,14 +1378,20 @@ static int _setup(struct omap_hwmod *oh, void *data)
/* Public functions */
-u32 omap_hwmod_readl(struct omap_hwmod *oh, u16 reg_offs)
+u32 omap_hwmod_read(struct omap_hwmod *oh, u16 reg_offs)
{
- return __raw_readl(oh->_mpu_rt_va + reg_offs);
+ if (oh->flags & HWMOD_16BIT_REG)
+ return __raw_readw(oh->_mpu_rt_va + reg_offs);
+ else
+ return __raw_readl(oh->_mpu_rt_va + reg_offs);
}
-void omap_hwmod_writel(u32 v, struct omap_hwmod *oh, u16 reg_offs)
+void omap_hwmod_write(u32 v, struct omap_hwmod *oh, u16 reg_offs)
{
- __raw_writel(v, oh->_mpu_rt_va + reg_offs);
+ if (oh->flags & HWMOD_16BIT_REG)
+ __raw_writew(v, oh->_mpu_rt_va + reg_offs);
+ else
+ __raw_writel(v, oh->_mpu_rt_va + reg_offs);
}
/**
@@ -1309,7 +1626,7 @@ int omap_hwmod_unregister(struct omap_hwmod *oh)
* omap_hwmod_enable - enable an omap_hwmod
* @oh: struct omap_hwmod *
*
- * Enable an omap_hwomd @oh. Intended to be called by omap_device_enable().
+ * Enable an omap_hwmod @oh. Intended to be called by omap_device_enable().
* Returns -EINVAL on error or passes along the return value from _enable().
*/
int omap_hwmod_enable(struct omap_hwmod *oh)
@@ -1319,9 +1636,9 @@ int omap_hwmod_enable(struct omap_hwmod *oh)
if (!oh)
return -EINVAL;
- mutex_lock(&omap_hwmod_mutex);
+ mutex_lock(&oh->_mutex);
r = _omap_hwmod_enable(oh);
- mutex_unlock(&omap_hwmod_mutex);
+ mutex_unlock(&oh->_mutex);
return r;
}
@@ -1331,7 +1648,7 @@ int omap_hwmod_enable(struct omap_hwmod *oh)
* omap_hwmod_idle - idle an omap_hwmod
* @oh: struct omap_hwmod *
*
- * Idle an omap_hwomd @oh. Intended to be called by omap_device_idle().
+ * Idle an omap_hwmod @oh. Intended to be called by omap_device_idle().
* Returns -EINVAL on error or passes along the return value from _idle().
*/
int omap_hwmod_idle(struct omap_hwmod *oh)
@@ -1339,9 +1656,9 @@ int omap_hwmod_idle(struct omap_hwmod *oh)
if (!oh)
return -EINVAL;
- mutex_lock(&omap_hwmod_mutex);
+ mutex_lock(&oh->_mutex);
_omap_hwmod_idle(oh);
- mutex_unlock(&omap_hwmod_mutex);
+ mutex_unlock(&oh->_mutex);
return 0;
}
@@ -1350,7 +1667,7 @@ int omap_hwmod_idle(struct omap_hwmod *oh)
* omap_hwmod_shutdown - shutdown an omap_hwmod
* @oh: struct omap_hwmod *
*
- * Shutdown an omap_hwomd @oh. Intended to be called by
+ * Shutdown an omap_hwmod @oh. Intended to be called by
* omap_device_shutdown(). Returns -EINVAL on error or passes along
* the return value from _shutdown().
*/
@@ -1359,9 +1676,9 @@ int omap_hwmod_shutdown(struct omap_hwmod *oh)
if (!oh)
return -EINVAL;
- mutex_lock(&omap_hwmod_mutex);
+ mutex_lock(&oh->_mutex);
_shutdown(oh);
- mutex_unlock(&omap_hwmod_mutex);
+ mutex_unlock(&oh->_mutex);
return 0;
}
@@ -1374,9 +1691,9 @@ int omap_hwmod_shutdown(struct omap_hwmod *oh)
*/
int omap_hwmod_enable_clocks(struct omap_hwmod *oh)
{
- mutex_lock(&omap_hwmod_mutex);
+ mutex_lock(&oh->_mutex);
_enable_clocks(oh);
- mutex_unlock(&omap_hwmod_mutex);
+ mutex_unlock(&oh->_mutex);
return 0;
}
@@ -1389,9 +1706,9 @@ int omap_hwmod_enable_clocks(struct omap_hwmod *oh)
*/
int omap_hwmod_disable_clocks(struct omap_hwmod *oh)
{
- mutex_lock(&omap_hwmod_mutex);
+ mutex_lock(&oh->_mutex);
_disable_clocks(oh);
- mutex_unlock(&omap_hwmod_mutex);
+ mutex_unlock(&oh->_mutex);
return 0;
}
@@ -1421,7 +1738,7 @@ void omap_hwmod_ocp_barrier(struct omap_hwmod *oh)
* Forces posted writes to complete on the OCP thread handling
* register writes
*/
- omap_hwmod_readl(oh, oh->class->sysc->sysc_offs);
+ omap_hwmod_read(oh, oh->class->sysc->sysc_offs);
}
/**
@@ -1430,20 +1747,18 @@ void omap_hwmod_ocp_barrier(struct omap_hwmod *oh)
*
* Under some conditions, a driver may wish to reset the entire device.
* Called from omap_device code. Returns -EINVAL on error or passes along
- * the return value from _reset()/_enable().
+ * the return value from _reset().
*/
int omap_hwmod_reset(struct omap_hwmod *oh)
{
int r;
- if (!oh || !(oh->_state & _HWMOD_STATE_ENABLED))
+ if (!oh)
return -EINVAL;
- mutex_lock(&omap_hwmod_mutex);
+ mutex_lock(&oh->_mutex);
r = _reset(oh);
- if (!r)
- r = _omap_hwmod_enable(oh);
- mutex_unlock(&omap_hwmod_mutex);
+ mutex_unlock(&oh->_mutex);
return r;
}
@@ -1468,7 +1783,7 @@ int omap_hwmod_count_resources(struct omap_hwmod *oh)
{
int ret, i;
- ret = oh->mpu_irqs_cnt + oh->sdma_chs_cnt;
+ ret = oh->mpu_irqs_cnt + oh->sdma_reqs_cnt;
for (i = 0; i < oh->slaves_cnt; i++)
ret += oh->slaves[i]->addr_cnt;
@@ -1501,10 +1816,10 @@ int omap_hwmod_fill_resources(struct omap_hwmod *oh, struct resource *res)
r++;
}
- for (i = 0; i < oh->sdma_chs_cnt; i++) {
- (res + r)->name = (oh->sdma_chs + i)->name;
- (res + r)->start = (oh->sdma_chs + i)->dma_ch;
- (res + r)->end = (oh->sdma_chs + i)->dma_ch;
+ for (i = 0; i < oh->sdma_reqs_cnt; i++) {
+ (res + r)->name = (oh->sdma_reqs + i)->name;
+ (res + r)->start = (oh->sdma_reqs + i)->dma_req;
+ (res + r)->end = (oh->sdma_reqs + i)->dma_req;
(res + r)->flags = IORESOURCE_DMA;
r++;
}
@@ -1644,9 +1959,9 @@ int omap_hwmod_enable_wakeup(struct omap_hwmod *oh)
!(oh->class->sysc->sysc_flags & SYSC_HAS_ENAWAKEUP))
return -EINVAL;
- mutex_lock(&omap_hwmod_mutex);
+ mutex_lock(&oh->_mutex);
_enable_wakeup(oh);
- mutex_unlock(&omap_hwmod_mutex);
+ mutex_unlock(&oh->_mutex);
return 0;
}
@@ -1669,14 +1984,92 @@ int omap_hwmod_disable_wakeup(struct omap_hwmod *oh)
!(oh->class->sysc->sysc_flags & SYSC_HAS_ENAWAKEUP))
return -EINVAL;
- mutex_lock(&omap_hwmod_mutex);
+ mutex_lock(&oh->_mutex);
_disable_wakeup(oh);
- mutex_unlock(&omap_hwmod_mutex);
+ mutex_unlock(&oh->_mutex);
return 0;
}
/**
+ * omap_hwmod_assert_hardreset - assert the HW reset line of submodules
+ * contained in the hwmod module.
+ * @oh: struct omap_hwmod *
+ * @name: name of the reset line to lookup and assert
+ *
+ * Some IP blocks, like the dsp, ipu or iva, contain processors that require
+ * an HW reset line to be asserted / deasserted in order to fully enable
+ * the IP. Returns -EINVAL if @oh is null or if the operation is not
+ * yet supported on this OMAP; otherwise, passes along the return value
+ * from _assert_hardreset().
+ */
+int omap_hwmod_assert_hardreset(struct omap_hwmod *oh, const char *name)
+{
+ int ret;
+
+ if (!oh)
+ return -EINVAL;
+
+ mutex_lock(&oh->_mutex);
+ ret = _assert_hardreset(oh, name);
+ mutex_unlock(&oh->_mutex);
+
+ return ret;
+}
+
+/**
+ * omap_hwmod_deassert_hardreset - deassert the HW reset line of submodules
+ * contained in the hwmod module.
+ * @oh: struct omap_hwmod *
+ * @name: name of the reset line to look up and deassert
+ *
+ * Some IP blocks, like the dsp, ipu or iva, contain processors that require
+ * an HW reset line to be asserted / deasserted in order to fully enable
+ * the IP. Returns -EINVAL if @oh is null or if the operation is not
+ * yet supported on this OMAP; otherwise, passes along the return value
+ * from _deassert_hardreset().
+ */
+int omap_hwmod_deassert_hardreset(struct omap_hwmod *oh, const char *name)
+{
+ int ret;
+
+ if (!oh)
+ return -EINVAL;
+
+ mutex_lock(&oh->_mutex);
+ ret = _deassert_hardreset(oh, name);
+ mutex_unlock(&oh->_mutex);
+
+ return ret;
+}
+
+/**
+ * omap_hwmod_read_hardreset - read the HW reset line state of submodules
+ * contained in the hwmod module
+ * @oh: struct omap_hwmod *
+ * @name: name of the reset line to look up and read
+ *
+ * Return the current state of the hwmod @oh's reset line named @name:
+ * returns -EINVAL upon parameter error or if this operation
+ * is unsupported on the current OMAP; otherwise, passes along the return
+ * value from _read_hardreset().
+ */
+int omap_hwmod_read_hardreset(struct omap_hwmod *oh, const char *name)
+{
+ int ret;
+
+ if (!oh)
+ return -EINVAL;
+
+ mutex_lock(&oh->_mutex);
+ ret = _read_hardreset(oh, name);
+ mutex_unlock(&oh->_mutex);
+
+ return ret;
+}
+
+
+/**
* omap_hwmod_for_each_by_class - call @fn for each hwmod of class @classname
* @classname: struct omap_hwmod_class name to search for
* @fn: callback function pointer to call for each hwmod in class @classname
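
The new hardreset entry points are meant for integration code that manages coprocessors. A hypothetical caller might look like the sketch below (the "dsp" reset line name is an example, not taken from this series):

static int example_dsp_reset_cycle(struct omap_hwmod *oh)
{
	int r;

	if (omap_hwmod_read_hardreset(oh, "dsp") == 1)
		pr_debug("dsp reset line is currently asserted\n");

	r = omap_hwmod_deassert_hardreset(oh, "dsp");
	if (r)
		return r;

	/* ... load firmware and start the coprocessor here ... */

	return omap_hwmod_assert_hardreset(oh, "dsp");
}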
diff --git a/arch/arm/mach-omap2/omap_hwmod_2420_data.c b/arch/arm/mach-omap2/omap_hwmod_2420_data.c
index 3cc768e8bc04..adf6e3632a2b 100644
--- a/arch/arm/mach-omap2/omap_hwmod_2420_data.c
+++ b/arch/arm/mach-omap2/omap_hwmod_2420_data.c
@@ -15,10 +15,12 @@
#include <mach/irqs.h>
#include <plat/cpu.h>
#include <plat/dma.h>
+#include <plat/serial.h>
#include "omap_hwmod_common_data.h"
#include "prm-regbits-24xx.h"
+#include "cm-regbits-24xx.h"
/*
* OMAP2420 hardware module integration data
@@ -33,6 +35,7 @@ static struct omap_hwmod omap2420_mpu_hwmod;
static struct omap_hwmod omap2420_iva_hwmod;
static struct omap_hwmod omap2420_l3_main_hwmod;
static struct omap_hwmod omap2420_l4_core_hwmod;
+static struct omap_hwmod omap2420_wd_timer2_hwmod;
/* L3 -> L4_CORE interface */
static struct omap_hwmod_ocp_if omap2420_l3_main__l4_core = {
@@ -71,6 +74,9 @@ static struct omap_hwmod omap2420_l3_main_hwmod = {
};
static struct omap_hwmod omap2420_l4_wkup_hwmod;
+static struct omap_hwmod omap2420_uart1_hwmod;
+static struct omap_hwmod omap2420_uart2_hwmod;
+static struct omap_hwmod omap2420_uart3_hwmod;
/* L4_CORE -> L4_WKUP interface */
static struct omap_hwmod_ocp_if omap2420_l4_core__l4_wkup = {
@@ -79,6 +85,60 @@ static struct omap_hwmod_ocp_if omap2420_l4_core__l4_wkup = {
.user = OCP_USER_MPU | OCP_USER_SDMA,
};
+/* L4 CORE -> UART1 interface */
+static struct omap_hwmod_addr_space omap2420_uart1_addr_space[] = {
+ {
+ .pa_start = OMAP2_UART1_BASE,
+ .pa_end = OMAP2_UART1_BASE + SZ_8K - 1,
+ .flags = ADDR_MAP_ON_INIT | ADDR_TYPE_RT,
+ },
+};
+
+static struct omap_hwmod_ocp_if omap2_l4_core__uart1 = {
+ .master = &omap2420_l4_core_hwmod,
+ .slave = &omap2420_uart1_hwmod,
+ .clk = "uart1_ick",
+ .addr = omap2420_uart1_addr_space,
+ .addr_cnt = ARRAY_SIZE(omap2420_uart1_addr_space),
+ .user = OCP_USER_MPU | OCP_USER_SDMA,
+};
+
+/* L4 CORE -> UART2 interface */
+static struct omap_hwmod_addr_space omap2420_uart2_addr_space[] = {
+ {
+ .pa_start = OMAP2_UART2_BASE,
+ .pa_end = OMAP2_UART2_BASE + SZ_1K - 1,
+ .flags = ADDR_MAP_ON_INIT | ADDR_TYPE_RT,
+ },
+};
+
+static struct omap_hwmod_ocp_if omap2_l4_core__uart2 = {
+ .master = &omap2420_l4_core_hwmod,
+ .slave = &omap2420_uart2_hwmod,
+ .clk = "uart2_ick",
+ .addr = omap2420_uart2_addr_space,
+ .addr_cnt = ARRAY_SIZE(omap2420_uart2_addr_space),
+ .user = OCP_USER_MPU | OCP_USER_SDMA,
+};
+
+/* L4 PER -> UART3 interface */
+static struct omap_hwmod_addr_space omap2420_uart3_addr_space[] = {
+ {
+ .pa_start = OMAP2_UART3_BASE,
+ .pa_end = OMAP2_UART3_BASE + SZ_1K - 1,
+ .flags = ADDR_MAP_ON_INIT | ADDR_TYPE_RT,
+ },
+};
+
+static struct omap_hwmod_ocp_if omap2_l4_core__uart3 = {
+ .master = &omap2420_l4_core_hwmod,
+ .slave = &omap2420_uart3_hwmod,
+ .clk = "uart3_ick",
+ .addr = omap2420_uart3_addr_space,
+ .addr_cnt = ARRAY_SIZE(omap2420_uart3_addr_space),
+ .user = OCP_USER_MPU | OCP_USER_SDMA,
+};
+
/* Slave interfaces on the L4_CORE interconnect */
static struct omap_hwmod_ocp_if *omap2420_l4_core_slaves[] = {
&omap2420_l3_main__l4_core,
@@ -87,6 +147,9 @@ static struct omap_hwmod_ocp_if *omap2420_l4_core_slaves[] = {
/* Master interfaces on the L4_CORE interconnect */
static struct omap_hwmod_ocp_if *omap2420_l4_core_masters[] = {
&omap2420_l4_core__l4_wkup,
+ &omap2_l4_core__uart1,
+ &omap2_l4_core__uart2,
+ &omap2_l4_core__uart3,
};
/* L4 CORE */
@@ -165,12 +228,206 @@ static struct omap_hwmod omap2420_iva_hwmod = {
.omap_chip = OMAP_CHIP_INIT(CHIP_IS_OMAP2420)
};
+/* l4_wkup -> wd_timer2 */
+static struct omap_hwmod_addr_space omap2420_wd_timer2_addrs[] = {
+ {
+ .pa_start = 0x48022000,
+ .pa_end = 0x4802207f,
+ .flags = ADDR_TYPE_RT
+ },
+};
+
+static struct omap_hwmod_ocp_if omap2420_l4_wkup__wd_timer2 = {
+ .master = &omap2420_l4_wkup_hwmod,
+ .slave = &omap2420_wd_timer2_hwmod,
+ .clk = "mpu_wdt_ick",
+ .addr = omap2420_wd_timer2_addrs,
+ .addr_cnt = ARRAY_SIZE(omap2420_wd_timer2_addrs),
+ .user = OCP_USER_MPU | OCP_USER_SDMA,
+};
+
+/*
+ * 'wd_timer' class
+ * 32-bit watchdog upward counter that generates a pulse on the reset pin on
+ * overflow condition
+ */
+
+static struct omap_hwmod_class_sysconfig omap2420_wd_timer_sysc = {
+ .rev_offs = 0x0000,
+ .sysc_offs = 0x0010,
+ .syss_offs = 0x0014,
+ .sysc_flags = (SYSC_HAS_EMUFREE | SYSC_HAS_SOFTRESET |
+ SYSC_HAS_AUTOIDLE),
+ .sysc_fields = &omap_hwmod_sysc_type1,
+};
+
+static struct omap_hwmod_class omap2420_wd_timer_hwmod_class = {
+ .name = "wd_timer",
+ .sysc = &omap2420_wd_timer_sysc,
+};
+
+/* wd_timer2 */
+static struct omap_hwmod_ocp_if *omap2420_wd_timer2_slaves[] = {
+ &omap2420_l4_wkup__wd_timer2,
+};
+
+static struct omap_hwmod omap2420_wd_timer2_hwmod = {
+ .name = "wd_timer2",
+ .class = &omap2420_wd_timer_hwmod_class,
+ .main_clk = "mpu_wdt_fck",
+ .prcm = {
+ .omap2 = {
+ .prcm_reg_id = 1,
+ .module_bit = OMAP24XX_EN_MPU_WDT_SHIFT,
+ .module_offs = WKUP_MOD,
+ .idlest_reg_id = 1,
+ .idlest_idle_bit = OMAP24XX_ST_MPU_WDT_SHIFT,
+ },
+ },
+ .slaves = omap2420_wd_timer2_slaves,
+ .slaves_cnt = ARRAY_SIZE(omap2420_wd_timer2_slaves),
+ .omap_chip = OMAP_CHIP_INIT(CHIP_IS_OMAP2420),
+};
+
+/* UART */
+
+static struct omap_hwmod_class_sysconfig uart_sysc = {
+ .rev_offs = 0x50,
+ .sysc_offs = 0x54,
+ .syss_offs = 0x58,
+ .sysc_flags = (SYSC_HAS_SIDLEMODE |
+ SYSC_HAS_ENAWAKEUP | SYSC_HAS_SOFTRESET |
+ SYSC_HAS_AUTOIDLE),
+ .idlemodes = (SIDLE_FORCE | SIDLE_NO | SIDLE_SMART),
+ .sysc_fields = &omap_hwmod_sysc_type1,
+};
+
+static struct omap_hwmod_class uart_class = {
+ .name = "uart",
+ .sysc = &uart_sysc,
+};
+
+/* UART1 */
+
+static struct omap_hwmod_irq_info uart1_mpu_irqs[] = {
+ { .irq = INT_24XX_UART1_IRQ, },
+};
+
+static struct omap_hwmod_dma_info uart1_sdma_reqs[] = {
+ { .name = "rx", .dma_req = OMAP24XX_DMA_UART1_RX, },
+ { .name = "tx", .dma_req = OMAP24XX_DMA_UART1_TX, },
+};
+
+static struct omap_hwmod_ocp_if *omap2420_uart1_slaves[] = {
+ &omap2_l4_core__uart1,
+};
+
+static struct omap_hwmod omap2420_uart1_hwmod = {
+ .name = "uart1",
+ .mpu_irqs = uart1_mpu_irqs,
+ .mpu_irqs_cnt = ARRAY_SIZE(uart1_mpu_irqs),
+ .sdma_reqs = uart1_sdma_reqs,
+ .sdma_reqs_cnt = ARRAY_SIZE(uart1_sdma_reqs),
+ .main_clk = "uart1_fck",
+ .prcm = {
+ .omap2 = {
+ .module_offs = CORE_MOD,
+ .prcm_reg_id = 1,
+ .module_bit = OMAP24XX_EN_UART1_SHIFT,
+ .idlest_reg_id = 1,
+ .idlest_idle_bit = OMAP24XX_EN_UART1_SHIFT,
+ },
+ },
+ .slaves = omap2420_uart1_slaves,
+ .slaves_cnt = ARRAY_SIZE(omap2420_uart1_slaves),
+ .class = &uart_class,
+ .omap_chip = OMAP_CHIP_INIT(CHIP_IS_OMAP2420),
+};
+
+/* UART2 */
+
+static struct omap_hwmod_irq_info uart2_mpu_irqs[] = {
+ { .irq = INT_24XX_UART2_IRQ, },
+};
+
+static struct omap_hwmod_dma_info uart2_sdma_reqs[] = {
+ { .name = "rx", .dma_req = OMAP24XX_DMA_UART2_RX, },
+ { .name = "tx", .dma_req = OMAP24XX_DMA_UART2_TX, },
+};
+
+static struct omap_hwmod_ocp_if *omap2420_uart2_slaves[] = {
+ &omap2_l4_core__uart2,
+};
+
+static struct omap_hwmod omap2420_uart2_hwmod = {
+ .name = "uart2",
+ .mpu_irqs = uart2_mpu_irqs,
+ .mpu_irqs_cnt = ARRAY_SIZE(uart2_mpu_irqs),
+ .sdma_reqs = uart2_sdma_reqs,
+ .sdma_reqs_cnt = ARRAY_SIZE(uart2_sdma_reqs),
+ .main_clk = "uart2_fck",
+ .prcm = {
+ .omap2 = {
+ .module_offs = CORE_MOD,
+ .prcm_reg_id = 1,
+ .module_bit = OMAP24XX_EN_UART2_SHIFT,
+ .idlest_reg_id = 1,
+ .idlest_idle_bit = OMAP24XX_EN_UART2_SHIFT,
+ },
+ },
+ .slaves = omap2420_uart2_slaves,
+ .slaves_cnt = ARRAY_SIZE(omap2420_uart2_slaves),
+ .class = &uart_class,
+ .omap_chip = OMAP_CHIP_INIT(CHIP_IS_OMAP2420),
+};
+
+/* UART3 */
+
+static struct omap_hwmod_irq_info uart3_mpu_irqs[] = {
+ { .irq = INT_24XX_UART3_IRQ, },
+};
+
+static struct omap_hwmod_dma_info uart3_sdma_reqs[] = {
+ { .name = "rx", .dma_req = OMAP24XX_DMA_UART3_RX, },
+ { .name = "tx", .dma_req = OMAP24XX_DMA_UART3_TX, },
+};
+
+static struct omap_hwmod_ocp_if *omap2420_uart3_slaves[] = {
+ &omap2_l4_core__uart3,
+};
+
+static struct omap_hwmod omap2420_uart3_hwmod = {
+ .name = "uart3",
+ .mpu_irqs = uart3_mpu_irqs,
+ .mpu_irqs_cnt = ARRAY_SIZE(uart3_mpu_irqs),
+ .sdma_reqs = uart3_sdma_reqs,
+ .sdma_reqs_cnt = ARRAY_SIZE(uart3_sdma_reqs),
+ .main_clk = "uart3_fck",
+ .prcm = {
+ .omap2 = {
+ .module_offs = CORE_MOD,
+ .prcm_reg_id = 2,
+ .module_bit = OMAP24XX_EN_UART3_SHIFT,
+ .idlest_reg_id = 2,
+ .idlest_idle_bit = OMAP24XX_EN_UART3_SHIFT,
+ },
+ },
+ .slaves = omap2420_uart3_slaves,
+ .slaves_cnt = ARRAY_SIZE(omap2420_uart3_slaves),
+ .class = &uart_class,
+ .omap_chip = OMAP_CHIP_INIT(CHIP_IS_OMAP2420),
+};
+
static __initdata struct omap_hwmod *omap2420_hwmods[] = {
&omap2420_l3_main_hwmod,
&omap2420_l4_core_hwmod,
&omap2420_l4_wkup_hwmod,
&omap2420_mpu_hwmod,
&omap2420_iva_hwmod,
+ &omap2420_wd_timer2_hwmod,
+ &omap2420_uart1_hwmod,
+ &omap2420_uart2_hwmod,
+ &omap2420_uart3_hwmod,
NULL,
};
diff --git a/arch/arm/mach-omap2/omap_hwmod_2430_data.c b/arch/arm/mach-omap2/omap_hwmod_2430_data.c
index 4526628ed287..12d939e456cf 100644
--- a/arch/arm/mach-omap2/omap_hwmod_2430_data.c
+++ b/arch/arm/mach-omap2/omap_hwmod_2430_data.c
@@ -15,10 +15,12 @@
#include <mach/irqs.h>
#include <plat/cpu.h>
#include <plat/dma.h>
+#include <plat/serial.h>
#include "omap_hwmod_common_data.h"
#include "prm-regbits-24xx.h"
+#include "cm-regbits-24xx.h"
/*
* OMAP2430 hardware module integration data
@@ -33,6 +35,7 @@ static struct omap_hwmod omap2430_mpu_hwmod;
static struct omap_hwmod omap2430_iva_hwmod;
static struct omap_hwmod omap2430_l3_main_hwmod;
static struct omap_hwmod omap2430_l4_core_hwmod;
+static struct omap_hwmod omap2430_wd_timer2_hwmod;
/* L3 -> L4_CORE interface */
static struct omap_hwmod_ocp_if omap2430_l3_main__l4_core = {
@@ -71,6 +74,9 @@ static struct omap_hwmod omap2430_l3_main_hwmod = {
};
static struct omap_hwmod omap2430_l4_wkup_hwmod;
+static struct omap_hwmod omap2430_uart1_hwmod;
+static struct omap_hwmod omap2430_uart2_hwmod;
+static struct omap_hwmod omap2430_uart3_hwmod;
/* L4_CORE -> L4_WKUP interface */
static struct omap_hwmod_ocp_if omap2430_l4_core__l4_wkup = {
@@ -79,6 +85,60 @@ static struct omap_hwmod_ocp_if omap2430_l4_core__l4_wkup = {
.user = OCP_USER_MPU | OCP_USER_SDMA,
};
+/* L4 CORE -> UART1 interface */
+static struct omap_hwmod_addr_space omap2430_uart1_addr_space[] = {
+ {
+ .pa_start = OMAP2_UART1_BASE,
+ .pa_end = OMAP2_UART1_BASE + SZ_8K - 1,
+ .flags = ADDR_MAP_ON_INIT | ADDR_TYPE_RT,
+ },
+};
+
+static struct omap_hwmod_ocp_if omap2_l4_core__uart1 = {
+ .master = &omap2430_l4_core_hwmod,
+ .slave = &omap2430_uart1_hwmod,
+ .clk = "uart1_ick",
+ .addr = omap2430_uart1_addr_space,
+ .addr_cnt = ARRAY_SIZE(omap2430_uart1_addr_space),
+ .user = OCP_USER_MPU | OCP_USER_SDMA,
+};
+
+/* L4 CORE -> UART2 interface */
+static struct omap_hwmod_addr_space omap2430_uart2_addr_space[] = {
+ {
+ .pa_start = OMAP2_UART2_BASE,
+ .pa_end = OMAP2_UART2_BASE + SZ_1K - 1,
+ .flags = ADDR_MAP_ON_INIT | ADDR_TYPE_RT,
+ },
+};
+
+static struct omap_hwmod_ocp_if omap2_l4_core__uart2 = {
+ .master = &omap2430_l4_core_hwmod,
+ .slave = &omap2430_uart2_hwmod,
+ .clk = "uart2_ick",
+ .addr = omap2430_uart2_addr_space,
+ .addr_cnt = ARRAY_SIZE(omap2430_uart2_addr_space),
+ .user = OCP_USER_MPU | OCP_USER_SDMA,
+};
+
+/* L4 CORE -> UART3 interface */
+static struct omap_hwmod_addr_space omap2430_uart3_addr_space[] = {
+ {
+ .pa_start = OMAP2_UART3_BASE,
+ .pa_end = OMAP2_UART3_BASE + SZ_1K - 1,
+ .flags = ADDR_MAP_ON_INIT | ADDR_TYPE_RT,
+ },
+};
+
+static struct omap_hwmod_ocp_if omap2_l4_core__uart3 = {
+ .master = &omap2430_l4_core_hwmod,
+ .slave = &omap2430_uart3_hwmod,
+ .clk = "uart3_ick",
+ .addr = omap2430_uart3_addr_space,
+ .addr_cnt = ARRAY_SIZE(omap2430_uart3_addr_space),
+ .user = OCP_USER_MPU | OCP_USER_SDMA,
+};
+
/* Slave interfaces on the L4_CORE interconnect */
static struct omap_hwmod_ocp_if *omap2430_l4_core_slaves[] = {
&omap2430_l3_main__l4_core,
@@ -104,6 +164,9 @@ static struct omap_hwmod omap2430_l4_core_hwmod = {
/* Slave interfaces on the L4_WKUP interconnect */
static struct omap_hwmod_ocp_if *omap2430_l4_wkup_slaves[] = {
&omap2430_l4_core__l4_wkup,
+ &omap2_l4_core__uart1,
+ &omap2_l4_core__uart2,
+ &omap2_l4_core__uart3,
};
/* Master interfaces on the L4_WKUP interconnect */
@@ -165,12 +228,206 @@ static struct omap_hwmod omap2430_iva_hwmod = {
.omap_chip = OMAP_CHIP_INIT(CHIP_IS_OMAP2430)
};
+/* l4_wkup -> wd_timer2 */
+static struct omap_hwmod_addr_space omap2430_wd_timer2_addrs[] = {
+ {
+ .pa_start = 0x49016000,
+ .pa_end = 0x4901607f,
+ .flags = ADDR_TYPE_RT
+ },
+};
+
+static struct omap_hwmod_ocp_if omap2430_l4_wkup__wd_timer2 = {
+ .master = &omap2430_l4_wkup_hwmod,
+ .slave = &omap2430_wd_timer2_hwmod,
+ .clk = "mpu_wdt_ick",
+ .addr = omap2430_wd_timer2_addrs,
+ .addr_cnt = ARRAY_SIZE(omap2430_wd_timer2_addrs),
+ .user = OCP_USER_MPU | OCP_USER_SDMA,
+};
+
+/*
+ * 'wd_timer' class
+ * 32-bit watchdog upward counter that generates a pulse on the reset pin on
+ * an overflow condition
+ */
+
+static struct omap_hwmod_class_sysconfig omap2430_wd_timer_sysc = {
+ .rev_offs = 0x0,
+ .sysc_offs = 0x0010,
+ .syss_offs = 0x0014,
+ .sysc_flags = (SYSC_HAS_EMUFREE | SYSC_HAS_SOFTRESET |
+ SYSC_HAS_AUTOIDLE),
+ .sysc_fields = &omap_hwmod_sysc_type1,
+};
+
+static struct omap_hwmod_class omap2430_wd_timer_hwmod_class = {
+ .name = "wd_timer",
+ .sysc = &omap2430_wd_timer_sysc,
+};
+
+/* wd_timer2 */
+static struct omap_hwmod_ocp_if *omap2430_wd_timer2_slaves[] = {
+ &omap2430_l4_wkup__wd_timer2,
+};
+
+static struct omap_hwmod omap2430_wd_timer2_hwmod = {
+ .name = "wd_timer2",
+ .class = &omap2430_wd_timer_hwmod_class,
+ .main_clk = "mpu_wdt_fck",
+ .prcm = {
+ .omap2 = {
+ .prcm_reg_id = 1,
+ .module_bit = OMAP24XX_EN_MPU_WDT_SHIFT,
+ .module_offs = WKUP_MOD,
+ .idlest_reg_id = 1,
+ .idlest_idle_bit = OMAP24XX_ST_MPU_WDT_SHIFT,
+ },
+ },
+ .slaves = omap2430_wd_timer2_slaves,
+ .slaves_cnt = ARRAY_SIZE(omap2430_wd_timer2_slaves),
+ .omap_chip = OMAP_CHIP_INIT(CHIP_IS_OMAP2430),
+};
+
+/* UART */
+
+static struct omap_hwmod_class_sysconfig uart_sysc = {
+ .rev_offs = 0x50,
+ .sysc_offs = 0x54,
+ .syss_offs = 0x58,
+ .sysc_flags = (SYSC_HAS_SIDLEMODE |
+ SYSC_HAS_ENAWAKEUP | SYSC_HAS_SOFTRESET |
+ SYSC_HAS_AUTOIDLE),
+ .idlemodes = (SIDLE_FORCE | SIDLE_NO | SIDLE_SMART),
+ .sysc_fields = &omap_hwmod_sysc_type1,
+};
+
+static struct omap_hwmod_class uart_class = {
+ .name = "uart",
+ .sysc = &uart_sysc,
+};
+
+/* UART1 */
+
+static struct omap_hwmod_irq_info uart1_mpu_irqs[] = {
+ { .irq = INT_24XX_UART1_IRQ, },
+};
+
+static struct omap_hwmod_dma_info uart1_sdma_reqs[] = {
+ { .name = "rx", .dma_req = OMAP24XX_DMA_UART1_RX, },
+ { .name = "tx", .dma_req = OMAP24XX_DMA_UART1_TX, },
+};
+
+static struct omap_hwmod_ocp_if *omap2430_uart1_slaves[] = {
+ &omap2_l4_core__uart1,
+};
+
+static struct omap_hwmod omap2430_uart1_hwmod = {
+ .name = "uart1",
+ .mpu_irqs = uart1_mpu_irqs,
+ .mpu_irqs_cnt = ARRAY_SIZE(uart1_mpu_irqs),
+ .sdma_reqs = uart1_sdma_reqs,
+ .sdma_reqs_cnt = ARRAY_SIZE(uart1_sdma_reqs),
+ .main_clk = "uart1_fck",
+ .prcm = {
+ .omap2 = {
+ .module_offs = CORE_MOD,
+ .prcm_reg_id = 1,
+ .module_bit = OMAP24XX_EN_UART1_SHIFT,
+ .idlest_reg_id = 1,
+ .idlest_idle_bit = OMAP24XX_EN_UART1_SHIFT,
+ },
+ },
+ .slaves = omap2430_uart1_slaves,
+ .slaves_cnt = ARRAY_SIZE(omap2430_uart1_slaves),
+ .class = &uart_class,
+ .omap_chip = OMAP_CHIP_INIT(CHIP_IS_OMAP2430),
+};
+
+/* UART2 */
+
+static struct omap_hwmod_irq_info uart2_mpu_irqs[] = {
+ { .irq = INT_24XX_UART2_IRQ, },
+};
+
+static struct omap_hwmod_dma_info uart2_sdma_reqs[] = {
+ { .name = "rx", .dma_req = OMAP24XX_DMA_UART2_RX, },
+ { .name = "tx", .dma_req = OMAP24XX_DMA_UART2_TX, },
+};
+
+static struct omap_hwmod_ocp_if *omap2430_uart2_slaves[] = {
+ &omap2_l4_core__uart2,
+};
+
+static struct omap_hwmod omap2430_uart2_hwmod = {
+ .name = "uart2",
+ .mpu_irqs = uart2_mpu_irqs,
+ .mpu_irqs_cnt = ARRAY_SIZE(uart2_mpu_irqs),
+ .sdma_reqs = uart2_sdma_reqs,
+ .sdma_reqs_cnt = ARRAY_SIZE(uart2_sdma_reqs),
+ .main_clk = "uart2_fck",
+ .prcm = {
+ .omap2 = {
+ .module_offs = CORE_MOD,
+ .prcm_reg_id = 1,
+ .module_bit = OMAP24XX_EN_UART2_SHIFT,
+ .idlest_reg_id = 1,
+ .idlest_idle_bit = OMAP24XX_EN_UART2_SHIFT,
+ },
+ },
+ .slaves = omap2430_uart2_slaves,
+ .slaves_cnt = ARRAY_SIZE(omap2430_uart2_slaves),
+ .class = &uart_class,
+ .omap_chip = OMAP_CHIP_INIT(CHIP_IS_OMAP2430),
+};
+
+/* UART3 */
+
+static struct omap_hwmod_irq_info uart3_mpu_irqs[] = {
+ { .irq = INT_24XX_UART3_IRQ, },
+};
+
+static struct omap_hwmod_dma_info uart3_sdma_reqs[] = {
+ { .name = "rx", .dma_req = OMAP24XX_DMA_UART3_RX, },
+ { .name = "tx", .dma_req = OMAP24XX_DMA_UART3_TX, },
+};
+
+static struct omap_hwmod_ocp_if *omap2430_uart3_slaves[] = {
+ &omap2_l4_core__uart3,
+};
+
+static struct omap_hwmod omap2430_uart3_hwmod = {
+ .name = "uart3",
+ .mpu_irqs = uart3_mpu_irqs,
+ .mpu_irqs_cnt = ARRAY_SIZE(uart3_mpu_irqs),
+ .sdma_reqs = uart3_sdma_reqs,
+ .sdma_reqs_cnt = ARRAY_SIZE(uart3_sdma_reqs),
+ .main_clk = "uart3_fck",
+ .prcm = {
+ .omap2 = {
+ .module_offs = CORE_MOD,
+ .prcm_reg_id = 2,
+ .module_bit = OMAP24XX_EN_UART3_SHIFT,
+ .idlest_reg_id = 2,
+ .idlest_idle_bit = OMAP24XX_EN_UART3_SHIFT,
+ },
+ },
+ .slaves = omap2430_uart3_slaves,
+ .slaves_cnt = ARRAY_SIZE(omap2430_uart3_slaves),
+ .class = &uart_class,
+ .omap_chip = OMAP_CHIP_INIT(CHIP_IS_OMAP2430),
+};
+
static __initdata struct omap_hwmod *omap2430_hwmods[] = {
&omap2430_l3_main_hwmod,
&omap2430_l4_core_hwmod,
&omap2430_l4_wkup_hwmod,
&omap2430_mpu_hwmod,
&omap2430_iva_hwmod,
+ &omap2430_wd_timer2_hwmod,
+ &omap2430_uart1_hwmod,
+ &omap2430_uart2_hwmod,
+ &omap2430_uart3_hwmod,
NULL,
};
diff --git a/arch/arm/mach-omap2/omap_hwmod_3xxx_data.c b/arch/arm/mach-omap2/omap_hwmod_3xxx_data.c
index 5d8eb58ba5e3..cb97ecf0a3f6 100644
--- a/arch/arm/mach-omap2/omap_hwmod_3xxx_data.c
+++ b/arch/arm/mach-omap2/omap_hwmod_3xxx_data.c
@@ -17,10 +17,12 @@
#include <mach/irqs.h>
#include <plat/cpu.h>
#include <plat/dma.h>
+#include <plat/serial.h>
#include "omap_hwmod_common_data.h"
#include "prm-regbits-34xx.h"
+#include "cm-regbits-34xx.h"
/*
* OMAP3xxx hardware module integration data
@@ -36,6 +38,7 @@ static struct omap_hwmod omap3xxx_iva_hwmod;
static struct omap_hwmod omap3xxx_l3_main_hwmod;
static struct omap_hwmod omap3xxx_l4_core_hwmod;
static struct omap_hwmod omap3xxx_l4_per_hwmod;
+static struct omap_hwmod omap3xxx_wd_timer2_hwmod;
/* L3 -> L4_CORE interface */
static struct omap_hwmod_ocp_if omap3xxx_l3_main__l4_core = {
@@ -82,6 +85,10 @@ static struct omap_hwmod omap3xxx_l3_main_hwmod = {
};
static struct omap_hwmod omap3xxx_l4_wkup_hwmod;
+static struct omap_hwmod omap3xxx_uart1_hwmod;
+static struct omap_hwmod omap3xxx_uart2_hwmod;
+static struct omap_hwmod omap3xxx_uart3_hwmod;
+static struct omap_hwmod omap3xxx_uart4_hwmod;
/* L4_CORE -> L4_WKUP interface */
static struct omap_hwmod_ocp_if omap3xxx_l4_core__l4_wkup = {
@@ -90,6 +97,78 @@ static struct omap_hwmod_ocp_if omap3xxx_l4_core__l4_wkup = {
.user = OCP_USER_MPU | OCP_USER_SDMA,
};
+/* L4 CORE -> UART1 interface */
+static struct omap_hwmod_addr_space omap3xxx_uart1_addr_space[] = {
+ {
+ .pa_start = OMAP3_UART1_BASE,
+ .pa_end = OMAP3_UART1_BASE + SZ_8K - 1,
+ .flags = ADDR_MAP_ON_INIT | ADDR_TYPE_RT,
+ },
+};
+
+static struct omap_hwmod_ocp_if omap3_l4_core__uart1 = {
+ .master = &omap3xxx_l4_core_hwmod,
+ .slave = &omap3xxx_uart1_hwmod,
+ .clk = "uart1_ick",
+ .addr = omap3xxx_uart1_addr_space,
+ .addr_cnt = ARRAY_SIZE(omap3xxx_uart1_addr_space),
+ .user = OCP_USER_MPU | OCP_USER_SDMA,
+};
+
+/* L4 CORE -> UART2 interface */
+static struct omap_hwmod_addr_space omap3xxx_uart2_addr_space[] = {
+ {
+ .pa_start = OMAP3_UART2_BASE,
+ .pa_end = OMAP3_UART2_BASE + SZ_1K - 1,
+ .flags = ADDR_MAP_ON_INIT | ADDR_TYPE_RT,
+ },
+};
+
+static struct omap_hwmod_ocp_if omap3_l4_core__uart2 = {
+ .master = &omap3xxx_l4_core_hwmod,
+ .slave = &omap3xxx_uart2_hwmod,
+ .clk = "uart2_ick",
+ .addr = omap3xxx_uart2_addr_space,
+ .addr_cnt = ARRAY_SIZE(omap3xxx_uart2_addr_space),
+ .user = OCP_USER_MPU | OCP_USER_SDMA,
+};
+
+/* L4 PER -> UART3 interface */
+static struct omap_hwmod_addr_space omap3xxx_uart3_addr_space[] = {
+ {
+ .pa_start = OMAP3_UART3_BASE,
+ .pa_end = OMAP3_UART3_BASE + SZ_1K - 1,
+ .flags = ADDR_MAP_ON_INIT | ADDR_TYPE_RT,
+ },
+};
+
+static struct omap_hwmod_ocp_if omap3_l4_per__uart3 = {
+ .master = &omap3xxx_l4_per_hwmod,
+ .slave = &omap3xxx_uart3_hwmod,
+ .clk = "uart3_ick",
+ .addr = omap3xxx_uart3_addr_space,
+ .addr_cnt = ARRAY_SIZE(omap3xxx_uart3_addr_space),
+ .user = OCP_USER_MPU | OCP_USER_SDMA,
+};
+
+/* L4 PER -> UART4 interface */
+static struct omap_hwmod_addr_space omap3xxx_uart4_addr_space[] = {
+ {
+ .pa_start = OMAP3_UART4_BASE,
+ .pa_end = OMAP3_UART4_BASE + SZ_1K - 1,
+ .flags = ADDR_MAP_ON_INIT | ADDR_TYPE_RT,
+ },
+};
+
+static struct omap_hwmod_ocp_if omap3_l4_per__uart4 = {
+ .master = &omap3xxx_l4_per_hwmod,
+ .slave = &omap3xxx_uart4_hwmod,
+ .clk = "uart4_ick",
+ .addr = omap3xxx_uart4_addr_space,
+ .addr_cnt = ARRAY_SIZE(omap3xxx_uart4_addr_space),
+ .user = OCP_USER_MPU | OCP_USER_SDMA,
+};
+
/* Slave interfaces on the L4_CORE interconnect */
static struct omap_hwmod_ocp_if *omap3xxx_l4_core_slaves[] = {
&omap3xxx_l3_main__l4_core,
@@ -98,6 +177,8 @@ static struct omap_hwmod_ocp_if *omap3xxx_l4_core_slaves[] = {
/* Master interfaces on the L4_CORE interconnect */
static struct omap_hwmod_ocp_if *omap3xxx_l4_core_masters[] = {
&omap3xxx_l4_core__l4_wkup,
+ &omap3_l4_core__uart1,
+ &omap3_l4_core__uart2,
};
/* L4 CORE */
@@ -119,6 +200,8 @@ static struct omap_hwmod_ocp_if *omap3xxx_l4_per_slaves[] = {
/* Master interfaces on the L4_PER interconnect */
static struct omap_hwmod_ocp_if *omap3xxx_l4_per_masters[] = {
+ &omap3_l4_per__uart3,
+ &omap3_l4_per__uart4,
};
/* L4 PER */
@@ -197,6 +280,235 @@ static struct omap_hwmod omap3xxx_iva_hwmod = {
.omap_chip = OMAP_CHIP_INIT(CHIP_IS_OMAP3430)
};
+/* l4_wkup -> wd_timer2 */
+static struct omap_hwmod_addr_space omap3xxx_wd_timer2_addrs[] = {
+ {
+ .pa_start = 0x48314000,
+ .pa_end = 0x4831407f,
+ .flags = ADDR_TYPE_RT
+ },
+};
+
+static struct omap_hwmod_ocp_if omap3xxx_l4_wkup__wd_timer2 = {
+ .master = &omap3xxx_l4_wkup_hwmod,
+ .slave = &omap3xxx_wd_timer2_hwmod,
+ .clk = "wdt2_ick",
+ .addr = omap3xxx_wd_timer2_addrs,
+ .addr_cnt = ARRAY_SIZE(omap3xxx_wd_timer2_addrs),
+ .user = OCP_USER_MPU | OCP_USER_SDMA,
+};
+
+/*
+ * 'wd_timer' class
+ * 32-bit watchdog upward counter that generates a pulse on the reset pin on
+ * an overflow condition
+ */
+
+static struct omap_hwmod_class_sysconfig omap3xxx_wd_timer_sysc = {
+ .rev_offs = 0x0000,
+ .sysc_offs = 0x0010,
+ .syss_offs = 0x0014,
+ .sysc_flags = (SYSC_HAS_SIDLEMODE | SYSC_HAS_EMUFREE |
+ SYSC_HAS_ENAWAKEUP | SYSC_HAS_SOFTRESET |
+ SYSC_HAS_AUTOIDLE | SYSC_HAS_CLOCKACTIVITY),
+ .idlemodes = (SIDLE_FORCE | SIDLE_NO | SIDLE_SMART),
+ .sysc_fields = &omap_hwmod_sysc_type1,
+};
+
+static struct omap_hwmod_class omap3xxx_wd_timer_hwmod_class = {
+ .name = "wd_timer",
+ .sysc = &omap3xxx_wd_timer_sysc,
+};
+
+/* wd_timer2 */
+static struct omap_hwmod_ocp_if *omap3xxx_wd_timer2_slaves[] = {
+ &omap3xxx_l4_wkup__wd_timer2,
+};
+
+static struct omap_hwmod omap3xxx_wd_timer2_hwmod = {
+ .name = "wd_timer2",
+ .class = &omap3xxx_wd_timer_hwmod_class,
+ .main_clk = "wdt2_fck",
+ .prcm = {
+ .omap2 = {
+ .prcm_reg_id = 1,
+ .module_bit = OMAP3430_EN_WDT2_SHIFT,
+ .module_offs = WKUP_MOD,
+ .idlest_reg_id = 1,
+ .idlest_idle_bit = OMAP3430_ST_WDT2_SHIFT,
+ },
+ },
+ .slaves = omap3xxx_wd_timer2_slaves,
+ .slaves_cnt = ARRAY_SIZE(omap3xxx_wd_timer2_slaves),
+ .omap_chip = OMAP_CHIP_INIT(CHIP_IS_OMAP3430),
+};
+
+/* UART common */
+
+static struct omap_hwmod_class_sysconfig uart_sysc = {
+ .rev_offs = 0x50,
+ .sysc_offs = 0x54,
+ .syss_offs = 0x58,
+ .sysc_flags = (SYSC_HAS_SIDLEMODE |
+ SYSC_HAS_ENAWAKEUP | SYSC_HAS_SOFTRESET |
+ SYSC_HAS_AUTOIDLE),
+ .idlemodes = (SIDLE_FORCE | SIDLE_NO | SIDLE_SMART),
+ .sysc_fields = &omap_hwmod_sysc_type1,
+};
+
+static struct omap_hwmod_class uart_class = {
+ .name = "uart",
+ .sysc = &uart_sysc,
+};
+
+/* UART1 */
+
+static struct omap_hwmod_irq_info uart1_mpu_irqs[] = {
+ { .irq = INT_24XX_UART1_IRQ, },
+};
+
+static struct omap_hwmod_dma_info uart1_sdma_reqs[] = {
+ { .name = "tx", .dma_req = OMAP24XX_DMA_UART1_TX, },
+ { .name = "rx", .dma_req = OMAP24XX_DMA_UART1_RX, },
+};
+
+static struct omap_hwmod_ocp_if *omap3xxx_uart1_slaves[] = {
+ &omap3_l4_core__uart1,
+};
+
+static struct omap_hwmod omap3xxx_uart1_hwmod = {
+ .name = "uart1",
+ .mpu_irqs = uart1_mpu_irqs,
+ .mpu_irqs_cnt = ARRAY_SIZE(uart1_mpu_irqs),
+ .sdma_reqs = uart1_sdma_reqs,
+ .sdma_reqs_cnt = ARRAY_SIZE(uart1_sdma_reqs),
+ .main_clk = "uart1_fck",
+ .prcm = {
+ .omap2 = {
+ .module_offs = CORE_MOD,
+ .prcm_reg_id = 1,
+ .module_bit = OMAP3430_EN_UART1_SHIFT,
+ .idlest_reg_id = 1,
+ .idlest_idle_bit = OMAP3430_EN_UART1_SHIFT,
+ },
+ },
+ .slaves = omap3xxx_uart1_slaves,
+ .slaves_cnt = ARRAY_SIZE(omap3xxx_uart1_slaves),
+ .class = &uart_class,
+ .omap_chip = OMAP_CHIP_INIT(CHIP_IS_OMAP3430),
+};
+
+/* UART2 */
+
+static struct omap_hwmod_irq_info uart2_mpu_irqs[] = {
+ { .irq = INT_24XX_UART2_IRQ, },
+};
+
+static struct omap_hwmod_dma_info uart2_sdma_reqs[] = {
+ { .name = "tx", .dma_req = OMAP24XX_DMA_UART2_TX, },
+ { .name = "rx", .dma_req = OMAP24XX_DMA_UART2_RX, },
+};
+
+static struct omap_hwmod_ocp_if *omap3xxx_uart2_slaves[] = {
+ &omap3_l4_core__uart2,
+};
+
+static struct omap_hwmod omap3xxx_uart2_hwmod = {
+ .name = "uart2",
+ .mpu_irqs = uart2_mpu_irqs,
+ .mpu_irqs_cnt = ARRAY_SIZE(uart2_mpu_irqs),
+ .sdma_reqs = uart2_sdma_reqs,
+ .sdma_reqs_cnt = ARRAY_SIZE(uart2_sdma_reqs),
+ .main_clk = "uart2_fck",
+ .prcm = {
+ .omap2 = {
+ .module_offs = CORE_MOD,
+ .prcm_reg_id = 1,
+ .module_bit = OMAP3430_EN_UART2_SHIFT,
+ .idlest_reg_id = 1,
+ .idlest_idle_bit = OMAP3430_EN_UART2_SHIFT,
+ },
+ },
+ .slaves = omap3xxx_uart2_slaves,
+ .slaves_cnt = ARRAY_SIZE(omap3xxx_uart2_slaves),
+ .class = &uart_class,
+ .omap_chip = OMAP_CHIP_INIT(CHIP_IS_OMAP3430),
+};
+
+/* UART3 */
+
+static struct omap_hwmod_irq_info uart3_mpu_irqs[] = {
+ { .irq = INT_24XX_UART3_IRQ, },
+};
+
+static struct omap_hwmod_dma_info uart3_sdma_reqs[] = {
+ { .name = "tx", .dma_req = OMAP24XX_DMA_UART3_TX, },
+ { .name = "rx", .dma_req = OMAP24XX_DMA_UART3_RX, },
+};
+
+static struct omap_hwmod_ocp_if *omap3xxx_uart3_slaves[] = {
+ &omap3_l4_per__uart3,
+};
+
+static struct omap_hwmod omap3xxx_uart3_hwmod = {
+ .name = "uart3",
+ .mpu_irqs = uart3_mpu_irqs,
+ .mpu_irqs_cnt = ARRAY_SIZE(uart3_mpu_irqs),
+ .sdma_reqs = uart3_sdma_reqs,
+ .sdma_reqs_cnt = ARRAY_SIZE(uart3_sdma_reqs),
+ .main_clk = "uart3_fck",
+ .prcm = {
+ .omap2 = {
+ .module_offs = OMAP3430_PER_MOD,
+ .prcm_reg_id = 1,
+ .module_bit = OMAP3430_EN_UART3_SHIFT,
+ .idlest_reg_id = 1,
+ .idlest_idle_bit = OMAP3430_EN_UART3_SHIFT,
+ },
+ },
+ .slaves = omap3xxx_uart3_slaves,
+ .slaves_cnt = ARRAY_SIZE(omap3xxx_uart3_slaves),
+ .class = &uart_class,
+ .omap_chip = OMAP_CHIP_INIT(CHIP_IS_OMAP3430),
+};
+
+/* UART4 */
+
+static struct omap_hwmod_irq_info uart4_mpu_irqs[] = {
+ { .irq = INT_36XX_UART4_IRQ, },
+};
+
+static struct omap_hwmod_dma_info uart4_sdma_reqs[] = {
+ { .name = "rx", .dma_req = OMAP36XX_DMA_UART4_RX, },
+ { .name = "tx", .dma_req = OMAP36XX_DMA_UART4_TX, },
+};
+
+static struct omap_hwmod_ocp_if *omap3xxx_uart4_slaves[] = {
+ &omap3_l4_per__uart4,
+};
+
+static struct omap_hwmod omap3xxx_uart4_hwmod = {
+ .name = "uart4",
+ .mpu_irqs = uart4_mpu_irqs,
+ .mpu_irqs_cnt = ARRAY_SIZE(uart4_mpu_irqs),
+ .sdma_reqs = uart4_sdma_reqs,
+ .sdma_reqs_cnt = ARRAY_SIZE(uart4_sdma_reqs),
+ .main_clk = "uart4_fck",
+ .prcm = {
+ .omap2 = {
+ .module_offs = OMAP3430_PER_MOD,
+ .prcm_reg_id = 1,
+ .module_bit = OMAP3630_EN_UART4_SHIFT,
+ .idlest_reg_id = 1,
+ .idlest_idle_bit = OMAP3630_EN_UART4_SHIFT,
+ },
+ },
+ .slaves = omap3xxx_uart4_slaves,
+ .slaves_cnt = ARRAY_SIZE(omap3xxx_uart4_slaves),
+ .class = &uart_class,
+ .omap_chip = OMAP_CHIP_INIT(CHIP_IS_OMAP3630ES1),
+};
+
static __initdata struct omap_hwmod *omap3xxx_hwmods[] = {
&omap3xxx_l3_main_hwmod,
&omap3xxx_l4_core_hwmod,
@@ -204,6 +516,11 @@ static __initdata struct omap_hwmod *omap3xxx_hwmods[] = {
&omap3xxx_l4_wkup_hwmod,
&omap3xxx_mpu_hwmod,
&omap3xxx_iva_hwmod,
+ &omap3xxx_wd_timer2_hwmod,
+ &omap3xxx_uart1_hwmod,
+ &omap3xxx_uart2_hwmod,
+ &omap3xxx_uart3_hwmod,
+ &omap3xxx_uart4_hwmod,
NULL,
};
@@ -211,5 +528,3 @@ int __init omap3xxx_hwmod_init(void)
{
return omap_hwmod_init(omap3xxx_hwmods);
}
-
-
diff --git a/arch/arm/mach-omap2/omap_hwmod_44xx_data.c b/arch/arm/mach-omap2/omap_hwmod_44xx_data.c
new file mode 100644
index 000000000000..7274db4de487
--- /dev/null
+++ b/arch/arm/mach-omap2/omap_hwmod_44xx_data.c
@@ -0,0 +1,850 @@
+/*
+ * Hardware modules present on the OMAP44xx chips
+ *
+ * Copyright (C) 2009-2010 Texas Instruments, Inc.
+ * Copyright (C) 2009-2010 Nokia Corporation
+ *
+ * Paul Walmsley
+ * Benoit Cousson
+ *
+ * This file is automatically generated from the OMAP hardware databases.
+ * We respectfully ask that any modifications to this file be coordinated
+ * with the public linux-omap@vger.kernel.org mailing list and the
+ * authors above to ensure that the autogeneration scripts are kept
+ * up-to-date with the file contents.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/io.h>
+
+#include <plat/omap_hwmod.h>
+#include <plat/cpu.h>
+
+#include "omap_hwmod_common_data.h"
+
+#include "cm.h"
+#include "prm-regbits-44xx.h"
+
+/* Base offset for all OMAP4 interrupts external to MPUSS */
+#define OMAP44XX_IRQ_GIC_START 32
+
+/* Base offset for all OMAP4 dma requests */
+#define OMAP44XX_DMA_REQ_START 1
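+
+/*
+ * IRQ numbers in this file are GIC SPI numbers, so e.g. UART3 (SPI 74)
+ * is registered as 74 + OMAP44XX_IRQ_GIC_START = Linux IRQ 106; DMA
+ * request lines likewise carry the +1 offset expected by the sDMA code.
+ */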
+
+/* Backward references (IPs with Bus Master capability) */
+static struct omap_hwmod omap44xx_dmm_hwmod;
+static struct omap_hwmod omap44xx_emif_fw_hwmod;
+static struct omap_hwmod omap44xx_l3_instr_hwmod;
+static struct omap_hwmod omap44xx_l3_main_1_hwmod;
+static struct omap_hwmod omap44xx_l3_main_2_hwmod;
+static struct omap_hwmod omap44xx_l3_main_3_hwmod;
+static struct omap_hwmod omap44xx_l4_abe_hwmod;
+static struct omap_hwmod omap44xx_l4_cfg_hwmod;
+static struct omap_hwmod omap44xx_l4_per_hwmod;
+static struct omap_hwmod omap44xx_l4_wkup_hwmod;
+static struct omap_hwmod omap44xx_mpu_hwmod;
+static struct omap_hwmod omap44xx_mpu_private_hwmod;
+
+/*
+ * Interconnects omap_hwmod structures
+ * hwmods that compose the global OMAP interconnect
+ */
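+
+/*
+ * Each omap_hwmod_ocp_if below names a master and a slave hwmod, the
+ * interface clock needed to reach the slave, and optionally the slave's
+ * address space on that interconnect; the per-hwmod slaves[]/masters[]
+ * arrays then tie these interfaces back to the modules.
+ */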
+
+/*
+ * 'dmm' class
+ * instance(s): dmm
+ */
+static struct omap_hwmod_class omap44xx_dmm_hwmod_class = {
+ .name = "dmm",
+};
+
+/* dmm interface data */
+/* l3_main_1 -> dmm */
+static struct omap_hwmod_ocp_if omap44xx_l3_main_1__dmm = {
+ .master = &omap44xx_l3_main_1_hwmod,
+ .slave = &omap44xx_dmm_hwmod,
+ .clk = "l3_div_ck",
+ .user = OCP_USER_MPU | OCP_USER_SDMA,
+};
+
+/* mpu -> dmm */
+static struct omap_hwmod_ocp_if omap44xx_mpu__dmm = {
+ .master = &omap44xx_mpu_hwmod,
+ .slave = &omap44xx_dmm_hwmod,
+ .clk = "l3_div_ck",
+ .user = OCP_USER_MPU | OCP_USER_SDMA,
+};
+
+/* dmm slave ports */
+static struct omap_hwmod_ocp_if *omap44xx_dmm_slaves[] = {
+ &omap44xx_l3_main_1__dmm,
+ &omap44xx_mpu__dmm,
+};
+
+static struct omap_hwmod_irq_info omap44xx_dmm_irqs[] = {
+ { .irq = 113 + OMAP44XX_IRQ_GIC_START },
+};
+
+static struct omap_hwmod omap44xx_dmm_hwmod = {
+ .name = "dmm",
+ .class = &omap44xx_dmm_hwmod_class,
+ .slaves = omap44xx_dmm_slaves,
+ .slaves_cnt = ARRAY_SIZE(omap44xx_dmm_slaves),
+ .mpu_irqs = omap44xx_dmm_irqs,
+ .mpu_irqs_cnt = ARRAY_SIZE(omap44xx_dmm_irqs),
+ .omap_chip = OMAP_CHIP_INIT(CHIP_IS_OMAP4430),
+};
+
+/*
+ * 'emif_fw' class
+ * instance(s): emif_fw
+ */
+static struct omap_hwmod_class omap44xx_emif_fw_hwmod_class = {
+ .name = "emif_fw",
+};
+
+/* emif_fw interface data */
+/* dmm -> emif_fw */
+static struct omap_hwmod_ocp_if omap44xx_dmm__emif_fw = {
+ .master = &omap44xx_dmm_hwmod,
+ .slave = &omap44xx_emif_fw_hwmod,
+ .clk = "l3_div_ck",
+ .user = OCP_USER_MPU | OCP_USER_SDMA,
+};
+
+/* l4_cfg -> emif_fw */
+static struct omap_hwmod_ocp_if omap44xx_l4_cfg__emif_fw = {
+ .master = &omap44xx_l4_cfg_hwmod,
+ .slave = &omap44xx_emif_fw_hwmod,
+ .clk = "l4_div_ck",
+ .user = OCP_USER_MPU | OCP_USER_SDMA,
+};
+
+/* emif_fw slave ports */
+static struct omap_hwmod_ocp_if *omap44xx_emif_fw_slaves[] = {
+ &omap44xx_dmm__emif_fw,
+ &omap44xx_l4_cfg__emif_fw,
+};
+
+static struct omap_hwmod omap44xx_emif_fw_hwmod = {
+ .name = "emif_fw",
+ .class = &omap44xx_emif_fw_hwmod_class,
+ .slaves = omap44xx_emif_fw_slaves,
+ .slaves_cnt = ARRAY_SIZE(omap44xx_emif_fw_slaves),
+ .omap_chip = OMAP_CHIP_INIT(CHIP_IS_OMAP4430),
+};
+
+/*
+ * 'l3' class
+ * instance(s): l3_instr, l3_main_1, l3_main_2, l3_main_3
+ */
+static struct omap_hwmod_class omap44xx_l3_hwmod_class = {
+ .name = "l3",
+};
+
+/* l3_instr interface data */
+/* l3_main_3 -> l3_instr */
+static struct omap_hwmod_ocp_if omap44xx_l3_main_3__l3_instr = {
+ .master = &omap44xx_l3_main_3_hwmod,
+ .slave = &omap44xx_l3_instr_hwmod,
+ .clk = "l3_div_ck",
+ .user = OCP_USER_MPU | OCP_USER_SDMA,
+};
+
+/* l3_instr slave ports */
+static struct omap_hwmod_ocp_if *omap44xx_l3_instr_slaves[] = {
+ &omap44xx_l3_main_3__l3_instr,
+};
+
+static struct omap_hwmod omap44xx_l3_instr_hwmod = {
+ .name = "l3_instr",
+ .class = &omap44xx_l3_hwmod_class,
+ .slaves = omap44xx_l3_instr_slaves,
+ .slaves_cnt = ARRAY_SIZE(omap44xx_l3_instr_slaves),
+ .omap_chip = OMAP_CHIP_INIT(CHIP_IS_OMAP4430),
+};
+
+/* l3_main_2 -> l3_main_1 */
+static struct omap_hwmod_ocp_if omap44xx_l3_main_2__l3_main_1 = {
+ .master = &omap44xx_l3_main_2_hwmod,
+ .slave = &omap44xx_l3_main_1_hwmod,
+ .clk = "l3_div_ck",
+ .user = OCP_USER_MPU | OCP_USER_SDMA,
+};
+
+/* l4_cfg -> l3_main_1 */
+static struct omap_hwmod_ocp_if omap44xx_l4_cfg__l3_main_1 = {
+ .master = &omap44xx_l4_cfg_hwmod,
+ .slave = &omap44xx_l3_main_1_hwmod,
+ .clk = "l4_div_ck",
+ .user = OCP_USER_MPU | OCP_USER_SDMA,
+};
+
+/* mpu -> l3_main_1 */
+static struct omap_hwmod_ocp_if omap44xx_mpu__l3_main_1 = {
+ .master = &omap44xx_mpu_hwmod,
+ .slave = &omap44xx_l3_main_1_hwmod,
+ .clk = "l3_div_ck",
+ .user = OCP_USER_MPU | OCP_USER_SDMA,
+};
+
+/* l3_main_1 slave ports */
+static struct omap_hwmod_ocp_if *omap44xx_l3_main_1_slaves[] = {
+ &omap44xx_l3_main_2__l3_main_1,
+ &omap44xx_l4_cfg__l3_main_1,
+ &omap44xx_mpu__l3_main_1,
+};
+
+static struct omap_hwmod omap44xx_l3_main_1_hwmod = {
+ .name = "l3_main_1",
+ .class = &omap44xx_l3_hwmod_class,
+ .slaves = omap44xx_l3_main_1_slaves,
+ .slaves_cnt = ARRAY_SIZE(omap44xx_l3_main_1_slaves),
+ .omap_chip = OMAP_CHIP_INIT(CHIP_IS_OMAP4430),
+};
+
+/* l3_main_2 interface data */
+/* l3_main_1 -> l3_main_2 */
+static struct omap_hwmod_ocp_if omap44xx_l3_main_1__l3_main_2 = {
+ .master = &omap44xx_l3_main_1_hwmod,
+ .slave = &omap44xx_l3_main_2_hwmod,
+ .clk = "l3_div_ck",
+ .user = OCP_USER_MPU | OCP_USER_SDMA,
+};
+
+/* l4_cfg -> l3_main_2 */
+static struct omap_hwmod_ocp_if omap44xx_l4_cfg__l3_main_2 = {
+ .master = &omap44xx_l4_cfg_hwmod,
+ .slave = &omap44xx_l3_main_2_hwmod,
+ .clk = "l4_div_ck",
+ .user = OCP_USER_MPU | OCP_USER_SDMA,
+};
+
+/* l3_main_2 slave ports */
+static struct omap_hwmod_ocp_if *omap44xx_l3_main_2_slaves[] = {
+ &omap44xx_l3_main_1__l3_main_2,
+ &omap44xx_l4_cfg__l3_main_2,
+};
+
+static struct omap_hwmod omap44xx_l3_main_2_hwmod = {
+ .name = "l3_main_2",
+ .class = &omap44xx_l3_hwmod_class,
+ .slaves = omap44xx_l3_main_2_slaves,
+ .slaves_cnt = ARRAY_SIZE(omap44xx_l3_main_2_slaves),
+ .omap_chip = OMAP_CHIP_INIT(CHIP_IS_OMAP4430),
+};
+
+/* l3_main_3 interface data */
+/* l3_main_1 -> l3_main_3 */
+static struct omap_hwmod_ocp_if omap44xx_l3_main_1__l3_main_3 = {
+ .master = &omap44xx_l3_main_1_hwmod,
+ .slave = &omap44xx_l3_main_3_hwmod,
+ .clk = "l3_div_ck",
+ .user = OCP_USER_MPU | OCP_USER_SDMA,
+};
+
+/* l3_main_2 -> l3_main_3 */
+static struct omap_hwmod_ocp_if omap44xx_l3_main_2__l3_main_3 = {
+ .master = &omap44xx_l3_main_2_hwmod,
+ .slave = &omap44xx_l3_main_3_hwmod,
+ .clk = "l3_div_ck",
+ .user = OCP_USER_MPU | OCP_USER_SDMA,
+};
+
+/* l4_cfg -> l3_main_3 */
+static struct omap_hwmod_ocp_if omap44xx_l4_cfg__l3_main_3 = {
+ .master = &omap44xx_l4_cfg_hwmod,
+ .slave = &omap44xx_l3_main_3_hwmod,
+ .clk = "l4_div_ck",
+ .user = OCP_USER_MPU | OCP_USER_SDMA,
+};
+
+/* l3_main_3 slave ports */
+static struct omap_hwmod_ocp_if *omap44xx_l3_main_3_slaves[] = {
+ &omap44xx_l3_main_1__l3_main_3,
+ &omap44xx_l3_main_2__l3_main_3,
+ &omap44xx_l4_cfg__l3_main_3,
+};
+
+static struct omap_hwmod omap44xx_l3_main_3_hwmod = {
+ .name = "l3_main_3",
+ .class = &omap44xx_l3_hwmod_class,
+ .slaves = omap44xx_l3_main_3_slaves,
+ .slaves_cnt = ARRAY_SIZE(omap44xx_l3_main_3_slaves),
+ .omap_chip = OMAP_CHIP_INIT(CHIP_IS_OMAP4430),
+};
+
+/*
+ * 'l4' class
+ * instance(s): l4_abe, l4_cfg, l4_per, l4_wkup
+ */
+static struct omap_hwmod_class omap44xx_l4_hwmod_class = {
+ .name = "l4",
+};
+
+/* l4_abe interface data */
+/* l3_main_1 -> l4_abe */
+static struct omap_hwmod_ocp_if omap44xx_l3_main_1__l4_abe = {
+ .master = &omap44xx_l3_main_1_hwmod,
+ .slave = &omap44xx_l4_abe_hwmod,
+ .clk = "l3_div_ck",
+ .user = OCP_USER_MPU | OCP_USER_SDMA,
+};
+
+/* mpu -> l4_abe */
+static struct omap_hwmod_ocp_if omap44xx_mpu__l4_abe = {
+ .master = &omap44xx_mpu_hwmod,
+ .slave = &omap44xx_l4_abe_hwmod,
+ .clk = "ocp_abe_iclk",
+ .user = OCP_USER_MPU | OCP_USER_SDMA,
+};
+
+/* l4_abe slave ports */
+static struct omap_hwmod_ocp_if *omap44xx_l4_abe_slaves[] = {
+ &omap44xx_l3_main_1__l4_abe,
+ &omap44xx_mpu__l4_abe,
+};
+
+static struct omap_hwmod omap44xx_l4_abe_hwmod = {
+ .name = "l4_abe",
+ .class = &omap44xx_l4_hwmod_class,
+ .slaves = omap44xx_l4_abe_slaves,
+ .slaves_cnt = ARRAY_SIZE(omap44xx_l4_abe_slaves),
+ .omap_chip = OMAP_CHIP_INIT(CHIP_IS_OMAP4430),
+};
+
+/* l4_cfg interface data */
+/* l3_main_1 -> l4_cfg */
+static struct omap_hwmod_ocp_if omap44xx_l3_main_1__l4_cfg = {
+ .master = &omap44xx_l3_main_1_hwmod,
+ .slave = &omap44xx_l4_cfg_hwmod,
+ .clk = "l3_div_ck",
+ .user = OCP_USER_MPU | OCP_USER_SDMA,
+};
+
+/* l4_cfg slave ports */
+static struct omap_hwmod_ocp_if *omap44xx_l4_cfg_slaves[] = {
+ &omap44xx_l3_main_1__l4_cfg,
+};
+
+static struct omap_hwmod omap44xx_l4_cfg_hwmod = {
+ .name = "l4_cfg",
+ .class = &omap44xx_l4_hwmod_class,
+ .slaves = omap44xx_l4_cfg_slaves,
+ .slaves_cnt = ARRAY_SIZE(omap44xx_l4_cfg_slaves),
+ .omap_chip = OMAP_CHIP_INIT(CHIP_IS_OMAP4430),
+};
+
+/* l4_per interface data */
+/* l3_main_2 -> l4_per */
+static struct omap_hwmod_ocp_if omap44xx_l3_main_2__l4_per = {
+ .master = &omap44xx_l3_main_2_hwmod,
+ .slave = &omap44xx_l4_per_hwmod,
+ .clk = "l3_div_ck",
+ .user = OCP_USER_MPU | OCP_USER_SDMA,
+};
+
+/* l4_per slave ports */
+static struct omap_hwmod_ocp_if *omap44xx_l4_per_slaves[] = {
+ &omap44xx_l3_main_2__l4_per,
+};
+
+static struct omap_hwmod omap44xx_l4_per_hwmod = {
+ .name = "l4_per",
+ .class = &omap44xx_l4_hwmod_class,
+ .slaves = omap44xx_l4_per_slaves,
+ .slaves_cnt = ARRAY_SIZE(omap44xx_l4_per_slaves),
+ .omap_chip = OMAP_CHIP_INIT(CHIP_IS_OMAP4430),
+};
+
+/* l4_wkup interface data */
+/* l4_cfg -> l4_wkup */
+static struct omap_hwmod_ocp_if omap44xx_l4_cfg__l4_wkup = {
+ .master = &omap44xx_l4_cfg_hwmod,
+ .slave = &omap44xx_l4_wkup_hwmod,
+ .clk = "l4_div_ck",
+ .user = OCP_USER_MPU | OCP_USER_SDMA,
+};
+
+/* l4_wkup slave ports */
+static struct omap_hwmod_ocp_if *omap44xx_l4_wkup_slaves[] = {
+ &omap44xx_l4_cfg__l4_wkup,
+};
+
+static struct omap_hwmod omap44xx_l4_wkup_hwmod = {
+ .name = "l4_wkup",
+ .class = &omap44xx_l4_hwmod_class,
+ .slaves = omap44xx_l4_wkup_slaves,
+ .slaves_cnt = ARRAY_SIZE(omap44xx_l4_wkup_slaves),
+ .omap_chip = OMAP_CHIP_INIT(CHIP_IS_OMAP4430),
+};
+
+/*
+ * 'mpu_bus' class
+ * instance(s): mpu_private
+ */
+static struct omap_hwmod_class omap44xx_mpu_bus_hwmod_class = {
+ .name = "mpu_bus",
+};
+
+/* mpu_private interface data */
+/* mpu -> mpu_private */
+static struct omap_hwmod_ocp_if omap44xx_mpu__mpu_private = {
+ .master = &omap44xx_mpu_hwmod,
+ .slave = &omap44xx_mpu_private_hwmod,
+ .clk = "l3_div_ck",
+ .user = OCP_USER_MPU | OCP_USER_SDMA,
+};
+
+/* mpu_private slave ports */
+static struct omap_hwmod_ocp_if *omap44xx_mpu_private_slaves[] = {
+ &omap44xx_mpu__mpu_private,
+};
+
+static struct omap_hwmod omap44xx_mpu_private_hwmod = {
+ .name = "mpu_private",
+ .class = &omap44xx_mpu_bus_hwmod_class,
+ .slaves = omap44xx_mpu_private_slaves,
+ .slaves_cnt = ARRAY_SIZE(omap44xx_mpu_private_slaves),
+ .omap_chip = OMAP_CHIP_INIT(CHIP_IS_OMAP4430),
+};
+
+/*
+ * 'mpu' class
+ * mpu sub-system
+ */
+
+static struct omap_hwmod_class omap44xx_mpu_hwmod_class = {
+ .name = "mpu",
+};
+
+/* mpu */
+static struct omap_hwmod_irq_info omap44xx_mpu_irqs[] = {
+ { .name = "pl310", .irq = 0 + OMAP44XX_IRQ_GIC_START },
+ { .name = "cti0", .irq = 1 + OMAP44XX_IRQ_GIC_START },
+ { .name = "cti1", .irq = 2 + OMAP44XX_IRQ_GIC_START },
+};
+
+/* mpu master ports */
+static struct omap_hwmod_ocp_if *omap44xx_mpu_masters[] = {
+ &omap44xx_mpu__l3_main_1,
+ &omap44xx_mpu__l4_abe,
+ &omap44xx_mpu__dmm,
+};
+
+static struct omap_hwmod omap44xx_mpu_hwmod = {
+ .name = "mpu",
+ .class = &omap44xx_mpu_hwmod_class,
+ .flags = (HWMOD_INIT_NO_IDLE | HWMOD_INIT_NO_RESET),
+ .mpu_irqs = omap44xx_mpu_irqs,
+ .mpu_irqs_cnt = ARRAY_SIZE(omap44xx_mpu_irqs),
+ .main_clk = "dpll_mpu_m2_ck",
+ .prcm = {
+ .omap4 = {
+ .clkctrl_reg = OMAP4430_CM_MPU_MPU_CLKCTRL,
+ },
+ },
+ .masters = omap44xx_mpu_masters,
+ .masters_cnt = ARRAY_SIZE(omap44xx_mpu_masters),
+ .omap_chip = OMAP_CHIP_INIT(CHIP_IS_OMAP4430),
+};
+
+/*
+ * 'wd_timer' class
+ * 32-bit watchdog upward counter that generates a pulse on the reset pin on
+ * an overflow condition
+ */
+
+static struct omap_hwmod_class_sysconfig omap44xx_wd_timer_sysc = {
+ .rev_offs = 0x0000,
+ .sysc_offs = 0x0010,
+ .syss_offs = 0x0014,
+ .sysc_flags = (SYSC_HAS_SIDLEMODE | SYSC_HAS_EMUFREE |
+ SYSC_HAS_SOFTRESET),
+ .idlemodes = (SIDLE_FORCE | SIDLE_NO | SIDLE_SMART),
+ .sysc_fields = &omap_hwmod_sysc_type1,
+};
+
+/*
+ * 'uart' class
+ * universal asynchronous receiver/transmitter (uart)
+ */
+
+static struct omap_hwmod_class_sysconfig omap44xx_uart_sysc = {
+ .rev_offs = 0x0050,
+ .sysc_offs = 0x0054,
+ .syss_offs = 0x0058,
+ .sysc_flags = (SYSC_HAS_ENAWAKEUP | SYSC_HAS_SIDLEMODE |
+ SYSC_HAS_SOFTRESET | SYSC_HAS_AUTOIDLE),
+ .idlemodes = (SIDLE_FORCE | SIDLE_NO | SIDLE_SMART),
+ .sysc_fields = &omap_hwmod_sysc_type1,
+};
+
+static struct omap_hwmod_class omap44xx_wd_timer_hwmod_class = {
+ .name = "wd_timer",
+ .sysc = &omap44xx_wd_timer_sysc,
+};
+
+/* wd_timer2 */
+static struct omap_hwmod omap44xx_wd_timer2_hwmod;
+static struct omap_hwmod_irq_info omap44xx_wd_timer2_irqs[] = {
+ { .irq = 80 + OMAP44XX_IRQ_GIC_START },
+};
+
+static struct omap_hwmod_addr_space omap44xx_wd_timer2_addrs[] = {
+ {
+ .pa_start = 0x4a314000,
+ .pa_end = 0x4a31407f,
+ .flags = ADDR_TYPE_RT
+ },
+};
+
+static struct omap_hwmod_class omap44xx_uart_hwmod_class = {
+ .name = "uart",
+ .sysc = &omap44xx_uart_sysc,
+};
+
+/* uart1 */
+static struct omap_hwmod omap44xx_uart1_hwmod;
+static struct omap_hwmod_irq_info omap44xx_uart1_irqs[] = {
+ { .irq = 72 + OMAP44XX_IRQ_GIC_START },
+};
+
+static struct omap_hwmod_dma_info omap44xx_uart1_sdma_reqs[] = {
+ { .name = "tx", .dma_req = 48 + OMAP44XX_DMA_REQ_START },
+ { .name = "rx", .dma_req = 49 + OMAP44XX_DMA_REQ_START },
+};
+
+static struct omap_hwmod_addr_space omap44xx_uart1_addrs[] = {
+ {
+ .pa_start = 0x4806a000,
+ .pa_end = 0x4806a0ff,
+ .flags = ADDR_TYPE_RT
+ },
+};
+
+/* l4_per -> uart1 */
+static struct omap_hwmod_ocp_if omap44xx_l4_per__uart1 = {
+ .master = &omap44xx_l4_per_hwmod,
+ .slave = &omap44xx_uart1_hwmod,
+ .clk = "l4_div_ck",
+ .addr = omap44xx_uart1_addrs,
+ .addr_cnt = ARRAY_SIZE(omap44xx_uart1_addrs),
+ .user = OCP_USER_MPU | OCP_USER_SDMA,
+};
+
+/* uart1 slave ports */
+static struct omap_hwmod_ocp_if *omap44xx_uart1_slaves[] = {
+ &omap44xx_l4_per__uart1,
+};
+
+static struct omap_hwmod omap44xx_uart1_hwmod = {
+ .name = "uart1",
+ .class = &omap44xx_uart_hwmod_class,
+ .mpu_irqs = omap44xx_uart1_irqs,
+ .mpu_irqs_cnt = ARRAY_SIZE(omap44xx_uart1_irqs),
+ .sdma_reqs = omap44xx_uart1_sdma_reqs,
+ .sdma_reqs_cnt = ARRAY_SIZE(omap44xx_uart1_sdma_reqs),
+ .main_clk = "uart1_fck",
+ .prcm = {
+ .omap4 = {
+ .clkctrl_reg = OMAP4430_CM_L4PER_UART1_CLKCTRL,
+ },
+ },
+ .slaves = omap44xx_uart1_slaves,
+ .slaves_cnt = ARRAY_SIZE(omap44xx_uart1_slaves),
+ .omap_chip = OMAP_CHIP_INIT(CHIP_IS_OMAP4430),
+};
+
+/* uart2 */
+static struct omap_hwmod omap44xx_uart2_hwmod;
+static struct omap_hwmod_irq_info omap44xx_uart2_irqs[] = {
+ { .irq = 73 + OMAP44XX_IRQ_GIC_START },
+};
+
+static struct omap_hwmod_dma_info omap44xx_uart2_sdma_reqs[] = {
+ { .name = "tx", .dma_req = 50 + OMAP44XX_DMA_REQ_START },
+ { .name = "rx", .dma_req = 51 + OMAP44XX_DMA_REQ_START },
+};
+
+static struct omap_hwmod_addr_space omap44xx_uart2_addrs[] = {
+ {
+ .pa_start = 0x4806c000,
+ .pa_end = 0x4806c0ff,
+ .flags = ADDR_TYPE_RT
+ },
+};
+
+/* l4_wkup -> wd_timer2 */
+static struct omap_hwmod_ocp_if omap44xx_l4_wkup__wd_timer2 = {
+ .master = &omap44xx_l4_wkup_hwmod,
+ .slave = &omap44xx_wd_timer2_hwmod,
+ .clk = "l4_wkup_clk_mux_ck",
+ .addr = omap44xx_wd_timer2_addrs,
+ .addr_cnt = ARRAY_SIZE(omap44xx_wd_timer2_addrs),
+ .user = OCP_USER_MPU | OCP_USER_SDMA,
+};
+
+/* wd_timer2 slave ports */
+static struct omap_hwmod_ocp_if *omap44xx_wd_timer2_slaves[] = {
+ &omap44xx_l4_wkup__wd_timer2,
+};
+
+static struct omap_hwmod omap44xx_wd_timer2_hwmod = {
+ .name = "wd_timer2",
+ .class = &omap44xx_wd_timer_hwmod_class,
+ .mpu_irqs = omap44xx_wd_timer2_irqs,
+ .mpu_irqs_cnt = ARRAY_SIZE(omap44xx_wd_timer2_irqs),
+ .main_clk = "wd_timer2_fck",
+ .prcm = {
+ .omap4 = {
+ .clkctrl_reg = OMAP4430_CM_WKUP_WDT2_CLKCTRL,
+ },
+ },
+ .slaves = omap44xx_wd_timer2_slaves,
+ .slaves_cnt = ARRAY_SIZE(omap44xx_wd_timer2_slaves),
+ .omap_chip = OMAP_CHIP_INIT(CHIP_IS_OMAP4430),
+};
+
+/* wd_timer3 */
+static struct omap_hwmod omap44xx_wd_timer3_hwmod;
+static struct omap_hwmod_irq_info omap44xx_wd_timer3_irqs[] = {
+ { .irq = 36 + OMAP44XX_IRQ_GIC_START },
+};
+
+static struct omap_hwmod_addr_space omap44xx_wd_timer3_addrs[] = {
+ {
+ .pa_start = 0x40130000,
+ .pa_end = 0x4013007f,
+ .flags = ADDR_TYPE_RT
+ },
+};
+
+/* l4_per -> uart2 */
+static struct omap_hwmod_ocp_if omap44xx_l4_per__uart2 = {
+ .master = &omap44xx_l4_per_hwmod,
+ .slave = &omap44xx_uart2_hwmod,
+ .clk = "l4_div_ck",
+ .addr = omap44xx_uart2_addrs,
+ .addr_cnt = ARRAY_SIZE(omap44xx_uart2_addrs),
+ .user = OCP_USER_MPU | OCP_USER_SDMA,
+};
+
+/* uart2 slave ports */
+static struct omap_hwmod_ocp_if *omap44xx_uart2_slaves[] = {
+ &omap44xx_l4_per__uart2,
+};
+
+static struct omap_hwmod omap44xx_uart2_hwmod = {
+ .name = "uart2",
+ .class = &omap44xx_uart_hwmod_class,
+ .mpu_irqs = omap44xx_uart2_irqs,
+ .mpu_irqs_cnt = ARRAY_SIZE(omap44xx_uart2_irqs),
+ .sdma_reqs = omap44xx_uart2_sdma_reqs,
+ .sdma_reqs_cnt = ARRAY_SIZE(omap44xx_uart2_sdma_reqs),
+ .main_clk = "uart2_fck",
+ .prcm = {
+ .omap4 = {
+ .clkctrl_reg = OMAP4430_CM_L4PER_UART2_CLKCTRL,
+ },
+ },
+ .slaves = omap44xx_uart2_slaves,
+ .slaves_cnt = ARRAY_SIZE(omap44xx_uart2_slaves),
+ .omap_chip = OMAP_CHIP_INIT(CHIP_IS_OMAP4430),
+};
+
+/* uart3 */
+static struct omap_hwmod omap44xx_uart3_hwmod;
+static struct omap_hwmod_irq_info omap44xx_uart3_irqs[] = {
+ { .irq = 74 + OMAP44XX_IRQ_GIC_START },
+};
+
+static struct omap_hwmod_dma_info omap44xx_uart3_sdma_reqs[] = {
+ { .name = "tx", .dma_req = 52 + OMAP44XX_DMA_REQ_START },
+ { .name = "rx", .dma_req = 53 + OMAP44XX_DMA_REQ_START },
+};
+
+static struct omap_hwmod_addr_space omap44xx_uart3_addrs[] = {
+ {
+ .pa_start = 0x48020000,
+ .pa_end = 0x480200ff,
+ .flags = ADDR_TYPE_RT
+ },
+};
+
+/* l4_abe -> wd_timer3 */
+static struct omap_hwmod_ocp_if omap44xx_l4_abe__wd_timer3 = {
+ .master = &omap44xx_l4_abe_hwmod,
+ .slave = &omap44xx_wd_timer3_hwmod,
+ .clk = "ocp_abe_iclk",
+ .addr = omap44xx_wd_timer3_addrs,
+ .addr_cnt = ARRAY_SIZE(omap44xx_wd_timer3_addrs),
+ .user = OCP_USER_MPU,
+};
+
+/* l4_abe -> wd_timer3 (dma) */
+static struct omap_hwmod_addr_space omap44xx_wd_timer3_dma_addrs[] = {
+ {
+ .pa_start = 0x49030000,
+ .pa_end = 0x4903007f,
+ .flags = ADDR_TYPE_RT
+ },
+};
+
+/* l4_per -> uart3 */
+static struct omap_hwmod_ocp_if omap44xx_l4_per__uart3 = {
+ .master = &omap44xx_l4_per_hwmod,
+ .slave = &omap44xx_uart3_hwmod,
+ .clk = "l4_div_ck",
+ .addr = omap44xx_uart3_addrs,
+ .addr_cnt = ARRAY_SIZE(omap44xx_uart3_addrs),
+ .user = OCP_USER_MPU | OCP_USER_SDMA,
+};
+
+/* uart3 slave ports */
+static struct omap_hwmod_ocp_if *omap44xx_uart3_slaves[] = {
+ &omap44xx_l4_per__uart3,
+};
+
+static struct omap_hwmod omap44xx_uart3_hwmod = {
+ .name = "uart3",
+ .class = &omap44xx_uart_hwmod_class,
+ .flags = (HWMOD_INIT_NO_IDLE | HWMOD_INIT_NO_RESET),
+ .mpu_irqs = omap44xx_uart3_irqs,
+ .mpu_irqs_cnt = ARRAY_SIZE(omap44xx_uart3_irqs),
+ .sdma_reqs = omap44xx_uart3_sdma_reqs,
+ .sdma_reqs_cnt = ARRAY_SIZE(omap44xx_uart3_sdma_reqs),
+ .main_clk = "uart3_fck",
+ .prcm = {
+ .omap4 = {
+ .clkctrl_reg = OMAP4430_CM_L4PER_UART3_CLKCTRL,
+ },
+ },
+ .slaves = omap44xx_uart3_slaves,
+ .slaves_cnt = ARRAY_SIZE(omap44xx_uart3_slaves),
+ .omap_chip = OMAP_CHIP_INIT(CHIP_IS_OMAP4430),
+};
+
+/* uart4 */
+static struct omap_hwmod omap44xx_uart4_hwmod;
+static struct omap_hwmod_irq_info omap44xx_uart4_irqs[] = {
+ { .irq = 70 + OMAP44XX_IRQ_GIC_START },
+};
+
+static struct omap_hwmod_dma_info omap44xx_uart4_sdma_reqs[] = {
+ { .name = "tx", .dma_req = 54 + OMAP44XX_DMA_REQ_START },
+ { .name = "rx", .dma_req = 55 + OMAP44XX_DMA_REQ_START },
+};
+
+static struct omap_hwmod_addr_space omap44xx_uart4_addrs[] = {
+ {
+ .pa_start = 0x4806e000,
+ .pa_end = 0x4806e0ff,
+ .flags = ADDR_TYPE_RT
+ },
+};
+
+static struct omap_hwmod_ocp_if omap44xx_l4_abe__wd_timer3_dma = {
+ .master = &omap44xx_l4_abe_hwmod,
+ .slave = &omap44xx_wd_timer3_hwmod,
+ .clk = "ocp_abe_iclk",
+ .addr = omap44xx_wd_timer3_dma_addrs,
+ .addr_cnt = ARRAY_SIZE(omap44xx_wd_timer3_dma_addrs),
+ .user = OCP_USER_SDMA,
+};
+
+/* wd_timer3 slave ports */
+static struct omap_hwmod_ocp_if *omap44xx_wd_timer3_slaves[] = {
+ &omap44xx_l4_abe__wd_timer3,
+ &omap44xx_l4_abe__wd_timer3_dma,
+};
+
+static struct omap_hwmod omap44xx_wd_timer3_hwmod = {
+ .name = "wd_timer3",
+ .class = &omap44xx_wd_timer_hwmod_class,
+ .mpu_irqs = omap44xx_wd_timer3_irqs,
+ .mpu_irqs_cnt = ARRAY_SIZE(omap44xx_wd_timer3_irqs),
+ .main_clk = "wd_timer3_fck",
+ .prcm = {
+ .omap4 = {
+ .clkctrl_reg = OMAP4430_CM1_ABE_WDT3_CLKCTRL,
+ },
+ },
+ .slaves = omap44xx_wd_timer3_slaves,
+ .slaves_cnt = ARRAY_SIZE(omap44xx_wd_timer3_slaves),
+ .omap_chip = OMAP_CHIP_INIT(CHIP_IS_OMAP4430),
+};
+
+/* l4_per -> uart4 */
+static struct omap_hwmod_ocp_if omap44xx_l4_per__uart4 = {
+ .master = &omap44xx_l4_per_hwmod,
+ .slave = &omap44xx_uart4_hwmod,
+ .clk = "l4_div_ck",
+ .addr = omap44xx_uart4_addrs,
+ .addr_cnt = ARRAY_SIZE(omap44xx_uart4_addrs),
+ .user = OCP_USER_MPU | OCP_USER_SDMA,
+};
+
+/* uart4 slave ports */
+static struct omap_hwmod_ocp_if *omap44xx_uart4_slaves[] = {
+ &omap44xx_l4_per__uart4,
+};
+
+static struct omap_hwmod omap44xx_uart4_hwmod = {
+ .name = "uart4",
+ .class = &omap44xx_uart_hwmod_class,
+ .mpu_irqs = omap44xx_uart4_irqs,
+ .mpu_irqs_cnt = ARRAY_SIZE(omap44xx_uart4_irqs),
+ .sdma_reqs = omap44xx_uart4_sdma_reqs,
+ .sdma_reqs_cnt = ARRAY_SIZE(omap44xx_uart4_sdma_reqs),
+ .main_clk = "uart4_fck",
+ .prcm = {
+ .omap4 = {
+ .clkctrl_reg = OMAP4430_CM_L4PER_UART4_CLKCTRL,
+ },
+ },
+ .slaves = omap44xx_uart4_slaves,
+ .slaves_cnt = ARRAY_SIZE(omap44xx_uart4_slaves),
+ .omap_chip = OMAP_CHIP_INIT(CHIP_IS_OMAP4430),
+};
+
+static __initdata struct omap_hwmod *omap44xx_hwmods[] = {
+ /* dmm class */
+ &omap44xx_dmm_hwmod,
+ /* emif_fw class */
+ &omap44xx_emif_fw_hwmod,
+ /* l3 class */
+ &omap44xx_l3_instr_hwmod,
+ &omap44xx_l3_main_1_hwmod,
+ &omap44xx_l3_main_2_hwmod,
+ &omap44xx_l3_main_3_hwmod,
+ /* l4 class */
+ &omap44xx_l4_abe_hwmod,
+ &omap44xx_l4_cfg_hwmod,
+ &omap44xx_l4_per_hwmod,
+ &omap44xx_l4_wkup_hwmod,
+ /* mpu_bus class */
+ &omap44xx_mpu_private_hwmod,
+
+ /* mpu class */
+ &omap44xx_mpu_hwmod,
+ /* wd_timer class */
+ &omap44xx_wd_timer2_hwmod,
+ &omap44xx_wd_timer3_hwmod,
+
+ /* uart class */
+ &omap44xx_uart1_hwmod,
+ &omap44xx_uart2_hwmod,
+ &omap44xx_uart3_hwmod,
+ &omap44xx_uart4_hwmod,
+ NULL,
+};
+
+int __init omap44xx_hwmod_init(void)
+{
+ return omap_hwmod_init(omap44xx_hwmods);
+}
+
diff --git a/arch/arm/mach-omap2/pm-debug.c b/arch/arm/mach-omap2/pm-debug.c
index 723b44e252fd..5e81517a7af2 100644
--- a/arch/arm/mach-omap2/pm-debug.c
+++ b/arch/arm/mach-omap2/pm-debug.c
@@ -31,12 +31,17 @@
#include <plat/board.h>
#include <plat/powerdomain.h>
#include <plat/clockdomain.h>
+#include <plat/dmtimer.h>
#include "prm.h"
#include "cm.h"
#include "pm.h"
int omap2_pm_debug;
+u32 enable_off_mode;
+u32 sleep_while_idle;
+u32 wakeup_timer_seconds;
+u32 wakeup_timer_milliseconds;
#define DUMP_PRM_MOD_REG(mod, reg) \
regs[reg_count].name = #mod "." #reg; \
@@ -162,7 +167,7 @@ void omap2_pm_dump(int mode, int resume, unsigned int us)
static void pm_dbg_regset_store(u32 *ptr);
-struct dentry *pm_dbg_dir;
+static struct dentry *pm_dbg_dir;
static int pm_dbg_init_done;
@@ -349,6 +354,23 @@ void pm_dbg_update_time(struct powerdomain *pwrdm, int prev)
pwrdm->timer = t;
}
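+/*
+ * Program the wakeup gptimer to expire after the requested interval.
+ * e.g. with a 32768 Hz timer fclk, a request of 1 s + 500 ms gives
+ * cycles = 32768 + 32768 * 500 / 1000 = 49152, so the timer is loaded
+ * with 0xffffffff - 49152 and overflows (waking the system) ~1.5 s later.
+ */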
+void omap2_pm_wakeup_on_timer(u32 seconds, u32 milliseconds)
+{
+ u32 tick_rate, cycles;
+
+ if (!seconds && !milliseconds)
+ return;
+
+ tick_rate = clk_get_rate(omap_dm_timer_get_fclk(gptimer_wakeup));
+ cycles = tick_rate * seconds + tick_rate * milliseconds / 1000;
+ omap_dm_timer_stop(gptimer_wakeup);
+ omap_dm_timer_set_load_start(gptimer_wakeup, 0, 0xffffffff - cycles);
+
+ pr_info("PM: Resume timer in %u.%03u secs"
+ " (%d ticks at %d ticks/sec.)\n",
+ seconds, milliseconds, cycles, tick_rate);
+}
+
static int clkdm_dbg_show_counter(struct clockdomain *clkdm, void *user)
{
struct seq_file *s = (struct seq_file *)user;
@@ -494,8 +516,10 @@ int pm_dbg_regset_init(int reg_set)
static int pwrdm_suspend_get(void *data, u64 *val)
{
- int ret;
- ret = omap3_pm_get_suspend_state((struct powerdomain *)data);
+ int ret = -EINVAL;
+
+ if (cpu_is_omap34xx())
+ ret = omap3_pm_get_suspend_state((struct powerdomain *)data);
*val = ret;
if (ret >= 0)
@@ -505,7 +529,10 @@ static int pwrdm_suspend_get(void *data, u64 *val)
static int pwrdm_suspend_set(void *data, u64 val)
{
- return omap3_pm_set_suspend_state((struct powerdomain *)data, (int)val);
+ if (cpu_is_omap34xx())
+ return omap3_pm_set_suspend_state(
+ (struct powerdomain *)data, (int)val);
+ return -EINVAL;
}
DEFINE_SIMPLE_ATTRIBUTE(pwrdm_suspend_fops, pwrdm_suspend_get,
@@ -553,8 +580,10 @@ static int option_set(void *data, u64 val)
*option = val;
- if (option == &enable_off_mode)
- omap3_pm_off_mode_enable(val);
+ if (option == &enable_off_mode) {
+ if (cpu_is_omap34xx())
+ omap3_pm_off_mode_enable(val);
+ }
return 0;
}
@@ -609,6 +638,9 @@ static int __init pm_dbg_init(void)
&sleep_while_idle, &pm_dbg_option_fops);
(void) debugfs_create_file("wakeup_timer_seconds", S_IRUGO | S_IWUGO, d,
&wakeup_timer_seconds, &pm_dbg_option_fops);
+ (void) debugfs_create_file("wakeup_timer_milliseconds",
+ S_IRUGO | S_IWUGO, d, &wakeup_timer_milliseconds,
+ &pm_dbg_option_fops);
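+ /*
+ * These option files live under the "pm_debug" debugfs directory, so
+ * e.g. "echo 5 > /sys/kernel/debug/pm_debug/wakeup_timer_seconds"
+ * (assuming debugfs is mounted there) arms a resume timer for the
+ * next suspend.
+ */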
pm_dbg_init_done = 1;
return 0;
diff --git a/arch/arm/mach-omap2/pm.c b/arch/arm/mach-omap2/pm.c
index 68f9f2e95891..59ca03b0e691 100644
--- a/arch/arm/mach-omap2/pm.c
+++ b/arch/arm/mach-omap2/pm.c
@@ -18,11 +18,15 @@
#include <plat/omap_device.h>
#include <plat/common.h>
+#include <plat/powerdomain.h>
+#include <plat/clockdomain.h>
+
static struct omap_device_pm_latency *pm_lats;
static struct device *mpu_dev;
-static struct device *dsp_dev;
+static struct device *iva_dev;
static struct device *l3_dev;
+static struct device *dsp_dev;
struct device *omap2_get_mpuss_device(void)
{
@@ -30,10 +34,10 @@ struct device *omap2_get_mpuss_device(void)
return mpu_dev;
}
-struct device *omap2_get_dsp_device(void)
+struct device *omap2_get_iva_device(void)
{
- WARN_ON_ONCE(!dsp_dev);
- return dsp_dev;
+ WARN_ON_ONCE(!iva_dev);
+ return iva_dev;
}
struct device *omap2_get_l3_device(void)
@@ -42,6 +46,13 @@ struct device *omap2_get_l3_device(void)
return l3_dev;
}
+struct device *omap4_get_dsp_device(void)
+{
+ WARN_ON_ONCE(!dsp_dev);
+ return dsp_dev;
+}
+EXPORT_SYMBOL(omap4_get_dsp_device);
+
/* static int _init_omap_device(struct omap_hwmod *oh, void *user) */
static int _init_omap_device(char *name, struct device **new_dev)
{
@@ -69,8 +80,60 @@ static int _init_omap_device(char *name, struct device **new_dev)
static void omap2_init_processor_devices(void)
{
_init_omap_device("mpu", &mpu_dev);
- _init_omap_device("iva", &dsp_dev);
- _init_omap_device("l3_main", &l3_dev);
+ _init_omap_device("iva", &iva_dev);
+ if (cpu_is_omap44xx()) {
+ _init_omap_device("l3_main_1", &l3_dev);
+ _init_omap_device("dsp", &dsp_dev);
+ } else {
+ _init_omap_device("l3_main", &l3_dev);
+ }
+}
+
+/*
+ * This sets the pwrdm state (other than mpu & core). Currently only ON &
+ * RET are supported. The function assumes that the clkdm doesn't have
+ * hw_sup mode enabled.
+ */
+int omap_set_pwrdm_state(struct powerdomain *pwrdm, u32 state)
+{
+ u32 cur_state;
+ int sleep_switch = 0;
+ int ret = 0;
+
+ if (pwrdm == NULL || IS_ERR(pwrdm))
+ return -EINVAL;
+
+ while (!(pwrdm->pwrsts & (1 << state))) {
+ if (state == PWRDM_POWER_OFF)
+ return ret;
+ state--;
+ }
+
+ cur_state = pwrdm_read_next_pwrst(pwrdm);
+ if (cur_state == state)
+ return ret;
+
+ if (pwrdm_read_pwrst(pwrdm) < PWRDM_POWER_ON) {
+ omap2_clkdm_wakeup(pwrdm->pwrdm_clkdms[0]);
+ sleep_switch = 1;
+ pwrdm_wait_transition(pwrdm);
+ }
+
+ ret = pwrdm_set_next_pwrst(pwrdm, state);
+ if (ret) {
+ printk(KERN_ERR "Unable to set state of powerdomain: %s\n",
+ pwrdm->name);
+ goto err;
+ }
+
+ if (sleep_switch) {
+ omap2_clkdm_allow_idle(pwrdm->pwrdm_clkdms[0]);
+ pwrdm_wait_transition(pwrdm);
+ pwrdm_state_switch(pwrdm);
+ }
+
+err:
+ return ret;
}
static int __init omap2_common_pm_init(void)
diff --git a/arch/arm/mach-omap2/pm.h b/arch/arm/mach-omap2/pm.h
index 3de6ece23fc8..0d75bfd1fdbe 100644
--- a/arch/arm/mach-omap2/pm.h
+++ b/arch/arm/mach-omap2/pm.h
@@ -13,14 +13,11 @@
#include <plat/powerdomain.h>
-extern u32 enable_off_mode;
-extern u32 sleep_while_idle;
-
extern void *omap3_secure_ram_storage;
extern void omap3_pm_off_mode_enable(int);
extern void omap_sram_idle(void);
extern int omap3_can_sleep(void);
-extern int set_pwrdm_state(struct powerdomain *pwrdm, u32 state);
+extern int omap_set_pwrdm_state(struct powerdomain *pwrdm, u32 state);
extern int omap3_idle_init(void);
struct cpuidle_params {
@@ -48,10 +45,16 @@ extern struct omap_dm_timer *gptimer_wakeup;
#ifdef CONFIG_PM_DEBUG
extern void omap2_pm_dump(int mode, int resume, unsigned int us);
+extern void omap2_pm_wakeup_on_timer(u32 seconds, u32 milliseconds);
extern int omap2_pm_debug;
+extern u32 enable_off_mode;
+extern u32 sleep_while_idle;
#else
#define omap2_pm_dump(mode, resume, us) do {} while (0);
+#define omap2_pm_wakeup_on_timer(seconds, milliseconds) do {} while (0);
#define omap2_pm_debug 0
+#define enable_off_mode 0
+#define sleep_while_idle 0
#endif
#if defined(CONFIG_CPU_IDLE)
diff --git a/arch/arm/mach-omap2/pm24xx.c b/arch/arm/mach-omap2/pm24xx.c
index 6aeedeacdad8..c85923e56b85 100644
--- a/arch/arm/mach-omap2/pm24xx.c
+++ b/arch/arm/mach-omap2/pm24xx.c
@@ -30,6 +30,7 @@
#include <linux/irq.h>
#include <linux/time.h>
#include <linux/gpio.h>
+#include <linux/console.h>
#include <asm/mach/time.h>
#include <asm/mach/irq.h>
@@ -38,7 +39,6 @@
#include <mach/irqs.h>
#include <plat/clock.h>
#include <plat/sram.h>
-#include <plat/control.h>
#include <plat/dma.h>
#include <plat/board.h>
@@ -48,6 +48,7 @@
#include "cm-regbits-24xx.h"
#include "sdrc.h"
#include "pm.h"
+#include "control.h"
#include <plat/powerdomain.h>
#include <plat/clockdomain.h>
@@ -118,6 +119,10 @@ static void omap2_enter_full_retention(void)
if (omap_irq_pending())
goto no_sleep;
+ /* Block console output in case it is on one of the OMAP UARTs */
+ if (try_acquire_console_sem())
+ goto no_sleep;
+
omap_uart_prepare_idle(0);
omap_uart_prepare_idle(1);
omap_uart_prepare_idle(2);
@@ -131,6 +136,8 @@ static void omap2_enter_full_retention(void)
omap_uart_resume_idle(1);
omap_uart_resume_idle(0);
+ release_console_sem();
+
no_sleep:
if (omap2_pm_debug) {
unsigned long long tmp;
@@ -245,6 +252,8 @@ static int omap2_can_sleep(void)
{
if (omap2_fclks_active())
return 0;
+ if (!omap_uart_can_sleep())
+ return 0;
if (osc_ck->usecount > 1)
return 0;
if (omap_dma_running())
diff --git a/arch/arm/mach-omap2/pm34xx.c b/arch/arm/mach-omap2/pm34xx.c
index 7b03426c72a3..0ec8a04b7473 100644
--- a/arch/arm/mach-omap2/pm34xx.c
+++ b/arch/arm/mach-omap2/pm34xx.c
@@ -28,17 +28,16 @@
#include <linux/clk.h>
#include <linux/delay.h>
#include <linux/slab.h>
+#include <linux/console.h>
#include <plat/sram.h>
#include <plat/clockdomain.h>
#include <plat/powerdomain.h>
-#include <plat/control.h>
#include <plat/serial.h>
#include <plat/sdrc.h>
#include <plat/prcm.h>
#include <plat/gpmc.h>
#include <plat/dma.h>
-#include <plat/dmtimer.h>
#include <asm/tlbflush.h>
@@ -49,16 +48,12 @@
#include "prm.h"
#include "pm.h"
#include "sdrc.h"
+#include "control.h"
/* Scratchpad offsets */
-#define OMAP343X_TABLE_ADDRESS_OFFSET 0x31
-#define OMAP343X_TABLE_VALUE_OFFSET 0x30
-#define OMAP343X_CONTROL_REG_VALUE_OFFSET 0x32
-
-u32 enable_off_mode;
-u32 sleep_while_idle;
-u32 wakeup_timer_seconds;
-u32 wakeup_timer_milliseconds;
+#define OMAP343X_TABLE_ADDRESS_OFFSET 0xc4
+#define OMAP343X_TABLE_VALUE_OFFSET 0xc0
+#define OMAP343X_CONTROL_REG_VALUE_OFFSET 0xc8
struct power_state {
struct powerdomain *pwrdm;
@@ -316,7 +311,7 @@ static void restore_control_register(u32 val)
/* Function to restore the table entry that was modified for enabling MMU */
static void restore_table_entry(void)
{
- u32 *scratchpad_address;
+ void __iomem *scratchpad_address;
u32 previous_value, control_reg_value;
u32 *address;
@@ -351,7 +346,6 @@ void omap_sram_idle(void)
int core_next_state = PWRDM_POWER_ON;
int core_prev_state, per_prev_state;
u32 sdrc_pwr = 0;
- int per_state_modified = 0;
if (!_omap_sram_idle)
return;
@@ -385,30 +379,28 @@ void omap_sram_idle(void)
/* Enable IO-PAD and IO-CHAIN wakeups */
per_next_state = pwrdm_read_next_pwrst(per_pwrdm);
core_next_state = pwrdm_read_next_pwrst(core_pwrdm);
- if (omap3_has_io_wakeup() && \
- (per_next_state < PWRDM_POWER_ON ||
- core_next_state < PWRDM_POWER_ON)) {
+ if (omap3_has_io_wakeup() &&
+ (per_next_state < PWRDM_POWER_ON ||
+ core_next_state < PWRDM_POWER_ON)) {
prm_set_mod_reg_bits(OMAP3430_EN_IO_MASK, WKUP_MOD, PM_WKEN);
omap3_enable_io_chain();
}
+ /* Block console output in case it is on one of the OMAP UARTs */
+ if (per_next_state < PWRDM_POWER_ON ||
+ core_next_state < PWRDM_POWER_ON)
+ if (try_acquire_console_sem())
+ goto console_still_active;
+
/* PER */
if (per_next_state < PWRDM_POWER_ON) {
omap_uart_prepare_idle(2);
+ omap_uart_prepare_idle(3);
omap2_gpio_prepare_for_idle(per_next_state);
- if (per_next_state == PWRDM_POWER_OFF) {
- if (core_next_state == PWRDM_POWER_ON) {
- per_next_state = PWRDM_POWER_RET;
- pwrdm_set_next_pwrst(per_pwrdm, per_next_state);
- per_state_modified = 1;
- } else
+ if (per_next_state == PWRDM_POWER_OFF)
omap3_per_save_context();
- }
}
- if (pwrdm_read_pwrst(cam_pwrdm) == PWRDM_POWER_ON)
- omap2_clkdm_deny_idle(mpu_pwrdm->pwrdm_clkdms[0]);
-
/* CORE */
if (core_next_state < PWRDM_POWER_ON) {
omap_uart_prepare_idle(0);
@@ -475,10 +467,12 @@ void omap_sram_idle(void)
if (per_prev_state == PWRDM_POWER_OFF)
omap3_per_restore_context();
omap_uart_resume_idle(2);
- if (per_state_modified)
- pwrdm_set_next_pwrst(per_pwrdm, PWRDM_POWER_OFF);
+ omap_uart_resume_idle(3);
}
+ release_console_sem();
+
+console_still_active:
/* Disable IO-PAD and IO-CHAIN wakeup */
if (omap3_has_io_wakeup() &&
(per_next_state < PWRDM_POWER_ON ||
@@ -501,51 +495,6 @@ int omap3_can_sleep(void)
return 1;
}
-/* This sets pwrdm state (other than mpu & core. Currently only ON &
- * RET are supported. Function is assuming that clkdm doesn't have
- * hw_sup mode enabled. */
-int set_pwrdm_state(struct powerdomain *pwrdm, u32 state)
-{
- u32 cur_state;
- int sleep_switch = 0;
- int ret = 0;
-
- if (pwrdm == NULL || IS_ERR(pwrdm))
- return -EINVAL;
-
- while (!(pwrdm->pwrsts & (1 << state))) {
- if (state == PWRDM_POWER_OFF)
- return ret;
- state--;
- }
-
- cur_state = pwrdm_read_next_pwrst(pwrdm);
- if (cur_state == state)
- return ret;
-
- if (pwrdm_read_pwrst(pwrdm) < PWRDM_POWER_ON) {
- omap2_clkdm_wakeup(pwrdm->pwrdm_clkdms[0]);
- sleep_switch = 1;
- pwrdm_wait_transition(pwrdm);
- }
-
- ret = pwrdm_set_next_pwrst(pwrdm, state);
- if (ret) {
- printk(KERN_ERR "Unable to set state of powerdomain: %s\n",
- pwrdm->name);
- goto err;
- }
-
- if (sleep_switch) {
- omap2_clkdm_allow_idle(pwrdm->pwrdm_clkdms[0]);
- pwrdm_wait_transition(pwrdm);
- pwrdm_state_switch(pwrdm);
- }
-
-err:
- return ret;
-}
-
static void omap3_pm_idle(void)
{
local_irq_disable();
@@ -567,23 +516,6 @@ out:
#ifdef CONFIG_SUSPEND
static suspend_state_t suspend_state;
-static void omap2_pm_wakeup_on_timer(u32 seconds, u32 milliseconds)
-{
- u32 tick_rate, cycles;
-
- if (!seconds && !milliseconds)
- return;
-
- tick_rate = clk_get_rate(omap_dm_timer_get_fclk(gptimer_wakeup));
- cycles = tick_rate * seconds + tick_rate * milliseconds / 1000;
- omap_dm_timer_stop(gptimer_wakeup);
- omap_dm_timer_set_load_start(gptimer_wakeup, 0, 0xffffffff - cycles);
-
- pr_info("PM: Resume timer in %u.%03u secs"
- " (%d ticks at %d ticks/sec.)\n",
- seconds, milliseconds, cycles, tick_rate);
-}
-
static int omap3_pm_prepare(void)
{
disable_hlt();
@@ -604,7 +536,7 @@ static int omap3_pm_suspend(void)
pwrst->saved_state = pwrdm_read_next_pwrst(pwrst->pwrdm);
/* Set ones wanted by suspend */
list_for_each_entry(pwrst, &pwrst_list, node) {
- if (set_pwrdm_state(pwrst->pwrdm, pwrst->next_state))
+ if (omap_set_pwrdm_state(pwrst->pwrdm, pwrst->next_state))
goto restore;
if (pwrdm_clear_all_prev_pwrst(pwrst->pwrdm))
goto restore;
@@ -625,7 +557,7 @@ restore:
pwrst->pwrdm->name, pwrst->next_state);
ret = -1;
}
- set_pwrdm_state(pwrst->pwrdm, pwrst->saved_state);
+ omap_set_pwrdm_state(pwrst->pwrdm, pwrst->saved_state);
}
if (ret)
printk(KERN_ERR "Could not enter target state in pm_suspend\n");
@@ -756,6 +688,14 @@ static void __init omap3_d2d_idle(void)
static void __init prcm_setup_regs(void)
{
+ u32 omap3630_auto_uart4_mask = cpu_is_omap3630() ?
+ OMAP3630_AUTO_UART4_MASK : 0;
+ u32 omap3630_en_uart4_mask = cpu_is_omap3630() ?
+ OMAP3630_EN_UART4_MASK : 0;
+ u32 omap3630_grpsel_uart4_mask = cpu_is_omap3630() ?
+ OMAP3630_GRPSEL_UART4_MASK : 0;
+
+
/* XXX Reset all wkdeps. This should be done when initializing
* powerdomains */
prm_write_mod_reg(0, OMAP3430_IVA2_MOD, PM_WKDEP);
@@ -842,6 +782,7 @@ static void __init prcm_setup_regs(void)
CM_AUTOIDLE);
cm_write_mod_reg(
+ omap3630_auto_uart4_mask |
OMAP3430_AUTO_GPIO6_MASK |
OMAP3430_AUTO_GPIO5_MASK |
OMAP3430_AUTO_GPIO4_MASK |
@@ -918,14 +859,16 @@ static void __init prcm_setup_regs(void)
OMAP3430_DSS_MOD, PM_WKEN);
/* Enable wakeups in PER */
- prm_write_mod_reg(OMAP3430_EN_GPIO2_MASK | OMAP3430_EN_GPIO3_MASK |
+ prm_write_mod_reg(omap3630_en_uart4_mask |
+ OMAP3430_EN_GPIO2_MASK | OMAP3430_EN_GPIO3_MASK |
OMAP3430_EN_GPIO4_MASK | OMAP3430_EN_GPIO5_MASK |
OMAP3430_EN_GPIO6_MASK | OMAP3430_EN_UART3_MASK |
OMAP3430_EN_MCBSP2_MASK | OMAP3430_EN_MCBSP3_MASK |
OMAP3430_EN_MCBSP4_MASK,
OMAP3430_PER_MOD, PM_WKEN);
/* and allow them to wake up MPU */
- prm_write_mod_reg(OMAP3430_GRPSEL_GPIO2_MASK |
+ prm_write_mod_reg(omap3630_grpsel_uart4_mask |
+ OMAP3430_GRPSEL_GPIO2_MASK |
OMAP3430_GRPSEL_GPIO3_MASK |
OMAP3430_GRPSEL_GPIO4_MASK |
OMAP3430_GRPSEL_GPIO5_MASK |
@@ -974,7 +917,7 @@ void omap3_pm_off_mode_enable(int enable)
list_for_each_entry(pwrst, &pwrst_list, node) {
pwrst->next_state = state;
- set_pwrdm_state(pwrst->pwrdm, state);
+ omap_set_pwrdm_state(pwrst->pwrdm, state);
}
}
@@ -1019,7 +962,7 @@ static int __init pwrdms_setup(struct powerdomain *pwrdm, void *unused)
if (pwrdm_has_hdwr_sar(pwrdm))
pwrdm_enable_hdwr_sar(pwrdm);
- return set_pwrdm_state(pwrst->pwrdm, pwrst->next_state);
+ return omap_set_pwrdm_state(pwrst->pwrdm, pwrst->next_state);
}
/*
@@ -1029,9 +972,6 @@ static int __init pwrdms_setup(struct powerdomain *pwrdm, void *unused)
*/
static int __init clkdms_setup(struct clockdomain *clkdm, void *unused)
{
- clkdm_clear_all_wkdeps(clkdm);
- clkdm_clear_all_sleepdeps(clkdm);
-
if (clkdm->flags & CLKDM_CAN_ENABLE_AUTO)
omap2_clkdm_allow_idle(clkdm);
else if (clkdm->flags & CLKDM_CAN_FORCE_SLEEP &&
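The local set_pwrdm_state() removed above (its callers now use the shared omap_set_pwrdm_state()) walks the requested state downward until it finds one the domain's pwrsts bitmask supports. A minimal userspace sketch of just that state-walking loop, with state values chosen for illustration:

/*
 * Sketch only: reproduces the while-loop from the removed set_pwrdm_state(),
 * not the full function. The enum values are illustrative.
 */
#include <stdio.h>

enum { PWR_OFF = 0, PWR_RET = 1, PWR_INACTIVE = 2, PWR_ON = 3 };

static unsigned int pick_state(unsigned int pwrsts, unsigned int state)
{
	while (!(pwrsts & (1u << state))) {
		if (state == PWR_OFF)
			return state;   /* nothing lower to try; give up as-is */
		state--;
	}
	return state;
}

int main(void)
{
	/* Domain that only supports RET and ON. */
	unsigned int pwrsts = (1u << PWR_RET) | (1u << PWR_ON);

	printf("requested INACTIVE -> got %u\n", pick_state(pwrsts, PWR_INACTIVE));
	printf("requested OFF      -> got %u\n", pick_state(pwrsts, PWR_OFF));
	return 0;
}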
diff --git a/arch/arm/mach-omap2/pm_bus.c b/arch/arm/mach-omap2/pm_bus.c
new file mode 100644
index 000000000000..784989f8f2f5
--- /dev/null
+++ b/arch/arm/mach-omap2/pm_bus.c
@@ -0,0 +1,85 @@
+/*
+ * Runtime PM support code for OMAP
+ *
+ * Author: Kevin Hilman, Deep Root Systems, LLC
+ *
+ * Copyright (C) 2010 Texas Instruments, Inc.
+ *
+ * This file is licensed under the terms of the GNU General Public
+ * License version 2. This program is licensed "as is" without any
+ * warranty of any kind, whether express or implied.
+ */
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/io.h>
+#include <linux/pm_runtime.h>
+#include <linux/platform_device.h>
+#include <linux/mutex.h>
+
+#include <plat/omap_device.h>
+#include <plat/omap-pm.h>
+
+#ifdef CONFIG_PM_RUNTIME
+int omap_pm_runtime_suspend(struct device *dev)
+{
+ struct platform_device *pdev = to_platform_device(dev);
+ int r, ret = 0;
+
+ dev_dbg(dev, "%s\n", __func__);
+
+ ret = pm_generic_runtime_suspend(dev);
+
+ if (!ret && dev->parent == &omap_device_parent) {
+ r = omap_device_idle(pdev);
+ WARN_ON(r);
+ }
+
+ return ret;
+};
+
+int omap_pm_runtime_resume(struct device *dev)
+{
+ struct platform_device *pdev = to_platform_device(dev);
+ int r;
+
+ dev_dbg(dev, "%s\n", __func__);
+
+ if (dev->parent == &omap_device_parent) {
+ r = omap_device_enable(pdev);
+ WARN_ON(r);
+ }
+
+ return pm_generic_runtime_resume(dev);
+};
+#else
+#define omap_pm_runtime_suspend NULL
+#define omap_pm_runtime_resume NULL
+#endif /* CONFIG_PM_RUNTIME */
+
+static int __init omap_pm_runtime_init(void)
+{
+ const struct dev_pm_ops *pm;
+ struct dev_pm_ops *omap_pm;
+
+ pm = platform_bus_get_pm_ops();
+ if (!pm) {
+ pr_err("%s: unable to get dev_pm_ops from platform_bus\n",
+ __func__);
+ return -ENODEV;
+ }
+
+ omap_pm = kmemdup(pm, sizeof(struct dev_pm_ops), GFP_KERNEL);
+ if (!omap_pm) {
+ pr_err("%s: unable to alloc memory for new dev_pm_ops\n",
+ __func__);
+ return -ENOMEM;
+ }
+
+ omap_pm->runtime_suspend = omap_pm_runtime_suspend;
+ omap_pm->runtime_resume = omap_pm_runtime_resume;
+
+ platform_bus_set_pm_ops(omap_pm);
+
+ return 0;
+}
+core_initcall(omap_pm_runtime_init);
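pm_bus.c above installs OMAP-specific runtime PM callbacks by duplicating the platform bus dev_pm_ops with kmemdup() and overriding only runtime_suspend/runtime_resume, so every other callback keeps the generic behaviour. A minimal userspace sketch of that copy-and-override idiom (the struct and function names below are made up):

/*
 * Sketch only: models the kmemdup-then-patch idiom from pm_bus.c, not the
 * kernel's dev_pm_ops. Only two entries in the copied table are replaced.
 */
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

struct bus_ops {
	int (*suspend)(void);
	int (*resume)(void);
	int (*probe)(void);          /* stands in for all the untouched ops */
};

static int generic_suspend(void) { puts("generic suspend"); return 0; }
static int generic_resume(void)  { puts("generic resume");  return 0; }
static int generic_probe(void)   { puts("generic probe");   return 0; }

static struct bus_ops platform_ops = {
	.suspend = generic_suspend,
	.resume  = generic_resume,
	.probe   = generic_probe,
};

static int omap_suspend(void) { puts("idle the omap_device, then:"); return generic_suspend(); }
static int omap_resume(void)  { puts("enable the omap_device, then:"); return generic_resume(); }

int main(void)
{
	/* kmemdup() equivalent: copy the table, then patch two entries. */
	struct bus_ops *omap_ops = malloc(sizeof(*omap_ops));
	if (!omap_ops)
		return 1;
	memcpy(omap_ops, &platform_ops, sizeof(*omap_ops));
	omap_ops->suspend = omap_suspend;
	omap_ops->resume  = omap_resume;

	omap_ops->probe();    /* still the generic callback */
	omap_ops->suspend();  /* now routes through the OMAP hook */
	free(omap_ops);
	return 0;
}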
diff --git a/arch/arm/mach-omap2/powerdomains44xx.h b/arch/arm/mach-omap2/powerdomains44xx.h
index c7219513472a..9c01b55d6102 100644
--- a/arch/arm/mach-omap2/powerdomains44xx.h
+++ b/arch/arm/mach-omap2/powerdomains44xx.h
@@ -98,7 +98,7 @@ static struct powerdomain dss_44xx_pwrdm = {
.prcm_offs = OMAP4430_PRM_DSS_MOD,
.omap_chip = OMAP_CHIP_INIT(CHIP_IS_OMAP4430),
.pwrsts = PWRSTS_OFF_RET_ON,
- .pwrsts_logic_ret = PWRSTS_OFF_RET,
+ .pwrsts_logic_ret = PWRSTS_OFF,
.banks = 1,
.pwrsts_mem_ret = {
[0] = PWRDM_POWER_OFF, /* dss_mem */
diff --git a/arch/arm/mach-omap2/prcm-common.h b/arch/arm/mach-omap2/prcm-common.h
index 995b7edbf18d..298a22a754e2 100644
--- a/arch/arm/mach-omap2/prcm-common.h
+++ b/arch/arm/mach-omap2/prcm-common.h
@@ -382,6 +382,9 @@
#define OMAP3430_EN_MPU_SHIFT 1
/* CM_FCLKEN_PER, CM_ICLKEN_PER, PM_WKEN_PER shared bits */
+
+#define OMAP3630_EN_UART4_MASK (1 << 18)
+#define OMAP3630_EN_UART4_SHIFT 18
#define OMAP3430_EN_GPIO6_MASK (1 << 17)
#define OMAP3430_EN_GPIO6_SHIFT 17
#define OMAP3430_EN_GPIO5_MASK (1 << 16)
@@ -422,6 +425,8 @@
#define OMAP3430_EN_MCBSP2_SHIFT 0
/* CM_IDLEST_PER, PM_WKST_PER shared bits */
+#define OMAP3630_ST_UART4_SHIFT 18
+#define OMAP3630_ST_UART4_MASK (1 << 18)
#define OMAP3430_ST_GPIO6_SHIFT 17
#define OMAP3430_ST_GPIO6_MASK (1 << 17)
#define OMAP3430_ST_GPIO5_SHIFT 16
diff --git a/arch/arm/mach-omap2/prcm.c b/arch/arm/mach-omap2/prcm.c
index c20137497c92..a51846e3a6fa 100644
--- a/arch/arm/mach-omap2/prcm.c
+++ b/arch/arm/mach-omap2/prcm.c
@@ -26,13 +26,14 @@
#include <plat/common.h>
#include <plat/prcm.h>
#include <plat/irqs.h>
-#include <plat/control.h>
#include "clock.h"
#include "clock2xxx.h"
#include "cm.h"
#include "prm.h"
#include "prm-regbits-24xx.h"
+#include "prm-regbits-44xx.h"
+#include "control.h"
static void __iomem *prm_base;
static void __iomem *cm_base;
@@ -118,7 +119,7 @@ struct omap3_prcm_regs {
u32 wkup_pm_wken;
};
-struct omap3_prcm_regs prcm_context;
+static struct omap3_prcm_regs prcm_context;
u32 omap_prcm_get_reset_sources(void)
{
@@ -161,8 +162,8 @@ void omap_prcm_arch_reset(char mode, const char *cmd)
prm_set_mod_reg_bits(OMAP_RST_DPLL3_MASK, prcm_offs,
OMAP2_RM_RSTCTRL);
if (cpu_is_omap44xx())
- prm_set_mod_reg_bits(OMAP_RST_DPLL3_MASK, prcm_offs,
- OMAP4_RM_RSTCTRL);
+ prm_set_mod_reg_bits(OMAP4430_RST_GLOBAL_WARM_SW_MASK,
+ prcm_offs, OMAP4_RM_RSTCTRL);
}
static inline u32 __omap_prcm_read(void __iomem *base, s16 module, u16 reg)
@@ -215,6 +216,30 @@ u32 prm_read_mod_bits_shift(s16 domain, s16 idx, u32 mask)
return v;
}
+/* Read a PRM register, AND it, and shift the result down to bit 0 */
+u32 omap4_prm_read_bits_shift(void __iomem *reg, u32 mask)
+{
+ u32 v;
+
+ v = __raw_readl(reg);
+ v &= mask;
+ v >>= __ffs(mask);
+
+ return v;
+}
+
+/* Read-modify-write a register in a PRM module. Caller must lock */
+u32 omap4_prm_rmw_reg_bits(u32 mask, u32 bits, void __iomem *reg)
+{
+ u32 v;
+
+ v = __raw_readl(reg);
+ v &= ~mask;
+ v |= bits;
+ __raw_writel(v, reg);
+
+ return v;
+}
/* Read a register in a CM module */
u32 cm_read_mod_reg(s16 module, u16 idx)
{
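The omap4_prm_read_bits_shift() helper added above extracts a register field by ANDing with the field mask and shifting right by the mask's lowest set bit (__ffs), so the field lands at bit 0. A minimal userspace sketch of the same computation, using __builtin_ctz() as a stand-in for __ffs() and a made-up register value:

/*
 * Sketch only: the register value is fake and __builtin_ctz() stands in
 * for the kernel's __ffs(); the arithmetic matches the helper above.
 */
#include <stdio.h>
#include <stdint.h>

static uint32_t read_bits_shift(uint32_t regval, uint32_t mask)
{
	return (regval & mask) >> __builtin_ctz(mask);
}

int main(void)
{
	/* A 2-bit field at bits 17:16, like the ..._ONSTATE masks below. */
	uint32_t mask = 0x3u << 16;
	uint32_t reg  = 0x00020000u;   /* field holds 0x2, everything else 0 */

	printf("field = %u\n", read_bits_shift(reg, mask));   /* prints 2 */
	return 0;
}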
diff --git a/arch/arm/mach-omap2/prm-regbits-34xx.h b/arch/arm/mach-omap2/prm-regbits-34xx.h
index 7fd6023edf96..9e63cb743a97 100644
--- a/arch/arm/mach-omap2/prm-regbits-34xx.h
+++ b/arch/arm/mach-omap2/prm-regbits-34xx.h
@@ -122,6 +122,7 @@
#define OMAP3430_MEMRETSTATE_MASK (1 << 8)
/* PM_MPUGRPSEL_PER, PM_IVA2GRPSEL_PER shared bits */
+#define OMAP3630_GRPSEL_UART4_MASK (1 << 18)
#define OMAP3430_GRPSEL_GPIO6_MASK (1 << 17)
#define OMAP3430_GRPSEL_GPIO5_MASK (1 << 16)
#define OMAP3430_GRPSEL_GPIO4_MASK (1 << 15)
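The prm-regbits-44xx.h diff that follows replaces every BITFIELD(lo, hi) mask with an explicit (value << shift) constant, e.g. BITFIELD(16, 17) becomes (0x3 << 16) and BITFIELD(0, 7) becomes (0xff << 0). A minimal sketch that reproduces the mapping (the helper below is an assumption about the old macro's semantics, inferred from the replacements themselves):

/*
 * Sketch only: bitfield() here models a mask covering bits lo..hi inclusive,
 * which is what the one-for-one replacements in the diff below imply.
 */
#include <assert.h>
#include <stdint.h>
#include <stdio.h>

static uint32_t bitfield(unsigned int lo, unsigned int hi)
{
	uint32_t width = hi - lo + 1;
	uint32_t ones = (width == 32) ? 0xffffffffu : ((1u << width) - 1u);
	return ones << lo;
}

int main(void)
{
	assert(bitfield(16, 17) == (0x3u << 16));        /* ..._ONSTATE fields  */
	assert(bitfield(0, 7)   == (0xffu << 0));        /* 8-bit command bytes */
	assert(bitfield(8, 31)  == (0xffffffu << 8));    /* FORCEUPDATEWAIT     */
	assert(bitfield(0, 31)  == 0xffffffffu);         /* PHASEx_CNDP         */
	puts("mask forms match");
	return 0;
}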
diff --git a/arch/arm/mach-omap2/prm-regbits-44xx.h b/arch/arm/mach-omap2/prm-regbits-44xx.h
index 597be4a2b9ff..25b19b610177 100644
--- a/arch/arm/mach-omap2/prm-regbits-44xx.h
+++ b/arch/arm/mach-omap2/prm-regbits-44xx.h
@@ -1,8 +1,8 @@
/*
* OMAP44xx Power Management register bits
*
- * Copyright (C) 2009 Texas Instruments, Inc.
- * Copyright (C) 2009 Nokia Corporation
+ * Copyright (C) 2009-2010 Texas Instruments, Inc.
+ * Copyright (C) 2009-2010 Nokia Corporation
*
* Paul Walmsley (paul@pwsan.com)
* Rajendra Nayak (rnayak@ti.com)
@@ -30,587 +30,611 @@
* PRM_LDO_SRAM_MPU_SETUP
*/
#define OMAP4430_ABBOFF_ACT_EXPORT_SHIFT 1
-#define OMAP4430_ABBOFF_ACT_EXPORT_MASK BITFIELD(1, 1)
+#define OMAP4430_ABBOFF_ACT_EXPORT_MASK (1 << 1)
/*
* Used by PRM_LDO_SRAM_CORE_SETUP, PRM_LDO_SRAM_IVA_SETUP,
* PRM_LDO_SRAM_MPU_SETUP
*/
#define OMAP4430_ABBOFF_SLEEP_EXPORT_SHIFT 2
-#define OMAP4430_ABBOFF_SLEEP_EXPORT_MASK BITFIELD(2, 2)
+#define OMAP4430_ABBOFF_SLEEP_EXPORT_MASK (1 << 2)
/* Used by PRM_IRQENABLE_DUCATI, PRM_IRQENABLE_MPU */
#define OMAP4430_ABB_IVA_DONE_EN_SHIFT 31
-#define OMAP4430_ABB_IVA_DONE_EN_MASK BITFIELD(31, 31)
+#define OMAP4430_ABB_IVA_DONE_EN_MASK (1 << 31)
/* Used by PRM_IRQSTATUS_DUCATI, PRM_IRQSTATUS_MPU */
#define OMAP4430_ABB_IVA_DONE_ST_SHIFT 31
-#define OMAP4430_ABB_IVA_DONE_ST_MASK BITFIELD(31, 31)
+#define OMAP4430_ABB_IVA_DONE_ST_MASK (1 << 31)
/* Used by PRM_IRQENABLE_MPU_2 */
#define OMAP4430_ABB_MPU_DONE_EN_SHIFT 7
-#define OMAP4430_ABB_MPU_DONE_EN_MASK BITFIELD(7, 7)
+#define OMAP4430_ABB_MPU_DONE_EN_MASK (1 << 7)
/* Used by PRM_IRQSTATUS_MPU_2 */
#define OMAP4430_ABB_MPU_DONE_ST_SHIFT 7
-#define OMAP4430_ABB_MPU_DONE_ST_MASK BITFIELD(7, 7)
+#define OMAP4430_ABB_MPU_DONE_ST_MASK (1 << 7)
/* Used by PRM_LDO_ABB_IVA_SETUP, PRM_LDO_ABB_MPU_SETUP */
#define OMAP4430_ACTIVE_FBB_SEL_SHIFT 2
-#define OMAP4430_ACTIVE_FBB_SEL_MASK BITFIELD(2, 2)
+#define OMAP4430_ACTIVE_FBB_SEL_MASK (1 << 2)
/* Used by PRM_LDO_ABB_IVA_SETUP, PRM_LDO_ABB_MPU_SETUP */
#define OMAP4430_ACTIVE_RBB_SEL_SHIFT 1
-#define OMAP4430_ACTIVE_RBB_SEL_MASK BITFIELD(1, 1)
+#define OMAP4430_ACTIVE_RBB_SEL_MASK (1 << 1)
/* Used by PM_ABE_PWRSTCTRL */
#define OMAP4430_AESSMEM_ONSTATE_SHIFT 16
-#define OMAP4430_AESSMEM_ONSTATE_MASK BITFIELD(16, 17)
+#define OMAP4430_AESSMEM_ONSTATE_MASK (0x3 << 16)
/* Used by PM_ABE_PWRSTCTRL */
#define OMAP4430_AESSMEM_RETSTATE_SHIFT 8
-#define OMAP4430_AESSMEM_RETSTATE_MASK BITFIELD(8, 8)
+#define OMAP4430_AESSMEM_RETSTATE_MASK (1 << 8)
/* Used by PM_ABE_PWRSTST */
#define OMAP4430_AESSMEM_STATEST_SHIFT 4
-#define OMAP4430_AESSMEM_STATEST_MASK BITFIELD(4, 5)
+#define OMAP4430_AESSMEM_STATEST_MASK (0x3 << 4)
/*
* Used by PRM_LDO_SRAM_CORE_SETUP, PRM_LDO_SRAM_IVA_SETUP,
* PRM_LDO_SRAM_MPU_SETUP
*/
#define OMAP4430_AIPOFF_SHIFT 8
-#define OMAP4430_AIPOFF_MASK BITFIELD(8, 8)
+#define OMAP4430_AIPOFF_MASK (1 << 8)
/* Used by PRM_VOLTCTRL */
#define OMAP4430_AUTO_CTRL_VDD_CORE_L_SHIFT 0
-#define OMAP4430_AUTO_CTRL_VDD_CORE_L_MASK BITFIELD(0, 1)
+#define OMAP4430_AUTO_CTRL_VDD_CORE_L_MASK (0x3 << 0)
/* Used by PRM_VOLTCTRL */
#define OMAP4430_AUTO_CTRL_VDD_IVA_L_SHIFT 4
-#define OMAP4430_AUTO_CTRL_VDD_IVA_L_MASK BITFIELD(4, 5)
+#define OMAP4430_AUTO_CTRL_VDD_IVA_L_MASK (0x3 << 4)
/* Used by PRM_VOLTCTRL */
#define OMAP4430_AUTO_CTRL_VDD_MPU_L_SHIFT 2
-#define OMAP4430_AUTO_CTRL_VDD_MPU_L_MASK BITFIELD(2, 3)
+#define OMAP4430_AUTO_CTRL_VDD_MPU_L_MASK (0x3 << 2)
+
+/* Used by PRM_VC_ERRST */
+#define OMAP4430_BYPS_RA_ERR_SHIFT 25
+#define OMAP4430_BYPS_RA_ERR_MASK (1 << 25)
+
+/* Used by PRM_VC_ERRST */
+#define OMAP4430_BYPS_SA_ERR_SHIFT 24
+#define OMAP4430_BYPS_SA_ERR_MASK (1 << 24)
+
+/* Used by PRM_VC_ERRST */
+#define OMAP4430_BYPS_TIMEOUT_ERR_SHIFT 26
+#define OMAP4430_BYPS_TIMEOUT_ERR_MASK (1 << 26)
+
+/* Used by PRM_RSTST */
+#define OMAP4430_C2C_RST_SHIFT 10
+#define OMAP4430_C2C_RST_MASK (1 << 10)
/* Used by PM_CAM_PWRSTCTRL */
#define OMAP4430_CAM_MEM_ONSTATE_SHIFT 16
-#define OMAP4430_CAM_MEM_ONSTATE_MASK BITFIELD(16, 17)
+#define OMAP4430_CAM_MEM_ONSTATE_MASK (0x3 << 16)
/* Used by PM_CAM_PWRSTST */
#define OMAP4430_CAM_MEM_STATEST_SHIFT 4
-#define OMAP4430_CAM_MEM_STATEST_MASK BITFIELD(4, 5)
+#define OMAP4430_CAM_MEM_STATEST_MASK (0x3 << 4)
/* Used by PRM_CLKREQCTRL */
#define OMAP4430_CLKREQ_COND_SHIFT 0
-#define OMAP4430_CLKREQ_COND_MASK BITFIELD(0, 2)
+#define OMAP4430_CLKREQ_COND_MASK (0x7 << 0)
/* Used by PRM_VC_VAL_SMPS_RA_CMD */
#define OMAP4430_CMDRA_VDD_CORE_L_SHIFT 0
-#define OMAP4430_CMDRA_VDD_CORE_L_MASK BITFIELD(0, 7)
+#define OMAP4430_CMDRA_VDD_CORE_L_MASK (0xff << 0)
/* Used by PRM_VC_VAL_SMPS_RA_CMD */
#define OMAP4430_CMDRA_VDD_IVA_L_SHIFT 8
-#define OMAP4430_CMDRA_VDD_IVA_L_MASK BITFIELD(8, 15)
+#define OMAP4430_CMDRA_VDD_IVA_L_MASK (0xff << 8)
/* Used by PRM_VC_VAL_SMPS_RA_CMD */
#define OMAP4430_CMDRA_VDD_MPU_L_SHIFT 16
-#define OMAP4430_CMDRA_VDD_MPU_L_MASK BITFIELD(16, 23)
+#define OMAP4430_CMDRA_VDD_MPU_L_MASK (0xff << 16)
/* Used by PRM_VC_CFG_CHANNEL */
#define OMAP4430_CMD_VDD_CORE_L_SHIFT 4
-#define OMAP4430_CMD_VDD_CORE_L_MASK BITFIELD(4, 4)
+#define OMAP4430_CMD_VDD_CORE_L_MASK (1 << 4)
/* Used by PRM_VC_CFG_CHANNEL */
#define OMAP4430_CMD_VDD_IVA_L_SHIFT 12
-#define OMAP4430_CMD_VDD_IVA_L_MASK BITFIELD(12, 12)
+#define OMAP4430_CMD_VDD_IVA_L_MASK (1 << 12)
/* Used by PRM_VC_CFG_CHANNEL */
#define OMAP4430_CMD_VDD_MPU_L_SHIFT 17
-#define OMAP4430_CMD_VDD_MPU_L_MASK BITFIELD(17, 17)
+#define OMAP4430_CMD_VDD_MPU_L_MASK (1 << 17)
/* Used by PM_CORE_PWRSTCTRL */
#define OMAP4430_CORE_OCMRAM_ONSTATE_SHIFT 18
-#define OMAP4430_CORE_OCMRAM_ONSTATE_MASK BITFIELD(18, 19)
+#define OMAP4430_CORE_OCMRAM_ONSTATE_MASK (0x3 << 18)
/* Used by PM_CORE_PWRSTCTRL */
#define OMAP4430_CORE_OCMRAM_RETSTATE_SHIFT 9
-#define OMAP4430_CORE_OCMRAM_RETSTATE_MASK BITFIELD(9, 9)
+#define OMAP4430_CORE_OCMRAM_RETSTATE_MASK (1 << 9)
/* Used by PM_CORE_PWRSTST */
#define OMAP4430_CORE_OCMRAM_STATEST_SHIFT 6
-#define OMAP4430_CORE_OCMRAM_STATEST_MASK BITFIELD(6, 7)
+#define OMAP4430_CORE_OCMRAM_STATEST_MASK (0x3 << 6)
/* Used by PM_CORE_PWRSTCTRL */
#define OMAP4430_CORE_OTHER_BANK_ONSTATE_SHIFT 16
-#define OMAP4430_CORE_OTHER_BANK_ONSTATE_MASK BITFIELD(16, 17)
+#define OMAP4430_CORE_OTHER_BANK_ONSTATE_MASK (0x3 << 16)
/* Used by PM_CORE_PWRSTCTRL */
#define OMAP4430_CORE_OTHER_BANK_RETSTATE_SHIFT 8
-#define OMAP4430_CORE_OTHER_BANK_RETSTATE_MASK BITFIELD(8, 8)
+#define OMAP4430_CORE_OTHER_BANK_RETSTATE_MASK (1 << 8)
/* Used by PM_CORE_PWRSTST */
#define OMAP4430_CORE_OTHER_BANK_STATEST_SHIFT 4
-#define OMAP4430_CORE_OTHER_BANK_STATEST_MASK BITFIELD(4, 5)
+#define OMAP4430_CORE_OTHER_BANK_STATEST_MASK (0x3 << 4)
+
+/* Used by REVISION_PRM */
+#define OMAP4430_CUSTOM_SHIFT 6
+#define OMAP4430_CUSTOM_MASK (0x3 << 6)
/* Used by PRM_VC_VAL_BYPASS */
#define OMAP4430_DATA_SHIFT 16
-#define OMAP4430_DATA_MASK BITFIELD(16, 23)
+#define OMAP4430_DATA_MASK (0xff << 16)
/* Used by PRM_DEVICE_OFF_CTRL */
#define OMAP4430_DEVICE_OFF_ENABLE_SHIFT 0
-#define OMAP4430_DEVICE_OFF_ENABLE_MASK BITFIELD(0, 0)
+#define OMAP4430_DEVICE_OFF_ENABLE_MASK (1 << 0)
/* Used by PRM_VC_CFG_I2C_MODE */
#define OMAP4430_DFILTEREN_SHIFT 6
-#define OMAP4430_DFILTEREN_MASK BITFIELD(6, 6)
+#define OMAP4430_DFILTEREN_MASK (1 << 6)
-/* Used by PRM_IRQENABLE_MPU, PRM_IRQENABLE_TESLA */
+/*
+ * Used by PRM_LDO_SRAM_CORE_SETUP, PRM_LDO_SRAM_IVA_SETUP,
+ * PRM_LDO_SRAM_MPU_SETUP, PRM_SRAM_WKUP_SETUP
+ */
+#define OMAP4430_DISABLE_RTA_EXPORT_SHIFT 0
+#define OMAP4430_DISABLE_RTA_EXPORT_MASK (1 << 0)
+
+/* Used by PRM_IRQENABLE_DUCATI, PRM_IRQENABLE_MPU, PRM_IRQENABLE_TESLA */
#define OMAP4430_DPLL_ABE_RECAL_EN_SHIFT 4
-#define OMAP4430_DPLL_ABE_RECAL_EN_MASK BITFIELD(4, 4)
+#define OMAP4430_DPLL_ABE_RECAL_EN_MASK (1 << 4)
-/* Used by PRM_IRQSTATUS_MPU, PRM_IRQSTATUS_TESLA */
+/* Used by PRM_IRQSTATUS_DUCATI, PRM_IRQSTATUS_MPU, PRM_IRQSTATUS_TESLA */
#define OMAP4430_DPLL_ABE_RECAL_ST_SHIFT 4
-#define OMAP4430_DPLL_ABE_RECAL_ST_MASK BITFIELD(4, 4)
+#define OMAP4430_DPLL_ABE_RECAL_ST_MASK (1 << 4)
/* Used by PRM_IRQENABLE_DUCATI, PRM_IRQENABLE_MPU */
#define OMAP4430_DPLL_CORE_RECAL_EN_SHIFT 0
-#define OMAP4430_DPLL_CORE_RECAL_EN_MASK BITFIELD(0, 0)
+#define OMAP4430_DPLL_CORE_RECAL_EN_MASK (1 << 0)
/* Used by PRM_IRQSTATUS_DUCATI, PRM_IRQSTATUS_MPU */
#define OMAP4430_DPLL_CORE_RECAL_ST_SHIFT 0
-#define OMAP4430_DPLL_CORE_RECAL_ST_MASK BITFIELD(0, 0)
+#define OMAP4430_DPLL_CORE_RECAL_ST_MASK (1 << 0)
/* Used by PRM_IRQENABLE_MPU */
#define OMAP4430_DPLL_DDRPHY_RECAL_EN_SHIFT 6
-#define OMAP4430_DPLL_DDRPHY_RECAL_EN_MASK BITFIELD(6, 6)
+#define OMAP4430_DPLL_DDRPHY_RECAL_EN_MASK (1 << 6)
/* Used by PRM_IRQSTATUS_MPU */
#define OMAP4430_DPLL_DDRPHY_RECAL_ST_SHIFT 6
-#define OMAP4430_DPLL_DDRPHY_RECAL_ST_MASK BITFIELD(6, 6)
+#define OMAP4430_DPLL_DDRPHY_RECAL_ST_MASK (1 << 6)
/* Used by PRM_IRQENABLE_DUCATI, PRM_IRQENABLE_MPU, PRM_IRQENABLE_TESLA */
#define OMAP4430_DPLL_IVA_RECAL_EN_SHIFT 2
-#define OMAP4430_DPLL_IVA_RECAL_EN_MASK BITFIELD(2, 2)
+#define OMAP4430_DPLL_IVA_RECAL_EN_MASK (1 << 2)
/* Used by PRM_IRQSTATUS_DUCATI, PRM_IRQSTATUS_MPU, PRM_IRQSTATUS_TESLA */
#define OMAP4430_DPLL_IVA_RECAL_ST_SHIFT 2
-#define OMAP4430_DPLL_IVA_RECAL_ST_MASK BITFIELD(2, 2)
+#define OMAP4430_DPLL_IVA_RECAL_ST_MASK (1 << 2)
/* Used by PRM_IRQENABLE_MPU */
#define OMAP4430_DPLL_MPU_RECAL_EN_SHIFT 1
-#define OMAP4430_DPLL_MPU_RECAL_EN_MASK BITFIELD(1, 1)
+#define OMAP4430_DPLL_MPU_RECAL_EN_MASK (1 << 1)
/* Used by PRM_IRQSTATUS_MPU */
#define OMAP4430_DPLL_MPU_RECAL_ST_SHIFT 1
-#define OMAP4430_DPLL_MPU_RECAL_ST_MASK BITFIELD(1, 1)
+#define OMAP4430_DPLL_MPU_RECAL_ST_MASK (1 << 1)
/* Used by PRM_IRQENABLE_DUCATI, PRM_IRQENABLE_MPU */
#define OMAP4430_DPLL_PER_RECAL_EN_SHIFT 3
-#define OMAP4430_DPLL_PER_RECAL_EN_MASK BITFIELD(3, 3)
+#define OMAP4430_DPLL_PER_RECAL_EN_MASK (1 << 3)
/* Used by PRM_IRQSTATUS_DUCATI, PRM_IRQSTATUS_MPU */
#define OMAP4430_DPLL_PER_RECAL_ST_SHIFT 3
-#define OMAP4430_DPLL_PER_RECAL_ST_MASK BITFIELD(3, 3)
+#define OMAP4430_DPLL_PER_RECAL_ST_MASK (1 << 3)
/* Used by PRM_IRQENABLE_DUCATI, PRM_IRQENABLE_MPU */
#define OMAP4430_DPLL_UNIPRO_RECAL_EN_SHIFT 7
-#define OMAP4430_DPLL_UNIPRO_RECAL_EN_MASK BITFIELD(7, 7)
+#define OMAP4430_DPLL_UNIPRO_RECAL_EN_MASK (1 << 7)
/* Used by PRM_IRQSTATUS_DUCATI, PRM_IRQSTATUS_MPU */
#define OMAP4430_DPLL_UNIPRO_RECAL_ST_SHIFT 7
-#define OMAP4430_DPLL_UNIPRO_RECAL_ST_MASK BITFIELD(7, 7)
-
-/* Used by PRM_IRQENABLE_MPU */
-#define OMAP4430_DPLL_USB_RECAL_EN_SHIFT 5
-#define OMAP4430_DPLL_USB_RECAL_EN_MASK BITFIELD(5, 5)
-
-/* Used by PRM_IRQSTATUS_MPU */
-#define OMAP4430_DPLL_USB_RECAL_ST_SHIFT 5
-#define OMAP4430_DPLL_USB_RECAL_ST_MASK BITFIELD(5, 5)
+#define OMAP4430_DPLL_UNIPRO_RECAL_ST_MASK (1 << 7)
/* Used by PM_DSS_PWRSTCTRL */
#define OMAP4430_DSS_MEM_ONSTATE_SHIFT 16
-#define OMAP4430_DSS_MEM_ONSTATE_MASK BITFIELD(16, 17)
+#define OMAP4430_DSS_MEM_ONSTATE_MASK (0x3 << 16)
/* Used by PM_DSS_PWRSTCTRL */
#define OMAP4430_DSS_MEM_RETSTATE_SHIFT 8
-#define OMAP4430_DSS_MEM_RETSTATE_MASK BITFIELD(8, 8)
+#define OMAP4430_DSS_MEM_RETSTATE_MASK (1 << 8)
/* Used by PM_DSS_PWRSTST */
#define OMAP4430_DSS_MEM_STATEST_SHIFT 4
-#define OMAP4430_DSS_MEM_STATEST_MASK BITFIELD(4, 5)
+#define OMAP4430_DSS_MEM_STATEST_MASK (0x3 << 4)
/* Used by PM_CORE_PWRSTCTRL */
#define OMAP4430_DUCATI_L2RAM_ONSTATE_SHIFT 20
-#define OMAP4430_DUCATI_L2RAM_ONSTATE_MASK BITFIELD(20, 21)
+#define OMAP4430_DUCATI_L2RAM_ONSTATE_MASK (0x3 << 20)
/* Used by PM_CORE_PWRSTCTRL */
#define OMAP4430_DUCATI_L2RAM_RETSTATE_SHIFT 10
-#define OMAP4430_DUCATI_L2RAM_RETSTATE_MASK BITFIELD(10, 10)
+#define OMAP4430_DUCATI_L2RAM_RETSTATE_MASK (1 << 10)
/* Used by PM_CORE_PWRSTST */
#define OMAP4430_DUCATI_L2RAM_STATEST_SHIFT 8
-#define OMAP4430_DUCATI_L2RAM_STATEST_MASK BITFIELD(8, 9)
+#define OMAP4430_DUCATI_L2RAM_STATEST_MASK (0x3 << 8)
/* Used by PM_CORE_PWRSTCTRL */
#define OMAP4430_DUCATI_UNICACHE_ONSTATE_SHIFT 22
-#define OMAP4430_DUCATI_UNICACHE_ONSTATE_MASK BITFIELD(22, 23)
+#define OMAP4430_DUCATI_UNICACHE_ONSTATE_MASK (0x3 << 22)
/* Used by PM_CORE_PWRSTCTRL */
#define OMAP4430_DUCATI_UNICACHE_RETSTATE_SHIFT 11
-#define OMAP4430_DUCATI_UNICACHE_RETSTATE_MASK BITFIELD(11, 11)
+#define OMAP4430_DUCATI_UNICACHE_RETSTATE_MASK (1 << 11)
/* Used by PM_CORE_PWRSTST */
#define OMAP4430_DUCATI_UNICACHE_STATEST_SHIFT 10
-#define OMAP4430_DUCATI_UNICACHE_STATEST_MASK BITFIELD(10, 11)
+#define OMAP4430_DUCATI_UNICACHE_STATEST_MASK (0x3 << 10)
/* Used by RM_MPU_RSTST */
#define OMAP4430_EMULATION_RST_SHIFT 0
-#define OMAP4430_EMULATION_RST_MASK BITFIELD(0, 0)
+#define OMAP4430_EMULATION_RST_MASK (1 << 0)
/* Used by RM_DUCATI_RSTST */
#define OMAP4430_EMULATION_RST1ST_SHIFT 3
-#define OMAP4430_EMULATION_RST1ST_MASK BITFIELD(3, 3)
+#define OMAP4430_EMULATION_RST1ST_MASK (1 << 3)
/* Used by RM_DUCATI_RSTST */
#define OMAP4430_EMULATION_RST2ST_SHIFT 4
-#define OMAP4430_EMULATION_RST2ST_MASK BITFIELD(4, 4)
+#define OMAP4430_EMULATION_RST2ST_MASK (1 << 4)
/* Used by RM_IVAHD_RSTST */
#define OMAP4430_EMULATION_SEQ1_RST1ST_SHIFT 3
-#define OMAP4430_EMULATION_SEQ1_RST1ST_MASK BITFIELD(3, 3)
+#define OMAP4430_EMULATION_SEQ1_RST1ST_MASK (1 << 3)
/* Used by RM_IVAHD_RSTST */
#define OMAP4430_EMULATION_SEQ2_RST2ST_SHIFT 4
-#define OMAP4430_EMULATION_SEQ2_RST2ST_MASK BITFIELD(4, 4)
+#define OMAP4430_EMULATION_SEQ2_RST2ST_MASK (1 << 4)
/* Used by PM_EMU_PWRSTCTRL */
#define OMAP4430_EMU_BANK_ONSTATE_SHIFT 16
-#define OMAP4430_EMU_BANK_ONSTATE_MASK BITFIELD(16, 17)
+#define OMAP4430_EMU_BANK_ONSTATE_MASK (0x3 << 16)
/* Used by PM_EMU_PWRSTST */
#define OMAP4430_EMU_BANK_STATEST_SHIFT 4
-#define OMAP4430_EMU_BANK_STATEST_MASK BITFIELD(4, 5)
-
-/*
- * Used by PRM_LDO_SRAM_CORE_SETUP, PRM_LDO_SRAM_IVA_SETUP,
- * PRM_LDO_SRAM_MPU_SETUP, PRM_SRAM_WKUP_SETUP
- */
-#define OMAP4430_ENABLE_RTA_EXPORT_SHIFT 0
-#define OMAP4430_ENABLE_RTA_EXPORT_MASK BITFIELD(0, 0)
+#define OMAP4430_EMU_BANK_STATEST_MASK (0x3 << 4)
/*
* Used by PRM_LDO_SRAM_CORE_SETUP, PRM_LDO_SRAM_IVA_SETUP,
* PRM_LDO_SRAM_MPU_SETUP
*/
-#define OMAP4430_ENFUNC1_SHIFT 3
-#define OMAP4430_ENFUNC1_MASK BITFIELD(3, 3)
+#define OMAP4430_ENFUNC1_EXPORT_SHIFT 3
+#define OMAP4430_ENFUNC1_EXPORT_MASK (1 << 3)
/*
* Used by PRM_LDO_SRAM_CORE_SETUP, PRM_LDO_SRAM_IVA_SETUP,
* PRM_LDO_SRAM_MPU_SETUP
*/
-#define OMAP4430_ENFUNC3_SHIFT 5
-#define OMAP4430_ENFUNC3_MASK BITFIELD(5, 5)
+#define OMAP4430_ENFUNC3_EXPORT_SHIFT 5
+#define OMAP4430_ENFUNC3_EXPORT_MASK (1 << 5)
/*
* Used by PRM_LDO_SRAM_CORE_SETUP, PRM_LDO_SRAM_IVA_SETUP,
* PRM_LDO_SRAM_MPU_SETUP
*/
#define OMAP4430_ENFUNC4_SHIFT 6
-#define OMAP4430_ENFUNC4_MASK BITFIELD(6, 6)
+#define OMAP4430_ENFUNC4_MASK (1 << 6)
/*
* Used by PRM_LDO_SRAM_CORE_SETUP, PRM_LDO_SRAM_IVA_SETUP,
* PRM_LDO_SRAM_MPU_SETUP
*/
#define OMAP4430_ENFUNC5_SHIFT 7
-#define OMAP4430_ENFUNC5_MASK BITFIELD(7, 7)
+#define OMAP4430_ENFUNC5_MASK (1 << 7)
/* Used by PRM_VP_CORE_CONFIG, PRM_VP_IVA_CONFIG, PRM_VP_MPU_CONFIG */
#define OMAP4430_ERRORGAIN_SHIFT 16
-#define OMAP4430_ERRORGAIN_MASK BITFIELD(16, 23)
+#define OMAP4430_ERRORGAIN_MASK (0xff << 16)
/* Used by PRM_VP_CORE_CONFIG, PRM_VP_IVA_CONFIG, PRM_VP_MPU_CONFIG */
#define OMAP4430_ERROROFFSET_SHIFT 24
-#define OMAP4430_ERROROFFSET_MASK BITFIELD(24, 31)
+#define OMAP4430_ERROROFFSET_MASK (0xff << 24)
/* Used by PRM_RSTST */
#define OMAP4430_EXTERNAL_WARM_RST_SHIFT 5
-#define OMAP4430_EXTERNAL_WARM_RST_MASK BITFIELD(5, 5)
+#define OMAP4430_EXTERNAL_WARM_RST_MASK (1 << 5)
/* Used by PRM_VP_CORE_CONFIG, PRM_VP_IVA_CONFIG, PRM_VP_MPU_CONFIG */
#define OMAP4430_FORCEUPDATE_SHIFT 1
-#define OMAP4430_FORCEUPDATE_MASK BITFIELD(1, 1)
+#define OMAP4430_FORCEUPDATE_MASK (1 << 1)
/* Used by PRM_VP_CORE_VOLTAGE, PRM_VP_IVA_VOLTAGE, PRM_VP_MPU_VOLTAGE */
#define OMAP4430_FORCEUPDATEWAIT_SHIFT 8
-#define OMAP4430_FORCEUPDATEWAIT_MASK BITFIELD(8, 31)
+#define OMAP4430_FORCEUPDATEWAIT_MASK (0xffffff << 8)
/* Used by PRM_IRQENABLE_DUCATI, PRM_IRQENABLE_TESLA */
#define OMAP4430_FORCEWKUP_EN_SHIFT 10
-#define OMAP4430_FORCEWKUP_EN_MASK BITFIELD(10, 10)
+#define OMAP4430_FORCEWKUP_EN_MASK (1 << 10)
/* Used by PRM_IRQSTATUS_DUCATI, PRM_IRQSTATUS_TESLA */
#define OMAP4430_FORCEWKUP_ST_SHIFT 10
-#define OMAP4430_FORCEWKUP_ST_MASK BITFIELD(10, 10)
+#define OMAP4430_FORCEWKUP_ST_MASK (1 << 10)
+
+/* Used by REVISION_PRM */
+#define OMAP4430_FUNC_SHIFT 16
+#define OMAP4430_FUNC_MASK (0xfff << 16)
/* Used by PM_GFX_PWRSTCTRL */
#define OMAP4430_GFX_MEM_ONSTATE_SHIFT 16
-#define OMAP4430_GFX_MEM_ONSTATE_MASK BITFIELD(16, 17)
+#define OMAP4430_GFX_MEM_ONSTATE_MASK (0x3 << 16)
/* Used by PM_GFX_PWRSTST */
#define OMAP4430_GFX_MEM_STATEST_SHIFT 4
-#define OMAP4430_GFX_MEM_STATEST_MASK BITFIELD(4, 5)
+#define OMAP4430_GFX_MEM_STATEST_MASK (0x3 << 4)
/* Used by PRM_RSTST */
#define OMAP4430_GLOBAL_COLD_RST_SHIFT 0
-#define OMAP4430_GLOBAL_COLD_RST_MASK BITFIELD(0, 0)
+#define OMAP4430_GLOBAL_COLD_RST_MASK (1 << 0)
/* Used by PRM_RSTST */
#define OMAP4430_GLOBAL_WARM_SW_RST_SHIFT 1
-#define OMAP4430_GLOBAL_WARM_SW_RST_MASK BITFIELD(1, 1)
+#define OMAP4430_GLOBAL_WARM_SW_RST_MASK (1 << 1)
/* Used by PRM_IO_PMCTRL */
#define OMAP4430_GLOBAL_WUEN_SHIFT 16
-#define OMAP4430_GLOBAL_WUEN_MASK BITFIELD(16, 16)
+#define OMAP4430_GLOBAL_WUEN_MASK (1 << 16)
/* Used by PRM_VC_CFG_I2C_MODE */
#define OMAP4430_HSMCODE_SHIFT 0
-#define OMAP4430_HSMCODE_MASK BITFIELD(0, 2)
+#define OMAP4430_HSMCODE_MASK (0x7 << 0)
/* Used by PRM_VC_CFG_I2C_MODE */
#define OMAP4430_HSMODEEN_SHIFT 3
-#define OMAP4430_HSMODEEN_MASK BITFIELD(3, 3)
+#define OMAP4430_HSMODEEN_MASK (1 << 3)
/* Used by PRM_VC_CFG_I2C_CLK */
#define OMAP4430_HSSCLH_SHIFT 16
-#define OMAP4430_HSSCLH_MASK BITFIELD(16, 23)
+#define OMAP4430_HSSCLH_MASK (0xff << 16)
/* Used by PRM_VC_CFG_I2C_CLK */
#define OMAP4430_HSSCLL_SHIFT 24
-#define OMAP4430_HSSCLL_MASK BITFIELD(24, 31)
+#define OMAP4430_HSSCLL_MASK (0xff << 24)
/* Used by PM_IVAHD_PWRSTCTRL */
#define OMAP4430_HWA_MEM_ONSTATE_SHIFT 16
-#define OMAP4430_HWA_MEM_ONSTATE_MASK BITFIELD(16, 17)
+#define OMAP4430_HWA_MEM_ONSTATE_MASK (0x3 << 16)
/* Used by PM_IVAHD_PWRSTCTRL */
#define OMAP4430_HWA_MEM_RETSTATE_SHIFT 8
-#define OMAP4430_HWA_MEM_RETSTATE_MASK BITFIELD(8, 8)
+#define OMAP4430_HWA_MEM_RETSTATE_MASK (1 << 8)
/* Used by PM_IVAHD_PWRSTST */
#define OMAP4430_HWA_MEM_STATEST_SHIFT 4
-#define OMAP4430_HWA_MEM_STATEST_MASK BITFIELD(4, 5)
+#define OMAP4430_HWA_MEM_STATEST_MASK (0x3 << 4)
/* Used by RM_MPU_RSTST */
#define OMAP4430_ICECRUSHER_MPU_RST_SHIFT 1
-#define OMAP4430_ICECRUSHER_MPU_RST_MASK BITFIELD(1, 1)
+#define OMAP4430_ICECRUSHER_MPU_RST_MASK (1 << 1)
/* Used by RM_DUCATI_RSTST */
#define OMAP4430_ICECRUSHER_RST1ST_SHIFT 5
-#define OMAP4430_ICECRUSHER_RST1ST_MASK BITFIELD(5, 5)
+#define OMAP4430_ICECRUSHER_RST1ST_MASK (1 << 5)
/* Used by RM_DUCATI_RSTST */
#define OMAP4430_ICECRUSHER_RST2ST_SHIFT 6
-#define OMAP4430_ICECRUSHER_RST2ST_MASK BITFIELD(6, 6)
+#define OMAP4430_ICECRUSHER_RST2ST_MASK (1 << 6)
/* Used by RM_IVAHD_RSTST */
#define OMAP4430_ICECRUSHER_SEQ1_RST1ST_SHIFT 5
-#define OMAP4430_ICECRUSHER_SEQ1_RST1ST_MASK BITFIELD(5, 5)
+#define OMAP4430_ICECRUSHER_SEQ1_RST1ST_MASK (1 << 5)
/* Used by RM_IVAHD_RSTST */
#define OMAP4430_ICECRUSHER_SEQ2_RST2ST_SHIFT 6
-#define OMAP4430_ICECRUSHER_SEQ2_RST2ST_MASK BITFIELD(6, 6)
+#define OMAP4430_ICECRUSHER_SEQ2_RST2ST_MASK (1 << 6)
/* Used by PRM_RSTST */
#define OMAP4430_ICEPICK_RST_SHIFT 9
-#define OMAP4430_ICEPICK_RST_MASK BITFIELD(9, 9)
+#define OMAP4430_ICEPICK_RST_MASK (1 << 9)
/* Used by PRM_VP_CORE_CONFIG, PRM_VP_IVA_CONFIG, PRM_VP_MPU_CONFIG */
#define OMAP4430_INITVDD_SHIFT 2
-#define OMAP4430_INITVDD_MASK BITFIELD(2, 2)
+#define OMAP4430_INITVDD_MASK (1 << 2)
/* Used by PRM_VP_CORE_CONFIG, PRM_VP_IVA_CONFIG, PRM_VP_MPU_CONFIG */
#define OMAP4430_INITVOLTAGE_SHIFT 8
-#define OMAP4430_INITVOLTAGE_MASK BITFIELD(8, 15)
+#define OMAP4430_INITVOLTAGE_MASK (0xff << 8)
/*
- * Used by PM_EMU_PWRSTST, PM_CORE_PWRSTST, PM_CAM_PWRSTST, PM_L3INIT_PWRSTST,
- * PM_ABE_PWRSTST, PM_GFX_PWRSTST, PM_MPU_PWRSTST, PM_CEFUSE_PWRSTST,
- * PM_DSS_PWRSTST, PM_L4PER_PWRSTST, PM_TESLA_PWRSTST, PM_IVAHD_PWRSTST
+ * Used by PM_ABE_PWRSTST, PM_CAM_PWRSTST, PM_CEFUSE_PWRSTST, PM_CORE_PWRSTST,
+ * PM_DSS_PWRSTST, PM_EMU_PWRSTST, PM_GFX_PWRSTST, PM_IVAHD_PWRSTST,
+ * PM_L3INIT_PWRSTST, PM_L4PER_PWRSTST, PM_MPU_PWRSTST, PM_TESLA_PWRSTST
*/
#define OMAP4430_INTRANSITION_SHIFT 20
-#define OMAP4430_INTRANSITION_MASK BITFIELD(20, 20)
+#define OMAP4430_INTRANSITION_MASK (1 << 20)
/* Used by PRM_IRQENABLE_DUCATI, PRM_IRQENABLE_MPU */
#define OMAP4430_IO_EN_SHIFT 9
-#define OMAP4430_IO_EN_MASK BITFIELD(9, 9)
+#define OMAP4430_IO_EN_MASK (1 << 9)
/* Used by PRM_IO_PMCTRL */
#define OMAP4430_IO_ON_STATUS_SHIFT 5
-#define OMAP4430_IO_ON_STATUS_MASK BITFIELD(5, 5)
+#define OMAP4430_IO_ON_STATUS_MASK (1 << 5)
/* Used by PRM_IRQSTATUS_DUCATI, PRM_IRQSTATUS_MPU */
#define OMAP4430_IO_ST_SHIFT 9
-#define OMAP4430_IO_ST_MASK BITFIELD(9, 9)
+#define OMAP4430_IO_ST_MASK (1 << 9)
/* Used by PRM_IO_PMCTRL */
#define OMAP4430_ISOCLK_OVERRIDE_SHIFT 0
-#define OMAP4430_ISOCLK_OVERRIDE_MASK BITFIELD(0, 0)
+#define OMAP4430_ISOCLK_OVERRIDE_MASK (1 << 0)
/* Used by PRM_IO_PMCTRL */
#define OMAP4430_ISOCLK_STATUS_SHIFT 1
-#define OMAP4430_ISOCLK_STATUS_MASK BITFIELD(1, 1)
+#define OMAP4430_ISOCLK_STATUS_MASK (1 << 1)
/* Used by PRM_IO_PMCTRL */
#define OMAP4430_ISOOVR_EXTEND_SHIFT 4
-#define OMAP4430_ISOOVR_EXTEND_MASK BITFIELD(4, 4)
+#define OMAP4430_ISOOVR_EXTEND_MASK (1 << 4)
/* Used by PRM_IO_COUNT */
#define OMAP4430_ISO_2_ON_TIME_SHIFT 0
-#define OMAP4430_ISO_2_ON_TIME_MASK BITFIELD(0, 7)
+#define OMAP4430_ISO_2_ON_TIME_MASK (0xff << 0)
/* Used by PM_L3INIT_PWRSTCTRL */
#define OMAP4430_L3INIT_BANK1_ONSTATE_SHIFT 16
-#define OMAP4430_L3INIT_BANK1_ONSTATE_MASK BITFIELD(16, 17)
+#define OMAP4430_L3INIT_BANK1_ONSTATE_MASK (0x3 << 16)
/* Used by PM_L3INIT_PWRSTCTRL */
#define OMAP4430_L3INIT_BANK1_RETSTATE_SHIFT 8
-#define OMAP4430_L3INIT_BANK1_RETSTATE_MASK BITFIELD(8, 8)
+#define OMAP4430_L3INIT_BANK1_RETSTATE_MASK (1 << 8)
/* Used by PM_L3INIT_PWRSTST */
#define OMAP4430_L3INIT_BANK1_STATEST_SHIFT 4
-#define OMAP4430_L3INIT_BANK1_STATEST_MASK BITFIELD(4, 5)
+#define OMAP4430_L3INIT_BANK1_STATEST_MASK (0x3 << 4)
+
+/*
+ * Used by PM_ABE_PWRSTST, PM_CORE_PWRSTST, PM_IVAHD_PWRSTST,
+ * PM_L3INIT_PWRSTST, PM_L4PER_PWRSTST, PM_MPU_PWRSTST, PM_TESLA_PWRSTST
+ */
+#define OMAP4430_LASTPOWERSTATEENTERED_SHIFT 24
+#define OMAP4430_LASTPOWERSTATEENTERED_MASK (0x3 << 24)
/*
- * Used by PM_CORE_PWRSTCTRL, PM_L3INIT_PWRSTCTRL, PM_ABE_PWRSTCTRL,
- * PM_MPU_PWRSTCTRL, PM_DSS_PWRSTCTRL, PM_L4PER_PWRSTCTRL, PM_TESLA_PWRSTCTRL,
- * PM_IVAHD_PWRSTCTRL
+ * Used by PM_ABE_PWRSTCTRL, PM_CORE_PWRSTCTRL, PM_DSS_PWRSTCTRL,
+ * PM_IVAHD_PWRSTCTRL, PM_L3INIT_PWRSTCTRL, PM_L4PER_PWRSTCTRL,
+ * PM_MPU_PWRSTCTRL, PM_TESLA_PWRSTCTRL
*/
#define OMAP4430_LOGICRETSTATE_SHIFT 2
-#define OMAP4430_LOGICRETSTATE_MASK BITFIELD(2, 2)
+#define OMAP4430_LOGICRETSTATE_MASK (1 << 2)
/*
- * Used by PM_EMU_PWRSTST, PM_CORE_PWRSTST, PM_CAM_PWRSTST, PM_L3INIT_PWRSTST,
- * PM_ABE_PWRSTST, PM_GFX_PWRSTST, PM_MPU_PWRSTST, PM_CEFUSE_PWRSTST,
- * PM_DSS_PWRSTST, PM_L4PER_PWRSTST, PM_TESLA_PWRSTST, PM_IVAHD_PWRSTST
+ * Used by PM_ABE_PWRSTST, PM_CAM_PWRSTST, PM_CEFUSE_PWRSTST, PM_CORE_PWRSTST,
+ * PM_DSS_PWRSTST, PM_EMU_PWRSTST, PM_GFX_PWRSTST, PM_IVAHD_PWRSTST,
+ * PM_L3INIT_PWRSTST, PM_L4PER_PWRSTST, PM_MPU_PWRSTST, PM_TESLA_PWRSTST
*/
#define OMAP4430_LOGICSTATEST_SHIFT 2
-#define OMAP4430_LOGICSTATEST_MASK BITFIELD(2, 2)
+#define OMAP4430_LOGICSTATEST_MASK (1 << 2)
/*
- * Used by RM_WKUP_GPIO1_CONTEXT, RM_WKUP_KEYBOARD_CONTEXT,
- * RM_WKUP_L4WKUP_CONTEXT, RM_WKUP_RTC_CONTEXT, RM_WKUP_SARRAM_CONTEXT,
- * RM_WKUP_SYNCTIMER_CONTEXT, RM_WKUP_TIMER12_CONTEXT, RM_WKUP_TIMER1_CONTEXT,
- * RM_WKUP_USIM_CONTEXT, RM_WKUP_WDT1_CONTEXT, RM_WKUP_WDT2_CONTEXT,
- * RM_EMU_DEBUGSS_CONTEXT, RM_D2D_SAD2D_CONTEXT, RM_D2D_SAD2D_FW_CONTEXT,
- * RM_DUCATI_DUCATI_CONTEXT, RM_L3INSTR_L3_3_CONTEXT,
- * RM_L3INSTR_L3_INSTR_CONTEXT, RM_L3INSTR_OCP_WP1_CONTEXT,
- * RM_L3_1_L3_1_CONTEXT, RM_L3_2_L3_2_CONTEXT, RM_L3_2_OCMC_RAM_CONTEXT,
- * RM_L4CFG_L4_CFG_CONTEXT, RM_L4CFG_SAR_ROM_CONTEXT, RM_MEMIF_DLL_CONTEXT,
- * RM_MEMIF_DLL_H_CONTEXT, RM_MEMIF_DMM_CONTEXT, RM_MEMIF_EMIF_FW_CONTEXT,
- * RM_CAM_FDIF_CONTEXT, RM_CAM_ISS_CONTEXT, RM_L3INIT_CCPTX_CONTEXT,
- * RM_L3INIT_EMAC_CONTEXT, RM_L3INIT_P1500_CONTEXT, RM_L3INIT_PCIESS_CONTEXT,
- * RM_L3INIT_SATA_CONTEXT, RM_L3INIT_TPPSS_CONTEXT, RM_L3INIT_UNIPRO1_CONTEXT,
- * RM_L3INIT_USBPHYOCP2SCP_CONTEXT, RM_L3INIT_XHPI_CONTEXT,
- * RM_ABE_AESS_CONTEXT, RM_ABE_DMIC_CONTEXT, RM_ABE_MCASP_CONTEXT,
+ * Used by RM_ABE_AESS_CONTEXT, RM_ABE_DMIC_CONTEXT, RM_ABE_MCASP_CONTEXT,
* RM_ABE_MCBSP1_CONTEXT, RM_ABE_MCBSP2_CONTEXT, RM_ABE_MCBSP3_CONTEXT,
* RM_ABE_PDM_CONTEXT, RM_ABE_SLIMBUS_CONTEXT, RM_ABE_TIMER5_CONTEXT,
* RM_ABE_TIMER6_CONTEXT, RM_ABE_TIMER7_CONTEXT, RM_ABE_TIMER8_CONTEXT,
- * RM_ABE_WDT3_CONTEXT, RM_GFX_GFX_CONTEXT, RM_MPU_MPU_CONTEXT,
- * RM_CEFUSE_CEFUSE_CONTEXT, RM_ALWON_MDMINTC_CONTEXT,
- * RM_ALWON_SR_CORE_CONTEXT, RM_ALWON_SR_IVA_CONTEXT, RM_ALWON_SR_MPU_CONTEXT,
- * RM_DSS_DEISS_CONTEXT, RM_DSS_DSS_CONTEXT, RM_L4PER_ADC_CONTEXT,
- * RM_L4PER_DMTIMER10_CONTEXT, RM_L4PER_DMTIMER11_CONTEXT,
- * RM_L4PER_DMTIMER2_CONTEXT, RM_L4PER_DMTIMER3_CONTEXT,
- * RM_L4PER_DMTIMER4_CONTEXT, RM_L4PER_DMTIMER9_CONTEXT, RM_L4PER_ELM_CONTEXT,
- * RM_L4PER_HDQ1W_CONTEXT, RM_L4PER_HECC1_CONTEXT, RM_L4PER_HECC2_CONTEXT,
- * RM_L4PER_I2C2_CONTEXT, RM_L4PER_I2C3_CONTEXT, RM_L4PER_I2C4_CONTEXT,
- * RM_L4PER_I2C5_CONTEXT, RM_L4PER_L4_PER_CONTEXT, RM_L4PER_MCASP2_CONTEXT,
- * RM_L4PER_MCASP3_CONTEXT, RM_L4PER_MCBSP4_CONTEXT, RM_L4PER_MCSPI1_CONTEXT,
- * RM_L4PER_MCSPI2_CONTEXT, RM_L4PER_MCSPI3_CONTEXT, RM_L4PER_MCSPI4_CONTEXT,
- * RM_L4PER_MGATE_CONTEXT, RM_L4PER_MMCSD3_CONTEXT, RM_L4PER_MMCSD4_CONTEXT,
- * RM_L4PER_MMCSD5_CONTEXT, RM_L4PER_MSPROHG_CONTEXT,
- * RM_L4PER_SLIMBUS2_CONTEXT, RM_L4SEC_PKAEIP29_CONTEXT,
- * RM_TESLA_TESLA_CONTEXT, RM_IVAHD_IVAHD_CONTEXT, RM_IVAHD_SL2_CONTEXT
+ * RM_ABE_WDT3_CONTEXT, RM_ALWON_MDMINTC_CONTEXT, RM_ALWON_SR_CORE_CONTEXT,
+ * RM_ALWON_SR_IVA_CONTEXT, RM_ALWON_SR_MPU_CONTEXT, RM_CAM_FDIF_CONTEXT,
+ * RM_CAM_ISS_CONTEXT, RM_CEFUSE_CEFUSE_CONTEXT, RM_D2D_SAD2D_CONTEXT,
+ * RM_D2D_SAD2D_FW_CONTEXT, RM_DSS_DEISS_CONTEXT, RM_DSS_DSS_CONTEXT,
+ * RM_DUCATI_DUCATI_CONTEXT, RM_EMU_DEBUGSS_CONTEXT, RM_GFX_GFX_CONTEXT,
+ * RM_IVAHD_IVAHD_CONTEXT, RM_IVAHD_SL2_CONTEXT, RM_L3INIT_CCPTX_CONTEXT,
+ * RM_L3INIT_EMAC_CONTEXT, RM_L3INIT_P1500_CONTEXT, RM_L3INIT_PCIESS_CONTEXT,
+ * RM_L3INIT_SATA_CONTEXT, RM_L3INIT_TPPSS_CONTEXT, RM_L3INIT_UNIPRO1_CONTEXT,
+ * RM_L3INIT_USBPHYOCP2SCP_CONTEXT, RM_L3INIT_XHPI_CONTEXT,
+ * RM_L3INSTR_L3_3_CONTEXT, RM_L3INSTR_L3_INSTR_CONTEXT,
+ * RM_L3INSTR_OCP_WP1_CONTEXT, RM_L3_1_L3_1_CONTEXT, RM_L3_2_L3_2_CONTEXT,
+ * RM_L3_2_OCMC_RAM_CONTEXT, RM_L4CFG_L4_CFG_CONTEXT, RM_L4CFG_SAR_ROM_CONTEXT,
+ * RM_L4PER_ADC_CONTEXT, RM_L4PER_DMTIMER10_CONTEXT,
+ * RM_L4PER_DMTIMER11_CONTEXT, RM_L4PER_DMTIMER2_CONTEXT,
+ * RM_L4PER_DMTIMER3_CONTEXT, RM_L4PER_DMTIMER4_CONTEXT,
+ * RM_L4PER_DMTIMER9_CONTEXT, RM_L4PER_ELM_CONTEXT, RM_L4PER_HDQ1W_CONTEXT,
+ * RM_L4PER_HECC1_CONTEXT, RM_L4PER_HECC2_CONTEXT, RM_L4PER_I2C2_CONTEXT,
+ * RM_L4PER_I2C3_CONTEXT, RM_L4PER_I2C4_CONTEXT, RM_L4PER_I2C5_CONTEXT,
+ * RM_L4PER_L4_PER_CONTEXT, RM_L4PER_MCASP2_CONTEXT, RM_L4PER_MCASP3_CONTEXT,
+ * RM_L4PER_MCBSP4_CONTEXT, RM_L4PER_MCSPI1_CONTEXT, RM_L4PER_MCSPI2_CONTEXT,
+ * RM_L4PER_MCSPI3_CONTEXT, RM_L4PER_MCSPI4_CONTEXT, RM_L4PER_MGATE_CONTEXT,
+ * RM_L4PER_MMCSD3_CONTEXT, RM_L4PER_MMCSD4_CONTEXT, RM_L4PER_MMCSD5_CONTEXT,
+ * RM_L4PER_MSPROHG_CONTEXT, RM_L4PER_SLIMBUS2_CONTEXT,
+ * RM_L4SEC_PKAEIP29_CONTEXT, RM_MEMIF_DLL_CONTEXT, RM_MEMIF_DLL_H_CONTEXT,
+ * RM_MEMIF_DMM_CONTEXT, RM_MEMIF_EMIF_1_CONTEXT, RM_MEMIF_EMIF_2_CONTEXT,
+ * RM_MEMIF_EMIF_FW_CONTEXT, RM_MPU_MPU_CONTEXT, RM_TESLA_TESLA_CONTEXT,
+ * RM_WKUP_GPIO1_CONTEXT, RM_WKUP_KEYBOARD_CONTEXT, RM_WKUP_L4WKUP_CONTEXT,
+ * RM_WKUP_RTC_CONTEXT, RM_WKUP_SARRAM_CONTEXT, RM_WKUP_SYNCTIMER_CONTEXT,
+ * RM_WKUP_TIMER12_CONTEXT, RM_WKUP_TIMER1_CONTEXT, RM_WKUP_USIM_CONTEXT,
+ * RM_WKUP_WDT1_CONTEXT, RM_WKUP_WDT2_CONTEXT
*/
#define OMAP4430_LOSTCONTEXT_DFF_SHIFT 0
-#define OMAP4430_LOSTCONTEXT_DFF_MASK BITFIELD(0, 0)
+#define OMAP4430_LOSTCONTEXT_DFF_MASK (1 << 0)
/*
* Used by RM_D2D_MODEM_ICR_CONTEXT, RM_D2D_SAD2D_CONTEXT,
- * RM_D2D_SAD2D_FW_CONTEXT, RM_DUCATI_DUCATI_CONTEXT, RM_L3INSTR_L3_3_CONTEXT,
+ * RM_D2D_SAD2D_FW_CONTEXT, RM_DSS_DSS_CONTEXT, RM_DUCATI_DUCATI_CONTEXT,
+ * RM_L3INIT_HSI_CONTEXT, RM_L3INIT_MMC1_CONTEXT, RM_L3INIT_MMC2_CONTEXT,
+ * RM_L3INIT_MMC6_CONTEXT, RM_L3INIT_USB_HOST_CONTEXT,
+ * RM_L3INIT_USB_HOST_FS_CONTEXT, RM_L3INIT_USB_OTG_CONTEXT,
+ * RM_L3INIT_USB_TLL_CONTEXT, RM_L3INSTR_L3_3_CONTEXT,
* RM_L3INSTR_OCP_WP1_CONTEXT, RM_L3_1_L3_1_CONTEXT, RM_L3_2_GPMC_CONTEXT,
* RM_L3_2_L3_2_CONTEXT, RM_L4CFG_HW_SEM_CONTEXT, RM_L4CFG_L4_CFG_CONTEXT,
- * RM_L4CFG_MAILBOX_CONTEXT, RM_MEMIF_DMM_CONTEXT, RM_MEMIF_EMIF_1_CONTEXT,
- * RM_MEMIF_EMIF_2_CONTEXT, RM_MEMIF_EMIF_FW_CONTEXT, RM_MEMIF_EMIF_H1_CONTEXT,
- * RM_MEMIF_EMIF_H2_CONTEXT, RM_SDMA_SDMA_CONTEXT, RM_L3INIT_HSI_CONTEXT,
- * RM_L3INIT_MMC1_CONTEXT, RM_L3INIT_MMC2_CONTEXT, RM_L3INIT_MMC6_CONTEXT,
- * RM_L3INIT_USB_HOST_CONTEXT, RM_L3INIT_USB_HOST_FS_CONTEXT,
- * RM_L3INIT_USB_OTG_CONTEXT, RM_L3INIT_USB_TLL_CONTEXT, RM_DSS_DSS_CONTEXT,
- * RM_L4PER_GPIO2_CONTEXT, RM_L4PER_GPIO3_CONTEXT, RM_L4PER_GPIO4_CONTEXT,
- * RM_L4PER_GPIO5_CONTEXT, RM_L4PER_GPIO6_CONTEXT, RM_L4PER_I2C1_CONTEXT,
- * RM_L4PER_L4_PER_CONTEXT, RM_L4PER_UART1_CONTEXT, RM_L4PER_UART2_CONTEXT,
- * RM_L4PER_UART3_CONTEXT, RM_L4PER_UART4_CONTEXT, RM_L4SEC_AES1_CONTEXT,
- * RM_L4SEC_AES2_CONTEXT, RM_L4SEC_CRYPTODMA_CONTEXT, RM_L4SEC_DES3DES_CONTEXT,
- * RM_L4SEC_RNG_CONTEXT, RM_L4SEC_SHA2MD51_CONTEXT, RM_TESLA_TESLA_CONTEXT
+ * RM_L4CFG_MAILBOX_CONTEXT, RM_L4PER_GPIO2_CONTEXT, RM_L4PER_GPIO3_CONTEXT,
+ * RM_L4PER_GPIO4_CONTEXT, RM_L4PER_GPIO5_CONTEXT, RM_L4PER_GPIO6_CONTEXT,
+ * RM_L4PER_I2C1_CONTEXT, RM_L4PER_L4_PER_CONTEXT, RM_L4PER_UART1_CONTEXT,
+ * RM_L4PER_UART2_CONTEXT, RM_L4PER_UART3_CONTEXT, RM_L4PER_UART4_CONTEXT,
+ * RM_L4SEC_AES1_CONTEXT, RM_L4SEC_AES2_CONTEXT, RM_L4SEC_CRYPTODMA_CONTEXT,
+ * RM_L4SEC_DES3DES_CONTEXT, RM_L4SEC_RNG_CONTEXT, RM_L4SEC_SHA2MD51_CONTEXT,
+ * RM_MEMIF_DMM_CONTEXT, RM_MEMIF_EMIF_1_CONTEXT, RM_MEMIF_EMIF_2_CONTEXT,
+ * RM_MEMIF_EMIF_FW_CONTEXT, RM_MEMIF_EMIF_H1_CONTEXT,
+ * RM_MEMIF_EMIF_H2_CONTEXT, RM_SDMA_SDMA_CONTEXT, RM_TESLA_TESLA_CONTEXT
*/
#define OMAP4430_LOSTCONTEXT_RFF_SHIFT 1
-#define OMAP4430_LOSTCONTEXT_RFF_MASK BITFIELD(1, 1)
+#define OMAP4430_LOSTCONTEXT_RFF_MASK (1 << 1)
/* Used by RM_ABE_AESS_CONTEXT */
#define OMAP4430_LOSTMEM_AESSMEM_SHIFT 8
-#define OMAP4430_LOSTMEM_AESSMEM_MASK BITFIELD(8, 8)
+#define OMAP4430_LOSTMEM_AESSMEM_MASK (1 << 8)
/* Used by RM_CAM_FDIF_CONTEXT, RM_CAM_ISS_CONTEXT */
#define OMAP4430_LOSTMEM_CAM_MEM_SHIFT 8
-#define OMAP4430_LOSTMEM_CAM_MEM_MASK BITFIELD(8, 8)
+#define OMAP4430_LOSTMEM_CAM_MEM_MASK (1 << 8)
/* Used by RM_L3INSTR_OCP_WP1_CONTEXT */
#define OMAP4430_LOSTMEM_CORE_NRET_BANK_SHIFT 8
-#define OMAP4430_LOSTMEM_CORE_NRET_BANK_MASK BITFIELD(8, 8)
+#define OMAP4430_LOSTMEM_CORE_NRET_BANK_MASK (1 << 8)
/* Renamed from LOSTMEM_CORE_NRET_BANK Used by RM_MEMIF_DMM_CONTEXT */
#define OMAP4430_LOSTMEM_CORE_NRET_BANK_9_9_SHIFT 9
-#define OMAP4430_LOSTMEM_CORE_NRET_BANK_9_9_MASK BITFIELD(9, 9)
+#define OMAP4430_LOSTMEM_CORE_NRET_BANK_9_9_MASK (1 << 9)
/* Used by RM_L3_2_OCMC_RAM_CONTEXT */
#define OMAP4430_LOSTMEM_CORE_OCMRAM_SHIFT 8
-#define OMAP4430_LOSTMEM_CORE_OCMRAM_MASK BITFIELD(8, 8)
+#define OMAP4430_LOSTMEM_CORE_OCMRAM_MASK (1 << 8)
/*
* Used by RM_D2D_MODEM_ICR_CONTEXT, RM_MEMIF_DMM_CONTEXT,
* RM_SDMA_SDMA_CONTEXT
*/
#define OMAP4430_LOSTMEM_CORE_OTHER_BANK_SHIFT 8
-#define OMAP4430_LOSTMEM_CORE_OTHER_BANK_MASK BITFIELD(8, 8)
+#define OMAP4430_LOSTMEM_CORE_OTHER_BANK_MASK (1 << 8)
/* Used by RM_DSS_DEISS_CONTEXT, RM_DSS_DSS_CONTEXT */
#define OMAP4430_LOSTMEM_DSS_MEM_SHIFT 8
-#define OMAP4430_LOSTMEM_DSS_MEM_MASK BITFIELD(8, 8)
+#define OMAP4430_LOSTMEM_DSS_MEM_MASK (1 << 8)
/* Used by RM_DUCATI_DUCATI_CONTEXT */
#define OMAP4430_LOSTMEM_DUCATI_L2RAM_SHIFT 9
-#define OMAP4430_LOSTMEM_DUCATI_L2RAM_MASK BITFIELD(9, 9)
+#define OMAP4430_LOSTMEM_DUCATI_L2RAM_MASK (1 << 9)
/* Used by RM_DUCATI_DUCATI_CONTEXT */
#define OMAP4430_LOSTMEM_DUCATI_UNICACHE_SHIFT 8
-#define OMAP4430_LOSTMEM_DUCATI_UNICACHE_MASK BITFIELD(8, 8)
+#define OMAP4430_LOSTMEM_DUCATI_UNICACHE_MASK (1 << 8)
/* Used by RM_EMU_DEBUGSS_CONTEXT */
#define OMAP4430_LOSTMEM_EMU_BANK_SHIFT 8
-#define OMAP4430_LOSTMEM_EMU_BANK_MASK BITFIELD(8, 8)
+#define OMAP4430_LOSTMEM_EMU_BANK_MASK (1 << 8)
/* Used by RM_GFX_GFX_CONTEXT */
#define OMAP4430_LOSTMEM_GFX_MEM_SHIFT 8
-#define OMAP4430_LOSTMEM_GFX_MEM_MASK BITFIELD(8, 8)
+#define OMAP4430_LOSTMEM_GFX_MEM_MASK (1 << 8)
/* Used by RM_IVAHD_IVAHD_CONTEXT */
#define OMAP4430_LOSTMEM_HWA_MEM_SHIFT 10
-#define OMAP4430_LOSTMEM_HWA_MEM_MASK BITFIELD(10, 10)
+#define OMAP4430_LOSTMEM_HWA_MEM_MASK (1 << 10)
/*
* Used by RM_L3INIT_CCPTX_CONTEXT, RM_L3INIT_EMAC_CONTEXT,
@@ -620,19 +644,19 @@
* RM_L3INIT_USB_OTG_CONTEXT, RM_L3INIT_XHPI_CONTEXT
*/
#define OMAP4430_LOSTMEM_L3INIT_BANK1_SHIFT 8
-#define OMAP4430_LOSTMEM_L3INIT_BANK1_MASK BITFIELD(8, 8)
+#define OMAP4430_LOSTMEM_L3INIT_BANK1_MASK (1 << 8)
/* Used by RM_MPU_MPU_CONTEXT */
#define OMAP4430_LOSTMEM_MPU_L1_SHIFT 8
-#define OMAP4430_LOSTMEM_MPU_L1_MASK BITFIELD(8, 8)
+#define OMAP4430_LOSTMEM_MPU_L1_MASK (1 << 8)
/* Used by RM_MPU_MPU_CONTEXT */
#define OMAP4430_LOSTMEM_MPU_L2_SHIFT 9
-#define OMAP4430_LOSTMEM_MPU_L2_MASK BITFIELD(9, 9)
+#define OMAP4430_LOSTMEM_MPU_L2_MASK (1 << 9)
/* Used by RM_MPU_MPU_CONTEXT */
#define OMAP4430_LOSTMEM_MPU_RAM_SHIFT 10
-#define OMAP4430_LOSTMEM_MPU_RAM_MASK BITFIELD(10, 10)
+#define OMAP4430_LOSTMEM_MPU_RAM_MASK (1 << 10)
/*
* Used by RM_L4PER_HECC1_CONTEXT, RM_L4PER_HECC2_CONTEXT,
@@ -640,14 +664,14 @@
* RM_L4PER_MMCSD5_CONTEXT, RM_L4PER_SLIMBUS2_CONTEXT, RM_L4SEC_PKAEIP29_CONTEXT
*/
#define OMAP4430_LOSTMEM_NONRETAINED_BANK_SHIFT 8
-#define OMAP4430_LOSTMEM_NONRETAINED_BANK_MASK BITFIELD(8, 8)
+#define OMAP4430_LOSTMEM_NONRETAINED_BANK_MASK (1 << 8)
/*
* Used by RM_ABE_DMIC_CONTEXT, RM_ABE_MCBSP1_CONTEXT, RM_ABE_MCBSP2_CONTEXT,
* RM_ABE_MCBSP3_CONTEXT, RM_ABE_PDM_CONTEXT, RM_ABE_SLIMBUS_CONTEXT
*/
#define OMAP4430_LOSTMEM_PERIHPMEM_SHIFT 8
-#define OMAP4430_LOSTMEM_PERIHPMEM_MASK BITFIELD(8, 8)
+#define OMAP4430_LOSTMEM_PERIHPMEM_MASK (1 << 8)
/*
* Used by RM_L4PER_MSPROHG_CONTEXT, RM_L4PER_UART1_CONTEXT,
@@ -655,245 +679,237 @@
* RM_L4SEC_CRYPTODMA_CONTEXT
*/
#define OMAP4430_LOSTMEM_RETAINED_BANK_SHIFT 8
-#define OMAP4430_LOSTMEM_RETAINED_BANK_MASK BITFIELD(8, 8)
+#define OMAP4430_LOSTMEM_RETAINED_BANK_MASK (1 << 8)
/* Used by RM_IVAHD_SL2_CONTEXT */
#define OMAP4430_LOSTMEM_SL2_MEM_SHIFT 8
-#define OMAP4430_LOSTMEM_SL2_MEM_MASK BITFIELD(8, 8)
+#define OMAP4430_LOSTMEM_SL2_MEM_MASK (1 << 8)
/* Used by RM_IVAHD_IVAHD_CONTEXT */
#define OMAP4430_LOSTMEM_TCM1_MEM_SHIFT 8
-#define OMAP4430_LOSTMEM_TCM1_MEM_MASK BITFIELD(8, 8)
+#define OMAP4430_LOSTMEM_TCM1_MEM_MASK (1 << 8)
/* Used by RM_IVAHD_IVAHD_CONTEXT */
#define OMAP4430_LOSTMEM_TCM2_MEM_SHIFT 9
-#define OMAP4430_LOSTMEM_TCM2_MEM_MASK BITFIELD(9, 9)
+#define OMAP4430_LOSTMEM_TCM2_MEM_MASK (1 << 9)
/* Used by RM_TESLA_TESLA_CONTEXT */
#define OMAP4430_LOSTMEM_TESLA_EDMA_SHIFT 10
-#define OMAP4430_LOSTMEM_TESLA_EDMA_MASK BITFIELD(10, 10)
+#define OMAP4430_LOSTMEM_TESLA_EDMA_MASK (1 << 10)
/* Used by RM_TESLA_TESLA_CONTEXT */
#define OMAP4430_LOSTMEM_TESLA_L1_SHIFT 8
-#define OMAP4430_LOSTMEM_TESLA_L1_MASK BITFIELD(8, 8)
+#define OMAP4430_LOSTMEM_TESLA_L1_MASK (1 << 8)
/* Used by RM_TESLA_TESLA_CONTEXT */
#define OMAP4430_LOSTMEM_TESLA_L2_SHIFT 9
-#define OMAP4430_LOSTMEM_TESLA_L2_MASK BITFIELD(9, 9)
+#define OMAP4430_LOSTMEM_TESLA_L2_MASK (1 << 9)
/* Used by RM_WKUP_SARRAM_CONTEXT */
#define OMAP4430_LOSTMEM_WKUP_BANK_SHIFT 8
-#define OMAP4430_LOSTMEM_WKUP_BANK_MASK BITFIELD(8, 8)
+#define OMAP4430_LOSTMEM_WKUP_BANK_MASK (1 << 8)
/*
- * Used by PM_CORE_PWRSTCTRL, PM_CAM_PWRSTCTRL, PM_L3INIT_PWRSTCTRL,
- * PM_ABE_PWRSTCTRL, PM_GFX_PWRSTCTRL, PM_MPU_PWRSTCTRL, PM_CEFUSE_PWRSTCTRL,
- * PM_DSS_PWRSTCTRL, PM_L4PER_PWRSTCTRL, PM_TESLA_PWRSTCTRL, PM_IVAHD_PWRSTCTRL
+ * Used by PM_ABE_PWRSTCTRL, PM_CAM_PWRSTCTRL, PM_CEFUSE_PWRSTCTRL,
+ * PM_CORE_PWRSTCTRL, PM_DSS_PWRSTCTRL, PM_GFX_PWRSTCTRL, PM_IVAHD_PWRSTCTRL,
+ * PM_L3INIT_PWRSTCTRL, PM_L4PER_PWRSTCTRL, PM_MPU_PWRSTCTRL, PM_TESLA_PWRSTCTRL
*/
#define OMAP4430_LOWPOWERSTATECHANGE_SHIFT 4
-#define OMAP4430_LOWPOWERSTATECHANGE_MASK BITFIELD(4, 4)
-
-/* Used by PM_CORE_PWRSTCTRL */
-#define OMAP4430_MEMORYCHANGE_SHIFT 3
-#define OMAP4430_MEMORYCHANGE_MASK BITFIELD(3, 3)
+#define OMAP4430_LOWPOWERSTATECHANGE_MASK (1 << 4)
/* Used by PRM_MODEM_IF_CTRL */
#define OMAP4430_MODEM_READY_SHIFT 1
-#define OMAP4430_MODEM_READY_MASK BITFIELD(1, 1)
+#define OMAP4430_MODEM_READY_MASK (1 << 1)
/* Used by PRM_MODEM_IF_CTRL */
#define OMAP4430_MODEM_SHUTDOWN_IRQ_SHIFT 9
-#define OMAP4430_MODEM_SHUTDOWN_IRQ_MASK BITFIELD(9, 9)
+#define OMAP4430_MODEM_SHUTDOWN_IRQ_MASK (1 << 9)
/* Used by PRM_MODEM_IF_CTRL */
#define OMAP4430_MODEM_SLEEP_ST_SHIFT 16
-#define OMAP4430_MODEM_SLEEP_ST_MASK BITFIELD(16, 16)
+#define OMAP4430_MODEM_SLEEP_ST_MASK (1 << 16)
/* Used by PRM_MODEM_IF_CTRL */
#define OMAP4430_MODEM_WAKE_IRQ_SHIFT 8
-#define OMAP4430_MODEM_WAKE_IRQ_MASK BITFIELD(8, 8)
+#define OMAP4430_MODEM_WAKE_IRQ_MASK (1 << 8)
/* Used by PM_MPU_PWRSTCTRL */
#define OMAP4430_MPU_L1_ONSTATE_SHIFT 16
-#define OMAP4430_MPU_L1_ONSTATE_MASK BITFIELD(16, 17)
+#define OMAP4430_MPU_L1_ONSTATE_MASK (0x3 << 16)
/* Used by PM_MPU_PWRSTCTRL */
#define OMAP4430_MPU_L1_RETSTATE_SHIFT 8
-#define OMAP4430_MPU_L1_RETSTATE_MASK BITFIELD(8, 8)
+#define OMAP4430_MPU_L1_RETSTATE_MASK (1 << 8)
/* Used by PM_MPU_PWRSTST */
#define OMAP4430_MPU_L1_STATEST_SHIFT 4
-#define OMAP4430_MPU_L1_STATEST_MASK BITFIELD(4, 5)
+#define OMAP4430_MPU_L1_STATEST_MASK (0x3 << 4)
/* Used by PM_MPU_PWRSTCTRL */
#define OMAP4430_MPU_L2_ONSTATE_SHIFT 18
-#define OMAP4430_MPU_L2_ONSTATE_MASK BITFIELD(18, 19)
+#define OMAP4430_MPU_L2_ONSTATE_MASK (0x3 << 18)
/* Used by PM_MPU_PWRSTCTRL */
#define OMAP4430_MPU_L2_RETSTATE_SHIFT 9
-#define OMAP4430_MPU_L2_RETSTATE_MASK BITFIELD(9, 9)
+#define OMAP4430_MPU_L2_RETSTATE_MASK (1 << 9)
/* Used by PM_MPU_PWRSTST */
#define OMAP4430_MPU_L2_STATEST_SHIFT 6
-#define OMAP4430_MPU_L2_STATEST_MASK BITFIELD(6, 7)
+#define OMAP4430_MPU_L2_STATEST_MASK (0x3 << 6)
/* Used by PM_MPU_PWRSTCTRL */
#define OMAP4430_MPU_RAM_ONSTATE_SHIFT 20
-#define OMAP4430_MPU_RAM_ONSTATE_MASK BITFIELD(20, 21)
+#define OMAP4430_MPU_RAM_ONSTATE_MASK (0x3 << 20)
/* Used by PM_MPU_PWRSTCTRL */
#define OMAP4430_MPU_RAM_RETSTATE_SHIFT 10
-#define OMAP4430_MPU_RAM_RETSTATE_MASK BITFIELD(10, 10)
+#define OMAP4430_MPU_RAM_RETSTATE_MASK (1 << 10)
/* Used by PM_MPU_PWRSTST */
#define OMAP4430_MPU_RAM_STATEST_SHIFT 8
-#define OMAP4430_MPU_RAM_STATEST_MASK BITFIELD(8, 9)
+#define OMAP4430_MPU_RAM_STATEST_MASK (0x3 << 8)
/* Used by PRM_RSTST */
#define OMAP4430_MPU_SECURITY_VIOL_RST_SHIFT 2
-#define OMAP4430_MPU_SECURITY_VIOL_RST_MASK BITFIELD(2, 2)
+#define OMAP4430_MPU_SECURITY_VIOL_RST_MASK (1 << 2)
/* Used by PRM_RSTST */
#define OMAP4430_MPU_WDT_RST_SHIFT 3
-#define OMAP4430_MPU_WDT_RST_MASK BITFIELD(3, 3)
+#define OMAP4430_MPU_WDT_RST_MASK (1 << 3)
/* Used by PM_L4PER_PWRSTCTRL */
#define OMAP4430_NONRETAINED_BANK_ONSTATE_SHIFT 18
-#define OMAP4430_NONRETAINED_BANK_ONSTATE_MASK BITFIELD(18, 19)
+#define OMAP4430_NONRETAINED_BANK_ONSTATE_MASK (0x3 << 18)
/* Used by PM_L4PER_PWRSTCTRL */
#define OMAP4430_NONRETAINED_BANK_RETSTATE_SHIFT 9
-#define OMAP4430_NONRETAINED_BANK_RETSTATE_MASK BITFIELD(9, 9)
+#define OMAP4430_NONRETAINED_BANK_RETSTATE_MASK (1 << 9)
/* Used by PM_L4PER_PWRSTST */
#define OMAP4430_NONRETAINED_BANK_STATEST_SHIFT 6
-#define OMAP4430_NONRETAINED_BANK_STATEST_MASK BITFIELD(6, 7)
+#define OMAP4430_NONRETAINED_BANK_STATEST_MASK (0x3 << 6)
/* Used by PM_CORE_PWRSTCTRL */
#define OMAP4430_OCP_NRET_BANK_ONSTATE_SHIFT 24
-#define OMAP4430_OCP_NRET_BANK_ONSTATE_MASK BITFIELD(24, 25)
+#define OMAP4430_OCP_NRET_BANK_ONSTATE_MASK (0x3 << 24)
/* Used by PM_CORE_PWRSTCTRL */
#define OMAP4430_OCP_NRET_BANK_RETSTATE_SHIFT 12
-#define OMAP4430_OCP_NRET_BANK_RETSTATE_MASK BITFIELD(12, 12)
+#define OMAP4430_OCP_NRET_BANK_RETSTATE_MASK (1 << 12)
/* Used by PM_CORE_PWRSTST */
#define OMAP4430_OCP_NRET_BANK_STATEST_SHIFT 12
-#define OMAP4430_OCP_NRET_BANK_STATEST_MASK BITFIELD(12, 13)
+#define OMAP4430_OCP_NRET_BANK_STATEST_MASK (0x3 << 12)
/*
* Used by PRM_VC_VAL_CMD_VDD_CORE_L, PRM_VC_VAL_CMD_VDD_IVA_L,
* PRM_VC_VAL_CMD_VDD_MPU_L
*/
#define OMAP4430_OFF_SHIFT 0
-#define OMAP4430_OFF_MASK BITFIELD(0, 7)
-
-/* Used by PRM_LDO_BANDGAP_CTRL */
-#define OMAP4430_OFF_ENABLE_SHIFT 0
-#define OMAP4430_OFF_ENABLE_MASK BITFIELD(0, 0)
+#define OMAP4430_OFF_MASK (0xff << 0)
/*
* Used by PRM_VC_VAL_CMD_VDD_CORE_L, PRM_VC_VAL_CMD_VDD_IVA_L,
* PRM_VC_VAL_CMD_VDD_MPU_L
*/
#define OMAP4430_ON_SHIFT 24
-#define OMAP4430_ON_MASK BITFIELD(24, 31)
+#define OMAP4430_ON_MASK (0xff << 24)
/*
* Used by PRM_VC_VAL_CMD_VDD_CORE_L, PRM_VC_VAL_CMD_VDD_IVA_L,
* PRM_VC_VAL_CMD_VDD_MPU_L
*/
#define OMAP4430_ONLP_SHIFT 16
-#define OMAP4430_ONLP_MASK BITFIELD(16, 23)
+#define OMAP4430_ONLP_MASK (0xff << 16)
/* Used by PRM_LDO_ABB_IVA_CTRL, PRM_LDO_ABB_MPU_CTRL */
#define OMAP4430_OPP_CHANGE_SHIFT 2
-#define OMAP4430_OPP_CHANGE_MASK BITFIELD(2, 2)
+#define OMAP4430_OPP_CHANGE_MASK (1 << 2)
/* Used by PRM_LDO_ABB_IVA_CTRL, PRM_LDO_ABB_MPU_CTRL */
#define OMAP4430_OPP_SEL_SHIFT 0
-#define OMAP4430_OPP_SEL_MASK BITFIELD(0, 1)
+#define OMAP4430_OPP_SEL_MASK (0x3 << 0)
/* Used by PRM_SRAM_COUNT */
#define OMAP4430_PCHARGECNT_VALUE_SHIFT 0
-#define OMAP4430_PCHARGECNT_VALUE_MASK BITFIELD(0, 5)
+#define OMAP4430_PCHARGECNT_VALUE_MASK (0x3f << 0)
/* Used by PRM_PSCON_COUNT */
#define OMAP4430_PCHARGE_TIME_SHIFT 0
-#define OMAP4430_PCHARGE_TIME_MASK BITFIELD(0, 7)
+#define OMAP4430_PCHARGE_TIME_MASK (0xff << 0)
/* Used by PM_ABE_PWRSTCTRL */
#define OMAP4430_PERIPHMEM_ONSTATE_SHIFT 20
-#define OMAP4430_PERIPHMEM_ONSTATE_MASK BITFIELD(20, 21)
+#define OMAP4430_PERIPHMEM_ONSTATE_MASK (0x3 << 20)
/* Used by PM_ABE_PWRSTCTRL */
#define OMAP4430_PERIPHMEM_RETSTATE_SHIFT 10
-#define OMAP4430_PERIPHMEM_RETSTATE_MASK BITFIELD(10, 10)
+#define OMAP4430_PERIPHMEM_RETSTATE_MASK (1 << 10)
/* Used by PM_ABE_PWRSTST */
#define OMAP4430_PERIPHMEM_STATEST_SHIFT 8
-#define OMAP4430_PERIPHMEM_STATEST_MASK BITFIELD(8, 9)
+#define OMAP4430_PERIPHMEM_STATEST_MASK (0x3 << 8)
/* Used by PRM_PHASE1_CNDP */
#define OMAP4430_PHASE1_CNDP_SHIFT 0
-#define OMAP4430_PHASE1_CNDP_MASK BITFIELD(0, 31)
+#define OMAP4430_PHASE1_CNDP_MASK (0xffffffff << 0)
/* Used by PRM_PHASE2A_CNDP */
#define OMAP4430_PHASE2A_CNDP_SHIFT 0
-#define OMAP4430_PHASE2A_CNDP_MASK BITFIELD(0, 31)
+#define OMAP4430_PHASE2A_CNDP_MASK (0xffffffff << 0)
/* Used by PRM_PHASE2B_CNDP */
#define OMAP4430_PHASE2B_CNDP_SHIFT 0
-#define OMAP4430_PHASE2B_CNDP_MASK BITFIELD(0, 31)
+#define OMAP4430_PHASE2B_CNDP_MASK (0xffffffff << 0)
/* Used by PRM_PSCON_COUNT */
#define OMAP4430_PONOUT_2_PGOODIN_TIME_SHIFT 8
-#define OMAP4430_PONOUT_2_PGOODIN_TIME_MASK BITFIELD(8, 15)
+#define OMAP4430_PONOUT_2_PGOODIN_TIME_MASK (0xff << 8)
/*
- * Used by PM_EMU_PWRSTCTRL, PM_CORE_PWRSTCTRL, PM_CAM_PWRSTCTRL,
- * PM_L3INIT_PWRSTCTRL, PM_ABE_PWRSTCTRL, PM_GFX_PWRSTCTRL, PM_MPU_PWRSTCTRL,
- * PM_CEFUSE_PWRSTCTRL, PM_DSS_PWRSTCTRL, PM_L4PER_PWRSTCTRL,
- * PM_TESLA_PWRSTCTRL, PM_IVAHD_PWRSTCTRL
+ * Used by PM_ABE_PWRSTCTRL, PM_CAM_PWRSTCTRL, PM_CEFUSE_PWRSTCTRL,
+ * PM_CORE_PWRSTCTRL, PM_DSS_PWRSTCTRL, PM_EMU_PWRSTCTRL, PM_GFX_PWRSTCTRL,
+ * PM_IVAHD_PWRSTCTRL, PM_L3INIT_PWRSTCTRL, PM_L4PER_PWRSTCTRL,
+ * PM_MPU_PWRSTCTRL, PM_TESLA_PWRSTCTRL
*/
#define OMAP4430_POWERSTATE_SHIFT 0
-#define OMAP4430_POWERSTATE_MASK BITFIELD(0, 1)
+#define OMAP4430_POWERSTATE_MASK (0x3 << 0)
/*
- * Used by PM_EMU_PWRSTST, PM_CORE_PWRSTST, PM_CAM_PWRSTST, PM_L3INIT_PWRSTST,
- * PM_ABE_PWRSTST, PM_GFX_PWRSTST, PM_MPU_PWRSTST, PM_CEFUSE_PWRSTST,
- * PM_DSS_PWRSTST, PM_L4PER_PWRSTST, PM_TESLA_PWRSTST, PM_IVAHD_PWRSTST
+ * Used by PM_ABE_PWRSTST, PM_CAM_PWRSTST, PM_CEFUSE_PWRSTST, PM_CORE_PWRSTST,
+ * PM_DSS_PWRSTST, PM_EMU_PWRSTST, PM_GFX_PWRSTST, PM_IVAHD_PWRSTST,
+ * PM_L3INIT_PWRSTST, PM_L4PER_PWRSTST, PM_MPU_PWRSTST, PM_TESLA_PWRSTST
*/
#define OMAP4430_POWERSTATEST_SHIFT 0
-#define OMAP4430_POWERSTATEST_MASK BITFIELD(0, 1)
+#define OMAP4430_POWERSTATEST_MASK (0x3 << 0)
/* Used by PRM_PWRREQCTRL */
#define OMAP4430_PWRREQ_COND_SHIFT 0
-#define OMAP4430_PWRREQ_COND_MASK BITFIELD(0, 1)
+#define OMAP4430_PWRREQ_COND_MASK (0x3 << 0)
/* Used by PRM_VC_CFG_CHANNEL */
#define OMAP4430_RACEN_VDD_CORE_L_SHIFT 3
-#define OMAP4430_RACEN_VDD_CORE_L_MASK BITFIELD(3, 3)
+#define OMAP4430_RACEN_VDD_CORE_L_MASK (1 << 3)
/* Used by PRM_VC_CFG_CHANNEL */
#define OMAP4430_RACEN_VDD_IVA_L_SHIFT 11
-#define OMAP4430_RACEN_VDD_IVA_L_MASK BITFIELD(11, 11)
+#define OMAP4430_RACEN_VDD_IVA_L_MASK (1 << 11)
/* Used by PRM_VC_CFG_CHANNEL */
#define OMAP4430_RACEN_VDD_MPU_L_SHIFT 20
-#define OMAP4430_RACEN_VDD_MPU_L_MASK BITFIELD(20, 20)
+#define OMAP4430_RACEN_VDD_MPU_L_MASK (1 << 20)
/* Used by PRM_VC_CFG_CHANNEL */
#define OMAP4430_RAC_VDD_CORE_L_SHIFT 2
-#define OMAP4430_RAC_VDD_CORE_L_MASK BITFIELD(2, 2)
+#define OMAP4430_RAC_VDD_CORE_L_MASK (1 << 2)
/* Used by PRM_VC_CFG_CHANNEL */
#define OMAP4430_RAC_VDD_IVA_L_SHIFT 10
-#define OMAP4430_RAC_VDD_IVA_L_MASK BITFIELD(10, 10)
+#define OMAP4430_RAC_VDD_IVA_L_MASK (1 << 10)
/* Used by PRM_VC_CFG_CHANNEL */
#define OMAP4430_RAC_VDD_MPU_L_SHIFT 19
-#define OMAP4430_RAC_VDD_MPU_L_MASK BITFIELD(19, 19)
+#define OMAP4430_RAC_VDD_MPU_L_MASK (1 << 19)
/*
* Used by PRM_VOLTSETUP_CORE_OFF, PRM_VOLTSETUP_CORE_RET_SLEEP,
@@ -901,7 +917,7 @@
* PRM_VOLTSETUP_MPU_RET_SLEEP
*/
#define OMAP4430_RAMP_DOWN_COUNT_SHIFT 16
-#define OMAP4430_RAMP_DOWN_COUNT_MASK BITFIELD(16, 21)
+#define OMAP4430_RAMP_DOWN_COUNT_MASK (0x3f << 16)
/*
* Used by PRM_VOLTSETUP_CORE_OFF, PRM_VOLTSETUP_CORE_RET_SLEEP,
@@ -909,7 +925,7 @@
* PRM_VOLTSETUP_MPU_RET_SLEEP
*/
#define OMAP4430_RAMP_DOWN_PRESCAL_SHIFT 24
-#define OMAP4430_RAMP_DOWN_PRESCAL_MASK BITFIELD(24, 25)
+#define OMAP4430_RAMP_DOWN_PRESCAL_MASK (0x3 << 24)
/*
* Used by PRM_VOLTSETUP_CORE_OFF, PRM_VOLTSETUP_CORE_RET_SLEEP,
@@ -917,7 +933,7 @@
* PRM_VOLTSETUP_MPU_RET_SLEEP
*/
#define OMAP4430_RAMP_UP_COUNT_SHIFT 0
-#define OMAP4430_RAMP_UP_COUNT_MASK BITFIELD(0, 5)
+#define OMAP4430_RAMP_UP_COUNT_MASK (0x3f << 0)
/*
* Used by PRM_VOLTSETUP_CORE_OFF, PRM_VOLTSETUP_CORE_RET_SLEEP,
@@ -925,1281 +941,1381 @@
* PRM_VOLTSETUP_MPU_RET_SLEEP
*/
#define OMAP4430_RAMP_UP_PRESCAL_SHIFT 8
-#define OMAP4430_RAMP_UP_PRESCAL_MASK BITFIELD(8, 9)
+#define OMAP4430_RAMP_UP_PRESCAL_MASK (0x3 << 8)
/* Used by PRM_VC_CFG_CHANNEL */
#define OMAP4430_RAV_VDD_CORE_L_SHIFT 1
-#define OMAP4430_RAV_VDD_CORE_L_MASK BITFIELD(1, 1)
+#define OMAP4430_RAV_VDD_CORE_L_MASK (1 << 1)
/* Used by PRM_VC_CFG_CHANNEL */
#define OMAP4430_RAV_VDD_IVA_L_SHIFT 9
-#define OMAP4430_RAV_VDD_IVA_L_MASK BITFIELD(9, 9)
+#define OMAP4430_RAV_VDD_IVA_L_MASK (1 << 9)
/* Used by PRM_VC_CFG_CHANNEL */
#define OMAP4430_RAV_VDD_MPU_L_SHIFT 18
-#define OMAP4430_RAV_VDD_MPU_L_MASK BITFIELD(18, 18)
+#define OMAP4430_RAV_VDD_MPU_L_MASK (1 << 18)
/* Used by PRM_VC_VAL_BYPASS */
#define OMAP4430_REGADDR_SHIFT 8
-#define OMAP4430_REGADDR_MASK BITFIELD(8, 15)
+#define OMAP4430_REGADDR_MASK (0xff << 8)
/*
* Used by PRM_VC_VAL_CMD_VDD_CORE_L, PRM_VC_VAL_CMD_VDD_IVA_L,
* PRM_VC_VAL_CMD_VDD_MPU_L
*/
#define OMAP4430_RET_SHIFT 8
-#define OMAP4430_RET_MASK BITFIELD(8, 15)
+#define OMAP4430_RET_MASK (0xff << 8)
/* Used by PM_L4PER_PWRSTCTRL */
#define OMAP4430_RETAINED_BANK_ONSTATE_SHIFT 16
-#define OMAP4430_RETAINED_BANK_ONSTATE_MASK BITFIELD(16, 17)
+#define OMAP4430_RETAINED_BANK_ONSTATE_MASK (0x3 << 16)
/* Used by PM_L4PER_PWRSTCTRL */
#define OMAP4430_RETAINED_BANK_RETSTATE_SHIFT 8
-#define OMAP4430_RETAINED_BANK_RETSTATE_MASK BITFIELD(8, 8)
+#define OMAP4430_RETAINED_BANK_RETSTATE_MASK (1 << 8)
/* Used by PM_L4PER_PWRSTST */
#define OMAP4430_RETAINED_BANK_STATEST_SHIFT 4
-#define OMAP4430_RETAINED_BANK_STATEST_MASK BITFIELD(4, 5)
+#define OMAP4430_RETAINED_BANK_STATEST_MASK (0x3 << 4)
/*
* Used by PRM_LDO_SRAM_CORE_CTRL, PRM_LDO_SRAM_IVA_CTRL,
* PRM_LDO_SRAM_MPU_CTRL
*/
#define OMAP4430_RETMODE_ENABLE_SHIFT 0
-#define OMAP4430_RETMODE_ENABLE_MASK BITFIELD(0, 0)
+#define OMAP4430_RETMODE_ENABLE_MASK (1 << 0)
-/* Used by REVISION_PRM */
-#define OMAP4430_REV_SHIFT 0
-#define OMAP4430_REV_MASK BITFIELD(0, 7)
-
-/* Used by RM_DUCATI_RSTCTRL, RM_TESLA_RSTCTRL, RM_IVAHD_RSTCTRL */
+/* Used by RM_DUCATI_RSTCTRL, RM_IVAHD_RSTCTRL, RM_TESLA_RSTCTRL */
#define OMAP4430_RST1_SHIFT 0
-#define OMAP4430_RST1_MASK BITFIELD(0, 0)
+#define OMAP4430_RST1_MASK (1 << 0)
-/* Used by RM_DUCATI_RSTST, RM_TESLA_RSTST, RM_IVAHD_RSTST */
+/* Used by RM_DUCATI_RSTST, RM_IVAHD_RSTST, RM_TESLA_RSTST */
#define OMAP4430_RST1ST_SHIFT 0
-#define OMAP4430_RST1ST_MASK BITFIELD(0, 0)
+#define OMAP4430_RST1ST_MASK (1 << 0)
-/* Used by RM_DUCATI_RSTCTRL, RM_TESLA_RSTCTRL, RM_IVAHD_RSTCTRL */
+/* Used by RM_DUCATI_RSTCTRL, RM_IVAHD_RSTCTRL, RM_TESLA_RSTCTRL */
#define OMAP4430_RST2_SHIFT 1
-#define OMAP4430_RST2_MASK BITFIELD(1, 1)
+#define OMAP4430_RST2_MASK (1 << 1)
-/* Used by RM_DUCATI_RSTST, RM_TESLA_RSTST, RM_IVAHD_RSTST */
+/* Used by RM_DUCATI_RSTST, RM_IVAHD_RSTST, RM_TESLA_RSTST */
#define OMAP4430_RST2ST_SHIFT 1
-#define OMAP4430_RST2ST_MASK BITFIELD(1, 1)
+#define OMAP4430_RST2ST_MASK (1 << 1)
/* Used by RM_DUCATI_RSTCTRL, RM_IVAHD_RSTCTRL */
#define OMAP4430_RST3_SHIFT 2
-#define OMAP4430_RST3_MASK BITFIELD(2, 2)
+#define OMAP4430_RST3_MASK (1 << 2)
/* Used by RM_DUCATI_RSTST, RM_IVAHD_RSTST */
#define OMAP4430_RST3ST_SHIFT 2
-#define OMAP4430_RST3ST_MASK BITFIELD(2, 2)
+#define OMAP4430_RST3ST_MASK (1 << 2)
/* Used by PRM_RSTTIME */
#define OMAP4430_RSTTIME1_SHIFT 0
-#define OMAP4430_RSTTIME1_MASK BITFIELD(0, 9)
+#define OMAP4430_RSTTIME1_MASK (0x3ff << 0)
/* Used by PRM_RSTTIME */
#define OMAP4430_RSTTIME2_SHIFT 10
-#define OMAP4430_RSTTIME2_MASK BITFIELD(10, 14)
+#define OMAP4430_RSTTIME2_MASK (0x1f << 10)
/* Used by PRM_RSTCTRL */
#define OMAP4430_RST_GLOBAL_COLD_SW_SHIFT 1
-#define OMAP4430_RST_GLOBAL_COLD_SW_MASK BITFIELD(1, 1)
+#define OMAP4430_RST_GLOBAL_COLD_SW_MASK (1 << 1)
/* Used by PRM_RSTCTRL */
#define OMAP4430_RST_GLOBAL_WARM_SW_SHIFT 0
-#define OMAP4430_RST_GLOBAL_WARM_SW_MASK BITFIELD(0, 0)
+#define OMAP4430_RST_GLOBAL_WARM_SW_MASK (1 << 0)
+
+/* Used by REVISION_PRM */
+#define OMAP4430_R_RTL_SHIFT 11
+#define OMAP4430_R_RTL_MASK (0x1f << 11)
/* Used by PRM_VC_CFG_CHANNEL */
#define OMAP4430_SA_VDD_CORE_L_SHIFT 0
-#define OMAP4430_SA_VDD_CORE_L_MASK BITFIELD(0, 0)
+#define OMAP4430_SA_VDD_CORE_L_MASK (1 << 0)
/* Renamed from SA_VDD_CORE_L Used by PRM_VC_SMPS_SA */
#define OMAP4430_SA_VDD_CORE_L_0_6_SHIFT 0
-#define OMAP4430_SA_VDD_CORE_L_0_6_MASK BITFIELD(0, 6)
+#define OMAP4430_SA_VDD_CORE_L_0_6_MASK (0x7f << 0)
/* Used by PRM_VC_CFG_CHANNEL */
#define OMAP4430_SA_VDD_IVA_L_SHIFT 8
-#define OMAP4430_SA_VDD_IVA_L_MASK BITFIELD(8, 8)
+#define OMAP4430_SA_VDD_IVA_L_MASK (1 << 8)
/* Renamed from SA_VDD_IVA_L Used by PRM_VC_SMPS_SA */
#define OMAP4430_SA_VDD_IVA_L_PRM_VC_SMPS_SA_SHIFT 8
-#define OMAP4430_SA_VDD_IVA_L_PRM_VC_SMPS_SA_MASK BITFIELD(8, 14)
+#define OMAP4430_SA_VDD_IVA_L_PRM_VC_SMPS_SA_MASK (0x7f << 8)
/* Used by PRM_VC_CFG_CHANNEL */
#define OMAP4430_SA_VDD_MPU_L_SHIFT 16
-#define OMAP4430_SA_VDD_MPU_L_MASK BITFIELD(16, 16)
+#define OMAP4430_SA_VDD_MPU_L_MASK (1 << 16)
/* Renamed from SA_VDD_MPU_L Used by PRM_VC_SMPS_SA */
#define OMAP4430_SA_VDD_MPU_L_PRM_VC_SMPS_SA_SHIFT 16
-#define OMAP4430_SA_VDD_MPU_L_PRM_VC_SMPS_SA_MASK BITFIELD(16, 22)
+#define OMAP4430_SA_VDD_MPU_L_PRM_VC_SMPS_SA_MASK (0x7f << 16)
+
+/* Used by REVISION_PRM */
+#define OMAP4430_SCHEME_SHIFT 30
+#define OMAP4430_SCHEME_MASK (0x3 << 30)
/* Used by PRM_VC_CFG_I2C_CLK */
#define OMAP4430_SCLH_SHIFT 0
-#define OMAP4430_SCLH_MASK BITFIELD(0, 7)
+#define OMAP4430_SCLH_MASK (0xff << 0)
/* Used by PRM_VC_CFG_I2C_CLK */
#define OMAP4430_SCLL_SHIFT 8
-#define OMAP4430_SCLL_MASK BITFIELD(8, 15)
+#define OMAP4430_SCLL_MASK (0xff << 8)
/* Used by PRM_RSTST */
#define OMAP4430_SECURE_WDT_RST_SHIFT 4
-#define OMAP4430_SECURE_WDT_RST_MASK BITFIELD(4, 4)
+#define OMAP4430_SECURE_WDT_RST_MASK (1 << 4)
/* Used by PM_IVAHD_PWRSTCTRL */
#define OMAP4430_SL2_MEM_ONSTATE_SHIFT 18
-#define OMAP4430_SL2_MEM_ONSTATE_MASK BITFIELD(18, 19)
+#define OMAP4430_SL2_MEM_ONSTATE_MASK (0x3 << 18)
/* Used by PM_IVAHD_PWRSTCTRL */
#define OMAP4430_SL2_MEM_RETSTATE_SHIFT 9
-#define OMAP4430_SL2_MEM_RETSTATE_MASK BITFIELD(9, 9)
+#define OMAP4430_SL2_MEM_RETSTATE_MASK (1 << 9)
/* Used by PM_IVAHD_PWRSTST */
#define OMAP4430_SL2_MEM_STATEST_SHIFT 6
-#define OMAP4430_SL2_MEM_STATEST_MASK BITFIELD(6, 7)
+#define OMAP4430_SL2_MEM_STATEST_MASK (0x3 << 6)
/* Used by PRM_VC_VAL_BYPASS */
#define OMAP4430_SLAVEADDR_SHIFT 0
-#define OMAP4430_SLAVEADDR_MASK BITFIELD(0, 6)
+#define OMAP4430_SLAVEADDR_MASK (0x7f << 0)
/* Used by PRM_LDO_ABB_IVA_SETUP, PRM_LDO_ABB_MPU_SETUP */
#define OMAP4430_SLEEP_RBB_SEL_SHIFT 3
-#define OMAP4430_SLEEP_RBB_SEL_MASK BITFIELD(3, 3)
+#define OMAP4430_SLEEP_RBB_SEL_MASK (1 << 3)
/* Used by PRM_SRAM_COUNT */
#define OMAP4430_SLPCNT_VALUE_SHIFT 16
-#define OMAP4430_SLPCNT_VALUE_MASK BITFIELD(16, 23)
+#define OMAP4430_SLPCNT_VALUE_MASK (0xff << 16)
/* Used by PRM_VP_CORE_VSTEPMAX, PRM_VP_IVA_VSTEPMAX, PRM_VP_MPU_VSTEPMAX */
#define OMAP4430_SMPSWAITTIMEMAX_SHIFT 8
-#define OMAP4430_SMPSWAITTIMEMAX_MASK BITFIELD(8, 23)
+#define OMAP4430_SMPSWAITTIMEMAX_MASK (0xffff << 8)
/* Used by PRM_VP_CORE_VSTEPMIN, PRM_VP_IVA_VSTEPMIN, PRM_VP_MPU_VSTEPMIN */
#define OMAP4430_SMPSWAITTIMEMIN_SHIFT 8
-#define OMAP4430_SMPSWAITTIMEMIN_MASK BITFIELD(8, 23)
+#define OMAP4430_SMPSWAITTIMEMIN_MASK (0xffff << 8)
+
+/* Used by PRM_VC_ERRST */
+#define OMAP4430_SMPS_RA_ERR_CORE_SHIFT 1
+#define OMAP4430_SMPS_RA_ERR_CORE_MASK (1 << 1)
+
+/* Used by PRM_VC_ERRST */
+#define OMAP4430_SMPS_RA_ERR_IVA_SHIFT 9
+#define OMAP4430_SMPS_RA_ERR_IVA_MASK (1 << 9)
+
+/* Used by PRM_VC_ERRST */
+#define OMAP4430_SMPS_RA_ERR_MPU_SHIFT 17
+#define OMAP4430_SMPS_RA_ERR_MPU_MASK (1 << 17)
+
+/* Used by PRM_VC_ERRST */
+#define OMAP4430_SMPS_SA_ERR_CORE_SHIFT 0
+#define OMAP4430_SMPS_SA_ERR_CORE_MASK (1 << 0)
+
+/* Used by PRM_VC_ERRST */
+#define OMAP4430_SMPS_SA_ERR_IVA_SHIFT 8
+#define OMAP4430_SMPS_SA_ERR_IVA_MASK (1 << 8)
+
+/* Used by PRM_VC_ERRST */
+#define OMAP4430_SMPS_SA_ERR_MPU_SHIFT 16
+#define OMAP4430_SMPS_SA_ERR_MPU_MASK (1 << 16)
+
+/* Used by PRM_VC_ERRST */
+#define OMAP4430_SMPS_TIMEOUT_ERR_CORE_SHIFT 2
+#define OMAP4430_SMPS_TIMEOUT_ERR_CORE_MASK (1 << 2)
+
+/* Used by PRM_VC_ERRST */
+#define OMAP4430_SMPS_TIMEOUT_ERR_IVA_SHIFT 10
+#define OMAP4430_SMPS_TIMEOUT_ERR_IVA_MASK (1 << 10)
+
+/* Used by PRM_VC_ERRST */
+#define OMAP4430_SMPS_TIMEOUT_ERR_MPU_SHIFT 18
+#define OMAP4430_SMPS_TIMEOUT_ERR_MPU_MASK (1 << 18)
/* Used by PRM_LDO_ABB_IVA_SETUP, PRM_LDO_ABB_MPU_SETUP */
#define OMAP4430_SR2EN_SHIFT 0
-#define OMAP4430_SR2EN_MASK BITFIELD(0, 0)
+#define OMAP4430_SR2EN_MASK (1 << 0)
/* Used by PRM_LDO_ABB_IVA_CTRL, PRM_LDO_ABB_MPU_CTRL */
#define OMAP4430_SR2_IN_TRANSITION_SHIFT 6
-#define OMAP4430_SR2_IN_TRANSITION_MASK BITFIELD(6, 6)
+#define OMAP4430_SR2_IN_TRANSITION_MASK (1 << 6)
/* Used by PRM_LDO_ABB_IVA_CTRL, PRM_LDO_ABB_MPU_CTRL */
#define OMAP4430_SR2_STATUS_SHIFT 3
-#define OMAP4430_SR2_STATUS_MASK BITFIELD(3, 4)
+#define OMAP4430_SR2_STATUS_MASK (0x3 << 3)
/* Used by PRM_LDO_ABB_IVA_SETUP, PRM_LDO_ABB_MPU_SETUP */
#define OMAP4430_SR2_WTCNT_VALUE_SHIFT 8
-#define OMAP4430_SR2_WTCNT_VALUE_MASK BITFIELD(8, 15)
+#define OMAP4430_SR2_WTCNT_VALUE_MASK (0xff << 8)
/*
* Used by PRM_LDO_SRAM_CORE_CTRL, PRM_LDO_SRAM_IVA_CTRL,
* PRM_LDO_SRAM_MPU_CTRL
*/
#define OMAP4430_SRAMLDO_STATUS_SHIFT 8
-#define OMAP4430_SRAMLDO_STATUS_MASK BITFIELD(8, 8)
+#define OMAP4430_SRAMLDO_STATUS_MASK (1 << 8)
/*
* Used by PRM_LDO_SRAM_CORE_CTRL, PRM_LDO_SRAM_IVA_CTRL,
* PRM_LDO_SRAM_MPU_CTRL
*/
#define OMAP4430_SRAM_IN_TRANSITION_SHIFT 9
-#define OMAP4430_SRAM_IN_TRANSITION_MASK BITFIELD(9, 9)
+#define OMAP4430_SRAM_IN_TRANSITION_MASK (1 << 9)
/* Used by PRM_VC_CFG_I2C_MODE */
#define OMAP4430_SRMODEEN_SHIFT 4
-#define OMAP4430_SRMODEEN_MASK BITFIELD(4, 4)
+#define OMAP4430_SRMODEEN_MASK (1 << 4)
/* Used by PRM_VOLTSETUP_WARMRESET */
#define OMAP4430_STABLE_COUNT_SHIFT 0
-#define OMAP4430_STABLE_COUNT_MASK BITFIELD(0, 5)
+#define OMAP4430_STABLE_COUNT_MASK (0x3f << 0)
/* Used by PRM_VOLTSETUP_WARMRESET */
#define OMAP4430_STABLE_PRESCAL_SHIFT 8
-#define OMAP4430_STABLE_PRESCAL_MASK BITFIELD(8, 9)
+#define OMAP4430_STABLE_PRESCAL_MASK (0x3 << 8)
+
+/* Used by PRM_LDO_BANDGAP_SETUP */
+#define OMAP4430_STARTUP_COUNT_SHIFT 0
+#define OMAP4430_STARTUP_COUNT_MASK (0xff << 0)
+
+/* Renamed from STARTUP_COUNT Used by PRM_SRAM_COUNT */
+#define OMAP4430_STARTUP_COUNT_24_31_SHIFT 24
+#define OMAP4430_STARTUP_COUNT_24_31_MASK (0xff << 24)
/* Used by PM_IVAHD_PWRSTCTRL */
#define OMAP4430_TCM1_MEM_ONSTATE_SHIFT 20
-#define OMAP4430_TCM1_MEM_ONSTATE_MASK BITFIELD(20, 21)
+#define OMAP4430_TCM1_MEM_ONSTATE_MASK (0x3 << 20)
/* Used by PM_IVAHD_PWRSTCTRL */
#define OMAP4430_TCM1_MEM_RETSTATE_SHIFT 10
-#define OMAP4430_TCM1_MEM_RETSTATE_MASK BITFIELD(10, 10)
+#define OMAP4430_TCM1_MEM_RETSTATE_MASK (1 << 10)
/* Used by PM_IVAHD_PWRSTST */
#define OMAP4430_TCM1_MEM_STATEST_SHIFT 8
-#define OMAP4430_TCM1_MEM_STATEST_MASK BITFIELD(8, 9)
+#define OMAP4430_TCM1_MEM_STATEST_MASK (0x3 << 8)
/* Used by PM_IVAHD_PWRSTCTRL */
#define OMAP4430_TCM2_MEM_ONSTATE_SHIFT 22
-#define OMAP4430_TCM2_MEM_ONSTATE_MASK BITFIELD(22, 23)
+#define OMAP4430_TCM2_MEM_ONSTATE_MASK (0x3 << 22)
/* Used by PM_IVAHD_PWRSTCTRL */
#define OMAP4430_TCM2_MEM_RETSTATE_SHIFT 11
-#define OMAP4430_TCM2_MEM_RETSTATE_MASK BITFIELD(11, 11)
+#define OMAP4430_TCM2_MEM_RETSTATE_MASK (1 << 11)
/* Used by PM_IVAHD_PWRSTST */
#define OMAP4430_TCM2_MEM_STATEST_SHIFT 10
-#define OMAP4430_TCM2_MEM_STATEST_MASK BITFIELD(10, 11)
+#define OMAP4430_TCM2_MEM_STATEST_MASK (0x3 << 10)
/* Used by RM_TESLA_RSTST */
#define OMAP4430_TESLASS_EMU_RSTST_SHIFT 2
-#define OMAP4430_TESLASS_EMU_RSTST_MASK BITFIELD(2, 2)
+#define OMAP4430_TESLASS_EMU_RSTST_MASK (1 << 2)
/* Used by RM_TESLA_RSTST */
#define OMAP4430_TESLA_DSP_EMU_REQ_RSTST_SHIFT 3
-#define OMAP4430_TESLA_DSP_EMU_REQ_RSTST_MASK BITFIELD(3, 3)
+#define OMAP4430_TESLA_DSP_EMU_REQ_RSTST_MASK (1 << 3)
/* Used by PM_TESLA_PWRSTCTRL */
#define OMAP4430_TESLA_EDMA_ONSTATE_SHIFT 20
-#define OMAP4430_TESLA_EDMA_ONSTATE_MASK BITFIELD(20, 21)
+#define OMAP4430_TESLA_EDMA_ONSTATE_MASK (0x3 << 20)
/* Used by PM_TESLA_PWRSTCTRL */
#define OMAP4430_TESLA_EDMA_RETSTATE_SHIFT 10
-#define OMAP4430_TESLA_EDMA_RETSTATE_MASK BITFIELD(10, 10)
+#define OMAP4430_TESLA_EDMA_RETSTATE_MASK (1 << 10)
/* Used by PM_TESLA_PWRSTST */
#define OMAP4430_TESLA_EDMA_STATEST_SHIFT 8
-#define OMAP4430_TESLA_EDMA_STATEST_MASK BITFIELD(8, 9)
+#define OMAP4430_TESLA_EDMA_STATEST_MASK (0x3 << 8)
/* Used by PM_TESLA_PWRSTCTRL */
#define OMAP4430_TESLA_L1_ONSTATE_SHIFT 16
-#define OMAP4430_TESLA_L1_ONSTATE_MASK BITFIELD(16, 17)
+#define OMAP4430_TESLA_L1_ONSTATE_MASK (0x3 << 16)
/* Used by PM_TESLA_PWRSTCTRL */
#define OMAP4430_TESLA_L1_RETSTATE_SHIFT 8
-#define OMAP4430_TESLA_L1_RETSTATE_MASK BITFIELD(8, 8)
+#define OMAP4430_TESLA_L1_RETSTATE_MASK (1 << 8)
/* Used by PM_TESLA_PWRSTST */
#define OMAP4430_TESLA_L1_STATEST_SHIFT 4
-#define OMAP4430_TESLA_L1_STATEST_MASK BITFIELD(4, 5)
+#define OMAP4430_TESLA_L1_STATEST_MASK (0x3 << 4)
/* Used by PM_TESLA_PWRSTCTRL */
#define OMAP4430_TESLA_L2_ONSTATE_SHIFT 18
-#define OMAP4430_TESLA_L2_ONSTATE_MASK BITFIELD(18, 19)
+#define OMAP4430_TESLA_L2_ONSTATE_MASK (0x3 << 18)
/* Used by PM_TESLA_PWRSTCTRL */
#define OMAP4430_TESLA_L2_RETSTATE_SHIFT 9
-#define OMAP4430_TESLA_L2_RETSTATE_MASK BITFIELD(9, 9)
+#define OMAP4430_TESLA_L2_RETSTATE_MASK (1 << 9)
/* Used by PM_TESLA_PWRSTST */
#define OMAP4430_TESLA_L2_STATEST_SHIFT 6
-#define OMAP4430_TESLA_L2_STATEST_MASK BITFIELD(6, 7)
+#define OMAP4430_TESLA_L2_STATEST_MASK (0x3 << 6)
/* Used by PRM_VP_CORE_VLIMITTO, PRM_VP_IVA_VLIMITTO, PRM_VP_MPU_VLIMITTO */
#define OMAP4430_TIMEOUT_SHIFT 0
-#define OMAP4430_TIMEOUT_MASK BITFIELD(0, 15)
+#define OMAP4430_TIMEOUT_MASK (0xffff << 0)
/* Used by PRM_VP_CORE_CONFIG, PRM_VP_IVA_CONFIG, PRM_VP_MPU_CONFIG */
#define OMAP4430_TIMEOUTEN_SHIFT 3
-#define OMAP4430_TIMEOUTEN_MASK BITFIELD(3, 3)
+#define OMAP4430_TIMEOUTEN_MASK (1 << 3)
/* Used by PRM_IRQENABLE_DUCATI, PRM_IRQENABLE_MPU */
#define OMAP4430_TRANSITION_EN_SHIFT 8
-#define OMAP4430_TRANSITION_EN_MASK BITFIELD(8, 8)
+#define OMAP4430_TRANSITION_EN_MASK (1 << 8)
/* Used by PRM_IRQSTATUS_DUCATI, PRM_IRQSTATUS_MPU */
#define OMAP4430_TRANSITION_ST_SHIFT 8
-#define OMAP4430_TRANSITION_ST_MASK BITFIELD(8, 8)
+#define OMAP4430_TRANSITION_ST_MASK (1 << 8)
/* Used by PRM_VC_VAL_BYPASS */
#define OMAP4430_VALID_SHIFT 24
-#define OMAP4430_VALID_MASK BITFIELD(24, 24)
+#define OMAP4430_VALID_MASK (1 << 24)
/* Used by PRM_IRQENABLE_DUCATI, PRM_IRQENABLE_MPU */
#define OMAP4430_VC_BYPASSACK_EN_SHIFT 14
-#define OMAP4430_VC_BYPASSACK_EN_MASK BITFIELD(14, 14)
+#define OMAP4430_VC_BYPASSACK_EN_MASK (1 << 14)
/* Used by PRM_IRQSTATUS_DUCATI, PRM_IRQSTATUS_MPU */
#define OMAP4430_VC_BYPASSACK_ST_SHIFT 14
-#define OMAP4430_VC_BYPASSACK_ST_MASK BITFIELD(14, 14)
+#define OMAP4430_VC_BYPASSACK_ST_MASK (1 << 14)
+
+/* Used by PRM_IRQENABLE_DUCATI, PRM_IRQENABLE_MPU */
+#define OMAP4430_VC_CORE_VPACK_EN_SHIFT 22
+#define OMAP4430_VC_CORE_VPACK_EN_MASK (1 << 22)
+
+/* Used by PRM_IRQSTATUS_DUCATI, PRM_IRQSTATUS_MPU */
+#define OMAP4430_VC_CORE_VPACK_ST_SHIFT 22
+#define OMAP4430_VC_CORE_VPACK_ST_MASK (1 << 22)
/* Used by PRM_IRQENABLE_DUCATI, PRM_IRQENABLE_MPU */
#define OMAP4430_VC_IVA_VPACK_EN_SHIFT 30
-#define OMAP4430_VC_IVA_VPACK_EN_MASK BITFIELD(30, 30)
+#define OMAP4430_VC_IVA_VPACK_EN_MASK (1 << 30)
/* Used by PRM_IRQSTATUS_DUCATI, PRM_IRQSTATUS_MPU */
#define OMAP4430_VC_IVA_VPACK_ST_SHIFT 30
-#define OMAP4430_VC_IVA_VPACK_ST_MASK BITFIELD(30, 30)
+#define OMAP4430_VC_IVA_VPACK_ST_MASK (1 << 30)
/* Used by PRM_IRQENABLE_MPU_2 */
#define OMAP4430_VC_MPU_VPACK_EN_SHIFT 6
-#define OMAP4430_VC_MPU_VPACK_EN_MASK BITFIELD(6, 6)
+#define OMAP4430_VC_MPU_VPACK_EN_MASK (1 << 6)
/* Used by PRM_IRQSTATUS_MPU_2 */
#define OMAP4430_VC_MPU_VPACK_ST_SHIFT 6
-#define OMAP4430_VC_MPU_VPACK_ST_MASK BITFIELD(6, 6)
+#define OMAP4430_VC_MPU_VPACK_ST_MASK (1 << 6)
/* Used by PRM_IRQENABLE_DUCATI, PRM_IRQENABLE_MPU */
#define OMAP4430_VC_RAERR_EN_SHIFT 12
-#define OMAP4430_VC_RAERR_EN_MASK BITFIELD(12, 12)
+#define OMAP4430_VC_RAERR_EN_MASK (1 << 12)
/* Used by PRM_IRQSTATUS_DUCATI, PRM_IRQSTATUS_MPU */
#define OMAP4430_VC_RAERR_ST_SHIFT 12
-#define OMAP4430_VC_RAERR_ST_MASK BITFIELD(12, 12)
+#define OMAP4430_VC_RAERR_ST_MASK (1 << 12)
/* Used by PRM_IRQENABLE_DUCATI, PRM_IRQENABLE_MPU */
#define OMAP4430_VC_SAERR_EN_SHIFT 11
-#define OMAP4430_VC_SAERR_EN_MASK BITFIELD(11, 11)
+#define OMAP4430_VC_SAERR_EN_MASK (1 << 11)
/* Used by PRM_IRQSTATUS_DUCATI, PRM_IRQSTATUS_MPU */
#define OMAP4430_VC_SAERR_ST_SHIFT 11
-#define OMAP4430_VC_SAERR_ST_MASK BITFIELD(11, 11)
+#define OMAP4430_VC_SAERR_ST_MASK (1 << 11)
/* Used by PRM_IRQENABLE_DUCATI, PRM_IRQENABLE_MPU */
#define OMAP4430_VC_TOERR_EN_SHIFT 13
-#define OMAP4430_VC_TOERR_EN_MASK BITFIELD(13, 13)
+#define OMAP4430_VC_TOERR_EN_MASK (1 << 13)
/* Used by PRM_IRQSTATUS_DUCATI, PRM_IRQSTATUS_MPU */
#define OMAP4430_VC_TOERR_ST_SHIFT 13
-#define OMAP4430_VC_TOERR_ST_MASK BITFIELD(13, 13)
+#define OMAP4430_VC_TOERR_ST_MASK (1 << 13)
/* Used by PRM_VP_CORE_VLIMITTO, PRM_VP_IVA_VLIMITTO, PRM_VP_MPU_VLIMITTO */
#define OMAP4430_VDDMAX_SHIFT 24
-#define OMAP4430_VDDMAX_MASK BITFIELD(24, 31)
+#define OMAP4430_VDDMAX_MASK (0xff << 24)
/* Used by PRM_VP_CORE_VLIMITTO, PRM_VP_IVA_VLIMITTO, PRM_VP_MPU_VLIMITTO */
#define OMAP4430_VDDMIN_SHIFT 16
-#define OMAP4430_VDDMIN_MASK BITFIELD(16, 23)
+#define OMAP4430_VDDMIN_MASK (0xff << 16)
/* Used by PRM_VOLTCTRL */
#define OMAP4430_VDD_CORE_I2C_DISABLE_SHIFT 12
-#define OMAP4430_VDD_CORE_I2C_DISABLE_MASK BITFIELD(12, 12)
+#define OMAP4430_VDD_CORE_I2C_DISABLE_MASK (1 << 12)
/* Used by PRM_RSTST */
#define OMAP4430_VDD_CORE_VOLT_MGR_RST_SHIFT 8
-#define OMAP4430_VDD_CORE_VOLT_MGR_RST_MASK BITFIELD(8, 8)
+#define OMAP4430_VDD_CORE_VOLT_MGR_RST_MASK (1 << 8)
/* Used by PRM_VOLTCTRL */
#define OMAP4430_VDD_IVA_I2C_DISABLE_SHIFT 14
-#define OMAP4430_VDD_IVA_I2C_DISABLE_MASK BITFIELD(14, 14)
+#define OMAP4430_VDD_IVA_I2C_DISABLE_MASK (1 << 14)
/* Used by PRM_VOLTCTRL */
#define OMAP4430_VDD_IVA_PRESENCE_SHIFT 9
-#define OMAP4430_VDD_IVA_PRESENCE_MASK BITFIELD(9, 9)
+#define OMAP4430_VDD_IVA_PRESENCE_MASK (1 << 9)
/* Used by PRM_RSTST */
#define OMAP4430_VDD_IVA_VOLT_MGR_RST_SHIFT 7
-#define OMAP4430_VDD_IVA_VOLT_MGR_RST_MASK BITFIELD(7, 7)
+#define OMAP4430_VDD_IVA_VOLT_MGR_RST_MASK (1 << 7)
/* Used by PRM_VOLTCTRL */
#define OMAP4430_VDD_MPU_I2C_DISABLE_SHIFT 13
-#define OMAP4430_VDD_MPU_I2C_DISABLE_MASK BITFIELD(13, 13)
+#define OMAP4430_VDD_MPU_I2C_DISABLE_MASK (1 << 13)
/* Used by PRM_VOLTCTRL */
#define OMAP4430_VDD_MPU_PRESENCE_SHIFT 8
-#define OMAP4430_VDD_MPU_PRESENCE_MASK BITFIELD(8, 8)
+#define OMAP4430_VDD_MPU_PRESENCE_MASK (1 << 8)
/* Used by PRM_RSTST */
#define OMAP4430_VDD_MPU_VOLT_MGR_RST_SHIFT 6
-#define OMAP4430_VDD_MPU_VOLT_MGR_RST_MASK BITFIELD(6, 6)
+#define OMAP4430_VDD_MPU_VOLT_MGR_RST_MASK (1 << 6)
+
+/* Used by PRM_VC_ERRST */
+#define OMAP4430_VFSM_RA_ERR_CORE_SHIFT 4
+#define OMAP4430_VFSM_RA_ERR_CORE_MASK (1 << 4)
+
+/* Used by PRM_VC_ERRST */
+#define OMAP4430_VFSM_RA_ERR_IVA_SHIFT 12
+#define OMAP4430_VFSM_RA_ERR_IVA_MASK (1 << 12)
+
+/* Used by PRM_VC_ERRST */
+#define OMAP4430_VFSM_RA_ERR_MPU_SHIFT 20
+#define OMAP4430_VFSM_RA_ERR_MPU_MASK (1 << 20)
+
+/* Used by PRM_VC_ERRST */
+#define OMAP4430_VFSM_SA_ERR_CORE_SHIFT 3
+#define OMAP4430_VFSM_SA_ERR_CORE_MASK (1 << 3)
+
+/* Used by PRM_VC_ERRST */
+#define OMAP4430_VFSM_SA_ERR_IVA_SHIFT 11
+#define OMAP4430_VFSM_SA_ERR_IVA_MASK (1 << 11)
+
+/* Used by PRM_VC_ERRST */
+#define OMAP4430_VFSM_SA_ERR_MPU_SHIFT 19
+#define OMAP4430_VFSM_SA_ERR_MPU_MASK (1 << 19)
+
+/* Used by PRM_VC_ERRST */
+#define OMAP4430_VFSM_TIMEOUT_ERR_CORE_SHIFT 5
+#define OMAP4430_VFSM_TIMEOUT_ERR_CORE_MASK (1 << 5)
+
+/* Used by PRM_VC_ERRST */
+#define OMAP4430_VFSM_TIMEOUT_ERR_IVA_SHIFT 13
+#define OMAP4430_VFSM_TIMEOUT_ERR_IVA_MASK (1 << 13)
+
+/* Used by PRM_VC_ERRST */
+#define OMAP4430_VFSM_TIMEOUT_ERR_MPU_SHIFT 21
+#define OMAP4430_VFSM_TIMEOUT_ERR_MPU_MASK (1 << 21)
/* Used by PRM_VC_VAL_SMPS_RA_VOL */
#define OMAP4430_VOLRA_VDD_CORE_L_SHIFT 0
-#define OMAP4430_VOLRA_VDD_CORE_L_MASK BITFIELD(0, 7)
+#define OMAP4430_VOLRA_VDD_CORE_L_MASK (0xff << 0)
/* Used by PRM_VC_VAL_SMPS_RA_VOL */
#define OMAP4430_VOLRA_VDD_IVA_L_SHIFT 8
-#define OMAP4430_VOLRA_VDD_IVA_L_MASK BITFIELD(8, 15)
+#define OMAP4430_VOLRA_VDD_IVA_L_MASK (0xff << 8)
/* Used by PRM_VC_VAL_SMPS_RA_VOL */
#define OMAP4430_VOLRA_VDD_MPU_L_SHIFT 16
-#define OMAP4430_VOLRA_VDD_MPU_L_MASK BITFIELD(16, 23)
+#define OMAP4430_VOLRA_VDD_MPU_L_MASK (0xff << 16)
/* Used by PRM_VP_CORE_CONFIG, PRM_VP_IVA_CONFIG, PRM_VP_MPU_CONFIG */
#define OMAP4430_VPENABLE_SHIFT 0
-#define OMAP4430_VPENABLE_MASK BITFIELD(0, 0)
+#define OMAP4430_VPENABLE_MASK (1 << 0)
/* Used by PRM_VP_CORE_STATUS, PRM_VP_IVA_STATUS, PRM_VP_MPU_STATUS */
#define OMAP4430_VPINIDLE_SHIFT 0
-#define OMAP4430_VPINIDLE_MASK BITFIELD(0, 0)
+#define OMAP4430_VPINIDLE_MASK (1 << 0)
/* Used by PRM_VP_CORE_VOLTAGE, PRM_VP_IVA_VOLTAGE, PRM_VP_MPU_VOLTAGE */
#define OMAP4430_VPVOLTAGE_SHIFT 0
-#define OMAP4430_VPVOLTAGE_MASK BITFIELD(0, 7)
+#define OMAP4430_VPVOLTAGE_MASK (0xff << 0)
/* Used by PRM_IRQENABLE_DUCATI, PRM_IRQENABLE_MPU */
#define OMAP4430_VP_CORE_EQVALUE_EN_SHIFT 20
-#define OMAP4430_VP_CORE_EQVALUE_EN_MASK BITFIELD(20, 20)
+#define OMAP4430_VP_CORE_EQVALUE_EN_MASK (1 << 20)
/* Used by PRM_IRQSTATUS_DUCATI, PRM_IRQSTATUS_MPU */
#define OMAP4430_VP_CORE_EQVALUE_ST_SHIFT 20
-#define OMAP4430_VP_CORE_EQVALUE_ST_MASK BITFIELD(20, 20)
+#define OMAP4430_VP_CORE_EQVALUE_ST_MASK (1 << 20)
/* Used by PRM_IRQENABLE_DUCATI, PRM_IRQENABLE_MPU */
#define OMAP4430_VP_CORE_MAXVDD_EN_SHIFT 18
-#define OMAP4430_VP_CORE_MAXVDD_EN_MASK BITFIELD(18, 18)
+#define OMAP4430_VP_CORE_MAXVDD_EN_MASK (1 << 18)
/* Used by PRM_IRQSTATUS_DUCATI, PRM_IRQSTATUS_MPU */
#define OMAP4430_VP_CORE_MAXVDD_ST_SHIFT 18
-#define OMAP4430_VP_CORE_MAXVDD_ST_MASK BITFIELD(18, 18)
+#define OMAP4430_VP_CORE_MAXVDD_ST_MASK (1 << 18)
/* Used by PRM_IRQENABLE_DUCATI, PRM_IRQENABLE_MPU */
#define OMAP4430_VP_CORE_MINVDD_EN_SHIFT 17
-#define OMAP4430_VP_CORE_MINVDD_EN_MASK BITFIELD(17, 17)
+#define OMAP4430_VP_CORE_MINVDD_EN_MASK (1 << 17)
/* Used by PRM_IRQSTATUS_DUCATI, PRM_IRQSTATUS_MPU */
#define OMAP4430_VP_CORE_MINVDD_ST_SHIFT 17
-#define OMAP4430_VP_CORE_MINVDD_ST_MASK BITFIELD(17, 17)
+#define OMAP4430_VP_CORE_MINVDD_ST_MASK (1 << 17)
/* Used by PRM_IRQENABLE_DUCATI, PRM_IRQENABLE_MPU */
#define OMAP4430_VP_CORE_NOSMPSACK_EN_SHIFT 19
-#define OMAP4430_VP_CORE_NOSMPSACK_EN_MASK BITFIELD(19, 19)
+#define OMAP4430_VP_CORE_NOSMPSACK_EN_MASK (1 << 19)
/* Used by PRM_IRQSTATUS_DUCATI, PRM_IRQSTATUS_MPU */
#define OMAP4430_VP_CORE_NOSMPSACK_ST_SHIFT 19
-#define OMAP4430_VP_CORE_NOSMPSACK_ST_MASK BITFIELD(19, 19)
+#define OMAP4430_VP_CORE_NOSMPSACK_ST_MASK (1 << 19)
/* Used by PRM_IRQENABLE_DUCATI, PRM_IRQENABLE_MPU */
#define OMAP4430_VP_CORE_OPPCHANGEDONE_EN_SHIFT 16
-#define OMAP4430_VP_CORE_OPPCHANGEDONE_EN_MASK BITFIELD(16, 16)
+#define OMAP4430_VP_CORE_OPPCHANGEDONE_EN_MASK (1 << 16)
/* Used by PRM_IRQSTATUS_DUCATI, PRM_IRQSTATUS_MPU */
#define OMAP4430_VP_CORE_OPPCHANGEDONE_ST_SHIFT 16
-#define OMAP4430_VP_CORE_OPPCHANGEDONE_ST_MASK BITFIELD(16, 16)
+#define OMAP4430_VP_CORE_OPPCHANGEDONE_ST_MASK (1 << 16)
/* Used by PRM_IRQENABLE_DUCATI, PRM_IRQENABLE_MPU */
#define OMAP4430_VP_CORE_TRANXDONE_EN_SHIFT 21
-#define OMAP4430_VP_CORE_TRANXDONE_EN_MASK BITFIELD(21, 21)
+#define OMAP4430_VP_CORE_TRANXDONE_EN_MASK (1 << 21)
/* Used by PRM_IRQSTATUS_DUCATI, PRM_IRQSTATUS_MPU */
#define OMAP4430_VP_CORE_TRANXDONE_ST_SHIFT 21
-#define OMAP4430_VP_CORE_TRANXDONE_ST_MASK BITFIELD(21, 21)
+#define OMAP4430_VP_CORE_TRANXDONE_ST_MASK (1 << 21)
/* Used by PRM_IRQENABLE_DUCATI, PRM_IRQENABLE_MPU */
#define OMAP4430_VP_IVA_EQVALUE_EN_SHIFT 28
-#define OMAP4430_VP_IVA_EQVALUE_EN_MASK BITFIELD(28, 28)
+#define OMAP4430_VP_IVA_EQVALUE_EN_MASK (1 << 28)
/* Used by PRM_IRQSTATUS_DUCATI, PRM_IRQSTATUS_MPU */
#define OMAP4430_VP_IVA_EQVALUE_ST_SHIFT 28
-#define OMAP4430_VP_IVA_EQVALUE_ST_MASK BITFIELD(28, 28)
+#define OMAP4430_VP_IVA_EQVALUE_ST_MASK (1 << 28)
/* Used by PRM_IRQENABLE_DUCATI, PRM_IRQENABLE_MPU */
#define OMAP4430_VP_IVA_MAXVDD_EN_SHIFT 26
-#define OMAP4430_VP_IVA_MAXVDD_EN_MASK BITFIELD(26, 26)
+#define OMAP4430_VP_IVA_MAXVDD_EN_MASK (1 << 26)
/* Used by PRM_IRQSTATUS_DUCATI, PRM_IRQSTATUS_MPU */
#define OMAP4430_VP_IVA_MAXVDD_ST_SHIFT 26
-#define OMAP4430_VP_IVA_MAXVDD_ST_MASK BITFIELD(26, 26)
+#define OMAP4430_VP_IVA_MAXVDD_ST_MASK (1 << 26)
/* Used by PRM_IRQENABLE_DUCATI, PRM_IRQENABLE_MPU */
#define OMAP4430_VP_IVA_MINVDD_EN_SHIFT 25
-#define OMAP4430_VP_IVA_MINVDD_EN_MASK BITFIELD(25, 25)
+#define OMAP4430_VP_IVA_MINVDD_EN_MASK (1 << 25)
/* Used by PRM_IRQSTATUS_DUCATI, PRM_IRQSTATUS_MPU */
#define OMAP4430_VP_IVA_MINVDD_ST_SHIFT 25
-#define OMAP4430_VP_IVA_MINVDD_ST_MASK BITFIELD(25, 25)
+#define OMAP4430_VP_IVA_MINVDD_ST_MASK (1 << 25)
/* Used by PRM_IRQENABLE_DUCATI, PRM_IRQENABLE_MPU */
#define OMAP4430_VP_IVA_NOSMPSACK_EN_SHIFT 27
-#define OMAP4430_VP_IVA_NOSMPSACK_EN_MASK BITFIELD(27, 27)
+#define OMAP4430_VP_IVA_NOSMPSACK_EN_MASK (1 << 27)
/* Used by PRM_IRQSTATUS_DUCATI, PRM_IRQSTATUS_MPU */
#define OMAP4430_VP_IVA_NOSMPSACK_ST_SHIFT 27
-#define OMAP4430_VP_IVA_NOSMPSACK_ST_MASK BITFIELD(27, 27)
+#define OMAP4430_VP_IVA_NOSMPSACK_ST_MASK (1 << 27)
/* Used by PRM_IRQENABLE_DUCATI, PRM_IRQENABLE_MPU */
#define OMAP4430_VP_IVA_OPPCHANGEDONE_EN_SHIFT 24
-#define OMAP4430_VP_IVA_OPPCHANGEDONE_EN_MASK BITFIELD(24, 24)
+#define OMAP4430_VP_IVA_OPPCHANGEDONE_EN_MASK (1 << 24)
/* Used by PRM_IRQSTATUS_DUCATI, PRM_IRQSTATUS_MPU */
#define OMAP4430_VP_IVA_OPPCHANGEDONE_ST_SHIFT 24
-#define OMAP4430_VP_IVA_OPPCHANGEDONE_ST_MASK BITFIELD(24, 24)
+#define OMAP4430_VP_IVA_OPPCHANGEDONE_ST_MASK (1 << 24)
/* Used by PRM_IRQENABLE_DUCATI, PRM_IRQENABLE_MPU */
#define OMAP4430_VP_IVA_TRANXDONE_EN_SHIFT 29
-#define OMAP4430_VP_IVA_TRANXDONE_EN_MASK BITFIELD(29, 29)
+#define OMAP4430_VP_IVA_TRANXDONE_EN_MASK (1 << 29)
/* Used by PRM_IRQSTATUS_DUCATI, PRM_IRQSTATUS_MPU */
#define OMAP4430_VP_IVA_TRANXDONE_ST_SHIFT 29
-#define OMAP4430_VP_IVA_TRANXDONE_ST_MASK BITFIELD(29, 29)
+#define OMAP4430_VP_IVA_TRANXDONE_ST_MASK (1 << 29)
/* Used by PRM_IRQENABLE_MPU_2 */
#define OMAP4430_VP_MPU_EQVALUE_EN_SHIFT 4
-#define OMAP4430_VP_MPU_EQVALUE_EN_MASK BITFIELD(4, 4)
+#define OMAP4430_VP_MPU_EQVALUE_EN_MASK (1 << 4)
/* Used by PRM_IRQSTATUS_MPU_2 */
#define OMAP4430_VP_MPU_EQVALUE_ST_SHIFT 4
-#define OMAP4430_VP_MPU_EQVALUE_ST_MASK BITFIELD(4, 4)
+#define OMAP4430_VP_MPU_EQVALUE_ST_MASK (1 << 4)
/* Used by PRM_IRQENABLE_MPU_2 */
#define OMAP4430_VP_MPU_MAXVDD_EN_SHIFT 2
-#define OMAP4430_VP_MPU_MAXVDD_EN_MASK BITFIELD(2, 2)
+#define OMAP4430_VP_MPU_MAXVDD_EN_MASK (1 << 2)
/* Used by PRM_IRQSTATUS_MPU_2 */
#define OMAP4430_VP_MPU_MAXVDD_ST_SHIFT 2
-#define OMAP4430_VP_MPU_MAXVDD_ST_MASK BITFIELD(2, 2)
+#define OMAP4430_VP_MPU_MAXVDD_ST_MASK (1 << 2)
/* Used by PRM_IRQENABLE_MPU_2 */
#define OMAP4430_VP_MPU_MINVDD_EN_SHIFT 1
-#define OMAP4430_VP_MPU_MINVDD_EN_MASK BITFIELD(1, 1)
+#define OMAP4430_VP_MPU_MINVDD_EN_MASK (1 << 1)
/* Used by PRM_IRQSTATUS_MPU_2 */
#define OMAP4430_VP_MPU_MINVDD_ST_SHIFT 1
-#define OMAP4430_VP_MPU_MINVDD_ST_MASK BITFIELD(1, 1)
+#define OMAP4430_VP_MPU_MINVDD_ST_MASK (1 << 1)
/* Used by PRM_IRQENABLE_MPU_2 */
#define OMAP4430_VP_MPU_NOSMPSACK_EN_SHIFT 3
-#define OMAP4430_VP_MPU_NOSMPSACK_EN_MASK BITFIELD(3, 3)
+#define OMAP4430_VP_MPU_NOSMPSACK_EN_MASK (1 << 3)
/* Used by PRM_IRQSTATUS_MPU_2 */
#define OMAP4430_VP_MPU_NOSMPSACK_ST_SHIFT 3
-#define OMAP4430_VP_MPU_NOSMPSACK_ST_MASK BITFIELD(3, 3)
+#define OMAP4430_VP_MPU_NOSMPSACK_ST_MASK (1 << 3)
/* Used by PRM_IRQENABLE_MPU_2 */
#define OMAP4430_VP_MPU_OPPCHANGEDONE_EN_SHIFT 0
-#define OMAP4430_VP_MPU_OPPCHANGEDONE_EN_MASK BITFIELD(0, 0)
+#define OMAP4430_VP_MPU_OPPCHANGEDONE_EN_MASK (1 << 0)
/* Used by PRM_IRQSTATUS_MPU_2 */
#define OMAP4430_VP_MPU_OPPCHANGEDONE_ST_SHIFT 0
-#define OMAP4430_VP_MPU_OPPCHANGEDONE_ST_MASK BITFIELD(0, 0)
+#define OMAP4430_VP_MPU_OPPCHANGEDONE_ST_MASK (1 << 0)
/* Used by PRM_IRQENABLE_MPU_2 */
#define OMAP4430_VP_MPU_TRANXDONE_EN_SHIFT 5
-#define OMAP4430_VP_MPU_TRANXDONE_EN_MASK BITFIELD(5, 5)
+#define OMAP4430_VP_MPU_TRANXDONE_EN_MASK (1 << 5)
/* Used by PRM_IRQSTATUS_MPU_2 */
#define OMAP4430_VP_MPU_TRANXDONE_ST_SHIFT 5
-#define OMAP4430_VP_MPU_TRANXDONE_ST_MASK BITFIELD(5, 5)
+#define OMAP4430_VP_MPU_TRANXDONE_ST_MASK (1 << 5)
/* Used by PRM_SRAM_COUNT */
#define OMAP4430_VSETUPCNT_VALUE_SHIFT 8
-#define OMAP4430_VSETUPCNT_VALUE_MASK BITFIELD(8, 15)
+#define OMAP4430_VSETUPCNT_VALUE_MASK (0xff << 8)
/* Used by PRM_VP_CORE_VSTEPMAX, PRM_VP_IVA_VSTEPMAX, PRM_VP_MPU_VSTEPMAX */
#define OMAP4430_VSTEPMAX_SHIFT 0
-#define OMAP4430_VSTEPMAX_MASK BITFIELD(0, 7)
+#define OMAP4430_VSTEPMAX_MASK (0xff << 0)
/* Used by PRM_VP_CORE_VSTEPMIN, PRM_VP_IVA_VSTEPMIN, PRM_VP_MPU_VSTEPMIN */
#define OMAP4430_VSTEPMIN_SHIFT 0
-#define OMAP4430_VSTEPMIN_MASK BITFIELD(0, 7)
+#define OMAP4430_VSTEPMIN_MASK (0xff << 0)
/* Used by PRM_MODEM_IF_CTRL */
#define OMAP4430_WAKE_MODEM_SHIFT 0
-#define OMAP4430_WAKE_MODEM_MASK BITFIELD(0, 0)
+#define OMAP4430_WAKE_MODEM_MASK (1 << 0)
/* Used by PM_DSS_DSS_WKDEP */
#define OMAP4430_WKUPDEP_DISPC_DUCATI_SHIFT 1
-#define OMAP4430_WKUPDEP_DISPC_DUCATI_MASK BITFIELD(1, 1)
+#define OMAP4430_WKUPDEP_DISPC_DUCATI_MASK (1 << 1)
/* Used by PM_DSS_DSS_WKDEP */
#define OMAP4430_WKUPDEP_DISPC_MPU_SHIFT 0
-#define OMAP4430_WKUPDEP_DISPC_MPU_MASK BITFIELD(0, 0)
+#define OMAP4430_WKUPDEP_DISPC_MPU_MASK (1 << 0)
/* Used by PM_DSS_DSS_WKDEP */
#define OMAP4430_WKUPDEP_DISPC_SDMA_SHIFT 3
-#define OMAP4430_WKUPDEP_DISPC_SDMA_MASK BITFIELD(3, 3)
+#define OMAP4430_WKUPDEP_DISPC_SDMA_MASK (1 << 3)
/* Used by PM_DSS_DSS_WKDEP */
#define OMAP4430_WKUPDEP_DISPC_TESLA_SHIFT 2
-#define OMAP4430_WKUPDEP_DISPC_TESLA_MASK BITFIELD(2, 2)
+#define OMAP4430_WKUPDEP_DISPC_TESLA_MASK (1 << 2)
/* Used by PM_ABE_DMIC_WKDEP */
#define OMAP4430_WKUPDEP_DMIC_DMA_SDMA_SHIFT 7
-#define OMAP4430_WKUPDEP_DMIC_DMA_SDMA_MASK BITFIELD(7, 7)
+#define OMAP4430_WKUPDEP_DMIC_DMA_SDMA_MASK (1 << 7)
/* Used by PM_ABE_DMIC_WKDEP */
#define OMAP4430_WKUPDEP_DMIC_DMA_TESLA_SHIFT 6
-#define OMAP4430_WKUPDEP_DMIC_DMA_TESLA_MASK BITFIELD(6, 6)
+#define OMAP4430_WKUPDEP_DMIC_DMA_TESLA_MASK (1 << 6)
/* Used by PM_ABE_DMIC_WKDEP */
#define OMAP4430_WKUPDEP_DMIC_IRQ_MPU_SHIFT 0
-#define OMAP4430_WKUPDEP_DMIC_IRQ_MPU_MASK BITFIELD(0, 0)
+#define OMAP4430_WKUPDEP_DMIC_IRQ_MPU_MASK (1 << 0)
/* Used by PM_ABE_DMIC_WKDEP */
#define OMAP4430_WKUPDEP_DMIC_IRQ_TESLA_SHIFT 2
-#define OMAP4430_WKUPDEP_DMIC_IRQ_TESLA_MASK BITFIELD(2, 2)
+#define OMAP4430_WKUPDEP_DMIC_IRQ_TESLA_MASK (1 << 2)
/* Used by PM_L4PER_DMTIMER10_WKDEP */
#define OMAP4430_WKUPDEP_DMTIMER10_MPU_SHIFT 0
-#define OMAP4430_WKUPDEP_DMTIMER10_MPU_MASK BITFIELD(0, 0)
+#define OMAP4430_WKUPDEP_DMTIMER10_MPU_MASK (1 << 0)
/* Used by PM_L4PER_DMTIMER11_WKDEP */
#define OMAP4430_WKUPDEP_DMTIMER11_DUCATI_SHIFT 1
-#define OMAP4430_WKUPDEP_DMTIMER11_DUCATI_MASK BITFIELD(1, 1)
+#define OMAP4430_WKUPDEP_DMTIMER11_DUCATI_MASK (1 << 1)
/* Used by PM_L4PER_DMTIMER11_WKDEP */
#define OMAP4430_WKUPDEP_DMTIMER11_MPU_SHIFT 0
-#define OMAP4430_WKUPDEP_DMTIMER11_MPU_MASK BITFIELD(0, 0)
+#define OMAP4430_WKUPDEP_DMTIMER11_MPU_MASK (1 << 0)
/* Used by PM_L4PER_DMTIMER2_WKDEP */
#define OMAP4430_WKUPDEP_DMTIMER2_MPU_SHIFT 0
-#define OMAP4430_WKUPDEP_DMTIMER2_MPU_MASK BITFIELD(0, 0)
+#define OMAP4430_WKUPDEP_DMTIMER2_MPU_MASK (1 << 0)
/* Used by PM_L4PER_DMTIMER3_WKDEP */
#define OMAP4430_WKUPDEP_DMTIMER3_DUCATI_SHIFT 1
-#define OMAP4430_WKUPDEP_DMTIMER3_DUCATI_MASK BITFIELD(1, 1)
+#define OMAP4430_WKUPDEP_DMTIMER3_DUCATI_MASK (1 << 1)
/* Used by PM_L4PER_DMTIMER3_WKDEP */
#define OMAP4430_WKUPDEP_DMTIMER3_MPU_SHIFT 0
-#define OMAP4430_WKUPDEP_DMTIMER3_MPU_MASK BITFIELD(0, 0)
+#define OMAP4430_WKUPDEP_DMTIMER3_MPU_MASK (1 << 0)
/* Used by PM_L4PER_DMTIMER4_WKDEP */
#define OMAP4430_WKUPDEP_DMTIMER4_DUCATI_SHIFT 1
-#define OMAP4430_WKUPDEP_DMTIMER4_DUCATI_MASK BITFIELD(1, 1)
+#define OMAP4430_WKUPDEP_DMTIMER4_DUCATI_MASK (1 << 1)
/* Used by PM_L4PER_DMTIMER4_WKDEP */
#define OMAP4430_WKUPDEP_DMTIMER4_MPU_SHIFT 0
-#define OMAP4430_WKUPDEP_DMTIMER4_MPU_MASK BITFIELD(0, 0)
+#define OMAP4430_WKUPDEP_DMTIMER4_MPU_MASK (1 << 0)
/* Used by PM_L4PER_DMTIMER9_WKDEP */
#define OMAP4430_WKUPDEP_DMTIMER9_DUCATI_SHIFT 1
-#define OMAP4430_WKUPDEP_DMTIMER9_DUCATI_MASK BITFIELD(1, 1)
+#define OMAP4430_WKUPDEP_DMTIMER9_DUCATI_MASK (1 << 1)
/* Used by PM_L4PER_DMTIMER9_WKDEP */
#define OMAP4430_WKUPDEP_DMTIMER9_MPU_SHIFT 0
-#define OMAP4430_WKUPDEP_DMTIMER9_MPU_MASK BITFIELD(0, 0)
+#define OMAP4430_WKUPDEP_DMTIMER9_MPU_MASK (1 << 0)
/* Used by PM_DSS_DSS_WKDEP */
#define OMAP4430_WKUPDEP_DSI1_DUCATI_SHIFT 5
-#define OMAP4430_WKUPDEP_DSI1_DUCATI_MASK BITFIELD(5, 5)
+#define OMAP4430_WKUPDEP_DSI1_DUCATI_MASK (1 << 5)
/* Used by PM_DSS_DSS_WKDEP */
#define OMAP4430_WKUPDEP_DSI1_MPU_SHIFT 4
-#define OMAP4430_WKUPDEP_DSI1_MPU_MASK BITFIELD(4, 4)
+#define OMAP4430_WKUPDEP_DSI1_MPU_MASK (1 << 4)
/* Used by PM_DSS_DSS_WKDEP */
#define OMAP4430_WKUPDEP_DSI1_SDMA_SHIFT 7
-#define OMAP4430_WKUPDEP_DSI1_SDMA_MASK BITFIELD(7, 7)
+#define OMAP4430_WKUPDEP_DSI1_SDMA_MASK (1 << 7)
/* Used by PM_DSS_DSS_WKDEP */
#define OMAP4430_WKUPDEP_DSI1_TESLA_SHIFT 6
-#define OMAP4430_WKUPDEP_DSI1_TESLA_MASK BITFIELD(6, 6)
+#define OMAP4430_WKUPDEP_DSI1_TESLA_MASK (1 << 6)
/* Used by PM_DSS_DSS_WKDEP */
#define OMAP4430_WKUPDEP_DSI2_DUCATI_SHIFT 9
-#define OMAP4430_WKUPDEP_DSI2_DUCATI_MASK BITFIELD(9, 9)
+#define OMAP4430_WKUPDEP_DSI2_DUCATI_MASK (1 << 9)
/* Used by PM_DSS_DSS_WKDEP */
#define OMAP4430_WKUPDEP_DSI2_MPU_SHIFT 8
-#define OMAP4430_WKUPDEP_DSI2_MPU_MASK BITFIELD(8, 8)
+#define OMAP4430_WKUPDEP_DSI2_MPU_MASK (1 << 8)
/* Used by PM_DSS_DSS_WKDEP */
#define OMAP4430_WKUPDEP_DSI2_SDMA_SHIFT 11
-#define OMAP4430_WKUPDEP_DSI2_SDMA_MASK BITFIELD(11, 11)
+#define OMAP4430_WKUPDEP_DSI2_SDMA_MASK (1 << 11)
/* Used by PM_DSS_DSS_WKDEP */
#define OMAP4430_WKUPDEP_DSI2_TESLA_SHIFT 10
-#define OMAP4430_WKUPDEP_DSI2_TESLA_MASK BITFIELD(10, 10)
+#define OMAP4430_WKUPDEP_DSI2_TESLA_MASK (1 << 10)
/* Used by PM_WKUP_GPIO1_WKDEP */
#define OMAP4430_WKUPDEP_GPIO1_IRQ1_DUCATI_SHIFT 1
-#define OMAP4430_WKUPDEP_GPIO1_IRQ1_DUCATI_MASK BITFIELD(1, 1)
+#define OMAP4430_WKUPDEP_GPIO1_IRQ1_DUCATI_MASK (1 << 1)
/* Used by PM_WKUP_GPIO1_WKDEP */
#define OMAP4430_WKUPDEP_GPIO1_IRQ1_MPU_SHIFT 0
-#define OMAP4430_WKUPDEP_GPIO1_IRQ1_MPU_MASK BITFIELD(0, 0)
+#define OMAP4430_WKUPDEP_GPIO1_IRQ1_MPU_MASK (1 << 0)
/* Used by PM_WKUP_GPIO1_WKDEP */
#define OMAP4430_WKUPDEP_GPIO1_IRQ2_TESLA_SHIFT 6
-#define OMAP4430_WKUPDEP_GPIO1_IRQ2_TESLA_MASK BITFIELD(6, 6)
+#define OMAP4430_WKUPDEP_GPIO1_IRQ2_TESLA_MASK (1 << 6)
/* Used by PM_L4PER_GPIO2_WKDEP */
#define OMAP4430_WKUPDEP_GPIO2_IRQ1_DUCATI_SHIFT 1
-#define OMAP4430_WKUPDEP_GPIO2_IRQ1_DUCATI_MASK BITFIELD(1, 1)
+#define OMAP4430_WKUPDEP_GPIO2_IRQ1_DUCATI_MASK (1 << 1)
/* Used by PM_L4PER_GPIO2_WKDEP */
#define OMAP4430_WKUPDEP_GPIO2_IRQ1_MPU_SHIFT 0
-#define OMAP4430_WKUPDEP_GPIO2_IRQ1_MPU_MASK BITFIELD(0, 0)
+#define OMAP4430_WKUPDEP_GPIO2_IRQ1_MPU_MASK (1 << 0)
/* Used by PM_L4PER_GPIO2_WKDEP */
#define OMAP4430_WKUPDEP_GPIO2_IRQ2_TESLA_SHIFT 6
-#define OMAP4430_WKUPDEP_GPIO2_IRQ2_TESLA_MASK BITFIELD(6, 6)
+#define OMAP4430_WKUPDEP_GPIO2_IRQ2_TESLA_MASK (1 << 6)
/* Used by PM_L4PER_GPIO3_WKDEP */
#define OMAP4430_WKUPDEP_GPIO3_IRQ1_MPU_SHIFT 0
-#define OMAP4430_WKUPDEP_GPIO3_IRQ1_MPU_MASK BITFIELD(0, 0)
+#define OMAP4430_WKUPDEP_GPIO3_IRQ1_MPU_MASK (1 << 0)
/* Used by PM_L4PER_GPIO3_WKDEP */
#define OMAP4430_WKUPDEP_GPIO3_IRQ2_TESLA_SHIFT 6
-#define OMAP4430_WKUPDEP_GPIO3_IRQ2_TESLA_MASK BITFIELD(6, 6)
+#define OMAP4430_WKUPDEP_GPIO3_IRQ2_TESLA_MASK (1 << 6)
/* Used by PM_L4PER_GPIO4_WKDEP */
#define OMAP4430_WKUPDEP_GPIO4_IRQ1_MPU_SHIFT 0
-#define OMAP4430_WKUPDEP_GPIO4_IRQ1_MPU_MASK BITFIELD(0, 0)
+#define OMAP4430_WKUPDEP_GPIO4_IRQ1_MPU_MASK (1 << 0)
/* Used by PM_L4PER_GPIO4_WKDEP */
#define OMAP4430_WKUPDEP_GPIO4_IRQ2_TESLA_SHIFT 6
-#define OMAP4430_WKUPDEP_GPIO4_IRQ2_TESLA_MASK BITFIELD(6, 6)
+#define OMAP4430_WKUPDEP_GPIO4_IRQ2_TESLA_MASK (1 << 6)
/* Used by PM_L4PER_GPIO5_WKDEP */
#define OMAP4430_WKUPDEP_GPIO5_IRQ1_MPU_SHIFT 0
-#define OMAP4430_WKUPDEP_GPIO5_IRQ1_MPU_MASK BITFIELD(0, 0)
+#define OMAP4430_WKUPDEP_GPIO5_IRQ1_MPU_MASK (1 << 0)
/* Used by PM_L4PER_GPIO5_WKDEP */
#define OMAP4430_WKUPDEP_GPIO5_IRQ2_TESLA_SHIFT 6
-#define OMAP4430_WKUPDEP_GPIO5_IRQ2_TESLA_MASK BITFIELD(6, 6)
+#define OMAP4430_WKUPDEP_GPIO5_IRQ2_TESLA_MASK (1 << 6)
/* Used by PM_L4PER_GPIO6_WKDEP */
#define OMAP4430_WKUPDEP_GPIO6_IRQ1_MPU_SHIFT 0
-#define OMAP4430_WKUPDEP_GPIO6_IRQ1_MPU_MASK BITFIELD(0, 0)
+#define OMAP4430_WKUPDEP_GPIO6_IRQ1_MPU_MASK (1 << 0)
/* Used by PM_L4PER_GPIO6_WKDEP */
#define OMAP4430_WKUPDEP_GPIO6_IRQ2_TESLA_SHIFT 6
-#define OMAP4430_WKUPDEP_GPIO6_IRQ2_TESLA_MASK BITFIELD(6, 6)
+#define OMAP4430_WKUPDEP_GPIO6_IRQ2_TESLA_MASK (1 << 6)
/* Used by PM_DSS_DSS_WKDEP */
#define OMAP4430_WKUPDEP_HDMIDMA_SDMA_SHIFT 19
-#define OMAP4430_WKUPDEP_HDMIDMA_SDMA_MASK BITFIELD(19, 19)
+#define OMAP4430_WKUPDEP_HDMIDMA_SDMA_MASK (1 << 19)
/* Used by PM_DSS_DSS_WKDEP */
#define OMAP4430_WKUPDEP_HDMIIRQ_DUCATI_SHIFT 13
-#define OMAP4430_WKUPDEP_HDMIIRQ_DUCATI_MASK BITFIELD(13, 13)
+#define OMAP4430_WKUPDEP_HDMIIRQ_DUCATI_MASK (1 << 13)
/* Used by PM_DSS_DSS_WKDEP */
#define OMAP4430_WKUPDEP_HDMIIRQ_MPU_SHIFT 12
-#define OMAP4430_WKUPDEP_HDMIIRQ_MPU_MASK BITFIELD(12, 12)
+#define OMAP4430_WKUPDEP_HDMIIRQ_MPU_MASK (1 << 12)
/* Used by PM_DSS_DSS_WKDEP */
#define OMAP4430_WKUPDEP_HDMIIRQ_TESLA_SHIFT 14
-#define OMAP4430_WKUPDEP_HDMIIRQ_TESLA_MASK BITFIELD(14, 14)
+#define OMAP4430_WKUPDEP_HDMIIRQ_TESLA_MASK (1 << 14)
/* Used by PM_L4PER_HECC1_WKDEP */
#define OMAP4430_WKUPDEP_HECC1_MPU_SHIFT 0
-#define OMAP4430_WKUPDEP_HECC1_MPU_MASK BITFIELD(0, 0)
+#define OMAP4430_WKUPDEP_HECC1_MPU_MASK (1 << 0)
/* Used by PM_L4PER_HECC2_WKDEP */
#define OMAP4430_WKUPDEP_HECC2_MPU_SHIFT 0
-#define OMAP4430_WKUPDEP_HECC2_MPU_MASK BITFIELD(0, 0)
+#define OMAP4430_WKUPDEP_HECC2_MPU_MASK (1 << 0)
/* Used by PM_L3INIT_HSI_WKDEP */
#define OMAP4430_WKUPDEP_HSI_DSP_TESLA_SHIFT 6
-#define OMAP4430_WKUPDEP_HSI_DSP_TESLA_MASK BITFIELD(6, 6)
+#define OMAP4430_WKUPDEP_HSI_DSP_TESLA_MASK (1 << 6)
/* Used by PM_L3INIT_HSI_WKDEP */
#define OMAP4430_WKUPDEP_HSI_MCU_DUCATI_SHIFT 1
-#define OMAP4430_WKUPDEP_HSI_MCU_DUCATI_MASK BITFIELD(1, 1)
+#define OMAP4430_WKUPDEP_HSI_MCU_DUCATI_MASK (1 << 1)
/* Used by PM_L3INIT_HSI_WKDEP */
#define OMAP4430_WKUPDEP_HSI_MCU_MPU_SHIFT 0
-#define OMAP4430_WKUPDEP_HSI_MCU_MPU_MASK BITFIELD(0, 0)
+#define OMAP4430_WKUPDEP_HSI_MCU_MPU_MASK (1 << 0)
/* Used by PM_L4PER_I2C1_WKDEP */
#define OMAP4430_WKUPDEP_I2C1_DMA_SDMA_SHIFT 7
-#define OMAP4430_WKUPDEP_I2C1_DMA_SDMA_MASK BITFIELD(7, 7)
+#define OMAP4430_WKUPDEP_I2C1_DMA_SDMA_MASK (1 << 7)
/* Used by PM_L4PER_I2C1_WKDEP */
#define OMAP4430_WKUPDEP_I2C1_IRQ_DUCATI_SHIFT 1
-#define OMAP4430_WKUPDEP_I2C1_IRQ_DUCATI_MASK BITFIELD(1, 1)
+#define OMAP4430_WKUPDEP_I2C1_IRQ_DUCATI_MASK (1 << 1)
/* Used by PM_L4PER_I2C1_WKDEP */
#define OMAP4430_WKUPDEP_I2C1_IRQ_MPU_SHIFT 0
-#define OMAP4430_WKUPDEP_I2C1_IRQ_MPU_MASK BITFIELD(0, 0)
+#define OMAP4430_WKUPDEP_I2C1_IRQ_MPU_MASK (1 << 0)
/* Used by PM_L4PER_I2C2_WKDEP */
#define OMAP4430_WKUPDEP_I2C2_DMA_SDMA_SHIFT 7
-#define OMAP4430_WKUPDEP_I2C2_DMA_SDMA_MASK BITFIELD(7, 7)
+#define OMAP4430_WKUPDEP_I2C2_DMA_SDMA_MASK (1 << 7)
/* Used by PM_L4PER_I2C2_WKDEP */
#define OMAP4430_WKUPDEP_I2C2_IRQ_DUCATI_SHIFT 1
-#define OMAP4430_WKUPDEP_I2C2_IRQ_DUCATI_MASK BITFIELD(1, 1)
+#define OMAP4430_WKUPDEP_I2C2_IRQ_DUCATI_MASK (1 << 1)
/* Used by PM_L4PER_I2C2_WKDEP */
#define OMAP4430_WKUPDEP_I2C2_IRQ_MPU_SHIFT 0
-#define OMAP4430_WKUPDEP_I2C2_IRQ_MPU_MASK BITFIELD(0, 0)
+#define OMAP4430_WKUPDEP_I2C2_IRQ_MPU_MASK (1 << 0)
/* Used by PM_L4PER_I2C3_WKDEP */
#define OMAP4430_WKUPDEP_I2C3_DMA_SDMA_SHIFT 7
-#define OMAP4430_WKUPDEP_I2C3_DMA_SDMA_MASK BITFIELD(7, 7)
+#define OMAP4430_WKUPDEP_I2C3_DMA_SDMA_MASK (1 << 7)
/* Used by PM_L4PER_I2C3_WKDEP */
#define OMAP4430_WKUPDEP_I2C3_IRQ_DUCATI_SHIFT 1
-#define OMAP4430_WKUPDEP_I2C3_IRQ_DUCATI_MASK BITFIELD(1, 1)
+#define OMAP4430_WKUPDEP_I2C3_IRQ_DUCATI_MASK (1 << 1)
/* Used by PM_L4PER_I2C3_WKDEP */
#define OMAP4430_WKUPDEP_I2C3_IRQ_MPU_SHIFT 0
-#define OMAP4430_WKUPDEP_I2C3_IRQ_MPU_MASK BITFIELD(0, 0)
+#define OMAP4430_WKUPDEP_I2C3_IRQ_MPU_MASK (1 << 0)
/* Used by PM_L4PER_I2C4_WKDEP */
#define OMAP4430_WKUPDEP_I2C4_DMA_SDMA_SHIFT 7
-#define OMAP4430_WKUPDEP_I2C4_DMA_SDMA_MASK BITFIELD(7, 7)
+#define OMAP4430_WKUPDEP_I2C4_DMA_SDMA_MASK (1 << 7)
/* Used by PM_L4PER_I2C4_WKDEP */
#define OMAP4430_WKUPDEP_I2C4_IRQ_DUCATI_SHIFT 1
-#define OMAP4430_WKUPDEP_I2C4_IRQ_DUCATI_MASK BITFIELD(1, 1)
+#define OMAP4430_WKUPDEP_I2C4_IRQ_DUCATI_MASK (1 << 1)
/* Used by PM_L4PER_I2C4_WKDEP */
#define OMAP4430_WKUPDEP_I2C4_IRQ_MPU_SHIFT 0
-#define OMAP4430_WKUPDEP_I2C4_IRQ_MPU_MASK BITFIELD(0, 0)
+#define OMAP4430_WKUPDEP_I2C4_IRQ_MPU_MASK (1 << 0)
/* Used by PM_L4PER_I2C5_WKDEP */
#define OMAP4430_WKUPDEP_I2C5_DMA_SDMA_SHIFT 7
-#define OMAP4430_WKUPDEP_I2C5_DMA_SDMA_MASK BITFIELD(7, 7)
+#define OMAP4430_WKUPDEP_I2C5_DMA_SDMA_MASK (1 << 7)
/* Used by PM_L4PER_I2C5_WKDEP */
#define OMAP4430_WKUPDEP_I2C5_IRQ_MPU_SHIFT 0
-#define OMAP4430_WKUPDEP_I2C5_IRQ_MPU_MASK BITFIELD(0, 0)
+#define OMAP4430_WKUPDEP_I2C5_IRQ_MPU_MASK (1 << 0)
/* Used by PM_WKUP_KEYBOARD_WKDEP */
#define OMAP4430_WKUPDEP_KEYBOARD_MPU_SHIFT 0
-#define OMAP4430_WKUPDEP_KEYBOARD_MPU_MASK BITFIELD(0, 0)
+#define OMAP4430_WKUPDEP_KEYBOARD_MPU_MASK (1 << 0)
/* Used by PM_ABE_MCASP_WKDEP */
#define OMAP4430_WKUPDEP_MCASP1_DMA_SDMA_SHIFT 7
-#define OMAP4430_WKUPDEP_MCASP1_DMA_SDMA_MASK BITFIELD(7, 7)
+#define OMAP4430_WKUPDEP_MCASP1_DMA_SDMA_MASK (1 << 7)
/* Used by PM_ABE_MCASP_WKDEP */
#define OMAP4430_WKUPDEP_MCASP1_DMA_TESLA_SHIFT 6
-#define OMAP4430_WKUPDEP_MCASP1_DMA_TESLA_MASK BITFIELD(6, 6)
+#define OMAP4430_WKUPDEP_MCASP1_DMA_TESLA_MASK (1 << 6)
/* Used by PM_ABE_MCASP_WKDEP */
#define OMAP4430_WKUPDEP_MCASP1_IRQ_MPU_SHIFT 0
-#define OMAP4430_WKUPDEP_MCASP1_IRQ_MPU_MASK BITFIELD(0, 0)
+#define OMAP4430_WKUPDEP_MCASP1_IRQ_MPU_MASK (1 << 0)
/* Used by PM_ABE_MCASP_WKDEP */
#define OMAP4430_WKUPDEP_MCASP1_IRQ_TESLA_SHIFT 2
-#define OMAP4430_WKUPDEP_MCASP1_IRQ_TESLA_MASK BITFIELD(2, 2)
+#define OMAP4430_WKUPDEP_MCASP1_IRQ_TESLA_MASK (1 << 2)
/* Used by PM_L4PER_MCASP2_WKDEP */
#define OMAP4430_WKUPDEP_MCASP2_DMA_SDMA_SHIFT 7
-#define OMAP4430_WKUPDEP_MCASP2_DMA_SDMA_MASK BITFIELD(7, 7)
+#define OMAP4430_WKUPDEP_MCASP2_DMA_SDMA_MASK (1 << 7)
/* Used by PM_L4PER_MCASP2_WKDEP */
#define OMAP4430_WKUPDEP_MCASP2_DMA_TESLA_SHIFT 6
-#define OMAP4430_WKUPDEP_MCASP2_DMA_TESLA_MASK BITFIELD(6, 6)
+#define OMAP4430_WKUPDEP_MCASP2_DMA_TESLA_MASK (1 << 6)
/* Used by PM_L4PER_MCASP2_WKDEP */
#define OMAP4430_WKUPDEP_MCASP2_IRQ_MPU_SHIFT 0
-#define OMAP4430_WKUPDEP_MCASP2_IRQ_MPU_MASK BITFIELD(0, 0)
+#define OMAP4430_WKUPDEP_MCASP2_IRQ_MPU_MASK (1 << 0)
/* Used by PM_L4PER_MCASP2_WKDEP */
#define OMAP4430_WKUPDEP_MCASP2_IRQ_TESLA_SHIFT 2
-#define OMAP4430_WKUPDEP_MCASP2_IRQ_TESLA_MASK BITFIELD(2, 2)
+#define OMAP4430_WKUPDEP_MCASP2_IRQ_TESLA_MASK (1 << 2)
/* Used by PM_L4PER_MCASP3_WKDEP */
#define OMAP4430_WKUPDEP_MCASP3_DMA_SDMA_SHIFT 7
-#define OMAP4430_WKUPDEP_MCASP3_DMA_SDMA_MASK BITFIELD(7, 7)
+#define OMAP4430_WKUPDEP_MCASP3_DMA_SDMA_MASK (1 << 7)
/* Used by PM_L4PER_MCASP3_WKDEP */
#define OMAP4430_WKUPDEP_MCASP3_DMA_TESLA_SHIFT 6
-#define OMAP4430_WKUPDEP_MCASP3_DMA_TESLA_MASK BITFIELD(6, 6)
+#define OMAP4430_WKUPDEP_MCASP3_DMA_TESLA_MASK (1 << 6)
/* Used by PM_L4PER_MCASP3_WKDEP */
#define OMAP4430_WKUPDEP_MCASP3_IRQ_MPU_SHIFT 0
-#define OMAP4430_WKUPDEP_MCASP3_IRQ_MPU_MASK BITFIELD(0, 0)
+#define OMAP4430_WKUPDEP_MCASP3_IRQ_MPU_MASK (1 << 0)
/* Used by PM_L4PER_MCASP3_WKDEP */
#define OMAP4430_WKUPDEP_MCASP3_IRQ_TESLA_SHIFT 2
-#define OMAP4430_WKUPDEP_MCASP3_IRQ_TESLA_MASK BITFIELD(2, 2)
+#define OMAP4430_WKUPDEP_MCASP3_IRQ_TESLA_MASK (1 << 2)
/* Used by PM_ABE_MCBSP1_WKDEP */
#define OMAP4430_WKUPDEP_MCBSP1_MPU_SHIFT 0
-#define OMAP4430_WKUPDEP_MCBSP1_MPU_MASK BITFIELD(0, 0)
+#define OMAP4430_WKUPDEP_MCBSP1_MPU_MASK (1 << 0)
/* Used by PM_ABE_MCBSP1_WKDEP */
#define OMAP4430_WKUPDEP_MCBSP1_SDMA_SHIFT 3
-#define OMAP4430_WKUPDEP_MCBSP1_SDMA_MASK BITFIELD(3, 3)
+#define OMAP4430_WKUPDEP_MCBSP1_SDMA_MASK (1 << 3)
/* Used by PM_ABE_MCBSP1_WKDEP */
#define OMAP4430_WKUPDEP_MCBSP1_TESLA_SHIFT 2
-#define OMAP4430_WKUPDEP_MCBSP1_TESLA_MASK BITFIELD(2, 2)
+#define OMAP4430_WKUPDEP_MCBSP1_TESLA_MASK (1 << 2)
/* Used by PM_ABE_MCBSP2_WKDEP */
#define OMAP4430_WKUPDEP_MCBSP2_MPU_SHIFT 0
-#define OMAP4430_WKUPDEP_MCBSP2_MPU_MASK BITFIELD(0, 0)
+#define OMAP4430_WKUPDEP_MCBSP2_MPU_MASK (1 << 0)
/* Used by PM_ABE_MCBSP2_WKDEP */
#define OMAP4430_WKUPDEP_MCBSP2_SDMA_SHIFT 3
-#define OMAP4430_WKUPDEP_MCBSP2_SDMA_MASK BITFIELD(3, 3)
+#define OMAP4430_WKUPDEP_MCBSP2_SDMA_MASK (1 << 3)
/* Used by PM_ABE_MCBSP2_WKDEP */
#define OMAP4430_WKUPDEP_MCBSP2_TESLA_SHIFT 2
-#define OMAP4430_WKUPDEP_MCBSP2_TESLA_MASK BITFIELD(2, 2)
+#define OMAP4430_WKUPDEP_MCBSP2_TESLA_MASK (1 << 2)
/* Used by PM_ABE_MCBSP3_WKDEP */
#define OMAP4430_WKUPDEP_MCBSP3_MPU_SHIFT 0
-#define OMAP4430_WKUPDEP_MCBSP3_MPU_MASK BITFIELD(0, 0)
+#define OMAP4430_WKUPDEP_MCBSP3_MPU_MASK (1 << 0)
/* Used by PM_ABE_MCBSP3_WKDEP */
#define OMAP4430_WKUPDEP_MCBSP3_SDMA_SHIFT 3
-#define OMAP4430_WKUPDEP_MCBSP3_SDMA_MASK BITFIELD(3, 3)
+#define OMAP4430_WKUPDEP_MCBSP3_SDMA_MASK (1 << 3)
/* Used by PM_ABE_MCBSP3_WKDEP */
#define OMAP4430_WKUPDEP_MCBSP3_TESLA_SHIFT 2
-#define OMAP4430_WKUPDEP_MCBSP3_TESLA_MASK BITFIELD(2, 2)
+#define OMAP4430_WKUPDEP_MCBSP3_TESLA_MASK (1 << 2)
/* Used by PM_L4PER_MCBSP4_WKDEP */
#define OMAP4430_WKUPDEP_MCBSP4_MPU_SHIFT 0
-#define OMAP4430_WKUPDEP_MCBSP4_MPU_MASK BITFIELD(0, 0)
+#define OMAP4430_WKUPDEP_MCBSP4_MPU_MASK (1 << 0)
/* Used by PM_L4PER_MCBSP4_WKDEP */
#define OMAP4430_WKUPDEP_MCBSP4_SDMA_SHIFT 3
-#define OMAP4430_WKUPDEP_MCBSP4_SDMA_MASK BITFIELD(3, 3)
+#define OMAP4430_WKUPDEP_MCBSP4_SDMA_MASK (1 << 3)
/* Used by PM_L4PER_MCBSP4_WKDEP */
#define OMAP4430_WKUPDEP_MCBSP4_TESLA_SHIFT 2
-#define OMAP4430_WKUPDEP_MCBSP4_TESLA_MASK BITFIELD(2, 2)
+#define OMAP4430_WKUPDEP_MCBSP4_TESLA_MASK (1 << 2)
/* Used by PM_L4PER_MCSPI1_WKDEP */
#define OMAP4430_WKUPDEP_MCSPI1_DUCATI_SHIFT 1
-#define OMAP4430_WKUPDEP_MCSPI1_DUCATI_MASK BITFIELD(1, 1)
+#define OMAP4430_WKUPDEP_MCSPI1_DUCATI_MASK (1 << 1)
/* Used by PM_L4PER_MCSPI1_WKDEP */
#define OMAP4430_WKUPDEP_MCSPI1_MPU_SHIFT 0
-#define OMAP4430_WKUPDEP_MCSPI1_MPU_MASK BITFIELD(0, 0)
+#define OMAP4430_WKUPDEP_MCSPI1_MPU_MASK (1 << 0)
/* Used by PM_L4PER_MCSPI1_WKDEP */
#define OMAP4430_WKUPDEP_MCSPI1_SDMA_SHIFT 3
-#define OMAP4430_WKUPDEP_MCSPI1_SDMA_MASK BITFIELD(3, 3)
+#define OMAP4430_WKUPDEP_MCSPI1_SDMA_MASK (1 << 3)
/* Used by PM_L4PER_MCSPI1_WKDEP */
#define OMAP4430_WKUPDEP_MCSPI1_TESLA_SHIFT 2
-#define OMAP4430_WKUPDEP_MCSPI1_TESLA_MASK BITFIELD(2, 2)
+#define OMAP4430_WKUPDEP_MCSPI1_TESLA_MASK (1 << 2)
/* Used by PM_L4PER_MCSPI2_WKDEP */
#define OMAP4430_WKUPDEP_MCSPI2_DUCATI_SHIFT 1
-#define OMAP4430_WKUPDEP_MCSPI2_DUCATI_MASK BITFIELD(1, 1)
+#define OMAP4430_WKUPDEP_MCSPI2_DUCATI_MASK (1 << 1)
/* Used by PM_L4PER_MCSPI2_WKDEP */
#define OMAP4430_WKUPDEP_MCSPI2_MPU_SHIFT 0
-#define OMAP4430_WKUPDEP_MCSPI2_MPU_MASK BITFIELD(0, 0)
+#define OMAP4430_WKUPDEP_MCSPI2_MPU_MASK (1 << 0)
/* Used by PM_L4PER_MCSPI2_WKDEP */
#define OMAP4430_WKUPDEP_MCSPI2_SDMA_SHIFT 3
-#define OMAP4430_WKUPDEP_MCSPI2_SDMA_MASK BITFIELD(3, 3)
+#define OMAP4430_WKUPDEP_MCSPI2_SDMA_MASK (1 << 3)
/* Used by PM_L4PER_MCSPI3_WKDEP */
#define OMAP4430_WKUPDEP_MCSPI3_MPU_SHIFT 0
-#define OMAP4430_WKUPDEP_MCSPI3_MPU_MASK BITFIELD(0, 0)
+#define OMAP4430_WKUPDEP_MCSPI3_MPU_MASK (1 << 0)
/* Used by PM_L4PER_MCSPI3_WKDEP */
#define OMAP4430_WKUPDEP_MCSPI3_SDMA_SHIFT 3
-#define OMAP4430_WKUPDEP_MCSPI3_SDMA_MASK BITFIELD(3, 3)
+#define OMAP4430_WKUPDEP_MCSPI3_SDMA_MASK (1 << 3)
/* Used by PM_L4PER_MCSPI4_WKDEP */
#define OMAP4430_WKUPDEP_MCSPI4_MPU_SHIFT 0
-#define OMAP4430_WKUPDEP_MCSPI4_MPU_MASK BITFIELD(0, 0)
+#define OMAP4430_WKUPDEP_MCSPI4_MPU_MASK (1 << 0)
/* Used by PM_L4PER_MCSPI4_WKDEP */
#define OMAP4430_WKUPDEP_MCSPI4_SDMA_SHIFT 3
-#define OMAP4430_WKUPDEP_MCSPI4_SDMA_MASK BITFIELD(3, 3)
+#define OMAP4430_WKUPDEP_MCSPI4_SDMA_MASK (1 << 3)
/* Used by PM_L3INIT_MMC1_WKDEP */
#define OMAP4430_WKUPDEP_MMC1_DUCATI_SHIFT 1
-#define OMAP4430_WKUPDEP_MMC1_DUCATI_MASK BITFIELD(1, 1)
+#define OMAP4430_WKUPDEP_MMC1_DUCATI_MASK (1 << 1)
/* Used by PM_L3INIT_MMC1_WKDEP */
#define OMAP4430_WKUPDEP_MMC1_MPU_SHIFT 0
-#define OMAP4430_WKUPDEP_MMC1_MPU_MASK BITFIELD(0, 0)
+#define OMAP4430_WKUPDEP_MMC1_MPU_MASK (1 << 0)
/* Used by PM_L3INIT_MMC1_WKDEP */
#define OMAP4430_WKUPDEP_MMC1_SDMA_SHIFT 3
-#define OMAP4430_WKUPDEP_MMC1_SDMA_MASK BITFIELD(3, 3)
+#define OMAP4430_WKUPDEP_MMC1_SDMA_MASK (1 << 3)
/* Used by PM_L3INIT_MMC1_WKDEP */
#define OMAP4430_WKUPDEP_MMC1_TESLA_SHIFT 2
-#define OMAP4430_WKUPDEP_MMC1_TESLA_MASK BITFIELD(2, 2)
+#define OMAP4430_WKUPDEP_MMC1_TESLA_MASK (1 << 2)
/* Used by PM_L3INIT_MMC2_WKDEP */
#define OMAP4430_WKUPDEP_MMC2_DUCATI_SHIFT 1
-#define OMAP4430_WKUPDEP_MMC2_DUCATI_MASK BITFIELD(1, 1)
+#define OMAP4430_WKUPDEP_MMC2_DUCATI_MASK (1 << 1)
/* Used by PM_L3INIT_MMC2_WKDEP */
#define OMAP4430_WKUPDEP_MMC2_MPU_SHIFT 0
-#define OMAP4430_WKUPDEP_MMC2_MPU_MASK BITFIELD(0, 0)
+#define OMAP4430_WKUPDEP_MMC2_MPU_MASK (1 << 0)
/* Used by PM_L3INIT_MMC2_WKDEP */
#define OMAP4430_WKUPDEP_MMC2_SDMA_SHIFT 3
-#define OMAP4430_WKUPDEP_MMC2_SDMA_MASK BITFIELD(3, 3)
+#define OMAP4430_WKUPDEP_MMC2_SDMA_MASK (1 << 3)
/* Used by PM_L3INIT_MMC2_WKDEP */
#define OMAP4430_WKUPDEP_MMC2_TESLA_SHIFT 2
-#define OMAP4430_WKUPDEP_MMC2_TESLA_MASK BITFIELD(2, 2)
+#define OMAP4430_WKUPDEP_MMC2_TESLA_MASK (1 << 2)
/* Used by PM_L3INIT_MMC6_WKDEP */
#define OMAP4430_WKUPDEP_MMC6_DUCATI_SHIFT 1
-#define OMAP4430_WKUPDEP_MMC6_DUCATI_MASK BITFIELD(1, 1)
+#define OMAP4430_WKUPDEP_MMC6_DUCATI_MASK (1 << 1)
/* Used by PM_L3INIT_MMC6_WKDEP */
#define OMAP4430_WKUPDEP_MMC6_MPU_SHIFT 0
-#define OMAP4430_WKUPDEP_MMC6_MPU_MASK BITFIELD(0, 0)
+#define OMAP4430_WKUPDEP_MMC6_MPU_MASK (1 << 0)
/* Used by PM_L3INIT_MMC6_WKDEP */
#define OMAP4430_WKUPDEP_MMC6_TESLA_SHIFT 2
-#define OMAP4430_WKUPDEP_MMC6_TESLA_MASK BITFIELD(2, 2)
+#define OMAP4430_WKUPDEP_MMC6_TESLA_MASK (1 << 2)
/* Used by PM_L4PER_MMCSD3_WKDEP */
#define OMAP4430_WKUPDEP_MMCSD3_DUCATI_SHIFT 1
-#define OMAP4430_WKUPDEP_MMCSD3_DUCATI_MASK BITFIELD(1, 1)
+#define OMAP4430_WKUPDEP_MMCSD3_DUCATI_MASK (1 << 1)
/* Used by PM_L4PER_MMCSD3_WKDEP */
#define OMAP4430_WKUPDEP_MMCSD3_MPU_SHIFT 0
-#define OMAP4430_WKUPDEP_MMCSD3_MPU_MASK BITFIELD(0, 0)
+#define OMAP4430_WKUPDEP_MMCSD3_MPU_MASK (1 << 0)
/* Used by PM_L4PER_MMCSD3_WKDEP */
#define OMAP4430_WKUPDEP_MMCSD3_SDMA_SHIFT 3
-#define OMAP4430_WKUPDEP_MMCSD3_SDMA_MASK BITFIELD(3, 3)
+#define OMAP4430_WKUPDEP_MMCSD3_SDMA_MASK (1 << 3)
/* Used by PM_L4PER_MMCSD4_WKDEP */
#define OMAP4430_WKUPDEP_MMCSD4_DUCATI_SHIFT 1
-#define OMAP4430_WKUPDEP_MMCSD4_DUCATI_MASK BITFIELD(1, 1)
+#define OMAP4430_WKUPDEP_MMCSD4_DUCATI_MASK (1 << 1)
/* Used by PM_L4PER_MMCSD4_WKDEP */
#define OMAP4430_WKUPDEP_MMCSD4_MPU_SHIFT 0
-#define OMAP4430_WKUPDEP_MMCSD4_MPU_MASK BITFIELD(0, 0)
+#define OMAP4430_WKUPDEP_MMCSD4_MPU_MASK (1 << 0)
/* Used by PM_L4PER_MMCSD4_WKDEP */
#define OMAP4430_WKUPDEP_MMCSD4_SDMA_SHIFT 3
-#define OMAP4430_WKUPDEP_MMCSD4_SDMA_MASK BITFIELD(3, 3)
+#define OMAP4430_WKUPDEP_MMCSD4_SDMA_MASK (1 << 3)
/* Used by PM_L4PER_MMCSD5_WKDEP */
#define OMAP4430_WKUPDEP_MMCSD5_DUCATI_SHIFT 1
-#define OMAP4430_WKUPDEP_MMCSD5_DUCATI_MASK BITFIELD(1, 1)
+#define OMAP4430_WKUPDEP_MMCSD5_DUCATI_MASK (1 << 1)
/* Used by PM_L4PER_MMCSD5_WKDEP */
#define OMAP4430_WKUPDEP_MMCSD5_MPU_SHIFT 0
-#define OMAP4430_WKUPDEP_MMCSD5_MPU_MASK BITFIELD(0, 0)
+#define OMAP4430_WKUPDEP_MMCSD5_MPU_MASK (1 << 0)
/* Used by PM_L4PER_MMCSD5_WKDEP */
#define OMAP4430_WKUPDEP_MMCSD5_SDMA_SHIFT 3
-#define OMAP4430_WKUPDEP_MMCSD5_SDMA_MASK BITFIELD(3, 3)
+#define OMAP4430_WKUPDEP_MMCSD5_SDMA_MASK (1 << 3)
/* Used by PM_L3INIT_PCIESS_WKDEP */
#define OMAP4430_WKUPDEP_PCIESS_MPU_SHIFT 0
-#define OMAP4430_WKUPDEP_PCIESS_MPU_MASK BITFIELD(0, 0)
+#define OMAP4430_WKUPDEP_PCIESS_MPU_MASK (1 << 0)
/* Used by PM_L3INIT_PCIESS_WKDEP */
#define OMAP4430_WKUPDEP_PCIESS_TESLA_SHIFT 2
-#define OMAP4430_WKUPDEP_PCIESS_TESLA_MASK BITFIELD(2, 2)
+#define OMAP4430_WKUPDEP_PCIESS_TESLA_MASK (1 << 2)
/* Used by PM_ABE_PDM_WKDEP */
#define OMAP4430_WKUPDEP_PDM_DMA_SDMA_SHIFT 7
-#define OMAP4430_WKUPDEP_PDM_DMA_SDMA_MASK BITFIELD(7, 7)
+#define OMAP4430_WKUPDEP_PDM_DMA_SDMA_MASK (1 << 7)
/* Used by PM_ABE_PDM_WKDEP */
#define OMAP4430_WKUPDEP_PDM_DMA_TESLA_SHIFT 6
-#define OMAP4430_WKUPDEP_PDM_DMA_TESLA_MASK BITFIELD(6, 6)
+#define OMAP4430_WKUPDEP_PDM_DMA_TESLA_MASK (1 << 6)
/* Used by PM_ABE_PDM_WKDEP */
#define OMAP4430_WKUPDEP_PDM_IRQ_MPU_SHIFT 0
-#define OMAP4430_WKUPDEP_PDM_IRQ_MPU_MASK BITFIELD(0, 0)
+#define OMAP4430_WKUPDEP_PDM_IRQ_MPU_MASK (1 << 0)
/* Used by PM_ABE_PDM_WKDEP */
#define OMAP4430_WKUPDEP_PDM_IRQ_TESLA_SHIFT 2
-#define OMAP4430_WKUPDEP_PDM_IRQ_TESLA_MASK BITFIELD(2, 2)
+#define OMAP4430_WKUPDEP_PDM_IRQ_TESLA_MASK (1 << 2)
/* Used by PM_WKUP_RTC_WKDEP */
#define OMAP4430_WKUPDEP_RTC_MPU_SHIFT 0
-#define OMAP4430_WKUPDEP_RTC_MPU_MASK BITFIELD(0, 0)
+#define OMAP4430_WKUPDEP_RTC_MPU_MASK (1 << 0)
/* Used by PM_L3INIT_SATA_WKDEP */
#define OMAP4430_WKUPDEP_SATA_MPU_SHIFT 0
-#define OMAP4430_WKUPDEP_SATA_MPU_MASK BITFIELD(0, 0)
+#define OMAP4430_WKUPDEP_SATA_MPU_MASK (1 << 0)
/* Used by PM_L3INIT_SATA_WKDEP */
#define OMAP4430_WKUPDEP_SATA_TESLA_SHIFT 2
-#define OMAP4430_WKUPDEP_SATA_TESLA_MASK BITFIELD(2, 2)
+#define OMAP4430_WKUPDEP_SATA_TESLA_MASK (1 << 2)
/* Used by PM_ABE_SLIMBUS_WKDEP */
#define OMAP4430_WKUPDEP_SLIMBUS1_DMA_SDMA_SHIFT 7
-#define OMAP4430_WKUPDEP_SLIMBUS1_DMA_SDMA_MASK BITFIELD(7, 7)
+#define OMAP4430_WKUPDEP_SLIMBUS1_DMA_SDMA_MASK (1 << 7)
/* Used by PM_ABE_SLIMBUS_WKDEP */
#define OMAP4430_WKUPDEP_SLIMBUS1_DMA_TESLA_SHIFT 6
-#define OMAP4430_WKUPDEP_SLIMBUS1_DMA_TESLA_MASK BITFIELD(6, 6)
+#define OMAP4430_WKUPDEP_SLIMBUS1_DMA_TESLA_MASK (1 << 6)
/* Used by PM_ABE_SLIMBUS_WKDEP */
#define OMAP4430_WKUPDEP_SLIMBUS1_IRQ_MPU_SHIFT 0
-#define OMAP4430_WKUPDEP_SLIMBUS1_IRQ_MPU_MASK BITFIELD(0, 0)
+#define OMAP4430_WKUPDEP_SLIMBUS1_IRQ_MPU_MASK (1 << 0)
/* Used by PM_ABE_SLIMBUS_WKDEP */
#define OMAP4430_WKUPDEP_SLIMBUS1_IRQ_TESLA_SHIFT 2
-#define OMAP4430_WKUPDEP_SLIMBUS1_IRQ_TESLA_MASK BITFIELD(2, 2)
+#define OMAP4430_WKUPDEP_SLIMBUS1_IRQ_TESLA_MASK (1 << 2)
/* Used by PM_L4PER_SLIMBUS2_WKDEP */
#define OMAP4430_WKUPDEP_SLIMBUS2_DMA_SDMA_SHIFT 7
-#define OMAP4430_WKUPDEP_SLIMBUS2_DMA_SDMA_MASK BITFIELD(7, 7)
+#define OMAP4430_WKUPDEP_SLIMBUS2_DMA_SDMA_MASK (1 << 7)
/* Used by PM_L4PER_SLIMBUS2_WKDEP */
#define OMAP4430_WKUPDEP_SLIMBUS2_DMA_TESLA_SHIFT 6
-#define OMAP4430_WKUPDEP_SLIMBUS2_DMA_TESLA_MASK BITFIELD(6, 6)
+#define OMAP4430_WKUPDEP_SLIMBUS2_DMA_TESLA_MASK (1 << 6)
/* Used by PM_L4PER_SLIMBUS2_WKDEP */
#define OMAP4430_WKUPDEP_SLIMBUS2_IRQ_MPU_SHIFT 0
-#define OMAP4430_WKUPDEP_SLIMBUS2_IRQ_MPU_MASK BITFIELD(0, 0)
+#define OMAP4430_WKUPDEP_SLIMBUS2_IRQ_MPU_MASK (1 << 0)
/* Used by PM_L4PER_SLIMBUS2_WKDEP */
#define OMAP4430_WKUPDEP_SLIMBUS2_IRQ_TESLA_SHIFT 2
-#define OMAP4430_WKUPDEP_SLIMBUS2_IRQ_TESLA_MASK BITFIELD(2, 2)
+#define OMAP4430_WKUPDEP_SLIMBUS2_IRQ_TESLA_MASK (1 << 2)
/* Used by PM_ALWON_SR_CORE_WKDEP */
#define OMAP4430_WKUPDEP_SR_CORE_DUCATI_SHIFT 1
-#define OMAP4430_WKUPDEP_SR_CORE_DUCATI_MASK BITFIELD(1, 1)
+#define OMAP4430_WKUPDEP_SR_CORE_DUCATI_MASK (1 << 1)
/* Used by PM_ALWON_SR_CORE_WKDEP */
#define OMAP4430_WKUPDEP_SR_CORE_MPU_SHIFT 0
-#define OMAP4430_WKUPDEP_SR_CORE_MPU_MASK BITFIELD(0, 0)
+#define OMAP4430_WKUPDEP_SR_CORE_MPU_MASK (1 << 0)
/* Used by PM_ALWON_SR_IVA_WKDEP */
#define OMAP4430_WKUPDEP_SR_IVA_DUCATI_SHIFT 1
-#define OMAP4430_WKUPDEP_SR_IVA_DUCATI_MASK BITFIELD(1, 1)
+#define OMAP4430_WKUPDEP_SR_IVA_DUCATI_MASK (1 << 1)
/* Used by PM_ALWON_SR_IVA_WKDEP */
#define OMAP4430_WKUPDEP_SR_IVA_MPU_SHIFT 0
-#define OMAP4430_WKUPDEP_SR_IVA_MPU_MASK BITFIELD(0, 0)
+#define OMAP4430_WKUPDEP_SR_IVA_MPU_MASK (1 << 0)
/* Used by PM_ALWON_SR_MPU_WKDEP */
#define OMAP4430_WKUPDEP_SR_MPU_MPU_SHIFT 0
-#define OMAP4430_WKUPDEP_SR_MPU_MPU_MASK BITFIELD(0, 0)
+#define OMAP4430_WKUPDEP_SR_MPU_MPU_MASK (1 << 0)
/* Used by PM_WKUP_TIMER12_WKDEP */
#define OMAP4430_WKUPDEP_TIMER12_MPU_SHIFT 0
-#define OMAP4430_WKUPDEP_TIMER12_MPU_MASK BITFIELD(0, 0)
+#define OMAP4430_WKUPDEP_TIMER12_MPU_MASK (1 << 0)
/* Used by PM_WKUP_TIMER1_WKDEP */
#define OMAP4430_WKUPDEP_TIMER1_MPU_SHIFT 0
-#define OMAP4430_WKUPDEP_TIMER1_MPU_MASK BITFIELD(0, 0)
+#define OMAP4430_WKUPDEP_TIMER1_MPU_MASK (1 << 0)
/* Used by PM_ABE_TIMER5_WKDEP */
#define OMAP4430_WKUPDEP_TIMER5_MPU_SHIFT 0
-#define OMAP4430_WKUPDEP_TIMER5_MPU_MASK BITFIELD(0, 0)
+#define OMAP4430_WKUPDEP_TIMER5_MPU_MASK (1 << 0)
/* Used by PM_ABE_TIMER5_WKDEP */
#define OMAP4430_WKUPDEP_TIMER5_TESLA_SHIFT 2
-#define OMAP4430_WKUPDEP_TIMER5_TESLA_MASK BITFIELD(2, 2)
+#define OMAP4430_WKUPDEP_TIMER5_TESLA_MASK (1 << 2)
/* Used by PM_ABE_TIMER6_WKDEP */
#define OMAP4430_WKUPDEP_TIMER6_MPU_SHIFT 0
-#define OMAP4430_WKUPDEP_TIMER6_MPU_MASK BITFIELD(0, 0)
+#define OMAP4430_WKUPDEP_TIMER6_MPU_MASK (1 << 0)
/* Used by PM_ABE_TIMER6_WKDEP */
#define OMAP4430_WKUPDEP_TIMER6_TESLA_SHIFT 2
-#define OMAP4430_WKUPDEP_TIMER6_TESLA_MASK BITFIELD(2, 2)
+#define OMAP4430_WKUPDEP_TIMER6_TESLA_MASK (1 << 2)
/* Used by PM_ABE_TIMER7_WKDEP */
#define OMAP4430_WKUPDEP_TIMER7_MPU_SHIFT 0
-#define OMAP4430_WKUPDEP_TIMER7_MPU_MASK BITFIELD(0, 0)
+#define OMAP4430_WKUPDEP_TIMER7_MPU_MASK (1 << 0)
/* Used by PM_ABE_TIMER7_WKDEP */
#define OMAP4430_WKUPDEP_TIMER7_TESLA_SHIFT 2
-#define OMAP4430_WKUPDEP_TIMER7_TESLA_MASK BITFIELD(2, 2)
+#define OMAP4430_WKUPDEP_TIMER7_TESLA_MASK (1 << 2)
/* Used by PM_ABE_TIMER8_WKDEP */
#define OMAP4430_WKUPDEP_TIMER8_MPU_SHIFT 0
-#define OMAP4430_WKUPDEP_TIMER8_MPU_MASK BITFIELD(0, 0)
+#define OMAP4430_WKUPDEP_TIMER8_MPU_MASK (1 << 0)
/* Used by PM_ABE_TIMER8_WKDEP */
#define OMAP4430_WKUPDEP_TIMER8_TESLA_SHIFT 2
-#define OMAP4430_WKUPDEP_TIMER8_TESLA_MASK BITFIELD(2, 2)
+#define OMAP4430_WKUPDEP_TIMER8_TESLA_MASK (1 << 2)
/* Used by PM_L4PER_UART1_WKDEP */
#define OMAP4430_WKUPDEP_UART1_MPU_SHIFT 0
-#define OMAP4430_WKUPDEP_UART1_MPU_MASK BITFIELD(0, 0)
+#define OMAP4430_WKUPDEP_UART1_MPU_MASK (1 << 0)
/* Used by PM_L4PER_UART1_WKDEP */
#define OMAP4430_WKUPDEP_UART1_SDMA_SHIFT 3
-#define OMAP4430_WKUPDEP_UART1_SDMA_MASK BITFIELD(3, 3)
+#define OMAP4430_WKUPDEP_UART1_SDMA_MASK (1 << 3)
/* Used by PM_L4PER_UART2_WKDEP */
#define OMAP4430_WKUPDEP_UART2_MPU_SHIFT 0
-#define OMAP4430_WKUPDEP_UART2_MPU_MASK BITFIELD(0, 0)
+#define OMAP4430_WKUPDEP_UART2_MPU_MASK (1 << 0)
/* Used by PM_L4PER_UART2_WKDEP */
#define OMAP4430_WKUPDEP_UART2_SDMA_SHIFT 3
-#define OMAP4430_WKUPDEP_UART2_SDMA_MASK BITFIELD(3, 3)
+#define OMAP4430_WKUPDEP_UART2_SDMA_MASK (1 << 3)
/* Used by PM_L4PER_UART3_WKDEP */
#define OMAP4430_WKUPDEP_UART3_DUCATI_SHIFT 1
-#define OMAP4430_WKUPDEP_UART3_DUCATI_MASK BITFIELD(1, 1)
+#define OMAP4430_WKUPDEP_UART3_DUCATI_MASK (1 << 1)
/* Used by PM_L4PER_UART3_WKDEP */
#define OMAP4430_WKUPDEP_UART3_MPU_SHIFT 0
-#define OMAP4430_WKUPDEP_UART3_MPU_MASK BITFIELD(0, 0)
+#define OMAP4430_WKUPDEP_UART3_MPU_MASK (1 << 0)
/* Used by PM_L4PER_UART3_WKDEP */
#define OMAP4430_WKUPDEP_UART3_SDMA_SHIFT 3
-#define OMAP4430_WKUPDEP_UART3_SDMA_MASK BITFIELD(3, 3)
+#define OMAP4430_WKUPDEP_UART3_SDMA_MASK (1 << 3)
/* Used by PM_L4PER_UART3_WKDEP */
#define OMAP4430_WKUPDEP_UART3_TESLA_SHIFT 2
-#define OMAP4430_WKUPDEP_UART3_TESLA_MASK BITFIELD(2, 2)
+#define OMAP4430_WKUPDEP_UART3_TESLA_MASK (1 << 2)
/* Used by PM_L4PER_UART4_WKDEP */
#define OMAP4430_WKUPDEP_UART4_MPU_SHIFT 0
-#define OMAP4430_WKUPDEP_UART4_MPU_MASK BITFIELD(0, 0)
+#define OMAP4430_WKUPDEP_UART4_MPU_MASK (1 << 0)
/* Used by PM_L4PER_UART4_WKDEP */
#define OMAP4430_WKUPDEP_UART4_SDMA_SHIFT 3
-#define OMAP4430_WKUPDEP_UART4_SDMA_MASK BITFIELD(3, 3)
+#define OMAP4430_WKUPDEP_UART4_SDMA_MASK (1 << 3)
/* Used by PM_L3INIT_UNIPRO1_WKDEP */
#define OMAP4430_WKUPDEP_UNIPRO1_DUCATI_SHIFT 1
-#define OMAP4430_WKUPDEP_UNIPRO1_DUCATI_MASK BITFIELD(1, 1)
+#define OMAP4430_WKUPDEP_UNIPRO1_DUCATI_MASK (1 << 1)
/* Used by PM_L3INIT_UNIPRO1_WKDEP */
#define OMAP4430_WKUPDEP_UNIPRO1_MPU_SHIFT 0
-#define OMAP4430_WKUPDEP_UNIPRO1_MPU_MASK BITFIELD(0, 0)
+#define OMAP4430_WKUPDEP_UNIPRO1_MPU_MASK (1 << 0)
/* Used by PM_L3INIT_USB_HOST_WKDEP */
#define OMAP4430_WKUPDEP_USB_HOST_DUCATI_SHIFT 1
-#define OMAP4430_WKUPDEP_USB_HOST_DUCATI_MASK BITFIELD(1, 1)
+#define OMAP4430_WKUPDEP_USB_HOST_DUCATI_MASK (1 << 1)
/* Used by PM_L3INIT_USB_HOST_FS_WKDEP */
#define OMAP4430_WKUPDEP_USB_HOST_FS_DUCATI_SHIFT 1
-#define OMAP4430_WKUPDEP_USB_HOST_FS_DUCATI_MASK BITFIELD(1, 1)
+#define OMAP4430_WKUPDEP_USB_HOST_FS_DUCATI_MASK (1 << 1)
/* Used by PM_L3INIT_USB_HOST_FS_WKDEP */
#define OMAP4430_WKUPDEP_USB_HOST_FS_MPU_SHIFT 0
-#define OMAP4430_WKUPDEP_USB_HOST_FS_MPU_MASK BITFIELD(0, 0)
+#define OMAP4430_WKUPDEP_USB_HOST_FS_MPU_MASK (1 << 0)
/* Used by PM_L3INIT_USB_HOST_WKDEP */
#define OMAP4430_WKUPDEP_USB_HOST_MPU_SHIFT 0
-#define OMAP4430_WKUPDEP_USB_HOST_MPU_MASK BITFIELD(0, 0)
+#define OMAP4430_WKUPDEP_USB_HOST_MPU_MASK (1 << 0)
/* Used by PM_L3INIT_USB_OTG_WKDEP */
#define OMAP4430_WKUPDEP_USB_OTG_DUCATI_SHIFT 1
-#define OMAP4430_WKUPDEP_USB_OTG_DUCATI_MASK BITFIELD(1, 1)
+#define OMAP4430_WKUPDEP_USB_OTG_DUCATI_MASK (1 << 1)
/* Used by PM_L3INIT_USB_OTG_WKDEP */
#define OMAP4430_WKUPDEP_USB_OTG_MPU_SHIFT 0
-#define OMAP4430_WKUPDEP_USB_OTG_MPU_MASK BITFIELD(0, 0)
+#define OMAP4430_WKUPDEP_USB_OTG_MPU_MASK (1 << 0)
/* Used by PM_L3INIT_USB_TLL_WKDEP */
#define OMAP4430_WKUPDEP_USB_TLL_DUCATI_SHIFT 1
-#define OMAP4430_WKUPDEP_USB_TLL_DUCATI_MASK BITFIELD(1, 1)
+#define OMAP4430_WKUPDEP_USB_TLL_DUCATI_MASK (1 << 1)
/* Used by PM_L3INIT_USB_TLL_WKDEP */
#define OMAP4430_WKUPDEP_USB_TLL_MPU_SHIFT 0
-#define OMAP4430_WKUPDEP_USB_TLL_MPU_MASK BITFIELD(0, 0)
+#define OMAP4430_WKUPDEP_USB_TLL_MPU_MASK (1 << 0)
/* Used by PM_WKUP_USIM_WKDEP */
#define OMAP4430_WKUPDEP_USIM_MPU_SHIFT 0
-#define OMAP4430_WKUPDEP_USIM_MPU_MASK BITFIELD(0, 0)
+#define OMAP4430_WKUPDEP_USIM_MPU_MASK (1 << 0)
/* Used by PM_WKUP_USIM_WKDEP */
#define OMAP4430_WKUPDEP_USIM_SDMA_SHIFT 3
-#define OMAP4430_WKUPDEP_USIM_SDMA_MASK BITFIELD(3, 3)
+#define OMAP4430_WKUPDEP_USIM_SDMA_MASK (1 << 3)
/* Used by PM_WKUP_WDT2_WKDEP */
#define OMAP4430_WKUPDEP_WDT2_DUCATI_SHIFT 1
-#define OMAP4430_WKUPDEP_WDT2_DUCATI_MASK BITFIELD(1, 1)
+#define OMAP4430_WKUPDEP_WDT2_DUCATI_MASK (1 << 1)
/* Used by PM_WKUP_WDT2_WKDEP */
#define OMAP4430_WKUPDEP_WDT2_MPU_SHIFT 0
-#define OMAP4430_WKUPDEP_WDT2_MPU_MASK BITFIELD(0, 0)
+#define OMAP4430_WKUPDEP_WDT2_MPU_MASK (1 << 0)
/* Used by PM_ABE_WDT3_WKDEP */
#define OMAP4430_WKUPDEP_WDT3_MPU_SHIFT 0
-#define OMAP4430_WKUPDEP_WDT3_MPU_MASK BITFIELD(0, 0)
+#define OMAP4430_WKUPDEP_WDT3_MPU_MASK (1 << 0)
/* Used by PM_L3INIT_HSI_WKDEP */
#define OMAP4430_WKUPDEP_WGM_HSI_WAKE_MPU_SHIFT 8
-#define OMAP4430_WKUPDEP_WGM_HSI_WAKE_MPU_MASK BITFIELD(8, 8)
+#define OMAP4430_WKUPDEP_WGM_HSI_WAKE_MPU_MASK (1 << 8)
/* Used by PM_L3INIT_XHPI_WKDEP */
#define OMAP4430_WKUPDEP_XHPI_DUCATI_SHIFT 1
-#define OMAP4430_WKUPDEP_XHPI_DUCATI_MASK BITFIELD(1, 1)
+#define OMAP4430_WKUPDEP_XHPI_DUCATI_MASK (1 << 1)
/* Used by PRM_IO_PMCTRL */
#define OMAP4430_WUCLK_CTRL_SHIFT 8
-#define OMAP4430_WUCLK_CTRL_MASK BITFIELD(8, 8)
+#define OMAP4430_WUCLK_CTRL_MASK (1 << 8)
/* Used by PRM_IO_PMCTRL */
#define OMAP4430_WUCLK_STATUS_SHIFT 9
-#define OMAP4430_WUCLK_STATUS_MASK BITFIELD(9, 9)
+#define OMAP4430_WUCLK_STATUS_MASK (1 << 9)
+
+/* Used by REVISION_PRM */
+#define OMAP4430_X_MAJOR_SHIFT 8
+#define OMAP4430_X_MAJOR_MASK (0x7 << 8)
+
+/* Used by REVISION_PRM */
+#define OMAP4430_Y_MINOR_SHIFT 0
+#define OMAP4430_Y_MINOR_MASK (0x3f << 0)
#endif
diff --git a/arch/arm/mach-omap2/prm.h b/arch/arm/mach-omap2/prm.h
index 588873b9303a..7be040b2fdab 100644
--- a/arch/arm/mach-omap2/prm.h
+++ b/arch/arm/mach-omap2/prm.h
@@ -5,7 +5,7 @@
* OMAP2/3 Power/Reset Management (PRM) register definitions
*
* Copyright (C) 2007-2009 Texas Instruments, Inc.
- * Copyright (C) 2009 Nokia Corporation
+ * Copyright (C) 2010 Nokia Corporation
*
* Written by Paul Walmsley
*
@@ -246,6 +246,15 @@ static inline u32 prm_clear_mod_reg_bits(u32 bits, s16 module, s16 idx)
return prm_rmw_mod_reg_bits(bits, 0x0, module, idx);
}
+/* These omap2_ PRM functions apply to both OMAP2 and 3 */
+int omap2_prm_is_hardreset_asserted(s16 prm_mod, u8 shift);
+int omap2_prm_assert_hardreset(s16 prm_mod, u8 shift);
+int omap2_prm_deassert_hardreset(s16 prm_mod, u8 shift);
+
+int omap4_prm_is_hardreset_asserted(void __iomem *rstctrl_reg, u8 shift);
+int omap4_prm_assert_hardreset(void __iomem *rstctrl_reg, u8 shift);
+int omap4_prm_deassert_hardreset(void __iomem *rstctrl_reg, u8 shift);
+
#endif
/*
@@ -398,4 +407,11 @@ static inline u32 prm_clear_mod_reg_bits(u32 bits, s16 module, s16 idx)
#define OMAP_POWERSTATE_MASK (0x3 << 0)
+/*
+ * MAX_MODULE_HARDRESET_WAIT: Maximum microseconds to wait for an OMAP
+ * submodule to exit hardreset
+ */
+#define MAX_MODULE_HARDRESET_WAIT 10000
+
+
#endif
diff --git a/arch/arm/mach-omap2/prm2xxx_3xxx.c b/arch/arm/mach-omap2/prm2xxx_3xxx.c
new file mode 100644
index 000000000000..421771eee450
--- /dev/null
+++ b/arch/arm/mach-omap2/prm2xxx_3xxx.c
@@ -0,0 +1,110 @@
+/*
+ * OMAP2/3 PRM module functions
+ *
+ * Copyright (C) 2010 Texas Instruments, Inc.
+ * Copyright (C) 2010 Nokia Corporation
+ * Benoît Cousson
+ * Paul Walmsley
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/kernel.h>
+#include <linux/delay.h>
+#include <linux/errno.h>
+#include <linux/err.h>
+
+#include <plat/common.h>
+#include <plat/cpu.h>
+#include <plat/prcm.h>
+
+#include "prm.h"
+#include "prm-regbits-24xx.h"
+#include "prm-regbits-34xx.h"
+
+/**
+ * omap2_prm_is_hardreset_asserted - read the HW reset line state of
+ * submodules contained in the hwmod module
+ * @prm_mod: PRM submodule base (e.g. CORE_MOD)
+ * @shift: register bit shift corresponding to the reset line to check
+ *
+ * Returns 1 if the (sub)module hardreset line is currently asserted,
+ * 0 if the (sub)module hardreset line is not currently asserted, or
+ * -EINVAL if called while running on a non-OMAP2/3 chip.
+ */
+int omap2_prm_is_hardreset_asserted(s16 prm_mod, u8 shift)
+{
+ if (!(cpu_is_omap24xx() || cpu_is_omap34xx()))
+ return -EINVAL;
+
+ return prm_read_mod_bits_shift(prm_mod, OMAP2_RM_RSTCTRL,
+ (1 << shift));
+}
+
+/**
+ * omap2_prm_assert_hardreset - assert the HW reset line of a submodule
+ * @prm_mod: PRM submodule base (e.g. CORE_MOD)
+ * @shift: register bit shift corresponding to the reset line to assert
+ *
+ * Some IPs like dsp or iva contain processors that require an HW
+ * reset line to be asserted / deasserted in order to fully enable the
+ * IP. These modules may have multiple hard-reset lines that reset
+ * different 'submodules' inside the IP block. This function will
+ * place the submodule into reset. Returns 0 upon success or -EINVAL
+ * upon an argument error.
+ */
+int omap2_prm_assert_hardreset(s16 prm_mod, u8 shift)
+{
+ u32 mask;
+
+ if (!(cpu_is_omap24xx() || cpu_is_omap34xx()))
+ return -EINVAL;
+
+ mask = 1 << shift;
+ prm_rmw_mod_reg_bits(mask, mask, prm_mod, OMAP2_RM_RSTCTRL);
+
+ return 0;
+}
+
+/**
+ * omap2_prm_deassert_hardreset - deassert a submodule hardreset line and wait
+ * @prm_mod: PRM submodule base (e.g. CORE_MOD)
+ * @shift: register bit shift corresponding to the reset line to deassert
+ *
+ * Some IPs like dsp or iva contain processors that require an HW
+ * reset line to be asserted / deasserted in order to fully enable the
+ * IP. These modules may have multiple hard-reset lines that reset
+ * different 'submodules' inside the IP block. This function will
+ * take the submodule out of reset and wait until the PRCM indicates
+ * that the reset has completed before returning. Returns 0 upon success or
+ * -EINVAL upon an argument error, -EEXIST if the submodule was already out
+ * of reset, or -EBUSY if the submodule did not exit reset promptly.
+ */
+int omap2_prm_deassert_hardreset(s16 prm_mod, u8 shift)
+{
+ u32 mask;
+ int c;
+
+ if (!(cpu_is_omap24xx() || cpu_is_omap34xx()))
+ return -EINVAL;
+
+ mask = 1 << shift;
+
+ /* Check the current status to avoid de-asserting the line twice */
+ if (prm_read_mod_bits_shift(prm_mod, OMAP2_RM_RSTCTRL, mask) == 0)
+ return -EEXIST;
+
+ /* Clear the reset status by writing 1 to the status bit */
+ prm_rmw_mod_reg_bits(0xffffffff, mask, prm_mod, OMAP2_RM_RSTST);
+ /* de-assert the reset control line */
+ prm_rmw_mod_reg_bits(mask, 0, prm_mod, OMAP2_RM_RSTCTRL);
+ /* wait for the status to be set */
+ omap_test_timeout(prm_read_mod_bits_shift(prm_mod, OMAP2_RM_RSTST,
+ mask),
+ MAX_MODULE_HARDRESET_WAIT, c);
+
+ return (c == MAX_MODULE_HARDRESET_WAIT) ? -EBUSY : 0;
+}
+
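As a side note on the return codes documented in the kernel-doc above, here is a hedged caller-side sketch (not part of this patch; the function name and the source of 'prm_mod'/'shift' are hypothetical) showing how the OMAP2/3 helpers might be driven:

/*
 * Hedged caller sketch: cycle a submodule reset line via the OMAP2/3
 * helpers and handle the documented return codes.  'prm_mod' and
 * 'shift' are assumed to come from hwmod/reset-line data.
 */
static int example_omap2_cycle_hardreset(s16 prm_mod, u8 shift)
{
	int r;

	r = omap2_prm_assert_hardreset(prm_mod, shift);
	if (r)
		return r;		/* -EINVAL: not an OMAP2/3 */

	r = omap2_prm_deassert_hardreset(prm_mod, shift);
	if (r == -EEXIST)
		return 0;		/* line was already out of reset */
	if (r == -EBUSY)
		pr_err("reset line %d did not deassert\n", shift);

	return r;
}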
diff --git a/arch/arm/mach-omap2/prm44xx.c b/arch/arm/mach-omap2/prm44xx.c
new file mode 100644
index 000000000000..a1ff918d9bed
--- /dev/null
+++ b/arch/arm/mach-omap2/prm44xx.c
@@ -0,0 +1,116 @@
+/*
+ * OMAP4 PRM module functions
+ *
+ * Copyright (C) 2010 Texas Instruments, Inc.
+ * Copyright (C) 2010 Nokia Corporation
+ * Benoît Cousson
+ * Paul Walmsley
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/kernel.h>
+#include <linux/delay.h>
+#include <linux/errno.h>
+#include <linux/err.h>
+
+#include <plat/common.h>
+#include <plat/cpu.h>
+#include <plat/prcm.h>
+
+#include "prm.h"
+#include "prm-regbits-44xx.h"
+
+/*
+ * Address offset (in bytes) between the reset control and the reset
+ * status registers: 4 bytes on OMAP4
+ */
+#define OMAP4_RST_CTRL_ST_OFFSET 4
+
+/**
+ * omap4_prm_is_hardreset_asserted - read the HW reset line state of
+ * submodules contained in the hwmod module
+ * @rstctrl_reg: RM_RSTCTRL register address for this module
+ * @shift: register bit shift corresponding to the reset line to check
+ *
+ * Returns 1 if the (sub)module hardreset line is currently asserted,
+ * 0 if the (sub)module hardreset line is not currently asserted, or
+ * -EINVAL upon parameter error.
+ */
+int omap4_prm_is_hardreset_asserted(void __iomem *rstctrl_reg, u8 shift)
+{
+ if (!cpu_is_omap44xx() || !rstctrl_reg)
+ return -EINVAL;
+
+ return omap4_prm_read_bits_shift(rstctrl_reg, (1 << shift));
+}
+
+/**
+ * omap4_prm_assert_hardreset - assert the HW reset line of a submodule
+ * @rstctrl_reg: RM_RSTCTRL register address for this module
+ * @shift: register bit shift corresponding to the reset line to assert
+ *
+ * Some IPs like dsp, ipu or iva contain processors that require an HW
+ * reset line to be asserted / deasserted in order to fully enable the
+ * IP. These modules may have multiple hard-reset lines that reset
+ * different 'submodules' inside the IP block. This function will
+ * place the submodule into reset. Returns 0 upon success or -EINVAL
+ * upon an argument error.
+ */
+int omap4_prm_assert_hardreset(void __iomem *rstctrl_reg, u8 shift)
+{
+ u32 mask;
+
+ if (!cpu_is_omap44xx() || !rstctrl_reg)
+ return -EINVAL;
+
+ mask = 1 << shift;
+ omap4_prm_rmw_reg_bits(mask, mask, rstctrl_reg);
+
+ return 0;
+}
+
+/**
+ * omap4_prm_deassert_hardreset - deassert a submodule hardreset line and wait
+ * @rstctrl_reg: RM_RSTCTRL register address for this module
+ * @shift: register bit shift corresponding to the reset line to deassert
+ *
+ * Some IPs like dsp, ipu or iva contain processors that require an HW
+ * reset line to be asserted / deasserted in order to fully enable the
+ * IP. These modules may have multiple hard-reset lines that reset
+ * different 'submodules' inside the IP block. This function will
+ * take the submodule out of reset and wait until the PRCM indicates
+ * that the reset has completed before returning. Returns 0 upon success or
+ * -EINVAL upon an argument error, -EEXIST if the submodule was already out
+ * of reset, or -EBUSY if the submodule did not exit reset promptly.
+ */
+int omap4_prm_deassert_hardreset(void __iomem *rstctrl_reg, u8 shift)
+{
+ u32 mask;
+ void __iomem *rstst_reg;
+ int c;
+
+ if (!cpu_is_omap44xx() || !rstctrl_reg)
+ return -EINVAL;
+
+ rstst_reg = rstctrl_reg + OMAP4_RST_CTRL_ST_OFFSET;
+
+ mask = 1 << shift;
+
+ /* Check the current status to avoid de-asserting the line twice */
+ if (omap4_prm_read_bits_shift(rstctrl_reg, mask) == 0)
+ return -EEXIST;
+
+ /* Clear the reset status by writing 1 to the status bit */
+ omap4_prm_rmw_reg_bits(0xffffffff, mask, rstst_reg);
+ /* de-assert the reset control line */
+ omap4_prm_rmw_reg_bits(mask, 0, rstctrl_reg);
+ /* wait for the status to be set */
+ omap_test_timeout(omap4_prm_read_bits_shift(rstst_reg, mask),
+ MAX_MODULE_HARDRESET_WAIT, c);
+
+ return (c == MAX_MODULE_HARDRESET_WAIT) ? -EBUSY : 0;
+}
+
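The OMAP4 variants above take the already-mapped RM_RSTCTRL register address rather than a PRM module offset, and locate the status register at a fixed 4-byte offset. A minimal caller-side sketch under the assumption that 'rstctrl_reg' is supplied by hwmod data (everything here except the three helpers is hypothetical):

/*
 * Hedged sketch: same reset handling as on OMAP2/3, but keyed on a
 * mapped RM_RSTCTRL address.  Nothing in this sketch is defined by the
 * patch beyond the omap4_prm_* helpers it calls.
 */
static int example_omap4_release_hardreset(void __iomem *rstctrl_reg, u8 shift)
{
	int r;

	/* treat "not asserted" and parameter errors alike: nothing to release */
	if (omap4_prm_is_hardreset_asserted(rstctrl_reg, shift) <= 0)
		return 0;

	r = omap4_prm_deassert_hardreset(rstctrl_reg, shift);
	if (r == -EBUSY)
		pr_err("OMAP4 reset line %d stuck asserted\n", shift);

	return r;
}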
diff --git a/arch/arm/mach-omap2/prm44xx.h b/arch/arm/mach-omap2/prm44xx.h
index fe8ef26431e5..59839dbabd84 100644
--- a/arch/arm/mach-omap2/prm44xx.h
+++ b/arch/arm/mach-omap2/prm44xx.h
@@ -44,14 +44,12 @@
#define OMAP4430_PRM_IRQSTATUS_TESLA OMAP44XX_PRM_REGADDR(OMAP4430_PRM_OCP_SOCKET_MOD, 0x0030)
#define OMAP4_PRM_IRQENABLE_TESLA_OFFSET 0x0038
#define OMAP4430_PRM_IRQENABLE_TESLA OMAP44XX_PRM_REGADDR(OMAP4430_PRM_OCP_SOCKET_MOD, 0x0038)
-#define OMAP4_PRM_PRM_PROFILING_CLKCTRL_OFFSET 0x0040
-#define OMAP4430_PRM_PRM_PROFILING_CLKCTRL OMAP44XX_PRM_REGADDR(OMAP4430_PRM_OCP_SOCKET_MOD, 0x0040)
+#define OMAP4_CM_PRM_PROFILING_CLKCTRL_OFFSET 0x0040
+#define OMAP4430_CM_PRM_PROFILING_CLKCTRL OMAP44XX_PRM_REGADDR(OMAP4430_PRM_OCP_SOCKET_MOD, 0x0040)
/* PRM.CKGEN_PRM register offsets */
#define OMAP4_CM_ABE_DSS_SYS_CLKSEL_OFFSET 0x0000
#define OMAP4430_CM_ABE_DSS_SYS_CLKSEL OMAP44XX_PRM_REGADDR(OMAP4430_PRM_CKGEN_MOD, 0x0000)
-#define OMAP4_CM_DPLL_SYS_REF_CLKSEL_OFFSET 0x0004
-#define OMAP4430_CM_DPLL_SYS_REF_CLKSEL OMAP44XX_PRM_REGADDR(OMAP4430_PRM_CKGEN_MOD, 0x0004)
#define OMAP4_CM_L4_WKUP_CLKSEL_OFFSET 0x0008
#define OMAP4430_CM_L4_WKUP_CLKSEL OMAP44XX_PRM_REGADDR(OMAP4430_PRM_CKGEN_MOD, 0x0008)
#define OMAP4_CM_ABE_PLL_REF_CLKSEL_OFFSET 0x000c
@@ -686,8 +684,8 @@
#define OMAP4430_PRM_LDO_ABB_IVA_SETUP OMAP44XX_PRM_REGADDR(OMAP4430_PRM_DEVICE_MOD, 0x00d8)
#define OMAP4_PRM_LDO_ABB_IVA_CTRL_OFFSET 0x00dc
#define OMAP4430_PRM_LDO_ABB_IVA_CTRL OMAP44XX_PRM_REGADDR(OMAP4430_PRM_DEVICE_MOD, 0x00dc)
-#define OMAP4_PRM_LDO_BANDGAP_CTRL_OFFSET 0x00e0
-#define OMAP4430_PRM_LDO_BANDGAP_CTRL OMAP44XX_PRM_REGADDR(OMAP4430_PRM_DEVICE_MOD, 0x00e0)
+#define OMAP4_PRM_LDO_BANDGAP_SETUP_OFFSET 0x00e0
+#define OMAP4430_PRM_LDO_BANDGAP_SETUP OMAP44XX_PRM_REGADDR(OMAP4430_PRM_DEVICE_MOD, 0x00e0)
#define OMAP4_PRM_DEVICE_OFF_CTRL_OFFSET 0x00e4
#define OMAP4430_PRM_DEVICE_OFF_CTRL OMAP44XX_PRM_REGADDR(OMAP4430_PRM_DEVICE_MOD, 0x00e4)
#define OMAP4_PRM_PHASE1_CNDP_OFFSET 0x00e8
@@ -698,6 +696,8 @@
#define OMAP4430_PRM_PHASE2B_CNDP OMAP44XX_PRM_REGADDR(OMAP4430_PRM_DEVICE_MOD, 0x00f0)
#define OMAP4_PRM_MODEM_IF_CTRL_OFFSET 0x00f4
#define OMAP4430_PRM_MODEM_IF_CTRL OMAP44XX_PRM_REGADDR(OMAP4430_PRM_DEVICE_MOD, 0x00f4)
+#define OMAP4_PRM_VC_ERRST_OFFSET 0x00f8
+#define OMAP4430_PRM_VC_ERRST OMAP44XX_PRM_REGADDR(OMAP4430_PRM_DEVICE_MOD, 0x00f8)
/*
* PRCM_MPU
@@ -715,6 +715,8 @@
/* PRCM_MPU.DEVICE_PRM register offsets */
#define OMAP4_PRCM_MPU_PRM_RSTST_OFFSET 0x0000
#define OMAP4430_PRCM_MPU_PRM_RSTST OMAP44XX_PRCM_MPU_REGADDR(OMAP4430_PRCM_MPU_DEVICE_PRM_MOD, 0x0000)
+#define OMAP4_PRCM_MPU_PRM_PSCON_COUNT_OFFSET 0x0004
+#define OMAP4430_PRCM_MPU_PRM_PSCON_COUNT OMAP44XX_PRCM_MPU_REGADDR(OMAP4430_PRCM_MPU_DEVICE_PRM_MOD, 0x0004)
/* PRCM_MPU.CPU0 register offsets */
#define OMAP4_PM_CPU0_PWRSTCTRL_OFFSET 0x0000
diff --git a/arch/arm/mach-omap2/serial.c b/arch/arm/mach-omap2/serial.c
index 566e991ede81..d17960a1be25 100644
--- a/arch/arm/mach-omap2/serial.c
+++ b/arch/arm/mach-omap2/serial.c
@@ -19,20 +19,32 @@
*/
#include <linux/kernel.h>
#include <linux/init.h>
-#include <linux/serial_8250.h>
#include <linux/serial_reg.h>
#include <linux/clk.h>
#include <linux/io.h>
#include <linux/delay.h>
+#include <linux/platform_device.h>
+#include <linux/slab.h>
+#include <linux/serial_8250.h>
+#include <linux/pm_runtime.h>
+#include <linux/console.h>
+
+#ifdef CONFIG_SERIAL_OMAP
+#include <plat/omap-serial.h>
+#endif
#include <plat/common.h>
#include <plat/board.h>
#include <plat/clock.h>
-#include <plat/control.h>
+#include <plat/dma.h>
+#include <plat/omap_hwmod.h>
+#include <plat/omap_device.h>
#include "prm.h"
#include "pm.h"
+#include "cm.h"
#include "prm-regbits-34xx.h"
+#include "control.h"
#define UART_OMAP_NO_EMPTY_FIFO_READ_IP_REV 0x52
#define UART_OMAP_WER 0x17 /* Wake-up enable register */
@@ -48,6 +60,8 @@
*/
#define DEFAULT_TIMEOUT 0
+#define MAX_UART_HWMOD_NAME_LEN 16
+
struct omap_uart_state {
int num;
int can_sleep;
@@ -58,14 +72,21 @@ struct omap_uart_state {
void __iomem *wk_en;
u32 wk_mask;
u32 padconf;
+ u32 dma_enabled;
struct clk *ick;
struct clk *fck;
int clocked;
- struct plat_serial8250_port *p;
+ int irq;
+ int regshift;
+ int irqflags;
+ void __iomem *membase;
+ resource_size_t mapbase;
+
struct list_head node;
- struct platform_device pdev;
+ struct omap_hwmod *oh;
+ struct platform_device *pdev;
u32 errata;
#if defined(CONFIG_ARCH_OMAP3) && defined(CONFIG_PM)
@@ -83,75 +104,47 @@ struct omap_uart_state {
};
static LIST_HEAD(uart_list);
+static u8 num_uarts;
-static struct plat_serial8250_port serial_platform_data0[] = {
- {
- .irq = 72,
- .flags = UPF_BOOT_AUTOCONF,
- .iotype = UPIO_MEM,
- .regshift = 2,
- .uartclk = OMAP24XX_BASE_BAUD * 16,
- }, {
- .flags = 0
- }
-};
+/*
+ * Since these idle/enable hooks are used in the idle path itself
+ * which has interrupts disabled, use the non-locking versions of
+ * the hwmod enable/disable functions.
+ */
+static int uart_idle_hwmod(struct omap_device *od)
+{
+ _omap_hwmod_idle(od->hwmods[0]);
-static struct plat_serial8250_port serial_platform_data1[] = {
- {
- .irq = 73,
- .flags = UPF_BOOT_AUTOCONF,
- .iotype = UPIO_MEM,
- .regshift = 2,
- .uartclk = OMAP24XX_BASE_BAUD * 16,
- }, {
- .flags = 0
- }
-};
+ return 0;
+}
-static struct plat_serial8250_port serial_platform_data2[] = {
- {
- .irq = 74,
- .flags = UPF_BOOT_AUTOCONF,
- .iotype = UPIO_MEM,
- .regshift = 2,
- .uartclk = OMAP24XX_BASE_BAUD * 16,
- }, {
- .flags = 0
- }
-};
+static int uart_enable_hwmod(struct omap_device *od)
+{
+ _omap_hwmod_enable(od->hwmods[0]);
-static struct plat_serial8250_port serial_platform_data3[] = {
+ return 0;
+}
+
+static struct omap_device_pm_latency omap_uart_latency[] = {
{
- .irq = 70,
- .flags = UPF_BOOT_AUTOCONF,
- .iotype = UPIO_MEM,
- .regshift = 2,
- .uartclk = OMAP24XX_BASE_BAUD * 16,
- }, {
- .flags = 0
- }
+ .deactivate_func = uart_idle_hwmod,
+ .activate_func = uart_enable_hwmod,
+ .flags = OMAP_DEVICE_LATENCY_AUTO_ADJUST,
+ },
};
-void __init omap2_set_globals_uart(struct omap_globals *omap2_globals)
-{
- serial_platform_data0[0].mapbase = omap2_globals->uart1_phys;
- serial_platform_data1[0].mapbase = omap2_globals->uart2_phys;
- serial_platform_data2[0].mapbase = omap2_globals->uart3_phys;
- serial_platform_data3[0].mapbase = omap2_globals->uart4_phys;
-}
-
static inline unsigned int __serial_read_reg(struct uart_port *up,
- int offset)
+ int offset)
{
offset <<= up->regshift;
return (unsigned int)__raw_readb(up->membase + offset);
}
-static inline unsigned int serial_read_reg(struct plat_serial8250_port *up,
+static inline unsigned int serial_read_reg(struct omap_uart_state *uart,
int offset)
{
- offset <<= up->regshift;
- return (unsigned int)__raw_readb(up->membase + offset);
+ offset <<= uart->regshift;
+ return (unsigned int)__raw_readb(uart->membase + offset);
}
static inline void __serial_write_reg(struct uart_port *up, int offset,
@@ -161,11 +154,11 @@ static inline void __serial_write_reg(struct uart_port *up, int offset,
__raw_writeb(value, up->membase + offset);
}
-static inline void serial_write_reg(struct plat_serial8250_port *p, int offset,
+static inline void serial_write_reg(struct omap_uart_state *uart, int offset,
int value)
{
- offset <<= p->regshift;
- __raw_writeb(value, p->membase + offset);
+ offset <<= uart->regshift;
+ __raw_writeb(value, uart->membase + offset);
}
/*
@@ -173,14 +166,12 @@ static inline void serial_write_reg(struct plat_serial8250_port *p, int offset,
* properly. Note that the TX watermark initialization may not be needed
* once the 8250.c watermark handling code is merged.
*/
+
static inline void __init omap_uart_reset(struct omap_uart_state *uart)
{
- struct plat_serial8250_port *p = uart->p;
-
- serial_write_reg(p, UART_OMAP_MDR1, 0x07);
- serial_write_reg(p, UART_OMAP_SCR, 0x08);
- serial_write_reg(p, UART_OMAP_MDR1, 0x00);
- serial_write_reg(p, UART_OMAP_SYSC, (0x02 << 3) | (1 << 2) | (1 << 0));
+ serial_write_reg(uart, UART_OMAP_MDR1, 0x07);
+ serial_write_reg(uart, UART_OMAP_SCR, 0x08);
+ serial_write_reg(uart, UART_OMAP_MDR1, 0x00);
}
#if defined(CONFIG_PM) && defined(CONFIG_ARCH_OMAP3)
@@ -197,24 +188,23 @@ static inline void __init omap_uart_reset(struct omap_uart_state *uart)
static void omap_uart_mdr1_errataset(struct omap_uart_state *uart, u8 mdr1_val,
u8 fcr_val)
{
- struct plat_serial8250_port *p = uart->p;
u8 timeout = 255;
- serial_write_reg(p, UART_OMAP_MDR1, mdr1_val);
+ serial_write_reg(uart, UART_OMAP_MDR1, mdr1_val);
udelay(2);
- serial_write_reg(p, UART_FCR, fcr_val | UART_FCR_CLEAR_XMIT |
+ serial_write_reg(uart, UART_FCR, fcr_val | UART_FCR_CLEAR_XMIT |
UART_FCR_CLEAR_RCVR);
/*
* Wait for FIFO to empty: when empty, RX_FIFO_E bit is 0 and
* TX_FIFO_E bit is 1.
*/
- while (UART_LSR_THRE != (serial_read_reg(p, UART_LSR) &
+ while (UART_LSR_THRE != (serial_read_reg(uart, UART_LSR) &
(UART_LSR_THRE | UART_LSR_DR))) {
timeout--;
if (!timeout) {
/* Should *never* happen. we warn and carry on */
- dev_crit(&uart->pdev.dev, "Errata i202: timedout %x\n",
- serial_read_reg(p, UART_LSR));
+ dev_crit(&uart->pdev->dev, "Errata i202: timedout %x\n",
+ serial_read_reg(uart, UART_LSR));
break;
}
udelay(1);
@@ -224,23 +214,22 @@ static void omap_uart_mdr1_errataset(struct omap_uart_state *uart, u8 mdr1_val,
static void omap_uart_save_context(struct omap_uart_state *uart)
{
u16 lcr = 0;
- struct plat_serial8250_port *p = uart->p;
if (!enable_off_mode)
return;
- lcr = serial_read_reg(p, UART_LCR);
- serial_write_reg(p, UART_LCR, 0xBF);
- uart->dll = serial_read_reg(p, UART_DLL);
- uart->dlh = serial_read_reg(p, UART_DLM);
- serial_write_reg(p, UART_LCR, lcr);
- uart->ier = serial_read_reg(p, UART_IER);
- uart->sysc = serial_read_reg(p, UART_OMAP_SYSC);
- uart->scr = serial_read_reg(p, UART_OMAP_SCR);
- uart->wer = serial_read_reg(p, UART_OMAP_WER);
- serial_write_reg(p, UART_LCR, 0x80);
- uart->mcr = serial_read_reg(p, UART_MCR);
- serial_write_reg(p, UART_LCR, lcr);
+ lcr = serial_read_reg(uart, UART_LCR);
+ serial_write_reg(uart, UART_LCR, 0xBF);
+ uart->dll = serial_read_reg(uart, UART_DLL);
+ uart->dlh = serial_read_reg(uart, UART_DLM);
+ serial_write_reg(uart, UART_LCR, lcr);
+ uart->ier = serial_read_reg(uart, UART_IER);
+ uart->sysc = serial_read_reg(uart, UART_OMAP_SYSC);
+ uart->scr = serial_read_reg(uart, UART_OMAP_SCR);
+ uart->wer = serial_read_reg(uart, UART_OMAP_WER);
+ serial_write_reg(uart, UART_LCR, 0x80);
+ uart->mcr = serial_read_reg(uart, UART_MCR);
+ serial_write_reg(uart, UART_LCR, lcr);
uart->context_valid = 1;
}
@@ -248,7 +237,6 @@ static void omap_uart_save_context(struct omap_uart_state *uart)
static void omap_uart_restore_context(struct omap_uart_state *uart)
{
u16 efr = 0;
- struct plat_serial8250_port *p = uart->p;
if (!enable_off_mode)
return;
@@ -261,29 +249,30 @@ static void omap_uart_restore_context(struct omap_uart_state *uart)
if (uart->errata & UART_ERRATA_i202_MDR1_ACCESS)
omap_uart_mdr1_errataset(uart, 0x07, 0xA0);
else
- serial_write_reg(p, UART_OMAP_MDR1, 0x7);
- serial_write_reg(p, UART_LCR, 0xBF); /* Config B mode */
- efr = serial_read_reg(p, UART_EFR);
- serial_write_reg(p, UART_EFR, UART_EFR_ECB);
- serial_write_reg(p, UART_LCR, 0x0); /* Operational mode */
- serial_write_reg(p, UART_IER, 0x0);
- serial_write_reg(p, UART_LCR, 0xBF); /* Config B mode */
- serial_write_reg(p, UART_DLL, uart->dll);
- serial_write_reg(p, UART_DLM, uart->dlh);
- serial_write_reg(p, UART_LCR, 0x0); /* Operational mode */
- serial_write_reg(p, UART_IER, uart->ier);
- serial_write_reg(p, UART_LCR, 0x80);
- serial_write_reg(p, UART_MCR, uart->mcr);
- serial_write_reg(p, UART_LCR, 0xBF); /* Config B mode */
- serial_write_reg(p, UART_EFR, efr);
- serial_write_reg(p, UART_LCR, UART_LCR_WLEN8);
- serial_write_reg(p, UART_OMAP_SCR, uart->scr);
- serial_write_reg(p, UART_OMAP_WER, uart->wer);
- serial_write_reg(p, UART_OMAP_SYSC, uart->sysc);
+ serial_write_reg(uart, UART_OMAP_MDR1, 0x7);
+ serial_write_reg(uart, UART_LCR, 0xBF); /* Config B mode */
+ efr = serial_read_reg(uart, UART_EFR);
+ serial_write_reg(uart, UART_EFR, UART_EFR_ECB);
+ serial_write_reg(uart, UART_LCR, 0x0); /* Operational mode */
+ serial_write_reg(uart, UART_IER, 0x0);
+ serial_write_reg(uart, UART_LCR, 0xBF); /* Config B mode */
+ serial_write_reg(uart, UART_DLL, uart->dll);
+ serial_write_reg(uart, UART_DLM, uart->dlh);
+ serial_write_reg(uart, UART_LCR, 0x0); /* Operational mode */
+ serial_write_reg(uart, UART_IER, uart->ier);
+ serial_write_reg(uart, UART_LCR, 0x80);
+ serial_write_reg(uart, UART_MCR, uart->mcr);
+ serial_write_reg(uart, UART_LCR, 0xBF); /* Config B mode */
+ serial_write_reg(uart, UART_EFR, efr);
+ serial_write_reg(uart, UART_LCR, UART_LCR_WLEN8);
+ serial_write_reg(uart, UART_OMAP_SCR, uart->scr);
+ serial_write_reg(uart, UART_OMAP_WER, uart->wer);
+ serial_write_reg(uart, UART_OMAP_SYSC, uart->sysc);
if (uart->errata & UART_ERRATA_i202_MDR1_ACCESS)
omap_uart_mdr1_errataset(uart, 0x00, 0xA1);
else
- serial_write_reg(p, UART_OMAP_MDR1, 0x00); /* UART 16x mode */
+ /* UART 16x mode */
+ serial_write_reg(uart, UART_OMAP_MDR1, 0x00);
}
#else
static inline void omap_uart_save_context(struct omap_uart_state *uart) {}
@@ -295,8 +284,7 @@ static inline void omap_uart_enable_clocks(struct omap_uart_state *uart)
if (uart->clocked)
return;
- clk_enable(uart->ick);
- clk_enable(uart->fck);
+ omap_device_enable(uart->pdev);
uart->clocked = 1;
omap_uart_restore_context(uart);
}
@@ -310,8 +298,7 @@ static inline void omap_uart_disable_clocks(struct omap_uart_state *uart)
omap_uart_save_context(uart);
uart->clocked = 0;
- clk_disable(uart->ick);
- clk_disable(uart->fck);
+ omap_device_idle(uart->pdev);
}
static void omap_uart_enable_wakeup(struct omap_uart_state *uart)
@@ -349,18 +336,24 @@ static void omap_uart_disable_wakeup(struct omap_uart_state *uart)
}
static void omap_uart_smart_idle_enable(struct omap_uart_state *uart,
- int enable)
+ int enable)
{
- struct plat_serial8250_port *p = uart->p;
- u16 sysc;
+ u8 idlemode;
- sysc = serial_read_reg(p, UART_OMAP_SYSC) & 0x7;
- if (enable)
- sysc |= 0x2 << 3;
- else
- sysc |= 0x1 << 3;
+ if (enable) {
+ /**
+ * Errata 2.15: [UART]:Cannot Acknowledge Idle Requests
+ * in Smartidle Mode When Configured for DMA Operations.
+ */
+ if (uart->dma_enabled)
+ idlemode = HWMOD_IDLEMODE_FORCE;
+ else
+ idlemode = HWMOD_IDLEMODE_SMART;
+ } else {
+ idlemode = HWMOD_IDLEMODE_NO;
+ }
- serial_write_reg(p, UART_OMAP_SYSC, sysc);
+ omap_hwmod_set_slave_idlemode(uart->oh, idlemode);
}
static void omap_uart_block_sleep(struct omap_uart_state *uart)
@@ -377,7 +370,7 @@ static void omap_uart_block_sleep(struct omap_uart_state *uart)
static void omap_uart_allow_sleep(struct omap_uart_state *uart)
{
- if (device_may_wakeup(&uart->pdev.dev))
+ if (device_may_wakeup(&uart->pdev->dev))
omap_uart_enable_wakeup(uart);
else
omap_uart_disable_wakeup(uart);
@@ -414,7 +407,7 @@ void omap_uart_resume_idle(int num)
struct omap_uart_state *uart;
list_for_each_entry(uart, &uart_list, node) {
- if (num == uart->num) {
+ if (num == uart->num && uart->can_sleep) {
omap_uart_enable_clocks(uart);
/* Check for IO pad wakeup */
@@ -472,6 +465,7 @@ int omap_uart_can_sleep(void)
* UART will not idle or sleep for its timeout period.
*
**/
+/* static int first_interrupt; */
static irqreturn_t omap_uart_interrupt(int irq, void *dev_id)
{
struct omap_uart_state *uart = dev_id;
@@ -483,7 +477,6 @@ static irqreturn_t omap_uart_interrupt(int irq, void *dev_id)
static void omap_uart_idle_init(struct omap_uart_state *uart)
{
- struct plat_serial8250_port *p = uart->p;
int ret;
uart->can_sleep = 0;
@@ -495,7 +488,7 @@ static void omap_uart_idle_init(struct omap_uart_state *uart)
omap_uart_smart_idle_enable(uart, 0);
if (cpu_is_omap34xx()) {
- u32 mod = (uart->num == 2) ? OMAP3430_PER_MOD : CORE_MOD;
+ u32 mod = (uart->num > 1) ? OMAP3430_PER_MOD : CORE_MOD;
u32 wk_mask = 0;
u32 padconf = 0;
@@ -514,19 +507,17 @@ static void omap_uart_idle_init(struct omap_uart_state *uart)
wk_mask = OMAP3430_ST_UART3_MASK;
padconf = 0x19e;
break;
+ case 3:
+ wk_mask = OMAP3630_ST_UART4_MASK;
+ padconf = 0x0d2;
+ break;
}
uart->wk_mask = wk_mask;
uart->padconf = padconf;
} else if (cpu_is_omap24xx()) {
u32 wk_mask = 0;
+ u32 wk_en = PM_WKEN1, wk_st = PM_WKST1;
- if (cpu_is_omap2430()) {
- uart->wk_en = OMAP2430_PRM_REGADDR(CORE_MOD, PM_WKEN1);
- uart->wk_st = OMAP2430_PRM_REGADDR(CORE_MOD, PM_WKST1);
- } else if (cpu_is_omap2420()) {
- uart->wk_en = OMAP2420_PRM_REGADDR(CORE_MOD, PM_WKEN1);
- uart->wk_st = OMAP2420_PRM_REGADDR(CORE_MOD, PM_WKST1);
- }
switch (uart->num) {
case 0:
wk_mask = OMAP24XX_ST_UART1_MASK;
@@ -535,10 +526,19 @@ static void omap_uart_idle_init(struct omap_uart_state *uart)
wk_mask = OMAP24XX_ST_UART2_MASK;
break;
case 2:
+ wk_en = OMAP24XX_PM_WKEN2;
+ wk_st = OMAP24XX_PM_WKST2;
wk_mask = OMAP24XX_ST_UART3_MASK;
break;
}
uart->wk_mask = wk_mask;
+ if (cpu_is_omap2430()) {
+ uart->wk_en = OMAP2430_PRM_REGADDR(CORE_MOD, wk_en);
+ uart->wk_st = OMAP2430_PRM_REGADDR(CORE_MOD, wk_st);
+ } else if (cpu_is_omap2420()) {
+ uart->wk_en = OMAP2420_PRM_REGADDR(CORE_MOD, wk_en);
+ uart->wk_st = OMAP2420_PRM_REGADDR(CORE_MOD, wk_st);
+ }
} else {
uart->wk_en = NULL;
uart->wk_st = NULL;
@@ -546,9 +546,9 @@ static void omap_uart_idle_init(struct omap_uart_state *uart)
uart->padconf = 0;
}
- p->irqflags |= IRQF_SHARED;
- ret = request_irq(p->irq, omap_uart_interrupt, IRQF_SHARED,
- "serial idle", (void *)uart);
+ uart->irqflags |= IRQF_SHARED;
+ ret = request_threaded_irq(uart->irq, NULL, omap_uart_interrupt,
+ IRQF_SHARED, "serial idle", (void *)uart);
WARN_ON(ret);
}
@@ -558,11 +558,17 @@ void omap_uart_enable_irqs(int enable)
struct omap_uart_state *uart;
list_for_each_entry(uart, &uart_list, node) {
- if (enable)
- ret = request_irq(uart->p->irq, omap_uart_interrupt,
- IRQF_SHARED, "serial idle", (void *)uart);
- else
- free_irq(uart->p->irq, (void *)uart);
+ if (enable) {
+ pm_runtime_put_sync(&uart->pdev->dev);
+ ret = request_threaded_irq(uart->irq, NULL,
+ omap_uart_interrupt,
+ IRQF_SHARED,
+ "serial idle",
+ (void *)uart);
+ } else {
+ pm_runtime_get_noresume(&uart->pdev->dev);
+ free_irq(uart->irq, (void *)uart);
+ }
}
}
@@ -570,10 +576,9 @@ static ssize_t sleep_timeout_show(struct device *dev,
struct device_attribute *attr,
char *buf)
{
- struct platform_device *pdev = container_of(dev,
- struct platform_device, dev);
- struct omap_uart_state *uart = container_of(pdev,
- struct omap_uart_state, pdev);
+ struct platform_device *pdev = to_platform_device(dev);
+ struct omap_device *odev = to_omap_device(pdev);
+ struct omap_uart_state *uart = odev->hwmods[0]->dev_attr;
return sprintf(buf, "%u\n", uart->timeout / HZ);
}
@@ -582,10 +587,9 @@ static ssize_t sleep_timeout_store(struct device *dev,
struct device_attribute *attr,
const char *buf, size_t n)
{
- struct platform_device *pdev = container_of(dev,
- struct platform_device, dev);
- struct omap_uart_state *uart = container_of(pdev,
- struct omap_uart_state, pdev);
+ struct platform_device *pdev = to_platform_device(dev);
+ struct omap_device *odev = to_omap_device(pdev);
+ struct omap_uart_state *uart = odev->hwmods[0]->dev_attr;
unsigned int value;
if (sscanf(buf, "%u", &value) != 1) {
@@ -608,48 +612,15 @@ static DEVICE_ATTR(sleep_timeout, 0644, sleep_timeout_show,
#define DEV_CREATE_FILE(dev, attr) WARN_ON(device_create_file(dev, attr))
#else
static inline void omap_uart_idle_init(struct omap_uart_state *uart) {}
+static void omap_uart_block_sleep(struct omap_uart_state *uart)
+{
+ /* Needed to enable UART clocks when built without CONFIG_PM */
+ omap_uart_enable_clocks(uart);
+}
#define DEV_CREATE_FILE(dev, attr)
#endif /* CONFIG_PM */
-static struct omap_uart_state omap_uart[] = {
- {
- .pdev = {
- .name = "serial8250",
- .id = PLAT8250_DEV_PLATFORM,
- .dev = {
- .platform_data = serial_platform_data0,
- },
- },
- }, {
- .pdev = {
- .name = "serial8250",
- .id = PLAT8250_DEV_PLATFORM1,
- .dev = {
- .platform_data = serial_platform_data1,
- },
- },
- }, {
- .pdev = {
- .name = "serial8250",
- .id = PLAT8250_DEV_PLATFORM2,
- .dev = {
- .platform_data = serial_platform_data2,
- },
- },
- },
-#if defined(CONFIG_ARCH_OMAP3) || defined(CONFIG_ARCH_OMAP4)
- {
- .pdev = {
- .name = "serial8250",
- .id = 3,
- .dev = {
- .platform_data = serial_platform_data3,
- },
- },
- },
-#endif
-};
-
+#ifndef CONFIG_SERIAL_OMAP
/*
* Override the default 8250 read handler: mem_serial_in()
* Empty RX fifo read causes an abort on omap3630 and omap4
@@ -682,71 +653,44 @@ static void serial_out_override(struct uart_port *up, int offset, int value)
}
__serial_write_reg(up, offset, value);
}
+#endif
+
void __init omap_serial_early_init(void)
{
- int i, nr_ports;
- char name[16];
+ int i = 0;
- if (!(cpu_is_omap3630() || cpu_is_omap4430()))
- nr_ports = 3;
- else
- nr_ports = ARRAY_SIZE(omap_uart);
+ do {
+ char oh_name[MAX_UART_HWMOD_NAME_LEN];
+ struct omap_hwmod *oh;
+ struct omap_uart_state *uart;
- /*
- * Make sure the serial ports are muxed on at this point.
- * You have to mux them off in device drivers later on
- * if not needed.
- */
+ snprintf(oh_name, MAX_UART_HWMOD_NAME_LEN,
+ "uart%d", i + 1);
+ oh = omap_hwmod_lookup(oh_name);
+ if (!oh)
+ break;
- for (i = 0; i < nr_ports; i++) {
- struct omap_uart_state *uart = &omap_uart[i];
- struct platform_device *pdev = &uart->pdev;
- struct device *dev = &pdev->dev;
- struct plat_serial8250_port *p = dev->platform_data;
+ uart = kzalloc(sizeof(struct omap_uart_state), GFP_KERNEL);
+ if (WARN_ON(!uart))
+ return;
+
+ uart->oh = oh;
+ uart->num = i++;
+ list_add_tail(&uart->node, &uart_list);
+ num_uarts++;
- /* Don't map zero-based physical address */
- if (p->mapbase == 0) {
- dev_warn(dev, "no physical address for uart#%d,"
- " so skipping early_init...\n", i);
- continue;
- }
/*
- * Module 4KB + L4 interconnect 4KB
- * Static mapping, never released
+ * NOTE: omap_hwmod_init() has not yet been called,
+ * so no hwmod functions will work yet.
*/
- p->membase = ioremap(p->mapbase, SZ_8K);
- if (!p->membase) {
- dev_err(dev, "ioremap failed for uart%i\n", i + 1);
- continue;
- }
-
- sprintf(name, "uart%d_ick", i + 1);
- uart->ick = clk_get(NULL, name);
- if (IS_ERR(uart->ick)) {
- dev_err(dev, "Could not get uart%d_ick\n", i + 1);
- uart->ick = NULL;
- }
-
- sprintf(name, "uart%d_fck", i+1);
- uart->fck = clk_get(NULL, name);
- if (IS_ERR(uart->fck)) {
- dev_err(dev, "Could not get uart%d_fck\n", i + 1);
- uart->fck = NULL;
- }
- /* FIXME: Remove this once the clkdev is ready */
- if (!cpu_is_omap44xx()) {
- if (!uart->ick || !uart->fck)
- continue;
- }
-
- uart->num = i;
- p->private_data = uart;
- uart->p = p;
-
- if (cpu_is_omap44xx())
- p->irq += 32;
- }
+ /*
+ * During UART early init, the device needs to be probed
+ * to determine SoC-specific init before omap_device
+ * is ready. Therefore, don't allow idle here.
+ */
+ uart->oh->flags |= HWMOD_INIT_NO_IDLE | HWMOD_INIT_NO_RESET;
+ } while (1);
}
/**
@@ -763,53 +707,139 @@ void __init omap_serial_early_init(void)
void __init omap_serial_init_port(int port)
{
struct omap_uart_state *uart;
- struct platform_device *pdev;
- struct device *dev;
-
- BUG_ON(port < 0);
- BUG_ON(port >= ARRAY_SIZE(omap_uart));
-
- uart = &omap_uart[port];
- pdev = &uart->pdev;
- dev = &pdev->dev;
+ struct omap_hwmod *oh;
+ struct omap_device *od;
+ void *pdata = NULL;
+ u32 pdata_size = 0;
+ char *name;
+#ifndef CONFIG_SERIAL_OMAP
+ struct plat_serial8250_port ports[2] = {
+ {},
+ {.flags = 0},
+ };
+ struct plat_serial8250_port *p = &ports[0];
+#else
+ struct omap_uart_port_info omap_up;
+#endif
- /* Don't proceed if there's no clocks available */
- if (unlikely(!uart->ick || !uart->fck)) {
- WARN(1, "%s: can't init uart%d, no clocks available\n",
- kobject_name(&dev->kobj), port);
+ if (WARN_ON(port < 0))
+ return;
+ if (WARN_ON(port >= num_uarts))
return;
- }
-
- omap_uart_enable_clocks(uart);
-
- omap_uart_reset(uart);
- omap_uart_idle_init(uart);
- list_add_tail(&uart->node, &uart_list);
+ list_for_each_entry(uart, &uart_list, node)
+ if (port == uart->num)
+ break;
- if (WARN_ON(platform_device_register(pdev)))
- return;
+ oh = uart->oh;
+ uart->dma_enabled = 0;
+#ifndef CONFIG_SERIAL_OMAP
+ name = "serial8250";
- if ((cpu_is_omap34xx() && uart->padconf) ||
- (uart->wk_en && uart->wk_mask)) {
- device_init_wakeup(dev, true);
- DEV_CREATE_FILE(dev, &dev_attr_sleep_timeout);
- }
+ /*
+ * !! The 8250 driver does not use standard IORESOURCE*. It
+ * has its own custom pdata that can be taken from
+ * the hwmod resource data. But, this needs to be
+ * done after the build.
+ *
+ * ?? does it have to be done before the register ??
+ * YES, because platform_device_data_add() copies
+ * pdata, it does not use a pointer.
+ */
+ p->flags = UPF_BOOT_AUTOCONF;
+ p->iotype = UPIO_MEM;
+ p->regshift = 2;
+ p->uartclk = OMAP24XX_BASE_BAUD * 16;
+ p->irq = oh->mpu_irqs[0].irq;
+ p->mapbase = oh->slaves[0]->addr->pa_start;
+ p->membase = omap_hwmod_get_mpu_rt_va(oh);
+ p->irqflags = IRQF_SHARED;
+ p->private_data = uart;
/*
* omap44xx: Never read empty UART fifo
* omap3xxx: Never read empty UART fifo on UARTs
* with IP rev >=0x52
*/
+ uart->regshift = p->regshift;
+ uart->membase = p->membase;
if (cpu_is_omap44xx())
uart->errata |= UART_ERRATA_FIFO_FULL_ABORT;
- else if ((serial_read_reg(uart->p, UART_OMAP_MVER) & 0xFF)
+ else if ((serial_read_reg(uart, UART_OMAP_MVER) & 0xFF)
>= UART_OMAP_NO_EMPTY_FIFO_READ_IP_REV)
uart->errata |= UART_ERRATA_FIFO_FULL_ABORT;
if (uart->errata & UART_ERRATA_FIFO_FULL_ABORT) {
- uart->p->serial_in = serial_in_override;
- uart->p->serial_out = serial_out_override;
+ p->serial_in = serial_in_override;
+ p->serial_out = serial_out_override;
+ }
+
+ pdata = &ports[0];
+ pdata_size = 2 * sizeof(struct plat_serial8250_port);
+#else
+
+ name = DRIVER_NAME;
+
+ omap_up.dma_enabled = uart->dma_enabled;
+ omap_up.uartclk = OMAP24XX_BASE_BAUD * 16;
+ omap_up.mapbase = oh->slaves[0]->addr->pa_start;
+ omap_up.membase = omap_hwmod_get_mpu_rt_va(oh);
+ omap_up.irqflags = IRQF_SHARED;
+ omap_up.flags = UPF_BOOT_AUTOCONF | UPF_SHARE_IRQ;
+
+ pdata = &omap_up;
+ pdata_size = sizeof(struct omap_uart_port_info);
+#endif
+
+ if (WARN_ON(!oh))
+ return;
+
+ od = omap_device_build(name, uart->num, oh, pdata, pdata_size,
+ omap_uart_latency,
+ ARRAY_SIZE(omap_uart_latency), false);
+ WARN(IS_ERR(od), "Could not build omap_device for %s: %s.\n",
+ name, oh->name);
+
+ uart->irq = oh->mpu_irqs[0].irq;
+ uart->regshift = 2;
+ uart->mapbase = oh->slaves[0]->addr->pa_start;
+ uart->membase = omap_hwmod_get_mpu_rt_va(oh);
+ uart->pdev = &od->pdev;
+
+ oh->dev_attr = uart;
+
+ acquire_console_sem(); /* in case the earlycon is on the UART */
+
+ /*
+ * Because of early UART probing, UART did not get idled
+ * on init. Now that omap_device is ready, ensure full idle
+ * before doing omap_device_enable().
+ */
+ omap_hwmod_idle(uart->oh);
+
+ omap_device_enable(uart->pdev);
+ omap_uart_idle_init(uart);
+ omap_uart_reset(uart);
+ omap_hwmod_enable_wakeup(uart->oh);
+ omap_device_idle(uart->pdev);
+
+ /*
+ * Need to block sleep long enough for interrupt driven
+ * driver to start. Console driver is in polling mode
+ * so device needs to be kept enabled while polling driver
+ * is in use.
+ */
+ if (uart->timeout)
+ uart->timeout = (30 * HZ);
+ omap_uart_block_sleep(uart);
+ uart->timeout = DEFAULT_TIMEOUT;
+
+ release_console_sem();
+
+ if ((cpu_is_omap34xx() && uart->padconf) ||
+ (uart->wk_en && uart->wk_mask)) {
+ device_init_wakeup(&od->pdev.dev, true);
+ DEV_CREATE_FILE(&od->pdev.dev, &dev_attr_sleep_timeout);
}
/* Enable the MDR1 errata for OMAP3 */
@@ -826,13 +856,8 @@ void __init omap_serial_init_port(int port)
*/
void __init omap_serial_init(void)
{
- int i, nr_ports;
-
- if (!(cpu_is_omap3630() || cpu_is_omap4430()))
- nr_ports = 3;
- else
- nr_ports = ARRAY_SIZE(omap_uart);
+ struct omap_uart_state *uart;
- for (i = 0; i < nr_ports; i++)
- omap_serial_init_port(i);
+ list_for_each_entry(uart, &uart_list, node)
+ omap_serial_init_port(uart->num);
}
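For context, a board file would now bring the ports up through the two entry points reworked above; a minimal sketch, with a hypothetical board hook name:

/*
 * Hedged board-init sketch: UART data now comes from the hwmod lookups
 * done in omap_serial_early_init(), so a board only registers the ports.
 */
static void __init example_board_init_serial(void)
{
	/* register every UART discovered during early init ... */
	omap_serial_init();

	/* ... or only a specific port (numbering is zero-based):
	 * omap_serial_init_port(2);
	 */
}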
diff --git a/arch/arm/mach-omap2/sleep34xx.S b/arch/arm/mach-omap2/sleep34xx.S
index ba53191ae4c5..2fb205a7f285 100644
--- a/arch/arm/mach-omap2/sleep34xx.S
+++ b/arch/arm/mach-omap2/sleep34xx.S
@@ -27,11 +27,11 @@
#include <linux/linkage.h>
#include <asm/assembler.h>
#include <mach/io.h>
-#include <plat/control.h>
#include "cm.h"
#include "prm.h"
#include "sdrc.h"
+#include "control.h"
#define SDRC_SCRATCHPAD_SEM_V 0xfa00291c
diff --git a/arch/arm/mach-omap2/sram34xx.S b/arch/arm/mach-omap2/sram34xx.S
index de99ba2a57ab..3637274af5be 100644
--- a/arch/arm/mach-omap2/sram34xx.S
+++ b/arch/arm/mach-omap2/sram34xx.S
@@ -129,8 +129,11 @@ ENTRY(omap3_sram_configure_core_dpll)
ldr r4, [sp, #80]
str r4, omap_sdrc_mr_1_val
skip_cs1_params:
+ mrc p15, 0, r8, c1, c0, 0 @ read ctrl register
+ bic r10, r8, #0x800 @ clear Z-bit, disable branch prediction
+ mcr p15, 0, r10, c1, c0, 0 @ write ctrl register
dsb @ flush buffered writes to interconnect
-
+ isb @ prevent speculative exec past here
cmp r3, #1 @ if increasing SDRC clk rate,
bleq configure_sdrc @ program the SDRC regs early (for RFR)
cmp r1, #SDRC_UNLOCK_DLL @ set the intended DLL state
@@ -148,6 +151,7 @@ skip_cs1_params:
beq return_to_sdram @ return to SDRAM code, otherwise,
bl configure_sdrc @ reprogram SDRC regs now
return_to_sdram:
+ mcr p15, 0, r8, c1, c0, 0 @ restore ctrl register
isb @ prevent speculative exec past here
mov r0, #0 @ return value
ldmfd sp!, {r1-r12, pc} @ restore regs and return
diff --git a/arch/arm/mach-omap2/timer-gp.c b/arch/arm/mach-omap2/timer-gp.c
index 74fbed8491f2..e13c29eecf2b 100644
--- a/arch/arm/mach-omap2/timer-gp.c
+++ b/arch/arm/mach-omap2/timer-gp.c
@@ -40,6 +40,8 @@
#include <plat/dmtimer.h>
#include <asm/localtimer.h>
+#include "timer-gp.h"
+
/* MAX_GPTIMER_ID: number of GPTIMERs on the chip */
#define MAX_GPTIMER_ID 12
@@ -228,8 +230,10 @@ static void __init omap2_gp_clocksource_init(void)
static void __init omap2_gp_timer_init(void)
{
#ifdef CONFIG_LOCAL_TIMERS
- twd_base = ioremap(OMAP44XX_LOCAL_TWD_BASE, SZ_256);
- BUG_ON(!twd_base);
+ if (cpu_is_omap44xx()) {
+ twd_base = ioremap(OMAP44XX_LOCAL_TWD_BASE, SZ_256);
+ BUG_ON(!twd_base);
+ }
#endif
omap_dm_timer_init();
diff --git a/arch/arm/plat-omap/include/plat/timer-gp.h b/arch/arm/mach-omap2/timer-gp.h
index c88d346b59d9..5c1072c6783b 100644
--- a/arch/arm/plat-omap/include/plat/timer-gp.h
+++ b/arch/arm/mach-omap2/timer-gp.h
@@ -11,7 +11,6 @@
#ifndef __ARCH_ARM_PLAT_OMAP_INCLUDE_MACH_TIMER_GP_H
#define __ARCH_ARM_PLAT_OMAP_INCLUDE_MACH_TIMER_GP_H
-int __init omap2_gp_clockevent_set_gptimer(u8 id);
+extern int __init omap2_gp_clockevent_set_gptimer(u8 id);
#endif
-
diff --git a/arch/arm/mach-omap2/usb-fs.c b/arch/arm/mach-omap2/usb-fs.c
index a216d88b04b5..1481078763b8 100644
--- a/arch/arm/mach-omap2/usb-fs.c
+++ b/arch/arm/mach-omap2/usb-fs.c
@@ -29,18 +29,18 @@
#include <asm/irq.h>
-#include <plat/control.h>
#include <plat/usb.h>
#include <plat/board.h>
+#include "control.h"
+#include "mux.h"
+
#define INT_USB_IRQ_GEN INT_24XX_USB_IRQ_GEN
#define INT_USB_IRQ_NISO INT_24XX_USB_IRQ_NISO
#define INT_USB_IRQ_ISO INT_24XX_USB_IRQ_ISO
#define INT_USB_IRQ_HGEN INT_24XX_USB_IRQ_HGEN
#define INT_USB_IRQ_OTG INT_24XX_USB_IRQ_OTG
-#include "mux.h"
-
#if defined(CONFIG_ARCH_OMAP2)
#ifdef CONFIG_USB_GADGET_OMAP
diff --git a/arch/arm/mach-orion5x/mpp.c b/arch/arm/mach-orion5x/mpp.c
index bc4c3b9aaf83..db485d3b8144 100644
--- a/arch/arm/mach-orion5x/mpp.c
+++ b/arch/arm/mach-orion5x/mpp.c
@@ -127,7 +127,7 @@ void __init orion5x_mpp_conf(struct orion5x_mpp_mode *mode)
/* Initialize gpiolib. */
orion_gpio_init();
- while (mode->mpp >= 0) {
+ for ( ; mode->mpp >= 0; mode++) {
u32 *reg;
int num_type;
int shift;
@@ -160,8 +160,6 @@ void __init orion5x_mpp_conf(struct orion5x_mpp_mode *mode)
orion_gpio_set_unused(mode->mpp);
orion_gpio_set_valid(mode->mpp, !!(mode->type == MPP_GPIO));
-
- mode++;
}
writel(mpp_0_7_ctrl, MPP_0_7_CTRL);
diff --git a/arch/arm/mach-orion5x/ts78xx-setup.c b/arch/arm/mach-orion5x/ts78xx-setup.c
index 16f1bd5324be..c1c1cd04bdde 100644
--- a/arch/arm/mach-orion5x/ts78xx-setup.c
+++ b/arch/arm/mach-orion5x/ts78xx-setup.c
@@ -239,7 +239,7 @@ static struct platform_nand_data ts78xx_ts_nand_data = {
static struct resource ts78xx_ts_nand_resources = {
.start = TS_NAND_DATA,
.end = TS_NAND_DATA + 4,
- .flags = IORESOURCE_IO,
+ .flags = IORESOURCE_MEM,
};
static struct platform_device ts78xx_ts_nand_device = {
diff --git a/arch/arm/mach-pnx4008/include/mach/vmalloc.h b/arch/arm/mach-pnx4008/include/mach/vmalloc.h
index 31b65ee07b0b..184913c71141 100644
--- a/arch/arm/mach-pnx4008/include/mach/vmalloc.h
+++ b/arch/arm/mach-pnx4008/include/mach/vmalloc.h
@@ -17,4 +17,4 @@
* The vmalloc() routines leaves a hole of 4kB between each vmalloced
* area for the same reason. ;)
*/
-#define VMALLOC_END 0xd0000000
+#define VMALLOC_END 0xd0000000UL
diff --git a/arch/arm/mach-pxa/cm-x2xx.c b/arch/arm/mach-pxa/cm-x2xx.c
index ac5598ce9724..d34b99febeb9 100644
--- a/arch/arm/mach-pxa/cm-x2xx.c
+++ b/arch/arm/mach-pxa/cm-x2xx.c
@@ -476,8 +476,6 @@ static void __init cmx2xx_init(void)
static void __init cmx2xx_init_irq(void)
{
- pxa27x_init_irq();
-
if (cpu_is_pxa25x()) {
pxa25x_init_irq();
cmx2xx_pci_init_irq(CMX255_GPIO_IT8152_IRQ);
diff --git a/arch/arm/mach-pxa/devices.c b/arch/arm/mach-pxa/devices.c
index 08b410343870..aaa1166df964 100644
--- a/arch/arm/mach-pxa/devices.c
+++ b/arch/arm/mach-pxa/devices.c
@@ -382,6 +382,31 @@ struct platform_device pxa_device_i2s = {
.num_resources = ARRAY_SIZE(pxai2s_resources),
};
+struct platform_device pxa_device_asoc_ssp1 = {
+ .name = "pxa-ssp-dai",
+ .id = 0,
+};
+
+struct platform_device pxa_device_asoc_ssp2= {
+ .name = "pxa-ssp-dai",
+ .id = 1,
+};
+
+struct platform_device pxa_device_asoc_ssp3 = {
+ .name = "pxa-ssp-dai",
+ .id = 2,
+};
+
+struct platform_device pxa_device_asoc_ssp4 = {
+ .name = "pxa-ssp-dai",
+ .id = 3,
+};
+
+struct platform_device pxa_device_asoc_platform = {
+ .name = "pxa-pcm-audio",
+ .id = -1,
+};
+
static u64 pxaficp_dmamask = ~(u32)0;
struct platform_device pxa_device_ficp = {
diff --git a/arch/arm/mach-pxa/devices.h b/arch/arm/mach-pxa/devices.h
index 715e8bd02e24..2fd5a8b35757 100644
--- a/arch/arm/mach-pxa/devices.h
+++ b/arch/arm/mach-pxa/devices.h
@@ -39,4 +39,10 @@ extern struct platform_device pxa3xx_device_i2c_power;
extern struct platform_device pxa3xx_device_gcu;
+extern struct platform_device pxa_device_asoc_platform;
+extern struct platform_device pxa_device_asoc_ssp1;
+extern struct platform_device pxa_device_asoc_ssp2;
+extern struct platform_device pxa_device_asoc_ssp3;
+extern struct platform_device pxa_device_asoc_ssp4;
+
void __init pxa_register_device(struct platform_device *dev, void *data);
diff --git a/arch/arm/mach-pxa/em-x270.c b/arch/arm/mach-pxa/em-x270.c
index ab48bb81b570..ed0dbfdb22ed 100644
--- a/arch/arm/mach-pxa/em-x270.c
+++ b/arch/arm/mach-pxa/em-x270.c
@@ -1015,7 +1015,6 @@ static struct soc_camera_link iclink = {
.power = em_x270_sensor_power,
.board_info = &em_x270_i2c_cam_info[0],
.i2c_adapter_id = 0,
- .module_name = "mt9m111",
};
static struct platform_device em_x270_camera = {
diff --git a/arch/arm/mach-pxa/ezx.c b/arch/arm/mach-pxa/ezx.c
index 80a9352d43f3..142c711f4cda 100644
--- a/arch/arm/mach-pxa/ezx.c
+++ b/arch/arm/mach-pxa/ezx.c
@@ -755,7 +755,6 @@ static struct soc_camera_link a780_iclink = {
.flags = SOCAM_SENSOR_INVERT_PCLK,
.i2c_adapter_id = 0,
.board_info = &a780_camera_i2c_board_info,
- .module_name = "mt9m111",
.power = a780_camera_power,
.reset = a780_camera_reset,
};
@@ -1024,7 +1023,6 @@ static struct soc_camera_link a910_iclink = {
.bus_id = 0,
.i2c_adapter_id = 0,
.board_info = &a910_camera_i2c_board_info,
- .module_name = "mt9m111",
.power = a910_camera_power,
.reset = a910_camera_reset,
};
diff --git a/arch/arm/mach-pxa/mioa701.c b/arch/arm/mach-pxa/mioa701.c
index 0c31fabfc7fd..f5fb915e1315 100644
--- a/arch/arm/mach-pxa/mioa701.c
+++ b/arch/arm/mach-pxa/mioa701.c
@@ -711,7 +711,6 @@ static struct soc_camera_link iclink = {
.bus_id = 0, /* Match id in pxa27x_device_camera in device.c */
.board_info = &mioa701_i2c_devices[0],
.i2c_adapter_id = 0,
- .module_name = "mt9m111",
};
struct i2c_pxa_platform_data i2c_pdata = {
diff --git a/arch/arm/mach-pxa/pcm990-baseboard.c b/arch/arm/mach-pxa/pcm990-baseboard.c
index f56ae1008759..f33647a8e0b7 100644
--- a/arch/arm/mach-pxa/pcm990-baseboard.c
+++ b/arch/arm/mach-pxa/pcm990-baseboard.c
@@ -453,7 +453,6 @@ static struct soc_camera_link iclink[] = {
.query_bus_param = pcm990_camera_query_bus_param,
.set_bus_param = pcm990_camera_set_bus_param,
.free_bus = pcm990_camera_free_bus,
- .module_name = "mt9v022",
}, {
.bus_id = 0, /* Must match with the camera ID */
.board_info = &pcm990_camera_i2c[1],
@@ -461,7 +460,6 @@ static struct soc_camera_link iclink[] = {
.query_bus_param = pcm990_camera_query_bus_param,
.set_bus_param = pcm990_camera_set_bus_param,
.free_bus = pcm990_camera_free_bus,
- .module_name = "mt9m001",
},
};
diff --git a/arch/arm/mach-pxa/pxa27x.c b/arch/arm/mach-pxa/pxa27x.c
index 12e5b9f01e6f..d1fbf29d561c 100644
--- a/arch/arm/mach-pxa/pxa27x.c
+++ b/arch/arm/mach-pxa/pxa27x.c
@@ -385,6 +385,10 @@ static struct platform_device *devices[] __initdata = {
&pxa27x_device_udc,
&pxa_device_pmu,
&pxa_device_i2s,
+ &pxa_device_asoc_ssp1,
+ &pxa_device_asoc_ssp2,
+ &pxa_device_asoc_ssp3,
+ &pxa_device_asoc_platform,
&sa1100_device_rtc,
&pxa_device_rtc,
&pxa27x_device_ssp1,
diff --git a/arch/arm/mach-pxa/pxa3xx.c b/arch/arm/mach-pxa/pxa3xx.c
index c85c3a7abd31..d1c747cdacf8 100644
--- a/arch/arm/mach-pxa/pxa3xx.c
+++ b/arch/arm/mach-pxa/pxa3xx.c
@@ -593,6 +593,11 @@ static struct platform_device *devices[] __initdata = {
&pxa27x_device_udc,
&pxa_device_pmu,
&pxa_device_i2s,
+ &pxa_device_asoc_ssp1,
+ &pxa_device_asoc_ssp2,
+ &pxa_device_asoc_ssp3,
+ &pxa_device_asoc_ssp4,
+ &pxa_device_asoc_platform,
&sa1100_device_rtc,
&pxa_device_rtc,
&pxa27x_device_ssp1,
diff --git a/arch/arm/mach-pxa/saar.c b/arch/arm/mach-pxa/saar.c
index 4b521e045d75..ffa50e633ee6 100644
--- a/arch/arm/mach-pxa/saar.c
+++ b/arch/arm/mach-pxa/saar.c
@@ -116,7 +116,7 @@ static struct platform_device smc91x_device = {
},
};
-#if defined(CONFIG_FB_PXA) || (CONFIG_FB_PXA_MODULE)
+#if defined(CONFIG_FB_PXA) || defined(CONFIG_FB_PXA_MODULE)
static uint16_t lcd_power_on[] = {
/* single frame */
SMART_CMD_NOOP,
diff --git a/arch/arm/mach-pxa/zylonite.c b/arch/arm/mach-pxa/zylonite.c
index f25fb6245bd7..702f7a68e87d 100644
--- a/arch/arm/mach-pxa/zylonite.c
+++ b/arch/arm/mach-pxa/zylonite.c
@@ -45,6 +45,16 @@ int wm9713_irq;
int lcd_id;
int lcd_orientation;
+struct platform_device pxa_device_wm9713_audio = {
+ .name = "wm9713-codec",
+ .id = -1,
+};
+
+static void __init zylonite_init_wm9713_audio(void)
+{
+ platform_device_register(&pxa_device_wm9713_audio);
+}
+
static struct resource smc91x_resources[] = {
[0] = {
.start = ZYLONITE_ETH_PHYS + 0x300,
@@ -408,6 +418,7 @@ static void __init zylonite_init(void)
zylonite_init_nand();
zylonite_init_leds();
zylonite_init_ohci();
+ zylonite_init_wm9713_audio();
}
MACHINE_START(ZYLONITE, "PXA3xx Platform Development Kit (aka Zylonite)")
diff --git a/arch/arm/mach-realview/headsmp.S b/arch/arm/mach-realview/headsmp.S
index 4075473cf68a..b34be4554d40 100644
--- a/arch/arm/mach-realview/headsmp.S
+++ b/arch/arm/mach-realview/headsmp.S
@@ -35,5 +35,6 @@ pen: ldr r7, [r6]
*/
b secondary_startup
+ .align
1: .long .
.long pen_release
diff --git a/arch/arm/mach-rpc/include/mach/vmalloc.h b/arch/arm/mach-rpc/include/mach/vmalloc.h
index 3bcd86fadb81..fb700228637a 100644
--- a/arch/arm/mach-rpc/include/mach/vmalloc.h
+++ b/arch/arm/mach-rpc/include/mach/vmalloc.h
@@ -7,4 +7,4 @@
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*/
-#define VMALLOC_END 0xdc000000
+#define VMALLOC_END 0xdc000000UL
diff --git a/arch/arm/mach-s3c2410/h1940-bluetooth.c b/arch/arm/mach-s3c2410/h1940-bluetooth.c
index 8cdeb14af592..6b86a722a7db 100644
--- a/arch/arm/mach-s3c2410/h1940-bluetooth.c
+++ b/arch/arm/mach-s3c2410/h1940-bluetooth.c
@@ -30,7 +30,7 @@ static void h1940bt_enable(int on)
{
if (on) {
/* Power on the chip */
- h1940_latch_control(0, H1940_LATCH_BLUETOOTH_POWER);
+ gpio_set_value(H1940_LATCH_BLUETOOTH_POWER, 1);
/* Reset the chip */
mdelay(10);
@@ -43,7 +43,7 @@ static void h1940bt_enable(int on)
mdelay(10);
gpio_set_value(S3C2410_GPH(1), 0);
mdelay(10);
- h1940_latch_control(H1940_LATCH_BLUETOOTH_POWER, 0);
+ gpio_set_value(H1940_LATCH_BLUETOOTH_POWER, 0);
}
}
@@ -64,19 +64,26 @@ static int __devinit h1940bt_probe(struct platform_device *pdev)
ret = gpio_request(S3C2410_GPH(1), dev_name(&pdev->dev));
if (ret) {
- dev_err(&pdev->dev, "could not get GPH1\n");\
+ dev_err(&pdev->dev, "could not get GPH1\n");
+ return ret;
+ }
+
+ ret = gpio_request(H1940_LATCH_BLUETOOTH_POWER, dev_name(&pdev->dev));
+ if (ret) {
+ gpio_free(S3C2410_GPH(1));
+ dev_err(&pdev->dev, "could not get BT_POWER\n");
return ret;
}
/* Configures BT serial port GPIOs */
s3c_gpio_cfgpin(S3C2410_GPH(0), S3C2410_GPH0_nCTS0);
- s3c_gpio_cfgpull(S3C2410_GPH(0), S3C_GPIO_PULL_NONE);
+ s3c_gpio_setpull(S3C2410_GPH(0), S3C_GPIO_PULL_NONE);
s3c_gpio_cfgpin(S3C2410_GPH(1), S3C2410_GPIO_OUTPUT);
- s3c_gpio_cfgpull(S3C2410_GPH(1), S3C_GPIO_PULL_NONE);
+ s3c_gpio_setpull(S3C2410_GPH(1), S3C_GPIO_PULL_NONE);
s3c_gpio_cfgpin(S3C2410_GPH(2), S3C2410_GPH2_TXD0);
- s3c_gpio_cfgpull(S3C2410_GPH(2), S3C_GPIO_PULL_NONE);
+ s3c_gpio_setpull(S3C2410_GPH(2), S3C_GPIO_PULL_NONE);
s3c_gpio_cfgpin(S3C2410_GPH(3), S3C2410_GPH3_RXD0);
- s3c_gpio_cfgpull(S3C2410_GPH(3), S3C_GPIO_PULL_NONE);
+ s3c_gpio_setpull(S3C2410_GPH(3), S3C_GPIO_PULL_NONE);
rfk = rfkill_alloc(DRV_NAME, &pdev->dev, RFKILL_TYPE_BLUETOOTH,
diff --git a/arch/arm/mach-s3c2410/include/mach/gpio.h b/arch/arm/mach-s3c2410/include/mach/gpio.h
index b649bf2ccd5c..f7f6b07df30e 100644
--- a/arch/arm/mach-s3c2410/include/mach/gpio.h
+++ b/arch/arm/mach-s3c2410/include/mach/gpio.h
@@ -22,6 +22,8 @@
#ifdef CONFIG_CPU_S3C244X
#define ARCH_NR_GPIOS (32 * 9 + CONFIG_S3C24XX_GPIO_EXTRA)
+#elif defined(CONFIG_CPU_S3C2443) || defined(CONFIG_CPU_S3C2416)
+#define ARCH_NR_GPIOS (32 * 12 + CONFIG_S3C24XX_GPIO_EXTRA)
#else
#define ARCH_NR_GPIOS (256 + CONFIG_S3C24XX_GPIO_EXTRA)
#endif
@@ -30,8 +32,10 @@
#include <mach/gpio-nrs.h>
#include <mach/gpio-fns.h>
-#ifdef CONFIG_CPU_S3C24XX
-#define S3C_GPIO_END (S3C2410_GPIO_BANKJ + 32)
+#ifdef CONFIG_CPU_S3C244X
+#define S3C_GPIO_END (S3C2410_GPJ(0) + 32)
+#elif defined(CONFIG_CPU_S3C2443) || defined(CONFIG_CPU_S3C2416)
+#define S3C_GPIO_END (S3C2410_GPM(0) + 32)
#else
-#define S3C_GPIO_END (S3C2410_GPIO_BANKH + 32)
+#define S3C_GPIO_END (S3C2410_GPH(0) + 32)
#endif
diff --git a/arch/arm/mach-s3c2410/include/mach/h1940-latch.h b/arch/arm/mach-s3c2410/include/mach/h1940-latch.h
index d8a832729a8a..97e42bfce81e 100644
--- a/arch/arm/mach-s3c2410/include/mach/h1940-latch.h
+++ b/arch/arm/mach-s3c2410/include/mach/h1940-latch.h
@@ -14,51 +14,30 @@
#ifndef __ASM_ARCH_H1940_LATCH_H
#define __ASM_ARCH_H1940_LATCH_H
+#include <mach/gpio.h>
-#ifndef __ASSEMBLY__
-#define H1940_LATCH ((void __force __iomem *)0xF8000000)
-#else
-#define H1940_LATCH 0xF8000000
-#endif
-
-#define H1940_PA_LATCH (S3C2410_CS2)
+#define H1940_LATCH_GPIO(x) (S3C_GPIO_END + (x))
/* SD layer latch */
-#define H1940_LATCH_SDQ1 (1<<16)
-#define H1940_LATCH_LCD_P1 (1<<17)
-#define H1940_LATCH_LCD_P2 (1<<18)
-#define H1940_LATCH_LCD_P3 (1<<19)
-#define H1940_LATCH_MAX1698_nSHUTDOWN (1<<20) /* LCD backlight */
-#define H1940_LATCH_LED_RED (1<<21)
-#define H1940_LATCH_SDQ7 (1<<22)
-#define H1940_LATCH_USB_DP (1<<23)
+#define H1940_LATCH_LCD_P0 H1940_LATCH_GPIO(0)
+#define H1940_LATCH_LCD_P1 H1940_LATCH_GPIO(1)
+#define H1940_LATCH_LCD_P2 H1940_LATCH_GPIO(2)
+#define H1940_LATCH_LCD_P3 H1940_LATCH_GPIO(3)
+#define H1940_LATCH_MAX1698_nSHUTDOWN H1940_LATCH_GPIO(4)
+#define H1940_LATCH_LED_RED H1940_LATCH_GPIO(5)
+#define H1940_LATCH_SDQ7 H1940_LATCH_GPIO(6)
+#define H1940_LATCH_USB_DP H1940_LATCH_GPIO(7)
/* CPU layer latch */
-#define H1940_LATCH_UDA_POWER (1<<24)
-#define H1940_LATCH_AUDIO_POWER (1<<25)
-#define H1940_LATCH_SM803_ENABLE (1<<26)
-#define H1940_LATCH_LCD_P4 (1<<27)
-#define H1940_LATCH_CPUQ5 (1<<28) /* untraced */
-#define H1940_LATCH_BLUETOOTH_POWER (1<<29) /* active high */
-#define H1940_LATCH_LED_GREEN (1<<30)
-#define H1940_LATCH_LED_FLASH (1<<31)
-
-/* default settings */
-
-#define H1940_LATCH_DEFAULT \
- H1940_LATCH_LCD_P4 | \
- H1940_LATCH_SM803_ENABLE | \
- H1940_LATCH_SDQ1 | \
- H1940_LATCH_LCD_P1 | \
- H1940_LATCH_LCD_P2 | \
- H1940_LATCH_LCD_P3 | \
- H1940_LATCH_MAX1698_nSHUTDOWN | \
- H1940_LATCH_CPUQ5
-
-/* control functions */
-
-extern void h1940_latch_control(unsigned int clear, unsigned int set);
+#define H1940_LATCH_UDA_POWER H1940_LATCH_GPIO(8)
+#define H1940_LATCH_AUDIO_POWER H1940_LATCH_GPIO(9)
+#define H1940_LATCH_SM803_ENABLE H1940_LATCH_GPIO(10)
+#define H1940_LATCH_LCD_P4 H1940_LATCH_GPIO(11)
+#define H1940_LATCH_SD_POWER H1940_LATCH_GPIO(12)
+#define H1940_LATCH_BLUETOOTH_POWER H1940_LATCH_GPIO(13)
+#define H1940_LATCH_LED_GREEN H1940_LATCH_GPIO(14)
+#define H1940_LATCH_LED_FLASH H1940_LATCH_GPIO(15)
#endif /* __ASM_ARCH_H1940_LATCH_H */
diff --git a/arch/arm/mach-s3c2410/include/mach/regs-s3c2443-clock.h b/arch/arm/mach-s3c2410/include/mach/regs-s3c2443-clock.h
index 08ab9dfb6ae6..101aeea22310 100644
--- a/arch/arm/mach-s3c2410/include/mach/regs-s3c2443-clock.h
+++ b/arch/arm/mach-s3c2410/include/mach/regs-s3c2443-clock.h
@@ -118,6 +118,8 @@
#define S3C2443_SCLKCON_UARTCLK (1<<8)
#define S3C2443_SCLKCON_USBHOST (1<<1)
+#define S3C2443_PWRCFG_SLEEP (1<<15)
+
#include <asm/div64.h>
static inline unsigned int
diff --git a/arch/arm/mach-s3c2410/include/mach/vmalloc.h b/arch/arm/mach-s3c2410/include/mach/vmalloc.h
index 54297eb0bf5e..7a311e8dddba 100644
--- a/arch/arm/mach-s3c2410/include/mach/vmalloc.h
+++ b/arch/arm/mach-s3c2410/include/mach/vmalloc.h
@@ -15,6 +15,6 @@
#ifndef __ASM_ARCH_VMALLOC_H
#define __ASM_ARCH_VMALLOC_H
-#define VMALLOC_END 0xE0000000UL
+#define VMALLOC_END 0xF6000000UL
#endif /* __ASM_ARCH_VMALLOC_H */
diff --git a/arch/arm/mach-s3c2410/mach-h1940.c b/arch/arm/mach-s3c2410/mach-h1940.c
index 98c5c9e81ee9..d7ada8c7e41f 100644
--- a/arch/arm/mach-s3c2410/mach-h1940.c
+++ b/arch/arm/mach-s3c2410/mach-h1940.c
@@ -24,6 +24,7 @@
#include <linux/io.h>
#include <linux/gpio.h>
#include <linux/pwm_backlight.h>
+#include <linux/i2c.h>
#include <video/platform_lcd.h>
#include <linux/mmc/host.h>
@@ -59,6 +60,14 @@
#include <plat/mci.h>
#include <plat/ts.h>
+#include <sound/uda1380.h>
+
+#define H1940_LATCH ((void __force __iomem *)0xF8000000)
+
+#define H1940_PA_LATCH S3C2410_CS2
+
+#define H1940_LATCH_BIT(x) (1 << ((x) + 16 - S3C_GPIO_END))
+
static struct map_desc h1940_iodesc[] __initdata = {
[0] = {
.virtual = (unsigned long)H1940_LATCH,
@@ -100,9 +109,9 @@ static struct s3c2410_uartcfg h1940_uartcfgs[] __initdata = {
/* Board control latch control */
-static unsigned int latch_state = H1940_LATCH_DEFAULT;
+static unsigned int latch_state;
-void h1940_latch_control(unsigned int clear, unsigned int set)
+static void h1940_latch_control(unsigned int clear, unsigned int set)
{
unsigned long flags;
@@ -116,7 +125,42 @@ void h1940_latch_control(unsigned int clear, unsigned int set)
local_irq_restore(flags);
}
-EXPORT_SYMBOL_GPL(h1940_latch_control);
+static inline int h1940_gpiolib_to_latch(int offset)
+{
+ return 1 << (offset + 16);
+}
+
+static void h1940_gpiolib_latch_set(struct gpio_chip *chip,
+ unsigned offset, int value)
+{
+ int latch_bit = h1940_gpiolib_to_latch(offset);
+
+ h1940_latch_control(value ? 0 : latch_bit,
+ value ? latch_bit : 0);
+}
+
+static int h1940_gpiolib_latch_output(struct gpio_chip *chip,
+ unsigned offset, int value)
+{
+ h1940_gpiolib_latch_set(chip, offset, value);
+ return 0;
+}
+
+static int h1940_gpiolib_latch_get(struct gpio_chip *chip,
+ unsigned offset)
+{
+ return (latch_state >> (offset + 16)) & 1;
+}
+
+struct gpio_chip h1940_latch_gpiochip = {
+ .base = H1940_LATCH_GPIO(0),
+ .owner = THIS_MODULE,
+ .label = "H1940_LATCH",
+ .ngpio = 16,
+ .direction_output = h1940_gpiolib_latch_output,
+ .set = h1940_gpiolib_latch_set,
+ .get = h1940_gpiolib_latch_get,
+};
static void h1940_udc_pullup(enum s3c2410_udc_cmd_e cmd)
{
@@ -125,10 +169,10 @@ static void h1940_udc_pullup(enum s3c2410_udc_cmd_e cmd)
switch (cmd)
{
case S3C2410_UDC_P_ENABLE :
- h1940_latch_control(0, H1940_LATCH_USB_DP);
+ gpio_set_value(H1940_LATCH_USB_DP, 1);
break;
case S3C2410_UDC_P_DISABLE :
- h1940_latch_control(H1940_LATCH_USB_DP, 0);
+ gpio_set_value(H1940_LATCH_USB_DP, 0);
break;
case S3C2410_UDC_P_RESET :
break;
@@ -199,10 +243,25 @@ static struct platform_device h1940_device_bluetooth = {
.id = -1,
};
+static void h1940_set_mmc_power(unsigned char power_mode, unsigned short vdd)
+{
+ switch (power_mode) {
+ case MMC_POWER_OFF:
+ gpio_set_value(H1940_LATCH_SD_POWER, 0);
+ break;
+ case MMC_POWER_UP:
+ case MMC_POWER_ON:
+ gpio_set_value(H1940_LATCH_SD_POWER, 1);
+ break;
+ default:
+ break;
+ }
+}
+
static struct s3c24xx_mci_pdata h1940_mmc_cfg __initdata = {
.gpio_detect = S3C2410_GPF(5),
.gpio_wprotect = S3C2410_GPH(8),
- .set_power = NULL,
+ .set_power = h1940_set_mmc_power,
.ocr_avail = MMC_VDD_32_33,
};
@@ -213,15 +272,32 @@ static int h1940_backlight_init(struct device *dev)
gpio_direction_output(S3C2410_GPB(0), 0);
s3c_gpio_setpull(S3C2410_GPB(0), S3C_GPIO_PULL_NONE);
s3c_gpio_cfgpin(S3C2410_GPB(0), S3C2410_GPB0_TOUT0);
+ gpio_set_value(H1940_LATCH_MAX1698_nSHUTDOWN, 1);
return 0;
}
+static int h1940_backlight_notify(struct device *dev, int brightness)
+{
+ if (!brightness) {
+ gpio_direction_output(S3C2410_GPB(0), 1);
+ gpio_set_value(H1940_LATCH_MAX1698_nSHUTDOWN, 0);
+ } else {
+ gpio_direction_output(S3C2410_GPB(0), 0);
+ s3c_gpio_setpull(S3C2410_GPB(0), S3C_GPIO_PULL_NONE);
+ s3c_gpio_cfgpin(S3C2410_GPB(0), S3C2410_GPB0_TOUT0);
+ gpio_set_value(H1940_LATCH_MAX1698_nSHUTDOWN, 1);
+ }
+ return brightness;
+}
+
static void h1940_backlight_exit(struct device *dev)
{
gpio_direction_output(S3C2410_GPB(0), 1);
+ gpio_set_value(H1940_LATCH_MAX1698_nSHUTDOWN, 0);
}
+
static struct platform_pwm_backlight_data backlight_data = {
.pwm_id = 0,
.max_brightness = 100,
@@ -229,6 +305,7 @@ static struct platform_pwm_backlight_data backlight_data = {
/* tcnt = 0x31 */
.pwm_period_ns = 36296,
.init = h1940_backlight_init,
+ .notify = h1940_backlight_notify,
.exit = h1940_backlight_exit,
};
@@ -247,19 +324,37 @@ static void h1940_lcd_power_set(struct plat_lcd_data *pd,
int value;
if (!power) {
- /* set to 3ec */
- gpio_direction_output(S3C2410_GPC(0), 0);
+ gpio_set_value(S3C2410_GPC(0), 0);
/* wait for 3ac */
do {
value = gpio_get_value(S3C2410_GPC(6));
} while (value);
- /* set to 38c */
- gpio_direction_output(S3C2410_GPC(5), 0);
+
+ gpio_set_value(H1940_LATCH_LCD_P2, 0);
+ gpio_set_value(H1940_LATCH_LCD_P3, 0);
+ gpio_set_value(H1940_LATCH_LCD_P4, 0);
+
+ gpio_direction_output(S3C2410_GPC(1), 0);
+ gpio_direction_output(S3C2410_GPC(4), 0);
+
+ gpio_set_value(H1940_LATCH_LCD_P1, 0);
+ gpio_set_value(H1940_LATCH_LCD_P0, 0);
+
+ gpio_set_value(S3C2410_GPC(5), 0);
+
} else {
- /* Set to 3ac */
- gpio_direction_output(S3C2410_GPC(5), 1);
- /* Set to 3ad */
- gpio_direction_output(S3C2410_GPC(0), 1);
+ gpio_set_value(H1940_LATCH_LCD_P0, 1);
+ gpio_set_value(H1940_LATCH_LCD_P1, 1);
+
+ s3c_gpio_cfgpin(S3C2410_GPC(1), S3C_GPIO_SFN(2));
+ s3c_gpio_cfgpin(S3C2410_GPC(4), S3C_GPIO_SFN(2));
+
+ gpio_set_value(S3C2410_GPC(5), 1);
+ gpio_set_value(S3C2410_GPC(0), 1);
+
+ gpio_set_value(H1940_LATCH_LCD_P3, 1);
+ gpio_set_value(H1940_LATCH_LCD_P2, 1);
+ gpio_set_value(H1940_LATCH_LCD_P4, 1);
}
}
@@ -273,12 +368,26 @@ static struct platform_device h1940_lcd_powerdev = {
.dev.platform_data = &h1940_lcd_power_data,
};
+static struct uda1380_platform_data uda1380_info = {
+ .gpio_power = H1940_LATCH_UDA_POWER,
+ .gpio_reset = S3C2410_GPA(12),
+ .dac_clk = UDA1380_DAC_CLK_SYSCLK,
+};
+
+static struct i2c_board_info h1940_i2c_devices[] = {
+ {
+ I2C_BOARD_INFO("uda1380", 0x1a),
+ .platform_data = &uda1380_info,
+ },
+};
+
static struct platform_device *h1940_devices[] __initdata = {
&s3c_device_ohci,
&s3c_device_lcd,
&s3c_device_wdt,
&s3c_device_i2c0,
&s3c_device_iis,
+ &s3c_device_pcm,
&s3c_device_usbgadget,
&h1940_device_leds,
&h1940_device_bluetooth,
@@ -303,6 +412,10 @@ static void __init h1940_map_io(void)
memcpy(phys_to_virt(H1940_SUSPEND_RESUMEAT), h1940_pm_return, 1024);
#endif
s3c_pm_init();
+
+ /* Add latch gpio chip, set latch initial value */
+ h1940_latch_control(0, 0);
+ WARN_ON(gpiochip_add(&h1940_latch_gpiochip));
}
/* H1940 and RX3715 need to reserve this for suspend */
@@ -340,12 +453,38 @@ static void __init h1940_init(void)
writel(tmp, S3C2410_UPLLCON);
gpio_request(S3C2410_GPC(0), "LCD power");
+ gpio_request(S3C2410_GPC(1), "LCD power");
+ gpio_request(S3C2410_GPC(4), "LCD power");
gpio_request(S3C2410_GPC(5), "LCD power");
gpio_request(S3C2410_GPC(6), "LCD power");
-
+ gpio_request(H1940_LATCH_LCD_P0, "LCD power");
+ gpio_request(H1940_LATCH_LCD_P1, "LCD power");
+ gpio_request(H1940_LATCH_LCD_P2, "LCD power");
+ gpio_request(H1940_LATCH_LCD_P3, "LCD power");
+ gpio_request(H1940_LATCH_LCD_P4, "LCD power");
+ gpio_request(H1940_LATCH_MAX1698_nSHUTDOWN, "LCD power");
+ gpio_direction_output(S3C2410_GPC(0), 0);
+ gpio_direction_output(S3C2410_GPC(1), 0);
+ gpio_direction_output(S3C2410_GPC(4), 0);
+ gpio_direction_output(S3C2410_GPC(5), 0);
gpio_direction_input(S3C2410_GPC(6));
+ gpio_direction_output(H1940_LATCH_LCD_P0, 0);
+ gpio_direction_output(H1940_LATCH_LCD_P1, 0);
+ gpio_direction_output(H1940_LATCH_LCD_P2, 0);
+ gpio_direction_output(H1940_LATCH_LCD_P3, 0);
+ gpio_direction_output(H1940_LATCH_LCD_P4, 0);
+ gpio_direction_output(H1940_LATCH_MAX1698_nSHUTDOWN, 0);
+
+ gpio_request(H1940_LATCH_USB_DP, "USB pullup");
+ gpio_direction_output(H1940_LATCH_USB_DP, 0);
+
+ gpio_request(H1940_LATCH_SD_POWER, "SD power");
+ gpio_direction_output(H1940_LATCH_SD_POWER, 0);
platform_add_devices(h1940_devices, ARRAY_SIZE(h1940_devices));
+
+ i2c_register_board_info(0, h1940_i2c_devices,
+ ARRAY_SIZE(h1940_i2c_devices));
}
MACHINE_START(H1940, "IPAQ-H1940")
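To see how the h1940-latch.h and mach-h1940.c hunks above fit together, here is a worked example (an editor's illustration, not part of the patch) tracing one latch line through the new gpio_chip:

/*
 * Worked example (illustration only, not part of the patch): the
 * Bluetooth power line on the h1940.
 *
 *   H1940_LATCH_BLUETOOTH_POWER = H1940_LATCH_GPIO(13) = S3C_GPIO_END + 13
 *
 * The latch gpio_chip is registered with .base = H1940_LATCH_GPIO(0), so
 * gpio_set_value(H1940_LATCH_BLUETOOTH_POWER, 1) reaches
 * h1940_gpiolib_latch_set() with offset 13, and
 *
 *   h1940_gpiolib_to_latch(13) = 1 << (13 + 16) = 1 << 29
 *
 * which is the same bit the old #define (1 << 29) named, so the converted
 * h1940bt_enable() in h1940-bluetooth.c drives the latch exactly as before.
 */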
diff --git a/arch/arm/mach-s3c2412/Kconfig b/arch/arm/mach-s3c2412/Kconfig
index cef6a65637bd..fa2e5bffbb8e 100644
--- a/arch/arm/mach-s3c2412/Kconfig
+++ b/arch/arm/mach-s3c2412/Kconfig
@@ -16,7 +16,7 @@ config CPU_S3C2412
config CPU_S3C2412_ONLY
bool
depends on ARCH_S3C2410 && !CPU_S3C2400 && !CPU_S3C2410 && \
- !CPU_2416 && !CPU_S3C2440 && !CPU_S3C2442 && \
+ !CPU_S3C2416 && !CPU_S3C2440 && !CPU_S3C2442 && \
!CPU_S3C2443 && CPU_S3C2412
default y if CPU_S3C2412
diff --git a/arch/arm/mach-s3c2412/s3c2412.c b/arch/arm/mach-s3c2412/s3c2412.c
index bef39f77729d..4c6df51ddf33 100644
--- a/arch/arm/mach-s3c2412/s3c2412.c
+++ b/arch/arm/mach-s3c2412/s3c2412.c
@@ -51,6 +51,7 @@
#include <plat/clock.h>
#include <plat/pm.h>
#include <plat/pll.h>
+#include <plat/nand-core.h>
#ifndef CONFIG_CPU_S3C2412_ONLY
void __iomem *s3c24xx_va_gpio2 = S3C24XX_VA_GPIO;
@@ -92,7 +93,7 @@ void __init s3c2412_init_uarts(struct s3c2410_uartcfg *cfg, int no)
/* rename devices that are s3c2412/s3c2413 specific */
s3c_device_sdi.name = "s3c2412-sdi";
s3c_device_lcd.name = "s3c2412-lcd";
- s3c_device_nand.name = "s3c2412-nand";
+ s3c_nand_setname("s3c2412-nand");
/* alter IRQ of SDI controller */
diff --git a/arch/arm/mach-s3c2416/Kconfig b/arch/arm/mach-s3c2416/Kconfig
index 657e4fe17f39..27b3e7c9d613 100644
--- a/arch/arm/mach-s3c2416/Kconfig
+++ b/arch/arm/mach-s3c2416/Kconfig
@@ -25,14 +25,23 @@ config S3C2416_DMA
help
Internal config node for S3C2416 DMA support
+config S3C2416_PM
+ bool
+ help
+ Internal config node to apply S3C2416 power management
+
menu "S3C2416 Machines"
config MACH_SMDK2416
bool "SMDK2416"
select CPU_S3C2416
+ select MACH_SMDK
select S3C_DEV_FB
select S3C_DEV_HSMMC
select S3C_DEV_HSMMC1
+ select S3C_DEV_NAND
+ select S3C_DEV_USB_HOST
+ select S3C2416_PM if PM
help
Say Y here if you are using an SMDK2416
diff --git a/arch/arm/mach-s3c2416/Makefile b/arch/arm/mach-s3c2416/Makefile
index 6c12c7bf40ad..ef038d62ffdb 100644
--- a/arch/arm/mach-s3c2416/Makefile
+++ b/arch/arm/mach-s3c2416/Makefile
@@ -11,7 +11,7 @@ obj- :=
obj-$(CONFIG_CPU_S3C2416) += s3c2416.o clock.o
obj-$(CONFIG_CPU_S3C2416) += irq.o
-
+obj-$(CONFIG_S3C2416_PM) += pm.o
#obj-$(CONFIG_S3C2416_DMA) += dma.o
# Machine support
diff --git a/arch/arm/mach-s3c2416/irq.c b/arch/arm/mach-s3c2416/irq.c
index 89f521d59d06..00174daf1526 100644
--- a/arch/arm/mach-s3c2416/irq.c
+++ b/arch/arm/mach-s3c2416/irq.c
@@ -168,12 +168,11 @@ static struct irq_chip s3c2416_irq_dma = {
static void s3c2416_irq_demux_uart3(unsigned int irq, struct irq_desc *desc)
{
- s3c2416_irq_demux(IRQ_S3C2443_UART3, 3);
+ s3c2416_irq_demux(IRQ_S3C2443_RX3, 3);
}
#define INTMSK_UART3 (1UL << (IRQ_S3C2443_UART3 - IRQ_EINT0))
-#define SUBMSK_UART3 (0xf << (IRQ_S3C2443_RX3 - S3C2410_IRQSUB(0)))
-
+#define SUBMSK_UART3 (0x7 << (IRQ_S3C2443_RX3 - S3C2410_IRQSUB(0)))
static void s3c2416_irq_uart3_mask(unsigned int irqno)
{
@@ -243,6 +242,8 @@ static int __init s3c2416_irq_add(struct sys_device *sysdev)
static struct sysdev_driver s3c2416_irq_driver = {
.add = s3c2416_irq_add,
+ .suspend = s3c24xx_irq_suspend,
+ .resume = s3c24xx_irq_resume,
};
static int __init s3c2416_irq_init(void)
diff --git a/arch/arm/mach-s3c2416/pm.c b/arch/arm/mach-s3c2416/pm.c
new file mode 100644
index 000000000000..4a04205b04d5
--- /dev/null
+++ b/arch/arm/mach-s3c2416/pm.c
@@ -0,0 +1,84 @@
+/* linux/arch/arm/mach-s3c2416/pm.c
+ *
+ * Copyright (c) 2010 Samsung Electronics Co., Ltd.
+ * http://www.samsung.com
+ *
+ * S3C2416 - PM support (Based on Ben Dooks' S3C2412 PM support)
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+*/
+
+#include <linux/sysdev.h>
+#include <linux/io.h>
+
+#include <asm/cacheflush.h>
+
+#include <mach/regs-power.h>
+#include <mach/regs-s3c2443-clock.h>
+
+#include <plat/cpu.h>
+#include <plat/pm.h>
+
+extern void s3c2412_sleep_enter(void);
+
+static void s3c2416_cpu_suspend(void)
+{
+ flush_cache_all();
+
+ /* enable wakeup sources regardless of battery state */
+ __raw_writel(S3C2443_PWRCFG_SLEEP, S3C2443_PWRCFG);
+
+ /* set the mode as sleep, 2BED represents "Go to BED" */
+ __raw_writel(0x2BED, S3C2443_PWRMODE);
+
+ s3c2412_sleep_enter();
+}
+
+static void s3c2416_pm_prepare(void)
+{
+ /*
+ * write the magic value u-boot uses to check for resume into
+ * the INFORM0 register, and ensure INFORM1 is set to the
+ * correct address to resume from.
+ */
+ __raw_writel(0x2BED, S3C2412_INFORM0);
+ __raw_writel(virt_to_phys(s3c_cpu_resume), S3C2412_INFORM1);
+}
+
+static int s3c2416_pm_add(struct sys_device *sysdev)
+{
+ pm_cpu_prep = s3c2416_pm_prepare;
+ pm_cpu_sleep = s3c2416_cpu_suspend;
+
+ return 0;
+}
+
+static int s3c2416_pm_suspend(struct sys_device *dev, pm_message_t state)
+{
+ return 0;
+}
+
+static int s3c2416_pm_resume(struct sys_device *dev)
+{
+ /* unset the return-from-sleep and inform flags */
+ __raw_writel(0x0, S3C2443_PWRMODE);
+ __raw_writel(0x0, S3C2412_INFORM0);
+ __raw_writel(0x0, S3C2412_INFORM1);
+
+ return 0;
+}
+
+static struct sysdev_driver s3c2416_pm_driver = {
+ .add = s3c2416_pm_add,
+ .suspend = s3c2416_pm_suspend,
+ .resume = s3c2416_pm_resume,
+};
+
+static __init int s3c2416_pm_init(void)
+{
+ return sysdev_driver_register(&s3c2416_sysclass, &s3c2416_pm_driver);
+}
+
+arch_initcall(s3c2416_pm_init);
diff --git a/arch/arm/mach-s3c2416/s3c2416.c b/arch/arm/mach-s3c2416/s3c2416.c
index bc30245e133b..63f39cdc0972 100644
--- a/arch/arm/mach-s3c2416/s3c2416.c
+++ b/arch/arm/mach-s3c2416/s3c2416.c
@@ -56,6 +56,7 @@
#include <plat/iic-core.h>
#include <plat/fb-core.h>
+#include <plat/nand-core.h>
static struct map_desc s3c2416_iodesc[] __initdata = {
IODESC_ENT(WATCHDOG),
@@ -100,7 +101,7 @@ void __init s3c2416_init_uarts(struct s3c2410_uartcfg *cfg, int no)
{
s3c24xx_init_uartdevs("s3c2440-uart", s3c2410_uart_resources, cfg, no);
- s3c_device_nand.name = "s3c2416-nand";
+ s3c_nand_setname("s3c2412-nand");
}
/* s3c2416_map_io
diff --git a/arch/arm/mach-s3c2440/Kconfig b/arch/arm/mach-s3c2440/Kconfig
index cd8e7de388f0..a0cb2581894f 100644
--- a/arch/arm/mach-s3c2440/Kconfig
+++ b/arch/arm/mach-s3c2440/Kconfig
@@ -4,7 +4,6 @@
config CPU_S3C2440
bool
- depends on ARCH_S3C2410
select CPU_ARM920T
select S3C_GPIO_PULL_UP
select S3C2410_CLOCK
@@ -18,8 +17,8 @@ config CPU_S3C2440
config CPU_S3C2442
bool
- depends on ARCH_S3C2410
select CPU_ARM920T
+ select S3C_GPIO_PULL_DOWN
select S3C2410_CLOCK
select S3C2410_GPIO
select S3C2410_PM if PM
@@ -30,7 +29,7 @@ config CPU_S3C2442
config CPU_S3C244X
bool
- depends on ARCH_S3C2410 && (CPU_S3C2440 || CPU_S3C2442)
+ depends on CPU_S3C2440 || CPU_S3C2442
help
Support for S3C2440 and S3C2442 Samsung Mobile CPU based systems.
@@ -72,7 +71,7 @@ config S3C2440_PLL_16934400
config S3C2440_DMA
bool
- depends on ARCH_S3C2410 && CPU_S3C24405B
+ depends on CPU_S3C2440
help
Support for S3C2440 specific DMA code
@@ -180,8 +179,10 @@ config MACH_MINI2440
bool "MINI2440 development board"
select CPU_S3C2440
select EEPROM_AT24
+ select NEW_LEDS
+ select LEDS_CLASS
+ select LEDS_TRIGGER
select LEDS_TRIGGER_BACKLIGHT
- select SND_S3C24XX_SOC_S3C24XX_UDA134X
select S3C_DEV_NAND
select S3C_DEV_USB_HOST
help
diff --git a/arch/arm/mach-s3c2440/mach-rx1950.c b/arch/arm/mach-s3c2440/mach-rx1950.c
index 32019bd9db3b..e0622bbb6dfa 100644
--- a/arch/arm/mach-s3c2440/mach-rx1950.c
+++ b/arch/arm/mach-s3c2440/mach-rx1950.c
@@ -25,8 +25,12 @@
#include <linux/input.h>
#include <linux/gpio_keys.h>
#include <linux/sysdev.h>
+#include <linux/pda_power.h>
#include <linux/pwm_backlight.h>
#include <linux/pwm.h>
+#include <linux/s3c_adc_battery.h>
+#include <linux/leds.h>
+#include <linux/i2c.h>
#include <linux/mtd/mtd.h>
#include <linux/mtd/partitions.h>
@@ -55,6 +59,8 @@
#include <plat/irq.h>
#include <plat/ts.h>
+#include <sound/uda1380.h>
+
#define LCD_PWM_PERIOD 192960
#define LCD_PWM_DUTY 127353
@@ -127,6 +133,193 @@ static struct s3c2410fb_display rx1950_display = {
};
+static int power_supply_init(struct device *dev)
+{
+ return gpio_request(S3C2410_GPF(2), "cable plugged");
+}
+
+static int rx1950_is_ac_online(void)
+{
+ return !gpio_get_value(S3C2410_GPF(2));
+}
+
+static void power_supply_exit(struct device *dev)
+{
+ gpio_free(S3C2410_GPF(2));
+}
+
+static char *rx1950_supplicants[] = {
+ "main-battery"
+};
+
+static struct pda_power_pdata power_supply_info = {
+ .init = power_supply_init,
+ .is_ac_online = rx1950_is_ac_online,
+ .exit = power_supply_exit,
+ .supplied_to = rx1950_supplicants,
+ .num_supplicants = ARRAY_SIZE(rx1950_supplicants),
+};
+
+static struct resource power_supply_resources[] = {
+ [0] = {
+ .name = "ac",
+ .flags = IORESOURCE_IRQ | IORESOURCE_IRQ_LOWEDGE |
+ IORESOURCE_IRQ_HIGHEDGE,
+ .start = IRQ_EINT2,
+ .end = IRQ_EINT2,
+ },
+};
+
+static struct platform_device power_supply = {
+ .name = "pda-power",
+ .id = -1,
+ .dev = {
+ .platform_data =
+ &power_supply_info,
+ },
+ .resource = power_supply_resources,
+ .num_resources = ARRAY_SIZE(power_supply_resources),
+};
+
+static const struct s3c_adc_bat_thresh bat_lut_noac[] = {
+ { .volt = 4100, .cur = 156, .level = 100},
+ { .volt = 4050, .cur = 156, .level = 95},
+ { .volt = 4025, .cur = 141, .level = 90},
+ { .volt = 3995, .cur = 144, .level = 85},
+ { .volt = 3957, .cur = 162, .level = 80},
+ { .volt = 3931, .cur = 147, .level = 75},
+ { .volt = 3902, .cur = 147, .level = 70},
+ { .volt = 3863, .cur = 153, .level = 65},
+ { .volt = 3838, .cur = 150, .level = 60},
+ { .volt = 3800, .cur = 153, .level = 55},
+ { .volt = 3765, .cur = 153, .level = 50},
+ { .volt = 3748, .cur = 172, .level = 45},
+ { .volt = 3740, .cur = 153, .level = 40},
+ { .volt = 3714, .cur = 175, .level = 35},
+ { .volt = 3710, .cur = 156, .level = 30},
+ { .volt = 3963, .cur = 156, .level = 25},
+ { .volt = 3672, .cur = 178, .level = 20},
+ { .volt = 3651, .cur = 178, .level = 15},
+ { .volt = 3629, .cur = 178, .level = 10},
+ { .volt = 3612, .cur = 162, .level = 5},
+ { .volt = 3605, .cur = 162, .level = 0},
+};
+
+static const struct s3c_adc_bat_thresh bat_lut_acin[] = {
+ { .volt = 4200, .cur = 0, .level = 100},
+ { .volt = 4190, .cur = 0, .level = 99},
+ { .volt = 4178, .cur = 0, .level = 95},
+ { .volt = 4110, .cur = 0, .level = 70},
+ { .volt = 4076, .cur = 0, .level = 65},
+ { .volt = 4046, .cur = 0, .level = 60},
+ { .volt = 4021, .cur = 0, .level = 55},
+ { .volt = 3999, .cur = 0, .level = 50},
+ { .volt = 3982, .cur = 0, .level = 45},
+ { .volt = 3965, .cur = 0, .level = 40},
+ { .volt = 3957, .cur = 0, .level = 35},
+ { .volt = 3948, .cur = 0, .level = 30},
+ { .volt = 3936, .cur = 0, .level = 25},
+ { .volt = 3927, .cur = 0, .level = 20},
+ { .volt = 3906, .cur = 0, .level = 15},
+ { .volt = 3880, .cur = 0, .level = 10},
+ { .volt = 3829, .cur = 0, .level = 5},
+ { .volt = 3820, .cur = 0, .level = 0},
+};
+
+int rx1950_bat_init(void)
+{
+ int ret;
+
+ ret = gpio_request(S3C2410_GPJ(2), "rx1950-charger-enable-1");
+ if (ret)
+ goto err_gpio1;
+ ret = gpio_request(S3C2410_GPJ(3), "rx1950-charger-enable-2");
+ if (ret)
+ goto err_gpio2;
+
+ return 0;
+
+err_gpio2:
+ gpio_free(S3C2410_GPJ(2));
+err_gpio1:
+ return ret;
+}
+
+void rx1950_bat_exit(void)
+{
+ gpio_free(S3C2410_GPJ(2));
+ gpio_free(S3C2410_GPJ(3));
+}
+
+void rx1950_enable_charger(void)
+{
+ gpio_direction_output(S3C2410_GPJ(2), 1);
+ gpio_direction_output(S3C2410_GPJ(3), 1);
+}
+
+void rx1950_disable_charger(void)
+{
+ gpio_direction_output(S3C2410_GPJ(2), 0);
+ gpio_direction_output(S3C2410_GPJ(3), 0);
+}
+
+static struct gpio_led rx1950_leds_desc[] = {
+ {
+ .name = "Green",
+ .default_trigger = "main-battery-charging-or-full",
+ .gpio = S3C2410_GPA(6),
+ },
+ {
+ .name = "Red",
+ .default_trigger = "main-battery-full",
+ .gpio = S3C2410_GPA(7),
+ },
+ {
+ .name = "Blue",
+ .default_trigger = "rx1950-acx-mem",
+ .gpio = S3C2410_GPA(11),
+ },
+};
+
+static struct gpio_led_platform_data rx1950_leds_pdata = {
+ .num_leds = ARRAY_SIZE(rx1950_leds_desc),
+ .leds = rx1950_leds_desc,
+};
+
+static struct platform_device rx1950_leds = {
+ .name = "leds-gpio",
+ .id = -1,
+ .dev = {
+ .platform_data = &rx1950_leds_pdata,
+ },
+};
+
+static struct s3c_adc_bat_pdata rx1950_bat_cfg = {
+ .init = rx1950_bat_init,
+ .exit = rx1950_bat_exit,
+ .enable_charger = rx1950_enable_charger,
+ .disable_charger = rx1950_disable_charger,
+ .gpio_charge_finished = S3C2410_GPF(3),
+ .lut_noac = bat_lut_noac,
+ .lut_noac_cnt = ARRAY_SIZE(bat_lut_noac),
+ .lut_acin = bat_lut_acin,
+ .lut_acin_cnt = ARRAY_SIZE(bat_lut_acin),
+ .volt_channel = 0,
+ .current_channel = 1,
+ .volt_mult = 4235,
+ .current_mult = 2900,
+ .internal_impedance = 200,
+};
+
+static struct platform_device rx1950_battery = {
+ .name = "s3c-adc-battery",
+ .id = -1,
+ .dev = {
+ .parent = &s3c_device_adc.dev,
+ .platform_data = &rx1950_bat_cfg,
+ },
+};
+
static struct s3c2410fb_mach_info rx1950_lcd_cfg = {
.displays = &rx1950_display,
.num_displays = 1,
@@ -481,11 +674,17 @@ static struct platform_device rx1950_device_gpiokeys = {
.dev.platform_data = &rx1950_gpio_keys_data,
};
-static struct s3c2410_platform_i2c rx1950_i2c_data = {
- .flags = 0,
- .slave_addr = 0x42,
- .frequency = 400 * 1000,
- .sda_delay = S3C2410_IICLC_SDA_DELAY5 | S3C2410_IICLC_FILTER_ON,
+static struct uda1380_platform_data uda1380_info = {
+ .gpio_power = S3C2410_GPJ(0),
+ .gpio_reset = S3C2410_GPD(0),
+ .dac_clk = UDA1380_DAC_CLK_SYSCLK,
+};
+
+static struct i2c_board_info rx1950_i2c_devices[] = {
+ {
+ I2C_BOARD_INFO("uda1380", 0x1a),
+ .platform_data = &uda1380_info,
+ },
};
static struct platform_device *rx1950_devices[] __initdata = {
@@ -493,6 +692,7 @@ static struct platform_device *rx1950_devices[] __initdata = {
&s3c_device_wdt,
&s3c_device_i2c0,
&s3c_device_iis,
+ &s3c_device_pcm,
&s3c_device_usbgadget,
&s3c_device_rtc,
&s3c_device_nand,
@@ -503,6 +703,9 @@ static struct platform_device *rx1950_devices[] __initdata = {
&s3c_device_timer[1],
&rx1950_backlight,
&rx1950_device_gpiokeys,
+ &power_supply,
+ &rx1950_battery,
+ &rx1950_leds,
};
static struct clk *rx1950_clocks[] __initdata = {
@@ -538,7 +741,7 @@ static void __init rx1950_init_machine(void)
s3c24xx_udc_set_platdata(&rx1950_udc_cfg);
s3c24xx_ts_set_platdata(&rx1950_ts_cfg);
s3c24xx_mci_set_platdata(&rx1950_mmc_cfg);
- s3c_i2c0_set_platdata(&rx1950_i2c_data);
+ s3c_i2c0_set_platdata(NULL);
s3c_nand_set_platdata(&rx1950_nand_info);
/* Turn off suspend on both USB ports, and switch the
@@ -569,6 +772,9 @@ static void __init rx1950_init_machine(void)
WARN_ON(gpio_request(S3C2410_GPB(1), "LCD power"));
platform_add_devices(rx1950_devices, ARRAY_SIZE(rx1950_devices));
+
+ i2c_register_board_info(0, rx1950_i2c_devices,
+ ARRAY_SIZE(rx1950_i2c_devices));
}
/* H1940 and RX3715 need to reserve this for suspend */
diff --git a/arch/arm/mach-s3c2440/s3c2440.c b/arch/arm/mach-s3c2440/s3c2440.c
index d50f3ae6173d..f7663f731ea0 100644
--- a/arch/arm/mach-s3c2440/s3c2440.c
+++ b/arch/arm/mach-s3c2440/s3c2440.c
@@ -46,9 +46,6 @@ int __init s3c2440_init(void)
{
printk("S3C2440: Initialising architecture\n");
- s3c24xx_gpiocfg_default.set_pull = s3c_gpio_setpull_1up;
- s3c24xx_gpiocfg_default.get_pull = s3c_gpio_getpull_1up;
-
/* change irq for watchdog */
s3c_device_wdt.resource[1].start = IRQ_S3C2440_WDT;
@@ -58,3 +55,11 @@ int __init s3c2440_init(void)
return sysdev_register(&s3c2440_sysdev);
}
+
+void __init s3c2440_map_io(void)
+{
+ s3c244x_map_io();
+
+ s3c24xx_gpiocfg_default.set_pull = s3c_gpio_setpull_1up;
+ s3c24xx_gpiocfg_default.get_pull = s3c_gpio_getpull_1up;
+}
diff --git a/arch/arm/mach-s3c2440/s3c2442.c b/arch/arm/mach-s3c2440/s3c2442.c
index 188ad1e57dc0..ecf813546554 100644
--- a/arch/arm/mach-s3c2440/s3c2442.c
+++ b/arch/arm/mach-s3c2440/s3c2442.c
@@ -32,6 +32,7 @@
#include <linux/interrupt.h>
#include <linux/ioport.h>
#include <linux/mutex.h>
+#include <linux/gpio.h>
#include <linux/clk.h>
#include <linux/io.h>
@@ -43,6 +44,11 @@
#include <plat/clock.h>
#include <plat/cpu.h>
+#include <plat/s3c244x.h>
+
+#include <plat/gpio-core.h>
+#include <plat/gpio-cfg.h>
+#include <plat/gpio-cfg-helpers.h>
/* S3C2442 extended clock support */
@@ -163,3 +169,11 @@ int __init s3c2442_init(void)
return sysdev_register(&s3c2442_sysdev);
}
+
+void __init s3c2442_map_io(void)
+{
+ s3c244x_map_io();
+
+ s3c24xx_gpiocfg_default.set_pull = s3c_gpio_setpull_1down;
+ s3c24xx_gpiocfg_default.get_pull = s3c_gpio_getpull_1down;
+}
diff --git a/arch/arm/mach-s3c2440/s3c244x.c b/arch/arm/mach-s3c2440/s3c244x.c
index 5e4a97e76533..90c1707b9c95 100644
--- a/arch/arm/mach-s3c2440/s3c244x.c
+++ b/arch/arm/mach-s3c2440/s3c244x.c
@@ -44,6 +44,7 @@
#include <plat/cpu.h>
#include <plat/pm.h>
#include <plat/pll.h>
+#include <plat/nand-core.h>
static struct map_desc s3c244x_iodesc[] __initdata = {
IODESC_ENT(CLKPWR),
@@ -68,7 +69,7 @@ void __init s3c244x_map_io(void)
s3c_device_sdi.name = "s3c2440-sdi";
s3c_device_i2c0.name = "s3c2440-i2c";
- s3c_device_nand.name = "s3c2440-nand";
+ s3c_nand_setname("s3c2440-nand");
s3c_device_ts.name = "s3c2440-ts";
s3c_device_usbgadget.name = "s3c2440-usbgadget";
}
diff --git a/arch/arm/mach-s3c2443/Kconfig b/arch/arm/mach-s3c2443/Kconfig
index 4fef723126fa..31babec90cec 100644
--- a/arch/arm/mach-s3c2443/Kconfig
+++ b/arch/arm/mach-s3c2443/Kconfig
@@ -5,6 +5,7 @@
config CPU_S3C2443
bool
depends on ARCH_S3C2410
+ select CPU_ARM920T
select S3C2443_DMA if S3C2410_DMA
select CPU_LLSERIAL_S3C2440
select SAMSUNG_CLKSRC
diff --git a/arch/arm/mach-s3c2443/irq.c b/arch/arm/mach-s3c2443/irq.c
index 0e0d693f3974..893424767ce1 100644
--- a/arch/arm/mach-s3c2443/irq.c
+++ b/arch/arm/mach-s3c2443/irq.c
@@ -166,12 +166,11 @@ static struct irq_chip s3c2443_irq_dma = {
static void s3c2443_irq_demux_uart3(unsigned int irq, struct irq_desc *desc)
{
- s3c2443_irq_demux(IRQ_S3C2443_UART3, 3);
+ s3c2443_irq_demux(IRQ_S3C2443_RX3, 3);
}
#define INTMSK_UART3 (1UL << (IRQ_S3C2443_UART3 - IRQ_EINT0))
-#define SUBMSK_UART3 (0xf << (IRQ_S3C2443_RX3 - S3C2410_IRQSUB(0)))
-
+#define SUBMSK_UART3 (0x7 << (IRQ_S3C2443_RX3 - S3C2410_IRQSUB(0)))
static void s3c2443_irq_uart3_mask(unsigned int irqno)
{
diff --git a/arch/arm/mach-s3c2443/s3c2443.c b/arch/arm/mach-s3c2443/s3c2443.c
index 839b6b2ced74..33d18dd1ebd5 100644
--- a/arch/arm/mach-s3c2443/s3c2443.c
+++ b/arch/arm/mach-s3c2443/s3c2443.c
@@ -36,6 +36,7 @@
#include <plat/devs.h>
#include <plat/cpu.h>
#include <plat/fb-core.h>
+#include <plat/nand-core.h>
static struct map_desc s3c2443_iodesc[] __initdata = {
IODESC_ENT(WATCHDOG),
@@ -62,7 +63,7 @@ int __init s3c2443_init(void)
s3c24xx_reset_hook = s3c2443_hard_reset;
- s3c_device_nand.name = "s3c2412-nand";
+ s3c_nand_setname("s3c2412-nand");
s3c_fb_setname("s3c2443-fb");
/* change WDT IRQ number */
diff --git a/arch/arm/mach-s3c24a0/include/mach/vmalloc.h b/arch/arm/mach-s3c24a0/include/mach/vmalloc.h
index 914656820794..6480b15277f3 100644
--- a/arch/arm/mach-s3c24a0/include/mach/vmalloc.h
+++ b/arch/arm/mach-s3c24a0/include/mach/vmalloc.h
@@ -12,6 +12,6 @@
#ifndef __ASM_ARCH_VMALLOC_H
#define __ASM_ARCH_VMALLOC_H
-#define VMALLOC_END (0xe0000000UL)
+#define VMALLOC_END 0xF6000000UL
#endif /* __ASM_ARCH_VMALLOC_H */
diff --git a/arch/arm/mach-s3c64xx/Kconfig b/arch/arm/mach-s3c64xx/Kconfig
index 1e4d78af7d84..579d2f0f4dd0 100644
--- a/arch/arm/mach-s3c64xx/Kconfig
+++ b/arch/arm/mach-s3c64xx/Kconfig
@@ -98,12 +98,33 @@ config MACH_ANW6410
help
Machine support for the A&W6410
+config MACH_MINI6410
+ bool "MINI6410"
+ select CPU_S3C6410
+ select S3C_DEV_HSMMC
+ select S3C_DEV_HSMMC1
+ select S3C64XX_SETUP_SDHCI
+ select S3C_DEV_USB_HOST
+ select S3C_DEV_NAND
+ select S3C_DEV_FB
+ select S3C64XX_SETUP_FB_24BPP
+ select SAMSUNG_DEV_ADC
+ select SAMSUNG_DEV_TS
+ help
+ Machine support for the FriendlyARM MINI6410
+
config MACH_REAL6410
bool "REAL6410"
select CPU_S3C6410
select S3C_DEV_HSMMC
select S3C_DEV_HSMMC1
select S3C64XX_SETUP_SDHCI
+ select S3C_DEV_FB
+ select S3C64XX_SETUP_FB_24BPP
+ select S3C_DEV_NAND
+ select SAMSUNG_DEV_ADC
+ select SAMSUNG_DEV_TS
+ select S3C_DEV_USB_HOST
help
Machine support for the CoreWind REAL6410
@@ -122,7 +143,7 @@ config MACH_SMDK6410
select S3C_DEV_USB_HSOTG
select S3C_DEV_WDT
select SAMSUNG_DEV_KEYPAD
- select HAVE_S3C2410_WATCHDOG
+ select HAVE_S3C2410_WATCHDOG if WATCHDOG
select S3C64XX_SETUP_SDHCI
select S3C64XX_SETUP_I2C1
select S3C64XX_SETUP_IDE
@@ -185,6 +206,7 @@ config SMDK6410_WM1192_EV1
select REGULATOR_WM831X
select S3C24XX_GPIO_EXTRA64
select MFD_WM831X
+ select MFD_WM831X_I2C
help
The Wolfson Microelectronics 1192-EV1 is a WM831x based PMIC
daughtercard for the Samsung SMDK6410 reference platform.
diff --git a/arch/arm/mach-s3c64xx/Makefile b/arch/arm/mach-s3c64xx/Makefile
index 90221a2e0c55..4657363f0674 100644
--- a/arch/arm/mach-s3c64xx/Makefile
+++ b/arch/arm/mach-s3c64xx/Makefile
@@ -53,6 +53,7 @@ obj-$(CONFIG_MACH_ANW6410) += mach-anw6410.o
obj-$(CONFIG_MACH_SMDK6400) += mach-smdk6400.o
obj-$(CONFIG_MACH_SMDK6410) += mach-smdk6410.o
obj-$(CONFIG_MACH_REAL6410) += mach-real6410.o
+obj-$(CONFIG_MACH_MINI6410) += mach-mini6410.o
obj-$(CONFIG_MACH_NCP) += mach-ncp.o
obj-$(CONFIG_MACH_HMT) += mach-hmt.o
obj-$(CONFIG_MACH_SMARTQ) += mach-smartq.o
diff --git a/arch/arm/mach-s3c64xx/dev-audio.c b/arch/arm/mach-s3c64xx/dev-audio.c
index 9648fbc36eec..76426a32c013 100644
--- a/arch/arm/mach-s3c64xx/dev-audio.c
+++ b/arch/arm/mach-s3c64xx/dev-audio.c
@@ -22,44 +22,34 @@
#include <plat/audio.h>
#include <plat/gpio-cfg.h>
-#include <mach/gpio-bank-c.h>
-#include <mach/gpio-bank-d.h>
-#include <mach/gpio-bank-e.h>
-#include <mach/gpio-bank-h.h>
-
static int s3c64xx_i2sv3_cfg_gpio(struct platform_device *pdev)
{
+ unsigned int base;
+
switch (pdev->id) {
case 0:
- s3c_gpio_cfgpin(S3C64XX_GPD(0), S3C64XX_GPD0_I2S0_CLK);
- s3c_gpio_cfgpin(S3C64XX_GPD(1), S3C64XX_GPD1_I2S0_CDCLK);
- s3c_gpio_cfgpin(S3C64XX_GPD(2), S3C64XX_GPD2_I2S0_LRCLK);
- s3c_gpio_cfgpin(S3C64XX_GPD(3), S3C64XX_GPD3_I2S0_DI);
- s3c_gpio_cfgpin(S3C64XX_GPD(4), S3C64XX_GPD4_I2S0_D0);
+ base = S3C64XX_GPD(0);
break;
case 1:
- s3c_gpio_cfgpin(S3C64XX_GPE(0), S3C64XX_GPE0_I2S1_CLK);
- s3c_gpio_cfgpin(S3C64XX_GPE(1), S3C64XX_GPE1_I2S1_CDCLK);
- s3c_gpio_cfgpin(S3C64XX_GPE(2), S3C64XX_GPE2_I2S1_LRCLK);
- s3c_gpio_cfgpin(S3C64XX_GPE(3), S3C64XX_GPE3_I2S1_DI);
- s3c_gpio_cfgpin(S3C64XX_GPE(4), S3C64XX_GPE4_I2S1_D0);
+ base = S3C64XX_GPE(0);
+ break;
default:
- printk(KERN_DEBUG "Invalid I2S Controller number!");
+ printk(KERN_DEBUG "Invalid I2S Controller number: %d\n",
+ pdev->id);
return -EINVAL;
}
+ s3c_gpio_cfgpin_range(base, 5, S3C_GPIO_SFN(3));
+
return 0;
}
static int s3c64xx_i2sv4_cfg_gpio(struct platform_device *pdev)
{
- s3c_gpio_cfgpin(S3C64XX_GPC(4), S3C64XX_GPC4_I2S_V40_DO0);
- s3c_gpio_cfgpin(S3C64XX_GPC(5), S3C64XX_GPC5_I2S_V40_DO1);
- s3c_gpio_cfgpin(S3C64XX_GPC(7), S3C64XX_GPC7_I2S_V40_DO2);
- s3c_gpio_cfgpin(S3C64XX_GPH(6), S3C64XX_GPH6_I2S_V40_BCLK);
- s3c_gpio_cfgpin(S3C64XX_GPH(7), S3C64XX_GPH7_I2S_V40_CDCLK);
- s3c_gpio_cfgpin(S3C64XX_GPH(8), S3C64XX_GPH8_I2S_V40_LRCLK);
- s3c_gpio_cfgpin(S3C64XX_GPH(9), S3C64XX_GPH9_I2S_V40_DI);
+ s3c_gpio_cfgpin(S3C64XX_GPC(4), S3C_GPIO_SFN(5));
+ s3c_gpio_cfgpin(S3C64XX_GPC(5), S3C_GPIO_SFN(5));
+ s3c_gpio_cfgpin(S3C64XX_GPC(7), S3C_GPIO_SFN(5));
+ s3c_gpio_cfgpin_range(S3C64XX_GPH(6), 4, S3C_GPIO_SFN(5));
return 0;
}
@@ -168,26 +158,22 @@ EXPORT_SYMBOL(s3c64xx_device_iisv4);
static int s3c64xx_pcm_cfg_gpio(struct platform_device *pdev)
{
+ unsigned int base;
+
switch (pdev->id) {
case 0:
- s3c_gpio_cfgpin(S3C64XX_GPD(0), S3C64XX_GPD0_PCM0_SCLK);
- s3c_gpio_cfgpin(S3C64XX_GPD(1), S3C64XX_GPD1_PCM0_EXTCLK);
- s3c_gpio_cfgpin(S3C64XX_GPD(2), S3C64XX_GPD2_PCM0_FSYNC);
- s3c_gpio_cfgpin(S3C64XX_GPD(3), S3C64XX_GPD3_PCM0_SIN);
- s3c_gpio_cfgpin(S3C64XX_GPD(4), S3C64XX_GPD4_PCM0_SOUT);
+ base = S3C64XX_GPD(0);
break;
case 1:
- s3c_gpio_cfgpin(S3C64XX_GPE(0), S3C64XX_GPE0_PCM1_SCLK);
- s3c_gpio_cfgpin(S3C64XX_GPE(1), S3C64XX_GPE1_PCM1_EXTCLK);
- s3c_gpio_cfgpin(S3C64XX_GPE(2), S3C64XX_GPE2_PCM1_FSYNC);
- s3c_gpio_cfgpin(S3C64XX_GPE(3), S3C64XX_GPE3_PCM1_SIN);
- s3c_gpio_cfgpin(S3C64XX_GPE(4), S3C64XX_GPE4_PCM1_SOUT);
+ base = S3C64XX_GPE(0);
break;
default:
- printk(KERN_DEBUG "Invalid PCM Controller number!");
+ printk(KERN_DEBUG "Invalid PCM Controller number: %d\n",
+ pdev->id);
return -EINVAL;
}
+ s3c_gpio_cfgpin_range(base, 5, S3C_GPIO_SFN(2));
return 0;
}
@@ -261,24 +247,12 @@ EXPORT_SYMBOL(s3c64xx_device_pcm1);
static int s3c64xx_ac97_cfg_gpd(struct platform_device *pdev)
{
- s3c_gpio_cfgpin(S3C64XX_GPD(0), S3C64XX_GPD0_AC97_BITCLK);
- s3c_gpio_cfgpin(S3C64XX_GPD(1), S3C64XX_GPD1_AC97_nRESET);
- s3c_gpio_cfgpin(S3C64XX_GPD(2), S3C64XX_GPD2_AC97_SYNC);
- s3c_gpio_cfgpin(S3C64XX_GPD(3), S3C64XX_GPD3_AC97_SDI);
- s3c_gpio_cfgpin(S3C64XX_GPD(4), S3C64XX_GPD4_AC97_SDO);
-
- return 0;
+ return s3c_gpio_cfgpin_range(S3C64XX_GPD(0), 5, S3C_GPIO_SFN(4));
}
static int s3c64xx_ac97_cfg_gpe(struct platform_device *pdev)
{
- s3c_gpio_cfgpin(S3C64XX_GPE(0), S3C64XX_GPE0_AC97_BITCLK);
- s3c_gpio_cfgpin(S3C64XX_GPE(1), S3C64XX_GPE1_AC97_nRESET);
- s3c_gpio_cfgpin(S3C64XX_GPE(2), S3C64XX_GPE2_AC97_SYNC);
- s3c_gpio_cfgpin(S3C64XX_GPE(3), S3C64XX_GPE3_AC97_SDI);
- s3c_gpio_cfgpin(S3C64XX_GPE(4), S3C64XX_GPE4_AC97_SDO);
-
- return 0;
+ return s3c_gpio_cfgpin_range(S3C64XX_GPE(0), 5, S3C_GPIO_SFN(4));
}
static struct resource s3c64xx_ac97_resource[] = {
@@ -333,3 +307,16 @@ void __init s3c64xx_ac97_setup_gpio(int num)
else
s3c_ac97_pdata.cfg_gpio = s3c64xx_ac97_cfg_gpe;
}
+
+static u64 s3c_device_audio_dmamask = 0xffffffffUL;
+
+struct platform_device s3c_device_pcm = {
+ .name = "s3c24xx-pcm-audio",
+ .id = -1,
+ .dev = {
+ .dma_mask = &s3c_device_audio_dmamask,
+ .coherent_dma_mask = 0xffffffffUL
+ }
+};
+EXPORT_SYMBOL(s3c_device_pcm);
+
diff --git a/arch/arm/mach-s3c64xx/gpiolib.c b/arch/arm/mach-s3c64xx/gpiolib.c
index 300dee4a667b..fd99a82e82c4 100644
--- a/arch/arm/mach-s3c64xx/gpiolib.c
+++ b/arch/arm/mach-s3c64xx/gpiolib.c
@@ -195,11 +195,6 @@ static struct s3c_gpio_cfg gpio_2bit_cfg_eint11 = {
.get_pull = s3c_gpio_getpull_updown,
};
-int s3c64xx_gpio2int_gpn(struct gpio_chip *chip, unsigned pin)
-{
- return IRQ_EINT(0) + pin;
-}
-
static struct s3c_gpio_chip gpio_2bit[] = {
{
.base = S3C64XX_GPF_BASE,
@@ -227,12 +222,13 @@ static struct s3c_gpio_chip gpio_2bit[] = {
},
}, {
.base = S3C64XX_GPN_BASE,
+ .irq_base = IRQ_EINT(0),
.config = &gpio_2bit_cfg_eint10,
.chip = {
.base = S3C64XX_GPN(0),
.ngpio = S3C64XX_GPIO_N_NR,
.label = "GPN",
- .to_irq = s3c64xx_gpio2int_gpn,
+ .to_irq = samsung_gpiolib_to_irq,
},
}, {
.base = S3C64XX_GPO_BASE,
diff --git a/arch/arm/mach-s3c64xx/include/mach/vmalloc.h b/arch/arm/mach-s3c64xx/include/mach/vmalloc.h
index bc0e91389864..23f75e556a30 100644
--- a/arch/arm/mach-s3c64xx/include/mach/vmalloc.h
+++ b/arch/arm/mach-s3c64xx/include/mach/vmalloc.h
@@ -15,6 +15,6 @@
#ifndef __ASM_ARCH_VMALLOC_H
#define __ASM_ARCH_VMALLOC_H
-#define VMALLOC_END 0xE0000000UL
+#define VMALLOC_END 0xF6000000UL
#endif /* __ASM_ARCH_VMALLOC_H */
diff --git a/arch/arm/mach-s3c64xx/mach-mini6410.c b/arch/arm/mach-s3c64xx/mach-mini6410.c
new file mode 100644
index 000000000000..89f35e02e883
--- /dev/null
+++ b/arch/arm/mach-s3c64xx/mach-mini6410.c
@@ -0,0 +1,357 @@
+/* linux/arch/arm/mach-s3c64xx/mach-mini6410.c
+ *
+ * Copyright 2010 Darius Augulis <augulis.darius@gmail.com>
+ * Copyright 2008 Openmoko, Inc.
+ * Copyright 2008 Simtec Electronics
+ * Ben Dooks <ben@simtec.co.uk>
+ * http://armlinux.simtec.co.uk/
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+*/
+
+#include <linux/init.h>
+#include <linux/interrupt.h>
+#include <linux/fb.h>
+#include <linux/gpio.h>
+#include <linux/kernel.h>
+#include <linux/list.h>
+#include <linux/dm9000.h>
+#include <linux/mtd/mtd.h>
+#include <linux/mtd/partitions.h>
+#include <linux/serial_core.h>
+#include <linux/types.h>
+
+#include <asm/mach-types.h>
+#include <asm/mach/arch.h>
+#include <asm/mach/map.h>
+
+#include <mach/map.h>
+#include <mach/regs-fb.h>
+#include <mach/regs-gpio.h>
+#include <mach/regs-modem.h>
+#include <mach/regs-srom.h>
+#include <mach/s3c6410.h>
+
+#include <plat/adc.h>
+#include <plat/cpu.h>
+#include <plat/devs.h>
+#include <plat/fb.h>
+#include <plat/nand.h>
+#include <plat/regs-serial.h>
+#include <plat/ts.h>
+
+#include <video/platform_lcd.h>
+
+#define UCON S3C2410_UCON_DEFAULT
+#define ULCON (S3C2410_LCON_CS8 | S3C2410_LCON_PNONE | S3C2410_LCON_STOPB)
+#define UFCON (S3C2410_UFCON_RXTRIG8 | S3C2410_UFCON_FIFOMODE)
+
+static struct s3c2410_uartcfg mini6410_uartcfgs[] __initdata = {
+ [0] = {
+ .hwport = 0,
+ .flags = 0,
+ .ucon = UCON,
+ .ulcon = ULCON,
+ .ufcon = UFCON,
+ },
+ [1] = {
+ .hwport = 1,
+ .flags = 0,
+ .ucon = UCON,
+ .ulcon = ULCON,
+ .ufcon = UFCON,
+ },
+ [2] = {
+ .hwport = 2,
+ .flags = 0,
+ .ucon = UCON,
+ .ulcon = ULCON,
+ .ufcon = UFCON,
+ },
+ [3] = {
+ .hwport = 3,
+ .flags = 0,
+ .ucon = UCON,
+ .ulcon = ULCON,
+ .ufcon = UFCON,
+ },
+};
+
+/* DM9000AEP 10/100 ethernet controller */
+
+static struct resource mini6410_dm9k_resource[] = {
+ [0] = {
+ .start = S3C64XX_PA_XM0CSN1,
+ .end = S3C64XX_PA_XM0CSN1 + 1,
+ .flags = IORESOURCE_MEM
+ },
+ [1] = {
+ .start = S3C64XX_PA_XM0CSN1 + 4,
+ .end = S3C64XX_PA_XM0CSN1 + 5,
+ .flags = IORESOURCE_MEM
+ },
+ [2] = {
+ .start = S3C_EINT(7),
+ .end = S3C_EINT(7),
+ .flags = IORESOURCE_IRQ | IORESOURCE_IRQ_HIGHLEVEL
+ }
+};
+
+static struct dm9000_plat_data mini6410_dm9k_pdata = {
+ .flags = (DM9000_PLATF_16BITONLY | DM9000_PLATF_NO_EEPROM),
+};
+
+static struct platform_device mini6410_device_eth = {
+ .name = "dm9000",
+ .id = -1,
+ .num_resources = ARRAY_SIZE(mini6410_dm9k_resource),
+ .resource = mini6410_dm9k_resource,
+ .dev = {
+ .platform_data = &mini6410_dm9k_pdata,
+ },
+};
+
+static struct mtd_partition mini6410_nand_part[] = {
+ [0] = {
+ .name = "uboot",
+ .size = SZ_1M,
+ .offset = 0,
+ },
+ [1] = {
+ .name = "kernel",
+ .size = SZ_2M,
+ .offset = SZ_1M,
+ },
+ [2] = {
+ .name = "rootfs",
+ .size = MTDPART_SIZ_FULL,
+ .offset = SZ_1M + SZ_2M,
+ },
+};
+
+static struct s3c2410_nand_set mini6410_nand_sets[] = {
+ [0] = {
+ .name = "nand",
+ .nr_chips = 1,
+ .nr_partitions = ARRAY_SIZE(mini6410_nand_part),
+ .partitions = mini6410_nand_part,
+ },
+};
+
+static struct s3c2410_platform_nand mini6410_nand_info = {
+ .tacls = 25,
+ .twrph0 = 55,
+ .twrph1 = 40,
+ .nr_sets = ARRAY_SIZE(mini6410_nand_sets),
+ .sets = mini6410_nand_sets,
+};
+
+static struct s3c_fb_pd_win mini6410_fb_win[] = {
+ {
+ .win_mode = { /* 4.3" 480x272 */
+ .left_margin = 3,
+ .right_margin = 2,
+ .upper_margin = 1,
+ .lower_margin = 1,
+ .hsync_len = 40,
+ .vsync_len = 1,
+ .xres = 480,
+ .yres = 272,
+ },
+ .max_bpp = 32,
+ .default_bpp = 16,
+ }, {
+ .win_mode = { /* 7.0" 800x480 */
+ .left_margin = 8,
+ .right_margin = 13,
+ .upper_margin = 7,
+ .lower_margin = 5,
+ .hsync_len = 3,
+ .vsync_len = 1,
+ .xres = 800,
+ .yres = 480,
+ },
+ .max_bpp = 32,
+ .default_bpp = 16,
+ },
+};
+
+static struct s3c_fb_platdata mini6410_lcd_pdata __initdata = {
+ .setup_gpio = s3c64xx_fb_gpio_setup_24bpp,
+ .win[0] = &mini6410_fb_win[0],
+ .vidcon0 = VIDCON0_VIDOUT_RGB | VIDCON0_PNRMODE_RGB,
+ .vidcon1 = VIDCON1_INV_HSYNC | VIDCON1_INV_VSYNC,
+};
+
+static void mini6410_lcd_power_set(struct plat_lcd_data *pd,
+ unsigned int power)
+{
+ if (power)
+ gpio_direction_output(S3C64XX_GPE(0), 1);
+ else
+ gpio_direction_output(S3C64XX_GPE(0), 0);
+}
+
+static struct plat_lcd_data mini6410_lcd_power_data = {
+ .set_power = mini6410_lcd_power_set,
+};
+
+static struct platform_device mini6410_lcd_powerdev = {
+ .name = "platform-lcd",
+ .dev.parent = &s3c_device_fb.dev,
+ .dev.platform_data = &mini6410_lcd_power_data,
+};
+
+static struct s3c2410_ts_mach_info s3c_ts_platform __initdata = {
+ .delay = 10000,
+ .presc = 49,
+ .oversampling_shift = 2,
+};
+
+static struct platform_device *mini6410_devices[] __initdata = {
+ &mini6410_device_eth,
+ &s3c_device_hsmmc0,
+ &s3c_device_hsmmc1,
+ &s3c_device_ohci,
+ &s3c_device_nand,
+ &s3c_device_fb,
+ &mini6410_lcd_powerdev,
+ &s3c_device_adc,
+ &s3c_device_ts,
+};
+
+static void __init mini6410_map_io(void)
+{
+ u32 tmp;
+
+ s3c64xx_init_io(NULL, 0);
+ s3c24xx_init_clocks(12000000);
+ s3c24xx_init_uarts(mini6410_uartcfgs, ARRAY_SIZE(mini6410_uartcfgs));
+
+ /* set the LCD type */
+ tmp = __raw_readl(S3C64XX_SPCON);
+ tmp &= ~S3C64XX_SPCON_LCD_SEL_MASK;
+ tmp |= S3C64XX_SPCON_LCD_SEL_RGB;
+ __raw_writel(tmp, S3C64XX_SPCON);
+
+ /* remove the LCD bypass */
+ tmp = __raw_readl(S3C64XX_MODEM_MIFPCON);
+ tmp &= ~MIFPCON_LCD_BYPASS;
+ __raw_writel(tmp, S3C64XX_MODEM_MIFPCON);
+}
+
+/*
+ * mini6410_features string
+ *
+ * 0-9 LCD configuration
+ *
+ */
+static char mini6410_features_str[12] __initdata = "0";
+
+static int __init mini6410_features_setup(char *str)
+{
+ if (str)
+ strlcpy(mini6410_features_str, str,
+ sizeof(mini6410_features_str));
+ return 1;
+}
+
+__setup("mini6410=", mini6410_features_setup);
+
+#define FEATURE_SCREEN (1 << 0)
+
+struct mini6410_features_t {
+ int done;
+ int lcd_index;
+};
+
+static void mini6410_parse_features(
+ struct mini6410_features_t *features,
+ const char *features_str)
+{
+ const char *fp = features_str;
+
+ features->done = 0;
+ features->lcd_index = 0;
+
+ while (*fp) {
+ char f = *fp++;
+
+ switch (f) {
+ case '0'...'9': /* tft screen */
+ if (features->done & FEATURE_SCREEN) {
+ printk(KERN_INFO "MINI6410: '%c' ignored, "
+ "screen type already set\n", f);
+ } else {
+ int li = f - '0';
+ if (li >= ARRAY_SIZE(mini6410_fb_win))
+ printk(KERN_INFO "MINI6410: '%c' out "
+ "of range LCD mode\n", f);
+ else {
+ features->lcd_index = li;
+ }
+ }
+ features->done |= FEATURE_SCREEN;
+ break;
+ }
+ }
+}
+
+static void __init mini6410_machine_init(void)
+{
+ u32 cs1;
+ struct mini6410_features_t features = { 0 };
+
+ printk(KERN_INFO "MINI6410: Option string mini6410=%s\n",
+ mini6410_features_str);
+
+ /* Parse the feature string */
+ mini6410_parse_features(&features, mini6410_features_str);
+
+ mini6410_lcd_pdata.win[0] = &mini6410_fb_win[features.lcd_index];
+
+ printk(KERN_INFO "MINI6410: selected LCD display is %dx%d\n",
+ mini6410_lcd_pdata.win[0]->win_mode.xres,
+ mini6410_lcd_pdata.win[0]->win_mode.yres);
+
+ s3c_nand_set_platdata(&mini6410_nand_info);
+ s3c_fb_set_platdata(&mini6410_lcd_pdata);
+ s3c24xx_ts_set_platdata(&s3c_ts_platform);
+
+ /* configure nCS1 width to 16 bits */
+
+ cs1 = __raw_readl(S3C64XX_SROM_BW) &
+ ~(S3C64XX_SROM_BW__CS_MASK << S3C64XX_SROM_BW__NCS1__SHIFT);
+ cs1 |= ((1 << S3C64XX_SROM_BW__DATAWIDTH__SHIFT) |
+ (1 << S3C64XX_SROM_BW__WAITENABLE__SHIFT) |
+ (1 << S3C64XX_SROM_BW__BYTEENABLE__SHIFT)) <<
+ S3C64XX_SROM_BW__NCS1__SHIFT;
+ __raw_writel(cs1, S3C64XX_SROM_BW);
+
+ /* set timing for nCS1 suitable for ethernet chip */
+
+ __raw_writel((0 << S3C64XX_SROM_BCX__PMC__SHIFT) |
+ (6 << S3C64XX_SROM_BCX__TACP__SHIFT) |
+ (4 << S3C64XX_SROM_BCX__TCAH__SHIFT) |
+ (1 << S3C64XX_SROM_BCX__TCOH__SHIFT) |
+ (13 << S3C64XX_SROM_BCX__TACC__SHIFT) |
+ (4 << S3C64XX_SROM_BCX__TCOS__SHIFT) |
+ (0 << S3C64XX_SROM_BCX__TACS__SHIFT), S3C64XX_SROM_BC1);
+
+ gpio_request(S3C64XX_GPF(15), "LCD power");
+ gpio_request(S3C64XX_GPE(0), "LCD power");
+
+ platform_add_devices(mini6410_devices, ARRAY_SIZE(mini6410_devices));
+}
+
+MACHINE_START(MINI6410, "MINI6410")
+ /* Maintainer: Darius Augulis <augulis.darius@gmail.com> */
+ .boot_params = S3C64XX_PA_SDRAM + 0x100,
+ .init_irq = s3c6410_init_irq,
+ .map_io = mini6410_map_io,
+ .init_machine = mini6410_machine_init,
+ .timer = &s3c24xx_timer,
+MACHINE_END
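A usage note on the mini6410= option handled by mini6410_features_setup() above (the real6410 board below reuses the same pattern): the single digit selects an entry of mini6410_fb_win[], so booting with, for example,

	mini6410=1

on the kernel command line picks the 7.0" 800x480 timings, while the default "0" keeps the 4.3" 480x272 panel.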
diff --git a/arch/arm/mach-s3c64xx/mach-real6410.c b/arch/arm/mach-s3c64xx/mach-real6410.c
index 4b4475da8ec6..4957ab0a0d4a 100644
--- a/arch/arm/mach-s3c64xx/mach-real6410.c
+++ b/arch/arm/mach-s3c64xx/mach-real6410.c
@@ -12,25 +12,41 @@
*
*/
-#include <linux/kernel.h>
-#include <linux/types.h>
+#include <linux/init.h>
#include <linux/interrupt.h>
+#include <linux/fb.h>
+#include <linux/gpio.h>
+#include <linux/kernel.h>
#include <linux/list.h>
-#include <linux/init.h>
#include <linux/dm9000.h>
-#include <linux/serial_core.h>
+#include <linux/mtd/mtd.h>
+#include <linux/mtd/partitions.h>
#include <linux/platform_device.h>
+#include <linux/serial_core.h>
+#include <linux/types.h>
+
#include <asm/mach-types.h>
#include <asm/mach/arch.h>
#include <asm/mach/map.h>
+
#include <mach/map.h>
-#include <mach/s3c6410.h>
+#include <mach/regs-fb.h>
+#include <mach/regs-gpio.h>
+#include <mach/regs-modem.h>
#include <mach/regs-srom.h>
+#include <mach/s3c6410.h>
+
+#include <plat/adc.h>
#include <plat/cpu.h>
#include <plat/devs.h>
+#include <plat/fb.h>
+#include <plat/nand.h>
#include <plat/regs-serial.h>
+#include <plat/ts.h>
+
+#include <video/platform_lcd.h>
-#define UCON (S3C2410_UCON_DEFAULT | S3C2410_UCON_UCLK)
+#define UCON S3C2410_UCON_DEFAULT
#define ULCON (S3C2410_LCON_CS8 | S3C2410_LCON_PNONE | S3C2410_LCON_STOPB)
#define UFCON (S3C2410_UFCON_RXTRIG8 | S3C2410_UFCON_FIFOMODE)
@@ -99,22 +115,192 @@ static struct platform_device real6410_device_eth = {
},
};
+static struct s3c_fb_pd_win real6410_fb_win[] = {
+ {
+ .win_mode = { /* 4.3" 480x272 */
+ .left_margin = 3,
+ .right_margin = 2,
+ .upper_margin = 1,
+ .lower_margin = 1,
+ .hsync_len = 40,
+ .vsync_len = 1,
+ .xres = 480,
+ .yres = 272,
+ },
+ .max_bpp = 32,
+ .default_bpp = 16,
+ }, {
+ .win_mode = { /* 7.0" 800x480 */
+ .left_margin = 8,
+ .right_margin = 13,
+ .upper_margin = 7,
+ .lower_margin = 5,
+ .hsync_len = 3,
+ .vsync_len = 1,
+ .xres = 800,
+ .yres = 480,
+ },
+ .max_bpp = 32,
+ .default_bpp = 16,
+ },
+};
+
+static struct s3c_fb_platdata real6410_lcd_pdata __initdata = {
+ .setup_gpio = s3c64xx_fb_gpio_setup_24bpp,
+ .win[0] = &real6410_fb_win[0],
+ .vidcon0 = VIDCON0_VIDOUT_RGB | VIDCON0_PNRMODE_RGB,
+ .vidcon1 = VIDCON1_INV_HSYNC | VIDCON1_INV_VSYNC,
+};
+
+static struct mtd_partition real6410_nand_part[] = {
+ [0] = {
+ .name = "uboot",
+ .size = SZ_1M,
+ .offset = 0,
+ },
+ [1] = {
+ .name = "kernel",
+ .size = SZ_2M,
+ .offset = SZ_1M,
+ },
+ [2] = {
+ .name = "rootfs",
+ .size = MTDPART_SIZ_FULL,
+ .offset = SZ_1M + SZ_2M,
+ },
+};
+
+static struct s3c2410_nand_set real6410_nand_sets[] = {
+ [0] = {
+ .name = "nand",
+ .nr_chips = 1,
+ .nr_partitions = ARRAY_SIZE(real6410_nand_part),
+ .partitions = real6410_nand_part,
+ },
+};
+
+static struct s3c2410_platform_nand real6410_nand_info = {
+ .tacls = 25,
+ .twrph0 = 55,
+ .twrph1 = 40,
+ .nr_sets = ARRAY_SIZE(real6410_nand_sets),
+ .sets = real6410_nand_sets,
+};
+
static struct platform_device *real6410_devices[] __initdata = {
&real6410_device_eth,
&s3c_device_hsmmc0,
&s3c_device_hsmmc1,
+ &s3c_device_fb,
+ &s3c_device_nand,
+ &s3c_device_adc,
+ &s3c_device_ts,
+ &s3c_device_ohci,
+};
+
+static struct s3c2410_ts_mach_info s3c_ts_platform __initdata = {
+ .delay = 10000,
+ .presc = 49,
+ .oversampling_shift = 2,
};
static void __init real6410_map_io(void)
{
+ u32 tmp;
+
s3c64xx_init_io(NULL, 0);
s3c24xx_init_clocks(12000000);
s3c24xx_init_uarts(real6410_uartcfgs, ARRAY_SIZE(real6410_uartcfgs));
+
+ /* set the LCD type */
+ tmp = __raw_readl(S3C64XX_SPCON);
+ tmp &= ~S3C64XX_SPCON_LCD_SEL_MASK;
+ tmp |= S3C64XX_SPCON_LCD_SEL_RGB;
+ __raw_writel(tmp, S3C64XX_SPCON);
+
+ /* remove the LCD bypass */
+ tmp = __raw_readl(S3C64XX_MODEM_MIFPCON);
+ tmp &= ~MIFPCON_LCD_BYPASS;
+ __raw_writel(tmp, S3C64XX_MODEM_MIFPCON);
+}
+
+/*
+ * real6410_features string
+ *
+ * 0-9 LCD configuration
+ *
+ */
+static char real6410_features_str[12] __initdata = "0";
+
+static int __init real6410_features_setup(char *str)
+{
+ if (str)
+ strlcpy(real6410_features_str, str,
+ sizeof(real6410_features_str));
+ return 1;
+}
+
+__setup("real6410=", real6410_features_setup);
+
+#define FEATURE_SCREEN (1 << 0)
+
+struct real6410_features_t {
+ int done;
+ int lcd_index;
+};
+
+static void real6410_parse_features(
+ struct real6410_features_t *features,
+ const char *features_str)
+{
+ const char *fp = features_str;
+
+ features->done = 0;
+ features->lcd_index = 0;
+
+ while (*fp) {
+ char f = *fp++;
+
+ switch (f) {
+ case '0'...'9': /* tft screen */
+ if (features->done & FEATURE_SCREEN) {
+ printk(KERN_INFO "REAL6410: '%c' ignored, "
+ "screen type already set\n", f);
+ } else {
+ int li = f - '0';
+ if (li >= ARRAY_SIZE(real6410_fb_win))
+ printk(KERN_INFO "REAL6410: '%c' out "
+ "of range LCD mode\n", f);
+ else {
+ features->lcd_index = li;
+ }
+ }
+ features->done |= FEATURE_SCREEN;
+ break;
+ }
+ }
}
static void __init real6410_machine_init(void)
{
u32 cs1;
+ struct real6410_features_t features = { 0 };
+
+ printk(KERN_INFO "REAL6410: Option string real6410=%s\n",
+ real6410_features_str);
+
+ /* Parse the feature string */
+ real6410_parse_features(&features, real6410_features_str);
+
+ real6410_lcd_pdata.win[0] = &real6410_fb_win[features.lcd_index];
+
+ printk(KERN_INFO "REAL6410: selected LCD display is %dx%d\n",
+ real6410_lcd_pdata.win[0]->win_mode.xres,
+ real6410_lcd_pdata.win[0]->win_mode.yres);
+
+ s3c_fb_set_platdata(&real6410_lcd_pdata);
+ s3c_nand_set_platdata(&real6410_nand_info);
+ s3c24xx_ts_set_platdata(&s3c_ts_platform);
/* configure nCS1 width to 16 bits */
@@ -136,6 +322,8 @@ static void __init real6410_machine_init(void)
(4 << S3C64XX_SROM_BCX__TCOS__SHIFT) |
(0 << S3C64XX_SROM_BCX__TACS__SHIFT), S3C64XX_SROM_BC1);
+ gpio_request(S3C64XX_GPF(15), "LCD power");
+
platform_add_devices(real6410_devices, ARRAY_SIZE(real6410_devices));
}
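
For readers unfamiliar with the __setup() pattern used above: the hunk adds a "real6410=" boot option whose digits pick an entry of real6410_fb_win[]. Below is a minimal, standalone sketch of the same parsing logic in plain userspace C; the names and the two-entry mode table merely mirror the board file, and this is illustration only, not part of the patch.

	/*
	 * Illustrative stand-alone model of real6410_parse_features():
	 * the first digit selects the LCD mode, later digits are ignored,
	 * out-of-range digits are reported but still consume the slot.
	 */
	#include <stdio.h>

	#define NR_LCD_MODES	2		/* mirrors ARRAY_SIZE(real6410_fb_win) */
	#define FEATURE_SCREEN	(1 << 0)

	struct features { int done; int lcd_index; };

	static void parse_features(struct features *f, const char *s)
	{
		f->done = 0;
		f->lcd_index = 0;

		while (*s) {
			char c = *s++;

			if (c >= '0' && c <= '9') {
				if (f->done & FEATURE_SCREEN)
					printf("'%c' ignored, screen type already set\n", c);
				else if (c - '0' >= NR_LCD_MODES)
					printf("'%c' out of range LCD mode\n", c);
				else
					f->lcd_index = c - '0';
				f->done |= FEATURE_SCREEN;
			}
		}
	}

	int main(void)
	{
		struct features f;

		parse_features(&f, "10");	/* selects mode 1, then ignores the '0' */
		printf("lcd_index = %d\n", f.lcd_index);
		return 0;
	}
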
diff --git a/arch/arm/mach-s3c64xx/mach-smdk6410.c b/arch/arm/mach-s3c64xx/mach-smdk6410.c
index ec8865c03a19..77488facfe4c 100644
--- a/arch/arm/mach-s3c64xx/mach-smdk6410.c
+++ b/arch/arm/mach-s3c64xx/mach-smdk6410.c
@@ -283,6 +283,7 @@ static struct platform_device *smdk6410_devices[] __initdata = {
&s3c_device_fb,
&s3c_device_ohci,
&s3c_device_usb_hsotg,
+ &s3c_device_pcm,
&s3c64xx_device_iisv4,
&samsung_device_keypad,
diff --git a/arch/arm/mach-s3c64xx/setup-fb-24bpp.c b/arch/arm/mach-s3c64xx/setup-fb-24bpp.c
index 000736877df2..8f3091182f9c 100644
--- a/arch/arm/mach-s3c64xx/setup-fb-24bpp.c
+++ b/arch/arm/mach-s3c64xx/setup-fb-24bpp.c
@@ -23,15 +23,6 @@
extern void s3c64xx_fb_gpio_setup_24bpp(void)
{
- unsigned int gpio;
-
- for (gpio = S3C64XX_GPI(0); gpio <= S3C64XX_GPI(15); gpio++) {
- s3c_gpio_cfgpin(gpio, S3C_GPIO_SFN(2));
- s3c_gpio_setpull(gpio, S3C_GPIO_PULL_NONE);
- }
-
- for (gpio = S3C64XX_GPJ(0); gpio <= S3C64XX_GPJ(11); gpio++) {
- s3c_gpio_cfgpin(gpio, S3C_GPIO_SFN(2));
- s3c_gpio_setpull(gpio, S3C_GPIO_PULL_NONE);
- }
+ s3c_gpio_cfgrange_nopull(S3C64XX_GPI(0), 16, S3C_GPIO_SFN(2));
+ s3c_gpio_cfgrange_nopull(S3C64XX_GPJ(0), 12, S3C_GPIO_SFN(2));
}
diff --git a/arch/arm/mach-s3c64xx/setup-ide.c b/arch/arm/mach-s3c64xx/setup-ide.c
index c12c315f33bc..41b425602d88 100644
--- a/arch/arm/mach-s3c64xx/setup-ide.c
+++ b/arch/arm/mach-s3c64xx/setup-ide.c
@@ -17,11 +17,11 @@
#include <mach/map.h>
#include <mach/regs-clock.h>
#include <plat/gpio-cfg.h>
+#include <plat/ata.h>
void s3c64xx_ide_setup_gpio(void)
{
u32 reg;
- u32 gpio = 0;
reg = readl(S3C_MEM_SYS_CFG) & (~0x3f);
@@ -32,15 +32,12 @@ void s3c64xx_ide_setup_gpio(void)
s3c_gpio_cfgpin(S3C64XX_GPB(4), S3C_GPIO_SFN(4));
/* Set XhiDATA[15:0] pins as CF Data[15:0] */
- for (gpio = S3C64XX_GPK(0); gpio <= S3C64XX_GPK(15); gpio++)
- s3c_gpio_cfgpin(gpio, S3C_GPIO_SFN(5));
+ s3c_gpio_cfgpin_range(S3C64XX_GPK(0), 16, S3C_GPIO_SFN(5));
/* Set XhiADDR[2:0] pins as CF ADDR[2:0] */
- for (gpio = S3C64XX_GPL(0); gpio <= S3C64XX_GPL(2); gpio++)
- s3c_gpio_cfgpin(gpio, S3C_GPIO_SFN(6));
+ s3c_gpio_cfgpin_range(S3C64XX_GPL(0), 3, S3C_GPIO_SFN(6));
/* Set Xhi ctrl pins as CF ctrl pins(IORDY, IOWR, IORD, CE[0:1]) */
s3c_gpio_cfgpin(S3C64XX_GPM(5), S3C_GPIO_SFN(1));
- for (gpio = S3C64XX_GPM(0); gpio <= S3C64XX_GPM(4); gpio++)
- s3c_gpio_cfgpin(gpio, S3C_GPIO_SFN(6));
+ s3c_gpio_cfgpin_range(S3C64XX_GPM(0), 5, S3C_GPIO_SFN(6));
}
diff --git a/arch/arm/mach-s3c64xx/setup-keypad.c b/arch/arm/mach-s3c64xx/setup-keypad.c
index abc34e4e1a93..f8ed0d22db70 100644
--- a/arch/arm/mach-s3c64xx/setup-keypad.c
+++ b/arch/arm/mach-s3c64xx/setup-keypad.c
@@ -12,23 +12,13 @@
#include <linux/gpio.h>
#include <plat/gpio-cfg.h>
+#include <plat/keypad.h>
void samsung_keypad_cfg_gpio(unsigned int rows, unsigned int cols)
{
- unsigned int gpio;
- unsigned int end;
-
/* Set all the necessary GPK pins to special-function 3: KP_ROW[x] */
- end = S3C64XX_GPK(8 + rows);
- for (gpio = S3C64XX_GPK(8); gpio < end; gpio++) {
- s3c_gpio_cfgpin(gpio, S3C_GPIO_SFN(3));
- s3c_gpio_setpull(gpio, S3C_GPIO_PULL_NONE);
- }
+ s3c_gpio_cfgrange_nopull(S3C64XX_GPK(8), rows, S3C_GPIO_SFN(3));
/* Set all the necessary GPL pins to special-function 3: KP_COL[x] */
- end = S3C64XX_GPL(0 + cols);
- for (gpio = S3C64XX_GPL(0); gpio < end; gpio++) {
- s3c_gpio_cfgpin(gpio, S3C_GPIO_SFN(3));
- s3c_gpio_setpull(gpio, S3C_GPIO_PULL_NONE);
- }
+ s3c_gpio_cfgrange_nopull(S3C64XX_GPL(0), cols, S3C_GPIO_SFN(3));
}
diff --git a/arch/arm/mach-s3c64xx/setup-sdhci-gpio.c b/arch/arm/mach-s3c64xx/setup-sdhci-gpio.c
index 322359591374..6eac071afae2 100644
--- a/arch/arm/mach-s3c64xx/setup-sdhci-gpio.c
+++ b/arch/arm/mach-s3c64xx/setup-sdhci-gpio.c
@@ -24,16 +24,9 @@
void s3c64xx_setup_sdhci0_cfg_gpio(struct platform_device *dev, int width)
{
struct s3c_sdhci_platdata *pdata = dev->dev.platform_data;
- unsigned int gpio;
- unsigned int end;
- end = S3C64XX_GPG(2 + width);
-
- /* Set all the necessary GPG pins to special-function 0 */
- for (gpio = S3C64XX_GPG(0); gpio < end; gpio++) {
- s3c_gpio_cfgpin(gpio, S3C_GPIO_SFN(2));
- s3c_gpio_setpull(gpio, S3C_GPIO_PULL_NONE);
- }
+ /* Set all the necessary GPG pins to special-function 2 */
+ s3c_gpio_cfgrange_nopull(S3C64XX_GPG(0), 2 + width, S3C_GPIO_SFN(2));
if (pdata->cd_type == S3C_SDHCI_CD_INTERNAL) {
s3c_gpio_setpull(S3C64XX_GPG(6), S3C_GPIO_PULL_UP);
@@ -44,16 +37,9 @@ void s3c64xx_setup_sdhci0_cfg_gpio(struct platform_device *dev, int width)
void s3c64xx_setup_sdhci1_cfg_gpio(struct platform_device *dev, int width)
{
struct s3c_sdhci_platdata *pdata = dev->dev.platform_data;
- unsigned int gpio;
- unsigned int end;
- end = S3C64XX_GPH(2 + width);
-
- /* Set all the necessary GPG pins to special-function 0 */
- for (gpio = S3C64XX_GPH(0); gpio < end; gpio++) {
- s3c_gpio_cfgpin(gpio, S3C_GPIO_SFN(2));
- s3c_gpio_setpull(gpio, S3C_GPIO_PULL_NONE);
- }
+ /* Set all the necessary GPH pins to special-function 2 */
+ s3c_gpio_cfgrange_nopull(S3C64XX_GPH(0), 2 + width, S3C_GPIO_SFN(2));
if (pdata->cd_type == S3C_SDHCI_CD_INTERNAL) {
s3c_gpio_setpull(S3C64XX_GPG(6), S3C_GPIO_PULL_UP);
@@ -63,20 +49,9 @@ void s3c64xx_setup_sdhci1_cfg_gpio(struct platform_device *dev, int width)
void s3c64xx_setup_sdhci2_cfg_gpio(struct platform_device *dev, int width)
{
- unsigned int gpio;
- unsigned int end;
+ /* Set all the necessary GPH pins to special-function 3 */
+ s3c_gpio_cfgrange_nopull(S3C64XX_GPH(6), width, S3C_GPIO_SFN(3));
- end = S3C64XX_GPH(6 + width);
-
- /* Set all the necessary GPH pins to special-function 1 */
- for (gpio = S3C64XX_GPH(6); gpio < end; gpio++) {
- s3c_gpio_cfgpin(gpio, S3C_GPIO_SFN(3));
- s3c_gpio_setpull(gpio, S3C_GPIO_PULL_NONE);
- }
-
- /* Set all the necessary GPC pins to special-function 1 */
- for (gpio = S3C64XX_GPC(4); gpio < S3C64XX_GPC(6); gpio++) {
- s3c_gpio_cfgpin(gpio, S3C_GPIO_SFN(3));
- s3c_gpio_setpull(gpio, S3C_GPIO_PULL_NONE);
- }
+ /* Set all the necessary GPC pins to special-function 3 */
+ s3c_gpio_cfgrange_nopull(S3C64XX_GPC(4), 2, S3C_GPIO_SFN(3));
}
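
The setup-* hunks above replace open-coded per-pin loops with gpio range helpers. The rough userspace model below shows what those helpers are assumed to do; the real implementations live in plat-samsung, operate on S3C_GPIO_* configuration cookies rather than plain integers, and may differ in detail.

	/* Sketch of the range helpers the hunks above switch to. */
	#include <stdio.h>

	static void cfgpin(unsigned int gpio, unsigned int func)
	{
		printf("pin %u -> SFN(%u)\n", gpio, func);
	}

	static void setpull(unsigned int gpio, unsigned int pull)
	{
		printf("pin %u -> pull %u\n", gpio, pull);
	}

	/* model of s3c_gpio_cfgpin_range(): configure nr consecutive pins */
	static void cfgpin_range(unsigned int start, unsigned int nr, unsigned int func)
	{
		unsigned int gpio;

		for (gpio = start; gpio < start + nr; gpio++)
			cfgpin(gpio, func);
	}

	/* model of s3c_gpio_cfgrange_nopull(): cfgpin_range() plus PULL_NONE */
	static void cfgrange_nopull(unsigned int start, unsigned int nr, unsigned int func)
	{
		unsigned int gpio;

		for (gpio = start; gpio < start + nr; gpio++) {
			cfgpin(gpio, func);
			setpull(gpio, 0 /* PULL_NONE */);
		}
	}

	int main(void)
	{
		/* e.g. the 16 GPI LCD data pins in setup-fb-24bpp.c */
		cfgrange_nopull(0, 16, 2);
		/* e.g. the 3 CF address pins in setup-ide.c */
		cfgpin_range(100, 3, 6);
		return 0;
	}
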
diff --git a/arch/arm/mach-s5p6442/Kconfig b/arch/arm/mach-s5p6442/Kconfig
index 0fda0a5df968..33569e4007c4 100644
--- a/arch/arm/mach-s5p6442/Kconfig
+++ b/arch/arm/mach-s5p6442/Kconfig
@@ -11,7 +11,6 @@ if ARCH_S5P6442
config CPU_S5P6442
bool
- select PLAT_S5P
select S3C_PL330_DMA
help
Enable S5P6442 CPU support
diff --git a/arch/arm/mach-s5p6442/clock.c b/arch/arm/mach-s5p6442/clock.c
index dcd20f17212a..16d6e7e61b50 100644
--- a/arch/arm/mach-s5p6442/clock.c
+++ b/arch/arm/mach-s5p6442/clock.c
@@ -192,6 +192,11 @@ static struct clk clk_pclkd1 = {
.parent = &clk_hclkd1,
};
+int s5p6442_clk_ip0_ctrl(struct clk *clk, int enable)
+{
+ return s5p_gatectrl(S5P_CLKGATE_IP0, clk, enable);
+}
+
int s5p6442_clk_ip3_ctrl(struct clk *clk, int enable)
{
return s5p_gatectrl(S5P_CLKGATE_IP3, clk, enable);
@@ -335,6 +340,16 @@ void __init_or_cpufreq s5p6442_setup_clocks(void)
clk_pclkd1.rate = pclkd1;
}
+static struct clk init_clocks_disable[] = {
+ {
+ .name = "pdma",
+ .id = -1,
+ .parent = &clk_pclkd1,
+ .enable = s5p6442_clk_ip0_ctrl,
+ .ctrlbit = (1 << 3),
+ },
+};
+
static struct clk init_clocks[] = {
{
.name = "systimer",
@@ -393,10 +408,23 @@ static struct clk *clks[] __initdata = {
void __init s5p6442_register_clocks(void)
{
+ struct clk *clkptr;
+ int i, ret;
+
s3c24xx_register_clocks(clks, ARRAY_SIZE(clks));
s3c_register_clksrc(clksrcs, ARRAY_SIZE(clksrcs));
s3c_register_clocks(init_clocks, ARRAY_SIZE(init_clocks));
+ clkptr = init_clocks_disable;
+ for (i = 0; i < ARRAY_SIZE(init_clocks_disable); i++, clkptr++) {
+ ret = s3c24xx_register_clock(clkptr);
+ if (ret < 0) {
+ printk(KERN_ERR "Fail to register clock %s (%d)\n",
+ clkptr->name, ret);
+ } else
+ (clkptr->enable)(clkptr, 0);
+ }
+
s3c_pwmclk_init();
}
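
The s5p6442_register_clocks() change above registers the init_clocks_disable[] entries and then immediately gates them off, so unused IP blocks stay clock-gated until a driver enables them. A small standalone sketch of that register-then-disable pattern follows; the clk struct and callbacks are simplified stand-ins for the samsung clock framework, illustration only.

	#include <stdio.h>
	#include <stddef.h>

	#define ARRAY_SIZE(x)	(sizeof(x) / sizeof((x)[0]))

	struct clk {
		const char *name;
		int (*enable)(struct clk *clk, int enable);
	};

	static int gate_ctrl(struct clk *clk, int enable)
	{
		printf("%s: gate %s\n", clk->name, enable ? "on" : "off");
		return 0;
	}

	static struct clk init_clocks_disable[] = {
		{ .name = "pdma", .enable = gate_ctrl },
	};

	static int register_clock(struct clk *clk)
	{
		printf("registered %s\n", clk->name);
		return 0;	/* pretend registration always succeeds */
	}

	int main(void)
	{
		struct clk *clkp = init_clocks_disable;
		size_t i;

		for (i = 0; i < ARRAY_SIZE(init_clocks_disable); i++, clkp++) {
			if (register_clock(clkp) < 0)
				printf("Failed to register clock %s\n", clkp->name);
			else
				clkp->enable(clkp, 0);	/* leave it gated until a driver claims it */
		}
		return 0;
	}
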
diff --git a/arch/arm/mach-s5p6442/dev-audio.c b/arch/arm/mach-s5p6442/dev-audio.c
index 7a4e34720b7b..3462197ff352 100644
--- a/arch/arm/mach-s5p6442/dev-audio.c
+++ b/arch/arm/mach-s5p6442/dev-audio.c
@@ -21,22 +21,16 @@
static int s5p6442_cfg_i2s(struct platform_device *pdev)
{
+ unsigned int base;
+
/* configure GPIO for i2s port */
switch (pdev->id) {
case 1:
- s3c_gpio_cfgpin(S5P6442_GPC1(0), S3C_GPIO_SFN(2));
- s3c_gpio_cfgpin(S5P6442_GPC1(1), S3C_GPIO_SFN(2));
- s3c_gpio_cfgpin(S5P6442_GPC1(2), S3C_GPIO_SFN(2));
- s3c_gpio_cfgpin(S5P6442_GPC1(3), S3C_GPIO_SFN(2));
- s3c_gpio_cfgpin(S5P6442_GPC1(4), S3C_GPIO_SFN(2));
+ base = S5P6442_GPC1(0);
break;
case -1:
- s3c_gpio_cfgpin(S5P6442_GPC0(0), S3C_GPIO_SFN(2));
- s3c_gpio_cfgpin(S5P6442_GPC0(1), S3C_GPIO_SFN(2));
- s3c_gpio_cfgpin(S5P6442_GPC0(2), S3C_GPIO_SFN(2));
- s3c_gpio_cfgpin(S5P6442_GPC0(3), S3C_GPIO_SFN(2));
- s3c_gpio_cfgpin(S5P6442_GPC0(4), S3C_GPIO_SFN(2));
+ base = S5P6442_GPC0(0);
break;
default:
@@ -44,6 +38,7 @@ static int s5p6442_cfg_i2s(struct platform_device *pdev)
return -EINVAL;
}
+ s3c_gpio_cfgpin_range(base, 5, S3C_GPIO_SFN(2));
return 0;
}
@@ -111,21 +106,15 @@ struct platform_device s5p6442_device_iis1 = {
static int s5p6442_pcm_cfg_gpio(struct platform_device *pdev)
{
+ unsigned int base;
+
switch (pdev->id) {
case 0:
- s3c_gpio_cfgpin(S5P6442_GPC0(0), S3C_GPIO_SFN(3));
- s3c_gpio_cfgpin(S5P6442_GPC0(1), S3C_GPIO_SFN(3));
- s3c_gpio_cfgpin(S5P6442_GPC0(2), S3C_GPIO_SFN(3));
- s3c_gpio_cfgpin(S5P6442_GPC0(3), S3C_GPIO_SFN(3));
- s3c_gpio_cfgpin(S5P6442_GPC0(4), S3C_GPIO_SFN(3));
+ base = S5P6442_GPC0(0);
break;
case 1:
- s3c_gpio_cfgpin(S5P6442_GPC1(0), S3C_GPIO_SFN(3));
- s3c_gpio_cfgpin(S5P6442_GPC1(1), S3C_GPIO_SFN(3));
- s3c_gpio_cfgpin(S5P6442_GPC1(2), S3C_GPIO_SFN(3));
- s3c_gpio_cfgpin(S5P6442_GPC1(3), S3C_GPIO_SFN(3));
- s3c_gpio_cfgpin(S5P6442_GPC1(4), S3C_GPIO_SFN(3));
+ base = S5P6442_GPC1(0);
break;
default:
@@ -133,6 +122,7 @@ static int s5p6442_pcm_cfg_gpio(struct platform_device *pdev)
return -EINVAL;
}
+ s3c_gpio_cfgpin_range(base, 5, S3C_GPIO_SFN(3));
return 0;
}
diff --git a/arch/arm/mach-s5p6442/dev-spi.c b/arch/arm/mach-s5p6442/dev-spi.c
index e894651a88bd..cce8c2470709 100644
--- a/arch/arm/mach-s5p6442/dev-spi.c
+++ b/arch/arm/mach-s5p6442/dev-spi.c
@@ -38,11 +38,9 @@ static int s5p6442_spi_cfg_gpio(struct platform_device *pdev)
switch (pdev->id) {
case 0:
s3c_gpio_cfgpin(S5P6442_GPB(0), S3C_GPIO_SFN(2));
- s3c_gpio_cfgpin(S5P6442_GPB(2), S3C_GPIO_SFN(2));
- s3c_gpio_cfgpin(S5P6442_GPB(3), S3C_GPIO_SFN(2));
s3c_gpio_setpull(S5P6442_GPB(0), S3C_GPIO_PULL_UP);
- s3c_gpio_setpull(S5P6442_GPB(2), S3C_GPIO_PULL_UP);
- s3c_gpio_setpull(S5P6442_GPB(3), S3C_GPIO_PULL_UP);
+ s3c_gpio_cfgall_range(S5P6442_GPB(2), 2,
+ S3C_GPIO_SFN(2), S3C_GPIO_PULL_UP);
break;
default:
diff --git a/arch/arm/mach-s5p6442/dma.c b/arch/arm/mach-s5p6442/dma.c
index ad4f8704b93d..7dfb13654f8a 100644
--- a/arch/arm/mach-s5p6442/dma.c
+++ b/arch/arm/mach-s5p6442/dma.c
@@ -82,7 +82,7 @@ static struct s3c_pl330_platdata s5p6442_pdma_pdata = {
static struct platform_device s5p6442_device_pdma = {
.name = "s3c-pl330",
- .id = 1,
+ .id = -1,
.num_resources = ARRAY_SIZE(s5p6442_pdma_resource),
.resource = s5p6442_pdma_resource,
.dev = {
diff --git a/arch/arm/mach-s5p6442/include/mach/regs-clock.h b/arch/arm/mach-s5p6442/include/mach/regs-clock.h
index d8360b5d4ece..00828a336991 100644
--- a/arch/arm/mach-s5p6442/include/mach/regs-clock.h
+++ b/arch/arm/mach-s5p6442/include/mach/regs-clock.h
@@ -46,6 +46,7 @@
#define S5P_CLK_DIV5 S5P_CLKREG(0x314)
#define S5P_CLK_DIV6 S5P_CLKREG(0x318)
+#define S5P_CLKGATE_IP0 S5P_CLKREG(0x460)
#define S5P_CLKGATE_IP3 S5P_CLKREG(0x46C)
/* CLK_OUT */
diff --git a/arch/arm/mach-s5p6442/include/mach/vmalloc.h b/arch/arm/mach-s5p6442/include/mach/vmalloc.h
index f5c83f02c18e..4aa55e55ac47 100644
--- a/arch/arm/mach-s5p6442/include/mach/vmalloc.h
+++ b/arch/arm/mach-s5p6442/include/mach/vmalloc.h
@@ -12,6 +12,6 @@
#ifndef __ASM_ARCH_VMALLOC_H
#define __ASM_ARCH_VMALLOC_H
-#define VMALLOC_END 0xE0000000UL
+#define VMALLOC_END 0xF6000000UL
#endif /* __ASM_ARCH_VMALLOC_H */
diff --git a/arch/arm/mach-s5p64x0/Kconfig b/arch/arm/mach-s5p64x0/Kconfig
index fbcae9352022..164d2783d381 100644
--- a/arch/arm/mach-s5p64x0/Kconfig
+++ b/arch/arm/mach-s5p64x0/Kconfig
@@ -9,14 +9,12 @@ if ARCH_S5P64X0
config CPU_S5P6440
bool
- select PLAT_S5P
select S3C_PL330_DMA
help
Enable S5P6440 CPU support
config CPU_S5P6450
bool
- select PLAT_S5P
select S3C_PL330_DMA
help
Enable S5P6450 CPU support
diff --git a/arch/arm/mach-s5p64x0/clock-s5p6440.c b/arch/arm/mach-s5p64x0/clock-s5p6440.c
index f93dcd8b4d6a..e4883dc1c8d7 100644
--- a/arch/arm/mach-s5p64x0/clock-s5p6440.c
+++ b/arch/arm/mach-s5p64x0/clock-s5p6440.c
@@ -79,13 +79,16 @@ static int s5p6440_epll_set_rate(struct clk *clk, unsigned long rate)
__raw_writel(epll_con, S5P64X0_EPLL_CON);
__raw_writel(epll_con_k, S5P64X0_EPLL_CON_K);
+ printk(KERN_WARNING "EPLL Rate changes from %lu to %lu\n",
+ clk->rate, rate);
+
clk->rate = rate;
return 0;
}
static struct clk_ops s5p6440_epll_ops = {
- .get_rate = s5p64x0_epll_get_rate,
+ .get_rate = s5p_epll_get_rate,
.set_rate = s5p6440_epll_set_rate,
};
@@ -150,6 +153,12 @@ static struct clk init_clocks_disable[] = {
.enable = s5p64x0_hclk0_ctrl,
.ctrlbit = (1 << 8),
}, {
+ .name = "pdma",
+ .id = -1,
+ .parent = &clk_hclk_low.clk,
+ .enable = s5p64x0_hclk0_ctrl,
+ .ctrlbit = (1 << 12),
+ }, {
.name = "hsmmc",
.id = 0,
.parent = &clk_hclk_low.clk,
@@ -331,12 +340,6 @@ static struct clk init_clocks[] = {
.enable = s5p64x0_hclk0_ctrl,
.ctrlbit = (1 << 21),
}, {
- .name = "dma",
- .id = -1,
- .parent = &clk_hclk_low.clk,
- .enable = s5p64x0_hclk0_ctrl,
- .ctrlbit = (1 << 12),
- }, {
.name = "uart",
.id = 0,
.parent = &clk_pclk_low.clk,
@@ -548,7 +551,7 @@ void __init_or_cpufreq s5p6440_setup_clocks(void)
/* Set S5P6440 functions for clk_fout_epll */
- clk_fout_epll.enable = s5p64x0_epll_enable;
+ clk_fout_epll.enable = s5p_epll_enable;
clk_fout_epll.ops = &s5p6440_epll_ops;
clk_48m.enable = s5p64x0_clk48m_ctrl;
diff --git a/arch/arm/mach-s5p64x0/clock-s5p6450.c b/arch/arm/mach-s5p64x0/clock-s5p6450.c
index f9afb05b217c..7dbf3c968f53 100644
--- a/arch/arm/mach-s5p64x0/clock-s5p6450.c
+++ b/arch/arm/mach-s5p64x0/clock-s5p6450.c
@@ -80,13 +80,16 @@ static int s5p6450_epll_set_rate(struct clk *clk, unsigned long rate)
__raw_writel(epll_con, S5P64X0_EPLL_CON);
__raw_writel(epll_con_k, S5P64X0_EPLL_CON_K);
+ printk(KERN_WARNING "EPLL Rate changes from %lu to %lu\n",
+ clk->rate, rate);
+
clk->rate = rate;
return 0;
}
static struct clk_ops s5p6450_epll_ops = {
- .get_rate = s5p64x0_epll_get_rate,
+ .get_rate = s5p_epll_get_rate,
.set_rate = s5p6450_epll_set_rate,
};
@@ -186,6 +189,12 @@ static struct clk init_clocks_disable[] = {
.enable = s5p64x0_hclk0_ctrl,
.ctrlbit = (1 << 3),
}, {
+ .name = "pdma",
+ .id = -1,
+ .parent = &clk_hclk_low.clk,
+ .enable = s5p64x0_hclk0_ctrl,
+ .ctrlbit = (1 << 12),
+ }, {
.name = "hsmmc",
.id = 0,
.parent = &clk_hclk_low.clk,
@@ -283,12 +292,6 @@ static struct clk init_clocks[] = {
.enable = s5p64x0_hclk0_ctrl,
.ctrlbit = (1 << 21),
}, {
- .name = "dma",
- .id = -1,
- .parent = &clk_hclk_low.clk,
- .enable = s5p64x0_hclk0_ctrl,
- .ctrlbit = (1 << 12),
- }, {
.name = "uart",
.id = 0,
.parent = &clk_pclk_low.clk,
@@ -581,7 +584,7 @@ void __init_or_cpufreq s5p6450_setup_clocks(void)
/* Set S5P6450 functions for clk_fout_epll */
- clk_fout_epll.enable = s5p64x0_epll_enable;
+ clk_fout_epll.enable = s5p_epll_enable;
clk_fout_epll.ops = &s5p6450_epll_ops;
clk_48m.enable = s5p64x0_clk48m_ctrl;
diff --git a/arch/arm/mach-s5p64x0/clock.c b/arch/arm/mach-s5p64x0/clock.c
index 523ba8039ac2..b52c6e2f37a6 100644
--- a/arch/arm/mach-s5p64x0/clock.c
+++ b/arch/arm/mach-s5p64x0/clock.c
@@ -73,24 +73,6 @@ static const u32 clock_table[][3] = {
{L2 * 1000, (3 << ARM_DIV_RATIO_SHIFT), (0 << S5P64X0_CLKDIV0_HCLK_SHIFT)},
};
-int s5p64x0_epll_enable(struct clk *clk, int enable)
-{
- unsigned int ctrlbit = clk->ctrlbit;
- unsigned int epll_con = __raw_readl(S5P64X0_EPLL_CON) & ~ctrlbit;
-
- if (enable)
- __raw_writel(epll_con | ctrlbit, S5P64X0_EPLL_CON);
- else
- __raw_writel(epll_con, S5P64X0_EPLL_CON);
-
- return 0;
-}
-
-unsigned long s5p64x0_epll_get_rate(struct clk *clk)
-{
- return clk->rate;
-}
-
unsigned long s5p64x0_armclk_get_rate(struct clk *clk)
{
unsigned long rate = clk_get_rate(clk->parent);
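
The clock hunks above drop the per-SoC s5p64x0_epll_enable()/s5p64x0_epll_get_rate() copies in favour of shared s5p_epll_* helpers. Judging from the code removed here, the common versions are assumed to behave roughly as sketched below; this is a userspace model only, the real helpers use __raw_readl()/__raw_writel() on S5P_EPLL_CON, and the ctrlbit value is made up for the example.

	#include <stdio.h>

	static unsigned int epll_con;		/* stands in for the EPLL_CON register */

	struct clk {
		unsigned long rate;
		unsigned int ctrlbit;
	};

	/* set or clear the EPLL enable bit, leaving the other bits alone */
	static int epll_enable(struct clk *clk, int enable)
	{
		unsigned int v = epll_con & ~clk->ctrlbit;

		epll_con = enable ? (v | clk->ctrlbit) : v;
		return 0;
	}

	/* the rate is simply whatever the SoC's set_rate hook last cached */
	static unsigned long epll_get_rate(struct clk *clk)
	{
		return clk->rate;
	}

	int main(void)
	{
		struct clk fout_epll = { .rate = 84000000, .ctrlbit = 1u << 31 };

		epll_enable(&fout_epll, 1);
		printf("EPLL_CON = %#x, rate = %lu\n", epll_con, epll_get_rate(&fout_epll));
		return 0;
	}
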
diff --git a/arch/arm/mach-s5p64x0/dev-audio.c b/arch/arm/mach-s5p64x0/dev-audio.c
index fa097bd68ca4..396bacc0a39a 100644
--- a/arch/arm/mach-s5p64x0/dev-audio.c
+++ b/arch/arm/mach-s5p64x0/dev-audio.c
@@ -24,13 +24,8 @@ static int s5p6440_cfg_i2s(struct platform_device *pdev)
/* configure GPIO for i2s port */
switch (pdev->id) {
case -1:
- s3c_gpio_cfgpin(S5P6440_GPR(4), S3C_GPIO_SFN(5));
- s3c_gpio_cfgpin(S5P6440_GPR(5), S3C_GPIO_SFN(5));
- s3c_gpio_cfgpin(S5P6440_GPR(6), S3C_GPIO_SFN(5));
- s3c_gpio_cfgpin(S5P6440_GPR(7), S3C_GPIO_SFN(5));
- s3c_gpio_cfgpin(S5P6440_GPR(8), S3C_GPIO_SFN(5));
- s3c_gpio_cfgpin(S5P6440_GPR(13), S3C_GPIO_SFN(5));
- s3c_gpio_cfgpin(S5P6440_GPR(14), S3C_GPIO_SFN(5));
+ s3c_gpio_cfgpin_range(S5P6440_GPR(4), 5, S3C_GPIO_SFN(5));
+ s3c_gpio_cfgpin_range(S5P6440_GPR(13), 2, S3C_GPIO_SFN(5));
break;
default:
@@ -47,13 +42,9 @@ static int s5p6450_cfg_i2s(struct platform_device *pdev)
switch (pdev->id) {
case -1:
s3c_gpio_cfgpin(S5P6450_GPB(4), S3C_GPIO_SFN(5));
- s3c_gpio_cfgpin(S5P6450_GPR(4), S3C_GPIO_SFN(5));
- s3c_gpio_cfgpin(S5P6450_GPR(5), S3C_GPIO_SFN(5));
- s3c_gpio_cfgpin(S5P6450_GPR(6), S3C_GPIO_SFN(5));
- s3c_gpio_cfgpin(S5P6450_GPR(7), S3C_GPIO_SFN(5));
- s3c_gpio_cfgpin(S5P6450_GPR(8), S3C_GPIO_SFN(5));
- s3c_gpio_cfgpin(S5P6450_GPR(13), S3C_GPIO_SFN(5));
- s3c_gpio_cfgpin(S5P6450_GPR(14), S3C_GPIO_SFN(5));
+ s3c_gpio_cfgpin_range(S5P6450_GPR(4), 5, S3C_GPIO_SFN(5));
+ s3c_gpio_cfgpin_range(S5P6450_GPR(13), 2, S3C_GPIO_SFN(5));
+
break;
default:
@@ -116,11 +107,8 @@ static int s5p6440_pcm_cfg_gpio(struct platform_device *pdev)
{
switch (pdev->id) {
case 0:
- s3c_gpio_cfgpin(S5P6440_GPR(7), S3C_GPIO_SFN(2));
- s3c_gpio_cfgpin(S5P6440_GPR(13), S3C_GPIO_SFN(2));
- s3c_gpio_cfgpin(S5P6440_GPR(14), S3C_GPIO_SFN(2));
- s3c_gpio_cfgpin(S5P6440_GPR(8), S3C_GPIO_SFN(2));
- s3c_gpio_cfgpin(S5P6440_GPR(6), S3C_GPIO_SFN(2));
+ s3c_gpio_cfgpin_range(S5P6440_GPR(6), 3, S3C_GPIO_SFN(2));
+ s3c_gpio_cfgpin_range(S5P6440_GPR(13), 2, S3C_GPIO_SFN(2));
break;
default:
diff --git a/arch/arm/mach-s5p64x0/dev-spi.c b/arch/arm/mach-s5p64x0/dev-spi.c
index 5b69ec4c8af3..e78ee18c76e3 100644
--- a/arch/arm/mach-s5p64x0/dev-spi.c
+++ b/arch/arm/mach-s5p64x0/dev-spi.c
@@ -39,23 +39,15 @@ static char *s5p64x0_spi_src_clks[] = {
*/
static int s5p6440_spi_cfg_gpio(struct platform_device *pdev)
{
+ unsigned int base;
+
switch (pdev->id) {
case 0:
- s3c_gpio_cfgpin(S5P6440_GPC(0), S3C_GPIO_SFN(2));
- s3c_gpio_cfgpin(S5P6440_GPC(1), S3C_GPIO_SFN(2));
- s3c_gpio_cfgpin(S5P6440_GPC(2), S3C_GPIO_SFN(2));
- s3c_gpio_setpull(S5P6440_GPC(0), S3C_GPIO_PULL_UP);
- s3c_gpio_setpull(S5P6440_GPC(1), S3C_GPIO_PULL_UP);
- s3c_gpio_setpull(S5P6440_GPC(2), S3C_GPIO_PULL_UP);
+ base = S5P6440_GPC(0);
break;
case 1:
- s3c_gpio_cfgpin(S5P6440_GPC(4), S3C_GPIO_SFN(2));
- s3c_gpio_cfgpin(S5P6440_GPC(5), S3C_GPIO_SFN(2));
- s3c_gpio_cfgpin(S5P6440_GPC(6), S3C_GPIO_SFN(2));
- s3c_gpio_setpull(S5P6440_GPC(4), S3C_GPIO_PULL_UP);
- s3c_gpio_setpull(S5P6440_GPC(5), S3C_GPIO_PULL_UP);
- s3c_gpio_setpull(S5P6440_GPC(6), S3C_GPIO_PULL_UP);
+ base = S5P6440_GPC(4);
break;
default:
@@ -63,28 +55,23 @@ static int s5p6440_spi_cfg_gpio(struct platform_device *pdev)
return -EINVAL;
}
+ s3c_gpio_cfgall_range(base, 3,
+ S3C_GPIO_SFN(2), S3C_GPIO_PULL_UP);
+
return 0;
}
static int s5p6450_spi_cfg_gpio(struct platform_device *pdev)
{
+ unsigned int base;
+
switch (pdev->id) {
case 0:
- s3c_gpio_cfgpin(S5P6450_GPC(0), S3C_GPIO_SFN(2));
- s3c_gpio_cfgpin(S5P6450_GPC(1), S3C_GPIO_SFN(2));
- s3c_gpio_cfgpin(S5P6450_GPC(2), S3C_GPIO_SFN(2));
- s3c_gpio_setpull(S5P6450_GPC(0), S3C_GPIO_PULL_UP);
- s3c_gpio_setpull(S5P6450_GPC(1), S3C_GPIO_PULL_UP);
- s3c_gpio_setpull(S5P6450_GPC(2), S3C_GPIO_PULL_UP);
+ base = S5P6450_GPC(0);
break;
case 1:
- s3c_gpio_cfgpin(S5P6450_GPC(4), S3C_GPIO_SFN(2));
- s3c_gpio_cfgpin(S5P6450_GPC(5), S3C_GPIO_SFN(2));
- s3c_gpio_cfgpin(S5P6450_GPC(6), S3C_GPIO_SFN(2));
- s3c_gpio_setpull(S5P6450_GPC(4), S3C_GPIO_PULL_UP);
- s3c_gpio_setpull(S5P6450_GPC(5), S3C_GPIO_PULL_UP);
- s3c_gpio_setpull(S5P6450_GPC(6), S3C_GPIO_PULL_UP);
+ base = S5P6450_GPC(4);
break;
default:
@@ -92,6 +79,9 @@ static int s5p6450_spi_cfg_gpio(struct platform_device *pdev)
return -EINVAL;
}
+ s3c_gpio_cfgall_range(base, 3,
+ S3C_GPIO_SFN(2), S3C_GPIO_PULL_UP);
+
return 0;
}
diff --git a/arch/arm/mach-s5p64x0/dma.c b/arch/arm/mach-s5p64x0/dma.c
index 29a8c2410049..d7ad944b3475 100644
--- a/arch/arm/mach-s5p64x0/dma.c
+++ b/arch/arm/mach-s5p64x0/dma.c
@@ -122,7 +122,7 @@ static struct s3c_pl330_platdata s5p6450_pdma_pdata = {
static struct platform_device s5p64x0_device_pdma = {
.name = "s3c-pl330",
- .id = 0,
+ .id = -1,
.num_resources = ARRAY_SIZE(s5p64x0_pdma_resource),
.resource = s5p64x0_pdma_resource,
.dev = {
diff --git a/arch/arm/mach-s5p64x0/include/mach/regs-clock.h b/arch/arm/mach-s5p64x0/include/mach/regs-clock.h
index 58e1bc813804..a133f22fa155 100644
--- a/arch/arm/mach-s5p64x0/include/mach/regs-clock.h
+++ b/arch/arm/mach-s5p64x0/include/mach/regs-clock.h
@@ -60,4 +60,6 @@
#define ARM_DIV_RATIO_SHIFT 0
#define ARM_DIV_MASK (0xF << ARM_DIV_RATIO_SHIFT)
+#define S5P_EPLL_CON S5P64X0_EPLL_CON
+
#endif /* __ASM_ARCH_REGS_CLOCK_H */
diff --git a/arch/arm/mach-s5p64x0/include/mach/vmalloc.h b/arch/arm/mach-s5p64x0/include/mach/vmalloc.h
index 97a9df38f1cf..38dcc71a03cc 100644
--- a/arch/arm/mach-s5p64x0/include/mach/vmalloc.h
+++ b/arch/arm/mach-s5p64x0/include/mach/vmalloc.h
@@ -15,6 +15,6 @@
#ifndef __ASM_ARCH_VMALLOC_H
#define __ASM_ARCH_VMALLOC_H
-#define VMALLOC_END 0xE0000000UL
+#define VMALLOC_END 0xF6000000UL
#endif /* __ASM_ARCH_VMALLOC_H */
diff --git a/arch/arm/mach-s5p64x0/setup-i2c0.c b/arch/arm/mach-s5p64x0/setup-i2c0.c
index dc4cc65a5019..46b463917c54 100644
--- a/arch/arm/mach-s5p64x0/setup-i2c0.c
+++ b/arch/arm/mach-s5p64x0/setup-i2c0.c
@@ -25,18 +25,14 @@ struct platform_device; /* don't need the contents */
void s5p6440_i2c0_cfg_gpio(struct platform_device *dev)
{
- s3c_gpio_cfgpin(S5P6440_GPB(5), S3C_GPIO_SFN(2));
- s3c_gpio_setpull(S5P6440_GPB(5), S3C_GPIO_PULL_UP);
- s3c_gpio_cfgpin(S5P6440_GPB(6), S3C_GPIO_SFN(2));
- s3c_gpio_setpull(S5P6440_GPB(6), S3C_GPIO_PULL_UP);
+ s3c_gpio_cfgall_range(S5P6440_GPB(5), 2,
+ S3C_GPIO_SFN(2), S3C_GPIO_PULL_UP);
}
void s5p6450_i2c0_cfg_gpio(struct platform_device *dev)
{
- s3c_gpio_cfgpin(S5P6450_GPB(5), S3C_GPIO_SFN(2));
- s3c_gpio_setpull(S5P6450_GPB(5), S3C_GPIO_PULL_UP);
- s3c_gpio_cfgpin(S5P6450_GPB(6), S3C_GPIO_SFN(2));
- s3c_gpio_setpull(S5P6450_GPB(6), S3C_GPIO_PULL_UP);
+ s3c_gpio_cfgall_range(S5P6450_GPB(5), 2,
+ S3C_GPIO_SFN(2), S3C_GPIO_PULL_UP);
}
void s3c_i2c0_cfg_gpio(struct platform_device *dev) { }
diff --git a/arch/arm/mach-s5p64x0/setup-i2c1.c b/arch/arm/mach-s5p64x0/setup-i2c1.c
index 2edd7912f8e4..6ad3b986021c 100644
--- a/arch/arm/mach-s5p64x0/setup-i2c1.c
+++ b/arch/arm/mach-s5p64x0/setup-i2c1.c
@@ -25,18 +25,14 @@ struct platform_device; /* don't need the contents */
void s5p6440_i2c1_cfg_gpio(struct platform_device *dev)
{
- s3c_gpio_cfgpin(S5P6440_GPR(9), S3C_GPIO_SFN(6));
- s3c_gpio_setpull(S5P6440_GPR(9), S3C_GPIO_PULL_UP);
- s3c_gpio_cfgpin(S5P6440_GPR(10), S3C_GPIO_SFN(6));
- s3c_gpio_setpull(S5P6440_GPR(10), S3C_GPIO_PULL_UP);
+ s3c_gpio_cfgall_range(S5P6440_GPR(9), 2,
+ S3C_GPIO_SFN(6), S3C_GPIO_PULL_UP);
}
void s5p6450_i2c1_cfg_gpio(struct platform_device *dev)
{
- s3c_gpio_cfgpin(S5P6450_GPR(9), S3C_GPIO_SFN(6));
- s3c_gpio_setpull(S5P6450_GPR(9), S3C_GPIO_PULL_UP);
- s3c_gpio_cfgpin(S5P6450_GPR(10), S3C_GPIO_SFN(6));
- s3c_gpio_setpull(S5P6450_GPR(10), S3C_GPIO_PULL_UP);
+ s3c_gpio_cfgall_range(S5P6450_GPR(9), 2,
+ S3C_GPIO_SFN(6), S3C_GPIO_PULL_UP);
}
void s3c_i2c1_cfg_gpio(struct platform_device *dev) { }
diff --git a/arch/arm/mach-s5pc100/Kconfig b/arch/arm/mach-s5pc100/Kconfig
index 77ae4bfb74ba..b8fbf2fcba6f 100644
--- a/arch/arm/mach-s5pc100/Kconfig
+++ b/arch/arm/mach-s5pc100/Kconfig
@@ -9,7 +9,6 @@ if ARCH_S5PC100
config CPU_S5PC100
bool
- select PLAT_S5P
select S5P_EXT_INT
select S3C_PL330_DMA
help
diff --git a/arch/arm/mach-s5pc100/Makefile b/arch/arm/mach-s5pc100/Makefile
index a021ed1fb4b6..eecab57d2e5d 100644
--- a/arch/arm/mach-s5pc100/Makefile
+++ b/arch/arm/mach-s5pc100/Makefile
@@ -11,7 +11,7 @@ obj- :=
# Core support for S5PC100 system
-obj-$(CONFIG_CPU_S5PC100) += cpu.o init.o clock.o gpiolib.o irq-gpio.o
+obj-$(CONFIG_CPU_S5PC100) += cpu.o init.o clock.o gpiolib.o
obj-$(CONFIG_CPU_S5PC100) += setup-i2c0.o
obj-$(CONFIG_CPU_S5PC100) += dma.o
diff --git a/arch/arm/mach-s5pc100/clock.c b/arch/arm/mach-s5pc100/clock.c
index 084abd13b0a5..2d4a761a5163 100644
--- a/arch/arm/mach-s5pc100/clock.c
+++ b/arch/arm/mach-s5pc100/clock.c
@@ -273,24 +273,6 @@ static struct clksrc_clk clk_div_hdmi = {
.reg_div = { .reg = S5P_CLK_DIV3, .shift = 28, .size = 4 },
};
-static int s5pc100_epll_enable(struct clk *clk, int enable)
-{
- unsigned int ctrlbit = clk->ctrlbit;
- unsigned int epll_con = __raw_readl(S5P_EPLL_CON) & ~ctrlbit;
-
- if (enable)
- __raw_writel(epll_con | ctrlbit, S5P_EPLL_CON);
- else
- __raw_writel(epll_con, S5P_EPLL_CON);
-
- return 0;
-}
-
-static unsigned long s5pc100_epll_get_rate(struct clk *clk)
-{
- return clk->rate;
-}
-
static u32 epll_div[][4] = {
{ 32750000, 131, 3, 4 },
{ 32768000, 131, 3, 4 },
@@ -341,13 +323,16 @@ static int s5pc100_epll_set_rate(struct clk *clk, unsigned long rate)
__raw_writel(epll_con, S5P_EPLL_CON);
+ printk(KERN_WARNING "EPLL Rate changes from %lu to %lu\n",
+ clk->rate, rate);
+
clk->rate = rate;
return 0;
}
static struct clk_ops s5pc100_epll_ops = {
- .get_rate = s5pc100_epll_get_rate,
+ .get_rate = s5p_epll_get_rate,
.set_rate = s5pc100_epll_set_rate,
};
@@ -691,55 +676,55 @@ static struct clk init_clocks_disable[] = {
}, {
.name = "iis",
.id = 0,
- .parent = &clk_div_d1_bus.clk,
+ .parent = &clk_div_pclkd1.clk,
.enable = s5pc100_d1_5_ctrl,
.ctrlbit = (1 << 0),
}, {
.name = "iis",
.id = 1,
- .parent = &clk_div_d1_bus.clk,
+ .parent = &clk_div_pclkd1.clk,
.enable = s5pc100_d1_5_ctrl,
.ctrlbit = (1 << 1),
}, {
.name = "iis",
.id = 2,
- .parent = &clk_div_d1_bus.clk,
+ .parent = &clk_div_pclkd1.clk,
.enable = s5pc100_d1_5_ctrl,
.ctrlbit = (1 << 2),
}, {
.name = "ac97",
.id = -1,
- .parent = &clk_div_d1_bus.clk,
+ .parent = &clk_div_pclkd1.clk,
.enable = s5pc100_d1_5_ctrl,
.ctrlbit = (1 << 3),
}, {
.name = "pcm",
.id = 0,
- .parent = &clk_div_d1_bus.clk,
+ .parent = &clk_div_pclkd1.clk,
.enable = s5pc100_d1_5_ctrl,
.ctrlbit = (1 << 4),
}, {
.name = "pcm",
.id = 1,
- .parent = &clk_div_d1_bus.clk,
+ .parent = &clk_div_pclkd1.clk,
.enable = s5pc100_d1_5_ctrl,
.ctrlbit = (1 << 5),
}, {
.name = "spdif",
.id = -1,
- .parent = &clk_div_d1_bus.clk,
+ .parent = &clk_div_pclkd1.clk,
.enable = s5pc100_d1_5_ctrl,
.ctrlbit = (1 << 6),
}, {
.name = "adc",
.id = -1,
- .parent = &clk_div_d1_bus.clk,
+ .parent = &clk_div_pclkd1.clk,
.enable = s5pc100_d1_5_ctrl,
.ctrlbit = (1 << 7),
}, {
.name = "keypad",
.id = -1,
- .parent = &clk_div_d1_bus.clk,
+ .parent = &clk_div_pclkd1.clk,
.enable = s5pc100_d1_5_ctrl,
.ctrlbit = (1 << 8),
}, {
@@ -848,6 +833,18 @@ struct clksrc_sources clk_src_group3 = {
.nr_sources = ARRAY_SIZE(clk_src_group3_list),
};
+static struct clksrc_clk clk_sclk_audio0 = {
+ .clk = {
+ .name = "sclk_audio",
+ .id = 0,
+ .ctrlbit = (1 << 8),
+ .enable = s5pc100_sclk1_ctrl,
+ },
+ .sources = &clk_src_group3,
+ .reg_src = { .reg = S5P_CLK_SRC3, .shift = 12, .size = 3 },
+ .reg_div = { .reg = S5P_CLK_DIV4, .shift = 12, .size = 4 },
+};
+
static struct clk *clk_src_group4_list[] = {
[0] = &clk_mout_epll.clk,
[1] = &clk_div_mpll.clk,
@@ -862,6 +859,18 @@ struct clksrc_sources clk_src_group4 = {
.nr_sources = ARRAY_SIZE(clk_src_group4_list),
};
+static struct clksrc_clk clk_sclk_audio1 = {
+ .clk = {
+ .name = "sclk_audio",
+ .id = 1,
+ .ctrlbit = (1 << 9),
+ .enable = s5pc100_sclk1_ctrl,
+ },
+ .sources = &clk_src_group4,
+ .reg_src = { .reg = S5P_CLK_SRC3, .shift = 16, .size = 3 },
+ .reg_div = { .reg = S5P_CLK_DIV4, .shift = 16, .size = 4 },
+};
+
static struct clk *clk_src_group5_list[] = {
[0] = &clk_mout_epll.clk,
[1] = &clk_div_mpll.clk,
@@ -875,6 +884,18 @@ struct clksrc_sources clk_src_group5 = {
.nr_sources = ARRAY_SIZE(clk_src_group5_list),
};
+static struct clksrc_clk clk_sclk_audio2 = {
+ .clk = {
+ .name = "sclk_audio",
+ .id = 2,
+ .ctrlbit = (1 << 10),
+ .enable = s5pc100_sclk1_ctrl,
+ },
+ .sources = &clk_src_group5,
+ .reg_src = { .reg = S5P_CLK_SRC3, .shift = 20, .size = 3 },
+ .reg_div = { .reg = S5P_CLK_DIV4, .shift = 20, .size = 4 },
+};
+
static struct clk *clk_src_group6_list[] = {
[0] = &s5p_clk_27m,
[1] = &clk_vclk54m,
@@ -944,6 +965,64 @@ struct clksrc_sources clk_src_pwi = {
.nr_sources = ARRAY_SIZE(clk_src_pwi_list),
};
+static struct clk *clk_sclk_spdif_list[] = {
+ [0] = &clk_sclk_audio0.clk,
+ [1] = &clk_sclk_audio1.clk,
+ [2] = &clk_sclk_audio2.clk,
+};
+
+struct clksrc_sources clk_src_sclk_spdif = {
+ .sources = clk_sclk_spdif_list,
+ .nr_sources = ARRAY_SIZE(clk_sclk_spdif_list),
+};
+
+static int s5pc100_spdif_set_rate(struct clk *clk, unsigned long rate)
+{
+ struct clk *pclk;
+ int ret;
+
+ pclk = clk_get_parent(clk);
+ if (IS_ERR(pclk))
+ return -EINVAL;
+
+ ret = pclk->ops->set_rate(pclk, rate);
+ clk_put(pclk);
+
+ return ret;
+}
+
+static unsigned long s5pc100_spdif_get_rate(struct clk *clk)
+{
+ struct clk *pclk;
+ unsigned long rate;
+
+ pclk = clk_get_parent(clk);
+ if (IS_ERR(pclk))
+ return -EINVAL;
+
+ rate = pclk->ops->get_rate(clk);
+ clk_put(pclk);
+
+ return rate;
+}
+
+static struct clk_ops s5pc100_sclk_spdif_ops = {
+ .set_rate = s5pc100_spdif_set_rate,
+ .get_rate = s5pc100_spdif_get_rate,
+};
+
+static struct clksrc_clk clk_sclk_spdif = {
+ .clk = {
+ .name = "sclk_spdif",
+ .id = -1,
+ .ctrlbit = (1 << 11),
+ .enable = s5pc100_sclk1_ctrl,
+ .ops = &s5pc100_sclk_spdif_ops,
+ },
+ .sources = &clk_src_sclk_spdif,
+ .reg_src = { .reg = S5P_CLK_SRC3, .shift = 24, .size = 2 },
+};
+
static struct clksrc_clk clksrcs[] = {
{
.clk = {
@@ -1001,39 +1080,6 @@ static struct clksrc_clk clksrcs[] = {
.reg_src = { .reg = S5P_CLK_SRC2, .shift = 28, .size = 2 },
}, {
.clk = {
- .name = "sclk_audio",
- .id = 0,
- .ctrlbit = (1 << 8),
- .enable = s5pc100_sclk1_ctrl,
-
- },
- .sources = &clk_src_group3,
- .reg_src = { .reg = S5P_CLK_SRC3, .shift = 12, .size = 3 },
- .reg_div = { .reg = S5P_CLK_DIV4, .shift = 12, .size = 4 },
- }, {
- .clk = {
- .name = "sclk_audio",
- .id = 1,
- .ctrlbit = (1 << 9),
- .enable = s5pc100_sclk1_ctrl,
-
- },
- .sources = &clk_src_group4,
- .reg_src = { .reg = S5P_CLK_SRC3, .shift = 16, .size = 3 },
- .reg_div = { .reg = S5P_CLK_DIV4, .shift = 16, .size = 4 },
- }, {
- .clk = {
- .name = "sclk_audio",
- .id = 2,
- .ctrlbit = (1 << 10),
- .enable = s5pc100_sclk1_ctrl,
-
- },
- .sources = &clk_src_group5,
- .reg_src = { .reg = S5P_CLK_SRC3, .shift = 20, .size = 3 },
- .reg_div = { .reg = S5P_CLK_DIV4, .shift = 20, .size = 4 },
- }, {
- .clk = {
.name = "sclk_lcd",
.id = -1,
.ctrlbit = (1 << 0),
@@ -1179,6 +1225,10 @@ static struct clksrc_clk *sysclks[] = {
&clk_div_pclkd1,
&clk_div_cam,
&clk_div_hdmi,
+ &clk_sclk_audio0,
+ &clk_sclk_audio1,
+ &clk_sclk_audio2,
+ &clk_sclk_spdif,
};
void __init_or_cpufreq s5pc100_setup_clocks(void)
@@ -1196,7 +1246,7 @@ void __init_or_cpufreq s5pc100_setup_clocks(void)
unsigned int ptr;
/* Set S5PC100 functions for clk_fout_epll */
- clk_fout_epll.enable = s5pc100_epll_enable;
+ clk_fout_epll.enable = s5p_epll_enable;
clk_fout_epll.ops = &s5pc100_epll_ops;
printk(KERN_DEBUG "%s: registering clocks\n", __func__);
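
The sclk_spdif clock added above has no rate logic of its own: its clk_ops simply forward set_rate/get_rate to whichever sclk_audio parent is currently selected. The compact userspace sketch below shows that delegation; the structs are simplified and the clk_get_parent()/clk_put() refcounting of the real code is omitted.

	#include <stdio.h>

	struct clk;

	struct clk_ops {
		int (*set_rate)(struct clk *clk, unsigned long rate);
		unsigned long (*get_rate)(struct clk *clk);
	};

	struct clk {
		const char *name;
		unsigned long rate;
		struct clk *parent;
		struct clk_ops *ops;
	};

	static int audio_set_rate(struct clk *clk, unsigned long rate)
	{
		clk->rate = rate;
		return 0;
	}

	static unsigned long audio_get_rate(struct clk *clk)
	{
		return clk->rate;
	}

	static struct clk_ops audio_ops = {
		.set_rate = audio_set_rate,
		.get_rate = audio_get_rate,
	};

	/* spdif ops: delegate both calls to the parent audio clock */
	static int spdif_set_rate(struct clk *clk, unsigned long rate)
	{
		return clk->parent->ops->set_rate(clk->parent, rate);
	}

	static unsigned long spdif_get_rate(struct clk *clk)
	{
		return clk->parent->ops->get_rate(clk->parent);
	}

	static struct clk_ops spdif_ops = {
		.set_rate = spdif_set_rate,
		.get_rate = spdif_get_rate,
	};

	int main(void)
	{
		struct clk sclk_audio0 = { "sclk_audio0", 0, NULL, &audio_ops };
		struct clk sclk_spdif  = { "sclk_spdif",  0, &sclk_audio0, &spdif_ops };

		sclk_spdif.ops->set_rate(&sclk_spdif, 44100 * 512);
		printf("%s rate = %lu\n", sclk_spdif.name, sclk_spdif.ops->get_rate(&sclk_spdif));
		return 0;
	}
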
diff --git a/arch/arm/mach-s5pc100/dev-audio.c b/arch/arm/mach-s5pc100/dev-audio.c
index a699ed6acc23..564e195ec493 100644
--- a/arch/arm/mach-s5pc100/dev-audio.c
+++ b/arch/arm/mach-s5pc100/dev-audio.c
@@ -24,19 +24,11 @@ static int s5pc100_cfg_i2s(struct platform_device *pdev)
/* configure GPIO for i2s port */
switch (pdev->id) {
case 1:
- s3c_gpio_cfgpin(S5PC100_GPC(0), S3C_GPIO_SFN(2));
- s3c_gpio_cfgpin(S5PC100_GPC(1), S3C_GPIO_SFN(2));
- s3c_gpio_cfgpin(S5PC100_GPC(2), S3C_GPIO_SFN(2));
- s3c_gpio_cfgpin(S5PC100_GPC(3), S3C_GPIO_SFN(2));
- s3c_gpio_cfgpin(S5PC100_GPC(4), S3C_GPIO_SFN(2));
+ s3c_gpio_cfgpin_range(S5PC100_GPC(0), 5, S3C_GPIO_SFN(2));
break;
case 2:
- s3c_gpio_cfgpin(S5PC100_GPG3(0), S3C_GPIO_SFN(4));
- s3c_gpio_cfgpin(S5PC100_GPG3(1), S3C_GPIO_SFN(4));
- s3c_gpio_cfgpin(S5PC100_GPG3(2), S3C_GPIO_SFN(4));
- s3c_gpio_cfgpin(S5PC100_GPG3(3), S3C_GPIO_SFN(4));
- s3c_gpio_cfgpin(S5PC100_GPG3(4), S3C_GPIO_SFN(4));
+ s3c_gpio_cfgpin_range(S5PC100_GPG3(0), 5, S3C_GPIO_SFN(4));
break;
case -1: /* Dedicated pins */
@@ -144,19 +136,11 @@ static int s5pc100_pcm_cfg_gpio(struct platform_device *pdev)
{
switch (pdev->id) {
case 0:
- s3c_gpio_cfgpin(S5PC100_GPG3(0), S3C_GPIO_SFN(5));
- s3c_gpio_cfgpin(S5PC100_GPG3(1), S3C_GPIO_SFN(5));
- s3c_gpio_cfgpin(S5PC100_GPG3(2), S3C_GPIO_SFN(5));
- s3c_gpio_cfgpin(S5PC100_GPG3(3), S3C_GPIO_SFN(5));
- s3c_gpio_cfgpin(S5PC100_GPG3(4), S3C_GPIO_SFN(5));
+ s3c_gpio_cfgpin_range(S5PC100_GPG3(0), 5, S3C_GPIO_SFN(5));
break;
case 1:
- s3c_gpio_cfgpin(S5PC100_GPC(0), S3C_GPIO_SFN(3));
- s3c_gpio_cfgpin(S5PC100_GPC(1), S3C_GPIO_SFN(3));
- s3c_gpio_cfgpin(S5PC100_GPC(2), S3C_GPIO_SFN(3));
- s3c_gpio_cfgpin(S5PC100_GPC(3), S3C_GPIO_SFN(3));
- s3c_gpio_cfgpin(S5PC100_GPC(4), S3C_GPIO_SFN(3));
+ s3c_gpio_cfgpin_range(S5PC100_GPC(0), 5, S3C_GPIO_SFN(3));
break;
default:
@@ -231,13 +215,7 @@ struct platform_device s5pc100_device_pcm1 = {
static int s5pc100_ac97_cfg_gpio(struct platform_device *pdev)
{
- s3c_gpio_cfgpin(S5PC100_GPC(0), S3C_GPIO_SFN(4));
- s3c_gpio_cfgpin(S5PC100_GPC(1), S3C_GPIO_SFN(4));
- s3c_gpio_cfgpin(S5PC100_GPC(2), S3C_GPIO_SFN(4));
- s3c_gpio_cfgpin(S5PC100_GPC(3), S3C_GPIO_SFN(4));
- s3c_gpio_cfgpin(S5PC100_GPC(4), S3C_GPIO_SFN(4));
-
- return 0;
+ return s3c_gpio_cfgpin_range(S5PC100_GPC(0), 5, S3C_GPIO_SFN(4));
}
static struct resource s5pc100_ac97_resource[] = {
@@ -285,3 +263,57 @@ struct platform_device s5pc100_device_ac97 = {
.coherent_dma_mask = DMA_BIT_MASK(32),
},
};
+
+/* S/PDIF Controller platform_device */
+static int s5pc100_spdif_cfg_gpd(struct platform_device *pdev)
+{
+ s3c_gpio_cfgpin_range(S5PC100_GPD(5), 2, S3C_GPIO_SFN(3));
+
+ return 0;
+}
+
+static int s5pc100_spdif_cfg_gpg3(struct platform_device *pdev)
+{
+ s3c_gpio_cfgpin_range(S5PC100_GPG3(5), 2, S3C_GPIO_SFN(3));
+
+ return 0;
+}
+
+static struct resource s5pc100_spdif_resource[] = {
+ [0] = {
+ .start = S5PC100_PA_SPDIF,
+ .end = S5PC100_PA_SPDIF + 0x100 - 1,
+ .flags = IORESOURCE_MEM,
+ },
+ [1] = {
+ .start = DMACH_SPDIF,
+ .end = DMACH_SPDIF,
+ .flags = IORESOURCE_DMA,
+ },
+};
+
+static struct s3c_audio_pdata s5p_spdif_pdata = {
+ .cfg_gpio = s5pc100_spdif_cfg_gpd,
+};
+
+static u64 s5pc100_spdif_dmamask = DMA_BIT_MASK(32);
+
+struct platform_device s5pc100_device_spdif = {
+ .name = "samsung-spdif",
+ .id = -1,
+ .num_resources = ARRAY_SIZE(s5pc100_spdif_resource),
+ .resource = s5pc100_spdif_resource,
+ .dev = {
+ .platform_data = &s5p_spdif_pdata,
+ .dma_mask = &s5pc100_spdif_dmamask,
+ .coherent_dma_mask = DMA_BIT_MASK(32),
+ },
+};
+
+void __init s5pc100_spdif_setup_gpio(int gpio)
+{
+ if (gpio == S5PC100_SPDIF_GPD)
+ s5p_spdif_pdata.cfg_gpio = s5pc100_spdif_cfg_gpd;
+ else
+ s5p_spdif_pdata.cfg_gpio = s5pc100_spdif_cfg_gpg3;
+}
diff --git a/arch/arm/mach-s5pc100/dev-spi.c b/arch/arm/mach-s5pc100/dev-spi.c
index a0ef7c302c16..57b19794d9bb 100644
--- a/arch/arm/mach-s5pc100/dev-spi.c
+++ b/arch/arm/mach-s5pc100/dev-spi.c
@@ -38,30 +38,20 @@ static int s5pc100_spi_cfg_gpio(struct platform_device *pdev)
{
switch (pdev->id) {
case 0:
- s3c_gpio_cfgpin(S5PC100_GPB(0), S3C_GPIO_SFN(2));
- s3c_gpio_cfgpin(S5PC100_GPB(1), S3C_GPIO_SFN(2));
- s3c_gpio_cfgpin(S5PC100_GPB(2), S3C_GPIO_SFN(2));
- s3c_gpio_setpull(S5PC100_GPB(0), S3C_GPIO_PULL_UP);
- s3c_gpio_setpull(S5PC100_GPB(1), S3C_GPIO_PULL_UP);
- s3c_gpio_setpull(S5PC100_GPB(2), S3C_GPIO_PULL_UP);
+ s3c_gpio_cfgall_range(S5PC100_GPB(0), 3,
+ S3C_GPIO_SFN(2), S3C_GPIO_PULL_UP);
break;
case 1:
- s3c_gpio_cfgpin(S5PC100_GPB(4), S3C_GPIO_SFN(2));
- s3c_gpio_cfgpin(S5PC100_GPB(5), S3C_GPIO_SFN(2));
- s3c_gpio_cfgpin(S5PC100_GPB(6), S3C_GPIO_SFN(2));
- s3c_gpio_setpull(S5PC100_GPB(4), S3C_GPIO_PULL_UP);
- s3c_gpio_setpull(S5PC100_GPB(5), S3C_GPIO_PULL_UP);
- s3c_gpio_setpull(S5PC100_GPB(6), S3C_GPIO_PULL_UP);
+ s3c_gpio_cfgall_range(S5PC100_GPB(4), 3,
+ S3C_GPIO_SFN(2), S3C_GPIO_PULL_UP);
break;
case 2:
s3c_gpio_cfgpin(S5PC100_GPG3(0), S3C_GPIO_SFN(3));
- s3c_gpio_cfgpin(S5PC100_GPG3(2), S3C_GPIO_SFN(3));
- s3c_gpio_cfgpin(S5PC100_GPG3(3), S3C_GPIO_SFN(3));
s3c_gpio_setpull(S5PC100_GPG3(0), S3C_GPIO_PULL_UP);
- s3c_gpio_setpull(S5PC100_GPG3(2), S3C_GPIO_PULL_UP);
- s3c_gpio_setpull(S5PC100_GPG3(3), S3C_GPIO_PULL_UP);
+ s3c_gpio_cfgall_range(S5PC100_GPG3(2), 2,
+ S3C_GPIO_SFN(3), S3C_GPIO_PULL_UP);
break;
default:
diff --git a/arch/arm/mach-s5pc100/dma.c b/arch/arm/mach-s5pc100/dma.c
index 0f5517571e2c..bf4cd0fb97c6 100644
--- a/arch/arm/mach-s5pc100/dma.c
+++ b/arch/arm/mach-s5pc100/dma.c
@@ -81,7 +81,7 @@ static struct s3c_pl330_platdata s5pc100_pdma0_pdata = {
static struct platform_device s5pc100_device_pdma0 = {
.name = "s3c-pl330",
- .id = 1,
+ .id = 0,
.num_resources = ARRAY_SIZE(s5pc100_pdma0_resource),
.resource = s5pc100_pdma0_resource,
.dev = {
@@ -143,7 +143,7 @@ static struct s3c_pl330_platdata s5pc100_pdma1_pdata = {
static struct platform_device s5pc100_device_pdma1 = {
.name = "s3c-pl330",
- .id = 2,
+ .id = 1,
.num_resources = ARRAY_SIZE(s5pc100_pdma1_resource),
.resource = s5pc100_pdma1_resource,
.dev = {
diff --git a/arch/arm/mach-s5pc100/gpiolib.c b/arch/arm/mach-s5pc100/gpiolib.c
index 0fab7f2cd8bf..20856eb7dd51 100644
--- a/arch/arm/mach-s5pc100/gpiolib.c
+++ b/arch/arm/mach-s5pc100/gpiolib.c
@@ -1,5 +1,7 @@
-/*
- * arch/arm/plat-s5pc100/gpiolib.c
+/* linux/arch/arm/mach-s5pc100/gpiolib.c
+ *
+ * Copyright (c) 2010 Samsung Electronics Co., Ltd.
+ * http://www.samsung.com
*
* Copyright 2009 Samsung Electronics Co
* Kyungmin Park <kyungmin.park@samsung.com>
@@ -61,30 +63,6 @@
* L3 8 4Bit None
*/
-static int s5pc100_gpiolib_to_irq(struct gpio_chip *chip, unsigned int offset)
-{
- return S3C_IRQ_GPIO(chip->base + offset);
-}
-
-static int s5pc100_gpiolib_to_eint(struct gpio_chip *chip, unsigned int offset)
-{
- int base;
-
- base = chip->base - S5PC100_GPH0(0);
- if (base == 0)
- return IRQ_EINT(offset);
- base = chip->base - S5PC100_GPH1(0);
- if (base == 0)
- return IRQ_EINT(8 + offset);
- base = chip->base - S5PC100_GPH2(0);
- if (base == 0)
- return IRQ_EINT(16 + offset);
- base = chip->base - S5PC100_GPH3(0);
- if (base == 0)
- return IRQ_EINT(24 + offset);
- return -EINVAL;
-}
-
static struct s3c_gpio_cfg gpio_cfg = {
.set_config = s3c_gpio_setcfg_s3c64xx_4bit,
.set_pull = s3c_gpio_setpull_updown,
@@ -104,209 +82,150 @@ static struct s3c_gpio_cfg gpio_cfg_noint = {
.get_pull = s3c_gpio_getpull_updown,
};
+/*
+ * GPIO bank's base address given the index of the bank in the
+ * list of all gpio banks.
+ */
+#define S5PC100_BANK_BASE(bank_nr) (S5P_VA_GPIO + ((bank_nr) * 0x20))
+
+/*
+ * Following are the gpio banks in S5PC100.
+ *
+ * The 'config' member, when left NULL, is initialized to the default
+ * gpio_cfg structure in the init function below.
+ *
+ * The 'base' member is also initialized in the init function below.
+ * Note: the 'base' member of the s3c_gpio_chip structure is initialized
+ * using the macro above and depends on the banks being listed in order here.
+ */
static struct s3c_gpio_chip s5pc100_gpio_chips[] = {
{
- .base = S5PC100_GPA0_BASE,
- .config = &gpio_cfg,
.chip = {
.base = S5PC100_GPA0(0),
.ngpio = S5PC100_GPIO_A0_NR,
.label = "GPA0",
},
}, {
- .base = S5PC100_GPA1_BASE,
- .config = &gpio_cfg,
.chip = {
.base = S5PC100_GPA1(0),
.ngpio = S5PC100_GPIO_A1_NR,
.label = "GPA1",
},
}, {
- .base = S5PC100_GPB_BASE,
- .config = &gpio_cfg,
.chip = {
.base = S5PC100_GPB(0),
.ngpio = S5PC100_GPIO_B_NR,
.label = "GPB",
},
}, {
- .base = S5PC100_GPC_BASE,
- .config = &gpio_cfg,
.chip = {
.base = S5PC100_GPC(0),
.ngpio = S5PC100_GPIO_C_NR,
.label = "GPC",
},
}, {
- .base = S5PC100_GPD_BASE,
- .config = &gpio_cfg,
.chip = {
.base = S5PC100_GPD(0),
.ngpio = S5PC100_GPIO_D_NR,
.label = "GPD",
},
}, {
- .base = S5PC100_GPE0_BASE,
- .config = &gpio_cfg,
.chip = {
.base = S5PC100_GPE0(0),
.ngpio = S5PC100_GPIO_E0_NR,
.label = "GPE0",
},
}, {
- .base = S5PC100_GPE1_BASE,
- .config = &gpio_cfg,
.chip = {
.base = S5PC100_GPE1(0),
.ngpio = S5PC100_GPIO_E1_NR,
.label = "GPE1",
},
}, {
- .base = S5PC100_GPF0_BASE,
- .config = &gpio_cfg,
.chip = {
.base = S5PC100_GPF0(0),
.ngpio = S5PC100_GPIO_F0_NR,
.label = "GPF0",
},
}, {
- .base = S5PC100_GPF1_BASE,
- .config = &gpio_cfg,
.chip = {
.base = S5PC100_GPF1(0),
.ngpio = S5PC100_GPIO_F1_NR,
.label = "GPF1",
},
}, {
- .base = S5PC100_GPF2_BASE,
- .config = &gpio_cfg,
.chip = {
.base = S5PC100_GPF2(0),
.ngpio = S5PC100_GPIO_F2_NR,
.label = "GPF2",
},
}, {
- .base = S5PC100_GPF3_BASE,
- .config = &gpio_cfg,
.chip = {
.base = S5PC100_GPF3(0),
.ngpio = S5PC100_GPIO_F3_NR,
.label = "GPF3",
},
}, {
- .base = S5PC100_GPG0_BASE,
- .config = &gpio_cfg,
.chip = {
.base = S5PC100_GPG0(0),
.ngpio = S5PC100_GPIO_G0_NR,
.label = "GPG0",
},
}, {
- .base = S5PC100_GPG1_BASE,
- .config = &gpio_cfg,
.chip = {
.base = S5PC100_GPG1(0),
.ngpio = S5PC100_GPIO_G1_NR,
.label = "GPG1",
},
}, {
- .base = S5PC100_GPG2_BASE,
- .config = &gpio_cfg,
.chip = {
.base = S5PC100_GPG2(0),
.ngpio = S5PC100_GPIO_G2_NR,
.label = "GPG2",
},
}, {
- .base = S5PC100_GPG3_BASE,
- .config = &gpio_cfg,
.chip = {
.base = S5PC100_GPG3(0),
.ngpio = S5PC100_GPIO_G3_NR,
.label = "GPG3",
},
}, {
- .base = S5PC100_GPH0_BASE,
- .config = &gpio_cfg_eint,
- .chip = {
- .base = S5PC100_GPH0(0),
- .ngpio = S5PC100_GPIO_H0_NR,
- .label = "GPH0",
- },
- }, {
- .base = S5PC100_GPH1_BASE,
- .config = &gpio_cfg_eint,
- .chip = {
- .base = S5PC100_GPH1(0),
- .ngpio = S5PC100_GPIO_H1_NR,
- .label = "GPH1",
- },
- }, {
- .base = S5PC100_GPH2_BASE,
- .config = &gpio_cfg_eint,
- .chip = {
- .base = S5PC100_GPH2(0),
- .ngpio = S5PC100_GPIO_H2_NR,
- .label = "GPH2",
- },
- }, {
- .base = S5PC100_GPH3_BASE,
- .config = &gpio_cfg_eint,
- .chip = {
- .base = S5PC100_GPH3(0),
- .ngpio = S5PC100_GPIO_H3_NR,
- .label = "GPH3",
- },
- }, {
- .base = S5PC100_GPI_BASE,
- .config = &gpio_cfg,
.chip = {
.base = S5PC100_GPI(0),
.ngpio = S5PC100_GPIO_I_NR,
.label = "GPI",
},
}, {
- .base = S5PC100_GPJ0_BASE,
- .config = &gpio_cfg,
.chip = {
.base = S5PC100_GPJ0(0),
.ngpio = S5PC100_GPIO_J0_NR,
.label = "GPJ0",
},
}, {
- .base = S5PC100_GPJ1_BASE,
- .config = &gpio_cfg,
.chip = {
.base = S5PC100_GPJ1(0),
.ngpio = S5PC100_GPIO_J1_NR,
.label = "GPJ1",
},
}, {
- .base = S5PC100_GPJ2_BASE,
- .config = &gpio_cfg,
.chip = {
.base = S5PC100_GPJ2(0),
.ngpio = S5PC100_GPIO_J2_NR,
.label = "GPJ2",
},
}, {
- .base = S5PC100_GPJ3_BASE,
- .config = &gpio_cfg,
.chip = {
.base = S5PC100_GPJ3(0),
.ngpio = S5PC100_GPIO_J3_NR,
.label = "GPJ3",
},
}, {
- .base = S5PC100_GPJ4_BASE,
- .config = &gpio_cfg,
.chip = {
.base = S5PC100_GPJ4(0),
.ngpio = S5PC100_GPIO_J4_NR,
.label = "GPJ4",
},
}, {
- .base = S5PC100_GPK0_BASE,
.config = &gpio_cfg_noint,
.chip = {
.base = S5PC100_GPK0(0),
@@ -314,7 +233,6 @@ static struct s3c_gpio_chip s5pc100_gpio_chips[] = {
.label = "GPK0",
},
}, {
- .base = S5PC100_GPK1_BASE,
.config = &gpio_cfg_noint,
.chip = {
.base = S5PC100_GPK1(0),
@@ -322,7 +240,6 @@ static struct s3c_gpio_chip s5pc100_gpio_chips[] = {
.label = "GPK1",
},
}, {
- .base = S5PC100_GPK2_BASE,
.config = &gpio_cfg_noint,
.chip = {
.base = S5PC100_GPK2(0),
@@ -330,7 +247,6 @@ static struct s3c_gpio_chip s5pc100_gpio_chips[] = {
.label = "GPK2",
},
}, {
- .base = S5PC100_GPK3_BASE,
.config = &gpio_cfg_noint,
.chip = {
.base = S5PC100_GPK3(0),
@@ -338,7 +254,6 @@ static struct s3c_gpio_chip s5pc100_gpio_chips[] = {
.label = "GPK3",
},
}, {
- .base = S5PC100_GPL0_BASE,
.config = &gpio_cfg_noint,
.chip = {
.base = S5PC100_GPL0(0),
@@ -346,7 +261,6 @@ static struct s3c_gpio_chip s5pc100_gpio_chips[] = {
.label = "GPL0",
},
}, {
- .base = S5PC100_GPL1_BASE,
.config = &gpio_cfg_noint,
.chip = {
.base = S5PC100_GPL1(0),
@@ -354,7 +268,6 @@ static struct s3c_gpio_chip s5pc100_gpio_chips[] = {
.label = "GPL1",
},
}, {
- .base = S5PC100_GPL2_BASE,
.config = &gpio_cfg_noint,
.chip = {
.base = S5PC100_GPL2(0),
@@ -362,7 +275,6 @@ static struct s3c_gpio_chip s5pc100_gpio_chips[] = {
.label = "GPL2",
},
}, {
- .base = S5PC100_GPL3_BASE,
.config = &gpio_cfg_noint,
.chip = {
.base = S5PC100_GPL3(0),
@@ -370,56 +282,72 @@ static struct s3c_gpio_chip s5pc100_gpio_chips[] = {
.label = "GPL3",
},
}, {
- .base = S5PC100_GPL4_BASE,
.config = &gpio_cfg_noint,
.chip = {
.base = S5PC100_GPL4(0),
.ngpio = S5PC100_GPIO_L4_NR,
.label = "GPL4",
},
+ }, {
+ .base = (S5P_VA_GPIO + 0xC00),
+ .config = &gpio_cfg_eint,
+ .irq_base = IRQ_EINT(0),
+ .chip = {
+ .base = S5PC100_GPH0(0),
+ .ngpio = S5PC100_GPIO_H0_NR,
+ .label = "GPH0",
+ .to_irq = samsung_gpiolib_to_irq,
+ },
+ }, {
+ .base = (S5P_VA_GPIO + 0xC20),
+ .config = &gpio_cfg_eint,
+ .irq_base = IRQ_EINT(8),
+ .chip = {
+ .base = S5PC100_GPH1(0),
+ .ngpio = S5PC100_GPIO_H1_NR,
+ .label = "GPH1",
+ .to_irq = samsung_gpiolib_to_irq,
+ },
+ }, {
+ .base = (S5P_VA_GPIO + 0xC40),
+ .config = &gpio_cfg_eint,
+ .irq_base = IRQ_EINT(16),
+ .chip = {
+ .base = S5PC100_GPH2(0),
+ .ngpio = S5PC100_GPIO_H2_NR,
+ .label = "GPH2",
+ .to_irq = samsung_gpiolib_to_irq,
+ },
+ }, {
+ .base = (S5P_VA_GPIO + 0xC60),
+ .config = &gpio_cfg_eint,
+ .irq_base = IRQ_EINT(24),
+ .chip = {
+ .base = S5PC100_GPH3(0),
+ .ngpio = S5PC100_GPIO_H3_NR,
+ .label = "GPH3",
+ .to_irq = samsung_gpiolib_to_irq,
+ },
},
};
-/* FIXME move from irq-gpio.c */
-extern struct irq_chip s5pc100_gpioint;
-extern void s5pc100_irq_gpioint_handler(unsigned int irq, struct irq_desc *desc);
-
-static __init void s5pc100_gpiolib_link(struct s3c_gpio_chip *chip)
+static __init int s5pc100_gpiolib_init(void)
{
- /* Interrupt */
- if (chip->config == &gpio_cfg) {
- int i, irq;
-
- chip->chip.to_irq = s5pc100_gpiolib_to_irq;
+ struct s3c_gpio_chip *chip = s5pc100_gpio_chips;
+ int nr_chips = ARRAY_SIZE(s5pc100_gpio_chips);
+ int gpioint_group = 0;
+ int i;
- for (i = 0; i < chip->chip.ngpio; i++) {
- irq = S3C_IRQ_GPIO_BASE + chip->chip.base + i;
- set_irq_chip(irq, &s5pc100_gpioint);
- set_irq_data(irq, &chip->chip);
- set_irq_handler(irq, handle_level_irq);
- set_irq_flags(irq, IRQF_VALID);
+ for (i = 0; i < nr_chips; i++, chip++) {
+ if (chip->config == NULL) {
+ chip->config = &gpio_cfg;
+ chip->group = gpioint_group++;
}
- } else if (chip->config == &gpio_cfg_eint) {
- chip->chip.to_irq = s5pc100_gpiolib_to_eint;
+ if (chip->base == NULL)
+ chip->base = S5PC100_BANK_BASE(i);
}
-}
-
-static __init int s5pc100_gpiolib_init(void)
-{
- struct s3c_gpio_chip *chip;
- int nr_chips;
-
- chip = s5pc100_gpio_chips;
- nr_chips = ARRAY_SIZE(s5pc100_gpio_chips);
-
- for (; nr_chips > 0; nr_chips--, chip++)
- s5pc100_gpiolib_link(chip);
-
- samsung_gpiolib_add_4bit_chips(s5pc100_gpio_chips,
- ARRAY_SIZE(s5pc100_gpio_chips));
- /* Interrupt */
- set_irq_chained_handler(IRQ_GPIOINT, s5pc100_irq_gpioint_handler);
+ samsung_gpiolib_add_4bit_chips(s5pc100_gpio_chips, nr_chips);
return 0;
}
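
The reworked s5pc100_gpiolib_init() above fills in defaults lazily: banks that leave .config NULL get gpio_cfg plus the next gpio-interrupt group number, and banks that leave .base NULL get an address derived from their position in the table (which is why the GPH banks, with their explicit 0xC00-range bases, were moved to the end). A standalone sketch of that loop follows; the addresses are fake and the structs are simplified, illustration only.

	#include <stdio.h>
	#include <stddef.h>

	#define ARRAY_SIZE(x)	(sizeof(x) / sizeof((x)[0]))
	#define VA_GPIO		((void *)0x1000)	/* pretend ioremapped base */
	#define BANK_BASE(nr)	((char *)VA_GPIO + ((nr) * 0x20))

	struct gpio_cfg { const char *name; };

	static struct gpio_cfg gpio_cfg_default = { "default" };
	static struct gpio_cfg gpio_cfg_eint    = { "eint" };

	struct gpio_chip_desc {
		void *base;
		struct gpio_cfg *config;
		int group;
		const char *label;
	};

	static struct gpio_chip_desc chips[] = {
		{ .label = "GPA0" },	/* base/config filled in by the loop below */
		{ .label = "GPA1" },
		{ .base = (char *)VA_GPIO + 0xC00, .config = &gpio_cfg_eint, .label = "GPH0" },
	};

	int main(void)
	{
		int group = 0;
		size_t i;

		for (i = 0; i < ARRAY_SIZE(chips); i++) {
			if (chips[i].config == NULL) {
				chips[i].config = &gpio_cfg_default;
				chips[i].group = group++;
			}
			if (chips[i].base == NULL)
				chips[i].base = BANK_BASE(i);

			printf("%s: base=%p cfg=%s group=%d\n", chips[i].label,
			       chips[i].base, chips[i].config->name, chips[i].group);
		}
		return 0;
	}
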
diff --git a/arch/arm/mach-s5pc100/include/mach/gpio.h b/arch/arm/mach-s5pc100/include/mach/gpio.h
index 71ae1f52df1d..29a8a12d9b4f 100644
--- a/arch/arm/mach-s5pc100/include/mach/gpio.h
+++ b/arch/arm/mach-s5pc100/include/mach/gpio.h
@@ -146,13 +146,6 @@ enum s5p_gpio_number {
/* define the number of gpios we need to the one after the MP04() range */
#define ARCH_NR_GPIOS (S5PC100_GPIO_END + 1)
-#define EINT_MODE S3C_GPIO_SFN(0x2)
-
-#define EINT_GPIO_0(x) S5PC100_GPH0(x)
-#define EINT_GPIO_1(x) S5PC100_GPH1(x)
-#define EINT_GPIO_2(x) S5PC100_GPH2(x)
-#define EINT_GPIO_3(x) S5PC100_GPH3(x)
-
#include <asm-generic/gpio.h>
#endif /* __ASM_ARCH_GPIO_H */
diff --git a/arch/arm/mach-s5pc100/include/mach/irqs.h b/arch/arm/mach-s5pc100/include/mach/irqs.h
index 06513e647242..d2eb4757381f 100644
--- a/arch/arm/mach-s5pc100/include/mach/irqs.h
+++ b/arch/arm/mach-s5pc100/include/mach/irqs.h
@@ -48,8 +48,8 @@
#define IRQ_SPI1 S5P_IRQ_VIC1(16)
#define IRQ_SPI2 S5P_IRQ_VIC1(17)
#define IRQ_IRDA S5P_IRQ_VIC1(18)
-#define IRQ_CAN0 S5P_IRQ_VIC1(19)
-#define IRQ_CAN1 S5P_IRQ_VIC1(20)
+#define IRQ_IIC2 S5P_IRQ_VIC1(19)
+#define IRQ_IIC3 S5P_IRQ_VIC1(20)
#define IRQ_HSIRX S5P_IRQ_VIC1(21)
#define IRQ_HSITX S5P_IRQ_VIC1(22)
#define IRQ_UHOST S5P_IRQ_VIC1(23)
@@ -100,11 +100,12 @@
#define S5P_EINT_BASE1 (S5P_IRQ_VIC0(0))
#define S5P_EINT_BASE2 (IRQ_VIC_END + 1)
-#define S3C_IRQ_GPIO_BASE (IRQ_EINT(31) + 1)
-#define S3C_IRQ_GPIO(x) (S3C_IRQ_GPIO_BASE + (x))
+/* GPIO interrupt */
+#define S5P_GPIOINT_BASE (IRQ_EINT(31) + 1)
+#define S5P_GPIOINT_GROUP_MAXNR 21
-/* Until MP04 Groups -> 40 (exactly 39) Groups * 8 ~= 320 GPIOs */
-#define NR_IRQS (S3C_IRQ_GPIO(320) + 1)
+/* Set the default NR_IRQS */
+#define NR_IRQS (IRQ_EINT(31) + S5P_GPIOINT_COUNT + 1)
/* Compatibility */
#define IRQ_LCD_FIFO IRQ_LCD0
diff --git a/arch/arm/mach-s5pc100/include/mach/map.h b/arch/arm/mach-s5pc100/include/mach/map.h
index 8751ef4a6804..32e9cab5c864 100644
--- a/arch/arm/mach-s5pc100/include/mach/map.h
+++ b/arch/arm/mach-s5pc100/include/mach/map.h
@@ -110,6 +110,8 @@
#define S5PC100_PA_PCM0 0xF2400000
#define S5PC100_PA_PCM1 0xF2500000
+#define S5PC100_PA_SPDIF 0xF2600000
+
#define S5PC100_PA_TSADC (0xF3000000)
/* KEYPAD */
diff --git a/arch/arm/mach-s5pc100/include/mach/regs-gpio.h b/arch/arm/mach-s5pc100/include/mach/regs-gpio.h
index dd6295e1251d..0bf73209ec7b 100644
--- a/arch/arm/mach-s5pc100/include/mach/regs-gpio.h
+++ b/arch/arm/mach-s5pc100/include/mach/regs-gpio.h
@@ -11,43 +11,6 @@
#include <mach/map.h>
-/* S5PC100 */
-#define S5PC100_GPIO_BASE S5P_VA_GPIO
-#define S5PC100_GPA0_BASE (S5PC100_GPIO_BASE + 0x0000)
-#define S5PC100_GPA1_BASE (S5PC100_GPIO_BASE + 0x0020)
-#define S5PC100_GPB_BASE (S5PC100_GPIO_BASE + 0x0040)
-#define S5PC100_GPC_BASE (S5PC100_GPIO_BASE + 0x0060)
-#define S5PC100_GPD_BASE (S5PC100_GPIO_BASE + 0x0080)
-#define S5PC100_GPE0_BASE (S5PC100_GPIO_BASE + 0x00A0)
-#define S5PC100_GPE1_BASE (S5PC100_GPIO_BASE + 0x00C0)
-#define S5PC100_GPF0_BASE (S5PC100_GPIO_BASE + 0x00E0)
-#define S5PC100_GPF1_BASE (S5PC100_GPIO_BASE + 0x0100)
-#define S5PC100_GPF2_BASE (S5PC100_GPIO_BASE + 0x0120)
-#define S5PC100_GPF3_BASE (S5PC100_GPIO_BASE + 0x0140)
-#define S5PC100_GPG0_BASE (S5PC100_GPIO_BASE + 0x0160)
-#define S5PC100_GPG1_BASE (S5PC100_GPIO_BASE + 0x0180)
-#define S5PC100_GPG2_BASE (S5PC100_GPIO_BASE + 0x01A0)
-#define S5PC100_GPG3_BASE (S5PC100_GPIO_BASE + 0x01C0)
-#define S5PC100_GPH0_BASE (S5PC100_GPIO_BASE + 0x0C00)
-#define S5PC100_GPH1_BASE (S5PC100_GPIO_BASE + 0x0C20)
-#define S5PC100_GPH2_BASE (S5PC100_GPIO_BASE + 0x0C40)
-#define S5PC100_GPH3_BASE (S5PC100_GPIO_BASE + 0x0C60)
-#define S5PC100_GPI_BASE (S5PC100_GPIO_BASE + 0x01E0)
-#define S5PC100_GPJ0_BASE (S5PC100_GPIO_BASE + 0x0200)
-#define S5PC100_GPJ1_BASE (S5PC100_GPIO_BASE + 0x0220)
-#define S5PC100_GPJ2_BASE (S5PC100_GPIO_BASE + 0x0240)
-#define S5PC100_GPJ3_BASE (S5PC100_GPIO_BASE + 0x0260)
-#define S5PC100_GPJ4_BASE (S5PC100_GPIO_BASE + 0x0280)
-#define S5PC100_GPK0_BASE (S5PC100_GPIO_BASE + 0x02A0)
-#define S5PC100_GPK1_BASE (S5PC100_GPIO_BASE + 0x02C0)
-#define S5PC100_GPK2_BASE (S5PC100_GPIO_BASE + 0x02E0)
-#define S5PC100_GPK3_BASE (S5PC100_GPIO_BASE + 0x0300)
-#define S5PC100_GPL0_BASE (S5PC100_GPIO_BASE + 0x0320)
-#define S5PC100_GPL1_BASE (S5PC100_GPIO_BASE + 0x0340)
-#define S5PC100_GPL2_BASE (S5PC100_GPIO_BASE + 0x0360)
-#define S5PC100_GPL3_BASE (S5PC100_GPIO_BASE + 0x0380)
-#define S5PC100_GPL4_BASE (S5PC100_GPIO_BASE + 0x03A0)
-
#define S5PC100EINT30CON (S5P_VA_GPIO + 0xE00)
#define S5P_EINT_CON(x) (S5PC100EINT30CON + ((x) * 0x4))
@@ -64,12 +27,12 @@
#define eint_irq_to_bit(irq) (1 << (EINT_OFFSET(irq) & 0x7))
-/* values for S5P_EXTINT0 */
-#define S5P_EXTINT_LOWLEV (0x00)
-#define S5P_EXTINT_HILEV (0x01)
-#define S5P_EXTINT_FALLEDGE (0x02)
-#define S5P_EXTINT_RISEEDGE (0x03)
-#define S5P_EXTINT_BOTHEDGE (0x04)
+#define EINT_MODE S3C_GPIO_SFN(0x2)
+
+#define EINT_GPIO_0(x) S5PC100_GPH0(x)
+#define EINT_GPIO_1(x) S5PC100_GPH1(x)
+#define EINT_GPIO_2(x) S5PC100_GPH2(x)
+#define EINT_GPIO_3(x) S5PC100_GPH3(x)
#endif /* __ASM_MACH_S5PC100_REGS_GPIO_H */
diff --git a/arch/arm/mach-s5pc100/include/mach/vmalloc.h b/arch/arm/mach-s5pc100/include/mach/vmalloc.h
index be9df79903ed..44c8e5726d9d 100644
--- a/arch/arm/mach-s5pc100/include/mach/vmalloc.h
+++ b/arch/arm/mach-s5pc100/include/mach/vmalloc.h
@@ -12,6 +12,6 @@
#ifndef __ASM_ARCH_VMALLOC_H
#define __ASM_ARCH_VMALLOC_H
-#define VMALLOC_END (0xe0000000UL)
+#define VMALLOC_END 0xF6000000UL
#endif /* __ASM_ARCH_VMALLOC_H */
diff --git a/arch/arm/mach-s5pc100/irq-gpio.c b/arch/arm/mach-s5pc100/irq-gpio.c
deleted file mode 100644
index 2bf86c18bc73..000000000000
--- a/arch/arm/mach-s5pc100/irq-gpio.c
+++ /dev/null
@@ -1,266 +0,0 @@
-/*
- * arch/arm/mach-s5pc100/irq-gpio.c
- *
- * Copyright (C) 2009 Samsung Electronics
- *
- * S5PC100 - Interrupt handling for IRQ_GPIO${group}(x)
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- */
-
-#include <linux/kernel.h>
-#include <linux/interrupt.h>
-#include <linux/irq.h>
-#include <linux/io.h>
-#include <linux/gpio.h>
-
-#include <mach/map.h>
-#include <plat/gpio-cfg.h>
-
-#define S5P_GPIOREG(x) (S5P_VA_GPIO + (x))
-
-#define CON_OFFSET 0x700
-#define MASK_OFFSET 0x900
-#define PEND_OFFSET 0xA00
-#define CON_OFFSET_2 0xE00
-#define MASK_OFFSET_2 0xF00
-#define PEND_OFFSET_2 0xF40
-
-#define GPIOINT_LEVEL_LOW 0x0
-#define GPIOINT_LEVEL_HIGH 0x1
-#define GPIOINT_EDGE_FALLING 0x2
-#define GPIOINT_EDGE_RISING 0x3
-#define GPIOINT_EDGE_BOTH 0x4
-
-static int group_to_con_offset(int group)
-{
- return group << 2;
-}
-
-static int group_to_mask_offset(int group)
-{
- return group << 2;
-}
-
-static int group_to_pend_offset(int group)
-{
- return group << 2;
-}
-
-static int s5pc100_get_start(unsigned int group)
-{
- switch (group) {
- case 0: return S5PC100_GPIO_A0_START;
- case 1: return S5PC100_GPIO_A1_START;
- case 2: return S5PC100_GPIO_B_START;
- case 3: return S5PC100_GPIO_C_START;
- case 4: return S5PC100_GPIO_D_START;
- case 5: return S5PC100_GPIO_E0_START;
- case 6: return S5PC100_GPIO_E1_START;
- case 7: return S5PC100_GPIO_F0_START;
- case 8: return S5PC100_GPIO_F1_START;
- case 9: return S5PC100_GPIO_F2_START;
- case 10: return S5PC100_GPIO_F3_START;
- case 11: return S5PC100_GPIO_G0_START;
- case 12: return S5PC100_GPIO_G1_START;
- case 13: return S5PC100_GPIO_G2_START;
- case 14: return S5PC100_GPIO_G3_START;
- case 15: return S5PC100_GPIO_I_START;
- case 16: return S5PC100_GPIO_J0_START;
- case 17: return S5PC100_GPIO_J1_START;
- case 18: return S5PC100_GPIO_J2_START;
- case 19: return S5PC100_GPIO_J3_START;
- case 20: return S5PC100_GPIO_J4_START;
- default:
- BUG();
- }
-
- return -EINVAL;
-}
-
-static int s5pc100_get_group(unsigned int irq)
-{
- irq -= S3C_IRQ_GPIO(0);
-
- switch (irq) {
- case S5PC100_GPIO_A0_START ... S5PC100_GPIO_A1_START - 1:
- return 0;
- case S5PC100_GPIO_A1_START ... S5PC100_GPIO_B_START - 1:
- return 1;
- case S5PC100_GPIO_B_START ... S5PC100_GPIO_C_START - 1:
- return 2;
- case S5PC100_GPIO_C_START ... S5PC100_GPIO_D_START - 1:
- return 3;
- case S5PC100_GPIO_D_START ... S5PC100_GPIO_E0_START - 1:
- return 4;
- case S5PC100_GPIO_E0_START ... S5PC100_GPIO_E1_START - 1:
- return 5;
- case S5PC100_GPIO_E1_START ... S5PC100_GPIO_F0_START - 1:
- return 6;
- case S5PC100_GPIO_F0_START ... S5PC100_GPIO_F1_START - 1:
- return 7;
- case S5PC100_GPIO_F1_START ... S5PC100_GPIO_F2_START - 1:
- return 8;
- case S5PC100_GPIO_F2_START ... S5PC100_GPIO_F3_START - 1:
- return 9;
- case S5PC100_GPIO_F3_START ... S5PC100_GPIO_G0_START - 1:
- return 10;
- case S5PC100_GPIO_G0_START ... S5PC100_GPIO_G1_START - 1:
- return 11;
- case S5PC100_GPIO_G1_START ... S5PC100_GPIO_G2_START - 1:
- return 12;
- case S5PC100_GPIO_G2_START ... S5PC100_GPIO_G3_START - 1:
- return 13;
- case S5PC100_GPIO_G3_START ... S5PC100_GPIO_H0_START - 1:
- return 14;
- case S5PC100_GPIO_I_START ... S5PC100_GPIO_J0_START - 1:
- return 15;
- case S5PC100_GPIO_J0_START ... S5PC100_GPIO_J1_START - 1:
- return 16;
- case S5PC100_GPIO_J1_START ... S5PC100_GPIO_J2_START - 1:
- return 17;
- case S5PC100_GPIO_J2_START ... S5PC100_GPIO_J3_START - 1:
- return 18;
- case S5PC100_GPIO_J3_START ... S5PC100_GPIO_J4_START - 1:
- return 19;
- case S5PC100_GPIO_J4_START ... S5PC100_GPIO_K0_START - 1:
- return 20;
- default:
- BUG();
- }
-
- return -EINVAL;
-}
-
-static int s5pc100_get_offset(unsigned int irq)
-{
- struct gpio_chip *chip = get_irq_data(irq);
- return irq - S3C_IRQ_GPIO(chip->base);
-}
-
-static void s5pc100_gpioint_ack(unsigned int irq)
-{
- int group, offset, pend_offset;
- unsigned int value;
-
- group = s5pc100_get_group(irq);
- offset = s5pc100_get_offset(irq);
- pend_offset = group_to_pend_offset(group);
-
- value = __raw_readl(S5P_GPIOREG(PEND_OFFSET) + pend_offset);
- value |= 1 << offset;
- __raw_writel(value, S5P_GPIOREG(PEND_OFFSET) + pend_offset);
-}
-
-static void s5pc100_gpioint_mask(unsigned int irq)
-{
- int group, offset, mask_offset;
- unsigned int value;
-
- group = s5pc100_get_group(irq);
- offset = s5pc100_get_offset(irq);
- mask_offset = group_to_mask_offset(group);
-
- value = __raw_readl(S5P_GPIOREG(MASK_OFFSET) + mask_offset);
- value |= 1 << offset;
- __raw_writel(value, S5P_GPIOREG(MASK_OFFSET) + mask_offset);
-}
-
-static void s5pc100_gpioint_unmask(unsigned int irq)
-{
- int group, offset, mask_offset;
- unsigned int value;
-
- group = s5pc100_get_group(irq);
- offset = s5pc100_get_offset(irq);
- mask_offset = group_to_mask_offset(group);
-
- value = __raw_readl(S5P_GPIOREG(MASK_OFFSET) + mask_offset);
- value &= ~(1 << offset);
- __raw_writel(value, S5P_GPIOREG(MASK_OFFSET) + mask_offset);
-}
-
-static void s5pc100_gpioint_mask_ack(unsigned int irq)
-{
- s5pc100_gpioint_mask(irq);
- s5pc100_gpioint_ack(irq);
-}
-
-static int s5pc100_gpioint_set_type(unsigned int irq, unsigned int type)
-{
- int group, offset, con_offset;
- unsigned int value;
-
- group = s5pc100_get_group(irq);
- offset = s5pc100_get_offset(irq);
- con_offset = group_to_con_offset(group);
-
- switch (type) {
- case IRQ_TYPE_NONE:
- printk(KERN_WARNING "No irq type\n");
- return -EINVAL;
- case IRQ_TYPE_EDGE_RISING:
- type = GPIOINT_EDGE_RISING;
- break;
- case IRQ_TYPE_EDGE_FALLING:
- type = GPIOINT_EDGE_FALLING;
- break;
- case IRQ_TYPE_EDGE_BOTH:
- type = GPIOINT_EDGE_BOTH;
- break;
- case IRQ_TYPE_LEVEL_HIGH:
- type = GPIOINT_LEVEL_HIGH;
- break;
- case IRQ_TYPE_LEVEL_LOW:
- type = GPIOINT_LEVEL_LOW;
- break;
- default:
- BUG();
- }
-
-
- value = __raw_readl(S5P_GPIOREG(CON_OFFSET) + con_offset);
- value &= ~(0xf << (offset * 0x4));
- value |= (type << (offset * 0x4));
- __raw_writel(value, S5P_GPIOREG(CON_OFFSET) + con_offset);
-
- return 0;
-}
-
-struct irq_chip s5pc100_gpioint = {
- .name = "GPIO",
- .ack = s5pc100_gpioint_ack,
- .mask = s5pc100_gpioint_mask,
- .mask_ack = s5pc100_gpioint_mask_ack,
- .unmask = s5pc100_gpioint_unmask,
- .set_type = s5pc100_gpioint_set_type,
-};
-
-void s5pc100_irq_gpioint_handler(unsigned int irq, struct irq_desc *desc)
-{
- int group, offset, pend_offset, mask_offset;
- int real_irq, group_end;
- unsigned int pend, mask;
-
- group_end = 21;
-
- for (group = 0; group < group_end; group++) {
- pend_offset = group_to_pend_offset(group);
- pend = __raw_readl(S5P_GPIOREG(PEND_OFFSET) + pend_offset);
- if (!pend)
- continue;
-
- mask_offset = group_to_mask_offset(group);
- mask = __raw_readl(S5P_GPIOREG(MASK_OFFSET) + mask_offset);
- pend &= ~mask;
-
- for (offset = 0; offset < 8; offset++) {
- if (pend & (1 << offset)) {
- real_irq = s5pc100_get_start(group) + offset;
- generic_handle_irq(S3C_IRQ_GPIO(real_irq));
- }
- }
- }
-}
diff --git a/arch/arm/mach-s5pc100/mach-smdkc100.c b/arch/arm/mach-s5pc100/mach-smdkc100.c
index 880fb075092c..18b405d514d6 100644
--- a/arch/arm/mach-s5pc100/mach-smdkc100.c
+++ b/arch/arm/mach-s5pc100/mach-smdkc100.c
@@ -47,6 +47,7 @@
#include <plat/adc.h>
#include <plat/keypad.h>
#include <plat/ts.h>
+#include <plat/audio.h>
/* Following are default values for UCON, ULCON and UFCON UART registers */
#define SMDKC100_UCON_DEFAULT (S3C2410_UCON_TXILEVEL | \
@@ -196,6 +197,7 @@ static struct platform_device *smdkc100_devices[] __initdata = {
&s5p_device_fimc0,
&s5p_device_fimc1,
&s5p_device_fimc2,
+ &s5pc100_device_spdif,
};
static struct s3c2410_ts_mach_info s3c_ts_platform __initdata = {
@@ -226,6 +228,8 @@ static void __init smdkc100_machine_init(void)
samsung_keypad_set_platdata(&smdkc100_keypad_data);
+ s5pc100_spdif_setup_gpio(S5PC100_SPDIF_GPD);
+
/* LCD init */
gpio_request(S5PC100_GPD(0), "GPD");
gpio_request(S5PC100_GPH0(6), "GPH0");
diff --git a/arch/arm/mach-s5pc100/setup-fb-24bpp.c b/arch/arm/mach-s5pc100/setup-fb-24bpp.c
index 6eba6cb8e2f4..d31c0f3fe222 100644
--- a/arch/arm/mach-s5pc100/setup-fb-24bpp.c
+++ b/arch/arm/mach-s5pc100/setup-fb-24bpp.c
@@ -22,27 +22,15 @@
#define DISR_OFFSET 0x7008
-void s5pc100_fb_gpio_setup_24bpp(void)
+static void s5pc100_fb_setgpios(unsigned int base, unsigned int nr)
{
- unsigned int gpio = 0;
-
- for (gpio = S5PC100_GPF0(0); gpio <= S5PC100_GPF0(7); gpio++) {
- s3c_gpio_cfgpin(gpio, S3C_GPIO_SFN(2));
- s3c_gpio_setpull(gpio, S3C_GPIO_PULL_NONE);
- }
-
- for (gpio = S5PC100_GPF1(0); gpio <= S5PC100_GPF1(7); gpio++) {
- s3c_gpio_cfgpin(gpio, S3C_GPIO_SFN(2));
- s3c_gpio_setpull(gpio, S3C_GPIO_PULL_NONE);
- }
-
- for (gpio = S5PC100_GPF2(0); gpio <= S5PC100_GPF2(7); gpio++) {
- s3c_gpio_cfgpin(gpio, S3C_GPIO_SFN(2));
- s3c_gpio_setpull(gpio, S3C_GPIO_PULL_NONE);
- }
+ s3c_gpio_cfgrange_nopull(base, nr, S3C_GPIO_SFN(2));
+}
- for (gpio = S5PC100_GPF3(0); gpio <= S5PC100_GPF3(3); gpio++) {
- s3c_gpio_cfgpin(gpio, S3C_GPIO_SFN(2));
- s3c_gpio_setpull(gpio, S3C_GPIO_PULL_NONE);
- }
+void s5pc100_fb_gpio_setup_24bpp(void)
+{
+ s5pc100_fb_setgpios(S5PC100_GPF0(0), 8);
+ s5pc100_fb_setgpios(S5PC100_GPF1(0), 8);
+ s5pc100_fb_setgpios(S5PC100_GPF2(0), 8);
+ s5pc100_fb_setgpios(S5PC100_GPF3(0), 4);
}
diff --git a/arch/arm/mach-s5pc100/setup-i2c0.c b/arch/arm/mach-s5pc100/setup-i2c0.c
index dd3174e6ecc5..eaef7a3bda49 100644
--- a/arch/arm/mach-s5pc100/setup-i2c0.c
+++ b/arch/arm/mach-s5pc100/setup-i2c0.c
@@ -23,8 +23,6 @@ struct platform_device; /* don't need the contents */
void s3c_i2c0_cfg_gpio(struct platform_device *dev)
{
- s3c_gpio_cfgpin(S5PC100_GPD(3), S3C_GPIO_SFN(2));
- s3c_gpio_setpull(S5PC100_GPD(3), S3C_GPIO_PULL_UP);
- s3c_gpio_cfgpin(S5PC100_GPD(4), S3C_GPIO_SFN(2));
- s3c_gpio_setpull(S5PC100_GPD(4), S3C_GPIO_PULL_UP);
+ s3c_gpio_cfgall_range(S5PC100_GPD(3), 2,
+ S3C_GPIO_SFN(2), S3C_GPIO_PULL_UP);
}
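The conversions in these setup files replace open-coded per-pin loops with the gpio range helpers from plat-samsung. As a rough sketch of what those helpers are assumed to do (built only from the s3c_gpio_cfgpin()/s3c_gpio_setpull() calls already visible above; the helpers themselves are not defined in this patch):

#include <plat/gpio-cfg.h>

/* Sketch only: assumed behaviour of the range helpers used above. */
static int example_cfgpin_range(unsigned int start, unsigned int nr,
				unsigned int cfg)
{
	int ret;

	for (; nr > 0; nr--, start++) {
		/* same special-function setting, applied pin by pin */
		ret = s3c_gpio_cfgpin(start, cfg);
		if (ret != 0)
			return ret;
	}

	return 0;
}

static int example_cfgall_range(unsigned int start, unsigned int nr,
				unsigned int cfg, s3c_gpio_pull_t pull)
{
	int ret;

	for (; nr > 0; nr--, start++) {
		/* pull setting plus special-function setting per pin */
		s3c_gpio_setpull(start, pull);
		ret = s3c_gpio_cfgpin(start, cfg);
		if (ret != 0)
			return ret;
	}

	return 0;
}

Under that reading, s3c_gpio_cfgrange_nopull(base, nr, cfg) is just the S3C_GPIO_PULL_NONE case of the second helper, and a call such as s3c_gpio_cfgall_range(S5PC100_GPD(3), 2, S3C_GPIO_SFN(2), S3C_GPIO_PULL_UP) configures GPD(3) and GPD(4) exactly as the four removed lines did.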
diff --git a/arch/arm/mach-s5pc100/setup-i2c1.c b/arch/arm/mach-s5pc100/setup-i2c1.c
index d1fec26b69ee..aaff74a90dee 100644
--- a/arch/arm/mach-s5pc100/setup-i2c1.c
+++ b/arch/arm/mach-s5pc100/setup-i2c1.c
@@ -23,8 +23,6 @@ struct platform_device; /* don't need the contents */
void s3c_i2c1_cfg_gpio(struct platform_device *dev)
{
- s3c_gpio_cfgpin(S5PC100_GPD(5), S3C_GPIO_SFN(2));
- s3c_gpio_setpull(S5PC100_GPD(5), S3C_GPIO_PULL_UP);
- s3c_gpio_cfgpin(S5PC100_GPD(6), S3C_GPIO_SFN(2));
- s3c_gpio_setpull(S5PC100_GPD(6), S3C_GPIO_PULL_UP);
+ s3c_gpio_cfgall_range(S5PC100_GPD(5), 2,
+ S3C_GPIO_SFN(2), S3C_GPIO_PULL_UP);
}
diff --git a/arch/arm/mach-s5pc100/setup-ide.c b/arch/arm/mach-s5pc100/setup-ide.c
index 83575671fb59..223aae044466 100644
--- a/arch/arm/mach-s5pc100/setup-ide.c
+++ b/arch/arm/mach-s5pc100/setup-ide.c
@@ -17,52 +17,39 @@
#include <mach/regs-clock.h>
#include <plat/gpio-cfg.h>
+static void s5pc100_ide_cfg_gpios(unsigned int base, unsigned int nr)
+{
+ s3c_gpio_cfgrange_nopull(base, nr, S3C_GPIO_SFN(4));
+
+ for (; nr > 0; nr--, base++)
+ s5p_gpio_set_drvstr(base, S5P_GPIO_DRVSTR_LV4);
+}
+
void s5pc100_ide_setup_gpio(void)
{
u32 reg;
- u32 gpio = 0;
/* Independent CF interface, CF chip select configuration */
reg = readl(S5PC100_MEM_SYS_CFG) & (~0x3f);
writel(reg | MEM_SYS_CFG_EBI_FIX_PRI_CFCON, S5PC100_MEM_SYS_CFG);
/* CF_Add[0 - 2], CF_IORDY, CF_INTRQ, CF_DMARQ, CF_DMARST, CF_DMACK */
- for (gpio = S5PC100_GPJ0(0); gpio <= S5PC100_GPJ0(7); gpio++) {
- s3c_gpio_cfgpin(gpio, S3C_GPIO_SFN(4));
- s3c_gpio_setpull(gpio, S3C_GPIO_PULL_NONE);
- s5p_gpio_set_drvstr(gpio, S5P_GPIO_DRVSTR_LV4);
- }
+ s5pc100_ide_cfg_gpios(S5PC100_GPJ0(0), 8);
/*CF_Data[0 - 7] */
- for (gpio = S5PC100_GPJ2(0); gpio <= S5PC100_GPJ2(7); gpio++) {
- s3c_gpio_cfgpin(gpio, S3C_GPIO_SFN(4));
- s3c_gpio_setpull(gpio, S3C_GPIO_PULL_NONE);
- s5p_gpio_set_drvstr(gpio, S5P_GPIO_DRVSTR_LV4);
- }
+ s5pc100_ide_cfg_gpios(S5PC100_GPJ2(0), 8);
/* CF_Data[8 - 15] */
- for (gpio = S5PC100_GPJ3(0); gpio <= S5PC100_GPJ3(7); gpio++) {
- s3c_gpio_cfgpin(gpio, S3C_GPIO_SFN(4));
- s3c_gpio_setpull(gpio, S3C_GPIO_PULL_NONE);
- s5p_gpio_set_drvstr(gpio, S5P_GPIO_DRVSTR_LV4);
- }
+ s5pc100_ide_cfg_gpios(S5PC100_GPJ3(0), 8);
/* CF_CS0, CF_CS1, CF_IORD, CF_IOWR */
- for (gpio = S5PC100_GPJ4(0); gpio <= S5PC100_GPJ4(3); gpio++) {
- s3c_gpio_cfgpin(gpio, S3C_GPIO_SFN(4));
- s3c_gpio_setpull(gpio, S3C_GPIO_PULL_NONE);
- s5p_gpio_set_drvstr(gpio, S5P_GPIO_DRVSTR_LV4);
- }
+ s5pc100_ide_cfg_gpios(S5PC100_GPJ4(0), 4);
/* EBI_OE, EBI_WE */
- for (gpio = S5PC100_GPK0(6); gpio <= S5PC100_GPK0(7); gpio++)
- s3c_gpio_cfgpin(gpio, S3C_GPIO_SFN(0));
+ s3c_gpio_cfgpin_range(S5PC100_GPK0(6), 2, S3C_GPIO_SFN(0));
/* CF_OE, CF_WE */
- for (gpio = S5PC100_GPK1(6); gpio <= S5PC100_GPK1(7); gpio++) {
- s3c_gpio_cfgpin(gpio, S3C_GPIO_SFN(2));
- s3c_gpio_setpull(gpio, S3C_GPIO_PULL_NONE);
- }
+ s3c_gpio_cfgrange_nopull(S5PC100_GPK1(6), 8, S3C_GPIO_SFN(2));
/* CF_CD */
s3c_gpio_cfgpin(S5PC100_GPK3(5), S3C_GPIO_SFN(2));
diff --git a/arch/arm/mach-s5pc100/setup-keypad.c b/arch/arm/mach-s5pc100/setup-keypad.c
index d0837a72a58e..ada377f0c206 100644
--- a/arch/arm/mach-s5pc100/setup-keypad.c
+++ b/arch/arm/mach-s5pc100/setup-keypad.c
@@ -15,20 +15,9 @@
void samsung_keypad_cfg_gpio(unsigned int rows, unsigned int cols)
{
- unsigned int gpio;
- unsigned int end;
-
/* Set all the necessary GPH3 pins to special-function 3: KP_ROW[x] */
- end = S5PC100_GPH3(rows);
- for (gpio = S5PC100_GPH3(0); gpio < end; gpio++) {
- s3c_gpio_cfgpin(gpio, S3C_GPIO_SFN(3));
- s3c_gpio_setpull(gpio, S3C_GPIO_PULL_NONE);
- }
+ s3c_gpio_cfgrange_nopull(S5PC100_GPH3(0), rows, S3C_GPIO_SFN(3));
/* Set all the necessary GPH2 pins to special-function 3: KP_COL[x] */
- end = S5PC100_GPH2(cols);
- for (gpio = S5PC100_GPH2(0); gpio < end; gpio++) {
- s3c_gpio_cfgpin(gpio, S3C_GPIO_SFN(3));
- s3c_gpio_setpull(gpio, S3C_GPIO_PULL_NONE);
- }
+ s3c_gpio_cfgrange_nopull(S5PC100_GPH2(0), cols, S3C_GPIO_SFN(3));
}
diff --git a/arch/arm/mach-s5pc100/setup-sdhci-gpio.c b/arch/arm/mach-s5pc100/setup-sdhci-gpio.c
index dc7208c639ea..03c02d04c68c 100644
--- a/arch/arm/mach-s5pc100/setup-sdhci-gpio.c
+++ b/arch/arm/mach-s5pc100/setup-sdhci-gpio.c
@@ -25,8 +25,6 @@
void s5pc100_setup_sdhci0_cfg_gpio(struct platform_device *dev, int width)
{
struct s3c_sdhci_platdata *pdata = dev->dev.platform_data;
- unsigned int gpio;
- unsigned int end;
unsigned int num;
num = width;
@@ -34,20 +32,11 @@ void s5pc100_setup_sdhci0_cfg_gpio(struct platform_device *dev, int width)
if (width == 8)
num = width - 2;
- end = S5PC100_GPG0(2 + num);
-
/* Set all the necessary GPG0/GPG1 pins to special-function 0 */
- for (gpio = S5PC100_GPG0(0); gpio < end; gpio++) {
- s3c_gpio_cfgpin(gpio, S3C_GPIO_SFN(2));
- s3c_gpio_setpull(gpio, S3C_GPIO_PULL_NONE);
- }
+ s3c_gpio_cfgrange_nopull(S5PC100_GPG0(0), 2 + num, S3C_GPIO_SFN(2));
- if (width == 8) {
- for (gpio = S5PC100_GPG1(0); gpio <= S5PC100_GPG1(1); gpio++) {
- s3c_gpio_cfgpin(gpio, S3C_GPIO_SFN(2));
- s3c_gpio_setpull(gpio, S3C_GPIO_PULL_NONE);
- }
- }
+ if (width == 8)
+ s3c_gpio_cfgrange_nopull(S5PC100_GPG1(0), 2, S3C_GPIO_SFN(2));
if (pdata->cd_type == S3C_SDHCI_CD_INTERNAL) {
s3c_gpio_setpull(S5PC100_GPG1(2), S3C_GPIO_PULL_UP);
@@ -58,16 +47,9 @@ void s5pc100_setup_sdhci0_cfg_gpio(struct platform_device *dev, int width)
void s5pc100_setup_sdhci1_cfg_gpio(struct platform_device *dev, int width)
{
struct s3c_sdhci_platdata *pdata = dev->dev.platform_data;
- unsigned int gpio;
- unsigned int end;
-
- end = S5PC100_GPG2(2 + width);
/* Set all the necessary GPG2 pins to special-function 2 */
- for (gpio = S5PC100_GPG2(0); gpio < end; gpio++) {
- s3c_gpio_cfgpin(gpio, S3C_GPIO_SFN(2));
- s3c_gpio_setpull(gpio, S3C_GPIO_PULL_NONE);
- }
+ s3c_gpio_cfgrange_nopull(S5PC100_GPG2(0), 2 + width, S3C_GPIO_SFN(2));
if (pdata->cd_type == S3C_SDHCI_CD_INTERNAL) {
s3c_gpio_setpull(S5PC100_GPG2(6), S3C_GPIO_PULL_UP);
@@ -78,16 +60,9 @@ void s5pc100_setup_sdhci1_cfg_gpio(struct platform_device *dev, int width)
void s5pc100_setup_sdhci2_cfg_gpio(struct platform_device *dev, int width)
{
struct s3c_sdhci_platdata *pdata = dev->dev.platform_data;
- unsigned int gpio;
- unsigned int end;
-
- end = S5PC100_GPG3(2 + width);
/* Set all the necessary GPG3 pins to special-function 2 */
- for (gpio = S5PC100_GPG3(0); gpio < end; gpio++) {
- s3c_gpio_cfgpin(gpio, S3C_GPIO_SFN(2));
- s3c_gpio_setpull(gpio, S3C_GPIO_PULL_NONE);
- }
+ s3c_gpio_cfgrange_nopull(S5PC100_GPG3(0), 2 + width, S3C_GPIO_SFN(2));
if (pdata->cd_type == S3C_SDHCI_CD_INTERNAL) {
s3c_gpio_setpull(S5PC100_GPG3(6), S3C_GPIO_PULL_UP);
diff --git a/arch/arm/mach-s5pv210/Kconfig b/arch/arm/mach-s5pv210/Kconfig
index 5315fec3db86..862f239a0fdb 100644
--- a/arch/arm/mach-s5pv210/Kconfig
+++ b/arch/arm/mach-s5pv210/Kconfig
@@ -11,9 +11,9 @@ if ARCH_S5PV210
config CPU_S5PV210
bool
- select PLAT_S5P
select S3C_PL330_DMA
select S5P_EXT_INT
+ select S5PV210_PM if PM
help
Enable S5PV210 CPU support
@@ -58,7 +58,6 @@ menu "S5PC110 Machines"
config MACH_AQUILA
bool "Aquila"
select CPU_S5PV210
- select ARCH_SPARSEMEM_ENABLE
select S3C_DEV_FB
select S5P_DEV_FIMC0
select S5P_DEV_FIMC1
@@ -75,7 +74,7 @@ config MACH_AQUILA
config MACH_GONI
bool "GONI"
select CPU_S5PV210
- select ARCH_SPARSEMEM_ENABLE
+ select S5P_GPIO_INT
select S3C_DEV_FB
select S5P_DEV_FIMC0
select S5P_DEV_FIMC1
@@ -83,8 +82,15 @@ config MACH_GONI
select S3C_DEV_HSMMC
select S3C_DEV_HSMMC1
select S3C_DEV_HSMMC2
+ select S3C_DEV_I2C1
+ select S3C_DEV_I2C2
+ select S3C_DEV_USB_HSOTG
select S5P_DEV_ONENAND
+ select SAMSUNG_DEV_KEYPAD
select S5PV210_SETUP_FB_24BPP
+ select S5PV210_SETUP_I2C1
+ select S5PV210_SETUP_I2C2
+ select S5PV210_SETUP_KEYPAD
select S5PV210_SETUP_SDHCI
help
Machine support for Samsung GONI board
@@ -93,7 +99,6 @@ config MACH_GONI
config MACH_SMDKC110
bool "SMDKC110"
select CPU_S5PV210
- select ARCH_SPARSEMEM_ENABLE
select S3C_DEV_I2C1
select S3C_DEV_I2C2
select S3C_DEV_RTC
@@ -113,7 +118,6 @@ menu "S5PV210 Machines"
config MACH_SMDKV210
bool "SMDKV210"
select CPU_S5PV210
- select ARCH_SPARSEMEM_ENABLE
select S3C_DEV_HSMMC
select S3C_DEV_HSMMC1
select S3C_DEV_HSMMC2
@@ -134,6 +138,29 @@ config MACH_SMDKV210
help
Machine support for Samsung SMDKV210
+config MACH_TORBRECK
+ bool "Torbreck"
+ select CPU_S5PV210
+ select ARCH_SPARSEMEM_ENABLE
+ select S3C_DEV_HSMMC
+ select S3C_DEV_HSMMC1
+ select S3C_DEV_HSMMC2
+ select S3C_DEV_HSMMC3
+ select S3C_DEV_I2C1
+ select S3C_DEV_I2C2
+ select S3C_DEV_RTC
+ select S3C_DEV_WDT
+ select S5PV210_SETUP_I2C1
+ select S5PV210_SETUP_I2C2
+ select S5PV210_SETUP_SDHCI
+ help
+ Machine support for aESOP Torbreck
+
endmenu
+config S5PV210_PM
+ bool
+ help
+ Power Management code common to S5PV210
+
endif
diff --git a/arch/arm/mach-s5pv210/Makefile b/arch/arm/mach-s5pv210/Makefile
index 704548912408..ff1a0db57a2f 100644
--- a/arch/arm/mach-s5pv210/Makefile
+++ b/arch/arm/mach-s5pv210/Makefile
@@ -14,6 +14,8 @@ obj- :=
obj-$(CONFIG_CPU_S5PV210) += cpu.o init.o clock.o dma.o gpiolib.o
obj-$(CONFIG_CPU_S5PV210) += setup-i2c0.o
+obj-$(CONFIG_S5PV210_PM) += pm.o sleep.o
+obj-$(CONFIG_CPU_FREQ) += cpufreq.o
# machine support
@@ -21,6 +23,7 @@ obj-$(CONFIG_MACH_AQUILA) += mach-aquila.o
obj-$(CONFIG_MACH_SMDKV210) += mach-smdkv210.o
obj-$(CONFIG_MACH_SMDKC110) += mach-smdkc110.o
obj-$(CONFIG_MACH_GONI) += mach-goni.o
+obj-$(CONFIG_MACH_TORBRECK) += mach-torbreck.o
# device support
diff --git a/arch/arm/mach-s5pv210/clock.c b/arch/arm/mach-s5pv210/clock.c
index d562670e1b0b..019c3a69b0e4 100644
--- a/arch/arm/mach-s5pv210/clock.c
+++ b/arch/arm/mach-s5pv210/clock.c
@@ -31,6 +31,8 @@
#include <plat/clock-clksrc.h>
#include <plat/s5pv210.h>
+static unsigned long xtal;
+
static struct clksrc_clk clk_mout_apll = {
.clk = {
.name = "mout_apll",
@@ -259,6 +261,36 @@ static struct clksrc_clk clk_sclk_vpll = {
.reg_src = { .reg = S5P_CLK_SRC0, .shift = 12, .size = 1 },
};
+static struct clk *clkset_moutdmc0src_list[] = {
+ [0] = &clk_sclk_a2m.clk,
+ [1] = &clk_mout_mpll.clk,
+ [2] = NULL,
+ [3] = NULL,
+};
+
+static struct clksrc_sources clkset_moutdmc0src = {
+ .sources = clkset_moutdmc0src_list,
+ .nr_sources = ARRAY_SIZE(clkset_moutdmc0src_list),
+};
+
+static struct clksrc_clk clk_mout_dmc0 = {
+ .clk = {
+ .name = "mout_dmc0",
+ .id = -1,
+ },
+ .sources = &clkset_moutdmc0src,
+ .reg_src = { .reg = S5P_CLK_SRC6, .shift = 24, .size = 2 },
+};
+
+static struct clksrc_clk clk_sclk_dmc0 = {
+ .clk = {
+ .name = "sclk_dmc0",
+ .id = -1,
+ .parent = &clk_mout_dmc0.clk,
+ },
+ .reg_div = { .reg = S5P_CLK_DIV6, .shift = 28, .size = 4 },
+};
+
static unsigned long s5pv210_clk_imem_get_rate(struct clk *clk)
{
return clk_get_rate(clk->parent) / 2;
@@ -268,8 +300,29 @@ static struct clk_ops clk_hclk_imem_ops = {
.get_rate = s5pv210_clk_imem_get_rate,
};
+static unsigned long s5pv210_clk_fout_apll_get_rate(struct clk *clk)
+{
+ return s5p_get_pll45xx(xtal, __raw_readl(S5P_APLL_CON), pll_4508);
+}
+
+static struct clk_ops clk_fout_apll_ops = {
+ .get_rate = s5pv210_clk_fout_apll_get_rate,
+};
+
static struct clk init_clocks_disable[] = {
{
+ .name = "pdma",
+ .id = 0,
+ .parent = &clk_hclk_psys.clk,
+ .enable = s5pv210_clk_ip0_ctrl,
+ .ctrlbit = (1 << 3),
+ }, {
+ .name = "pdma",
+ .id = 1,
+ .parent = &clk_hclk_psys.clk,
+ .enable = s5pv210_clk_ip0_ctrl,
+ .ctrlbit = (1 << 4),
+ }, {
.name = "rot",
.id = -1,
.parent = &clk_hclk_dsys.clk,
@@ -431,6 +484,12 @@ static struct clk init_clocks_disable[] = {
.parent = &clk_p,
.enable = s5pv210_clk_ip3_ctrl,
.ctrlbit = (1 << 6),
+ }, {
+ .name = "spdif",
+ .id = -1,
+ .parent = &clk_p,
+ .enable = s5pv210_clk_ip3_ctrl,
+ .ctrlbit = (1 << 0),
},
};
@@ -660,6 +719,53 @@ static struct clksrc_sources clkset_sclk_spdif = {
.nr_sources = ARRAY_SIZE(clkset_sclk_spdif_list),
};
+static int s5pv210_spdif_set_rate(struct clk *clk, unsigned long rate)
+{
+ struct clk *pclk;
+ int ret;
+
+ pclk = clk_get_parent(clk);
+ if (IS_ERR(pclk))
+ return -EINVAL;
+
+ ret = pclk->ops->set_rate(pclk, rate);
+ clk_put(pclk);
+
+ return ret;
+}
+
+static unsigned long s5pv210_spdif_get_rate(struct clk *clk)
+{
+ struct clk *pclk;
+ int rate;
+
+ pclk = clk_get_parent(clk);
+ if (IS_ERR(pclk))
+ return -EINVAL;
+
+ rate = pclk->ops->get_rate(clk);
+ clk_put(pclk);
+
+ return rate;
+}
+
+static struct clk_ops s5pv210_sclk_spdif_ops = {
+ .set_rate = s5pv210_spdif_set_rate,
+ .get_rate = s5pv210_spdif_get_rate,
+};
+
+static struct clksrc_clk clk_sclk_spdif = {
+ .clk = {
+ .name = "sclk_spdif",
+ .id = -1,
+ .enable = s5pv210_clk_mask0_ctrl,
+ .ctrlbit = (1 << 27),
+ .ops = &s5pv210_sclk_spdif_ops,
+ },
+ .sources = &clkset_sclk_spdif,
+ .reg_src = { .reg = S5P_CLK_SRC6, .shift = 12, .size = 2 },
+};
+
static struct clk *clkset_group2_list[] = {
[0] = &clk_ext_xtal_mux,
[1] = &clk_xusbxti,
@@ -744,15 +850,6 @@ static struct clksrc_clk clksrcs[] = {
.sources = &clkset_sclk_mixer,
.reg_src = { .reg = S5P_CLK_SRC1, .shift = 4, .size = 1 },
}, {
- .clk = {
- .name = "sclk_spdif",
- .id = -1,
- .enable = s5pv210_clk_mask0_ctrl,
- .ctrlbit = (1 << 27),
- },
- .sources = &clkset_sclk_spdif,
- .reg_src = { .reg = S5P_CLK_SRC6, .shift = 12, .size = 2 },
- }, {
.clk = {
.name = "sclk_fimc",
.id = 0,
@@ -953,12 +1050,93 @@ static struct clksrc_clk *sysclks[] = {
&clk_sclk_dac,
&clk_sclk_pixel,
&clk_sclk_hdmi,
+ &clk_mout_dmc0,
+ &clk_sclk_dmc0,
+ &clk_sclk_audio0,
+ &clk_sclk_audio1,
+ &clk_sclk_audio2,
+ &clk_sclk_spdif,
+};
+
+static u32 epll_div[][6] = {
+ { 48000000, 0, 48, 3, 3, 0 },
+ { 96000000, 0, 48, 3, 2, 0 },
+ { 144000000, 1, 72, 3, 2, 0 },
+ { 192000000, 0, 48, 3, 1, 0 },
+ { 288000000, 1, 72, 3, 1, 0 },
+ { 32750000, 1, 65, 3, 4, 35127 },
+ { 32768000, 1, 65, 3, 4, 35127 },
+ { 45158400, 0, 45, 3, 3, 10355 },
+ { 45000000, 0, 45, 3, 3, 10355 },
+ { 45158000, 0, 45, 3, 3, 10355 },
+ { 49125000, 0, 49, 3, 3, 9961 },
+ { 49152000, 0, 49, 3, 3, 9961 },
+ { 67737600, 1, 67, 3, 3, 48366 },
+ { 67738000, 1, 67, 3, 3, 48366 },
+ { 73800000, 1, 73, 3, 3, 47710 },
+ { 73728000, 1, 73, 3, 3, 47710 },
+ { 36000000, 1, 32, 3, 4, 0 },
+ { 60000000, 1, 60, 3, 3, 0 },
+ { 72000000, 1, 72, 3, 3, 0 },
+ { 80000000, 1, 80, 3, 3, 0 },
+ { 84000000, 0, 42, 3, 2, 0 },
+ { 50000000, 0, 50, 3, 3, 0 },
+};
+
+static int s5pv210_epll_set_rate(struct clk *clk, unsigned long rate)
+{
+ unsigned int epll_con, epll_con_k;
+ unsigned int i;
+
+ /* Return if nothing changed */
+ if (clk->rate == rate)
+ return 0;
+
+ epll_con = __raw_readl(S5P_EPLL_CON);
+ epll_con_k = __raw_readl(S5P_EPLL_CON1);
+
+ epll_con_k &= ~PLL46XX_KDIV_MASK;
+ epll_con &= ~(1 << 27 |
+ PLL46XX_MDIV_MASK << PLL46XX_MDIV_SHIFT |
+ PLL46XX_PDIV_MASK << PLL46XX_PDIV_SHIFT |
+ PLL46XX_SDIV_MASK << PLL46XX_SDIV_SHIFT);
+
+ for (i = 0; i < ARRAY_SIZE(epll_div); i++) {
+ if (epll_div[i][0] == rate) {
+ epll_con_k |= epll_div[i][5] << 0;
+ epll_con |= (epll_div[i][1] << 27 |
+ epll_div[i][2] << PLL46XX_MDIV_SHIFT |
+ epll_div[i][3] << PLL46XX_PDIV_SHIFT |
+ epll_div[i][4] << PLL46XX_SDIV_SHIFT);
+ break;
+ }
+ }
+
+ if (i == ARRAY_SIZE(epll_div)) {
+ printk(KERN_ERR "%s: Invalid Clock EPLL Frequency\n",
+ __func__);
+ return -EINVAL;
+ }
+
+ __raw_writel(epll_con, S5P_EPLL_CON);
+ __raw_writel(epll_con_k, S5P_EPLL_CON1);
+
+ printk(KERN_WARNING "EPLL Rate changes from %lu to %lu\n",
+ clk->rate, rate);
+
+ clk->rate = rate;
+
+ return 0;
+}
+
+static struct clk_ops s5pv210_epll_ops = {
+ .set_rate = s5pv210_epll_set_rate,
+ .get_rate = s5p_epll_get_rate,
};
void __init_or_cpufreq s5pv210_setup_clocks(void)
{
struct clk *xtal_clk;
- unsigned long xtal;
unsigned long vpllsrc;
unsigned long armclk;
unsigned long hclk_msys;
@@ -974,6 +1152,10 @@ void __init_or_cpufreq s5pv210_setup_clocks(void)
unsigned int ptr;
u32 clkdiv0, clkdiv1;
+ /* Set functions for clk_fout_epll */
+ clk_fout_epll.enable = s5p_epll_enable;
+ clk_fout_epll.ops = &s5pv210_epll_ops;
+
printk(KERN_DEBUG "%s: registering clocks\n", __func__);
clkdiv0 = __raw_readl(S5P_CLK_DIV0);
@@ -992,11 +1174,12 @@ void __init_or_cpufreq s5pv210_setup_clocks(void)
apll = s5p_get_pll45xx(xtal, __raw_readl(S5P_APLL_CON), pll_4508);
mpll = s5p_get_pll45xx(xtal, __raw_readl(S5P_MPLL_CON), pll_4502);
- epll = s5p_get_pll45xx(xtal, __raw_readl(S5P_EPLL_CON), pll_4500);
+ epll = s5p_get_pll46xx(xtal, __raw_readl(S5P_EPLL_CON),
+ __raw_readl(S5P_EPLL_CON1), pll_4600);
vpllsrc = clk_get_rate(&clk_vpllsrc.clk);
vpll = s5p_get_pll45xx(vpllsrc, __raw_readl(S5P_VPLL_CON), pll_4502);
- clk_fout_apll.rate = apll;
+ clk_fout_apll.ops = &clk_fout_apll_ops;
clk_fout_mpll.rate = mpll;
clk_fout_epll.rate = epll;
clk_fout_vpll.rate = vpll;
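For the epll_div[] table added above, the rows can be sanity-checked against the usual PLL46xx relation Fout = Fin * (M + K/65536) / (P * 2^S); both the relation and the { rate, vsel, M, P, S, K } column reading are assumptions inferred from how s5pv210_epll_set_rate() packs the fields, not something this hunk states. A minimal sketch:

/* Sketch only: recompute the EPLL output rate for one epll_div[] row,
 * assuming Fout = Fin * (M + K/65536) / (P * 2^S) and the column order
 * { rate, vsel, M, P, S, K }.
 */
static unsigned long epll_row_rate(unsigned long fin, const u32 row[6])
{
	unsigned long long tmp;

	tmp = (unsigned long long)fin * ((row[2] << 16) + row[5]);
	tmp /= row[3] << row[4];		/* P * 2^S */

	return (unsigned long)(tmp >> 16);	/* drop the 16-bit K fraction */
}

With a 24MHz crystal, { 48000000, 0, 48, 3, 3, 0 } gives 48 * 24MHz / 24 = 48000000, and { 45158400, 0, 45, 3, 3, 10355 } gives roughly 45158000, i.e. the audio-oriented rates the table advertises.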
diff --git a/arch/arm/mach-s5pv210/cpu.c b/arch/arm/mach-s5pv210/cpu.c
index 2f16bfc0a116..8eb480e201b0 100644
--- a/arch/arm/mach-s5pv210/cpu.c
+++ b/arch/arm/mach-s5pv210/cpu.c
@@ -85,6 +85,21 @@ static struct map_desc s5pv210_iodesc[] __initdata = {
.pfn = __phys_to_pfn(S5PV210_PA_SROMC),
.length = SZ_4K,
.type = MT_DEVICE,
+ }, {
+ .virtual = (unsigned long)S5P_VA_DMC0,
+ .pfn = __phys_to_pfn(S5PV210_PA_DMC0),
+ .length = SZ_4K,
+ .type = MT_DEVICE,
+ }, {
+ .virtual = (unsigned long)S5P_VA_DMC1,
+ .pfn = __phys_to_pfn(S5PV210_PA_DMC1),
+ .length = SZ_4K,
+ .type = MT_DEVICE,
+ }, {
+ .virtual = (unsigned long)S3C_VA_USB_HSPHY,
+ .pfn =__phys_to_pfn(S5PV210_PA_HSPHY),
+ .length = SZ_4K,
+ .type = MT_DEVICE,
}
};
diff --git a/arch/arm/mach-s5pv210/cpufreq.c b/arch/arm/mach-s5pv210/cpufreq.c
new file mode 100644
index 000000000000..a6f22920a2c2
--- /dev/null
+++ b/arch/arm/mach-s5pv210/cpufreq.c
@@ -0,0 +1,484 @@
+/* linux/arch/arm/mach-s5pv210/cpufreq.c
+ *
+ * Copyright (c) 2010 Samsung Electronics Co., Ltd.
+ * http://www.samsung.com
+ *
+ * CPU frequency scaling for S5PC110/S5PV210
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+*/
+
+#include <linux/types.h>
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/err.h>
+#include <linux/clk.h>
+#include <linux/io.h>
+#include <linux/cpufreq.h>
+
+#include <mach/map.h>
+#include <mach/regs-clock.h>
+
+static struct clk *cpu_clk;
+static struct clk *dmc0_clk;
+static struct clk *dmc1_clk;
+static struct cpufreq_freqs freqs;
+
+/* APLL M,P,S values for 1G/800Mhz */
+#define APLL_VAL_1000 ((1 << 31) | (125 << 16) | (3 << 8) | 1)
+#define APLL_VAL_800 ((1 << 31) | (100 << 16) | (3 << 8) | 1)
+
+/*
+ * DRAM configurations used to calculate the refresh counter when
+ * changing the memory frequency.
+ */
+struct dram_conf {
+ unsigned long freq; /* HZ */
+ unsigned long refresh; /* DRAM refresh counter * 1000 */
+};
+
+/* DRAM configuration (DMC0 and DMC1) */
+static struct dram_conf s5pv210_dram_conf[2];
+
+enum perf_level {
+ L0, L1, L2, L3, L4,
+};
+
+enum s5pv210_mem_type {
+ LPDDR = 0x1,
+ LPDDR2 = 0x2,
+ DDR2 = 0x4,
+};
+
+enum s5pv210_dmc_port {
+ DMC0 = 0,
+ DMC1,
+};
+
+static struct cpufreq_frequency_table s5pv210_freq_table[] = {
+ {L0, 1000*1000},
+ {L1, 800*1000},
+ {L2, 400*1000},
+ {L3, 200*1000},
+ {L4, 100*1000},
+ {0, CPUFREQ_TABLE_END},
+};
+
+static u32 clkdiv_val[5][11] = {
+ /*
+ * Clock divider values for the following:
+ * { APLL, A2M, HCLK_MSYS, PCLK_MSYS,
+ * HCLK_DSYS, PCLK_DSYS, HCLK_PSYS, PCLK_PSYS,
+ * ONEDRAM, MFC, G3D }
+ */
+
+ /* L0 : [1000/200/100][166/83][133/66][200/200] */
+ {0, 4, 4, 1, 3, 1, 4, 1, 3, 0, 0},
+
+ /* L1 : [800/200/100][166/83][133/66][200/200] */
+ {0, 3, 3, 1, 3, 1, 4, 1, 3, 0, 0},
+
+ /* L2 : [400/200/100][166/83][133/66][200/200] */
+ {1, 3, 1, 1, 3, 1, 4, 1, 3, 0, 0},
+
+ /* L3 : [200/200/100][166/83][133/66][200/200] */
+ {3, 3, 1, 1, 3, 1, 4, 1, 3, 0, 0},
+
+ /* L4 : [100/100/100][83/83][66/66][100/100] */
+ {7, 7, 0, 0, 7, 0, 9, 0, 7, 0, 0},
+};
+
+/*
+ * This function sets the DRAM refresh counter
+ * according to the operating frequency of the DRAM
+ * ch: DMC port number 0 or 1
+ * freq: Operating frequency of DRAM (KHz)
+ */
+static void s5pv210_set_refresh(enum s5pv210_dmc_port ch, unsigned long freq)
+{
+ unsigned long tmp, tmp1;
+ void __iomem *reg = NULL;
+
+ if (ch == DMC0)
+ reg = (S5P_VA_DMC0 + 0x30);
+ else if (ch == DMC1)
+ reg = (S5P_VA_DMC1 + 0x30);
+ else
+ printk(KERN_ERR "Cannot find DMC port\n");
+
+ /* Find current DRAM frequency */
+ tmp = s5pv210_dram_conf[ch].freq;
+
+ do_div(tmp, freq);
+
+ tmp1 = s5pv210_dram_conf[ch].refresh;
+
+ do_div(tmp1, tmp);
+
+ __raw_writel(tmp1, reg);
+}
+
+int s5pv210_verify_speed(struct cpufreq_policy *policy)
+{
+ if (policy->cpu)
+ return -EINVAL;
+
+ return cpufreq_frequency_table_verify(policy, s5pv210_freq_table);
+}
+
+unsigned int s5pv210_getspeed(unsigned int cpu)
+{
+ if (cpu)
+ return 0;
+
+ return clk_get_rate(cpu_clk) / 1000;
+}
+
+static int s5pv210_target(struct cpufreq_policy *policy,
+ unsigned int target_freq,
+ unsigned int relation)
+{
+ unsigned long reg;
+ unsigned int index, priv_index;
+ unsigned int pll_changing = 0;
+ unsigned int bus_speed_changing = 0;
+
+ freqs.old = s5pv210_getspeed(0);
+
+ if (cpufreq_frequency_table_target(policy, s5pv210_freq_table,
+ target_freq, relation, &index))
+ return -EINVAL;
+
+ freqs.new = s5pv210_freq_table[index].frequency;
+ freqs.cpu = 0;
+
+ if (freqs.new == freqs.old)
+ return 0;
+
+ /* Finding current running level index */
+ if (cpufreq_frequency_table_target(policy, s5pv210_freq_table,
+ freqs.old, relation, &priv_index))
+ return -EINVAL;
+
+ cpufreq_notify_transition(&freqs, CPUFREQ_PRECHANGE);
+
+ if (freqs.new > freqs.old) {
+ /* Voltage up: will be implemented */
+ }
+
+ /* Check if the PLL needs to be changed */
+ if ((index == L0) || (priv_index == L0))
+ pll_changing = 1;
+
+ /* Check if the system bus clock needs to be changed */
+ if ((index == L4) || (priv_index == L4))
+ bus_speed_changing = 1;
+
+ if (bus_speed_changing) {
+ /*
+ * Reconfigure the DRAM refresh counter value for the minimum
+ * temporary clock while changing the divider.
+ * The expected clock is 83Mhz: 7.8usec / (1 / 83Mhz) = 0x287
+ */
+ if (pll_changing)
+ s5pv210_set_refresh(DMC1, 83000);
+ else
+ s5pv210_set_refresh(DMC1, 100000);
+
+ s5pv210_set_refresh(DMC0, 83000);
+ }
+
+ /*
+ * The APLL is changed at this level:
+ * APLL -> MPLL (for a stable transition) -> APLL
+ * The clock API is not yet available for some of these clock
+ * sources, so do not use the clock API in the code below.
+ */
+ if (pll_changing) {
+ /*
+ * 1. Temporarily change the divider for MFC and G3D
+ * SCLKA2M(200/1=200)->(200/4=50)Mhz
+ */
+ reg = __raw_readl(S5P_CLK_DIV2);
+ reg &= ~(S5P_CLKDIV2_G3D_MASK | S5P_CLKDIV2_MFC_MASK);
+ reg |= (3 << S5P_CLKDIV2_G3D_SHIFT) |
+ (3 << S5P_CLKDIV2_MFC_SHIFT);
+ __raw_writel(reg, S5P_CLK_DIV2);
+
+ /* For MFC, G3D dividing */
+ do {
+ reg = __raw_readl(S5P_CLKDIV_STAT0);
+ } while (reg & ((1 << 16) | (1 << 17)));
+
+ /*
+ * 2. Change SCLKA2M(200Mhz) to SCLKMPLL in MFC_MUX, G3D MUX
+ * (200/4=50)->(667/4=166)Mhz
+ */
+ reg = __raw_readl(S5P_CLK_SRC2);
+ reg &= ~(S5P_CLKSRC2_G3D_MASK | S5P_CLKSRC2_MFC_MASK);
+ reg |= (1 << S5P_CLKSRC2_G3D_SHIFT) |
+ (1 << S5P_CLKSRC2_MFC_SHIFT);
+ __raw_writel(reg, S5P_CLK_SRC2);
+
+ do {
+ reg = __raw_readl(S5P_CLKMUX_STAT1);
+ } while (reg & ((1 << 7) | (1 << 3)));
+
+ /*
+ * 3. DMC1 refresh count for 133Mhz. If (index == L4) is
+ * true, the refresh counter has already been programmed
+ * above (0x287 @ 83Mhz).
+ */
+ if (!bus_speed_changing)
+ s5pv210_set_refresh(DMC1, 133000);
+
+ /* 4. SCLKAPLL -> SCLKMPLL */
+ reg = __raw_readl(S5P_CLK_SRC0);
+ reg &= ~(S5P_CLKSRC0_MUX200_MASK);
+ reg |= (0x1 << S5P_CLKSRC0_MUX200_SHIFT);
+ __raw_writel(reg, S5P_CLK_SRC0);
+
+ do {
+ reg = __raw_readl(S5P_CLKMUX_STAT0);
+ } while (reg & (0x1 << 18));
+
+ }
+
+ /* Change divider */
+ reg = __raw_readl(S5P_CLK_DIV0);
+
+ reg &= ~(S5P_CLKDIV0_APLL_MASK | S5P_CLKDIV0_A2M_MASK |
+ S5P_CLKDIV0_HCLK200_MASK | S5P_CLKDIV0_PCLK100_MASK |
+ S5P_CLKDIV0_HCLK166_MASK | S5P_CLKDIV0_PCLK83_MASK |
+ S5P_CLKDIV0_HCLK133_MASK | S5P_CLKDIV0_PCLK66_MASK);
+
+ reg |= ((clkdiv_val[index][0] << S5P_CLKDIV0_APLL_SHIFT) |
+ (clkdiv_val[index][1] << S5P_CLKDIV0_A2M_SHIFT) |
+ (clkdiv_val[index][2] << S5P_CLKDIV0_HCLK200_SHIFT) |
+ (clkdiv_val[index][3] << S5P_CLKDIV0_PCLK100_SHIFT) |
+ (clkdiv_val[index][4] << S5P_CLKDIV0_HCLK166_SHIFT) |
+ (clkdiv_val[index][5] << S5P_CLKDIV0_PCLK83_SHIFT) |
+ (clkdiv_val[index][6] << S5P_CLKDIV0_HCLK133_SHIFT) |
+ (clkdiv_val[index][7] << S5P_CLKDIV0_PCLK66_SHIFT));
+
+ __raw_writel(reg, S5P_CLK_DIV0);
+
+ do {
+ reg = __raw_readl(S5P_CLKDIV_STAT0);
+ } while (reg & 0xff);
+
+ /* ARM MCS value changed */
+ reg = __raw_readl(S5P_ARM_MCS_CON);
+ reg &= ~0x3;
+ if (index >= L3)
+ reg |= 0x3;
+ else
+ reg |= 0x1;
+
+ __raw_writel(reg, S5P_ARM_MCS_CON);
+
+ if (pll_changing) {
+ /* 5. Set Lock time = 30us*24Mhz = 0x2cf */
+ __raw_writel(0x2cf, S5P_APLL_LOCK);
+
+ /*
+ * 6. Turn on APLL
+ * 6-1. Set PMS values
+ * 6-2. Wait until the PLL is locked
+ */
+ if (index == L0)
+ __raw_writel(APLL_VAL_1000, S5P_APLL_CON);
+ else
+ __raw_writel(APLL_VAL_800, S5P_APLL_CON);
+
+ do {
+ reg = __raw_readl(S5P_APLL_CON);
+ } while (!(reg & (0x1 << 29)));
+
+ /*
+ * 7. Change source clock from SCLKMPLL(667Mhz)
+ * to SCLKA2M(200Mhz) in MFC_MUX and G3D MUX
+ * (667/4=166)->(200/4=50)Mhz
+ */
+ reg = __raw_readl(S5P_CLK_SRC2);
+ reg &= ~(S5P_CLKSRC2_G3D_MASK | S5P_CLKSRC2_MFC_MASK);
+ reg |= (0 << S5P_CLKSRC2_G3D_SHIFT) |
+ (0 << S5P_CLKSRC2_MFC_SHIFT);
+ __raw_writel(reg, S5P_CLK_SRC2);
+
+ do {
+ reg = __raw_readl(S5P_CLKMUX_STAT1);
+ } while (reg & ((1 << 7) | (1 << 3)));
+
+ /*
+ * 8. Change divider for MFC and G3D
+ * (200/4=50)->(200/1=200)Mhz
+ */
+ reg = __raw_readl(S5P_CLK_DIV2);
+ reg &= ~(S5P_CLKDIV2_G3D_MASK | S5P_CLKDIV2_MFC_MASK);
+ reg |= (clkdiv_val[index][10] << S5P_CLKDIV2_G3D_SHIFT) |
+ (clkdiv_val[index][9] << S5P_CLKDIV2_MFC_SHIFT);
+ __raw_writel(reg, S5P_CLK_DIV2);
+
+ /* For MFC, G3D dividing */
+ do {
+ reg = __raw_readl(S5P_CLKDIV_STAT0);
+ } while (reg & ((1 << 16) | (1 << 17)));
+
+ /* 9. Change MPLL to APLL in MSYS_MUX */
+ reg = __raw_readl(S5P_CLK_SRC0);
+ reg &= ~(S5P_CLKSRC0_MUX200_MASK);
+ reg |= (0x0 << S5P_CLKSRC0_MUX200_SHIFT);
+ __raw_writel(reg, S5P_CLK_SRC0);
+
+ do {
+ reg = __raw_readl(S5P_CLKMUX_STAT0);
+ } while (reg & (0x1 << 18));
+
+ /*
+ * 10. DMC1 refresh counter
+ * L4 : DMC1 = 100Mhz 7.8us/(1/100) = 0x30c
+ * Others : DMC1 = 200Mhz 7.8us/(1/200) = 0x618
+ */
+ if (!bus_speed_changing)
+ s5pv210_set_refresh(DMC1, 200000);
+ }
+
+ /*
+ * The L4 level needs to change the memory bus speed, hence the onedram
+ * clock divider and the memory refresh parameter should be changed
+ */
+ if (bus_speed_changing) {
+ reg = __raw_readl(S5P_CLK_DIV6);
+ reg &= ~S5P_CLKDIV6_ONEDRAM_MASK;
+ reg |= (clkdiv_val[index][8] << S5P_CLKDIV6_ONEDRAM_SHIFT);
+ __raw_writel(reg, S5P_CLK_DIV6);
+
+ do {
+ reg = __raw_readl(S5P_CLKDIV_STAT1);
+ } while (reg & (1 << 15));
+
+ /* Reconfigure DRAM refresh counter value */
+ if (index != L4) {
+ /*
+ * DMC0 : 166Mhz
+ * DMC1 : 200Mhz
+ */
+ s5pv210_set_refresh(DMC0, 166000);
+ s5pv210_set_refresh(DMC1, 200000);
+ } else {
+ /*
+ * DMC0 : 83Mhz
+ * DMC1 : 100Mhz
+ */
+ s5pv210_set_refresh(DMC0, 83000);
+ s5pv210_set_refresh(DMC1, 100000);
+ }
+ }
+
+ if (freqs.new < freqs.old) {
+ /* Voltage down: will be implemented */
+ }
+
+ cpufreq_notify_transition(&freqs, CPUFREQ_POSTCHANGE);
+
+ printk(KERN_DEBUG "Perf changed[L%d]\n", index);
+
+ return 0;
+}
+
+#ifdef CONFIG_PM
+static int s5pv210_cpufreq_suspend(struct cpufreq_policy *policy,
+ pm_message_t pmsg)
+{
+ return 0;
+}
+
+static int s5pv210_cpufreq_resume(struct cpufreq_policy *policy)
+{
+ return 0;
+}
+#endif
+
+static int check_mem_type(void __iomem *dmc_reg)
+{
+ unsigned long val;
+
+ val = __raw_readl(dmc_reg + 0x4);
+ val = (val & (0xf << 8));
+
+ return val >> 8;
+}
+
+static int __init s5pv210_cpu_init(struct cpufreq_policy *policy)
+{
+ unsigned long mem_type;
+
+ cpu_clk = clk_get(NULL, "armclk");
+ if (IS_ERR(cpu_clk))
+ return PTR_ERR(cpu_clk);
+
+ dmc0_clk = clk_get(NULL, "sclk_dmc0");
+ if (IS_ERR(dmc0_clk)) {
+ clk_put(cpu_clk);
+ return PTR_ERR(dmc0_clk);
+ }
+
+ dmc1_clk = clk_get(NULL, "hclk_msys");
+ if (IS_ERR(dmc1_clk)) {
+ clk_put(dmc0_clk);
+ clk_put(cpu_clk);
+ return PTR_ERR(dmc1_clk);
+ }
+
+ if (policy->cpu != 0)
+ return -EINVAL;
+
+ /*
+ * check_mem_type : This driver only supports LPDDR & LPDDR2.
+ * Other memory types are not supported.
+ */
+ mem_type = check_mem_type(S5P_VA_DMC0);
+
+ if ((mem_type != LPDDR) && (mem_type != LPDDR2)) {
+ printk(KERN_ERR "CPUFreq doesn't support this memory type\n");
+ return -EINVAL;
+ }
+
+ /* Find current refresh counter and frequency each DMC */
+ s5pv210_dram_conf[0].refresh = (__raw_readl(S5P_VA_DMC0 + 0x30) * 1000);
+ s5pv210_dram_conf[0].freq = clk_get_rate(dmc0_clk);
+
+ s5pv210_dram_conf[1].refresh = (__raw_readl(S5P_VA_DMC1 + 0x30) * 1000);
+ s5pv210_dram_conf[1].freq = clk_get_rate(dmc1_clk);
+
+ policy->cur = policy->min = policy->max = s5pv210_getspeed(0);
+
+ cpufreq_frequency_table_get_attr(s5pv210_freq_table, policy->cpu);
+
+ policy->cpuinfo.transition_latency = 40000;
+
+ return cpufreq_frequency_table_cpuinfo(policy, s5pv210_freq_table);
+}
+
+static struct cpufreq_driver s5pv210_driver = {
+ .flags = CPUFREQ_STICKY,
+ .verify = s5pv210_verify_speed,
+ .target = s5pv210_target,
+ .get = s5pv210_getspeed,
+ .init = s5pv210_cpu_init,
+ .name = "s5pv210",
+#ifdef CONFIG_PM
+ .suspend = s5pv210_cpufreq_suspend,
+ .resume = s5pv210_cpufreq_resume,
+#endif
+};
+
+static int __init s5pv210_cpufreq_init(void)
+{
+ return cpufreq_register_driver(&s5pv210_driver);
+}
+
+late_initcall(s5pv210_cpufreq_init);
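The refresh-register constants quoted in the comments above (0x287, 0x30c, 0x618) are the fixed 7.8us DRAM refresh period expressed in ticks of the DMC clock, which is also what s5pv210_set_refresh() preserves by rescaling the boot-time register value with the frequency ratio. A small sketch of that arithmetic (the helper name is illustrative, not part of the driver):

/* Sketch only: 7.8 us refresh period expressed in DMC clock ticks. */
static unsigned int refresh_ticks(unsigned int dmc_khz)
{
	return (dmc_khz * 78U) / 10000U;	/* f[kHz] * 7.8e-6 s */
}

refresh_ticks(83000) == 647 (0x287), refresh_ticks(100000) == 780 (0x30c) and refresh_ticks(200000) == 1560 (0x618), matching the values the driver programs around the divider changes.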
diff --git a/arch/arm/mach-s5pv210/dev-audio.c b/arch/arm/mach-s5pv210/dev-audio.c
index 21dc6cf955c3..1303fcb12b51 100644
--- a/arch/arm/mach-s5pv210/dev-audio.c
+++ b/arch/arm/mach-s5pv210/dev-audio.c
@@ -24,29 +24,15 @@ static int s5pv210_cfg_i2s(struct platform_device *pdev)
/* configure GPIO for i2s port */
switch (pdev->id) {
case 1:
- s3c_gpio_cfgpin(S5PV210_GPC0(0), S3C_GPIO_SFN(2));
- s3c_gpio_cfgpin(S5PV210_GPC0(1), S3C_GPIO_SFN(2));
- s3c_gpio_cfgpin(S5PV210_GPC0(2), S3C_GPIO_SFN(2));
- s3c_gpio_cfgpin(S5PV210_GPC0(3), S3C_GPIO_SFN(2));
- s3c_gpio_cfgpin(S5PV210_GPC0(4), S3C_GPIO_SFN(2));
+ s3c_gpio_cfgpin_range(S5PV210_GPC0(0), 5, S3C_GPIO_SFN(2));
break;
case 2:
- s3c_gpio_cfgpin(S5PV210_GPC1(0), S3C_GPIO_SFN(4));
- s3c_gpio_cfgpin(S5PV210_GPC1(1), S3C_GPIO_SFN(4));
- s3c_gpio_cfgpin(S5PV210_GPC1(2), S3C_GPIO_SFN(4));
- s3c_gpio_cfgpin(S5PV210_GPC1(3), S3C_GPIO_SFN(4));
- s3c_gpio_cfgpin(S5PV210_GPC1(4), S3C_GPIO_SFN(4));
+ s3c_gpio_cfgpin_range(S5PV210_GPC1(0), 5, S3C_GPIO_SFN(4));
break;
case -1:
- s3c_gpio_cfgpin(S5PV210_GPI(0), S3C_GPIO_SFN(2));
- s3c_gpio_cfgpin(S5PV210_GPI(1), S3C_GPIO_SFN(2));
- s3c_gpio_cfgpin(S5PV210_GPI(2), S3C_GPIO_SFN(2));
- s3c_gpio_cfgpin(S5PV210_GPI(3), S3C_GPIO_SFN(2));
- s3c_gpio_cfgpin(S5PV210_GPI(4), S3C_GPIO_SFN(2));
- s3c_gpio_cfgpin(S5PV210_GPI(5), S3C_GPIO_SFN(2));
- s3c_gpio_cfgpin(S5PV210_GPI(6), S3C_GPIO_SFN(2));
+ s3c_gpio_cfgpin_range(S5PV210_GPI(0), 7, S3C_GPIO_SFN(2));
break;
default:
@@ -151,25 +137,13 @@ static int s5pv210_pcm_cfg_gpio(struct platform_device *pdev)
{
switch (pdev->id) {
case 0:
- s3c_gpio_cfgpin(S5PV210_GPI(0), S3C_GPIO_SFN(3));
- s3c_gpio_cfgpin(S5PV210_GPI(1), S3C_GPIO_SFN(3));
- s3c_gpio_cfgpin(S5PV210_GPI(2), S3C_GPIO_SFN(3));
- s3c_gpio_cfgpin(S5PV210_GPI(3), S3C_GPIO_SFN(3));
- s3c_gpio_cfgpin(S5PV210_GPI(4), S3C_GPIO_SFN(3));
+ s3c_gpio_cfgpin_range(S5PV210_GPI(0), 5, S3C_GPIO_SFN(3));
break;
case 1:
- s3c_gpio_cfgpin(S5PV210_GPC0(0), S3C_GPIO_SFN(3));
- s3c_gpio_cfgpin(S5PV210_GPC0(1), S3C_GPIO_SFN(3));
- s3c_gpio_cfgpin(S5PV210_GPC0(2), S3C_GPIO_SFN(3));
- s3c_gpio_cfgpin(S5PV210_GPC0(3), S3C_GPIO_SFN(3));
- s3c_gpio_cfgpin(S5PV210_GPC0(4), S3C_GPIO_SFN(3));
+ s3c_gpio_cfgpin_range(S5PV210_GPC0(0), 5, S3C_GPIO_SFN(3));
break;
case 2:
- s3c_gpio_cfgpin(S5PV210_GPC1(0), S3C_GPIO_SFN(2));
- s3c_gpio_cfgpin(S5PV210_GPC1(1), S3C_GPIO_SFN(2));
- s3c_gpio_cfgpin(S5PV210_GPC1(2), S3C_GPIO_SFN(2));
- s3c_gpio_cfgpin(S5PV210_GPC1(3), S3C_GPIO_SFN(2));
- s3c_gpio_cfgpin(S5PV210_GPC1(4), S3C_GPIO_SFN(2));
+ s3c_gpio_cfgpin_range(S5PV210_GPC1(0), 5, S3C_GPIO_SFN(2));
break;
default:
printk(KERN_DEBUG "Invalid PCM Controller number!");
@@ -271,13 +245,7 @@ struct platform_device s5pv210_device_pcm2 = {
static int s5pv210_ac97_cfg_gpio(struct platform_device *pdev)
{
- s3c_gpio_cfgpin(S5PV210_GPC0(0), S3C_GPIO_SFN(4));
- s3c_gpio_cfgpin(S5PV210_GPC0(1), S3C_GPIO_SFN(4));
- s3c_gpio_cfgpin(S5PV210_GPC0(2), S3C_GPIO_SFN(4));
- s3c_gpio_cfgpin(S5PV210_GPC0(3), S3C_GPIO_SFN(4));
- s3c_gpio_cfgpin(S5PV210_GPC0(4), S3C_GPIO_SFN(4));
-
- return 0;
+ return s3c_gpio_cfgpin_range(S5PV210_GPC0(0), 5, S3C_GPIO_SFN(4));
}
static struct resource s5pv210_ac97_resource[] = {
@@ -325,3 +293,43 @@ struct platform_device s5pv210_device_ac97 = {
.coherent_dma_mask = DMA_BIT_MASK(32),
},
};
+
+/* S/PDIF Controller platform_device */
+
+static int s5pv210_spdif_cfg_gpio(struct platform_device *pdev)
+{
+ s3c_gpio_cfgpin_range(S5PV210_GPC1(0), 2, S3C_GPIO_SFN(3));
+
+ return 0;
+}
+
+static struct resource s5pv210_spdif_resource[] = {
+ [0] = {
+ .start = S5PV210_PA_SPDIF,
+ .end = S5PV210_PA_SPDIF + 0x100 - 1,
+ .flags = IORESOURCE_MEM,
+ },
+ [1] = {
+ .start = DMACH_SPDIF,
+ .end = DMACH_SPDIF,
+ .flags = IORESOURCE_DMA,
+ },
+};
+
+static struct s3c_audio_pdata samsung_spdif_pdata = {
+ .cfg_gpio = s5pv210_spdif_cfg_gpio,
+};
+
+static u64 s5pv210_spdif_dmamask = DMA_BIT_MASK(32);
+
+struct platform_device s5pv210_device_spdif = {
+ .name = "samsung-spdif",
+ .id = -1,
+ .num_resources = ARRAY_SIZE(s5pv210_spdif_resource),
+ .resource = s5pv210_spdif_resource,
+ .dev = {
+ .platform_data = &samsung_spdif_pdata,
+ .dma_mask = &s5pv210_spdif_dmamask,
+ .coherent_dma_mask = DMA_BIT_MASK(32),
+ },
+};
diff --git a/arch/arm/mach-s5pv210/dev-spi.c b/arch/arm/mach-s5pv210/dev-spi.c
index 826cdbc43e20..e3249a47e3b1 100644
--- a/arch/arm/mach-s5pv210/dev-spi.c
+++ b/arch/arm/mach-s5pv210/dev-spi.c
@@ -35,23 +35,15 @@ static char *spi_src_clks[] = {
*/
static int s5pv210_spi_cfg_gpio(struct platform_device *pdev)
{
+ unsigned int base;
+
switch (pdev->id) {
case 0:
- s3c_gpio_cfgpin(S5PV210_GPB(0), S3C_GPIO_SFN(2));
- s3c_gpio_cfgpin(S5PV210_GPB(1), S3C_GPIO_SFN(2));
- s3c_gpio_cfgpin(S5PV210_GPB(2), S3C_GPIO_SFN(2));
- s3c_gpio_setpull(S5PV210_GPB(0), S3C_GPIO_PULL_UP);
- s3c_gpio_setpull(S5PV210_GPB(1), S3C_GPIO_PULL_UP);
- s3c_gpio_setpull(S5PV210_GPB(2), S3C_GPIO_PULL_UP);
+ base = S5PV210_GPB(0);
break;
case 1:
- s3c_gpio_cfgpin(S5PV210_GPB(4), S3C_GPIO_SFN(2));
- s3c_gpio_cfgpin(S5PV210_GPB(5), S3C_GPIO_SFN(2));
- s3c_gpio_cfgpin(S5PV210_GPB(6), S3C_GPIO_SFN(2));
- s3c_gpio_setpull(S5PV210_GPB(4), S3C_GPIO_PULL_UP);
- s3c_gpio_setpull(S5PV210_GPB(5), S3C_GPIO_PULL_UP);
- s3c_gpio_setpull(S5PV210_GPB(6), S3C_GPIO_PULL_UP);
+ base = S5PV210_GPB(4);
break;
default:
@@ -59,6 +51,9 @@ static int s5pv210_spi_cfg_gpio(struct platform_device *pdev)
return -EINVAL;
}
+ s3c_gpio_cfgall_range(base, 3,
+ S3C_GPIO_SFN(2), S3C_GPIO_PULL_UP);
+
return 0;
}
diff --git a/arch/arm/mach-s5pv210/dma.c b/arch/arm/mach-s5pv210/dma.c
index 778ad5fe231a..497d3439a142 100644
--- a/arch/arm/mach-s5pv210/dma.c
+++ b/arch/arm/mach-s5pv210/dma.c
@@ -82,7 +82,7 @@ static struct s3c_pl330_platdata s5pv210_pdma0_pdata = {
static struct platform_device s5pv210_device_pdma0 = {
.name = "s3c-pl330",
- .id = 1,
+ .id = 0,
.num_resources = ARRAY_SIZE(s5pv210_pdma0_resource),
.resource = s5pv210_pdma0_resource,
.dev = {
@@ -144,7 +144,7 @@ static struct s3c_pl330_platdata s5pv210_pdma1_pdata = {
static struct platform_device s5pv210_device_pdma1 = {
.name = "s3c-pl330",
- .id = 2,
+ .id = 1,
.num_resources = ARRAY_SIZE(s5pv210_pdma1_resource),
.resource = s5pv210_pdma1_resource,
.dev = {
diff --git a/arch/arm/mach-s5pv210/gpiolib.c b/arch/arm/mach-s5pv210/gpiolib.c
index 0d459112d039..ab673effd767 100644
--- a/arch/arm/mach-s5pv210/gpiolib.c
+++ b/arch/arm/mach-s5pv210/gpiolib.c
@@ -150,6 +150,7 @@ static struct s3c_gpio_chip s5pv210_gpio_4bit[] = {
.label = "GPG3",
},
}, {
+ .config = &gpio_cfg_noint,
.chip = {
.base = S5PV210_GPI(0),
.ngpio = S5PV210_GPIO_I_NR,
@@ -223,34 +224,42 @@ static struct s3c_gpio_chip s5pv210_gpio_4bit[] = {
}, {
.base = (S5P_VA_GPIO + 0xC00),
.config = &gpio_cfg_noint,
+ .irq_base = IRQ_EINT(0),
.chip = {
.base = S5PV210_GPH0(0),
.ngpio = S5PV210_GPIO_H0_NR,
.label = "GPH0",
+ .to_irq = samsung_gpiolib_to_irq,
},
}, {
.base = (S5P_VA_GPIO + 0xC20),
.config = &gpio_cfg_noint,
+ .irq_base = IRQ_EINT(8),
.chip = {
.base = S5PV210_GPH1(0),
.ngpio = S5PV210_GPIO_H1_NR,
.label = "GPH1",
+ .to_irq = samsung_gpiolib_to_irq,
},
}, {
.base = (S5P_VA_GPIO + 0xC40),
.config = &gpio_cfg_noint,
+ .irq_base = IRQ_EINT(16),
.chip = {
.base = S5PV210_GPH2(0),
.ngpio = S5PV210_GPIO_H2_NR,
.label = "GPH2",
+ .to_irq = samsung_gpiolib_to_irq,
},
}, {
.base = (S5P_VA_GPIO + 0xC60),
.config = &gpio_cfg_noint,
+ .irq_base = IRQ_EINT(24),
.chip = {
.base = S5PV210_GPH3(0),
.ngpio = S5PV210_GPIO_H3_NR,
.label = "GPH3",
+ .to_irq = samsung_gpiolib_to_irq,
},
},
};
@@ -259,11 +268,14 @@ static __init int s5pv210_gpiolib_init(void)
{
struct s3c_gpio_chip *chip = s5pv210_gpio_4bit;
int nr_chips = ARRAY_SIZE(s5pv210_gpio_4bit);
+ int gpioint_group = 0;
int i = 0;
for (i = 0; i < nr_chips; i++, chip++) {
- if (chip->config == NULL)
+ if (chip->config == NULL) {
chip->config = &gpio_cfg;
+ chip->group = gpioint_group++;
+ }
if (chip->base == NULL)
chip->base = S5PV210_BANK_BASE(i);
}
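The GPH banks above gain an irq_base and a to_irq hook so gpio_to_irq() on those pins resolves directly to the external-interrupt numbers. samsung_gpiolib_to_irq() itself is not part of this diff; it is assumed to be the simple linear mapping sketched below:

/* Sketch only: assumed shape of samsung_gpiolib_to_irq(). */
static int example_to_irq(struct gpio_chip *chip, unsigned int offset)
{
	struct s3c_gpio_chip *ourchip = to_s3c_gpio(chip);

	return ourchip->irq_base + offset;
}

Under that assumption, gpio_to_irq(S5PV210_GPH2(3)) returns IRQ_EINT(16) + 3 with the bases registered above.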
diff --git a/arch/arm/mach-s5pv210/include/mach/irqs.h b/arch/arm/mach-s5pv210/include/mach/irqs.h
index e1c020e5a49b..119b95fdc3ce 100644
--- a/arch/arm/mach-s5pv210/include/mach/irqs.h
+++ b/arch/arm/mach-s5pv210/include/mach/irqs.h
@@ -55,8 +55,8 @@
#define IRQ_SPI1 S5P_IRQ_VIC1(16)
#define IRQ_SPI2 S5P_IRQ_VIC1(17)
#define IRQ_IRDA S5P_IRQ_VIC1(18)
-#define IRQ_CAN0 S5P_IRQ_VIC1(19)
-#define IRQ_CAN1 S5P_IRQ_VIC1(20)
+#define IRQ_IIC2 S5P_IRQ_VIC1(19)
+#define IRQ_IIC3 S5P_IRQ_VIC1(20)
#define IRQ_HSIRX S5P_IRQ_VIC1(21)
#define IRQ_HSITX S5P_IRQ_VIC1(22)
#define IRQ_UHOST S5P_IRQ_VIC1(23)
@@ -109,7 +109,7 @@
#define IRQ_IPC S5P_IRQ_VIC3(0)
#define IRQ_HOSTIF S5P_IRQ_VIC3(1)
-#define IRQ_MMC3 S5P_IRQ_VIC3(2)
+#define IRQ_HSMMC3 S5P_IRQ_VIC3(2)
#define IRQ_CEC S5P_IRQ_VIC3(3)
#define IRQ_TSI S5P_IRQ_VIC3(4)
#define IRQ_MDNIE0 S5P_IRQ_VIC3(5)
@@ -121,8 +121,12 @@
#define S5P_EINT_BASE1 (S5P_IRQ_VIC0(0))
#define S5P_EINT_BASE2 (IRQ_VIC_END + 1)
+/* GPIO interrupt */
+#define S5P_GPIOINT_BASE (IRQ_EINT(31) + 1)
+#define S5P_GPIOINT_GROUP_MAXNR 22
+
/* Set the default NR_IRQS */
-#define NR_IRQS (IRQ_EINT(31) + 1)
+#define NR_IRQS (IRQ_EINT(31) + S5P_GPIOINT_COUNT + 1)
/* Compatibility */
#define IRQ_LCD_FIFO IRQ_LCD0
diff --git a/arch/arm/mach-s5pv210/include/mach/map.h b/arch/arm/mach-s5pv210/include/mach/map.h
index bd9afd52466a..861d7fe11fc9 100644
--- a/arch/arm/mach-s5pv210/include/mach/map.h
+++ b/arch/arm/mach-s5pv210/include/mach/map.h
@@ -57,6 +57,8 @@
#define S5P_SZ_UART SZ_256
+#define S3C_VA_UARTx(x) (S3C_VA_UART + ((x) * S3C_UART_OFFSET))
+
#define S5PV210_PA_SROMC (0xE8000000)
#define S5PV210_PA_CFCON (0xE8200000)
@@ -73,6 +75,9 @@
#define S5PV210_PA_HSMMC(x) (0xEB000000 + ((x) * 0x100000))
+#define S5PV210_PA_HSOTG (0xEC000000)
+#define S5PV210_PA_HSPHY (0xEC100000)
+
#define S5PV210_PA_VIC0 (0xF2000000)
#define S5PV210_PA_VIC1 (0xF2100000)
#define S5PV210_PA_VIC2 (0xF2200000)
@@ -81,6 +86,9 @@
#define S5PV210_PA_SDRAM (0x20000000)
#define S5P_PA_SDRAM S5PV210_PA_SDRAM
+/* S/PDIF */
+#define S5PV210_PA_SPDIF 0xE1100000
+
/* I2S */
#define S5PV210_PA_IIS0 0xEEE30000
#define S5PV210_PA_IIS1 0xE2100000
@@ -96,6 +104,9 @@
#define S5PV210_PA_ADC (0xE1700000)
+#define S5PV210_PA_DMC0 (0xF0000000)
+#define S5PV210_PA_DMC1 (0xF1400000)
+
/* compatibiltiy defines. */
#define S3C_PA_UART S5PV210_PA_UART
#define S3C_PA_HSMMC0 S5PV210_PA_HSMMC(0)
@@ -108,6 +119,7 @@
#define S3C_PA_FB S5PV210_PA_FB
#define S3C_PA_RTC S5PV210_PA_RTC
#define S3C_PA_WDT S5PV210_PA_WATCHDOG
+#define S3C_PA_USB_HSOTG S5PV210_PA_HSOTG
#define S5P_PA_FIMC0 S5PV210_PA_FIMC0
#define S5P_PA_FIMC1 S5PV210_PA_FIMC1
#define S5P_PA_FIMC2 S5PV210_PA_FIMC2
diff --git a/arch/arm/mach-s5pv210/include/mach/pm-core.h b/arch/arm/mach-s5pv210/include/mach/pm-core.h
new file mode 100644
index 000000000000..e8d394f8b057
--- /dev/null
+++ b/arch/arm/mach-s5pv210/include/mach/pm-core.h
@@ -0,0 +1,43 @@
+/* linux/arch/arm/mach-s5pv210/include/mach/pm-core.h
+ *
+ * Copyright (c) 2010 Samsung Electronics Co., Ltd.
+ * http://www.samsung.com
+ *
+ * Based on arch/arm/mach-s3c2410/include/mach/pm-core.h,
+ * Copyright 2008 Simtec Electronics
+ * Ben Dooks <ben@simtec.co.uk>
+ * http://armlinux.simtec.co.uk/
+ *
+ * S5PV210 - PM core support for arch/arm/plat-s5p/pm.c
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+*/
+
+static inline void s3c_pm_debug_init_uart(void)
+{
+ /* nothing here yet */
+}
+
+static inline void s3c_pm_arch_prepare_irqs(void)
+{
+ __raw_writel(s3c_irqwake_intmask, S5P_WAKEUP_MASK);
+ __raw_writel(s3c_irqwake_eintmask, S5P_EINT_WAKEUP_MASK);
+}
+
+static inline void s3c_pm_arch_stop_clocks(void)
+{
+ /* nothing here yet */
+}
+
+static inline void s3c_pm_arch_show_resume_irqs(void)
+{
+ /* nothing here yet */
+}
+
+static inline void s3c_pm_arch_update_uart(void __iomem *regs,
+ struct pm_uart_save *save)
+{
+ /* nothing here yet */
+}
diff --git a/arch/arm/mach-s5pv210/include/mach/regs-clock.h b/arch/arm/mach-s5pv210/include/mach/regs-clock.h
index 499aef737476..ebaabe021af9 100644
--- a/arch/arm/mach-s5pv210/include/mach/regs-clock.h
+++ b/arch/arm/mach-s5pv210/include/mach/regs-clock.h
@@ -25,6 +25,7 @@
#define S5P_APLL_CON S5P_CLKREG(0x100)
#define S5P_MPLL_CON S5P_CLKREG(0x108)
#define S5P_EPLL_CON S5P_CLKREG(0x110)
+#define S5P_EPLL_CON1 S5P_CLKREG(0x114)
#define S5P_VPLL_CON S5P_CLKREG(0x120)
#define S5P_CLK_SRC0 S5P_CLKREG(0x200)
@@ -67,11 +68,28 @@
#define S5P_CLKGATE_BUS1 S5P_CLKREG(0x488)
#define S5P_CLK_OUT S5P_CLKREG(0x500)
+/* DIV/MUX STATUS */
+#define S5P_CLKDIV_STAT0 S5P_CLKREG(0x1000)
+#define S5P_CLKDIV_STAT1 S5P_CLKREG(0x1004)
+#define S5P_CLKMUX_STAT0 S5P_CLKREG(0x1100)
+#define S5P_CLKMUX_STAT1 S5P_CLKREG(0x1104)
+
/* CLKSRC0 */
-#define S5P_CLKSRC0_MUX200_MASK (0x1<<16)
+#define S5P_CLKSRC0_MUX200_SHIFT (16)
+#define S5P_CLKSRC0_MUX200_MASK (0x1 << S5P_CLKSRC0_MUX200_SHIFT)
#define S5P_CLKSRC0_MUX166_MASK (0x1<<20)
#define S5P_CLKSRC0_MUX133_MASK (0x1<<24)
+/* CLKSRC2 */
+#define S5P_CLKSRC2_G3D_SHIFT (0)
+#define S5P_CLKSRC2_G3D_MASK (0x3 << S5P_CLKSRC2_G3D_SHIFT)
+#define S5P_CLKSRC2_MFC_SHIFT (4)
+#define S5P_CLKSRC2_MFC_MASK (0x3 << S5P_CLKSRC2_MFC_SHIFT)
+
+/* CLKSRC6*/
+#define S5P_CLKSRC6_ONEDRAM_SHIFT (24)
+#define S5P_CLKSRC6_ONEDRAM_MASK (0x3 << S5P_CLKSRC6_ONEDRAM_SHIFT)
+
/* CLKDIV0 */
#define S5P_CLKDIV0_APLL_SHIFT (0)
#define S5P_CLKDIV0_APLL_MASK (0x7 << S5P_CLKDIV0_APLL_SHIFT)
@@ -90,12 +108,24 @@
#define S5P_CLKDIV0_PCLK66_SHIFT (28)
#define S5P_CLKDIV0_PCLK66_MASK (0x7 << S5P_CLKDIV0_PCLK66_SHIFT)
+/* CLKDIV2 */
+#define S5P_CLKDIV2_G3D_SHIFT (0)
+#define S5P_CLKDIV2_G3D_MASK (0xF << S5P_CLKDIV2_G3D_SHIFT)
+#define S5P_CLKDIV2_MFC_SHIFT (4)
+#define S5P_CLKDIV2_MFC_MASK (0xF << S5P_CLKDIV2_MFC_SHIFT)
+
+/* CLKDIV6 */
+#define S5P_CLKDIV6_ONEDRAM_SHIFT (28)
+#define S5P_CLKDIV6_ONEDRAM_MASK (0xF << S5P_CLKDIV6_ONEDRAM_SHIFT)
+
#define S5P_SWRESET S5P_CLKREG(0x2000)
+#define S5P_ARM_MCS_CON S5P_CLKREG(0x6100)
+
/* Registers related to power management */
#define S5P_PWR_CFG S5P_CLKREG(0xC000)
#define S5P_EINT_WAKEUP_MASK S5P_CLKREG(0xC004)
-#define S5P_WAKEUP_MASK S5P_CLKREG(0xC008)
+#define S5P_WAKEUP_MASK S5P_CLKREG(0xC008)
#define S5P_PWR_MODE S5P_CLKREG(0xC00C)
#define S5P_NORMAL_CFG S5P_CLKREG(0xC010)
#define S5P_IDLE_CFG S5P_CLKREG(0xC020)
@@ -159,8 +189,11 @@
#define S5P_SLEEP_CFG_USBOSC_EN (1 << 1)
/* OTHERS Resgister */
+#define S5P_OTHERS_RET_IO (1 << 31)
+#define S5P_OTHERS_RET_CF (1 << 30)
+#define S5P_OTHERS_RET_MMC (1 << 29)
+#define S5P_OTHERS_RET_UART (1 << 28)
#define S5P_OTHERS_USB_SIG_MASK (1 << 16)
-#define S5P_OTHERS_MIPI_DPHY_EN (1 << 28)
/* MIPI */
#define S5P_MIPI_DPHY_EN (3)
diff --git a/arch/arm/mach-s5pv210/include/mach/regs-gpio.h b/arch/arm/mach-s5pv210/include/mach/regs-gpio.h
index 49e029b4978a..de0c89976078 100644
--- a/arch/arm/mach-s5pv210/include/mach/regs-gpio.h
+++ b/arch/arm/mach-s5pv210/include/mach/regs-gpio.h
@@ -31,13 +31,6 @@
#define eint_irq_to_bit(irq) (1 << (EINT_OFFSET(irq) & 0x7))
-/* values for S5P_EXTINT0 */
-#define S5P_EXTINT_LOWLEV (0x00)
-#define S5P_EXTINT_HILEV (0x01)
-#define S5P_EXTINT_FALLEDGE (0x02)
-#define S5P_EXTINT_RISEEDGE (0x03)
-#define S5P_EXTINT_BOTHEDGE (0x04)
-
#define EINT_MODE S3C_GPIO_SFN(0xf)
#define EINT_GPIO_0(x) S5PV210_GPH0(x)
diff --git a/arch/arm/mach-s5pv210/include/mach/regs-sys.h b/arch/arm/mach-s5pv210/include/mach/regs-sys.h
new file mode 100644
index 000000000000..26691d39d0f4
--- /dev/null
+++ b/arch/arm/mach-s5pv210/include/mach/regs-sys.h
@@ -0,0 +1,19 @@
+/* arch/arm/mach-s5pv210/include/mach/regs-sys.h
+ *
+ * Copyright (c) 2010 Samsung Electronics Co., Ltd.
+ * http://www.samsung.com/
+ *
+ * S5PV210 - System registers definitions
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+*/
+
+#define S5PV210_USB_PHY_CON (S3C_VA_SYS + 0xE80C)
+#define S5PV210_USB_PHY0_EN (1 << 0)
+#define S5PV210_USB_PHY1_EN (1 << 1)
+
+/* compatibility defines for s3c-hsotg driver */
+#define S3C64XX_OTHERS S5PV210_USB_PHY_CON
+#define S3C64XX_OTHERS_USBMASK S5PV210_USB_PHY0_EN
diff --git a/arch/arm/mach-s5pv210/include/mach/vmalloc.h b/arch/arm/mach-s5pv210/include/mach/vmalloc.h
index df9a28808323..a6c659d68a5d 100644
--- a/arch/arm/mach-s5pv210/include/mach/vmalloc.h
+++ b/arch/arm/mach-s5pv210/include/mach/vmalloc.h
@@ -17,6 +17,6 @@
#ifndef __ASM_ARCH_VMALLOC_H
#define __ASM_ARCH_VMALLOC_H __FILE__
-#define VMALLOC_END (0xE0000000UL)
+#define VMALLOC_END 0xF6000000UL
#endif /* __ASM_ARCH_VMALLOC_H */
diff --git a/arch/arm/mach-s5pv210/mach-aquila.c b/arch/arm/mach-s5pv210/mach-aquila.c
index 00883087363c..28677caf3613 100644
--- a/arch/arm/mach-s5pv210/mach-aquila.c
+++ b/arch/arm/mach-s5pv210/mach-aquila.c
@@ -16,6 +16,8 @@
#include <linux/i2c.h>
#include <linux/i2c-gpio.h>
#include <linux/mfd/max8998.h>
+#include <linux/mfd/wm8994/pdata.h>
+#include <linux/regulator/fixed.h>
#include <linux/gpio_keys.h>
#include <linux/input.h>
#include <linux/gpio.h>
@@ -379,6 +381,119 @@ static struct max8998_platform_data aquila_max8998_pdata = {
};
#endif
+static struct regulator_consumer_supply wm8994_fixed_voltage0_supplies[] = {
+ {
+ .dev_name = "5-001a",
+ .supply = "DBVDD",
+ }, {
+ .dev_name = "5-001a",
+ .supply = "AVDD2",
+ }, {
+ .dev_name = "5-001a",
+ .supply = "CPVDD",
+ },
+};
+
+static struct regulator_consumer_supply wm8994_fixed_voltage1_supplies[] = {
+ {
+ .dev_name = "5-001a",
+ .supply = "SPKVDD1",
+ }, {
+ .dev_name = "5-001a",
+ .supply = "SPKVDD2",
+ },
+};
+
+static struct regulator_init_data wm8994_fixed_voltage0_init_data = {
+ .constraints = {
+ .always_on = 1,
+ },
+ .num_consumer_supplies = ARRAY_SIZE(wm8994_fixed_voltage0_supplies),
+ .consumer_supplies = wm8994_fixed_voltage0_supplies,
+};
+
+static struct regulator_init_data wm8994_fixed_voltage1_init_data = {
+ .constraints = {
+ .always_on = 1,
+ },
+ .num_consumer_supplies = ARRAY_SIZE(wm8994_fixed_voltage1_supplies),
+ .consumer_supplies = wm8994_fixed_voltage1_supplies,
+};
+
+static struct fixed_voltage_config wm8994_fixed_voltage0_config = {
+ .supply_name = "VCC_1.8V_PDA",
+ .microvolts = 1800000,
+ .gpio = -EINVAL,
+ .init_data = &wm8994_fixed_voltage0_init_data,
+};
+
+static struct fixed_voltage_config wm8994_fixed_voltage1_config = {
+ .supply_name = "V_BAT",
+ .microvolts = 3700000,
+ .gpio = -EINVAL,
+ .init_data = &wm8994_fixed_voltage1_init_data,
+};
+
+static struct platform_device wm8994_fixed_voltage0 = {
+ .name = "reg-fixed-voltage",
+ .id = 0,
+ .dev = {
+ .platform_data = &wm8994_fixed_voltage0_config,
+ },
+};
+
+static struct platform_device wm8994_fixed_voltage1 = {
+ .name = "reg-fixed-voltage",
+ .id = 1,
+ .dev = {
+ .platform_data = &wm8994_fixed_voltage1_config,
+ },
+};
+
+static struct regulator_consumer_supply wm8994_avdd1_supply = {
+ .dev_name = "5-001a",
+ .supply = "AVDD1",
+};
+
+static struct regulator_consumer_supply wm8994_dcvdd_supply = {
+ .dev_name = "5-001a",
+ .supply = "DCVDD",
+};
+
+static struct regulator_init_data wm8994_ldo1_data = {
+ .constraints = {
+ .name = "AVDD1_3.0V",
+ .valid_ops_mask = REGULATOR_CHANGE_STATUS,
+ },
+ .num_consumer_supplies = 1,
+ .consumer_supplies = &wm8994_avdd1_supply,
+};
+
+static struct regulator_init_data wm8994_ldo2_data = {
+ .constraints = {
+ .name = "DCVDD_1.0V",
+ },
+ .num_consumer_supplies = 1,
+ .consumer_supplies = &wm8994_dcvdd_supply,
+};
+
+static struct wm8994_pdata wm8994_platform_data = {
+ /* configure gpio1 function: 0x0001(Logic level input/output) */
+ .gpio_defaults[0] = 0x0001,
+ /* configure gpio3/4/5/7 function for AIF2 voice */
+ .gpio_defaults[2] = 0x8100,
+ .gpio_defaults[3] = 0x8100,
+ .gpio_defaults[4] = 0x8100,
+ .gpio_defaults[6] = 0x0100,
+ /* configure gpio8/9/10/11 function for AIF3 BT */
+ .gpio_defaults[7] = 0x8100,
+ .gpio_defaults[8] = 0x0100,
+ .gpio_defaults[9] = 0x0100,
+ .gpio_defaults[10] = 0x0100,
+ .ldo[0] = { S5PV210_MP03(6), NULL, &wm8994_ldo1_data }, /* XM0FRNB_2 */
+ .ldo[1] = { 0, NULL, &wm8994_ldo2_data },
+};
+
/* GPIO I2C PMIC */
#define AP_I2C_GPIO_PMIC_BUS_4 4
static struct i2c_gpio_platform_data aquila_i2c_gpio_pmic_data = {
@@ -404,6 +519,29 @@ static struct i2c_board_info i2c_gpio_pmic_devs[] __initdata = {
#endif
};
+/* GPIO I2C AP 1.8V */
+#define AP_I2C_GPIO_BUS_5 5
+static struct i2c_gpio_platform_data aquila_i2c_gpio5_data = {
+ .sda_pin = S5PV210_MP05(3), /* XM0ADDR_11 */
+ .scl_pin = S5PV210_MP05(2), /* XM0ADDR_10 */
+};
+
+static struct platform_device aquila_i2c_gpio5 = {
+ .name = "i2c-gpio",
+ .id = AP_I2C_GPIO_BUS_5,
+ .dev = {
+ .platform_data = &aquila_i2c_gpio5_data,
+ },
+};
+
+static struct i2c_board_info i2c_gpio5_devs[] __initdata = {
+ {
+ /* CS/ADDR = low 0x34 (FYI: high = 0x36) */
+ I2C_BOARD_INFO("wm8994", 0x1a),
+ .platform_data = &wm8994_platform_data,
+ },
+};
+
/* PMIC Power button */
static struct gpio_keys_button aquila_gpio_keys_table[] = {
{
@@ -475,6 +613,7 @@ static void aquila_setup_sdhci(void)
static struct platform_device *aquila_devices[] __initdata = {
&aquila_i2c_gpio_pmic,
+ &aquila_i2c_gpio5,
&aquila_device_gpiokeys,
&s3c_device_fb,
&s5p_device_onenand,
@@ -484,8 +623,33 @@ static struct platform_device *aquila_devices[] __initdata = {
&s5p_device_fimc0,
&s5p_device_fimc1,
&s5p_device_fimc2,
+ &s5pv210_device_iis0,
+ &wm8994_fixed_voltage0,
+ &wm8994_fixed_voltage1,
};
+static void __init aquila_sound_init(void)
+{
+ unsigned int gpio;
+
+ /* CODEC_XTAL_EN
+ *
+ * The Aquila board has an oscillator which provides the main clock
+ * to the WM8994 codec. The oscillator supplies a 24MHz clock to the
+ * WM8994. Set the "CODEC_XTAL_EN" GPIO to enable the oscillator.
+ */
+ gpio = S5PV210_GPH3(2); /* XEINT_26 */
+ gpio_request(gpio, "CODEC_XTAL_EN");
+ s3c_gpio_cfgpin(gpio, S3C_GPIO_OUTPUT);
+ s3c_gpio_setpull(gpio, S3C_GPIO_PULL_NONE);
+
+ /* The main clock of the WM8994 codec is taken from the CLKOUT pin.
+ * CLKOUT[9:8] in the OTHERS register (0xE010E000) is set to 0x3
+ * (XUSBXTI) because the WM8994 codec needs a 24MHz clock.
+ */
+ __raw_writel(__raw_readl(S5P_OTHERS) | (0x3 << 8), S5P_OTHERS);
+}
+
static void __init aquila_map_io(void)
{
s5p_init_io(NULL, 0, S5P_VA_CHIPID);
@@ -506,6 +670,11 @@ static void __init aquila_machine_init(void)
s3c_fimc_setname(1, "s5p-fimc");
s3c_fimc_setname(2, "s5p-fimc");
+ /* SOUND */
+ aquila_sound_init();
+ i2c_register_board_info(AP_I2C_GPIO_BUS_5, i2c_gpio5_devs,
+ ARRAY_SIZE(i2c_gpio5_devs));
+
/* FB */
s3c_fb_set_platdata(&aquila_lcd_pdata);
diff --git a/arch/arm/mach-s5pv210/mach-goni.c b/arch/arm/mach-s5pv210/mach-goni.c
index d9ecf57fc2a5..b1dcf964a768 100644
--- a/arch/arm/mach-s5pv210/mach-goni.c
+++ b/arch/arm/mach-s5pv210/mach-goni.c
@@ -15,7 +15,13 @@
#include <linux/fb.h>
#include <linux/i2c.h>
#include <linux/i2c-gpio.h>
+#include <linux/i2c/qt602240_ts.h>
#include <linux/mfd/max8998.h>
+#include <linux/mfd/wm8994/pdata.h>
+#include <linux/regulator/fixed.h>
+#include <linux/spi/spi.h>
+#include <linux/spi/spi_gpio.h>
+#include <linux/lcd.h>
#include <linux/gpio_keys.h>
#include <linux/input.h>
#include <linux/gpio.h>
@@ -35,7 +41,10 @@
#include <plat/devs.h>
#include <plat/cpu.h>
#include <plat/fb.h>
+#include <plat/iic.h>
+#include <plat/keypad.h>
#include <plat/sdhci.h>
+#include <plat/clock.h>
/* Following are default values for UCON, ULCON and UFCON UART registers */
#define GONI_UCON_DEFAULT (S3C2410_UCON_TXILEVEL | \
@@ -87,13 +96,12 @@ static struct s3c2410_uartcfg goni_uartcfgs[] __initdata = {
/* Frame Buffer */
static struct s3c_fb_pd_win goni_fb_win0 = {
.win_mode = {
- .pixclock = 1000000000000ULL / ((16+16+2+480)*(28+3+2+800)*55),
.left_margin = 16,
.right_margin = 16,
- .upper_margin = 3,
+ .upper_margin = 2,
.lower_margin = 28,
.hsync_len = 2,
- .vsync_len = 2,
+ .vsync_len = 1,
.xres = 480,
.yres = 800,
.refresh = 55,
@@ -111,9 +119,160 @@ static struct s3c_fb_platdata goni_lcd_pdata __initdata = {
.setup_gpio = s5pv210_fb_gpio_setup_24bpp,
};
+static int lcd_power_on(struct lcd_device *ld, int enable)
+{
+ return 1;
+}
+
+static int reset_lcd(struct lcd_device *ld)
+{
+ static unsigned int first = 1;
+ int reset_gpio = -1;
+
+ reset_gpio = S5PV210_MP05(5);
+
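+ /* request the reset GPIO only on the first call; afterwards just drive it high */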
+ if (first) {
+ gpio_request(reset_gpio, "MLCD_RST");
+ first = 0;
+ }
+
+ gpio_direction_output(reset_gpio, 1);
+ return 1;
+}
+
+static struct lcd_platform_data goni_lcd_platform_data = {
+ .reset = reset_lcd,
+ .power_on = lcd_power_on,
+ .lcd_enabled = 0,
+ .reset_delay = 120, /* 120ms */
+ .power_on_delay = 25, /* 25ms */
+ .power_off_delay = 200, /* 200ms */
+};
+
+#define LCD_BUS_NUM 3
+static struct spi_board_info spi_board_info[] __initdata = {
+ {
+ .modalias = "s6e63m0",
+ .platform_data = &goni_lcd_platform_data,
+ .max_speed_hz = 1200000,
+ .bus_num = LCD_BUS_NUM,
+ .chip_select = 0,
+ .mode = SPI_MODE_3,
+ .controller_data = (void *)S5PV210_MP01(1), /* DISPLAY_CS */
+ },
+};
+
+static struct spi_gpio_platform_data lcd_spi_gpio_data = {
+ .sck = S5PV210_MP04(1), /* DISPLAY_CLK */
+ .mosi = S5PV210_MP04(3), /* DISPLAY_SI */
+ .miso = SPI_GPIO_NO_MISO,
+ .num_chipselect = 1,
+};
+
+static struct platform_device goni_spi_gpio = {
+ .name = "spi_gpio",
+ .id = LCD_BUS_NUM,
+ .dev = {
+ .parent = &s3c_device_fb.dev,
+ .platform_data = &lcd_spi_gpio_data,
+ },
+};
+
+/* KEYPAD */
+static uint32_t keymap[] __initdata = {
+ /* KEY(row, col, keycode) */
+ KEY(0, 1, KEY_MENU), /* Send */
+ KEY(0, 2, KEY_BACK), /* End */
+ KEY(1, 1, KEY_CONFIG), /* Half shot */
+ KEY(1, 2, KEY_VOLUMEUP),
+ KEY(2, 1, KEY_CAMERA), /* Full shot */
+ KEY(2, 2, KEY_VOLUMEDOWN),
+};
+
+static struct matrix_keymap_data keymap_data __initdata = {
+ .keymap = keymap,
+ .keymap_size = ARRAY_SIZE(keymap),
+};
+
+static struct samsung_keypad_platdata keypad_data __initdata = {
+ .keymap_data = &keymap_data,
+ .rows = 3,
+ .cols = 3,
+};
+
+/* Radio */
+static struct i2c_board_info i2c1_devs[] __initdata = {
+ {
+ I2C_BOARD_INFO("si470x", 0x10),
+ },
+};
+
+static void __init goni_radio_init(void)
+{
+ int gpio;
+
+ gpio = S5PV210_GPJ2(4); /* XMSMDATA_4 */
+ gpio_request(gpio, "FM_INT");
+ s3c_gpio_cfgpin(gpio, S3C_GPIO_SFN(0xf));
+ i2c1_devs[0].irq = gpio_to_irq(gpio);
+
+ gpio = S5PV210_GPJ2(5); /* XMSMDATA_5 */
+ gpio_request(gpio, "FM_RST");
+ gpio_direction_output(gpio, 1);
+}
+
+/* TSP */
+static struct qt602240_platform_data qt602240_platform_data = {
+ .x_line = 17,
+ .y_line = 11,
+ .x_size = 800,
+ .y_size = 480,
+ .blen = 0x21,
+ .threshold = 0x28,
+ .voltage = 2800000, /* 2.8V */
+ .orient = QT602240_DIAGONAL,
+};
+
+static struct s3c2410_platform_i2c i2c2_data __initdata = {
+ .flags = 0,
+ .bus_num = 2,
+ .slave_addr = 0x10,
+ .frequency = 400 * 1000,
+ .sda_delay = 100,
+};
+
+static struct i2c_board_info i2c2_devs[] __initdata = {
+ {
+ I2C_BOARD_INFO("qt602240_ts", 0x4a),
+ .platform_data = &qt602240_platform_data,
+ },
+};
+
+static void __init goni_tsp_init(void)
+{
+ int gpio;
+
+ gpio = S5PV210_GPJ1(3); /* XMSMADDR_11 */
+ gpio_request(gpio, "TSP_LDO_ON");
+ gpio_direction_output(gpio, 1);
+ gpio_export(gpio, 0);
+
+ gpio = S5PV210_GPJ0(5); /* XMSMADDR_5 */
+ gpio_request(gpio, "TSP_INT");
+
+ s5p_register_gpio_interrupt(gpio);
+ s3c_gpio_cfgpin(gpio, S3C_GPIO_SFN(0xf));
+ s3c_gpio_setpull(gpio, S3C_GPIO_PULL_UP);
+ i2c2_devs[0].irq = gpio_to_irq(gpio);
+}
+
/* MAX8998 regulators */
#if defined(CONFIG_REGULATOR_MAX8998) || defined(CONFIG_REGULATOR_MAX8998_MODULE)
+static struct regulator_consumer_supply goni_ldo5_consumers[] = {
+ REGULATOR_SUPPLY("vmmc", "s3c-sdhci.0"),
+};
+
static struct regulator_init_data goni_ldo2_data = {
.constraints = {
.name = "VALIVE_1.1V",
@@ -153,6 +312,8 @@ static struct regulator_init_data goni_ldo5_data = {
.max_uV = 2800000,
.apply_uV = 1,
},
+ .num_consumer_supplies = ARRAY_SIZE(goni_ldo5_consumers),
+ .consumer_supplies = goni_ldo5_consumers,
};
static struct regulator_init_data goni_ldo6_data = {
@@ -360,6 +521,119 @@ static struct max8998_platform_data goni_max8998_pdata = {
};
#endif
+static struct regulator_consumer_supply wm8994_fixed_voltage0_supplies[] = {
+ {
+ .dev_name = "5-001a",
+ .supply = "DBVDD",
+ }, {
+ .dev_name = "5-001a",
+ .supply = "AVDD2",
+ }, {
+ .dev_name = "5-001a",
+ .supply = "CPVDD",
+ },
+};
+
+static struct regulator_consumer_supply wm8994_fixed_voltage1_supplies[] = {
+ {
+ .dev_name = "5-001a",
+ .supply = "SPKVDD1",
+ }, {
+ .dev_name = "5-001a",
+ .supply = "SPKVDD2",
+ },
+};
+
+static struct regulator_init_data wm8994_fixed_voltage0_init_data = {
+ .constraints = {
+ .always_on = 1,
+ },
+ .num_consumer_supplies = ARRAY_SIZE(wm8994_fixed_voltage0_supplies),
+ .consumer_supplies = wm8994_fixed_voltage0_supplies,
+};
+
+static struct regulator_init_data wm8994_fixed_voltage1_init_data = {
+ .constraints = {
+ .always_on = 1,
+ },
+ .num_consumer_supplies = ARRAY_SIZE(wm8994_fixed_voltage1_supplies),
+ .consumer_supplies = wm8994_fixed_voltage1_supplies,
+};
+
+static struct fixed_voltage_config wm8994_fixed_voltage0_config = {
+ .supply_name = "VCC_1.8V_PDA",
+ .microvolts = 1800000,
+ .gpio = -EINVAL,
+ .init_data = &wm8994_fixed_voltage0_init_data,
+};
+
+static struct fixed_voltage_config wm8994_fixed_voltage1_config = {
+ .supply_name = "V_BAT",
+ .microvolts = 3700000,
+ .gpio = -EINVAL,
+ .init_data = &wm8994_fixed_voltage1_init_data,
+};
+
+static struct platform_device wm8994_fixed_voltage0 = {
+ .name = "reg-fixed-voltage",
+ .id = 0,
+ .dev = {
+ .platform_data = &wm8994_fixed_voltage0_config,
+ },
+};
+
+static struct platform_device wm8994_fixed_voltage1 = {
+ .name = "reg-fixed-voltage",
+ .id = 1,
+ .dev = {
+ .platform_data = &wm8994_fixed_voltage1_config,
+ },
+};
+
+static struct regulator_consumer_supply wm8994_avdd1_supply = {
+ .dev_name = "5-001a",
+ .supply = "AVDD1",
+};
+
+static struct regulator_consumer_supply wm8994_dcvdd_supply = {
+ .dev_name = "5-001a",
+ .supply = "DCVDD",
+};
+
+static struct regulator_init_data wm8994_ldo1_data = {
+ .constraints = {
+ .name = "AVDD1_3.0V",
+ .valid_ops_mask = REGULATOR_CHANGE_STATUS,
+ },
+ .num_consumer_supplies = 1,
+ .consumer_supplies = &wm8994_avdd1_supply,
+};
+
+static struct regulator_init_data wm8994_ldo2_data = {
+ .constraints = {
+ .name = "DCVDD_1.0V",
+ },
+ .num_consumer_supplies = 1,
+ .consumer_supplies = &wm8994_dcvdd_supply,
+};
+
+static struct wm8994_pdata wm8994_platform_data = {
+ /* configure gpio1 function: 0x0001(Logic level input/output) */
+ .gpio_defaults[0] = 0x0001,
+ /* configure gpio3/4/5/7 function for AIF2 voice */
+ .gpio_defaults[2] = 0x8100,
+ .gpio_defaults[3] = 0x8100,
+ .gpio_defaults[4] = 0x8100,
+ .gpio_defaults[6] = 0x0100,
+ /* configure gpio8/9/10/11 function for AIF3 BT */
+ .gpio_defaults[7] = 0x8100,
+ .gpio_defaults[8] = 0x0100,
+ .gpio_defaults[9] = 0x0100,
+ .gpio_defaults[10] = 0x0100,
+ .ldo[0] = { S5PV210_MP03(6), NULL, &wm8994_ldo1_data }, /* XM0FRNB_2 */
+ .ldo[1] = { 0, NULL, &wm8994_ldo2_data },
+};
+
/* GPIO I2C PMIC */
#define AP_I2C_GPIO_PMIC_BUS_4 4
static struct i2c_gpio_platform_data goni_i2c_gpio_pmic_data = {
@@ -385,6 +659,29 @@ static struct i2c_board_info i2c_gpio_pmic_devs[] __initdata = {
#endif
};
+/* GPIO I2C AP 1.8V */
+#define AP_I2C_GPIO_BUS_5 5
+static struct i2c_gpio_platform_data goni_i2c_gpio5_data = {
+ .sda_pin = S5PV210_MP05(3), /* XM0ADDR_11 */
+ .scl_pin = S5PV210_MP05(2), /* XM0ADDR_10 */
+};
+
+static struct platform_device goni_i2c_gpio5 = {
+ .name = "i2c-gpio",
+ .id = AP_I2C_GPIO_BUS_5,
+ .dev = {
+ .platform_data = &goni_i2c_gpio5_data,
+ },
+};
+
+static struct i2c_board_info i2c_gpio5_devs[] __initdata = {
+ {
+ /* CS/ADDR = low 0x34 (FYI: high = 0x36) */
+ I2C_BOARD_INFO("wm8994", 0x1a),
+ .platform_data = &wm8994_platform_data,
+ },
+};
+
/* PMIC Power button */
static struct gpio_keys_button goni_gpio_keys_table[] = {
{
@@ -444,11 +741,37 @@ static struct s3c_sdhci_platdata goni_hsmmc2_data __initdata = {
.ext_cd_gpio_invert = 1,
};
+static struct regulator_consumer_supply mmc2_supplies[] = {
+ REGULATOR_SUPPLY("vmmc", "s3c-sdhci.2"),
+};
+
+static struct regulator_init_data mmc2_fixed_voltage_init_data = {
+ .constraints = {
+ .name = "V_TF_2.8V",
+ .valid_ops_mask = REGULATOR_CHANGE_STATUS,
+ },
+ .num_consumer_supplies = ARRAY_SIZE(mmc2_supplies),
+ .consumer_supplies = mmc2_supplies,
+};
+
+static struct fixed_voltage_config mmc2_fixed_voltage_config = {
+ .supply_name = "EXT_FLASH_EN",
+ .microvolts = 2800000,
+ .gpio = GONI_EXT_FLASH_EN,
+ .enable_high = true,
+ .init_data = &mmc2_fixed_voltage_init_data,
+};
+
+static struct platform_device mmc2_fixed_voltage = {
+ .name = "reg-fixed-voltage",
+ .id = 2,
+ .dev = {
+ .platform_data = &mmc2_fixed_voltage_config,
+ },
+};
+
static void goni_setup_sdhci(void)
{
- gpio_request(GONI_EXT_FLASH_EN, "FLASH_EN");
- gpio_direction_output(GONI_EXT_FLASH_EN, 1);
-
s3c_sdhci0_set_platdata(&goni_hsmmc0_data);
s3c_sdhci1_set_platdata(&goni_hsmmc1_data);
s3c_sdhci2_set_platdata(&goni_hsmmc2_data);
@@ -457,7 +780,10 @@ static void goni_setup_sdhci(void)
static struct platform_device *goni_devices[] __initdata = {
&s3c_device_fb,
&s5p_device_onenand,
+ &goni_spi_gpio,
&goni_i2c_gpio_pmic,
+ &goni_i2c_gpio5,
+ &mmc2_fixed_voltage,
&goni_device_gpiokeys,
&s5p_device_fimc0,
&s5p_device_fimc1,
@@ -465,8 +791,24 @@ static struct platform_device *goni_devices[] __initdata = {
&s3c_device_hsmmc0,
&s3c_device_hsmmc1,
&s3c_device_hsmmc2,
+ &s5pv210_device_iis0,
+ &s3c_device_usb_hsotg,
+ &samsung_device_keypad,
+ &s3c_device_i2c1,
+ &s3c_device_i2c2,
+ &wm8994_fixed_voltage0,
+ &wm8994_fixed_voltage1,
};
+static void __init goni_sound_init(void)
+{
+ /* The main clock of the WM8994 codec is taken from the CLKOUT pin.
+ * CLKOUT[9:8] in the OTHERS register (0xE010E000) is set to 0x3
+ * (XUSBXTI) because the WM8994 codec needs a 24MHz clock.
+ */
+ __raw_writel(__raw_readl(S5P_OTHERS) | (0x3 << 8), S5P_OTHERS);
+}
+
static void __init goni_map_io(void)
{
s5p_init_io(NULL, 0, S5P_VA_CHIPID);
@@ -476,6 +818,20 @@ static void __init goni_map_io(void)
static void __init goni_machine_init(void)
{
+ /* Radio: call before I2C 1 registration */
+ goni_radio_init();
+
+ /* I2C1 */
+ s3c_i2c1_set_platdata(NULL);
+ i2c_register_board_info(1, i2c1_devs, ARRAY_SIZE(i2c1_devs));
+
+ /* TSP: call before I2C 2 registration */
+ goni_tsp_init();
+
+ /* I2C2 */
+ s3c_i2c2_set_platdata(&i2c2_data);
+ i2c_register_board_info(2, i2c2_devs, ARRAY_SIZE(i2c2_devs));
+
/* PMIC */
goni_pmic_init();
i2c_register_board_info(AP_I2C_GPIO_PMIC_BUS_4, i2c_gpio_pmic_devs,
@@ -483,9 +839,22 @@ static void __init goni_machine_init(void)
/* SDHCI */
goni_setup_sdhci();
+ /* SOUND */
+ goni_sound_init();
+ i2c_register_board_info(AP_I2C_GPIO_BUS_5, i2c_gpio5_devs,
+ ARRAY_SIZE(i2c_gpio5_devs));
+
/* FB */
s3c_fb_set_platdata(&goni_lcd_pdata);
+ /* SPI */
+ spi_register_board_info(spi_board_info, ARRAY_SIZE(spi_board_info));
+
+ /* KEYPAD */
+ samsung_keypad_set_platdata(&keypad_data);
+
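+ /* the XUSBXTI crystal is 24MHz on this board; CLKOUT (the WM8994 MCLK source) is derived from it */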
+ clk_xusbxti.rate = 24000000;
+
platform_add_devices(goni_devices, ARRAY_SIZE(goni_devices));
}
diff --git a/arch/arm/mach-s5pv210/mach-smdkc110.c b/arch/arm/mach-s5pv210/mach-smdkc110.c
index cea9bca79d88..5dd1681c069e 100644
--- a/arch/arm/mach-s5pv210/mach-smdkc110.c
+++ b/arch/arm/mach-s5pv210/mach-smdkc110.c
@@ -13,6 +13,7 @@
#include <linux/init.h>
#include <linux/serial_core.h>
#include <linux/i2c.h>
+#include <linux/sysdev.h>
#include <asm/mach/arch.h>
#include <asm/mach/map.h>
@@ -28,6 +29,7 @@
#include <plat/cpu.h>
#include <plat/ata.h>
#include <plat/iic.h>
+#include <plat/pm.h>
/* Following are default values for UCON, ULCON and UFCON UART registers */
#define SMDKC110_UCON_DEFAULT (S3C2410_UCON_TXILEVEL | \
@@ -81,6 +83,7 @@ static struct s3c_ide_platdata smdkc110_ide_pdata __initdata = {
static struct platform_device *smdkc110_devices[] __initdata = {
&s5pv210_device_iis0,
&s5pv210_device_ac97,
+ &s5pv210_device_spdif,
&s3c_device_cfcon,
&s3c_device_i2c0,
&s3c_device_i2c1,
@@ -110,6 +113,8 @@ static void __init smdkc110_map_io(void)
static void __init smdkc110_machine_init(void)
{
+ s3c_pm_init();
+
s3c_i2c0_set_platdata(NULL);
s3c_i2c1_set_platdata(NULL);
s3c_i2c2_set_platdata(NULL);
diff --git a/arch/arm/mach-s5pv210/mach-smdkv210.c b/arch/arm/mach-s5pv210/mach-smdkv210.c
index 83189ae9da9a..1fbc45b2a432 100644
--- a/arch/arm/mach-s5pv210/mach-smdkv210.c
+++ b/arch/arm/mach-s5pv210/mach-smdkv210.c
@@ -13,6 +13,7 @@
#include <linux/i2c.h>
#include <linux/init.h>
#include <linux/serial_core.h>
+#include <linux/sysdev.h>
#include <asm/mach/arch.h>
#include <asm/mach/map.h>
@@ -31,6 +32,7 @@
#include <plat/ata.h>
#include <plat/iic.h>
#include <plat/keypad.h>
+#include <plat/pm.h>
/* Following are default values for UCON, ULCON and UFCON UART registers */
#define SMDKV210_UCON_DEFAULT (S3C2410_UCON_TXILEVEL | \
@@ -103,6 +105,7 @@ static struct samsung_keypad_platdata smdkv210_keypad_data __initdata = {
static struct platform_device *smdkv210_devices[] __initdata = {
&s5pv210_device_iis0,
&s5pv210_device_ac97,
+ &s5pv210_device_spdif,
&s3c_device_adc,
&s3c_device_cfcon,
&s3c_device_hsmmc0,
@@ -145,6 +148,8 @@ static void __init smdkv210_map_io(void)
static void __init smdkv210_machine_init(void)
{
+ s3c_pm_init();
+
samsung_keypad_set_platdata(&smdkv210_keypad_data);
s3c24xx_ts_set_platdata(&s3c_ts_platform);
diff --git a/arch/arm/mach-s5pv210/mach-torbreck.c b/arch/arm/mach-s5pv210/mach-torbreck.c
new file mode 100644
index 000000000000..043c938806b0
--- /dev/null
+++ b/arch/arm/mach-s5pv210/mach-torbreck.c
@@ -0,0 +1,131 @@
+/* linux/arch/arm/mach-s5pv210/mach-torbreck.c
+ *
+ * Copyright (c) 2010 aESOP Community
+ * http://www.aesop.or.kr/
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+*/
+
+#include <linux/kernel.h>
+#include <linux/types.h>
+#include <linux/i2c.h>
+#include <linux/init.h>
+#include <linux/serial_core.h>
+
+#include <asm/mach/arch.h>
+#include <asm/mach/map.h>
+#include <asm/setup.h>
+#include <asm/mach-types.h>
+
+#include <mach/map.h>
+#include <mach/regs-clock.h>
+
+#include <plat/regs-serial.h>
+#include <plat/s5pv210.h>
+#include <plat/devs.h>
+#include <plat/cpu.h>
+#include <plat/iic.h>
+
+/* Following are default values for UCON, ULCON and UFCON UART registers */
+#define TORBRECK_UCON_DEFAULT (S3C2410_UCON_TXILEVEL | \
+ S3C2410_UCON_RXILEVEL | \
+ S3C2410_UCON_TXIRQMODE | \
+ S3C2410_UCON_RXIRQMODE | \
+ S3C2410_UCON_RXFIFO_TOI | \
+ S3C2443_UCON_RXERR_IRQEN)
+
+#define TORBRECK_ULCON_DEFAULT S3C2410_LCON_CS8
+
+#define TORBRECK_UFCON_DEFAULT (S3C2410_UFCON_FIFOMODE | \
+ S5PV210_UFCON_TXTRIG4 | \
+ S5PV210_UFCON_RXTRIG4)
+
+static struct s3c2410_uartcfg torbreck_uartcfgs[] __initdata = {
+ [0] = {
+ .hwport = 0,
+ .flags = 0,
+ .ucon = TORBRECK_UCON_DEFAULT,
+ .ulcon = TORBRECK_ULCON_DEFAULT,
+ .ufcon = TORBRECK_UFCON_DEFAULT,
+ },
+ [1] = {
+ .hwport = 1,
+ .flags = 0,
+ .ucon = TORBRECK_UCON_DEFAULT,
+ .ulcon = TORBRECK_ULCON_DEFAULT,
+ .ufcon = TORBRECK_UFCON_DEFAULT,
+ },
+ [2] = {
+ .hwport = 2,
+ .flags = 0,
+ .ucon = TORBRECK_UCON_DEFAULT,
+ .ulcon = TORBRECK_ULCON_DEFAULT,
+ .ufcon = TORBRECK_UFCON_DEFAULT,
+ },
+ [3] = {
+ .hwport = 3,
+ .flags = 0,
+ .ucon = TORBRECK_UCON_DEFAULT,
+ .ulcon = TORBRECK_ULCON_DEFAULT,
+ .ufcon = TORBRECK_UFCON_DEFAULT,
+ },
+};
+
+static struct platform_device *torbreck_devices[] __initdata = {
+ &s5pv210_device_iis0,
+ &s3c_device_cfcon,
+ &s3c_device_hsmmc0,
+ &s3c_device_hsmmc1,
+ &s3c_device_hsmmc2,
+ &s3c_device_hsmmc3,
+ &s3c_device_i2c0,
+ &s3c_device_i2c1,
+ &s3c_device_i2c2,
+ &s3c_device_rtc,
+ &s3c_device_wdt,
+};
+
+static struct i2c_board_info torbreck_i2c_devs0[] __initdata = {
+ /* To Be Updated */
+};
+
+static struct i2c_board_info torbreck_i2c_devs1[] __initdata = {
+ /* To Be Updated */
+};
+
+static struct i2c_board_info torbreck_i2c_devs2[] __initdata = {
+ /* To Be Updated */
+};
+
+static void __init torbreck_map_io(void)
+{
+ s5p_init_io(NULL, 0, S5P_VA_CHIPID);
+ s3c24xx_init_clocks(24000000);
+ s3c24xx_init_uarts(torbreck_uartcfgs, ARRAY_SIZE(torbreck_uartcfgs));
+}
+
+static void __init torbreck_machine_init(void)
+{
+ s3c_i2c0_set_platdata(NULL);
+ s3c_i2c1_set_platdata(NULL);
+ s3c_i2c2_set_platdata(NULL);
+ i2c_register_board_info(0, torbreck_i2c_devs0,
+ ARRAY_SIZE(torbreck_i2c_devs0));
+ i2c_register_board_info(1, torbreck_i2c_devs1,
+ ARRAY_SIZE(torbreck_i2c_devs1));
+ i2c_register_board_info(2, torbreck_i2c_devs2,
+ ARRAY_SIZE(torbreck_i2c_devs2));
+
+ platform_add_devices(torbreck_devices, ARRAY_SIZE(torbreck_devices));
+}
+
+MACHINE_START(TORBRECK, "TORBRECK")
+ /* Maintainer: Hyunchul Ko <ghcstop@gmail.com> */
+ .boot_params = S5P_PA_SDRAM + 0x100,
+ .init_irq = s5pv210_init_irq,
+ .map_io = torbreck_map_io,
+ .init_machine = torbreck_machine_init,
+ .timer = &s3c24xx_timer,
+MACHINE_END
diff --git a/arch/arm/mach-s5pv210/pm.c b/arch/arm/mach-s5pv210/pm.c
new file mode 100644
index 000000000000..549d7924fd4c
--- /dev/null
+++ b/arch/arm/mach-s5pv210/pm.c
@@ -0,0 +1,166 @@
+/* linux/arch/arm/mach-s5pv210/pm.c
+ *
+ * Copyright (c) 2010 Samsung Electronics Co., Ltd.
+ * http://www.samsung.com
+ *
+ * S5PV210 - Power Management support
+ *
+ * Based on arch/arm/mach-s3c2410/pm.c
+ * Copyright (c) 2006 Simtec Electronics
+ * Ben Dooks <ben@simtec.co.uk>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+*/
+
+#include <linux/init.h>
+#include <linux/suspend.h>
+#include <linux/io.h>
+
+#include <plat/cpu.h>
+#include <plat/pm.h>
+#include <plat/regs-timer.h>
+
+#include <mach/regs-irq.h>
+#include <mach/regs-clock.h>
+
+static struct sleep_save s5pv210_core_save[] = {
+ /* Clock source */
+ SAVE_ITEM(S5P_CLK_SRC0),
+ SAVE_ITEM(S5P_CLK_SRC1),
+ SAVE_ITEM(S5P_CLK_SRC2),
+ SAVE_ITEM(S5P_CLK_SRC3),
+ SAVE_ITEM(S5P_CLK_SRC4),
+ SAVE_ITEM(S5P_CLK_SRC5),
+ SAVE_ITEM(S5P_CLK_SRC6),
+
+ /* Clock source Mask */
+ SAVE_ITEM(S5P_CLK_SRC_MASK0),
+ SAVE_ITEM(S5P_CLK_SRC_MASK1),
+
+ /* Clock Divider */
+ SAVE_ITEM(S5P_CLK_DIV0),
+ SAVE_ITEM(S5P_CLK_DIV1),
+ SAVE_ITEM(S5P_CLK_DIV2),
+ SAVE_ITEM(S5P_CLK_DIV3),
+ SAVE_ITEM(S5P_CLK_DIV4),
+ SAVE_ITEM(S5P_CLK_DIV5),
+ SAVE_ITEM(S5P_CLK_DIV6),
+ SAVE_ITEM(S5P_CLK_DIV7),
+
+ /* Clock Main Gate */
+ SAVE_ITEM(S5P_CLKGATE_MAIN0),
+ SAVE_ITEM(S5P_CLKGATE_MAIN1),
+ SAVE_ITEM(S5P_CLKGATE_MAIN2),
+
+ /* Clock source Peri Gate */
+ SAVE_ITEM(S5P_CLKGATE_PERI0),
+ SAVE_ITEM(S5P_CLKGATE_PERI1),
+
+ /* Clock source SCLK Gate */
+ SAVE_ITEM(S5P_CLKGATE_SCLK0),
+ SAVE_ITEM(S5P_CLKGATE_SCLK1),
+
+ /* Clock IP Clock gate */
+ SAVE_ITEM(S5P_CLKGATE_IP0),
+ SAVE_ITEM(S5P_CLKGATE_IP1),
+ SAVE_ITEM(S5P_CLKGATE_IP2),
+ SAVE_ITEM(S5P_CLKGATE_IP3),
+ SAVE_ITEM(S5P_CLKGATE_IP4),
+
+ /* Clock Block and Bus gate */
+ SAVE_ITEM(S5P_CLKGATE_BLOCK),
+ SAVE_ITEM(S5P_CLKGATE_BUS0),
+
+ /* Clock ETC */
+ SAVE_ITEM(S5P_CLK_OUT),
+ SAVE_ITEM(S5P_MDNIE_SEL),
+
+ /* PWM Register */
+ SAVE_ITEM(S3C2410_TCFG0),
+ SAVE_ITEM(S3C2410_TCFG1),
+ SAVE_ITEM(S3C64XX_TINT_CSTAT),
+ SAVE_ITEM(S3C2410_TCON),
+ SAVE_ITEM(S3C2410_TCNTB(0)),
+ SAVE_ITEM(S3C2410_TCMPB(0)),
+ SAVE_ITEM(S3C2410_TCNTO(0)),
+};
+
+void s5pv210_cpu_suspend(void)
+{
+ unsigned long tmp;
+
+ /* issue the standby signal into the pm unit. Note, we
+ * issue a write-buffer drain just in case */
+
+ tmp = 0;
+
+ asm("b 1f\n\t"
+ ".align 5\n\t"
+ "1:\n\t"
+ "mcr p15, 0, %0, c7, c10, 5\n\t"
+ "mcr p15, 0, %0, c7, c10, 4\n\t"
+ "wfi" : : "r" (tmp));
+
+ /* we should never get past here */
+ panic("sleep resumed to originator?");
+}
+
+static void s5pv210_pm_prepare(void)
+{
+ unsigned int tmp;
+
+ /* ensure at least INFORM0 has the resume address */
+ __raw_writel(virt_to_phys(s3c_cpu_resume), S5P_INFORM0);
+
+ tmp = __raw_readl(S5P_SLEEP_CFG);
+ tmp &= ~(S5P_SLEEP_CFG_OSC_EN | S5P_SLEEP_CFG_USBOSC_EN);
+ __raw_writel(tmp, S5P_SLEEP_CFG);
+
+ /* WFI for SLEEP mode configuration by SYSCON */
+ tmp = __raw_readl(S5P_PWR_CFG);
+ tmp &= S5P_CFG_WFI_CLEAN;
+ tmp |= S5P_CFG_WFI_SLEEP;
+ __raw_writel(tmp, S5P_PWR_CFG);
+
+ /* SYSCON interrupt handling disable */
+ tmp = __raw_readl(S5P_OTHERS);
+ tmp |= S5P_OTHER_SYSC_INTOFF;
+ __raw_writel(tmp, S5P_OTHERS);
+
+ s3c_pm_do_save(s5pv210_core_save, ARRAY_SIZE(s5pv210_core_save));
+}
+
+static int s5pv210_pm_add(struct sys_device *sysdev)
+{
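+ /* hook the S5PV210-specific prepare/sleep callbacks into the generic s3c PM core */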
+ pm_cpu_prep = s5pv210_pm_prepare;
+ pm_cpu_sleep = s5pv210_cpu_suspend;
+
+ return 0;
+}
+
+static int s5pv210_pm_resume(struct sys_device *dev)
+{
+ u32 tmp;
+
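+ /* set the IO/CF/MMC/UART pad retention bits in the OTHERS register on resume */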
+ tmp = __raw_readl(S5P_OTHERS);
+ tmp |= (S5P_OTHERS_RET_IO | S5P_OTHERS_RET_CF |\
+ S5P_OTHERS_RET_MMC | S5P_OTHERS_RET_UART);
+ __raw_writel(tmp, S5P_OTHERS);
+
+ s3c_pm_do_restore_core(s5pv210_core_save, ARRAY_SIZE(s5pv210_core_save));
+
+ return 0;
+}
+
+static struct sysdev_driver s5pv210_pm_driver = {
+ .add = s5pv210_pm_add,
+ .resume = s5pv210_pm_resume,
+};
+
+static __init int s5pv210_pm_drvinit(void)
+{
+ return sysdev_driver_register(&s5pv210_sysclass, &s5pv210_pm_driver);
+}
+arch_initcall(s5pv210_pm_drvinit);
diff --git a/arch/arm/mach-s5pv210/setup-fb-24bpp.c b/arch/arm/mach-s5pv210/setup-fb-24bpp.c
index 928cf1f125fa..e932ebfac56d 100644
--- a/arch/arm/mach-s5pv210/setup-fb-24bpp.c
+++ b/arch/arm/mach-s5pv210/setup-fb-24bpp.c
@@ -21,33 +21,21 @@
#include <mach/regs-clock.h>
#include <plat/gpio-cfg.h>
-void s5pv210_fb_gpio_setup_24bpp(void)
+static void s5pv210_fb_cfg_gpios(unsigned int base, unsigned int nr)
{
- unsigned int gpio = 0;
-
- for (gpio = S5PV210_GPF0(0); gpio <= S5PV210_GPF0(7); gpio++) {
- s3c_gpio_cfgpin(gpio, S3C_GPIO_SFN(2));
- s3c_gpio_setpull(gpio, S3C_GPIO_PULL_NONE);
- s5p_gpio_set_drvstr(gpio, S5P_GPIO_DRVSTR_LV4);
- }
+ s3c_gpio_cfgrange_nopull(base, nr, S3C_GPIO_SFN(2));
- for (gpio = S5PV210_GPF1(0); gpio <= S5PV210_GPF1(7); gpio++) {
- s3c_gpio_cfgpin(gpio, S3C_GPIO_SFN(2));
- s3c_gpio_setpull(gpio, S3C_GPIO_PULL_NONE);
- s5p_gpio_set_drvstr(gpio, S5P_GPIO_DRVSTR_LV4);
- }
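+ /* in addition to the function/pull setup, each pin gets drive strength level 4 */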
+ for (; nr > 0; nr--, base++)
+ s5p_gpio_set_drvstr(base, S5P_GPIO_DRVSTR_LV4);
+}
- for (gpio = S5PV210_GPF2(0); gpio <= S5PV210_GPF2(7); gpio++) {
- s3c_gpio_cfgpin(gpio, S3C_GPIO_SFN(2));
- s3c_gpio_setpull(gpio, S3C_GPIO_PULL_NONE);
- s5p_gpio_set_drvstr(gpio, S5P_GPIO_DRVSTR_LV4);
- }
- for (gpio = S5PV210_GPF3(0); gpio <= S5PV210_GPF3(3); gpio++) {
- s3c_gpio_cfgpin(gpio, S3C_GPIO_SFN(2));
- s3c_gpio_setpull(gpio, S3C_GPIO_PULL_NONE);
- s5p_gpio_set_drvstr(gpio, S5P_GPIO_DRVSTR_LV4);
- }
+void s5pv210_fb_gpio_setup_24bpp(void)
+{
+ s5pv210_fb_cfg_gpios(S5PV210_GPF0(0), 8);
+ s5pv210_fb_cfg_gpios(S5PV210_GPF1(0), 8);
+ s5pv210_fb_cfg_gpios(S5PV210_GPF2(0), 8);
+ s5pv210_fb_cfg_gpios(S5PV210_GPF3(0), 4);
/* Set DISPLAY_CONTROL register for Display path selection.
*
diff --git a/arch/arm/mach-s5pv210/setup-i2c0.c b/arch/arm/mach-s5pv210/setup-i2c0.c
index d38f7cb7e662..0f1cc3a1c1e8 100644
--- a/arch/arm/mach-s5pv210/setup-i2c0.c
+++ b/arch/arm/mach-s5pv210/setup-i2c0.c
@@ -23,8 +23,6 @@ struct platform_device; /* don't need the contents */
void s3c_i2c0_cfg_gpio(struct platform_device *dev)
{
- s3c_gpio_cfgpin(S5PV210_GPD1(0), S3C_GPIO_SFN(2));
- s3c_gpio_setpull(S5PV210_GPD1(0), S3C_GPIO_PULL_UP);
- s3c_gpio_cfgpin(S5PV210_GPD1(1), S3C_GPIO_SFN(2));
- s3c_gpio_setpull(S5PV210_GPD1(1), S3C_GPIO_PULL_UP);
+ s3c_gpio_cfgall_range(S5PV210_GPD1(0), 2,
+ S3C_GPIO_SFN(2), S3C_GPIO_PULL_UP);
}
diff --git a/arch/arm/mach-s5pv210/setup-i2c1.c b/arch/arm/mach-s5pv210/setup-i2c1.c
index 148bb7857d89..f61365a34c56 100644
--- a/arch/arm/mach-s5pv210/setup-i2c1.c
+++ b/arch/arm/mach-s5pv210/setup-i2c1.c
@@ -23,8 +23,6 @@ struct platform_device; /* don't need the contents */
void s3c_i2c1_cfg_gpio(struct platform_device *dev)
{
- s3c_gpio_cfgpin(S5PV210_GPD1(2), S3C_GPIO_SFN(2));
- s3c_gpio_setpull(S5PV210_GPD1(2), S3C_GPIO_PULL_UP);
- s3c_gpio_cfgpin(S5PV210_GPD1(3), S3C_GPIO_SFN(2));
- s3c_gpio_setpull(S5PV210_GPD1(3), S3C_GPIO_PULL_UP);
+ s3c_gpio_cfgall_range(S5PV210_GPD1(2), 2,
+ S3C_GPIO_SFN(2), S3C_GPIO_PULL_UP);
}
diff --git a/arch/arm/mach-s5pv210/setup-i2c2.c b/arch/arm/mach-s5pv210/setup-i2c2.c
index 2396cb8c373e..2f91b5cefbc6 100644
--- a/arch/arm/mach-s5pv210/setup-i2c2.c
+++ b/arch/arm/mach-s5pv210/setup-i2c2.c
@@ -23,8 +23,6 @@ struct platform_device; /* don't need the contents */
void s3c_i2c2_cfg_gpio(struct platform_device *dev)
{
- s3c_gpio_cfgpin(S5PV210_GPD1(4), S3C_GPIO_SFN(2));
- s3c_gpio_setpull(S5PV210_GPD1(4), S3C_GPIO_PULL_UP);
- s3c_gpio_cfgpin(S5PV210_GPD1(5), S3C_GPIO_SFN(2));
- s3c_gpio_setpull(S5PV210_GPD1(5), S3C_GPIO_PULL_UP);
+ s3c_gpio_cfgall_range(S5PV210_GPD1(4), 2,
+ S3C_GPIO_SFN(2), S3C_GPIO_PULL_UP);
}
diff --git a/arch/arm/mach-s5pv210/setup-ide.c b/arch/arm/mach-s5pv210/setup-ide.c
index b558b1cc8d60..ea123d546bd2 100644
--- a/arch/arm/mach-s5pv210/setup-ide.c
+++ b/arch/arm/mach-s5pv210/setup-ide.c
@@ -15,36 +15,25 @@
#include <plat/gpio-cfg.h>
+static void s5pv210_ide_cfg_gpios(unsigned int base, unsigned int nr)
+{
+ s3c_gpio_cfgrange_nopull(base, nr, S3C_GPIO_SFN(4));
+
+ for (; nr > 0; nr--, base++)
+ s5p_gpio_set_drvstr(base, S5P_GPIO_DRVSTR_LV4);
+}
+
void s5pv210_ide_setup_gpio(void)
{
- unsigned int gpio = 0;
-
- for (gpio = S5PV210_GPJ0(0); gpio <= S5PV210_GPJ0(7); gpio++) {
- /* CF_Add[0 - 2], CF_IORDY, CF_INTRQ, CF_DMARQ, CF_DMARST,
- CF_DMACK */
- s3c_gpio_cfgpin(gpio, S3C_GPIO_SFN(4));
- s3c_gpio_setpull(gpio, S3C_GPIO_PULL_NONE);
- s5p_gpio_set_drvstr(gpio, S5P_GPIO_DRVSTR_LV4);
- }
-
- for (gpio = S5PV210_GPJ2(0); gpio <= S5PV210_GPJ2(7); gpio++) {
- /*CF_Data[0 - 7] */
- s3c_gpio_cfgpin(gpio, S3C_GPIO_SFN(4));
- s3c_gpio_setpull(gpio, S3C_GPIO_PULL_NONE);
- s5p_gpio_set_drvstr(gpio, S5P_GPIO_DRVSTR_LV4);
- }
-
- for (gpio = S5PV210_GPJ3(0); gpio <= S5PV210_GPJ3(7); gpio++) {
- /* CF_Data[8 - 15] */
- s3c_gpio_cfgpin(gpio, S3C_GPIO_SFN(4));
- s3c_gpio_setpull(gpio, S3C_GPIO_PULL_NONE);
- s5p_gpio_set_drvstr(gpio, S5P_GPIO_DRVSTR_LV4);
- }
-
- for (gpio = S5PV210_GPJ4(0); gpio <= S5PV210_GPJ4(3); gpio++) {
- /* CF_CS0, CF_CS1, CF_IORD, CF_IOWR */
- s3c_gpio_cfgpin(gpio, S3C_GPIO_SFN(4));
- s3c_gpio_setpull(gpio, S3C_GPIO_PULL_NONE);
- s5p_gpio_set_drvstr(gpio, S5P_GPIO_DRVSTR_LV4);
- }
+ /* CF_Add[0 - 2], CF_IORDY, CF_INTRQ, CF_DMARQ, CF_DMARST, CF_DMACK */
+ s5pv210_ide_cfg_gpios(S5PV210_GPJ0(0), 8);
+
+ /* CF_Data[0 - 7] */
+ s5pv210_ide_cfg_gpios(S5PV210_GPJ2(0), 8);
+
+ /* CF_Data[8 - 15] */
+ s5pv210_ide_cfg_gpios(S5PV210_GPJ3(0), 8);
+
+ /* CF_CS0, CF_CS1, CF_IORD, CF_IOWR */
+ s5pv210_ide_cfg_gpios(S5PV210_GPJ4(0), 4);
}
diff --git a/arch/arm/mach-s5pv210/setup-keypad.c b/arch/arm/mach-s5pv210/setup-keypad.c
index 37b2790aafc3..c56420a52f48 100644
--- a/arch/arm/mach-s5pv210/setup-keypad.c
+++ b/arch/arm/mach-s5pv210/setup-keypad.c
@@ -16,19 +16,9 @@
void samsung_keypad_cfg_gpio(unsigned int rows, unsigned int cols)
{
- unsigned int gpio, end;
-
/* Set all the necessary GPH3 pins to special-function 3: KP_ROW[x] */
- end = S5PV210_GPH3(rows);
- for (gpio = S5PV210_GPH3(0); gpio < end; gpio++) {
- s3c_gpio_cfgpin(gpio, S3C_GPIO_SFN(3));
- s3c_gpio_setpull(gpio, S3C_GPIO_PULL_NONE);
- }
+ s3c_gpio_cfgrange_nopull(S5PV210_GPH3(0), rows, S3C_GPIO_SFN(3));
/* Set all the necessary GPH2 pins to special-function 3: KP_COL[x] */
- end = S5PV210_GPH2(cols);
- for (gpio = S5PV210_GPH2(0); gpio < end; gpio++) {
- s3c_gpio_cfgpin(gpio, S3C_GPIO_SFN(3));
- s3c_gpio_setpull(gpio, S3C_GPIO_PULL_NONE);
- }
+ s3c_gpio_cfgrange_nopull(S5PV210_GPH2(0), cols, S3C_GPIO_SFN(3));
}
diff --git a/arch/arm/mach-s5pv210/setup-sdhci-gpio.c b/arch/arm/mach-s5pv210/setup-sdhci-gpio.c
index b18587b1ec58..746777d56df9 100644
--- a/arch/arm/mach-s5pv210/setup-sdhci-gpio.c
+++ b/arch/arm/mach-s5pv210/setup-sdhci-gpio.c
@@ -26,26 +26,17 @@
void s5pv210_setup_sdhci0_cfg_gpio(struct platform_device *dev, int width)
{
struct s3c_sdhci_platdata *pdata = dev->dev.platform_data;
- unsigned int gpio;
/* Set all the necessary GPG0/GPG1 pins to special-function 2 */
- for (gpio = S5PV210_GPG0(0); gpio < S5PV210_GPG0(2); gpio++) {
- s3c_gpio_cfgpin(gpio, S3C_GPIO_SFN(2));
- s3c_gpio_setpull(gpio, S3C_GPIO_PULL_NONE);
- }
+ s3c_gpio_cfgrange_nopull(S5PV210_GPG0(0), 2, S3C_GPIO_SFN(2));
+
switch (width) {
case 8:
/* GPG1[3:6] special-function 3 */
- for (gpio = S5PV210_GPG1(3); gpio <= S5PV210_GPG1(6); gpio++) {
- s3c_gpio_cfgpin(gpio, S3C_GPIO_SFN(3));
- s3c_gpio_setpull(gpio, S3C_GPIO_PULL_NONE);
- }
+ s3c_gpio_cfgrange_nopull(S5PV210_GPG1(3), 4, S3C_GPIO_SFN(3));
case 4:
/* GPG0[3:6] special-function 2 */
- for (gpio = S5PV210_GPG0(3); gpio <= S5PV210_GPG0(6); gpio++) {
- s3c_gpio_cfgpin(gpio, S3C_GPIO_SFN(2));
- s3c_gpio_setpull(gpio, S3C_GPIO_PULL_NONE);
- }
+ s3c_gpio_cfgrange_nopull(S5PV210_GPG0(3), 4, S3C_GPIO_SFN(2));
default:
break;
}
@@ -59,19 +50,12 @@ void s5pv210_setup_sdhci0_cfg_gpio(struct platform_device *dev, int width)
void s5pv210_setup_sdhci1_cfg_gpio(struct platform_device *dev, int width)
{
struct s3c_sdhci_platdata *pdata = dev->dev.platform_data;
- unsigned int gpio;
/* Set all the necessary GPG1[0:1] pins to special-function 2 */
- for (gpio = S5PV210_GPG1(0); gpio < S5PV210_GPG1(2); gpio++) {
- s3c_gpio_cfgpin(gpio, S3C_GPIO_SFN(2));
- s3c_gpio_setpull(gpio, S3C_GPIO_PULL_NONE);
- }
+ s3c_gpio_cfgrange_nopull(S5PV210_GPG1(0), 2, S3C_GPIO_SFN(2));
/* Data pin GPG1[3:6] to special-function 2 */
- for (gpio = S5PV210_GPG1(3); gpio <= S5PV210_GPG1(6); gpio++) {
- s3c_gpio_cfgpin(gpio, S3C_GPIO_SFN(2));
- s3c_gpio_setpull(gpio, S3C_GPIO_PULL_NONE);
- }
+ s3c_gpio_cfgrange_nopull(S5PV210_GPG1(3), 4, S3C_GPIO_SFN(2));
if (pdata->cd_type == S3C_SDHCI_CD_INTERNAL) {
s3c_gpio_setpull(S5PV210_GPG1(2), S3C_GPIO_PULL_UP);
@@ -82,27 +66,17 @@ void s5pv210_setup_sdhci1_cfg_gpio(struct platform_device *dev, int width)
void s5pv210_setup_sdhci2_cfg_gpio(struct platform_device *dev, int width)
{
struct s3c_sdhci_platdata *pdata = dev->dev.platform_data;
- unsigned int gpio;
/* Set all the necessary GPG2[0:1] pins to special-function 2 */
- for (gpio = S5PV210_GPG2(0); gpio < S5PV210_GPG2(2); gpio++) {
- s3c_gpio_cfgpin(gpio, S3C_GPIO_SFN(2));
- s3c_gpio_setpull(gpio, S3C_GPIO_PULL_NONE);
- }
+ s3c_gpio_cfgrange_nopull(S5PV210_GPG2(0), 2, S3C_GPIO_SFN(2));
switch (width) {
case 8:
/* Data pin GPG3[3:6] to special-function 3 */
- for (gpio = S5PV210_GPG3(3); gpio <= S5PV210_GPG3(6); gpio++) {
- s3c_gpio_cfgpin(gpio, S3C_GPIO_SFN(3));
- s3c_gpio_setpull(gpio, S3C_GPIO_PULL_NONE);
- }
+ s3c_gpio_cfgrange_nopull(S5PV210_GPG3(3), 4, S3C_GPIO_SFN(3));
case 4:
/* Data pin GPG2[3:6] to special-function 2 */
- for (gpio = S5PV210_GPG2(3); gpio <= S5PV210_GPG2(6); gpio++) {
- s3c_gpio_cfgpin(gpio, S3C_GPIO_SFN(2));
- s3c_gpio_setpull(gpio, S3C_GPIO_PULL_NONE);
- }
+ s3c_gpio_cfgrange_nopull(S5PV210_GPG2(3), 4, S3C_GPIO_SFN(2));
default:
break;
}
@@ -116,19 +90,12 @@ void s5pv210_setup_sdhci2_cfg_gpio(struct platform_device *dev, int width)
void s5pv210_setup_sdhci3_cfg_gpio(struct platform_device *dev, int width)
{
struct s3c_sdhci_platdata *pdata = dev->dev.platform_data;
- unsigned int gpio;
- /* Set all the necessary GPG3[0:2] pins to special-function 2 */
- for (gpio = S5PV210_GPG3(0); gpio < S5PV210_GPG3(2); gpio++) {
- s3c_gpio_cfgpin(gpio, S3C_GPIO_SFN(2));
- s3c_gpio_setpull(gpio, S3C_GPIO_PULL_NONE);
- }
+ /* Set all the necessary GPG3[0:1] pins to special-function 2 */
+ s3c_gpio_cfgrange_nopull(S5PV210_GPG3(0), 2, S3C_GPIO_SFN(2));
/* Data pin GPG3[3:6] to special-function 2 */
- for (gpio = S5PV210_GPG3(3); gpio <= S5PV210_GPG3(6); gpio++) {
- s3c_gpio_cfgpin(gpio, S3C_GPIO_SFN(2));
- s3c_gpio_setpull(gpio, S3C_GPIO_PULL_NONE);
- }
+ s3c_gpio_cfgrange_nopull(S5PV210_GPG3(3), 4, S3C_GPIO_SFN(2));
if (pdata->cd_type == S3C_SDHCI_CD_INTERNAL) {
s3c_gpio_setpull(S5PV210_GPG3(2), S3C_GPIO_PULL_UP);
diff --git a/arch/arm/mach-s5pv210/sleep.S b/arch/arm/mach-s5pv210/sleep.S
new file mode 100644
index 000000000000..d4d222b716b4
--- /dev/null
+++ b/arch/arm/mach-s5pv210/sleep.S
@@ -0,0 +1,170 @@
+/* linux/arch/arm/mach-s5pv210/sleep.S
+ *
+ * Copyright (c) 2010 Samsung Electronics Co., Ltd.
+ * http://www.samsung.com
+ *
+ * S5PV210 power Manager (Suspend-To-RAM) support
+ * Based on S3C2410 sleep code by:
+ * Ben Dooks, (c) 2004 Simtec Electronics
+ *
+ * Based on PXA/SA1100 sleep code by:
+ * Nicolas Pitre, (c) 2002 Monta Vista Software Inc
+ * Cliff Brake, (c) 2001
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+*/
+
+#include <linux/linkage.h>
+#include <asm/assembler.h>
+#include <asm/memory.h>
+
+ .text
+
+ /* s3c_cpu_save
+ *
+ * entry:
+ * r0 = save address (virtual addr of s3c_sleep_save_phys)
+ */
+
+ENTRY(s3c_cpu_save)
+
+ stmfd sp!, { r3 - r12, lr }
+
+ mrc p15, 0, r4, c13, c0, 0 @ FCSE/PID
+ mrc p15, 0, r5, c3, c0, 0 @ Domain ID
+ mrc p15, 0, r6, c2, c0, 0 @ Translation Table BASE0
+ mrc p15, 0, r7, c2, c0, 1 @ Translation Table BASE1
+ mrc p15, 0, r8, c2, c0, 2 @ Translation Table Control
+ mrc p15, 0, r9, c1, c0, 0 @ Control register
+ mrc p15, 0, r10, c1, c0, 1 @ Auxiliary control register
+ mrc p15, 0, r11, c1, c0, 2 @ Co-processor access controls
+ mrc p15, 0, r12, c10, c2, 0 @ Read PRRR
+ mrc p15, 0, r3, c10, c2, 1 @ READ NMRR
+
+ stmia r0, { r3 - r13 }
+
+ bl s3c_pm_cb_flushcache
+
+ ldr r0, =pm_cpu_sleep
+ ldr r0, [ r0 ]
+ mov pc, r0
+
+resume_with_mmu:
+ /*
+ * After MMU is turned on, restore the previous MMU table.
+ */
+ ldr r9 , =(PAGE_OFFSET - PHYS_OFFSET)
+ add r4, r4, r9
+ str r12, [r4]
+
+ ldmfd sp!, { r3 - r12, pc }
+
+ .ltorg
+
+ .data
+
+ .global s3c_sleep_save_phys
+s3c_sleep_save_phys:
+ .word 0
+
+ /* sleep magic, to allow the bootloader to check for a valid
+ * image to resume to. Must be the first word before the
+ * s3c_cpu_resume entry.
+ */
+
+ .word 0x2bedf00d
+
+ /* s3c_cpu_resume
+ *
+ * resume code entry for bootloader to call
+ *
+ * we must put this code here in the data segment as we have no
+ * other way of restoring the stack pointer after sleep, and we
+ * must not write to the code segment (code is read-only)
+ */
+
+ENTRY(s3c_cpu_resume)
+ mov r0, #PSR_I_BIT | PSR_F_BIT | SVC_MODE
+ msr cpsr_c, r0
+
+ mov r1, #0
+ mcr p15, 0, r1, c8, c7, 0 @ invalidate TLBs
+ mcr p15, 0, r1, c7, c5, 0 @ invalidate I Cache
+
+ ldr r0, s3c_sleep_save_phys @ address of restore block
+ ldmia r0, { r3 - r13 }
+
+ mcr p15, 0, r4, c13, c0, 0 @ FCSE/PID
+ mcr p15, 0, r5, c3, c0, 0 @ Domain ID
+
+ mcr p15, 0, r8, c2, c0, 2 @ Translation Table Control
+ mcr p15, 0, r7, c2, c0, 1 @ Translation Table BASE1
+ mcr p15, 0, r6, c2, c0, 0 @ Translation Table BASE0
+
+ mcr p15, 0, r10, c1, c0, 1 @ Auxiliary control register
+
+ mov r0, #0
+ mcr p15, 0, r0, c8, c7, 0 @ Invalidate I & D TLB
+
+ mov r0, #0 @ restore copro access
+ mcr p15, 0, r11, c1, c0, 2 @ Co-processor access
+ mcr p15, 0, r0, c7, c5, 4
+
+ mcr p15, 0, r12, c10, c2, 0 @ write PRRR
+ mcr p15, 0, r3, c10, c2, 1 @ write NMRR
+
+ /*
+ * In Cortex-A8, when MMU is turned on, the pipeline is flushed.
+ * And there are no valid entries in the MMU table at this point.
+ * So before turning on the MMU, the MMU entry for the DRAM address
+ * range is added. After the MMU is turned on, the other entries
+ * in the MMU table will be restored.
+ */
+
+ /* r6 = Translation Table BASE0 */
+ mov r4, r6
+ mov r4, r4, LSR #14
+ mov r4, r4, LSL #14
+
+ /* Load address for adding to MMU table list */
+ ldr r11, =0xE010F000 @ INFORM0 reg.
+ ldr r10, [r11, #0]
+ mov r10, r10, LSR #18
+ bic r10, r10, #0x3
+ orr r4, r4, r10
+
+ /* Calculate MMU table entry */
+ mov r10, r10, LSL #18
+ ldr r5, =0x40E
+ orr r10, r10, r5
+
+ /* Back up the original data */
+ ldr r12, [r4]
+
+ /* Add calculated MMU table entry into MMU table list */
+ str r10, [r4]
+
+ ldr r2, =resume_with_mmu
+ mcr p15, 0, r9, c1, c0, 0 @ turn on MMU, etc
+
+ nop
+ nop
+ nop
+ nop
+ nop @ second-to-last before mmu
+
+ mov pc, r2 @ go back to virtual address
+
+ .ltorg
diff --git a/arch/arm/mach-s5pv310/Kconfig b/arch/arm/mach-s5pv310/Kconfig
index 331b5bd97aba..1150b360f38c 100644
--- a/arch/arm/mach-s5pv310/Kconfig
+++ b/arch/arm/mach-s5pv310/Kconfig
@@ -11,7 +11,6 @@ if ARCH_S5PV310
config CPU_S5PV310
bool
- select PLAT_S5P
help
Enable S5PV310 CPU support
@@ -25,21 +24,105 @@ config S5PV310_SETUP_I2C2
help
Common setup code for i2c bus 2.
+config S5PV310_SETUP_I2C3
+ bool
+ help
+ Common setup code for i2c bus 3.
+
+config S5PV310_SETUP_I2C4
+ bool
+ help
+ Common setup code for i2c bus 4.
+
+config S5PV310_SETUP_I2C5
+ bool
+ help
+ Common setup code for i2c bus 5.
+
+config S5PV310_SETUP_I2C6
+ bool
+ help
+ Common setup code for i2c bus 6.
+
+config S5PV310_SETUP_I2C7
+ bool
+ help
+ Common setup code for i2c bus 7.
+
+config S5PV310_SETUP_SDHCI
+ bool
+ select S5PV310_SETUP_SDHCI_GPIO
+ help
+ Internal helper functions for S5PV310 based SDHCI systems.
+
+config S5PV310_SETUP_SDHCI_GPIO
+ bool
+ help
+ Common setup code for SDHCI gpio.
+
# machine support
-config MACH_SMDKV310
- bool "SMDKV310"
+menu "S5PC210 Machines"
+
+config MACH_SMDKC210
+ bool "SMDKC210"
select CPU_S5PV310
- select ARCH_SPARSEMEM_ENABLE
+ select S3C_DEV_RTC
+ select S3C_DEV_WDT
+ select S3C_DEV_HSMMC
+ select S3C_DEV_HSMMC1
+ select S3C_DEV_HSMMC2
+ select S3C_DEV_HSMMC3
+ select S5PV310_SETUP_SDHCI
help
- Machine support for Samsung SMDKV310
+ Machine support for Samsung SMDKC210
+ S5PC210(MCP) is one of the package options of S5PV310
config MACH_UNIVERSAL_C210
bool "Mobile UNIVERSAL_C210 Board"
select CPU_S5PV310
- select ARCH_SPARSEMEM_ENABLE
+ select S5P_DEV_ONENAND
+ select S3C_DEV_I2C1
+ select S5PV310_SETUP_I2C1
help
Machine support for Samsung Mobile Universal S5PC210 Reference
Board. S5PC210(MCP) is one of the package options of S5PV310
+endmenu
+
+menu "S5PV310 Machines"
+
+config MACH_SMDKV310
+ bool "SMDKV310"
+ select CPU_S5PV310
+ select S3C_DEV_RTC
+ select S3C_DEV_WDT
+ select S3C_DEV_HSMMC
+ select S3C_DEV_HSMMC1
+ select S3C_DEV_HSMMC2
+ select S3C_DEV_HSMMC3
+ select S5PV310_SETUP_SDHCI
+ help
+ Machine support for Samsung SMDKV310
+
+endmenu
+
+comment "Configuration for HSMMC bus width"
+
+menu "Use 8-bit bus width"
+
+config S5PV310_SDHCI_CH0_8BIT
+ bool "Channel 0 with 8-bit bus"
+ help
+ Support HSMMC Channel 0 8-bit bus.
+ If selected, Channel 1 is disabled.
+
+config S5PV310_SDHCI_CH2_8BIT
+ bool "Channel 2 with 8-bit bus"
+ help
+ Support HSMMC Channel 2 8-bit bus.
+ If selected, Channel 3 is disabled.
+
+endmenu
+
endif
diff --git a/arch/arm/mach-s5pv310/Makefile b/arch/arm/mach-s5pv310/Makefile
index d5b51c72340f..84afc64e7c01 100644
--- a/arch/arm/mach-s5pv310/Makefile
+++ b/arch/arm/mach-s5pv310/Makefile
@@ -13,7 +13,7 @@ obj- :=
# Core support for S5PV310 system
obj-$(CONFIG_CPU_S5PV310) += cpu.o init.o clock.o irq-combiner.o
-obj-$(CONFIG_CPU_S5PV310) += setup-i2c0.o time.o
+obj-$(CONFIG_CPU_S5PV310) += setup-i2c0.o time.o gpiolib.o irq-eint.o
obj-$(CONFIG_SMP) += platsmp.o headsmp.o
obj-$(CONFIG_LOCAL_TIMERS) += localtimer.o
@@ -21,6 +21,7 @@ obj-$(CONFIG_HOTPLUG_CPU) += hotplug.o
# machine support
+obj-$(CONFIG_MACH_SMDKC210) += mach-smdkc210.o
obj-$(CONFIG_MACH_SMDKV310) += mach-smdkv310.o
obj-$(CONFIG_MACH_UNIVERSAL_C210) += mach-universal_c210.o
@@ -28,3 +29,10 @@ obj-$(CONFIG_MACH_UNIVERSAL_C210) += mach-universal_c210.o
obj-$(CONFIG_S5PV310_SETUP_I2C1) += setup-i2c1.o
obj-$(CONFIG_S5PV310_SETUP_I2C2) += setup-i2c2.o
+obj-$(CONFIG_S5PV310_SETUP_I2C3) += setup-i2c3.o
+obj-$(CONFIG_S5PV310_SETUP_I2C4) += setup-i2c4.o
+obj-$(CONFIG_S5PV310_SETUP_I2C5) += setup-i2c5.o
+obj-$(CONFIG_S5PV310_SETUP_I2C6) += setup-i2c6.o
+obj-$(CONFIG_S5PV310_SETUP_I2C7) += setup-i2c7.o
+obj-$(CONFIG_S5PV310_SETUP_SDHCI) += setup-sdhci.o
+obj-$(CONFIG_S5PV310_SETUP_SDHCI_GPIO) += setup-sdhci-gpio.o
diff --git a/arch/arm/mach-s5pv310/clock.c b/arch/arm/mach-s5pv310/clock.c
index 26a0f03df8ea..58c9d33f36fe 100644
--- a/arch/arm/mach-s5pv310/clock.c
+++ b/arch/arm/mach-s5pv310/clock.c
@@ -30,16 +30,92 @@ static struct clk clk_sclk_hdmi27m = {
.rate = 27000000,
};
+static struct clk clk_sclk_hdmiphy = {
+ .name = "sclk_hdmiphy",
+ .id = -1,
+};
+
+static struct clk clk_sclk_usbphy0 = {
+ .name = "sclk_usbphy0",
+ .id = -1,
+ .rate = 27000000,
+};
+
+static struct clk clk_sclk_usbphy1 = {
+ .name = "sclk_usbphy1",
+ .id = -1,
+};
+
+static int s5pv310_clksrc_mask_top_ctrl(struct clk *clk, int enable)
+{
+ return s5p_gatectrl(S5P_CLKSRC_MASK_TOP, clk, enable);
+}
+
+static int s5pv310_clksrc_mask_cam_ctrl(struct clk *clk, int enable)
+{
+ return s5p_gatectrl(S5P_CLKSRC_MASK_CAM, clk, enable);
+}
+
+static int s5pv310_clksrc_mask_lcd0_ctrl(struct clk *clk, int enable)
+{
+ return s5p_gatectrl(S5P_CLKSRC_MASK_LCD0, clk, enable);
+}
+
+static int s5pv310_clksrc_mask_lcd1_ctrl(struct clk *clk, int enable)
+{
+ return s5p_gatectrl(S5P_CLKSRC_MASK_LCD1, clk, enable);
+}
+
+static int s5pv310_clksrc_mask_fsys_ctrl(struct clk *clk, int enable)
+{
+ return s5p_gatectrl(S5P_CLKSRC_MASK_FSYS, clk, enable);
+}
+
static int s5pv310_clksrc_mask_peril0_ctrl(struct clk *clk, int enable)
{
return s5p_gatectrl(S5P_CLKSRC_MASK_PERIL0, clk, enable);
}
+static int s5pv310_clksrc_mask_peril1_ctrl(struct clk *clk, int enable)
+{
+ return s5p_gatectrl(S5P_CLKSRC_MASK_PERIL1, clk, enable);
+}
+
+static int s5pv310_clk_ip_cam_ctrl(struct clk *clk, int enable)
+{
+ return s5p_gatectrl(S5P_CLKGATE_IP_CAM, clk, enable);
+}
+
+static int s5pv310_clk_ip_image_ctrl(struct clk *clk, int enable)
+{
+ return s5p_gatectrl(S5P_CLKGATE_IP_IMAGE, clk, enable);
+}
+
+static int s5pv310_clk_ip_lcd0_ctrl(struct clk *clk, int enable)
+{
+ return s5p_gatectrl(S5P_CLKGATE_IP_LCD0, clk, enable);
+}
+
+static int s5pv310_clk_ip_lcd1_ctrl(struct clk *clk, int enable)
+{
+ return s5p_gatectrl(S5P_CLKGATE_IP_LCD1, clk, enable);
+}
+
+static int s5pv310_clk_ip_fsys_ctrl(struct clk *clk, int enable)
+{
+ return s5p_gatectrl(S5P_CLKGATE_IP_FSYS, clk, enable);
+}
+
static int s5pv310_clk_ip_peril_ctrl(struct clk *clk, int enable)
{
return s5p_gatectrl(S5P_CLKGATE_IP_PERIL, clk, enable);
}
+static int s5pv310_clk_ip_perir_ctrl(struct clk *clk, int enable)
+{
+ return s5p_gatectrl(S5P_CLKGATE_IP_PERIR, clk, enable);
+}
+
/* Core list of CMU_CPU side */
static struct clksrc_clk clk_mout_apll = {
@@ -79,7 +155,7 @@ static struct clksrc_clk clk_mout_mpll = {
};
static struct clk *clkset_moutcore_list[] = {
- [0] = &clk_sclk_apll.clk,
+ [0] = &clk_mout_apll.clk,
[1] = &clk_mout_mpll.clk,
};
@@ -150,24 +226,6 @@ static struct clksrc_clk clk_periphclk = {
.reg_div = { .reg = S5P_CLKDIV_CPU, .shift = 12, .size = 3 },
};
-static struct clksrc_clk clk_atclk = {
- .clk = {
- .name = "atclk",
- .id = -1,
- .parent = &clk_moutcore.clk,
- },
- .reg_div = { .reg = S5P_CLKDIV_CPU, .shift = 16, .size = 3 },
-};
-
-static struct clksrc_clk clk_pclk_dbg = {
- .clk = {
- .name = "pclk_dbg",
- .id = -1,
- .parent = &clk_atclk.clk,
- },
- .reg_div = { .reg = S5P_CLKDIV_CPU, .shift = 20, .size = 3 },
-};
-
/* Core list of CMU_CORE side */
static struct clk *clkset_corebus_list[] = {
@@ -241,7 +299,7 @@ static struct clk *clkset_aclk_top_list[] = {
[1] = &clk_sclk_apll.clk,
};
-static struct clksrc_sources clkset_aclk_200 = {
+static struct clksrc_sources clkset_aclk = {
.sources = clkset_aclk_top_list,
.nr_sources = ARRAY_SIZE(clkset_aclk_top_list),
};
@@ -251,52 +309,37 @@ static struct clksrc_clk clk_aclk_200 = {
.name = "aclk_200",
.id = -1,
},
- .sources = &clkset_aclk_200,
+ .sources = &clkset_aclk,
.reg_src = { .reg = S5P_CLKSRC_TOP0, .shift = 12, .size = 1 },
.reg_div = { .reg = S5P_CLKDIV_TOP, .shift = 0, .size = 3 },
};
-static struct clksrc_sources clkset_aclk_100 = {
- .sources = clkset_aclk_top_list,
- .nr_sources = ARRAY_SIZE(clkset_aclk_top_list),
-};
-
static struct clksrc_clk clk_aclk_100 = {
.clk = {
.name = "aclk_100",
.id = -1,
},
- .sources = &clkset_aclk_100,
+ .sources = &clkset_aclk,
.reg_src = { .reg = S5P_CLKSRC_TOP0, .shift = 16, .size = 1 },
.reg_div = { .reg = S5P_CLKDIV_TOP, .shift = 4, .size = 4 },
};
-static struct clksrc_sources clkset_aclk_160 = {
- .sources = clkset_aclk_top_list,
- .nr_sources = ARRAY_SIZE(clkset_aclk_top_list),
-};
-
static struct clksrc_clk clk_aclk_160 = {
.clk = {
.name = "aclk_160",
.id = -1,
},
- .sources = &clkset_aclk_160,
+ .sources = &clkset_aclk,
.reg_src = { .reg = S5P_CLKSRC_TOP0, .shift = 20, .size = 1 },
.reg_div = { .reg = S5P_CLKDIV_TOP, .shift = 8, .size = 3 },
};
-static struct clksrc_sources clkset_aclk_133 = {
- .sources = clkset_aclk_top_list,
- .nr_sources = ARRAY_SIZE(clkset_aclk_top_list),
-};
-
static struct clksrc_clk clk_aclk_133 = {
.clk = {
.name = "aclk_133",
.id = -1,
},
- .sources = &clkset_aclk_133,
+ .sources = &clkset_aclk,
.reg_src = { .reg = S5P_CLKSRC_TOP0, .shift = 24, .size = 1 },
.reg_div = { .reg = S5P_CLKDIV_TOP, .shift = 12, .size = 3 },
};
@@ -315,6 +358,8 @@ static struct clksrc_clk clk_vpllsrc = {
.clk = {
.name = "vpll_src",
.id = -1,
+ .enable = s5pv310_clksrc_mask_top_ctrl,
+ .ctrlbit = (1 << 0),
},
.sources = &clkset_vpllsrc,
.reg_src = { .reg = S5P_CLKSRC_TOP1, .shift = 0, .size = 1 },
@@ -346,7 +391,175 @@ static struct clk init_clocks_disable[] = {
.parent = &clk_aclk_100.clk,
.enable = s5pv310_clk_ip_peril_ctrl,
.ctrlbit = (1<<24),
- }
+ }, {
+ .name = "csis",
+ .id = 0,
+ .enable = s5pv310_clk_ip_cam_ctrl,
+ .ctrlbit = (1 << 4),
+ }, {
+ .name = "csis",
+ .id = 1,
+ .enable = s5pv310_clk_ip_cam_ctrl,
+ .ctrlbit = (1 << 5),
+ }, {
+ .name = "fimc",
+ .id = 0,
+ .enable = s5pv310_clk_ip_cam_ctrl,
+ .ctrlbit = (1 << 0),
+ }, {
+ .name = "fimc",
+ .id = 1,
+ .enable = s5pv310_clk_ip_cam_ctrl,
+ .ctrlbit = (1 << 1),
+ }, {
+ .name = "fimc",
+ .id = 2,
+ .enable = s5pv310_clk_ip_cam_ctrl,
+ .ctrlbit = (1 << 2),
+ }, {
+ .name = "fimc",
+ .id = 3,
+ .enable = s5pv310_clk_ip_cam_ctrl,
+ .ctrlbit = (1 << 3),
+ }, {
+ .name = "fimd",
+ .id = 0,
+ .enable = s5pv310_clk_ip_lcd0_ctrl,
+ .ctrlbit = (1 << 0),
+ }, {
+ .name = "fimd",
+ .id = 1,
+ .enable = s5pv310_clk_ip_lcd1_ctrl,
+ .ctrlbit = (1 << 0),
+ }, {
+ .name = "hsmmc",
+ .id = 0,
+ .parent = &clk_aclk_133.clk,
+ .enable = s5pv310_clk_ip_fsys_ctrl,
+ .ctrlbit = (1 << 5),
+ }, {
+ .name = "hsmmc",
+ .id = 1,
+ .parent = &clk_aclk_133.clk,
+ .enable = s5pv310_clk_ip_fsys_ctrl,
+ .ctrlbit = (1 << 6),
+ }, {
+ .name = "hsmmc",
+ .id = 2,
+ .parent = &clk_aclk_133.clk,
+ .enable = s5pv310_clk_ip_fsys_ctrl,
+ .ctrlbit = (1 << 7),
+ }, {
+ .name = "hsmmc",
+ .id = 3,
+ .parent = &clk_aclk_133.clk,
+ .enable = s5pv310_clk_ip_fsys_ctrl,
+ .ctrlbit = (1 << 8),
+ }, {
+ .name = "hsmmc",
+ .id = 4,
+ .parent = &clk_aclk_133.clk,
+ .enable = s5pv310_clk_ip_fsys_ctrl,
+ .ctrlbit = (1 << 9),
+ }, {
+ .name = "sata",
+ .id = -1,
+ .enable = s5pv310_clk_ip_fsys_ctrl,
+ .ctrlbit = (1 << 10),
+ }, {
+ .name = "adc",
+ .id = -1,
+ .enable = s5pv310_clk_ip_peril_ctrl,
+ .ctrlbit = (1 << 15),
+ }, {
+ .name = "rtc",
+ .id = -1,
+ .enable = s5pv310_clk_ip_perir_ctrl,
+ .ctrlbit = (1 << 15),
+ }, {
+ .name = "watchdog",
+ .id = -1,
+ .enable = s5pv310_clk_ip_perir_ctrl,
+ .ctrlbit = (1 << 14),
+ }, {
+ .name = "usbhost",
+ .id = -1,
+ .enable = s5pv310_clk_ip_fsys_ctrl,
+ .ctrlbit = (1 << 12),
+ }, {
+ .name = "otg",
+ .id = -1,
+ .enable = s5pv310_clk_ip_fsys_ctrl,
+ .ctrlbit = (1 << 13),
+ }, {
+ .name = "spi",
+ .id = 0,
+ .enable = s5pv310_clk_ip_peril_ctrl,
+ .ctrlbit = (1 << 16),
+ }, {
+ .name = "spi",
+ .id = 1,
+ .enable = s5pv310_clk_ip_peril_ctrl,
+ .ctrlbit = (1 << 17),
+ }, {
+ .name = "spi",
+ .id = 2,
+ .enable = s5pv310_clk_ip_peril_ctrl,
+ .ctrlbit = (1 << 18),
+ }, {
+ .name = "fimg2d",
+ .id = -1,
+ .enable = s5pv310_clk_ip_image_ctrl,
+ .ctrlbit = (1 << 0),
+ }, {
+ .name = "i2c",
+ .id = 0,
+ .parent = &clk_aclk_100.clk,
+ .enable = s5pv310_clk_ip_peril_ctrl,
+ .ctrlbit = (1 << 6),
+ }, {
+ .name = "i2c",
+ .id = 1,
+ .parent = &clk_aclk_100.clk,
+ .enable = s5pv310_clk_ip_peril_ctrl,
+ .ctrlbit = (1 << 7),
+ }, {
+ .name = "i2c",
+ .id = 2,
+ .parent = &clk_aclk_100.clk,
+ .enable = s5pv310_clk_ip_peril_ctrl,
+ .ctrlbit = (1 << 8),
+ }, {
+ .name = "i2c",
+ .id = 3,
+ .parent = &clk_aclk_100.clk,
+ .enable = s5pv310_clk_ip_peril_ctrl,
+ .ctrlbit = (1 << 9),
+ }, {
+ .name = "i2c",
+ .id = 4,
+ .parent = &clk_aclk_100.clk,
+ .enable = s5pv310_clk_ip_peril_ctrl,
+ .ctrlbit = (1 << 10),
+ }, {
+ .name = "i2c",
+ .id = 5,
+ .parent = &clk_aclk_100.clk,
+ .enable = s5pv310_clk_ip_peril_ctrl,
+ .ctrlbit = (1 << 11),
+ }, {
+ .name = "i2c",
+ .id = 6,
+ .parent = &clk_aclk_100.clk,
+ .enable = s5pv310_clk_ip_peril_ctrl,
+ .ctrlbit = (1 << 12),
+ }, {
+ .name = "i2c",
+ .id = 7,
+ .parent = &clk_aclk_100.clk,
+ .enable = s5pv310_clk_ip_peril_ctrl,
+ .ctrlbit = (1 << 13),
+ },
};
static struct clk init_clocks[] = {
@@ -387,6 +600,9 @@ static struct clk *clkset_group_list[] = {
[0] = &clk_ext_xtal_mux,
[1] = &clk_xusbxti,
[2] = &clk_sclk_hdmi27m,
+ [3] = &clk_sclk_usbphy0,
+ [4] = &clk_sclk_usbphy1,
+ [5] = &clk_sclk_hdmiphy,
[6] = &clk_mout_mpll.clk,
[7] = &clk_mout_epll.clk,
[8] = &clk_sclk_vpll.clk,
@@ -397,6 +613,104 @@ static struct clksrc_sources clkset_group = {
.nr_sources = ARRAY_SIZE(clkset_group_list),
};
+static struct clk *clkset_mout_g2d0_list[] = {
+ [0] = &clk_mout_mpll.clk,
+ [1] = &clk_sclk_apll.clk,
+};
+
+static struct clksrc_sources clkset_mout_g2d0 = {
+ .sources = clkset_mout_g2d0_list,
+ .nr_sources = ARRAY_SIZE(clkset_mout_g2d0_list),
+};
+
+static struct clksrc_clk clk_mout_g2d0 = {
+ .clk = {
+ .name = "mout_g2d0",
+ .id = -1,
+ },
+ .sources = &clkset_mout_g2d0,
+ .reg_src = { .reg = S5P_CLKSRC_IMAGE, .shift = 0, .size = 1 },
+};
+
+static struct clk *clkset_mout_g2d1_list[] = {
+ [0] = &clk_mout_epll.clk,
+ [1] = &clk_sclk_vpll.clk,
+};
+
+static struct clksrc_sources clkset_mout_g2d1 = {
+ .sources = clkset_mout_g2d1_list,
+ .nr_sources = ARRAY_SIZE(clkset_mout_g2d1_list),
+};
+
+static struct clksrc_clk clk_mout_g2d1 = {
+ .clk = {
+ .name = "mout_g2d1",
+ .id = -1,
+ },
+ .sources = &clkset_mout_g2d1,
+ .reg_src = { .reg = S5P_CLKSRC_IMAGE, .shift = 4, .size = 1 },
+};
+
+static struct clk *clkset_mout_g2d_list[] = {
+ [0] = &clk_mout_g2d0.clk,
+ [1] = &clk_mout_g2d1.clk,
+};
+
+static struct clksrc_sources clkset_mout_g2d = {
+ .sources = clkset_mout_g2d_list,
+ .nr_sources = ARRAY_SIZE(clkset_mout_g2d_list),
+};
+
+static struct clksrc_clk clk_dout_mmc0 = {
+ .clk = {
+ .name = "dout_mmc0",
+ .id = -1,
+ },
+ .sources = &clkset_group,
+ .reg_src = { .reg = S5P_CLKSRC_FSYS, .shift = 0, .size = 4 },
+ .reg_div = { .reg = S5P_CLKDIV_FSYS1, .shift = 0, .size = 4 },
+};
+
+static struct clksrc_clk clk_dout_mmc1 = {
+ .clk = {
+ .name = "dout_mmc1",
+ .id = -1,
+ },
+ .sources = &clkset_group,
+ .reg_src = { .reg = S5P_CLKSRC_FSYS, .shift = 4, .size = 4 },
+ .reg_div = { .reg = S5P_CLKDIV_FSYS1, .shift = 16, .size = 4 },
+};
+
+static struct clksrc_clk clk_dout_mmc2 = {
+ .clk = {
+ .name = "dout_mmc2",
+ .id = -1,
+ },
+ .sources = &clkset_group,
+ .reg_src = { .reg = S5P_CLKSRC_FSYS, .shift = 8, .size = 4 },
+ .reg_div = { .reg = S5P_CLKDIV_FSYS2, .shift = 0, .size = 4 },
+};
+
+static struct clksrc_clk clk_dout_mmc3 = {
+ .clk = {
+ .name = "dout_mmc3",
+ .id = -1,
+ },
+ .sources = &clkset_group,
+ .reg_src = { .reg = S5P_CLKSRC_FSYS, .shift = 12, .size = 4 },
+ .reg_div = { .reg = S5P_CLKDIV_FSYS2, .shift = 16, .size = 4 },
+};
+
+static struct clksrc_clk clk_dout_mmc4 = {
+ .clk = {
+ .name = "dout_mmc4",
+ .id = -1,
+ },
+ .sources = &clkset_group,
+ .reg_src = { .reg = S5P_CLKSRC_FSYS, .shift = 16, .size = 4 },
+ .reg_div = { .reg = S5P_CLKDIV_FSYS3, .shift = 0, .size = 4 },
+};
+
static struct clksrc_clk clksrcs[] = {
{
.clk = {
@@ -448,7 +762,200 @@ static struct clksrc_clk clksrcs[] = {
.sources = &clkset_group,
.reg_src = { .reg = S5P_CLKSRC_PERIL0, .shift = 24, .size = 4 },
.reg_div = { .reg = S5P_CLKDIV_PERIL3, .shift = 0, .size = 4 },
- },
+ }, {
+ .clk = {
+ .name = "sclk_csis",
+ .id = 0,
+ .enable = s5pv310_clksrc_mask_cam_ctrl,
+ .ctrlbit = (1 << 24),
+ },
+ .sources = &clkset_group,
+ .reg_src = { .reg = S5P_CLKSRC_CAM, .shift = 24, .size = 4 },
+ .reg_div = { .reg = S5P_CLKDIV_CAM, .shift = 24, .size = 4 },
+ }, {
+ .clk = {
+ .name = "sclk_csis",
+ .id = 1,
+ .enable = s5pv310_clksrc_mask_cam_ctrl,
+ .ctrlbit = (1 << 28),
+ },
+ .sources = &clkset_group,
+ .reg_src = { .reg = S5P_CLKSRC_CAM, .shift = 28, .size = 4 },
+ .reg_div = { .reg = S5P_CLKDIV_CAM, .shift = 28, .size = 4 },
+ }, {
+ .clk = {
+ .name = "sclk_cam",
+ .id = 0,
+ .enable = s5pv310_clksrc_mask_cam_ctrl,
+ .ctrlbit = (1 << 16),
+ },
+ .sources = &clkset_group,
+ .reg_src = { .reg = S5P_CLKSRC_CAM, .shift = 16, .size = 4 },
+ .reg_div = { .reg = S5P_CLKDIV_CAM, .shift = 16, .size = 4 },
+ }, {
+ .clk = {
+ .name = "sclk_cam",
+ .id = 1,
+ .enable = s5pv310_clksrc_mask_cam_ctrl,
+ .ctrlbit = (1 << 20),
+ },
+ .sources = &clkset_group,
+ .reg_src = { .reg = S5P_CLKSRC_CAM, .shift = 20, .size = 4 },
+ .reg_div = { .reg = S5P_CLKDIV_CAM, .shift = 20, .size = 4 },
+ }, {
+ .clk = {
+ .name = "sclk_fimc",
+ .id = 0,
+ .enable = s5pv310_clksrc_mask_cam_ctrl,
+ .ctrlbit = (1 << 0),
+ },
+ .sources = &clkset_group,
+ .reg_src = { .reg = S5P_CLKSRC_CAM, .shift = 0, .size = 4 },
+ .reg_div = { .reg = S5P_CLKDIV_CAM, .shift = 0, .size = 4 },
+ }, {
+ .clk = {
+ .name = "sclk_fimc",
+ .id = 1,
+ .enable = s5pv310_clksrc_mask_cam_ctrl,
+ .ctrlbit = (1 << 4),
+ },
+ .sources = &clkset_group,
+ .reg_src = { .reg = S5P_CLKSRC_CAM, .shift = 4, .size = 4 },
+ .reg_div = { .reg = S5P_CLKDIV_CAM, .shift = 4, .size = 4 },
+ }, {
+ .clk = {
+ .name = "sclk_fimc",
+ .id = 2,
+ .enable = s5pv310_clksrc_mask_cam_ctrl,
+ .ctrlbit = (1 << 8),
+ },
+ .sources = &clkset_group,
+ .reg_src = { .reg = S5P_CLKSRC_CAM, .shift = 8, .size = 4 },
+ .reg_div = { .reg = S5P_CLKDIV_CAM, .shift = 8, .size = 4 },
+ }, {
+ .clk = {
+ .name = "sclk_fimc",
+ .id = 3,
+ .enable = s5pv310_clksrc_mask_cam_ctrl,
+ .ctrlbit = (1 << 12),
+ },
+ .sources = &clkset_group,
+ .reg_src = { .reg = S5P_CLKSRC_CAM, .shift = 12, .size = 4 },
+ .reg_div = { .reg = S5P_CLKDIV_CAM, .shift = 12, .size = 4 },
+ }, {
+ .clk = {
+ .name = "sclk_fimd",
+ .id = 0,
+ .enable = s5pv310_clksrc_mask_lcd0_ctrl,
+ .ctrlbit = (1 << 0),
+ },
+ .sources = &clkset_group,
+ .reg_src = { .reg = S5P_CLKSRC_LCD0, .shift = 0, .size = 4 },
+ .reg_div = { .reg = S5P_CLKDIV_LCD0, .shift = 0, .size = 4 },
+ }, {
+ .clk = {
+ .name = "sclk_fimd",
+ .id = 1,
+ .enable = s5pv310_clksrc_mask_lcd1_ctrl,
+ .ctrlbit = (1 << 0),
+ },
+ .sources = &clkset_group,
+ .reg_src = { .reg = S5P_CLKSRC_LCD1, .shift = 0, .size = 4 },
+ .reg_div = { .reg = S5P_CLKDIV_LCD1, .shift = 0, .size = 4 },
+ }, {
+ .clk = {
+ .name = "sclk_sata",
+ .id = -1,
+ .enable = s5pv310_clksrc_mask_fsys_ctrl,
+ .ctrlbit = (1 << 24),
+ },
+ .sources = &clkset_mout_corebus,
+ .reg_src = { .reg = S5P_CLKSRC_FSYS, .shift = 24, .size = 1 },
+ .reg_div = { .reg = S5P_CLKDIV_FSYS0, .shift = 20, .size = 4 },
+ }, {
+ .clk = {
+ .name = "sclk_spi",
+ .id = 0,
+ .enable = s5pv310_clksrc_mask_peril1_ctrl,
+ .ctrlbit = (1 << 16),
+ },
+ .sources = &clkset_group,
+ .reg_src = { .reg = S5P_CLKSRC_PERIL1, .shift = 16, .size = 4 },
+ .reg_div = { .reg = S5P_CLKDIV_PERIL1, .shift = 0, .size = 4 },
+ }, {
+ .clk = {
+ .name = "sclk_spi",
+ .id = 1,
+ .enable = s5pv310_clksrc_mask_peril1_ctrl,
+ .ctrlbit = (1 << 20),
+ },
+ .sources = &clkset_group,
+ .reg_src = { .reg = S5P_CLKSRC_PERIL1, .shift = 20, .size = 4 },
+ .reg_div = { .reg = S5P_CLKDIV_PERIL1, .shift = 16, .size = 4 },
+ }, {
+ .clk = {
+ .name = "sclk_spi",
+ .id = 2,
+ .enable = s5pv310_clksrc_mask_peril1_ctrl,
+ .ctrlbit = (1 << 24),
+ },
+ .sources = &clkset_group,
+ .reg_src = { .reg = S5P_CLKSRC_PERIL1, .shift = 24, .size = 4 },
+ .reg_div = { .reg = S5P_CLKDIV_PERIL2, .shift = 0, .size = 4 },
+ }, {
+ .clk = {
+ .name = "sclk_fimg2d",
+ .id = -1,
+ },
+ .sources = &clkset_mout_g2d,
+ .reg_src = { .reg = S5P_CLKSRC_IMAGE, .shift = 8, .size = 1 },
+ .reg_div = { .reg = S5P_CLKDIV_IMAGE, .shift = 0, .size = 4 },
+ }, {
+ .clk = {
+ .name = "sclk_mmc",
+ .id = 0,
+ .parent = &clk_dout_mmc0.clk,
+ .enable = s5pv310_clksrc_mask_fsys_ctrl,
+ .ctrlbit = (1 << 0),
+ },
+ .reg_div = { .reg = S5P_CLKDIV_FSYS1, .shift = 8, .size = 8 },
+ }, {
+ .clk = {
+ .name = "sclk_mmc",
+ .id = 1,
+ .parent = &clk_dout_mmc1.clk,
+ .enable = s5pv310_clksrc_mask_fsys_ctrl,
+ .ctrlbit = (1 << 4),
+ },
+ .reg_div = { .reg = S5P_CLKDIV_FSYS1, .shift = 24, .size = 8 },
+ }, {
+ .clk = {
+ .name = "sclk_mmc",
+ .id = 2,
+ .parent = &clk_dout_mmc2.clk,
+ .enable = s5pv310_clksrc_mask_fsys_ctrl,
+ .ctrlbit = (1 << 8),
+ },
+ .reg_div = { .reg = S5P_CLKDIV_FSYS2, .shift = 8, .size = 8 },
+ }, {
+ .clk = {
+ .name = "sclk_mmc",
+ .id = 3,
+ .parent = &clk_dout_mmc3.clk,
+ .enable = s5pv310_clksrc_mask_fsys_ctrl,
+ .ctrlbit = (1 << 12),
+ },
+ .reg_div = { .reg = S5P_CLKDIV_FSYS2, .shift = 24, .size = 8 },
+ }, {
+ .clk = {
+ .name = "sclk_mmc",
+ .id = 4,
+ .parent = &clk_dout_mmc4.clk,
+ .enable = s5pv310_clksrc_mask_fsys_ctrl,
+ .ctrlbit = (1 << 16),
+ },
+ .reg_div = { .reg = S5P_CLKDIV_FSYS3, .shift = 8, .size = 8 },
+ }
};
/* Clock initialization code */
@@ -464,8 +971,6 @@ static struct clksrc_clk *sysclks[] = {
&clk_aclk_cores,
&clk_aclk_corem1,
&clk_periphclk,
- &clk_atclk,
- &clk_pclk_dbg,
&clk_mout_corebus,
&clk_sclk_dmc,
&clk_aclk_cored,
@@ -478,6 +983,11 @@ static struct clksrc_clk *sysclks[] = {
&clk_aclk_100,
&clk_aclk_160,
&clk_aclk_133,
+ &clk_dout_mmc0,
+ &clk_dout_mmc1,
+ &clk_dout_mmc2,
+ &clk_dout_mmc3,
+ &clk_dout_mmc4,
};
void __init_or_cpufreq s5pv310_setup_clocks(void)
@@ -490,15 +1000,11 @@ void __init_or_cpufreq s5pv310_setup_clocks(void)
unsigned long vpllsrc;
unsigned long xtal;
unsigned long armclk;
- unsigned long aclk_corem0;
- unsigned long aclk_cores;
- unsigned long aclk_corem1;
- unsigned long periphclk;
unsigned long sclk_dmc;
- unsigned long aclk_cored;
- unsigned long aclk_corep;
- unsigned long aclk_acp;
- unsigned long pclk_acp;
+ unsigned long aclk_200;
+ unsigned long aclk_100;
+ unsigned long aclk_160;
+ unsigned long aclk_133;
unsigned int ptr;
printk(KERN_DEBUG "%s: registering clocks\n", __func__);
@@ -529,26 +1035,21 @@ void __init_or_cpufreq s5pv310_setup_clocks(void)
apll, mpll, epll, vpll);
armclk = clk_get_rate(&clk_armclk.clk);
- aclk_corem0 = clk_get_rate(&clk_aclk_corem0.clk);
- aclk_cores = clk_get_rate(&clk_aclk_cores.clk);
- aclk_corem1 = clk_get_rate(&clk_aclk_corem1.clk);
- periphclk = clk_get_rate(&clk_periphclk.clk);
sclk_dmc = clk_get_rate(&clk_sclk_dmc.clk);
- aclk_cored = clk_get_rate(&clk_aclk_cored.clk);
- aclk_corep = clk_get_rate(&clk_aclk_corep.clk);
- aclk_acp = clk_get_rate(&clk_aclk_acp.clk);
- pclk_acp = clk_get_rate(&clk_pclk_acp.clk);
-
- printk(KERN_INFO "S5PV310: ARMCLK=%ld, COREM0=%ld, CORES=%ld\n"
- "COREM1=%ld, PERI=%ld, DMC=%ld, CORED=%ld\n"
- "COREP=%ld, ACLK_ACP=%ld, PCLK_ACP=%ld",
- armclk, aclk_corem0, aclk_cores, aclk_corem1,
- periphclk, sclk_dmc, aclk_cored, aclk_corep,
- aclk_acp, pclk_acp);
+
+ aclk_200 = clk_get_rate(&clk_aclk_200.clk);
+ aclk_100 = clk_get_rate(&clk_aclk_100.clk);
+ aclk_160 = clk_get_rate(&clk_aclk_160.clk);
+ aclk_133 = clk_get_rate(&clk_aclk_133.clk);
+
+ printk(KERN_INFO "S5PV310: ARMCLK=%ld, DMC=%ld, ACLK200=%ld\n"
+ "ACLK100=%ld, ACLK160=%ld, ACLK133=%ld\n",
+ armclk, sclk_dmc, aclk_200,
+ aclk_100, aclk_160, aclk_133);
clk_f.rate = armclk;
clk_h.rate = sclk_dmc;
- clk_p.rate = periphclk;
+ clk_p.rate = aclk_100;
for (ptr = 0; ptr < ARRAY_SIZE(clksrcs); ptr++)
s3c_set_clksrc(&clksrcs[ptr], true);
diff --git a/arch/arm/mach-s5pv310/cpu.c b/arch/arm/mach-s5pv310/cpu.c
index 4add39853ff9..82ce4aa6d61a 100644
--- a/arch/arm/mach-s5pv310/cpu.c
+++ b/arch/arm/mach-s5pv310/cpu.c
@@ -15,10 +15,12 @@
#include <asm/mach/irq.h>
#include <asm/proc-fns.h>
+#include <asm/hardware/cache-l2x0.h>
#include <plat/cpu.h>
#include <plat/clock.h>
#include <plat/s5pv310.h>
+#include <plat/sdhci.h>
#include <mach/regs-irq.h>
@@ -56,15 +58,30 @@ static struct map_desc s5pv310_iodesc[] __initdata = {
.length = SZ_4K,
.type = MT_DEVICE,
}, {
- .virtual = (unsigned long)S5P_VA_GPIO,
+ .virtual = (unsigned long)S5P_VA_GPIO1,
.pfn = __phys_to_pfn(S5PV310_PA_GPIO1),
.length = SZ_4K,
.type = MT_DEVICE,
}, {
+ .virtual = (unsigned long)S5P_VA_GPIO2,
+ .pfn = __phys_to_pfn(S5PV310_PA_GPIO2),
+ .length = SZ_4K,
+ .type = MT_DEVICE,
+ }, {
+ .virtual = (unsigned long)S5P_VA_GPIO3,
+ .pfn = __phys_to_pfn(S5PV310_PA_GPIO3),
+ .length = SZ_256,
+ .type = MT_DEVICE,
+ }, {
.virtual = (unsigned long)S3C_VA_UART,
.pfn = __phys_to_pfn(S3C_PA_UART),
.length = SZ_512K,
.type = MT_DEVICE,
+ }, {
+ .virtual = (unsigned long)S5P_VA_SROMC,
+ .pfn = __phys_to_pfn(S5PV310_PA_SROMC),
+ .length = SZ_4K,
+ .type = MT_DEVICE,
},
};
@@ -83,6 +100,12 @@ static void s5pv310_idle(void)
void __init s5pv310_map_io(void)
{
iotable_init(s5pv310_iodesc, ARRAY_SIZE(s5pv310_iodesc));
+
+ /* initialize device information early */
+ s5pv310_default_sdhci0();
+ s5pv310_default_sdhci1();
+ s5pv310_default_sdhci2();
+ s5pv310_default_sdhci3();
}
void __init s5pv310_init_clocks(int xtal)
@@ -131,6 +154,28 @@ static int __init s5pv310_core_init(void)
core_initcall(s5pv310_core_init);
+#ifdef CONFIG_CACHE_L2X0
+static int __init s5pv310_l2x0_cache_init(void)
+{
+ /* Tag and Data Latency Control: 2 cycles */
+ __raw_writel(0x110, S5P_VA_L2CC + L2X0_TAG_LATENCY_CTRL);
+ __raw_writel(0x110, S5P_VA_L2CC + L2X0_DATA_LATENCY_CTRL);
+
+ /* L2X0 Prefetch Control */
+ __raw_writel(0x30000007, S5P_VA_L2CC + L2X0_PREFETCH_CTRL);
+
+ /* L2X0 Power Control */
+ __raw_writel(L2X0_DYNAMIC_CLK_GATING_EN | L2X0_STNDBY_MODE_EN,
+ S5P_VA_L2CC + L2X0_POWER_CTRL);
+
+ l2x0_init(S5P_VA_L2CC, 0x7C070001, 0xC200ffff);
+
+ return 0;
+}
+
+early_initcall(s5pv310_l2x0_cache_init);
+#endif
+
int __init s5pv310_init(void)
{
printk(KERN_INFO "S5PV310: Initializing architecture\n");
diff --git a/arch/arm/mach-s5pv310/gpiolib.c b/arch/arm/mach-s5pv310/gpiolib.c
new file mode 100644
index 000000000000..55217b8923ec
--- /dev/null
+++ b/arch/arm/mach-s5pv310/gpiolib.c
@@ -0,0 +1,304 @@
+/* linux/arch/arm/mach-s5pv310/gpiolib.c
+ *
+ * Copyright (c) 2010 Samsung Electronics Co., Ltd.
+ * http://www.samsung.com
+ *
+ * S5PV310 - GPIOlib support
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+*/
+
+#include <linux/kernel.h>
+#include <linux/irq.h>
+#include <linux/io.h>
+#include <linux/gpio.h>
+
+#include <mach/map.h>
+
+#include <plat/gpio-core.h>
+#include <plat/gpio-cfg.h>
+#include <plat/gpio-cfg-helpers.h>
+
+static struct s3c_gpio_cfg gpio_cfg = {
+ .set_config = s3c_gpio_setcfg_s3c64xx_4bit,
+ .set_pull = s3c_gpio_setpull_updown,
+ .get_pull = s3c_gpio_getpull_updown,
+};
+
+static struct s3c_gpio_cfg gpio_cfg_noint = {
+ .set_config = s3c_gpio_setcfg_s3c64xx_4bit,
+ .set_pull = s3c_gpio_setpull_updown,
+ .get_pull = s3c_gpio_getpull_updown,
+};
+
+/*
+ * Following are the gpio banks in v310.
+ *
+ * The 'config' member, when left NULL, is initialized to the default
+ * gpio_cfg structure in the init function below.
+ *
+ * The 'base' member is also initialized in the init function below.
+ * Note: the initialization of the 'base' member of s3c_gpio_chip
+ * depends on the banks being listed in order here.
+ */
+static struct s3c_gpio_chip s5pv310_gpio_part1_4bit[] = {
+ {
+ .chip = {
+ .base = S5PV310_GPA0(0),
+ .ngpio = S5PV310_GPIO_A0_NR,
+ .label = "GPA0",
+ },
+ }, {
+ .chip = {
+ .base = S5PV310_GPA1(0),
+ .ngpio = S5PV310_GPIO_A1_NR,
+ .label = "GPA1",
+ },
+ }, {
+ .chip = {
+ .base = S5PV310_GPB(0),
+ .ngpio = S5PV310_GPIO_B_NR,
+ .label = "GPB",
+ },
+ }, {
+ .chip = {
+ .base = S5PV310_GPC0(0),
+ .ngpio = S5PV310_GPIO_C0_NR,
+ .label = "GPC0",
+ },
+ }, {
+ .chip = {
+ .base = S5PV310_GPC1(0),
+ .ngpio = S5PV310_GPIO_C1_NR,
+ .label = "GPC1",
+ },
+ }, {
+ .chip = {
+ .base = S5PV310_GPD0(0),
+ .ngpio = S5PV310_GPIO_D0_NR,
+ .label = "GPD0",
+ },
+ }, {
+ .chip = {
+ .base = S5PV310_GPD1(0),
+ .ngpio = S5PV310_GPIO_D1_NR,
+ .label = "GPD1",
+ },
+ }, {
+ .chip = {
+ .base = S5PV310_GPE0(0),
+ .ngpio = S5PV310_GPIO_E0_NR,
+ .label = "GPE0",
+ },
+ }, {
+ .chip = {
+ .base = S5PV310_GPE1(0),
+ .ngpio = S5PV310_GPIO_E1_NR,
+ .label = "GPE1",
+ },
+ }, {
+ .chip = {
+ .base = S5PV310_GPE2(0),
+ .ngpio = S5PV310_GPIO_E2_NR,
+ .label = "GPE2",
+ },
+ }, {
+ .chip = {
+ .base = S5PV310_GPE3(0),
+ .ngpio = S5PV310_GPIO_E3_NR,
+ .label = "GPE3",
+ },
+ }, {
+ .chip = {
+ .base = S5PV310_GPE4(0),
+ .ngpio = S5PV310_GPIO_E4_NR,
+ .label = "GPE4",
+ },
+ }, {
+ .chip = {
+ .base = S5PV310_GPF0(0),
+ .ngpio = S5PV310_GPIO_F0_NR,
+ .label = "GPF0",
+ },
+ }, {
+ .chip = {
+ .base = S5PV310_GPF1(0),
+ .ngpio = S5PV310_GPIO_F1_NR,
+ .label = "GPF1",
+ },
+ }, {
+ .chip = {
+ .base = S5PV310_GPF2(0),
+ .ngpio = S5PV310_GPIO_F2_NR,
+ .label = "GPF2",
+ },
+ }, {
+ .chip = {
+ .base = S5PV310_GPF3(0),
+ .ngpio = S5PV310_GPIO_F3_NR,
+ .label = "GPF3",
+ },
+ },
+};
+
+static struct s3c_gpio_chip s5pv310_gpio_part2_4bit[] = {
+ {
+ .chip = {
+ .base = S5PV310_GPJ0(0),
+ .ngpio = S5PV310_GPIO_J0_NR,
+ .label = "GPJ0",
+ },
+ }, {
+ .chip = {
+ .base = S5PV310_GPJ1(0),
+ .ngpio = S5PV310_GPIO_J1_NR,
+ .label = "GPJ1",
+ },
+ }, {
+ .chip = {
+ .base = S5PV310_GPK0(0),
+ .ngpio = S5PV310_GPIO_K0_NR,
+ .label = "GPK0",
+ },
+ }, {
+ .chip = {
+ .base = S5PV310_GPK1(0),
+ .ngpio = S5PV310_GPIO_K1_NR,
+ .label = "GPK1",
+ },
+ }, {
+ .chip = {
+ .base = S5PV310_GPK2(0),
+ .ngpio = S5PV310_GPIO_K2_NR,
+ .label = "GPK2",
+ },
+ }, {
+ .chip = {
+ .base = S5PV310_GPK3(0),
+ .ngpio = S5PV310_GPIO_K3_NR,
+ .label = "GPK3",
+ },
+ }, {
+ .chip = {
+ .base = S5PV310_GPL0(0),
+ .ngpio = S5PV310_GPIO_L0_NR,
+ .label = "GPL0",
+ },
+ }, {
+ .chip = {
+ .base = S5PV310_GPL1(0),
+ .ngpio = S5PV310_GPIO_L1_NR,
+ .label = "GPL1",
+ },
+ }, {
+ .chip = {
+ .base = S5PV310_GPL2(0),
+ .ngpio = S5PV310_GPIO_L2_NR,
+ .label = "GPL2",
+ },
+ }, {
+ .base = (S5P_VA_GPIO2 + 0xC00),
+ .config = &gpio_cfg_noint,
+ .irq_base = IRQ_EINT(0),
+ .chip = {
+ .base = S5PV310_GPX0(0),
+ .ngpio = S5PV310_GPIO_X0_NR,
+ .label = "GPX0",
+ .to_irq = samsung_gpiolib_to_irq,
+ },
+ }, {
+ .base = (S5P_VA_GPIO2 + 0xC20),
+ .config = &gpio_cfg_noint,
+ .irq_base = IRQ_EINT(8),
+ .chip = {
+ .base = S5PV310_GPX1(0),
+ .ngpio = S5PV310_GPIO_X1_NR,
+ .label = "GPX1",
+ .to_irq = samsung_gpiolib_to_irq,
+ },
+ }, {
+ .base = (S5P_VA_GPIO2 + 0xC40),
+ .config = &gpio_cfg_noint,
+ .irq_base = IRQ_EINT(16),
+ .chip = {
+ .base = S5PV310_GPX2(0),
+ .ngpio = S5PV310_GPIO_X2_NR,
+ .label = "GPX2",
+ .to_irq = samsung_gpiolib_to_irq,
+ },
+ }, {
+ .base = (S5P_VA_GPIO2 + 0xC60),
+ .config = &gpio_cfg_noint,
+ .irq_base = IRQ_EINT(24),
+ .chip = {
+ .base = S5PV310_GPX3(0),
+ .ngpio = S5PV310_GPIO_X3_NR,
+ .label = "GPX3",
+ .to_irq = samsung_gpiolib_to_irq,
+ },
+ },
+};
+
+static struct s3c_gpio_chip s5pv310_gpio_part3_4bit[] = {
+ {
+ .chip = {
+ .base = S5PV310_GPZ(0),
+ .ngpio = S5PV310_GPIO_Z_NR,
+ .label = "GPZ",
+ },
+ },
+};
+
+static __init int s5pv310_gpiolib_init(void)
+{
+ struct s3c_gpio_chip *chip;
+ int i;
+ int nr_chips;
+
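+ /*
+ * Banks that do not set 'base' explicitly are assumed to sit at
+ * consecutive 0x20-byte offsets within their GPIO block, and banks
+ * without a 'config' fall back to the default gpio_cfg.
+ */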
+ /* GPIO part 1 */
+
+ chip = s5pv310_gpio_part1_4bit;
+ nr_chips = ARRAY_SIZE(s5pv310_gpio_part1_4bit);
+
+ for (i = 0; i < nr_chips; i++, chip++) {
+ if (chip->config == NULL)
+ chip->config = &gpio_cfg;
+ if (chip->base == NULL)
+ chip->base = S5P_VA_GPIO1 + (i) * 0x20;
+ }
+
+ samsung_gpiolib_add_4bit_chips(s5pv310_gpio_part1_4bit, nr_chips);
+
+ /* GPIO part 2 */
+
+ chip = s5pv310_gpio_part2_4bit;
+ nr_chips = ARRAY_SIZE(s5pv310_gpio_part2_4bit);
+
+ for (i = 0; i < nr_chips; i++, chip++) {
+ if (chip->config == NULL)
+ chip->config = &gpio_cfg;
+ if (chip->base == NULL)
+ chip->base = S5P_VA_GPIO2 + (i) * 0x20;
+ }
+
+ samsung_gpiolib_add_4bit_chips(s5pv310_gpio_part2_4bit, nr_chips);
+
+ /* GPIO part 3 */
+
+ chip = s5pv310_gpio_part3_4bit;
+ nr_chips = ARRAY_SIZE(s5pv310_gpio_part3_4bit);
+
+ for (i = 0; i < nr_chips; i++, chip++) {
+ if (chip->config == NULL)
+ chip->config = &gpio_cfg;
+ if (chip->base == NULL)
+ chip->base = S5P_VA_GPIO3 + (i) * 0x20;
+ }
+
+ samsung_gpiolib_add_4bit_chips(s5pv310_gpio_part3_4bit, nr_chips);
+
+ return 0;
+}
+core_initcall(s5pv310_gpiolib_init);
diff --git a/arch/arm/mach-s5pv310/hotplug.c b/arch/arm/mach-s5pv310/hotplug.c
new file mode 100644
index 000000000000..03652c3605f6
--- /dev/null
+++ b/arch/arm/mach-s5pv310/hotplug.c
@@ -0,0 +1,144 @@
+/* linux/arch/arm/mach-s5pv310/hotplug.c
+ *
+ * Cloned from linux/arch/arm/mach-realview/hotplug.c
+ *
+ * Copyright (C) 2002 ARM Ltd.
+ * All Rights Reserved
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+*/
+
+#include <linux/kernel.h>
+#include <linux/errno.h>
+#include <linux/smp.h>
+#include <linux/completion.h>
+
+#include <asm/cacheflush.h>
+
+extern volatile int pen_release;
+
+static DECLARE_COMPLETION(cpu_killed);
+
+static inline void cpu_enter_lowpower(void)
+{
+ unsigned int v;
+
+ flush_cache_all();
+ asm volatile(
+ " mcr p15, 0, %1, c7, c5, 0\n"
+ " mcr p15, 0, %1, c7, c10, 4\n"
+ /*
+ * Turn off coherency
+ */
+ " mrc p15, 0, %0, c1, c0, 1\n"
+ " bic %0, %0, #0x20\n"
+ " mcr p15, 0, %0, c1, c0, 1\n"
+ " mrc p15, 0, %0, c1, c0, 0\n"
+ " bic %0, %0, #0x04\n"
+ " mcr p15, 0, %0, c1, c0, 0\n"
+ : "=&r" (v)
+ : "r" (0)
+ : "cc");
+}
+
+static inline void cpu_leave_lowpower(void)
+{
+ unsigned int v;
+
+ asm volatile(
+ "mrc p15, 0, %0, c1, c0, 0\n"
+ " orr %0, %0, #0x04\n"
+ " mcr p15, 0, %0, c1, c0, 0\n"
+ " mrc p15, 0, %0, c1, c0, 1\n"
+ " orr %0, %0, #0x20\n"
+ " mcr p15, 0, %0, c1, c0, 1\n"
+ : "=&r" (v)
+ :
+ : "cc");
+}
+
+static inline void platform_do_lowpower(unsigned int cpu)
+{
+ /*
+ * there is no power-control hardware on this platform, so all
+ * we can do is put the core into WFI; this is safe as the calling
+ * code will have already disabled interrupts
+ */
+ for (;;) {
+ /*
+ * here's the WFI
+ */
+ asm(".word 0xe320f003\n"
+ :
+ :
+ : "memory", "cc");
+
+ if (pen_release == cpu) {
+ /*
+ * OK, proper wakeup, we're done
+ */
+ break;
+ }
+
+ /*
+ * getting here, means that we have come out of WFI without
+ * having been woken up - this shouldn't happen
+ *
+ * The trouble is, letting people know about this is not really
+ * possible, since we are currently running incoherently, and
+ * therefore cannot safely call printk() or anything else
+ */
+#ifdef DEBUG
+ printk(KERN_WARNING "CPU%u: spurious wakeup call\n", cpu);
+#endif
+ }
+}
+
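+/*
+ * Note: the timeout below is in jiffies; the raw value of 5000 is
+ * kept from the RealView code this file was cloned from.
+ */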
+int platform_cpu_kill(unsigned int cpu)
+{
+ return wait_for_completion_timeout(&cpu_killed, 5000);
+}
+
+/*
+ * platform-specific code to shutdown a CPU
+ *
+ * Called with IRQs disabled
+ */
+void platform_cpu_die(unsigned int cpu)
+{
+#ifdef DEBUG
+ unsigned int this_cpu = hard_smp_processor_id();
+
+ if (cpu != this_cpu) {
+ printk(KERN_CRIT "Eek! platform_cpu_die running on %u, should be %u\n",
+ this_cpu, cpu);
+ BUG();
+ }
+#endif
+
+ printk(KERN_NOTICE "CPU%u: shutdown\n", cpu);
+ complete(&cpu_killed);
+
+ /*
+ * we're ready for shutdown now, so do it
+ */
+ cpu_enter_lowpower();
+ platform_do_lowpower(cpu);
+
+ /*
+ * bring this CPU back into the world of cache
+ * coherency, and then restore interrupts
+ */
+ cpu_leave_lowpower();
+}
+
+int platform_cpu_disable(unsigned int cpu)
+{
+ /*
+ * we don't allow CPU 0 to be shut down (it is still too special,
+ * e.g. it handles the clock tick interrupts)
+ */
+ return cpu == 0 ? -EPERM : 0;
+}
diff --git a/arch/arm/mach-s5pv310/include/mach/irqs.h b/arch/arm/mach-s5pv310/include/mach/irqs.h
index 471fc3bb199a..99e7dad8a85a 100644
--- a/arch/arm/mach-s5pv310/include/mach/irqs.h
+++ b/arch/arm/mach-s5pv310/include/mach/irqs.h
@@ -3,7 +3,7 @@
* Copyright (c) 2010 Samsung Electronics Co., Ltd.
* http://www.samsung.com/
*
- * S5PV210 - IRQ definitions
+ * S5PV310 - IRQ definitions
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
@@ -60,6 +60,9 @@
#define IRQ_TIMER3_VIC COMBINER_IRQ(22, 3)
#define IRQ_TIMER4_VIC COMBINER_IRQ(22, 4)
+#define IRQ_RTC_ALARM COMBINER_IRQ(23, 0)
+#define IRQ_RTC_TIC COMBINER_IRQ(23, 1)
+
#define IRQ_UART0 COMBINER_IRQ(26, 0)
#define IRQ_UART1 COMBINER_IRQ(26, 1)
#define IRQ_UART2 COMBINER_IRQ(26, 2)
@@ -67,13 +70,46 @@
#define IRQ_UART4 COMBINER_IRQ(26, 4)
#define IRQ_IIC COMBINER_IRQ(27, 0)
+#define IRQ_IIC1 COMBINER_IRQ(27, 1)
+#define IRQ_IIC2 COMBINER_IRQ(27, 2)
+#define IRQ_IIC3 COMBINER_IRQ(27, 3)
+#define IRQ_IIC4 COMBINER_IRQ(27, 4)
+#define IRQ_IIC5 COMBINER_IRQ(27, 5)
+#define IRQ_IIC6 COMBINER_IRQ(27, 6)
+#define IRQ_IIC7 COMBINER_IRQ(27, 7)
+
+#define IRQ_HSMMC0 COMBINER_IRQ(29, 0)
+#define IRQ_HSMMC1 COMBINER_IRQ(29, 1)
+#define IRQ_HSMMC2 COMBINER_IRQ(29, 2)
+#define IRQ_HSMMC3 COMBINER_IRQ(29, 3)
#define IRQ_ONENAND_AUDI COMBINER_IRQ(34, 0)
-/* Set the default NR_IRQS */
+#define IRQ_EINT4 COMBINER_IRQ(37, 0)
+#define IRQ_EINT5 COMBINER_IRQ(37, 1)
+#define IRQ_EINT6 COMBINER_IRQ(37, 2)
+#define IRQ_EINT7 COMBINER_IRQ(37, 3)
+#define IRQ_EINT8 COMBINER_IRQ(38, 0)
+
+#define IRQ_EINT9 COMBINER_IRQ(38, 1)
+#define IRQ_EINT10 COMBINER_IRQ(38, 2)
+#define IRQ_EINT11 COMBINER_IRQ(38, 3)
+#define IRQ_EINT12 COMBINER_IRQ(38, 4)
+#define IRQ_EINT13 COMBINER_IRQ(38, 5)
+#define IRQ_EINT14 COMBINER_IRQ(38, 6)
+#define IRQ_EINT15 COMBINER_IRQ(38, 7)
+
+#define IRQ_EINT16_31 COMBINER_IRQ(39, 0)
-#define NR_IRQS COMBINER_IRQ(MAX_COMBINER_NR, 0)
+#define MAX_COMBINER_NR 40
+
+#define S5P_IRQ_EINT_BASE COMBINER_IRQ(MAX_COMBINER_NR, 0)
+
+#define S5P_EINT_BASE1 (S5P_IRQ_EINT_BASE + 0)
+#define S5P_EINT_BASE2 (S5P_IRQ_EINT_BASE + 16)
+
+/* Set the default NR_IRQS */
-#define MAX_COMBINER_NR 39
+#define NR_IRQS (S5P_IRQ_EINT_BASE + 32)
#endif /* __ASM_ARCH_IRQS_H */
diff --git a/arch/arm/mach-s5pv310/include/mach/map.h b/arch/arm/mach-s5pv310/include/mach/map.h
index aff6d23624bb..7acf4e77e92e 100644
--- a/arch/arm/mach-s5pv310/include/mach/map.h
+++ b/arch/arm/mach-s5pv310/include/mach/map.h
@@ -25,6 +25,8 @@
#define S5PV310_PA_SYSRAM (0x02025000)
+#define S5PV310_PA_SROM_BANK(x) (0x04000000 + ((x) * 0x01000000))
+
#define S5PC210_PA_ONENAND (0x0C000000)
#define S5P_PA_ONENAND S5PC210_PA_ONENAND
@@ -34,12 +36,13 @@
#define S5PV310_PA_CHIPID (0x10000000)
#define S5P_PA_CHIPID S5PV310_PA_CHIPID
-#define S5PV310_PA_SYSCON (0x10020000)
+#define S5PV310_PA_SYSCON (0x10010000)
#define S5P_PA_SYSCON S5PV310_PA_SYSCON
#define S5PV310_PA_CMU (0x10030000)
#define S5PV310_PA_WATCHDOG (0x10060000)
+#define S5PV310_PA_RTC (0x10070000)
#define S5PV310_PA_COMBINER (0x10448000)
@@ -55,6 +58,8 @@
#define S5PV310_PA_HSMMC(x) (0x12510000 + ((x) * 0x10000))
+#define S5PV310_PA_SROMC (0x12570000)
+
#define S5PV310_PA_UART (0x13800000)
#define S5P_PA_UART(x) (S5PV310_PA_UART + ((x) * S3C_UART_OFFSET))
@@ -66,7 +71,7 @@
#define S5P_SZ_UART SZ_256
-#define S5PV310_PA_IIC0 (0x13860000)
+#define S5PV310_PA_IIC(x) (0x13860000 + ((x) * 0x10000))
#define S5PV310_PA_TIMER (0x139D0000)
#define S5P_PA_TIMER S5PV310_PA_TIMER
@@ -80,7 +85,15 @@
#define S3C_PA_HSMMC1 S5PV310_PA_HSMMC(1)
#define S3C_PA_HSMMC2 S5PV310_PA_HSMMC(2)
#define S3C_PA_HSMMC3 S5PV310_PA_HSMMC(3)
-#define S3C_PA_IIC S5PV310_PA_IIC0
+#define S3C_PA_IIC S5PV310_PA_IIC(0)
+#define S3C_PA_IIC1 S5PV310_PA_IIC(1)
+#define S3C_PA_IIC2 S5PV310_PA_IIC(2)
+#define S3C_PA_IIC3 S5PV310_PA_IIC(3)
+#define S3C_PA_IIC4 S5PV310_PA_IIC(4)
+#define S3C_PA_IIC5 S5PV310_PA_IIC(5)
+#define S3C_PA_IIC6 S5PV310_PA_IIC(6)
+#define S3C_PA_IIC7 S5PV310_PA_IIC(7)
+#define S3C_PA_RTC S5PV310_PA_RTC
#define S3C_PA_WDT S5PV310_PA_WATCHDOG
#endif /* __ASM_ARCH_MAP_H */
diff --git a/arch/arm/mach-s5pv310/include/mach/regs-clock.h b/arch/arm/mach-s5pv310/include/mach/regs-clock.h
index 4013553cd9be..f1028cad9788 100644
--- a/arch/arm/mach-s5pv310/include/mach/regs-clock.h
+++ b/arch/arm/mach-s5pv310/include/mach/regs-clock.h
@@ -26,11 +26,23 @@
#define S5P_CLKSRC_TOP0 S5P_CLKREG(0x0C210)
#define S5P_CLKSRC_TOP1 S5P_CLKREG(0x0C214)
-
+#define S5P_CLKSRC_CAM S5P_CLKREG(0x0C220)
+#define S5P_CLKSRC_IMAGE S5P_CLKREG(0x0C230)
+#define S5P_CLKSRC_LCD0 S5P_CLKREG(0x0C234)
+#define S5P_CLKSRC_LCD1 S5P_CLKREG(0x0C238)
+#define S5P_CLKSRC_FSYS S5P_CLKREG(0x0C240)
#define S5P_CLKSRC_PERIL0 S5P_CLKREG(0x0C250)
+#define S5P_CLKSRC_PERIL1 S5P_CLKREG(0x0C254)
#define S5P_CLKDIV_TOP S5P_CLKREG(0x0C510)
-
+#define S5P_CLKDIV_CAM S5P_CLKREG(0x0C520)
+#define S5P_CLKDIV_IMAGE S5P_CLKREG(0x0C530)
+#define S5P_CLKDIV_LCD0 S5P_CLKREG(0x0C534)
+#define S5P_CLKDIV_LCD1 S5P_CLKREG(0x0C538)
+#define S5P_CLKDIV_FSYS0 S5P_CLKREG(0x0C540)
+#define S5P_CLKDIV_FSYS1 S5P_CLKREG(0x0C544)
+#define S5P_CLKDIV_FSYS2 S5P_CLKREG(0x0C548)
+#define S5P_CLKDIV_FSYS3 S5P_CLKREG(0x0C54C)
#define S5P_CLKDIV_PERIL0 S5P_CLKREG(0x0C550)
#define S5P_CLKDIV_PERIL1 S5P_CLKREG(0x0C554)
#define S5P_CLKDIV_PERIL2 S5P_CLKREG(0x0C558)
@@ -38,9 +50,21 @@
#define S5P_CLKDIV_PERIL4 S5P_CLKREG(0x0C560)
#define S5P_CLKDIV_PERIL5 S5P_CLKREG(0x0C564)
+#define S5P_CLKSRC_MASK_TOP S5P_CLKREG(0x0C310)
+#define S5P_CLKSRC_MASK_CAM S5P_CLKREG(0x0C320)
+#define S5P_CLKSRC_MASK_LCD0 S5P_CLKREG(0x0C334)
+#define S5P_CLKSRC_MASK_LCD1 S5P_CLKREG(0x0C338)
+#define S5P_CLKSRC_MASK_FSYS S5P_CLKREG(0x0C340)
#define S5P_CLKSRC_MASK_PERIL0 S5P_CLKREG(0x0C350)
+#define S5P_CLKSRC_MASK_PERIL1 S5P_CLKREG(0x0C354)
+#define S5P_CLKGATE_IP_CAM S5P_CLKREG(0x0C920)
+#define S5P_CLKGATE_IP_IMAGE S5P_CLKREG(0x0C930)
+#define S5P_CLKGATE_IP_LCD0 S5P_CLKREG(0x0C934)
+#define S5P_CLKGATE_IP_LCD1 S5P_CLKREG(0x0C938)
+#define S5P_CLKGATE_IP_FSYS S5P_CLKREG(0x0C940)
#define S5P_CLKGATE_IP_PERIL S5P_CLKREG(0x0C950)
+#define S5P_CLKGATE_IP_PERIR S5P_CLKREG(0x0C960)
#define S5P_CLKSRC_CORE S5P_CLKREG(0x10200)
#define S5P_CLKDIV_CORE0 S5P_CLKREG(0x10500)
@@ -60,4 +84,8 @@
#define S5P_CLKGATE_SCLKCPU S5P_CLKREG(0x14800)
+/* Compatibility defines */
+
+#define S5P_EPLL_CON S5P_EPLL_CON0
+
#endif /* __ASM_ARCH_REGS_CLOCK_H */
diff --git a/arch/arm/mach-s5pv310/include/mach/regs-gpio.h b/arch/arm/mach-s5pv310/include/mach/regs-gpio.h
new file mode 100644
index 000000000000..82e9e0c9d452
--- /dev/null
+++ b/arch/arm/mach-s5pv310/include/mach/regs-gpio.h
@@ -0,0 +1,42 @@
+/* linux/arch/arm/mach-s5pv310/include/mach/regs-gpio.h
+ *
+ * Copyright (c) 2010 Samsung Electronics Co., Ltd.
+ * http://www.samsung.com
+ *
+ * S5PV310 - GPIO (including EINT) register definitions
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+*/
+
+#ifndef __ASM_ARCH_REGS_GPIO_H
+#define __ASM_ARCH_REGS_GPIO_H __FILE__
+
+#include <mach/map.h>
+#include <mach/irqs.h>
+
+#define S5PV310_EINT40CON (S5P_VA_GPIO2 + 0xE00)
+#define S5P_EINT_CON(x) (S5PV310_EINT40CON + ((x) * 0x4))
+
+#define S5PV310_EINT40FLTCON0 (S5P_VA_GPIO2 + 0xE80)
+#define S5P_EINT_FLTCON(x) (S5PV310_EINT40FLTCON0 + ((x) * 0x4))
+
+#define S5PV310_EINT40MASK (S5P_VA_GPIO2 + 0xF00)
+#define S5P_EINT_MASK(x) (S5PV310_EINT40MASK + ((x) * 0x4))
+
+#define S5PV310_EINT40PEND (S5P_VA_GPIO2 + 0xF40)
+#define S5P_EINT_PEND(x) (S5PV310_EINT40PEND + ((x) * 0x4))
+
+#define EINT_REG_NR(x) (EINT_OFFSET(x) >> 3)
+
+#define eint_irq_to_bit(irq) (1 << (EINT_OFFSET(irq) & 0x7))
+
+#define EINT_MODE S3C_GPIO_SFN(0xf)
+
+#define EINT_GPIO_0(x) S5PV310_GPX0(x)
+#define EINT_GPIO_1(x) S5PV310_GPX1(x)
+#define EINT_GPIO_2(x) S5PV310_GPX2(x)
+#define EINT_GPIO_3(x) S5PV310_GPX3(x)
+
+#endif /* __ASM_ARCH_REGS_GPIO_H */
diff --git a/arch/arm/mach-s5pv310/include/mach/regs-srom.h b/arch/arm/mach-s5pv310/include/mach/regs-srom.h
new file mode 100644
index 000000000000..1898b3e10550
--- /dev/null
+++ b/arch/arm/mach-s5pv310/include/mach/regs-srom.h
@@ -0,0 +1,50 @@
+/* linux/arch/arm/mach-s5pv310/include/mach/regs-srom.h
+ *
+ * Copyright (c) 2010 Samsung Electronics Co., Ltd.
+ * http://www.samsung.com
+ *
+ * S5PV310 - SROMC register definitions
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+*/
+
+#ifndef __ASM_ARCH_REGS_SROM_H
+#define __ASM_ARCH_REGS_SROM_H __FILE__
+
+#include <mach/map.h>
+
+#define S5PV310_SROMREG(x) (S5P_VA_SROMC + (x))
+
+#define S5PV310_SROM_BW S5PV310_SROMREG(0x0)
+#define S5PV310_SROM_BC0 S5PV310_SROMREG(0x4)
+#define S5PV310_SROM_BC1 S5PV310_SROMREG(0x8)
+#define S5PV310_SROM_BC2 S5PV310_SROMREG(0xc)
+#define S5PV310_SROM_BC3 S5PV310_SROMREG(0x10)
+
+/* one register BW holds 4 x 4-bit packed settings for NCS0 - NCS3 */
+
+#define S5PV310_SROM_BW__DATAWIDTH__SHIFT 0
+#define S5PV310_SROM_BW__ADDRMODE__SHIFT 1
+#define S5PV310_SROM_BW__WAITENABLE__SHIFT 2
+#define S5PV310_SROM_BW__BYTEENABLE__SHIFT 3
+
+#define S5PV310_SROM_BW__CS_MASK 0xf
+
+#define S5PV310_SROM_BW__NCS0__SHIFT 0
+#define S5PV310_SROM_BW__NCS1__SHIFT 4
+#define S5PV310_SROM_BW__NCS2__SHIFT 8
+#define S5PV310_SROM_BW__NCS3__SHIFT 12
+
+/* the same applies to the BC0 - BC3 registers */
+
+#define S5PV310_SROM_BCX__PMC__SHIFT 0
+#define S5PV310_SROM_BCX__TACP__SHIFT 4
+#define S5PV310_SROM_BCX__TCAH__SHIFT 8
+#define S5PV310_SROM_BCX__TCOH__SHIFT 12
+#define S5PV310_SROM_BCX__TACC__SHIFT 16
+#define S5PV310_SROM_BCX__TCOS__SHIFT 24
+#define S5PV310_SROM_BCX__TACS__SHIFT 28
+
+#endif /* __ASM_ARCH_REGS_SROM_H */
diff --git a/arch/arm/mach-s5pv310/include/mach/vmalloc.h b/arch/arm/mach-s5pv310/include/mach/vmalloc.h
index 256f221edf3a..65759fb97581 100644
--- a/arch/arm/mach-s5pv310/include/mach/vmalloc.h
+++ b/arch/arm/mach-s5pv310/include/mach/vmalloc.h
@@ -17,6 +17,6 @@
#ifndef __ASM_ARCH_VMALLOC_H
#define __ASM_ARCH_VMALLOC_H __FILE__
-#define VMALLOC_END (0xF0000000UL)
+#define VMALLOC_END 0xF6000000UL
#endif /* __ASM_ARCH_VMALLOC_H */
diff --git a/arch/arm/mach-s5pv310/irq-combiner.c b/arch/arm/mach-s5pv310/irq-combiner.c
index 0f7052164f23..c3f88c3faf6c 100644
--- a/arch/arm/mach-s5pv310/irq-combiner.c
+++ b/arch/arm/mach-s5pv310/irq-combiner.c
@@ -66,11 +66,7 @@ static void combiner_handle_cascade_irq(unsigned int irq, struct irq_desc *desc)
if (status == 0)
goto out;
- for (combiner_irq = 0; combiner_irq < 32; combiner_irq++) {
- if (status & 0x1)
- break;
- status >>= 1;
- }
+ combiner_irq = __ffs(status);
cascade_irq = combiner_irq + (chip_data->irq_offset & ~31);
if (unlikely(cascade_irq >= NR_IRQS))
diff --git a/arch/arm/mach-s5pv310/irq-eint.c b/arch/arm/mach-s5pv310/irq-eint.c
new file mode 100644
index 000000000000..5877503e92c3
--- /dev/null
+++ b/arch/arm/mach-s5pv310/irq-eint.c
@@ -0,0 +1,228 @@
+/* linux/arch/arm/mach-s5pv310/irq-eint.c
+ *
+ * Copyright (c) 2010 Samsung Electronics Co., Ltd.
+ * http://www.samsung.com
+ *
+ * S5PV310 - IRQ EINT support
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+*/
+
+#include <linux/kernel.h>
+#include <linux/interrupt.h>
+#include <linux/irq.h>
+#include <linux/io.h>
+#include <linux/sysdev.h>
+#include <linux/gpio.h>
+
+#include <plat/pm.h>
+#include <plat/cpu.h>
+#include <plat/gpio-cfg.h>
+
+#include <mach/regs-gpio.h>
+
+static DEFINE_SPINLOCK(eint_lock);
+
+static unsigned int eint0_15_data[16];
+
+static unsigned int s5pv310_get_irq_nr(unsigned int number)
+{
+ u32 ret = 0;
+
+ switch (number) {
+ case 0 ... 3:
+ ret = (number + IRQ_EINT0);
+ break;
+ case 4 ... 7:
+ ret = (number + (IRQ_EINT4 - 4));
+ break;
+ case 8 ... 15:
+ ret = (number + (IRQ_EINT8 - 8));
+ break;
+ default:
+ printk(KERN_ERR "number available : %d\n", number);
+ }
+
+ return ret;
+}
+
+static inline void s5pv310_irq_eint_mask(unsigned int irq)
+{
+ u32 mask;
+
+ spin_lock(&eint_lock);
+ mask = __raw_readl(S5P_EINT_MASK(EINT_REG_NR(irq)));
+ mask |= eint_irq_to_bit(irq);
+ __raw_writel(mask, S5P_EINT_MASK(EINT_REG_NR(irq)));
+ spin_unlock(&eint_lock);
+}
+
+static void s5pv310_irq_eint_unmask(unsigned int irq)
+{
+ u32 mask;
+
+ spin_lock(&eint_lock);
+ mask = __raw_readl(S5P_EINT_MASK(EINT_REG_NR(irq)));
+ mask &= ~(eint_irq_to_bit(irq));
+ __raw_writel(mask, S5P_EINT_MASK(EINT_REG_NR(irq)));
+ spin_unlock(&eint_lock);
+}
+
+static inline void s5pv310_irq_eint_ack(unsigned int irq)
+{
+ __raw_writel(eint_irq_to_bit(irq), S5P_EINT_PEND(EINT_REG_NR(irq)));
+}
+
+static void s5pv310_irq_eint_maskack(unsigned int irq)
+{
+ s5pv310_irq_eint_mask(irq);
+ s5pv310_irq_eint_ack(irq);
+}
+
+static int s5pv310_irq_eint_set_type(unsigned int irq, unsigned int type)
+{
+ int offs = EINT_OFFSET(irq);
+ int shift;
+ u32 ctrl, mask;
+ u32 newvalue = 0;
+
+ switch (type) {
+ case IRQ_TYPE_EDGE_RISING:
+ newvalue = S5P_IRQ_TYPE_EDGE_RISING;
+ break;
+
+ case IRQ_TYPE_EDGE_FALLING:
+ newvalue = S5P_IRQ_TYPE_EDGE_FALLING;
+ break;
+
+ case IRQ_TYPE_EDGE_BOTH:
+ newvalue = S5P_IRQ_TYPE_EDGE_BOTH;
+ break;
+
+ case IRQ_TYPE_LEVEL_LOW:
+ newvalue = S5P_IRQ_TYPE_LEVEL_LOW;
+ break;
+
+ case IRQ_TYPE_LEVEL_HIGH:
+ newvalue = S5P_IRQ_TYPE_LEVEL_HIGH;
+ break;
+
+ default:
+ printk(KERN_ERR "No such irq type %d", type);
+ return -EINVAL;
+ }
+
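+ /* each EINT has a 4-bit trigger field; eight EINTs share one CON register */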
+ shift = (offs & 0x7) * 4;
+ mask = 0x7 << shift;
+
+ spin_lock(&eint_lock);
+ ctrl = __raw_readl(S5P_EINT_CON(EINT_REG_NR(irq)));
+ ctrl &= ~mask;
+ ctrl |= newvalue << shift;
+ __raw_writel(ctrl, S5P_EINT_CON(EINT_REG_NR(irq)));
+ spin_unlock(&eint_lock);
+
+ switch (offs) {
+ case 0 ... 7:
+ s3c_gpio_cfgpin(EINT_GPIO_0(offs & 0x7), EINT_MODE);
+ break;
+ case 8 ... 15:
+ s3c_gpio_cfgpin(EINT_GPIO_1(offs & 0x7), EINT_MODE);
+ break;
+ case 16 ... 23:
+ s3c_gpio_cfgpin(EINT_GPIO_2(offs & 0x7), EINT_MODE);
+ break;
+ case 24 ... 31:
+ s3c_gpio_cfgpin(EINT_GPIO_3(offs & 0x7), EINT_MODE);
+ break;
+ default:
+ printk(KERN_ERR "No such irq number %d", offs);
+ }
+
+ return 0;
+}
+
+static struct irq_chip s5pv310_irq_eint = {
+ .name = "s5pv310-eint",
+ .mask = s5pv310_irq_eint_mask,
+ .unmask = s5pv310_irq_eint_unmask,
+ .mask_ack = s5pv310_irq_eint_maskack,
+ .ack = s5pv310_irq_eint_ack,
+ .set_type = s5pv310_irq_eint_set_type,
+#ifdef CONFIG_PM
+ .set_wake = s3c_irqext_wake,
+#endif
+};
+
+/* s5pv310_irq_demux_eint
+ *
+ * This function demuxes the IRQs from EINTs 16 to 31.
+ * It is designed to be inlined into the specific handler
+ * s5p_irq_demux_eintX_Y.
+ *
+ * Each EINT pend/mask register handles eight of them.
+ */
+static inline void s5pv310_irq_demux_eint(unsigned int start)
+{
+ unsigned int irq;
+
+ u32 status = __raw_readl(S5P_EINT_PEND(EINT_REG_NR(start)));
+ u32 mask = __raw_readl(S5P_EINT_MASK(EINT_REG_NR(start)));
+
+ status &= ~mask;
+ status &= 0xff;
+
+ while (status) {
+ irq = fls(status) - 1;
+ generic_handle_irq(irq + start);
+ status &= ~(1 << irq);
+ }
+}
+
+static void s5pv310_irq_demux_eint16_31(unsigned int irq, struct irq_desc *desc)
+{
+ s5pv310_irq_demux_eint(IRQ_EINT(16));
+ s5pv310_irq_demux_eint(IRQ_EINT(24));
+}
+
+static void s5pv310_irq_eint0_15(unsigned int irq, struct irq_desc *desc)
+{
+ u32 *irq_data = get_irq_data(irq);
+ struct irq_chip *chip = get_irq_chip(irq);
+
+ chip->mask(irq);
+
+ if (chip->ack)
+ chip->ack(irq);
+
+ generic_handle_irq(*irq_data);
+
+ chip->unmask(irq);
+}
+
+int __init s5pv310_init_irq_eint(void)
+{
+ int irq;
+
+ for (irq = 0 ; irq <= 31 ; irq++) {
+ set_irq_chip(IRQ_EINT(irq), &s5pv310_irq_eint);
+ set_irq_handler(IRQ_EINT(irq), handle_level_irq);
+ set_irq_flags(IRQ_EINT(irq), IRQF_VALID);
+ }
+
+ set_irq_chained_handler(IRQ_EINT16_31, s5pv310_irq_demux_eint16_31);
+
+ for (irq = 0 ; irq <= 15 ; irq++) {
+ eint0_15_data[irq] = IRQ_EINT(irq);
+
+ set_irq_data(s5pv310_get_irq_nr(irq), &eint0_15_data[irq]);
+ set_irq_chained_handler(s5pv310_get_irq_nr(irq),
+ s5pv310_irq_eint0_15);
+ }
+
+ return 0;
+}
+
+arch_initcall(s5pv310_init_irq_eint);
diff --git a/arch/arm/mach-s5pv310/mach-smdkc210.c b/arch/arm/mach-s5pv310/mach-smdkc210.c
new file mode 100644
index 000000000000..2b8d4fc52d7c
--- /dev/null
+++ b/arch/arm/mach-s5pv310/mach-smdkc210.c
@@ -0,0 +1,202 @@
+/* linux/arch/arm/mach-s5pv310/mach-smdkc210.c
+ *
+ * Copyright (c) 2010 Samsung Electronics Co., Ltd.
+ * http://www.samsung.com/
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+*/
+
+#include <linux/serial_core.h>
+#include <linux/gpio.h>
+#include <linux/mmc/host.h>
+#include <linux/platform_device.h>
+#include <linux/smsc911x.h>
+#include <linux/io.h>
+
+#include <asm/mach/arch.h>
+#include <asm/mach-types.h>
+
+#include <plat/regs-serial.h>
+#include <plat/s5pv310.h>
+#include <plat/cpu.h>
+#include <plat/devs.h>
+#include <plat/sdhci.h>
+
+#include <mach/map.h>
+#include <mach/regs-srom.h>
+
+/* Following are default values for UCON, ULCON and UFCON UART registers */
+#define SMDKC210_UCON_DEFAULT (S3C2410_UCON_TXILEVEL | \
+ S3C2410_UCON_RXILEVEL | \
+ S3C2410_UCON_TXIRQMODE | \
+ S3C2410_UCON_RXIRQMODE | \
+ S3C2410_UCON_RXFIFO_TOI | \
+ S3C2443_UCON_RXERR_IRQEN)
+
+#define SMDKC210_ULCON_DEFAULT S3C2410_LCON_CS8
+
+#define SMDKC210_UFCON_DEFAULT (S3C2410_UFCON_FIFOMODE | \
+ S5PV210_UFCON_TXTRIG4 | \
+ S5PV210_UFCON_RXTRIG4)
+
+static struct s3c2410_uartcfg smdkc210_uartcfgs[] __initdata = {
+ [0] = {
+ .hwport = 0,
+ .flags = 0,
+ .ucon = SMDKC210_UCON_DEFAULT,
+ .ulcon = SMDKC210_ULCON_DEFAULT,
+ .ufcon = SMDKC210_UFCON_DEFAULT,
+ },
+ [1] = {
+ .hwport = 1,
+ .flags = 0,
+ .ucon = SMDKC210_UCON_DEFAULT,
+ .ulcon = SMDKC210_ULCON_DEFAULT,
+ .ufcon = SMDKC210_UFCON_DEFAULT,
+ },
+ [2] = {
+ .hwport = 2,
+ .flags = 0,
+ .ucon = SMDKC210_UCON_DEFAULT,
+ .ulcon = SMDKC210_ULCON_DEFAULT,
+ .ufcon = SMDKC210_UFCON_DEFAULT,
+ },
+ [3] = {
+ .hwport = 3,
+ .flags = 0,
+ .ucon = SMDKC210_UCON_DEFAULT,
+ .ulcon = SMDKC210_ULCON_DEFAULT,
+ .ufcon = SMDKC210_UFCON_DEFAULT,
+ },
+};
+
+static struct s3c_sdhci_platdata smdkc210_hsmmc0_pdata __initdata = {
+ .cd_type = S3C_SDHCI_CD_GPIO,
+ .ext_cd_gpio = S5PV310_GPK0(2),
+ .ext_cd_gpio_invert = 1,
+ .clk_type = S3C_SDHCI_CLK_DIV_EXTERNAL,
+#ifdef CONFIG_S5PV310_SDHCI_CH0_8BIT
+ .max_width = 8,
+ .host_caps = MMC_CAP_8_BIT_DATA,
+#endif
+};
+
+static struct s3c_sdhci_platdata smdkc210_hsmmc1_pdata __initdata = {
+ .cd_type = S3C_SDHCI_CD_GPIO,
+ .ext_cd_gpio = S5PV310_GPK0(2),
+ .ext_cd_gpio_invert = 1,
+ .clk_type = S3C_SDHCI_CLK_DIV_EXTERNAL,
+};
+
+static struct s3c_sdhci_platdata smdkc210_hsmmc2_pdata __initdata = {
+ .cd_type = S3C_SDHCI_CD_GPIO,
+ .ext_cd_gpio = S5PV310_GPK2(2),
+ .ext_cd_gpio_invert = 1,
+ .clk_type = S3C_SDHCI_CLK_DIV_EXTERNAL,
+#ifdef CONFIG_S5PV310_SDHCI_CH2_8BIT
+ .max_width = 8,
+ .host_caps = MMC_CAP_8_BIT_DATA,
+#endif
+};
+
+static struct s3c_sdhci_platdata smdkc210_hsmmc3_pdata __initdata = {
+ .cd_type = S3C_SDHCI_CD_GPIO,
+ .ext_cd_gpio = S5PV310_GPK2(2),
+ .ext_cd_gpio_invert = 1,
+ .clk_type = S3C_SDHCI_CLK_DIV_EXTERNAL,
+};
+
+static struct resource smdkc210_smsc911x_resources[] = {
+ [0] = {
+ .start = S5PV310_PA_SROM_BANK(1),
+ .end = S5PV310_PA_SROM_BANK(1) + SZ_64K - 1,
+ .flags = IORESOURCE_MEM,
+ },
+ [1] = {
+ .start = IRQ_EINT(5),
+ .end = IRQ_EINT(5),
+ .flags = IORESOURCE_IRQ | IRQF_TRIGGER_LOW,
+ },
+};
+
+static struct smsc911x_platform_config smsc9215_config = {
+ .irq_polarity = SMSC911X_IRQ_POLARITY_ACTIVE_HIGH,
+ .irq_type = SMSC911X_IRQ_TYPE_PUSH_PULL,
+ .flags = SMSC911X_USE_16BIT | SMSC911X_FORCE_INTERNAL_PHY,
+ .phy_interface = PHY_INTERFACE_MODE_MII,
+ .mac = {0x00, 0x80, 0x00, 0x23, 0x45, 0x67},
+};
+
+static struct platform_device smdkc210_smsc911x = {
+ .name = "smsc911x",
+ .id = -1,
+ .num_resources = ARRAY_SIZE(smdkc210_smsc911x_resources),
+ .resource = smdkc210_smsc911x_resources,
+ .dev = {
+ .platform_data = &smsc9215_config,
+ },
+};
+
+static struct platform_device *smdkc210_devices[] __initdata = {
+ &s3c_device_hsmmc0,
+ &s3c_device_hsmmc1,
+ &s3c_device_hsmmc2,
+ &s3c_device_hsmmc3,
+ &s3c_device_rtc,
+ &s3c_device_wdt,
+ &smdkc210_smsc911x,
+};
+
+static void __init smdkc210_smsc911x_init(void)
+{
+ u32 cs1;
+
+ /* configure nCS1 width to 16 bits */
+ cs1 = __raw_readl(S5PV310_SROM_BW) &
+ ~(S5PV310_SROM_BW__CS_MASK <<
+ S5PV310_SROM_BW__NCS1__SHIFT);
+ cs1 |= ((1 << S5PV310_SROM_BW__DATAWIDTH__SHIFT) |
+ (1 << S5PV310_SROM_BW__WAITENABLE__SHIFT) |
+ (1 << S5PV310_SROM_BW__BYTEENABLE__SHIFT)) <<
+ S5PV310_SROM_BW__NCS1__SHIFT;
+ __raw_writel(cs1, S5PV310_SROM_BW);
+
+ /* set timing for nCS1 suitable for ethernet chip */
+ __raw_writel((0x1 << S5PV310_SROM_BCX__PMC__SHIFT) |
+ (0x9 << S5PV310_SROM_BCX__TACP__SHIFT) |
+ (0xc << S5PV310_SROM_BCX__TCAH__SHIFT) |
+ (0x1 << S5PV310_SROM_BCX__TCOH__SHIFT) |
+ (0x6 << S5PV310_SROM_BCX__TACC__SHIFT) |
+ (0x1 << S5PV310_SROM_BCX__TCOS__SHIFT) |
+ (0x1 << S5PV310_SROM_BCX__TACS__SHIFT), S5PV310_SROM_BC1);
+}
+
+static void __init smdkc210_map_io(void)
+{
+ s5p_init_io(NULL, 0, S5P_VA_CHIPID);
+ s3c24xx_init_clocks(24000000);
+ s3c24xx_init_uarts(smdkc210_uartcfgs, ARRAY_SIZE(smdkc210_uartcfgs));
+}
+
+static void __init smdkc210_machine_init(void)
+{
+ smdkc210_smsc911x_init();
+
+ s3c_sdhci0_set_platdata(&smdkc210_hsmmc0_pdata);
+ s3c_sdhci1_set_platdata(&smdkc210_hsmmc1_pdata);
+ s3c_sdhci2_set_platdata(&smdkc210_hsmmc2_pdata);
+ s3c_sdhci3_set_platdata(&smdkc210_hsmmc3_pdata);
+
+ platform_add_devices(smdkc210_devices, ARRAY_SIZE(smdkc210_devices));
+}
+
+MACHINE_START(SMDKC210, "SMDKC210")
+ /* Maintainer: Kukjin Kim <kgene.kim@samsung.com> */
+ .boot_params = S5P_PA_SDRAM + 0x100,
+ .init_irq = s5pv310_init_irq,
+ .map_io = smdkc210_map_io,
+ .init_machine = smdkc210_machine_init,
+ .timer = &s5pv310_timer,
+MACHINE_END
diff --git a/arch/arm/mach-s5pv310/mach-smdkv310.c b/arch/arm/mach-s5pv310/mach-smdkv310.c
index 46215a14b3bb..35826d66632c 100644
--- a/arch/arm/mach-s5pv310/mach-smdkv310.c
+++ b/arch/arm/mach-s5pv310/mach-smdkv310.c
@@ -9,16 +9,23 @@
*/
#include <linux/serial_core.h>
+#include <linux/gpio.h>
+#include <linux/mmc/host.h>
+#include <linux/platform_device.h>
+#include <linux/smsc911x.h>
+#include <linux/io.h>
#include <asm/mach/arch.h>
#include <asm/mach-types.h>
-#include <asm/hardware/cache-l2x0.h>
#include <plat/regs-serial.h>
#include <plat/s5pv310.h>
#include <plat/cpu.h>
+#include <plat/devs.h>
+#include <plat/sdhci.h>
#include <mach/map.h>
+#include <mach/regs-srom.h>
/* Following are default values for UCON, ULCON and UFCON UART registers */
#define SMDKV310_UCON_DEFAULT (S3C2410_UCON_TXILEVEL | \
@@ -65,6 +72,107 @@ static struct s3c2410_uartcfg smdkv310_uartcfgs[] __initdata = {
},
};
+static struct s3c_sdhci_platdata smdkv310_hsmmc0_pdata __initdata = {
+ .cd_type = S3C_SDHCI_CD_GPIO,
+ .ext_cd_gpio = S5PV310_GPK0(2),
+ .ext_cd_gpio_invert = 1,
+ .clk_type = S3C_SDHCI_CLK_DIV_EXTERNAL,
+#ifdef CONFIG_S5PV310_SDHCI_CH0_8BIT
+ .max_width = 8,
+ .host_caps = MMC_CAP_8_BIT_DATA,
+#endif
+};
+
+static struct s3c_sdhci_platdata smdkv310_hsmmc1_pdata __initdata = {
+ .cd_type = S3C_SDHCI_CD_GPIO,
+ .ext_cd_gpio = S5PV310_GPK0(2),
+ .ext_cd_gpio_invert = 1,
+ .clk_type = S3C_SDHCI_CLK_DIV_EXTERNAL,
+};
+
+static struct s3c_sdhci_platdata smdkv310_hsmmc2_pdata __initdata = {
+ .cd_type = S3C_SDHCI_CD_GPIO,
+ .ext_cd_gpio = S5PV310_GPK2(2),
+ .ext_cd_gpio_invert = 1,
+ .clk_type = S3C_SDHCI_CLK_DIV_EXTERNAL,
+#ifdef CONFIG_S5PV310_SDHCI_CH2_8BIT
+ .max_width = 8,
+ .host_caps = MMC_CAP_8_BIT_DATA,
+#endif
+};
+
+static struct s3c_sdhci_platdata smdkv310_hsmmc3_pdata __initdata = {
+ .cd_type = S3C_SDHCI_CD_GPIO,
+ .ext_cd_gpio = S5PV310_GPK2(2),
+ .ext_cd_gpio_invert = 1,
+ .clk_type = S3C_SDHCI_CLK_DIV_EXTERNAL,
+};
+
+static struct resource smdkv310_smsc911x_resources[] = {
+ [0] = {
+ .start = S5PV310_PA_SROM_BANK(1),
+ .end = S5PV310_PA_SROM_BANK(1) + SZ_64K - 1,
+ .flags = IORESOURCE_MEM,
+ },
+ [1] = {
+ .start = IRQ_EINT(5),
+ .end = IRQ_EINT(5),
+ .flags = IORESOURCE_IRQ | IRQF_TRIGGER_LOW,
+ },
+};
+
+static struct smsc911x_platform_config smsc9215_config = {
+ .irq_polarity = SMSC911X_IRQ_POLARITY_ACTIVE_HIGH,
+ .irq_type = SMSC911X_IRQ_TYPE_PUSH_PULL,
+ .flags = SMSC911X_USE_16BIT | SMSC911X_FORCE_INTERNAL_PHY,
+ .phy_interface = PHY_INTERFACE_MODE_MII,
+ .mac = {0x00, 0x80, 0x00, 0x23, 0x45, 0x67},
+};
+
+static struct platform_device smdkv310_smsc911x = {
+ .name = "smsc911x",
+ .id = -1,
+ .num_resources = ARRAY_SIZE(smdkv310_smsc911x_resources),
+ .resource = smdkv310_smsc911x_resources,
+ .dev = {
+ .platform_data = &smsc9215_config,
+ },
+};
+
+static struct platform_device *smdkv310_devices[] __initdata = {
+ &s3c_device_hsmmc0,
+ &s3c_device_hsmmc1,
+ &s3c_device_hsmmc2,
+ &s3c_device_hsmmc3,
+ &s3c_device_rtc,
+ &s3c_device_wdt,
+ &smdkv310_smsc911x,
+};
+
+static void __init smdkv310_smsc911x_init(void)
+{
+ u32 cs1;
+
+ /* configure nCS1 width to 16 bits */
+ cs1 = __raw_readl(S5PV310_SROM_BW) &
+ ~(S5PV310_SROM_BW__CS_MASK <<
+ S5PV310_SROM_BW__NCS1__SHIFT);
+ cs1 |= ((1 << S5PV310_SROM_BW__DATAWIDTH__SHIFT) |
+ (1 << S5PV310_SROM_BW__WAITENABLE__SHIFT) |
+ (1 << S5PV310_SROM_BW__BYTEENABLE__SHIFT)) <<
+ S5PV310_SROM_BW__NCS1__SHIFT;
+ __raw_writel(cs1, S5PV310_SROM_BW);
+
+ /* set timing for nCS1 suitable for ethernet chip */
+ __raw_writel((0x1 << S5PV310_SROM_BCX__PMC__SHIFT) |
+ (0x9 << S5PV310_SROM_BCX__TACP__SHIFT) |
+ (0xc << S5PV310_SROM_BCX__TCAH__SHIFT) |
+ (0x1 << S5PV310_SROM_BCX__TCOH__SHIFT) |
+ (0x6 << S5PV310_SROM_BCX__TACC__SHIFT) |
+ (0x1 << S5PV310_SROM_BCX__TCOS__SHIFT) |
+ (0x1 << S5PV310_SROM_BCX__TACS__SHIFT), S5PV310_SROM_BC1);
+}
+
static void __init smdkv310_map_io(void)
{
s5p_init_io(NULL, 0, S5P_VA_CHIPID);
@@ -74,9 +182,14 @@ static void __init smdkv310_map_io(void)
static void __init smdkv310_machine_init(void)
{
-#ifdef CONFIG_CACHE_L2X0
- l2x0_init(S5P_VA_L2CC, 1 << 28, 0xffffffff);
-#endif
+ smdkv310_smsc911x_init();
+
+ s3c_sdhci0_set_platdata(&smdkv310_hsmmc0_pdata);
+ s3c_sdhci1_set_platdata(&smdkv310_hsmmc1_pdata);
+ s3c_sdhci2_set_platdata(&smdkv310_hsmmc2_pdata);
+ s3c_sdhci3_set_platdata(&smdkv310_hsmmc3_pdata);
+
+ platform_add_devices(smdkv310_devices, ARRAY_SIZE(smdkv310_devices));
}
MACHINE_START(SMDKV310, "SMDKV310")
diff --git a/arch/arm/mach-s5pv310/mach-universal_c210.c b/arch/arm/mach-s5pv310/mach-universal_c210.c
index d7c2ec770f88..16d8fc00cafd 100644
--- a/arch/arm/mach-s5pv310/mach-universal_c210.c
+++ b/arch/arm/mach-s5pv310/mach-universal_c210.c
@@ -7,15 +7,20 @@
* published by the Free Software Foundation.
*/
+#include <linux/platform_device.h>
#include <linux/serial_core.h>
+#include <linux/input.h>
+#include <linux/i2c.h>
+#include <linux/gpio_keys.h>
+#include <linux/gpio.h>
#include <asm/mach/arch.h>
#include <asm/mach-types.h>
-#include <asm/hardware/cache-l2x0.h>
#include <plat/regs-serial.h>
#include <plat/s5pv310.h>
#include <plat/cpu.h>
+#include <plat/devs.h>
#include <mach/map.h>
@@ -60,6 +65,72 @@ static struct s3c2410_uartcfg universal_uartcfgs[] __initdata = {
},
};
+static struct gpio_keys_button universal_gpio_keys_tables[] = {
+ {
+ .code = KEY_VOLUMEUP,
+ .gpio = S5PV310_GPX2(0), /* XEINT16 */
+ .desc = "gpio-keys: KEY_VOLUMEUP",
+ .type = EV_KEY,
+ .active_low = 1,
+ .debounce_interval = 1,
+ }, {
+ .code = KEY_VOLUMEDOWN,
+ .gpio = S5PV310_GPX2(1), /* XEINT17 */
+ .desc = "gpio-keys: KEY_VOLUMEDOWN",
+ .type = EV_KEY,
+ .active_low = 1,
+ .debounce_interval = 1,
+ }, {
+ .code = KEY_CONFIG,
+ .gpio = S5PV310_GPX2(2), /* XEINT18 */
+ .desc = "gpio-keys: KEY_CONFIG",
+ .type = EV_KEY,
+ .active_low = 1,
+ .debounce_interval = 1,
+ }, {
+ .code = KEY_CAMERA,
+ .gpio = S5PV310_GPX2(3), /* XEINT19 */
+ .desc = "gpio-keys: KEY_CAMERA",
+ .type = EV_KEY,
+ .active_low = 1,
+ .debounce_interval = 1,
+ }, {
+ .code = KEY_OK,
+ .gpio = S5PV310_GPX3(5), /* XEINT29 */
+ .desc = "gpio-keys: KEY_OK",
+ .type = EV_KEY,
+ .active_low = 1,
+ .debounce_interval = 1,
+ },
+};
+
+static struct gpio_keys_platform_data universal_gpio_keys_data = {
+ .buttons = universal_gpio_keys_tables,
+ .nbuttons = ARRAY_SIZE(universal_gpio_keys_tables),
+};
+
+static struct platform_device universal_gpio_keys = {
+ .name = "gpio-keys",
+ .dev = {
+ .platform_data = &universal_gpio_keys_data,
+ },
+};
+
+/* I2C0 */
+static struct i2c_board_info i2c0_devs[] __initdata = {
+ /* Camera, To be updated */
+};
+
+/* I2C1 */
+static struct i2c_board_info i2c1_devs[] __initdata = {
+ /* Gyro, To be updated */
+};
+
+static struct platform_device *universal_devices[] __initdata = {
+ &universal_gpio_keys,
+ &s5p_device_onenand,
+};
+
static void __init universal_map_io(void)
{
s5p_init_io(NULL, 0, S5P_VA_CHIPID);
@@ -69,9 +140,11 @@ static void __init universal_map_io(void)
static void __init universal_machine_init(void)
{
-#ifdef CONFIG_CACHE_L2X0
- l2x0_init(S5P_VA_L2CC, 1 << 28, 0xffffffff);
-#endif
+ i2c_register_board_info(0, i2c0_devs, ARRAY_SIZE(i2c0_devs));
+ i2c_register_board_info(1, i2c1_devs, ARRAY_SIZE(i2c1_devs));
+
+ /* Last */
+ platform_add_devices(universal_devices, ARRAY_SIZE(universal_devices));
}
MACHINE_START(UNIVERSAL_C210, "UNIVERSAL_C210")
diff --git a/arch/arm/mach-s5pv310/setup-i2c0.c b/arch/arm/mach-s5pv310/setup-i2c0.c
index 436712807383..f47f8f3152ec 100644
--- a/arch/arm/mach-s5pv310/setup-i2c0.c
+++ b/arch/arm/mach-s5pv310/setup-i2c0.c
@@ -21,8 +21,6 @@ struct platform_device; /* don't need the contents */
void s3c_i2c0_cfg_gpio(struct platform_device *dev)
{
- s3c_gpio_cfgpin(S5PV310_GPD1(0), S3C_GPIO_SFN(2));
- s3c_gpio_setpull(S5PV310_GPD1(0), S3C_GPIO_PULL_UP);
- s3c_gpio_cfgpin(S5PV310_GPD1(1), S3C_GPIO_SFN(2));
- s3c_gpio_setpull(S5PV310_GPD1(1), S3C_GPIO_PULL_UP);
+ s3c_gpio_cfgall_range(S5PV310_GPD1(0), 2,
+ S3C_GPIO_SFN(2), S3C_GPIO_PULL_UP);
}
diff --git a/arch/arm/mach-s5pv310/setup-i2c1.c b/arch/arm/mach-s5pv310/setup-i2c1.c
index 1ecd5bc35b5a..9d07e4e2f14c 100644
--- a/arch/arm/mach-s5pv310/setup-i2c1.c
+++ b/arch/arm/mach-s5pv310/setup-i2c1.c
@@ -18,8 +18,6 @@ struct platform_device; /* don't need the contents */
void s3c_i2c1_cfg_gpio(struct platform_device *dev)
{
- s3c_gpio_cfgpin(S5PV310_GPD1(2), S3C_GPIO_SFN(2));
- s3c_gpio_setpull(S5PV310_GPD1(2), S3C_GPIO_PULL_UP);
- s3c_gpio_cfgpin(S5PV310_GPD1(3), S3C_GPIO_SFN(2));
- s3c_gpio_setpull(S5PV310_GPD1(3), S3C_GPIO_PULL_UP);
+ s3c_gpio_cfgall_range(S5PV310_GPD1(2), 2,
+ S3C_GPIO_SFN(2), S3C_GPIO_PULL_UP);
}
diff --git a/arch/arm/mach-s5pv310/setup-i2c2.c b/arch/arm/mach-s5pv310/setup-i2c2.c
index 4c0d8def660a..4163b1233daf 100644
--- a/arch/arm/mach-s5pv310/setup-i2c2.c
+++ b/arch/arm/mach-s5pv310/setup-i2c2.c
@@ -18,8 +18,6 @@ struct platform_device; /* don't need the contents */
void s3c_i2c2_cfg_gpio(struct platform_device *dev)
{
- s3c_gpio_cfgpin(S5PV310_GPA0(6), S3C_GPIO_SFN(3));
- s3c_gpio_setpull(S5PV310_GPA0(6), S3C_GPIO_PULL_UP);
- s3c_gpio_cfgpin(S5PV310_GPA0(7), S3C_GPIO_SFN(3));
- s3c_gpio_setpull(S5PV310_GPA0(7), S3C_GPIO_PULL_UP);
+ s3c_gpio_cfgall_range(S5PV310_GPA0(6), 2,
+ S3C_GPIO_SFN(3), S3C_GPIO_PULL_UP);
}
diff --git a/arch/arm/mach-s5pv310/setup-i2c3.c b/arch/arm/mach-s5pv310/setup-i2c3.c
new file mode 100644
index 000000000000..180f153d2a20
--- /dev/null
+++ b/arch/arm/mach-s5pv310/setup-i2c3.c
@@ -0,0 +1,23 @@
+/*
+ * linux/arch/arm/mach-s5pv310/setup-i2c3.c
+ *
+ * Copyright (c) 2010 Samsung Electronics Co., Ltd.
+ *
+ * I2C3 GPIO configuration.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+*/
+
+struct platform_device; /* don't need the contents */
+
+#include <linux/gpio.h>
+#include <plat/iic.h>
+#include <plat/gpio-cfg.h>
+
+void s3c_i2c3_cfg_gpio(struct platform_device *dev)
+{
+ s3c_gpio_cfgall_range(S5PV310_GPA1(2), 2,
+ S3C_GPIO_SFN(3), S3C_GPIO_PULL_UP);
+}
diff --git a/arch/arm/mach-s5pv310/setup-i2c4.c b/arch/arm/mach-s5pv310/setup-i2c4.c
new file mode 100644
index 000000000000..909e8dfc5316
--- /dev/null
+++ b/arch/arm/mach-s5pv310/setup-i2c4.c
@@ -0,0 +1,23 @@
+/*
+ * linux/arch/arm/mach-s5pv310/setup-i2c4.c
+ *
+ * Copyright (c) 2010 Samsung Electronics Co., Ltd.
+ *
+ * I2C4 GPIO configuration.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+*/
+
+struct platform_device; /* don't need the contents */
+
+#include <linux/gpio.h>
+#include <plat/iic.h>
+#include <plat/gpio-cfg.h>
+
+void s3c_i2c4_cfg_gpio(struct platform_device *dev)
+{
+ s3c_gpio_cfgall_range(S5PV310_GPB(2), 2,
+ S3C_GPIO_SFN(3), S3C_GPIO_PULL_UP);
+}
diff --git a/arch/arm/mach-s5pv310/setup-i2c5.c b/arch/arm/mach-s5pv310/setup-i2c5.c
new file mode 100644
index 000000000000..5d0fa4ac0283
--- /dev/null
+++ b/arch/arm/mach-s5pv310/setup-i2c5.c
@@ -0,0 +1,23 @@
+/*
+ * linux/arch/arm/mach-s5pv310/setup-i2c5.c
+ *
+ * Copyright (c) 2010 Samsung Electronics Co., Ltd.
+ *
+ * I2C5 GPIO configuration.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+*/
+
+struct platform_device; /* don't need the contents */
+
+#include <linux/gpio.h>
+#include <plat/iic.h>
+#include <plat/gpio-cfg.h>
+
+void s3c_i2c5_cfg_gpio(struct platform_device *dev)
+{
+ s3c_gpio_cfgall_range(S5PV310_GPB(6), 2,
+ S3C_GPIO_SFN(3), S3C_GPIO_PULL_UP);
+}
diff --git a/arch/arm/mach-s5pv310/setup-i2c6.c b/arch/arm/mach-s5pv310/setup-i2c6.c
new file mode 100644
index 000000000000..34aafab92ac4
--- /dev/null
+++ b/arch/arm/mach-s5pv310/setup-i2c6.c
@@ -0,0 +1,23 @@
+/*
+ * linux/arch/arm/mach-s5pv310/setup-i2c6.c
+ *
+ * Copyright (c) 2010 Samsung Electronics Co., Ltd.
+ *
+ * I2C6 GPIO configuration.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+*/
+
+struct platform_device; /* don't need the contents */
+
+#include <linux/gpio.h>
+#include <plat/iic.h>
+#include <plat/gpio-cfg.h>
+
+void s3c_i2c6_cfg_gpio(struct platform_device *dev)
+{
+ s3c_gpio_cfgall_range(S5PV310_GPC1(3), 2,
+ S3C_GPIO_SFN(4), S3C_GPIO_PULL_UP);
+}
diff --git a/arch/arm/mach-s5pv310/setup-i2c7.c b/arch/arm/mach-s5pv310/setup-i2c7.c
new file mode 100644
index 000000000000..9b25b8d18920
--- /dev/null
+++ b/arch/arm/mach-s5pv310/setup-i2c7.c
@@ -0,0 +1,23 @@
+/*
+ * linux/arch/arm/mach-s5pv310/setup-i2c7.c
+ *
+ * Copyright (c) 2010 Samsung Electronics Co., Ltd.
+ *
+ * I2C7 GPIO configuration.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+*/
+
+struct platform_device; /* don't need the contents */
+
+#include <linux/gpio.h>
+#include <plat/iic.h>
+#include <plat/gpio-cfg.h>
+
+void s3c_i2c7_cfg_gpio(struct platform_device *dev)
+{
+ s3c_gpio_cfgall_range(S5PV310_GPD0(2), 2,
+ S3C_GPIO_SFN(3), S3C_GPIO_PULL_UP);
+}
diff --git a/arch/arm/mach-s5pv310/setup-sdhci-gpio.c b/arch/arm/mach-s5pv310/setup-sdhci-gpio.c
new file mode 100644
index 000000000000..86d38cc49135
--- /dev/null
+++ b/arch/arm/mach-s5pv310/setup-sdhci-gpio.c
@@ -0,0 +1,152 @@
+/* linux/arch/arm/mach-s5pv310/setup-sdhci-gpio.c
+ *
+ * Copyright (c) 2010 Samsung Electronics Co., Ltd.
+ * http://www.samsung.com/
+ *
+ * S5PV310 - Helper functions for setting up SDHCI device(s) GPIO (HSMMC)
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+*/
+
+#include <linux/kernel.h>
+#include <linux/types.h>
+#include <linux/interrupt.h>
+#include <linux/platform_device.h>
+#include <linux/io.h>
+#include <linux/gpio.h>
+#include <linux/mmc/host.h>
+#include <linux/mmc/card.h>
+
+#include <plat/gpio-cfg.h>
+#include <plat/regs-sdhci.h>
+#include <plat/sdhci.h>
+
+void s5pv310_setup_sdhci0_cfg_gpio(struct platform_device *dev, int width)
+{
+ struct s3c_sdhci_platdata *pdata = dev->dev.platform_data;
+ unsigned int gpio;
+
+ /* Set all the necessary GPK0[0:1] pins to special-function 2 */
+ for (gpio = S5PV310_GPK0(0); gpio < S5PV310_GPK0(2); gpio++) {
+ s3c_gpio_cfgpin(gpio, S3C_GPIO_SFN(2));
+ s3c_gpio_setpull(gpio, S3C_GPIO_PULL_NONE);
+ s5p_gpio_set_drvstr(gpio, S5P_GPIO_DRVSTR_LV4);
+ }
+
+ switch (width) {
+ case 8:
+ for (gpio = S5PV310_GPK1(3); gpio <= S5PV310_GPK1(6); gpio++) {
+ /* Data pin GPK1[3:6] to special-function 3 */
+ s3c_gpio_cfgpin(gpio, S3C_GPIO_SFN(3));
+ s3c_gpio_setpull(gpio, S3C_GPIO_PULL_UP);
+ s5p_gpio_set_drvstr(gpio, S5P_GPIO_DRVSTR_LV4);
+ }
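+ /* fall through: 8-bit mode also needs the 4-bit data pins below */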
+ case 4:
+ for (gpio = S5PV310_GPK0(3); gpio <= S5PV310_GPK0(6); gpio++) {
+ /* Data pin GPK0[3:6] to special-function 2 */
+ s3c_gpio_cfgpin(gpio, S3C_GPIO_SFN(2));
+ s3c_gpio_setpull(gpio, S3C_GPIO_PULL_UP);
+ s5p_gpio_set_drvstr(gpio, S5P_GPIO_DRVSTR_LV4);
+ }
+ default:
+ break;
+ }
+
+ if (pdata->cd_type == S3C_SDHCI_CD_INTERNAL) {
+ s3c_gpio_cfgpin(S5PV310_GPK0(2), S3C_GPIO_SFN(2));
+ s3c_gpio_setpull(S5PV310_GPK0(2), S3C_GPIO_PULL_UP);
+ s5p_gpio_set_drvstr(S5PV310_GPK0(2), S5P_GPIO_DRVSTR_LV4);
+ }
+}
+
+void s5pv310_setup_sdhci1_cfg_gpio(struct platform_device *dev, int width)
+{
+ struct s3c_sdhci_platdata *pdata = dev->dev.platform_data;
+ unsigned int gpio;
+
+ /* Set all the necessary GPK1[0:1] pins to special-function 2 */
+ for (gpio = S5PV310_GPK1(0); gpio < S5PV310_GPK1(2); gpio++) {
+ s3c_gpio_cfgpin(gpio, S3C_GPIO_SFN(2));
+ s3c_gpio_setpull(gpio, S3C_GPIO_PULL_NONE);
+ s5p_gpio_set_drvstr(gpio, S5P_GPIO_DRVSTR_LV4);
+ }
+
+ for (gpio = S5PV310_GPK1(3); gpio <= S5PV310_GPK1(6); gpio++) {
+ /* Data pin GPK1[3:6] to special-function 2 */
+ s3c_gpio_cfgpin(gpio, S3C_GPIO_SFN(2));
+ s3c_gpio_setpull(gpio, S3C_GPIO_PULL_UP);
+ s5p_gpio_set_drvstr(gpio, S5P_GPIO_DRVSTR_LV4);
+ }
+
+ if (pdata->cd_type == S3C_SDHCI_CD_INTERNAL) {
+ s3c_gpio_cfgpin(S5PV310_GPK1(2), S3C_GPIO_SFN(2));
+ s3c_gpio_setpull(S5PV310_GPK1(2), S3C_GPIO_PULL_UP);
+ s5p_gpio_set_drvstr(S5PV310_GPK1(2), S5P_GPIO_DRVSTR_LV4);
+ }
+}
+
+void s5pv310_setup_sdhci2_cfg_gpio(struct platform_device *dev, int width)
+{
+ struct s3c_sdhci_platdata *pdata = dev->dev.platform_data;
+ unsigned int gpio;
+
+ /* Set all the necessary GPK2[0:1] pins to special-function 2 */
+ for (gpio = S5PV310_GPK2(0); gpio < S5PV310_GPK2(2); gpio++) {
+ s3c_gpio_cfgpin(gpio, S3C_GPIO_SFN(2));
+ s3c_gpio_setpull(gpio, S3C_GPIO_PULL_NONE);
+ s5p_gpio_set_drvstr(gpio, S5P_GPIO_DRVSTR_LV4);
+ }
+
+ switch (width) {
+ case 8:
+ for (gpio = S5PV310_GPK3(3); gpio <= S5PV310_GPK3(6); gpio++) {
+ /* Data pin GPK3[3:6] to special-function 3 */
+ s3c_gpio_cfgpin(gpio, S3C_GPIO_SFN(3));
+ s3c_gpio_setpull(gpio, S3C_GPIO_PULL_UP);
+ s5p_gpio_set_drvstr(gpio, S5P_GPIO_DRVSTR_LV4);
+ }
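+ /* fall through - an 8-bit bus also needs the 4-bit data lines below */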
+ case 4:
+ for (gpio = S5PV310_GPK2(3); gpio <= S5PV310_GPK2(6); gpio++) {
+ /* Data pin GPK2[3:6] to special-function 2 */
+ s3c_gpio_cfgpin(gpio, S3C_GPIO_SFN(2));
+ s3c_gpio_setpull(gpio, S3C_GPIO_PULL_UP);
+ s5p_gpio_set_drvstr(gpio, S5P_GPIO_DRVSTR_LV4);
+ }
+ default:
+ break;
+ }
+
+ if (pdata->cd_type == S3C_SDHCI_CD_INTERNAL) {
+ s3c_gpio_cfgpin(S5PV310_GPK2(2), S3C_GPIO_SFN(2));
+ s3c_gpio_setpull(S5PV310_GPK2(2), S3C_GPIO_PULL_UP);
+ s5p_gpio_set_drvstr(S5PV310_GPK2(2), S5P_GPIO_DRVSTR_LV4);
+ }
+}
+
+void s5pv310_setup_sdhci3_cfg_gpio(struct platform_device *dev, int width)
+{
+ struct s3c_sdhci_platdata *pdata = dev->dev.platform_data;
+ unsigned int gpio;
+
+ /* Set all the necessary GPK3[0:1] pins to special-function 2 */
+ for (gpio = S5PV310_GPK3(0); gpio < S5PV310_GPK3(2); gpio++) {
+ s3c_gpio_cfgpin(gpio, S3C_GPIO_SFN(2));
+ s3c_gpio_setpull(gpio, S3C_GPIO_PULL_NONE);
+ s5p_gpio_set_drvstr(gpio, S5P_GPIO_DRVSTR_LV4);
+ }
+
+ for (gpio = S5PV310_GPK3(3); gpio <= S5PV310_GPK3(6); gpio++) {
+ /* Data pin GPK3[3:6] to special-function 2 */
+ s3c_gpio_cfgpin(gpio, S3C_GPIO_SFN(2));
+ s3c_gpio_setpull(gpio, S3C_GPIO_PULL_UP);
+ s5p_gpio_set_drvstr(gpio, S5P_GPIO_DRVSTR_LV4);
+ }
+
+ if (pdata->cd_type == S3C_SDHCI_CD_INTERNAL) {
+ s3c_gpio_cfgpin(S5PV310_GPK3(2), S3C_GPIO_SFN(2));
+ s3c_gpio_setpull(S5PV310_GPK3(2), S3C_GPIO_PULL_UP);
+ s5p_gpio_set_drvstr(S5PV310_GPK3(2), S5P_GPIO_DRVSTR_LV4);
+ }
+}
diff --git a/arch/arm/mach-s5pv310/setup-sdhci.c b/arch/arm/mach-s5pv310/setup-sdhci.c
new file mode 100644
index 000000000000..db8358fc4662
--- /dev/null
+++ b/arch/arm/mach-s5pv310/setup-sdhci.c
@@ -0,0 +1,69 @@
+/* linux/arch/arm/mach-s5pv310/setup-sdhci.c
+ *
+ * Copyright (c) 2010 Samsung Electronics Co., Ltd.
+ * http://www.samsung.com/
+ *
+ * S5PV310 - Helper functions for setting up SDHCI device(s) (HSMMC)
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+*/
+
+#include <linux/kernel.h>
+#include <linux/types.h>
+#include <linux/interrupt.h>
+#include <linux/platform_device.h>
+#include <linux/io.h>
+
+#include <linux/mmc/card.h>
+#include <linux/mmc/host.h>
+
+#include <plat/regs-sdhci.h>
+
+/* clock sources for the mmc bus clock, order as for the ctrl2[5..4] */
+
+char *s5pv310_hsmmc_clksrcs[4] = {
+ [0] = NULL,
+ [1] = NULL,
+ [2] = "sclk_mmc", /* mmc_bus */
+ [3] = NULL,
+};
+
+void s5pv310_setup_sdhci_cfg_card(struct platform_device *dev, void __iomem *r,
+ struct mmc_ios *ios, struct mmc_card *card)
+{
+ u32 ctrl2, ctrl3;
+
+ /* don't need to alter anything according to card-type */
+
+ ctrl2 = readl(r + S3C_SDHCI_CONTROL2);
+
+ /* select base clock source to HCLK */
+
+ ctrl2 &= S3C_SDHCI_CTRL2_SELBASECLK_MASK;
+
+ /*
+ * clear async mode, enable conflict mask, rx feedback ctrl, SD
+ * clk hold and no use debounce count
+ */
+
+ ctrl2 |= (S3C64XX_SDHCI_CTRL2_ENSTAASYNCCLR |
+ S3C64XX_SDHCI_CTRL2_ENCMDCNFMSK |
+ S3C_SDHCI_CTRL2_ENFBCLKRX |
+ S3C_SDHCI_CTRL2_DFCNT_NONE |
+ S3C_SDHCI_CTRL2_ENCLKOUTHOLD);
+
+ /* Tx and Rx feedback clock delay control */
+
+ if (ios->clock < 25 * 1000000)
+ ctrl3 = (S3C_SDHCI_CTRL3_FCSEL3 |
+ S3C_SDHCI_CTRL3_FCSEL2 |
+ S3C_SDHCI_CTRL3_FCSEL1 |
+ S3C_SDHCI_CTRL3_FCSEL0);
+ else
+ ctrl3 = (S3C_SDHCI_CTRL3_FCSEL1 | S3C_SDHCI_CTRL3_FCSEL0);
+
+ writel(ctrl2, r + S3C_SDHCI_CONTROL2);
+ writel(ctrl3, r + S3C_SDHCI_CONTROL3);
+}
diff --git a/arch/arm/mach-sa1100/cpu-sa1100.c b/arch/arm/mach-sa1100/cpu-sa1100.c
index c0a13ef5436f..96f7dc103b59 100644
--- a/arch/arm/mach-sa1100/cpu-sa1100.c
+++ b/arch/arm/mach-sa1100/cpu-sa1100.c
@@ -184,16 +184,15 @@ static int sa1100_target(struct cpufreq_policy *policy,
{
unsigned int cur = sa11x0_getspeed(0);
unsigned int new_ppcr;
-
struct cpufreq_freqs freqs;
+
+ new_ppcr = sa11x0_freq_to_ppcr(target_freq);
switch(relation){
case CPUFREQ_RELATION_L:
- new_ppcr = sa11x0_freq_to_ppcr(target_freq);
if (sa11x0_ppcr_to_freq(new_ppcr) > policy->max)
new_ppcr--;
break;
case CPUFREQ_RELATION_H:
- new_ppcr = sa11x0_freq_to_ppcr(target_freq);
if ((sa11x0_ppcr_to_freq(new_ppcr) > target_freq) &&
(sa11x0_ppcr_to_freq(new_ppcr - 1) >= policy->min))
new_ppcr--;
diff --git a/arch/arm/mach-shark/include/mach/vmalloc.h b/arch/arm/mach-shark/include/mach/vmalloc.h
index 8e845b6a7cb5..b10df988526d 100644
--- a/arch/arm/mach-shark/include/mach/vmalloc.h
+++ b/arch/arm/mach-shark/include/mach/vmalloc.h
@@ -1,4 +1,4 @@
/*
* arch/arm/mach-shark/include/mach/vmalloc.h
*/
-#define VMALLOC_END 0xd0000000
+#define VMALLOC_END 0xd0000000UL
diff --git a/arch/arm/mach-shmobile/Kconfig b/arch/arm/mach-shmobile/Kconfig
index 54b479c35ee0..51dcd59eda6a 100644
--- a/arch/arm/mach-shmobile/Kconfig
+++ b/arch/arm/mach-shmobile/Kconfig
@@ -116,4 +116,6 @@ endmenu
config SH_CLK_CPG
bool
+source "drivers/sh/Kconfig"
+
endif
diff --git a/arch/arm/mach-shmobile/board-ap4evb.c b/arch/arm/mach-shmobile/board-ap4evb.c
index 14923989ea05..d440e5f456ad 100644
--- a/arch/arm/mach-shmobile/board-ap4evb.c
+++ b/arch/arm/mach-shmobile/board-ap4evb.c
@@ -30,7 +30,6 @@
#include <linux/mtd/mtd.h>
#include <linux/mtd/partitions.h>
#include <linux/mtd/physmap.h>
-#include <linux/mmc/host.h>
#include <linux/mmc/sh_mmcif.h>
#include <linux/i2c.h>
#include <linux/i2c/tsc2007.h>
@@ -44,6 +43,10 @@
#include <linux/input/sh_keysc.h>
#include <linux/usb/r8a66597.h>
+#include <media/sh_mobile_ceu.h>
+#include <media/sh_mobile_csi2.h>
+#include <media/soc_camera.h>
+
#include <sound/sh_fsi.h>
#include <video/sh_mobile_hdmi.h>
@@ -160,11 +163,13 @@ static struct mtd_partition nor_flash_partitions[] = {
.name = "loader",
.offset = 0x00000000,
.size = 512 * 1024,
+ .mask_flags = MTD_WRITEABLE,
},
{
.name = "bootenv",
.offset = MTDPART_OFS_APPEND,
.size = 512 * 1024,
+ .mask_flags = MTD_WRITEABLE,
},
{
.name = "kernel_ro",
@@ -235,10 +240,22 @@ static struct platform_device smc911x_device = {
},
};
+/*
+ * The card detect pin of the top SD/MMC slot (CN7) is active low and is
+ * connected to GPIO A22 of SH7372 (GPIO_PORT41).
+ */
+static int slot_cn7_get_cd(struct platform_device *pdev)
+{
+ if (gpio_is_valid(GPIO_PORT41))
+ return !gpio_get_value(GPIO_PORT41);
+ else
+ return -ENXIO;
+}
+
/* SH_MMCIF */
static struct resource sh_mmcif_resources[] = {
[0] = {
- .name = "SH_MMCIF",
+ .name = "MMCIF",
.start = 0xE6BD0000,
.end = 0xE6BD00FF,
.flags = IORESOURCE_MEM,
@@ -261,6 +278,7 @@ static struct sh_mmcif_plat_data sh_mmcif_plat = {
.caps = MMC_CAP_4_BIT_DATA |
MMC_CAP_8_BIT_DATA |
MMC_CAP_NEEDS_POLL,
+ .get_cd = slot_cn7_get_cd,
};
static struct platform_device sh_mmcif_device = {
@@ -310,6 +328,8 @@ static struct sh_mobile_sdhi_info sdhi1_info = {
.dma_slave_rx = SHDMA_SLAVE_SDHI1_RX,
.tmio_ocr_mask = MMC_VDD_165_195,
.tmio_flags = TMIO_MMC_WRPROTECT_DISABLE,
+ .tmio_caps = MMC_CAP_NEEDS_POLL,
+ .get_cd = slot_cn7_get_cd,
};
static struct resource sdhi1_resources[] = {
@@ -375,10 +395,40 @@ static struct platform_device usb1_host_device = {
.resource = usb1_host_resources,
};
+static const struct fb_videomode ap4evb_lcdc_modes[] = {
+ {
+#ifdef CONFIG_AP4EVB_QHD
+ .name = "R63302(QHD)",
+ .xres = 544,
+ .yres = 961,
+ .left_margin = 72,
+ .right_margin = 600,
+ .hsync_len = 16,
+ .upper_margin = 8,
+ .lower_margin = 8,
+ .vsync_len = 2,
+ .sync = FB_SYNC_VERT_HIGH_ACT | FB_SYNC_HOR_HIGH_ACT,
+#else
+ .name = "WVGA Panel",
+ .xres = 800,
+ .yres = 480,
+ .left_margin = 220,
+ .right_margin = 110,
+ .hsync_len = 70,
+ .upper_margin = 20,
+ .lower_margin = 5,
+ .vsync_len = 5,
+ .sync = 0,
+#endif
+ },
+};
+
static struct sh_mobile_lcdc_info lcdc_info = {
.ch[0] = {
.chan = LCDC_CHAN_MAINLCD,
.bpp = 16,
+ .lcd_cfg = ap4evb_lcdc_modes,
+ .num_cfg = ARRAY_SIZE(ap4evb_lcdc_modes),
}
};
@@ -517,26 +567,130 @@ static struct platform_device *qhd_devices[] __initdata = {
/* FSI */
#define IRQ_FSI evt2irq(0x1840)
-#define FSIACKCR 0xE6150018
-static void fsiackcr_init(struct clk *clk)
+static int __fsi_set_rate(struct clk *clk, long rate, int enable)
{
- u32 status = __raw_readl(clk->enable_reg);
+ int ret = 0;
+
+ if (rate <= 0)
+ return ret;
+
+ if (enable) {
+ ret = clk_set_rate(clk, rate);
+ if (0 == ret)
+ ret = clk_enable(clk);
+ } else {
+ clk_disable(clk);
+ }
- /* use external clock */
- status &= ~0x000000ff;
- status |= 0x00000080;
- __raw_writel(status, clk->enable_reg);
+ return ret;
}
-static struct clk_ops fsiackcr_clk_ops = {
- .init = fsiackcr_init,
-};
+static int __fsi_set_round_rate(struct clk *clk, long rate, int enable)
+{
+ return __fsi_set_rate(clk, clk_round_rate(clk, rate), enable);
+}
-static struct clk fsiackcr_clk = {
- .ops = &fsiackcr_clk_ops,
- .enable_reg = (void __iomem *)FSIACKCR,
- .rate = 0, /* unknown */
-};
+static int fsi_ak4642_set_rate(struct device *dev, int rate, int enable)
+{
+ struct clk *fsia_ick;
+ struct clk *fsiack;
+ int ret = -EIO;
+
+ fsia_ick = clk_get(dev, "icka");
+ if (IS_ERR(fsia_ick))
+ return PTR_ERR(fsia_ick);
+
+ /*
+ * FSIACK is connected to the AK4642 and uses the external
+ * clock pin from it; it is the parent of fsia_ick now.
+ */
+ fsiack = clk_get_parent(fsia_ick);
+ if (!fsiack)
+ goto fsia_ick_out;
+
+ /*
+ * we get a 1/1 divided clock by setting the same rate on fsiack and fsia_ick
+ *
+ ** FIXME **
+ * Because the freq_table entries of the external clock (fsiack) are all 0,
+ * clk_round_rate() returns 0 for it.
+ * So use __fsi_set_rate() here instead of __fsi_set_round_rate().
+ */
+ ret = __fsi_set_rate(fsiack, rate, enable);
+ if (ret < 0)
+ goto fsiack_out;
+
+ ret = __fsi_set_round_rate(fsia_ick, rate, enable);
+ if ((ret < 0) && enable)
+ __fsi_set_round_rate(fsiack, rate, 0); /* disable FSI ACK */
+
+fsiack_out:
+ clk_put(fsiack);
+
+fsia_ick_out:
+ clk_put(fsia_ick);
+
+ return ret;
+}
+
+static int fsi_hdmi_set_rate(struct device *dev, int rate, int enable)
+{
+ struct clk *fsib_clk;
+ struct clk *fdiv_clk = &sh7372_fsidivb_clk;
+ long fsib_rate = 0;
+ long fdiv_rate = 0;
+ int ackmd_bpfmd;
+ int ret;
+
+ switch (rate) {
+ case 44100:
+ fsib_rate = rate * 256;
+ ackmd_bpfmd = SH_FSI_ACKMD_256 | SH_FSI_BPFMD_64;
+ break;
+ case 48000:
+ fsib_rate = 85428000; /* around 48kHz x 256 x 7 */
+ fdiv_rate = rate * 256;
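+ /* FSI-DIV B then divides the FSI-B clock back down to the rate x 256 target */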
+ ackmd_bpfmd = SH_FSI_ACKMD_256 | SH_FSI_BPFMD_64;
+ break;
+ default:
+ pr_err("unsupported rate in FSI2 port B\n");
+ return -EINVAL;
+ }
+
+ /* FSI B setting */
+ fsib_clk = clk_get(dev, "ickb");
+ if (IS_ERR(fsib_clk))
+ return -EIO;
+
+ ret = __fsi_set_round_rate(fsib_clk, fsib_rate, enable);
+ clk_put(fsib_clk);
+ if (ret < 0)
+ return ret;
+
+ /* FSI DIV setting */
+ ret = __fsi_set_round_rate(fdiv_clk, fdiv_rate, enable);
+ if (ret < 0) {
+ /* disable FSI B */
+ if (enable)
+ __fsi_set_round_rate(fsib_clk, fsib_rate, 0);
+ return ret;
+ }
+
+ return ackmd_bpfmd;
+}
+
+static int fsi_set_rate(struct device *dev, int is_porta, int rate, int enable)
+{
+ int ret;
+
+ if (is_porta)
+ ret = fsi_ak4642_set_rate(dev, rate, enable);
+ else
+ ret = fsi_hdmi_set_rate(dev, rate, enable);
+
+ return ret;
+}
static struct sh_fsi_platform_info fsi_info = {
.porta_flags = SH_FSI_BRS_INV |
@@ -544,6 +698,12 @@ static struct sh_fsi_platform_info fsi_info = {
SH_FSI_IN_SLAVE_MODE |
SH_FSI_OFMT(PCM) |
SH_FSI_IFMT(PCM),
+
+ .portb_flags = SH_FSI_BRS_INV |
+ SH_FSI_BRM_INV |
+ SH_FSI_LRS_INV |
+ SH_FSI_OFMT(SPDIF),
+ .set_rate = fsi_set_rate,
};
static struct resource fsi_resources[] = {
@@ -577,26 +737,6 @@ static struct sh_mobile_lcdc_info sh_mobile_lcdc1_info = {
.interface_type = RGB24,
.clock_divider = 1,
.flags = LCDC_FLAGS_DWPOL,
- .lcd_cfg = {
- .name = "HDMI",
- /* So far only 720p is supported */
- .xres = 1280,
- .yres = 720,
- /*
- * If left and right margins are not multiples of 8,
- * LDHAJR will be adjusted accordingly by the LCDC
- * driver. Until we start using EDID, these values
- * might have to be adjusted for different monitors.
- */
- .left_margin = 200,
- .right_margin = 88,
- .hsync_len = 48,
- .upper_margin = 20,
- .lower_margin = 5,
- .vsync_len = 5,
- .pixclock = 13468,
- .sync = FB_SYNC_VERT_HIGH_ACT | FB_SYNC_HOR_HIGH_ACT,
- },
}
};
@@ -608,7 +748,7 @@ static struct resource lcdc1_resources[] = {
.flags = IORESOURCE_MEM,
},
[1] = {
- .start = intcs_evt2irq(0x17a0),
+ .start = intcs_evt2irq(0x1780),
.flags = IORESOURCE_IRQ,
},
};
@@ -627,6 +767,7 @@ static struct platform_device lcdc1_device = {
static struct sh_mobile_hdmi_info hdmi_info = {
.lcd_chan = &sh_mobile_lcdc1_info.ch[0],
.lcd_dev = &lcdc1_device.dev,
+ .flags = HDMI_SND_SRC_SPDIF,
};
static struct resource hdmi_resources[] = {
@@ -689,6 +830,95 @@ static struct platform_device leds_device = {
},
};
+static struct i2c_board_info imx074_info = {
+ I2C_BOARD_INFO("imx074", 0x1a),
+};
+
+struct soc_camera_link imx074_link = {
+ .bus_id = 0,
+ .board_info = &imx074_info,
+ .i2c_adapter_id = 0,
+ .module_name = "imx074",
+};
+
+static struct platform_device ap4evb_camera = {
+ .name = "soc-camera-pdrv",
+ .id = 0,
+ .dev = {
+ .platform_data = &imx074_link,
+ },
+};
+
+static struct sh_csi2_client_config csi2_clients[] = {
+ {
+ .phy = SH_CSI2_PHY_MAIN,
+ .lanes = 3,
+ .channel = 0,
+ .pdev = &ap4evb_camera,
+ },
+};
+
+static struct sh_csi2_pdata csi2_info = {
+ .type = SH_CSI2C,
+ .clients = csi2_clients,
+ .num_clients = ARRAY_SIZE(csi2_clients),
+ .flags = SH_CSI2_ECC | SH_CSI2_CRC,
+};
+
+static struct resource csi2_resources[] = {
+ [0] = {
+ .name = "CSI2",
+ .start = 0xffc90000,
+ .end = 0xffc90fff,
+ .flags = IORESOURCE_MEM,
+ },
+ [1] = {
+ .start = intcs_evt2irq(0x17a0),
+ .flags = IORESOURCE_IRQ,
+ },
+};
+
+static struct platform_device csi2_device = {
+ .name = "sh-mobile-csi2",
+ .id = 0,
+ .num_resources = ARRAY_SIZE(csi2_resources),
+ .resource = csi2_resources,
+ .dev = {
+ .platform_data = &csi2_info,
+ },
+};
+
+static struct sh_mobile_ceu_info sh_mobile_ceu_info = {
+ .flags = SH_CEU_FLAG_USE_8BIT_BUS,
+ .csi2_dev = &csi2_device.dev,
+};
+
+static struct resource ceu_resources[] = {
+ [0] = {
+ .name = "CEU",
+ .start = 0xfe910000,
+ .end = 0xfe91009f,
+ .flags = IORESOURCE_MEM,
+ },
+ [1] = {
+ .start = intcs_evt2irq(0x880),
+ .flags = IORESOURCE_IRQ,
+ },
+ [2] = {
+ /* placeholder for contiguous memory */
+ },
+};
+
+static struct platform_device ceu_device = {
+ .name = "sh_mobile_ceu",
+ .id = 0, /* "ceu0" clock */
+ .num_resources = ARRAY_SIZE(ceu_resources),
+ .resource = ceu_resources,
+ .dev = {
+ .platform_data = &sh_mobile_ceu_info,
+ },
+};
+
static struct platform_device *ap4evb_devices[] __initdata = {
&leds_device,
&nor_flash_device,
@@ -701,6 +931,9 @@ static struct platform_device *ap4evb_devices[] __initdata = {
&lcdc1_device,
&lcdc_device,
&hdmi_device,
+ &csi2_device,
+ &ceu_device,
+ &ap4evb_camera,
};
static int __init hdmi_init_pm_clock(void)
@@ -715,30 +948,35 @@ static int __init hdmi_init_pm_clock(void)
goto out;
}
- ret = clk_set_parent(&pllc2_clk, &dv_clki_div2_clk);
+ ret = clk_set_parent(&sh7372_pllc2_clk, &sh7372_dv_clki_div2_clk);
if (ret < 0) {
- pr_err("Cannot set PLLC2 parent: %d, %d users\n", ret, pllc2_clk.usecount);
+ pr_err("Cannot set PLLC2 parent: %d, %d users\n", ret, sh7372_pllc2_clk.usecount);
goto out;
}
- pr_debug("PLLC2 initial frequency %lu\n", clk_get_rate(&pllc2_clk));
+ pr_debug("PLLC2 initial frequency %lu\n", clk_get_rate(&sh7372_pllc2_clk));
- rate = clk_round_rate(&pllc2_clk, 594000000);
+ rate = clk_round_rate(&sh7372_pllc2_clk, 594000000);
if (rate < 0) {
pr_err("Cannot get suitable rate: %ld\n", rate);
ret = rate;
goto out;
}
- ret = clk_set_rate(&pllc2_clk, rate);
+ ret = clk_set_rate(&sh7372_pllc2_clk, rate);
if (ret < 0) {
pr_err("Cannot set rate %ld: %d\n", rate, ret);
goto out;
}
+ ret = clk_enable(&sh7372_pllc2_clk);
+ if (ret < 0) {
+ pr_err("Cannot enable pllc2 clock\n");
+ goto out;
+ }
pr_debug("PLLC2 set frequency %lu\n", rate);
- ret = clk_set_parent(hdmi_ick, &pllc2_clk);
+ ret = clk_set_parent(hdmi_ick, &sh7372_pllc2_clk);
if (ret < 0) {
pr_err("Cannot set HDMI parent: %d\n", ret);
goto out;
@@ -752,11 +990,32 @@ out:
device_initcall(hdmi_init_pm_clock);
+static int __init fsi_init_pm_clock(void)
+{
+ struct clk *fsia_ick;
+ int ret;
+
+ fsia_ick = clk_get(&fsi_device.dev, "icka");
+ if (IS_ERR(fsia_ick)) {
+ ret = PTR_ERR(fsia_ick);
+ pr_err("Cannot get FSI ICK: %d\n", ret);
+ return ret;
+ }
+
+ ret = clk_set_parent(fsia_ick, &sh7372_fsiack_clk);
+ if (ret < 0)
+ pr_err("Cannot set FSI-A parent: %d\n", ret);
+
+ clk_put(fsia_ick);
+
+ return ret;
+}
+device_initcall(fsi_init_pm_clock);
+
/*
* FIXME !!
*
* gpio_no_direction
- * gpio_pull_up
* are quick_hack.
*
* current gpio frame work doesn't have
@@ -768,49 +1027,37 @@ static void __init gpio_no_direction(u32 addr)
__raw_writeb(0x00, addr);
}
-static void __init gpio_pull_up(u32 addr)
-{
- u8 data = __raw_readb(addr);
-
- data &= 0x0F;
- data |= 0xC0;
- __raw_writeb(data, addr);
-}
-
/* TouchScreen */
+#ifdef CONFIG_AP4EVB_QHD
+# define GPIO_TSC_IRQ GPIO_FN_IRQ28_123
+# define GPIO_TSC_PORT GPIO_PORT123
+#else /* WVGA */
+# define GPIO_TSC_IRQ GPIO_FN_IRQ7_40
+# define GPIO_TSC_PORT GPIO_PORT40
+#endif
+
#define IRQ28 evt2irq(0x3380) /* IRQ28A */
#define IRQ7 evt2irq(0x02e0) /* IRQ7A */
static int ts_get_pendown_state(void)
{
- int val1, val2;
+ int val;
- gpio_free(GPIO_FN_IRQ28_123);
- gpio_free(GPIO_FN_IRQ7_40);
+ gpio_free(GPIO_TSC_IRQ);
- gpio_request(GPIO_PORT123, NULL);
- gpio_request(GPIO_PORT40, NULL);
+ gpio_request(GPIO_TSC_PORT, NULL);
- gpio_direction_input(GPIO_PORT123);
- gpio_direction_input(GPIO_PORT40);
+ gpio_direction_input(GPIO_TSC_PORT);
- val1 = gpio_get_value(GPIO_PORT123);
- val2 = gpio_get_value(GPIO_PORT40);
+ val = gpio_get_value(GPIO_TSC_PORT);
- gpio_request(GPIO_FN_IRQ28_123, NULL); /* for QHD */
- gpio_request(GPIO_FN_IRQ7_40, NULL); /* for WVGA */
+ gpio_request(GPIO_TSC_IRQ, NULL);
- return val1 ^ val2;
+ return !val;
}
-#define PORT40CR 0xE6051028
-#define PORT123CR 0xE605007B
static int ts_init(void)
{
- gpio_request(GPIO_FN_IRQ28_123, NULL); /* for QHD */
- gpio_request(GPIO_FN_IRQ7_40, NULL); /* for WVGA */
-
- gpio_pull_up(PORT40CR);
- gpio_pull_up(PORT123CR);
+ gpio_request(GPIO_TSC_IRQ, NULL);
return 0;
}
@@ -865,6 +1112,7 @@ static void __init ap4evb_map_io(void)
#define GPIO_PORT9CR 0xE6051009
#define GPIO_PORT10CR 0xE605100A
+#define USCCR1 0xE6058144
static void __init ap4evb_init(void)
{
u32 srcr4;
@@ -935,7 +1183,7 @@ static void __init ap4evb_init(void)
/* setup USB phy */
__raw_writew(0x8a0a, 0xE6058130); /* USBCR2 */
- /* enable FSI2 */
+ /* enable FSI2 port A (ak4643) */
gpio_request(GPIO_FN_FSIAIBT, NULL);
gpio_request(GPIO_FN_FSIAILR, NULL);
gpio_request(GPIO_FN_FSIAISLD, NULL);
@@ -948,6 +1196,14 @@ static void __init ap4evb_init(void)
gpio_no_direction(GPIO_PORT9CR); /* FSIAOBT needs no direction */
gpio_no_direction(GPIO_PORT10CR); /* FSIAOLR needs no direction */
+ /* card detect pin for MMC slot (CN7) */
+ gpio_request(GPIO_PORT41, NULL);
+ gpio_direction_input(GPIO_PORT41);
+
+ /* setup FSI2 port B (HDMI) */
+ gpio_request(GPIO_FN_FSIBCK, NULL);
+ __raw_writew(__raw_readw(USCCR1) & ~(1 << 6), USCCR1); /* use SPDIF */
+
/* set SPU2 clock to 119.6 MHz */
clk = clk_get(NULL, "spu_clk");
if (!IS_ERR(clk)) {
@@ -955,14 +1211,6 @@ static void __init ap4evb_init(void)
clk_put(clk);
}
- /* change parent of FSI A */
- clk = clk_get(NULL, "fsia_clk");
- if (!IS_ERR(clk)) {
- clk_register(&fsiackcr_clk);
- clk_set_parent(clk, &fsiackcr_clk);
- clk_put(clk);
- }
-
/*
* set irq priority, to avoid sound chopping
* when NFS rootfs is used
@@ -977,8 +1225,10 @@ static void __init ap4evb_init(void)
ARRAY_SIZE(i2c1_devices));
#ifdef CONFIG_AP4EVB_QHD
+
/*
- * QHD
+ * For QHD Panel (MIPI-DSI, CONFIG_AP4EVB_QHD=y) and
+ * IRQ28 for Touch Panel, set dip switches S3, S43 to OFF, ON.
*/
/* enable KEYSC */
@@ -1004,17 +1254,6 @@ static void __init ap4evb_init(void)
lcdc_info.ch[0].interface_type = RGB24;
lcdc_info.ch[0].clock_divider = 1;
lcdc_info.ch[0].flags = LCDC_FLAGS_DWPOL;
- lcdc_info.ch[0].lcd_cfg.name = "R63302(QHD)";
- lcdc_info.ch[0].lcd_cfg.xres = 544;
- lcdc_info.ch[0].lcd_cfg.yres = 961;
- lcdc_info.ch[0].lcd_cfg.left_margin = 72;
- lcdc_info.ch[0].lcd_cfg.right_margin = 600;
- lcdc_info.ch[0].lcd_cfg.hsync_len = 16;
- lcdc_info.ch[0].lcd_cfg.upper_margin = 8;
- lcdc_info.ch[0].lcd_cfg.lower_margin = 8;
- lcdc_info.ch[0].lcd_cfg.vsync_len = 2;
- lcdc_info.ch[0].lcd_cfg.sync = FB_SYNC_VERT_HIGH_ACT |
- FB_SYNC_HOR_HIGH_ACT;
lcdc_info.ch[0].lcd_size_cfg.width = 44;
lcdc_info.ch[0].lcd_size_cfg.height = 79;
@@ -1022,8 +1261,10 @@ static void __init ap4evb_init(void)
#else
/*
- * WVGA
+ * For WVGA Panel (18-bit RGB, CONFIG_AP4EVB_WVGA=y) and
+ * IRQ7 for Touch Panel, set dip switches S3, S43 to ON, OFF.
*/
+
gpio_request(GPIO_FN_LCDD17, NULL);
gpio_request(GPIO_FN_LCDD16, NULL);
gpio_request(GPIO_FN_LCDD15, NULL);
@@ -1055,16 +1296,6 @@ static void __init ap4evb_init(void)
lcdc_info.ch[0].interface_type = RGB18;
lcdc_info.ch[0].clock_divider = 2;
lcdc_info.ch[0].flags = 0;
- lcdc_info.ch[0].lcd_cfg.name = "WVGA Panel";
- lcdc_info.ch[0].lcd_cfg.xres = 800;
- lcdc_info.ch[0].lcd_cfg.yres = 480;
- lcdc_info.ch[0].lcd_cfg.left_margin = 220;
- lcdc_info.ch[0].lcd_cfg.right_margin = 110;
- lcdc_info.ch[0].lcd_cfg.hsync_len = 70;
- lcdc_info.ch[0].lcd_cfg.upper_margin = 20;
- lcdc_info.ch[0].lcd_cfg.lower_margin = 5;
- lcdc_info.ch[0].lcd_cfg.vsync_len = 5;
- lcdc_info.ch[0].lcd_cfg.sync = 0;
lcdc_info.ch[0].lcd_size_cfg.width = 152;
lcdc_info.ch[0].lcd_size_cfg.height = 91;
@@ -1075,6 +1306,23 @@ static void __init ap4evb_init(void)
i2c_register_board_info(0, &tsc_device, 1);
#endif /* CONFIG_AP4EVB_QHD */
+ /* CEU */
+
+ /*
+ * TODO: reserve memory for V4L2 DMA buffers, when a suitable API
+ * becomes available
+ */
+
+ /* MIPI-CSI stuff */
+ gpio_request(GPIO_FN_VIO_CKO, NULL);
+
+ clk = clk_get(NULL, "vck1_clk");
+ if (!IS_ERR(clk)) {
+ clk_set_rate(clk, clk_round_rate(clk, 13000000));
+ clk_enable(clk);
+ clk_put(clk);
+ }
+
sh7372_add_standard_devices();
/* HDMI */
@@ -1097,7 +1345,7 @@ static void __init ap4evb_timer_init(void)
shmobile_timer.init();
/* External clock source */
- clk_set_rate(&dv_clki_clk, 27000000);
+ clk_set_rate(&sh7372_dv_clki_clk, 27000000);
}
static struct sys_timer ap4evb_timer = {
diff --git a/arch/arm/mach-shmobile/clock-sh7367.c b/arch/arm/mach-shmobile/clock-sh7367.c
index b6454c9f2abb..9f78729098f2 100644
--- a/arch/arm/mach-shmobile/clock-sh7367.c
+++ b/arch/arm/mach-shmobile/clock-sh7367.c
@@ -321,7 +321,7 @@ static struct clk_lookup lookups[] = {
CLKDEV_DEV_ID("sh-sci.3", &mstp_clks[SYMSTP001]), /* SCIFA3 */
CLKDEV_DEV_ID("sh-sci.4", &mstp_clks[SYMSTP000]), /* SCIFA4 */
CLKDEV_DEV_ID("sh_siu", &mstp_clks[SYMSTP231]), /* SIU */
- CLKDEV_CON_ID("cmt1", &mstp_clks[SYMSTP229]), /* CMT10 */
+ CLKDEV_DEV_ID("sh_cmt.10", &mstp_clks[SYMSTP229]), /* CMT10 */
CLKDEV_DEV_ID("sh_irda", &mstp_clks[SYMSTP225]), /* IRDA */
CLKDEV_DEV_ID("i2c-sh_mobile.1", &mstp_clks[SYMSTP223]), /* IIC1 */
CLKDEV_DEV_ID("r8a66597_hcd.0", &mstp_clks[SYMSTP222]), /* USBHS */
diff --git a/arch/arm/mach-shmobile/clock-sh7372.c b/arch/arm/mach-shmobile/clock-sh7372.c
index 759468992ad2..3aa026069435 100644
--- a/arch/arm/mach-shmobile/clock-sh7372.c
+++ b/arch/arm/mach-shmobile/clock-sh7372.c
@@ -50,8 +50,11 @@
#define SMSTPCR3 0xe615013c
#define SMSTPCR4 0xe6150140
+#define FSIDIVA 0xFE1F8000
+#define FSIDIVB 0xFE1F8008
+
/* Platforms must set frequency on their DV_CLKI pin */
-struct clk dv_clki_clk = {
+struct clk sh7372_dv_clki_clk = {
};
/* Fixed 32 KHz root clock from EXTALR pin */
@@ -86,9 +89,9 @@ static struct clk_ops div2_clk_ops = {
};
/* Divide dv_clki by two */
-struct clk dv_clki_div2_clk = {
+struct clk sh7372_dv_clki_div2_clk = {
.ops = &div2_clk_ops,
- .parent = &dv_clki_clk,
+ .parent = &sh7372_dv_clki_clk,
};
/* Divide extal1 by two */
@@ -150,7 +153,7 @@ static struct clk pllc1_div2_clk = {
static struct clk *pllc2_parent[] = {
[0] = &extal1_div2_clk,
[1] = &extal2_div2_clk,
- [2] = &dv_clki_div2_clk,
+ [2] = &sh7372_dv_clki_div2_clk,
};
/* Only multipliers 20 * 2 to 46 * 2 are valid, last entry for CPUFREQ_TABLE_END */
@@ -217,8 +220,7 @@ static void pllc2_disable(struct clk *clk)
__raw_writel(__raw_readl(PLLC2CR) & ~0x80000000, PLLC2CR);
}
-static int pllc2_set_rate(struct clk *clk,
- unsigned long rate, int algo_id)
+static int pllc2_set_rate(struct clk *clk, unsigned long rate)
{
unsigned long value;
int idx;
@@ -227,21 +229,13 @@ static int pllc2_set_rate(struct clk *clk,
if (idx < 0)
return idx;
- if (rate == clk->parent->rate) {
- pllc2_disable(clk);
- return 0;
- }
+ if (rate == clk->parent->rate)
+ return -EINVAL;
value = __raw_readl(PLLC2CR) & ~(0x3f << 24);
- if (value & 0x80000000)
- pllc2_disable(clk);
-
__raw_writel((value & ~0x80000000) | ((idx + 19) << 24), PLLC2CR);
- if (value & 0x80000000)
- return pllc2_enable(clk);
-
return 0;
}
@@ -284,27 +278,37 @@ static struct clk_ops pllc2_clk_ops = {
.set_parent = pllc2_set_parent,
};
-struct clk pllc2_clk = {
+struct clk sh7372_pllc2_clk = {
.ops = &pllc2_clk_ops,
.parent = &extal1_div2_clk,
.freq_table = pllc2_freq_table,
+ .nr_freqs = ARRAY_SIZE(pllc2_freq_table) - 1,
.parent_table = pllc2_parent,
.parent_num = ARRAY_SIZE(pllc2_parent),
};
+/* External input clock (pin name: FSIACK/FSIBCK) */
+struct clk sh7372_fsiack_clk = {
+};
+
+struct clk sh7372_fsibck_clk = {
+};
+
static struct clk *main_clks[] = {
- &dv_clki_clk,
+ &sh7372_dv_clki_clk,
&r_clk,
&sh7372_extal1_clk,
&sh7372_extal2_clk,
- &dv_clki_div2_clk,
+ &sh7372_dv_clki_div2_clk,
&extal1_div2_clk,
&extal2_div2_clk,
&extal2_div4_clk,
&pllc0_clk,
&pllc1_clk,
&pllc1_div2_clk,
- &pllc2_clk,
+ &sh7372_pllc2_clk,
+ &sh7372_fsiack_clk,
+ &sh7372_fsibck_clk,
};
static void div4_kick(struct clk *clk)
@@ -357,7 +361,7 @@ static struct clk div4_clks[DIV4_NR] = {
};
enum { DIV6_VCK1, DIV6_VCK2, DIV6_VCK3, DIV6_FMSI, DIV6_FMSO,
- DIV6_FSIA, DIV6_FSIB, DIV6_SUB, DIV6_SPU,
+ DIV6_SUB, DIV6_SPU,
DIV6_VOU, DIV6_DSIT, DIV6_DSI0P, DIV6_DSI1P,
DIV6_NR };
@@ -367,8 +371,6 @@ static struct clk div6_clks[DIV6_NR] = {
[DIV6_VCK3] = SH_CLK_DIV6(&pllc1_div2_clk, VCLKCR3, 0),
[DIV6_FMSI] = SH_CLK_DIV6(&pllc1_div2_clk, FMSICKCR, 0),
[DIV6_FMSO] = SH_CLK_DIV6(&pllc1_div2_clk, FMSOCKCR, 0),
- [DIV6_FSIA] = SH_CLK_DIV6(&pllc1_div2_clk, FSIACKCR, 0),
- [DIV6_FSIB] = SH_CLK_DIV6(&pllc1_div2_clk, FSIBCKCR, 0),
[DIV6_SUB] = SH_CLK_DIV6(&sh7372_extal2_clk, SUBCKCR, 0),
[DIV6_SPU] = SH_CLK_DIV6(&pllc1_div2_clk, SPUCKCR, 0),
[DIV6_VOU] = SH_CLK_DIV6(&pllc1_div2_clk, VOUCKCR, 0),
@@ -377,24 +379,129 @@ static struct clk div6_clks[DIV6_NR] = {
[DIV6_DSI1P] = SH_CLK_DIV6(&pllc1_div2_clk, DSI1PCKCR, 0),
};
-enum { DIV6_HDMI, DIV6_REPARENT_NR };
+enum { DIV6_HDMI, DIV6_FSIA, DIV6_FSIB, DIV6_REPARENT_NR };
/* Indices are important - they are the actual src selecting values */
static struct clk *hdmi_parent[] = {
[0] = &pllc1_div2_clk,
- [1] = &pllc2_clk,
- [2] = &dv_clki_clk,
+ [1] = &sh7372_pllc2_clk,
+ [2] = &sh7372_dv_clki_clk,
[3] = NULL, /* pllc2_div4 not implemented yet */
};
+static struct clk *fsiackcr_parent[] = {
+ [0] = &pllc1_div2_clk,
+ [1] = &sh7372_pllc2_clk,
+ [2] = &sh7372_fsiack_clk, /* external input for FSI A */
+ [3] = NULL, /* setting prohibited */
+};
+
+static struct clk *fsibckcr_parent[] = {
+ [0] = &pllc1_div2_clk,
+ [1] = &sh7372_pllc2_clk,
+ [2] = &sh7372_fsibck_clk, /* external input for FSI B */
+ [3] = NULL, /* setting prohibited */
+};
+
static struct clk div6_reparent_clks[DIV6_REPARENT_NR] = {
[DIV6_HDMI] = SH_CLK_DIV6_EXT(&pllc1_div2_clk, HDMICKCR, 0,
hdmi_parent, ARRAY_SIZE(hdmi_parent), 6, 2),
+ [DIV6_FSIA] = SH_CLK_DIV6_EXT(&pllc1_div2_clk, FSIACKCR, 0,
+ fsiackcr_parent, ARRAY_SIZE(fsiackcr_parent), 6, 2),
+ [DIV6_FSIB] = SH_CLK_DIV6_EXT(&pllc1_div2_clk, FSIBCKCR, 0,
+ fsibckcr_parent, ARRAY_SIZE(fsibckcr_parent), 6, 2),
+};
+
+/* FSI DIV */
+static unsigned long fsidiv_recalc(struct clk *clk)
+{
+ unsigned long value;
+
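+ /* FSIDIV layout as used here: divide ratio in bits [31:16], enable bits in [1:0] */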
+ value = __raw_readl(clk->mapping->base);
+
+ if ((value & 0x3) != 0x3)
+ return 0;
+
+ value >>= 16;
+ if (value < 2)
+ return 0;
+
+ return clk->parent->rate / value;
+}
+
+static long fsidiv_round_rate(struct clk *clk, unsigned long rate)
+{
+ return clk_rate_div_range_round(clk, 2, 0xffff, rate);
+}
+
+static void fsidiv_disable(struct clk *clk)
+{
+ __raw_writel(0, clk->mapping->base);
+}
+
+static int fsidiv_enable(struct clk *clk)
+{
+ unsigned long value;
+
+ value = __raw_readl(clk->mapping->base) >> 16;
+ if (value < 2)
+ return -EIO;
+
+ __raw_writel((value << 16) | 0x3, clk->mapping->base);
+
+ return 0;
+}
+
+static int fsidiv_set_rate(struct clk *clk, unsigned long rate)
+{
+ int idx;
+
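+ /* hardware divide ratio is parent rate / requested rate, valid range 2..0xffff */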
+ idx = (clk->parent->rate / rate) & 0xffff;
+ if (idx < 2)
+ return -EINVAL;
+
+ __raw_writel(idx << 16, clk->mapping->base);
+ return 0;
+}
+
+static struct clk_ops fsidiv_clk_ops = {
+ .recalc = fsidiv_recalc,
+ .round_rate = fsidiv_round_rate,
+ .set_rate = fsidiv_set_rate,
+ .enable = fsidiv_enable,
+ .disable = fsidiv_disable,
+};
+
+static struct clk_mapping sh7372_fsidiva_clk_mapping = {
+ .phys = FSIDIVA,
+ .len = 8,
+};
+
+struct clk sh7372_fsidiva_clk = {
+ .ops = &fsidiv_clk_ops,
+ .parent = &div6_reparent_clks[DIV6_FSIA], /* late install */
+ .mapping = &sh7372_fsidiva_clk_mapping,
+};
+
+static struct clk_mapping sh7372_fsidivb_clk_mapping = {
+ .phys = FSIDIVB,
+ .len = 8,
+};
+
+struct clk sh7372_fsidivb_clk = {
+ .ops = &fsidiv_clk_ops,
+ .parent = &div6_reparent_clks[DIV6_FSIB], /* late install */
+ .mapping = &sh7372_fsidivb_clk_mapping,
+};
+
+static struct clk *late_main_clks[] = {
+ &sh7372_fsidiva_clk,
+ &sh7372_fsidivb_clk,
};
enum { MSTP001,
MSTP131, MSTP130,
- MSTP129, MSTP128, MSTP127, MSTP126,
+ MSTP129, MSTP128, MSTP127, MSTP126, MSTP125,
MSTP118, MSTP117, MSTP116,
MSTP106, MSTP101, MSTP100,
MSTP223,
@@ -414,6 +521,7 @@ static struct clk mstp_clks[MSTP_NR] = {
[MSTP128] = MSTP(&div4_clks[DIV4_B], SMSTPCR1, 28, 0), /* VEU0 */
[MSTP127] = MSTP(&div4_clks[DIV4_B], SMSTPCR1, 27, 0), /* CEU */
[MSTP126] = MSTP(&div4_clks[DIV4_B], SMSTPCR1, 26, 0), /* CSI2 */
+ [MSTP125] = MSTP(&div6_clks[DIV6_SUB], SMSTPCR1, 25, 0), /* TMU0 */
[MSTP118] = MSTP(&div4_clks[DIV4_B], SMSTPCR1, 18, 0), /* DSITX */
[MSTP117] = MSTP(&div4_clks[DIV4_B], SMSTPCR1, 17, 0), /* LCDC1 */
[MSTP116] = MSTP(&div6_clks[DIV6_SUB], SMSTPCR1, 16, 0), /* IIC0 */
@@ -429,7 +537,7 @@ static struct clk mstp_clks[MSTP_NR] = {
[MSTP201] = MSTP(&div6_clks[DIV6_SUB], SMSTPCR2, 1, 0), /* SCIFA3 */
[MSTP200] = MSTP(&div6_clks[DIV6_SUB], SMSTPCR2, 0, 0), /* SCIFA4 */
[MSTP329] = MSTP(&r_clk, SMSTPCR3, 29, 0), /* CMT10 */
- [MSTP328] = MSTP(&div6_clks[DIV6_SPU], SMSTPCR3, 28, 0), /* FSIA */
+ [MSTP328] = MSTP(&div6_clks[DIV6_SPU], SMSTPCR3, 28, 0), /* FSI2 */
[MSTP323] = MSTP(&div6_clks[DIV6_SUB], SMSTPCR3, 23, 0), /* IIC1 */
[MSTP322] = MSTP(&div6_clks[DIV6_SUB], SMSTPCR3, 22, 0), /* USB0 */
[MSTP314] = MSTP(&div4_clks[DIV4_HP], SMSTPCR3, 14, 0), /* SDHI0 */
@@ -445,10 +553,11 @@ static struct clk mstp_clks[MSTP_NR] = {
#define CLKDEV_CON_ID(_id, _clk) { .con_id = _id, .clk = _clk }
#define CLKDEV_DEV_ID(_id, _clk) { .dev_id = _id, .clk = _clk }
+#define CLKDEV_ICK_ID(_cid, _did, _clk) { .con_id = _cid, .dev_id = _did, .clk = _clk }
static struct clk_lookup lookups[] = {
/* main clocks */
- CLKDEV_CON_ID("dv_clki_div2_clk", &dv_clki_div2_clk),
+ CLKDEV_CON_ID("dv_clki_div2_clk", &sh7372_dv_clki_div2_clk),
CLKDEV_CON_ID("r_clk", &r_clk),
CLKDEV_CON_ID("extal1", &sh7372_extal1_clk),
CLKDEV_CON_ID("extal2", &sh7372_extal2_clk),
@@ -458,7 +567,7 @@ static struct clk_lookup lookups[] = {
CLKDEV_CON_ID("pllc0_clk", &pllc0_clk),
CLKDEV_CON_ID("pllc1_clk", &pllc1_clk),
CLKDEV_CON_ID("pllc1_div2_clk", &pllc1_div2_clk),
- CLKDEV_CON_ID("pllc2_clk", &pllc2_clk),
+ CLKDEV_CON_ID("pllc2_clk", &sh7372_pllc2_clk),
/* DIV4 clocks */
CLKDEV_CON_ID("i_clk", &div4_clks[DIV4_I]),
@@ -483,8 +592,6 @@ static struct clk_lookup lookups[] = {
CLKDEV_CON_ID("vck3_clk", &div6_clks[DIV6_VCK3]),
CLKDEV_CON_ID("fmsi_clk", &div6_clks[DIV6_FMSI]),
CLKDEV_CON_ID("fmso_clk", &div6_clks[DIV6_FMSO]),
- CLKDEV_CON_ID("fsia_clk", &div6_clks[DIV6_FSIA]),
- CLKDEV_CON_ID("fsib_clk", &div6_clks[DIV6_FSIB]),
CLKDEV_CON_ID("sub_clk", &div6_clks[DIV6_SUB]),
CLKDEV_CON_ID("spu_clk", &div6_clks[DIV6_SPU]),
CLKDEV_CON_ID("vou_clk", &div6_clks[DIV6_VOU]),
@@ -501,6 +608,8 @@ static struct clk_lookup lookups[] = {
CLKDEV_DEV_ID("uio_pdrv_genirq.1", &mstp_clks[MSTP128]), /* VEU0 */
CLKDEV_DEV_ID("sh_mobile_ceu.0", &mstp_clks[MSTP127]), /* CEU */
CLKDEV_DEV_ID("sh-mobile-csi2.0", &mstp_clks[MSTP126]), /* CSI2 */
+ CLKDEV_DEV_ID("sh_tmu.0", &mstp_clks[MSTP125]), /* TMU00 */
+ CLKDEV_DEV_ID("sh_tmu.1", &mstp_clks[MSTP125]), /* TMU01 */
CLKDEV_DEV_ID("sh-mipi-dsi.0", &mstp_clks[MSTP118]), /* DSITX */
CLKDEV_DEV_ID("sh_mobile_lcdc_fb.1", &mstp_clks[MSTP117]), /* LCDC1 */
CLKDEV_DEV_ID("i2c-sh_mobile.0", &mstp_clks[MSTP116]), /* IIC0 */
@@ -516,11 +625,11 @@ static struct clk_lookup lookups[] = {
CLKDEV_DEV_ID("sh-sci.2", &mstp_clks[MSTP202]), /* SCIFA2 */
CLKDEV_DEV_ID("sh-sci.3", &mstp_clks[MSTP201]), /* SCIFA3 */
CLKDEV_DEV_ID("sh-sci.4", &mstp_clks[MSTP200]), /* SCIFA4 */
- CLKDEV_CON_ID("cmt1", &mstp_clks[MSTP329]), /* CMT10 */
+ CLKDEV_DEV_ID("sh_cmt.10", &mstp_clks[MSTP329]), /* CMT10 */
CLKDEV_DEV_ID("sh_fsi2", &mstp_clks[MSTP328]), /* FSI2 */
CLKDEV_DEV_ID("i2c-sh_mobile.1", &mstp_clks[MSTP323]), /* IIC1 */
- CLKDEV_DEV_ID("r8a66597_hcd.0", &mstp_clks[MSTP323]), /* USB0 */
- CLKDEV_DEV_ID("r8a66597_udc.0", &mstp_clks[MSTP323]), /* USB0 */
+ CLKDEV_DEV_ID("r8a66597_hcd.0", &mstp_clks[MSTP322]), /* USB0 */
+ CLKDEV_DEV_ID("r8a66597_udc.0", &mstp_clks[MSTP322]), /* USB0 */
CLKDEV_DEV_ID("sh_mobile_sdhi.0", &mstp_clks[MSTP314]), /* SDHI0 */
CLKDEV_DEV_ID("sh_mobile_sdhi.1", &mstp_clks[MSTP313]), /* SDHI1 */
CLKDEV_DEV_ID("sh_mmcif.0", &mstp_clks[MSTP312]), /* MMC */
@@ -531,7 +640,10 @@ static struct clk_lookup lookups[] = {
CLKDEV_DEV_ID("r8a66597_hcd.1", &mstp_clks[MSTP406]), /* USB1 */
CLKDEV_DEV_ID("r8a66597_udc.1", &mstp_clks[MSTP406]), /* USB1 */
CLKDEV_DEV_ID("sh_keysc.0", &mstp_clks[MSTP403]), /* KEYSC */
- {.con_id = "ick", .dev_id = "sh-mobile-hdmi", .clk = &div6_reparent_clks[DIV6_HDMI]},
+
+ CLKDEV_ICK_ID("ick", "sh-mobile-hdmi", &div6_reparent_clks[DIV6_HDMI]),
+ CLKDEV_ICK_ID("icka", "sh_fsi2", &div6_reparent_clks[DIV6_FSIA]),
+ CLKDEV_ICK_ID("ickb", "sh_fsi2", &div6_reparent_clks[DIV6_FSIB]),
};
void __init sh7372_clock_init(void)
@@ -548,11 +660,14 @@ void __init sh7372_clock_init(void)
ret = sh_clk_div6_register(div6_clks, DIV6_NR);
if (!ret)
- ret = sh_clk_div6_reparent_register(div6_reparent_clks, DIV6_NR);
+ ret = sh_clk_div6_reparent_register(div6_reparent_clks, DIV6_REPARENT_NR);
if (!ret)
ret = sh_clk_mstp32_register(mstp_clks, MSTP_NR);
+ for (k = 0; !ret && (k < ARRAY_SIZE(late_main_clks)); k++)
+ ret = clk_register(late_main_clks[k]);
+
clkdev_add_table(lookups, ARRAY_SIZE(lookups));
if (!ret)
diff --git a/arch/arm/mach-shmobile/clock-sh7377.c b/arch/arm/mach-shmobile/clock-sh7377.c
index e007c28cf0a8..f91395aeb9ab 100644
--- a/arch/arm/mach-shmobile/clock-sh7377.c
+++ b/arch/arm/mach-shmobile/clock-sh7377.c
@@ -333,7 +333,7 @@ static struct clk_lookup lookups[] = {
CLKDEV_DEV_ID("sh-sci.3", &mstp_clks[MSTP201]), /* SCIFA3 */
CLKDEV_DEV_ID("sh-sci.4", &mstp_clks[MSTP200]), /* SCIFA4 */
CLKDEV_DEV_ID("sh-sci.6", &mstp_clks[MSTP331]), /* SCIFA6 */
- CLKDEV_CON_ID("cmt1", &mstp_clks[MSTP329]), /* CMT10 */
+ CLKDEV_DEV_ID("sh_cmt.10", &mstp_clks[MSTP329]), /* CMT10 */
CLKDEV_DEV_ID("sh_irda", &mstp_clks[MSTP325]), /* IRDA */
CLKDEV_DEV_ID("i2c-sh_mobile.1", &mstp_clks[MSTP323]), /* IIC1 */
CLKDEV_DEV_ID("r8a66597_hcd.0", &mstp_clks[MSTP322]), /* USBHS */
diff --git a/arch/arm/mach-shmobile/include/mach/gpio.h b/arch/arm/mach-shmobile/include/mach/gpio.h
index 5bc6bd444d72..2b1bb9e43dda 100644
--- a/arch/arm/mach-shmobile/include/mach/gpio.h
+++ b/arch/arm/mach-shmobile/include/mach/gpio.h
@@ -35,12 +35,12 @@ static inline int gpio_cansleep(unsigned gpio)
static inline int gpio_to_irq(unsigned gpio)
{
- return -ENOSYS;
+ return __gpio_to_irq(gpio);
}
static inline int irq_to_gpio(unsigned int irq)
{
- return -EINVAL;
+ return -ENOSYS;
}
#endif /* CONFIG_GPIOLIB */
diff --git a/arch/arm/mach-shmobile/include/mach/sh7372.h b/arch/arm/mach-shmobile/include/mach/sh7372.h
index 33e9700ded7e..e4f9004e7103 100644
--- a/arch/arm/mach-shmobile/include/mach/sh7372.h
+++ b/arch/arm/mach-shmobile/include/mach/sh7372.h
@@ -457,8 +457,14 @@ enum {
SHDMA_SLAVE_SDHI2_TX,
};
-extern struct clk dv_clki_clk;
-extern struct clk dv_clki_div2_clk;
-extern struct clk pllc2_clk;
+extern struct clk sh7372_extal1_clk;
+extern struct clk sh7372_extal2_clk;
+extern struct clk sh7372_dv_clki_clk;
+extern struct clk sh7372_dv_clki_div2_clk;
+extern struct clk sh7372_pllc2_clk;
+extern struct clk sh7372_fsiack_clk;
+extern struct clk sh7372_fsibck_clk;
+extern struct clk sh7372_fsidiva_clk;
+extern struct clk sh7372_fsidivb_clk;
#endif /* __ASM_SH7372_H__ */
diff --git a/arch/arm/mach-shmobile/intc-sh7372.c b/arch/arm/mach-shmobile/intc-sh7372.c
index e3551b56cd03..30b2f400666a 100644
--- a/arch/arm/mach-shmobile/intc-sh7372.c
+++ b/arch/arm/mach-shmobile/intc-sh7372.c
@@ -98,7 +98,7 @@ static struct intc_vect intca_vectors[] __initdata = {
INTC_VECT(IRQ14A, 0x03c0), INTC_VECT(IRQ15A, 0x03e0),
INTC_VECT(IRQ16A, 0x3200), INTC_VECT(IRQ17A, 0x3220),
INTC_VECT(IRQ18A, 0x3240), INTC_VECT(IRQ19A, 0x3260),
- INTC_VECT(IRQ20A, 0x3280), INTC_VECT(IRQ31A, 0x32a0),
+ INTC_VECT(IRQ20A, 0x3280), INTC_VECT(IRQ21A, 0x32a0),
INTC_VECT(IRQ22A, 0x32c0), INTC_VECT(IRQ23A, 0x32e0),
INTC_VECT(IRQ24A, 0x3300), INTC_VECT(IRQ25A, 0x3320),
INTC_VECT(IRQ26A, 0x3340), INTC_VECT(IRQ27A, 0x3360),
@@ -369,9 +369,13 @@ enum {
INTCS,
/* interrupt sources INTCS */
+
+ /* IRQ0S - IRQ31S */
VEU_VEU0, VEU_VEU1, VEU_VEU2, VEU_VEU3,
RTDMAC_1_DEI0, RTDMAC_1_DEI1, RTDMAC_1_DEI2, RTDMAC_1_DEI3,
CEU, BEU_BEU0, BEU_BEU1, BEU_BEU2,
+ /* MFI */
+ /* BBIF2 */
VPU,
TSIF1,
_3DG_SGX530,
@@ -379,13 +383,17 @@ enum {
IIC2_ALI2, IIC2_TACKI2, IIC2_WAITI2, IIC2_DTEI2,
IPMMU_IPMMUR, IPMMU_IPMMUR2,
RTDMAC_2_DEI4, RTDMAC_2_DEI5, RTDMAC_2_DADERR,
+ /* KEYSC */
+ /* TTI20 */
MSIOF,
IIC0_ALI0, IIC0_TACKI0, IIC0_WAITI0, IIC0_DTEI0,
TMU_TUNI0, TMU_TUNI1, TMU_TUNI2,
CMT0,
TSIF0,
+ /* CMT2 */
LMB,
CTI,
+ /* RWDT0 */
ICB,
JPU_JPEG,
LCDC,
@@ -397,11 +405,17 @@ enum {
CSIRX,
DSITX_DSITX0,
DSITX_DSITX1,
+ /* SPU2 */
+ /* FSI */
+ /* FMSI */
+ /* HDMI */
TMU1_TUNI0, TMU1_TUNI1, TMU1_TUNI2,
CMT4,
DSITX1_DSITX1_0,
DSITX1_DSITX1_1,
+ /* MFIS2 */
CPORTS2R,
+ /* CEC */
JPU6E,
/* interrupt groups INTCS */
@@ -410,12 +424,15 @@ enum {
};
static struct intc_vect intcs_vectors[] = {
+ /* IRQ0S - IRQ31S */
INTCS_VECT(VEU_VEU0, 0x700), INTCS_VECT(VEU_VEU1, 0x720),
INTCS_VECT(VEU_VEU2, 0x740), INTCS_VECT(VEU_VEU3, 0x760),
INTCS_VECT(RTDMAC_1_DEI0, 0x800), INTCS_VECT(RTDMAC_1_DEI1, 0x820),
INTCS_VECT(RTDMAC_1_DEI2, 0x840), INTCS_VECT(RTDMAC_1_DEI3, 0x860),
INTCS_VECT(CEU, 0x880), INTCS_VECT(BEU_BEU0, 0x8a0),
INTCS_VECT(BEU_BEU1, 0x8c0), INTCS_VECT(BEU_BEU2, 0x8e0),
+ /* MFI */
+ /* BBIF2 */
INTCS_VECT(VPU, 0x980),
INTCS_VECT(TSIF1, 0x9a0),
INTCS_VECT(_3DG_SGX530, 0x9e0),
@@ -425,14 +442,19 @@ static struct intc_vect intcs_vectors[] = {
INTCS_VECT(IPMMU_IPMMUR, 0xb00), INTCS_VECT(IPMMU_IPMMUR2, 0xb20),
INTCS_VECT(RTDMAC_2_DEI4, 0xb80), INTCS_VECT(RTDMAC_2_DEI5, 0xba0),
INTCS_VECT(RTDMAC_2_DADERR, 0xbc0),
+ /* KEYSC */
+ /* TTI20 */
+ INTCS_VECT(MSIOF, 0x0d20),
INTCS_VECT(IIC0_ALI0, 0xe00), INTCS_VECT(IIC0_TACKI0, 0xe20),
INTCS_VECT(IIC0_WAITI0, 0xe40), INTCS_VECT(IIC0_DTEI0, 0xe60),
INTCS_VECT(TMU_TUNI0, 0xe80), INTCS_VECT(TMU_TUNI1, 0xea0),
INTCS_VECT(TMU_TUNI2, 0xec0),
INTCS_VECT(CMT0, 0xf00),
INTCS_VECT(TSIF0, 0xf20),
+ /* CMT2 */
INTCS_VECT(LMB, 0xf60),
INTCS_VECT(CTI, 0x400),
+ /* RWDT0 */
INTCS_VECT(ICB, 0x480),
INTCS_VECT(JPU_JPEG, 0x560),
INTCS_VECT(LCDC, 0x580),
@@ -446,12 +468,18 @@ static struct intc_vect intcs_vectors[] = {
INTCS_VECT(CSIRX, 0x17a0),
INTCS_VECT(DSITX_DSITX0, 0x17c0),
INTCS_VECT(DSITX_DSITX1, 0x17e0),
+ /* SPU2 */
+ /* FSI */
+ /* FMSI */
+ /* HDMI */
INTCS_VECT(TMU1_TUNI0, 0x1900), INTCS_VECT(TMU1_TUNI1, 0x1920),
INTCS_VECT(TMU1_TUNI2, 0x1940),
INTCS_VECT(CMT4, 0x1980),
INTCS_VECT(DSITX1_DSITX1_0, 0x19a0),
INTCS_VECT(DSITX1_DSITX1_1, 0x19c0),
+ /* MFIS2 */
INTCS_VECT(CPORTS2R, 0x1a20),
+ /* CEC */
INTCS_VECT(JPU6E, 0x1a80),
INTC_VECT(INTCS, 0xf80),
diff --git a/arch/arm/mach-shmobile/pfc-sh7372.c b/arch/arm/mach-shmobile/pfc-sh7372.c
index ec420353f8e3..9c265dae138a 100644
--- a/arch/arm/mach-shmobile/pfc-sh7372.c
+++ b/arch/arm/mach-shmobile/pfc-sh7372.c
@@ -166,12 +166,12 @@ enum {
MSIOF2_TSYNC_MARK, MSIOF2_TSCK_MARK, MSIOF2_RXD_MARK,
MSIOF2_TXD_MARK,
- /* MSIOF3 */
+ /* BBIF1 */
BBIF1_RXD_MARK, BBIF1_TSYNC_MARK, BBIF1_TSCK_MARK,
BBIF1_TXD_MARK, BBIF1_RSCK_MARK, BBIF1_RSYNC_MARK,
BBIF1_FLOW_MARK, BB_RX_FLOW_N_MARK,
- /* MSIOF4 */
+ /* BBIF2 */
BBIF2_TSCK1_MARK, BBIF2_TSYNC1_MARK,
BBIF2_TXD1_MARK, BBIF2_RXD_MARK,
@@ -976,12 +976,12 @@ static struct pinmux_gpio pinmux_gpios[] = {
GPIO_FN(MSIOF2_TSYNC), GPIO_FN(MSIOF2_TSCK), GPIO_FN(MSIOF2_RXD),
GPIO_FN(MSIOF2_TXD),
- /* MSIOF3 */
+ /* BBIF1 */
GPIO_FN(BBIF1_RXD), GPIO_FN(BBIF1_TSYNC), GPIO_FN(BBIF1_TSCK),
GPIO_FN(BBIF1_TXD), GPIO_FN(BBIF1_RSCK), GPIO_FN(BBIF1_RSYNC),
GPIO_FN(BBIF1_FLOW), GPIO_FN(BB_RX_FLOW_N),
- /* MSIOF4 */
+ /* BBIF2 */
GPIO_FN(BBIF2_TSCK1), GPIO_FN(BBIF2_TSYNC1),
GPIO_FN(BBIF2_TXD1), GPIO_FN(BBIF2_RXD),
diff --git a/arch/arm/mach-shmobile/setup-sh7367.c b/arch/arm/mach-shmobile/setup-sh7367.c
index 3148c11a550e..003008c18360 100644
--- a/arch/arm/mach-shmobile/setup-sh7367.c
+++ b/arch/arm/mach-shmobile/setup-sh7367.c
@@ -154,7 +154,6 @@ static struct sh_timer_config cmt10_platform_data = {
.name = "CMT10",
.channel_offset = 0x10,
.timer_bit = 0,
- .clk = "r_clk",
.clockevent_rating = 125,
.clocksource_rating = 125,
};
diff --git a/arch/arm/mach-shmobile/setup-sh7372.c b/arch/arm/mach-shmobile/setup-sh7372.c
index e26686c9d0b6..564a6d0be473 100644
--- a/arch/arm/mach-shmobile/setup-sh7372.c
+++ b/arch/arm/mach-shmobile/setup-sh7372.c
@@ -158,7 +158,6 @@ static struct sh_timer_config cmt10_platform_data = {
.name = "CMT10",
.channel_offset = 0x10,
.timer_bit = 0,
- .clk = "cmt1",
.clockevent_rating = 125,
.clocksource_rating = 125,
};
@@ -186,6 +185,67 @@ static struct platform_device cmt10_device = {
.num_resources = ARRAY_SIZE(cmt10_resources),
};
+/* TMU */
+static struct sh_timer_config tmu00_platform_data = {
+ .name = "TMU00",
+ .channel_offset = 0x4,
+ .timer_bit = 0,
+ .clockevent_rating = 200,
+};
+
+static struct resource tmu00_resources[] = {
+ [0] = {
+ .name = "TMU00",
+ .start = 0xfff60008,
+ .end = 0xfff60013,
+ .flags = IORESOURCE_MEM,
+ },
+ [1] = {
+ .start = intcs_evt2irq(0xe80), /* TMU_TUNI0 */
+ .flags = IORESOURCE_IRQ,
+ },
+};
+
+static struct platform_device tmu00_device = {
+ .name = "sh_tmu",
+ .id = 0,
+ .dev = {
+ .platform_data = &tmu00_platform_data,
+ },
+ .resource = tmu00_resources,
+ .num_resources = ARRAY_SIZE(tmu00_resources),
+};
+
+static struct sh_timer_config tmu01_platform_data = {
+ .name = "TMU01",
+ .channel_offset = 0x10,
+ .timer_bit = 1,
+ .clocksource_rating = 200,
+};
+
+static struct resource tmu01_resources[] = {
+ [0] = {
+ .name = "TMU01",
+ .start = 0xfff60014,
+ .end = 0xfff6001f,
+ .flags = IORESOURCE_MEM,
+ },
+ [1] = {
+ .start = intcs_evt2irq(0xea0), /* TMU_TUNI1 */
+ .flags = IORESOURCE_IRQ,
+ },
+};
+
+static struct platform_device tmu01_device = {
+ .name = "sh_tmu",
+ .id = 1,
+ .dev = {
+ .platform_data = &tmu01_platform_data,
+ },
+ .resource = tmu01_resources,
+ .num_resources = ARRAY_SIZE(tmu01_resources),
+};
+
/* I2C */
static struct resource iic0_resources[] = {
[0] = {
@@ -419,14 +479,14 @@ static struct resource sh7372_dmae0_resources[] = {
},
{
/* DMA error IRQ */
- .start = 246,
- .end = 246,
+ .start = evt2irq(0x20c0),
+ .end = evt2irq(0x20c0),
.flags = IORESOURCE_IRQ,
},
{
/* IRQ for channels 0-5 */
- .start = 240,
- .end = 245,
+ .start = evt2irq(0x2000),
+ .end = evt2irq(0x20a0),
.flags = IORESOURCE_IRQ,
},
};
@@ -447,14 +507,14 @@ static struct resource sh7372_dmae1_resources[] = {
},
{
/* DMA error IRQ */
- .start = 254,
- .end = 254,
+ .start = evt2irq(0x21c0),
+ .end = evt2irq(0x21c0),
.flags = IORESOURCE_IRQ,
},
{
/* IRQ for channels 0-5 */
- .start = 248,
- .end = 253,
+ .start = evt2irq(0x2100),
+ .end = evt2irq(0x21a0),
.flags = IORESOURCE_IRQ,
},
};
@@ -475,14 +535,14 @@ static struct resource sh7372_dmae2_resources[] = {
},
{
/* DMA error IRQ */
- .start = 262,
- .end = 262,
+ .start = evt2irq(0x22c0),
+ .end = evt2irq(0x22c0),
.flags = IORESOURCE_IRQ,
},
{
/* IRQ for channels 0-5 */
- .start = 256,
- .end = 261,
+ .start = evt2irq(0x2200),
+ .end = evt2irq(0x22a0),
.flags = IORESOURCE_IRQ,
},
};
@@ -526,6 +586,11 @@ static struct platform_device *sh7372_early_devices[] __initdata = {
&scif5_device,
&scif6_device,
&cmt10_device,
+ &tmu00_device,
+ &tmu01_device,
+};
+
+static struct platform_device *sh7372_late_devices[] __initdata = {
&iic0_device,
&iic1_device,
&dma0_device,
@@ -537,6 +602,9 @@ void __init sh7372_add_standard_devices(void)
{
platform_add_devices(sh7372_early_devices,
ARRAY_SIZE(sh7372_early_devices));
+
+ platform_add_devices(sh7372_late_devices,
+ ARRAY_SIZE(sh7372_late_devices));
}
void __init sh7372_add_early_devices(void)
diff --git a/arch/arm/mach-shmobile/setup-sh7377.c b/arch/arm/mach-shmobile/setup-sh7377.c
index bb4adf17dbf4..575dbd6c2f1d 100644
--- a/arch/arm/mach-shmobile/setup-sh7377.c
+++ b/arch/arm/mach-shmobile/setup-sh7377.c
@@ -172,7 +172,6 @@ static struct sh_timer_config cmt10_platform_data = {
.name = "CMT10",
.channel_offset = 0x10,
.timer_bit = 0,
- .clk = "r_clk",
.clockevent_rating = 125,
.clocksource_rating = 125,
};
diff --git a/arch/arm/mach-tegra/Kconfig b/arch/arm/mach-tegra/Kconfig
index a57713c1954a..acd9552f8ada 100644
--- a/arch/arm/mach-tegra/Kconfig
+++ b/arch/arm/mach-tegra/Kconfig
@@ -16,6 +16,10 @@ config ARCH_TEGRA_2x_SOC
endchoice
+config TEGRA_PCI
+ bool "PCI Express support"
+ select PCI
+
comment "Tegra board type"
config MACH_HARMONY
@@ -47,4 +51,11 @@ config TEGRA_DEBUG_UARTE
endchoice
+config TEGRA_SYSTEM_DMA
+ bool "Enable system DMA driver for NVIDIA Tegra SoCs"
+ default y
+ help
+ Adds system DMA functionality for NVIDIA Tegra SoCs, used by
+ several Tegra device drivers.
+
endif
diff --git a/arch/arm/mach-tegra/Makefile b/arch/arm/mach-tegra/Makefile
index 51e9370eed99..cdbc68e4c0ca 100644
--- a/arch/arm/mach-tegra/Makefile
+++ b/arch/arm/mach-tegra/Makefile
@@ -1,14 +1,21 @@
obj-y += common.o
obj-y += io.o
-obj-y += irq.o
+obj-y += irq.o legacy_irq.o
obj-y += clock.o
obj-y += timer.o
obj-y += gpio.o
obj-y += pinmux.o
+obj-y += fuse.o
obj-$(CONFIG_ARCH_TEGRA_2x_SOC) += clock.o
obj-$(CONFIG_ARCH_TEGRA_2x_SOC) += tegra2_clocks.o
+obj-$(CONFIG_ARCH_TEGRA_2x_SOC) += tegra2_dvfs.o
+obj-$(CONFIG_ARCH_TEGRA_2x_SOC) += pinmux-t2-tables.o
obj-$(CONFIG_SMP) += platsmp.o localtimer.o headsmp.o
obj-$(CONFIG_HOTPLUG_CPU) += hotplug.o
+obj-$(CONFIG_TEGRA_SYSTEM_DMA) += dma.o
+obj-$(CONFIG_CPU_FREQ) += cpu-tegra.o
+obj-$(CONFIG_TEGRA_PCI) += pcie.o
obj-${CONFIG_MACH_HARMONY} += board-harmony.o
obj-${CONFIG_MACH_HARMONY} += board-harmony-pinmux.o
+obj-${CONFIG_MACH_HARMONY} += board-harmony-pcie.o
diff --git a/arch/arm/mach-tegra/board-harmony-pcie.c b/arch/arm/mach-tegra/board-harmony-pcie.c
new file mode 100644
index 000000000000..f7e7d4514b6a
--- /dev/null
+++ b/arch/arm/mach-tegra/board-harmony-pcie.c
@@ -0,0 +1,57 @@
+/*
+ * arch/arm/mach-tegra/board-harmony-pcie.c
+ *
+ * Copyright (C) 2010 CompuLab, Ltd.
+ * Mike Rapoport <mike@compulab.co.il>
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <linux/kernel.h>
+#include <linux/gpio.h>
+#include <linux/err.h>
+#include <linux/regulator/consumer.h>
+
+#include <asm/mach-types.h>
+
+#include <mach/pinmux.h>
+#include "board.h"
+
+#ifdef CONFIG_TEGRA_PCI
+
+static int __init harmony_pcie_init(void)
+{
+ int err;
+
+ if (!machine_is_harmony())
+ return 0;
+
+ tegra_pinmux_set_tristate(TEGRA_PINGROUP_GPV, TEGRA_TRI_NORMAL);
+ tegra_pinmux_set_tristate(TEGRA_PINGROUP_SLXA, TEGRA_TRI_NORMAL);
+ tegra_pinmux_set_tristate(TEGRA_PINGROUP_SLXK, TEGRA_TRI_NORMAL);
+
+ err = tegra_pcie_init(true, true);
+ if (err)
+ goto err_pcie;
+
+ return 0;
+
+err_pcie:
+ tegra_pinmux_set_tristate(TEGRA_PINGROUP_GPV, TEGRA_TRI_TRISTATE);
+ tegra_pinmux_set_tristate(TEGRA_PINGROUP_SLXA, TEGRA_TRI_TRISTATE);
+ tegra_pinmux_set_tristate(TEGRA_PINGROUP_SLXK, TEGRA_TRI_TRISTATE);
+
+ return err;
+}
+
+subsys_initcall(harmony_pcie_init);
+
+#endif
diff --git a/arch/arm/mach-tegra/board.h b/arch/arm/mach-tegra/board.h
index 3d06354136f2..0de565ca37c5 100644
--- a/arch/arm/mach-tegra/board.h
+++ b/arch/arm/mach-tegra/board.h
@@ -27,6 +27,7 @@ void __init tegra_common_init(void);
void __init tegra_map_common_io(void);
void __init tegra_init_irq(void);
void __init tegra_init_clock(void);
+int __init tegra_pcie_init(bool init_port0, bool init_port1);
extern struct sys_timer tegra_timer;
#endif
diff --git a/arch/arm/mach-tegra/clock.c b/arch/arm/mach-tegra/clock.c
index 03ad578349b9..ae19f95585be 100644
--- a/arch/arm/mach-tegra/clock.c
+++ b/arch/arm/mach-tegra/clock.c
@@ -24,13 +24,80 @@
#include <linux/debugfs.h>
#include <linux/slab.h>
#include <linux/seq_file.h>
+#include <linux/regulator/consumer.h>
#include <asm/clkdev.h>
#include "clock.h"
+#include "board.h"
+#include "fuse.h"
static LIST_HEAD(clocks);
static DEFINE_SPINLOCK(clock_lock);
+static DEFINE_MUTEX(dvfs_lock);
+
+static int clk_is_dvfs(struct clk *c)
+{
+ return (c->dvfs != NULL);
+}
+
+static int dvfs_set_rate(struct dvfs *d, unsigned long rate)
+{
+ struct dvfs_table *t;
+
+ if (d->table == NULL)
+ return -ENODEV;
+
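+ /* table is zero-terminated; pick the first entry (assumed ascending by rate) that covers the requested rate */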
+ for (t = d->table; t->rate != 0; t++) {
+ if (rate <= t->rate) {
+ if (!d->reg)
+ return 0;
+
+ return regulator_set_voltage(d->reg,
+ t->millivolts * 1000,
+ d->max_millivolts * 1000);
+ }
+ }
+
+ return -EINVAL;
+}
+
+static void dvfs_init(struct clk *c)
+{
+ int process_id;
+ int i;
+ struct dvfs_table *table;
+
+ process_id = c->dvfs->cpu ? tegra_core_process_id() :
+ tegra_cpu_process_id();
+
+ for (i = 0; i < c->dvfs->process_id_table_length; i++)
+ if (process_id == c->dvfs->process_id_table[i].process_id)
+ c->dvfs->table = c->dvfs->process_id_table[i].table;
+
+ if (c->dvfs->table == NULL) {
+ pr_err("Failed to find dvfs table for clock %s process %d\n",
+ c->name, process_id);
+ return;
+ }
+
+ c->dvfs->max_millivolts = 0;
+ for (table = c->dvfs->table; table->rate != 0; table++)
+ if (c->dvfs->max_millivolts < table->millivolts)
+ c->dvfs->max_millivolts = table->millivolts;
+
+ c->dvfs->reg = regulator_get(NULL, c->dvfs->reg_id);
+
+ if (IS_ERR(c->dvfs->reg)) {
+ pr_err("Failed to get regulator %s for clock %s\n",
+ c->dvfs->reg_id, c->name);
+ c->dvfs->reg = NULL;
+ return;
+ }
+
+ if (c->refcnt > 0)
+ dvfs_set_rate(c->dvfs, c->rate);
+}
struct clk *tegra_get_clock_by_name(const char *name)
{
@@ -48,14 +115,31 @@ struct clk *tegra_get_clock_by_name(const char *name)
return ret;
}
+static void clk_recalculate_rate(struct clk *c)
+{
+ u64 rate;
+
+ if (!c->parent)
+ return;
+
+ rate = c->parent->rate;
+
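+ /* use 64-bit math so rate * mul cannot overflow before the divide */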
+ if (c->mul != 0 && c->div != 0) {
+ rate = rate * c->mul;
+ do_div(rate, c->div);
+ }
+
+ if (rate > c->max_rate)
+ pr_warn("clocks: Set clock %s to rate %llu, max is %lu\n",
+ c->name, rate, c->max_rate);
+
+ c->rate = rate;
+}
+
int clk_reparent(struct clk *c, struct clk *parent)
{
pr_debug("%s: %s\n", __func__, c->name);
- if (c->refcnt && c->parent)
- clk_disable_locked(c->parent);
c->parent = parent;
- if (c->refcnt && c->parent)
- clk_enable_locked(c->parent);
list_del(&c->sibling);
list_add_tail(&c->sibling, &parent->children);
return 0;
@@ -67,8 +151,7 @@ static void propagate_rate(struct clk *c)
pr_debug("%s: %s\n", __func__, c->name);
list_for_each_entry(clkp, &c->children, sibling) {
pr_debug(" %s\n", clkp->name);
- if (clkp->ops->recalculate_rate)
- clkp->ops->recalculate_rate(clkp);
+ clk_recalculate_rate(clkp);
propagate_rate(clkp);
}
}
@@ -77,6 +160,8 @@ void clk_init(struct clk *c)
{
unsigned long flags;
+ pr_debug("%s: %s\n", __func__, c->name);
+
spin_lock_irqsave(&clock_lock, flags);
INIT_LIST_HEAD(&c->children);
@@ -85,6 +170,8 @@ void clk_init(struct clk *c)
if (c->ops && c->ops->init)
c->ops->init(c);
+ clk_recalculate_rate(c);
+
list_add(&c->node, &clocks);
if (c->parent)
@@ -122,13 +209,38 @@ int clk_enable_locked(struct clk *c)
return 0;
}
+int clk_enable_cansleep(struct clk *c)
+{
+ int ret;
+ unsigned long flags;
+
+ mutex_lock(&dvfs_lock);
+
+ if (clk_is_dvfs(c) && c->refcnt > 0)
+ dvfs_set_rate(c->dvfs, c->rate);
+
+ spin_lock_irqsave(&clock_lock, flags);
+ ret = clk_enable_locked(c);
+ spin_unlock_irqrestore(&clock_lock, flags);
+
+ mutex_unlock(&dvfs_lock);
+
+ return ret;
+}
+EXPORT_SYMBOL(clk_enable_cansleep);
+
int clk_enable(struct clk *c)
{
int ret;
unsigned long flags;
+
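+ /* dvfs-managed clocks may sleep for voltage changes; use clk_enable_cansleep() for them */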
+ if (clk_is_dvfs(c))
+ BUG();
+
spin_lock_irqsave(&clock_lock, flags);
ret = clk_enable_locked(c);
spin_unlock_irqrestore(&clock_lock, flags);
+
return ret;
}
EXPORT_SYMBOL(clk_enable);
@@ -152,9 +264,30 @@ void clk_disable_locked(struct clk *c)
c->refcnt--;
}
+void clk_disable_cansleep(struct clk *c)
+{
+ unsigned long flags;
+
+ mutex_lock(&dvfs_lock);
+
+ spin_lock_irqsave(&clock_lock, flags);
+ clk_disable_locked(c);
+ spin_unlock_irqrestore(&clock_lock, flags);
+
+ if (clk_is_dvfs(c) && c->refcnt == 0)
+ dvfs_set_rate(c->dvfs, c->rate);
+
+ mutex_unlock(&dvfs_lock);
+}
+EXPORT_SYMBOL(clk_disable_cansleep);
+
void clk_disable(struct clk *c)
{
unsigned long flags;
+
+ if (clk_is_dvfs(c))
+ BUG();
+
spin_lock_irqsave(&clock_lock, flags);
clk_disable_locked(c);
spin_unlock_irqrestore(&clock_lock, flags);
@@ -175,6 +308,8 @@ int clk_set_parent_locked(struct clk *c, struct clk *parent)
if (ret)
return ret;
+ clk_recalculate_rate(c);
+
propagate_rate(c);
return 0;
@@ -197,22 +332,69 @@ struct clk *clk_get_parent(struct clk *c)
}
EXPORT_SYMBOL(clk_get_parent);
-int clk_set_rate(struct clk *c, unsigned long rate)
+int clk_set_rate_locked(struct clk *c, unsigned long rate)
+{
+ int ret;
+
+ if (rate > c->max_rate)
+ rate = c->max_rate;
+
+ if (!c->ops || !c->ops->set_rate)
+ return -ENOSYS;
+
+ ret = c->ops->set_rate(c, rate);
+
+ if (ret)
+ return ret;
+
+ clk_recalculate_rate(c);
+
+ propagate_rate(c);
+
+ return 0;
+}
+
+int clk_set_rate_cansleep(struct clk *c, unsigned long rate)
{
int ret = 0;
unsigned long flags;
+ pr_debug("%s: %s\n", __func__, c->name);
+
+ mutex_lock(&dvfs_lock);
+
+ if (rate > c->rate)
+ ret = dvfs_set_rate(c->dvfs, rate);
+ if (ret)
+ goto out;
+
spin_lock_irqsave(&clock_lock, flags);
+ ret = clk_set_rate_locked(c, rate);
+ spin_unlock_irqrestore(&clock_lock, flags);
- pr_debug("%s: %s\n", __func__, c->name);
+ if (ret)
+ goto out;
- if (c->ops && c->ops->set_rate)
- ret = c->ops->set_rate(c, rate);
- else
- ret = -ENOSYS;
+ ret = dvfs_set_rate(c->dvfs, rate);
- propagate_rate(c);
+out:
+ mutex_unlock(&dvfs_lock);
+ return ret;
+}
+EXPORT_SYMBOL(clk_set_rate_cansleep);
+
+int clk_set_rate(struct clk *c, unsigned long rate)
+{
+ int ret = 0;
+ unsigned long flags;
+
+ pr_debug("%s: %s\n", __func__, c->name);
+
+ if (clk_is_dvfs(c))
+ BUG();
+ spin_lock_irqsave(&clock_lock, flags);
+ ret = clk_set_rate_locked(c, rate);
spin_unlock_irqrestore(&clock_lock, flags);
return ret;
@@ -235,6 +417,20 @@ unsigned long clk_get_rate(struct clk *c)
}
EXPORT_SYMBOL(clk_get_rate);
+long clk_round_rate(struct clk *c, unsigned long rate)
+{
+ pr_debug("%s: %s\n", __func__, c->name);
+
+ if (!c->ops || !c->ops->round_rate)
+ return -ENOSYS;
+
+ if (rate > c->max_rate)
+ rate = c->max_rate;
+
+ return c->ops->round_rate(c, rate);
+}
+EXPORT_SYMBOL(clk_round_rate);
+
static int tegra_clk_init_one_from_table(struct tegra_clk_init_table *table)
{
struct clk *c;
@@ -308,13 +504,28 @@ void tegra_periph_reset_assert(struct clk *c)
}
EXPORT_SYMBOL(tegra_periph_reset_assert);
-int __init tegra_init_clock(void)
+void __init tegra_init_clock(void)
{
tegra2_init_clocks();
+}
+
+int __init tegra_init_dvfs(void)
+{
+ struct clk *c, *safe;
+
+ mutex_lock(&dvfs_lock);
+
+ list_for_each_entry_safe(c, safe, &clocks, node)
+ if (c->dvfs)
+ dvfs_init(c);
+
+ mutex_unlock(&dvfs_lock);
return 0;
}
+late_initcall(tegra_init_dvfs);
+
#ifdef CONFIG_DEBUG_FS
static struct dentry *clk_debugfs_root;
@@ -324,7 +535,7 @@ static void clock_tree_show_one(struct seq_file *s, struct clk *c, int level)
struct clk *child;
struct clk *safe;
const char *state = "uninit";
- char div[5] = {0};
+ char div[8] = {0};
if (c->state == ON)
state = "on";
@@ -332,16 +543,26 @@ static void clock_tree_show_one(struct seq_file *s, struct clk *c, int level)
state = "off";
if (c->mul != 0 && c->div != 0) {
- BUG_ON(c->mul > 2);
- if (c->mul > c->div)
- snprintf(div, sizeof(div), "x%d", c->mul / c->div);
- else
+ if (c->mul > c->div) {
+ int mul = c->mul / c->div;
+ int mul2 = (c->mul * 10 / c->div) % 10;
+ int mul3 = (c->mul * 10) % c->div;
+ if (mul2 == 0 && mul3 == 0)
+ snprintf(div, sizeof(div), "x%d", mul);
+ else if (mul3 == 0)
+ snprintf(div, sizeof(div), "x%d.%d", mul, mul2);
+ else
+ snprintf(div, sizeof(div), "x%d.%d..", mul, mul2);
+ } else {
snprintf(div, sizeof(div), "%d%s", c->div / c->mul,
(c->div % c->mul) ? ".5" : "");
+ }
}
- seq_printf(s, "%*s%-*s %-6s %-3d %-5s %-10lu\n",
- level * 3 + 1, c->set ? "" : "*",
+ seq_printf(s, "%*s%c%c%-*s %-6s %-3d %-8s %-10lu\n",
+ level * 3 + 1, "",
+ c->rate > c->max_rate ? '!' : ' ',
+ !c->set ? '*' : ' ',
30 - level * 3, c->name,
state, c->refcnt, div, c->rate);
list_for_each_entry_safe(child, safe, &c->children, sibling) {
@@ -353,8 +574,8 @@ static int clock_tree_show(struct seq_file *s, void *data)
{
struct clk *c;
unsigned long flags;
- seq_printf(s, " clock state ref div rate \n");
- seq_printf(s, "-----------------------------------------------------------\n");
+ seq_printf(s, " clock state ref div rate\n");
+ seq_printf(s, "--------------------------------------------------------------\n");
spin_lock_irqsave(&clock_lock, flags);
list_for_each_entry(c, &clocks, node)
if (c->parent == NULL)
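The clock.c changes above split the API into the spinlocked clk_enable()/clk_disable()/clk_set_rate(), which now BUG() on DVFS-managed clocks, and sleeping *_cansleep variants that take dvfs_lock so the rail voltage can be raised before a rate increase and lowered after a decrease. A minimal calling-convention sketch, not part of the patch; the 216 MHz target is an arbitrary example and the caller is assumed to have looked the clock up already:

#include <linux/clk.h>
#include <mach/clk.h>

static int mydev_set_speed(struct clk *c)
{
	int ret;

	/* Sleeping variants: may touch the regulator under dvfs_lock. */
	ret = clk_enable_cansleep(c);
	if (ret)
		return ret;

	/* Rate is clamped to c->max_rate; voltage is raised first if needed. */
	return clk_set_rate_cansleep(c, 216000000);
}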
diff --git a/arch/arm/mach-tegra/clock.h b/arch/arm/mach-tegra/clock.h
index af7c70e2a3ba..94fd859770f1 100644
--- a/arch/arm/mach-tegra/clock.h
+++ b/arch/arm/mach-tegra/clock.h
@@ -27,18 +27,43 @@
#define DIV_U71 (1 << 1)
#define DIV_U71_FIXED (1 << 2)
#define DIV_2 (1 << 3)
-#define PLL_FIXED (1 << 4)
-#define PLL_HAS_CPCON (1 << 5)
-#define MUX (1 << 6)
-#define PLLD (1 << 7)
-#define PERIPH_NO_RESET (1 << 8)
-#define PERIPH_NO_ENB (1 << 9)
-#define PERIPH_EMC_ENB (1 << 10)
-#define PERIPH_MANUAL_RESET (1 << 11)
-#define PLL_ALT_MISC_REG (1 << 12)
+#define DIV_U16 (1 << 4)
+#define PLL_FIXED (1 << 5)
+#define PLL_HAS_CPCON (1 << 6)
+#define MUX (1 << 7)
+#define PLLD (1 << 8)
+#define PERIPH_NO_RESET (1 << 9)
+#define PERIPH_NO_ENB (1 << 10)
+#define PERIPH_EMC_ENB (1 << 11)
+#define PERIPH_MANUAL_RESET (1 << 12)
+#define PLL_ALT_MISC_REG (1 << 13)
+#define PLLU (1 << 14)
#define ENABLE_ON_INIT (1 << 28)
struct clk;
+struct regulator;
+
+struct dvfs_table {
+ unsigned long rate;
+ int millivolts;
+};
+
+struct dvfs_process_id_table {
+ int process_id;
+ struct dvfs_table *table;
+};
+
+
+struct dvfs {
+ struct regulator *reg;
+ struct dvfs_table *table;
+ int max_millivolts;
+
+ int process_id_table_length;
+ const char *reg_id;
+ bool cpu;
+ struct dvfs_process_id_table process_id_table[];
+};
struct clk_mux_sel {
struct clk *input;
@@ -58,12 +83,9 @@ struct clk_ops {
void (*init)(struct clk *);
int (*enable)(struct clk *);
void (*disable)(struct clk *);
- void (*recalc)(struct clk *);
int (*set_parent)(struct clk *, struct clk *);
int (*set_rate)(struct clk *, unsigned long);
- unsigned long (*get_rate)(struct clk *);
long (*round_rate)(struct clk *, unsigned long);
- unsigned long (*recalculate_rate)(struct clk *);
};
enum clk_state {
@@ -85,6 +107,7 @@ struct clk {
struct clk *parent;
struct clk_lookup lookup;
unsigned long rate;
+ unsigned long max_rate;
u32 flags;
u32 refcnt;
const char *name;
@@ -103,10 +126,6 @@ struct clk {
unsigned long cf_max;
unsigned long vco_min;
unsigned long vco_max;
- u32 m;
- u32 n;
- u32 p;
- u32 cpcon;
const struct clk_pll_table *pll_table;
/* DIV */
@@ -117,6 +136,12 @@ struct clk {
const struct clk_mux_sel *inputs;
u32 sel;
u32 reg_mask;
+
+ /* Virtual cpu clock */
+ struct clk *main;
+ struct clk *backup;
+
+ struct dvfs *dvfs;
};
@@ -141,6 +166,7 @@ unsigned long clk_measure_input_freq(void);
void clk_disable_locked(struct clk *c);
int clk_enable_locked(struct clk *c);
int clk_set_parent_locked(struct clk *c, struct clk *parent);
+int clk_set_rate_locked(struct clk *c, unsigned long rate);
int clk_reparent(struct clk *c, struct clk *parent);
void tegra_clk_init_from_table(struct tegra_clk_init_table *table);
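clock.h now also describes DVFS: a dvfs_table entry pairs a clock rate with the minimum rail voltage, dvfs_process_id_table selects a table per silicon process id (compare tegra_cpu_process_id()/tegra_core_process_id() in fuse.c below), and struct dvfs ties them to a regulator through reg_id and max_millivolts. A sketch of what such tables might look like; the rates, voltages and the zero-terminated convention are assumptions for illustration, since the dvfs core itself is not in this diff:

/* Hypothetical table: rate in Hz, minimum millivolts at that rate. */
static struct dvfs_table cpu_dvfs_table_process0[] = {
	{  312000000,  770 },
	{  608000000,  900 },
	{ 1000000000, 1100 },
	{ 0, 0 },			/* assumed terminator */
};

static struct dvfs_process_id_table cpu_process_id_table[] = {
	{ .process_id = 0, .table = cpu_dvfs_table_process0 },
};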
diff --git a/arch/arm/mach-tegra/common.c b/arch/arm/mach-tegra/common.c
index 039a514b61ef..7c91e2b9d643 100644
--- a/arch/arm/mach-tegra/common.c
+++ b/arch/arm/mach-tegra/common.c
@@ -19,13 +19,17 @@
#include <linux/init.h>
#include <linux/io.h>
+#include <linux/clk.h>
+#include <linux/delay.h>
#include <asm/hardware/cache-l2x0.h>
#include <mach/iomap.h>
+#include <mach/dma.h>
#include "board.h"
#include "clock.h"
+#include "fuse.h"
static __initdata struct tegra_clk_init_table common_clk_init_table[] = {
/* name parent rate enabled */
@@ -35,8 +39,8 @@ static __initdata struct tegra_clk_init_table common_clk_init_table[] = {
{ "pll_p_out2", "pll_p", 48000000, true },
{ "pll_p_out3", "pll_p", 72000000, true },
{ "pll_p_out4", "pll_p", 108000000, true },
- { "sys", "pll_p_out4", 108000000, true },
- { "hclk", "sys", 108000000, true },
+ { "sclk", "pll_p_out4", 108000000, true },
+ { "hclk", "sclk", 108000000, true },
{ "pclk", "hclk", 54000000, true },
{ NULL, NULL, 0, 0},
};
@@ -51,11 +55,16 @@ void __init tegra_init_cache(void)
l2x0_init(p, 0x6C080001, 0x8200c3fe);
#endif
+
}
void __init tegra_common_init(void)
{
+ tegra_init_fuse();
tegra_init_clock();
tegra_clk_init_from_table(common_clk_init_table);
tegra_init_cache();
+#ifdef CONFIG_TEGRA_SYSTEM_DMA
+ tegra_dma_init();
+#endif
}
diff --git a/arch/arm/mach-tegra/cpu-tegra.c b/arch/arm/mach-tegra/cpu-tegra.c
new file mode 100644
index 000000000000..fea5719c7072
--- /dev/null
+++ b/arch/arm/mach-tegra/cpu-tegra.c
@@ -0,0 +1,185 @@
+/*
+ * arch/arm/mach-tegra/cpu-tegra.c
+ *
+ * Copyright (C) 2010 Google, Inc.
+ *
+ * Author:
+ * Colin Cross <ccross@google.com>
+ * Based on arch/arm/plat-omap/cpu-omap.c, (C) 2005 Nokia Corporation
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/types.h>
+#include <linux/sched.h>
+#include <linux/cpufreq.h>
+#include <linux/delay.h>
+#include <linux/init.h>
+#include <linux/err.h>
+#include <linux/clk.h>
+#include <linux/io.h>
+
+#include <asm/system.h>
+
+#include <mach/hardware.h>
+#include <mach/clk.h>
+
+/* Frequency table index must be sequential starting at 0 */
+static struct cpufreq_frequency_table freq_table[] = {
+ { 0, 312000 },
+ { 1, 456000 },
+ { 2, 608000 },
+ { 3, 760000 },
+ { 4, 816000 },
+ { 5, 912000 },
+ { 6, 1000000 },
+ { 7, CPUFREQ_TABLE_END },
+};
+
+#define NUM_CPUS 2
+
+static struct clk *cpu_clk;
+
+static unsigned long target_cpu_speed[NUM_CPUS];
+
+int tegra_verify_speed(struct cpufreq_policy *policy)
+{
+ return cpufreq_frequency_table_verify(policy, freq_table);
+}
+
+unsigned int tegra_getspeed(unsigned int cpu)
+{
+ unsigned long rate;
+
+ if (cpu >= NUM_CPUS)
+ return 0;
+
+ rate = clk_get_rate(cpu_clk) / 1000;
+ return rate;
+}
+
+static int tegra_update_cpu_speed(void)
+{
+ int i;
+ unsigned long rate = 0;
+ int ret = 0;
+ struct cpufreq_freqs freqs;
+
+ for_each_online_cpu(i)
+ rate = max(rate, target_cpu_speed[i]);
+
+ freqs.old = tegra_getspeed(0);
+ freqs.new = rate;
+
+ if (freqs.old == freqs.new)
+ return ret;
+
+ for_each_online_cpu(freqs.cpu)
+ cpufreq_notify_transition(&freqs, CPUFREQ_PRECHANGE);
+
+#ifdef CONFIG_CPU_FREQ_DEBUG
+ printk(KERN_DEBUG "cpufreq-tegra: transition: %u --> %u\n",
+ freqs.old, freqs.new);
+#endif
+
+ ret = clk_set_rate_cansleep(cpu_clk, freqs.new * 1000);
+ if (ret) {
+ pr_err("cpu-tegra: Failed to set cpu frequency to %d kHz\n",
+ freqs.new);
+ return ret;
+ }
+
+ for_each_online_cpu(freqs.cpu)
+ cpufreq_notify_transition(&freqs, CPUFREQ_POSTCHANGE);
+
+ return 0;
+}
+
+static int tegra_target(struct cpufreq_policy *policy,
+ unsigned int target_freq,
+ unsigned int relation)
+{
+ int idx;
+ unsigned int freq;
+
+ cpufreq_frequency_table_target(policy, freq_table, target_freq,
+ relation, &idx);
+
+ freq = freq_table[idx].frequency;
+
+ target_cpu_speed[policy->cpu] = freq;
+
+ return tegra_update_cpu_speed();
+}
+
+static int tegra_cpu_init(struct cpufreq_policy *policy)
+{
+ if (policy->cpu >= NUM_CPUS)
+ return -EINVAL;
+
+ cpu_clk = clk_get_sys(NULL, "cpu");
+ if (IS_ERR(cpu_clk))
+ return PTR_ERR(cpu_clk);
+
+ cpufreq_frequency_table_cpuinfo(policy, freq_table);
+ cpufreq_frequency_table_get_attr(freq_table, policy->cpu);
+ policy->cur = tegra_getspeed(policy->cpu);
+ target_cpu_speed[policy->cpu] = policy->cur;
+
+ /* FIXME: what's the actual transition time? */
+ policy->cpuinfo.transition_latency = 300 * 1000;
+
+ policy->shared_type = CPUFREQ_SHARED_TYPE_ALL;
+ cpumask_copy(policy->related_cpus, cpu_possible_mask);
+
+ return 0;
+}
+
+static int tegra_cpu_exit(struct cpufreq_policy *policy)
+{
+ cpufreq_frequency_table_cpuinfo(policy, freq_table);
+ clk_put(cpu_clk);
+ return 0;
+}
+
+static struct freq_attr *tegra_cpufreq_attr[] = {
+ &cpufreq_freq_attr_scaling_available_freqs,
+ NULL,
+};
+
+static struct cpufreq_driver tegra_cpufreq_driver = {
+ .verify = tegra_verify_speed,
+ .target = tegra_target,
+ .get = tegra_getspeed,
+ .init = tegra_cpu_init,
+ .exit = tegra_cpu_exit,
+ .name = "tegra",
+ .attr = tegra_cpufreq_attr,
+};
+
+static int __init tegra_cpufreq_init(void)
+{
+ return cpufreq_register_driver(&tegra_cpufreq_driver);
+}
+
+static void __exit tegra_cpufreq_exit(void)
+{
+ cpufreq_unregister_driver(&tegra_cpufreq_driver);
+}
+
+
+MODULE_AUTHOR("Colin Cross <ccross@android.com>");
+MODULE_DESCRIPTION("cpufreq driver for Nvidia Tegra2");
+MODULE_LICENSE("GPL");
+module_init(tegra_cpufreq_init);
+module_exit(tegra_cpufreq_exit);
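A unit note on the cpufreq driver above: cpufreq works in kHz, so freq_table[] and target_cpu_speed[] hold kHz, while the clk API works in Hz; tegra_getspeed() divides by 1000 and tegra_update_cpu_speed() multiplies by 1000 before calling clk_set_rate_cansleep(). Both cores share the single "cpu" clock, so the driver runs it at the fastest per-CPU request. A restating sketch of that policy, using the declarations already in the file; the helper name is made up:

/* Sketch only: the shared "cpu" clock runs at the maximum of the
 * per-CPU targets, converted from cpufreq kHz to clk-API Hz.
 */
static unsigned long tegra_requested_rate_hz(void)
{
	unsigned long khz = 0;
	int cpu;

	for_each_online_cpu(cpu)
		khz = max(khz, target_cpu_speed[cpu]);

	return khz * 1000;
}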
diff --git a/arch/arm/mach-tegra/dma.c b/arch/arm/mach-tegra/dma.c
new file mode 100644
index 000000000000..edda6ec5e925
--- /dev/null
+++ b/arch/arm/mach-tegra/dma.c
@@ -0,0 +1,752 @@
+/*
+ * arch/arm/mach-tegra/dma.c
+ *
+ * System DMA driver for NVIDIA Tegra SoCs
+ *
+ * Copyright (c) 2008-2009, NVIDIA Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+#include <linux/io.h>
+#include <linux/interrupt.h>
+#include <linux/module.h>
+#include <linux/spinlock.h>
+#include <linux/err.h>
+#include <linux/irq.h>
+#include <linux/delay.h>
+#include <mach/dma.h>
+#include <mach/irqs.h>
+#include <mach/iomap.h>
+
+#define APB_DMA_GEN 0x000
+#define GEN_ENABLE (1<<31)
+
+#define APB_DMA_CNTRL 0x010
+
+#define APB_DMA_IRQ_MASK 0x01c
+
+#define APB_DMA_IRQ_MASK_SET 0x020
+
+#define APB_DMA_CHAN_CSR 0x000
+#define CSR_ENB (1<<31)
+#define CSR_IE_EOC (1<<30)
+#define CSR_HOLD (1<<29)
+#define CSR_DIR (1<<28)
+#define CSR_ONCE (1<<27)
+#define CSR_FLOW (1<<21)
+#define CSR_REQ_SEL_SHIFT 16
+#define CSR_REQ_SEL_MASK (0x1F<<CSR_REQ_SEL_SHIFT)
+#define CSR_REQ_SEL_INVALID (31<<CSR_REQ_SEL_SHIFT)
+#define CSR_WCOUNT_SHIFT 2
+#define CSR_WCOUNT_MASK 0xFFFC
+
+#define APB_DMA_CHAN_STA 0x004
+#define STA_BUSY (1<<31)
+#define STA_ISE_EOC (1<<30)
+#define STA_HALT (1<<29)
+#define STA_PING_PONG (1<<28)
+#define STA_COUNT_SHIFT 2
+#define STA_COUNT_MASK 0xFFFC
+
+#define APB_DMA_CHAN_AHB_PTR 0x010
+
+#define APB_DMA_CHAN_AHB_SEQ 0x014
+#define AHB_SEQ_INTR_ENB (1<<31)
+#define AHB_SEQ_BUS_WIDTH_SHIFT 28
+#define AHB_SEQ_BUS_WIDTH_MASK (0x7<<AHB_SEQ_BUS_WIDTH_SHIFT)
+#define AHB_SEQ_BUS_WIDTH_8 (0<<AHB_SEQ_BUS_WIDTH_SHIFT)
+#define AHB_SEQ_BUS_WIDTH_16 (1<<AHB_SEQ_BUS_WIDTH_SHIFT)
+#define AHB_SEQ_BUS_WIDTH_32 (2<<AHB_SEQ_BUS_WIDTH_SHIFT)
+#define AHB_SEQ_BUS_WIDTH_64 (3<<AHB_SEQ_BUS_WIDTH_SHIFT)
+#define AHB_SEQ_BUS_WIDTH_128 (4<<AHB_SEQ_BUS_WIDTH_SHIFT)
+#define AHB_SEQ_DATA_SWAP (1<<27)
+#define AHB_SEQ_BURST_MASK (0x7<<24)
+#define AHB_SEQ_BURST_1 (4<<24)
+#define AHB_SEQ_BURST_4 (5<<24)
+#define AHB_SEQ_BURST_8 (6<<24)
+#define AHB_SEQ_DBL_BUF (1<<19)
+#define AHB_SEQ_WRAP_SHIFT 16
+#define AHB_SEQ_WRAP_MASK (0x7<<AHB_SEQ_WRAP_SHIFT)
+
+#define APB_DMA_CHAN_APB_PTR 0x018
+
+#define APB_DMA_CHAN_APB_SEQ 0x01c
+#define APB_SEQ_BUS_WIDTH_SHIFT 28
+#define APB_SEQ_BUS_WIDTH_MASK (0x7<<APB_SEQ_BUS_WIDTH_SHIFT)
+#define APB_SEQ_BUS_WIDTH_8 (0<<APB_SEQ_BUS_WIDTH_SHIFT)
+#define APB_SEQ_BUS_WIDTH_16 (1<<APB_SEQ_BUS_WIDTH_SHIFT)
+#define APB_SEQ_BUS_WIDTH_32 (2<<APB_SEQ_BUS_WIDTH_SHIFT)
+#define APB_SEQ_BUS_WIDTH_64 (3<<APB_SEQ_BUS_WIDTH_SHIFT)
+#define APB_SEQ_BUS_WIDTH_128 (4<<APB_SEQ_BUS_WIDTH_SHIFT)
+#define APB_SEQ_DATA_SWAP (1<<27)
+#define APB_SEQ_WRAP_SHIFT 16
+#define APB_SEQ_WRAP_MASK (0x7<<APB_SEQ_WRAP_SHIFT)
+
+#define TEGRA_SYSTEM_DMA_CH_NR 16
+#define TEGRA_SYSTEM_DMA_AVP_CH_NUM 4
+#define TEGRA_SYSTEM_DMA_CH_MIN 0
+#define TEGRA_SYSTEM_DMA_CH_MAX \
+ (TEGRA_SYSTEM_DMA_CH_NR - TEGRA_SYSTEM_DMA_AVP_CH_NUM - 1)
+
+#define NV_DMA_MAX_TRASFER_SIZE 0x10000
+
+const unsigned int ahb_addr_wrap_table[8] = {
+ 0, 32, 64, 128, 256, 512, 1024, 2048
+};
+
+const unsigned int apb_addr_wrap_table[8] = {0, 1, 2, 4, 8, 16, 32, 64};
+
+const unsigned int bus_width_table[5] = {8, 16, 32, 64, 128};
+
+#define TEGRA_DMA_NAME_SIZE 16
+struct tegra_dma_channel {
+ struct list_head list;
+ int id;
+ spinlock_t lock;
+ char name[TEGRA_DMA_NAME_SIZE];
+ void __iomem *addr;
+ int mode;
+ int irq;
+
+ /* Register shadow */
+ u32 csr;
+ u32 ahb_seq;
+ u32 ahb_ptr;
+ u32 apb_seq;
+ u32 apb_ptr;
+};
+
+#define NV_DMA_MAX_CHANNELS 32
+
+static DECLARE_BITMAP(channel_usage, NV_DMA_MAX_CHANNELS);
+static struct tegra_dma_channel dma_channels[NV_DMA_MAX_CHANNELS];
+
+static void tegra_dma_update_hw(struct tegra_dma_channel *ch,
+ struct tegra_dma_req *req);
+static void tegra_dma_update_hw_partial(struct tegra_dma_channel *ch,
+ struct tegra_dma_req *req);
+static void tegra_dma_init_hw(struct tegra_dma_channel *ch);
+static void tegra_dma_stop(struct tegra_dma_channel *ch);
+
+void tegra_dma_flush(struct tegra_dma_channel *ch)
+{
+}
+EXPORT_SYMBOL(tegra_dma_flush);
+
+void tegra_dma_dequeue(struct tegra_dma_channel *ch)
+{
+ struct tegra_dma_req *req;
+
+ req = list_entry(ch->list.next, typeof(*req), node);
+
+ tegra_dma_dequeue_req(ch, req);
+ return;
+}
+
+void tegra_dma_stop(struct tegra_dma_channel *ch)
+{
+ unsigned int csr;
+ unsigned int status;
+
+ csr = ch->csr;
+ csr &= ~CSR_IE_EOC;
+ writel(csr, ch->addr + APB_DMA_CHAN_CSR);
+
+ csr &= ~CSR_ENB;
+ writel(csr, ch->addr + APB_DMA_CHAN_CSR);
+
+ status = readl(ch->addr + APB_DMA_CHAN_STA);
+ if (status & STA_ISE_EOC)
+ writel(status, ch->addr + APB_DMA_CHAN_STA);
+}
+
+int tegra_dma_cancel(struct tegra_dma_channel *ch)
+{
+ unsigned int csr;
+ unsigned long irq_flags;
+
+ spin_lock_irqsave(&ch->lock, irq_flags);
+ while (!list_empty(&ch->list))
+ list_del(ch->list.next);
+
+ csr = ch->csr;
+ csr &= ~CSR_REQ_SEL_MASK;
+ csr |= CSR_REQ_SEL_INVALID;
+
+ /* Set the enable as that is not shadowed */
+ csr |= CSR_ENB;
+ writel(csr, ch->addr + APB_DMA_CHAN_CSR);
+
+ tegra_dma_stop(ch);
+
+ spin_unlock_irqrestore(&ch->lock, irq_flags);
+ return 0;
+}
+
+int tegra_dma_dequeue_req(struct tegra_dma_channel *ch,
+ struct tegra_dma_req *_req)
+{
+ unsigned int csr;
+ unsigned int status;
+ struct tegra_dma_req *req = NULL;
+ int found = 0;
+ unsigned long irq_flags;
+ int to_transfer;
+ int req_transfer_count;
+
+ spin_lock_irqsave(&ch->lock, irq_flags);
+ list_for_each_entry(req, &ch->list, node) {
+ if (req == _req) {
+ list_del(&req->node);
+ found = 1;
+ break;
+ }
+ }
+ if (!found) {
+ spin_unlock_irqrestore(&ch->lock, irq_flags);
+ return 0;
+ }
+
+ /* STOP the DMA and get the transfer count.
+ * Getting the transfer count is tricky.
+ * - Change the source selector to invalid to stop the DMA from
+ * FIFO to memory.
+ * - Read the status register to know the number of pending
+ * bytes to be transferred.
+ * - Finally stop or program the DMA to the next buffer in the
+ * list.
+ */
+ csr = ch->csr;
+ csr &= ~CSR_REQ_SEL_MASK;
+ csr |= CSR_REQ_SEL_INVALID;
+
+ /* Set the enable as that is not shadowed */
+ csr |= CSR_ENB;
+ writel(csr, ch->addr + APB_DMA_CHAN_CSR);
+
+ /* Get the transfer count */
+ status = readl(ch->addr + APB_DMA_CHAN_STA);
+ to_transfer = (status & STA_COUNT_MASK) >> STA_COUNT_SHIFT;
+ req_transfer_count = (ch->csr & CSR_WCOUNT_MASK) >> CSR_WCOUNT_SHIFT;
+ req_transfer_count += 1;
+ to_transfer += 1;
+
+ req->bytes_transferred = req_transfer_count;
+
+ if (status & STA_BUSY)
+ req->bytes_transferred -= to_transfer;
+
+ /* In continuous transfer mode, DMA only tracks the count of the
+ * half DMA buffer. So, if the DMA has already finished the first
+ * half of the buffer, add that half to the completed count.
+ *
+ * FIXME: There can be a race here. What if the req to
+ * dequeue happens at the same time as the DMA just moved to
+ * the new buffer and SW hasn't yet received the interrupt?
+ */
+ if (ch->mode & TEGRA_DMA_MODE_CONTINOUS)
+ if (req->buffer_status == TEGRA_DMA_REQ_BUF_STATUS_HALF_FULL)
+ req->bytes_transferred += req_transfer_count;
+
+ req->bytes_transferred *= 4;
+
+ tegra_dma_stop(ch);
+ if (!list_empty(&ch->list)) {
+ /* if the list is not empty, queue the next request */
+ struct tegra_dma_req *next_req;
+ next_req = list_entry(ch->list.next,
+ typeof(*next_req), node);
+ tegra_dma_update_hw(ch, next_req);
+ }
+ req->status = -TEGRA_DMA_REQ_ERROR_ABORTED;
+
+ spin_unlock_irqrestore(&ch->lock, irq_flags);
+
+ /* Callback should be called without any lock */
+ req->complete(req);
+ return 0;
+}
+EXPORT_SYMBOL(tegra_dma_dequeue_req);
+
+bool tegra_dma_is_empty(struct tegra_dma_channel *ch)
+{
+ unsigned long irq_flags;
+ bool is_empty;
+
+ spin_lock_irqsave(&ch->lock, irq_flags);
+ if (list_empty(&ch->list))
+ is_empty = true;
+ else
+ is_empty = false;
+ spin_unlock_irqrestore(&ch->lock, irq_flags);
+ return is_empty;
+}
+EXPORT_SYMBOL(tegra_dma_is_empty);
+
+bool tegra_dma_is_req_inflight(struct tegra_dma_channel *ch,
+ struct tegra_dma_req *_req)
+{
+ unsigned long irq_flags;
+ struct tegra_dma_req *req;
+
+ spin_lock_irqsave(&ch->lock, irq_flags);
+ list_for_each_entry(req, &ch->list, node) {
+ if (req == _req) {
+ spin_unlock_irqrestore(&ch->lock, irq_flags);
+ return true;
+ }
+ }
+ spin_unlock_irqrestore(&ch->lock, irq_flags);
+ return false;
+}
+EXPORT_SYMBOL(tegra_dma_is_req_inflight);
+
+int tegra_dma_enqueue_req(struct tegra_dma_channel *ch,
+ struct tegra_dma_req *req)
+{
+ unsigned long irq_flags;
+ int start_dma = 0;
+
+ if (req->size > NV_DMA_MAX_TRASFER_SIZE ||
+ req->source_addr & 0x3 || req->dest_addr & 0x3) {
+ pr_err("Invalid DMA request for channel %d\n", ch->id);
+ return -EINVAL;
+ }
+
+ spin_lock_irqsave(&ch->lock, irq_flags);
+
+ req->bytes_transferred = 0;
+ req->status = 0;
+ req->buffer_status = 0;
+ if (list_empty(&ch->list))
+ start_dma = 1;
+
+ list_add_tail(&req->node, &ch->list);
+
+ if (start_dma)
+ tegra_dma_update_hw(ch, req);
+
+ spin_unlock_irqrestore(&ch->lock, irq_flags);
+
+ return 0;
+}
+EXPORT_SYMBOL(tegra_dma_enqueue_req);
+
+struct tegra_dma_channel *tegra_dma_allocate_channel(int mode)
+{
+ int channel;
+ struct tegra_dma_channel *ch;
+
+ /* first channel is the shared channel */
+ if (mode & TEGRA_DMA_SHARED) {
+ channel = TEGRA_SYSTEM_DMA_CH_MIN;
+ } else {
+ channel = find_first_zero_bit(channel_usage,
+ ARRAY_SIZE(dma_channels));
+ if (channel >= ARRAY_SIZE(dma_channels))
+ return NULL;
+ }
+ __set_bit(channel, channel_usage);
+ ch = &dma_channels[channel];
+ ch->mode = mode;
+ return ch;
+}
+EXPORT_SYMBOL(tegra_dma_allocate_channel);
+
+void tegra_dma_free_channel(struct tegra_dma_channel *ch)
+{
+ if (ch->mode & TEGRA_DMA_SHARED)
+ return;
+ tegra_dma_cancel(ch);
+ __clear_bit(ch->id, channel_usage);
+}
+EXPORT_SYMBOL(tegra_dma_free_channel);
+
+static void tegra_dma_update_hw_partial(struct tegra_dma_channel *ch,
+ struct tegra_dma_req *req)
+{
+ if (req->to_memory) {
+ ch->apb_ptr = req->source_addr;
+ ch->ahb_ptr = req->dest_addr;
+ } else {
+ ch->apb_ptr = req->dest_addr;
+ ch->ahb_ptr = req->source_addr;
+ }
+ writel(ch->apb_ptr, ch->addr + APB_DMA_CHAN_APB_PTR);
+ writel(ch->ahb_ptr, ch->addr + APB_DMA_CHAN_AHB_PTR);
+
+ req->status = TEGRA_DMA_REQ_INFLIGHT;
+ return;
+}
+
+static void tegra_dma_update_hw(struct tegra_dma_channel *ch,
+ struct tegra_dma_req *req)
+{
+ int ahb_addr_wrap;
+ int apb_addr_wrap;
+ int ahb_bus_width;
+ int apb_bus_width;
+ int index;
+ unsigned long csr;
+
+
+ ch->csr |= CSR_FLOW;
+ ch->csr &= ~CSR_REQ_SEL_MASK;
+ ch->csr |= req->req_sel << CSR_REQ_SEL_SHIFT;
+ ch->ahb_seq &= ~AHB_SEQ_BURST_MASK;
+ ch->ahb_seq |= AHB_SEQ_BURST_1;
+
+ /* One shot mode is always single buffered,
+ * continuous mode is always double buffered
+ * */
+ if (ch->mode & TEGRA_DMA_MODE_ONESHOT) {
+ ch->csr |= CSR_ONCE;
+ ch->ahb_seq &= ~AHB_SEQ_DBL_BUF;
+ ch->csr &= ~CSR_WCOUNT_MASK;
+ ch->csr |= ((req->size>>2) - 1) << CSR_WCOUNT_SHIFT;
+ } else {
+ ch->csr &= ~CSR_ONCE;
+ ch->ahb_seq |= AHB_SEQ_DBL_BUF;
+
+ /* In double buffered mode, we set the size to half the
+ * requested size and interrupt when half the buffer
+ * is full */
+ ch->csr &= ~CSR_WCOUNT_MASK;
+ ch->csr |= ((req->size>>3) - 1) << CSR_WCOUNT_SHIFT;
+ }
+
+ if (req->to_memory) {
+ ch->csr &= ~CSR_DIR;
+ ch->apb_ptr = req->source_addr;
+ ch->ahb_ptr = req->dest_addr;
+
+ apb_addr_wrap = req->source_wrap;
+ ahb_addr_wrap = req->dest_wrap;
+ apb_bus_width = req->source_bus_width;
+ ahb_bus_width = req->dest_bus_width;
+
+ } else {
+ ch->csr |= CSR_DIR;
+ ch->apb_ptr = req->dest_addr;
+ ch->ahb_ptr = req->source_addr;
+
+ apb_addr_wrap = req->dest_wrap;
+ ahb_addr_wrap = req->source_wrap;
+ apb_bus_width = req->dest_bus_width;
+ ahb_bus_width = req->source_bus_width;
+ }
+
+ apb_addr_wrap >>= 2;
+ ahb_addr_wrap >>= 2;
+
+ /* set address wrap for APB size */
+ index = 0;
+ do {
+ if (apb_addr_wrap_table[index] == apb_addr_wrap)
+ break;
+ index++;
+ } while (index < ARRAY_SIZE(apb_addr_wrap_table));
+ BUG_ON(index == ARRAY_SIZE(apb_addr_wrap_table));
+ ch->apb_seq &= ~APB_SEQ_WRAP_MASK;
+ ch->apb_seq |= index << APB_SEQ_WRAP_SHIFT;
+
+ /* set address wrap for AHB size */
+ index = 0;
+ do {
+ if (ahb_addr_wrap_table[index] == ahb_addr_wrap)
+ break;
+ index++;
+ } while (index < ARRAY_SIZE(ahb_addr_wrap_table));
+ BUG_ON(index == ARRAY_SIZE(ahb_addr_wrap_table));
+ ch->ahb_seq &= ~AHB_SEQ_WRAP_MASK;
+ ch->ahb_seq |= index << AHB_SEQ_WRAP_SHIFT;
+
+ for (index = 0; index < ARRAY_SIZE(bus_width_table); index++) {
+ if (bus_width_table[index] == ahb_bus_width)
+ break;
+ }
+ BUG_ON(index == ARRAY_SIZE(bus_width_table));
+ ch->ahb_seq &= ~AHB_SEQ_BUS_WIDTH_MASK;
+ ch->ahb_seq |= index << AHB_SEQ_BUS_WIDTH_SHIFT;
+
+ for (index = 0; index < ARRAY_SIZE(bus_width_table); index++) {
+ if (bus_width_table[index] == apb_bus_width)
+ break;
+ }
+ BUG_ON(index == ARRAY_SIZE(bus_width_table));
+ ch->apb_seq &= ~APB_SEQ_BUS_WIDTH_MASK;
+ ch->apb_seq |= index << APB_SEQ_BUS_WIDTH_SHIFT;
+
+ ch->csr |= CSR_IE_EOC;
+
+ /* update hw registers with the shadow */
+ writel(ch->csr, ch->addr + APB_DMA_CHAN_CSR);
+ writel(ch->apb_seq, ch->addr + APB_DMA_CHAN_APB_SEQ);
+ writel(ch->apb_ptr, ch->addr + APB_DMA_CHAN_APB_PTR);
+ writel(ch->ahb_seq, ch->addr + APB_DMA_CHAN_AHB_SEQ);
+ writel(ch->ahb_ptr, ch->addr + APB_DMA_CHAN_AHB_PTR);
+
+ csr = ch->csr | CSR_ENB;
+ writel(csr, ch->addr + APB_DMA_CHAN_CSR);
+
+ req->status = TEGRA_DMA_REQ_INFLIGHT;
+}
+
+static void tegra_dma_init_hw(struct tegra_dma_channel *ch)
+{
+ /* One shot with an interrupt to CPU after transfer */
+ ch->csr = CSR_ONCE | CSR_IE_EOC;
+ ch->ahb_seq = AHB_SEQ_BUS_WIDTH_32 | AHB_SEQ_INTR_ENB;
+ ch->apb_seq = APB_SEQ_BUS_WIDTH_32 | 1 << APB_SEQ_WRAP_SHIFT;
+}
+
+static void handle_oneshot_dma(struct tegra_dma_channel *ch)
+{
+ struct tegra_dma_req *req;
+
+ spin_lock(&ch->lock);
+ if (list_empty(&ch->list)) {
+ spin_unlock(&ch->lock);
+ return;
+ }
+
+ req = list_entry(ch->list.next, typeof(*req), node);
+ if (req) {
+ int bytes_transferred;
+
+ bytes_transferred =
+ (ch->csr & CSR_WCOUNT_MASK) >> CSR_WCOUNT_SHIFT;
+ bytes_transferred += 1;
+ bytes_transferred <<= 2;
+
+ list_del(&req->node);
+ req->bytes_transferred = bytes_transferred;
+ req->status = TEGRA_DMA_REQ_SUCCESS;
+
+ spin_unlock(&ch->lock);
+ /* Callback should be called without any lock */
+ pr_debug("%s: transferred %d bytes\n", __func__,
+ req->bytes_transferred);
+ req->complete(req);
+ spin_lock(&ch->lock);
+ }
+
+ if (!list_empty(&ch->list)) {
+ req = list_entry(ch->list.next, typeof(*req), node);
+ /* the complete function we just called may have enqueued
+ another req, in which case dma has already started */
+ if (req->status != TEGRA_DMA_REQ_INFLIGHT)
+ tegra_dma_update_hw(ch, req);
+ }
+ spin_unlock(&ch->lock);
+}
+
+static void handle_continuous_dma(struct tegra_dma_channel *ch)
+{
+ struct tegra_dma_req *req;
+
+ spin_lock(&ch->lock);
+ if (list_empty(&ch->list)) {
+ spin_unlock(&ch->lock);
+ return;
+ }
+
+ req = list_entry(ch->list.next, typeof(*req), node);
+ if (req) {
+ if (req->buffer_status == TEGRA_DMA_REQ_BUF_STATUS_EMPTY) {
+ /* Load the next request into the hardware, if available
+ * */
+ if (!list_is_last(&req->node, &ch->list)) {
+ struct tegra_dma_req *next_req;
+
+ next_req = list_entry(req->node.next,
+ typeof(*next_req), node);
+ tegra_dma_update_hw_partial(ch, next_req);
+ }
+ req->buffer_status = TEGRA_DMA_REQ_BUF_STATUS_HALF_FULL;
+ req->status = TEGRA_DMA_REQ_SUCCESS;
+ /* DMA lock is NOT held when callback is called */
+ spin_unlock(&ch->lock);
+ if (likely(req->threshold))
+ req->threshold(req);
+ return;
+
+ } else if (req->buffer_status ==
+ TEGRA_DMA_REQ_BUF_STATUS_HALF_FULL) {
+ /* Callback when the buffer is completely full (i.e. on
+ * the second interrupt) */
+ int bytes_transferred;
+
+ bytes_transferred =
+ (ch->csr & CSR_WCOUNT_MASK) >> CSR_WCOUNT_SHIFT;
+ bytes_transferred += 1;
+ bytes_transferred <<= 3;
+
+ req->buffer_status = TEGRA_DMA_REQ_BUF_STATUS_FULL;
+ req->bytes_transferred = bytes_transferred;
+ req->status = TEGRA_DMA_REQ_SUCCESS;
+ list_del(&req->node);
+
+ /* DMA lock is NOT held when callback is called */
+ spin_unlock(&ch->lock);
+ req->complete(req);
+ return;
+
+ } else {
+ BUG();
+ }
+ }
+ spin_unlock(&ch->lock);
+}
+
+static irqreturn_t dma_isr(int irq, void *data)
+{
+ struct tegra_dma_channel *ch = data;
+ unsigned long status;
+
+ status = readl(ch->addr + APB_DMA_CHAN_STA);
+ if (status & STA_ISE_EOC)
+ writel(status, ch->addr + APB_DMA_CHAN_STA);
+ else {
+ pr_warning("Got a spurious ISR for DMA channel %d\n", ch->id);
+ return IRQ_HANDLED;
+ }
+ return IRQ_WAKE_THREAD;
+}
+
+static irqreturn_t dma_thread_fn(int irq, void *data)
+{
+ struct tegra_dma_channel *ch = data;
+
+ if (ch->mode & TEGRA_DMA_MODE_ONESHOT)
+ handle_oneshot_dma(ch);
+ else
+ handle_continuous_dma(ch);
+
+
+ return IRQ_HANDLED;
+}
+
+int __init tegra_dma_init(void)
+{
+ int ret = 0;
+ int i;
+ unsigned int irq;
+ void __iomem *addr;
+
+ addr = IO_ADDRESS(TEGRA_APB_DMA_BASE);
+ writel(GEN_ENABLE, addr + APB_DMA_GEN);
+ writel(0, addr + APB_DMA_CNTRL);
+ writel(0xFFFFFFFFul >> (31 - TEGRA_SYSTEM_DMA_CH_MAX),
+ addr + APB_DMA_IRQ_MASK_SET);
+
+ memset(channel_usage, 0, sizeof(channel_usage));
+ memset(dma_channels, 0, sizeof(dma_channels));
+
+ /* Reserve all the channels we are not supposed to touch */
+ for (i = 0; i < TEGRA_SYSTEM_DMA_CH_MIN; i++)
+ __set_bit(i, channel_usage);
+
+ for (i = TEGRA_SYSTEM_DMA_CH_MIN; i <= TEGRA_SYSTEM_DMA_CH_MAX; i++) {
+ struct tegra_dma_channel *ch = &dma_channels[i];
+
+ __clear_bit(i, channel_usage);
+
+ ch->id = i;
+ snprintf(ch->name, TEGRA_DMA_NAME_SIZE, "dma_channel_%d", i);
+
+ ch->addr = IO_ADDRESS(TEGRA_APB_DMA_CH0_BASE +
+ TEGRA_APB_DMA_CH0_SIZE * i);
+
+ spin_lock_init(&ch->lock);
+ INIT_LIST_HEAD(&ch->list);
+ tegra_dma_init_hw(ch);
+
+ irq = INT_APB_DMA_CH0 + i;
+ ret = request_threaded_irq(irq, dma_isr, dma_thread_fn, 0,
+ dma_channels[i].name, ch);
+ if (ret) {
+ pr_err("Failed to register IRQ %d for DMA %d\n",
+ irq, i);
+ goto fail;
+ }
+ ch->irq = irq;
+ }
+ /* mark the shared channel allocated */
+ __set_bit(TEGRA_SYSTEM_DMA_CH_MIN, channel_usage);
+
+ for (i = TEGRA_SYSTEM_DMA_CH_MAX+1; i < NV_DMA_MAX_CHANNELS; i++)
+ __set_bit(i, channel_usage);
+
+ return ret;
+fail:
+ writel(0, addr + APB_DMA_GEN);
+ for (i = TEGRA_SYSTEM_DMA_CH_MIN; i <= TEGRA_SYSTEM_DMA_CH_MAX; i++) {
+ struct tegra_dma_channel *ch = &dma_channels[i];
+ if (ch->irq)
+ free_irq(ch->irq, ch);
+ }
+ return ret;
+}
+
+#ifdef CONFIG_PM
+static u32 apb_dma[5*TEGRA_SYSTEM_DMA_CH_NR + 3];
+
+void tegra_dma_suspend(void)
+{
+ void __iomem *addr = IO_ADDRESS(TEGRA_APB_DMA_BASE);
+ u32 *ctx = apb_dma;
+ int i;
+
+ *ctx++ = readl(addr + APB_DMA_GEN);
+ *ctx++ = readl(addr + APB_DMA_CNTRL);
+ *ctx++ = readl(addr + APB_DMA_IRQ_MASK);
+
+ for (i = 0; i < TEGRA_SYSTEM_DMA_CH_NR; i++) {
+ addr = IO_ADDRESS(TEGRA_APB_DMA_CH0_BASE +
+ TEGRA_APB_DMA_CH0_SIZE * i);
+
+ *ctx++ = readl(addr + APB_DMA_CHAN_CSR);
+ *ctx++ = readl(addr + APB_DMA_CHAN_AHB_PTR);
+ *ctx++ = readl(addr + APB_DMA_CHAN_AHB_SEQ);
+ *ctx++ = readl(addr + APB_DMA_CHAN_APB_PTR);
+ *ctx++ = readl(addr + APB_DMA_CHAN_APB_SEQ);
+ }
+}
+
+void tegra_dma_resume(void)
+{
+ void __iomem *addr = IO_ADDRESS(TEGRA_APB_DMA_BASE);
+ u32 *ctx = apb_dma;
+ int i;
+
+ writel(*ctx++, addr + APB_DMA_GEN);
+ writel(*ctx++, addr + APB_DMA_CNTRL);
+ writel(*ctx++, addr + APB_DMA_IRQ_MASK);
+
+ for (i = 0; i < TEGRA_SYSTEM_DMA_CH_NR; i++) {
+ addr = IO_ADDRESS(TEGRA_APB_DMA_CH0_BASE +
+ TEGRA_APB_DMA_CH0_SIZE * i);
+
+ writel(*ctx++, addr + APB_DMA_CHAN_CSR);
+ writel(*ctx++, addr + APB_DMA_CHAN_AHB_PTR);
+ writel(*ctx++, addr + APB_DMA_CHAN_AHB_SEQ);
+ writel(*ctx++, addr + APB_DMA_CHAN_APB_PTR);
+ writel(*ctx++, addr + APB_DMA_CHAN_APB_SEQ);
+ }
+}
+
+#endif
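A note on the word-count arithmetic in dma.c above: CSR.WCOUNT is programmed with (words - 1), and in continuous (double-buffered) mode the hardware is given half of req->size, so each EOC interrupt corresponds to half a buffer; that is why the handlers add 1 and then shift by 2 (one-shot) or by 3 (full continuous buffer). A restating sketch, reusing the CSR_WCOUNT_SHIFT macro defined above and not part of the driver:

/* Sketch of the conversions used by tegra_dma_update_hw() and the
 * interrupt handlers (4-byte words):
 *   one-shot:    WCOUNT = size/4 - 1, bytes done  = (WCOUNT + 1) * 4
 *   continuous:  WCOUNT = size/8 - 1, each half   = (WCOUNT + 1) * 4,
 *                full buffer = (WCOUNT + 1) * 8 = size
 */
static u32 tegra_dma_wcount_field(unsigned int size, bool oneshot)
{
	unsigned int words = oneshot ? (size >> 2) : (size >> 3);

	return (words - 1) << CSR_WCOUNT_SHIFT;
}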
diff --git a/arch/arm/mach-tegra/fuse.c b/arch/arm/mach-tegra/fuse.c
new file mode 100644
index 000000000000..1fa26d9a1a68
--- /dev/null
+++ b/arch/arm/mach-tegra/fuse.c
@@ -0,0 +1,84 @@
+/*
+ * arch/arm/mach-tegra/fuse.c
+ *
+ * Copyright (C) 2010 Google, Inc.
+ *
+ * Author:
+ * Colin Cross <ccross@android.com>
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <linux/kernel.h>
+#include <linux/io.h>
+
+#include <mach/iomap.h>
+
+#include "fuse.h"
+
+#define FUSE_UID_LOW 0x108
+#define FUSE_UID_HIGH 0x10c
+#define FUSE_SKU_INFO 0x110
+#define FUSE_SPARE_BIT 0x200
+
+static inline u32 fuse_readl(unsigned long offset)
+{
+ return readl(IO_TO_VIRT(TEGRA_FUSE_BASE + offset));
+}
+
+static inline void fuse_writel(u32 value, unsigned long offset)
+{
+ writel(value, IO_TO_VIRT(TEGRA_FUSE_BASE + offset));
+}
+
+void tegra_init_fuse(void)
+{
+ u32 reg = readl(IO_TO_VIRT(TEGRA_CLK_RESET_BASE + 0x48));
+ reg |= 1 << 28;
+ writel(reg, IO_TO_VIRT(TEGRA_CLK_RESET_BASE + 0x48));
+
+ pr_info("Tegra SKU: %d CPU Process: %d Core Process: %d\n",
+ tegra_sku_id(), tegra_cpu_process_id(),
+ tegra_core_process_id());
+}
+
+unsigned long long tegra_chip_uid(void)
+{
+ unsigned long long lo, hi;
+
+ lo = fuse_readl(FUSE_UID_LOW);
+ hi = fuse_readl(FUSE_UID_HIGH);
+ return (hi << 32ull) | lo;
+}
+
+int tegra_sku_id(void)
+{
+ int sku_id;
+ u32 reg = fuse_readl(FUSE_SKU_INFO);
+ sku_id = reg & 0xFF;
+ return sku_id;
+}
+
+int tegra_cpu_process_id(void)
+{
+ int cpu_process_id;
+ u32 reg = fuse_readl(FUSE_SPARE_BIT);
+ cpu_process_id = (reg >> 6) & 3;
+ return cpu_process_id;
+}
+
+int tegra_core_process_id(void)
+{
+ int core_process_id;
+ u32 reg = fuse_readl(FUSE_SPARE_BIT);
+ core_process_id = (reg >> 12) & 3;
+ return core_process_id;
+}
diff --git a/arch/arm/mach-tegra/fuse.h b/arch/arm/mach-tegra/fuse.h
new file mode 100644
index 000000000000..584b2e27dbda
--- /dev/null
+++ b/arch/arm/mach-tegra/fuse.h
@@ -0,0 +1,24 @@
+/*
+ * arch/arm/mach-tegra/fuse.c
+ *
+ * Copyright (C) 2010 Google, Inc.
+ *
+ * Author:
+ * Colin Cross <ccross@android.com>
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+unsigned long long tegra_chip_uid(void);
+int tegra_sku_id(void);
+int tegra_cpu_process_id(void);
+int tegra_core_process_id(void);
+void tegra_init_fuse(void);
diff --git a/arch/arm/mach-tegra/gpio.c b/arch/arm/mach-tegra/gpio.c
index fe78fba25f3c..0775265e69f5 100644
--- a/arch/arm/mach-tegra/gpio.c
+++ b/arch/arm/mach-tegra/gpio.c
@@ -19,6 +19,7 @@
#include <linux/init.h>
#include <linux/irq.h>
+#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/gpio.h>
@@ -60,6 +61,13 @@ struct tegra_gpio_bank {
int bank;
int irq;
spinlock_t lvl_lock[4];
+#ifdef CONFIG_PM
+ u32 cnf[4];
+ u32 out[4];
+ u32 oe[4];
+ u32 int_enb[4];
+ u32 int_lvl[4];
+#endif
};
@@ -131,7 +139,7 @@ static struct gpio_chip tegra_gpio_chip = {
.direction_output = tegra_gpio_direction_output,
.set = tegra_gpio_set,
.base = 0,
- .ngpio = ARCH_NR_GPIOS,
+ .ngpio = TEGRA_NR_GPIOS,
};
static void tegra_gpio_irq_ack(unsigned int irq)
@@ -244,6 +252,76 @@ static void tegra_gpio_irq_handler(unsigned int irq, struct irq_desc *desc)
}
+#ifdef CONFIG_PM
+void tegra_gpio_resume(void)
+{
+ unsigned long flags;
+ int b, p, i;
+
+ local_irq_save(flags);
+
+ for (b = 0; b < ARRAY_SIZE(tegra_gpio_banks); b++) {
+ struct tegra_gpio_bank *bank = &tegra_gpio_banks[b];
+
+ for (p = 0; p < ARRAY_SIZE(bank->oe); p++) {
+ unsigned int gpio = (b<<5) | (p<<3);
+ __raw_writel(bank->cnf[p], GPIO_CNF(gpio));
+ __raw_writel(bank->out[p], GPIO_OUT(gpio));
+ __raw_writel(bank->oe[p], GPIO_OE(gpio));
+ __raw_writel(bank->int_lvl[p], GPIO_INT_LVL(gpio));
+ __raw_writel(bank->int_enb[p], GPIO_INT_ENB(gpio));
+ }
+ }
+
+ local_irq_restore(flags);
+
+ for (i = INT_GPIO_BASE; i < (INT_GPIO_BASE + TEGRA_NR_GPIOS); i++) {
+ struct irq_desc *desc = irq_to_desc(i);
+ if (!desc || (desc->status & IRQ_WAKEUP))
+ continue;
+ enable_irq(i);
+ }
+}
+
+void tegra_gpio_suspend(void)
+{
+ unsigned long flags;
+ int b, p, i;
+
+ for (i = INT_GPIO_BASE; i < (INT_GPIO_BASE + TEGRA_NR_GPIOS); i++) {
+ struct irq_desc *desc = irq_to_desc(i);
+ if (!desc)
+ continue;
+ if (desc->status & IRQ_WAKEUP) {
+ int gpio = i - INT_GPIO_BASE;
+ pr_debug("gpio %d.%d is wakeup\n", gpio/8, gpio&7);
+ continue;
+ }
+ disable_irq(i);
+ }
+
+ local_irq_save(flags);
+ for (b = 0; b < ARRAY_SIZE(tegra_gpio_banks); b++) {
+ struct tegra_gpio_bank *bank = &tegra_gpio_banks[b];
+
+ for (p = 0; p < ARRAY_SIZE(bank->oe); p++) {
+ unsigned int gpio = (b<<5) | (p<<3);
+ bank->cnf[p] = __raw_readl(GPIO_CNF(gpio));
+ bank->out[p] = __raw_readl(GPIO_OUT(gpio));
+ bank->oe[p] = __raw_readl(GPIO_OE(gpio));
+ bank->int_enb[p] = __raw_readl(GPIO_INT_ENB(gpio));
+ bank->int_lvl[p] = __raw_readl(GPIO_INT_LVL(gpio));
+ }
+ }
+ local_irq_restore(flags);
+}
+
+static int tegra_gpio_wake_enable(unsigned int irq, unsigned int enable)
+{
+ struct tegra_gpio_bank *bank = get_irq_chip_data(irq);
+ return set_irq_wake(bank->irq, enable);
+}
+#endif
static struct irq_chip tegra_gpio_irq_chip = {
.name = "GPIO",
@@ -251,6 +329,9 @@ static struct irq_chip tegra_gpio_irq_chip = {
.mask = tegra_gpio_irq_mask,
.unmask = tegra_gpio_irq_unmask,
.set_type = tegra_gpio_irq_set_type,
+#ifdef CONFIG_PM
+ .set_wake = tegra_gpio_wake_enable,
+#endif
};
@@ -274,7 +355,7 @@ static int __init tegra_gpio_init(void)
gpiochip_add(&tegra_gpio_chip);
- for (i = INT_GPIO_BASE; i < (INT_GPIO_BASE + ARCH_NR_GPIOS); i++) {
+ for (i = INT_GPIO_BASE; i < (INT_GPIO_BASE + TEGRA_NR_GPIOS); i++) {
bank = &tegra_gpio_banks[GPIO_BANK(irq_to_gpio(i))];
lockdep_set_class(&irq_desc[i].lock, &gpio_lock_class);
@@ -312,15 +393,16 @@ static int dbg_gpio_show(struct seq_file *s, void *unused)
for (i = 0; i < 7; i++) {
for (j = 0; j < 4; j++) {
int gpio = tegra_gpio_compose(i, j, 0);
- seq_printf(s, "%d:%d %02x %02x %02x %02x %02x %02x %06x\n",
- i, j,
- __raw_readl(GPIO_CNF(gpio)),
- __raw_readl(GPIO_OE(gpio)),
- __raw_readl(GPIO_OUT(gpio)),
- __raw_readl(GPIO_IN(gpio)),
- __raw_readl(GPIO_INT_STA(gpio)),
- __raw_readl(GPIO_INT_ENB(gpio)),
- __raw_readl(GPIO_INT_LVL(gpio)));
+ seq_printf(s,
+ "%d:%d %02x %02x %02x %02x %02x %02x %06x\n",
+ i, j,
+ __raw_readl(GPIO_CNF(gpio)),
+ __raw_readl(GPIO_OE(gpio)),
+ __raw_readl(GPIO_OUT(gpio)),
+ __raw_readl(GPIO_IN(gpio)),
+ __raw_readl(GPIO_INT_STA(gpio)),
+ __raw_readl(GPIO_INT_ENB(gpio)),
+ __raw_readl(GPIO_INT_LVL(gpio)));
}
}
return 0;
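The suspend/resume code added to gpio.c walks the registers by composing a GPIO number from bank and port, gpio = (b << 5) | (p << 3): each of the seven banks covers 32 GPIOs split into four 8-line ports, which is also why the per-bank shadow arrays (cnf/out/oe/int_enb/int_lvl) have four entries. The inverse mapping, as a sketch under the same numbering:

/* Sketch: split a Tegra GPIO number back into bank/port/bit
 * (32 GPIOs per bank, 8 per port), matching the loops above.
 */
static inline void tegra_gpio_split(unsigned int gpio,
				    int *bank, int *port, int *bit)
{
	*bank = gpio >> 5;
	*port = (gpio >> 3) & 3;
	*bit  = gpio & 7;
}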
diff --git a/arch/arm/mach-tegra/include/mach/clk.h b/arch/arm/mach-tegra/include/mach/clk.h
index 2896f25ebfb5..d7723955dac7 100644
--- a/arch/arm/mach-tegra/include/mach/clk.h
+++ b/arch/arm/mach-tegra/include/mach/clk.h
@@ -23,4 +23,9 @@
void tegra_periph_reset_deassert(struct clk *c);
void tegra_periph_reset_assert(struct clk *c);
+int clk_enable_cansleep(struct clk *clk);
+void clk_disable_cansleep(struct clk *clk);
+int clk_set_rate_cansleep(struct clk *clk, unsigned long rate);
+int clk_set_parent_cansleep(struct clk *clk, struct clk *parent);
+
#endif
diff --git a/arch/arm/mach-tegra/include/mach/debug-macro.S b/arch/arm/mach-tegra/include/mach/debug-macro.S
index 8ea3bffb4e00..a0e7c12868bd 100644
--- a/arch/arm/mach-tegra/include/mach/debug-macro.S
+++ b/arch/arm/mach-tegra/include/mach/debug-macro.S
@@ -21,8 +21,8 @@
#include <mach/io.h>
.macro addruart, rp, rv
- ldreq \rp, =IO_APB_PHYS @ physical
- ldrne \rv, =IO_APB_VIRT @ virtual
+ ldr \rp, =IO_APB_PHYS @ physical
+ ldr \rv, =IO_APB_VIRT @ virtual
#if defined(CONFIG_TEGRA_DEBUG_UART_NONE)
#error "A debug UART must be selected in the kernel config to use DEBUG_LL"
#elif defined(CONFIG_TEGRA_DEBUG_UARTA)
diff --git a/arch/arm/mach-tegra/include/mach/dma.h b/arch/arm/mach-tegra/include/mach/dma.h
new file mode 100644
index 000000000000..39011bd9a925
--- /dev/null
+++ b/arch/arm/mach-tegra/include/mach/dma.h
@@ -0,0 +1,155 @@
+/*
+ * arch/arm/mach-tegra/include/mach/dma.h
+ *
+ * Copyright (c) 2008-2009, NVIDIA Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+#ifndef __MACH_TEGRA_DMA_H
+#define __MACH_TEGRA_DMA_H
+
+#include <linux/list.h>
+
+#if defined(CONFIG_TEGRA_SYSTEM_DMA)
+
+struct tegra_dma_req;
+struct tegra_dma_channel;
+
+#define TEGRA_DMA_REQ_SEL_CNTR 0
+#define TEGRA_DMA_REQ_SEL_I2S_2 1
+#define TEGRA_DMA_REQ_SEL_I2S_1 2
+#define TEGRA_DMA_REQ_SEL_SPD_I 3
+#define TEGRA_DMA_REQ_SEL_UI_I 4
+#define TEGRA_DMA_REQ_SEL_MIPI 5
+#define TEGRA_DMA_REQ_SEL_I2S2_2 6
+#define TEGRA_DMA_REQ_SEL_I2S2_1 7
+#define TEGRA_DMA_REQ_SEL_UARTA 8
+#define TEGRA_DMA_REQ_SEL_UARTB 9
+#define TEGRA_DMA_REQ_SEL_UARTC 10
+#define TEGRA_DMA_REQ_SEL_SPI 11
+#define TEGRA_DMA_REQ_SEL_AC97 12
+#define TEGRA_DMA_REQ_SEL_ACMODEM 13
+#define TEGRA_DMA_REQ_SEL_SL4B 14
+#define TEGRA_DMA_REQ_SEL_SL2B1 15
+#define TEGRA_DMA_REQ_SEL_SL2B2 16
+#define TEGRA_DMA_REQ_SEL_SL2B3 17
+#define TEGRA_DMA_REQ_SEL_SL2B4 18
+#define TEGRA_DMA_REQ_SEL_UARTD 19
+#define TEGRA_DMA_REQ_SEL_UARTE 20
+#define TEGRA_DMA_REQ_SEL_I2C 21
+#define TEGRA_DMA_REQ_SEL_I2C2 22
+#define TEGRA_DMA_REQ_SEL_I2C3 23
+#define TEGRA_DMA_REQ_SEL_DVC_I2C 24
+#define TEGRA_DMA_REQ_SEL_OWR 25
+#define TEGRA_DMA_REQ_SEL_INVALID 31
+
+enum tegra_dma_mode {
+ TEGRA_DMA_SHARED = 1,
+ TEGRA_DMA_MODE_CONTINOUS = 2,
+ TEGRA_DMA_MODE_ONESHOT = 4,
+};
+
+enum tegra_dma_req_error {
+ TEGRA_DMA_REQ_SUCCESS = 0,
+ TEGRA_DMA_REQ_ERROR_ABORTED,
+ TEGRA_DMA_REQ_INFLIGHT,
+};
+
+enum tegra_dma_req_buff_status {
+ TEGRA_DMA_REQ_BUF_STATUS_EMPTY = 0,
+ TEGRA_DMA_REQ_BUF_STATUS_HALF_FULL,
+ TEGRA_DMA_REQ_BUF_STATUS_FULL,
+};
+
+struct tegra_dma_req {
+ struct list_head node;
+ unsigned int modid;
+ int instance;
+
+ /* Called when the req is complete and from the DMA ISR context.
+ * When this is called the req structure is no longer queued by
+ * the DMA channel.
+ *
+ * State of the DMA depends on the number of requests it has. If there
+ * are no DMA requests queued up, then it will STOP the DMA. If there
+ * are more requests in the DMA, then it will queue the next request.
+ */
+ void (*complete)(struct tegra_dma_req *req);
+
+ /* This is called from the DMA ISR context when the DMA is still in
+ * progress and is actively filling the same buffer.
+ *
+ * In continuous-mode receive, this threshold is 1/2 the buffer
+ * size. In other cases, this will not even be called as there is no
+ * hardware support for it.
+ *
+ * In continuous-mode receive, if there is a next req already
+ * queued, DMA programs the HW to use that req when this req is
+ * completed. If there is no "next req" queued, then the DMA ISR
+ * doesn't do anything before calling this callback.
+ *
+ * This is mainly used in cases where the client has queued
+ * only one req and wants to get some sort of DMA threshold
+ * callback to program the next buffer.
+ *
+ */
+ void (*threshold)(struct tegra_dma_req *req);
+
+ /* 1 to copy to memory.
+ * 0 to copy from the memory to device FIFO */
+ int to_memory;
+
+ void *virt_addr;
+
+ unsigned long source_addr;
+ unsigned long dest_addr;
+ unsigned long dest_wrap;
+ unsigned long source_wrap;
+ unsigned long source_bus_width;
+ unsigned long dest_bus_width;
+ unsigned long req_sel;
+ unsigned int size;
+
+ /* Updated by the DMA driver on the completion of the request. */
+ int bytes_transferred;
+ int status;
+
+ /* DMA completion tracking information */
+ int buffer_status;
+
+ /* Client specific data */
+ void *dev;
+};
+
+int tegra_dma_enqueue_req(struct tegra_dma_channel *ch,
+ struct tegra_dma_req *req);
+int tegra_dma_dequeue_req(struct tegra_dma_channel *ch,
+ struct tegra_dma_req *req);
+void tegra_dma_dequeue(struct tegra_dma_channel *ch);
+void tegra_dma_flush(struct tegra_dma_channel *ch);
+
+bool tegra_dma_is_req_inflight(struct tegra_dma_channel *ch,
+ struct tegra_dma_req *req);
+bool tegra_dma_is_empty(struct tegra_dma_channel *ch);
+
+struct tegra_dma_channel *tegra_dma_allocate_channel(int mode);
+void tegra_dma_free_channel(struct tegra_dma_channel *ch);
+
+int __init tegra_dma_init(void);
+
+#endif
+
+#endif
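Putting dma.h together: a client allocates a channel, fills in a tegra_dma_req with physical addresses, a request selector for flow control, bus widths and wrap settings, enqueues it, and learns the outcome through the complete() callback, which runs without the channel lock held. A one-shot receive sketch under stated assumptions: the addresses, the UART-A selector and the completion object are illustrative only, and a real driver would use properly DMA-mapped buffers:

#include <linux/completion.h>
#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/string.h>
#include <mach/dma.h>

static void my_dma_complete(struct tegra_dma_req *req)
{
	pr_info("dma done: %d bytes, status %d\n",
		req->bytes_transferred, req->status);
	complete(req->dev);	/* req->dev carries the client's completion */
}

static int my_dma_receive(dma_addr_t fifo_phys, dma_addr_t buf_phys,
			  unsigned int size)
{
	DECLARE_COMPLETION_ONSTACK(xfer_done);
	struct tegra_dma_channel *ch;
	struct tegra_dma_req req;
	int ret;

	ch = tegra_dma_allocate_channel(TEGRA_DMA_MODE_ONESHOT);
	if (!ch)
		return -EBUSY;

	memset(&req, 0, sizeof(req));
	req.to_memory        = 1;		/* device FIFO -> memory */
	req.source_addr      = fifo_phys;	/* 4-byte aligned */
	req.dest_addr        = buf_phys;	/* 4-byte aligned */
	req.source_bus_width = 32;
	req.dest_bus_width   = 32;
	req.source_wrap      = 4;	/* keep rereading the 4-byte FIFO register */
	req.dest_wrap        = 0;	/* no wrap on the memory side */
	req.req_sel          = TEGRA_DMA_REQ_SEL_UARTA;	/* example selector */
	req.size             = size;	/* multiple of 4, at most 64 KiB */
	req.complete         = my_dma_complete;
	req.dev              = &xfer_done;

	ret = tegra_dma_enqueue_req(ch, &req);
	if (!ret)
		wait_for_completion(&xfer_done);

	tegra_dma_free_channel(ch);
	return ret;
}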
diff --git a/arch/arm/mach-tegra/include/mach/gpio.h b/arch/arm/mach-tegra/include/mach/gpio.h
index 540e822e50f7..e31f486d69a2 100644
--- a/arch/arm/mach-tegra/include/mach/gpio.h
+++ b/arch/arm/mach-tegra/include/mach/gpio.h
@@ -22,7 +22,7 @@
#include <mach/irqs.h>
-#define ARCH_NR_GPIOS INT_GPIO_NR
+#define TEGRA_NR_GPIOS INT_GPIO_NR
#include <asm-generic/gpio.h>
@@ -35,7 +35,7 @@
static inline int gpio_to_irq(unsigned int gpio)
{
- if (gpio < ARCH_NR_GPIOS)
+ if (gpio < TEGRA_NR_GPIOS)
return INT_GPIO_BASE + gpio;
return -EINVAL;
}
diff --git a/arch/arm/mach-tegra/include/mach/hardware.h b/arch/arm/mach-tegra/include/mach/hardware.h
index 6014edf60d93..56e43b3a5b97 100644
--- a/arch/arm/mach-tegra/include/mach/hardware.h
+++ b/arch/arm/mach-tegra/include/mach/hardware.h
@@ -21,4 +21,8 @@
#ifndef __MACH_TEGRA_HARDWARE_H
#define __MACH_TEGRA_HARDWARE_H
+#define PCIBIOS_MIN_IO 0x1000
+#define PCIBIOS_MIN_MEM 0
+#define pcibios_assign_all_busses() 1
+
#endif
diff --git a/arch/arm/mach-tegra/include/mach/io.h b/arch/arm/mach-tegra/include/mach/io.h
index 35edfc32ffc9..f0981b1ac59e 100644
--- a/arch/arm/mach-tegra/include/mach/io.h
+++ b/arch/arm/mach-tegra/include/mach/io.h
@@ -21,7 +21,7 @@
#ifndef __MACH_TEGRA_IO_H
#define __MACH_TEGRA_IO_H
-#define IO_SPACE_LIMIT 0xffffffff
+#define IO_SPACE_LIMIT 0xffff
/* On TEGRA, many peripherals are very closely packed in
* two 256MB io windows (that actually only use about 64KB
@@ -33,6 +33,10 @@
*
*/
+#define IO_IRAM_PHYS 0x40000000
+#define IO_IRAM_VIRT 0xFE400000
+#define IO_IRAM_SIZE SZ_256K
+
#define IO_CPU_PHYS 0x50040000
#define IO_CPU_VIRT 0xFE000000
#define IO_CPU_SIZE SZ_16K
@@ -55,6 +59,8 @@
IO_TO_VIRT_XLATE((n), IO_APB_PHYS, IO_APB_VIRT) : \
IO_TO_VIRT_BETWEEN((n), IO_CPU_PHYS, IO_CPU_SIZE) ? \
IO_TO_VIRT_XLATE((n), IO_CPU_PHYS, IO_CPU_VIRT) : \
+ IO_TO_VIRT_BETWEEN((n), IO_IRAM_PHYS, IO_IRAM_SIZE) ? \
+ IO_TO_VIRT_XLATE((n), IO_IRAM_PHYS, IO_IRAM_VIRT) : \
0)
#ifndef __ASSEMBLER__
@@ -67,10 +73,20 @@ void tegra_iounmap(volatile void __iomem *addr);
#define IO_ADDRESS(n) ((void __iomem *) IO_TO_VIRT(n))
+#ifdef CONFIG_TEGRA_PCI
+extern void __iomem *tegra_pcie_io_base;
+
+static inline void __iomem *__io(unsigned long addr)
+{
+ return tegra_pcie_io_base + (addr & IO_SPACE_LIMIT);
+}
+#else
static inline void __iomem *__io(unsigned long addr)
{
return (void __iomem *)addr;
}
+#endif
+
#define __io(a) __io(a)
#define __mem_pci(a) (a)
diff --git a/arch/arm/mach-tegra/include/mach/iomap.h b/arch/arm/mach-tegra/include/mach/iomap.h
index 1741f7dd7a9b..44a4f4bcf91f 100644
--- a/arch/arm/mach-tegra/include/mach/iomap.h
+++ b/arch/arm/mach-tegra/include/mach/iomap.h
@@ -23,9 +23,15 @@
#include <asm/sizes.h>
+#define TEGRA_IRAM_BASE 0x40000000
+#define TEGRA_IRAM_SIZE SZ_256K
+
#define TEGRA_ARM_PERIF_BASE 0x50040000
#define TEGRA_ARM_PERIF_SIZE SZ_8K
+#define TEGRA_ARM_PL310_BASE 0x50043000
+#define TEGRA_ARM_PL310_SIZE SZ_4K
+
#define TEGRA_ARM_INT_DIST_BASE 0x50041000
#define TEGRA_ARM_INT_DIST_SIZE SZ_4K
@@ -68,7 +74,22 @@
#define TEGRA_FLOW_CTRL_BASE 0x60007000
#define TEGRA_FLOW_CTRL_SIZE 20
-#define TEGRA_STATMON_BASE 0x6000C4000
+#define TEGRA_AHB_DMA_BASE 0x60008000
+#define TEGRA_AHB_DMA_SIZE SZ_4K
+
+#define TEGRA_AHB_DMA_CH0_BASE 0x60009000
+#define TEGRA_AHB_DMA_CH0_SIZE 32
+
+#define TEGRA_APB_DMA_BASE 0x6000A000
+#define TEGRA_APB_DMA_SIZE SZ_4K
+
+#define TEGRA_APB_DMA_CH0_BASE 0x6000B000
+#define TEGRA_APB_DMA_CH0_SIZE 32
+
+#define TEGRA_AHB_GIZMO_BASE 0x6000C004
+#define TEGRA_AHB_GIZMO_SIZE 0x10C
+
+#define TEGRA_STATMON_BASE 0x6000C400
#define TEGRA_STATMON_SIZE SZ_1K
#define TEGRA_GPIO_BASE 0x6000D000
@@ -137,7 +158,7 @@
#define TEGRA_I2C3_BASE 0x7000C500
#define TEGRA_I2C3_SIZE SZ_256
-#define TEGRA_OWR_BASE 0x7000D000
+#define TEGRA_OWR_BASE 0x7000C600
#define TEGRA_OWR_SIZE 80
#define TEGRA_DVC_BASE 0x7000D000
@@ -182,12 +203,12 @@
#define TEGRA_USB_BASE 0xC5000000
#define TEGRA_USB_SIZE SZ_16K
-#define TEGRA_USB1_BASE 0xC5004000
-#define TEGRA_USB1_SIZE SZ_16K
-
-#define TEGRA_USB2_BASE 0xC5008000
+#define TEGRA_USB2_BASE 0xC5004000
#define TEGRA_USB2_SIZE SZ_16K
+#define TEGRA_USB3_BASE 0xC5008000
+#define TEGRA_USB3_SIZE SZ_16K
+
#define TEGRA_SDMMC1_BASE 0xC8000000
#define TEGRA_SDMMC1_SIZE SZ_512
diff --git a/arch/arm/mach-tegra/include/mach/irqs.h b/arch/arm/mach-tegra/include/mach/irqs.h
index 20f640edaa0d..71bbf3422953 100644
--- a/arch/arm/mach-tegra/include/mach/irqs.h
+++ b/arch/arm/mach-tegra/include/mach/irqs.h
@@ -25,6 +25,7 @@
#define IRQ_LOCALTIMER 29
+#ifdef CONFIG_ARCH_TEGRA_2x_SOC
/* Primary Interrupt Controller */
#define INT_PRI_BASE (INT_GIC_BASE + 32)
#define INT_TMR1 (INT_PRI_BASE + 0)
@@ -169,5 +170,6 @@
#define INT_GPIO_NR (28 * 8)
#define NR_IRQS (INT_GPIO_BASE + INT_GPIO_NR)
+#endif
#endif
diff --git a/arch/arm/mach-tegra/include/mach/legacy_irq.h b/arch/arm/mach-tegra/include/mach/legacy_irq.h
new file mode 100644
index 000000000000..db1eb3dd04c8
--- /dev/null
+++ b/arch/arm/mach-tegra/include/mach/legacy_irq.h
@@ -0,0 +1,31 @@
+/*
+ * arch/arm/mach-tegra/include/mach/legacy_irq.h
+ *
+ * Copyright (C) 2010 Google, Inc.
+ * Author: Colin Cross <ccross@android.com>
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#ifndef _ARCH_ARM_MACH_TEGRA_LEGARY_IRQ_H
+#define _ARCH_ARM_MACH_TEGRA_LEGARY_IRQ_H
+
+void tegra_legacy_mask_irq(unsigned int irq);
+void tegra_legacy_unmask_irq(unsigned int irq);
+void tegra_legacy_select_fiq(unsigned int irq, bool fiq);
+void tegra_legacy_force_irq_set(unsigned int irq);
+void tegra_legacy_force_irq_clr(unsigned int irq);
+int tegra_legacy_force_irq_status(unsigned int irq);
+void tegra_legacy_select_fiq(unsigned int irq, bool fiq);
+unsigned long tegra_legacy_vfiq(int nr);
+unsigned long tegra_legacy_class(int nr);
+
+#endif
diff --git a/arch/arm/mach-tegra/include/mach/pinmux-t2.h b/arch/arm/mach-tegra/include/mach/pinmux-t2.h
new file mode 100644
index 000000000000..e5b9d740f973
--- /dev/null
+++ b/arch/arm/mach-tegra/include/mach/pinmux-t2.h
@@ -0,0 +1,174 @@
+/*
+ * linux/arch/arm/mach-tegra/include/mach/pinmux-t2.h
+ *
+ * Copyright (C) 2010 Google, Inc.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#ifndef __MACH_TEGRA_PINMUX_T2_H
+#define __MACH_TEGRA_PINMUX_T2_H
+
+enum tegra_pingroup {
+ TEGRA_PINGROUP_ATA = 0,
+ TEGRA_PINGROUP_ATB,
+ TEGRA_PINGROUP_ATC,
+ TEGRA_PINGROUP_ATD,
+ TEGRA_PINGROUP_ATE,
+ TEGRA_PINGROUP_CDEV1,
+ TEGRA_PINGROUP_CDEV2,
+ TEGRA_PINGROUP_CRTP,
+ TEGRA_PINGROUP_CSUS,
+ TEGRA_PINGROUP_DAP1,
+ TEGRA_PINGROUP_DAP2,
+ TEGRA_PINGROUP_DAP3,
+ TEGRA_PINGROUP_DAP4,
+ TEGRA_PINGROUP_DDC,
+ TEGRA_PINGROUP_DTA,
+ TEGRA_PINGROUP_DTB,
+ TEGRA_PINGROUP_DTC,
+ TEGRA_PINGROUP_DTD,
+ TEGRA_PINGROUP_DTE,
+ TEGRA_PINGROUP_DTF,
+ TEGRA_PINGROUP_GMA,
+ TEGRA_PINGROUP_GMB,
+ TEGRA_PINGROUP_GMC,
+ TEGRA_PINGROUP_GMD,
+ TEGRA_PINGROUP_GME,
+ TEGRA_PINGROUP_GPU,
+ TEGRA_PINGROUP_GPU7,
+ TEGRA_PINGROUP_GPV,
+ TEGRA_PINGROUP_HDINT,
+ TEGRA_PINGROUP_I2CP,
+ TEGRA_PINGROUP_IRRX,
+ TEGRA_PINGROUP_IRTX,
+ TEGRA_PINGROUP_KBCA,
+ TEGRA_PINGROUP_KBCB,
+ TEGRA_PINGROUP_KBCC,
+ TEGRA_PINGROUP_KBCD,
+ TEGRA_PINGROUP_KBCE,
+ TEGRA_PINGROUP_KBCF,
+ TEGRA_PINGROUP_LCSN,
+ TEGRA_PINGROUP_LD0,
+ TEGRA_PINGROUP_LD1,
+ TEGRA_PINGROUP_LD10,
+ TEGRA_PINGROUP_LD11,
+ TEGRA_PINGROUP_LD12,
+ TEGRA_PINGROUP_LD13,
+ TEGRA_PINGROUP_LD14,
+ TEGRA_PINGROUP_LD15,
+ TEGRA_PINGROUP_LD16,
+ TEGRA_PINGROUP_LD17,
+ TEGRA_PINGROUP_LD2,
+ TEGRA_PINGROUP_LD3,
+ TEGRA_PINGROUP_LD4,
+ TEGRA_PINGROUP_LD5,
+ TEGRA_PINGROUP_LD6,
+ TEGRA_PINGROUP_LD7,
+ TEGRA_PINGROUP_LD8,
+ TEGRA_PINGROUP_LD9,
+ TEGRA_PINGROUP_LDC,
+ TEGRA_PINGROUP_LDI,
+ TEGRA_PINGROUP_LHP0,
+ TEGRA_PINGROUP_LHP1,
+ TEGRA_PINGROUP_LHP2,
+ TEGRA_PINGROUP_LHS,
+ TEGRA_PINGROUP_LM0,
+ TEGRA_PINGROUP_LM1,
+ TEGRA_PINGROUP_LPP,
+ TEGRA_PINGROUP_LPW0,
+ TEGRA_PINGROUP_LPW1,
+ TEGRA_PINGROUP_LPW2,
+ TEGRA_PINGROUP_LSC0,
+ TEGRA_PINGROUP_LSC1,
+ TEGRA_PINGROUP_LSCK,
+ TEGRA_PINGROUP_LSDA,
+ TEGRA_PINGROUP_LSDI,
+ TEGRA_PINGROUP_LSPI,
+ TEGRA_PINGROUP_LVP0,
+ TEGRA_PINGROUP_LVP1,
+ TEGRA_PINGROUP_LVS,
+ TEGRA_PINGROUP_OWC,
+ TEGRA_PINGROUP_PMC,
+ TEGRA_PINGROUP_PTA,
+ TEGRA_PINGROUP_RM,
+ TEGRA_PINGROUP_SDB,
+ TEGRA_PINGROUP_SDC,
+ TEGRA_PINGROUP_SDD,
+ TEGRA_PINGROUP_SDIO1,
+ TEGRA_PINGROUP_SLXA,
+ TEGRA_PINGROUP_SLXC,
+ TEGRA_PINGROUP_SLXD,
+ TEGRA_PINGROUP_SLXK,
+ TEGRA_PINGROUP_SPDI,
+ TEGRA_PINGROUP_SPDO,
+ TEGRA_PINGROUP_SPIA,
+ TEGRA_PINGROUP_SPIB,
+ TEGRA_PINGROUP_SPIC,
+ TEGRA_PINGROUP_SPID,
+ TEGRA_PINGROUP_SPIE,
+ TEGRA_PINGROUP_SPIF,
+ TEGRA_PINGROUP_SPIG,
+ TEGRA_PINGROUP_SPIH,
+ TEGRA_PINGROUP_UAA,
+ TEGRA_PINGROUP_UAB,
+ TEGRA_PINGROUP_UAC,
+ TEGRA_PINGROUP_UAD,
+ TEGRA_PINGROUP_UCA,
+ TEGRA_PINGROUP_UCB,
+ TEGRA_PINGROUP_UDA,
+ /* these pin groups only have pullup and pull down control */
+ TEGRA_PINGROUP_CK32,
+ TEGRA_PINGROUP_DDRC,
+ TEGRA_PINGROUP_PMCA,
+ TEGRA_PINGROUP_PMCB,
+ TEGRA_PINGROUP_PMCC,
+ TEGRA_PINGROUP_PMCD,
+ TEGRA_PINGROUP_PMCE,
+ TEGRA_PINGROUP_XM2C,
+ TEGRA_PINGROUP_XM2D,
+ TEGRA_MAX_PINGROUP,
+};
+
+enum tegra_drive_pingroup {
+ TEGRA_DRIVE_PINGROUP_AO1 = 0,
+ TEGRA_DRIVE_PINGROUP_AO2,
+ TEGRA_DRIVE_PINGROUP_AT1,
+ TEGRA_DRIVE_PINGROUP_AT2,
+ TEGRA_DRIVE_PINGROUP_CDEV1,
+ TEGRA_DRIVE_PINGROUP_CDEV2,
+ TEGRA_DRIVE_PINGROUP_CSUS,
+ TEGRA_DRIVE_PINGROUP_DAP1,
+ TEGRA_DRIVE_PINGROUP_DAP2,
+ TEGRA_DRIVE_PINGROUP_DAP3,
+ TEGRA_DRIVE_PINGROUP_DAP4,
+ TEGRA_DRIVE_PINGROUP_DBG,
+ TEGRA_DRIVE_PINGROUP_LCD1,
+ TEGRA_DRIVE_PINGROUP_LCD2,
+ TEGRA_DRIVE_PINGROUP_SDMMC2,
+ TEGRA_DRIVE_PINGROUP_SDMMC3,
+ TEGRA_DRIVE_PINGROUP_SPI,
+ TEGRA_DRIVE_PINGROUP_UAA,
+ TEGRA_DRIVE_PINGROUP_UAB,
+ TEGRA_DRIVE_PINGROUP_UART2,
+ TEGRA_DRIVE_PINGROUP_UART3,
+ TEGRA_DRIVE_PINGROUP_VI1,
+ TEGRA_DRIVE_PINGROUP_VI2,
+ TEGRA_DRIVE_PINGROUP_XM2A,
+ TEGRA_DRIVE_PINGROUP_XM2C,
+ TEGRA_DRIVE_PINGROUP_XM2D,
+ TEGRA_DRIVE_PINGROUP_XM2CLK,
+ TEGRA_DRIVE_PINGROUP_MEMCOMP,
+ TEGRA_MAX_DRIVE_PINGROUP,
+};
+
+#endif
+
diff --git a/arch/arm/mach-tegra/include/mach/pinmux.h b/arch/arm/mach-tegra/include/mach/pinmux.h
index 41c8ce5b7c27..defd8775defa 100644
--- a/arch/arm/mach-tegra/include/mach/pinmux.h
+++ b/arch/arm/mach-tegra/include/mach/pinmux.h
@@ -17,126 +17,11 @@
#ifndef __MACH_TEGRA_PINMUX_H
#define __MACH_TEGRA_PINMUX_H
-enum tegra_pingroup {
- TEGRA_PINGROUP_ATA = 0,
- TEGRA_PINGROUP_ATB,
- TEGRA_PINGROUP_ATC,
- TEGRA_PINGROUP_ATD,
- TEGRA_PINGROUP_ATE,
- TEGRA_PINGROUP_CDEV1,
- TEGRA_PINGROUP_CDEV2,
- TEGRA_PINGROUP_CRTP,
- TEGRA_PINGROUP_CSUS,
- TEGRA_PINGROUP_DAP1,
- TEGRA_PINGROUP_DAP2,
- TEGRA_PINGROUP_DAP3,
- TEGRA_PINGROUP_DAP4,
- TEGRA_PINGROUP_DDC,
- TEGRA_PINGROUP_DTA,
- TEGRA_PINGROUP_DTB,
- TEGRA_PINGROUP_DTC,
- TEGRA_PINGROUP_DTD,
- TEGRA_PINGROUP_DTE,
- TEGRA_PINGROUP_DTF,
- TEGRA_PINGROUP_GMA,
- TEGRA_PINGROUP_GMB,
- TEGRA_PINGROUP_GMC,
- TEGRA_PINGROUP_GMD,
- TEGRA_PINGROUP_GME,
- TEGRA_PINGROUP_GPU,
- TEGRA_PINGROUP_GPU7,
- TEGRA_PINGROUP_GPV,
- TEGRA_PINGROUP_HDINT,
- TEGRA_PINGROUP_I2CP,
- TEGRA_PINGROUP_IRRX,
- TEGRA_PINGROUP_IRTX,
- TEGRA_PINGROUP_KBCA,
- TEGRA_PINGROUP_KBCB,
- TEGRA_PINGROUP_KBCC,
- TEGRA_PINGROUP_KBCD,
- TEGRA_PINGROUP_KBCE,
- TEGRA_PINGROUP_KBCF,
- TEGRA_PINGROUP_LCSN,
- TEGRA_PINGROUP_LD0,
- TEGRA_PINGROUP_LD1,
- TEGRA_PINGROUP_LD10,
- TEGRA_PINGROUP_LD11,
- TEGRA_PINGROUP_LD12,
- TEGRA_PINGROUP_LD13,
- TEGRA_PINGROUP_LD14,
- TEGRA_PINGROUP_LD15,
- TEGRA_PINGROUP_LD16,
- TEGRA_PINGROUP_LD17,
- TEGRA_PINGROUP_LD2,
- TEGRA_PINGROUP_LD3,
- TEGRA_PINGROUP_LD4,
- TEGRA_PINGROUP_LD5,
- TEGRA_PINGROUP_LD6,
- TEGRA_PINGROUP_LD7,
- TEGRA_PINGROUP_LD8,
- TEGRA_PINGROUP_LD9,
- TEGRA_PINGROUP_LDC,
- TEGRA_PINGROUP_LDI,
- TEGRA_PINGROUP_LHP0,
- TEGRA_PINGROUP_LHP1,
- TEGRA_PINGROUP_LHP2,
- TEGRA_PINGROUP_LHS,
- TEGRA_PINGROUP_LM0,
- TEGRA_PINGROUP_LM1,
- TEGRA_PINGROUP_LPP,
- TEGRA_PINGROUP_LPW0,
- TEGRA_PINGROUP_LPW1,
- TEGRA_PINGROUP_LPW2,
- TEGRA_PINGROUP_LSC0,
- TEGRA_PINGROUP_LSC1,
- TEGRA_PINGROUP_LSCK,
- TEGRA_PINGROUP_LSDA,
- TEGRA_PINGROUP_LSDI,
- TEGRA_PINGROUP_LSPI,
- TEGRA_PINGROUP_LVP0,
- TEGRA_PINGROUP_LVP1,
- TEGRA_PINGROUP_LVS,
- TEGRA_PINGROUP_OWC,
- TEGRA_PINGROUP_PMC,
- TEGRA_PINGROUP_PTA,
- TEGRA_PINGROUP_RM,
- TEGRA_PINGROUP_SDB,
- TEGRA_PINGROUP_SDC,
- TEGRA_PINGROUP_SDD,
- TEGRA_PINGROUP_SDIO1,
- TEGRA_PINGROUP_SLXA,
- TEGRA_PINGROUP_SLXC,
- TEGRA_PINGROUP_SLXD,
- TEGRA_PINGROUP_SLXK,
- TEGRA_PINGROUP_SPDI,
- TEGRA_PINGROUP_SPDO,
- TEGRA_PINGROUP_SPIA,
- TEGRA_PINGROUP_SPIB,
- TEGRA_PINGROUP_SPIC,
- TEGRA_PINGROUP_SPID,
- TEGRA_PINGROUP_SPIE,
- TEGRA_PINGROUP_SPIF,
- TEGRA_PINGROUP_SPIG,
- TEGRA_PINGROUP_SPIH,
- TEGRA_PINGROUP_UAA,
- TEGRA_PINGROUP_UAB,
- TEGRA_PINGROUP_UAC,
- TEGRA_PINGROUP_UAD,
- TEGRA_PINGROUP_UCA,
- TEGRA_PINGROUP_UCB,
- TEGRA_PINGROUP_UDA,
- /* these pin groups only have pullup and pull down control */
- TEGRA_PINGROUP_CK32,
- TEGRA_PINGROUP_DDRC,
- TEGRA_PINGROUP_PMCA,
- TEGRA_PINGROUP_PMCB,
- TEGRA_PINGROUP_PMCC,
- TEGRA_PINGROUP_PMCD,
- TEGRA_PINGROUP_PMCE,
- TEGRA_PINGROUP_XM2C,
- TEGRA_PINGROUP_XM2D,
- TEGRA_MAX_PINGROUP,
-};
+#if defined(CONFIG_ARCH_TEGRA_2x_SOC)
+#include "pinmux-t2.h"
+#else
+#error "Undefined Tegra architecture"
+#endif
enum tegra_mux_func {
TEGRA_MUX_RSVD = 0x8000,
@@ -205,6 +90,7 @@ enum tegra_mux_func {
TEGRA_MUX_VI,
TEGRA_MUX_VI_SENSOR_CLK,
TEGRA_MUX_XIO,
+ TEGRA_MUX_SAFE,
TEGRA_MAX_MUX,
};
@@ -219,6 +105,18 @@ enum tegra_tristate {
TEGRA_TRI_TRISTATE = 1,
};
+enum tegra_vddio {
+ TEGRA_VDDIO_BB = 0,
+ TEGRA_VDDIO_LCD,
+ TEGRA_VDDIO_VI,
+ TEGRA_VDDIO_UART,
+ TEGRA_VDDIO_DDR,
+ TEGRA_VDDIO_NAND,
+ TEGRA_VDDIO_SYS,
+ TEGRA_VDDIO_AUDIO,
+ TEGRA_VDDIO_SD,
+};
+
struct tegra_pingroup_config {
enum tegra_pingroup pingroup;
enum tegra_mux_func func;
@@ -270,38 +168,6 @@ enum tegra_pull_strength {
TEGRA_MAX_PULL,
};
-enum tegra_drive_pingroup {
- TEGRA_DRIVE_PINGROUP_AO1 = 0,
- TEGRA_DRIVE_PINGROUP_AO2,
- TEGRA_DRIVE_PINGROUP_AT1,
- TEGRA_DRIVE_PINGROUP_AT2,
- TEGRA_DRIVE_PINGROUP_CDEV1,
- TEGRA_DRIVE_PINGROUP_CDEV2,
- TEGRA_DRIVE_PINGROUP_CSUS,
- TEGRA_DRIVE_PINGROUP_DAP1,
- TEGRA_DRIVE_PINGROUP_DAP2,
- TEGRA_DRIVE_PINGROUP_DAP3,
- TEGRA_DRIVE_PINGROUP_DAP4,
- TEGRA_DRIVE_PINGROUP_DBG,
- TEGRA_DRIVE_PINGROUP_LCD1,
- TEGRA_DRIVE_PINGROUP_LCD2,
- TEGRA_DRIVE_PINGROUP_SDMMC2,
- TEGRA_DRIVE_PINGROUP_SDMMC3,
- TEGRA_DRIVE_PINGROUP_SPI,
- TEGRA_DRIVE_PINGROUP_UAA,
- TEGRA_DRIVE_PINGROUP_UAB,
- TEGRA_DRIVE_PINGROUP_UART2,
- TEGRA_DRIVE_PINGROUP_UART3,
- TEGRA_DRIVE_PINGROUP_VI1,
- TEGRA_DRIVE_PINGROUP_VI2,
- TEGRA_DRIVE_PINGROUP_XM2A,
- TEGRA_DRIVE_PINGROUP_XM2C,
- TEGRA_DRIVE_PINGROUP_XM2D,
- TEGRA_DRIVE_PINGROUP_XM2CLK,
- TEGRA_DRIVE_PINGROUP_MEMCOMP,
- TEGRA_MAX_DRIVE_PINGROUP,
-};
-
enum tegra_drive {
TEGRA_DRIVE_DIV_8 = 0,
TEGRA_DRIVE_DIV_4,
@@ -331,18 +197,44 @@ struct tegra_drive_pingroup_config {
enum tegra_slew slew_falling;
};
-int tegra_pinmux_set_func(enum tegra_pingroup pg, enum tegra_mux_func func);
-int tegra_pinmux_set_tristate(enum tegra_pingroup pg, enum tegra_tristate tristate);
-int tegra_pinmux_set_pullupdown(enum tegra_pingroup pg, enum tegra_pullupdown pupd);
+struct tegra_drive_pingroup_desc {
+ const char *name;
+ s16 reg;
+};
+
+struct tegra_pingroup_desc {
+ const char *name;
+ int funcs[4];
+ int func_safe;
+ int vddio;
+ s16 tri_reg; /* offset into the TRISTATE_REG_* register bank */
+ s16 mux_reg; /* offset into the PIN_MUX_CTL_* register bank */
+ s16 pupd_reg; /* offset into the PULL_UPDOWN_REG_* register bank */
+ s8 tri_bit; /* bit position within the TRISTATE_REG_* register */
+ s8 mux_bit; /* bit position within the PIN_MUX_CTL_* register */
+ s8 pupd_bit; /* bit position within the PULL_UPDOWN_REG_* register */
+};
+
+extern const struct tegra_pingroup_desc tegra_soc_pingroups[];
+extern const struct tegra_drive_pingroup_desc tegra_soc_drive_pingroups[];
-void tegra_pinmux_config_pingroup(enum tegra_pingroup pingroup,
- enum tegra_mux_func func, enum tegra_pullupdown pupd,
+int tegra_pinmux_set_tristate(enum tegra_pingroup pg,
enum tegra_tristate tristate);
+int tegra_pinmux_set_pullupdown(enum tegra_pingroup pg,
+ enum tegra_pullupdown pupd);
-void tegra_pinmux_config_table(struct tegra_pingroup_config *config, int len);
+void tegra_pinmux_config_table(const struct tegra_pingroup_config *config,
+ int len);
void tegra_drive_pinmux_config_table(struct tegra_drive_pingroup_config *config,
int len);
-
+void tegra_pinmux_set_safe_pinmux_table(const struct tegra_pingroup_config *config,
+ int len);
+void tegra_pinmux_config_pinmux_table(const struct tegra_pingroup_config *config,
+ int len);
+void tegra_pinmux_config_tristate_table(const struct tegra_pingroup_config *config,
+ int len, enum tegra_tristate tristate);
+void tegra_pinmux_config_pullupdown_table(const struct tegra_pingroup_config *config,
+ int len, enum tegra_pullupdown pupd);
#endif
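
For orientation, a minimal usage sketch of the table API declared above, assuming board code supplies its own tegra_pingroup_config array; the pin groups, mux choices and example_* names are illustrative only and not part of this patch (a real table would normally also fill in the pull-up/down and tristate fields):

/* illustrative board-level sketch only, not part of this patch */
static const struct tegra_pingroup_config example_pinmux[] = {
	{ .pingroup = TEGRA_PINGROUP_IRRX, .func = TEGRA_MUX_UARTB },
	{ .pingroup = TEGRA_PINGROUP_IRTX, .func = TEGRA_MUX_UARTB },
};

static void __init example_board_pinmux_init(void)
{
	tegra_pinmux_config_table(example_pinmux, ARRAY_SIZE(example_pinmux));
}
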
diff --git a/arch/arm/mach-tegra/io.c b/arch/arm/mach-tegra/io.c
index 9fe2c5c683d4..31848a9592f8 100644
--- a/arch/arm/mach-tegra/io.c
+++ b/arch/arm/mach-tegra/io.c
@@ -49,6 +49,12 @@ static struct map_desc tegra_io_desc[] __initdata = {
.length = IO_CPU_SIZE,
.type = MT_DEVICE,
},
+ {
+ .virtual = IO_IRAM_VIRT,
+ .pfn = __phys_to_pfn(IO_IRAM_PHYS),
+ .length = IO_IRAM_SIZE,
+ .type = MT_DEVICE,
+ },
};
void __init tegra_map_common_io(void)
diff --git a/arch/arm/mach-tegra/irq.c b/arch/arm/mach-tegra/irq.c
index 1fdbe708d43d..50a8dfb9a0cf 100644
--- a/arch/arm/mach-tegra/irq.c
+++ b/arch/arm/mach-tegra/irq.c
@@ -4,6 +4,8 @@
* Author:
* Colin Cross <ccross@google.com>
*
+ * Copyright (C) 2010, NVIDIA Corporation
+ *
* This software is licensed under the terms of the GNU General Public
* License version 2, as published by the Free Software Foundation, and
* may be copied, distributed, and modified under those terms.
@@ -27,8 +29,143 @@
#include "board.h"
+#define INT_SYS_NR (INT_GPIO_BASE - INT_PRI_BASE)
+#define INT_SYS_SZ (INT_SEC_BASE - INT_PRI_BASE)
+#define PPI_NR ((INT_SYS_NR+INT_SYS_SZ-1)/INT_SYS_SZ)
+
+#define APBDMA_IRQ_STA_CPU 0x14
+#define APBDMA_IRQ_MASK_SET 0x20
+#define APBDMA_IRQ_MASK_CLR 0x24
+
+#define ICTLR_CPU_IER 0x20
+#define ICTLR_CPU_IER_SET 0x24
+#define ICTLR_CPU_IER_CLR 0x28
+#define ICTLR_CPU_IEP_CLASS 0x2c
+#define ICTLR_COP_IER 0x30
+#define ICTLR_COP_IER_SET 0x34
+#define ICTLR_COP_IER_CLR 0x38
+#define ICTLR_COP_IEP_CLASS 0x3c
+
+static void (*gic_mask_irq)(unsigned int irq);
+static void (*gic_unmask_irq)(unsigned int irq);
+
+#define irq_to_ictlr(irq) (((irq)-32) >> 5)
+static void __iomem *tegra_ictlr_base = IO_ADDRESS(TEGRA_PRIMARY_ICTLR_BASE);
+#define ictlr_to_virt(ictlr) (tegra_ictlr_base + (ictlr)*0x100)
+
+static void tegra_mask(unsigned int irq)
+{
+ void __iomem *addr = ictlr_to_virt(irq_to_ictlr(irq));
+ gic_mask_irq(irq);
+ writel(1<<(irq&31), addr+ICTLR_CPU_IER_CLR);
+}
+
+static void tegra_unmask(unsigned int irq)
+{
+ void __iomem *addr = ictlr_to_virt(irq_to_ictlr(irq));
+ gic_unmask_irq(irq);
+ writel(1<<(irq&31), addr+ICTLR_CPU_IER_SET);
+}
+
+#ifdef CONFIG_PM
+
+static int tegra_set_wake(unsigned int irq, unsigned int on)
+{
+ return 0;
+}
+#endif
+
+static struct irq_chip tegra_irq = {
+ .name = "PPI",
+ .mask = tegra_mask,
+ .unmask = tegra_unmask,
+#ifdef CONFIG_PM
+ .set_wake = tegra_set_wake,
+#endif
+};
+
void __init tegra_init_irq(void)
{
+ struct irq_chip *gic;
+ unsigned int i;
+
+ for (i = 0; i < PPI_NR; i++) {
+ writel(~0, ictlr_to_virt(i) + ICTLR_CPU_IER_CLR);
+ writel(0, ictlr_to_virt(i) + ICTLR_CPU_IEP_CLASS);
+ }
+
gic_dist_init(0, IO_ADDRESS(TEGRA_ARM_INT_DIST_BASE), 29);
gic_cpu_init(0, IO_ADDRESS(TEGRA_ARM_PERIF_BASE + 0x100));
+
+ gic = get_irq_chip(29);
+ gic_unmask_irq = gic->unmask;
+ gic_mask_irq = gic->mask;
+ tegra_irq.ack = gic->ack;
+#ifdef CONFIG_SMP
+ tegra_irq.set_affinity = gic->set_affinity;
+#endif
+
+ for (i = INT_PRI_BASE; i < INT_GPIO_BASE; i++) {
+ set_irq_chip(i, &tegra_irq);
+ set_irq_handler(i, handle_level_irq);
+ set_irq_flags(i, IRQF_VALID);
+ }
+}
+
+#ifdef CONFIG_PM
+static u32 cop_ier[PPI_NR];
+static u32 cpu_ier[PPI_NR];
+static u32 cpu_iep[PPI_NR];
+
+void tegra_irq_suspend(void)
+{
+ unsigned long flags;
+ int i;
+
+ for (i = INT_PRI_BASE; i < INT_GPIO_BASE; i++) {
+ struct irq_desc *desc = irq_to_desc(i);
+ if (!desc)
+ continue;
+ if (desc->status & IRQ_WAKEUP) {
+ pr_debug("irq %d is wakeup\n", i);
+ continue;
+ }
+ disable_irq(i);
+ }
+
+ local_irq_save(flags);
+ for (i = 0; i < PPI_NR; i++) {
+ void __iomem *ictlr = ictlr_to_virt(i);
+ cpu_ier[i] = readl(ictlr + ICTLR_CPU_IER);
+ cpu_iep[i] = readl(ictlr + ICTLR_CPU_IEP_CLASS);
+ cop_ier[i] = readl(ictlr + ICTLR_COP_IER);
+ writel(~0, ictlr + ICTLR_COP_IER_CLR);
+ }
+ local_irq_restore(flags);
+}
+
+void tegra_irq_resume(void)
+{
+ unsigned long flags;
+ int i;
+
+ local_irq_save(flags);
+ for (i = 0; i < PPI_NR; i++) {
+ void __iomem *ictlr = ictlr_to_virt(i);
+ writel(cpu_iep[i], ictlr + ICTLR_CPU_IEP_CLASS);
+ writel(~0ul, ictlr + ICTLR_CPU_IER_CLR);
+ writel(cpu_ier[i], ictlr + ICTLR_CPU_IER_SET);
+ writel(0, ictlr + ICTLR_COP_IEP_CLASS);
+ writel(~0ul, ictlr + ICTLR_COP_IER_CLR);
+ writel(cop_ier[i], ictlr + ICTLR_COP_IER_SET);
+ }
+ local_irq_restore(flags);
+
+ for (i = INT_PRI_BASE; i < INT_GPIO_BASE; i++) {
+ struct irq_desc *desc = irq_to_desc(i);
+ if (!desc || (desc->status & IRQ_WAKEUP))
+ continue;
+ enable_irq(i);
+ }
}
+#endif
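
To make the ictlr arithmetic in tegra_mask()/tegra_unmask() above concrete, a small worked example (illustrative only; IRQ 68 is a hypothetical peripheral interrupt):

unsigned int irq = 68;		/* hypothetical peripheral IRQ */
int ictlr = (irq - 32) >> 5;	/* (68 - 32) >> 5 = 1: secondary controller */
u32 bit = 1 << (irq & 31);	/* 68 & 31 = 4: bit 4 of that controller's IER */
/* tegra_mask() writes 'bit' to ICTLR_CPU_IER_CLR of controller 1,
   tegra_unmask() writes it to ICTLR_CPU_IER_SET */
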
diff --git a/arch/arm/mach-tegra/legacy_irq.c b/arch/arm/mach-tegra/legacy_irq.c
new file mode 100644
index 000000000000..7cc8601c19ff
--- /dev/null
+++ b/arch/arm/mach-tegra/legacy_irq.c
@@ -0,0 +1,114 @@
+/*
+ * arch/arm/mach-tegra/legacy_irq.c
+ *
+ * Copyright (C) 2010 Google, Inc.
+ * Author: Colin Cross <ccross@android.com>
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <linux/io.h>
+#include <linux/kernel.h>
+#include <mach/iomap.h>
+#include <mach/legacy_irq.h>
+
+#define ICTLR_CPU_IER 0x20
+#define ICTLR_CPU_IER_SET 0x24
+#define ICTLR_CPU_IER_CLR 0x28
+#define ICTLR_CPU_IEP_CLASS 0x2C
+#define ICTLR_CPU_IEP_VFIQ 0x08
+#define ICTLR_CPU_IEP_FIR 0x14
+#define ICTLR_CPU_IEP_FIR_SET 0x18
+#define ICTLR_CPU_IEP_FIR_CLR 0x1c
+
+static void __iomem *ictlr_reg_base[] = {
+ IO_ADDRESS(TEGRA_PRIMARY_ICTLR_BASE),
+ IO_ADDRESS(TEGRA_SECONDARY_ICTLR_BASE),
+ IO_ADDRESS(TEGRA_TERTIARY_ICTLR_BASE),
+ IO_ADDRESS(TEGRA_QUATERNARY_ICTLR_BASE),
+};
+
+/* When going into deep sleep, the CPU is powered down, taking the GIC with it.
+ In order to wake, the wake interrupts need to be enabled in the legacy
+ interrupt controller. */
+void tegra_legacy_unmask_irq(unsigned int irq)
+{
+ void __iomem *base;
+ pr_debug("%s: %d\n", __func__, irq);
+
+ irq -= 32;
+ base = ictlr_reg_base[irq>>5];
+ writel(1 << (irq & 31), base + ICTLR_CPU_IER_SET);
+}
+
+void tegra_legacy_mask_irq(unsigned int irq)
+{
+ void __iomem *base;
+ pr_debug("%s: %d\n", __func__, irq);
+
+ irq -= 32;
+ base = ictlr_reg_base[irq>>5];
+ writel(1 << (irq & 31), base + ICTLR_CPU_IER_CLR);
+}
+
+void tegra_legacy_force_irq_set(unsigned int irq)
+{
+ void __iomem *base;
+ pr_debug("%s: %d\n", __func__, irq);
+
+ irq -= 32;
+ base = ictlr_reg_base[irq>>5];
+ writel(1 << (irq & 31), base + ICTLR_CPU_IEP_FIR_SET);
+}
+
+void tegra_legacy_force_irq_clr(unsigned int irq)
+{
+ void __iomem *base;
+ pr_debug("%s: %d\n", __func__, irq);
+
+ irq -= 32;
+ base = ictlr_reg_base[irq>>5];
+ writel(1 << (irq & 31), base + ICTLR_CPU_IEP_FIR_CLR);
+}
+
+int tegra_legacy_force_irq_status(unsigned int irq)
+{
+ void __iomem *base;
+ pr_debug("%s: %d\n", __func__, irq);
+
+ irq -= 32;
+ base = ictlr_reg_base[irq>>5];
+ return !!(readl(base + ICTLR_CPU_IEP_FIR) & (1 << (irq & 31)));
+}
+
+void tegra_legacy_select_fiq(unsigned int irq, bool fiq)
+{
+ void __iomem *base;
+ pr_debug("%s: %d\n", __func__, irq);
+
+ irq -= 32;
+ base = ictlr_reg_base[irq>>5];
+ writel(fiq << (irq & 31), base + ICTLR_CPU_IEP_CLASS);
+}
+
+unsigned long tegra_legacy_vfiq(int nr)
+{
+ void __iomem *base;
+ base = ictlr_reg_base[nr];
+ return readl(base + ICTLR_CPU_IEP_VFIQ);
+}
+
+unsigned long tegra_legacy_class(int nr)
+{
+ void __iomem *base;
+ base = ictlr_reg_base[nr];
+ return readl(base + ICTLR_CPU_IEP_CLASS);
+}
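
As the comment at the top of this file explains, the legacy controller stays powered when the CPU and GIC do not; a minimal sketch of how platform suspend code might use this API (illustrative only; example_prepare_for_deep_sleep() and wake_irq are hypothetical, not part of this patch):

#include <mach/legacy_irq.h>

/* illustrative only: keep a single wake source armed in the always-on
   legacy controller before the CPU/GIC lose power */
static void example_prepare_for_deep_sleep(unsigned int wake_irq)
{
	tegra_legacy_unmask_irq(wake_irq);
}
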
diff --git a/arch/arm/mach-tegra/pcie.c b/arch/arm/mach-tegra/pcie.c
new file mode 100644
index 000000000000..53f5fa37014a
--- /dev/null
+++ b/arch/arm/mach-tegra/pcie.c
@@ -0,0 +1,915 @@
+/*
+ * arch/arm/mach-tegra/pcie.c
+ *
+ * PCIe host controller driver for TEGRA(2) SOCs
+ *
+ * Copyright (c) 2010, CompuLab, Ltd.
+ * Author: Mike Rapoport <mike@compulab.co.il>
+ *
+ * Based on NVIDIA PCIe driver
+ * Copyright (c) 2008-2009, NVIDIA Corporation.
+ *
+ * Bits taken from arch/arm/mach-dove/pcie.c
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+#include <linux/kernel.h>
+#include <linux/pci.h>
+#include <linux/interrupt.h>
+#include <linux/irq.h>
+#include <linux/clk.h>
+#include <linux/delay.h>
+
+#include <asm/sizes.h>
+#include <asm/mach/pci.h>
+
+#include <mach/pinmux.h>
+#include <mach/iomap.h>
+#include <mach/clk.h>
+
+/* register definitions */
+#define AFI_OFFSET 0x3800
+#define PADS_OFFSET 0x3000
+#define RP0_OFFSET 0x0000
+#define RP1_OFFSET 0x1000
+
+#define AFI_AXI_BAR0_SZ 0x00
+#define AFI_AXI_BAR1_SZ 0x04
+#define AFI_AXI_BAR2_SZ 0x08
+#define AFI_AXI_BAR3_SZ 0x0c
+#define AFI_AXI_BAR4_SZ 0x10
+#define AFI_AXI_BAR5_SZ 0x14
+
+#define AFI_AXI_BAR0_START 0x18
+#define AFI_AXI_BAR1_START 0x1c
+#define AFI_AXI_BAR2_START 0x20
+#define AFI_AXI_BAR3_START 0x24
+#define AFI_AXI_BAR4_START 0x28
+#define AFI_AXI_BAR5_START 0x2c
+
+#define AFI_FPCI_BAR0 0x30
+#define AFI_FPCI_BAR1 0x34
+#define AFI_FPCI_BAR2 0x38
+#define AFI_FPCI_BAR3 0x3c
+#define AFI_FPCI_BAR4 0x40
+#define AFI_FPCI_BAR5 0x44
+
+#define AFI_CACHE_BAR0_SZ 0x48
+#define AFI_CACHE_BAR0_ST 0x4c
+#define AFI_CACHE_BAR1_SZ 0x50
+#define AFI_CACHE_BAR1_ST 0x54
+
+#define AFI_MSI_BAR_SZ 0x60
+#define AFI_MSI_FPCI_BAR_ST 0x64
+#define AFI_MSI_AXI_BAR_ST 0x68
+
+#define AFI_CONFIGURATION 0xac
+#define AFI_CONFIGURATION_EN_FPCI (1 << 0)
+
+#define AFI_FPCI_ERROR_MASKS 0xb0
+
+#define AFI_INTR_MASK 0xb4
+#define AFI_INTR_MASK_INT_MASK (1 << 0)
+#define AFI_INTR_MASK_MSI_MASK (1 << 8)
+
+#define AFI_INTR_CODE 0xb8
+#define AFI_INTR_CODE_MASK 0xf
+#define AFI_INTR_MASTER_ABORT 4
+#define AFI_INTR_LEGACY 6
+
+#define AFI_INTR_SIGNATURE 0xbc
+#define AFI_SM_INTR_ENABLE 0xc4
+
+#define AFI_AFI_INTR_ENABLE 0xc8
+#define AFI_INTR_EN_INI_SLVERR (1 << 0)
+#define AFI_INTR_EN_INI_DECERR (1 << 1)
+#define AFI_INTR_EN_TGT_SLVERR (1 << 2)
+#define AFI_INTR_EN_TGT_DECERR (1 << 3)
+#define AFI_INTR_EN_TGT_WRERR (1 << 4)
+#define AFI_INTR_EN_DFPCI_DECERR (1 << 5)
+#define AFI_INTR_EN_AXI_DECERR (1 << 6)
+#define AFI_INTR_EN_FPCI_TIMEOUT (1 << 7)
+
+#define AFI_PCIE_CONFIG 0x0f8
+#define AFI_PCIE_CONFIG_PCIEC0_DISABLE_DEVICE (1 << 1)
+#define AFI_PCIE_CONFIG_PCIEC1_DISABLE_DEVICE (1 << 2)
+#define AFI_PCIE_CONFIG_SM2TMS0_XBAR_CONFIG_MASK (0xf << 20)
+#define AFI_PCIE_CONFIG_SM2TMS0_XBAR_CONFIG_SINGLE (0x0 << 20)
+#define AFI_PCIE_CONFIG_SM2TMS0_XBAR_CONFIG_DUAL (0x1 << 20)
+
+#define AFI_FUSE 0x104
+#define AFI_FUSE_PCIE_T0_GEN2_DIS (1 << 2)
+
+#define AFI_PEX0_CTRL 0x110
+#define AFI_PEX1_CTRL 0x118
+#define AFI_PEX_CTRL_RST (1 << 0)
+#define AFI_PEX_CTRL_REFCLK_EN (1 << 3)
+
+#define RP_VEND_XP 0x00000F00
+#define RP_VEND_XP_DL_UP (1 << 30)
+
+#define RP_LINK_CONTROL_STATUS 0x00000090
+#define RP_LINK_CONTROL_STATUS_LINKSTAT_MASK 0x3fff0000
+
+#define PADS_CTL_SEL 0x0000009C
+
+#define PADS_CTL 0x000000A0
+#define PADS_CTL_IDDQ_1L (1 << 0)
+#define PADS_CTL_TX_DATA_EN_1L (1 << 6)
+#define PADS_CTL_RX_DATA_EN_1L (1 << 10)
+
+#define PADS_PLL_CTL 0x000000B8
+#define PADS_PLL_CTL_RST_B4SM (1 << 1)
+#define PADS_PLL_CTL_LOCKDET (1 << 8)
+#define PADS_PLL_CTL_REFCLK_MASK (0x3 << 16)
+#define PADS_PLL_CTL_REFCLK_INTERNAL_CML (0 << 16)
+#define PADS_PLL_CTL_REFCLK_INTERNAL_CMOS (1 << 16)
+#define PADS_PLL_CTL_REFCLK_EXTERNAL (2 << 16)
+#define PADS_PLL_CTL_TXCLKREF_MASK (0x1 << 20)
+#define PADS_PLL_CTL_TXCLKREF_DIV10 (0 << 20)
+#define PADS_PLL_CTL_TXCLKREF_DIV5 (1 << 20)
+
+/* PMC access is required for PCIE xclk (un)clamping */
+#define PMC_SCRATCH42 0x144
+#define PMC_SCRATCH42_PCX_CLAMP (1 << 0)
+
+static void __iomem *reg_pmc_base = IO_ADDRESS(TEGRA_PMC_BASE);
+
+#define pmc_writel(value, reg) \
+ __raw_writel(value, (u32)reg_pmc_base + (reg))
+#define pmc_readl(reg) \
+ __raw_readl((u32)reg_pmc_base + (reg))
+
+/*
+ * Tegra2 defines 1GB in the AXI address map for PCIe.
+ *
+ * That address space is split into different regions, with sizes and
+ * offsets as follows:
+ *
+ * 0x80000000 - 0x80003fff - PCI controller registers
+ * 0x80004000 - 0x80103fff - PCI configuration space
+ * 0x80104000 - 0x80203fff - PCI extended configuration space
+ * 0x80203fff - 0x803fffff - unused
+ * 0x80400000 - 0x8040ffff - downstream IO
+ * 0x80410000 - 0x8fffffff - unused
+ * 0x90000000 - 0x9fffffff - non-prefetchable memory
+ * 0xa0000000 - 0xbfffffff - prefetchable memory
+ */
+#define TEGRA_PCIE_BASE 0x80000000
+
+#define PCIE_REGS_SZ SZ_16K
+#define PCIE_CFG_OFF PCIE_REGS_SZ
+#define PCIE_CFG_SZ SZ_1M
+#define PCIE_EXT_CFG_OFF (PCIE_CFG_SZ + PCIE_CFG_OFF)
+#define PCIE_EXT_CFG_SZ SZ_1M
+#define PCIE_IOMAP_SZ (PCIE_REGS_SZ + PCIE_CFG_SZ + PCIE_EXT_CFG_SZ)
+
+#define MMIO_BASE (TEGRA_PCIE_BASE + SZ_4M)
+#define MMIO_SIZE SZ_64K
+#define MEM_BASE_0 (TEGRA_PCIE_BASE + SZ_256M)
+#define MEM_SIZE_0 SZ_128M
+#define MEM_BASE_1 (MEM_BASE_0 + MEM_SIZE_0)
+#define MEM_SIZE_1 SZ_128M
+#define PREFETCH_MEM_BASE_0 (MEM_BASE_1 + MEM_SIZE_1)
+#define PREFETCH_MEM_SIZE_0 SZ_128M
+#define PREFETCH_MEM_BASE_1 (PREFETCH_MEM_BASE_0 + PREFETCH_MEM_SIZE_0)
+#define PREFETCH_MEM_SIZE_1 SZ_128M
+
+#define PCIE_CONF_BUS(b) ((b) << 16)
+#define PCIE_CONF_DEV(d) ((d) << 11)
+#define PCIE_CONF_FUNC(f) ((f) << 8)
+#define PCIE_CONF_REG(r) \
+ (((r) & ~0x3) | (((r) < 256) ? PCIE_CFG_OFF : PCIE_EXT_CFG_OFF))
+
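
A worked example of the config-space offset these macros produce (illustrative arithmetic only): bus 1, device 0, function 0, standard register 0x04, with PCIE_CFG_OFF equal to SZ_16K (0x4000):

u32 off = PCIE_CONF_BUS(1) | PCIE_CONF_DEV(0) | PCIE_CONF_FUNC(0) |
	  PCIE_CONF_REG(0x04);
/* = 0x10000 | 0 | 0 | (0x4 | 0x4000) = 0x14004 bytes into tegra_pcie.regs;
   a register offset >= 256 would select PCIE_EXT_CFG_OFF instead */
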
+struct tegra_pcie_port {
+ int index;
+ u8 root_bus_nr;
+ void __iomem *base;
+
+ bool link_up;
+
+ char io_space_name[16];
+ char mem_space_name[16];
+ char prefetch_space_name[20];
+ struct resource res[3];
+};
+
+struct tegra_pcie_info {
+ struct tegra_pcie_port port[2];
+ int num_ports;
+
+ void __iomem *regs;
+ struct resource res_mmio;
+
+ struct clk *pex_clk;
+ struct clk *afi_clk;
+ struct clk *pcie_xclk;
+ struct clk *pll_e;
+};
+
+static struct tegra_pcie_info tegra_pcie = {
+ .res_mmio = {
+ .name = "PCI IO",
+ .start = MMIO_BASE,
+ .end = MMIO_BASE + MMIO_SIZE - 1,
+ .flags = IORESOURCE_MEM,
+ },
+};
+
+void __iomem *tegra_pcie_io_base;
+EXPORT_SYMBOL(tegra_pcie_io_base);
+
+static inline void afi_writel(u32 value, unsigned long offset)
+{
+ writel(value, offset + AFI_OFFSET + tegra_pcie.regs);
+}
+
+static inline u32 afi_readl(unsigned long offset)
+{
+ return readl(offset + AFI_OFFSET + tegra_pcie.regs);
+}
+
+static inline void pads_writel(u32 value, unsigned long offset)
+{
+ writel(value, offset + PADS_OFFSET + tegra_pcie.regs);
+}
+
+static inline u32 pads_readl(unsigned long offset)
+{
+ return readl(offset + PADS_OFFSET + tegra_pcie.regs);
+}
+
+static struct tegra_pcie_port *bus_to_port(int bus)
+{
+ int i;
+
+ for (i = tegra_pcie.num_ports - 1; i >= 0; i--) {
+ int rbus = tegra_pcie.port[i].root_bus_nr;
+ if (rbus != -1 && rbus == bus)
+ break;
+ }
+
+ return i >= 0 ? tegra_pcie.port + i : NULL;
+}
+
+static int tegra_pcie_read_conf(struct pci_bus *bus, unsigned int devfn,
+ int where, int size, u32 *val)
+{
+ struct tegra_pcie_port *pp = bus_to_port(bus->number);
+ void __iomem *addr;
+
+ if (pp) {
+ if (devfn != 0) {
+ *val = 0xffffffff;
+ return PCIBIOS_DEVICE_NOT_FOUND;
+ }
+
+ addr = pp->base + (where & ~0x3);
+ } else {
+ addr = tegra_pcie.regs + (PCIE_CONF_BUS(bus->number) +
+ PCIE_CONF_DEV(PCI_SLOT(devfn)) +
+ PCIE_CONF_FUNC(PCI_FUNC(devfn)) +
+ PCIE_CONF_REG(where));
+ }
+
+ *val = readl(addr);
+
+ if (size == 1)
+ *val = (*val >> (8 * (where & 3))) & 0xff;
+ else if (size == 2)
+ *val = (*val >> (8 * (where & 3))) & 0xffff;
+
+ return PCIBIOS_SUCCESSFUL;
+}
+
+static int tegra_pcie_write_conf(struct pci_bus *bus, unsigned int devfn,
+ int where, int size, u32 val)
+{
+ struct tegra_pcie_port *pp = bus_to_port(bus->number);
+ void __iomem *addr;
+
+ u32 mask;
+ u32 tmp;
+
+ if (pp) {
+ if (devfn != 0)
+ return PCIBIOS_DEVICE_NOT_FOUND;
+
+ addr = pp->base + (where & ~0x3);
+ } else {
+ addr = tegra_pcie.regs + (PCIE_CONF_BUS(bus->number) +
+ PCIE_CONF_DEV(PCI_SLOT(devfn)) +
+ PCIE_CONF_FUNC(PCI_FUNC(devfn)) +
+ PCIE_CONF_REG(where));
+ }
+
+ if (size == 4) {
+ writel(val, addr);
+ return PCIBIOS_SUCCESSFUL;
+ }
+
+ if (size == 2)
+ mask = ~(0xffff << ((where & 0x3) * 8));
+ else if (size == 1)
+ mask = ~(0xff << ((where & 0x3) * 8));
+ else
+ return PCIBIOS_BAD_REGISTER_NUMBER;
+
+ tmp = readl(addr) & mask;
+ tmp |= val << ((where & 0x3) * 8);
+ writel(tmp, addr);
+
+ return PCIBIOS_SUCCESSFUL;
+}
+
+static struct pci_ops tegra_pcie_ops = {
+ .read = tegra_pcie_read_conf,
+ .write = tegra_pcie_write_conf,
+};
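
A quick worked example of the byte-lane math used by tegra_pcie_write_conf() for sub-word accesses (illustrative arithmetic only), here for a 16-bit write at where = 0x06:

u32 mask = ~(0xffff << ((0x06 & 0x3) * 8));	/* = ~(0xffff << 16) = 0x0000ffff */
/* the read-modify-write keeps the low half-word of the aligned 32-bit
   register and places the new 16-bit value in bits 31:16 */
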
+
+static void __devinit tegra_pcie_fixup_bridge(struct pci_dev *dev)
+{
+ u16 reg;
+
+ if ((dev->class >> 16) == PCI_BASE_CLASS_BRIDGE) {
+ pci_read_config_word(dev, PCI_COMMAND, &reg);
+ reg |= (PCI_COMMAND_IO | PCI_COMMAND_MEMORY |
+ PCI_COMMAND_MASTER | PCI_COMMAND_SERR);
+ pci_write_config_word(dev, PCI_COMMAND, reg);
+ }
+}
+DECLARE_PCI_FIXUP_FINAL(PCI_ANY_ID, PCI_ANY_ID, tegra_pcie_fixup_bridge);
+
+/* Tegra PCIE root complex wrongly reports device class */
+static void __devinit tegra_pcie_fixup_class(struct pci_dev *dev)
+{
+ dev->class = PCI_CLASS_BRIDGE_PCI << 8;
+}
+DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_NVIDIA, 0x0bf0, tegra_pcie_fixup_class);
+DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_NVIDIA, 0x0bf1, tegra_pcie_fixup_class);
+
+/* Tegra PCIE requires relaxed ordering */
+static void __devinit tegra_pcie_relax_enable(struct pci_dev *dev)
+{
+ u16 val16;
+ int pos = pci_find_capability(dev, PCI_CAP_ID_EXP);
+
+ if (pos <= 0) {
+ dev_err(&dev->dev, "skipping relaxed ordering fixup\n");
+ return;
+ }
+
+ pci_read_config_word(dev, pos + PCI_EXP_DEVCTL, &val16);
+ val16 |= PCI_EXP_DEVCTL_RELAX_EN;
+ pci_write_config_word(dev, pos + PCI_EXP_DEVCTL, val16);
+}
+DECLARE_PCI_FIXUP_FINAL(PCI_ANY_ID, PCI_ANY_ID, tegra_pcie_relax_enable);
+
+static int tegra_pcie_setup(int nr, struct pci_sys_data *sys)
+{
+ struct tegra_pcie_port *pp;
+
+ if (nr >= tegra_pcie.num_ports)
+ return 0;
+
+ pp = tegra_pcie.port + nr;
+ pp->root_bus_nr = sys->busnr;
+
+ /*
+ * IORESOURCE_IO
+ */
+ snprintf(pp->io_space_name, sizeof(pp->io_space_name),
+ "PCIe %d I/O", pp->index);
+ pp->io_space_name[sizeof(pp->io_space_name) - 1] = 0;
+ pp->res[0].name = pp->io_space_name;
+ if (pp->index == 0) {
+ pp->res[0].start = PCIBIOS_MIN_IO;
+ pp->res[0].end = pp->res[0].start + SZ_32K - 1;
+ } else {
+ pp->res[0].start = PCIBIOS_MIN_IO + SZ_32K;
+ pp->res[0].end = IO_SPACE_LIMIT;
+ }
+ pp->res[0].flags = IORESOURCE_IO;
+ if (request_resource(&ioport_resource, &pp->res[0]))
+ panic("Request PCIe IO resource failed\n");
+ sys->resource[0] = &pp->res[0];
+
+ /*
+ * IORESOURCE_MEM
+ */
+ snprintf(pp->mem_space_name, sizeof(pp->mem_space_name),
+ "PCIe %d MEM", pp->index);
+ pp->mem_space_name[sizeof(pp->mem_space_name) - 1] = 0;
+ pp->res[1].name = pp->mem_space_name;
+ if (pp->index == 0) {
+ pp->res[1].start = MEM_BASE_0;
+ pp->res[1].end = pp->res[1].start + MEM_SIZE_0 - 1;
+ } else {
+ pp->res[1].start = MEM_BASE_1;
+ pp->res[1].end = pp->res[1].start + MEM_SIZE_1 - 1;
+ }
+ pp->res[1].flags = IORESOURCE_MEM;
+ if (request_resource(&iomem_resource, &pp->res[1]))
+ panic("Request PCIe Memory resource failed\n");
+ sys->resource[1] = &pp->res[1];
+
+ /*
+ * IORESOURCE_MEM | IORESOURCE_PREFETCH
+ */
+ snprintf(pp->prefetch_space_name, sizeof(pp->prefetch_space_name),
+ "PCIe %d PREFETCH MEM", pp->index);
+ pp->prefetch_space_name[sizeof(pp->prefetch_space_name) - 1] = 0;
+ pp->res[2].name = pp->prefetch_space_name;
+ if (pp->index == 0) {
+ pp->res[2].start = PREFETCH_MEM_BASE_0;
+ pp->res[2].end = pp->res[2].start + PREFETCH_MEM_SIZE_0 - 1;
+ } else {
+ pp->res[2].start = PREFETCH_MEM_BASE_1;
+ pp->res[2].end = pp->res[2].start + PREFETCH_MEM_SIZE_1 - 1;
+ }
+ pp->res[2].flags = IORESOURCE_MEM | IORESOURCE_PREFETCH;
+ if (request_resource(&iomem_resource, &pp->res[2]))
+ panic("Request PCIe Prefetch Memory resource failed\n");
+ sys->resource[2] = &pp->res[2];
+
+ return 1;
+}
+
+static int tegra_pcie_map_irq(struct pci_dev *dev, u8 slot, u8 pin)
+{
+ return INT_PCIE_INTR;
+}
+
+static struct pci_bus __init *tegra_pcie_scan_bus(int nr,
+ struct pci_sys_data *sys)
+{
+ struct tegra_pcie_port *pp;
+
+ if (nr >= tegra_pcie.num_ports)
+ return 0;
+
+ pp = tegra_pcie.port + nr;
+ pp->root_bus_nr = sys->busnr;
+
+ return pci_scan_bus(sys->busnr, &tegra_pcie_ops, sys);
+}
+
+static struct hw_pci tegra_pcie_hw __initdata = {
+ .nr_controllers = 2,
+ .setup = tegra_pcie_setup,
+ .scan = tegra_pcie_scan_bus,
+ .swizzle = pci_std_swizzle,
+ .map_irq = tegra_pcie_map_irq,
+};
+
+
+static irqreturn_t tegra_pcie_isr(int irq, void *arg)
+{
+ const char *err_msg[] = {
+ "Unknown",
+ "AXI slave error",
+ "AXI decode error",
+ "Target abort",
+ "Master abort",
+ "Invalid write",
+ "Response decoding error",
+ "AXI response decoding error",
+ "Transaction timeout",
+ };
+
+ u32 code, signature;
+
+ code = afi_readl(AFI_INTR_CODE) & AFI_INTR_CODE_MASK;
+ signature = afi_readl(AFI_INTR_SIGNATURE);
+ afi_writel(0, AFI_INTR_CODE);
+
+ if (code == AFI_INTR_LEGACY)
+ return IRQ_NONE;
+
+ if (code >= ARRAY_SIZE(err_msg))
+ code = 0;
+
+ /*
+ * do not pollute kernel log with master abort reports since they
+ * happen a lot during enumeration
+ */
+ if (code == AFI_INTR_MASTER_ABORT)
+ pr_debug("PCIE: %s, signature: %08x\n", err_msg[code], signature);
+ else
+ pr_err("PCIE: %s, signature: %08x\n", err_msg[code], signature);
+
+ return IRQ_HANDLED;
+}
+
+static void tegra_pcie_setup_translations(void)
+{
+ u32 fpci_bar;
+ u32 size;
+ u32 axi_address;
+
+ /* Bar 0: config Bar */
+ fpci_bar = ((u32)0xfdff << 16);
+ size = PCIE_CFG_SZ;
+ axi_address = TEGRA_PCIE_BASE + PCIE_CFG_OFF;
+ afi_writel(axi_address, AFI_AXI_BAR0_START);
+ afi_writel(size >> 12, AFI_AXI_BAR0_SZ);
+ afi_writel(fpci_bar, AFI_FPCI_BAR0);
+
+ /* Bar 1: extended config Bar */
+ fpci_bar = ((u32)0xfe1 << 20);
+ size = PCIE_EXT_CFG_SZ;
+ axi_address = TEGRA_PCIE_BASE + PCIE_EXT_CFG_OFF;
+ afi_writel(axi_address, AFI_AXI_BAR1_START);
+ afi_writel(size >> 12, AFI_AXI_BAR1_SZ);
+ afi_writel(fpci_bar, AFI_FPCI_BAR1);
+
+ /* Bar 2: downstream IO bar */
+ fpci_bar = ((u32)0xfdfc << 16);
+ size = MMIO_SIZE;
+ axi_address = MMIO_BASE;
+ afi_writel(axi_address, AFI_AXI_BAR2_START);
+ afi_writel(size >> 12, AFI_AXI_BAR2_SZ);
+ afi_writel(fpci_bar, AFI_FPCI_BAR2);
+
+ /* Bar 3: prefetchable memory BAR */
+ fpci_bar = (((PREFETCH_MEM_BASE_0 >> 12) & 0x0fffffff) << 4) | 0x1;
+ size = PREFETCH_MEM_SIZE_0 + PREFETCH_MEM_SIZE_1;
+ axi_address = PREFETCH_MEM_BASE_0;
+ afi_writel(axi_address, AFI_AXI_BAR3_START);
+ afi_writel(size >> 12, AFI_AXI_BAR3_SZ);
+ afi_writel(fpci_bar, AFI_FPCI_BAR3);
+
+ /* Bar 4: non prefetchable memory BAR */
+ fpci_bar = (((MEM_BASE_0 >> 12) & 0x0FFFFFFF) << 4) | 0x1;
+ size = MEM_SIZE_0 + MEM_SIZE_1;
+ axi_address = MEM_BASE_0;
+ afi_writel(axi_address, AFI_AXI_BAR4_START);
+ afi_writel(size >> 12, AFI_AXI_BAR4_SZ);
+ afi_writel(fpci_bar, AFI_FPCI_BAR4);
+
+ /* Bar 5: NULL out the remaining BAR as it is not used */
+ fpci_bar = 0;
+ size = 0;
+ axi_address = 0;
+ afi_writel(axi_address, AFI_AXI_BAR5_START);
+ afi_writel(size >> 12, AFI_AXI_BAR5_SZ);
+ afi_writel(fpci_bar, AFI_FPCI_BAR5);
+
+ /* map all upstream transactions as uncached */
+ afi_writel(PHYS_OFFSET, AFI_CACHE_BAR0_ST);
+ afi_writel(0, AFI_CACHE_BAR0_SZ);
+ afi_writel(0, AFI_CACHE_BAR1_ST);
+ afi_writel(0, AFI_CACHE_BAR1_SZ);
+
+ /* No MSI */
+ afi_writel(0, AFI_MSI_FPCI_BAR_ST);
+ afi_writel(0, AFI_MSI_BAR_SZ);
+ afi_writel(0, AFI_MSI_AXI_BAR_ST);
+ afi_writel(0, AFI_MSI_BAR_SZ);
+}
+
+static void tegra_pcie_enable_controller(void)
+{
+ u32 val, reg;
+ int i;
+
+ /* Enable slot clock and pulse the reset signals */
+ for (i = 0, reg = AFI_PEX0_CTRL; i < 2; i++, reg += 0x8) {
+ val = afi_readl(reg) | AFI_PEX_CTRL_REFCLK_EN;
+ afi_writel(val, reg);
+ val &= ~AFI_PEX_CTRL_RST;
+ afi_writel(val, reg);
+
+ val = afi_readl(reg) | AFI_PEX_CTRL_RST;
+ afi_writel(val, reg);
+ }
+
+ /* Enable dual controller and both ports */
+ val = afi_readl(AFI_PCIE_CONFIG);
+ val &= ~(AFI_PCIE_CONFIG_PCIEC0_DISABLE_DEVICE |
+ AFI_PCIE_CONFIG_PCIEC1_DISABLE_DEVICE |
+ AFI_PCIE_CONFIG_SM2TMS0_XBAR_CONFIG_MASK);
+ val |= AFI_PCIE_CONFIG_SM2TMS0_XBAR_CONFIG_DUAL;
+ afi_writel(val, AFI_PCIE_CONFIG);
+
+ val = afi_readl(AFI_FUSE) & ~AFI_FUSE_PCIE_T0_GEN2_DIS;
+ afi_writel(val, AFI_FUSE);
+
+ /* Initialize internal PHY, enable up to 16 PCIE lanes */
+ pads_writel(0x0, PADS_CTL_SEL);
+
+ /* override IDDQ to 1 on all 4 lanes */
+ val = pads_readl(PADS_CTL) | PADS_CTL_IDDQ_1L;
+ pads_writel(val, PADS_CTL);
+
+ /*
+ * set up PHY PLL inputs: select PLLE output as refclock,
+ * set TX ref sel to div10 (not div5)
+ */
+ val = pads_readl(PADS_PLL_CTL);
+ val &= ~(PADS_PLL_CTL_REFCLK_MASK | PADS_PLL_CTL_TXCLKREF_MASK);
+ val |= (PADS_PLL_CTL_REFCLK_INTERNAL_CML | PADS_PLL_CTL_TXCLKREF_DIV10);
+ pads_writel(val, PADS_PLL_CTL);
+
+ /* take PLL out of reset */
+ val = pads_readl(PADS_PLL_CTL) | PADS_PLL_CTL_RST_B4SM;
+ pads_writel(val, PADS_PLL_CTL);
+
+ /*
+ * Hack, set the clock voltage to the DEFAULT provided by hw folks.
+ * This doesn't exist in the documentation
+ */
+ pads_writel(0xfa5cfa5c, 0xc8);
+
+ /* Wait for the PLL to lock */
+ do {
+ val = pads_readl(PADS_PLL_CTL);
+ } while (!(val & PADS_PLL_CTL_LOCKDET));
+
+ /* turn off IDDQ override */
+ val = pads_readl(PADS_CTL) & ~PADS_CTL_IDDQ_1L;
+ pads_writel(val, PADS_CTL);
+
+ /* enable TX/RX data */
+ val = pads_readl(PADS_CTL);
+ val |= (PADS_CTL_TX_DATA_EN_1L | PADS_CTL_RX_DATA_EN_1L);
+ pads_writel(val, PADS_CTL);
+
+ /* Take the PCIe interface module out of reset */
+ tegra_periph_reset_deassert(tegra_pcie.pcie_xclk);
+
+ /* Finally enable PCIe */
+ val = afi_readl(AFI_CONFIGURATION) | AFI_CONFIGURATION_EN_FPCI;
+ afi_writel(val, AFI_CONFIGURATION);
+
+ val = (AFI_INTR_EN_INI_SLVERR | AFI_INTR_EN_INI_DECERR |
+ AFI_INTR_EN_TGT_SLVERR | AFI_INTR_EN_TGT_DECERR |
+ AFI_INTR_EN_TGT_WRERR | AFI_INTR_EN_DFPCI_DECERR);
+ afi_writel(val, AFI_AFI_INTR_ENABLE);
+ afi_writel(0xffffffff, AFI_SM_INTR_ENABLE);
+
+ /* FIXME: No MSI for now, only INT */
+ afi_writel(AFI_INTR_MASK_INT_MASK, AFI_INTR_MASK);
+
+ /* Disable all exceptions */
+ afi_writel(0, AFI_FPCI_ERROR_MASKS);
+
+ return;
+}
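
The PLL lock wait above spins with no upper bound. Purely as an illustration of a bounded alternative (this is not what the patch does, and the 1000-iteration budget is an arbitrary assumption):

int timeout = 1000;		/* illustrative budget, ~1 s in 1 ms steps */
do {
	val = pads_readl(PADS_PLL_CTL);
	if (val & PADS_PLL_CTL_LOCKDET)
		break;
	mdelay(1);
} while (--timeout);
if (!timeout)
	pr_err("PCIE: PHY PLL failed to lock\n");
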
+
+static void tegra_pcie_xclk_clamp(bool clamp)
+{
+ u32 reg;
+
+ reg = pmc_readl(PMC_SCRATCH42) & ~PMC_SCRATCH42_PCX_CLAMP;
+
+ if (clamp)
+ reg |= PMC_SCRATCH42_PCX_CLAMP;
+
+ pmc_writel(reg, PMC_SCRATCH42);
+}
+
+static int tegra_pcie_power_on(void)
+{
+ tegra_pcie_xclk_clamp(true);
+ tegra_periph_reset_assert(tegra_pcie.pcie_xclk);
+ tegra_pcie_xclk_clamp(false);
+
+ clk_enable(tegra_pcie.afi_clk);
+ clk_enable(tegra_pcie.pex_clk);
+ return clk_enable(tegra_pcie.pll_e);
+}
+
+static void tegra_pcie_power_off(void)
+{
+ tegra_periph_reset_assert(tegra_pcie.pcie_xclk);
+ tegra_periph_reset_assert(tegra_pcie.afi_clk);
+ tegra_periph_reset_assert(tegra_pcie.pex_clk);
+
+ tegra_pcie_xclk_clamp(true);
+}
+
+static int tegra_pcie_clocks_get(void)
+{
+ int err;
+
+ tegra_pcie.pex_clk = clk_get(NULL, "pex");
+ if (IS_ERR(tegra_pcie.pex_clk))
+ return PTR_ERR(tegra_pcie.pex_clk);
+
+ tegra_pcie.afi_clk = clk_get(NULL, "afi");
+ if (IS_ERR(tegra_pcie.afi_clk)) {
+ err = PTR_ERR(tegra_pcie.afi_clk);
+ goto err_afi_clk;
+ }
+
+ tegra_pcie.pcie_xclk = clk_get(NULL, "pcie_xclk");
+ if (IS_ERR(tegra_pcie.pcie_xclk)) {
+ err = PTR_ERR(tegra_pcie.pcie_xclk);
+ goto err_pcie_xclk;
+ }
+
+ tegra_pcie.pll_e = clk_get_sys(NULL, "pll_e");
+ if (IS_ERR(tegra_pcie.pll_e)) {
+ err = PTR_ERR(tegra_pcie.pll_e);
+ goto err_pll_e;
+ }
+
+ return 0;
+
+err_pll_e:
+ clk_put(tegra_pcie.pcie_xclk);
+err_pcie_xclk:
+ clk_put(tegra_pcie.afi_clk);
+err_afi_clk:
+ clk_put(tegra_pcie.pex_clk);
+
+ return err;
+}
+
+static void tegra_pcie_clocks_put(void)
+{
+ clk_put(tegra_pcie.pll_e);
+ clk_put(tegra_pcie.pcie_xclk);
+ clk_put(tegra_pcie.afi_clk);
+ clk_put(tegra_pcie.pex_clk);
+}
+
+static int __init tegra_pcie_get_resources(void)
+{
+ struct resource *res_mmio = &tegra_pcie.res_mmio;
+ int err;
+
+ err = tegra_pcie_clocks_get();
+ if (err) {
+ pr_err("PCIE: failed to get clocks: %d\n", err);
+ return err;
+ }
+
+ err = tegra_pcie_power_on();
+ if (err) {
+ pr_err("PCIE: failed to power up: %d\n", err);
+ goto err_pwr_on;
+ }
+
+ tegra_pcie.regs = ioremap_nocache(TEGRA_PCIE_BASE, PCIE_IOMAP_SZ);
+ if (tegra_pcie.regs == NULL) {
+ pr_err("PCIE: Failed to map PCI/AFI registers\n");
+ err = -ENOMEM;
+ goto err_map_reg;
+ }
+
+ err = request_resource(&iomem_resource, res_mmio);
+ if (err) {
+ pr_err("PCIE: Failed to request resources: %d\n", err);
+ goto err_req_io;
+ }
+
+ tegra_pcie_io_base = ioremap_nocache(res_mmio->start,
+ resource_size(res_mmio));
+ if (tegra_pcie_io_base == NULL) {
+ pr_err("PCIE: Failed to map IO\n");
+ err = -ENOMEM;
+ goto err_map_io;
+ }
+
+ err = request_irq(INT_PCIE_INTR, tegra_pcie_isr,
+ IRQF_SHARED, "PCIE", &tegra_pcie);
+ if (err) {
+ pr_err("PCIE: Failed to register IRQ: %d\n", err);
+ goto err_irq;
+ }
+ set_irq_flags(INT_PCIE_INTR, IRQF_VALID);
+
+ return 0;
+
+err_irq:
+ iounmap(tegra_pcie_io_base);
+err_map_io:
+ release_resource(&tegra_pcie.res_mmio);
+err_req_io:
+ iounmap(tegra_pcie.regs);
+err_map_reg:
+ tegra_pcie_power_off();
+err_pwr_on:
+ tegra_pcie_clocks_put();
+
+ return err;
+}
+
+/*
+ * FIXME: If there are no PCIe cards attached, then calling this function
+ * can significantly increase boot time, as it contains long timeout
+ * loops.
+ */
+#define TEGRA_PCIE_LINKUP_TIMEOUT 200 /* up to 1.2 seconds */
+static bool tegra_pcie_check_link(struct tegra_pcie_port *pp, int idx,
+ u32 reset_reg)
+{
+ u32 reg;
+ int retries = 3;
+ int timeout;
+
+ do {
+ timeout = TEGRA_PCIE_LINKUP_TIMEOUT;
+ while (timeout) {
+ reg = readl(pp->base + RP_VEND_XP);
+
+ if (reg & RP_VEND_XP_DL_UP)
+ break;
+
+ mdelay(1);
+ timeout--;
+ }
+
+ if (!timeout) {
+ pr_err("PCIE: port %d: link down, retrying\n", idx);
+ goto retry;
+ }
+
+ timeout = TEGRA_PCIE_LINKUP_TIMEOUT;
+ while (timeout) {
+ reg = readl(pp->base + RP_LINK_CONTROL_STATUS);
+
+ if (reg & 0x20000000)
+ return true;
+
+ mdelay(1);
+ timeout--;
+ }
+
+retry:
+ /* Pulse the PEX reset */
+ reg = afi_readl(reset_reg) | AFI_PEX_CTRL_RST;
+ afi_writel(reg, reset_reg);
+ mdelay(1);
+ reg = afi_readl(reset_reg) & ~AFI_PEX_CTRL_RST;
+ afi_writel(reg, reset_reg);
+
+ retries--;
+ } while (retries);
+
+ return false;
+}
+
+static void __init tegra_pcie_add_port(int index, u32 offset, u32 reset_reg)
+{
+ struct tegra_pcie_port *pp;
+
+ pp = tegra_pcie.port + tegra_pcie.num_ports;
+
+ pp->index = -1;
+ pp->base = tegra_pcie.regs + offset;
+ pp->link_up = tegra_pcie_check_link(pp, index, reset_reg);
+
+ if (!pp->link_up) {
+ pp->base = NULL;
+ printk(KERN_INFO "PCIE: port %d: link down, ignoring\n", index);
+ return;
+ }
+
+ tegra_pcie.num_ports++;
+ pp->index = index;
+ pp->root_bus_nr = -1;
+ memset(pp->res, 0, sizeof(pp->res));
+}
+
+int __init tegra_pcie_init(bool init_port0, bool init_port1)
+{
+ int err;
+
+ if (!(init_port0 || init_port1))
+ return -ENODEV;
+
+ err = tegra_pcie_get_resources();
+ if (err)
+ return err;
+
+ tegra_pcie_enable_controller();
+
+ /* setup the AFI address translations */
+ tegra_pcie_setup_translations();
+
+ if (init_port0)
+ tegra_pcie_add_port(0, RP0_OFFSET, AFI_PEX0_CTRL);
+
+ if (init_port1)
+ tegra_pcie_add_port(1, RP1_OFFSET, AFI_PEX1_CTRL);
+
+ pci_common_init(&tegra_pcie_hw);
+
+ return 0;
+}
diff --git a/arch/arm/mach-tegra/pinmux-t2-tables.c b/arch/arm/mach-tegra/pinmux-t2-tables.c
new file mode 100644
index 000000000000..a6ea34e782dc
--- /dev/null
+++ b/arch/arm/mach-tegra/pinmux-t2-tables.c
@@ -0,0 +1,260 @@
+/*
+ * linux/arch/arm/mach-tegra/pinmux-t2-tables.c
+ *
+ * Common pinmux configurations for Tegra 2 SoCs
+ *
+ * Copyright (C) 2010 NVIDIA Corporation
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+#include <linux/kernel.h>
+#include <linux/errno.h>
+#include <linux/spinlock.h>
+#include <linux/io.h>
+#include <linux/init.h>
+#include <linux/string.h>
+
+#include <mach/iomap.h>
+#include <mach/pinmux.h>
+
+#define DRIVE_PINGROUP(pg_name, r) \
+ [TEGRA_DRIVE_PINGROUP_ ## pg_name] = { \
+ .name = #pg_name, \
+ .reg = r \
+ }
+
+const struct tegra_drive_pingroup_desc tegra_soc_drive_pingroups[TEGRA_MAX_DRIVE_PINGROUP] = {
+ DRIVE_PINGROUP(AO1, 0x868),
+ DRIVE_PINGROUP(AO2, 0x86c),
+ DRIVE_PINGROUP(AT1, 0x870),
+ DRIVE_PINGROUP(AT2, 0x874),
+ DRIVE_PINGROUP(CDEV1, 0x878),
+ DRIVE_PINGROUP(CDEV2, 0x87c),
+ DRIVE_PINGROUP(CSUS, 0x880),
+ DRIVE_PINGROUP(DAP1, 0x884),
+ DRIVE_PINGROUP(DAP2, 0x888),
+ DRIVE_PINGROUP(DAP3, 0x88c),
+ DRIVE_PINGROUP(DAP4, 0x890),
+ DRIVE_PINGROUP(DBG, 0x894),
+ DRIVE_PINGROUP(LCD1, 0x898),
+ DRIVE_PINGROUP(LCD2, 0x89c),
+ DRIVE_PINGROUP(SDMMC2, 0x8a0),
+ DRIVE_PINGROUP(SDMMC3, 0x8a4),
+ DRIVE_PINGROUP(SPI, 0x8a8),
+ DRIVE_PINGROUP(UAA, 0x8ac),
+ DRIVE_PINGROUP(UAB, 0x8b0),
+ DRIVE_PINGROUP(UART2, 0x8b4),
+ DRIVE_PINGROUP(UART3, 0x8b8),
+ DRIVE_PINGROUP(VI1, 0x8bc),
+ DRIVE_PINGROUP(VI2, 0x8c0),
+ DRIVE_PINGROUP(XM2A, 0x8c4),
+ DRIVE_PINGROUP(XM2C, 0x8c8),
+ DRIVE_PINGROUP(XM2D, 0x8cc),
+ DRIVE_PINGROUP(XM2CLK, 0x8d0),
+ DRIVE_PINGROUP(MEMCOMP, 0x8d4),
+};
+
+#define PINGROUP(pg_name, vdd, f0, f1, f2, f3, f_safe, \
+ tri_r, tri_b, mux_r, mux_b, pupd_r, pupd_b) \
+ [TEGRA_PINGROUP_ ## pg_name] = { \
+ .name = #pg_name, \
+ .vddio = TEGRA_VDDIO_ ## vdd, \
+ .funcs = { \
+ TEGRA_MUX_ ## f0, \
+ TEGRA_MUX_ ## f1, \
+ TEGRA_MUX_ ## f2, \
+ TEGRA_MUX_ ## f3, \
+ }, \
+ .func_safe = TEGRA_MUX_ ## f_safe, \
+ .tri_reg = tri_r, \
+ .tri_bit = tri_b, \
+ .mux_reg = mux_r, \
+ .mux_bit = mux_b, \
+ .pupd_reg = pupd_r, \
+ .pupd_bit = pupd_b, \
+ }
+
+const struct tegra_pingroup_desc tegra_soc_pingroups[TEGRA_MAX_PINGROUP] = {
+ PINGROUP(ATA, NAND, IDE, NAND, GMI, RSVD, IDE, 0x14, 0, 0x80, 24, 0xA0, 0),
+ PINGROUP(ATB, NAND, IDE, NAND, GMI, SDIO4, IDE, 0x14, 1, 0x80, 16, 0xA0, 2),
+ PINGROUP(ATC, NAND, IDE, NAND, GMI, SDIO4, IDE, 0x14, 2, 0x80, 22, 0xA0, 4),
+ PINGROUP(ATD, NAND, IDE, NAND, GMI, SDIO4, IDE, 0x14, 3, 0x80, 20, 0xA0, 6),
+ PINGROUP(ATE, NAND, IDE, NAND, GMI, RSVD, IDE, 0x18, 25, 0x80, 12, 0xA0, 8),
+ PINGROUP(CDEV1, AUDIO, OSC, PLLA_OUT, PLLM_OUT1, AUDIO_SYNC, OSC, 0x14, 4, 0x88, 2, 0xA8, 0),
+ PINGROUP(CDEV2, AUDIO, OSC, AHB_CLK, APB_CLK, PLLP_OUT4, OSC, 0x14, 5, 0x88, 4, 0xA8, 2),
+ PINGROUP(CRTP, LCD, CRT, RSVD, RSVD, RSVD, RSVD, 0x20, 14, 0x98, 20, 0xA4, 24),
+ PINGROUP(CSUS, VI, PLLC_OUT1, PLLP_OUT2, PLLP_OUT3, VI_SENSOR_CLK, PLLC_OUT1, 0x14, 6, 0x88, 6, 0xAC, 24),
+ PINGROUP(DAP1, AUDIO, DAP1, RSVD, GMI, SDIO2, DAP1, 0x14, 7, 0x88, 20, 0xA0, 10),
+ PINGROUP(DAP2, AUDIO, DAP2, TWC, RSVD, GMI, DAP2, 0x14, 8, 0x88, 22, 0xA0, 12),
+ PINGROUP(DAP3, BB, DAP3, RSVD, RSVD, RSVD, DAP3, 0x14, 9, 0x88, 24, 0xA0, 14),
+ PINGROUP(DAP4, UART, DAP4, RSVD, GMI, RSVD, DAP4, 0x14, 10, 0x88, 26, 0xA0, 16),
+ PINGROUP(DDC, LCD, I2C2, RSVD, RSVD, RSVD, RSVD4, 0x18, 31, 0x88, 0, 0xB0, 28),
+ PINGROUP(DTA, VI, RSVD, SDIO2, VI, RSVD, RSVD4, 0x14, 11, 0x84, 20, 0xA0, 18),
+ PINGROUP(DTB, VI, RSVD, RSVD, VI, SPI1, RSVD1, 0x14, 12, 0x84, 22, 0xA0, 20),
+ PINGROUP(DTC, VI, RSVD, RSVD, VI, RSVD, RSVD1, 0x14, 13, 0x84, 26, 0xA0, 22),
+ PINGROUP(DTD, VI, RSVD, SDIO2, VI, RSVD, RSVD1, 0x14, 14, 0x84, 28, 0xA0, 24),
+ PINGROUP(DTE, VI, RSVD, RSVD, VI, SPI1, RSVD1, 0x14, 15, 0x84, 30, 0xA0, 26),
+ PINGROUP(DTF, VI, I2C3, RSVD, VI, RSVD, RSVD4, 0x20, 12, 0x98, 30, 0xA0, 28),
+ PINGROUP(GMA, NAND, UARTE, SPI3, GMI, SDIO4, SPI3, 0x14, 28, 0x84, 0, 0xB0, 20),
+ PINGROUP(GMB, NAND, IDE, NAND, GMI, GMI_INT, GMI, 0x18, 29, 0x88, 28, 0xB0, 22),
+ PINGROUP(GMC, NAND, UARTD, SPI4, GMI, SFLASH, SPI4, 0x14, 29, 0x84, 2, 0xB0, 24),
+ PINGROUP(GMD, NAND, RSVD, NAND, GMI, SFLASH, GMI, 0x18, 30, 0x88, 30, 0xB0, 26),
+ PINGROUP(GME, NAND, RSVD, DAP5, GMI, SDIO4, GMI, 0x18, 0, 0x8C, 0, 0xA8, 24),
+ PINGROUP(GPU, UART, PWM, UARTA, GMI, RSVD, RSVD4, 0x14, 16, 0x8C, 4, 0xA4, 20),
+ PINGROUP(GPU7, SYS, RTCK, RSVD, RSVD, RSVD, RTCK, 0x20, 11, 0x98, 28, 0xA4, 6),
+ PINGROUP(GPV, SD, PCIE, RSVD, RSVD, RSVD, PCIE, 0x14, 17, 0x8C, 2, 0xA0, 30),
+ PINGROUP(HDINT, LCD, HDMI, RSVD, RSVD, RSVD, HDMI, 0x1C, 23, 0x84, 4, 0xAC, 22),
+ PINGROUP(I2CP, SYS, I2C, RSVD, RSVD, RSVD, RSVD4, 0x14, 18, 0x88, 8, 0xA4, 2),
+ PINGROUP(IRRX, UART, UARTA, UARTB, GMI, SPI4, UARTB, 0x14, 20, 0x88, 18, 0xA8, 22),
+ PINGROUP(IRTX, UART, UARTA, UARTB, GMI, SPI4, UARTB, 0x14, 19, 0x88, 16, 0xA8, 20),
+ PINGROUP(KBCA, SYS, KBC, NAND, SDIO2, EMC_TEST0_DLL, KBC, 0x14, 22, 0x88, 10, 0xA4, 8),
+ PINGROUP(KBCB, SYS, KBC, NAND, SDIO2, MIO, KBC, 0x14, 21, 0x88, 12, 0xA4, 10),
+ PINGROUP(KBCC, SYS, KBC, NAND, TRACE, EMC_TEST1_DLL, KBC, 0x18, 26, 0x88, 14, 0xA4, 12),
+ PINGROUP(KBCD, SYS, KBC, NAND, SDIO2, MIO, KBC, 0x20, 10, 0x98, 26, 0xA4, 14),
+ PINGROUP(KBCE, SYS, KBC, NAND, OWR, RSVD, KBC, 0x14, 26, 0x80, 28, 0xB0, 2),
+ PINGROUP(KBCF, SYS, KBC, NAND, TRACE, MIO, KBC, 0x14, 27, 0x80, 26, 0xB0, 0),
+ PINGROUP(LCSN, LCD, DISPLAYA, DISPLAYB, SPI3, RSVD, RSVD4, 0x1C, 31, 0x90, 12, 0xAC, 20),
+ PINGROUP(LD0, LCD, DISPLAYA, DISPLAYB, XIO, RSVD, RSVD4, 0x1C, 0, 0x94, 0, 0xAC, 12),
+ PINGROUP(LD1, LCD, DISPLAYA, DISPLAYB, XIO, RSVD, RSVD4, 0x1C, 1, 0x94, 2, 0xAC, 12),
+ PINGROUP(LD10, LCD, DISPLAYA, DISPLAYB, XIO, RSVD, RSVD4, 0x1C, 10, 0x94, 20, 0xAC, 12),
+ PINGROUP(LD11, LCD, DISPLAYA, DISPLAYB, XIO, RSVD, RSVD4, 0x1C, 11, 0x94, 22, 0xAC, 12),
+ PINGROUP(LD12, LCD, DISPLAYA, DISPLAYB, XIO, RSVD, RSVD4, 0x1C, 12, 0x94, 24, 0xAC, 12),
+ PINGROUP(LD13, LCD, DISPLAYA, DISPLAYB, XIO, RSVD, RSVD4, 0x1C, 13, 0x94, 26, 0xAC, 12),
+ PINGROUP(LD14, LCD, DISPLAYA, DISPLAYB, XIO, RSVD, RSVD4, 0x1C, 14, 0x94, 28, 0xAC, 12),
+ PINGROUP(LD15, LCD, DISPLAYA, DISPLAYB, XIO, RSVD, RSVD4, 0x1C, 15, 0x94, 30, 0xAC, 12),
+ PINGROUP(LD16, LCD, DISPLAYA, DISPLAYB, XIO, RSVD, RSVD4, 0x1C, 16, 0x98, 0, 0xAC, 12),
+ PINGROUP(LD17, LCD, DISPLAYA, DISPLAYB, RSVD, RSVD, RSVD4, 0x1C, 17, 0x98, 2, 0xAC, 12),
+ PINGROUP(LD2, LCD, DISPLAYA, DISPLAYB, XIO, RSVD, RSVD4, 0x1C, 2, 0x94, 4, 0xAC, 12),
+ PINGROUP(LD3, LCD, DISPLAYA, DISPLAYB, XIO, RSVD, RSVD4, 0x1C, 3, 0x94, 6, 0xAC, 12),
+ PINGROUP(LD4, LCD, DISPLAYA, DISPLAYB, XIO, RSVD, RSVD4, 0x1C, 4, 0x94, 8, 0xAC, 12),
+ PINGROUP(LD5, LCD, DISPLAYA, DISPLAYB, XIO, RSVD, RSVD4, 0x1C, 5, 0x94, 10, 0xAC, 12),
+ PINGROUP(LD6, LCD, DISPLAYA, DISPLAYB, XIO, RSVD, RSVD4, 0x1C, 6, 0x94, 12, 0xAC, 12),
+ PINGROUP(LD7, LCD, DISPLAYA, DISPLAYB, XIO, RSVD, RSVD4, 0x1C, 7, 0x94, 14, 0xAC, 12),
+ PINGROUP(LD8, LCD, DISPLAYA, DISPLAYB, XIO, RSVD, RSVD4, 0x1C, 8, 0x94, 16, 0xAC, 12),
+ PINGROUP(LD9, LCD, DISPLAYA, DISPLAYB, XIO, RSVD, RSVD4, 0x1C, 9, 0x94, 18, 0xAC, 12),
+ PINGROUP(LDC, LCD, DISPLAYA, DISPLAYB, RSVD, RSVD, RSVD4, 0x1C, 30, 0x90, 14, 0xAC, 20),
+ PINGROUP(LDI, LCD, DISPLAYA, DISPLAYB, RSVD, RSVD, RSVD4, 0x20, 6, 0x98, 16, 0xAC, 18),
+ PINGROUP(LHP0, LCD, DISPLAYA, DISPLAYB, RSVD, RSVD, RSVD4, 0x1C, 18, 0x98, 10, 0xAC, 16),
+ PINGROUP(LHP1, LCD, DISPLAYA, DISPLAYB, RSVD, RSVD, RSVD4, 0x1C, 19, 0x98, 4, 0xAC, 14),
+ PINGROUP(LHP2, LCD, DISPLAYA, DISPLAYB, RSVD, RSVD, RSVD4, 0x1C, 20, 0x98, 6, 0xAC, 14),
+ PINGROUP(LHS, LCD, DISPLAYA, DISPLAYB, XIO, RSVD, RSVD4, 0x20, 7, 0x90, 22, 0xAC, 22),
+ PINGROUP(LM0, LCD, DISPLAYA, DISPLAYB, SPI3, RSVD, RSVD4, 0x1C, 24, 0x90, 26, 0xAC, 22),
+ PINGROUP(LM1, LCD, DISPLAYA, DISPLAYB, RSVD, CRT, RSVD3, 0x1C, 25, 0x90, 28, 0xAC, 22),
+ PINGROUP(LPP, LCD, DISPLAYA, DISPLAYB, RSVD, RSVD, RSVD4, 0x20, 8, 0x98, 14, 0xAC, 18),
+ PINGROUP(LPW0, LCD, DISPLAYA, DISPLAYB, SPI3, HDMI, DISPLAYA, 0x20, 3, 0x90, 0, 0xAC, 20),
+ PINGROUP(LPW1, LCD, DISPLAYA, DISPLAYB, RSVD, RSVD, RSVD4, 0x20, 4, 0x90, 2, 0xAC, 20),
+ PINGROUP(LPW2, LCD, DISPLAYA, DISPLAYB, SPI3, HDMI, DISPLAYA, 0x20, 5, 0x90, 4, 0xAC, 20),
+ PINGROUP(LSC0, LCD, DISPLAYA, DISPLAYB, XIO, RSVD, RSVD4, 0x1C, 27, 0x90, 18, 0xAC, 22),
+ PINGROUP(LSC1, LCD, DISPLAYA, DISPLAYB, SPI3, HDMI, DISPLAYA, 0x1C, 28, 0x90, 20, 0xAC, 20),
+ PINGROUP(LSCK, LCD, DISPLAYA, DISPLAYB, SPI3, HDMI, DISPLAYA, 0x1C, 29, 0x90, 16, 0xAC, 20),
+ PINGROUP(LSDA, LCD, DISPLAYA, DISPLAYB, SPI3, HDMI, DISPLAYA, 0x20, 1, 0x90, 8, 0xAC, 20),
+ PINGROUP(LSDI, LCD, DISPLAYA, DISPLAYB, SPI3, RSVD, DISPLAYA, 0x20, 2, 0x90, 6, 0xAC, 20),
+ PINGROUP(LSPI, LCD, DISPLAYA, DISPLAYB, XIO, HDMI, DISPLAYA, 0x20, 0, 0x90, 10, 0xAC, 22),
+ PINGROUP(LVP0, LCD, DISPLAYA, DISPLAYB, RSVD, RSVD, RSVD4, 0x1C, 21, 0x90, 30, 0xAC, 22),
+ PINGROUP(LVP1, LCD, DISPLAYA, DISPLAYB, RSVD, RSVD, RSVD4, 0x1C, 22, 0x98, 8, 0xAC, 16),
+ PINGROUP(LVS, LCD, DISPLAYA, DISPLAYB, XIO, RSVD, RSVD4, 0x1C, 26, 0x90, 24, 0xAC, 22),
+ PINGROUP(OWC, SYS, OWR, RSVD, RSVD, RSVD, OWR, 0x14, 31, 0x84, 8, 0xB0, 30),
+ PINGROUP(PMC, SYS, PWR_ON, PWR_INTR, RSVD, RSVD, PWR_ON, 0x14, 23, 0x98, 18, -1, -1),
+ PINGROUP(PTA, NAND, I2C2, HDMI, GMI, RSVD, RSVD4, 0x14, 24, 0x98, 22, 0xA4, 4),
+ PINGROUP(RM, UART, I2C, RSVD, RSVD, RSVD, RSVD4, 0x14, 25, 0x80, 14, 0xA4, 0),
+ PINGROUP(SDB, SD, UARTA, PWM, SDIO3, SPI2, PWM, 0x20, 15, 0x8C, 10, -1, -1),
+ PINGROUP(SDC, SD, PWM, TWC, SDIO3, SPI3, TWC, 0x18, 1, 0x8C, 12, 0xAC, 28),
+ PINGROUP(SDD, SD, UARTA, PWM, SDIO3, SPI3, PWM, 0x18, 2, 0x8C, 14, 0xAC, 30),
+ PINGROUP(SDIO1, BB, SDIO1, RSVD, UARTE, UARTA, RSVD2, 0x14, 30, 0x80, 30, 0xB0, 18),
+ PINGROUP(SLXA, SD, PCIE, SPI4, SDIO3, SPI2, PCIE, 0x18, 3, 0x84, 6, 0xA4, 22),
+ PINGROUP(SLXC, SD, SPDIF, SPI4, SDIO3, SPI2, SPI4, 0x18, 5, 0x84, 10, 0xA4, 26),
+ PINGROUP(SLXD, SD, SPDIF, SPI4, SDIO3, SPI2, SPI4, 0x18, 6, 0x84, 12, 0xA4, 28),
+ PINGROUP(SLXK, SD, PCIE, SPI4, SDIO3, SPI2, PCIE, 0x18, 7, 0x84, 14, 0xA4, 30),
+ PINGROUP(SPDI, AUDIO, SPDIF, RSVD, I2C, SDIO2, RSVD2, 0x18, 8, 0x8C, 8, 0xA4, 16),
+ PINGROUP(SPDO, AUDIO, SPDIF, RSVD, I2C, SDIO2, RSVD2, 0x18, 9, 0x8C, 6, 0xA4, 18),
+ PINGROUP(SPIA, AUDIO, SPI1, SPI2, SPI3, GMI, GMI, 0x18, 10, 0x8C, 30, 0xA8, 4),
+ PINGROUP(SPIB, AUDIO, SPI1, SPI2, SPI3, GMI, GMI, 0x18, 11, 0x8C, 28, 0xA8, 6),
+ PINGROUP(SPIC, AUDIO, SPI1, SPI2, SPI3, GMI, GMI, 0x18, 12, 0x8C, 26, 0xA8, 8),
+ PINGROUP(SPID, AUDIO, SPI2, SPI1, SPI2_ALT, GMI, GMI, 0x18, 13, 0x8C, 24, 0xA8, 10),
+ PINGROUP(SPIE, AUDIO, SPI2, SPI1, SPI2_ALT, GMI, GMI, 0x18, 14, 0x8C, 22, 0xA8, 12),
+ PINGROUP(SPIF, AUDIO, SPI3, SPI1, SPI2, RSVD, RSVD4, 0x18, 15, 0x8C, 20, 0xA8, 14),
+ PINGROUP(SPIG, AUDIO, SPI3, SPI2, SPI2_ALT, I2C, SPI2_ALT, 0x18, 16, 0x8C, 18, 0xA8, 16),
+ PINGROUP(SPIH, AUDIO, SPI3, SPI2, SPI2_ALT, I2C, SPI2_ALT, 0x18, 17, 0x8C, 16, 0xA8, 18),
+ PINGROUP(UAA, BB, SPI3, MIPI_HS, UARTA, ULPI, MIPI_HS, 0x18, 18, 0x80, 0, 0xAC, 0),
+ PINGROUP(UAB, BB, SPI2, MIPI_HS, UARTA, ULPI, MIPI_HS, 0x18, 19, 0x80, 2, 0xAC, 2),
+ PINGROUP(UAC, BB, OWR, RSVD, RSVD, RSVD, RSVD4, 0x18, 20, 0x80, 4, 0xAC, 4),
+ PINGROUP(UAD, UART, IRDA, SPDIF, UARTA, SPI4, SPDIF, 0x18, 21, 0x80, 6, 0xAC, 6),
+ PINGROUP(UCA, UART, UARTC, RSVD, GMI, RSVD, RSVD4, 0x18, 22, 0x84, 16, 0xAC, 8),
+ PINGROUP(UCB, UART, UARTC, PWM, GMI, RSVD, RSVD4, 0x18, 23, 0x84, 18, 0xAC, 10),
+ PINGROUP(UDA, BB, SPI1, RSVD, UARTD, ULPI, RSVD2, 0x20, 13, 0x80, 8, 0xB0, 16),
+ /* these pin groups only have pullup and pull down control */
+ PINGROUP(CK32, SYS, RSVD, RSVD, RSVD, RSVD, RSVD, -1, -1, -1, -1, 0xB0, 14),
+ PINGROUP(DDRC, DDR, RSVD, RSVD, RSVD, RSVD, RSVD, -1, -1, -1, -1, 0xAC, 26),
+ PINGROUP(PMCA, SYS, RSVD, RSVD, RSVD, RSVD, RSVD, -1, -1, -1, -1, 0xB0, 4),
+ PINGROUP(PMCB, SYS, RSVD, RSVD, RSVD, RSVD, RSVD, -1, -1, -1, -1, 0xB0, 6),
+ PINGROUP(PMCC, SYS, RSVD, RSVD, RSVD, RSVD, RSVD, -1, -1, -1, -1, 0xB0, 8),
+ PINGROUP(PMCD, SYS, RSVD, RSVD, RSVD, RSVD, RSVD, -1, -1, -1, -1, 0xB0, 10),
+ PINGROUP(PMCE, SYS, RSVD, RSVD, RSVD, RSVD, RSVD, -1, -1, -1, -1, 0xB0, 12),
+ PINGROUP(XM2C, DDR, RSVD, RSVD, RSVD, RSVD, RSVD, -1, -1, -1, -1, 0xA8, 30),
+ PINGROUP(XM2D, DDR, RSVD, RSVD, RSVD, RSVD, RSVD, -1, -1, -1, -1, 0xA8, 28),
+};
+
+#ifdef CONFIG_PM
+#define TRISTATE_REG_A 0x14
+#define TRISTATE_REG_NUM 4
+#define PIN_MUX_CTL_REG_A 0x80
+#define PIN_MUX_CTL_REG_NUM 8
+#define PULLUPDOWN_REG_A 0xa0
+#define PULLUPDOWN_REG_NUM 5
+
+static u32 pinmux_reg[TRISTATE_REG_NUM + PIN_MUX_CTL_REG_NUM +
+ PULLUPDOWN_REG_NUM];
+
+static inline unsigned long pg_readl(unsigned long offset)
+{
+ return readl(IO_TO_VIRT(TEGRA_APB_MISC_BASE + offset));
+}
+
+static inline void pg_writel(unsigned long value, unsigned long offset)
+{
+ writel(value, IO_TO_VIRT(TEGRA_APB_MISC_BASE + offset));
+}
+
+void tegra_pinmux_suspend(void)
+{
+ unsigned int i;
+ u32 *ctx = pinmux_reg;
+
+ /* save in the order tegra_pinmux_resume() walks the context buffer */
+ for (i = 0; i < PIN_MUX_CTL_REG_NUM; i++)
+ *ctx++ = pg_readl(PIN_MUX_CTL_REG_A + i*4);
+
+ for (i = 0; i < PULLUPDOWN_REG_NUM; i++)
+ *ctx++ = pg_readl(PULLUPDOWN_REG_A + i*4);
+
+ for (i = 0; i < TRISTATE_REG_NUM; i++)
+ *ctx++ = pg_readl(TRISTATE_REG_A + i*4);
+}
+
+void tegra_pinmux_resume(void)
+{
+ unsigned int i;
+ u32 *ctx = pinmux_reg;
+
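+ /*
+ * Tristate settings are restored last, presumably so that every pin's
+ * mux and pull configuration is back in place before the pin is
+ * un-tristated.
+ */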
+ for (i = 0; i < PIN_MUX_CTL_REG_NUM; i++)
+ pg_writel(*ctx++, PIN_MUX_CTL_REG_A + i*4);
+
+ for (i = 0; i < PULLUPDOWN_REG_NUM; i++)
+ pg_writel(*ctx++, PULLUPDOWN_REG_A + i*4);
+
+ for (i = 0; i < TRISTATE_REG_NUM; i++)
+ pg_writel(*ctx++, TRISTATE_REG_A + i*4);
+}
+#endif
diff --git a/arch/arm/mach-tegra/pinmux.c b/arch/arm/mach-tegra/pinmux.c
index 13ae10237e84..f80d507671bc 100644
--- a/arch/arm/mach-tegra/pinmux.c
+++ b/arch/arm/mach-tegra/pinmux.c
@@ -14,7 +14,8 @@
*
*/
-
+#include <linux/init.h>
+#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/spinlock.h>
@@ -23,21 +24,6 @@
#include <mach/iomap.h>
#include <mach/pinmux.h>
-
-#define TEGRA_TRI_STATE(x) (0x14 + (4 * (x)))
-#define TEGRA_PP_MUX_CTL(x) (0x80 + (4 * (x)))
-#define TEGRA_PP_PU_PD(x) (0xa0 + (4 * (x)))
-
-#define REG_A 0
-#define REG_B 1
-#define REG_C 2
-#define REG_D 3
-#define REG_E 4
-#define REG_F 5
-#define REG_G 6
-
-#define REG_N -1
-
#define HSM_EN(reg) (((reg) >> 2) & 0x1)
#define SCHMT_EN(reg) (((reg) >> 3) & 0x1)
#define LPMD(reg) (((reg) >> 4) & 0x3)
@@ -46,154 +32,8 @@
#define SLWR(reg) (((reg) >> 28) & 0x3)
#define SLWF(reg) (((reg) >> 30) & 0x3)
-struct tegra_pingroup_desc {
- const char *name;
- int funcs[4];
- s8 tri_reg; /* offset into the TRISTATE_REG_* register bank */
- s8 tri_bit; /* offset into the TRISTATE_REG_* register bit */
- s8 mux_reg; /* offset into the PIN_MUX_CTL_* register bank */
- s8 mux_bit; /* offset into the PIN_MUX_CTL_* register bit */
- s8 pupd_reg; /* offset into the PULL_UPDOWN_REG_* register bank */
- s8 pupd_bit; /* offset into the PULL_UPDOWN_REG_* register bit */
-};
-
-#define PINGROUP(pg_name, f0, f1, f2, f3, \
- tri_r, tri_b, mux_r, mux_b, pupd_r, pupd_b) \
- [TEGRA_PINGROUP_ ## pg_name] = { \
- .name = #pg_name, \
- .funcs = { \
- TEGRA_MUX_ ## f0, \
- TEGRA_MUX_ ## f1, \
- TEGRA_MUX_ ## f2, \
- TEGRA_MUX_ ## f3, \
- }, \
- .tri_reg = REG_ ## tri_r, \
- .tri_bit = tri_b, \
- .mux_reg = REG_ ## mux_r, \
- .mux_bit = mux_b, \
- .pupd_reg = REG_ ## pupd_r, \
- .pupd_bit = pupd_b, \
- }
-
-static const struct tegra_pingroup_desc pingroups[TEGRA_MAX_PINGROUP] = {
- PINGROUP(ATA, IDE, NAND, GMI, RSVD, A, 0, A, 24, A, 0),
- PINGROUP(ATB, IDE, NAND, GMI, SDIO4, A, 1, A, 16, A, 2),
- PINGROUP(ATC, IDE, NAND, GMI, SDIO4, A, 2, A, 22, A, 4),
- PINGROUP(ATD, IDE, NAND, GMI, SDIO4, A, 3, A, 20, A, 6),
- PINGROUP(ATE, IDE, NAND, GMI, RSVD, B, 25, A, 12, A, 8),
- PINGROUP(CDEV1, OSC, PLLA_OUT, PLLM_OUT1, AUDIO_SYNC, A, 4, C, 2, C, 0),
- PINGROUP(CDEV2, OSC, AHB_CLK, APB_CLK, PLLP_OUT4, A, 5, C, 4, C, 2),
- PINGROUP(CRTP, CRT, RSVD, RSVD, RSVD, D, 14, G, 20, B, 24),
- PINGROUP(CSUS, PLLC_OUT1, PLLP_OUT2, PLLP_OUT3, VI_SENSOR_CLK, A, 6, C, 6, D, 24),
- PINGROUP(DAP1, DAP1, RSVD, GMI, SDIO2, A, 7, C, 20, A, 10),
- PINGROUP(DAP2, DAP2, TWC, RSVD, GMI, A, 8, C, 22, A, 12),
- PINGROUP(DAP3, DAP3, RSVD, RSVD, RSVD, A, 9, C, 24, A, 14),
- PINGROUP(DAP4, DAP4, RSVD, GMI, RSVD, A, 10, C, 26, A, 16),
- PINGROUP(DDC, I2C2, RSVD, RSVD, RSVD, B, 31, C, 0, E, 28),
- PINGROUP(DTA, RSVD, SDIO2, VI, RSVD, A, 11, B, 20, A, 18),
- PINGROUP(DTB, RSVD, RSVD, VI, SPI1, A, 12, B, 22, A, 20),
- PINGROUP(DTC, RSVD, RSVD, VI, RSVD, A, 13, B, 26, A, 22),
- PINGROUP(DTD, RSVD, SDIO2, VI, RSVD, A, 14, B, 28, A, 24),
- PINGROUP(DTE, RSVD, RSVD, VI, SPI1, A, 15, B, 30, A, 26),
- PINGROUP(DTF, I2C3, RSVD, VI, RSVD, D, 12, G, 30, A, 28),
- PINGROUP(GMA, UARTE, SPI3, GMI, SDIO4, A, 28, B, 0, E, 20),
- PINGROUP(GMB, IDE, NAND, GMI, GMI_INT, B, 29, C, 28, E, 22),
- PINGROUP(GMC, UARTD, SPI4, GMI, SFLASH, A, 29, B, 2, E, 24),
- PINGROUP(GMD, RSVD, NAND, GMI, SFLASH, B, 30, C, 30, E, 26),
- PINGROUP(GME, RSVD, DAP5, GMI, SDIO4, B, 0, D, 0, C, 24),
- PINGROUP(GPU, PWM, UARTA, GMI, RSVD, A, 16, D, 4, B, 20),
- PINGROUP(GPU7, RTCK, RSVD, RSVD, RSVD, D, 11, G, 28, B, 6),
- PINGROUP(GPV, PCIE, RSVD, RSVD, RSVD, A, 17, D, 2, A, 30),
- PINGROUP(HDINT, HDMI, RSVD, RSVD, RSVD, C, 23, B, 4, D, 22),
- PINGROUP(I2CP, I2C, RSVD, RSVD, RSVD, A, 18, C, 8, B, 2),
- PINGROUP(IRRX, UARTA, UARTB, GMI, SPI4, A, 20, C, 18, C, 22),
- PINGROUP(IRTX, UARTA, UARTB, GMI, SPI4, A, 19, C, 16, C, 20),
- PINGROUP(KBCA, KBC, NAND, SDIO2, EMC_TEST0_DLL, A, 22, C, 10, B, 8),
- PINGROUP(KBCB, KBC, NAND, SDIO2, MIO, A, 21, C, 12, B, 10),
- PINGROUP(KBCC, KBC, NAND, TRACE, EMC_TEST1_DLL, B, 26, C, 14, B, 12),
- PINGROUP(KBCD, KBC, NAND, SDIO2, MIO, D, 10, G, 26, B, 14),
- PINGROUP(KBCE, KBC, NAND, OWR, RSVD, A, 26, A, 28, E, 2),
- PINGROUP(KBCF, KBC, NAND, TRACE, MIO, A, 27, A, 26, E, 0),
- PINGROUP(LCSN, DISPLAYA, DISPLAYB, SPI3, RSVD, C, 31, E, 12, D, 20),
- PINGROUP(LD0, DISPLAYA, DISPLAYB, XIO, RSVD, C, 0, F, 0, D, 12),
- PINGROUP(LD1, DISPLAYA, DISPLAYB, XIO, RSVD, C, 1, F, 2, D, 12),
- PINGROUP(LD10, DISPLAYA, DISPLAYB, XIO, RSVD, C, 10, F, 20, D, 12),
- PINGROUP(LD11, DISPLAYA, DISPLAYB, XIO, RSVD, C, 11, F, 22, D, 12),
- PINGROUP(LD12, DISPLAYA, DISPLAYB, XIO, RSVD, C, 12, F, 24, D, 12),
- PINGROUP(LD13, DISPLAYA, DISPLAYB, XIO, RSVD, C, 13, F, 26, D, 12),
- PINGROUP(LD14, DISPLAYA, DISPLAYB, XIO, RSVD, C, 14, F, 28, D, 12),
- PINGROUP(LD15, DISPLAYA, DISPLAYB, XIO, RSVD, C, 15, F, 30, D, 12),
- PINGROUP(LD16, DISPLAYA, DISPLAYB, XIO, RSVD, C, 16, G, 0, D, 12),
- PINGROUP(LD17, DISPLAYA, DISPLAYB, RSVD, RSVD, C, 17, G, 2, D, 12),
- PINGROUP(LD2, DISPLAYA, DISPLAYB, XIO, RSVD, C, 2, F, 4, D, 12),
- PINGROUP(LD3, DISPLAYA, DISPLAYB, XIO, RSVD, C, 3, F, 6, D, 12),
- PINGROUP(LD4, DISPLAYA, DISPLAYB, XIO, RSVD, C, 4, F, 8, D, 12),
- PINGROUP(LD5, DISPLAYA, DISPLAYB, XIO, RSVD, C, 5, F, 10, D, 12),
- PINGROUP(LD6, DISPLAYA, DISPLAYB, XIO, RSVD, C, 6, F, 12, D, 12),
- PINGROUP(LD7, DISPLAYA, DISPLAYB, XIO, RSVD, C, 7, F, 14, D, 12),
- PINGROUP(LD8, DISPLAYA, DISPLAYB, XIO, RSVD, C, 8, F, 16, D, 12),
- PINGROUP(LD9, DISPLAYA, DISPLAYB, XIO, RSVD, C, 9, F, 18, D, 12),
- PINGROUP(LDC, DISPLAYA, DISPLAYB, RSVD, RSVD, C, 30, E, 14, D, 20),
- PINGROUP(LDI, DISPLAYA, DISPLAYB, RSVD, RSVD, D, 6, G, 16, D, 18),
- PINGROUP(LHP0, DISPLAYA, DISPLAYB, RSVD, RSVD, C, 18, G, 10, D, 16),
- PINGROUP(LHP1, DISPLAYA, DISPLAYB, RSVD, RSVD, C, 19, G, 4, D, 14),
- PINGROUP(LHP2, DISPLAYA, DISPLAYB, RSVD, RSVD, C, 20, G, 6, D, 14),
- PINGROUP(LHS, DISPLAYA, DISPLAYB, XIO, RSVD, D, 7, E, 22, D, 22),
- PINGROUP(LM0, DISPLAYA, DISPLAYB, SPI3, RSVD, C, 24, E, 26, D, 22),
- PINGROUP(LM1, DISPLAYA, DISPLAYB, RSVD, CRT, C, 25, E, 28, D, 22),
- PINGROUP(LPP, DISPLAYA, DISPLAYB, RSVD, RSVD, D, 8, G, 14, D, 18),
- PINGROUP(LPW0, DISPLAYA, DISPLAYB, SPI3, HDMI, D, 3, E, 0, D, 20),
- PINGROUP(LPW1, DISPLAYA, DISPLAYB, RSVD, RSVD, D, 4, E, 2, D, 20),
- PINGROUP(LPW2, DISPLAYA, DISPLAYB, SPI3, HDMI, D, 5, E, 4, D, 20),
- PINGROUP(LSC0, DISPLAYA, DISPLAYB, XIO, RSVD, C, 27, E, 18, D, 22),
- PINGROUP(LSC1, DISPLAYA, DISPLAYB, SPI3, HDMI, C, 28, E, 20, D, 20),
- PINGROUP(LSCK, DISPLAYA, DISPLAYB, SPI3, HDMI, C, 29, E, 16, D, 20),
- PINGROUP(LSDA, DISPLAYA, DISPLAYB, SPI3, HDMI, D, 1, E, 8, D, 20),
- PINGROUP(LSDI, DISPLAYA, DISPLAYB, SPI3, RSVD, D, 2, E, 6, D, 20),
- PINGROUP(LSPI, DISPLAYA, DISPLAYB, XIO, HDMI, D, 0, E, 10, D, 22),
- PINGROUP(LVP0, DISPLAYA, DISPLAYB, RSVD, RSVD, C, 21, E, 30, D, 22),
- PINGROUP(LVP1, DISPLAYA, DISPLAYB, RSVD, RSVD, C, 22, G, 8, D, 16),
- PINGROUP(LVS, DISPLAYA, DISPLAYB, XIO, RSVD, C, 26, E, 24, D, 22),
- PINGROUP(OWC, OWR, RSVD, RSVD, RSVD, A, 31, B, 8, E, 30),
- PINGROUP(PMC, PWR_ON, PWR_INTR, RSVD, RSVD, A, 23, G, 18, N, -1),
- PINGROUP(PTA, I2C2, HDMI, GMI, RSVD, A, 24, G, 22, B, 4),
- PINGROUP(RM, I2C, RSVD, RSVD, RSVD, A, 25, A, 14, B, 0),
- PINGROUP(SDB, UARTA, PWM, SDIO3, SPI2, D, 15, D, 10, N, -1),
- PINGROUP(SDC, PWM, TWC, SDIO3, SPI3, B, 1, D, 12, D, 28),
- PINGROUP(SDD, UARTA, PWM, SDIO3, SPI3, B, 2, D, 14, D, 30),
- PINGROUP(SDIO1, SDIO1, RSVD, UARTE, UARTA, A, 30, A, 30, E, 18),
- PINGROUP(SLXA, PCIE, SPI4, SDIO3, SPI2, B, 3, B, 6, B, 22),
- PINGROUP(SLXC, SPDIF, SPI4, SDIO3, SPI2, B, 5, B, 10, B, 26),
- PINGROUP(SLXD, SPDIF, SPI4, SDIO3, SPI2, B, 6, B, 12, B, 28),
- PINGROUP(SLXK, PCIE, SPI4, SDIO3, SPI2, B, 7, B, 14, B, 30),
- PINGROUP(SPDI, SPDIF, RSVD, I2C, SDIO2, B, 8, D, 8, B, 16),
- PINGROUP(SPDO, SPDIF, RSVD, I2C, SDIO2, B, 9, D, 6, B, 18),
- PINGROUP(SPIA, SPI1, SPI2, SPI3, GMI, B, 10, D, 30, C, 4),
- PINGROUP(SPIB, SPI1, SPI2, SPI3, GMI, B, 11, D, 28, C, 6),
- PINGROUP(SPIC, SPI1, SPI2, SPI3, GMI, B, 12, D, 26, C, 8),
- PINGROUP(SPID, SPI2, SPI1, SPI2_ALT, GMI, B, 13, D, 24, C, 10),
- PINGROUP(SPIE, SPI2, SPI1, SPI2_ALT, GMI, B, 14, D, 22, C, 12),
- PINGROUP(SPIF, SPI3, SPI1, SPI2, RSVD, B, 15, D, 20, C, 14),
- PINGROUP(SPIG, SPI3, SPI2, SPI2_ALT, I2C, B, 16, D, 18, C, 16),
- PINGROUP(SPIH, SPI3, SPI2, SPI2_ALT, I2C, B, 17, D, 16, C, 18),
- PINGROUP(UAA, SPI3, MIPI_HS, UARTA, ULPI, B, 18, A, 0, D, 0),
- PINGROUP(UAB, SPI2, MIPI_HS, UARTA, ULPI, B, 19, A, 2, D, 2),
- PINGROUP(UAC, OWR, RSVD, RSVD, RSVD, B, 20, A, 4, D, 4),
- PINGROUP(UAD, IRDA, SPDIF, UARTA, SPI4, B, 21, A, 6, D, 6),
- PINGROUP(UCA, UARTC, RSVD, GMI, RSVD, B, 22, B, 16, D, 8),
- PINGROUP(UCB, UARTC, PWM, GMI, RSVD, B, 23, B, 18, D, 10),
- PINGROUP(UDA, SPI1, RSVD, UARTD, ULPI, D, 13, A, 8, E, 16),
- /* these pin groups only have pullup and pull down control */
- PINGROUP(CK32, RSVD, RSVD, RSVD, RSVD, N, -1, N, -1, E, 14),
- PINGROUP(DDRC, RSVD, RSVD, RSVD, RSVD, N, -1, N, -1, D, 26),
- PINGROUP(PMCA, RSVD, RSVD, RSVD, RSVD, N, -1, N, -1, E, 4),
- PINGROUP(PMCB, RSVD, RSVD, RSVD, RSVD, N, -1, N, -1, E, 6),
- PINGROUP(PMCC, RSVD, RSVD, RSVD, RSVD, N, -1, N, -1, E, 8),
- PINGROUP(PMCD, RSVD, RSVD, RSVD, RSVD, N, -1, N, -1, E, 10),
- PINGROUP(PMCE, RSVD, RSVD, RSVD, RSVD, N, -1, N, -1, E, 12),
- PINGROUP(XM2C, RSVD, RSVD, RSVD, RSVD, N, -1, N, -1, C, 30),
- PINGROUP(XM2D, RSVD, RSVD, RSVD, RSVD, N, -1, N, -1, C, 28),
-};
+static const struct tegra_pingroup_desc *const pingroups = tegra_soc_pingroups;
+static const struct tegra_drive_pingroup_desc *const drive_pingroups = tegra_soc_drive_pingroups;
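+
+/* the pingroup and drive-pingroup tables are now supplied by SoC-specific
+ code via tegra_soc_pingroups and tegra_soc_drive_pingroups instead of
+ being defined in this file */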
static char *tegra_mux_names[TEGRA_MAX_MUX] = {
[TEGRA_MUX_AHB_CLK] = "AHB_CLK",
@@ -256,48 +96,7 @@ static char *tegra_mux_names[TEGRA_MAX_MUX] = {
[TEGRA_MUX_VI] = "VI",
[TEGRA_MUX_VI_SENSOR_CLK] = "VI_SENSOR_CLK",
[TEGRA_MUX_XIO] = "XIO",
-};
-
-struct tegra_drive_pingroup_desc {
- const char *name;
- s16 reg;
-};
-
-#define DRIVE_PINGROUP(pg_name, r) \
- [TEGRA_DRIVE_PINGROUP_ ## pg_name] = { \
- .name = #pg_name, \
- .reg = r \
- }
-
-static const struct tegra_drive_pingroup_desc drive_pingroups[TEGRA_MAX_PINGROUP] = {
- DRIVE_PINGROUP(AO1, 0x868),
- DRIVE_PINGROUP(AO2, 0x86c),
- DRIVE_PINGROUP(AT1, 0x870),
- DRIVE_PINGROUP(AT2, 0x874),
- DRIVE_PINGROUP(CDEV1, 0x878),
- DRIVE_PINGROUP(CDEV2, 0x87c),
- DRIVE_PINGROUP(CSUS, 0x880),
- DRIVE_PINGROUP(DAP1, 0x884),
- DRIVE_PINGROUP(DAP2, 0x888),
- DRIVE_PINGROUP(DAP3, 0x88c),
- DRIVE_PINGROUP(DAP4, 0x890),
- DRIVE_PINGROUP(DBG, 0x894),
- DRIVE_PINGROUP(LCD1, 0x898),
- DRIVE_PINGROUP(LCD2, 0x89c),
- DRIVE_PINGROUP(SDMMC2, 0x8a0),
- DRIVE_PINGROUP(SDMMC3, 0x8a4),
- DRIVE_PINGROUP(SPI, 0x8a8),
- DRIVE_PINGROUP(UAA, 0x8ac),
- DRIVE_PINGROUP(UAB, 0x8b0),
- DRIVE_PINGROUP(UART2, 0x8b4),
- DRIVE_PINGROUP(UART3, 0x8b8),
- DRIVE_PINGROUP(VI1, 0x8bc),
- DRIVE_PINGROUP(VI2, 0x8c0),
- DRIVE_PINGROUP(XM2A, 0x8c4),
- DRIVE_PINGROUP(XM2C, 0x8c8),
- DRIVE_PINGROUP(XM2D, 0x8cc),
- DRIVE_PINGROUP(XM2CLK, 0x8d0),
- DRIVE_PINGROUP(MEMCOMP, 0x8d4),
+ [TEGRA_MUX_SAFE] = "<safe>",
};
static const char *tegra_drive_names[TEGRA_MAX_DRIVE] = {
@@ -381,22 +180,27 @@ static inline void pg_writel(unsigned long value, unsigned long offset)
writel(value, IO_TO_VIRT(TEGRA_APB_MISC_BASE + offset));
}
-int tegra_pinmux_set_func(enum tegra_pingroup pg, enum tegra_mux_func func)
+static int tegra_pinmux_set_func(const struct tegra_pingroup_config *config)
{
int mux = -1;
int i;
unsigned long reg;
unsigned long flags;
+ enum tegra_pingroup pg = config->pingroup;
+ enum tegra_mux_func func = config->func;
if (pg < 0 || pg >= TEGRA_MAX_PINGROUP)
return -ERANGE;
- if (pingroups[pg].mux_reg == REG_N)
+ if (pingroups[pg].mux_reg < 0)
return -EINVAL;
if (func < 0)
return -ERANGE;
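+	/* resolve the special TEGRA_MUX_SAFE value to the safe function
+	 * recorded in the SoC pingroup table */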
+ if (func == TEGRA_MUX_SAFE)
+ func = pingroups[pg].func_safe;
+
if (func & TEGRA_MUX_RSVD) {
mux = func & 0x3;
} else {
@@ -413,10 +217,10 @@ int tegra_pinmux_set_func(enum tegra_pingroup pg, enum tegra_mux_func func)
spin_lock_irqsave(&mux_lock, flags);
- reg = pg_readl(TEGRA_PP_MUX_CTL(pingroups[pg].mux_reg));
+ reg = pg_readl(pingroups[pg].mux_reg);
reg &= ~(0x3 << pingroups[pg].mux_bit);
reg |= mux << pingroups[pg].mux_bit;
- pg_writel(reg, TEGRA_PP_MUX_CTL(pingroups[pg].mux_reg));
+ pg_writel(reg, pingroups[pg].mux_reg);
spin_unlock_irqrestore(&mux_lock, flags);
@@ -432,16 +236,16 @@ int tegra_pinmux_set_tristate(enum tegra_pingroup pg,
if (pg < 0 || pg >= TEGRA_MAX_PINGROUP)
return -ERANGE;
- if (pingroups[pg].tri_reg == REG_N)
+ if (pingroups[pg].tri_reg < 0)
return -EINVAL;
spin_lock_irqsave(&mux_lock, flags);
- reg = pg_readl(TEGRA_TRI_STATE(pingroups[pg].tri_reg));
+ reg = pg_readl(pingroups[pg].tri_reg);
reg &= ~(0x1 << pingroups[pg].tri_bit);
if (tristate)
reg |= 1 << pingroups[pg].tri_bit;
- pg_writel(reg, TEGRA_TRI_STATE(pingroups[pg].tri_reg));
+ pg_writel(reg, pingroups[pg].tri_reg);
spin_unlock_irqrestore(&mux_lock, flags);
@@ -457,7 +261,7 @@ int tegra_pinmux_set_pullupdown(enum tegra_pingroup pg,
if (pg < 0 || pg >= TEGRA_MAX_PINGROUP)
return -ERANGE;
- if (pingroups[pg].pupd_reg == REG_N)
+ if (pingroups[pg].pupd_reg < 0)
return -EINVAL;
if (pupd != TEGRA_PUPD_NORMAL &&
@@ -468,38 +272,39 @@ int tegra_pinmux_set_pullupdown(enum tegra_pingroup pg,
spin_lock_irqsave(&mux_lock, flags);
- reg = pg_readl(TEGRA_PP_PU_PD(pingroups[pg].pupd_reg));
+ reg = pg_readl(pingroups[pg].pupd_reg);
reg &= ~(0x3 << pingroups[pg].pupd_bit);
reg |= pupd << pingroups[pg].pupd_bit;
- pg_writel(reg, TEGRA_PP_PU_PD(pingroups[pg].pupd_reg));
+ pg_writel(reg, pingroups[pg].pupd_reg);
spin_unlock_irqrestore(&mux_lock, flags);
return 0;
}
-void tegra_pinmux_config_pingroup(enum tegra_pingroup pingroup,
- enum tegra_mux_func func,
- enum tegra_pullupdown pupd,
- enum tegra_tristate tristate)
+static void tegra_pinmux_config_pingroup(const struct tegra_pingroup_config *config)
{
+ enum tegra_pingroup pingroup = config->pingroup;
+ enum tegra_mux_func func = config->func;
+ enum tegra_pullupdown pupd = config->pupd;
+ enum tegra_tristate tristate = config->tristate;
int err;
- if (pingroups[pingroup].mux_reg != REG_N) {
- err = tegra_pinmux_set_func(pingroup, func);
+ if (pingroups[pingroup].mux_reg >= 0) {
+ err = tegra_pinmux_set_func(config);
if (err < 0)
pr_err("pinmux: can't set pingroup %s func to %s: %d\n",
pingroup_name(pingroup), func_name(func), err);
}
- if (pingroups[pingroup].pupd_reg != REG_N) {
+ if (pingroups[pingroup].pupd_reg >= 0) {
err = tegra_pinmux_set_pullupdown(pingroup, pupd);
if (err < 0)
pr_err("pinmux: can't set pingroup %s pullupdown to %s: %d\n",
pingroup_name(pingroup), pupd_name(pupd), err);
}
- if (pingroups[pingroup].tri_reg != REG_N) {
+ if (pingroups[pingroup].tri_reg >= 0) {
err = tegra_pinmux_set_tristate(pingroup, tristate);
if (err < 0)
pr_err("pinmux: can't set pingroup %s tristate to %s: %d\n",
@@ -507,17 +312,12 @@ void tegra_pinmux_config_pingroup(enum tegra_pingroup pingroup,
}
}
-
-
-void tegra_pinmux_config_table(struct tegra_pingroup_config *config, int len)
+void tegra_pinmux_config_table(const struct tegra_pingroup_config *config, int len)
{
int i;
for (i = 0; i < len; i++)
- tegra_pinmux_config_pingroup(config[i].pingroup,
- config[i].func,
- config[i].pupd,
- config[i].tristate);
+ tegra_pinmux_config_pingroup(&config[i]);
}
static const char *drive_pinmux_name(enum tegra_drive_pingroup pg)
@@ -784,6 +584,86 @@ void tegra_drive_pinmux_config_table(struct tegra_drive_pingroup_config *config,
config[i].slew_falling);
}
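+
+/* program every pingroup named in the table to its designated safe function;
+ the pull-up/down and tristate settings in the entries are left untouched */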
+void tegra_pinmux_set_safe_pinmux_table(const struct tegra_pingroup_config *config,
+ int len)
+{
+ int i;
+ struct tegra_pingroup_config c;
+
+ for (i = 0; i < len; i++) {
+ int err;
+ c = config[i];
+ if (c.pingroup < 0 || c.pingroup >= TEGRA_MAX_PINGROUP) {
+ WARN_ON(1);
+ continue;
+ }
+ c.func = pingroups[c.pingroup].func_safe;
+ err = tegra_pinmux_set_func(&c);
+ if (err < 0)
+ pr_err("%s: tegra_pinmux_set_func returned %d setting "
+ "%s to %s\n", __func__, err,
+ pingroup_name(c.pingroup), func_name(c.func));
+ }
+}
+
+void tegra_pinmux_config_pinmux_table(const struct tegra_pingroup_config *config,
+ int len)
+{
+ int i;
+
+ for (i = 0; i < len; i++) {
+ int err;
+ if (config[i].pingroup < 0 ||
+ config[i].pingroup >= TEGRA_MAX_PINGROUP) {
+ WARN_ON(1);
+ continue;
+ }
+ err = tegra_pinmux_set_func(&config[i]);
+ if (err < 0)
+ pr_err("%s: tegra_pinmux_set_func returned %d setting "
+ "%s to %s\n", __func__, err,
+ pingroup_name(config[i].pingroup),
+ func_name(config[i].func));
+ }
+}
+
+void tegra_pinmux_config_tristate_table(const struct tegra_pingroup_config *config,
+ int len, enum tegra_tristate tristate)
+{
+ int i;
+ int err;
+ enum tegra_pingroup pingroup;
+
+ for (i = 0; i < len; i++) {
+ pingroup = config[i].pingroup;
+ if (pingroups[pingroup].tri_reg >= 0) {
+ err = tegra_pinmux_set_tristate(pingroup, tristate);
+ if (err < 0)
+ pr_err("pinmux: can't set pingroup %s tristate"
+ " to %s: %d\n", pingroup_name(pingroup),
+ tri_name(tristate), err);
+ }
+ }
+}
+
+void tegra_pinmux_config_pullupdown_table(const struct tegra_pingroup_config *config,
+ int len, enum tegra_pullupdown pupd)
+{
+ int i;
+ int err;
+ enum tegra_pingroup pingroup;
+
+ for (i = 0; i < len; i++) {
+ pingroup = config[i].pingroup;
+ if (pingroups[pingroup].pupd_reg >= 0) {
+ err = tegra_pinmux_set_pullupdown(pingroup, pupd);
+ if (err < 0)
+ pr_err("pinmux: can't set pingroup %s pullupdown"
+ " to %s: %d\n", pingroup_name(pingroup),
+ pupd_name(pupd), err);
+ }
+ }
+}
#ifdef CONFIG_DEBUG_FS
@@ -812,11 +692,11 @@ static int dbg_pinmux_show(struct seq_file *s, void *unused)
len = strlen(pingroups[i].name);
dbg_pad_field(s, 5 - len);
- if (pingroups[i].mux_reg == REG_N) {
+ if (pingroups[i].mux_reg < 0) {
seq_printf(s, "TEGRA_MUX_NONE");
len = strlen("NONE");
} else {
- mux = (pg_readl(TEGRA_PP_MUX_CTL(pingroups[i].mux_reg)) >>
+ mux = (pg_readl(pingroups[i].mux_reg) >>
pingroups[i].mux_bit) & 0x3;
if (pingroups[i].funcs[mux] == TEGRA_MUX_RSVD) {
seq_printf(s, "TEGRA_MUX_RSVD%1lu", mux+1);
@@ -829,21 +709,21 @@ static int dbg_pinmux_show(struct seq_file *s, void *unused)
}
dbg_pad_field(s, 13-len);
- if (pingroups[i].mux_reg == REG_N) {
+ if (pingroups[i].pupd_reg < 0) {
seq_printf(s, "TEGRA_PUPD_NORMAL");
len = strlen("NORMAL");
} else {
- pupd = (pg_readl(TEGRA_PP_PU_PD(pingroups[i].pupd_reg)) >>
+ pupd = (pg_readl(pingroups[i].pupd_reg) >>
pingroups[i].pupd_bit) & 0x3;
seq_printf(s, "TEGRA_PUPD_%s", pupd_name(pupd));
len = strlen(pupd_name(pupd));
}
dbg_pad_field(s, 9 - len);
- if (pingroups[i].tri_reg == REG_N) {
+ if (pingroups[i].tri_reg < 0) {
seq_printf(s, "TEGRA_TRI_NORMAL");
} else {
- tri = (pg_readl(TEGRA_TRI_STATE(pingroups[i].tri_reg)) >>
+ tri = (pg_readl(pingroups[i].tri_reg) >>
pingroups[i].tri_bit) & 0x1;
seq_printf(s, "TEGRA_TRI_%s", tri_name(tri));
diff --git a/arch/arm/mach-tegra/tegra2_clocks.c b/arch/arm/mach-tegra/tegra2_clocks.c
index 426163231fff..ae3b308e22a4 100644
--- a/arch/arm/mach-tegra/tegra2_clocks.c
+++ b/arch/arm/mach-tegra/tegra2_clocks.c
@@ -30,14 +30,21 @@
#include <mach/iomap.h>
#include "clock.h"
+#include "fuse.h"
+#include "tegra2_dvfs.h"
#define RST_DEVICES 0x004
#define RST_DEVICES_SET 0x300
#define RST_DEVICES_CLR 0x304
+#define RST_DEVICES_NUM 3
#define CLK_OUT_ENB 0x010
#define CLK_OUT_ENB_SET 0x320
#define CLK_OUT_ENB_CLR 0x324
+#define CLK_OUT_ENB_NUM 3
+
+#define CLK_MASK_ARM 0x44
+#define MISC_CLK_ENB 0x48
#define OSC_CTRL 0x50
#define OSC_CTRL_OSC_FREQ_MASK (3<<30)
@@ -45,6 +52,7 @@
#define OSC_CTRL_OSC_FREQ_19_2MHZ (1<<30)
#define OSC_CTRL_OSC_FREQ_12MHZ (2<<30)
#define OSC_CTRL_OSC_FREQ_26MHZ (3<<30)
+#define OSC_CTRL_MASK 0x3f2
#define OSC_FREQ_DET 0x58
#define OSC_FREQ_DET_TRIG (1<<31)
@@ -53,10 +61,17 @@
#define OSC_FREQ_DET_BUSY (1<<31)
#define OSC_FREQ_DET_CNT_MASK 0xFFFF
+#define PERIPH_CLK_SOURCE_I2S1 0x100
+#define PERIPH_CLK_SOURCE_EMC 0x19c
+#define PERIPH_CLK_SOURCE_OSC 0x1fc
+#define PERIPH_CLK_SOURCE_NUM \
+ ((PERIPH_CLK_SOURCE_OSC - PERIPH_CLK_SOURCE_I2S1) / 4)
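+/* one 32-bit source register every 4 bytes from I2S1 through OSC; the count
+ comes out one short of the full inclusive range, which matches the number of
+ registers actually saved because the suspend code skips the EMC source
+ register (see tegra_clk_suspend) */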
+
#define PERIPH_CLK_SOURCE_MASK (3<<30)
#define PERIPH_CLK_SOURCE_SHIFT 30
#define PERIPH_CLK_SOURCE_ENABLE (1<<28)
-#define PERIPH_CLK_SOURCE_DIV_MASK 0xFF
+#define PERIPH_CLK_SOURCE_DIVU71_MASK 0xFF
+#define PERIPH_CLK_SOURCE_DIVU16_MASK 0xFFFF
#define PERIPH_CLK_SOURCE_DIV_SHIFT 0
#define PLL_BASE 0x0
@@ -79,8 +94,9 @@
#define PLL_OUT_RESET_DISABLE (1<<0)
#define PLL_MISC(c) (((c)->flags & PLL_ALT_MISC_REG) ? 0x4 : 0xc)
+#define PLL_MISC_LOCK_ENABLE(c) (((c)->flags & PLLU) ? (1<<22) : (1<<18))
+
#define PLL_MISC_DCCON_SHIFT 20
-#define PLL_MISC_LOCK_ENABLE (1<<18)
#define PLL_MISC_CPCON_SHIFT 8
#define PLL_MISC_CPCON_MASK (0xF<<PLL_MISC_CPCON_SHIFT)
#define PLL_MISC_LFCON_SHIFT 4
@@ -88,10 +104,14 @@
#define PLL_MISC_VCOCON_SHIFT 0
#define PLL_MISC_VCOCON_MASK (0xF<<PLL_MISC_VCOCON_SHIFT)
+#define PLLU_BASE_POST_DIV (1<<20)
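+/* note: on PLL_U the post-divider bit is inverted relative to DIVP on the
+ other PLLs: set means divide-by-1, clear means divide-by-2 (see the PLLU
+ handling in tegra2_pll_clk_init and tegra2_pll_clk_set_rate) */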
+
#define PLLD_MISC_CLKENABLE (1<<30)
#define PLLD_MISC_DIV_RST (1<<23)
#define PLLD_MISC_DCCON_SHIFT 12
+#define PLLE_MISC_READY (1 << 15)
+
#define PERIPH_CLK_TO_ENB_REG(c) ((c->clk_num / 32) * 4)
#define PERIPH_CLK_TO_ENB_SET_REG(c) ((c->clk_num / 32) * 8)
#define PERIPH_CLK_TO_ENB_BIT(c) (1 << (c->clk_num % 32))
@@ -143,30 +163,37 @@ unsigned long clk_measure_input_freq(void)
}
}
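+/*
+ * The u7.1 dividers encode a divide-by-(N+2)/2 factor: register value N
+ * yields out_rate = parent_rate * 2 / (N + 2).  Illustrative example
+ * (numbers assumed, not taken from this patch): parent 216 MHz, requested
+ * 48 MHz -> ceil(216 * 2 / 48) = 9, returned value 9 - 2 = 7, and the
+ * resulting rate is 216 * 2 / 9 = 48 MHz.
+ */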
-static int clk_div71_get_divider(struct clk *c, unsigned long rate)
+static int clk_div71_get_divider(unsigned long parent_rate, unsigned long rate)
{
- unsigned long divider_u71;
+ s64 divider_u71 = parent_rate * 2;
+ divider_u71 += rate - 1;
+ do_div(divider_u71, rate);
- divider_u71 = DIV_ROUND_UP(c->rate * 2, rate);
+ if (divider_u71 - 2 < 0)
+ return 0;
- if (divider_u71 - 2 > 255 || divider_u71 - 2 < 0)
+ if (divider_u71 - 2 > 255)
return -EINVAL;
return divider_u71 - 2;
}
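+
+/* the u16 dividers used below are simpler: register value N divides the
+ parent rate by (N + 1), so value 0 passes the parent rate through */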
-static unsigned long tegra2_clk_recalculate_rate(struct clk *c)
+static int clk_div16_get_divider(unsigned long parent_rate, unsigned long rate)
{
- unsigned long rate;
- rate = c->parent->rate;
+ s64 divider_u16;
- if (c->mul != 0 && c->div != 0)
- c->rate = rate * c->mul / c->div;
- else
- c->rate = rate;
- return c->rate;
-}
+ divider_u16 = parent_rate;
+ divider_u16 += rate - 1;
+ do_div(divider_u16, rate);
+
+ if (divider_u16 - 1 < 0)
+ return 0;
+	if (divider_u16 - 1 > 0xFFFF)
+ return -EINVAL;
+
+ return divider_u16 - 1;
+}
/* clk_m functions */
static unsigned long tegra2_clk_m_autodetect_rate(struct clk *c)
@@ -244,7 +271,6 @@ static void tegra2_super_clk_init(struct clk *c)
}
BUG_ON(sel->input == NULL);
c->parent = sel->input;
- tegra2_clk_recalculate_rate(c);
}
static int tegra2_super_clk_enable(struct clk *c)
@@ -266,6 +292,7 @@ static int tegra2_super_clk_set_parent(struct clk *c, struct clk *p)
u32 val;
const struct clk_mux_sel *sel;
int shift;
+
 	val = clk_readl(c->reg + SUPER_CLK_MUX);
BUG_ON(((val & SUPER_STATE_MASK) != SUPER_STATE_RUN) &&
((val & SUPER_STATE_MASK) != SUPER_STATE_IDLE));
@@ -273,11 +300,18 @@ static int tegra2_super_clk_set_parent(struct clk *c, struct clk *p)
SUPER_IDLE_SOURCE_SHIFT : SUPER_RUN_SOURCE_SHIFT;
for (sel = c->inputs; sel->input != NULL; sel++) {
if (sel->input == p) {
- clk_reparent(c, p);
val &= ~(SUPER_SOURCE_MASK << shift);
val |= sel->value << shift;
+
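+			/* enable the new parent before the switch and release
+			 * the old one afterwards, so a clock that is in use
+			 * never runs from a disabled source */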
+ if (c->refcnt)
+ clk_enable_locked(p);
+
clk_writel(val, c->reg);
- c->rate = c->parent->rate;
+
+ if (c->refcnt && c->parent)
+ clk_disable_locked(c->parent);
+
+ clk_reparent(c, p);
return 0;
}
}
@@ -289,7 +323,61 @@ static struct clk_ops tegra_super_ops = {
.enable = tegra2_super_clk_enable,
.disable = tegra2_super_clk_disable,
.set_parent = tegra2_super_clk_set_parent,
- .recalculate_rate = tegra2_clk_recalculate_rate,
+};
+
+/* virtual cpu clock functions */
+/* Some clocks (cpu, memory bus) cannot be stopped while the SoC is running.
+   To change the frequency of these clocks, the parent pll may need to be
+   reprogrammed, so the clock must be moved off the pll, the pll reprogrammed,
+   and then the clock moved back to the pll.  A virtual clock hides this
+   sequence from its users.
+ */
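+/*
+ * For the virtual "cpu" clock defined below, for example, the main pll is
+ * pll_x and the backup source is clk_m: set_rate first parents cclk onto
+ * clk_m, reprograms pll_x, then moves cclk back onto pll_x.
+ */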
+static void tegra2_cpu_clk_init(struct clk *c)
+{
+}
+
+static int tegra2_cpu_clk_enable(struct clk *c)
+{
+ return 0;
+}
+
+static void tegra2_cpu_clk_disable(struct clk *c)
+{
+ pr_debug("%s on clock %s\n", __func__, c->name);
+
+ /* oops - don't disable the CPU clock! */
+ BUG();
+}
+
+static int tegra2_cpu_clk_set_rate(struct clk *c, unsigned long rate)
+{
+ int ret;
+ ret = clk_set_parent_locked(c->parent, c->backup);
+ if (ret) {
+ pr_err("Failed to switch cpu to clock %s\n", c->backup->name);
+ return ret;
+ }
+
+ ret = clk_set_rate_locked(c->main, rate);
+ if (ret) {
+ pr_err("Failed to change cpu pll to %lu\n", rate);
+ return ret;
+ }
+
+ ret = clk_set_parent_locked(c->parent, c->main);
+ if (ret) {
+ pr_err("Failed to switch cpu to clock %s\n", c->main->name);
+ return ret;
+ }
+
+ return 0;
+}
+
+static struct clk_ops tegra_cpu_ops = {
+ .init = tegra2_cpu_clk_init,
+ .enable = tegra2_cpu_clk_enable,
+ .disable = tegra2_cpu_clk_disable,
+ .set_rate = tegra2_cpu_clk_set_rate,
};
/* bus clock functions */
@@ -299,7 +387,6 @@ static void tegra2_bus_clk_init(struct clk *c)
c->state = ((val >> c->reg_shift) & BUS_CLK_DISABLE) ? OFF : ON;
c->div = ((val >> c->reg_shift) & BUS_CLK_DIV_MASK) + 1;
c->mul = 1;
- tegra2_clk_recalculate_rate(c);
}
static int tegra2_bus_clk_enable(struct clk *c)
@@ -340,27 +427,15 @@ static struct clk_ops tegra_bus_ops = {
.enable = tegra2_bus_clk_enable,
.disable = tegra2_bus_clk_disable,
.set_rate = tegra2_bus_clk_set_rate,
- .recalculate_rate = tegra2_clk_recalculate_rate,
};
/* PLL Functions */
-static unsigned long tegra2_pll_clk_recalculate_rate(struct clk *c)
-{
- u64 rate;
- rate = c->parent->rate;
- rate *= c->n;
- do_div(rate, c->m);
- if (c->p == 2)
- rate >>= 1;
- c->rate = rate;
- return c->rate;
-}
-
static int tegra2_pll_clk_wait_for_lock(struct clk *c)
{
ktime_t before;
before = ktime_get();
+
while (!(clk_readl(c->reg + PLL_BASE) & PLL_BASE_LOCK)) {
if (ktime_us_delta(ktime_get(), before) > 5000) {
pr_err("Timed out waiting for lock bit on pll %s",
@@ -380,24 +455,19 @@ static void tegra2_pll_clk_init(struct clk *c)
if (c->flags & PLL_FIXED && !(val & PLL_BASE_OVERRIDE)) {
pr_warning("Clock %s has unknown fixed frequency\n", c->name);
- c->n = 1;
- c->m = 0;
- c->p = 1;
+ c->mul = 1;
+ c->div = 1;
} else if (val & PLL_BASE_BYPASS) {
- c->n = 1;
- c->m = 1;
- c->p = 1;
+ c->mul = 1;
+ c->div = 1;
} else {
- c->n = (val & PLL_BASE_DIVN_MASK) >> PLL_BASE_DIVN_SHIFT;
- c->m = (val & PLL_BASE_DIVM_MASK) >> PLL_BASE_DIVM_SHIFT;
- c->p = (val & PLL_BASE_DIVP_MASK) ? 2 : 1;
+ c->mul = (val & PLL_BASE_DIVN_MASK) >> PLL_BASE_DIVN_SHIFT;
+ c->div = (val & PLL_BASE_DIVM_MASK) >> PLL_BASE_DIVM_SHIFT;
+ if (c->flags & PLLU)
+ c->div *= (val & PLLU_BASE_POST_DIV) ? 1 : 2;
+ else
+ c->div *= (val & PLL_BASE_DIVP_MASK) ? 2 : 1;
}
-
- val = clk_readl(c->reg + PLL_MISC(c));
- if (c->flags & PLL_HAS_CPCON)
- c->cpcon = (val & PLL_MISC_CPCON_MASK) >> PLL_MISC_CPCON_SHIFT;
-
- tegra2_pll_clk_recalculate_rate(c);
}
static int tegra2_pll_clk_enable(struct clk *c)
@@ -411,7 +481,7 @@ static int tegra2_pll_clk_enable(struct clk *c)
clk_writel(val, c->reg + PLL_BASE);
val = clk_readl(c->reg + PLL_MISC(c));
- val |= PLL_MISC_LOCK_ENABLE;
+ val |= PLL_MISC_LOCK_ENABLE(c);
clk_writel(val, c->reg + PLL_MISC(c));
tegra2_pll_clk_wait_for_lock(c);
@@ -441,33 +511,36 @@ static int tegra2_pll_clk_set_rate(struct clk *c, unsigned long rate)
input_rate = c->parent->rate;
for (sel = c->pll_table; sel->input_rate != 0; sel++) {
if (sel->input_rate == input_rate && sel->output_rate == rate) {
- c->n = sel->n;
- c->m = sel->m;
- c->p = sel->p;
- c->cpcon = sel->cpcon;
+ c->mul = sel->n;
+ c->div = sel->m * sel->p;
val = clk_readl(c->reg + PLL_BASE);
if (c->flags & PLL_FIXED)
val |= PLL_BASE_OVERRIDE;
val &= ~(PLL_BASE_DIVP_MASK | PLL_BASE_DIVN_MASK |
PLL_BASE_DIVM_MASK);
- val |= (c->m << PLL_BASE_DIVM_SHIFT) |
- (c->n << PLL_BASE_DIVN_SHIFT);
- BUG_ON(c->p > 2);
- if (c->p == 2)
- val |= 1 << PLL_BASE_DIVP_SHIFT;
+ val |= (sel->m << PLL_BASE_DIVM_SHIFT) |
+ (sel->n << PLL_BASE_DIVN_SHIFT);
+ BUG_ON(sel->p < 1 || sel->p > 2);
+ if (c->flags & PLLU) {
+ if (sel->p == 1)
+ val |= PLLU_BASE_POST_DIV;
+ } else {
+ if (sel->p == 2)
+ val |= 1 << PLL_BASE_DIVP_SHIFT;
+ }
clk_writel(val, c->reg + PLL_BASE);
if (c->flags & PLL_HAS_CPCON) {
- val = c->cpcon << PLL_MISC_CPCON_SHIFT;
- val |= PLL_MISC_LOCK_ENABLE;
+ val = clk_readl(c->reg + PLL_MISC(c));
+ val &= ~PLL_MISC_CPCON_MASK;
+ val |= sel->cpcon << PLL_MISC_CPCON_SHIFT;
clk_writel(val, c->reg + PLL_MISC(c));
}
if (c->state == ON)
tegra2_pll_clk_enable(c);
- c->rate = rate;
return 0;
}
}
@@ -479,7 +552,46 @@ static struct clk_ops tegra_pll_ops = {
.enable = tegra2_pll_clk_enable,
.disable = tegra2_pll_clk_disable,
.set_rate = tegra2_pll_clk_set_rate,
- .recalculate_rate = tegra2_pll_clk_recalculate_rate,
+};
+
+static void tegra2_pllx_clk_init(struct clk *c)
+{
+ tegra2_pll_clk_init(c);
+
+ if (tegra_sku_id() == 7)
+ c->max_rate = 750000000;
+}
+
+static struct clk_ops tegra_pllx_ops = {
+ .init = tegra2_pllx_clk_init,
+ .enable = tegra2_pll_clk_enable,
+ .disable = tegra2_pll_clk_disable,
+ .set_rate = tegra2_pll_clk_set_rate,
+};
+
+static int tegra2_plle_clk_enable(struct clk *c)
+{
+ u32 val;
+
+ pr_debug("%s on clock %s\n", __func__, c->name);
+
+ mdelay(1);
+
+ val = clk_readl(c->reg + PLL_BASE);
+ if (!(val & PLLE_MISC_READY))
+ return -EBUSY;
+
+ val = clk_readl(c->reg + PLL_BASE);
+ val |= PLL_BASE_ENABLE | PLL_BASE_BYPASS;
+ clk_writel(val, c->reg + PLL_BASE);
+
+ return 0;
+}
+
+static struct clk_ops tegra_plle_ops = {
+ .init = tegra2_pll_clk_init,
+ .enable = tegra2_plle_clk_enable,
+ .set_rate = tegra2_pll_clk_set_rate,
};
/* Clock divider ops */
@@ -503,8 +615,6 @@ static void tegra2_pll_div_clk_init(struct clk *c)
c->div = 1;
c->mul = 1;
}
-
- tegra2_clk_recalculate_rate(c);
}
static int tegra2_pll_div_clk_enable(struct clk *c)
@@ -565,7 +675,7 @@ static int tegra2_pll_div_clk_set_rate(struct clk *c, unsigned long rate)
int divider_u71;
pr_debug("%s: %s %lu\n", __func__, c->name, rate);
if (c->flags & DIV_U71) {
- divider_u71 = clk_div71_get_divider(c->parent, rate);
+ divider_u71 = clk_div71_get_divider(c->parent->rate, rate);
if (divider_u71 >= 0) {
val = clk_readl(c->reg);
new_val = val >> c->reg_shift;
@@ -580,25 +690,37 @@ static int tegra2_pll_div_clk_set_rate(struct clk *c, unsigned long rate)
clk_writel(val, c->reg);
c->div = divider_u71 + 2;
c->mul = 2;
- tegra2_clk_recalculate_rate(c);
return 0;
}
} else if (c->flags & DIV_2) {
- if (c->parent->rate == rate * 2) {
- c->rate = rate;
+ if (c->parent->rate == rate * 2)
return 0;
- }
}
return -EINVAL;
}
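+
+/* report the rate the u7.1 divider (or the fixed /2) would actually
+ produce, rounding down from the requested rate */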
+static long tegra2_pll_div_clk_round_rate(struct clk *c, unsigned long rate)
+{
+ int divider;
+ pr_debug("%s: %s %lu\n", __func__, c->name, rate);
+
+ if (c->flags & DIV_U71) {
+ divider = clk_div71_get_divider(c->parent->rate, rate);
+ if (divider < 0)
+ return divider;
+ return c->parent->rate * 2 / (divider + 2);
+ } else if (c->flags & DIV_2) {
+ return c->parent->rate / 2;
+ }
+ return -EINVAL;
+}
static struct clk_ops tegra_pll_div_ops = {
.init = tegra2_pll_div_clk_init,
.enable = tegra2_pll_div_clk_enable,
.disable = tegra2_pll_div_clk_disable,
.set_rate = tegra2_pll_div_clk_set_rate,
- .recalculate_rate = tegra2_clk_recalculate_rate,
+ .round_rate = tegra2_pll_div_clk_round_rate,
};
/* Periph clk ops */
@@ -621,9 +743,13 @@ static void tegra2_periph_clk_init(struct clk *c)
}
if (c->flags & DIV_U71) {
- u32 divu71 = val & PERIPH_CLK_SOURCE_DIV_MASK;
+ u32 divu71 = val & PERIPH_CLK_SOURCE_DIVU71_MASK;
c->div = divu71 + 2;
c->mul = 2;
+ } else if (c->flags & DIV_U16) {
+ u32 divu16 = val & PERIPH_CLK_SOURCE_DIVU16_MASK;
+ c->div = divu16 + 1;
+ c->mul = 1;
} else {
c->div = 1;
c->mul = 1;
@@ -637,7 +763,6 @@ static void tegra2_periph_clk_init(struct clk *c)
if (clk_readl(RST_DEVICES + PERIPH_CLK_TO_ENB_REG(c)) &
PERIPH_CLK_TO_ENB_BIT(c))
c->state = OFF;
- tegra2_clk_recalculate_rate(c);
}
static int tegra2_periph_clk_enable(struct clk *c)
@@ -692,12 +817,19 @@ static int tegra2_periph_clk_set_parent(struct clk *c, struct clk *p)
pr_debug("%s: %s %s\n", __func__, c->name, p->name);
for (sel = c->inputs; sel->input != NULL; sel++) {
if (sel->input == p) {
- clk_reparent(c, p);
val = clk_readl(c->reg);
val &= ~PERIPH_CLK_SOURCE_MASK;
val |= (sel->value) << PERIPH_CLK_SOURCE_SHIFT;
+
+ if (c->refcnt)
+ clk_enable_locked(p);
+
clk_writel(val, c->reg);
- c->rate = c->parent->rate;
+
+ if (c->refcnt && c->parent)
+ clk_disable_locked(c->parent);
+
+ clk_reparent(c, p);
return 0;
}
}
@@ -708,20 +840,55 @@ static int tegra2_periph_clk_set_parent(struct clk *c, struct clk *p)
static int tegra2_periph_clk_set_rate(struct clk *c, unsigned long rate)
{
u32 val;
- int divider_u71;
+ int divider;
pr_debug("%s: %lu\n", __func__, rate);
if (c->flags & DIV_U71) {
- divider_u71 = clk_div71_get_divider(c->parent, rate);
- if (divider_u71 >= 0) {
+ divider = clk_div71_get_divider(c->parent->rate, rate);
+ if (divider >= 0) {
val = clk_readl(c->reg);
- val &= ~PERIPH_CLK_SOURCE_DIV_MASK;
- val |= divider_u71;
+ val &= ~PERIPH_CLK_SOURCE_DIVU71_MASK;
+ val |= divider;
clk_writel(val, c->reg);
- c->div = divider_u71 + 2;
+ c->div = divider + 2;
c->mul = 2;
- tegra2_clk_recalculate_rate(c);
return 0;
}
+ } else if (c->flags & DIV_U16) {
+ divider = clk_div16_get_divider(c->parent->rate, rate);
+ if (divider >= 0) {
+ val = clk_readl(c->reg);
+ val &= ~PERIPH_CLK_SOURCE_DIVU16_MASK;
+ val |= divider;
+ clk_writel(val, c->reg);
+ c->div = divider + 1;
+ c->mul = 1;
+ return 0;
+ }
+ } else if (c->parent->rate <= rate) {
+ c->div = 1;
+ c->mul = 1;
+ return 0;
+ }
+ return -EINVAL;
+}
+
+static long tegra2_periph_clk_round_rate(struct clk *c,
+ unsigned long rate)
+{
+ int divider;
+ pr_debug("%s: %s %lu\n", __func__, c->name, rate);
+
+ if (c->flags & DIV_U71) {
+ divider = clk_div71_get_divider(c->parent->rate, rate);
+ if (divider < 0)
+ return divider;
+
+ return c->parent->rate * 2 / (divider + 2);
+ } else if (c->flags & DIV_U16) {
+ divider = clk_div16_get_divider(c->parent->rate, rate);
+ if (divider < 0)
+ return divider;
+ return c->parent->rate / (divider + 1);
}
return -EINVAL;
}
@@ -732,7 +899,7 @@ static struct clk_ops tegra_periph_clk_ops = {
.disable = &tegra2_periph_clk_disable,
.set_parent = &tegra2_periph_clk_set_parent,
.set_rate = &tegra2_periph_clk_set_rate,
- .recalculate_rate = &tegra2_clk_recalculate_rate,
+ .round_rate = &tegra2_periph_clk_round_rate,
};
/* Clock doubler ops */
@@ -744,21 +911,108 @@ static void tegra2_clk_double_init(struct clk *c)
if (!(clk_readl(CLK_OUT_ENB + PERIPH_CLK_TO_ENB_REG(c)) &
PERIPH_CLK_TO_ENB_BIT(c)))
c->state = OFF;
- tegra2_clk_recalculate_rate(c);
};
+static int tegra2_clk_double_set_rate(struct clk *c, unsigned long rate)
+{
+ if (rate != 2 * c->parent->rate)
+ return -EINVAL;
+ c->mul = 2;
+ c->div = 1;
+ return 0;
+}
+
static struct clk_ops tegra_clk_double_ops = {
.init = &tegra2_clk_double_init,
.enable = &tegra2_periph_clk_enable,
.disable = &tegra2_periph_clk_disable,
- .recalculate_rate = &tegra2_clk_recalculate_rate,
+ .set_rate = &tegra2_clk_double_set_rate,
+};
+
+static void tegra2_audio_sync_clk_init(struct clk *c)
+{
+ int source;
+ const struct clk_mux_sel *sel;
+ u32 val = clk_readl(c->reg);
+ c->state = (val & (1<<4)) ? OFF : ON;
+ source = val & 0xf;
+ for (sel = c->inputs; sel->input != NULL; sel++)
+ if (sel->value == source)
+ break;
+ BUG_ON(sel->input == NULL);
+ c->parent = sel->input;
+}
+
+static int tegra2_audio_sync_clk_enable(struct clk *c)
+{
+ clk_writel(0, c->reg);
+ return 0;
+}
+
+static void tegra2_audio_sync_clk_disable(struct clk *c)
+{
+ clk_writel(1, c->reg);
+}
+
+static int tegra2_audio_sync_clk_set_parent(struct clk *c, struct clk *p)
+{
+ u32 val;
+ const struct clk_mux_sel *sel;
+ for (sel = c->inputs; sel->input != NULL; sel++) {
+ if (sel->input == p) {
+ val = clk_readl(c->reg);
+ val &= ~0xf;
+ val |= sel->value;
+
+ if (c->refcnt)
+ clk_enable_locked(p);
+
+ clk_writel(val, c->reg);
+
+ if (c->refcnt && c->parent)
+ clk_disable_locked(c->parent);
+
+ clk_reparent(c, p);
+ return 0;
+ }
+ }
+
+ return -EINVAL;
+}
+
+static int tegra2_audio_sync_clk_set_rate(struct clk *c, unsigned long rate)
+{
+ unsigned long parent_rate;
+ if (!c->parent) {
+ pr_err("%s: clock has no parent\n", __func__);
+ return -EINVAL;
+ }
+ parent_rate = c->parent->rate;
+ if (rate != parent_rate) {
+ pr_err("%s: %s/%ld differs from parent %s/%ld\n",
+ __func__,
+ c->name, rate,
+ c->parent->name, parent_rate);
+ return -EINVAL;
+ }
+ c->rate = parent_rate;
+ return 0;
+}
+
+static struct clk_ops tegra_audio_sync_clk_ops = {
+ .init = tegra2_audio_sync_clk_init,
+ .enable = tegra2_audio_sync_clk_enable,
+ .disable = tegra2_audio_sync_clk_disable,
+ .set_rate = tegra2_audio_sync_clk_set_rate,
+ .set_parent = tegra2_audio_sync_clk_set_parent,
};
/* Clock definitions */
static struct clk tegra_clk_32k = {
.name = "clk_32k",
- .rate = 32678,
+ .rate = 32768,
.ops = NULL,
+ .max_rate = 32768,
};
static struct clk_pll_table tegra_pll_s_table[] = {
@@ -782,6 +1036,7 @@ static struct clk tegra_pll_s = {
.vco_min = 12000000,
.vco_max = 26000000,
.pll_table = tegra_pll_s_table,
+ .max_rate = 26000000,
};
static struct clk_mux_sel tegra_clk_m_sel[] = {
@@ -797,6 +1052,7 @@ static struct clk tegra_clk_m = {
.reg = 0x1fc,
.reg_mask = (1<<28),
.reg_shift = 28,
+ .max_rate = 26000000,
};
static struct clk_pll_table tegra_pll_c_table[] = {
@@ -816,6 +1072,7 @@ static struct clk tegra_pll_c = {
.vco_min = 20000000,
.vco_max = 1400000000,
.pll_table = tegra_pll_c_table,
+ .max_rate = 600000000,
};
static struct clk tegra_pll_c_out1 = {
@@ -825,9 +1082,18 @@ static struct clk tegra_pll_c_out1 = {
.parent = &tegra_pll_c,
.reg = 0x84,
.reg_shift = 0,
+ .max_rate = 600000000,
};
static struct clk_pll_table tegra_pll_m_table[] = {
+ { 12000000, 666000000, 666, 12, 1, 8},
+ { 13000000, 666000000, 666, 13, 1, 8},
+ { 19200000, 666000000, 555, 16, 1, 8},
+ { 26000000, 666000000, 666, 26, 1, 8},
+ { 12000000, 600000000, 600, 12, 1, 8},
+ { 13000000, 600000000, 600, 13, 1, 8},
+ { 19200000, 600000000, 375, 12, 1, 6},
+ { 26000000, 600000000, 600, 26, 1, 8},
{ 0, 0, 0, 0, 0, 0 },
};
@@ -844,6 +1110,7 @@ static struct clk tegra_pll_m = {
.vco_min = 20000000,
.vco_max = 1200000000,
.pll_table = tegra_pll_m_table,
+ .max_rate = 800000000,
};
static struct clk tegra_pll_m_out1 = {
@@ -853,6 +1120,7 @@ static struct clk tegra_pll_m_out1 = {
.parent = &tegra_pll_m,
.reg = 0x94,
.reg_shift = 0,
+ .max_rate = 600000000,
};
static struct clk_pll_table tegra_pll_p_table[] = {
@@ -880,6 +1148,7 @@ static struct clk tegra_pll_p = {
.vco_min = 20000000,
.vco_max = 1400000000,
.pll_table = tegra_pll_p_table,
+ .max_rate = 432000000,
};
static struct clk tegra_pll_p_out1 = {
@@ -889,6 +1158,7 @@ static struct clk tegra_pll_p_out1 = {
.parent = &tegra_pll_p,
.reg = 0xa4,
.reg_shift = 0,
+ .max_rate = 432000000,
};
static struct clk tegra_pll_p_out2 = {
@@ -898,6 +1168,7 @@ static struct clk tegra_pll_p_out2 = {
.parent = &tegra_pll_p,
.reg = 0xa4,
.reg_shift = 16,
+ .max_rate = 432000000,
};
static struct clk tegra_pll_p_out3 = {
@@ -907,6 +1178,7 @@ static struct clk tegra_pll_p_out3 = {
.parent = &tegra_pll_p,
.reg = 0xa8,
.reg_shift = 0,
+ .max_rate = 432000000,
};
static struct clk tegra_pll_p_out4 = {
@@ -916,6 +1188,7 @@ static struct clk tegra_pll_p_out4 = {
.parent = &tegra_pll_p,
.reg = 0xa8,
.reg_shift = 16,
+ .max_rate = 432000000,
};
static struct clk_pll_table tegra_pll_a_table[] = {
@@ -923,6 +1196,7 @@ static struct clk_pll_table tegra_pll_a_table[] = {
{ 28800000, 73728000, 64, 25, 1, 1},
{ 28800000, 11289600, 49, 25, 1, 1},
{ 28800000, 12288000, 64, 25, 1, 1},
+ { 28800000, 24000000, 5, 6, 1, 1},
{ 0, 0, 0, 0, 0, 0 },
};
@@ -939,6 +1213,7 @@ static struct clk tegra_pll_a = {
.vco_min = 20000000,
.vco_max = 1400000000,
.pll_table = tegra_pll_a_table,
+ .max_rate = 56448000,
};
static struct clk tegra_pll_a_out0 = {
@@ -948,6 +1223,7 @@ static struct clk tegra_pll_a_out0 = {
.parent = &tegra_pll_a,
.reg = 0xb4,
.reg_shift = 0,
+ .max_rate = 56448000,
};
static struct clk_pll_table tegra_pll_d_table[] = {
@@ -971,6 +1247,7 @@ static struct clk tegra_pll_d = {
.vco_min = 40000000,
.vco_max = 1000000000,
.pll_table = tegra_pll_d_table,
+ .max_rate = 1000000000,
};
static struct clk tegra_pll_d_out0 = {
@@ -978,19 +1255,20 @@ static struct clk tegra_pll_d_out0 = {
.ops = &tegra_pll_div_ops,
.flags = DIV_2 | PLLD,
.parent = &tegra_pll_d,
+ .max_rate = 500000000,
};
static struct clk_pll_table tegra_pll_u_table[] = {
- { 12000000, 480000000, 960, 12, 1, 0},
- { 13000000, 480000000, 960, 13, 1, 0},
- { 19200000, 480000000, 200, 4, 1, 0},
- { 26000000, 480000000, 960, 26, 1, 0},
+ { 12000000, 480000000, 960, 12, 2, 0},
+ { 13000000, 480000000, 960, 13, 2, 0},
+ { 19200000, 480000000, 200, 4, 2, 0},
+ { 26000000, 480000000, 960, 26, 2, 0},
{ 0, 0, 0, 0, 0, 0 },
};
static struct clk tegra_pll_u = {
.name = "pll_u",
- .flags = 0,
+ .flags = PLLU,
.ops = &tegra_pll_ops,
.reg = 0xc0,
.input_min = 2000000,
@@ -1001,24 +1279,59 @@ static struct clk tegra_pll_u = {
.vco_min = 480000000,
.vco_max = 960000000,
.pll_table = tegra_pll_u_table,
+ .max_rate = 480000000,
};
static struct clk_pll_table tegra_pll_x_table[] = {
+ /* 1 GHz */
{ 12000000, 1000000000, 1000, 12, 1, 12},
{ 13000000, 1000000000, 1000, 13, 1, 12},
{ 19200000, 1000000000, 625, 12, 1, 8},
{ 26000000, 1000000000, 1000, 26, 1, 12},
- { 12000000, 750000000, 750, 12, 1, 12},
- { 13000000, 750000000, 750, 13, 1, 12},
- { 19200000, 750000000, 625, 16, 1, 8},
- { 26000000, 750000000, 750, 26, 1, 12},
+
+ /* 912 MHz */
+ { 12000000, 912000000, 912, 12, 1, 12},
+ { 13000000, 912000000, 912, 13, 1, 12},
+ { 19200000, 912000000, 760, 16, 1, 8},
+ { 26000000, 912000000, 912, 26, 1, 12},
+
+ /* 816 MHz */
+ { 12000000, 816000000, 816, 12, 1, 12},
+ { 13000000, 816000000, 816, 13, 1, 12},
+ { 19200000, 816000000, 680, 16, 1, 8},
+ { 26000000, 816000000, 816, 26, 1, 12},
+
+ /* 760 MHz */
+ { 12000000, 760000000, 760, 12, 1, 12},
+ { 13000000, 760000000, 760, 13, 1, 12},
+ { 19200000, 760000000, 950, 24, 1, 8},
+ { 26000000, 760000000, 760, 26, 1, 12},
+
+ /* 608 MHz */
+	{ 12000000, 608000000, 608, 12, 1, 12},
+	{ 13000000, 608000000, 608, 13, 1, 12},
+	{ 19200000, 608000000, 380, 12, 1, 8},
+	{ 26000000, 608000000, 608, 26, 1, 12},
+
+ /* 456 MHz */
+ { 12000000, 456000000, 456, 12, 1, 12},
+ { 13000000, 456000000, 456, 13, 1, 12},
+ { 19200000, 456000000, 380, 16, 1, 8},
+ { 26000000, 456000000, 456, 26, 1, 12},
+
+ /* 312 MHz */
+ { 12000000, 312000000, 312, 12, 1, 12},
+ { 13000000, 312000000, 312, 13, 1, 12},
+ { 19200000, 312000000, 260, 16, 1, 8},
+ { 26000000, 312000000, 312, 26, 1, 12},
+
{ 0, 0, 0, 0, 0, 0 },
};
static struct clk tegra_pll_x = {
.name = "pll_x",
.flags = PLL_HAS_CPCON | PLL_ALT_MISC_REG,
- .ops = &tegra_pll_ops,
+ .ops = &tegra_pllx_ops,
.reg = 0xe0,
.input_min = 2000000,
.input_max = 31000000,
@@ -1028,6 +1341,24 @@ static struct clk tegra_pll_x = {
.vco_min = 20000000,
.vco_max = 1200000000,
.pll_table = tegra_pll_x_table,
+ .max_rate = 1000000000,
+};
+
+static struct clk_pll_table tegra_pll_e_table[] = {
+ { 12000000, 100000000, 200, 24, 1, 0 },
+ { 0, 0, 0, 0, 0, 0 },
+};
+
+static struct clk tegra_pll_e = {
+ .name = "pll_e",
+ .flags = PLL_ALT_MISC_REG,
+ .ops = &tegra_plle_ops,
+ .input_min = 12000000,
+ .input_max = 12000000,
+ .max_rate = 100000000,
+ .parent = &tegra_clk_m,
+ .reg = 0xe8,
+ .pll_table = tegra_pll_e_table,
};
static struct clk tegra_clk_d = {
@@ -1038,19 +1369,77 @@ static struct clk tegra_clk_d = {
.reg = 0x34,
.reg_shift = 12,
.parent = &tegra_clk_m,
+ .max_rate = 52000000,
+};
+
+/* initialized before peripheral clocks */
+static struct clk_mux_sel mux_audio_sync_clk[8+1];
+static const struct audio_sources {
+ const char *name;
+ int value;
+} mux_audio_sync_clk_sources[] = {
+ { .name = "spdif_in", .value = 0 },
+ { .name = "i2s1", .value = 1 },
+ { .name = "i2s2", .value = 2 },
+ { .name = "pll_a_out0", .value = 4 },
+#if 0 /* FIXME: not implemented */
+ { .name = "ac97", .value = 3 },
+ { .name = "ext_audio_clk2", .value = 5 },
+ { .name = "ext_audio_clk1", .value = 6 },
+ { .name = "ext_vimclk", .value = 7 },
+#endif
+ { 0, 0 }
+};
+
+static struct clk tegra_clk_audio = {
+ .name = "audio",
+ .inputs = mux_audio_sync_clk,
+ .reg = 0x38,
+ .max_rate = 24000000,
+ .ops = &tegra_audio_sync_clk_ops
};
-/* FIXME: need tegra_audio
static struct clk tegra_clk_audio_2x = {
- .name = "clk_d",
+ .name = "audio_2x",
.flags = PERIPH_NO_RESET,
+ .max_rate = 48000000,
.ops = &tegra_clk_double_ops,
.clk_num = 89,
.reg = 0x34,
.reg_shift = 8,
- .parent = &tegra_audio,
+ .parent = &tegra_clk_audio,
+};
+
+struct clk_lookup tegra_audio_clk_lookups[] = {
+ { .con_id = "audio", .clk = &tegra_clk_audio },
+ { .con_id = "audio_2x", .clk = &tegra_clk_audio_2x }
+};
+
+/* This is called after peripheral clocks are initialized, as the
+ * audio_sync clock depends on some of the peripheral clocks.
+ */
+
+static void init_audio_sync_clock_mux(void)
+{
+ int i;
+ struct clk_mux_sel *sel = mux_audio_sync_clk;
+ const struct audio_sources *src = mux_audio_sync_clk_sources;
+ struct clk_lookup *lookup;
+
+ for (i = 0; src->name; i++, sel++, src++) {
+ sel->input = tegra_get_clock_by_name(src->name);
+ if (!sel->input)
+ pr_err("%s: could not find clk %s\n", __func__,
+ src->name);
+ sel->value = src->value;
+ }
+
+ lookup = tegra_audio_clk_lookups;
+ for (i = 0; i < ARRAY_SIZE(tegra_audio_clk_lookups); i++, lookup++) {
+ clk_init(lookup->clk);
+ clkdev_add(lookup);
+ }
}
-*/
static struct clk_mux_sel mux_cclk[] = {
{ .input = &tegra_clk_m, .value = 0},
@@ -1077,27 +1466,40 @@ static struct clk_mux_sel mux_sclk[] = {
{ 0, 0},
};
-static struct clk tegra_clk_cpu = {
- .name = "cpu",
+static struct clk tegra_clk_cclk = {
+ .name = "cclk",
.inputs = mux_cclk,
.reg = 0x20,
.ops = &tegra_super_ops,
+ .max_rate = 1000000000,
};
-static struct clk tegra_clk_sys = {
- .name = "sys",
+static struct clk tegra_clk_sclk = {
+ .name = "sclk",
.inputs = mux_sclk,
.reg = 0x28,
.ops = &tegra_super_ops,
+ .max_rate = 600000000,
+};
+
+static struct clk tegra_clk_virtual_cpu = {
+ .name = "cpu",
+ .parent = &tegra_clk_cclk,
+ .main = &tegra_pll_x,
+ .backup = &tegra_clk_m,
+ .ops = &tegra_cpu_ops,
+ .max_rate = 1000000000,
+ .dvfs = &tegra_dvfs_virtual_cpu_dvfs,
};
static struct clk tegra_clk_hclk = {
.name = "hclk",
.flags = DIV_BUS,
- .parent = &tegra_clk_sys,
+ .parent = &tegra_clk_sclk,
.reg = 0x30,
.reg_shift = 4,
.ops = &tegra_bus_ops,
+ .max_rate = 240000000,
};
static struct clk tegra_clk_pclk = {
@@ -1107,6 +1509,7 @@ static struct clk tegra_clk_pclk = {
.reg = 0x30,
.reg_shift = 0,
.ops = &tegra_bus_ops,
+ .max_rate = 108000000,
};
static struct clk_mux_sel mux_pllm_pllc_pllp_plla[] = {
@@ -1133,10 +1536,9 @@ static struct clk_mux_sel mux_pllp_pllc_pllm_clkm[] = {
{ 0, 0},
};
-static struct clk_mux_sel mux_plla_audio_pllp_clkm[] = {
- {.input = &tegra_pll_a, .value = 0},
- /* FIXME: no mux defined for tegra_audio
- {.input = &tegra_audio, .value = 1},*/
+static struct clk_mux_sel mux_pllaout0_audio2x_pllp_clkm[] = {
+ {.input = &tegra_pll_a_out0, .value = 0},
+ {.input = &tegra_clk_audio_2x, .value = 1},
{.input = &tegra_pll_p, .value = 2},
{.input = &tegra_clk_m, .value = 3},
{ 0, 0},
@@ -1153,8 +1555,7 @@ static struct clk_mux_sel mux_pllp_plld_pllc_clkm[] = {
static struct clk_mux_sel mux_pllp_pllc_audio_clkm_clk32[] = {
{.input = &tegra_pll_p, .value = 0},
{.input = &tegra_pll_c, .value = 1},
- /* FIXME: no mux defined for tegra_audio
- {.input = &tegra_audio, .value = 2},*/
+ {.input = &tegra_clk_audio, .value = 2},
{.input = &tegra_clk_m, .value = 3},
{.input = &tegra_clk_32k, .value = 4},
{ 0, 0},
@@ -1187,7 +1588,7 @@ static struct clk_mux_sel mux_clk_32k[] = {
{ 0, 0},
};
-#define PERIPH_CLK(_name, _dev, _con, _clk_num, _reg, _inputs, _flags) \
+#define PERIPH_CLK(_name, _dev, _con, _clk_num, _reg, _max, _inputs, _flags) \
{ \
.name = _name, \
.lookup = { \
@@ -1199,72 +1600,79 @@ static struct clk_mux_sel mux_clk_32k[] = {
.reg = _reg, \
.inputs = _inputs, \
.flags = _flags, \
+ .max_rate = _max, \
}
struct clk tegra_periph_clks[] = {
- PERIPH_CLK("rtc", "rtc-tegra", NULL, 4, 0, mux_clk_32k, PERIPH_NO_RESET),
- PERIPH_CLK("timer", "timer", NULL, 5, 0, mux_clk_m, 0),
- PERIPH_CLK("i2s1", "i2s.0", NULL, 11, 0x100, mux_plla_audio_pllp_clkm, MUX | DIV_U71),
- PERIPH_CLK("i2s2", "i2s.1", NULL, 18, 0x104, mux_plla_audio_pllp_clkm, MUX | DIV_U71),
+ PERIPH_CLK("rtc", "rtc-tegra", NULL, 4, 0, 32768, mux_clk_32k, PERIPH_NO_RESET),
+ PERIPH_CLK("timer", "timer", NULL, 5, 0, 26000000, mux_clk_m, 0),
+ PERIPH_CLK("i2s1", "i2s.0", NULL, 11, 0x100, 26000000, mux_pllaout0_audio2x_pllp_clkm, MUX | DIV_U71),
+ PERIPH_CLK("i2s2", "i2s.1", NULL, 18, 0x104, 26000000, mux_pllaout0_audio2x_pllp_clkm, MUX | DIV_U71),
/* FIXME: spdif has 2 clocks but 1 enable */
- PERIPH_CLK("spdif_out", "spdif_out", NULL, 10, 0x108, mux_plla_audio_pllp_clkm, MUX | DIV_U71),
- PERIPH_CLK("spdif_in", "spdif_in", NULL, 10, 0x10c, mux_pllp_pllc_pllm, MUX | DIV_U71),
- PERIPH_CLK("pwm", "pwm", NULL, 17, 0x110, mux_pllp_pllc_audio_clkm_clk32, MUX | DIV_U71),
- PERIPH_CLK("spi", "spi", NULL, 43, 0x114, mux_pllp_pllc_pllm_clkm, MUX | DIV_U71),
- PERIPH_CLK("xio", "xio", NULL, 45, 0x120, mux_pllp_pllc_pllm_clkm, MUX | DIV_U71),
- PERIPH_CLK("twc", "twc", NULL, 16, 0x12c, mux_pllp_pllc_pllm_clkm, MUX | DIV_U71),
- PERIPH_CLK("sbc1", "spi_tegra.0", NULL, 41, 0x134, mux_pllp_pllc_pllm_clkm, MUX | DIV_U71),
- PERIPH_CLK("sbc2", "spi_tegra.1", NULL, 44, 0x118, mux_pllp_pllc_pllm_clkm, MUX | DIV_U71),
- PERIPH_CLK("sbc3", "spi_tegra.2", NULL, 46, 0x11c, mux_pllp_pllc_pllm_clkm, MUX | DIV_U71),
- PERIPH_CLK("sbc4", "spi_tegra.3", NULL, 68, 0x1b4, mux_pllp_pllc_pllm_clkm, MUX | DIV_U71),
- PERIPH_CLK("ide", "ide", NULL, 25, 0x144, mux_pllp_pllc_pllm_clkm, MUX | DIV_U71),
- PERIPH_CLK("ndflash", "tegra_nand", NULL, 13, 0x160, mux_pllp_pllc_pllm_clkm, MUX | DIV_U71),
+ PERIPH_CLK("spdif_out", "spdif_out", NULL, 10, 0x108, 100000000, mux_pllaout0_audio2x_pllp_clkm, MUX | DIV_U71),
+ PERIPH_CLK("spdif_in", "spdif_in", NULL, 10, 0x10c, 100000000, mux_pllp_pllc_pllm, MUX | DIV_U71),
+ PERIPH_CLK("pwm", "pwm", NULL, 17, 0x110, 432000000, mux_pllp_pllc_audio_clkm_clk32, MUX | DIV_U71),
+ PERIPH_CLK("spi", "spi", NULL, 43, 0x114, 40000000, mux_pllp_pllc_pllm_clkm, MUX | DIV_U71),
+ PERIPH_CLK("xio", "xio", NULL, 45, 0x120, 150000000, mux_pllp_pllc_pllm_clkm, MUX | DIV_U71),
+ PERIPH_CLK("twc", "twc", NULL, 16, 0x12c, 150000000, mux_pllp_pllc_pllm_clkm, MUX | DIV_U71),
+ PERIPH_CLK("sbc1", "spi_tegra.0", NULL, 41, 0x134, 160000000, mux_pllp_pllc_pllm_clkm, MUX | DIV_U71),
+ PERIPH_CLK("sbc2", "spi_tegra.1", NULL, 44, 0x118, 160000000, mux_pllp_pllc_pllm_clkm, MUX | DIV_U71),
+ PERIPH_CLK("sbc3", "spi_tegra.2", NULL, 46, 0x11c, 160000000, mux_pllp_pllc_pllm_clkm, MUX | DIV_U71),
+ PERIPH_CLK("sbc4", "spi_tegra.3", NULL, 68, 0x1b4, 160000000, mux_pllp_pllc_pllm_clkm, MUX | DIV_U71),
+ PERIPH_CLK("ide", "ide", NULL, 25, 0x144, 100000000, mux_pllp_pllc_pllm_clkm, MUX | DIV_U71), /* requires min voltage */
+ PERIPH_CLK("ndflash", "tegra_nand", NULL, 13, 0x160, 164000000, mux_pllp_pllc_pllm_clkm, MUX | DIV_U71), /* scales with voltage */
/* FIXME: vfir shares an enable with uartb */
- PERIPH_CLK("vfir", "vfir", NULL, 7, 0x168, mux_pllp_pllc_pllm_clkm, MUX | DIV_U71),
- PERIPH_CLK("sdmmc1", "sdhci-tegra.0", NULL, 14, 0x150, mux_pllp_pllc_pllm_clkm, MUX | DIV_U71),
- PERIPH_CLK("sdmmc2", "sdhci-tegra.1", NULL, 9, 0x154, mux_pllp_pllc_pllm_clkm, MUX | DIV_U71),
- PERIPH_CLK("sdmmc3", "sdhci-tegra.2", NULL, 69, 0x1bc, mux_pllp_pllc_pllm_clkm, MUX | DIV_U71),
- PERIPH_CLK("sdmmc4", "sdhci-tegra.3", NULL, 15, 0x160, mux_pllp_pllc_pllm_clkm, MUX | DIV_U71),
- PERIPH_CLK("vde", "vde", NULL, 61, 0x1c8, mux_pllp_pllc_pllm_clkm, MUX | DIV_U71),
- PERIPH_CLK("csite", "csite", NULL, 73, 0x1d4, mux_pllp_pllc_pllm_clkm, MUX | DIV_U71),
+ PERIPH_CLK("vfir", "vfir", NULL, 7, 0x168, 72000000, mux_pllp_pllc_pllm_clkm, MUX | DIV_U71),
+ PERIPH_CLK("sdmmc1", "sdhci-tegra.0", NULL, 14, 0x150, 52000000, mux_pllp_pllc_pllm_clkm, MUX | DIV_U71), /* scales with voltage */
+ PERIPH_CLK("sdmmc2", "sdhci-tegra.1", NULL, 9, 0x154, 52000000, mux_pllp_pllc_pllm_clkm, MUX | DIV_U71), /* scales with voltage */
+ PERIPH_CLK("sdmmc3", "sdhci-tegra.2", NULL, 69, 0x1bc, 52000000, mux_pllp_pllc_pllm_clkm, MUX | DIV_U71), /* scales with voltage */
+ PERIPH_CLK("sdmmc4", "sdhci-tegra.3", NULL, 15, 0x160, 52000000, mux_pllp_pllc_pllm_clkm, MUX | DIV_U71), /* scales with voltage */
+ PERIPH_CLK("vde", "vde", NULL, 61, 0x1c8, 250000000, mux_pllp_pllc_pllm_clkm, MUX | DIV_U71), /* scales with voltage and process_id */
+ PERIPH_CLK("csite", "csite", NULL, 73, 0x1d4, 144000000, mux_pllp_pllc_pllm_clkm, MUX | DIV_U71), /* max rate ??? */
/* FIXME: what is la? */
- PERIPH_CLK("la", "la", NULL, 76, 0x1f8, mux_pllp_pllc_pllm_clkm, MUX | DIV_U71),
- PERIPH_CLK("owr", "owr", NULL, 71, 0x1cc, mux_pllp_pllc_pllm_clkm, MUX | DIV_U71),
- PERIPH_CLK("nor", "nor", NULL, 42, 0x1d0, mux_pllp_pllc_pllm_clkm, MUX | DIV_U71),
- PERIPH_CLK("mipi", "mipi", NULL, 50, 0x174, mux_pllp_pllc_pllm_clkm, MUX | DIV_U71),
- PERIPH_CLK("i2c1", "tegra-i2c.0", NULL, 12, 0x124, mux_pllp_pllc_pllm_clkm, MUX | DIV_U71),
- PERIPH_CLK("i2c2", "tegra-i2c.1", NULL, 54, 0x198, mux_pllp_pllc_pllm_clkm, MUX | DIV_U71),
- PERIPH_CLK("i2c3", "tegra-i2c.2", NULL, 67, 0x1b8, mux_pllp_pllc_pllm_clkm, MUX | DIV_U71),
- PERIPH_CLK("dvc", "tegra-i2c.3", NULL, 47, 0x128, mux_pllp_pllc_pllm_clkm, MUX | DIV_U71),
- PERIPH_CLK("i2c1_i2c", "tegra-i2c.0", "i2c", 0, 0, mux_pllp_out3, 0),
- PERIPH_CLK("i2c2_i2c", "tegra-i2c.1", "i2c", 0, 0, mux_pllp_out3, 0),
- PERIPH_CLK("i2c3_i2c", "tegra-i2c.2", "i2c", 0, 0, mux_pllp_out3, 0),
- PERIPH_CLK("dvc_i2c", "tegra-i2c.3", "i2c", 0, 0, mux_pllp_out3, 0),
- PERIPH_CLK("uarta", "uart.0", NULL, 6, 0x178, mux_pllp_pllc_pllm_clkm, MUX | DIV_U71),
- PERIPH_CLK("uartb", "uart.1", NULL, 7, 0x17c, mux_pllp_pllc_pllm_clkm, MUX | DIV_U71),
- PERIPH_CLK("uartc", "uart.2", NULL, 55, 0x1a0, mux_pllp_pllc_pllm_clkm, MUX | DIV_U71),
- PERIPH_CLK("uartd", "uart.3", NULL, 65, 0x1c0, mux_pllp_pllc_pllm_clkm, MUX | DIV_U71),
- PERIPH_CLK("uarte", "uart.4", NULL, 66, 0x1c4, mux_pllp_pllc_pllm_clkm, MUX | DIV_U71),
- PERIPH_CLK("3d", "3d", NULL, 24, 0x158, mux_pllm_pllc_pllp_plla, MUX | DIV_U71 | PERIPH_MANUAL_RESET),
- PERIPH_CLK("2d", "2d", NULL, 21, 0x15c, mux_pllm_pllc_pllp_plla, MUX | DIV_U71),
+ PERIPH_CLK("la", "la", NULL, 76, 0x1f8, 26000000, mux_pllp_pllc_pllm_clkm, MUX | DIV_U71),
+ PERIPH_CLK("owr", "tegra_w1", NULL, 71, 0x1cc, 26000000, mux_pllp_pllc_pllm_clkm, MUX | DIV_U71),
+ PERIPH_CLK("nor", "nor", NULL, 42, 0x1d0, 92000000, mux_pllp_pllc_pllm_clkm, MUX | DIV_U71), /* requires min voltage */
+ PERIPH_CLK("mipi", "mipi", NULL, 50, 0x174, 60000000, mux_pllp_pllc_pllm_clkm, MUX | DIV_U71), /* scales with voltage */
+ PERIPH_CLK("i2c1", "tegra-i2c.0", NULL, 12, 0x124, 26000000, mux_pllp_pllc_pllm_clkm, MUX | DIV_U16),
+ PERIPH_CLK("i2c2", "tegra-i2c.1", NULL, 54, 0x198, 26000000, mux_pllp_pllc_pllm_clkm, MUX | DIV_U16),
+ PERIPH_CLK("i2c3", "tegra-i2c.2", NULL, 67, 0x1b8, 26000000, mux_pllp_pllc_pllm_clkm, MUX | DIV_U16),
+ PERIPH_CLK("dvc", "tegra-i2c.3", NULL, 47, 0x128, 26000000, mux_pllp_pllc_pllm_clkm, MUX | DIV_U16),
+ PERIPH_CLK("i2c1_i2c", "tegra-i2c.0", "i2c", 0, 0, 72000000, mux_pllp_out3, 0),
+ PERIPH_CLK("i2c2_i2c", "tegra-i2c.1", "i2c", 0, 0, 72000000, mux_pllp_out3, 0),
+ PERIPH_CLK("i2c3_i2c", "tegra-i2c.2", "i2c", 0, 0, 72000000, mux_pllp_out3, 0),
+ PERIPH_CLK("dvc_i2c", "tegra-i2c.3", "i2c", 0, 0, 72000000, mux_pllp_out3, 0),
+ PERIPH_CLK("uarta", "uart.0", NULL, 6, 0x178, 216000000, mux_pllp_pllc_pllm_clkm, MUX),
+ PERIPH_CLK("uartb", "uart.1", NULL, 7, 0x17c, 216000000, mux_pllp_pllc_pllm_clkm, MUX),
+ PERIPH_CLK("uartc", "uart.2", NULL, 55, 0x1a0, 216000000, mux_pllp_pllc_pllm_clkm, MUX),
+ PERIPH_CLK("uartd", "uart.3", NULL, 65, 0x1c0, 216000000, mux_pllp_pllc_pllm_clkm, MUX),
+ PERIPH_CLK("uarte", "uart.4", NULL, 66, 0x1c4, 216000000, mux_pllp_pllc_pllm_clkm, MUX),
+ PERIPH_CLK("3d", "3d", NULL, 24, 0x158, 300000000, mux_pllm_pllc_pllp_plla, MUX | DIV_U71 | PERIPH_MANUAL_RESET), /* scales with voltage and process_id */
+ PERIPH_CLK("2d", "2d", NULL, 21, 0x15c, 300000000, mux_pllm_pllc_pllp_plla, MUX | DIV_U71), /* scales with voltage and process_id */
/* FIXME: vi and vi_sensor share an enable */
- PERIPH_CLK("vi", "vi", NULL, 20, 0x148, mux_pllm_pllc_pllp_plla, MUX | DIV_U71),
- PERIPH_CLK("vi_sensor", "vi_sensor", NULL, 20, 0x1a8, mux_pllm_pllc_pllp_plla, MUX | DIV_U71),
- PERIPH_CLK("epp", "epp", NULL, 19, 0x16c, mux_pllm_pllc_pllp_plla, MUX | DIV_U71),
- PERIPH_CLK("mpe", "mpe", NULL, 60, 0x170, mux_pllm_pllc_pllp_plla, MUX | DIV_U71),
- PERIPH_CLK("host1x", "host1x", NULL, 28, 0x180, mux_pllm_pllc_pllp_plla, MUX | DIV_U71),
+ PERIPH_CLK("vi", "vi", NULL, 20, 0x148, 150000000, mux_pllm_pllc_pllp_plla, MUX | DIV_U71), /* scales with voltage and process_id */
+ PERIPH_CLK("vi_sensor", "vi_sensor", NULL, 20, 0x1a8, 150000000, mux_pllm_pllc_pllp_plla, MUX | DIV_U71 | PERIPH_NO_RESET), /* scales with voltage and process_id */
+ PERIPH_CLK("epp", "epp", NULL, 19, 0x16c, 300000000, mux_pllm_pllc_pllp_plla, MUX | DIV_U71), /* scales with voltage and process_id */
+ PERIPH_CLK("mpe", "mpe", NULL, 60, 0x170, 250000000, mux_pllm_pllc_pllp_plla, MUX | DIV_U71), /* scales with voltage and process_id */
+ PERIPH_CLK("host1x", "host1x", NULL, 28, 0x180, 166000000, mux_pllm_pllc_pllp_plla, MUX | DIV_U71), /* scales with voltage and process_id */
/* FIXME: cve and tvo share an enable */
- PERIPH_CLK("cve", "cve", NULL, 49, 0x140, mux_pllp_plld_pllc_clkm, MUX | DIV_U71),
- PERIPH_CLK("tvo", "tvo", NULL, 49, 0x188, mux_pllp_plld_pllc_clkm, MUX | DIV_U71),
- PERIPH_CLK("hdmi", "hdmi", NULL, 51, 0x18c, mux_pllp_plld_pllc_clkm, MUX | DIV_U71),
- PERIPH_CLK("tvdac", "tvdac", NULL, 53, 0x194, mux_pllp_plld_pllc_clkm, MUX | DIV_U71),
- PERIPH_CLK("disp1", "tegrafb.0", NULL, 27, 0x138, mux_pllp_plld_pllc_clkm, MUX | DIV_U71),
- PERIPH_CLK("disp2", "tegrafb.1", NULL, 26, 0x13c, mux_pllp_plld_pllc_clkm, MUX | DIV_U71),
- PERIPH_CLK("usbd", "fsl-tegra-udc", NULL, 22, 0, mux_clk_m, 0),
- PERIPH_CLK("usb2", "usb.1", NULL, 58, 0, mux_clk_m, 0),
- PERIPH_CLK("usb3", "usb.2", NULL, 59, 0, mux_clk_m, 0),
- PERIPH_CLK("emc", "emc", NULL, 57, 0x19c, mux_pllm_pllc_pllp_clkm, MUX | DIV_U71 | PERIPH_EMC_ENB),
- PERIPH_CLK("dsi", "dsi", NULL, 48, 0, mux_plld, 0),
+ PERIPH_CLK("cve", "cve", NULL, 49, 0x140, 250000000, mux_pllp_plld_pllc_clkm, MUX | DIV_U71), /* requires min voltage */
+ PERIPH_CLK("tvo", "tvo", NULL, 49, 0x188, 250000000, mux_pllp_plld_pllc_clkm, MUX | DIV_U71), /* requires min voltage */
+ PERIPH_CLK("hdmi", "hdmi", NULL, 51, 0x18c, 148500000, mux_pllp_plld_pllc_clkm, MUX | DIV_U71), /* requires min voltage */
+ PERIPH_CLK("tvdac", "tvdac", NULL, 53, 0x194, 250000000, mux_pllp_plld_pllc_clkm, MUX | DIV_U71), /* requires min voltage */
+ PERIPH_CLK("disp1", "tegrafb.0", NULL, 27, 0x138, 190000000, mux_pllp_plld_pllc_clkm, MUX | DIV_U71), /* scales with voltage and process_id */
+ PERIPH_CLK("disp2", "tegrafb.1", NULL, 26, 0x13c, 190000000, mux_pllp_plld_pllc_clkm, MUX | DIV_U71), /* scales with voltage and process_id */
+ PERIPH_CLK("usbd", "fsl-tegra-udc", NULL, 22, 0, 480000000, mux_clk_m, 0), /* requires min voltage */
+ PERIPH_CLK("usb2", "tegra-ehci.1", NULL, 58, 0, 480000000, mux_clk_m, 0), /* requires min voltage */
+ PERIPH_CLK("usb3", "tegra-ehci.2", NULL, 59, 0, 480000000, mux_clk_m, 0), /* requires min voltage */
+ PERIPH_CLK("emc", "emc", NULL, 57, 0x19c, 800000000, mux_pllm_pllc_pllp_clkm, MUX | DIV_U71 | PERIPH_EMC_ENB),
+ PERIPH_CLK("dsi", "dsi", NULL, 48, 0, 500000000, mux_plld, 0), /* scales with voltage */
+ PERIPH_CLK("csi", "csi", NULL, 52, 0, 72000000, mux_pllp_out3, 0),
+ PERIPH_CLK("isp", "isp", NULL, 23, 0, 150000000, mux_clk_m, 0), /* same frequency as VI */
+ PERIPH_CLK("csus", "csus", NULL, 92, 0, 150000000, mux_clk_m, PERIPH_NO_RESET),
+ PERIPH_CLK("pex", NULL, "pex", 70, 0, 26000000, mux_clk_m, PERIPH_MANUAL_RESET),
+ PERIPH_CLK("afi", NULL, "afi", 72, 0, 26000000, mux_clk_m, PERIPH_MANUAL_RESET),
+ PERIPH_CLK("pcie_xclk", NULL, "pcie_xclk", 74, 0, 26000000, mux_clk_m, PERIPH_MANUAL_RESET),
};
#define CLK_DUPLICATE(_name, _dev, _con) \
@@ -1286,6 +1694,9 @@ struct clk_duplicate tegra_clk_duplicates[] = {
CLK_DUPLICATE("uartc", "tegra_uart.2", NULL),
CLK_DUPLICATE("uartd", "tegra_uart.3", NULL),
CLK_DUPLICATE("uarte", "tegra_uart.4", NULL),
+ CLK_DUPLICATE("host1x", "tegrafb.0", "host1x"),
+ CLK_DUPLICATE("host1x", "tegrafb.1", "host1x"),
+ CLK_DUPLICATE("usbd", "tegra-ehci.0", NULL),
};
#define CLK(dev, con, ck) \
@@ -1315,11 +1726,13 @@ struct clk_lookup tegra_clk_lookups[] = {
CLK(NULL, "pll_d_out0", &tegra_pll_d_out0),
CLK(NULL, "pll_u", &tegra_pll_u),
CLK(NULL, "pll_x", &tegra_pll_x),
- CLK(NULL, "cpu", &tegra_clk_cpu),
- CLK(NULL, "sys", &tegra_clk_sys),
+ CLK(NULL, "pll_e", &tegra_pll_e),
+ CLK(NULL, "cclk", &tegra_clk_cclk),
+ CLK(NULL, "sclk", &tegra_clk_sclk),
CLK(NULL, "hclk", &tegra_clk_hclk),
CLK(NULL, "pclk", &tegra_clk_pclk),
CLK(NULL, "clk_d", &tegra_clk_d),
+ CLK(NULL, "cpu", &tegra_clk_virtual_cpu),
};
void __init tegra2_init_clocks(void)
@@ -1356,4 +1769,75 @@ void __init tegra2_init_clocks(void)
cd->name);
}
}
+
+ init_audio_sync_clock_mux();
+}
+
+#ifdef CONFIG_PM
+static u32 clk_rst_suspend[RST_DEVICES_NUM + CLK_OUT_ENB_NUM +
+ PERIPH_CLK_SOURCE_NUM + 3];
+
+void tegra_clk_suspend(void)
+{
+ unsigned long off, i;
+ u32 *ctx = clk_rst_suspend;
+
+ *ctx++ = clk_readl(OSC_CTRL) & OSC_CTRL_MASK;
+
+ for (off = PERIPH_CLK_SOURCE_I2S1; off <= PERIPH_CLK_SOURCE_OSC;
+ off += 4) {
+ if (off == PERIPH_CLK_SOURCE_EMC)
+ continue;
+ *ctx++ = clk_readl(off);
+ }
+
+ off = RST_DEVICES;
+ for (i = 0; i < RST_DEVICES_NUM; i++, off += 4)
+ *ctx++ = clk_readl(off);
+
+ off = CLK_OUT_ENB;
+ for (i = 0; i < CLK_OUT_ENB_NUM; i++, off += 4)
+ *ctx++ = clk_readl(off);
+
+ *ctx++ = clk_readl(MISC_CLK_ENB);
+ *ctx++ = clk_readl(CLK_MASK_ARM);
+}
+
+void tegra_clk_resume(void)
+{
+ unsigned long off, i;
+ const u32 *ctx = clk_rst_suspend;
+ u32 val;
+
+ val = clk_readl(OSC_CTRL) & ~OSC_CTRL_MASK;
+ val |= *ctx++;
+ clk_writel(val, OSC_CTRL);
+
+ /* enable all clocks before configuring clock sources */
+ clk_writel(0xbffffff9ul, CLK_OUT_ENB);
+ clk_writel(0xfefffff7ul, CLK_OUT_ENB + 4);
+ clk_writel(0x77f01bfful, CLK_OUT_ENB + 8);
+ wmb();
+
+ for (off = PERIPH_CLK_SOURCE_I2S1; off <= PERIPH_CLK_SOURCE_OSC;
+ off += 4) {
+ if (off == PERIPH_CLK_SOURCE_EMC)
+ continue;
+ clk_writel(*ctx++, off);
+ }
+ wmb();
+
+ off = RST_DEVICES;
+ for (i = 0; i < RST_DEVICES_NUM; i++, off += 4)
+ clk_writel(*ctx++, off);
+ wmb();
+
+ off = CLK_OUT_ENB;
+ for (i = 0; i < CLK_OUT_ENB_NUM; i++, off += 4)
+ clk_writel(*ctx++, off);
+ wmb();
+
+ clk_writel(*ctx++, MISC_CLK_ENB);
+ clk_writel(*ctx++, CLK_MASK_ARM);
}
+#endif
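tegra_clk_suspend() fills clk_rst_suspend as one flat array and tegra_clk_resume() consumes it with the same walk order, so the two loops have to stay symmetric. A tiny standalone sketch of that save/restore pattern; reg[] is a stand-in for the real clk_readl()/clk_writel() targets.

#include <assert.h>

#define NREGS 4

static unsigned int reg[NREGS];	/* stand-in for the clock/reset registers */
static unsigned int ctx[NREGS];	/* saved context, like clk_rst_suspend[] */

static void clk_suspend(void)
{
	unsigned int *p = ctx;
	for (int i = 0; i < NREGS; i++)
		*p++ = reg[i];		/* save in a fixed order */
}

static void clk_resume(void)
{
	const unsigned int *p = ctx;
	for (int i = 0; i < NREGS; i++)
		reg[i] = *p++;		/* restore in exactly the same order */
}

int main(void)
{
	for (int i = 0; i < NREGS; i++)
		reg[i] = i + 1;
	clk_suspend();
	reg[2] = 0;			/* pretend the register was lost in suspend */
	clk_resume();
	assert(reg[2] == 3);
	return 0;
}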
diff --git a/arch/arm/mach-tegra/tegra2_dvfs.c b/arch/arm/mach-tegra/tegra2_dvfs.c
new file mode 100644
index 000000000000..5529c238dd77
--- /dev/null
+++ b/arch/arm/mach-tegra/tegra2_dvfs.c
@@ -0,0 +1,86 @@
+/*
+ * arch/arm/mach-tegra/tegra2_dvfs.c
+ *
+ * Copyright (C) 2010 Google, Inc.
+ *
+ * Author:
+ * Colin Cross <ccross@google.com>
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <linux/kernel.h>
+
+#include "clock.h"
+#include "tegra2_dvfs.h"
+
+static struct dvfs_table virtual_cpu_process_0[] = {
+ {314000000, 750},
+ {456000000, 825},
+ {608000000, 900},
+ {760000000, 975},
+ {817000000, 1000},
+ {912000000, 1050},
+ {1000000000, 1100},
+ {0, 0},
+};
+
+static struct dvfs_table virtual_cpu_process_1[] = {
+ {314000000, 750},
+ {456000000, 825},
+ {618000000, 900},
+ {770000000, 975},
+ {827000000, 1000},
+ {922000000, 1050},
+ {1000000000, 1100},
+ {0, 0},
+};
+
+static struct dvfs_table virtual_cpu_process_2[] = {
+ {494000000, 750},
+ {675000000, 825},
+ {817000000, 875},
+ {922000000, 925},
+ {1000000000, 975},
+ {0, 0},
+};
+
+static struct dvfs_table virtual_cpu_process_3[] = {
+ {730000000, 750},
+ {760000000, 775},
+ {845000000, 800},
+ {1000000000, 875},
+ {0, 0},
+};
+
+struct dvfs tegra_dvfs_virtual_cpu_dvfs = {
+ .reg_id = "vdd_cpu",
+ .process_id_table = {
+ {
+ .process_id = 0,
+ .table = virtual_cpu_process_0,
+ },
+ {
+ .process_id = 1,
+ .table = virtual_cpu_process_1,
+ },
+ {
+ .process_id = 2,
+ .table = virtual_cpu_process_2,
+ },
+ {
+ .process_id = 3,
+ .table = virtual_cpu_process_3,
+ },
+ },
+ .process_id_table_length = 4,
+ .cpu = 1,
+};
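Each dvfs_table pair appears to map a core frequency to the minimum vdd_cpu voltage in millivolts for that speed, with process_id selecting which table applies; faster silicon (higher process_id) reaches 1 GHz at lower voltage. A hedged sketch of the lookup: pick the first table entry whose frequency covers the request.

#include <stdio.h>

struct dvfs_entry { unsigned long freq; int millivolts; };

/* Tables are assumed to be ascending and terminated by {0, 0}. */
static int min_millivolts(const struct dvfs_entry *t, unsigned long freq)
{
	for (; t->freq; t++)
		if (freq <= t->freq)
			return t->millivolts;
	return -1;	/* requested rate above the table's maximum */
}

int main(void)
{
	/* process_id 2 table from the patch */
	const struct dvfs_entry p2[] = {
		{ 494000000, 750 }, { 675000000, 825 }, { 817000000, 875 },
		{ 922000000, 925 }, { 1000000000, 975 }, { 0, 0 },
	};

	printf("%d mV\n", min_millivolts(p2, 760000000));	/* 875 */
	return 0;
}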
diff --git a/arch/arm/mach-tegra/tegra2_dvfs.h b/arch/arm/mach-tegra/tegra2_dvfs.h
new file mode 100644
index 000000000000..f8c1adba96a6
--- /dev/null
+++ b/arch/arm/mach-tegra/tegra2_dvfs.h
@@ -0,0 +1,20 @@
+/*
+ * arch/arm/mach-tegra/tegra2_dvfs.h
+ *
+ * Copyright (C) 2010 Google, Inc.
+ *
+ * Author:
+ * Colin Cross <ccross@google.com>
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+extern struct dvfs tegra_dvfs_virtual_cpu_dvfs;
diff --git a/arch/arm/mach-tegra/timer.c b/arch/arm/mach-tegra/timer.c
index 2f420210d406..9057d6fd1d31 100644
--- a/arch/arm/mach-tegra/timer.c
+++ b/arch/arm/mach-tegra/timer.c
@@ -28,7 +28,6 @@
#include <linux/cnt32_to_63.h>
#include <asm/mach/time.h>
-#include <asm/mach/time.h>
#include <asm/localtimer.h>
#include <mach/iomap.h>
diff --git a/arch/arm/mach-u300/clock.c b/arch/arm/mach-u300/clock.c
index 60acf9e708ae..7458fc6df5c6 100644
--- a/arch/arm/mach-u300/clock.c
+++ b/arch/arm/mach-u300/clock.c
@@ -66,7 +66,7 @@ static DEFINE_SPINLOCK(syscon_resetreg_lock);
* AMBA bus
* |
* +- CPU
- * +- NANDIF NAND Flash interface
+ * +- FSMC NANDIF NAND Flash interface
* +- SEMI Shared Memory interface
* +- ISP Image Signal Processor (U335 only)
* +- CDS (U335 only)
@@ -726,7 +726,7 @@ static struct clk cpu_clk = {
};
static struct clk nandif_clk = {
- .name = "NANDIF",
+ .name = "FSMC",
.parent = &amba_clk,
.hw_ctrld = false,
.reset = true,
@@ -1259,7 +1259,7 @@ static struct clk_lookup lookups[] = {
/* Connected directly to the AMBA bus */
DEF_LOOKUP("amba", &amba_clk),
DEF_LOOKUP("cpu", &cpu_clk),
- DEF_LOOKUP("fsmc", &nandif_clk),
+ DEF_LOOKUP("fsmc-nand", &nandif_clk),
DEF_LOOKUP("semi", &semi_clk),
#ifdef CONFIG_MACH_U300_BS335
DEF_LOOKUP("isp", &isp_clk),
diff --git a/arch/arm/mach-u300/core.c b/arch/arm/mach-u300/core.c
index ea41c236be0f..aa53ee22438f 100644
--- a/arch/arm/mach-u300/core.c
+++ b/arch/arm/mach-u300/core.c
@@ -21,7 +21,8 @@
#include <linux/gpio.h>
#include <linux/clk.h>
#include <linux/err.h>
-#include <mach/coh901318.h>
+#include <linux/mtd/nand.h>
+#include <linux/mtd/fsmc.h>
#include <asm/types.h>
#include <asm/setup.h>
@@ -30,6 +31,7 @@
#include <asm/mach/map.h>
#include <asm/mach/irq.h>
+#include <mach/coh901318.h>
#include <mach/hardware.h>
#include <mach/syscon.h>
#include <mach/dma_channels.h>
@@ -285,6 +287,13 @@ static struct resource rtc_resources[] = {
*/
static struct resource fsmc_resources[] = {
{
+ .name = "nand_data",
+ .start = U300_NAND_CS0_PHYS_BASE,
+ .end = U300_NAND_CS0_PHYS_BASE + SZ_16K - 1,
+ .flags = IORESOURCE_MEM,
+ },
+ {
+ .name = "fsmc_regs",
.start = U300_NAND_IF_PHYS_BASE,
.end = U300_NAND_IF_PHYS_BASE + SZ_4K - 1,
.flags = IORESOURCE_MEM,
@@ -1429,11 +1438,39 @@ static struct platform_device rtc_device = {
.resource = rtc_resources,
};
-static struct platform_device fsmc_device = {
- .name = "nandif",
+static struct mtd_partition u300_partitions[] = {
+ {
+ .name = "bootrecords",
+ .offset = 0,
+ .size = SZ_128K,
+ },
+ {
+ .name = "free",
+ .offset = SZ_128K,
+ .size = 8064 * SZ_1K,
+ },
+ {
+ .name = "platform",
+ .offset = 8192 * SZ_1K,
+ .size = 253952 * SZ_1K,
+ },
+};
+
+static struct fsmc_nand_platform_data nand_platform_data = {
+ .partitions = u300_partitions,
+ .nr_partitions = ARRAY_SIZE(u300_partitions),
+ .options = NAND_SKIP_BBTSCAN,
+ .width = FSMC_NAND_BW8,
+};
+
+static struct platform_device nand_device = {
+ .name = "fsmc-nand",
.id = -1,
- .num_resources = ARRAY_SIZE(fsmc_resources),
.resource = fsmc_resources,
+ .num_resources = ARRAY_SIZE(fsmc_resources),
+ .dev = {
+ .platform_data = &nand_platform_data,
+ },
};
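The three MTD partitions are meant to tile the NAND contiguously: 128 KiB of boot records, 8064 KiB free, then the 253952 KiB platform area starting at 8192 KiB. A quick standalone check that each offset equals the previous offset plus size (total 256 MiB), using the same numbers as the table above.

#include <stdio.h>

#define SZ_1K   1024UL
#define SZ_128K (128 * SZ_1K)

struct part { const char *name; unsigned long offset, size; };

int main(void)
{
	const struct part parts[] = {
		{ "bootrecords", 0,            SZ_128K },
		{ "free",        SZ_128K,      8064 * SZ_1K },
		{ "platform",    8192 * SZ_1K, 253952 * SZ_1K },
	};
	unsigned long expect = 0;

	for (unsigned int i = 0; i < sizeof(parts) / sizeof(parts[0]); i++) {
		if (parts[i].offset != expect)
			printf("gap/overlap before %s\n", parts[i].name);
		expect = parts[i].offset + parts[i].size;
	}
	printf("total %lu MiB\n", expect >> 20);	/* 256 MiB */
	return 0;
}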
static struct platform_device ave_device = {
@@ -1465,7 +1502,7 @@ static struct platform_device *platform_devs[] __initdata = {
&keypad_device,
&rtc_device,
&gpio_device,
- &fsmc_device,
+ &nand_device,
&wdog_device,
&ave_device
};
diff --git a/arch/arm/mach-u300/include/mach/u300-regs.h b/arch/arm/mach-u300/include/mach/u300-regs.h
index 56721a0cd2af..8b85df4c8d8f 100644
--- a/arch/arm/mach-u300/include/mach/u300-regs.h
+++ b/arch/arm/mach-u300/include/mach/u300-regs.h
@@ -20,11 +20,9 @@
/* NAND Flash CS0 */
#define U300_NAND_CS0_PHYS_BASE 0x80000000
-#define U300_NAND_CS0_VIRT_BASE 0xff040000
/* NFIF */
#define U300_NAND_IF_PHYS_BASE 0x9f800000
-#define U300_NAND_IF_VIRT_BASE 0xff030000
/* AHB Peripherals */
#define U300_AHB_PER_PHYS_BASE 0xa0000000
diff --git a/arch/arm/mach-u300/spi.c b/arch/arm/mach-u300/spi.c
index edb2c0d255c2..00869def5420 100644
--- a/arch/arm/mach-u300/spi.c
+++ b/arch/arm/mach-u300/spi.c
@@ -67,7 +67,7 @@ static struct spi_board_info u300_spi_devices[] = {
.bus_num = 0, /* Only one bus on this chip */
.chip_select = 0,
/* Means SPI_CS_HIGH, change if e.g low CS */
- .mode = SPI_MODE_1 | SPI_LSB_FIRST | SPI_LOOP,
+ .mode = SPI_MODE_1 | SPI_LOOP,
},
#endif
};
diff --git a/arch/arm/mach-ux500/board-mop500.c b/arch/arm/mach-ux500/board-mop500.c
index fcb587f825cc..cac83a694880 100644
--- a/arch/arm/mach-ux500/board-mop500.c
+++ b/arch/arm/mach-ux500/board-mop500.c
@@ -18,12 +18,14 @@
#include <linux/amba/pl022.h>
#include <linux/spi/spi.h>
#include <linux/mfd/ab8500.h>
+#include <linux/input/matrix_keypad.h>
#include <asm/mach-types.h>
#include <asm/mach/arch.h>
#include <plat/pincfg.h>
#include <plat/i2c.h>
+#include <plat/ske.h>
#include <mach/hardware.h>
#include <mach/setup.h>
@@ -49,6 +51,24 @@ static pin_cfg_t mop500_pins[] = {
GPIO11_I2C2_SCL,
GPIO229_I2C3_SDA,
GPIO230_I2C3_SCL,
+
+ /* SKE keypad */
+ GPIO153_KP_I7,
+ GPIO154_KP_I6,
+ GPIO155_KP_I5,
+ GPIO156_KP_I4,
+ GPIO157_KP_O7,
+ GPIO158_KP_O6,
+ GPIO159_KP_O5,
+ GPIO160_KP_O4,
+ GPIO161_KP_I3,
+ GPIO162_KP_I2,
+ GPIO163_KP_I1,
+ GPIO164_KP_I0,
+ GPIO165_KP_O3,
+ GPIO166_KP_O2,
+ GPIO167_KP_O1,
+ GPIO168_KP_O0,
};
static void ab4500_spi_cs_control(u32 command)
@@ -148,12 +168,120 @@ static struct amba_device *amba_devs[] __initdata = {
&u8500_ssp0_device,
};
+static const unsigned int ux500_keymap[] = {
+ KEY(2, 5, KEY_END),
+ KEY(4, 1, KEY_POWER),
+ KEY(3, 5, KEY_VOLUMEDOWN),
+ KEY(1, 3, KEY_3),
+ KEY(5, 2, KEY_RIGHT),
+ KEY(5, 0, KEY_9),
+
+ KEY(0, 5, KEY_MENU),
+ KEY(7, 6, KEY_ENTER),
+ KEY(4, 5, KEY_0),
+ KEY(6, 7, KEY_2),
+ KEY(3, 4, KEY_UP),
+ KEY(3, 3, KEY_DOWN),
+
+ KEY(6, 4, KEY_SEND),
+ KEY(6, 2, KEY_BACK),
+ KEY(4, 2, KEY_VOLUMEUP),
+ KEY(5, 5, KEY_1),
+ KEY(4, 3, KEY_LEFT),
+ KEY(3, 2, KEY_7),
+};
+
+static const struct matrix_keymap_data ux500_keymap_data = {
+ .keymap = ux500_keymap,
+ .keymap_size = ARRAY_SIZE(ux500_keymap),
+};
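Each KEY(row, col, keycode) entry packs the matrix position and keycode into one u32 that the matrix-keypad core later checks against krow/kcol. A hedged sketch of that packing; the 24/16-bit layout follows the usual matrix_keypad convention, but treat the macros here as illustrative rather than the header's exact definitions.

#include <stdio.h>
#include <stdint.h>

#define KEY(row, col, val)	(((row) << 24) | ((col) << 16) | (val))

#define KEY_ROW(k)	(((k) >> 24) & 0xff)
#define KEY_COL(k)	(((k) >> 16) & 0xff)
#define KEY_VAL(k)	((k) & 0xffff)

int main(void)
{
	uint32_t k = KEY(2, 5, 107);	/* 107 is KEY_END in the input keycode space */

	printf("row %u col %u code %u\n", KEY_ROW(k), KEY_COL(k), KEY_VAL(k));
	return 0;
}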
+
+/*
+ * Nomadik SKE keypad
+ */
+#define ROW_PIN_I0 164
+#define ROW_PIN_I1 163
+#define ROW_PIN_I2 162
+#define ROW_PIN_I3 161
+#define ROW_PIN_I4 156
+#define ROW_PIN_I5 155
+#define ROW_PIN_I6 154
+#define ROW_PIN_I7 153
+#define COL_PIN_O0 168
+#define COL_PIN_O1 167
+#define COL_PIN_O2 166
+#define COL_PIN_O3 165
+#define COL_PIN_O4 160
+#define COL_PIN_O5 159
+#define COL_PIN_O6 158
+#define COL_PIN_O7 157
+
+#define SKE_KPD_MAX_ROWS 8
+#define SKE_KPD_MAX_COLS 8
+
+static int ske_kp_rows[] = {
+ ROW_PIN_I0, ROW_PIN_I1, ROW_PIN_I2, ROW_PIN_I3,
+ ROW_PIN_I4, ROW_PIN_I5, ROW_PIN_I6, ROW_PIN_I7,
+};
+
+/*
+ * ske_set_gpio_row: request and set gpio rows
+ */
+static int ske_set_gpio_row(int gpio)
+{
+ int ret;
+
+ ret = gpio_request(gpio, "ske-kp");
+ if (ret < 0) {
+ pr_err("ske_set_gpio_row: gpio request failed\n");
+ return ret;
+ }
+
+ ret = gpio_direction_output(gpio, 1);
+ if (ret < 0) {
+ pr_err("ske_set_gpio_row: gpio direction failed\n");
+ gpio_free(gpio);
+ }
+
+ return ret;
+}
+
+/*
+ * ske_kp_init - enable the gpio configuration
+ */
+static int ske_kp_init(void)
+{
+ int ret, i;
+
+ for (i = 0; i < SKE_KPD_MAX_ROWS; i++) {
+ ret = ske_set_gpio_row(ske_kp_rows[i]);
+ if (ret < 0) {
+ pr_err("ske_kp_init: failed init\n");
+ return ret;
+ }
+ }
+
+ return 0;
+}
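One thing to note about ske_kp_init() above: if a later row fails, the rows already requested stay claimed. A hedged standalone sketch of a variant that unwinds on the error path; the acquire/release helpers stand in for gpio_request()/gpio_free(), and the unwind loop is the addition.

#include <stdio.h>

#define NROWS 8

static int rows_acquired[NROWS];

static int acquire_row(int i)		/* stand-in for gpio_request() + direction */
{
	if (i == 5)
		return -1;		/* simulate a failure part-way through */
	rows_acquired[i] = 1;
	return 0;
}

static void release_row(int i)		/* stand-in for gpio_free() */
{
	rows_acquired[i] = 0;
}

static int kp_init(void)
{
	int i, ret;

	for (i = 0; i < NROWS; i++) {
		ret = acquire_row(i);
		if (ret < 0) {
			while (--i >= 0)	/* unwind everything acquired so far */
				release_row(i);
			return ret;
		}
	}
	return 0;
}

int main(void)
{
	printf("init: %d\n", kp_init());	/* -1, and no row left acquired */
	return 0;
}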
+
+static struct ske_keypad_platform_data ske_keypad_board = {
+ .init = ske_kp_init,
+ .keymap_data = &ux500_keymap_data,
+ .no_autorepeat = true,
+ .krow = SKE_KPD_MAX_ROWS, /* 8x8 matrix */
+ .kcol = SKE_KPD_MAX_COLS,
+ .debounce_ms = 40, /* in milliseconds */
+};
+
+
+
/* add any platform devices here - TODO */
static struct platform_device *platform_devs[] __initdata = {
&u8500_i2c0_device,
&ux500_i2c1_device,
&ux500_i2c2_device,
&ux500_i2c3_device,
+ &ux500_ske_keypad_device,
};
static void __init u8500_init_machine(void)
@@ -168,6 +296,7 @@ static void __init u8500_init_machine(void)
ux500_i2c1_device.dev.platform_data = &u8500_i2c1_data;
ux500_i2c2_device.dev.platform_data = &u8500_i2c2_data;
ux500_i2c3_device.dev.platform_data = &u8500_i2c3_data;
+ ux500_ske_keypad_device.dev.platform_data = &ske_keypad_board;
u8500_ssp0_device.dev.platform_data = &ssp0_platform_data;
diff --git a/arch/arm/mach-ux500/clock.c b/arch/arm/mach-ux500/clock.c
index d8ab7f184fe4..1675047daf20 100644
--- a/arch/arm/mach-ux500/clock.c
+++ b/arch/arm/mach-ux500/clock.c
@@ -477,6 +477,7 @@ static struct clk_lookup u8500_common_clks[] = {
CLK(sdi5, "sdi5", NULL),
CLK(uart2, "uart2", NULL),
CLK(ske, "ske", NULL),
+ CLK(ske, "nmk-ske-keypad", NULL),
CLK(sdi2, "sdi2", NULL),
CLK(i2c0, "nmk-i2c.0", NULL),
CLK(fsmc, "fsmc", NULL),
diff --git a/arch/arm/mach-ux500/cpu.c b/arch/arm/mach-ux500/cpu.c
index e0fd747e447a..608a1372b172 100644
--- a/arch/arm/mach-ux500/cpu.c
+++ b/arch/arm/mach-ux500/cpu.c
@@ -10,6 +10,7 @@
#include <linux/io.h>
#include <linux/clk.h>
+#include <asm/cacheflush.h>
#include <asm/hardware/cache-l2x0.h>
#include <asm/hardware/gic.h>
#include <asm/mach/map.h>
@@ -71,6 +72,46 @@ void __init ux500_init_irq(void)
}
#ifdef CONFIG_CACHE_L2X0
+static inline void ux500_cache_wait(void __iomem *reg, unsigned long mask)
+{
+ /* wait for the operation to complete */
+ while (readl_relaxed(reg) & mask)
+ ;
+}
+
+static inline void ux500_cache_sync(void)
+{
+ void __iomem *base = __io_address(UX500_L2CC_BASE);
+ writel_relaxed(0, base + L2X0_CACHE_SYNC);
+ ux500_cache_wait(base + L2X0_CACHE_SYNC, 1);
+}
+
+/*
+ * The L2 cache cannot be turned off in the non-secure world.
+ * Dummy until a secure service is in place.
+ */
+static void ux500_l2x0_disable(void)
+{
+}
+
+/*
+ * This is only called when doing a kexec, just after turning off the L2
+ * and L1 cache, and it is surrounded by a spinlock in the generic version.
+ * However, we're not really turning off the L2 cache right now and the
+ * PL310 does not support exclusive accesses (used to implement the spinlock).
+ * So, the invalidation needs to be done without the spinlock.
+ */
+static void ux500_l2x0_inv_all(void)
+{
+ void __iomem *l2x0_base = __io_address(UX500_L2CC_BASE);
+ uint32_t l2x0_way_mask = (1<<16) - 1; /* Bitmask of active ways */
+
+ /* invalidate all ways */
+ writel_relaxed(l2x0_way_mask, l2x0_base + L2X0_INV_WAY);
+ ux500_cache_wait(l2x0_base + L2X0_INV_WAY, l2x0_way_mask);
+ ux500_cache_sync();
+}
+
static int ux500_l2x0_init(void)
{
void __iomem *l2x0_base;
@@ -80,6 +121,10 @@ static int ux500_l2x0_init(void)
/* 64KB way size, 8 way associativity, force WA */
l2x0_init(l2x0_base, 0x3e060000, 0xc0000fff);
+ /* Override invalidate function */
+ outer_cache.disable = ux500_l2x0_disable;
+ outer_cache.inv_all = ux500_l2x0_inv_all;
+
return 0;
}
early_initcall(ux500_l2x0_init);
diff --git a/arch/arm/mach-ux500/devices-db8500.c b/arch/arm/mach-ux500/devices-db8500.c
index 40032fecbc16..4a94be3304b9 100644
--- a/arch/arm/mach-ux500/devices-db8500.c
+++ b/arch/arm/mach-ux500/devices-db8500.c
@@ -208,35 +208,25 @@ static struct resource dma40_resources[] = {
/* Default configuration for physical memcpy */
struct stedma40_chan_cfg dma40_memcpy_conf_phy = {
- .channel_type = (STEDMA40_CHANNEL_IN_PHY_MODE |
- STEDMA40_LOW_PRIORITY_CHANNEL |
- STEDMA40_PCHAN_BASIC_MODE),
+ .mode = STEDMA40_MODE_PHYSICAL,
.dir = STEDMA40_MEM_TO_MEM,
- .src_info.endianess = STEDMA40_LITTLE_ENDIAN,
.src_info.data_width = STEDMA40_BYTE_WIDTH,
.src_info.psize = STEDMA40_PSIZE_PHY_1,
.src_info.flow_ctrl = STEDMA40_NO_FLOW_CTRL,
- .dst_info.endianess = STEDMA40_LITTLE_ENDIAN,
.dst_info.data_width = STEDMA40_BYTE_WIDTH,
.dst_info.psize = STEDMA40_PSIZE_PHY_1,
.dst_info.flow_ctrl = STEDMA40_NO_FLOW_CTRL,
};
/* Default configuration for logical memcpy */
struct stedma40_chan_cfg dma40_memcpy_conf_log = {
- .channel_type = (STEDMA40_CHANNEL_IN_LOG_MODE |
- STEDMA40_LOW_PRIORITY_CHANNEL |
- STEDMA40_LCHAN_SRC_LOG_DST_LOG |
- STEDMA40_NO_TIM_FOR_LINK),
.dir = STEDMA40_MEM_TO_MEM,
- .src_info.endianess = STEDMA40_LITTLE_ENDIAN,
.src_info.data_width = STEDMA40_BYTE_WIDTH,
.src_info.psize = STEDMA40_PSIZE_LOG_1,
.src_info.flow_ctrl = STEDMA40_NO_FLOW_CTRL,
- .dst_info.endianess = STEDMA40_LITTLE_ENDIAN,
.dst_info.data_width = STEDMA40_BYTE_WIDTH,
.dst_info.psize = STEDMA40_PSIZE_LOG_1,
.dst_info.flow_ctrl = STEDMA40_NO_FLOW_CTRL,
@@ -269,7 +259,6 @@ static struct stedma40_platform_data dma40_plat_data = {
.memcpy_len = ARRAY_SIZE(dma40_memcpy_event),
.memcpy_conf_phy = &dma40_memcpy_conf_phy,
.memcpy_conf_log = &dma40_memcpy_conf_log,
- .llis_per_log = 8,
.disabled_channels = {-1},
};
@@ -292,3 +281,23 @@ void dma40_u8500ed_fixup(void)
dma40_resources[1].start = U8500_DMA_LCPA_BASE_ED;
dma40_resources[1].end = U8500_DMA_LCPA_BASE_ED + 2 * SZ_1K - 1;
}
+
+struct resource keypad_resources[] = {
+ [0] = {
+ .start = U8500_SKE_BASE,
+ .end = U8500_SKE_BASE + SZ_4K - 1,
+ .flags = IORESOURCE_MEM,
+ },
+ [1] = {
+ .start = IRQ_DB8500_KB,
+ .end = IRQ_DB8500_KB,
+ .flags = IORESOURCE_IRQ,
+ },
+};
+
+struct platform_device ux500_ske_keypad_device = {
+ .name = "nmk-ske-keypad",
+ .id = -1,
+ .num_resources = ARRAY_SIZE(keypad_resources),
+ .resource = keypad_resources,
+};
diff --git a/arch/arm/mach-ux500/include/mach/devices.h b/arch/arm/mach-ux500/include/mach/devices.h
index 33a120c2e82e..b91a4d1211a2 100644
--- a/arch/arm/mach-ux500/include/mach/devices.h
+++ b/arch/arm/mach-ux500/include/mach/devices.h
@@ -26,6 +26,7 @@ extern struct platform_device ux500_i2c3_device;
extern struct platform_device u8500_i2c0_device;
extern struct platform_device u8500_i2c4_device;
extern struct platform_device u8500_dma40_device;
+extern struct platform_device ux500_ske_keypad_device;
extern struct amba_device u8500_sdi0_device;
extern struct amba_device u8500_sdi1_device;
diff --git a/arch/arm/mach-ux500/pins-db8500.h b/arch/arm/mach-ux500/pins-db8500.h
index 66f8761cc823..f923764ee16c 100644
--- a/arch/arm/mach-ux500/pins-db8500.h
+++ b/arch/arm/mach-ux500/pins-db8500.h
@@ -459,82 +459,82 @@
#define GPIO152_KP_O9 PIN_CFG(152, ALT_C)
#define GPIO153_GPIO PIN_CFG(153, GPIO)
-#define GPIO153_KP_I7 PIN_CFG(153, ALT_A)
+#define GPIO153_KP_I7 PIN_CFG_PULL(153, ALT_A, DOWN)
#define GPIO153_LCD_D24 PIN_CFG(153, ALT_B)
#define GPIO153_U2_RXD PIN_CFG(153, ALT_C)
#define GPIO154_GPIO PIN_CFG(154, GPIO)
-#define GPIO154_KP_I6 PIN_CFG(154, ALT_A)
+#define GPIO154_KP_I6 PIN_CFG_PULL(154, ALT_A, DOWN)
#define GPIO154_LCD_D25 PIN_CFG(154, ALT_B)
#define GPIO154_U2_TXD PIN_CFG(154, ALT_C)
#define GPIO155_GPIO PIN_CFG(155, GPIO)
-#define GPIO155_KP_I5 PIN_CFG(155, ALT_A)
+#define GPIO155_KP_I5 PIN_CFG_PULL(155, ALT_A, DOWN)
#define GPIO155_LCD_D26 PIN_CFG(155, ALT_B)
#define GPIO155_STMAPE_CLK PIN_CFG(155, ALT_C)
#define GPIO156_GPIO PIN_CFG(156, GPIO)
-#define GPIO156_KP_I4 PIN_CFG(156, ALT_A)
+#define GPIO156_KP_I4 PIN_CFG_PULL(156, ALT_A, DOWN)
#define GPIO156_LCD_D27 PIN_CFG(156, ALT_B)
#define GPIO156_STMAPE_DAT3 PIN_CFG(156, ALT_C)
#define GPIO157_GPIO PIN_CFG(157, GPIO)
-#define GPIO157_KP_O7 PIN_CFG(157, ALT_A)
+#define GPIO157_KP_O7 PIN_CFG_PULL(157, ALT_A, UP)
#define GPIO157_LCD_D28 PIN_CFG(157, ALT_B)
#define GPIO157_STMAPE_DAT2 PIN_CFG(157, ALT_C)
#define GPIO158_GPIO PIN_CFG(158, GPIO)
-#define GPIO158_KP_O6 PIN_CFG(158, ALT_A)
+#define GPIO158_KP_O6 PIN_CFG_PULL(158, ALT_A, UP)
#define GPIO158_LCD_D29 PIN_CFG(158, ALT_B)
#define GPIO158_STMAPE_DAT1 PIN_CFG(158, ALT_C)
#define GPIO159_GPIO PIN_CFG(159, GPIO)
-#define GPIO159_KP_O5 PIN_CFG(159, ALT_A)
+#define GPIO159_KP_O5 PIN_CFG_PULL(159, ALT_A, UP)
#define GPIO159_LCD_D30 PIN_CFG(159, ALT_B)
#define GPIO159_STMAPE_DAT0 PIN_CFG(159, ALT_C)
#define GPIO160_GPIO PIN_CFG(160, GPIO)
-#define GPIO160_KP_O4 PIN_CFG(160, ALT_A)
+#define GPIO160_KP_O4 PIN_CFG_PULL(160, ALT_A, UP)
#define GPIO160_LCD_D31 PIN_CFG(160, ALT_B)
#define GPIO160_NONE PIN_CFG(160, ALT_C)
#define GPIO161_GPIO PIN_CFG(161, GPIO)
-#define GPIO161_KP_I3 PIN_CFG(161, ALT_A)
+#define GPIO161_KP_I3 PIN_CFG_PULL(161, ALT_A, DOWN)
#define GPIO161_LCD_D32 PIN_CFG(161, ALT_B)
#define GPIO161_UARTMOD_RXD PIN_CFG(161, ALT_C)
#define GPIO162_GPIO PIN_CFG(162, GPIO)
-#define GPIO162_KP_I2 PIN_CFG(162, ALT_A)
+#define GPIO162_KP_I2 PIN_CFG_PULL(162, ALT_A, DOWN)
#define GPIO162_LCD_D33 PIN_CFG(162, ALT_B)
#define GPIO162_UARTMOD_TXD PIN_CFG(162, ALT_C)
#define GPIO163_GPIO PIN_CFG(163, GPIO)
-#define GPIO163_KP_I1 PIN_CFG(163, ALT_A)
+#define GPIO163_KP_I1 PIN_CFG_PULL(163, ALT_A, DOWN)
#define GPIO163_LCD_D34 PIN_CFG(163, ALT_B)
#define GPIO163_STMMOD_CLK PIN_CFG(163, ALT_C)
#define GPIO164_GPIO PIN_CFG(164, GPIO)
-#define GPIO164_KP_I0 PIN_CFG(164, ALT_A)
+#define GPIO164_KP_I0 PIN_CFG_PULL(164, ALT_A, UP)
#define GPIO164_LCD_D35 PIN_CFG(164, ALT_B)
#define GPIO164_STMMOD_DAT3 PIN_CFG(164, ALT_C)
#define GPIO165_GPIO PIN_CFG(165, GPIO)
-#define GPIO165_KP_O3 PIN_CFG(165, ALT_A)
+#define GPIO165_KP_O3 PIN_CFG_PULL(165, ALT_A, UP)
#define GPIO165_LCD_D36 PIN_CFG(165, ALT_B)
#define GPIO165_STMMOD_DAT2 PIN_CFG(165, ALT_C)
#define GPIO166_GPIO PIN_CFG(166, GPIO)
-#define GPIO166_KP_O2 PIN_CFG(166, ALT_A)
+#define GPIO166_KP_O2 PIN_CFG_PULL(166, ALT_A, UP)
#define GPIO166_LCD_D37 PIN_CFG(166, ALT_B)
#define GPIO166_STMMOD_DAT1 PIN_CFG(166, ALT_C)
#define GPIO167_GPIO PIN_CFG(167, GPIO)
-#define GPIO167_KP_O1 PIN_CFG(167, ALT_A)
+#define GPIO167_KP_O1 PIN_CFG_PULL(167, ALT_A, UP)
#define GPIO167_LCD_D38 PIN_CFG(167, ALT_B)
#define GPIO167_STMMOD_DAT0 PIN_CFG(167, ALT_C)
#define GPIO168_GPIO PIN_CFG(168, GPIO)
-#define GPIO168_KP_O0 PIN_CFG(168, ALT_A)
+#define GPIO168_KP_O0 PIN_CFG_PULL(168, ALT_A, UP)
#define GPIO168_LCD_D39 PIN_CFG(168, ALT_B)
#define GPIO168_NONE PIN_CFG(168, ALT_C)
diff --git a/arch/arm/mach-versatile/include/mach/vmalloc.h b/arch/arm/mach-versatile/include/mach/vmalloc.h
index ebd8a2543d3b..7d8e069ad51b 100644
--- a/arch/arm/mach-versatile/include/mach/vmalloc.h
+++ b/arch/arm/mach-versatile/include/mach/vmalloc.h
@@ -18,4 +18,4 @@
* along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*/
-#define VMALLOC_END 0xd8000000
+#define VMALLOC_END 0xd8000000UL
diff --git a/arch/arm/mach-vexpress/ct-ca9x4.c b/arch/arm/mach-vexpress/ct-ca9x4.c
index c2e405a9e025..fd25ccd7272f 100644
--- a/arch/arm/mach-vexpress/ct-ca9x4.c
+++ b/arch/arm/mach-vexpress/ct-ca9x4.c
@@ -54,7 +54,9 @@ static struct map_desc ct_ca9x4_io_desc[] __initdata = {
static void __init ct_ca9x4_map_io(void)
{
+#ifdef CONFIG_LOCAL_TIMERS
twd_base = MMIO_P2V(A9_MPCORE_TWD);
+#endif
v2m_map_io(ct_ca9x4_io_desc, ARRAY_SIZE(ct_ca9x4_io_desc));
}
diff --git a/arch/arm/mach-vexpress/headsmp.S b/arch/arm/mach-vexpress/headsmp.S
index 8a78ff68e1ee..7a3f0632947c 100644
--- a/arch/arm/mach-vexpress/headsmp.S
+++ b/arch/arm/mach-vexpress/headsmp.S
@@ -35,5 +35,6 @@ pen: ldr r7, [r6]
*/
b secondary_startup
+ .align
1: .long .
.long pen_release
diff --git a/arch/arm/mm/Kconfig b/arch/arm/mm/Kconfig
index a0a2928ae4dd..4414a01e1e8a 100644
--- a/arch/arm/mm/Kconfig
+++ b/arch/arm/mm/Kconfig
@@ -779,6 +779,14 @@ config CACHE_L2X0
help
This option enables the L2x0 PrimeCell.
+config CACHE_PL310
+ bool
+ depends on CACHE_L2X0
+ default y if CPU_V7 && !CPU_V6
+ help
+ This option enables optimisations for the PL310 cache
+ controller.
+
config CACHE_TAUROS2
bool "Enable the Tauros2 L2 cache controller"
depends on (ARCH_DOVE || ARCH_MMP)
diff --git a/arch/arm/mm/cache-fa.S b/arch/arm/mm/cache-fa.S
index 7148e53e6078..1fa6f71470de 100644
--- a/arch/arm/mm/cache-fa.S
+++ b/arch/arm/mm/cache-fa.S
@@ -38,6 +38,17 @@
#define CACHE_DLIMIT (CACHE_DSIZE * 2)
/*
+ * flush_icache_all()
+ *
+ * Unconditionally clean and invalidate the entire icache.
+ */
+ENTRY(fa_flush_icache_all)
+ mov r0, #0
+ mcr p15, 0, r0, c7, c5, 0 @ invalidate I cache
+ mov pc, lr
+ENDPROC(fa_flush_icache_all)
+
+/*
* flush_user_cache_all()
*
* Clean and invalidate all cache entries in a particular address
@@ -233,6 +244,7 @@ ENDPROC(fa_dma_unmap_area)
.type fa_cache_fns, #object
ENTRY(fa_cache_fns)
+ .long fa_flush_icache_all
.long fa_flush_kern_cache_all
.long fa_flush_user_cache_all
.long fa_flush_user_cache_range
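The new `.long *_flush_icache_all` line prepended to every cache_fns table pairs with a new first slot in the C-side ops structure, so the assembly table and the struct must agree field-for-field. A hedged C sketch of that ops-table idea; the field names are illustrative, the real struct lives in asm/cacheflush.h.

#include <stdio.h>

/* Ordered exactly like the assembly table: adding flush_icache_all means
 * adding one slot at the same position in every per-CPU table. */
struct cpu_cache_fns {
	void (*flush_icache_all)(void);
	void (*flush_kern_all)(void);
	/* ...remaining entries elided... */
};

static void fa_flush_icache_all(void) { puts("invalidate entire I-cache"); }
static void fa_flush_kern_all(void)   { puts("flush kernel D/I caches"); }

static const struct cpu_cache_fns fa_cache_fns = {
	.flush_icache_all = fa_flush_icache_all,
	.flush_kern_all   = fa_flush_kern_all,
};

int main(void)
{
	fa_cache_fns.flush_icache_all();
	return 0;
}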
diff --git a/arch/arm/mm/cache-l2x0.c b/arch/arm/mm/cache-l2x0.c
index 9982eb385c0f..170c9bb95866 100644
--- a/arch/arm/mm/cache-l2x0.c
+++ b/arch/arm/mm/cache-l2x0.c
@@ -28,14 +28,24 @@
static void __iomem *l2x0_base;
static DEFINE_SPINLOCK(l2x0_lock);
static uint32_t l2x0_way_mask; /* Bitmask of active ways */
+static uint32_t l2x0_size;
-static inline void cache_wait(void __iomem *reg, unsigned long mask)
+static inline void cache_wait_way(void __iomem *reg, unsigned long mask)
{
- /* wait for the operation to complete */
+ /* wait for cache operation by line or way to complete */
while (readl_relaxed(reg) & mask)
;
}
+#ifdef CONFIG_CACHE_PL310
+static inline void cache_wait(void __iomem *reg, unsigned long mask)
+{
+ /* cache operations by line are atomic on PL310 */
+}
+#else
+#define cache_wait cache_wait_way
+#endif
+
static inline void cache_sync(void)
{
void __iomem *base = l2x0_base;
@@ -103,14 +113,40 @@ static void l2x0_cache_sync(void)
spin_unlock_irqrestore(&l2x0_lock, flags);
}
-static inline void l2x0_inv_all(void)
+static void l2x0_flush_all(void)
+{
+ unsigned long flags;
+
+ /* clean all ways */
+ spin_lock_irqsave(&l2x0_lock, flags);
+ writel_relaxed(l2x0_way_mask, l2x0_base + L2X0_CLEAN_INV_WAY);
+ cache_wait_way(l2x0_base + L2X0_CLEAN_INV_WAY, l2x0_way_mask);
+ cache_sync();
+ spin_unlock_irqrestore(&l2x0_lock, flags);
+}
+
+static void l2x0_clean_all(void)
+{
+ unsigned long flags;
+
+ /* clean all ways */
+ spin_lock_irqsave(&l2x0_lock, flags);
+ writel_relaxed(l2x0_way_mask, l2x0_base + L2X0_CLEAN_WAY);
+ cache_wait_way(l2x0_base + L2X0_CLEAN_WAY, l2x0_way_mask);
+ cache_sync();
+ spin_unlock_irqrestore(&l2x0_lock, flags);
+}
+
+static void l2x0_inv_all(void)
{
unsigned long flags;
/* invalidate all ways */
spin_lock_irqsave(&l2x0_lock, flags);
+ /* Invalidating while the L2 is enabled is not allowed */
+ BUG_ON(readl(l2x0_base + L2X0_CTRL) & 1);
writel_relaxed(l2x0_way_mask, l2x0_base + L2X0_INV_WAY);
- cache_wait(l2x0_base + L2X0_INV_WAY, l2x0_way_mask);
+ cache_wait_way(l2x0_base + L2X0_INV_WAY, l2x0_way_mask);
cache_sync();
spin_unlock_irqrestore(&l2x0_lock, flags);
}
@@ -159,6 +195,11 @@ static void l2x0_clean_range(unsigned long start, unsigned long end)
void __iomem *base = l2x0_base;
unsigned long flags;
+ if ((end - start) >= l2x0_size) {
+ l2x0_clean_all();
+ return;
+ }
+
spin_lock_irqsave(&l2x0_lock, flags);
start &= ~(CACHE_LINE_SIZE - 1);
while (start < end) {
@@ -184,6 +225,11 @@ static void l2x0_flush_range(unsigned long start, unsigned long end)
void __iomem *base = l2x0_base;
unsigned long flags;
+ if ((end - start) >= l2x0_size) {
+ l2x0_flush_all();
+ return;
+ }
+
spin_lock_irqsave(&l2x0_lock, flags);
start &= ~(CACHE_LINE_SIZE - 1);
while (start < end) {
@@ -206,10 +252,20 @@ static void l2x0_flush_range(unsigned long start, unsigned long end)
spin_unlock_irqrestore(&l2x0_lock, flags);
}
+static void l2x0_disable(void)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&l2x0_lock, flags);
+ writel(0, l2x0_base + L2X0_CTRL);
+ spin_unlock_irqrestore(&l2x0_lock, flags);
+}
+
void __init l2x0_init(void __iomem *base, __u32 aux_val, __u32 aux_mask)
{
__u32 aux;
__u32 cache_id;
+ __u32 way_size = 0;
int ways;
const char *type;
@@ -244,6 +300,13 @@ void __init l2x0_init(void __iomem *base, __u32 aux_val, __u32 aux_mask)
l2x0_way_mask = (1 << ways) - 1;
/*
+ * L2 cache Size = Way size * Number of ways
+ */
+ way_size = (aux & L2X0_AUX_CTRL_WAY_SIZE_MASK) >> 17;
+ way_size = 1 << (way_size + 3);
+ l2x0_size = ways * way_size * SZ_1K;
+
+ /*
* Check if l2x0 controller is already enabled.
* If you are booting from non-secure mode
* accessing the below registers will fault.
@@ -263,8 +326,11 @@ void __init l2x0_init(void __iomem *base, __u32 aux_val, __u32 aux_mask)
outer_cache.clean_range = l2x0_clean_range;
outer_cache.flush_range = l2x0_flush_range;
outer_cache.sync = l2x0_cache_sync;
+ outer_cache.flush_all = l2x0_flush_all;
+ outer_cache.inv_all = l2x0_inv_all;
+ outer_cache.disable = l2x0_disable;
printk(KERN_INFO "%s cache controller enabled\n", type);
- printk(KERN_INFO "l2x0: %d ways, CACHE_ID 0x%08x, AUX_CTRL 0x%08x\n",
- ways, cache_id, aux);
+ printk(KERN_INFO "l2x0: %d ways, CACHE_ID 0x%08x, AUX_CTRL 0x%08x, Cache size: %d B\n",
+ ways, cache_id, aux, l2x0_size);
}
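The new way_size computation decodes bits [19:17] of the auxiliary control register: a field value f means 1 << (f + 3) KiB per way, and the total is ways * way size. A standalone sketch of the decode, using the 64 KiB-way, 8-way PL310 configuration seen earlier in this series as the example value.

#include <stdio.h>

#define L2X0_AUX_CTRL_WAY_SIZE_MASK	(7 << 17)	/* bits [19:17] */

/* Decode an L2X0 AUX_CTRL value into a total cache size in bytes:
 * way_size_kB = 1 << (field + 3), total = ways * way_size_kB * 1024. */
static unsigned int l2x0_total_size(unsigned int aux, int ways)
{
	unsigned int field = (aux & L2X0_AUX_CTRL_WAY_SIZE_MASK) >> 17;
	unsigned int way_size_kb = 1u << (field + 3);

	return ways * way_size_kb * 1024;
}

int main(void)
{
	/* 0x3e060000 is the aux value used for ux500 earlier in this series:
	 * field 3 -> 64 KiB ways, 8-way -> 524288 bytes total. */
	printf("%u bytes\n", l2x0_total_size(0x3e060000, 8));
	return 0;
}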
diff --git a/arch/arm/mm/cache-v3.S b/arch/arm/mm/cache-v3.S
index c2ff3c599fee..2e2bc406a18d 100644
--- a/arch/arm/mm/cache-v3.S
+++ b/arch/arm/mm/cache-v3.S
@@ -13,6 +13,15 @@
#include "proc-macros.S"
/*
+ * flush_icache_all()
+ *
+ * Unconditionally clean and invalidate the entire icache.
+ */
+ENTRY(v3_flush_icache_all)
+ mov pc, lr
+ENDPROC(v3_flush_icache_all)
+
+/*
* flush_user_cache_all()
*
* Invalidate all cache entries in a particular address
@@ -122,6 +131,7 @@ ENDPROC(v3_dma_map_area)
.type v3_cache_fns, #object
ENTRY(v3_cache_fns)
+ .long v3_flush_icache_all
.long v3_flush_kern_cache_all
.long v3_flush_user_cache_all
.long v3_flush_user_cache_range
diff --git a/arch/arm/mm/cache-v4.S b/arch/arm/mm/cache-v4.S
index 4810f7e3e813..a8fefb523f19 100644
--- a/arch/arm/mm/cache-v4.S
+++ b/arch/arm/mm/cache-v4.S
@@ -13,6 +13,15 @@
#include "proc-macros.S"
/*
+ * flush_icache_all()
+ *
+ * Unconditionally clean and invalidate the entire icache.
+ */
+ENTRY(v4_flush_icache_all)
+ mov pc, lr
+ENDPROC(v4_flush_icache_all)
+
+/*
* flush_user_cache_all()
*
* Invalidate all cache entries in a particular address
@@ -134,6 +143,7 @@ ENDPROC(v4_dma_map_area)
.type v4_cache_fns, #object
ENTRY(v4_cache_fns)
+ .long v4_flush_icache_all
.long v4_flush_kern_cache_all
.long v4_flush_user_cache_all
.long v4_flush_user_cache_range
diff --git a/arch/arm/mm/cache-v4wb.S b/arch/arm/mm/cache-v4wb.S
index df8368afa102..d3644db467b7 100644
--- a/arch/arm/mm/cache-v4wb.S
+++ b/arch/arm/mm/cache-v4wb.S
@@ -51,6 +51,17 @@ flush_base:
.text
/*
+ * flush_icache_all()
+ *
+ * Unconditionally clean and invalidate the entire icache.
+ */
+ENTRY(v4wb_flush_icache_all)
+ mov r0, #0
+ mcr p15, 0, r0, c7, c5, 0 @ invalidate I cache
+ mov pc, lr
+ENDPROC(v4wb_flush_icache_all)
+
+/*
* flush_user_cache_all()
*
* Clean and invalidate all cache entries in a particular address
@@ -244,6 +255,7 @@ ENDPROC(v4wb_dma_unmap_area)
.type v4wb_cache_fns, #object
ENTRY(v4wb_cache_fns)
+ .long v4wb_flush_icache_all
.long v4wb_flush_kern_cache_all
.long v4wb_flush_user_cache_all
.long v4wb_flush_user_cache_range
diff --git a/arch/arm/mm/cache-v4wt.S b/arch/arm/mm/cache-v4wt.S
index 45c70312f43b..49c2b66cf3dd 100644
--- a/arch/arm/mm/cache-v4wt.S
+++ b/arch/arm/mm/cache-v4wt.S
@@ -41,6 +41,17 @@
#define CACHE_DLIMIT 16384
/*
+ * flush_icache_all()
+ *
+ * Unconditionally clean and invalidate the entire icache.
+ */
+ENTRY(v4wt_flush_icache_all)
+ mov r0, #0
+ mcr p15, 0, r0, c7, c5, 0 @ invalidate I cache
+ mov pc, lr
+ENDPROC(v4wt_flush_icache_all)
+
+/*
* flush_user_cache_all()
*
* Invalidate all cache entries in a particular address
@@ -188,6 +199,7 @@ ENDPROC(v4wt_dma_map_area)
.type v4wt_cache_fns, #object
ENTRY(v4wt_cache_fns)
+ .long v4wt_flush_icache_all
.long v4wt_flush_kern_cache_all
.long v4wt_flush_user_cache_all
.long v4wt_flush_user_cache_range
diff --git a/arch/arm/mm/dma-mapping.c b/arch/arm/mm/dma-mapping.c
index e4dd0646e859..ac6a36142fcd 100644
--- a/arch/arm/mm/dma-mapping.c
+++ b/arch/arm/mm/dma-mapping.c
@@ -198,7 +198,7 @@ __dma_alloc_remap(struct page *page, size_t size, gfp_t gfp, pgprot_t prot)
* fragmentation of the DMA space, and also prevents allocations
* smaller than a section from crossing a section boundary.
*/
- bit = fls(size - 1) + 1;
+ bit = fls(size - 1);
if (bit > SECTION_SHIFT)
bit = SECTION_SHIFT;
align = 1 << bit;
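The fix drops the stray `+ 1`: for a power-of-two size, fls(size - 1) already returns the allocation order in bits, so the old code doubled the alignment. A small sketch with a portable fls() showing the corrected value for a 1 MiB request.

#include <stdio.h>

/* fls(): index of the highest set bit, 1-based; fls(0) == 0, like the kernel's. */
static int fls_u32(unsigned int x)
{
	return x ? 32 - __builtin_clz(x) : 0;
}

int main(void)
{
	unsigned int size = 1 << 20;		/* 1 MiB request */
	int bit = fls_u32(size - 1);		/* 20, not 21 */

	printf("align = %u\n", 1u << bit);	/* 1048576: natural alignment */
	return 0;
}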
diff --git a/arch/arm/mm/fault-armv.c b/arch/arm/mm/fault-armv.c
index 8440d952ba6d..83e59f870426 100644
--- a/arch/arm/mm/fault-armv.c
+++ b/arch/arm/mm/fault-armv.c
@@ -66,6 +66,30 @@ static int do_adjust_pte(struct vm_area_struct *vma, unsigned long address,
return ret;
}
+#if USE_SPLIT_PTLOCKS
+/*
+ * If we are using split PTE locks, then we need to take the page
+ * lock here. Otherwise we are using shared mm->page_table_lock
+ * which is already locked, thus cannot take it.
+ */
+static inline void do_pte_lock(spinlock_t *ptl)
+{
+ /*
+ * Use nested version here to indicate that we are already
+ * holding one similar spinlock.
+ */
+ spin_lock_nested(ptl, SINGLE_DEPTH_NESTING);
+}
+
+static inline void do_pte_unlock(spinlock_t *ptl)
+{
+ spin_unlock(ptl);
+}
+#else /* !USE_SPLIT_PTLOCKS */
+static inline void do_pte_lock(spinlock_t *ptl) {}
+static inline void do_pte_unlock(spinlock_t *ptl) {}
+#endif /* USE_SPLIT_PTLOCKS */
+
static int adjust_pte(struct vm_area_struct *vma, unsigned long address,
unsigned long pfn)
{
@@ -89,13 +113,13 @@ static int adjust_pte(struct vm_area_struct *vma, unsigned long address,
* open-code the spin-locking.
*/
ptl = pte_lockptr(vma->vm_mm, pmd);
- pte = pte_offset_map_nested(pmd, address);
- spin_lock(ptl);
+ pte = pte_offset_map(pmd, address);
+ do_pte_lock(ptl);
ret = do_adjust_pte(vma, address, pfn, pte);
- spin_unlock(ptl);
- pte_unmap_nested(pte);
+ do_pte_unlock(ptl);
+ pte_unmap(pte);
return ret;
}
diff --git a/arch/arm/mm/highmem.c b/arch/arm/mm/highmem.c
index 1fbdb55bfd1b..c435fd9e1da9 100644
--- a/arch/arm/mm/highmem.c
+++ b/arch/arm/mm/highmem.c
@@ -36,18 +36,17 @@ void kunmap(struct page *page)
}
EXPORT_SYMBOL(kunmap);
-void *kmap_atomic(struct page *page, enum km_type type)
+void *__kmap_atomic(struct page *page)
{
unsigned int idx;
unsigned long vaddr;
void *kmap;
+ int type;
pagefault_disable();
if (!PageHighMem(page))
return page_address(page);
- debug_kmap_atomic(type);
-
#ifdef CONFIG_DEBUG_HIGHMEM
/*
* There is no cache coherency issue when non VIVT, so force the
@@ -61,6 +60,8 @@ void *kmap_atomic(struct page *page, enum km_type type)
if (kmap)
return kmap;
+ type = kmap_atomic_idx_push();
+
idx = type + KM_TYPE_NR * smp_processor_id();
vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
#ifdef CONFIG_DEBUG_HIGHMEM
@@ -80,14 +81,17 @@ void *kmap_atomic(struct page *page, enum km_type type)
return (void *)vaddr;
}
-EXPORT_SYMBOL(kmap_atomic);
+EXPORT_SYMBOL(__kmap_atomic);
-void kunmap_atomic_notypecheck(void *kvaddr, enum km_type type)
+void __kunmap_atomic(void *kvaddr)
{
unsigned long vaddr = (unsigned long) kvaddr & PAGE_MASK;
- unsigned int idx = type + KM_TYPE_NR * smp_processor_id();
+ int idx, type;
if (kvaddr >= (void *)FIXADDR_START) {
+ type = kmap_atomic_idx();
+ idx = type + KM_TYPE_NR * smp_processor_id();
+
if (cache_is_vivt())
__cpuc_flush_dcache_area((void *)vaddr, PAGE_SIZE);
#ifdef CONFIG_DEBUG_HIGHMEM
@@ -97,21 +101,23 @@ void kunmap_atomic_notypecheck(void *kvaddr, enum km_type type)
#else
(void) idx; /* to kill a warning */
#endif
+ kmap_atomic_idx_pop();
} else if (vaddr >= PKMAP_ADDR(0) && vaddr < PKMAP_ADDR(LAST_PKMAP)) {
/* this address was obtained through kmap_high_get() */
kunmap_high(pte_page(pkmap_page_table[PKMAP_NR(vaddr)]));
}
pagefault_enable();
}
-EXPORT_SYMBOL(kunmap_atomic_notypecheck);
+EXPORT_SYMBOL(__kunmap_atomic);
-void *kmap_atomic_pfn(unsigned long pfn, enum km_type type)
+void *kmap_atomic_pfn(unsigned long pfn)
{
- unsigned int idx;
unsigned long vaddr;
+ int idx, type;
pagefault_disable();
+ type = kmap_atomic_idx_push();
idx = type + KM_TYPE_NR * smp_processor_id();
vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
#ifdef CONFIG_DEBUG_HIGHMEM
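Instead of the caller passing an enum km_type slot, the reworked kmap_atomic keeps a small per-CPU stack of fixmap slots: push on map, pop on unmap, strictly LIFO. A hedged single-threaded sketch of that index stack; the real kmap_atomic_idx_push/pop helpers are per-CPU and preemption-safe.

#include <assert.h>

#define KM_TYPE_NR 16

static int kmap_idx;	/* per-CPU in the kernel; one instance is enough here */

static int kmap_atomic_idx_push(void)
{
	assert(kmap_idx < KM_TYPE_NR);	/* too many nested atomic mappings */
	return kmap_idx++;
}

static int kmap_atomic_idx(void)
{
	return kmap_idx - 1;		/* slot of the most recent mapping */
}

static void kmap_atomic_idx_pop(void)
{
	assert(kmap_idx > 0);
	kmap_idx--;
}

int main(void)
{
	int a = kmap_atomic_idx_push();	/* slot 0 */
	int b = kmap_atomic_idx_push();	/* slot 1: nested mapping */

	assert(kmap_atomic_idx() == b);
	kmap_atomic_idx_pop();		/* unmap in reverse order */
	assert(kmap_atomic_idx() == a);
	kmap_atomic_idx_pop();
	return 0;
}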
diff --git a/arch/arm/mm/init.c b/arch/arm/mm/init.c
index 7fd9b5eb177f..5164069ced42 100644
--- a/arch/arm/mm/init.c
+++ b/arch/arm/mm/init.c
@@ -18,6 +18,7 @@
#include <linux/highmem.h>
#include <linux/gfp.h>
#include <linux/memblock.h>
+#include <linux/sort.h>
#include <asm/mach-types.h>
#include <asm/sections.h>
@@ -121,9 +122,10 @@ void show_mem(void)
printk("%d pages swap cached\n", cached);
}
-static void __init find_limits(struct meminfo *mi,
- unsigned long *min, unsigned long *max_low, unsigned long *max_high)
+static void __init find_limits(unsigned long *min, unsigned long *max_low,
+ unsigned long *max_high)
{
+ struct meminfo *mi = &meminfo;
int i;
*min = -1UL;
@@ -147,14 +149,13 @@ static void __init find_limits(struct meminfo *mi,
}
}
-static void __init arm_bootmem_init(struct meminfo *mi,
- unsigned long start_pfn, unsigned long end_pfn)
+static void __init arm_bootmem_init(unsigned long start_pfn,
+ unsigned long end_pfn)
{
struct memblock_region *reg;
unsigned int boot_pages;
phys_addr_t bitmap;
pg_data_t *pgdat;
- int i;
/*
* Allocate the bootmem bitmap page. This must be in a region
@@ -172,30 +173,39 @@ static void __init arm_bootmem_init(struct meminfo *mi,
pgdat = NODE_DATA(0);
init_bootmem_node(pgdat, __phys_to_pfn(bitmap), start_pfn, end_pfn);
- for_each_bank(i, mi) {
- struct membank *bank = &mi->bank[i];
- if (!bank->highmem)
- free_bootmem(bank_phys_start(bank), bank_phys_size(bank));
+ /* Free the lowmem regions from memblock into bootmem. */
+ for_each_memblock(memory, reg) {
+ unsigned long start = memblock_region_memory_base_pfn(reg);
+ unsigned long end = memblock_region_memory_end_pfn(reg);
+
+ if (end >= end_pfn)
+ end = end_pfn;
+ if (start >= end)
+ break;
+
+ free_bootmem(__pfn_to_phys(start), (end - start) << PAGE_SHIFT);
}
- /*
- * Reserve the memblock reserved regions in bootmem.
- */
+ /* Reserve the lowmem memblock reserved regions in bootmem. */
for_each_memblock(reserved, reg) {
- phys_addr_t start = memblock_region_reserved_base_pfn(reg);
- phys_addr_t end = memblock_region_reserved_end_pfn(reg);
- if (start >= start_pfn && end <= end_pfn)
- reserve_bootmem_node(pgdat, __pfn_to_phys(start),
- (end - start) << PAGE_SHIFT,
- BOOTMEM_DEFAULT);
+ unsigned long start = memblock_region_reserved_base_pfn(reg);
+ unsigned long end = memblock_region_reserved_end_pfn(reg);
+
+ if (end >= end_pfn)
+ end = end_pfn;
+ if (start >= end)
+ break;
+
+ reserve_bootmem(__pfn_to_phys(start),
+ (end - start) << PAGE_SHIFT, BOOTMEM_DEFAULT);
}
}
-static void __init arm_bootmem_free(struct meminfo *mi, unsigned long min,
- unsigned long max_low, unsigned long max_high)
+static void __init arm_bootmem_free(unsigned long min, unsigned long max_low,
+ unsigned long max_high)
{
unsigned long zone_size[MAX_NR_ZONES], zhole_size[MAX_NR_ZONES];
- int i;
+ struct memblock_region *reg;
/*
* initialise the zones.
@@ -217,13 +227,20 @@ static void __init arm_bootmem_free(struct meminfo *mi, unsigned long min,
* holes = node_size - sum(bank_sizes)
*/
memcpy(zhole_size, zone_size, sizeof(zhole_size));
- for_each_bank(i, mi) {
- int idx = 0;
+ for_each_memblock(memory, reg) {
+ unsigned long start = memblock_region_memory_base_pfn(reg);
+ unsigned long end = memblock_region_memory_end_pfn(reg);
+
+ if (start < max_low) {
+ unsigned long low_end = min(end, max_low);
+ zhole_size[0] -= low_end - start;
+ }
#ifdef CONFIG_HIGHMEM
- if (mi->bank[i].highmem)
- idx = ZONE_HIGHMEM;
+ if (end > max_low) {
+ unsigned long high_start = max(start, max_low);
+ zhole_size[ZONE_HIGHMEM] -= end - high_start;
+ }
#endif
- zhole_size[idx] -= bank_pfn_size(&mi->bank[i]);
}
/*
@@ -256,10 +273,19 @@ static void arm_memory_present(void)
}
#endif
+static int __init meminfo_cmp(const void *_a, const void *_b)
+{
+ const struct membank *a = _a, *b = _b;
+ long cmp = bank_pfn_start(a) - bank_pfn_start(b);
+ return cmp < 0 ? -1 : cmp > 0 ? 1 : 0;
+}
+
void __init arm_memblock_init(struct meminfo *mi, struct machine_desc *mdesc)
{
int i;
+ sort(&meminfo.bank, meminfo.nr_banks, sizeof(meminfo.bank[0]), meminfo_cmp, NULL);
+
memblock_init();
for (i = 0; i < mi->nr_banks; i++)
memblock_add(mi->bank[i].start, mi->bank[i].size);
@@ -292,14 +318,13 @@ void __init arm_memblock_init(struct meminfo *mi, struct machine_desc *mdesc)
void __init bootmem_init(void)
{
- struct meminfo *mi = &meminfo;
unsigned long min, max_low, max_high;
max_low = max_high = 0;
- find_limits(mi, &min, &max_low, &max_high);
+ find_limits(&min, &max_low, &max_high);
- arm_bootmem_init(mi, min, max_low);
+ arm_bootmem_init(min, max_low);
/*
* Sparsemem tries to allocate bootmem in memory_present(),
@@ -317,7 +342,7 @@ void __init bootmem_init(void)
* the sparse mem_map arrays initialized by sparse_init()
* for memmap_init_zone(), otherwise all PFNs are invalid.
*/
- arm_bootmem_free(mi, min, max_low, max_high);
+ arm_bootmem_free(min, max_low, max_high);
high_memory = __va((max_low << PAGE_SHIFT) - 1) + 1;
@@ -411,6 +436,56 @@ static void __init free_unused_memmap(struct meminfo *mi)
}
}
+static void __init free_highpages(void)
+{
+#ifdef CONFIG_HIGHMEM
+ unsigned long max_low = max_low_pfn + PHYS_PFN_OFFSET;
+ struct memblock_region *mem, *res;
+
+ /* set highmem page free */
+ for_each_memblock(memory, mem) {
+ unsigned long start = memblock_region_memory_base_pfn(mem);
+ unsigned long end = memblock_region_memory_end_pfn(mem);
+
+ /* Ignore complete lowmem entries */
+ if (end <= max_low)
+ continue;
+
+ /* Truncate partial highmem entries */
+ if (start < max_low)
+ start = max_low;
+
+ /* Find and exclude any reserved regions */
+ for_each_memblock(reserved, res) {
+ unsigned long res_start, res_end;
+
+ res_start = memblock_region_reserved_base_pfn(res);
+ res_end = memblock_region_reserved_end_pfn(res);
+
+ if (res_end < start)
+ continue;
+ if (res_start < start)
+ res_start = start;
+ if (res_start > end)
+ res_start = end;
+ if (res_end > end)
+ res_end = end;
+ if (res_start != start)
+ totalhigh_pages += free_area(start, res_start,
+ NULL);
+ start = res_end;
+ if (start == end)
+ break;
+ }
+
+ /* And now free anything which remains */
+ if (start < end)
+ totalhigh_pages += free_area(start, end, NULL);
+ }
+ totalram_pages += totalhigh_pages;
+#endif
+}
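free_highpages() walks each highmem bank and carves out every reserved memblock range before freeing what remains. A standalone sketch of the same subtraction over pfn ranges, assuming, as the code does, that the reserved list is sorted and non-overlapping.

#include <stdio.h>

struct range { unsigned long start, end; };	/* [start, end) in pfns */

/* Print the sub-ranges of [start, end) not covered by the sorted,
 * non-overlapping reserved[] list - the pieces free_highpages() frees. */
static void free_unreserved(unsigned long start, unsigned long end,
			    const struct range *res, int nres)
{
	for (int i = 0; i < nres; i++) {
		unsigned long rs = res[i].start, re = res[i].end;

		if (re < start)
			continue;
		if (rs < start)
			rs = start;
		if (rs > end)
			rs = end;
		if (re > end)
			re = end;
		if (rs != start)
			printf("free [%lu, %lu)\n", start, rs);
		start = re;
		if (start == end)
			break;
	}
	if (start < end)
		printf("free [%lu, %lu)\n", start, end);
}

int main(void)
{
	const struct range reserved[] = { { 120, 130 }, { 150, 160 } };

	free_unreserved(100, 200, reserved, 2);	/* [100,120) [130,150) [160,200) */
	return 0;
}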
+
/*
* mem_init() marks the free areas in the mem_map and tells us how much
* memory is free. This is done after various parts of the system have
@@ -419,6 +494,7 @@ static void __init free_unused_memmap(struct meminfo *mi)
void __init mem_init(void)
{
unsigned long reserved_pages, free_pages;
+ struct memblock_region *reg;
int i;
#ifdef CONFIG_HAVE_TCM
/* These pointers are filled in on TCM detection */
@@ -439,16 +515,7 @@ void __init mem_init(void)
__phys_to_pfn(__pa(swapper_pg_dir)), NULL);
#endif
-#ifdef CONFIG_HIGHMEM
- /* set highmem page free */
- for_each_bank (i, &meminfo) {
- unsigned long start = bank_pfn_start(&meminfo.bank[i]);
- unsigned long end = bank_pfn_end(&meminfo.bank[i]);
- if (start >= max_low_pfn + PHYS_PFN_OFFSET)
- totalhigh_pages += free_area(start, end, NULL);
- }
- totalram_pages += totalhigh_pages;
-#endif
+ free_highpages();
reserved_pages = free_pages = 0;
@@ -478,9 +545,11 @@ void __init mem_init(void)
*/
printk(KERN_INFO "Memory:");
num_physpages = 0;
- for (i = 0; i < meminfo.nr_banks; i++) {
- num_physpages += bank_pfn_size(&meminfo.bank[i]);
- printk(" %ldMB", bank_phys_size(&meminfo.bank[i]) >> 20);
+ for_each_memblock(memory, reg) {
+ unsigned long pages = memblock_region_memory_end_pfn(reg) -
+ memblock_region_memory_base_pfn(reg);
+ num_physpages += pages;
+ printk(" %ldMB", pages >> (20 - PAGE_SHIFT));
}
printk(" = %luMB total\n", num_physpages >> (20 - PAGE_SHIFT));
diff --git a/arch/arm/mm/ioremap.c b/arch/arm/mm/ioremap.c
index 17e7b0b57e49..55c17a6fb22f 100644
--- a/arch/arm/mm/ioremap.c
+++ b/arch/arm/mm/ioremap.c
@@ -206,8 +206,8 @@ void __iomem * __arm_ioremap_pfn_caller(unsigned long pfn,
*/
if (pfn_valid(pfn)) {
printk(KERN_WARNING "BUG: Your driver calls ioremap() on system memory. This leads\n"
- KERN_WARNING "to architecturally unpredictable behaviour on ARMv6+, and ioremap()\n"
- KERN_WARNING "will fail in the next kernel release. Please fix your driver.\n");
+ "to architecturally unpredictable behaviour on ARMv6+, and ioremap()\n"
+ "will fail in the next kernel release. Please fix your driver.\n");
WARN_ON(1);
}
diff --git a/arch/arm/mm/mmu.c b/arch/arm/mm/mmu.c
index c32f731d56d3..72ad3e1f56cf 100644
--- a/arch/arm/mm/mmu.c
+++ b/arch/arm/mm/mmu.c
@@ -14,7 +14,6 @@
#include <linux/mman.h>
#include <linux/nodemask.h>
#include <linux/memblock.h>
-#include <linux/sort.h>
#include <linux/fs.h>
#include <asm/cputype.h>
@@ -265,17 +264,17 @@ static struct mem_type mem_types[] = {
.domain = DOMAIN_KERNEL,
},
[MT_MEMORY_DTCM] = {
- .prot_pte = L_PTE_PRESENT | L_PTE_YOUNG |
- L_PTE_DIRTY | L_PTE_WRITE,
- .prot_l1 = PMD_TYPE_TABLE,
- .prot_sect = PMD_TYPE_SECT | PMD_SECT_XN,
- .domain = DOMAIN_KERNEL,
+ .prot_pte = L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_DIRTY |
+ L_PTE_WRITE,
+ .prot_l1 = PMD_TYPE_TABLE,
+ .prot_sect = PMD_TYPE_SECT | PMD_SECT_XN,
+ .domain = DOMAIN_KERNEL,
},
[MT_MEMORY_ITCM] = {
.prot_pte = L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_DIRTY |
- L_PTE_USER | L_PTE_EXEC,
+ L_PTE_WRITE | L_PTE_EXEC,
.prot_l1 = PMD_TYPE_TABLE,
- .domain = DOMAIN_IO,
+ .domain = DOMAIN_KERNEL,
},
};
@@ -745,13 +744,14 @@ static int __init early_vmalloc(char *arg)
}
early_param("vmalloc", early_vmalloc);
-phys_addr_t lowmem_end_addr;
+static phys_addr_t lowmem_limit __initdata = 0;
static void __init sanity_check_meminfo(void)
{
int i, j, highmem = 0;
- lowmem_end_addr = __pa(vmalloc_min - 1) + 1;
+ lowmem_limit = __pa(vmalloc_min - 1) + 1;
+ memblock_set_current_limit(lowmem_limit);
for (i = 0, j = 0; i < meminfo.nr_banks; i++) {
struct membank *bank = &meminfo.bank[j];
@@ -852,6 +852,7 @@ static void __init sanity_check_meminfo(void)
static inline void prepare_page_table(void)
{
unsigned long addr;
+ phys_addr_t end;
/*
* Clear out all the mappings below the kernel image.
@@ -867,10 +868,17 @@ static inline void prepare_page_table(void)
pmd_clear(pmd_off_k(addr));
/*
+ * Find the end of the first block of lowmem.
+ */
+ end = memblock.memory.regions[0].base + memblock.memory.regions[0].size;
+ if (end >= lowmem_limit)
+ end = lowmem_limit;
+
+ /*
* Clear out all the kernel space mappings, except for the first
* memory bank, up to the end of the vmalloc region.
*/
- for (addr = __phys_to_virt(bank_phys_end(&meminfo.bank[0]));
+ for (addr = __phys_to_virt(end);
addr < VMALLOC_END; addr += PGDIR_SIZE)
pmd_clear(pmd_off_k(addr));
}
@@ -987,37 +995,28 @@ static void __init kmap_init(void)
#endif
}
-static inline void map_memory_bank(struct membank *bank)
-{
- struct map_desc map;
-
- map.pfn = bank_pfn_start(bank);
- map.virtual = __phys_to_virt(bank_phys_start(bank));
- map.length = bank_phys_size(bank);
- map.type = MT_MEMORY;
-
- create_mapping(&map);
-}
-
static void __init map_lowmem(void)
{
- struct meminfo *mi = &meminfo;
- int i;
+ struct memblock_region *reg;
/* Map all the lowmem memory banks. */
- for (i = 0; i < mi->nr_banks; i++) {
- struct membank *bank = &mi->bank[i];
+ for_each_memblock(memory, reg) {
+ phys_addr_t start = reg->base;
+ phys_addr_t end = start + reg->size;
+ struct map_desc map;
+
+ if (end > lowmem_limit)
+ end = lowmem_limit;
+ if (start >= end)
+ break;
- if (!bank->highmem)
- map_memory_bank(bank);
- }
-}
+ map.pfn = __phys_to_pfn(start);
+ map.virtual = __phys_to_virt(start);
+ map.length = end - start;
+ map.type = MT_MEMORY;
-static int __init meminfo_cmp(const void *_a, const void *_b)
-{
- const struct membank *a = _a, *b = _b;
- long cmp = bank_pfn_start(a) - bank_pfn_start(b);
- return cmp < 0 ? -1 : cmp > 0 ? 1 : 0;
+ create_mapping(&map);
+ }
}
/*
@@ -1028,8 +1027,6 @@ void __init paging_init(struct machine_desc *mdesc)
{
void *zero_page;
- sort(&meminfo.bank, meminfo.nr_banks, sizeof(meminfo.bank[0]), meminfo_cmp, NULL);
-
build_mem_type_table();
sanity_check_meminfo();
prepare_page_table();
diff --git a/arch/arm/mm/pgd.c b/arch/arm/mm/pgd.c
index be5f58e153bf..69bbfc6645a6 100644
--- a/arch/arm/mm/pgd.c
+++ b/arch/arm/mm/pgd.c
@@ -57,9 +57,9 @@ pgd_t *get_pgd_slow(struct mm_struct *mm)
goto no_pte;
init_pmd = pmd_offset(init_pgd, 0);
- init_pte = pte_offset_map_nested(init_pmd, 0);
+ init_pte = pte_offset_map(init_pmd, 0);
set_pte_ext(new_pte, *init_pte, 0);
- pte_unmap_nested(init_pte);
+ pte_unmap(init_pte);
pte_unmap(new_pte);
}
diff --git a/arch/arm/mm/proc-arm1020.S b/arch/arm/mm/proc-arm1020.S
index a6f5f8475b96..bcf748d9f4e2 100644
--- a/arch/arm/mm/proc-arm1020.S
+++ b/arch/arm/mm/proc-arm1020.S
@@ -119,6 +119,20 @@ ENTRY(cpu_arm1020_do_idle)
/* ================================= CACHE ================================ */
.align 5
+
+/*
+ * flush_icache_all()
+ *
+ * Unconditionally clean and invalidate the entire icache.
+ */
+ENTRY(arm1020_flush_icache_all)
+#ifndef CONFIG_CPU_ICACHE_DISABLE
+ mov r0, #0
+ mcr p15, 0, r0, c7, c5, 0 @ invalidate I cache
+#endif
+ mov pc, lr
+ENDPROC(arm1020_flush_icache_all)
+
/*
* flush_user_cache_all()
*
@@ -351,6 +365,7 @@ ENTRY(arm1020_dma_unmap_area)
ENDPROC(arm1020_dma_unmap_area)
ENTRY(arm1020_cache_fns)
+ .long arm1020_flush_icache_all
.long arm1020_flush_kern_cache_all
.long arm1020_flush_user_cache_all
.long arm1020_flush_user_cache_range
diff --git a/arch/arm/mm/proc-arm1020e.S b/arch/arm/mm/proc-arm1020e.S
index afc06b9c3133..ab7ec26657ea 100644
--- a/arch/arm/mm/proc-arm1020e.S
+++ b/arch/arm/mm/proc-arm1020e.S
@@ -119,6 +119,20 @@ ENTRY(cpu_arm1020e_do_idle)
/* ================================= CACHE ================================ */
.align 5
+
+/*
+ * flush_icache_all()
+ *
+ * Unconditionally clean and invalidate the entire icache.
+ */
+ENTRY(arm1020e_flush_icache_all)
+#ifndef CONFIG_CPU_ICACHE_DISABLE
+ mov r0, #0
+ mcr p15, 0, r0, c7, c5, 0 @ invalidate I cache
+#endif
+ mov pc, lr
+ENDPROC(arm1020e_flush_icache_all)
+
/*
* flush_user_cache_all()
*
@@ -337,6 +351,7 @@ ENTRY(arm1020e_dma_unmap_area)
ENDPROC(arm1020e_dma_unmap_area)
ENTRY(arm1020e_cache_fns)
+ .long arm1020e_flush_icache_all
.long arm1020e_flush_kern_cache_all
.long arm1020e_flush_user_cache_all
.long arm1020e_flush_user_cache_range
diff --git a/arch/arm/mm/proc-arm1022.S b/arch/arm/mm/proc-arm1022.S
index 8915e0ba3fe5..831c5e54e22f 100644
--- a/arch/arm/mm/proc-arm1022.S
+++ b/arch/arm/mm/proc-arm1022.S
@@ -108,6 +108,20 @@ ENTRY(cpu_arm1022_do_idle)
/* ================================= CACHE ================================ */
.align 5
+
+/*
+ * flush_icache_all()
+ *
+ * Unconditionally clean and invalidate the entire icache.
+ */
+ENTRY(arm1022_flush_icache_all)
+#ifndef CONFIG_CPU_ICACHE_DISABLE
+ mov r0, #0
+ mcr p15, 0, r0, c7, c5, 0 @ invalidate I cache
+#endif
+ mov pc, lr
+ENDPROC(arm1022_flush_icache_all)
+
/*
* flush_user_cache_all()
*
@@ -326,6 +340,7 @@ ENTRY(arm1022_dma_unmap_area)
ENDPROC(arm1022_dma_unmap_area)
ENTRY(arm1022_cache_fns)
+ .long arm1022_flush_icache_all
.long arm1022_flush_kern_cache_all
.long arm1022_flush_user_cache_all
.long arm1022_flush_user_cache_range
diff --git a/arch/arm/mm/proc-arm1026.S b/arch/arm/mm/proc-arm1026.S
index ff446c5d476f..e3f7e9a166bf 100644
--- a/arch/arm/mm/proc-arm1026.S
+++ b/arch/arm/mm/proc-arm1026.S
@@ -108,6 +108,20 @@ ENTRY(cpu_arm1026_do_idle)
/* ================================= CACHE ================================ */
.align 5
+
+/*
+ * flush_icache_all()
+ *
+ * Unconditionally clean and invalidate the entire icache.
+ */
+ENTRY(arm1026_flush_icache_all)
+#ifndef CONFIG_CPU_ICACHE_DISABLE
+ mov r0, #0
+ mcr p15, 0, r0, c7, c5, 0 @ invalidate I cache
+#endif
+ mov pc, lr
+ENDPROC(arm1026_flush_icache_all)
+
/*
* flush_user_cache_all()
*
@@ -320,6 +334,7 @@ ENTRY(arm1026_dma_unmap_area)
ENDPROC(arm1026_dma_unmap_area)
ENTRY(arm1026_cache_fns)
+ .long arm1026_flush_icache_all
.long arm1026_flush_kern_cache_all
.long arm1026_flush_user_cache_all
.long arm1026_flush_user_cache_range
diff --git a/arch/arm/mm/proc-arm920.S b/arch/arm/mm/proc-arm920.S
index fecf570939f3..6109f278a904 100644
--- a/arch/arm/mm/proc-arm920.S
+++ b/arch/arm/mm/proc-arm920.S
@@ -110,6 +110,17 @@ ENTRY(cpu_arm920_do_idle)
#ifndef CONFIG_CPU_DCACHE_WRITETHROUGH
/*
+ * flush_icache_all()
+ *
+ * Unconditionally clean and invalidate the entire icache.
+ */
+ENTRY(arm920_flush_icache_all)
+ mov r0, #0
+ mcr p15, 0, r0, c7, c5, 0 @ invalidate I cache
+ mov pc, lr
+ENDPROC(arm920_flush_icache_all)
+
+/*
* flush_user_cache_all()
*
* Invalidate all cache entries in a particular address
@@ -305,6 +316,7 @@ ENTRY(arm920_dma_unmap_area)
ENDPROC(arm920_dma_unmap_area)
ENTRY(arm920_cache_fns)
+ .long arm920_flush_icache_all
.long arm920_flush_kern_cache_all
.long arm920_flush_user_cache_all
.long arm920_flush_user_cache_range
diff --git a/arch/arm/mm/proc-arm922.S b/arch/arm/mm/proc-arm922.S
index e3cbf87c9480..bb2f0f46a5e6 100644
--- a/arch/arm/mm/proc-arm922.S
+++ b/arch/arm/mm/proc-arm922.S
@@ -112,6 +112,17 @@ ENTRY(cpu_arm922_do_idle)
#ifndef CONFIG_CPU_DCACHE_WRITETHROUGH
/*
+ * flush_icache_all()
+ *
+ * Unconditionally clean and invalidate the entire icache.
+ */
+ENTRY(arm922_flush_icache_all)
+ mov r0, #0
+ mcr p15, 0, r0, c7, c5, 0 @ invalidate I cache
+ mov pc, lr
+ENDPROC(arm922_flush_icache_all)
+
+/*
* flush_user_cache_all()
*
* Clean and invalidate all cache entries in a particular
@@ -307,6 +318,7 @@ ENTRY(arm922_dma_unmap_area)
ENDPROC(arm922_dma_unmap_area)
ENTRY(arm922_cache_fns)
+ .long arm922_flush_icache_all
.long arm922_flush_kern_cache_all
.long arm922_flush_user_cache_all
.long arm922_flush_user_cache_range
diff --git a/arch/arm/mm/proc-arm925.S b/arch/arm/mm/proc-arm925.S
index 572424c867b5..c13e01accfe2 100644
--- a/arch/arm/mm/proc-arm925.S
+++ b/arch/arm/mm/proc-arm925.S
@@ -145,6 +145,17 @@ ENTRY(cpu_arm925_do_idle)
mov pc, lr
/*
+ * flush_icache_all()
+ *
+ * Unconditionally clean and invalidate the entire icache.
+ */
+ENTRY(arm925_flush_icache_all)
+ mov r0, #0
+ mcr p15, 0, r0, c7, c5, 0 @ invalidate I cache
+ mov pc, lr
+ENDPROC(arm925_flush_icache_all)
+
+/*
* flush_user_cache_all()
*
* Clean and invalidate all cache entries in a particular
@@ -362,6 +373,7 @@ ENTRY(arm925_dma_unmap_area)
ENDPROC(arm925_dma_unmap_area)
ENTRY(arm925_cache_fns)
+ .long arm925_flush_icache_all
.long arm925_flush_kern_cache_all
.long arm925_flush_user_cache_all
.long arm925_flush_user_cache_range
diff --git a/arch/arm/mm/proc-arm926.S b/arch/arm/mm/proc-arm926.S
index 63d168b4ebe6..42eb4315740b 100644
--- a/arch/arm/mm/proc-arm926.S
+++ b/arch/arm/mm/proc-arm926.S
@@ -111,6 +111,17 @@ ENTRY(cpu_arm926_do_idle)
mov pc, lr
/*
+ * flush_icache_all()
+ *
+ * Unconditionally clean and invalidate the entire icache.
+ */
+ENTRY(arm926_flush_icache_all)
+ mov r0, #0
+ mcr p15, 0, r0, c7, c5, 0 @ invalidate I cache
+ mov pc, lr
+ENDPROC(arm926_flush_icache_all)
+
+/*
* flush_user_cache_all()
*
* Clean and invalidate all cache entries in a particular
@@ -325,6 +336,7 @@ ENTRY(arm926_dma_unmap_area)
ENDPROC(arm926_dma_unmap_area)
ENTRY(arm926_cache_fns)
+ .long arm926_flush_icache_all
.long arm926_flush_kern_cache_all
.long arm926_flush_user_cache_all
.long arm926_flush_user_cache_range
diff --git a/arch/arm/mm/proc-arm940.S b/arch/arm/mm/proc-arm940.S
index f6a62822418e..7b11cdb9935f 100644
--- a/arch/arm/mm/proc-arm940.S
+++ b/arch/arm/mm/proc-arm940.S
@@ -68,6 +68,17 @@ ENTRY(cpu_arm940_do_idle)
mov pc, lr
/*
+ * flush_icache_all()
+ *
+ * Unconditionally clean and invalidate the entire icache.
+ */
+ENTRY(arm940_flush_icache_all)
+ mov r0, #0
+ mcr p15, 0, r0, c7, c5, 0 @ invalidate I cache
+ mov pc, lr
+ENDPROC(arm940_flush_icache_all)
+
+/*
* flush_user_cache_all()
*/
ENTRY(arm940_flush_user_cache_all)
@@ -254,6 +265,7 @@ ENTRY(arm940_dma_unmap_area)
ENDPROC(arm940_dma_unmap_area)
ENTRY(arm940_cache_fns)
+ .long arm940_flush_icache_all
.long arm940_flush_kern_cache_all
.long arm940_flush_user_cache_all
.long arm940_flush_user_cache_range
diff --git a/arch/arm/mm/proc-arm946.S b/arch/arm/mm/proc-arm946.S
index ea2e7f2eb95b..1a5bbf080342 100644
--- a/arch/arm/mm/proc-arm946.S
+++ b/arch/arm/mm/proc-arm946.S
@@ -75,6 +75,17 @@ ENTRY(cpu_arm946_do_idle)
mov pc, lr
/*
+ * flush_icache_all()
+ *
+ * Unconditionally clean and invalidate the entire icache.
+ */
+ENTRY(arm946_flush_icache_all)
+ mov r0, #0
+ mcr p15, 0, r0, c7, c5, 0 @ invalidate I cache
+ mov pc, lr
+ENDPROC(arm946_flush_icache_all)
+
+/*
* flush_user_cache_all()
*/
ENTRY(arm946_flush_user_cache_all)
@@ -296,6 +307,7 @@ ENTRY(arm946_dma_unmap_area)
ENDPROC(arm946_dma_unmap_area)
ENTRY(arm946_cache_fns)
+ .long arm946_flush_icache_all
.long arm946_flush_kern_cache_all
.long arm946_flush_user_cache_all
.long arm946_flush_user_cache_range
diff --git a/arch/arm/mm/proc-feroceon.S b/arch/arm/mm/proc-feroceon.S
index 578da69200cf..b4597edbff97 100644
--- a/arch/arm/mm/proc-feroceon.S
+++ b/arch/arm/mm/proc-feroceon.S
@@ -124,6 +124,17 @@ ENTRY(cpu_feroceon_do_idle)
mov pc, lr
/*
+ * flush_icache_all()
+ *
+ * Unconditionally clean and invalidate the entire icache.
+ */
+ENTRY(feroceon_flush_icache_all)
+ mov r0, #0
+ mcr p15, 0, r0, c7, c5, 0 @ invalidate I cache
+ mov pc, lr
+ENDPROC(feroceon_flush_icache_all)
+
+/*
* flush_user_cache_all()
*
* Clean and invalidate all cache entries in a particular
@@ -401,6 +412,7 @@ ENTRY(feroceon_dma_unmap_area)
ENDPROC(feroceon_dma_unmap_area)
ENTRY(feroceon_cache_fns)
+ .long feroceon_flush_icache_all
.long feroceon_flush_kern_cache_all
.long feroceon_flush_user_cache_all
.long feroceon_flush_user_cache_range
@@ -412,6 +424,7 @@ ENTRY(feroceon_cache_fns)
.long feroceon_dma_flush_range
ENTRY(feroceon_range_cache_fns)
+ .long feroceon_flush_icache_all
.long feroceon_flush_kern_cache_all
.long feroceon_flush_user_cache_all
.long feroceon_flush_user_cache_range
diff --git a/arch/arm/mm/proc-v7.S b/arch/arm/mm/proc-v7.S
index 53cbe2225153..9b9ff5d949fd 100644
--- a/arch/arm/mm/proc-v7.S
+++ b/arch/arm/mm/proc-v7.S
@@ -381,7 +381,7 @@ __v7_ca9mp_proc_info:
PMD_SECT_XN | \
PMD_SECT_AP_WRITE | \
PMD_SECT_AP_READ
- b __v7_ca9mp_setup
+ W(b) __v7_ca9mp_setup
.long cpu_arch_name
.long cpu_elf_name
.long HWCAP_SWP|HWCAP_HALF|HWCAP_THUMB|HWCAP_FAST_MULT|HWCAP_EDSP|HWCAP_TLS
@@ -413,7 +413,7 @@ __v7_proc_info:
PMD_SECT_XN | \
PMD_SECT_AP_WRITE | \
PMD_SECT_AP_READ
- b __v7_setup
+ W(b) __v7_setup
.long cpu_arch_name
.long cpu_elf_name
.long HWCAP_SWP|HWCAP_HALF|HWCAP_THUMB|HWCAP_FAST_MULT|HWCAP_EDSP|HWCAP_TLS
diff --git a/arch/arm/mm/proc-xsc3.S b/arch/arm/mm/proc-xsc3.S
index cad07e403044..ec26355cb7c2 100644
--- a/arch/arm/mm/proc-xsc3.S
+++ b/arch/arm/mm/proc-xsc3.S
@@ -141,6 +141,17 @@ ENTRY(cpu_xsc3_do_idle)
/* ================================= CACHE ================================ */
/*
+ * flush_icache_all()
+ *
+ * Unconditionally clean and invalidate the entire icache.
+ */
+ENTRY(xsc3_flush_icache_all)
+ mov r0, #0
+ mcr p15, 0, r0, c7, c5, 0 @ invalidate I cache
+ mov pc, lr
+ENDPROC(xsc3_flush_icache_all)
+
+/*
* flush_user_cache_all()
*
* Invalidate all cache entries in a particular address
@@ -325,6 +336,7 @@ ENTRY(xsc3_dma_unmap_area)
ENDPROC(xsc3_dma_unmap_area)
ENTRY(xsc3_cache_fns)
+ .long xsc3_flush_icache_all
.long xsc3_flush_kern_cache_all
.long xsc3_flush_user_cache_all
.long xsc3_flush_user_cache_range
diff --git a/arch/arm/mm/proc-xscale.S b/arch/arm/mm/proc-xscale.S
index cb245edb2c2b..523408c0bb38 100644
--- a/arch/arm/mm/proc-xscale.S
+++ b/arch/arm/mm/proc-xscale.S
@@ -181,6 +181,17 @@ ENTRY(cpu_xscale_do_idle)
/* ================================= CACHE ================================ */
/*
+ * flush_icache_all()
+ *
+ * Unconditionally clean and invalidate the entire icache.
+ */
+ENTRY(xscale_flush_icache_all)
+ mov r0, #0
+ mcr p15, 0, r0, c7, c5, 0 @ invalidate I cache
+ mov pc, lr
+ENDPROC(xscale_flush_icache_all)
+
+/*
* flush_user_cache_all()
*
* Invalidate all cache entries in a particular address
@@ -397,6 +408,7 @@ ENTRY(xscale_dma_unmap_area)
ENDPROC(xscale_dma_unmap_area)
ENTRY(xscale_cache_fns)
+ .long xscale_flush_icache_all
.long xscale_flush_kern_cache_all
.long xscale_flush_user_cache_all
.long xscale_flush_user_cache_range
diff --git a/arch/arm/plat-iop/time.c b/arch/arm/plat-iop/time.c
index 85d3e55ca4a9..558cdfaf76b6 100644
--- a/arch/arm/plat-iop/time.c
+++ b/arch/arm/plat-iop/time.c
@@ -18,6 +18,7 @@
#include <linux/time.h>
#include <linux/init.h>
#include <linux/timex.h>
+#include <linux/sched.h>
#include <linux/io.h>
#include <linux/clocksource.h>
#include <linux/clockchips.h>
@@ -36,7 +37,7 @@
/*
* IOP clocksource (free-running timer 1).
*/
-static cycle_t iop_clocksource_read(struct clocksource *unused)
+static cycle_t notrace iop_clocksource_read(struct clocksource *unused)
{
return 0xffffffffu - read_tcr1();
}
diff --git a/arch/arm/plat-mxc/Makefile b/arch/arm/plat-mxc/Makefile
index 06875b4dd70f..372670952789 100644
--- a/arch/arm/plat-mxc/Makefile
+++ b/arch/arm/plat-mxc/Makefile
@@ -18,6 +18,7 @@ obj-$(CONFIG_MXC_USE_EPIT) += epit.o
obj-$(CONFIG_ARCH_MXC_AUDMUX_V1) += audmux-v1.o
obj-$(CONFIG_ARCH_MXC_AUDMUX_V2) += audmux-v2.o
obj-$(CONFIG_MXC_DEBUG_BOARD) += 3ds_debugboard.o
+obj-$(CONFIG_CPU_FREQ_IMX) += cpufreq.o
ifdef CONFIG_SND_IMX_SOC
obj-y += ssi-fiq.o
obj-y += ssi-fiq-ksym.o
diff --git a/arch/arm/plat-mxc/cpufreq.c b/arch/arm/plat-mxc/cpufreq.c
new file mode 100644
index 000000000000..039538e68793
--- /dev/null
+++ b/arch/arm/plat-mxc/cpufreq.c
@@ -0,0 +1,206 @@
+/*
+ * Copyright (C) 2010 Freescale Semiconductor, Inc. All Rights Reserved.
+ */
+
+/*
+ * The code contained herein is licensed under the GNU General Public
+ * License. You may obtain a copy of the GNU General Public License
+ * Version 2 or later at the following locations:
+ *
+ * http://www.opensource.org/licenses/gpl-license.html
+ * http://www.gnu.org/copyleft/gpl.html
+ */
+
+/*
+ * A driver for the Freescale Semiconductor i.MXC CPUfreq module.
+ * The CPUFREQ driver is for controlling the CPU frequency. It allows you to change
+ * the CPU clock speed on the fly.
+ */
+
+#include <linux/cpufreq.h>
+#include <linux/clk.h>
+#include <linux/err.h>
+#include <linux/slab.h>
+#include <mach/hardware.h>
+#include <mach/clock.h>
+
+#define CLK32_FREQ 32768
+#define NANOSECOND (1000 * 1000 * 1000)
+
+struct cpu_op *(*get_cpu_op)(int *op);
+
+static int cpu_freq_khz_min;
+static int cpu_freq_khz_max;
+
+static struct clk *cpu_clk;
+static struct cpufreq_frequency_table *imx_freq_table;
+
+static int cpu_op_nr;
+static struct cpu_op *cpu_op_tbl;
+
+static int set_cpu_freq(int freq)
+{
+ int ret = 0;
+ int org_cpu_rate;
+
+ org_cpu_rate = clk_get_rate(cpu_clk);
+ if (org_cpu_rate == freq)
+ return ret;
+
+ ret = clk_set_rate(cpu_clk, freq);
+ if (ret != 0) {
+ printk(KERN_DEBUG "cannot set CPU clock rate\n");
+ return ret;
+ }
+
+ return ret;
+}
+
+static int mxc_verify_speed(struct cpufreq_policy *policy)
+{
+ if (policy->cpu != 0)
+ return -EINVAL;
+
+ return cpufreq_frequency_table_verify(policy, imx_freq_table);
+}
+
+static unsigned int mxc_get_speed(unsigned int cpu)
+{
+ if (cpu)
+ return 0;
+
+ return clk_get_rate(cpu_clk) / 1000;
+}
+
+static int mxc_set_target(struct cpufreq_policy *policy,
+ unsigned int target_freq, unsigned int relation)
+{
+ struct cpufreq_freqs freqs;
+ int freq_Hz;
+ int ret = 0;
+ unsigned int index;
+
+ cpufreq_frequency_table_target(policy, imx_freq_table,
+ target_freq, relation, &index);
+ freq_Hz = imx_freq_table[index].frequency * 1000;
+
+ freqs.old = clk_get_rate(cpu_clk) / 1000;
+ freqs.new = freq_Hz / 1000;
+ freqs.cpu = 0;
+ freqs.flags = 0;
+ cpufreq_notify_transition(&freqs, CPUFREQ_PRECHANGE);
+
+ ret = set_cpu_freq(freq_Hz);
+
+ cpufreq_notify_transition(&freqs, CPUFREQ_POSTCHANGE);
+
+ return ret;
+}
+
+static int __init mxc_cpufreq_init(struct cpufreq_policy *policy)
+{
+ int ret;
+ int i;
+
+ printk(KERN_INFO "i.MXC CPU frequency driver\n");
+
+ if (policy->cpu != 0)
+ return -EINVAL;
+
+ if (!get_cpu_op)
+ return -EINVAL;
+
+ cpu_clk = clk_get(NULL, "cpu_clk");
+ if (IS_ERR(cpu_clk)) {
+ printk(KERN_ERR "%s: failed to get cpu clock\n", __func__);
+ return PTR_ERR(cpu_clk);
+ }
+
+ cpu_op_tbl = get_cpu_op(&cpu_op_nr);
+
+ cpu_freq_khz_min = cpu_op_tbl[0].cpu_rate / 1000;
+ cpu_freq_khz_max = cpu_op_tbl[0].cpu_rate / 1000;
+
+ imx_freq_table = kmalloc(
+ sizeof(struct cpufreq_frequency_table) * (cpu_op_nr + 1),
+ GFP_KERNEL);
+ if (!imx_freq_table) {
+ ret = -ENOMEM;
+ goto err1;
+ }
+
+ for (i = 0; i < cpu_op_nr; i++) {
+ imx_freq_table[i].index = i;
+ imx_freq_table[i].frequency = cpu_op_tbl[i].cpu_rate / 1000;
+
+ if ((cpu_op_tbl[i].cpu_rate / 1000) < cpu_freq_khz_min)
+ cpu_freq_khz_min = cpu_op_tbl[i].cpu_rate / 1000;
+
+ if ((cpu_op_tbl[i].cpu_rate / 1000) > cpu_freq_khz_max)
+ cpu_freq_khz_max = cpu_op_tbl[i].cpu_rate / 1000;
+ }
+
+ imx_freq_table[i].index = i;
+ imx_freq_table[i].frequency = CPUFREQ_TABLE_END;
+
+ policy->cur = clk_get_rate(cpu_clk) / 1000;
+ policy->governor = CPUFREQ_DEFAULT_GOVERNOR;
+ policy->min = policy->cpuinfo.min_freq = cpu_freq_khz_min;
+ policy->max = policy->cpuinfo.max_freq = cpu_freq_khz_max;
+
+ /* Manual states that the PLL stabilizes in two CLK32 periods */
+ policy->cpuinfo.transition_latency = 2 * NANOSECOND / CLK32_FREQ;
+
+ ret = cpufreq_frequency_table_cpuinfo(policy, imx_freq_table);
+
+ if (ret < 0) {
+ printk(KERN_ERR "%s: failed to register i.MXC CPUfreq \
+ with error code %d\n", __func__, ret);
+ goto err;
+ }
+
+ cpufreq_frequency_table_get_attr(imx_freq_table, policy->cpu);
+ return 0;
+err:
+ kfree(imx_freq_table);
+err1:
+ clk_put(cpu_clk);
+ return ret;
+}
+
+static int mxc_cpufreq_exit(struct cpufreq_policy *policy)
+{
+ cpufreq_frequency_table_put_attr(policy->cpu);
+
+ set_cpu_freq(cpu_freq_khz_max * 1000);
+ clk_put(cpu_clk);
+ kfree(imx_freq_table);
+ return 0;
+}
+
+static struct cpufreq_driver mxc_driver = {
+ .flags = CPUFREQ_STICKY,
+ .verify = mxc_verify_speed,
+ .target = mxc_set_target,
+ .get = mxc_get_speed,
+ .init = mxc_cpufreq_init,
+ .exit = mxc_cpufreq_exit,
+ .name = "imx",
+};
+
+static int __devinit mxc_cpufreq_driver_init(void)
+{
+ return cpufreq_register_driver(&mxc_driver);
+}
+
+static void mxc_cpufreq_driver_exit(void)
+{
+ cpufreq_unregister_driver(&mxc_driver);
+}
+
+module_init(mxc_cpufreq_driver_init);
+module_exit(mxc_cpufreq_driver_exit);
+
+MODULE_AUTHOR("Freescale Semiconductor Inc. Yong Shen <yong.shen@linaro.org>");
+MODULE_DESCRIPTION("CPUfreq driver for i.MX");
+MODULE_LICENSE("GPL");
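For context, mxc_cpufreq_init() above bails out unless SoC code has filled in the get_cpu_op hook (struct cpu_op and the hook are added to mach/mxc.h further down in this diff). A minimal, purely illustrative sketch of such SoC code; the mx51_* names and the rates are placeholders, not taken from this patch:

#include <linux/kernel.h>
#include <linux/init.h>
#include <mach/hardware.h>	/* struct cpu_op, get_cpu_op via mach/mxc.h */

/* Hypothetical operating-point table; the rates are placeholders. */
static struct cpu_op mx51_cpu_op[] = {
	{ .cpu_rate = 160000000 },
	{ .cpu_rate = 800000000 },
};

static struct cpu_op *mx51_get_cpu_op(int *op)
{
	*op = ARRAY_SIZE(mx51_cpu_op);
	return mx51_cpu_op;
}

static void __init mx51_init_cpufreq(void)
{
	/* mxc_cpufreq_init() returns -EINVAL if this is left NULL */
	get_cpu_op = mx51_get_cpu_op;
}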
diff --git a/arch/arm/plat-mxc/devices/Kconfig b/arch/arm/plat-mxc/devices/Kconfig
index 404799487f17..9aa6f3ea9012 100644
--- a/arch/arm/plat-mxc/devices/Kconfig
+++ b/arch/arm/plat-mxc/devices/Kconfig
@@ -6,9 +6,13 @@ config IMX_HAVE_PLATFORM_FEC
default y if ARCH_MX25 || SOC_IMX27 || ARCH_MX35 || ARCH_MX51
config IMX_HAVE_PLATFORM_FLEXCAN
- select HAVE_CAN_FLEXCAN
+ select HAVE_CAN_FLEXCAN if CAN
bool
+config IMX_HAVE_PLATFORM_GPIO_KEYS
+ bool
+ default y if ARCH_MX51
+
config IMX_HAVE_PLATFORM_IMX_I2C
bool
diff --git a/arch/arm/plat-mxc/devices/Makefile b/arch/arm/plat-mxc/devices/Makefile
index 0a3c1f089413..45aefeb283ba 100644
--- a/arch/arm/plat-mxc/devices/Makefile
+++ b/arch/arm/plat-mxc/devices/Makefile
@@ -1,6 +1,7 @@
obj-$(CONFIG_IMX_HAVE_PLATFORM_ESDHC) += platform-esdhc.o
obj-$(CONFIG_IMX_HAVE_PLATFORM_FEC) += platform-fec.o
obj-$(CONFIG_IMX_HAVE_PLATFORM_FLEXCAN) += platform-flexcan.o
+obj-$(CONFIG_IMX_HAVE_PLATFORM_GPIO_KEYS) += platform-gpio_keys.o
obj-y += platform-imx-dma.o
obj-$(CONFIG_IMX_HAVE_PLATFORM_IMX_I2C) += platform-imx-i2c.o
obj-$(CONFIG_IMX_HAVE_PLATFORM_IMX_SSI) += platform-imx-ssi.o
diff --git a/arch/arm/plat-mxc/devices/platform-gpio_keys.c b/arch/arm/plat-mxc/devices/platform-gpio_keys.c
new file mode 100644
index 000000000000..1c53a532ea0e
--- /dev/null
+++ b/arch/arm/plat-mxc/devices/platform-gpio_keys.c
@@ -0,0 +1,27 @@
+/*
+ * Copyright (C) 2010 Freescale Semiconductor, Inc. All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version 2
+ * of the License, or (at your option) any later version.
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor,
+ * Boston, MA 02110-1301, USA.
+ */
+#include <asm/sizes.h>
+#include <mach/hardware.h>
+#include <mach/devices-common.h>
+
+struct platform_device *__init imx_add_gpio_keys(
+ const struct gpio_keys_platform_data *pdata)
+{
+ return imx_add_platform_device("gpio-keys", -1, NULL,
+ 0, pdata, sizeof(*pdata));
+}
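A hedged usage sketch for the new helper, assuming a hypothetical board with a single power button; struct gpio_keys_button and struct gpio_keys_platform_data come from <linux/gpio_keys.h>, and the GPIO number is a placeholder:

#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/input.h>
#include <linux/gpio_keys.h>
#include <mach/devices-common.h>	/* imx_add_gpio_keys() */

static struct gpio_keys_button board_buttons[] = {
	{
		.code		= KEY_POWER,
		.gpio		= 21,		/* placeholder GPIO */
		.active_low	= 1,
		.desc		= "power",
		.wakeup		= 1,
	},
};

static const struct gpio_keys_platform_data board_button_data = {
	.buttons	= board_buttons,
	.nbuttons	= ARRAY_SIZE(board_buttons),
};

static void __init board_add_buttons(void)
{
	imx_add_gpio_keys(&board_button_data);
}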
diff --git a/arch/arm/plat-mxc/devices/platform-imx-dma.c b/arch/arm/plat-mxc/devices/platform-imx-dma.c
index 02d989018059..3a705c7877dd 100644
--- a/arch/arm/plat-mxc/devices/platform-imx-dma.c
+++ b/arch/arm/plat-mxc/devices/platform-imx-dma.c
@@ -12,15 +12,7 @@
#include <mach/hardware.h>
#include <mach/devices-common.h>
-#ifdef SDMA_IS_MERGED
#include <mach/sdma.h>
-#else
-struct sdma_platform_data {
- int sdma_version;
- char *cpu_name;
- int to_version;
-};
-#endif
struct imx_imx_sdma_data {
resource_size_t iobase;
diff --git a/arch/arm/plat-mxc/devices/platform-spi_imx.c b/arch/arm/plat-mxc/devices/platform-spi_imx.c
index e48340ec331e..17f724c9452d 100644
--- a/arch/arm/plat-mxc/devices/platform-spi_imx.c
+++ b/arch/arm/plat-mxc/devices/platform-spi_imx.c
@@ -27,6 +27,7 @@ const struct imx_spi_imx_data imx21_cspi_data[] __initconst = {
imx_spi_imx_data_entry(MX21, CSPI, "imx21-cspi", _id, _hwid, SZ_4K)
imx21_cspi_data_entry(0, 1),
imx21_cspi_data_entry(1, 2),
+};
#endif
#ifdef CONFIG_ARCH_MX25
diff --git a/arch/arm/plat-mxc/gpio.c b/arch/arm/plat-mxc/gpio.c
index 9d38da077edb..9c3e36232b5b 100644
--- a/arch/arm/plat-mxc/gpio.c
+++ b/arch/arm/plat-mxc/gpio.c
@@ -20,6 +20,7 @@
*/
#include <linux/init.h>
+#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/irq.h>
#include <linux/gpio.h>
@@ -201,11 +202,42 @@ static void mx2_gpio_irq_handler(u32 irq, struct irq_desc *desc)
}
}
+/*
+ * Set interrupt number "irq" in the GPIO as a wake-up source.
+ * While the system is running, all registered GPIO interrupts need to have
+ * wake-up enabled. When the system is suspended, only selected GPIO
+ * interrupts need to have wake-up enabled.
+ * @param irq interrupt source number
+ * @param enable non-zero to enable the interrupt as a wake-up source
+ * @return This function returns 0 on success.
+ */
+static int gpio_set_wake_irq(u32 irq, u32 enable)
+{
+ u32 gpio = irq_to_gpio(irq);
+ u32 gpio_idx = gpio & 0x1F;
+ struct mxc_gpio_port *port = &mxc_gpio_ports[gpio / 32];
+
+ if (enable) {
+ if (port->irq_high && (gpio_idx >= 16))
+ enable_irq_wake(port->irq_high);
+ else
+ enable_irq_wake(port->irq);
+ } else {
+ if (port->irq_high && (gpio_idx >= 16))
+ disable_irq_wake(port->irq_high);
+ else
+ disable_irq_wake(port->irq);
+ }
+
+ return 0;
+}
+
static struct irq_chip gpio_irq_chip = {
.ack = gpio_ack_irq,
.mask = gpio_mask_irq,
.unmask = gpio_unmask_irq,
.set_type = gpio_set_irq_type,
+ .set_wake = gpio_set_wake_irq,
};
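With .set_wake wired up, a driver no longer needs anything i.MX-specific to make a GPIO interrupt a wake-up source; the generic IRQ API lands in gpio_set_wake_irq() above. A brief sketch (button_gpio is a placeholder, assumed to have already been requested and configured as an interrupt):

#include <linux/gpio.h>
#include <linux/interrupt.h>

static int board_button_suspend_prepare(unsigned int button_gpio)
{
	int irq = gpio_to_irq(button_gpio);

	/* routed to gpio_set_wake_irq(irq, 1) via the .set_wake hook */
	return enable_irq_wake(irq);
}

/* disable_irq_wake(irq) on resume undoes it via gpio_set_wake_irq(irq, 0) */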
static void _set_gpio_direction(struct gpio_chip *chip, unsigned offset,
diff --git a/arch/arm/plat-mxc/include/mach/devices-common.h b/arch/arm/plat-mxc/include/mach/devices-common.h
index 86d7575a564d..8c6896fd1e5f 100644
--- a/arch/arm/plat-mxc/include/mach/devices-common.h
+++ b/arch/arm/plat-mxc/include/mach/devices-common.h
@@ -29,6 +29,10 @@ struct platform_device *__init imx_add_flexcan(int id,
resource_size_t irq,
const struct flexcan_platform_data *pdata);
+#include <linux/gpio_keys.h>
+struct platform_device *__init imx_add_gpio_keys(
+ const struct gpio_keys_platform_data *pdata);
+
#include <mach/i2c.h>
struct imx_imx_i2c_data {
int id;
diff --git a/arch/arm/plat-mxc/include/mach/dma.h b/arch/arm/plat-mxc/include/mach/dma.h
new file mode 100644
index 000000000000..ef7751546f5f
--- /dev/null
+++ b/arch/arm/plat-mxc/include/mach/dma.h
@@ -0,0 +1,67 @@
+/*
+ * Copyright 2004-2009 Freescale Semiconductor, Inc. All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef __ASM_ARCH_MXC_DMA_H__
+#define __ASM_ARCH_MXC_DMA_H__
+
+#include <linux/scatterlist.h>
+#include <linux/device.h>
+#include <linux/dmaengine.h>
+
+/*
+ * This enumerates peripheral types. Used for SDMA.
+ */
+enum sdma_peripheral_type {
+ IMX_DMATYPE_SSI, /* MCU domain SSI */
+ IMX_DMATYPE_SSI_SP, /* Shared SSI */
+ IMX_DMATYPE_MMC, /* MMC */
+ IMX_DMATYPE_SDHC, /* SDHC */
+ IMX_DMATYPE_UART, /* MCU domain UART */
+ IMX_DMATYPE_UART_SP, /* Shared UART */
+ IMX_DMATYPE_FIRI, /* FIRI */
+ IMX_DMATYPE_CSPI, /* MCU domain CSPI */
+ IMX_DMATYPE_CSPI_SP, /* Shared CSPI */
+ IMX_DMATYPE_SIM, /* SIM */
+ IMX_DMATYPE_ATA, /* ATA */
+ IMX_DMATYPE_CCM, /* CCM */
+ IMX_DMATYPE_EXT, /* External peripheral */
+ IMX_DMATYPE_MSHC, /* Memory Stick Host Controller */
+ IMX_DMATYPE_MSHC_SP, /* Shared Memory Stick Host Controller */
+ IMX_DMATYPE_DSP, /* DSP */
+ IMX_DMATYPE_MEMORY, /* Memory */
+ IMX_DMATYPE_FIFO_MEMORY,/* FIFO type Memory */
+ IMX_DMATYPE_SPDIF, /* SPDIF */
+ IMX_DMATYPE_IPU_MEMORY, /* IPU Memory */
+ IMX_DMATYPE_ASRC, /* ASRC */
+ IMX_DMATYPE_ESAI, /* ESAI */
+};
+
+enum imx_dma_prio {
+ DMA_PRIO_HIGH = 0,
+ DMA_PRIO_MEDIUM = 1,
+ DMA_PRIO_LOW = 2
+};
+
+struct imx_dma_data {
+ int dma_request; /* DMA request line */
+ enum sdma_peripheral_type peripheral_type;
+ int priority;
+};
+
+static inline int imx_dma_is_ipu(struct dma_chan *chan)
+{
+ return !strcmp(dev_name(chan->device->dev), "ipu-core");
+}
+
+static inline int imx_dma_is_general_purpose(struct dma_chan *chan)
+{
+ return !strcmp(dev_name(chan->device->dev), "imx-sdma") ||
+ !strcmp(dev_name(chan->device->dev), "imx-dma");
+}
+
+#endif
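A hedged sketch of how a client might pair struct imx_dma_data with the standard dmaengine channel-request path; the request line number is a placeholder, and stashing the pointer in chan->private is shown as an assumption about how the i.MX DMA drivers of this era pick it up:

#include <linux/dmaengine.h>
#include <mach/dma.h>

static bool imx_ssi_dma_filter(struct dma_chan *chan, void *param)
{
	struct imx_dma_data *data = param;

	if (!imx_dma_is_general_purpose(chan))
		return false;

	chan->private = data;
	return true;
}

static struct dma_chan *imx_ssi_request_dma(void)
{
	static struct imx_dma_data data = {
		.dma_request	 = 28,			/* placeholder line */
		.peripheral_type = IMX_DMATYPE_SSI,
		.priority	 = DMA_PRIO_HIGH,
	};
	dma_cap_mask_t mask;

	dma_cap_zero(mask);
	dma_cap_set(DMA_SLAVE, mask);
	/* returns NULL if no general-purpose channel was available */
	return dma_request_channel(mask, imx_ssi_dma_filter, &data);
}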
diff --git a/arch/arm/plat-mxc/include/mach/iomux-mx51.h b/arch/arm/plat-mxc/include/mach/iomux-mx51.h
index e46b1c2836d4..d7a41e9a2605 100644
--- a/arch/arm/plat-mxc/include/mach/iomux-mx51.h
+++ b/arch/arm/plat-mxc/include/mach/iomux-mx51.h
@@ -45,6 +45,8 @@ typedef enum iomux_config {
PAD_CTL_PKE | PAD_CTL_HYS)
#define MX51_GPIO_PAD_CTRL (PAD_CTL_DSE_HIGH | PAD_CTL_PKE | \
PAD_CTL_SRE_FAST)
+#define MX51_GPIO_PAD_CTRL_2 (PAD_CTL_SRE_FAST | PAD_CTL_DSE_HIGH | \
+ PAD_CTL_PUS_100K_UP)
#define MX51_ECSPI_PAD_CTRL (PAD_CTL_HYS | PAD_CTL_PKE | PAD_CTL_DSE_HIGH | \
PAD_CTL_SRE_FAST)
#define MX51_SDHCI_PAD_CTRL (PAD_CTL_DSE_HIGH | PAD_CTL_PUS_47K_UP | \
diff --git a/arch/arm/plat-mxc/include/mach/mx31.h b/arch/arm/plat-mxc/include/mach/mx31.h
index 03e2afabc9fc..61cfe827498b 100644
--- a/arch/arm/plat-mxc/include/mach/mx31.h
+++ b/arch/arm/plat-mxc/include/mach/mx31.h
@@ -240,7 +240,6 @@ static inline void mx31_setup_weimcs(size_t cs,
#define MPEG4_ENC_BASE_ADDR MX31_MPEG4_ENC_BASE_ADDR
#define MXC_INT_MPEG4_ENCODER MX31_INT_MPEG4_ENCODER
#define MXC_INT_FIRI MX31_INT_FIRI
-#define MXC_INT_MMC_SDHC1 MX31_INT_MMC_SDHC1
#define MXC_INT_MBX MX31_INT_MBX
#define MXC_INT_CSPI3 MX31_INT_CSPI3
#define MXC_INT_SIM2 MX31_INT_SIM2
diff --git a/arch/arm/plat-mxc/include/mach/mx35.h b/arch/arm/plat-mxc/include/mach/mx35.h
index ff905cb32458..6267cff6035d 100644
--- a/arch/arm/plat-mxc/include/mach/mx35.h
+++ b/arch/arm/plat-mxc/include/mach/mx35.h
@@ -197,8 +197,6 @@
/* these should go away */
#define MXC_FEC_BASE_ADDR MX35_FEC_BASE_ADDR
#define MXC_INT_OWIRE MX35_INT_OWIRE
-#define MXC_INT_MMC_SDHC2 MX35_INT_MMC_SDHC2
-#define MXC_INT_MMC_SDHC3 MX35_INT_MMC_SDHC3
#define MXC_INT_GPU2D MX35_INT_GPU2D
#define MXC_INT_ASRC MX35_INT_ASRC
#define MXC_INT_USBHS MX35_INT_USBHS
diff --git a/arch/arm/plat-mxc/include/mach/mxc.h b/arch/arm/plat-mxc/include/mach/mxc.h
index a790bf212972..a42c7207082d 100644
--- a/arch/arm/plat-mxc/include/mach/mxc.h
+++ b/arch/arm/plat-mxc/include/mach/mxc.h
@@ -1,5 +1,5 @@
/*
- * Copyright 2004-2007 Freescale Semiconductor, Inc. All Rights Reserved.
+ * Copyright 2004-2007, 2010 Freescale Semiconductor, Inc. All Rights Reserved.
* Copyright (C) 2008 Juergen Beisert (kernel@pengutronix.de)
*
* This program is free software; you can redistribute it and/or
@@ -20,6 +20,8 @@
#ifndef __ASM_ARCH_MXC_H__
#define __ASM_ARCH_MXC_H__
+#include <linux/types.h>
+
#ifndef __ASM_ARCH_MXC_HARDWARE_H__
#error "Do not include directly."
#endif
@@ -133,6 +135,15 @@ extern unsigned int __mxc_cpu_type;
# define cpu_is_mxc91231() (0)
#endif
+#ifndef __ASSEMBLY__
+
+struct cpu_op {
+ u32 cpu_rate;
+};
+
+extern struct cpu_op *(*get_cpu_op)(int *op);
+#endif
+
#if defined(CONFIG_ARCH_MX3) || defined(CONFIG_ARCH_MX2)
/* These are deprecated, use mx[23][157]_setup_weimcs instead. */
#define CSCR_U(n) (IO_ADDRESS(WEIM_BASE_ADDR + n * 0x10))
diff --git a/arch/arm/plat-mxc/include/mach/sdma.h b/arch/arm/plat-mxc/include/mach/sdma.h
new file mode 100644
index 000000000000..9be112227ac4
--- /dev/null
+++ b/arch/arm/plat-mxc/include/mach/sdma.h
@@ -0,0 +1,17 @@
+#ifndef __MACH_MXC_SDMA_H__
+#define __MACH_MXC_SDMA_H__
+
+/**
+ * struct sdma_platform_data - platform specific data for SDMA engine
+ *
+ * @sdma_version: version of this SDMA engine
+ * @cpu_name: used to generate the firmware name
+ * @to_version: CPU tape-out version
+ */
+struct sdma_platform_data {
+ int sdma_version;
+ char *cpu_name;
+ int to_version;
+};
+
+#endif /* __MACH_MXC_SDMA_H__ */
diff --git a/arch/arm/plat-nomadik/include/plat/ske.h b/arch/arm/plat-nomadik/include/plat/ske.h
new file mode 100644
index 000000000000..31382fbc07dc
--- /dev/null
+++ b/arch/arm/plat-nomadik/include/plat/ske.h
@@ -0,0 +1,50 @@
+/*
+ * Copyright (C) ST-Ericsson SA 2010
+ *
+ * License Terms: GNU General Public License v2
+ * Author: Naveen Kumar Gaddipati <naveen.gaddipati@stericsson.com>
+ *
+ * ux500 Scroll key and Keypad Encoder (SKE) header
+ */
+
+#ifndef __SKE_H
+#define __SKE_H
+
+#include <linux/input/matrix_keypad.h>
+
+/* register definitions for SKE peripheral */
+#define SKE_CR 0x00
+#define SKE_VAL0 0x04
+#define SKE_VAL1 0x08
+#define SKE_DBCR 0x0C
+#define SKE_IMSC 0x10
+#define SKE_RIS 0x14
+#define SKE_MIS 0x18
+#define SKE_ICR 0x1C
+
+/*
+ * Keypad module
+ */
+
+/**
+ * struct keypad_platform_data - structure for platform specific data
+ * @init: pointer to keypad init function
+ * @exit: pointer to keypad deinitialisation function
+ * @keymap_data: matrix scan code table for keycodes
+ * @krow: maximum number of rows
+ * @kcol: maximum number of columns
+ * @debounce_ms: platform specific debounce time
+ * @no_autorepeat: true to disable key autorepeat
+ * @wakeup_enable: allow waking up the system
+ */
+struct ske_keypad_platform_data {
+ int (*init)(void);
+ int (*exit)(void);
+ const struct matrix_keymap_data *keymap_data;
+ u8 krow;
+ u8 kcol;
+ u8 debounce_ms;
+ bool no_autorepeat;
+ bool wakeup_enable;
+};
+#endif /* __SKE_H */
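A brief, hypothetical example of board code filling this platform data; the keymap, geometry, and debounce value are placeholders, and KEY()/matrix_keymap_data come from <linux/input/matrix_keypad.h>:

#include <linux/kernel.h>
#include <linux/input.h>
#include <linux/input/matrix_keypad.h>

static const uint32_t board_keymap[] = {
	KEY(0, 0, KEY_UP),
	KEY(0, 1, KEY_DOWN),
	KEY(1, 0, KEY_ENTER),
};

static const struct matrix_keymap_data board_keymap_data = {
	.keymap		= board_keymap,
	.keymap_size	= ARRAY_SIZE(board_keymap),
};

static struct ske_keypad_platform_data board_ske_keypad_data = {
	.keymap_data	= &board_keymap_data,
	.krow		= 2,			/* placeholder geometry */
	.kcol		= 2,
	.debounce_ms	= 40,			/* placeholder */
	.no_autorepeat	= true,
	.wakeup_enable	= true,
};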
diff --git a/arch/arm/plat-nomadik/include/plat/ste_dma40.h b/arch/arm/plat-nomadik/include/plat/ste_dma40.h
index 5fbde4b8dc12..74b62f10d07f 100644
--- a/arch/arm/plat-nomadik/include/plat/ste_dma40.h
+++ b/arch/arm/plat-nomadik/include/plat/ste_dma40.h
@@ -1,10 +1,8 @@
/*
- * arch/arm/plat-nomadik/include/plat/ste_dma40.h
- *
- * Copyright (C) ST-Ericsson 2007-2010
+ * Copyright (C) ST-Ericsson SA 2007-2010
+ * Author: Per Forlin <per.forlin@stericsson.com> for ST-Ericsson
+ * Author: Jonas Aaberg <jonas.aberg@stericsson.com> for ST-Ericsson
* License terms: GNU General Public License (GPL) version 2
- * Author: Per Friden <per.friden@stericsson.com>
- * Author: Jonas Aaberg <jonas.aberg@stericsson.com>
*/
@@ -14,43 +12,25 @@
#include <linux/dmaengine.h>
#include <linux/workqueue.h>
#include <linux/interrupt.h>
-#include <linux/dmaengine.h>
/* dev types for memcpy */
#define STEDMA40_DEV_DST_MEMORY (-1)
#define STEDMA40_DEV_SRC_MEMORY (-1)
-/*
- * Description of bitfields of channel_type variable is available in
- * the info structure.
- */
+enum stedma40_mode {
+ STEDMA40_MODE_LOGICAL = 0,
+ STEDMA40_MODE_PHYSICAL,
+ STEDMA40_MODE_OPERATION,
+};
-/* Priority */
-#define STEDMA40_INFO_PRIO_TYPE_POS 2
-#define STEDMA40_HIGH_PRIORITY_CHANNEL (0x1 << STEDMA40_INFO_PRIO_TYPE_POS)
-#define STEDMA40_LOW_PRIORITY_CHANNEL (0x2 << STEDMA40_INFO_PRIO_TYPE_POS)
-
-/* Mode */
-#define STEDMA40_INFO_CH_MODE_TYPE_POS 6
-#define STEDMA40_CHANNEL_IN_PHY_MODE (0x1 << STEDMA40_INFO_CH_MODE_TYPE_POS)
-#define STEDMA40_CHANNEL_IN_LOG_MODE (0x2 << STEDMA40_INFO_CH_MODE_TYPE_POS)
-#define STEDMA40_CHANNEL_IN_OPER_MODE (0x3 << STEDMA40_INFO_CH_MODE_TYPE_POS)
-
-/* Mode options */
-#define STEDMA40_INFO_CH_MODE_OPT_POS 8
-#define STEDMA40_PCHAN_BASIC_MODE (0x1 << STEDMA40_INFO_CH_MODE_OPT_POS)
-#define STEDMA40_PCHAN_MODULO_MODE (0x2 << STEDMA40_INFO_CH_MODE_OPT_POS)
-#define STEDMA40_PCHAN_DOUBLE_DST_MODE (0x3 << STEDMA40_INFO_CH_MODE_OPT_POS)
-#define STEDMA40_LCHAN_SRC_PHY_DST_LOG (0x1 << STEDMA40_INFO_CH_MODE_OPT_POS)
-#define STEDMA40_LCHAN_SRC_LOG_DST_PHS (0x2 << STEDMA40_INFO_CH_MODE_OPT_POS)
-#define STEDMA40_LCHAN_SRC_LOG_DST_LOG (0x3 << STEDMA40_INFO_CH_MODE_OPT_POS)
-
-/* Interrupt */
-#define STEDMA40_INFO_TIM_POS 10
-#define STEDMA40_NO_TIM_FOR_LINK (0x0 << STEDMA40_INFO_TIM_POS)
-#define STEDMA40_TIM_FOR_LINK (0x1 << STEDMA40_INFO_TIM_POS)
-
-/* End of channel_type configuration */
+enum stedma40_mode_opt {
+ STEDMA40_PCHAN_BASIC_MODE = 0,
+ STEDMA40_LCHAN_SRC_LOG_DST_LOG = 0,
+ STEDMA40_PCHAN_MODULO_MODE,
+ STEDMA40_PCHAN_DOUBLE_DST_MODE,
+ STEDMA40_LCHAN_SRC_PHY_DST_LOG,
+ STEDMA40_LCHAN_SRC_LOG_DST_PHY,
+};
#define STEDMA40_ESIZE_8_BIT 0x0
#define STEDMA40_ESIZE_16_BIT 0x1
@@ -73,16 +53,14 @@
#define STEDMA40_PSIZE_LOG_8 STEDMA40_PSIZE_PHY_8
#define STEDMA40_PSIZE_LOG_16 STEDMA40_PSIZE_PHY_16
+/* Maximum number of possible physical channels */
+#define STEDMA40_MAX_PHYS 32
+
enum stedma40_flow_ctrl {
STEDMA40_NO_FLOW_CTRL,
STEDMA40_FLOW_CTRL,
};
-enum stedma40_endianess {
- STEDMA40_LITTLE_ENDIAN,
- STEDMA40_BIG_ENDIAN
-};
-
enum stedma40_periph_data_width {
STEDMA40_BYTE_WIDTH = STEDMA40_ESIZE_8_BIT,
STEDMA40_HALFWORD_WIDTH = STEDMA40_ESIZE_16_BIT,
@@ -90,15 +68,8 @@ enum stedma40_periph_data_width {
STEDMA40_DOUBLEWORD_WIDTH = STEDMA40_ESIZE_64_BIT
};
-struct stedma40_half_channel_info {
- enum stedma40_endianess endianess;
- enum stedma40_periph_data_width data_width;
- int psize;
- enum stedma40_flow_ctrl flow_ctrl;
-};
-
enum stedma40_xfer_dir {
- STEDMA40_MEM_TO_MEM,
+ STEDMA40_MEM_TO_MEM = 1,
STEDMA40_MEM_TO_PERIPH,
STEDMA40_PERIPH_TO_MEM,
STEDMA40_PERIPH_TO_PERIPH
@@ -106,18 +77,31 @@ enum stedma40_xfer_dir {
/**
+ * struct stedma40_half_channel_info - dst/src half channel configuration
+ *
+ * @big_endian: true if the src/dst should be read as big endian
+ * @data_width: Data width of the src/dst hardware
+ * @psize: Burst size
+ * @flow_ctrl: Flow control on/off.
+ */
+struct stedma40_half_channel_info {
+ bool big_endian;
+ enum stedma40_periph_data_width data_width;
+ int psize;
+ enum stedma40_flow_ctrl flow_ctrl;
+};
+
+/**
* struct stedma40_chan_cfg - Structure to be filled by client drivers.
*
* @dir: MEM 2 MEM, PERIPH 2 MEM , MEM 2 PERIPH, PERIPH 2 PERIPH
- * @channel_type: priority, mode, mode options and interrupt configuration.
+ * @high_priority: true if high-priority
+ * @mode: channel mode: physical, logical, or operation
+ * @mode_opt: options for the chosen channel mode
* @src_dev_type: Src device type
* @dst_dev_type: Dst device type
* @src_info: Parameters for src half channel
* @dst_info: Parameters for dst half channel
- * @pre_transfer_data: Data to be passed on to the pre_transfer() function.
- * @pre_transfer: Callback used if needed before preparation of transfer.
- * Only called if device is set. size of bytes to transfer
- * (in case of multiple element transfer size is size of the first element).
*
*
* This structure has to be filled by the client drivers.
@@ -126,15 +110,13 @@ enum stedma40_xfer_dir {
*/
struct stedma40_chan_cfg {
enum stedma40_xfer_dir dir;
- unsigned int channel_type;
+ bool high_priority;
+ enum stedma40_mode mode;
+ enum stedma40_mode_opt mode_opt;
int src_dev_type;
int dst_dev_type;
struct stedma40_half_channel_info src_info;
struct stedma40_half_channel_info dst_info;
- void *pre_transfer_data;
- int (*pre_transfer) (struct dma_chan *chan,
- void *data,
- int size);
};
/**
@@ -147,7 +129,6 @@ struct stedma40_chan_cfg {
* @memcpy_len: length of memcpy
* @memcpy_conf_phy: default configuration of physical channel memcpy
* @memcpy_conf_log: default configuration of logical channel memcpy
- * @llis_per_log: number of max linked list items per logical channel
* @disabled_channels: A vector, ending with -1, that marks physical channels
* that are for different reasons not available for the driver.
*/
@@ -159,23 +140,10 @@ struct stedma40_platform_data {
u32 memcpy_len;
struct stedma40_chan_cfg *memcpy_conf_phy;
struct stedma40_chan_cfg *memcpy_conf_log;
- unsigned int llis_per_log;
- int disabled_channels[8];
+ int disabled_channels[STEDMA40_MAX_PHYS];
};
-/**
- * setdma40_set_psize() - Used for changing the package size of an
- * already configured dma channel.
- *
- * @chan: dmaengine handle
- * @src_psize: new package side for src. (STEDMA40_PSIZE*)
- * @src_psize: new package side for dst. (STEDMA40_PSIZE*)
- *
- * returns 0 on ok, otherwise negative error number.
- */
-int stedma40_set_psize(struct dma_chan *chan,
- int src_psize,
- int dst_psize);
+#ifdef CONFIG_STE_DMA40
/**
* stedma40_filter() - Provides stedma40_chan_cfg to the
@@ -238,4 +206,21 @@ dma_async_tx_descriptor *stedma40_slave_mem(struct dma_chan *chan,
direction, flags);
}
+#else
+static inline bool stedma40_filter(struct dma_chan *chan, void *data)
+{
+ return false;
+}
+
+static inline struct
+dma_async_tx_descriptor *stedma40_slave_mem(struct dma_chan *chan,
+ dma_addr_t addr,
+ unsigned int size,
+ enum dma_data_direction direction,
+ unsigned long flags)
+{
+ return NULL;
+}
+#endif
+
#endif
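To show the new-style configuration that replaces the old channel_type bitfield, here is a hedged sketch of a client filling stedma40_chan_cfg for a memory-to-peripheral transfer; the destination device type and burst size are placeholders, not taken from this patch:

static struct stedma40_chan_cfg uart_tx_dma_cfg = {
	.dir		= STEDMA40_MEM_TO_PERIPH,
	.high_priority	= false,
	.mode		= STEDMA40_MODE_LOGICAL,
	.mode_opt	= STEDMA40_LCHAN_SRC_LOG_DST_LOG,
	.src_dev_type	= STEDMA40_DEV_SRC_MEMORY,
	.dst_dev_type	= 13,				/* placeholder device type */
	.src_info = {
		.big_endian	= false,
		.data_width	= STEDMA40_BYTE_WIDTH,
		.psize		= STEDMA40_PSIZE_LOG_8,	/* placeholder burst */
		.flow_ctrl	= STEDMA40_NO_FLOW_CTRL,
	},
	.dst_info = {
		.big_endian	= false,
		.data_width	= STEDMA40_BYTE_WIDTH,
		.psize		= STEDMA40_PSIZE_LOG_8,	/* placeholder burst */
		.flow_ctrl	= STEDMA40_NO_FLOW_CTRL,
	},
};

The filled-in cfg is then handed to the driver through stedma40_filter() when requesting a channel, per the kernel-doc earlier in this header.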
diff --git a/arch/arm/plat-nomadik/timer.c b/arch/arm/plat-nomadik/timer.c
index aedf9c1d645e..63cdc6025bd7 100644
--- a/arch/arm/plat-nomadik/timer.c
+++ b/arch/arm/plat-nomadik/timer.c
@@ -3,6 +3,7 @@
*
* Copyright (C) 2008 STMicroelectronics
* Copyright (C) 2010 Alessandro Rubini
+ * Copyright (C) 2010 Linus Walleij for ST-Ericsson
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2, as
@@ -16,11 +17,13 @@
#include <linux/clk.h>
#include <linux/jiffies.h>
#include <linux/err.h>
+#include <linux/cnt32_to_63.h>
+#include <linux/timer.h>
#include <asm/mach/time.h>
#include <plat/mtu.h>
-void __iomem *mtu_base; /* ssigned by machine code */
+void __iomem *mtu_base; /* Assigned by machine code */
/*
* Kernel assumes that sched_clock can be called early
@@ -48,16 +51,82 @@ static struct clocksource nmdk_clksrc = {
/*
* Override the global weak sched_clock symbol with this
* local implementation which uses the clocksource to get some
- * better resolution when scheduling the kernel. We accept that
- * this wraps around for now, since it is just a relative time
- * stamp. (Inspired by OMAP implementation.)
+ * better resolution when scheduling the kernel.
+ *
+ * Because the hardware timer period may be quite short
+ * (32.3 secs on the 133 MHz MTU timer selection on ux500)
+ * and because cnt32_to_63() needs to be called at least once per
+ * half period to work properly, a kernel keepwarm() timer is set up
+ * to ensure this requirement is always met.
+ *
+ * Also the sched_clock timer will wrap around at some point,
+ * here we set it to run continuously for a year.
*/
+#define SCHED_CLOCK_MIN_WRAP 3600*24*365
+static struct timer_list cnt32_to_63_keepwarm_timer;
+static u32 sched_mult;
+static u32 sched_shift;
+
unsigned long long notrace sched_clock(void)
{
- return clocksource_cyc2ns(nmdk_clksrc.read(
- &nmdk_clksrc),
- nmdk_clksrc.mult,
- nmdk_clksrc.shift);
+ u64 cycles;
+
+ if (unlikely(!mtu_base))
+ return 0;
+
+ cycles = cnt32_to_63(-readl(mtu_base + MTU_VAL(0)));
+ /*
+ * sched_mult is guaranteed to be even so will
+ * shift out bit 63
+ */
+ return (cycles * sched_mult) >> sched_shift;
+}
+
+/* Just kick sched_clock every so often */
+static void cnt32_to_63_keepwarm(unsigned long data)
+{
+ mod_timer(&cnt32_to_63_keepwarm_timer, round_jiffies(jiffies + data));
+ (void) sched_clock();
+}
+
+/*
+ * Set up a timer to keep sched_clock():s 32_to_63 algorithm warm
+ * once in half a 32bit timer wrap interval.
+ */
+static void __init nmdk_sched_clock_init(unsigned long rate)
+{
+ u32 v;
+ unsigned long delta;
+ u64 days;
+
+ /* Find the appropriate mult and shift factors */
+ clocks_calc_mult_shift(&sched_mult, &sched_shift,
+ rate, NSEC_PER_SEC, SCHED_CLOCK_MIN_WRAP);
+ /* We need to multiply by an even number to get rid of bit 63 */
+ if (sched_mult & 1)
+ sched_mult++;
+
+ /* Let's see what we get, take max counter and scale it */
+ days = (0xFFFFFFFFFFFFFFFFLLU * sched_mult) >> sched_shift;
+ do_div(days, NSEC_PER_SEC);
+ do_div(days, (3600*24));
+
+ pr_info("sched_clock: using %d bits @ %lu Hz wrap in %lu days\n",
+ (64 - sched_shift), rate, (unsigned long) days);
+
+ /*
+ * Program a timer to kick us at half 32bit wraparound
+ * Formula: seconds per wrap = (2^32) / f
+ */
+ v = 0xFFFFFFFFUL / rate;
+ /* We want half of the wrap time to keep cnt32_to_63 warm */
+ v /= 2;
+ pr_debug("sched_clock: prescaled timer rate: %lu Hz, "
+ "initialize keepwarm timer every %d seconds\n", rate, v);
+ /* Convert seconds to jiffies */
+ delta = msecs_to_jiffies(v*1000);
+ setup_timer(&cnt32_to_63_keepwarm_timer, cnt32_to_63_keepwarm, delta);
+ mod_timer(&cnt32_to_63_keepwarm_timer, round_jiffies(jiffies + delta));
}
/* Clockevent device: use one-shot mode */
@@ -161,13 +230,15 @@ void __init nmdk_timer_init(void)
writel(0, mtu_base + MTU_BGLR(0));
writel(cr | MTU_CRn_ENA, mtu_base + MTU_CR(0));
- /* Now the scheduling clock is ready */
+ /* Now the clock source is ready */
nmdk_clksrc.read = nmdk_read_timer;
if (clocksource_register(&nmdk_clksrc))
pr_err("timer: failed to initialize clock source %s\n",
nmdk_clksrc.name);
+ nmdk_sched_clock_init(rate);
+
/* Timer 1 is used for events */
clockevents_calc_mult_shift(&nmdk_clkevt, rate, MTU_MIN_RANGE);
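As a quick sanity check of the keepwarm arithmetic above (using the 133 MHz figure already quoted in the comment): the 32-bit MTU counter wraps every 0xFFFFFFFF / 133000000 ≈ 32.3 s, so v /= 2 arms the keepwarm timer roughly every 16 s, comfortably within the half period that cnt32_to_63() requires. The one-year SCHED_CLOCK_MIN_WRAP target in turn makes clocks_calc_mult_shift() pick a mult/shift pair whose scaled 64-bit result wraps only after at least a year, as reported by the pr_info() above.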
diff --git a/arch/arm/plat-omap/Kconfig b/arch/arm/plat-omap/Kconfig
index a92cb499313f..92c5bb7909f5 100644
--- a/arch/arm/plat-omap/Kconfig
+++ b/arch/arm/plat-omap/Kconfig
@@ -19,7 +19,7 @@ config ARCH_OMAP2PLUS
bool "TI OMAP2/3/4"
select COMMON_CLKDEV
help
- "Systems based on omap24xx, omap34xx or omap44xx"
+ "Systems based on OMAP2, OMAP3 or OMAP4"
endchoice
diff --git a/arch/arm/plat-omap/Makefile b/arch/arm/plat-omap/Makefile
index 9405831b746a..a4a12859fdd5 100644
--- a/arch/arm/plat-omap/Makefile
+++ b/arch/arm/plat-omap/Makefile
@@ -4,7 +4,7 @@
# Common support
obj-y := common.o sram.o clock.o devices.o dma.o mux.o gpio.o \
- usb.o fb.o io.o
+ usb.o fb.o io.o counter_32k.o
obj-m :=
obj-n :=
obj- :=
@@ -31,4 +31,4 @@ obj-y += $(i2c-omap-m) $(i2c-omap-y)
# OMAP mailbox framework
obj-$(CONFIG_OMAP_MBOX_FWK) += mailbox.o
-obj-$(CONFIG_OMAP_PM_NOOP) += omap-pm-noop.o \ No newline at end of file
+obj-$(CONFIG_OMAP_PM_NOOP) += omap-pm-noop.o
diff --git a/arch/arm/plat-omap/clock.c b/arch/arm/plat-omap/clock.c
index 7190cbd92620..fc62fb5fc20b 100644
--- a/arch/arm/plat-omap/clock.c
+++ b/arch/arm/plat-omap/clock.c
@@ -60,7 +60,7 @@ void clk_disable(struct clk *clk)
spin_lock_irqsave(&clockfw_lock, flags);
if (clk->usecount == 0) {
- printk(KERN_ERR "Trying disable clock %s with 0 usecount\n",
+ pr_err("Trying disable clock %s with 0 usecount\n",
clk->name);
WARN_ON(1);
goto out;
@@ -397,6 +397,7 @@ static int __init clk_disable_unused(void)
struct clk *ck;
unsigned long flags;
+ pr_info("clock: disabling unused clocks to save power\n");
list_for_each_entry(ck, &clocks, node) {
if (ck->ops == &clkops_null)
continue;
@@ -418,7 +419,7 @@ late_initcall(clk_disable_unused);
int __init clk_init(struct clk_functions * custom_clocks)
{
if (!custom_clocks) {
- printk(KERN_ERR "No custom clock functions registered\n");
+ pr_err("No custom clock functions registered\n");
BUG();
}
diff --git a/arch/arm/plat-omap/common.c b/arch/arm/plat-omap/common.c
index 3008e7104487..f04731820301 100644
--- a/arch/arm/plat-omap/common.c
+++ b/arch/arm/plat-omap/common.c
@@ -11,38 +11,16 @@
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*/
-#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/init.h>
-#include <linux/delay.h>
-#include <linux/console.h>
-#include <linux/serial.h>
-#include <linux/tty.h>
-#include <linux/serial_8250.h>
-#include <linux/serial_reg.h>
-#include <linux/clk.h>
#include <linux/io.h>
#include <linux/omapfb.h>
-#include <mach/hardware.h>
-#include <asm/system.h>
-#include <asm/pgtable.h>
-#include <asm/mach/map.h>
-#include <asm/setup.h>
-
#include <plat/common.h>
#include <plat/board.h>
-#include <plat/control.h>
-#include <plat/mux.h>
-#include <plat/fpga.h>
-#include <plat/serial.h>
#include <plat/vram.h>
+#include <plat/dsp.h>
-#include <plat/clock.h>
-
-#if defined(CONFIG_ARCH_OMAP2) || defined(CONFIG_ARCH_OMAP3)
-# include "../mach-omap2/sdrc.h"
-#endif
#define NO_LENGTH_CHECK 0xffffffff
@@ -87,271 +65,5 @@ void __init omap_reserve(void)
{
omapfb_reserve_sdram_memblock();
omap_vram_reserve_sdram_memblock();
+ omap_dsp_reserve_sdram_memblock();
}
-
-/*
- * 32KHz clocksource ... always available, on pretty most chips except
- * OMAP 730 and 1510. Other timers could be used as clocksources, with
- * higher resolution in free-running counter modes (e.g. 12 MHz xtal),
- * but systems won't necessarily want to spend resources that way.
- */
-
-#define OMAP16XX_TIMER_32K_SYNCHRONIZED 0xfffbc410
-
-#if !(defined(CONFIG_ARCH_OMAP730) || defined(CONFIG_ARCH_OMAP15XX))
-
-#include <linux/clocksource.h>
-
-/*
- * offset_32k holds the init time counter value. It is then subtracted
- * from every counter read to achieve a counter that counts time from the
- * kernel boot (needed for sched_clock()).
- */
-static u32 offset_32k __read_mostly;
-
-#ifdef CONFIG_ARCH_OMAP16XX
-static cycle_t omap16xx_32k_read(struct clocksource *cs)
-{
- return omap_readl(OMAP16XX_TIMER_32K_SYNCHRONIZED) - offset_32k;
-}
-#else
-#define omap16xx_32k_read NULL
-#endif
-
-#ifdef CONFIG_ARCH_OMAP2420
-static cycle_t omap2420_32k_read(struct clocksource *cs)
-{
- return omap_readl(OMAP2420_32KSYNCT_BASE + 0x10) - offset_32k;
-}
-#else
-#define omap2420_32k_read NULL
-#endif
-
-#ifdef CONFIG_ARCH_OMAP2430
-static cycle_t omap2430_32k_read(struct clocksource *cs)
-{
- return omap_readl(OMAP2430_32KSYNCT_BASE + 0x10) - offset_32k;
-}
-#else
-#define omap2430_32k_read NULL
-#endif
-
-#ifdef CONFIG_ARCH_OMAP3
-static cycle_t omap34xx_32k_read(struct clocksource *cs)
-{
- return omap_readl(OMAP3430_32KSYNCT_BASE + 0x10) - offset_32k;
-}
-#else
-#define omap34xx_32k_read NULL
-#endif
-
-#ifdef CONFIG_ARCH_OMAP4
-static cycle_t omap44xx_32k_read(struct clocksource *cs)
-{
- return omap_readl(OMAP4430_32KSYNCT_BASE + 0x10) - offset_32k;
-}
-#else
-#define omap44xx_32k_read NULL
-#endif
-
-/*
- * Kernel assumes that sched_clock can be called early but may not have
- * things ready yet.
- */
-static cycle_t omap_32k_read_dummy(struct clocksource *cs)
-{
- return 0;
-}
-
-static struct clocksource clocksource_32k = {
- .name = "32k_counter",
- .rating = 250,
- .read = omap_32k_read_dummy,
- .mask = CLOCKSOURCE_MASK(32),
- .shift = 10,
- .flags = CLOCK_SOURCE_IS_CONTINUOUS,
-};
-
-/*
- * Returns current time from boot in nsecs. It's OK for this to wrap
- * around for now, as it's just a relative time stamp.
- */
-unsigned long long sched_clock(void)
-{
- return clocksource_cyc2ns(clocksource_32k.read(&clocksource_32k),
- clocksource_32k.mult, clocksource_32k.shift);
-}
-
-/**
- * read_persistent_clock - Return time from a persistent clock.
- *
- * Reads the time from a source which isn't disabled during PM, the
- * 32k sync timer. Convert the cycles elapsed since last read into
- * nsecs and adds to a monotonically increasing timespec.
- */
-static struct timespec persistent_ts;
-static cycles_t cycles, last_cycles;
-void read_persistent_clock(struct timespec *ts)
-{
- unsigned long long nsecs;
- cycles_t delta;
- struct timespec *tsp = &persistent_ts;
-
- last_cycles = cycles;
- cycles = clocksource_32k.read(&clocksource_32k);
- delta = cycles - last_cycles;
-
- nsecs = clocksource_cyc2ns(delta,
- clocksource_32k.mult, clocksource_32k.shift);
-
- timespec_add_ns(tsp, nsecs);
- *ts = *tsp;
-}
-
-static int __init omap_init_clocksource_32k(void)
-{
- static char err[] __initdata = KERN_ERR
- "%s: can't register clocksource!\n";
-
- if (cpu_is_omap16xx() || cpu_class_is_omap2()) {
- struct clk *sync_32k_ick;
-
- if (cpu_is_omap16xx())
- clocksource_32k.read = omap16xx_32k_read;
- else if (cpu_is_omap2420())
- clocksource_32k.read = omap2420_32k_read;
- else if (cpu_is_omap2430())
- clocksource_32k.read = omap2430_32k_read;
- else if (cpu_is_omap34xx())
- clocksource_32k.read = omap34xx_32k_read;
- else if (cpu_is_omap44xx())
- clocksource_32k.read = omap44xx_32k_read;
- else
- return -ENODEV;
-
- sync_32k_ick = clk_get(NULL, "omap_32ksync_ick");
- if (sync_32k_ick)
- clk_enable(sync_32k_ick);
-
- clocksource_32k.mult = clocksource_hz2mult(32768,
- clocksource_32k.shift);
-
- offset_32k = clocksource_32k.read(&clocksource_32k);
-
- if (clocksource_register(&clocksource_32k))
- printk(err, clocksource_32k.name);
- }
- return 0;
-}
-arch_initcall(omap_init_clocksource_32k);
-
-#endif /* !(defined(CONFIG_ARCH_OMAP730) || defined(CONFIG_ARCH_OMAP15XX)) */
-
-/* Global address base setup code */
-
-#if defined(CONFIG_ARCH_OMAP2) || defined(CONFIG_ARCH_OMAP3)
-
-static void __init __omap2_set_globals(struct omap_globals *omap2_globals)
-{
- omap2_set_globals_tap(omap2_globals);
- omap2_set_globals_sdrc(omap2_globals);
- omap2_set_globals_control(omap2_globals);
- omap2_set_globals_prcm(omap2_globals);
- omap2_set_globals_uart(omap2_globals);
-}
-
-#endif
-
-#if defined(CONFIG_ARCH_OMAP2420)
-
-static struct omap_globals omap242x_globals = {
- .class = OMAP242X_CLASS,
- .tap = OMAP2_L4_IO_ADDRESS(0x48014000),
- .sdrc = OMAP2420_SDRC_BASE,
- .sms = OMAP2420_SMS_BASE,
- .ctrl = OMAP2420_CTRL_BASE,
- .prm = OMAP2420_PRM_BASE,
- .cm = OMAP2420_CM_BASE,
- .uart1_phys = OMAP2_UART1_BASE,
- .uart2_phys = OMAP2_UART2_BASE,
- .uart3_phys = OMAP2_UART3_BASE,
-};
-
-void __init omap2_set_globals_242x(void)
-{
- __omap2_set_globals(&omap242x_globals);
-}
-#endif
-
-#if defined(CONFIG_ARCH_OMAP2430)
-
-static struct omap_globals omap243x_globals = {
- .class = OMAP243X_CLASS,
- .tap = OMAP2_L4_IO_ADDRESS(0x4900a000),
- .sdrc = OMAP243X_SDRC_BASE,
- .sms = OMAP243X_SMS_BASE,
- .ctrl = OMAP243X_CTRL_BASE,
- .prm = OMAP2430_PRM_BASE,
- .cm = OMAP2430_CM_BASE,
- .uart1_phys = OMAP2_UART1_BASE,
- .uart2_phys = OMAP2_UART2_BASE,
- .uart3_phys = OMAP2_UART3_BASE,
-};
-
-void __init omap2_set_globals_243x(void)
-{
- __omap2_set_globals(&omap243x_globals);
-}
-#endif
-
-#if defined(CONFIG_ARCH_OMAP3)
-
-static struct omap_globals omap3_globals = {
- .class = OMAP343X_CLASS,
- .tap = OMAP2_L4_IO_ADDRESS(0x4830A000),
- .sdrc = OMAP343X_SDRC_BASE,
- .sms = OMAP343X_SMS_BASE,
- .ctrl = OMAP343X_CTRL_BASE,
- .prm = OMAP3430_PRM_BASE,
- .cm = OMAP3430_CM_BASE,
- .uart1_phys = OMAP3_UART1_BASE,
- .uart2_phys = OMAP3_UART2_BASE,
- .uart3_phys = OMAP3_UART3_BASE,
- .uart4_phys = OMAP3_UART4_BASE, /* Only on 3630 */
-};
-
-void __init omap2_set_globals_3xxx(void)
-{
- __omap2_set_globals(&omap3_globals);
-}
-
-void __init omap3_map_io(void)
-{
- omap2_set_globals_3xxx();
- omap34xx_map_common_io();
-}
-#endif
-
-#if defined(CONFIG_ARCH_OMAP4)
-static struct omap_globals omap4_globals = {
- .class = OMAP443X_CLASS,
- .tap = OMAP2_L4_IO_ADDRESS(OMAP443X_SCM_BASE),
- .ctrl = OMAP443X_CTRL_BASE,
- .prm = OMAP4430_PRM_BASE,
- .cm = OMAP4430_CM_BASE,
- .cm2 = OMAP4430_CM2_BASE,
- .uart1_phys = OMAP4_UART1_BASE,
- .uart2_phys = OMAP4_UART2_BASE,
- .uart3_phys = OMAP4_UART3_BASE,
- .uart4_phys = OMAP4_UART4_BASE,
-};
-
-void __init omap2_set_globals_443x(void)
-{
- omap2_set_globals_tap(&omap4_globals);
- omap2_set_globals_control(&omap4_globals);
- omap2_set_globals_prcm(&omap4_globals);
- omap2_set_globals_uart(&omap4_globals);
-}
-#endif
-
diff --git a/arch/arm/plat-omap/counter_32k.c b/arch/arm/plat-omap/counter_32k.c
new file mode 100644
index 000000000000..155fe43a672b
--- /dev/null
+++ b/arch/arm/plat-omap/counter_32k.c
@@ -0,0 +1,183 @@
+/*
+ * OMAP 32ksynctimer/counter_32k-related code
+ *
+ * Copyright (C) 2009 Texas Instruments
+ * Copyright (C) 2010 Nokia Corporation
+ * Tony Lindgren <tony@atomide.com>
+ * Added OMAP4 support - Santosh Shilimkar <santosh.shilimkar@ti.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * NOTE: This timer is not the same timer as the old OMAP1 MPU timer.
+ */
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/clk.h>
+#include <linux/io.h>
+
+#include <plat/common.h>
+#include <plat/board.h>
+
+#include <plat/clock.h>
+
+
+/*
+ * 32KHz clocksource ... always available on most chips except
+ * OMAP 730 and 1510. Other timers could be used as clocksources, with
+ * higher resolution in free-running counter modes (e.g. 12 MHz xtal),
+ * but systems won't necessarily want to spend resources that way.
+ */
+
+#define OMAP16XX_TIMER_32K_SYNCHRONIZED 0xfffbc410
+
+#if !(defined(CONFIG_ARCH_OMAP730) || defined(CONFIG_ARCH_OMAP15XX))
+
+#include <linux/clocksource.h>
+
+/*
+ * offset_32k holds the init time counter value. It is then subtracted
+ * from every counter read to achieve a counter that counts time from the
+ * kernel boot (needed for sched_clock()).
+ */
+static u32 offset_32k __read_mostly;
+
+#ifdef CONFIG_ARCH_OMAP16XX
+static cycle_t omap16xx_32k_read(struct clocksource *cs)
+{
+ return omap_readl(OMAP16XX_TIMER_32K_SYNCHRONIZED) - offset_32k;
+}
+#else
+#define omap16xx_32k_read NULL
+#endif
+
+#ifdef CONFIG_ARCH_OMAP2420
+static cycle_t omap2420_32k_read(struct clocksource *cs)
+{
+ return omap_readl(OMAP2420_32KSYNCT_BASE + 0x10) - offset_32k;
+}
+#else
+#define omap2420_32k_read NULL
+#endif
+
+#ifdef CONFIG_ARCH_OMAP2430
+static cycle_t omap2430_32k_read(struct clocksource *cs)
+{
+ return omap_readl(OMAP2430_32KSYNCT_BASE + 0x10) - offset_32k;
+}
+#else
+#define omap2430_32k_read NULL
+#endif
+
+#ifdef CONFIG_ARCH_OMAP3
+static cycle_t omap34xx_32k_read(struct clocksource *cs)
+{
+ return omap_readl(OMAP3430_32KSYNCT_BASE + 0x10) - offset_32k;
+}
+#else
+#define omap34xx_32k_read NULL
+#endif
+
+#ifdef CONFIG_ARCH_OMAP4
+static cycle_t omap44xx_32k_read(struct clocksource *cs)
+{
+ return omap_readl(OMAP4430_32KSYNCT_BASE + 0x10) - offset_32k;
+}
+#else
+#define omap44xx_32k_read NULL
+#endif
+
+/*
+ * Kernel assumes that sched_clock can be called early but may not have
+ * things ready yet.
+ */
+static cycle_t omap_32k_read_dummy(struct clocksource *cs)
+{
+ return 0;
+}
+
+static struct clocksource clocksource_32k = {
+ .name = "32k_counter",
+ .rating = 250,
+ .read = omap_32k_read_dummy,
+ .mask = CLOCKSOURCE_MASK(32),
+ .shift = 10,
+ .flags = CLOCK_SOURCE_IS_CONTINUOUS,
+};
+
+/*
+ * Returns current time from boot in nsecs. It's OK for this to wrap
+ * around for now, as it's just a relative time stamp.
+ */
+unsigned long long sched_clock(void)
+{
+ return clocksource_cyc2ns(clocksource_32k.read(&clocksource_32k),
+ clocksource_32k.mult, clocksource_32k.shift);
+}
+
+/**
+ * read_persistent_clock - Return time from a persistent clock.
+ *
+ * Reads the time from a source which isn't disabled during PM, the
+ * 32k sync timer. Convert the cycles elapsed since last read into
+ * nsecs and adds to a monotonically increasing timespec.
+ */
+static struct timespec persistent_ts;
+static cycles_t cycles, last_cycles;
+void read_persistent_clock(struct timespec *ts)
+{
+ unsigned long long nsecs;
+ cycles_t delta;
+ struct timespec *tsp = &persistent_ts;
+
+ last_cycles = cycles;
+ cycles = clocksource_32k.read(&clocksource_32k);
+ delta = cycles - last_cycles;
+
+ nsecs = clocksource_cyc2ns(delta,
+ clocksource_32k.mult, clocksource_32k.shift);
+
+ timespec_add_ns(tsp, nsecs);
+ *ts = *tsp;
+}
+
+static int __init omap_init_clocksource_32k(void)
+{
+ static char err[] __initdata = KERN_ERR
+ "%s: can't register clocksource!\n";
+
+ if (cpu_is_omap16xx() || cpu_class_is_omap2()) {
+ struct clk *sync_32k_ick;
+
+ if (cpu_is_omap16xx())
+ clocksource_32k.read = omap16xx_32k_read;
+ else if (cpu_is_omap2420())
+ clocksource_32k.read = omap2420_32k_read;
+ else if (cpu_is_omap2430())
+ clocksource_32k.read = omap2430_32k_read;
+ else if (cpu_is_omap34xx())
+ clocksource_32k.read = omap34xx_32k_read;
+ else if (cpu_is_omap44xx())
+ clocksource_32k.read = omap44xx_32k_read;
+ else
+ return -ENODEV;
+
+ sync_32k_ick = clk_get(NULL, "omap_32ksync_ick");
+ if (sync_32k_ick)
+ clk_enable(sync_32k_ick);
+
+ clocksource_32k.mult = clocksource_hz2mult(32768,
+ clocksource_32k.shift);
+
+ offset_32k = clocksource_32k.read(&clocksource_32k);
+
+ if (clocksource_register(&clocksource_32k))
+ printk(err, clocksource_32k.name);
+ }
+ return 0;
+}
+arch_initcall(omap_init_clocksource_32k);
+
+#endif /* !(defined(CONFIG_ARCH_OMAP730) || defined(CONFIG_ARCH_OMAP15XX)) */
+
diff --git a/arch/arm/plat-omap/cpu-omap.c b/arch/arm/plat-omap/cpu-omap.c
index 6d3d33360056..11c54ec8d47f 100644
--- a/arch/arm/plat-omap/cpu-omap.c
+++ b/arch/arm/plat-omap/cpu-omap.c
@@ -40,7 +40,7 @@ static struct clk *mpu_clk;
/* TODO: Add support for SDRAM timing changes */
-int omap_verify_speed(struct cpufreq_policy *policy)
+static int omap_verify_speed(struct cpufreq_policy *policy)
{
if (freq_table)
return cpufreq_frequency_table_verify(policy, freq_table);
@@ -58,7 +58,7 @@ int omap_verify_speed(struct cpufreq_policy *policy)
return 0;
}
-unsigned int omap_getspeed(unsigned int cpu)
+static unsigned int omap_getspeed(unsigned int cpu)
{
unsigned long rate;
diff --git a/arch/arm/plat-omap/devices.c b/arch/arm/plat-omap/devices.c
index d1920be7833b..fc819120978d 100644
--- a/arch/arm/plat-omap/devices.c
+++ b/arch/arm/plat-omap/devices.c
@@ -15,13 +15,13 @@
#include <linux/platform_device.h>
#include <linux/io.h>
#include <linux/slab.h>
+#include <linux/memblock.h>
#include <mach/hardware.h>
#include <asm/mach-types.h>
#include <asm/mach/map.h>
#include <plat/tc.h>
-#include <plat/control.h>
#include <plat/board.h>
#include <plat/mmc.h>
#include <mach/gpio.h>
@@ -272,6 +272,37 @@ static void omap_init_wdt(void)
static inline void omap_init_wdt(void) {}
#endif
+#if defined(CONFIG_TIDSPBRIDGE) || defined(CONFIG_TIDSPBRIDGE_MODULE)
+
+static phys_addr_t omap_dsp_phys_mempool_base;
+
+void __init omap_dsp_reserve_sdram_memblock(void)
+{
+ phys_addr_t size = CONFIG_TIDSPBRIDGE_MEMPOOL_SIZE;
+ phys_addr_t paddr;
+
+ if (!size)
+ return;
+
+ paddr = memblock_alloc(size, SZ_1M);
+ if (!paddr) {
+ pr_err("%s: failed to reserve %x bytes\n",
+ __func__, size);
+ return;
+ }
+ memblock_free(paddr, size);
+ memblock_remove(paddr, size);
+
+ omap_dsp_phys_mempool_base = paddr;
+}
+
+phys_addr_t omap_dsp_get_mempool_base(void)
+{
+ return omap_dsp_phys_mempool_base;
+}
+EXPORT_SYMBOL(omap_dsp_get_mempool_base);
+#endif
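A hedged sketch of how the DSP bridge side might consume the reserved region via omap_dsp_get_mempool_base(); the ioremap() use is illustrative only and not part of this patch:

#include <linux/io.h>

static void __iomem *dsp_map_mempool(void)
{
	phys_addr_t pool_base = omap_dsp_get_mempool_base();

	if (!pool_base)
		return NULL;	/* nothing was carved out at boot */

	/* purely illustrative mapping of the reserved pool */
	return ioremap(pool_base, CONFIG_TIDSPBRIDGE_MEMPOOL_SIZE);
}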
+
/*
* This gets called after board-specific INIT_MACHINE, and initializes most
* on-chip peripherals accessible on this board (except for few like USB):
@@ -300,7 +331,6 @@ static int __init omap_init_devices(void)
omap_init_rng();
omap_init_mcpdm();
omap_init_uwire();
- omap_init_wdt();
return 0;
}
arch_initcall(omap_init_devices);
diff --git a/arch/arm/plat-omap/dma.c b/arch/arm/plat-omap/dma.c
index ec7eddf9e525..2c2826571d45 100644
--- a/arch/arm/plat-omap/dma.c
+++ b/arch/arm/plat-omap/dma.c
@@ -30,6 +30,7 @@
#include <linux/irq.h>
#include <linux/io.h>
#include <linux/slab.h>
+#include <linux/delay.h>
#include <asm/system.h>
#include <mach/hardware.h>
@@ -996,11 +997,17 @@ void omap_start_dma(int lch)
l = dma_read(CCR(lch));
/*
- * Errata: On ES2.0 BUFFERING disable must be set.
- * This will always fail on ES1.0
+ * Errata: Inter Frame DMA buffering issue (All OMAP2420 and
+ * OMAP2430ES1.0): DMA will wrongly buffer elements if packing and
+ * bursting is enabled. This might result in data getting stalled in
+ * the FIFO at the end of the block.
+ * Workaround: DMA channels must have BUFFERING_DISABLED bit set to
+ * guarantee no data will stay in the DMA FIFO in case inter frame
+ * buffering occurs.
*/
- if (cpu_is_omap24xx())
- l |= OMAP_DMA_CCR_EN;
+ if (cpu_is_omap2420() ||
+ (cpu_is_omap2430() && (omap_type() == OMAP2430_REV_ES1_0)))
+ l |= OMAP_DMA_CCR_BUFFERING_DISABLE;
l |= OMAP_DMA_CCR_EN;
dma_write(l, CCR(lch));
@@ -1018,8 +1025,39 @@ void omap_stop_dma(int lch)
dma_write(0, CICR(lch));
l = dma_read(CCR(lch));
- l &= ~OMAP_DMA_CCR_EN;
- dma_write(l, CCR(lch));
+ /* OMAP3 Errata i541: sDMA FIFO draining does not finish */
+ if (cpu_is_omap34xx() && (l & OMAP_DMA_CCR_SEL_SRC_DST_SYNC)) {
+ int i = 0;
+ u32 sys_cf;
+
+ /* Configure No-Standby */
+ l = dma_read(OCP_SYSCONFIG);
+ sys_cf = l;
+ l &= ~DMA_SYSCONFIG_MIDLEMODE_MASK;
+ l |= DMA_SYSCONFIG_MIDLEMODE(DMA_IDLEMODE_NO_IDLE);
+ dma_write(l , OCP_SYSCONFIG);
+
+ l = dma_read(CCR(lch));
+ l &= ~OMAP_DMA_CCR_EN;
+ dma_write(l, CCR(lch));
+
+ /* Wait for sDMA FIFO drain */
+ l = dma_read(CCR(lch));
+ while (i < 100 && (l & (OMAP_DMA_CCR_RD_ACTIVE |
+ OMAP_DMA_CCR_WR_ACTIVE))) {
+ udelay(5);
+ i++;
+ l = dma_read(CCR(lch));
+ }
+ if (i >= 100)
+ printk(KERN_ERR "DMA drain did not complete on "
+ "lch %d\n", lch);
+ /* Restore OCP_SYSCONFIG */
+ dma_write(sys_cf, OCP_SYSCONFIG);
+ } else {
+ l &= ~OMAP_DMA_CCR_EN;
+ dma_write(l, CCR(lch));
+ }
if (!omap_dma_in_1510_mode() && dma_chan[lch].next_lch != -1) {
int next_lch, cur_lch = lch;
@@ -1945,6 +1983,8 @@ static int omap2_dma_handle_ch(int ch)
dma_write(OMAP2_DMA_CSR_CLEAR_MASK, CSR(ch));
dma_write(1 << ch, IRQSTATUS_L0);
+ /* read back the register to flush the write */
+ dma_read(IRQSTATUS_L0);
/* If the ch is not chained then chain_id will be -1 */
if (dma_chan[ch].chain_id != -1) {
diff --git a/arch/arm/plat-omap/dmtimer.c b/arch/arm/plat-omap/dmtimer.c
index 44bafdab2dce..1d706cf63ca0 100644
--- a/arch/arm/plat-omap/dmtimer.c
+++ b/arch/arm/plat-omap/dmtimer.c
@@ -581,7 +581,7 @@ int omap_dm_timer_set_source(struct omap_dm_timer *timer, int source)
* When the functional clock disappears, too quick writes seem
* to cause an abort. XXX Is this still necessary?
*/
- __delay(150000);
+ __delay(300000);
return ret;
}
diff --git a/arch/arm/plat-omap/fb.c b/arch/arm/plat-omap/fb.c
index 71934817e172..c9e5d7298c40 100644
--- a/arch/arm/plat-omap/fb.c
+++ b/arch/arm/plat-omap/fb.c
@@ -36,6 +36,8 @@
#include <plat/board.h>
#include <plat/sram.h>
+#include "fb.h"
+
#if defined(CONFIG_FB_OMAP) || defined(CONFIG_FB_OMAP_MODULE)
static struct omapfb_platform_data omapfb_config;
@@ -94,7 +96,7 @@ static int fbmem_region_reserved(unsigned long start, size_t size)
* Get the region_idx`th region from board config/ATAG and convert it to
* our internal format.
*/
-static int get_fbmem_region(int region_idx, struct omapfb_mem_region *rg)
+static int __init get_fbmem_region(int region_idx, struct omapfb_mem_region *rg)
{
const struct omap_fbmem_config *conf;
u32 paddr;
@@ -126,7 +128,7 @@ static int set_fbmem_region_type(struct omapfb_mem_region *rg, int mem_type,
* type = 0 && paddr = 0, a default don't care case maps to
* the SDRAM type.
*/
- if (rg->type || (!rg->type && !rg->paddr))
+ if (rg->type || !rg->paddr)
return 0;
if (ranges_overlap(rg->paddr, rg->size, mem_start, mem_size)) {
rg->type = mem_type;
@@ -258,7 +260,7 @@ void __init omapfb_reserve_sdram_memblock(void)
* this point, since the driver built as a module would have problem with
* freeing / reallocating the regions.
*/
-unsigned long omapfb_reserve_sram(unsigned long sram_pstart,
+unsigned long __init omapfb_reserve_sram(unsigned long sram_pstart,
unsigned long sram_vstart,
unsigned long sram_size,
unsigned long pstart_avail,
@@ -332,7 +334,7 @@ void omapfb_set_ctrl_platform_data(void *data)
omapfb_config.ctrl_platform_data = data;
}
-static inline int omap_init_fb(void)
+static int __init omap_init_fb(void)
{
const struct omap_lcd_config *conf;
@@ -377,7 +379,7 @@ void omapfb_set_platform_data(struct omapfb_platform_data *data)
omapfb_config = *data;
}
-static inline int omap_init_fb(void)
+static int __init omap_init_fb(void)
{
return platform_device_register(&omap_fb_device);
}
@@ -388,7 +390,7 @@ void omapfb_reserve_sdram_memblock(void)
{
}
-unsigned long omapfb_reserve_sram(unsigned long sram_pstart,
+unsigned long __init omapfb_reserve_sram(unsigned long sram_pstart,
unsigned long sram_vstart,
unsigned long sram_size,
unsigned long start_avail,
@@ -407,7 +409,7 @@ void omapfb_reserve_sdram_memblock(void)
{
}
-unsigned long omapfb_reserve_sram(unsigned long sram_pstart,
+unsigned long __init omapfb_reserve_sram(unsigned long sram_pstart,
unsigned long sram_vstart,
unsigned long sram_size,
unsigned long start_avail,
diff --git a/arch/arm/plat-omap/fb.h b/arch/arm/plat-omap/fb.h
new file mode 100644
index 000000000000..d765d0bd8520
--- /dev/null
+++ b/arch/arm/plat-omap/fb.h
@@ -0,0 +1,10 @@
+#ifndef __PLAT_OMAP_FB_H__
+#define __PLAT_OMAP_FB_H__
+
+extern unsigned long omapfb_reserve_sram(unsigned long sram_pstart,
+ unsigned long sram_vstart,
+ unsigned long sram_size,
+ unsigned long pstart_avail,
+ unsigned long size_avail);
+
+#endif /* __PLAT_OMAP_FB_H__ */
diff --git a/arch/arm/plat-omap/gpio.c b/arch/arm/plat-omap/gpio.c
index 7951eefe1a0e..c05c653d1674 100644
--- a/arch/arm/plat-omap/gpio.c
+++ b/arch/arm/plat-omap/gpio.c
@@ -2084,9 +2084,10 @@ void omap2_gpio_prepare_for_idle(int power_state)
for (i = min; i < gpio_bank_count; i++) {
struct gpio_bank *bank = &gpio_bank[i];
- u32 l1, l2;
+ u32 l1 = 0, l2 = 0;
+ int j;
- if (bank->dbck_enable_mask)
+ for (j = 0; j < hweight_long(bank->dbck_enable_mask); j++)
clk_disable(bank->dbck);
if (power_state > PWRDM_POWER_OFF)
@@ -2151,9 +2152,10 @@ void omap2_gpio_resume_after_idle(void)
min = 1;
for (i = min; i < gpio_bank_count; i++) {
struct gpio_bank *bank = &gpio_bank[i];
- u32 l, gen, gen0, gen1;
+ u32 l = 0, gen, gen0, gen1;
+ int j;
- if (bank->dbck_enable_mask)
+ for (j = 0; j < hweight_long(bank->dbck_enable_mask); j++)
clk_enable(bank->dbck);
if (!workaround_enabled)
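The hweight_long() loops introduced above call clk_disable()/clk_enable() once per bit set in dbck_enable_mask, keeping the debounce clock's use count in step with the number of GPIO lines that enabled debouncing. A hedged sketch of the same idea in isolation (helper name is illustrative):

#include <linux/bitops.h>
#include <linux/clk.h>

/* Illustrative only: drop one debounce-clock reference per enabled line,
 * mirroring the per-line clk_enable() taken when debounce was set up. */
static void dbck_drop_refs(struct clk *dbck, unsigned long enable_mask)
{
	int i;

	for (i = 0; i < hweight_long(enable_mask); i++)
		clk_disable(dbck);
}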
diff --git a/arch/arm/plat-omap/include/plat/common.h b/arch/arm/plat-omap/include/plat/common.h
index 9776b41ad76f..a9d69a09920d 100644
--- a/arch/arm/plat-omap/include/plat/common.h
+++ b/arch/arm/plat-omap/include/plat/common.h
@@ -47,6 +47,7 @@ struct omap_globals {
unsigned long sdrc; /* SDRAM Controller */
unsigned long sms; /* SDRAM Memory Scheduler */
unsigned long ctrl; /* System Control Module */
+ unsigned long ctrl_pad; /* PAD Control Module */
unsigned long prm; /* Power and Reset Management */
unsigned long cm; /* Clock Management */
unsigned long cm2;
@@ -66,7 +67,6 @@ void omap2_set_globals_tap(struct omap_globals *);
void omap2_set_globals_sdrc(struct omap_globals *);
void omap2_set_globals_control(struct omap_globals *);
void omap2_set_globals_prcm(struct omap_globals *);
-void omap2_set_globals_uart(struct omap_globals *);
void omap3_map_io(void);
@@ -91,7 +91,8 @@ void omap3_map_io(void);
})
extern struct device *omap2_get_mpuss_device(void);
-extern struct device *omap2_get_dsp_device(void);
+extern struct device *omap2_get_iva_device(void);
extern struct device *omap2_get_l3_device(void);
+extern struct device *omap4_get_dsp_device(void);
#endif /* __ARCH_ARM_MACH_OMAP_COMMON_H */
diff --git a/arch/arm/plat-omap/include/plat/cpu.h b/arch/arm/plat-omap/include/plat/cpu.h
index 2e2ae530fced..3fd8b4055727 100644
--- a/arch/arm/plat-omap/include/plat/cpu.h
+++ b/arch/arm/plat-omap/include/plat/cpu.h
@@ -68,10 +68,9 @@ unsigned int omap_rev(void);
#define OMAP_REVBITS_00 0x00
#define OMAP_REVBITS_01 0x01
#define OMAP_REVBITS_02 0x02
-#define OMAP_REVBITS_10 0x10
-#define OMAP_REVBITS_20 0x20
-#define OMAP_REVBITS_30 0x30
-#define OMAP_REVBITS_40 0x40
+#define OMAP_REVBITS_03 0x03
+#define OMAP_REVBITS_04 0x04
+#define OMAP_REVBITS_05 0x05
/*
* Get the CPU revision for OMAP devices
@@ -363,23 +362,24 @@ IS_OMAP_TYPE(3517, 0x3517)
/* Various silicon revisions for omap2 */
#define OMAP242X_CLASS 0x24200024
-#define OMAP2420_REV_ES1_0 0x24200024
-#define OMAP2420_REV_ES2_0 0x24201024
+#define OMAP2420_REV_ES1_0 OMAP242X_CLASS
+#define OMAP2420_REV_ES2_0 (OMAP242X_CLASS | (OMAP_REVBITS_01 << 8))
#define OMAP243X_CLASS 0x24300024
-#define OMAP2430_REV_ES1_0 0x24300024
+#define OMAP2430_REV_ES1_0 OMAP243X_CLASS
#define OMAP343X_CLASS 0x34300034
-#define OMAP3430_REV_ES1_0 0x34300034
-#define OMAP3430_REV_ES2_0 0x34301034
-#define OMAP3430_REV_ES2_1 0x34302034
-#define OMAP3430_REV_ES3_0 0x34303034
-#define OMAP3430_REV_ES3_1 0x34304034
-#define OMAP3430_REV_ES3_1_2 0x34305034
-
-#define OMAP3630_REV_ES1_0 0x36300034
-#define OMAP3630_REV_ES1_1 0x36300134
-#define OMAP3630_REV_ES1_2 0x36300234
+#define OMAP3430_REV_ES1_0 OMAP343X_CLASS
+#define OMAP3430_REV_ES2_0 (OMAP343X_CLASS | (OMAP_REVBITS_01 << 8))
+#define OMAP3430_REV_ES2_1 (OMAP343X_CLASS | (OMAP_REVBITS_02 << 8))
+#define OMAP3430_REV_ES3_0 (OMAP343X_CLASS | (OMAP_REVBITS_03 << 8))
+#define OMAP3430_REV_ES3_1 (OMAP343X_CLASS | (OMAP_REVBITS_04 << 8))
+#define OMAP3430_REV_ES3_1_2 (OMAP343X_CLASS | (OMAP_REVBITS_05 << 8))
+
+#define OMAP363X_CLASS 0x36300034
+#define OMAP3630_REV_ES1_0 OMAP363X_CLASS
+#define OMAP3630_REV_ES1_1 (OMAP363X_CLASS | (OMAP_REVBITS_01 << 8))
+#define OMAP3630_REV_ES1_2 (OMAP363X_CLASS | (OMAP_REVBITS_02 << 8))
#define OMAP35XX_CLASS 0x35000034
#define OMAP3503_REV(v) (OMAP35XX_CLASS | (0x3503 << 16) | (v << 8))
@@ -390,7 +390,8 @@ IS_OMAP_TYPE(3517, 0x3517)
#define OMAP3517_REV(v) (OMAP35XX_CLASS | (0x3517 << 16) | (v << 8))
#define OMAP443X_CLASS 0x44300044
-#define OMAP4430_REV_ES1_0 0x44300044
+#define OMAP4430_REV_ES1_0 OMAP443X_CLASS
+#define OMAP4430_REV_ES2_0 0x44301044
/*
* omap_chip bits
@@ -417,10 +418,12 @@ IS_OMAP_TYPE(3517, 0x3517)
#define CHIP_IS_OMAP4430ES1 (1 << 8)
#define CHIP_IS_OMAP3630ES1_1 (1 << 9)
#define CHIP_IS_OMAP3630ES1_2 (1 << 10)
+#define CHIP_IS_OMAP4430ES2 (1 << 11)
#define CHIP_IS_OMAP24XX (CHIP_IS_OMAP2420 | CHIP_IS_OMAP2430)
-#define CHIP_IS_OMAP4430 (CHIP_IS_OMAP4430ES1)
+#define CHIP_IS_OMAP4430 (CHIP_IS_OMAP4430ES1 | \
+ CHIP_IS_OMAP4430ES2)
/*
* "GE" here represents "greater than or equal to" in terms of ES
diff --git a/arch/arm/plat-omap/include/plat/display.h b/arch/arm/plat-omap/include/plat/display.h
index 8bd15bdb4132..c915a661f1f5 100644
--- a/arch/arm/plat-omap/include/plat/display.h
+++ b/arch/arm/plat-omap/include/plat/display.h
@@ -81,37 +81,6 @@ enum omap_color_mode {
OMAP_DSS_COLOR_ARGB32 = 1 << 11, /* ARGB32 */
OMAP_DSS_COLOR_RGBA32 = 1 << 12, /* RGBA32 */
OMAP_DSS_COLOR_RGBX32 = 1 << 13, /* RGBx32 */
-
- OMAP_DSS_COLOR_GFX_OMAP2 =
- OMAP_DSS_COLOR_CLUT1 | OMAP_DSS_COLOR_CLUT2 |
- OMAP_DSS_COLOR_CLUT4 | OMAP_DSS_COLOR_CLUT8 |
- OMAP_DSS_COLOR_RGB12U | OMAP_DSS_COLOR_RGB16 |
- OMAP_DSS_COLOR_RGB24U | OMAP_DSS_COLOR_RGB24P,
-
- OMAP_DSS_COLOR_VID_OMAP2 =
- OMAP_DSS_COLOR_RGB16 | OMAP_DSS_COLOR_RGB24U |
- OMAP_DSS_COLOR_RGB24P | OMAP_DSS_COLOR_YUV2 |
- OMAP_DSS_COLOR_UYVY,
-
- OMAP_DSS_COLOR_GFX_OMAP3 =
- OMAP_DSS_COLOR_CLUT1 | OMAP_DSS_COLOR_CLUT2 |
- OMAP_DSS_COLOR_CLUT4 | OMAP_DSS_COLOR_CLUT8 |
- OMAP_DSS_COLOR_RGB12U | OMAP_DSS_COLOR_ARGB16 |
- OMAP_DSS_COLOR_RGB16 | OMAP_DSS_COLOR_RGB24U |
- OMAP_DSS_COLOR_RGB24P | OMAP_DSS_COLOR_ARGB32 |
- OMAP_DSS_COLOR_RGBA32 | OMAP_DSS_COLOR_RGBX32,
-
- OMAP_DSS_COLOR_VID1_OMAP3 =
- OMAP_DSS_COLOR_RGB12U | OMAP_DSS_COLOR_RGB16 |
- OMAP_DSS_COLOR_RGB24U | OMAP_DSS_COLOR_RGB24P |
- OMAP_DSS_COLOR_YUV2 | OMAP_DSS_COLOR_UYVY,
-
- OMAP_DSS_COLOR_VID2_OMAP3 =
- OMAP_DSS_COLOR_RGB12U | OMAP_DSS_COLOR_ARGB16 |
- OMAP_DSS_COLOR_RGB16 | OMAP_DSS_COLOR_RGB24U |
- OMAP_DSS_COLOR_RGB24P | OMAP_DSS_COLOR_YUV2 |
- OMAP_DSS_COLOR_UYVY | OMAP_DSS_COLOR_ARGB32 |
- OMAP_DSS_COLOR_RGBA32 | OMAP_DSS_COLOR_RGBX32,
};
enum omap_lcd_display_type {
diff --git a/arch/arm/plat-omap/include/plat/dma.h b/arch/arm/plat-omap/include/plat/dma.h
index af3a03941add..0cce4ca83aa0 100644
--- a/arch/arm/plat-omap/include/plat/dma.h
+++ b/arch/arm/plat-omap/include/plat/dma.h
@@ -319,6 +319,8 @@
#define OMAP34XX_DMA_USIM_TX 79 /* S_DMA_78 */
#define OMAP34XX_DMA_USIM_RX 80 /* S_DMA_79 */
+#define OMAP36XX_DMA_UART4_TX 81 /* S_DMA_80 */
+#define OMAP36XX_DMA_UART4_RX 82 /* S_DMA_81 */
/*----------------------------------------------------------------------------*/
#define OMAP1_DMA_TOUT_IRQ (1 << 0)
@@ -335,6 +337,10 @@
#define OMAP2_DMA_MISALIGNED_ERR_IRQ (1 << 11)
#define OMAP_DMA_CCR_EN (1 << 7)
+#define OMAP_DMA_CCR_RD_ACTIVE (1 << 9)
+#define OMAP_DMA_CCR_WR_ACTIVE (1 << 10)
+#define OMAP_DMA_CCR_SEL_SRC_DST_SYNC (1 << 24)
+#define OMAP_DMA_CCR_BUFFERING_DISABLE (1 << 25)
#define OMAP_DMA_DATA_TYPE_S8 0x00
#define OMAP_DMA_DATA_TYPE_S16 0x01
diff --git a/arch/arm/plat-omap/include/plat/dmtimer.h b/arch/arm/plat-omap/include/plat/dmtimer.h
index 20f1054c0a80..dfa3aff9761b 100644
--- a/arch/arm/plat-omap/include/plat/dmtimer.h
+++ b/arch/arm/plat-omap/include/plat/dmtimer.h
@@ -45,6 +45,8 @@
#define OMAP_TIMER_TRIGGER_OVERFLOW_AND_COMPARE 0x02
struct omap_dm_timer;
+extern struct omap_dm_timer *gptimer_wakeup;
+extern struct sys_timer omap_timer;
struct clk;
int omap_dm_timer_init(void);
diff --git a/arch/arm/plat-omap/include/plat/dsp.h b/arch/arm/plat-omap/include/plat/dsp.h
new file mode 100644
index 000000000000..9c604b390f9f
--- /dev/null
+++ b/arch/arm/plat-omap/include/plat/dsp.h
@@ -0,0 +1,31 @@
+#ifndef __OMAP_DSP_H__
+#define __OMAP_DSP_H__
+
+#include <linux/types.h>
+
+struct omap_dsp_platform_data {
+ void (*dsp_set_min_opp) (u8 opp_id);
+ u8 (*dsp_get_opp) (void);
+ void (*cpu_set_freq) (unsigned long f);
+ unsigned long (*cpu_get_freq) (void);
+ unsigned long mpu_speed[6];
+
+ /* functions to write and read PRCM registers */
+ void (*dsp_prm_write)(u32, s16 , u16);
+ u32 (*dsp_prm_read)(s16 , u16);
+ u32 (*dsp_prm_rmw_bits)(u32, u32, s16, s16);
+ void (*dsp_cm_write)(u32, s16 , u16);
+ u32 (*dsp_cm_read)(s16 , u16);
+ u32 (*dsp_cm_rmw_bits)(u32, u32, s16, s16);
+
+ phys_addr_t phys_mempool_base;
+ phys_addr_t phys_mempool_size;
+};
+
+#if defined(CONFIG_TIDSPBRIDGE) || defined(CONFIG_TIDSPBRIDGE_MODULE)
+extern void omap_dsp_reserve_sdram_memblock(void);
+#else
+static inline void omap_dsp_reserve_sdram_memblock(void) { }
+#endif
+
+#endif
diff --git a/arch/arm/plat-omap/include/plat/gpmc-smsc911x.h b/arch/arm/plat-omap/include/plat/gpmc-smsc911x.h
new file mode 100644
index 000000000000..872de0bf1e6b
--- /dev/null
+++ b/arch/arm/plat-omap/include/plat/gpmc-smsc911x.h
@@ -0,0 +1,35 @@
+/*
+ * arch/arm/plat-omap/include/plat/gpmc-smsc911x.h
+ *
+ * Copyright (C) 2009 Li-Pro.Net
+ * Stephan Linz <linz@li-pro.net>
+ *
+ * Modified from arch/arm/plat-omap/include/plat/gpmc-smc91x.h
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef __ASM_ARCH_OMAP_GPMC_SMSC911X_H__
+
+struct omap_smsc911x_platform_data {
+ int cs;
+ int gpio_irq;
+ int gpio_reset;
+ u32 flags;
+};
+
+#if defined(CONFIG_SMSC911X) || \
+ defined(CONFIG_SMSC911X_MODULE)
+
+extern void gpmc_smsc911x_init(struct omap_smsc911x_platform_data *d);
+
+#else
+
+static inline void gpmc_smsc911x_init(struct omap_smsc911x_platform_data *d)
+{
+}
+
+#endif
+#endif
diff --git a/arch/arm/plat-omap/include/plat/i2c.h b/arch/arm/plat-omap/include/plat/i2c.h
index 87f6bf2ea4fa..36a0befd6168 100644
--- a/arch/arm/plat-omap/include/plat/i2c.h
+++ b/arch/arm/plat-omap/include/plat/i2c.h
@@ -18,6 +18,8 @@
* 02110-1301 USA
*
*/
+#ifndef __ASM__ARCH_OMAP_I2C_H
+#define __ASM__ARCH_OMAP_I2C_H
#include <linux/i2c.h>
@@ -36,3 +38,5 @@ static inline int omap_register_i2c_bus(int bus_id, u32 clkrate,
void __init omap1_i2c_mux_pins(int bus_id);
void __init omap2_i2c_mux_pins(int bus_id);
+
+#endif /* __ASM__ARCH_OMAP_I2C_H */
diff --git a/arch/arm/plat-omap/include/plat/irqs.h b/arch/arm/plat-omap/include/plat/irqs.h
index c01d9f08a198..65e20a686713 100644
--- a/arch/arm/plat-omap/include/plat/irqs.h
+++ b/arch/arm/plat-omap/include/plat/irqs.h
@@ -345,6 +345,8 @@
#define INT_34XX_MMC3_IRQ 94
#define INT_34XX_GPT12_IRQ 95
+#define INT_36XX_UART4_IRQ 80
+
#define INT_35XX_HECC0_IRQ 24
#define INT_35XX_HECC1_IRQ 28
#define INT_35XX_EMAC_C0_RXTHRESH_IRQ 67
diff --git a/arch/arm/plat-omap/include/plat/mcbsp.h b/arch/arm/plat-omap/include/plat/mcbsp.h
index b4ff6a11a8f2..b87d83ccd545 100644
--- a/arch/arm/plat-omap/include/plat/mcbsp.h
+++ b/arch/arm/plat-omap/include/plat/mcbsp.h
@@ -30,6 +30,13 @@
#include <mach/hardware.h>
#include <plat/clock.h>
+/* macro for building platform_device for McBSP ports */
+#define OMAP_MCBSP_PLATFORM_DEVICE(port_nr) \
+static struct platform_device omap_mcbsp##port_nr = { \
+ .name = "omap-mcbsp-dai", \
+ .id = OMAP_MCBSP##port_nr, \
+}
+
#define OMAP7XX_MCBSP1_BASE 0xfffb1000
#define OMAP7XX_MCBSP2_BASE 0xfffb1800
@@ -312,6 +319,18 @@
#define RFSREN 0x0002
#define RSYNCERREN 0x0001
+/* CLKR signal muxing options */
+#define CLKR_SRC_CLKR 0
+#define CLKR_SRC_CLKX 1
+
+/* FSR signal muxing options */
+#define FSR_SRC_FSR 0
+#define FSR_SRC_FSX 1
+
+/* McBSP functional clock sources */
+#define MCBSP_CLKS_PRCM_SRC 0
+#define MCBSP_CLKS_PAD_SRC 1
+
/* we don't do multichannel for now */
struct omap_mcbsp_reg_cfg {
u16 spcr2;
@@ -398,6 +417,7 @@ struct omap_mcbsp_spi_cfg {
struct omap_mcbsp_ops {
void (*request)(unsigned int);
void (*free)(unsigned int);
+ int (*set_clks_src)(u8, u8);
};
struct omap_mcbsp_platform_data {
@@ -464,6 +484,9 @@ struct omap_mcbsp {
extern struct omap_mcbsp **mcbsp_ptr;
extern int omap_mcbsp_count, omap_mcbsp_cache_size;
+#define omap_mcbsp_check_valid_id(id) (id < omap_mcbsp_count)
+#define id_to_mcbsp_ptr(id) mcbsp_ptr[id];
+
int omap_mcbsp_init(void);
void omap_mcbsp_register_board_cfg(struct omap_mcbsp_platform_data *config,
int size);
@@ -502,6 +525,8 @@ int omap_mcbsp_spi_master_xmit_word_poll(unsigned int id, u32 word);
int omap_mcbsp_spi_master_recv_word_poll(unsigned int id, u32 * word);
+/* McBSP functional clock source changing function */
+extern int omap2_mcbsp_set_clks_src(u8 id, u8 fck_src_id);
/* SPI specific API */
void omap_mcbsp_set_spi_mode(unsigned int id, const struct omap_mcbsp_spi_cfg * spi_cfg);
@@ -510,6 +535,10 @@ int omap_mcbsp_pollread(unsigned int id, u16 * buf);
int omap_mcbsp_pollwrite(unsigned int id, u16 buf);
int omap_mcbsp_set_io_type(unsigned int id, omap_mcbsp_io_type_t io_type);
+/* McBSP signal muxing API */
+void omap2_mcbsp1_mux_clkr_src(u8 mux);
+void omap2_mcbsp1_mux_fsr_src(u8 mux);
+
#ifdef CONFIG_ARCH_OMAP3
/* Sidetone specific API */
int omap_st_set_chgain(unsigned int id, int channel, s16 chgain);
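The OMAP_MCBSP_PLATFORM_DEVICE() macro added near the top of this header stamps out one "omap-mcbsp-dai" platform_device per port. A hedged sketch of how SoC code might use it (the init function below is illustrative, not part of this patch):

#include <linux/platform_device.h>
#include <plat/mcbsp.h>

OMAP_MCBSP_PLATFORM_DEVICE(1);	/* defines static struct platform_device omap_mcbsp1 */
OMAP_MCBSP_PLATFORM_DEVICE(2);	/* defines static struct platform_device omap_mcbsp2 */

static int __init example_mcbsp_devices_init(void)
{
	platform_device_register(&omap_mcbsp1);
	platform_device_register(&omap_mcbsp2);
	return 0;
}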
diff --git a/arch/arm/plat-omap/include/plat/mmc.h b/arch/arm/plat-omap/include/plat/mmc.h
index 9b89ec601ee2..f57f36abb07e 100644
--- a/arch/arm/plat-omap/include/plat/mmc.h
+++ b/arch/arm/plat-omap/include/plat/mmc.h
@@ -71,12 +71,17 @@ struct omap_mmc_platform_data {
u64 dma_mask;
+ /* Register offset deviation */
+ u16 reg_offset;
+
struct omap_mmc_slot_data {
- /* 4 wire signaling is optional, and is used for SD/SDIO/HSMMC;
- * 8 wire signaling is also optional, and is used with HSMMC
+ /*
+ * 4/8 wires and any additional host capabilities
+ * need to be OR'd into caps (ref. linux/mmc/host.h)
*/
- u8 wires;
+ u8 wires; /* Used for the MMC driver on omap1 and 2420 */
+ u32 caps; /* Used for the MMC driver on 2430 and later */
/*
* nomux means "standard" muxing is wrong on this board, and
@@ -104,6 +109,7 @@ struct omap_mmc_platform_data {
/* we can put the features above into this variable */
#define HSMMC_HAS_PBIAS (1 << 0)
+#define HSMMC_HAS_UPDATED_RESET (1 << 1)
unsigned features;
int switch_pin; /* gpio (card detect) */
diff --git a/arch/arm/plat-omap/include/plat/omap-serial.h b/arch/arm/plat-omap/include/plat/omap-serial.h
new file mode 100644
index 000000000000..c8dae02f0704
--- /dev/null
+++ b/arch/arm/plat-omap/include/plat/omap-serial.h
@@ -0,0 +1,128 @@
+/*
+ * Driver for OMAP-UART controller.
+ * Based on drivers/serial/8250.c
+ *
+ * Copyright (C) 2010 Texas Instruments.
+ *
+ * Authors:
+ * Govindraj R <govindraj.raja@ti.com>
+ * Thara Gopinath <thara@ti.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#ifndef __OMAP_SERIAL_H__
+#define __OMAP_SERIAL_H__
+
+#include <linux/serial_core.h>
+#include <linux/platform_device.h>
+
+#include <plat/mux.h>
+
+#define DRIVER_NAME "omap-hsuart"
+
+/*
+ * Use ttyO as the tty device name [O -> OMAP].
+ * In bootargs, specify console=ttyO0 if UART1
+ * is used as the console UART.
+ */
+#define OMAP_SERIAL_NAME "ttyO"
+
+#define OMAP_MDR1_DISABLE 0x07
+#define OMAP_MDR1_MODE13X 0x03
+#define OMAP_MDR1_MODE16X 0x00
+#define OMAP_MODE13X_SPEED 230400
+
+/*
+ * LCR = 0xBF: Switch to Configuration Mode B.
+ * Configuration Mode B allows access
+ * to EFR, DLL and DLH.
+ * Reference: OMAP TRM Chapter 17,
+ * Section 1.4.3 Mode Selection
+ */
+#define OMAP_UART_LCR_CONF_MDB 0XBF
+
+/* WER = 0x7F
+ * Enable module level wakeup in WER reg
+ */
+#define OMAP_UART_WER_MOD_WKUP 0X7F
+
+/* Enable XON/XOFF flow control on output */
+#define OMAP_UART_SW_TX 0x04
+
+/* Enable XON/XOFF flow control on input */
+#define OMAP_UART_SW_RX 0x04
+
+#define OMAP_UART_SYSC_RESET 0X07
+#define OMAP_UART_TCR_TRIG 0X0F
+#define OMAP_UART_SW_CLR 0XF0
+#define OMAP_UART_FIFO_CLR 0X06
+
+#define OMAP_UART_DMA_CH_FREE -1
+
+#define RX_TIMEOUT (3 * HZ)
+#define OMAP_MAX_HSUART_PORTS 4
+
+#define MSR_SAVE_FLAGS UART_MSR_ANY_DELTA
+
+struct omap_uart_port_info {
+ bool dma_enabled; /* To specify DMA Mode */
+ unsigned int uartclk; /* UART clock rate */
+ void __iomem *membase; /* ioremap cookie or NULL */
+ resource_size_t mapbase; /* resource base */
+ unsigned long irqflags; /* request_irq flags */
+ upf_t flags; /* UPF_* flags */
+};
+
+struct uart_omap_dma {
+ u8 uart_dma_tx;
+ u8 uart_dma_rx;
+ int rx_dma_channel;
+ int tx_dma_channel;
+ dma_addr_t rx_buf_dma_phys;
+ dma_addr_t tx_buf_dma_phys;
+ unsigned int uart_base;
+ /*
+ * Buffer for rx dma. It is not required for tx because the buffer
+ * comes from port structure.
+ */
+ unsigned char *rx_buf;
+ unsigned int prev_rx_dma_pos;
+ int tx_buf_size;
+ int tx_dma_used;
+ int rx_dma_used;
+ spinlock_t tx_lock;
+ spinlock_t rx_lock;
+ /* timer to poll activity on rx dma */
+ struct timer_list rx_timer;
+ int rx_buf_size;
+ int rx_timeout;
+};
+
+struct uart_omap_port {
+ struct uart_port port;
+ struct uart_omap_dma uart_dma;
+ struct platform_device *pdev;
+
+ unsigned char ier;
+ unsigned char lcr;
+ unsigned char mcr;
+ unsigned char fcr;
+ unsigned char efr;
+
+ int use_dma;
+ /*
+ * Some bits in registers are cleared on a read, so they must
+ * be saved whenever the register is read but the bits will not
+ * be immediately processed.
+ */
+ unsigned int lsr_break_flag;
+ unsigned char msr_saved_flags;
+ char name[20];
+ unsigned long port_activity;
+};
+
+#endif /* __OMAP_SERIAL_H__ */
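struct omap_uart_port_info above is the platform data consumed by the new omap-hsuart driver, while uart_omap_dma and uart_omap_port hold the driver's per-port runtime state. A minimal, hypothetical example of filling the platform data for one port (all field values are illustrative, not taken from a board file):

#include <linux/serial_core.h>
#include <plat/omap-serial.h>

static struct omap_uart_port_info example_uart1_info = {
	.dma_enabled	= false,		/* PIO mode */
	.uartclk	= 48000000,		/* functional clock, assumed 48 MHz */
	.flags		= UPF_BOOT_AUTOCONF,
};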
diff --git a/arch/arm/plat-omap/include/plat/omap24xx.h b/arch/arm/plat-omap/include/plat/omap24xx.h
index 7055672a8c68..92df9e27cc5c 100644
--- a/arch/arm/plat-omap/include/plat/omap24xx.h
+++ b/arch/arm/plat-omap/include/plat/omap24xx.h
@@ -40,7 +40,7 @@
#define OMAP24XX_IC_BASE (L4_24XX_BASE + 0xfe000)
#define OMAP24XX_IVA_INTC_BASE 0x40000000
-#define OMAP2420_CTRL_BASE L4_24XX_BASE
+#define OMAP242X_CTRL_BASE L4_24XX_BASE
#define OMAP2420_32KSYNCT_BASE (L4_24XX_BASE + 0x4000)
#define OMAP2420_PRCM_BASE (L4_24XX_BASE + 0x8000)
#define OMAP2420_CM_BASE (L4_24XX_BASE + 0x8000)
diff --git a/arch/arm/plat-omap/include/plat/omap4-keypad.h b/arch/arm/plat-omap/include/plat/omap4-keypad.h
new file mode 100644
index 000000000000..2b1d9bc1eebb
--- /dev/null
+++ b/arch/arm/plat-omap/include/plat/omap4-keypad.h
@@ -0,0 +1,14 @@
+#ifndef ARCH_ARM_PLAT_OMAP4_KEYPAD_H
+#define ARCH_ARM_PLAT_OMAP4_KEYPAD_H
+
+#include <linux/input/matrix_keypad.h>
+
+struct omap4_keypad_platform_data {
+ const struct matrix_keymap_data *keymap_data;
+
+ u8 rows;
+ u8 cols;
+};
+
+extern int omap4_keyboard_init(struct omap4_keypad_platform_data *);
+#endif
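A board enables the new OMAP4 keypad support by passing a matrix keymap and the matrix dimensions through omap4_keypad_platform_data and then calling omap4_keyboard_init(). The keymap below is purely illustrative:

#include <linux/input.h>
#include <plat/omap4-keypad.h>

static const uint32_t example_keymap[] = {
	KEY(0, 0, KEY_VOLUMEUP),	/* row 0, column 0 */
	KEY(0, 1, KEY_VOLUMEDOWN),	/* row 0, column 1 */
};

static const struct matrix_keymap_data example_keymap_data = {
	.keymap		= example_keymap,
	.keymap_size	= ARRAY_SIZE(example_keymap),
};

static struct omap4_keypad_platform_data example_keypad_data = {
	.keymap_data	= &example_keymap_data,
	.rows		= 8,
	.cols		= 8,
};

The board file would then call omap4_keyboard_init(&example_keypad_data);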
diff --git a/arch/arm/plat-omap/include/plat/omap_device.h b/arch/arm/plat-omap/include/plat/omap_device.h
index 25cd9ac3b095..28e2d1a78433 100644
--- a/arch/arm/plat-omap/include/plat/omap_device.h
+++ b/arch/arm/plat-omap/include/plat/omap_device.h
@@ -36,6 +36,8 @@
#include <plat/omap_hwmod.h>
+extern struct device omap_device_parent;
+
/* omap_device._state values */
#define OMAP_DEVICE_STATE_UNKNOWN 0
#define OMAP_DEVICE_STATE_ENABLED 1
@@ -62,7 +64,6 @@
*
*/
struct omap_device {
- u32 magic;
struct platform_device pdev;
struct omap_hwmod **hwmods;
struct omap_device_pm_latency *pm_lats;
@@ -82,7 +83,6 @@ int omap_device_shutdown(struct platform_device *pdev);
/* Core code interface */
-bool omap_device_is_valid(struct omap_device *od);
int omap_device_count_resources(struct omap_device *od);
int omap_device_fill_resources(struct omap_device *od, struct resource *res);
diff --git a/arch/arm/plat-omap/include/plat/omap_hwmod.h b/arch/arm/plat-omap/include/plat/omap_hwmod.h
index a4e508dfaba2..7eaa8edf3b14 100644
--- a/arch/arm/plat-omap/include/plat/omap_hwmod.h
+++ b/arch/arm/plat-omap/include/plat/omap_hwmod.h
@@ -14,19 +14,16 @@
*
* These headers and macros are used to define OMAP on-chip module
* data and their integration with other OMAP modules and Linux.
- *
- * References:
- * - OMAP2420 Multimedia Processor Silicon Revision 2.1.1, 2.2 (SWPU064)
- * - OMAP2430 Multimedia Device POP Silicon Revision 2.1 (SWPU090)
- * - OMAP34xx Multimedia Device Silicon Revision 3.1 (SWPU108)
- * - OMAP4430 Multimedia Device Silicon Revision 1.0 (SWPU140)
- * - Open Core Protocol Specification 2.2
+ * Copious documentation and references can also be found in the
+ * omap_hwmod code, in arch/arm/mach-omap2/omap_hwmod.c (as of this
+ * writing).
*
* To do:
* - add interconnect error log structures
* - add pinmuxing
* - init_conn_id_bit (CONNID_BIT_VECTOR)
* - implement default hwmod SMS/SDRC flags?
+ * - remove unused fields
*
*/
#ifndef __ARCH_ARM_PLAT_OMAP_INCLUDE_MACH_OMAP_HWMOD_H
@@ -35,6 +32,7 @@
#include <linux/kernel.h>
#include <linux/list.h>
#include <linux/ioport.h>
+#include <linux/mutex.h>
#include <plat/cpu.h>
struct omap_device;
@@ -96,7 +94,7 @@ struct omap_hwmod_irq_info {
/**
* struct omap_hwmod_dma_info - DMA channels used by the hwmod
* @name: name of the DMA channel (module local name)
- * @dma_ch: DMA channel ID
+ * @dma_req: DMA request ID
*
* @name should be something short, e.g., "tx" or "rx". It is for use
* by platform_get_resource_byname(). It is defined locally to the
@@ -104,7 +102,20 @@ struct omap_hwmod_irq_info {
*/
struct omap_hwmod_dma_info {
const char *name;
- u16 dma_ch;
+ u16 dma_req;
+};
+
+/**
+ * struct omap_hwmod_rst_info - IP block reset lines used by hwmod
+ * @name: name of the reset line (module local name)
+ * @rst_shift: Offset of the reset bit
+ *
+ * @name should be something short, e.g., "cpu0" or "rst". It is defined
+ * locally to the hwmod.
+ */
+struct omap_hwmod_rst_info {
+ const char *name;
+ u8 rst_shift;
};
/**
@@ -237,8 +248,9 @@ struct omap_hwmod_ocp_if {
#define SYSC_HAS_CLOCKACTIVITY (1 << 4)
#define SYSC_HAS_SIDLEMODE (1 << 5)
#define SYSC_HAS_MIDLEMODE (1 << 6)
-#define SYSS_MISSING (1 << 7)
+#define SYSS_HAS_RESET_STATUS (1 << 7)
#define SYSC_NO_CACHE (1 << 8) /* XXX SW flag, belongs elsewhere */
+#define SYSC_HAS_RESET_STATUS (1 << 9)
/* omap_hwmod_sysconfig.clockact flags */
#define CLOCKACT_TEST_BOTH 0x0
@@ -327,10 +339,12 @@ struct omap_hwmod_omap2_prcm {
/**
* struct omap_hwmod_omap4_prcm - OMAP4-specific PRCM data
* @clkctrl_reg: PRCM address of the clock control register
+ * @rstctrl_reg: address of the XXX_RSTCTRL register located in the PRM
* @submodule_wkdep_bit: bit shift of the WKDEP range
*/
struct omap_hwmod_omap4_prcm {
void __iomem *clkctrl_reg;
+ void __iomem *rstctrl_reg;
u8 submodule_wkdep_bit;
};
@@ -352,6 +366,11 @@ struct omap_hwmod_omap4_prcm {
* HWMOD_SET_DEFAULT_CLOCKACT: program CLOCKACTIVITY bits at startup
* HWMOD_NO_IDLEST : this module does not have idle status - this is the case
* only for few initiator modules on OMAP2 & 3.
+ * HWMOD_CONTROL_OPT_CLKS_IN_RESET: Enable all optional clocks during reset.
+ * This is needed for devices like DSS that require optional clocks enabled
+ * in order to complete the reset. Optional clocks will be disabled
+ * again after the reset.
+ * HWMOD_16BIT_REG: Module has 16bit registers
*/
#define HWMOD_SWSUP_SIDLE (1 << 0)
#define HWMOD_SWSUP_MSTANDBY (1 << 1)
@@ -360,6 +379,8 @@ struct omap_hwmod_omap4_prcm {
#define HWMOD_NO_OCP_AUTOIDLE (1 << 4)
#define HWMOD_SET_DEFAULT_CLOCKACT (1 << 5)
#define HWMOD_NO_IDLEST (1 << 6)
+#define HWMOD_CONTROL_OPT_CLKS_IN_RESET (1 << 7)
+#define HWMOD_16BIT_REG (1 << 8)
/*
* omap_hwmod._int_flags definitions
@@ -410,7 +431,7 @@ struct omap_hwmod_class {
* @class: struct omap_hwmod_class * to the class of this hwmod
* @od: struct omap_device currently associated with this hwmod (internal use)
* @mpu_irqs: ptr to an array of MPU IRQs (see also mpu_irqs_cnt)
- * @sdma_chs: ptr to an array of SDMA channel IDs (see also sdma_chs_cnt)
+ * @sdma_reqs: ptr to an array of System DMA request IDs (see sdma_reqs_cnt)
* @prcm: PRCM data pertaining to this hwmod
* @main_clk: main clock: OMAP clock name
* @_clk: pointer to the main struct clk (filled in at runtime)
@@ -424,7 +445,7 @@ struct omap_hwmod_class {
* @msuspendmux_reg_id: CONTROL_MSUSPENDMUX register ID (1-6)
* @msuspendmux_shift: CONTROL_MSUSPENDMUX register bit shift
* @mpu_irqs_cnt: number of @mpu_irqs
- * @sdma_chs_cnt: number of @sdma_chs
+ * @sdma_reqs_cnt: number of @sdma_reqs
* @opt_clks_cnt: number of @opt_clks
* @master_cnt: number of @master entries
* @slaves_cnt: number of @slave entries
@@ -433,6 +454,7 @@ struct omap_hwmod_class {
* @_state: internal-use hwmod state
* @flags: hwmod flags (documented below)
* @omap_chip: OMAP chips this hwmod is present on
+ * @_mutex: mutex serializing operations on this hwmod
* @node: list node for hwmod list (internal use)
*
* @main_clk refers to this module's "main clock," which for our
@@ -448,7 +470,8 @@ struct omap_hwmod {
struct omap_hwmod_class *class;
struct omap_device *od;
struct omap_hwmod_irq_info *mpu_irqs;
- struct omap_hwmod_dma_info *sdma_chs;
+ struct omap_hwmod_dma_info *sdma_reqs;
+ struct omap_hwmod_rst_info *rst_lines;
union {
struct omap_hwmod_omap2_prcm omap2;
struct omap_hwmod_omap4_prcm omap4;
@@ -461,6 +484,7 @@ struct omap_hwmod {
void *dev_attr;
u32 _sysc_cache;
void __iomem *_mpu_rt_va;
+ struct mutex _mutex;
struct list_head node;
u16 flags;
u8 _mpu_port_index;
@@ -468,7 +492,8 @@ struct omap_hwmod {
u8 msuspendmux_shift;
u8 response_lat;
u8 mpu_irqs_cnt;
- u8 sdma_chs_cnt;
+ u8 sdma_reqs_cnt;
+ u8 rst_lines_cnt;
u8 opt_clks_cnt;
u8 masters_cnt;
u8 slaves_cnt;
@@ -492,6 +517,10 @@ int omap_hwmod_idle(struct omap_hwmod *oh);
int _omap_hwmod_idle(struct omap_hwmod *oh);
int omap_hwmod_shutdown(struct omap_hwmod *oh);
+int omap_hwmod_assert_hardreset(struct omap_hwmod *oh, const char *name);
+int omap_hwmod_deassert_hardreset(struct omap_hwmod *oh, const char *name);
+int omap_hwmod_read_hardreset(struct omap_hwmod *oh, const char *name);
+
int omap_hwmod_enable_clocks(struct omap_hwmod *oh);
int omap_hwmod_disable_clocks(struct omap_hwmod *oh);
@@ -500,8 +529,8 @@ int omap_hwmod_set_slave_idlemode(struct omap_hwmod *oh, u8 idlemode);
int omap_hwmod_reset(struct omap_hwmod *oh);
void omap_hwmod_ocp_barrier(struct omap_hwmod *oh);
-void omap_hwmod_writel(u32 v, struct omap_hwmod *oh, u16 reg_offs);
-u32 omap_hwmod_readl(struct omap_hwmod *oh, u16 reg_offs);
+void omap_hwmod_write(u32 v, struct omap_hwmod *oh, u16 reg_offs);
+u32 omap_hwmod_read(struct omap_hwmod *oh, u16 reg_offs);
int omap_hwmod_count_resources(struct omap_hwmod *oh);
int omap_hwmod_fill_resources(struct omap_hwmod *oh, struct resource *res);
@@ -534,5 +563,6 @@ int omap_hwmod_for_each_by_class(const char *classname,
extern int omap2420_hwmod_init(void);
extern int omap2430_hwmod_init(void);
extern int omap3xxx_hwmod_init(void);
+extern int omap44xx_hwmod_init(void);
#endif
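The new rst_lines bookkeeping is driven through the three *_hardreset() calls declared above, which look a reset line up by its module-local name. A hedged sketch of pulsing a line (the hwmod pointer and the "rst" line name are placeholders):

#include <plat/omap_hwmod.h>

/* Illustrative only: assert, then release, a named hard-reset line. */
static int example_pulse_hardreset(struct omap_hwmod *oh)
{
	int ret;

	ret = omap_hwmod_assert_hardreset(oh, "rst");
	if (ret)
		return ret;

	return omap_hwmod_deassert_hardreset(oh, "rst");
}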
diff --git a/arch/arm/plat-omap/include/plat/powerdomain.h b/arch/arm/plat-omap/include/plat/powerdomain.h
index fb6ec74fe39e..9ca420dcd2f8 100644
--- a/arch/arm/plat-omap/include/plat/powerdomain.h
+++ b/arch/arm/plat-omap/include/plat/powerdomain.h
@@ -32,6 +32,7 @@
/* Powerdomain allowable state bitfields */
#define PWRSTS_ON (1 << PWRDM_POWER_ON)
+#define PWRSTS_OFF (1 << PWRDM_POWER_OFF)
#define PWRSTS_OFF_ON ((1 << PWRDM_POWER_OFF) | \
(1 << PWRDM_POWER_ON))
@@ -161,5 +162,6 @@ int pwrdm_state_switch(struct powerdomain *pwrdm);
int pwrdm_clkdm_state_switch(struct clockdomain *clkdm);
int pwrdm_pre_transition(void);
int pwrdm_post_transition(void);
+int pwrdm_set_lowpwrstchange(struct powerdomain *pwrdm);
#endif
diff --git a/arch/arm/plat-omap/include/plat/prcm.h b/arch/arm/plat-omap/include/plat/prcm.h
index 9fbd91419cd1..ab77442e42ab 100644
--- a/arch/arm/plat-omap/include/plat/prcm.h
+++ b/arch/arm/plat-omap/include/plat/prcm.h
@@ -38,6 +38,8 @@ u32 prm_read_mod_reg(s16 module, u16 idx);
void prm_write_mod_reg(u32 val, s16 module, u16 idx);
u32 prm_rmw_mod_reg_bits(u32 mask, u32 bits, s16 module, s16 idx);
u32 prm_read_mod_bits_shift(s16 domain, s16 idx, u32 mask);
+u32 omap4_prm_read_bits_shift(void __iomem *reg, u32 mask);
+u32 omap4_prm_rmw_reg_bits(u32 mask, u32 bits, void __iomem *reg);
u32 cm_read_mod_reg(s16 module, u16 idx);
void cm_write_mod_reg(u32 val, s16 module, u16 idx);
u32 cm_rmw_mod_reg_bits(u32 mask, u32 bits, s16 module, s16 idx);
diff --git a/arch/arm/plat-omap/include/plat/sdrc.h b/arch/arm/plat-omap/include/plat/sdrc.h
index 7b76f50564ba..efd87c8dda69 100644
--- a/arch/arm/plat-omap/include/plat/sdrc.h
+++ b/arch/arm/plat-omap/include/plat/sdrc.h
@@ -147,6 +147,7 @@ struct memory_timings {
};
extern void omap2xxx_sdrc_init_params(u32 force_lock_to_unlock_mode);
+struct omap_sdrc_params *rx51_get_sdram_timings(void);
u32 omap2xxx_sdrc_dll_is_unlocked(void);
u32 omap2xxx_sdrc_reprogram(u32 level, u32 force);
diff --git a/arch/arm/plat-omap/include/plat/sram.h b/arch/arm/plat-omap/include/plat/sram.h
index 16a1b458d53c..5905100b29a1 100644
--- a/arch/arm/plat-omap/include/plat/sram.h
+++ b/arch/arm/plat-omap/include/plat/sram.h
@@ -11,7 +11,6 @@
#ifndef __ARCH_ARM_OMAP_SRAM_H
#define __ARCH_ARM_OMAP_SRAM_H
-extern int __init omap_sram_init(void);
extern void * omap_sram_push(void * start, unsigned long size);
extern void omap_sram_reprogram_clock(u32 dpllctl, u32 ckctl);
diff --git a/arch/arm/plat-omap/include/plat/uncompress.h b/arch/arm/plat-omap/include/plat/uncompress.h
index ddf723be48dc..9036e374e0ac 100644
--- a/arch/arm/plat-omap/include/plat/uncompress.h
+++ b/arch/arm/plat-omap/include/plat/uncompress.h
@@ -139,10 +139,14 @@ static inline void __arch_decomp_setup(unsigned long arch_id)
DEBUG_LL_OMAP2(1, omap3evm);
DEBUG_LL_OMAP3(1, omap_3430sdp);
DEBUG_LL_OMAP3(1, omap_3630sdp);
+ DEBUG_LL_OMAP3(1, omap3530_lv_som);
+ DEBUG_LL_OMAP3(1, omap3_torpedo);
/* omap3 based boards using UART3 */
DEBUG_LL_OMAP3(3, cm_t35);
+ DEBUG_LL_OMAP3(3, cm_t3517);
DEBUG_LL_OMAP3(3, igep0020);
+ DEBUG_LL_OMAP3(3, igep0030);
DEBUG_LL_OMAP3(3, nokia_rx51);
DEBUG_LL_OMAP3(3, omap3517evm);
DEBUG_LL_OMAP3(3, omap3_beagle);
@@ -153,6 +157,7 @@ static inline void __arch_decomp_setup(unsigned long arch_id)
/* omap4 based boards using UART3 */
DEBUG_LL_OMAP4(3, omap_4430sdp);
+ DEBUG_LL_OMAP4(3, omap4_panda);
/* zoom2/3 external uart */
DEBUG_LL_ZOOM(omap_zoom2);
diff --git a/arch/arm/plat-omap/include/plat/usb.h b/arch/arm/plat-omap/include/plat/usb.h
index 9feddacfe850..59c7fe731f28 100644
--- a/arch/arm/plat-omap/include/plat/usb.h
+++ b/arch/arm/plat-omap/include/plat/usb.h
@@ -105,7 +105,7 @@ static inline void omap1_usb_init(struct omap_usb_config *pdata)
#if defined(CONFIG_ARCH_OMAP_OTG) || defined(CONFIG_ARCH_OMAP_OTG_MODULE)
void omap2_usbfs_init(struct omap_usb_config *pdata);
#else
-static inline omap2_usbfs_init(struct omap_usb_config *pdata)
+static inline void omap2_usbfs_init(struct omap_usb_config *pdata)
{
}
#endif
diff --git a/arch/arm/plat-omap/include/plat/vrfb.h b/arch/arm/plat-omap/include/plat/vrfb.h
index d8a03ced3b10..3792bdea2f6d 100644
--- a/arch/arm/plat-omap/include/plat/vrfb.h
+++ b/arch/arm/plat-omap/include/plat/vrfb.h
@@ -35,6 +35,7 @@ struct vrfb {
bool yuv_mode;
};
+#ifdef CONFIG_OMAP2_VRFB
extern int omap_vrfb_request_ctx(struct vrfb *vrfb);
extern void omap_vrfb_release_ctx(struct vrfb *vrfb);
extern void omap_vrfb_adjust_size(u16 *width, u16 *height,
@@ -47,4 +48,19 @@ extern void omap_vrfb_setup(struct vrfb *vrfb, unsigned long paddr,
extern int omap_vrfb_map_angle(struct vrfb *vrfb, u16 height, u8 rot);
extern void omap_vrfb_restore_context(void);
+#else
+static inline int omap_vrfb_request_ctx(struct vrfb *vrfb) { return 0; }
+static inline void omap_vrfb_release_ctx(struct vrfb *vrfb) {}
+static inline void omap_vrfb_adjust_size(u16 *width, u16 *height,
+ u8 bytespp) {}
+static inline u32 omap_vrfb_min_phys_size(u16 width, u16 height, u8 bytespp)
+ { return 0; }
+static inline u16 omap_vrfb_max_height(u32 phys_size, u16 width, u8 bytespp)
+ { return 0; }
+static inline void omap_vrfb_setup(struct vrfb *vrfb, unsigned long paddr,
+ u16 width, u16 height, unsigned bytespp, bool yuv_mode) {}
+static inline int omap_vrfb_map_angle(struct vrfb *vrfb, u16 height, u8 rot)
+ { return 0; }
+static inline void omap_vrfb_restore_context(void) {}
+#endif
#endif /* __VRFB_H */
diff --git a/arch/arm/plat-omap/mcbsp.c b/arch/arm/plat-omap/mcbsp.c
index 0c8612fd8312..eac4b978e9fd 100644
--- a/arch/arm/plat-omap/mcbsp.c
+++ b/arch/arm/plat-omap/mcbsp.c
@@ -33,7 +33,7 @@
struct omap_mcbsp **mcbsp_ptr;
int omap_mcbsp_count, omap_mcbsp_cache_size;
-void omap_mcbsp_write(struct omap_mcbsp *mcbsp, u16 reg, u32 val)
+static void omap_mcbsp_write(struct omap_mcbsp *mcbsp, u16 reg, u32 val)
{
if (cpu_class_is_omap1()) {
((u16 *)mcbsp->reg_cache)[reg / sizeof(u16)] = (u16)val;
@@ -47,7 +47,7 @@ void omap_mcbsp_write(struct omap_mcbsp *mcbsp, u16 reg, u32 val)
}
}
-int omap_mcbsp_read(struct omap_mcbsp *mcbsp, u16 reg, bool from_cache)
+static int omap_mcbsp_read(struct omap_mcbsp *mcbsp, u16 reg, bool from_cache)
{
if (cpu_class_is_omap1()) {
return !from_cache ? __raw_readw(mcbsp->io_base + reg) :
@@ -62,12 +62,12 @@ int omap_mcbsp_read(struct omap_mcbsp *mcbsp, u16 reg, bool from_cache)
}
#ifdef CONFIG_ARCH_OMAP3
-void omap_mcbsp_st_write(struct omap_mcbsp *mcbsp, u16 reg, u32 val)
+static void omap_mcbsp_st_write(struct omap_mcbsp *mcbsp, u16 reg, u32 val)
{
__raw_writel(val, mcbsp->st_data->io_base_st + reg);
}
-int omap_mcbsp_st_read(struct omap_mcbsp *mcbsp, u16 reg)
+static int omap_mcbsp_st_read(struct omap_mcbsp *mcbsp, u16 reg)
{
return __raw_readl(mcbsp->st_data->io_base_st + reg);
}
@@ -80,9 +80,6 @@ int omap_mcbsp_st_read(struct omap_mcbsp *mcbsp, u16 reg)
#define MCBSP_READ_CACHE(mcbsp, reg) \
omap_mcbsp_read(mcbsp, OMAP_MCBSP_REG_##reg, 1)
-#define omap_mcbsp_check_valid_id(id) (id < omap_mcbsp_count)
-#define id_to_mcbsp_ptr(id) mcbsp_ptr[id];
-
#define MCBSP_ST_READ(mcbsp, reg) \
omap_mcbsp_st_read(mcbsp, OMAP_ST_REG_##reg)
#define MCBSP_ST_WRITE(mcbsp, reg, val) \
@@ -878,7 +875,7 @@ EXPORT_SYMBOL(omap_mcbsp_free);
void omap_mcbsp_start(unsigned int id, int tx, int rx)
{
struct omap_mcbsp *mcbsp;
- int idle;
+ int enable_srg = 0;
u16 w;
if (!omap_mcbsp_check_valid_id(id)) {
@@ -893,10 +890,13 @@ void omap_mcbsp_start(unsigned int id, int tx, int rx)
mcbsp->rx_word_length = (MCBSP_READ_CACHE(mcbsp, RCR1) >> 5) & 0x7;
mcbsp->tx_word_length = (MCBSP_READ_CACHE(mcbsp, XCR1) >> 5) & 0x7;
- idle = !((MCBSP_READ_CACHE(mcbsp, SPCR2) |
- MCBSP_READ_CACHE(mcbsp, SPCR1)) & 1);
+ /* Only enable SRG, if McBSP is master */
+ w = MCBSP_READ_CACHE(mcbsp, PCR0);
+ if (w & (FSXM | FSRM | CLKXM | CLKRM))
+ enable_srg = !((MCBSP_READ_CACHE(mcbsp, SPCR2) |
+ MCBSP_READ_CACHE(mcbsp, SPCR1)) & 1);
- if (idle) {
+ if (enable_srg) {
/* Start the sample generator */
w = MCBSP_READ_CACHE(mcbsp, SPCR2);
MCBSP_WRITE(mcbsp, SPCR2, w | (1 << 6));
@@ -919,7 +919,7 @@ void omap_mcbsp_start(unsigned int id, int tx, int rx)
*/
udelay(500);
- if (idle) {
+ if (enable_srg) {
/* Start frame sync */
w = MCBSP_READ_CACHE(mcbsp, SPCR2);
MCBSP_WRITE(mcbsp, SPCR2, w | (1 << 7));
@@ -1645,7 +1645,7 @@ static const struct attribute_group sidetone_attr_group = {
.attrs = (struct attribute **)sidetone_attrs,
};
-int __devinit omap_st_add(struct omap_mcbsp *mcbsp)
+static int __devinit omap_st_add(struct omap_mcbsp *mcbsp)
{
struct omap_mcbsp_platform_data *pdata = mcbsp->pdata;
struct omap_mcbsp_st_data *st_data;
diff --git a/arch/arm/plat-omap/omap_device.c b/arch/arm/plat-omap/omap_device.c
index d2b160942ccc..abe933cd8f09 100644
--- a/arch/arm/plat-omap/omap_device.c
+++ b/arch/arm/plat-omap/omap_device.c
@@ -82,6 +82,7 @@
#include <linux/slab.h>
#include <linux/err.h>
#include <linux/io.h>
+#include <linux/clk.h>
#include <plat/omap_device.h>
#include <plat/omap_hwmod.h>
@@ -90,12 +91,6 @@
#define USE_WAKEUP_LAT 0
#define IGNORE_WAKEUP_LAT 1
-/*
- * OMAP_DEVICE_MAGIC: used to determine whether a struct omap_device
- * obtained via container_of() is in fact a struct omap_device
- */
-#define OMAP_DEVICE_MAGIC 0xf00dcafe
-
/* Private functions */
/**
@@ -243,6 +238,44 @@ static inline struct omap_device *_find_by_pdev(struct platform_device *pdev)
return container_of(pdev, struct omap_device, pdev);
}
+/**
+ * _add_optional_clock_alias - Add clock alias for hwmod optional clocks
+ * @od: struct omap_device *od
+ *
+ * For every optional clock present per hwmod per omap_device, this function
+ * adds an entry in the clocks list of the form <dev-id=dev_name, con-id=role>
+ * if an entry is already present in it with the form <dev-id=NULL, con-id=role>
+ *
+ * The function is called from inside omap_device_build_ss(), after
+ * omap_device_register.
+ *
+ * This allows a driver to get a pointer to any of its optional clocks,
+ * looked up by role, by calling clk_get(<dev*>, <role>).
+ *
+ * No return value.
+ */
+static void _add_optional_clock_alias(struct omap_device *od,
+ struct omap_hwmod *oh)
+{
+ int i;
+
+ for (i = 0; i < oh->opt_clks_cnt; i++) {
+ struct omap_hwmod_opt_clk *oc;
+ int r;
+
+ oc = &oh->opt_clks[i];
+
+ if (!oc->_clk)
+ continue;
+
+ r = clk_add_alias(oc->role, dev_name(&od->pdev.dev),
+ (char *)oc->clk, &od->pdev.dev);
+ if (r)
+ pr_err("omap_device: %s: clk_add_alias for %s failed\n",
+ dev_name(&od->pdev.dev), oc->role);
+ }
+}
+
/* Public functions for use by core code */
@@ -257,12 +290,11 @@ static inline struct omap_device *_find_by_pdev(struct platform_device *pdev)
*/
int omap_device_count_resources(struct omap_device *od)
{
- struct omap_hwmod *oh;
int c = 0;
int i;
- for (i = 0, oh = *od->hwmods; i < od->hwmods_cnt; i++, oh++)
- c += omap_hwmod_count_resources(oh);
+ for (i = 0; i < od->hwmods_cnt; i++)
+ c += omap_hwmod_count_resources(od->hwmods[i]);
pr_debug("omap_device: %s: counted %d total resources across %d "
"hwmods\n", od->pdev.name, c, od->hwmods_cnt);
@@ -289,12 +321,11 @@ int omap_device_count_resources(struct omap_device *od)
*/
int omap_device_fill_resources(struct omap_device *od, struct resource *res)
{
- struct omap_hwmod *oh;
int c = 0;
int i, r;
- for (i = 0, oh = *od->hwmods; i < od->hwmods_cnt; i++, oh++) {
- r = omap_hwmod_fill_resources(oh, res);
+ for (i = 0; i < od->hwmods_cnt; i++) {
+ r = omap_hwmod_fill_resources(od->hwmods[i], res);
res += r;
c += r;
}
@@ -414,15 +445,15 @@ struct omap_device *omap_device_build_ss(const char *pdev_name, int pdev_id,
od->pm_lats = pm_lats;
od->pm_lats_cnt = pm_lats_cnt;
- od->magic = OMAP_DEVICE_MAGIC;
-
if (is_early_device)
ret = omap_early_device_register(od);
else
ret = omap_device_register(od);
- for (i = 0; i < oh_cnt; i++)
+ for (i = 0; i < oh_cnt; i++) {
hwmods[i]->od = od;
+ _add_optional_clock_alias(od, hwmods[i]);
+ }
if (ret)
goto odbs_exit4;
@@ -473,6 +504,7 @@ int omap_device_register(struct omap_device *od)
{
pr_debug("omap_device: %s: registering\n", od->pdev.name);
+ od->pdev.dev.parent = &omap_device_parent;
return platform_device_register(&od->pdev);
}
@@ -566,7 +598,6 @@ int omap_device_shutdown(struct platform_device *pdev)
{
int ret, i;
struct omap_device *od;
- struct omap_hwmod *oh;
od = _find_by_pdev(pdev);
@@ -579,8 +610,8 @@ int omap_device_shutdown(struct platform_device *pdev)
ret = _omap_device_deactivate(od, IGNORE_WAKEUP_LAT);
- for (i = 0, oh = *od->hwmods; i < od->hwmods_cnt; i++, oh++)
- omap_hwmod_shutdown(oh);
+ for (i = 0; i < od->hwmods_cnt; i++)
+ omap_hwmod_shutdown(od->hwmods[i]);
od->_state = OMAP_DEVICE_STATE_SHUTDOWN;
@@ -627,18 +658,6 @@ int omap_device_align_pm_lat(struct platform_device *pdev,
}
/**
- * omap_device_is_valid - Check if pointer is a valid omap_device
- * @od: struct omap_device *
- *
- * Return whether struct omap_device pointer @od points to a valid
- * omap_device.
- */
-bool omap_device_is_valid(struct omap_device *od)
-{
- return (od && od->magic == OMAP_DEVICE_MAGIC);
-}
-
-/**
* omap_device_get_pwrdm - return the powerdomain * associated with @od
* @od: struct omap_device *
*
@@ -692,11 +711,10 @@ void __iomem *omap_device_get_rt_va(struct omap_device *od)
*/
int omap_device_enable_hwmods(struct omap_device *od)
{
- struct omap_hwmod *oh;
int i;
- for (i = 0, oh = *od->hwmods; i < od->hwmods_cnt; i++, oh++)
- omap_hwmod_enable(oh);
+ for (i = 0; i < od->hwmods_cnt; i++)
+ omap_hwmod_enable(od->hwmods[i]);
/* XXX pass along return value here? */
return 0;
@@ -710,11 +728,10 @@ int omap_device_enable_hwmods(struct omap_device *od)
*/
int omap_device_idle_hwmods(struct omap_device *od)
{
- struct omap_hwmod *oh;
int i;
- for (i = 0, oh = *od->hwmods; i < od->hwmods_cnt; i++, oh++)
- omap_hwmod_idle(oh);
+ for (i = 0; i < od->hwmods_cnt; i++)
+ omap_hwmod_idle(od->hwmods[i]);
/* XXX pass along return value here? */
return 0;
@@ -729,11 +746,10 @@ int omap_device_idle_hwmods(struct omap_device *od)
*/
int omap_device_disable_clocks(struct omap_device *od)
{
- struct omap_hwmod *oh;
int i;
- for (i = 0, oh = *od->hwmods; i < od->hwmods_cnt; i++, oh++)
- omap_hwmod_disable_clocks(oh);
+ for (i = 0; i < od->hwmods_cnt; i++)
+ omap_hwmod_disable_clocks(od->hwmods[i]);
/* XXX pass along return value here? */
return 0;
@@ -748,12 +764,22 @@ int omap_device_disable_clocks(struct omap_device *od)
*/
int omap_device_enable_clocks(struct omap_device *od)
{
- struct omap_hwmod *oh;
int i;
- for (i = 0, oh = *od->hwmods; i < od->hwmods_cnt; i++, oh++)
- omap_hwmod_enable_clocks(oh);
+ for (i = 0; i < od->hwmods_cnt; i++)
+ omap_hwmod_enable_clocks(od->hwmods[i]);
/* XXX pass along return value here? */
return 0;
}
+
+struct device omap_device_parent = {
+ .init_name = "omap",
+ .parent = &platform_bus,
+};
+
+static int __init omap_device_init(void)
+{
+ return device_register(&omap_device_parent);
+}
+core_initcall(omap_device_init);
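The _add_optional_clock_alias() helper added above registers one clock alias per hwmod optional clock, keyed on the clock role and the omap_device's dev name, so a driver can look its optional clocks up by role. A hedged sketch of the consumer side (the "tv_clk" role is only an example and must match an opt_clks[] entry):

#include <linux/clk.h>
#include <linux/err.h>
#include <linux/platform_device.h>

static int example_probe(struct platform_device *pdev)
{
	struct clk *opt;

	/* Role name is illustrative; it must match an optional-clock role. */
	opt = clk_get(&pdev->dev, "tv_clk");
	if (IS_ERR(opt))
		return PTR_ERR(opt);

	clk_enable(opt);
	/* ... use the device ... */
	return 0;
}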
diff --git a/arch/arm/plat-omap/sram.c b/arch/arm/plat-omap/sram.c
index 10b3b4c63372..e2c8eebe6b3a 100644
--- a/arch/arm/plat-omap/sram.c
+++ b/arch/arm/plat-omap/sram.c
@@ -19,6 +19,7 @@
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/io.h>
+#include <linux/omapfb.h>
#include <asm/tlb.h>
#include <asm/cacheflush.h>
@@ -30,8 +31,8 @@
#include <plat/cpu.h>
#include <plat/vram.h>
-#include <plat/control.h>
-
+#include "sram.h"
+#include "fb.h"
#if defined(CONFIG_ARCH_OMAP2) || defined(CONFIG_ARCH_OMAP3)
# include "../mach-omap2/prm.h"
# include "../mach-omap2/cm.h"
@@ -53,7 +54,7 @@
#define OMAP4_SRAM_PUB_PA (OMAP4_SRAM_PA + 0x4000)
#define OMAP4_SRAM_PUB_VA (OMAP4_SRAM_VA + 0x4000)
-#if defined(CONFIG_ARCH_OMAP2) || defined(CONFIG_ARCH_OMAP3)
+#if defined(CONFIG_ARCH_OMAP2PLUS)
#define SRAM_BOOTLOADER_SZ 0x00
#else
#define SRAM_BOOTLOADER_SZ 0x80
@@ -68,7 +69,6 @@
#define OMAP34XX_VA_WRITEPERM0 OMAP2_L3_IO_ADDRESS(0x68012858)
#define OMAP34XX_VA_ADDR_MATCH2 OMAP2_L3_IO_ADDRESS(0x68012880)
#define OMAP34XX_VA_SMS_RG_ATT0 OMAP2_L3_IO_ADDRESS(0x6C000048)
-#define OMAP34XX_VA_CONTROL_STAT OMAP2_L4_IO_ADDRESS(0x480022F0)
#define GP_DEVICE 0x300
@@ -79,12 +79,6 @@ static unsigned long omap_sram_base;
static unsigned long omap_sram_size;
static unsigned long omap_sram_ceil;
-extern unsigned long omapfb_reserve_sram(unsigned long sram_pstart,
- unsigned long sram_vstart,
- unsigned long sram_size,
- unsigned long pstart_avail,
- unsigned long size_avail);
-
/*
* Depending on the target RAMFS firewall setup, the public usable amount of
* SRAM varies. The default accessible size for all device types is 2k. A GP
@@ -93,16 +87,7 @@ extern unsigned long omapfb_reserve_sram(unsigned long sram_pstart,
*/
static int is_sram_locked(void)
{
- int type = 0;
-
- if (cpu_is_omap44xx())
- /* Not yet supported */
- return 0;
-
- if (cpu_is_omap242x())
- type = omap_rev() & OMAP2_DEVICETYPE_MASK;
-
- if (type == GP_DEVICE) {
+ if (OMAP2_DEVICE_TYPE_GP == omap_type()) {
/* RAMFW: R/W access to all initiators for all qualifier sets */
if (cpu_is_omap242x()) {
__raw_writel(0xFF, OMAP24XX_VA_REQINFOPERM0); /* all q-vects */
@@ -127,7 +112,7 @@ static int is_sram_locked(void)
* to secure SRAM will hang the system. Also the SRAM is not
* yet mapped at this point.
*/
-void __init omap_detect_sram(void)
+static void __init omap_detect_sram(void)
{
unsigned long reserved;
@@ -213,7 +198,7 @@ static struct map_desc omap_sram_io_desc[] __initdata = {
/*
* Note that we cannot use ioremap for SRAM, as clock init needs SRAM early.
*/
-void __init omap_map_sram(void)
+static void __init omap_map_sram(void)
{
unsigned long base;
@@ -330,7 +315,7 @@ u32 omap2_set_prcm(u32 dpll_ctrl_val, u32 sdrc_rfr_val, int bypass)
#endif
#ifdef CONFIG_ARCH_OMAP2420
-int __init omap242x_sram_init(void)
+static int __init omap242x_sram_init(void)
{
_omap2_sram_ddr_init = omap_sram_push(omap242x_sram_ddr_init,
omap242x_sram_ddr_init_sz);
@@ -351,7 +336,7 @@ static inline int omap242x_sram_init(void)
#endif
#ifdef CONFIG_ARCH_OMAP2430
-int __init omap243x_sram_init(void)
+static int __init omap243x_sram_init(void)
{
_omap2_sram_ddr_init = omap_sram_push(omap243x_sram_ddr_init,
omap243x_sram_ddr_init_sz);
@@ -407,7 +392,7 @@ void omap3_sram_restore_context(void)
}
#endif /* CONFIG_PM */
-int __init omap34xx_sram_init(void)
+static int __init omap34xx_sram_init(void)
{
_omap3_sram_configure_core_dpll =
omap_sram_push(omap3_sram_configure_core_dpll,
@@ -423,7 +408,7 @@ static inline int omap34xx_sram_init(void)
#endif
#ifdef CONFIG_ARCH_OMAP4
-int __init omap44xx_sram_init(void)
+static int __init omap44xx_sram_init(void)
{
printk(KERN_ERR "FIXME: %s not implemented\n", __func__);
diff --git a/arch/arm/plat-omap/sram.h b/arch/arm/plat-omap/sram.h
new file mode 100644
index 000000000000..29b43ef97f20
--- /dev/null
+++ b/arch/arm/plat-omap/sram.h
@@ -0,0 +1,6 @@
+#ifndef __PLAT_OMAP_SRAM_H__
+#define __PLAT_OMAP_SRAM_H__
+
+extern int __init omap_sram_init(void);
+
+#endif /* __PLAT_OMAP_SRAM_H__ */
diff --git a/arch/arm/plat-orion/include/plat/pcie.h b/arch/arm/plat-orion/include/plat/pcie.h
index 3ebfef72b4e7..cc99163e73fd 100644
--- a/arch/arm/plat-orion/include/plat/pcie.h
+++ b/arch/arm/plat-orion/include/plat/pcie.h
@@ -11,12 +11,15 @@
#ifndef __PLAT_PCIE_H
#define __PLAT_PCIE_H
+struct pci_bus;
+
u32 orion_pcie_dev_id(void __iomem *base);
u32 orion_pcie_rev(void __iomem *base);
int orion_pcie_link_up(void __iomem *base);
int orion_pcie_x4_mode(void __iomem *base);
int orion_pcie_get_local_bus_nr(void __iomem *base);
void orion_pcie_set_local_bus_nr(void __iomem *base, int nr);
+void orion_pcie_reset(void __iomem *base);
void orion_pcie_setup(void __iomem *base,
struct mbus_dram_target_info *dram);
int orion_pcie_rd_conf(void __iomem *base, struct pci_bus *bus,
diff --git a/arch/arm/plat-orion/pcie.c b/arch/arm/plat-orion/pcie.c
index 779553a1595e..af2d733c50b5 100644
--- a/arch/arm/plat-orion/pcie.c
+++ b/arch/arm/plat-orion/pcie.c
@@ -182,11 +182,6 @@ void __init orion_pcie_setup(void __iomem *base,
u32 mask;
/*
- * soft reset PCIe unit
- */
- orion_pcie_reset(base);
-
- /*
* Point PCIe unit MBUS decode windows to DRAM space.
*/
orion_pcie_setup_wins(base, dram);
diff --git a/arch/arm/plat-pxa/include/plat/pxa3xx_nand.h b/arch/arm/plat-pxa/include/plat/pxa3xx_nand.h
index 3478eae32d8a..01a8448e471c 100644
--- a/arch/arm/plat-pxa/include/plat/pxa3xx_nand.h
+++ b/arch/arm/plat-pxa/include/plat/pxa3xx_nand.h
@@ -30,15 +30,15 @@ struct pxa3xx_nand_cmdset {
};
struct pxa3xx_nand_flash {
- const struct pxa3xx_nand_timing *timing; /* NAND Flash timing */
- const struct pxa3xx_nand_cmdset *cmdset;
-
- uint32_t page_per_block;/* Pages per block (PG_PER_BLK) */
- uint32_t page_size; /* Page size in bytes (PAGE_SZ) */
- uint32_t flash_width; /* Width of Flash memory (DWIDTH_M) */
- uint32_t dfc_width; /* Width of flash controller(DWIDTH_C) */
- uint32_t num_blocks; /* Number of physical blocks in Flash */
- uint32_t chip_id;
+ uint32_t chip_id;
+ unsigned int page_per_block; /* Pages per block (PG_PER_BLK) */
+ unsigned int page_size; /* Page size in bytes (PAGE_SZ) */
+ unsigned int flash_width; /* Width of Flash memory (DWIDTH_M) */
+ unsigned int dfc_width; /* Width of flash controller(DWIDTH_C) */
+ unsigned int num_blocks; /* Number of physical blocks in Flash */
+
+ struct pxa3xx_nand_cmdset *cmdset; /* NAND command set */
+ struct pxa3xx_nand_timing *timing; /* NAND Flash timing */
};
struct pxa3xx_nand_platform_data {
diff --git a/arch/arm/plat-pxa/include/plat/sdhci.h b/arch/arm/plat-pxa/include/plat/sdhci.h
new file mode 100644
index 000000000000..1ab332e37d7d
--- /dev/null
+++ b/arch/arm/plat-pxa/include/plat/sdhci.h
@@ -0,0 +1,35 @@
+/* linux/arch/arm/plat-pxa/include/plat/sdhci.h
+ *
+ * Copyright 2010 Marvell
+ * Zhangfei Gao <zhangfei.gao@marvell.com>
+ *
+ * PXA Platform - SDHCI platform data definitions
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef __PLAT_PXA_SDHCI_H
+#define __PLAT_PXA_SDHCI_H
+
+/* pxa specific flag */
+/* Require clock free running */
+#define PXA_FLAG_DISABLE_CLOCK_GATING (1<<0)
+
+/* Board design supports 8-bit data on SD/SDIO BUS */
+#define PXA_FLAG_SD_8_BIT_CAPABLE_SLOT (1<<2)
+
+/*
+ * struct pxa_sdhci_platdata() - Platform device data for PXA SDHCI
+ * @max_speed: the maximum speed supported
+ * @quirks: quirks of specific device
+ * @flags: flags for platform requirement
+ */
+struct sdhci_pxa_platdata {
+ unsigned int max_speed;
+ unsigned int quirks;
+ unsigned int flags;
+};
+
+#endif /* __PLAT_PXA_SDHCI_H */
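A board using the new PXA SDHCI platform data would set the flags defined above and attach the structure to its MMC host device; the values here are hypothetical:

#include <plat/sdhci.h>

static struct sdhci_pxa_platdata example_mmc1_data = {
	.max_speed	= 26000000,			/* 26 MHz, illustrative */
	.flags		= PXA_FLAG_SD_8_BIT_CAPABLE_SLOT,
};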
diff --git a/arch/arm/plat-s3c24xx/Kconfig b/arch/arm/plat-s3c24xx/Kconfig
index 984bf66826d2..5a27b1b538f2 100644
--- a/arch/arm/plat-s3c24xx/Kconfig
+++ b/arch/arm/plat-s3c24xx/Kconfig
@@ -69,6 +69,7 @@ config S3C24XX_GPIO_EXTRA
int
default 128 if S3C24XX_GPIO_EXTRA128
default 64 if S3C24XX_GPIO_EXTRA64
+ default 16 if ARCH_H1940
default 0
config S3C24XX_GPIO_EXTRA64
diff --git a/arch/arm/plat-s3c24xx/common-smdk.c b/arch/arm/plat-s3c24xx/common-smdk.c
index 7b44d0c592b5..bcc43f346272 100644
--- a/arch/arm/plat-s3c24xx/common-smdk.c
+++ b/arch/arm/plat-s3c24xx/common-smdk.c
@@ -147,7 +147,7 @@ static struct mtd_partition smdk_default_nand_part[] = {
[7] = {
.name = "S3C2410 flash partition 7",
.offset = SZ_1M * 48,
- .size = SZ_16M,
+ .size = MTDPART_SIZ_FULL,
}
};
diff --git a/arch/arm/plat-s3c24xx/cpu.c b/arch/arm/plat-s3c24xx/cpu.c
index 76d0858c3cbb..4a10c0f684b2 100644
--- a/arch/arm/plat-s3c24xx/cpu.c
+++ b/arch/arm/plat-s3c24xx/cpu.c
@@ -88,7 +88,7 @@ static struct cpu_table cpu_ids[] __initdata = {
{
.idcode = 0x32440000,
.idmask = 0xffffffff,
- .map_io = s3c244x_map_io,
+ .map_io = s3c2440_map_io,
.init_clocks = s3c244x_init_clocks,
.init_uarts = s3c244x_init_uarts,
.init = s3c2440_init,
@@ -97,7 +97,7 @@ static struct cpu_table cpu_ids[] __initdata = {
{
.idcode = 0x32440001,
.idmask = 0xffffffff,
- .map_io = s3c244x_map_io,
+ .map_io = s3c2440_map_io,
.init_clocks = s3c244x_init_clocks,
.init_uarts = s3c244x_init_uarts,
.init = s3c2440_init,
@@ -106,7 +106,7 @@ static struct cpu_table cpu_ids[] __initdata = {
{
.idcode = 0x32440aaa,
.idmask = 0xffffffff,
- .map_io = s3c244x_map_io,
+ .map_io = s3c2442_map_io,
.init_clocks = s3c244x_init_clocks,
.init_uarts = s3c244x_init_uarts,
.init = s3c2442_init,
@@ -115,7 +115,7 @@ static struct cpu_table cpu_ids[] __initdata = {
{
.idcode = 0x32440aab,
.idmask = 0xffffffff,
- .map_io = s3c244x_map_io,
+ .map_io = s3c2442_map_io,
.init_clocks = s3c244x_init_clocks,
.init_uarts = s3c244x_init_uarts,
.init = s3c2442_init,
diff --git a/arch/arm/plat-s3c24xx/devs.c b/arch/arm/plat-s3c24xx/devs.c
index 452e18438b41..2f91057a0c02 100644
--- a/arch/arm/plat-s3c24xx/devs.c
+++ b/arch/arm/plat-s3c24xx/devs.c
@@ -247,7 +247,7 @@ static struct resource s3c_iis_resource[] = {
static u64 s3c_device_iis_dmamask = 0xffffffffUL;
struct platform_device s3c_device_iis = {
- .name = "s3c2410-iis",
+ .name = "s3c24xx-iis",
.id = -1,
.num_resources = ARRAY_SIZE(s3c_iis_resource),
.resource = s3c_iis_resource,
@@ -259,6 +259,21 @@ struct platform_device s3c_device_iis = {
EXPORT_SYMBOL(s3c_device_iis);
+/* ASoC PCM DMA */
+
+static u64 s3c_device_audio_dmamask = 0xffffffffUL;
+
+struct platform_device s3c_device_pcm = {
+ .name = "s3c24xx-pcm-audio",
+ .id = -1,
+ .dev = {
+ .dma_mask = &s3c_device_audio_dmamask,
+ .coherent_dma_mask = 0xffffffffUL
+ }
+};
+
+EXPORT_SYMBOL(s3c_device_pcm);
+
/* RTC */
static struct resource s3c_rtc_resource[] = {
@@ -481,19 +496,30 @@ static struct resource s3c_ac97_resource[] = {
},
};
-static u64 s3c_device_ac97_dmamask = 0xffffffffUL;
-
struct platform_device s3c_device_ac97 = {
.name = "s3c-ac97",
.id = -1,
.num_resources = ARRAY_SIZE(s3c_ac97_resource),
.resource = s3c_ac97_resource,
.dev = {
- .dma_mask = &s3c_device_ac97_dmamask,
+ .dma_mask = &s3c_device_audio_dmamask,
.coherent_dma_mask = 0xffffffffUL
}
};
EXPORT_SYMBOL(s3c_device_ac97);
+/* ASoC I2S */
+
+struct platform_device s3c2412_device_iis = {
+ .name = "s3c2412-iis",
+ .id = -1,
+ .dev = {
+ .dma_mask = &s3c_device_audio_dmamask,
+ .coherent_dma_mask = 0xffffffffUL
+ }
+};
+
+EXPORT_SYMBOL(s3c2412_device_iis);
+
#endif // CONFIG_CPU_S32440
diff --git a/arch/arm/plat-s3c24xx/gpiolib.c b/arch/arm/plat-s3c24xx/gpiolib.c
index 4c0896f2572d..243b6411050d 100644
--- a/arch/arm/plat-s3c24xx/gpiolib.c
+++ b/arch/arm/plat-s3c24xx/gpiolib.c
@@ -74,11 +74,6 @@ static int s3c24xx_gpiolib_bankf_toirq(struct gpio_chip *chip, unsigned offset)
return -EINVAL;
}
-static int s3c24xx_gpiolib_bankg_toirq(struct gpio_chip *chip, unsigned offset)
-{
- return IRQ_EINT8 + offset;
-}
-
static struct s3c_gpio_cfg s3c24xx_gpiocfg_banka = {
.set_config = s3c_gpio_setcfg_s3c24xx_a,
.get_config = s3c_gpio_getcfg_s3c24xx_a,
@@ -157,12 +152,13 @@ struct s3c_gpio_chip s3c24xx_gpios[] = {
[6] = {
.base = S3C2410_GPGCON,
.pm = __gpio_pm(&s3c_gpio_pm_2bit),
+ .irq_base = IRQ_EINT8,
.chip = {
.base = S3C2410_GPG(0),
.owner = THIS_MODULE,
.label = "GPIOG",
.ngpio = 16,
- .to_irq = s3c24xx_gpiolib_bankg_toirq,
+ .to_irq = samsung_gpiolib_to_irq,
},
}, {
.base = S3C2410_GPHCON,
diff --git a/arch/arm/plat-s3c24xx/include/plat/s3c244x.h b/arch/arm/plat-s3c24xx/include/plat/s3c244x.h
index 307248d1ccbb..89e8d0a25f87 100644
--- a/arch/arm/plat-s3c24xx/include/plat/s3c244x.h
+++ b/arch/arm/plat-s3c24xx/include/plat/s3c244x.h
@@ -21,17 +21,22 @@ extern void s3c244x_init_clocks(int xtal);
#else
#define s3c244x_init_clocks NULL
#define s3c244x_init_uarts NULL
-#define s3c244x_map_io NULL
#endif
#ifdef CONFIG_CPU_S3C2440
extern int s3c2440_init(void);
+
+extern void s3c2440_map_io(void);
#else
#define s3c2440_init NULL
+#define s3c2440_map_io NULL
#endif
#ifdef CONFIG_CPU_S3C2442
extern int s3c2442_init(void);
+
+extern void s3c2442_map_io(void);
#else
#define s3c2442_init NULL
+#define s3c2442_map_io NULL
#endif
diff --git a/arch/arm/plat-s3c24xx/spi-bus0-gpe11_12_13.c b/arch/arm/plat-s3c24xx/spi-bus0-gpe11_12_13.c
index 9793544a6ace..704175b0573f 100644
--- a/arch/arm/plat-s3c24xx/spi-bus0-gpe11_12_13.c
+++ b/arch/arm/plat-s3c24xx/spi-bus0-gpe11_12_13.c
@@ -29,8 +29,8 @@ void s3c24xx_spi_gpiocfg_bus0_gpe11_12_13(struct s3c2410_spi_info *spi,
} else {
s3c_gpio_cfgpin(S3C2410_GPE(13), S3C2410_GPIO_INPUT);
s3c_gpio_cfgpin(S3C2410_GPE(11), S3C2410_GPIO_INPUT);
- s3c_gpio_cfgpull(S3C2410_GPE(11), S3C_GPIO_PULL_NONE);
- s3c_gpio_cfgpull(S3C2410_GPE(12), S3C_GPIO_PULL_NONE);
- s3c_gpio_cfgpull(S3C2410_GPE(13), S3C_GPIO_PULL_NONE);
+ s3c_gpio_setpull(S3C2410_GPE(11), S3C_GPIO_PULL_NONE);
+ s3c_gpio_setpull(S3C2410_GPE(12), S3C_GPIO_PULL_NONE);
+ s3c_gpio_setpull(S3C2410_GPE(13), S3C_GPIO_PULL_NONE);
}
}
diff --git a/arch/arm/plat-s3c24xx/spi-bus1-gpd8_9_10.c b/arch/arm/plat-s3c24xx/spi-bus1-gpd8_9_10.c
index db9e9e477ec1..72457afd6255 100644
--- a/arch/arm/plat-s3c24xx/spi-bus1-gpd8_9_10.c
+++ b/arch/arm/plat-s3c24xx/spi-bus1-gpd8_9_10.c
@@ -31,8 +31,8 @@ void s3c24xx_spi_gpiocfg_bus1_gpd8_9_10(struct s3c2410_spi_info *spi,
} else {
s3c_gpio_cfgpin(S3C2410_GPD(8), S3C2410_GPIO_INPUT);
s3c_gpio_cfgpin(S3C2410_GPD(9), S3C2410_GPIO_INPUT);
- s3c_gpio_cfgpull(S3C2410_GPD(10), S3C_GPIO_PULL_NONE);
- s3c_gpio_cfgpull(S3C2410_GPD(9), S3C_GPIO_PULL_NONE);
- s3c_gpio_cfgpull(S3C2410_GPD(8), S3C_GPIO_PULL_NONE);
+ s3c_gpio_setpull(S3C2410_GPD(10), S3C_GPIO_PULL_NONE);
+ s3c_gpio_setpull(S3C2410_GPD(9), S3C_GPIO_PULL_NONE);
+ s3c_gpio_setpull(S3C2410_GPD(8), S3C_GPIO_PULL_NONE);
}
}
diff --git a/arch/arm/plat-s3c24xx/spi-bus1-gpg5_6_7.c b/arch/arm/plat-s3c24xx/spi-bus1-gpg5_6_7.c
index 8ea663a438bb..c3972b645d13 100644
--- a/arch/arm/plat-s3c24xx/spi-bus1-gpg5_6_7.c
+++ b/arch/arm/plat-s3c24xx/spi-bus1-gpg5_6_7.c
@@ -29,8 +29,8 @@ void s3c24xx_spi_gpiocfg_bus1_gpg5_6_7(struct s3c2410_spi_info *spi,
} else {
s3c_gpio_cfgpin(S3C2410_GPG(7), S3C2410_GPIO_INPUT);
s3c_gpio_cfgpin(S3C2410_GPG(5), S3C2410_GPIO_INPUT);
- s3c_gpio_cfgpull(S3C2410_GPG(5), S3C_GPIO_PULL_NONE);
- s3c_gpio_cfgpull(S3C2410_GPG(6), S3C_GPIO_PULL_NONE);
- s3c_gpio_cfgpull(S3C2410_GPG(7), S3C_GPIO_PULL_NONE);
+ s3c_gpio_setpull(S3C2410_GPG(5), S3C_GPIO_PULL_NONE);
+ s3c_gpio_setpull(S3C2410_GPG(6), S3C_GPIO_PULL_NONE);
+ s3c_gpio_setpull(S3C2410_GPG(7), S3C_GPIO_PULL_NONE);
}
}
diff --git a/arch/arm/plat-s5p/Kconfig b/arch/arm/plat-s5p/Kconfig
index 25960966af7c..65dbfa8e0a86 100644
--- a/arch/arm/plat-s5p/Kconfig
+++ b/arch/arm/plat-s5p/Kconfig
@@ -32,6 +32,11 @@ config S5P_EXT_INT
Use the external interrupts (other than GPIO interrupts.)
Note: Do not choose this for S5P6440 and S5P6450.
+config S5P_GPIO_INT
+ bool
+ help
+ Common code for the GPIO interrupts (other than external interrupts.)
+
config S5P_DEV_FIMC0
bool
help
diff --git a/arch/arm/plat-s5p/Makefile b/arch/arm/plat-s5p/Makefile
index f3e917e27da8..de65238a7aef 100644
--- a/arch/arm/plat-s5p/Makefile
+++ b/arch/arm/plat-s5p/Makefile
@@ -18,6 +18,9 @@ obj-y += cpu.o
obj-y += clock.o
obj-y += irq.o
obj-$(CONFIG_S5P_EXT_INT) += irq-eint.o
+obj-$(CONFIG_S5P_GPIO_INT) += irq-gpioint.o
+obj-$(CONFIG_PM) += pm.o
+obj-$(CONFIG_PM) += irq-pm.o
# devices
diff --git a/arch/arm/plat-s5p/clock.c b/arch/arm/plat-s5p/clock.c
index 8aaf4e6b60c3..8d081d968c58 100644
--- a/arch/arm/plat-s5p/clock.c
+++ b/arch/arm/plat-s5p/clock.c
@@ -21,6 +21,8 @@
#include <linux/io.h>
#include <asm/div64.h>
+#include <mach/regs-clock.h>
+
#include <plat/clock.h>
#include <plat/clock-clksrc.h>
#include <plat/s5p-clock.h>
@@ -88,14 +90,6 @@ struct clk clk_fout_vpll = {
.ctrlbit = (1 << 31),
};
-/* ARM clock */
-struct clk clk_arm = {
- .name = "armclk",
- .id = -1,
- .rate = 0,
- .ctrlbit = 0,
-};
-
/* Possible clock sources for APLL Mux */
static struct clk *clk_src_apll_list[] = {
[0] = &clk_fin_apll,
@@ -156,6 +150,24 @@ int s5p_gatectrl(void __iomem *reg, struct clk *clk, int enable)
return 0;
}
+int s5p_epll_enable(struct clk *clk, int enable)
+{
+ unsigned int ctrlbit = clk->ctrlbit;
+ unsigned int epll_con = __raw_readl(S5P_EPLL_CON) & ~ctrlbit;
+
+ if (enable)
+ __raw_writel(epll_con | ctrlbit, S5P_EPLL_CON);
+ else
+ __raw_writel(epll_con, S5P_EPLL_CON);
+
+ return 0;
+}
+
+unsigned long s5p_epll_get_rate(struct clk *clk)
+{
+ return clk->rate;
+}
+
static struct clk *s5p_clks[] __initdata = {
&clk_ext_xtal_mux,
&clk_48m,
@@ -165,7 +177,6 @@ static struct clk *s5p_clks[] __initdata = {
&clk_fout_epll,
&clk_fout_dpll,
&clk_fout_vpll,
- &clk_arm,
&clk_vpll,
&clk_xusbxti,
};
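The two EPLL helpers added here are meant to be shared by the per-SoC clock code rather than duplicated in each SoC file. A minimal sketch of how an SoC clock file might hook them up, assuming the plat-samsung struct clk of this era (an .enable callback plus a clk_ops .get_rate hook); the example_* names are illustrative, not part of the patch:

#include <linux/init.h>
#include <plat/clock.h>
#include <plat/s5p-clock.h>

/* illustrative only: wire the common helpers into the fout_epll clock */
static struct clk_ops example_epll_ops = {
        .get_rate       = s5p_epll_get_rate,
        /* .set_rate stays SoC-specific */
};

static void __init example_epll_setup(void)
{
        clk_fout_epll.enable = s5p_epll_enable;
        clk_fout_epll.ops    = &example_epll_ops;
}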
diff --git a/arch/arm/plat-s5p/include/plat/irqs.h b/arch/arm/plat-s5p/include/plat/irqs.h
index 3fb3a3a17465..ba9121c60a2a 100644
--- a/arch/arm/plat-s5p/include/plat/irqs.h
+++ b/arch/arm/plat-s5p/include/plat/irqs.h
@@ -94,4 +94,22 @@
((irq) - S5P_EINT_BASE1) : \
((irq) + 16 - S5P_EINT_BASE2))
+#define IRQ_EINT_BIT(x) EINT_OFFSET(x)
+
+/* Typically only a few gpio chips require gpio interrupt support.
+ To avoid memory waste irq descriptors are allocated only for
+ S5P_GPIOINT_GROUP_COUNT chips, each with total number of
+ S5P_GPIOINT_GROUP_SIZE pins/irqs. Each GPIOINT group can be assigned

+ to any gpio chip with the s5p_register_gpio_interrupt() function */
+#define S5P_GPIOINT_GROUP_COUNT 4
+#define S5P_GPIOINT_GROUP_SIZE 8
+#define S5P_GPIOINT_COUNT (S5P_GPIOINT_GROUP_COUNT * S5P_GPIOINT_GROUP_SIZE)
+
+/* IRQ types common for all s5p platforms */
+#define S5P_IRQ_TYPE_LEVEL_LOW (0x00)
+#define S5P_IRQ_TYPE_LEVEL_HIGH (0x01)
+#define S5P_IRQ_TYPE_EDGE_FALLING (0x02)
+#define S5P_IRQ_TYPE_EDGE_RISING (0x03)
+#define S5P_IRQ_TYPE_EDGE_BOTH (0x04)
+
#endif /* __ASM_PLAT_S5P_IRQS_H */
diff --git a/arch/arm/plat-s5p/include/plat/map-s5p.h b/arch/arm/plat-s5p/include/plat/map-s5p.h
index c4ff88bf6477..fef353d44513 100644
--- a/arch/arm/plat-s5p/include/plat/map-s5p.h
+++ b/arch/arm/plat-s5p/include/plat/map-s5p.h
@@ -13,24 +13,38 @@
#ifndef __ASM_PLAT_MAP_S5P_H
#define __ASM_PLAT_MAP_S5P_H __FILE__
-#define S5P_VA_CHIPID S3C_ADDR(0x00700000)
-#define S5P_VA_GPIO S3C_ADDR(0x00500000)
-#define S5P_VA_SYSTIMER S3C_ADDR(0x01200000)
-#define S5P_VA_SROMC S3C_ADDR(0x01100000)
-#define S5P_VA_SYSRAM S3C_ADDR(0x01180000)
-
-#define S5P_VA_COMBINER_BASE S3C_ADDR(0x00600000)
+#define S5P_VA_CHIPID S3C_ADDR(0x02000000)
+#define S5P_VA_CMU S3C_ADDR(0x02100000)
+#define S5P_VA_GPIO S3C_ADDR(0x02200000)
+#define S5P_VA_GPIO1 S5P_VA_GPIO
+#define S5P_VA_GPIO2 S3C_ADDR(0x02240000)
+#define S5P_VA_GPIO3 S3C_ADDR(0x02280000)
+
+#define S5P_VA_SYSRAM S3C_ADDR(0x02400000)
+#define S5P_VA_DMC0 S3C_ADDR(0x02440000)
+#define S5P_VA_DMC1 S3C_ADDR(0x02480000)
+#define S5P_VA_SROMC S3C_ADDR(0x024C0000)
+
+#define S5P_VA_SYSTIMER S3C_ADDR(0x02500000)
+#define S5P_VA_L2CC S3C_ADDR(0x02600000)
+
+#define S5P_VA_COMBINER_BASE S3C_ADDR(0x02700000)
#define S5P_VA_COMBINER(x) (S5P_VA_COMBINER_BASE + ((x) >> 2) * 0x10)
-#define S5P_VA_COREPERI_BASE S3C_ADDR(0x00800000)
+#define S5P_VA_COREPERI_BASE S3C_ADDR(0x02800000)
#define S5P_VA_COREPERI(x) (S5P_VA_COREPERI_BASE + (x))
#define S5P_VA_SCU S5P_VA_COREPERI(0x0)
#define S5P_VA_GIC_CPU S5P_VA_COREPERI(0x100)
#define S5P_VA_TWD S5P_VA_COREPERI(0x600)
#define S5P_VA_GIC_DIST S5P_VA_COREPERI(0x1000)
-#define S5P_VA_L2CC S3C_ADDR(0x00900000)
-#define S5P_VA_CMU S3C_ADDR(0x00920000)
+#define S3C_VA_USB_HSPHY S3C_ADDR(0x02900000)
+
+#define VA_VIC(x) (S3C_VA_IRQ + ((x) * 0x10000))
+#define VA_VIC0 VA_VIC(0)
+#define VA_VIC1 VA_VIC(1)
+#define VA_VIC2 VA_VIC(2)
+#define VA_VIC3 VA_VIC(3)
#define S5P_VA_UART(x) (S3C_VA_UART + ((x) * S3C_UART_OFFSET))
#define S5P_VA_UART0 S5P_VA_UART(0)
@@ -42,10 +56,4 @@
#define S3C_UART_OFFSET (0x400)
#endif
-#define VA_VIC(x) (S3C_VA_IRQ + ((x) * 0x10000))
-#define VA_VIC0 VA_VIC(0)
-#define VA_VIC1 VA_VIC(1)
-#define VA_VIC2 VA_VIC(2)
-#define VA_VIC3 VA_VIC(3)
-
#endif /* __ASM_PLAT_MAP_S5P_H */
diff --git a/arch/arm/plat-s5p/include/plat/s5p-clock.h b/arch/arm/plat-s5p/include/plat/s5p-clock.h
index 17036c898409..2b6dcff8ab2b 100644
--- a/arch/arm/plat-s5p/include/plat/s5p-clock.h
+++ b/arch/arm/plat-s5p/include/plat/s5p-clock.h
@@ -43,4 +43,8 @@ extern struct clksrc_sources clk_src_dpll;
extern int s5p_gatectrl(void __iomem *reg, struct clk *clk, int enable);
+/* Common EPLL operations for S5P platform */
+extern int s5p_epll_enable(struct clk *clk, int enable);
+extern unsigned long s5p_epll_get_rate(struct clk *clk);
+
#endif /* __ASM_PLAT_S5P_CLOCK_H */
diff --git a/arch/arm/plat-s5p/irq-eint.c b/arch/arm/plat-s5p/irq-eint.c
index f36cd3327025..752f1a645f9d 100644
--- a/arch/arm/plat-s5p/irq-eint.c
+++ b/arch/arm/plat-s5p/irq-eint.c
@@ -67,23 +67,23 @@ static int s5p_irq_eint_set_type(unsigned int irq, unsigned int type)
switch (type) {
case IRQ_TYPE_EDGE_RISING:
- newvalue = S5P_EXTINT_RISEEDGE;
+ newvalue = S5P_IRQ_TYPE_EDGE_RISING;
break;
case IRQ_TYPE_EDGE_FALLING:
- newvalue = S5P_EXTINT_FALLEDGE;
+ newvalue = S5P_IRQ_TYPE_EDGE_FALLING;
break;
case IRQ_TYPE_EDGE_BOTH:
- newvalue = S5P_EXTINT_BOTHEDGE;
+ newvalue = S5P_IRQ_TYPE_EDGE_BOTH;
break;
case IRQ_TYPE_LEVEL_LOW:
- newvalue = S5P_EXTINT_LOWLEV;
+ newvalue = S5P_IRQ_TYPE_LEVEL_LOW;
break;
case IRQ_TYPE_LEVEL_HIGH:
- newvalue = S5P_EXTINT_HILEV;
+ newvalue = S5P_IRQ_TYPE_LEVEL_HIGH;
break;
default:
diff --git a/arch/arm/plat-s5p/irq-gpioint.c b/arch/arm/plat-s5p/irq-gpioint.c
new file mode 100644
index 000000000000..0e5dc8cbf5e3
--- /dev/null
+++ b/arch/arm/plat-s5p/irq-gpioint.c
@@ -0,0 +1,237 @@
+/* linux/arch/arm/plat-s5p/irq-gpioint.c
+ *
+ * Copyright (c) 2010 Samsung Electronics Co., Ltd.
+ * Author: Kyungmin Park <kyungmin.park@samsung.com>
+ * Author: Joonyoung Shim <jy0922.shim@samsung.com>
+ * Author: Marek Szyprowski <m.szyprowski@samsung.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ *
+ */
+
+#include <linux/kernel.h>
+#include <linux/interrupt.h>
+#include <linux/irq.h>
+#include <linux/io.h>
+#include <linux/gpio.h>
+
+#include <mach/map.h>
+#include <plat/gpio-core.h>
+#include <plat/gpio-cfg.h>
+
+#define S5P_GPIOREG(x) (S5P_VA_GPIO + (x))
+
+#define GPIOINT_CON_OFFSET 0x700
+#define GPIOINT_MASK_OFFSET 0x900
+#define GPIOINT_PEND_OFFSET 0xA00
+
+static struct s3c_gpio_chip *irq_chips[S5P_GPIOINT_GROUP_MAXNR];
+
+static int s5p_gpioint_get_group(unsigned int irq)
+{
+ struct gpio_chip *chip = get_irq_data(irq);
+ struct s3c_gpio_chip *s3c_chip = container_of(chip,
+ struct s3c_gpio_chip, chip);
+ int group;
+
+ for (group = 0; group < S5P_GPIOINT_GROUP_MAXNR; group++)
+ if (s3c_chip == irq_chips[group])
+ break;
+
+ return group;
+}
+
+static int s5p_gpioint_get_offset(unsigned int irq)
+{
+ struct gpio_chip *chip = get_irq_data(irq);
+ struct s3c_gpio_chip *s3c_chip = container_of(chip,
+ struct s3c_gpio_chip, chip);
+
+ return irq - s3c_chip->irq_base;
+}
+
+static void s5p_gpioint_ack(unsigned int irq)
+{
+ int group, offset, pend_offset;
+ unsigned int value;
+
+ group = s5p_gpioint_get_group(irq);
+ offset = s5p_gpioint_get_offset(irq);
+ pend_offset = group << 2;
+
+ value = __raw_readl(S5P_GPIOREG(GPIOINT_PEND_OFFSET) + pend_offset);
+ value |= 1 << offset;
+ __raw_writel(value, S5P_GPIOREG(GPIOINT_PEND_OFFSET) + pend_offset);
+}
+
+static void s5p_gpioint_mask(unsigned int irq)
+{
+ int group, offset, mask_offset;
+ unsigned int value;
+
+ group = s5p_gpioint_get_group(irq);
+ offset = s5p_gpioint_get_offset(irq);
+ mask_offset = group << 2;
+
+ value = __raw_readl(S5P_GPIOREG(GPIOINT_MASK_OFFSET) + mask_offset);
+ value |= 1 << offset;
+ __raw_writel(value, S5P_GPIOREG(GPIOINT_MASK_OFFSET) + mask_offset);
+}
+
+static void s5p_gpioint_unmask(unsigned int irq)
+{
+ int group, offset, mask_offset;
+ unsigned int value;
+
+ group = s5p_gpioint_get_group(irq);
+ offset = s5p_gpioint_get_offset(irq);
+ mask_offset = group << 2;
+
+ value = __raw_readl(S5P_GPIOREG(GPIOINT_MASK_OFFSET) + mask_offset);
+ value &= ~(1 << offset);
+ __raw_writel(value, S5P_GPIOREG(GPIOINT_MASK_OFFSET) + mask_offset);
+}
+
+static void s5p_gpioint_mask_ack(unsigned int irq)
+{
+ s5p_gpioint_mask(irq);
+ s5p_gpioint_ack(irq);
+}
+
+static int s5p_gpioint_set_type(unsigned int irq, unsigned int type)
+{
+ int group, offset, con_offset;
+ unsigned int value;
+
+ group = s5p_gpioint_get_group(irq);
+ offset = s5p_gpioint_get_offset(irq);
+ con_offset = group << 2;
+
+ switch (type) {
+ case IRQ_TYPE_EDGE_RISING:
+ type = S5P_IRQ_TYPE_EDGE_RISING;
+ break;
+ case IRQ_TYPE_EDGE_FALLING:
+ type = S5P_IRQ_TYPE_EDGE_FALLING;
+ break;
+ case IRQ_TYPE_EDGE_BOTH:
+ type = S5P_IRQ_TYPE_EDGE_BOTH;
+ break;
+ case IRQ_TYPE_LEVEL_HIGH:
+ type = S5P_IRQ_TYPE_LEVEL_HIGH;
+ break;
+ case IRQ_TYPE_LEVEL_LOW:
+ type = S5P_IRQ_TYPE_LEVEL_LOW;
+ break;
+ case IRQ_TYPE_NONE:
+ default:
+ printk(KERN_WARNING "No irq type\n");
+ return -EINVAL;
+ }
+
+ value = __raw_readl(S5P_GPIOREG(GPIOINT_CON_OFFSET) + con_offset);
+ value &= ~(0x7 << (offset * 0x4));
+ value |= (type << (offset * 0x4));
+ __raw_writel(value, S5P_GPIOREG(GPIOINT_CON_OFFSET) + con_offset);
+
+ return 0;
+}
+
+struct irq_chip s5p_gpioint = {
+ .name = "s5p_gpioint",
+ .ack = s5p_gpioint_ack,
+ .mask = s5p_gpioint_mask,
+ .mask_ack = s5p_gpioint_mask_ack,
+ .unmask = s5p_gpioint_unmask,
+ .set_type = s5p_gpioint_set_type,
+};
+
+static void s5p_gpioint_handler(unsigned int irq, struct irq_desc *desc)
+{
+ int group, offset, pend_offset, mask_offset;
+ int real_irq;
+ unsigned int pend, mask;
+
+ for (group = 0; group < S5P_GPIOINT_GROUP_MAXNR; group++) {
+ pend_offset = group << 2;
+ pend = __raw_readl(S5P_GPIOREG(GPIOINT_PEND_OFFSET) +
+ pend_offset);
+ if (!pend)
+ continue;
+
+ mask_offset = group << 2;
+ mask = __raw_readl(S5P_GPIOREG(GPIOINT_MASK_OFFSET) +
+ mask_offset);
+ pend &= ~mask;
+
+ for (offset = 0; offset < 8; offset++) {
+ if (pend & (1 << offset)) {
+ struct s3c_gpio_chip *chip = irq_chips[group];
+ if (chip) {
+ real_irq = chip->irq_base + offset;
+ generic_handle_irq(real_irq);
+ }
+ }
+ }
+ }
+}
+
+static __init int s5p_gpioint_add(struct s3c_gpio_chip *chip)
+{
+ static int used_gpioint_groups = 0;
+ static bool handler_registered = 0;
+ int irq, group = chip->group;
+ int i;
+
+ if (used_gpioint_groups >= S5P_GPIOINT_GROUP_COUNT)
+ return -ENOMEM;
+
+ chip->irq_base = S5P_GPIOINT_BASE +
+ used_gpioint_groups * S5P_GPIOINT_GROUP_SIZE;
+ used_gpioint_groups++;
+
+ if (!handler_registered) {
+ set_irq_chained_handler(IRQ_GPIOINT, s5p_gpioint_handler);
+ handler_registered = 1;
+ }
+
+ irq_chips[group] = chip;
+ for (i = 0; i < chip->chip.ngpio; i++) {
+ irq = chip->irq_base + i;
+ set_irq_chip(irq, &s5p_gpioint);
+ set_irq_data(irq, &chip->chip);
+ set_irq_handler(irq, handle_level_irq);
+ set_irq_flags(irq, IRQF_VALID);
+ }
+ return 0;
+}
+
+int __init s5p_register_gpio_interrupt(int pin)
+{
+ struct s3c_gpio_chip *my_chip = s3c_gpiolib_getchip(pin);
+ int offset, group;
+ int ret;
+
+ if (!my_chip)
+ return -EINVAL;
+
+ offset = pin - my_chip->chip.base;
+ group = my_chip->group;
+
+ /* check if the group has been already registered */
+ if (my_chip->irq_base)
+ return my_chip->irq_base + offset;
+
+ /* register gpio group */
+ ret = s5p_gpioint_add(my_chip);
+ if (ret == 0) {
+ my_chip->chip.to_irq = samsung_gpiolib_to_irq;
+ printk(KERN_INFO "Registered interrupt support for gpio group %d.\n",
+ group);
+ return my_chip->irq_base + offset;
+ }
+ return ret;
+}
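Nothing registers these interrupts automatically; board or driver code asks for them per pin, and only then is a group of irq descriptors consumed. A hedged usage sketch (EXAMPLE_PIN and example_isr are placeholders, not from the patch):

#include <linux/interrupt.h>
#include <linux/gpio.h>
#include <plat/gpio-cfg.h>

static irqreturn_t example_isr(int irq, void *dev_id)
{
        return IRQ_HANDLED;
}

static int example_setup_gpio_irq(void)
{
        int irq;

        irq = s5p_register_gpio_interrupt(EXAMPLE_PIN); /* placeholder pin */
        if (irq < 0)
                return irq;             /* -EINVAL or -ENOMEM */

        /* gpio_to_irq() on the same bank now also works, since the chip's
         * .to_irq has been switched to samsung_gpiolib_to_irq */
        return request_irq(irq, example_isr, IRQF_TRIGGER_FALLING,
                           "example-gpio-int", NULL);
}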
diff --git a/arch/arm/plat-s5p/irq-pm.c b/arch/arm/plat-s5p/irq-pm.c
new file mode 100644
index 000000000000..dc33b9ecda45
--- /dev/null
+++ b/arch/arm/plat-s5p/irq-pm.c
@@ -0,0 +1,93 @@
+/* linux/arch/arm/plat-s5p/irq-pm.c
+ *
+ * Copyright (c) 2010 Samsung Electronics Co., Ltd.
+ * http://www.samsung.com
+ *
+ * Based on arch/arm/plat-s3c24xx/irq-pm.c,
+ * Copyright (c) 2003,2004 Simtec Electronics
+ * Ben Dooks <ben@simtec.co.uk>
+ * http://armlinux.simtec.co.uk/
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+*/
+
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/interrupt.h>
+#include <linux/sysdev.h>
+
+#include <plat/cpu.h>
+#include <plat/irqs.h>
+#include <plat/pm.h>
+#include <mach/map.h>
+
+#include <mach/regs-gpio.h>
+#include <mach/regs-irq.h>
+
+/* state for IRQs over sleep */
+
+/* default is to allow for EINT0..EINT31, and IRQ_RTC_TIC, IRQ_RTC_ALARM,
+ * as wakeup sources
+ *
+ * set bit to 1 in allow bitfield to enable the wakeup settings on it
+*/
+
+unsigned long s3c_irqwake_intallow = 0x00000006L;
+unsigned long s3c_irqwake_eintallow = 0xffffffffL;
+
+int s3c_irq_wake(unsigned int irqno, unsigned int state)
+{
+ unsigned long irqbit;
+
+ switch (irqno) {
+ case IRQ_RTC_TIC:
+ case IRQ_RTC_ALARM:
+ irqbit = 1 << (irqno + 1 - IRQ_RTC_ALARM);
+ if (!state)
+ s3c_irqwake_intmask |= irqbit;
+ else
+ s3c_irqwake_intmask &= ~irqbit;
+ break;
+ default:
+ return -ENOENT;
+ }
+ return 0;
+}
+
+static struct sleep_save eint_save[] = {
+ SAVE_ITEM(S5P_EINT_CON(0)),
+ SAVE_ITEM(S5P_EINT_CON(1)),
+ SAVE_ITEM(S5P_EINT_CON(2)),
+ SAVE_ITEM(S5P_EINT_CON(3)),
+
+ SAVE_ITEM(S5P_EINT_FLTCON(0)),
+ SAVE_ITEM(S5P_EINT_FLTCON(1)),
+ SAVE_ITEM(S5P_EINT_FLTCON(2)),
+ SAVE_ITEM(S5P_EINT_FLTCON(3)),
+ SAVE_ITEM(S5P_EINT_FLTCON(4)),
+ SAVE_ITEM(S5P_EINT_FLTCON(5)),
+ SAVE_ITEM(S5P_EINT_FLTCON(6)),
+ SAVE_ITEM(S5P_EINT_FLTCON(7)),
+
+ SAVE_ITEM(S5P_EINT_MASK(0)),
+ SAVE_ITEM(S5P_EINT_MASK(1)),
+ SAVE_ITEM(S5P_EINT_MASK(2)),
+ SAVE_ITEM(S5P_EINT_MASK(3)),
+};
+
+int s3c24xx_irq_suspend(struct sys_device *dev, pm_message_t state)
+{
+ s3c_pm_do_save(eint_save, ARRAY_SIZE(eint_save));
+
+ return 0;
+}
+
+int s3c24xx_irq_resume(struct sys_device *dev)
+{
+ s3c_pm_do_restore(eint_save, ARRAY_SIZE(eint_save));
+
+ return 0;
+}
+
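Drivers do not call s3c_irq_wake() directly; with CONFIG_PM enabled it typically sits behind the irq chip's set_wake hook, so a driver just uses the generic wakeup API. A small sketch of that path (hedged; the surrounding chip wiring is assumed and not shown in this hunk):

#include <linux/interrupt.h>
#include <mach/irqs.h>

/* mark the RTC alarm as a wakeup source; the irq core forwards this to the
 * chip's set_wake callback, which is where s3c_irq_wake() clears the
 * corresponding bit in s3c_irqwake_intmask */
static int example_enable_rtc_wakeup(void)
{
        return enable_irq_wake(IRQ_RTC_ALARM);
}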
diff --git a/arch/arm/plat-s5p/pm.c b/arch/arm/plat-s5p/pm.c
new file mode 100644
index 000000000000..d592b6304b48
--- /dev/null
+++ b/arch/arm/plat-s5p/pm.c
@@ -0,0 +1,52 @@
+/* linux/arch/arm/plat-s5p/pm.c
+ *
+ * Copyright (c) 2010 Samsung Electronics Co., Ltd.
+ * http://www.samsung.com
+ *
+ * S5P Power Manager (Suspend-To-RAM) support
+ *
+ * Based on arch/arm/plat-s3c24xx/pm.c
+ * Copyright (c) 2004,2006 Simtec Electronics
+ * Ben Dooks <ben@simtec.co.uk>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+*/
+
+#include <linux/suspend.h>
+#include <plat/pm.h>
+
+#define PFX "s5p pm: "
+
+/* s3c_pm_check_resume_pin
+ *
+ * check to see if the pin is configured correctly for sleep mode, and
+ * make any necessary adjustments if it is not
+*/
+
+static void s3c_pm_check_resume_pin(unsigned int pin, unsigned int irqoffs)
+{
+ /* nothing here yet */
+}
+
+/* s3c_pm_configure_extint
+ *
+ * configure all external interrupt pins
+*/
+
+void s3c_pm_configure_extint(void)
+{
+ /* nothing here yet */
+}
+
+void s3c_pm_restore_core(void)
+{
+ /* nothing here yet */
+}
+
+void s3c_pm_save_core(void)
+{
+ /* nothing here yet */
+}
+
diff --git a/arch/arm/plat-samsung/Kconfig b/arch/arm/plat-samsung/Kconfig
index 7c0bde781167..dcd6eff4ee53 100644
--- a/arch/arm/plat-samsung/Kconfig
+++ b/arch/arm/plat-samsung/Kconfig
@@ -180,6 +180,31 @@ config S3C_DEV_I2C2
help
Compile in platform device definitions for I2C channel 2
+config S3C_DEV_I2C3
+ bool
+ help
+ Compile in platform device definition for I2C controller 3
+
+config S3C_DEV_I2C4
+ bool
+ help
+ Compile in platform device definition for I2C controller 4
+
+config S3C_DEV_I2C5
+ bool
+ help
+ Compile in platform device definition for I2C controller 5
+
+config S3C_DEV_I2C6
+ bool
+ help
+ Compile in platform device definition for I2C controller 6
+
+config S3C_DEV_I2C7
+ bool
+ help
+ Compile in platform device definition for I2C controller 7
+
config S3C_DEV_FB
bool
help
diff --git a/arch/arm/plat-samsung/Makefile b/arch/arm/plat-samsung/Makefile
index 4d8ff923207a..afcce474af8e 100644
--- a/arch/arm/plat-samsung/Makefile
+++ b/arch/arm/plat-samsung/Makefile
@@ -40,6 +40,11 @@ obj-$(CONFIG_S3C_DEV_HWMON) += dev-hwmon.o
obj-y += dev-i2c0.o
obj-$(CONFIG_S3C_DEV_I2C1) += dev-i2c1.o
obj-$(CONFIG_S3C_DEV_I2C2) += dev-i2c2.o
+obj-$(CONFIG_S3C_DEV_I2C3) += dev-i2c3.o
+obj-$(CONFIG_S3C_DEV_I2C4) += dev-i2c4.o
+obj-$(CONFIG_S3C_DEV_I2C5) += dev-i2c5.o
+obj-$(CONFIG_S3C_DEV_I2C6) += dev-i2c6.o
+obj-$(CONFIG_S3C_DEV_I2C7) += dev-i2c7.o
obj-$(CONFIG_S3C_DEV_FB) += dev-fb.o
obj-y += dev-uart.o
obj-$(CONFIG_S3C_DEV_USB_HOST) += dev-usb.o
diff --git a/arch/arm/plat-samsung/dev-hsmmc.c b/arch/arm/plat-samsung/dev-hsmmc.c
index 9d2be0941410..db7a65c7f127 100644
--- a/arch/arm/plat-samsung/dev-hsmmc.c
+++ b/arch/arm/plat-samsung/dev-hsmmc.c
@@ -41,6 +41,7 @@ struct s3c_sdhci_platdata s3c_hsmmc0_def_platdata = {
.max_width = 4,
.host_caps = (MMC_CAP_4_BIT_DATA |
MMC_CAP_MMC_HIGHSPEED | MMC_CAP_SD_HIGHSPEED),
+ .clk_type = S3C_SDHCI_CLK_DIV_INTERNAL,
};
struct platform_device s3c_device_hsmmc0 = {
@@ -59,17 +60,20 @@ void s3c_sdhci0_set_platdata(struct s3c_sdhci_platdata *pd)
{
struct s3c_sdhci_platdata *set = &s3c_hsmmc0_def_platdata;
- set->max_width = pd->max_width;
set->cd_type = pd->cd_type;
set->ext_cd_init = pd->ext_cd_init;
set->ext_cd_cleanup = pd->ext_cd_cleanup;
set->ext_cd_gpio = pd->ext_cd_gpio;
set->ext_cd_gpio_invert = pd->ext_cd_gpio_invert;
+ if (pd->max_width)
+ set->max_width = pd->max_width;
if (pd->cfg_gpio)
set->cfg_gpio = pd->cfg_gpio;
if (pd->cfg_card)
set->cfg_card = pd->cfg_card;
if (pd->host_caps)
- set->host_caps = pd->host_caps;
+ set->host_caps |= pd->host_caps;
+ if (pd->clk_type)
+ set->clk_type = pd->clk_type;
}
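After this change a board only has to fill in the fields it actually wants to override: zero-valued fields keep the compiled-in defaults, and host_caps is OR-ed into the default capabilities instead of replacing them. A sketch of the resulting board-side usage (the example_* names are hypothetical):

#include <linux/init.h>
#include <linux/mmc/host.h>
#include <plat/sdhci.h>

static struct s3c_sdhci_platdata example_hsmmc0_pdata __initdata = {
        .cd_type        = S3C_SDHCI_CD_INTERNAL,
        .host_caps      = MMC_CAP_8_BIT_DATA,   /* OR-ed into the defaults */
        /* max_width and clk_type left at 0, so the 4-bit default and
         * S3C_SDHCI_CLK_DIV_INTERNAL are preserved */
};

static void __init example_board_mmc_init(void)
{
        s3c_sdhci0_set_platdata(&example_hsmmc0_pdata);
}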
diff --git a/arch/arm/plat-samsung/dev-hsmmc1.c b/arch/arm/plat-samsung/dev-hsmmc1.c
index a6c8295840af..2497321f08d7 100644
--- a/arch/arm/plat-samsung/dev-hsmmc1.c
+++ b/arch/arm/plat-samsung/dev-hsmmc1.c
@@ -41,6 +41,7 @@ struct s3c_sdhci_platdata s3c_hsmmc1_def_platdata = {
.max_width = 4,
.host_caps = (MMC_CAP_4_BIT_DATA |
MMC_CAP_MMC_HIGHSPEED | MMC_CAP_SD_HIGHSPEED),
+ .clk_type = S3C_SDHCI_CLK_DIV_INTERNAL,
};
struct platform_device s3c_device_hsmmc1 = {
@@ -59,17 +60,20 @@ void s3c_sdhci1_set_platdata(struct s3c_sdhci_platdata *pd)
{
struct s3c_sdhci_platdata *set = &s3c_hsmmc1_def_platdata;
- set->max_width = pd->max_width;
set->cd_type = pd->cd_type;
set->ext_cd_init = pd->ext_cd_init;
set->ext_cd_cleanup = pd->ext_cd_cleanup;
set->ext_cd_gpio = pd->ext_cd_gpio;
set->ext_cd_gpio_invert = pd->ext_cd_gpio_invert;
+ if (pd->max_width)
+ set->max_width = pd->max_width;
if (pd->cfg_gpio)
set->cfg_gpio = pd->cfg_gpio;
if (pd->cfg_card)
set->cfg_card = pd->cfg_card;
if (pd->host_caps)
- set->host_caps = pd->host_caps;
+ set->host_caps |= pd->host_caps;
+ if (pd->clk_type)
+ set->clk_type = pd->clk_type;
}
diff --git a/arch/arm/plat-samsung/dev-hsmmc2.c b/arch/arm/plat-samsung/dev-hsmmc2.c
index cb0d7143381a..f60aedba417c 100644
--- a/arch/arm/plat-samsung/dev-hsmmc2.c
+++ b/arch/arm/plat-samsung/dev-hsmmc2.c
@@ -42,6 +42,7 @@ struct s3c_sdhci_platdata s3c_hsmmc2_def_platdata = {
.max_width = 4,
.host_caps = (MMC_CAP_4_BIT_DATA |
MMC_CAP_MMC_HIGHSPEED | MMC_CAP_SD_HIGHSPEED),
+ .clk_type = S3C_SDHCI_CLK_DIV_INTERNAL,
};
struct platform_device s3c_device_hsmmc2 = {
@@ -60,17 +61,20 @@ void s3c_sdhci2_set_platdata(struct s3c_sdhci_platdata *pd)
{
struct s3c_sdhci_platdata *set = &s3c_hsmmc2_def_platdata;
- set->max_width = pd->max_width;
set->cd_type = pd->cd_type;
set->ext_cd_init = pd->ext_cd_init;
set->ext_cd_cleanup = pd->ext_cd_cleanup;
set->ext_cd_gpio = pd->ext_cd_gpio;
set->ext_cd_gpio_invert = pd->ext_cd_gpio_invert;
+ if (pd->max_width)
+ set->max_width = pd->max_width;
if (pd->cfg_gpio)
set->cfg_gpio = pd->cfg_gpio;
if (pd->cfg_card)
set->cfg_card = pd->cfg_card;
if (pd->host_caps)
- set->host_caps = pd->host_caps;
+ set->host_caps |= pd->host_caps;
+ if (pd->clk_type)
+ set->clk_type = pd->clk_type;
}
diff --git a/arch/arm/plat-samsung/dev-hsmmc3.c b/arch/arm/plat-samsung/dev-hsmmc3.c
index 85aaf0f2842f..ede776f20e62 100644
--- a/arch/arm/plat-samsung/dev-hsmmc3.c
+++ b/arch/arm/plat-samsung/dev-hsmmc3.c
@@ -33,8 +33,8 @@ static struct resource s3c_hsmmc3_resource[] = {
.flags = IORESOURCE_MEM,
},
[1] = {
- .start = IRQ_MMC3,
- .end = IRQ_MMC3,
+ .start = IRQ_HSMMC3,
+ .end = IRQ_HSMMC3,
.flags = IORESOURCE_IRQ,
}
};
@@ -45,6 +45,7 @@ struct s3c_sdhci_platdata s3c_hsmmc3_def_platdata = {
.max_width = 4,
.host_caps = (MMC_CAP_4_BIT_DATA |
MMC_CAP_MMC_HIGHSPEED | MMC_CAP_SD_HIGHSPEED),
+ .clk_type = S3C_SDHCI_CLK_DIV_INTERNAL,
};
struct platform_device s3c_device_hsmmc3 = {
@@ -63,15 +64,20 @@ void s3c_sdhci3_set_platdata(struct s3c_sdhci_platdata *pd)
{
struct s3c_sdhci_platdata *set = &s3c_hsmmc3_def_platdata;
- set->max_width = pd->max_width;
set->cd_type = pd->cd_type;
set->ext_cd_init = pd->ext_cd_init;
set->ext_cd_cleanup = pd->ext_cd_cleanup;
set->ext_cd_gpio = pd->ext_cd_gpio;
set->ext_cd_gpio_invert = pd->ext_cd_gpio_invert;
+ if (pd->max_width)
+ set->max_width = pd->max_width;
if (pd->cfg_gpio)
set->cfg_gpio = pd->cfg_gpio;
if (pd->cfg_card)
set->cfg_card = pd->cfg_card;
+ if (pd->host_caps)
+ set->host_caps |= pd->host_caps;
+ if (pd->clk_type)
+ set->clk_type = pd->clk_type;
}
diff --git a/arch/arm/plat-samsung/dev-i2c2.c b/arch/arm/plat-samsung/dev-i2c2.c
index 07036dee09e7..ff4ba69b6830 100644
--- a/arch/arm/plat-samsung/dev-i2c2.c
+++ b/arch/arm/plat-samsung/dev-i2c2.c
@@ -32,8 +32,8 @@ static struct resource s3c_i2c_resource[] = {
.flags = IORESOURCE_MEM,
},
[1] = {
- .start = IRQ_CAN0,
- .end = IRQ_CAN0,
+ .start = IRQ_IIC2,
+ .end = IRQ_IIC2,
.flags = IORESOURCE_IRQ,
},
};
diff --git a/arch/arm/plat-samsung/dev-i2c3.c b/arch/arm/plat-samsung/dev-i2c3.c
new file mode 100644
index 000000000000..8586a10014b7
--- /dev/null
+++ b/arch/arm/plat-samsung/dev-i2c3.c
@@ -0,0 +1,68 @@
+/* linux/arch/arm/plat-samsung/dev-i2c3.c
+ *
+ * Copyright (c) 2010 Samsung Electronics Co., Ltd.
+ * http://www.samsung.com/
+ *
+ * S5P series device definition for i2c device 3
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/gfp.h>
+#include <linux/kernel.h>
+#include <linux/string.h>
+#include <linux/platform_device.h>
+
+#include <mach/irqs.h>
+#include <mach/map.h>
+
+#include <plat/regs-iic.h>
+#include <plat/iic.h>
+#include <plat/devs.h>
+#include <plat/cpu.h>
+
+static struct resource s3c_i2c_resource[] = {
+ [0] = {
+ .start = S3C_PA_IIC3,
+ .end = S3C_PA_IIC3 + SZ_4K - 1,
+ .flags = IORESOURCE_MEM,
+ },
+ [1] = {
+ .start = IRQ_IIC3,
+ .end = IRQ_IIC3,
+ .flags = IORESOURCE_IRQ,
+ },
+};
+
+struct platform_device s3c_device_i2c3 = {
+ .name = "s3c2440-i2c",
+ .id = 3,
+ .num_resources = ARRAY_SIZE(s3c_i2c_resource),
+ .resource = s3c_i2c_resource,
+};
+
+static struct s3c2410_platform_i2c default_i2c_data3 __initdata = {
+ .flags = 0,
+ .bus_num = 3,
+ .slave_addr = 0x10,
+ .frequency = 100*1000,
+ .sda_delay = 100,
+};
+
+void __init s3c_i2c3_set_platdata(struct s3c2410_platform_i2c *pd)
+{
+ struct s3c2410_platform_i2c *npd;
+
+ if (!pd)
+ pd = &default_i2c_data3;
+
+ npd = kmemdup(pd, sizeof(struct s3c2410_platform_i2c), GFP_KERNEL);
+ if (!npd)
+ printk(KERN_ERR "%s: no memory for platform data\n", __func__);
+ else if (!npd->cfg_gpio)
+ npd->cfg_gpio = s3c_i2c3_cfg_gpio;
+
+ s3c_device_i2c3.dev.platform_data = npd;
+}
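A board that wants the new bus typically just accepts the defaults and registers the platform device; a minimal sketch under that assumption (the example_* names are not part of the patch):

#include <linux/init.h>
#include <linux/platform_device.h>
#include <plat/iic.h>
#include <plat/devs.h>

static struct platform_device *example_devices[] __initdata = {
        &s3c_device_i2c3,
};

static void __init example_machine_init(void)
{
        s3c_i2c3_set_platdata(NULL);    /* NULL selects default_i2c_data3 */
        platform_add_devices(example_devices, ARRAY_SIZE(example_devices));
}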
diff --git a/arch/arm/plat-samsung/dev-i2c4.c b/arch/arm/plat-samsung/dev-i2c4.c
new file mode 100644
index 000000000000..df2159e2daa6
--- /dev/null
+++ b/arch/arm/plat-samsung/dev-i2c4.c
@@ -0,0 +1,68 @@
+/* linux/arch/arm/plat-samsung/dev-i2c4.c
+ *
+ * Copyright (c) 2010 Samsung Electronics Co., Ltd.
+ * http://www.samsung.com/
+ *
+ * S5P series device definition for i2c device 4
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/gfp.h>
+#include <linux/kernel.h>
+#include <linux/string.h>
+#include <linux/platform_device.h>
+
+#include <mach/irqs.h>
+#include <mach/map.h>
+
+#include <plat/regs-iic.h>
+#include <plat/iic.h>
+#include <plat/devs.h>
+#include <plat/cpu.h>
+
+static struct resource s3c_i2c_resource[] = {
+ [0] = {
+ .start = S3C_PA_IIC4,
+ .end = S3C_PA_IIC4 + SZ_4K - 1,
+ .flags = IORESOURCE_MEM,
+ },
+ [1] = {
+ .start = IRQ_IIC4,
+ .end = IRQ_IIC4,
+ .flags = IORESOURCE_IRQ,
+ },
+};
+
+struct platform_device s3c_device_i2c4 = {
+ .name = "s3c2440-i2c",
+ .id = 4,
+ .num_resources = ARRAY_SIZE(s3c_i2c_resource),
+ .resource = s3c_i2c_resource,
+};
+
+static struct s3c2410_platform_i2c default_i2c_data4 __initdata = {
+ .flags = 0,
+ .bus_num = 4,
+ .slave_addr = 0x10,
+ .frequency = 100*1000,
+ .sda_delay = 100,
+};
+
+void __init s3c_i2c4_set_platdata(struct s3c2410_platform_i2c *pd)
+{
+ struct s3c2410_platform_i2c *npd;
+
+ if (!pd)
+ pd = &default_i2c_data4;
+
+ npd = kmemdup(pd, sizeof(struct s3c2410_platform_i2c), GFP_KERNEL);
+ if (!npd)
+ printk(KERN_ERR "%s: no memory for platform data\n", __func__);
+ else if (!npd->cfg_gpio)
+ npd->cfg_gpio = s3c_i2c4_cfg_gpio;
+
+ s3c_device_i2c4.dev.platform_data = npd;
+}
diff --git a/arch/arm/plat-samsung/dev-i2c5.c b/arch/arm/plat-samsung/dev-i2c5.c
new file mode 100644
index 000000000000..0499c2c3877b
--- /dev/null
+++ b/arch/arm/plat-samsung/dev-i2c5.c
@@ -0,0 +1,68 @@
+/* linux/arch/arm/plat-samsung/dev-i2c5.c
+ *
+ * Copyright (c) 2010 Samsung Electronics Co., Ltd.
+ * http://www.samsung.com/
+ *
+ * S5P series device definition for i2c device 5
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/gfp.h>
+#include <linux/kernel.h>
+#include <linux/string.h>
+#include <linux/platform_device.h>
+
+#include <mach/irqs.h>
+#include <mach/map.h>
+
+#include <plat/regs-iic.h>
+#include <plat/iic.h>
+#include <plat/devs.h>
+#include <plat/cpu.h>
+
+static struct resource s3c_i2c_resource[] = {
+ [0] = {
+ .start = S3C_PA_IIC5,
+ .end = S3C_PA_IIC5 + SZ_4K - 1,
+ .flags = IORESOURCE_MEM,
+ },
+ [1] = {
+ .start = IRQ_IIC5,
+ .end = IRQ_IIC5,
+ .flags = IORESOURCE_IRQ,
+ },
+};
+
+struct platform_device s3c_device_i2c5 = {
+ .name = "s3c2440-i2c",
+ .id = 5,
+ .num_resources = ARRAY_SIZE(s3c_i2c_resource),
+ .resource = s3c_i2c_resource,
+};
+
+static struct s3c2410_platform_i2c default_i2c_data5 __initdata = {
+ .flags = 0,
+ .bus_num = 5,
+ .slave_addr = 0x10,
+ .frequency = 100*1000,
+ .sda_delay = 100,
+};
+
+void __init s3c_i2c5_set_platdata(struct s3c2410_platform_i2c *pd)
+{
+ struct s3c2410_platform_i2c *npd;
+
+ if (!pd)
+ pd = &default_i2c_data5;
+
+ npd = kmemdup(pd, sizeof(struct s3c2410_platform_i2c), GFP_KERNEL);
+ if (!npd)
+ printk(KERN_ERR "%s: no memory for platform data\n", __func__);
+ else if (!npd->cfg_gpio)
+ npd->cfg_gpio = s3c_i2c5_cfg_gpio;
+
+ s3c_device_i2c5.dev.platform_data = npd;
+}
diff --git a/arch/arm/plat-samsung/dev-i2c6.c b/arch/arm/plat-samsung/dev-i2c6.c
new file mode 100644
index 000000000000..4083108908a8
--- /dev/null
+++ b/arch/arm/plat-samsung/dev-i2c6.c
@@ -0,0 +1,68 @@
+/* linux/arch/arm/plat-samsung/dev-i2c6.c
+ *
+ * Copyright (c) 2010 Samsung Electronics Co., Ltd.
+ * http://www.samsung.com/
+ *
+ * S5P series device definition for i2c device 6
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/gfp.h>
+#include <linux/kernel.h>
+#include <linux/string.h>
+#include <linux/platform_device.h>
+
+#include <mach/irqs.h>
+#include <mach/map.h>
+
+#include <plat/regs-iic.h>
+#include <plat/iic.h>
+#include <plat/devs.h>
+#include <plat/cpu.h>
+
+static struct resource s3c_i2c_resource[] = {
+ [0] = {
+ .start = S3C_PA_IIC6,
+ .end = S3C_PA_IIC6 + SZ_4K - 1,
+ .flags = IORESOURCE_MEM,
+ },
+ [1] = {
+ .start = IRQ_IIC6,
+ .end = IRQ_IIC6,
+ .flags = IORESOURCE_IRQ,
+ },
+};
+
+struct platform_device s3c_device_i2c6 = {
+ .name = "s3c2440-i2c",
+ .id = 6,
+ .num_resources = ARRAY_SIZE(s3c_i2c_resource),
+ .resource = s3c_i2c_resource,
+};
+
+static struct s3c2410_platform_i2c default_i2c_data6 __initdata = {
+ .flags = 0,
+ .bus_num = 6,
+ .slave_addr = 0x10,
+ .frequency = 100*1000,
+ .sda_delay = 100,
+};
+
+void __init s3c_i2c6_set_platdata(struct s3c2410_platform_i2c *pd)
+{
+ struct s3c2410_platform_i2c *npd;
+
+ if (!pd)
+ pd = &default_i2c_data6;
+
+ npd = kmemdup(pd, sizeof(struct s3c2410_platform_i2c), GFP_KERNEL);
+ if (!npd)
+ printk(KERN_ERR "%s: no memory for platform data\n", __func__);
+ else if (!npd->cfg_gpio)
+ npd->cfg_gpio = s3c_i2c6_cfg_gpio;
+
+ s3c_device_i2c6.dev.platform_data = npd;
+}
diff --git a/arch/arm/plat-samsung/dev-i2c7.c b/arch/arm/plat-samsung/dev-i2c7.c
new file mode 100644
index 000000000000..1182451d7dce
--- /dev/null
+++ b/arch/arm/plat-samsung/dev-i2c7.c
@@ -0,0 +1,68 @@
+/* linux/arch/arm/plat-samsung/dev-i2c7.c
+ *
+ * Copyright (c) 2010 Samsung Electronics Co., Ltd.
+ * http://www.samsung.com/
+ *
+ * S5P series device definition for i2c device 7
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/gfp.h>
+#include <linux/kernel.h>
+#include <linux/string.h>
+#include <linux/platform_device.h>
+
+#include <mach/irqs.h>
+#include <mach/map.h>
+
+#include <plat/regs-iic.h>
+#include <plat/iic.h>
+#include <plat/devs.h>
+#include <plat/cpu.h>
+
+static struct resource s3c_i2c_resource[] = {
+ [0] = {
+ .start = S3C_PA_IIC7,
+ .end = S3C_PA_IIC7 + SZ_4K - 1,
+ .flags = IORESOURCE_MEM,
+ },
+ [1] = {
+ .start = IRQ_IIC7,
+ .end = IRQ_IIC7,
+ .flags = IORESOURCE_IRQ,
+ },
+};
+
+struct platform_device s3c_device_i2c7 = {
+ .name = "s3c2440-i2c",
+ .id = 7,
+ .num_resources = ARRAY_SIZE(s3c_i2c_resource),
+ .resource = s3c_i2c_resource,
+};
+
+static struct s3c2410_platform_i2c default_i2c_data7 __initdata = {
+ .flags = 0,
+ .bus_num = 7,
+ .slave_addr = 0x10,
+ .frequency = 100*1000,
+ .sda_delay = 100,
+};
+
+void __init s3c_i2c7_set_platdata(struct s3c2410_platform_i2c *pd)
+{
+ struct s3c2410_platform_i2c *npd;
+
+ if (!pd)
+ pd = &default_i2c_data7;
+
+ npd = kmemdup(pd, sizeof(struct s3c2410_platform_i2c), GFP_KERNEL);
+ if (!npd)
+ printk(KERN_ERR "%s: no memory for platform data\n", __func__);
+ else if (!npd->cfg_gpio)
+ npd->cfg_gpio = s3c_i2c7_cfg_gpio;
+
+ s3c_device_i2c7.dev.platform_data = npd;
+}
diff --git a/arch/arm/plat-samsung/gpio-config.c b/arch/arm/plat-samsung/gpio-config.c
index e3d41eaed1ff..0aa32f242ee4 100644
--- a/arch/arm/plat-samsung/gpio-config.c
+++ b/arch/arm/plat-samsung/gpio-config.c
@@ -41,6 +41,37 @@ int s3c_gpio_cfgpin(unsigned int pin, unsigned int config)
}
EXPORT_SYMBOL(s3c_gpio_cfgpin);
+int s3c_gpio_cfgpin_range(unsigned int start, unsigned int nr,
+ unsigned int cfg)
+{
+ int ret;
+
+ for (; nr > 0; nr--, start++) {
+ ret = s3c_gpio_cfgpin(start, cfg);
+ if (ret != 0)
+ return ret;
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(s3c_gpio_cfgpin_range);
+
+int s3c_gpio_cfgall_range(unsigned int start, unsigned int nr,
+ unsigned int cfg, s3c_gpio_pull_t pull)
+{
+ int ret;
+
+ for (; nr > 0; nr--, start++) {
+ s3c_gpio_setpull(start, pull);
+ ret = s3c_gpio_cfgpin(start, cfg);
+ if (ret != 0)
+ return ret;
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(s3c_gpio_cfgall_range);
+
unsigned s3c_gpio_getcfg(unsigned int pin)
{
struct s3c_gpio_chip *chip = s3c_gpiolib_getchip(pin);
@@ -80,6 +111,25 @@ int s3c_gpio_setpull(unsigned int pin, s3c_gpio_pull_t pull)
}
EXPORT_SYMBOL(s3c_gpio_setpull);
+s3c_gpio_pull_t s3c_gpio_getpull(unsigned int pin)
+{
+ struct s3c_gpio_chip *chip = s3c_gpiolib_getchip(pin);
+ unsigned long flags;
+ int offset;
+ u32 pup = 0;
+
+ if (chip) {
+ offset = pin - chip->chip.base;
+
+ s3c_gpio_lock(chip, flags);
+ pup = s3c_gpio_do_getpull(chip, offset);
+ s3c_gpio_unlock(chip, flags);
+ }
+
+ return (__force s3c_gpio_pull_t)pup;
+}
+EXPORT_SYMBOL(s3c_gpio_getpull);
+
#ifdef CONFIG_S3C_GPIO_CFG_S3C24XX
int s3c_gpio_setcfg_s3c24xx_a(struct s3c_gpio_chip *chip,
unsigned int off, unsigned int cfg)
@@ -230,18 +280,17 @@ s3c_gpio_pull_t s3c_gpio_getpull_updown(struct s3c_gpio_chip *chip,
}
#endif
-#ifdef CONFIG_S3C_GPIO_PULL_UP
-int s3c_gpio_setpull_1up(struct s3c_gpio_chip *chip,
- unsigned int off, s3c_gpio_pull_t pull)
+#if defined(CONFIG_S3C_GPIO_PULL_UP) || defined(CONFIG_S3C_GPIO_PULL_DOWN)
+static int s3c_gpio_setpull_1(struct s3c_gpio_chip *chip,
+ unsigned int off, s3c_gpio_pull_t pull,
+ s3c_gpio_pull_t updown)
{
void __iomem *reg = chip->base + 0x08;
u32 pup = __raw_readl(reg);
- pup = __raw_readl(reg);
-
- if (pup == S3C_GPIO_PULL_UP)
+ if (pull == updown)
pup &= ~(1 << off);
- else if (pup == S3C_GPIO_PULL_NONE)
+ else if (pull == S3C_GPIO_PULL_NONE)
pup |= (1 << off);
else
return -EINVAL;
@@ -250,17 +299,45 @@ int s3c_gpio_setpull_1up(struct s3c_gpio_chip *chip,
return 0;
}
-s3c_gpio_pull_t s3c_gpio_getpull_1up(struct s3c_gpio_chip *chip,
- unsigned int off)
+static s3c_gpio_pull_t s3c_gpio_getpull_1(struct s3c_gpio_chip *chip,
+ unsigned int off, s3c_gpio_pull_t updown)
{
void __iomem *reg = chip->base + 0x08;
u32 pup = __raw_readl(reg);
pup &= (1 << off);
- return pup ? S3C_GPIO_PULL_NONE : S3C_GPIO_PULL_UP;
+ return pup ? S3C_GPIO_PULL_NONE : updown;
+}
+#endif /* CONFIG_S3C_GPIO_PULL_UP || CONFIG_S3C_GPIO_PULL_DOWN */
+
+#ifdef CONFIG_S3C_GPIO_PULL_UP
+s3c_gpio_pull_t s3c_gpio_getpull_1up(struct s3c_gpio_chip *chip,
+ unsigned int off)
+{
+ return s3c_gpio_getpull_1(chip, off, S3C_GPIO_PULL_UP);
+}
+
+int s3c_gpio_setpull_1up(struct s3c_gpio_chip *chip,
+ unsigned int off, s3c_gpio_pull_t pull)
+{
+ return s3c_gpio_setpull_1(chip, off, pull, S3C_GPIO_PULL_UP);
}
#endif /* CONFIG_S3C_GPIO_PULL_UP */
+#ifdef CONFIG_S3C_GPIO_PULL_DOWN
+s3c_gpio_pull_t s3c_gpio_getpull_1down(struct s3c_gpio_chip *chip,
+ unsigned int off)
+{
+ return s3c_gpio_getpull_1(chip, off, S3C_GPIO_PULL_DOWN);
+}
+
+int s3c_gpio_setpull_1down(struct s3c_gpio_chip *chip,
+ unsigned int off, s3c_gpio_pull_t pull)
+{
+ return s3c_gpio_setpull_1(chip, off, pull, S3C_GPIO_PULL_DOWN);
+}
+#endif /* CONFIG_S3C_GPIO_PULL_DOWN */
+
#ifdef CONFIG_S5P_GPIO_DRVSTR
s5p_gpio_drvstr_t s5p_gpio_get_drvstr(unsigned int pin)
{
diff --git a/arch/arm/plat-samsung/gpio.c b/arch/arm/plat-samsung/gpio.c
index b83a83351cea..7743c4b8b2fb 100644
--- a/arch/arm/plat-samsung/gpio.c
+++ b/arch/arm/plat-samsung/gpio.c
@@ -157,3 +157,11 @@ __init void s3c_gpiolib_add(struct s3c_gpio_chip *chip)
if (ret >= 0)
s3c_gpiolib_track(chip);
}
+
+int samsung_gpiolib_to_irq(struct gpio_chip *chip, unsigned int offset)
+{
+ struct s3c_gpio_chip *s3c_chip = container_of(chip,
+ struct s3c_gpio_chip, chip);
+
+ return s3c_chip->irq_base + offset;
+}
diff --git a/arch/arm/plat-samsung/include/plat/audio.h b/arch/arm/plat-samsung/include/plat/audio.h
index e32f9edfd4b7..7712ff6336f4 100644
--- a/arch/arm/plat-samsung/include/plat/audio.h
+++ b/arch/arm/plat-samsung/include/plat/audio.h
@@ -16,6 +16,15 @@
#define S3C64XX_AC97_GPE 1
extern void s3c64xx_ac97_setup_gpio(int);
+/*
+ * The machine init code calls s5p*_spdif_setup_gpio with
+ * one of these defines in order to select the appropriate GPIO
+ * bank for the S/PDIF pins.
+ */
+#define S5PC100_SPDIF_GPD 0
+#define S5PC100_SPDIF_GPG3 1
+extern void s5pc100_spdif_setup_gpio(int);
+
/**
* struct s3c_audio_pdata - common platform data for audio device drivers
* @cfg_gpio: Callback function to setup mux'ed pins in I2S/PCM/AC97 mode
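As the new comment says, machine init code picks the pin bank; a one-line sketch, assuming an S5PC100 board that routes S/PDIF through bank GPD:

#include <linux/init.h>
#include <plat/audio.h>

static void __init example_board_spdif_init(void)
{
        s5pc100_spdif_setup_gpio(S5PC100_SPDIF_GPD);    /* GPG3 is the alternative */
}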
diff --git a/arch/arm/plat-samsung/include/plat/devs.h b/arch/arm/plat-samsung/include/plat/devs.h
index 7d448e138792..2d82a6cb1444 100644
--- a/arch/arm/plat-samsung/include/plat/devs.h
+++ b/arch/arm/plat-samsung/include/plat/devs.h
@@ -32,6 +32,8 @@ extern struct platform_device s3c64xx_device_iisv4;
extern struct platform_device s3c64xx_device_spi0;
extern struct platform_device s3c64xx_device_spi1;
+extern struct platform_device s3c_device_pcm;
+
extern struct platform_device s3c64xx_device_pcm0;
extern struct platform_device s3c64xx_device_pcm1;
@@ -46,6 +48,11 @@ extern struct platform_device s3c_device_wdt;
extern struct platform_device s3c_device_i2c0;
extern struct platform_device s3c_device_i2c1;
extern struct platform_device s3c_device_i2c2;
+extern struct platform_device s3c_device_i2c3;
+extern struct platform_device s3c_device_i2c4;
+extern struct platform_device s3c_device_i2c5;
+extern struct platform_device s3c_device_i2c6;
+extern struct platform_device s3c_device_i2c7;
extern struct platform_device s3c_device_rtc;
extern struct platform_device s3c_device_adc;
extern struct platform_device s3c_device_sdi;
@@ -87,6 +94,7 @@ extern struct platform_device s5pv210_device_pcm2;
extern struct platform_device s5pv210_device_iis0;
extern struct platform_device s5pv210_device_iis1;
extern struct platform_device s5pv210_device_iis2;
+extern struct platform_device s5pv210_device_spdif;
extern struct platform_device s5p6442_device_pcm0;
extern struct platform_device s5p6442_device_pcm1;
@@ -106,6 +114,7 @@ extern struct platform_device s5pc100_device_pcm1;
extern struct platform_device s5pc100_device_iis0;
extern struct platform_device s5pc100_device_iis1;
extern struct platform_device s5pc100_device_iis2;
+extern struct platform_device s5pc100_device_spdif;
extern struct platform_device samsung_device_keypad;
diff --git a/arch/arm/plat-samsung/include/plat/gpio-cfg-helpers.h b/arch/arm/plat-samsung/include/plat/gpio-cfg-helpers.h
index 3e21c75feefa..0d2c5703f1ee 100644
--- a/arch/arm/plat-samsung/include/plat/gpio-cfg-helpers.h
+++ b/arch/arm/plat-samsung/include/plat/gpio-cfg-helpers.h
@@ -42,6 +42,12 @@ static inline int s3c_gpio_do_setpull(struct s3c_gpio_chip *chip,
return (chip->config->set_pull)(chip, off, pull);
}
+static inline s3c_gpio_pull_t s3c_gpio_do_getpull(struct s3c_gpio_chip *chip,
+ unsigned int off)
+{
+ return chip->config->get_pull(chip, off);
+}
+
/**
* s3c_gpio_setcfg_s3c24xx - S3C24XX style GPIO configuration.
* @chip: The gpio chip that is being configured.
@@ -204,6 +210,17 @@ extern s3c_gpio_pull_t s3c_gpio_getpull_1up(struct s3c_gpio_chip *chip,
unsigned int off);
/**
+ * s3c_gpio_getpull_1down() - Get configuration for choice of down or none
+ * @chip: The gpio chip that the GPIO pin belongs to
+ * @off: The offset to the pin to get the configuration of.
+ *
+ * This helper function reads the state of the pull-down resistor for the
+ * given GPIO in the same way as s3c_gpio_setpull_1down() configures it.
+*/
+extern s3c_gpio_pull_t s3c_gpio_getpull_1down(struct s3c_gpio_chip *chip,
+ unsigned int off);
+
+/**
* s3c_gpio_setpull_s3c2443() - Pull configuration for s3c2443.
* @chip: The gpio chip that is being configured.
* @off: The offset for the GPIO being configured.
diff --git a/arch/arm/plat-samsung/include/plat/gpio-cfg.h b/arch/arm/plat-samsung/include/plat/gpio-cfg.h
index 1c6b92947c5d..e4b5cf126fa9 100644
--- a/arch/arm/plat-samsung/include/plat/gpio-cfg.h
+++ b/arch/arm/plat-samsung/include/plat/gpio-cfg.h
@@ -108,6 +108,19 @@ extern int s3c_gpio_cfgpin(unsigned int pin, unsigned int to);
*/
extern unsigned s3c_gpio_getcfg(unsigned int pin);
+/**
+ * s3c_gpio_cfgpin_range() - Change the GPIO function for a range of pins
+ * @start: The pin number to start at
+ * @nr: The number of pins to configure from @start.
+ * @cfg: The configuration for the pin's function
+ *
+ * Call s3c_gpio_cfgpin() for the @nr pins starting at @start.
+ *
+ * @sa s3c_gpio_cfgpin.
+ */
+extern int s3c_gpio_cfgpin_range(unsigned int start, unsigned int nr,
+ unsigned int cfg);
+
/* Define values for the pull-{up,down} available for each gpio pin.
*
* These values control the state of the weak pull-{up,down} resistors
@@ -140,6 +153,31 @@ extern int s3c_gpio_setpull(unsigned int pin, s3c_gpio_pull_t pull);
*/
extern s3c_gpio_pull_t s3c_gpio_getpull(unsigned int pin);
+/* configure `all` aspects of a gpio */
+
+/**
+ * s3c_gpio_cfgall_range() - configure function and pull for a range of gpios.
+ * @start: The gpio number to start at.
+ * @nr: The number of gpios to configure from @start.
+ * @cfg: The configuration to use
+ * @pull: The pull setting to use.
+ *
+ * Run s3c_gpio_cfgpin() and s3c_gpio_setpull() over the gpio range starting
+ * at @start and running for @nr pins.
+ *
+ * @sa s3c_gpio_cfgpin
+ * @sa s3c_gpio_setpull
+ * @sa s3c_gpio_cfgpin_range
+ */
+extern int s3c_gpio_cfgall_range(unsigned int start, unsigned int nr,
+ unsigned int cfg, s3c_gpio_pull_t pull);
+
+static inline int s3c_gpio_cfgrange_nopull(unsigned int pin, unsigned int size,
+ unsigned int cfg)
+{
+ return s3c_gpio_cfgall_range(pin, size, cfg, S3C_GPIO_PULL_NONE);
+}
+
/* Define values for the drvstr available for each gpio pin.
*
* These values control the value of the output signal driver strength,
@@ -169,4 +207,22 @@ extern s5p_gpio_drvstr_t s5p_gpio_get_drvstr(unsigned int pin);
*/
extern int s5p_gpio_set_drvstr(unsigned int pin, s5p_gpio_drvstr_t drvstr);
+/**
+ * s5p_register_gpio_interrupt() - register interrupt support for a gpio group
+ * @pin: The pin number from the group to be registered
+ *
+ * This function registers gpio interrupt support for the group that the
+ * specified pin belongs to.
+ *
+ * The total number of gpio pins is quite large on the S5P series. Registering
+ * irq support for all of them would waste resources, so interrupt support for
+ * standard gpio pins is registered dynamically instead.
+ *
+ * It will return the irq number of the interrupt that has been registered
+ * or -ENOMEM if no more gpio interrupts can be registered. It is allowed
+ * to call this function more than once for the same gpio group (the group
+ * will be registered only once).
+ */
+extern int s5p_register_gpio_interrupt(int pin);
+
#endif /* __PLAT_GPIO_CFG_H */
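These helpers replace the open-coded loops of s3c_gpio_cfgpin()/s3c_gpio_setpull() pairs that board and SoC setup files otherwise carry. A hedged sketch (the S5PV210 pin macro is just an example bank; substitute whatever the board uses):

#include <plat/gpio-cfg.h>
#include <mach/gpio.h>

static void example_cfg_pins(void)
{
        /* four consecutive pins to special-function 2, pulls disabled */
        s3c_gpio_cfgall_range(S5PV210_GPA0(0), 4,
                              S3C_GPIO_SFN(2), S3C_GPIO_PULL_NONE);

        /* shorthand wrapper for the same no-pull case */
        s3c_gpio_cfgrange_nopull(S5PV210_GPA0(0), 4, S3C_GPIO_SFN(2));
}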
diff --git a/arch/arm/plat-samsung/include/plat/gpio-core.h b/arch/arm/plat-samsung/include/plat/gpio-core.h
index e358c7da8480..13a22b8861ef 100644
--- a/arch/arm/plat-samsung/include/plat/gpio-core.h
+++ b/arch/arm/plat-samsung/include/plat/gpio-core.h
@@ -43,6 +43,8 @@ struct s3c_gpio_cfg;
* struct s3c_gpio_chip - wrapper for specific implementation of gpio
* @chip: The chip structure to be exported via gpiolib.
* @base: The base pointer to the gpio configuration registers.
+ * @group: The group register number for gpio interrupt support.
+ * @irq_base: The base irq number.
* @config: special function and pull-resistor control information.
* @lock: Lock for exclusive access to this gpio bank.
* @pm_save: Save information for suspend/resume support.
@@ -63,6 +65,8 @@ struct s3c_gpio_chip {
struct s3c_gpio_cfg *config;
struct s3c_gpio_pm *pm;
void __iomem *base;
+ int irq_base;
+ int group;
spinlock_t lock;
#ifdef CONFIG_PM
u32 pm_save[4];
@@ -118,6 +122,17 @@ extern void samsung_gpiolib_add_4bit2_chips(struct s3c_gpio_chip *chip,
extern void samsung_gpiolib_add_4bit(struct s3c_gpio_chip *chip);
extern void samsung_gpiolib_add_4bit2(struct s3c_gpio_chip *chip);
+
+/**
+ * samsung_gpiolib_to_irq - convert gpio pin to irq number
+ * @chip: The gpio chip that the pin belongs to.
+ * @offset: The offset of the pin in the chip.
+ *
+ * This helper returns the irq number calculated from the chip->irq_base and
+ * the provided offset.
+ */
+extern int samsung_gpiolib_to_irq(struct gpio_chip *chip, unsigned int offset);
+
/* exported for core SoC support to change */
extern struct s3c_gpio_cfg s3c24xx_gpiocfg_default;
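samsung_gpiolib_to_irq() only makes sense once irq_base has been filled in, either statically (as in the s3c24xx bank G hunk near the top of this patch) or dynamically via s5p_register_gpio_interrupt(). A small sketch of the resulting mapping, using the bank G example:

#include <linux/gpio.h>
#include <mach/gpio.h>
#include <mach/irqs.h>

/* with chip->irq_base = IRQ_EINT8 and .to_irq = samsung_gpiolib_to_irq,
 * gpiolib resolves the interrupt as irq_base + offset */
static int example_gpg3_irq(void)
{
        return gpio_to_irq(S3C2410_GPG(3));     /* == IRQ_EINT8 + 3 */
}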
diff --git a/arch/arm/plat-samsung/include/plat/iic.h b/arch/arm/plat-samsung/include/plat/iic.h
index 133308bf595d..1543da8f85c1 100644
--- a/arch/arm/plat-samsung/include/plat/iic.h
+++ b/arch/arm/plat-samsung/include/plat/iic.h
@@ -55,10 +55,20 @@ struct s3c2410_platform_i2c {
extern void s3c_i2c0_set_platdata(struct s3c2410_platform_i2c *i2c);
extern void s3c_i2c1_set_platdata(struct s3c2410_platform_i2c *i2c);
extern void s3c_i2c2_set_platdata(struct s3c2410_platform_i2c *i2c);
+extern void s3c_i2c3_set_platdata(struct s3c2410_platform_i2c *i2c);
+extern void s3c_i2c4_set_platdata(struct s3c2410_platform_i2c *i2c);
+extern void s3c_i2c5_set_platdata(struct s3c2410_platform_i2c *i2c);
+extern void s3c_i2c6_set_platdata(struct s3c2410_platform_i2c *i2c);
+extern void s3c_i2c7_set_platdata(struct s3c2410_platform_i2c *i2c);
/* defined by architecture to configure gpio */
extern void s3c_i2c0_cfg_gpio(struct platform_device *dev);
extern void s3c_i2c1_cfg_gpio(struct platform_device *dev);
extern void s3c_i2c2_cfg_gpio(struct platform_device *dev);
+extern void s3c_i2c3_cfg_gpio(struct platform_device *dev);
+extern void s3c_i2c4_cfg_gpio(struct platform_device *dev);
+extern void s3c_i2c5_cfg_gpio(struct platform_device *dev);
+extern void s3c_i2c6_cfg_gpio(struct platform_device *dev);
+extern void s3c_i2c7_cfg_gpio(struct platform_device *dev);
#endif /* __ASM_ARCH_IIC_H */
diff --git a/arch/arm/plat-samsung/include/plat/map-base.h b/arch/arm/plat-samsung/include/plat/map-base.h
index 250be311c85b..3ffac4d2e4f0 100644
--- a/arch/arm/plat-samsung/include/plat/map-base.h
+++ b/arch/arm/plat-samsung/include/plat/map-base.h
@@ -14,7 +14,7 @@
#ifndef __ASM_PLAT_MAP_H
#define __ASM_PLAT_MAP_H __FILE__
-/* Fit all our registers in at 0xF4000000 upwards, trying to use as
+/* Fit all our registers in at 0xF6000000 upwards, trying to use as
* little of the VA space as possible so vmalloc and friends have a
* better chance of getting memory.
*
@@ -22,7 +22,7 @@
* an single MOVS instruction (ie, only 8 bits of set data)
*/
-#define S3C_ADDR_BASE (0xF4000000)
+#define S3C_ADDR_BASE 0xF6000000
#ifndef __ASSEMBLY__
#define S3C_ADDR(x) ((void __iomem __force *)S3C_ADDR_BASE + (x))
diff --git a/arch/arm/plat-samsung/include/plat/nand-core.h b/arch/arm/plat-samsung/include/plat/nand-core.h
new file mode 100644
index 000000000000..6de20789a95e
--- /dev/null
+++ b/arch/arm/plat-samsung/include/plat/nand-core.h
@@ -0,0 +1,28 @@
+/* arch/arm/plat-samsung/include/plat/nand-core.h
+ *
+ * Copyright (c) 2010 Samsung Electronics Co., Ltd.
+ * http://www.samsung.com/
+ *
+ * S3C - Nand Controller core functions
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+*/
+
+#ifndef __ASM_ARCH_NAND_CORE_H
+#define __ASM_ARCH_NAND_CORE_H __FILE__
+
+/* These functions are only for use with the core support code, such as
+ * the cpu specific initialisation code
+ */
+
+/* re-define device name depending on support. */
+static inline void s3c_nand_setname(char *name)
+{
+#ifdef CONFIG_S3C_DEV_NAND
+ s3c_device_nand.name = name;
+#endif
+}
+
+#endif /* __ASM_ARCH_NAND_CORE_H */
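The helper is meant for cpu-init code that needs the shared NAND platform device to bind to a different driver variant; a minimal sketch, assuming CONFIG_S3C_DEV_NAND is enabled and a map_io-time call site is appropriate:

#include <linux/init.h>
#include <plat/devs.h>
#include <plat/nand-core.h>

static void __init example_cpu_map_io(void)
{
        /* bind the common NAND device to the s3c2412 flavour of the driver */
        s3c_nand_setname("s3c2412-nand");
}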
diff --git a/arch/arm/plat-samsung/include/plat/sdhci.h b/arch/arm/plat-samsung/include/plat/sdhci.h
index 30844c263d03..85853f8c4c5d 100644
--- a/arch/arm/plat-samsung/include/plat/sdhci.h
+++ b/arch/arm/plat-samsung/include/plat/sdhci.h
@@ -28,11 +28,17 @@ enum cd_types {
S3C_SDHCI_CD_PERMANENT, /* no CD line, card permanently wired to host */
};
+enum clk_types {
+ S3C_SDHCI_CLK_DIV_INTERNAL, /* use mmc internal clock divider */
+ S3C_SDHCI_CLK_DIV_EXTERNAL, /* use external clock divider */
+};
+
/**
* struct s3c_sdhci_platdata() - Platform device data for Samsung SDHCI
* @max_width: The maximum number of data bits supported.
* @host_caps: Standard MMC host capabilities bit field.
* @cd_type: Type of Card Detection method (see cd_types enum above)
+ * @clk_type: Type of clock divider method (see clk_types enum above)
* @ext_cd_init: Initialize external card detect subsystem. Called on
* sdhci-s3c driver probe when cd_type == S3C_SDHCI_CD_EXTERNAL.
* notify_func argument is a callback to the sdhci-s3c driver
@@ -59,6 +65,7 @@ struct s3c_sdhci_platdata {
unsigned int max_width;
unsigned int host_caps;
enum cd_types cd_type;
+ enum clk_types clk_type;
char **clocks; /* set of clock sources */
@@ -110,6 +117,10 @@ extern void s5pv210_setup_sdhci0_cfg_gpio(struct platform_device *, int w);
extern void s5pv210_setup_sdhci1_cfg_gpio(struct platform_device *, int w);
extern void s5pv210_setup_sdhci2_cfg_gpio(struct platform_device *, int w);
extern void s5pv210_setup_sdhci3_cfg_gpio(struct platform_device *, int w);
+extern void s5pv310_setup_sdhci0_cfg_gpio(struct platform_device *, int w);
+extern void s5pv310_setup_sdhci1_cfg_gpio(struct platform_device *, int w);
+extern void s5pv310_setup_sdhci2_cfg_gpio(struct platform_device *, int w);
+extern void s5pv310_setup_sdhci3_cfg_gpio(struct platform_device *, int w);
/* S3C64XX SDHCI setup */
@@ -288,4 +299,57 @@ static inline void s5pv210_default_sdhci3(void) { }
#endif /* CONFIG_S5PV210_SETUP_SDHCI */
+/* S5PV310 SDHCI setup */
+#ifdef CONFIG_S5PV310_SETUP_SDHCI
+extern char *s5pv310_hsmmc_clksrcs[4];
+
+extern void s5pv310_setup_sdhci_cfg_card(struct platform_device *dev,
+ void __iomem *r,
+ struct mmc_ios *ios,
+ struct mmc_card *card);
+
+static inline void s5pv310_default_sdhci0(void)
+{
+#ifdef CONFIG_S3C_DEV_HSMMC
+ s3c_hsmmc0_def_platdata.clocks = s5pv310_hsmmc_clksrcs;
+ s3c_hsmmc0_def_platdata.cfg_gpio = s5pv310_setup_sdhci0_cfg_gpio;
+ s3c_hsmmc0_def_platdata.cfg_card = s5pv310_setup_sdhci_cfg_card;
+#endif
+}
+
+static inline void s5pv310_default_sdhci1(void)
+{
+#ifdef CONFIG_S3C_DEV_HSMMC1
+ s3c_hsmmc1_def_platdata.clocks = s5pv310_hsmmc_clksrcs;
+ s3c_hsmmc1_def_platdata.cfg_gpio = s5pv310_setup_sdhci1_cfg_gpio;
+ s3c_hsmmc1_def_platdata.cfg_card = s5pv310_setup_sdhci_cfg_card;
+#endif
+}
+
+static inline void s5pv310_default_sdhci2(void)
+{
+#ifdef CONFIG_S3C_DEV_HSMMC2
+ s3c_hsmmc2_def_platdata.clocks = s5pv310_hsmmc_clksrcs;
+ s3c_hsmmc2_def_platdata.cfg_gpio = s5pv310_setup_sdhci2_cfg_gpio;
+ s3c_hsmmc2_def_platdata.cfg_card = s5pv310_setup_sdhci_cfg_card;
+#endif
+}
+
+static inline void s5pv310_default_sdhci3(void)
+{
+#ifdef CONFIG_S3C_DEV_HSMMC3
+ s3c_hsmmc3_def_platdata.clocks = s5pv310_hsmmc_clksrcs;
+ s3c_hsmmc3_def_platdata.cfg_gpio = s5pv310_setup_sdhci3_cfg_gpio;
+ s3c_hsmmc3_def_platdata.cfg_card = s5pv310_setup_sdhci_cfg_card;
+#endif
+}
+
+#else
+static inline void s5pv310_default_sdhci0(void) { }
+static inline void s5pv310_default_sdhci1(void) { }
+static inline void s5pv310_default_sdhci2(void) { }
+static inline void s5pv310_default_sdhci3(void) { }
+
+#endif /* CONFIG_S5PV310_SETUP_SDHCI */
+
#endif /* __PLAT_S3C_SDHCI_H */
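The per-controller s5pv310_default_sdhciN() helpers are expected to run from the SoC's cpu-init path, so that board files can still override individual fields afterwards through s3c_sdhciN_set_platdata(). A hedged sketch of that ordering (the function name is illustrative):

#include <linux/init.h>
#include <plat/sdhci.h>

static void __init example_s5pv310_mmc_init(void)
{
        /* install the common clock sources and cfg callbacks first ... */
        s5pv310_default_sdhci0();
        s5pv310_default_sdhci2();
        /* ... the board may then override fields via s3c_sdhci0_set_platdata() */
}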
diff --git a/arch/arm/plat-samsung/pm-gpio.c b/arch/arm/plat-samsung/pm-gpio.c
index 7df03f87fbfa..96528200eb79 100644
--- a/arch/arm/plat-samsung/pm-gpio.c
+++ b/arch/arm/plat-samsung/pm-gpio.c
@@ -192,7 +192,7 @@ struct s3c_gpio_pm s3c_gpio_pm_2bit = {
.resume = s3c_gpio_pm_2bit_resume,
};
-#ifdef CONFIG_ARCH_S3C64XX
+#if defined(CONFIG_ARCH_S3C64XX) || defined(CONFIG_PLAT_S5P)
static void s3c_gpio_pm_4bit_save(struct s3c_gpio_chip *chip)
{
chip->pm_save[1] = __raw_readl(chip->base + OFFS_CON);
@@ -302,7 +302,7 @@ struct s3c_gpio_pm s3c_gpio_pm_4bit = {
.save = s3c_gpio_pm_4bit_save,
.resume = s3c_gpio_pm_4bit_resume,
};
-#endif /* CONFIG_ARCH_S3C64XX */
+#endif /* CONFIG_ARCH_S3C64XX || CONFIG_PLAT_S5P */
/**
* s3c_pm_save_gpio() - save gpio chip data for suspend
diff --git a/arch/arm/plat-samsung/s3c-pl330.c b/arch/arm/plat-samsung/s3c-pl330.c
index a91305a60aed..b4ff8d74ac40 100644
--- a/arch/arm/plat-samsung/s3c-pl330.c
+++ b/arch/arm/plat-samsung/s3c-pl330.c
@@ -15,6 +15,8 @@
#include <linux/io.h>
#include <linux/slab.h>
#include <linux/platform_device.h>
+#include <linux/clk.h>
+#include <linux/err.h>
#include <asm/hardware/pl330.h>
@@ -27,6 +29,7 @@
* @node: To attach to the global list of DMACs.
* @pi: PL330 configuration info for the DMAC.
* @kmcache: Pool to quickly allocate xfers for all channels in the dmac.
+ * @clk: Pointer of DMAC operation clock.
*/
struct s3c_pl330_dmac {
unsigned busy_chan;
@@ -34,6 +37,7 @@ struct s3c_pl330_dmac {
struct list_head node;
struct pl330_info *pi;
struct kmem_cache *kmcache;
+ struct clk *clk;
};
/**
@@ -1072,16 +1076,25 @@ static int pl330_probe(struct platform_device *pdev)
if (ret)
goto probe_err4;
- ret = pl330_add(pl330_info);
- if (ret)
- goto probe_err5;
-
/* Allocate a new DMAC */
s3c_pl330_dmac = kmalloc(sizeof(*s3c_pl330_dmac), GFP_KERNEL);
if (!s3c_pl330_dmac) {
ret = -ENOMEM;
+ goto probe_err5;
+ }
+
+ /* Get operation clock and enable it */
+ s3c_pl330_dmac->clk = clk_get(&pdev->dev, "pdma");
+ if (IS_ERR(s3c_pl330_dmac->clk)) {
+ dev_err(&pdev->dev, "Cannot get operation clock.\n");
+ ret = -EINVAL;
goto probe_err6;
}
+ clk_enable(s3c_pl330_dmac->clk);
+
+ ret = pl330_add(pl330_info);
+ if (ret)
+ goto probe_err7;
/* Hook the info */
s3c_pl330_dmac->pi = pl330_info;
@@ -1094,7 +1107,7 @@ static int pl330_probe(struct platform_device *pdev)
if (!s3c_pl330_dmac->kmcache) {
ret = -ENOMEM;
- goto probe_err7;
+ goto probe_err8;
}
/* Get the list of peripherals */
@@ -1120,10 +1133,13 @@ static int pl330_probe(struct platform_device *pdev)
return 0;
+probe_err8:
+ pl330_del(pl330_info);
probe_err7:
- kfree(s3c_pl330_dmac);
+ clk_disable(s3c_pl330_dmac->clk);
+ clk_put(s3c_pl330_dmac->clk);
probe_err6:
- pl330_del(pl330_info);
+ kfree(s3c_pl330_dmac);
probe_err5:
free_irq(irq, pl330_info);
probe_err4:
@@ -1188,6 +1204,10 @@ static int pl330_remove(struct platform_device *pdev)
}
}
+ /* Disable operation clock */
+ clk_disable(dmac->clk);
+ clk_put(dmac->clk);
+
/* Remove the DMAC */
list_del(&dmac->node);
kfree(dmac);
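The relabelled error path keeps to the usual probe() discipline: acquire the clock before pl330_add(), and unwind in exactly the reverse order on failure. A condensed sketch of the resulting flow (not the literal driver code; error codes and names are simplified):

#include <linux/clk.h>
#include <linux/err.h>
#include <linux/platform_device.h>
#include <asm/hardware/pl330.h>

static int example_probe_tail(struct platform_device *pdev,
                              struct pl330_info *pi)
{
        struct clk *clk;
        int ret;

        clk = clk_get(&pdev->dev, "pdma");
        if (IS_ERR(clk))
                return PTR_ERR(clk);
        clk_enable(clk);

        ret = pl330_add(pi);    /* needs its clock running */
        if (ret)
                goto err_clk;

        return 0;

err_clk:
        clk_disable(clk);
        clk_put(clk);
        return ret;
}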
diff --git a/arch/arm/vfp/vfphw.S b/arch/arm/vfp/vfphw.S
index d66cead97d28..9897dcfc16d6 100644
--- a/arch/arm/vfp/vfphw.S
+++ b/arch/arm/vfp/vfphw.S
@@ -206,6 +206,7 @@ ENTRY(vfp_save_state)
mov pc, lr
ENDPROC(vfp_save_state)
+ .align
last_VFP_context_address:
.word last_VFP_context
diff --git a/arch/avr32/Kconfig b/arch/avr32/Kconfig
index f0dc5b8075a7..313b13073c54 100644
--- a/arch/avr32/Kconfig
+++ b/arch/avr32/Kconfig
@@ -1,10 +1,3 @@
-#
-# For a description of the syntax of this configuration file,
-# see Documentation/kbuild/kconfig-language.txt.
-#
-
-mainmenu "Linux Kernel Configuration"
-
config AVR32
def_bool y
# With EMBEDDED=n, we get lots of stuff automatically selected
diff --git a/arch/avr32/include/asm/pgtable.h b/arch/avr32/include/asm/pgtable.h
index a9ae30c41e74..6fbfea61f7bb 100644
--- a/arch/avr32/include/asm/pgtable.h
+++ b/arch/avr32/include/asm/pgtable.h
@@ -319,9 +319,7 @@ static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
#define pte_offset_kernel(dir, address) \
((pte_t *) pmd_page_vaddr(*(dir)) + pte_index(address))
#define pte_offset_map(dir, address) pte_offset_kernel(dir, address)
-#define pte_offset_map_nested(dir, address) pte_offset_kernel(dir, address)
#define pte_unmap(pte) do { } while (0)
-#define pte_unmap_nested(pte) do { } while (0)
struct vm_area_struct;
extern void update_mmu_cache(struct vm_area_struct * vma,
diff --git a/arch/avr32/kernel/ptrace.c b/arch/avr32/kernel/ptrace.c
index 5e73c25f8f85..4aedcab7cd4b 100644
--- a/arch/avr32/kernel/ptrace.c
+++ b/arch/avr32/kernel/ptrace.c
@@ -146,9 +146,11 @@ static int ptrace_setregs(struct task_struct *tsk, const void __user *uregs)
return ret;
}
-long arch_ptrace(struct task_struct *child, long request, long addr, long data)
+long arch_ptrace(struct task_struct *child, long request,
+ unsigned long addr, unsigned long data)
{
int ret;
+ void __user *datap = (void __user *) data;
switch (request) {
/* Read the word at location addr in the child process */
@@ -158,8 +160,7 @@ long arch_ptrace(struct task_struct *child, long request, long addr, long data)
break;
case PTRACE_PEEKUSR:
- ret = ptrace_read_user(child, addr,
- (unsigned long __user *)data);
+ ret = ptrace_read_user(child, addr, datap);
break;
/* Write the word in data at location addr */
@@ -173,11 +174,11 @@ long arch_ptrace(struct task_struct *child, long request, long addr, long data)
break;
case PTRACE_GETREGS:
- ret = ptrace_getregs(child, (void __user *)data);
+ ret = ptrace_getregs(child, datap);
break;
case PTRACE_SETREGS:
- ret = ptrace_setregs(child, (const void __user *)data);
+ ret = ptrace_setregs(child, datap);
break;
default:
diff --git a/arch/blackfin/Kconfig b/arch/blackfin/Kconfig
index d9a1cb7ec30a..0a221d48152d 100644
--- a/arch/blackfin/Kconfig
+++ b/arch/blackfin/Kconfig
@@ -1,10 +1,3 @@
-#
-# For a description of the syntax of this configuration file,
-# see Documentation/kbuild/kconfig-language.txt.
-#
-
-mainmenu "Blackfin Kernel Configuration"
-
config SYMBOL_PREFIX
string
default "_"
diff --git a/arch/blackfin/Kconfig.debug b/arch/blackfin/Kconfig.debug
index d1825cb24768..acb83799a215 100644
--- a/arch/blackfin/Kconfig.debug
+++ b/arch/blackfin/Kconfig.debug
@@ -102,17 +102,6 @@ config DEBUG_DOUBLEFAULT_RESET
endchoice
-config DEBUG_ICACHE_CHECK
- bool "Check Instruction cache coherency"
- depends on DEBUG_KERNEL
- depends on DEBUG_HWERR
- help
- Say Y here if you are getting weird unexplained errors. This will
- ensure that icache is what SDRAM says it should be by doing a
- byte wise comparison between SDRAM and instruction cache. This
- also relocates the irq_panic() function to L1 memory, (which is
- un-cached).
-
config DEBUG_HUNT_FOR_ZERO
bool "Catch NULL pointer reads/writes"
default y
diff --git a/arch/blackfin/configs/BF518F-EZBRD_defconfig b/arch/blackfin/configs/BF518F-EZBRD_defconfig
index 46fac1bf0605..c0b988ee30df 100644
--- a/arch/blackfin/configs/BF518F-EZBRD_defconfig
+++ b/arch/blackfin/configs/BF518F-EZBRD_defconfig
@@ -35,6 +35,7 @@ CONFIG_C_CDPRIO=y
CONFIG_BANK_3=0x99B2
CONFIG_BINFMT_FLAT=y
CONFIG_BINFMT_ZFLAT=y
+CONFIG_PM=y
CONFIG_NET=y
CONFIG_PACKET=y
CONFIG_UNIX=y
@@ -114,7 +115,6 @@ CONFIG_DEBUG_DOUBLEFAULT=y
CONFIG_DEBUG_BFIN_HWTRACE_COMPRESSION_ONE=y
CONFIG_EARLY_PRINTK=y
CONFIG_CPLB_INFO=y
-CONFIG_SECURITY=y
CONFIG_CRYPTO=y
# CONFIG_CRYPTO_ANSI_CPRNG is not set
CONFIG_CRC_CCITT=m
diff --git a/arch/blackfin/configs/BF526-EZBRD_defconfig b/arch/blackfin/configs/BF526-EZBRD_defconfig
index 80240806cf9e..864af5b68874 100644
--- a/arch/blackfin/configs/BF526-EZBRD_defconfig
+++ b/arch/blackfin/configs/BF526-EZBRD_defconfig
@@ -40,6 +40,7 @@ CONFIG_C_CDPRIO=y
CONFIG_BANK_3=0x99B2
CONFIG_BINFMT_FLAT=y
CONFIG_BINFMT_ZFLAT=y
+CONFIG_PM=y
CONFIG_NET=y
CONFIG_PACKET=y
CONFIG_UNIX=y
@@ -152,7 +153,6 @@ CONFIG_DEBUG_DOUBLEFAULT=y
CONFIG_DEBUG_BFIN_HWTRACE_COMPRESSION_ONE=y
CONFIG_EARLY_PRINTK=y
CONFIG_CPLB_INFO=y
-CONFIG_SECURITY=y
CONFIG_CRYPTO=y
# CONFIG_CRYPTO_ANSI_CPRNG is not set
CONFIG_CRC_CCITT=m
diff --git a/arch/blackfin/configs/BF527-AD7160-EVAL_defconfig b/arch/blackfin/configs/BF527-AD7160-EVAL_defconfig
index 08c55f6b8b7a..7b6a3370dbe2 100644
--- a/arch/blackfin/configs/BF527-AD7160-EVAL_defconfig
+++ b/arch/blackfin/configs/BF527-AD7160-EVAL_defconfig
@@ -9,6 +9,7 @@ CONFIG_EMBEDDED=y
# CONFIG_ELF_CORE is not set
# CONFIG_AIO is not set
CONFIG_SLAB=y
+CONFIG_MMAP_ALLOW_UNINITIALIZED=y
CONFIG_MODULES=y
CONFIG_MODULE_UNLOAD=y
# CONFIG_BLK_DEV_BSG is not set
@@ -101,5 +102,4 @@ CONFIG_DETECT_HUNG_TASK=y
# CONFIG_DEBUG_BFIN_NO_KERN_HWTRACE is not set
CONFIG_EARLY_PRINTK=y
CONFIG_CPLB_INFO=y
-CONFIG_SECURITY=y
CONFIG_CRC_CCITT=m
diff --git a/arch/blackfin/configs/BF527-EZKIT-V2_defconfig b/arch/blackfin/configs/BF527-EZKIT-V2_defconfig
index 4a9125558fcf..4faa6b46a352 100644
--- a/arch/blackfin/configs/BF527-EZKIT-V2_defconfig
+++ b/arch/blackfin/configs/BF527-EZKIT-V2_defconfig
@@ -20,6 +20,7 @@ CONFIG_MODULE_UNLOAD=y
# CONFIG_LBDAF is not set
# CONFIG_BLK_DEV_BSG is not set
# CONFIG_IOSCHED_DEADLINE is not set
+# CONFIG_IOSCHED_CFQ is not set
CONFIG_PREEMPT_VOLUNTARY=y
CONFIG_BF527=y
CONFIG_BF_REV_0_2=y
@@ -38,6 +39,7 @@ CONFIG_C_CDPRIO=y
CONFIG_BANK_3=0x99B2
CONFIG_BINFMT_FLAT=y
CONFIG_BINFMT_ZFLAT=y
+CONFIG_PM=y
CONFIG_NET=y
CONFIG_PACKET=y
CONFIG_UNIX=y
@@ -181,6 +183,5 @@ CONFIG_DEBUG_DOUBLEFAULT=y
CONFIG_DEBUG_BFIN_HWTRACE_COMPRESSION_ONE=y
CONFIG_EARLY_PRINTK=y
CONFIG_CPLB_INFO=y
-CONFIG_SECURITY=y
CONFIG_CRYPTO=y
# CONFIG_CRYPTO_ANSI_CPRNG is not set
diff --git a/arch/blackfin/configs/BF527-EZKIT_defconfig b/arch/blackfin/configs/BF527-EZKIT_defconfig
index 8ccf3cec7534..9d893eb68243 100644
--- a/arch/blackfin/configs/BF527-EZKIT_defconfig
+++ b/arch/blackfin/configs/BF527-EZKIT_defconfig
@@ -20,6 +20,7 @@ CONFIG_MODULE_UNLOAD=y
# CONFIG_LBDAF is not set
# CONFIG_BLK_DEV_BSG is not set
# CONFIG_IOSCHED_DEADLINE is not set
+# CONFIG_IOSCHED_CFQ is not set
CONFIG_PREEMPT_VOLUNTARY=y
CONFIG_BF527=y
CONFIG_BF_REV_0_1=y
@@ -37,6 +38,7 @@ CONFIG_C_CDPRIO=y
CONFIG_BANK_3=0x99B2
CONFIG_BINFMT_FLAT=y
CONFIG_BINFMT_ZFLAT=y
+CONFIG_PM=y
CONFIG_NET=y
CONFIG_PACKET=y
CONFIG_UNIX=y
@@ -173,6 +175,5 @@ CONFIG_DEBUG_DOUBLEFAULT=y
CONFIG_DEBUG_BFIN_HWTRACE_COMPRESSION_ONE=y
CONFIG_EARLY_PRINTK=y
CONFIG_CPLB_INFO=y
-CONFIG_SECURITY=y
CONFIG_CRYPTO=y
# CONFIG_CRYPTO_ANSI_CPRNG is not set
diff --git a/arch/blackfin/configs/BF527-TLL6527M_defconfig b/arch/blackfin/configs/BF527-TLL6527M_defconfig
index 92ded5edc86c..97a2767c80f8 100644
--- a/arch/blackfin/configs/BF527-TLL6527M_defconfig
+++ b/arch/blackfin/configs/BF527-TLL6527M_defconfig
@@ -174,7 +174,6 @@ CONFIG_DEBUG_DOUBLEFAULT=y
CONFIG_DEBUG_BFIN_HWTRACE_COMPRESSION_ONE=y
CONFIG_EARLY_PRINTK=y
CONFIG_CPLB_INFO=y
-CONFIG_SECURITY=y
CONFIG_CRYPTO=y
# CONFIG_CRYPTO_ANSI_CPRNG is not set
CONFIG_CRC7=m
diff --git a/arch/blackfin/configs/BF533-EZKIT_defconfig b/arch/blackfin/configs/BF533-EZKIT_defconfig
index c40e0f1c7eac..f84774360c5b 100644
--- a/arch/blackfin/configs/BF533-EZKIT_defconfig
+++ b/arch/blackfin/configs/BF533-EZKIT_defconfig
@@ -20,6 +20,7 @@ CONFIG_MODULE_UNLOAD=y
# CONFIG_LBDAF is not set
# CONFIG_BLK_DEV_BSG is not set
# CONFIG_IOSCHED_DEADLINE is not set
+# CONFIG_IOSCHED_CFQ is not set
CONFIG_PREEMPT_VOLUNTARY=y
CONFIG_BFIN533_EZKIT=y
CONFIG_TIMER0=11
@@ -107,6 +108,5 @@ CONFIG_DEBUG_DOUBLEFAULT=y
CONFIG_DEBUG_BFIN_HWTRACE_COMPRESSION_ONE=y
CONFIG_EARLY_PRINTK=y
CONFIG_CPLB_INFO=y
-CONFIG_SECURITY=y
CONFIG_CRYPTO=y
# CONFIG_CRYPTO_ANSI_CPRNG is not set
diff --git a/arch/blackfin/configs/BF533-STAMP_defconfig b/arch/blackfin/configs/BF533-STAMP_defconfig
index aa8c1d7453ba..0e7262c04cc2 100644
--- a/arch/blackfin/configs/BF533-STAMP_defconfig
+++ b/arch/blackfin/configs/BF533-STAMP_defconfig
@@ -20,6 +20,7 @@ CONFIG_MODULE_UNLOAD=y
# CONFIG_LBDAF is not set
# CONFIG_BLK_DEV_BSG is not set
# CONFIG_IOSCHED_DEADLINE is not set
+# CONFIG_IOSCHED_CFQ is not set
CONFIG_PREEMPT_VOLUNTARY=y
CONFIG_TIMER0=11
CONFIG_HIGH_RES_TIMERS=y
@@ -121,6 +122,5 @@ CONFIG_DEBUG_DOUBLEFAULT=y
CONFIG_DEBUG_BFIN_HWTRACE_COMPRESSION_ONE=y
CONFIG_EARLY_PRINTK=y
CONFIG_CPLB_INFO=y
-CONFIG_SECURITY=y
CONFIG_CRYPTO=y
# CONFIG_CRYPTO_ANSI_CPRNG is not set
diff --git a/arch/blackfin/configs/BF537-STAMP_defconfig b/arch/blackfin/configs/BF537-STAMP_defconfig
index f245c0b427e4..4d14a002e7bd 100644
--- a/arch/blackfin/configs/BF537-STAMP_defconfig
+++ b/arch/blackfin/configs/BF537-STAMP_defconfig
@@ -20,9 +20,9 @@ CONFIG_MODULE_UNLOAD=y
# CONFIG_LBDAF is not set
# CONFIG_BLK_DEV_BSG is not set
# CONFIG_IOSCHED_DEADLINE is not set
+# CONFIG_IOSCHED_CFQ is not set
CONFIG_PREEMPT_VOLUNTARY=y
CONFIG_BF537=y
-CONFIG_IRQ_ERROR=11
CONFIG_HIGH_RES_TIMERS=y
CONFIG_NOMMU_INITIAL_TRIM_EXCESS=0
CONFIG_BFIN_GPTIMERS=m
@@ -133,6 +133,5 @@ CONFIG_DEBUG_DOUBLEFAULT=y
CONFIG_DEBUG_BFIN_HWTRACE_COMPRESSION_ONE=y
CONFIG_EARLY_PRINTK=y
CONFIG_CPLB_INFO=y
-CONFIG_SECURITY=y
CONFIG_CRYPTO=y
# CONFIG_CRYPTO_ANSI_CPRNG is not set
diff --git a/arch/blackfin/configs/BF538-EZKIT_defconfig b/arch/blackfin/configs/BF538-EZKIT_defconfig
index 74a330cca9b4..fbee9d776f56 100644
--- a/arch/blackfin/configs/BF538-EZKIT_defconfig
+++ b/arch/blackfin/configs/BF538-EZKIT_defconfig
@@ -20,6 +20,7 @@ CONFIG_MODULE_UNLOAD=y
# CONFIG_LBDAF is not set
# CONFIG_BLK_DEV_BSG is not set
# CONFIG_IOSCHED_DEADLINE is not set
+# CONFIG_IOSCHED_CFQ is not set
CONFIG_PREEMPT_VOLUNTARY=y
CONFIG_BF538=y
CONFIG_IRQ_TIMER0=12
@@ -31,6 +32,7 @@ CONFIG_C_CDPRIO=y
CONFIG_BANK_3=0x99B2
CONFIG_BINFMT_FLAT=y
CONFIG_BINFMT_ZFLAT=y
+CONFIG_PM=y
CONFIG_NET=y
CONFIG_PACKET=y
CONFIG_UNIX=y
@@ -129,6 +131,5 @@ CONFIG_DEBUG_DOUBLEFAULT=y
CONFIG_DEBUG_BFIN_HWTRACE_COMPRESSION_ONE=y
CONFIG_EARLY_PRINTK=y
CONFIG_CPLB_INFO=y
-CONFIG_SECURITY=y
CONFIG_CRYPTO=y
# CONFIG_CRYPTO_ANSI_CPRNG is not set
diff --git a/arch/blackfin/configs/BF548-EZKIT_defconfig b/arch/blackfin/configs/BF548-EZKIT_defconfig
index 29373cbba227..05dd11db2f7d 100644
--- a/arch/blackfin/configs/BF548-EZKIT_defconfig
+++ b/arch/blackfin/configs/BF548-EZKIT_defconfig
@@ -40,6 +40,7 @@ CONFIG_EBIU_MODEVAL=0x1
CONFIG_EBIU_FCTLVAL=0x6
CONFIG_BINFMT_FLAT=y
CONFIG_BINFMT_ZFLAT=y
+CONFIG_PM=y
CONFIG_NET=y
CONFIG_PACKET=y
CONFIG_UNIX=y
@@ -62,7 +63,7 @@ CONFIG_IRCOMM=m
CONFIG_IRTTY_SIR=m
CONFIG_BFIN_SIR=m
CONFIG_BFIN_SIR3=y
-CONFIG_LIB80211=m
+# CONFIG_WIRELESS is not set
CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
CONFIG_FW_LOADER=m
CONFIG_MTD=y
@@ -92,6 +93,7 @@ CONFIG_NET_ETHERNET=y
CONFIG_SMSC911X=y
# CONFIG_NETDEV_1000 is not set
# CONFIG_NETDEV_10000 is not set
+# CONFIG_WLAN is not set
CONFIG_INPUT_FF_MEMLESS=m
# CONFIG_INPUT_MOUSEDEV is not set
CONFIG_INPUT_EVDEV=m
diff --git a/arch/blackfin/configs/BF561-ACVILON_defconfig b/arch/blackfin/configs/BF561-ACVILON_defconfig
index 1f12034f5610..bcb14d1c5664 100644
--- a/arch/blackfin/configs/BF561-ACVILON_defconfig
+++ b/arch/blackfin/configs/BF561-ACVILON_defconfig
@@ -14,6 +14,7 @@ CONFIG_EMBEDDED=y
# CONFIG_EVENTFD is not set
# CONFIG_AIO is not set
CONFIG_SLAB=y
+CONFIG_MMAP_ALLOW_UNINITIALIZED=y
CONFIG_MODULES=y
CONFIG_MODULE_UNLOAD=y
# CONFIG_LBDAF is not set
@@ -44,6 +45,7 @@ CONFIG_IP_PNP=y
CONFIG_SYN_COOKIES=y
# CONFIG_INET_LRO is not set
# CONFIG_IPV6 is not set
+# CONFIG_WIRELESS is not set
CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
# CONFIG_FW_LOADER is not set
CONFIG_MTD=y
@@ -71,6 +73,7 @@ CONFIG_NET_ETHERNET=y
CONFIG_SMSC911X=y
# CONFIG_NETDEV_1000 is not set
# CONFIG_NETDEV_10000 is not set
+# CONFIG_WLAN is not set
# CONFIG_INPUT is not set
# CONFIG_SERIO is not set
# CONFIG_VT is not set
@@ -147,5 +150,4 @@ CONFIG_DEBUG_INFO=y
CONFIG_DEBUG_MMRS=y
# CONFIG_DEBUG_BFIN_NO_KERN_HWTRACE is not set
CONFIG_CPLB_INFO=y
-CONFIG_SECURITY=y
# CONFIG_CRYPTO_ANSI_CPRNG is not set
diff --git a/arch/blackfin/configs/BF561-EZKIT_defconfig b/arch/blackfin/configs/BF561-EZKIT_defconfig
index 8913d997fa47..843aaa54a9e3 100644
--- a/arch/blackfin/configs/BF561-EZKIT_defconfig
+++ b/arch/blackfin/configs/BF561-EZKIT_defconfig
@@ -35,6 +35,7 @@ CONFIG_C_CDPRIO=y
CONFIG_BANK_3=0xAAC2
CONFIG_BINFMT_FLAT=y
CONFIG_BINFMT_ZFLAT=y
+CONFIG_PM=y
CONFIG_NET=y
CONFIG_PACKET=y
CONFIG_UNIX=y
diff --git a/arch/blackfin/configs/BlackStamp_defconfig b/arch/blackfin/configs/BlackStamp_defconfig
index 0242917b69c9..dae7adf3b2a2 100644
--- a/arch/blackfin/configs/BlackStamp_defconfig
+++ b/arch/blackfin/configs/BlackStamp_defconfig
@@ -40,6 +40,7 @@ CONFIG_INET=y
CONFIG_IP_PNP=y
# CONFIG_INET_LRO is not set
# CONFIG_IPV6 is not set
+# CONFIG_WIRELESS is not set
CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
# CONFIG_FW_LOADER is not set
CONFIG_MTD=y
@@ -63,6 +64,7 @@ CONFIG_NET_ETHERNET=y
CONFIG_SMC91X=y
# CONFIG_NETDEV_1000 is not set
# CONFIG_NETDEV_10000 is not set
+# CONFIG_WLAN is not set
# CONFIG_INPUT_MOUSEDEV is not set
CONFIG_INPUT_EVDEV=m
# CONFIG_INPUT_KEYBOARD is not set
@@ -104,5 +106,4 @@ CONFIG_DEBUG_MMRS=y
# CONFIG_DEBUG_BFIN_NO_KERN_HWTRACE is not set
CONFIG_EARLY_PRINTK=y
CONFIG_CPLB_INFO=y
-CONFIG_SECURITY=y
CONFIG_CRC_CCITT=m
diff --git a/arch/blackfin/configs/CM-BF527_defconfig b/arch/blackfin/configs/CM-BF527_defconfig
index 0512fef3d55a..f3414244bfed 100644
--- a/arch/blackfin/configs/CM-BF527_defconfig
+++ b/arch/blackfin/configs/CM-BF527_defconfig
@@ -50,6 +50,7 @@ CONFIG_IP_PNP=y
# CONFIG_INET_LRO is not set
# CONFIG_INET_DIAG is not set
# CONFIG_IPV6 is not set
+# CONFIG_WIRELESS is not set
CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
# CONFIG_FW_LOADER is not set
CONFIG_MTD=y
@@ -70,9 +71,9 @@ CONFIG_BLK_DEV_SD=y
CONFIG_NETDEVICES=y
CONFIG_NET_ETHERNET=y
CONFIG_BFIN_MAC=y
-CONFIG_BFIN_MAC_RMII=y
# CONFIG_NETDEV_1000 is not set
# CONFIG_NETDEV_10000 is not set
+# CONFIG_WLAN is not set
# CONFIG_INPUT is not set
# CONFIG_SERIO is not set
# CONFIG_VT is not set
@@ -124,7 +125,6 @@ CONFIG_DEBUG_FS=y
# CONFIG_RCU_CPU_STALL_DETECTOR is not set
# CONFIG_DEBUG_BFIN_NO_KERN_HWTRACE is not set
CONFIG_EARLY_PRINTK=y
-CONFIG_SECURITY=y
CONFIG_CRYPTO=y
# CONFIG_CRYPTO_ANSI_CPRNG is not set
CONFIG_CRC_CCITT=m
diff --git a/arch/blackfin/configs/CM-BF533_defconfig b/arch/blackfin/configs/CM-BF533_defconfig
index 05e09be8b4c5..8c7e08f173d4 100644
--- a/arch/blackfin/configs/CM-BF533_defconfig
+++ b/arch/blackfin/configs/CM-BF533_defconfig
@@ -33,6 +33,7 @@ CONFIG_BINFMT_SHARED_FLAT=y
CONFIG_NET=y
CONFIG_PACKET=y
CONFIG_UNIX=y
+# CONFIG_WIRELESS is not set
CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
CONFIG_MTD=y
CONFIG_MTD_PARTITIONS=y
@@ -47,6 +48,7 @@ CONFIG_MTD_PHYSMAP=y
CONFIG_NETDEVICES=y
# CONFIG_NETDEV_1000 is not set
# CONFIG_NETDEV_10000 is not set
+# CONFIG_WLAN is not set
# CONFIG_INPUT is not set
# CONFIG_SERIO is not set
# CONFIG_VT is not set
@@ -72,7 +74,6 @@ CONFIG_DEBUG_MMRS=y
# CONFIG_DEBUG_BFIN_NO_KERN_HWTRACE is not set
CONFIG_EARLY_PRINTK=y
CONFIG_CPLB_INFO=y
-CONFIG_SECURITY=y
CONFIG_CRC_CCITT=y
CONFIG_CRC_ITU_T=y
CONFIG_CRC7=y
diff --git a/arch/blackfin/configs/CM-BF537E_defconfig b/arch/blackfin/configs/CM-BF537E_defconfig
index d2eb5325b9c3..bd3cb766d078 100644
--- a/arch/blackfin/configs/CM-BF537E_defconfig
+++ b/arch/blackfin/configs/CM-BF537E_defconfig
@@ -48,6 +48,7 @@ CONFIG_IP_PNP=y
# CONFIG_INET_LRO is not set
# CONFIG_INET_DIAG is not set
# CONFIG_IPV6 is not set
+# CONFIG_WIRELESS is not set
CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
CONFIG_MTD=y
CONFIG_MTD_CMDLINE_PARTS=y
@@ -65,6 +66,7 @@ CONFIG_NET_ETHERNET=y
CONFIG_BFIN_MAC=y
# CONFIG_NETDEV_1000 is not set
# CONFIG_NETDEV_10000 is not set
+# CONFIG_WLAN is not set
# CONFIG_INPUT is not set
# CONFIG_SERIO is not set
# CONFIG_VT is not set
@@ -99,7 +101,6 @@ CONFIG_DEBUG_MMRS=y
# CONFIG_DEBUG_BFIN_NO_KERN_HWTRACE is not set
CONFIG_EARLY_PRINTK=y
CONFIG_CPLB_INFO=y
-CONFIG_SECURITY=y
CONFIG_CRYPTO=y
# CONFIG_CRYPTO_ANSI_CPRNG is not set
CONFIG_CRC_CCITT=m
diff --git a/arch/blackfin/configs/CM-BF537U_defconfig b/arch/blackfin/configs/CM-BF537U_defconfig
index 9d52c443eb09..82224f37c04e 100644
--- a/arch/blackfin/configs/CM-BF537U_defconfig
+++ b/arch/blackfin/configs/CM-BF537U_defconfig
@@ -44,6 +44,7 @@ CONFIG_INET=y
# CONFIG_INET_XFRM_MODE_BEET is not set
# CONFIG_INET_DIAG is not set
# CONFIG_IPV6 is not set
+# CONFIG_WIRELESS is not set
CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
CONFIG_MTD=y
CONFIG_MTD_CMDLINE_PARTS=y
@@ -59,6 +60,7 @@ CONFIG_BLK_DEV_RAM=y
CONFIG_NETDEVICES=y
# CONFIG_NETDEV_1000 is not set
# CONFIG_NETDEV_10000 is not set
+# CONFIG_WLAN is not set
# CONFIG_INPUT is not set
# CONFIG_SERIO is not set
# CONFIG_VT is not set
@@ -90,7 +92,6 @@ CONFIG_DEBUG_MMRS=y
# CONFIG_DEBUG_BFIN_NO_KERN_HWTRACE is not set
CONFIG_EARLY_PRINTK=y
CONFIG_CPLB_INFO=y
-CONFIG_SECURITY=y
CONFIG_CRC_CCITT=m
CONFIG_CRC_ITU_T=y
CONFIG_CRC7=y
diff --git a/arch/blackfin/configs/CM-BF548_defconfig b/arch/blackfin/configs/CM-BF548_defconfig
index 9de13cf2cdda..433598c6e773 100644
--- a/arch/blackfin/configs/CM-BF548_defconfig
+++ b/arch/blackfin/configs/CM-BF548_defconfig
@@ -49,6 +49,7 @@ CONFIG_INET_XFRM_MODE_BEET=m
# CONFIG_INET_LRO is not set
# CONFIG_INET_DIAG is not set
# CONFIG_IPV6 is not set
+# CONFIG_WIRELESS is not set
CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
# CONFIG_FW_LOADER is not set
CONFIG_MTD=y
@@ -71,6 +72,7 @@ CONFIG_NET_ETHERNET=y
CONFIG_SMSC911X=y
# CONFIG_NETDEV_1000 is not set
# CONFIG_NETDEV_10000 is not set
+# CONFIG_WLAN is not set
# CONFIG_INPUT_MOUSEDEV is not set
CONFIG_INPUT_EVDEV=m
CONFIG_INPUT_EVBUG=m
@@ -167,7 +169,6 @@ CONFIG_DEBUG_FS=y
# CONFIG_DEBUG_BFIN_NO_KERN_HWTRACE is not set
CONFIG_EARLY_PRINTK=y
CONFIG_CPLB_INFO=y
-CONFIG_SECURITY=y
# CONFIG_CRYPTO_ANSI_CPRNG is not set
# CONFIG_CRYPTO_HW is not set
CONFIG_CRC_CCITT=m
diff --git a/arch/blackfin/configs/CM-BF561_defconfig b/arch/blackfin/configs/CM-BF561_defconfig
index 238353a53bf0..ded7d845cb39 100644
--- a/arch/blackfin/configs/CM-BF561_defconfig
+++ b/arch/blackfin/configs/CM-BF561_defconfig
@@ -48,6 +48,7 @@ CONFIG_INET=y
# CONFIG_INET_LRO is not set
# CONFIG_INET_DIAG is not set
# CONFIG_IPV6 is not set
+# CONFIG_WIRELESS is not set
CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
CONFIG_MTD=y
CONFIG_MTD_PARTITIONS=y
@@ -67,6 +68,7 @@ CONFIG_MII=y
CONFIG_SMSC911X=m
# CONFIG_NETDEV_1000 is not set
# CONFIG_NETDEV_10000 is not set
+# CONFIG_WLAN is not set
# CONFIG_INPUT is not set
# CONFIG_SERIO is not set
# CONFIG_VT is not set
@@ -99,7 +101,6 @@ CONFIG_DEBUG_MMRS=y
# CONFIG_DEBUG_BFIN_NO_KERN_HWTRACE is not set
CONFIG_EARLY_PRINTK=y
CONFIG_CPLB_INFO=y
-CONFIG_SECURITY=y
CONFIG_CRC_CCITT=m
CONFIG_CRC_ITU_T=y
CONFIG_CRC7=y
diff --git a/arch/blackfin/configs/H8606_defconfig b/arch/blackfin/configs/H8606_defconfig
index 0cb524e8947f..700fb701c121 100644
--- a/arch/blackfin/configs/H8606_defconfig
+++ b/arch/blackfin/configs/H8606_defconfig
@@ -33,6 +33,7 @@ CONFIG_IRLAN=m
CONFIG_IRCOMM=m
CONFIG_IRDA_CACHE_LAST_LSAP=y
CONFIG_IRTTY_SIR=m
+# CONFIG_WIRELESS is not set
# CONFIG_FW_LOADER is not set
CONFIG_MTD=y
CONFIG_MTD_PARTITIONS=y
@@ -50,6 +51,7 @@ CONFIG_NET_ETHERNET=y
CONFIG_DM9000=y
# CONFIG_NETDEV_1000 is not set
# CONFIG_NETDEV_10000 is not set
+# CONFIG_WLAN is not set
# CONFIG_INPUT_MOUSEDEV is not set
CONFIG_INPUT_EVDEV=y
# CONFIG_KEYBOARD_ATKBD is not set
@@ -84,4 +86,3 @@ CONFIG_NFS_V3=y
CONFIG_NLS=m
# CONFIG_DEBUG_BFIN_NO_KERN_HWTRACE is not set
CONFIG_CPLB_INFO=y
-CONFIG_SECURITY=y
diff --git a/arch/blackfin/configs/IP0X_defconfig b/arch/blackfin/configs/IP0X_defconfig
index 2a3411ef19fd..b40156d217e3 100644
--- a/arch/blackfin/configs/IP0X_defconfig
+++ b/arch/blackfin/configs/IP0X_defconfig
@@ -41,6 +41,7 @@ CONFIG_IP_NF_IPTABLES=y
CONFIG_IP_NF_FILTER=y
CONFIG_IP_NF_TARGET_REJECT=y
CONFIG_IP_NF_MANGLE=y
+# CONFIG_WIRELESS is not set
CONFIG_MTD=y
CONFIG_MTD_PARTITIONS=y
CONFIG_MTD_CHAR=y
@@ -60,6 +61,7 @@ CONFIG_NET_ETHERNET=y
CONFIG_DM9000=y
# CONFIG_NETDEV_1000 is not set
# CONFIG_NETDEV_10000 is not set
+# CONFIG_WLAN is not set
# CONFIG_INPUT is not set
# CONFIG_SERIO is not set
# CONFIG_VT is not set
@@ -89,5 +91,4 @@ CONFIG_NLS_CODEPAGE_437=y
CONFIG_NLS_ISO8859_1=y
# CONFIG_DEBUG_BFIN_NO_KERN_HWTRACE is not set
CONFIG_CPLB_INFO=y
-CONFIG_SECURITY=y
CONFIG_CRC_CCITT=y
diff --git a/arch/blackfin/configs/PNAV-10_defconfig b/arch/blackfin/configs/PNAV-10_defconfig
index fea303386548..be866d95ed76 100644
--- a/arch/blackfin/configs/PNAV-10_defconfig
+++ b/arch/blackfin/configs/PNAV-10_defconfig
@@ -14,6 +14,7 @@ CONFIG_MODULE_UNLOAD=y
# CONFIG_LBDAF is not set
# CONFIG_BLK_DEV_BSG is not set
# CONFIG_IOSCHED_DEADLINE is not set
+# CONFIG_IOSCHED_CFQ is not set
CONFIG_PREEMPT_VOLUNTARY=y
CONFIG_BF537=y
CONFIG_IRQ_TIMER0=12
@@ -107,7 +108,6 @@ CONFIG_SMB_FS=m
# CONFIG_DEBUG_HUNT_FOR_ZERO is not set
# CONFIG_DEBUG_BFIN_NO_KERN_HWTRACE is not set
# CONFIG_ACCESS_CHECK is not set
-CONFIG_SECURITY=y
CONFIG_CRYPTO=y
# CONFIG_CRYPTO_ANSI_CPRNG is not set
CONFIG_CRC_CCITT=m
diff --git a/arch/blackfin/configs/SRV1_defconfig b/arch/blackfin/configs/SRV1_defconfig
index 9811b3186847..b64bdf759b82 100644
--- a/arch/blackfin/configs/SRV1_defconfig
+++ b/arch/blackfin/configs/SRV1_defconfig
@@ -35,6 +35,7 @@ CONFIG_IRLAN=m
CONFIG_IRCOMM=m
CONFIG_IRDA_CACHE_LAST_LSAP=y
CONFIG_IRTTY_SIR=m
+# CONFIG_WIRELESS is not set
# CONFIG_FW_LOADER is not set
CONFIG_MTD=y
CONFIG_MTD_PARTITIONS=y
@@ -51,6 +52,7 @@ CONFIG_EEPROM_AT25=m
CONFIG_NETDEVICES=y
# CONFIG_NETDEV_1000 is not set
# CONFIG_NETDEV_10000 is not set
+# CONFIG_WLAN is not set
# CONFIG_INPUT_MOUSEDEV is not set
CONFIG_INPUT_EVDEV=m
# CONFIG_INPUT_KEYBOARD is not set
@@ -85,4 +87,3 @@ CONFIG_DEBUG_KERNEL=y
CONFIG_DEBUG_INFO=y
# CONFIG_DEBUG_BFIN_NO_KERN_HWTRACE is not set
CONFIG_CPLB_INFO=y
-CONFIG_SECURITY=y
diff --git a/arch/blackfin/configs/TCM-BF518_defconfig b/arch/blackfin/configs/TCM-BF518_defconfig
index 412bf79b9724..1bccd9a50986 100644
--- a/arch/blackfin/configs/TCM-BF518_defconfig
+++ b/arch/blackfin/configs/TCM-BF518_defconfig
@@ -128,7 +128,6 @@ CONFIG_DEBUG_DOUBLEFAULT=y
CONFIG_DEBUG_BFIN_HWTRACE_COMPRESSION_ONE=y
CONFIG_EARLY_PRINTK=y
CONFIG_CPLB_INFO=y
-CONFIG_SECURITY=y
CONFIG_CRYPTO=y
# CONFIG_CRYPTO_ANSI_CPRNG is not set
CONFIG_CRC_CCITT=m
diff --git a/arch/blackfin/configs/TCM-BF537_defconfig b/arch/blackfin/configs/TCM-BF537_defconfig
index 04bf52c4cf12..00ce899e9e5d 100644
--- a/arch/blackfin/configs/TCM-BF537_defconfig
+++ b/arch/blackfin/configs/TCM-BF537_defconfig
@@ -40,6 +40,7 @@ CONFIG_UNIX=y
CONFIG_INET=y
# CONFIG_INET_DIAG is not set
# CONFIG_IPV6 is not set
+# CONFIG_WIRELESS is not set
CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
CONFIG_MTD=y
CONFIG_MTD_CMDLINE_PARTS=y
@@ -57,6 +58,7 @@ CONFIG_NET_ETHERNET=y
CONFIG_BFIN_MAC=y
# CONFIG_NETDEV_1000 is not set
# CONFIG_NETDEV_10000 is not set
+# CONFIG_WLAN is not set
# CONFIG_INPUT is not set
# CONFIG_SERIO is not set
# CONFIG_VT is not set
diff --git a/arch/blackfin/include/asm/bfin5xx_spi.h b/arch/blackfin/include/asm/bfin5xx_spi.h
index 0b5136e334b5..5392583d0253 100644
--- a/arch/blackfin/include/asm/bfin5xx_spi.h
+++ b/arch/blackfin/include/asm/bfin5xx_spi.h
@@ -60,6 +60,8 @@ struct bfin_spi_regs {
__BFP(shadow);
};
+#undef __BFP
+
#define MAX_CTRL_CS 8 /* cs in spi controller */
/* device.platform_data for SSP controller devices */
diff --git a/arch/blackfin/include/asm/bfin_ppi.h b/arch/blackfin/include/asm/bfin_ppi.h
index 003900886f97..3be05faa2c65 100644
--- a/arch/blackfin/include/asm/bfin_ppi.h
+++ b/arch/blackfin/include/asm/bfin_ppi.h
@@ -48,4 +48,6 @@ struct bfin_eppi_regs {
u32 clip;
};
+#undef __BFP
+
#endif
diff --git a/arch/blackfin/include/asm/bfin_twi.h b/arch/blackfin/include/asm/bfin_twi.h
new file mode 100644
index 000000000000..e767d649dfc4
--- /dev/null
+++ b/arch/blackfin/include/asm/bfin_twi.h
@@ -0,0 +1,45 @@
+/*
+ * bfin_twi.h - interface to Blackfin TWIs
+ *
+ * Copyright 2005-2010 Analog Devices Inc.
+ *
+ * Licensed under the GPL-2 or later.
+ */
+
+#ifndef __ASM_BFIN_TWI_H__
+#define __ASM_BFIN_TWI_H__
+
+#include <linux/types.h>
+
+/*
+ * All Blackfin system MMRs are padded to 32bits even if the register
+ * itself is only 16bits. So use a helper macro to streamline this.
+ */
+#define __BFP(m) u16 m; u16 __pad_##m
+
+/*
+ * bfin twi registers layout
+ */
+struct bfin_twi_regs {
+ __BFP(clkdiv);
+ __BFP(control);
+ __BFP(slave_ctl);
+ __BFP(slave_stat);
+ __BFP(slave_addr);
+ __BFP(master_ctl);
+ __BFP(master_stat);
+ __BFP(master_addr);
+ __BFP(int_stat);
+ __BFP(int_mask);
+ __BFP(fifo_ctl);
+ __BFP(fifo_stat);
+ u32 __pad[20];
+ __BFP(xmt_data8);
+ __BFP(xmt_data16);
+ __BFP(rcv_data8);
+ __BFP(rcv_data16);
+};
+
+#undef __BFP
+
+#endif
diff --git a/arch/blackfin/include/asm/cdef_LPBlackfin.h b/arch/blackfin/include/asm/cdef_LPBlackfin.h
index a1f6817687e8..59af63c0c2be 100644
--- a/arch/blackfin/include/asm/cdef_LPBlackfin.h
+++ b/arch/blackfin/include/asm/cdef_LPBlackfin.h
@@ -179,7 +179,7 @@
#define bfin_write_ITEST_DATA0(val) bfin_write32(ITEST_DATA0,val)
#define bfin_write_ITEST_DATA1(val) bfin_write32(ITEST_DATA1,val)
-#if ANOMALY_05000481
+#if !ANOMALY_05000481
#define bfin_read_ITEST_COMMAND() bfin_read32(ITEST_COMMAND)
#define bfin_read_ITEST_DATA0() bfin_read32(ITEST_DATA0)
#define bfin_read_ITEST_DATA1() bfin_read32(ITEST_DATA1)
diff --git a/arch/blackfin/include/asm/entry.h b/arch/blackfin/include/asm/entry.h
index a6886f6e4819..4104d5783e2c 100644
--- a/arch/blackfin/include/asm/entry.h
+++ b/arch/blackfin/include/asm/entry.h
@@ -15,14 +15,6 @@
#define LFLUSH_I_AND_D 0x00000808
#define LSIGTRAP 5
-/* process bits for task_struct.flags */
-#define PF_TRACESYS_OFF 3
-#define PF_TRACESYS_BIT 5
-#define PF_PTRACED_OFF 3
-#define PF_PTRACED_BIT 4
-#define PF_DTRACE_OFF 1
-#define PF_DTRACE_BIT 5
-
/*
* NOTE! The single-stepping code assumes that all interrupt handlers
* start by saving SYSCFG on the stack with their first instruction.
diff --git a/arch/blackfin/kernel/kgdb.c b/arch/blackfin/kernel/kgdb.c
index 08bc44ea6883..edae461b1c54 100644
--- a/arch/blackfin/kernel/kgdb.c
+++ b/arch/blackfin/kernel/kgdb.c
@@ -320,7 +320,7 @@ static void bfin_correct_hw_break(void)
}
}
-void kgdb_disable_hw_debug(struct pt_regs *regs)
+static void bfin_disable_hw_debug(struct pt_regs *regs)
{
/* Disable hardware debugging while we are in kgdb */
bfin_write_WPIACTL(0);
@@ -406,6 +406,7 @@ struct kgdb_arch arch_kgdb_ops = {
#endif
.set_hw_breakpoint = bfin_set_hw_break,
.remove_hw_breakpoint = bfin_remove_hw_break,
+ .disable_hw_break = bfin_disable_hw_debug,
.remove_all_hw_break = bfin_remove_all_hw_break,
.correct_hw_break = bfin_correct_hw_break,
};
diff --git a/arch/blackfin/kernel/process.c b/arch/blackfin/kernel/process.c
index cd0c090ebc54..b407bc8ad918 100644
--- a/arch/blackfin/kernel/process.c
+++ b/arch/blackfin/kernel/process.c
@@ -7,7 +7,6 @@
*/
#include <linux/module.h>
-#include <linux/smp_lock.h>
#include <linux/unistd.h>
#include <linux/user.h>
#include <linux/uaccess.h>
diff --git a/arch/blackfin/kernel/ptrace.c b/arch/blackfin/kernel/ptrace.c
index b35839354130..75089f80855d 100644
--- a/arch/blackfin/kernel/ptrace.c
+++ b/arch/blackfin/kernel/ptrace.c
@@ -38,12 +38,13 @@
* Get contents of register REGNO in task TASK.
*/
static inline long
-get_reg(struct task_struct *task, long regno, unsigned long __user *datap)
+get_reg(struct task_struct *task, unsigned long regno,
+ unsigned long __user *datap)
{
long tmp;
struct pt_regs *regs = task_pt_regs(task);
- if (regno & 3 || regno > PT_LAST_PSEUDO || regno < 0)
+ if (regno & 3 || regno > PT_LAST_PSEUDO)
return -EIO;
switch (regno) {
@@ -74,11 +75,11 @@ get_reg(struct task_struct *task, long regno, unsigned long __user *datap)
* Write contents of register REGNO in task TASK.
*/
static inline int
-put_reg(struct task_struct *task, long regno, unsigned long data)
+put_reg(struct task_struct *task, unsigned long regno, unsigned long data)
{
struct pt_regs *regs = task_pt_regs(task);
- if (regno & 3 || regno > PT_LAST_PSEUDO || regno < 0)
+ if (regno & 3 || regno > PT_LAST_PSEUDO)
return -EIO;
switch (regno) {
@@ -240,7 +241,8 @@ void user_disable_single_step(struct task_struct *child)
clear_tsk_thread_flag(child, TIF_SINGLESTEP);
}
-long arch_ptrace(struct task_struct *child, long request, long addr, long data)
+long arch_ptrace(struct task_struct *child, long request,
+ unsigned long addr, unsigned long data)
{
int ret;
unsigned long __user *datap = (unsigned long __user *)data;
@@ -368,14 +370,14 @@ long arch_ptrace(struct task_struct *child, long request, long addr, long data)
return copy_regset_to_user(child, &user_bfin_native_view,
REGSET_GENERAL,
0, sizeof(struct pt_regs),
- (void __user *)data);
+ datap);
case PTRACE_SETREGS:
pr_debug("ptrace: PTRACE_SETREGS\n");
return copy_regset_from_user(child, &user_bfin_native_view,
REGSET_GENERAL,
0, sizeof(struct pt_regs),
- (const void __user *)data);
+ datap);
case_default:
default:
diff --git a/arch/blackfin/mach-bf518/boards/ezbrd.c b/arch/blackfin/mach-bf518/boards/ezbrd.c
index f95e6096719b..b894c8abe7ec 100644
--- a/arch/blackfin/mach-bf518/boards/ezbrd.c
+++ b/arch/blackfin/mach-bf518/boards/ezbrd.c
@@ -87,13 +87,55 @@ static struct platform_device rtc_device = {
#endif
#if defined(CONFIG_BFIN_MAC) || defined(CONFIG_BFIN_MAC_MODULE)
+#include <linux/bfin_mac.h>
+static const unsigned short bfin_mac_peripherals[] = {
+ P_MII0_ETxD0,
+ P_MII0_ETxD1,
+ P_MII0_ETxEN,
+ P_MII0_ERxD0,
+ P_MII0_ERxD1,
+ P_MII0_TxCLK,
+ P_MII0_PHYINT,
+ P_MII0_CRS,
+ P_MII0_MDC,
+ P_MII0_MDIO,
+ 0
+};
+
+static struct bfin_phydev_platform_data bfin_phydev_data[] = {
+ {
+ .addr = 1,
+ .irq = IRQ_MAC_PHYINT,
+ },
+ {
+ .addr = 2,
+ .irq = IRQ_MAC_PHYINT,
+ },
+ {
+ .addr = 3,
+ .irq = IRQ_MAC_PHYINT,
+ },
+};
+
+static struct bfin_mii_bus_platform_data bfin_mii_bus_data = {
+ .phydev_number = 3,
+ .phydev_data = bfin_phydev_data,
+ .phy_mode = PHY_INTERFACE_MODE_MII,
+ .mac_peripherals = bfin_mac_peripherals,
+};
+
static struct platform_device bfin_mii_bus = {
.name = "bfin_mii_bus",
+ .dev = {
+ .platform_data = &bfin_mii_bus_data,
+ }
};
static struct platform_device bfin_mac_device = {
.name = "bfin_mac",
- .dev.platform_data = &bfin_mii_bus,
+ .dev = {
+ .platform_data = &bfin_mii_bus,
+ }
};
#if defined(CONFIG_NET_DSA_KSZ8893M) || defined(CONFIG_NET_DSA_KSZ8893M_MODULE)
diff --git a/arch/blackfin/mach-bf518/boards/tcm-bf518.c b/arch/blackfin/mach-bf518/boards/tcm-bf518.c
index bead810a6546..e6ce1d7c523a 100644
--- a/arch/blackfin/mach-bf518/boards/tcm-bf518.c
+++ b/arch/blackfin/mach-bf518/boards/tcm-bf518.c
@@ -81,13 +81,35 @@ static struct platform_device rtc_device = {
#endif
#if defined(CONFIG_BFIN_MAC) || defined(CONFIG_BFIN_MAC_MODULE)
+#include <linux/bfin_mac.h>
+static const unsigned short bfin_mac_peripherals[] = P_MII0;
+
+static struct bfin_phydev_platform_data bfin_phydev_data[] = {
+ {
+ .addr = 1,
+ .irq = IRQ_MAC_PHYINT,
+ },
+};
+
+static struct bfin_mii_bus_platform_data bfin_mii_bus_data = {
+ .phydev_number = 1,
+ .phydev_data = bfin_phydev_data,
+ .phy_mode = PHY_INTERFACE_MODE_MII,
+ .mac_peripherals = bfin_mac_peripherals,
+};
+
static struct platform_device bfin_mii_bus = {
.name = "bfin_mii_bus",
+ .dev = {
+ .platform_data = &bfin_mii_bus_data,
+ }
};
static struct platform_device bfin_mac_device = {
.name = "bfin_mac",
- .dev.platform_data = &bfin_mii_bus,
+ .dev = {
+ .platform_data = &bfin_mii_bus,
+ }
};
#endif
diff --git a/arch/blackfin/mach-bf527/boards/cm_bf527.c b/arch/blackfin/mach-bf527/boards/cm_bf527.c
index 38037c7e125a..2c31af7a320a 100644
--- a/arch/blackfin/mach-bf527/boards/cm_bf527.c
+++ b/arch/blackfin/mach-bf527/boards/cm_bf527.c
@@ -273,13 +273,35 @@ static struct platform_device dm9000_device = {
#endif
#if defined(CONFIG_BFIN_MAC) || defined(CONFIG_BFIN_MAC_MODULE)
+#include <linux/bfin_mac.h>
+static const unsigned short bfin_mac_peripherals[] = P_RMII0;
+
+static struct bfin_phydev_platform_data bfin_phydev_data[] = {
+ {
+ .addr = 1,
+ .irq = IRQ_MAC_PHYINT,
+ },
+};
+
+static struct bfin_mii_bus_platform_data bfin_mii_bus_data = {
+ .phydev_number = 1,
+ .phydev_data = bfin_phydev_data,
+ .phy_mode = PHY_INTERFACE_MODE_RMII,
+ .mac_peripherals = bfin_mac_peripherals,
+};
+
static struct platform_device bfin_mii_bus = {
.name = "bfin_mii_bus",
+ .dev = {
+ .platform_data = &bfin_mii_bus_data,
+ }
};
static struct platform_device bfin_mac_device = {
.name = "bfin_mac",
- .dev.platform_data = &bfin_mii_bus,
+ .dev = {
+ .platform_data = &bfin_mii_bus,
+ }
};
#endif
diff --git a/arch/blackfin/mach-bf527/boards/ezbrd.c b/arch/blackfin/mach-bf527/boards/ezbrd.c
index 6cc64a1e78b9..9a736a850c5c 100644
--- a/arch/blackfin/mach-bf527/boards/ezbrd.c
+++ b/arch/blackfin/mach-bf527/boards/ezbrd.c
@@ -193,13 +193,35 @@ static struct platform_device rtc_device = {
#if defined(CONFIG_BFIN_MAC) || defined(CONFIG_BFIN_MAC_MODULE)
+#include <linux/bfin_mac.h>
+static const unsigned short bfin_mac_peripherals[] = P_RMII0;
+
+static struct bfin_phydev_platform_data bfin_phydev_data[] = {
+ {
+ .addr = 1,
+ .irq = IRQ_MAC_PHYINT,
+ },
+};
+
+static struct bfin_mii_bus_platform_data bfin_mii_bus_data = {
+ .phydev_number = 1,
+ .phydev_data = bfin_phydev_data,
+ .phy_mode = PHY_INTERFACE_MODE_RMII,
+ .mac_peripherals = bfin_mac_peripherals,
+};
+
static struct platform_device bfin_mii_bus = {
.name = "bfin_mii_bus",
+ .dev = {
+ .platform_data = &bfin_mii_bus_data,
+ }
};
static struct platform_device bfin_mac_device = {
.name = "bfin_mac",
- .dev.platform_data = &bfin_mii_bus,
+ .dev = {
+ .platform_data = &bfin_mii_bus,
+ }
};
#endif
diff --git a/arch/blackfin/mach-bf527/boards/ezkit.c b/arch/blackfin/mach-bf527/boards/ezkit.c
index df82723fb504..9222bc00bbd3 100644
--- a/arch/blackfin/mach-bf527/boards/ezkit.c
+++ b/arch/blackfin/mach-bf527/boards/ezkit.c
@@ -366,13 +366,35 @@ static struct platform_device dm9000_device = {
#endif
#if defined(CONFIG_BFIN_MAC) || defined(CONFIG_BFIN_MAC_MODULE)
+#include <linux/bfin_mac.h>
+static const unsigned short bfin_mac_peripherals[] = P_RMII0;
+
+static struct bfin_phydev_platform_data bfin_phydev_data[] = {
+ {
+ .addr = 1,
+ .irq = IRQ_MAC_PHYINT,
+ },
+};
+
+static struct bfin_mii_bus_platform_data bfin_mii_bus_data = {
+ .phydev_number = 1,
+ .phydev_data = bfin_phydev_data,
+ .phy_mode = PHY_INTERFACE_MODE_RMII,
+ .mac_peripherals = bfin_mac_peripherals,
+};
+
static struct platform_device bfin_mii_bus = {
.name = "bfin_mii_bus",
+ .dev = {
+ .platform_data = &bfin_mii_bus_data,
+ }
};
static struct platform_device bfin_mac_device = {
.name = "bfin_mac",
- .dev.platform_data = &bfin_mii_bus,
+ .dev = {
+ .platform_data = &bfin_mii_bus,
+ }
};
#endif
diff --git a/arch/blackfin/mach-bf527/boards/tll6527m.c b/arch/blackfin/mach-bf527/boards/tll6527m.c
index ae4130e97c01..9ec575729e2c 100644
--- a/arch/blackfin/mach-bf527/boards/tll6527m.c
+++ b/arch/blackfin/mach-bf527/boards/tll6527m.c
@@ -257,13 +257,35 @@ static struct platform_device rtc_device = {
#endif
#if defined(CONFIG_BFIN_MAC) || defined(CONFIG_BFIN_MAC_MODULE)
+#include <linux/bfin_mac.h>
+static const unsigned short bfin_mac_peripherals[] = P_RMII0;
+
+static struct bfin_phydev_platform_data bfin_phydev_data[] = {
+ {
+ .addr = 1,
+ .irq = IRQ_MAC_PHYINT,
+ },
+};
+
+static struct bfin_mii_bus_platform_data bfin_mii_bus_data = {
+ .phydev_number = 1,
+ .phydev_data = bfin_phydev_data,
+ .phy_mode = PHY_INTERFACE_MODE_RMII,
+ .mac_peripherals = bfin_mac_peripherals,
+};
+
static struct platform_device bfin_mii_bus = {
.name = "bfin_mii_bus",
+ .dev = {
+ .platform_data = &bfin_mii_bus_data,
+ }
};
static struct platform_device bfin_mac_device = {
.name = "bfin_mac",
- .dev.platform_data = &bfin_mii_bus,
+ .dev = {
+ .platform_data = &bfin_mii_bus,
+ }
};
#endif
diff --git a/arch/blackfin/mach-bf537/boards/cm_bf537e.c b/arch/blackfin/mach-bf537/boards/cm_bf537e.c
index e2e7be40ef44..836698c4ee54 100644
--- a/arch/blackfin/mach-bf537/boards/cm_bf537e.c
+++ b/arch/blackfin/mach-bf537/boards/cm_bf537e.c
@@ -597,13 +597,35 @@ static struct platform_device bfin_sport1_uart_device = {
#endif
#if defined(CONFIG_BFIN_MAC) || defined(CONFIG_BFIN_MAC_MODULE)
+#include <linux/bfin_mac.h>
+static const unsigned short bfin_mac_peripherals[] = P_MII0;
+
+static struct bfin_phydev_platform_data bfin_phydev_data[] = {
+ {
+ .addr = 1,
+ .irq = IRQ_MAC_PHYINT,
+ },
+};
+
+static struct bfin_mii_bus_platform_data bfin_mii_bus_data = {
+ .phydev_number = 1,
+ .phydev_data = bfin_phydev_data,
+ .phy_mode = PHY_INTERFACE_MODE_MII,
+ .mac_peripherals = bfin_mac_peripherals,
+};
+
static struct platform_device bfin_mii_bus = {
.name = "bfin_mii_bus",
+ .dev = {
+ .platform_data = &bfin_mii_bus_data,
+ }
};
static struct platform_device bfin_mac_device = {
.name = "bfin_mac",
- .dev.platform_data = &bfin_mii_bus,
+ .dev = {
+ .platform_data = &bfin_mii_bus,
+ }
};
#endif
diff --git a/arch/blackfin/mach-bf537/boards/cm_bf537u.c b/arch/blackfin/mach-bf537/boards/cm_bf537u.c
index 752c833f7ca8..2a85670273cb 100644
--- a/arch/blackfin/mach-bf537/boards/cm_bf537u.c
+++ b/arch/blackfin/mach-bf537/boards/cm_bf537u.c
@@ -562,13 +562,35 @@ static struct platform_device bfin_sport1_uart_device = {
#endif
#if defined(CONFIG_BFIN_MAC) || defined(CONFIG_BFIN_MAC_MODULE)
+#include <linux/bfin_mac.h>
+static const unsigned short bfin_mac_peripherals[] = P_MII0;
+
+static struct bfin_phydev_platform_data bfin_phydev_data[] = {
+ {
+ .addr = 1,
+ .irq = IRQ_MAC_PHYINT,
+ },
+};
+
+static struct bfin_mii_bus_platform_data bfin_mii_bus_data = {
+ .phydev_number = 1,
+ .phydev_data = bfin_phydev_data,
+ .phy_mode = PHY_INTERFACE_MODE_MII,
+ .mac_peripherals = bfin_mac_peripherals,
+};
+
static struct platform_device bfin_mii_bus = {
.name = "bfin_mii_bus",
+ .dev = {
+ .platform_data = &bfin_mii_bus_data,
+ }
};
static struct platform_device bfin_mac_device = {
.name = "bfin_mac",
- .dev.platform_data = &bfin_mii_bus,
+ .dev = {
+ .platform_data = &bfin_mii_bus,
+ }
};
#endif
diff --git a/arch/blackfin/mach-bf537/boards/minotaur.c b/arch/blackfin/mach-bf537/boards/minotaur.c
index 05d45994480e..49800518412c 100644
--- a/arch/blackfin/mach-bf537/boards/minotaur.c
+++ b/arch/blackfin/mach-bf537/boards/minotaur.c
@@ -68,13 +68,35 @@ static struct platform_device rtc_device = {
#endif
#if defined(CONFIG_BFIN_MAC) || defined(CONFIG_BFIN_MAC_MODULE)
+#include <linux/bfin_mac.h>
+static const unsigned short bfin_mac_peripherals[] = P_MII0;
+
+static struct bfin_phydev_platform_data bfin_phydev_data[] = {
+ {
+ .addr = 1,
+ .irq = IRQ_MAC_PHYINT,
+ },
+};
+
+static struct bfin_mii_bus_platform_data bfin_mii_bus_data = {
+ .phydev_number = 1,
+ .phydev_data = bfin_phydev_data,
+ .phy_mode = PHY_INTERFACE_MODE_MII,
+ .mac_peripherals = bfin_mac_peripherals,
+};
+
static struct platform_device bfin_mii_bus = {
.name = "bfin_mii_bus",
+ .dev = {
+ .platform_data = &bfin_mii_bus_data,
+ }
};
static struct platform_device bfin_mac_device = {
.name = "bfin_mac",
- .dev.platform_data = &bfin_mii_bus,
+ .dev = {
+ .platform_data = &bfin_mii_bus,
+ }
};
#endif
diff --git a/arch/blackfin/mach-bf537/boards/pnav10.c b/arch/blackfin/mach-bf537/boards/pnav10.c
index 6b03808800a6..b95807894e25 100644
--- a/arch/blackfin/mach-bf537/boards/pnav10.c
+++ b/arch/blackfin/mach-bf537/boards/pnav10.c
@@ -99,13 +99,35 @@ static struct platform_device smc91x_device = {
#endif
#if defined(CONFIG_BFIN_MAC) || defined(CONFIG_BFIN_MAC_MODULE)
+#include <linux/bfin_mac.h>
+static const unsigned short bfin_mac_peripherals[] = P_RMII0;
+
+static struct bfin_phydev_platform_data bfin_phydev_data[] = {
+ {
+ .addr = 1,
+ .irq = IRQ_MAC_PHYINT,
+ },
+};
+
+static struct bfin_mii_bus_platform_data bfin_mii_bus_data = {
+ .phydev_number = 1,
+ .phydev_data = bfin_phydev_data,
+ .phy_mode = PHY_INTERFACE_MODE_RMII,
+ .mac_peripherals = bfin_mac_peripherals,
+};
+
static struct platform_device bfin_mii_bus = {
.name = "bfin_mii_bus",
+ .dev = {
+ .platform_data = &bfin_mii_bus_data,
+ }
};
static struct platform_device bfin_mac_device = {
.name = "bfin_mac",
- .dev.platform_data = &bfin_mii_bus,
+ .dev = {
+ .platform_data = &bfin_mii_bus,
+ }
};
#endif
diff --git a/arch/blackfin/mach-bf537/boards/stamp.c b/arch/blackfin/mach-bf537/boards/stamp.c
index cd2c797c8c9f..3aa344ce8e52 100644
--- a/arch/blackfin/mach-bf537/boards/stamp.c
+++ b/arch/blackfin/mach-bf537/boards/stamp.c
@@ -327,13 +327,35 @@ static struct platform_device bfin_can_device = {
#endif
#if defined(CONFIG_BFIN_MAC) || defined(CONFIG_BFIN_MAC_MODULE)
+#include <linux/bfin_mac.h>
+static const unsigned short bfin_mac_peripherals[] = P_MII0;
+
+static struct bfin_phydev_platform_data bfin_phydev_data[] = {
+ {
+ .addr = 1,
+ .irq = PHY_POLL, /* IRQ_MAC_PHYINT */
+ },
+};
+
+static struct bfin_mii_bus_platform_data bfin_mii_bus_data = {
+ .phydev_number = 1,
+ .phydev_data = bfin_phydev_data,
+ .phy_mode = PHY_INTERFACE_MODE_MII,
+ .mac_peripherals = bfin_mac_peripherals,
+};
+
static struct platform_device bfin_mii_bus = {
.name = "bfin_mii_bus",
+ .dev = {
+ .platform_data = &bfin_mii_bus_data,
+ }
};
static struct platform_device bfin_mac_device = {
.name = "bfin_mac",
- .dev.platform_data = &bfin_mii_bus,
+ .dev = {
+ .platform_data = &bfin_mii_bus,
+ }
};
#endif
diff --git a/arch/blackfin/mach-bf537/boards/tcm_bf537.c b/arch/blackfin/mach-bf537/boards/tcm_bf537.c
index a4d62b5fc7ba..31498add1a42 100644
--- a/arch/blackfin/mach-bf537/boards/tcm_bf537.c
+++ b/arch/blackfin/mach-bf537/boards/tcm_bf537.c
@@ -564,13 +564,35 @@ static struct platform_device bfin_sport1_uart_device = {
#endif
#if defined(CONFIG_BFIN_MAC) || defined(CONFIG_BFIN_MAC_MODULE)
+#include <linux/bfin_mac.h>
+static const unsigned short bfin_mac_peripherals[] = P_MII0;
+
+static struct bfin_phydev_platform_data bfin_phydev_data[] = {
+ {
+ .addr = 1,
+ .irq = IRQ_MAC_PHYINT,
+ },
+};
+
+static struct bfin_mii_bus_platform_data bfin_mii_bus_data = {
+ .phydev_number = 1,
+ .phydev_data = bfin_phydev_data,
+ .phy_mode = PHY_INTERFACE_MODE_MII,
+ .mac_peripherals = bfin_mac_peripherals,
+};
+
static struct platform_device bfin_mii_bus = {
.name = "bfin_mii_bus",
+ .dev = {
+ .platform_data = &bfin_mii_bus_data,
+ }
};
static struct platform_device bfin_mac_device = {
.name = "bfin_mac",
- .dev.platform_data = &bfin_mii_bus,
+ .dev = {
+ .platform_data = &bfin_mii_bus,
+ }
};
#endif
diff --git a/arch/blackfin/mach-common/Makefile b/arch/blackfin/mach-common/Makefile
index 814cb483853b..ff299f24aba0 100644
--- a/arch/blackfin/mach-common/Makefile
+++ b/arch/blackfin/mach-common/Makefile
@@ -11,4 +11,3 @@ obj-$(CONFIG_CPU_FREQ) += cpufreq.o
obj-$(CONFIG_CPU_VOLTAGE) += dpmc.o
obj-$(CONFIG_SMP) += smp.o
obj-$(CONFIG_BFIN_KERNEL_CLOCK) += clocks-init.o
-obj-$(CONFIG_DEBUG_ICACHE_CHECK) += irqpanic.o
diff --git a/arch/blackfin/mach-common/irqpanic.c b/arch/blackfin/mach-common/irqpanic.c
deleted file mode 100644
index c6496249e2bc..000000000000
--- a/arch/blackfin/mach-common/irqpanic.c
+++ /dev/null
@@ -1,106 +0,0 @@
-/*
- * panic kernel with dump information
- *
- * Copyright 2005-2009 Analog Devices Inc.
- *
- * Licensed under the GPL-2 or later.
- */
-
-#include <linux/module.h>
-#include <linux/kernel_stat.h>
-#include <linux/sched.h>
-#include <asm/blackfin.h>
-
-#define L1_ICACHE_START 0xffa10000
-#define L1_ICACHE_END 0xffa13fff
-
-/*
- * irq_panic - calls panic with string setup
- */
-__attribute__ ((l1_text))
-asmlinkage void irq_panic(int reason, struct pt_regs *regs)
-{
- unsigned int cmd, tag, ca, cache_hi, cache_lo, *pa;
- unsigned short i, j, die;
- unsigned int bad[10][6];
-
- /* check entire cache for coherency
- * Since printk is in cacheable memory,
- * don't call it until you have checked everything
- */
-
- die = 0;
- i = 0;
-
- /* check icache */
-
- for (ca = L1_ICACHE_START; ca <= L1_ICACHE_END && i < 10; ca += 32) {
-
- /* Grab various address bits for the itest_cmd fields */
- cmd = (((ca & 0x3000) << 4) | /* ca[13:12] for SBNK[1:0] */
- ((ca & 0x0c00) << 16) | /* ca[11:10] for WAYSEL[1:0] */
- ((ca & 0x3f8)) | /* ca[09:03] for SET[4:0] and DW[1:0] */
- 0); /* Access Tag, Read access */
-
- SSYNC();
- bfin_write_ITEST_COMMAND(cmd);
- SSYNC();
- tag = bfin_read_ITEST_DATA0();
- SSYNC();
-
- /* if tag is marked as valid, check it */
- if (tag & 1) {
- /* The icache is arranged in 4 groups of 64-bits */
- for (j = 0; j < 32; j += 8) {
- cmd = ((((ca + j) & 0x3000) << 4) | /* ca[13:12] for SBNK[1:0] */
- (((ca + j) & 0x0c00) << 16) | /* ca[11:10] for WAYSEL[1:0] */
- (((ca + j) & 0x3f8)) | /* ca[09:03] for SET[4:0] and DW[1:0] */
- 4); /* Access Data, Read access */
-
- SSYNC();
- bfin_write_ITEST_COMMAND(cmd);
- SSYNC();
-
- cache_hi = bfin_read_ITEST_DATA1();
- cache_lo = bfin_read_ITEST_DATA0();
-
- pa = ((unsigned int *)((tag & 0xffffcc00) |
- ((ca + j) & ~(0xffffcc00))));
-
- /*
- * Debugging this, enable
- *
- * printk("addr: %08x %08x%08x | %08x%08x\n",
- * ((unsigned int *)((tag & 0xffffcc00) | ((ca+j) & ~(0xffffcc00)))),
- * cache_hi, cache_lo, *(pa+1), *pa);
- */
-
- if (cache_hi != *(pa + 1) || cache_lo != *pa) {
- /* Since icache is not working, stay out of it, by not printing */
- die = 1;
- bad[i][0] = (ca + j);
- bad[i][1] = cache_hi;
- bad[i][2] = cache_lo;
- bad[i][3] = ((tag & 0xffffcc00) |
- ((ca + j) & ~(0xffffcc00)));
- bad[i][4] = *(pa + 1);
- bad[i][5] = *(pa);
- i++;
- }
- }
- }
- }
- if (die) {
- printk(KERN_EMERG "icache coherency error\n");
- for (j = 0; j <= i; j++) {
- printk(KERN_EMERG
- "cache address : %08x cache value : %08x%08x\n",
- bad[j][0], bad[j][1], bad[j][2]);
- printk(KERN_EMERG
- "physical address: %08x SDRAM value : %08x%08x\n",
- bad[j][3], bad[j][4], bad[j][5]);
- }
- panic("icache coherency error");
- } else
- printk(KERN_EMERG "icache checked, and OK\n");
-}
diff --git a/arch/cris/Kconfig b/arch/cris/Kconfig
index aefe3b18a074..613e62831c55 100644
--- a/arch/cris/Kconfig
+++ b/arch/cris/Kconfig
@@ -1,10 +1,3 @@
-#
-# For a description of the syntax of this configuration file,
-# see the Configure script.
-#
-
-mainmenu "Linux/CRIS Kernel Configuration"
-
config MMU
bool
default y
diff --git a/arch/cris/arch-v10/kernel/ptrace.c b/arch/cris/arch-v10/kernel/ptrace.c
index e70c804e9377..320065f3cbe5 100644
--- a/arch/cris/arch-v10/kernel/ptrace.c
+++ b/arch/cris/arch-v10/kernel/ptrace.c
@@ -76,9 +76,11 @@ ptrace_disable(struct task_struct *child)
* (in user space) where the result of the ptrace call is written (instead of
* being returned).
*/
-long arch_ptrace(struct task_struct *child, long request, long addr, long data)
+long arch_ptrace(struct task_struct *child, long request,
+ unsigned long addr, unsigned long data)
{
int ret;
+ unsigned int regno = addr >> 2;
unsigned long __user *datap = (unsigned long __user *)data;
switch (request) {
@@ -93,10 +95,10 @@ long arch_ptrace(struct task_struct *child, long request, long addr, long data)
unsigned long tmp;
ret = -EIO;
- if ((addr & 3) || addr < 0 || addr > PT_MAX << 2)
+ if ((addr & 3) || regno > PT_MAX)
break;
- tmp = get_reg(child, addr >> 2);
+ tmp = get_reg(child, regno);
ret = put_user(tmp, datap);
break;
}
@@ -110,19 +112,17 @@ long arch_ptrace(struct task_struct *child, long request, long addr, long data)
/* Write the word at location address in the USER area. */
case PTRACE_POKEUSR:
ret = -EIO;
- if ((addr & 3) || addr < 0 || addr > PT_MAX << 2)
+ if ((addr & 3) || regno > PT_MAX)
break;
- addr >>= 2;
-
- if (addr == PT_DCCR) {
+ if (regno == PT_DCCR) {
/* don't allow the tracing process to change stuff like
* interrupt enable, kernel/user bit, dma enables etc.
*/
data &= DCCR_MASK;
data |= get_reg(child, PT_DCCR) & ~DCCR_MASK;
}
- if (put_reg(child, addr, data))
+ if (put_reg(child, regno, data))
break;
ret = 0;
break;
@@ -141,7 +141,7 @@ long arch_ptrace(struct task_struct *child, long request, long addr, long data)
break;
}
- data += sizeof(long);
+ datap++;
}
break;
@@ -165,7 +165,7 @@ long arch_ptrace(struct task_struct *child, long request, long addr, long data)
}
put_reg(child, i, tmp);
- data += sizeof(long);
+ datap++;
}
break;
diff --git a/arch/cris/arch-v32/kernel/ptrace.c b/arch/cris/arch-v32/kernel/ptrace.c
index f4ebd1e7d0f5..511ece94a574 100644
--- a/arch/cris/arch-v32/kernel/ptrace.c
+++ b/arch/cris/arch-v32/kernel/ptrace.c
@@ -126,9 +126,11 @@ ptrace_disable(struct task_struct *child)
}
-long arch_ptrace(struct task_struct *child, long request, long addr, long data)
+long arch_ptrace(struct task_struct *child, long request,
+ unsigned long addr, unsigned long data)
{
int ret;
+ unsigned int regno = addr >> 2;
unsigned long __user *datap = (unsigned long __user *)data;
switch (request) {
@@ -163,10 +165,10 @@ long arch_ptrace(struct task_struct *child, long request, long addr, long data)
unsigned long tmp;
ret = -EIO;
- if ((addr & 3) || addr < 0 || addr > PT_MAX << 2)
+ if ((addr & 3) || regno > PT_MAX)
break;
- tmp = get_reg(child, addr >> 2);
+ tmp = get_reg(child, regno);
ret = put_user(tmp, datap);
break;
}
@@ -180,19 +182,17 @@ long arch_ptrace(struct task_struct *child, long request, long addr, long data)
/* Write the word at location address in the USER area. */
case PTRACE_POKEUSR:
ret = -EIO;
- if ((addr & 3) || addr < 0 || addr > PT_MAX << 2)
+ if ((addr & 3) || regno > PT_MAX)
break;
- addr >>= 2;
-
- if (addr == PT_CCS) {
+ if (regno == PT_CCS) {
/* don't allow the tracing process to change stuff like
* interrupt enable, kernel/user bit, dma enables etc.
*/
data &= CCS_MASK;
data |= get_reg(child, PT_CCS) & ~CCS_MASK;
}
- if (put_reg(child, addr, data))
+ if (put_reg(child, regno, data))
break;
ret = 0;
break;
diff --git a/arch/cris/include/asm/pgtable.h b/arch/cris/include/asm/pgtable.h
index f63d6fccbc6c..9eaae217b21b 100644
--- a/arch/cris/include/asm/pgtable.h
+++ b/arch/cris/include/asm/pgtable.h
@@ -248,10 +248,8 @@ static inline pgd_t * pgd_offset(const struct mm_struct *mm, unsigned long addre
((pte_t *) pmd_page_vaddr(*(dir)) + __pte_offset(address))
#define pte_offset_map(dir, address) \
((pte_t *)page_address(pmd_page(*(dir))) + __pte_offset(address))
-#define pte_offset_map_nested(dir, address) pte_offset_map(dir, address)
#define pte_unmap(pte) do { } while (0)
-#define pte_unmap_nested(pte) do { } while (0)
#define pte_pfn(x) ((unsigned long)(__va((x).pte)) >> PAGE_SHIFT)
#define pfn_pte(pfn, prot) __pte(((pfn) << PAGE_SHIFT) | pgprot_val(prot))
diff --git a/arch/frv/Kconfig b/arch/frv/Kconfig
index 0f2417df6323..f6bcb039cd6d 100644
--- a/arch/frv/Kconfig
+++ b/arch/frv/Kconfig
@@ -1,7 +1,3 @@
-#
-# For a description of the syntax of this configuration file,
-# see Documentation/kbuild/kconfig-language.txt.
-#
config FRV
bool
default y
@@ -61,8 +57,6 @@ config HZ
int
default 1000
-mainmenu "Fujitsu FR-V Kernel Configuration"
-
source "init/Kconfig"
source "kernel/Kconfig.freezer"
diff --git a/arch/frv/include/asm/highmem.h b/arch/frv/include/asm/highmem.h
index cb4c317eaecc..a8d6565d415d 100644
--- a/arch/frv/include/asm/highmem.h
+++ b/arch/frv/include/asm/highmem.h
@@ -112,12 +112,11 @@ extern struct page *kmap_atomic_to_page(void *ptr);
(void *) damlr; \
})
-static inline void *kmap_atomic(struct page *page, enum km_type type)
+static inline void *kmap_atomic_primary(struct page *page, enum km_type type)
{
unsigned long paddr;
pagefault_disable();
- debug_kmap_atomic(type);
paddr = page_to_phys(page);
switch (type) {
@@ -125,14 +124,6 @@ static inline void *kmap_atomic(struct page *page, enum km_type type)
case 1: return __kmap_atomic_primary(1, paddr, 3);
case 2: return __kmap_atomic_primary(2, paddr, 4);
case 3: return __kmap_atomic_primary(3, paddr, 5);
- case 4: return __kmap_atomic_primary(4, paddr, 6);
- case 5: return __kmap_atomic_primary(5, paddr, 7);
- case 6: return __kmap_atomic_primary(6, paddr, 8);
- case 7: return __kmap_atomic_primary(7, paddr, 9);
- case 8: return __kmap_atomic_primary(8, paddr, 10);
-
- case 9 ... 9 + NR_TLB_LINES - 1:
- return __kmap_atomic_secondary(type - 9, paddr);
default:
BUG();
@@ -152,22 +143,13 @@ do { \
asm volatile("tlbpr %0,gr0,#4,#1" : : "r"(vaddr) : "memory"); \
} while(0)
-static inline void kunmap_atomic_notypecheck(void *kvaddr, enum km_type type)
+static inline void kunmap_atomic_primary(void *kvaddr, enum km_type type)
{
switch (type) {
case 0: __kunmap_atomic_primary(0, 2); break;
case 1: __kunmap_atomic_primary(1, 3); break;
case 2: __kunmap_atomic_primary(2, 4); break;
case 3: __kunmap_atomic_primary(3, 5); break;
- case 4: __kunmap_atomic_primary(4, 6); break;
- case 5: __kunmap_atomic_primary(5, 7); break;
- case 6: __kunmap_atomic_primary(6, 8); break;
- case 7: __kunmap_atomic_primary(7, 9); break;
- case 8: __kunmap_atomic_primary(8, 10); break;
-
- case 9 ... 9 + NR_TLB_LINES - 1:
- __kunmap_atomic_secondary(type - 9, kvaddr);
- break;
default:
BUG();
@@ -175,6 +157,9 @@ static inline void kunmap_atomic_notypecheck(void *kvaddr, enum km_type type)
pagefault_enable();
}
+void *__kmap_atomic(struct page *page);
+void __kunmap_atomic(void *kvaddr);
+
#endif /* !__ASSEMBLY__ */
#endif /* __KERNEL__ */
diff --git a/arch/frv/include/asm/pgtable.h b/arch/frv/include/asm/pgtable.h
index c18b0d32e636..6bc241e4b4f8 100644
--- a/arch/frv/include/asm/pgtable.h
+++ b/arch/frv/include/asm/pgtable.h
@@ -451,17 +451,12 @@ static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
#if defined(CONFIG_HIGHPTE)
#define pte_offset_map(dir, address) \
- ((pte_t *)kmap_atomic(pmd_page(*(dir)),KM_PTE0) + pte_index(address))
-#define pte_offset_map_nested(dir, address) \
- ((pte_t *)kmap_atomic(pmd_page(*(dir)),KM_PTE1) + pte_index(address))
-#define pte_unmap(pte) kunmap_atomic(pte, KM_PTE0)
-#define pte_unmap_nested(pte) kunmap_atomic((pte), KM_PTE1)
+ ((pte_t *)kmap_atomic(pmd_page(*(dir))) + pte_index(address))
+#define pte_unmap(pte) kunmap_atomic(pte)
#else
#define pte_offset_map(dir, address) \
((pte_t *)page_address(pmd_page(*(dir))) + pte_index(address))
-#define pte_offset_map_nested(dir, address) pte_offset_map((dir), (address))
#define pte_unmap(pte) do { } while (0)
-#define pte_unmap_nested(pte) do { } while (0)
#endif
/*
diff --git a/arch/frv/kernel/process.c b/arch/frv/kernel/process.c
index 2b63b0191f52..efad12071c2e 100644
--- a/arch/frv/kernel/process.c
+++ b/arch/frv/kernel/process.c
@@ -16,7 +16,6 @@
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/smp.h>
-#include <linux/smp_lock.h>
#include <linux/stddef.h>
#include <linux/unistd.h>
#include <linux/ptrace.h>
diff --git a/arch/frv/kernel/ptrace.c b/arch/frv/kernel/ptrace.c
index fac028936a04..9d68f7fac730 100644
--- a/arch/frv/kernel/ptrace.c
+++ b/arch/frv/kernel/ptrace.c
@@ -254,23 +254,26 @@ void ptrace_disable(struct task_struct *child)
user_disable_single_step(child);
}
-long arch_ptrace(struct task_struct *child, long request, long addr, long data)
+long arch_ptrace(struct task_struct *child, long request,
+ unsigned long addr, unsigned long data)
{
unsigned long tmp;
int ret;
+ int regno = addr >> 2;
+ unsigned long __user *datap = (unsigned long __user *) data;
switch (request) {
/* read the word at location addr in the USER area. */
case PTRACE_PEEKUSR: {
tmp = 0;
ret = -EIO;
- if ((addr & 3) || addr < 0)
+ if (addr & 3)
break;
ret = 0;
- switch (addr >> 2) {
+ switch (regno) {
case 0 ... PT__END - 1:
- tmp = get_reg(child, addr >> 2);
+ tmp = get_reg(child, regno);
break;
case PT__END + 0:
@@ -299,23 +302,18 @@ long arch_ptrace(struct task_struct *child, long request, long addr, long data)
}
if (ret == 0)
- ret = put_user(tmp, (unsigned long *) data);
+ ret = put_user(tmp, datap);
break;
}
case PTRACE_POKEUSR: /* write the word at location addr in the USER area */
ret = -EIO;
- if ((addr & 3) || addr < 0)
+ if (addr & 3)
break;
- ret = 0;
- switch (addr >> 2) {
+ switch (regno) {
case 0 ... PT__END - 1:
- ret = put_reg(child, addr >> 2, data);
- break;
-
- default:
- ret = -EIO;
+ ret = put_reg(child, regno, data);
break;
}
break;
@@ -324,25 +322,25 @@ long arch_ptrace(struct task_struct *child, long request, long addr, long data)
return copy_regset_to_user(child, &user_frv_native_view,
REGSET_GENERAL,
0, sizeof(child->thread.user->i),
- (void __user *)data);
+ datap);
case PTRACE_SETREGS: /* Set all integer regs in the child. */
return copy_regset_from_user(child, &user_frv_native_view,
REGSET_GENERAL,
0, sizeof(child->thread.user->i),
- (const void __user *)data);
+ datap);
case PTRACE_GETFPREGS: /* Get the child FP/Media state. */
return copy_regset_to_user(child, &user_frv_native_view,
REGSET_FPMEDIA,
0, sizeof(child->thread.user->f),
- (void __user *)data);
+ datap);
case PTRACE_SETFPREGS: /* Set the child FP/Media state. */
return copy_regset_from_user(child, &user_frv_native_view,
REGSET_FPMEDIA,
0, sizeof(child->thread.user->f),
- (const void __user *)data);
+ datap);
default:
ret = ptrace_request(child, request, addr, data);
diff --git a/arch/frv/mb93090-mb00/pci-dma.c b/arch/frv/mb93090-mb00/pci-dma.c
index 85d110b71cf7..41098a3803a2 100644
--- a/arch/frv/mb93090-mb00/pci-dma.c
+++ b/arch/frv/mb93090-mb00/pci-dma.c
@@ -61,14 +61,14 @@ int dma_map_sg(struct device *dev, struct scatterlist *sg, int nents,
dampr2 = __get_DAMPR(2);
for (i = 0; i < nents; i++) {
- vaddr = kmap_atomic(sg_page(&sg[i]), __KM_CACHE);
+ vaddr = kmap_atomic_primary(sg_page(&sg[i]), __KM_CACHE);
frv_dcache_writeback((unsigned long) vaddr,
(unsigned long) vaddr + PAGE_SIZE);
}
- kunmap_atomic(vaddr, __KM_CACHE);
+ kunmap_atomic_primary(vaddr, __KM_CACHE);
if (dampr2) {
__set_DAMPR(2, dampr2);
__set_IAMPR(2, dampr2);
diff --git a/arch/frv/mm/cache-page.c b/arch/frv/mm/cache-page.c
index 0261cbe153b5..b24ade27a0f0 100644
--- a/arch/frv/mm/cache-page.c
+++ b/arch/frv/mm/cache-page.c
@@ -26,11 +26,11 @@ void flush_dcache_page(struct page *page)
dampr2 = __get_DAMPR(2);
- vaddr = kmap_atomic(page, __KM_CACHE);
+ vaddr = kmap_atomic_primary(page, __KM_CACHE);
frv_dcache_writeback((unsigned long) vaddr, (unsigned long) vaddr + PAGE_SIZE);
- kunmap_atomic(vaddr, __KM_CACHE);
+ kunmap_atomic_primary(vaddr, __KM_CACHE);
if (dampr2) {
__set_DAMPR(2, dampr2);
@@ -54,12 +54,12 @@ void flush_icache_user_range(struct vm_area_struct *vma, struct page *page,
dampr2 = __get_DAMPR(2);
- vaddr = kmap_atomic(page, __KM_CACHE);
+ vaddr = kmap_atomic_primary(page, __KM_CACHE);
start = (start & ~PAGE_MASK) | (unsigned long) vaddr;
frv_cache_wback_inv(start, start + len);
- kunmap_atomic(vaddr, __KM_CACHE);
+ kunmap_atomic_primary(vaddr, __KM_CACHE);
if (dampr2) {
__set_DAMPR(2, dampr2);
diff --git a/arch/frv/mm/highmem.c b/arch/frv/mm/highmem.c
index eadd07658075..fd7fcd4c2e33 100644
--- a/arch/frv/mm/highmem.c
+++ b/arch/frv/mm/highmem.c
@@ -36,3 +36,54 @@ struct page *kmap_atomic_to_page(void *ptr)
{
return virt_to_page(ptr);
}
+
+void *__kmap_atomic(struct page *page)
+{
+ unsigned long paddr;
+ int type;
+
+ pagefault_disable();
+ type = kmap_atomic_idx_push();
+ paddr = page_to_phys(page);
+
+ switch (type) {
+ /*
+ * The first 4 primary maps are reserved for architecture code
+ */
+ case 0: return __kmap_atomic_primary(4, paddr, 6);
+ case 1: return __kmap_atomic_primary(5, paddr, 7);
+ case 2: return __kmap_atomic_primary(6, paddr, 8);
+ case 3: return __kmap_atomic_primary(7, paddr, 9);
+ case 4: return __kmap_atomic_primary(8, paddr, 10);
+
+ case 5 ... 5 + NR_TLB_LINES - 1:
+ return __kmap_atomic_secondary(type - 5, paddr);
+
+ default:
+ BUG();
+ return NULL;
+ }
+}
+EXPORT_SYMBOL(__kmap_atomic);
+
+void __kunmap_atomic(void *kvaddr)
+{
+ int type = kmap_atomic_idx();
+ switch (type) {
+ case 0: __kunmap_atomic_primary(4, 6); break;
+ case 1: __kunmap_atomic_primary(5, 7); break;
+ case 2: __kunmap_atomic_primary(6, 8); break;
+ case 3: __kunmap_atomic_primary(7, 9); break;
+ case 4: __kunmap_atomic_primary(8, 10); break;
+
+ case 5 ... 5 + NR_TLB_LINES - 1:
+ __kunmap_atomic_secondary(type - 5, kvaddr);
+ break;
+
+ default:
+ BUG();
+ }
+ kmap_atomic_idx_pop();
+ pagefault_enable();
+}
+EXPORT_SYMBOL(__kunmap_atomic);
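With the stack-based scheme above, nested atomic mappings just push and pop an index, so callers no longer juggle KM_USER0/KM_USER1 slots. An editorial sketch (not from the patch) of a copy between two highmem pages through the generic single-argument wrappers:

static void copy_highpage_sketch(struct page *dst, struct page *src)
{
	void *vdst = kmap_atomic(dst);
	void *vsrc = kmap_atomic(src);	/* nests: takes the next free slot */

	memcpy(vdst, vsrc, PAGE_SIZE);

	kunmap_atomic(vsrc);		/* unmap in reverse order of mapping */
	kunmap_atomic(vdst);
}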
diff --git a/arch/h8300/Kconfig b/arch/h8300/Kconfig
index 988b6ff34cc4..65f897d8c1e9 100644
--- a/arch/h8300/Kconfig
+++ b/arch/h8300/Kconfig
@@ -1,10 +1,3 @@
-#
-# For a description of the syntax of this configuration file,
-# see Documentation/kbuild/kconfig-language.txt.
-#
-
-mainmenu "uClinux/h8300 (w/o MMU) Kernel Configuration"
-
config H8300
bool
default y
diff --git a/arch/h8300/kernel/process.c b/arch/h8300/kernel/process.c
index 97478138e361..933bd388efb2 100644
--- a/arch/h8300/kernel/process.c
+++ b/arch/h8300/kernel/process.c
@@ -28,7 +28,6 @@
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/smp.h>
-#include <linux/smp_lock.h>
#include <linux/stddef.h>
#include <linux/unistd.h>
#include <linux/ptrace.h>
diff --git a/arch/h8300/kernel/ptrace.c b/arch/h8300/kernel/ptrace.c
index df114122ebdf..497fa89b5df4 100644
--- a/arch/h8300/kernel/ptrace.c
+++ b/arch/h8300/kernel/ptrace.c
@@ -50,27 +50,29 @@ void ptrace_disable(struct task_struct *child)
user_disable_single_step(child);
}
-long arch_ptrace(struct task_struct *child, long request, long addr, long data)
+long arch_ptrace(struct task_struct *child, long request,
+ unsigned long addr, unsigned long data)
{
int ret;
+ int regno = addr >> 2;
+ unsigned long __user *datap = (unsigned long __user *) data;
switch (request) {
/* read the word at location addr in the USER area. */
case PTRACE_PEEKUSR: {
unsigned long tmp = 0;
- if ((addr & 3) || addr < 0 || addr >= sizeof(struct user)) {
+ if ((addr & 3) || addr >= sizeof(struct user)) {
ret = -EIO;
break ;
}
ret = 0; /* Default return condition */
- addr = addr >> 2; /* temporary hack. */
- if (addr < H8300_REGS_NO)
- tmp = h8300_get_reg(child, addr);
+ if (regno < H8300_REGS_NO)
+ tmp = h8300_get_reg(child, regno);
else {
- switch(addr) {
+ switch (regno) {
case 49:
tmp = child->mm->start_code;
break ;
@@ -88,24 +90,23 @@ long arch_ptrace(struct task_struct *child, long request, long addr, long data)
}
}
if (!ret)
- ret = put_user(tmp,(unsigned long *) data);
+ ret = put_user(tmp, datap);
break ;
}
/* when I and D space are separate, this will have to be fixed. */
case PTRACE_POKEUSR: /* write the word at location addr in the USER area */
- if ((addr & 3) || addr < 0 || addr >= sizeof(struct user)) {
+ if ((addr & 3) || addr >= sizeof(struct user)) {
ret = -EIO;
break ;
}
- addr = addr >> 2; /* temporary hack. */
- if (addr == PT_ORIG_ER0) {
+ if (regno == PT_ORIG_ER0) {
ret = -EIO;
break ;
}
- if (addr < H8300_REGS_NO) {
- ret = h8300_put_reg(child, addr, data);
+ if (regno < H8300_REGS_NO) {
+ ret = h8300_put_reg(child, regno, data);
break ;
}
ret = -EIO;
@@ -116,11 +117,11 @@ long arch_ptrace(struct task_struct *child, long request, long addr, long data)
unsigned long tmp;
for (i = 0; i < H8300_REGS_NO; i++) {
tmp = h8300_get_reg(child, i);
- if (put_user(tmp, (unsigned long *) data)) {
+ if (put_user(tmp, datap)) {
ret = -EFAULT;
break;
}
- data += sizeof(long);
+ datap++;
}
ret = 0;
break;
@@ -130,12 +131,12 @@ long arch_ptrace(struct task_struct *child, long request, long addr, long data)
int i;
unsigned long tmp;
for (i = 0; i < H8300_REGS_NO; i++) {
- if (get_user(tmp, (unsigned long *) data)) {
+ if (get_user(tmp, datap)) {
ret = -EFAULT;
break;
}
h8300_put_reg(child, i, tmp);
- data += sizeof(long);
+ datap++;
}
ret = 0;
break;
diff --git a/arch/ia64/Kconfig b/arch/ia64/Kconfig
index 7c82fa1fc911..e0f5b6d7f849 100644
--- a/arch/ia64/Kconfig
+++ b/arch/ia64/Kconfig
@@ -1,10 +1,3 @@
-#
-# For a description of the syntax of this configuration file,
-# see Documentation/kbuild/kconfig-language.txt.
-#
-
-mainmenu "IA-64 Linux Kernel Configuration"
-
source "init/Kconfig"
source "kernel/Kconfig.freezer"
diff --git a/arch/ia64/hp/sim/simscsi.c b/arch/ia64/hp/sim/simscsi.c
index 3a078ad3aa44..331de723c676 100644
--- a/arch/ia64/hp/sim/simscsi.c
+++ b/arch/ia64/hp/sim/simscsi.c
@@ -202,7 +202,7 @@ simscsi_readwrite10 (struct scsi_cmnd *sc, int mode)
}
static int
-simscsi_queuecommand (struct scsi_cmnd *sc, void (*done)(struct scsi_cmnd *))
+simscsi_queuecommand_lck (struct scsi_cmnd *sc, void (*done)(struct scsi_cmnd *))
{
unsigned int target_id = sc->device->id;
char fname[MAX_ROOT_LEN+16];
@@ -326,6 +326,8 @@ simscsi_queuecommand (struct scsi_cmnd *sc, void (*done)(struct scsi_cmnd *))
return 0;
}
+static DEF_SCSI_QCMD(simscsi_queuecommand)
+
static int
simscsi_host_reset (struct scsi_cmnd *sc)
{
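This is the SCSI host-lock push-down: the old unlocked ->queuecommand body keeps a _lck suffix and DEF_SCSI_QCMD() generates the locked entry point. Roughly the wrapper it produces, written out as an approximation rather than the verbatim macro body (serial-number bookkeeping omitted):

static int simscsi_queuecommand_sketch(struct Scsi_Host *shost,
				       struct scsi_cmnd *cmd)
{
	unsigned long flags;
	int rc;

	spin_lock_irqsave(shost->host_lock, flags);
	rc = simscsi_queuecommand_lck(cmd, cmd->scsi_done);
	spin_unlock_irqrestore(shost->host_lock, flags);
	return rc;
}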
diff --git a/arch/ia64/include/asm/cputime.h b/arch/ia64/include/asm/cputime.h
index 7fa8a8594660..6073b187528a 100644
--- a/arch/ia64/include/asm/cputime.h
+++ b/arch/ia64/include/asm/cputime.h
@@ -56,10 +56,10 @@ typedef u64 cputime64_t;
#define jiffies64_to_cputime64(__jif) ((__jif) * (NSEC_PER_SEC / HZ))
/*
- * Convert cputime <-> milliseconds
+ * Convert cputime <-> microseconds
*/
-#define cputime_to_msecs(__ct) ((__ct) / NSEC_PER_MSEC)
-#define msecs_to_cputime(__msecs) ((__msecs) * NSEC_PER_MSEC)
+#define cputime_to_usecs(__ct) ((__ct) / NSEC_PER_USEC)
+#define usecs_to_cputime(__usecs) ((__usecs) * NSEC_PER_USEC)
/*
* Convert cputime <-> seconds
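Since ia64 stores cputime_t in nanoseconds, the new helpers are plain scaling by NSEC_PER_USEC. A small illustrative example:

/* cputime_to_usecs(3000000) == 3000000 / 1000 == 3000 us
 * usecs_to_cputime(250)     == 250 * 1000     == 250000 ns of cputime
 */
static inline u64 delta_in_usecs_sketch(cputime_t delta)
{
	return cputime_to_usecs(delta);
}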
diff --git a/arch/ia64/include/asm/pgtable.h b/arch/ia64/include/asm/pgtable.h
index c3286f42e501..1a97af31ef17 100644
--- a/arch/ia64/include/asm/pgtable.h
+++ b/arch/ia64/include/asm/pgtable.h
@@ -406,9 +406,7 @@ pgd_offset (const struct mm_struct *mm, unsigned long address)
#define pte_index(addr) (((addr) >> PAGE_SHIFT) & (PTRS_PER_PTE - 1))
#define pte_offset_kernel(dir,addr) ((pte_t *) pmd_page_vaddr(*(dir)) + pte_index(addr))
#define pte_offset_map(dir,addr) pte_offset_kernel(dir, addr)
-#define pte_offset_map_nested(dir,addr) pte_offset_map(dir, addr)
#define pte_unmap(pte) do { } while (0)
-#define pte_unmap_nested(pte) do { } while (0)
/* atomic versions of the some PTE manipulations: */
diff --git a/arch/ia64/include/asm/siginfo.h b/arch/ia64/include/asm/siginfo.h
index 118d42979003..c8fcaa2ac48f 100644
--- a/arch/ia64/include/asm/siginfo.h
+++ b/arch/ia64/include/asm/siginfo.h
@@ -62,6 +62,7 @@ typedef struct siginfo {
int _imm; /* immediate value for "break" */
unsigned int _flags; /* see below */
unsigned long _isr; /* isr */
+ short _addr_lsb; /* lsb of faulting address */
} _sigfault;
/* SIGPOLL */
diff --git a/arch/ia64/kernel/perfmon.c b/arch/ia64/kernel/perfmon.c
index 6b1852f7f972..39e534f5a3b0 100644
--- a/arch/ia64/kernel/perfmon.c
+++ b/arch/ia64/kernel/perfmon.c
@@ -618,16 +618,15 @@ pfm_get_unmapped_area(struct file *file, unsigned long addr, unsigned long len,
}
-static int
-pfmfs_get_sb(struct file_system_type *fs_type, int flags, const char *dev_name, void *data,
- struct vfsmount *mnt)
+static struct dentry *
+pfmfs_mount(struct file_system_type *fs_type, int flags, const char *dev_name, void *data)
{
- return get_sb_pseudo(fs_type, "pfm:", NULL, PFMFS_MAGIC, mnt);
+ return mount_pseudo(fs_type, "pfm:", NULL, PFMFS_MAGIC);
}
static struct file_system_type pfm_fs_type = {
.name = "pfmfs",
- .get_sb = pfmfs_get_sb,
+ .mount = pfmfs_mount,
.kill_sb = kill_anon_super,
};
diff --git a/arch/ia64/kernel/ptrace.c b/arch/ia64/kernel/ptrace.c
index 7c7909f9bc93..8848f43d819e 100644
--- a/arch/ia64/kernel/ptrace.c
+++ b/arch/ia64/kernel/ptrace.c
@@ -1177,7 +1177,8 @@ ptrace_disable (struct task_struct *child)
}
long
-arch_ptrace (struct task_struct *child, long request, long addr, long data)
+arch_ptrace (struct task_struct *child, long request,
+ unsigned long addr, unsigned long data)
{
switch (request) {
case PTRACE_PEEKTEXT:
diff --git a/arch/m32r/Kconfig b/arch/m32r/Kconfig
index 3867fd21f333..5c291d65196b 100644
--- a/arch/m32r/Kconfig
+++ b/arch/m32r/Kconfig
@@ -1,10 +1,3 @@
-#
-# For a description of the syntax of this configuration file,
-# see Documentation/kbuild/kconfig-language.txt.
-#
-
-mainmenu "Linux/M32R Kernel Configuration"
-
config M32R
bool
default y
diff --git a/arch/m32r/include/asm/pgtable.h b/arch/m32r/include/asm/pgtable.h
index e6359c566b50..8a28cfea2729 100644
--- a/arch/m32r/include/asm/pgtable.h
+++ b/arch/m32r/include/asm/pgtable.h
@@ -332,9 +332,7 @@ static inline void pmd_set(pmd_t * pmdp, pte_t * ptep)
((pte_t *)pmd_page_vaddr(*(dir)) + pte_index(address))
#define pte_offset_map(dir, address) \
((pte_t *)page_address(pmd_page(*(dir))) + pte_index(address))
-#define pte_offset_map_nested(dir, address) pte_offset_map(dir, address)
#define pte_unmap(pte) do { } while (0)
-#define pte_unmap_nested(pte) do { } while (0)
/* Encode and de-code a swap entry */
#define __swp_type(x) (((x).val >> 2) & 0x1f)
diff --git a/arch/m32r/kernel/ptrace.c b/arch/m32r/kernel/ptrace.c
index 0021ade4cba8..20743754f2b2 100644
--- a/arch/m32r/kernel/ptrace.c
+++ b/arch/m32r/kernel/ptrace.c
@@ -622,9 +622,11 @@ void ptrace_disable(struct task_struct *child)
}
long
-arch_ptrace(struct task_struct *child, long request, long addr, long data)
+arch_ptrace(struct task_struct *child, long request,
+ unsigned long addr, unsigned long data)
{
int ret;
+ unsigned long __user *datap = (unsigned long __user *) data;
switch (request) {
/*
@@ -639,8 +641,7 @@ arch_ptrace(struct task_struct *child, long request, long addr, long data)
* read the word at location addr in the USER area.
*/
case PTRACE_PEEKUSR:
- ret = ptrace_read_user(child, addr,
- (unsigned long __user *)data);
+ ret = ptrace_read_user(child, addr, datap);
break;
/*
@@ -661,11 +662,11 @@ arch_ptrace(struct task_struct *child, long request, long addr, long data)
break;
case PTRACE_GETREGS:
- ret = ptrace_getregs(child, (void __user *)data);
+ ret = ptrace_getregs(child, datap);
break;
case PTRACE_SETREGS:
- ret = ptrace_setregs(child, (void __user *)data);
+ ret = ptrace_setregs(child, datap);
break;
default:
diff --git a/arch/m68k/Kconfig b/arch/m68k/Kconfig
index 77bb0d6baa62..bc9271b85759 100644
--- a/arch/m68k/Kconfig
+++ b/arch/m68k/Kconfig
@@ -1,7 +1,3 @@
-#
-# For a description of the syntax of this configuration file,
-# see Documentation/kbuild/kconfig-language.txt.
-#
config M68K
bool
default y
@@ -62,8 +58,6 @@ config HZ
config ARCH_USES_GETTIMEOFFSET
def_bool y
-mainmenu "Linux/68k Kernel Configuration"
-
source "init/Kconfig"
source "kernel/Kconfig.freezer"
diff --git a/arch/m68k/include/asm/cacheflush_no.h b/arch/m68k/include/asm/cacheflush_no.h
index 89f195656be7..7085bd51668b 100644
--- a/arch/m68k/include/asm/cacheflush_no.h
+++ b/arch/m68k/include/asm/cacheflush_no.h
@@ -29,7 +29,7 @@
static inline void __flush_cache_all(void)
{
-#ifdef CONFIG_M5407
+#if defined(CONFIG_M5407) || defined(CONFIG_M548x)
/*
* Use cpushl to push and invalidate all cache lines.
* Gas doesn't seem to know how to generate the ColdFire
diff --git a/arch/m68k/include/asm/coldfire.h b/arch/m68k/include/asm/coldfire.h
index 83a9fa4e618a..3b0a34d0fe33 100644
--- a/arch/m68k/include/asm/coldfire.h
+++ b/arch/m68k/include/asm/coldfire.h
@@ -32,7 +32,9 @@
*/
#define MCF_MBAR 0x10000000
#define MCF_MBAR2 0x80000000
-#if defined(CONFIG_M520x)
+#if defined(CONFIG_M548x)
+#define MCF_IPSBAR MCF_MBAR
+#elif defined(CONFIG_M520x)
#define MCF_IPSBAR 0xFC000000
#else
#define MCF_IPSBAR 0x40000000
diff --git a/arch/m68k/include/asm/entry_mm.h b/arch/m68k/include/asm/entry_mm.h
index e41fea399bfe..73b8c8fbed9c 100644
--- a/arch/m68k/include/asm/entry_mm.h
+++ b/arch/m68k/include/asm/entry_mm.h
@@ -50,14 +50,6 @@
LFLUSH_I_AND_D = 0x00000808
-/* process bits for task_struct.ptrace */
-PT_TRACESYS_OFF = 3
-PT_TRACESYS_BIT = 1
-PT_PTRACED_OFF = 3
-PT_PTRACED_BIT = 0
-PT_DTRACE_OFF = 3
-PT_DTRACE_BIT = 2
-
#define SAVE_ALL_INT save_all_int
#define SAVE_ALL_SYS save_all_sys
#define RESTORE_ALL restore_all
diff --git a/arch/m68k/include/asm/entry_no.h b/arch/m68k/include/asm/entry_no.h
index 80e41492aa2a..26be277394f9 100644
--- a/arch/m68k/include/asm/entry_no.h
+++ b/arch/m68k/include/asm/entry_no.h
@@ -32,16 +32,6 @@
#ifdef __ASSEMBLY__
-/* process bits for task_struct.flags */
-PF_TRACESYS_OFF = 3
-PF_TRACESYS_BIT = 5
-PF_PTRACED_OFF = 3
-PF_PTRACED_BIT = 4
-PF_DTRACE_OFF = 1
-PF_DTRACE_BIT = 5
-
-LENOSYS = 38
-
#define SWITCH_STACK_SIZE (6*4+4) /* Includes return address */
/*
diff --git a/arch/m68k/include/asm/gpio.h b/arch/m68k/include/asm/gpio.h
index 283214dc65a7..1b57adbafad5 100644
--- a/arch/m68k/include/asm/gpio.h
+++ b/arch/m68k/include/asm/gpio.h
@@ -36,7 +36,8 @@
*/
#if defined(CONFIG_M5206) || defined(CONFIG_M5206e) || \
defined(CONFIG_M520x) || defined(CONFIG_M523x) || \
- defined(CONFIG_M527x) || defined(CONFIG_M528x) || defined(CONFIG_M532x)
+ defined(CONFIG_M527x) || defined(CONFIG_M528x) || \
+ defined(CONFIG_M532x) || defined(CONFIG_M548x)
/* These parts have GPIO organized by 8 bit ports */
@@ -136,6 +137,8 @@ static inline u32 __mcf_gpio_ppdr(unsigned gpio)
#endif
else
return MCFGPIO_PPDR + mcfgpio_port(gpio - MCFGPIO_SCR_START);
+#else
+ return 0;
#endif
}
@@ -173,6 +176,8 @@ static inline u32 __mcf_gpio_podr(unsigned gpio)
#endif
else
return MCFGPIO_PODR + mcfgpio_port(gpio - MCFGPIO_SCR_START);
+#else
+ return 0;
#endif
}
diff --git a/arch/m68k/include/asm/irqflags.h b/arch/m68k/include/asm/irqflags.h
index 4a5b284a1550..7ef4115b8c4a 100644
--- a/arch/m68k/include/asm/irqflags.h
+++ b/arch/m68k/include/asm/irqflags.h
@@ -2,7 +2,9 @@
#define _M68K_IRQFLAGS_H
#include <linux/types.h>
+#ifdef CONFIG_MMU
#include <linux/hardirq.h>
+#endif
#include <linux/preempt.h>
#include <asm/thread_info.h>
#include <asm/entry.h>
diff --git a/arch/m68k/include/asm/m548xgpt.h b/arch/m68k/include/asm/m548xgpt.h
new file mode 100644
index 000000000000..c8ef158a1c4e
--- /dev/null
+++ b/arch/m68k/include/asm/m548xgpt.h
@@ -0,0 +1,88 @@
+/*
+ * File: m548xgpt.h
+ * Purpose: Register and bit definitions for the MCF548X
+ *
+ * Notes:
+ *
+ */
+
+#ifndef m548xgpt_h
+#define m548xgpt_h
+
+/*********************************************************************
+*
+* General Purpose Timers (GPT)
+*
+*********************************************************************/
+
+/* Register read/write macros */
+#define MCF_GPT_GMS0 0x000800
+#define MCF_GPT_GCIR0 0x000804
+#define MCF_GPT_GPWM0 0x000808
+#define MCF_GPT_GSR0 0x00080C
+#define MCF_GPT_GMS1 0x000810
+#define MCF_GPT_GCIR1 0x000814
+#define MCF_GPT_GPWM1 0x000818
+#define MCF_GPT_GSR1 0x00081C
+#define MCF_GPT_GMS2 0x000820
+#define MCF_GPT_GCIR2 0x000824
+#define MCF_GPT_GPWM2 0x000828
+#define MCF_GPT_GSR2 0x00082C
+#define MCF_GPT_GMS3 0x000830
+#define MCF_GPT_GCIR3 0x000834
+#define MCF_GPT_GPWM3 0x000838
+#define MCF_GPT_GSR3 0x00083C
+#define MCF_GPT_GMS(x) (0x000800+((x)*0x010))
+#define MCF_GPT_GCIR(x) (0x000804+((x)*0x010))
+#define MCF_GPT_GPWM(x) (0x000808+((x)*0x010))
+#define MCF_GPT_GSR(x) (0x00080C+((x)*0x010))
+
+/* Bit definitions and macros for MCF_GPT_GMS */
+#define MCF_GPT_GMS_TMS(x) (((x)&0x00000007)<<0)
+#define MCF_GPT_GMS_GPIO(x) (((x)&0x00000003)<<4)
+#define MCF_GPT_GMS_IEN (0x00000100)
+#define MCF_GPT_GMS_OD (0x00000200)
+#define MCF_GPT_GMS_SC (0x00000400)
+#define MCF_GPT_GMS_CE (0x00001000)
+#define MCF_GPT_GMS_WDEN (0x00008000)
+#define MCF_GPT_GMS_ICT(x) (((x)&0x00000003)<<16)
+#define MCF_GPT_GMS_OCT(x) (((x)&0x00000003)<<20)
+#define MCF_GPT_GMS_OCPW(x) (((x)&0x000000FF)<<24)
+#define MCF_GPT_GMS_OCT_FRCLOW (0x00000000)
+#define MCF_GPT_GMS_OCT_PULSEHI (0x00100000)
+#define MCF_GPT_GMS_OCT_PULSELO (0x00200000)
+#define MCF_GPT_GMS_OCT_TOGGLE (0x00300000)
+#define MCF_GPT_GMS_ICT_ANY (0x00000000)
+#define MCF_GPT_GMS_ICT_RISE (0x00010000)
+#define MCF_GPT_GMS_ICT_FALL (0x00020000)
+#define MCF_GPT_GMS_ICT_PULSE (0x00030000)
+#define MCF_GPT_GMS_GPIO_INPUT (0x00000000)
+#define MCF_GPT_GMS_GPIO_OUTLO (0x00000020)
+#define MCF_GPT_GMS_GPIO_OUTHI (0x00000030)
+#define MCF_GPT_GMS_TMS_DISABLE (0x00000000)
+#define MCF_GPT_GMS_TMS_INCAPT (0x00000001)
+#define MCF_GPT_GMS_TMS_OUTCAPT (0x00000002)
+#define MCF_GPT_GMS_TMS_PWM (0x00000003)
+#define MCF_GPT_GMS_TMS_GPIO (0x00000004)
+
+/* Bit definitions and macros for MCF_GPT_GCIR */
+#define MCF_GPT_GCIR_CNT(x) (((x)&0x0000FFFF)<<0)
+#define MCF_GPT_GCIR_PRE(x) (((x)&0x0000FFFF)<<16)
+
+/* Bit definitions and macros for MCF_GPT_GPWM */
+#define MCF_GPT_GPWM_LOAD (0x00000001)
+#define MCF_GPT_GPWM_PWMOP (0x00000100)
+#define MCF_GPT_GPWM_WIDTH(x) (((x)&0x0000FFFF)<<16)
+
+/* Bit definitions and macros for MCF_GPT_GSR */
+#define MCF_GPT_GSR_CAPT (0x00000001)
+#define MCF_GPT_GSR_COMP (0x00000002)
+#define MCF_GPT_GSR_PWMP (0x00000004)
+#define MCF_GPT_GSR_TEXP (0x00000008)
+#define MCF_GPT_GSR_PIN (0x00000100)
+#define MCF_GPT_GSR_OVF(x) (((x)&0x00000007)<<12)
+#define MCF_GPT_GSR_CAPTURE(x) (((x)&0x0000FFFF)<<16)
+
+/********************************************************************/
+
+#endif /* m548xgpt_h */
diff --git a/arch/m68k/include/asm/m548xsim.h b/arch/m68k/include/asm/m548xsim.h
new file mode 100644
index 000000000000..149135ef30d2
--- /dev/null
+++ b/arch/m68k/include/asm/m548xsim.h
@@ -0,0 +1,55 @@
+/*
+ * m548xsim.h -- ColdFire 547x/548x System Integration Unit support.
+ */
+
+#ifndef m548xsim_h
+#define m548xsim_h
+
+#define MCFINT_VECBASE 64
+
+/*
+ * Interrupt Controller Registers
+ */
+#define MCFICM_INTC0 0x0700 /* Base for Interrupt Ctrl 0 */
+#define MCFINTC_IPRH 0x00 /* Interrupt pending 32-63 */
+#define MCFINTC_IPRL 0x04 /* Interrupt pending 1-31 */
+#define MCFINTC_IMRH 0x08 /* Interrupt mask 32-63 */
+#define MCFINTC_IMRL 0x0c /* Interrupt mask 1-31 */
+#define MCFINTC_INTFRCH 0x10 /* Interrupt force 32-63 */
+#define MCFINTC_INTFRCL 0x14 /* Interrupt force 1-31 */
+#define MCFINTC_IRLR 0x18 /* */
+#define MCFINTC_IACKL 0x19 /* */
+#define MCFINTC_ICR0 0x40 /* Base ICR register */
+
+/*
+ * Define system peripheral IRQ usage.
+ */
+#define MCF_IRQ_TIMER (64 + 54) /* Slice Timer 0 */
+#define MCF_IRQ_PROFILER (64 + 53) /* Slice Timer 1 */
+
+/*
+ * Generic GPIO support
+ */
+#define MCFGPIO_PIN_MAX 0 /* I am too lazy to count */
+#define MCFGPIO_IRQ_MAX -1
+#define MCFGPIO_IRQ_VECBASE -1
+
+/*
+ * Some PSC related definitions
+ */
+#define MCF_PAR_PSC(x) (0x000A4F-((x)&0x3))
+#define MCF_PAR_SDA (0x0008)
+#define MCF_PAR_SCL (0x0004)
+#define MCF_PAR_PSC_TXD (0x04)
+#define MCF_PAR_PSC_RXD (0x08)
+#define MCF_PAR_PSC_RTS(x) (((x)&0x03)<<4)
+#define MCF_PAR_PSC_CTS(x) (((x)&0x03)<<6)
+#define MCF_PAR_PSC_CTS_GPIO (0x00)
+#define MCF_PAR_PSC_CTS_BCLK (0x80)
+#define MCF_PAR_PSC_CTS_CTS (0xC0)
+#define MCF_PAR_PSC_RTS_GPIO (0x00)
+#define MCF_PAR_PSC_RTS_FSYNC (0x20)
+#define MCF_PAR_PSC_RTS_RTS (0x30)
+#define MCF_PAR_PSC_CANRX (0x40)
+
+#endif /* m548xsim_h */
diff --git a/arch/m68k/include/asm/machdep.h b/arch/m68k/include/asm/machdep.h
index 789f3b2de0e9..415d5484916c 100644
--- a/arch/m68k/include/asm/machdep.h
+++ b/arch/m68k/include/asm/machdep.h
@@ -40,5 +40,6 @@ extern unsigned long hw_timer_offset(void);
extern irqreturn_t arch_timer_interrupt(int irq, void *dummy);
extern void config_BSP(char *command, int len);
+extern void do_IRQ(int irq, struct pt_regs *fp);
#endif /* _M68K_MACHDEP_H */
diff --git a/arch/m68k/include/asm/mcfcache.h b/arch/m68k/include/asm/mcfcache.h
index c042634fadaa..f49dfc09f70a 100644
--- a/arch/m68k/include/asm/mcfcache.h
+++ b/arch/m68k/include/asm/mcfcache.h
@@ -107,7 +107,7 @@
.endm
#endif /* CONFIG_M532x */
-#if defined(CONFIG_M5407)
+#if defined(CONFIG_M5407) || defined(CONFIG_M548x)
/*
* Version 4 cores have a true harvard style separate instruction
* and data cache. Invalidate and enable cache, also enable write
diff --git a/arch/m68k/include/asm/mcfsim.h b/arch/m68k/include/asm/mcfsim.h
index 9c70a67bf85f..6901fd68165b 100644
--- a/arch/m68k/include/asm/mcfsim.h
+++ b/arch/m68k/include/asm/mcfsim.h
@@ -41,6 +41,8 @@
#elif defined(CONFIG_M5407)
#include <asm/m5407sim.h>
#include <asm/mcfintc.h>
+#elif defined(CONFIG_M548x)
+#include <asm/m548xsim.h>
#endif
/****************************************************************************/
diff --git a/arch/m68k/include/asm/mcfslt.h b/arch/m68k/include/asm/mcfslt.h
new file mode 100644
index 000000000000..d0d0ecba5333
--- /dev/null
+++ b/arch/m68k/include/asm/mcfslt.h
@@ -0,0 +1,44 @@
+/****************************************************************************/
+
+/*
+ * mcfslt.h -- ColdFire internal Slice (SLT) timer support defines.
+ *
+ * (C) Copyright 2004, Greg Ungerer (gerg@snapgear.com)
+ * (C) Copyright 2009, Philippe De Muyter (phdm@macqel.be)
+ */
+
+/****************************************************************************/
+#ifndef mcfslt_h
+#define mcfslt_h
+/****************************************************************************/
+
+/*
+ * Get address specific defines for the 547x.
+ */
+#define MCFSLT_TIMER0 0x900 /* Base address of TIMER0 */
+#define MCFSLT_TIMER1 0x910 /* Base address of TIMER1 */
+
+
+/*
+ * Define the SLT timer register set addresses.
+ */
+#define MCFSLT_STCNT 0x00 /* Terminal count */
+#define MCFSLT_SCR 0x04 /* Control */
+#define MCFSLT_SCNT 0x08 /* Current count */
+#define MCFSLT_SSR 0x0C /* Status */
+
+/*
+ * Bit definitions for the SCR control register.
+ */
+#define MCFSLT_SCR_RUN 0x04000000 /* Run mode (continuous) */
+#define MCFSLT_SCR_IEN 0x02000000 /* Interrupt enable */
+#define MCFSLT_SCR_TEN 0x01000000 /* Timer enable */
+
+/*
+ * Bit definitions for the SSR status register.
+ */
+#define MCFSLT_SSR_BE 0x02000000 /* Bus error condition */
+#define MCFSLT_SSR_TE 0x01000000 /* Timeout condition */
+
+/****************************************************************************/
+#endif /* mcfslt_h */
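mcfslt.h only provides offsets and bit definitions; the slice-timer driver itself is not in this hunk. An editorial sketch, under the assumption of the usual __raw_writel() accessors, of arming SLT timer 0 for a periodic interrupt:

static void slt0_start_sketch(unsigned long ticks)
{
	unsigned long base = MCF_MBAR + MCFSLT_TIMER0;

	__raw_writel(ticks, base + MCFSLT_STCNT);	/* terminal count */
	__raw_writel(MCFSLT_SCR_RUN | MCFSLT_SCR_IEN | MCFSLT_SCR_TEN,
		     base + MCFSLT_SCR);		/* continuous, irq enabled, run */
}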
diff --git a/arch/m68k/include/asm/mcfuart.h b/arch/m68k/include/asm/mcfuart.h
index 01a8716c5fc5..db72e2b889ca 100644
--- a/arch/m68k/include/asm/mcfuart.h
+++ b/arch/m68k/include/asm/mcfuart.h
@@ -47,6 +47,11 @@
#define MCFUART_BASE1 0xfc060000 /* Base address of UART1 */
#define MCFUART_BASE2 0xfc064000 /* Base address of UART2 */
#define MCFUART_BASE3 0xfc068000 /* Base address of UART3 */
+#elif defined(CONFIG_M548x)
+#define MCFUART_BASE1 0x8600 /* on M548x */
+#define MCFUART_BASE2 0x8700 /* on M548x */
+#define MCFUART_BASE3 0x8800 /* on M548x */
+#define MCFUART_BASE4 0x8900 /* on M548x */
#endif
@@ -212,7 +217,9 @@ struct mcf_platform_uart {
#define MCFUART_URF_RXS 0xc0 /* Receiver status */
#endif
-#if defined(CONFIG_M5272)
+#if defined(CONFIG_M548x)
+#define MCFUART_TXFIFOSIZE 512
+#elif defined(CONFIG_M5272)
#define MCFUART_TXFIFOSIZE 25
#else
#define MCFUART_TXFIFOSIZE 1
diff --git a/arch/m68k/include/asm/motorola_pgtable.h b/arch/m68k/include/asm/motorola_pgtable.h
index 8e9a8a754dde..45bd3f589bf0 100644
--- a/arch/m68k/include/asm/motorola_pgtable.h
+++ b/arch/m68k/include/asm/motorola_pgtable.h
@@ -221,9 +221,7 @@ static inline pte_t *pte_offset_kernel(pmd_t *pmdp, unsigned long address)
}
#define pte_offset_map(pmdp,address) ((pte_t *)__pmd_page(*pmdp) + (((address) >> PAGE_SHIFT) & (PTRS_PER_PTE - 1)))
-#define pte_offset_map_nested(pmdp, address) pte_offset_map(pmdp, address)
#define pte_unmap(pte) ((void)0)
-#define pte_unmap_nested(pte) ((void)0)
/*
* Allocate and free page tables. The xxx_kernel() versions are
diff --git a/arch/m68k/include/asm/sun3_pgtable.h b/arch/m68k/include/asm/sun3_pgtable.h
index f847ec732d62..cf5fad9b5250 100644
--- a/arch/m68k/include/asm/sun3_pgtable.h
+++ b/arch/m68k/include/asm/sun3_pgtable.h
@@ -219,9 +219,7 @@ static inline pte_t pgoff_to_pte(unsigned off)
#define pte_offset_kernel(pmd, address) ((pte_t *) __pmd_page(*pmd) + pte_index(address))
/* FIXME: should we bother with kmap() here? */
#define pte_offset_map(pmd, address) ((pte_t *)kmap(pmd_page(*pmd)) + pte_index(address))
-#define pte_offset_map_nested(pmd, address) pte_offset_map(pmd, address)
#define pte_unmap(pte) kunmap(pte)
-#define pte_unmap_nested(pte) kunmap(pte)
/* Macros to (de)construct the fake PTEs representing swap pages. */
#define __swp_type(x) ((x).val & 0x7F)
diff --git a/arch/m68k/kernel/asm-offsets.c b/arch/m68k/kernel/asm-offsets.c
index 73e5e581245b..78e59b82ebc3 100644
--- a/arch/m68k/kernel/asm-offsets.c
+++ b/arch/m68k/kernel/asm-offsets.c
@@ -22,13 +22,9 @@
int main(void)
{
/* offsets into the task struct */
- DEFINE(TASK_STATE, offsetof(struct task_struct, state));
- DEFINE(TASK_FLAGS, offsetof(struct task_struct, flags));
- DEFINE(TASK_PTRACE, offsetof(struct task_struct, ptrace));
DEFINE(TASK_THREAD, offsetof(struct task_struct, thread));
DEFINE(TASK_INFO, offsetof(struct task_struct, thread.info));
DEFINE(TASK_MM, offsetof(struct task_struct, mm));
- DEFINE(TASK_ACTIVE_MM, offsetof(struct task_struct, active_mm));
#ifdef CONFIG_MMU
DEFINE(TASK_TINFO, offsetof(struct task_struct, thread.info));
#endif
@@ -64,14 +60,6 @@ int main(void)
/* bitfields are a bit difficult */
DEFINE(PT_OFF_FORMATVEC, offsetof(struct pt_regs, pc) + 4);
- /* offsets into the irq_handler struct */
- DEFINE(IRQ_HANDLER, offsetof(struct irq_node, handler));
- DEFINE(IRQ_DEVID, offsetof(struct irq_node, dev_id));
- DEFINE(IRQ_NEXT, offsetof(struct irq_node, next));
-
- /* offsets into the kernel_stat struct */
- DEFINE(STAT_IRQ, offsetof(struct kernel_stat, irqs));
-
/* offsets into the irq_cpustat_t struct */
DEFINE(CPUSTAT_SOFTIRQ_PENDING, offsetof(irq_cpustat_t, __softirq_pending));
diff --git a/arch/m68k/kernel/process.c b/arch/m68k/kernel/process.c
index 18732ab23292..c2a1fc23dd75 100644
--- a/arch/m68k/kernel/process.c
+++ b/arch/m68k/kernel/process.c
@@ -18,7 +18,6 @@
#include <linux/slab.h>
#include <linux/fs.h>
#include <linux/smp.h>
-#include <linux/smp_lock.h>
#include <linux/stddef.h>
#include <linux/unistd.h>
#include <linux/ptrace.h>
diff --git a/arch/m68k/kernel/ptrace.c b/arch/m68k/kernel/ptrace.c
index 616e59752c29..0b252683cefb 100644
--- a/arch/m68k/kernel/ptrace.c
+++ b/arch/m68k/kernel/ptrace.c
@@ -156,55 +156,57 @@ void user_disable_single_step(struct task_struct *child)
singlestep_disable(child);
}
-long arch_ptrace(struct task_struct *child, long request, long addr, long data)
+long arch_ptrace(struct task_struct *child, long request,
+ unsigned long addr, unsigned long data)
{
unsigned long tmp;
int i, ret = 0;
+ int regno = addr >> 2; /* temporary hack. */
+ unsigned long __user *datap = (unsigned long __user *) data;
switch (request) {
/* read the word at location addr in the USER area. */
case PTRACE_PEEKUSR:
if (addr & 3)
goto out_eio;
- addr >>= 2; /* temporary hack. */
- if (addr >= 0 && addr < 19) {
- tmp = get_reg(child, addr);
- } else if (addr >= 21 && addr < 49) {
- tmp = child->thread.fp[addr - 21];
+ if (regno >= 0 && regno < 19) {
+ tmp = get_reg(child, regno);
+ } else if (regno >= 21 && regno < 49) {
+ tmp = child->thread.fp[regno - 21];
/* Convert internal fpu reg representation
* into long double format
*/
- if (FPU_IS_EMU && (addr < 45) && !(addr % 3))
+ if (FPU_IS_EMU && (regno < 45) && !(regno % 3))
tmp = ((tmp & 0xffff0000) << 15) |
((tmp & 0x0000ffff) << 16);
} else
goto out_eio;
- ret = put_user(tmp, (unsigned long *)data);
+ ret = put_user(tmp, datap);
break;
- case PTRACE_POKEUSR: /* write the word at location addr in the USER area */
+ case PTRACE_POKEUSR:
+ /* write the word at location addr in the USER area */
if (addr & 3)
goto out_eio;
- addr >>= 2; /* temporary hack. */
- if (addr == PT_SR) {
+ if (regno == PT_SR) {
data &= SR_MASK;
data |= get_reg(child, PT_SR) & ~SR_MASK;
}
- if (addr >= 0 && addr < 19) {
- if (put_reg(child, addr, data))
+ if (regno >= 0 && regno < 19) {
+ if (put_reg(child, regno, data))
goto out_eio;
- } else if (addr >= 21 && addr < 48) {
+ } else if (regno >= 21 && regno < 48) {
/* Convert long double format
* into internal fpu reg representation
*/
- if (FPU_IS_EMU && (addr < 45) && !(addr % 3)) {
- data = (unsigned long)data << 15;
+ if (FPU_IS_EMU && (regno < 45) && !(regno % 3)) {
+ data <<= 15;
data = (data & 0xffff0000) |
((data & 0x0000ffff) >> 1);
}
- child->thread.fp[addr - 21] = data;
+ child->thread.fp[regno - 21] = data;
} else
goto out_eio;
break;
@@ -212,16 +214,16 @@ long arch_ptrace(struct task_struct *child, long request, long addr, long data)
case PTRACE_GETREGS: /* Get all gp regs from the child. */
for (i = 0; i < 19; i++) {
tmp = get_reg(child, i);
- ret = put_user(tmp, (unsigned long *)data);
+ ret = put_user(tmp, datap);
if (ret)
break;
- data += sizeof(long);
+ datap++;
}
break;
case PTRACE_SETREGS: /* Set all gp regs in the child. */
for (i = 0; i < 19; i++) {
- ret = get_user(tmp, (unsigned long *)data);
+ ret = get_user(tmp, datap);
if (ret)
break;
if (i == PT_SR) {
@@ -229,25 +231,24 @@ long arch_ptrace(struct task_struct *child, long request, long addr, long data)
tmp |= get_reg(child, PT_SR) & ~SR_MASK;
}
put_reg(child, i, tmp);
- data += sizeof(long);
+ datap++;
}
break;
case PTRACE_GETFPREGS: /* Get the child FPU state. */
- if (copy_to_user((void *)data, &child->thread.fp,
+ if (copy_to_user(datap, &child->thread.fp,
sizeof(struct user_m68kfp_struct)))
ret = -EFAULT;
break;
case PTRACE_SETFPREGS: /* Set the child FPU state. */
- if (copy_from_user(&child->thread.fp, (void *)data,
+ if (copy_from_user(&child->thread.fp, datap,
sizeof(struct user_m68kfp_struct)))
ret = -EFAULT;
break;
case PTRACE_GET_THREAD_AREA:
- ret = put_user(task_thread_info(child)->tp_value,
- (unsigned long __user *)data);
+ ret = put_user(task_thread_info(child)->tp_value, datap);
break;
default:
diff --git a/arch/m68knommu/Kconfig b/arch/m68knommu/Kconfig
index 2609c394e1df..fa9f746cf4ae 100644
--- a/arch/m68knommu/Kconfig
+++ b/arch/m68knommu/Kconfig
@@ -1,10 +1,3 @@
-#
-# For a description of the syntax of this configuration file,
-# see Documentation/kbuild/kconfig-language.txt.
-#
-
-mainmenu "uClinux/68k (w/o MMU) Kernel Configuration"
-
config M68K
bool
default y
@@ -59,6 +52,10 @@ config GENERIC_HARDIRQS
bool
default y
+config GENERIC_HARDIRQS_NO__DO_IRQ
+ bool
+ default y
+
config GENERIC_CALIBRATE_DELAY
bool
default y
@@ -171,6 +168,11 @@ config M5407
help
Motorola ColdFire 5407 processor support.
+config M548x
+ bool "MCF548x"
+ help
+ Freescale ColdFire 5480/5481/5482/5483/5484/5485 processor support.
+
endchoice
config M527x
@@ -181,7 +183,7 @@ config M527x
config COLDFIRE
bool
- depends on (M5206 || M5206e || M520x || M523x || M5249 || M527x || M5272 || M528x || M5307 || M532x || M5407)
+ depends on (M5206 || M5206e || M520x || M523x || M5249 || M527x || M5272 || M528x || M5307 || M532x || M5407 || M548x)
select GENERIC_GPIO
select ARCH_REQUIRE_GPIOLIB
default y
diff --git a/arch/m68knommu/Makefile b/arch/m68knommu/Makefile
index 14042574ac21..026ef16fa68e 100644
--- a/arch/m68knommu/Makefile
+++ b/arch/m68knommu/Makefile
@@ -25,6 +25,7 @@ platform-$(CONFIG_M528x) := 528x
platform-$(CONFIG_M5307) := 5307
platform-$(CONFIG_M532x) := 532x
platform-$(CONFIG_M5407) := 5407
+platform-$(CONFIG_M548x) := 548x
PLATFORM := $(platform-y)
board-$(CONFIG_PILOT) := pilot
@@ -73,6 +74,7 @@ cpuclass-$(CONFIG_M528x) := coldfire
cpuclass-$(CONFIG_M5307) := coldfire
cpuclass-$(CONFIG_M532x) := coldfire
cpuclass-$(CONFIG_M5407) := coldfire
+cpuclass-$(CONFIG_M548x) := coldfire
cpuclass-$(CONFIG_M68328) := 68328
cpuclass-$(CONFIG_M68EZ328) := 68328
cpuclass-$(CONFIG_M68VZ328) := 68328
@@ -100,6 +102,7 @@ cflags-$(CONFIG_M528x) := $(call cc-option,-m528x,-m5307)
cflags-$(CONFIG_M5307) := $(call cc-option,-m5307,-m5200)
cflags-$(CONFIG_M532x) := $(call cc-option,-mcpu=532x,-m5307)
cflags-$(CONFIG_M5407) := $(call cc-option,-m5407,-m5200)
+cflags-$(CONFIG_M548x) := $(call cc-option,-m5407,-m5200)
cflags-$(CONFIG_M68328) := -m68000
cflags-$(CONFIG_M68EZ328) := -m68000
cflags-$(CONFIG_M68VZ328) := -m68000
diff --git a/arch/m68knommu/kernel/.gitignore b/arch/m68knommu/kernel/.gitignore
new file mode 100644
index 000000000000..c5f676c3c224
--- /dev/null
+++ b/arch/m68knommu/kernel/.gitignore
@@ -0,0 +1 @@
+vmlinux.lds
diff --git a/arch/m68knommu/kernel/asm-offsets.c b/arch/m68knommu/kernel/asm-offsets.c
index 24335022fa2c..ffe02f41ad46 100644
--- a/arch/m68knommu/kernel/asm-offsets.c
+++ b/arch/m68knommu/kernel/asm-offsets.c
@@ -21,14 +21,8 @@
int main(void)
{
/* offsets into the task struct */
- DEFINE(TASK_STATE, offsetof(struct task_struct, state));
- DEFINE(TASK_FLAGS, offsetof(struct task_struct, flags));
- DEFINE(TASK_PTRACE, offsetof(struct task_struct, ptrace));
- DEFINE(TASK_BLOCKED, offsetof(struct task_struct, blocked));
DEFINE(TASK_THREAD, offsetof(struct task_struct, thread));
- DEFINE(TASK_THREAD_INFO, offsetof(struct task_struct, stack));
DEFINE(TASK_MM, offsetof(struct task_struct, mm));
- DEFINE(TASK_ACTIVE_MM, offsetof(struct task_struct, active_mm));
/* offsets into the irq_cpustat_t struct */
DEFINE(CPUSTAT_SOFTIRQ_PENDING, offsetof(irq_cpustat_t, __softirq_pending));
@@ -63,7 +57,7 @@ int main(void)
DEFINE(PT_OFF_FORMATVEC, offsetof(struct pt_regs, sr) - 2);
#else
/* bitfields are a bit difficult */
- DEFINE(PT_OFF_VECTOR, offsetof(struct pt_regs, pc) + 4);
+ DEFINE(PT_OFF_FORMATVEC, offsetof(struct pt_regs, pc) + 4);
#endif
/* signal defines */
@@ -75,11 +69,8 @@ int main(void)
DEFINE(PT_PTRACED, PT_PTRACED);
/* Offsets in thread_info structure */
- DEFINE(TI_TASK, offsetof(struct thread_info, task));
- DEFINE(TI_EXECDOMAIN, offsetof(struct thread_info, exec_domain));
DEFINE(TI_FLAGS, offsetof(struct thread_info, flags));
DEFINE(TI_PREEMPTCOUNT, offsetof(struct thread_info, preempt_count));
- DEFINE(TI_CPU, offsetof(struct thread_info, cpu));
return 0;
}
diff --git a/arch/m68knommu/kernel/process.c b/arch/m68knommu/kernel/process.c
index 6d3390590e5b..e2a63af5d517 100644
--- a/arch/m68knommu/kernel/process.c
+++ b/arch/m68knommu/kernel/process.c
@@ -19,7 +19,6 @@
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/smp.h>
-#include <linux/smp_lock.h>
#include <linux/stddef.h>
#include <linux/unistd.h>
#include <linux/ptrace.h>
diff --git a/arch/m68knommu/kernel/ptrace.c b/arch/m68knommu/kernel/ptrace.c
index f6be1248d216..6709fb707335 100644
--- a/arch/m68knommu/kernel/ptrace.c
+++ b/arch/m68knommu/kernel/ptrace.c
@@ -18,6 +18,7 @@
#include <linux/ptrace.h>
#include <linux/user.h>
#include <linux/signal.h>
+#include <linux/tracehook.h>
#include <asm/uaccess.h>
#include <asm/page.h>
@@ -111,9 +112,12 @@ void ptrace_disable(struct task_struct *child)
user_disable_single_step(child);
}
-long arch_ptrace(struct task_struct *child, long request, long addr, long data)
+long arch_ptrace(struct task_struct *child, long request,
+ unsigned long addr, unsigned long data)
{
int ret;
+ int regno = addr >> 2;
+ unsigned long __user *datap = (unsigned long __user *) data;
switch (request) {
/* read the word at location addr in the USER area. */
@@ -121,71 +125,48 @@ long arch_ptrace(struct task_struct *child, long request, long addr, long data)
unsigned long tmp;
ret = -EIO;
- if ((addr & 3) || addr < 0 ||
- addr > sizeof(struct user) - 3)
+ if ((addr & 3) || addr > sizeof(struct user) - 3)
break;
tmp = 0; /* Default return condition */
- addr = addr >> 2; /* temporary hack. */
ret = -EIO;
- if (addr < 19) {
- tmp = get_reg(child, addr);
- if (addr == PT_SR)
+ if (regno < 19) {
+ tmp = get_reg(child, regno);
+ if (regno == PT_SR)
tmp >>= 16;
- } else if (addr >= 21 && addr < 49) {
- tmp = child->thread.fp[addr - 21];
-#ifdef CONFIG_M68KFPU_EMU
- /* Convert internal fpu reg representation
- * into long double format
- */
- if (FPU_IS_EMU && (addr < 45) && !(addr % 3))
- tmp = ((tmp & 0xffff0000) << 15) |
- ((tmp & 0x0000ffff) << 16);
-#endif
- } else if (addr == 49) {
+ } else if (regno >= 21 && regno < 49) {
+ tmp = child->thread.fp[regno - 21];
+ } else if (regno == 49) {
tmp = child->mm->start_code;
- } else if (addr == 50) {
+ } else if (regno == 50) {
tmp = child->mm->start_data;
- } else if (addr == 51) {
+ } else if (regno == 51) {
tmp = child->mm->end_code;
} else
break;
- ret = put_user(tmp,(unsigned long *) data);
+ ret = put_user(tmp, datap);
break;
}
case PTRACE_POKEUSR: /* write the word at location addr in the USER area */
ret = -EIO;
- if ((addr & 3) || addr < 0 ||
- addr > sizeof(struct user) - 3)
+ if ((addr & 3) || addr > sizeof(struct user) - 3)
break;
- addr = addr >> 2; /* temporary hack. */
-
- if (addr == PT_SR) {
+ if (regno == PT_SR) {
data &= SR_MASK;
data <<= 16;
data |= get_reg(child, PT_SR) & ~(SR_MASK << 16);
}
- if (addr < 19) {
- if (put_reg(child, addr, data))
+ if (regno < 19) {
+ if (put_reg(child, regno, data))
break;
ret = 0;
break;
}
- if (addr >= 21 && addr < 48)
+ if (regno >= 21 && regno < 48)
{
-#ifdef CONFIG_M68KFPU_EMU
- /* Convert long double format
- * into internal fpu reg representation
- */
- if (FPU_IS_EMU && (addr < 45) && !(addr % 3)) {
- data = (unsigned long)data << 15;
- data = (data & 0xffff0000) |
- ((data & 0x0000ffff) >> 1);
- }
-#endif
- child->thread.fp[addr - 21] = data;
+ child->thread.fp[regno - 21] = data;
ret = 0;
}
break;
@@ -197,11 +178,11 @@ long arch_ptrace(struct task_struct *child, long request, long addr, long data)
tmp = get_reg(child, i);
if (i == PT_SR)
tmp >>= 16;
- if (put_user(tmp, (unsigned long *) data)) {
+ if (put_user(tmp, datap)) {
ret = -EFAULT;
break;
}
- data += sizeof(long);
+ datap++;
}
ret = 0;
break;
@@ -211,7 +192,7 @@ long arch_ptrace(struct task_struct *child, long request, long addr, long data)
int i;
unsigned long tmp;
for (i = 0; i < 19; i++) {
- if (get_user(tmp, (unsigned long *) data)) {
+ if (get_user(tmp, datap)) {
ret = -EFAULT;
break;
}
@@ -221,7 +202,7 @@ long arch_ptrace(struct task_struct *child, long request, long addr, long data)
tmp |= get_reg(child, PT_SR) & ~(SR_MASK << 16);
}
put_reg(child, i, tmp);
- data += sizeof(long);
+ datap++;
}
ret = 0;
break;
@@ -230,7 +211,7 @@ long arch_ptrace(struct task_struct *child, long request, long addr, long data)
#ifdef PTRACE_GETFPREGS
case PTRACE_GETFPREGS: { /* Get the child FPU state. */
ret = 0;
- if (copy_to_user((void *)data, &child->thread.fp,
+ if (copy_to_user(datap, &child->thread.fp,
sizeof(struct user_m68kfp_struct)))
ret = -EFAULT;
break;
@@ -240,7 +221,7 @@ long arch_ptrace(struct task_struct *child, long request, long addr, long data)
#ifdef PTRACE_SETFPREGS
case PTRACE_SETFPREGS: { /* Set the child FPU state. */
ret = 0;
- if (copy_from_user(&child->thread.fp, (void *)data,
+ if (copy_from_user(&child->thread.fp, datap,
sizeof(struct user_m68kfp_struct)))
ret = -EFAULT;
break;
@@ -248,8 +229,7 @@ long arch_ptrace(struct task_struct *child, long request, long addr, long data)
#endif
case PTRACE_GET_THREAD_AREA:
- ret = put_user(task_thread_info(child)->tp_value,
- (unsigned long __user *)data);
+ ret = put_user(task_thread_info(child)->tp_value, datap);
break;
default:
@@ -259,21 +239,17 @@ long arch_ptrace(struct task_struct *child, long request, long addr, long data)
return ret;
}
-asmlinkage void syscall_trace(void)
+asmlinkage int syscall_trace_enter(void)
{
- if (!test_thread_flag(TIF_SYSCALL_TRACE))
- return;
- if (!(current->ptrace & PT_PTRACED))
- return;
- ptrace_notify(SIGTRAP | ((current->ptrace & PT_TRACESYSGOOD)
- ? 0x80 : 0));
- /*
- * this isn't the same as continuing with a signal, but it will do
- * for normal use. strace only continues with a signal if the
- * stopping signal is not SIGTRAP. -brl
- */
- if (current->exit_code) {
- send_sig(current->exit_code, current, 1);
- current->exit_code = 0;
- }
+ int ret = 0;
+
+ if (test_thread_flag(TIF_SYSCALL_TRACE))
+ ret = tracehook_report_syscall_entry(task_pt_regs(current));
+ return ret;
+}
+
+asmlinkage void syscall_trace_leave(void)
+{
+ if (test_thread_flag(TIF_SYSCALL_TRACE))
+ tracehook_report_syscall_exit(task_pt_regs(current), 0);
}
diff --git a/arch/m68knommu/kernel/setup.c b/arch/m68knommu/kernel/setup.c
index ba92b90d5fbc..c684adf5dc40 100644
--- a/arch/m68knommu/kernel/setup.c
+++ b/arch/m68knommu/kernel/setup.c
@@ -54,9 +54,6 @@ void (*mach_reset)(void);
void (*mach_halt)(void);
void (*mach_power_off)(void);
-#ifdef CONFIG_M68000
- #define CPU "MC68000"
-#endif
#ifdef CONFIG_M68328
#define CPU "MC68328"
#endif
diff --git a/arch/m68knommu/kernel/time.c b/arch/m68knommu/kernel/time.c
index 7089dd9d843b..d6ac2a43453c 100644
--- a/arch/m68knommu/kernel/time.c
+++ b/arch/m68knommu/kernel/time.c
@@ -60,13 +60,16 @@ static unsigned long read_rtc_mmss(void)
{
unsigned int year, mon, day, hour, min, sec;
- if (mach_gettod)
+ if (mach_gettod) {
mach_gettod(&year, &mon, &day, &hour, &min, &sec);
- else
- year = mon = day = hour = min = sec = 0;
+ if ((year += 1900) < 1970)
+ year += 100;
+ } else {
+ year = 1970;
+ mon = day = 1;
+ hour = min = sec = 0;
+ }
- if ((year += 1900) < 1970)
- year += 100;
return mktime(year, mon, day, hour, min, sec);
}
diff --git a/arch/m68knommu/kernel/traps.c b/arch/m68knommu/kernel/traps.c
index 3739c8f657d7..a768008dfd06 100644
--- a/arch/m68knommu/kernel/traps.c
+++ b/arch/m68knommu/kernel/traps.c
@@ -179,14 +179,16 @@ static void __show_stack(struct task_struct *task, unsigned long *stack)
void bad_super_trap(struct frame *fp)
{
+ int vector = (fp->ptregs.vector >> 2) & 0xff;
+
console_verbose();
- if (fp->ptregs.vector < 4 * ARRAY_SIZE(vec_names))
+ if (vector < ARRAY_SIZE(vec_names))
printk (KERN_WARNING "*** %s *** FORMAT=%X\n",
- vec_names[(fp->ptregs.vector) >> 2],
+ vec_names[vector],
fp->ptregs.format);
else
printk (KERN_WARNING "*** Exception %d *** FORMAT=%X\n",
- (fp->ptregs.vector) >> 2,
+ vector,
fp->ptregs.format);
printk (KERN_WARNING "Current process id is %d\n", current->pid);
die_if_kernel("BAD KERNEL TRAP", &fp->ptregs, 0);
@@ -195,10 +197,11 @@ void bad_super_trap(struct frame *fp)
asmlinkage void trap_c(struct frame *fp)
{
int sig;
+ int vector = (fp->ptregs.vector >> 2) & 0xff;
siginfo_t info;
if (fp->ptregs.sr & PS_S) {
- if ((fp->ptregs.vector >> 2) == VEC_TRACE) {
+ if (vector == VEC_TRACE) {
/* traced a trapping instruction */
} else
bad_super_trap(fp);
@@ -206,7 +209,7 @@ asmlinkage void trap_c(struct frame *fp)
}
/* send the appropriate signal to the user program */
- switch ((fp->ptregs.vector) >> 2) {
+ switch (vector) {
case VEC_ADDRERR:
info.si_code = BUS_ADRALN;
sig = SIGBUS;
@@ -360,16 +363,3 @@ void show_stack(struct task_struct *task, unsigned long *stack)
else
__show_stack(task, stack);
}
-
-#ifdef CONFIG_M68KFPU_EMU
-asmlinkage void fpemu_signal(int signal, int code, void *addr)
-{
- siginfo_t info;
-
- info.si_signo = signal;
- info.si_errno = 0;
- info.si_code = code;
- info.si_addr = addr;
- force_sig_info(signal, &info, current);
-}
-#endif
diff --git a/arch/m68knommu/platform/5206/Makefile b/arch/m68knommu/platform/5206/Makefile
index 113c33390064..b5db05625cfa 100644
--- a/arch/m68knommu/platform/5206/Makefile
+++ b/arch/m68knommu/platform/5206/Makefile
@@ -8,8 +8,8 @@
# on the console port whenever a DBG interrupt occurs. You have to
# set up you HW breakpoints to trigger a DBG interrupt:
#
-# EXTRA_CFLAGS += -DTRAP_DBG_INTERRUPT
-# EXTRA_AFLAGS += -DTRAP_DBG_INTERRUPT
+# ccflags-y := -DTRAP_DBG_INTERRUPT
+# asflags-y := -DTRAP_DBG_INTERRUPT
#
asflags-$(CONFIG_FULLDEBUG) := -DDEBUGGER_COMPATIBLE_CACHE=1
diff --git a/arch/m68knommu/platform/5206e/Makefile b/arch/m68knommu/platform/5206e/Makefile
index 113c33390064..b5db05625cfa 100644
--- a/arch/m68knommu/platform/5206e/Makefile
+++ b/arch/m68knommu/platform/5206e/Makefile
@@ -8,8 +8,8 @@
# on the console port whenever a DBG interrupt occurs. You have to
# set up your HW breakpoints to trigger a DBG interrupt:
#
-# EXTRA_CFLAGS += -DTRAP_DBG_INTERRUPT
-# EXTRA_AFLAGS += -DTRAP_DBG_INTERRUPT
+# ccflags-y := -DTRAP_DBG_INTERRUPT
+# asflags-y := -DTRAP_DBG_INTERRUPT
#
asflags-$(CONFIG_FULLDEBUG) := -DDEBUGGER_COMPATIBLE_CACHE=1
diff --git a/arch/m68knommu/platform/520x/Makefile b/arch/m68knommu/platform/520x/Makefile
index 435ab3483dc1..ad3f4e5a57ce 100644
--- a/arch/m68knommu/platform/520x/Makefile
+++ b/arch/m68knommu/platform/520x/Makefile
@@ -8,8 +8,8 @@
# on the console port whenever a DBG interrupt occurs. You have to
# set up your HW breakpoints to trigger a DBG interrupt:
#
-# EXTRA_CFLAGS += -DTRAP_DBG_INTERRUPT
-# EXTRA_AFLAGS += -DTRAP_DBG_INTERRUPT
+# ccflags-y := -DTRAP_DBG_INTERRUPT
+# asflags-y := -DTRAP_DBG_INTERRUPT
#
asflags-$(CONFIG_FULLDEBUG) := -DDEBUGGER_COMPATIBLE_CACHE=1
diff --git a/arch/m68knommu/platform/523x/Makefile b/arch/m68knommu/platform/523x/Makefile
index b8f9b45440c2..c04b8f71c88c 100644
--- a/arch/m68knommu/platform/523x/Makefile
+++ b/arch/m68knommu/platform/523x/Makefile
@@ -8,8 +8,8 @@
# on the console port whenever a DBG interrupt occurs. You have to
# set up your HW breakpoints to trigger a DBG interrupt:
#
-# EXTRA_CFLAGS += -DTRAP_DBG_INTERRUPT
-# EXTRA_AFLAGS += -DTRAP_DBG_INTERRUPT
+# ccflags-y := -DTRAP_DBG_INTERRUPT
+# asflags-y := -DTRAP_DBG_INTERRUPT
#
asflags-$(CONFIG_FULLDEBUG) := -DDEBUGGER_COMPATIBLE_CACHE=1
diff --git a/arch/m68knommu/platform/5249/Makefile b/arch/m68knommu/platform/5249/Makefile
index f56225d1582f..4bed30fd0073 100644
--- a/arch/m68knommu/platform/5249/Makefile
+++ b/arch/m68knommu/platform/5249/Makefile
@@ -8,8 +8,8 @@
# on the console port whenever a DBG interrupt occurs. You have to
# set up your HW breakpoints to trigger a DBG interrupt:
#
-# EXTRA_CFLAGS += -DTRAP_DBG_INTERRUPT
-# EXTRA_AFLAGS += -DTRAP_DBG_INTERRUPT
+# ccflags-y := -DTRAP_DBG_INTERRUPT
+# asflags-y := -DTRAP_DBG_INTERRUPT
#
asflags-$(CONFIG_FULLDEBUG) := -DDEBUGGER_COMPATIBLE_CACHE=1
diff --git a/arch/m68knommu/platform/5272/Makefile b/arch/m68knommu/platform/5272/Makefile
index 93673ef8e2c1..34110fc14301 100644
--- a/arch/m68knommu/platform/5272/Makefile
+++ b/arch/m68knommu/platform/5272/Makefile
@@ -8,8 +8,8 @@
# on the console port whenever a DBG interrupt occurs. You have to
# set up your HW breakpoints to trigger a DBG interrupt:
#
-# EXTRA_CFLAGS += -DTRAP_DBG_INTERRUPT
-# EXTRA_AFLAGS += -DTRAP_DBG_INTERRUPT
+# ccflags-y := -DTRAP_DBG_INTERRUPT
+# asflags-y := -DTRAP_DBG_INTERRUPT
#
asflags-$(CONFIG_FULLDEBUG) := -DDEBUGGER_COMPATIBLE_CACHE=1
diff --git a/arch/m68knommu/platform/5272/config.c b/arch/m68knommu/platform/5272/config.c
index 59278c0887d0..65bb582734e1 100644
--- a/arch/m68knommu/platform/5272/config.c
+++ b/arch/m68knommu/platform/5272/config.c
@@ -13,6 +13,8 @@
#include <linux/param.h>
#include <linux/init.h>
#include <linux/io.h>
+#include <linux/phy.h>
+#include <linux/phy_fixed.h>
#include <asm/machdep.h>
#include <asm/coldfire.h>
#include <asm/mcfsim.h>
@@ -148,9 +150,23 @@ void __init config_BSP(char *commandp, int size)
/***************************************************************************/
+/*
+ * Some 5272-based boards have the FEC ethernet directly connected to
+ * an ethernet switch. In this case we need to use the fixed phy type,
+ * and we need to declare it early in boot.
+ */
+static struct fixed_phy_status nettel_fixed_phy_status __initdata = {
+ .link = 1,
+ .speed = 100,
+ .duplex = 0,
+};
+
+/***************************************************************************/
+
static int __init init_BSP(void)
{
m5272_uarts_init();
+ fixed_phy_add(PHY_POLL, 0, &nettel_fixed_phy_status);
platform_add_devices(m5272_devices, ARRAY_SIZE(m5272_devices));
return 0;
}
diff --git a/arch/m68knommu/platform/5272/intc.c b/arch/m68knommu/platform/5272/intc.c
index 7081e0a9720e..3cf681c177aa 100644
--- a/arch/m68knommu/platform/5272/intc.c
+++ b/arch/m68knommu/platform/5272/intc.c
@@ -12,6 +12,7 @@
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/interrupt.h>
+#include <linux/kernel_stat.h>
#include <linux/irq.h>
#include <linux/io.h>
#include <asm/coldfire.h>
@@ -29,6 +30,10 @@
* via a set of 4 "Interrupt Controller Registers" (ICR). There is a
* loose mapping of vector number to register and internal bits, but
* a table is the easiest and quickest way to map them.
+ *
+ * Note that the external interrupts are edge triggered (unlike the
+ * internal interrupt sources, which are level triggered), which means
+ * they also need acknowledging via the acknowledge bits.
*/
struct irqmap {
unsigned char icr;
@@ -68,6 +73,11 @@ static struct irqmap intc_irqmap[MCFINT_VECMAX - MCFINT_VECBASE] = {
/*MCF_IRQ_SWTO*/ { .icr = MCFSIM_ICR4, .index = 16, .ack = 0, },
};
+/*
+ * Masking an interrupt also has the side effect of acking it (for the
+ * external irqs), so this mask function also serves as the chip's
+ * mask_ack function.
+ */
static void intc_irq_mask(unsigned int irq)
{
if ((irq >= MCFINT_VECBASE) && (irq <= MCFINT_VECMAX)) {
@@ -95,7 +105,9 @@ static void intc_irq_ack(unsigned int irq)
irq -= MCFINT_VECBASE;
if (intc_irqmap[irq].ack) {
u32 v;
- v = 0xd << intc_irqmap[irq].index;
+ v = readl(MCF_MBAR + intc_irqmap[irq].icr);
+ v &= (0x7 << intc_irqmap[irq].index);
+ v |= (0x8 << intc_irqmap[irq].index);
writel(v, MCF_MBAR + intc_irqmap[irq].icr);
}
}
@@ -103,21 +115,47 @@ static void intc_irq_ack(unsigned int irq)
static int intc_irq_set_type(unsigned int irq, unsigned int type)
{
- /* We can set the edge type here for external interrupts */
+ if ((irq >= MCFINT_VECBASE) && (irq <= MCFINT_VECMAX)) {
+ irq -= MCFINT_VECBASE;
+ if (intc_irqmap[irq].ack) {
+ u32 v;
+ v = readl(MCF_MBAR + MCFSIM_PITR);
+ if (type == IRQ_TYPE_EDGE_FALLING)
+ v &= ~(0x1 << (32 - irq));
+ else
+ v |= (0x1 << (32 - irq));
+ writel(v, MCF_MBAR + MCFSIM_PITR);
+ }
+ }
return 0;
}
+/*
+ * Simple flow handler to deal with the external edge triggered interrupts.
+ * We need to be careful with the masking/acking due to the side effects
+ * of masking an interrupt.
+ */
+static void intc_external_irq(unsigned int irq, struct irq_desc *desc)
+{
+ kstat_incr_irqs_this_cpu(irq, desc);
+ desc->status |= IRQ_INPROGRESS;
+ desc->chip->ack(irq);
+ handle_IRQ_event(irq, desc->action);
+ desc->status &= ~IRQ_INPROGRESS;
+}
+
static struct irq_chip intc_irq_chip = {
.name = "CF-INTC",
.mask = intc_irq_mask,
.unmask = intc_irq_unmask,
+ .mask_ack = intc_irq_mask,
.ack = intc_irq_ack,
.set_type = intc_irq_set_type,
};
void __init init_IRQ(void)
{
- int irq;
+ int irq, edge;
init_vectors();
@@ -128,11 +166,17 @@ void __init init_IRQ(void)
writel(0x88888888, MCF_MBAR + MCFSIM_ICR4);
for (irq = 0; (irq < NR_IRQS); irq++) {
- irq_desc[irq].status = IRQ_DISABLED;
- irq_desc[irq].action = NULL;
- irq_desc[irq].depth = 1;
- irq_desc[irq].chip = &intc_irq_chip;
- intc_irq_set_type(irq, 0);
+ set_irq_chip(irq, &intc_irq_chip);
+ edge = 0;
+ if ((irq >= MCFINT_VECBASE) && (irq <= MCFINT_VECMAX))
+ edge = intc_irqmap[irq - MCFINT_VECBASE].ack;
+ if (edge) {
+ set_irq_type(irq, IRQ_TYPE_EDGE_RISING);
+ set_irq_handler(irq, intc_external_irq);
+ } else {
+ set_irq_type(irq, IRQ_TYPE_LEVEL_HIGH);
+ set_irq_handler(irq, handle_level_irq);
+ }
}
}
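With the external lines now routed through the edge flow handler, a driver claims them in the usual way. An editorial sketch, where the IRQ number macro and the handler names are illustrative and not part of this patch:

static irqreturn_t eint1_handler_sketch(int irq, void *dev_id)
{
	return IRQ_HANDLED;
}

static int __init eint1_claim_sketch(void)
{
	/* the trigger flag ends up in intc_irq_set_type(), which
	 * reprograms MCFSIM_PITR for the requested edge */
	return request_irq(MCF_IRQ_EINT1, eint1_handler_sketch,
			   IRQF_TRIGGER_FALLING, "eint1", NULL);
}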
diff --git a/arch/m68knommu/platform/527x/Makefile b/arch/m68knommu/platform/527x/Makefile
index 3d90e6d92459..6ac4b57370ea 100644
--- a/arch/m68knommu/platform/527x/Makefile
+++ b/arch/m68knommu/platform/527x/Makefile
@@ -8,8 +8,8 @@
# on the console port whenever a DBG interrupt occurs. You have to
# set up your HW breakpoints to trigger a DBG interrupt:
#
-# EXTRA_CFLAGS += -DTRAP_DBG_INTERRUPT
-# EXTRA_AFLAGS += -DTRAP_DBG_INTERRUPT
+# ccflags-y := -DTRAP_DBG_INTERRUPT
+# asflags-y := -DTRAP_DBG_INTERRUPT
#
asflags-$(CONFIG_FULLDEBUG) := -DDEBUGGER_COMPATIBLE_CACHE=1
diff --git a/arch/m68knommu/platform/528x/Makefile b/arch/m68knommu/platform/528x/Makefile
index 3d90e6d92459..6ac4b57370ea 100644
--- a/arch/m68knommu/platform/528x/Makefile
+++ b/arch/m68knommu/platform/528x/Makefile
@@ -8,8 +8,8 @@
# on the console port whenever a DBG interrupt occurs. You have to
# set up your HW breakpoints to trigger a DBG interrupt:
#
-# EXTRA_CFLAGS += -DTRAP_DBG_INTERRUPT
-# EXTRA_AFLAGS += -DTRAP_DBG_INTERRUPT
+# ccflags-y := -DTRAP_DBG_INTERRUPT
+# asflags-y := -DTRAP_DBG_INTERRUPT
#
asflags-$(CONFIG_FULLDEBUG) := -DDEBUGGER_COMPATIBLE_CACHE=1
diff --git a/arch/m68knommu/platform/5307/Makefile b/arch/m68knommu/platform/5307/Makefile
index 6de526976828..d4293b791f2e 100644
--- a/arch/m68knommu/platform/5307/Makefile
+++ b/arch/m68knommu/platform/5307/Makefile
@@ -8,8 +8,8 @@
# on the console port whenever a DBG interrupt occurs. You have to
# set up your HW breakpoints to trigger a DBG interrupt:
#
-# EXTRA_CFLAGS += -DTRAP_DBG_INTERRUPT
-# EXTRA_AFLAGS += -DTRAP_DBG_INTERRUPT
+# ccflags-y := -DTRAP_DBG_INTERRUPT
+# asflags-y := -DTRAP_DBG_INTERRUPT
#
asflags-$(CONFIG_FULLDEBUG) := -DDEBUGGER_COMPATIBLE_CACHE=1
diff --git a/arch/m68knommu/platform/532x/Makefile b/arch/m68knommu/platform/532x/Makefile
index 4cc23245bcd1..ce01669399c6 100644
--- a/arch/m68knommu/platform/532x/Makefile
+++ b/arch/m68knommu/platform/532x/Makefile
@@ -8,8 +8,8 @@
# on the console port whenever a DBG interrupt occurs. You have to
# set up your HW breakpoints to trigger a DBG interrupt:
#
-# EXTRA_CFLAGS += -DTRAP_DBG_INTERRUPT
-# EXTRA_AFLAGS += -DTRAP_DBG_INTERRUPT
+# ccflags-y := -DTRAP_DBG_INTERRUPT
+# asflags-y := -DTRAP_DBG_INTERRUPT
#
asflags-$(CONFIG_FULLDEBUG) := -DDEBUGGER_COMPATIBLE_CACHE=1
diff --git a/arch/m68knommu/platform/5407/Makefile b/arch/m68knommu/platform/5407/Makefile
index dee62c5dbaa6..e83fe148eddc 100644
--- a/arch/m68knommu/platform/5407/Makefile
+++ b/arch/m68knommu/platform/5407/Makefile
@@ -8,8 +8,8 @@
# on the console port whenever a DBG interrupt occurs. You have to
# set up your HW breakpoints to trigger a DBG interrupt:
#
-# EXTRA_CFLAGS += -DTRAP_DBG_INTERRUPT
-# EXTRA_AFLAGS += -DTRAP_DBG_INTERRUPT
+# ccflags-y := -DTRAP_DBG_INTERRUPT
+# asflags-y := -DTRAP_DBG_INTERRUPT
#
asflags-$(CONFIG_FULLDEBUG) := -DDEBUGGER_COMPATIBLE_CACHE=1
diff --git a/arch/m68knommu/platform/548x/Makefile b/arch/m68knommu/platform/548x/Makefile
new file mode 100644
index 000000000000..e6035e7a2d3f
--- /dev/null
+++ b/arch/m68knommu/platform/548x/Makefile
@@ -0,0 +1,18 @@
+#
+# Makefile for the m68knommu linux kernel.
+#
+
+#
+# If you want to play with the HW breakpoints then you will
+# need to define this, which will give you a stack backtrace
+# on the console port whenever a DBG interrupt occurs. You have to
+# set up your HW breakpoints to trigger a DBG interrupt:
+#
+# EXTRA_CFLAGS += -DTRAP_DBG_INTERRUPT
+# EXTRA_AFLAGS += -DTRAP_DBG_INTERRUPT
+#
+
+asflags-$(CONFIG_FULLDEBUG) := -DDEBUGGER_COMPATIBLE_CACHE=1
+
+obj-y := config.o
+
diff --git a/arch/m68knommu/platform/548x/config.c b/arch/m68knommu/platform/548x/config.c
new file mode 100644
index 000000000000..9888846bd1cf
--- /dev/null
+++ b/arch/m68knommu/platform/548x/config.c
@@ -0,0 +1,115 @@
+/***************************************************************************/
+
+/*
+ * linux/arch/m68knommu/platform/548x/config.c
+ *
+ * Copyright (C) 2010, Philippe De Muyter <phdm@macqel.be>
+ */
+
+/***************************************************************************/
+
+#include <linux/kernel.h>
+#include <linux/param.h>
+#include <linux/init.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <asm/machdep.h>
+#include <asm/coldfire.h>
+#include <asm/m548xsim.h>
+#include <asm/mcfuart.h>
+#include <asm/m548xgpt.h>
+
+/***************************************************************************/
+
+static struct mcf_platform_uart m548x_uart_platform[] = {
+ {
+ .mapbase = MCF_MBAR + MCFUART_BASE1,
+ .irq = 64 + 35,
+ },
+ {
+ .mapbase = MCF_MBAR + MCFUART_BASE2,
+ .irq = 64 + 34,
+ },
+ {
+ .mapbase = MCF_MBAR + MCFUART_BASE3,
+ .irq = 64 + 33,
+ },
+ {
+ .mapbase = MCF_MBAR + MCFUART_BASE4,
+ .irq = 64 + 32,
+ },
+};
+
+static struct platform_device m548x_uart = {
+ .name = "mcfuart",
+ .id = 0,
+ .dev.platform_data = m548x_uart_platform,
+};
+
+static struct platform_device *m548x_devices[] __initdata = {
+ &m548x_uart,
+};
+
+
+/***************************************************************************/
+
+static void __init m548x_uart_init_line(int line, int irq)
+{
+ int rts_cts;
+
+ /* enable io pins */
+ switch (line) {
+ case 0:
+ rts_cts = 0; break;
+ case 1:
+ rts_cts = MCF_PAR_PSC_RTS_RTS; break;
+ case 2:
+ rts_cts = MCF_PAR_PSC_RTS_RTS | MCF_PAR_PSC_CTS_CTS; break;
+ case 3:
+ rts_cts = 0; break;
+ }
+ __raw_writeb(MCF_PAR_PSC_TXD | rts_cts | MCF_PAR_PSC_RXD,
+ MCF_MBAR + MCF_PAR_PSC(line));
+}
+
+static void __init m548x_uarts_init(void)
+{
+ const int nrlines = ARRAY_SIZE(m548x_uart_platform);
+ int line;
+
+ for (line = 0; (line < nrlines); line++)
+ m548x_uart_init_line(line, m548x_uart_platform[line].irq);
+}
+
+/***************************************************************************/
+
+static void mcf548x_reset(void)
+{
+ /* disable interrupts and enable the watchdog */
+ asm("movew #0x2700, %sr\n");
+ __raw_writel(0, MCF_MBAR + MCF_GPT_GMS0);
+ __raw_writel(MCF_GPT_GCIR_CNT(1), MCF_MBAR + MCF_GPT_GCIR0);
+ __raw_writel(MCF_GPT_GMS_WDEN | MCF_GPT_GMS_CE | MCF_GPT_GMS_TMS(4),
+ MCF_MBAR + MCF_GPT_GMS0);
+}
+
+/***************************************************************************/
+
+void __init config_BSP(char *commandp, int size)
+{
+ mach_reset = mcf548x_reset;
+ m548x_uarts_init();
+}
+
+/***************************************************************************/
+
+static int __init init_BSP(void)
+{
+
+ platform_add_devices(m548x_devices, ARRAY_SIZE(m548x_devices));
+ return 0;
+}
+
+arch_initcall(init_BSP);
+
+/***************************************************************************/
diff --git a/arch/m68knommu/platform/68328/entry.S b/arch/m68knommu/platform/68328/entry.S
index 9d80d2c42866..27241e16a526 100644
--- a/arch/m68knommu/platform/68328/entry.S
+++ b/arch/m68knommu/platform/68328/entry.S
@@ -43,10 +43,10 @@ badsys:
jra ret_from_exception
do_trace:
- movel #-ENOSYS,%sp@(PT_OFF_D0) /* needed for strace*/
+ movel #-ENOSYS,%sp@(PT_OFF_D0) /* needed for strace*/
subql #4,%sp
SAVE_SWITCH_STACK
- jbsr syscall_trace
+ jbsr syscall_trace_enter
RESTORE_SWITCH_STACK
addql #4,%sp
movel %sp@(PT_OFF_ORIG_D0),%d1
@@ -57,10 +57,10 @@ do_trace:
lea sys_call_table, %a0
jbsr %a0@(%d1)
-1: movel %d0,%sp@(PT_OFF_D0) /* save the return value */
+1: movel %d0,%sp@(PT_OFF_D0) /* save the return value */
subql #4,%sp /* dummy return address */
SAVE_SWITCH_STACK
- jbsr syscall_trace
+ jbsr syscall_trace_leave
ret_from_signal:
RESTORE_SWITCH_STACK
@@ -71,16 +71,16 @@ ENTRY(system_call)
SAVE_ALL
/* save top of frame*/
- pea %sp@
- jbsr set_esp0
- addql #4,%sp
+ pea %sp@
+ jbsr set_esp0
+ addql #4,%sp
movel %sp@(PT_OFF_ORIG_D0),%d0
movel %sp,%d1 /* get thread_info pointer */
andl #-THREAD_SIZE,%d1
movel %d1,%a2
- btst #TIF_SYSCALL_TRACE,%a2@(TI_FLAGS)
+ btst #(TIF_SYSCALL_TRACE%8),%a2@(TI_FLAGS+(31-TIF_SYSCALL_TRACE)/8)
jne do_trace
cmpl #NR_syscalls,%d0
jcc badsys
@@ -88,10 +88,10 @@ ENTRY(system_call)
lea sys_call_table,%a0
movel %a0@(%d0), %a0
jbsr %a0@
- movel %d0,%sp@(PT_OFF_D0) /* save the return value*/
+ movel %d0,%sp@(PT_OFF_D0) /* save the return value*/
ret_from_exception:
- btst #5,%sp@(PT_OFF_SR) /* check if returning to kernel*/
+ btst #5,%sp@(PT_OFF_SR) /* check if returning to kernel*/
jeq Luser_return /* if so, skip resched, signals*/
Lkernel_return:
@@ -133,7 +133,7 @@ Lreturn:
*/
inthandler1:
SAVE_ALL
- movew %sp@(PT_OFF_VECTOR), %d0
+ movew %sp@(PT_OFF_FORMATVEC), %d0
and #0x3ff, %d0
movel %sp,%sp@-
@@ -144,7 +144,7 @@ inthandler1:
inthandler2:
SAVE_ALL
- movew %sp@(PT_OFF_VECTOR), %d0
+ movew %sp@(PT_OFF_FORMATVEC), %d0
and #0x3ff, %d0
movel %sp,%sp@-
@@ -155,7 +155,7 @@ inthandler2:
inthandler3:
SAVE_ALL
- movew %sp@(PT_OFF_VECTOR), %d0
+ movew %sp@(PT_OFF_FORMATVEC), %d0
and #0x3ff, %d0
movel %sp,%sp@-
@@ -166,7 +166,7 @@ inthandler3:
inthandler4:
SAVE_ALL
- movew %sp@(PT_OFF_VECTOR), %d0
+ movew %sp@(PT_OFF_FORMATVEC), %d0
and #0x3ff, %d0
movel %sp,%sp@-
@@ -177,7 +177,7 @@ inthandler4:
inthandler5:
SAVE_ALL
- movew %sp@(PT_OFF_VECTOR), %d0
+ movew %sp@(PT_OFF_FORMATVEC), %d0
and #0x3ff, %d0
movel %sp,%sp@-
@@ -188,7 +188,7 @@ inthandler5:
inthandler6:
SAVE_ALL
- movew %sp@(PT_OFF_VECTOR), %d0
+ movew %sp@(PT_OFF_FORMATVEC), %d0
and #0x3ff, %d0
movel %sp,%sp@-
@@ -199,7 +199,7 @@ inthandler6:
inthandler7:
SAVE_ALL
- movew %sp@(PT_OFF_VECTOR), %d0
+ movew %sp@(PT_OFF_FORMATVEC), %d0
and #0x3ff, %d0
movel %sp,%sp@-
@@ -210,7 +210,7 @@ inthandler7:
inthandler:
SAVE_ALL
- movew %sp@(PT_OFF_VECTOR), %d0
+ movew %sp@(PT_OFF_FORMATVEC), %d0
and #0x3ff, %d0
movel %sp,%sp@-
diff --git a/arch/m68knommu/platform/68328/head-de2.S b/arch/m68knommu/platform/68328/head-de2.S
index 92d96456d363..f632fdcb93e9 100644
--- a/arch/m68knommu/platform/68328/head-de2.S
+++ b/arch/m68knommu/platform/68328/head-de2.S
@@ -1,11 +1,5 @@
-#if defined(CONFIG_RAM32MB)
-#define MEM_END 0x02000000 /* Memory size 32Mb */
-#elif defined(CONFIG_RAM16MB)
-#define MEM_END 0x01000000 /* Memory size 16Mb */
-#else
#define MEM_END 0x00800000 /* Memory size 8Mb */
-#endif
#undef CRT_DEBUG
diff --git a/arch/m68knommu/platform/68328/head-ram.S b/arch/m68knommu/platform/68328/head-ram.S
index 252b80b02038..7f1aeeacb219 100644
--- a/arch/m68knommu/platform/68328/head-ram.S
+++ b/arch/m68knommu/platform/68328/head-ram.S
@@ -67,33 +67,6 @@ pclp1:
beq pclp1
#endif /* DEBUG */
-#ifdef CONFIG_RELOCATE
- /* Copy me to RAM */
- moveal #__rom_start, %a0
- moveal #_stext, %a1
- moveal #_edata, %a2
-
- /* Copy %a0 to %a1 until %a1 == %a2 */
-LD1:
- movel %a0@+, %d0
- movel %d0, %a1@+
- cmpal %a1, %a2
- bhi LD1
-
-#ifdef DEBUG
- moveq #74, %d7 /* 'J' */
- moveb %d7,0xfffff907 /* No absolute addresses */
-pclp2:
- movew 0xfffff906, %d7
- andw #0x2000, %d7
- beq pclp2
-#endif /* DEBUG */
- /* jump into the RAM copy */
- jmp ram_jump
-ram_jump:
-
-#endif /* CONFIG_RELOCATE */
-
#ifdef DEBUG
moveq #82, %d7 /* 'R' */
moveb %d7,0xfffff907 /* No absolute addresses */
diff --git a/arch/m68knommu/platform/68328/ints.c b/arch/m68knommu/platform/68328/ints.c
index b91ee85d4b5d..865852806a17 100644
--- a/arch/m68knommu/platform/68328/ints.c
+++ b/arch/m68knommu/platform/68328/ints.c
@@ -179,10 +179,8 @@ void __init init_IRQ(void)
IMR = ~0;
for (i = 0; (i < NR_IRQS); i++) {
- irq_desc[i].status = IRQ_DISABLED;
- irq_desc[i].action = NULL;
- irq_desc[i].depth = 1;
- irq_desc[i].chip = &intc_irq_chip;
+ set_irq_chip(irq, &intc_irq_chip);
+ set_irq_handler(irq, handle_level_irq);
}
}
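
The init_IRQ() conversions in this and the following interrupt-controller files all follow the same pattern: instead of poking irq_desc[] fields directly, each line is handed to genirq. A minimal sketch of that registration idiom, using the set_irq_chip()/set_irq_handler() API of this kernel generation (the chip name and the stub mask/unmask callbacks below are made up for illustration, not taken from the patch):

/* illustrative sketch only -- not part of the patch */
#include <linux/init.h>
#include <linux/irq.h>

static void example_irq_mask(unsigned int irq)   { /* mask the line at the controller */ }
static void example_irq_unmask(unsigned int irq) { /* unmask the line at the controller */ }

static struct irq_chip example_irq_chip = {
	.name   = "EXAMPLE-INTC",
	.mask   = example_irq_mask,
	.unmask = example_irq_unmask,
};

void __init example_init_IRQ(void)
{
	int irq;

	/* register every line with genirq instead of writing the
	 * irq_desc[] status/action/depth/chip fields by hand */
	for (irq = 0; irq < NR_IRQS; irq++) {
		set_irq_chip(irq, &example_irq_chip);
		set_irq_handler(irq, handle_level_irq);
	}
}
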
diff --git a/arch/m68knommu/platform/68360/entry.S b/arch/m68knommu/platform/68360/entry.S
index 6d3460a39cac..c131c6e1d92d 100644
--- a/arch/m68knommu/platform/68360/entry.S
+++ b/arch/m68knommu/platform/68360/entry.S
@@ -42,7 +42,7 @@ do_trace:
movel #-ENOSYS,%sp@(PT_OFF_D0) /* needed for strace*/
subql #4,%sp
SAVE_SWITCH_STACK
- jbsr syscall_trace
+ jbsr syscall_trace_enter
RESTORE_SWITCH_STACK
addql #4,%sp
movel %sp@(PT_OFF_ORIG_D0),%d1
@@ -56,7 +56,7 @@ do_trace:
1: movel %d0,%sp@(PT_OFF_D0) /* save the return value */
subql #4,%sp /* dummy return address */
SAVE_SWITCH_STACK
- jbsr syscall_trace
+ jbsr syscall_trace_leave
ret_from_signal:
RESTORE_SWITCH_STACK
@@ -71,7 +71,12 @@ ENTRY(system_call)
jbsr set_esp0
addql #4,%sp
- btst #PF_TRACESYS_BIT,%a2@(TASK_FLAGS+PF_TRACESYS_OFF)
+ movel %sp@(PT_OFF_ORIG_D0),%d0
+
+ movel %sp,%d1 /* get thread_info pointer */
+ andl #-THREAD_SIZE,%d1
+ movel %d1,%a2
+ btst #(TIF_SYSCALL_TRACE%8),%a2@(TI_FLAGS+(31-TIF_SYSCALL_TRACE)/8)
jne do_trace
cmpl #NR_syscalls,%d0
jcc badsys
@@ -124,7 +129,7 @@ Lreturn:
*/
inthandler:
SAVE_ALL
- movew %sp@(PT_OFF_VECTOR), %d0
+ movew %sp@(PT_OFF_FORMATVEC), %d0
and.l #0x3ff, %d0
lsr.l #0x02, %d0
diff --git a/arch/m68knommu/platform/68360/ints.c b/arch/m68knommu/platform/68360/ints.c
index 6f22970d8c20..ad96ab1051f0 100644
--- a/arch/m68knommu/platform/68360/ints.c
+++ b/arch/m68knommu/platform/68360/ints.c
@@ -132,10 +132,8 @@ void init_IRQ(void)
pquicc->intr_cimr = 0x00000000;
for (i = 0; (i < NR_IRQS); i++) {
- irq_desc[i].status = IRQ_DISABLED;
- irq_desc[i].action = NULL;
- irq_desc[i].depth = 1;
- irq_desc[i].chip = &intc_irq_chip;
+ set_irq_chip(irq, &intc_irq_chip);
+ set_irq_handler(irq, handle_level_irq);
}
}
diff --git a/arch/m68knommu/platform/68VZ328/config.c b/arch/m68knommu/platform/68VZ328/config.c
index fc5c63054e98..eabaabe8af36 100644
--- a/arch/m68knommu/platform/68VZ328/config.c
+++ b/arch/m68knommu/platform/68VZ328/config.c
@@ -90,11 +90,6 @@ static void init_hardware(char *command, int size)
PDIQEG &= ~PD(1);
PDIRQEN |= PD(1); /* IRQ enabled */
-#ifdef CONFIG_68328_SERIAL_UART2
- /* Enable RXD TXD port bits to enable UART2 */
- PJSEL &= ~(PJ(5) | PJ(4));
-#endif
-
#ifdef CONFIG_INIT_LCD
/* initialize LCD controller */
LSSA = (long) screen_bits;
diff --git a/arch/m68knommu/platform/coldfire/Makefile b/arch/m68knommu/platform/coldfire/Makefile
index f72a0e5d9996..45f501fa4525 100644
--- a/arch/m68knommu/platform/coldfire/Makefile
+++ b/arch/m68knommu/platform/coldfire/Makefile
@@ -8,8 +8,8 @@
# on the console port whenever a DBG interrupt occurs. You have to
# set up you HW breakpoints to trigger a DBG interrupt:
#
-# EXTRA_CFLAGS += -DTRAP_DBG_INTERRUPT
-# EXTRA_AFLAGS += -DTRAP_DBG_INTERRUPT
+# ccflags-y := -DTRAP_DBG_INTERRUPT
+# asflags-y := -DTRAP_DBG_INTERRUPT
#
asflags-$(CONFIG_FULLDEBUG) := -DDEBUGGER_COMPATIBLE_CACHE=1
@@ -26,6 +26,7 @@ obj-$(CONFIG_M528x) += pit.o intc-2.o
obj-$(CONFIG_M5307) += timers.o intc.o
obj-$(CONFIG_M532x) += timers.o intc-simr.o
obj-$(CONFIG_M5407) += timers.o intc.o
+obj-$(CONFIG_M548x) += sltimers.o intc-2.o
obj-y += pinmux.o gpio.o
extra-y := head.o
diff --git a/arch/m68knommu/platform/coldfire/entry.S b/arch/m68knommu/platform/coldfire/entry.S
index cd79d7e92ce6..5e92bed94b7e 100644
--- a/arch/m68knommu/platform/coldfire/entry.S
+++ b/arch/m68knommu/platform/coldfire/entry.S
@@ -88,7 +88,7 @@ ENTRY(system_call)
movel %d2,PT_OFF_D0(%sp) /* on syscall entry */
subql #4,%sp
SAVE_SWITCH_STACK
- jbsr syscall_trace
+ jbsr syscall_trace_enter
RESTORE_SWITCH_STACK
addql #4,%sp
movel %d3,%a0
@@ -96,7 +96,7 @@ ENTRY(system_call)
movel %d0,%sp@(PT_OFF_D0) /* save the return value */
subql #4,%sp /* dummy return address */
SAVE_SWITCH_STACK
- jbsr syscall_trace
+ jbsr syscall_trace_leave
ret_from_signal:
RESTORE_SWITCH_STACK
diff --git a/arch/m68knommu/platform/coldfire/intc-2.c b/arch/m68knommu/platform/coldfire/intc-2.c
index 5598c8b8661f..85daa2b3001a 100644
--- a/arch/m68knommu/platform/coldfire/intc-2.c
+++ b/arch/m68knommu/platform/coldfire/intc-2.c
@@ -1,5 +1,11 @@
/*
- * intc-1.c
+ * intc-2.c
+ *
+ * General interrupt controller code for the many ColdFire cores that use
+ * interrupt controllers with 63 interrupt sources, organized as 56 fully-
+ * programmable + 7 fixed-level interrupt sources. This includes the 523x
+ * family, the 5270, 5271, 5274, 5275, and the 528x family which have two such
+ * controllers, and the 547x and 548x families which have only one of them.
*
* (C) Copyright 2009, Greg Ungerer <gerg@snapgear.com>
*
@@ -19,21 +25,37 @@
#include <asm/traps.h>
/*
- * Each vector needs a unique priority and level asscoiated with it.
+ * Bit definitions for the ICR family of registers.
+ */
+#define MCFSIM_ICR_LEVEL(l) ((l)<<3) /* Level l intr */
+#define MCFSIM_ICR_PRI(p) (p) /* Priority p intr */
+
+/*
+ * Each vector needs a unique priority and level associated with it.
* We don't really care so much what they are, we don't rely on the
- * tranditional priority interrupt scheme of the m68k/ColdFire.
+ * traditional priority interrupt scheme of the m68k/ColdFire.
*/
-static u8 intc_intpri = 0x36;
+static u8 intc_intpri = MCFSIM_ICR_LEVEL(6) | MCFSIM_ICR_PRI(6);
+
+#ifdef MCFICM_INTC1
+#define NR_VECS 128
+#else
+#define NR_VECS 64
+#endif
static void intc_irq_mask(unsigned int irq)
{
- if ((irq >= MCFINT_VECBASE) && (irq <= MCFINT_VECBASE + 128)) {
+ if ((irq >= MCFINT_VECBASE) && (irq <= MCFINT_VECBASE + NR_VECS)) {
unsigned long imraddr;
u32 val, imrbit;
irq -= MCFINT_VECBASE;
imraddr = MCF_IPSBAR;
+#ifdef MCFICM_INTC1
imraddr += (irq & 0x40) ? MCFICM_INTC1 : MCFICM_INTC0;
+#else
+ imraddr += MCFICM_INTC0;
+#endif
imraddr += (irq & 0x20) ? MCFINTC_IMRH : MCFINTC_IMRL;
imrbit = 0x1 << (irq & 0x1f);
@@ -44,13 +66,17 @@ static void intc_irq_mask(unsigned int irq)
static void intc_irq_unmask(unsigned int irq)
{
- if ((irq >= MCFINT_VECBASE) && (irq <= MCFINT_VECBASE + 128)) {
+ if ((irq >= MCFINT_VECBASE) && (irq <= MCFINT_VECBASE + NR_VECS)) {
unsigned long intaddr, imraddr, icraddr;
u32 val, imrbit;
irq -= MCFINT_VECBASE;
intaddr = MCF_IPSBAR;
+#ifdef MCFICM_INTC1
intaddr += (irq & 0x40) ? MCFICM_INTC1 : MCFICM_INTC0;
+#else
+ intaddr += MCFICM_INTC0;
+#endif
imraddr = intaddr + ((irq & 0x20) ? MCFINTC_IMRH : MCFINTC_IMRL);
icraddr = intaddr + MCFINTC_ICR0 + (irq & 0x3f);
imrbit = 0x1 << (irq & 0x1f);
@@ -67,10 +93,16 @@ static void intc_irq_unmask(unsigned int irq)
}
}
+static int intc_irq_set_type(unsigned int irq, unsigned int type)
+{
+ return 0;
+}
+
static struct irq_chip intc_irq_chip = {
.name = "CF-INTC",
.mask = intc_irq_mask,
.unmask = intc_irq_unmask,
+ .set_type = intc_irq_set_type,
};
void __init init_IRQ(void)
@@ -81,13 +113,14 @@ void __init init_IRQ(void)
/* Mask all interrupt sources */
__raw_writel(0x1, MCF_IPSBAR + MCFICM_INTC0 + MCFINTC_IMRL);
+#ifdef MCFICM_INTC1
__raw_writel(0x1, MCF_IPSBAR + MCFICM_INTC1 + MCFINTC_IMRL);
+#endif
for (irq = 0; (irq < NR_IRQS); irq++) {
- irq_desc[irq].status = IRQ_DISABLED;
- irq_desc[irq].action = NULL;
- irq_desc[irq].depth = 1;
- irq_desc[irq].chip = &intc_irq_chip;
+ set_irq_chip(irq, &intc_irq_chip);
+ set_irq_type(irq, IRQ_TYPE_LEVEL_HIGH);
+ set_irq_handler(irq, handle_level_irq);
}
}
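
The new header comment and the NR_VECS/MCFICM_INTC1 conditionals describe how a vector number is decomposed in the mask/unmask paths: after subtracting MCFINT_VECBASE, bit 6 selects the second controller (when one exists), bit 5 selects IMRH versus IMRL, and the low five bits select the bit inside that 32-bit mask register. A small host-side sketch of that arithmetic (the offsets below are placeholders, not the real MCFICM_INTC0 or MCFINTC_IMRH/IMRL values):

/* illustrative sketch only -- placeholder register offsets */
#include <stdint.h>
#include <stdio.h>

#define VECBASE	64	/* stands in for MCFINT_VECBASE */
#define INTC0	0x0000	/* stands in for MCFICM_INTC0 */
#define INTC1	0x0100	/* stands in for MCFICM_INTC1 */
#define IMRH	0x08	/* stands in for MCFINTC_IMRH */
#define IMRL	0x0c	/* stands in for MCFINTC_IMRL */

int main(void)
{
	unsigned int vec = 77;			/* some vector >= VECBASE */
	unsigned int irq = vec - VECBASE;
	unsigned long imraddr;
	uint32_t imrbit;

	/* bit 6 picks the controller (only meaningful with two of them),
	 * bit 5 picks the high or low half of the 64-bit mask,
	 * bits 4..0 pick the bit inside that 32-bit register */
	imraddr  = (irq & 0x40) ? INTC1 : INTC0;
	imraddr += (irq & 0x20) ? IMRH : IMRL;
	imrbit   = 1u << (irq & 0x1f);

	printf("vector %u -> register offset %#lx, bit mask %#x\n",
	       vec, imraddr, imrbit);
	return 0;
}
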
diff --git a/arch/m68knommu/platform/coldfire/intc-simr.c b/arch/m68knommu/platform/coldfire/intc-simr.c
index 1b01e79c2f63..bb7048636140 100644
--- a/arch/m68knommu/platform/coldfire/intc-simr.c
+++ b/arch/m68knommu/platform/coldfire/intc-simr.c
@@ -1,6 +1,8 @@
/*
* intc-simr.c
*
+ * Interrupt controller code for the ColdFire 5208, 5207 & 532x parts.
+ *
* (C) Copyright 2009, Greg Ungerer <gerg@snapgear.com>
*
* This file is subject to the terms and conditions of the GNU General Public
@@ -68,11 +70,9 @@ void __init init_IRQ(void)
__raw_writeb(0xff, MCFINTC1_SIMR);
for (irq = 0; (irq < NR_IRQS); irq++) {
- irq_desc[irq].status = IRQ_DISABLED;
- irq_desc[irq].action = NULL;
- irq_desc[irq].depth = 1;
- irq_desc[irq].chip = &intc_irq_chip;
- intc_irq_set_type(irq, 0);
+ set_irq_chip(irq, &intc_irq_chip);
+ set_irq_type(irq, IRQ_TYPE_LEVEL_HIGH);
+ set_irq_handler(irq, handle_level_irq);
}
}
diff --git a/arch/m68knommu/platform/coldfire/intc.c b/arch/m68knommu/platform/coldfire/intc.c
index a4560c86db71..60d2fcbe182b 100644
--- a/arch/m68knommu/platform/coldfire/intc.c
+++ b/arch/m68knommu/platform/coldfire/intc.c
@@ -143,11 +143,9 @@ void __init init_IRQ(void)
mcf_maskimr(0xffffffff);
for (irq = 0; (irq < NR_IRQS); irq++) {
- irq_desc[irq].status = IRQ_DISABLED;
- irq_desc[irq].action = NULL;
- irq_desc[irq].depth = 1;
- irq_desc[irq].chip = &intc_irq_chip;
- intc_irq_set_type(irq, 0);
+ set_irq_chip(irq, &intc_irq_chip);
+ set_irq_type(irq, IRQ_TYPE_LEVEL_HIGH);
+ set_irq_handler(irq, handle_level_irq);
}
}
diff --git a/arch/m68knommu/platform/coldfire/sltimers.c b/arch/m68knommu/platform/coldfire/sltimers.c
new file mode 100644
index 000000000000..0a1b937c3e18
--- /dev/null
+++ b/arch/m68knommu/platform/coldfire/sltimers.c
@@ -0,0 +1,145 @@
+/***************************************************************************/
+
+/*
+ * sltimers.c -- generic ColdFire slice timer support.
+ *
+ * Copyright (C) 2009-2010, Philippe De Muyter <phdm@macqel.be>
+ * based on
+ * timers.c -- generic ColdFire hardware timer support.
+ * Copyright (C) 1999-2008, Greg Ungerer <gerg@snapgear.com>
+ */
+
+/***************************************************************************/
+
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/sched.h>
+#include <linux/interrupt.h>
+#include <linux/irq.h>
+#include <linux/profile.h>
+#include <linux/clocksource.h>
+#include <asm/io.h>
+#include <asm/traps.h>
+#include <asm/machdep.h>
+#include <asm/coldfire.h>
+#include <asm/mcfslt.h>
+#include <asm/mcfsim.h>
+
+/***************************************************************************/
+
+#ifdef CONFIG_HIGHPROFILE
+
+/*
+ * By default use Slice Timer 1 as the profiler clock timer.
+ */
+#define PA(a) (MCF_MBAR + MCFSLT_TIMER1 + (a))
+
+/*
+ * Choose a reasonably fast profile timer. Make it an odd value to
+ * try and get good coverage of kernel operations.
+ */
+#define PROFILEHZ 1013
+
+irqreturn_t mcfslt_profile_tick(int irq, void *dummy)
+{
+ /* Reset Slice Timer 1 */
+ __raw_writel(MCFSLT_SSR_BE | MCFSLT_SSR_TE, PA(MCFSLT_SSR));
+ if (current->pid)
+ profile_tick(CPU_PROFILING);
+ return IRQ_HANDLED;
+}
+
+static struct irqaction mcfslt_profile_irq = {
+ .name = "profile timer",
+ .flags = IRQF_DISABLED | IRQF_TIMER,
+ .handler = mcfslt_profile_tick,
+};
+
+void mcfslt_profile_init(void)
+{
+ printk(KERN_INFO "PROFILE: lodging TIMER 1 @ %dHz as profile timer\n",
+ PROFILEHZ);
+
+ setup_irq(MCF_IRQ_PROFILER, &mcfslt_profile_irq);
+
+ /* Set up Slice Timer 1 as the high speed profile clock */
+ __raw_writel(MCF_BUSCLK / PROFILEHZ - 1, PA(MCFSLT_STCNT));
+ __raw_writel(MCFSLT_SCR_RUN | MCFSLT_SCR_IEN | MCFSLT_SCR_TEN,
+ PA(MCFSLT_SCR));
+
+}
+
+#endif /* CONFIG_HIGHPROFILE */
+
+/***************************************************************************/
+
+/*
+ * By default use Slice Timer 0 as the system clock timer.
+ */
+#define TA(a) (MCF_MBAR + MCFSLT_TIMER0 + (a))
+
+static u32 mcfslt_cycles_per_jiffy;
+static u32 mcfslt_cnt;
+
+static irqreturn_t mcfslt_tick(int irq, void *dummy)
+{
+ /* Reset Slice Timer 0 */
+ __raw_writel(MCFSLT_SSR_BE | MCFSLT_SSR_TE, TA(MCFSLT_SSR));
+ mcfslt_cnt += mcfslt_cycles_per_jiffy;
+ return arch_timer_interrupt(irq, dummy);
+}
+
+static struct irqaction mcfslt_timer_irq = {
+ .name = "timer",
+ .flags = IRQF_DISABLED | IRQF_TIMER,
+ .handler = mcfslt_tick,
+};
+
+static cycle_t mcfslt_read_clk(struct clocksource *cs)
+{
+ unsigned long flags;
+ u32 cycles;
+ u16 scnt;
+
+ local_irq_save(flags);
+ scnt = __raw_readl(TA(MCFSLT_SCNT));
+ cycles = mcfslt_cnt;
+ local_irq_restore(flags);
+
+ /* subtract because slice timers count down */
+ return cycles - scnt;
+}
+
+static struct clocksource mcfslt_clk = {
+ .name = "slt",
+ .rating = 250,
+ .read = mcfslt_read_clk,
+ .shift = 20,
+ .mask = CLOCKSOURCE_MASK(32),
+ .flags = CLOCK_SOURCE_IS_CONTINUOUS,
+};
+
+void hw_timer_init(void)
+{
+ mcfslt_cycles_per_jiffy = MCF_BUSCLK / HZ;
+ /*
+ * The ColdFire slice timer (SLT) counts down from STCNT to 0 inclusive,
+ * then reloads STCNT and so on. It therefore actually counts
+ * STCNT + 1 steps for 1 tick, not STCNT. So if you want
+ * n cycles, initialize STCNT with n - 1.
+ */
+ __raw_writel(mcfslt_cycles_per_jiffy - 1, TA(MCFSLT_STCNT));
+ __raw_writel(MCFSLT_SCR_RUN | MCFSLT_SCR_IEN | MCFSLT_SCR_TEN,
+ TA(MCFSLT_SCR));
+ /* initialize mcfslt_cnt knowing that slice timers count down */
+ mcfslt_cnt = mcfslt_cycles_per_jiffy;
+
+ setup_irq(MCF_IRQ_TIMER, &mcfslt_timer_irq);
+
+ mcfslt_clk.mult = clocksource_hz2mult(MCF_BUSCLK, mcfslt_clk.shift);
+ clocksource_register(&mcfslt_clk);
+
+#ifdef CONFIG_HIGHPROFILE
+ mcfslt_profile_init();
+#endif
+}
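
Two details of the new slice-timer code are worth spelling out: hw_timer_init() programs STCNT with n - 1 because the counter passes through STCNT + 1 states per period, and mcfslt_read_clk() subtracts the down-counting SCNT from the accumulated jiffy count. A small worked example with made-up numbers (the MCF_BUSCLK and HZ values below are invented for illustration):

/* illustrative sketch only -- invented clock numbers */
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint32_t busclk = 100000000;		/* pretend MCF_BUSCLK: 100 MHz */
	uint32_t hz = 100;			/* pretend HZ */
	uint32_t cycles_per_jiffy = busclk / hz;

	/* the timer steps through STCNT, STCNT-1, ..., 0 -- STCNT + 1 states,
	 * so n cycles per period means programming STCNT with n - 1 */
	uint32_t stcnt = cycles_per_jiffy - 1;

	/* mcfslt_cnt accumulates whole jiffies; SCNT counts down inside one,
	 * so the elapsed cycle count is the difference */
	uint32_t cnt = cycles_per_jiffy;	/* as initialised in hw_timer_init() */
	uint32_t scnt = 250000;			/* pretend current SCNT reading */

	printf("STCNT=%u, elapsed cycles=%u\n", stcnt, cnt - scnt);
	return 0;
}
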
diff --git a/arch/microblaze/Kconfig b/arch/microblaze/Kconfig
index 692fdfce2a23..387d5ffdfd3a 100644
--- a/arch/microblaze/Kconfig
+++ b/arch/microblaze/Kconfig
@@ -1,8 +1,3 @@
-# For a description of the syntax of this configuration file,
-# see Documentation/kbuild/kconfig-language.txt.
-
-mainmenu "Linux/Microblaze Kernel Configuration"
-
config MICROBLAZE
def_bool y
select HAVE_MEMBLOCK
@@ -121,6 +116,23 @@ config CMDLINE_FORCE
Set this to have arguments from the default kernel command string
override those passed by the boot loader.
+config SECCOMP
+ bool "Enable seccomp to safely compute untrusted bytecode"
+ depends on PROC_FS
+ default y
+ help
+ This kernel feature is useful for number crunching applications
+ that may need to compute untrusted bytecode during their
+ execution. By using pipes or other transports made available to
+ the process as file descriptors supporting the read/write
+ syscalls, it's possible to isolate those applications in
+ their own address space using seccomp. Once seccomp is
+ enabled via /proc/<pid>/seccomp, it cannot be disabled
+ and the task is only allowed to execute a few safe syscalls
+ defined by each seccomp mode.
+
+ If unsure, say Y. Only embedded should say N here.
+
endmenu
menu "Advanced setup"
diff --git a/arch/microblaze/Kconfig.debug b/arch/microblaze/Kconfig.debug
index e6e5e0da28c3..e66e25c4b0b2 100644
--- a/arch/microblaze/Kconfig.debug
+++ b/arch/microblaze/Kconfig.debug
@@ -10,7 +10,7 @@ source "lib/Kconfig.debug"
config EARLY_PRINTK
bool "Early printk function for kernel"
- depends on SERIAL_UARTLITE_CONSOLE
+ depends on SERIAL_UARTLITE_CONSOLE || SERIAL_8250_CONSOLE
default n
help
This option turns on/off early printk messages to console.
diff --git a/arch/microblaze/Makefile b/arch/microblaze/Makefile
index 592c7079de88..15f1f1d1840d 100644
--- a/arch/microblaze/Makefile
+++ b/arch/microblaze/Makefile
@@ -42,11 +42,8 @@ KBUILD_CFLAGS += -ffixed-r31 $(CPUFLAGS-1) $(CPUFLAGS-2)
LDFLAGS :=
LDFLAGS_vmlinux :=
-LIBGCC := $(shell $(CC) $(KBUILD_CFLAGS) -print-libgcc-file-name)
-
head-y := arch/microblaze/kernel/head.o
libs-y += arch/microblaze/lib/
-libs-y += $(LIBGCC)
core-y += arch/microblaze/kernel/
core-y += arch/microblaze/mm/
core-y += arch/microblaze/platform/
@@ -72,12 +69,16 @@ export MMU DTB
all: linux.bin
-BOOT_TARGETS = linux.bin linux.bin.gz simpleImage.%
+# With make 3.82 we cannot mix normal and wildcard targets
+BOOT_TARGETS1 = linux.bin linux.bin.gz
+BOOT_TARGETS2 = simpleImage.%
archclean:
$(Q)$(MAKE) $(clean)=$(boot)
-$(BOOT_TARGETS): vmlinux
+$(BOOT_TARGETS1): vmlinux
+ $(Q)$(MAKE) $(build)=$(boot) $(boot)/$@
+$(BOOT_TARGETS2): vmlinux
$(Q)$(MAKE) $(build)=$(boot) $(boot)/$@
define archhelp
diff --git a/arch/microblaze/include/asm/byteorder.h b/arch/microblaze/include/asm/byteorder.h
index ce9c58732ffc..31902762a426 100644
--- a/arch/microblaze/include/asm/byteorder.h
+++ b/arch/microblaze/include/asm/byteorder.h
@@ -1,6 +1,10 @@
#ifndef _ASM_MICROBLAZE_BYTEORDER_H
#define _ASM_MICROBLAZE_BYTEORDER_H
+#ifdef __MICROBLAZEEL__
+#include <linux/byteorder/little_endian.h>
+#else
#include <linux/byteorder/big_endian.h>
+#endif
#endif /* _ASM_MICROBLAZE_BYTEORDER_H */
diff --git a/arch/microblaze/include/asm/checksum.h b/arch/microblaze/include/asm/checksum.h
index 128bf03b54b7..0185cbefdda4 100644
--- a/arch/microblaze/include/asm/checksum.h
+++ b/arch/microblaze/include/asm/checksum.h
@@ -24,8 +24,13 @@ csum_tcpudp_nofold(__be32 saddr, __be32 daddr, unsigned short len,
"addc %0, %0, %3\n\t"
"addc %0, %0, r0\n\t"
: "+&d" (sum)
- : "d" (saddr), "d" (daddr), "d" (len + proto));
-
+ : "d" (saddr), "d" (daddr),
+#ifdef __MICROBLAZEEL__
+ "d" ((len + proto) << 8)
+#else
+ "d" (len + proto)
+#endif
+);
return sum;
}
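
One way to read the little-endian change: the pseudo-header fields are summed in network byte order, and in a ones-complement sum 2^16 is congruent to 1, so shifting a 16-bit quantity left by 8 and letting the carries fold back is the same as byte-swapping it. That is what adding (len + proto) << 8 achieves on a little-endian build. A small host-side check of the identity (standalone, not the kernel helper):

/* illustrative host-side check, not part of the patch */
#include <stdint.h>
#include <stdio.h>

/* fold a 32-bit accumulator into a 16-bit ones-complement sum */
static uint16_t fold32(uint32_t x)
{
	x = (x & 0xffff) + (x >> 16);
	x = (x & 0xffff) + (x >> 16);
	return (uint16_t)x;
}

int main(void)
{
	uint16_t len = 1500, proto = 6;		/* e.g. a TCP segment */
	uint32_t v = (uint32_t)len + proto;

	/* the value as it would appear in network byte order */
	uint16_t swapped = (uint16_t)(((v & 0xff) << 8) | ((v >> 8) & 0xff));

	/* shift-then-fold equals byte swap because 2^16 == 1 (mod 2^16 - 1) */
	printf("%#x == %#x\n", fold32(v << 8), swapped);
	return 0;
}
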
diff --git a/arch/microblaze/include/asm/cpuinfo.h b/arch/microblaze/include/asm/cpuinfo.h
index b4f5ca33aebf..cd257537ae54 100644
--- a/arch/microblaze/include/asm/cpuinfo.h
+++ b/arch/microblaze/include/asm/cpuinfo.h
@@ -38,6 +38,7 @@ struct cpuinfo {
u32 use_exc;
u32 ver_code;
u32 mmu;
+ u32 endian;
/* CPU caches */
u32 use_icache;
@@ -76,7 +77,6 @@ struct cpuinfo {
u32 num_rd_brk;
u32 num_wr_brk;
u32 cpu_clock_freq; /* store real freq of cpu */
- u32 freq_div_hz; /* store freq/HZ */
/* FPGA family */
u32 fpga_family_code;
@@ -97,7 +97,8 @@ void set_cpuinfo_pvr_full(struct cpuinfo *ci, struct device_node *cpu);
static inline unsigned int fcpu(struct device_node *cpu, char *n)
{
int *val;
- return (val = (int *) of_get_property(cpu, n, NULL)) ? *val : 0;
+ return (val = (int *) of_get_property(cpu, n, NULL)) ?
+ be32_to_cpup(val) : 0;
}
#endif /* _ASM_MICROBLAZE_CPUINFO_H */
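
fcpu() now runs the property value through be32_to_cpup() -- as do the later intc.c, heartbeat.c, timer.c and prom.c hunks -- because device-tree cells are stored big-endian regardless of CPU endianness; on a little-endian MicroBlaze the old bare dereference would hand back byte-swapped numbers. A small host-side illustration of the conversion (a standalone stand-in, not the kernel helper; the cell value is invented):

/* illustrative only: device-tree cells are big-endian in memory */
#include <stdint.h>
#include <stdio.h>

static uint32_t be32_value(const uint8_t *p)
{
	/* assemble from bytes so the result is the same on any host */
	return ((uint32_t)p[0] << 24) | ((uint32_t)p[1] << 16) |
	       ((uint32_t)p[2] << 8)  |  (uint32_t)p[3];
}

int main(void)
{
	/* a "reg" cell of 0x40600000 as it sits in the flattened tree */
	const uint8_t cell[4] = { 0x40, 0x60, 0x00, 0x00 };

	/* a naive *(uint32_t *)cell would read 0x00006040 on a little-endian
	 * host; converting explicitly gives the intended value everywhere */
	printf("%#x\n", be32_value(cell));
	return 0;
}
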
diff --git a/arch/microblaze/include/asm/elf.h b/arch/microblaze/include/asm/elf.h
index 732caf1be741..098dfdde4b06 100644
--- a/arch/microblaze/include/asm/elf.h
+++ b/arch/microblaze/include/asm/elf.h
@@ -71,7 +71,7 @@ typedef elf_fpreg_t elf_fpregset_t[ELF_NFPREG];
#define ELF_ET_DYN_BASE (0x08000000)
-#ifdef __LITTLE_ENDIAN__
+#ifdef __MICROBLAZEEL__
#define ELF_DATA ELFDATA2LSB
#else
#define ELF_DATA ELFDATA2MSB
diff --git a/arch/microblaze/include/asm/gpio.h b/arch/microblaze/include/asm/gpio.h
index 2345ac354d9b..2b2c18be71c6 100644
--- a/arch/microblaze/include/asm/gpio.h
+++ b/arch/microblaze/include/asm/gpio.h
@@ -38,12 +38,9 @@ static inline int gpio_cansleep(unsigned int gpio)
return __gpio_cansleep(gpio);
}
-/*
- * Not implemented, yet.
- */
static inline int gpio_to_irq(unsigned int gpio)
{
- return -ENOSYS;
+ return __gpio_to_irq(gpio);
}
static inline int irq_to_gpio(unsigned int irq)
diff --git a/arch/microblaze/include/asm/io.h b/arch/microblaze/include/asm/io.h
index 00b5398d08c7..eae32220f447 100644
--- a/arch/microblaze/include/asm/io.h
+++ b/arch/microblaze/include/asm/io.h
@@ -243,6 +243,8 @@ static inline void __iomem *__ioremap(phys_addr_t address, unsigned long size,
#define out_8(a, v) __raw_writeb((v), (a))
#define in_8(a) __raw_readb(a)
+#define mmiowb()
+
#define ioport_map(port, nr) ((void __iomem *)(port))
#define ioport_unmap(addr)
diff --git a/arch/microblaze/include/asm/page.h b/arch/microblaze/include/asm/page.h
index cf377d91da71..ed9d0f6e2cdb 100644
--- a/arch/microblaze/include/asm/page.h
+++ b/arch/microblaze/include/asm/page.h
@@ -205,9 +205,6 @@ extern int page_is_ram(unsigned long pfn);
#define TOPHYS(addr) __virt_to_phys(addr)
#ifdef CONFIG_MMU
-#ifdef CONFIG_CONTIGUOUS_PAGE_ALLOC
-#define WANT_PAGE_VIRTUAL 1 /* page alloc 2 relies on this */
-#endif
#define VM_DATA_DEFAULT_FLAGS (VM_READ | VM_WRITE | VM_EXEC | \
VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
diff --git a/arch/microblaze/include/asm/pci.h b/arch/microblaze/include/asm/pci.h
index 5a388eeeb28f..2232ff942ba9 100644
--- a/arch/microblaze/include/asm/pci.h
+++ b/arch/microblaze/include/asm/pci.h
@@ -165,5 +165,7 @@ extern void __init xilinx_pci_init(void);
static inline void __init xilinx_pci_init(void) { return; }
#endif
+#include <asm-generic/pci-dma-compat.h>
+
#endif /* __KERNEL__ */
#endif /* __ASM_MICROBLAZE_PCI_H */
diff --git a/arch/microblaze/include/asm/pgalloc.h b/arch/microblaze/include/asm/pgalloc.h
index c614a893f8a3..ebd35792482c 100644
--- a/arch/microblaze/include/asm/pgalloc.h
+++ b/arch/microblaze/include/asm/pgalloc.h
@@ -165,7 +165,8 @@ extern inline void pte_free(struct mm_struct *mm, struct page *ptepage)
#define __pte_free_tlb(tlb, pte, addr) pte_free((tlb)->mm, (pte))
-#define pmd_populate(mm, pmd, pte) (pmd_val(*(pmd)) = page_address(pte))
+#define pmd_populate(mm, pmd, pte) \
+ (pmd_val(*(pmd)) = (unsigned long)page_address(pte))
#define pmd_populate_kernel(mm, pmd, pte) \
(pmd_val(*(pmd)) = (unsigned long) (pte))
diff --git a/arch/microblaze/include/asm/pgtable.h b/arch/microblaze/include/asm/pgtable.h
index ca2d92871545..cae268c22ba2 100644
--- a/arch/microblaze/include/asm/pgtable.h
+++ b/arch/microblaze/include/asm/pgtable.h
@@ -57,6 +57,13 @@ static inline int pte_file(pte_t pte) { return 0; }
#define pgprot_noncached_wc(prot) prot
+/*
+ * All 32bit addresses are effectively valid for vmalloc...
+ * Sort of meaningless for non-VM targets.
+ */
+#define VMALLOC_START 0
+#define VMALLOC_END 0xffffffff
+
#else /* CONFIG_MMU */
#include <asm-generic/4level-fixup.h>
@@ -497,12 +504,9 @@ static inline pmd_t *pmd_offset(pgd_t *dir, unsigned long address)
#define pte_offset_kernel(dir, addr) \
((pte_t *) pmd_page_kernel(*(dir)) + pte_index(addr))
#define pte_offset_map(dir, addr) \
- ((pte_t *) kmap_atomic(pmd_page(*(dir)), KM_PTE0) + pte_index(addr))
-#define pte_offset_map_nested(dir, addr) \
- ((pte_t *) kmap_atomic(pmd_page(*(dir)), KM_PTE1) + pte_index(addr))
+ ((pte_t *) kmap_atomic(pmd_page(*(dir))) + pte_index(addr))
-#define pte_unmap(pte) kunmap_atomic(pte, KM_PTE0)
-#define pte_unmap_nested(pte) kunmap_atomic(pte, KM_PTE1)
+#define pte_unmap(pte) kunmap_atomic(pte)
/* Encode and decode a nonlinear file mapping entry */
#define PTE_FILE_MAX_BITS 29
diff --git a/arch/microblaze/include/asm/prom.h b/arch/microblaze/include/asm/prom.h
index 101fa098f62a..bdc38312ae4a 100644
--- a/arch/microblaze/include/asm/prom.h
+++ b/arch/microblaze/include/asm/prom.h
@@ -27,6 +27,7 @@
/* Other Prototypes */
extern int early_uartlite_console(void);
+extern int early_uart16550_console(void);
#ifdef CONFIG_PCI
/*
diff --git a/arch/microblaze/include/asm/pvr.h b/arch/microblaze/include/asm/pvr.h
index 9578666e98ba..37db96a15b45 100644
--- a/arch/microblaze/include/asm/pvr.h
+++ b/arch/microblaze/include/asm/pvr.h
@@ -30,7 +30,9 @@ struct pvr_s {
#define PVR0_USE_EXC_MASK 0x04000000
#define PVR0_USE_ICACHE_MASK 0x02000000
#define PVR0_USE_DCACHE_MASK 0x01000000
-#define PVR0_USE_MMU 0x00800000 /* new */
+#define PVR0_USE_MMU 0x00800000
+#define PVR0_USE_BTC 0x00400000
+#define PVR0_ENDI 0x00200000
#define PVR0_VERSION_MASK 0x0000FF00
#define PVR0_USER1_MASK 0x000000FF
@@ -38,9 +40,9 @@ struct pvr_s {
#define PVR1_USER2_MASK 0xFFFFFFFF
/* Configuration PVR masks */
-#define PVR2_D_OPB_MASK 0x80000000
+#define PVR2_D_OPB_MASK 0x80000000 /* or AXI */
#define PVR2_D_LMB_MASK 0x40000000
-#define PVR2_I_OPB_MASK 0x20000000
+#define PVR2_I_OPB_MASK 0x20000000 /* or AXI */
#define PVR2_I_LMB_MASK 0x10000000
#define PVR2_INTERRUPT_IS_EDGE_MASK 0x08000000
#define PVR2_EDGE_IS_POSITIVE_MASK 0x04000000
@@ -63,8 +65,8 @@ struct pvr_s {
#define PVR2_OPCODE_0x0_ILL_MASK 0x00000040
#define PVR2_UNALIGNED_EXC_MASK 0x00000020
#define PVR2_ILL_OPCODE_EXC_MASK 0x00000010
-#define PVR2_IOPB_BUS_EXC_MASK 0x00000008
-#define PVR2_DOPB_BUS_EXC_MASK 0x00000004
+#define PVR2_IOPB_BUS_EXC_MASK 0x00000008 /* or AXI */
+#define PVR2_DOPB_BUS_EXC_MASK 0x00000004 /* or AXI */
#define PVR2_DIV_ZERO_EXC_MASK 0x00000002
#define PVR2_FPU_EXC_MASK 0x00000001
@@ -208,6 +210,8 @@ struct pvr_s {
#define PVR_MMU_TLB_ACCESS(pvr) (pvr.pvr[11] & PVR11_MMU_TLB_ACCESS)
#define PVR_MMU_ZONES(pvr) (pvr.pvr[11] & PVR11_MMU_ZONES)
+/* endian */
+#define PVR_ENDIAN(pvr) (pvr.pvr[0] & PVR0_ENDI)
int cpu_has_pvr(void);
void get_pvr(struct pvr_s *pvr);
diff --git a/arch/microblaze/include/asm/seccomp.h b/arch/microblaze/include/asm/seccomp.h
new file mode 100644
index 000000000000..0d912758a0d7
--- /dev/null
+++ b/arch/microblaze/include/asm/seccomp.h
@@ -0,0 +1,16 @@
+#ifndef _ASM_MICROBLAZE_SECCOMP_H
+#define _ASM_MICROBLAZE_SECCOMP_H
+
+#include <linux/unistd.h>
+
+#define __NR_seccomp_read __NR_read
+#define __NR_seccomp_write __NR_write
+#define __NR_seccomp_exit __NR_exit
+#define __NR_seccomp_sigreturn __NR_sigreturn
+
+#define __NR_seccomp_read_32 __NR_read
+#define __NR_seccomp_write_32 __NR_write
+#define __NR_seccomp_exit_32 __NR_exit
+#define __NR_seccomp_sigreturn_32 __NR_sigreturn
+
+#endif /* _ASM_MICROBLAZE_SECCOMP_H */
diff --git a/arch/microblaze/include/asm/setup.h b/arch/microblaze/include/asm/setup.h
index 782b5c89248e..8f3968971e4e 100644
--- a/arch/microblaze/include/asm/setup.h
+++ b/arch/microblaze/include/asm/setup.h
@@ -25,6 +25,12 @@ void early_printk(const char *fmt, ...);
int setup_early_printk(char *opt);
void disable_early_printk(void);
+#if defined(CONFIG_EARLY_PRINTK)
+#define eprintk early_printk
+#else
+#define eprintk printk
+#endif
+
void heartbeat(void);
void setup_heartbeat(void);
diff --git a/arch/microblaze/include/asm/thread_info.h b/arch/microblaze/include/asm/thread_info.h
index 8a8e9fc6e0c0..b73da2ac21b3 100644
--- a/arch/microblaze/include/asm/thread_info.h
+++ b/arch/microblaze/include/asm/thread_info.h
@@ -127,23 +127,19 @@ static inline struct thread_info *current_thread_info(void)
#define TIF_SECCOMP 10 /* secure computing */
#define TIF_FREEZE 14 /* Freezing for suspend */
-/* FIXME change in entry.S */
-#define TIF_KERNEL_TRACE 8 /* kernel trace active */
-
/* true if poll_idle() is polling TIF_NEED_RESCHED */
#define TIF_POLLING_NRFLAG 16
-#define _TIF_SYSCALL_TRACE (1<<TIF_SYSCALL_TRACE)
-#define _TIF_NOTIFY_RESUME (1<<TIF_NOTIFY_RESUME)
-#define _TIF_SIGPENDING (1<<TIF_SIGPENDING)
-#define _TIF_NEED_RESCHED (1<<TIF_NEED_RESCHED)
-#define _TIF_SINGLESTEP (1<<TIF_SINGLESTEP)
-#define _TIF_IRET (1<<TIF_IRET)
-#define _TIF_POLLING_NRFLAG (1<<TIF_POLLING_NRFLAG)
-#define _TIF_FREEZE (1<<TIF_FREEZE)
+#define _TIF_SYSCALL_TRACE (1 << TIF_SYSCALL_TRACE)
+#define _TIF_NOTIFY_RESUME (1 << TIF_NOTIFY_RESUME)
+#define _TIF_SIGPENDING (1 << TIF_SIGPENDING)
+#define _TIF_NEED_RESCHED (1 << TIF_NEED_RESCHED)
+#define _TIF_SINGLESTEP (1 << TIF_SINGLESTEP)
+#define _TIF_IRET (1 << TIF_IRET)
+#define _TIF_POLLING_NRFLAG (1 << TIF_POLLING_NRFLAG)
+#define _TIF_FREEZE (1 << TIF_FREEZE)
#define _TIF_SYSCALL_AUDIT (1 << TIF_SYSCALL_AUDIT)
#define _TIF_SECCOMP (1 << TIF_SECCOMP)
-#define _TIF_KERNEL_TRACE (1 << TIF_KERNEL_TRACE)
/* work to do in syscall trace */
#define _TIF_WORK_SYSCALL_MASK (_TIF_SYSCALL_TRACE | _TIF_SINGLESTEP | \
diff --git a/arch/microblaze/include/asm/unaligned.h b/arch/microblaze/include/asm/unaligned.h
index 3658d91ac0fb..2b97cbe500e9 100644
--- a/arch/microblaze/include/asm/unaligned.h
+++ b/arch/microblaze/include/asm/unaligned.h
@@ -12,12 +12,18 @@
# ifdef __KERNEL__
-# include <linux/unaligned/be_struct.h>
+# include <linux/unaligned/be_byteshift.h>
# include <linux/unaligned/le_byteshift.h>
# include <linux/unaligned/generic.h>
-# define get_unaligned __get_unaligned_be
-# define put_unaligned __put_unaligned_be
+
+# ifdef __MICROBLAZEEL__
+# define get_unaligned __get_unaligned_le
+# define put_unaligned __put_unaligned_le
+# else
+# define get_unaligned __get_unaligned_be
+# define put_unaligned __put_unaligned_be
+# endif
# endif /* __KERNEL__ */
#endif /* _ASM_MICROBLAZE_UNALIGNED_H */
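
With the endian split above, get_unaligned()/put_unaligned() keep meaning "native byte order at any alignment": the build simply picks the little- or big-endian byteshift helpers to match the CPU. A plain-C sketch of what those byteshift accessors boil down to (standalone stand-ins, not the kernel implementations):

/* illustrative only: byte-by-byte unaligned loads in each byte order */
#include <stdint.h>
#include <stdio.h>

static uint32_t load_le32(const void *p)
{
	const uint8_t *b = p;
	/* little endian: lowest-addressed byte is least significant */
	return b[0] | ((uint32_t)b[1] << 8) |
	       ((uint32_t)b[2] << 16) | ((uint32_t)b[3] << 24);
}

static uint32_t load_be32(const void *p)
{
	const uint8_t *b = p;
	return ((uint32_t)b[0] << 24) | ((uint32_t)b[1] << 16) |
	       ((uint32_t)b[2] << 8) | b[3];
}

int main(void)
{
	uint8_t buf[6] = { 0xaa, 0x11, 0x22, 0x33, 0x44, 0xbb };

	/* buf + 1 is not 4-byte aligned; byte loads make the access safe,
	 * and the __MICROBLAZEEL__ test decides which order is "native" */
	printf("le=%#x be=%#x\n", load_le32(buf + 1), load_be32(buf + 1));
	return 0;
}
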
diff --git a/arch/microblaze/include/asm/unistd.h b/arch/microblaze/include/asm/unistd.h
index 2b67e92a773c..d770b00ec6b1 100644
--- a/arch/microblaze/include/asm/unistd.h
+++ b/arch/microblaze/include/asm/unistd.h
@@ -383,8 +383,11 @@
#define __NR_rt_tgsigqueueinfo 365 /* new */
#define __NR_perf_event_open 366 /* new */
#define __NR_recvmmsg 367 /* new */
+#define __NR_fanotify_init 368
+#define __NR_fanotify_mark 369
+#define __NR_prlimit64 370
-#define __NR_syscalls 368
+#define __NR_syscalls 371
#ifdef __KERNEL__
#ifndef __ASSEMBLY__
diff --git a/arch/microblaze/kernel/cpu/cpuinfo-pvr-full.c b/arch/microblaze/kernel/cpu/cpuinfo-pvr-full.c
index f72dbd66c844..f70a6047f08e 100644
--- a/arch/microblaze/kernel/cpu/cpuinfo-pvr-full.c
+++ b/arch/microblaze/kernel/cpu/cpuinfo-pvr-full.c
@@ -72,6 +72,7 @@ void set_cpuinfo_pvr_full(struct cpuinfo *ci, struct device_node *cpu)
CI(pvr_user2, USER2);
CI(mmu, USE_MMU);
+ CI(endian, ENDIAN);
CI(use_icache, USE_ICACHE);
CI(icache_tagbits, ICACHE_ADDR_TAG_BITS);
diff --git a/arch/microblaze/kernel/cpu/cpuinfo-static.c b/arch/microblaze/kernel/cpu/cpuinfo-static.c
index 6095aa6b5c88..b16b994ca3d2 100644
--- a/arch/microblaze/kernel/cpu/cpuinfo-static.c
+++ b/arch/microblaze/kernel/cpu/cpuinfo-static.c
@@ -119,6 +119,7 @@ void __init set_cpuinfo_static(struct cpuinfo *ci, struct device_node *cpu)
ci->pvr_user2 = fcpu(cpu, "xlnx,pvr-user2");
ci->mmu = fcpu(cpu, "xlnx,use-mmu");
+ ci->endian = fcpu(cpu, "xlnx,endianness");
ci->ver_code = 0;
ci->fpga_family_code = 0;
diff --git a/arch/microblaze/kernel/cpu/cpuinfo.c b/arch/microblaze/kernel/cpu/cpuinfo.c
index 255ef880351e..87c79fa275c3 100644
--- a/arch/microblaze/kernel/cpu/cpuinfo.c
+++ b/arch/microblaze/kernel/cpu/cpuinfo.c
@@ -30,6 +30,8 @@ const struct cpu_ver_key cpu_ver_lookup[] = {
{"7.20.c", 0x0e},
{"7.20.d", 0x0f},
{"7.30.a", 0x10},
+ {"7.30.b", 0x11},
+ {"8.00.a", 0x12},
{NULL, 0},
};
diff --git a/arch/microblaze/kernel/cpu/mb.c b/arch/microblaze/kernel/cpu/mb.c
index 7086e3564281..b4048af02615 100644
--- a/arch/microblaze/kernel/cpu/mb.c
+++ b/arch/microblaze/kernel/cpu/mb.c
@@ -51,11 +51,12 @@ static int show_cpuinfo(struct seq_file *m, void *v)
count = seq_printf(m,
"CPU-Family: MicroBlaze\n"
"FPGA-Arch: %s\n"
- "CPU-Ver: %s\n"
+ "CPU-Ver: %s, %s endian\n"
"CPU-MHz: %d.%02d\n"
"BogoMips: %lu.%02lu\n",
fpga_family,
cpu_ver,
+ cpuinfo.endian ? "little" : "big",
cpuinfo.cpu_clock_freq /
1000000,
cpuinfo.cpu_clock_freq %
diff --git a/arch/microblaze/kernel/cpu/pvr.c b/arch/microblaze/kernel/cpu/pvr.c
index 9bee9382bf74..e01afa68273e 100644
--- a/arch/microblaze/kernel/cpu/pvr.c
+++ b/arch/microblaze/kernel/cpu/pvr.c
@@ -27,7 +27,7 @@
register unsigned tmp __asm__("r3"); \
tmp = 0x0; /* Prevent warning about unused */ \
__asm__ __volatile__ ( \
- ".byte 0x94,0x60,0xa0, " #pvrid "\n\t" \
+ "mfs %0, rpvr" #pvrid ";" \
: "=r" (tmp) : : "memory"); \
val = tmp; \
}
diff --git a/arch/microblaze/kernel/early_printk.c b/arch/microblaze/kernel/early_printk.c
index 7de84923ba07..c3616a080ebf 100644
--- a/arch/microblaze/kernel/early_printk.c
+++ b/arch/microblaze/kernel/early_printk.c
@@ -24,7 +24,8 @@
static u32 early_console_initialized;
static u32 base_addr;
-static void early_printk_putc(char c)
+#ifdef CONFIG_SERIAL_UARTLITE_CONSOLE
+static void early_printk_uartlite_putc(char c)
{
/*
* Limit how many times we'll spin waiting for TX FIFO status.
@@ -45,25 +46,70 @@ static void early_printk_putc(char c)
out_be32(base_addr + 4, c & 0xff);
}
-static void early_printk_write(struct console *unused,
+static void early_printk_uartlite_write(struct console *unused,
const char *s, unsigned n)
{
while (*s && n-- > 0) {
- early_printk_putc(*s);
+ early_printk_uartlite_putc(*s);
if (*s == '\n')
- early_printk_putc('\r');
+ early_printk_uartlite_putc('\r');
s++;
}
}
-static struct console early_serial_console = {
+static struct console early_serial_uartlite_console = {
.name = "earlyser",
- .write = early_printk_write,
+ .write = early_printk_uartlite_write,
.flags = CON_PRINTBUFFER,
.index = -1,
};
+#endif /* CONFIG_SERIAL_UARTLITE_CONSOLE */
-static struct console *early_console = &early_serial_console;
+#ifdef CONFIG_SERIAL_8250_CONSOLE
+static void early_printk_uart16550_putc(char c)
+{
+ /*
+ * Limit how many times we'll spin waiting for TX FIFO status.
+ * This will prevent lockups if the base address is incorrectly
+ * set, or any other issue on the UART.
+ * This limit is pretty arbitrary; unless we are at about 10 baud
+ * we'll never time out on a working UART.
+ */
+
+ #define UART_LSR_TEMT 0x40 /* Transmitter empty */
+ #define UART_LSR_THRE 0x20 /* Transmit-hold-register empty */
+ #define BOTH_EMPTY (UART_LSR_TEMT | UART_LSR_THRE)
+
+ unsigned retries = 10000;
+
+ while (--retries &&
+ !((in_be32(base_addr + 0x14) & BOTH_EMPTY) == BOTH_EMPTY))
+ ;
+
+ if (retries)
+ out_be32(base_addr, c & 0xff);
+}
+
+static void early_printk_uart16550_write(struct console *unused,
+ const char *s, unsigned n)
+{
+ while (*s && n-- > 0) {
+ early_printk_uart16550_putc(*s);
+ if (*s == '\n')
+ early_printk_uart16550_putc('\r');
+ s++;
+ }
+}
+
+static struct console early_serial_uart16550_console = {
+ .name = "earlyser",
+ .write = early_printk_uart16550_write,
+ .flags = CON_PRINTBUFFER,
+ .index = -1,
+};
+#endif /* CONFIG_SERIAL_8250_CONSOLE */
+
+static struct console *early_console;
void early_printk(const char *fmt, ...)
{
@@ -84,20 +130,43 @@ int __init setup_early_printk(char *opt)
if (early_console_initialized)
return 1;
+#ifdef CONFIG_SERIAL_UARTLITE_CONSOLE
base_addr = early_uartlite_console();
if (base_addr) {
early_console_initialized = 1;
#ifdef CONFIG_MMU
early_console_reg_tlb_alloc(base_addr);
#endif
+ early_console = &early_serial_uartlite_console;
early_printk("early_printk_console is enabled at 0x%08x\n",
base_addr);
/* register_console(early_console); */
return 0;
- } else
- return 1;
+ }
+#endif /* CONFIG_SERIAL_UARTLITE_CONSOLE */
+
+#ifdef CONFIG_SERIAL_8250_CONSOLE
+ base_addr = early_uart16550_console();
+ base_addr &= ~3; /* clear register offset */
+ if (base_addr) {
+ early_console_initialized = 1;
+#ifdef CONFIG_MMU
+ early_console_reg_tlb_alloc(base_addr);
+#endif
+ early_console = &early_serial_uart16550_console;
+
+ early_printk("early_printk_console is enabled at 0x%08x\n",
+ base_addr);
+
+ /* register_console(early_console); */
+
+ return 0;
+ }
+#endif /* CONFIG_SERIAL_8250_CONSOLE */
+
+ return 1;
}
void __init disable_early_printk(void)
diff --git a/arch/microblaze/kernel/entry.S b/arch/microblaze/kernel/entry.S
index 304882e56459..819238b8a429 100644
--- a/arch/microblaze/kernel/entry.S
+++ b/arch/microblaze/kernel/entry.S
@@ -186,6 +186,8 @@
swi r13, r1, PTO+PT_R13; /* Save SDA2 */ \
swi r14, r1, PTO+PT_PC; /* PC, before IRQ/trap */ \
swi r15, r1, PTO+PT_R15; /* Save LP */ \
+ swi r16, r1, PTO+PT_R16; \
+ swi r17, r1, PTO+PT_R17; \
swi r18, r1, PTO+PT_R18; /* Save asm scratch reg */ \
swi r19, r1, PTO+PT_R19; \
swi r20, r1, PTO+PT_R20; \
@@ -220,6 +222,8 @@
lwi r13, r1, PTO+PT_R13; /* restore SDA2 */ \
lwi r14, r1, PTO+PT_PC; /* RESTORE_LINK PC, before IRQ/trap */\
lwi r15, r1, PTO+PT_R15; /* restore LP */ \
+ lwi r16, r1, PTO+PT_R16; \
+ lwi r17, r1, PTO+PT_R17; \
lwi r18, r1, PTO+PT_R18; /* restore asm scratch reg */ \
lwi r19, r1, PTO+PT_R19; \
lwi r20, r1, PTO+PT_R20; \
@@ -295,6 +299,8 @@ C_ENTRY(_user_exception):
/* addik r1, r1, -STATE_SAVE_SIZE; */
addik r1, r1, THREAD_SIZE + CONFIG_KERNEL_BASE_ADDR - CONFIG_KERNEL_START - STATE_SAVE_SIZE;
SAVE_REGS
+ swi r0, r1, PTO + PT_R3
+ swi r0, r1, PTO + PT_R4
lwi r11, r0, TOPHYS(PER_CPU(ENTRY_SP));
swi r11, r1, PTO+PT_R1; /* Store user SP. */
@@ -458,14 +464,8 @@ C_ENTRY(sys_execve):
addik r8, r1, PTO; /* add user context as 4th arg */
C_ENTRY(sys_rt_sigreturn_wrapper):
- swi r3, r1, PTO+PT_R3; /* restore saved r3, r4 registers */
- swi r4, r1, PTO+PT_R4;
- brlid r15, sys_rt_sigreturn /* Do real work */
+ brid sys_rt_sigreturn /* Do real work */
addik r5, r1, PTO; /* add user context as 1st arg */
- lwi r3, r1, PTO+PT_R3; /* restore saved r3, r4 registers */
- lwi r4, r1, PTO+PT_R4;
- bri ret_from_trap /* fall through will not work here due to align */
- nop;
/*
* HW EXCEPTION rutine start
@@ -765,9 +765,7 @@ C_ENTRY(_debug_exception):
/* save all regs to pt_reg structure */
swi r0, r1, PTO+PT_R0; /* R0 must be saved too */
swi r14, r1, PTO+PT_R14 /* rewrite saved R14 value */
- swi r16, r1, PTO+PT_R16
swi r16, r1, PTO+PT_PC; /* PC and r16 are the same */
- swi r17, r1, PTO+PT_R17
/* save special purpose registers to pt_regs */
mfs r11, rear;
swi r11, r1, PTO+PT_EAR;
@@ -801,8 +799,6 @@ C_ENTRY(_debug_exception):
addik r1, r1, -STATE_SAVE_SIZE; /* Make room on the stack. */
SAVE_REGS;
- swi r17, r1, PTO+PT_R17;
- swi r16, r1, PTO+PT_R16;
swi r16, r1, PTO+PT_PC; /* Save LP */
swi r0, r1, PTO + PT_MODE; /* Was in user-mode. */
lwi r11, r0, TOPHYS(PER_CPU(ENTRY_SP));
@@ -848,8 +844,6 @@ dbtrap_call: /* Return point for kernel/user entry + 8 because of rtsd r15, 8 */
tophys(r1,r1);
/* MS: Restore all regs */
RESTORE_REGS
- lwi r17, r1, PTO+PT_R17;
- lwi r16, r1, PTO+PT_R16;
addik r1, r1, STATE_SAVE_SIZE /* Clean up stack space */
lwi r1, r1, PT_R1 - PT_SIZE; /* Restore user stack pointer */
DBTRAP_return_user: /* MS: Make global symbol for debugging */
@@ -863,7 +857,6 @@ DBTRAP_return_user: /* MS: Make global symbol for debugging */
RESTORE_REGS
lwi r14, r1, PTO+PT_R14;
lwi r16, r1, PTO+PT_PC;
- lwi r17, r1, PTO+PT_R17;
addik r1, r1, STATE_SAVE_SIZE; /* MS: Clean up stack space */
tovirt(r1,r1);
DBTRAP_return_kernel: /* MS: Make global symbol for debugging */
diff --git a/arch/microblaze/kernel/exceptions.c b/arch/microblaze/kernel/exceptions.c
index b98ee8d0c1cd..478f2943ede7 100644
--- a/arch/microblaze/kernel/exceptions.c
+++ b/arch/microblaze/kernel/exceptions.c
@@ -72,7 +72,6 @@ asmlinkage void full_exception(struct pt_regs *regs, unsigned int type,
int fsr, int addr)
{
#ifdef CONFIG_MMU
- int code;
addr = regs->pc;
#endif
@@ -86,8 +85,7 @@ asmlinkage void full_exception(struct pt_regs *regs, unsigned int type,
switch (type & 0x1F) {
case MICROBLAZE_ILL_OPCODE_EXCEPTION:
if (user_mode(regs)) {
- pr_debug(KERN_WARNING "Illegal opcode exception " \
- "in user mode.\n");
+ pr_debug("Illegal opcode exception in user mode\n");
_exception(SIGILL, regs, ILL_ILLOPC, addr);
return;
}
@@ -97,8 +95,7 @@ asmlinkage void full_exception(struct pt_regs *regs, unsigned int type,
break;
case MICROBLAZE_IBUS_EXCEPTION:
if (user_mode(regs)) {
- pr_debug(KERN_WARNING "Instruction bus error " \
- "exception in user mode.\n");
+ pr_debug("Instruction bus error exception in user mode\n");
_exception(SIGBUS, regs, BUS_ADRERR, addr);
return;
}
@@ -108,8 +105,7 @@ asmlinkage void full_exception(struct pt_regs *regs, unsigned int type,
break;
case MICROBLAZE_DBUS_EXCEPTION:
if (user_mode(regs)) {
- pr_debug(KERN_WARNING "Data bus error exception " \
- "in user mode.\n");
+ pr_debug("Data bus error exception in user mode\n");
_exception(SIGBUS, regs, BUS_ADRERR, addr);
return;
}
@@ -119,8 +115,7 @@ asmlinkage void full_exception(struct pt_regs *regs, unsigned int type,
break;
case MICROBLAZE_DIV_ZERO_EXCEPTION:
if (user_mode(regs)) {
- pr_debug(KERN_WARNING "Divide by zero exception " \
- "in user mode\n");
+ pr_debug("Divide by zero exception in user mode\n");
_exception(SIGILL, regs, FPE_INTDIV, addr);
return;
}
@@ -129,7 +124,7 @@ asmlinkage void full_exception(struct pt_regs *regs, unsigned int type,
die("Divide by zero exception", regs, SIGBUS);
break;
case MICROBLAZE_FPU_EXCEPTION:
- pr_debug(KERN_WARNING "FPU exception\n");
+ pr_debug("FPU exception\n");
/* IEEE FP exception */
/* I removed fsr variable and use code var for storing fsr */
if (fsr & FSR_IO)
@@ -147,14 +142,8 @@ asmlinkage void full_exception(struct pt_regs *regs, unsigned int type,
#ifdef CONFIG_MMU
case MICROBLAZE_PRIVILEGED_EXCEPTION:
- pr_debug(KERN_WARNING "Privileged exception\n");
- /* "brk r0,r0" - used as debug breakpoint - old toolchain */
- if (get_user(code, (unsigned long *)regs->pc) == 0
- && code == 0x980c0000) {
- _exception(SIGTRAP, regs, TRAP_BRKPT, addr);
- } else {
- _exception(SIGILL, regs, ILL_PRVOPC, addr);
- }
+ pr_debug("Privileged exception\n");
+ _exception(SIGILL, regs, ILL_PRVOPC, addr);
break;
#endif
default:
diff --git a/arch/microblaze/kernel/heartbeat.c b/arch/microblaze/kernel/heartbeat.c
index 522751737cfa..154756f3c694 100644
--- a/arch/microblaze/kernel/heartbeat.c
+++ b/arch/microblaze/kernel/heartbeat.c
@@ -47,11 +47,10 @@ void setup_heartbeat(void)
struct device_node *gpio = NULL;
int *prop;
int j;
- char *gpio_list[] = {
- "xlnx,xps-gpio-1.00.a",
- "xlnx,opb-gpio-1.00.a",
- NULL
- };
+ const char * const gpio_list[] = {
+ "xlnx,xps-gpio-1.00.a",
+ NULL
+ };
for (j = 0; gpio_list[j] != NULL; j++) {
gpio = of_find_compatible_node(NULL, NULL, gpio_list[j]);
@@ -60,7 +59,7 @@ void setup_heartbeat(void)
}
if (gpio) {
- base_addr = *(int *) of_get_property(gpio, "reg", NULL);
+ base_addr = be32_to_cpup(of_get_property(gpio, "reg", NULL));
base_addr = (unsigned long) ioremap(base_addr, PAGE_SIZE);
printk(KERN_NOTICE "Heartbeat GPIO at 0x%x\n", base_addr);
diff --git a/arch/microblaze/kernel/intc.c b/arch/microblaze/kernel/intc.c
index 03172c1da770..d61ea33aff7c 100644
--- a/arch/microblaze/kernel/intc.c
+++ b/arch/microblaze/kernel/intc.c
@@ -126,11 +126,8 @@ void __init init_IRQ(void)
0
};
#endif
- static char *intc_list[] = {
+ const char * const intc_list[] = {
"xlnx,xps-intc-1.00.a",
- "xlnx,opb-intc-1.00.c",
- "xlnx,opb-intc-1.00.b",
- "xlnx,opb-intc-1.00.a",
NULL
};
@@ -141,12 +138,15 @@ void __init init_IRQ(void)
}
BUG_ON(!intc);
- intc_baseaddr = *(int *) of_get_property(intc, "reg", NULL);
+ intc_baseaddr = be32_to_cpup(of_get_property(intc,
+ "reg", NULL));
intc_baseaddr = (unsigned long) ioremap(intc_baseaddr, PAGE_SIZE);
- nr_irq = *(int *) of_get_property(intc, "xlnx,num-intr-inputs", NULL);
+ nr_irq = be32_to_cpup(of_get_property(intc,
+ "xlnx,num-intr-inputs", NULL));
intr_type =
- *(int *) of_get_property(intc, "xlnx,kind-of-intr", NULL);
+ be32_to_cpup(of_get_property(intc,
+ "xlnx,kind-of-intr", NULL));
if (intr_type >= (1 << (nr_irq + 1)))
printk(KERN_INFO " ERROR: Mismatch in kind-of-intr param\n");
diff --git a/arch/microblaze/kernel/kgdb.c b/arch/microblaze/kernel/kgdb.c
index bfc006b7f2d8..09a5e8286137 100644
--- a/arch/microblaze/kernel/kgdb.c
+++ b/arch/microblaze/kernel/kgdb.c
@@ -80,7 +80,7 @@ void gdb_regs_to_pt_regs(unsigned long *gdb_regs, struct pt_regs *regs)
void microblaze_kgdb_break(struct pt_regs *regs)
{
if (kgdb_handle_exception(1, SIGTRAP, 0, regs) != 0)
- return 0;
+ return;
/* Jump over the first arch_kgdb_breakpoint which is barrier to
* get kgdb work. The same solution is used for powerpc */
@@ -114,7 +114,6 @@ int kgdb_arch_handle_exception(int vector, int signo, int err_code,
{
char *ptr;
unsigned long address;
- int cpu = smp_processor_id();
switch (remcom_in_buffer[0]) {
case 'c':
@@ -143,5 +142,9 @@ void kgdb_arch_exit(void)
* Global data
*/
struct kgdb_arch arch_kgdb_ops = {
+#ifdef __MICROBLAZEEL__
+ .gdb_bpt_instr = {0x18, 0x00, 0x0c, 0xba}, /* brki r16, 0x18 */
+#else
.gdb_bpt_instr = {0xba, 0x0c, 0x00, 0x18}, /* brki r16, 0x18 */
+#endif
};
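
The two gdb_bpt_instr variants are the same "brki r16, 0x18" opcode (0xba0c0018); only its in-memory byte order differs between the EL and BE builds. A quick standalone check (hypothetical program, not part of the patch):

/* illustrative only: one opcode, two byte orders */
#include <stdint.h>
#include <stdio.h>
#include <string.h>

int main(void)
{
	uint32_t brki_r16 = 0xba0c0018;		/* "brki r16, 0x18" */
	uint8_t bytes[4];

	memcpy(bytes, &brki_r16, sizeof(bytes));
	/* a little-endian host prints 18 00 0c ba, a big-endian host prints
	 * ba 0c 00 18 -- matching the two gdb_bpt_instr arrays above */
	printf("%02x %02x %02x %02x\n",
	       bytes[0], bytes[1], bytes[2], bytes[3]);
	return 0;
}
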
diff --git a/arch/microblaze/kernel/microblaze_ksyms.c b/arch/microblaze/kernel/microblaze_ksyms.c
index ff85f7718035..5cb034174005 100644
--- a/arch/microblaze/kernel/microblaze_ksyms.c
+++ b/arch/microblaze/kernel/microblaze_ksyms.c
@@ -15,37 +15,13 @@
#include <linux/syscalls.h>
#include <asm/checksum.h>
+#include <asm/cacheflush.h>
#include <linux/io.h>
#include <asm/page.h>
#include <asm/system.h>
#include <linux/ftrace.h>
#include <linux/uaccess.h>
-/*
- * libgcc functions - functions that are used internally by the
- * compiler... (prototypes are not correct though, but that
- * doesn't really matter since they're not versioned).
- */
-extern void __ashldi3(void);
-EXPORT_SYMBOL(__ashldi3);
-extern void __ashrdi3(void);
-EXPORT_SYMBOL(__ashrdi3);
-extern void __divsi3(void);
-EXPORT_SYMBOL(__divsi3);
-extern void __lshrdi3(void);
-EXPORT_SYMBOL(__lshrdi3);
-extern void __modsi3(void);
-EXPORT_SYMBOL(__modsi3);
-extern void __mulsi3(void);
-EXPORT_SYMBOL(__mulsi3);
-extern void __muldi3(void);
-EXPORT_SYMBOL(__muldi3);
-extern void __ucmpdi2(void);
-EXPORT_SYMBOL(__ucmpdi2);
-extern void __udivsi3(void);
-EXPORT_SYMBOL(__udivsi3);
-extern void __umodsi3(void);
-EXPORT_SYMBOL(__umodsi3);
extern char *_ebss;
EXPORT_SYMBOL_GPL(_ebss);
#ifdef CONFIG_FUNCTION_TRACER
@@ -63,3 +39,9 @@ EXPORT_SYMBOL(__strncpy_user);
EXPORT_SYMBOL(memcpy);
EXPORT_SYMBOL(memmove);
#endif
+
+#ifdef CONFIG_MMU
+EXPORT_SYMBOL(empty_zero_page);
+#endif
+
+EXPORT_SYMBOL(mbc);
diff --git a/arch/microblaze/kernel/prom.c b/arch/microblaze/kernel/prom.c
index 427b13b4740f..a105301e2b7f 100644
--- a/arch/microblaze/kernel/prom.c
+++ b/arch/microblaze/kernel/prom.c
@@ -42,11 +42,6 @@
#include <asm/sections.h>
#include <asm/pci-bridge.h>
-void __init early_init_dt_scan_chosen_arch(unsigned long node)
-{
- /* No Microblaze specific code here */
-}
-
void __init early_init_dt_add_memory_arch(u64 base, u64 size)
{
memblock_add(base, size);
@@ -77,11 +72,12 @@ static int __init early_init_dt_scan_serial(unsigned long node,
/* find compatible node with uartlite */
p = of_get_flat_dt_prop(node, "compatible", &l);
if ((strncmp(p, "xlnx,xps-uartlite", 17) != 0) &&
- (strncmp(p, "xlnx,opb-uartlite", 17) != 0))
+ (strncmp(p, "xlnx,opb-uartlite", 17) != 0) &&
+ (strncmp(p, "xlnx,axi-uartlite", 17) != 0))
return 0;
addr = of_get_flat_dt_prop(node, "reg", &l);
- return *addr; /* return address */
+ return be32_to_cpup(addr); /* return address */
}
/* this function is looking for early uartlite console - Microblaze specific */
@@ -89,6 +85,40 @@ int __init early_uartlite_console(void)
{
return of_scan_flat_dt(early_init_dt_scan_serial, NULL);
}
+
+/* MS: this is a Microblaze-specific function */
+static int __init early_init_dt_scan_serial_full(unsigned long node,
+ const char *uname, int depth, void *data)
+{
+ unsigned long l;
+ char *p;
+ unsigned int addr;
+
+ pr_debug("search \"chosen\", depth: %d, uname: %s\n", depth, uname);
+
+/* find all serial nodes */
+ if (strncmp(uname, "serial", 6) != 0)
+ return 0;
+
+ early_init_dt_check_for_initrd(node);
+
+/* find compatible node with uart16550 */
+ p = of_get_flat_dt_prop(node, "compatible", &l);
+
+ if ((strncmp(p, "xlnx,xps-uart16550", 18) != 0) &&
+ (strncmp(p, "xlnx,axi-uart16550", 18) != 0))
+ return 0;
+
+ addr = *(u32 *)of_get_flat_dt_prop(node, "reg", &l);
+ addr += *(u32 *)of_get_flat_dt_prop(node, "reg-offset", &l);
+ return be32_to_cpu(addr); /* return address */
+}
+
+/* this function is looking for early uart16550 console - Microblaze specific */
+int __init early_uart16550_console(void)
+{
+ return of_scan_flat_dt(early_init_dt_scan_serial_full, NULL);
+}
#endif
void __init early_init_devtree(void *params)
diff --git a/arch/microblaze/kernel/ptrace.c b/arch/microblaze/kernel/ptrace.c
index dc03ffc8174a..05ac8cc975d5 100644
--- a/arch/microblaze/kernel/ptrace.c
+++ b/arch/microblaze/kernel/ptrace.c
@@ -73,7 +73,8 @@ static microblaze_reg_t *reg_save_addr(unsigned reg_offs,
return (microblaze_reg_t *)((char *)regs + reg_offs);
}
-long arch_ptrace(struct task_struct *child, long request, long addr, long data)
+long arch_ptrace(struct task_struct *child, long request,
+ unsigned long addr, unsigned long data)
{
int rval;
unsigned long val = 0;
@@ -99,7 +100,7 @@ long arch_ptrace(struct task_struct *child, long request, long addr, long data)
} else {
rval = -EIO;
}
- } else if (addr >= 0 && addr < PT_SIZE && (addr & 0x3) == 0) {
+ } else if (addr < PT_SIZE && (addr & 0x3) == 0) {
microblaze_reg_t *reg_addr = reg_save_addr(addr, child);
if (request == PTRACE_PEEKUSR)
val = *reg_addr;
diff --git a/arch/microblaze/kernel/setup.c b/arch/microblaze/kernel/setup.c
index f5f768842354..bb1558e4b283 100644
--- a/arch/microblaze/kernel/setup.c
+++ b/arch/microblaze/kernel/setup.c
@@ -92,12 +92,6 @@ inline unsigned get_romfs_len(unsigned *addr)
}
#endif /* CONFIG_MTD_UCLINUX_EBSS */
-#if defined(CONFIG_EARLY_PRINTK) && defined(CONFIG_SERIAL_UARTLITE_CONSOLE)
-#define eprintk early_printk
-#else
-#define eprintk printk
-#endif
-
void __init machine_early_init(const char *cmdline, unsigned int ram,
unsigned int fdt, unsigned int msr)
{
diff --git a/arch/microblaze/kernel/syscall_table.S b/arch/microblaze/kernel/syscall_table.S
index 03376dc814c9..e88a930fd1e3 100644
--- a/arch/microblaze/kernel/syscall_table.S
+++ b/arch/microblaze/kernel/syscall_table.S
@@ -372,3 +372,6 @@ ENTRY(sys_call_table)
.long sys_rt_tgsigqueueinfo /* 365 */
.long sys_perf_event_open
.long sys_recvmmsg
+ .long sys_fanotify_init
+ .long sys_fanotify_mark
+ .long sys_prlimit64 /* 370 */
diff --git a/arch/microblaze/kernel/timer.c b/arch/microblaze/kernel/timer.c
index b1380ae93ae1..a5aa33db1df3 100644
--- a/arch/microblaze/kernel/timer.c
+++ b/arch/microblaze/kernel/timer.c
@@ -38,6 +38,9 @@ static unsigned int timer_baseaddr;
#define TIMER_BASE timer_baseaddr
#endif
+unsigned int freq_div_hz;
+unsigned int timer_clock_freq;
+
#define TCSR0 (0x00)
#define TLR0 (0x04)
#define TCR0 (0x08)
@@ -115,7 +118,7 @@ static void microblaze_timer_set_mode(enum clock_event_mode mode,
switch (mode) {
case CLOCK_EVT_MODE_PERIODIC:
printk(KERN_INFO "%s: periodic\n", __func__);
- microblaze_timer0_start_periodic(cpuinfo.freq_div_hz);
+ microblaze_timer0_start_periodic(freq_div_hz);
break;
case CLOCK_EVT_MODE_ONESHOT:
printk(KERN_INFO "%s: oneshot\n", __func__);
@@ -168,7 +171,7 @@ static struct irqaction timer_irqaction = {
static __init void microblaze_clockevent_init(void)
{
clockevent_microblaze_timer.mult =
- div_sc(cpuinfo.cpu_clock_freq, NSEC_PER_SEC,
+ div_sc(timer_clock_freq, NSEC_PER_SEC,
clockevent_microblaze_timer.shift);
clockevent_microblaze_timer.max_delta_ns =
clockevent_delta2ns((u32)~0, &clockevent_microblaze_timer);
@@ -201,7 +204,7 @@ static struct cyclecounter microblaze_cc = {
int __init init_microblaze_timecounter(void)
{
- microblaze_cc.mult = div_sc(cpuinfo.cpu_clock_freq, NSEC_PER_SEC,
+ microblaze_cc.mult = div_sc(timer_clock_freq, NSEC_PER_SEC,
microblaze_cc.shift);
timecounter_init(&microblaze_tc, &microblaze_cc, sched_clock());
@@ -221,7 +224,7 @@ static struct clocksource clocksource_microblaze = {
static int __init microblaze_clocksource_init(void)
{
clocksource_microblaze.mult =
- clocksource_hz2mult(cpuinfo.cpu_clock_freq,
+ clocksource_hz2mult(timer_clock_freq,
clocksource_microblaze.shift);
if (clocksource_register(&clocksource_microblaze))
panic("failed to register clocksource");
@@ -247,6 +250,7 @@ void __init time_init(void)
u32 irq, i = 0;
u32 timer_num = 1;
struct device_node *timer = NULL;
+ const void *prop;
#ifdef CONFIG_SELFMOD_TIMER
unsigned int timer_baseaddr = 0;
int arr_func[] = {
@@ -258,12 +262,10 @@ void __init time_init(void)
0
};
#endif
- char *timer_list[] = {
- "xlnx,xps-timer-1.00.a",
- "xlnx,opb-timer-1.00.b",
- "xlnx,opb-timer-1.00.a",
- NULL
- };
+ const char * const timer_list[] = {
+ "xlnx,xps-timer-1.00.a",
+ NULL
+ };
for (i = 0; timer_list[i] != NULL; i++) {
timer = of_find_compatible_node(NULL, NULL, timer_list[i]);
@@ -272,13 +274,13 @@ void __init time_init(void)
}
BUG_ON(!timer);
- timer_baseaddr = *(int *) of_get_property(timer, "reg", NULL);
+ timer_baseaddr = be32_to_cpup(of_get_property(timer, "reg", NULL));
timer_baseaddr = (unsigned long) ioremap(timer_baseaddr, PAGE_SIZE);
- irq = *(int *) of_get_property(timer, "interrupts", NULL);
- timer_num =
- *(int *) of_get_property(timer, "xlnx,one-timer-only", NULL);
+ irq = be32_to_cpup(of_get_property(timer, "interrupts", NULL));
+ timer_num = be32_to_cpup(of_get_property(timer,
+ "xlnx,one-timer-only", NULL));
if (timer_num) {
- printk(KERN_EMERG "Please enable two timers in HW\n");
+ eprintk(KERN_EMERG "Please enable two timers in HW\n");
BUG();
}
@@ -288,7 +290,14 @@ void __init time_init(void)
printk(KERN_INFO "%s #0 at 0x%08x, irq=%d\n",
timer_list[i], timer_baseaddr, irq);
- cpuinfo.freq_div_hz = cpuinfo.cpu_clock_freq / HZ;
+ /* If there is a clock-frequency property then use it */
+ prop = of_get_property(timer, "clock-frequency", NULL);
+ if (prop)
+ timer_clock_freq = be32_to_cpup(prop);
+ else
+ timer_clock_freq = cpuinfo.cpu_clock_freq;
+
+ freq_div_hz = timer_clock_freq / HZ;
setup_irq(irq, &timer_irqaction);
#ifdef CONFIG_HEART_BEAT
diff --git a/arch/microblaze/kernel/vmlinux.lds.S b/arch/microblaze/kernel/vmlinux.lds.S
index a09f2962fbec..96a88c31fe48 100644
--- a/arch/microblaze/kernel/vmlinux.lds.S
+++ b/arch/microblaze/kernel/vmlinux.lds.S
@@ -8,7 +8,6 @@
* for more details.
*/
-OUTPUT_FORMAT("elf32-microblaze", "elf32-microblaze", "elf32-microblaze")
OUTPUT_ARCH(microblaze)
ENTRY(microblaze_start)
@@ -16,7 +15,11 @@ ENTRY(microblaze_start)
#include <asm-generic/vmlinux.lds.h>
#include <asm/thread_info.h>
+#ifdef __MICROBLAZEEL__
+jiffies = jiffies_64;
+#else
jiffies = jiffies_64 + 4;
+#endif
SECTIONS {
. = CONFIG_KERNEL_START;
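
The jiffies hunk above works because, on a 32-bit target, the low word of the 64-bit jiffies_64 counter sits at byte offset 4 on big-endian and offset 0 on little-endian. A small stand-alone C sketch (not part of the patch; it only reports the layout of whatever host it runs on):

#include <stdint.h>
#include <stdio.h>
#include <string.h>

int main(void)
{
    uint64_t jiffies_64 = 0x1122334455667788ULL;
    uint32_t word;
    unsigned char bytes[8];

    memcpy(bytes, &jiffies_64, sizeof(bytes));

    memcpy(&word, bytes + 0, 4);
    if (word == 0x55667788u)
        puts("little-endian host: low word at offset 0 -> jiffies = jiffies_64");

    memcpy(&word, bytes + 4, 4);
    if (word == 0x55667788u)
        puts("big-endian host: low word at offset 4 -> jiffies = jiffies_64 + 4");

    return 0;
}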
diff --git a/arch/microblaze/lib/Makefile b/arch/microblaze/lib/Makefile
index 4dfe47d3cd91..f1fcbff3da25 100644
--- a/arch/microblaze/lib/Makefile
+++ b/arch/microblaze/lib/Makefile
@@ -11,3 +11,13 @@ lib-y += memcpy.o memmove.o
endif
lib-y += uaccess_old.o
+
+lib-y += ashldi3.o
+lib-y += ashrdi3.o
+lib-y += divsi3.o
+lib-y += lshrdi3.o
+lib-y += modsi3.o
+lib-y += muldi3.o
+lib-y += mulsi3.o
+lib-y += udivsi3.o
+lib-y += umodsi3.o
diff --git a/arch/microblaze/lib/ashldi3.c b/arch/microblaze/lib/ashldi3.c
new file mode 100644
index 000000000000..beb80f316095
--- /dev/null
+++ b/arch/microblaze/lib/ashldi3.c
@@ -0,0 +1,29 @@
+#include <linux/module.h>
+
+#include "libgcc.h"
+
+long long __ashldi3(long long u, word_type b)
+{
+ DWunion uu, w;
+ word_type bm;
+
+ if (b == 0)
+ return u;
+
+ uu.ll = u;
+ bm = 32 - b;
+
+ if (bm <= 0) {
+ w.s.low = 0;
+ w.s.high = (unsigned int) uu.s.low << -bm;
+ } else {
+ const unsigned int carries = (unsigned int) uu.s.low >> bm;
+
+ w.s.low = (unsigned int) uu.s.low << b;
+ w.s.high = ((unsigned int) uu.s.high << b) | carries;
+ }
+
+ return w.ll;
+}
+
+EXPORT_SYMBOL(__ashldi3);
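
As a quick check of the split-shift logic in __ashldi3, here is a user-space sketch (not part of the patch) that mirrors the same DWunion arithmetic and compares it with a native 64-bit shift. It assumes a little-endian host so the union layout matches the __MICROBLAZEEL__ case; the helper name is illustrative only.

#include <assert.h>
#include <stdio.h>

/* Little-endian field order, as in the __MICROBLAZEEL__ DWstruct. */
union dw {
    long long ll;
    struct { int low, high; } s;
};

static long long shift_left64(long long u, int b)
{
    union dw uu, w;
    int bm;

    if (b == 0)
        return u;

    uu.ll = u;
    bm = 32 - b;

    if (bm <= 0) {                /* shifting by 32 bits or more */
        w.s.low = 0;
        w.s.high = (unsigned int) uu.s.low << -bm;
    } else {
        const unsigned int carries = (unsigned int) uu.s.low >> bm;

        w.s.low = (unsigned int) uu.s.low << b;
        w.s.high = ((unsigned int) uu.s.high << b) | carries;
    }
    return w.ll;
}

int main(void)
{
    long long x = 0x0123456789abcdefLL;
    int b;

    for (b = 0; b < 64; b++)
        assert((unsigned long long) shift_left64(x, b) ==
               (unsigned long long) x << b);
    puts("split 32-bit shifts match the native 64-bit shift");
    return 0;
}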
diff --git a/arch/microblaze/lib/ashrdi3.c b/arch/microblaze/lib/ashrdi3.c
new file mode 100644
index 000000000000..c884a912b660
--- /dev/null
+++ b/arch/microblaze/lib/ashrdi3.c
@@ -0,0 +1,31 @@
+#include <linux/module.h>
+
+#include "libgcc.h"
+
+long long __ashrdi3(long long u, word_type b)
+{
+ DWunion uu, w;
+ word_type bm;
+
+ if (b == 0)
+ return u;
+
+ uu.ll = u;
+ bm = 32 - b;
+
+ if (bm <= 0) {
+ /* w.s.high = 1..1 or 0..0 */
+ w.s.high =
+ uu.s.high >> 31;
+ w.s.low = uu.s.high >> -bm;
+ } else {
+ const unsigned int carries = (unsigned int) uu.s.high << bm;
+
+ w.s.high = uu.s.high >> b;
+ w.s.low = ((unsigned int) uu.s.low >> b) | carries;
+ }
+
+ return w.ll;
+}
+
+EXPORT_SYMBOL(__ashrdi3);
diff --git a/arch/microblaze/lib/divsi3.S b/arch/microblaze/lib/divsi3.S
new file mode 100644
index 000000000000..595b02d6e86b
--- /dev/null
+++ b/arch/microblaze/lib/divsi3.S
@@ -0,0 +1,73 @@
+#include <linux/linkage.h>
+
+/*
+* Divide operation for 32 bit integers.
+* Input : Dividend in Reg r5
+* Divisor in Reg r6
+* Output: Result in Reg r3
+*/
+ .text
+ .globl __divsi3
+ .type __divsi3, @function
+ .ent __divsi3
+__divsi3:
+ .frame r1, 0, r15
+
+ addik r1, r1, -16
+ swi r28, r1, 0
+ swi r29, r1, 4
+ swi r30, r1, 8
+ swi r31, r1, 12
+
+ beqi r6, div_by_zero /* div_by_zero - division error */
+ beqi r5, result_is_zero /* result is zero */
+ bgeid r5, r5_pos
+ xor r28, r5, r6 /* get the sign of the result */
+ rsubi r5, r5, 0 /* make r5 positive */
+r5_pos:
+ bgei r6, r6_pos
+ rsubi r6, r6, 0 /* make r6 positive */
+r6_pos:
+ addik r30, r0, 0 /* clear mod */
+ addik r3, r0, 0 /* clear div */
+ addik r29, r0, 32 /* initialize the loop count */
+
+ /* first part try to find the first '1' in the r5 */
+div0:
+ blti r5, div2 /* this traps r5 == 0x80000000 */
+div1:
+ add r5, r5, r5 /* left shift logical r5 */
+ bgtid r5, div1
+ addik r29, r29, -1
+div2:
+ /* left shift logical r5 get the '1' into the carry */
+ add r5, r5, r5
+ addc r30, r30, r30 /* move that bit into the mod register */
+ rsub r31, r6, r30 /* try to subtract (r30 - r6) */
+ blti r31, mod_too_small
+ /* move the r31 to mod since the result was positive */
+ or r30, r0, r31
+ addik r3, r3, 1
+mod_too_small:
+ addik r29, r29, -1
+ beqi r29, loop_end
+ add r3, r3, r3 /* shift in the '1' into div */
+ bri div2 /* div2 */
+loop_end:
+ bgei r28, return_here
+ brid return_here
+ rsubi r3, r3, 0 /* negate the result */
+div_by_zero:
+result_is_zero:
+ or r3, r0, r0 /* set result to 0 */
+return_here:
+/* restore values of csrs and that of r3 and the divisor and the dividend */
+ lwi r28, r1, 0
+ lwi r29, r1, 4
+ lwi r30, r1, 8
+ lwi r31, r1, 12
+ rtsd r15, 8
+ addik r1, r1, 16
+
+.size __divsi3, . - __divsi3
+.end __divsi3
diff --git a/arch/microblaze/lib/libgcc.h b/arch/microblaze/lib/libgcc.h
new file mode 100644
index 000000000000..05909d58e2fe
--- /dev/null
+++ b/arch/microblaze/lib/libgcc.h
@@ -0,0 +1,25 @@
+#ifndef __ASM_LIBGCC_H
+#define __ASM_LIBGCC_H
+
+#include <asm/byteorder.h>
+
+typedef int word_type __attribute__ ((mode (__word__)));
+
+#ifdef __BIG_ENDIAN
+struct DWstruct {
+ int high, low;
+};
+#elif defined(__LITTLE_ENDIAN)
+struct DWstruct {
+ int low, high;
+};
+#else
+#error I feel sick.
+#endif
+
+typedef union {
+ struct DWstruct s;
+ long long ll;
+} DWunion;
+
+#endif /* __ASM_LIBGCC_H */
diff --git a/arch/microblaze/lib/lshrdi3.c b/arch/microblaze/lib/lshrdi3.c
new file mode 100644
index 000000000000..dcf8d6810b7c
--- /dev/null
+++ b/arch/microblaze/lib/lshrdi3.c
@@ -0,0 +1,29 @@
+#include <linux/module.h>
+
+#include "libgcc.h"
+
+long long __lshrdi3(long long u, word_type b)
+{
+ DWunion uu, w;
+ word_type bm;
+
+ if (b == 0)
+ return u;
+
+ uu.ll = u;
+ bm = 32 - b;
+
+ if (bm <= 0) {
+ w.s.high = 0;
+ w.s.low = (unsigned int) uu.s.high >> -bm;
+ } else {
+ const unsigned int carries = (unsigned int) uu.s.high << bm;
+
+ w.s.high = (unsigned int) uu.s.high >> b;
+ w.s.low = ((unsigned int) uu.s.low >> b) | carries;
+ }
+
+ return w.ll;
+}
+
+EXPORT_SYMBOL(__lshrdi3);
diff --git a/arch/microblaze/lib/memcpy.c b/arch/microblaze/lib/memcpy.c
index 014bac92bdff..cc495d7d99cc 100644
--- a/arch/microblaze/lib/memcpy.c
+++ b/arch/microblaze/lib/memcpy.c
@@ -33,17 +33,24 @@
#include <asm/system.h>
#ifdef __HAVE_ARCH_MEMCPY
+#ifndef CONFIG_OPT_LIB_FUNCTION
void *memcpy(void *v_dst, const void *v_src, __kernel_size_t c)
{
const char *src = v_src;
char *dst = v_dst;
-#ifndef CONFIG_OPT_LIB_FUNCTION
+
/* Simple, byte oriented memcpy. */
while (c--)
*dst++ = *src++;
return v_dst;
-#else
+}
+#else /* CONFIG_OPT_LIB_FUNCTION */
+void *memcpy(void *v_dst, const void *v_src, __kernel_size_t c)
+{
+ const char *src = v_src;
+ char *dst = v_dst;
+
/* The following code tries to optimize the copy by using unsigned
* alignment. This will work fine if both source and destination are
* aligned on the same boundary. However, if they are aligned on
@@ -86,7 +93,7 @@ void *memcpy(void *v_dst, const void *v_src, __kernel_size_t c)
case 0x1: /* Unaligned - Off by 1 */
/* Word align the source */
i_src = (const void *) ((unsigned)src & ~3);
-
+#ifndef __MICROBLAZEEL__
/* Load the holding buffer */
buf_hold = *i_src++ << 8;
@@ -95,7 +102,16 @@ void *memcpy(void *v_dst, const void *v_src, __kernel_size_t c)
*i_dst++ = buf_hold | value >> 24;
buf_hold = value << 8;
}
+#else
+ /* Load the holding buffer */
+ buf_hold = (*i_src++ & 0xFFFFFF00) >> 8;
+ for (; c >= 4; c -= 4) {
+ value = *i_src++;
+ *i_dst++ = buf_hold | ((value & 0xFF) << 24);
+ buf_hold = (value & 0xFFFFFF00) >> 8;
+ }
+#endif
/* Realign the source */
src = (const void *)i_src;
src -= 3;
@@ -103,7 +119,7 @@ void *memcpy(void *v_dst, const void *v_src, __kernel_size_t c)
case 0x2: /* Unaligned - Off by 2 */
/* Word align the source */
i_src = (const void *) ((unsigned)src & ~3);
-
+#ifndef __MICROBLAZEEL__
/* Load the holding buffer */
buf_hold = *i_src++ << 16;
@@ -112,7 +128,16 @@ void *memcpy(void *v_dst, const void *v_src, __kernel_size_t c)
*i_dst++ = buf_hold | value >> 16;
buf_hold = value << 16;
}
+#else
+ /* Load the holding buffer */
+ buf_hold = (*i_src++ & 0xFFFF0000) >> 16;
+ for (; c >= 4; c -= 4) {
+ value = *i_src++;
+ *i_dst++ = buf_hold | ((value & 0xFFFF) << 16);
+ buf_hold = (value & 0xFFFF0000) >> 16;
+ }
+#endif
/* Realign the source */
src = (const void *)i_src;
src -= 2;
@@ -120,7 +145,7 @@ void *memcpy(void *v_dst, const void *v_src, __kernel_size_t c)
case 0x3: /* Unaligned - Off by 3 */
/* Word align the source */
i_src = (const void *) ((unsigned)src & ~3);
-
+#ifndef __MICROBLAZEEL__
/* Load the holding buffer */
buf_hold = *i_src++ << 24;
@@ -129,7 +154,16 @@ void *memcpy(void *v_dst, const void *v_src, __kernel_size_t c)
*i_dst++ = buf_hold | value >> 8;
buf_hold = value << 24;
}
+#else
+ /* Load the holding buffer */
+ buf_hold = (*i_src++ & 0xFF000000) >> 24;
+ for (; c >= 4; c -= 4) {
+ value = *i_src++;
+ *i_dst++ = buf_hold | ((value & 0xFFFFFF) << 8);
+ buf_hold = (value & 0xFF000000) >> 24;
+ }
+#endif
/* Realign the source */
src = (const void *)i_src;
src -= 1;
@@ -150,7 +184,7 @@ void *memcpy(void *v_dst, const void *v_src, __kernel_size_t c)
}
return v_dst;
-#endif
}
+#endif /* CONFIG_OPT_LIB_FUNCTION */
EXPORT_SYMBOL(memcpy);
#endif /* __HAVE_ARCH_MEMCPY */
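
A stand-alone sketch (not part of the patch) of the little-endian off-by-one path added above: the source pointer is word-aligned downwards, the upper three bytes of each word are kept in a holding buffer, and the low byte of the following word is spliced in. Buffer sizes and names are illustrative, and a little-endian host is assumed.

#include <assert.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>

int main(void)
{
    uint32_t src_words[4], dst_words[4];
    uint8_t *src = (uint8_t *) src_words;
    uint8_t *dst = (uint8_t *) dst_words;
    const uint32_t *i_src;
    uint32_t *i_dst = dst_words;
    uint32_t buf_hold, value;
    size_t c = 12, i;

    for (i = 0; i < sizeof(src_words); i++)
        src[i] = (uint8_t) i;

    /* Source starts at src + 1; align the word pointer downwards. */
    i_src = (const uint32_t *) ((uintptr_t) (src + 1) & ~(uintptr_t) 3);

    /* Off-by-1 little-endian path, as in the hunk above. */
    buf_hold = (*i_src++ & 0xFFFFFF00) >> 8;
    for (; c >= 4; c -= 4) {
        value = *i_src++;
        *i_dst++ = buf_hold | ((value & 0xFF) << 24);
        buf_hold = (value & 0xFFFFFF00) >> 8;
    }

    /* The 12 copied bytes must equal src[1..12]. */
    assert(memcmp(dst, src + 1, 12) == 0);
    puts("unaligned little-endian word copy matches byte copy");
    return 0;
}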
diff --git a/arch/microblaze/lib/memmove.c b/arch/microblaze/lib/memmove.c
index 0929198c5e68..123e3616f2dd 100644
--- a/arch/microblaze/lib/memmove.c
+++ b/arch/microblaze/lib/memmove.c
@@ -31,16 +31,12 @@
#include <linux/string.h>
#ifdef __HAVE_ARCH_MEMMOVE
+#ifndef CONFIG_OPT_LIB_FUNCTION
void *memmove(void *v_dst, const void *v_src, __kernel_size_t c)
{
const char *src = v_src;
char *dst = v_dst;
-#ifdef CONFIG_OPT_LIB_FUNCTION
- const uint32_t *i_src;
- uint32_t *i_dst;
-#endif
-
if (!c)
return v_dst;
@@ -48,7 +44,6 @@ void *memmove(void *v_dst, const void *v_src, __kernel_size_t c)
if (v_dst <= v_src)
return memcpy(v_dst, v_src, c);
-#ifndef CONFIG_OPT_LIB_FUNCTION
/* copy backwards, from end to beginning */
src += c;
dst += c;
@@ -58,7 +53,22 @@ void *memmove(void *v_dst, const void *v_src, __kernel_size_t c)
*--dst = *--src;
return v_dst;
-#else
+}
+#else /* CONFIG_OPT_LIB_FUNCTION */
+void *memmove(void *v_dst, const void *v_src, __kernel_size_t c)
+{
+ const char *src = v_src;
+ char *dst = v_dst;
+ const uint32_t *i_src;
+ uint32_t *i_dst;
+
+ if (!c)
+ return v_dst;
+
+ /* Use memcpy when source is higher than dest */
+ if (v_dst <= v_src)
+ return memcpy(v_dst, v_src, c);
+
/* The following code tries to optimize the copy by using unsigned
* alignment. This will work fine if both source and destination are
* aligned on the same boundary. However, if they are aligned on
@@ -104,7 +114,7 @@ void *memmove(void *v_dst, const void *v_src, __kernel_size_t c)
case 0x1: /* Unaligned - Off by 1 */
/* Word align the source */
i_src = (const void *) (((unsigned)src + 4) & ~3);
-
+#ifndef __MICROBLAZEEL__
/* Load the holding buffer */
buf_hold = *--i_src >> 24;
@@ -113,7 +123,16 @@ void *memmove(void *v_dst, const void *v_src, __kernel_size_t c)
*--i_dst = buf_hold << 8 | value;
buf_hold = value >> 24;
}
+#else
+ /* Load the holding buffer */
+ buf_hold = (*--i_src & 0xFF) << 24;
+ for (; c >= 4; c -= 4) {
+ value = *--i_src;
+ *--i_dst = buf_hold | ((value & 0xFFFFFF00) >> 8);
+ buf_hold = (value & 0xFF) << 24;
+ }
+#endif
/* Realign the source */
src = (const void *)i_src;
src += 1;
@@ -121,7 +140,7 @@ void *memmove(void *v_dst, const void *v_src, __kernel_size_t c)
case 0x2: /* Unaligned - Off by 2 */
/* Word align the source */
i_src = (const void *) (((unsigned)src + 4) & ~3);
-
+#ifndef __MICROBLAZEEL__
/* Load the holding buffer */
buf_hold = *--i_src >> 16;
@@ -130,7 +149,16 @@ void *memmove(void *v_dst, const void *v_src, __kernel_size_t c)
*--i_dst = buf_hold << 16 | value;
buf_hold = value >> 16;
}
+#else
+ /* Load the holding buffer */
+ buf_hold = (*--i_src & 0xFFFF) << 16;
+ for (; c >= 4; c -= 4) {
+ value = *--i_src;
+ *--i_dst = buf_hold | ((value & 0xFFFF0000) >> 16);
+ buf_hold = (value & 0xFFFF) << 16;
+ }
+#endif
/* Realign the source */
src = (const void *)i_src;
src += 2;
@@ -138,7 +166,7 @@ void *memmove(void *v_dst, const void *v_src, __kernel_size_t c)
case 0x3: /* Unaligned - Off by 3 */
/* Word align the source */
i_src = (const void *) (((unsigned)src + 4) & ~3);
-
+#ifndef __MICROBLAZEEL__
/* Load the holding buffer */
buf_hold = *--i_src >> 8;
@@ -147,7 +175,16 @@ void *memmove(void *v_dst, const void *v_src, __kernel_size_t c)
*--i_dst = buf_hold << 24 | value;
buf_hold = value >> 8;
}
+#else
+ /* Load the holding buffer */
+ buf_hold = (*--i_src & 0xFFFFFF) << 8;
+ for (; c >= 4; c -= 4) {
+ value = *--i_src;
+ *--i_dst = buf_hold | ((value & 0xFF000000) >> 24);
+ buf_hold = (value & 0xFFFFFF) << 8;
+ }
+#endif
/* Realign the source */
src = (const void *)i_src;
src += 3;
@@ -169,7 +206,7 @@ void *memmove(void *v_dst, const void *v_src, __kernel_size_t c)
*--dst = *--src;
}
return v_dst;
-#endif
}
+#endif /* CONFIG_OPT_LIB_FUNCTION */
EXPORT_SYMBOL(memmove);
#endif /* __HAVE_ARCH_MEMMOVE */
diff --git a/arch/microblaze/lib/memset.c b/arch/microblaze/lib/memset.c
index ecfb663e1fc1..834565d1607e 100644
--- a/arch/microblaze/lib/memset.c
+++ b/arch/microblaze/lib/memset.c
@@ -31,17 +31,30 @@
#include <linux/string.h>
#ifdef __HAVE_ARCH_MEMSET
+#ifndef CONFIG_OPT_LIB_FUNCTION
+void *memset(void *v_src, int c, __kernel_size_t n)
+{
+ char *src = v_src;
+
+ /* Truncate c to 8 bits */
+ c = (c & 0xFF);
+
+ /* Simple, byte oriented memset. */
+ while (n--)
+ *src++ = c;
+
+ return v_src;
+}
+#else /* CONFIG_OPT_LIB_FUNCTION */
void *memset(void *v_src, int c, __kernel_size_t n)
{
char *src = v_src;
-#ifdef CONFIG_OPT_LIB_FUNCTION
uint32_t *i_src;
uint32_t w32 = 0;
-#endif
+
/* Truncate c to 8 bits */
c = (c & 0xFF);
-#ifdef CONFIG_OPT_LIB_FUNCTION
if (unlikely(c)) {
/* Make a repeating word out of it */
w32 = c;
@@ -72,12 +85,13 @@ void *memset(void *v_src, int c, __kernel_size_t n)
src = (void *)i_src;
}
-#endif
+
/* Simple, byte oriented memset or the rest of count. */
while (n--)
*src++ = c;
return v_src;
}
+#endif /* CONFIG_OPT_LIB_FUNCTION */
EXPORT_SYMBOL(memset);
#endif /* __HAVE_ARCH_MEMSET */
diff --git a/arch/microblaze/lib/modsi3.S b/arch/microblaze/lib/modsi3.S
new file mode 100644
index 000000000000..84e0bee6e8c7
--- /dev/null
+++ b/arch/microblaze/lib/modsi3.S
@@ -0,0 +1,73 @@
+#include <linux/linkage.h>
+
+/*
+* modulo operation for 32 bit integers.
+* Input : op1 in Reg r5
+* op2 in Reg r6
+* Output: op1 mod op2 in Reg r3
+*/
+
+ .text
+ .globl __modsi3
+ .type __modsi3, @function
+ .ent __modsi3
+
+__modsi3:
+ .frame r1, 0, r15
+
+ addik r1, r1, -16
+ swi r28, r1, 0
+ swi r29, r1, 4
+ swi r30, r1, 8
+ swi r31, r1, 12
+
+ beqi r6, div_by_zero /* div_by_zero division error */
+ beqi r5, result_is_zero /* result is zero */
+ bgeid r5, r5_pos
+ /* get the sign of the result [ depends only on the first arg] */
+ add r28, r5, r0
+ rsubi r5, r5, 0 /* make r5 positive */
+r5_pos:
+ bgei r6, r6_pos
+ rsubi r6, r6, 0 /* make r6 positive */
+r6_pos:
+ addik r3, r0, 0 /* clear mod */
+ addik r30, r0, 0 /* clear div */
+ addik r29, r0, 32 /* initialize the loop count */
+/* first part try to find the first '1' in the r5 */
+div1:
+ add r5, r5, r5 /* left shift logical r5 */
+ bgeid r5, div1
+ addik r29, r29, -1
+div2:
+ /* left shift logical r5 get the '1' into the carry */
+ add r5, r5, r5
+ addc r3, r3, r3 /* move that bit into the mod register */
+ rsub r31, r6, r3 /* try to subtract (r3 - r6) */
+ blti r31, mod_too_small
+ /* move the r31 to mod since the result was positive */
+ or r3, r0, r31
+ addik r30, r30, 1
+mod_too_small:
+ addik r29, r29, -1
+ beqi r29, loop_end
+ add r30, r30, r30 /* shift in the '1' into div */
+ bri div2 /* div2 */
+loop_end:
+ bgei r28, return_here
+ brid return_here
+ rsubi r3, r3, 0 /* negate the result */
+div_by_zero:
+result_is_zero:
+ or r3, r0, r0 /* set result to 0 [both mod as well as div are 0] */
+return_here:
+/* restore values of csrs and that of r3 and the divisor and the dividend */
+ lwi r28, r1, 0
+ lwi r29, r1, 4
+ lwi r30, r1, 8
+ lwi r31, r1, 12
+ rtsd r15, 8
+ addik r1, r1, 16
+
+.size __modsi3, . - __modsi3
+.end __modsi3
diff --git a/arch/microblaze/lib/muldi3.S b/arch/microblaze/lib/muldi3.S
new file mode 100644
index 000000000000..ceeaa8c407f2
--- /dev/null
+++ b/arch/microblaze/lib/muldi3.S
@@ -0,0 +1,121 @@
+#include <linux/linkage.h>
+
+/*
+ * Multiply operation for 64 bit integers, for devices with hard multiply
+ * Input : Operand1[H] in Reg r5
+ * Operand1[L] in Reg r6
+ * Operand2[H] in Reg r7
+ * Operand2[L] in Reg r8
+ * Output: Result[H] in Reg r3
+ * Result[L] in Reg r4
+ *
+ * Explanation:
+ *
+ * Both the input numbers are divided into 16 bit number as follows
+ * op1 = A B C D
+ * op2 = E F G H
+ * result = D * H
+ * + (C * H + D * G) << 16
+ * + (B * H + C * G + D * F) << 32
+ * + (A * H + B * G + C * F + D * E) << 48
+ *
+ * Only 64 bits of the output are considered
+ */
+
+ .text
+ .globl __muldi3
+ .type __muldi3, @function
+ .ent __muldi3
+
+__muldi3:
+ addi r1, r1, -40
+
+/* Save the input operands on the caller's stack */
+ swi r5, r1, 44
+ swi r6, r1, 48
+ swi r7, r1, 52
+ swi r8, r1, 56
+
+/* Store all the callee saved registers */
+ sw r20, r1, r0
+ swi r21, r1, 4
+ swi r22, r1, 8
+ swi r23, r1, 12
+ swi r24, r1, 16
+ swi r25, r1, 20
+ swi r26, r1, 24
+ swi r27, r1, 28
+
+/* Load all the 16 bit values for A thru H */
+ lhui r20, r1, 44 /* A */
+ lhui r21, r1, 46 /* B */
+ lhui r22, r1, 48 /* C */
+ lhui r23, r1, 50 /* D */
+ lhui r24, r1, 52 /* E */
+ lhui r25, r1, 54 /* F */
+ lhui r26, r1, 56 /* G */
+ lhui r27, r1, 58 /* H */
+
+/* D * H ==> LSB of the result on stack ==> Store1 */
+ mul r9, r23, r27
+ swi r9, r1, 36 /* Pos2 and Pos3 */
+
+/* Hi (Store1) + C * H + D * G ==> Store2 ==> Pos1 and Pos2 */
+/* Store the carry generated in position 2 for Pos 3 */
+ lhui r11, r1, 36 /* Pos2 */
+ mul r9, r22, r27 /* C * H */
+ mul r10, r23, r26 /* D * G */
+ add r9, r9, r10
+ addc r12, r0, r0
+ add r9, r9, r11
+ addc r12, r12, r0 /* Store the Carry */
+ shi r9, r1, 36 /* Store Pos2 */
+ swi r9, r1, 32
+ lhui r11, r1, 32
+ shi r11, r1, 34 /* Store Pos1 */
+
+/* Hi (Store2) + B * H + C * G + D * F ==> Store3 ==> Pos0 and Pos1 */
+ mul r9, r21, r27 /* B * H */
+ mul r10, r22, r26 /* C * G */
+ mul r7, r23, r25 /* D * F */
+ add r9, r9, r11
+ add r9, r9, r10
+ add r9, r9, r7
+ swi r9, r1, 32 /* Pos0 and Pos1 */
+
+/* Hi (Store3) + A * H + B * G + C * F + D * E ==> Store3 ==> Pos0 */
+ lhui r11, r1, 32 /* Pos0 */
+ mul r9, r20, r27 /* A * H */
+ mul r10, r21, r26 /* B * G */
+ mul r7, r22, r25 /* C * F */
+ mul r8, r23, r24 /* D * E */
+ add r9, r9, r11
+ add r9, r9, r10
+ add r9, r9, r7
+ add r9, r9, r8
+ sext16 r9, r9 /* Sign extend the MSB */
+ shi r9, r1, 32
+
+/* Move results to r3 and r4 */
+ lhui r3, r1, 32
+ add r3, r3, r12
+ shi r3, r1, 32
+ lwi r3, r1, 32 /* Hi Part */
+ lwi r4, r1, 36 /* Lo Part */
+
+/* Restore Callee saved registers */
+ lw r20, r1, r0
+ lwi r21, r1, 4
+ lwi r22, r1, 8
+ lwi r23, r1, 12
+ lwi r24, r1, 16
+ lwi r25, r1, 20
+ lwi r26, r1, 24
+ lwi r27, r1, 28
+
+/* Restore Frame and return */
+ rtsd r15, 8
+ addi r1, r1, 40
+
+.size __muldi3, . - __muldi3
+.end __muldi3
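
The 16-bit decomposition described in the __muldi3 header comment can be sanity-checked in plain C. The sketch below (not part of the patch; names are illustrative) evaluates the same partial-product sum and compares it with a native 64-bit multiply, which wraps modulo 2^64 exactly like the low 64 bits kept by the routine.

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

/* op1 = A B C D and op2 = E F G H, 16 bits each, most significant first. */
static uint64_t mul64_by_parts(uint64_t op1, uint64_t op2)
{
    uint64_t A = (op1 >> 48) & 0xFFFF, B = (op1 >> 32) & 0xFFFF;
    uint64_t C = (op1 >> 16) & 0xFFFF, D = op1 & 0xFFFF;
    uint64_t E = (op2 >> 48) & 0xFFFF, F = (op2 >> 32) & 0xFFFF;
    uint64_t G = (op2 >> 16) & 0xFFFF, H = op2 & 0xFFFF;

    return  D * H
        + ((C * H + D * G) << 16)
        + ((B * H + C * G + D * F) << 32)
        + ((A * H + B * G + C * F + D * E) << 48);
}

int main(void)
{
    uint64_t a = 0x123456789abcdef0ULL;
    uint64_t b = 0x0fedcba987654321ULL;

    /* Only the low 64 bits of the product are kept, as in __muldi3. */
    assert(mul64_by_parts(a, b) == a * b);
    puts("16-bit partial products match the native 64-bit multiply");
    return 0;
}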
diff --git a/arch/microblaze/lib/mulsi3.S b/arch/microblaze/lib/mulsi3.S
new file mode 100644
index 000000000000..90bd7b93afe6
--- /dev/null
+++ b/arch/microblaze/lib/mulsi3.S
@@ -0,0 +1,46 @@
+#include <linux/linkage.h>
+
+/*
+ * Multiply operation for 32 bit integers.
+ * Input : Operand1 in Reg r5
+ * Operand2 in Reg r6
+ * Output: Result [op1 * op2] in Reg r3
+ */
+ .text
+ .globl __mulsi3
+ .type __mulsi3, @function
+ .ent __mulsi3
+
+__mulsi3:
+ .frame r1, 0, r15
+ add r3, r0, r0
+ beqi r5, result_is_zero /* multiply by zero */
+ beqi r6, result_is_zero /* multiply by zero */
+ bgeid r5, r5_pos
+ xor r4, r5, r6 /* get the sign of the result */
+ rsubi r5, r5, 0 /* make r5 positive */
+r5_pos:
+ bgei r6, r6_pos
+ rsubi r6, r6, 0 /* make r6 positive */
+r6_pos:
+ bri l1
+l2:
+ add r5, r5, r5
+l1:
+ srl r6, r6
+ addc r7, r0, r0
+ beqi r7, l2
+ bneid r6, l2
+ add r3, r3, r5
+ blti r4, negateresult
+ rtsd r15, 8
+ nop
+negateresult:
+ rtsd r15, 8
+ rsub r3, r3, r0
+result_is_zero:
+ rtsd r15, 8
+ addi r3, r0, 0
+
+.size __mulsi3, . - __mulsi3
+.end __mulsi3
diff --git a/arch/microblaze/lib/udivsi3.S b/arch/microblaze/lib/udivsi3.S
new file mode 100644
index 000000000000..64cf57e4bb85
--- /dev/null
+++ b/arch/microblaze/lib/udivsi3.S
@@ -0,0 +1,84 @@
+#include <linux/linkage.h>
+
+/*
+* Unsigned divide operation.
+* Input : Dividend in Reg r5
+* Divisor in Reg r6
+* Output: Result in Reg r3
+*/
+
+ .text
+ .globl __udivsi3
+ .type __udivsi3, @function
+ .ent __udivsi3
+
+__udivsi3:
+
+ .frame r1, 0, r15
+
+ addik r1, r1, -12
+ swi r29, r1, 0
+ swi r30, r1, 4
+ swi r31, r1, 8
+
+ beqi r6, div_by_zero /* div_by_zero - division error */
+ beqid r5, result_is_zero /* result is zero */
+ addik r30, r0, 0 /* clear mod */
+ addik r29, r0, 32 /* initialize the loop count */
+
+/* check if r6 and r5 are equal - if yes, return 1 */
+ rsub r18, r5, r6
+ beqid r18, return_here
+ addik r3, r0, 1
+
+/* check if (uns)r6 is greater than (uns)r5. in that case, just return 0 */
+ xor r18, r5, r6
+ bgeid r18, 16
+ add r3, r0, r0 /* we would clear r3 anyway */
+ blti r6, return_here /* r6[bit 31 = 1] hence is greater */
+ bri checkr6
+ rsub r18, r6, r5 /* microblazecmp */
+ blti r18, return_here
+
+/* if r6 [bit 31] is set, then return result as 1 */
+checkr6:
+ bgti r6, div0
+ brid return_here
+ addik r3, r0, 1
+
+/* first part try to find the first '1' in the r5 */
+div0:
+ blti r5, div2
+div1:
+ add r5, r5, r5 /* left shift logical r5 */
+ bgtid r5, div1
+ addik r29, r29, -1
+div2:
+/* left shift logical r5 get the '1' into the carry */
+ add r5, r5, r5
+ addc r30, r30, r30 /* move that bit into the mod register */
+ rsub r31, r6, r30 /* try to subtract (r30 - r6) */
+ blti r31, mod_too_small
+/* move the r31 to mod since the result was positive */
+ or r30, r0, r31
+ addik r3, r3, 1
+mod_too_small:
+ addik r29, r29, -1
+ beqi r29, loop_end
+ add r3, r3, r3 /* shift in the '1' into div */
+ bri div2 /* div2 */
+loop_end:
+ bri return_here
+div_by_zero:
+result_is_zero:
+ or r3, r0, r0 /* set result to 0 */
+return_here:
+/* restore values of csrs and that of r3 and the divisor and the dividend */
+ lwi r29, r1, 0
+ lwi r30, r1, 4
+ lwi r31, r1, 8
+ rtsd r15, 8
+ addik r1, r1, 12
+
+.size __udivsi3, . - __udivsi3
+.end __udivsi3
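
__udivsi3 above is a restoring shift-and-subtract divider: each iteration shifts the next dividend bit into a running remainder and subtracts the divisor whenever it fits, accumulating quotient bits. A plain-C rendering of that loop (not part of the patch; it omits the early-exit and equal/greater special cases the assembly adds):

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

static uint32_t udiv32(uint32_t dividend, uint32_t divisor)
{
    uint32_t quotient = 0, remainder = 0;
    int i;

    if (divisor == 0)
        return 0;       /* __udivsi3 also returns 0 on divide by zero */

    for (i = 31; i >= 0; i--) {
        /* shift the next dividend bit into the remainder ("addc") */
        remainder = (remainder << 1) | ((dividend >> i) & 1);
        quotient <<= 1;
        if (remainder >= divisor) {     /* "try to subtract" */
            remainder -= divisor;
            quotient |= 1;
        }
    }
    return quotient;
}

int main(void)
{
    assert(udiv32(100, 7) == 14);
    assert(udiv32(0xFFFFFFFFu, 3) == 0x55555555u);
    assert(udiv32(5, 10) == 0);
    puts("restoring division matches native unsigned division");
    return 0;
}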
diff --git a/arch/microblaze/lib/umodsi3.S b/arch/microblaze/lib/umodsi3.S
new file mode 100644
index 000000000000..17d16bafae58
--- /dev/null
+++ b/arch/microblaze/lib/umodsi3.S
@@ -0,0 +1,86 @@
+#include <linux/linkage.h>
+
+/*
+ * Unsigned modulo operation for 32 bit integers.
+ * Input : op1 in Reg r5
+ * op2 in Reg r6
+ * Output: op1 mod op2 in Reg r3
+ */
+
+ .text
+ .globl __umodsi3
+ .type __umodsi3, @function
+ .ent __umodsi3
+
+__umodsi3:
+ .frame r1, 0, r15
+
+ addik r1, r1, -12
+ swi r29, r1, 0
+ swi r30, r1, 4
+ swi r31, r1, 8
+
+ beqi r6, div_by_zero /* div_by_zero - division error */
+ beqid r5, result_is_zero /* result is zero */
+ addik r3, r0, 0 /* clear div */
+ addik r30, r0, 0 /* clear mod */
+ addik r29, r0, 32 /* initialize the loop count */
+
+/* check if r6 and r5 are equal - if yes, return 0 */
+ rsub r18, r5, r6
+ beqi r18, return_here
+
+/* check if (uns)r6 is greater than (uns)r5. in that case, just return r5 */
+ xor r18, r5, r6
+ bgeid r18, 16
+ addik r3, r5, 0
+ blti r6, return_here
+ bri $lcheckr6
+ rsub r18, r5, r6 /* microblazecmp */
+ bgti r18, return_here
+
+/* if r6 [bit 31] is set, then return result as r5-r6 */
+$lcheckr6:
+ bgtid r6, div0
+ addik r3, r0, 0
+ addik r18, r0, 0x7fffffff
+ and r5, r5, r18
+ and r6, r6, r18
+ brid return_here
+ rsub r3, r6, r5
+/* first part: try to find the first '1' in the r5 */
+div0:
+ blti r5, div2
+div1:
+ add r5, r5, r5 /* left shift logical r5 */
+ bgeid r5, div1
+ addik r29, r29, -1
+div2:
+ /* left shift logical r5 get the '1' into the carry */
+ add r5, r5, r5
+ addc r3, r3, r3 /* move that bit into the mod register */
+ rsub r31, r6, r3 /* try to subtract (r3 - r6) */
+ blti r31, mod_too_small
+ /* move the r31 to mod since the result was positive */
+ or r3, r0, r31
+ addik r30, r30, 1
+mod_too_small:
+ addik r29, r29, -1
+ beqi r29, loop_end
+ add r30, r30, r30 /* shift in the '1' into div */
+ bri div2 /* div2 */
+loop_end:
+ bri return_here
+div_by_zero:
+result_is_zero:
+ or r3, r0, r0 /* set result to 0 */
+return_here:
+/* restore values of csrs and that of r3 and the divisor and the dividend */
+ lwi r29, r1, 0
+ lwi r30, r1, 4
+ lwi r31, r1, 8
+ rtsd r15, 8
+ addik r1, r1, 12
+
+.size __umodsi3, . - __umodsi3
+.end __umodsi3
diff --git a/arch/microblaze/pci/pci-common.c b/arch/microblaze/pci/pci-common.c
index 55ef532f32be..e363615d6798 100644
--- a/arch/microblaze/pci/pci-common.c
+++ b/arch/microblaze/pci/pci-common.c
@@ -60,21 +60,6 @@ struct dma_map_ops *get_pci_dma_ops(void)
}
EXPORT_SYMBOL(get_pci_dma_ops);
-int pci_set_dma_mask(struct pci_dev *dev, u64 mask)
-{
- return dma_set_mask(&dev->dev, mask);
-}
-
-int pci_set_consistent_dma_mask(struct pci_dev *dev, u64 mask)
-{
- int rc;
-
- rc = dma_set_mask(&dev->dev, mask);
- dev->dev.coherent_dma_mask = dev->dma_mask;
-
- return rc;
-}
-
struct pci_controller *pcibios_alloc_controller(struct device_node *dev)
{
struct pci_controller *phb;
@@ -1075,8 +1060,6 @@ void __devinit pcibios_setup_bus_devices(struct pci_bus *bus)
bus->number, bus->self ? pci_name(bus->self) : "PHB");
list_for_each_entry(dev, &bus->devices, bus_list) {
- struct dev_archdata *sd = &dev->dev.archdata;
-
/* Setup OF node pointer in archdata */
dev->dev.of_node = pci_device_to_OF_node(dev);
@@ -1086,8 +1069,8 @@ void __devinit pcibios_setup_bus_devices(struct pci_bus *bus)
set_dev_node(&dev->dev, pcibus_to_node(dev->bus));
/* Hook up default DMA ops */
- sd->dma_ops = pci_dma_ops;
- sd->dma_data = (void *)PCI_DRAM_OFFSET;
+ set_dma_ops(&dev->dev, pci_dma_ops);
+ dev->dev.archdata.dma_data = (void *)PCI_DRAM_OFFSET;
/* Read default IRQs and fixup if necessary */
pci_read_irq_line(dev);
diff --git a/arch/microblaze/platform/generic/system.dts b/arch/microblaze/platform/generic/system.dts
index 2d5c41767cd0..3f85df2b73b3 100644
--- a/arch/microblaze/platform/generic/system.dts
+++ b/arch/microblaze/platform/generic/system.dts
@@ -85,6 +85,7 @@
xlnx,dynamic-bus-sizing = <0x1>;
xlnx,edge-is-positive = <0x1>;
xlnx,family = "virtex5";
+ xlnx,endianness = <0x1>;
xlnx,fpu-exception = <0x1>;
xlnx,fsl-data-size = <0x20>;
xlnx,fsl-exception = <0x0>;
@@ -218,6 +219,7 @@
#address-cells = <1>;
#size-cells = <1>;
compatible = "xlnx,compound";
+ ranges ;
ethernet@81c00000 {
compatible = "xlnx,xps-ll-temac-1.01.b", "xlnx,xps-ll-temac-1.00.a";
device_type = "network";
@@ -332,6 +334,7 @@
#address-cells = <1>;
#size-cells = <1>;
compatible = "xlnx,mpmc-4.02.a";
+ ranges ;
PIM3: sdma@84600180 {
compatible = "xlnx,ll-dma-1.00.a";
interrupt-parent = <&xps_intc_0>;
diff --git a/arch/microblaze/platform/platform.c b/arch/microblaze/platform/platform.c
index 5b89b58c5aed..b9529caa507a 100644
--- a/arch/microblaze/platform/platform.c
+++ b/arch/microblaze/platform/platform.c
@@ -17,9 +17,6 @@
static struct of_device_id xilinx_of_bus_ids[] __initdata = {
{ .compatible = "simple-bus", },
- { .compatible = "xlnx,plb-v46-1.00.a", },
- { .compatible = "xlnx,opb-v20-1.10.c", },
- { .compatible = "xlnx,opb-v20-1.10.b", },
{ .compatible = "xlnx,compound", },
{}
};
diff --git a/arch/mips/Kconfig b/arch/mips/Kconfig
index 784cf822963a..67a2fa2caa49 100644
--- a/arch/mips/Kconfig
+++ b/arch/mips/Kconfig
@@ -4,18 +4,21 @@ config MIPS
select HAVE_GENERIC_DMA_COHERENT
select HAVE_IDE
select HAVE_OPROFILE
+ select HAVE_PERF_EVENTS
+ select PERF_USE_VMALLOC
select HAVE_ARCH_KGDB
select HAVE_FUNCTION_TRACER
select HAVE_FUNCTION_TRACE_MCOUNT_TEST
select HAVE_DYNAMIC_FTRACE
select HAVE_FTRACE_MCOUNT_RECORD
+ select HAVE_C_RECORDMCOUNT
select HAVE_FUNCTION_GRAPH_TRACER
select HAVE_KPROBES
select HAVE_KRETPROBES
select RTC_LIB if !MACH_LOONGSON
select GENERIC_ATOMIC64 if !64BIT
-
-mainmenu "Linux/MIPS Kernel Configuration"
+ select HAVE_DMA_ATTRS
+ select HAVE_DMA_API_DEBUG
menu "Machine selection"
@@ -693,6 +696,9 @@ config CAVIUM_OCTEON_REFERENCE_BOARD
select SWAP_IO_SPACE
select HW_HAS_PCI
select ARCH_SUPPORTS_MSI
+ select ZONE_DMA32
+ select USB_ARCH_HAS_OHCI
+ select USB_ARCH_HAS_EHCI
help
This option supports all of the Octeon reference boards from Cavium
Networks. It builds a kernel that dynamically determines the Octeon
@@ -1336,6 +1342,57 @@ config CPU_CAVIUM_OCTEON
can have up to 16 Mips64v2 cores and 8 integrated gigabit ethernets.
Full details can be found at http://www.caviumnetworks.com.
+config CPU_BMIPS3300
+ bool "BMIPS3300"
+ depends on SYS_HAS_CPU_BMIPS3300
+ select DMA_NONCOHERENT
+ select IRQ_CPU
+ select SWAP_IO_SPACE
+ select SYS_SUPPORTS_32BIT_KERNEL
+ select WEAK_ORDERING
+ help
+ Broadcom BMIPS3300 processors.
+
+config CPU_BMIPS4350
+ bool "BMIPS4350"
+ depends on SYS_HAS_CPU_BMIPS4350
+ select CPU_SUPPORTS_32BIT_KERNEL
+ select DMA_NONCOHERENT
+ select IRQ_CPU
+ select SWAP_IO_SPACE
+ select SYS_SUPPORTS_SMP
+ select SYS_SUPPORTS_HOTPLUG_CPU
+ select WEAK_ORDERING
+ help
+ Broadcom BMIPS4350 ("VIPER") processors.
+
+config CPU_BMIPS4380
+ bool "BMIPS4380"
+ depends on SYS_HAS_CPU_BMIPS4380
+ select CPU_SUPPORTS_32BIT_KERNEL
+ select DMA_NONCOHERENT
+ select IRQ_CPU
+ select SWAP_IO_SPACE
+ select SYS_SUPPORTS_SMP
+ select SYS_SUPPORTS_HOTPLUG_CPU
+ select WEAK_ORDERING
+ help
+ Broadcom BMIPS4380 processors.
+
+config CPU_BMIPS5000
+ bool "BMIPS5000"
+ depends on SYS_HAS_CPU_BMIPS5000
+ select CPU_SUPPORTS_32BIT_KERNEL
+ select CPU_SUPPORTS_HIGHMEM
+ select DMA_NONCOHERENT
+ select IRQ_CPU
+ select SWAP_IO_SPACE
+ select SYS_SUPPORTS_SMP
+ select SYS_SUPPORTS_HOTPLUG_CPU
+ select WEAK_ORDERING
+ help
+ Broadcom BMIPS5000 processors.
+
endchoice
if CPU_LOONGSON2F
@@ -1454,6 +1511,18 @@ config SYS_HAS_CPU_SB1
config SYS_HAS_CPU_CAVIUM_OCTEON
bool
+config SYS_HAS_CPU_BMIPS3300
+ bool
+
+config SYS_HAS_CPU_BMIPS4350
+ bool
+
+config SYS_HAS_CPU_BMIPS4380
+ bool
+
+config SYS_HAS_CPU_BMIPS5000
+ bool
+
#
# CPU may reorder R->R, R->W, W->R, W->W
# Reordering beyond LL and SC is handled in WEAK_REORDERING_BEYOND_LLSC
@@ -1930,6 +1999,14 @@ config NODES_SHIFT
default "6"
depends on NEED_MULTIPLE_NODES
+config HW_PERF_EVENTS
+ bool "Enable hardware performance counter support for perf events"
+ depends on PERF_EVENTS && !MIPS_MT_SMTC && OPROFILE=n && CPU_MIPS32
+ default y
+ help
+ Enable hardware performance counter support for perf events. If
+ disabled, perf events will use software events only.
+
source "mm/Kconfig"
config SMP
@@ -2128,6 +2205,13 @@ config SECCOMP
If unsure, say Y. Only embedded should say N here.
+config USE_OF
+ bool "Flattened Device Tree support"
+ select OF
+ select OF_FLATTREE
+ help
+ Include support for flattened device tree machine descriptions.
+
endmenu
config LOCKDEP_SUPPORT
diff --git a/arch/mips/Kconfig.debug b/arch/mips/Kconfig.debug
index 43dc27997730..f437cd1fafb8 100644
--- a/arch/mips/Kconfig.debug
+++ b/arch/mips/Kconfig.debug
@@ -67,6 +67,15 @@ config CMDLINE_OVERRIDE
Normally, you will choose 'N' here.
+config DEBUG_STACKOVERFLOW
+ bool "Check for stack overflows"
+ depends on DEBUG_KERNEL
+ help
+ This option will cause messages to be printed if free stack space
+ drops below a certain limit (2GB on MIPS). The debugging option
+ provides another way to detect stack overflows on the kernel-mode
+ stack, which are usually caused by nested interrupts.
+
config DEBUG_STACK_USAGE
bool "Enable stack utilization instrumentation"
depends on DEBUG_KERNEL
diff --git a/arch/mips/Makefile b/arch/mips/Makefile
index f4a4b663ebb3..7c1102e41fe2 100644
--- a/arch/mips/Makefile
+++ b/arch/mips/Makefile
@@ -48,9 +48,6 @@ ifneq ($(SUBARCH),$(ARCH))
endif
endif
-ifndef CONFIG_FUNCTION_TRACER
-cflags-y := -ffunction-sections
-endif
ifdef CONFIG_FUNCTION_GRAPH_TRACER
ifndef KBUILD_MCOUNT_RA_ADDRESS
ifeq ($(call cc-option-yn,-mmcount-ra-address), y)
@@ -159,6 +156,7 @@ cflags-$(CONFIG_CPU_CAVIUM_OCTEON) += $(call cc-option,-march=octeon) -Wa,--trap
ifeq (,$(findstring march=octeon, $(cflags-$(CONFIG_CPU_CAVIUM_OCTEON))))
cflags-$(CONFIG_CPU_CAVIUM_OCTEON) += -Wa,-march=octeon
endif
+cflags-$(CONFIG_CAVIUM_CN63XXP1) += -Wa,-mfix-cn63xxp1
cflags-$(CONFIG_CPU_R4000_WORKAROUNDS) += $(call cc-option,-mfix-r4000,)
cflags-$(CONFIG_CPU_R4400_WORKAROUNDS) += $(call cc-option,-mfix-r4400,)
diff --git a/arch/mips/alchemy/devboards/db1200/platform.c b/arch/mips/alchemy/devboards/db1200/platform.c
index 3fa34c3abc04..fbb55935b99e 100644
--- a/arch/mips/alchemy/devboards/db1200/platform.c
+++ b/arch/mips/alchemy/devboards/db1200/platform.c
@@ -429,6 +429,11 @@ static struct platform_device db1200_audio_dev = {
.resource = au1200_psc1_res,
};
+static struct platform_device db1200_stac_dev = {
+ .name = "ac97-codec",
+ .id = 1, /* on PSC1 */
+};
+
static struct platform_device *db1200_devs[] __initdata = {
NULL, /* PSC0, selected by S6.8 */
&db1200_ide_dev,
@@ -436,6 +441,7 @@ static struct platform_device *db1200_devs[] __initdata = {
&db1200_rtc_dev,
&db1200_nand_dev,
&db1200_audio_dev,
+ &db1200_stac_dev,
};
static int __init db1200_dev_init(void)
diff --git a/arch/mips/ar7/gpio.c b/arch/mips/ar7/gpio.c
index c32fbb57441a..425dfa5d6e12 100644
--- a/arch/mips/ar7/gpio.c
+++ b/arch/mips/ar7/gpio.c
@@ -1,7 +1,7 @@
/*
* Copyright (C) 2007 Felix Fietkau <nbd@openwrt.org>
* Copyright (C) 2007 Eugene Konev <ejka@openwrt.org>
- * Copyright (C) 2009 Florian Fainelli <florian@openwrt.org>
+ * Copyright (C) 2009-2010 Florian Fainelli <florian@openwrt.org>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
@@ -37,6 +37,16 @@ static int ar7_gpio_get_value(struct gpio_chip *chip, unsigned gpio)
return readl(gpio_in) & (1 << gpio);
}
+static int titan_gpio_get_value(struct gpio_chip *chip, unsigned gpio)
+{
+ struct ar7_gpio_chip *gpch =
+ container_of(chip, struct ar7_gpio_chip, chip);
+ void __iomem *gpio_in0 = gpch->regs + TITAN_GPIO_INPUT_0;
+ void __iomem *gpio_in1 = gpch->regs + TITAN_GPIO_INPUT_1;
+
+ return readl(gpio >> 5 ? gpio_in1 : gpio_in0) & (1 << (gpio & 0x1f));
+}
+
static void ar7_gpio_set_value(struct gpio_chip *chip,
unsigned gpio, int value)
{
@@ -51,6 +61,21 @@ static void ar7_gpio_set_value(struct gpio_chip *chip,
writel(tmp, gpio_out);
}
+static void titan_gpio_set_value(struct gpio_chip *chip,
+ unsigned gpio, int value)
+{
+ struct ar7_gpio_chip *gpch =
+ container_of(chip, struct ar7_gpio_chip, chip);
+ void __iomem *gpio_out0 = gpch->regs + TITAN_GPIO_OUTPUT_0;
+ void __iomem *gpio_out1 = gpch->regs + TITAN_GPIO_OUTPUT_1;
+ unsigned tmp;
+
+ tmp = readl(gpio >> 5 ? gpio_out1 : gpio_out0) & ~(1 << (gpio & 0x1f));
+ if (value)
+ tmp |= 1 << (gpio & 0x1f);
+ writel(tmp, gpio >> 5 ? gpio_out1 : gpio_out0);
+}
+
static int ar7_gpio_direction_input(struct gpio_chip *chip, unsigned gpio)
{
struct ar7_gpio_chip *gpch =
@@ -62,6 +87,21 @@ static int ar7_gpio_direction_input(struct gpio_chip *chip, unsigned gpio)
return 0;
}
+static int titan_gpio_direction_input(struct gpio_chip *chip, unsigned gpio)
+{
+ struct ar7_gpio_chip *gpch =
+ container_of(chip, struct ar7_gpio_chip, chip);
+ void __iomem *gpio_dir0 = gpch->regs + TITAN_GPIO_DIR_0;
+ void __iomem *gpio_dir1 = gpch->regs + TITAN_GPIO_DIR_1;
+
+ if (gpio >= TITAN_GPIO_MAX)
+ return -EINVAL;
+
+ writel(readl(gpio >> 5 ? gpio_dir1 : gpio_dir0) | (1 << (gpio & 0x1f)),
+ gpio >> 5 ? gpio_dir1 : gpio_dir0);
+ return 0;
+}
+
static int ar7_gpio_direction_output(struct gpio_chip *chip,
unsigned gpio, int value)
{
@@ -75,6 +115,24 @@ static int ar7_gpio_direction_output(struct gpio_chip *chip,
return 0;
}
+static int titan_gpio_direction_output(struct gpio_chip *chip,
+ unsigned gpio, int value)
+{
+ struct ar7_gpio_chip *gpch =
+ container_of(chip, struct ar7_gpio_chip, chip);
+ void __iomem *gpio_dir0 = gpch->regs + TITAN_GPIO_DIR_0;
+ void __iomem *gpio_dir1 = gpch->regs + TITAN_GPIO_DIR_1;
+
+ if (gpio >= TITAN_GPIO_MAX)
+ return -EINVAL;
+
+ titan_gpio_set_value(chip, gpio, value);
+ writel(readl(gpio >> 5 ? gpio_dir1 : gpio_dir0) & ~(1 <<
+ (gpio & 0x1f)), gpio >> 5 ? gpio_dir1 : gpio_dir0);
+
+ return 0;
+}
+
static struct ar7_gpio_chip ar7_gpio_chip = {
.chip = {
.label = "ar7-gpio",
@@ -87,7 +145,19 @@ static struct ar7_gpio_chip ar7_gpio_chip = {
}
};
-int ar7_gpio_enable(unsigned gpio)
+static struct ar7_gpio_chip titan_gpio_chip = {
+ .chip = {
+ .label = "titan-gpio",
+ .direction_input = titan_gpio_direction_input,
+ .direction_output = titan_gpio_direction_output,
+ .set = titan_gpio_set_value,
+ .get = titan_gpio_get_value,
+ .base = 0,
+ .ngpio = TITAN_GPIO_MAX,
+ }
+};
+
+static inline int ar7_gpio_enable_ar7(unsigned gpio)
{
void __iomem *gpio_en = ar7_gpio_chip.regs + AR7_GPIO_ENABLE;
@@ -95,9 +165,26 @@ int ar7_gpio_enable(unsigned gpio)
return 0;
}
+
+static inline int ar7_gpio_enable_titan(unsigned gpio)
+{
+ void __iomem *gpio_en0 = titan_gpio_chip.regs + TITAN_GPIO_ENBL_0;
+ void __iomem *gpio_en1 = titan_gpio_chip.regs + TITAN_GPIO_ENBL_1;
+
+ writel(readl(gpio >> 5 ? gpio_en1 : gpio_en0) | (1 << (gpio & 0x1f)),
+ gpio >> 5 ? gpio_en1 : gpio_en0);
+
+ return 0;
+}
+
+int ar7_gpio_enable(unsigned gpio)
+{
+ return ar7_is_titan() ? ar7_gpio_enable_titan(gpio) :
+ ar7_gpio_enable_ar7(gpio);
+}
EXPORT_SYMBOL(ar7_gpio_enable);
-int ar7_gpio_disable(unsigned gpio)
+static inline int ar7_gpio_disable_ar7(unsigned gpio)
{
void __iomem *gpio_en = ar7_gpio_chip.regs + AR7_GPIO_ENABLE;
@@ -105,27 +192,159 @@ int ar7_gpio_disable(unsigned gpio)
return 0;
}
+
+static inline int ar7_gpio_disable_titan(unsigned gpio)
+{
+ void __iomem *gpio_en0 = titan_gpio_chip.regs + TITAN_GPIO_ENBL_0;
+ void __iomem *gpio_en1 = titan_gpio_chip.regs + TITAN_GPIO_ENBL_1;
+
+ writel(readl(gpio >> 5 ? gpio_en1 : gpio_en0) & ~(1 << (gpio & 0x1f)),
+ gpio >> 5 ? gpio_en1 : gpio_en0);
+
+ return 0;
+}
+
+int ar7_gpio_disable(unsigned gpio)
+{
+ return ar7_is_titan() ? ar7_gpio_disable_titan(gpio) :
+ ar7_gpio_disable_ar7(gpio);
+}
EXPORT_SYMBOL(ar7_gpio_disable);
-static int __init ar7_gpio_init(void)
+struct titan_gpio_cfg {
+ u32 reg;
+ u32 shift;
+ u32 func;
+};
+
+static struct titan_gpio_cfg titan_gpio_table[] = {
+ /* reg, start bit, mux value */
+ {4, 24, 1},
+ {4, 26, 1},
+ {4, 28, 1},
+ {4, 30, 1},
+ {5, 6, 1},
+ {5, 8, 1},
+ {5, 10, 1},
+ {5, 12, 1},
+ {7, 14, 3},
+ {7, 16, 3},
+ {7, 18, 3},
+ {7, 20, 3},
+ {7, 22, 3},
+ {7, 26, 3},
+ {7, 28, 3},
+ {7, 30, 3},
+ {8, 0, 3},
+ {8, 2, 3},
+ {8, 4, 3},
+ {8, 10, 3},
+ {8, 14, 3},
+ {8, 16, 3},
+ {8, 18, 3},
+ {8, 20, 3},
+ {9, 8, 3},
+ {9, 10, 3},
+ {9, 12, 3},
+ {9, 14, 3},
+ {9, 18, 3},
+ {9, 20, 3},
+ {9, 24, 3},
+ {9, 26, 3},
+ {9, 28, 3},
+ {9, 30, 3},
+ {10, 0, 3},
+ {10, 2, 3},
+ {10, 8, 3},
+ {10, 10, 3},
+ {10, 12, 3},
+ {10, 14, 3},
+ {13, 12, 3},
+ {13, 14, 3},
+ {13, 16, 3},
+ {13, 18, 3},
+ {13, 24, 3},
+ {13, 26, 3},
+ {13, 28, 3},
+ {13, 30, 3},
+ {14, 2, 3},
+ {14, 6, 3},
+ {14, 8, 3},
+ {14, 12, 3}
+};
+
+static int titan_gpio_pinsel(unsigned gpio)
+{
+ struct titan_gpio_cfg gpio_cfg;
+ u32 mux_status, pin_sel_reg, tmp;
+ void __iomem *pin_sel = (void __iomem *)KSEG1ADDR(AR7_REGS_PINSEL);
+
+ if (gpio >= ARRAY_SIZE(titan_gpio_table))
+ return -EINVAL;
+
+ gpio_cfg = titan_gpio_table[gpio];
+ pin_sel_reg = gpio_cfg.reg - 1;
+
+ mux_status = (readl(pin_sel + pin_sel_reg) >> gpio_cfg.shift) & 0x3;
+
+ /* Check the mux status */
+ if (!((mux_status == 0) || (mux_status == gpio_cfg.func)))
+ return 0;
+
+ /* Set the pin sel value */
+ tmp = readl(pin_sel + pin_sel_reg);
+ tmp |= ((gpio_cfg.func & 0x3) << gpio_cfg.shift);
+ writel(tmp, pin_sel + pin_sel_reg);
+
+ return 0;
+}
+
+/* Perform minimal Titan GPIO configuration */
+static void titan_gpio_init(void)
+{
+ unsigned i;
+
+ for (i = 44; i < 48; i++) {
+ titan_gpio_pinsel(i);
+ ar7_gpio_enable_titan(i);
+ titan_gpio_direction_input(&titan_gpio_chip.chip, i);
+ }
+}
+
+int __init ar7_gpio_init(void)
{
int ret;
+ struct ar7_gpio_chip *gpch;
+ unsigned size;
+
+ if (!ar7_is_titan()) {
+ gpch = &ar7_gpio_chip;
+ size = 0x10;
+ } else {
+ gpch = &titan_gpio_chip;
+ size = 0x1f;
+ }
- ar7_gpio_chip.regs = ioremap_nocache(AR7_REGS_GPIO,
+ gpch->regs = ioremap_nocache(AR7_REGS_GPIO,
AR7_REGS_GPIO + 0x10);
- if (!ar7_gpio_chip.regs) {
- printk(KERN_ERR "ar7-gpio: failed to ioremap regs\n");
+ if (!gpch->regs) {
+ printk(KERN_ERR "%s: failed to ioremap regs\n",
+ gpch->chip.label);
return -ENOMEM;
}
- ret = gpiochip_add(&ar7_gpio_chip.chip);
+ ret = gpiochip_add(&gpch->chip);
if (ret) {
- printk(KERN_ERR "ar7-gpio: failed to add gpiochip\n");
+ printk(KERN_ERR "%s: failed to add gpiochip\n",
+ gpch->chip.label);
return ret;
}
- printk(KERN_INFO "ar7-gpio: registered %d GPIOs\n",
- ar7_gpio_chip.chip.ngpio);
+ printk(KERN_INFO "%s: registered %d GPIOs\n",
+ gpch->chip.label, gpch->chip.ngpio);
+
+ if (ar7_is_titan())
+ titan_gpio_init();
+
return ret;
}
-arch_initcall(ar7_gpio_init);
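
All of the Titan handlers above use the same two-bank addressing: GPIOs 0-31 live in the *_0 register and GPIOs 32 and up in the *_1 register, selected with gpio >> 5, and the bit inside a bank is 1 << (gpio & 0x1f). A minimal sketch of that pattern (illustrative names and in-memory registers only, not the actual MMIO accessors):

#include <stdint.h>
#include <stdio.h>

/* Two in-memory "banks" standing in for the TITAN_GPIO_*_0/_1 pairs. */
static uint32_t bank[2];

static void gpio_bit_write(unsigned int gpio, int value)
{
    uint32_t *reg = &bank[gpio >> 5];       /* bank 0: GPIO 0-31, bank 1: 32-63 */
    uint32_t mask = 1u << (gpio & 0x1f);    /* bit position inside the bank */

    if (value)
        *reg |= mask;
    else
        *reg &= ~mask;
}

int main(void)
{
    gpio_bit_write(13, 1);      /* bank 0, bit 13 */
    gpio_bit_write(44, 1);      /* bank 1, bit 12 */
    printf("bank0=%08x bank1=%08x\n", bank[0], bank[1]);
    return 0;
}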
diff --git a/arch/mips/ar7/platform.c b/arch/mips/ar7/platform.c
index 0da5b2b8dd88..7d2fab392327 100644
--- a/arch/mips/ar7/platform.c
+++ b/arch/mips/ar7/platform.c
@@ -357,6 +357,11 @@ static struct gpio_led default_leds[] = {
},
};
+static struct gpio_led titan_leds[] = {
+ { .name = "status", .gpio = 8, .active_low = 1, },
+ { .name = "wifi", .gpio = 13, .active_low = 1, },
+};
+
static struct gpio_led dsl502t_leds[] = {
{
.name = "status",
@@ -495,6 +500,9 @@ static void __init detect_leds(void)
} else if (strstr(prid, "DG834")) {
ar7_led_data.num_leds = ARRAY_SIZE(dg834g_leds);
ar7_led_data.leds = dg834g_leds;
+ } else if (strstr(prid, "CYWM") || strstr(prid, "CYWL")) {
+ ar7_led_data.num_leds = ARRAY_SIZE(titan_leds);
+ ar7_led_data.leds = titan_leds;
}
}
@@ -560,6 +568,51 @@ static int __init ar7_register_uarts(void)
return 0;
}
+static void __init titan_fixup_devices(void)
+{
+ /* Set vlynq0 data */
+ vlynq_low_data.reset_bit = 15;
+ vlynq_low_data.gpio_bit = 14;
+
+ /* Set vlynq1 data */
+ vlynq_high_data.reset_bit = 16;
+ vlynq_high_data.gpio_bit = 7;
+
+ /* Set vlynq0 resources */
+ vlynq_low_res[0].start = TITAN_REGS_VLYNQ0;
+ vlynq_low_res[0].end = TITAN_REGS_VLYNQ0 + 0xff;
+ vlynq_low_res[1].start = 33;
+ vlynq_low_res[1].end = 33;
+ vlynq_low_res[2].start = 0x0c000000;
+ vlynq_low_res[2].end = 0x0fffffff;
+ vlynq_low_res[3].start = 80;
+ vlynq_low_res[3].end = 111;
+
+ /* Set vlynq1 resources */
+ vlynq_high_res[0].start = TITAN_REGS_VLYNQ1;
+ vlynq_high_res[0].end = TITAN_REGS_VLYNQ1 + 0xff;
+ vlynq_high_res[1].start = 34;
+ vlynq_high_res[1].end = 34;
+ vlynq_high_res[2].start = 0x40000000;
+ vlynq_high_res[2].end = 0x43ffffff;
+ vlynq_high_res[3].start = 112;
+ vlynq_high_res[3].end = 143;
+
+ /* Set cpmac0 data */
+ cpmac_low_data.phy_mask = 0x40000000;
+
+ /* Set cpmac1 data */
+ cpmac_high_data.phy_mask = 0x80000000;
+
+ /* Set cpmac0 resources */
+ cpmac_low_res[0].start = TITAN_REGS_MAC0;
+ cpmac_low_res[0].end = TITAN_REGS_MAC0 + 0x7ff;
+
+ /* Set cpmac1 resources */
+ cpmac_high_res[0].start = TITAN_REGS_MAC1;
+ cpmac_high_res[0].end = TITAN_REGS_MAC1 + 0x7ff;
+}
+
static int __init ar7_register_devices(void)
{
void __iomem *bootcr;
@@ -574,6 +627,9 @@ static int __init ar7_register_devices(void)
if (res)
pr_warning("unable to register physmap-flash: %d\n", res);
+ if (ar7_is_titan())
+ titan_fixup_devices();
+
ar7_device_disable(vlynq_low_data.reset_bit);
res = platform_device_register(&vlynq_low);
if (res)
diff --git a/arch/mips/ar7/prom.c b/arch/mips/ar7/prom.c
index 52385790e5c1..23818d299127 100644
--- a/arch/mips/ar7/prom.c
+++ b/arch/mips/ar7/prom.c
@@ -246,6 +246,8 @@ void __init prom_init(void)
ar7_init_cmdline(fw_arg0, (char **)fw_arg1);
ar7_init_env((struct env_var *)fw_arg2);
console_config();
+
+ ar7_gpio_init();
}
#define PORT(offset) (KSEG1ADDR(AR7_REGS_UART0 + (offset * 4)))
diff --git a/arch/mips/ar7/setup.c b/arch/mips/ar7/setup.c
index 3a801d2cb6e5..f20b53e597c4 100644
--- a/arch/mips/ar7/setup.c
+++ b/arch/mips/ar7/setup.c
@@ -23,6 +23,7 @@
#include <asm/reboot.h>
#include <asm/mach-ar7/ar7.h>
#include <asm/mach-ar7/prom.h>
+#include <asm/mach-ar7/gpio.h>
static void ar7_machine_restart(char *command)
{
@@ -49,6 +50,8 @@ static void ar7_machine_power_off(void)
const char *get_system_type(void)
{
u16 chip_id = ar7_chip_id();
+ u16 titan_variant_id = titan_chip_id();
+
switch (chip_id) {
case AR7_CHIP_7100:
return "TI AR7 (TNETD7100)";
@@ -56,6 +59,17 @@ const char *get_system_type(void)
return "TI AR7 (TNETD7200)";
case AR7_CHIP_7300:
return "TI AR7 (TNETD7300)";
+ case AR7_CHIP_TITAN:
+ switch (titan_variant_id) {
+ case TITAN_CHIP_1050:
+ return "TI AR7 (TNETV1050)";
+ case TITAN_CHIP_1055:
+ return "TI AR7 (TNETV1055)";
+ case TITAN_CHIP_1056:
+ return "TI AR7 (TNETV1056)";
+ case TITAN_CHIP_1060:
+ return "TI AR7 (TNETV1060)";
+ }
default:
return "TI AR7 (unknown)";
}
diff --git a/arch/mips/bcm63xx/cpu.c b/arch/mips/bcm63xx/cpu.c
index cbb7caf86d77..7c7e4d4486ce 100644
--- a/arch/mips/bcm63xx/cpu.c
+++ b/arch/mips/bcm63xx/cpu.c
@@ -10,7 +10,9 @@
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/cpu.h>
+#include <asm/cpu.h>
#include <asm/cpu-info.h>
+#include <asm/mipsregs.h>
#include <bcm63xx_cpu.h>
#include <bcm63xx_regs.h>
#include <bcm63xx_io.h>
@@ -296,26 +298,24 @@ void __init bcm63xx_cpu_init(void)
expected_cpu_id = 0;
switch (c->cputype) {
- /*
- * BCM6338 as the same PrId as BCM3302 see arch/mips/kernel/cpu-probe.c
- */
- case CPU_BCM3302:
- __cpu_name[cpu] = "Broadcom BCM6338";
- expected_cpu_id = BCM6338_CPU_ID;
- bcm63xx_regs_base = bcm96338_regs_base;
- bcm63xx_irqs = bcm96338_irqs;
+ case CPU_BMIPS3300:
+ if ((read_c0_prid() & 0xff00) == PRID_IMP_BMIPS3300_ALT) {
+ expected_cpu_id = BCM6348_CPU_ID;
+ bcm63xx_regs_base = bcm96348_regs_base;
+ bcm63xx_irqs = bcm96348_irqs;
+ } else {
+ __cpu_name[cpu] = "Broadcom BCM6338";
+ expected_cpu_id = BCM6338_CPU_ID;
+ bcm63xx_regs_base = bcm96338_regs_base;
+ bcm63xx_irqs = bcm96338_irqs;
+ }
break;
- case CPU_BCM6345:
+ case CPU_BMIPS32:
expected_cpu_id = BCM6345_CPU_ID;
bcm63xx_regs_base = bcm96345_regs_base;
bcm63xx_irqs = bcm96345_irqs;
break;
- case CPU_BCM6348:
- expected_cpu_id = BCM6348_CPU_ID;
- bcm63xx_regs_base = bcm96348_regs_base;
- bcm63xx_irqs = bcm96348_irqs;
- break;
- case CPU_BCM6358:
+ case CPU_BMIPS4350:
expected_cpu_id = BCM6358_CPU_ID;
bcm63xx_regs_base = bcm96358_regs_base;
bcm63xx_irqs = bcm96358_irqs;
diff --git a/arch/mips/cavium-octeon/Kconfig b/arch/mips/cavium-octeon/Kconfig
index 47323ca452dc..caae22858163 100644
--- a/arch/mips/cavium-octeon/Kconfig
+++ b/arch/mips/cavium-octeon/Kconfig
@@ -3,6 +3,17 @@ config CAVIUM_OCTEON_SPECIFIC_OPTIONS
depends on CPU_CAVIUM_OCTEON
default "y"
+config CAVIUM_CN63XXP1
+ bool "Enable CN63XXP1 errata worarounds"
+ depends on CAVIUM_OCTEON_SPECIFIC_OPTIONS
+ default "n"
+ help
+ The CN63XXP1 chip requires build time workarounds to
+ function reliably, select this option to enable them. These
+ workarounds will cause a slight decrease in performance on
+ non-CN63XXP1 hardware, so it is recommended to select "n"
+ unless it is known the workarounds are needed.
+
config CAVIUM_OCTEON_2ND_KERNEL
bool "Build the kernel to be used as a 2nd kernel on the same chip"
depends on CAVIUM_OCTEON_SPECIFIC_OPTIONS
@@ -87,3 +98,15 @@ config ARCH_SPARSEMEM_ENABLE
config CAVIUM_OCTEON_HELPER
def_bool y
depends on OCTEON_ETHERNET || PCI
+
+config IOMMU_HELPER
+ bool
+
+config NEED_SG_DMA_LENGTH
+ bool
+
+config SWIOTLB
+ def_bool y
+ depends on CPU_CAVIUM_OCTEON
+ select IOMMU_HELPER
+ select NEED_SG_DMA_LENGTH
diff --git a/arch/mips/cavium-octeon/csrc-octeon.c b/arch/mips/cavium-octeon/csrc-octeon.c
index b6847c8e0ddd..26bf71130bf8 100644
--- a/arch/mips/cavium-octeon/csrc-octeon.c
+++ b/arch/mips/cavium-octeon/csrc-octeon.c
@@ -4,14 +4,18 @@
* for more details.
*
* Copyright (C) 2007 by Ralf Baechle
+ * Copyright (C) 2009, 2010 Cavium Networks, Inc.
*/
#include <linux/clocksource.h>
#include <linux/init.h>
+#include <linux/smp.h>
+#include <asm/cpu-info.h>
#include <asm/time.h>
#include <asm/octeon/octeon.h>
#include <asm/octeon/cvmx-ipd-defs.h>
+#include <asm/octeon/cvmx-mio-defs.h>
/*
* Set the current core's cvmcount counter to the value of the
@@ -19,11 +23,23 @@
* on-line. This allows for a read from a local cpu register to
* access a synchronized counter.
*
+ * On CPU_CAVIUM_OCTEON2 the IPD_CLK_COUNT is scaled by rdiv/sdiv.
*/
void octeon_init_cvmcount(void)
{
unsigned long flags;
unsigned loops = 2;
+ u64 f = 0;
+ u64 rdiv = 0;
+ u64 sdiv = 0;
+ if (current_cpu_type() == CPU_CAVIUM_OCTEON2) {
+ union cvmx_mio_rst_boot rst_boot;
+ rst_boot.u64 = cvmx_read_csr(CVMX_MIO_RST_BOOT);
+ rdiv = rst_boot.s.c_mul; /* CPU clock */
+ sdiv = rst_boot.s.pnr_mul; /* I/O clock */
+ f = (0x8000000000000000ull / sdiv) * 2;
+ }
+
/* Clobber loops so GCC will not unroll the following while loop. */
asm("" : "+r" (loops));
@@ -33,8 +49,20 @@ void octeon_init_cvmcount(void)
* Loop several times so we are executing from the cache,
* which should give more deterministic timing.
*/
- while (loops--)
- write_c0_cvmcount(cvmx_read_csr(CVMX_IPD_CLK_COUNT));
+ while (loops--) {
+ u64 ipd_clk_count = cvmx_read_csr(CVMX_IPD_CLK_COUNT);
+ if (rdiv != 0) {
+ ipd_clk_count *= rdiv;
+ if (f != 0) {
+ asm("dmultu\t%[cnt],%[f]\n\t"
+ "mfhi\t%[cnt]"
+ : [cnt] "+r" (ipd_clk_count),
+ [f] "=r" (f)
+ : : "hi", "lo");
+ }
+ }
+ write_c0_cvmcount(ipd_clk_count);
+ }
local_irq_restore(flags);
}
@@ -77,7 +105,7 @@ unsigned long long notrace sched_clock(void)
void __init plat_time_init(void)
{
clocksource_mips.rating = 300;
- clocksource_set_clock(&clocksource_mips, mips_hpt_frequency);
+ clocksource_set_clock(&clocksource_mips, octeon_get_clock_rate());
clocksource_register(&clocksource_mips);
}
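
The octeon_init_cvmcount() change above scales IPD_CLK_COUNT by rdiv/sdiv without dividing in the loop: it precomputes f ~= 2^64 / sdiv and keeps the high 64 bits of the 128-bit product, which is what the dmultu/mfhi pair does. A hedged C model of the same arithmetic (not part of the patch; unsigned __int128 is a GCC extension used only to stand in for dmultu):

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

static uint64_t scale_count(uint64_t ipd_clk_count, uint64_t rdiv, uint64_t sdiv)
{
    /* f ~= 2^64 / sdiv, computed as in octeon_init_cvmcount() */
    uint64_t f = (0x8000000000000000ull / sdiv) * 2;
    unsigned __int128 prod;

    ipd_clk_count *= rdiv;
    prod = (unsigned __int128) ipd_clk_count * f;   /* dmultu */
    return (uint64_t) (prod >> 64);                 /* mfhi */
}

int main(void)
{
    uint64_t count = 123456789ull, rdiv = 6, sdiv = 4;
    uint64_t scaled = scale_count(count, rdiv, sdiv);

    /* Equal to count * rdiv / sdiv, up to one unit of rounding. */
    assert(scaled == count * rdiv / sdiv || scaled + 1 == count * rdiv / sdiv);
    printf("scaled count = %llu\n", (unsigned long long) scaled);
    return 0;
}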
diff --git a/arch/mips/cavium-octeon/dma-octeon.c b/arch/mips/cavium-octeon/dma-octeon.c
index d22b5a2d64f4..1abb66caaa1d 100644
--- a/arch/mips/cavium-octeon/dma-octeon.c
+++ b/arch/mips/cavium-octeon/dma-octeon.c
@@ -8,335 +8,342 @@
* Copyright (C) 2005 Ilya A. Volynets-Evenbakh <ilya@total-knowledge.com>
* swiped from i386, and cloned for MIPS by Geert, polished by Ralf.
* IP32 changes by Ilya.
- * Cavium Networks: Create new dma setup for Cavium Networks Octeon based on
- * the kernels original.
+ * Copyright (C) 2010 Cavium Networks, Inc.
*/
-#include <linux/types.h>
-#include <linux/mm.h>
-#include <linux/module.h>
-#include <linux/string.h>
#include <linux/dma-mapping.h>
-#include <linux/platform_device.h>
#include <linux/scatterlist.h>
+#include <linux/bootmem.h>
+#include <linux/swiotlb.h>
+#include <linux/types.h>
+#include <linux/init.h>
+#include <linux/mm.h>
-#include <linux/cache.h>
-#include <linux/io.h>
+#include <asm/bootinfo.h>
#include <asm/octeon/octeon.h>
+
+#ifdef CONFIG_PCI
+#include <asm/octeon/pci-octeon.h>
#include <asm/octeon/cvmx-npi-defs.h>
#include <asm/octeon/cvmx-pci-defs.h>
-#include <dma-coherence.h>
+static dma_addr_t octeon_hole_phys_to_dma(phys_addr_t paddr)
+{
+ if (paddr >= CVMX_PCIE_BAR1_PHYS_BASE && paddr < (CVMX_PCIE_BAR1_PHYS_BASE + CVMX_PCIE_BAR1_PHYS_SIZE))
+ return paddr - CVMX_PCIE_BAR1_PHYS_BASE + CVMX_PCIE_BAR1_RC_BASE;
+ else
+ return paddr;
+}
-#ifdef CONFIG_PCI
-#include <asm/octeon/pci-octeon.h>
-#endif
+static phys_addr_t octeon_hole_dma_to_phys(dma_addr_t daddr)
+{
+ if (daddr >= CVMX_PCIE_BAR1_RC_BASE)
+ return daddr + CVMX_PCIE_BAR1_PHYS_BASE - CVMX_PCIE_BAR1_RC_BASE;
+ else
+ return daddr;
+}
+
+static dma_addr_t octeon_gen1_phys_to_dma(struct device *dev, phys_addr_t paddr)
+{
+ if (paddr >= 0x410000000ull && paddr < 0x420000000ull)
+ paddr -= 0x400000000ull;
+ return octeon_hole_phys_to_dma(paddr);
+}
-#define BAR2_PCI_ADDRESS 0x8000000000ul
+static phys_addr_t octeon_gen1_dma_to_phys(struct device *dev, dma_addr_t daddr)
+{
+ daddr = octeon_hole_dma_to_phys(daddr);
-struct bar1_index_state {
- int16_t ref_count; /* Number of PCI mappings using this index */
- uint16_t address_bits; /* Upper bits of physical address. This is
- shifted 22 bits */
-};
+ if (daddr >= 0x10000000ull && daddr < 0x20000000ull)
+ daddr += 0x400000000ull;
-#ifdef CONFIG_PCI
-static DEFINE_RAW_SPINLOCK(bar1_lock);
-static struct bar1_index_state bar1_state[32];
-#endif
+ return daddr;
+}
-dma_addr_t octeon_map_dma_mem(struct device *dev, void *ptr, size_t size)
+static dma_addr_t octeon_big_phys_to_dma(struct device *dev, phys_addr_t paddr)
{
-#ifndef CONFIG_PCI
- /* Without PCI/PCIe this function can be called for Octeon internal
- devices such as USB. These devices all support 64bit addressing */
+ if (paddr >= 0x410000000ull && paddr < 0x420000000ull)
+ paddr -= 0x400000000ull;
+
+ /* Anything in the BAR1 hole or above goes via BAR2 */
+ if (paddr >= 0xf0000000ull)
+ paddr = OCTEON_BAR2_PCI_ADDRESS + paddr;
+
+ return paddr;
+}
+
+static phys_addr_t octeon_big_dma_to_phys(struct device *dev, dma_addr_t daddr)
+{
+ if (daddr >= OCTEON_BAR2_PCI_ADDRESS)
+ daddr -= OCTEON_BAR2_PCI_ADDRESS;
+
+ if (daddr >= 0x10000000ull && daddr < 0x20000000ull)
+ daddr += 0x400000000ull;
+ return daddr;
+}
+
+static dma_addr_t octeon_small_phys_to_dma(struct device *dev,
+ phys_addr_t paddr)
+{
+ if (paddr >= 0x410000000ull && paddr < 0x420000000ull)
+ paddr -= 0x400000000ull;
+
+ /* Anything not in the BAR1 range goes via BAR2 */
+ if (paddr >= octeon_bar1_pci_phys && paddr < octeon_bar1_pci_phys + 0x8000000ull)
+ paddr = paddr - octeon_bar1_pci_phys;
+ else
+ paddr = OCTEON_BAR2_PCI_ADDRESS + paddr;
+
+ return paddr;
+}
+
+static phys_addr_t octeon_small_dma_to_phys(struct device *dev,
+ dma_addr_t daddr)
+{
+ if (daddr >= OCTEON_BAR2_PCI_ADDRESS)
+ daddr -= OCTEON_BAR2_PCI_ADDRESS;
+ else
+ daddr += octeon_bar1_pci_phys;
+
+ if (daddr >= 0x10000000ull && daddr < 0x20000000ull)
+ daddr += 0x400000000ull;
+ return daddr;
+}
+
+#endif /* CONFIG_PCI */
+
+static dma_addr_t octeon_dma_map_page(struct device *dev, struct page *page,
+ unsigned long offset, size_t size, enum dma_data_direction direction,
+ struct dma_attrs *attrs)
+{
+ dma_addr_t daddr = swiotlb_map_page(dev, page, offset, size,
+ direction, attrs);
mb();
- return virt_to_phys(ptr);
-#else
- unsigned long flags;
- uint64_t dma_mask;
- int64_t start_index;
- dma_addr_t result = -1;
- uint64_t physical = virt_to_phys(ptr);
- int64_t index;
+ return daddr;
+}
+
+static int octeon_dma_map_sg(struct device *dev, struct scatterlist *sg,
+ int nents, enum dma_data_direction direction, struct dma_attrs *attrs)
+{
+ int r = swiotlb_map_sg_attrs(dev, sg, nents, direction, attrs);
mb();
- /*
- * Use the DMA masks to determine the allowed memory
- * region. For us it doesn't limit the actual memory, just the
- * address visible over PCI. Devices with limits need to use
- * lower indexed Bar1 entries.
- */
- if (dev) {
- dma_mask = dev->coherent_dma_mask;
- if (dev->dma_mask)
- dma_mask = *dev->dma_mask;
- } else {
- dma_mask = 0xfffffffful;
- }
+ return r;
+}
- /*
- * Platform devices, such as the internal USB, skip all
- * translation and use Octeon physical addresses directly.
- */
- if (!dev || dev->bus == &platform_bus_type)
- return physical;
+static void octeon_dma_sync_single_for_device(struct device *dev,
+ dma_addr_t dma_handle, size_t size, enum dma_data_direction direction)
+{
+ swiotlb_sync_single_for_device(dev, dma_handle, size, direction);
+ mb();
+}
- switch (octeon_dma_bar_type) {
- case OCTEON_DMA_BAR_TYPE_PCIE:
- if (unlikely(physical < (16ul << 10)))
- panic("dma_map_single: Not allowed to map first 16KB."
- " It interferes with BAR0 special area\n");
- else if ((physical + size >= (256ul << 20)) &&
- (physical < (512ul << 20)))
- panic("dma_map_single: Not allowed to map bootbus\n");
- else if ((physical + size >= 0x400000000ull) &&
- physical < 0x410000000ull)
- panic("dma_map_single: "
- "Attempt to map illegal memory address 0x%llx\n",
- physical);
- else if (physical >= 0x420000000ull)
- panic("dma_map_single: "
- "Attempt to map illegal memory address 0x%llx\n",
- physical);
- else if (physical >= CVMX_PCIE_BAR1_PHYS_BASE &&
- physical + size < (CVMX_PCIE_BAR1_PHYS_BASE + CVMX_PCIE_BAR1_PHYS_SIZE)) {
- result = physical - CVMX_PCIE_BAR1_PHYS_BASE + CVMX_PCIE_BAR1_RC_BASE;
-
- if (((result+size-1) & dma_mask) != result+size-1)
- panic("dma_map_single: Attempt to map address 0x%llx-0x%llx, which can't be accessed according to the dma mask 0x%llx\n",
- physical, physical+size-1, dma_mask);
- goto done;
- }
-
- /* The 2nd 256MB is mapped at 256<<20 instead of 0x410000000 */
- if ((physical >= 0x410000000ull) && physical < 0x420000000ull)
- result = physical - 0x400000000ull;
- else
- result = physical;
- if (((result+size-1) & dma_mask) != result+size-1)
- panic("dma_map_single: Attempt to map address "
- "0x%llx-0x%llx, which can't be accessed "
- "according to the dma mask 0x%llx\n",
- physical, physical+size-1, dma_mask);
- goto done;
+static void octeon_dma_sync_sg_for_device(struct device *dev,
+ struct scatterlist *sg, int nelems, enum dma_data_direction direction)
+{
+ swiotlb_sync_sg_for_device(dev, sg, nelems, direction);
+ mb();
+}
- case OCTEON_DMA_BAR_TYPE_BIG:
-#ifdef CONFIG_64BIT
- /* If the device supports 64bit addressing, then use BAR2 */
- if (dma_mask > BAR2_PCI_ADDRESS) {
- result = physical + BAR2_PCI_ADDRESS;
- goto done;
- }
-#endif
- if (unlikely(physical < (4ul << 10))) {
- panic("dma_map_single: Not allowed to map first 4KB. "
- "It interferes with BAR0 special area\n");
- } else if (physical < (256ul << 20)) {
- if (unlikely(physical + size > (256ul << 20)))
- panic("dma_map_single: Requested memory spans "
- "Bar0 0:256MB and bootbus\n");
- result = physical;
- goto done;
- } else if (unlikely(physical < (512ul << 20))) {
- panic("dma_map_single: Not allowed to map bootbus\n");
- } else if (physical < (2ul << 30)) {
- if (unlikely(physical + size > (2ul << 30)))
- panic("dma_map_single: Requested memory spans "
- "Bar0 512MB:2GB and BAR1\n");
- result = physical;
- goto done;
- } else if (physical < (2ul << 30) + (128 << 20)) {
- /* Fall through */
- } else if (physical <
- (4ul << 30) - (OCTEON_PCI_BAR1_HOLE_SIZE << 20)) {
- if (unlikely
- (physical + size >
- (4ul << 30) - (OCTEON_PCI_BAR1_HOLE_SIZE << 20)))
- panic("dma_map_single: Requested memory "
- "extends past Bar1 (4GB-%luMB)\n",
- OCTEON_PCI_BAR1_HOLE_SIZE);
- result = physical;
- goto done;
- } else if ((physical >= 0x410000000ull) &&
- (physical < 0x420000000ull)) {
- if (unlikely(physical + size > 0x420000000ull))
- panic("dma_map_single: Requested memory spans "
- "non existant memory\n");
- /* BAR0 fixed mapping 256MB:512MB ->
- * 16GB+256MB:16GB+512MB */
- result = physical - 0x400000000ull;
- goto done;
- } else {
- /* Continued below switch statement */
- }
- break;
+static void *octeon_dma_alloc_coherent(struct device *dev, size_t size,
+ dma_addr_t *dma_handle, gfp_t gfp)
+{
+ void *ret;
- case OCTEON_DMA_BAR_TYPE_SMALL:
-#ifdef CONFIG_64BIT
- /* If the device supports 64bit addressing, then use BAR2 */
- if (dma_mask > BAR2_PCI_ADDRESS) {
- result = physical + BAR2_PCI_ADDRESS;
- goto done;
- }
+ if (dma_alloc_from_coherent(dev, size, dma_handle, &ret))
+ return ret;
+
+ /* ignore region specifiers */
+ gfp &= ~(__GFP_DMA | __GFP_DMA32 | __GFP_HIGHMEM);
+
+#ifdef CONFIG_ZONE_DMA
+ if (dev == NULL)
+ gfp |= __GFP_DMA;
+ else if (dev->coherent_dma_mask <= DMA_BIT_MASK(24))
+ gfp |= __GFP_DMA;
+ else
#endif
- /* Continued below switch statement */
- break;
+#ifdef CONFIG_ZONE_DMA32
+ if (dev->coherent_dma_mask <= DMA_BIT_MASK(32))
+ gfp |= __GFP_DMA32;
+ else
+#endif
+ ;
- default:
- panic("dma_map_single: Invalid octeon_dma_bar_type\n");
- }
+ /* Don't invoke OOM killer */
+ gfp |= __GFP_NORETRY;
- /* Don't allow mapping to span multiple Bar entries. The hardware guys
- won't guarantee that DMA across boards work */
- if (unlikely((physical >> 22) != ((physical + size - 1) >> 22)))
- panic("dma_map_single: "
- "Requested memory spans more than one Bar1 entry\n");
+ ret = swiotlb_alloc_coherent(dev, size, dma_handle, gfp);
- if (octeon_dma_bar_type == OCTEON_DMA_BAR_TYPE_BIG)
- start_index = 31;
- else if (unlikely(dma_mask < (1ul << 27)))
- start_index = (dma_mask >> 22);
- else
- start_index = 31;
-
- /* Only one processor can access the Bar register at once */
- raw_spin_lock_irqsave(&bar1_lock, flags);
-
- /* Look through Bar1 for existing mapping that will work */
- for (index = start_index; index >= 0; index--) {
- if ((bar1_state[index].address_bits == physical >> 22) &&
- (bar1_state[index].ref_count)) {
- /* An existing mapping will work, use it */
- bar1_state[index].ref_count++;
- if (unlikely(bar1_state[index].ref_count < 0))
- panic("dma_map_single: "
- "Bar1[%d] reference count overflowed\n",
- (int) index);
- result = (index << 22) | (physical & ((1 << 22) - 1));
- /* Large BAR1 is offset at 2GB */
- if (octeon_dma_bar_type == OCTEON_DMA_BAR_TYPE_BIG)
- result += 2ul << 30;
- goto done_unlock;
- }
- }
+ mb();
- /* No existing mappings, look for a free entry */
- for (index = start_index; index >= 0; index--) {
- if (unlikely(bar1_state[index].ref_count == 0)) {
- union cvmx_pci_bar1_indexx bar1_index;
- /* We have a free entry, use it */
- bar1_state[index].ref_count = 1;
- bar1_state[index].address_bits = physical >> 22;
- bar1_index.u32 = 0;
- /* Address bits[35:22] sent to L2C */
- bar1_index.s.addr_idx = physical >> 22;
- /* Don't put PCI accesses in L2. */
- bar1_index.s.ca = 1;
- /* Endian Swap Mode */
- bar1_index.s.end_swp = 1;
- /* Set '1' when the selected address range is valid. */
- bar1_index.s.addr_v = 1;
- octeon_npi_write32(CVMX_NPI_PCI_BAR1_INDEXX(index),
- bar1_index.u32);
- /* An existing mapping will work, use it */
- result = (index << 22) | (physical & ((1 << 22) - 1));
- /* Large BAR1 is offset at 2GB */
- if (octeon_dma_bar_type == OCTEON_DMA_BAR_TYPE_BIG)
- result += 2ul << 30;
- goto done_unlock;
- }
- }
+ return ret;
+}
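The zone-selection chain in octeon_dma_alloc_coherent() above relies on dangling else/#ifdef nesting terminated by a bare ';'. Logically it requests __GFP_DMA for a NULL device or a coherent mask of at most 24 bits, __GFP_DMA32 for a mask of at most 32 bits, and no zone flag otherwise, subject to which zones the kernel was built with. A small standalone sketch of the same decision written flat (HAVE_ZONE_DMA/HAVE_ZONE_DMA32 stand in for the config options; the 24/32-bit cut-offs are taken from the DMA_BIT_MASK() tests above):

#include <stdint.h>
#include <stdio.h>

#define HAVE_ZONE_DMA   1	/* stand-in for CONFIG_ZONE_DMA */
#define HAVE_ZONE_DMA32 1	/* stand-in for CONFIG_ZONE_DMA32 */

#define GFP_DMA_DEMO    0x1u
#define GFP_DMA32_DEMO  0x2u

#define BIT_MASK64(n) (((n) == 64) ? ~0ull : ((1ull << (n)) - 1))

/* Flat rewrite of the zone selection in octeon_dma_alloc_coherent(). */
static unsigned int pick_zone_flag(int have_dev, uint64_t coherent_mask)
{
	if (HAVE_ZONE_DMA && (!have_dev || coherent_mask <= BIT_MASK64(24)))
		return GFP_DMA_DEMO;
	if (HAVE_ZONE_DMA32 && coherent_mask <= BIT_MASK64(32))
		return GFP_DMA32_DEMO;
	return 0;	/* any zone is fine */
}

int main(void)
{
	printf("32-bit mask -> %#x\n", pick_zone_flag(1, 0xffffffffull));
	printf("64-bit mask -> %#x\n", pick_zone_flag(1, ~0ull));
	return 0;
}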
- pr_err("dma_map_single: "
- "Can't find empty BAR1 index for physical mapping 0x%llx\n",
- (unsigned long long) physical);
+static void octeon_dma_free_coherent(struct device *dev, size_t size,
+ void *vaddr, dma_addr_t dma_handle)
+{
+ int order = get_order(size);
-done_unlock:
- raw_spin_unlock_irqrestore(&bar1_lock, flags);
-done:
- pr_debug("dma_map_single 0x%llx->0x%llx\n", physical, result);
- return result;
-#endif
+ if (dma_release_from_coherent(dev, order, vaddr))
+ return;
+
+ swiotlb_free_coherent(dev, size, vaddr, dma_handle);
}
-void octeon_unmap_dma_mem(struct device *dev, dma_addr_t dma_addr)
+static dma_addr_t octeon_unity_phys_to_dma(struct device *dev, phys_addr_t paddr)
{
-#ifndef CONFIG_PCI
- /*
- * Without PCI/PCIe this function can be called for Octeon internal
- * devices such as USB. These devices all support 64bit addressing.
- */
- return;
-#else
- unsigned long flags;
- uint64_t index;
+ return paddr;
+}
+static phys_addr_t octeon_unity_dma_to_phys(struct device *dev, dma_addr_t daddr)
+{
+ return daddr;
+}
+
+struct octeon_dma_map_ops {
+ struct dma_map_ops dma_map_ops;
+ dma_addr_t (*phys_to_dma)(struct device *dev, phys_addr_t paddr);
+ phys_addr_t (*dma_to_phys)(struct device *dev, dma_addr_t daddr);
+};
+
+dma_addr_t phys_to_dma(struct device *dev, phys_addr_t paddr)
+{
+ struct octeon_dma_map_ops *ops = container_of(get_dma_ops(dev),
+ struct octeon_dma_map_ops,
+ dma_map_ops);
+
+ return ops->phys_to_dma(dev, paddr);
+}
+EXPORT_SYMBOL(phys_to_dma);
+
+phys_addr_t dma_to_phys(struct device *dev, dma_addr_t daddr)
+{
+ struct octeon_dma_map_ops *ops = container_of(get_dma_ops(dev),
+ struct octeon_dma_map_ops,
+ dma_map_ops);
+
+ return ops->dma_to_phys(dev, daddr);
+}
+EXPORT_SYMBOL(dma_to_phys);
+
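phys_to_dma() and dma_to_phys() above recover the platform-specific translation callbacks by embedding the generic struct dma_map_ops inside struct octeon_dma_map_ops and walking back from the generic pointer with container_of(). A self-contained sketch of that pattern follows; the offsetof-based container_of is the standard definition, written out here only so the example compiles on its own.

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct dma_map_ops_demo {		/* stand-in for struct dma_map_ops */
	const char *name;
};

struct octeon_dma_map_ops_demo {	/* mirrors struct octeon_dma_map_ops */
	struct dma_map_ops_demo dma_map_ops;
	uint64_t (*phys_to_dma)(uint64_t paddr);
};

static uint64_t unity(uint64_t paddr) { return paddr; }

static struct octeon_dma_map_ops_demo linear_ops = {
	.dma_map_ops = { .name = "octeon-linear" },
	.phys_to_dma = unity,
};

int main(void)
{
	/* The generic code only hands us the embedded pointer... */
	struct dma_map_ops_demo *generic = &linear_ops.dma_map_ops;

	/* ...and container_of() recovers the enclosing Octeon structure. */
	struct octeon_dma_map_ops_demo *ops =
		container_of(generic, struct octeon_dma_map_ops_demo, dma_map_ops);

	printf("%s: 0x1000 -> 0x%llx\n", generic->name,
	       (unsigned long long)ops->phys_to_dma(0x1000));
	return 0;
}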
+static struct octeon_dma_map_ops octeon_linear_dma_map_ops = {
+ .dma_map_ops = {
+ .alloc_coherent = octeon_dma_alloc_coherent,
+ .free_coherent = octeon_dma_free_coherent,
+ .map_page = octeon_dma_map_page,
+ .unmap_page = swiotlb_unmap_page,
+ .map_sg = octeon_dma_map_sg,
+ .unmap_sg = swiotlb_unmap_sg_attrs,
+ .sync_single_for_cpu = swiotlb_sync_single_for_cpu,
+ .sync_single_for_device = octeon_dma_sync_single_for_device,
+ .sync_sg_for_cpu = swiotlb_sync_sg_for_cpu,
+ .sync_sg_for_device = octeon_dma_sync_sg_for_device,
+ .mapping_error = swiotlb_dma_mapping_error,
+ .dma_supported = swiotlb_dma_supported
+ },
+ .phys_to_dma = octeon_unity_phys_to_dma,
+ .dma_to_phys = octeon_unity_dma_to_phys
+};
+
+char *octeon_swiotlb;
+
+void __init plat_swiotlb_setup(void)
+{
+ int i;
+ phys_t max_addr;
+ phys_t addr_size;
+ size_t swiotlbsize;
+ unsigned long swiotlb_nslabs;
+
+ max_addr = 0;
+ addr_size = 0;
+
+ for (i = 0 ; i < boot_mem_map.nr_map; i++) {
+ struct boot_mem_map_entry *e = &boot_mem_map.map[i];
+ if (e->type != BOOT_MEM_RAM)
+ continue;
+
+ /* These addresses map low for PCI. */
+ if (e->addr > 0x410000000ull)
+ continue;
+
+ addr_size += e->size;
+
+ if (max_addr < e->addr + e->size)
+ max_addr = e->addr + e->size;
+
+ }
+
+ swiotlbsize = PAGE_SIZE;
+
+#ifdef CONFIG_PCI
/*
- * Platform devices, such as the internal USB, skip all
- * translation and use Octeon physical addresses directly.
	 * For OCTEON_DMA_BAR_TYPE_SMALL, size the iotlb at 1/4 of
	 * memory, up to a maximum of 64MB.
*/
- if (dev->bus == &platform_bus_type)
- return;
+ if (OCTEON_IS_MODEL(OCTEON_CN31XX)
+ || OCTEON_IS_MODEL(OCTEON_CN38XX_PASS2)) {
+ swiotlbsize = addr_size / 4;
+ if (swiotlbsize > 64 * (1<<20))
+ swiotlbsize = 64 * (1<<20);
+ } else if (max_addr > 0xf0000000ul) {
+ /*
+ * Otherwise only allocate a big iotlb if there is
+ * memory past the BAR1 hole.
+ */
+ swiotlbsize = 64 * (1<<20);
+ }
+#endif
+ swiotlb_nslabs = swiotlbsize >> IO_TLB_SHIFT;
+ swiotlb_nslabs = ALIGN(swiotlb_nslabs, IO_TLB_SEGSIZE);
+ swiotlbsize = swiotlb_nslabs << IO_TLB_SHIFT;
+
+ octeon_swiotlb = alloc_bootmem_low_pages(swiotlbsize);
+ swiotlb_init_with_tbl(octeon_swiotlb, swiotlb_nslabs, 1);
+
+ mips_dma_map_ops = &octeon_linear_dma_map_ops.dma_map_ops;
+}
+
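plat_swiotlb_setup() above rounds the requested bounce-buffer size to whole swiotlb slabs: the byte size is converted to slabs with IO_TLB_SHIFT, padded up to a multiple of IO_TLB_SEGSIZE, and converted back to bytes before allocation. A quick check of that rounding, assuming the usual swiotlb constants of a 2KB slab (IO_TLB_SHIFT = 11) and 128 slabs per segment; the real values come from <linux/swiotlb.h> and are not shown in this patch.

#include <stdio.h>

#define IO_TLB_SHIFT   11	/* assumed: 2KB per swiotlb slab */
#define IO_TLB_SEGSIZE 128	/* assumed: slabs per segment */

#define ALIGN_UP(x, a) (((x) + (a) - 1) / (a) * (a))

int main(void)
{
	unsigned long swiotlbsize = 64UL * (1 << 20);	/* 64MB request */
	unsigned long nslabs;

	nslabs = swiotlbsize >> IO_TLB_SHIFT;
	nslabs = ALIGN_UP(nslabs, IO_TLB_SEGSIZE);
	swiotlbsize = nslabs << IO_TLB_SHIFT;

	printf("%lu slabs, %lu bytes of bounce buffer\n", nslabs, swiotlbsize);
	return 0;
}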
+#ifdef CONFIG_PCI
+static struct octeon_dma_map_ops _octeon_pci_dma_map_ops = {
+ .dma_map_ops = {
+ .alloc_coherent = octeon_dma_alloc_coherent,
+ .free_coherent = octeon_dma_free_coherent,
+ .map_page = octeon_dma_map_page,
+ .unmap_page = swiotlb_unmap_page,
+ .map_sg = octeon_dma_map_sg,
+ .unmap_sg = swiotlb_unmap_sg_attrs,
+ .sync_single_for_cpu = swiotlb_sync_single_for_cpu,
+ .sync_single_for_device = octeon_dma_sync_single_for_device,
+ .sync_sg_for_cpu = swiotlb_sync_sg_for_cpu,
+ .sync_sg_for_device = octeon_dma_sync_sg_for_device,
+ .mapping_error = swiotlb_dma_mapping_error,
+ .dma_supported = swiotlb_dma_supported
+ },
+};
+
+struct dma_map_ops *octeon_pci_dma_map_ops;
+
+void __init octeon_pci_dma_init(void)
+{
switch (octeon_dma_bar_type) {
case OCTEON_DMA_BAR_TYPE_PCIE:
- /* Nothing to do, all mappings are static */
- goto done;
-
+ _octeon_pci_dma_map_ops.phys_to_dma = octeon_gen1_phys_to_dma;
+ _octeon_pci_dma_map_ops.dma_to_phys = octeon_gen1_dma_to_phys;
+ break;
case OCTEON_DMA_BAR_TYPE_BIG:
-#ifdef CONFIG_64BIT
- /* Nothing to do for addresses using BAR2 */
- if (dma_addr >= BAR2_PCI_ADDRESS)
- goto done;
-#endif
- if (unlikely(dma_addr < (4ul << 10)))
- panic("dma_unmap_single: Unexpect DMA address 0x%llx\n",
- dma_addr);
- else if (dma_addr < (2ul << 30))
- /* Nothing to do for addresses using BAR0 */
- goto done;
- else if (dma_addr < (2ul << 30) + (128ul << 20))
- /* Need to unmap, fall through */
- index = (dma_addr - (2ul << 30)) >> 22;
- else if (dma_addr <
- (4ul << 30) - (OCTEON_PCI_BAR1_HOLE_SIZE << 20))
- goto done; /* Nothing to do for the rest of BAR1 */
- else
- panic("dma_unmap_single: Unexpect DMA address 0x%llx\n",
- dma_addr);
- /* Continued below switch statement */
+ _octeon_pci_dma_map_ops.phys_to_dma = octeon_big_phys_to_dma;
+ _octeon_pci_dma_map_ops.dma_to_phys = octeon_big_dma_to_phys;
break;
-
case OCTEON_DMA_BAR_TYPE_SMALL:
-#ifdef CONFIG_64BIT
- /* Nothing to do for addresses using BAR2 */
- if (dma_addr >= BAR2_PCI_ADDRESS)
- goto done;
-#endif
- index = dma_addr >> 22;
- /* Continued below switch statement */
+ _octeon_pci_dma_map_ops.phys_to_dma = octeon_small_phys_to_dma;
+ _octeon_pci_dma_map_ops.dma_to_phys = octeon_small_dma_to_phys;
break;
-
default:
- panic("dma_unmap_single: Invalid octeon_dma_bar_type\n");
+ BUG();
}
-
- if (unlikely(index > 31))
- panic("dma_unmap_single: "
- "Attempt to unmap an invalid address (0x%llx)\n",
- dma_addr);
-
- raw_spin_lock_irqsave(&bar1_lock, flags);
- bar1_state[index].ref_count--;
- if (bar1_state[index].ref_count == 0)
- octeon_npi_write32(CVMX_NPI_PCI_BAR1_INDEXX(index), 0);
- else if (unlikely(bar1_state[index].ref_count < 0))
- panic("dma_unmap_single: Bar1[%u] reference count < 0\n",
- (int) index);
- raw_spin_unlock_irqrestore(&bar1_lock, flags);
-done:
- pr_debug("dma_unmap_single 0x%llx\n", dma_addr);
- return;
-#endif
+ octeon_pci_dma_map_ops = &_octeon_pci_dma_map_ops.dma_map_ops;
}
+#endif /* CONFIG_PCI */
diff --git a/arch/mips/cavium-octeon/executive/cvmx-l2c.c b/arch/mips/cavium-octeon/executive/cvmx-l2c.c
index 6abe56f1e097..d38246e33ddb 100644
--- a/arch/mips/cavium-octeon/executive/cvmx-l2c.c
+++ b/arch/mips/cavium-octeon/executive/cvmx-l2c.c
@@ -4,7 +4,7 @@
* Contact: support@caviumnetworks.com
* This file is part of the OCTEON SDK
*
- * Copyright (c) 2003-2008 Cavium Networks
+ * Copyright (c) 2003-2010 Cavium Networks
*
* This file is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License, Version 2, as
@@ -26,8 +26,8 @@
***********************license end**************************************/
/*
- * Implementation of the Level 2 Cache (L2C) control, measurement, and
- * debugging facilities.
+ * Implementation of the Level 2 Cache (L2C) control,
+ * measurement, and debugging facilities.
*/
#include <asm/octeon/cvmx.h>
@@ -42,13 +42,7 @@
* if multiple applications or operating systems are running, then it
* is up to the user program to coordinate between them.
*/
-static cvmx_spinlock_t cvmx_l2c_spinlock;
-
-static inline int l2_size_half(void)
-{
- uint64_t val = cvmx_read_csr(CVMX_L2D_FUS3);
- return !!(val & (1ull << 34));
-}
+cvmx_spinlock_t cvmx_l2c_spinlock;
int cvmx_l2c_get_core_way_partition(uint32_t core)
{
@@ -58,6 +52,9 @@ int cvmx_l2c_get_core_way_partition(uint32_t core)
if (core >= cvmx_octeon_num_cores())
return -1;
+ if (OCTEON_IS_MODEL(OCTEON_CN63XX))
+ return cvmx_read_csr(CVMX_L2C_WPAR_PPX(core)) & 0xffff;
+
/*
* Use the lower two bits of the coreNumber to determine the
* bit offset of the UMSK[] field in the L2C_SPAR register.
@@ -71,17 +68,13 @@ int cvmx_l2c_get_core_way_partition(uint32_t core)
switch (core & 0xC) {
case 0x0:
- return (cvmx_read_csr(CVMX_L2C_SPAR0) & (0xFF << field)) >>
- field;
+ return (cvmx_read_csr(CVMX_L2C_SPAR0) & (0xFF << field)) >> field;
case 0x4:
- return (cvmx_read_csr(CVMX_L2C_SPAR1) & (0xFF << field)) >>
- field;
+ return (cvmx_read_csr(CVMX_L2C_SPAR1) & (0xFF << field)) >> field;
case 0x8:
- return (cvmx_read_csr(CVMX_L2C_SPAR2) & (0xFF << field)) >>
- field;
+ return (cvmx_read_csr(CVMX_L2C_SPAR2) & (0xFF << field)) >> field;
case 0xC:
- return (cvmx_read_csr(CVMX_L2C_SPAR3) & (0xFF << field)) >>
- field;
+ return (cvmx_read_csr(CVMX_L2C_SPAR3) & (0xFF << field)) >> field;
}
return 0;
}
@@ -95,48 +88,50 @@ int cvmx_l2c_set_core_way_partition(uint32_t core, uint32_t mask)
mask &= valid_mask;
- /* A UMSK setting which blocks all L2C Ways is an error. */
- if (mask == valid_mask)
+ /* A UMSK setting which blocks all L2C Ways is an error on some chips */
+ if (mask == valid_mask && !OCTEON_IS_MODEL(OCTEON_CN63XX))
return -1;
/* Validate the core number */
if (core >= cvmx_octeon_num_cores())
return -1;
- /* Check to make sure current mask & new mask don't block all ways */
- if (((mask | cvmx_l2c_get_core_way_partition(core)) & valid_mask) ==
- valid_mask)
- return -1;
+ if (OCTEON_IS_MODEL(OCTEON_CN63XX)) {
+ cvmx_write_csr(CVMX_L2C_WPAR_PPX(core), mask);
+ return 0;
+ }
- /* Use the lower two bits of core to determine the bit offset of the
+ /*
+ * Use the lower two bits of core to determine the bit offset of the
* UMSK[] field in the L2C_SPAR register.
*/
field = (core & 0x3) * 8;
- /* Assign the new mask setting to the UMSK[] field in the appropriate
+ /*
+ * Assign the new mask setting to the UMSK[] field in the appropriate
* L2C_SPAR register based on the core_num.
*
*/
switch (core & 0xC) {
case 0x0:
cvmx_write_csr(CVMX_L2C_SPAR0,
- (cvmx_read_csr(CVMX_L2C_SPAR0) &
- ~(0xFF << field)) | mask << field);
+ (cvmx_read_csr(CVMX_L2C_SPAR0) & ~(0xFF << field)) |
+ mask << field);
break;
case 0x4:
cvmx_write_csr(CVMX_L2C_SPAR1,
- (cvmx_read_csr(CVMX_L2C_SPAR1) &
- ~(0xFF << field)) | mask << field);
+ (cvmx_read_csr(CVMX_L2C_SPAR1) & ~(0xFF << field)) |
+ mask << field);
break;
case 0x8:
cvmx_write_csr(CVMX_L2C_SPAR2,
- (cvmx_read_csr(CVMX_L2C_SPAR2) &
- ~(0xFF << field)) | mask << field);
+ (cvmx_read_csr(CVMX_L2C_SPAR2) & ~(0xFF << field)) |
+ mask << field);
break;
case 0xC:
cvmx_write_csr(CVMX_L2C_SPAR3,
- (cvmx_read_csr(CVMX_L2C_SPAR3) &
- ~(0xFF << field)) | mask << field);
+ (cvmx_read_csr(CVMX_L2C_SPAR3) & ~(0xFF << field)) |
+ mask << field);
break;
}
return 0;
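The SPAR update above packs one 8-bit UMSK field per core into each L2C_SPARn register: bits 1:0 of the core number give the byte offset inside the register (field = (core & 0x3) * 8) and bits 3:2 pick which of the four registers to touch. A small sketch of that addressing plus the read-modify-write of the mask; the array below is only a stand-in for the four CSRs, and the mapping assumes the 16-core-or-fewer parts these registers cover.

#include <stdint.h>
#include <stdio.h>

/* Stand-ins for the L2C_SPAR0..L2C_SPAR3 CSRs (one 8-bit UMSK per core). */
static uint64_t demo_spar[4];

static void demo_set_core_way_partition(unsigned int core, unsigned int mask)
{
	unsigned int field = (core & 0x3) * 8;	/* byte slot inside the CSR */
	unsigned int reg = (core >> 2) & 0x3;	/* which L2C_SPARn (core & 0xC) */

	demo_spar[reg] = (demo_spar[reg] & ~(0xFFull << field)) |
			 ((uint64_t)(mask & 0xFF) << field);
}

int main(void)
{
	demo_set_core_way_partition(5, 0x0F);	/* core 5 -> SPAR1, bits 15:8 */
	printf("SPAR1 = 0x%016llx\n", (unsigned long long)demo_spar[1]);
	return 0;
}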
@@ -146,84 +141,137 @@ int cvmx_l2c_set_hw_way_partition(uint32_t mask)
{
uint32_t valid_mask;
- valid_mask = 0xff;
-
- if (OCTEON_IS_MODEL(OCTEON_CN58XX) || OCTEON_IS_MODEL(OCTEON_CN38XX)) {
- if (l2_size_half())
- valid_mask = 0xf;
- } else if (l2_size_half())
- valid_mask = 0x3;
-
+ valid_mask = (0x1 << cvmx_l2c_get_num_assoc()) - 1;
mask &= valid_mask;
- /* A UMSK setting which blocks all L2C Ways is an error. */
- if (mask == valid_mask)
- return -1;
- /* Check to make sure current mask & new mask don't block all ways */
- if (((mask | cvmx_l2c_get_hw_way_partition()) & valid_mask) ==
- valid_mask)
+ /* A UMSK setting which blocks all L2C Ways is an error on some chips */
+ if (mask == valid_mask && !OCTEON_IS_MODEL(OCTEON_CN63XX))
return -1;
- cvmx_write_csr(CVMX_L2C_SPAR4,
- (cvmx_read_csr(CVMX_L2C_SPAR4) & ~0xFF) | mask);
+ if (OCTEON_IS_MODEL(OCTEON_CN63XX))
+ cvmx_write_csr(CVMX_L2C_WPAR_IOBX(0), mask);
+ else
+ cvmx_write_csr(CVMX_L2C_SPAR4,
+ (cvmx_read_csr(CVMX_L2C_SPAR4) & ~0xFF) | mask);
return 0;
}
int cvmx_l2c_get_hw_way_partition(void)
{
- return cvmx_read_csr(CVMX_L2C_SPAR4) & (0xFF);
+ if (OCTEON_IS_MODEL(OCTEON_CN63XX))
+ return cvmx_read_csr(CVMX_L2C_WPAR_IOBX(0)) & 0xffff;
+ else
+ return cvmx_read_csr(CVMX_L2C_SPAR4) & (0xFF);
}
void cvmx_l2c_config_perf(uint32_t counter, enum cvmx_l2c_event event,
uint32_t clear_on_read)
{
- union cvmx_l2c_pfctl pfctl;
+ if (OCTEON_IS_MODEL(OCTEON_CN5XXX) || OCTEON_IS_MODEL(OCTEON_CN3XXX)) {
+ union cvmx_l2c_pfctl pfctl;
- pfctl.u64 = cvmx_read_csr(CVMX_L2C_PFCTL);
+ pfctl.u64 = cvmx_read_csr(CVMX_L2C_PFCTL);
- switch (counter) {
- case 0:
- pfctl.s.cnt0sel = event;
- pfctl.s.cnt0ena = 1;
- if (!cvmx_octeon_is_pass1())
+ switch (counter) {
+ case 0:
+ pfctl.s.cnt0sel = event;
+ pfctl.s.cnt0ena = 1;
pfctl.s.cnt0rdclr = clear_on_read;
- break;
- case 1:
- pfctl.s.cnt1sel = event;
- pfctl.s.cnt1ena = 1;
- if (!cvmx_octeon_is_pass1())
+ break;
+ case 1:
+ pfctl.s.cnt1sel = event;
+ pfctl.s.cnt1ena = 1;
pfctl.s.cnt1rdclr = clear_on_read;
- break;
- case 2:
- pfctl.s.cnt2sel = event;
- pfctl.s.cnt2ena = 1;
- if (!cvmx_octeon_is_pass1())
+ break;
+ case 2:
+ pfctl.s.cnt2sel = event;
+ pfctl.s.cnt2ena = 1;
pfctl.s.cnt2rdclr = clear_on_read;
- break;
- case 3:
- default:
- pfctl.s.cnt3sel = event;
- pfctl.s.cnt3ena = 1;
- if (!cvmx_octeon_is_pass1())
+ break;
+ case 3:
+ default:
+ pfctl.s.cnt3sel = event;
+ pfctl.s.cnt3ena = 1;
pfctl.s.cnt3rdclr = clear_on_read;
- break;
- }
+ break;
+ }
- cvmx_write_csr(CVMX_L2C_PFCTL, pfctl.u64);
+ cvmx_write_csr(CVMX_L2C_PFCTL, pfctl.u64);
+ } else {
+ union cvmx_l2c_tadx_prf l2c_tadx_prf;
+ int tad;
+
+ cvmx_dprintf("L2C performance counter events are different for this chip, mapping 'event' to cvmx_l2c_tad_event_t\n");
+ if (clear_on_read)
+ cvmx_dprintf("L2C counters don't support clear on read for this chip\n");
+
+ l2c_tadx_prf.u64 = cvmx_read_csr(CVMX_L2C_TADX_PRF(0));
+
+ switch (counter) {
+ case 0:
+ l2c_tadx_prf.s.cnt0sel = event;
+ break;
+ case 1:
+ l2c_tadx_prf.s.cnt1sel = event;
+ break;
+ case 2:
+ l2c_tadx_prf.s.cnt2sel = event;
+ break;
+ default:
+ case 3:
+ l2c_tadx_prf.s.cnt3sel = event;
+ break;
+ }
+ for (tad = 0; tad < CVMX_L2C_TADS; tad++)
+ cvmx_write_csr(CVMX_L2C_TADX_PRF(tad),
+ l2c_tadx_prf.u64);
+ }
}
uint64_t cvmx_l2c_read_perf(uint32_t counter)
{
switch (counter) {
case 0:
- return cvmx_read_csr(CVMX_L2C_PFC0);
+ if (OCTEON_IS_MODEL(OCTEON_CN5XXX) || OCTEON_IS_MODEL(OCTEON_CN3XXX))
+ return cvmx_read_csr(CVMX_L2C_PFC0);
+ else {
+ uint64_t counter = 0;
+ int tad;
+ for (tad = 0; tad < CVMX_L2C_TADS; tad++)
+ counter += cvmx_read_csr(CVMX_L2C_TADX_PFC0(tad));
+ return counter;
+ }
case 1:
- return cvmx_read_csr(CVMX_L2C_PFC1);
+ if (OCTEON_IS_MODEL(OCTEON_CN5XXX) || OCTEON_IS_MODEL(OCTEON_CN3XXX))
+ return cvmx_read_csr(CVMX_L2C_PFC1);
+ else {
+ uint64_t counter = 0;
+ int tad;
+ for (tad = 0; tad < CVMX_L2C_TADS; tad++)
+ counter += cvmx_read_csr(CVMX_L2C_TADX_PFC1(tad));
+ return counter;
+ }
case 2:
- return cvmx_read_csr(CVMX_L2C_PFC2);
+ if (OCTEON_IS_MODEL(OCTEON_CN5XXX) || OCTEON_IS_MODEL(OCTEON_CN3XXX))
+ return cvmx_read_csr(CVMX_L2C_PFC2);
+ else {
+ uint64_t counter = 0;
+ int tad;
+ for (tad = 0; tad < CVMX_L2C_TADS; tad++)
+ counter += cvmx_read_csr(CVMX_L2C_TADX_PFC2(tad));
+ return counter;
+ }
case 3:
default:
- return cvmx_read_csr(CVMX_L2C_PFC3);
+ if (OCTEON_IS_MODEL(OCTEON_CN5XXX) || OCTEON_IS_MODEL(OCTEON_CN3XXX))
+ return cvmx_read_csr(CVMX_L2C_PFC3);
+ else {
+ uint64_t counter = 0;
+ int tad;
+ for (tad = 0; tad < CVMX_L2C_TADS; tad++)
+ counter += cvmx_read_csr(CVMX_L2C_TADX_PFC3(tad));
+ return counter;
+ }
}
}
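On the newer parts each L2 tag-and-data unit (TAD) keeps its own performance counter, so cvmx_l2c_read_perf() above returns the sum over all TADs instead of a single CSR read. A toy illustration of that accumulation, with a two-entry array standing in for the per-TAD CVMX_L2C_TADX_PFCn CSRs:

#include <stdint.h>
#include <stdio.h>

#define DEMO_L2C_TADS 2		/* stand-in for CVMX_L2C_TADS */

/* Fake per-TAD counter CSRs. */
static uint64_t demo_tad_pfc0[DEMO_L2C_TADS] = { 1000, 250 };

/* Same shape as the non-CN3XXX/CN5XXX branch of cvmx_l2c_read_perf(). */
static uint64_t demo_read_perf0(void)
{
	uint64_t counter = 0;
	int tad;

	for (tad = 0; tad < DEMO_L2C_TADS; tad++)
		counter += demo_tad_pfc0[tad];
	return counter;
}

int main(void)
{
	printf("event 0 count: %llu\n", (unsigned long long)demo_read_perf0());
	return 0;
}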
@@ -240,7 +288,7 @@ static void fault_in(uint64_t addr, int len)
volatile char dummy;
/*
* Adjust addr and length so we get all cache lines even for
- * small ranges spanning two cache lines
+ * small ranges spanning two cache lines.
*/
len += addr & CVMX_CACHE_LINE_MASK;
addr &= ~CVMX_CACHE_LINE_MASK;
@@ -259,67 +307,100 @@ static void fault_in(uint64_t addr, int len)
int cvmx_l2c_lock_line(uint64_t addr)
{
- int retval = 0;
- union cvmx_l2c_dbg l2cdbg;
- union cvmx_l2c_lckbase lckbase;
- union cvmx_l2c_lckoff lckoff;
- union cvmx_l2t_err l2t_err;
- l2cdbg.u64 = 0;
- lckbase.u64 = 0;
- lckoff.u64 = 0;
-
- cvmx_spinlock_lock(&cvmx_l2c_spinlock);
-
- /* Clear l2t error bits if set */
- l2t_err.u64 = cvmx_read_csr(CVMX_L2T_ERR);
- l2t_err.s.lckerr = 1;
- l2t_err.s.lckerr2 = 1;
- cvmx_write_csr(CVMX_L2T_ERR, l2t_err.u64);
+ if (OCTEON_IS_MODEL(OCTEON_CN63XX)) {
+ int shift = CVMX_L2C_TAG_ADDR_ALIAS_SHIFT;
+ uint64_t assoc = cvmx_l2c_get_num_assoc();
+ uint64_t tag = addr >> shift;
+ uint64_t index = CVMX_ADD_SEG(CVMX_MIPS_SPACE_XKPHYS, cvmx_l2c_address_to_index(addr) << CVMX_L2C_IDX_ADDR_SHIFT);
+ uint64_t way;
+ union cvmx_l2c_tadx_tag l2c_tadx_tag;
+
+ CVMX_CACHE_LCKL2(CVMX_ADD_SEG(CVMX_MIPS_SPACE_XKPHYS, addr), 0);
+
+ /* Make sure we were able to lock the line */
+ for (way = 0; way < assoc; way++) {
+ CVMX_CACHE_LTGL2I(index | (way << shift), 0);
+ /* make sure CVMX_L2C_TADX_TAG is updated */
+ CVMX_SYNC;
+ l2c_tadx_tag.u64 = cvmx_read_csr(CVMX_L2C_TADX_TAG(0));
+ if (l2c_tadx_tag.s.valid && l2c_tadx_tag.s.tag == tag)
+ break;
+ }
- addr &= ~CVMX_CACHE_LINE_MASK;
+ /* Check if a valid line is found */
+ if (way >= assoc) {
+ /* cvmx_dprintf("ERROR: cvmx_l2c_lock_line: line not found for locking at 0x%llx address\n", (unsigned long long)addr); */
+ return -1;
+ }
- /* Set this core as debug core */
- l2cdbg.s.ppnum = cvmx_get_core_num();
- CVMX_SYNC;
- cvmx_write_csr(CVMX_L2C_DBG, l2cdbg.u64);
- cvmx_read_csr(CVMX_L2C_DBG);
-
- lckoff.s.lck_offset = 0; /* Only lock 1 line at a time */
- cvmx_write_csr(CVMX_L2C_LCKOFF, lckoff.u64);
- cvmx_read_csr(CVMX_L2C_LCKOFF);
-
- if (((union cvmx_l2c_cfg) (cvmx_read_csr(CVMX_L2C_CFG))).s.idxalias) {
- int alias_shift =
- CVMX_L2C_IDX_ADDR_SHIFT + 2 * CVMX_L2_SET_BITS - 1;
- uint64_t addr_tmp =
- addr ^ (addr & ((1 << alias_shift) - 1)) >>
- CVMX_L2_SET_BITS;
- lckbase.s.lck_base = addr_tmp >> 7;
+ /* Check if lock bit is not set */
+ if (!l2c_tadx_tag.s.lock) {
+ /* cvmx_dprintf("ERROR: cvmx_l2c_lock_line: Not able to lock at 0x%llx address\n", (unsigned long long)addr); */
+ return -1;
+ }
+ return way;
} else {
- lckbase.s.lck_base = addr >> 7;
- }
+ int retval = 0;
+ union cvmx_l2c_dbg l2cdbg;
+ union cvmx_l2c_lckbase lckbase;
+ union cvmx_l2c_lckoff lckoff;
+ union cvmx_l2t_err l2t_err;
- lckbase.s.lck_ena = 1;
- cvmx_write_csr(CVMX_L2C_LCKBASE, lckbase.u64);
- cvmx_read_csr(CVMX_L2C_LCKBASE); /* Make sure it gets there */
+ cvmx_spinlock_lock(&cvmx_l2c_spinlock);
- fault_in(addr, CVMX_CACHE_LINE_SIZE);
+ l2cdbg.u64 = 0;
+ lckbase.u64 = 0;
+ lckoff.u64 = 0;
- lckbase.s.lck_ena = 0;
- cvmx_write_csr(CVMX_L2C_LCKBASE, lckbase.u64);
- cvmx_read_csr(CVMX_L2C_LCKBASE); /* Make sure it gets there */
+ /* Clear l2t error bits if set */
+ l2t_err.u64 = cvmx_read_csr(CVMX_L2T_ERR);
+ l2t_err.s.lckerr = 1;
+ l2t_err.s.lckerr2 = 1;
+ cvmx_write_csr(CVMX_L2T_ERR, l2t_err.u64);
- /* Stop being debug core */
- cvmx_write_csr(CVMX_L2C_DBG, 0);
- cvmx_read_csr(CVMX_L2C_DBG);
+ addr &= ~CVMX_CACHE_LINE_MASK;
- l2t_err.u64 = cvmx_read_csr(CVMX_L2T_ERR);
- if (l2t_err.s.lckerr || l2t_err.s.lckerr2)
- retval = 1; /* We were unable to lock the line */
+ /* Set this core as debug core */
+ l2cdbg.s.ppnum = cvmx_get_core_num();
+ CVMX_SYNC;
+ cvmx_write_csr(CVMX_L2C_DBG, l2cdbg.u64);
+ cvmx_read_csr(CVMX_L2C_DBG);
+
+ lckoff.s.lck_offset = 0; /* Only lock 1 line at a time */
+ cvmx_write_csr(CVMX_L2C_LCKOFF, lckoff.u64);
+ cvmx_read_csr(CVMX_L2C_LCKOFF);
+
+ if (((union cvmx_l2c_cfg)(cvmx_read_csr(CVMX_L2C_CFG))).s.idxalias) {
+ int alias_shift = CVMX_L2C_IDX_ADDR_SHIFT + 2 * CVMX_L2_SET_BITS - 1;
+ uint64_t addr_tmp = addr ^ (addr & ((1 << alias_shift) - 1)) >> CVMX_L2_SET_BITS;
+ lckbase.s.lck_base = addr_tmp >> 7;
+ } else {
+ lckbase.s.lck_base = addr >> 7;
+ }
- cvmx_spinlock_unlock(&cvmx_l2c_spinlock);
+ lckbase.s.lck_ena = 1;
+ cvmx_write_csr(CVMX_L2C_LCKBASE, lckbase.u64);
+ /* Make sure it gets there */
+ cvmx_read_csr(CVMX_L2C_LCKBASE);
- return retval;
+ fault_in(addr, CVMX_CACHE_LINE_SIZE);
+
+ lckbase.s.lck_ena = 0;
+ cvmx_write_csr(CVMX_L2C_LCKBASE, lckbase.u64);
+ /* Make sure it gets there */
+ cvmx_read_csr(CVMX_L2C_LCKBASE);
+
+ /* Stop being debug core */
+ cvmx_write_csr(CVMX_L2C_DBG, 0);
+ cvmx_read_csr(CVMX_L2C_DBG);
+
+ l2t_err.u64 = cvmx_read_csr(CVMX_L2T_ERR);
+ if (l2t_err.s.lckerr || l2t_err.s.lckerr2)
+ retval = 1; /* We were unable to lock the line */
+
+ cvmx_spinlock_unlock(&cvmx_l2c_spinlock);
+ return retval;
+ }
}
int cvmx_l2c_lock_mem_region(uint64_t start, uint64_t len)
@@ -336,7 +417,6 @@ int cvmx_l2c_lock_mem_region(uint64_t start, uint64_t len)
start += CVMX_CACHE_LINE_SIZE;
len -= CVMX_CACHE_LINE_SIZE;
}
-
return retval;
}
@@ -344,80 +424,73 @@ void cvmx_l2c_flush(void)
{
uint64_t assoc, set;
uint64_t n_assoc, n_set;
- union cvmx_l2c_dbg l2cdbg;
-
- cvmx_spinlock_lock(&cvmx_l2c_spinlock);
- l2cdbg.u64 = 0;
- if (!OCTEON_IS_MODEL(OCTEON_CN30XX))
- l2cdbg.s.ppnum = cvmx_get_core_num();
- l2cdbg.s.finv = 1;
- n_set = CVMX_L2_SETS;
- n_assoc = l2_size_half() ? (CVMX_L2_ASSOC / 2) : CVMX_L2_ASSOC;
- for (set = 0; set < n_set; set++) {
- for (assoc = 0; assoc < n_assoc; assoc++) {
- l2cdbg.s.set = assoc;
- /* Enter debug mode, and make sure all other
- ** writes complete before we enter debug
- ** mode */
- CVMX_SYNCW;
- cvmx_write_csr(CVMX_L2C_DBG, l2cdbg.u64);
- cvmx_read_csr(CVMX_L2C_DBG);
-
- CVMX_PREPARE_FOR_STORE(CVMX_ADD_SEG
- (CVMX_MIPS_SPACE_XKPHYS,
- set * CVMX_CACHE_LINE_SIZE), 0);
- CVMX_SYNCW; /* Push STF out to L2 */
- /* Exit debug mode */
- CVMX_SYNC;
- cvmx_write_csr(CVMX_L2C_DBG, 0);
- cvmx_read_csr(CVMX_L2C_DBG);
+ n_set = cvmx_l2c_get_num_sets();
+ n_assoc = cvmx_l2c_get_num_assoc();
+
+ if (OCTEON_IS_MODEL(OCTEON_CN6XXX)) {
+ uint64_t address;
+ /* These may look like constants, but they aren't... */
+ int assoc_shift = CVMX_L2C_TAG_ADDR_ALIAS_SHIFT;
+ int set_shift = CVMX_L2C_IDX_ADDR_SHIFT;
+ for (set = 0; set < n_set; set++) {
+ for (assoc = 0; assoc < n_assoc; assoc++) {
+ address = CVMX_ADD_SEG(CVMX_MIPS_SPACE_XKPHYS,
+ (assoc << assoc_shift) | (set << set_shift));
+ CVMX_CACHE_WBIL2I(address, 0);
+ }
}
+ } else {
+ for (set = 0; set < n_set; set++)
+ for (assoc = 0; assoc < n_assoc; assoc++)
+ cvmx_l2c_flush_line(assoc, set);
}
-
- cvmx_spinlock_unlock(&cvmx_l2c_spinlock);
}
+
int cvmx_l2c_unlock_line(uint64_t address)
{
- int assoc;
- union cvmx_l2c_tag tag;
- union cvmx_l2c_dbg l2cdbg;
- uint32_t tag_addr;
- uint32_t index = cvmx_l2c_address_to_index(address);
+ if (OCTEON_IS_MODEL(OCTEON_CN63XX)) {
+ int assoc;
+ union cvmx_l2c_tag tag;
+ uint32_t tag_addr;
+ uint32_t index = cvmx_l2c_address_to_index(address);
+
+ tag_addr = ((address >> CVMX_L2C_TAG_ADDR_ALIAS_SHIFT) & ((1 << CVMX_L2C_TAG_ADDR_ALIAS_SHIFT) - 1));
+
+ /*
+ * For 63XX, we can flush a line by using the physical
+ * address directly, so finding the cache line used by
+ * the address is only required to provide the proper
+ * return value for the function.
+ */
+ for (assoc = 0; assoc < CVMX_L2_ASSOC; assoc++) {
+ tag = cvmx_l2c_get_tag(assoc, index);
+
+ if (tag.s.V && (tag.s.addr == tag_addr)) {
+ CVMX_CACHE_WBIL2(CVMX_ADD_SEG(CVMX_MIPS_SPACE_XKPHYS, address), 0);
+ return tag.s.L;
+ }
+ }
+ } else {
+ int assoc;
+ union cvmx_l2c_tag tag;
+ uint32_t tag_addr;
- cvmx_spinlock_lock(&cvmx_l2c_spinlock);
- /* Compute portion of address that is stored in tag */
- tag_addr =
- ((address >> CVMX_L2C_TAG_ADDR_ALIAS_SHIFT) &
- ((1 << CVMX_L2C_TAG_ADDR_ALIAS_SHIFT) - 1));
- for (assoc = 0; assoc < CVMX_L2_ASSOC; assoc++) {
- tag = cvmx_get_l2c_tag(assoc, index);
+ uint32_t index = cvmx_l2c_address_to_index(address);
- if (tag.s.V && (tag.s.addr == tag_addr)) {
- l2cdbg.u64 = 0;
- l2cdbg.s.ppnum = cvmx_get_core_num();
- l2cdbg.s.set = assoc;
- l2cdbg.s.finv = 1;
+ /* Compute portion of address that is stored in tag */
+ tag_addr = ((address >> CVMX_L2C_TAG_ADDR_ALIAS_SHIFT) & ((1 << CVMX_L2C_TAG_ADDR_ALIAS_SHIFT) - 1));
+ for (assoc = 0; assoc < CVMX_L2_ASSOC; assoc++) {
+ tag = cvmx_l2c_get_tag(assoc, index);
- CVMX_SYNC;
- /* Enter debug mode */
- cvmx_write_csr(CVMX_L2C_DBG, l2cdbg.u64);
- cvmx_read_csr(CVMX_L2C_DBG);
-
- CVMX_PREPARE_FOR_STORE(CVMX_ADD_SEG
- (CVMX_MIPS_SPACE_XKPHYS,
- address), 0);
- CVMX_SYNC;
- /* Exit debug mode */
- cvmx_write_csr(CVMX_L2C_DBG, 0);
- cvmx_read_csr(CVMX_L2C_DBG);
- cvmx_spinlock_unlock(&cvmx_l2c_spinlock);
- return tag.s.L;
+ if (tag.s.V && (tag.s.addr == tag_addr)) {
+ cvmx_l2c_flush_line(assoc, index);
+ return tag.s.L;
+ }
}
}
- cvmx_spinlock_unlock(&cvmx_l2c_spinlock);
return 0;
}
@@ -445,48 +518,49 @@ union __cvmx_l2c_tag {
uint64_t u64;
struct cvmx_l2c_tag_cn50xx {
uint64_t reserved:40;
- uint64_t V:1; /* Line valid */
- uint64_t D:1; /* Line dirty */
- uint64_t L:1; /* Line locked */
- uint64_t U:1; /* Use, LRU eviction */
+ uint64_t V:1; /* Line valid */
+ uint64_t D:1; /* Line dirty */
+ uint64_t L:1; /* Line locked */
+ uint64_t U:1; /* Use, LRU eviction */
uint64_t addr:20; /* Phys mem addr (33..14) */
} cn50xx;
struct cvmx_l2c_tag_cn30xx {
uint64_t reserved:41;
- uint64_t V:1; /* Line valid */
- uint64_t D:1; /* Line dirty */
- uint64_t L:1; /* Line locked */
- uint64_t U:1; /* Use, LRU eviction */
+ uint64_t V:1; /* Line valid */
+ uint64_t D:1; /* Line dirty */
+ uint64_t L:1; /* Line locked */
+ uint64_t U:1; /* Use, LRU eviction */
uint64_t addr:19; /* Phys mem addr (33..15) */
} cn30xx;
struct cvmx_l2c_tag_cn31xx {
uint64_t reserved:42;
- uint64_t V:1; /* Line valid */
- uint64_t D:1; /* Line dirty */
- uint64_t L:1; /* Line locked */
- uint64_t U:1; /* Use, LRU eviction */
+ uint64_t V:1; /* Line valid */
+ uint64_t D:1; /* Line dirty */
+ uint64_t L:1; /* Line locked */
+ uint64_t U:1; /* Use, LRU eviction */
uint64_t addr:18; /* Phys mem addr (33..16) */
} cn31xx;
struct cvmx_l2c_tag_cn38xx {
uint64_t reserved:43;
- uint64_t V:1; /* Line valid */
- uint64_t D:1; /* Line dirty */
- uint64_t L:1; /* Line locked */
- uint64_t U:1; /* Use, LRU eviction */
+ uint64_t V:1; /* Line valid */
+ uint64_t D:1; /* Line dirty */
+ uint64_t L:1; /* Line locked */
+ uint64_t U:1; /* Use, LRU eviction */
uint64_t addr:17; /* Phys mem addr (33..17) */
} cn38xx;
struct cvmx_l2c_tag_cn58xx {
uint64_t reserved:44;
- uint64_t V:1; /* Line valid */
- uint64_t D:1; /* Line dirty */
- uint64_t L:1; /* Line locked */
- uint64_t U:1; /* Use, LRU eviction */
+ uint64_t V:1; /* Line valid */
+ uint64_t D:1; /* Line dirty */
+ uint64_t L:1; /* Line locked */
+ uint64_t U:1; /* Use, LRU eviction */
uint64_t addr:16; /* Phys mem addr (33..18) */
} cn58xx;
struct cvmx_l2c_tag_cn58xx cn56xx; /* 2048 sets */
struct cvmx_l2c_tag_cn31xx cn52xx; /* 512 sets */
};
+
/**
* @INTERNAL
* Function to read a L2C tag. This code make the current core
@@ -503,7 +577,7 @@ union __cvmx_l2c_tag {
static union __cvmx_l2c_tag __read_l2_tag(uint64_t assoc, uint64_t index)
{
- uint64_t debug_tag_addr = (((1ULL << 63) | (index << 7)) + 96);
+ uint64_t debug_tag_addr = CVMX_ADD_SEG(CVMX_MIPS_SPACE_XKPHYS, (index << 7) + 96);
uint64_t core = cvmx_get_core_num();
union __cvmx_l2c_tag tag_val;
uint64_t dbg_addr = CVMX_L2C_DBG;
@@ -512,12 +586,15 @@ static union __cvmx_l2c_tag __read_l2_tag(uint64_t assoc, uint64_t index)
union cvmx_l2c_dbg debug_val;
debug_val.u64 = 0;
/*
- * For low core count parts, the core number is always small enough
- * to stay in the correct field and not set any reserved bits.
+ * For low core count parts, the core number is always small
+ * enough to stay in the correct field and not set any
+ * reserved bits.
*/
debug_val.s.ppnum = core;
debug_val.s.l2t = 1;
debug_val.s.set = assoc;
+
+ local_irq_save(flags);
/*
* Make sure core is quiet (no prefetches, etc.) before
* entering debug mode.
@@ -526,112 +603,139 @@ static union __cvmx_l2c_tag __read_l2_tag(uint64_t assoc, uint64_t index)
/* Flush L1 to make sure debug load misses L1 */
CVMX_DCACHE_INVALIDATE;
- local_irq_save(flags);
-
/*
* The following must be done in assembly as when in debug
* mode all data loads from L2 return special debug data, not
- * normal memory contents. Also, interrupts must be
- * disabled, since if an interrupt occurs while in debug mode
- * the ISR will get debug data from all its memory reads
- * instead of the contents of memory
+ * normal memory contents. Also, interrupts must be disabled,
+ * since if an interrupt occurs while in debug mode the ISR
+ * will get debug data from all its memory reads instead of
+ * the contents of memory.
*/
- asm volatile (".set push \n"
- " .set mips64 \n"
- " .set noreorder \n"
- /* Enter debug mode, wait for store */
- " sd %[dbg_val], 0(%[dbg_addr]) \n"
- " ld $0, 0(%[dbg_addr]) \n"
- /* Read L2C tag data */
- " ld %[tag_val], 0(%[tag_addr]) \n"
- /* Exit debug mode, wait for store */
- " sd $0, 0(%[dbg_addr]) \n"
- " ld $0, 0(%[dbg_addr]) \n"
- /* Invalidate dcache to discard debug data */
- " cache 9, 0($0) \n"
- " .set pop" :
- [tag_val] "=r"(tag_val.u64) : [dbg_addr] "r"(dbg_addr),
- [dbg_val] "r"(debug_val.u64),
- [tag_addr] "r"(debug_tag_addr) : "memory");
+ asm volatile (
+ ".set push\n\t"
+ ".set mips64\n\t"
+ ".set noreorder\n\t"
+ "sd %[dbg_val], 0(%[dbg_addr])\n\t" /* Enter debug mode, wait for store */
+ "ld $0, 0(%[dbg_addr])\n\t"
+ "ld %[tag_val], 0(%[tag_addr])\n\t" /* Read L2C tag data */
+ "sd $0, 0(%[dbg_addr])\n\t" /* Exit debug mode, wait for store */
+ "ld $0, 0(%[dbg_addr])\n\t"
+ "cache 9, 0($0)\n\t" /* Invalidate dcache to discard debug data */
+ ".set pop"
+ : [tag_val] "=r" (tag_val)
+ : [dbg_addr] "r" (dbg_addr), [dbg_val] "r" (debug_val), [tag_addr] "r" (debug_tag_addr)
+ : "memory");
local_irq_restore(flags);
- return tag_val;
+ return tag_val;
}
+
union cvmx_l2c_tag cvmx_l2c_get_tag(uint32_t association, uint32_t index)
{
- union __cvmx_l2c_tag tmp_tag;
union cvmx_l2c_tag tag;
tag.u64 = 0;
if ((int)association >= cvmx_l2c_get_num_assoc()) {
- cvmx_dprintf
- ("ERROR: cvmx_get_l2c_tag association out of range\n");
+ cvmx_dprintf("ERROR: cvmx_l2c_get_tag association out of range\n");
return tag;
}
if ((int)index >= cvmx_l2c_get_num_sets()) {
- cvmx_dprintf("ERROR: cvmx_get_l2c_tag "
- "index out of range (arg: %d, max: %d\n",
- index, cvmx_l2c_get_num_sets());
+ cvmx_dprintf("ERROR: cvmx_l2c_get_tag index out of range (arg: %d, max: %d)\n",
+ (int)index, cvmx_l2c_get_num_sets());
return tag;
}
- /* __read_l2_tag is intended for internal use only */
- tmp_tag = __read_l2_tag(association, index);
-
- /*
- * Convert all tag structure types to generic version, as it
- * can represent all models.
- */
- if (OCTEON_IS_MODEL(OCTEON_CN58XX) || OCTEON_IS_MODEL(OCTEON_CN56XX)) {
- tag.s.V = tmp_tag.cn58xx.V;
- tag.s.D = tmp_tag.cn58xx.D;
- tag.s.L = tmp_tag.cn58xx.L;
- tag.s.U = tmp_tag.cn58xx.U;
- tag.s.addr = tmp_tag.cn58xx.addr;
- } else if (OCTEON_IS_MODEL(OCTEON_CN38XX)) {
- tag.s.V = tmp_tag.cn38xx.V;
- tag.s.D = tmp_tag.cn38xx.D;
- tag.s.L = tmp_tag.cn38xx.L;
- tag.s.U = tmp_tag.cn38xx.U;
- tag.s.addr = tmp_tag.cn38xx.addr;
- } else if (OCTEON_IS_MODEL(OCTEON_CN31XX)
- || OCTEON_IS_MODEL(OCTEON_CN52XX)) {
- tag.s.V = tmp_tag.cn31xx.V;
- tag.s.D = tmp_tag.cn31xx.D;
- tag.s.L = tmp_tag.cn31xx.L;
- tag.s.U = tmp_tag.cn31xx.U;
- tag.s.addr = tmp_tag.cn31xx.addr;
- } else if (OCTEON_IS_MODEL(OCTEON_CN30XX)) {
- tag.s.V = tmp_tag.cn30xx.V;
- tag.s.D = tmp_tag.cn30xx.D;
- tag.s.L = tmp_tag.cn30xx.L;
- tag.s.U = tmp_tag.cn30xx.U;
- tag.s.addr = tmp_tag.cn30xx.addr;
- } else if (OCTEON_IS_MODEL(OCTEON_CN50XX)) {
- tag.s.V = tmp_tag.cn50xx.V;
- tag.s.D = tmp_tag.cn50xx.D;
- tag.s.L = tmp_tag.cn50xx.L;
- tag.s.U = tmp_tag.cn50xx.U;
- tag.s.addr = tmp_tag.cn50xx.addr;
+ if (OCTEON_IS_MODEL(OCTEON_CN63XX)) {
+ union cvmx_l2c_tadx_tag l2c_tadx_tag;
+ uint64_t address = CVMX_ADD_SEG(CVMX_MIPS_SPACE_XKPHYS,
+ (association << CVMX_L2C_TAG_ADDR_ALIAS_SHIFT) |
+ (index << CVMX_L2C_IDX_ADDR_SHIFT));
+ /*
+ * Use the L2 cache index load-tag cache instruction:
+ * the hardware loads the tag for the addressed L2
+ * cache block into the L2C_TAD0_TAG register, which
+ * is read back below.
+ */
+ CVMX_CACHE_LTGL2I(address, 0);
+ CVMX_SYNC; /* make sure CVMX_L2C_TADX_TAG is updated */
+ l2c_tadx_tag.u64 = cvmx_read_csr(CVMX_L2C_TADX_TAG(0));
+
+ tag.s.V = l2c_tadx_tag.s.valid;
+ tag.s.D = l2c_tadx_tag.s.dirty;
+ tag.s.L = l2c_tadx_tag.s.lock;
+ tag.s.U = l2c_tadx_tag.s.use;
+ tag.s.addr = l2c_tadx_tag.s.tag;
} else {
- cvmx_dprintf("Unsupported OCTEON Model in %s\n", __func__);
+ union __cvmx_l2c_tag tmp_tag;
+ /* __read_l2_tag is intended for internal use only */
+ tmp_tag = __read_l2_tag(association, index);
+
+ /*
+ * Convert all tag structure types to generic version,
+ * as it can represent all models.
+ */
+ if (OCTEON_IS_MODEL(OCTEON_CN58XX) || OCTEON_IS_MODEL(OCTEON_CN56XX)) {
+ tag.s.V = tmp_tag.cn58xx.V;
+ tag.s.D = tmp_tag.cn58xx.D;
+ tag.s.L = tmp_tag.cn58xx.L;
+ tag.s.U = tmp_tag.cn58xx.U;
+ tag.s.addr = tmp_tag.cn58xx.addr;
+ } else if (OCTEON_IS_MODEL(OCTEON_CN38XX)) {
+ tag.s.V = tmp_tag.cn38xx.V;
+ tag.s.D = tmp_tag.cn38xx.D;
+ tag.s.L = tmp_tag.cn38xx.L;
+ tag.s.U = tmp_tag.cn38xx.U;
+ tag.s.addr = tmp_tag.cn38xx.addr;
+ } else if (OCTEON_IS_MODEL(OCTEON_CN31XX) || OCTEON_IS_MODEL(OCTEON_CN52XX)) {
+ tag.s.V = tmp_tag.cn31xx.V;
+ tag.s.D = tmp_tag.cn31xx.D;
+ tag.s.L = tmp_tag.cn31xx.L;
+ tag.s.U = tmp_tag.cn31xx.U;
+ tag.s.addr = tmp_tag.cn31xx.addr;
+ } else if (OCTEON_IS_MODEL(OCTEON_CN30XX)) {
+ tag.s.V = tmp_tag.cn30xx.V;
+ tag.s.D = tmp_tag.cn30xx.D;
+ tag.s.L = tmp_tag.cn30xx.L;
+ tag.s.U = tmp_tag.cn30xx.U;
+ tag.s.addr = tmp_tag.cn30xx.addr;
+ } else if (OCTEON_IS_MODEL(OCTEON_CN50XX)) {
+ tag.s.V = tmp_tag.cn50xx.V;
+ tag.s.D = tmp_tag.cn50xx.D;
+ tag.s.L = tmp_tag.cn50xx.L;
+ tag.s.U = tmp_tag.cn50xx.U;
+ tag.s.addr = tmp_tag.cn50xx.addr;
+ } else {
+ cvmx_dprintf("Unsupported OCTEON Model in %s\n", __func__);
+ }
}
-
return tag;
}
uint32_t cvmx_l2c_address_to_index(uint64_t addr)
{
uint64_t idx = addr >> CVMX_L2C_IDX_ADDR_SHIFT;
- union cvmx_l2c_cfg l2c_cfg;
- l2c_cfg.u64 = cvmx_read_csr(CVMX_L2C_CFG);
+ int indxalias = 0;
- if (l2c_cfg.s.idxalias) {
- idx ^=
- ((addr & CVMX_L2C_ALIAS_MASK) >>
- CVMX_L2C_TAG_ADDR_ALIAS_SHIFT);
+ if (OCTEON_IS_MODEL(OCTEON_CN6XXX)) {
+ union cvmx_l2c_ctl l2c_ctl;
+ l2c_ctl.u64 = cvmx_read_csr(CVMX_L2C_CTL);
+ indxalias = !l2c_ctl.s.disidxalias;
+ } else {
+ union cvmx_l2c_cfg l2c_cfg;
+ l2c_cfg.u64 = cvmx_read_csr(CVMX_L2C_CFG);
+ indxalias = l2c_cfg.s.idxalias;
+ }
+
+ if (indxalias) {
+ if (OCTEON_IS_MODEL(OCTEON_CN63XX)) {
+ uint32_t a_14_12 = (idx / (CVMX_L2C_MEMBANK_SELECT_SIZE/(1<<CVMX_L2C_IDX_ADDR_SHIFT))) & 0x7;
+ idx ^= idx / cvmx_l2c_get_num_sets();
+ idx ^= a_14_12;
+ } else {
+ idx ^= ((addr & CVMX_L2C_ALIAS_MASK) >> CVMX_L2C_TAG_ADDR_ALIAS_SHIFT);
+ }
}
idx &= CVMX_L2C_IDX_MASK;
return idx;
@@ -652,10 +756,9 @@ int cvmx_l2c_get_set_bits(void)
int l2_set_bits;
if (OCTEON_IS_MODEL(OCTEON_CN56XX) || OCTEON_IS_MODEL(OCTEON_CN58XX))
l2_set_bits = 11; /* 2048 sets */
- else if (OCTEON_IS_MODEL(OCTEON_CN38XX))
+ else if (OCTEON_IS_MODEL(OCTEON_CN38XX) || OCTEON_IS_MODEL(OCTEON_CN63XX))
l2_set_bits = 10; /* 1024 sets */
- else if (OCTEON_IS_MODEL(OCTEON_CN31XX)
- || OCTEON_IS_MODEL(OCTEON_CN52XX))
+ else if (OCTEON_IS_MODEL(OCTEON_CN31XX) || OCTEON_IS_MODEL(OCTEON_CN52XX))
l2_set_bits = 9; /* 512 sets */
else if (OCTEON_IS_MODEL(OCTEON_CN30XX))
l2_set_bits = 8; /* 256 sets */
@@ -666,7 +769,6 @@ int cvmx_l2c_get_set_bits(void)
l2_set_bits = 11; /* 2048 sets */
}
return l2_set_bits;
-
}
/* Return the number of sets in the L2 Cache */
@@ -682,8 +784,11 @@ int cvmx_l2c_get_num_assoc(void)
if (OCTEON_IS_MODEL(OCTEON_CN56XX) ||
OCTEON_IS_MODEL(OCTEON_CN52XX) ||
OCTEON_IS_MODEL(OCTEON_CN58XX) ||
- OCTEON_IS_MODEL(OCTEON_CN50XX) || OCTEON_IS_MODEL(OCTEON_CN38XX))
+ OCTEON_IS_MODEL(OCTEON_CN50XX) ||
+ OCTEON_IS_MODEL(OCTEON_CN38XX))
l2_assoc = 8;
+ else if (OCTEON_IS_MODEL(OCTEON_CN63XX))
+ l2_assoc = 16;
else if (OCTEON_IS_MODEL(OCTEON_CN31XX) ||
OCTEON_IS_MODEL(OCTEON_CN30XX))
l2_assoc = 4;
@@ -693,11 +798,42 @@ int cvmx_l2c_get_num_assoc(void)
}
/* Check to see if part of the cache is disabled */
- if (cvmx_fuse_read(265))
- l2_assoc = l2_assoc >> 2;
- else if (cvmx_fuse_read(264))
- l2_assoc = l2_assoc >> 1;
-
+ if (OCTEON_IS_MODEL(OCTEON_CN63XX)) {
+ union cvmx_mio_fus_dat3 mio_fus_dat3;
+
+ mio_fus_dat3.u64 = cvmx_read_csr(CVMX_MIO_FUS_DAT3);
+ /*
+ * cvmx_mio_fus_dat3.s.l2c_crip fuses map as follows
+ * <2> will not be used for 63xx
+ * <1> disables 1/2 ways
+ * <0> disables 1/4 ways
+ * They are cumulative, so for 63xx:
+ * <1> <0>
+ * 0 0 16-way 2MB cache
+ * 0 1 12-way 1.5MB cache
+ * 1 0 8-way 1MB cache
+ * 1 1 4-way 512KB cache
+ */
+
+ if (mio_fus_dat3.s.l2c_crip == 3)
+ l2_assoc = 4;
+ else if (mio_fus_dat3.s.l2c_crip == 2)
+ l2_assoc = 8;
+ else if (mio_fus_dat3.s.l2c_crip == 1)
+ l2_assoc = 12;
+ } else {
+ union cvmx_l2d_fus3 val;
+ val.u64 = cvmx_read_csr(CVMX_L2D_FUS3);
+ /*
+ * Using shifts here, as bit position names are
+ * different for each model but they all mean the
+ * same.
+ */
+ if ((val.u64 >> 35) & 0x1)
+ l2_assoc = l2_assoc >> 2;
+ else if ((val.u64 >> 34) & 0x1)
+ l2_assoc = l2_assoc >> 1;
+ }
return l2_assoc;
}
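On CN63XX the associativity comes from the l2c_crip fuse field rather than the L2D_FUS3 bits: per the table in the comment above, value 0 leaves the full 16 ways, 1 trims to 12, 2 to 8 and 3 to 4. The same mapping written as a tiny table for checking (the fuse value here is just a test input):

#include <stdio.h>

/* l2c_crip fuse value -> number of L2 ways on CN63XX (per the table above). */
static int cn63xx_ways_from_crip(unsigned int l2c_crip)
{
	switch (l2c_crip & 0x3) {
	case 3:  return 4;	/* 4-way, 512KB */
	case 2:  return 8;	/* 8-way, 1MB */
	case 1:  return 12;	/* 12-way, 1.5MB */
	default: return 16;	/* 16-way, 2MB */
	}
}

int main(void)
{
	unsigned int crip;

	for (crip = 0; crip < 4; crip++)
		printf("l2c_crip=%u -> %d ways\n", crip, cn63xx_ways_from_crip(crip));
	return 0;
}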
@@ -711,24 +847,54 @@ int cvmx_l2c_get_num_assoc(void)
*/
void cvmx_l2c_flush_line(uint32_t assoc, uint32_t index)
{
- union cvmx_l2c_dbg l2cdbg;
+ /* Check the range of the index. */
+ if (index > (uint32_t)cvmx_l2c_get_num_sets()) {
+ cvmx_dprintf("ERROR: cvmx_l2c_flush_line index out of range.\n");
+ return;
+ }
- l2cdbg.u64 = 0;
- l2cdbg.s.ppnum = cvmx_get_core_num();
- l2cdbg.s.finv = 1;
+ /* Check the range of association. */
+ if (assoc > (uint32_t)cvmx_l2c_get_num_assoc()) {
+ cvmx_dprintf("ERROR: cvmx_l2c_flush_line association out of range.\n");
+ return;
+ }
- l2cdbg.s.set = assoc;
- /*
- * Enter debug mode, and make sure all other writes complete
- * before we enter debug mode.
- */
- asm volatile ("sync" : : : "memory");
- cvmx_write_csr(CVMX_L2C_DBG, l2cdbg.u64);
- cvmx_read_csr(CVMX_L2C_DBG);
-
- CVMX_PREPARE_FOR_STORE(((1ULL << 63) + (index) * 128), 0);
- /* Exit debug mode */
- asm volatile ("sync" : : : "memory");
- cvmx_write_csr(CVMX_L2C_DBG, 0);
- cvmx_read_csr(CVMX_L2C_DBG);
+ if (OCTEON_IS_MODEL(OCTEON_CN63XX)) {
+ uint64_t address;
+ /* Create the address based on index and association.
+ * Bits<20:17> select the way of the cache block involved in
+ * the operation.
+ * Bits<16:7> of the effective address select the index.
+ */
+ address = CVMX_ADD_SEG(CVMX_MIPS_SPACE_XKPHYS,
+ (assoc << CVMX_L2C_TAG_ADDR_ALIAS_SHIFT) |
+ (index << CVMX_L2C_IDX_ADDR_SHIFT));
+ CVMX_CACHE_WBIL2I(address, 0);
+ } else {
+ union cvmx_l2c_dbg l2cdbg;
+
+ l2cdbg.u64 = 0;
+ if (!OCTEON_IS_MODEL(OCTEON_CN30XX))
+ l2cdbg.s.ppnum = cvmx_get_core_num();
+ l2cdbg.s.finv = 1;
+
+ l2cdbg.s.set = assoc;
+ cvmx_spinlock_lock(&cvmx_l2c_spinlock);
+ /*
+ * Enter debug mode, and make sure all other writes
+ * complete before we enter debug mode
+ */
+ CVMX_SYNC;
+ cvmx_write_csr(CVMX_L2C_DBG, l2cdbg.u64);
+ cvmx_read_csr(CVMX_L2C_DBG);
+
+ CVMX_PREPARE_FOR_STORE(CVMX_ADD_SEG(CVMX_MIPS_SPACE_XKPHYS,
+ index * CVMX_CACHE_LINE_SIZE),
+ 0);
+ /* Exit debug mode */
+ CVMX_SYNC;
+ cvmx_write_csr(CVMX_L2C_DBG, 0);
+ cvmx_read_csr(CVMX_L2C_DBG);
+ cvmx_spinlock_unlock(&cvmx_l2c_spinlock);
+ }
}
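The CN63XX paths above never enter debug mode; instead they form an XKPHYS address whose bit fields name the way and the set and issue an index-type cache op on it (per the comment in cvmx_l2c_flush_line(), bits <20:17> carry the way and bits <16:7> the index). A sketch of that address composition follows; the shift values are written out as assumptions consistent with that comment, since CVMX_L2C_TAG_ADDR_ALIAS_SHIFT and CVMX_L2C_IDX_ADDR_SHIFT are defined in headers not shown in this patch.

#include <stdint.h>
#include <stdio.h>

#define DEMO_XKPHYS_BASE     0x8000000000000000ull	/* CVMX_ADD_SEG(XKPHYS, 0) */
#define DEMO_TAG_ALIAS_SHIFT 17	/* assumed: way field starts at bit 17 */
#define DEMO_IDX_ADDR_SHIFT  7	/* assumed: 128-byte lines, index at bit 7 */

/* Compose the per-line cache-op address used by the CN63XX flush path. */
static uint64_t demo_l2c_line_address(unsigned int assoc, unsigned int index)
{
	return DEMO_XKPHYS_BASE |
	       ((uint64_t)assoc << DEMO_TAG_ALIAS_SHIFT) |
	       ((uint64_t)index << DEMO_IDX_ADDR_SHIFT);
}

int main(void)
{
	printf("way 3, set 42 -> 0x%016llx\n",
	       (unsigned long long)demo_l2c_line_address(3, 42));
	return 0;
}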
diff --git a/arch/mips/cavium-octeon/octeon-platform.c b/arch/mips/cavium-octeon/octeon-platform.c
index 62ac30eef5e8..cecaf62aef32 100644
--- a/arch/mips/cavium-octeon/octeon-platform.c
+++ b/arch/mips/cavium-octeon/octeon-platform.c
@@ -3,13 +3,15 @@
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*
- * Copyright (C) 2004-2009 Cavium Networks
+ * Copyright (C) 2004-2010 Cavium Networks
* Copyright (C) 2008 Wind River Systems
*/
#include <linux/init.h>
#include <linux/irq.h>
#include <linux/i2c.h>
+#include <linux/usb.h>
+#include <linux/dma-mapping.h>
#include <linux/module.h>
#include <linux/platform_device.h>
@@ -198,7 +200,7 @@ static int __init octeon_i2c_device_init(void)
num_ports = 1;
for (port = 0; port < num_ports; port++) {
- octeon_i2c_data[port].sys_freq = octeon_get_clock_rate();
+ octeon_i2c_data[port].sys_freq = octeon_get_io_clock_rate();
/*FIXME: should be examined. At the moment is set for 100Khz */
octeon_i2c_data[port].i2c_freq = 100000;
@@ -301,6 +303,10 @@ static int __init octeon_mgmt_device_init(void)
ret = -ENOMEM;
goto out;
}
+ /* No DMA restrictions */
+ pd->dev.coherent_dma_mask = DMA_BIT_MASK(64);
+ pd->dev.dma_mask = &pd->dev.coherent_dma_mask;
+
switch (port) {
case 0:
mgmt_port_resource.start = OCTEON_IRQ_MII0;
@@ -332,6 +338,108 @@ out:
}
device_initcall(octeon_mgmt_device_init);
+#ifdef CONFIG_USB
+
+static int __init octeon_ehci_device_init(void)
+{
+ struct platform_device *pd;
+ int ret = 0;
+
+ struct resource usb_resources[] = {
+ {
+ .flags = IORESOURCE_MEM,
+ }, {
+ .flags = IORESOURCE_IRQ,
+ }
+ };
+
+ /* Only Octeon2 has ehci/ohci */
+ if (!OCTEON_IS_MODEL(OCTEON_CN63XX))
+ return 0;
+
+ if (octeon_is_simulation() || usb_disabled())
+ return 0; /* No USB in the simulator. */
+
+ pd = platform_device_alloc("octeon-ehci", 0);
+ if (!pd) {
+ ret = -ENOMEM;
+ goto out;
+ }
+
+ usb_resources[0].start = 0x00016F0000000000ULL;
+ usb_resources[0].end = usb_resources[0].start + 0x100;
+
+ usb_resources[1].start = OCTEON_IRQ_USB0;
+ usb_resources[1].end = OCTEON_IRQ_USB0;
+
+ ret = platform_device_add_resources(pd, usb_resources,
+ ARRAY_SIZE(usb_resources));
+ if (ret)
+ goto fail;
+
+ ret = platform_device_add(pd);
+ if (ret)
+ goto fail;
+
+ return ret;
+fail:
+ platform_device_put(pd);
+out:
+ return ret;
+}
+device_initcall(octeon_ehci_device_init);
+
+static int __init octeon_ohci_device_init(void)
+{
+ struct platform_device *pd;
+ int ret = 0;
+
+ struct resource usb_resources[] = {
+ {
+ .flags = IORESOURCE_MEM,
+ }, {
+ .flags = IORESOURCE_IRQ,
+ }
+ };
+
+ /* Only Octeon2 has ehci/ohci */
+ if (!OCTEON_IS_MODEL(OCTEON_CN63XX))
+ return 0;
+
+ if (octeon_is_simulation() || usb_disabled())
+ return 0; /* No USB in the simulator. */
+
+ pd = platform_device_alloc("octeon-ohci", 0);
+ if (!pd) {
+ ret = -ENOMEM;
+ goto out;
+ }
+
+ usb_resources[0].start = 0x00016F0000000400ULL;
+ usb_resources[0].end = usb_resources[0].start + 0x100;
+
+ usb_resources[1].start = OCTEON_IRQ_USB0;
+ usb_resources[1].end = OCTEON_IRQ_USB0;
+
+ ret = platform_device_add_resources(pd, usb_resources,
+ ARRAY_SIZE(usb_resources));
+ if (ret)
+ goto fail;
+
+ ret = platform_device_add(pd);
+ if (ret)
+ goto fail;
+
+ return ret;
+fail:
+ platform_device_put(pd);
+out:
+ return ret;
+}
+device_initcall(octeon_ohci_device_init);
+
+#endif /* CONFIG_USB */
+
MODULE_AUTHOR("David Daney <ddaney@caviumnetworks.com>");
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Platform driver for Octeon SOC");
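The new EHCI and OHCI init functions above both follow the usual three-step platform-device bring-up: allocate the device, attach the MMIO and IRQ resources, then add it, dropping the reference with platform_device_put() if any step after the allocation fails. A condensed, user-space sketch of that error-unwinding shape; the helper names are illustrative stand-ins, not the kernel API.

#include <stdio.h>
#include <stdlib.h>

/* Toy stand-ins for platform_device_alloc()/add_resources()/add()/put(). */
struct demo_pdev { const char *name; };

static struct demo_pdev *demo_alloc(const char *name)
{
	struct demo_pdev *pd = malloc(sizeof(*pd));

	if (pd)
		pd->name = name;
	return pd;
}
static int demo_add_resources(struct demo_pdev *pd) { (void)pd; return 0; }
static int demo_add(struct demo_pdev *pd) { printf("registered %s\n", pd->name); return 0; }
static void demo_put(struct demo_pdev *pd) { free(pd); }

/* Same shape as octeon_ehci_device_init(): alloc, add resources, add,
 * and drop the reference if anything after the allocation fails. */
static int demo_device_init(void)
{
	struct demo_pdev *pd;
	int ret = 0;

	pd = demo_alloc("octeon-ehci");
	if (!pd) {
		ret = -1;
		goto out;
	}

	ret = demo_add_resources(pd);
	if (ret)
		goto fail;

	ret = demo_add(pd);
	if (ret)
		goto fail;

	return ret;
fail:
	demo_put(pd);
out:
	return ret;
}

int main(void)
{
	return demo_device_init();
}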
diff --git a/arch/mips/cavium-octeon/serial.c b/arch/mips/cavium-octeon/serial.c
index 12dbf533b77d..057f0ae88c99 100644
--- a/arch/mips/cavium-octeon/serial.c
+++ b/arch/mips/cavium-octeon/serial.c
@@ -66,7 +66,7 @@ static void __init octeon_uart_set_common(struct plat_serial8250_port *p)
/* Make simulator output fast*/
p->uartclk = 115200 * 16;
else
- p->uartclk = mips_hpt_frequency;
+ p->uartclk = octeon_get_io_clock_rate();
p->serial_in = octeon_serial_in;
p->serial_out = octeon_serial_out;
}
diff --git a/arch/mips/cavium-octeon/setup.c b/arch/mips/cavium-octeon/setup.c
index 69197cb6c7ea..b0c3686c96dd 100644
--- a/arch/mips/cavium-octeon/setup.c
+++ b/arch/mips/cavium-octeon/setup.c
@@ -33,6 +33,7 @@
#include <asm/octeon/octeon.h>
#include <asm/octeon/pci-octeon.h>
+#include <asm/octeon/cvmx-mio-defs.h>
#ifdef CONFIG_CAVIUM_DECODE_RSL
extern void cvmx_interrupt_rsl_decode(void);
@@ -96,12 +97,21 @@ int octeon_is_pci_host(void)
*/
uint64_t octeon_get_clock_rate(void)
{
- if (octeon_is_simulation())
- octeon_bootinfo->eclock_hz = 6000000;
- return octeon_bootinfo->eclock_hz;
+ struct cvmx_sysinfo *sysinfo = cvmx_sysinfo_get();
+
+ return sysinfo->cpu_clock_hz;
}
EXPORT_SYMBOL(octeon_get_clock_rate);
+static u64 octeon_io_clock_rate;
+
+u64 octeon_get_io_clock_rate(void)
+{
+ return octeon_io_clock_rate;
+}
+EXPORT_SYMBOL(octeon_get_io_clock_rate);
+
+
/**
* Write to the LCD display connected to the bootbus. This display
* exists on most Cavium evaluation boards. If it doesn't exist, then
@@ -346,8 +356,18 @@ void octeon_user_io_init(void)
cvmmemctl.s.wbfltime = 0;
/* R/W If set, do not put Istream in the L2 cache. */
cvmmemctl.s.istrnol2 = 0;
- /* R/W The write buffer threshold. */
- cvmmemctl.s.wbthresh = 10;
+
+ /*
+ * R/W The write buffer threshold. As per erratum Core-14752
+ * for CN63XX, a sc/scd might fail if the write buffer is
+ * full. Lowering WBTHRESH greatly lowers the chances of the
+ * write buffer ever being full and triggering the erratum.
+ */
+ if (OCTEON_IS_MODEL(OCTEON_CN63XX_PASS1_X))
+ cvmmemctl.s.wbthresh = 4;
+ else
+ cvmmemctl.s.wbthresh = 10;
+
/* R/W If set, CVMSEG is available for loads/stores in
* kernel/debug mode. */
#if CONFIG_CAVIUM_OCTEON_CVMSEG_SIZE > 0
@@ -365,14 +385,13 @@ void octeon_user_io_init(void)
* is max legal value. */
cvmmemctl.s.lmemsz = CONFIG_CAVIUM_OCTEON_CVMSEG_SIZE;
+ write_c0_cvmmemctl(cvmmemctl.u64);
if (smp_processor_id() == 0)
pr_notice("CVMSEG size: %d cache lines (%d bytes)\n",
CONFIG_CAVIUM_OCTEON_CVMSEG_SIZE,
CONFIG_CAVIUM_OCTEON_CVMSEG_SIZE * 128);
- write_c0_cvmmemctl(cvmmemctl.u64);
-
/* Move the performance counter interrupts to IRQ 6 */
cvmctl = read_c0_cvmctl();
cvmctl &= ~(7 << 7);
@@ -416,6 +435,41 @@ void __init prom_init(void)
cvmx_phys_to_ptr(octeon_boot_desc_ptr->cvmx_desc_vaddr);
cvmx_bootmem_init(cvmx_phys_to_ptr(octeon_bootinfo->phy_mem_desc_addr));
+ sysinfo = cvmx_sysinfo_get();
+ memset(sysinfo, 0, sizeof(*sysinfo));
+ sysinfo->system_dram_size = octeon_bootinfo->dram_size << 20;
+ sysinfo->phy_mem_desc_ptr =
+ cvmx_phys_to_ptr(octeon_bootinfo->phy_mem_desc_addr);
+ sysinfo->core_mask = octeon_bootinfo->core_mask;
+ sysinfo->exception_base_addr = octeon_bootinfo->exception_base_addr;
+ sysinfo->cpu_clock_hz = octeon_bootinfo->eclock_hz;
+ sysinfo->dram_data_rate_hz = octeon_bootinfo->dclock_hz * 2;
+ sysinfo->board_type = octeon_bootinfo->board_type;
+ sysinfo->board_rev_major = octeon_bootinfo->board_rev_major;
+ sysinfo->board_rev_minor = octeon_bootinfo->board_rev_minor;
+ memcpy(sysinfo->mac_addr_base, octeon_bootinfo->mac_addr_base,
+ sizeof(sysinfo->mac_addr_base));
+ sysinfo->mac_addr_count = octeon_bootinfo->mac_addr_count;
+ memcpy(sysinfo->board_serial_number,
+ octeon_bootinfo->board_serial_number,
+ sizeof(sysinfo->board_serial_number));
+ sysinfo->compact_flash_common_base_addr =
+ octeon_bootinfo->compact_flash_common_base_addr;
+ sysinfo->compact_flash_attribute_base_addr =
+ octeon_bootinfo->compact_flash_attribute_base_addr;
+ sysinfo->led_display_base_addr = octeon_bootinfo->led_display_base_addr;
+ sysinfo->dfa_ref_clock_hz = octeon_bootinfo->dfa_ref_clock_hz;
+ sysinfo->bootloader_config_flags = octeon_bootinfo->config_flags;
+
+ if (OCTEON_IS_MODEL(OCTEON_CN6XXX)) {
+ /* I/O clock runs at a different rate than the CPU. */
+ union cvmx_mio_rst_boot rst_boot;
+ rst_boot.u64 = cvmx_read_csr(CVMX_MIO_RST_BOOT);
+ octeon_io_clock_rate = 50000000 * rst_boot.s.pnr_mul;
+ } else {
+ octeon_io_clock_rate = sysinfo->cpu_clock_hz;
+ }
+
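On CN6XXX the I/O blocks no longer run at the core clock; the code above derives the I/O clock from the reset-boot PLL multiplier, 50 MHz times MIO_RST_BOOT[PNR_MUL], and falls back to the CPU clock on older parts. A one-line check of that arithmetic (pnr_mul = 12 is just an example multiplier, not a value read from hardware):

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	unsigned int pnr_mul = 12;	/* example PLL multiplier from MIO_RST_BOOT */
	uint64_t io_clock_hz = 50000000ull * pnr_mul;

	printf("pnr_mul=%u -> I/O clock %llu Hz\n",
	       pnr_mul, (unsigned long long)io_clock_hz);
	return 0;
}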
/*
* Only enable the LED controller if we're running on a CN38XX, CN58XX,
* or CN56XX. The CN30XX and CN31XX don't have an LED controller.
@@ -479,33 +533,6 @@ void __init prom_init(void)
}
#endif
- sysinfo = cvmx_sysinfo_get();
- memset(sysinfo, 0, sizeof(*sysinfo));
- sysinfo->system_dram_size = octeon_bootinfo->dram_size << 20;
- sysinfo->phy_mem_desc_ptr =
- cvmx_phys_to_ptr(octeon_bootinfo->phy_mem_desc_addr);
- sysinfo->core_mask = octeon_bootinfo->core_mask;
- sysinfo->exception_base_addr = octeon_bootinfo->exception_base_addr;
- sysinfo->cpu_clock_hz = octeon_bootinfo->eclock_hz;
- sysinfo->dram_data_rate_hz = octeon_bootinfo->dclock_hz * 2;
- sysinfo->board_type = octeon_bootinfo->board_type;
- sysinfo->board_rev_major = octeon_bootinfo->board_rev_major;
- sysinfo->board_rev_minor = octeon_bootinfo->board_rev_minor;
- memcpy(sysinfo->mac_addr_base, octeon_bootinfo->mac_addr_base,
- sizeof(sysinfo->mac_addr_base));
- sysinfo->mac_addr_count = octeon_bootinfo->mac_addr_count;
- memcpy(sysinfo->board_serial_number,
- octeon_bootinfo->board_serial_number,
- sizeof(sysinfo->board_serial_number));
- sysinfo->compact_flash_common_base_addr =
- octeon_bootinfo->compact_flash_common_base_addr;
- sysinfo->compact_flash_attribute_base_addr =
- octeon_bootinfo->compact_flash_attribute_base_addr;
- sysinfo->led_display_base_addr = octeon_bootinfo->led_display_base_addr;
- sysinfo->dfa_ref_clock_hz = octeon_bootinfo->dfa_ref_clock_hz;
- sysinfo->bootloader_config_flags = octeon_bootinfo->config_flags;
-
-
octeon_check_cpu_bist();
octeon_uart = octeon_get_boot_uart();
@@ -740,6 +767,31 @@ EXPORT_SYMBOL(prom_putchar);
void prom_free_prom_memory(void)
{
+ if (OCTEON_IS_MODEL(OCTEON_CN63XX_PASS1_X)) {
+ /* Check for presence of Core-14449 fix. */
+ u32 insn;
+ u32 *foo;
+
+ foo = &insn;
+
+ asm volatile("# before" : : : "memory");
+ prefetch(foo);
+ asm volatile(
+ ".set push\n\t"
+ ".set noreorder\n\t"
+ "bal 1f\n\t"
+ "nop\n"
+ "1:\tlw %0,-12($31)\n\t"
+ ".set pop\n\t"
+ : "=r" (insn) : : "$31", "memory");
+
+ if ((insn >> 26) != 0x33)
+ panic("No PREF instruction at Core-14449 probe point.\n");
+
+ if (((insn >> 16) & 0x1f) != 28)
+ panic("Core-14449 WAR not in place (%04x).\n"
+ "Please build kernel with proper options (CONFIG_CAVIUM_CN63XXP1).\n", insn);
+ }
#ifdef CONFIG_CAVIUM_DECODE_RSL
cvmx_interrupt_rsl_enable();
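
The Core-14449 probe added above exploits the fact that a bal records its return address in $31: the lw at offset -12 then fetches the instruction word sitting just before the bal, which, for a kernel built with the workaround enabled (CONFIG_CAVIUM_CN63XXP1, per the panic message), should be the PREF emitted for prefetch(foo) carrying hint 28. A minimal sketch of the two field checks it performs; the helper name is illustrative and not part of the patch:

/* Illustrative only: decode the fields the Core-14449 probe inspects.
 * A MIPS PREF instruction has major opcode 0x33 (bits 31..26) and its
 * hint field in bits 20..16; the probe expects hint 28 at that point. */
static int insn_is_core14449_pref(u32 insn)
{
	unsigned int opcode = insn >> 26;
	unsigned int hint = (insn >> 16) & 0x1f;

	return opcode == 0x33 && hint == 28;
}
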
diff --git a/arch/mips/include/asm/atomic.h b/arch/mips/include/asm/atomic.h
index 47d87da379f9..4a02fe891ab6 100644
--- a/arch/mips/include/asm/atomic.h
+++ b/arch/mips/include/asm/atomic.h
@@ -64,18 +64,16 @@ static __inline__ void atomic_add(int i, atomic_t * v)
} else if (kernel_uses_llsc) {
int temp;
- __asm__ __volatile__(
- " .set mips3 \n"
- "1: ll %0, %1 # atomic_add \n"
- " addu %0, %2 \n"
- " sc %0, %1 \n"
- " beqz %0, 2f \n"
- " .subsection 2 \n"
- "2: b 1b \n"
- " .previous \n"
- " .set mips0 \n"
- : "=&r" (temp), "=m" (v->counter)
- : "Ir" (i), "m" (v->counter));
+ do {
+ __asm__ __volatile__(
+ " .set mips3 \n"
+ " ll %0, %1 # atomic_add \n"
+ " addu %0, %2 \n"
+ " sc %0, %1 \n"
+ " .set mips0 \n"
+ : "=&r" (temp), "=m" (v->counter)
+ : "Ir" (i), "m" (v->counter));
+ } while (unlikely(!temp));
} else {
unsigned long flags;
@@ -109,18 +107,16 @@ static __inline__ void atomic_sub(int i, atomic_t * v)
} else if (kernel_uses_llsc) {
int temp;
- __asm__ __volatile__(
- " .set mips3 \n"
- "1: ll %0, %1 # atomic_sub \n"
- " subu %0, %2 \n"
- " sc %0, %1 \n"
- " beqz %0, 2f \n"
- " .subsection 2 \n"
- "2: b 1b \n"
- " .previous \n"
- " .set mips0 \n"
- : "=&r" (temp), "=m" (v->counter)
- : "Ir" (i), "m" (v->counter));
+ do {
+ __asm__ __volatile__(
+ " .set mips3 \n"
+ " ll %0, %1 # atomic_sub \n"
+ " subu %0, %2 \n"
+ " sc %0, %1 \n"
+ " .set mips0 \n"
+ : "=&r" (temp), "=m" (v->counter)
+ : "Ir" (i), "m" (v->counter));
+ } while (unlikely(!temp));
} else {
unsigned long flags;
@@ -156,20 +152,19 @@ static __inline__ int atomic_add_return(int i, atomic_t * v)
} else if (kernel_uses_llsc) {
int temp;
- __asm__ __volatile__(
- " .set mips3 \n"
- "1: ll %1, %2 # atomic_add_return \n"
- " addu %0, %1, %3 \n"
- " sc %0, %2 \n"
- " beqz %0, 2f \n"
- " addu %0, %1, %3 \n"
- " .subsection 2 \n"
- "2: b 1b \n"
- " .previous \n"
- " .set mips0 \n"
- : "=&r" (result), "=&r" (temp), "=m" (v->counter)
- : "Ir" (i), "m" (v->counter)
- : "memory");
+ do {
+ __asm__ __volatile__(
+ " .set mips3 \n"
+ " ll %1, %2 # atomic_add_return \n"
+ " addu %0, %1, %3 \n"
+ " sc %0, %2 \n"
+ " .set mips0 \n"
+ : "=&r" (result), "=&r" (temp), "=m" (v->counter)
+ : "Ir" (i), "m" (v->counter)
+ : "memory");
+ } while (unlikely(!result));
+
+ result = temp + i;
} else {
unsigned long flags;
@@ -205,23 +200,24 @@ static __inline__ int atomic_sub_return(int i, atomic_t * v)
: "=&r" (result), "=&r" (temp), "=m" (v->counter)
: "Ir" (i), "m" (v->counter)
: "memory");
+
+ result = temp - i;
} else if (kernel_uses_llsc) {
int temp;
- __asm__ __volatile__(
- " .set mips3 \n"
- "1: ll %1, %2 # atomic_sub_return \n"
- " subu %0, %1, %3 \n"
- " sc %0, %2 \n"
- " beqz %0, 2f \n"
- " subu %0, %1, %3 \n"
- " .subsection 2 \n"
- "2: b 1b \n"
- " .previous \n"
- " .set mips0 \n"
- : "=&r" (result), "=&r" (temp), "=m" (v->counter)
- : "Ir" (i), "m" (v->counter)
- : "memory");
+ do {
+ __asm__ __volatile__(
+ " .set mips3 \n"
+ " ll %1, %2 # atomic_sub_return \n"
+ " subu %0, %1, %3 \n"
+ " sc %0, %2 \n"
+ " .set mips0 \n"
+ : "=&r" (result), "=&r" (temp), "=m" (v->counter)
+ : "Ir" (i), "m" (v->counter)
+ : "memory");
+ } while (unlikely(!result));
+
+ result = temp - i;
} else {
unsigned long flags;
@@ -279,12 +275,9 @@ static __inline__ int atomic_sub_if_positive(int i, atomic_t * v)
" bltz %0, 1f \n"
" sc %0, %2 \n"
" .set noreorder \n"
- " beqz %0, 2f \n"
+ " beqz %0, 1b \n"
" subu %0, %1, %3 \n"
" .set reorder \n"
- " .subsection 2 \n"
- "2: b 1b \n"
- " .previous \n"
"1: \n"
" .set mips0 \n"
: "=&r" (result), "=&r" (temp), "=m" (v->counter)
@@ -443,18 +436,16 @@ static __inline__ void atomic64_add(long i, atomic64_t * v)
} else if (kernel_uses_llsc) {
long temp;
- __asm__ __volatile__(
- " .set mips3 \n"
- "1: lld %0, %1 # atomic64_add \n"
- " daddu %0, %2 \n"
- " scd %0, %1 \n"
- " beqz %0, 2f \n"
- " .subsection 2 \n"
- "2: b 1b \n"
- " .previous \n"
- " .set mips0 \n"
- : "=&r" (temp), "=m" (v->counter)
- : "Ir" (i), "m" (v->counter));
+ do {
+ __asm__ __volatile__(
+ " .set mips3 \n"
+ " lld %0, %1 # atomic64_add \n"
+ " daddu %0, %2 \n"
+ " scd %0, %1 \n"
+ " .set mips0 \n"
+ : "=&r" (temp), "=m" (v->counter)
+ : "Ir" (i), "m" (v->counter));
+ } while (unlikely(!temp));
} else {
unsigned long flags;
@@ -488,18 +479,16 @@ static __inline__ void atomic64_sub(long i, atomic64_t * v)
} else if (kernel_uses_llsc) {
long temp;
- __asm__ __volatile__(
- " .set mips3 \n"
- "1: lld %0, %1 # atomic64_sub \n"
- " dsubu %0, %2 \n"
- " scd %0, %1 \n"
- " beqz %0, 2f \n"
- " .subsection 2 \n"
- "2: b 1b \n"
- " .previous \n"
- " .set mips0 \n"
- : "=&r" (temp), "=m" (v->counter)
- : "Ir" (i), "m" (v->counter));
+ do {
+ __asm__ __volatile__(
+ " .set mips3 \n"
+ " lld %0, %1 # atomic64_sub \n"
+ " dsubu %0, %2 \n"
+ " scd %0, %1 \n"
+ " .set mips0 \n"
+ : "=&r" (temp), "=m" (v->counter)
+ : "Ir" (i), "m" (v->counter));
+ } while (unlikely(!temp));
} else {
unsigned long flags;
@@ -535,20 +524,19 @@ static __inline__ long atomic64_add_return(long i, atomic64_t * v)
} else if (kernel_uses_llsc) {
long temp;
- __asm__ __volatile__(
- " .set mips3 \n"
- "1: lld %1, %2 # atomic64_add_return \n"
- " daddu %0, %1, %3 \n"
- " scd %0, %2 \n"
- " beqz %0, 2f \n"
- " daddu %0, %1, %3 \n"
- " .subsection 2 \n"
- "2: b 1b \n"
- " .previous \n"
- " .set mips0 \n"
- : "=&r" (result), "=&r" (temp), "=m" (v->counter)
- : "Ir" (i), "m" (v->counter)
- : "memory");
+ do {
+ __asm__ __volatile__(
+ " .set mips3 \n"
+ " lld %1, %2 # atomic64_add_return \n"
+ " daddu %0, %1, %3 \n"
+ " scd %0, %2 \n"
+ " .set mips0 \n"
+ : "=&r" (result), "=&r" (temp), "=m" (v->counter)
+ : "Ir" (i), "m" (v->counter)
+ : "memory");
+ } while (unlikely(!result));
+
+ result = temp + i;
} else {
unsigned long flags;
@@ -587,20 +575,19 @@ static __inline__ long atomic64_sub_return(long i, atomic64_t * v)
} else if (kernel_uses_llsc) {
long temp;
- __asm__ __volatile__(
- " .set mips3 \n"
- "1: lld %1, %2 # atomic64_sub_return \n"
- " dsubu %0, %1, %3 \n"
- " scd %0, %2 \n"
- " beqz %0, 2f \n"
- " dsubu %0, %1, %3 \n"
- " .subsection 2 \n"
- "2: b 1b \n"
- " .previous \n"
- " .set mips0 \n"
- : "=&r" (result), "=&r" (temp), "=m" (v->counter)
- : "Ir" (i), "m" (v->counter)
- : "memory");
+ do {
+ __asm__ __volatile__(
+ " .set mips3 \n"
+ " lld %1, %2 # atomic64_sub_return \n"
+ " dsubu %0, %1, %3 \n"
+ " scd %0, %2 \n"
+ " .set mips0 \n"
+ : "=&r" (result), "=&r" (temp), "=m" (v->counter)
+ : "Ir" (i), "m" (v->counter)
+ : "memory");
+ } while (unlikely(!result));
+
+ result = temp - i;
} else {
unsigned long flags;
@@ -658,12 +645,9 @@ static __inline__ long atomic64_sub_if_positive(long i, atomic64_t * v)
" bltz %0, 1f \n"
" scd %0, %2 \n"
" .set noreorder \n"
- " beqz %0, 2f \n"
+ " beqz %0, 1b \n"
" dsubu %0, %1, %3 \n"
" .set reorder \n"
- " .subsection 2 \n"
- "2: b 1b \n"
- " .previous \n"
"1: \n"
" .set mips0 \n"
: "=&r" (result), "=&r" (temp), "=m" (v->counter)
diff --git a/arch/mips/include/asm/bitops.h b/arch/mips/include/asm/bitops.h
index b0ce7ca2851f..50b4ef288c53 100644
--- a/arch/mips/include/asm/bitops.h
+++ b/arch/mips/include/asm/bitops.h
@@ -73,30 +73,26 @@ static inline void set_bit(unsigned long nr, volatile unsigned long *addr)
: "ir" (1UL << bit), "m" (*m));
#ifdef CONFIG_CPU_MIPSR2
} else if (kernel_uses_llsc && __builtin_constant_p(bit)) {
- __asm__ __volatile__(
- "1: " __LL "%0, %1 # set_bit \n"
- " " __INS "%0, %4, %2, 1 \n"
- " " __SC "%0, %1 \n"
- " beqz %0, 2f \n"
- " .subsection 2 \n"
- "2: b 1b \n"
- " .previous \n"
- : "=&r" (temp), "=m" (*m)
- : "ir" (bit), "m" (*m), "r" (~0));
+ do {
+ __asm__ __volatile__(
+ " " __LL "%0, %1 # set_bit \n"
+ " " __INS "%0, %3, %2, 1 \n"
+ " " __SC "%0, %1 \n"
+ : "=&r" (temp), "+m" (*m)
+ : "ir" (bit), "r" (~0));
+ } while (unlikely(!temp));
#endif /* CONFIG_CPU_MIPSR2 */
} else if (kernel_uses_llsc) {
- __asm__ __volatile__(
- " .set mips3 \n"
- "1: " __LL "%0, %1 # set_bit \n"
- " or %0, %2 \n"
- " " __SC "%0, %1 \n"
- " beqz %0, 2f \n"
- " .subsection 2 \n"
- "2: b 1b \n"
- " .previous \n"
- " .set mips0 \n"
- : "=&r" (temp), "=m" (*m)
- : "ir" (1UL << bit), "m" (*m));
+ do {
+ __asm__ __volatile__(
+ " .set mips3 \n"
+ " " __LL "%0, %1 # set_bit \n"
+ " or %0, %2 \n"
+ " " __SC "%0, %1 \n"
+ " .set mips0 \n"
+ : "=&r" (temp), "+m" (*m)
+ : "ir" (1UL << bit));
+ } while (unlikely(!temp));
} else {
volatile unsigned long *a = addr;
unsigned long mask;
@@ -134,34 +130,30 @@ static inline void clear_bit(unsigned long nr, volatile unsigned long *addr)
" " __SC "%0, %1 \n"
" beqzl %0, 1b \n"
" .set mips0 \n"
- : "=&r" (temp), "=m" (*m)
- : "ir" (~(1UL << bit)), "m" (*m));
+ : "=&r" (temp), "+m" (*m)
+ : "ir" (~(1UL << bit)));
#ifdef CONFIG_CPU_MIPSR2
} else if (kernel_uses_llsc && __builtin_constant_p(bit)) {
- __asm__ __volatile__(
- "1: " __LL "%0, %1 # clear_bit \n"
- " " __INS "%0, $0, %2, 1 \n"
- " " __SC "%0, %1 \n"
- " beqz %0, 2f \n"
- " .subsection 2 \n"
- "2: b 1b \n"
- " .previous \n"
- : "=&r" (temp), "=m" (*m)
- : "ir" (bit), "m" (*m));
+ do {
+ __asm__ __volatile__(
+ " " __LL "%0, %1 # clear_bit \n"
+ " " __INS "%0, $0, %2, 1 \n"
+ " " __SC "%0, %1 \n"
+ : "=&r" (temp), "+m" (*m)
+ : "ir" (bit));
+ } while (unlikely(!temp));
#endif /* CONFIG_CPU_MIPSR2 */
} else if (kernel_uses_llsc) {
- __asm__ __volatile__(
- " .set mips3 \n"
- "1: " __LL "%0, %1 # clear_bit \n"
- " and %0, %2 \n"
- " " __SC "%0, %1 \n"
- " beqz %0, 2f \n"
- " .subsection 2 \n"
- "2: b 1b \n"
- " .previous \n"
- " .set mips0 \n"
- : "=&r" (temp), "=m" (*m)
- : "ir" (~(1UL << bit)), "m" (*m));
+ do {
+ __asm__ __volatile__(
+ " .set mips3 \n"
+ " " __LL "%0, %1 # clear_bit \n"
+ " and %0, %2 \n"
+ " " __SC "%0, %1 \n"
+ " .set mips0 \n"
+ : "=&r" (temp), "+m" (*m)
+ : "ir" (~(1UL << bit)));
+ } while (unlikely(!temp));
} else {
volatile unsigned long *a = addr;
unsigned long mask;
@@ -213,24 +205,22 @@ static inline void change_bit(unsigned long nr, volatile unsigned long *addr)
" " __SC "%0, %1 \n"
" beqzl %0, 1b \n"
" .set mips0 \n"
- : "=&r" (temp), "=m" (*m)
- : "ir" (1UL << bit), "m" (*m));
+ : "=&r" (temp), "+m" (*m)
+ : "ir" (1UL << bit));
} else if (kernel_uses_llsc) {
unsigned long *m = ((unsigned long *) addr) + (nr >> SZLONG_LOG);
unsigned long temp;
- __asm__ __volatile__(
- " .set mips3 \n"
- "1: " __LL "%0, %1 # change_bit \n"
- " xor %0, %2 \n"
- " " __SC "%0, %1 \n"
- " beqz %0, 2f \n"
- " .subsection 2 \n"
- "2: b 1b \n"
- " .previous \n"
- " .set mips0 \n"
- : "=&r" (temp), "=m" (*m)
- : "ir" (1UL << bit), "m" (*m));
+ do {
+ __asm__ __volatile__(
+ " .set mips3 \n"
+ " " __LL "%0, %1 # change_bit \n"
+ " xor %0, %2 \n"
+ " " __SC "%0, %1 \n"
+ " .set mips0 \n"
+ : "=&r" (temp), "+m" (*m)
+ : "ir" (1UL << bit));
+ } while (unlikely(!temp));
} else {
volatile unsigned long *a = addr;
unsigned long mask;
@@ -272,30 +262,26 @@ static inline int test_and_set_bit(unsigned long nr,
" beqzl %2, 1b \n"
" and %2, %0, %3 \n"
" .set mips0 \n"
- : "=&r" (temp), "=m" (*m), "=&r" (res)
- : "r" (1UL << bit), "m" (*m)
+ : "=&r" (temp), "+m" (*m), "=&r" (res)
+ : "r" (1UL << bit)
: "memory");
} else if (kernel_uses_llsc) {
unsigned long *m = ((unsigned long *) addr) + (nr >> SZLONG_LOG);
unsigned long temp;
- __asm__ __volatile__(
- " .set push \n"
- " .set noreorder \n"
- " .set mips3 \n"
- "1: " __LL "%0, %1 # test_and_set_bit \n"
- " or %2, %0, %3 \n"
- " " __SC "%2, %1 \n"
- " beqz %2, 2f \n"
- " and %2, %0, %3 \n"
- " .subsection 2 \n"
- "2: b 1b \n"
- " nop \n"
- " .previous \n"
- " .set pop \n"
- : "=&r" (temp), "=m" (*m), "=&r" (res)
- : "r" (1UL << bit), "m" (*m)
- : "memory");
+ do {
+ __asm__ __volatile__(
+ " .set mips3 \n"
+ " " __LL "%0, %1 # test_and_set_bit \n"
+ " or %2, %0, %3 \n"
+ " " __SC "%2, %1 \n"
+ " .set mips0 \n"
+ : "=&r" (temp), "+m" (*m), "=&r" (res)
+ : "r" (1UL << bit)
+ : "memory");
+ } while (unlikely(!res));
+
+ res = temp & (1UL << bit);
} else {
volatile unsigned long *a = addr;
unsigned long mask;
@@ -340,30 +326,26 @@ static inline int test_and_set_bit_lock(unsigned long nr,
" beqzl %2, 1b \n"
" and %2, %0, %3 \n"
" .set mips0 \n"
- : "=&r" (temp), "=m" (*m), "=&r" (res)
- : "r" (1UL << bit), "m" (*m)
+ : "=&r" (temp), "+m" (*m), "=&r" (res)
+ : "r" (1UL << bit)
: "memory");
} else if (kernel_uses_llsc) {
unsigned long *m = ((unsigned long *) addr) + (nr >> SZLONG_LOG);
unsigned long temp;
- __asm__ __volatile__(
- " .set push \n"
- " .set noreorder \n"
- " .set mips3 \n"
- "1: " __LL "%0, %1 # test_and_set_bit \n"
- " or %2, %0, %3 \n"
- " " __SC "%2, %1 \n"
- " beqz %2, 2f \n"
- " and %2, %0, %3 \n"
- " .subsection 2 \n"
- "2: b 1b \n"
- " nop \n"
- " .previous \n"
- " .set pop \n"
- : "=&r" (temp), "=m" (*m), "=&r" (res)
- : "r" (1UL << bit), "m" (*m)
- : "memory");
+ do {
+ __asm__ __volatile__(
+ " .set mips3 \n"
+ " " __LL "%0, %1 # test_and_set_bit \n"
+ " or %2, %0, %3 \n"
+ " " __SC "%2, %1 \n"
+ " .set mips0 \n"
+ : "=&r" (temp), "+m" (*m), "=&r" (res)
+ : "r" (1UL << bit)
+ : "memory");
+ } while (unlikely(!res));
+
+ res = temp & (1UL << bit);
} else {
volatile unsigned long *a = addr;
unsigned long mask;
@@ -410,49 +392,43 @@ static inline int test_and_clear_bit(unsigned long nr,
" beqzl %2, 1b \n"
" and %2, %0, %3 \n"
" .set mips0 \n"
- : "=&r" (temp), "=m" (*m), "=&r" (res)
- : "r" (1UL << bit), "m" (*m)
+ : "=&r" (temp), "+m" (*m), "=&r" (res)
+ : "r" (1UL << bit)
: "memory");
#ifdef CONFIG_CPU_MIPSR2
} else if (kernel_uses_llsc && __builtin_constant_p(nr)) {
unsigned long *m = ((unsigned long *) addr) + (nr >> SZLONG_LOG);
unsigned long temp;
- __asm__ __volatile__(
- "1: " __LL "%0, %1 # test_and_clear_bit \n"
- " " __EXT "%2, %0, %3, 1 \n"
- " " __INS "%0, $0, %3, 1 \n"
- " " __SC "%0, %1 \n"
- " beqz %0, 2f \n"
- " .subsection 2 \n"
- "2: b 1b \n"
- " .previous \n"
- : "=&r" (temp), "=m" (*m), "=&r" (res)
- : "ir" (bit), "m" (*m)
- : "memory");
+ do {
+ __asm__ __volatile__(
+ " " __LL "%0, %1 # test_and_clear_bit \n"
+ " " __EXT "%2, %0, %3, 1 \n"
+ " " __INS "%0, $0, %3, 1 \n"
+ " " __SC "%0, %1 \n"
+ : "=&r" (temp), "+m" (*m), "=&r" (res)
+ : "ir" (bit)
+ : "memory");
+ } while (unlikely(!temp));
#endif
} else if (kernel_uses_llsc) {
unsigned long *m = ((unsigned long *) addr) + (nr >> SZLONG_LOG);
unsigned long temp;
- __asm__ __volatile__(
- " .set push \n"
- " .set noreorder \n"
- " .set mips3 \n"
- "1: " __LL "%0, %1 # test_and_clear_bit \n"
- " or %2, %0, %3 \n"
- " xor %2, %3 \n"
- " " __SC "%2, %1 \n"
- " beqz %2, 2f \n"
- " and %2, %0, %3 \n"
- " .subsection 2 \n"
- "2: b 1b \n"
- " nop \n"
- " .previous \n"
- " .set pop \n"
- : "=&r" (temp), "=m" (*m), "=&r" (res)
- : "r" (1UL << bit), "m" (*m)
- : "memory");
+ do {
+ __asm__ __volatile__(
+ " .set mips3 \n"
+ " " __LL "%0, %1 # test_and_clear_bit \n"
+ " or %2, %0, %3 \n"
+ " xor %2, %3 \n"
+ " " __SC "%2, %1 \n"
+ " .set mips0 \n"
+ : "=&r" (temp), "+m" (*m), "=&r" (res)
+ : "r" (1UL << bit)
+ : "memory");
+ } while (unlikely(!res));
+
+ res = temp & (1UL << bit);
} else {
volatile unsigned long *a = addr;
unsigned long mask;
@@ -499,30 +475,26 @@ static inline int test_and_change_bit(unsigned long nr,
" beqzl %2, 1b \n"
" and %2, %0, %3 \n"
" .set mips0 \n"
- : "=&r" (temp), "=m" (*m), "=&r" (res)
- : "r" (1UL << bit), "m" (*m)
+ : "=&r" (temp), "+m" (*m), "=&r" (res)
+ : "r" (1UL << bit)
: "memory");
} else if (kernel_uses_llsc) {
unsigned long *m = ((unsigned long *) addr) + (nr >> SZLONG_LOG);
unsigned long temp;
- __asm__ __volatile__(
- " .set push \n"
- " .set noreorder \n"
- " .set mips3 \n"
- "1: " __LL "%0, %1 # test_and_change_bit \n"
- " xor %2, %0, %3 \n"
- " " __SC "\t%2, %1 \n"
- " beqz %2, 2f \n"
- " and %2, %0, %3 \n"
- " .subsection 2 \n"
- "2: b 1b \n"
- " nop \n"
- " .previous \n"
- " .set pop \n"
- : "=&r" (temp), "=m" (*m), "=&r" (res)
- : "r" (1UL << bit), "m" (*m)
- : "memory");
+ do {
+ __asm__ __volatile__(
+ " .set mips3 \n"
+ " " __LL "%0, %1 # test_and_change_bit \n"
+ " xor %2, %0, %3 \n"
+ " " __SC "\t%2, %1 \n"
+ " .set mips0 \n"
+ : "=&r" (temp), "+m" (*m), "=&r" (res)
+ : "r" (1UL << bit)
+ : "memory");
+ } while (unlikely(!res));
+
+ res = temp & (1UL << bit);
} else {
volatile unsigned long *a = addr;
unsigned long mask;
diff --git a/arch/mips/include/asm/bootinfo.h b/arch/mips/include/asm/bootinfo.h
index 15a8ef0707c6..35cd1bab69c3 100644
--- a/arch/mips/include/asm/bootinfo.h
+++ b/arch/mips/include/asm/bootinfo.h
@@ -125,4 +125,16 @@ extern unsigned long fw_arg0, fw_arg1, fw_arg2, fw_arg3;
*/
extern void plat_mem_setup(void);
+#ifdef CONFIG_SWIOTLB
+/*
+ * Optional platform hook to call swiotlb_setup().
+ */
+extern void plat_swiotlb_setup(void);
+
+#else
+
+static inline void plat_swiotlb_setup(void) {}
+
+#endif /* CONFIG_SWIOTLB */
+
#endif /* _ASM_BOOTINFO_H */
diff --git a/arch/mips/include/asm/cmpxchg.h b/arch/mips/include/asm/cmpxchg.h
index 2d28017e95d0..d8d1c2805ac7 100644
--- a/arch/mips/include/asm/cmpxchg.h
+++ b/arch/mips/include/asm/cmpxchg.h
@@ -44,12 +44,9 @@
" move $1, %z4 \n" \
" .set mips3 \n" \
" " st " $1, %1 \n" \
- " beqz $1, 3f \n" \
- "2: \n" \
- " .subsection 2 \n" \
- "3: b 1b \n" \
- " .previous \n" \
+ " beqz $1, 1b \n" \
" .set pop \n" \
+ "2: \n" \
: "=&r" (__ret), "=R" (*m) \
: "R" (*m), "Jr" (old), "Jr" (new) \
: "memory"); \
diff --git a/arch/mips/include/asm/cpu.h b/arch/mips/include/asm/cpu.h
index b201a8f5b127..06d59dcbe243 100644
--- a/arch/mips/include/asm/cpu.h
+++ b/arch/mips/include/asm/cpu.h
@@ -111,14 +111,16 @@
* These are the PRID's for when 23:16 == PRID_COMP_BROADCOM
*/
-#define PRID_IMP_BCM4710 0x4000
-#define PRID_IMP_BCM3302 0x9000
-#define PRID_IMP_BCM6338 0x9000
-#define PRID_IMP_BCM6345 0x8000
-#define PRID_IMP_BCM6348 0x9100
-#define PRID_IMP_BCM4350 0xA000
-#define PRID_REV_BCM6358 0x0010
-#define PRID_REV_BCM6368 0x0030
+#define PRID_IMP_BMIPS4KC 0x4000
+#define PRID_IMP_BMIPS32 0x8000
+#define PRID_IMP_BMIPS3300 0x9000
+#define PRID_IMP_BMIPS3300_ALT 0x9100
+#define PRID_IMP_BMIPS3300_BUG 0x0000
+#define PRID_IMP_BMIPS43XX 0xa000
+#define PRID_IMP_BMIPS5000 0x5a00
+
+#define PRID_REV_BMIPS4380_LO 0x0040
+#define PRID_REV_BMIPS4380_HI 0x006f
/*
* These are the PRID's for when 23:16 == PRID_COMP_CAVIUM
@@ -131,6 +133,7 @@
#define PRID_IMP_CAVIUM_CN56XX 0x0400
#define PRID_IMP_CAVIUM_CN50XX 0x0600
#define PRID_IMP_CAVIUM_CN52XX 0x0700
+#define PRID_IMP_CAVIUM_CN63XX 0x9000
/*
* These are the PRID's for when 23:16 == PRID_COMP_INGENIC
@@ -223,15 +226,14 @@ enum cpu_type_enum {
* MIPS32 class processors
*/
CPU_4KC, CPU_4KEC, CPU_4KSC, CPU_24K, CPU_34K, CPU_1004K, CPU_74K,
- CPU_ALCHEMY, CPU_PR4450, CPU_BCM3302, CPU_BCM4710,
- CPU_BCM6338, CPU_BCM6345, CPU_BCM6348, CPU_BCM6358,
- CPU_JZRISC,
+ CPU_ALCHEMY, CPU_PR4450, CPU_BMIPS32, CPU_BMIPS3300, CPU_BMIPS4350,
+ CPU_BMIPS4380, CPU_BMIPS5000, CPU_JZRISC,
/*
* MIPS64 class processors
*/
CPU_5KC, CPU_20KC, CPU_25KF, CPU_SB1, CPU_SB1A, CPU_LOONGSON2,
- CPU_CAVIUM_OCTEON, CPU_CAVIUM_OCTEON_PLUS,
+ CPU_CAVIUM_OCTEON, CPU_CAVIUM_OCTEON_PLUS, CPU_CAVIUM_OCTEON2,
CPU_LAST
};
diff --git a/arch/mips/include/asm/device.h b/arch/mips/include/asm/device.h
index 06746c5e8099..c94fafba9e62 100644
--- a/arch/mips/include/asm/device.h
+++ b/arch/mips/include/asm/device.h
@@ -3,4 +3,17 @@
*
* This file is released under the GPLv2
*/
-#include <asm-generic/device.h>
+#ifndef _ASM_MIPS_DEVICE_H
+#define _ASM_MIPS_DEVICE_H
+
+struct dma_map_ops;
+
+struct dev_archdata {
+ /* DMA operations on that device */
+ struct dma_map_ops *dma_ops;
+};
+
+struct pdev_archdata {
+};
+
+#endif /* _ASM_MIPS_DEVICE_H*/
diff --git a/arch/mips/include/asm/dma-mapping.h b/arch/mips/include/asm/dma-mapping.h
index 18fbf7af8e93..655f849bd08d 100644
--- a/arch/mips/include/asm/dma-mapping.h
+++ b/arch/mips/include/asm/dma-mapping.h
@@ -5,51 +5,41 @@
#include <asm/cache.h>
#include <asm-generic/dma-coherent.h>
-void *dma_alloc_noncoherent(struct device *dev, size_t size,
- dma_addr_t *dma_handle, gfp_t flag);
+#include <dma-coherence.h>
-void dma_free_noncoherent(struct device *dev, size_t size,
- void *vaddr, dma_addr_t dma_handle);
+extern struct dma_map_ops *mips_dma_map_ops;
-void *dma_alloc_coherent(struct device *dev, size_t size,
- dma_addr_t *dma_handle, gfp_t flag);
+static inline struct dma_map_ops *get_dma_ops(struct device *dev)
+{
+ if (dev && dev->archdata.dma_ops)
+ return dev->archdata.dma_ops;
+ else
+ return mips_dma_map_ops;
+}
-void dma_free_coherent(struct device *dev, size_t size,
- void *vaddr, dma_addr_t dma_handle);
+static inline bool dma_capable(struct device *dev, dma_addr_t addr, size_t size)
+{
+ if (!dev->dma_mask)
+ return 0;
-extern dma_addr_t dma_map_single(struct device *dev, void *ptr, size_t size,
- enum dma_data_direction direction);
-extern void dma_unmap_single(struct device *dev, dma_addr_t dma_addr,
- size_t size, enum dma_data_direction direction);
-extern int dma_map_sg(struct device *dev, struct scatterlist *sg, int nents,
- enum dma_data_direction direction);
-extern dma_addr_t dma_map_page(struct device *dev, struct page *page,
- unsigned long offset, size_t size, enum dma_data_direction direction);
-
-static inline void dma_unmap_page(struct device *dev, dma_addr_t dma_address,
- size_t size, enum dma_data_direction direction)
+ return addr + size <= *dev->dma_mask;
+}
+
+static inline void dma_mark_clean(void *addr, size_t size) {}
+
+#include <asm-generic/dma-mapping-common.h>
+
+static inline int dma_supported(struct device *dev, u64 mask)
{
- dma_unmap_single(dev, dma_address, size, direction);
+ struct dma_map_ops *ops = get_dma_ops(dev);
+ return ops->dma_supported(dev, mask);
}
-extern void dma_unmap_sg(struct device *dev, struct scatterlist *sg,
- int nhwentries, enum dma_data_direction direction);
-extern void dma_sync_single_for_cpu(struct device *dev, dma_addr_t dma_handle,
- size_t size, enum dma_data_direction direction);
-extern void dma_sync_single_for_device(struct device *dev,
- dma_addr_t dma_handle, size_t size, enum dma_data_direction direction);
-extern void dma_sync_single_range_for_cpu(struct device *dev,
- dma_addr_t dma_handle, unsigned long offset, size_t size,
- enum dma_data_direction direction);
-extern void dma_sync_single_range_for_device(struct device *dev,
- dma_addr_t dma_handle, unsigned long offset, size_t size,
- enum dma_data_direction direction);
-extern void dma_sync_sg_for_cpu(struct device *dev, struct scatterlist *sg,
- int nelems, enum dma_data_direction direction);
-extern void dma_sync_sg_for_device(struct device *dev, struct scatterlist *sg,
- int nelems, enum dma_data_direction direction);
-extern int dma_mapping_error(struct device *dev, dma_addr_t dma_addr);
-extern int dma_supported(struct device *dev, u64 mask);
+static inline int dma_mapping_error(struct device *dev, u64 mask)
+{
+ struct dma_map_ops *ops = get_dma_ops(dev);
+ return ops->mapping_error(dev, mask);
+}
static inline int
dma_set_mask(struct device *dev, u64 mask)
@@ -65,4 +55,34 @@ dma_set_mask(struct device *dev, u64 mask)
extern void dma_cache_sync(struct device *dev, void *vaddr, size_t size,
enum dma_data_direction direction);
+static inline void *dma_alloc_coherent(struct device *dev, size_t size,
+ dma_addr_t *dma_handle, gfp_t gfp)
+{
+ void *ret;
+ struct dma_map_ops *ops = get_dma_ops(dev);
+
+ ret = ops->alloc_coherent(dev, size, dma_handle, gfp);
+
+ debug_dma_alloc_coherent(dev, size, *dma_handle, ret);
+
+ return ret;
+}
+
+static inline void dma_free_coherent(struct device *dev, size_t size,
+ void *vaddr, dma_addr_t dma_handle)
+{
+ struct dma_map_ops *ops = get_dma_ops(dev);
+
+ ops->free_coherent(dev, size, vaddr, dma_handle);
+
+ debug_dma_free_coherent(dev, size, vaddr, dma_handle);
+}
+
+
+void *dma_alloc_noncoherent(struct device *dev, size_t size,
+ dma_addr_t *dma_handle, gfp_t flag);
+
+void dma_free_noncoherent(struct device *dev, size_t size,
+ void *vaddr, dma_addr_t dma_handle);
+
#endif /* _ASM_DMA_MAPPING_H */
diff --git a/arch/mips/include/asm/dma.h b/arch/mips/include/asm/dma.h
index 1353c81065d1..2d47da62d5a7 100644
--- a/arch/mips/include/asm/dma.h
+++ b/arch/mips/include/asm/dma.h
@@ -91,7 +91,10 @@
#define MAX_DMA_ADDRESS (PAGE_OFFSET + 0x01000000)
#endif
#define MAX_DMA_PFN PFN_DOWN(virt_to_phys((void *)MAX_DMA_ADDRESS))
+
+#ifndef MAX_DMA32_PFN
#define MAX_DMA32_PFN (1UL << (32 - PAGE_SHIFT))
+#endif
/* 8237 DMA controllers */
#define IO_DMA1_BASE 0x00 /* 8 bit slave DMA, channels 0..3 */
diff --git a/arch/mips/include/asm/highmem.h b/arch/mips/include/asm/highmem.h
index 75753ca73bfd..77e644082a3b 100644
--- a/arch/mips/include/asm/highmem.h
+++ b/arch/mips/include/asm/highmem.h
@@ -45,18 +45,12 @@ extern pte_t *pkmap_page_table;
extern void * kmap_high(struct page *page);
extern void kunmap_high(struct page *page);
-extern void *__kmap(struct page *page);
-extern void __kunmap(struct page *page);
-extern void *__kmap_atomic(struct page *page, enum km_type type);
-extern void __kunmap_atomic_notypecheck(void *kvaddr, enum km_type type);
-extern void *kmap_atomic_pfn(unsigned long pfn, enum km_type type);
-extern struct page *__kmap_atomic_to_page(void *ptr);
-
-#define kmap __kmap
-#define kunmap __kunmap
-#define kmap_atomic __kmap_atomic
-#define kunmap_atomic_notypecheck __kunmap_atomic_notypecheck
-#define kmap_atomic_to_page __kmap_atomic_to_page
+extern void *kmap(struct page *page);
+extern void kunmap(struct page *page);
+extern void *__kmap_atomic(struct page *page);
+extern void __kunmap_atomic(void *kvaddr);
+extern void *kmap_atomic_pfn(unsigned long pfn);
+extern struct page *kmap_atomic_to_page(void *ptr);
#define flush_cache_kmaps() flush_cache_all()
diff --git a/arch/mips/include/asm/irq.h b/arch/mips/include/asm/irq.h
index dea4aed6478f..b003ed52ed17 100644
--- a/arch/mips/include/asm/irq.h
+++ b/arch/mips/include/asm/irq.h
@@ -16,6 +16,11 @@
#include <irq.h>
+static inline void irq_dispose_mapping(unsigned int virq)
+{
+ return;
+}
+
#ifdef CONFIG_I8259
static inline int irq_canonicalize(int irq)
{
diff --git a/arch/mips/include/asm/local.h b/arch/mips/include/asm/local.h
index bdcdef02d147..fffc8307a80a 100644
--- a/arch/mips/include/asm/local.h
+++ b/arch/mips/include/asm/local.h
@@ -117,7 +117,7 @@ static __inline__ long local_sub_return(long i, local_t * l)
#define local_cmpxchg(l, o, n) \
((long)cmpxchg_local(&((l)->a.counter), (o), (n)))
-#define local_xchg(l, n) (xchg_local(&((l)->a.counter), (n)))
+#define local_xchg(l, n) (atomic_long_xchg((&(l)->a), (n)))
/**
* local_add_unless - add unless the number is a given value
diff --git a/arch/mips/include/asm/mach-ar7/ar7.h b/arch/mips/include/asm/mach-ar7/ar7.h
index 483ffea9ecb1..7919d76186bf 100644
--- a/arch/mips/include/asm/mach-ar7/ar7.h
+++ b/arch/mips/include/asm/mach-ar7/ar7.h
@@ -39,6 +39,7 @@
#define AR7_REGS_UART0 (AR7_REGS_BASE + 0x0e00)
#define AR7_REGS_USB (AR7_REGS_BASE + 0x1200)
#define AR7_REGS_RESET (AR7_REGS_BASE + 0x1600)
+#define AR7_REGS_PINSEL (AR7_REGS_BASE + 0x160C)
#define AR7_REGS_VLYNQ0 (AR7_REGS_BASE + 0x1800)
#define AR7_REGS_DCL (AR7_REGS_BASE + 0x1a00)
#define AR7_REGS_VLYNQ1 (AR7_REGS_BASE + 0x1c00)
@@ -50,6 +51,14 @@
#define UR8_REGS_WDT (AR7_REGS_BASE + 0x0b00)
#define UR8_REGS_UART1 (AR7_REGS_BASE + 0x0f00)
+/* Titan registers */
+#define TITAN_REGS_ESWITCH_BASE (0x08640000)
+#define TITAN_REGS_MAC0 (TITAN_REGS_ESWITCH_BASE)
+#define TITAN_REGS_MAC1 (TITAN_REGS_ESWITCH_BASE + 0x0800)
+#define TITAN_REGS_MDIO (TITAN_REGS_ESWITCH_BASE + 0x02000)
+#define TITAN_REGS_VLYNQ0 (AR7_REGS_BASE + 0x1c00)
+#define TITAN_REGS_VLYNQ1 (AR7_REGS_BASE + 0x1300)
+
#define AR7_RESET_PERIPHERAL 0x0
#define AR7_RESET_SOFTWARE 0x4
#define AR7_RESET_STATUS 0x8
@@ -59,15 +68,30 @@
#define AR7_RESET_BIT_MDIO 22
#define AR7_RESET_BIT_EPHY 26
+#define TITAN_RESET_BIT_EPHY1 28
+
/* GPIO control registers */
#define AR7_GPIO_INPUT 0x0
#define AR7_GPIO_OUTPUT 0x4
#define AR7_GPIO_DIR 0x8
#define AR7_GPIO_ENABLE 0xc
+#define TITAN_GPIO_INPUT_0 0x0
+#define TITAN_GPIO_INPUT_1 0x4
+#define TITAN_GPIO_OUTPUT_0 0x8
+#define TITAN_GPIO_OUTPUT_1 0xc
+#define TITAN_GPIO_DIR_0 0x10
+#define TITAN_GPIO_DIR_1 0x14
+#define TITAN_GPIO_ENBL_0 0x18
+#define TITAN_GPIO_ENBL_1 0x1c
#define AR7_CHIP_7100 0x18
#define AR7_CHIP_7200 0x2b
#define AR7_CHIP_7300 0x05
+#define AR7_CHIP_TITAN 0x07
+#define TITAN_CHIP_1050 0x0f
+#define TITAN_CHIP_1055 0x0e
+#define TITAN_CHIP_1056 0x0d
+#define TITAN_CHIP_1060 0x07
/* Interrupts */
#define AR7_IRQ_UART0 15
@@ -95,14 +119,29 @@ struct plat_dsl_data {
extern int ar7_cpu_clock, ar7_bus_clock, ar7_dsp_clock;
+static inline int ar7_is_titan(void)
+{
+ return (readl((void *)KSEG1ADDR(AR7_REGS_GPIO + 0x24)) & 0xffff) ==
+ AR7_CHIP_TITAN;
+}
+
static inline u16 ar7_chip_id(void)
{
- return readl((void *)KSEG1ADDR(AR7_REGS_GPIO + 0x14)) & 0xffff;
+ return ar7_is_titan() ? AR7_CHIP_TITAN : (readl((void *)
+ KSEG1ADDR(AR7_REGS_GPIO + 0x14)) & 0xffff);
+}
+
+static inline u16 titan_chip_id(void)
+{
+ unsigned int val = readl((void *)KSEG1ADDR(AR7_REGS_GPIO +
+ TITAN_GPIO_INPUT_1));
+ return ((val >> 12) & 0x0f);
}
static inline u8 ar7_chip_rev(void)
{
- return (readl((void *)KSEG1ADDR(AR7_REGS_GPIO + 0x14)) >> 16) & 0xff;
+ return (readl((void *)KSEG1ADDR(AR7_REGS_GPIO + (ar7_is_titan() ? 0x24 :
+ 0x14))) >> 16) & 0xff;
}
struct clk {
@@ -161,4 +200,6 @@ static inline void ar7_device_off(u32 bit)
msleep(20);
}
+int __init ar7_gpio_init(void);
+
#endif /* __AR7_H__ */
diff --git a/arch/mips/include/asm/mach-ar7/gpio.h b/arch/mips/include/asm/mach-ar7/gpio.h
index abc317c0372e..c177cd1eed25 100644
--- a/arch/mips/include/asm/mach-ar7/gpio.h
+++ b/arch/mips/include/asm/mach-ar7/gpio.h
@@ -22,7 +22,8 @@
#include <asm/mach-ar7/ar7.h>
#define AR7_GPIO_MAX 32
-#define NR_BUILTIN_GPIO AR7_GPIO_MAX
+#define TITAN_GPIO_MAX 51
+#define NR_BUILTIN_GPIO TITAN_GPIO_MAX
#define gpio_to_irq(gpio) -1
diff --git a/arch/mips/include/asm/mach-bcm63xx/bcm963xx_tag.h b/arch/mips/include/asm/mach-bcm63xx/bcm963xx_tag.h
new file mode 100644
index 000000000000..5325084d5c48
--- /dev/null
+++ b/arch/mips/include/asm/mach-bcm63xx/bcm963xx_tag.h
@@ -0,0 +1,97 @@
+#ifndef __BCM963XX_TAG_H
+#define __BCM963XX_TAG_H
+
+#define TAGVER_LEN 4 /* Length of Tag Version */
+#define TAGLAYOUT_LEN 4 /* Length of FlashLayoutVer */
+#define SIG1_LEN 20 /* Company Signature 1 Length */
+#define SIG2_LEN 14 /* Company Signature 2 Length */
+#define BOARDID_LEN 16 /* Length of BoardId */
+#define ENDIANFLAG_LEN 2 /* Endian Flag Length */
+#define CHIPID_LEN 6 /* Chip Id Length */
+#define IMAGE_LEN 10 /* Length of Length Field */
+#define ADDRESS_LEN 12 /* Length of Address field */
+#define DUALFLAG_LEN 2 /* Dual Image flag Length */
+#define INACTIVEFLAG_LEN 2 /* Inactive Flag Length */
+#define RSASIG_LEN 20 /* Length of RSA Signature in tag */
+#define TAGINFO1_LEN 30 /* Length of vendor information field1 in tag */
+#define FLASHLAYOUTVER_LEN 4 /* Length of Flash Layout Version String tag */
+#define TAGINFO2_LEN 16 /* Length of vendor information field2 in tag */
+#define CRC_LEN 4 /* Length of CRC in bytes */
+#define ALTTAGINFO_LEN 54 /* Alternate length for vendor information; Pirelli */
+
+#define NUM_PIRELLI 2
+#define IMAGETAG_CRC_START 0xFFFFFFFF
+
+#define PIRELLI_BOARDS { \
+ "AGPF-S0", \
+ "DWV-S0", \
+}
+
+/*
+ * The Broadcom firmware assumes the rootfs starts the image, and
+ * therefore uses the rootfs start (flash_image_address) to determine
+ * where to flash the image. Since we put the kernel first, we have to
+ * give it the kernel address, but the CRC uses the length associated
+ * with this address (root_length), which is added to the kernel length
+ * (kernel_length) to determine the length of the image to flash; thus
+ * root_length needs to be rootfs + deadcode (the jffs2 EOF marker).
+*/
+
+struct bcm_tag {
+ /* 0-3: Version of the image tag */
+ char tag_version[TAGVER_LEN];
+ /* 4-23: Company Line 1 */
+ char sig_1[SIG1_LEN];
+ /* 24-37: Company Line 2 */
+ char sig_2[SIG2_LEN];
+ /* 38-43: Chip this image is for */
+ char chip_id[CHIPID_LEN];
+ /* 44-59: Board name */
+ char board_id[BOARDID_LEN];
+ /* 60-61: Map endianness -- 1 BE 0 LE */
+ char big_endian[ENDIANFLAG_LEN];
+ /* 62-71: Total length of image */
+ char total_length[IMAGE_LEN];
+ /* 72-83: Address in memory of CFE */
+ char cfe__address[ADDRESS_LEN];
+ /* 84-93: Size of CFE */
+ char cfe_length[IMAGE_LEN];
+ /* 94-105: Address in memory of image start
+ * (kernel for OpenWRT, rootfs for stock firmware)
+ */
+ char flash_image_start[ADDRESS_LEN];
+ /* 106-115: Size of rootfs */
+ char root_length[IMAGE_LEN];
+ /* 116-127: Address in memory of kernel */
+ char kernel_address[ADDRESS_LEN];
+ /* 128-137: Size of kernel */
+ char kernel_length[IMAGE_LEN];
+ /* 138-139: Unused at the moment */
+ char dual_image[DUALFLAG_LEN];
+ /* 140-141: Unused at the moment */
+ char inactive_flag[INACTIVEFLAG_LEN];
+ /* 142-161: RSA Signature (not used; some vendors may use this) */
+ char rsa_signature[RSASIG_LEN];
+ /* 162-191: Compilation and related information (not used in OpenWrt) */
+ char information1[TAGINFO1_LEN];
+ /* 192-195: Version flash layout */
+ char flash_layout_ver[FLASHLAYOUTVER_LEN];
+ /* 196-199: kernel+rootfs CRC32 */
+ char fskernel_crc[CRC_LEN];
+ /* 200-215: Unused except on Alice Gate where it holds information */
+ char information2[TAGINFO2_LEN];
+ /* 216-219: CRC32 of image less imagetag (kernel for Alice Gate) */
+ char image_crc[CRC_LEN];
+ /* 220-223: CRC32 of rootfs partition */
+ char rootfs_crc[CRC_LEN];
+ /* 224-227: CRC32 of kernel partition */
+ char kernel_crc[CRC_LEN];
+ /* 228-235: Unused at present */
+ char reserved1[8];
+ /* 236-239: CRC32 of header excluding tagVersion */
+ char header_crc[CRC_LEN];
+ /* 240-255: Unused at present */
+ char reserved2[16];
+};
+
+#endif /* __BCM963XX_TAG_H */
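
The length and address fields in struct bcm_tag above are fixed-width ASCII decimal strings rather than binary integers (hence the char-array types), so any consumer of the tag has to parse them before use. A small sketch of how the flashed-image length described in the comment at the top of the file (root_length plus kernel_length) could be recovered; bcm_tag_image_len() is an illustrative helper, not something this header defines:

/* Illustrative only: parse the ASCII length fields of a bcm_tag and
 * return the length the firmware would flash and CRC, i.e. root_length
 * plus kernel_length as described in the header comment above. */
static unsigned long bcm_tag_image_len(const struct bcm_tag *tag)
{
	unsigned long root_len = simple_strtoul(tag->root_length, NULL, 10);
	unsigned long kernel_len = simple_strtoul(tag->kernel_length, NULL, 10);

	return root_len + kernel_len;
}
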
diff --git a/arch/mips/include/asm/mach-cavium-octeon/cpu-feature-overrides.h b/arch/mips/include/asm/mach-cavium-octeon/cpu-feature-overrides.h
index b952fc7215e2..0d5a42b5f47a 100644
--- a/arch/mips/include/asm/mach-cavium-octeon/cpu-feature-overrides.h
+++ b/arch/mips/include/asm/mach-cavium-octeon/cpu-feature-overrides.h
@@ -59,7 +59,7 @@
#define cpu_has_veic 0
#define cpu_hwrena_impl_bits 0xc0000000
-#define kernel_uses_smartmips_rixi (cpu_data[0].cputype == CPU_CAVIUM_OCTEON_PLUS)
+#define kernel_uses_smartmips_rixi (cpu_data[0].cputype != CPU_CAVIUM_OCTEON)
#define ARCH_HAS_IRQ_PER_CPU 1
#define ARCH_HAS_SPINLOCK_PREFETCH 1
@@ -81,4 +81,10 @@ static inline int octeon_has_saa(void)
return id >= 0x000d0300;
}
+/*
+ * The last 256MB are reserved for device to device mappings and the
+ * BAR1 hole.
+ */
+#define MAX_DMA32_PFN (((1ULL << 32) - (1ULL << 28)) >> PAGE_SHIFT)
+
#endif
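
As a quick sanity check on the new MAX_DMA32_PFN limit, the arithmetic works out as follows (a PAGE_SHIFT of 12, i.e. 4 KiB pages, is assumed here purely for illustration):

/* Sketch, assuming PAGE_SHIFT == 12:
 *   (1ULL << 32) - (1ULL << 28)  =  4 GiB - 256 MiB  =  0xF0000000
 *   0xF0000000 >> 12             =  0xF0000 page frames
 * so DMA32 allocations stop 256 MiB below the 4 GiB boundary, leaving
 * that window for the device-to-device mappings and the BAR1 hole. */
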
diff --git a/arch/mips/include/asm/mach-cavium-octeon/dma-coherence.h b/arch/mips/include/asm/mach-cavium-octeon/dma-coherence.h
index 17d579471ec4..be8fb4240cec 100644
--- a/arch/mips/include/asm/mach-cavium-octeon/dma-coherence.h
+++ b/arch/mips/include/asm/mach-cavium-octeon/dma-coherence.h
@@ -15,41 +15,40 @@
struct device;
-dma_addr_t octeon_map_dma_mem(struct device *, void *, size_t);
-void octeon_unmap_dma_mem(struct device *, dma_addr_t);
+extern void octeon_pci_dma_init(void);
static inline dma_addr_t plat_map_dma_mem(struct device *dev, void *addr,
size_t size)
{
- return octeon_map_dma_mem(dev, addr, size);
+ BUG();
}
static inline dma_addr_t plat_map_dma_mem_page(struct device *dev,
struct page *page)
{
- return octeon_map_dma_mem(dev, page_address(page), PAGE_SIZE);
+ BUG();
}
static inline unsigned long plat_dma_addr_to_phys(struct device *dev,
dma_addr_t dma_addr)
{
- return dma_addr;
+ BUG();
}
static inline void plat_unmap_dma_mem(struct device *dev, dma_addr_t dma_addr,
size_t size, enum dma_data_direction direction)
{
- octeon_unmap_dma_mem(dev, dma_addr);
+ BUG();
}
static inline int plat_dma_supported(struct device *dev, u64 mask)
{
- return 1;
+ BUG();
}
static inline void plat_extra_sync_for_device(struct device *dev)
{
- mb();
+ BUG();
}
static inline int plat_device_is_coherent(struct device *dev)
@@ -60,7 +59,14 @@ static inline int plat_device_is_coherent(struct device *dev)
static inline int plat_dma_mapping_error(struct device *dev,
dma_addr_t dma_addr)
{
- return dma_addr == -1;
+ BUG();
}
+dma_addr_t phys_to_dma(struct device *dev, phys_addr_t paddr);
+phys_addr_t dma_to_phys(struct device *dev, dma_addr_t daddr);
+
+struct dma_map_ops;
+extern struct dma_map_ops *octeon_pci_dma_map_ops;
+extern char *octeon_swiotlb;
+
#endif /* __ASM_MACH_CAVIUM_OCTEON_DMA_COHERENCE_H */
diff --git a/arch/mips/include/asm/mach-ip27/dma-coherence.h b/arch/mips/include/asm/mach-ip27/dma-coherence.h
index d3d04018a858..016d0989b141 100644
--- a/arch/mips/include/asm/mach-ip27/dma-coherence.h
+++ b/arch/mips/include/asm/mach-ip27/dma-coherence.h
@@ -26,14 +26,15 @@ static inline dma_addr_t plat_map_dma_mem(struct device *dev, void *addr,
return pa;
}
-static dma_addr_t plat_map_dma_mem_page(struct device *dev, struct page *page)
+static inline dma_addr_t plat_map_dma_mem_page(struct device *dev,
+ struct page *page)
{
dma_addr_t pa = dev_to_baddr(dev, page_to_phys(page));
return pa;
}
-static unsigned long plat_dma_addr_to_phys(struct device *dev,
+static inline unsigned long plat_dma_addr_to_phys(struct device *dev,
dma_addr_t dma_addr)
{
return dma_addr & ~(0xffUL << 56);
diff --git a/arch/mips/include/asm/mach-ip32/dma-coherence.h b/arch/mips/include/asm/mach-ip32/dma-coherence.h
index 37855955b313..c8fb5aacf50a 100644
--- a/arch/mips/include/asm/mach-ip32/dma-coherence.h
+++ b/arch/mips/include/asm/mach-ip32/dma-coherence.h
@@ -37,7 +37,8 @@ static inline dma_addr_t plat_map_dma_mem(struct device *dev, void *addr,
return pa;
}
-static dma_addr_t plat_map_dma_mem_page(struct device *dev, struct page *page)
+static inline dma_addr_t plat_map_dma_mem_page(struct device *dev,
+ struct page *page)
{
dma_addr_t pa;
@@ -50,7 +51,7 @@ static dma_addr_t plat_map_dma_mem_page(struct device *dev, struct page *page)
}
/* This is almost certainly wrong but it's what dma-ip32.c used to use */
-static unsigned long plat_dma_addr_to_phys(struct device *dev,
+static inline unsigned long plat_dma_addr_to_phys(struct device *dev,
dma_addr_t dma_addr)
{
unsigned long addr = dma_addr & RAM_OFFSET_MASK;
diff --git a/arch/mips/include/asm/mach-jazz/dma-coherence.h b/arch/mips/include/asm/mach-jazz/dma-coherence.h
index f93aee59454a..302101b54acb 100644
--- a/arch/mips/include/asm/mach-jazz/dma-coherence.h
+++ b/arch/mips/include/asm/mach-jazz/dma-coherence.h
@@ -12,23 +12,24 @@
struct device;
-static dma_addr_t plat_map_dma_mem(struct device *dev, void *addr, size_t size)
+static inline dma_addr_t plat_map_dma_mem(struct device *dev, void *addr, size_t size)
{
return vdma_alloc(virt_to_phys(addr), size);
}
-static dma_addr_t plat_map_dma_mem_page(struct device *dev, struct page *page)
+static inline dma_addr_t plat_map_dma_mem_page(struct device *dev,
+ struct page *page)
{
return vdma_alloc(page_to_phys(page), PAGE_SIZE);
}
-static unsigned long plat_dma_addr_to_phys(struct device *dev,
+static inline unsigned long plat_dma_addr_to_phys(struct device *dev,
dma_addr_t dma_addr)
{
return vdma_log2phys(dma_addr);
}
-static void plat_unmap_dma_mem(struct device *dev, dma_addr_t dma_addr,
+static inline void plat_unmap_dma_mem(struct device *dev, dma_addr_t dma_addr,
size_t size, enum dma_data_direction direction)
{
vdma_free(dma_addr);
diff --git a/arch/mips/include/asm/mipsregs.h b/arch/mips/include/asm/mipsregs.h
index 335474c155f6..4d9870975382 100644
--- a/arch/mips/include/asm/mipsregs.h
+++ b/arch/mips/include/asm/mipsregs.h
@@ -1040,6 +1040,12 @@ do { \
#define read_c0_dtaglo() __read_32bit_c0_register($28, 2)
#define write_c0_dtaglo(val) __write_32bit_c0_register($28, 2, val)
+#define read_c0_ddatalo() __read_32bit_c0_register($28, 3)
+#define write_c0_ddatalo(val) __write_32bit_c0_register($28, 3, val)
+
+#define read_c0_staglo() __read_32bit_c0_register($28, 4)
+#define write_c0_staglo(val) __write_32bit_c0_register($28, 4, val)
+
#define read_c0_taghi() __read_32bit_c0_register($29, 0)
#define write_c0_taghi(val) __write_32bit_c0_register($29, 0, val)
@@ -1082,6 +1088,51 @@ do { \
#define read_octeon_c0_dcacheerr() __read_64bit_c0_register($27, 1)
#define write_octeon_c0_dcacheerr(val) __write_64bit_c0_register($27, 1, val)
+/* BMIPS3300 */
+#define read_c0_brcm_config_0() __read_32bit_c0_register($22, 0)
+#define write_c0_brcm_config_0(val) __write_32bit_c0_register($22, 0, val)
+
+#define read_c0_brcm_bus_pll() __read_32bit_c0_register($22, 4)
+#define write_c0_brcm_bus_pll(val) __write_32bit_c0_register($22, 4, val)
+
+#define read_c0_brcm_reset() __read_32bit_c0_register($22, 5)
+#define write_c0_brcm_reset(val) __write_32bit_c0_register($22, 5, val)
+
+/* BMIPS4380 */
+#define read_c0_brcm_cmt_intr() __read_32bit_c0_register($22, 1)
+#define write_c0_brcm_cmt_intr(val) __write_32bit_c0_register($22, 1, val)
+
+#define read_c0_brcm_cmt_ctrl() __read_32bit_c0_register($22, 2)
+#define write_c0_brcm_cmt_ctrl(val) __write_32bit_c0_register($22, 2, val)
+
+#define read_c0_brcm_cmt_local() __read_32bit_c0_register($22, 3)
+#define write_c0_brcm_cmt_local(val) __write_32bit_c0_register($22, 3, val)
+
+#define read_c0_brcm_config_1() __read_32bit_c0_register($22, 5)
+#define write_c0_brcm_config_1(val) __write_32bit_c0_register($22, 5, val)
+
+#define read_c0_brcm_cbr() __read_32bit_c0_register($22, 6)
+#define write_c0_brcm_cbr(val) __write_32bit_c0_register($22, 6, val)
+
+/* BMIPS5000 */
+#define read_c0_brcm_config() __read_32bit_c0_register($22, 0)
+#define write_c0_brcm_config(val) __write_32bit_c0_register($22, 0, val)
+
+#define read_c0_brcm_mode() __read_32bit_c0_register($22, 1)
+#define write_c0_brcm_mode(val) __write_32bit_c0_register($22, 1, val)
+
+#define read_c0_brcm_action() __read_32bit_c0_register($22, 2)
+#define write_c0_brcm_action(val) __write_32bit_c0_register($22, 2, val)
+
+#define read_c0_brcm_edsp() __read_32bit_c0_register($22, 3)
+#define write_c0_brcm_edsp(val) __write_32bit_c0_register($22, 3, val)
+
+#define read_c0_brcm_bootvec() __read_32bit_c0_register($22, 4)
+#define write_c0_brcm_bootvec(val) __write_32bit_c0_register($22, 4, val)
+
+#define read_c0_brcm_sleepcount() __read_32bit_c0_register($22, 7)
+#define write_c0_brcm_sleepcount(val) __write_32bit_c0_register($22, 7, val)
+
/*
* Macros to access the floating point coprocessor control registers
*/
diff --git a/arch/mips/include/asm/octeon/cvmx-agl-defs.h b/arch/mips/include/asm/octeon/cvmx-agl-defs.h
index ec94b9ab7be1..30d68f2365e0 100644
--- a/arch/mips/include/asm/octeon/cvmx-agl-defs.h
+++ b/arch/mips/include/asm/octeon/cvmx-agl-defs.h
@@ -4,7 +4,7 @@
* Contact: support@caviumnetworks.com
* This file is part of the OCTEON SDK
*
- * Copyright (c) 2003-2008 Cavium Networks
+ * Copyright (c) 2003-2010 Cavium Networks
*
* This file is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License, Version 2, as
@@ -28,148 +28,80 @@
#ifndef __CVMX_AGL_DEFS_H__
#define __CVMX_AGL_DEFS_H__
-#define CVMX_AGL_GMX_BAD_REG \
- CVMX_ADD_IO_SEG(0x00011800E0000518ull)
-#define CVMX_AGL_GMX_BIST \
- CVMX_ADD_IO_SEG(0x00011800E0000400ull)
-#define CVMX_AGL_GMX_DRV_CTL \
- CVMX_ADD_IO_SEG(0x00011800E00007F0ull)
-#define CVMX_AGL_GMX_INF_MODE \
- CVMX_ADD_IO_SEG(0x00011800E00007F8ull)
-#define CVMX_AGL_GMX_PRTX_CFG(offset) \
- CVMX_ADD_IO_SEG(0x00011800E0000010ull + (((offset) & 1) * 2048))
-#define CVMX_AGL_GMX_RXX_ADR_CAM0(offset) \
- CVMX_ADD_IO_SEG(0x00011800E0000180ull + (((offset) & 1) * 2048))
-#define CVMX_AGL_GMX_RXX_ADR_CAM1(offset) \
- CVMX_ADD_IO_SEG(0x00011800E0000188ull + (((offset) & 1) * 2048))
-#define CVMX_AGL_GMX_RXX_ADR_CAM2(offset) \
- CVMX_ADD_IO_SEG(0x00011800E0000190ull + (((offset) & 1) * 2048))
-#define CVMX_AGL_GMX_RXX_ADR_CAM3(offset) \
- CVMX_ADD_IO_SEG(0x00011800E0000198ull + (((offset) & 1) * 2048))
-#define CVMX_AGL_GMX_RXX_ADR_CAM4(offset) \
- CVMX_ADD_IO_SEG(0x00011800E00001A0ull + (((offset) & 1) * 2048))
-#define CVMX_AGL_GMX_RXX_ADR_CAM5(offset) \
- CVMX_ADD_IO_SEG(0x00011800E00001A8ull + (((offset) & 1) * 2048))
-#define CVMX_AGL_GMX_RXX_ADR_CAM_EN(offset) \
- CVMX_ADD_IO_SEG(0x00011800E0000108ull + (((offset) & 1) * 2048))
-#define CVMX_AGL_GMX_RXX_ADR_CTL(offset) \
- CVMX_ADD_IO_SEG(0x00011800E0000100ull + (((offset) & 1) * 2048))
-#define CVMX_AGL_GMX_RXX_DECISION(offset) \
- CVMX_ADD_IO_SEG(0x00011800E0000040ull + (((offset) & 1) * 2048))
-#define CVMX_AGL_GMX_RXX_FRM_CHK(offset) \
- CVMX_ADD_IO_SEG(0x00011800E0000020ull + (((offset) & 1) * 2048))
-#define CVMX_AGL_GMX_RXX_FRM_CTL(offset) \
- CVMX_ADD_IO_SEG(0x00011800E0000018ull + (((offset) & 1) * 2048))
-#define CVMX_AGL_GMX_RXX_FRM_MAX(offset) \
- CVMX_ADD_IO_SEG(0x00011800E0000030ull + (((offset) & 1) * 2048))
-#define CVMX_AGL_GMX_RXX_FRM_MIN(offset) \
- CVMX_ADD_IO_SEG(0x00011800E0000028ull + (((offset) & 1) * 2048))
-#define CVMX_AGL_GMX_RXX_IFG(offset) \
- CVMX_ADD_IO_SEG(0x00011800E0000058ull + (((offset) & 1) * 2048))
-#define CVMX_AGL_GMX_RXX_INT_EN(offset) \
- CVMX_ADD_IO_SEG(0x00011800E0000008ull + (((offset) & 1) * 2048))
-#define CVMX_AGL_GMX_RXX_INT_REG(offset) \
- CVMX_ADD_IO_SEG(0x00011800E0000000ull + (((offset) & 1) * 2048))
-#define CVMX_AGL_GMX_RXX_JABBER(offset) \
- CVMX_ADD_IO_SEG(0x00011800E0000038ull + (((offset) & 1) * 2048))
-#define CVMX_AGL_GMX_RXX_PAUSE_DROP_TIME(offset) \
- CVMX_ADD_IO_SEG(0x00011800E0000068ull + (((offset) & 1) * 2048))
-#define CVMX_AGL_GMX_RXX_STATS_CTL(offset) \
- CVMX_ADD_IO_SEG(0x00011800E0000050ull + (((offset) & 1) * 2048))
-#define CVMX_AGL_GMX_RXX_STATS_OCTS(offset) \
- CVMX_ADD_IO_SEG(0x00011800E0000088ull + (((offset) & 1) * 2048))
-#define CVMX_AGL_GMX_RXX_STATS_OCTS_CTL(offset) \
- CVMX_ADD_IO_SEG(0x00011800E0000098ull + (((offset) & 1) * 2048))
-#define CVMX_AGL_GMX_RXX_STATS_OCTS_DMAC(offset) \
- CVMX_ADD_IO_SEG(0x00011800E00000A8ull + (((offset) & 1) * 2048))
-#define CVMX_AGL_GMX_RXX_STATS_OCTS_DRP(offset) \
- CVMX_ADD_IO_SEG(0x00011800E00000B8ull + (((offset) & 1) * 2048))
-#define CVMX_AGL_GMX_RXX_STATS_PKTS(offset) \
- CVMX_ADD_IO_SEG(0x00011800E0000080ull + (((offset) & 1) * 2048))
-#define CVMX_AGL_GMX_RXX_STATS_PKTS_BAD(offset) \
- CVMX_ADD_IO_SEG(0x00011800E00000C0ull + (((offset) & 1) * 2048))
-#define CVMX_AGL_GMX_RXX_STATS_PKTS_CTL(offset) \
- CVMX_ADD_IO_SEG(0x00011800E0000090ull + (((offset) & 1) * 2048))
-#define CVMX_AGL_GMX_RXX_STATS_PKTS_DMAC(offset) \
- CVMX_ADD_IO_SEG(0x00011800E00000A0ull + (((offset) & 1) * 2048))
-#define CVMX_AGL_GMX_RXX_STATS_PKTS_DRP(offset) \
- CVMX_ADD_IO_SEG(0x00011800E00000B0ull + (((offset) & 1) * 2048))
-#define CVMX_AGL_GMX_RXX_UDD_SKP(offset) \
- CVMX_ADD_IO_SEG(0x00011800E0000048ull + (((offset) & 1) * 2048))
-#define CVMX_AGL_GMX_RX_BP_DROPX(offset) \
- CVMX_ADD_IO_SEG(0x00011800E0000420ull + (((offset) & 1) * 8))
-#define CVMX_AGL_GMX_RX_BP_OFFX(offset) \
- CVMX_ADD_IO_SEG(0x00011800E0000460ull + (((offset) & 1) * 8))
-#define CVMX_AGL_GMX_RX_BP_ONX(offset) \
- CVMX_ADD_IO_SEG(0x00011800E0000440ull + (((offset) & 1) * 8))
-#define CVMX_AGL_GMX_RX_PRT_INFO \
- CVMX_ADD_IO_SEG(0x00011800E00004E8ull)
-#define CVMX_AGL_GMX_RX_TX_STATUS \
- CVMX_ADD_IO_SEG(0x00011800E00007E8ull)
-#define CVMX_AGL_GMX_SMACX(offset) \
- CVMX_ADD_IO_SEG(0x00011800E0000230ull + (((offset) & 1) * 2048))
-#define CVMX_AGL_GMX_STAT_BP \
- CVMX_ADD_IO_SEG(0x00011800E0000520ull)
-#define CVMX_AGL_GMX_TXX_APPEND(offset) \
- CVMX_ADD_IO_SEG(0x00011800E0000218ull + (((offset) & 1) * 2048))
-#define CVMX_AGL_GMX_TXX_CTL(offset) \
- CVMX_ADD_IO_SEG(0x00011800E0000270ull + (((offset) & 1) * 2048))
-#define CVMX_AGL_GMX_TXX_MIN_PKT(offset) \
- CVMX_ADD_IO_SEG(0x00011800E0000240ull + (((offset) & 1) * 2048))
-#define CVMX_AGL_GMX_TXX_PAUSE_PKT_INTERVAL(offset) \
- CVMX_ADD_IO_SEG(0x00011800E0000248ull + (((offset) & 1) * 2048))
-#define CVMX_AGL_GMX_TXX_PAUSE_PKT_TIME(offset) \
- CVMX_ADD_IO_SEG(0x00011800E0000238ull + (((offset) & 1) * 2048))
-#define CVMX_AGL_GMX_TXX_PAUSE_TOGO(offset) \
- CVMX_ADD_IO_SEG(0x00011800E0000258ull + (((offset) & 1) * 2048))
-#define CVMX_AGL_GMX_TXX_PAUSE_ZERO(offset) \
- CVMX_ADD_IO_SEG(0x00011800E0000260ull + (((offset) & 1) * 2048))
-#define CVMX_AGL_GMX_TXX_SOFT_PAUSE(offset) \
- CVMX_ADD_IO_SEG(0x00011800E0000250ull + (((offset) & 1) * 2048))
-#define CVMX_AGL_GMX_TXX_STAT0(offset) \
- CVMX_ADD_IO_SEG(0x00011800E0000280ull + (((offset) & 1) * 2048))
-#define CVMX_AGL_GMX_TXX_STAT1(offset) \
- CVMX_ADD_IO_SEG(0x00011800E0000288ull + (((offset) & 1) * 2048))
-#define CVMX_AGL_GMX_TXX_STAT2(offset) \
- CVMX_ADD_IO_SEG(0x00011800E0000290ull + (((offset) & 1) * 2048))
-#define CVMX_AGL_GMX_TXX_STAT3(offset) \
- CVMX_ADD_IO_SEG(0x00011800E0000298ull + (((offset) & 1) * 2048))
-#define CVMX_AGL_GMX_TXX_STAT4(offset) \
- CVMX_ADD_IO_SEG(0x00011800E00002A0ull + (((offset) & 1) * 2048))
-#define CVMX_AGL_GMX_TXX_STAT5(offset) \
- CVMX_ADD_IO_SEG(0x00011800E00002A8ull + (((offset) & 1) * 2048))
-#define CVMX_AGL_GMX_TXX_STAT6(offset) \
- CVMX_ADD_IO_SEG(0x00011800E00002B0ull + (((offset) & 1) * 2048))
-#define CVMX_AGL_GMX_TXX_STAT7(offset) \
- CVMX_ADD_IO_SEG(0x00011800E00002B8ull + (((offset) & 1) * 2048))
-#define CVMX_AGL_GMX_TXX_STAT8(offset) \
- CVMX_ADD_IO_SEG(0x00011800E00002C0ull + (((offset) & 1) * 2048))
-#define CVMX_AGL_GMX_TXX_STAT9(offset) \
- CVMX_ADD_IO_SEG(0x00011800E00002C8ull + (((offset) & 1) * 2048))
-#define CVMX_AGL_GMX_TXX_STATS_CTL(offset) \
- CVMX_ADD_IO_SEG(0x00011800E0000268ull + (((offset) & 1) * 2048))
-#define CVMX_AGL_GMX_TXX_THRESH(offset) \
- CVMX_ADD_IO_SEG(0x00011800E0000210ull + (((offset) & 1) * 2048))
-#define CVMX_AGL_GMX_TX_BP \
- CVMX_ADD_IO_SEG(0x00011800E00004D0ull)
-#define CVMX_AGL_GMX_TX_COL_ATTEMPT \
- CVMX_ADD_IO_SEG(0x00011800E0000498ull)
-#define CVMX_AGL_GMX_TX_IFG \
- CVMX_ADD_IO_SEG(0x00011800E0000488ull)
-#define CVMX_AGL_GMX_TX_INT_EN \
- CVMX_ADD_IO_SEG(0x00011800E0000508ull)
-#define CVMX_AGL_GMX_TX_INT_REG \
- CVMX_ADD_IO_SEG(0x00011800E0000500ull)
-#define CVMX_AGL_GMX_TX_JAM \
- CVMX_ADD_IO_SEG(0x00011800E0000490ull)
-#define CVMX_AGL_GMX_TX_LFSR \
- CVMX_ADD_IO_SEG(0x00011800E00004F8ull)
-#define CVMX_AGL_GMX_TX_OVR_BP \
- CVMX_ADD_IO_SEG(0x00011800E00004C8ull)
-#define CVMX_AGL_GMX_TX_PAUSE_PKT_DMAC \
- CVMX_ADD_IO_SEG(0x00011800E00004A0ull)
-#define CVMX_AGL_GMX_TX_PAUSE_PKT_TYPE \
- CVMX_ADD_IO_SEG(0x00011800E00004A8ull)
+#define CVMX_AGL_GMX_BAD_REG (CVMX_ADD_IO_SEG(0x00011800E0000518ull))
+#define CVMX_AGL_GMX_BIST (CVMX_ADD_IO_SEG(0x00011800E0000400ull))
+#define CVMX_AGL_GMX_DRV_CTL (CVMX_ADD_IO_SEG(0x00011800E00007F0ull))
+#define CVMX_AGL_GMX_INF_MODE (CVMX_ADD_IO_SEG(0x00011800E00007F8ull))
+#define CVMX_AGL_GMX_PRTX_CFG(offset) (CVMX_ADD_IO_SEG(0x00011800E0000010ull) + ((offset) & 1) * 2048)
+#define CVMX_AGL_GMX_RXX_ADR_CAM0(offset) (CVMX_ADD_IO_SEG(0x00011800E0000180ull) + ((offset) & 1) * 2048)
+#define CVMX_AGL_GMX_RXX_ADR_CAM1(offset) (CVMX_ADD_IO_SEG(0x00011800E0000188ull) + ((offset) & 1) * 2048)
+#define CVMX_AGL_GMX_RXX_ADR_CAM2(offset) (CVMX_ADD_IO_SEG(0x00011800E0000190ull) + ((offset) & 1) * 2048)
+#define CVMX_AGL_GMX_RXX_ADR_CAM3(offset) (CVMX_ADD_IO_SEG(0x00011800E0000198ull) + ((offset) & 1) * 2048)
+#define CVMX_AGL_GMX_RXX_ADR_CAM4(offset) (CVMX_ADD_IO_SEG(0x00011800E00001A0ull) + ((offset) & 1) * 2048)
+#define CVMX_AGL_GMX_RXX_ADR_CAM5(offset) (CVMX_ADD_IO_SEG(0x00011800E00001A8ull) + ((offset) & 1) * 2048)
+#define CVMX_AGL_GMX_RXX_ADR_CAM_EN(offset) (CVMX_ADD_IO_SEG(0x00011800E0000108ull) + ((offset) & 1) * 2048)
+#define CVMX_AGL_GMX_RXX_ADR_CTL(offset) (CVMX_ADD_IO_SEG(0x00011800E0000100ull) + ((offset) & 1) * 2048)
+#define CVMX_AGL_GMX_RXX_DECISION(offset) (CVMX_ADD_IO_SEG(0x00011800E0000040ull) + ((offset) & 1) * 2048)
+#define CVMX_AGL_GMX_RXX_FRM_CHK(offset) (CVMX_ADD_IO_SEG(0x00011800E0000020ull) + ((offset) & 1) * 2048)
+#define CVMX_AGL_GMX_RXX_FRM_CTL(offset) (CVMX_ADD_IO_SEG(0x00011800E0000018ull) + ((offset) & 1) * 2048)
+#define CVMX_AGL_GMX_RXX_FRM_MAX(offset) (CVMX_ADD_IO_SEG(0x00011800E0000030ull) + ((offset) & 1) * 2048)
+#define CVMX_AGL_GMX_RXX_FRM_MIN(offset) (CVMX_ADD_IO_SEG(0x00011800E0000028ull) + ((offset) & 1) * 2048)
+#define CVMX_AGL_GMX_RXX_IFG(offset) (CVMX_ADD_IO_SEG(0x00011800E0000058ull) + ((offset) & 1) * 2048)
+#define CVMX_AGL_GMX_RXX_INT_EN(offset) (CVMX_ADD_IO_SEG(0x00011800E0000008ull) + ((offset) & 1) * 2048)
+#define CVMX_AGL_GMX_RXX_INT_REG(offset) (CVMX_ADD_IO_SEG(0x00011800E0000000ull) + ((offset) & 1) * 2048)
+#define CVMX_AGL_GMX_RXX_JABBER(offset) (CVMX_ADD_IO_SEG(0x00011800E0000038ull) + ((offset) & 1) * 2048)
+#define CVMX_AGL_GMX_RXX_PAUSE_DROP_TIME(offset) (CVMX_ADD_IO_SEG(0x00011800E0000068ull) + ((offset) & 1) * 2048)
+#define CVMX_AGL_GMX_RXX_RX_INBND(offset) (CVMX_ADD_IO_SEG(0x00011800E0000060ull) + ((offset) & 1) * 2048)
+#define CVMX_AGL_GMX_RXX_STATS_CTL(offset) (CVMX_ADD_IO_SEG(0x00011800E0000050ull) + ((offset) & 1) * 2048)
+#define CVMX_AGL_GMX_RXX_STATS_OCTS(offset) (CVMX_ADD_IO_SEG(0x00011800E0000088ull) + ((offset) & 1) * 2048)
+#define CVMX_AGL_GMX_RXX_STATS_OCTS_CTL(offset) (CVMX_ADD_IO_SEG(0x00011800E0000098ull) + ((offset) & 1) * 2048)
+#define CVMX_AGL_GMX_RXX_STATS_OCTS_DMAC(offset) (CVMX_ADD_IO_SEG(0x00011800E00000A8ull) + ((offset) & 1) * 2048)
+#define CVMX_AGL_GMX_RXX_STATS_OCTS_DRP(offset) (CVMX_ADD_IO_SEG(0x00011800E00000B8ull) + ((offset) & 1) * 2048)
+#define CVMX_AGL_GMX_RXX_STATS_PKTS(offset) (CVMX_ADD_IO_SEG(0x00011800E0000080ull) + ((offset) & 1) * 2048)
+#define CVMX_AGL_GMX_RXX_STATS_PKTS_BAD(offset) (CVMX_ADD_IO_SEG(0x00011800E00000C0ull) + ((offset) & 1) * 2048)
+#define CVMX_AGL_GMX_RXX_STATS_PKTS_CTL(offset) (CVMX_ADD_IO_SEG(0x00011800E0000090ull) + ((offset) & 1) * 2048)
+#define CVMX_AGL_GMX_RXX_STATS_PKTS_DMAC(offset) (CVMX_ADD_IO_SEG(0x00011800E00000A0ull) + ((offset) & 1) * 2048)
+#define CVMX_AGL_GMX_RXX_STATS_PKTS_DRP(offset) (CVMX_ADD_IO_SEG(0x00011800E00000B0ull) + ((offset) & 1) * 2048)
+#define CVMX_AGL_GMX_RXX_UDD_SKP(offset) (CVMX_ADD_IO_SEG(0x00011800E0000048ull) + ((offset) & 1) * 2048)
+#define CVMX_AGL_GMX_RX_BP_DROPX(offset) (CVMX_ADD_IO_SEG(0x00011800E0000420ull) + ((offset) & 1) * 8)
+#define CVMX_AGL_GMX_RX_BP_OFFX(offset) (CVMX_ADD_IO_SEG(0x00011800E0000460ull) + ((offset) & 1) * 8)
+#define CVMX_AGL_GMX_RX_BP_ONX(offset) (CVMX_ADD_IO_SEG(0x00011800E0000440ull) + ((offset) & 1) * 8)
+#define CVMX_AGL_GMX_RX_PRT_INFO (CVMX_ADD_IO_SEG(0x00011800E00004E8ull))
+#define CVMX_AGL_GMX_RX_TX_STATUS (CVMX_ADD_IO_SEG(0x00011800E00007E8ull))
+#define CVMX_AGL_GMX_SMACX(offset) (CVMX_ADD_IO_SEG(0x00011800E0000230ull) + ((offset) & 1) * 2048)
+#define CVMX_AGL_GMX_STAT_BP (CVMX_ADD_IO_SEG(0x00011800E0000520ull))
+#define CVMX_AGL_GMX_TXX_APPEND(offset) (CVMX_ADD_IO_SEG(0x00011800E0000218ull) + ((offset) & 1) * 2048)
+#define CVMX_AGL_GMX_TXX_CLK(offset) (CVMX_ADD_IO_SEG(0x00011800E0000208ull) + ((offset) & 1) * 2048)
+#define CVMX_AGL_GMX_TXX_CTL(offset) (CVMX_ADD_IO_SEG(0x00011800E0000270ull) + ((offset) & 1) * 2048)
+#define CVMX_AGL_GMX_TXX_MIN_PKT(offset) (CVMX_ADD_IO_SEG(0x00011800E0000240ull) + ((offset) & 1) * 2048)
+#define CVMX_AGL_GMX_TXX_PAUSE_PKT_INTERVAL(offset) (CVMX_ADD_IO_SEG(0x00011800E0000248ull) + ((offset) & 1) * 2048)
+#define CVMX_AGL_GMX_TXX_PAUSE_PKT_TIME(offset) (CVMX_ADD_IO_SEG(0x00011800E0000238ull) + ((offset) & 1) * 2048)
+#define CVMX_AGL_GMX_TXX_PAUSE_TOGO(offset) (CVMX_ADD_IO_SEG(0x00011800E0000258ull) + ((offset) & 1) * 2048)
+#define CVMX_AGL_GMX_TXX_PAUSE_ZERO(offset) (CVMX_ADD_IO_SEG(0x00011800E0000260ull) + ((offset) & 1) * 2048)
+#define CVMX_AGL_GMX_TXX_SOFT_PAUSE(offset) (CVMX_ADD_IO_SEG(0x00011800E0000250ull) + ((offset) & 1) * 2048)
+#define CVMX_AGL_GMX_TXX_STAT0(offset) (CVMX_ADD_IO_SEG(0x00011800E0000280ull) + ((offset) & 1) * 2048)
+#define CVMX_AGL_GMX_TXX_STAT1(offset) (CVMX_ADD_IO_SEG(0x00011800E0000288ull) + ((offset) & 1) * 2048)
+#define CVMX_AGL_GMX_TXX_STAT2(offset) (CVMX_ADD_IO_SEG(0x00011800E0000290ull) + ((offset) & 1) * 2048)
+#define CVMX_AGL_GMX_TXX_STAT3(offset) (CVMX_ADD_IO_SEG(0x00011800E0000298ull) + ((offset) & 1) * 2048)
+#define CVMX_AGL_GMX_TXX_STAT4(offset) (CVMX_ADD_IO_SEG(0x00011800E00002A0ull) + ((offset) & 1) * 2048)
+#define CVMX_AGL_GMX_TXX_STAT5(offset) (CVMX_ADD_IO_SEG(0x00011800E00002A8ull) + ((offset) & 1) * 2048)
+#define CVMX_AGL_GMX_TXX_STAT6(offset) (CVMX_ADD_IO_SEG(0x00011800E00002B0ull) + ((offset) & 1) * 2048)
+#define CVMX_AGL_GMX_TXX_STAT7(offset) (CVMX_ADD_IO_SEG(0x00011800E00002B8ull) + ((offset) & 1) * 2048)
+#define CVMX_AGL_GMX_TXX_STAT8(offset) (CVMX_ADD_IO_SEG(0x00011800E00002C0ull) + ((offset) & 1) * 2048)
+#define CVMX_AGL_GMX_TXX_STAT9(offset) (CVMX_ADD_IO_SEG(0x00011800E00002C8ull) + ((offset) & 1) * 2048)
+#define CVMX_AGL_GMX_TXX_STATS_CTL(offset) (CVMX_ADD_IO_SEG(0x00011800E0000268ull) + ((offset) & 1) * 2048)
+#define CVMX_AGL_GMX_TXX_THRESH(offset) (CVMX_ADD_IO_SEG(0x00011800E0000210ull) + ((offset) & 1) * 2048)
+#define CVMX_AGL_GMX_TX_BP (CVMX_ADD_IO_SEG(0x00011800E00004D0ull))
+#define CVMX_AGL_GMX_TX_COL_ATTEMPT (CVMX_ADD_IO_SEG(0x00011800E0000498ull))
+#define CVMX_AGL_GMX_TX_IFG (CVMX_ADD_IO_SEG(0x00011800E0000488ull))
+#define CVMX_AGL_GMX_TX_INT_EN (CVMX_ADD_IO_SEG(0x00011800E0000508ull))
+#define CVMX_AGL_GMX_TX_INT_REG (CVMX_ADD_IO_SEG(0x00011800E0000500ull))
+#define CVMX_AGL_GMX_TX_JAM (CVMX_ADD_IO_SEG(0x00011800E0000490ull))
+#define CVMX_AGL_GMX_TX_LFSR (CVMX_ADD_IO_SEG(0x00011800E00004F8ull))
+#define CVMX_AGL_GMX_TX_OVR_BP (CVMX_ADD_IO_SEG(0x00011800E00004C8ull))
+#define CVMX_AGL_GMX_TX_PAUSE_PKT_DMAC (CVMX_ADD_IO_SEG(0x00011800E00004A0ull))
+#define CVMX_AGL_GMX_TX_PAUSE_PKT_TYPE (CVMX_ADD_IO_SEG(0x00011800E00004A8ull))
+#define CVMX_AGL_PRTX_CTL(offset) (CVMX_ADD_IO_SEG(0x00011800E0002000ull) + ((offset) & 1) * 8)
union cvmx_agl_gmx_bad_reg {
uint64_t u64;
@@ -183,14 +115,29 @@ union cvmx_agl_gmx_bad_reg {
uint64_t ovrflw:1;
uint64_t reserved_27_31:5;
uint64_t statovr:1;
+ uint64_t reserved_24_25:2;
+ uint64_t loststat:2;
+ uint64_t reserved_4_21:18;
+ uint64_t out_ovr:2;
+ uint64_t reserved_0_1:2;
+ } s;
+ struct cvmx_agl_gmx_bad_reg_cn52xx {
+ uint64_t reserved_38_63:26;
+ uint64_t txpsh1:1;
+ uint64_t txpop1:1;
+ uint64_t ovrflw1:1;
+ uint64_t txpsh:1;
+ uint64_t txpop:1;
+ uint64_t ovrflw:1;
+ uint64_t reserved_27_31:5;
+ uint64_t statovr:1;
uint64_t reserved_23_25:3;
uint64_t loststat:1;
uint64_t reserved_4_21:18;
uint64_t out_ovr:2;
uint64_t reserved_0_1:2;
- } s;
- struct cvmx_agl_gmx_bad_reg_s cn52xx;
- struct cvmx_agl_gmx_bad_reg_s cn52xxp1;
+ } cn52xx;
+ struct cvmx_agl_gmx_bad_reg_cn52xx cn52xxp1;
struct cvmx_agl_gmx_bad_reg_cn56xx {
uint64_t reserved_35_63:29;
uint64_t txpsh:1;
@@ -205,18 +152,25 @@ union cvmx_agl_gmx_bad_reg {
uint64_t reserved_0_1:2;
} cn56xx;
struct cvmx_agl_gmx_bad_reg_cn56xx cn56xxp1;
+ struct cvmx_agl_gmx_bad_reg_s cn63xx;
+ struct cvmx_agl_gmx_bad_reg_s cn63xxp1;
};
union cvmx_agl_gmx_bist {
uint64_t u64;
struct cvmx_agl_gmx_bist_s {
+ uint64_t reserved_25_63:39;
+ uint64_t status:25;
+ } s;
+ struct cvmx_agl_gmx_bist_cn52xx {
uint64_t reserved_10_63:54;
uint64_t status:10;
- } s;
- struct cvmx_agl_gmx_bist_s cn52xx;
- struct cvmx_agl_gmx_bist_s cn52xxp1;
- struct cvmx_agl_gmx_bist_s cn56xx;
- struct cvmx_agl_gmx_bist_s cn56xxp1;
+ } cn52xx;
+ struct cvmx_agl_gmx_bist_cn52xx cn52xxp1;
+ struct cvmx_agl_gmx_bist_cn52xx cn56xx;
+ struct cvmx_agl_gmx_bist_cn52xx cn56xxp1;
+ struct cvmx_agl_gmx_bist_s cn63xx;
+ struct cvmx_agl_gmx_bist_s cn63xxp1;
};
union cvmx_agl_gmx_drv_ctl {
@@ -264,7 +218,13 @@ union cvmx_agl_gmx_inf_mode {
union cvmx_agl_gmx_prtx_cfg {
uint64_t u64;
struct cvmx_agl_gmx_prtx_cfg_s {
- uint64_t reserved_6_63:58;
+ uint64_t reserved_14_63:50;
+ uint64_t tx_idle:1;
+ uint64_t rx_idle:1;
+ uint64_t reserved_9_11:3;
+ uint64_t speed_msb:1;
+ uint64_t reserved_7_7:1;
+ uint64_t burst:1;
uint64_t tx_en:1;
uint64_t rx_en:1;
uint64_t slottime:1;
@@ -272,10 +232,20 @@ union cvmx_agl_gmx_prtx_cfg {
uint64_t speed:1;
uint64_t en:1;
} s;
- struct cvmx_agl_gmx_prtx_cfg_s cn52xx;
- struct cvmx_agl_gmx_prtx_cfg_s cn52xxp1;
- struct cvmx_agl_gmx_prtx_cfg_s cn56xx;
- struct cvmx_agl_gmx_prtx_cfg_s cn56xxp1;
+ struct cvmx_agl_gmx_prtx_cfg_cn52xx {
+ uint64_t reserved_6_63:58;
+ uint64_t tx_en:1;
+ uint64_t rx_en:1;
+ uint64_t slottime:1;
+ uint64_t duplex:1;
+ uint64_t speed:1;
+ uint64_t en:1;
+ } cn52xx;
+ struct cvmx_agl_gmx_prtx_cfg_cn52xx cn52xxp1;
+ struct cvmx_agl_gmx_prtx_cfg_cn52xx cn56xx;
+ struct cvmx_agl_gmx_prtx_cfg_cn52xx cn56xxp1;
+ struct cvmx_agl_gmx_prtx_cfg_s cn63xx;
+ struct cvmx_agl_gmx_prtx_cfg_s cn63xxp1;
};
union cvmx_agl_gmx_rxx_adr_cam0 {
@@ -287,6 +257,8 @@ union cvmx_agl_gmx_rxx_adr_cam0 {
struct cvmx_agl_gmx_rxx_adr_cam0_s cn52xxp1;
struct cvmx_agl_gmx_rxx_adr_cam0_s cn56xx;
struct cvmx_agl_gmx_rxx_adr_cam0_s cn56xxp1;
+ struct cvmx_agl_gmx_rxx_adr_cam0_s cn63xx;
+ struct cvmx_agl_gmx_rxx_adr_cam0_s cn63xxp1;
};
union cvmx_agl_gmx_rxx_adr_cam1 {
@@ -298,6 +270,8 @@ union cvmx_agl_gmx_rxx_adr_cam1 {
struct cvmx_agl_gmx_rxx_adr_cam1_s cn52xxp1;
struct cvmx_agl_gmx_rxx_adr_cam1_s cn56xx;
struct cvmx_agl_gmx_rxx_adr_cam1_s cn56xxp1;
+ struct cvmx_agl_gmx_rxx_adr_cam1_s cn63xx;
+ struct cvmx_agl_gmx_rxx_adr_cam1_s cn63xxp1;
};
union cvmx_agl_gmx_rxx_adr_cam2 {
@@ -309,6 +283,8 @@ union cvmx_agl_gmx_rxx_adr_cam2 {
struct cvmx_agl_gmx_rxx_adr_cam2_s cn52xxp1;
struct cvmx_agl_gmx_rxx_adr_cam2_s cn56xx;
struct cvmx_agl_gmx_rxx_adr_cam2_s cn56xxp1;
+ struct cvmx_agl_gmx_rxx_adr_cam2_s cn63xx;
+ struct cvmx_agl_gmx_rxx_adr_cam2_s cn63xxp1;
};
union cvmx_agl_gmx_rxx_adr_cam3 {
@@ -320,6 +296,8 @@ union cvmx_agl_gmx_rxx_adr_cam3 {
struct cvmx_agl_gmx_rxx_adr_cam3_s cn52xxp1;
struct cvmx_agl_gmx_rxx_adr_cam3_s cn56xx;
struct cvmx_agl_gmx_rxx_adr_cam3_s cn56xxp1;
+ struct cvmx_agl_gmx_rxx_adr_cam3_s cn63xx;
+ struct cvmx_agl_gmx_rxx_adr_cam3_s cn63xxp1;
};
union cvmx_agl_gmx_rxx_adr_cam4 {
@@ -331,6 +309,8 @@ union cvmx_agl_gmx_rxx_adr_cam4 {
struct cvmx_agl_gmx_rxx_adr_cam4_s cn52xxp1;
struct cvmx_agl_gmx_rxx_adr_cam4_s cn56xx;
struct cvmx_agl_gmx_rxx_adr_cam4_s cn56xxp1;
+ struct cvmx_agl_gmx_rxx_adr_cam4_s cn63xx;
+ struct cvmx_agl_gmx_rxx_adr_cam4_s cn63xxp1;
};
union cvmx_agl_gmx_rxx_adr_cam5 {
@@ -342,6 +322,8 @@ union cvmx_agl_gmx_rxx_adr_cam5 {
struct cvmx_agl_gmx_rxx_adr_cam5_s cn52xxp1;
struct cvmx_agl_gmx_rxx_adr_cam5_s cn56xx;
struct cvmx_agl_gmx_rxx_adr_cam5_s cn56xxp1;
+ struct cvmx_agl_gmx_rxx_adr_cam5_s cn63xx;
+ struct cvmx_agl_gmx_rxx_adr_cam5_s cn63xxp1;
};
union cvmx_agl_gmx_rxx_adr_cam_en {
@@ -354,6 +336,8 @@ union cvmx_agl_gmx_rxx_adr_cam_en {
struct cvmx_agl_gmx_rxx_adr_cam_en_s cn52xxp1;
struct cvmx_agl_gmx_rxx_adr_cam_en_s cn56xx;
struct cvmx_agl_gmx_rxx_adr_cam_en_s cn56xxp1;
+ struct cvmx_agl_gmx_rxx_adr_cam_en_s cn63xx;
+ struct cvmx_agl_gmx_rxx_adr_cam_en_s cn63xxp1;
};
union cvmx_agl_gmx_rxx_adr_ctl {
@@ -368,6 +352,8 @@ union cvmx_agl_gmx_rxx_adr_ctl {
struct cvmx_agl_gmx_rxx_adr_ctl_s cn52xxp1;
struct cvmx_agl_gmx_rxx_adr_ctl_s cn56xx;
struct cvmx_agl_gmx_rxx_adr_ctl_s cn56xxp1;
+ struct cvmx_agl_gmx_rxx_adr_ctl_s cn63xx;
+ struct cvmx_agl_gmx_rxx_adr_ctl_s cn63xxp1;
};
union cvmx_agl_gmx_rxx_decision {
@@ -380,11 +366,26 @@ union cvmx_agl_gmx_rxx_decision {
struct cvmx_agl_gmx_rxx_decision_s cn52xxp1;
struct cvmx_agl_gmx_rxx_decision_s cn56xx;
struct cvmx_agl_gmx_rxx_decision_s cn56xxp1;
+ struct cvmx_agl_gmx_rxx_decision_s cn63xx;
+ struct cvmx_agl_gmx_rxx_decision_s cn63xxp1;
};
union cvmx_agl_gmx_rxx_frm_chk {
uint64_t u64;
struct cvmx_agl_gmx_rxx_frm_chk_s {
+ uint64_t reserved_10_63:54;
+ uint64_t niberr:1;
+ uint64_t skperr:1;
+ uint64_t rcverr:1;
+ uint64_t lenerr:1;
+ uint64_t alnerr:1;
+ uint64_t fcserr:1;
+ uint64_t jabber:1;
+ uint64_t maxerr:1;
+ uint64_t carext:1;
+ uint64_t minerr:1;
+ } s;
+ struct cvmx_agl_gmx_rxx_frm_chk_cn52xx {
uint64_t reserved_9_63:55;
uint64_t skperr:1;
uint64_t rcverr:1;
@@ -395,17 +396,21 @@ union cvmx_agl_gmx_rxx_frm_chk {
uint64_t maxerr:1;
uint64_t reserved_1_1:1;
uint64_t minerr:1;
- } s;
- struct cvmx_agl_gmx_rxx_frm_chk_s cn52xx;
- struct cvmx_agl_gmx_rxx_frm_chk_s cn52xxp1;
- struct cvmx_agl_gmx_rxx_frm_chk_s cn56xx;
- struct cvmx_agl_gmx_rxx_frm_chk_s cn56xxp1;
+ } cn52xx;
+ struct cvmx_agl_gmx_rxx_frm_chk_cn52xx cn52xxp1;
+ struct cvmx_agl_gmx_rxx_frm_chk_cn52xx cn56xx;
+ struct cvmx_agl_gmx_rxx_frm_chk_cn52xx cn56xxp1;
+ struct cvmx_agl_gmx_rxx_frm_chk_s cn63xx;
+ struct cvmx_agl_gmx_rxx_frm_chk_s cn63xxp1;
};
union cvmx_agl_gmx_rxx_frm_ctl {
uint64_t u64;
struct cvmx_agl_gmx_rxx_frm_ctl_s {
- uint64_t reserved_10_63:54;
+ uint64_t reserved_13_63:51;
+ uint64_t ptp_mode:1;
+ uint64_t reserved_11_11:1;
+ uint64_t null_dis:1;
uint64_t pre_align:1;
uint64_t pad_len:1;
uint64_t vlan_len:1;
@@ -417,10 +422,24 @@ union cvmx_agl_gmx_rxx_frm_ctl {
uint64_t pre_strp:1;
uint64_t pre_chk:1;
} s;
- struct cvmx_agl_gmx_rxx_frm_ctl_s cn52xx;
- struct cvmx_agl_gmx_rxx_frm_ctl_s cn52xxp1;
- struct cvmx_agl_gmx_rxx_frm_ctl_s cn56xx;
- struct cvmx_agl_gmx_rxx_frm_ctl_s cn56xxp1;
+ struct cvmx_agl_gmx_rxx_frm_ctl_cn52xx {
+ uint64_t reserved_10_63:54;
+ uint64_t pre_align:1;
+ uint64_t pad_len:1;
+ uint64_t vlan_len:1;
+ uint64_t pre_free:1;
+ uint64_t ctl_smac:1;
+ uint64_t ctl_mcst:1;
+ uint64_t ctl_bck:1;
+ uint64_t ctl_drp:1;
+ uint64_t pre_strp:1;
+ uint64_t pre_chk:1;
+ } cn52xx;
+ struct cvmx_agl_gmx_rxx_frm_ctl_cn52xx cn52xxp1;
+ struct cvmx_agl_gmx_rxx_frm_ctl_cn52xx cn56xx;
+ struct cvmx_agl_gmx_rxx_frm_ctl_cn52xx cn56xxp1;
+ struct cvmx_agl_gmx_rxx_frm_ctl_s cn63xx;
+ struct cvmx_agl_gmx_rxx_frm_ctl_s cn63xxp1;
};
union cvmx_agl_gmx_rxx_frm_max {
@@ -433,6 +452,8 @@ union cvmx_agl_gmx_rxx_frm_max {
struct cvmx_agl_gmx_rxx_frm_max_s cn52xxp1;
struct cvmx_agl_gmx_rxx_frm_max_s cn56xx;
struct cvmx_agl_gmx_rxx_frm_max_s cn56xxp1;
+ struct cvmx_agl_gmx_rxx_frm_max_s cn63xx;
+ struct cvmx_agl_gmx_rxx_frm_max_s cn63xxp1;
};
union cvmx_agl_gmx_rxx_frm_min {
@@ -445,6 +466,8 @@ union cvmx_agl_gmx_rxx_frm_min {
struct cvmx_agl_gmx_rxx_frm_min_s cn52xxp1;
struct cvmx_agl_gmx_rxx_frm_min_s cn56xx;
struct cvmx_agl_gmx_rxx_frm_min_s cn56xxp1;
+ struct cvmx_agl_gmx_rxx_frm_min_s cn63xx;
+ struct cvmx_agl_gmx_rxx_frm_min_s cn63xxp1;
};
union cvmx_agl_gmx_rxx_ifg {
@@ -457,6 +480,8 @@ union cvmx_agl_gmx_rxx_ifg {
struct cvmx_agl_gmx_rxx_ifg_s cn52xxp1;
struct cvmx_agl_gmx_rxx_ifg_s cn56xx;
struct cvmx_agl_gmx_rxx_ifg_s cn56xxp1;
+ struct cvmx_agl_gmx_rxx_ifg_s cn63xx;
+ struct cvmx_agl_gmx_rxx_ifg_s cn63xxp1;
};
union cvmx_agl_gmx_rxx_int_en {
@@ -464,6 +489,29 @@ union cvmx_agl_gmx_rxx_int_en {
struct cvmx_agl_gmx_rxx_int_en_s {
uint64_t reserved_20_63:44;
uint64_t pause_drp:1;
+ uint64_t phy_dupx:1;
+ uint64_t phy_spd:1;
+ uint64_t phy_link:1;
+ uint64_t ifgerr:1;
+ uint64_t coldet:1;
+ uint64_t falerr:1;
+ uint64_t rsverr:1;
+ uint64_t pcterr:1;
+ uint64_t ovrerr:1;
+ uint64_t niberr:1;
+ uint64_t skperr:1;
+ uint64_t rcverr:1;
+ uint64_t lenerr:1;
+ uint64_t alnerr:1;
+ uint64_t fcserr:1;
+ uint64_t jabber:1;
+ uint64_t maxerr:1;
+ uint64_t carext:1;
+ uint64_t minerr:1;
+ } s;
+ struct cvmx_agl_gmx_rxx_int_en_cn52xx {
+ uint64_t reserved_20_63:44;
+ uint64_t pause_drp:1;
uint64_t reserved_16_18:3;
uint64_t ifgerr:1;
uint64_t coldet:1;
@@ -481,11 +529,12 @@ union cvmx_agl_gmx_rxx_int_en {
uint64_t maxerr:1;
uint64_t reserved_1_1:1;
uint64_t minerr:1;
- } s;
- struct cvmx_agl_gmx_rxx_int_en_s cn52xx;
- struct cvmx_agl_gmx_rxx_int_en_s cn52xxp1;
- struct cvmx_agl_gmx_rxx_int_en_s cn56xx;
- struct cvmx_agl_gmx_rxx_int_en_s cn56xxp1;
+ } cn52xx;
+ struct cvmx_agl_gmx_rxx_int_en_cn52xx cn52xxp1;
+ struct cvmx_agl_gmx_rxx_int_en_cn52xx cn56xx;
+ struct cvmx_agl_gmx_rxx_int_en_cn52xx cn56xxp1;
+ struct cvmx_agl_gmx_rxx_int_en_s cn63xx;
+ struct cvmx_agl_gmx_rxx_int_en_s cn63xxp1;
};
union cvmx_agl_gmx_rxx_int_reg {
@@ -493,6 +542,29 @@ union cvmx_agl_gmx_rxx_int_reg {
struct cvmx_agl_gmx_rxx_int_reg_s {
uint64_t reserved_20_63:44;
uint64_t pause_drp:1;
+ uint64_t phy_dupx:1;
+ uint64_t phy_spd:1;
+ uint64_t phy_link:1;
+ uint64_t ifgerr:1;
+ uint64_t coldet:1;
+ uint64_t falerr:1;
+ uint64_t rsverr:1;
+ uint64_t pcterr:1;
+ uint64_t ovrerr:1;
+ uint64_t niberr:1;
+ uint64_t skperr:1;
+ uint64_t rcverr:1;
+ uint64_t lenerr:1;
+ uint64_t alnerr:1;
+ uint64_t fcserr:1;
+ uint64_t jabber:1;
+ uint64_t maxerr:1;
+ uint64_t carext:1;
+ uint64_t minerr:1;
+ } s;
+ struct cvmx_agl_gmx_rxx_int_reg_cn52xx {
+ uint64_t reserved_20_63:44;
+ uint64_t pause_drp:1;
uint64_t reserved_16_18:3;
uint64_t ifgerr:1;
uint64_t coldet:1;
@@ -510,11 +582,12 @@ union cvmx_agl_gmx_rxx_int_reg {
uint64_t maxerr:1;
uint64_t reserved_1_1:1;
uint64_t minerr:1;
- } s;
- struct cvmx_agl_gmx_rxx_int_reg_s cn52xx;
- struct cvmx_agl_gmx_rxx_int_reg_s cn52xxp1;
- struct cvmx_agl_gmx_rxx_int_reg_s cn56xx;
- struct cvmx_agl_gmx_rxx_int_reg_s cn56xxp1;
+ } cn52xx;
+ struct cvmx_agl_gmx_rxx_int_reg_cn52xx cn52xxp1;
+ struct cvmx_agl_gmx_rxx_int_reg_cn52xx cn56xx;
+ struct cvmx_agl_gmx_rxx_int_reg_cn52xx cn56xxp1;
+ struct cvmx_agl_gmx_rxx_int_reg_s cn63xx;
+ struct cvmx_agl_gmx_rxx_int_reg_s cn63xxp1;
};
union cvmx_agl_gmx_rxx_jabber {
@@ -527,6 +600,8 @@ union cvmx_agl_gmx_rxx_jabber {
struct cvmx_agl_gmx_rxx_jabber_s cn52xxp1;
struct cvmx_agl_gmx_rxx_jabber_s cn56xx;
struct cvmx_agl_gmx_rxx_jabber_s cn56xxp1;
+ struct cvmx_agl_gmx_rxx_jabber_s cn63xx;
+ struct cvmx_agl_gmx_rxx_jabber_s cn63xxp1;
};
union cvmx_agl_gmx_rxx_pause_drop_time {
@@ -539,6 +614,20 @@ union cvmx_agl_gmx_rxx_pause_drop_time {
struct cvmx_agl_gmx_rxx_pause_drop_time_s cn52xxp1;
struct cvmx_agl_gmx_rxx_pause_drop_time_s cn56xx;
struct cvmx_agl_gmx_rxx_pause_drop_time_s cn56xxp1;
+ struct cvmx_agl_gmx_rxx_pause_drop_time_s cn63xx;
+ struct cvmx_agl_gmx_rxx_pause_drop_time_s cn63xxp1;
+};
+
+union cvmx_agl_gmx_rxx_rx_inbnd {
+ uint64_t u64;
+ struct cvmx_agl_gmx_rxx_rx_inbnd_s {
+ uint64_t reserved_4_63:60;
+ uint64_t duplex:1;
+ uint64_t speed:2;
+ uint64_t status:1;
+ } s;
+ struct cvmx_agl_gmx_rxx_rx_inbnd_s cn63xx;
+ struct cvmx_agl_gmx_rxx_rx_inbnd_s cn63xxp1;
};
union cvmx_agl_gmx_rxx_stats_ctl {
@@ -551,6 +640,8 @@ union cvmx_agl_gmx_rxx_stats_ctl {
struct cvmx_agl_gmx_rxx_stats_ctl_s cn52xxp1;
struct cvmx_agl_gmx_rxx_stats_ctl_s cn56xx;
struct cvmx_agl_gmx_rxx_stats_ctl_s cn56xxp1;
+ struct cvmx_agl_gmx_rxx_stats_ctl_s cn63xx;
+ struct cvmx_agl_gmx_rxx_stats_ctl_s cn63xxp1;
};
union cvmx_agl_gmx_rxx_stats_octs {
@@ -563,6 +654,8 @@ union cvmx_agl_gmx_rxx_stats_octs {
struct cvmx_agl_gmx_rxx_stats_octs_s cn52xxp1;
struct cvmx_agl_gmx_rxx_stats_octs_s cn56xx;
struct cvmx_agl_gmx_rxx_stats_octs_s cn56xxp1;
+ struct cvmx_agl_gmx_rxx_stats_octs_s cn63xx;
+ struct cvmx_agl_gmx_rxx_stats_octs_s cn63xxp1;
};
union cvmx_agl_gmx_rxx_stats_octs_ctl {
@@ -575,6 +668,8 @@ union cvmx_agl_gmx_rxx_stats_octs_ctl {
struct cvmx_agl_gmx_rxx_stats_octs_ctl_s cn52xxp1;
struct cvmx_agl_gmx_rxx_stats_octs_ctl_s cn56xx;
struct cvmx_agl_gmx_rxx_stats_octs_ctl_s cn56xxp1;
+ struct cvmx_agl_gmx_rxx_stats_octs_ctl_s cn63xx;
+ struct cvmx_agl_gmx_rxx_stats_octs_ctl_s cn63xxp1;
};
union cvmx_agl_gmx_rxx_stats_octs_dmac {
@@ -587,6 +682,8 @@ union cvmx_agl_gmx_rxx_stats_octs_dmac {
struct cvmx_agl_gmx_rxx_stats_octs_dmac_s cn52xxp1;
struct cvmx_agl_gmx_rxx_stats_octs_dmac_s cn56xx;
struct cvmx_agl_gmx_rxx_stats_octs_dmac_s cn56xxp1;
+ struct cvmx_agl_gmx_rxx_stats_octs_dmac_s cn63xx;
+ struct cvmx_agl_gmx_rxx_stats_octs_dmac_s cn63xxp1;
};
union cvmx_agl_gmx_rxx_stats_octs_drp {
@@ -599,6 +696,8 @@ union cvmx_agl_gmx_rxx_stats_octs_drp {
struct cvmx_agl_gmx_rxx_stats_octs_drp_s cn52xxp1;
struct cvmx_agl_gmx_rxx_stats_octs_drp_s cn56xx;
struct cvmx_agl_gmx_rxx_stats_octs_drp_s cn56xxp1;
+ struct cvmx_agl_gmx_rxx_stats_octs_drp_s cn63xx;
+ struct cvmx_agl_gmx_rxx_stats_octs_drp_s cn63xxp1;
};
union cvmx_agl_gmx_rxx_stats_pkts {
@@ -611,6 +710,8 @@ union cvmx_agl_gmx_rxx_stats_pkts {
struct cvmx_agl_gmx_rxx_stats_pkts_s cn52xxp1;
struct cvmx_agl_gmx_rxx_stats_pkts_s cn56xx;
struct cvmx_agl_gmx_rxx_stats_pkts_s cn56xxp1;
+ struct cvmx_agl_gmx_rxx_stats_pkts_s cn63xx;
+ struct cvmx_agl_gmx_rxx_stats_pkts_s cn63xxp1;
};
union cvmx_agl_gmx_rxx_stats_pkts_bad {
@@ -623,6 +724,8 @@ union cvmx_agl_gmx_rxx_stats_pkts_bad {
struct cvmx_agl_gmx_rxx_stats_pkts_bad_s cn52xxp1;
struct cvmx_agl_gmx_rxx_stats_pkts_bad_s cn56xx;
struct cvmx_agl_gmx_rxx_stats_pkts_bad_s cn56xxp1;
+ struct cvmx_agl_gmx_rxx_stats_pkts_bad_s cn63xx;
+ struct cvmx_agl_gmx_rxx_stats_pkts_bad_s cn63xxp1;
};
union cvmx_agl_gmx_rxx_stats_pkts_ctl {
@@ -635,6 +738,8 @@ union cvmx_agl_gmx_rxx_stats_pkts_ctl {
struct cvmx_agl_gmx_rxx_stats_pkts_ctl_s cn52xxp1;
struct cvmx_agl_gmx_rxx_stats_pkts_ctl_s cn56xx;
struct cvmx_agl_gmx_rxx_stats_pkts_ctl_s cn56xxp1;
+ struct cvmx_agl_gmx_rxx_stats_pkts_ctl_s cn63xx;
+ struct cvmx_agl_gmx_rxx_stats_pkts_ctl_s cn63xxp1;
};
union cvmx_agl_gmx_rxx_stats_pkts_dmac {
@@ -647,6 +752,8 @@ union cvmx_agl_gmx_rxx_stats_pkts_dmac {
struct cvmx_agl_gmx_rxx_stats_pkts_dmac_s cn52xxp1;
struct cvmx_agl_gmx_rxx_stats_pkts_dmac_s cn56xx;
struct cvmx_agl_gmx_rxx_stats_pkts_dmac_s cn56xxp1;
+ struct cvmx_agl_gmx_rxx_stats_pkts_dmac_s cn63xx;
+ struct cvmx_agl_gmx_rxx_stats_pkts_dmac_s cn63xxp1;
};
union cvmx_agl_gmx_rxx_stats_pkts_drp {
@@ -659,6 +766,8 @@ union cvmx_agl_gmx_rxx_stats_pkts_drp {
struct cvmx_agl_gmx_rxx_stats_pkts_drp_s cn52xxp1;
struct cvmx_agl_gmx_rxx_stats_pkts_drp_s cn56xx;
struct cvmx_agl_gmx_rxx_stats_pkts_drp_s cn56xxp1;
+ struct cvmx_agl_gmx_rxx_stats_pkts_drp_s cn63xx;
+ struct cvmx_agl_gmx_rxx_stats_pkts_drp_s cn63xxp1;
};
union cvmx_agl_gmx_rxx_udd_skp {
@@ -673,6 +782,8 @@ union cvmx_agl_gmx_rxx_udd_skp {
struct cvmx_agl_gmx_rxx_udd_skp_s cn52xxp1;
struct cvmx_agl_gmx_rxx_udd_skp_s cn56xx;
struct cvmx_agl_gmx_rxx_udd_skp_s cn56xxp1;
+ struct cvmx_agl_gmx_rxx_udd_skp_s cn63xx;
+ struct cvmx_agl_gmx_rxx_udd_skp_s cn63xxp1;
};
union cvmx_agl_gmx_rx_bp_dropx {
@@ -685,6 +796,8 @@ union cvmx_agl_gmx_rx_bp_dropx {
struct cvmx_agl_gmx_rx_bp_dropx_s cn52xxp1;
struct cvmx_agl_gmx_rx_bp_dropx_s cn56xx;
struct cvmx_agl_gmx_rx_bp_dropx_s cn56xxp1;
+ struct cvmx_agl_gmx_rx_bp_dropx_s cn63xx;
+ struct cvmx_agl_gmx_rx_bp_dropx_s cn63xxp1;
};
union cvmx_agl_gmx_rx_bp_offx {
@@ -697,6 +810,8 @@ union cvmx_agl_gmx_rx_bp_offx {
struct cvmx_agl_gmx_rx_bp_offx_s cn52xxp1;
struct cvmx_agl_gmx_rx_bp_offx_s cn56xx;
struct cvmx_agl_gmx_rx_bp_offx_s cn56xxp1;
+ struct cvmx_agl_gmx_rx_bp_offx_s cn63xx;
+ struct cvmx_agl_gmx_rx_bp_offx_s cn63xxp1;
};
union cvmx_agl_gmx_rx_bp_onx {
@@ -709,6 +824,8 @@ union cvmx_agl_gmx_rx_bp_onx {
struct cvmx_agl_gmx_rx_bp_onx_s cn52xxp1;
struct cvmx_agl_gmx_rx_bp_onx_s cn56xx;
struct cvmx_agl_gmx_rx_bp_onx_s cn56xxp1;
+ struct cvmx_agl_gmx_rx_bp_onx_s cn63xx;
+ struct cvmx_agl_gmx_rx_bp_onx_s cn63xxp1;
};
union cvmx_agl_gmx_rx_prt_info {
@@ -728,6 +845,8 @@ union cvmx_agl_gmx_rx_prt_info {
uint64_t commit:1;
} cn56xx;
struct cvmx_agl_gmx_rx_prt_info_cn56xx cn56xxp1;
+ struct cvmx_agl_gmx_rx_prt_info_s cn63xx;
+ struct cvmx_agl_gmx_rx_prt_info_s cn63xxp1;
};
union cvmx_agl_gmx_rx_tx_status {
@@ -747,6 +866,8 @@ union cvmx_agl_gmx_rx_tx_status {
uint64_t rx:1;
} cn56xx;
struct cvmx_agl_gmx_rx_tx_status_cn56xx cn56xxp1;
+ struct cvmx_agl_gmx_rx_tx_status_s cn63xx;
+ struct cvmx_agl_gmx_rx_tx_status_s cn63xxp1;
};
union cvmx_agl_gmx_smacx {
@@ -759,6 +880,8 @@ union cvmx_agl_gmx_smacx {
struct cvmx_agl_gmx_smacx_s cn52xxp1;
struct cvmx_agl_gmx_smacx_s cn56xx;
struct cvmx_agl_gmx_smacx_s cn56xxp1;
+ struct cvmx_agl_gmx_smacx_s cn63xx;
+ struct cvmx_agl_gmx_smacx_s cn63xxp1;
};
union cvmx_agl_gmx_stat_bp {
@@ -772,6 +895,8 @@ union cvmx_agl_gmx_stat_bp {
struct cvmx_agl_gmx_stat_bp_s cn52xxp1;
struct cvmx_agl_gmx_stat_bp_s cn56xx;
struct cvmx_agl_gmx_stat_bp_s cn56xxp1;
+ struct cvmx_agl_gmx_stat_bp_s cn63xx;
+ struct cvmx_agl_gmx_stat_bp_s cn63xxp1;
};
union cvmx_agl_gmx_txx_append {
@@ -787,6 +912,18 @@ union cvmx_agl_gmx_txx_append {
struct cvmx_agl_gmx_txx_append_s cn52xxp1;
struct cvmx_agl_gmx_txx_append_s cn56xx;
struct cvmx_agl_gmx_txx_append_s cn56xxp1;
+ struct cvmx_agl_gmx_txx_append_s cn63xx;
+ struct cvmx_agl_gmx_txx_append_s cn63xxp1;
+};
+
+union cvmx_agl_gmx_txx_clk {
+ uint64_t u64;
+ struct cvmx_agl_gmx_txx_clk_s {
+ uint64_t reserved_6_63:58;
+ uint64_t clk_cnt:6;
+ } s;
+ struct cvmx_agl_gmx_txx_clk_s cn63xx;
+ struct cvmx_agl_gmx_txx_clk_s cn63xxp1;
};
union cvmx_agl_gmx_txx_ctl {
@@ -800,6 +937,8 @@ union cvmx_agl_gmx_txx_ctl {
struct cvmx_agl_gmx_txx_ctl_s cn52xxp1;
struct cvmx_agl_gmx_txx_ctl_s cn56xx;
struct cvmx_agl_gmx_txx_ctl_s cn56xxp1;
+ struct cvmx_agl_gmx_txx_ctl_s cn63xx;
+ struct cvmx_agl_gmx_txx_ctl_s cn63xxp1;
};
union cvmx_agl_gmx_txx_min_pkt {
@@ -812,6 +951,8 @@ union cvmx_agl_gmx_txx_min_pkt {
struct cvmx_agl_gmx_txx_min_pkt_s cn52xxp1;
struct cvmx_agl_gmx_txx_min_pkt_s cn56xx;
struct cvmx_agl_gmx_txx_min_pkt_s cn56xxp1;
+ struct cvmx_agl_gmx_txx_min_pkt_s cn63xx;
+ struct cvmx_agl_gmx_txx_min_pkt_s cn63xxp1;
};
union cvmx_agl_gmx_txx_pause_pkt_interval {
@@ -824,6 +965,8 @@ union cvmx_agl_gmx_txx_pause_pkt_interval {
struct cvmx_agl_gmx_txx_pause_pkt_interval_s cn52xxp1;
struct cvmx_agl_gmx_txx_pause_pkt_interval_s cn56xx;
struct cvmx_agl_gmx_txx_pause_pkt_interval_s cn56xxp1;
+ struct cvmx_agl_gmx_txx_pause_pkt_interval_s cn63xx;
+ struct cvmx_agl_gmx_txx_pause_pkt_interval_s cn63xxp1;
};
union cvmx_agl_gmx_txx_pause_pkt_time {
@@ -836,6 +979,8 @@ union cvmx_agl_gmx_txx_pause_pkt_time {
struct cvmx_agl_gmx_txx_pause_pkt_time_s cn52xxp1;
struct cvmx_agl_gmx_txx_pause_pkt_time_s cn56xx;
struct cvmx_agl_gmx_txx_pause_pkt_time_s cn56xxp1;
+ struct cvmx_agl_gmx_txx_pause_pkt_time_s cn63xx;
+ struct cvmx_agl_gmx_txx_pause_pkt_time_s cn63xxp1;
};
union cvmx_agl_gmx_txx_pause_togo {
@@ -848,6 +993,8 @@ union cvmx_agl_gmx_txx_pause_togo {
struct cvmx_agl_gmx_txx_pause_togo_s cn52xxp1;
struct cvmx_agl_gmx_txx_pause_togo_s cn56xx;
struct cvmx_agl_gmx_txx_pause_togo_s cn56xxp1;
+ struct cvmx_agl_gmx_txx_pause_togo_s cn63xx;
+ struct cvmx_agl_gmx_txx_pause_togo_s cn63xxp1;
};
union cvmx_agl_gmx_txx_pause_zero {
@@ -860,6 +1007,8 @@ union cvmx_agl_gmx_txx_pause_zero {
struct cvmx_agl_gmx_txx_pause_zero_s cn52xxp1;
struct cvmx_agl_gmx_txx_pause_zero_s cn56xx;
struct cvmx_agl_gmx_txx_pause_zero_s cn56xxp1;
+ struct cvmx_agl_gmx_txx_pause_zero_s cn63xx;
+ struct cvmx_agl_gmx_txx_pause_zero_s cn63xxp1;
};
union cvmx_agl_gmx_txx_soft_pause {
@@ -872,6 +1021,8 @@ union cvmx_agl_gmx_txx_soft_pause {
struct cvmx_agl_gmx_txx_soft_pause_s cn52xxp1;
struct cvmx_agl_gmx_txx_soft_pause_s cn56xx;
struct cvmx_agl_gmx_txx_soft_pause_s cn56xxp1;
+ struct cvmx_agl_gmx_txx_soft_pause_s cn63xx;
+ struct cvmx_agl_gmx_txx_soft_pause_s cn63xxp1;
};
union cvmx_agl_gmx_txx_stat0 {
@@ -884,6 +1035,8 @@ union cvmx_agl_gmx_txx_stat0 {
struct cvmx_agl_gmx_txx_stat0_s cn52xxp1;
struct cvmx_agl_gmx_txx_stat0_s cn56xx;
struct cvmx_agl_gmx_txx_stat0_s cn56xxp1;
+ struct cvmx_agl_gmx_txx_stat0_s cn63xx;
+ struct cvmx_agl_gmx_txx_stat0_s cn63xxp1;
};
union cvmx_agl_gmx_txx_stat1 {
@@ -896,6 +1049,8 @@ union cvmx_agl_gmx_txx_stat1 {
struct cvmx_agl_gmx_txx_stat1_s cn52xxp1;
struct cvmx_agl_gmx_txx_stat1_s cn56xx;
struct cvmx_agl_gmx_txx_stat1_s cn56xxp1;
+ struct cvmx_agl_gmx_txx_stat1_s cn63xx;
+ struct cvmx_agl_gmx_txx_stat1_s cn63xxp1;
};
union cvmx_agl_gmx_txx_stat2 {
@@ -908,6 +1063,8 @@ union cvmx_agl_gmx_txx_stat2 {
struct cvmx_agl_gmx_txx_stat2_s cn52xxp1;
struct cvmx_agl_gmx_txx_stat2_s cn56xx;
struct cvmx_agl_gmx_txx_stat2_s cn56xxp1;
+ struct cvmx_agl_gmx_txx_stat2_s cn63xx;
+ struct cvmx_agl_gmx_txx_stat2_s cn63xxp1;
};
union cvmx_agl_gmx_txx_stat3 {
@@ -920,6 +1077,8 @@ union cvmx_agl_gmx_txx_stat3 {
struct cvmx_agl_gmx_txx_stat3_s cn52xxp1;
struct cvmx_agl_gmx_txx_stat3_s cn56xx;
struct cvmx_agl_gmx_txx_stat3_s cn56xxp1;
+ struct cvmx_agl_gmx_txx_stat3_s cn63xx;
+ struct cvmx_agl_gmx_txx_stat3_s cn63xxp1;
};
union cvmx_agl_gmx_txx_stat4 {
@@ -932,6 +1091,8 @@ union cvmx_agl_gmx_txx_stat4 {
struct cvmx_agl_gmx_txx_stat4_s cn52xxp1;
struct cvmx_agl_gmx_txx_stat4_s cn56xx;
struct cvmx_agl_gmx_txx_stat4_s cn56xxp1;
+ struct cvmx_agl_gmx_txx_stat4_s cn63xx;
+ struct cvmx_agl_gmx_txx_stat4_s cn63xxp1;
};
union cvmx_agl_gmx_txx_stat5 {
@@ -944,6 +1105,8 @@ union cvmx_agl_gmx_txx_stat5 {
struct cvmx_agl_gmx_txx_stat5_s cn52xxp1;
struct cvmx_agl_gmx_txx_stat5_s cn56xx;
struct cvmx_agl_gmx_txx_stat5_s cn56xxp1;
+ struct cvmx_agl_gmx_txx_stat5_s cn63xx;
+ struct cvmx_agl_gmx_txx_stat5_s cn63xxp1;
};
union cvmx_agl_gmx_txx_stat6 {
@@ -956,6 +1119,8 @@ union cvmx_agl_gmx_txx_stat6 {
struct cvmx_agl_gmx_txx_stat6_s cn52xxp1;
struct cvmx_agl_gmx_txx_stat6_s cn56xx;
struct cvmx_agl_gmx_txx_stat6_s cn56xxp1;
+ struct cvmx_agl_gmx_txx_stat6_s cn63xx;
+ struct cvmx_agl_gmx_txx_stat6_s cn63xxp1;
};
union cvmx_agl_gmx_txx_stat7 {
@@ -968,6 +1133,8 @@ union cvmx_agl_gmx_txx_stat7 {
struct cvmx_agl_gmx_txx_stat7_s cn52xxp1;
struct cvmx_agl_gmx_txx_stat7_s cn56xx;
struct cvmx_agl_gmx_txx_stat7_s cn56xxp1;
+ struct cvmx_agl_gmx_txx_stat7_s cn63xx;
+ struct cvmx_agl_gmx_txx_stat7_s cn63xxp1;
};
union cvmx_agl_gmx_txx_stat8 {
@@ -980,6 +1147,8 @@ union cvmx_agl_gmx_txx_stat8 {
struct cvmx_agl_gmx_txx_stat8_s cn52xxp1;
struct cvmx_agl_gmx_txx_stat8_s cn56xx;
struct cvmx_agl_gmx_txx_stat8_s cn56xxp1;
+ struct cvmx_agl_gmx_txx_stat8_s cn63xx;
+ struct cvmx_agl_gmx_txx_stat8_s cn63xxp1;
};
union cvmx_agl_gmx_txx_stat9 {
@@ -992,6 +1161,8 @@ union cvmx_agl_gmx_txx_stat9 {
struct cvmx_agl_gmx_txx_stat9_s cn52xxp1;
struct cvmx_agl_gmx_txx_stat9_s cn56xx;
struct cvmx_agl_gmx_txx_stat9_s cn56xxp1;
+ struct cvmx_agl_gmx_txx_stat9_s cn63xx;
+ struct cvmx_agl_gmx_txx_stat9_s cn63xxp1;
};
union cvmx_agl_gmx_txx_stats_ctl {
@@ -1004,6 +1175,8 @@ union cvmx_agl_gmx_txx_stats_ctl {
struct cvmx_agl_gmx_txx_stats_ctl_s cn52xxp1;
struct cvmx_agl_gmx_txx_stats_ctl_s cn56xx;
struct cvmx_agl_gmx_txx_stats_ctl_s cn56xxp1;
+ struct cvmx_agl_gmx_txx_stats_ctl_s cn63xx;
+ struct cvmx_agl_gmx_txx_stats_ctl_s cn63xxp1;
};
union cvmx_agl_gmx_txx_thresh {
@@ -1016,6 +1189,8 @@ union cvmx_agl_gmx_txx_thresh {
struct cvmx_agl_gmx_txx_thresh_s cn52xxp1;
struct cvmx_agl_gmx_txx_thresh_s cn56xx;
struct cvmx_agl_gmx_txx_thresh_s cn56xxp1;
+ struct cvmx_agl_gmx_txx_thresh_s cn63xx;
+ struct cvmx_agl_gmx_txx_thresh_s cn63xxp1;
};
union cvmx_agl_gmx_tx_bp {
@@ -1031,6 +1206,8 @@ union cvmx_agl_gmx_tx_bp {
uint64_t bp:1;
} cn56xx;
struct cvmx_agl_gmx_tx_bp_cn56xx cn56xxp1;
+ struct cvmx_agl_gmx_tx_bp_s cn63xx;
+ struct cvmx_agl_gmx_tx_bp_s cn63xxp1;
};
union cvmx_agl_gmx_tx_col_attempt {
@@ -1043,6 +1220,8 @@ union cvmx_agl_gmx_tx_col_attempt {
struct cvmx_agl_gmx_tx_col_attempt_s cn52xxp1;
struct cvmx_agl_gmx_tx_col_attempt_s cn56xx;
struct cvmx_agl_gmx_tx_col_attempt_s cn56xxp1;
+ struct cvmx_agl_gmx_tx_col_attempt_s cn63xx;
+ struct cvmx_agl_gmx_tx_col_attempt_s cn63xxp1;
};
union cvmx_agl_gmx_tx_ifg {
@@ -1056,12 +1235,16 @@ union cvmx_agl_gmx_tx_ifg {
struct cvmx_agl_gmx_tx_ifg_s cn52xxp1;
struct cvmx_agl_gmx_tx_ifg_s cn56xx;
struct cvmx_agl_gmx_tx_ifg_s cn56xxp1;
+ struct cvmx_agl_gmx_tx_ifg_s cn63xx;
+ struct cvmx_agl_gmx_tx_ifg_s cn63xxp1;
};
union cvmx_agl_gmx_tx_int_en {
uint64_t u64;
struct cvmx_agl_gmx_tx_int_en_s {
- uint64_t reserved_18_63:46;
+ uint64_t reserved_22_63:42;
+ uint64_t ptp_lost:2;
+ uint64_t reserved_18_19:2;
uint64_t late_col:2;
uint64_t reserved_14_15:2;
uint64_t xsdef:2;
@@ -1072,8 +1255,19 @@ union cvmx_agl_gmx_tx_int_en {
uint64_t reserved_1_1:1;
uint64_t pko_nxa:1;
} s;
- struct cvmx_agl_gmx_tx_int_en_s cn52xx;
- struct cvmx_agl_gmx_tx_int_en_s cn52xxp1;
+ struct cvmx_agl_gmx_tx_int_en_cn52xx {
+ uint64_t reserved_18_63:46;
+ uint64_t late_col:2;
+ uint64_t reserved_14_15:2;
+ uint64_t xsdef:2;
+ uint64_t reserved_10_11:2;
+ uint64_t xscol:2;
+ uint64_t reserved_4_7:4;
+ uint64_t undflw:2;
+ uint64_t reserved_1_1:1;
+ uint64_t pko_nxa:1;
+ } cn52xx;
+ struct cvmx_agl_gmx_tx_int_en_cn52xx cn52xxp1;
struct cvmx_agl_gmx_tx_int_en_cn56xx {
uint64_t reserved_17_63:47;
uint64_t late_col:1;
@@ -1087,12 +1281,16 @@ union cvmx_agl_gmx_tx_int_en {
uint64_t pko_nxa:1;
} cn56xx;
struct cvmx_agl_gmx_tx_int_en_cn56xx cn56xxp1;
+ struct cvmx_agl_gmx_tx_int_en_s cn63xx;
+ struct cvmx_agl_gmx_tx_int_en_s cn63xxp1;
};
union cvmx_agl_gmx_tx_int_reg {
uint64_t u64;
struct cvmx_agl_gmx_tx_int_reg_s {
- uint64_t reserved_18_63:46;
+ uint64_t reserved_22_63:42;
+ uint64_t ptp_lost:2;
+ uint64_t reserved_18_19:2;
uint64_t late_col:2;
uint64_t reserved_14_15:2;
uint64_t xsdef:2;
@@ -1103,8 +1301,19 @@ union cvmx_agl_gmx_tx_int_reg {
uint64_t reserved_1_1:1;
uint64_t pko_nxa:1;
} s;
- struct cvmx_agl_gmx_tx_int_reg_s cn52xx;
- struct cvmx_agl_gmx_tx_int_reg_s cn52xxp1;
+ struct cvmx_agl_gmx_tx_int_reg_cn52xx {
+ uint64_t reserved_18_63:46;
+ uint64_t late_col:2;
+ uint64_t reserved_14_15:2;
+ uint64_t xsdef:2;
+ uint64_t reserved_10_11:2;
+ uint64_t xscol:2;
+ uint64_t reserved_4_7:4;
+ uint64_t undflw:2;
+ uint64_t reserved_1_1:1;
+ uint64_t pko_nxa:1;
+ } cn52xx;
+ struct cvmx_agl_gmx_tx_int_reg_cn52xx cn52xxp1;
struct cvmx_agl_gmx_tx_int_reg_cn56xx {
uint64_t reserved_17_63:47;
uint64_t late_col:1;
@@ -1118,6 +1327,8 @@ union cvmx_agl_gmx_tx_int_reg {
uint64_t pko_nxa:1;
} cn56xx;
struct cvmx_agl_gmx_tx_int_reg_cn56xx cn56xxp1;
+ struct cvmx_agl_gmx_tx_int_reg_s cn63xx;
+ struct cvmx_agl_gmx_tx_int_reg_s cn63xxp1;
};
union cvmx_agl_gmx_tx_jam {
@@ -1130,6 +1341,8 @@ union cvmx_agl_gmx_tx_jam {
struct cvmx_agl_gmx_tx_jam_s cn52xxp1;
struct cvmx_agl_gmx_tx_jam_s cn56xx;
struct cvmx_agl_gmx_tx_jam_s cn56xxp1;
+ struct cvmx_agl_gmx_tx_jam_s cn63xx;
+ struct cvmx_agl_gmx_tx_jam_s cn63xxp1;
};
union cvmx_agl_gmx_tx_lfsr {
@@ -1142,6 +1355,8 @@ union cvmx_agl_gmx_tx_lfsr {
struct cvmx_agl_gmx_tx_lfsr_s cn52xxp1;
struct cvmx_agl_gmx_tx_lfsr_s cn56xx;
struct cvmx_agl_gmx_tx_lfsr_s cn56xxp1;
+ struct cvmx_agl_gmx_tx_lfsr_s cn63xx;
+ struct cvmx_agl_gmx_tx_lfsr_s cn63xxp1;
};
union cvmx_agl_gmx_tx_ovr_bp {
@@ -1165,6 +1380,8 @@ union cvmx_agl_gmx_tx_ovr_bp {
uint64_t ign_full:1;
} cn56xx;
struct cvmx_agl_gmx_tx_ovr_bp_cn56xx cn56xxp1;
+ struct cvmx_agl_gmx_tx_ovr_bp_s cn63xx;
+ struct cvmx_agl_gmx_tx_ovr_bp_s cn63xxp1;
};
union cvmx_agl_gmx_tx_pause_pkt_dmac {
@@ -1177,6 +1394,8 @@ union cvmx_agl_gmx_tx_pause_pkt_dmac {
struct cvmx_agl_gmx_tx_pause_pkt_dmac_s cn52xxp1;
struct cvmx_agl_gmx_tx_pause_pkt_dmac_s cn56xx;
struct cvmx_agl_gmx_tx_pause_pkt_dmac_s cn56xxp1;
+ struct cvmx_agl_gmx_tx_pause_pkt_dmac_s cn63xx;
+ struct cvmx_agl_gmx_tx_pause_pkt_dmac_s cn63xxp1;
};
union cvmx_agl_gmx_tx_pause_pkt_type {
@@ -1189,6 +1408,39 @@ union cvmx_agl_gmx_tx_pause_pkt_type {
struct cvmx_agl_gmx_tx_pause_pkt_type_s cn52xxp1;
struct cvmx_agl_gmx_tx_pause_pkt_type_s cn56xx;
struct cvmx_agl_gmx_tx_pause_pkt_type_s cn56xxp1;
+ struct cvmx_agl_gmx_tx_pause_pkt_type_s cn63xx;
+ struct cvmx_agl_gmx_tx_pause_pkt_type_s cn63xxp1;
+};
+
+union cvmx_agl_prtx_ctl {
+ uint64_t u64;
+ struct cvmx_agl_prtx_ctl_s {
+ uint64_t drv_byp:1;
+ uint64_t reserved_62_62:1;
+ uint64_t cmp_pctl:6;
+ uint64_t reserved_54_55:2;
+ uint64_t cmp_nctl:6;
+ uint64_t reserved_46_47:2;
+ uint64_t drv_pctl:6;
+ uint64_t reserved_38_39:2;
+ uint64_t drv_nctl:6;
+ uint64_t reserved_29_31:3;
+ uint64_t clk_set:5;
+ uint64_t clkrx_byp:1;
+ uint64_t reserved_21_22:2;
+ uint64_t clkrx_set:5;
+ uint64_t clktx_byp:1;
+ uint64_t reserved_13_14:2;
+ uint64_t clktx_set:5;
+ uint64_t reserved_5_7:3;
+ uint64_t dllrst:1;
+ uint64_t comp:1;
+ uint64_t enable:1;
+ uint64_t clkrst:1;
+ uint64_t mode:1;
+ } s;
+ struct cvmx_agl_prtx_ctl_s cn63xx;
+ struct cvmx_agl_prtx_ctl_s cn63xxp1;
};
#endif
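Not part of the patch itself, just a minimal sketch of how these definitions are normally consumed: each union above overlays the per-model bit-field layouts (cn52xx, cn56xx, cn63xx, ...) on the single 64-bit `u64` member, and the CVMX_AGL_* macros compute the CSR address for a given port index. The sketch below assumes the usual cvmx_read_csr()/cvmx_write_csr() helpers and include paths from the Octeon support code; the function name is invented for illustration.

	#include <asm/octeon/cvmx.h>
	#include <asm/octeon/cvmx-agl-defs.h>

	/* Illustrative only: enable the AGL (MGMT) port interface for 'port'.
	 * Read the whole CSR into the union, flip one bit field, write it back.
	 */
	static void agl_port_enable_sketch(int port)
	{
		union cvmx_agl_prtx_ctl prtx_ctl;

		prtx_ctl.u64 = cvmx_read_csr(CVMX_AGL_PRTX_CTL(port));
		prtx_ctl.s.enable = 1;	/* bit layout as defined in the union above */
		cvmx_write_csr(CVMX_AGL_PRTX_CTL(port), prtx_ctl.u64);
	}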
diff --git a/arch/mips/include/asm/octeon/cvmx-asm.h b/arch/mips/include/asm/octeon/cvmx-asm.h
index b21d3fc1ef91..5de5de95311b 100644
--- a/arch/mips/include/asm/octeon/cvmx-asm.h
+++ b/arch/mips/include/asm/octeon/cvmx-asm.h
@@ -114,6 +114,17 @@
#define CVMX_DCACHE_INVALIDATE \
{ CVMX_SYNC; asm volatile ("cache 9, 0($0)" : : ); }
+#define CVMX_CACHE(op, address, offset) \
+ asm volatile ("cache " CVMX_TMP_STR(op) ", " CVMX_TMP_STR(offset) "(%[rbase])" \
+ : : [rbase] "d" (address) )
+/* fetch and lock the state. */
+#define CVMX_CACHE_LCKL2(address, offset) CVMX_CACHE(31, address, offset)
+/* unlock the state. */
+#define CVMX_CACHE_WBIL2(address, offset) CVMX_CACHE(23, address, offset)
+/* invalidate the cache block and clear the USED bits for the block */
+#define CVMX_CACHE_WBIL2I(address, offset) CVMX_CACHE(3, address, offset)
+/* load virtual tag and data for the L2 cache block into L2C_TAD0_TAG register */
+#define CVMX_CACHE_LTGL2I(address, offset) CVMX_CACHE(7, address, offset)
#define CVMX_POP(result, input) \
asm ("pop %[rd],%[rs]" : [rd] "=d" (result) : [rs] "d" (input))
diff --git a/arch/mips/include/asm/octeon/cvmx-ciu-defs.h b/arch/mips/include/asm/octeon/cvmx-ciu-defs.h
index f8f05b7764b7..27cead370411 100644
--- a/arch/mips/include/asm/octeon/cvmx-ciu-defs.h
+++ b/arch/mips/include/asm/octeon/cvmx-ciu-defs.h
@@ -4,7 +4,7 @@
* Contact: support@caviumnetworks.com
* This file is part of the OCTEON SDK
*
- * Copyright (c) 2003-2008 Cavium Networks
+ * Copyright (c) 2003-2010 Cavium Networks
*
* This file is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License, Version 2, as
@@ -28,87 +28,61 @@
#ifndef __CVMX_CIU_DEFS_H__
#define __CVMX_CIU_DEFS_H__
-#define CVMX_CIU_BIST \
- CVMX_ADD_IO_SEG(0x0001070000000730ull)
-#define CVMX_CIU_DINT \
- CVMX_ADD_IO_SEG(0x0001070000000720ull)
-#define CVMX_CIU_FUSE \
- CVMX_ADD_IO_SEG(0x0001070000000728ull)
-#define CVMX_CIU_GSTOP \
- CVMX_ADD_IO_SEG(0x0001070000000710ull)
-#define CVMX_CIU_INTX_EN0(offset) \
- CVMX_ADD_IO_SEG(0x0001070000000200ull + (((offset) & 63) * 16))
-#define CVMX_CIU_INTX_EN0_W1C(offset) \
- CVMX_ADD_IO_SEG(0x0001070000002200ull + (((offset) & 63) * 16))
-#define CVMX_CIU_INTX_EN0_W1S(offset) \
- CVMX_ADD_IO_SEG(0x0001070000006200ull + (((offset) & 63) * 16))
-#define CVMX_CIU_INTX_EN1(offset) \
- CVMX_ADD_IO_SEG(0x0001070000000208ull + (((offset) & 63) * 16))
-#define CVMX_CIU_INTX_EN1_W1C(offset) \
- CVMX_ADD_IO_SEG(0x0001070000002208ull + (((offset) & 63) * 16))
-#define CVMX_CIU_INTX_EN1_W1S(offset) \
- CVMX_ADD_IO_SEG(0x0001070000006208ull + (((offset) & 63) * 16))
-#define CVMX_CIU_INTX_EN4_0(offset) \
- CVMX_ADD_IO_SEG(0x0001070000000C80ull + (((offset) & 15) * 16))
-#define CVMX_CIU_INTX_EN4_0_W1C(offset) \
- CVMX_ADD_IO_SEG(0x0001070000002C80ull + (((offset) & 15) * 16))
-#define CVMX_CIU_INTX_EN4_0_W1S(offset) \
- CVMX_ADD_IO_SEG(0x0001070000006C80ull + (((offset) & 15) * 16))
-#define CVMX_CIU_INTX_EN4_1(offset) \
- CVMX_ADD_IO_SEG(0x0001070000000C88ull + (((offset) & 15) * 16))
-#define CVMX_CIU_INTX_EN4_1_W1C(offset) \
- CVMX_ADD_IO_SEG(0x0001070000002C88ull + (((offset) & 15) * 16))
-#define CVMX_CIU_INTX_EN4_1_W1S(offset) \
- CVMX_ADD_IO_SEG(0x0001070000006C88ull + (((offset) & 15) * 16))
-#define CVMX_CIU_INTX_SUM0(offset) \
- CVMX_ADD_IO_SEG(0x0001070000000000ull + (((offset) & 63) * 8))
-#define CVMX_CIU_INTX_SUM4(offset) \
- CVMX_ADD_IO_SEG(0x0001070000000C00ull + (((offset) & 15) * 8))
-#define CVMX_CIU_INT_SUM1 \
- CVMX_ADD_IO_SEG(0x0001070000000108ull)
-#define CVMX_CIU_MBOX_CLRX(offset) \
- CVMX_ADD_IO_SEG(0x0001070000000680ull + (((offset) & 15) * 8))
-#define CVMX_CIU_MBOX_SETX(offset) \
- CVMX_ADD_IO_SEG(0x0001070000000600ull + (((offset) & 15) * 8))
-#define CVMX_CIU_NMI \
- CVMX_ADD_IO_SEG(0x0001070000000718ull)
-#define CVMX_CIU_PCI_INTA \
- CVMX_ADD_IO_SEG(0x0001070000000750ull)
-#define CVMX_CIU_PP_DBG \
- CVMX_ADD_IO_SEG(0x0001070000000708ull)
-#define CVMX_CIU_PP_POKEX(offset) \
- CVMX_ADD_IO_SEG(0x0001070000000580ull + (((offset) & 15) * 8))
-#define CVMX_CIU_PP_RST \
- CVMX_ADD_IO_SEG(0x0001070000000700ull)
-#define CVMX_CIU_QLM_DCOK \
- CVMX_ADD_IO_SEG(0x0001070000000760ull)
-#define CVMX_CIU_QLM_JTGC \
- CVMX_ADD_IO_SEG(0x0001070000000768ull)
-#define CVMX_CIU_QLM_JTGD \
- CVMX_ADD_IO_SEG(0x0001070000000770ull)
-#define CVMX_CIU_SOFT_BIST \
- CVMX_ADD_IO_SEG(0x0001070000000738ull)
-#define CVMX_CIU_SOFT_PRST \
- CVMX_ADD_IO_SEG(0x0001070000000748ull)
-#define CVMX_CIU_SOFT_PRST1 \
- CVMX_ADD_IO_SEG(0x0001070000000758ull)
-#define CVMX_CIU_SOFT_RST \
- CVMX_ADD_IO_SEG(0x0001070000000740ull)
-#define CVMX_CIU_TIMX(offset) \
- CVMX_ADD_IO_SEG(0x0001070000000480ull + (((offset) & 3) * 8))
-#define CVMX_CIU_WDOGX(offset) \
- CVMX_ADD_IO_SEG(0x0001070000000500ull + (((offset) & 15) * 8))
+#define CVMX_CIU_BIST (CVMX_ADD_IO_SEG(0x0001070000000730ull))
+#define CVMX_CIU_BLOCK_INT (CVMX_ADD_IO_SEG(0x00010700000007C0ull))
+#define CVMX_CIU_DINT (CVMX_ADD_IO_SEG(0x0001070000000720ull))
+#define CVMX_CIU_FUSE (CVMX_ADD_IO_SEG(0x0001070000000728ull))
+#define CVMX_CIU_GSTOP (CVMX_ADD_IO_SEG(0x0001070000000710ull))
+#define CVMX_CIU_INT33_SUM0 (CVMX_ADD_IO_SEG(0x0001070000000110ull))
+#define CVMX_CIU_INTX_EN0(offset) (CVMX_ADD_IO_SEG(0x0001070000000200ull) + ((offset) & 63) * 16)
+#define CVMX_CIU_INTX_EN0_W1C(offset) (CVMX_ADD_IO_SEG(0x0001070000002200ull) + ((offset) & 63) * 16)
+#define CVMX_CIU_INTX_EN0_W1S(offset) (CVMX_ADD_IO_SEG(0x0001070000006200ull) + ((offset) & 63) * 16)
+#define CVMX_CIU_INTX_EN1(offset) (CVMX_ADD_IO_SEG(0x0001070000000208ull) + ((offset) & 63) * 16)
+#define CVMX_CIU_INTX_EN1_W1C(offset) (CVMX_ADD_IO_SEG(0x0001070000002208ull) + ((offset) & 63) * 16)
+#define CVMX_CIU_INTX_EN1_W1S(offset) (CVMX_ADD_IO_SEG(0x0001070000006208ull) + ((offset) & 63) * 16)
+#define CVMX_CIU_INTX_EN4_0(offset) (CVMX_ADD_IO_SEG(0x0001070000000C80ull) + ((offset) & 15) * 16)
+#define CVMX_CIU_INTX_EN4_0_W1C(offset) (CVMX_ADD_IO_SEG(0x0001070000002C80ull) + ((offset) & 15) * 16)
+#define CVMX_CIU_INTX_EN4_0_W1S(offset) (CVMX_ADD_IO_SEG(0x0001070000006C80ull) + ((offset) & 15) * 16)
+#define CVMX_CIU_INTX_EN4_1(offset) (CVMX_ADD_IO_SEG(0x0001070000000C88ull) + ((offset) & 15) * 16)
+#define CVMX_CIU_INTX_EN4_1_W1C(offset) (CVMX_ADD_IO_SEG(0x0001070000002C88ull) + ((offset) & 15) * 16)
+#define CVMX_CIU_INTX_EN4_1_W1S(offset) (CVMX_ADD_IO_SEG(0x0001070000006C88ull) + ((offset) & 15) * 16)
+#define CVMX_CIU_INTX_SUM0(offset) (CVMX_ADD_IO_SEG(0x0001070000000000ull) + ((offset) & 63) * 8)
+#define CVMX_CIU_INTX_SUM4(offset) (CVMX_ADD_IO_SEG(0x0001070000000C00ull) + ((offset) & 15) * 8)
+#define CVMX_CIU_INT_DBG_SEL (CVMX_ADD_IO_SEG(0x00010700000007D0ull))
+#define CVMX_CIU_INT_SUM1 (CVMX_ADD_IO_SEG(0x0001070000000108ull))
+#define CVMX_CIU_MBOX_CLRX(offset) (CVMX_ADD_IO_SEG(0x0001070000000680ull) + ((offset) & 15) * 8)
+#define CVMX_CIU_MBOX_SETX(offset) (CVMX_ADD_IO_SEG(0x0001070000000600ull) + ((offset) & 15) * 8)
+#define CVMX_CIU_NMI (CVMX_ADD_IO_SEG(0x0001070000000718ull))
+#define CVMX_CIU_PCI_INTA (CVMX_ADD_IO_SEG(0x0001070000000750ull))
+#define CVMX_CIU_PP_DBG (CVMX_ADD_IO_SEG(0x0001070000000708ull))
+#define CVMX_CIU_PP_POKEX(offset) (CVMX_ADD_IO_SEG(0x0001070000000580ull) + ((offset) & 15) * 8)
+#define CVMX_CIU_PP_RST (CVMX_ADD_IO_SEG(0x0001070000000700ull))
+#define CVMX_CIU_QLM0 (CVMX_ADD_IO_SEG(0x0001070000000780ull))
+#define CVMX_CIU_QLM1 (CVMX_ADD_IO_SEG(0x0001070000000788ull))
+#define CVMX_CIU_QLM2 (CVMX_ADD_IO_SEG(0x0001070000000790ull))
+#define CVMX_CIU_QLM_DCOK (CVMX_ADD_IO_SEG(0x0001070000000760ull))
+#define CVMX_CIU_QLM_JTGC (CVMX_ADD_IO_SEG(0x0001070000000768ull))
+#define CVMX_CIU_QLM_JTGD (CVMX_ADD_IO_SEG(0x0001070000000770ull))
+#define CVMX_CIU_SOFT_BIST (CVMX_ADD_IO_SEG(0x0001070000000738ull))
+#define CVMX_CIU_SOFT_PRST (CVMX_ADD_IO_SEG(0x0001070000000748ull))
+#define CVMX_CIU_SOFT_PRST1 (CVMX_ADD_IO_SEG(0x0001070000000758ull))
+#define CVMX_CIU_SOFT_RST (CVMX_ADD_IO_SEG(0x0001070000000740ull))
+#define CVMX_CIU_TIMX(offset) (CVMX_ADD_IO_SEG(0x0001070000000480ull) + ((offset) & 3) * 8)
+#define CVMX_CIU_WDOGX(offset) (CVMX_ADD_IO_SEG(0x0001070000000500ull) + ((offset) & 15) * 8)
union cvmx_ciu_bist {
uint64_t u64;
struct cvmx_ciu_bist_s {
+ uint64_t reserved_5_63:59;
+ uint64_t bist:5;
+ } s;
+ struct cvmx_ciu_bist_cn30xx {
uint64_t reserved_4_63:60;
uint64_t bist:4;
- } s;
- struct cvmx_ciu_bist_s cn30xx;
- struct cvmx_ciu_bist_s cn31xx;
- struct cvmx_ciu_bist_s cn38xx;
- struct cvmx_ciu_bist_s cn38xxp2;
+ } cn30xx;
+ struct cvmx_ciu_bist_cn30xx cn31xx;
+ struct cvmx_ciu_bist_cn30xx cn38xx;
+ struct cvmx_ciu_bist_cn30xx cn38xxp2;
struct cvmx_ciu_bist_cn50xx {
uint64_t reserved_2_63:62;
uint64_t bist:2;
@@ -118,10 +92,57 @@ union cvmx_ciu_bist {
uint64_t bist:3;
} cn52xx;
struct cvmx_ciu_bist_cn52xx cn52xxp1;
- struct cvmx_ciu_bist_s cn56xx;
- struct cvmx_ciu_bist_s cn56xxp1;
- struct cvmx_ciu_bist_s cn58xx;
- struct cvmx_ciu_bist_s cn58xxp1;
+ struct cvmx_ciu_bist_cn30xx cn56xx;
+ struct cvmx_ciu_bist_cn30xx cn56xxp1;
+ struct cvmx_ciu_bist_cn30xx cn58xx;
+ struct cvmx_ciu_bist_cn30xx cn58xxp1;
+ struct cvmx_ciu_bist_s cn63xx;
+ struct cvmx_ciu_bist_s cn63xxp1;
+};
+
+union cvmx_ciu_block_int {
+ uint64_t u64;
+ struct cvmx_ciu_block_int_s {
+ uint64_t reserved_43_63:21;
+ uint64_t ptp:1;
+ uint64_t dpi:1;
+ uint64_t dfm:1;
+ uint64_t reserved_34_39:6;
+ uint64_t srio1:1;
+ uint64_t srio0:1;
+ uint64_t reserved_31_31:1;
+ uint64_t iob:1;
+ uint64_t reserved_29_29:1;
+ uint64_t agl:1;
+ uint64_t reserved_27_27:1;
+ uint64_t pem1:1;
+ uint64_t pem0:1;
+ uint64_t reserved_23_24:2;
+ uint64_t asxpcs0:1;
+ uint64_t reserved_21_21:1;
+ uint64_t pip:1;
+ uint64_t reserved_18_19:2;
+ uint64_t lmc0:1;
+ uint64_t l2c:1;
+ uint64_t reserved_15_15:1;
+ uint64_t rad:1;
+ uint64_t usb:1;
+ uint64_t pow:1;
+ uint64_t tim:1;
+ uint64_t pko:1;
+ uint64_t ipd:1;
+ uint64_t reserved_8_8:1;
+ uint64_t zip:1;
+ uint64_t dfa:1;
+ uint64_t fpa:1;
+ uint64_t key:1;
+ uint64_t sli:1;
+ uint64_t reserved_2_2:1;
+ uint64_t gmx0:1;
+ uint64_t mio:1;
+ } s;
+ struct cvmx_ciu_block_int_s cn63xx;
+ struct cvmx_ciu_block_int_s cn63xxp1;
};
union cvmx_ciu_dint {
@@ -153,6 +174,11 @@ union cvmx_ciu_dint {
struct cvmx_ciu_dint_cn56xx cn56xxp1;
struct cvmx_ciu_dint_s cn58xx;
struct cvmx_ciu_dint_s cn58xxp1;
+ struct cvmx_ciu_dint_cn63xx {
+ uint64_t reserved_6_63:58;
+ uint64_t dint:6;
+ } cn63xx;
+ struct cvmx_ciu_dint_cn63xx cn63xxp1;
};
union cvmx_ciu_fuse {
@@ -184,6 +210,11 @@ union cvmx_ciu_fuse {
struct cvmx_ciu_fuse_cn56xx cn56xxp1;
struct cvmx_ciu_fuse_s cn58xx;
struct cvmx_ciu_fuse_s cn58xxp1;
+ struct cvmx_ciu_fuse_cn63xx {
+ uint64_t reserved_6_63:58;
+ uint64_t fuse:6;
+ } cn63xx;
+ struct cvmx_ciu_fuse_cn63xx cn63xxp1;
};
union cvmx_ciu_gstop {
@@ -203,6 +234,8 @@ union cvmx_ciu_gstop {
struct cvmx_ciu_gstop_s cn56xxp1;
struct cvmx_ciu_gstop_s cn58xx;
struct cvmx_ciu_gstop_s cn58xxp1;
+ struct cvmx_ciu_gstop_s cn63xx;
+ struct cvmx_ciu_gstop_s cn63xxp1;
};
union cvmx_ciu_intx_en0 {
@@ -343,6 +376,8 @@ union cvmx_ciu_intx_en0 {
struct cvmx_ciu_intx_en0_cn56xx cn56xxp1;
struct cvmx_ciu_intx_en0_cn38xx cn58xx;
struct cvmx_ciu_intx_en0_cn38xx cn58xxp1;
+ struct cvmx_ciu_intx_en0_cn52xx cn63xx;
+ struct cvmx_ciu_intx_en0_cn52xx cn63xxp1;
};
union cvmx_ciu_intx_en0_w1c {
@@ -412,6 +447,8 @@ union cvmx_ciu_intx_en0_w1c {
uint64_t gpio:16;
uint64_t workq:16;
} cn58xx;
+ struct cvmx_ciu_intx_en0_w1c_cn52xx cn63xx;
+ struct cvmx_ciu_intx_en0_w1c_cn52xx cn63xxp1;
};
union cvmx_ciu_intx_en0_w1s {
@@ -481,12 +518,42 @@ union cvmx_ciu_intx_en0_w1s {
uint64_t gpio:16;
uint64_t workq:16;
} cn58xx;
+ struct cvmx_ciu_intx_en0_w1s_cn52xx cn63xx;
+ struct cvmx_ciu_intx_en0_w1s_cn52xx cn63xxp1;
};
union cvmx_ciu_intx_en1 {
uint64_t u64;
struct cvmx_ciu_intx_en1_s {
- uint64_t reserved_20_63:44;
+ uint64_t rst:1;
+ uint64_t reserved_57_62:6;
+ uint64_t dfm:1;
+ uint64_t reserved_53_55:3;
+ uint64_t lmc0:1;
+ uint64_t srio1:1;
+ uint64_t srio0:1;
+ uint64_t pem1:1;
+ uint64_t pem0:1;
+ uint64_t ptp:1;
+ uint64_t agl:1;
+ uint64_t reserved_37_45:9;
+ uint64_t agx0:1;
+ uint64_t dpi:1;
+ uint64_t sli:1;
+ uint64_t usb:1;
+ uint64_t dfa:1;
+ uint64_t key:1;
+ uint64_t rad:1;
+ uint64_t tim:1;
+ uint64_t zip:1;
+ uint64_t pko:1;
+ uint64_t pip:1;
+ uint64_t ipd:1;
+ uint64_t l2c:1;
+ uint64_t pow:1;
+ uint64_t fpa:1;
+ uint64_t iob:1;
+ uint64_t mio:1;
uint64_t nand:1;
uint64_t mii1:1;
uint64_t usb1:1;
@@ -531,12 +598,76 @@ union cvmx_ciu_intx_en1 {
struct cvmx_ciu_intx_en1_cn56xx cn56xxp1;
struct cvmx_ciu_intx_en1_cn38xx cn58xx;
struct cvmx_ciu_intx_en1_cn38xx cn58xxp1;
+ struct cvmx_ciu_intx_en1_cn63xx {
+ uint64_t rst:1;
+ uint64_t reserved_57_62:6;
+ uint64_t dfm:1;
+ uint64_t reserved_53_55:3;
+ uint64_t lmc0:1;
+ uint64_t srio1:1;
+ uint64_t srio0:1;
+ uint64_t pem1:1;
+ uint64_t pem0:1;
+ uint64_t ptp:1;
+ uint64_t agl:1;
+ uint64_t reserved_37_45:9;
+ uint64_t agx0:1;
+ uint64_t dpi:1;
+ uint64_t sli:1;
+ uint64_t usb:1;
+ uint64_t dfa:1;
+ uint64_t key:1;
+ uint64_t rad:1;
+ uint64_t tim:1;
+ uint64_t zip:1;
+ uint64_t pko:1;
+ uint64_t pip:1;
+ uint64_t ipd:1;
+ uint64_t l2c:1;
+ uint64_t pow:1;
+ uint64_t fpa:1;
+ uint64_t iob:1;
+ uint64_t mio:1;
+ uint64_t nand:1;
+ uint64_t mii1:1;
+ uint64_t reserved_6_17:12;
+ uint64_t wdog:6;
+ } cn63xx;
+ struct cvmx_ciu_intx_en1_cn63xx cn63xxp1;
};
union cvmx_ciu_intx_en1_w1c {
uint64_t u64;
struct cvmx_ciu_intx_en1_w1c_s {
- uint64_t reserved_20_63:44;
+ uint64_t rst:1;
+ uint64_t reserved_57_62:6;
+ uint64_t dfm:1;
+ uint64_t reserved_53_55:3;
+ uint64_t lmc0:1;
+ uint64_t srio1:1;
+ uint64_t srio0:1;
+ uint64_t pem1:1;
+ uint64_t pem0:1;
+ uint64_t ptp:1;
+ uint64_t agl:1;
+ uint64_t reserved_37_45:9;
+ uint64_t agx0:1;
+ uint64_t dpi:1;
+ uint64_t sli:1;
+ uint64_t usb:1;
+ uint64_t dfa:1;
+ uint64_t key:1;
+ uint64_t rad:1;
+ uint64_t tim:1;
+ uint64_t zip:1;
+ uint64_t pko:1;
+ uint64_t pip:1;
+ uint64_t ipd:1;
+ uint64_t l2c:1;
+ uint64_t pow:1;
+ uint64_t fpa:1;
+ uint64_t iob:1;
+ uint64_t mio:1;
uint64_t nand:1;
uint64_t mii1:1;
uint64_t usb1:1;
@@ -560,12 +691,76 @@ union cvmx_ciu_intx_en1_w1c {
uint64_t reserved_16_63:48;
uint64_t wdog:16;
} cn58xx;
+ struct cvmx_ciu_intx_en1_w1c_cn63xx {
+ uint64_t rst:1;
+ uint64_t reserved_57_62:6;
+ uint64_t dfm:1;
+ uint64_t reserved_53_55:3;
+ uint64_t lmc0:1;
+ uint64_t srio1:1;
+ uint64_t srio0:1;
+ uint64_t pem1:1;
+ uint64_t pem0:1;
+ uint64_t ptp:1;
+ uint64_t agl:1;
+ uint64_t reserved_37_45:9;
+ uint64_t agx0:1;
+ uint64_t dpi:1;
+ uint64_t sli:1;
+ uint64_t usb:1;
+ uint64_t dfa:1;
+ uint64_t key:1;
+ uint64_t rad:1;
+ uint64_t tim:1;
+ uint64_t zip:1;
+ uint64_t pko:1;
+ uint64_t pip:1;
+ uint64_t ipd:1;
+ uint64_t l2c:1;
+ uint64_t pow:1;
+ uint64_t fpa:1;
+ uint64_t iob:1;
+ uint64_t mio:1;
+ uint64_t nand:1;
+ uint64_t mii1:1;
+ uint64_t reserved_6_17:12;
+ uint64_t wdog:6;
+ } cn63xx;
+ struct cvmx_ciu_intx_en1_w1c_cn63xx cn63xxp1;
};
union cvmx_ciu_intx_en1_w1s {
uint64_t u64;
struct cvmx_ciu_intx_en1_w1s_s {
- uint64_t reserved_20_63:44;
+ uint64_t rst:1;
+ uint64_t reserved_57_62:6;
+ uint64_t dfm:1;
+ uint64_t reserved_53_55:3;
+ uint64_t lmc0:1;
+ uint64_t srio1:1;
+ uint64_t srio0:1;
+ uint64_t pem1:1;
+ uint64_t pem0:1;
+ uint64_t ptp:1;
+ uint64_t agl:1;
+ uint64_t reserved_37_45:9;
+ uint64_t agx0:1;
+ uint64_t dpi:1;
+ uint64_t sli:1;
+ uint64_t usb:1;
+ uint64_t dfa:1;
+ uint64_t key:1;
+ uint64_t rad:1;
+ uint64_t tim:1;
+ uint64_t zip:1;
+ uint64_t pko:1;
+ uint64_t pip:1;
+ uint64_t ipd:1;
+ uint64_t l2c:1;
+ uint64_t pow:1;
+ uint64_t fpa:1;
+ uint64_t iob:1;
+ uint64_t mio:1;
uint64_t nand:1;
uint64_t mii1:1;
uint64_t usb1:1;
@@ -589,6 +784,42 @@ union cvmx_ciu_intx_en1_w1s {
uint64_t reserved_16_63:48;
uint64_t wdog:16;
} cn58xx;
+ struct cvmx_ciu_intx_en1_w1s_cn63xx {
+ uint64_t rst:1;
+ uint64_t reserved_57_62:6;
+ uint64_t dfm:1;
+ uint64_t reserved_53_55:3;
+ uint64_t lmc0:1;
+ uint64_t srio1:1;
+ uint64_t srio0:1;
+ uint64_t pem1:1;
+ uint64_t pem0:1;
+ uint64_t ptp:1;
+ uint64_t agl:1;
+ uint64_t reserved_37_45:9;
+ uint64_t agx0:1;
+ uint64_t dpi:1;
+ uint64_t sli:1;
+ uint64_t usb:1;
+ uint64_t dfa:1;
+ uint64_t key:1;
+ uint64_t rad:1;
+ uint64_t tim:1;
+ uint64_t zip:1;
+ uint64_t pko:1;
+ uint64_t pip:1;
+ uint64_t ipd:1;
+ uint64_t l2c:1;
+ uint64_t pow:1;
+ uint64_t fpa:1;
+ uint64_t iob:1;
+ uint64_t mio:1;
+ uint64_t nand:1;
+ uint64_t mii1:1;
+ uint64_t reserved_6_17:12;
+ uint64_t wdog:6;
+ } cn63xx;
+ struct cvmx_ciu_intx_en1_w1s_cn63xx cn63xxp1;
};
union cvmx_ciu_intx_en4_0 {
@@ -705,6 +936,8 @@ union cvmx_ciu_intx_en4_0 {
uint64_t workq:16;
} cn58xx;
struct cvmx_ciu_intx_en4_0_cn58xx cn58xxp1;
+ struct cvmx_ciu_intx_en4_0_cn52xx cn63xx;
+ struct cvmx_ciu_intx_en4_0_cn52xx cn63xxp1;
};
union cvmx_ciu_intx_en4_0_w1c {
@@ -774,6 +1007,8 @@ union cvmx_ciu_intx_en4_0_w1c {
uint64_t gpio:16;
uint64_t workq:16;
} cn58xx;
+ struct cvmx_ciu_intx_en4_0_w1c_cn52xx cn63xx;
+ struct cvmx_ciu_intx_en4_0_w1c_cn52xx cn63xxp1;
};
union cvmx_ciu_intx_en4_0_w1s {
@@ -843,12 +1078,42 @@ union cvmx_ciu_intx_en4_0_w1s {
uint64_t gpio:16;
uint64_t workq:16;
} cn58xx;
+ struct cvmx_ciu_intx_en4_0_w1s_cn52xx cn63xx;
+ struct cvmx_ciu_intx_en4_0_w1s_cn52xx cn63xxp1;
};
union cvmx_ciu_intx_en4_1 {
uint64_t u64;
struct cvmx_ciu_intx_en4_1_s {
- uint64_t reserved_20_63:44;
+ uint64_t rst:1;
+ uint64_t reserved_57_62:6;
+ uint64_t dfm:1;
+ uint64_t reserved_53_55:3;
+ uint64_t lmc0:1;
+ uint64_t srio1:1;
+ uint64_t srio0:1;
+ uint64_t pem1:1;
+ uint64_t pem0:1;
+ uint64_t ptp:1;
+ uint64_t agl:1;
+ uint64_t reserved_37_45:9;
+ uint64_t agx0:1;
+ uint64_t dpi:1;
+ uint64_t sli:1;
+ uint64_t usb:1;
+ uint64_t dfa:1;
+ uint64_t key:1;
+ uint64_t rad:1;
+ uint64_t tim:1;
+ uint64_t zip:1;
+ uint64_t pko:1;
+ uint64_t pip:1;
+ uint64_t ipd:1;
+ uint64_t l2c:1;
+ uint64_t pow:1;
+ uint64_t fpa:1;
+ uint64_t iob:1;
+ uint64_t mio:1;
uint64_t nand:1;
uint64_t mii1:1;
uint64_t usb1:1;
@@ -886,12 +1151,76 @@ union cvmx_ciu_intx_en4_1 {
uint64_t wdog:16;
} cn58xx;
struct cvmx_ciu_intx_en4_1_cn58xx cn58xxp1;
+ struct cvmx_ciu_intx_en4_1_cn63xx {
+ uint64_t rst:1;
+ uint64_t reserved_57_62:6;
+ uint64_t dfm:1;
+ uint64_t reserved_53_55:3;
+ uint64_t lmc0:1;
+ uint64_t srio1:1;
+ uint64_t srio0:1;
+ uint64_t pem1:1;
+ uint64_t pem0:1;
+ uint64_t ptp:1;
+ uint64_t agl:1;
+ uint64_t reserved_37_45:9;
+ uint64_t agx0:1;
+ uint64_t dpi:1;
+ uint64_t sli:1;
+ uint64_t usb:1;
+ uint64_t dfa:1;
+ uint64_t key:1;
+ uint64_t rad:1;
+ uint64_t tim:1;
+ uint64_t zip:1;
+ uint64_t pko:1;
+ uint64_t pip:1;
+ uint64_t ipd:1;
+ uint64_t l2c:1;
+ uint64_t pow:1;
+ uint64_t fpa:1;
+ uint64_t iob:1;
+ uint64_t mio:1;
+ uint64_t nand:1;
+ uint64_t mii1:1;
+ uint64_t reserved_6_17:12;
+ uint64_t wdog:6;
+ } cn63xx;
+ struct cvmx_ciu_intx_en4_1_cn63xx cn63xxp1;
};
union cvmx_ciu_intx_en4_1_w1c {
uint64_t u64;
struct cvmx_ciu_intx_en4_1_w1c_s {
- uint64_t reserved_20_63:44;
+ uint64_t rst:1;
+ uint64_t reserved_57_62:6;
+ uint64_t dfm:1;
+ uint64_t reserved_53_55:3;
+ uint64_t lmc0:1;
+ uint64_t srio1:1;
+ uint64_t srio0:1;
+ uint64_t pem1:1;
+ uint64_t pem0:1;
+ uint64_t ptp:1;
+ uint64_t agl:1;
+ uint64_t reserved_37_45:9;
+ uint64_t agx0:1;
+ uint64_t dpi:1;
+ uint64_t sli:1;
+ uint64_t usb:1;
+ uint64_t dfa:1;
+ uint64_t key:1;
+ uint64_t rad:1;
+ uint64_t tim:1;
+ uint64_t zip:1;
+ uint64_t pko:1;
+ uint64_t pip:1;
+ uint64_t ipd:1;
+ uint64_t l2c:1;
+ uint64_t pow:1;
+ uint64_t fpa:1;
+ uint64_t iob:1;
+ uint64_t mio:1;
uint64_t nand:1;
uint64_t mii1:1;
uint64_t usb1:1;
@@ -915,12 +1244,76 @@ union cvmx_ciu_intx_en4_1_w1c {
uint64_t reserved_16_63:48;
uint64_t wdog:16;
} cn58xx;
+ struct cvmx_ciu_intx_en4_1_w1c_cn63xx {
+ uint64_t rst:1;
+ uint64_t reserved_57_62:6;
+ uint64_t dfm:1;
+ uint64_t reserved_53_55:3;
+ uint64_t lmc0:1;
+ uint64_t srio1:1;
+ uint64_t srio0:1;
+ uint64_t pem1:1;
+ uint64_t pem0:1;
+ uint64_t ptp:1;
+ uint64_t agl:1;
+ uint64_t reserved_37_45:9;
+ uint64_t agx0:1;
+ uint64_t dpi:1;
+ uint64_t sli:1;
+ uint64_t usb:1;
+ uint64_t dfa:1;
+ uint64_t key:1;
+ uint64_t rad:1;
+ uint64_t tim:1;
+ uint64_t zip:1;
+ uint64_t pko:1;
+ uint64_t pip:1;
+ uint64_t ipd:1;
+ uint64_t l2c:1;
+ uint64_t pow:1;
+ uint64_t fpa:1;
+ uint64_t iob:1;
+ uint64_t mio:1;
+ uint64_t nand:1;
+ uint64_t mii1:1;
+ uint64_t reserved_6_17:12;
+ uint64_t wdog:6;
+ } cn63xx;
+ struct cvmx_ciu_intx_en4_1_w1c_cn63xx cn63xxp1;
};
union cvmx_ciu_intx_en4_1_w1s {
uint64_t u64;
struct cvmx_ciu_intx_en4_1_w1s_s {
- uint64_t reserved_20_63:44;
+ uint64_t rst:1;
+ uint64_t reserved_57_62:6;
+ uint64_t dfm:1;
+ uint64_t reserved_53_55:3;
+ uint64_t lmc0:1;
+ uint64_t srio1:1;
+ uint64_t srio0:1;
+ uint64_t pem1:1;
+ uint64_t pem0:1;
+ uint64_t ptp:1;
+ uint64_t agl:1;
+ uint64_t reserved_37_45:9;
+ uint64_t agx0:1;
+ uint64_t dpi:1;
+ uint64_t sli:1;
+ uint64_t usb:1;
+ uint64_t dfa:1;
+ uint64_t key:1;
+ uint64_t rad:1;
+ uint64_t tim:1;
+ uint64_t zip:1;
+ uint64_t pko:1;
+ uint64_t pip:1;
+ uint64_t ipd:1;
+ uint64_t l2c:1;
+ uint64_t pow:1;
+ uint64_t fpa:1;
+ uint64_t iob:1;
+ uint64_t mio:1;
uint64_t nand:1;
uint64_t mii1:1;
uint64_t usb1:1;
@@ -944,6 +1337,42 @@ union cvmx_ciu_intx_en4_1_w1s {
uint64_t reserved_16_63:48;
uint64_t wdog:16;
} cn58xx;
+ struct cvmx_ciu_intx_en4_1_w1s_cn63xx {
+ uint64_t rst:1;
+ uint64_t reserved_57_62:6;
+ uint64_t dfm:1;
+ uint64_t reserved_53_55:3;
+ uint64_t lmc0:1;
+ uint64_t srio1:1;
+ uint64_t srio0:1;
+ uint64_t pem1:1;
+ uint64_t pem0:1;
+ uint64_t ptp:1;
+ uint64_t agl:1;
+ uint64_t reserved_37_45:9;
+ uint64_t agx0:1;
+ uint64_t dpi:1;
+ uint64_t sli:1;
+ uint64_t usb:1;
+ uint64_t dfa:1;
+ uint64_t key:1;
+ uint64_t rad:1;
+ uint64_t tim:1;
+ uint64_t zip:1;
+ uint64_t pko:1;
+ uint64_t pip:1;
+ uint64_t ipd:1;
+ uint64_t l2c:1;
+ uint64_t pow:1;
+ uint64_t fpa:1;
+ uint64_t iob:1;
+ uint64_t mio:1;
+ uint64_t nand:1;
+ uint64_t mii1:1;
+ uint64_t reserved_6_17:12;
+ uint64_t wdog:6;
+ } cn63xx;
+ struct cvmx_ciu_intx_en4_1_w1s_cn63xx cn63xxp1;
};
union cvmx_ciu_intx_sum0 {
@@ -1084,6 +1513,8 @@ union cvmx_ciu_intx_sum0 {
struct cvmx_ciu_intx_sum0_cn56xx cn56xxp1;
struct cvmx_ciu_intx_sum0_cn38xx cn58xx;
struct cvmx_ciu_intx_sum0_cn38xx cn58xxp1;
+ struct cvmx_ciu_intx_sum0_cn52xx cn63xx;
+ struct cvmx_ciu_intx_sum0_cn52xx cn63xxp1;
};
union cvmx_ciu_intx_sum4 {
@@ -1200,12 +1631,85 @@ union cvmx_ciu_intx_sum4 {
uint64_t workq:16;
} cn58xx;
struct cvmx_ciu_intx_sum4_cn58xx cn58xxp1;
+ struct cvmx_ciu_intx_sum4_cn52xx cn63xx;
+ struct cvmx_ciu_intx_sum4_cn52xx cn63xxp1;
+};
+
+union cvmx_ciu_int33_sum0 {
+ uint64_t u64;
+ struct cvmx_ciu_int33_sum0_s {
+ uint64_t bootdma:1;
+ uint64_t mii:1;
+ uint64_t ipdppthr:1;
+ uint64_t powiq:1;
+ uint64_t twsi2:1;
+ uint64_t reserved_57_58:2;
+ uint64_t usb:1;
+ uint64_t timer:4;
+ uint64_t reserved_51_51:1;
+ uint64_t ipd_drp:1;
+ uint64_t reserved_49_49:1;
+ uint64_t gmx_drp:1;
+ uint64_t trace:1;
+ uint64_t rml:1;
+ uint64_t twsi:1;
+ uint64_t wdog_sum:1;
+ uint64_t pci_msi:4;
+ uint64_t pci_int:4;
+ uint64_t uart:2;
+ uint64_t mbox:2;
+ uint64_t gpio:16;
+ uint64_t workq:16;
+ } s;
+ struct cvmx_ciu_int33_sum0_s cn63xx;
+ struct cvmx_ciu_int33_sum0_s cn63xxp1;
+};
+
+union cvmx_ciu_int_dbg_sel {
+ uint64_t u64;
+ struct cvmx_ciu_int_dbg_sel_s {
+ uint64_t reserved_19_63:45;
+ uint64_t sel:3;
+ uint64_t reserved_10_15:6;
+ uint64_t irq:2;
+ uint64_t reserved_3_7:5;
+ uint64_t pp:3;
+ } s;
+ struct cvmx_ciu_int_dbg_sel_s cn63xx;
};
union cvmx_ciu_int_sum1 {
uint64_t u64;
struct cvmx_ciu_int_sum1_s {
- uint64_t reserved_20_63:44;
+ uint64_t rst:1;
+ uint64_t reserved_57_62:6;
+ uint64_t dfm:1;
+ uint64_t reserved_53_55:3;
+ uint64_t lmc0:1;
+ uint64_t srio1:1;
+ uint64_t srio0:1;
+ uint64_t pem1:1;
+ uint64_t pem0:1;
+ uint64_t ptp:1;
+ uint64_t agl:1;
+ uint64_t reserved_37_45:9;
+ uint64_t agx0:1;
+ uint64_t dpi:1;
+ uint64_t sli:1;
+ uint64_t usb:1;
+ uint64_t dfa:1;
+ uint64_t key:1;
+ uint64_t rad:1;
+ uint64_t tim:1;
+ uint64_t zip:1;
+ uint64_t pko:1;
+ uint64_t pip:1;
+ uint64_t ipd:1;
+ uint64_t l2c:1;
+ uint64_t pow:1;
+ uint64_t fpa:1;
+ uint64_t iob:1;
+ uint64_t mio:1;
uint64_t nand:1;
uint64_t mii1:1;
uint64_t usb1:1;
@@ -1250,6 +1754,42 @@ union cvmx_ciu_int_sum1 {
struct cvmx_ciu_int_sum1_cn56xx cn56xxp1;
struct cvmx_ciu_int_sum1_cn38xx cn58xx;
struct cvmx_ciu_int_sum1_cn38xx cn58xxp1;
+ struct cvmx_ciu_int_sum1_cn63xx {
+ uint64_t rst:1;
+ uint64_t reserved_57_62:6;
+ uint64_t dfm:1;
+ uint64_t reserved_53_55:3;
+ uint64_t lmc0:1;
+ uint64_t srio1:1;
+ uint64_t srio0:1;
+ uint64_t pem1:1;
+ uint64_t pem0:1;
+ uint64_t ptp:1;
+ uint64_t agl:1;
+ uint64_t reserved_37_45:9;
+ uint64_t agx0:1;
+ uint64_t dpi:1;
+ uint64_t sli:1;
+ uint64_t usb:1;
+ uint64_t dfa:1;
+ uint64_t key:1;
+ uint64_t rad:1;
+ uint64_t tim:1;
+ uint64_t zip:1;
+ uint64_t pko:1;
+ uint64_t pip:1;
+ uint64_t ipd:1;
+ uint64_t l2c:1;
+ uint64_t pow:1;
+ uint64_t fpa:1;
+ uint64_t iob:1;
+ uint64_t mio:1;
+ uint64_t nand:1;
+ uint64_t mii1:1;
+ uint64_t reserved_6_17:12;
+ uint64_t wdog:6;
+ } cn63xx;
+ struct cvmx_ciu_int_sum1_cn63xx cn63xxp1;
};
union cvmx_ciu_mbox_clrx {
@@ -1269,6 +1809,8 @@ union cvmx_ciu_mbox_clrx {
struct cvmx_ciu_mbox_clrx_s cn56xxp1;
struct cvmx_ciu_mbox_clrx_s cn58xx;
struct cvmx_ciu_mbox_clrx_s cn58xxp1;
+ struct cvmx_ciu_mbox_clrx_s cn63xx;
+ struct cvmx_ciu_mbox_clrx_s cn63xxp1;
};
union cvmx_ciu_mbox_setx {
@@ -1288,6 +1830,8 @@ union cvmx_ciu_mbox_setx {
struct cvmx_ciu_mbox_setx_s cn56xxp1;
struct cvmx_ciu_mbox_setx_s cn58xx;
struct cvmx_ciu_mbox_setx_s cn58xxp1;
+ struct cvmx_ciu_mbox_setx_s cn63xx;
+ struct cvmx_ciu_mbox_setx_s cn63xxp1;
};
union cvmx_ciu_nmi {
@@ -1319,6 +1863,11 @@ union cvmx_ciu_nmi {
struct cvmx_ciu_nmi_cn56xx cn56xxp1;
struct cvmx_ciu_nmi_s cn58xx;
struct cvmx_ciu_nmi_s cn58xxp1;
+ struct cvmx_ciu_nmi_cn63xx {
+ uint64_t reserved_6_63:58;
+ uint64_t nmi:6;
+ } cn63xx;
+ struct cvmx_ciu_nmi_cn63xx cn63xxp1;
};
union cvmx_ciu_pci_inta {
@@ -1338,6 +1887,8 @@ union cvmx_ciu_pci_inta {
struct cvmx_ciu_pci_inta_s cn56xxp1;
struct cvmx_ciu_pci_inta_s cn58xx;
struct cvmx_ciu_pci_inta_s cn58xxp1;
+ struct cvmx_ciu_pci_inta_s cn63xx;
+ struct cvmx_ciu_pci_inta_s cn63xxp1;
};
union cvmx_ciu_pp_dbg {
@@ -1369,12 +1920,17 @@ union cvmx_ciu_pp_dbg {
struct cvmx_ciu_pp_dbg_cn56xx cn56xxp1;
struct cvmx_ciu_pp_dbg_s cn58xx;
struct cvmx_ciu_pp_dbg_s cn58xxp1;
+ struct cvmx_ciu_pp_dbg_cn63xx {
+ uint64_t reserved_6_63:58;
+ uint64_t ppdbg:6;
+ } cn63xx;
+ struct cvmx_ciu_pp_dbg_cn63xx cn63xxp1;
};
union cvmx_ciu_pp_pokex {
uint64_t u64;
struct cvmx_ciu_pp_pokex_s {
- uint64_t reserved_0_63:64;
+ uint64_t poke:64;
} s;
struct cvmx_ciu_pp_pokex_s cn30xx;
struct cvmx_ciu_pp_pokex_s cn31xx;
@@ -1387,6 +1943,8 @@ union cvmx_ciu_pp_pokex {
struct cvmx_ciu_pp_pokex_s cn56xxp1;
struct cvmx_ciu_pp_pokex_s cn58xx;
struct cvmx_ciu_pp_pokex_s cn58xxp1;
+ struct cvmx_ciu_pp_pokex_s cn63xx;
+ struct cvmx_ciu_pp_pokex_s cn63xxp1;
};
union cvmx_ciu_pp_rst {
@@ -1422,6 +1980,97 @@ union cvmx_ciu_pp_rst {
struct cvmx_ciu_pp_rst_cn56xx cn56xxp1;
struct cvmx_ciu_pp_rst_s cn58xx;
struct cvmx_ciu_pp_rst_s cn58xxp1;
+ struct cvmx_ciu_pp_rst_cn63xx {
+ uint64_t reserved_6_63:58;
+ uint64_t rst:5;
+ uint64_t rst0:1;
+ } cn63xx;
+ struct cvmx_ciu_pp_rst_cn63xx cn63xxp1;
+};
+
+union cvmx_ciu_qlm0 {
+ uint64_t u64;
+ struct cvmx_ciu_qlm0_s {
+ uint64_t g2bypass:1;
+ uint64_t reserved_53_62:10;
+ uint64_t g2deemph:5;
+ uint64_t reserved_45_47:3;
+ uint64_t g2margin:5;
+ uint64_t reserved_32_39:8;
+ uint64_t txbypass:1;
+ uint64_t reserved_21_30:10;
+ uint64_t txdeemph:5;
+ uint64_t reserved_13_15:3;
+ uint64_t txmargin:5;
+ uint64_t reserved_4_7:4;
+ uint64_t lane_en:4;
+ } s;
+ struct cvmx_ciu_qlm0_s cn63xx;
+ struct cvmx_ciu_qlm0_cn63xxp1 {
+ uint64_t reserved_32_63:32;
+ uint64_t txbypass:1;
+ uint64_t reserved_20_30:11;
+ uint64_t txdeemph:4;
+ uint64_t reserved_13_15:3;
+ uint64_t txmargin:5;
+ uint64_t reserved_4_7:4;
+ uint64_t lane_en:4;
+ } cn63xxp1;
+};
+
+union cvmx_ciu_qlm1 {
+ uint64_t u64;
+ struct cvmx_ciu_qlm1_s {
+ uint64_t g2bypass:1;
+ uint64_t reserved_53_62:10;
+ uint64_t g2deemph:5;
+ uint64_t reserved_45_47:3;
+ uint64_t g2margin:5;
+ uint64_t reserved_32_39:8;
+ uint64_t txbypass:1;
+ uint64_t reserved_21_30:10;
+ uint64_t txdeemph:5;
+ uint64_t reserved_13_15:3;
+ uint64_t txmargin:5;
+ uint64_t reserved_4_7:4;
+ uint64_t lane_en:4;
+ } s;
+ struct cvmx_ciu_qlm1_s cn63xx;
+ struct cvmx_ciu_qlm1_cn63xxp1 {
+ uint64_t reserved_32_63:32;
+ uint64_t txbypass:1;
+ uint64_t reserved_20_30:11;
+ uint64_t txdeemph:4;
+ uint64_t reserved_13_15:3;
+ uint64_t txmargin:5;
+ uint64_t reserved_4_7:4;
+ uint64_t lane_en:4;
+ } cn63xxp1;
+};
+
+union cvmx_ciu_qlm2 {
+ uint64_t u64;
+ struct cvmx_ciu_qlm2_s {
+ uint64_t reserved_32_63:32;
+ uint64_t txbypass:1;
+ uint64_t reserved_21_30:10;
+ uint64_t txdeemph:5;
+ uint64_t reserved_13_15:3;
+ uint64_t txmargin:5;
+ uint64_t reserved_4_7:4;
+ uint64_t lane_en:4;
+ } s;
+ struct cvmx_ciu_qlm2_s cn63xx;
+ struct cvmx_ciu_qlm2_cn63xxp1 {
+ uint64_t reserved_32_63:32;
+ uint64_t txbypass:1;
+ uint64_t reserved_20_30:11;
+ uint64_t txdeemph:4;
+ uint64_t reserved_13_15:3;
+ uint64_t txmargin:5;
+ uint64_t reserved_4_7:4;
+ uint64_t lane_en:4;
+ } cn63xxp1;
};
union cvmx_ciu_qlm_dcok {
@@ -1459,6 +2108,15 @@ union cvmx_ciu_qlm_jtgc {
struct cvmx_ciu_qlm_jtgc_cn52xx cn52xxp1;
struct cvmx_ciu_qlm_jtgc_s cn56xx;
struct cvmx_ciu_qlm_jtgc_s cn56xxp1;
+ struct cvmx_ciu_qlm_jtgc_cn63xx {
+ uint64_t reserved_11_63:53;
+ uint64_t clk_div:3;
+ uint64_t reserved_6_7:2;
+ uint64_t mux_sel:2;
+ uint64_t reserved_3_3:1;
+ uint64_t bypass:3;
+ } cn63xx;
+ struct cvmx_ciu_qlm_jtgc_cn63xx cn63xxp1;
};
union cvmx_ciu_qlm_jtgd {
@@ -1493,6 +2151,17 @@ union cvmx_ciu_qlm_jtgd {
uint64_t shft_cnt:5;
uint64_t shft_reg:32;
} cn56xxp1;
+ struct cvmx_ciu_qlm_jtgd_cn63xx {
+ uint64_t capture:1;
+ uint64_t shift:1;
+ uint64_t update:1;
+ uint64_t reserved_43_60:18;
+ uint64_t select:3;
+ uint64_t reserved_37_39:3;
+ uint64_t shft_cnt:5;
+ uint64_t shft_reg:32;
+ } cn63xx;
+ struct cvmx_ciu_qlm_jtgd_cn63xx cn63xxp1;
};
union cvmx_ciu_soft_bist {
@@ -1512,6 +2181,8 @@ union cvmx_ciu_soft_bist {
struct cvmx_ciu_soft_bist_s cn56xxp1;
struct cvmx_ciu_soft_bist_s cn58xx;
struct cvmx_ciu_soft_bist_s cn58xxp1;
+ struct cvmx_ciu_soft_bist_s cn63xx;
+ struct cvmx_ciu_soft_bist_s cn63xxp1;
};
union cvmx_ciu_soft_prst {
@@ -1536,6 +2207,8 @@ union cvmx_ciu_soft_prst {
struct cvmx_ciu_soft_prst_cn52xx cn56xxp1;
struct cvmx_ciu_soft_prst_s cn58xx;
struct cvmx_ciu_soft_prst_s cn58xxp1;
+ struct cvmx_ciu_soft_prst_cn52xx cn63xx;
+ struct cvmx_ciu_soft_prst_cn52xx cn63xxp1;
};
union cvmx_ciu_soft_prst1 {
@@ -1548,6 +2221,8 @@ union cvmx_ciu_soft_prst1 {
struct cvmx_ciu_soft_prst1_s cn52xxp1;
struct cvmx_ciu_soft_prst1_s cn56xx;
struct cvmx_ciu_soft_prst1_s cn56xxp1;
+ struct cvmx_ciu_soft_prst1_s cn63xx;
+ struct cvmx_ciu_soft_prst1_s cn63xxp1;
};
union cvmx_ciu_soft_rst {
@@ -1567,6 +2242,8 @@ union cvmx_ciu_soft_rst {
struct cvmx_ciu_soft_rst_s cn56xxp1;
struct cvmx_ciu_soft_rst_s cn58xx;
struct cvmx_ciu_soft_rst_s cn58xxp1;
+ struct cvmx_ciu_soft_rst_s cn63xx;
+ struct cvmx_ciu_soft_rst_s cn63xxp1;
};
union cvmx_ciu_timx {
@@ -1587,6 +2264,8 @@ union cvmx_ciu_timx {
struct cvmx_ciu_timx_s cn56xxp1;
struct cvmx_ciu_timx_s cn58xx;
struct cvmx_ciu_timx_s cn58xxp1;
+ struct cvmx_ciu_timx_s cn63xx;
+ struct cvmx_ciu_timx_s cn63xxp1;
};
union cvmx_ciu_wdogx {
@@ -1611,6 +2290,8 @@ union cvmx_ciu_wdogx {
struct cvmx_ciu_wdogx_s cn56xxp1;
struct cvmx_ciu_wdogx_s cn58xx;
struct cvmx_ciu_wdogx_s cn58xxp1;
+ struct cvmx_ciu_wdogx_s cn63xx;
+ struct cvmx_ciu_wdogx_s cn63xxp1;
};
#endif
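/*
 * Illustrative sketch (an assumption about typical usage, not part of the
 * patch): every register above follows the same pattern -- the raw value
 * lives in u64 and the per-chip structs overlay named bit-fields on it.
 * Here the per-core watchdog poke register (whose "poke" field now spans
 * all 64 bits, per the hunk above) is written through that layout;
 * cvmx_write_csr() is the standard Octeon CSR write accessor.
 */
#include <asm/octeon/cvmx.h>
#include <asm/octeon/cvmx-ciu-defs.h>

static void example_poke_core_watchdog(int core)
{
	union cvmx_ciu_pp_pokex poke;

	poke.u64 = 0;
	poke.s.poke = 1;	/* any write to this CSR counts as a poke */
	cvmx_write_csr(CVMX_CIU_PP_POKEX(core), poke.u64);
}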
diff --git a/arch/mips/include/asm/octeon/cvmx-gpio-defs.h b/arch/mips/include/asm/octeon/cvmx-gpio-defs.h
index 5fdd6ba48a05..395564e8d1f0 100644
--- a/arch/mips/include/asm/octeon/cvmx-gpio-defs.h
+++ b/arch/mips/include/asm/octeon/cvmx-gpio-defs.h
@@ -4,7 +4,7 @@
* Contact: support@caviumnetworks.com
* This file is part of the OCTEON SDK
*
- * Copyright (c) 2003-2008 Cavium Networks
+ * Copyright (c) 2003-2010 Cavium Networks
*
* This file is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License, Version 2, as
@@ -28,29 +28,22 @@
#ifndef __CVMX_GPIO_DEFS_H__
#define __CVMX_GPIO_DEFS_H__
-#define CVMX_GPIO_BIT_CFGX(offset) \
- CVMX_ADD_IO_SEG(0x0001070000000800ull + (((offset) & 15) * 8))
-#define CVMX_GPIO_BOOT_ENA \
- CVMX_ADD_IO_SEG(0x00010700000008A8ull)
-#define CVMX_GPIO_CLK_GENX(offset) \
- CVMX_ADD_IO_SEG(0x00010700000008C0ull + (((offset) & 3) * 8))
-#define CVMX_GPIO_DBG_ENA \
- CVMX_ADD_IO_SEG(0x00010700000008A0ull)
-#define CVMX_GPIO_INT_CLR \
- CVMX_ADD_IO_SEG(0x0001070000000898ull)
-#define CVMX_GPIO_RX_DAT \
- CVMX_ADD_IO_SEG(0x0001070000000880ull)
-#define CVMX_GPIO_TX_CLR \
- CVMX_ADD_IO_SEG(0x0001070000000890ull)
-#define CVMX_GPIO_TX_SET \
- CVMX_ADD_IO_SEG(0x0001070000000888ull)
-#define CVMX_GPIO_XBIT_CFGX(offset) \
- CVMX_ADD_IO_SEG(0x0001070000000900ull + (((offset) & 31) * 8) - 8 * 16)
+#define CVMX_GPIO_BIT_CFGX(offset) (CVMX_ADD_IO_SEG(0x0001070000000800ull) + ((offset) & 15) * 8)
+#define CVMX_GPIO_BOOT_ENA (CVMX_ADD_IO_SEG(0x00010700000008A8ull))
+#define CVMX_GPIO_CLK_GENX(offset) (CVMX_ADD_IO_SEG(0x00010700000008C0ull) + ((offset) & 3) * 8)
+#define CVMX_GPIO_CLK_QLMX(offset) (CVMX_ADD_IO_SEG(0x00010700000008E0ull) + ((offset) & 1) * 8)
+#define CVMX_GPIO_DBG_ENA (CVMX_ADD_IO_SEG(0x00010700000008A0ull))
+#define CVMX_GPIO_INT_CLR (CVMX_ADD_IO_SEG(0x0001070000000898ull))
+#define CVMX_GPIO_RX_DAT (CVMX_ADD_IO_SEG(0x0001070000000880ull))
+#define CVMX_GPIO_TX_CLR (CVMX_ADD_IO_SEG(0x0001070000000890ull))
+#define CVMX_GPIO_TX_SET (CVMX_ADD_IO_SEG(0x0001070000000888ull))
+#define CVMX_GPIO_XBIT_CFGX(offset) (CVMX_ADD_IO_SEG(0x0001070000000900ull) + ((offset) & 31) * 8 - 8*16)
union cvmx_gpio_bit_cfgx {
uint64_t u64;
struct cvmx_gpio_bit_cfgx_s {
- uint64_t reserved_15_63:49;
+ uint64_t reserved_17_63:47;
+ uint64_t synce_sel:2;
uint64_t clk_gen:1;
uint64_t clk_sel:2;
uint64_t fil_sel:4;
@@ -73,12 +66,24 @@ union cvmx_gpio_bit_cfgx {
struct cvmx_gpio_bit_cfgx_cn30xx cn38xx;
struct cvmx_gpio_bit_cfgx_cn30xx cn38xxp2;
struct cvmx_gpio_bit_cfgx_cn30xx cn50xx;
- struct cvmx_gpio_bit_cfgx_s cn52xx;
- struct cvmx_gpio_bit_cfgx_s cn52xxp1;
- struct cvmx_gpio_bit_cfgx_s cn56xx;
- struct cvmx_gpio_bit_cfgx_s cn56xxp1;
+ struct cvmx_gpio_bit_cfgx_cn52xx {
+ uint64_t reserved_15_63:49;
+ uint64_t clk_gen:1;
+ uint64_t clk_sel:2;
+ uint64_t fil_sel:4;
+ uint64_t fil_cnt:4;
+ uint64_t int_type:1;
+ uint64_t int_en:1;
+ uint64_t rx_xor:1;
+ uint64_t tx_oe:1;
+ } cn52xx;
+ struct cvmx_gpio_bit_cfgx_cn52xx cn52xxp1;
+ struct cvmx_gpio_bit_cfgx_cn52xx cn56xx;
+ struct cvmx_gpio_bit_cfgx_cn52xx cn56xxp1;
struct cvmx_gpio_bit_cfgx_cn30xx cn58xx;
struct cvmx_gpio_bit_cfgx_cn30xx cn58xxp1;
+ struct cvmx_gpio_bit_cfgx_s cn63xx;
+ struct cvmx_gpio_bit_cfgx_s cn63xxp1;
};
union cvmx_gpio_boot_ena {
@@ -103,6 +108,19 @@ union cvmx_gpio_clk_genx {
struct cvmx_gpio_clk_genx_s cn52xxp1;
struct cvmx_gpio_clk_genx_s cn56xx;
struct cvmx_gpio_clk_genx_s cn56xxp1;
+ struct cvmx_gpio_clk_genx_s cn63xx;
+ struct cvmx_gpio_clk_genx_s cn63xxp1;
+};
+
+union cvmx_gpio_clk_qlmx {
+ uint64_t u64;
+ struct cvmx_gpio_clk_qlmx_s {
+ uint64_t reserved_3_63:61;
+ uint64_t div:1;
+ uint64_t lane_sel:2;
+ } s;
+ struct cvmx_gpio_clk_qlmx_s cn63xx;
+ struct cvmx_gpio_clk_qlmx_s cn63xxp1;
};
union cvmx_gpio_dbg_ena {
@@ -133,6 +151,8 @@ union cvmx_gpio_int_clr {
struct cvmx_gpio_int_clr_s cn56xxp1;
struct cvmx_gpio_int_clr_s cn58xx;
struct cvmx_gpio_int_clr_s cn58xxp1;
+ struct cvmx_gpio_int_clr_s cn63xx;
+ struct cvmx_gpio_int_clr_s cn63xxp1;
};
union cvmx_gpio_rx_dat {
@@ -155,6 +175,8 @@ union cvmx_gpio_rx_dat {
struct cvmx_gpio_rx_dat_cn38xx cn56xxp1;
struct cvmx_gpio_rx_dat_cn38xx cn58xx;
struct cvmx_gpio_rx_dat_cn38xx cn58xxp1;
+ struct cvmx_gpio_rx_dat_cn38xx cn63xx;
+ struct cvmx_gpio_rx_dat_cn38xx cn63xxp1;
};
union cvmx_gpio_tx_clr {
@@ -177,6 +199,8 @@ union cvmx_gpio_tx_clr {
struct cvmx_gpio_tx_clr_cn38xx cn56xxp1;
struct cvmx_gpio_tx_clr_cn38xx cn58xx;
struct cvmx_gpio_tx_clr_cn38xx cn58xxp1;
+ struct cvmx_gpio_tx_clr_cn38xx cn63xx;
+ struct cvmx_gpio_tx_clr_cn38xx cn63xxp1;
};
union cvmx_gpio_tx_set {
@@ -199,6 +223,8 @@ union cvmx_gpio_tx_set {
struct cvmx_gpio_tx_set_cn38xx cn56xxp1;
struct cvmx_gpio_tx_set_cn38xx cn58xx;
struct cvmx_gpio_tx_set_cn38xx cn58xxp1;
+ struct cvmx_gpio_tx_set_cn38xx cn63xx;
+ struct cvmx_gpio_tx_set_cn38xx cn63xxp1;
};
union cvmx_gpio_xbit_cfgx {
diff --git a/arch/mips/include/asm/octeon/cvmx-iob-defs.h b/arch/mips/include/asm/octeon/cvmx-iob-defs.h
index 0ee36baec500..d7d856c2483d 100644
--- a/arch/mips/include/asm/octeon/cvmx-iob-defs.h
+++ b/arch/mips/include/asm/octeon/cvmx-iob-defs.h
@@ -4,7 +4,7 @@
* Contact: support@caviumnetworks.com
* This file is part of the OCTEON SDK
*
- * Copyright (c) 2003-2008 Cavium Networks
+ * Copyright (c) 2003-2010 Cavium Networks
*
* This file is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License, Version 2, as
@@ -28,55 +28,39 @@
#ifndef __CVMX_IOB_DEFS_H__
#define __CVMX_IOB_DEFS_H__
-#define CVMX_IOB_BIST_STATUS \
- CVMX_ADD_IO_SEG(0x00011800F00007F8ull)
-#define CVMX_IOB_CTL_STATUS \
- CVMX_ADD_IO_SEG(0x00011800F0000050ull)
-#define CVMX_IOB_DWB_PRI_CNT \
- CVMX_ADD_IO_SEG(0x00011800F0000028ull)
-#define CVMX_IOB_FAU_TIMEOUT \
- CVMX_ADD_IO_SEG(0x00011800F0000000ull)
-#define CVMX_IOB_I2C_PRI_CNT \
- CVMX_ADD_IO_SEG(0x00011800F0000010ull)
-#define CVMX_IOB_INB_CONTROL_MATCH \
- CVMX_ADD_IO_SEG(0x00011800F0000078ull)
-#define CVMX_IOB_INB_CONTROL_MATCH_ENB \
- CVMX_ADD_IO_SEG(0x00011800F0000088ull)
-#define CVMX_IOB_INB_DATA_MATCH \
- CVMX_ADD_IO_SEG(0x00011800F0000070ull)
-#define CVMX_IOB_INB_DATA_MATCH_ENB \
- CVMX_ADD_IO_SEG(0x00011800F0000080ull)
-#define CVMX_IOB_INT_ENB \
- CVMX_ADD_IO_SEG(0x00011800F0000060ull)
-#define CVMX_IOB_INT_SUM \
- CVMX_ADD_IO_SEG(0x00011800F0000058ull)
-#define CVMX_IOB_N2C_L2C_PRI_CNT \
- CVMX_ADD_IO_SEG(0x00011800F0000020ull)
-#define CVMX_IOB_N2C_RSP_PRI_CNT \
- CVMX_ADD_IO_SEG(0x00011800F0000008ull)
-#define CVMX_IOB_OUTB_COM_PRI_CNT \
- CVMX_ADD_IO_SEG(0x00011800F0000040ull)
-#define CVMX_IOB_OUTB_CONTROL_MATCH \
- CVMX_ADD_IO_SEG(0x00011800F0000098ull)
-#define CVMX_IOB_OUTB_CONTROL_MATCH_ENB \
- CVMX_ADD_IO_SEG(0x00011800F00000A8ull)
-#define CVMX_IOB_OUTB_DATA_MATCH \
- CVMX_ADD_IO_SEG(0x00011800F0000090ull)
-#define CVMX_IOB_OUTB_DATA_MATCH_ENB \
- CVMX_ADD_IO_SEG(0x00011800F00000A0ull)
-#define CVMX_IOB_OUTB_FPA_PRI_CNT \
- CVMX_ADD_IO_SEG(0x00011800F0000048ull)
-#define CVMX_IOB_OUTB_REQ_PRI_CNT \
- CVMX_ADD_IO_SEG(0x00011800F0000038ull)
-#define CVMX_IOB_P2C_REQ_PRI_CNT \
- CVMX_ADD_IO_SEG(0x00011800F0000018ull)
-#define CVMX_IOB_PKT_ERR \
- CVMX_ADD_IO_SEG(0x00011800F0000068ull)
+#define CVMX_IOB_BIST_STATUS (CVMX_ADD_IO_SEG(0x00011800F00007F8ull))
+#define CVMX_IOB_CTL_STATUS (CVMX_ADD_IO_SEG(0x00011800F0000050ull))
+#define CVMX_IOB_DWB_PRI_CNT (CVMX_ADD_IO_SEG(0x00011800F0000028ull))
+#define CVMX_IOB_FAU_TIMEOUT (CVMX_ADD_IO_SEG(0x00011800F0000000ull))
+#define CVMX_IOB_I2C_PRI_CNT (CVMX_ADD_IO_SEG(0x00011800F0000010ull))
+#define CVMX_IOB_INB_CONTROL_MATCH (CVMX_ADD_IO_SEG(0x00011800F0000078ull))
+#define CVMX_IOB_INB_CONTROL_MATCH_ENB (CVMX_ADD_IO_SEG(0x00011800F0000088ull))
+#define CVMX_IOB_INB_DATA_MATCH (CVMX_ADD_IO_SEG(0x00011800F0000070ull))
+#define CVMX_IOB_INB_DATA_MATCH_ENB (CVMX_ADD_IO_SEG(0x00011800F0000080ull))
+#define CVMX_IOB_INT_ENB (CVMX_ADD_IO_SEG(0x00011800F0000060ull))
+#define CVMX_IOB_INT_SUM (CVMX_ADD_IO_SEG(0x00011800F0000058ull))
+#define CVMX_IOB_N2C_L2C_PRI_CNT (CVMX_ADD_IO_SEG(0x00011800F0000020ull))
+#define CVMX_IOB_N2C_RSP_PRI_CNT (CVMX_ADD_IO_SEG(0x00011800F0000008ull))
+#define CVMX_IOB_OUTB_COM_PRI_CNT (CVMX_ADD_IO_SEG(0x00011800F0000040ull))
+#define CVMX_IOB_OUTB_CONTROL_MATCH (CVMX_ADD_IO_SEG(0x00011800F0000098ull))
+#define CVMX_IOB_OUTB_CONTROL_MATCH_ENB (CVMX_ADD_IO_SEG(0x00011800F00000A8ull))
+#define CVMX_IOB_OUTB_DATA_MATCH (CVMX_ADD_IO_SEG(0x00011800F0000090ull))
+#define CVMX_IOB_OUTB_DATA_MATCH_ENB (CVMX_ADD_IO_SEG(0x00011800F00000A0ull))
+#define CVMX_IOB_OUTB_FPA_PRI_CNT (CVMX_ADD_IO_SEG(0x00011800F0000048ull))
+#define CVMX_IOB_OUTB_REQ_PRI_CNT (CVMX_ADD_IO_SEG(0x00011800F0000038ull))
+#define CVMX_IOB_P2C_REQ_PRI_CNT (CVMX_ADD_IO_SEG(0x00011800F0000018ull))
+#define CVMX_IOB_PKT_ERR (CVMX_ADD_IO_SEG(0x00011800F0000068ull))
+#define CVMX_IOB_TO_CMB_CREDITS (CVMX_ADD_IO_SEG(0x00011800F00000B0ull))
union cvmx_iob_bist_status {
uint64_t u64;
struct cvmx_iob_bist_status_s {
- uint64_t reserved_18_63:46;
+ uint64_t reserved_23_63:41;
+ uint64_t xmdfif:1;
+ uint64_t xmcfif:1;
+ uint64_t iorfif:1;
+ uint64_t rsdfif:1;
+ uint64_t iocfif:1;
uint64_t icnrcb:1;
uint64_t icr0:1;
uint64_t icr1:1;
@@ -96,40 +80,81 @@ union cvmx_iob_bist_status {
uint64_t ibd:1;
uint64_t icd:1;
} s;
- struct cvmx_iob_bist_status_s cn30xx;
- struct cvmx_iob_bist_status_s cn31xx;
- struct cvmx_iob_bist_status_s cn38xx;
- struct cvmx_iob_bist_status_s cn38xxp2;
- struct cvmx_iob_bist_status_s cn50xx;
- struct cvmx_iob_bist_status_s cn52xx;
- struct cvmx_iob_bist_status_s cn52xxp1;
- struct cvmx_iob_bist_status_s cn56xx;
- struct cvmx_iob_bist_status_s cn56xxp1;
- struct cvmx_iob_bist_status_s cn58xx;
- struct cvmx_iob_bist_status_s cn58xxp1;
+ struct cvmx_iob_bist_status_cn30xx {
+ uint64_t reserved_18_63:46;
+ uint64_t icnrcb:1;
+ uint64_t icr0:1;
+ uint64_t icr1:1;
+ uint64_t icnr1:1;
+ uint64_t icnr0:1;
+ uint64_t ibdr0:1;
+ uint64_t ibdr1:1;
+ uint64_t ibr0:1;
+ uint64_t ibr1:1;
+ uint64_t icnrt:1;
+ uint64_t ibrq0:1;
+ uint64_t ibrq1:1;
+ uint64_t icrn0:1;
+ uint64_t icrn1:1;
+ uint64_t icrp0:1;
+ uint64_t icrp1:1;
+ uint64_t ibd:1;
+ uint64_t icd:1;
+ } cn30xx;
+ struct cvmx_iob_bist_status_cn30xx cn31xx;
+ struct cvmx_iob_bist_status_cn30xx cn38xx;
+ struct cvmx_iob_bist_status_cn30xx cn38xxp2;
+ struct cvmx_iob_bist_status_cn30xx cn50xx;
+ struct cvmx_iob_bist_status_cn30xx cn52xx;
+ struct cvmx_iob_bist_status_cn30xx cn52xxp1;
+ struct cvmx_iob_bist_status_cn30xx cn56xx;
+ struct cvmx_iob_bist_status_cn30xx cn56xxp1;
+ struct cvmx_iob_bist_status_cn30xx cn58xx;
+ struct cvmx_iob_bist_status_cn30xx cn58xxp1;
+ struct cvmx_iob_bist_status_s cn63xx;
+ struct cvmx_iob_bist_status_s cn63xxp1;
};
union cvmx_iob_ctl_status {
uint64_t u64;
struct cvmx_iob_ctl_status_s {
- uint64_t reserved_5_63:59;
+ uint64_t reserved_10_63:54;
+ uint64_t xmc_per:4;
+ uint64_t rr_mode:1;
uint64_t outb_mat:1;
uint64_t inb_mat:1;
uint64_t pko_enb:1;
uint64_t dwb_enb:1;
uint64_t fau_end:1;
} s;
- struct cvmx_iob_ctl_status_s cn30xx;
- struct cvmx_iob_ctl_status_s cn31xx;
- struct cvmx_iob_ctl_status_s cn38xx;
- struct cvmx_iob_ctl_status_s cn38xxp2;
- struct cvmx_iob_ctl_status_s cn50xx;
- struct cvmx_iob_ctl_status_s cn52xx;
- struct cvmx_iob_ctl_status_s cn52xxp1;
- struct cvmx_iob_ctl_status_s cn56xx;
- struct cvmx_iob_ctl_status_s cn56xxp1;
- struct cvmx_iob_ctl_status_s cn58xx;
- struct cvmx_iob_ctl_status_s cn58xxp1;
+ struct cvmx_iob_ctl_status_cn30xx {
+ uint64_t reserved_5_63:59;
+ uint64_t outb_mat:1;
+ uint64_t inb_mat:1;
+ uint64_t pko_enb:1;
+ uint64_t dwb_enb:1;
+ uint64_t fau_end:1;
+ } cn30xx;
+ struct cvmx_iob_ctl_status_cn30xx cn31xx;
+ struct cvmx_iob_ctl_status_cn30xx cn38xx;
+ struct cvmx_iob_ctl_status_cn30xx cn38xxp2;
+ struct cvmx_iob_ctl_status_cn30xx cn50xx;
+ struct cvmx_iob_ctl_status_cn52xx {
+ uint64_t reserved_6_63:58;
+ uint64_t rr_mode:1;
+ uint64_t outb_mat:1;
+ uint64_t inb_mat:1;
+ uint64_t pko_enb:1;
+ uint64_t dwb_enb:1;
+ uint64_t fau_end:1;
+ } cn52xx;
+ struct cvmx_iob_ctl_status_cn30xx cn52xxp1;
+ struct cvmx_iob_ctl_status_cn30xx cn56xx;
+ struct cvmx_iob_ctl_status_cn30xx cn56xxp1;
+ struct cvmx_iob_ctl_status_cn30xx cn58xx;
+ struct cvmx_iob_ctl_status_cn30xx cn58xxp1;
+ struct cvmx_iob_ctl_status_s cn63xx;
+ struct cvmx_iob_ctl_status_s cn63xxp1;
};
union cvmx_iob_dwb_pri_cnt {
@@ -147,6 +172,8 @@ union cvmx_iob_dwb_pri_cnt {
struct cvmx_iob_dwb_pri_cnt_s cn56xxp1;
struct cvmx_iob_dwb_pri_cnt_s cn58xx;
struct cvmx_iob_dwb_pri_cnt_s cn58xxp1;
+ struct cvmx_iob_dwb_pri_cnt_s cn63xx;
+ struct cvmx_iob_dwb_pri_cnt_s cn63xxp1;
};
union cvmx_iob_fau_timeout {
@@ -167,6 +194,8 @@ union cvmx_iob_fau_timeout {
struct cvmx_iob_fau_timeout_s cn56xxp1;
struct cvmx_iob_fau_timeout_s cn58xx;
struct cvmx_iob_fau_timeout_s cn58xxp1;
+ struct cvmx_iob_fau_timeout_s cn63xx;
+ struct cvmx_iob_fau_timeout_s cn63xxp1;
};
union cvmx_iob_i2c_pri_cnt {
@@ -184,6 +213,8 @@ union cvmx_iob_i2c_pri_cnt {
struct cvmx_iob_i2c_pri_cnt_s cn56xxp1;
struct cvmx_iob_i2c_pri_cnt_s cn58xx;
struct cvmx_iob_i2c_pri_cnt_s cn58xxp1;
+ struct cvmx_iob_i2c_pri_cnt_s cn63xx;
+ struct cvmx_iob_i2c_pri_cnt_s cn63xxp1;
};
union cvmx_iob_inb_control_match {
@@ -206,6 +237,8 @@ union cvmx_iob_inb_control_match {
struct cvmx_iob_inb_control_match_s cn56xxp1;
struct cvmx_iob_inb_control_match_s cn58xx;
struct cvmx_iob_inb_control_match_s cn58xxp1;
+ struct cvmx_iob_inb_control_match_s cn63xx;
+ struct cvmx_iob_inb_control_match_s cn63xxp1;
};
union cvmx_iob_inb_control_match_enb {
@@ -228,6 +261,8 @@ union cvmx_iob_inb_control_match_enb {
struct cvmx_iob_inb_control_match_enb_s cn56xxp1;
struct cvmx_iob_inb_control_match_enb_s cn58xx;
struct cvmx_iob_inb_control_match_enb_s cn58xxp1;
+ struct cvmx_iob_inb_control_match_enb_s cn63xx;
+ struct cvmx_iob_inb_control_match_enb_s cn63xxp1;
};
union cvmx_iob_inb_data_match {
@@ -246,6 +281,8 @@ union cvmx_iob_inb_data_match {
struct cvmx_iob_inb_data_match_s cn56xxp1;
struct cvmx_iob_inb_data_match_s cn58xx;
struct cvmx_iob_inb_data_match_s cn58xxp1;
+ struct cvmx_iob_inb_data_match_s cn63xx;
+ struct cvmx_iob_inb_data_match_s cn63xxp1;
};
union cvmx_iob_inb_data_match_enb {
@@ -264,6 +301,8 @@ union cvmx_iob_inb_data_match_enb {
struct cvmx_iob_inb_data_match_enb_s cn56xxp1;
struct cvmx_iob_inb_data_match_enb_s cn58xx;
struct cvmx_iob_inb_data_match_enb_s cn58xxp1;
+ struct cvmx_iob_inb_data_match_enb_s cn63xx;
+ struct cvmx_iob_inb_data_match_enb_s cn63xxp1;
};
union cvmx_iob_int_enb {
@@ -294,6 +333,8 @@ union cvmx_iob_int_enb {
struct cvmx_iob_int_enb_s cn56xxp1;
struct cvmx_iob_int_enb_s cn58xx;
struct cvmx_iob_int_enb_s cn58xxp1;
+ struct cvmx_iob_int_enb_s cn63xx;
+ struct cvmx_iob_int_enb_s cn63xxp1;
};
union cvmx_iob_int_sum {
@@ -324,6 +365,8 @@ union cvmx_iob_int_sum {
struct cvmx_iob_int_sum_s cn56xxp1;
struct cvmx_iob_int_sum_s cn58xx;
struct cvmx_iob_int_sum_s cn58xxp1;
+ struct cvmx_iob_int_sum_s cn63xx;
+ struct cvmx_iob_int_sum_s cn63xxp1;
};
union cvmx_iob_n2c_l2c_pri_cnt {
@@ -341,6 +384,8 @@ union cvmx_iob_n2c_l2c_pri_cnt {
struct cvmx_iob_n2c_l2c_pri_cnt_s cn56xxp1;
struct cvmx_iob_n2c_l2c_pri_cnt_s cn58xx;
struct cvmx_iob_n2c_l2c_pri_cnt_s cn58xxp1;
+ struct cvmx_iob_n2c_l2c_pri_cnt_s cn63xx;
+ struct cvmx_iob_n2c_l2c_pri_cnt_s cn63xxp1;
};
union cvmx_iob_n2c_rsp_pri_cnt {
@@ -358,6 +403,8 @@ union cvmx_iob_n2c_rsp_pri_cnt {
struct cvmx_iob_n2c_rsp_pri_cnt_s cn56xxp1;
struct cvmx_iob_n2c_rsp_pri_cnt_s cn58xx;
struct cvmx_iob_n2c_rsp_pri_cnt_s cn58xxp1;
+ struct cvmx_iob_n2c_rsp_pri_cnt_s cn63xx;
+ struct cvmx_iob_n2c_rsp_pri_cnt_s cn63xxp1;
};
union cvmx_iob_outb_com_pri_cnt {
@@ -375,6 +422,8 @@ union cvmx_iob_outb_com_pri_cnt {
struct cvmx_iob_outb_com_pri_cnt_s cn56xxp1;
struct cvmx_iob_outb_com_pri_cnt_s cn58xx;
struct cvmx_iob_outb_com_pri_cnt_s cn58xxp1;
+ struct cvmx_iob_outb_com_pri_cnt_s cn63xx;
+ struct cvmx_iob_outb_com_pri_cnt_s cn63xxp1;
};
union cvmx_iob_outb_control_match {
@@ -397,6 +446,8 @@ union cvmx_iob_outb_control_match {
struct cvmx_iob_outb_control_match_s cn56xxp1;
struct cvmx_iob_outb_control_match_s cn58xx;
struct cvmx_iob_outb_control_match_s cn58xxp1;
+ struct cvmx_iob_outb_control_match_s cn63xx;
+ struct cvmx_iob_outb_control_match_s cn63xxp1;
};
union cvmx_iob_outb_control_match_enb {
@@ -419,6 +470,8 @@ union cvmx_iob_outb_control_match_enb {
struct cvmx_iob_outb_control_match_enb_s cn56xxp1;
struct cvmx_iob_outb_control_match_enb_s cn58xx;
struct cvmx_iob_outb_control_match_enb_s cn58xxp1;
+ struct cvmx_iob_outb_control_match_enb_s cn63xx;
+ struct cvmx_iob_outb_control_match_enb_s cn63xxp1;
};
union cvmx_iob_outb_data_match {
@@ -437,6 +490,8 @@ union cvmx_iob_outb_data_match {
struct cvmx_iob_outb_data_match_s cn56xxp1;
struct cvmx_iob_outb_data_match_s cn58xx;
struct cvmx_iob_outb_data_match_s cn58xxp1;
+ struct cvmx_iob_outb_data_match_s cn63xx;
+ struct cvmx_iob_outb_data_match_s cn63xxp1;
};
union cvmx_iob_outb_data_match_enb {
@@ -455,6 +510,8 @@ union cvmx_iob_outb_data_match_enb {
struct cvmx_iob_outb_data_match_enb_s cn56xxp1;
struct cvmx_iob_outb_data_match_enb_s cn58xx;
struct cvmx_iob_outb_data_match_enb_s cn58xxp1;
+ struct cvmx_iob_outb_data_match_enb_s cn63xx;
+ struct cvmx_iob_outb_data_match_enb_s cn63xxp1;
};
union cvmx_iob_outb_fpa_pri_cnt {
@@ -472,6 +529,8 @@ union cvmx_iob_outb_fpa_pri_cnt {
struct cvmx_iob_outb_fpa_pri_cnt_s cn56xxp1;
struct cvmx_iob_outb_fpa_pri_cnt_s cn58xx;
struct cvmx_iob_outb_fpa_pri_cnt_s cn58xxp1;
+ struct cvmx_iob_outb_fpa_pri_cnt_s cn63xx;
+ struct cvmx_iob_outb_fpa_pri_cnt_s cn63xxp1;
};
union cvmx_iob_outb_req_pri_cnt {
@@ -489,6 +548,8 @@ union cvmx_iob_outb_req_pri_cnt {
struct cvmx_iob_outb_req_pri_cnt_s cn56xxp1;
struct cvmx_iob_outb_req_pri_cnt_s cn58xx;
struct cvmx_iob_outb_req_pri_cnt_s cn58xxp1;
+ struct cvmx_iob_outb_req_pri_cnt_s cn63xx;
+ struct cvmx_iob_outb_req_pri_cnt_s cn63xxp1;
};
union cvmx_iob_p2c_req_pri_cnt {
@@ -506,25 +567,46 @@ union cvmx_iob_p2c_req_pri_cnt {
struct cvmx_iob_p2c_req_pri_cnt_s cn56xxp1;
struct cvmx_iob_p2c_req_pri_cnt_s cn58xx;
struct cvmx_iob_p2c_req_pri_cnt_s cn58xxp1;
+ struct cvmx_iob_p2c_req_pri_cnt_s cn63xx;
+ struct cvmx_iob_p2c_req_pri_cnt_s cn63xxp1;
};
union cvmx_iob_pkt_err {
uint64_t u64;
struct cvmx_iob_pkt_err_s {
+ uint64_t reserved_12_63:52;
+ uint64_t vport:6;
+ uint64_t port:6;
+ } s;
+ struct cvmx_iob_pkt_err_cn30xx {
uint64_t reserved_6_63:58;
uint64_t port:6;
+ } cn30xx;
+ struct cvmx_iob_pkt_err_cn30xx cn31xx;
+ struct cvmx_iob_pkt_err_cn30xx cn38xx;
+ struct cvmx_iob_pkt_err_cn30xx cn38xxp2;
+ struct cvmx_iob_pkt_err_cn30xx cn50xx;
+ struct cvmx_iob_pkt_err_cn30xx cn52xx;
+ struct cvmx_iob_pkt_err_cn30xx cn52xxp1;
+ struct cvmx_iob_pkt_err_cn30xx cn56xx;
+ struct cvmx_iob_pkt_err_cn30xx cn56xxp1;
+ struct cvmx_iob_pkt_err_cn30xx cn58xx;
+ struct cvmx_iob_pkt_err_cn30xx cn58xxp1;
+ struct cvmx_iob_pkt_err_s cn63xx;
+ struct cvmx_iob_pkt_err_s cn63xxp1;
+};
+
+union cvmx_iob_to_cmb_credits {
+ uint64_t u64;
+ struct cvmx_iob_to_cmb_credits_s {
+ uint64_t reserved_9_63:55;
+ uint64_t pko_rd:3;
+ uint64_t ncb_rd:3;
+ uint64_t ncb_wr:3;
} s;
- struct cvmx_iob_pkt_err_s cn30xx;
- struct cvmx_iob_pkt_err_s cn31xx;
- struct cvmx_iob_pkt_err_s cn38xx;
- struct cvmx_iob_pkt_err_s cn38xxp2;
- struct cvmx_iob_pkt_err_s cn50xx;
- struct cvmx_iob_pkt_err_s cn52xx;
- struct cvmx_iob_pkt_err_s cn52xxp1;
- struct cvmx_iob_pkt_err_s cn56xx;
- struct cvmx_iob_pkt_err_s cn56xxp1;
- struct cvmx_iob_pkt_err_s cn58xx;
- struct cvmx_iob_pkt_err_s cn58xxp1;
+ struct cvmx_iob_to_cmb_credits_s cn52xx;
+ struct cvmx_iob_to_cmb_credits_s cn63xx;
+ struct cvmx_iob_to_cmb_credits_s cn63xxp1;
};
#endif
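/*
 * Illustrative sketch (an assumption about typical usage, not part of the
 * patch): the IOB BIST status register defined above is normally read once
 * and any set bit is treated as a failed internal memory.  On CN63XX the
 * _s layout carries the extra xmdfif/xmcfif/iorfif/rsdfif/iocfif bits,
 * while the older chips use the cn30xx layout.
 */
#include <asm/octeon/cvmx.h>
#include <asm/octeon/cvmx-iob-defs.h>

static int example_iob_bist_failed(void)
{
	union cvmx_iob_bist_status bist;

	bist.u64 = cvmx_read_csr(CVMX_IOB_BIST_STATUS);
	return bist.u64 != 0;	/* non-zero means at least one BIST failure */
}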
diff --git a/arch/mips/include/asm/octeon/cvmx-ipd-defs.h b/arch/mips/include/asm/octeon/cvmx-ipd-defs.h
index f8b8fc657d2c..e0a5bfe88d04 100644
--- a/arch/mips/include/asm/octeon/cvmx-ipd-defs.h
+++ b/arch/mips/include/asm/octeon/cvmx-ipd-defs.h
@@ -4,7 +4,7 @@
* Contact: support@caviumnetworks.com
* This file is part of the OCTEON SDK
*
- * Copyright (c) 2003-2008 Cavium Networks
+ * Copyright (c) 2003-2010 Cavium Networks
*
* This file is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License, Version 2, as
@@ -28,104 +28,57 @@
#ifndef __CVMX_IPD_DEFS_H__
#define __CVMX_IPD_DEFS_H__
-#define CVMX_IPD_1ST_MBUFF_SKIP \
- CVMX_ADD_IO_SEG(0x00014F0000000000ull)
-#define CVMX_IPD_1st_NEXT_PTR_BACK \
- CVMX_ADD_IO_SEG(0x00014F0000000150ull)
-#define CVMX_IPD_2nd_NEXT_PTR_BACK \
- CVMX_ADD_IO_SEG(0x00014F0000000158ull)
-#define CVMX_IPD_BIST_STATUS \
- CVMX_ADD_IO_SEG(0x00014F00000007F8ull)
-#define CVMX_IPD_BP_PRT_RED_END \
- CVMX_ADD_IO_SEG(0x00014F0000000328ull)
-#define CVMX_IPD_CLK_COUNT \
- CVMX_ADD_IO_SEG(0x00014F0000000338ull)
-#define CVMX_IPD_CTL_STATUS \
- CVMX_ADD_IO_SEG(0x00014F0000000018ull)
-#define CVMX_IPD_INT_ENB \
- CVMX_ADD_IO_SEG(0x00014F0000000160ull)
-#define CVMX_IPD_INT_SUM \
- CVMX_ADD_IO_SEG(0x00014F0000000168ull)
-#define CVMX_IPD_NOT_1ST_MBUFF_SKIP \
- CVMX_ADD_IO_SEG(0x00014F0000000008ull)
-#define CVMX_IPD_PACKET_MBUFF_SIZE \
- CVMX_ADD_IO_SEG(0x00014F0000000010ull)
-#define CVMX_IPD_PKT_PTR_VALID \
- CVMX_ADD_IO_SEG(0x00014F0000000358ull)
-#define CVMX_IPD_PORTX_BP_PAGE_CNT(offset) \
- CVMX_ADD_IO_SEG(0x00014F0000000028ull + (((offset) & 63) * 8))
-#define CVMX_IPD_PORTX_BP_PAGE_CNT2(offset) \
- CVMX_ADD_IO_SEG(0x00014F0000000368ull + (((offset) & 63) * 8) - 8 * 36)
-#define CVMX_IPD_PORT_BP_COUNTERS2_PAIRX(offset) \
- CVMX_ADD_IO_SEG(0x00014F0000000388ull + (((offset) & 63) * 8) - 8 * 36)
-#define CVMX_IPD_PORT_BP_COUNTERS_PAIRX(offset) \
- CVMX_ADD_IO_SEG(0x00014F00000001B8ull + (((offset) & 63) * 8))
-#define CVMX_IPD_PORT_QOS_INTX(offset) \
- CVMX_ADD_IO_SEG(0x00014F0000000808ull + (((offset) & 7) * 8))
-#define CVMX_IPD_PORT_QOS_INT_ENBX(offset) \
- CVMX_ADD_IO_SEG(0x00014F0000000848ull + (((offset) & 7) * 8))
-#define CVMX_IPD_PORT_QOS_X_CNT(offset) \
- CVMX_ADD_IO_SEG(0x00014F0000000888ull + (((offset) & 511) * 8))
-#define CVMX_IPD_PRC_HOLD_PTR_FIFO_CTL \
- CVMX_ADD_IO_SEG(0x00014F0000000348ull)
-#define CVMX_IPD_PRC_PORT_PTR_FIFO_CTL \
- CVMX_ADD_IO_SEG(0x00014F0000000350ull)
-#define CVMX_IPD_PTR_COUNT \
- CVMX_ADD_IO_SEG(0x00014F0000000320ull)
-#define CVMX_IPD_PWP_PTR_FIFO_CTL \
- CVMX_ADD_IO_SEG(0x00014F0000000340ull)
-#define CVMX_IPD_QOS0_RED_MARKS \
- CVMX_ADD_IO_SEG(0x00014F0000000178ull)
-#define CVMX_IPD_QOS1_RED_MARKS \
- CVMX_ADD_IO_SEG(0x00014F0000000180ull)
-#define CVMX_IPD_QOS2_RED_MARKS \
- CVMX_ADD_IO_SEG(0x00014F0000000188ull)
-#define CVMX_IPD_QOS3_RED_MARKS \
- CVMX_ADD_IO_SEG(0x00014F0000000190ull)
-#define CVMX_IPD_QOS4_RED_MARKS \
- CVMX_ADD_IO_SEG(0x00014F0000000198ull)
-#define CVMX_IPD_QOS5_RED_MARKS \
- CVMX_ADD_IO_SEG(0x00014F00000001A0ull)
-#define CVMX_IPD_QOS6_RED_MARKS \
- CVMX_ADD_IO_SEG(0x00014F00000001A8ull)
-#define CVMX_IPD_QOS7_RED_MARKS \
- CVMX_ADD_IO_SEG(0x00014F00000001B0ull)
-#define CVMX_IPD_QOSX_RED_MARKS(offset) \
- CVMX_ADD_IO_SEG(0x00014F0000000178ull + (((offset) & 7) * 8))
-#define CVMX_IPD_QUE0_FREE_PAGE_CNT \
- CVMX_ADD_IO_SEG(0x00014F0000000330ull)
-#define CVMX_IPD_RED_PORT_ENABLE \
- CVMX_ADD_IO_SEG(0x00014F00000002D8ull)
-#define CVMX_IPD_RED_PORT_ENABLE2 \
- CVMX_ADD_IO_SEG(0x00014F00000003A8ull)
-#define CVMX_IPD_RED_QUE0_PARAM \
- CVMX_ADD_IO_SEG(0x00014F00000002E0ull)
-#define CVMX_IPD_RED_QUE1_PARAM \
- CVMX_ADD_IO_SEG(0x00014F00000002E8ull)
-#define CVMX_IPD_RED_QUE2_PARAM \
- CVMX_ADD_IO_SEG(0x00014F00000002F0ull)
-#define CVMX_IPD_RED_QUE3_PARAM \
- CVMX_ADD_IO_SEG(0x00014F00000002F8ull)
-#define CVMX_IPD_RED_QUE4_PARAM \
- CVMX_ADD_IO_SEG(0x00014F0000000300ull)
-#define CVMX_IPD_RED_QUE5_PARAM \
- CVMX_ADD_IO_SEG(0x00014F0000000308ull)
-#define CVMX_IPD_RED_QUE6_PARAM \
- CVMX_ADD_IO_SEG(0x00014F0000000310ull)
-#define CVMX_IPD_RED_QUE7_PARAM \
- CVMX_ADD_IO_SEG(0x00014F0000000318ull)
-#define CVMX_IPD_RED_QUEX_PARAM(offset) \
- CVMX_ADD_IO_SEG(0x00014F00000002E0ull + (((offset) & 7) * 8))
-#define CVMX_IPD_SUB_PORT_BP_PAGE_CNT \
- CVMX_ADD_IO_SEG(0x00014F0000000148ull)
-#define CVMX_IPD_SUB_PORT_FCS \
- CVMX_ADD_IO_SEG(0x00014F0000000170ull)
-#define CVMX_IPD_SUB_PORT_QOS_CNT \
- CVMX_ADD_IO_SEG(0x00014F0000000800ull)
-#define CVMX_IPD_WQE_FPA_QUEUE \
- CVMX_ADD_IO_SEG(0x00014F0000000020ull)
-#define CVMX_IPD_WQE_PTR_VALID \
- CVMX_ADD_IO_SEG(0x00014F0000000360ull)
+#define CVMX_IPD_1ST_MBUFF_SKIP (CVMX_ADD_IO_SEG(0x00014F0000000000ull))
+#define CVMX_IPD_1st_NEXT_PTR_BACK (CVMX_ADD_IO_SEG(0x00014F0000000150ull))
+#define CVMX_IPD_2nd_NEXT_PTR_BACK (CVMX_ADD_IO_SEG(0x00014F0000000158ull))
+#define CVMX_IPD_BIST_STATUS (CVMX_ADD_IO_SEG(0x00014F00000007F8ull))
+#define CVMX_IPD_BP_PRT_RED_END (CVMX_ADD_IO_SEG(0x00014F0000000328ull))
+#define CVMX_IPD_CLK_COUNT (CVMX_ADD_IO_SEG(0x00014F0000000338ull))
+#define CVMX_IPD_CTL_STATUS (CVMX_ADD_IO_SEG(0x00014F0000000018ull))
+#define CVMX_IPD_INT_ENB (CVMX_ADD_IO_SEG(0x00014F0000000160ull))
+#define CVMX_IPD_INT_SUM (CVMX_ADD_IO_SEG(0x00014F0000000168ull))
+#define CVMX_IPD_NOT_1ST_MBUFF_SKIP (CVMX_ADD_IO_SEG(0x00014F0000000008ull))
+#define CVMX_IPD_PACKET_MBUFF_SIZE (CVMX_ADD_IO_SEG(0x00014F0000000010ull))
+#define CVMX_IPD_PKT_PTR_VALID (CVMX_ADD_IO_SEG(0x00014F0000000358ull))
+#define CVMX_IPD_PORTX_BP_PAGE_CNT(offset) (CVMX_ADD_IO_SEG(0x00014F0000000028ull) + ((offset) & 63) * 8)
+#define CVMX_IPD_PORTX_BP_PAGE_CNT2(offset) (CVMX_ADD_IO_SEG(0x00014F0000000368ull) + ((offset) & 63) * 8 - 8*36)
+#define CVMX_IPD_PORTX_BP_PAGE_CNT3(offset) (CVMX_ADD_IO_SEG(0x00014F00000003D0ull) + ((offset) & 63) * 8 - 8*40)
+#define CVMX_IPD_PORT_BP_COUNTERS2_PAIRX(offset) (CVMX_ADD_IO_SEG(0x00014F0000000388ull) + ((offset) & 63) * 8 - 8*36)
+#define CVMX_IPD_PORT_BP_COUNTERS3_PAIRX(offset) (CVMX_ADD_IO_SEG(0x00014F00000003B0ull) + ((offset) & 63) * 8 - 8*40)
+#define CVMX_IPD_PORT_BP_COUNTERS_PAIRX(offset) (CVMX_ADD_IO_SEG(0x00014F00000001B8ull) + ((offset) & 63) * 8)
+#define CVMX_IPD_PORT_QOS_INTX(offset) (CVMX_ADD_IO_SEG(0x00014F0000000808ull) + ((offset) & 7) * 8)
+#define CVMX_IPD_PORT_QOS_INT_ENBX(offset) (CVMX_ADD_IO_SEG(0x00014F0000000848ull) + ((offset) & 7) * 8)
+#define CVMX_IPD_PORT_QOS_X_CNT(offset) (CVMX_ADD_IO_SEG(0x00014F0000000888ull) + ((offset) & 511) * 8)
+#define CVMX_IPD_PRC_HOLD_PTR_FIFO_CTL (CVMX_ADD_IO_SEG(0x00014F0000000348ull))
+#define CVMX_IPD_PRC_PORT_PTR_FIFO_CTL (CVMX_ADD_IO_SEG(0x00014F0000000350ull))
+#define CVMX_IPD_PTR_COUNT (CVMX_ADD_IO_SEG(0x00014F0000000320ull))
+#define CVMX_IPD_PWP_PTR_FIFO_CTL (CVMX_ADD_IO_SEG(0x00014F0000000340ull))
+#define CVMX_IPD_QOS0_RED_MARKS CVMX_IPD_QOSX_RED_MARKS(0)
+#define CVMX_IPD_QOS1_RED_MARKS CVMX_IPD_QOSX_RED_MARKS(1)
+#define CVMX_IPD_QOS2_RED_MARKS CVMX_IPD_QOSX_RED_MARKS(2)
+#define CVMX_IPD_QOS3_RED_MARKS CVMX_IPD_QOSX_RED_MARKS(3)
+#define CVMX_IPD_QOS4_RED_MARKS CVMX_IPD_QOSX_RED_MARKS(4)
+#define CVMX_IPD_QOS5_RED_MARKS CVMX_IPD_QOSX_RED_MARKS(5)
+#define CVMX_IPD_QOS6_RED_MARKS CVMX_IPD_QOSX_RED_MARKS(6)
+#define CVMX_IPD_QOS7_RED_MARKS CVMX_IPD_QOSX_RED_MARKS(7)
+#define CVMX_IPD_QOSX_RED_MARKS(offset) (CVMX_ADD_IO_SEG(0x00014F0000000178ull) + ((offset) & 7) * 8)
+#define CVMX_IPD_QUE0_FREE_PAGE_CNT (CVMX_ADD_IO_SEG(0x00014F0000000330ull))
+#define CVMX_IPD_RED_PORT_ENABLE (CVMX_ADD_IO_SEG(0x00014F00000002D8ull))
+#define CVMX_IPD_RED_PORT_ENABLE2 (CVMX_ADD_IO_SEG(0x00014F00000003A8ull))
+#define CVMX_IPD_RED_QUE0_PARAM CVMX_IPD_RED_QUEX_PARAM(0)
+#define CVMX_IPD_RED_QUE1_PARAM CVMX_IPD_RED_QUEX_PARAM(1)
+#define CVMX_IPD_RED_QUE2_PARAM CVMX_IPD_RED_QUEX_PARAM(2)
+#define CVMX_IPD_RED_QUE3_PARAM CVMX_IPD_RED_QUEX_PARAM(3)
+#define CVMX_IPD_RED_QUE4_PARAM CVMX_IPD_RED_QUEX_PARAM(4)
+#define CVMX_IPD_RED_QUE5_PARAM CVMX_IPD_RED_QUEX_PARAM(5)
+#define CVMX_IPD_RED_QUE6_PARAM CVMX_IPD_RED_QUEX_PARAM(6)
+#define CVMX_IPD_RED_QUE7_PARAM CVMX_IPD_RED_QUEX_PARAM(7)
+#define CVMX_IPD_RED_QUEX_PARAM(offset) (CVMX_ADD_IO_SEG(0x00014F00000002E0ull) + ((offset) & 7) * 8)
+#define CVMX_IPD_SUB_PORT_BP_PAGE_CNT (CVMX_ADD_IO_SEG(0x00014F0000000148ull))
+#define CVMX_IPD_SUB_PORT_FCS (CVMX_ADD_IO_SEG(0x00014F0000000170ull))
+#define CVMX_IPD_SUB_PORT_QOS_CNT (CVMX_ADD_IO_SEG(0x00014F0000000800ull))
+#define CVMX_IPD_WQE_FPA_QUEUE (CVMX_ADD_IO_SEG(0x00014F0000000020ull))
+#define CVMX_IPD_WQE_PTR_VALID (CVMX_ADD_IO_SEG(0x00014F0000000360ull))
union cvmx_ipd_1st_mbuff_skip {
uint64_t u64;
@@ -144,6 +97,8 @@ union cvmx_ipd_1st_mbuff_skip {
struct cvmx_ipd_1st_mbuff_skip_s cn56xxp1;
struct cvmx_ipd_1st_mbuff_skip_s cn58xx;
struct cvmx_ipd_1st_mbuff_skip_s cn58xxp1;
+ struct cvmx_ipd_1st_mbuff_skip_s cn63xx;
+ struct cvmx_ipd_1st_mbuff_skip_s cn63xxp1;
};
union cvmx_ipd_1st_next_ptr_back {
@@ -163,6 +118,8 @@ union cvmx_ipd_1st_next_ptr_back {
struct cvmx_ipd_1st_next_ptr_back_s cn56xxp1;
struct cvmx_ipd_1st_next_ptr_back_s cn58xx;
struct cvmx_ipd_1st_next_ptr_back_s cn58xxp1;
+ struct cvmx_ipd_1st_next_ptr_back_s cn63xx;
+ struct cvmx_ipd_1st_next_ptr_back_s cn63xxp1;
};
union cvmx_ipd_2nd_next_ptr_back {
@@ -182,6 +139,8 @@ union cvmx_ipd_2nd_next_ptr_back {
struct cvmx_ipd_2nd_next_ptr_back_s cn56xxp1;
struct cvmx_ipd_2nd_next_ptr_back_s cn58xx;
struct cvmx_ipd_2nd_next_ptr_back_s cn58xxp1;
+ struct cvmx_ipd_2nd_next_ptr_back_s cn63xx;
+ struct cvmx_ipd_2nd_next_ptr_back_s cn63xxp1;
};
union cvmx_ipd_bist_status {
@@ -236,13 +195,15 @@ union cvmx_ipd_bist_status {
struct cvmx_ipd_bist_status_s cn56xxp1;
struct cvmx_ipd_bist_status_cn30xx cn58xx;
struct cvmx_ipd_bist_status_cn30xx cn58xxp1;
+ struct cvmx_ipd_bist_status_s cn63xx;
+ struct cvmx_ipd_bist_status_s cn63xxp1;
};
union cvmx_ipd_bp_prt_red_end {
uint64_t u64;
struct cvmx_ipd_bp_prt_red_end_s {
- uint64_t reserved_40_63:24;
- uint64_t prt_enb:40;
+ uint64_t reserved_44_63:20;
+ uint64_t prt_enb:44;
} s;
struct cvmx_ipd_bp_prt_red_end_cn30xx {
uint64_t reserved_36_63:28;
@@ -252,12 +213,17 @@ union cvmx_ipd_bp_prt_red_end {
struct cvmx_ipd_bp_prt_red_end_cn30xx cn38xx;
struct cvmx_ipd_bp_prt_red_end_cn30xx cn38xxp2;
struct cvmx_ipd_bp_prt_red_end_cn30xx cn50xx;
- struct cvmx_ipd_bp_prt_red_end_s cn52xx;
- struct cvmx_ipd_bp_prt_red_end_s cn52xxp1;
- struct cvmx_ipd_bp_prt_red_end_s cn56xx;
- struct cvmx_ipd_bp_prt_red_end_s cn56xxp1;
+ struct cvmx_ipd_bp_prt_red_end_cn52xx {
+ uint64_t reserved_40_63:24;
+ uint64_t prt_enb:40;
+ } cn52xx;
+ struct cvmx_ipd_bp_prt_red_end_cn52xx cn52xxp1;
+ struct cvmx_ipd_bp_prt_red_end_cn52xx cn56xx;
+ struct cvmx_ipd_bp_prt_red_end_cn52xx cn56xxp1;
struct cvmx_ipd_bp_prt_red_end_cn30xx cn58xx;
struct cvmx_ipd_bp_prt_red_end_cn30xx cn58xxp1;
+ struct cvmx_ipd_bp_prt_red_end_s cn63xx;
+ struct cvmx_ipd_bp_prt_red_end_s cn63xxp1;
};
union cvmx_ipd_clk_count {
@@ -276,12 +242,17 @@ union cvmx_ipd_clk_count {
struct cvmx_ipd_clk_count_s cn56xxp1;
struct cvmx_ipd_clk_count_s cn58xx;
struct cvmx_ipd_clk_count_s cn58xxp1;
+ struct cvmx_ipd_clk_count_s cn63xx;
+ struct cvmx_ipd_clk_count_s cn63xxp1;
};
union cvmx_ipd_ctl_status {
uint64_t u64;
struct cvmx_ipd_ctl_status_s {
- uint64_t reserved_15_63:49;
+ uint64_t reserved_18_63:46;
+ uint64_t use_sop:1;
+ uint64_t rst_done:1;
+ uint64_t clken:1;
uint64_t no_wptr:1;
uint64_t pq_apkt:1;
uint64_t pq_nabuf:1;
@@ -322,11 +293,27 @@ union cvmx_ipd_ctl_status {
uint64_t opc_mode:2;
uint64_t ipd_en:1;
} cn38xxp2;
- struct cvmx_ipd_ctl_status_s cn50xx;
- struct cvmx_ipd_ctl_status_s cn52xx;
- struct cvmx_ipd_ctl_status_s cn52xxp1;
- struct cvmx_ipd_ctl_status_s cn56xx;
- struct cvmx_ipd_ctl_status_s cn56xxp1;
+ struct cvmx_ipd_ctl_status_cn50xx {
+ uint64_t reserved_15_63:49;
+ uint64_t no_wptr:1;
+ uint64_t pq_apkt:1;
+ uint64_t pq_nabuf:1;
+ uint64_t ipd_full:1;
+ uint64_t pkt_off:1;
+ uint64_t len_m8:1;
+ uint64_t reset:1;
+ uint64_t addpkt:1;
+ uint64_t naddbuf:1;
+ uint64_t pkt_lend:1;
+ uint64_t wqe_lend:1;
+ uint64_t pbp_en:1;
+ uint64_t opc_mode:2;
+ uint64_t ipd_en:1;
+ } cn50xx;
+ struct cvmx_ipd_ctl_status_cn50xx cn52xx;
+ struct cvmx_ipd_ctl_status_cn50xx cn52xxp1;
+ struct cvmx_ipd_ctl_status_cn50xx cn56xx;
+ struct cvmx_ipd_ctl_status_cn50xx cn56xxp1;
struct cvmx_ipd_ctl_status_cn58xx {
uint64_t reserved_12_63:52;
uint64_t ipd_full:1;
@@ -342,6 +329,25 @@ union cvmx_ipd_ctl_status {
uint64_t ipd_en:1;
} cn58xx;
struct cvmx_ipd_ctl_status_cn58xx cn58xxp1;
+ struct cvmx_ipd_ctl_status_s cn63xx;
+ struct cvmx_ipd_ctl_status_cn63xxp1 {
+ uint64_t reserved_16_63:48;
+ uint64_t clken:1;
+ uint64_t no_wptr:1;
+ uint64_t pq_apkt:1;
+ uint64_t pq_nabuf:1;
+ uint64_t ipd_full:1;
+ uint64_t pkt_off:1;
+ uint64_t len_m8:1;
+ uint64_t reset:1;
+ uint64_t addpkt:1;
+ uint64_t naddbuf:1;
+ uint64_t pkt_lend:1;
+ uint64_t wqe_lend:1;
+ uint64_t pbp_en:1;
+ uint64_t opc_mode:2;
+ uint64_t ipd_en:1;
+ } cn63xxp1;
};
union cvmx_ipd_int_enb {
@@ -391,6 +397,8 @@ union cvmx_ipd_int_enb {
struct cvmx_ipd_int_enb_s cn56xxp1;
struct cvmx_ipd_int_enb_cn38xx cn58xx;
struct cvmx_ipd_int_enb_cn38xx cn58xxp1;
+ struct cvmx_ipd_int_enb_s cn63xx;
+ struct cvmx_ipd_int_enb_s cn63xxp1;
};
union cvmx_ipd_int_sum {
@@ -440,6 +448,8 @@ union cvmx_ipd_int_sum {
struct cvmx_ipd_int_sum_s cn56xxp1;
struct cvmx_ipd_int_sum_cn38xx cn58xx;
struct cvmx_ipd_int_sum_cn38xx cn58xxp1;
+ struct cvmx_ipd_int_sum_s cn63xx;
+ struct cvmx_ipd_int_sum_s cn63xxp1;
};
union cvmx_ipd_not_1st_mbuff_skip {
@@ -459,6 +469,8 @@ union cvmx_ipd_not_1st_mbuff_skip {
struct cvmx_ipd_not_1st_mbuff_skip_s cn56xxp1;
struct cvmx_ipd_not_1st_mbuff_skip_s cn58xx;
struct cvmx_ipd_not_1st_mbuff_skip_s cn58xxp1;
+ struct cvmx_ipd_not_1st_mbuff_skip_s cn63xx;
+ struct cvmx_ipd_not_1st_mbuff_skip_s cn63xxp1;
};
union cvmx_ipd_packet_mbuff_size {
@@ -478,6 +490,8 @@ union cvmx_ipd_packet_mbuff_size {
struct cvmx_ipd_packet_mbuff_size_s cn56xxp1;
struct cvmx_ipd_packet_mbuff_size_s cn58xx;
struct cvmx_ipd_packet_mbuff_size_s cn58xxp1;
+ struct cvmx_ipd_packet_mbuff_size_s cn63xx;
+ struct cvmx_ipd_packet_mbuff_size_s cn63xxp1;
};
union cvmx_ipd_pkt_ptr_valid {
@@ -496,6 +510,8 @@ union cvmx_ipd_pkt_ptr_valid {
struct cvmx_ipd_pkt_ptr_valid_s cn56xxp1;
struct cvmx_ipd_pkt_ptr_valid_s cn58xx;
struct cvmx_ipd_pkt_ptr_valid_s cn58xxp1;
+ struct cvmx_ipd_pkt_ptr_valid_s cn63xx;
+ struct cvmx_ipd_pkt_ptr_valid_s cn63xxp1;
};
union cvmx_ipd_portx_bp_page_cnt {
@@ -516,6 +532,8 @@ union cvmx_ipd_portx_bp_page_cnt {
struct cvmx_ipd_portx_bp_page_cnt_s cn56xxp1;
struct cvmx_ipd_portx_bp_page_cnt_s cn58xx;
struct cvmx_ipd_portx_bp_page_cnt_s cn58xxp1;
+ struct cvmx_ipd_portx_bp_page_cnt_s cn63xx;
+ struct cvmx_ipd_portx_bp_page_cnt_s cn63xxp1;
};
union cvmx_ipd_portx_bp_page_cnt2 {
@@ -529,6 +547,19 @@ union cvmx_ipd_portx_bp_page_cnt2 {
struct cvmx_ipd_portx_bp_page_cnt2_s cn52xxp1;
struct cvmx_ipd_portx_bp_page_cnt2_s cn56xx;
struct cvmx_ipd_portx_bp_page_cnt2_s cn56xxp1;
+ struct cvmx_ipd_portx_bp_page_cnt2_s cn63xx;
+ struct cvmx_ipd_portx_bp_page_cnt2_s cn63xxp1;
+};
+
+union cvmx_ipd_portx_bp_page_cnt3 {
+ uint64_t u64;
+ struct cvmx_ipd_portx_bp_page_cnt3_s {
+ uint64_t reserved_18_63:46;
+ uint64_t bp_enb:1;
+ uint64_t page_cnt:17;
+ } s;
+ struct cvmx_ipd_portx_bp_page_cnt3_s cn63xx;
+ struct cvmx_ipd_portx_bp_page_cnt3_s cn63xxp1;
};
union cvmx_ipd_port_bp_counters2_pairx {
@@ -541,6 +572,18 @@ union cvmx_ipd_port_bp_counters2_pairx {
struct cvmx_ipd_port_bp_counters2_pairx_s cn52xxp1;
struct cvmx_ipd_port_bp_counters2_pairx_s cn56xx;
struct cvmx_ipd_port_bp_counters2_pairx_s cn56xxp1;
+ struct cvmx_ipd_port_bp_counters2_pairx_s cn63xx;
+ struct cvmx_ipd_port_bp_counters2_pairx_s cn63xxp1;
+};
+
+union cvmx_ipd_port_bp_counters3_pairx {
+ uint64_t u64;
+ struct cvmx_ipd_port_bp_counters3_pairx_s {
+ uint64_t reserved_25_63:39;
+ uint64_t cnt_val:25;
+ } s;
+ struct cvmx_ipd_port_bp_counters3_pairx_s cn63xx;
+ struct cvmx_ipd_port_bp_counters3_pairx_s cn63xxp1;
};
union cvmx_ipd_port_bp_counters_pairx {
@@ -560,6 +603,8 @@ union cvmx_ipd_port_bp_counters_pairx {
struct cvmx_ipd_port_bp_counters_pairx_s cn56xxp1;
struct cvmx_ipd_port_bp_counters_pairx_s cn58xx;
struct cvmx_ipd_port_bp_counters_pairx_s cn58xxp1;
+ struct cvmx_ipd_port_bp_counters_pairx_s cn63xx;
+ struct cvmx_ipd_port_bp_counters_pairx_s cn63xxp1;
};
union cvmx_ipd_port_qos_x_cnt {
@@ -572,6 +617,8 @@ union cvmx_ipd_port_qos_x_cnt {
struct cvmx_ipd_port_qos_x_cnt_s cn52xxp1;
struct cvmx_ipd_port_qos_x_cnt_s cn56xx;
struct cvmx_ipd_port_qos_x_cnt_s cn56xxp1;
+ struct cvmx_ipd_port_qos_x_cnt_s cn63xx;
+ struct cvmx_ipd_port_qos_x_cnt_s cn63xxp1;
};
union cvmx_ipd_port_qos_intx {
@@ -583,6 +630,8 @@ union cvmx_ipd_port_qos_intx {
struct cvmx_ipd_port_qos_intx_s cn52xxp1;
struct cvmx_ipd_port_qos_intx_s cn56xx;
struct cvmx_ipd_port_qos_intx_s cn56xxp1;
+ struct cvmx_ipd_port_qos_intx_s cn63xx;
+ struct cvmx_ipd_port_qos_intx_s cn63xxp1;
};
union cvmx_ipd_port_qos_int_enbx {
@@ -594,6 +643,8 @@ union cvmx_ipd_port_qos_int_enbx {
struct cvmx_ipd_port_qos_int_enbx_s cn52xxp1;
struct cvmx_ipd_port_qos_int_enbx_s cn56xx;
struct cvmx_ipd_port_qos_int_enbx_s cn56xxp1;
+ struct cvmx_ipd_port_qos_int_enbx_s cn63xx;
+ struct cvmx_ipd_port_qos_int_enbx_s cn63xxp1;
};
union cvmx_ipd_prc_hold_ptr_fifo_ctl {
@@ -616,6 +667,8 @@ union cvmx_ipd_prc_hold_ptr_fifo_ctl {
struct cvmx_ipd_prc_hold_ptr_fifo_ctl_s cn56xxp1;
struct cvmx_ipd_prc_hold_ptr_fifo_ctl_s cn58xx;
struct cvmx_ipd_prc_hold_ptr_fifo_ctl_s cn58xxp1;
+ struct cvmx_ipd_prc_hold_ptr_fifo_ctl_s cn63xx;
+ struct cvmx_ipd_prc_hold_ptr_fifo_ctl_s cn63xxp1;
};
union cvmx_ipd_prc_port_ptr_fifo_ctl {
@@ -637,6 +690,8 @@ union cvmx_ipd_prc_port_ptr_fifo_ctl {
struct cvmx_ipd_prc_port_ptr_fifo_ctl_s cn56xxp1;
struct cvmx_ipd_prc_port_ptr_fifo_ctl_s cn58xx;
struct cvmx_ipd_prc_port_ptr_fifo_ctl_s cn58xxp1;
+ struct cvmx_ipd_prc_port_ptr_fifo_ctl_s cn63xx;
+ struct cvmx_ipd_prc_port_ptr_fifo_ctl_s cn63xxp1;
};
union cvmx_ipd_ptr_count {
@@ -660,6 +715,8 @@ union cvmx_ipd_ptr_count {
struct cvmx_ipd_ptr_count_s cn56xxp1;
struct cvmx_ipd_ptr_count_s cn58xx;
struct cvmx_ipd_ptr_count_s cn58xxp1;
+ struct cvmx_ipd_ptr_count_s cn63xx;
+ struct cvmx_ipd_ptr_count_s cn63xxp1;
};
union cvmx_ipd_pwp_ptr_fifo_ctl {
@@ -683,6 +740,8 @@ union cvmx_ipd_pwp_ptr_fifo_ctl {
struct cvmx_ipd_pwp_ptr_fifo_ctl_s cn56xxp1;
struct cvmx_ipd_pwp_ptr_fifo_ctl_s cn58xx;
struct cvmx_ipd_pwp_ptr_fifo_ctl_s cn58xxp1;
+ struct cvmx_ipd_pwp_ptr_fifo_ctl_s cn63xx;
+ struct cvmx_ipd_pwp_ptr_fifo_ctl_s cn63xxp1;
};
union cvmx_ipd_qosx_red_marks {
@@ -702,6 +761,8 @@ union cvmx_ipd_qosx_red_marks {
struct cvmx_ipd_qosx_red_marks_s cn56xxp1;
struct cvmx_ipd_qosx_red_marks_s cn58xx;
struct cvmx_ipd_qosx_red_marks_s cn58xxp1;
+ struct cvmx_ipd_qosx_red_marks_s cn63xx;
+ struct cvmx_ipd_qosx_red_marks_s cn63xxp1;
};
union cvmx_ipd_que0_free_page_cnt {
@@ -721,6 +782,8 @@ union cvmx_ipd_que0_free_page_cnt {
struct cvmx_ipd_que0_free_page_cnt_s cn56xxp1;
struct cvmx_ipd_que0_free_page_cnt_s cn58xx;
struct cvmx_ipd_que0_free_page_cnt_s cn58xxp1;
+ struct cvmx_ipd_que0_free_page_cnt_s cn63xx;
+ struct cvmx_ipd_que0_free_page_cnt_s cn63xxp1;
};
union cvmx_ipd_red_port_enable {
@@ -741,18 +804,25 @@ union cvmx_ipd_red_port_enable {
struct cvmx_ipd_red_port_enable_s cn56xxp1;
struct cvmx_ipd_red_port_enable_s cn58xx;
struct cvmx_ipd_red_port_enable_s cn58xxp1;
+ struct cvmx_ipd_red_port_enable_s cn63xx;
+ struct cvmx_ipd_red_port_enable_s cn63xxp1;
};
union cvmx_ipd_red_port_enable2 {
uint64_t u64;
struct cvmx_ipd_red_port_enable2_s {
+ uint64_t reserved_8_63:56;
+ uint64_t prt_enb:8;
+ } s;
+ struct cvmx_ipd_red_port_enable2_cn52xx {
uint64_t reserved_4_63:60;
uint64_t prt_enb:4;
- } s;
- struct cvmx_ipd_red_port_enable2_s cn52xx;
- struct cvmx_ipd_red_port_enable2_s cn52xxp1;
- struct cvmx_ipd_red_port_enable2_s cn56xx;
- struct cvmx_ipd_red_port_enable2_s cn56xxp1;
+ } cn52xx;
+ struct cvmx_ipd_red_port_enable2_cn52xx cn52xxp1;
+ struct cvmx_ipd_red_port_enable2_cn52xx cn56xx;
+ struct cvmx_ipd_red_port_enable2_cn52xx cn56xxp1;
+ struct cvmx_ipd_red_port_enable2_s cn63xx;
+ struct cvmx_ipd_red_port_enable2_s cn63xxp1;
};
union cvmx_ipd_red_quex_param {
@@ -775,6 +845,8 @@ union cvmx_ipd_red_quex_param {
struct cvmx_ipd_red_quex_param_s cn56xxp1;
struct cvmx_ipd_red_quex_param_s cn58xx;
struct cvmx_ipd_red_quex_param_s cn58xxp1;
+ struct cvmx_ipd_red_quex_param_s cn63xx;
+ struct cvmx_ipd_red_quex_param_s cn63xxp1;
};
union cvmx_ipd_sub_port_bp_page_cnt {
@@ -795,6 +867,8 @@ union cvmx_ipd_sub_port_bp_page_cnt {
struct cvmx_ipd_sub_port_bp_page_cnt_s cn56xxp1;
struct cvmx_ipd_sub_port_bp_page_cnt_s cn58xx;
struct cvmx_ipd_sub_port_bp_page_cnt_s cn58xxp1;
+ struct cvmx_ipd_sub_port_bp_page_cnt_s cn63xx;
+ struct cvmx_ipd_sub_port_bp_page_cnt_s cn63xxp1;
};
union cvmx_ipd_sub_port_fcs {
@@ -822,6 +896,8 @@ union cvmx_ipd_sub_port_fcs {
struct cvmx_ipd_sub_port_fcs_s cn56xxp1;
struct cvmx_ipd_sub_port_fcs_cn38xx cn58xx;
struct cvmx_ipd_sub_port_fcs_cn38xx cn58xxp1;
+ struct cvmx_ipd_sub_port_fcs_s cn63xx;
+ struct cvmx_ipd_sub_port_fcs_s cn63xxp1;
};
union cvmx_ipd_sub_port_qos_cnt {
@@ -835,6 +911,8 @@ union cvmx_ipd_sub_port_qos_cnt {
struct cvmx_ipd_sub_port_qos_cnt_s cn52xxp1;
struct cvmx_ipd_sub_port_qos_cnt_s cn56xx;
struct cvmx_ipd_sub_port_qos_cnt_s cn56xxp1;
+ struct cvmx_ipd_sub_port_qos_cnt_s cn63xx;
+ struct cvmx_ipd_sub_port_qos_cnt_s cn63xxp1;
};
union cvmx_ipd_wqe_fpa_queue {
@@ -854,6 +932,8 @@ union cvmx_ipd_wqe_fpa_queue {
struct cvmx_ipd_wqe_fpa_queue_s cn56xxp1;
struct cvmx_ipd_wqe_fpa_queue_s cn58xx;
struct cvmx_ipd_wqe_fpa_queue_s cn58xxp1;
+ struct cvmx_ipd_wqe_fpa_queue_s cn63xx;
+ struct cvmx_ipd_wqe_fpa_queue_s cn63xxp1;
};
union cvmx_ipd_wqe_ptr_valid {
@@ -872,6 +952,8 @@ union cvmx_ipd_wqe_ptr_valid {
struct cvmx_ipd_wqe_ptr_valid_s cn56xxp1;
struct cvmx_ipd_wqe_ptr_valid_s cn58xx;
struct cvmx_ipd_wqe_ptr_valid_s cn58xxp1;
+ struct cvmx_ipd_wqe_ptr_valid_s cn63xx;
+ struct cvmx_ipd_wqe_ptr_valid_s cn63xxp1;
};
#endif
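/*
 * Illustrative sketch (an assumption about typical usage, not part of the
 * patch): the indexed IPD registers are reached through the macros above,
 * e.g. the per-QoS RED marks.  The drop/pass thresholds passed in here are
 * arbitrary example values.
 */
#include <asm/octeon/cvmx.h>
#include <asm/octeon/cvmx-ipd-defs.h>

static void example_set_red_marks(int qos, uint64_t drop, uint64_t pass)
{
	union cvmx_ipd_qosx_red_marks marks;

	marks.u64 = cvmx_read_csr(CVMX_IPD_QOSX_RED_MARKS(qos));
	marks.s.drop = drop;	/* RED drop threshold */
	marks.s.pass = pass;	/* RED pass threshold */
	cvmx_write_csr(CVMX_IPD_QOSX_RED_MARKS(qos), marks.u64);
}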
diff --git a/arch/mips/include/asm/octeon/cvmx-l2c-defs.h b/arch/mips/include/asm/octeon/cvmx-l2c-defs.h
index 337583842b51..7a50a0beb472 100644
--- a/arch/mips/include/asm/octeon/cvmx-l2c-defs.h
+++ b/arch/mips/include/asm/octeon/cvmx-l2c-defs.h
@@ -4,7 +4,7 @@
* Contact: support@caviumnetworks.com
* This file is part of the OCTEON SDK
*
- * Copyright (c) 2003-2008 Cavium Networks
+ * Copyright (c) 2003-2010 Cavium Networks
*
* This file is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License, Version 2, as
@@ -28,70 +28,113 @@
#ifndef __CVMX_L2C_DEFS_H__
#define __CVMX_L2C_DEFS_H__
-#define CVMX_L2C_BST0 \
- CVMX_ADD_IO_SEG(0x00011800800007F8ull)
-#define CVMX_L2C_BST1 \
- CVMX_ADD_IO_SEG(0x00011800800007F0ull)
-#define CVMX_L2C_BST2 \
- CVMX_ADD_IO_SEG(0x00011800800007E8ull)
-#define CVMX_L2C_CFG \
- CVMX_ADD_IO_SEG(0x0001180080000000ull)
-#define CVMX_L2C_DBG \
- CVMX_ADD_IO_SEG(0x0001180080000030ull)
-#define CVMX_L2C_DUT \
- CVMX_ADD_IO_SEG(0x0001180080000050ull)
-#define CVMX_L2C_GRPWRR0 \
- CVMX_ADD_IO_SEG(0x00011800800000C8ull)
-#define CVMX_L2C_GRPWRR1 \
- CVMX_ADD_IO_SEG(0x00011800800000D0ull)
-#define CVMX_L2C_INT_EN \
- CVMX_ADD_IO_SEG(0x0001180080000100ull)
-#define CVMX_L2C_INT_STAT \
- CVMX_ADD_IO_SEG(0x00011800800000F8ull)
-#define CVMX_L2C_LCKBASE \
- CVMX_ADD_IO_SEG(0x0001180080000058ull)
-#define CVMX_L2C_LCKOFF \
- CVMX_ADD_IO_SEG(0x0001180080000060ull)
-#define CVMX_L2C_LFB0 \
- CVMX_ADD_IO_SEG(0x0001180080000038ull)
-#define CVMX_L2C_LFB1 \
- CVMX_ADD_IO_SEG(0x0001180080000040ull)
-#define CVMX_L2C_LFB2 \
- CVMX_ADD_IO_SEG(0x0001180080000048ull)
-#define CVMX_L2C_LFB3 \
- CVMX_ADD_IO_SEG(0x00011800800000B8ull)
-#define CVMX_L2C_OOB \
- CVMX_ADD_IO_SEG(0x00011800800000D8ull)
-#define CVMX_L2C_OOB1 \
- CVMX_ADD_IO_SEG(0x00011800800000E0ull)
-#define CVMX_L2C_OOB2 \
- CVMX_ADD_IO_SEG(0x00011800800000E8ull)
-#define CVMX_L2C_OOB3 \
- CVMX_ADD_IO_SEG(0x00011800800000F0ull)
-#define CVMX_L2C_PFC0 \
- CVMX_ADD_IO_SEG(0x0001180080000098ull)
-#define CVMX_L2C_PFC1 \
- CVMX_ADD_IO_SEG(0x00011800800000A0ull)
-#define CVMX_L2C_PFC2 \
- CVMX_ADD_IO_SEG(0x00011800800000A8ull)
-#define CVMX_L2C_PFC3 \
- CVMX_ADD_IO_SEG(0x00011800800000B0ull)
-#define CVMX_L2C_PFCTL \
- CVMX_ADD_IO_SEG(0x0001180080000090ull)
-#define CVMX_L2C_PFCX(offset) \
- CVMX_ADD_IO_SEG(0x0001180080000098ull + (((offset) & 3) * 8))
-#define CVMX_L2C_PPGRP \
- CVMX_ADD_IO_SEG(0x00011800800000C0ull)
-#define CVMX_L2C_SPAR0 \
- CVMX_ADD_IO_SEG(0x0001180080000068ull)
-#define CVMX_L2C_SPAR1 \
- CVMX_ADD_IO_SEG(0x0001180080000070ull)
-#define CVMX_L2C_SPAR2 \
- CVMX_ADD_IO_SEG(0x0001180080000078ull)
-#define CVMX_L2C_SPAR3 \
- CVMX_ADD_IO_SEG(0x0001180080000080ull)
-#define CVMX_L2C_SPAR4 \
- CVMX_ADD_IO_SEG(0x0001180080000088ull)
+#define CVMX_L2C_BIG_CTL (CVMX_ADD_IO_SEG(0x0001180080800030ull))
+#define CVMX_L2C_BST (CVMX_ADD_IO_SEG(0x00011800808007F8ull))
+#define CVMX_L2C_BST0 (CVMX_ADD_IO_SEG(0x00011800800007F8ull))
+#define CVMX_L2C_BST1 (CVMX_ADD_IO_SEG(0x00011800800007F0ull))
+#define CVMX_L2C_BST2 (CVMX_ADD_IO_SEG(0x00011800800007E8ull))
+#define CVMX_L2C_BST_MEMX(block_id) (CVMX_ADD_IO_SEG(0x0001180080C007F8ull))
+#define CVMX_L2C_BST_TDTX(block_id) (CVMX_ADD_IO_SEG(0x0001180080A007F0ull))
+#define CVMX_L2C_BST_TTGX(block_id) (CVMX_ADD_IO_SEG(0x0001180080A007F8ull))
+#define CVMX_L2C_CFG (CVMX_ADD_IO_SEG(0x0001180080000000ull))
+#define CVMX_L2C_COP0_MAPX(offset) (CVMX_ADD_IO_SEG(0x0001180080940000ull) + ((offset) & 16383) * 8)
+#define CVMX_L2C_CTL (CVMX_ADD_IO_SEG(0x0001180080800000ull))
+#define CVMX_L2C_DBG (CVMX_ADD_IO_SEG(0x0001180080000030ull))
+#define CVMX_L2C_DUT (CVMX_ADD_IO_SEG(0x0001180080000050ull))
+#define CVMX_L2C_DUT_MAPX(offset) (CVMX_ADD_IO_SEG(0x0001180080E00000ull) + ((offset) & 2047) * 8)
+#define CVMX_L2C_ERR_TDTX(block_id) (CVMX_ADD_IO_SEG(0x0001180080A007E0ull))
+#define CVMX_L2C_ERR_TTGX(block_id) (CVMX_ADD_IO_SEG(0x0001180080A007E8ull))
+#define CVMX_L2C_ERR_VBFX(block_id) (CVMX_ADD_IO_SEG(0x0001180080C007F0ull))
+#define CVMX_L2C_ERR_XMC (CVMX_ADD_IO_SEG(0x00011800808007D8ull))
+#define CVMX_L2C_GRPWRR0 (CVMX_ADD_IO_SEG(0x00011800800000C8ull))
+#define CVMX_L2C_GRPWRR1 (CVMX_ADD_IO_SEG(0x00011800800000D0ull))
+#define CVMX_L2C_INT_EN (CVMX_ADD_IO_SEG(0x0001180080000100ull))
+#define CVMX_L2C_INT_ENA (CVMX_ADD_IO_SEG(0x0001180080800020ull))
+#define CVMX_L2C_INT_REG (CVMX_ADD_IO_SEG(0x0001180080800018ull))
+#define CVMX_L2C_INT_STAT (CVMX_ADD_IO_SEG(0x00011800800000F8ull))
+#define CVMX_L2C_IOCX_PFC(block_id) (CVMX_ADD_IO_SEG(0x0001180080800420ull))
+#define CVMX_L2C_IORX_PFC(block_id) (CVMX_ADD_IO_SEG(0x0001180080800428ull))
+#define CVMX_L2C_LCKBASE (CVMX_ADD_IO_SEG(0x0001180080000058ull))
+#define CVMX_L2C_LCKOFF (CVMX_ADD_IO_SEG(0x0001180080000060ull))
+#define CVMX_L2C_LFB0 (CVMX_ADD_IO_SEG(0x0001180080000038ull))
+#define CVMX_L2C_LFB1 (CVMX_ADD_IO_SEG(0x0001180080000040ull))
+#define CVMX_L2C_LFB2 (CVMX_ADD_IO_SEG(0x0001180080000048ull))
+#define CVMX_L2C_LFB3 (CVMX_ADD_IO_SEG(0x00011800800000B8ull))
+#define CVMX_L2C_OOB (CVMX_ADD_IO_SEG(0x00011800800000D8ull))
+#define CVMX_L2C_OOB1 (CVMX_ADD_IO_SEG(0x00011800800000E0ull))
+#define CVMX_L2C_OOB2 (CVMX_ADD_IO_SEG(0x00011800800000E8ull))
+#define CVMX_L2C_OOB3 (CVMX_ADD_IO_SEG(0x00011800800000F0ull))
+#define CVMX_L2C_PFC0 CVMX_L2C_PFCX(0)
+#define CVMX_L2C_PFC1 CVMX_L2C_PFCX(1)
+#define CVMX_L2C_PFC2 CVMX_L2C_PFCX(2)
+#define CVMX_L2C_PFC3 CVMX_L2C_PFCX(3)
+#define CVMX_L2C_PFCTL (CVMX_ADD_IO_SEG(0x0001180080000090ull))
+#define CVMX_L2C_PFCX(offset) (CVMX_ADD_IO_SEG(0x0001180080000098ull) + ((offset) & 3) * 8)
+#define CVMX_L2C_PPGRP (CVMX_ADD_IO_SEG(0x00011800800000C0ull))
+#define CVMX_L2C_QOS_IOBX(block_id) (CVMX_ADD_IO_SEG(0x0001180080880200ull))
+#define CVMX_L2C_QOS_PPX(offset) (CVMX_ADD_IO_SEG(0x0001180080880000ull) + ((offset) & 7) * 8)
+#define CVMX_L2C_QOS_WGT (CVMX_ADD_IO_SEG(0x0001180080800008ull))
+#define CVMX_L2C_RSCX_PFC(block_id) (CVMX_ADD_IO_SEG(0x0001180080800410ull))
+#define CVMX_L2C_RSDX_PFC(block_id) (CVMX_ADD_IO_SEG(0x0001180080800418ull))
+#define CVMX_L2C_SPAR0 (CVMX_ADD_IO_SEG(0x0001180080000068ull))
+#define CVMX_L2C_SPAR1 (CVMX_ADD_IO_SEG(0x0001180080000070ull))
+#define CVMX_L2C_SPAR2 (CVMX_ADD_IO_SEG(0x0001180080000078ull))
+#define CVMX_L2C_SPAR3 (CVMX_ADD_IO_SEG(0x0001180080000080ull))
+#define CVMX_L2C_SPAR4 (CVMX_ADD_IO_SEG(0x0001180080000088ull))
+#define CVMX_L2C_TADX_ECC0(block_id) (CVMX_ADD_IO_SEG(0x0001180080A00018ull))
+#define CVMX_L2C_TADX_ECC1(block_id) (CVMX_ADD_IO_SEG(0x0001180080A00020ull))
+#define CVMX_L2C_TADX_IEN(block_id) (CVMX_ADD_IO_SEG(0x0001180080A00000ull))
+#define CVMX_L2C_TADX_INT(block_id) (CVMX_ADD_IO_SEG(0x0001180080A00028ull))
+#define CVMX_L2C_TADX_PFC0(block_id) (CVMX_ADD_IO_SEG(0x0001180080A00400ull))
+#define CVMX_L2C_TADX_PFC1(block_id) (CVMX_ADD_IO_SEG(0x0001180080A00408ull))
+#define CVMX_L2C_TADX_PFC2(block_id) (CVMX_ADD_IO_SEG(0x0001180080A00410ull))
+#define CVMX_L2C_TADX_PFC3(block_id) (CVMX_ADD_IO_SEG(0x0001180080A00418ull))
+#define CVMX_L2C_TADX_PRF(block_id) (CVMX_ADD_IO_SEG(0x0001180080A00008ull))
+#define CVMX_L2C_TADX_TAG(block_id) (CVMX_ADD_IO_SEG(0x0001180080A00010ull))
+#define CVMX_L2C_VER_ID (CVMX_ADD_IO_SEG(0x00011800808007E0ull))
+#define CVMX_L2C_VER_IOB (CVMX_ADD_IO_SEG(0x00011800808007F0ull))
+#define CVMX_L2C_VER_MSC (CVMX_ADD_IO_SEG(0x00011800808007D0ull))
+#define CVMX_L2C_VER_PP (CVMX_ADD_IO_SEG(0x00011800808007E8ull))
+#define CVMX_L2C_VIRTID_IOBX(block_id) (CVMX_ADD_IO_SEG(0x00011800808C0200ull))
+#define CVMX_L2C_VIRTID_PPX(offset) (CVMX_ADD_IO_SEG(0x00011800808C0000ull) + ((offset) & 7) * 8)
+#define CVMX_L2C_VRT_CTL (CVMX_ADD_IO_SEG(0x0001180080800010ull))
+#define CVMX_L2C_VRT_MEMX(offset) (CVMX_ADD_IO_SEG(0x0001180080900000ull) + ((offset) & 1023) * 8)
+#define CVMX_L2C_WPAR_IOBX(block_id) (CVMX_ADD_IO_SEG(0x0001180080840200ull))
+#define CVMX_L2C_WPAR_PPX(offset) (CVMX_ADD_IO_SEG(0x0001180080840000ull) + ((offset) & 7) * 8)
+#define CVMX_L2C_XMCX_PFC(block_id) (CVMX_ADD_IO_SEG(0x0001180080800400ull))
+#define CVMX_L2C_XMC_CMD (CVMX_ADD_IO_SEG(0x0001180080800028ull))
+#define CVMX_L2C_XMDX_PFC(block_id) (CVMX_ADD_IO_SEG(0x0001180080800408ull))
+
+union cvmx_l2c_big_ctl {
+ uint64_t u64;
+ struct cvmx_l2c_big_ctl_s {
+ uint64_t reserved_8_63:56;
+ uint64_t maxdram:4;
+ uint64_t reserved_1_3:3;
+ uint64_t disable:1;
+ } s;
+ struct cvmx_l2c_big_ctl_s cn63xx;
+};
+
+union cvmx_l2c_bst {
+ uint64_t u64;
+ struct cvmx_l2c_bst_s {
+ uint64_t reserved_38_63:26;
+ uint64_t dutfl:6;
+ uint64_t reserved_17_31:15;
+ uint64_t ioccmdfl:1;
+ uint64_t reserved_13_15:3;
+ uint64_t iocdatfl:1;
+ uint64_t reserved_9_11:3;
+ uint64_t dutresfl:1;
+ uint64_t reserved_5_7:3;
+ uint64_t vrtfl:1;
+ uint64_t reserved_1_3:3;
+ uint64_t tdffl:1;
+ } s;
+ struct cvmx_l2c_bst_s cn63xx;
+ struct cvmx_l2c_bst_s cn63xxp1;
+};
union cvmx_l2c_bst0 {
uint64_t u64;
@@ -253,6 +296,48 @@ union cvmx_l2c_bst2 {
struct cvmx_l2c_bst2_cn56xx cn58xxp1;
};
+union cvmx_l2c_bst_memx {
+ uint64_t u64;
+ struct cvmx_l2c_bst_memx_s {
+ uint64_t start_bist:1;
+ uint64_t clear_bist:1;
+ uint64_t reserved_5_61:57;
+ uint64_t rdffl:1;
+ uint64_t vbffl:4;
+ } s;
+ struct cvmx_l2c_bst_memx_s cn63xx;
+ struct cvmx_l2c_bst_memx_s cn63xxp1;
+};
+
+union cvmx_l2c_bst_tdtx {
+ uint64_t u64;
+ struct cvmx_l2c_bst_tdtx_s {
+ uint64_t reserved_32_63:32;
+ uint64_t fbfrspfl:8;
+ uint64_t sbffl:8;
+ uint64_t fbffl:8;
+ uint64_t l2dfl:8;
+ } s;
+ struct cvmx_l2c_bst_tdtx_s cn63xx;
+ struct cvmx_l2c_bst_tdtx_cn63xxp1 {
+ uint64_t reserved_24_63:40;
+ uint64_t sbffl:8;
+ uint64_t fbffl:8;
+ uint64_t l2dfl:8;
+ } cn63xxp1;
+};
+
+union cvmx_l2c_bst_ttgx {
+ uint64_t u64;
+ struct cvmx_l2c_bst_ttgx_s {
+ uint64_t reserved_17_63:47;
+ uint64_t lrufl:1;
+ uint64_t tagfl:16;
+ } s;
+ struct cvmx_l2c_bst_ttgx_s cn63xx;
+ struct cvmx_l2c_bst_ttgx_s cn63xxp1;
+};
+
union cvmx_l2c_cfg {
uint64_t u64;
struct cvmx_l2c_cfg_s {
@@ -333,6 +418,49 @@ union cvmx_l2c_cfg {
} cn58xxp1;
};
+union cvmx_l2c_cop0_mapx {
+ uint64_t u64;
+ struct cvmx_l2c_cop0_mapx_s {
+ uint64_t data:64;
+ } s;
+ struct cvmx_l2c_cop0_mapx_s cn63xx;
+ struct cvmx_l2c_cop0_mapx_s cn63xxp1;
+};
+
+union cvmx_l2c_ctl {
+ uint64_t u64;
+ struct cvmx_l2c_ctl_s {
+ uint64_t reserved_28_63:36;
+ uint64_t disstgl2i:1;
+ uint64_t l2dfsbe:1;
+ uint64_t l2dfdbe:1;
+ uint64_t discclk:1;
+ uint64_t maxvab:4;
+ uint64_t maxlfb:4;
+ uint64_t rsp_arb_mode:1;
+ uint64_t xmc_arb_mode:1;
+ uint64_t ef_ena:1;
+ uint64_t ef_cnt:7;
+ uint64_t vab_thresh:4;
+ uint64_t disecc:1;
+ uint64_t disidxalias:1;
+ } s;
+ struct cvmx_l2c_ctl_s cn63xx;
+ struct cvmx_l2c_ctl_cn63xxp1 {
+ uint64_t reserved_25_63:39;
+ uint64_t discclk:1;
+ uint64_t maxvab:4;
+ uint64_t maxlfb:4;
+ uint64_t rsp_arb_mode:1;
+ uint64_t xmc_arb_mode:1;
+ uint64_t ef_ena:1;
+ uint64_t ef_cnt:7;
+ uint64_t vab_thresh:4;
+ uint64_t disecc:1;
+ uint64_t disidxalias:1;
+ } cn63xxp1;
+};
+
union cvmx_l2c_dbg {
uint64_t u64;
struct cvmx_l2c_dbg_s {
@@ -349,7 +477,9 @@ union cvmx_l2c_dbg {
uint64_t reserved_13_63:51;
uint64_t lfb_enum:2;
uint64_t lfb_dmp:1;
- uint64_t reserved_5_9:5;
+ uint64_t reserved_7_9:3;
+ uint64_t ppnum:1;
+ uint64_t reserved_5_5:1;
uint64_t set:2;
uint64_t finv:1;
uint64_t l2d:1;
@@ -420,6 +550,79 @@ union cvmx_l2c_dut {
struct cvmx_l2c_dut_s cn58xxp1;
};
+union cvmx_l2c_dut_mapx {
+ uint64_t u64;
+ struct cvmx_l2c_dut_mapx_s {
+ uint64_t reserved_38_63:26;
+ uint64_t tag:28;
+ uint64_t reserved_1_9:9;
+ uint64_t valid:1;
+ } s;
+ struct cvmx_l2c_dut_mapx_s cn63xx;
+ struct cvmx_l2c_dut_mapx_s cn63xxp1;
+};
+
+union cvmx_l2c_err_tdtx {
+ uint64_t u64;
+ struct cvmx_l2c_err_tdtx_s {
+ uint64_t dbe:1;
+ uint64_t sbe:1;
+ uint64_t vdbe:1;
+ uint64_t vsbe:1;
+ uint64_t syn:10;
+ uint64_t reserved_21_49:29;
+ uint64_t wayidx:17;
+ uint64_t reserved_2_3:2;
+ uint64_t type:2;
+ } s;
+ struct cvmx_l2c_err_tdtx_s cn63xx;
+ struct cvmx_l2c_err_tdtx_s cn63xxp1;
+};
+
+union cvmx_l2c_err_ttgx {
+ uint64_t u64;
+ struct cvmx_l2c_err_ttgx_s {
+ uint64_t dbe:1;
+ uint64_t sbe:1;
+ uint64_t noway:1;
+ uint64_t reserved_56_60:5;
+ uint64_t syn:6;
+ uint64_t reserved_21_49:29;
+ uint64_t wayidx:14;
+ uint64_t reserved_2_6:5;
+ uint64_t type:2;
+ } s;
+ struct cvmx_l2c_err_ttgx_s cn63xx;
+ struct cvmx_l2c_err_ttgx_s cn63xxp1;
+};
+
+union cvmx_l2c_err_vbfx {
+ uint64_t u64;
+ struct cvmx_l2c_err_vbfx_s {
+ uint64_t reserved_62_63:2;
+ uint64_t vdbe:1;
+ uint64_t vsbe:1;
+ uint64_t vsyn:10;
+ uint64_t reserved_2_49:48;
+ uint64_t type:2;
+ } s;
+ struct cvmx_l2c_err_vbfx_s cn63xx;
+ struct cvmx_l2c_err_vbfx_s cn63xxp1;
+};
+
+union cvmx_l2c_err_xmc {
+ uint64_t u64;
+ struct cvmx_l2c_err_xmc_s {
+ uint64_t cmd:6;
+ uint64_t reserved_52_57:6;
+ uint64_t sid:4;
+ uint64_t reserved_38_47:10;
+ uint64_t addr:38;
+ } s;
+ struct cvmx_l2c_err_xmc_s cn63xx;
+ struct cvmx_l2c_err_xmc_s cn63xxp1;
+};
+
union cvmx_l2c_grpwrr0 {
uint64_t u64;
struct cvmx_l2c_grpwrr0_s {
@@ -464,6 +667,60 @@ union cvmx_l2c_int_en {
struct cvmx_l2c_int_en_s cn56xxp1;
};
+union cvmx_l2c_int_ena {
+ uint64_t u64;
+ struct cvmx_l2c_int_ena_s {
+ uint64_t reserved_8_63:56;
+ uint64_t bigrd:1;
+ uint64_t bigwr:1;
+ uint64_t vrtpe:1;
+ uint64_t vrtadrng:1;
+ uint64_t vrtidrng:1;
+ uint64_t vrtwr:1;
+ uint64_t holewr:1;
+ uint64_t holerd:1;
+ } s;
+ struct cvmx_l2c_int_ena_s cn63xx;
+ struct cvmx_l2c_int_ena_cn63xxp1 {
+ uint64_t reserved_6_63:58;
+ uint64_t vrtpe:1;
+ uint64_t vrtadrng:1;
+ uint64_t vrtidrng:1;
+ uint64_t vrtwr:1;
+ uint64_t holewr:1;
+ uint64_t holerd:1;
+ } cn63xxp1;
+};
+
+union cvmx_l2c_int_reg {
+ uint64_t u64;
+ struct cvmx_l2c_int_reg_s {
+ uint64_t reserved_17_63:47;
+ uint64_t tad0:1;
+ uint64_t reserved_8_15:8;
+ uint64_t bigrd:1;
+ uint64_t bigwr:1;
+ uint64_t vrtpe:1;
+ uint64_t vrtadrng:1;
+ uint64_t vrtidrng:1;
+ uint64_t vrtwr:1;
+ uint64_t holewr:1;
+ uint64_t holerd:1;
+ } s;
+ struct cvmx_l2c_int_reg_s cn63xx;
+ struct cvmx_l2c_int_reg_cn63xxp1 {
+ uint64_t reserved_17_63:47;
+ uint64_t tad0:1;
+ uint64_t reserved_6_15:10;
+ uint64_t vrtpe:1;
+ uint64_t vrtadrng:1;
+ uint64_t vrtidrng:1;
+ uint64_t vrtwr:1;
+ uint64_t holewr:1;
+ uint64_t holerd:1;
+ } cn63xxp1;
+};
+
union cvmx_l2c_int_stat {
uint64_t u64;
struct cvmx_l2c_int_stat_s {
@@ -484,6 +741,24 @@ union cvmx_l2c_int_stat {
struct cvmx_l2c_int_stat_s cn56xxp1;
};
+union cvmx_l2c_iocx_pfc {
+ uint64_t u64;
+ struct cvmx_l2c_iocx_pfc_s {
+ uint64_t count:64;
+ } s;
+ struct cvmx_l2c_iocx_pfc_s cn63xx;
+ struct cvmx_l2c_iocx_pfc_s cn63xxp1;
+};
+
+union cvmx_l2c_iorx_pfc {
+ uint64_t u64;
+ struct cvmx_l2c_iorx_pfc_s {
+ uint64_t count:64;
+ } s;
+ struct cvmx_l2c_iorx_pfc_s cn63xx;
+ struct cvmx_l2c_iorx_pfc_s cn63xxp1;
+};
+
union cvmx_l2c_lckbase {
uint64_t u64;
struct cvmx_l2c_lckbase_s {
@@ -855,6 +1130,59 @@ union cvmx_l2c_ppgrp {
struct cvmx_l2c_ppgrp_s cn56xxp1;
};
+union cvmx_l2c_qos_iobx {
+ uint64_t u64;
+ struct cvmx_l2c_qos_iobx_s {
+ uint64_t reserved_6_63:58;
+ uint64_t dwblvl:2;
+ uint64_t reserved_2_3:2;
+ uint64_t lvl:2;
+ } s;
+ struct cvmx_l2c_qos_iobx_s cn63xx;
+ struct cvmx_l2c_qos_iobx_s cn63xxp1;
+};
+
+union cvmx_l2c_qos_ppx {
+ uint64_t u64;
+ struct cvmx_l2c_qos_ppx_s {
+ uint64_t reserved_2_63:62;
+ uint64_t lvl:2;
+ } s;
+ struct cvmx_l2c_qos_ppx_s cn63xx;
+ struct cvmx_l2c_qos_ppx_s cn63xxp1;
+};
+
+union cvmx_l2c_qos_wgt {
+ uint64_t u64;
+ struct cvmx_l2c_qos_wgt_s {
+ uint64_t reserved_32_63:32;
+ uint64_t wgt3:8;
+ uint64_t wgt2:8;
+ uint64_t wgt1:8;
+ uint64_t wgt0:8;
+ } s;
+ struct cvmx_l2c_qos_wgt_s cn63xx;
+ struct cvmx_l2c_qos_wgt_s cn63xxp1;
+};
+
+union cvmx_l2c_rscx_pfc {
+ uint64_t u64;
+ struct cvmx_l2c_rscx_pfc_s {
+ uint64_t count:64;
+ } s;
+ struct cvmx_l2c_rscx_pfc_s cn63xx;
+ struct cvmx_l2c_rscx_pfc_s cn63xxp1;
+};
+
+union cvmx_l2c_rsdx_pfc {
+ uint64_t u64;
+ struct cvmx_l2c_rsdx_pfc_s {
+ uint64_t count:64;
+ } s;
+ struct cvmx_l2c_rsdx_pfc_s cn63xx;
+ struct cvmx_l2c_rsdx_pfc_s cn63xxp1;
+};
+
union cvmx_l2c_spar0 {
uint64_t u64;
struct cvmx_l2c_spar0_s {
@@ -960,4 +1288,282 @@ union cvmx_l2c_spar4 {
struct cvmx_l2c_spar4_s cn58xxp1;
};
+union cvmx_l2c_tadx_ecc0 {
+ uint64_t u64;
+ struct cvmx_l2c_tadx_ecc0_s {
+ uint64_t reserved_58_63:6;
+ uint64_t ow3ecc:10;
+ uint64_t reserved_42_47:6;
+ uint64_t ow2ecc:10;
+ uint64_t reserved_26_31:6;
+ uint64_t ow1ecc:10;
+ uint64_t reserved_10_15:6;
+ uint64_t ow0ecc:10;
+ } s;
+ struct cvmx_l2c_tadx_ecc0_s cn63xx;
+ struct cvmx_l2c_tadx_ecc0_s cn63xxp1;
+};
+
+union cvmx_l2c_tadx_ecc1 {
+ uint64_t u64;
+ struct cvmx_l2c_tadx_ecc1_s {
+ uint64_t reserved_58_63:6;
+ uint64_t ow7ecc:10;
+ uint64_t reserved_42_47:6;
+ uint64_t ow6ecc:10;
+ uint64_t reserved_26_31:6;
+ uint64_t ow5ecc:10;
+ uint64_t reserved_10_15:6;
+ uint64_t ow4ecc:10;
+ } s;
+ struct cvmx_l2c_tadx_ecc1_s cn63xx;
+ struct cvmx_l2c_tadx_ecc1_s cn63xxp1;
+};
+
+union cvmx_l2c_tadx_ien {
+ uint64_t u64;
+ struct cvmx_l2c_tadx_ien_s {
+ uint64_t reserved_9_63:55;
+ uint64_t wrdislmc:1;
+ uint64_t rddislmc:1;
+ uint64_t noway:1;
+ uint64_t vbfdbe:1;
+ uint64_t vbfsbe:1;
+ uint64_t tagdbe:1;
+ uint64_t tagsbe:1;
+ uint64_t l2ddbe:1;
+ uint64_t l2dsbe:1;
+ } s;
+ struct cvmx_l2c_tadx_ien_s cn63xx;
+ struct cvmx_l2c_tadx_ien_cn63xxp1 {
+ uint64_t reserved_7_63:57;
+ uint64_t noway:1;
+ uint64_t vbfdbe:1;
+ uint64_t vbfsbe:1;
+ uint64_t tagdbe:1;
+ uint64_t tagsbe:1;
+ uint64_t l2ddbe:1;
+ uint64_t l2dsbe:1;
+ } cn63xxp1;
+};
+
+union cvmx_l2c_tadx_int {
+ uint64_t u64;
+ struct cvmx_l2c_tadx_int_s {
+ uint64_t reserved_9_63:55;
+ uint64_t wrdislmc:1;
+ uint64_t rddislmc:1;
+ uint64_t noway:1;
+ uint64_t vbfdbe:1;
+ uint64_t vbfsbe:1;
+ uint64_t tagdbe:1;
+ uint64_t tagsbe:1;
+ uint64_t l2ddbe:1;
+ uint64_t l2dsbe:1;
+ } s;
+ struct cvmx_l2c_tadx_int_s cn63xx;
+};
+
+union cvmx_l2c_tadx_pfc0 {
+ uint64_t u64;
+ struct cvmx_l2c_tadx_pfc0_s {
+ uint64_t count:64;
+ } s;
+ struct cvmx_l2c_tadx_pfc0_s cn63xx;
+ struct cvmx_l2c_tadx_pfc0_s cn63xxp1;
+};
+
+union cvmx_l2c_tadx_pfc1 {
+ uint64_t u64;
+ struct cvmx_l2c_tadx_pfc1_s {
+ uint64_t count:64;
+ } s;
+ struct cvmx_l2c_tadx_pfc1_s cn63xx;
+ struct cvmx_l2c_tadx_pfc1_s cn63xxp1;
+};
+
+union cvmx_l2c_tadx_pfc2 {
+ uint64_t u64;
+ struct cvmx_l2c_tadx_pfc2_s {
+ uint64_t count:64;
+ } s;
+ struct cvmx_l2c_tadx_pfc2_s cn63xx;
+ struct cvmx_l2c_tadx_pfc2_s cn63xxp1;
+};
+
+union cvmx_l2c_tadx_pfc3 {
+ uint64_t u64;
+ struct cvmx_l2c_tadx_pfc3_s {
+ uint64_t count:64;
+ } s;
+ struct cvmx_l2c_tadx_pfc3_s cn63xx;
+ struct cvmx_l2c_tadx_pfc3_s cn63xxp1;
+};
+
+union cvmx_l2c_tadx_prf {
+ uint64_t u64;
+ struct cvmx_l2c_tadx_prf_s {
+ uint64_t reserved_32_63:32;
+ uint64_t cnt3sel:8;
+ uint64_t cnt2sel:8;
+ uint64_t cnt1sel:8;
+ uint64_t cnt0sel:8;
+ } s;
+ struct cvmx_l2c_tadx_prf_s cn63xx;
+ struct cvmx_l2c_tadx_prf_s cn63xxp1;
+};
+
+union cvmx_l2c_tadx_tag {
+ uint64_t u64;
+ struct cvmx_l2c_tadx_tag_s {
+ uint64_t reserved_46_63:18;
+ uint64_t ecc:6;
+ uint64_t reserved_36_39:4;
+ uint64_t tag:19;
+ uint64_t reserved_4_16:13;
+ uint64_t use:1;
+ uint64_t valid:1;
+ uint64_t dirty:1;
+ uint64_t lock:1;
+ } s;
+ struct cvmx_l2c_tadx_tag_s cn63xx;
+ struct cvmx_l2c_tadx_tag_s cn63xxp1;
+};
+
+union cvmx_l2c_ver_id {
+ uint64_t u64;
+ struct cvmx_l2c_ver_id_s {
+ uint64_t mask:64;
+ } s;
+ struct cvmx_l2c_ver_id_s cn63xx;
+ struct cvmx_l2c_ver_id_s cn63xxp1;
+};
+
+union cvmx_l2c_ver_iob {
+ uint64_t u64;
+ struct cvmx_l2c_ver_iob_s {
+ uint64_t reserved_1_63:63;
+ uint64_t mask:1;
+ } s;
+ struct cvmx_l2c_ver_iob_s cn63xx;
+ struct cvmx_l2c_ver_iob_s cn63xxp1;
+};
+
+union cvmx_l2c_ver_msc {
+ uint64_t u64;
+ struct cvmx_l2c_ver_msc_s {
+ uint64_t reserved_2_63:62;
+ uint64_t invl2:1;
+ uint64_t dwb:1;
+ } s;
+ struct cvmx_l2c_ver_msc_s cn63xx;
+};
+
+union cvmx_l2c_ver_pp {
+ uint64_t u64;
+ struct cvmx_l2c_ver_pp_s {
+ uint64_t reserved_6_63:58;
+ uint64_t mask:6;
+ } s;
+ struct cvmx_l2c_ver_pp_s cn63xx;
+ struct cvmx_l2c_ver_pp_s cn63xxp1;
+};
+
+union cvmx_l2c_virtid_iobx {
+ uint64_t u64;
+ struct cvmx_l2c_virtid_iobx_s {
+ uint64_t reserved_14_63:50;
+ uint64_t dwbid:6;
+ uint64_t reserved_6_7:2;
+ uint64_t id:6;
+ } s;
+ struct cvmx_l2c_virtid_iobx_s cn63xx;
+ struct cvmx_l2c_virtid_iobx_s cn63xxp1;
+};
+
+union cvmx_l2c_virtid_ppx {
+ uint64_t u64;
+ struct cvmx_l2c_virtid_ppx_s {
+ uint64_t reserved_6_63:58;
+ uint64_t id:6;
+ } s;
+ struct cvmx_l2c_virtid_ppx_s cn63xx;
+ struct cvmx_l2c_virtid_ppx_s cn63xxp1;
+};
+
+union cvmx_l2c_vrt_ctl {
+ uint64_t u64;
+ struct cvmx_l2c_vrt_ctl_s {
+ uint64_t reserved_9_63:55;
+ uint64_t ooberr:1;
+ uint64_t reserved_7_7:1;
+ uint64_t memsz:3;
+ uint64_t numid:3;
+ uint64_t enable:1;
+ } s;
+ struct cvmx_l2c_vrt_ctl_s cn63xx;
+ struct cvmx_l2c_vrt_ctl_s cn63xxp1;
+};
+
+union cvmx_l2c_vrt_memx {
+ uint64_t u64;
+ struct cvmx_l2c_vrt_memx_s {
+ uint64_t reserved_36_63:28;
+ uint64_t parity:4;
+ uint64_t data:32;
+ } s;
+ struct cvmx_l2c_vrt_memx_s cn63xx;
+ struct cvmx_l2c_vrt_memx_s cn63xxp1;
+};
+
+union cvmx_l2c_wpar_iobx {
+ uint64_t u64;
+ struct cvmx_l2c_wpar_iobx_s {
+ uint64_t reserved_16_63:48;
+ uint64_t mask:16;
+ } s;
+ struct cvmx_l2c_wpar_iobx_s cn63xx;
+ struct cvmx_l2c_wpar_iobx_s cn63xxp1;
+};
+
+union cvmx_l2c_wpar_ppx {
+ uint64_t u64;
+ struct cvmx_l2c_wpar_ppx_s {
+ uint64_t reserved_16_63:48;
+ uint64_t mask:16;
+ } s;
+ struct cvmx_l2c_wpar_ppx_s cn63xx;
+ struct cvmx_l2c_wpar_ppx_s cn63xxp1;
+};
+
+union cvmx_l2c_xmcx_pfc {
+ uint64_t u64;
+ struct cvmx_l2c_xmcx_pfc_s {
+ uint64_t count:64;
+ } s;
+ struct cvmx_l2c_xmcx_pfc_s cn63xx;
+ struct cvmx_l2c_xmcx_pfc_s cn63xxp1;
+};
+
+union cvmx_l2c_xmc_cmd {
+ uint64_t u64;
+ struct cvmx_l2c_xmc_cmd_s {
+ uint64_t inuse:1;
+ uint64_t cmd:6;
+ uint64_t reserved_38_56:19;
+ uint64_t addr:38;
+ } s;
+ struct cvmx_l2c_xmc_cmd_s cn63xx;
+ struct cvmx_l2c_xmc_cmd_s cn63xxp1;
+};
+
+union cvmx_l2c_xmdx_pfc {
+ uint64_t u64;
+ struct cvmx_l2c_xmdx_pfc_s {
+ uint64_t count:64;
+ } s;
+ struct cvmx_l2c_xmdx_pfc_s cn63xx;
+ struct cvmx_l2c_xmdx_pfc_s cn63xxp1;
+};
+
#endif
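
Every register added above follows the same two-part pattern used throughout these cvmx headers: an address macro built from CVMX_ADD_IO_SEG(), and a union whose u64 member overlays the named bit fields of the generic .s view plus per-model views such as .cn63xx. A minimal sketch of the intended read-modify-write usage, assuming the usual cvmx_read_csr()/cvmx_write_csr() accessors from <asm/octeon/cvmx.h> (not part of this patch):

#include <asm/octeon/cvmx.h>
#include <asm/octeon/cvmx-l2c-defs.h>

/* Sketch only: toggle L2 index aliasing via the new CN63XX L2C_CTL register. */
static void example_disable_index_alias(void)
{
	union cvmx_l2c_ctl l2c_ctl;

	l2c_ctl.u64 = cvmx_read_csr(CVMX_L2C_CTL);	/* read the whole CSR */
	l2c_ctl.s.disidxalias = 1;			/* modify one bit field */
	cvmx_write_csr(CVMX_L2C_CTL, l2c_ctl.u64);	/* write it back */
}
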
diff --git a/arch/mips/include/asm/octeon/cvmx-l2c.h b/arch/mips/include/asm/octeon/cvmx-l2c.h
index 2a8c0902ea50..0b32c5b118e2 100644
--- a/arch/mips/include/asm/octeon/cvmx-l2c.h
+++ b/arch/mips/include/asm/octeon/cvmx-l2c.h
@@ -4,7 +4,7 @@
* Contact: support@caviumnetworks.com
* This file is part of the OCTEON SDK
*
- * Copyright (c) 2003-2008 Cavium Networks
+ * Copyright (c) 2003-2010 Cavium Networks
*
* This file is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License, Version 2, as
@@ -26,7 +26,6 @@
***********************license end**************************************/
/*
- *
* Interface to the Level 2 Cache (L2C) control, measurement, and debugging
* facilities.
*/
@@ -34,93 +33,126 @@
#ifndef __CVMX_L2C_H__
#define __CVMX_L2C_H__
-/* Deprecated macro, use function */
-#define CVMX_L2_ASSOC cvmx_l2c_get_num_assoc()
-
-/* Deprecated macro, use function */
-#define CVMX_L2_SET_BITS cvmx_l2c_get_set_bits()
+#define CVMX_L2_ASSOC cvmx_l2c_get_num_assoc() /* Deprecated macro, use function */
+#define CVMX_L2_SET_BITS cvmx_l2c_get_set_bits() /* Deprecated macro, use function */
+#define CVMX_L2_SETS cvmx_l2c_get_num_sets() /* Deprecated macro, use function */
-/* Deprecated macro, use function */
-#define CVMX_L2_SETS cvmx_l2c_get_num_sets()
#define CVMX_L2C_IDX_ADDR_SHIFT 7 /* based on 128 byte cache line size */
#define CVMX_L2C_IDX_MASK (cvmx_l2c_get_num_sets() - 1)
/* Defines for index aliasing computations */
-#define CVMX_L2C_TAG_ADDR_ALIAS_SHIFT \
- (CVMX_L2C_IDX_ADDR_SHIFT + cvmx_l2c_get_set_bits())
+#define CVMX_L2C_TAG_ADDR_ALIAS_SHIFT (CVMX_L2C_IDX_ADDR_SHIFT + cvmx_l2c_get_set_bits())
+#define CVMX_L2C_ALIAS_MASK (CVMX_L2C_IDX_MASK << CVMX_L2C_TAG_ADDR_ALIAS_SHIFT)
+#define CVMX_L2C_MEMBANK_SELECT_SIZE 4096
-#define CVMX_L2C_ALIAS_MASK \
- (CVMX_L2C_IDX_MASK << CVMX_L2C_TAG_ADDR_ALIAS_SHIFT)
+/* Defines for Virtualizations, valid only from Octeon II onwards. */
+#define CVMX_L2C_VRT_MAX_VIRTID_ALLOWED ((OCTEON_IS_MODEL(OCTEON_CN63XX)) ? 64 : 0)
+#define CVMX_L2C_VRT_MAX_MEMSZ_ALLOWED ((OCTEON_IS_MODEL(OCTEON_CN63XX)) ? 32 : 0)
union cvmx_l2c_tag {
uint64_t u64;
struct {
uint64_t reserved:28;
- uint64_t V:1; /* Line valid */
- uint64_t D:1; /* Line dirty */
- uint64_t L:1; /* Line locked */
- uint64_t U:1; /* Use, LRU eviction */
+ uint64_t V:1; /* Line valid */
+ uint64_t D:1; /* Line dirty */
+ uint64_t L:1; /* Line locked */
+ uint64_t U:1; /* Use, LRU eviction */
uint64_t addr:32; /* Phys mem (not all bits valid) */
} s;
};
+/* Number of L2C Tag-and-data sections (TADs) that are connected to LMC. */
+#define CVMX_L2C_TADS 1
+
/* L2C Performance Counter events. */
enum cvmx_l2c_event {
- CVMX_L2C_EVENT_CYCLES = 0,
- CVMX_L2C_EVENT_INSTRUCTION_MISS = 1,
- CVMX_L2C_EVENT_INSTRUCTION_HIT = 2,
- CVMX_L2C_EVENT_DATA_MISS = 3,
- CVMX_L2C_EVENT_DATA_HIT = 4,
- CVMX_L2C_EVENT_MISS = 5,
- CVMX_L2C_EVENT_HIT = 6,
- CVMX_L2C_EVENT_VICTIM_HIT = 7,
- CVMX_L2C_EVENT_INDEX_CONFLICT = 8,
- CVMX_L2C_EVENT_TAG_PROBE = 9,
- CVMX_L2C_EVENT_TAG_UPDATE = 10,
- CVMX_L2C_EVENT_TAG_COMPLETE = 11,
- CVMX_L2C_EVENT_TAG_DIRTY = 12,
- CVMX_L2C_EVENT_DATA_STORE_NOP = 13,
- CVMX_L2C_EVENT_DATA_STORE_READ = 14,
+ CVMX_L2C_EVENT_CYCLES = 0,
+ CVMX_L2C_EVENT_INSTRUCTION_MISS = 1,
+ CVMX_L2C_EVENT_INSTRUCTION_HIT = 2,
+ CVMX_L2C_EVENT_DATA_MISS = 3,
+ CVMX_L2C_EVENT_DATA_HIT = 4,
+ CVMX_L2C_EVENT_MISS = 5,
+ CVMX_L2C_EVENT_HIT = 6,
+ CVMX_L2C_EVENT_VICTIM_HIT = 7,
+ CVMX_L2C_EVENT_INDEX_CONFLICT = 8,
+ CVMX_L2C_EVENT_TAG_PROBE = 9,
+ CVMX_L2C_EVENT_TAG_UPDATE = 10,
+ CVMX_L2C_EVENT_TAG_COMPLETE = 11,
+ CVMX_L2C_EVENT_TAG_DIRTY = 12,
+ CVMX_L2C_EVENT_DATA_STORE_NOP = 13,
+ CVMX_L2C_EVENT_DATA_STORE_READ = 14,
CVMX_L2C_EVENT_DATA_STORE_WRITE = 15,
- CVMX_L2C_EVENT_FILL_DATA_VALID = 16,
- CVMX_L2C_EVENT_WRITE_REQUEST = 17,
- CVMX_L2C_EVENT_READ_REQUEST = 18,
+ CVMX_L2C_EVENT_FILL_DATA_VALID = 16,
+ CVMX_L2C_EVENT_WRITE_REQUEST = 17,
+ CVMX_L2C_EVENT_READ_REQUEST = 18,
CVMX_L2C_EVENT_WRITE_DATA_VALID = 19,
- CVMX_L2C_EVENT_XMC_NOP = 20,
- CVMX_L2C_EVENT_XMC_LDT = 21,
- CVMX_L2C_EVENT_XMC_LDI = 22,
- CVMX_L2C_EVENT_XMC_LDD = 23,
- CVMX_L2C_EVENT_XMC_STF = 24,
- CVMX_L2C_EVENT_XMC_STT = 25,
- CVMX_L2C_EVENT_XMC_STP = 26,
- CVMX_L2C_EVENT_XMC_STC = 27,
- CVMX_L2C_EVENT_XMC_DWB = 28,
- CVMX_L2C_EVENT_XMC_PL2 = 29,
- CVMX_L2C_EVENT_XMC_PSL1 = 30,
- CVMX_L2C_EVENT_XMC_IOBLD = 31,
- CVMX_L2C_EVENT_XMC_IOBST = 32,
- CVMX_L2C_EVENT_XMC_IOBDMA = 33,
- CVMX_L2C_EVENT_XMC_IOBRSP = 34,
- CVMX_L2C_EVENT_XMC_BUS_VALID = 35,
- CVMX_L2C_EVENT_XMC_MEM_DATA = 36,
- CVMX_L2C_EVENT_XMC_REFL_DATA = 37,
- CVMX_L2C_EVENT_XMC_IOBRSP_DATA = 38,
- CVMX_L2C_EVENT_RSC_NOP = 39,
- CVMX_L2C_EVENT_RSC_STDN = 40,
- CVMX_L2C_EVENT_RSC_FILL = 41,
- CVMX_L2C_EVENT_RSC_REFL = 42,
- CVMX_L2C_EVENT_RSC_STIN = 43,
- CVMX_L2C_EVENT_RSC_SCIN = 44,
- CVMX_L2C_EVENT_RSC_SCFL = 45,
- CVMX_L2C_EVENT_RSC_SCDN = 46,
- CVMX_L2C_EVENT_RSC_DATA_VALID = 47,
- CVMX_L2C_EVENT_RSC_VALID_FILL = 48,
- CVMX_L2C_EVENT_RSC_VALID_STRSP = 49,
- CVMX_L2C_EVENT_RSC_VALID_REFL = 50,
- CVMX_L2C_EVENT_LRF_REQ = 51,
- CVMX_L2C_EVENT_DT_RD_ALLOC = 52,
- CVMX_L2C_EVENT_DT_WR_INVAL = 53
+ CVMX_L2C_EVENT_XMC_NOP = 20,
+ CVMX_L2C_EVENT_XMC_LDT = 21,
+ CVMX_L2C_EVENT_XMC_LDI = 22,
+ CVMX_L2C_EVENT_XMC_LDD = 23,
+ CVMX_L2C_EVENT_XMC_STF = 24,
+ CVMX_L2C_EVENT_XMC_STT = 25,
+ CVMX_L2C_EVENT_XMC_STP = 26,
+ CVMX_L2C_EVENT_XMC_STC = 27,
+ CVMX_L2C_EVENT_XMC_DWB = 28,
+ CVMX_L2C_EVENT_XMC_PL2 = 29,
+ CVMX_L2C_EVENT_XMC_PSL1 = 30,
+ CVMX_L2C_EVENT_XMC_IOBLD = 31,
+ CVMX_L2C_EVENT_XMC_IOBST = 32,
+ CVMX_L2C_EVENT_XMC_IOBDMA = 33,
+ CVMX_L2C_EVENT_XMC_IOBRSP = 34,
+ CVMX_L2C_EVENT_XMC_BUS_VALID = 35,
+ CVMX_L2C_EVENT_XMC_MEM_DATA = 36,
+ CVMX_L2C_EVENT_XMC_REFL_DATA = 37,
+ CVMX_L2C_EVENT_XMC_IOBRSP_DATA = 38,
+ CVMX_L2C_EVENT_RSC_NOP = 39,
+ CVMX_L2C_EVENT_RSC_STDN = 40,
+ CVMX_L2C_EVENT_RSC_FILL = 41,
+ CVMX_L2C_EVENT_RSC_REFL = 42,
+ CVMX_L2C_EVENT_RSC_STIN = 43,
+ CVMX_L2C_EVENT_RSC_SCIN = 44,
+ CVMX_L2C_EVENT_RSC_SCFL = 45,
+ CVMX_L2C_EVENT_RSC_SCDN = 46,
+ CVMX_L2C_EVENT_RSC_DATA_VALID = 47,
+ CVMX_L2C_EVENT_RSC_VALID_FILL = 48,
+ CVMX_L2C_EVENT_RSC_VALID_STRSP = 49,
+ CVMX_L2C_EVENT_RSC_VALID_REFL = 50,
+ CVMX_L2C_EVENT_LRF_REQ = 51,
+ CVMX_L2C_EVENT_DT_RD_ALLOC = 52,
+ CVMX_L2C_EVENT_DT_WR_INVAL = 53,
+ CVMX_L2C_EVENT_MAX
+};
+
+/* L2C Performance Counter events for Octeon2. */
+enum cvmx_l2c_tad_event {
+ CVMX_L2C_TAD_EVENT_NONE = 0,
+ CVMX_L2C_TAD_EVENT_TAG_HIT = 1,
+ CVMX_L2C_TAD_EVENT_TAG_MISS = 2,
+ CVMX_L2C_TAD_EVENT_TAG_NOALLOC = 3,
+ CVMX_L2C_TAD_EVENT_TAG_VICTIM = 4,
+ CVMX_L2C_TAD_EVENT_SC_FAIL = 5,
+ CVMX_L2C_TAD_EVENT_SC_PASS = 6,
+ CVMX_L2C_TAD_EVENT_LFB_VALID = 7,
+ CVMX_L2C_TAD_EVENT_LFB_WAIT_LFB = 8,
+ CVMX_L2C_TAD_EVENT_LFB_WAIT_VAB = 9,
+ CVMX_L2C_TAD_EVENT_QUAD0_INDEX = 128,
+ CVMX_L2C_TAD_EVENT_QUAD0_READ = 129,
+ CVMX_L2C_TAD_EVENT_QUAD0_BANK = 130,
+ CVMX_L2C_TAD_EVENT_QUAD0_WDAT = 131,
+ CVMX_L2C_TAD_EVENT_QUAD1_INDEX = 144,
+ CVMX_L2C_TAD_EVENT_QUAD1_READ = 145,
+ CVMX_L2C_TAD_EVENT_QUAD1_BANK = 146,
+ CVMX_L2C_TAD_EVENT_QUAD1_WDAT = 147,
+ CVMX_L2C_TAD_EVENT_QUAD2_INDEX = 160,
+ CVMX_L2C_TAD_EVENT_QUAD2_READ = 161,
+ CVMX_L2C_TAD_EVENT_QUAD2_BANK = 162,
+ CVMX_L2C_TAD_EVENT_QUAD2_WDAT = 163,
+ CVMX_L2C_TAD_EVENT_QUAD3_INDEX = 176,
+ CVMX_L2C_TAD_EVENT_QUAD3_READ = 177,
+ CVMX_L2C_TAD_EVENT_QUAD3_BANK = 178,
+ CVMX_L2C_TAD_EVENT_QUAD3_WDAT = 179,
+ CVMX_L2C_TAD_EVENT_MAX
};
/**
@@ -132,10 +164,10 @@ enum cvmx_l2c_event {
* @clear_on_read: When asserted, any read of the performance counter
* clears the counter.
*
- * The routine does not clear the counter.
+ * @note The routine does not clear the counter.
*/
-void cvmx_l2c_config_perf(uint32_t counter,
- enum cvmx_l2c_event event, uint32_t clear_on_read);
+void cvmx_l2c_config_perf(uint32_t counter, enum cvmx_l2c_event event, uint32_t clear_on_read);
+
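
For reference, the intended call sequence is: pick one of the counters, select an event from enum cvmx_l2c_event, run the workload, then read the count. A hedged sketch, assuming the cvmx_l2c_read_perf() helper whose description follows below:

/* Sketch: count L2 data misses on counter 0 and read the result later. */
static uint64_t example_count_data_misses(void)
{
	/* counter 0, clear-on-read disabled */
	cvmx_l2c_config_perf(0, CVMX_L2C_EVENT_DATA_MISS, 0);

	/* ... run the workload of interest ... */

	return cvmx_l2c_read_perf(0);
}
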
/**
* Read the given L2 Cache performance counter. The counter must be configured
* before reading, but this routine does not enforce this requirement.
@@ -160,18 +192,18 @@ int cvmx_l2c_get_core_way_partition(uint32_t core);
/**
* Partitions the L2 cache for a core
*
- * @core: The core that the partitioning applies to.
+ * @core: The core that the partitioning applies to.
+ * @mask: The partitioning of the ways expressed as a binary
+ * mask. A 0 bit allows the core to evict cache lines from
+ * a way, while a 1 bit blocks the core from evicting any
+ * lines from that way. There must be at least one allowed
+ * way (0 bit) in the mask.
*
- * @mask: The partitioning of the ways expressed as a binary mask. A 0
- * bit allows the core to evict cache lines from a way, while a
- * 1 bit blocks the core from evicting any lines from that
- * way. There must be at least one allowed way (0 bit) in the
- * mask.
- *
- * If any ways are blocked for all cores and the HW blocks, then those
- * ways will never have any cache lines evicted from them. All cores
- * and the hardware blocks are free to read from all ways regardless
- * of the partitioning.
+ *
+ * @note If any ways are blocked for all cores and the HW blocks, then
+ * those ways will never have any cache lines evicted from them.
+ * All cores and the hardware blocks are free to read from all
+ * ways regardless of the partitioning.
*/
int cvmx_l2c_set_core_way_partition(uint32_t core, uint32_t mask);
@@ -187,19 +219,21 @@ int cvmx_l2c_get_hw_way_partition(void);
/**
* Partitions the L2 cache for the hardware blocks.
*
- * @mask: The partitioning of the ways expressed as a binary mask. A 0
- * bit allows the core to evict cache lines from a way, while a
- * 1 bit blocks the core from evicting any lines from that
- * way. There must be at least one allowed way (0 bit) in the
- * mask.
+ * @mask: The partitioning of the ways expressed as a binary
+ * mask. A 0 bit allows the core to evict cache lines from
+ * a way, while a 1 bit blocks the core from evicting any
+ * lines from that way. There must be at least one allowed
+ * way (0 bit) in the mask.
*
- * If any ways are blocked for all cores and the HW blocks, then those
- * ways will never have any cache lines evicted from them. All cores
- * and the hardware blocks are free to read from all ways regardless
- * of the partitioning.
+ *
+ * @note If any ways are blocked for all cores and the HW blocks, then
+ * those ways will never have any cache lines evicted from them.
+ * All cores and the hardware blocks are free to read from all
+ * ways regardless of the partitioning.
*/
int cvmx_l2c_set_hw_way_partition(uint32_t mask);
+
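
The same mask convention applies to the per-core and the hardware-block variants: a 1 bit blocks eviction from that way, a 0 bit allows it, and at least one way must remain allowed. A small sketch under the assumption of a part whose associativity (at most 16 ways) is reported by cvmx_l2c_get_num_assoc():

/* Sketch: block core 2 and the hardware blocks from evicting the upper half of the ways. */
static int example_partition_ways(void)
{
	int assoc = cvmx_l2c_get_num_assoc();	/* assumed < 32 so the shifts are defined */
	uint32_t upper_half = ((1u << assoc) - 1) & ~((1u << (assoc / 2)) - 1);
	int rc;

	rc = cvmx_l2c_set_core_way_partition(2, upper_half);
	if (rc)
		return rc;
	return cvmx_l2c_set_hw_way_partition(upper_half);
}
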
/**
* Locks a line in the L2 cache at the specified physical address
*
@@ -263,13 +297,14 @@ int cvmx_l2c_unlock_mem_region(uint64_t start, uint64_t len);
*/
union cvmx_l2c_tag cvmx_l2c_get_tag(uint32_t association, uint32_t index);
-/* Wrapper around deprecated old function name */
-static inline union cvmx_l2c_tag cvmx_get_l2c_tag(uint32_t association,
- uint32_t index)
+/* Wrapper providing a deprecated old function name */
+static inline union cvmx_l2c_tag cvmx_get_l2c_tag(uint32_t association, uint32_t index) __attribute__((deprecated));
+static inline union cvmx_l2c_tag cvmx_get_l2c_tag(uint32_t association, uint32_t index)
{
return cvmx_l2c_get_tag(association, index);
}
+
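
Since the cvmx_get_l2c_tag() wrapper now carries an __attribute__((deprecated)) marker, new code should call cvmx_l2c_get_tag() directly and decode the result through union cvmx_l2c_tag. A hedged sketch using the V (valid) and D (dirty) fields defined earlier in this header:

/* Sketch: report whether a given way/index currently holds a valid, dirty line. */
static int example_line_is_dirty(uint32_t way, uint32_t index)
{
	union cvmx_l2c_tag tag = cvmx_l2c_get_tag(way, index);

	return tag.s.V && tag.s.D;	/* valid and dirty bits from the tag */
}
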
/**
* Returns the cache index for a given physical address
*
diff --git a/arch/mips/include/asm/octeon/cvmx-l2d-defs.h b/arch/mips/include/asm/octeon/cvmx-l2d-defs.h
index d7102d455e1b..60543e0e77fc 100644
--- a/arch/mips/include/asm/octeon/cvmx-l2d-defs.h
+++ b/arch/mips/include/asm/octeon/cvmx-l2d-defs.h
@@ -4,7 +4,7 @@
* Contact: support@caviumnetworks.com
* This file is part of the OCTEON SDK
*
- * Copyright (c) 2003-2008 Cavium Networks
+ * Copyright (c) 2003-2010 Cavium Networks
*
* This file is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License, Version 2, as
@@ -28,30 +28,18 @@
#ifndef __CVMX_L2D_DEFS_H__
#define __CVMX_L2D_DEFS_H__
-#define CVMX_L2D_BST0 \
- CVMX_ADD_IO_SEG(0x0001180080000780ull)
-#define CVMX_L2D_BST1 \
- CVMX_ADD_IO_SEG(0x0001180080000788ull)
-#define CVMX_L2D_BST2 \
- CVMX_ADD_IO_SEG(0x0001180080000790ull)
-#define CVMX_L2D_BST3 \
- CVMX_ADD_IO_SEG(0x0001180080000798ull)
-#define CVMX_L2D_ERR \
- CVMX_ADD_IO_SEG(0x0001180080000010ull)
-#define CVMX_L2D_FADR \
- CVMX_ADD_IO_SEG(0x0001180080000018ull)
-#define CVMX_L2D_FSYN0 \
- CVMX_ADD_IO_SEG(0x0001180080000020ull)
-#define CVMX_L2D_FSYN1 \
- CVMX_ADD_IO_SEG(0x0001180080000028ull)
-#define CVMX_L2D_FUS0 \
- CVMX_ADD_IO_SEG(0x00011800800007A0ull)
-#define CVMX_L2D_FUS1 \
- CVMX_ADD_IO_SEG(0x00011800800007A8ull)
-#define CVMX_L2D_FUS2 \
- CVMX_ADD_IO_SEG(0x00011800800007B0ull)
-#define CVMX_L2D_FUS3 \
- CVMX_ADD_IO_SEG(0x00011800800007B8ull)
+#define CVMX_L2D_BST0 (CVMX_ADD_IO_SEG(0x0001180080000780ull))
+#define CVMX_L2D_BST1 (CVMX_ADD_IO_SEG(0x0001180080000788ull))
+#define CVMX_L2D_BST2 (CVMX_ADD_IO_SEG(0x0001180080000790ull))
+#define CVMX_L2D_BST3 (CVMX_ADD_IO_SEG(0x0001180080000798ull))
+#define CVMX_L2D_ERR (CVMX_ADD_IO_SEG(0x0001180080000010ull))
+#define CVMX_L2D_FADR (CVMX_ADD_IO_SEG(0x0001180080000018ull))
+#define CVMX_L2D_FSYN0 (CVMX_ADD_IO_SEG(0x0001180080000020ull))
+#define CVMX_L2D_FSYN1 (CVMX_ADD_IO_SEG(0x0001180080000028ull))
+#define CVMX_L2D_FUS0 (CVMX_ADD_IO_SEG(0x00011800800007A0ull))
+#define CVMX_L2D_FUS1 (CVMX_ADD_IO_SEG(0x00011800800007A8ull))
+#define CVMX_L2D_FUS2 (CVMX_ADD_IO_SEG(0x00011800800007B0ull))
+#define CVMX_L2D_FUS3 (CVMX_ADD_IO_SEG(0x00011800800007B8ull))
union cvmx_l2d_bst0 {
uint64_t u64;
diff --git a/arch/mips/include/asm/octeon/cvmx-l2t-defs.h b/arch/mips/include/asm/octeon/cvmx-l2t-defs.h
index 2639a3f5ffc2..873968f55eeb 100644
--- a/arch/mips/include/asm/octeon/cvmx-l2t-defs.h
+++ b/arch/mips/include/asm/octeon/cvmx-l2t-defs.h
@@ -4,7 +4,7 @@
* Contact: support@caviumnetworks.com
* This file is part of the OCTEON SDK
*
- * Copyright (c) 2003-2008 Cavium Networks
+ * Copyright (c) 2003-2010 Cavium Networks
*
* This file is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License, Version 2, as
@@ -28,8 +28,7 @@
#ifndef __CVMX_L2T_DEFS_H__
#define __CVMX_L2T_DEFS_H__
-#define CVMX_L2T_ERR \
- CVMX_ADD_IO_SEG(0x0001180080000008ull)
+#define CVMX_L2T_ERR (CVMX_ADD_IO_SEG(0x0001180080000008ull))
union cvmx_l2t_err {
uint64_t u64;
diff --git a/arch/mips/include/asm/octeon/cvmx-led-defs.h b/arch/mips/include/asm/octeon/cvmx-led-defs.h
index 16f174a4dadf..e25173bb8bb7 100644
--- a/arch/mips/include/asm/octeon/cvmx-led-defs.h
+++ b/arch/mips/include/asm/octeon/cvmx-led-defs.h
@@ -4,7 +4,7 @@
* Contact: support@caviumnetworks.com
* This file is part of the OCTEON SDK
*
- * Copyright (c) 2003-2008 Cavium Networks
+ * Copyright (c) 2003-2010 Cavium Networks
*
* This file is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License, Version 2, as
@@ -28,32 +28,19 @@
#ifndef __CVMX_LED_DEFS_H__
#define __CVMX_LED_DEFS_H__
-#define CVMX_LED_BLINK \
- CVMX_ADD_IO_SEG(0x0001180000001A48ull)
-#define CVMX_LED_CLK_PHASE \
- CVMX_ADD_IO_SEG(0x0001180000001A08ull)
-#define CVMX_LED_CYLON \
- CVMX_ADD_IO_SEG(0x0001180000001AF8ull)
-#define CVMX_LED_DBG \
- CVMX_ADD_IO_SEG(0x0001180000001A18ull)
-#define CVMX_LED_EN \
- CVMX_ADD_IO_SEG(0x0001180000001A00ull)
-#define CVMX_LED_POLARITY \
- CVMX_ADD_IO_SEG(0x0001180000001A50ull)
-#define CVMX_LED_PRT \
- CVMX_ADD_IO_SEG(0x0001180000001A10ull)
-#define CVMX_LED_PRT_FMT \
- CVMX_ADD_IO_SEG(0x0001180000001A30ull)
-#define CVMX_LED_PRT_STATUSX(offset) \
- CVMX_ADD_IO_SEG(0x0001180000001A80ull + (((offset) & 7) * 8))
-#define CVMX_LED_UDD_CNTX(offset) \
- CVMX_ADD_IO_SEG(0x0001180000001A20ull + (((offset) & 1) * 8))
-#define CVMX_LED_UDD_DATX(offset) \
- CVMX_ADD_IO_SEG(0x0001180000001A38ull + (((offset) & 1) * 8))
-#define CVMX_LED_UDD_DAT_CLRX(offset) \
- CVMX_ADD_IO_SEG(0x0001180000001AC8ull + (((offset) & 1) * 16))
-#define CVMX_LED_UDD_DAT_SETX(offset) \
- CVMX_ADD_IO_SEG(0x0001180000001AC0ull + (((offset) & 1) * 16))
+#define CVMX_LED_BLINK (CVMX_ADD_IO_SEG(0x0001180000001A48ull))
+#define CVMX_LED_CLK_PHASE (CVMX_ADD_IO_SEG(0x0001180000001A08ull))
+#define CVMX_LED_CYLON (CVMX_ADD_IO_SEG(0x0001180000001AF8ull))
+#define CVMX_LED_DBG (CVMX_ADD_IO_SEG(0x0001180000001A18ull))
+#define CVMX_LED_EN (CVMX_ADD_IO_SEG(0x0001180000001A00ull))
+#define CVMX_LED_POLARITY (CVMX_ADD_IO_SEG(0x0001180000001A50ull))
+#define CVMX_LED_PRT (CVMX_ADD_IO_SEG(0x0001180000001A10ull))
+#define CVMX_LED_PRT_FMT (CVMX_ADD_IO_SEG(0x0001180000001A30ull))
+#define CVMX_LED_PRT_STATUSX(offset) (CVMX_ADD_IO_SEG(0x0001180000001A80ull) + ((offset) & 7) * 8)
+#define CVMX_LED_UDD_CNTX(offset) (CVMX_ADD_IO_SEG(0x0001180000001A20ull) + ((offset) & 1) * 8)
+#define CVMX_LED_UDD_DATX(offset) (CVMX_ADD_IO_SEG(0x0001180000001A38ull) + ((offset) & 1) * 8)
+#define CVMX_LED_UDD_DAT_CLRX(offset) (CVMX_ADD_IO_SEG(0x0001180000001AC8ull) + ((offset) & 1) * 16)
+#define CVMX_LED_UDD_DAT_SETX(offset) (CVMX_ADD_IO_SEG(0x0001180000001AC0ull) + ((offset) & 1) * 16)
union cvmx_led_blink {
uint64_t u64;
diff --git a/arch/mips/include/asm/octeon/cvmx-mio-defs.h b/arch/mips/include/asm/octeon/cvmx-mio-defs.h
index 6555f0530988..52b14a333ad4 100644
--- a/arch/mips/include/asm/octeon/cvmx-mio-defs.h
+++ b/arch/mips/include/asm/octeon/cvmx-mio-defs.h
@@ -4,7 +4,7 @@
* Contact: support@caviumnetworks.com
* This file is part of the OCTEON SDK
*
- * Copyright (c) 2003-2008 Cavium Networks
+ * Copyright (c) 2003-2010 Cavium Networks
*
* This file is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License, Version 2, as
@@ -28,191 +28,117 @@
#ifndef __CVMX_MIO_DEFS_H__
#define __CVMX_MIO_DEFS_H__
-#define CVMX_MIO_BOOT_BIST_STAT \
- CVMX_ADD_IO_SEG(0x00011800000000F8ull)
-#define CVMX_MIO_BOOT_COMP \
- CVMX_ADD_IO_SEG(0x00011800000000B8ull)
-#define CVMX_MIO_BOOT_DMA_CFGX(offset) \
- CVMX_ADD_IO_SEG(0x0001180000000100ull + (((offset) & 3) * 8))
-#define CVMX_MIO_BOOT_DMA_INTX(offset) \
- CVMX_ADD_IO_SEG(0x0001180000000138ull + (((offset) & 3) * 8))
-#define CVMX_MIO_BOOT_DMA_INT_ENX(offset) \
- CVMX_ADD_IO_SEG(0x0001180000000150ull + (((offset) & 3) * 8))
-#define CVMX_MIO_BOOT_DMA_TIMX(offset) \
- CVMX_ADD_IO_SEG(0x0001180000000120ull + (((offset) & 3) * 8))
-#define CVMX_MIO_BOOT_ERR \
- CVMX_ADD_IO_SEG(0x00011800000000A0ull)
-#define CVMX_MIO_BOOT_INT \
- CVMX_ADD_IO_SEG(0x00011800000000A8ull)
-#define CVMX_MIO_BOOT_LOC_ADR \
- CVMX_ADD_IO_SEG(0x0001180000000090ull)
-#define CVMX_MIO_BOOT_LOC_CFGX(offset) \
- CVMX_ADD_IO_SEG(0x0001180000000080ull + (((offset) & 1) * 8))
-#define CVMX_MIO_BOOT_LOC_DAT \
- CVMX_ADD_IO_SEG(0x0001180000000098ull)
-#define CVMX_MIO_BOOT_PIN_DEFS \
- CVMX_ADD_IO_SEG(0x00011800000000C0ull)
-#define CVMX_MIO_BOOT_REG_CFGX(offset) \
- CVMX_ADD_IO_SEG(0x0001180000000000ull + (((offset) & 7) * 8))
-#define CVMX_MIO_BOOT_REG_TIMX(offset) \
- CVMX_ADD_IO_SEG(0x0001180000000040ull + (((offset) & 7) * 8))
-#define CVMX_MIO_BOOT_THR \
- CVMX_ADD_IO_SEG(0x00011800000000B0ull)
-#define CVMX_MIO_FUS_BNK_DATX(offset) \
- CVMX_ADD_IO_SEG(0x0001180000001520ull + (((offset) & 3) * 8))
-#define CVMX_MIO_FUS_DAT0 \
- CVMX_ADD_IO_SEG(0x0001180000001400ull)
-#define CVMX_MIO_FUS_DAT1 \
- CVMX_ADD_IO_SEG(0x0001180000001408ull)
-#define CVMX_MIO_FUS_DAT2 \
- CVMX_ADD_IO_SEG(0x0001180000001410ull)
-#define CVMX_MIO_FUS_DAT3 \
- CVMX_ADD_IO_SEG(0x0001180000001418ull)
-#define CVMX_MIO_FUS_EMA \
- CVMX_ADD_IO_SEG(0x0001180000001550ull)
-#define CVMX_MIO_FUS_PDF \
- CVMX_ADD_IO_SEG(0x0001180000001420ull)
-#define CVMX_MIO_FUS_PLL \
- CVMX_ADD_IO_SEG(0x0001180000001580ull)
-#define CVMX_MIO_FUS_PROG \
- CVMX_ADD_IO_SEG(0x0001180000001510ull)
-#define CVMX_MIO_FUS_PROG_TIMES \
- CVMX_ADD_IO_SEG(0x0001180000001518ull)
-#define CVMX_MIO_FUS_RCMD \
- CVMX_ADD_IO_SEG(0x0001180000001500ull)
-#define CVMX_MIO_FUS_SPR_REPAIR_RES \
- CVMX_ADD_IO_SEG(0x0001180000001548ull)
-#define CVMX_MIO_FUS_SPR_REPAIR_SUM \
- CVMX_ADD_IO_SEG(0x0001180000001540ull)
-#define CVMX_MIO_FUS_UNLOCK \
- CVMX_ADD_IO_SEG(0x0001180000001578ull)
-#define CVMX_MIO_FUS_WADR \
- CVMX_ADD_IO_SEG(0x0001180000001508ull)
-#define CVMX_MIO_NDF_DMA_CFG \
- CVMX_ADD_IO_SEG(0x0001180000000168ull)
-#define CVMX_MIO_NDF_DMA_INT \
- CVMX_ADD_IO_SEG(0x0001180000000170ull)
-#define CVMX_MIO_NDF_DMA_INT_EN \
- CVMX_ADD_IO_SEG(0x0001180000000178ull)
-#define CVMX_MIO_PLL_CTL \
- CVMX_ADD_IO_SEG(0x0001180000001448ull)
-#define CVMX_MIO_PLL_SETTING \
- CVMX_ADD_IO_SEG(0x0001180000001440ull)
-#define CVMX_MIO_TWSX_INT(offset) \
- CVMX_ADD_IO_SEG(0x0001180000001010ull + (((offset) & 1) * 512))
-#define CVMX_MIO_TWSX_SW_TWSI(offset) \
- CVMX_ADD_IO_SEG(0x0001180000001000ull + (((offset) & 1) * 512))
-#define CVMX_MIO_TWSX_SW_TWSI_EXT(offset) \
- CVMX_ADD_IO_SEG(0x0001180000001018ull + (((offset) & 1) * 512))
-#define CVMX_MIO_TWSX_TWSI_SW(offset) \
- CVMX_ADD_IO_SEG(0x0001180000001008ull + (((offset) & 1) * 512))
-#define CVMX_MIO_UART2_DLH \
- CVMX_ADD_IO_SEG(0x0001180000000488ull)
-#define CVMX_MIO_UART2_DLL \
- CVMX_ADD_IO_SEG(0x0001180000000480ull)
-#define CVMX_MIO_UART2_FAR \
- CVMX_ADD_IO_SEG(0x0001180000000520ull)
-#define CVMX_MIO_UART2_FCR \
- CVMX_ADD_IO_SEG(0x0001180000000450ull)
-#define CVMX_MIO_UART2_HTX \
- CVMX_ADD_IO_SEG(0x0001180000000708ull)
-#define CVMX_MIO_UART2_IER \
- CVMX_ADD_IO_SEG(0x0001180000000408ull)
-#define CVMX_MIO_UART2_IIR \
- CVMX_ADD_IO_SEG(0x0001180000000410ull)
-#define CVMX_MIO_UART2_LCR \
- CVMX_ADD_IO_SEG(0x0001180000000418ull)
-#define CVMX_MIO_UART2_LSR \
- CVMX_ADD_IO_SEG(0x0001180000000428ull)
-#define CVMX_MIO_UART2_MCR \
- CVMX_ADD_IO_SEG(0x0001180000000420ull)
-#define CVMX_MIO_UART2_MSR \
- CVMX_ADD_IO_SEG(0x0001180000000430ull)
-#define CVMX_MIO_UART2_RBR \
- CVMX_ADD_IO_SEG(0x0001180000000400ull)
-#define CVMX_MIO_UART2_RFL \
- CVMX_ADD_IO_SEG(0x0001180000000608ull)
-#define CVMX_MIO_UART2_RFW \
- CVMX_ADD_IO_SEG(0x0001180000000530ull)
-#define CVMX_MIO_UART2_SBCR \
- CVMX_ADD_IO_SEG(0x0001180000000620ull)
-#define CVMX_MIO_UART2_SCR \
- CVMX_ADD_IO_SEG(0x0001180000000438ull)
-#define CVMX_MIO_UART2_SFE \
- CVMX_ADD_IO_SEG(0x0001180000000630ull)
-#define CVMX_MIO_UART2_SRR \
- CVMX_ADD_IO_SEG(0x0001180000000610ull)
-#define CVMX_MIO_UART2_SRT \
- CVMX_ADD_IO_SEG(0x0001180000000638ull)
-#define CVMX_MIO_UART2_SRTS \
- CVMX_ADD_IO_SEG(0x0001180000000618ull)
-#define CVMX_MIO_UART2_STT \
- CVMX_ADD_IO_SEG(0x0001180000000700ull)
-#define CVMX_MIO_UART2_TFL \
- CVMX_ADD_IO_SEG(0x0001180000000600ull)
-#define CVMX_MIO_UART2_TFR \
- CVMX_ADD_IO_SEG(0x0001180000000528ull)
-#define CVMX_MIO_UART2_THR \
- CVMX_ADD_IO_SEG(0x0001180000000440ull)
-#define CVMX_MIO_UART2_USR \
- CVMX_ADD_IO_SEG(0x0001180000000538ull)
-#define CVMX_MIO_UARTX_DLH(offset) \
- CVMX_ADD_IO_SEG(0x0001180000000888ull + (((offset) & 1) * 1024))
-#define CVMX_MIO_UARTX_DLL(offset) \
- CVMX_ADD_IO_SEG(0x0001180000000880ull + (((offset) & 1) * 1024))
-#define CVMX_MIO_UARTX_FAR(offset) \
- CVMX_ADD_IO_SEG(0x0001180000000920ull + (((offset) & 1) * 1024))
-#define CVMX_MIO_UARTX_FCR(offset) \
- CVMX_ADD_IO_SEG(0x0001180000000850ull + (((offset) & 1) * 1024))
-#define CVMX_MIO_UARTX_HTX(offset) \
- CVMX_ADD_IO_SEG(0x0001180000000B08ull + (((offset) & 1) * 1024))
-#define CVMX_MIO_UARTX_IER(offset) \
- CVMX_ADD_IO_SEG(0x0001180000000808ull + (((offset) & 1) * 1024))
-#define CVMX_MIO_UARTX_IIR(offset) \
- CVMX_ADD_IO_SEG(0x0001180000000810ull + (((offset) & 1) * 1024))
-#define CVMX_MIO_UARTX_LCR(offset) \
- CVMX_ADD_IO_SEG(0x0001180000000818ull + (((offset) & 1) * 1024))
-#define CVMX_MIO_UARTX_LSR(offset) \
- CVMX_ADD_IO_SEG(0x0001180000000828ull + (((offset) & 1) * 1024))
-#define CVMX_MIO_UARTX_MCR(offset) \
- CVMX_ADD_IO_SEG(0x0001180000000820ull + (((offset) & 1) * 1024))
-#define CVMX_MIO_UARTX_MSR(offset) \
- CVMX_ADD_IO_SEG(0x0001180000000830ull + (((offset) & 1) * 1024))
-#define CVMX_MIO_UARTX_RBR(offset) \
- CVMX_ADD_IO_SEG(0x0001180000000800ull + (((offset) & 1) * 1024))
-#define CVMX_MIO_UARTX_RFL(offset) \
- CVMX_ADD_IO_SEG(0x0001180000000A08ull + (((offset) & 1) * 1024))
-#define CVMX_MIO_UARTX_RFW(offset) \
- CVMX_ADD_IO_SEG(0x0001180000000930ull + (((offset) & 1) * 1024))
-#define CVMX_MIO_UARTX_SBCR(offset) \
- CVMX_ADD_IO_SEG(0x0001180000000A20ull + (((offset) & 1) * 1024))
-#define CVMX_MIO_UARTX_SCR(offset) \
- CVMX_ADD_IO_SEG(0x0001180000000838ull + (((offset) & 1) * 1024))
-#define CVMX_MIO_UARTX_SFE(offset) \
- CVMX_ADD_IO_SEG(0x0001180000000A30ull + (((offset) & 1) * 1024))
-#define CVMX_MIO_UARTX_SRR(offset) \
- CVMX_ADD_IO_SEG(0x0001180000000A10ull + (((offset) & 1) * 1024))
-#define CVMX_MIO_UARTX_SRT(offset) \
- CVMX_ADD_IO_SEG(0x0001180000000A38ull + (((offset) & 1) * 1024))
-#define CVMX_MIO_UARTX_SRTS(offset) \
- CVMX_ADD_IO_SEG(0x0001180000000A18ull + (((offset) & 1) * 1024))
-#define CVMX_MIO_UARTX_STT(offset) \
- CVMX_ADD_IO_SEG(0x0001180000000B00ull + (((offset) & 1) * 1024))
-#define CVMX_MIO_UARTX_TFL(offset) \
- CVMX_ADD_IO_SEG(0x0001180000000A00ull + (((offset) & 1) * 1024))
-#define CVMX_MIO_UARTX_TFR(offset) \
- CVMX_ADD_IO_SEG(0x0001180000000928ull + (((offset) & 1) * 1024))
-#define CVMX_MIO_UARTX_THR(offset) \
- CVMX_ADD_IO_SEG(0x0001180000000840ull + (((offset) & 1) * 1024))
-#define CVMX_MIO_UARTX_USR(offset) \
- CVMX_ADD_IO_SEG(0x0001180000000938ull + (((offset) & 1) * 1024))
+#define CVMX_MIO_BOOT_BIST_STAT (CVMX_ADD_IO_SEG(0x00011800000000F8ull))
+#define CVMX_MIO_BOOT_COMP (CVMX_ADD_IO_SEG(0x00011800000000B8ull))
+#define CVMX_MIO_BOOT_DMA_CFGX(offset) (CVMX_ADD_IO_SEG(0x0001180000000100ull) + ((offset) & 3) * 8)
+#define CVMX_MIO_BOOT_DMA_INTX(offset) (CVMX_ADD_IO_SEG(0x0001180000000138ull) + ((offset) & 3) * 8)
+#define CVMX_MIO_BOOT_DMA_INT_ENX(offset) (CVMX_ADD_IO_SEG(0x0001180000000150ull) + ((offset) & 3) * 8)
+#define CVMX_MIO_BOOT_DMA_TIMX(offset) (CVMX_ADD_IO_SEG(0x0001180000000120ull) + ((offset) & 3) * 8)
+#define CVMX_MIO_BOOT_ERR (CVMX_ADD_IO_SEG(0x00011800000000A0ull))
+#define CVMX_MIO_BOOT_INT (CVMX_ADD_IO_SEG(0x00011800000000A8ull))
+#define CVMX_MIO_BOOT_LOC_ADR (CVMX_ADD_IO_SEG(0x0001180000000090ull))
+#define CVMX_MIO_BOOT_LOC_CFGX(offset) (CVMX_ADD_IO_SEG(0x0001180000000080ull) + ((offset) & 1) * 8)
+#define CVMX_MIO_BOOT_LOC_DAT (CVMX_ADD_IO_SEG(0x0001180000000098ull))
+#define CVMX_MIO_BOOT_PIN_DEFS (CVMX_ADD_IO_SEG(0x00011800000000C0ull))
+#define CVMX_MIO_BOOT_REG_CFGX(offset) (CVMX_ADD_IO_SEG(0x0001180000000000ull) + ((offset) & 7) * 8)
+#define CVMX_MIO_BOOT_REG_TIMX(offset) (CVMX_ADD_IO_SEG(0x0001180000000040ull) + ((offset) & 7) * 8)
+#define CVMX_MIO_BOOT_THR (CVMX_ADD_IO_SEG(0x00011800000000B0ull))
+#define CVMX_MIO_FUS_BNK_DATX(offset) (CVMX_ADD_IO_SEG(0x0001180000001520ull) + ((offset) & 3) * 8)
+#define CVMX_MIO_FUS_DAT0 (CVMX_ADD_IO_SEG(0x0001180000001400ull))
+#define CVMX_MIO_FUS_DAT1 (CVMX_ADD_IO_SEG(0x0001180000001408ull))
+#define CVMX_MIO_FUS_DAT2 (CVMX_ADD_IO_SEG(0x0001180000001410ull))
+#define CVMX_MIO_FUS_DAT3 (CVMX_ADD_IO_SEG(0x0001180000001418ull))
+#define CVMX_MIO_FUS_EMA (CVMX_ADD_IO_SEG(0x0001180000001550ull))
+#define CVMX_MIO_FUS_PDF (CVMX_ADD_IO_SEG(0x0001180000001420ull))
+#define CVMX_MIO_FUS_PLL (CVMX_ADD_IO_SEG(0x0001180000001580ull))
+#define CVMX_MIO_FUS_PROG (CVMX_ADD_IO_SEG(0x0001180000001510ull))
+#define CVMX_MIO_FUS_PROG_TIMES (CVMX_ADD_IO_SEG(0x0001180000001518ull))
+#define CVMX_MIO_FUS_RCMD (CVMX_ADD_IO_SEG(0x0001180000001500ull))
+#define CVMX_MIO_FUS_READ_TIMES (CVMX_ADD_IO_SEG(0x0001180000001570ull))
+#define CVMX_MIO_FUS_REPAIR_RES0 (CVMX_ADD_IO_SEG(0x0001180000001558ull))
+#define CVMX_MIO_FUS_REPAIR_RES1 (CVMX_ADD_IO_SEG(0x0001180000001560ull))
+#define CVMX_MIO_FUS_REPAIR_RES2 (CVMX_ADD_IO_SEG(0x0001180000001568ull))
+#define CVMX_MIO_FUS_SPR_REPAIR_RES (CVMX_ADD_IO_SEG(0x0001180000001548ull))
+#define CVMX_MIO_FUS_SPR_REPAIR_SUM (CVMX_ADD_IO_SEG(0x0001180000001540ull))
+#define CVMX_MIO_FUS_UNLOCK (CVMX_ADD_IO_SEG(0x0001180000001578ull))
+#define CVMX_MIO_FUS_WADR (CVMX_ADD_IO_SEG(0x0001180000001508ull))
+#define CVMX_MIO_GPIO_COMP (CVMX_ADD_IO_SEG(0x00011800000000C8ull))
+#define CVMX_MIO_NDF_DMA_CFG (CVMX_ADD_IO_SEG(0x0001180000000168ull))
+#define CVMX_MIO_NDF_DMA_INT (CVMX_ADD_IO_SEG(0x0001180000000170ull))
+#define CVMX_MIO_NDF_DMA_INT_EN (CVMX_ADD_IO_SEG(0x0001180000000178ull))
+#define CVMX_MIO_PLL_CTL (CVMX_ADD_IO_SEG(0x0001180000001448ull))
+#define CVMX_MIO_PLL_SETTING (CVMX_ADD_IO_SEG(0x0001180000001440ull))
+#define CVMX_MIO_PTP_CLOCK_CFG (CVMX_ADD_IO_SEG(0x0001070000000F00ull))
+#define CVMX_MIO_PTP_CLOCK_COMP (CVMX_ADD_IO_SEG(0x0001070000000F18ull))
+#define CVMX_MIO_PTP_CLOCK_HI (CVMX_ADD_IO_SEG(0x0001070000000F10ull))
+#define CVMX_MIO_PTP_CLOCK_LO (CVMX_ADD_IO_SEG(0x0001070000000F08ull))
+#define CVMX_MIO_PTP_EVT_CNT (CVMX_ADD_IO_SEG(0x0001070000000F28ull))
+#define CVMX_MIO_PTP_TIMESTAMP (CVMX_ADD_IO_SEG(0x0001070000000F20ull))
+#define CVMX_MIO_RST_BOOT (CVMX_ADD_IO_SEG(0x0001180000001600ull))
+#define CVMX_MIO_RST_CFG (CVMX_ADD_IO_SEG(0x0001180000001610ull))
+#define CVMX_MIO_RST_CTLX(offset) (CVMX_ADD_IO_SEG(0x0001180000001618ull) + ((offset) & 1) * 8)
+#define CVMX_MIO_RST_DELAY (CVMX_ADD_IO_SEG(0x0001180000001608ull))
+#define CVMX_MIO_RST_INT (CVMX_ADD_IO_SEG(0x0001180000001628ull))
+#define CVMX_MIO_RST_INT_EN (CVMX_ADD_IO_SEG(0x0001180000001630ull))
+#define CVMX_MIO_TWSX_INT(offset) (CVMX_ADD_IO_SEG(0x0001180000001010ull) + ((offset) & 1) * 512)
+#define CVMX_MIO_TWSX_SW_TWSI(offset) (CVMX_ADD_IO_SEG(0x0001180000001000ull) + ((offset) & 1) * 512)
+#define CVMX_MIO_TWSX_SW_TWSI_EXT(offset) (CVMX_ADD_IO_SEG(0x0001180000001018ull) + ((offset) & 1) * 512)
+#define CVMX_MIO_TWSX_TWSI_SW(offset) (CVMX_ADD_IO_SEG(0x0001180000001008ull) + ((offset) & 1) * 512)
+#define CVMX_MIO_UART2_DLH (CVMX_ADD_IO_SEG(0x0001180000000488ull))
+#define CVMX_MIO_UART2_DLL (CVMX_ADD_IO_SEG(0x0001180000000480ull))
+#define CVMX_MIO_UART2_FAR (CVMX_ADD_IO_SEG(0x0001180000000520ull))
+#define CVMX_MIO_UART2_FCR (CVMX_ADD_IO_SEG(0x0001180000000450ull))
+#define CVMX_MIO_UART2_HTX (CVMX_ADD_IO_SEG(0x0001180000000708ull))
+#define CVMX_MIO_UART2_IER (CVMX_ADD_IO_SEG(0x0001180000000408ull))
+#define CVMX_MIO_UART2_IIR (CVMX_ADD_IO_SEG(0x0001180000000410ull))
+#define CVMX_MIO_UART2_LCR (CVMX_ADD_IO_SEG(0x0001180000000418ull))
+#define CVMX_MIO_UART2_LSR (CVMX_ADD_IO_SEG(0x0001180000000428ull))
+#define CVMX_MIO_UART2_MCR (CVMX_ADD_IO_SEG(0x0001180000000420ull))
+#define CVMX_MIO_UART2_MSR (CVMX_ADD_IO_SEG(0x0001180000000430ull))
+#define CVMX_MIO_UART2_RBR (CVMX_ADD_IO_SEG(0x0001180000000400ull))
+#define CVMX_MIO_UART2_RFL (CVMX_ADD_IO_SEG(0x0001180000000608ull))
+#define CVMX_MIO_UART2_RFW (CVMX_ADD_IO_SEG(0x0001180000000530ull))
+#define CVMX_MIO_UART2_SBCR (CVMX_ADD_IO_SEG(0x0001180000000620ull))
+#define CVMX_MIO_UART2_SCR (CVMX_ADD_IO_SEG(0x0001180000000438ull))
+#define CVMX_MIO_UART2_SFE (CVMX_ADD_IO_SEG(0x0001180000000630ull))
+#define CVMX_MIO_UART2_SRR (CVMX_ADD_IO_SEG(0x0001180000000610ull))
+#define CVMX_MIO_UART2_SRT (CVMX_ADD_IO_SEG(0x0001180000000638ull))
+#define CVMX_MIO_UART2_SRTS (CVMX_ADD_IO_SEG(0x0001180000000618ull))
+#define CVMX_MIO_UART2_STT (CVMX_ADD_IO_SEG(0x0001180000000700ull))
+#define CVMX_MIO_UART2_TFL (CVMX_ADD_IO_SEG(0x0001180000000600ull))
+#define CVMX_MIO_UART2_TFR (CVMX_ADD_IO_SEG(0x0001180000000528ull))
+#define CVMX_MIO_UART2_THR (CVMX_ADD_IO_SEG(0x0001180000000440ull))
+#define CVMX_MIO_UART2_USR (CVMX_ADD_IO_SEG(0x0001180000000538ull))
+#define CVMX_MIO_UARTX_DLH(offset) (CVMX_ADD_IO_SEG(0x0001180000000888ull) + ((offset) & 1) * 1024)
+#define CVMX_MIO_UARTX_DLL(offset) (CVMX_ADD_IO_SEG(0x0001180000000880ull) + ((offset) & 1) * 1024)
+#define CVMX_MIO_UARTX_FAR(offset) (CVMX_ADD_IO_SEG(0x0001180000000920ull) + ((offset) & 1) * 1024)
+#define CVMX_MIO_UARTX_FCR(offset) (CVMX_ADD_IO_SEG(0x0001180000000850ull) + ((offset) & 1) * 1024)
+#define CVMX_MIO_UARTX_HTX(offset) (CVMX_ADD_IO_SEG(0x0001180000000B08ull) + ((offset) & 1) * 1024)
+#define CVMX_MIO_UARTX_IER(offset) (CVMX_ADD_IO_SEG(0x0001180000000808ull) + ((offset) & 1) * 1024)
+#define CVMX_MIO_UARTX_IIR(offset) (CVMX_ADD_IO_SEG(0x0001180000000810ull) + ((offset) & 1) * 1024)
+#define CVMX_MIO_UARTX_LCR(offset) (CVMX_ADD_IO_SEG(0x0001180000000818ull) + ((offset) & 1) * 1024)
+#define CVMX_MIO_UARTX_LSR(offset) (CVMX_ADD_IO_SEG(0x0001180000000828ull) + ((offset) & 1) * 1024)
+#define CVMX_MIO_UARTX_MCR(offset) (CVMX_ADD_IO_SEG(0x0001180000000820ull) + ((offset) & 1) * 1024)
+#define CVMX_MIO_UARTX_MSR(offset) (CVMX_ADD_IO_SEG(0x0001180000000830ull) + ((offset) & 1) * 1024)
+#define CVMX_MIO_UARTX_RBR(offset) (CVMX_ADD_IO_SEG(0x0001180000000800ull) + ((offset) & 1) * 1024)
+#define CVMX_MIO_UARTX_RFL(offset) (CVMX_ADD_IO_SEG(0x0001180000000A08ull) + ((offset) & 1) * 1024)
+#define CVMX_MIO_UARTX_RFW(offset) (CVMX_ADD_IO_SEG(0x0001180000000930ull) + ((offset) & 1) * 1024)
+#define CVMX_MIO_UARTX_SBCR(offset) (CVMX_ADD_IO_SEG(0x0001180000000A20ull) + ((offset) & 1) * 1024)
+#define CVMX_MIO_UARTX_SCR(offset) (CVMX_ADD_IO_SEG(0x0001180000000838ull) + ((offset) & 1) * 1024)
+#define CVMX_MIO_UARTX_SFE(offset) (CVMX_ADD_IO_SEG(0x0001180000000A30ull) + ((offset) & 1) * 1024)
+#define CVMX_MIO_UARTX_SRR(offset) (CVMX_ADD_IO_SEG(0x0001180000000A10ull) + ((offset) & 1) * 1024)
+#define CVMX_MIO_UARTX_SRT(offset) (CVMX_ADD_IO_SEG(0x0001180000000A38ull) + ((offset) & 1) * 1024)
+#define CVMX_MIO_UARTX_SRTS(offset) (CVMX_ADD_IO_SEG(0x0001180000000A18ull) + ((offset) & 1) * 1024)
+#define CVMX_MIO_UARTX_STT(offset) (CVMX_ADD_IO_SEG(0x0001180000000B00ull) + ((offset) & 1) * 1024)
+#define CVMX_MIO_UARTX_TFL(offset) (CVMX_ADD_IO_SEG(0x0001180000000A00ull) + ((offset) & 1) * 1024)
+#define CVMX_MIO_UARTX_TFR(offset) (CVMX_ADD_IO_SEG(0x0001180000000928ull) + ((offset) & 1) * 1024)
+#define CVMX_MIO_UARTX_THR(offset) (CVMX_ADD_IO_SEG(0x0001180000000840ull) + ((offset) & 1) * 1024)
+#define CVMX_MIO_UARTX_USR(offset) (CVMX_ADD_IO_SEG(0x0001180000000938ull) + ((offset) & 1) * 1024)
union cvmx_mio_boot_bist_stat {
uint64_t u64;
struct cvmx_mio_boot_bist_stat_s {
- uint64_t reserved_2_63:62;
- uint64_t loc:1;
- uint64_t ncbi:1;
+ uint64_t reserved_0_63:64;
} s;
struct cvmx_mio_boot_bist_stat_cn30xx {
uint64_t reserved_4_63:60;
@@ -257,20 +183,33 @@ union cvmx_mio_boot_bist_stat {
struct cvmx_mio_boot_bist_stat_cn52xxp1 cn56xxp1;
struct cvmx_mio_boot_bist_stat_cn38xx cn58xx;
struct cvmx_mio_boot_bist_stat_cn38xx cn58xxp1;
+ struct cvmx_mio_boot_bist_stat_cn63xx {
+ uint64_t reserved_9_63:55;
+ uint64_t stat:9;
+ } cn63xx;
+ struct cvmx_mio_boot_bist_stat_cn63xx cn63xxp1;
};
union cvmx_mio_boot_comp {
uint64_t u64;
struct cvmx_mio_boot_comp_s {
+ uint64_t reserved_0_63:64;
+ } s;
+ struct cvmx_mio_boot_comp_cn50xx {
uint64_t reserved_10_63:54;
uint64_t pctl:5;
uint64_t nctl:5;
- } s;
- struct cvmx_mio_boot_comp_s cn50xx;
- struct cvmx_mio_boot_comp_s cn52xx;
- struct cvmx_mio_boot_comp_s cn52xxp1;
- struct cvmx_mio_boot_comp_s cn56xx;
- struct cvmx_mio_boot_comp_s cn56xxp1;
+ } cn50xx;
+ struct cvmx_mio_boot_comp_cn50xx cn52xx;
+ struct cvmx_mio_boot_comp_cn50xx cn52xxp1;
+ struct cvmx_mio_boot_comp_cn50xx cn56xx;
+ struct cvmx_mio_boot_comp_cn50xx cn56xxp1;
+ struct cvmx_mio_boot_comp_cn63xx {
+ uint64_t reserved_12_63:52;
+ uint64_t pctl:6;
+ uint64_t nctl:6;
+ } cn63xx;
+ struct cvmx_mio_boot_comp_cn63xx cn63xxp1;
};
union cvmx_mio_boot_dma_cfgx {
@@ -291,6 +230,8 @@ union cvmx_mio_boot_dma_cfgx {
struct cvmx_mio_boot_dma_cfgx_s cn52xxp1;
struct cvmx_mio_boot_dma_cfgx_s cn56xx;
struct cvmx_mio_boot_dma_cfgx_s cn56xxp1;
+ struct cvmx_mio_boot_dma_cfgx_s cn63xx;
+ struct cvmx_mio_boot_dma_cfgx_s cn63xxp1;
};
union cvmx_mio_boot_dma_intx {
@@ -304,6 +245,8 @@ union cvmx_mio_boot_dma_intx {
struct cvmx_mio_boot_dma_intx_s cn52xxp1;
struct cvmx_mio_boot_dma_intx_s cn56xx;
struct cvmx_mio_boot_dma_intx_s cn56xxp1;
+ struct cvmx_mio_boot_dma_intx_s cn63xx;
+ struct cvmx_mio_boot_dma_intx_s cn63xxp1;
};
union cvmx_mio_boot_dma_int_enx {
@@ -317,6 +260,8 @@ union cvmx_mio_boot_dma_int_enx {
struct cvmx_mio_boot_dma_int_enx_s cn52xxp1;
struct cvmx_mio_boot_dma_int_enx_s cn56xx;
struct cvmx_mio_boot_dma_int_enx_s cn56xxp1;
+ struct cvmx_mio_boot_dma_int_enx_s cn63xx;
+ struct cvmx_mio_boot_dma_int_enx_s cn63xxp1;
};
union cvmx_mio_boot_dma_timx {
@@ -342,6 +287,8 @@ union cvmx_mio_boot_dma_timx {
struct cvmx_mio_boot_dma_timx_s cn52xxp1;
struct cvmx_mio_boot_dma_timx_s cn56xx;
struct cvmx_mio_boot_dma_timx_s cn56xxp1;
+ struct cvmx_mio_boot_dma_timx_s cn63xx;
+ struct cvmx_mio_boot_dma_timx_s cn63xxp1;
};
union cvmx_mio_boot_err {
@@ -362,6 +309,8 @@ union cvmx_mio_boot_err {
struct cvmx_mio_boot_err_s cn56xxp1;
struct cvmx_mio_boot_err_s cn58xx;
struct cvmx_mio_boot_err_s cn58xxp1;
+ struct cvmx_mio_boot_err_s cn63xx;
+ struct cvmx_mio_boot_err_s cn63xxp1;
};
union cvmx_mio_boot_int {
@@ -382,6 +331,8 @@ union cvmx_mio_boot_int {
struct cvmx_mio_boot_int_s cn56xxp1;
struct cvmx_mio_boot_int_s cn58xx;
struct cvmx_mio_boot_int_s cn58xxp1;
+ struct cvmx_mio_boot_int_s cn63xx;
+ struct cvmx_mio_boot_int_s cn63xxp1;
};
union cvmx_mio_boot_loc_adr {
@@ -402,6 +353,8 @@ union cvmx_mio_boot_loc_adr {
struct cvmx_mio_boot_loc_adr_s cn56xxp1;
struct cvmx_mio_boot_loc_adr_s cn58xx;
struct cvmx_mio_boot_loc_adr_s cn58xxp1;
+ struct cvmx_mio_boot_loc_adr_s cn63xx;
+ struct cvmx_mio_boot_loc_adr_s cn63xxp1;
};
union cvmx_mio_boot_loc_cfgx {
@@ -424,6 +377,8 @@ union cvmx_mio_boot_loc_cfgx {
struct cvmx_mio_boot_loc_cfgx_s cn56xxp1;
struct cvmx_mio_boot_loc_cfgx_s cn58xx;
struct cvmx_mio_boot_loc_cfgx_s cn58xxp1;
+ struct cvmx_mio_boot_loc_cfgx_s cn63xx;
+ struct cvmx_mio_boot_loc_cfgx_s cn63xxp1;
};
union cvmx_mio_boot_loc_dat {
@@ -442,6 +397,8 @@ union cvmx_mio_boot_loc_dat {
struct cvmx_mio_boot_loc_dat_s cn56xxp1;
struct cvmx_mio_boot_loc_dat_s cn58xx;
struct cvmx_mio_boot_loc_dat_s cn58xxp1;
+ struct cvmx_mio_boot_loc_dat_s cn63xx;
+ struct cvmx_mio_boot_loc_dat_s cn63xxp1;
};
union cvmx_mio_boot_pin_defs {
@@ -478,6 +435,8 @@ union cvmx_mio_boot_pin_defs {
uint64_t term:2;
uint64_t reserved_0_8:9;
} cn56xx;
+ struct cvmx_mio_boot_pin_defs_cn52xx cn63xx;
+ struct cvmx_mio_boot_pin_defs_cn52xx cn63xxp1;
};
union cvmx_mio_boot_reg_cfgx {
@@ -539,6 +498,8 @@ union cvmx_mio_boot_reg_cfgx {
struct cvmx_mio_boot_reg_cfgx_s cn56xxp1;
struct cvmx_mio_boot_reg_cfgx_cn30xx cn58xx;
struct cvmx_mio_boot_reg_cfgx_cn30xx cn58xxp1;
+ struct cvmx_mio_boot_reg_cfgx_s cn63xx;
+ struct cvmx_mio_boot_reg_cfgx_s cn63xxp1;
};
union cvmx_mio_boot_reg_timx {
@@ -583,6 +544,8 @@ union cvmx_mio_boot_reg_timx {
struct cvmx_mio_boot_reg_timx_s cn56xxp1;
struct cvmx_mio_boot_reg_timx_s cn58xx;
struct cvmx_mio_boot_reg_timx_s cn58xxp1;
+ struct cvmx_mio_boot_reg_timx_s cn63xx;
+ struct cvmx_mio_boot_reg_timx_s cn63xxp1;
};
union cvmx_mio_boot_thr {
@@ -611,6 +574,8 @@ union cvmx_mio_boot_thr {
struct cvmx_mio_boot_thr_s cn56xxp1;
struct cvmx_mio_boot_thr_cn30xx cn58xx;
struct cvmx_mio_boot_thr_cn30xx cn58xxp1;
+ struct cvmx_mio_boot_thr_s cn63xx;
+ struct cvmx_mio_boot_thr_s cn63xxp1;
};
union cvmx_mio_fus_bnk_datx {
@@ -625,6 +590,8 @@ union cvmx_mio_fus_bnk_datx {
struct cvmx_mio_fus_bnk_datx_s cn56xxp1;
struct cvmx_mio_fus_bnk_datx_s cn58xx;
struct cvmx_mio_fus_bnk_datx_s cn58xxp1;
+ struct cvmx_mio_fus_bnk_datx_s cn63xx;
+ struct cvmx_mio_fus_bnk_datx_s cn63xxp1;
};
union cvmx_mio_fus_dat0 {
@@ -644,6 +611,8 @@ union cvmx_mio_fus_dat0 {
struct cvmx_mio_fus_dat0_s cn56xxp1;
struct cvmx_mio_fus_dat0_s cn58xx;
struct cvmx_mio_fus_dat0_s cn58xxp1;
+ struct cvmx_mio_fus_dat0_s cn63xx;
+ struct cvmx_mio_fus_dat0_s cn63xxp1;
};
union cvmx_mio_fus_dat1 {
@@ -663,12 +632,15 @@ union cvmx_mio_fus_dat1 {
struct cvmx_mio_fus_dat1_s cn56xxp1;
struct cvmx_mio_fus_dat1_s cn58xx;
struct cvmx_mio_fus_dat1_s cn58xxp1;
+ struct cvmx_mio_fus_dat1_s cn63xx;
+ struct cvmx_mio_fus_dat1_s cn63xxp1;
};
union cvmx_mio_fus_dat2 {
uint64_t u64;
struct cvmx_mio_fus_dat2_s {
- uint64_t reserved_34_63:30;
+ uint64_t reserved_35_63:29;
+ uint64_t dorm_crypto:1;
uint64_t fus318:1;
uint64_t raid_en:1;
uint64_t reserved_30_31:2;
@@ -775,14 +747,38 @@ union cvmx_mio_fus_dat2 {
uint64_t pp_dis:16;
} cn58xx;
struct cvmx_mio_fus_dat2_cn58xx cn58xxp1;
+ struct cvmx_mio_fus_dat2_cn63xx {
+ uint64_t reserved_35_63:29;
+ uint64_t dorm_crypto:1;
+ uint64_t fus318:1;
+ uint64_t raid_en:1;
+ uint64_t reserved_29_31:3;
+ uint64_t nodfa_cp2:1;
+ uint64_t nomul:1;
+ uint64_t nocrypto:1;
+ uint64_t reserved_24_25:2;
+ uint64_t chip_id:8;
+ uint64_t reserved_6_15:10;
+ uint64_t pp_dis:6;
+ } cn63xx;
+ struct cvmx_mio_fus_dat2_cn63xx cn63xxp1;
};
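
The per-model views matter here: on CN63XX the chip_id and pp_dis fields live in the cn63xx layout rather than in the generic .s view. A minimal sketch, assuming cvmx_read_csr() and the OCTEON_IS_MODEL() test already used elsewhere in these headers:

/* Sketch: read the fused chip ID on CN63XX parts through the model-specific view. */
static int example_read_chip_id(void)
{
	union cvmx_mio_fus_dat2 fus_dat2;

	if (!OCTEON_IS_MODEL(OCTEON_CN63XX))
		return -1;	/* the cn63xx view below only applies to CN63XX */

	fus_dat2.u64 = cvmx_read_csr(CVMX_MIO_FUS_DAT2);
	return fus_dat2.cn63xx.chip_id;
}
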
union cvmx_mio_fus_dat3 {
uint64_t u64;
struct cvmx_mio_fus_dat3_s {
- uint64_t reserved_32_63:32;
+ uint64_t reserved_58_63:6;
+ uint64_t pll_ctl:10;
+ uint64_t dfa_info_dte:3;
+ uint64_t dfa_info_clm:4;
+ uint64_t reserved_40_40:1;
+ uint64_t ema:2;
+ uint64_t efus_lck_rsv:1;
+ uint64_t efus_lck_man:1;
+ uint64_t pll_half_dis:1;
+ uint64_t l2c_crip:3;
uint64_t pll_div4:1;
- uint64_t zip_crip:2;
+ uint64_t reserved_29_30:2;
uint64_t bar2_en:1;
uint64_t efus_lck:1;
uint64_t efus_ign:1;
@@ -801,7 +797,17 @@ union cvmx_mio_fus_dat3 {
uint64_t nodfa_dte:1;
uint64_t icache:24;
} cn30xx;
- struct cvmx_mio_fus_dat3_s cn31xx;
+ struct cvmx_mio_fus_dat3_cn31xx {
+ uint64_t reserved_32_63:32;
+ uint64_t pll_div4:1;
+ uint64_t zip_crip:2;
+ uint64_t bar2_en:1;
+ uint64_t efus_lck:1;
+ uint64_t efus_ign:1;
+ uint64_t nozip:1;
+ uint64_t nodfa_dte:1;
+ uint64_t icache:24;
+ } cn31xx;
struct cvmx_mio_fus_dat3_cn38xx {
uint64_t reserved_31_63:33;
uint64_t zip_crip:2;
@@ -828,6 +834,27 @@ union cvmx_mio_fus_dat3 {
struct cvmx_mio_fus_dat3_cn38xx cn56xxp1;
struct cvmx_mio_fus_dat3_cn38xx cn58xx;
struct cvmx_mio_fus_dat3_cn38xx cn58xxp1;
+ struct cvmx_mio_fus_dat3_cn63xx {
+ uint64_t reserved_58_63:6;
+ uint64_t pll_ctl:10;
+ uint64_t dfa_info_dte:3;
+ uint64_t dfa_info_clm:4;
+ uint64_t reserved_40_40:1;
+ uint64_t ema:2;
+ uint64_t efus_lck_rsv:1;
+ uint64_t efus_lck_man:1;
+ uint64_t pll_half_dis:1;
+ uint64_t l2c_crip:3;
+ uint64_t reserved_31_31:1;
+ uint64_t zip_info:2;
+ uint64_t bar2_en:1;
+ uint64_t efus_lck:1;
+ uint64_t efus_ign:1;
+ uint64_t nozip:1;
+ uint64_t nodfa_dte:1;
+ uint64_t reserved_0_23:24;
+ } cn63xx;
+ struct cvmx_mio_fus_dat3_cn63xx cn63xxp1;
};
union cvmx_mio_fus_ema {
@@ -848,6 +875,8 @@ union cvmx_mio_fus_ema {
uint64_t ema:2;
} cn58xx;
struct cvmx_mio_fus_ema_cn58xx cn58xxp1;
+ struct cvmx_mio_fus_ema_s cn63xx;
+ struct cvmx_mio_fus_ema_s cn63xxp1;
};
union cvmx_mio_fus_pdf {
@@ -861,60 +890,96 @@ union cvmx_mio_fus_pdf {
struct cvmx_mio_fus_pdf_s cn56xx;
struct cvmx_mio_fus_pdf_s cn56xxp1;
struct cvmx_mio_fus_pdf_s cn58xx;
+ struct cvmx_mio_fus_pdf_s cn63xx;
+ struct cvmx_mio_fus_pdf_s cn63xxp1;
};
union cvmx_mio_fus_pll {
uint64_t u64;
struct cvmx_mio_fus_pll_s {
- uint64_t reserved_2_63:62;
+ uint64_t reserved_8_63:56;
+ uint64_t c_cout_rst:1;
+ uint64_t c_cout_sel:2;
+ uint64_t pnr_cout_rst:1;
+ uint64_t pnr_cout_sel:2;
uint64_t rfslip:1;
uint64_t fbslip:1;
} s;
- struct cvmx_mio_fus_pll_s cn50xx;
- struct cvmx_mio_fus_pll_s cn52xx;
- struct cvmx_mio_fus_pll_s cn52xxp1;
- struct cvmx_mio_fus_pll_s cn56xx;
- struct cvmx_mio_fus_pll_s cn56xxp1;
- struct cvmx_mio_fus_pll_s cn58xx;
- struct cvmx_mio_fus_pll_s cn58xxp1;
+ struct cvmx_mio_fus_pll_cn50xx {
+ uint64_t reserved_2_63:62;
+ uint64_t rfslip:1;
+ uint64_t fbslip:1;
+ } cn50xx;
+ struct cvmx_mio_fus_pll_cn50xx cn52xx;
+ struct cvmx_mio_fus_pll_cn50xx cn52xxp1;
+ struct cvmx_mio_fus_pll_cn50xx cn56xx;
+ struct cvmx_mio_fus_pll_cn50xx cn56xxp1;
+ struct cvmx_mio_fus_pll_cn50xx cn58xx;
+ struct cvmx_mio_fus_pll_cn50xx cn58xxp1;
+ struct cvmx_mio_fus_pll_s cn63xx;
+ struct cvmx_mio_fus_pll_s cn63xxp1;
};
union cvmx_mio_fus_prog {
uint64_t u64;
struct cvmx_mio_fus_prog_s {
- uint64_t reserved_1_63:63;
+ uint64_t reserved_2_63:62;
+ uint64_t soft:1;
uint64_t prog:1;
} s;
- struct cvmx_mio_fus_prog_s cn30xx;
- struct cvmx_mio_fus_prog_s cn31xx;
- struct cvmx_mio_fus_prog_s cn38xx;
- struct cvmx_mio_fus_prog_s cn38xxp2;
- struct cvmx_mio_fus_prog_s cn50xx;
- struct cvmx_mio_fus_prog_s cn52xx;
- struct cvmx_mio_fus_prog_s cn52xxp1;
- struct cvmx_mio_fus_prog_s cn56xx;
- struct cvmx_mio_fus_prog_s cn56xxp1;
- struct cvmx_mio_fus_prog_s cn58xx;
- struct cvmx_mio_fus_prog_s cn58xxp1;
+ struct cvmx_mio_fus_prog_cn30xx {
+ uint64_t reserved_1_63:63;
+ uint64_t prog:1;
+ } cn30xx;
+ struct cvmx_mio_fus_prog_cn30xx cn31xx;
+ struct cvmx_mio_fus_prog_cn30xx cn38xx;
+ struct cvmx_mio_fus_prog_cn30xx cn38xxp2;
+ struct cvmx_mio_fus_prog_cn30xx cn50xx;
+ struct cvmx_mio_fus_prog_cn30xx cn52xx;
+ struct cvmx_mio_fus_prog_cn30xx cn52xxp1;
+ struct cvmx_mio_fus_prog_cn30xx cn56xx;
+ struct cvmx_mio_fus_prog_cn30xx cn56xxp1;
+ struct cvmx_mio_fus_prog_cn30xx cn58xx;
+ struct cvmx_mio_fus_prog_cn30xx cn58xxp1;
+ struct cvmx_mio_fus_prog_s cn63xx;
+ struct cvmx_mio_fus_prog_s cn63xxp1;
};
union cvmx_mio_fus_prog_times {
uint64_t u64;
struct cvmx_mio_fus_prog_times_s {
+ uint64_t reserved_35_63:29;
+ uint64_t vgate_pin:1;
+ uint64_t fsrc_pin:1;
+ uint64_t prog_pin:1;
+ uint64_t reserved_6_31:26;
+ uint64_t setup:6;
+ } s;
+ struct cvmx_mio_fus_prog_times_cn50xx {
uint64_t reserved_33_63:31;
uint64_t prog_pin:1;
uint64_t out:8;
uint64_t sclk_lo:4;
uint64_t sclk_hi:12;
uint64_t setup:8;
- } s;
- struct cvmx_mio_fus_prog_times_s cn50xx;
- struct cvmx_mio_fus_prog_times_s cn52xx;
- struct cvmx_mio_fus_prog_times_s cn52xxp1;
- struct cvmx_mio_fus_prog_times_s cn56xx;
- struct cvmx_mio_fus_prog_times_s cn56xxp1;
- struct cvmx_mio_fus_prog_times_s cn58xx;
- struct cvmx_mio_fus_prog_times_s cn58xxp1;
+ } cn50xx;
+ struct cvmx_mio_fus_prog_times_cn50xx cn52xx;
+ struct cvmx_mio_fus_prog_times_cn50xx cn52xxp1;
+ struct cvmx_mio_fus_prog_times_cn50xx cn56xx;
+ struct cvmx_mio_fus_prog_times_cn50xx cn56xxp1;
+ struct cvmx_mio_fus_prog_times_cn50xx cn58xx;
+ struct cvmx_mio_fus_prog_times_cn50xx cn58xxp1;
+ struct cvmx_mio_fus_prog_times_cn63xx {
+ uint64_t reserved_35_63:29;
+ uint64_t vgate_pin:1;
+ uint64_t fsrc_pin:1;
+ uint64_t prog_pin:1;
+ uint64_t out:7;
+ uint64_t sclk_lo:4;
+ uint64_t sclk_hi:15;
+ uint64_t setup:6;
+ } cn63xx;
+ struct cvmx_mio_fus_prog_times_cn63xx cn63xxp1;
};
union cvmx_mio_fus_rcmd {
@@ -948,6 +1013,57 @@ union cvmx_mio_fus_rcmd {
struct cvmx_mio_fus_rcmd_s cn56xxp1;
struct cvmx_mio_fus_rcmd_cn30xx cn58xx;
struct cvmx_mio_fus_rcmd_cn30xx cn58xxp1;
+ struct cvmx_mio_fus_rcmd_s cn63xx;
+ struct cvmx_mio_fus_rcmd_s cn63xxp1;
+};
+
+union cvmx_mio_fus_read_times {
+ uint64_t u64;
+ struct cvmx_mio_fus_read_times_s {
+ uint64_t reserved_26_63:38;
+ uint64_t sch:4;
+ uint64_t fsh:4;
+ uint64_t prh:4;
+ uint64_t sdh:4;
+ uint64_t setup:10;
+ } s;
+ struct cvmx_mio_fus_read_times_s cn63xx;
+ struct cvmx_mio_fus_read_times_s cn63xxp1;
+};
+
+union cvmx_mio_fus_repair_res0 {
+ uint64_t u64;
+ struct cvmx_mio_fus_repair_res0_s {
+ uint64_t reserved_55_63:9;
+ uint64_t too_many:1;
+ uint64_t repair2:18;
+ uint64_t repair1:18;
+ uint64_t repair0:18;
+ } s;
+ struct cvmx_mio_fus_repair_res0_s cn63xx;
+ struct cvmx_mio_fus_repair_res0_s cn63xxp1;
+};
+
+union cvmx_mio_fus_repair_res1 {
+ uint64_t u64;
+ struct cvmx_mio_fus_repair_res1_s {
+ uint64_t reserved_54_63:10;
+ uint64_t repair5:18;
+ uint64_t repair4:18;
+ uint64_t repair3:18;
+ } s;
+ struct cvmx_mio_fus_repair_res1_s cn63xx;
+ struct cvmx_mio_fus_repair_res1_s cn63xxp1;
+};
+
+union cvmx_mio_fus_repair_res2 {
+ uint64_t u64;
+ struct cvmx_mio_fus_repair_res2_s {
+ uint64_t reserved_18_63:46;
+ uint64_t repair6:18;
+ } s;
+ struct cvmx_mio_fus_repair_res2_s cn63xx;
+ struct cvmx_mio_fus_repair_res2_s cn63xxp1;
};
union cvmx_mio_fus_spr_repair_res {
@@ -968,6 +1084,8 @@ union cvmx_mio_fus_spr_repair_res {
struct cvmx_mio_fus_spr_repair_res_s cn56xxp1;
struct cvmx_mio_fus_spr_repair_res_s cn58xx;
struct cvmx_mio_fus_spr_repair_res_s cn58xxp1;
+ struct cvmx_mio_fus_spr_repair_res_s cn63xx;
+ struct cvmx_mio_fus_spr_repair_res_s cn63xxp1;
};
union cvmx_mio_fus_spr_repair_sum {
@@ -986,6 +1104,8 @@ union cvmx_mio_fus_spr_repair_sum {
struct cvmx_mio_fus_spr_repair_sum_s cn56xxp1;
struct cvmx_mio_fus_spr_repair_sum_s cn58xx;
struct cvmx_mio_fus_spr_repair_sum_s cn58xxp1;
+ struct cvmx_mio_fus_spr_repair_sum_s cn63xx;
+ struct cvmx_mio_fus_spr_repair_sum_s cn63xxp1;
};
union cvmx_mio_fus_unlock {
@@ -1021,6 +1141,22 @@ union cvmx_mio_fus_wadr {
struct cvmx_mio_fus_wadr_cn52xx cn56xxp1;
struct cvmx_mio_fus_wadr_cn50xx cn58xx;
struct cvmx_mio_fus_wadr_cn50xx cn58xxp1;
+ struct cvmx_mio_fus_wadr_cn63xx {
+ uint64_t reserved_4_63:60;
+ uint64_t addr:4;
+ } cn63xx;
+ struct cvmx_mio_fus_wadr_cn63xx cn63xxp1;
+};
+
+union cvmx_mio_gpio_comp {
+ uint64_t u64;
+ struct cvmx_mio_gpio_comp_s {
+ uint64_t reserved_12_63:52;
+ uint64_t pctl:6;
+ uint64_t nctl:6;
+ } s;
+ struct cvmx_mio_gpio_comp_s cn63xx;
+ struct cvmx_mio_gpio_comp_s cn63xxp1;
};
union cvmx_mio_ndf_dma_cfg {
@@ -1038,6 +1174,8 @@ union cvmx_mio_ndf_dma_cfg {
uint64_t adr:36;
} s;
struct cvmx_mio_ndf_dma_cfg_s cn52xx;
+ struct cvmx_mio_ndf_dma_cfg_s cn63xx;
+ struct cvmx_mio_ndf_dma_cfg_s cn63xxp1;
};
union cvmx_mio_ndf_dma_int {
@@ -1047,6 +1185,8 @@ union cvmx_mio_ndf_dma_int {
uint64_t done:1;
} s;
struct cvmx_mio_ndf_dma_int_s cn52xx;
+ struct cvmx_mio_ndf_dma_int_s cn63xx;
+ struct cvmx_mio_ndf_dma_int_s cn63xxp1;
};
union cvmx_mio_ndf_dma_int_en {
@@ -1056,6 +1196,8 @@ union cvmx_mio_ndf_dma_int_en {
uint64_t done:1;
} s;
struct cvmx_mio_ndf_dma_int_en_s cn52xx;
+ struct cvmx_mio_ndf_dma_int_en_s cn63xx;
+ struct cvmx_mio_ndf_dma_int_en_s cn63xxp1;
};
union cvmx_mio_pll_ctl {
@@ -1078,6 +1220,173 @@ union cvmx_mio_pll_setting {
struct cvmx_mio_pll_setting_s cn31xx;
};
+union cvmx_mio_ptp_clock_cfg {
+ uint64_t u64;
+ struct cvmx_mio_ptp_clock_cfg_s {
+ uint64_t reserved_24_63:40;
+ uint64_t evcnt_in:6;
+ uint64_t evcnt_edge:1;
+ uint64_t evcnt_en:1;
+ uint64_t tstmp_in:6;
+ uint64_t tstmp_edge:1;
+ uint64_t tstmp_en:1;
+ uint64_t ext_clk_in:6;
+ uint64_t ext_clk_en:1;
+ uint64_t ptp_en:1;
+ } s;
+ struct cvmx_mio_ptp_clock_cfg_s cn63xx;
+ struct cvmx_mio_ptp_clock_cfg_s cn63xxp1;
+};
+
+union cvmx_mio_ptp_clock_comp {
+ uint64_t u64;
+ struct cvmx_mio_ptp_clock_comp_s {
+ uint64_t nanosec:32;
+ uint64_t frnanosec:32;
+ } s;
+ struct cvmx_mio_ptp_clock_comp_s cn63xx;
+ struct cvmx_mio_ptp_clock_comp_s cn63xxp1;
+};
+
+union cvmx_mio_ptp_clock_hi {
+ uint64_t u64;
+ struct cvmx_mio_ptp_clock_hi_s {
+ uint64_t nanosec:64;
+ } s;
+ struct cvmx_mio_ptp_clock_hi_s cn63xx;
+ struct cvmx_mio_ptp_clock_hi_s cn63xxp1;
+};
+
+union cvmx_mio_ptp_clock_lo {
+ uint64_t u64;
+ struct cvmx_mio_ptp_clock_lo_s {
+ uint64_t reserved_32_63:32;
+ uint64_t frnanosec:32;
+ } s;
+ struct cvmx_mio_ptp_clock_lo_s cn63xx;
+ struct cvmx_mio_ptp_clock_lo_s cn63xxp1;
+};
+
+union cvmx_mio_ptp_evt_cnt {
+ uint64_t u64;
+ struct cvmx_mio_ptp_evt_cnt_s {
+ uint64_t cntr:64;
+ } s;
+ struct cvmx_mio_ptp_evt_cnt_s cn63xx;
+ struct cvmx_mio_ptp_evt_cnt_s cn63xxp1;
+};
+
+union cvmx_mio_ptp_timestamp {
+ uint64_t u64;
+ struct cvmx_mio_ptp_timestamp_s {
+ uint64_t nanosec:64;
+ } s;
+ struct cvmx_mio_ptp_timestamp_s cn63xx;
+ struct cvmx_mio_ptp_timestamp_s cn63xxp1;
+};
+
+union cvmx_mio_rst_boot {
+ uint64_t u64;
+ struct cvmx_mio_rst_boot_s {
+ uint64_t reserved_36_63:28;
+ uint64_t c_mul:6;
+ uint64_t pnr_mul:6;
+ uint64_t qlm2_spd:4;
+ uint64_t qlm1_spd:4;
+ uint64_t qlm0_spd:4;
+ uint64_t lboot:10;
+ uint64_t rboot:1;
+ uint64_t rboot_pin:1;
+ } s;
+ struct cvmx_mio_rst_boot_s cn63xx;
+ struct cvmx_mio_rst_boot_s cn63xxp1;
+};
+
+union cvmx_mio_rst_cfg {
+ uint64_t u64;
+ struct cvmx_mio_rst_cfg_s {
+ uint64_t bist_delay:58;
+ uint64_t reserved_3_5:3;
+ uint64_t cntl_clr_bist:1;
+ uint64_t warm_clr_bist:1;
+ uint64_t soft_clr_bist:1;
+ } s;
+ struct cvmx_mio_rst_cfg_s cn63xx;
+ struct cvmx_mio_rst_cfg_cn63xxp1 {
+ uint64_t bist_delay:58;
+ uint64_t reserved_2_5:4;
+ uint64_t warm_clr_bist:1;
+ uint64_t soft_clr_bist:1;
+ } cn63xxp1;
+};
+
+union cvmx_mio_rst_ctlx {
+ uint64_t u64;
+ struct cvmx_mio_rst_ctlx_s {
+ uint64_t reserved_10_63:54;
+ uint64_t prst_link:1;
+ uint64_t rst_done:1;
+ uint64_t rst_link:1;
+ uint64_t host_mode:1;
+ uint64_t prtmode:2;
+ uint64_t rst_drv:1;
+ uint64_t rst_rcv:1;
+ uint64_t rst_chip:1;
+ uint64_t rst_val:1;
+ } s;
+ struct cvmx_mio_rst_ctlx_s cn63xx;
+ struct cvmx_mio_rst_ctlx_cn63xxp1 {
+ uint64_t reserved_9_63:55;
+ uint64_t rst_done:1;
+ uint64_t rst_link:1;
+ uint64_t host_mode:1;
+ uint64_t prtmode:2;
+ uint64_t rst_drv:1;
+ uint64_t rst_rcv:1;
+ uint64_t rst_chip:1;
+ uint64_t rst_val:1;
+ } cn63xxp1;
+};
+
+union cvmx_mio_rst_delay {
+ uint64_t u64;
+ struct cvmx_mio_rst_delay_s {
+ uint64_t reserved_32_63:32;
+ uint64_t soft_rst_dly:16;
+ uint64_t warm_rst_dly:16;
+ } s;
+ struct cvmx_mio_rst_delay_s cn63xx;
+ struct cvmx_mio_rst_delay_s cn63xxp1;
+};
+
+union cvmx_mio_rst_int {
+ uint64_t u64;
+ struct cvmx_mio_rst_int_s {
+ uint64_t reserved_10_63:54;
+ uint64_t perst1:1;
+ uint64_t perst0:1;
+ uint64_t reserved_2_7:6;
+ uint64_t rst_link1:1;
+ uint64_t rst_link0:1;
+ } s;
+ struct cvmx_mio_rst_int_s cn63xx;
+ struct cvmx_mio_rst_int_s cn63xxp1;
+};
+
+union cvmx_mio_rst_int_en {
+ uint64_t u64;
+ struct cvmx_mio_rst_int_en_s {
+ uint64_t reserved_10_63:54;
+ uint64_t perst1:1;
+ uint64_t perst0:1;
+ uint64_t reserved_2_7:6;
+ uint64_t rst_link1:1;
+ uint64_t rst_link0:1;
+ } s;
+ struct cvmx_mio_rst_int_en_s cn63xx;
+ struct cvmx_mio_rst_int_en_s cn63xxp1;
+};
+
union cvmx_mio_twsx_int {
uint64_t u64;
struct cvmx_mio_twsx_int_s {
@@ -1115,6 +1424,8 @@ union cvmx_mio_twsx_int {
struct cvmx_mio_twsx_int_s cn56xxp1;
struct cvmx_mio_twsx_int_s cn58xx;
struct cvmx_mio_twsx_int_s cn58xxp1;
+ struct cvmx_mio_twsx_int_s cn63xx;
+ struct cvmx_mio_twsx_int_s cn63xxp1;
};
union cvmx_mio_twsx_sw_twsi {
@@ -1144,6 +1455,8 @@ union cvmx_mio_twsx_sw_twsi {
struct cvmx_mio_twsx_sw_twsi_s cn56xxp1;
struct cvmx_mio_twsx_sw_twsi_s cn58xx;
struct cvmx_mio_twsx_sw_twsi_s cn58xxp1;
+ struct cvmx_mio_twsx_sw_twsi_s cn63xx;
+ struct cvmx_mio_twsx_sw_twsi_s cn63xxp1;
};
union cvmx_mio_twsx_sw_twsi_ext {
@@ -1164,6 +1477,8 @@ union cvmx_mio_twsx_sw_twsi_ext {
struct cvmx_mio_twsx_sw_twsi_ext_s cn56xxp1;
struct cvmx_mio_twsx_sw_twsi_ext_s cn58xx;
struct cvmx_mio_twsx_sw_twsi_ext_s cn58xxp1;
+ struct cvmx_mio_twsx_sw_twsi_ext_s cn63xx;
+ struct cvmx_mio_twsx_sw_twsi_ext_s cn63xxp1;
};
union cvmx_mio_twsx_twsi_sw {
@@ -1184,6 +1499,8 @@ union cvmx_mio_twsx_twsi_sw {
struct cvmx_mio_twsx_twsi_sw_s cn56xxp1;
struct cvmx_mio_twsx_twsi_sw_s cn58xx;
struct cvmx_mio_twsx_twsi_sw_s cn58xxp1;
+ struct cvmx_mio_twsx_twsi_sw_s cn63xx;
+ struct cvmx_mio_twsx_twsi_sw_s cn63xxp1;
};
union cvmx_mio_uartx_dlh {
@@ -1203,6 +1520,8 @@ union cvmx_mio_uartx_dlh {
struct cvmx_mio_uartx_dlh_s cn56xxp1;
struct cvmx_mio_uartx_dlh_s cn58xx;
struct cvmx_mio_uartx_dlh_s cn58xxp1;
+ struct cvmx_mio_uartx_dlh_s cn63xx;
+ struct cvmx_mio_uartx_dlh_s cn63xxp1;
};
union cvmx_mio_uartx_dll {
@@ -1222,6 +1541,8 @@ union cvmx_mio_uartx_dll {
struct cvmx_mio_uartx_dll_s cn56xxp1;
struct cvmx_mio_uartx_dll_s cn58xx;
struct cvmx_mio_uartx_dll_s cn58xxp1;
+ struct cvmx_mio_uartx_dll_s cn63xx;
+ struct cvmx_mio_uartx_dll_s cn63xxp1;
};
union cvmx_mio_uartx_far {
@@ -1241,6 +1562,8 @@ union cvmx_mio_uartx_far {
struct cvmx_mio_uartx_far_s cn56xxp1;
struct cvmx_mio_uartx_far_s cn58xx;
struct cvmx_mio_uartx_far_s cn58xxp1;
+ struct cvmx_mio_uartx_far_s cn63xx;
+ struct cvmx_mio_uartx_far_s cn63xxp1;
};
union cvmx_mio_uartx_fcr {
@@ -1265,6 +1588,8 @@ union cvmx_mio_uartx_fcr {
struct cvmx_mio_uartx_fcr_s cn56xxp1;
struct cvmx_mio_uartx_fcr_s cn58xx;
struct cvmx_mio_uartx_fcr_s cn58xxp1;
+ struct cvmx_mio_uartx_fcr_s cn63xx;
+ struct cvmx_mio_uartx_fcr_s cn63xxp1;
};
union cvmx_mio_uartx_htx {
@@ -1284,6 +1609,8 @@ union cvmx_mio_uartx_htx {
struct cvmx_mio_uartx_htx_s cn56xxp1;
struct cvmx_mio_uartx_htx_s cn58xx;
struct cvmx_mio_uartx_htx_s cn58xxp1;
+ struct cvmx_mio_uartx_htx_s cn63xx;
+ struct cvmx_mio_uartx_htx_s cn63xxp1;
};
union cvmx_mio_uartx_ier {
@@ -1308,6 +1635,8 @@ union cvmx_mio_uartx_ier {
struct cvmx_mio_uartx_ier_s cn56xxp1;
struct cvmx_mio_uartx_ier_s cn58xx;
struct cvmx_mio_uartx_ier_s cn58xxp1;
+ struct cvmx_mio_uartx_ier_s cn63xx;
+ struct cvmx_mio_uartx_ier_s cn63xxp1;
};
union cvmx_mio_uartx_iir {
@@ -1329,6 +1658,8 @@ union cvmx_mio_uartx_iir {
struct cvmx_mio_uartx_iir_s cn56xxp1;
struct cvmx_mio_uartx_iir_s cn58xx;
struct cvmx_mio_uartx_iir_s cn58xxp1;
+ struct cvmx_mio_uartx_iir_s cn63xx;
+ struct cvmx_mio_uartx_iir_s cn63xxp1;
};
union cvmx_mio_uartx_lcr {
@@ -1354,6 +1685,8 @@ union cvmx_mio_uartx_lcr {
struct cvmx_mio_uartx_lcr_s cn56xxp1;
struct cvmx_mio_uartx_lcr_s cn58xx;
struct cvmx_mio_uartx_lcr_s cn58xxp1;
+ struct cvmx_mio_uartx_lcr_s cn63xx;
+ struct cvmx_mio_uartx_lcr_s cn63xxp1;
};
union cvmx_mio_uartx_lsr {
@@ -1380,6 +1713,8 @@ union cvmx_mio_uartx_lsr {
struct cvmx_mio_uartx_lsr_s cn56xxp1;
struct cvmx_mio_uartx_lsr_s cn58xx;
struct cvmx_mio_uartx_lsr_s cn58xxp1;
+ struct cvmx_mio_uartx_lsr_s cn63xx;
+ struct cvmx_mio_uartx_lsr_s cn63xxp1;
};
union cvmx_mio_uartx_mcr {
@@ -1404,6 +1739,8 @@ union cvmx_mio_uartx_mcr {
struct cvmx_mio_uartx_mcr_s cn56xxp1;
struct cvmx_mio_uartx_mcr_s cn58xx;
struct cvmx_mio_uartx_mcr_s cn58xxp1;
+ struct cvmx_mio_uartx_mcr_s cn63xx;
+ struct cvmx_mio_uartx_mcr_s cn63xxp1;
};
union cvmx_mio_uartx_msr {
@@ -1430,6 +1767,8 @@ union cvmx_mio_uartx_msr {
struct cvmx_mio_uartx_msr_s cn56xxp1;
struct cvmx_mio_uartx_msr_s cn58xx;
struct cvmx_mio_uartx_msr_s cn58xxp1;
+ struct cvmx_mio_uartx_msr_s cn63xx;
+ struct cvmx_mio_uartx_msr_s cn63xxp1;
};
union cvmx_mio_uartx_rbr {
@@ -1449,6 +1788,8 @@ union cvmx_mio_uartx_rbr {
struct cvmx_mio_uartx_rbr_s cn56xxp1;
struct cvmx_mio_uartx_rbr_s cn58xx;
struct cvmx_mio_uartx_rbr_s cn58xxp1;
+ struct cvmx_mio_uartx_rbr_s cn63xx;
+ struct cvmx_mio_uartx_rbr_s cn63xxp1;
};
union cvmx_mio_uartx_rfl {
@@ -1468,6 +1809,8 @@ union cvmx_mio_uartx_rfl {
struct cvmx_mio_uartx_rfl_s cn56xxp1;
struct cvmx_mio_uartx_rfl_s cn58xx;
struct cvmx_mio_uartx_rfl_s cn58xxp1;
+ struct cvmx_mio_uartx_rfl_s cn63xx;
+ struct cvmx_mio_uartx_rfl_s cn63xxp1;
};
union cvmx_mio_uartx_rfw {
@@ -1489,6 +1832,8 @@ union cvmx_mio_uartx_rfw {
struct cvmx_mio_uartx_rfw_s cn56xxp1;
struct cvmx_mio_uartx_rfw_s cn58xx;
struct cvmx_mio_uartx_rfw_s cn58xxp1;
+ struct cvmx_mio_uartx_rfw_s cn63xx;
+ struct cvmx_mio_uartx_rfw_s cn63xxp1;
};
union cvmx_mio_uartx_sbcr {
@@ -1508,6 +1853,8 @@ union cvmx_mio_uartx_sbcr {
struct cvmx_mio_uartx_sbcr_s cn56xxp1;
struct cvmx_mio_uartx_sbcr_s cn58xx;
struct cvmx_mio_uartx_sbcr_s cn58xxp1;
+ struct cvmx_mio_uartx_sbcr_s cn63xx;
+ struct cvmx_mio_uartx_sbcr_s cn63xxp1;
};
union cvmx_mio_uartx_scr {
@@ -1527,6 +1874,8 @@ union cvmx_mio_uartx_scr {
struct cvmx_mio_uartx_scr_s cn56xxp1;
struct cvmx_mio_uartx_scr_s cn58xx;
struct cvmx_mio_uartx_scr_s cn58xxp1;
+ struct cvmx_mio_uartx_scr_s cn63xx;
+ struct cvmx_mio_uartx_scr_s cn63xxp1;
};
union cvmx_mio_uartx_sfe {
@@ -1546,6 +1895,8 @@ union cvmx_mio_uartx_sfe {
struct cvmx_mio_uartx_sfe_s cn56xxp1;
struct cvmx_mio_uartx_sfe_s cn58xx;
struct cvmx_mio_uartx_sfe_s cn58xxp1;
+ struct cvmx_mio_uartx_sfe_s cn63xx;
+ struct cvmx_mio_uartx_sfe_s cn63xxp1;
};
union cvmx_mio_uartx_srr {
@@ -1567,6 +1918,8 @@ union cvmx_mio_uartx_srr {
struct cvmx_mio_uartx_srr_s cn56xxp1;
struct cvmx_mio_uartx_srr_s cn58xx;
struct cvmx_mio_uartx_srr_s cn58xxp1;
+ struct cvmx_mio_uartx_srr_s cn63xx;
+ struct cvmx_mio_uartx_srr_s cn63xxp1;
};
union cvmx_mio_uartx_srt {
@@ -1586,6 +1939,8 @@ union cvmx_mio_uartx_srt {
struct cvmx_mio_uartx_srt_s cn56xxp1;
struct cvmx_mio_uartx_srt_s cn58xx;
struct cvmx_mio_uartx_srt_s cn58xxp1;
+ struct cvmx_mio_uartx_srt_s cn63xx;
+ struct cvmx_mio_uartx_srt_s cn63xxp1;
};
union cvmx_mio_uartx_srts {
@@ -1605,6 +1960,8 @@ union cvmx_mio_uartx_srts {
struct cvmx_mio_uartx_srts_s cn56xxp1;
struct cvmx_mio_uartx_srts_s cn58xx;
struct cvmx_mio_uartx_srts_s cn58xxp1;
+ struct cvmx_mio_uartx_srts_s cn63xx;
+ struct cvmx_mio_uartx_srts_s cn63xxp1;
};
union cvmx_mio_uartx_stt {
@@ -1624,6 +1981,8 @@ union cvmx_mio_uartx_stt {
struct cvmx_mio_uartx_stt_s cn56xxp1;
struct cvmx_mio_uartx_stt_s cn58xx;
struct cvmx_mio_uartx_stt_s cn58xxp1;
+ struct cvmx_mio_uartx_stt_s cn63xx;
+ struct cvmx_mio_uartx_stt_s cn63xxp1;
};
union cvmx_mio_uartx_tfl {
@@ -1643,6 +2002,8 @@ union cvmx_mio_uartx_tfl {
struct cvmx_mio_uartx_tfl_s cn56xxp1;
struct cvmx_mio_uartx_tfl_s cn58xx;
struct cvmx_mio_uartx_tfl_s cn58xxp1;
+ struct cvmx_mio_uartx_tfl_s cn63xx;
+ struct cvmx_mio_uartx_tfl_s cn63xxp1;
};
union cvmx_mio_uartx_tfr {
@@ -1662,6 +2023,8 @@ union cvmx_mio_uartx_tfr {
struct cvmx_mio_uartx_tfr_s cn56xxp1;
struct cvmx_mio_uartx_tfr_s cn58xx;
struct cvmx_mio_uartx_tfr_s cn58xxp1;
+ struct cvmx_mio_uartx_tfr_s cn63xx;
+ struct cvmx_mio_uartx_tfr_s cn63xxp1;
};
union cvmx_mio_uartx_thr {
@@ -1681,6 +2044,8 @@ union cvmx_mio_uartx_thr {
struct cvmx_mio_uartx_thr_s cn56xxp1;
struct cvmx_mio_uartx_thr_s cn58xx;
struct cvmx_mio_uartx_thr_s cn58xxp1;
+ struct cvmx_mio_uartx_thr_s cn63xx;
+ struct cvmx_mio_uartx_thr_s cn63xxp1;
};
union cvmx_mio_uartx_usr {
@@ -1704,6 +2069,8 @@ union cvmx_mio_uartx_usr {
struct cvmx_mio_uartx_usr_s cn56xxp1;
struct cvmx_mio_uartx_usr_s cn58xx;
struct cvmx_mio_uartx_usr_s cn58xxp1;
+ struct cvmx_mio_uartx_usr_s cn63xx;
+ struct cvmx_mio_uartx_usr_s cn63xxp1;
};
union cvmx_mio_uart2_dlh {
diff --git a/arch/mips/include/asm/octeon/cvmx-mixx-defs.h b/arch/mips/include/asm/octeon/cvmx-mixx-defs.h
index dab6dca492f9..7057c447e69e 100644
--- a/arch/mips/include/asm/octeon/cvmx-mixx-defs.h
+++ b/arch/mips/include/asm/octeon/cvmx-mixx-defs.h
@@ -4,7 +4,7 @@
* Contact: support@caviumnetworks.com
* This file is part of the OCTEON SDK
*
- * Copyright (c) 2003-2008 Cavium Networks
+ * Copyright (c) 2003-2010 Cavium Networks
*
* This file is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License, Version 2, as
@@ -28,52 +28,52 @@
#ifndef __CVMX_MIXX_DEFS_H__
#define __CVMX_MIXX_DEFS_H__
-#define CVMX_MIXX_BIST(offset) \
- CVMX_ADD_IO_SEG(0x0001070000100078ull + (((offset) & 1) * 2048))
-#define CVMX_MIXX_CTL(offset) \
- CVMX_ADD_IO_SEG(0x0001070000100020ull + (((offset) & 1) * 2048))
-#define CVMX_MIXX_INTENA(offset) \
- CVMX_ADD_IO_SEG(0x0001070000100050ull + (((offset) & 1) * 2048))
-#define CVMX_MIXX_IRCNT(offset) \
- CVMX_ADD_IO_SEG(0x0001070000100030ull + (((offset) & 1) * 2048))
-#define CVMX_MIXX_IRHWM(offset) \
- CVMX_ADD_IO_SEG(0x0001070000100028ull + (((offset) & 1) * 2048))
-#define CVMX_MIXX_IRING1(offset) \
- CVMX_ADD_IO_SEG(0x0001070000100010ull + (((offset) & 1) * 2048))
-#define CVMX_MIXX_IRING2(offset) \
- CVMX_ADD_IO_SEG(0x0001070000100018ull + (((offset) & 1) * 2048))
-#define CVMX_MIXX_ISR(offset) \
- CVMX_ADD_IO_SEG(0x0001070000100048ull + (((offset) & 1) * 2048))
-#define CVMX_MIXX_ORCNT(offset) \
- CVMX_ADD_IO_SEG(0x0001070000100040ull + (((offset) & 1) * 2048))
-#define CVMX_MIXX_ORHWM(offset) \
- CVMX_ADD_IO_SEG(0x0001070000100038ull + (((offset) & 1) * 2048))
-#define CVMX_MIXX_ORING1(offset) \
- CVMX_ADD_IO_SEG(0x0001070000100000ull + (((offset) & 1) * 2048))
-#define CVMX_MIXX_ORING2(offset) \
- CVMX_ADD_IO_SEG(0x0001070000100008ull + (((offset) & 1) * 2048))
-#define CVMX_MIXX_REMCNT(offset) \
- CVMX_ADD_IO_SEG(0x0001070000100058ull + (((offset) & 1) * 2048))
+#define CVMX_MIXX_BIST(offset) (CVMX_ADD_IO_SEG(0x0001070000100078ull) + ((offset) & 1) * 2048)
+#define CVMX_MIXX_CTL(offset) (CVMX_ADD_IO_SEG(0x0001070000100020ull) + ((offset) & 1) * 2048)
+#define CVMX_MIXX_INTENA(offset) (CVMX_ADD_IO_SEG(0x0001070000100050ull) + ((offset) & 1) * 2048)
+#define CVMX_MIXX_IRCNT(offset) (CVMX_ADD_IO_SEG(0x0001070000100030ull) + ((offset) & 1) * 2048)
+#define CVMX_MIXX_IRHWM(offset) (CVMX_ADD_IO_SEG(0x0001070000100028ull) + ((offset) & 1) * 2048)
+#define CVMX_MIXX_IRING1(offset) (CVMX_ADD_IO_SEG(0x0001070000100010ull) + ((offset) & 1) * 2048)
+#define CVMX_MIXX_IRING2(offset) (CVMX_ADD_IO_SEG(0x0001070000100018ull) + ((offset) & 1) * 2048)
+#define CVMX_MIXX_ISR(offset) (CVMX_ADD_IO_SEG(0x0001070000100048ull) + ((offset) & 1) * 2048)
+#define CVMX_MIXX_ORCNT(offset) (CVMX_ADD_IO_SEG(0x0001070000100040ull) + ((offset) & 1) * 2048)
+#define CVMX_MIXX_ORHWM(offset) (CVMX_ADD_IO_SEG(0x0001070000100038ull) + ((offset) & 1) * 2048)
+#define CVMX_MIXX_ORING1(offset) (CVMX_ADD_IO_SEG(0x0001070000100000ull) + ((offset) & 1) * 2048)
+#define CVMX_MIXX_ORING2(offset) (CVMX_ADD_IO_SEG(0x0001070000100008ull) + ((offset) & 1) * 2048)
+#define CVMX_MIXX_REMCNT(offset) (CVMX_ADD_IO_SEG(0x0001070000100058ull) + ((offset) & 1) * 2048)
+#define CVMX_MIXX_TSCTL(offset) (CVMX_ADD_IO_SEG(0x0001070000100068ull) + ((offset) & 1) * 2048)
+#define CVMX_MIXX_TSTAMP(offset) (CVMX_ADD_IO_SEG(0x0001070000100060ull) + ((offset) & 1) * 2048)
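Illustrative aside, not part of the patch: the rewritten one-line macros keep the old address arithmetic, with the offset argument selecting one of the two MIX interfaces spaced 2048 bytes apart, e.g.:

    CVMX_MIXX_CTL(0) == CVMX_ADD_IO_SEG(0x0001070000100020ull)
    CVMX_MIXX_CTL(1) == CVMX_ADD_IO_SEG(0x0001070000100020ull) + 2048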
union cvmx_mixx_bist {
uint64_t u64;
struct cvmx_mixx_bist_s {
- uint64_t reserved_4_63:60;
+ uint64_t reserved_6_63:58;
+ uint64_t opfdat:1;
+ uint64_t mrgdat:1;
uint64_t mrqdat:1;
uint64_t ipfdat:1;
uint64_t irfdat:1;
uint64_t orfdat:1;
} s;
- struct cvmx_mixx_bist_s cn52xx;
- struct cvmx_mixx_bist_s cn52xxp1;
- struct cvmx_mixx_bist_s cn56xx;
- struct cvmx_mixx_bist_s cn56xxp1;
+ struct cvmx_mixx_bist_cn52xx {
+ uint64_t reserved_4_63:60;
+ uint64_t mrqdat:1;
+ uint64_t ipfdat:1;
+ uint64_t irfdat:1;
+ uint64_t orfdat:1;
+ } cn52xx;
+ struct cvmx_mixx_bist_cn52xx cn52xxp1;
+ struct cvmx_mixx_bist_cn52xx cn56xx;
+ struct cvmx_mixx_bist_cn52xx cn56xxp1;
+ struct cvmx_mixx_bist_s cn63xx;
+ struct cvmx_mixx_bist_s cn63xxp1;
};
union cvmx_mixx_ctl {
uint64_t u64;
struct cvmx_mixx_ctl_s {
- uint64_t reserved_8_63:56;
+ uint64_t reserved_12_63:52;
+ uint64_t ts_thresh:4;
uint64_t crc_strip:1;
uint64_t busy:1;
uint64_t en:1;
@@ -82,16 +82,28 @@ union cvmx_mixx_ctl {
uint64_t nbtarb:1;
uint64_t mrq_hwm:2;
} s;
- struct cvmx_mixx_ctl_s cn52xx;
- struct cvmx_mixx_ctl_s cn52xxp1;
- struct cvmx_mixx_ctl_s cn56xx;
- struct cvmx_mixx_ctl_s cn56xxp1;
+ struct cvmx_mixx_ctl_cn52xx {
+ uint64_t reserved_8_63:56;
+ uint64_t crc_strip:1;
+ uint64_t busy:1;
+ uint64_t en:1;
+ uint64_t reset:1;
+ uint64_t lendian:1;
+ uint64_t nbtarb:1;
+ uint64_t mrq_hwm:2;
+ } cn52xx;
+ struct cvmx_mixx_ctl_cn52xx cn52xxp1;
+ struct cvmx_mixx_ctl_cn52xx cn56xx;
+ struct cvmx_mixx_ctl_cn52xx cn56xxp1;
+ struct cvmx_mixx_ctl_s cn63xx;
+ struct cvmx_mixx_ctl_s cn63xxp1;
};
union cvmx_mixx_intena {
uint64_t u64;
struct cvmx_mixx_intena_s {
- uint64_t reserved_7_63:57;
+ uint64_t reserved_8_63:56;
+ uint64_t tsena:1;
uint64_t orunena:1;
uint64_t irunena:1;
uint64_t data_drpena:1;
@@ -100,10 +112,21 @@ union cvmx_mixx_intena {
uint64_t ivfena:1;
uint64_t ovfena:1;
} s;
- struct cvmx_mixx_intena_s cn52xx;
- struct cvmx_mixx_intena_s cn52xxp1;
- struct cvmx_mixx_intena_s cn56xx;
- struct cvmx_mixx_intena_s cn56xxp1;
+ struct cvmx_mixx_intena_cn52xx {
+ uint64_t reserved_7_63:57;
+ uint64_t orunena:1;
+ uint64_t irunena:1;
+ uint64_t data_drpena:1;
+ uint64_t ithena:1;
+ uint64_t othena:1;
+ uint64_t ivfena:1;
+ uint64_t ovfena:1;
+ } cn52xx;
+ struct cvmx_mixx_intena_cn52xx cn52xxp1;
+ struct cvmx_mixx_intena_cn52xx cn56xx;
+ struct cvmx_mixx_intena_cn52xx cn56xxp1;
+ struct cvmx_mixx_intena_s cn63xx;
+ struct cvmx_mixx_intena_s cn63xxp1;
};
union cvmx_mixx_ircnt {
@@ -116,6 +139,8 @@ union cvmx_mixx_ircnt {
struct cvmx_mixx_ircnt_s cn52xxp1;
struct cvmx_mixx_ircnt_s cn56xx;
struct cvmx_mixx_ircnt_s cn56xxp1;
+ struct cvmx_mixx_ircnt_s cn63xx;
+ struct cvmx_mixx_ircnt_s cn63xxp1;
};
union cvmx_mixx_irhwm {
@@ -129,6 +154,8 @@ union cvmx_mixx_irhwm {
struct cvmx_mixx_irhwm_s cn52xxp1;
struct cvmx_mixx_irhwm_s cn56xx;
struct cvmx_mixx_irhwm_s cn56xxp1;
+ struct cvmx_mixx_irhwm_s cn63xx;
+ struct cvmx_mixx_irhwm_s cn63xxp1;
};
union cvmx_mixx_iring1 {
@@ -136,14 +163,21 @@ union cvmx_mixx_iring1 {
struct cvmx_mixx_iring1_s {
uint64_t reserved_60_63:4;
uint64_t isize:20;
+ uint64_t ibase:37;
+ uint64_t reserved_0_2:3;
+ } s;
+ struct cvmx_mixx_iring1_cn52xx {
+ uint64_t reserved_60_63:4;
+ uint64_t isize:20;
uint64_t reserved_36_39:4;
uint64_t ibase:33;
uint64_t reserved_0_2:3;
- } s;
- struct cvmx_mixx_iring1_s cn52xx;
- struct cvmx_mixx_iring1_s cn52xxp1;
- struct cvmx_mixx_iring1_s cn56xx;
- struct cvmx_mixx_iring1_s cn56xxp1;
+ } cn52xx;
+ struct cvmx_mixx_iring1_cn52xx cn52xxp1;
+ struct cvmx_mixx_iring1_cn52xx cn56xx;
+ struct cvmx_mixx_iring1_cn52xx cn56xxp1;
+ struct cvmx_mixx_iring1_s cn63xx;
+ struct cvmx_mixx_iring1_s cn63xxp1;
};
union cvmx_mixx_iring2 {
@@ -158,12 +192,15 @@ union cvmx_mixx_iring2 {
struct cvmx_mixx_iring2_s cn52xxp1;
struct cvmx_mixx_iring2_s cn56xx;
struct cvmx_mixx_iring2_s cn56xxp1;
+ struct cvmx_mixx_iring2_s cn63xx;
+ struct cvmx_mixx_iring2_s cn63xxp1;
};
union cvmx_mixx_isr {
uint64_t u64;
struct cvmx_mixx_isr_s {
- uint64_t reserved_7_63:57;
+ uint64_t reserved_8_63:56;
+ uint64_t ts:1;
uint64_t orun:1;
uint64_t irun:1;
uint64_t data_drp:1;
@@ -172,10 +209,21 @@ union cvmx_mixx_isr {
uint64_t idblovf:1;
uint64_t odblovf:1;
} s;
- struct cvmx_mixx_isr_s cn52xx;
- struct cvmx_mixx_isr_s cn52xxp1;
- struct cvmx_mixx_isr_s cn56xx;
- struct cvmx_mixx_isr_s cn56xxp1;
+ struct cvmx_mixx_isr_cn52xx {
+ uint64_t reserved_7_63:57;
+ uint64_t orun:1;
+ uint64_t irun:1;
+ uint64_t data_drp:1;
+ uint64_t irthresh:1;
+ uint64_t orthresh:1;
+ uint64_t idblovf:1;
+ uint64_t odblovf:1;
+ } cn52xx;
+ struct cvmx_mixx_isr_cn52xx cn52xxp1;
+ struct cvmx_mixx_isr_cn52xx cn56xx;
+ struct cvmx_mixx_isr_cn52xx cn56xxp1;
+ struct cvmx_mixx_isr_s cn63xx;
+ struct cvmx_mixx_isr_s cn63xxp1;
};
union cvmx_mixx_orcnt {
@@ -188,6 +236,8 @@ union cvmx_mixx_orcnt {
struct cvmx_mixx_orcnt_s cn52xxp1;
struct cvmx_mixx_orcnt_s cn56xx;
struct cvmx_mixx_orcnt_s cn56xxp1;
+ struct cvmx_mixx_orcnt_s cn63xx;
+ struct cvmx_mixx_orcnt_s cn63xxp1;
};
union cvmx_mixx_orhwm {
@@ -200,6 +250,8 @@ union cvmx_mixx_orhwm {
struct cvmx_mixx_orhwm_s cn52xxp1;
struct cvmx_mixx_orhwm_s cn56xx;
struct cvmx_mixx_orhwm_s cn56xxp1;
+ struct cvmx_mixx_orhwm_s cn63xx;
+ struct cvmx_mixx_orhwm_s cn63xxp1;
};
union cvmx_mixx_oring1 {
@@ -207,14 +259,21 @@ union cvmx_mixx_oring1 {
struct cvmx_mixx_oring1_s {
uint64_t reserved_60_63:4;
uint64_t osize:20;
+ uint64_t obase:37;
+ uint64_t reserved_0_2:3;
+ } s;
+ struct cvmx_mixx_oring1_cn52xx {
+ uint64_t reserved_60_63:4;
+ uint64_t osize:20;
uint64_t reserved_36_39:4;
uint64_t obase:33;
uint64_t reserved_0_2:3;
- } s;
- struct cvmx_mixx_oring1_s cn52xx;
- struct cvmx_mixx_oring1_s cn52xxp1;
- struct cvmx_mixx_oring1_s cn56xx;
- struct cvmx_mixx_oring1_s cn56xxp1;
+ } cn52xx;
+ struct cvmx_mixx_oring1_cn52xx cn52xxp1;
+ struct cvmx_mixx_oring1_cn52xx cn56xx;
+ struct cvmx_mixx_oring1_cn52xx cn56xxp1;
+ struct cvmx_mixx_oring1_s cn63xx;
+ struct cvmx_mixx_oring1_s cn63xxp1;
};
union cvmx_mixx_oring2 {
@@ -229,6 +288,8 @@ union cvmx_mixx_oring2 {
struct cvmx_mixx_oring2_s cn52xxp1;
struct cvmx_mixx_oring2_s cn56xx;
struct cvmx_mixx_oring2_s cn56xxp1;
+ struct cvmx_mixx_oring2_s cn63xx;
+ struct cvmx_mixx_oring2_s cn63xxp1;
};
union cvmx_mixx_remcnt {
@@ -243,6 +304,31 @@ union cvmx_mixx_remcnt {
struct cvmx_mixx_remcnt_s cn52xxp1;
struct cvmx_mixx_remcnt_s cn56xx;
struct cvmx_mixx_remcnt_s cn56xxp1;
+ struct cvmx_mixx_remcnt_s cn63xx;
+ struct cvmx_mixx_remcnt_s cn63xxp1;
+};
+
+union cvmx_mixx_tsctl {
+ uint64_t u64;
+ struct cvmx_mixx_tsctl_s {
+ uint64_t reserved_21_63:43;
+ uint64_t tsavl:5;
+ uint64_t reserved_13_15:3;
+ uint64_t tstot:5;
+ uint64_t reserved_5_7:3;
+ uint64_t tscnt:5;
+ } s;
+ struct cvmx_mixx_tsctl_s cn63xx;
+ struct cvmx_mixx_tsctl_s cn63xxp1;
+};
+
+union cvmx_mixx_tstamp {
+ uint64_t u64;
+ struct cvmx_mixx_tstamp_s {
+ uint64_t tstamp:64;
+ } s;
+ struct cvmx_mixx_tstamp_s cn63xx;
+ struct cvmx_mixx_tstamp_s cn63xxp1;
};
#endif
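A minimal usage sketch for the generated MIX definitions (illustrative only; it assumes the usual cvmx_read_csr()/cvmx_write_csr() helpers from asm/octeon/cvmx.h, which this patch does not change):

    #include <asm/octeon/cvmx.h>
    #include <asm/octeon/cvmx-mixx-defs.h>

    /* Bring MIX interface 'port' (0 or 1) out of reset and enable it. */
    static void mix_port_enable(int port)
    {
            union cvmx_mixx_ctl mix_ctl;

            /* Read the CSR, modify it through the bitfield view, write it back. */
            mix_ctl.u64 = cvmx_read_csr(CVMX_MIXX_CTL(port));
            mix_ctl.s.reset = 0;        /* take the interface out of reset */
            mix_ctl.s.en = 1;           /* enable the MIX interface */
            cvmx_write_csr(CVMX_MIXX_CTL(port), mix_ctl.u64);
    }

The same read/modify/write pattern applies to the other unions added in this patch (for example cvmx_mixx_intena or cvmx_mio_rst_boot), using the address macro that matches each union's register.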
diff --git a/arch/mips/include/asm/octeon/cvmx-npei-defs.h b/arch/mips/include/asm/octeon/cvmx-npei-defs.h
index 4b347bb8ce80..9899a9d2ba72 100644
--- a/arch/mips/include/asm/octeon/cvmx-npei-defs.h
+++ b/arch/mips/include/asm/octeon/cvmx-npei-defs.h
@@ -4,7 +4,7 @@
* Contact: support@caviumnetworks.com
* This file is part of the OCTEON SDK
*
- * Copyright (c) 2003-2008 Cavium Networks
+ * Copyright (c) 2003-2010 Cavium Networks
*
* This file is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License, Version 2, as
@@ -28,206 +28,114 @@
#ifndef __CVMX_NPEI_DEFS_H__
#define __CVMX_NPEI_DEFS_H__
-#define CVMX_NPEI_BAR1_INDEXX(offset) \
- (0x0000000000000000ull + (((offset) & 31) * 16))
-#define CVMX_NPEI_BIST_STATUS \
- (0x0000000000000580ull)
-#define CVMX_NPEI_BIST_STATUS2 \
- (0x0000000000000680ull)
-#define CVMX_NPEI_CTL_PORT0 \
- (0x0000000000000250ull)
-#define CVMX_NPEI_CTL_PORT1 \
- (0x0000000000000260ull)
-#define CVMX_NPEI_CTL_STATUS \
- (0x0000000000000570ull)
-#define CVMX_NPEI_CTL_STATUS2 \
- (0x0000000000003C00ull)
-#define CVMX_NPEI_DATA_OUT_CNT \
- (0x00000000000005F0ull)
-#define CVMX_NPEI_DBG_DATA \
- (0x0000000000000510ull)
-#define CVMX_NPEI_DBG_SELECT \
- (0x0000000000000500ull)
-#define CVMX_NPEI_DMA0_INT_LEVEL \
- (0x00000000000005C0ull)
-#define CVMX_NPEI_DMA1_INT_LEVEL \
- (0x00000000000005D0ull)
-#define CVMX_NPEI_DMAX_COUNTS(offset) \
- (0x0000000000000450ull + (((offset) & 7) * 16))
-#define CVMX_NPEI_DMAX_DBELL(offset) \
- (0x00000000000003B0ull + (((offset) & 7) * 16))
-#define CVMX_NPEI_DMAX_IBUFF_SADDR(offset) \
- (0x0000000000000400ull + (((offset) & 7) * 16))
-#define CVMX_NPEI_DMAX_NADDR(offset) \
- (0x00000000000004A0ull + (((offset) & 7) * 16))
-#define CVMX_NPEI_DMA_CNTS \
- (0x00000000000005E0ull)
-#define CVMX_NPEI_DMA_CONTROL \
- (0x00000000000003A0ull)
-#define CVMX_NPEI_INT_A_ENB \
- (0x0000000000000560ull)
-#define CVMX_NPEI_INT_A_ENB2 \
- (0x0000000000003CE0ull)
-#define CVMX_NPEI_INT_A_SUM \
- (0x0000000000000550ull)
-#define CVMX_NPEI_INT_ENB \
- (0x0000000000000540ull)
-#define CVMX_NPEI_INT_ENB2 \
- (0x0000000000003CD0ull)
-#define CVMX_NPEI_INT_INFO \
- (0x0000000000000590ull)
-#define CVMX_NPEI_INT_SUM \
- (0x0000000000000530ull)
-#define CVMX_NPEI_INT_SUM2 \
- (0x0000000000003CC0ull)
-#define CVMX_NPEI_LAST_WIN_RDATA0 \
- (0x0000000000000600ull)
-#define CVMX_NPEI_LAST_WIN_RDATA1 \
- (0x0000000000000610ull)
-#define CVMX_NPEI_MEM_ACCESS_CTL \
- (0x00000000000004F0ull)
-#define CVMX_NPEI_MEM_ACCESS_SUBIDX(offset) \
- (0x0000000000000340ull + (((offset) & 31) * 16) - 16 * 12)
-#define CVMX_NPEI_MSI_ENB0 \
- (0x0000000000003C50ull)
-#define CVMX_NPEI_MSI_ENB1 \
- (0x0000000000003C60ull)
-#define CVMX_NPEI_MSI_ENB2 \
- (0x0000000000003C70ull)
-#define CVMX_NPEI_MSI_ENB3 \
- (0x0000000000003C80ull)
-#define CVMX_NPEI_MSI_RCV0 \
- (0x0000000000003C10ull)
-#define CVMX_NPEI_MSI_RCV1 \
- (0x0000000000003C20ull)
-#define CVMX_NPEI_MSI_RCV2 \
- (0x0000000000003C30ull)
-#define CVMX_NPEI_MSI_RCV3 \
- (0x0000000000003C40ull)
-#define CVMX_NPEI_MSI_RD_MAP \
- (0x0000000000003CA0ull)
-#define CVMX_NPEI_MSI_W1C_ENB0 \
- (0x0000000000003CF0ull)
-#define CVMX_NPEI_MSI_W1C_ENB1 \
- (0x0000000000003D00ull)
-#define CVMX_NPEI_MSI_W1C_ENB2 \
- (0x0000000000003D10ull)
-#define CVMX_NPEI_MSI_W1C_ENB3 \
- (0x0000000000003D20ull)
-#define CVMX_NPEI_MSI_W1S_ENB0 \
- (0x0000000000003D30ull)
-#define CVMX_NPEI_MSI_W1S_ENB1 \
- (0x0000000000003D40ull)
-#define CVMX_NPEI_MSI_W1S_ENB2 \
- (0x0000000000003D50ull)
-#define CVMX_NPEI_MSI_W1S_ENB3 \
- (0x0000000000003D60ull)
-#define CVMX_NPEI_MSI_WR_MAP \
- (0x0000000000003C90ull)
-#define CVMX_NPEI_PCIE_CREDIT_CNT \
- (0x0000000000003D70ull)
-#define CVMX_NPEI_PCIE_MSI_RCV \
- (0x0000000000003CB0ull)
-#define CVMX_NPEI_PCIE_MSI_RCV_B1 \
- (0x0000000000000650ull)
-#define CVMX_NPEI_PCIE_MSI_RCV_B2 \
- (0x0000000000000660ull)
-#define CVMX_NPEI_PCIE_MSI_RCV_B3 \
- (0x0000000000000670ull)
-#define CVMX_NPEI_PKTX_CNTS(offset) \
- (0x0000000000002400ull + (((offset) & 31) * 16))
-#define CVMX_NPEI_PKTX_INSTR_BADDR(offset) \
- (0x0000000000002800ull + (((offset) & 31) * 16))
-#define CVMX_NPEI_PKTX_INSTR_BAOFF_DBELL(offset) \
- (0x0000000000002C00ull + (((offset) & 31) * 16))
-#define CVMX_NPEI_PKTX_INSTR_FIFO_RSIZE(offset) \
- (0x0000000000003000ull + (((offset) & 31) * 16))
-#define CVMX_NPEI_PKTX_INSTR_HEADER(offset) \
- (0x0000000000003400ull + (((offset) & 31) * 16))
-#define CVMX_NPEI_PKTX_IN_BP(offset) \
- (0x0000000000003800ull + (((offset) & 31) * 16))
-#define CVMX_NPEI_PKTX_SLIST_BADDR(offset) \
- (0x0000000000001400ull + (((offset) & 31) * 16))
-#define CVMX_NPEI_PKTX_SLIST_BAOFF_DBELL(offset) \
- (0x0000000000001800ull + (((offset) & 31) * 16))
-#define CVMX_NPEI_PKTX_SLIST_FIFO_RSIZE(offset) \
- (0x0000000000001C00ull + (((offset) & 31) * 16))
-#define CVMX_NPEI_PKT_CNT_INT \
- (0x0000000000001110ull)
-#define CVMX_NPEI_PKT_CNT_INT_ENB \
- (0x0000000000001130ull)
-#define CVMX_NPEI_PKT_DATA_OUT_ES \
- (0x00000000000010B0ull)
-#define CVMX_NPEI_PKT_DATA_OUT_NS \
- (0x00000000000010A0ull)
-#define CVMX_NPEI_PKT_DATA_OUT_ROR \
- (0x0000000000001090ull)
-#define CVMX_NPEI_PKT_DPADDR \
- (0x0000000000001080ull)
-#define CVMX_NPEI_PKT_INPUT_CONTROL \
- (0x0000000000001150ull)
-#define CVMX_NPEI_PKT_INSTR_ENB \
- (0x0000000000001000ull)
-#define CVMX_NPEI_PKT_INSTR_RD_SIZE \
- (0x0000000000001190ull)
-#define CVMX_NPEI_PKT_INSTR_SIZE \
- (0x0000000000001020ull)
-#define CVMX_NPEI_PKT_INT_LEVELS \
- (0x0000000000001100ull)
-#define CVMX_NPEI_PKT_IN_BP \
- (0x00000000000006B0ull)
-#define CVMX_NPEI_PKT_IN_DONEX_CNTS(offset) \
- (0x0000000000002000ull + (((offset) & 31) * 16))
-#define CVMX_NPEI_PKT_IN_INSTR_COUNTS \
- (0x00000000000006A0ull)
-#define CVMX_NPEI_PKT_IN_PCIE_PORT \
- (0x00000000000011A0ull)
-#define CVMX_NPEI_PKT_IPTR \
- (0x0000000000001070ull)
-#define CVMX_NPEI_PKT_OUTPUT_WMARK \
- (0x0000000000001160ull)
-#define CVMX_NPEI_PKT_OUT_BMODE \
- (0x00000000000010D0ull)
-#define CVMX_NPEI_PKT_OUT_ENB \
- (0x0000000000001010ull)
-#define CVMX_NPEI_PKT_PCIE_PORT \
- (0x00000000000010E0ull)
-#define CVMX_NPEI_PKT_PORT_IN_RST \
- (0x0000000000000690ull)
-#define CVMX_NPEI_PKT_SLIST_ES \
- (0x0000000000001050ull)
-#define CVMX_NPEI_PKT_SLIST_ID_SIZE \
- (0x0000000000001180ull)
-#define CVMX_NPEI_PKT_SLIST_NS \
- (0x0000000000001040ull)
-#define CVMX_NPEI_PKT_SLIST_ROR \
- (0x0000000000001030ull)
-#define CVMX_NPEI_PKT_TIME_INT \
- (0x0000000000001120ull)
-#define CVMX_NPEI_PKT_TIME_INT_ENB \
- (0x0000000000001140ull)
-#define CVMX_NPEI_RSL_INT_BLOCKS \
- (0x0000000000000520ull)
-#define CVMX_NPEI_SCRATCH_1 \
- (0x0000000000000270ull)
-#define CVMX_NPEI_STATE1 \
- (0x0000000000000620ull)
-#define CVMX_NPEI_STATE2 \
- (0x0000000000000630ull)
-#define CVMX_NPEI_STATE3 \
- (0x0000000000000640ull)
-#define CVMX_NPEI_WINDOW_CTL \
- (0x0000000000000380ull)
-#define CVMX_NPEI_WIN_RD_ADDR \
- (0x0000000000000210ull)
-#define CVMX_NPEI_WIN_RD_DATA \
- (0x0000000000000240ull)
-#define CVMX_NPEI_WIN_WR_ADDR \
- (0x0000000000000200ull)
-#define CVMX_NPEI_WIN_WR_DATA \
- (0x0000000000000220ull)
-#define CVMX_NPEI_WIN_WR_MASK \
- (0x0000000000000230ull)
+#define CVMX_NPEI_BAR1_INDEXX(offset) (0x0000000000000000ull + ((offset) & 31) * 16)
+#define CVMX_NPEI_BIST_STATUS (0x0000000000000580ull)
+#define CVMX_NPEI_BIST_STATUS2 (0x0000000000000680ull)
+#define CVMX_NPEI_CTL_PORT0 (0x0000000000000250ull)
+#define CVMX_NPEI_CTL_PORT1 (0x0000000000000260ull)
+#define CVMX_NPEI_CTL_STATUS (0x0000000000000570ull)
+#define CVMX_NPEI_CTL_STATUS2 (0x0000000000003C00ull)
+#define CVMX_NPEI_DATA_OUT_CNT (0x00000000000005F0ull)
+#define CVMX_NPEI_DBG_DATA (0x0000000000000510ull)
+#define CVMX_NPEI_DBG_SELECT (0x0000000000000500ull)
+#define CVMX_NPEI_DMA0_INT_LEVEL (0x00000000000005C0ull)
+#define CVMX_NPEI_DMA1_INT_LEVEL (0x00000000000005D0ull)
+#define CVMX_NPEI_DMAX_COUNTS(offset) (0x0000000000000450ull + ((offset) & 7) * 16)
+#define CVMX_NPEI_DMAX_DBELL(offset) (0x00000000000003B0ull + ((offset) & 7) * 16)
+#define CVMX_NPEI_DMAX_IBUFF_SADDR(offset) (0x0000000000000400ull + ((offset) & 7) * 16)
+#define CVMX_NPEI_DMAX_NADDR(offset) (0x00000000000004A0ull + ((offset) & 7) * 16)
+#define CVMX_NPEI_DMA_CNTS (0x00000000000005E0ull)
+#define CVMX_NPEI_DMA_CONTROL (0x00000000000003A0ull)
+#define CVMX_NPEI_DMA_PCIE_REQ_NUM (0x00000000000005B0ull)
+#define CVMX_NPEI_DMA_STATE1 (0x00000000000006C0ull)
+#define CVMX_NPEI_DMA_STATE1_P1 (0x0000000000000680ull)
+#define CVMX_NPEI_DMA_STATE2 (0x00000000000006D0ull)
+#define CVMX_NPEI_DMA_STATE2_P1 (0x0000000000000690ull)
+#define CVMX_NPEI_DMA_STATE3_P1 (0x00000000000006A0ull)
+#define CVMX_NPEI_DMA_STATE4_P1 (0x00000000000006B0ull)
+#define CVMX_NPEI_DMA_STATE5_P1 (0x00000000000006C0ull)
+#define CVMX_NPEI_INT_A_ENB (0x0000000000000560ull)
+#define CVMX_NPEI_INT_A_ENB2 (0x0000000000003CE0ull)
+#define CVMX_NPEI_INT_A_SUM (0x0000000000000550ull)
+#define CVMX_NPEI_INT_ENB (0x0000000000000540ull)
+#define CVMX_NPEI_INT_ENB2 (0x0000000000003CD0ull)
+#define CVMX_NPEI_INT_INFO (0x0000000000000590ull)
+#define CVMX_NPEI_INT_SUM (0x0000000000000530ull)
+#define CVMX_NPEI_INT_SUM2 (0x0000000000003CC0ull)
+#define CVMX_NPEI_LAST_WIN_RDATA0 (0x0000000000000600ull)
+#define CVMX_NPEI_LAST_WIN_RDATA1 (0x0000000000000610ull)
+#define CVMX_NPEI_MEM_ACCESS_CTL (0x00000000000004F0ull)
+#define CVMX_NPEI_MEM_ACCESS_SUBIDX(offset) (0x0000000000000340ull + ((offset) & 31) * 16 - 16*12)
+#define CVMX_NPEI_MSI_ENB0 (0x0000000000003C50ull)
+#define CVMX_NPEI_MSI_ENB1 (0x0000000000003C60ull)
+#define CVMX_NPEI_MSI_ENB2 (0x0000000000003C70ull)
+#define CVMX_NPEI_MSI_ENB3 (0x0000000000003C80ull)
+#define CVMX_NPEI_MSI_RCV0 (0x0000000000003C10ull)
+#define CVMX_NPEI_MSI_RCV1 (0x0000000000003C20ull)
+#define CVMX_NPEI_MSI_RCV2 (0x0000000000003C30ull)
+#define CVMX_NPEI_MSI_RCV3 (0x0000000000003C40ull)
+#define CVMX_NPEI_MSI_RD_MAP (0x0000000000003CA0ull)
+#define CVMX_NPEI_MSI_W1C_ENB0 (0x0000000000003CF0ull)
+#define CVMX_NPEI_MSI_W1C_ENB1 (0x0000000000003D00ull)
+#define CVMX_NPEI_MSI_W1C_ENB2 (0x0000000000003D10ull)
+#define CVMX_NPEI_MSI_W1C_ENB3 (0x0000000000003D20ull)
+#define CVMX_NPEI_MSI_W1S_ENB0 (0x0000000000003D30ull)
+#define CVMX_NPEI_MSI_W1S_ENB1 (0x0000000000003D40ull)
+#define CVMX_NPEI_MSI_W1S_ENB2 (0x0000000000003D50ull)
+#define CVMX_NPEI_MSI_W1S_ENB3 (0x0000000000003D60ull)
+#define CVMX_NPEI_MSI_WR_MAP (0x0000000000003C90ull)
+#define CVMX_NPEI_PCIE_CREDIT_CNT (0x0000000000003D70ull)
+#define CVMX_NPEI_PCIE_MSI_RCV (0x0000000000003CB0ull)
+#define CVMX_NPEI_PCIE_MSI_RCV_B1 (0x0000000000000650ull)
+#define CVMX_NPEI_PCIE_MSI_RCV_B2 (0x0000000000000660ull)
+#define CVMX_NPEI_PCIE_MSI_RCV_B3 (0x0000000000000670ull)
+#define CVMX_NPEI_PKTX_CNTS(offset) (0x0000000000002400ull + ((offset) & 31) * 16)
+#define CVMX_NPEI_PKTX_INSTR_BADDR(offset) (0x0000000000002800ull + ((offset) & 31) * 16)
+#define CVMX_NPEI_PKTX_INSTR_BAOFF_DBELL(offset) (0x0000000000002C00ull + ((offset) & 31) * 16)
+#define CVMX_NPEI_PKTX_INSTR_FIFO_RSIZE(offset) (0x0000000000003000ull + ((offset) & 31) * 16)
+#define CVMX_NPEI_PKTX_INSTR_HEADER(offset) (0x0000000000003400ull + ((offset) & 31) * 16)
+#define CVMX_NPEI_PKTX_IN_BP(offset) (0x0000000000003800ull + ((offset) & 31) * 16)
+#define CVMX_NPEI_PKTX_SLIST_BADDR(offset) (0x0000000000001400ull + ((offset) & 31) * 16)
+#define CVMX_NPEI_PKTX_SLIST_BAOFF_DBELL(offset) (0x0000000000001800ull + ((offset) & 31) * 16)
+#define CVMX_NPEI_PKTX_SLIST_FIFO_RSIZE(offset) (0x0000000000001C00ull + ((offset) & 31) * 16)
+#define CVMX_NPEI_PKT_CNT_INT (0x0000000000001110ull)
+#define CVMX_NPEI_PKT_CNT_INT_ENB (0x0000000000001130ull)
+#define CVMX_NPEI_PKT_DATA_OUT_ES (0x00000000000010B0ull)
+#define CVMX_NPEI_PKT_DATA_OUT_NS (0x00000000000010A0ull)
+#define CVMX_NPEI_PKT_DATA_OUT_ROR (0x0000000000001090ull)
+#define CVMX_NPEI_PKT_DPADDR (0x0000000000001080ull)
+#define CVMX_NPEI_PKT_INPUT_CONTROL (0x0000000000001150ull)
+#define CVMX_NPEI_PKT_INSTR_ENB (0x0000000000001000ull)
+#define CVMX_NPEI_PKT_INSTR_RD_SIZE (0x0000000000001190ull)
+#define CVMX_NPEI_PKT_INSTR_SIZE (0x0000000000001020ull)
+#define CVMX_NPEI_PKT_INT_LEVELS (0x0000000000001100ull)
+#define CVMX_NPEI_PKT_IN_BP (0x00000000000006B0ull)
+#define CVMX_NPEI_PKT_IN_DONEX_CNTS(offset) (0x0000000000002000ull + ((offset) & 31) * 16)
+#define CVMX_NPEI_PKT_IN_INSTR_COUNTS (0x00000000000006A0ull)
+#define CVMX_NPEI_PKT_IN_PCIE_PORT (0x00000000000011A0ull)
+#define CVMX_NPEI_PKT_IPTR (0x0000000000001070ull)
+#define CVMX_NPEI_PKT_OUTPUT_WMARK (0x0000000000001160ull)
+#define CVMX_NPEI_PKT_OUT_BMODE (0x00000000000010D0ull)
+#define CVMX_NPEI_PKT_OUT_ENB (0x0000000000001010ull)
+#define CVMX_NPEI_PKT_PCIE_PORT (0x00000000000010E0ull)
+#define CVMX_NPEI_PKT_PORT_IN_RST (0x0000000000000690ull)
+#define CVMX_NPEI_PKT_SLIST_ES (0x0000000000001050ull)
+#define CVMX_NPEI_PKT_SLIST_ID_SIZE (0x0000000000001180ull)
+#define CVMX_NPEI_PKT_SLIST_NS (0x0000000000001040ull)
+#define CVMX_NPEI_PKT_SLIST_ROR (0x0000000000001030ull)
+#define CVMX_NPEI_PKT_TIME_INT (0x0000000000001120ull)
+#define CVMX_NPEI_PKT_TIME_INT_ENB (0x0000000000001140ull)
+#define CVMX_NPEI_RSL_INT_BLOCKS (0x0000000000000520ull)
+#define CVMX_NPEI_SCRATCH_1 (0x0000000000000270ull)
+#define CVMX_NPEI_STATE1 (0x0000000000000620ull)
+#define CVMX_NPEI_STATE2 (0x0000000000000630ull)
+#define CVMX_NPEI_STATE3 (0x0000000000000640ull)
+#define CVMX_NPEI_WINDOW_CTL (0x0000000000000380ull)
+#define CVMX_NPEI_WIN_RD_ADDR (0x0000000000000210ull)
+#define CVMX_NPEI_WIN_RD_DATA (0x0000000000000240ull)
+#define CVMX_NPEI_WIN_WR_ADDR (0x0000000000000200ull)
+#define CVMX_NPEI_WIN_WR_DATA (0x0000000000000220ull)
+#define CVMX_NPEI_WIN_WR_MASK (0x0000000000000230ull)
union cvmx_npei_bar1_indexx {
uint32_t u32;
@@ -248,9 +156,7 @@ union cvmx_npei_bist_status {
uint64_t u64;
struct cvmx_npei_bist_status_s {
uint64_t pkt_rdf:1;
- uint64_t pkt_pmem:1;
- uint64_t pkt_p1:1;
- uint64_t reserved_60_60:1;
+ uint64_t reserved_60_62:3;
uint64_t pcr_gim:1;
uint64_t pkt_pif:1;
uint64_t pcsr_int:1;
@@ -301,9 +207,7 @@ union cvmx_npei_bist_status {
} s;
struct cvmx_npei_bist_status_cn52xx {
uint64_t pkt_rdf:1;
- uint64_t pkt_pmem:1;
- uint64_t pkt_p1:1;
- uint64_t reserved_60_60:1;
+ uint64_t reserved_60_62:3;
uint64_t pcr_gim:1;
uint64_t pkt_pif:1;
uint64_t pcsr_int:1;
@@ -410,66 +314,7 @@ union cvmx_npei_bist_status {
uint64_t msi:1;
uint64_t ncb_cmd:1;
} cn52xxp1;
- struct cvmx_npei_bist_status_cn56xx {
- uint64_t pkt_rdf:1;
- uint64_t reserved_60_62:3;
- uint64_t pcr_gim:1;
- uint64_t pkt_pif:1;
- uint64_t pcsr_int:1;
- uint64_t pcsr_im:1;
- uint64_t pcsr_cnt:1;
- uint64_t pcsr_id:1;
- uint64_t pcsr_sl:1;
- uint64_t pkt_imem:1;
- uint64_t pkt_pfm:1;
- uint64_t pkt_pof:1;
- uint64_t reserved_48_49:2;
- uint64_t pkt_pop0:1;
- uint64_t pkt_pop1:1;
- uint64_t d0_mem:1;
- uint64_t d1_mem:1;
- uint64_t d2_mem:1;
- uint64_t d3_mem:1;
- uint64_t d4_mem:1;
- uint64_t ds_mem:1;
- uint64_t reserved_36_39:4;
- uint64_t d0_pst:1;
- uint64_t d1_pst:1;
- uint64_t d2_pst:1;
- uint64_t d3_pst:1;
- uint64_t d4_pst:1;
- uint64_t n2p0_c:1;
- uint64_t n2p0_o:1;
- uint64_t n2p1_c:1;
- uint64_t n2p1_o:1;
- uint64_t cpl_p0:1;
- uint64_t cpl_p1:1;
- uint64_t p2n1_po:1;
- uint64_t p2n1_no:1;
- uint64_t p2n1_co:1;
- uint64_t p2n0_po:1;
- uint64_t p2n0_no:1;
- uint64_t p2n0_co:1;
- uint64_t p2n0_c0:1;
- uint64_t p2n0_c1:1;
- uint64_t p2n0_n:1;
- uint64_t p2n0_p0:1;
- uint64_t p2n0_p1:1;
- uint64_t p2n1_c0:1;
- uint64_t p2n1_c1:1;
- uint64_t p2n1_n:1;
- uint64_t p2n1_p0:1;
- uint64_t p2n1_p1:1;
- uint64_t csm0:1;
- uint64_t csm1:1;
- uint64_t dif0:1;
- uint64_t dif1:1;
- uint64_t dif2:1;
- uint64_t dif3:1;
- uint64_t dif4:1;
- uint64_t msi:1;
- uint64_t ncb_cmd:1;
- } cn56xx;
+ struct cvmx_npei_bist_status_cn52xx cn56xx;
struct cvmx_npei_bist_status_cn56xxp1 {
uint64_t reserved_58_63:6;
uint64_t pcsr_int:1;
@@ -536,7 +381,16 @@ union cvmx_npei_bist_status {
union cvmx_npei_bist_status2 {
uint64_t u64;
struct cvmx_npei_bist_status2_s {
- uint64_t reserved_5_63:59;
+ uint64_t reserved_14_63:50;
+ uint64_t prd_tag:1;
+ uint64_t prd_st0:1;
+ uint64_t prd_st1:1;
+ uint64_t prd_err:1;
+ uint64_t nrd_st:1;
+ uint64_t nwe_st:1;
+ uint64_t nwe_wr0:1;
+ uint64_t nwe_wr1:1;
+ uint64_t pkt_rd:1;
uint64_t psc_p0:1;
uint64_t psc_p1:1;
uint64_t pkt_gd:1;
@@ -630,8 +484,7 @@ union cvmx_npei_ctl_status {
} cn52xxp1;
struct cvmx_npei_ctl_status_s cn56xx;
struct cvmx_npei_ctl_status_cn56xxp1 {
- uint64_t reserved_16_63:48;
- uint64_t ring_en:1;
+ uint64_t reserved_15_63:49;
uint64_t lnk_rst:1;
uint64_t arb:1;
uint64_t pkt_bp:4;
@@ -756,14 +609,14 @@ union cvmx_npei_dmax_ibuff_saddr {
uint64_t saddr:29;
uint64_t reserved_0_6:7;
} s;
- struct cvmx_npei_dmax_ibuff_saddr_cn52xx {
+ struct cvmx_npei_dmax_ibuff_saddr_s cn52xx;
+ struct cvmx_npei_dmax_ibuff_saddr_cn52xxp1 {
uint64_t reserved_36_63:28;
uint64_t saddr:29;
uint64_t reserved_0_6:7;
- } cn52xx;
- struct cvmx_npei_dmax_ibuff_saddr_cn52xx cn52xxp1;
+ } cn52xxp1;
struct cvmx_npei_dmax_ibuff_saddr_s cn56xx;
- struct cvmx_npei_dmax_ibuff_saddr_cn52xx cn56xxp1;
+ struct cvmx_npei_dmax_ibuff_saddr_cn52xxp1 cn56xxp1;
};
union cvmx_npei_dmax_naddr {
@@ -817,7 +670,8 @@ union cvmx_npei_dma_cnts {
union cvmx_npei_dma_control {
uint64_t u64;
struct cvmx_npei_dma_control_s {
- uint64_t reserved_39_63:25;
+ uint64_t reserved_40_63:24;
+ uint64_t p_32b_m:1;
uint64_t dma4_enb:1;
uint64_t dma3_enb:1;
uint64_t dma2_enb:1;
@@ -853,7 +707,161 @@ union cvmx_npei_dma_control {
uint64_t csize:14;
} cn52xxp1;
struct cvmx_npei_dma_control_s cn56xx;
- struct cvmx_npei_dma_control_s cn56xxp1;
+ struct cvmx_npei_dma_control_cn56xxp1 {
+ uint64_t reserved_39_63:25;
+ uint64_t dma4_enb:1;
+ uint64_t dma3_enb:1;
+ uint64_t dma2_enb:1;
+ uint64_t dma1_enb:1;
+ uint64_t dma0_enb:1;
+ uint64_t b0_lend:1;
+ uint64_t dwb_denb:1;
+ uint64_t dwb_ichk:9;
+ uint64_t fpa_que:3;
+ uint64_t o_add1:1;
+ uint64_t o_ro:1;
+ uint64_t o_ns:1;
+ uint64_t o_es:2;
+ uint64_t o_mode:1;
+ uint64_t csize:14;
+ } cn56xxp1;
+};
+
+union cvmx_npei_dma_pcie_req_num {
+ uint64_t u64;
+ struct cvmx_npei_dma_pcie_req_num_s {
+ uint64_t dma_arb:1;
+ uint64_t reserved_53_62:10;
+ uint64_t pkt_cnt:5;
+ uint64_t reserved_45_47:3;
+ uint64_t dma4_cnt:5;
+ uint64_t reserved_37_39:3;
+ uint64_t dma3_cnt:5;
+ uint64_t reserved_29_31:3;
+ uint64_t dma2_cnt:5;
+ uint64_t reserved_21_23:3;
+ uint64_t dma1_cnt:5;
+ uint64_t reserved_13_15:3;
+ uint64_t dma0_cnt:5;
+ uint64_t reserved_5_7:3;
+ uint64_t dma_cnt:5;
+ } s;
+ struct cvmx_npei_dma_pcie_req_num_s cn52xx;
+ struct cvmx_npei_dma_pcie_req_num_s cn56xx;
+};
+
+union cvmx_npei_dma_state1 {
+ uint64_t u64;
+ struct cvmx_npei_dma_state1_s {
+ uint64_t reserved_40_63:24;
+ uint64_t d4_dwe:8;
+ uint64_t d3_dwe:8;
+ uint64_t d2_dwe:8;
+ uint64_t d1_dwe:8;
+ uint64_t d0_dwe:8;
+ } s;
+ struct cvmx_npei_dma_state1_s cn52xx;
+};
+
+union cvmx_npei_dma_state1_p1 {
+ uint64_t u64;
+ struct cvmx_npei_dma_state1_p1_s {
+ uint64_t reserved_60_63:4;
+ uint64_t d0_difst:7;
+ uint64_t d1_difst:7;
+ uint64_t d2_difst:7;
+ uint64_t d3_difst:7;
+ uint64_t d4_difst:7;
+ uint64_t d0_reqst:5;
+ uint64_t d1_reqst:5;
+ uint64_t d2_reqst:5;
+ uint64_t d3_reqst:5;
+ uint64_t d4_reqst:5;
+ } s;
+ struct cvmx_npei_dma_state1_p1_cn52xxp1 {
+ uint64_t reserved_60_63:4;
+ uint64_t d0_difst:7;
+ uint64_t d1_difst:7;
+ uint64_t d2_difst:7;
+ uint64_t d3_difst:7;
+ uint64_t reserved_25_31:7;
+ uint64_t d0_reqst:5;
+ uint64_t d1_reqst:5;
+ uint64_t d2_reqst:5;
+ uint64_t d3_reqst:5;
+ uint64_t reserved_0_4:5;
+ } cn52xxp1;
+ struct cvmx_npei_dma_state1_p1_s cn56xxp1;
+};
+
+union cvmx_npei_dma_state2 {
+ uint64_t u64;
+ struct cvmx_npei_dma_state2_s {
+ uint64_t reserved_28_63:36;
+ uint64_t ndwe:4;
+ uint64_t reserved_21_23:3;
+ uint64_t ndre:5;
+ uint64_t reserved_10_15:6;
+ uint64_t prd:10;
+ } s;
+ struct cvmx_npei_dma_state2_s cn52xx;
+};
+
+union cvmx_npei_dma_state2_p1 {
+ uint64_t u64;
+ struct cvmx_npei_dma_state2_p1_s {
+ uint64_t reserved_45_63:19;
+ uint64_t d0_dffst:9;
+ uint64_t d1_dffst:9;
+ uint64_t d2_dffst:9;
+ uint64_t d3_dffst:9;
+ uint64_t d4_dffst:9;
+ } s;
+ struct cvmx_npei_dma_state2_p1_cn52xxp1 {
+ uint64_t reserved_45_63:19;
+ uint64_t d0_dffst:9;
+ uint64_t d1_dffst:9;
+ uint64_t d2_dffst:9;
+ uint64_t d3_dffst:9;
+ uint64_t reserved_0_8:9;
+ } cn52xxp1;
+ struct cvmx_npei_dma_state2_p1_s cn56xxp1;
+};
+
+union cvmx_npei_dma_state3_p1 {
+ uint64_t u64;
+ struct cvmx_npei_dma_state3_p1_s {
+ uint64_t reserved_60_63:4;
+ uint64_t d0_drest:15;
+ uint64_t d1_drest:15;
+ uint64_t d2_drest:15;
+ uint64_t d3_drest:15;
+ } s;
+ struct cvmx_npei_dma_state3_p1_s cn52xxp1;
+ struct cvmx_npei_dma_state3_p1_s cn56xxp1;
+};
+
+union cvmx_npei_dma_state4_p1 {
+ uint64_t u64;
+ struct cvmx_npei_dma_state4_p1_s {
+ uint64_t reserved_52_63:12;
+ uint64_t d0_dwest:13;
+ uint64_t d1_dwest:13;
+ uint64_t d2_dwest:13;
+ uint64_t d3_dwest:13;
+ } s;
+ struct cvmx_npei_dma_state4_p1_s cn52xxp1;
+ struct cvmx_npei_dma_state4_p1_s cn56xxp1;
+};
+
+union cvmx_npei_dma_state5_p1 {
+ uint64_t u64;
+ struct cvmx_npei_dma_state5_p1_s {
+ uint64_t reserved_28_63:36;
+ uint64_t d4_drest:15;
+ uint64_t d4_dwest:13;
+ } s;
+ struct cvmx_npei_dma_state5_p1_s cn56xxp1;
};
union cvmx_npei_int_a_enb {
@@ -871,17 +879,7 @@ union cvmx_npei_int_a_enb {
uint64_t dma1_cpl:1;
uint64_t dma0_cpl:1;
} s;
- struct cvmx_npei_int_a_enb_cn52xx {
- uint64_t reserved_8_63:56;
- uint64_t p1_rdlk:1;
- uint64_t p0_rdlk:1;
- uint64_t pgl_err:1;
- uint64_t pdi_err:1;
- uint64_t pop_err:1;
- uint64_t pins_err:1;
- uint64_t dma1_cpl:1;
- uint64_t dma0_cpl:1;
- } cn52xx;
+ struct cvmx_npei_int_a_enb_s cn52xx;
struct cvmx_npei_int_a_enb_cn52xxp1 {
uint64_t reserved_2_63:62;
uint64_t dma1_cpl:1;
@@ -905,16 +903,7 @@ union cvmx_npei_int_a_enb2 {
uint64_t dma1_cpl:1;
uint64_t dma0_cpl:1;
} s;
- struct cvmx_npei_int_a_enb2_cn52xx {
- uint64_t reserved_8_63:56;
- uint64_t p1_rdlk:1;
- uint64_t p0_rdlk:1;
- uint64_t pgl_err:1;
- uint64_t pdi_err:1;
- uint64_t pop_err:1;
- uint64_t pins_err:1;
- uint64_t reserved_0_1:2;
- } cn52xx;
+ struct cvmx_npei_int_a_enb2_s cn52xx;
struct cvmx_npei_int_a_enb2_cn52xxp1 {
uint64_t reserved_2_63:62;
uint64_t dma1_cpl:1;
@@ -938,17 +927,7 @@ union cvmx_npei_int_a_sum {
uint64_t dma1_cpl:1;
uint64_t dma0_cpl:1;
} s;
- struct cvmx_npei_int_a_sum_cn52xx {
- uint64_t reserved_8_63:56;
- uint64_t p1_rdlk:1;
- uint64_t p0_rdlk:1;
- uint64_t pgl_err:1;
- uint64_t pdi_err:1;
- uint64_t pop_err:1;
- uint64_t pins_err:1;
- uint64_t dma1_cpl:1;
- uint64_t dma0_cpl:1;
- } cn52xx;
+ struct cvmx_npei_int_a_sum_s cn52xx;
struct cvmx_npei_int_a_sum_cn52xxp1 {
uint64_t reserved_2_63:62;
uint64_t dma1_cpl:1;
@@ -1550,10 +1529,7 @@ union cvmx_npei_int_sum {
uint64_t c0_se:1;
uint64_t reserved_20_20:1;
uint64_t c0_aeri:1;
- uint64_t ptime:1;
- uint64_t pcnt:1;
- uint64_t pidbof:1;
- uint64_t psldbof:1;
+ uint64_t reserved_15_18:4;
uint64_t dtime1:1;
uint64_t dtime0:1;
uint64_t dcnt1:1;
@@ -1959,7 +1935,6 @@ union cvmx_npei_pktx_cnts {
} s;
struct cvmx_npei_pktx_cnts_s cn52xx;
struct cvmx_npei_pktx_cnts_s cn56xx;
- struct cvmx_npei_pktx_cnts_s cn56xxp1;
};
union cvmx_npei_pktx_in_bp {
@@ -1970,7 +1945,6 @@ union cvmx_npei_pktx_in_bp {
} s;
struct cvmx_npei_pktx_in_bp_s cn52xx;
struct cvmx_npei_pktx_in_bp_s cn56xx;
- struct cvmx_npei_pktx_in_bp_s cn56xxp1;
};
union cvmx_npei_pktx_instr_baddr {
@@ -1981,7 +1955,6 @@ union cvmx_npei_pktx_instr_baddr {
} s;
struct cvmx_npei_pktx_instr_baddr_s cn52xx;
struct cvmx_npei_pktx_instr_baddr_s cn56xx;
- struct cvmx_npei_pktx_instr_baddr_s cn56xxp1;
};
union cvmx_npei_pktx_instr_baoff_dbell {
@@ -1992,7 +1965,6 @@ union cvmx_npei_pktx_instr_baoff_dbell {
} s;
struct cvmx_npei_pktx_instr_baoff_dbell_s cn52xx;
struct cvmx_npei_pktx_instr_baoff_dbell_s cn56xx;
- struct cvmx_npei_pktx_instr_baoff_dbell_s cn56xxp1;
};
union cvmx_npei_pktx_instr_fifo_rsize {
@@ -2006,7 +1978,6 @@ union cvmx_npei_pktx_instr_fifo_rsize {
} s;
struct cvmx_npei_pktx_instr_fifo_rsize_s cn52xx;
struct cvmx_npei_pktx_instr_fifo_rsize_s cn56xx;
- struct cvmx_npei_pktx_instr_fifo_rsize_s cn56xxp1;
};
union cvmx_npei_pktx_instr_header {
@@ -2014,21 +1985,20 @@ union cvmx_npei_pktx_instr_header {
struct cvmx_npei_pktx_instr_header_s {
uint64_t reserved_44_63:20;
uint64_t pbp:1;
- uint64_t rsv_f:5;
+ uint64_t reserved_38_42:5;
uint64_t rparmode:2;
- uint64_t rsv_e:1;
+ uint64_t reserved_35_35:1;
uint64_t rskp_len:7;
- uint64_t rsv_d:6;
+ uint64_t reserved_22_27:6;
uint64_t use_ihdr:1;
- uint64_t rsv_c:5;
+ uint64_t reserved_16_20:5;
uint64_t par_mode:2;
- uint64_t rsv_b:1;
+ uint64_t reserved_13_13:1;
uint64_t skp_len:7;
- uint64_t rsv_a:6;
+ uint64_t reserved_0_5:6;
} s;
struct cvmx_npei_pktx_instr_header_s cn52xx;
struct cvmx_npei_pktx_instr_header_s cn56xx;
- struct cvmx_npei_pktx_instr_header_s cn56xxp1;
};
union cvmx_npei_pktx_slist_baddr {
@@ -2039,7 +2009,6 @@ union cvmx_npei_pktx_slist_baddr {
} s;
struct cvmx_npei_pktx_slist_baddr_s cn52xx;
struct cvmx_npei_pktx_slist_baddr_s cn56xx;
- struct cvmx_npei_pktx_slist_baddr_s cn56xxp1;
};
union cvmx_npei_pktx_slist_baoff_dbell {
@@ -2050,7 +2019,6 @@ union cvmx_npei_pktx_slist_baoff_dbell {
} s;
struct cvmx_npei_pktx_slist_baoff_dbell_s cn52xx;
struct cvmx_npei_pktx_slist_baoff_dbell_s cn56xx;
- struct cvmx_npei_pktx_slist_baoff_dbell_s cn56xxp1;
};
union cvmx_npei_pktx_slist_fifo_rsize {
@@ -2061,7 +2029,6 @@ union cvmx_npei_pktx_slist_fifo_rsize {
} s;
struct cvmx_npei_pktx_slist_fifo_rsize_s cn52xx;
struct cvmx_npei_pktx_slist_fifo_rsize_s cn56xx;
- struct cvmx_npei_pktx_slist_fifo_rsize_s cn56xxp1;
};
union cvmx_npei_pkt_cnt_int {
@@ -2072,7 +2039,6 @@ union cvmx_npei_pkt_cnt_int {
} s;
struct cvmx_npei_pkt_cnt_int_s cn52xx;
struct cvmx_npei_pkt_cnt_int_s cn56xx;
- struct cvmx_npei_pkt_cnt_int_s cn56xxp1;
};
union cvmx_npei_pkt_cnt_int_enb {
@@ -2083,7 +2049,6 @@ union cvmx_npei_pkt_cnt_int_enb {
} s;
struct cvmx_npei_pkt_cnt_int_enb_s cn52xx;
struct cvmx_npei_pkt_cnt_int_enb_s cn56xx;
- struct cvmx_npei_pkt_cnt_int_enb_s cn56xxp1;
};
union cvmx_npei_pkt_data_out_es {
@@ -2093,7 +2058,6 @@ union cvmx_npei_pkt_data_out_es {
} s;
struct cvmx_npei_pkt_data_out_es_s cn52xx;
struct cvmx_npei_pkt_data_out_es_s cn56xx;
- struct cvmx_npei_pkt_data_out_es_s cn56xxp1;
};
union cvmx_npei_pkt_data_out_ns {
@@ -2104,7 +2068,6 @@ union cvmx_npei_pkt_data_out_ns {
} s;
struct cvmx_npei_pkt_data_out_ns_s cn52xx;
struct cvmx_npei_pkt_data_out_ns_s cn56xx;
- struct cvmx_npei_pkt_data_out_ns_s cn56xxp1;
};
union cvmx_npei_pkt_data_out_ror {
@@ -2115,7 +2078,6 @@ union cvmx_npei_pkt_data_out_ror {
} s;
struct cvmx_npei_pkt_data_out_ror_s cn52xx;
struct cvmx_npei_pkt_data_out_ror_s cn56xx;
- struct cvmx_npei_pkt_data_out_ror_s cn56xxp1;
};
union cvmx_npei_pkt_dpaddr {
@@ -2126,7 +2088,6 @@ union cvmx_npei_pkt_dpaddr {
} s;
struct cvmx_npei_pkt_dpaddr_s cn52xx;
struct cvmx_npei_pkt_dpaddr_s cn56xx;
- struct cvmx_npei_pkt_dpaddr_s cn56xxp1;
};
union cvmx_npei_pkt_in_bp {
@@ -2135,6 +2096,7 @@ union cvmx_npei_pkt_in_bp {
uint64_t reserved_32_63:32;
uint64_t bp:32;
} s;
+ struct cvmx_npei_pkt_in_bp_s cn52xx;
struct cvmx_npei_pkt_in_bp_s cn56xx;
};
@@ -2146,7 +2108,6 @@ union cvmx_npei_pkt_in_donex_cnts {
} s;
struct cvmx_npei_pkt_in_donex_cnts_s cn52xx;
struct cvmx_npei_pkt_in_donex_cnts_s cn56xx;
- struct cvmx_npei_pkt_in_donex_cnts_s cn56xxp1;
};
union cvmx_npei_pkt_in_instr_counts {
@@ -2184,7 +2145,6 @@ union cvmx_npei_pkt_input_control {
} s;
struct cvmx_npei_pkt_input_control_s cn52xx;
struct cvmx_npei_pkt_input_control_s cn56xx;
- struct cvmx_npei_pkt_input_control_s cn56xxp1;
};
union cvmx_npei_pkt_instr_enb {
@@ -2195,7 +2155,6 @@ union cvmx_npei_pkt_instr_enb {
} s;
struct cvmx_npei_pkt_instr_enb_s cn52xx;
struct cvmx_npei_pkt_instr_enb_s cn56xx;
- struct cvmx_npei_pkt_instr_enb_s cn56xxp1;
};
union cvmx_npei_pkt_instr_rd_size {
@@ -2215,7 +2174,6 @@ union cvmx_npei_pkt_instr_size {
} s;
struct cvmx_npei_pkt_instr_size_s cn52xx;
struct cvmx_npei_pkt_instr_size_s cn56xx;
- struct cvmx_npei_pkt_instr_size_s cn56xxp1;
};
union cvmx_npei_pkt_int_levels {
@@ -2227,7 +2185,6 @@ union cvmx_npei_pkt_int_levels {
} s;
struct cvmx_npei_pkt_int_levels_s cn52xx;
struct cvmx_npei_pkt_int_levels_s cn56xx;
- struct cvmx_npei_pkt_int_levels_s cn56xxp1;
};
union cvmx_npei_pkt_iptr {
@@ -2238,7 +2195,6 @@ union cvmx_npei_pkt_iptr {
} s;
struct cvmx_npei_pkt_iptr_s cn52xx;
struct cvmx_npei_pkt_iptr_s cn56xx;
- struct cvmx_npei_pkt_iptr_s cn56xxp1;
};
union cvmx_npei_pkt_out_bmode {
@@ -2249,7 +2205,6 @@ union cvmx_npei_pkt_out_bmode {
} s;
struct cvmx_npei_pkt_out_bmode_s cn52xx;
struct cvmx_npei_pkt_out_bmode_s cn56xx;
- struct cvmx_npei_pkt_out_bmode_s cn56xxp1;
};
union cvmx_npei_pkt_out_enb {
@@ -2260,7 +2215,6 @@ union cvmx_npei_pkt_out_enb {
} s;
struct cvmx_npei_pkt_out_enb_s cn52xx;
struct cvmx_npei_pkt_out_enb_s cn56xx;
- struct cvmx_npei_pkt_out_enb_s cn56xxp1;
};
union cvmx_npei_pkt_output_wmark {
@@ -2280,7 +2234,6 @@ union cvmx_npei_pkt_pcie_port {
} s;
struct cvmx_npei_pkt_pcie_port_s cn52xx;
struct cvmx_npei_pkt_pcie_port_s cn56xx;
- struct cvmx_npei_pkt_pcie_port_s cn56xxp1;
};
union cvmx_npei_pkt_port_in_rst {
@@ -2300,7 +2253,6 @@ union cvmx_npei_pkt_slist_es {
} s;
struct cvmx_npei_pkt_slist_es_s cn52xx;
struct cvmx_npei_pkt_slist_es_s cn56xx;
- struct cvmx_npei_pkt_slist_es_s cn56xxp1;
};
union cvmx_npei_pkt_slist_id_size {
@@ -2312,7 +2264,6 @@ union cvmx_npei_pkt_slist_id_size {
} s;
struct cvmx_npei_pkt_slist_id_size_s cn52xx;
struct cvmx_npei_pkt_slist_id_size_s cn56xx;
- struct cvmx_npei_pkt_slist_id_size_s cn56xxp1;
};
union cvmx_npei_pkt_slist_ns {
@@ -2323,7 +2274,6 @@ union cvmx_npei_pkt_slist_ns {
} s;
struct cvmx_npei_pkt_slist_ns_s cn52xx;
struct cvmx_npei_pkt_slist_ns_s cn56xx;
- struct cvmx_npei_pkt_slist_ns_s cn56xxp1;
};
union cvmx_npei_pkt_slist_ror {
@@ -2334,7 +2284,6 @@ union cvmx_npei_pkt_slist_ror {
} s;
struct cvmx_npei_pkt_slist_ror_s cn52xx;
struct cvmx_npei_pkt_slist_ror_s cn56xx;
- struct cvmx_npei_pkt_slist_ror_s cn56xxp1;
};
union cvmx_npei_pkt_time_int {
@@ -2345,7 +2294,6 @@ union cvmx_npei_pkt_time_int {
} s;
struct cvmx_npei_pkt_time_int_s cn52xx;
struct cvmx_npei_pkt_time_int_s cn56xx;
- struct cvmx_npei_pkt_time_int_s cn56xxp1;
};
union cvmx_npei_pkt_time_int_enb {
@@ -2356,7 +2304,6 @@ union cvmx_npei_pkt_time_int_enb {
} s;
struct cvmx_npei_pkt_time_int_enb_s cn52xx;
struct cvmx_npei_pkt_time_int_enb_s cn56xx;
- struct cvmx_npei_pkt_time_int_enb_s cn56xxp1;
};
union cvmx_npei_rsl_int_blocks {
@@ -2371,7 +2318,8 @@ union cvmx_npei_rsl_int_blocks {
uint64_t asxpcs0:1;
uint64_t reserved_21_21:1;
uint64_t pip:1;
- uint64_t reserved_18_19:2;
+ uint64_t spx1:1;
+ uint64_t spx0:1;
uint64_t lmc0:1;
uint64_t l2c:1;
uint64_t usb1:1;
@@ -2383,7 +2331,7 @@ union cvmx_npei_rsl_int_blocks {
uint64_t ipd:1;
uint64_t reserved_8_8:1;
uint64_t zip:1;
- uint64_t reserved_6_6:1;
+ uint64_t dfa:1;
uint64_t fpa:1;
uint64_t key:1;
uint64_t npei:1;
@@ -2393,37 +2341,8 @@ union cvmx_npei_rsl_int_blocks {
} s;
struct cvmx_npei_rsl_int_blocks_s cn52xx;
struct cvmx_npei_rsl_int_blocks_s cn52xxp1;
- struct cvmx_npei_rsl_int_blocks_cn56xx {
- uint64_t reserved_31_63:33;
- uint64_t iob:1;
- uint64_t lmc1:1;
- uint64_t agl:1;
- uint64_t reserved_24_27:4;
- uint64_t asxpcs1:1;
- uint64_t asxpcs0:1;
- uint64_t reserved_21_21:1;
- uint64_t pip:1;
- uint64_t reserved_18_19:2;
- uint64_t lmc0:1;
- uint64_t l2c:1;
- uint64_t reserved_15_15:1;
- uint64_t rad:1;
- uint64_t usb:1;
- uint64_t pow:1;
- uint64_t tim:1;
- uint64_t pko:1;
- uint64_t ipd:1;
- uint64_t reserved_8_8:1;
- uint64_t zip:1;
- uint64_t reserved_6_6:1;
- uint64_t fpa:1;
- uint64_t key:1;
- uint64_t npei:1;
- uint64_t gmx1:1;
- uint64_t gmx0:1;
- uint64_t mio:1;
- } cn56xx;
- struct cvmx_npei_rsl_int_blocks_cn56xx cn56xxp1;
+ struct cvmx_npei_rsl_int_blocks_s cn56xx;
+ struct cvmx_npei_rsl_int_blocks_s cn56xxp1;
};
union cvmx_npei_scratch_1 {
diff --git a/arch/mips/include/asm/octeon/cvmx-npi-defs.h b/arch/mips/include/asm/octeon/cvmx-npi-defs.h
index 4e03cd8561e3..f089c780060f 100644
--- a/arch/mips/include/asm/octeon/cvmx-npi-defs.h
+++ b/arch/mips/include/asm/octeon/cvmx-npi-defs.h
@@ -4,7 +4,7 @@
* Contact: support@caviumnetworks.com
* This file is part of the OCTEON SDK
*
- * Copyright (c) 2003-2008 Cavium Networks
+ * Copyright (c) 2003-2010 Cavium Networks
*
* This file is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License, Version 2, as
@@ -28,246 +28,126 @@
#ifndef __CVMX_NPI_DEFS_H__
#define __CVMX_NPI_DEFS_H__
-#define CVMX_NPI_BASE_ADDR_INPUT0 \
- CVMX_ADD_IO_SEG(0x00011F0000000070ull)
-#define CVMX_NPI_BASE_ADDR_INPUT1 \
- CVMX_ADD_IO_SEG(0x00011F0000000080ull)
-#define CVMX_NPI_BASE_ADDR_INPUT2 \
- CVMX_ADD_IO_SEG(0x00011F0000000090ull)
-#define CVMX_NPI_BASE_ADDR_INPUT3 \
- CVMX_ADD_IO_SEG(0x00011F00000000A0ull)
-#define CVMX_NPI_BASE_ADDR_INPUTX(offset) \
- CVMX_ADD_IO_SEG(0x00011F0000000070ull + (((offset) & 3) * 16))
-#define CVMX_NPI_BASE_ADDR_OUTPUT0 \
- CVMX_ADD_IO_SEG(0x00011F00000000B8ull)
-#define CVMX_NPI_BASE_ADDR_OUTPUT1 \
- CVMX_ADD_IO_SEG(0x00011F00000000C0ull)
-#define CVMX_NPI_BASE_ADDR_OUTPUT2 \
- CVMX_ADD_IO_SEG(0x00011F00000000C8ull)
-#define CVMX_NPI_BASE_ADDR_OUTPUT3 \
- CVMX_ADD_IO_SEG(0x00011F00000000D0ull)
-#define CVMX_NPI_BASE_ADDR_OUTPUTX(offset) \
- CVMX_ADD_IO_SEG(0x00011F00000000B8ull + (((offset) & 3) * 8))
-#define CVMX_NPI_BIST_STATUS \
- CVMX_ADD_IO_SEG(0x00011F00000003F8ull)
-#define CVMX_NPI_BUFF_SIZE_OUTPUT0 \
- CVMX_ADD_IO_SEG(0x00011F00000000E0ull)
-#define CVMX_NPI_BUFF_SIZE_OUTPUT1 \
- CVMX_ADD_IO_SEG(0x00011F00000000E8ull)
-#define CVMX_NPI_BUFF_SIZE_OUTPUT2 \
- CVMX_ADD_IO_SEG(0x00011F00000000F0ull)
-#define CVMX_NPI_BUFF_SIZE_OUTPUT3 \
- CVMX_ADD_IO_SEG(0x00011F00000000F8ull)
-#define CVMX_NPI_BUFF_SIZE_OUTPUTX(offset) \
- CVMX_ADD_IO_SEG(0x00011F00000000E0ull + (((offset) & 3) * 8))
-#define CVMX_NPI_COMP_CTL \
- CVMX_ADD_IO_SEG(0x00011F0000000218ull)
-#define CVMX_NPI_CTL_STATUS \
- CVMX_ADD_IO_SEG(0x00011F0000000010ull)
-#define CVMX_NPI_DBG_SELECT \
- CVMX_ADD_IO_SEG(0x00011F0000000008ull)
-#define CVMX_NPI_DMA_CONTROL \
- CVMX_ADD_IO_SEG(0x00011F0000000128ull)
-#define CVMX_NPI_DMA_HIGHP_COUNTS \
- CVMX_ADD_IO_SEG(0x00011F0000000148ull)
-#define CVMX_NPI_DMA_HIGHP_NADDR \
- CVMX_ADD_IO_SEG(0x00011F0000000158ull)
-#define CVMX_NPI_DMA_LOWP_COUNTS \
- CVMX_ADD_IO_SEG(0x00011F0000000140ull)
-#define CVMX_NPI_DMA_LOWP_NADDR \
- CVMX_ADD_IO_SEG(0x00011F0000000150ull)
-#define CVMX_NPI_HIGHP_DBELL \
- CVMX_ADD_IO_SEG(0x00011F0000000120ull)
-#define CVMX_NPI_HIGHP_IBUFF_SADDR \
- CVMX_ADD_IO_SEG(0x00011F0000000110ull)
-#define CVMX_NPI_INPUT_CONTROL \
- CVMX_ADD_IO_SEG(0x00011F0000000138ull)
-#define CVMX_NPI_INT_ENB \
- CVMX_ADD_IO_SEG(0x00011F0000000020ull)
-#define CVMX_NPI_INT_SUM \
- CVMX_ADD_IO_SEG(0x00011F0000000018ull)
-#define CVMX_NPI_LOWP_DBELL \
- CVMX_ADD_IO_SEG(0x00011F0000000118ull)
-#define CVMX_NPI_LOWP_IBUFF_SADDR \
- CVMX_ADD_IO_SEG(0x00011F0000000108ull)
-#define CVMX_NPI_MEM_ACCESS_SUBID3 \
- CVMX_ADD_IO_SEG(0x00011F0000000028ull)
-#define CVMX_NPI_MEM_ACCESS_SUBID4 \
- CVMX_ADD_IO_SEG(0x00011F0000000030ull)
-#define CVMX_NPI_MEM_ACCESS_SUBID5 \
- CVMX_ADD_IO_SEG(0x00011F0000000038ull)
-#define CVMX_NPI_MEM_ACCESS_SUBID6 \
- CVMX_ADD_IO_SEG(0x00011F0000000040ull)
-#define CVMX_NPI_MEM_ACCESS_SUBIDX(offset) \
- CVMX_ADD_IO_SEG(0x00011F0000000028ull + (((offset) & 7) * 8) - 8 * 3)
-#define CVMX_NPI_MSI_RCV \
- (0x0000000000000190ull)
-#define CVMX_NPI_NPI_MSI_RCV \
- CVMX_ADD_IO_SEG(0x00011F0000001190ull)
-#define CVMX_NPI_NUM_DESC_OUTPUT0 \
- CVMX_ADD_IO_SEG(0x00011F0000000050ull)
-#define CVMX_NPI_NUM_DESC_OUTPUT1 \
- CVMX_ADD_IO_SEG(0x00011F0000000058ull)
-#define CVMX_NPI_NUM_DESC_OUTPUT2 \
- CVMX_ADD_IO_SEG(0x00011F0000000060ull)
-#define CVMX_NPI_NUM_DESC_OUTPUT3 \
- CVMX_ADD_IO_SEG(0x00011F0000000068ull)
-#define CVMX_NPI_NUM_DESC_OUTPUTX(offset) \
- CVMX_ADD_IO_SEG(0x00011F0000000050ull + (((offset) & 3) * 8))
-#define CVMX_NPI_OUTPUT_CONTROL \
- CVMX_ADD_IO_SEG(0x00011F0000000100ull)
-#define CVMX_NPI_P0_DBPAIR_ADDR \
- CVMX_ADD_IO_SEG(0x00011F0000000180ull)
-#define CVMX_NPI_P0_INSTR_ADDR \
- CVMX_ADD_IO_SEG(0x00011F00000001C0ull)
-#define CVMX_NPI_P0_INSTR_CNTS \
- CVMX_ADD_IO_SEG(0x00011F00000001A0ull)
-#define CVMX_NPI_P0_PAIR_CNTS \
- CVMX_ADD_IO_SEG(0x00011F0000000160ull)
-#define CVMX_NPI_P1_DBPAIR_ADDR \
- CVMX_ADD_IO_SEG(0x00011F0000000188ull)
-#define CVMX_NPI_P1_INSTR_ADDR \
- CVMX_ADD_IO_SEG(0x00011F00000001C8ull)
-#define CVMX_NPI_P1_INSTR_CNTS \
- CVMX_ADD_IO_SEG(0x00011F00000001A8ull)
-#define CVMX_NPI_P1_PAIR_CNTS \
- CVMX_ADD_IO_SEG(0x00011F0000000168ull)
-#define CVMX_NPI_P2_DBPAIR_ADDR \
- CVMX_ADD_IO_SEG(0x00011F0000000190ull)
-#define CVMX_NPI_P2_INSTR_ADDR \
- CVMX_ADD_IO_SEG(0x00011F00000001D0ull)
-#define CVMX_NPI_P2_INSTR_CNTS \
- CVMX_ADD_IO_SEG(0x00011F00000001B0ull)
-#define CVMX_NPI_P2_PAIR_CNTS \
- CVMX_ADD_IO_SEG(0x00011F0000000170ull)
-#define CVMX_NPI_P3_DBPAIR_ADDR \
- CVMX_ADD_IO_SEG(0x00011F0000000198ull)
-#define CVMX_NPI_P3_INSTR_ADDR \
- CVMX_ADD_IO_SEG(0x00011F00000001D8ull)
-#define CVMX_NPI_P3_INSTR_CNTS \
- CVMX_ADD_IO_SEG(0x00011F00000001B8ull)
-#define CVMX_NPI_P3_PAIR_CNTS \
- CVMX_ADD_IO_SEG(0x00011F0000000178ull)
-#define CVMX_NPI_PCI_BAR1_INDEXX(offset) \
- CVMX_ADD_IO_SEG(0x00011F0000001100ull + (((offset) & 31) * 4))
-#define CVMX_NPI_PCI_BIST_REG \
- CVMX_ADD_IO_SEG(0x00011F00000011C0ull)
-#define CVMX_NPI_PCI_BURST_SIZE \
- CVMX_ADD_IO_SEG(0x00011F00000000D8ull)
-#define CVMX_NPI_PCI_CFG00 \
- CVMX_ADD_IO_SEG(0x00011F0000001800ull)
-#define CVMX_NPI_PCI_CFG01 \
- CVMX_ADD_IO_SEG(0x00011F0000001804ull)
-#define CVMX_NPI_PCI_CFG02 \
- CVMX_ADD_IO_SEG(0x00011F0000001808ull)
-#define CVMX_NPI_PCI_CFG03 \
- CVMX_ADD_IO_SEG(0x00011F000000180Cull)
-#define CVMX_NPI_PCI_CFG04 \
- CVMX_ADD_IO_SEG(0x00011F0000001810ull)
-#define CVMX_NPI_PCI_CFG05 \
- CVMX_ADD_IO_SEG(0x00011F0000001814ull)
-#define CVMX_NPI_PCI_CFG06 \
- CVMX_ADD_IO_SEG(0x00011F0000001818ull)
-#define CVMX_NPI_PCI_CFG07 \
- CVMX_ADD_IO_SEG(0x00011F000000181Cull)
-#define CVMX_NPI_PCI_CFG08 \
- CVMX_ADD_IO_SEG(0x00011F0000001820ull)
-#define CVMX_NPI_PCI_CFG09 \
- CVMX_ADD_IO_SEG(0x00011F0000001824ull)
-#define CVMX_NPI_PCI_CFG10 \
- CVMX_ADD_IO_SEG(0x00011F0000001828ull)
-#define CVMX_NPI_PCI_CFG11 \
- CVMX_ADD_IO_SEG(0x00011F000000182Cull)
-#define CVMX_NPI_PCI_CFG12 \
- CVMX_ADD_IO_SEG(0x00011F0000001830ull)
-#define CVMX_NPI_PCI_CFG13 \
- CVMX_ADD_IO_SEG(0x00011F0000001834ull)
-#define CVMX_NPI_PCI_CFG15 \
- CVMX_ADD_IO_SEG(0x00011F000000183Cull)
-#define CVMX_NPI_PCI_CFG16 \
- CVMX_ADD_IO_SEG(0x00011F0000001840ull)
-#define CVMX_NPI_PCI_CFG17 \
- CVMX_ADD_IO_SEG(0x00011F0000001844ull)
-#define CVMX_NPI_PCI_CFG18 \
- CVMX_ADD_IO_SEG(0x00011F0000001848ull)
-#define CVMX_NPI_PCI_CFG19 \
- CVMX_ADD_IO_SEG(0x00011F000000184Cull)
-#define CVMX_NPI_PCI_CFG20 \
- CVMX_ADD_IO_SEG(0x00011F0000001850ull)
-#define CVMX_NPI_PCI_CFG21 \
- CVMX_ADD_IO_SEG(0x00011F0000001854ull)
-#define CVMX_NPI_PCI_CFG22 \
- CVMX_ADD_IO_SEG(0x00011F0000001858ull)
-#define CVMX_NPI_PCI_CFG56 \
- CVMX_ADD_IO_SEG(0x00011F00000018E0ull)
-#define CVMX_NPI_PCI_CFG57 \
- CVMX_ADD_IO_SEG(0x00011F00000018E4ull)
-#define CVMX_NPI_PCI_CFG58 \
- CVMX_ADD_IO_SEG(0x00011F00000018E8ull)
-#define CVMX_NPI_PCI_CFG59 \
- CVMX_ADD_IO_SEG(0x00011F00000018ECull)
-#define CVMX_NPI_PCI_CFG60 \
- CVMX_ADD_IO_SEG(0x00011F00000018F0ull)
-#define CVMX_NPI_PCI_CFG61 \
- CVMX_ADD_IO_SEG(0x00011F00000018F4ull)
-#define CVMX_NPI_PCI_CFG62 \
- CVMX_ADD_IO_SEG(0x00011F00000018F8ull)
-#define CVMX_NPI_PCI_CFG63 \
- CVMX_ADD_IO_SEG(0x00011F00000018FCull)
-#define CVMX_NPI_PCI_CNT_REG \
- CVMX_ADD_IO_SEG(0x00011F00000011B8ull)
-#define CVMX_NPI_PCI_CTL_STATUS_2 \
- CVMX_ADD_IO_SEG(0x00011F000000118Cull)
-#define CVMX_NPI_PCI_INT_ARB_CFG \
- CVMX_ADD_IO_SEG(0x00011F0000000130ull)
-#define CVMX_NPI_PCI_INT_ENB2 \
- CVMX_ADD_IO_SEG(0x00011F00000011A0ull)
-#define CVMX_NPI_PCI_INT_SUM2 \
- CVMX_ADD_IO_SEG(0x00011F0000001198ull)
-#define CVMX_NPI_PCI_READ_CMD \
- CVMX_ADD_IO_SEG(0x00011F0000000048ull)
-#define CVMX_NPI_PCI_READ_CMD_6 \
- CVMX_ADD_IO_SEG(0x00011F0000001180ull)
-#define CVMX_NPI_PCI_READ_CMD_C \
- CVMX_ADD_IO_SEG(0x00011F0000001184ull)
-#define CVMX_NPI_PCI_READ_CMD_E \
- CVMX_ADD_IO_SEG(0x00011F0000001188ull)
-#define CVMX_NPI_PCI_SCM_REG \
- CVMX_ADD_IO_SEG(0x00011F00000011A8ull)
-#define CVMX_NPI_PCI_TSR_REG \
- CVMX_ADD_IO_SEG(0x00011F00000011B0ull)
-#define CVMX_NPI_PORT32_INSTR_HDR \
- CVMX_ADD_IO_SEG(0x00011F00000001F8ull)
-#define CVMX_NPI_PORT33_INSTR_HDR \
- CVMX_ADD_IO_SEG(0x00011F0000000200ull)
-#define CVMX_NPI_PORT34_INSTR_HDR \
- CVMX_ADD_IO_SEG(0x00011F0000000208ull)
-#define CVMX_NPI_PORT35_INSTR_HDR \
- CVMX_ADD_IO_SEG(0x00011F0000000210ull)
-#define CVMX_NPI_PORT_BP_CONTROL \
- CVMX_ADD_IO_SEG(0x00011F00000001F0ull)
-#define CVMX_NPI_PX_DBPAIR_ADDR(offset) \
- CVMX_ADD_IO_SEG(0x00011F0000000180ull + (((offset) & 3) * 8))
-#define CVMX_NPI_PX_INSTR_ADDR(offset) \
- CVMX_ADD_IO_SEG(0x00011F00000001C0ull + (((offset) & 3) * 8))
-#define CVMX_NPI_PX_INSTR_CNTS(offset) \
- CVMX_ADD_IO_SEG(0x00011F00000001A0ull + (((offset) & 3) * 8))
-#define CVMX_NPI_PX_PAIR_CNTS(offset) \
- CVMX_ADD_IO_SEG(0x00011F0000000160ull + (((offset) & 3) * 8))
-#define CVMX_NPI_RSL_INT_BLOCKS \
- CVMX_ADD_IO_SEG(0x00011F0000000000ull)
-#define CVMX_NPI_SIZE_INPUT0 \
- CVMX_ADD_IO_SEG(0x00011F0000000078ull)
-#define CVMX_NPI_SIZE_INPUT1 \
- CVMX_ADD_IO_SEG(0x00011F0000000088ull)
-#define CVMX_NPI_SIZE_INPUT2 \
- CVMX_ADD_IO_SEG(0x00011F0000000098ull)
-#define CVMX_NPI_SIZE_INPUT3 \
- CVMX_ADD_IO_SEG(0x00011F00000000A8ull)
-#define CVMX_NPI_SIZE_INPUTX(offset) \
- CVMX_ADD_IO_SEG(0x00011F0000000078ull + (((offset) & 3) * 16))
-#define CVMX_NPI_WIN_READ_TO \
- CVMX_ADD_IO_SEG(0x00011F00000001E0ull)
+#define CVMX_NPI_BASE_ADDR_INPUT0 CVMX_NPI_BASE_ADDR_INPUTX(0)
+#define CVMX_NPI_BASE_ADDR_INPUT1 CVMX_NPI_BASE_ADDR_INPUTX(1)
+#define CVMX_NPI_BASE_ADDR_INPUT2 CVMX_NPI_BASE_ADDR_INPUTX(2)
+#define CVMX_NPI_BASE_ADDR_INPUT3 CVMX_NPI_BASE_ADDR_INPUTX(3)
+#define CVMX_NPI_BASE_ADDR_INPUTX(offset) (CVMX_ADD_IO_SEG(0x00011F0000000070ull) + ((offset) & 3) * 16)
+#define CVMX_NPI_BASE_ADDR_OUTPUT0 CVMX_NPI_BASE_ADDR_OUTPUTX(0)
+#define CVMX_NPI_BASE_ADDR_OUTPUT1 CVMX_NPI_BASE_ADDR_OUTPUTX(1)
+#define CVMX_NPI_BASE_ADDR_OUTPUT2 CVMX_NPI_BASE_ADDR_OUTPUTX(2)
+#define CVMX_NPI_BASE_ADDR_OUTPUT3 CVMX_NPI_BASE_ADDR_OUTPUTX(3)
+#define CVMX_NPI_BASE_ADDR_OUTPUTX(offset) (CVMX_ADD_IO_SEG(0x00011F00000000B8ull) + ((offset) & 3) * 8)
+#define CVMX_NPI_BIST_STATUS (CVMX_ADD_IO_SEG(0x00011F00000003F8ull))
+#define CVMX_NPI_BUFF_SIZE_OUTPUT0 CVMX_NPI_BUFF_SIZE_OUTPUTX(0)
+#define CVMX_NPI_BUFF_SIZE_OUTPUT1 CVMX_NPI_BUFF_SIZE_OUTPUTX(1)
+#define CVMX_NPI_BUFF_SIZE_OUTPUT2 CVMX_NPI_BUFF_SIZE_OUTPUTX(2)
+#define CVMX_NPI_BUFF_SIZE_OUTPUT3 CVMX_NPI_BUFF_SIZE_OUTPUTX(3)
+#define CVMX_NPI_BUFF_SIZE_OUTPUTX(offset) (CVMX_ADD_IO_SEG(0x00011F00000000E0ull) + ((offset) & 3) * 8)
+#define CVMX_NPI_COMP_CTL (CVMX_ADD_IO_SEG(0x00011F0000000218ull))
+#define CVMX_NPI_CTL_STATUS (CVMX_ADD_IO_SEG(0x00011F0000000010ull))
+#define CVMX_NPI_DBG_SELECT (CVMX_ADD_IO_SEG(0x00011F0000000008ull))
+#define CVMX_NPI_DMA_CONTROL (CVMX_ADD_IO_SEG(0x00011F0000000128ull))
+#define CVMX_NPI_DMA_HIGHP_COUNTS (CVMX_ADD_IO_SEG(0x00011F0000000148ull))
+#define CVMX_NPI_DMA_HIGHP_NADDR (CVMX_ADD_IO_SEG(0x00011F0000000158ull))
+#define CVMX_NPI_DMA_LOWP_COUNTS (CVMX_ADD_IO_SEG(0x00011F0000000140ull))
+#define CVMX_NPI_DMA_LOWP_NADDR (CVMX_ADD_IO_SEG(0x00011F0000000150ull))
+#define CVMX_NPI_HIGHP_DBELL (CVMX_ADD_IO_SEG(0x00011F0000000120ull))
+#define CVMX_NPI_HIGHP_IBUFF_SADDR (CVMX_ADD_IO_SEG(0x00011F0000000110ull))
+#define CVMX_NPI_INPUT_CONTROL (CVMX_ADD_IO_SEG(0x00011F0000000138ull))
+#define CVMX_NPI_INT_ENB (CVMX_ADD_IO_SEG(0x00011F0000000020ull))
+#define CVMX_NPI_INT_SUM (CVMX_ADD_IO_SEG(0x00011F0000000018ull))
+#define CVMX_NPI_LOWP_DBELL (CVMX_ADD_IO_SEG(0x00011F0000000118ull))
+#define CVMX_NPI_LOWP_IBUFF_SADDR (CVMX_ADD_IO_SEG(0x00011F0000000108ull))
+#define CVMX_NPI_MEM_ACCESS_SUBID3 CVMX_NPI_MEM_ACCESS_SUBIDX(3)
+#define CVMX_NPI_MEM_ACCESS_SUBID4 CVMX_NPI_MEM_ACCESS_SUBIDX(4)
+#define CVMX_NPI_MEM_ACCESS_SUBID5 CVMX_NPI_MEM_ACCESS_SUBIDX(5)
+#define CVMX_NPI_MEM_ACCESS_SUBID6 CVMX_NPI_MEM_ACCESS_SUBIDX(6)
+#define CVMX_NPI_MEM_ACCESS_SUBIDX(offset) (CVMX_ADD_IO_SEG(0x00011F0000000028ull) + ((offset) & 7) * 8 - 8*3)
+#define CVMX_NPI_MSI_RCV (0x0000000000000190ull)
+#define CVMX_NPI_NPI_MSI_RCV (CVMX_ADD_IO_SEG(0x00011F0000001190ull))
+#define CVMX_NPI_NUM_DESC_OUTPUT0 CVMX_NPI_NUM_DESC_OUTPUTX(0)
+#define CVMX_NPI_NUM_DESC_OUTPUT1 CVMX_NPI_NUM_DESC_OUTPUTX(1)
+#define CVMX_NPI_NUM_DESC_OUTPUT2 CVMX_NPI_NUM_DESC_OUTPUTX(2)
+#define CVMX_NPI_NUM_DESC_OUTPUT3 CVMX_NPI_NUM_DESC_OUTPUTX(3)
+#define CVMX_NPI_NUM_DESC_OUTPUTX(offset) (CVMX_ADD_IO_SEG(0x00011F0000000050ull) + ((offset) & 3) * 8)
+#define CVMX_NPI_OUTPUT_CONTROL (CVMX_ADD_IO_SEG(0x00011F0000000100ull))
+#define CVMX_NPI_P0_DBPAIR_ADDR CVMX_NPI_PX_DBPAIR_ADDR(0)
+#define CVMX_NPI_P0_INSTR_ADDR CVMX_NPI_PX_INSTR_ADDR(0)
+#define CVMX_NPI_P0_INSTR_CNTS CVMX_NPI_PX_INSTR_CNTS(0)
+#define CVMX_NPI_P0_PAIR_CNTS CVMX_NPI_PX_PAIR_CNTS(0)
+#define CVMX_NPI_P1_DBPAIR_ADDR CVMX_NPI_PX_DBPAIR_ADDR(1)
+#define CVMX_NPI_P1_INSTR_ADDR CVMX_NPI_PX_INSTR_ADDR(1)
+#define CVMX_NPI_P1_INSTR_CNTS CVMX_NPI_PX_INSTR_CNTS(1)
+#define CVMX_NPI_P1_PAIR_CNTS CVMX_NPI_PX_PAIR_CNTS(1)
+#define CVMX_NPI_P2_DBPAIR_ADDR CVMX_NPI_PX_DBPAIR_ADDR(2)
+#define CVMX_NPI_P2_INSTR_ADDR CVMX_NPI_PX_INSTR_ADDR(2)
+#define CVMX_NPI_P2_INSTR_CNTS CVMX_NPI_PX_INSTR_CNTS(2)
+#define CVMX_NPI_P2_PAIR_CNTS CVMX_NPI_PX_PAIR_CNTS(2)
+#define CVMX_NPI_P3_DBPAIR_ADDR CVMX_NPI_PX_DBPAIR_ADDR(3)
+#define CVMX_NPI_P3_INSTR_ADDR CVMX_NPI_PX_INSTR_ADDR(3)
+#define CVMX_NPI_P3_INSTR_CNTS CVMX_NPI_PX_INSTR_CNTS(3)
+#define CVMX_NPI_P3_PAIR_CNTS CVMX_NPI_PX_PAIR_CNTS(3)
+#define CVMX_NPI_PCI_BAR1_INDEXX(offset) (CVMX_ADD_IO_SEG(0x00011F0000001100ull) + ((offset) & 31) * 4)
+#define CVMX_NPI_PCI_BIST_REG (CVMX_ADD_IO_SEG(0x00011F00000011C0ull))
+#define CVMX_NPI_PCI_BURST_SIZE (CVMX_ADD_IO_SEG(0x00011F00000000D8ull))
+#define CVMX_NPI_PCI_CFG00 (CVMX_ADD_IO_SEG(0x00011F0000001800ull))
+#define CVMX_NPI_PCI_CFG01 (CVMX_ADD_IO_SEG(0x00011F0000001804ull))
+#define CVMX_NPI_PCI_CFG02 (CVMX_ADD_IO_SEG(0x00011F0000001808ull))
+#define CVMX_NPI_PCI_CFG03 (CVMX_ADD_IO_SEG(0x00011F000000180Cull))
+#define CVMX_NPI_PCI_CFG04 (CVMX_ADD_IO_SEG(0x00011F0000001810ull))
+#define CVMX_NPI_PCI_CFG05 (CVMX_ADD_IO_SEG(0x00011F0000001814ull))
+#define CVMX_NPI_PCI_CFG06 (CVMX_ADD_IO_SEG(0x00011F0000001818ull))
+#define CVMX_NPI_PCI_CFG07 (CVMX_ADD_IO_SEG(0x00011F000000181Cull))
+#define CVMX_NPI_PCI_CFG08 (CVMX_ADD_IO_SEG(0x00011F0000001820ull))
+#define CVMX_NPI_PCI_CFG09 (CVMX_ADD_IO_SEG(0x00011F0000001824ull))
+#define CVMX_NPI_PCI_CFG10 (CVMX_ADD_IO_SEG(0x00011F0000001828ull))
+#define CVMX_NPI_PCI_CFG11 (CVMX_ADD_IO_SEG(0x00011F000000182Cull))
+#define CVMX_NPI_PCI_CFG12 (CVMX_ADD_IO_SEG(0x00011F0000001830ull))
+#define CVMX_NPI_PCI_CFG13 (CVMX_ADD_IO_SEG(0x00011F0000001834ull))
+#define CVMX_NPI_PCI_CFG15 (CVMX_ADD_IO_SEG(0x00011F000000183Cull))
+#define CVMX_NPI_PCI_CFG16 (CVMX_ADD_IO_SEG(0x00011F0000001840ull))
+#define CVMX_NPI_PCI_CFG17 (CVMX_ADD_IO_SEG(0x00011F0000001844ull))
+#define CVMX_NPI_PCI_CFG18 (CVMX_ADD_IO_SEG(0x00011F0000001848ull))
+#define CVMX_NPI_PCI_CFG19 (CVMX_ADD_IO_SEG(0x00011F000000184Cull))
+#define CVMX_NPI_PCI_CFG20 (CVMX_ADD_IO_SEG(0x00011F0000001850ull))
+#define CVMX_NPI_PCI_CFG21 (CVMX_ADD_IO_SEG(0x00011F0000001854ull))
+#define CVMX_NPI_PCI_CFG22 (CVMX_ADD_IO_SEG(0x00011F0000001858ull))
+#define CVMX_NPI_PCI_CFG56 (CVMX_ADD_IO_SEG(0x00011F00000018E0ull))
+#define CVMX_NPI_PCI_CFG57 (CVMX_ADD_IO_SEG(0x00011F00000018E4ull))
+#define CVMX_NPI_PCI_CFG58 (CVMX_ADD_IO_SEG(0x00011F00000018E8ull))
+#define CVMX_NPI_PCI_CFG59 (CVMX_ADD_IO_SEG(0x00011F00000018ECull))
+#define CVMX_NPI_PCI_CFG60 (CVMX_ADD_IO_SEG(0x00011F00000018F0ull))
+#define CVMX_NPI_PCI_CFG61 (CVMX_ADD_IO_SEG(0x00011F00000018F4ull))
+#define CVMX_NPI_PCI_CFG62 (CVMX_ADD_IO_SEG(0x00011F00000018F8ull))
+#define CVMX_NPI_PCI_CFG63 (CVMX_ADD_IO_SEG(0x00011F00000018FCull))
+#define CVMX_NPI_PCI_CNT_REG (CVMX_ADD_IO_SEG(0x00011F00000011B8ull))
+#define CVMX_NPI_PCI_CTL_STATUS_2 (CVMX_ADD_IO_SEG(0x00011F000000118Cull))
+#define CVMX_NPI_PCI_INT_ARB_CFG (CVMX_ADD_IO_SEG(0x00011F0000000130ull))
+#define CVMX_NPI_PCI_INT_ENB2 (CVMX_ADD_IO_SEG(0x00011F00000011A0ull))
+#define CVMX_NPI_PCI_INT_SUM2 (CVMX_ADD_IO_SEG(0x00011F0000001198ull))
+#define CVMX_NPI_PCI_READ_CMD (CVMX_ADD_IO_SEG(0x00011F0000000048ull))
+#define CVMX_NPI_PCI_READ_CMD_6 (CVMX_ADD_IO_SEG(0x00011F0000001180ull))
+#define CVMX_NPI_PCI_READ_CMD_C (CVMX_ADD_IO_SEG(0x00011F0000001184ull))
+#define CVMX_NPI_PCI_READ_CMD_E (CVMX_ADD_IO_SEG(0x00011F0000001188ull))
+#define CVMX_NPI_PCI_SCM_REG (CVMX_ADD_IO_SEG(0x00011F00000011A8ull))
+#define CVMX_NPI_PCI_TSR_REG (CVMX_ADD_IO_SEG(0x00011F00000011B0ull))
+#define CVMX_NPI_PORT32_INSTR_HDR (CVMX_ADD_IO_SEG(0x00011F00000001F8ull))
+#define CVMX_NPI_PORT33_INSTR_HDR (CVMX_ADD_IO_SEG(0x00011F0000000200ull))
+#define CVMX_NPI_PORT34_INSTR_HDR (CVMX_ADD_IO_SEG(0x00011F0000000208ull))
+#define CVMX_NPI_PORT35_INSTR_HDR (CVMX_ADD_IO_SEG(0x00011F0000000210ull))
+#define CVMX_NPI_PORT_BP_CONTROL (CVMX_ADD_IO_SEG(0x00011F00000001F0ull))
+#define CVMX_NPI_PX_DBPAIR_ADDR(offset) (CVMX_ADD_IO_SEG(0x00011F0000000180ull) + ((offset) & 3) * 8)
+#define CVMX_NPI_PX_INSTR_ADDR(offset) (CVMX_ADD_IO_SEG(0x00011F00000001C0ull) + ((offset) & 3) * 8)
+#define CVMX_NPI_PX_INSTR_CNTS(offset) (CVMX_ADD_IO_SEG(0x00011F00000001A0ull) + ((offset) & 3) * 8)
+#define CVMX_NPI_PX_PAIR_CNTS(offset) (CVMX_ADD_IO_SEG(0x00011F0000000160ull) + ((offset) & 3) * 8)
+#define CVMX_NPI_RSL_INT_BLOCKS (CVMX_ADD_IO_SEG(0x00011F0000000000ull))
+#define CVMX_NPI_SIZE_INPUT0 CVMX_NPI_SIZE_INPUTX(0)
+#define CVMX_NPI_SIZE_INPUT1 CVMX_NPI_SIZE_INPUTX(1)
+#define CVMX_NPI_SIZE_INPUT2 CVMX_NPI_SIZE_INPUTX(2)
+#define CVMX_NPI_SIZE_INPUT3 CVMX_NPI_SIZE_INPUTX(3)
+#define CVMX_NPI_SIZE_INPUTX(offset) (CVMX_ADD_IO_SEG(0x00011F0000000078ull) + ((offset) & 3) * 16)
+#define CVMX_NPI_WIN_READ_TO (CVMX_ADD_IO_SEG(0x00011F00000001E0ull))
union cvmx_npi_base_addr_inputx {
uint64_t u64;
diff --git a/arch/mips/include/asm/octeon/cvmx-pci-defs.h b/arch/mips/include/asm/octeon/cvmx-pci-defs.h
index 90f8d6535753..6ff6d9d357ba 100644
--- a/arch/mips/include/asm/octeon/cvmx-pci-defs.h
+++ b/arch/mips/include/asm/octeon/cvmx-pci-defs.h
@@ -4,7 +4,7 @@
* Contact: support@caviumnetworks.com
* This file is part of the OCTEON SDK
*
- * Copyright (c) 2003-2008 Cavium Networks
+ * Copyright (c) 2003-2010 Cavium Networks
*
* This file is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License, Version 2, as
@@ -28,184 +28,91 @@
#ifndef __CVMX_PCI_DEFS_H__
#define __CVMX_PCI_DEFS_H__
-#define CVMX_PCI_BAR1_INDEXX(offset) \
- (0x0000000000000100ull + (((offset) & 31) * 4))
-#define CVMX_PCI_BIST_REG \
- (0x00000000000001C0ull)
-#define CVMX_PCI_CFG00 \
- (0x0000000000000000ull)
-#define CVMX_PCI_CFG01 \
- (0x0000000000000004ull)
-#define CVMX_PCI_CFG02 \
- (0x0000000000000008ull)
-#define CVMX_PCI_CFG03 \
- (0x000000000000000Cull)
-#define CVMX_PCI_CFG04 \
- (0x0000000000000010ull)
-#define CVMX_PCI_CFG05 \
- (0x0000000000000014ull)
-#define CVMX_PCI_CFG06 \
- (0x0000000000000018ull)
-#define CVMX_PCI_CFG07 \
- (0x000000000000001Cull)
-#define CVMX_PCI_CFG08 \
- (0x0000000000000020ull)
-#define CVMX_PCI_CFG09 \
- (0x0000000000000024ull)
-#define CVMX_PCI_CFG10 \
- (0x0000000000000028ull)
-#define CVMX_PCI_CFG11 \
- (0x000000000000002Cull)
-#define CVMX_PCI_CFG12 \
- (0x0000000000000030ull)
-#define CVMX_PCI_CFG13 \
- (0x0000000000000034ull)
-#define CVMX_PCI_CFG15 \
- (0x000000000000003Cull)
-#define CVMX_PCI_CFG16 \
- (0x0000000000000040ull)
-#define CVMX_PCI_CFG17 \
- (0x0000000000000044ull)
-#define CVMX_PCI_CFG18 \
- (0x0000000000000048ull)
-#define CVMX_PCI_CFG19 \
- (0x000000000000004Cull)
-#define CVMX_PCI_CFG20 \
- (0x0000000000000050ull)
-#define CVMX_PCI_CFG21 \
- (0x0000000000000054ull)
-#define CVMX_PCI_CFG22 \
- (0x0000000000000058ull)
-#define CVMX_PCI_CFG56 \
- (0x00000000000000E0ull)
-#define CVMX_PCI_CFG57 \
- (0x00000000000000E4ull)
-#define CVMX_PCI_CFG58 \
- (0x00000000000000E8ull)
-#define CVMX_PCI_CFG59 \
- (0x00000000000000ECull)
-#define CVMX_PCI_CFG60 \
- (0x00000000000000F0ull)
-#define CVMX_PCI_CFG61 \
- (0x00000000000000F4ull)
-#define CVMX_PCI_CFG62 \
- (0x00000000000000F8ull)
-#define CVMX_PCI_CFG63 \
- (0x00000000000000FCull)
-#define CVMX_PCI_CNT_REG \
- (0x00000000000001B8ull)
-#define CVMX_PCI_CTL_STATUS_2 \
- (0x000000000000018Cull)
-#define CVMX_PCI_DBELL_0 \
- (0x0000000000000080ull)
-#define CVMX_PCI_DBELL_1 \
- (0x0000000000000088ull)
-#define CVMX_PCI_DBELL_2 \
- (0x0000000000000090ull)
-#define CVMX_PCI_DBELL_3 \
- (0x0000000000000098ull)
-#define CVMX_PCI_DBELL_X(offset) \
- (0x0000000000000080ull + (((offset) & 3) * 8))
-#define CVMX_PCI_DMA_CNT0 \
- (0x00000000000000A0ull)
-#define CVMX_PCI_DMA_CNT1 \
- (0x00000000000000A8ull)
-#define CVMX_PCI_DMA_CNTX(offset) \
- (0x00000000000000A0ull + (((offset) & 1) * 8))
-#define CVMX_PCI_DMA_INT_LEV0 \
- (0x00000000000000A4ull)
-#define CVMX_PCI_DMA_INT_LEV1 \
- (0x00000000000000ACull)
-#define CVMX_PCI_DMA_INT_LEVX(offset) \
- (0x00000000000000A4ull + (((offset) & 1) * 8))
-#define CVMX_PCI_DMA_TIME0 \
- (0x00000000000000B0ull)
-#define CVMX_PCI_DMA_TIME1 \
- (0x00000000000000B4ull)
-#define CVMX_PCI_DMA_TIMEX(offset) \
- (0x00000000000000B0ull + (((offset) & 1) * 4))
-#define CVMX_PCI_INSTR_COUNT0 \
- (0x0000000000000084ull)
-#define CVMX_PCI_INSTR_COUNT1 \
- (0x000000000000008Cull)
-#define CVMX_PCI_INSTR_COUNT2 \
- (0x0000000000000094ull)
-#define CVMX_PCI_INSTR_COUNT3 \
- (0x000000000000009Cull)
-#define CVMX_PCI_INSTR_COUNTX(offset) \
- (0x0000000000000084ull + (((offset) & 3) * 8))
-#define CVMX_PCI_INT_ENB \
- (0x0000000000000038ull)
-#define CVMX_PCI_INT_ENB2 \
- (0x00000000000001A0ull)
-#define CVMX_PCI_INT_SUM \
- (0x0000000000000030ull)
-#define CVMX_PCI_INT_SUM2 \
- (0x0000000000000198ull)
-#define CVMX_PCI_MSI_RCV \
- (0x00000000000000F0ull)
-#define CVMX_PCI_PKTS_SENT0 \
- (0x0000000000000040ull)
-#define CVMX_PCI_PKTS_SENT1 \
- (0x0000000000000050ull)
-#define CVMX_PCI_PKTS_SENT2 \
- (0x0000000000000060ull)
-#define CVMX_PCI_PKTS_SENT3 \
- (0x0000000000000070ull)
-#define CVMX_PCI_PKTS_SENTX(offset) \
- (0x0000000000000040ull + (((offset) & 3) * 16))
-#define CVMX_PCI_PKTS_SENT_INT_LEV0 \
- (0x0000000000000048ull)
-#define CVMX_PCI_PKTS_SENT_INT_LEV1 \
- (0x0000000000000058ull)
-#define CVMX_PCI_PKTS_SENT_INT_LEV2 \
- (0x0000000000000068ull)
-#define CVMX_PCI_PKTS_SENT_INT_LEV3 \
- (0x0000000000000078ull)
-#define CVMX_PCI_PKTS_SENT_INT_LEVX(offset) \
- (0x0000000000000048ull + (((offset) & 3) * 16))
-#define CVMX_PCI_PKTS_SENT_TIME0 \
- (0x000000000000004Cull)
-#define CVMX_PCI_PKTS_SENT_TIME1 \
- (0x000000000000005Cull)
-#define CVMX_PCI_PKTS_SENT_TIME2 \
- (0x000000000000006Cull)
-#define CVMX_PCI_PKTS_SENT_TIME3 \
- (0x000000000000007Cull)
-#define CVMX_PCI_PKTS_SENT_TIMEX(offset) \
- (0x000000000000004Cull + (((offset) & 3) * 16))
-#define CVMX_PCI_PKT_CREDITS0 \
- (0x0000000000000044ull)
-#define CVMX_PCI_PKT_CREDITS1 \
- (0x0000000000000054ull)
-#define CVMX_PCI_PKT_CREDITS2 \
- (0x0000000000000064ull)
-#define CVMX_PCI_PKT_CREDITS3 \
- (0x0000000000000074ull)
-#define CVMX_PCI_PKT_CREDITSX(offset) \
- (0x0000000000000044ull + (((offset) & 3) * 16))
-#define CVMX_PCI_READ_CMD_6 \
- (0x0000000000000180ull)
-#define CVMX_PCI_READ_CMD_C \
- (0x0000000000000184ull)
-#define CVMX_PCI_READ_CMD_E \
- (0x0000000000000188ull)
-#define CVMX_PCI_READ_TIMEOUT \
- CVMX_ADD_IO_SEG(0x00011F00000000B0ull)
-#define CVMX_PCI_SCM_REG \
- (0x00000000000001A8ull)
-#define CVMX_PCI_TSR_REG \
- (0x00000000000001B0ull)
-#define CVMX_PCI_WIN_RD_ADDR \
- (0x0000000000000008ull)
-#define CVMX_PCI_WIN_RD_DATA \
- (0x0000000000000020ull)
-#define CVMX_PCI_WIN_WR_ADDR \
- (0x0000000000000000ull)
-#define CVMX_PCI_WIN_WR_DATA \
- (0x0000000000000010ull)
-#define CVMX_PCI_WIN_WR_MASK \
- (0x0000000000000018ull)
+#define CVMX_PCI_BAR1_INDEXX(offset) (0x0000000000000100ull + ((offset) & 31) * 4)
+#define CVMX_PCI_BIST_REG (0x00000000000001C0ull)
+#define CVMX_PCI_CFG00 (0x0000000000000000ull)
+#define CVMX_PCI_CFG01 (0x0000000000000004ull)
+#define CVMX_PCI_CFG02 (0x0000000000000008ull)
+#define CVMX_PCI_CFG03 (0x000000000000000Cull)
+#define CVMX_PCI_CFG04 (0x0000000000000010ull)
+#define CVMX_PCI_CFG05 (0x0000000000000014ull)
+#define CVMX_PCI_CFG06 (0x0000000000000018ull)
+#define CVMX_PCI_CFG07 (0x000000000000001Cull)
+#define CVMX_PCI_CFG08 (0x0000000000000020ull)
+#define CVMX_PCI_CFG09 (0x0000000000000024ull)
+#define CVMX_PCI_CFG10 (0x0000000000000028ull)
+#define CVMX_PCI_CFG11 (0x000000000000002Cull)
+#define CVMX_PCI_CFG12 (0x0000000000000030ull)
+#define CVMX_PCI_CFG13 (0x0000000000000034ull)
+#define CVMX_PCI_CFG15 (0x000000000000003Cull)
+#define CVMX_PCI_CFG16 (0x0000000000000040ull)
+#define CVMX_PCI_CFG17 (0x0000000000000044ull)
+#define CVMX_PCI_CFG18 (0x0000000000000048ull)
+#define CVMX_PCI_CFG19 (0x000000000000004Cull)
+#define CVMX_PCI_CFG20 (0x0000000000000050ull)
+#define CVMX_PCI_CFG21 (0x0000000000000054ull)
+#define CVMX_PCI_CFG22 (0x0000000000000058ull)
+#define CVMX_PCI_CFG56 (0x00000000000000E0ull)
+#define CVMX_PCI_CFG57 (0x00000000000000E4ull)
+#define CVMX_PCI_CFG58 (0x00000000000000E8ull)
+#define CVMX_PCI_CFG59 (0x00000000000000ECull)
+#define CVMX_PCI_CFG60 (0x00000000000000F0ull)
+#define CVMX_PCI_CFG61 (0x00000000000000F4ull)
+#define CVMX_PCI_CFG62 (0x00000000000000F8ull)
+#define CVMX_PCI_CFG63 (0x00000000000000FCull)
+#define CVMX_PCI_CNT_REG (0x00000000000001B8ull)
+#define CVMX_PCI_CTL_STATUS_2 (0x000000000000018Cull)
+#define CVMX_PCI_DBELL_X(offset) (0x0000000000000080ull + ((offset) & 3) * 8)
+#define CVMX_PCI_DMA_CNT0 CVMX_PCI_DMA_CNTX(0)
+#define CVMX_PCI_DMA_CNT1 CVMX_PCI_DMA_CNTX(1)
+#define CVMX_PCI_DMA_CNTX(offset) (0x00000000000000A0ull + ((offset) & 1) * 8)
+#define CVMX_PCI_DMA_INT_LEV0 CVMX_PCI_DMA_INT_LEVX(0)
+#define CVMX_PCI_DMA_INT_LEV1 CVMX_PCI_DMA_INT_LEVX(1)
+#define CVMX_PCI_DMA_INT_LEVX(offset) (0x00000000000000A4ull + ((offset) & 1) * 8)
+#define CVMX_PCI_DMA_TIME0 CVMX_PCI_DMA_TIMEX(0)
+#define CVMX_PCI_DMA_TIME1 CVMX_PCI_DMA_TIMEX(1)
+#define CVMX_PCI_DMA_TIMEX(offset) (0x00000000000000B0ull + ((offset) & 1) * 4)
+#define CVMX_PCI_INSTR_COUNT0 CVMX_PCI_INSTR_COUNTX(0)
+#define CVMX_PCI_INSTR_COUNT1 CVMX_PCI_INSTR_COUNTX(1)
+#define CVMX_PCI_INSTR_COUNT2 CVMX_PCI_INSTR_COUNTX(2)
+#define CVMX_PCI_INSTR_COUNT3 CVMX_PCI_INSTR_COUNTX(3)
+#define CVMX_PCI_INSTR_COUNTX(offset) (0x0000000000000084ull + ((offset) & 3) * 8)
+#define CVMX_PCI_INT_ENB (0x0000000000000038ull)
+#define CVMX_PCI_INT_ENB2 (0x00000000000001A0ull)
+#define CVMX_PCI_INT_SUM (0x0000000000000030ull)
+#define CVMX_PCI_INT_SUM2 (0x0000000000000198ull)
+#define CVMX_PCI_MSI_RCV (0x00000000000000F0ull)
+#define CVMX_PCI_PKTS_SENT0 CVMX_PCI_PKTS_SENTX(0)
+#define CVMX_PCI_PKTS_SENT1 CVMX_PCI_PKTS_SENTX(1)
+#define CVMX_PCI_PKTS_SENT2 CVMX_PCI_PKTS_SENTX(2)
+#define CVMX_PCI_PKTS_SENT3 CVMX_PCI_PKTS_SENTX(3)
+#define CVMX_PCI_PKTS_SENTX(offset) (0x0000000000000040ull + ((offset) & 3) * 16)
+#define CVMX_PCI_PKTS_SENT_INT_LEV0 CVMX_PCI_PKTS_SENT_INT_LEVX(0)
+#define CVMX_PCI_PKTS_SENT_INT_LEV1 CVMX_PCI_PKTS_SENT_INT_LEVX(1)
+#define CVMX_PCI_PKTS_SENT_INT_LEV2 CVMX_PCI_PKTS_SENT_INT_LEVX(2)
+#define CVMX_PCI_PKTS_SENT_INT_LEV3 CVMX_PCI_PKTS_SENT_INT_LEVX(3)
+#define CVMX_PCI_PKTS_SENT_INT_LEVX(offset) (0x0000000000000048ull + ((offset) & 3) * 16)
+#define CVMX_PCI_PKTS_SENT_TIME0 CVMX_PCI_PKTS_SENT_TIMEX(0)
+#define CVMX_PCI_PKTS_SENT_TIME1 CVMX_PCI_PKTS_SENT_TIMEX(1)
+#define CVMX_PCI_PKTS_SENT_TIME2 CVMX_PCI_PKTS_SENT_TIMEX(2)
+#define CVMX_PCI_PKTS_SENT_TIME3 CVMX_PCI_PKTS_SENT_TIMEX(3)
+#define CVMX_PCI_PKTS_SENT_TIMEX(offset) (0x000000000000004Cull + ((offset) & 3) * 16)
+#define CVMX_PCI_PKT_CREDITS0 CVMX_PCI_PKT_CREDITSX(0)
+#define CVMX_PCI_PKT_CREDITS1 CVMX_PCI_PKT_CREDITSX(1)
+#define CVMX_PCI_PKT_CREDITS2 CVMX_PCI_PKT_CREDITSX(2)
+#define CVMX_PCI_PKT_CREDITS3 CVMX_PCI_PKT_CREDITSX(3)
+#define CVMX_PCI_PKT_CREDITSX(offset) (0x0000000000000044ull + ((offset) & 3) * 16)
+#define CVMX_PCI_READ_CMD_6 (0x0000000000000180ull)
+#define CVMX_PCI_READ_CMD_C (0x0000000000000184ull)
+#define CVMX_PCI_READ_CMD_E (0x0000000000000188ull)
+#define CVMX_PCI_READ_TIMEOUT (CVMX_ADD_IO_SEG(0x00011F00000000B0ull))
+#define CVMX_PCI_SCM_REG (0x00000000000001A8ull)
+#define CVMX_PCI_TSR_REG (0x00000000000001B0ull)
+#define CVMX_PCI_WIN_RD_ADDR (0x0000000000000008ull)
+#define CVMX_PCI_WIN_RD_DATA (0x0000000000000020ull)
+#define CVMX_PCI_WIN_WR_ADDR (0x0000000000000000ull)
+#define CVMX_PCI_WIN_WR_DATA (0x0000000000000010ull)
+#define CVMX_PCI_WIN_WR_MASK (0x0000000000000018ull)
union cvmx_pci_bar1_indexx {
uint32_t u32;
diff --git a/arch/mips/include/asm/octeon/cvmx-pciercx-defs.h b/arch/mips/include/asm/octeon/cvmx-pciercx-defs.h
index 75574c918942..f8cb88902efb 100644
--- a/arch/mips/include/asm/octeon/cvmx-pciercx-defs.h
+++ b/arch/mips/include/asm/octeon/cvmx-pciercx-defs.h
@@ -4,7 +4,7 @@
* Contact: support@caviumnetworks.com
* This file is part of the OCTEON SDK
*
- * Copyright (c) 2003-2008 Cavium Networks
+ * Copyright (c) 2003-2010 Cavium Networks
*
* This file is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License, Version 2, as
@@ -28,158 +28,83 @@
#ifndef __CVMX_PCIERCX_DEFS_H__
#define __CVMX_PCIERCX_DEFS_H__
-#define CVMX_PCIERCX_CFG000(offset) \
- (0x0000000000000000ull + (((offset) & 1) * 0))
-#define CVMX_PCIERCX_CFG001(offset) \
- (0x0000000000000004ull + (((offset) & 1) * 0))
-#define CVMX_PCIERCX_CFG002(offset) \
- (0x0000000000000008ull + (((offset) & 1) * 0))
-#define CVMX_PCIERCX_CFG003(offset) \
- (0x000000000000000Cull + (((offset) & 1) * 0))
-#define CVMX_PCIERCX_CFG004(offset) \
- (0x0000000000000010ull + (((offset) & 1) * 0))
-#define CVMX_PCIERCX_CFG005(offset) \
- (0x0000000000000014ull + (((offset) & 1) * 0))
-#define CVMX_PCIERCX_CFG006(offset) \
- (0x0000000000000018ull + (((offset) & 1) * 0))
-#define CVMX_PCIERCX_CFG007(offset) \
- (0x000000000000001Cull + (((offset) & 1) * 0))
-#define CVMX_PCIERCX_CFG008(offset) \
- (0x0000000000000020ull + (((offset) & 1) * 0))
-#define CVMX_PCIERCX_CFG009(offset) \
- (0x0000000000000024ull + (((offset) & 1) * 0))
-#define CVMX_PCIERCX_CFG010(offset) \
- (0x0000000000000028ull + (((offset) & 1) * 0))
-#define CVMX_PCIERCX_CFG011(offset) \
- (0x000000000000002Cull + (((offset) & 1) * 0))
-#define CVMX_PCIERCX_CFG012(offset) \
- (0x0000000000000030ull + (((offset) & 1) * 0))
-#define CVMX_PCIERCX_CFG013(offset) \
- (0x0000000000000034ull + (((offset) & 1) * 0))
-#define CVMX_PCIERCX_CFG014(offset) \
- (0x0000000000000038ull + (((offset) & 1) * 0))
-#define CVMX_PCIERCX_CFG015(offset) \
- (0x000000000000003Cull + (((offset) & 1) * 0))
-#define CVMX_PCIERCX_CFG016(offset) \
- (0x0000000000000040ull + (((offset) & 1) * 0))
-#define CVMX_PCIERCX_CFG017(offset) \
- (0x0000000000000044ull + (((offset) & 1) * 0))
-#define CVMX_PCIERCX_CFG020(offset) \
- (0x0000000000000050ull + (((offset) & 1) * 0))
-#define CVMX_PCIERCX_CFG021(offset) \
- (0x0000000000000054ull + (((offset) & 1) * 0))
-#define CVMX_PCIERCX_CFG022(offset) \
- (0x0000000000000058ull + (((offset) & 1) * 0))
-#define CVMX_PCIERCX_CFG023(offset) \
- (0x000000000000005Cull + (((offset) & 1) * 0))
-#define CVMX_PCIERCX_CFG028(offset) \
- (0x0000000000000070ull + (((offset) & 1) * 0))
-#define CVMX_PCIERCX_CFG029(offset) \
- (0x0000000000000074ull + (((offset) & 1) * 0))
-#define CVMX_PCIERCX_CFG030(offset) \
- (0x0000000000000078ull + (((offset) & 1) * 0))
-#define CVMX_PCIERCX_CFG031(offset) \
- (0x000000000000007Cull + (((offset) & 1) * 0))
-#define CVMX_PCIERCX_CFG032(offset) \
- (0x0000000000000080ull + (((offset) & 1) * 0))
-#define CVMX_PCIERCX_CFG033(offset) \
- (0x0000000000000084ull + (((offset) & 1) * 0))
-#define CVMX_PCIERCX_CFG034(offset) \
- (0x0000000000000088ull + (((offset) & 1) * 0))
-#define CVMX_PCIERCX_CFG035(offset) \
- (0x000000000000008Cull + (((offset) & 1) * 0))
-#define CVMX_PCIERCX_CFG036(offset) \
- (0x0000000000000090ull + (((offset) & 1) * 0))
-#define CVMX_PCIERCX_CFG037(offset) \
- (0x0000000000000094ull + (((offset) & 1) * 0))
-#define CVMX_PCIERCX_CFG038(offset) \
- (0x0000000000000098ull + (((offset) & 1) * 0))
-#define CVMX_PCIERCX_CFG039(offset) \
- (0x000000000000009Cull + (((offset) & 1) * 0))
-#define CVMX_PCIERCX_CFG040(offset) \
- (0x00000000000000A0ull + (((offset) & 1) * 0))
-#define CVMX_PCIERCX_CFG041(offset) \
- (0x00000000000000A4ull + (((offset) & 1) * 0))
-#define CVMX_PCIERCX_CFG042(offset) \
- (0x00000000000000A8ull + (((offset) & 1) * 0))
-#define CVMX_PCIERCX_CFG064(offset) \
- (0x0000000000000100ull + (((offset) & 1) * 0))
-#define CVMX_PCIERCX_CFG065(offset) \
- (0x0000000000000104ull + (((offset) & 1) * 0))
-#define CVMX_PCIERCX_CFG066(offset) \
- (0x0000000000000108ull + (((offset) & 1) * 0))
-#define CVMX_PCIERCX_CFG067(offset) \
- (0x000000000000010Cull + (((offset) & 1) * 0))
-#define CVMX_PCIERCX_CFG068(offset) \
- (0x0000000000000110ull + (((offset) & 1) * 0))
-#define CVMX_PCIERCX_CFG069(offset) \
- (0x0000000000000114ull + (((offset) & 1) * 0))
-#define CVMX_PCIERCX_CFG070(offset) \
- (0x0000000000000118ull + (((offset) & 1) * 0))
-#define CVMX_PCIERCX_CFG071(offset) \
- (0x000000000000011Cull + (((offset) & 1) * 0))
-#define CVMX_PCIERCX_CFG072(offset) \
- (0x0000000000000120ull + (((offset) & 1) * 0))
-#define CVMX_PCIERCX_CFG073(offset) \
- (0x0000000000000124ull + (((offset) & 1) * 0))
-#define CVMX_PCIERCX_CFG074(offset) \
- (0x0000000000000128ull + (((offset) & 1) * 0))
-#define CVMX_PCIERCX_CFG075(offset) \
- (0x000000000000012Cull + (((offset) & 1) * 0))
-#define CVMX_PCIERCX_CFG076(offset) \
- (0x0000000000000130ull + (((offset) & 1) * 0))
-#define CVMX_PCIERCX_CFG077(offset) \
- (0x0000000000000134ull + (((offset) & 1) * 0))
-#define CVMX_PCIERCX_CFG448(offset) \
- (0x0000000000000700ull + (((offset) & 1) * 0))
-#define CVMX_PCIERCX_CFG449(offset) \
- (0x0000000000000704ull + (((offset) & 1) * 0))
-#define CVMX_PCIERCX_CFG450(offset) \
- (0x0000000000000708ull + (((offset) & 1) * 0))
-#define CVMX_PCIERCX_CFG451(offset) \
- (0x000000000000070Cull + (((offset) & 1) * 0))
-#define CVMX_PCIERCX_CFG452(offset) \
- (0x0000000000000710ull + (((offset) & 1) * 0))
-#define CVMX_PCIERCX_CFG453(offset) \
- (0x0000000000000714ull + (((offset) & 1) * 0))
-#define CVMX_PCIERCX_CFG454(offset) \
- (0x0000000000000718ull + (((offset) & 1) * 0))
-#define CVMX_PCIERCX_CFG455(offset) \
- (0x000000000000071Cull + (((offset) & 1) * 0))
-#define CVMX_PCIERCX_CFG456(offset) \
- (0x0000000000000720ull + (((offset) & 1) * 0))
-#define CVMX_PCIERCX_CFG458(offset) \
- (0x0000000000000728ull + (((offset) & 1) * 0))
-#define CVMX_PCIERCX_CFG459(offset) \
- (0x000000000000072Cull + (((offset) & 1) * 0))
-#define CVMX_PCIERCX_CFG460(offset) \
- (0x0000000000000730ull + (((offset) & 1) * 0))
-#define CVMX_PCIERCX_CFG461(offset) \
- (0x0000000000000734ull + (((offset) & 1) * 0))
-#define CVMX_PCIERCX_CFG462(offset) \
- (0x0000000000000738ull + (((offset) & 1) * 0))
-#define CVMX_PCIERCX_CFG463(offset) \
- (0x000000000000073Cull + (((offset) & 1) * 0))
-#define CVMX_PCIERCX_CFG464(offset) \
- (0x0000000000000740ull + (((offset) & 1) * 0))
-#define CVMX_PCIERCX_CFG465(offset) \
- (0x0000000000000744ull + (((offset) & 1) * 0))
-#define CVMX_PCIERCX_CFG466(offset) \
- (0x0000000000000748ull + (((offset) & 1) * 0))
-#define CVMX_PCIERCX_CFG467(offset) \
- (0x000000000000074Cull + (((offset) & 1) * 0))
-#define CVMX_PCIERCX_CFG468(offset) \
- (0x0000000000000750ull + (((offset) & 1) * 0))
-#define CVMX_PCIERCX_CFG490(offset) \
- (0x00000000000007A8ull + (((offset) & 1) * 0))
-#define CVMX_PCIERCX_CFG491(offset) \
- (0x00000000000007ACull + (((offset) & 1) * 0))
-#define CVMX_PCIERCX_CFG492(offset) \
- (0x00000000000007B0ull + (((offset) & 1) * 0))
-#define CVMX_PCIERCX_CFG516(offset) \
- (0x0000000000000810ull + (((offset) & 1) * 0))
-#define CVMX_PCIERCX_CFG517(offset) \
- (0x0000000000000814ull + (((offset) & 1) * 0))
+#define CVMX_PCIERCX_CFG000(block_id) (0x0000000000000000ull)
+#define CVMX_PCIERCX_CFG001(block_id) (0x0000000000000004ull)
+#define CVMX_PCIERCX_CFG002(block_id) (0x0000000000000008ull)
+#define CVMX_PCIERCX_CFG003(block_id) (0x000000000000000Cull)
+#define CVMX_PCIERCX_CFG004(block_id) (0x0000000000000010ull)
+#define CVMX_PCIERCX_CFG005(block_id) (0x0000000000000014ull)
+#define CVMX_PCIERCX_CFG006(block_id) (0x0000000000000018ull)
+#define CVMX_PCIERCX_CFG007(block_id) (0x000000000000001Cull)
+#define CVMX_PCIERCX_CFG008(block_id) (0x0000000000000020ull)
+#define CVMX_PCIERCX_CFG009(block_id) (0x0000000000000024ull)
+#define CVMX_PCIERCX_CFG010(block_id) (0x0000000000000028ull)
+#define CVMX_PCIERCX_CFG011(block_id) (0x000000000000002Cull)
+#define CVMX_PCIERCX_CFG012(block_id) (0x0000000000000030ull)
+#define CVMX_PCIERCX_CFG013(block_id) (0x0000000000000034ull)
+#define CVMX_PCIERCX_CFG014(block_id) (0x0000000000000038ull)
+#define CVMX_PCIERCX_CFG015(block_id) (0x000000000000003Cull)
+#define CVMX_PCIERCX_CFG016(block_id) (0x0000000000000040ull)
+#define CVMX_PCIERCX_CFG017(block_id) (0x0000000000000044ull)
+#define CVMX_PCIERCX_CFG020(block_id) (0x0000000000000050ull)
+#define CVMX_PCIERCX_CFG021(block_id) (0x0000000000000054ull)
+#define CVMX_PCIERCX_CFG022(block_id) (0x0000000000000058ull)
+#define CVMX_PCIERCX_CFG023(block_id) (0x000000000000005Cull)
+#define CVMX_PCIERCX_CFG028(block_id) (0x0000000000000070ull)
+#define CVMX_PCIERCX_CFG029(block_id) (0x0000000000000074ull)
+#define CVMX_PCIERCX_CFG030(block_id) (0x0000000000000078ull)
+#define CVMX_PCIERCX_CFG031(block_id) (0x000000000000007Cull)
+#define CVMX_PCIERCX_CFG032(block_id) (0x0000000000000080ull)
+#define CVMX_PCIERCX_CFG033(block_id) (0x0000000000000084ull)
+#define CVMX_PCIERCX_CFG034(block_id) (0x0000000000000088ull)
+#define CVMX_PCIERCX_CFG035(block_id) (0x000000000000008Cull)
+#define CVMX_PCIERCX_CFG036(block_id) (0x0000000000000090ull)
+#define CVMX_PCIERCX_CFG037(block_id) (0x0000000000000094ull)
+#define CVMX_PCIERCX_CFG038(block_id) (0x0000000000000098ull)
+#define CVMX_PCIERCX_CFG039(block_id) (0x000000000000009Cull)
+#define CVMX_PCIERCX_CFG040(block_id) (0x00000000000000A0ull)
+#define CVMX_PCIERCX_CFG041(block_id) (0x00000000000000A4ull)
+#define CVMX_PCIERCX_CFG042(block_id) (0x00000000000000A8ull)
+#define CVMX_PCIERCX_CFG064(block_id) (0x0000000000000100ull)
+#define CVMX_PCIERCX_CFG065(block_id) (0x0000000000000104ull)
+#define CVMX_PCIERCX_CFG066(block_id) (0x0000000000000108ull)
+#define CVMX_PCIERCX_CFG067(block_id) (0x000000000000010Cull)
+#define CVMX_PCIERCX_CFG068(block_id) (0x0000000000000110ull)
+#define CVMX_PCIERCX_CFG069(block_id) (0x0000000000000114ull)
+#define CVMX_PCIERCX_CFG070(block_id) (0x0000000000000118ull)
+#define CVMX_PCIERCX_CFG071(block_id) (0x000000000000011Cull)
+#define CVMX_PCIERCX_CFG072(block_id) (0x0000000000000120ull)
+#define CVMX_PCIERCX_CFG073(block_id) (0x0000000000000124ull)
+#define CVMX_PCIERCX_CFG074(block_id) (0x0000000000000128ull)
+#define CVMX_PCIERCX_CFG075(block_id) (0x000000000000012Cull)
+#define CVMX_PCIERCX_CFG076(block_id) (0x0000000000000130ull)
+#define CVMX_PCIERCX_CFG077(block_id) (0x0000000000000134ull)
+#define CVMX_PCIERCX_CFG448(block_id) (0x0000000000000700ull)
+#define CVMX_PCIERCX_CFG449(block_id) (0x0000000000000704ull)
+#define CVMX_PCIERCX_CFG450(block_id) (0x0000000000000708ull)
+#define CVMX_PCIERCX_CFG451(block_id) (0x000000000000070Cull)
+#define CVMX_PCIERCX_CFG452(block_id) (0x0000000000000710ull)
+#define CVMX_PCIERCX_CFG453(block_id) (0x0000000000000714ull)
+#define CVMX_PCIERCX_CFG454(block_id) (0x0000000000000718ull)
+#define CVMX_PCIERCX_CFG455(block_id) (0x000000000000071Cull)
+#define CVMX_PCIERCX_CFG456(block_id) (0x0000000000000720ull)
+#define CVMX_PCIERCX_CFG458(block_id) (0x0000000000000728ull)
+#define CVMX_PCIERCX_CFG459(block_id) (0x000000000000072Cull)
+#define CVMX_PCIERCX_CFG460(block_id) (0x0000000000000730ull)
+#define CVMX_PCIERCX_CFG461(block_id) (0x0000000000000734ull)
+#define CVMX_PCIERCX_CFG462(block_id) (0x0000000000000738ull)
+#define CVMX_PCIERCX_CFG463(block_id) (0x000000000000073Cull)
+#define CVMX_PCIERCX_CFG464(block_id) (0x0000000000000740ull)
+#define CVMX_PCIERCX_CFG465(block_id) (0x0000000000000744ull)
+#define CVMX_PCIERCX_CFG466(block_id) (0x0000000000000748ull)
+#define CVMX_PCIERCX_CFG467(block_id) (0x000000000000074Cull)
+#define CVMX_PCIERCX_CFG468(block_id) (0x0000000000000750ull)
+#define CVMX_PCIERCX_CFG490(block_id) (0x00000000000007A8ull)
+#define CVMX_PCIERCX_CFG491(block_id) (0x00000000000007ACull)
+#define CVMX_PCIERCX_CFG492(block_id) (0x00000000000007B0ull)
+#define CVMX_PCIERCX_CFG515(block_id) (0x000000000000080Cull)
+#define CVMX_PCIERCX_CFG516(block_id) (0x0000000000000810ull)
+#define CVMX_PCIERCX_CFG517(block_id) (0x0000000000000814ull)
union cvmx_pciercx_cfg000 {
uint32_t u32;
@@ -191,6 +116,8 @@ union cvmx_pciercx_cfg000 {
struct cvmx_pciercx_cfg000_s cn52xxp1;
struct cvmx_pciercx_cfg000_s cn56xx;
struct cvmx_pciercx_cfg000_s cn56xxp1;
+ struct cvmx_pciercx_cfg000_s cn63xx;
+ struct cvmx_pciercx_cfg000_s cn63xxp1;
};
union cvmx_pciercx_cfg001 {
@@ -225,6 +152,8 @@ union cvmx_pciercx_cfg001 {
struct cvmx_pciercx_cfg001_s cn52xxp1;
struct cvmx_pciercx_cfg001_s cn56xx;
struct cvmx_pciercx_cfg001_s cn56xxp1;
+ struct cvmx_pciercx_cfg001_s cn63xx;
+ struct cvmx_pciercx_cfg001_s cn63xxp1;
};
union cvmx_pciercx_cfg002 {
@@ -239,6 +168,8 @@ union cvmx_pciercx_cfg002 {
struct cvmx_pciercx_cfg002_s cn52xxp1;
struct cvmx_pciercx_cfg002_s cn56xx;
struct cvmx_pciercx_cfg002_s cn56xxp1;
+ struct cvmx_pciercx_cfg002_s cn63xx;
+ struct cvmx_pciercx_cfg002_s cn63xxp1;
};
union cvmx_pciercx_cfg003 {
@@ -254,6 +185,8 @@ union cvmx_pciercx_cfg003 {
struct cvmx_pciercx_cfg003_s cn52xxp1;
struct cvmx_pciercx_cfg003_s cn56xx;
struct cvmx_pciercx_cfg003_s cn56xxp1;
+ struct cvmx_pciercx_cfg003_s cn63xx;
+ struct cvmx_pciercx_cfg003_s cn63xxp1;
};
union cvmx_pciercx_cfg004 {
@@ -265,6 +198,8 @@ union cvmx_pciercx_cfg004 {
struct cvmx_pciercx_cfg004_s cn52xxp1;
struct cvmx_pciercx_cfg004_s cn56xx;
struct cvmx_pciercx_cfg004_s cn56xxp1;
+ struct cvmx_pciercx_cfg004_s cn63xx;
+ struct cvmx_pciercx_cfg004_s cn63xxp1;
};
union cvmx_pciercx_cfg005 {
@@ -276,6 +211,8 @@ union cvmx_pciercx_cfg005 {
struct cvmx_pciercx_cfg005_s cn52xxp1;
struct cvmx_pciercx_cfg005_s cn56xx;
struct cvmx_pciercx_cfg005_s cn56xxp1;
+ struct cvmx_pciercx_cfg005_s cn63xx;
+ struct cvmx_pciercx_cfg005_s cn63xxp1;
};
union cvmx_pciercx_cfg006 {
@@ -290,6 +227,8 @@ union cvmx_pciercx_cfg006 {
struct cvmx_pciercx_cfg006_s cn52xxp1;
struct cvmx_pciercx_cfg006_s cn56xx;
struct cvmx_pciercx_cfg006_s cn56xxp1;
+ struct cvmx_pciercx_cfg006_s cn63xx;
+ struct cvmx_pciercx_cfg006_s cn63xxp1;
};
union cvmx_pciercx_cfg007 {
@@ -317,6 +256,8 @@ union cvmx_pciercx_cfg007 {
struct cvmx_pciercx_cfg007_s cn52xxp1;
struct cvmx_pciercx_cfg007_s cn56xx;
struct cvmx_pciercx_cfg007_s cn56xxp1;
+ struct cvmx_pciercx_cfg007_s cn63xx;
+ struct cvmx_pciercx_cfg007_s cn63xxp1;
};
union cvmx_pciercx_cfg008 {
@@ -331,6 +272,8 @@ union cvmx_pciercx_cfg008 {
struct cvmx_pciercx_cfg008_s cn52xxp1;
struct cvmx_pciercx_cfg008_s cn56xx;
struct cvmx_pciercx_cfg008_s cn56xxp1;
+ struct cvmx_pciercx_cfg008_s cn63xx;
+ struct cvmx_pciercx_cfg008_s cn63xxp1;
};
union cvmx_pciercx_cfg009 {
@@ -347,6 +290,8 @@ union cvmx_pciercx_cfg009 {
struct cvmx_pciercx_cfg009_s cn52xxp1;
struct cvmx_pciercx_cfg009_s cn56xx;
struct cvmx_pciercx_cfg009_s cn56xxp1;
+ struct cvmx_pciercx_cfg009_s cn63xx;
+ struct cvmx_pciercx_cfg009_s cn63xxp1;
};
union cvmx_pciercx_cfg010 {
@@ -358,6 +303,8 @@ union cvmx_pciercx_cfg010 {
struct cvmx_pciercx_cfg010_s cn52xxp1;
struct cvmx_pciercx_cfg010_s cn56xx;
struct cvmx_pciercx_cfg010_s cn56xxp1;
+ struct cvmx_pciercx_cfg010_s cn63xx;
+ struct cvmx_pciercx_cfg010_s cn63xxp1;
};
union cvmx_pciercx_cfg011 {
@@ -369,6 +316,8 @@ union cvmx_pciercx_cfg011 {
struct cvmx_pciercx_cfg011_s cn52xxp1;
struct cvmx_pciercx_cfg011_s cn56xx;
struct cvmx_pciercx_cfg011_s cn56xxp1;
+ struct cvmx_pciercx_cfg011_s cn63xx;
+ struct cvmx_pciercx_cfg011_s cn63xxp1;
};
union cvmx_pciercx_cfg012 {
@@ -381,6 +330,8 @@ union cvmx_pciercx_cfg012 {
struct cvmx_pciercx_cfg012_s cn52xxp1;
struct cvmx_pciercx_cfg012_s cn56xx;
struct cvmx_pciercx_cfg012_s cn56xxp1;
+ struct cvmx_pciercx_cfg012_s cn63xx;
+ struct cvmx_pciercx_cfg012_s cn63xxp1;
};
union cvmx_pciercx_cfg013 {
@@ -393,6 +344,8 @@ union cvmx_pciercx_cfg013 {
struct cvmx_pciercx_cfg013_s cn52xxp1;
struct cvmx_pciercx_cfg013_s cn56xx;
struct cvmx_pciercx_cfg013_s cn56xxp1;
+ struct cvmx_pciercx_cfg013_s cn63xx;
+ struct cvmx_pciercx_cfg013_s cn63xxp1;
};
union cvmx_pciercx_cfg014 {
@@ -404,6 +357,8 @@ union cvmx_pciercx_cfg014 {
struct cvmx_pciercx_cfg014_s cn52xxp1;
struct cvmx_pciercx_cfg014_s cn56xx;
struct cvmx_pciercx_cfg014_s cn56xxp1;
+ struct cvmx_pciercx_cfg014_s cn63xx;
+ struct cvmx_pciercx_cfg014_s cn63xxp1;
};
union cvmx_pciercx_cfg015 {
@@ -429,6 +384,8 @@ union cvmx_pciercx_cfg015 {
struct cvmx_pciercx_cfg015_s cn52xxp1;
struct cvmx_pciercx_cfg015_s cn56xx;
struct cvmx_pciercx_cfg015_s cn56xxp1;
+ struct cvmx_pciercx_cfg015_s cn63xx;
+ struct cvmx_pciercx_cfg015_s cn63xxp1;
};
union cvmx_pciercx_cfg016 {
@@ -449,6 +406,8 @@ union cvmx_pciercx_cfg016 {
struct cvmx_pciercx_cfg016_s cn52xxp1;
struct cvmx_pciercx_cfg016_s cn56xx;
struct cvmx_pciercx_cfg016_s cn56xxp1;
+ struct cvmx_pciercx_cfg016_s cn63xx;
+ struct cvmx_pciercx_cfg016_s cn63xxp1;
};
union cvmx_pciercx_cfg017 {
@@ -471,6 +430,8 @@ union cvmx_pciercx_cfg017 {
struct cvmx_pciercx_cfg017_s cn52xxp1;
struct cvmx_pciercx_cfg017_s cn56xx;
struct cvmx_pciercx_cfg017_s cn56xxp1;
+ struct cvmx_pciercx_cfg017_s cn63xx;
+ struct cvmx_pciercx_cfg017_s cn63xxp1;
};
union cvmx_pciercx_cfg020 {
@@ -488,6 +449,8 @@ union cvmx_pciercx_cfg020 {
struct cvmx_pciercx_cfg020_s cn52xxp1;
struct cvmx_pciercx_cfg020_s cn56xx;
struct cvmx_pciercx_cfg020_s cn56xxp1;
+ struct cvmx_pciercx_cfg020_s cn63xx;
+ struct cvmx_pciercx_cfg020_s cn63xxp1;
};
union cvmx_pciercx_cfg021 {
@@ -500,6 +463,8 @@ union cvmx_pciercx_cfg021 {
struct cvmx_pciercx_cfg021_s cn52xxp1;
struct cvmx_pciercx_cfg021_s cn56xx;
struct cvmx_pciercx_cfg021_s cn56xxp1;
+ struct cvmx_pciercx_cfg021_s cn63xx;
+ struct cvmx_pciercx_cfg021_s cn63xxp1;
};
union cvmx_pciercx_cfg022 {
@@ -511,6 +476,8 @@ union cvmx_pciercx_cfg022 {
struct cvmx_pciercx_cfg022_s cn52xxp1;
struct cvmx_pciercx_cfg022_s cn56xx;
struct cvmx_pciercx_cfg022_s cn56xxp1;
+ struct cvmx_pciercx_cfg022_s cn63xx;
+ struct cvmx_pciercx_cfg022_s cn63xxp1;
};
union cvmx_pciercx_cfg023 {
@@ -523,6 +490,8 @@ union cvmx_pciercx_cfg023 {
struct cvmx_pciercx_cfg023_s cn52xxp1;
struct cvmx_pciercx_cfg023_s cn56xx;
struct cvmx_pciercx_cfg023_s cn56xxp1;
+ struct cvmx_pciercx_cfg023_s cn63xx;
+ struct cvmx_pciercx_cfg023_s cn63xxp1;
};
union cvmx_pciercx_cfg028 {
@@ -540,6 +509,8 @@ union cvmx_pciercx_cfg028 {
struct cvmx_pciercx_cfg028_s cn52xxp1;
struct cvmx_pciercx_cfg028_s cn56xx;
struct cvmx_pciercx_cfg028_s cn56xxp1;
+ struct cvmx_pciercx_cfg028_s cn63xx;
+ struct cvmx_pciercx_cfg028_s cn63xxp1;
};
union cvmx_pciercx_cfg029 {
@@ -561,6 +532,8 @@ union cvmx_pciercx_cfg029 {
struct cvmx_pciercx_cfg029_s cn52xxp1;
struct cvmx_pciercx_cfg029_s cn56xx;
struct cvmx_pciercx_cfg029_s cn56xxp1;
+ struct cvmx_pciercx_cfg029_s cn63xx;
+ struct cvmx_pciercx_cfg029_s cn63xxp1;
};
union cvmx_pciercx_cfg030 {
@@ -590,6 +563,8 @@ union cvmx_pciercx_cfg030 {
struct cvmx_pciercx_cfg030_s cn52xxp1;
struct cvmx_pciercx_cfg030_s cn56xx;
struct cvmx_pciercx_cfg030_s cn56xxp1;
+ struct cvmx_pciercx_cfg030_s cn63xx;
+ struct cvmx_pciercx_cfg030_s cn63xxp1;
};
union cvmx_pciercx_cfg031 {
@@ -611,6 +586,8 @@ union cvmx_pciercx_cfg031 {
struct cvmx_pciercx_cfg031_s cn52xxp1;
struct cvmx_pciercx_cfg031_s cn56xx;
struct cvmx_pciercx_cfg031_s cn56xxp1;
+ struct cvmx_pciercx_cfg031_s cn63xx;
+ struct cvmx_pciercx_cfg031_s cn63xxp1;
};
union cvmx_pciercx_cfg032 {
@@ -641,6 +618,8 @@ union cvmx_pciercx_cfg032 {
struct cvmx_pciercx_cfg032_s cn52xxp1;
struct cvmx_pciercx_cfg032_s cn56xx;
struct cvmx_pciercx_cfg032_s cn56xxp1;
+ struct cvmx_pciercx_cfg032_s cn63xx;
+ struct cvmx_pciercx_cfg032_s cn63xxp1;
};
union cvmx_pciercx_cfg033 {
@@ -663,6 +642,8 @@ union cvmx_pciercx_cfg033 {
struct cvmx_pciercx_cfg033_s cn52xxp1;
struct cvmx_pciercx_cfg033_s cn56xx;
struct cvmx_pciercx_cfg033_s cn56xxp1;
+ struct cvmx_pciercx_cfg033_s cn63xx;
+ struct cvmx_pciercx_cfg033_s cn63xxp1;
};
union cvmx_pciercx_cfg034 {
@@ -695,6 +676,8 @@ union cvmx_pciercx_cfg034 {
struct cvmx_pciercx_cfg034_s cn52xxp1;
struct cvmx_pciercx_cfg034_s cn56xx;
struct cvmx_pciercx_cfg034_s cn56xxp1;
+ struct cvmx_pciercx_cfg034_s cn63xx;
+ struct cvmx_pciercx_cfg034_s cn63xxp1;
};
union cvmx_pciercx_cfg035 {
@@ -713,6 +696,8 @@ union cvmx_pciercx_cfg035 {
struct cvmx_pciercx_cfg035_s cn52xxp1;
struct cvmx_pciercx_cfg035_s cn56xx;
struct cvmx_pciercx_cfg035_s cn56xxp1;
+ struct cvmx_pciercx_cfg035_s cn63xx;
+ struct cvmx_pciercx_cfg035_s cn63xxp1;
};
union cvmx_pciercx_cfg036 {
@@ -727,6 +712,8 @@ union cvmx_pciercx_cfg036 {
struct cvmx_pciercx_cfg036_s cn52xxp1;
struct cvmx_pciercx_cfg036_s cn56xx;
struct cvmx_pciercx_cfg036_s cn56xxp1;
+ struct cvmx_pciercx_cfg036_s cn63xx;
+ struct cvmx_pciercx_cfg036_s cn63xxp1;
};
union cvmx_pciercx_cfg037 {
@@ -740,6 +727,8 @@ union cvmx_pciercx_cfg037 {
struct cvmx_pciercx_cfg037_s cn52xxp1;
struct cvmx_pciercx_cfg037_s cn56xx;
struct cvmx_pciercx_cfg037_s cn56xxp1;
+ struct cvmx_pciercx_cfg037_s cn63xx;
+ struct cvmx_pciercx_cfg037_s cn63xxp1;
};
union cvmx_pciercx_cfg038 {
@@ -753,28 +742,51 @@ union cvmx_pciercx_cfg038 {
struct cvmx_pciercx_cfg038_s cn52xxp1;
struct cvmx_pciercx_cfg038_s cn56xx;
struct cvmx_pciercx_cfg038_s cn56xxp1;
+ struct cvmx_pciercx_cfg038_s cn63xx;
+ struct cvmx_pciercx_cfg038_s cn63xxp1;
};
union cvmx_pciercx_cfg039 {
uint32_t u32;
struct cvmx_pciercx_cfg039_s {
- uint32_t reserved_0_31:32;
+ uint32_t reserved_9_31:23;
+ uint32_t cls:1;
+ uint32_t slsv:7;
+ uint32_t reserved_0_0:1;
} s;
- struct cvmx_pciercx_cfg039_s cn52xx;
- struct cvmx_pciercx_cfg039_s cn52xxp1;
- struct cvmx_pciercx_cfg039_s cn56xx;
- struct cvmx_pciercx_cfg039_s cn56xxp1;
+ struct cvmx_pciercx_cfg039_cn52xx {
+ uint32_t reserved_0_31:32;
+ } cn52xx;
+ struct cvmx_pciercx_cfg039_cn52xx cn52xxp1;
+ struct cvmx_pciercx_cfg039_cn52xx cn56xx;
+ struct cvmx_pciercx_cfg039_cn52xx cn56xxp1;
+ struct cvmx_pciercx_cfg039_s cn63xx;
+ struct cvmx_pciercx_cfg039_cn52xx cn63xxp1;
};
union cvmx_pciercx_cfg040 {
uint32_t u32;
struct cvmx_pciercx_cfg040_s {
+ uint32_t reserved_17_31:15;
+ uint32_t cdl:1;
+ uint32_t reserved_13_15:3;
+ uint32_t cde:1;
+ uint32_t csos:1;
+ uint32_t emc:1;
+ uint32_t tm:3;
+ uint32_t sde:1;
+ uint32_t hasd:1;
+ uint32_t ec:1;
+ uint32_t tls:4;
+ } s;
+ struct cvmx_pciercx_cfg040_cn52xx {
uint32_t reserved_0_31:32;
- } s;
- struct cvmx_pciercx_cfg040_s cn52xx;
- struct cvmx_pciercx_cfg040_s cn52xxp1;
- struct cvmx_pciercx_cfg040_s cn56xx;
- struct cvmx_pciercx_cfg040_s cn56xxp1;
+ } cn52xx;
+ struct cvmx_pciercx_cfg040_cn52xx cn52xxp1;
+ struct cvmx_pciercx_cfg040_cn52xx cn56xx;
+ struct cvmx_pciercx_cfg040_cn52xx cn56xxp1;
+ struct cvmx_pciercx_cfg040_s cn63xx;
+ struct cvmx_pciercx_cfg040_s cn63xxp1;
};
union cvmx_pciercx_cfg041 {
@@ -786,6 +798,8 @@ union cvmx_pciercx_cfg041 {
struct cvmx_pciercx_cfg041_s cn52xxp1;
struct cvmx_pciercx_cfg041_s cn56xx;
struct cvmx_pciercx_cfg041_s cn56xxp1;
+ struct cvmx_pciercx_cfg041_s cn63xx;
+ struct cvmx_pciercx_cfg041_s cn63xxp1;
};
union cvmx_pciercx_cfg042 {
@@ -797,6 +811,8 @@ union cvmx_pciercx_cfg042 {
struct cvmx_pciercx_cfg042_s cn52xxp1;
struct cvmx_pciercx_cfg042_s cn56xx;
struct cvmx_pciercx_cfg042_s cn56xxp1;
+ struct cvmx_pciercx_cfg042_s cn63xx;
+ struct cvmx_pciercx_cfg042_s cn63xxp1;
};
union cvmx_pciercx_cfg064 {
@@ -810,6 +826,8 @@ union cvmx_pciercx_cfg064 {
struct cvmx_pciercx_cfg064_s cn52xxp1;
struct cvmx_pciercx_cfg064_s cn56xx;
struct cvmx_pciercx_cfg064_s cn56xxp1;
+ struct cvmx_pciercx_cfg064_s cn63xx;
+ struct cvmx_pciercx_cfg064_s cn63xxp1;
};
union cvmx_pciercx_cfg065 {
@@ -834,6 +852,8 @@ union cvmx_pciercx_cfg065 {
struct cvmx_pciercx_cfg065_s cn52xxp1;
struct cvmx_pciercx_cfg065_s cn56xx;
struct cvmx_pciercx_cfg065_s cn56xxp1;
+ struct cvmx_pciercx_cfg065_s cn63xx;
+ struct cvmx_pciercx_cfg065_s cn63xxp1;
};
union cvmx_pciercx_cfg066 {
@@ -858,6 +878,8 @@ union cvmx_pciercx_cfg066 {
struct cvmx_pciercx_cfg066_s cn52xxp1;
struct cvmx_pciercx_cfg066_s cn56xx;
struct cvmx_pciercx_cfg066_s cn56xxp1;
+ struct cvmx_pciercx_cfg066_s cn63xx;
+ struct cvmx_pciercx_cfg066_s cn63xxp1;
};
union cvmx_pciercx_cfg067 {
@@ -882,6 +904,8 @@ union cvmx_pciercx_cfg067 {
struct cvmx_pciercx_cfg067_s cn52xxp1;
struct cvmx_pciercx_cfg067_s cn56xx;
struct cvmx_pciercx_cfg067_s cn56xxp1;
+ struct cvmx_pciercx_cfg067_s cn63xx;
+ struct cvmx_pciercx_cfg067_s cn63xxp1;
};
union cvmx_pciercx_cfg068 {
@@ -901,6 +925,8 @@ union cvmx_pciercx_cfg068 {
struct cvmx_pciercx_cfg068_s cn52xxp1;
struct cvmx_pciercx_cfg068_s cn56xx;
struct cvmx_pciercx_cfg068_s cn56xxp1;
+ struct cvmx_pciercx_cfg068_s cn63xx;
+ struct cvmx_pciercx_cfg068_s cn63xxp1;
};
union cvmx_pciercx_cfg069 {
@@ -920,6 +946,8 @@ union cvmx_pciercx_cfg069 {
struct cvmx_pciercx_cfg069_s cn52xxp1;
struct cvmx_pciercx_cfg069_s cn56xx;
struct cvmx_pciercx_cfg069_s cn56xxp1;
+ struct cvmx_pciercx_cfg069_s cn63xx;
+ struct cvmx_pciercx_cfg069_s cn63xxp1;
};
union cvmx_pciercx_cfg070 {
@@ -936,6 +964,8 @@ union cvmx_pciercx_cfg070 {
struct cvmx_pciercx_cfg070_s cn52xxp1;
struct cvmx_pciercx_cfg070_s cn56xx;
struct cvmx_pciercx_cfg070_s cn56xxp1;
+ struct cvmx_pciercx_cfg070_s cn63xx;
+ struct cvmx_pciercx_cfg070_s cn63xxp1;
};
union cvmx_pciercx_cfg071 {
@@ -947,6 +977,8 @@ union cvmx_pciercx_cfg071 {
struct cvmx_pciercx_cfg071_s cn52xxp1;
struct cvmx_pciercx_cfg071_s cn56xx;
struct cvmx_pciercx_cfg071_s cn56xxp1;
+ struct cvmx_pciercx_cfg071_s cn63xx;
+ struct cvmx_pciercx_cfg071_s cn63xxp1;
};
union cvmx_pciercx_cfg072 {
@@ -958,6 +990,8 @@ union cvmx_pciercx_cfg072 {
struct cvmx_pciercx_cfg072_s cn52xxp1;
struct cvmx_pciercx_cfg072_s cn56xx;
struct cvmx_pciercx_cfg072_s cn56xxp1;
+ struct cvmx_pciercx_cfg072_s cn63xx;
+ struct cvmx_pciercx_cfg072_s cn63xxp1;
};
union cvmx_pciercx_cfg073 {
@@ -969,6 +1003,8 @@ union cvmx_pciercx_cfg073 {
struct cvmx_pciercx_cfg073_s cn52xxp1;
struct cvmx_pciercx_cfg073_s cn56xx;
struct cvmx_pciercx_cfg073_s cn56xxp1;
+ struct cvmx_pciercx_cfg073_s cn63xx;
+ struct cvmx_pciercx_cfg073_s cn63xxp1;
};
union cvmx_pciercx_cfg074 {
@@ -980,6 +1016,8 @@ union cvmx_pciercx_cfg074 {
struct cvmx_pciercx_cfg074_s cn52xxp1;
struct cvmx_pciercx_cfg074_s cn56xx;
struct cvmx_pciercx_cfg074_s cn56xxp1;
+ struct cvmx_pciercx_cfg074_s cn63xx;
+ struct cvmx_pciercx_cfg074_s cn63xxp1;
};
union cvmx_pciercx_cfg075 {
@@ -994,6 +1032,8 @@ union cvmx_pciercx_cfg075 {
struct cvmx_pciercx_cfg075_s cn52xxp1;
struct cvmx_pciercx_cfg075_s cn56xx;
struct cvmx_pciercx_cfg075_s cn56xxp1;
+ struct cvmx_pciercx_cfg075_s cn63xx;
+ struct cvmx_pciercx_cfg075_s cn63xxp1;
};
union cvmx_pciercx_cfg076 {
@@ -1013,6 +1053,8 @@ union cvmx_pciercx_cfg076 {
struct cvmx_pciercx_cfg076_s cn52xxp1;
struct cvmx_pciercx_cfg076_s cn56xx;
struct cvmx_pciercx_cfg076_s cn56xxp1;
+ struct cvmx_pciercx_cfg076_s cn63xx;
+ struct cvmx_pciercx_cfg076_s cn63xxp1;
};
union cvmx_pciercx_cfg077 {
@@ -1025,6 +1067,8 @@ union cvmx_pciercx_cfg077 {
struct cvmx_pciercx_cfg077_s cn52xxp1;
struct cvmx_pciercx_cfg077_s cn56xx;
struct cvmx_pciercx_cfg077_s cn56xxp1;
+ struct cvmx_pciercx_cfg077_s cn63xx;
+ struct cvmx_pciercx_cfg077_s cn63xxp1;
};
union cvmx_pciercx_cfg448 {
@@ -1037,6 +1081,8 @@ union cvmx_pciercx_cfg448 {
struct cvmx_pciercx_cfg448_s cn52xxp1;
struct cvmx_pciercx_cfg448_s cn56xx;
struct cvmx_pciercx_cfg448_s cn56xxp1;
+ struct cvmx_pciercx_cfg448_s cn63xx;
+ struct cvmx_pciercx_cfg448_s cn63xxp1;
};
union cvmx_pciercx_cfg449 {
@@ -1048,6 +1094,8 @@ union cvmx_pciercx_cfg449 {
struct cvmx_pciercx_cfg449_s cn52xxp1;
struct cvmx_pciercx_cfg449_s cn56xx;
struct cvmx_pciercx_cfg449_s cn56xxp1;
+ struct cvmx_pciercx_cfg449_s cn63xx;
+ struct cvmx_pciercx_cfg449_s cn63xxp1;
};
union cvmx_pciercx_cfg450 {
@@ -1064,6 +1112,8 @@ union cvmx_pciercx_cfg450 {
struct cvmx_pciercx_cfg450_s cn52xxp1;
struct cvmx_pciercx_cfg450_s cn56xx;
struct cvmx_pciercx_cfg450_s cn56xxp1;
+ struct cvmx_pciercx_cfg450_s cn63xx;
+ struct cvmx_pciercx_cfg450_s cn63xxp1;
};
union cvmx_pciercx_cfg451 {
@@ -1080,6 +1130,8 @@ union cvmx_pciercx_cfg451 {
struct cvmx_pciercx_cfg451_s cn52xxp1;
struct cvmx_pciercx_cfg451_s cn56xx;
struct cvmx_pciercx_cfg451_s cn56xxp1;
+ struct cvmx_pciercx_cfg451_s cn63xx;
+ struct cvmx_pciercx_cfg451_s cn63xxp1;
};
union cvmx_pciercx_cfg452 {
@@ -1103,6 +1155,8 @@ union cvmx_pciercx_cfg452 {
struct cvmx_pciercx_cfg452_s cn52xxp1;
struct cvmx_pciercx_cfg452_s cn56xx;
struct cvmx_pciercx_cfg452_s cn56xxp1;
+ struct cvmx_pciercx_cfg452_s cn63xx;
+ struct cvmx_pciercx_cfg452_s cn63xxp1;
};
union cvmx_pciercx_cfg453 {
@@ -1118,6 +1172,8 @@ union cvmx_pciercx_cfg453 {
struct cvmx_pciercx_cfg453_s cn52xxp1;
struct cvmx_pciercx_cfg453_s cn56xx;
struct cvmx_pciercx_cfg453_s cn56xxp1;
+ struct cvmx_pciercx_cfg453_s cn63xx;
+ struct cvmx_pciercx_cfg453_s cn63xxp1;
};
union cvmx_pciercx_cfg454 {
@@ -1136,6 +1192,8 @@ union cvmx_pciercx_cfg454 {
struct cvmx_pciercx_cfg454_s cn52xxp1;
struct cvmx_pciercx_cfg454_s cn56xx;
struct cvmx_pciercx_cfg454_s cn56xxp1;
+ struct cvmx_pciercx_cfg454_s cn63xx;
+ struct cvmx_pciercx_cfg454_s cn63xxp1;
};
union cvmx_pciercx_cfg455 {
@@ -1165,6 +1223,8 @@ union cvmx_pciercx_cfg455 {
struct cvmx_pciercx_cfg455_s cn52xxp1;
struct cvmx_pciercx_cfg455_s cn56xx;
struct cvmx_pciercx_cfg455_s cn56xxp1;
+ struct cvmx_pciercx_cfg455_s cn63xx;
+ struct cvmx_pciercx_cfg455_s cn63xxp1;
};
union cvmx_pciercx_cfg456 {
@@ -1178,6 +1238,8 @@ union cvmx_pciercx_cfg456 {
struct cvmx_pciercx_cfg456_s cn52xxp1;
struct cvmx_pciercx_cfg456_s cn56xx;
struct cvmx_pciercx_cfg456_s cn56xxp1;
+ struct cvmx_pciercx_cfg456_s cn63xx;
+ struct cvmx_pciercx_cfg456_s cn63xxp1;
};
union cvmx_pciercx_cfg458 {
@@ -1189,6 +1251,8 @@ union cvmx_pciercx_cfg458 {
struct cvmx_pciercx_cfg458_s cn52xxp1;
struct cvmx_pciercx_cfg458_s cn56xx;
struct cvmx_pciercx_cfg458_s cn56xxp1;
+ struct cvmx_pciercx_cfg458_s cn63xx;
+ struct cvmx_pciercx_cfg458_s cn63xxp1;
};
union cvmx_pciercx_cfg459 {
@@ -1200,6 +1264,8 @@ union cvmx_pciercx_cfg459 {
struct cvmx_pciercx_cfg459_s cn52xxp1;
struct cvmx_pciercx_cfg459_s cn56xx;
struct cvmx_pciercx_cfg459_s cn56xxp1;
+ struct cvmx_pciercx_cfg459_s cn63xx;
+ struct cvmx_pciercx_cfg459_s cn63xxp1;
};
union cvmx_pciercx_cfg460 {
@@ -1213,6 +1279,8 @@ union cvmx_pciercx_cfg460 {
struct cvmx_pciercx_cfg460_s cn52xxp1;
struct cvmx_pciercx_cfg460_s cn56xx;
struct cvmx_pciercx_cfg460_s cn56xxp1;
+ struct cvmx_pciercx_cfg460_s cn63xx;
+ struct cvmx_pciercx_cfg460_s cn63xxp1;
};
union cvmx_pciercx_cfg461 {
@@ -1226,6 +1294,8 @@ union cvmx_pciercx_cfg461 {
struct cvmx_pciercx_cfg461_s cn52xxp1;
struct cvmx_pciercx_cfg461_s cn56xx;
struct cvmx_pciercx_cfg461_s cn56xxp1;
+ struct cvmx_pciercx_cfg461_s cn63xx;
+ struct cvmx_pciercx_cfg461_s cn63xxp1;
};
union cvmx_pciercx_cfg462 {
@@ -1239,6 +1309,8 @@ union cvmx_pciercx_cfg462 {
struct cvmx_pciercx_cfg462_s cn52xxp1;
struct cvmx_pciercx_cfg462_s cn56xx;
struct cvmx_pciercx_cfg462_s cn56xxp1;
+ struct cvmx_pciercx_cfg462_s cn63xx;
+ struct cvmx_pciercx_cfg462_s cn63xxp1;
};
union cvmx_pciercx_cfg463 {
@@ -1253,6 +1325,8 @@ union cvmx_pciercx_cfg463 {
struct cvmx_pciercx_cfg463_s cn52xxp1;
struct cvmx_pciercx_cfg463_s cn56xx;
struct cvmx_pciercx_cfg463_s cn56xxp1;
+ struct cvmx_pciercx_cfg463_s cn63xx;
+ struct cvmx_pciercx_cfg463_s cn63xxp1;
};
union cvmx_pciercx_cfg464 {
@@ -1267,6 +1341,8 @@ union cvmx_pciercx_cfg464 {
struct cvmx_pciercx_cfg464_s cn52xxp1;
struct cvmx_pciercx_cfg464_s cn56xx;
struct cvmx_pciercx_cfg464_s cn56xxp1;
+ struct cvmx_pciercx_cfg464_s cn63xx;
+ struct cvmx_pciercx_cfg464_s cn63xxp1;
};
union cvmx_pciercx_cfg465 {
@@ -1281,6 +1357,8 @@ union cvmx_pciercx_cfg465 {
struct cvmx_pciercx_cfg465_s cn52xxp1;
struct cvmx_pciercx_cfg465_s cn56xx;
struct cvmx_pciercx_cfg465_s cn56xxp1;
+ struct cvmx_pciercx_cfg465_s cn63xx;
+ struct cvmx_pciercx_cfg465_s cn63xxp1;
};
union cvmx_pciercx_cfg466 {
@@ -1298,6 +1376,8 @@ union cvmx_pciercx_cfg466 {
struct cvmx_pciercx_cfg466_s cn52xxp1;
struct cvmx_pciercx_cfg466_s cn56xx;
struct cvmx_pciercx_cfg466_s cn56xxp1;
+ struct cvmx_pciercx_cfg466_s cn63xx;
+ struct cvmx_pciercx_cfg466_s cn63xxp1;
};
union cvmx_pciercx_cfg467 {
@@ -1313,6 +1393,8 @@ union cvmx_pciercx_cfg467 {
struct cvmx_pciercx_cfg467_s cn52xxp1;
struct cvmx_pciercx_cfg467_s cn56xx;
struct cvmx_pciercx_cfg467_s cn56xxp1;
+ struct cvmx_pciercx_cfg467_s cn63xx;
+ struct cvmx_pciercx_cfg467_s cn63xxp1;
};
union cvmx_pciercx_cfg468 {
@@ -1328,6 +1410,8 @@ union cvmx_pciercx_cfg468 {
struct cvmx_pciercx_cfg468_s cn52xxp1;
struct cvmx_pciercx_cfg468_s cn56xx;
struct cvmx_pciercx_cfg468_s cn56xxp1;
+ struct cvmx_pciercx_cfg468_s cn63xx;
+ struct cvmx_pciercx_cfg468_s cn63xxp1;
};
union cvmx_pciercx_cfg490 {
@@ -1342,6 +1426,8 @@ union cvmx_pciercx_cfg490 {
struct cvmx_pciercx_cfg490_s cn52xxp1;
struct cvmx_pciercx_cfg490_s cn56xx;
struct cvmx_pciercx_cfg490_s cn56xxp1;
+ struct cvmx_pciercx_cfg490_s cn63xx;
+ struct cvmx_pciercx_cfg490_s cn63xxp1;
};
union cvmx_pciercx_cfg491 {
@@ -1356,6 +1442,8 @@ union cvmx_pciercx_cfg491 {
struct cvmx_pciercx_cfg491_s cn52xxp1;
struct cvmx_pciercx_cfg491_s cn56xx;
struct cvmx_pciercx_cfg491_s cn56xxp1;
+ struct cvmx_pciercx_cfg491_s cn63xx;
+ struct cvmx_pciercx_cfg491_s cn63xxp1;
};
union cvmx_pciercx_cfg492 {
@@ -1370,6 +1458,23 @@ union cvmx_pciercx_cfg492 {
struct cvmx_pciercx_cfg492_s cn52xxp1;
struct cvmx_pciercx_cfg492_s cn56xx;
struct cvmx_pciercx_cfg492_s cn56xxp1;
+ struct cvmx_pciercx_cfg492_s cn63xx;
+ struct cvmx_pciercx_cfg492_s cn63xxp1;
+};
+
+union cvmx_pciercx_cfg515 {
+ uint32_t u32;
+ struct cvmx_pciercx_cfg515_s {
+ uint32_t reserved_21_31:11;
+ uint32_t s_d_e:1;
+ uint32_t ctcrb:1;
+ uint32_t cpyts:1;
+ uint32_t dsc:1;
+ uint32_t le:9;
+ uint32_t n_fts:8;
+ } s;
+ struct cvmx_pciercx_cfg515_s cn63xx;
+ struct cvmx_pciercx_cfg515_s cn63xxp1;
};
union cvmx_pciercx_cfg516 {
@@ -1381,6 +1486,8 @@ union cvmx_pciercx_cfg516 {
struct cvmx_pciercx_cfg516_s cn52xxp1;
struct cvmx_pciercx_cfg516_s cn56xx;
struct cvmx_pciercx_cfg516_s cn56xxp1;
+ struct cvmx_pciercx_cfg516_s cn63xx;
+ struct cvmx_pciercx_cfg516_s cn63xxp1;
};
union cvmx_pciercx_cfg517 {
@@ -1392,6 +1499,8 @@ union cvmx_pciercx_cfg517 {
struct cvmx_pciercx_cfg517_s cn52xxp1;
struct cvmx_pciercx_cfg517_s cn56xx;
struct cvmx_pciercx_cfg517_s cn56xxp1;
+ struct cvmx_pciercx_cfg517_s cn63xx;
+ struct cvmx_pciercx_cfg517_s cn63xxp1;
};
#endif
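
For context, the newly added PCIERCX_CFG515 union is decoded the same way as the other config-space unions in this header: load the raw 32-bit value into .u32 and pick fields out of .s. A minimal sketch, assuming the raw register value has already been fetched from the RC config space by the platform's usual accessor; the helper name and its parameters below are illustrative only and are not part of this patch:

#include <asm/octeon/cvmx-pciercx-defs.h>

/* Illustrative only: update the n_fts field of a raw CFG515 value. */
static uint32_t cfg515_set_n_fts(uint32_t raw, uint32_t n_fts)
{
	union cvmx_pciercx_cfg515 cfg515;

	cfg515.u32 = raw;		/* value previously read from RC config space */
	cfg515.s.n_fts = n_fts & 0xff;	/* n_fts is an 8-bit field in the layout above */
	return cfg515.u32;		/* caller writes this back with its own accessor */
}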
diff --git a/arch/mips/include/asm/octeon/cvmx-pescx-defs.h b/arch/mips/include/asm/octeon/cvmx-pescx-defs.h
index f40cfaf84454..aef84851a94c 100644
--- a/arch/mips/include/asm/octeon/cvmx-pescx-defs.h
+++ b/arch/mips/include/asm/octeon/cvmx-pescx-defs.h
@@ -4,7 +4,7 @@
* Contact: support@caviumnetworks.com
* This file is part of the OCTEON SDK
*
- * Copyright (c) 2003-2008 Cavium Networks
+ * Copyright (c) 2003-2010 Cavium Networks
*
* This file is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License, Version 2, as
@@ -28,38 +28,22 @@
#ifndef __CVMX_PESCX_DEFS_H__
#define __CVMX_PESCX_DEFS_H__
-#define CVMX_PESCX_BIST_STATUS(block_id) \
- CVMX_ADD_IO_SEG(0x00011800C8000018ull + (((block_id) & 1) * 0x8000000ull))
-#define CVMX_PESCX_BIST_STATUS2(block_id) \
- CVMX_ADD_IO_SEG(0x00011800C8000418ull + (((block_id) & 1) * 0x8000000ull))
-#define CVMX_PESCX_CFG_RD(block_id) \
- CVMX_ADD_IO_SEG(0x00011800C8000030ull + (((block_id) & 1) * 0x8000000ull))
-#define CVMX_PESCX_CFG_WR(block_id) \
- CVMX_ADD_IO_SEG(0x00011800C8000028ull + (((block_id) & 1) * 0x8000000ull))
-#define CVMX_PESCX_CPL_LUT_VALID(block_id) \
- CVMX_ADD_IO_SEG(0x00011800C8000098ull + (((block_id) & 1) * 0x8000000ull))
-#define CVMX_PESCX_CTL_STATUS(block_id) \
- CVMX_ADD_IO_SEG(0x00011800C8000000ull + (((block_id) & 1) * 0x8000000ull))
-#define CVMX_PESCX_CTL_STATUS2(block_id) \
- CVMX_ADD_IO_SEG(0x00011800C8000400ull + (((block_id) & 1) * 0x8000000ull))
-#define CVMX_PESCX_DBG_INFO(block_id) \
- CVMX_ADD_IO_SEG(0x00011800C8000008ull + (((block_id) & 1) * 0x8000000ull))
-#define CVMX_PESCX_DBG_INFO_EN(block_id) \
- CVMX_ADD_IO_SEG(0x00011800C80000A0ull + (((block_id) & 1) * 0x8000000ull))
-#define CVMX_PESCX_DIAG_STATUS(block_id) \
- CVMX_ADD_IO_SEG(0x00011800C8000020ull + (((block_id) & 1) * 0x8000000ull))
-#define CVMX_PESCX_P2N_BAR0_START(block_id) \
- CVMX_ADD_IO_SEG(0x00011800C8000080ull + (((block_id) & 1) * 0x8000000ull))
-#define CVMX_PESCX_P2N_BAR1_START(block_id) \
- CVMX_ADD_IO_SEG(0x00011800C8000088ull + (((block_id) & 1) * 0x8000000ull))
-#define CVMX_PESCX_P2N_BAR2_START(block_id) \
- CVMX_ADD_IO_SEG(0x00011800C8000090ull + (((block_id) & 1) * 0x8000000ull))
-#define CVMX_PESCX_P2P_BARX_END(offset, block_id) \
- CVMX_ADD_IO_SEG(0x00011800C8000048ull + (((offset) & 3) * 16) + (((block_id) & 1) * 0x8000000ull))
-#define CVMX_PESCX_P2P_BARX_START(offset, block_id) \
- CVMX_ADD_IO_SEG(0x00011800C8000040ull + (((offset) & 3) * 16) + (((block_id) & 1) * 0x8000000ull))
-#define CVMX_PESCX_TLP_CREDITS(block_id) \
- CVMX_ADD_IO_SEG(0x00011800C8000038ull + (((block_id) & 1) * 0x8000000ull))
+#define CVMX_PESCX_BIST_STATUS(block_id) (CVMX_ADD_IO_SEG(0x00011800C8000018ull) + ((block_id) & 1) * 0x8000000ull)
+#define CVMX_PESCX_BIST_STATUS2(block_id) (CVMX_ADD_IO_SEG(0x00011800C8000418ull) + ((block_id) & 1) * 0x8000000ull)
+#define CVMX_PESCX_CFG_RD(block_id) (CVMX_ADD_IO_SEG(0x00011800C8000030ull) + ((block_id) & 1) * 0x8000000ull)
+#define CVMX_PESCX_CFG_WR(block_id) (CVMX_ADD_IO_SEG(0x00011800C8000028ull) + ((block_id) & 1) * 0x8000000ull)
+#define CVMX_PESCX_CPL_LUT_VALID(block_id) (CVMX_ADD_IO_SEG(0x00011800C8000098ull) + ((block_id) & 1) * 0x8000000ull)
+#define CVMX_PESCX_CTL_STATUS(block_id) (CVMX_ADD_IO_SEG(0x00011800C8000000ull) + ((block_id) & 1) * 0x8000000ull)
+#define CVMX_PESCX_CTL_STATUS2(block_id) (CVMX_ADD_IO_SEG(0x00011800C8000400ull) + ((block_id) & 1) * 0x8000000ull)
+#define CVMX_PESCX_DBG_INFO(block_id) (CVMX_ADD_IO_SEG(0x00011800C8000008ull) + ((block_id) & 1) * 0x8000000ull)
+#define CVMX_PESCX_DBG_INFO_EN(block_id) (CVMX_ADD_IO_SEG(0x00011800C80000A0ull) + ((block_id) & 1) * 0x8000000ull)
+#define CVMX_PESCX_DIAG_STATUS(block_id) (CVMX_ADD_IO_SEG(0x00011800C8000020ull) + ((block_id) & 1) * 0x8000000ull)
+#define CVMX_PESCX_P2N_BAR0_START(block_id) (CVMX_ADD_IO_SEG(0x00011800C8000080ull) + ((block_id) & 1) * 0x8000000ull)
+#define CVMX_PESCX_P2N_BAR1_START(block_id) (CVMX_ADD_IO_SEG(0x00011800C8000088ull) + ((block_id) & 1) * 0x8000000ull)
+#define CVMX_PESCX_P2N_BAR2_START(block_id) (CVMX_ADD_IO_SEG(0x00011800C8000090ull) + ((block_id) & 1) * 0x8000000ull)
+#define CVMX_PESCX_P2P_BARX_END(offset, block_id) (CVMX_ADD_IO_SEG(0x00011800C8000048ull) + (((offset) & 3) + ((block_id) & 1) * 0x800000ull) * 16)
+#define CVMX_PESCX_P2P_BARX_START(offset, block_id) (CVMX_ADD_IO_SEG(0x00011800C8000040ull) + (((offset) & 3) + ((block_id) & 1) * 0x800000ull) * 16)
+#define CVMX_PESCX_TLP_CREDITS(block_id) (CVMX_ADD_IO_SEG(0x00011800C8000038ull) + ((block_id) & 1) * 0x8000000ull)
union cvmx_pescx_bist_status {
uint64_t u64;
diff --git a/arch/mips/include/asm/octeon/cvmx-pexp-defs.h b/arch/mips/include/asm/octeon/cvmx-pexp-defs.h
index 5ea5dc571b54..5ab8679d89af 100644
--- a/arch/mips/include/asm/octeon/cvmx-pexp-defs.h
+++ b/arch/mips/include/asm/octeon/cvmx-pexp-defs.h
@@ -4,7 +4,7 @@
* Contact: support@caviumnetworks.com
* This file is part of the OCTEON SDK
*
- * Copyright (c) 2003-2008 Cavium Networks
+ * Copyright (c) 2003-2010 Cavium Networks
*
* This file is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License, Version 2, as
@@ -35,195 +35,191 @@
#ifndef __CVMX_PEXP_DEFS_H__
#define __CVMX_PEXP_DEFS_H__
-#define CVMX_PEXP_NPEI_BAR1_INDEXX(offset) \
- CVMX_ADD_IO_SEG(0x00011F0000008000ull + (((offset) & 31) * 16))
-#define CVMX_PEXP_NPEI_BIST_STATUS \
- CVMX_ADD_IO_SEG(0x00011F0000008580ull)
-#define CVMX_PEXP_NPEI_BIST_STATUS2 \
- CVMX_ADD_IO_SEG(0x00011F0000008680ull)
-#define CVMX_PEXP_NPEI_CTL_PORT0 \
- CVMX_ADD_IO_SEG(0x00011F0000008250ull)
-#define CVMX_PEXP_NPEI_CTL_PORT1 \
- CVMX_ADD_IO_SEG(0x00011F0000008260ull)
-#define CVMX_PEXP_NPEI_CTL_STATUS \
- CVMX_ADD_IO_SEG(0x00011F0000008570ull)
-#define CVMX_PEXP_NPEI_CTL_STATUS2 \
- CVMX_ADD_IO_SEG(0x00011F000000BC00ull)
-#define CVMX_PEXP_NPEI_DATA_OUT_CNT \
- CVMX_ADD_IO_SEG(0x00011F00000085F0ull)
-#define CVMX_PEXP_NPEI_DBG_DATA \
- CVMX_ADD_IO_SEG(0x00011F0000008510ull)
-#define CVMX_PEXP_NPEI_DBG_SELECT \
- CVMX_ADD_IO_SEG(0x00011F0000008500ull)
-#define CVMX_PEXP_NPEI_DMA0_INT_LEVEL \
- CVMX_ADD_IO_SEG(0x00011F00000085C0ull)
-#define CVMX_PEXP_NPEI_DMA1_INT_LEVEL \
- CVMX_ADD_IO_SEG(0x00011F00000085D0ull)
-#define CVMX_PEXP_NPEI_DMAX_COUNTS(offset) \
- CVMX_ADD_IO_SEG(0x00011F0000008450ull + (((offset) & 7) * 16))
-#define CVMX_PEXP_NPEI_DMAX_DBELL(offset) \
- CVMX_ADD_IO_SEG(0x00011F00000083B0ull + (((offset) & 7) * 16))
-#define CVMX_PEXP_NPEI_DMAX_IBUFF_SADDR(offset) \
- CVMX_ADD_IO_SEG(0x00011F0000008400ull + (((offset) & 7) * 16))
-#define CVMX_PEXP_NPEI_DMAX_NADDR(offset) \
- CVMX_ADD_IO_SEG(0x00011F00000084A0ull + (((offset) & 7) * 16))
-#define CVMX_PEXP_NPEI_DMA_CNTS \
- CVMX_ADD_IO_SEG(0x00011F00000085E0ull)
-#define CVMX_PEXP_NPEI_DMA_CONTROL \
- CVMX_ADD_IO_SEG(0x00011F00000083A0ull)
-#define CVMX_PEXP_NPEI_INT_A_ENB \
- CVMX_ADD_IO_SEG(0x00011F0000008560ull)
-#define CVMX_PEXP_NPEI_INT_A_ENB2 \
- CVMX_ADD_IO_SEG(0x00011F000000BCE0ull)
-#define CVMX_PEXP_NPEI_INT_A_SUM \
- CVMX_ADD_IO_SEG(0x00011F0000008550ull)
-#define CVMX_PEXP_NPEI_INT_ENB \
- CVMX_ADD_IO_SEG(0x00011F0000008540ull)
-#define CVMX_PEXP_NPEI_INT_ENB2 \
- CVMX_ADD_IO_SEG(0x00011F000000BCD0ull)
-#define CVMX_PEXP_NPEI_INT_INFO \
- CVMX_ADD_IO_SEG(0x00011F0000008590ull)
-#define CVMX_PEXP_NPEI_INT_SUM \
- CVMX_ADD_IO_SEG(0x00011F0000008530ull)
-#define CVMX_PEXP_NPEI_INT_SUM2 \
- CVMX_ADD_IO_SEG(0x00011F000000BCC0ull)
-#define CVMX_PEXP_NPEI_LAST_WIN_RDATA0 \
- CVMX_ADD_IO_SEG(0x00011F0000008600ull)
-#define CVMX_PEXP_NPEI_LAST_WIN_RDATA1 \
- CVMX_ADD_IO_SEG(0x00011F0000008610ull)
-#define CVMX_PEXP_NPEI_MEM_ACCESS_CTL \
- CVMX_ADD_IO_SEG(0x00011F00000084F0ull)
-#define CVMX_PEXP_NPEI_MEM_ACCESS_SUBIDX(offset) \
- CVMX_ADD_IO_SEG(0x00011F0000008280ull + (((offset) & 31) * 16) - 16 * 12)
-#define CVMX_PEXP_NPEI_MSI_ENB0 \
- CVMX_ADD_IO_SEG(0x00011F000000BC50ull)
-#define CVMX_PEXP_NPEI_MSI_ENB1 \
- CVMX_ADD_IO_SEG(0x00011F000000BC60ull)
-#define CVMX_PEXP_NPEI_MSI_ENB2 \
- CVMX_ADD_IO_SEG(0x00011F000000BC70ull)
-#define CVMX_PEXP_NPEI_MSI_ENB3 \
- CVMX_ADD_IO_SEG(0x00011F000000BC80ull)
-#define CVMX_PEXP_NPEI_MSI_RCV0 \
- CVMX_ADD_IO_SEG(0x00011F000000BC10ull)
-#define CVMX_PEXP_NPEI_MSI_RCV1 \
- CVMX_ADD_IO_SEG(0x00011F000000BC20ull)
-#define CVMX_PEXP_NPEI_MSI_RCV2 \
- CVMX_ADD_IO_SEG(0x00011F000000BC30ull)
-#define CVMX_PEXP_NPEI_MSI_RCV3 \
- CVMX_ADD_IO_SEG(0x00011F000000BC40ull)
-#define CVMX_PEXP_NPEI_MSI_RD_MAP \
- CVMX_ADD_IO_SEG(0x00011F000000BCA0ull)
-#define CVMX_PEXP_NPEI_MSI_W1C_ENB0 \
- CVMX_ADD_IO_SEG(0x00011F000000BCF0ull)
-#define CVMX_PEXP_NPEI_MSI_W1C_ENB1 \
- CVMX_ADD_IO_SEG(0x00011F000000BD00ull)
-#define CVMX_PEXP_NPEI_MSI_W1C_ENB2 \
- CVMX_ADD_IO_SEG(0x00011F000000BD10ull)
-#define CVMX_PEXP_NPEI_MSI_W1C_ENB3 \
- CVMX_ADD_IO_SEG(0x00011F000000BD20ull)
-#define CVMX_PEXP_NPEI_MSI_W1S_ENB0 \
- CVMX_ADD_IO_SEG(0x00011F000000BD30ull)
-#define CVMX_PEXP_NPEI_MSI_W1S_ENB1 \
- CVMX_ADD_IO_SEG(0x00011F000000BD40ull)
-#define CVMX_PEXP_NPEI_MSI_W1S_ENB2 \
- CVMX_ADD_IO_SEG(0x00011F000000BD50ull)
-#define CVMX_PEXP_NPEI_MSI_W1S_ENB3 \
- CVMX_ADD_IO_SEG(0x00011F000000BD60ull)
-#define CVMX_PEXP_NPEI_MSI_WR_MAP \
- CVMX_ADD_IO_SEG(0x00011F000000BC90ull)
-#define CVMX_PEXP_NPEI_PCIE_CREDIT_CNT \
- CVMX_ADD_IO_SEG(0x00011F000000BD70ull)
-#define CVMX_PEXP_NPEI_PCIE_MSI_RCV \
- CVMX_ADD_IO_SEG(0x00011F000000BCB0ull)
-#define CVMX_PEXP_NPEI_PCIE_MSI_RCV_B1 \
- CVMX_ADD_IO_SEG(0x00011F0000008650ull)
-#define CVMX_PEXP_NPEI_PCIE_MSI_RCV_B2 \
- CVMX_ADD_IO_SEG(0x00011F0000008660ull)
-#define CVMX_PEXP_NPEI_PCIE_MSI_RCV_B3 \
- CVMX_ADD_IO_SEG(0x00011F0000008670ull)
-#define CVMX_PEXP_NPEI_PKTX_CNTS(offset) \
- CVMX_ADD_IO_SEG(0x00011F000000A400ull + (((offset) & 31) * 16))
-#define CVMX_PEXP_NPEI_PKTX_INSTR_BADDR(offset) \
- CVMX_ADD_IO_SEG(0x00011F000000A800ull + (((offset) & 31) * 16))
-#define CVMX_PEXP_NPEI_PKTX_INSTR_BAOFF_DBELL(offset) \
- CVMX_ADD_IO_SEG(0x00011F000000AC00ull + (((offset) & 31) * 16))
-#define CVMX_PEXP_NPEI_PKTX_INSTR_FIFO_RSIZE(offset) \
- CVMX_ADD_IO_SEG(0x00011F000000B000ull + (((offset) & 31) * 16))
-#define CVMX_PEXP_NPEI_PKTX_INSTR_HEADER(offset) \
- CVMX_ADD_IO_SEG(0x00011F000000B400ull + (((offset) & 31) * 16))
-#define CVMX_PEXP_NPEI_PKTX_IN_BP(offset) \
- CVMX_ADD_IO_SEG(0x00011F000000B800ull + (((offset) & 31) * 16))
-#define CVMX_PEXP_NPEI_PKTX_SLIST_BADDR(offset) \
- CVMX_ADD_IO_SEG(0x00011F0000009400ull + (((offset) & 31) * 16))
-#define CVMX_PEXP_NPEI_PKTX_SLIST_BAOFF_DBELL(offset) \
- CVMX_ADD_IO_SEG(0x00011F0000009800ull + (((offset) & 31) * 16))
-#define CVMX_PEXP_NPEI_PKTX_SLIST_FIFO_RSIZE(offset) \
- CVMX_ADD_IO_SEG(0x00011F0000009C00ull + (((offset) & 31) * 16))
-#define CVMX_PEXP_NPEI_PKT_CNT_INT \
- CVMX_ADD_IO_SEG(0x00011F0000009110ull)
-#define CVMX_PEXP_NPEI_PKT_CNT_INT_ENB \
- CVMX_ADD_IO_SEG(0x00011F0000009130ull)
-#define CVMX_PEXP_NPEI_PKT_DATA_OUT_ES \
- CVMX_ADD_IO_SEG(0x00011F00000090B0ull)
-#define CVMX_PEXP_NPEI_PKT_DATA_OUT_NS \
- CVMX_ADD_IO_SEG(0x00011F00000090A0ull)
-#define CVMX_PEXP_NPEI_PKT_DATA_OUT_ROR \
- CVMX_ADD_IO_SEG(0x00011F0000009090ull)
-#define CVMX_PEXP_NPEI_PKT_DPADDR \
- CVMX_ADD_IO_SEG(0x00011F0000009080ull)
-#define CVMX_PEXP_NPEI_PKT_INPUT_CONTROL \
- CVMX_ADD_IO_SEG(0x00011F0000009150ull)
-#define CVMX_PEXP_NPEI_PKT_INSTR_ENB \
- CVMX_ADD_IO_SEG(0x00011F0000009000ull)
-#define CVMX_PEXP_NPEI_PKT_INSTR_RD_SIZE \
- CVMX_ADD_IO_SEG(0x00011F0000009190ull)
-#define CVMX_PEXP_NPEI_PKT_INSTR_SIZE \
- CVMX_ADD_IO_SEG(0x00011F0000009020ull)
-#define CVMX_PEXP_NPEI_PKT_INT_LEVELS \
- CVMX_ADD_IO_SEG(0x00011F0000009100ull)
-#define CVMX_PEXP_NPEI_PKT_IN_BP \
- CVMX_ADD_IO_SEG(0x00011F00000086B0ull)
-#define CVMX_PEXP_NPEI_PKT_IN_DONEX_CNTS(offset) \
- CVMX_ADD_IO_SEG(0x00011F000000A000ull + (((offset) & 31) * 16))
-#define CVMX_PEXP_NPEI_PKT_IN_INSTR_COUNTS \
- CVMX_ADD_IO_SEG(0x00011F00000086A0ull)
-#define CVMX_PEXP_NPEI_PKT_IN_PCIE_PORT \
- CVMX_ADD_IO_SEG(0x00011F00000091A0ull)
-#define CVMX_PEXP_NPEI_PKT_IPTR \
- CVMX_ADD_IO_SEG(0x00011F0000009070ull)
-#define CVMX_PEXP_NPEI_PKT_OUTPUT_WMARK \
- CVMX_ADD_IO_SEG(0x00011F0000009160ull)
-#define CVMX_PEXP_NPEI_PKT_OUT_BMODE \
- CVMX_ADD_IO_SEG(0x00011F00000090D0ull)
-#define CVMX_PEXP_NPEI_PKT_OUT_ENB \
- CVMX_ADD_IO_SEG(0x00011F0000009010ull)
-#define CVMX_PEXP_NPEI_PKT_PCIE_PORT \
- CVMX_ADD_IO_SEG(0x00011F00000090E0ull)
-#define CVMX_PEXP_NPEI_PKT_PORT_IN_RST \
- CVMX_ADD_IO_SEG(0x00011F0000008690ull)
-#define CVMX_PEXP_NPEI_PKT_SLIST_ES \
- CVMX_ADD_IO_SEG(0x00011F0000009050ull)
-#define CVMX_PEXP_NPEI_PKT_SLIST_ID_SIZE \
- CVMX_ADD_IO_SEG(0x00011F0000009180ull)
-#define CVMX_PEXP_NPEI_PKT_SLIST_NS \
- CVMX_ADD_IO_SEG(0x00011F0000009040ull)
-#define CVMX_PEXP_NPEI_PKT_SLIST_ROR \
- CVMX_ADD_IO_SEG(0x00011F0000009030ull)
-#define CVMX_PEXP_NPEI_PKT_TIME_INT \
- CVMX_ADD_IO_SEG(0x00011F0000009120ull)
-#define CVMX_PEXP_NPEI_PKT_TIME_INT_ENB \
- CVMX_ADD_IO_SEG(0x00011F0000009140ull)
-#define CVMX_PEXP_NPEI_RSL_INT_BLOCKS \
- CVMX_ADD_IO_SEG(0x00011F0000008520ull)
-#define CVMX_PEXP_NPEI_SCRATCH_1 \
- CVMX_ADD_IO_SEG(0x00011F0000008270ull)
-#define CVMX_PEXP_NPEI_STATE1 \
- CVMX_ADD_IO_SEG(0x00011F0000008620ull)
-#define CVMX_PEXP_NPEI_STATE2 \
- CVMX_ADD_IO_SEG(0x00011F0000008630ull)
-#define CVMX_PEXP_NPEI_STATE3 \
- CVMX_ADD_IO_SEG(0x00011F0000008640ull)
-#define CVMX_PEXP_NPEI_WINDOW_CTL \
- CVMX_ADD_IO_SEG(0x00011F0000008380ull)
+#define CVMX_PEXP_NPEI_BAR1_INDEXX(offset) (CVMX_ADD_IO_SEG(0x00011F0000008000ull) + ((offset) & 31) * 16)
+#define CVMX_PEXP_NPEI_BIST_STATUS (CVMX_ADD_IO_SEG(0x00011F0000008580ull))
+#define CVMX_PEXP_NPEI_BIST_STATUS2 (CVMX_ADD_IO_SEG(0x00011F0000008680ull))
+#define CVMX_PEXP_NPEI_CTL_PORT0 (CVMX_ADD_IO_SEG(0x00011F0000008250ull))
+#define CVMX_PEXP_NPEI_CTL_PORT1 (CVMX_ADD_IO_SEG(0x00011F0000008260ull))
+#define CVMX_PEXP_NPEI_CTL_STATUS (CVMX_ADD_IO_SEG(0x00011F0000008570ull))
+#define CVMX_PEXP_NPEI_CTL_STATUS2 (CVMX_ADD_IO_SEG(0x00011F000000BC00ull))
+#define CVMX_PEXP_NPEI_DATA_OUT_CNT (CVMX_ADD_IO_SEG(0x00011F00000085F0ull))
+#define CVMX_PEXP_NPEI_DBG_DATA (CVMX_ADD_IO_SEG(0x00011F0000008510ull))
+#define CVMX_PEXP_NPEI_DBG_SELECT (CVMX_ADD_IO_SEG(0x00011F0000008500ull))
+#define CVMX_PEXP_NPEI_DMA0_INT_LEVEL (CVMX_ADD_IO_SEG(0x00011F00000085C0ull))
+#define CVMX_PEXP_NPEI_DMA1_INT_LEVEL (CVMX_ADD_IO_SEG(0x00011F00000085D0ull))
+#define CVMX_PEXP_NPEI_DMAX_COUNTS(offset) (CVMX_ADD_IO_SEG(0x00011F0000008450ull) + ((offset) & 7) * 16)
+#define CVMX_PEXP_NPEI_DMAX_DBELL(offset) (CVMX_ADD_IO_SEG(0x00011F00000083B0ull) + ((offset) & 7) * 16)
+#define CVMX_PEXP_NPEI_DMAX_IBUFF_SADDR(offset) (CVMX_ADD_IO_SEG(0x00011F0000008400ull) + ((offset) & 7) * 16)
+#define CVMX_PEXP_NPEI_DMAX_NADDR(offset) (CVMX_ADD_IO_SEG(0x00011F00000084A0ull) + ((offset) & 7) * 16)
+#define CVMX_PEXP_NPEI_DMA_CNTS (CVMX_ADD_IO_SEG(0x00011F00000085E0ull))
+#define CVMX_PEXP_NPEI_DMA_CONTROL (CVMX_ADD_IO_SEG(0x00011F00000083A0ull))
+#define CVMX_PEXP_NPEI_DMA_PCIE_REQ_NUM (CVMX_ADD_IO_SEG(0x00011F00000085B0ull))
+#define CVMX_PEXP_NPEI_DMA_STATE1 (CVMX_ADD_IO_SEG(0x00011F00000086C0ull))
+#define CVMX_PEXP_NPEI_DMA_STATE1_P1 (CVMX_ADD_IO_SEG(0x00011F0000008680ull))
+#define CVMX_PEXP_NPEI_DMA_STATE2 (CVMX_ADD_IO_SEG(0x00011F00000086D0ull))
+#define CVMX_PEXP_NPEI_DMA_STATE2_P1 (CVMX_ADD_IO_SEG(0x00011F0000008690ull))
+#define CVMX_PEXP_NPEI_DMA_STATE3_P1 (CVMX_ADD_IO_SEG(0x00011F00000086A0ull))
+#define CVMX_PEXP_NPEI_DMA_STATE4_P1 (CVMX_ADD_IO_SEG(0x00011F00000086B0ull))
+#define CVMX_PEXP_NPEI_DMA_STATE5_P1 (CVMX_ADD_IO_SEG(0x00011F00000086C0ull))
+#define CVMX_PEXP_NPEI_INT_A_ENB (CVMX_ADD_IO_SEG(0x00011F0000008560ull))
+#define CVMX_PEXP_NPEI_INT_A_ENB2 (CVMX_ADD_IO_SEG(0x00011F000000BCE0ull))
+#define CVMX_PEXP_NPEI_INT_A_SUM (CVMX_ADD_IO_SEG(0x00011F0000008550ull))
+#define CVMX_PEXP_NPEI_INT_ENB (CVMX_ADD_IO_SEG(0x00011F0000008540ull))
+#define CVMX_PEXP_NPEI_INT_ENB2 (CVMX_ADD_IO_SEG(0x00011F000000BCD0ull))
+#define CVMX_PEXP_NPEI_INT_INFO (CVMX_ADD_IO_SEG(0x00011F0000008590ull))
+#define CVMX_PEXP_NPEI_INT_SUM (CVMX_ADD_IO_SEG(0x00011F0000008530ull))
+#define CVMX_PEXP_NPEI_INT_SUM2 (CVMX_ADD_IO_SEG(0x00011F000000BCC0ull))
+#define CVMX_PEXP_NPEI_LAST_WIN_RDATA0 (CVMX_ADD_IO_SEG(0x00011F0000008600ull))
+#define CVMX_PEXP_NPEI_LAST_WIN_RDATA1 (CVMX_ADD_IO_SEG(0x00011F0000008610ull))
+#define CVMX_PEXP_NPEI_MEM_ACCESS_CTL (CVMX_ADD_IO_SEG(0x00011F00000084F0ull))
+#define CVMX_PEXP_NPEI_MEM_ACCESS_SUBIDX(offset) (CVMX_ADD_IO_SEG(0x00011F0000008280ull) + ((offset) & 31) * 16 - 16*12)
+#define CVMX_PEXP_NPEI_MSI_ENB0 (CVMX_ADD_IO_SEG(0x00011F000000BC50ull))
+#define CVMX_PEXP_NPEI_MSI_ENB1 (CVMX_ADD_IO_SEG(0x00011F000000BC60ull))
+#define CVMX_PEXP_NPEI_MSI_ENB2 (CVMX_ADD_IO_SEG(0x00011F000000BC70ull))
+#define CVMX_PEXP_NPEI_MSI_ENB3 (CVMX_ADD_IO_SEG(0x00011F000000BC80ull))
+#define CVMX_PEXP_NPEI_MSI_RCV0 (CVMX_ADD_IO_SEG(0x00011F000000BC10ull))
+#define CVMX_PEXP_NPEI_MSI_RCV1 (CVMX_ADD_IO_SEG(0x00011F000000BC20ull))
+#define CVMX_PEXP_NPEI_MSI_RCV2 (CVMX_ADD_IO_SEG(0x00011F000000BC30ull))
+#define CVMX_PEXP_NPEI_MSI_RCV3 (CVMX_ADD_IO_SEG(0x00011F000000BC40ull))
+#define CVMX_PEXP_NPEI_MSI_RD_MAP (CVMX_ADD_IO_SEG(0x00011F000000BCA0ull))
+#define CVMX_PEXP_NPEI_MSI_W1C_ENB0 (CVMX_ADD_IO_SEG(0x00011F000000BCF0ull))
+#define CVMX_PEXP_NPEI_MSI_W1C_ENB1 (CVMX_ADD_IO_SEG(0x00011F000000BD00ull))
+#define CVMX_PEXP_NPEI_MSI_W1C_ENB2 (CVMX_ADD_IO_SEG(0x00011F000000BD10ull))
+#define CVMX_PEXP_NPEI_MSI_W1C_ENB3 (CVMX_ADD_IO_SEG(0x00011F000000BD20ull))
+#define CVMX_PEXP_NPEI_MSI_W1S_ENB0 (CVMX_ADD_IO_SEG(0x00011F000000BD30ull))
+#define CVMX_PEXP_NPEI_MSI_W1S_ENB1 (CVMX_ADD_IO_SEG(0x00011F000000BD40ull))
+#define CVMX_PEXP_NPEI_MSI_W1S_ENB2 (CVMX_ADD_IO_SEG(0x00011F000000BD50ull))
+#define CVMX_PEXP_NPEI_MSI_W1S_ENB3 (CVMX_ADD_IO_SEG(0x00011F000000BD60ull))
+#define CVMX_PEXP_NPEI_MSI_WR_MAP (CVMX_ADD_IO_SEG(0x00011F000000BC90ull))
+#define CVMX_PEXP_NPEI_PCIE_CREDIT_CNT (CVMX_ADD_IO_SEG(0x00011F000000BD70ull))
+#define CVMX_PEXP_NPEI_PCIE_MSI_RCV (CVMX_ADD_IO_SEG(0x00011F000000BCB0ull))
+#define CVMX_PEXP_NPEI_PCIE_MSI_RCV_B1 (CVMX_ADD_IO_SEG(0x00011F0000008650ull))
+#define CVMX_PEXP_NPEI_PCIE_MSI_RCV_B2 (CVMX_ADD_IO_SEG(0x00011F0000008660ull))
+#define CVMX_PEXP_NPEI_PCIE_MSI_RCV_B3 (CVMX_ADD_IO_SEG(0x00011F0000008670ull))
+#define CVMX_PEXP_NPEI_PKTX_CNTS(offset) (CVMX_ADD_IO_SEG(0x00011F000000A400ull) + ((offset) & 31) * 16)
+#define CVMX_PEXP_NPEI_PKTX_INSTR_BADDR(offset) (CVMX_ADD_IO_SEG(0x00011F000000A800ull) + ((offset) & 31) * 16)
+#define CVMX_PEXP_NPEI_PKTX_INSTR_BAOFF_DBELL(offset) (CVMX_ADD_IO_SEG(0x00011F000000AC00ull) + ((offset) & 31) * 16)
+#define CVMX_PEXP_NPEI_PKTX_INSTR_FIFO_RSIZE(offset) (CVMX_ADD_IO_SEG(0x00011F000000B000ull) + ((offset) & 31) * 16)
+#define CVMX_PEXP_NPEI_PKTX_INSTR_HEADER(offset) (CVMX_ADD_IO_SEG(0x00011F000000B400ull) + ((offset) & 31) * 16)
+#define CVMX_PEXP_NPEI_PKTX_IN_BP(offset) (CVMX_ADD_IO_SEG(0x00011F000000B800ull) + ((offset) & 31) * 16)
+#define CVMX_PEXP_NPEI_PKTX_SLIST_BADDR(offset) (CVMX_ADD_IO_SEG(0x00011F0000009400ull) + ((offset) & 31) * 16)
+#define CVMX_PEXP_NPEI_PKTX_SLIST_BAOFF_DBELL(offset) (CVMX_ADD_IO_SEG(0x00011F0000009800ull) + ((offset) & 31) * 16)
+#define CVMX_PEXP_NPEI_PKTX_SLIST_FIFO_RSIZE(offset) (CVMX_ADD_IO_SEG(0x00011F0000009C00ull) + ((offset) & 31) * 16)
+#define CVMX_PEXP_NPEI_PKT_CNT_INT (CVMX_ADD_IO_SEG(0x00011F0000009110ull))
+#define CVMX_PEXP_NPEI_PKT_CNT_INT_ENB (CVMX_ADD_IO_SEG(0x00011F0000009130ull))
+#define CVMX_PEXP_NPEI_PKT_DATA_OUT_ES (CVMX_ADD_IO_SEG(0x00011F00000090B0ull))
+#define CVMX_PEXP_NPEI_PKT_DATA_OUT_NS (CVMX_ADD_IO_SEG(0x00011F00000090A0ull))
+#define CVMX_PEXP_NPEI_PKT_DATA_OUT_ROR (CVMX_ADD_IO_SEG(0x00011F0000009090ull))
+#define CVMX_PEXP_NPEI_PKT_DPADDR (CVMX_ADD_IO_SEG(0x00011F0000009080ull))
+#define CVMX_PEXP_NPEI_PKT_INPUT_CONTROL (CVMX_ADD_IO_SEG(0x00011F0000009150ull))
+#define CVMX_PEXP_NPEI_PKT_INSTR_ENB (CVMX_ADD_IO_SEG(0x00011F0000009000ull))
+#define CVMX_PEXP_NPEI_PKT_INSTR_RD_SIZE (CVMX_ADD_IO_SEG(0x00011F0000009190ull))
+#define CVMX_PEXP_NPEI_PKT_INSTR_SIZE (CVMX_ADD_IO_SEG(0x00011F0000009020ull))
+#define CVMX_PEXP_NPEI_PKT_INT_LEVELS (CVMX_ADD_IO_SEG(0x00011F0000009100ull))
+#define CVMX_PEXP_NPEI_PKT_IN_BP (CVMX_ADD_IO_SEG(0x00011F00000086B0ull))
+#define CVMX_PEXP_NPEI_PKT_IN_DONEX_CNTS(offset) (CVMX_ADD_IO_SEG(0x00011F000000A000ull) + ((offset) & 31) * 16)
+#define CVMX_PEXP_NPEI_PKT_IN_INSTR_COUNTS (CVMX_ADD_IO_SEG(0x00011F00000086A0ull))
+#define CVMX_PEXP_NPEI_PKT_IN_PCIE_PORT (CVMX_ADD_IO_SEG(0x00011F00000091A0ull))
+#define CVMX_PEXP_NPEI_PKT_IPTR (CVMX_ADD_IO_SEG(0x00011F0000009070ull))
+#define CVMX_PEXP_NPEI_PKT_OUTPUT_WMARK (CVMX_ADD_IO_SEG(0x00011F0000009160ull))
+#define CVMX_PEXP_NPEI_PKT_OUT_BMODE (CVMX_ADD_IO_SEG(0x00011F00000090D0ull))
+#define CVMX_PEXP_NPEI_PKT_OUT_ENB (CVMX_ADD_IO_SEG(0x00011F0000009010ull))
+#define CVMX_PEXP_NPEI_PKT_PCIE_PORT (CVMX_ADD_IO_SEG(0x00011F00000090E0ull))
+#define CVMX_PEXP_NPEI_PKT_PORT_IN_RST (CVMX_ADD_IO_SEG(0x00011F0000008690ull))
+#define CVMX_PEXP_NPEI_PKT_SLIST_ES (CVMX_ADD_IO_SEG(0x00011F0000009050ull))
+#define CVMX_PEXP_NPEI_PKT_SLIST_ID_SIZE (CVMX_ADD_IO_SEG(0x00011F0000009180ull))
+#define CVMX_PEXP_NPEI_PKT_SLIST_NS (CVMX_ADD_IO_SEG(0x00011F0000009040ull))
+#define CVMX_PEXP_NPEI_PKT_SLIST_ROR (CVMX_ADD_IO_SEG(0x00011F0000009030ull))
+#define CVMX_PEXP_NPEI_PKT_TIME_INT (CVMX_ADD_IO_SEG(0x00011F0000009120ull))
+#define CVMX_PEXP_NPEI_PKT_TIME_INT_ENB (CVMX_ADD_IO_SEG(0x00011F0000009140ull))
+#define CVMX_PEXP_NPEI_RSL_INT_BLOCKS (CVMX_ADD_IO_SEG(0x00011F0000008520ull))
+#define CVMX_PEXP_NPEI_SCRATCH_1 (CVMX_ADD_IO_SEG(0x00011F0000008270ull))
+#define CVMX_PEXP_NPEI_STATE1 (CVMX_ADD_IO_SEG(0x00011F0000008620ull))
+#define CVMX_PEXP_NPEI_STATE2 (CVMX_ADD_IO_SEG(0x00011F0000008630ull))
+#define CVMX_PEXP_NPEI_STATE3 (CVMX_ADD_IO_SEG(0x00011F0000008640ull))
+#define CVMX_PEXP_NPEI_WINDOW_CTL (CVMX_ADD_IO_SEG(0x00011F0000008380ull))
+#define CVMX_PEXP_SLI_BIST_STATUS (CVMX_ADD_IO_SEG(0x00011F0000010580ull))
+#define CVMX_PEXP_SLI_CTL_PORTX(offset) (CVMX_ADD_IO_SEG(0x00011F0000010050ull) + ((offset) & 1) * 16)
+#define CVMX_PEXP_SLI_CTL_STATUS (CVMX_ADD_IO_SEG(0x00011F0000010570ull))
+#define CVMX_PEXP_SLI_DATA_OUT_CNT (CVMX_ADD_IO_SEG(0x00011F00000105F0ull))
+#define CVMX_PEXP_SLI_DBG_DATA (CVMX_ADD_IO_SEG(0x00011F0000010310ull))
+#define CVMX_PEXP_SLI_DBG_SELECT (CVMX_ADD_IO_SEG(0x00011F0000010300ull))
+#define CVMX_PEXP_SLI_DMAX_CNT(offset) (CVMX_ADD_IO_SEG(0x00011F0000010400ull) + ((offset) & 1) * 16)
+#define CVMX_PEXP_SLI_DMAX_INT_LEVEL(offset) (CVMX_ADD_IO_SEG(0x00011F00000103E0ull) + ((offset) & 1) * 16)
+#define CVMX_PEXP_SLI_DMAX_TIM(offset) (CVMX_ADD_IO_SEG(0x00011F0000010420ull) + ((offset) & 1) * 16)
+#define CVMX_PEXP_SLI_INT_ENB_CIU (CVMX_ADD_IO_SEG(0x00011F0000013CD0ull))
+#define CVMX_PEXP_SLI_INT_ENB_PORTX(offset) (CVMX_ADD_IO_SEG(0x00011F0000010340ull) + ((offset) & 1) * 16)
+#define CVMX_PEXP_SLI_INT_SUM (CVMX_ADD_IO_SEG(0x00011F0000010330ull))
+#define CVMX_PEXP_SLI_LAST_WIN_RDATA0 (CVMX_ADD_IO_SEG(0x00011F0000010600ull))
+#define CVMX_PEXP_SLI_LAST_WIN_RDATA1 (CVMX_ADD_IO_SEG(0x00011F0000010610ull))
+#define CVMX_PEXP_SLI_MAC_CREDIT_CNT (CVMX_ADD_IO_SEG(0x00011F0000013D70ull))
+#define CVMX_PEXP_SLI_MEM_ACCESS_CTL (CVMX_ADD_IO_SEG(0x00011F00000102F0ull))
+#define CVMX_PEXP_SLI_MEM_ACCESS_SUBIDX(offset) (CVMX_ADD_IO_SEG(0x00011F00000100E0ull) + ((offset) & 31) * 16 - 16*12)
+#define CVMX_PEXP_SLI_MSI_ENB0 (CVMX_ADD_IO_SEG(0x00011F0000013C50ull))
+#define CVMX_PEXP_SLI_MSI_ENB1 (CVMX_ADD_IO_SEG(0x00011F0000013C60ull))
+#define CVMX_PEXP_SLI_MSI_ENB2 (CVMX_ADD_IO_SEG(0x00011F0000013C70ull))
+#define CVMX_PEXP_SLI_MSI_ENB3 (CVMX_ADD_IO_SEG(0x00011F0000013C80ull))
+#define CVMX_PEXP_SLI_MSI_RCV0 (CVMX_ADD_IO_SEG(0x00011F0000013C10ull))
+#define CVMX_PEXP_SLI_MSI_RCV1 (CVMX_ADD_IO_SEG(0x00011F0000013C20ull))
+#define CVMX_PEXP_SLI_MSI_RCV2 (CVMX_ADD_IO_SEG(0x00011F0000013C30ull))
+#define CVMX_PEXP_SLI_MSI_RCV3 (CVMX_ADD_IO_SEG(0x00011F0000013C40ull))
+#define CVMX_PEXP_SLI_MSI_RD_MAP (CVMX_ADD_IO_SEG(0x00011F0000013CA0ull))
+#define CVMX_PEXP_SLI_MSI_W1C_ENB0 (CVMX_ADD_IO_SEG(0x00011F0000013CF0ull))
+#define CVMX_PEXP_SLI_MSI_W1C_ENB1 (CVMX_ADD_IO_SEG(0x00011F0000013D00ull))
+#define CVMX_PEXP_SLI_MSI_W1C_ENB2 (CVMX_ADD_IO_SEG(0x00011F0000013D10ull))
+#define CVMX_PEXP_SLI_MSI_W1C_ENB3 (CVMX_ADD_IO_SEG(0x00011F0000013D20ull))
+#define CVMX_PEXP_SLI_MSI_W1S_ENB0 (CVMX_ADD_IO_SEG(0x00011F0000013D30ull))
+#define CVMX_PEXP_SLI_MSI_W1S_ENB1 (CVMX_ADD_IO_SEG(0x00011F0000013D40ull))
+#define CVMX_PEXP_SLI_MSI_W1S_ENB2 (CVMX_ADD_IO_SEG(0x00011F0000013D50ull))
+#define CVMX_PEXP_SLI_MSI_W1S_ENB3 (CVMX_ADD_IO_SEG(0x00011F0000013D60ull))
+#define CVMX_PEXP_SLI_MSI_WR_MAP (CVMX_ADD_IO_SEG(0x00011F0000013C90ull))
+#define CVMX_PEXP_SLI_PCIE_MSI_RCV (CVMX_ADD_IO_SEG(0x00011F0000013CB0ull))
+#define CVMX_PEXP_SLI_PCIE_MSI_RCV_B1 (CVMX_ADD_IO_SEG(0x00011F0000010650ull))
+#define CVMX_PEXP_SLI_PCIE_MSI_RCV_B2 (CVMX_ADD_IO_SEG(0x00011F0000010660ull))
+#define CVMX_PEXP_SLI_PCIE_MSI_RCV_B3 (CVMX_ADD_IO_SEG(0x00011F0000010670ull))
+#define CVMX_PEXP_SLI_PKTX_CNTS(offset) (CVMX_ADD_IO_SEG(0x00011F0000012400ull) + ((offset) & 31) * 16)
+#define CVMX_PEXP_SLI_PKTX_INSTR_BADDR(offset) (CVMX_ADD_IO_SEG(0x00011F0000012800ull) + ((offset) & 31) * 16)
+#define CVMX_PEXP_SLI_PKTX_INSTR_BAOFF_DBELL(offset) (CVMX_ADD_IO_SEG(0x00011F0000012C00ull) + ((offset) & 31) * 16)
+#define CVMX_PEXP_SLI_PKTX_INSTR_FIFO_RSIZE(offset) (CVMX_ADD_IO_SEG(0x00011F0000013000ull) + ((offset) & 31) * 16)
+#define CVMX_PEXP_SLI_PKTX_INSTR_HEADER(offset) (CVMX_ADD_IO_SEG(0x00011F0000013400ull) + ((offset) & 31) * 16)
+#define CVMX_PEXP_SLI_PKTX_IN_BP(offset) (CVMX_ADD_IO_SEG(0x00011F0000013800ull) + ((offset) & 31) * 16)
+#define CVMX_PEXP_SLI_PKTX_OUT_SIZE(offset) (CVMX_ADD_IO_SEG(0x00011F0000010C00ull) + ((offset) & 31) * 16)
+#define CVMX_PEXP_SLI_PKTX_SLIST_BADDR(offset) (CVMX_ADD_IO_SEG(0x00011F0000011400ull) + ((offset) & 31) * 16)
+#define CVMX_PEXP_SLI_PKTX_SLIST_BAOFF_DBELL(offset) (CVMX_ADD_IO_SEG(0x00011F0000011800ull) + ((offset) & 31) * 16)
+#define CVMX_PEXP_SLI_PKTX_SLIST_FIFO_RSIZE(offset) (CVMX_ADD_IO_SEG(0x00011F0000011C00ull) + ((offset) & 31) * 16)
+#define CVMX_PEXP_SLI_PKT_CNT_INT (CVMX_ADD_IO_SEG(0x00011F0000011130ull))
+#define CVMX_PEXP_SLI_PKT_CNT_INT_ENB (CVMX_ADD_IO_SEG(0x00011F0000011150ull))
+#define CVMX_PEXP_SLI_PKT_CTL (CVMX_ADD_IO_SEG(0x00011F0000011220ull))
+#define CVMX_PEXP_SLI_PKT_DATA_OUT_ES (CVMX_ADD_IO_SEG(0x00011F00000110B0ull))
+#define CVMX_PEXP_SLI_PKT_DATA_OUT_NS (CVMX_ADD_IO_SEG(0x00011F00000110A0ull))
+#define CVMX_PEXP_SLI_PKT_DATA_OUT_ROR (CVMX_ADD_IO_SEG(0x00011F0000011090ull))
+#define CVMX_PEXP_SLI_PKT_DPADDR (CVMX_ADD_IO_SEG(0x00011F0000011080ull))
+#define CVMX_PEXP_SLI_PKT_INPUT_CONTROL (CVMX_ADD_IO_SEG(0x00011F0000011170ull))
+#define CVMX_PEXP_SLI_PKT_INSTR_ENB (CVMX_ADD_IO_SEG(0x00011F0000011000ull))
+#define CVMX_PEXP_SLI_PKT_INSTR_RD_SIZE (CVMX_ADD_IO_SEG(0x00011F00000111A0ull))
+#define CVMX_PEXP_SLI_PKT_INSTR_SIZE (CVMX_ADD_IO_SEG(0x00011F0000011020ull))
+#define CVMX_PEXP_SLI_PKT_INT_LEVELS (CVMX_ADD_IO_SEG(0x00011F0000011120ull))
+#define CVMX_PEXP_SLI_PKT_IN_BP (CVMX_ADD_IO_SEG(0x00011F0000011210ull))
+#define CVMX_PEXP_SLI_PKT_IN_DONEX_CNTS(offset) (CVMX_ADD_IO_SEG(0x00011F0000012000ull) + ((offset) & 31) * 16)
+#define CVMX_PEXP_SLI_PKT_IN_INSTR_COUNTS (CVMX_ADD_IO_SEG(0x00011F0000011200ull))
+#define CVMX_PEXP_SLI_PKT_IN_PCIE_PORT (CVMX_ADD_IO_SEG(0x00011F00000111B0ull))
+#define CVMX_PEXP_SLI_PKT_IPTR (CVMX_ADD_IO_SEG(0x00011F0000011070ull))
+#define CVMX_PEXP_SLI_PKT_OUTPUT_WMARK (CVMX_ADD_IO_SEG(0x00011F0000011180ull))
+#define CVMX_PEXP_SLI_PKT_OUT_BMODE (CVMX_ADD_IO_SEG(0x00011F00000110D0ull))
+#define CVMX_PEXP_SLI_PKT_OUT_ENB (CVMX_ADD_IO_SEG(0x00011F0000011010ull))
+#define CVMX_PEXP_SLI_PKT_PCIE_PORT (CVMX_ADD_IO_SEG(0x00011F00000110E0ull))
+#define CVMX_PEXP_SLI_PKT_PORT_IN_RST (CVMX_ADD_IO_SEG(0x00011F00000111F0ull))
+#define CVMX_PEXP_SLI_PKT_SLIST_ES (CVMX_ADD_IO_SEG(0x00011F0000011050ull))
+#define CVMX_PEXP_SLI_PKT_SLIST_NS (CVMX_ADD_IO_SEG(0x00011F0000011040ull))
+#define CVMX_PEXP_SLI_PKT_SLIST_ROR (CVMX_ADD_IO_SEG(0x00011F0000011030ull))
+#define CVMX_PEXP_SLI_PKT_TIME_INT (CVMX_ADD_IO_SEG(0x00011F0000011140ull))
+#define CVMX_PEXP_SLI_PKT_TIME_INT_ENB (CVMX_ADD_IO_SEG(0x00011F0000011160ull))
+#define CVMX_PEXP_SLI_S2M_PORTX_CTL(offset) (CVMX_ADD_IO_SEG(0x00011F0000013D80ull) + ((offset) & 1) * 16)
+#define CVMX_PEXP_SLI_SCRATCH_1 (CVMX_ADD_IO_SEG(0x00011F00000103C0ull))
+#define CVMX_PEXP_SLI_SCRATCH_2 (CVMX_ADD_IO_SEG(0x00011F00000103D0ull))
+#define CVMX_PEXP_SLI_STATE1 (CVMX_ADD_IO_SEG(0x00011F0000010620ull))
+#define CVMX_PEXP_SLI_STATE2 (CVMX_ADD_IO_SEG(0x00011F0000010630ull))
+#define CVMX_PEXP_SLI_STATE3 (CVMX_ADD_IO_SEG(0x00011F0000010640ull))
+#define CVMX_PEXP_SLI_WINDOW_CTL (CVMX_ADD_IO_SEG(0x00011F00000102E0ull))
#endif
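
As a usage note, the regenerated single-line macros are consumed exactly like the old multi-line ones; they still expand to a plain CSR address. A minimal sketch, assuming the standard cvmx_read_csr() helper from <asm/octeon/cvmx.h> (the helper is pre-existing Octeon infrastructure, not part of this patch):

#include <asm/octeon/cvmx.h>
#include <asm/octeon/cvmx-pexp-defs.h>

/* Illustrative only: read one of the cn63xx SLI registers added above. */
static uint64_t read_sli_ctl_status(void)
{
	/* CVMX_PEXP_SLI_CTL_STATUS now expands to a flat address expression. */
	return cvmx_read_csr(CVMX_PEXP_SLI_CTL_STATUS);
}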
diff --git a/arch/mips/include/asm/octeon/cvmx-pow-defs.h b/arch/mips/include/asm/octeon/cvmx-pow-defs.h
index 2d82e24be51c..39fd75b03f77 100644
--- a/arch/mips/include/asm/octeon/cvmx-pow-defs.h
+++ b/arch/mips/include/asm/octeon/cvmx-pow-defs.h
@@ -4,7 +4,7 @@
* Contact: support@caviumnetworks.com
* This file is part of the OCTEON SDK
*
- * Copyright (c) 2003-2008 Cavium Networks
+ * Copyright (c) 2003-2010 Cavium Networks
*
* This file is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License, Version 2, as
@@ -28,52 +28,29 @@
#ifndef __CVMX_POW_DEFS_H__
#define __CVMX_POW_DEFS_H__
-#define CVMX_POW_BIST_STAT \
- CVMX_ADD_IO_SEG(0x00016700000003F8ull)
-#define CVMX_POW_DS_PC \
- CVMX_ADD_IO_SEG(0x0001670000000398ull)
-#define CVMX_POW_ECC_ERR \
- CVMX_ADD_IO_SEG(0x0001670000000218ull)
-#define CVMX_POW_INT_CTL \
- CVMX_ADD_IO_SEG(0x0001670000000220ull)
-#define CVMX_POW_IQ_CNTX(offset) \
- CVMX_ADD_IO_SEG(0x0001670000000340ull + (((offset) & 7) * 8))
-#define CVMX_POW_IQ_COM_CNT \
- CVMX_ADD_IO_SEG(0x0001670000000388ull)
-#define CVMX_POW_IQ_INT \
- CVMX_ADD_IO_SEG(0x0001670000000238ull)
-#define CVMX_POW_IQ_INT_EN \
- CVMX_ADD_IO_SEG(0x0001670000000240ull)
-#define CVMX_POW_IQ_THRX(offset) \
- CVMX_ADD_IO_SEG(0x00016700000003A0ull + (((offset) & 7) * 8))
-#define CVMX_POW_NOS_CNT \
- CVMX_ADD_IO_SEG(0x0001670000000228ull)
-#define CVMX_POW_NW_TIM \
- CVMX_ADD_IO_SEG(0x0001670000000210ull)
-#define CVMX_POW_PF_RST_MSK \
- CVMX_ADD_IO_SEG(0x0001670000000230ull)
-#define CVMX_POW_PP_GRP_MSKX(offset) \
- CVMX_ADD_IO_SEG(0x0001670000000000ull + (((offset) & 15) * 8))
-#define CVMX_POW_QOS_RNDX(offset) \
- CVMX_ADD_IO_SEG(0x00016700000001C0ull + (((offset) & 7) * 8))
-#define CVMX_POW_QOS_THRX(offset) \
- CVMX_ADD_IO_SEG(0x0001670000000180ull + (((offset) & 7) * 8))
-#define CVMX_POW_TS_PC \
- CVMX_ADD_IO_SEG(0x0001670000000390ull)
-#define CVMX_POW_WA_COM_PC \
- CVMX_ADD_IO_SEG(0x0001670000000380ull)
-#define CVMX_POW_WA_PCX(offset) \
- CVMX_ADD_IO_SEG(0x0001670000000300ull + (((offset) & 7) * 8))
-#define CVMX_POW_WQ_INT \
- CVMX_ADD_IO_SEG(0x0001670000000200ull)
-#define CVMX_POW_WQ_INT_CNTX(offset) \
- CVMX_ADD_IO_SEG(0x0001670000000100ull + (((offset) & 15) * 8))
-#define CVMX_POW_WQ_INT_PC \
- CVMX_ADD_IO_SEG(0x0001670000000208ull)
-#define CVMX_POW_WQ_INT_THRX(offset) \
- CVMX_ADD_IO_SEG(0x0001670000000080ull + (((offset) & 15) * 8))
-#define CVMX_POW_WS_PCX(offset) \
- CVMX_ADD_IO_SEG(0x0001670000000280ull + (((offset) & 15) * 8))
+#define CVMX_POW_BIST_STAT (CVMX_ADD_IO_SEG(0x00016700000003F8ull))
+#define CVMX_POW_DS_PC (CVMX_ADD_IO_SEG(0x0001670000000398ull))
+#define CVMX_POW_ECC_ERR (CVMX_ADD_IO_SEG(0x0001670000000218ull))
+#define CVMX_POW_INT_CTL (CVMX_ADD_IO_SEG(0x0001670000000220ull))
+#define CVMX_POW_IQ_CNTX(offset) (CVMX_ADD_IO_SEG(0x0001670000000340ull) + ((offset) & 7) * 8)
+#define CVMX_POW_IQ_COM_CNT (CVMX_ADD_IO_SEG(0x0001670000000388ull))
+#define CVMX_POW_IQ_INT (CVMX_ADD_IO_SEG(0x0001670000000238ull))
+#define CVMX_POW_IQ_INT_EN (CVMX_ADD_IO_SEG(0x0001670000000240ull))
+#define CVMX_POW_IQ_THRX(offset) (CVMX_ADD_IO_SEG(0x00016700000003A0ull) + ((offset) & 7) * 8)
+#define CVMX_POW_NOS_CNT (CVMX_ADD_IO_SEG(0x0001670000000228ull))
+#define CVMX_POW_NW_TIM (CVMX_ADD_IO_SEG(0x0001670000000210ull))
+#define CVMX_POW_PF_RST_MSK (CVMX_ADD_IO_SEG(0x0001670000000230ull))
+#define CVMX_POW_PP_GRP_MSKX(offset) (CVMX_ADD_IO_SEG(0x0001670000000000ull) + ((offset) & 15) * 8)
+#define CVMX_POW_QOS_RNDX(offset) (CVMX_ADD_IO_SEG(0x00016700000001C0ull) + ((offset) & 7) * 8)
+#define CVMX_POW_QOS_THRX(offset) (CVMX_ADD_IO_SEG(0x0001670000000180ull) + ((offset) & 7) * 8)
+#define CVMX_POW_TS_PC (CVMX_ADD_IO_SEG(0x0001670000000390ull))
+#define CVMX_POW_WA_COM_PC (CVMX_ADD_IO_SEG(0x0001670000000380ull))
+#define CVMX_POW_WA_PCX(offset) (CVMX_ADD_IO_SEG(0x0001670000000300ull) + ((offset) & 7) * 8)
+#define CVMX_POW_WQ_INT (CVMX_ADD_IO_SEG(0x0001670000000200ull))
+#define CVMX_POW_WQ_INT_CNTX(offset) (CVMX_ADD_IO_SEG(0x0001670000000100ull) + ((offset) & 15) * 8)
+#define CVMX_POW_WQ_INT_PC (CVMX_ADD_IO_SEG(0x0001670000000208ull))
+#define CVMX_POW_WQ_INT_THRX(offset) (CVMX_ADD_IO_SEG(0x0001670000000080ull) + ((offset) & 15) * 8)
+#define CVMX_POW_WS_PCX(offset) (CVMX_ADD_IO_SEG(0x0001670000000280ull) + ((offset) & 15) * 8)
union cvmx_pow_bist_stat {
uint64_t u64;
@@ -160,6 +137,19 @@ union cvmx_pow_bist_stat {
struct cvmx_pow_bist_stat_cn56xx cn56xxp1;
struct cvmx_pow_bist_stat_cn38xx cn58xx;
struct cvmx_pow_bist_stat_cn38xx cn58xxp1;
+ struct cvmx_pow_bist_stat_cn63xx {
+ uint64_t reserved_22_63:42;
+ uint64_t pp:6;
+ uint64_t reserved_12_15:4;
+ uint64_t cam:1;
+ uint64_t nbr:3;
+ uint64_t nbt:4;
+ uint64_t index:1;
+ uint64_t fidx:1;
+ uint64_t pend:1;
+ uint64_t adr:1;
+ } cn63xx;
+ struct cvmx_pow_bist_stat_cn63xx cn63xxp1;
};
union cvmx_pow_ds_pc {
@@ -179,6 +169,8 @@ union cvmx_pow_ds_pc {
struct cvmx_pow_ds_pc_s cn56xxp1;
struct cvmx_pow_ds_pc_s cn58xx;
struct cvmx_pow_ds_pc_s cn58xxp1;
+ struct cvmx_pow_ds_pc_s cn63xx;
+ struct cvmx_pow_ds_pc_s cn63xxp1;
};
union cvmx_pow_ecc_err {
@@ -219,6 +211,8 @@ union cvmx_pow_ecc_err {
struct cvmx_pow_ecc_err_s cn56xxp1;
struct cvmx_pow_ecc_err_s cn58xx;
struct cvmx_pow_ecc_err_s cn58xxp1;
+ struct cvmx_pow_ecc_err_s cn63xx;
+ struct cvmx_pow_ecc_err_s cn63xxp1;
};
union cvmx_pow_int_ctl {
@@ -239,6 +233,8 @@ union cvmx_pow_int_ctl {
struct cvmx_pow_int_ctl_s cn56xxp1;
struct cvmx_pow_int_ctl_s cn58xx;
struct cvmx_pow_int_ctl_s cn58xxp1;
+ struct cvmx_pow_int_ctl_s cn63xx;
+ struct cvmx_pow_int_ctl_s cn63xxp1;
};
union cvmx_pow_iq_cntx {
@@ -258,6 +254,8 @@ union cvmx_pow_iq_cntx {
struct cvmx_pow_iq_cntx_s cn56xxp1;
struct cvmx_pow_iq_cntx_s cn58xx;
struct cvmx_pow_iq_cntx_s cn58xxp1;
+ struct cvmx_pow_iq_cntx_s cn63xx;
+ struct cvmx_pow_iq_cntx_s cn63xxp1;
};
union cvmx_pow_iq_com_cnt {
@@ -277,6 +275,8 @@ union cvmx_pow_iq_com_cnt {
struct cvmx_pow_iq_com_cnt_s cn56xxp1;
struct cvmx_pow_iq_com_cnt_s cn58xx;
struct cvmx_pow_iq_com_cnt_s cn58xxp1;
+ struct cvmx_pow_iq_com_cnt_s cn63xx;
+ struct cvmx_pow_iq_com_cnt_s cn63xxp1;
};
union cvmx_pow_iq_int {
@@ -289,6 +289,8 @@ union cvmx_pow_iq_int {
struct cvmx_pow_iq_int_s cn52xxp1;
struct cvmx_pow_iq_int_s cn56xx;
struct cvmx_pow_iq_int_s cn56xxp1;
+ struct cvmx_pow_iq_int_s cn63xx;
+ struct cvmx_pow_iq_int_s cn63xxp1;
};
union cvmx_pow_iq_int_en {
@@ -301,6 +303,8 @@ union cvmx_pow_iq_int_en {
struct cvmx_pow_iq_int_en_s cn52xxp1;
struct cvmx_pow_iq_int_en_s cn56xx;
struct cvmx_pow_iq_int_en_s cn56xxp1;
+ struct cvmx_pow_iq_int_en_s cn63xx;
+ struct cvmx_pow_iq_int_en_s cn63xxp1;
};
union cvmx_pow_iq_thrx {
@@ -313,6 +317,8 @@ union cvmx_pow_iq_thrx {
struct cvmx_pow_iq_thrx_s cn52xxp1;
struct cvmx_pow_iq_thrx_s cn56xx;
struct cvmx_pow_iq_thrx_s cn56xxp1;
+ struct cvmx_pow_iq_thrx_s cn63xx;
+ struct cvmx_pow_iq_thrx_s cn63xxp1;
};
union cvmx_pow_nos_cnt {
@@ -341,6 +347,11 @@ union cvmx_pow_nos_cnt {
struct cvmx_pow_nos_cnt_s cn56xxp1;
struct cvmx_pow_nos_cnt_s cn58xx;
struct cvmx_pow_nos_cnt_s cn58xxp1;
+ struct cvmx_pow_nos_cnt_cn63xx {
+ uint64_t reserved_11_63:53;
+ uint64_t nos_cnt:11;
+ } cn63xx;
+ struct cvmx_pow_nos_cnt_cn63xx cn63xxp1;
};
union cvmx_pow_nw_tim {
@@ -360,6 +371,8 @@ union cvmx_pow_nw_tim {
struct cvmx_pow_nw_tim_s cn56xxp1;
struct cvmx_pow_nw_tim_s cn58xx;
struct cvmx_pow_nw_tim_s cn58xxp1;
+ struct cvmx_pow_nw_tim_s cn63xx;
+ struct cvmx_pow_nw_tim_s cn63xxp1;
};
union cvmx_pow_pf_rst_msk {
@@ -375,6 +388,8 @@ union cvmx_pow_pf_rst_msk {
struct cvmx_pow_pf_rst_msk_s cn56xxp1;
struct cvmx_pow_pf_rst_msk_s cn58xx;
struct cvmx_pow_pf_rst_msk_s cn58xxp1;
+ struct cvmx_pow_pf_rst_msk_s cn63xx;
+ struct cvmx_pow_pf_rst_msk_s cn63xxp1;
};
union cvmx_pow_pp_grp_mskx {
@@ -405,6 +420,8 @@ union cvmx_pow_pp_grp_mskx {
struct cvmx_pow_pp_grp_mskx_s cn56xxp1;
struct cvmx_pow_pp_grp_mskx_s cn58xx;
struct cvmx_pow_pp_grp_mskx_s cn58xxp1;
+ struct cvmx_pow_pp_grp_mskx_s cn63xx;
+ struct cvmx_pow_pp_grp_mskx_s cn63xxp1;
};
union cvmx_pow_qos_rndx {
@@ -427,6 +444,8 @@ union cvmx_pow_qos_rndx {
struct cvmx_pow_qos_rndx_s cn56xxp1;
struct cvmx_pow_qos_rndx_s cn58xx;
struct cvmx_pow_qos_rndx_s cn58xxp1;
+ struct cvmx_pow_qos_rndx_s cn63xx;
+ struct cvmx_pow_qos_rndx_s cn63xxp1;
};
union cvmx_pow_qos_thrx {
@@ -485,6 +504,19 @@ union cvmx_pow_qos_thrx {
struct cvmx_pow_qos_thrx_s cn56xxp1;
struct cvmx_pow_qos_thrx_s cn58xx;
struct cvmx_pow_qos_thrx_s cn58xxp1;
+ struct cvmx_pow_qos_thrx_cn63xx {
+ uint64_t reserved_59_63:5;
+ uint64_t des_cnt:11;
+ uint64_t reserved_47_47:1;
+ uint64_t buf_cnt:11;
+ uint64_t reserved_35_35:1;
+ uint64_t free_cnt:11;
+ uint64_t reserved_22_23:2;
+ uint64_t max_thr:10;
+ uint64_t reserved_10_11:2;
+ uint64_t min_thr:10;
+ } cn63xx;
+ struct cvmx_pow_qos_thrx_cn63xx cn63xxp1;
};
union cvmx_pow_ts_pc {
@@ -504,6 +536,8 @@ union cvmx_pow_ts_pc {
struct cvmx_pow_ts_pc_s cn56xxp1;
struct cvmx_pow_ts_pc_s cn58xx;
struct cvmx_pow_ts_pc_s cn58xxp1;
+ struct cvmx_pow_ts_pc_s cn63xx;
+ struct cvmx_pow_ts_pc_s cn63xxp1;
};
union cvmx_pow_wa_com_pc {
@@ -523,6 +557,8 @@ union cvmx_pow_wa_com_pc {
struct cvmx_pow_wa_com_pc_s cn56xxp1;
struct cvmx_pow_wa_com_pc_s cn58xx;
struct cvmx_pow_wa_com_pc_s cn58xxp1;
+ struct cvmx_pow_wa_com_pc_s cn63xx;
+ struct cvmx_pow_wa_com_pc_s cn63xxp1;
};
union cvmx_pow_wa_pcx {
@@ -542,6 +578,8 @@ union cvmx_pow_wa_pcx {
struct cvmx_pow_wa_pcx_s cn56xxp1;
struct cvmx_pow_wa_pcx_s cn58xx;
struct cvmx_pow_wa_pcx_s cn58xxp1;
+ struct cvmx_pow_wa_pcx_s cn63xx;
+ struct cvmx_pow_wa_pcx_s cn63xxp1;
};
union cvmx_pow_wq_int {
@@ -562,6 +600,8 @@ union cvmx_pow_wq_int {
struct cvmx_pow_wq_int_s cn56xxp1;
struct cvmx_pow_wq_int_s cn58xx;
struct cvmx_pow_wq_int_s cn58xxp1;
+ struct cvmx_pow_wq_int_s cn63xx;
+ struct cvmx_pow_wq_int_s cn63xxp1;
};
union cvmx_pow_wq_int_cntx {
@@ -604,6 +644,15 @@ union cvmx_pow_wq_int_cntx {
struct cvmx_pow_wq_int_cntx_s cn56xxp1;
struct cvmx_pow_wq_int_cntx_s cn58xx;
struct cvmx_pow_wq_int_cntx_s cn58xxp1;
+ struct cvmx_pow_wq_int_cntx_cn63xx {
+ uint64_t reserved_28_63:36;
+ uint64_t tc_cnt:4;
+ uint64_t reserved_23_23:1;
+ uint64_t ds_cnt:11;
+ uint64_t reserved_11_11:1;
+ uint64_t iq_cnt:11;
+ } cn63xx;
+ struct cvmx_pow_wq_int_cntx_cn63xx cn63xxp1;
};
union cvmx_pow_wq_int_pc {
@@ -626,6 +675,8 @@ union cvmx_pow_wq_int_pc {
struct cvmx_pow_wq_int_pc_s cn56xxp1;
struct cvmx_pow_wq_int_pc_s cn58xx;
struct cvmx_pow_wq_int_pc_s cn58xxp1;
+ struct cvmx_pow_wq_int_pc_s cn63xx;
+ struct cvmx_pow_wq_int_pc_s cn63xxp1;
};
union cvmx_pow_wq_int_thrx {
@@ -674,6 +725,16 @@ union cvmx_pow_wq_int_thrx {
struct cvmx_pow_wq_int_thrx_s cn56xxp1;
struct cvmx_pow_wq_int_thrx_s cn58xx;
struct cvmx_pow_wq_int_thrx_s cn58xxp1;
+ struct cvmx_pow_wq_int_thrx_cn63xx {
+ uint64_t reserved_29_63:35;
+ uint64_t tc_en:1;
+ uint64_t tc_thr:4;
+ uint64_t reserved_22_23:2;
+ uint64_t ds_thr:10;
+ uint64_t reserved_10_11:2;
+ uint64_t iq_thr:10;
+ } cn63xx;
+ struct cvmx_pow_wq_int_thrx_cn63xx cn63xxp1;
};
union cvmx_pow_ws_pcx {
@@ -693,6 +754,8 @@ union cvmx_pow_ws_pcx {
struct cvmx_pow_ws_pcx_s cn56xxp1;
struct cvmx_pow_ws_pcx_s cn58xx;
struct cvmx_pow_ws_pcx_s cn58xxp1;
+ struct cvmx_pow_ws_pcx_s cn63xx;
+ struct cvmx_pow_ws_pcx_s cn63xxp1;
};
#endif
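
The cn63xx-specific bitfield layouts added above are reached through the per-chip union members rather than the common .s view. A minimal sketch, assuming cvmx_read_csr()/cvmx_write_csr() from <asm/octeon/cvmx.h>; the group index and threshold values are example inputs only:

#include <asm/octeon/cvmx.h>
#include <asm/octeon/cvmx-pow-defs.h>

/* Illustrative only: program two cn63xx fields of POW_WQ_INT_THRX(grp). */
static void set_wq_int_thr_cn63xx(int grp, unsigned int iq_thr, unsigned int ds_thr)
{
	union cvmx_pow_wq_int_thrx thr;

	thr.u64 = cvmx_read_csr(CVMX_POW_WQ_INT_THRX(grp));
	thr.cn63xx.iq_thr = iq_thr;	/* 10-bit field in the cn63xx layout above */
	thr.cn63xx.ds_thr = ds_thr;	/* 10-bit field in the cn63xx layout above */
	cvmx_write_csr(CVMX_POW_WQ_INT_THRX(grp), thr.u64);
}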
diff --git a/arch/mips/include/asm/octeon/cvmx-rnm-defs.h b/arch/mips/include/asm/octeon/cvmx-rnm-defs.h
index 4586958c97be..c45da1f35ea7 100644
--- a/arch/mips/include/asm/octeon/cvmx-rnm-defs.h
+++ b/arch/mips/include/asm/octeon/cvmx-rnm-defs.h
@@ -4,7 +4,7 @@
* Contact: support@caviumnetworks.com
* This file is part of the OCTEON SDK
*
- * Copyright (c) 2003-2008 Cavium Networks
+ * Copyright (c) 2003-2010 Cavium Networks
*
* This file is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License, Version 2, as
@@ -30,10 +30,11 @@
#include <linux/types.h>
-#define CVMX_RNM_BIST_STATUS \
- CVMX_ADD_IO_SEG(0x0001180040000008ull)
-#define CVMX_RNM_CTL_STATUS \
- CVMX_ADD_IO_SEG(0x0001180040000000ull)
+#define CVMX_RNM_BIST_STATUS (CVMX_ADD_IO_SEG(0x0001180040000008ull))
+#define CVMX_RNM_CTL_STATUS (CVMX_ADD_IO_SEG(0x0001180040000000ull))
+#define CVMX_RNM_EER_DBG (CVMX_ADD_IO_SEG(0x0001180040000018ull))
+#define CVMX_RNM_EER_KEY (CVMX_ADD_IO_SEG(0x0001180040000010ull))
+#define CVMX_RNM_SERIAL_NUM (CVMX_ADD_IO_SEG(0x0001180040000020ull))
union cvmx_rnm_bist_status {
uint64_t u64;
@@ -53,12 +54,16 @@ union cvmx_rnm_bist_status {
struct cvmx_rnm_bist_status_s cn56xxp1;
struct cvmx_rnm_bist_status_s cn58xx;
struct cvmx_rnm_bist_status_s cn58xxp1;
+ struct cvmx_rnm_bist_status_s cn63xx;
+ struct cvmx_rnm_bist_status_s cn63xxp1;
};
union cvmx_rnm_ctl_status {
uint64_t u64;
struct cvmx_rnm_ctl_status_s {
- uint64_t reserved_9_63:55;
+ uint64_t reserved_11_63:53;
+ uint64_t eer_lck:1;
+ uint64_t eer_val:1;
uint64_t ent_sel:4;
uint64_t exp_ent:1;
uint64_t rng_rst:1;
@@ -76,13 +81,49 @@ union cvmx_rnm_ctl_status {
struct cvmx_rnm_ctl_status_cn30xx cn31xx;
struct cvmx_rnm_ctl_status_cn30xx cn38xx;
struct cvmx_rnm_ctl_status_cn30xx cn38xxp2;
- struct cvmx_rnm_ctl_status_s cn50xx;
- struct cvmx_rnm_ctl_status_s cn52xx;
- struct cvmx_rnm_ctl_status_s cn52xxp1;
- struct cvmx_rnm_ctl_status_s cn56xx;
- struct cvmx_rnm_ctl_status_s cn56xxp1;
- struct cvmx_rnm_ctl_status_s cn58xx;
- struct cvmx_rnm_ctl_status_s cn58xxp1;
+ struct cvmx_rnm_ctl_status_cn50xx {
+ uint64_t reserved_9_63:55;
+ uint64_t ent_sel:4;
+ uint64_t exp_ent:1;
+ uint64_t rng_rst:1;
+ uint64_t rnm_rst:1;
+ uint64_t rng_en:1;
+ uint64_t ent_en:1;
+ } cn50xx;
+ struct cvmx_rnm_ctl_status_cn50xx cn52xx;
+ struct cvmx_rnm_ctl_status_cn50xx cn52xxp1;
+ struct cvmx_rnm_ctl_status_cn50xx cn56xx;
+ struct cvmx_rnm_ctl_status_cn50xx cn56xxp1;
+ struct cvmx_rnm_ctl_status_cn50xx cn58xx;
+ struct cvmx_rnm_ctl_status_cn50xx cn58xxp1;
+ struct cvmx_rnm_ctl_status_s cn63xx;
+ struct cvmx_rnm_ctl_status_s cn63xxp1;
+};
+
+union cvmx_rnm_eer_dbg {
+ uint64_t u64;
+ struct cvmx_rnm_eer_dbg_s {
+ uint64_t dat:64;
+ } s;
+ struct cvmx_rnm_eer_dbg_s cn63xx;
+ struct cvmx_rnm_eer_dbg_s cn63xxp1;
+};
+
+union cvmx_rnm_eer_key {
+ uint64_t u64;
+ struct cvmx_rnm_eer_key_s {
+ uint64_t key:64;
+ } s;
+ struct cvmx_rnm_eer_key_s cn63xx;
+ struct cvmx_rnm_eer_key_s cn63xxp1;
+};
+
+union cvmx_rnm_serial_num {
+ uint64_t u64;
+ struct cvmx_rnm_serial_num_s {
+ uint64_t dat:64;
+ } s;
+ struct cvmx_rnm_serial_num_s cn63xx;
};
#endif
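
A minimal sketch of reading one of the new cn63xx RNM registers, again assuming cvmx_read_csr() from <asm/octeon/cvmx.h> (illustrative only, not part of this patch):

#include <asm/octeon/cvmx.h>
#include <asm/octeon/cvmx-rnm-defs.h>

/* Illustrative only: fetch the serial number register added above. */
static uint64_t read_rnm_serial_num(void)
{
	union cvmx_rnm_serial_num sn;

	sn.u64 = cvmx_read_csr(CVMX_RNM_SERIAL_NUM);
	return sn.s.dat;	/* 64-bit dat field */
}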
diff --git a/arch/mips/include/asm/octeon/cvmx-smix-defs.h b/arch/mips/include/asm/octeon/cvmx-smix-defs.h
index 9ae45fcbe3e3..4f3c0666e94a 100644
--- a/arch/mips/include/asm/octeon/cvmx-smix-defs.h
+++ b/arch/mips/include/asm/octeon/cvmx-smix-defs.h
@@ -4,7 +4,7 @@
* Contact: support@caviumnetworks.com
* This file is part of the OCTEON SDK
*
- * Copyright (c) 2003-2008 Cavium Networks
+ * Copyright (c) 2003-2010 Cavium Networks
*
* This file is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License, Version 2, as
@@ -28,16 +28,11 @@
#ifndef __CVMX_SMIX_DEFS_H__
#define __CVMX_SMIX_DEFS_H__
-#define CVMX_SMIX_CLK(offset) \
- CVMX_ADD_IO_SEG(0x0001180000001818ull + (((offset) & 1) * 256))
-#define CVMX_SMIX_CMD(offset) \
- CVMX_ADD_IO_SEG(0x0001180000001800ull + (((offset) & 1) * 256))
-#define CVMX_SMIX_EN(offset) \
- CVMX_ADD_IO_SEG(0x0001180000001820ull + (((offset) & 1) * 256))
-#define CVMX_SMIX_RD_DAT(offset) \
- CVMX_ADD_IO_SEG(0x0001180000001810ull + (((offset) & 1) * 256))
-#define CVMX_SMIX_WR_DAT(offset) \
- CVMX_ADD_IO_SEG(0x0001180000001808ull + (((offset) & 1) * 256))
+#define CVMX_SMIX_CLK(offset) (CVMX_ADD_IO_SEG(0x0001180000001818ull) + ((offset) & 1) * 256)
+#define CVMX_SMIX_CMD(offset) (CVMX_ADD_IO_SEG(0x0001180000001800ull) + ((offset) & 1) * 256)
+#define CVMX_SMIX_EN(offset) (CVMX_ADD_IO_SEG(0x0001180000001820ull) + ((offset) & 1) * 256)
+#define CVMX_SMIX_RD_DAT(offset) (CVMX_ADD_IO_SEG(0x0001180000001810ull) + ((offset) & 1) * 256)
+#define CVMX_SMIX_WR_DAT(offset) (CVMX_ADD_IO_SEG(0x0001180000001808ull) + ((offset) & 1) * 256)
union cvmx_smix_clk {
uint64_t u64;
@@ -56,7 +51,8 @@ union cvmx_smix_clk {
struct cvmx_smix_clk_cn30xx {
uint64_t reserved_21_63:43;
uint64_t sample_hi:5;
- uint64_t reserved_14_15:2;
+ uint64_t sample_mode:1;
+ uint64_t reserved_14_14:1;
uint64_t clk_idle:1;
uint64_t preamble:1;
uint64_t sample:4;
@@ -65,23 +61,15 @@ union cvmx_smix_clk {
struct cvmx_smix_clk_cn30xx cn31xx;
struct cvmx_smix_clk_cn30xx cn38xx;
struct cvmx_smix_clk_cn30xx cn38xxp2;
- struct cvmx_smix_clk_cn50xx {
- uint64_t reserved_25_63:39;
- uint64_t mode:1;
- uint64_t reserved_21_23:3;
- uint64_t sample_hi:5;
- uint64_t reserved_14_15:2;
- uint64_t clk_idle:1;
- uint64_t preamble:1;
- uint64_t sample:4;
- uint64_t phase:8;
- } cn50xx;
+ struct cvmx_smix_clk_s cn50xx;
struct cvmx_smix_clk_s cn52xx;
- struct cvmx_smix_clk_cn50xx cn52xxp1;
+ struct cvmx_smix_clk_s cn52xxp1;
struct cvmx_smix_clk_s cn56xx;
- struct cvmx_smix_clk_cn50xx cn56xxp1;
+ struct cvmx_smix_clk_s cn56xxp1;
struct cvmx_smix_clk_cn30xx cn58xx;
struct cvmx_smix_clk_cn30xx cn58xxp1;
+ struct cvmx_smix_clk_s cn63xx;
+ struct cvmx_smix_clk_s cn63xxp1;
};
union cvmx_smix_cmd {
@@ -112,6 +100,8 @@ union cvmx_smix_cmd {
struct cvmx_smix_cmd_s cn56xxp1;
struct cvmx_smix_cmd_cn30xx cn58xx;
struct cvmx_smix_cmd_cn30xx cn58xxp1;
+ struct cvmx_smix_cmd_s cn63xx;
+ struct cvmx_smix_cmd_s cn63xxp1;
};
union cvmx_smix_en {
@@ -131,6 +121,8 @@ union cvmx_smix_en {
struct cvmx_smix_en_s cn56xxp1;
struct cvmx_smix_en_s cn58xx;
struct cvmx_smix_en_s cn58xxp1;
+ struct cvmx_smix_en_s cn63xx;
+ struct cvmx_smix_en_s cn63xxp1;
};
union cvmx_smix_rd_dat {
@@ -152,6 +144,8 @@ union cvmx_smix_rd_dat {
struct cvmx_smix_rd_dat_s cn56xxp1;
struct cvmx_smix_rd_dat_s cn58xx;
struct cvmx_smix_rd_dat_s cn58xxp1;
+ struct cvmx_smix_rd_dat_s cn63xx;
+ struct cvmx_smix_rd_dat_s cn63xxp1;
};
union cvmx_smix_wr_dat {
@@ -173,6 +167,8 @@ union cvmx_smix_wr_dat {
struct cvmx_smix_wr_dat_s cn56xxp1;
struct cvmx_smix_wr_dat_s cn58xx;
struct cvmx_smix_wr_dat_s cn58xxp1;
+ struct cvmx_smix_wr_dat_s cn63xx;
+ struct cvmx_smix_wr_dat_s cn63xxp1;
};
#endif
diff --git a/arch/mips/include/asm/octeon/cvmx-uctlx-defs.h b/arch/mips/include/asm/octeon/cvmx-uctlx-defs.h
new file mode 100644
index 000000000000..594f1b68cd62
--- /dev/null
+++ b/arch/mips/include/asm/octeon/cvmx-uctlx-defs.h
@@ -0,0 +1,261 @@
+/***********************license start***************
+ * Author: Cavium Networks
+ *
+ * Contact: support@caviumnetworks.com
+ * This file is part of the OCTEON SDK
+ *
+ * Copyright (c) 2003-2010 Cavium Networks
+ *
+ * This file is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License, Version 2, as
+ * published by the Free Software Foundation.
+ *
+ * This file is distributed in the hope that it will be useful, but
+ * AS-IS and WITHOUT ANY WARRANTY; without even the implied warranty
+ * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE, TITLE, or
+ * NONINFRINGEMENT. See the GNU General Public License for more
+ * details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this file; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
+ * or visit http://www.gnu.org/licenses/.
+ *
+ * This file may also be available under a different license from Cavium.
+ * Contact Cavium Networks for more information
+ ***********************license end**************************************/
+
+#ifndef __CVMX_UCTLX_TYPEDEFS_H__
+#define __CVMX_UCTLX_TYPEDEFS_H__
+
+#define CVMX_UCTLX_BIST_STATUS(block_id) (CVMX_ADD_IO_SEG(0x000118006F0000A0ull))
+#define CVMX_UCTLX_CLK_RST_CTL(block_id) (CVMX_ADD_IO_SEG(0x000118006F000000ull))
+#define CVMX_UCTLX_EHCI_CTL(block_id) (CVMX_ADD_IO_SEG(0x000118006F000080ull))
+#define CVMX_UCTLX_EHCI_FLA(block_id) (CVMX_ADD_IO_SEG(0x000118006F0000A8ull))
+#define CVMX_UCTLX_ERTO_CTL(block_id) (CVMX_ADD_IO_SEG(0x000118006F000090ull))
+#define CVMX_UCTLX_IF_ENA(block_id) (CVMX_ADD_IO_SEG(0x000118006F000030ull))
+#define CVMX_UCTLX_INT_ENA(block_id) (CVMX_ADD_IO_SEG(0x000118006F000028ull))
+#define CVMX_UCTLX_INT_REG(block_id) (CVMX_ADD_IO_SEG(0x000118006F000020ull))
+#define CVMX_UCTLX_OHCI_CTL(block_id) (CVMX_ADD_IO_SEG(0x000118006F000088ull))
+#define CVMX_UCTLX_ORTO_CTL(block_id) (CVMX_ADD_IO_SEG(0x000118006F000098ull))
+#define CVMX_UCTLX_PPAF_WM(block_id) (CVMX_ADD_IO_SEG(0x000118006F000038ull))
+#define CVMX_UCTLX_UPHY_CTL_STATUS(block_id) (CVMX_ADD_IO_SEG(0x000118006F000008ull))
+#define CVMX_UCTLX_UPHY_PORTX_CTL_STATUS(offset, block_id) (CVMX_ADD_IO_SEG(0x000118006F000010ull) + (((offset) & 1) + ((block_id) & 0) * 0x0ull) * 8)
+
+union cvmx_uctlx_bist_status {
+ uint64_t u64;
+ struct cvmx_uctlx_bist_status_s {
+ uint64_t reserved_6_63:58;
+ uint64_t data_bis:1;
+ uint64_t desc_bis:1;
+ uint64_t erbm_bis:1;
+ uint64_t orbm_bis:1;
+ uint64_t wrbm_bis:1;
+ uint64_t ppaf_bis:1;
+ } s;
+ struct cvmx_uctlx_bist_status_s cn63xx;
+ struct cvmx_uctlx_bist_status_s cn63xxp1;
+};
+
+union cvmx_uctlx_clk_rst_ctl {
+ uint64_t u64;
+ struct cvmx_uctlx_clk_rst_ctl_s {
+ uint64_t reserved_25_63:39;
+ uint64_t clear_bist:1;
+ uint64_t start_bist:1;
+ uint64_t ehci_sm:1;
+ uint64_t ohci_clkcktrst:1;
+ uint64_t ohci_sm:1;
+ uint64_t ohci_susp_lgcy:1;
+ uint64_t app_start_clk:1;
+ uint64_t o_clkdiv_rst:1;
+ uint64_t h_clkdiv_byp:1;
+ uint64_t h_clkdiv_rst:1;
+ uint64_t h_clkdiv_en:1;
+ uint64_t o_clkdiv_en:1;
+ uint64_t h_div:4;
+ uint64_t p_refclk_sel:2;
+ uint64_t p_refclk_div:2;
+ uint64_t reserved_4_4:1;
+ uint64_t p_com_on:1;
+ uint64_t p_por:1;
+ uint64_t p_prst:1;
+ uint64_t hrst:1;
+ } s;
+ struct cvmx_uctlx_clk_rst_ctl_s cn63xx;
+ struct cvmx_uctlx_clk_rst_ctl_s cn63xxp1;
+};
+
+union cvmx_uctlx_ehci_ctl {
+ uint64_t u64;
+ struct cvmx_uctlx_ehci_ctl_s {
+ uint64_t reserved_20_63:44;
+ uint64_t desc_rbm:1;
+ uint64_t reg_nb:1;
+ uint64_t l2c_dc:1;
+ uint64_t l2c_bc:1;
+ uint64_t l2c_0pag:1;
+ uint64_t l2c_stt:1;
+ uint64_t l2c_buff_emod:2;
+ uint64_t l2c_desc_emod:2;
+ uint64_t inv_reg_a2:1;
+ uint64_t ehci_64b_addr_en:1;
+ uint64_t l2c_addr_msb:8;
+ } s;
+ struct cvmx_uctlx_ehci_ctl_s cn63xx;
+ struct cvmx_uctlx_ehci_ctl_s cn63xxp1;
+};
+
+union cvmx_uctlx_ehci_fla {
+ uint64_t u64;
+ struct cvmx_uctlx_ehci_fla_s {
+ uint64_t reserved_6_63:58;
+ uint64_t fla:6;
+ } s;
+ struct cvmx_uctlx_ehci_fla_s cn63xx;
+ struct cvmx_uctlx_ehci_fla_s cn63xxp1;
+};
+
+union cvmx_uctlx_erto_ctl {
+ uint64_t u64;
+ struct cvmx_uctlx_erto_ctl_s {
+ uint64_t reserved_32_63:32;
+ uint64_t to_val:27;
+ uint64_t reserved_0_4:5;
+ } s;
+ struct cvmx_uctlx_erto_ctl_s cn63xx;
+ struct cvmx_uctlx_erto_ctl_s cn63xxp1;
+};
+
+union cvmx_uctlx_if_ena {
+ uint64_t u64;
+ struct cvmx_uctlx_if_ena_s {
+ uint64_t reserved_1_63:63;
+ uint64_t en:1;
+ } s;
+ struct cvmx_uctlx_if_ena_s cn63xx;
+ struct cvmx_uctlx_if_ena_s cn63xxp1;
+};
+
+union cvmx_uctlx_int_ena {
+ uint64_t u64;
+ struct cvmx_uctlx_int_ena_s {
+ uint64_t reserved_8_63:56;
+ uint64_t ec_ovf_e:1;
+ uint64_t oc_ovf_e:1;
+ uint64_t wb_pop_e:1;
+ uint64_t wb_psh_f:1;
+ uint64_t cf_psh_f:1;
+ uint64_t or_psh_f:1;
+ uint64_t er_psh_f:1;
+ uint64_t pp_psh_f:1;
+ } s;
+ struct cvmx_uctlx_int_ena_s cn63xx;
+ struct cvmx_uctlx_int_ena_s cn63xxp1;
+};
+
+union cvmx_uctlx_int_reg {
+ uint64_t u64;
+ struct cvmx_uctlx_int_reg_s {
+ uint64_t reserved_8_63:56;
+ uint64_t ec_ovf_e:1;
+ uint64_t oc_ovf_e:1;
+ uint64_t wb_pop_e:1;
+ uint64_t wb_psh_f:1;
+ uint64_t cf_psh_f:1;
+ uint64_t or_psh_f:1;
+ uint64_t er_psh_f:1;
+ uint64_t pp_psh_f:1;
+ } s;
+ struct cvmx_uctlx_int_reg_s cn63xx;
+ struct cvmx_uctlx_int_reg_s cn63xxp1;
+};
+
+union cvmx_uctlx_ohci_ctl {
+ uint64_t u64;
+ struct cvmx_uctlx_ohci_ctl_s {
+ uint64_t reserved_19_63:45;
+ uint64_t reg_nb:1;
+ uint64_t l2c_dc:1;
+ uint64_t l2c_bc:1;
+ uint64_t l2c_0pag:1;
+ uint64_t l2c_stt:1;
+ uint64_t l2c_buff_emod:2;
+ uint64_t l2c_desc_emod:2;
+ uint64_t inv_reg_a2:1;
+ uint64_t reserved_8_8:1;
+ uint64_t l2c_addr_msb:8;
+ } s;
+ struct cvmx_uctlx_ohci_ctl_s cn63xx;
+ struct cvmx_uctlx_ohci_ctl_s cn63xxp1;
+};
+
+union cvmx_uctlx_orto_ctl {
+ uint64_t u64;
+ struct cvmx_uctlx_orto_ctl_s {
+ uint64_t reserved_32_63:32;
+ uint64_t to_val:24;
+ uint64_t reserved_0_7:8;
+ } s;
+ struct cvmx_uctlx_orto_ctl_s cn63xx;
+ struct cvmx_uctlx_orto_ctl_s cn63xxp1;
+};
+
+union cvmx_uctlx_ppaf_wm {
+ uint64_t u64;
+ struct cvmx_uctlx_ppaf_wm_s {
+ uint64_t reserved_5_63:59;
+ uint64_t wm:5;
+ } s;
+ struct cvmx_uctlx_ppaf_wm_s cn63xx;
+ struct cvmx_uctlx_ppaf_wm_s cn63xxp1;
+};
+
+union cvmx_uctlx_uphy_ctl_status {
+ uint64_t u64;
+ struct cvmx_uctlx_uphy_ctl_status_s {
+ uint64_t reserved_10_63:54;
+ uint64_t bist_done:1;
+ uint64_t bist_err:1;
+ uint64_t hsbist:1;
+ uint64_t fsbist:1;
+ uint64_t lsbist:1;
+ uint64_t siddq:1;
+ uint64_t vtest_en:1;
+ uint64_t uphy_bist:1;
+ uint64_t bist_en:1;
+ uint64_t ate_reset:1;
+ } s;
+ struct cvmx_uctlx_uphy_ctl_status_s cn63xx;
+ struct cvmx_uctlx_uphy_ctl_status_s cn63xxp1;
+};
+
+union cvmx_uctlx_uphy_portx_ctl_status {
+ uint64_t u64;
+ struct cvmx_uctlx_uphy_portx_ctl_status_s {
+ uint64_t reserved_43_63:21;
+ uint64_t tdata_out:4;
+ uint64_t txbiststuffenh:1;
+ uint64_t txbiststuffen:1;
+ uint64_t dmpulldown:1;
+ uint64_t dppulldown:1;
+ uint64_t vbusvldext:1;
+ uint64_t portreset:1;
+ uint64_t txhsvxtune:2;
+ uint64_t txvreftune:4;
+ uint64_t txrisetune:1;
+ uint64_t txpreemphasistune:1;
+ uint64_t txfslstune:4;
+ uint64_t sqrxtune:3;
+ uint64_t compdistune:3;
+ uint64_t loop_en:1;
+ uint64_t tclk:1;
+ uint64_t tdata_sel:1;
+ uint64_t taddr_in:4;
+ uint64_t tdata_in:8;
+ } s;
+ struct cvmx_uctlx_uphy_portx_ctl_status_s cn63xx;
+ struct cvmx_uctlx_uphy_portx_ctl_status_s cn63xxp1;
+};
+
+#endif
diff --git a/arch/mips/include/asm/octeon/octeon-model.h b/arch/mips/include/asm/octeon/octeon-model.h
index cf50336eca2e..700f88e31cad 100644
--- a/arch/mips/include/asm/octeon/octeon-model.h
+++ b/arch/mips/include/asm/octeon/octeon-model.h
@@ -35,14 +35,6 @@
#ifndef __OCTEON_MODEL_H__
#define __OCTEON_MODEL_H__
-/* NOTE: These must match what is checked in common-config.mk */
-/* Defines to represent the different versions of Octeon. */
-
-/*
- * IMPORTANT: When the default pass is updated for an Octeon Model,
- * the corresponding change must also be made in the oct-sim script.
- */
-
/*
* The defines below should be used with the OCTEON_IS_MODEL() macro
* to determine what model of chip the software is running on. Models
@@ -71,6 +63,21 @@
#define OM_IGNORE_MINOR_REVISION 0x08000000
#define OM_FLAG_MASK 0xff000000
+#define OM_MATCH_5XXX_FAMILY_MODELS 0x20000000 /* Match all cn5XXX Octeon models. */
+#define OM_MATCH_6XXX_FAMILY_MODELS 0x40000000 /* Match all cn6XXX Octeon models. */
+
+/*
+ * CN6XXX models with new revision encoding
+ */
+#define OCTEON_CN63XX_PASS1_0 0x000d9000
+#define OCTEON_CN63XX_PASS1_1 0x000d9001
+#define OCTEON_CN63XX_PASS1_2 0x000d9002
+#define OCTEON_CN63XX_PASS2_0 0x000d9008
+
+#define OCTEON_CN63XX (OCTEON_CN63XX_PASS2_0 | OM_IGNORE_REVISION)
+#define OCTEON_CN63XX_PASS1_X (OCTEON_CN63XX_PASS1_0 | OM_IGNORE_MINOR_REVISION)
+#define OCTEON_CN63XX_PASS2_X (OCTEON_CN63XX_PASS2_0 | OM_IGNORE_MINOR_REVISION)
+
/*
* CN5XXX models with new revision encoding
*/
@@ -189,6 +196,9 @@
| OM_MATCH_PREVIOUS_MODELS \
| OM_IGNORE_REVISION)
+#define OCTEON_CN5XXX (OCTEON_CN58XX_PASS1_0 | OM_MATCH_5XXX_FAMILY_MODELS)
+#define OCTEON_CN6XXX (OCTEON_CN63XX_PASS1_0 | OM_MATCH_6XXX_FAMILY_MODELS)
+
/* The revision byte (low byte) has two different encodings.
* CN3XXX:
*
@@ -222,6 +232,7 @@
| OCTEON_58XX_MODEL_MASK)
#define OCTEON_58XX_MODEL_MINOR_REV_MASK (OCTEON_58XX_MODEL_REV_MASK \
& 0x00fffff8)
+#define OCTEON_5XXX_MODEL_MASK 0x00ff0fc0
#define __OCTEON_MATCH_MASK__(x, y, z) (((x) & (z)) == ((y) & (z)))
@@ -273,6 +284,15 @@ static inline int __OCTEON_IS_MODEL_COMPILE__(uint32_t arg_model,
__OCTEON_MATCH_MASK__((chip_model), (arg_model),
OCTEON_58XX_MODEL_REV_MASK))
return 1;
+
+ if (((arg_model & OM_MATCH_5XXX_FAMILY_MODELS) == OM_MATCH_5XXX_FAMILY_MODELS) &&
+ ((chip_model) >= OCTEON_CN58XX_PASS1_0) && ((chip_model) < OCTEON_CN63XX_PASS1_0))
+ return 1;
+
+ if (((arg_model & OM_MATCH_6XXX_FAMILY_MODELS) == OM_MATCH_6XXX_FAMILY_MODELS) &&
+ ((chip_model) >= OCTEON_CN63XX_PASS1_0))
+ return 1;
+
if ((arg_model & OM_MATCH_PREVIOUS_MODELS) &&
((chip_model & OCTEON_58XX_MODEL_MASK) <
(arg_model & OCTEON_58XX_MODEL_MASK)))
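
The new family-match flags let a single model check cover every cn5XXX or cn6XXX part. Below is a standalone sketch (not part of the patch) that reuses only the constants added above and shows how the cn6XXX branch of __OCTEON_IS_MODEL_COMPILE__() evaluates for a CN63XX pass 1.1 chip:

#include <stdint.h>
#include <stdio.h>

/* Constants copied from the hunk above. */
#define OM_MATCH_6XXX_FAMILY_MODELS 0x40000000
#define OCTEON_CN63XX_PASS1_0       0x000d9000
#define OCTEON_CN63XX_PASS1_1       0x000d9001
#define OCTEON_CN6XXX (OCTEON_CN63XX_PASS1_0 | OM_MATCH_6XXX_FAMILY_MODELS)

static int is_cn6xxx(uint32_t arg_model, uint32_t chip_model)
{
	/* Same test as the new cn6XXX branch added above. */
	return ((arg_model & OM_MATCH_6XXX_FAMILY_MODELS) == OM_MATCH_6XXX_FAMILY_MODELS) &&
	       (chip_model >= OCTEON_CN63XX_PASS1_0);
}

int main(void)
{
	printf("%d\n", is_cn6xxx(OCTEON_CN6XXX, OCTEON_CN63XX_PASS1_1)); /* prints 1 */
	return 0;
}
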
diff --git a/arch/mips/include/asm/octeon/octeon.h b/arch/mips/include/asm/octeon/octeon.h
index 917a6c413b1a..6b34afd0d4e7 100644
--- a/arch/mips/include/asm/octeon/octeon.h
+++ b/arch/mips/include/asm/octeon/octeon.h
@@ -35,6 +35,7 @@ extern int octeon_is_simulation(void);
extern int octeon_is_pci_host(void);
extern int octeon_usb_is_ref_clk(void);
extern uint64_t octeon_get_clock_rate(void);
+extern u64 octeon_get_io_clock_rate(void);
extern const char *octeon_board_type_string(void);
extern const char *octeon_get_pci_interrupts(void);
extern int octeon_get_southbridge_interrupt(void);
diff --git a/arch/mips/include/asm/octeon/pci-octeon.h b/arch/mips/include/asm/octeon/pci-octeon.h
index ece78043acf6..fba2ba200f58 100644
--- a/arch/mips/include/asm/octeon/pci-octeon.h
+++ b/arch/mips/include/asm/octeon/pci-octeon.h
@@ -36,6 +36,16 @@ extern int (*octeon_pcibios_map_irq)(const struct pci_dev *dev,
u8 slot, u8 pin);
/*
+ * For PCI (not PCIe) the BAR2 base address.
+ */
+#define OCTEON_BAR2_PCI_ADDRESS 0x8000000000ull
+
+/*
+ * For PCI (not PCIe) the base of the memory mapped by BAR1
+ */
+extern u64 octeon_bar1_pci_phys;
+
+/*
* The following defines are used when octeon_dma_bar_type =
* OCTEON_DMA_BAR_TYPE_BIG
*/
diff --git a/arch/mips/include/asm/pci/bridge.h b/arch/mips/include/asm/pci/bridge.h
index 5f4b9d4e4114..f1f508e4f971 100644
--- a/arch/mips/include/asm/pci/bridge.h
+++ b/arch/mips/include/asm/pci/bridge.h
@@ -839,7 +839,7 @@ struct bridge_controller {
nasid_t nasid;
unsigned int widget_id;
unsigned int irq_cpu;
- dma64_addr_t baddr;
+ u64 baddr;
unsigned int pci_int[8];
};
diff --git a/arch/mips/include/asm/perf_event.h b/arch/mips/include/asm/perf_event.h
new file mode 100644
index 000000000000..e00007cf8162
--- /dev/null
+++ b/arch/mips/include/asm/perf_event.h
@@ -0,0 +1,25 @@
+/*
+ * linux/arch/mips/include/asm/perf_event.h
+ *
+ * Copyright (C) 2010 MIPS Technologies, Inc.
+ * Author: Deng-Cheng Zhu
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef __MIPS_PERF_EVENT_H__
+#define __MIPS_PERF_EVENT_H__
+
+/*
+ * MIPS performance counters do not raise an NMI upon overflow; a
+ * regular interrupt is signaled instead. Hence we can do the pending
+ * perf event work at the tail of the irq handler.

+ */
+static inline void
+set_perf_event_pending(void)
+{
+}
+
+#endif /* __MIPS_PERF_EVENT_H__ */
diff --git a/arch/mips/include/asm/pgtable-32.h b/arch/mips/include/asm/pgtable-32.h
index ae90412556d0..8a153d2fa62a 100644
--- a/arch/mips/include/asm/pgtable-32.h
+++ b/arch/mips/include/asm/pgtable-32.h
@@ -154,10 +154,7 @@ pfn_pte(unsigned long pfn, pgprot_t prot)
#define pte_offset_map(dir, address) \
((pte_t *)page_address(pmd_page(*(dir))) + __pte_offset(address))
-#define pte_offset_map_nested(dir, address) \
- ((pte_t *)page_address(pmd_page(*(dir))) + __pte_offset(address))
#define pte_unmap(pte) ((void)(pte))
-#define pte_unmap_nested(pte) ((void)(pte))
#if defined(CONFIG_CPU_R3000) || defined(CONFIG_CPU_TX39XX)
diff --git a/arch/mips/include/asm/pgtable-64.h b/arch/mips/include/asm/pgtable-64.h
index 1be4b0fa30da..55908fd56b1f 100644
--- a/arch/mips/include/asm/pgtable-64.h
+++ b/arch/mips/include/asm/pgtable-64.h
@@ -113,10 +113,10 @@
#endif
#define PTRS_PER_PTE ((PAGE_SIZE << PTE_ORDER) / sizeof(pte_t))
-#if PGDIR_SIZE >= TASK_SIZE
+#if PGDIR_SIZE >= TASK_SIZE64
#define USER_PTRS_PER_PGD (1)
#else
-#define USER_PTRS_PER_PGD (TASK_SIZE / PGDIR_SIZE)
+#define USER_PTRS_PER_PGD (TASK_SIZE64 / PGDIR_SIZE)
#endif
#define FIRST_USER_ADDRESS 0UL
@@ -257,10 +257,7 @@ static inline pmd_t *pmd_offset(pud_t * pud, unsigned long address)
((pte_t *) pmd_page_vaddr(*(dir)) + __pte_offset(address))
#define pte_offset_map(dir, address) \
((pte_t *)page_address(pmd_page(*(dir))) + __pte_offset(address))
-#define pte_offset_map_nested(dir, address) \
- ((pte_t *)page_address(pmd_page(*(dir))) + __pte_offset(address))
#define pte_unmap(pte) ((void)(pte))
-#define pte_unmap_nested(pte) ((void)(pte))
/*
* Initialize a new pgd / pmd table with invalid pointers.
diff --git a/arch/mips/include/asm/processor.h b/arch/mips/include/asm/processor.h
index 0d629bb93cbe..ead6928fa6b8 100644
--- a/arch/mips/include/asm/processor.h
+++ b/arch/mips/include/asm/processor.h
@@ -50,13 +50,10 @@ extern unsigned int vced_count, vcei_count;
* so don't change it unless you know what you are doing.
*/
#define TASK_SIZE 0x7fff8000UL
-#define STACK_TOP ((TASK_SIZE & PAGE_MASK) - SPECIAL_PAGES_SIZE)
-/*
- * This decides where the kernel will search for a free chunk of vm
- * space during mmap's.
- */
-#define TASK_UNMAPPED_BASE ((TASK_SIZE / 3) & ~(PAGE_SIZE))
+#ifdef __KERNEL__
+#define STACK_TOP_MAX TASK_SIZE
+#endif
#define TASK_IS_32BIT_ADDR 1
@@ -71,28 +68,29 @@ extern unsigned int vced_count, vcei_count;
* 8192EB ...
*/
#define TASK_SIZE32 0x7fff8000UL
-#define TASK_SIZE 0x10000000000UL
-#define STACK_TOP \
- (((test_thread_flag(TIF_32BIT_ADDR) ? \
- TASK_SIZE32 : TASK_SIZE) & PAGE_MASK) - SPECIAL_PAGES_SIZE)
+#define TASK_SIZE64 0x10000000000UL
+#define TASK_SIZE (test_thread_flag(TIF_32BIT_ADDR) ? TASK_SIZE32 : TASK_SIZE64)
+
+#ifdef __KERNEL__
+#define STACK_TOP_MAX TASK_SIZE64
+#endif
+
-/*
- * This decides where the kernel will search for a free chunk of vm
- * space during mmap's.
- */
-#define TASK_UNMAPPED_BASE \
- (test_thread_flag(TIF_32BIT_ADDR) ? \
- PAGE_ALIGN(TASK_SIZE32 / 3) : PAGE_ALIGN(TASK_SIZE / 3))
#define TASK_SIZE_OF(tsk) \
- (test_tsk_thread_flag(tsk, TIF_32BIT_ADDR) ? TASK_SIZE32 : TASK_SIZE)
+ (test_tsk_thread_flag(tsk, TIF_32BIT_ADDR) ? TASK_SIZE32 : TASK_SIZE64)
#define TASK_IS_32BIT_ADDR test_thread_flag(TIF_32BIT_ADDR)
#endif
-#ifdef __KERNEL__
-#define STACK_TOP_MAX TASK_SIZE
-#endif
+#define STACK_TOP ((TASK_SIZE & PAGE_MASK) - SPECIAL_PAGES_SIZE)
+
+/*
+ * This decides where the kernel will search for a free chunk of vm
+ * space during mmap's.
+ */
+#define TASK_UNMAPPED_BASE PAGE_ALIGN(TASK_SIZE / 3)
+
#define NUM_FPU_REGS 32
diff --git a/arch/mips/include/asm/prom.h b/arch/mips/include/asm/prom.h
new file mode 100644
index 000000000000..f29b862d9db3
--- /dev/null
+++ b/arch/mips/include/asm/prom.h
@@ -0,0 +1,31 @@
+/*
+ * arch/mips/include/asm/prom.h
+ *
+ * Copyright (C) 2010 Cisco Systems Inc. <dediao@cisco.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ */
+#ifndef __ASM_MIPS_PROM_H
+#define __ASM_MIPS_PROM_H
+
+#ifdef CONFIG_OF
+#include <asm/bootinfo.h>
+
+/* cmd_line is aliased to arcs_cmdline, which is compatible with the flattened device tree (FDT) */
+#define cmd_line arcs_cmdline
+
+extern int early_init_dt_scan_memory_arch(unsigned long node,
+ const char *uname, int depth, void *data);
+
+extern int reserve_mem_mach(unsigned long addr, unsigned long size);
+extern void free_mem_mach(unsigned long addr, unsigned long size);
+
+extern void device_tree_init(void);
+#else /* CONFIG_OF */
+static inline void device_tree_init(void) { }
+#endif /* CONFIG_OF */
+
+#endif /* _ASM_MIPS_PROM_H */
diff --git a/arch/mips/include/asm/system.h b/arch/mips/include/asm/system.h
index bb937ccfba1e..6018c80ce37a 100644
--- a/arch/mips/include/asm/system.h
+++ b/arch/mips/include/asm/system.h
@@ -115,21 +115,19 @@ static inline unsigned long __xchg_u32(volatile int * m, unsigned int val)
} else if (kernel_uses_llsc) {
unsigned long dummy;
- __asm__ __volatile__(
- " .set mips3 \n"
- "1: ll %0, %3 # xchg_u32 \n"
- " .set mips0 \n"
- " move %2, %z4 \n"
- " .set mips3 \n"
- " sc %2, %1 \n"
- " beqz %2, 2f \n"
- " .subsection 2 \n"
- "2: b 1b \n"
- " .previous \n"
- " .set mips0 \n"
- : "=&r" (retval), "=m" (*m), "=&r" (dummy)
- : "R" (*m), "Jr" (val)
- : "memory");
+ do {
+ __asm__ __volatile__(
+ " .set mips3 \n"
+ " ll %0, %3 # xchg_u32 \n"
+ " .set mips0 \n"
+ " move %2, %z4 \n"
+ " .set mips3 \n"
+ " sc %2, %1 \n"
+ " .set mips0 \n"
+ : "=&r" (retval), "=m" (*m), "=&r" (dummy)
+ : "R" (*m), "Jr" (val)
+ : "memory");
+ } while (unlikely(!dummy));
} else {
unsigned long flags;
@@ -167,19 +165,17 @@ static inline __u64 __xchg_u64(volatile __u64 * m, __u64 val)
} else if (kernel_uses_llsc) {
unsigned long dummy;
- __asm__ __volatile__(
- " .set mips3 \n"
- "1: lld %0, %3 # xchg_u64 \n"
- " move %2, %z4 \n"
- " scd %2, %1 \n"
- " beqz %2, 2f \n"
- " .subsection 2 \n"
- "2: b 1b \n"
- " .previous \n"
- " .set mips0 \n"
- : "=&r" (retval), "=m" (*m), "=&r" (dummy)
- : "R" (*m), "Jr" (val)
- : "memory");
+ do {
+ __asm__ __volatile__(
+ " .set mips3 \n"
+ " lld %0, %3 # xchg_u64 \n"
+ " move %2, %z4 \n"
+ " scd %2, %1 \n"
+ " .set mips0 \n"
+ : "=&r" (retval), "=m" (*m), "=&r" (dummy)
+ : "R" (*m), "Jr" (val)
+ : "memory");
+ } while (unlikely(!dummy));
} else {
unsigned long flags;
diff --git a/arch/mips/include/asm/thread_info.h b/arch/mips/include/asm/thread_info.h
index 70df9c0d3c5b..d309556cacf8 100644
--- a/arch/mips/include/asm/thread_info.h
+++ b/arch/mips/include/asm/thread_info.h
@@ -83,6 +83,8 @@ register struct thread_info *__current_thread_info __asm__("$28");
#define THREAD_SIZE (PAGE_SIZE << THREAD_SIZE_ORDER)
#define THREAD_MASK (THREAD_SIZE - 1UL)
+#define STACK_WARN (THREAD_SIZE / 8)
+
#define __HAVE_ARCH_THREAD_INFO_ALLOCATOR
#ifdef CONFIG_DEBUG_STACK_USAGE
diff --git a/arch/mips/include/asm/uaccess.h b/arch/mips/include/asm/uaccess.h
index c2d53c18fd36..653a412c036c 100644
--- a/arch/mips/include/asm/uaccess.h
+++ b/arch/mips/include/asm/uaccess.h
@@ -35,7 +35,9 @@
#ifdef CONFIG_64BIT
-#define __UA_LIMIT (- TASK_SIZE)
+extern u64 __ua_limit;
+
+#define __UA_LIMIT __ua_limit
#define __UA_ADDR ".dword"
#define __UA_LA "dla"
diff --git a/arch/mips/kernel/Makefile b/arch/mips/kernel/Makefile
index 06f848299785..22b2e0e38617 100644
--- a/arch/mips/kernel/Makefile
+++ b/arch/mips/kernel/Makefile
@@ -96,10 +96,14 @@ obj-$(CONFIG_KEXEC) += machine_kexec.o relocate_kernel.o
obj-$(CONFIG_EARLY_PRINTK) += early_printk.o
obj-$(CONFIG_SPINLOCK_TEST) += spinlock_test.o
+obj-$(CONFIG_OF) += prom.o
+
CFLAGS_cpu-bugs64.o = $(shell if $(CC) $(KBUILD_CFLAGS) -Wa,-mdaddi -c -o /dev/null -xc /dev/null >/dev/null 2>&1; then echo "-DHAVE_AS_SET_DADDI"; fi)
obj-$(CONFIG_HAVE_STD_PC_SERIAL_PORT) += 8250-platform.o
obj-$(CONFIG_MIPS_CPUFREQ) += cpufreq/
+obj-$(CONFIG_HW_PERF_EVENTS) += perf_event.o
+
CPPFLAGS_vmlinux.lds := $(KBUILD_CFLAGS)
diff --git a/arch/mips/kernel/cpu-probe.c b/arch/mips/kernel/cpu-probe.c
index b1b304ea2128..71620e19827a 100644
--- a/arch/mips/kernel/cpu-probe.c
+++ b/arch/mips/kernel/cpu-probe.c
@@ -25,6 +25,8 @@
#include <asm/system.h>
#include <asm/watch.h>
#include <asm/spram.h>
+#include <asm/uaccess.h>
+
/*
* Not all of the MIPS CPUs have the "wait" instruction available. Moreover,
* the implementation of the "wait" feature differs between CPU families. This
@@ -181,12 +183,13 @@ void __init check_wait(void)
case CPU_5KC:
case CPU_25KF:
case CPU_PR4450:
- case CPU_BCM3302:
- case CPU_BCM6338:
- case CPU_BCM6348:
- case CPU_BCM6358:
+ case CPU_BMIPS3300:
+ case CPU_BMIPS4350:
+ case CPU_BMIPS4380:
+ case CPU_BMIPS5000:
case CPU_CAVIUM_OCTEON:
case CPU_CAVIUM_OCTEON_PLUS:
+ case CPU_CAVIUM_OCTEON2:
case CPU_JZRISC:
cpu_wait = r4k_wait;
break;
@@ -902,33 +905,37 @@ static inline void cpu_probe_broadcom(struct cpuinfo_mips *c, unsigned int cpu)
{
decode_configs(c);
switch (c->processor_id & 0xff00) {
- case PRID_IMP_BCM3302:
- /* same as PRID_IMP_BCM6338 */
- c->cputype = CPU_BCM3302;
- __cpu_name[cpu] = "Broadcom BCM3302";
- break;
- case PRID_IMP_BCM4710:
- c->cputype = CPU_BCM4710;
- __cpu_name[cpu] = "Broadcom BCM4710";
- break;
- case PRID_IMP_BCM6345:
- c->cputype = CPU_BCM6345;
- __cpu_name[cpu] = "Broadcom BCM6345";
+ case PRID_IMP_BMIPS32:
+ c->cputype = CPU_BMIPS32;
+ __cpu_name[cpu] = "Broadcom BMIPS32";
+ break;
+ case PRID_IMP_BMIPS3300:
+ case PRID_IMP_BMIPS3300_ALT:
+ case PRID_IMP_BMIPS3300_BUG:
+ c->cputype = CPU_BMIPS3300;
+ __cpu_name[cpu] = "Broadcom BMIPS3300";
+ break;
+ case PRID_IMP_BMIPS43XX: {
+ int rev = c->processor_id & 0xff;
+
+ if (rev >= PRID_REV_BMIPS4380_LO &&
+ rev <= PRID_REV_BMIPS4380_HI) {
+ c->cputype = CPU_BMIPS4380;
+ __cpu_name[cpu] = "Broadcom BMIPS4380";
+ } else {
+ c->cputype = CPU_BMIPS4350;
+ __cpu_name[cpu] = "Broadcom BMIPS4350";
+ }
break;
- case PRID_IMP_BCM6348:
- c->cputype = CPU_BCM6348;
- __cpu_name[cpu] = "Broadcom BCM6348";
+ }
+ case PRID_IMP_BMIPS5000:
+ c->cputype = CPU_BMIPS5000;
+ __cpu_name[cpu] = "Broadcom BMIPS5000";
+ c->options |= MIPS_CPU_ULRI;
break;
- case PRID_IMP_BCM4350:
- switch (c->processor_id & 0xf0) {
- case PRID_REV_BCM6358:
- c->cputype = CPU_BCM6358;
- __cpu_name[cpu] = "Broadcom BCM6358";
- break;
- default:
- c->cputype = CPU_UNKNOWN;
- break;
- }
+ case PRID_IMP_BMIPS4KC:
+ c->cputype = CPU_4KC;
+ __cpu_name[cpu] = "MIPS 4Kc";
break;
}
}
@@ -953,6 +960,12 @@ platform:
if (cpu == 0)
__elf_platform = "octeon";
break;
+ case PRID_IMP_CAVIUM_CN63XX:
+ c->cputype = CPU_CAVIUM_OCTEON2;
+ __cpu_name[cpu] = "Cavium Octeon II";
+ if (cpu == 0)
+ __elf_platform = "octeon2";
+ break;
default:
printk(KERN_INFO "Unknown Octeon chip!\n");
c->cputype = CPU_UNKNOWN;
@@ -976,6 +989,12 @@ static inline void cpu_probe_ingenic(struct cpuinfo_mips *c, unsigned int cpu)
}
}
+#ifdef CONFIG_64BIT
+/* For use by uaccess.h */
+u64 __ua_limit;
+EXPORT_SYMBOL(__ua_limit);
+#endif
+
const char *__cpu_name[NR_CPUS];
const char *__elf_platform;
@@ -1053,6 +1072,11 @@ __cpuinit void cpu_probe(void)
c->srsets = 1;
cpu_probe_vmbits(c);
+
+#ifdef CONFIG_64BIT
+ if (cpu == 0)
+ __ua_limit = ~((1ull << cpu_vmbits) - 1);
+#endif
}
__cpuinit void cpu_report(void)
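
As a quick sanity check on the __ua_limit computation added above (a standalone sketch, not part of the patch; the cpu_vmbits value of 40 is only assumed for illustration): ~((1ull << 40) - 1) yields 0xffffff0000000000, so any address with one of the upper 24 bits set falls outside the user range.

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	int cpu_vmbits = 40; /* assumed value, for illustration only */
	uint64_t ua_limit = ~((1ull << cpu_vmbits) - 1);

	printf("__ua_limit = 0x%016llx\n", (unsigned long long)ua_limit);
	/* prints __ua_limit = 0xffffff0000000000 */
	return 0;
}
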
diff --git a/arch/mips/kernel/irq.c b/arch/mips/kernel/irq.c
index c6345f579a8a..4f93db58a79e 100644
--- a/arch/mips/kernel/irq.c
+++ b/arch/mips/kernel/irq.c
@@ -151,6 +151,29 @@ void __init init_IRQ(void)
#endif
}
+#ifdef DEBUG_STACKOVERFLOW
+static inline void check_stack_overflow(void)
+{
+ unsigned long sp;
+
+ __asm__ __volatile__("move %0, $sp" : "=r" (sp));
+ sp &= THREAD_MASK;
+
+ /*
+ * Check for stack overflow: is there less than STACK_WARN free?
+ * STACK_WARN is defined as 1/8 of THREAD_SIZE by default.
+ */
+ if (unlikely(sp < (sizeof(struct thread_info) + STACK_WARN))) {
+ printk("do_IRQ: stack overflow: %ld\n",
+ sp - sizeof(struct thread_info));
+ dump_stack();
+ }
+}
+#else
+static inline void check_stack_overflow(void) {}
+#endif
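
The check above relies on kernel stacks being THREAD_SIZE-aligned, so masking $sp with THREAD_MASK yields the stack pointer's offset above the base of the stack area (which holds struct thread_info). A standalone sketch of the same arithmetic, assuming an 8 KiB THREAD_SIZE and a placeholder thread_info size purely for illustration:

#include <stdio.h>

#define THREAD_SIZE 8192UL              /* assumed for illustration */
#define THREAD_MASK (THREAD_SIZE - 1UL)
#define STACK_WARN  (THREAD_SIZE / 8)   /* 1/8 of the stack, as above */

int main(void)
{
	unsigned long thread_info_size = 128; /* placeholder for sizeof(struct thread_info) */
	unsigned long sp = 0x80804000 + 0x90; /* a stack pointer deep into the stack */
	unsigned long offset = sp & THREAD_MASK;

	if (offset < thread_info_size + STACK_WARN)
		printf("stack overflow warning: %lu bytes left above thread_info\n",
		       offset - thread_info_size);
	return 0;
}
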
+
+
/*
* do_IRQ handles all normal device IRQ's (the special
* SMP cross-CPU interrupts have their own specific
@@ -159,6 +182,7 @@ void __init init_IRQ(void)
void __irq_entry do_IRQ(unsigned int irq)
{
irq_enter();
+ check_stack_overflow();
__DO_IRQ_SMTC_HOOK(irq);
generic_handle_irq(irq);
irq_exit();
diff --git a/arch/mips/kernel/mips-mt-fpaff.c b/arch/mips/kernel/mips-mt-fpaff.c
index 9a526ba6f257..802e6160f37e 100644
--- a/arch/mips/kernel/mips-mt-fpaff.c
+++ b/arch/mips/kernel/mips-mt-fpaff.c
@@ -103,7 +103,7 @@ asmlinkage long mipsmt_sys_sched_setaffinity(pid_t pid, unsigned int len,
if (!check_same_owner(p) && !capable(CAP_SYS_NICE))
goto out_unlock;
- retval = security_task_setscheduler(p)
+ retval = security_task_setscheduler(p);
if (retval)
goto out_unlock;
diff --git a/arch/mips/kernel/perf_event.c b/arch/mips/kernel/perf_event.c
new file mode 100644
index 000000000000..2b7f3f703b83
--- /dev/null
+++ b/arch/mips/kernel/perf_event.c
@@ -0,0 +1,601 @@
+/*
+ * Linux performance counter support for MIPS.
+ *
+ * Copyright (C) 2010 MIPS Technologies, Inc.
+ * Author: Deng-Cheng Zhu
+ *
+ * This code is based on the implementation for ARM, which is in turn
+ * based on the sparc64 perf event code and the x86 code. Performance
+ * counter access is based on the MIPS Oprofile code. And the callchain
+ * support references the code of MIPS stacktrace.c.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/cpumask.h>
+#include <linux/interrupt.h>
+#include <linux/smp.h>
+#include <linux/kernel.h>
+#include <linux/perf_event.h>
+#include <linux/uaccess.h>
+
+#include <asm/irq.h>
+#include <asm/irq_regs.h>
+#include <asm/stacktrace.h>
+#include <asm/time.h> /* For perf_irq */
+
+/* These are for 32bit counters. For 64bit ones, define them accordingly. */
+#define MAX_PERIOD ((1ULL << 32) - 1)
+#define VALID_COUNT 0x7fffffff
+#define TOTAL_BITS 32
+#define HIGHEST_BIT 31
+
+#define MIPS_MAX_HWEVENTS 4
+
+struct cpu_hw_events {
+ /* Array of events on this cpu. */
+ struct perf_event *events[MIPS_MAX_HWEVENTS];
+
+ /*
+ * Set the bit (indexed by the counter number) when the counter
+ * is used for an event.
+ */
+ unsigned long used_mask[BITS_TO_LONGS(MIPS_MAX_HWEVENTS)];
+
+ /*
+ * The borrowed MSB for the performance counter. A MIPS performance
+ * counter uses its bit 31 (for 32bit counters) or bit 63 (for 64bit
+ * counters) as a factor of determining whether a counter overflow
+ * should be signaled. So here we use a separate MSB for each
+ * counter to make things easy.
+ */
+ unsigned long msbs[BITS_TO_LONGS(MIPS_MAX_HWEVENTS)];
+
+ /*
+ * Software copy of the control register for each performance counter.
+ * MIPS CPUs vary in performance counters. They use this differently,
+ * and even may not use it.
+ */
+ unsigned int saved_ctrl[MIPS_MAX_HWEVENTS];
+};
+DEFINE_PER_CPU(struct cpu_hw_events, cpu_hw_events) = {
+ .saved_ctrl = {0},
+};
+
+/* The description of MIPS performance events. */
+struct mips_perf_event {
+ unsigned int event_id;
+ /*
+ * MIPS performance counters are indexed starting from 0.
+ * CNTR_EVEN indicates the indexes of the counters to be used are
+ * even numbers.
+ */
+ unsigned int cntr_mask;
+ #define CNTR_EVEN 0x55555555
+ #define CNTR_ODD 0xaaaaaaaa
+#ifdef CONFIG_MIPS_MT_SMP
+ enum {
+ T = 0,
+ V = 1,
+ P = 2,
+ } range;
+#else
+ #define T
+ #define V
+ #define P
+#endif
+};
+
+static struct mips_perf_event raw_event;
+static DEFINE_MUTEX(raw_event_mutex);
+
+#define UNSUPPORTED_PERF_EVENT_ID 0xffffffff
+#define C(x) PERF_COUNT_HW_CACHE_##x
+
+struct mips_pmu {
+ const char *name;
+ int irq;
+ irqreturn_t (*handle_irq)(int irq, void *dev);
+ int (*handle_shared_irq)(void);
+ void (*start)(void);
+ void (*stop)(void);
+ int (*alloc_counter)(struct cpu_hw_events *cpuc,
+ struct hw_perf_event *hwc);
+ u64 (*read_counter)(unsigned int idx);
+ void (*write_counter)(unsigned int idx, u64 val);
+ void (*enable_event)(struct hw_perf_event *evt, int idx);
+ void (*disable_event)(int idx);
+ const struct mips_perf_event *(*map_raw_event)(u64 config);
+ const struct mips_perf_event (*general_event_map)[PERF_COUNT_HW_MAX];
+ const struct mips_perf_event (*cache_event_map)
+ [PERF_COUNT_HW_CACHE_MAX]
+ [PERF_COUNT_HW_CACHE_OP_MAX]
+ [PERF_COUNT_HW_CACHE_RESULT_MAX];
+ unsigned int num_counters;
+};
+
+static const struct mips_pmu *mipspmu;
+
+static int
+mipspmu_event_set_period(struct perf_event *event,
+ struct hw_perf_event *hwc,
+ int idx)
+{
+ struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
+ s64 left = local64_read(&hwc->period_left);
+ s64 period = hwc->sample_period;
+ int ret = 0;
+ u64 uleft;
+ unsigned long flags;
+
+ if (unlikely(left <= -period)) {
+ left = period;
+ local64_set(&hwc->period_left, left);
+ hwc->last_period = period;
+ ret = 1;
+ }
+
+ if (unlikely(left <= 0)) {
+ left += period;
+ local64_set(&hwc->period_left, left);
+ hwc->last_period = period;
+ ret = 1;
+ }
+
+ if (left > (s64)MAX_PERIOD)
+ left = MAX_PERIOD;
+
+ local64_set(&hwc->prev_count, (u64)-left);
+
+ local_irq_save(flags);
+ uleft = (u64)(-left) & MAX_PERIOD;
+ uleft > VALID_COUNT ?
+ set_bit(idx, cpuc->msbs) : clear_bit(idx, cpuc->msbs);
+ mipspmu->write_counter(idx, (u64)(-left) & VALID_COUNT);
+ local_irq_restore(flags);
+
+ perf_event_update_userpage(event);
+
+ return ret;
+}
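
To make the borrowed-MSB bookkeeping above concrete (a standalone sketch reusing the constants defined earlier, not part of the patch): the hardware counter is programmed with (-left) & VALID_COUNT, i.e. only the low 31 bits, and the per-counter msbs bit remembers whether the discarded bit 31 was set so that mipspmu_event_update() can reconstruct the full 32-bit value later.

#include <stdint.h>
#include <stdio.h>

#define MAX_PERIOD  ((1ULL << 32) - 1)
#define VALID_COUNT 0x7fffffff

int main(void)
{
	int64_t left = 0x10000000LL;                 /* remaining period, well under 2^31 */
	uint64_t uleft = (uint64_t)(-left) & MAX_PERIOD;
	int msb = uleft > VALID_COUNT;               /* borrowed MSB kept in cpuc->msbs */
	uint32_t hw = (uint32_t)((uint64_t)(-left) & VALID_COUNT); /* written to the counter */

	printf("msb=%d hw=0x%08x\n", msb, hw);       /* prints msb=1 hw=0x70000000 */
	return 0;
}
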
+
+static int mipspmu_enable(struct perf_event *event)
+{
+ struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
+ struct hw_perf_event *hwc = &event->hw;
+ int idx;
+ int err = 0;
+
+ /* Look for a free counter for this event. */
+ idx = mipspmu->alloc_counter(cpuc, hwc);
+ if (idx < 0) {
+ err = idx;
+ goto out;
+ }
+
+ /*
+ * If there is an event in the counter we are going to use then
+ * make sure it is disabled.
+ */
+ event->hw.idx = idx;
+ mipspmu->disable_event(idx);
+ cpuc->events[idx] = event;
+
+ /* Set the period for the event. */
+ mipspmu_event_set_period(event, hwc, idx);
+
+ /* Enable the event. */
+ mipspmu->enable_event(hwc, idx);
+
+ /* Propagate our changes to the userspace mapping. */
+ perf_event_update_userpage(event);
+
+out:
+ return err;
+}
+
+static void mipspmu_event_update(struct perf_event *event,
+ struct hw_perf_event *hwc,
+ int idx)
+{
+ struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
+ unsigned long flags;
+ int shift = 64 - TOTAL_BITS;
+ s64 prev_raw_count, new_raw_count;
+ s64 delta;
+
+again:
+ prev_raw_count = local64_read(&hwc->prev_count);
+ local_irq_save(flags);
+ /* Make the counter value be a "real" one. */
+ new_raw_count = mipspmu->read_counter(idx);
+ if (new_raw_count & (test_bit(idx, cpuc->msbs) << HIGHEST_BIT)) {
+ new_raw_count &= VALID_COUNT;
+ clear_bit(idx, cpuc->msbs);
+ } else
+ new_raw_count |= (test_bit(idx, cpuc->msbs) << HIGHEST_BIT);
+ local_irq_restore(flags);
+
+ if (local64_cmpxchg(&hwc->prev_count, prev_raw_count,
+ new_raw_count) != prev_raw_count)
+ goto again;
+
+ delta = (new_raw_count << shift) - (prev_raw_count << shift);
+ delta >>= shift;
+
+ local64_add(delta, &event->count);
+ local64_sub(delta, &hwc->period_left);
+
+ return;
+}
+
+static void mipspmu_disable(struct perf_event *event)
+{
+ struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
+ struct hw_perf_event *hwc = &event->hw;
+ int idx = hwc->idx;
+
+
+ WARN_ON(idx < 0 || idx >= mipspmu->num_counters);
+
+ /* We are working on a local event. */
+ mipspmu->disable_event(idx);
+
+ barrier();
+
+ mipspmu_event_update(event, hwc, idx);
+ cpuc->events[idx] = NULL;
+ clear_bit(idx, cpuc->used_mask);
+
+ perf_event_update_userpage(event);
+}
+
+static void mipspmu_unthrottle(struct perf_event *event)
+{
+ struct hw_perf_event *hwc = &event->hw;
+
+ mipspmu->enable_event(hwc, hwc->idx);
+}
+
+static void mipspmu_read(struct perf_event *event)
+{
+ struct hw_perf_event *hwc = &event->hw;
+
+ /* Don't read disabled counters! */
+ if (hwc->idx < 0)
+ return;
+
+ mipspmu_event_update(event, hwc, hwc->idx);
+}
+
+static struct pmu pmu = {
+ .enable = mipspmu_enable,
+ .disable = mipspmu_disable,
+ .unthrottle = mipspmu_unthrottle,
+ .read = mipspmu_read,
+};
+
+static atomic_t active_events = ATOMIC_INIT(0);
+static DEFINE_MUTEX(pmu_reserve_mutex);
+static int (*save_perf_irq)(void);
+
+static int mipspmu_get_irq(void)
+{
+ int err;
+
+ if (mipspmu->irq >= 0) {
+ /* Request my own irq handler. */
+ err = request_irq(mipspmu->irq, mipspmu->handle_irq,
+ IRQF_DISABLED | IRQF_NOBALANCING,
+ "mips_perf_pmu", NULL);
+ if (err) {
+ pr_warning("Unable to request IRQ%d for MIPS "
+ "performance counters!\n", mipspmu->irq);
+ }
+ } else if (cp0_perfcount_irq < 0) {
+ /*
+ * We are sharing the irq number with the timer interrupt.
+ */
+ save_perf_irq = perf_irq;
+ perf_irq = mipspmu->handle_shared_irq;
+ err = 0;
+ } else {
+ pr_warning("The platform hasn't properly defined its "
+ "interrupt controller.\n");
+ err = -ENOENT;
+ }
+
+ return err;
+}
+
+static void mipspmu_free_irq(void)
+{
+ if (mipspmu->irq >= 0)
+ free_irq(mipspmu->irq, NULL);
+ else if (cp0_perfcount_irq < 0)
+ perf_irq = save_perf_irq;
+}
+
+static inline unsigned int
+mipspmu_perf_event_encode(const struct mips_perf_event *pev)
+{
+/*
+ * Top 8 bits for range, next 16 bits for cntr_mask, lowest 8 bits for
+ * event_id.
+ */
+#ifdef CONFIG_MIPS_MT_SMP
+ return ((unsigned int)pev->range << 24) |
+ (pev->cntr_mask & 0xffff00) |
+ (pev->event_id & 0xff);
+#else
+ return (pev->cntr_mask & 0xffff00) |
+ (pev->event_id & 0xff);
+#endif
+}
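
A standalone sketch (not part of the patch) of how the encoding above packs and later unpacks a descriptor. The event id and range value are arbitrary example inputs; the layout is the one described above, with the range in the top 8 bits, the counter mask in the middle, and the event id in the low 8 bits (MT_SMP variant, where P = 2).

#include <stdio.h>

#define CNTR_EVEN 0x55555555
#define CNTR_ODD  0xaaaaaaaa

int main(void)
{
	unsigned int event_id = 0x0a;        /* arbitrary example event */
	unsigned int cntr_mask = CNTR_EVEN;  /* countable on even counters */
	unsigned int range = 2;              /* P (processor-wide) on MT_SMP kernels */

	/* Same packing as mipspmu_perf_event_encode() (MT_SMP variant). */
	unsigned int event_base = (range << 24) |
				  (cntr_mask & 0xffff00) |
				  (event_id & 0xff);

	/* Same unpacking as mipsxx_pmu_alloc_counter() performs later. */
	unsigned int decoded_mask = (event_base >> 8) & 0xffff;

	printf("event_base=0x%08x decoded_mask=0x%04x event=0x%02x\n",
	       event_base, decoded_mask, event_base & 0xff);
	/* prints event_base=0x0255550a decoded_mask=0x5555 event=0x0a */
	return 0;
}
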
+
+static const struct mips_perf_event *
+mipspmu_map_general_event(int idx)
+{
+ const struct mips_perf_event *pev;
+
+ pev = ((*mipspmu->general_event_map)[idx].event_id ==
+ UNSUPPORTED_PERF_EVENT_ID ? ERR_PTR(-EOPNOTSUPP) :
+ &(*mipspmu->general_event_map)[idx]);
+
+ return pev;
+}
+
+static const struct mips_perf_event *
+mipspmu_map_cache_event(u64 config)
+{
+ unsigned int cache_type, cache_op, cache_result;
+ const struct mips_perf_event *pev;
+
+ cache_type = (config >> 0) & 0xff;
+ if (cache_type >= PERF_COUNT_HW_CACHE_MAX)
+ return ERR_PTR(-EINVAL);
+
+ cache_op = (config >> 8) & 0xff;
+ if (cache_op >= PERF_COUNT_HW_CACHE_OP_MAX)
+ return ERR_PTR(-EINVAL);
+
+ cache_result = (config >> 16) & 0xff;
+ if (cache_result >= PERF_COUNT_HW_CACHE_RESULT_MAX)
+ return ERR_PTR(-EINVAL);
+
+ pev = &((*mipspmu->cache_event_map)
+ [cache_type]
+ [cache_op]
+ [cache_result]);
+
+ if (pev->event_id == UNSUPPORTED_PERF_EVENT_ID)
+ return ERR_PTR(-EOPNOTSUPP);
+
+ return pev;
+
+}
+
+static int validate_event(struct cpu_hw_events *cpuc,
+ struct perf_event *event)
+{
+ struct hw_perf_event fake_hwc = event->hw;
+
+ if (event->pmu && event->pmu != &pmu)
+ return 0;
+
+ return mipspmu->alloc_counter(cpuc, &fake_hwc) >= 0;
+}
+
+static int validate_group(struct perf_event *event)
+{
+ struct perf_event *sibling, *leader = event->group_leader;
+ struct cpu_hw_events fake_cpuc;
+
+ memset(&fake_cpuc, 0, sizeof(fake_cpuc));
+
+ if (!validate_event(&fake_cpuc, leader))
+ return -ENOSPC;
+
+ list_for_each_entry(sibling, &leader->sibling_list, group_entry) {
+ if (!validate_event(&fake_cpuc, sibling))
+ return -ENOSPC;
+ }
+
+ if (!validate_event(&fake_cpuc, event))
+ return -ENOSPC;
+
+ return 0;
+}
+
+/*
+ * mipsxx/rm9000/loongson2 have different performance counters, so they
+ * have specific low-level init routines.
+ */
+static void reset_counters(void *arg);
+static int __hw_perf_event_init(struct perf_event *event);
+
+static void hw_perf_event_destroy(struct perf_event *event)
+{
+ if (atomic_dec_and_mutex_lock(&active_events,
+ &pmu_reserve_mutex)) {
+ /*
+ * We must not call the destroy function with interrupts
+ * disabled.
+ */
+ on_each_cpu(reset_counters,
+ (void *)(long)mipspmu->num_counters, 1);
+ mipspmu_free_irq();
+ mutex_unlock(&pmu_reserve_mutex);
+ }
+}
+
+const struct pmu *hw_perf_event_init(struct perf_event *event)
+{
+ int err = 0;
+
+ if (!mipspmu || event->cpu >= nr_cpumask_bits ||
+ (event->cpu >= 0 && !cpu_online(event->cpu)))
+ return ERR_PTR(-ENODEV);
+
+ if (!atomic_inc_not_zero(&active_events)) {
+ if (atomic_read(&active_events) > MIPS_MAX_HWEVENTS) {
+ atomic_dec(&active_events);
+ return ERR_PTR(-ENOSPC);
+ }
+
+ mutex_lock(&pmu_reserve_mutex);
+ if (atomic_read(&active_events) == 0)
+ err = mipspmu_get_irq();
+
+ if (!err)
+ atomic_inc(&active_events);
+ mutex_unlock(&pmu_reserve_mutex);
+ }
+
+ if (err)
+ return ERR_PTR(err);
+
+ err = __hw_perf_event_init(event);
+ if (err)
+ hw_perf_event_destroy(event);
+
+ return err ? ERR_PTR(err) : &pmu;
+}
+
+void hw_perf_enable(void)
+{
+ if (mipspmu)
+ mipspmu->start();
+}
+
+void hw_perf_disable(void)
+{
+ if (mipspmu)
+ mipspmu->stop();
+}
+
+/* This is needed by specific irq handlers in perf_event_*.c */
+static void
+handle_associated_event(struct cpu_hw_events *cpuc,
+ int idx, struct perf_sample_data *data, struct pt_regs *regs)
+{
+ struct perf_event *event = cpuc->events[idx];
+ struct hw_perf_event *hwc = &event->hw;
+
+ mipspmu_event_update(event, hwc, idx);
+ data->period = event->hw.last_period;
+ if (!mipspmu_event_set_period(event, hwc, idx))
+ return;
+
+ if (perf_event_overflow(event, 0, data, regs))
+ mipspmu->disable_event(idx);
+}
+
+#include "perf_event_mipsxx.c"
+
+/* Callchain handling code. */
+static inline void
+callchain_store(struct perf_callchain_entry *entry,
+ u64 ip)
+{
+ if (entry->nr < PERF_MAX_STACK_DEPTH)
+ entry->ip[entry->nr++] = ip;
+}
+
+/*
+ * Leave the userspace callchain empty for now. Once we find a way to
+ * trace user-stack callchains, the code will be added here.
+ */
+static void
+perf_callchain_user(struct pt_regs *regs,
+ struct perf_callchain_entry *entry)
+{
+}
+
+static void save_raw_perf_callchain(struct perf_callchain_entry *entry,
+ unsigned long reg29)
+{
+ unsigned long *sp = (unsigned long *)reg29;
+ unsigned long addr;
+
+ while (!kstack_end(sp)) {
+ addr = *sp++;
+ if (__kernel_text_address(addr)) {
+ callchain_store(entry, addr);
+ if (entry->nr >= PERF_MAX_STACK_DEPTH)
+ break;
+ }
+ }
+}
+
+static void
+perf_callchain_kernel(struct pt_regs *regs,
+ struct perf_callchain_entry *entry)
+{
+ unsigned long sp = regs->regs[29];
+#ifdef CONFIG_KALLSYMS
+ unsigned long ra = regs->regs[31];
+ unsigned long pc = regs->cp0_epc;
+
+ callchain_store(entry, PERF_CONTEXT_KERNEL);
+ if (raw_show_trace || !__kernel_text_address(pc)) {
+ unsigned long stack_page =
+ (unsigned long)task_stack_page(current);
+ if (stack_page && sp >= stack_page &&
+ sp <= stack_page + THREAD_SIZE - 32)
+ save_raw_perf_callchain(entry, sp);
+ return;
+ }
+ do {
+ callchain_store(entry, pc);
+ if (entry->nr >= PERF_MAX_STACK_DEPTH)
+ break;
+ pc = unwind_stack(current, &sp, pc, &ra);
+ } while (pc);
+#else
+ callchain_store(entry, PERF_CONTEXT_KERNEL);
+ save_raw_perf_callchain(entry, sp);
+#endif
+}
+
+static void
+perf_do_callchain(struct pt_regs *regs,
+ struct perf_callchain_entry *entry)
+{
+ int is_user;
+
+ if (!regs)
+ return;
+
+ is_user = user_mode(regs);
+
+ if (!current || !current->pid)
+ return;
+
+ if (is_user && current->state != TASK_RUNNING)
+ return;
+
+ if (!is_user) {
+ perf_callchain_kernel(regs, entry);
+ if (current->mm)
+ regs = task_pt_regs(current);
+ else
+ regs = NULL;
+ }
+ if (regs)
+ perf_callchain_user(regs, entry);
+}
+
+static DEFINE_PER_CPU(struct perf_callchain_entry, pmc_irq_entry);
+
+struct perf_callchain_entry *
+perf_callchain(struct pt_regs *regs)
+{
+ struct perf_callchain_entry *entry = &__get_cpu_var(pmc_irq_entry);
+
+ entry->nr = 0;
+ perf_do_callchain(regs, entry);
+ return entry;
+}
diff --git a/arch/mips/kernel/perf_event_mipsxx.c b/arch/mips/kernel/perf_event_mipsxx.c
new file mode 100644
index 000000000000..5c7c6fc07565
--- /dev/null
+++ b/arch/mips/kernel/perf_event_mipsxx.c
@@ -0,0 +1,1052 @@
+#if defined(CONFIG_CPU_MIPS32) || defined(CONFIG_CPU_MIPS64) || \
+ defined(CONFIG_CPU_R10000) || defined(CONFIG_CPU_SB1)
+
+#define M_CONFIG1_PC (1 << 4)
+
+#define M_PERFCTL_EXL (1UL << 0)
+#define M_PERFCTL_KERNEL (1UL << 1)
+#define M_PERFCTL_SUPERVISOR (1UL << 2)
+#define M_PERFCTL_USER (1UL << 3)
+#define M_PERFCTL_INTERRUPT_ENABLE (1UL << 4)
+#define M_PERFCTL_EVENT(event) (((event) & 0x3ff) << 5)
+#define M_PERFCTL_VPEID(vpe) ((vpe) << 16)
+#define M_PERFCTL_MT_EN(filter) ((filter) << 20)
+#define M_TC_EN_ALL M_PERFCTL_MT_EN(0)
+#define M_TC_EN_VPE M_PERFCTL_MT_EN(1)
+#define M_TC_EN_TC M_PERFCTL_MT_EN(2)
+#define M_PERFCTL_TCID(tcid) ((tcid) << 22)
+#define M_PERFCTL_WIDE (1UL << 30)
+#define M_PERFCTL_MORE (1UL << 31)
+
+#define M_PERFCTL_COUNT_EVENT_WHENEVER (M_PERFCTL_EXL | \
+ M_PERFCTL_KERNEL | \
+ M_PERFCTL_USER | \
+ M_PERFCTL_SUPERVISOR | \
+ M_PERFCTL_INTERRUPT_ENABLE)
+
+#ifdef CONFIG_MIPS_MT_SMP
+#define M_PERFCTL_CONFIG_MASK 0x3fff801f
+#else
+#define M_PERFCTL_CONFIG_MASK 0x1f
+#endif
+#define M_PERFCTL_EVENT_MASK 0xfe0
+
+#define M_COUNTER_OVERFLOW (1UL << 31)
+
+#ifdef CONFIG_MIPS_MT_SMP
+static int cpu_has_mipsmt_pertccounters;
+
+/*
+ * FIXME: For VSMP, vpe_id() is redefined for Perf-events, because
+ * cpu_data[cpuid].vpe_id reports 0 for _both_ CPUs.
+ */
+#if defined(CONFIG_HW_PERF_EVENTS)
+#define vpe_id() (cpu_has_mipsmt_pertccounters ? \
+ 0 : smp_processor_id())
+#else
+#define vpe_id() (cpu_has_mipsmt_pertccounters ? \
+ 0 : cpu_data[smp_processor_id()].vpe_id)
+#endif
+
+/* Copied from op_model_mipsxx.c */
+static inline unsigned int vpe_shift(void)
+{
+ if (num_possible_cpus() > 1)
+ return 1;
+
+ return 0;
+}
+#else /* !CONFIG_MIPS_MT_SMP */
+#define vpe_id() 0
+
+static inline unsigned int vpe_shift(void)
+{
+ return 0;
+}
+#endif /* CONFIG_MIPS_MT_SMP */
+
+static inline unsigned int
+counters_total_to_per_cpu(unsigned int counters)
+{
+ return counters >> vpe_shift();
+}
+
+static inline unsigned int
+counters_per_cpu_to_total(unsigned int counters)
+{
+ return counters << vpe_shift();
+}
+
+#define __define_perf_accessors(r, n, np) \
+ \
+static inline unsigned int r_c0_ ## r ## n(void) \
+{ \
+ unsigned int cpu = vpe_id(); \
+ \
+ switch (cpu) { \
+ case 0: \
+ return read_c0_ ## r ## n(); \
+ case 1: \
+ return read_c0_ ## r ## np(); \
+ default: \
+ BUG(); \
+ } \
+ return 0; \
+} \
+ \
+static inline void w_c0_ ## r ## n(unsigned int value) \
+{ \
+ unsigned int cpu = vpe_id(); \
+ \
+ switch (cpu) { \
+ case 0: \
+ write_c0_ ## r ## n(value); \
+ return; \
+ case 1: \
+ write_c0_ ## r ## np(value); \
+ return; \
+ default: \
+ BUG(); \
+ } \
+ return; \
+} \
+
+__define_perf_accessors(perfcntr, 0, 2)
+__define_perf_accessors(perfcntr, 1, 3)
+__define_perf_accessors(perfcntr, 2, 0)
+__define_perf_accessors(perfcntr, 3, 1)
+
+__define_perf_accessors(perfctrl, 0, 2)
+__define_perf_accessors(perfctrl, 1, 3)
+__define_perf_accessors(perfctrl, 2, 0)
+__define_perf_accessors(perfctrl, 3, 1)
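
For readers unfamiliar with the macro, __define_perf_accessors(perfcntr, 0, 2) makes the logical counter 0 map to hardware counter 0 on VPE 0 and to hardware counter 2 on VPE 1, so each virtual processor gets its own pair of counters. The standalone sketch below (not part of the patch; the read_c0_* stand-ins and vpe_id_demo variable are stubs for illustration) mirrors that hand expansion:

#include <stdio.h>

/* Stand-ins for the kernel's coprocessor-0 accessors, for illustration only. */
static unsigned int fake_cntr[4] = { 100, 200, 300, 400 };
static unsigned int read_c0_perfcntr0(void) { return fake_cntr[0]; }
static unsigned int read_c0_perfcntr2(void) { return fake_cntr[2]; }
static unsigned int vpe_id_demo;             /* stands in for vpe_id() */

/* Approximate hand expansion of __define_perf_accessors(perfcntr, 0, 2). */
static unsigned int r_c0_perfcntr0(void)
{
	switch (vpe_id_demo) {
	case 0:
		return read_c0_perfcntr0();   /* VPE 0 reads hardware counter 0 */
	case 1:
		return read_c0_perfcntr2();   /* VPE 1 reads hardware counter 2 */
	default:
		return 0;
	}
}

int main(void)
{
	vpe_id_demo = 0;
	printf("VPE0 sees %u\n", r_c0_perfcntr0()); /* 100 */
	vpe_id_demo = 1;
	printf("VPE1 sees %u\n", r_c0_perfcntr0()); /* 300 */
	return 0;
}
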
+
+static inline int __n_counters(void)
+{
+ if (!(read_c0_config1() & M_CONFIG1_PC))
+ return 0;
+ if (!(read_c0_perfctrl0() & M_PERFCTL_MORE))
+ return 1;
+ if (!(read_c0_perfctrl1() & M_PERFCTL_MORE))
+ return 2;
+ if (!(read_c0_perfctrl2() & M_PERFCTL_MORE))
+ return 3;
+
+ return 4;
+}
+
+static inline int n_counters(void)
+{
+ int counters;
+
+ switch (current_cpu_type()) {
+ case CPU_R10000:
+ counters = 2;
+ break;
+
+ case CPU_R12000:
+ case CPU_R14000:
+ counters = 4;
+ break;
+
+ default:
+ counters = __n_counters();
+ }
+
+ return counters;
+}
+
+static void reset_counters(void *arg)
+{
+ int counters = (int)(long)arg;
+ switch (counters) {
+ case 4:
+ w_c0_perfctrl3(0);
+ w_c0_perfcntr3(0);
+ case 3:
+ w_c0_perfctrl2(0);
+ w_c0_perfcntr2(0);
+ case 2:
+ w_c0_perfctrl1(0);
+ w_c0_perfcntr1(0);
+ case 1:
+ w_c0_perfctrl0(0);
+ w_c0_perfcntr0(0);
+ }
+}
+
+static inline u64
+mipsxx_pmu_read_counter(unsigned int idx)
+{
+ switch (idx) {
+ case 0:
+ return r_c0_perfcntr0();
+ case 1:
+ return r_c0_perfcntr1();
+ case 2:
+ return r_c0_perfcntr2();
+ case 3:
+ return r_c0_perfcntr3();
+ default:
+ WARN_ONCE(1, "Invalid performance counter number (%d)\n", idx);
+ return 0;
+ }
+}
+
+static inline void
+mipsxx_pmu_write_counter(unsigned int idx, u64 val)
+{
+ switch (idx) {
+ case 0:
+ w_c0_perfcntr0(val);
+ return;
+ case 1:
+ w_c0_perfcntr1(val);
+ return;
+ case 2:
+ w_c0_perfcntr2(val);
+ return;
+ case 3:
+ w_c0_perfcntr3(val);
+ return;
+ }
+}
+
+static inline unsigned int
+mipsxx_pmu_read_control(unsigned int idx)
+{
+ switch (idx) {
+ case 0:
+ return r_c0_perfctrl0();
+ case 1:
+ return r_c0_perfctrl1();
+ case 2:
+ return r_c0_perfctrl2();
+ case 3:
+ return r_c0_perfctrl3();
+ default:
+ WARN_ONCE(1, "Invalid performance counter number (%d)\n", idx);
+ return 0;
+ }
+}
+
+static inline void
+mipsxx_pmu_write_control(unsigned int idx, unsigned int val)
+{
+ switch (idx) {
+ case 0:
+ w_c0_perfctrl0(val);
+ return;
+ case 1:
+ w_c0_perfctrl1(val);
+ return;
+ case 2:
+ w_c0_perfctrl2(val);
+ return;
+ case 3:
+ w_c0_perfctrl3(val);
+ return;
+ }
+}
+
+#ifdef CONFIG_MIPS_MT_SMP
+static DEFINE_RWLOCK(pmuint_rwlock);
+#endif
+
+/* 24K/34K/1004K cores can share the same event map. */
+static const struct mips_perf_event mipsxxcore_event_map
+ [PERF_COUNT_HW_MAX] = {
+ [PERF_COUNT_HW_CPU_CYCLES] = { 0x00, CNTR_EVEN | CNTR_ODD, P },
+ [PERF_COUNT_HW_INSTRUCTIONS] = { 0x01, CNTR_EVEN | CNTR_ODD, T },
+ [PERF_COUNT_HW_CACHE_REFERENCES] = { UNSUPPORTED_PERF_EVENT_ID },
+ [PERF_COUNT_HW_CACHE_MISSES] = { UNSUPPORTED_PERF_EVENT_ID },
+ [PERF_COUNT_HW_BRANCH_INSTRUCTIONS] = { 0x02, CNTR_EVEN, T },
+ [PERF_COUNT_HW_BRANCH_MISSES] = { 0x02, CNTR_ODD, T },
+ [PERF_COUNT_HW_BUS_CYCLES] = { UNSUPPORTED_PERF_EVENT_ID },
+};
+
+/* 74K core has different branch event code. */
+static const struct mips_perf_event mipsxx74Kcore_event_map
+ [PERF_COUNT_HW_MAX] = {
+ [PERF_COUNT_HW_CPU_CYCLES] = { 0x00, CNTR_EVEN | CNTR_ODD, P },
+ [PERF_COUNT_HW_INSTRUCTIONS] = { 0x01, CNTR_EVEN | CNTR_ODD, T },
+ [PERF_COUNT_HW_CACHE_REFERENCES] = { UNSUPPORTED_PERF_EVENT_ID },
+ [PERF_COUNT_HW_CACHE_MISSES] = { UNSUPPORTED_PERF_EVENT_ID },
+ [PERF_COUNT_HW_BRANCH_INSTRUCTIONS] = { 0x27, CNTR_EVEN, T },
+ [PERF_COUNT_HW_BRANCH_MISSES] = { 0x27, CNTR_ODD, T },
+ [PERF_COUNT_HW_BUS_CYCLES] = { UNSUPPORTED_PERF_EVENT_ID },
+};
+
+/* 24K/34K/1004K cores can share the same cache event map. */
+static const struct mips_perf_event mipsxxcore_cache_map
+ [PERF_COUNT_HW_CACHE_MAX]
+ [PERF_COUNT_HW_CACHE_OP_MAX]
+ [PERF_COUNT_HW_CACHE_RESULT_MAX] = {
+[C(L1D)] = {
+ /*
+ * Like some other architectures (e.g. ARM), the performance
+ * counters don't differentiate between read and write
+ * accesses/misses, so this isn't strictly correct, but it's the
+ * best we can do. Writes and reads get combined.
+ */
+ [C(OP_READ)] = {
+ [C(RESULT_ACCESS)] = { 0x0a, CNTR_EVEN, T },
+ [C(RESULT_MISS)] = { 0x0b, CNTR_EVEN | CNTR_ODD, T },
+ },
+ [C(OP_WRITE)] = {
+ [C(RESULT_ACCESS)] = { 0x0a, CNTR_EVEN, T },
+ [C(RESULT_MISS)] = { 0x0b, CNTR_EVEN | CNTR_ODD, T },
+ },
+ [C(OP_PREFETCH)] = {
+ [C(RESULT_ACCESS)] = { UNSUPPORTED_PERF_EVENT_ID },
+ [C(RESULT_MISS)] = { UNSUPPORTED_PERF_EVENT_ID },
+ },
+},
+[C(L1I)] = {
+ [C(OP_READ)] = {
+ [C(RESULT_ACCESS)] = { 0x09, CNTR_EVEN, T },
+ [C(RESULT_MISS)] = { 0x09, CNTR_ODD, T },
+ },
+ [C(OP_WRITE)] = {
+ [C(RESULT_ACCESS)] = { 0x09, CNTR_EVEN, T },
+ [C(RESULT_MISS)] = { 0x09, CNTR_ODD, T },
+ },
+ [C(OP_PREFETCH)] = {
+ [C(RESULT_ACCESS)] = { 0x14, CNTR_EVEN, T },
+ /*
+ * Note that MIPS has only "hit" events countable for
+ * the prefetch operation.
+ */
+ [C(RESULT_MISS)] = { UNSUPPORTED_PERF_EVENT_ID },
+ },
+},
+[C(LL)] = {
+ [C(OP_READ)] = {
+ [C(RESULT_ACCESS)] = { 0x15, CNTR_ODD, P },
+ [C(RESULT_MISS)] = { 0x16, CNTR_EVEN, P },
+ },
+ [C(OP_WRITE)] = {
+ [C(RESULT_ACCESS)] = { 0x15, CNTR_ODD, P },
+ [C(RESULT_MISS)] = { 0x16, CNTR_EVEN, P },
+ },
+ [C(OP_PREFETCH)] = {
+ [C(RESULT_ACCESS)] = { UNSUPPORTED_PERF_EVENT_ID },
+ [C(RESULT_MISS)] = { UNSUPPORTED_PERF_EVENT_ID },
+ },
+},
+[C(DTLB)] = {
+ [C(OP_READ)] = {
+ [C(RESULT_ACCESS)] = { 0x06, CNTR_EVEN, T },
+ [C(RESULT_MISS)] = { 0x06, CNTR_ODD, T },
+ },
+ [C(OP_WRITE)] = {
+ [C(RESULT_ACCESS)] = { 0x06, CNTR_EVEN, T },
+ [C(RESULT_MISS)] = { 0x06, CNTR_ODD, T },
+ },
+ [C(OP_PREFETCH)] = {
+ [C(RESULT_ACCESS)] = { UNSUPPORTED_PERF_EVENT_ID },
+ [C(RESULT_MISS)] = { UNSUPPORTED_PERF_EVENT_ID },
+ },
+},
+[C(ITLB)] = {
+ [C(OP_READ)] = {
+ [C(RESULT_ACCESS)] = { 0x05, CNTR_EVEN, T },
+ [C(RESULT_MISS)] = { 0x05, CNTR_ODD, T },
+ },
+ [C(OP_WRITE)] = {
+ [C(RESULT_ACCESS)] = { 0x05, CNTR_EVEN, T },
+ [C(RESULT_MISS)] = { 0x05, CNTR_ODD, T },
+ },
+ [C(OP_PREFETCH)] = {
+ [C(RESULT_ACCESS)] = { UNSUPPORTED_PERF_EVENT_ID },
+ [C(RESULT_MISS)] = { UNSUPPORTED_PERF_EVENT_ID },
+ },
+},
+[C(BPU)] = {
+ /* Using the same code for *HW_BRANCH* */
+ [C(OP_READ)] = {
+ [C(RESULT_ACCESS)] = { 0x02, CNTR_EVEN, T },
+ [C(RESULT_MISS)] = { 0x02, CNTR_ODD, T },
+ },
+ [C(OP_WRITE)] = {
+ [C(RESULT_ACCESS)] = { 0x02, CNTR_EVEN, T },
+ [C(RESULT_MISS)] = { 0x02, CNTR_ODD, T },
+ },
+ [C(OP_PREFETCH)] = {
+ [C(RESULT_ACCESS)] = { UNSUPPORTED_PERF_EVENT_ID },
+ [C(RESULT_MISS)] = { UNSUPPORTED_PERF_EVENT_ID },
+ },
+},
+};
+
+/* 74K core has completely different cache event map. */
+static const struct mips_perf_event mipsxx74Kcore_cache_map
+ [PERF_COUNT_HW_CACHE_MAX]
+ [PERF_COUNT_HW_CACHE_OP_MAX]
+ [PERF_COUNT_HW_CACHE_RESULT_MAX] = {
+[C(L1D)] = {
+ /*
+ * Like some other architectures (e.g. ARM), the performance
+ * counters don't differentiate between read and write
+ * accesses/misses, so this isn't strictly correct, but it's the
+ * best we can do. Writes and reads get combined.
+ */
+ [C(OP_READ)] = {
+ [C(RESULT_ACCESS)] = { 0x17, CNTR_ODD, T },
+ [C(RESULT_MISS)] = { 0x18, CNTR_ODD, T },
+ },
+ [C(OP_WRITE)] = {
+ [C(RESULT_ACCESS)] = { 0x17, CNTR_ODD, T },
+ [C(RESULT_MISS)] = { 0x18, CNTR_ODD, T },
+ },
+ [C(OP_PREFETCH)] = {
+ [C(RESULT_ACCESS)] = { UNSUPPORTED_PERF_EVENT_ID },
+ [C(RESULT_MISS)] = { UNSUPPORTED_PERF_EVENT_ID },
+ },
+},
+[C(L1I)] = {
+ [C(OP_READ)] = {
+ [C(RESULT_ACCESS)] = { 0x06, CNTR_EVEN, T },
+ [C(RESULT_MISS)] = { 0x06, CNTR_ODD, T },
+ },
+ [C(OP_WRITE)] = {
+ [C(RESULT_ACCESS)] = { 0x06, CNTR_EVEN, T },
+ [C(RESULT_MISS)] = { 0x06, CNTR_ODD, T },
+ },
+ [C(OP_PREFETCH)] = {
+ [C(RESULT_ACCESS)] = { 0x34, CNTR_EVEN, T },
+ /*
+ * Note that MIPS has only "hit" events countable for
+ * the prefetch operation.
+ */
+ [C(RESULT_MISS)] = { UNSUPPORTED_PERF_EVENT_ID },
+ },
+},
+[C(LL)] = {
+ [C(OP_READ)] = {
+ [C(RESULT_ACCESS)] = { 0x1c, CNTR_ODD, P },
+ [C(RESULT_MISS)] = { 0x1d, CNTR_EVEN | CNTR_ODD, P },
+ },
+ [C(OP_WRITE)] = {
+ [C(RESULT_ACCESS)] = { 0x1c, CNTR_ODD, P },
+ [C(RESULT_MISS)] = { 0x1d, CNTR_EVEN | CNTR_ODD, P },
+ },
+ [C(OP_PREFETCH)] = {
+ [C(RESULT_ACCESS)] = { UNSUPPORTED_PERF_EVENT_ID },
+ [C(RESULT_MISS)] = { UNSUPPORTED_PERF_EVENT_ID },
+ },
+},
+[C(DTLB)] = {
+ /* 74K core does not have specific DTLB events. */
+ [C(OP_READ)] = {
+ [C(RESULT_ACCESS)] = { UNSUPPORTED_PERF_EVENT_ID },
+ [C(RESULT_MISS)] = { UNSUPPORTED_PERF_EVENT_ID },
+ },
+ [C(OP_WRITE)] = {
+ [C(RESULT_ACCESS)] = { UNSUPPORTED_PERF_EVENT_ID },
+ [C(RESULT_MISS)] = { UNSUPPORTED_PERF_EVENT_ID },
+ },
+ [C(OP_PREFETCH)] = {
+ [C(RESULT_ACCESS)] = { UNSUPPORTED_PERF_EVENT_ID },
+ [C(RESULT_MISS)] = { UNSUPPORTED_PERF_EVENT_ID },
+ },
+},
+[C(ITLB)] = {
+ [C(OP_READ)] = {
+ [C(RESULT_ACCESS)] = { 0x04, CNTR_EVEN, T },
+ [C(RESULT_MISS)] = { 0x04, CNTR_ODD, T },
+ },
+ [C(OP_WRITE)] = {
+ [C(RESULT_ACCESS)] = { 0x04, CNTR_EVEN, T },
+ [C(RESULT_MISS)] = { 0x04, CNTR_ODD, T },
+ },
+ [C(OP_PREFETCH)] = {
+ [C(RESULT_ACCESS)] = { UNSUPPORTED_PERF_EVENT_ID },
+ [C(RESULT_MISS)] = { UNSUPPORTED_PERF_EVENT_ID },
+ },
+},
+[C(BPU)] = {
+ /* Using the same code for *HW_BRANCH* */
+ [C(OP_READ)] = {
+ [C(RESULT_ACCESS)] = { 0x27, CNTR_EVEN, T },
+ [C(RESULT_MISS)] = { 0x27, CNTR_ODD, T },
+ },
+ [C(OP_WRITE)] = {
+ [C(RESULT_ACCESS)] = { 0x27, CNTR_EVEN, T },
+ [C(RESULT_MISS)] = { 0x27, CNTR_ODD, T },
+ },
+ [C(OP_PREFETCH)] = {
+ [C(RESULT_ACCESS)] = { UNSUPPORTED_PERF_EVENT_ID },
+ [C(RESULT_MISS)] = { UNSUPPORTED_PERF_EVENT_ID },
+ },
+},
+};
+
+#ifdef CONFIG_MIPS_MT_SMP
+static void
+check_and_calc_range(struct perf_event *event,
+ const struct mips_perf_event *pev)
+{
+ struct hw_perf_event *hwc = &event->hw;
+
+ if (event->cpu >= 0) {
+ if (pev->range > V) {
+ /*
+ * The user selected an event that is processor
+ * wide, while expecting it to be VPE wide.
+ */
+ hwc->config_base |= M_TC_EN_ALL;
+ } else {
+ /*
+ * FIXME: cpu_data[event->cpu].vpe_id reports 0
+ * for both CPUs.
+ */
+ hwc->config_base |= M_PERFCTL_VPEID(event->cpu);
+ hwc->config_base |= M_TC_EN_VPE;
+ }
+ } else
+ hwc->config_base |= M_TC_EN_ALL;
+}
+#else
+static void
+check_and_calc_range(struct perf_event *event,
+ const struct mips_perf_event *pev)
+{
+}
+#endif
+
+static int __hw_perf_event_init(struct perf_event *event)
+{
+ struct perf_event_attr *attr = &event->attr;
+ struct hw_perf_event *hwc = &event->hw;
+ const struct mips_perf_event *pev;
+ int err;
+
+ /* Returning MIPS event descriptor for generic perf event. */
+ if (PERF_TYPE_HARDWARE == event->attr.type) {
+ if (event->attr.config >= PERF_COUNT_HW_MAX)
+ return -EINVAL;
+ pev = mipspmu_map_general_event(event->attr.config);
+ } else if (PERF_TYPE_HW_CACHE == event->attr.type) {
+ pev = mipspmu_map_cache_event(event->attr.config);
+ } else if (PERF_TYPE_RAW == event->attr.type) {
+ /* We are working on the global raw event. */
+ mutex_lock(&raw_event_mutex);
+ pev = mipspmu->map_raw_event(event->attr.config);
+ } else {
+ /* The event type is not (yet) supported. */
+ return -EOPNOTSUPP;
+ }
+
+ if (IS_ERR(pev)) {
+ if (PERF_TYPE_RAW == event->attr.type)
+ mutex_unlock(&raw_event_mutex);
+ return PTR_ERR(pev);
+ }
+
+ /*
+ * We allow max flexibility on how each individual counter shared
+ * by the single CPU operates (the mode exclusion and the range).
+ */
+ hwc->config_base = M_PERFCTL_INTERRUPT_ENABLE;
+
+ /* Calculate range bits and validate it. */
+ if (num_possible_cpus() > 1)
+ check_and_calc_range(event, pev);
+
+ hwc->event_base = mipspmu_perf_event_encode(pev);
+ if (PERF_TYPE_RAW == event->attr.type)
+ mutex_unlock(&raw_event_mutex);
+
+ if (!attr->exclude_user)
+ hwc->config_base |= M_PERFCTL_USER;
+ if (!attr->exclude_kernel) {
+ hwc->config_base |= M_PERFCTL_KERNEL;
+ /* MIPS kernel mode: KSU == 00b || EXL == 1 || ERL == 1 */
+ hwc->config_base |= M_PERFCTL_EXL;
+ }
+ if (!attr->exclude_hv)
+ hwc->config_base |= M_PERFCTL_SUPERVISOR;
+
+ hwc->config_base &= M_PERFCTL_CONFIG_MASK;
+ /*
+ * The event may belong to another CPU; we do not assign it a local
+ * counter for now.
+ */
+ hwc->idx = -1;
+ hwc->config = 0;
+
+ if (!hwc->sample_period) {
+ hwc->sample_period = MAX_PERIOD;
+ hwc->last_period = hwc->sample_period;
+ local64_set(&hwc->period_left, hwc->sample_period);
+ }
+
+ err = 0;
+ if (event->group_leader != event) {
+ err = validate_group(event);
+ if (err)
+ return -EINVAL;
+ }
+
+ event->destroy = hw_perf_event_destroy;
+
+ return err;
+}
+
+static void pause_local_counters(void)
+{
+ struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
+ int counters = mipspmu->num_counters;
+ unsigned long flags;
+
+ local_irq_save(flags);
+ switch (counters) {
+ case 4:
+ cpuc->saved_ctrl[3] = r_c0_perfctrl3();
+ w_c0_perfctrl3(cpuc->saved_ctrl[3] &
+ ~M_PERFCTL_COUNT_EVENT_WHENEVER);
+ case 3:
+ cpuc->saved_ctrl[2] = r_c0_perfctrl2();
+ w_c0_perfctrl2(cpuc->saved_ctrl[2] &
+ ~M_PERFCTL_COUNT_EVENT_WHENEVER);
+ case 2:
+ cpuc->saved_ctrl[1] = r_c0_perfctrl1();
+ w_c0_perfctrl1(cpuc->saved_ctrl[1] &
+ ~M_PERFCTL_COUNT_EVENT_WHENEVER);
+ case 1:
+ cpuc->saved_ctrl[0] = r_c0_perfctrl0();
+ w_c0_perfctrl0(cpuc->saved_ctrl[0] &
+ ~M_PERFCTL_COUNT_EVENT_WHENEVER);
+ }
+ local_irq_restore(flags);
+}
+
+static void resume_local_counters(void)
+{
+ struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
+ int counters = mipspmu->num_counters;
+ unsigned long flags;
+
+ local_irq_save(flags);
+ switch (counters) {
+ case 4:
+ w_c0_perfctrl3(cpuc->saved_ctrl[3]);
+ case 3:
+ w_c0_perfctrl2(cpuc->saved_ctrl[2]);
+ case 2:
+ w_c0_perfctrl1(cpuc->saved_ctrl[1]);
+ case 1:
+ w_c0_perfctrl0(cpuc->saved_ctrl[0]);
+ }
+ local_irq_restore(flags);
+}
+
+static int mipsxx_pmu_handle_shared_irq(void)
+{
+ struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
+ struct perf_sample_data data;
+ unsigned int counters = mipspmu->num_counters;
+ unsigned int counter;
+ int handled = IRQ_NONE;
+ struct pt_regs *regs;
+
+ if (cpu_has_mips_r2 && !(read_c0_cause() & (1 << 26)))
+ return handled;
+
+ /*
+ * First pause the local counters, so that if we block on the lock
+ * here, the counters are already paused. If we block because of
+ * perf_disable(), the timer interrupt handler will be delayed.
+ *
+ * See also mipsxx_pmu_start().
+ */
+ pause_local_counters();
+#ifdef CONFIG_MIPS_MT_SMP
+ read_lock(&pmuint_rwlock);
+#endif
+
+ regs = get_irq_regs();
+
+ perf_sample_data_init(&data, 0);
+
+ switch (counters) {
+#define HANDLE_COUNTER(n) \
+ case n + 1: \
+ if (test_bit(n, cpuc->used_mask)) { \
+ counter = r_c0_perfcntr ## n(); \
+ if (counter & M_COUNTER_OVERFLOW) { \
+ w_c0_perfcntr ## n(counter & \
+ VALID_COUNT); \
+ if (test_and_change_bit(n, cpuc->msbs)) \
+ handle_associated_event(cpuc, \
+ n, &data, regs); \
+ handled = IRQ_HANDLED; \
+ } \
+ }
+ HANDLE_COUNTER(3)
+ HANDLE_COUNTER(2)
+ HANDLE_COUNTER(1)
+ HANDLE_COUNTER(0)
+ }
+
+ /*
+ * Do all the work for the pending perf events. We can do this
+ * here because the performance counter interrupt is a regular
+ * interrupt, not an NMI.
+ */
+ if (handled == IRQ_HANDLED)
+ perf_event_do_pending();
+
+#ifdef CONFIG_MIPS_MT_SMP
+ read_unlock(&pmuint_rwlock);
+#endif
+ resume_local_counters();
+ return handled;
+}
+
+static irqreturn_t
+mipsxx_pmu_handle_irq(int irq, void *dev)
+{
+ return mipsxx_pmu_handle_shared_irq();
+}
+
+static void mipsxx_pmu_start(void)
+{
+#ifdef CONFIG_MIPS_MT_SMP
+ write_unlock(&pmuint_rwlock);
+#endif
+ resume_local_counters();
+}
+
+/*
+ * MIPS performance counters can be per-TC. The control registers
+ * cannot be accessed directly across CPUs, so global control requires
+ * cross-CPU calls. on_each_cpu() could help, but we cannot guarantee
+ * that this function is called with interrupts enabled. So instead we
+ * pause the local counters, then grab an rwlock and leave the counters
+ * on the other CPUs alone. If a counter interrupt is raised while we
+ * hold the write lock, that CPU simply pauses its local counters and
+ * spins in the handler. Also, we know we will not be migrated to
+ * another CPU between pausing the local counters and grabbing the lock.
+ */
+static void mipsxx_pmu_stop(void)
+{
+ pause_local_counters();
+#ifdef CONFIG_MIPS_MT_SMP
+ write_lock(&pmuint_rwlock);
+#endif
+}
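
The locking strategy described in the comment above can be sketched outside the kernel with an ordinary reader-writer lock: every CPU-local path quiesces its own counters before taking the read side, and the global stop path quiesces locally before taking the write side, so no handler touches a counter while a global reconfiguration holds the lock. The stand-alone user-space analogue below (build with -pthread) uses made-up names and a flag in place of the real perfctrl registers; it is an illustration of the pattern, not code from the patch.

/* Illustrative user-space analogue of the pause-then-lock scheme above. */
#include <pthread.h>
#include <stdio.h>

static pthread_rwlock_t pmuint_rwlock_demo = PTHREAD_RWLOCK_INITIALIZER;
static __thread int local_counters_running = 1; /* stands in for the perfctrl state */

static void pause_local(void)  { local_counters_running = 0; }
static void resume_local(void) { local_counters_running = 1; }

/* What the per-CPU interrupt handler does (read side). */
static void handle_counter_irq(void)
{
        pause_local();                          /* quiesce before possibly blocking */
        pthread_rwlock_rdlock(&pmuint_rwlock_demo);
        /* ... service overflowed counters ... */
        pthread_rwlock_unlock(&pmuint_rwlock_demo);
        resume_local();
}

/* What the global stop/start paths do (write side). */
static void pmu_stop_demo(void)
{
        pause_local();
        pthread_rwlock_wrlock(&pmuint_rwlock_demo);
}

static void pmu_start_demo(void)
{
        pthread_rwlock_unlock(&pmuint_rwlock_demo);
        resume_local();
}

int main(void)
{
        pmu_stop_demo();
        /* reprogram the "counters" here while every handler is held off */
        pmu_start_demo();
        handle_counter_irq();
        printf("counters running: %d\n", local_counters_running);
        return 0;
}
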
+
+static int
+mipsxx_pmu_alloc_counter(struct cpu_hw_events *cpuc,
+ struct hw_perf_event *hwc)
+{
+ int i;
+
+ /*
+ * We only need to care about the counter mask. The range has
+ * already been checked.
+ */
+ unsigned long cntr_mask = (hwc->event_base >> 8) & 0xffff;
+
+ for (i = mipspmu->num_counters - 1; i >= 0; i--) {
+ /*
+ * Note that some MIPS perf events can be counted by both
+ * even and odd counters, whereas many others can only be
+ * counted by even _or_ odd counters. This introduces an
+ * issue: when an event of the former kind takes the counter
+ * that an event of the latter kind wants to use, "counter
+ * allocation" for the latter event fails. If the two could
+ * be swapped dynamically, both would be satisfied, but we
+ * leave this issue alone for now.
+ */
+ if (test_bit(i, &cntr_mask) &&
+ !test_and_set_bit(i, cpuc->used_mask))
+ return i;
+ }
+
+ return -EAGAIN;
+}
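
As a rough stand-alone illustration of how the allocator above consumes hwc->event_base, the sketch below packs an event number into bits 0..7 and a counter mask into bits 8..23, then scans for a free counter from the top down, mirroring the loop in mipsxx_pmu_alloc_counter(). The packing itself is inferred from how event_base is used in this file (the real mipspmu_perf_event_encode() is not part of this hunk), so treat the encode helper as an assumption.

#include <stdint.h>
#include <stdio.h>

#define DEMO_NUM_COUNTERS 4

/* Assumed packing: event id in bits 0..7, counter mask in bits 8..23. */
static uint32_t encode_event(uint8_t event_id, uint16_t cntr_mask)
{
        return ((uint32_t)cntr_mask << 8) | event_id;
}

/* Pick the highest-numbered free counter allowed by the mask, the way
 * mipsxx_pmu_alloc_counter() scans from num_counters - 1 downwards. */
static int alloc_counter(uint32_t event_base, uint32_t *used_mask)
{
        uint16_t cntr_mask = (event_base >> 8) & 0xffff;
        int i;

        for (i = DEMO_NUM_COUNTERS - 1; i >= 0; i--) {
                if ((cntr_mask & (1u << i)) && !(*used_mask & (1u << i))) {
                        *used_mask |= 1u << i;
                        return i;
                }
        }
        return -1;      /* -EAGAIN in the kernel */
}

int main(void)
{
        uint32_t used = 0;
        uint32_t ev_even_only = encode_event(0x06, 0x5);        /* counters 0 and 2 */
        uint32_t ev_any       = encode_event(0x00, 0xf);        /* any of the 4 */

        printf("even-only event -> counter %d\n", alloc_counter(ev_even_only, &used));
        printf("any-counter event -> counter %d\n", alloc_counter(ev_any, &used));
        return 0;
}
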
+
+static void
+mipsxx_pmu_enable_event(struct hw_perf_event *evt, int idx)
+{
+ struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
+ unsigned long flags;
+
+ WARN_ON(idx < 0 || idx >= mipspmu->num_counters);
+
+ local_irq_save(flags);
+ cpuc->saved_ctrl[idx] = M_PERFCTL_EVENT(evt->event_base & 0xff) |
+ (evt->config_base & M_PERFCTL_CONFIG_MASK) |
+ /* Make sure interrupt enabled. */
+ M_PERFCTL_INTERRUPT_ENABLE;
+ /*
+ * We do not actually let the counter run. Leave it until start().
+ */
+ local_irq_restore(flags);
+}
+
+static void
+mipsxx_pmu_disable_event(int idx)
+{
+ struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
+ unsigned long flags;
+
+ WARN_ON(idx < 0 || idx >= mipspmu->num_counters);
+
+ local_irq_save(flags);
+ cpuc->saved_ctrl[idx] = mipsxx_pmu_read_control(idx) &
+ ~M_PERFCTL_COUNT_EVENT_WHENEVER;
+ mipsxx_pmu_write_control(idx, cpuc->saved_ctrl[idx]);
+ local_irq_restore(flags);
+}
+
+/* 24K */
+#define IS_UNSUPPORTED_24K_EVENT(r, b) \
+ ((b) == 12 || (r) == 151 || (r) == 152 || (b) == 26 || \
+ (b) == 27 || (r) == 28 || (r) == 158 || (b) == 31 || \
+ (b) == 32 || (b) == 34 || (b) == 36 || (r) == 168 || \
+ (r) == 172 || (b) == 47 || ((b) >= 56 && (b) <= 63) || \
+ ((b) >= 68 && (b) <= 127))
+#define IS_BOTH_COUNTERS_24K_EVENT(b) \
+ ((b) == 0 || (b) == 1 || (b) == 11)
+
+/* 34K */
+#define IS_UNSUPPORTED_34K_EVENT(r, b) \
+ ((b) == 12 || (r) == 27 || (r) == 158 || (b) == 36 || \
+ (b) == 38 || (r) == 175 || ((b) >= 56 && (b) <= 63) || \
+ ((b) >= 68 && (b) <= 127))
+#define IS_BOTH_COUNTERS_34K_EVENT(b) \
+ ((b) == 0 || (b) == 1 || (b) == 11)
+#ifdef CONFIG_MIPS_MT_SMP
+#define IS_RANGE_P_34K_EVENT(r, b) \
+ ((b) == 0 || (r) == 18 || (b) == 21 || (b) == 22 || \
+ (b) == 25 || (b) == 39 || (r) == 44 || (r) == 174 || \
+ (r) == 176 || ((b) >= 50 && (b) <= 55) || \
+ ((b) >= 64 && (b) <= 67))
+#define IS_RANGE_V_34K_EVENT(r) ((r) == 47)
+#endif
+
+/* 74K */
+#define IS_UNSUPPORTED_74K_EVENT(r, b) \
+ ((r) == 5 || ((r) >= 135 && (r) <= 137) || \
+ ((b) >= 10 && (b) <= 12) || (b) == 22 || (b) == 27 || \
+ (b) == 33 || (b) == 34 || ((b) >= 47 && (b) <= 49) || \
+ (r) == 178 || (b) == 55 || (b) == 57 || (b) == 60 || \
+ (b) == 61 || (r) == 62 || (r) == 191 || \
+ ((b) >= 64 && (b) <= 127))
+#define IS_BOTH_COUNTERS_74K_EVENT(b) \
+ ((b) == 0 || (b) == 1)
+
+/* 1004K */
+#define IS_UNSUPPORTED_1004K_EVENT(r, b) \
+ ((b) == 12 || (r) == 27 || (r) == 158 || (b) == 38 || \
+ (r) == 175 || (b) == 63 || ((b) >= 68 && (b) <= 127))
+#define IS_BOTH_COUNTERS_1004K_EVENT(b) \
+ ((b) == 0 || (b) == 1 || (b) == 11)
+#ifdef CONFIG_MIPS_MT_SMP
+#define IS_RANGE_P_1004K_EVENT(r, b) \
+ ((b) == 0 || (r) == 18 || (b) == 21 || (b) == 22 || \
+ (b) == 25 || (b) == 36 || (b) == 39 || (r) == 44 || \
+ (r) == 174 || (r) == 176 || ((b) >= 50 && (b) <= 59) || \
+ (r) == 188 || (b) == 61 || (b) == 62 || \
+ ((b) >= 64 && (b) <= 67))
+#define IS_RANGE_V_1004K_EVENT(r) ((r) == 47)
+#endif
+
+/*
+ * Users can specify raw events 0-255, where 0-127 select events on
+ * the even counters and 128-255 select events on the odd counters.
+ * Note that bit 7 indicates the parity. So, for example, to request
+ * Event Num 15 on the odd counters (as listed in the user manual),
+ * 128 must be added to 15 to form the event config, i.e. 143 (0x8F)
+ * is the value to use.
+ */
+static const struct mips_perf_event *
+mipsxx_pmu_map_raw_event(u64 config)
+{
+ unsigned int raw_id = config & 0xff;
+ unsigned int base_id = raw_id & 0x7f;
+
+ switch (current_cpu_type()) {
+ case CPU_24K:
+ if (IS_UNSUPPORTED_24K_EVENT(raw_id, base_id))
+ return ERR_PTR(-EOPNOTSUPP);
+ raw_event.event_id = base_id;
+ if (IS_BOTH_COUNTERS_24K_EVENT(base_id))
+ raw_event.cntr_mask = CNTR_EVEN | CNTR_ODD;
+ else
+ raw_event.cntr_mask =
+ raw_id > 127 ? CNTR_ODD : CNTR_EVEN;
+#ifdef CONFIG_MIPS_MT_SMP
+ /*
+ * This is effectively a no-op: non-multithreading
+ * CPUs will not check or calculate the range.
+ */
+ raw_event.range = P;
+#endif
+ break;
+ case CPU_34K:
+ if (IS_UNSUPPORTED_34K_EVENT(raw_id, base_id))
+ return ERR_PTR(-EOPNOTSUPP);
+ raw_event.event_id = base_id;
+ if (IS_BOTH_COUNTERS_34K_EVENT(base_id))
+ raw_event.cntr_mask = CNTR_EVEN | CNTR_ODD;
+ else
+ raw_event.cntr_mask =
+ raw_id > 127 ? CNTR_ODD : CNTR_EVEN;
+#ifdef CONFIG_MIPS_MT_SMP
+ if (IS_RANGE_P_34K_EVENT(raw_id, base_id))
+ raw_event.range = P;
+ else if (unlikely(IS_RANGE_V_34K_EVENT(raw_id)))
+ raw_event.range = V;
+ else
+ raw_event.range = T;
+#endif
+ break;
+ case CPU_74K:
+ if (IS_UNSUPPORTED_74K_EVENT(raw_id, base_id))
+ return ERR_PTR(-EOPNOTSUPP);
+ raw_event.event_id = base_id;
+ if (IS_BOTH_COUNTERS_74K_EVENT(base_id))
+ raw_event.cntr_mask = CNTR_EVEN | CNTR_ODD;
+ else
+ raw_event.cntr_mask =
+ raw_id > 127 ? CNTR_ODD : CNTR_EVEN;
+#ifdef CONFIG_MIPS_MT_SMP
+ raw_event.range = P;
+#endif
+ break;
+ case CPU_1004K:
+ if (IS_UNSUPPORTED_1004K_EVENT(raw_id, base_id))
+ return ERR_PTR(-EOPNOTSUPP);
+ raw_event.event_id = base_id;
+ if (IS_BOTH_COUNTERS_1004K_EVENT(base_id))
+ raw_event.cntr_mask = CNTR_EVEN | CNTR_ODD;
+ else
+ raw_event.cntr_mask =
+ raw_id > 127 ? CNTR_ODD : CNTR_EVEN;
+#ifdef CONFIG_MIPS_MT_SMP
+ if (IS_RANGE_P_1004K_EVENT(raw_id, base_id))
+ raw_event.range = P;
+ else if (unlikely(IS_RANGE_V_1004K_EVENT(raw_id)))
+ raw_event.range = V;
+ else
+ raw_event.range = T;
+#endif
+ break;
+ }
+
+ return &raw_event;
+}
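
With the mapping above in place, a raw event reaches the PMU from user space through the ordinary perf_event_open() syscall. The sketch below opens config 0x8F, i.e. the worked example from the comment preceding mipsxx_pmu_map_raw_event() (event 15 forced onto an odd counter); whether that particular event number exists is core-specific, so the value is only an illustration of the encoding.

#include <linux/perf_event.h>
#include <sys/syscall.h>
#include <sys/ioctl.h>
#include <stdint.h>
#include <string.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
        struct perf_event_attr attr;
        uint64_t count;
        int fd;

        memset(&attr, 0, sizeof(attr));
        attr.type = PERF_TYPE_RAW;
        attr.size = sizeof(attr);
        attr.config = 0x8f;             /* event 15 on the odd counters (128 + 15) */
        attr.disabled = 1;
        attr.exclude_hv = 1;

        fd = syscall(__NR_perf_event_open, &attr, 0 /* self */, -1 /* any cpu */,
                     -1 /* no group */, 0);
        if (fd < 0) {
                perror("perf_event_open");
                return 1;
        }

        ioctl(fd, PERF_EVENT_IOC_ENABLE, 0);
        /* ... workload under test ... */
        ioctl(fd, PERF_EVENT_IOC_DISABLE, 0);

        if (read(fd, &count, sizeof(count)) == sizeof(count))
                printf("raw event 0x8f counted %llu times\n",
                       (unsigned long long)count);
        close(fd);
        return 0;
}
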
+
+static struct mips_pmu mipsxxcore_pmu = {
+ .handle_irq = mipsxx_pmu_handle_irq,
+ .handle_shared_irq = mipsxx_pmu_handle_shared_irq,
+ .start = mipsxx_pmu_start,
+ .stop = mipsxx_pmu_stop,
+ .alloc_counter = mipsxx_pmu_alloc_counter,
+ .read_counter = mipsxx_pmu_read_counter,
+ .write_counter = mipsxx_pmu_write_counter,
+ .enable_event = mipsxx_pmu_enable_event,
+ .disable_event = mipsxx_pmu_disable_event,
+ .map_raw_event = mipsxx_pmu_map_raw_event,
+ .general_event_map = &mipsxxcore_event_map,
+ .cache_event_map = &mipsxxcore_cache_map,
+};
+
+static struct mips_pmu mipsxx74Kcore_pmu = {
+ .handle_irq = mipsxx_pmu_handle_irq,
+ .handle_shared_irq = mipsxx_pmu_handle_shared_irq,
+ .start = mipsxx_pmu_start,
+ .stop = mipsxx_pmu_stop,
+ .alloc_counter = mipsxx_pmu_alloc_counter,
+ .read_counter = mipsxx_pmu_read_counter,
+ .write_counter = mipsxx_pmu_write_counter,
+ .enable_event = mipsxx_pmu_enable_event,
+ .disable_event = mipsxx_pmu_disable_event,
+ .map_raw_event = mipsxx_pmu_map_raw_event,
+ .general_event_map = &mipsxx74Kcore_event_map,
+ .cache_event_map = &mipsxx74Kcore_cache_map,
+};
+
+static int __init
+init_hw_perf_events(void)
+{
+ int counters, irq;
+
+ pr_info("Performance counters: ");
+
+ counters = n_counters();
+ if (counters == 0) {
+ pr_cont("No available PMU.\n");
+ return -ENODEV;
+ }
+
+#ifdef CONFIG_MIPS_MT_SMP
+ cpu_has_mipsmt_pertccounters = read_c0_config7() & (1<<19);
+ if (!cpu_has_mipsmt_pertccounters)
+ counters = counters_total_to_per_cpu(counters);
+#endif
+
+#ifdef MSC01E_INT_BASE
+ if (cpu_has_veic) {
+ /*
+ * Using platform specific interrupt controller defines.
+ */
+ irq = MSC01E_INT_BASE + MSC01E_INT_PERFCTR;
+ } else {
+#endif
+ if (cp0_perfcount_irq >= 0)
+ irq = MIPS_CPU_IRQ_BASE + cp0_perfcount_irq;
+ else
+ irq = -1;
+#ifdef MSC01E_INT_BASE
+ }
+#endif
+
+ on_each_cpu(reset_counters, (void *)(long)counters, 1);
+
+ switch (current_cpu_type()) {
+ case CPU_24K:
+ mipsxxcore_pmu.name = "mips/24K";
+ mipsxxcore_pmu.num_counters = counters;
+ mipsxxcore_pmu.irq = irq;
+ mipspmu = &mipsxxcore_pmu;
+ break;
+ case CPU_34K:
+ mipsxxcore_pmu.name = "mips/34K";
+ mipsxxcore_pmu.num_counters = counters;
+ mipsxxcore_pmu.irq = irq;
+ mipspmu = &mipsxxcore_pmu;
+ break;
+ case CPU_74K:
+ mipsxx74Kcore_pmu.name = "mips/74K";
+ mipsxx74Kcore_pmu.num_counters = counters;
+ mipsxx74Kcore_pmu.irq = irq;
+ mipspmu = &mipsxx74Kcore_pmu;
+ break;
+ case CPU_1004K:
+ mipsxxcore_pmu.name = "mips/1004K";
+ mipsxxcore_pmu.num_counters = counters;
+ mipsxxcore_pmu.irq = irq;
+ mipspmu = &mipsxxcore_pmu;
+ break;
+ default:
+ pr_cont("Either hardware does not support performance "
+ "counters, or not yet implemented.\n");
+ return -ENODEV;
+ }
+
+ if (mipspmu)
+ pr_cont("%s PMU enabled, %d counters available to each "
+ "CPU, irq %d%s\n", mipspmu->name, counters, irq,
+ irq < 0 ? " (share with timer interrupt)" : "");
+
+ return 0;
+}
+arch_initcall(init_hw_perf_events);
+
+#endif /* defined(CONFIG_CPU_MIPS32)... */
diff --git a/arch/mips/kernel/prom.c b/arch/mips/kernel/prom.c
new file mode 100644
index 000000000000..e000b278f024
--- /dev/null
+++ b/arch/mips/kernel/prom.c
@@ -0,0 +1,112 @@
+/*
+ * MIPS support for CONFIG_OF device tree support
+ *
+ * Copyright (C) 2010 Cisco Systems Inc. <dediao@cisco.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/errno.h>
+#include <linux/types.h>
+#include <linux/bootmem.h>
+#include <linux/initrd.h>
+#include <linux/debugfs.h>
+#include <linux/of.h>
+#include <linux/of_fdt.h>
+#include <linux/of_irq.h>
+#include <linux/of_platform.h>
+
+#include <asm/page.h>
+#include <asm/prom.h>
+
+int __init early_init_dt_scan_memory_arch(unsigned long node,
+ const char *uname, int depth,
+ void *data)
+{
+ return early_init_dt_scan_memory(node, uname, depth, data);
+}
+
+void __init early_init_dt_add_memory_arch(u64 base, u64 size)
+{
+ return add_memory_region(base, size, BOOT_MEM_RAM);
+}
+
+int __init reserve_mem_mach(unsigned long addr, unsigned long size)
+{
+ return reserve_bootmem(addr, size, BOOTMEM_DEFAULT);
+}
+
+void __init free_mem_mach(unsigned long addr, unsigned long size)
+{
+ return free_bootmem(addr, size);
+}
+
+u64 __init early_init_dt_alloc_memory_arch(u64 size, u64 align)
+{
+ return virt_to_phys(
+ __alloc_bootmem(size, align, __pa(MAX_DMA_ADDRESS))
+ );
+}
+
+#ifdef CONFIG_BLK_DEV_INITRD
+void __init early_init_dt_setup_initrd_arch(unsigned long start,
+ unsigned long end)
+{
+ initrd_start = (unsigned long)__va(start);
+ initrd_end = (unsigned long)__va(end);
+ initrd_below_start_ok = 1;
+}
+#endif
+
+/*
+ * irq_create_of_mapping - Hook to resolve OF irq specifier into a Linux irq#
+ *
+ * Currently the mapping mechanism is trivial; simple flat hwirq numbers are
+ * mapped 1:1 onto Linux irq numbers. Cascaded irq controllers are not
+ * supported.
+ */
+unsigned int irq_create_of_mapping(struct device_node *controller,
+ const u32 *intspec, unsigned int intsize)
+{
+ return intspec[0];
+}
+EXPORT_SYMBOL_GPL(irq_create_of_mapping);
+
+void __init early_init_devtree(void *params)
+{
+ /* Setup flat device-tree pointer */
+ initial_boot_params = params;
+
+ /* Retrieve various information from the /chosen node of the
+ * device-tree, including the platform type, initrd location and
+ * size, and more ...
+ */
+ of_scan_flat_dt(early_init_dt_scan_chosen, NULL);
+
+ /* Scan memory nodes */
+ of_scan_flat_dt(early_init_dt_scan_root, NULL);
+ of_scan_flat_dt(early_init_dt_scan_memory_arch, NULL);
+}
+
+void __init device_tree_init(void)
+{
+ unsigned long base, size;
+
+ if (!initial_boot_params)
+ return;
+
+ base = virt_to_phys((void *)initial_boot_params);
+ size = initial_boot_params->totalsize;
+
+ /* Before we do anything, lets reserve the dt blob */
+ reserve_mem_mach(base, size);
+
+ unflatten_device_tree();
+
+ /* free the space reserved for the dt blob */
+ free_mem_mach(base, size);
+}
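
device_tree_init() above takes the blob size straight from the flattened-device-tree header, whose first two 32-bit big-endian words are the 0xd00dfeed magic and totalsize. The stand-alone reader below prints those two fields for a .dtb file; the file name is purely illustrative.

#include <arpa/inet.h>  /* ntohl */
#include <stdint.h>
#include <stdio.h>

int main(int argc, char **argv)
{
        const char *path = argc > 1 ? argv[1] : "board.dtb";    /* hypothetical blob */
        uint32_t hdr[2];        /* magic, totalsize */
        FILE *f = fopen(path, "rb");

        if (!f || fread(hdr, sizeof(uint32_t), 2, f) != 2) {
                perror(path);
                return 1;
        }
        fclose(f);

        if (ntohl(hdr[0]) != 0xd00dfeed) {
                fprintf(stderr, "%s: not a flattened device tree\n", path);
                return 1;
        }
        printf("FDT blob is %u bytes; this is the size reserved and later\n"
               "freed around unflatten_device_tree().\n", ntohl(hdr[1]));
        return 0;
}
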
diff --git a/arch/mips/kernel/ptrace.c b/arch/mips/kernel/ptrace.c
index c8777333e198..d21c388c0116 100644
--- a/arch/mips/kernel/ptrace.c
+++ b/arch/mips/kernel/ptrace.c
@@ -255,9 +255,13 @@ int ptrace_set_watch_regs(struct task_struct *child,
return 0;
}
-long arch_ptrace(struct task_struct *child, long request, long addr, long data)
+long arch_ptrace(struct task_struct *child, long request,
+ unsigned long addr, unsigned long data)
{
int ret;
+ void __user *addrp = (void __user *) addr;
+ void __user *datavp = (void __user *) data;
+ unsigned long __user *datalp = (void __user *) data;
switch (request) {
/* when I and D space are separate, these will need to be fixed. */
@@ -386,7 +390,7 @@ long arch_ptrace(struct task_struct *child, long request, long addr, long data)
ret = -EIO;
goto out;
}
- ret = put_user(tmp, (unsigned long __user *) data);
+ ret = put_user(tmp, datalp);
break;
}
@@ -478,34 +482,31 @@ long arch_ptrace(struct task_struct *child, long request, long addr, long data)
}
case PTRACE_GETREGS:
- ret = ptrace_getregs(child, (__s64 __user *) data);
+ ret = ptrace_getregs(child, datavp);
break;
case PTRACE_SETREGS:
- ret = ptrace_setregs(child, (__s64 __user *) data);
+ ret = ptrace_setregs(child, datavp);
break;
case PTRACE_GETFPREGS:
- ret = ptrace_getfpregs(child, (__u32 __user *) data);
+ ret = ptrace_getfpregs(child, datavp);
break;
case PTRACE_SETFPREGS:
- ret = ptrace_setfpregs(child, (__u32 __user *) data);
+ ret = ptrace_setfpregs(child, datavp);
break;
case PTRACE_GET_THREAD_AREA:
- ret = put_user(task_thread_info(child)->tp_value,
- (unsigned long __user *) data);
+ ret = put_user(task_thread_info(child)->tp_value, datalp);
break;
case PTRACE_GET_WATCH_REGS:
- ret = ptrace_get_watch_regs(child,
- (struct pt_watch_regs __user *) addr);
+ ret = ptrace_get_watch_regs(child, addrp);
break;
case PTRACE_SET_WATCH_REGS:
- ret = ptrace_set_watch_regs(child,
- (struct pt_watch_regs __user *) addr);
+ ret = ptrace_set_watch_regs(child, addrp);
break;
default:
diff --git a/arch/mips/kernel/setup.c b/arch/mips/kernel/setup.c
index 85aef3fc6716..acd3f2c49c06 100644
--- a/arch/mips/kernel/setup.c
+++ b/arch/mips/kernel/setup.c
@@ -31,6 +31,7 @@
#include <asm/setup.h>
#include <asm/smp-ops.h>
#include <asm/system.h>
+#include <asm/prom.h>
struct cpuinfo_mips cpu_data[NR_CPUS] __read_mostly;
@@ -487,7 +488,9 @@ static void __init arch_mem_init(char **cmdline_p)
}
bootmem_init();
+ device_tree_init();
sparse_init();
+ plat_swiotlb_setup();
paging_init();
}
diff --git a/arch/mips/kernel/traps.c b/arch/mips/kernel/traps.c
index d053bf4759e4..8e9fbe75894e 100644
--- a/arch/mips/kernel/traps.c
+++ b/arch/mips/kernel/traps.c
@@ -29,6 +29,7 @@
#include <linux/notifier.h>
#include <linux/kdb.h>
#include <linux/irq.h>
+#include <linux/perf_event.h>
#include <asm/bootinfo.h>
#include <asm/branch.h>
@@ -576,10 +577,16 @@ static inline int simulate_sc(struct pt_regs *regs, unsigned int opcode)
*/
static int simulate_llsc(struct pt_regs *regs, unsigned int opcode)
{
- if ((opcode & OPCODE) == LL)
+ if ((opcode & OPCODE) == LL) {
+ perf_sw_event(PERF_COUNT_SW_EMULATION_FAULTS,
+ 1, 0, regs, 0);
return simulate_ll(regs, opcode);
- if ((opcode & OPCODE) == SC)
+ }
+ if ((opcode & OPCODE) == SC) {
+ perf_sw_event(PERF_COUNT_SW_EMULATION_FAULTS,
+ 1, 0, regs, 0);
return simulate_sc(regs, opcode);
+ }
return -1; /* Must be something else ... */
}
@@ -595,6 +602,8 @@ static int simulate_rdhwr(struct pt_regs *regs, unsigned int opcode)
if ((opcode & OPCODE) == SPEC3 && (opcode & FUNC) == RDHWR) {
int rd = (opcode & RD) >> 11;
int rt = (opcode & RT) >> 16;
+ perf_sw_event(PERF_COUNT_SW_EMULATION_FAULTS,
+ 1, 0, regs, 0);
switch (rd) {
case 0: /* CPU number */
regs->regs[rt] = smp_processor_id();
@@ -630,8 +639,11 @@ static int simulate_rdhwr(struct pt_regs *regs, unsigned int opcode)
static int simulate_sync(struct pt_regs *regs, unsigned int opcode)
{
- if ((opcode & OPCODE) == SPEC0 && (opcode & FUNC) == SYNC)
+ if ((opcode & OPCODE) == SPEC0 && (opcode & FUNC) == SYNC) {
+ perf_sw_event(PERF_COUNT_SW_EMULATION_FAULTS,
+ 1, 0, regs, 0);
return 0;
+ }
return -1; /* Must be something else ... */
}
@@ -1469,6 +1481,7 @@ void __cpuinit per_cpu_trap_init(void)
{
unsigned int cpu = smp_processor_id();
unsigned int status_set = ST0_CU0;
+ unsigned int hwrena = cpu_hwrena_impl_bits;
#ifdef CONFIG_MIPS_MT_SMTC
int secondaryTC = 0;
int bootTC = (cpu == 0);
@@ -1501,14 +1514,14 @@ void __cpuinit per_cpu_trap_init(void)
change_c0_status(ST0_CU|ST0_MX|ST0_RE|ST0_FR|ST0_BEV|ST0_TS|ST0_KX|ST0_SX|ST0_UX,
status_set);
- if (cpu_has_mips_r2) {
- unsigned int enable = 0x0000000f | cpu_hwrena_impl_bits;
+ if (cpu_has_mips_r2)
+ hwrena |= 0x0000000f;
- if (!noulri && cpu_has_userlocal)
- enable |= (1 << 29);
+ if (!noulri && cpu_has_userlocal)
+ hwrena |= (1 << 29);
- write_c0_hwrena(enable);
- }
+ if (hwrena)
+ write_c0_hwrena(hwrena);
#ifdef CONFIG_MIPS_MT_SMTC
if (!secondaryTC) {
diff --git a/arch/mips/kernel/unaligned.c b/arch/mips/kernel/unaligned.c
index 33d5a5ce4a29..cfea1adfa153 100644
--- a/arch/mips/kernel/unaligned.c
+++ b/arch/mips/kernel/unaligned.c
@@ -78,6 +78,8 @@
#include <linux/smp.h>
#include <linux/sched.h>
#include <linux/debugfs.h>
+#include <linux/perf_event.h>
+
#include <asm/asm.h>
#include <asm/branch.h>
#include <asm/byteorder.h>
@@ -109,6 +111,9 @@ static void emulate_load_store_insn(struct pt_regs *regs,
unsigned long value;
unsigned int res;
+ perf_sw_event(PERF_COUNT_SW_EMULATION_FAULTS,
+ 1, 0, regs, 0);
+
/*
* This load never faults.
*/
@@ -511,6 +516,8 @@ asmlinkage void do_ade(struct pt_regs *regs)
unsigned int __user *pc;
mm_segment_t seg;
+ perf_sw_event(PERF_COUNT_SW_ALIGNMENT_FAULTS,
+ 1, 0, regs, regs->cp0_badvaddr);
/*
* Did we catch a fault trying to load an instruction?
* Or are we running in MIPS16 mode?
diff --git a/arch/mips/loongson/Kconfig b/arch/mips/loongson/Kconfig
index c97ca69b94e0..6e1b77fec7ea 100644
--- a/arch/mips/loongson/Kconfig
+++ b/arch/mips/loongson/Kconfig
@@ -20,7 +20,6 @@ config LEMOTE_FULOONG2E
select SYS_SUPPORTS_LITTLE_ENDIAN
select SYS_SUPPORTS_HIGHMEM
select SYS_HAS_EARLY_PRINTK
- select GENERIC_HARDIRQS_NO__DO_IRQ
select GENERIC_ISA_DMA_SUPPORT_BROKEN
select CPU_HAS_WB
select LOONGSON_MC146818
@@ -40,7 +39,6 @@ config LEMOTE_MACH2F
select CS5536
select CSRC_R4K if ! MIPS_EXTERNAL_TIMER
select DMA_NONCOHERENT
- select GENERIC_HARDIRQS_NO__DO_IRQ
select GENERIC_ISA_DMA_SUPPORT_BROKEN
select HW_HAS_PCI
select I8259
diff --git a/arch/mips/math-emu/cp1emu.c b/arch/mips/math-emu/cp1emu.c
index ec3faa413f3b..b2ad1b0910ff 100644
--- a/arch/mips/math-emu/cp1emu.c
+++ b/arch/mips/math-emu/cp1emu.c
@@ -36,6 +36,7 @@
#include <linux/sched.h>
#include <linux/module.h>
#include <linux/debugfs.h>
+#include <linux/perf_event.h>
#include <asm/inst.h>
#include <asm/bootinfo.h>
@@ -258,6 +259,8 @@ static int cop1Emulate(struct pt_regs *xcp, struct mips_fpu_struct *ctx)
}
emul:
+ perf_sw_event(PERF_COUNT_SW_EMULATION_FAULTS,
+ 1, 0, xcp, 0);
MIPS_FPU_EMU_INC_STATS(emulated);
switch (MIPSInst_OPCODE(ir)) {
case ldc1_op:{
diff --git a/arch/mips/mm/c-octeon.c b/arch/mips/mm/c-octeon.c
index 0f9c488044d1..16c4d256b76f 100644
--- a/arch/mips/mm/c-octeon.c
+++ b/arch/mips/mm/c-octeon.c
@@ -181,10 +181,10 @@ static void __cpuinit probe_octeon(void)
unsigned int config1;
struct cpuinfo_mips *c = &current_cpu_data;
+ config1 = read_c0_config1();
switch (c->cputype) {
case CPU_CAVIUM_OCTEON:
case CPU_CAVIUM_OCTEON_PLUS:
- config1 = read_c0_config1();
c->icache.linesz = 2 << ((config1 >> 19) & 7);
c->icache.sets = 64 << ((config1 >> 22) & 7);
c->icache.ways = 1 + ((config1 >> 16) & 7);
@@ -204,6 +204,20 @@ static void __cpuinit probe_octeon(void)
c->options |= MIPS_CPU_PREFETCH;
break;
+ case CPU_CAVIUM_OCTEON2:
+ c->icache.linesz = 2 << ((config1 >> 19) & 7);
+ c->icache.sets = 8;
+ c->icache.ways = 37;
+ c->icache.flags |= MIPS_CACHE_VTAG;
+ icache_size = c->icache.sets * c->icache.ways * c->icache.linesz;
+
+ c->dcache.linesz = 128;
+ c->dcache.ways = 32;
+ c->dcache.sets = 8;
+ dcache_size = c->dcache.sets * c->dcache.ways * c->dcache.linesz;
+ c->options |= MIPS_CPU_PREFETCH;
+ break;
+
default:
panic("Unsupported Cavium Networks CPU type\n");
break;
diff --git a/arch/mips/mm/c-r4k.c b/arch/mips/mm/c-r4k.c
index 6721ee2b1e8b..b4923a75cb4b 100644
--- a/arch/mips/mm/c-r4k.c
+++ b/arch/mips/mm/c-r4k.c
@@ -42,14 +42,14 @@
* o collapses to normal function call on UP kernels
* o collapses to normal function call on systems with a single shared
* primary cache.
+ * o doesn't disable interrupts on the local CPU
*/
-static inline void r4k_on_each_cpu(void (*func) (void *info), void *info,
- int wait)
+static inline void r4k_on_each_cpu(void (*func) (void *info), void *info)
{
preempt_disable();
#if !defined(CONFIG_MIPS_MT_SMP) && !defined(CONFIG_MIPS_MT_SMTC)
- smp_call_function(func, info, wait);
+ smp_call_function(func, info, 1);
#endif
func(info);
preempt_enable();
@@ -363,7 +363,7 @@ static inline void local_r4k___flush_cache_all(void * args)
static void r4k___flush_cache_all(void)
{
- r4k_on_each_cpu(local_r4k___flush_cache_all, NULL, 1);
+ r4k_on_each_cpu(local_r4k___flush_cache_all, NULL);
}
static inline int has_valid_asid(const struct mm_struct *mm)
@@ -410,7 +410,7 @@ static void r4k_flush_cache_range(struct vm_area_struct *vma,
int exec = vma->vm_flags & VM_EXEC;
if (cpu_has_dc_aliases || (exec && !cpu_has_ic_fills_f_dc))
- r4k_on_each_cpu(local_r4k_flush_cache_range, vma, 1);
+ r4k_on_each_cpu(local_r4k_flush_cache_range, vma);
}
static inline void local_r4k_flush_cache_mm(void * args)
@@ -442,7 +442,7 @@ static void r4k_flush_cache_mm(struct mm_struct *mm)
if (!cpu_has_dc_aliases)
return;
- r4k_on_each_cpu(local_r4k_flush_cache_mm, mm, 1);
+ r4k_on_each_cpu(local_r4k_flush_cache_mm, mm);
}
struct flush_cache_page_args {
@@ -534,7 +534,7 @@ static void r4k_flush_cache_page(struct vm_area_struct *vma,
args.addr = addr;
args.pfn = pfn;
- r4k_on_each_cpu(local_r4k_flush_cache_page, &args, 1);
+ r4k_on_each_cpu(local_r4k_flush_cache_page, &args);
}
static inline void local_r4k_flush_data_cache_page(void * addr)
@@ -547,8 +547,7 @@ static void r4k_flush_data_cache_page(unsigned long addr)
if (in_atomic())
local_r4k_flush_data_cache_page((void *)addr);
else
- r4k_on_each_cpu(local_r4k_flush_data_cache_page, (void *) addr,
- 1);
+ r4k_on_each_cpu(local_r4k_flush_data_cache_page, (void *) addr);
}
struct flush_icache_range_args {
@@ -589,7 +588,7 @@ static void r4k_flush_icache_range(unsigned long start, unsigned long end)
args.start = start;
args.end = end;
- r4k_on_each_cpu(local_r4k_flush_icache_range_ipi, &args, 1);
+ r4k_on_each_cpu(local_r4k_flush_icache_range_ipi, &args);
instruction_hazard();
}
@@ -710,7 +709,7 @@ static void local_r4k_flush_cache_sigtramp(void * arg)
static void r4k_flush_cache_sigtramp(unsigned long addr)
{
- r4k_on_each_cpu(local_r4k_flush_cache_sigtramp, (void *) addr, 1);
+ r4k_on_each_cpu(local_r4k_flush_cache_sigtramp, (void *) addr);
}
static void r4k_flush_icache_all(void)
diff --git a/arch/mips/mm/dma-default.c b/arch/mips/mm/dma-default.c
index 469d4019f795..4fc1a0fbe007 100644
--- a/arch/mips/mm/dma-default.c
+++ b/arch/mips/mm/dma-default.c
@@ -95,10 +95,9 @@ void *dma_alloc_noncoherent(struct device *dev, size_t size,
return ret;
}
-
EXPORT_SYMBOL(dma_alloc_noncoherent);
-void *dma_alloc_coherent(struct device *dev, size_t size,
+static void *mips_dma_alloc_coherent(struct device *dev, size_t size,
dma_addr_t * dma_handle, gfp_t gfp)
{
void *ret;
@@ -123,7 +122,6 @@ void *dma_alloc_coherent(struct device *dev, size_t size,
return ret;
}
-EXPORT_SYMBOL(dma_alloc_coherent);
void dma_free_noncoherent(struct device *dev, size_t size, void *vaddr,
dma_addr_t dma_handle)
@@ -131,10 +129,9 @@ void dma_free_noncoherent(struct device *dev, size_t size, void *vaddr,
plat_unmap_dma_mem(dev, dma_handle, size, DMA_BIDIRECTIONAL);
free_pages((unsigned long) vaddr, get_order(size));
}
-
EXPORT_SYMBOL(dma_free_noncoherent);
-void dma_free_coherent(struct device *dev, size_t size, void *vaddr,
+static void mips_dma_free_coherent(struct device *dev, size_t size, void *vaddr,
dma_addr_t dma_handle)
{
unsigned long addr = (unsigned long) vaddr;
@@ -151,8 +148,6 @@ void dma_free_coherent(struct device *dev, size_t size, void *vaddr,
free_pages(addr, get_order(size));
}
-EXPORT_SYMBOL(dma_free_coherent);
-
static inline void __dma_sync(unsigned long addr, size_t size,
enum dma_data_direction direction)
{
@@ -174,21 +169,8 @@ static inline void __dma_sync(unsigned long addr, size_t size,
}
}
-dma_addr_t dma_map_single(struct device *dev, void *ptr, size_t size,
- enum dma_data_direction direction)
-{
- unsigned long addr = (unsigned long) ptr;
-
- if (!plat_device_is_coherent(dev))
- __dma_sync(addr, size, direction);
-
- return plat_map_dma_mem(dev, ptr, size);
-}
-
-EXPORT_SYMBOL(dma_map_single);
-
-void dma_unmap_single(struct device *dev, dma_addr_t dma_addr, size_t size,
- enum dma_data_direction direction)
+static void mips_dma_unmap_page(struct device *dev, dma_addr_t dma_addr,
+ size_t size, enum dma_data_direction direction, struct dma_attrs *attrs)
{
if (cpu_is_noncoherent_r10000(dev))
__dma_sync(dma_addr_to_virt(dev, dma_addr), size,
@@ -197,15 +179,11 @@ void dma_unmap_single(struct device *dev, dma_addr_t dma_addr, size_t size,
plat_unmap_dma_mem(dev, dma_addr, size, direction);
}
-EXPORT_SYMBOL(dma_unmap_single);
-
-int dma_map_sg(struct device *dev, struct scatterlist *sg, int nents,
- enum dma_data_direction direction)
+static int mips_dma_map_sg(struct device *dev, struct scatterlist *sg,
+ int nents, enum dma_data_direction direction, struct dma_attrs *attrs)
{
int i;
- BUG_ON(direction == DMA_NONE);
-
for (i = 0; i < nents; i++, sg++) {
unsigned long addr;
@@ -219,33 +197,27 @@ int dma_map_sg(struct device *dev, struct scatterlist *sg, int nents,
return nents;
}
-EXPORT_SYMBOL(dma_map_sg);
-
-dma_addr_t dma_map_page(struct device *dev, struct page *page,
- unsigned long offset, size_t size, enum dma_data_direction direction)
+static dma_addr_t mips_dma_map_page(struct device *dev, struct page *page,
+ unsigned long offset, size_t size, enum dma_data_direction direction,
+ struct dma_attrs *attrs)
{
- BUG_ON(direction == DMA_NONE);
+ unsigned long addr;
- if (!plat_device_is_coherent(dev)) {
- unsigned long addr;
+ addr = (unsigned long) page_address(page) + offset;
- addr = (unsigned long) page_address(page) + offset;
+ if (!plat_device_is_coherent(dev))
__dma_sync(addr, size, direction);
- }
- return plat_map_dma_mem_page(dev, page) + offset;
+ return plat_map_dma_mem(dev, (void *)addr, size);
}
-EXPORT_SYMBOL(dma_map_page);
-
-void dma_unmap_sg(struct device *dev, struct scatterlist *sg, int nhwentries,
- enum dma_data_direction direction)
+static void mips_dma_unmap_sg(struct device *dev, struct scatterlist *sg,
+ int nhwentries, enum dma_data_direction direction,
+ struct dma_attrs *attrs)
{
unsigned long addr;
int i;
- BUG_ON(direction == DMA_NONE);
-
for (i = 0; i < nhwentries; i++, sg++) {
if (!plat_device_is_coherent(dev) &&
direction != DMA_TO_DEVICE) {
@@ -257,13 +229,9 @@ void dma_unmap_sg(struct device *dev, struct scatterlist *sg, int nhwentries,
}
}
-EXPORT_SYMBOL(dma_unmap_sg);
-
-void dma_sync_single_for_cpu(struct device *dev, dma_addr_t dma_handle,
- size_t size, enum dma_data_direction direction)
+static void mips_dma_sync_single_for_cpu(struct device *dev,
+ dma_addr_t dma_handle, size_t size, enum dma_data_direction direction)
{
- BUG_ON(direction == DMA_NONE);
-
if (cpu_is_noncoherent_r10000(dev)) {
unsigned long addr;
@@ -272,13 +240,9 @@ void dma_sync_single_for_cpu(struct device *dev, dma_addr_t dma_handle,
}
}
-EXPORT_SYMBOL(dma_sync_single_for_cpu);
-
-void dma_sync_single_for_device(struct device *dev, dma_addr_t dma_handle,
- size_t size, enum dma_data_direction direction)
+static void mips_dma_sync_single_for_device(struct device *dev,
+ dma_addr_t dma_handle, size_t size, enum dma_data_direction direction)
{
- BUG_ON(direction == DMA_NONE);
-
plat_extra_sync_for_device(dev);
if (!plat_device_is_coherent(dev)) {
unsigned long addr;
@@ -288,46 +252,11 @@ void dma_sync_single_for_device(struct device *dev, dma_addr_t dma_handle,
}
}
-EXPORT_SYMBOL(dma_sync_single_for_device);
-
-void dma_sync_single_range_for_cpu(struct device *dev, dma_addr_t dma_handle,
- unsigned long offset, size_t size, enum dma_data_direction direction)
-{
- BUG_ON(direction == DMA_NONE);
-
- if (cpu_is_noncoherent_r10000(dev)) {
- unsigned long addr;
-
- addr = dma_addr_to_virt(dev, dma_handle);
- __dma_sync(addr + offset, size, direction);
- }
-}
-
-EXPORT_SYMBOL(dma_sync_single_range_for_cpu);
-
-void dma_sync_single_range_for_device(struct device *dev, dma_addr_t dma_handle,
- unsigned long offset, size_t size, enum dma_data_direction direction)
-{
- BUG_ON(direction == DMA_NONE);
-
- plat_extra_sync_for_device(dev);
- if (!plat_device_is_coherent(dev)) {
- unsigned long addr;
-
- addr = dma_addr_to_virt(dev, dma_handle);
- __dma_sync(addr + offset, size, direction);
- }
-}
-
-EXPORT_SYMBOL(dma_sync_single_range_for_device);
-
-void dma_sync_sg_for_cpu(struct device *dev, struct scatterlist *sg, int nelems,
- enum dma_data_direction direction)
+static void mips_dma_sync_sg_for_cpu(struct device *dev,
+ struct scatterlist *sg, int nelems, enum dma_data_direction direction)
{
int i;
- BUG_ON(direction == DMA_NONE);
-
/* Make sure that gcc doesn't leave the empty loop body. */
for (i = 0; i < nelems; i++, sg++) {
if (cpu_is_noncoherent_r10000(dev))
@@ -336,15 +265,11 @@ void dma_sync_sg_for_cpu(struct device *dev, struct scatterlist *sg, int nelems,
}
}
-EXPORT_SYMBOL(dma_sync_sg_for_cpu);
-
-void dma_sync_sg_for_device(struct device *dev, struct scatterlist *sg, int nelems,
- enum dma_data_direction direction)
+static void mips_dma_sync_sg_for_device(struct device *dev,
+ struct scatterlist *sg, int nelems, enum dma_data_direction direction)
{
int i;
- BUG_ON(direction == DMA_NONE);
-
/* Make sure that gcc doesn't leave the empty loop body. */
for (i = 0; i < nelems; i++, sg++) {
if (!plat_device_is_coherent(dev))
@@ -353,24 +278,18 @@ void dma_sync_sg_for_device(struct device *dev, struct scatterlist *sg, int nele
}
}
-EXPORT_SYMBOL(dma_sync_sg_for_device);
-
-int dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
+int mips_dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
{
return plat_dma_mapping_error(dev, dma_addr);
}
-EXPORT_SYMBOL(dma_mapping_error);
-
-int dma_supported(struct device *dev, u64 mask)
+int mips_dma_supported(struct device *dev, u64 mask)
{
return plat_dma_supported(dev, mask);
}
-EXPORT_SYMBOL(dma_supported);
-
-void dma_cache_sync(struct device *dev, void *vaddr, size_t size,
- enum dma_data_direction direction)
+void mips_dma_cache_sync(struct device *dev, void *vaddr, size_t size,
+ enum dma_data_direction direction)
{
BUG_ON(direction == DMA_NONE);
@@ -379,4 +298,30 @@ void dma_cache_sync(struct device *dev, void *vaddr, size_t size,
__dma_sync((unsigned long)vaddr, size, direction);
}
-EXPORT_SYMBOL(dma_cache_sync);
+static struct dma_map_ops mips_default_dma_map_ops = {
+ .alloc_coherent = mips_dma_alloc_coherent,
+ .free_coherent = mips_dma_free_coherent,
+ .map_page = mips_dma_map_page,
+ .unmap_page = mips_dma_unmap_page,
+ .map_sg = mips_dma_map_sg,
+ .unmap_sg = mips_dma_unmap_sg,
+ .sync_single_for_cpu = mips_dma_sync_single_for_cpu,
+ .sync_single_for_device = mips_dma_sync_single_for_device,
+ .sync_sg_for_cpu = mips_dma_sync_sg_for_cpu,
+ .sync_sg_for_device = mips_dma_sync_sg_for_device,
+ .mapping_error = mips_dma_mapping_error,
+ .dma_supported = mips_dma_supported
+};
+
+struct dma_map_ops *mips_dma_map_ops = &mips_default_dma_map_ops;
+EXPORT_SYMBOL(mips_dma_map_ops);
+
+#define PREALLOC_DMA_DEBUG_ENTRIES (1 << 16)
+
+static int __init mips_dma_init(void)
+{
+ dma_debug_init(PREALLOC_DMA_DEBUG_ENTRIES);
+
+ return 0;
+}
+fs_initcall(mips_dma_init);
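
The conversion above replaces a set of exported dma_* functions with a single table of function pointers that a device can override (pci-octeon.c later in this series installs its own table through dev->dev.archdata.dma_ops). The reduced user-space sketch below shows only that indirection pattern; all of its names are made up and it is not the kernel's struct dma_map_ops.

#include <stddef.h>
#include <stdio.h>

struct demo_dma_ops {
        void *(*map_page)(void *page, size_t size);
        void  (*unmap_page)(void *addr, size_t size);
};

struct demo_device {
        const char *name;
        const struct demo_dma_ops *dma_ops;     /* NULL means "use the default" */
};

static void *default_map(void *page, size_t size)
{
        printf("default map of %zu bytes\n", size);
        return page;
}

static void default_unmap(void *addr, size_t size)
{
        (void)addr;
        printf("default unmap of %zu bytes\n", size);
}

static const struct demo_dma_ops default_dma_ops = {
        .map_page   = default_map,
        .unmap_page = default_unmap,
};

static const struct demo_dma_ops *get_dma_ops(const struct demo_device *dev)
{
        return dev->dma_ops ? dev->dma_ops : &default_dma_ops;
}

int main(void)
{
        struct demo_device dev = { .name = "demo", .dma_ops = NULL };
        char buf[64];

        void *h = get_dma_ops(&dev)->map_page(buf, sizeof(buf));
        get_dma_ops(&dev)->unmap_page(h, sizeof(buf));
        return 0;
}
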
diff --git a/arch/mips/mm/fault.c b/arch/mips/mm/fault.c
index 783ad0065fdf..137ee76a0045 100644
--- a/arch/mips/mm/fault.c
+++ b/arch/mips/mm/fault.c
@@ -18,6 +18,7 @@
#include <linux/smp.h>
#include <linux/module.h>
#include <linux/kprobes.h>
+#include <linux/perf_event.h>
#include <asm/branch.h>
#include <asm/mmu_context.h>
@@ -144,6 +145,7 @@ good_area:
* the fault.
*/
fault = handle_mm_fault(mm, vma, address, write ? FAULT_FLAG_WRITE : 0);
+ perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS, 1, 0, regs, address);
if (unlikely(fault & VM_FAULT_ERROR)) {
if (fault & VM_FAULT_OOM)
goto out_of_memory;
@@ -151,10 +153,15 @@ good_area:
goto do_sigbus;
BUG();
}
- if (fault & VM_FAULT_MAJOR)
+ if (fault & VM_FAULT_MAJOR) {
+ perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MAJ,
+ 1, 0, regs, address);
tsk->maj_flt++;
- else
+ } else {
+ perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MIN,
+ 1, 0, regs, address);
tsk->min_flt++;
+ }
up_read(&mm->mmap_sem);
return;
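
The major/minor split instrumented above also feeds the per-task maj_flt/min_flt counters, which user space can read back through getrusage(); the small program below is one way to sanity-check that accounting from outside the kernel.

#include <sys/resource.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

int main(void)
{
        struct rusage before, after;
        size_t len = 8u << 20;          /* 8 MiB */
        char *p;

        getrusage(RUSAGE_SELF, &before);

        p = malloc(len);
        if (!p)
                return 1;
        memset(p, 0xa5, len);           /* touching new pages causes minor faults */

        getrusage(RUSAGE_SELF, &after);
        printf("minor faults: %ld, major faults: %ld\n",
               after.ru_minflt - before.ru_minflt,
               after.ru_majflt - before.ru_majflt);
        free(p);
        return 0;
}
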
diff --git a/arch/mips/mm/highmem.c b/arch/mips/mm/highmem.c
index 6a2b1bf9ef11..3634c7ea06ac 100644
--- a/arch/mips/mm/highmem.c
+++ b/arch/mips/mm/highmem.c
@@ -9,7 +9,7 @@ static pte_t *kmap_pte;
unsigned long highstart_pfn, highend_pfn;
-void *__kmap(struct page *page)
+void *kmap(struct page *page)
{
void *addr;
@@ -21,16 +21,16 @@ void *__kmap(struct page *page)
return addr;
}
-EXPORT_SYMBOL(__kmap);
+EXPORT_SYMBOL(kmap);
-void __kunmap(struct page *page)
+void kunmap(struct page *page)
{
BUG_ON(in_interrupt());
if (!PageHighMem(page))
return;
kunmap_high(page);
}
-EXPORT_SYMBOL(__kunmap);
+EXPORT_SYMBOL(kunmap);
/*
* kmap_atomic/kunmap_atomic is significantly faster than kmap/kunmap because
@@ -41,17 +41,17 @@ EXPORT_SYMBOL(__kunmap);
* kmaps are appropriate for short, tight code paths only.
*/
-void *__kmap_atomic(struct page *page, enum km_type type)
+void *__kmap_atomic(struct page *page)
{
- enum fixed_addresses idx;
unsigned long vaddr;
+ int idx, type;
/* even !CONFIG_PREEMPT needs this, for in_atomic in do_page_fault */
pagefault_disable();
if (!PageHighMem(page))
return page_address(page);
- debug_kmap_atomic(type);
+ type = kmap_atomic_idx_push();
idx = type + KM_TYPE_NR*smp_processor_id();
vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
#ifdef CONFIG_DEBUG_HIGHMEM
@@ -64,43 +64,48 @@ void *__kmap_atomic(struct page *page, enum km_type type)
}
EXPORT_SYMBOL(__kmap_atomic);
-void __kunmap_atomic_notypecheck(void *kvaddr, enum km_type type)
+void __kunmap_atomic(void *kvaddr)
{
-#ifdef CONFIG_DEBUG_HIGHMEM
unsigned long vaddr = (unsigned long) kvaddr & PAGE_MASK;
- enum fixed_addresses idx = type + KM_TYPE_NR*smp_processor_id();
+ int type;
if (vaddr < FIXADDR_START) { // FIXME
pagefault_enable();
return;
}
- BUG_ON(vaddr != __fix_to_virt(FIX_KMAP_BEGIN + idx));
+ type = kmap_atomic_idx();
+#ifdef CONFIG_DEBUG_HIGHMEM
+ {
+ int idx = type + KM_TYPE_NR * smp_processor_id();
- /*
- * force other mappings to Oops if they'll try to access
- * this pte without first remap it
- */
- pte_clear(&init_mm, vaddr, kmap_pte-idx);
- local_flush_tlb_one(vaddr);
-#endif
+ BUG_ON(vaddr != __fix_to_virt(FIX_KMAP_BEGIN + idx));
+ /*
+ * force other mappings to Oops if they'll try to access
+ * this pte without first remap it
+ */
+ pte_clear(&init_mm, vaddr, kmap_pte-idx);
+ local_flush_tlb_one(vaddr);
+ }
+#endif
+ kmap_atomic_idx_pop();
pagefault_enable();
}
-EXPORT_SYMBOL(__kunmap_atomic_notypecheck);
+EXPORT_SYMBOL(__kunmap_atomic);
/*
* This is the same as kmap_atomic() but can map memory that doesn't
* have a struct page associated with it.
*/
-void *kmap_atomic_pfn(unsigned long pfn, enum km_type type)
+void *kmap_atomic_pfn(unsigned long pfn)
{
- enum fixed_addresses idx;
unsigned long vaddr;
+ int idx, type;
pagefault_disable();
- debug_kmap_atomic(type);
+ type = kmap_atomic_idx_push();
idx = type + KM_TYPE_NR*smp_processor_id();
vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
set_pte(kmap_pte-idx, pfn_pte(pfn, PAGE_KERNEL));
@@ -109,7 +114,7 @@ void *kmap_atomic_pfn(unsigned long pfn, enum km_type type)
return (void*) vaddr;
}
-struct page *__kmap_atomic_to_page(void *ptr)
+struct page *kmap_atomic_to_page(void *ptr)
{
unsigned long idx, vaddr = (unsigned long)ptr;
pte_t *pte;
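
The kmap_atomic_idx_push()/kmap_atomic_idx_pop() calls introduced above replace the old caller-supplied km_type with a small per-CPU stack of fixmap slots, so nested atomic kmaps (for example from an interrupt) each take the next free slot and release them in LIFO order. The user-space caricature below (per thread rather than per CPU, with made-up names) only illustrates that stack discipline.

#include <assert.h>
#include <stdio.h>

#define KM_SLOTS 8

static __thread int km_idx_stack_top;   /* number of slots currently in use */

static int km_idx_push(void)
{
        assert(km_idx_stack_top < KM_SLOTS);    /* nesting too deep */
        return km_idx_stack_top++;              /* hand out the next free slot */
}

static int km_idx_top(void)
{
        return km_idx_stack_top - 1;            /* slot of the most recent mapping */
}

static void km_idx_pop(void)
{
        assert(km_idx_stack_top > 0);
        km_idx_stack_top--;
}

int main(void)
{
        int a = km_idx_push();          /* outer "kmap_atomic" */
        int b = km_idx_push();          /* nested one, e.g. from an interrupt */

        printf("outer slot %d, nested slot %d, unmapping slot %d first\n",
               a, b, km_idx_top());
        km_idx_pop();                   /* nested unmap */
        km_idx_pop();                   /* outer unmap */
        return 0;
}
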
diff --git a/arch/mips/mm/sc-mips.c b/arch/mips/mm/sc-mips.c
index 5ab5fa8c1d82..505fecad4684 100644
--- a/arch/mips/mm/sc-mips.c
+++ b/arch/mips/mm/sc-mips.c
@@ -57,6 +57,34 @@ static struct bcache_ops mips_sc_ops = {
.bc_inv = mips_sc_inv
};
+/*
+ * Check if the L2 cache controller is activated on a particular platform.
+ * MTI's L2 controller and the L2 cache controller of Broadcom's BMIPS
+ * cores both use bit 12 of c0_config2 as an "L2 Bypass" bit, i.e. the
+ * cache is disabled when the bit is set. However, there is no guarantee
+ * that this holds on all platforms: the spec unhelpfully defines bits
+ * 12..15 as implementation defined, so the function below will eventually
+ * have to be replaced by a platform-specific probe.
+ */
+static inline int mips_sc_is_activated(struct cpuinfo_mips *c)
+{
+ /* Check the bypass bit (L2B) */
+ switch (c->cputype) {
+ case CPU_34K:
+ case CPU_74K:
+ case CPU_1004K:
+ case CPU_BMIPS5000:
+ if (config2 & (1 << 12))
+ return 0;
+ }
+
+ tmp = (config2 >> 4) & 0x0f;
+ if (0 < tmp && tmp <= 7)
+ c->scache.linesz = 2 << tmp;
+ else
+ return 0;
+}
+
static inline int __init mips_sc_probe(void)
{
struct cpuinfo_mips *c = &current_cpu_data;
@@ -79,10 +107,8 @@ static inline int __init mips_sc_probe(void)
return 0;
config2 = read_c0_config2();
- tmp = (config2 >> 4) & 0x0f;
- if (0 < tmp && tmp <= 7)
- c->scache.linesz = 2 << tmp;
- else
+
+ if (!mips_sc_is_activated(c))
return 0;
tmp = (config2 >> 8) & 0x0f;
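
For reference, the probe above reads two things out of Config2: the implementation-defined L2 bypass bit (bit 12) and the line-size code in bits 7..4, giving a line size of 2 << SL bytes. The sketch below decodes a sample value the same way; interpreting bits 11..8 as a sets-per-way code (64 << SS) follows the usual MIPS32 Config2 layout and is an assumption here, since the corresponding shift is not shown in this hunk.

#include <stdint.h>
#include <stdio.h>

static void decode_config2(uint32_t config2)
{
        unsigned int sl = (config2 >> 4) & 0x0f;        /* line-size code */
        unsigned int ss = (config2 >> 8) & 0x0f;        /* sets-per-way code (assumed) */

        if (config2 & (1u << 12)) {
                printf("L2 bypass bit set: cache treated as disabled\n");
                return;
        }
        if (sl == 0 || sl > 7) {
                printf("no L2 cache configured\n");
                return;
        }
        printf("L2 line size %u bytes, %u sets per way (assumed decode)\n",
               2u << sl, 64u << ss);
}

int main(void)
{
        decode_config2(0x00000265);     /* sample value, not from real hardware */
        return 0;
}
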
diff --git a/arch/mips/mm/tlbex.c b/arch/mips/mm/tlbex.c
index 4510e61883eb..93816f3bca67 100644
--- a/arch/mips/mm/tlbex.c
+++ b/arch/mips/mm/tlbex.c
@@ -338,13 +338,12 @@ static void __cpuinit build_tlb_write_entry(u32 **p, struct uasm_label **l,
case CPU_4KSC:
case CPU_20KC:
case CPU_25KF:
- case CPU_BCM3302:
- case CPU_BCM4710:
+ case CPU_BMIPS32:
+ case CPU_BMIPS3300:
+ case CPU_BMIPS4350:
+ case CPU_BMIPS4380:
+ case CPU_BMIPS5000:
case CPU_LOONGSON2:
- case CPU_BCM6338:
- case CPU_BCM6345:
- case CPU_BCM6348:
- case CPU_BCM6358:
case CPU_R5500:
if (m4kc_tlbp_war())
uasm_i_nop(p);
diff --git a/arch/mips/mm/uasm.c b/arch/mips/mm/uasm.c
index d2647a4e012b..23afdebc8e5c 100644
--- a/arch/mips/mm/uasm.c
+++ b/arch/mips/mm/uasm.c
@@ -405,7 +405,6 @@ I_u1u2u3(_mfc0)
I_u1u2u3(_mtc0)
I_u2u1u3(_ori)
I_u3u1u2(_or)
-I_u2s3u1(_pref)
I_0(_rfe)
I_u2s3u1(_sc)
I_u2s3u1(_scd)
@@ -427,6 +426,25 @@ I_u1(_syscall);
I_u1u2s3(_bbit0);
I_u1u2s3(_bbit1);
+#ifdef CONFIG_CPU_CAVIUM_OCTEON
+#include <asm/octeon/octeon.h>
+void __uasminit uasm_i_pref(u32 **buf, unsigned int a, signed int b,
+ unsigned int c)
+{
+ if (OCTEON_IS_MODEL(OCTEON_CN63XX_PASS1_X) && a <= 24 && a != 5)
+ /*
+ * As per erratum Core-14449, replace prefetches 0-4,
+ * 6-24 with 'pref 28'.
+ */
+ build_insn(buf, insn_pref, c, 28, b);
+ else
+ build_insn(buf, insn_pref, c, a, b);
+}
+UASM_EXPORT_SYMBOL(uasm_i_pref);
+#else
+I_u2s3u1(_pref)
+#endif
+
/* Handle labels. */
void __uasminit uasm_build_label(struct uasm_label **lab, u32 *addr, int lid)
{
diff --git a/arch/mips/pci/pci-octeon.c b/arch/mips/pci/pci-octeon.c
index d248b707eff3..2d74fc9ae3ba 100644
--- a/arch/mips/pci/pci-octeon.c
+++ b/arch/mips/pci/pci-octeon.c
@@ -11,6 +11,7 @@
#include <linux/interrupt.h>
#include <linux/time.h>
#include <linux/delay.h>
+#include <linux/swiotlb.h>
#include <asm/time.h>
@@ -19,6 +20,8 @@
#include <asm/octeon/cvmx-pci-defs.h>
#include <asm/octeon/pci-octeon.h>
+#include <dma-coherence.h>
+
#define USE_OCTEON_INTERNAL_ARBITER
/*
@@ -32,6 +35,8 @@
/* Octeon't PCI controller uses did=3, subdid=3 for PCI memory. */
#define OCTEON_PCI_MEMSPACE_OFFSET (0x00011b0000000000ull)
+u64 octeon_bar1_pci_phys;
+
/**
* This is the bit decoding used for the Octeon PCI controller addresses
*/
@@ -170,6 +175,8 @@ int pcibios_plat_dev_init(struct pci_dev *dev)
pci_write_config_dword(dev, pos + PCI_ERR_ROOT_STATUS, dconfig);
}
+ dev->dev.archdata.dma_ops = octeon_pci_dma_map_ops;
+
return 0;
}
@@ -618,12 +625,10 @@ static int __init octeon_pci_setup(void)
* before the readl()'s below. We don't want BAR2 overlapping
* with BAR0/BAR1 during these reads.
*/
- octeon_npi_write32(CVMX_NPI_PCI_CFG08, 0);
- octeon_npi_write32(CVMX_NPI_PCI_CFG09, 0x80);
-
- /* Disable the BAR1 movable mappings */
- for (index = 0; index < 32; index++)
- octeon_npi_write32(CVMX_NPI_PCI_BAR1_INDEXX(index), 0);
+ octeon_npi_write32(CVMX_NPI_PCI_CFG08,
+ (u32)(OCTEON_BAR2_PCI_ADDRESS & 0xffffffffull));
+ octeon_npi_write32(CVMX_NPI_PCI_CFG09,
+ (u32)(OCTEON_BAR2_PCI_ADDRESS >> 32));
if (octeon_dma_bar_type == OCTEON_DMA_BAR_TYPE_BIG) {
/* Remap the Octeon BAR 0 to 0-2GB */
@@ -637,6 +642,25 @@ static int __init octeon_pci_setup(void)
octeon_npi_write32(CVMX_NPI_PCI_CFG06, 2ul << 30);
octeon_npi_write32(CVMX_NPI_PCI_CFG07, 0);
+ /* BAR1 movable mappings set for identity mapping */
+ octeon_bar1_pci_phys = 0x80000000ull;
+ for (index = 0; index < 32; index++) {
+ union cvmx_pci_bar1_indexx bar1_index;
+
+ bar1_index.u32 = 0;
+ /* Address bits[35:22] sent to L2C */
+ bar1_index.s.addr_idx =
+ (octeon_bar1_pci_phys >> 22) + index;
+ /* Don't put PCI accesses in L2. */
+ bar1_index.s.ca = 1;
+ /* Endian Swap Mode */
+ bar1_index.s.end_swp = 1;
+ /* Set '1' when the selected address range is valid. */
+ bar1_index.s.addr_v = 1;
+ octeon_npi_write32(CVMX_NPI_PCI_BAR1_INDEXX(index),
+ bar1_index.u32);
+ }
+
/* Devices go after BAR1 */
octeon_pci_mem_resource.start =
OCTEON_PCI_MEMSPACE_OFFSET + (4ul << 30) -
@@ -652,6 +676,27 @@ static int __init octeon_pci_setup(void)
octeon_npi_write32(CVMX_NPI_PCI_CFG06, 0);
octeon_npi_write32(CVMX_NPI_PCI_CFG07, 0);
+ /* BAR1 movable regions contiguous to cover the swiotlb */
+ octeon_bar1_pci_phys =
+ virt_to_phys(octeon_swiotlb) & ~((1ull << 22) - 1);
+
+ for (index = 0; index < 32; index++) {
+ union cvmx_pci_bar1_indexx bar1_index;
+
+ bar1_index.u32 = 0;
+ /* Address bits[35:22] sent to L2C */
+ bar1_index.s.addr_idx =
+ (octeon_bar1_pci_phys >> 22) + index;
+ /* Don't put PCI accesses in L2. */
+ bar1_index.s.ca = 1;
+ /* Endian Swap Mode */
+ bar1_index.s.end_swp = 1;
+ /* Set '1' when the selected address range is valid. */
+ bar1_index.s.addr_v = 1;
+ octeon_npi_write32(CVMX_NPI_PCI_BAR1_INDEXX(index),
+ bar1_index.u32);
+ }
+
/* Devices go after BAR0 */
octeon_pci_mem_resource.start =
OCTEON_PCI_MEMSPACE_OFFSET + (128ul << 20) +
@@ -667,6 +712,9 @@ static int __init octeon_pci_setup(void)
* was setup properly.
*/
cvmx_write_csr(CVMX_NPI_PCI_INT_SUM2, -1);
+
+ octeon_pci_dma_init();
+
return 0;
}
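
Both BAR1 loops above program 32 index registers of 4 MiB (1 << 22 bytes) each, i.e. a contiguous 128 MiB window, with each entry's addr_idx set to (base physical address >> 22) + index. The host-side sketch below only prints the values such a loop writes for the identity-mapped case; it does not touch any hardware.

#include <stdint.h>
#include <stdio.h>

#define BAR1_ENTRIES    32
#define BAR1_SHIFT      22              /* 4 MiB per entry */

int main(void)
{
        uint64_t bar1_phys = 0x80000000ull;     /* the identity-mapped case above */
        int index;

        for (index = 0; index < BAR1_ENTRIES; index++) {
                unsigned int addr_idx = (unsigned int)((bar1_phys >> BAR1_SHIFT) + index);
                uint64_t covers = bar1_phys + ((uint64_t)index << BAR1_SHIFT);

                printf("BAR1_INDEX%-2d addr_idx=0x%05x covers 0x%010llx-0x%010llx\n",
                       index, addr_idx,
                       (unsigned long long)covers,
                       (unsigned long long)(covers + (1ull << BAR1_SHIFT) - 1));
        }
        return 0;
}
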
diff --git a/arch/mips/pci/pcie-octeon.c b/arch/mips/pci/pcie-octeon.c
index 861361e0c9af..385f035b24e4 100644
--- a/arch/mips/pci/pcie-octeon.c
+++ b/arch/mips/pci/pcie-octeon.c
@@ -75,6 +75,8 @@ union cvmx_pcie_address {
} mem;
};
+#include <dma-coherence.h>
+
/**
* Return the Core virtual base address for PCIe IO access. IOs are
* read/written as an offset from this address.
@@ -1391,6 +1393,9 @@ static int __init octeon_pcie_setup(void)
cvmx_pcie_get_io_size(1) - 1;
register_pci_controller(&octeon_pcie1_controller);
}
+
+ octeon_pci_dma_init();
+
return 0;
}
diff --git a/arch/mn10300/Kconfig b/arch/mn10300/Kconfig
index 7c2a2f7f8dc1..41ba38513c89 100644
--- a/arch/mn10300/Kconfig
+++ b/arch/mn10300/Kconfig
@@ -1,16 +1,20 @@
-#
-# For a description of the syntax of this configuration file,
-# see Documentation/kbuild/kconfig-language.txt.
-#
-
-mainmenu "Linux Kernel Configuration"
-
config MN10300
def_bool y
select HAVE_OPROFILE
-config AM33
- def_bool y
+config AM33_2
+ def_bool n
+
+config AM33_3
+ def_bool n
+
+config AM34_2
+ def_bool n
+ select MN10300_HAS_ATOMIC_OPS_UNIT
+ select MN10300_HAS_CACHE_SNOOP
+
+config ERRATUM_NEED_TO_RELOAD_MMUCTR
+ def_bool y if AM33_3 || AM34_2
config MMU
def_bool y
@@ -37,7 +41,7 @@ config GENERIC_CALIBRATE_DELAY
def_bool y
config GENERIC_CMOS_UPDATE
- def_bool y
+ def_bool n
config GENERIC_FIND_NEXT_BIT
def_bool y
@@ -45,6 +49,27 @@ config GENERIC_FIND_NEXT_BIT
config GENERIC_HWEIGHT
def_bool y
+config GENERIC_TIME
+ def_bool y
+
+config GENERIC_CLOCKEVENTS
+ def_bool y
+
+config GENERIC_CLOCKEVENTS_BUILD
+ def_bool y
+ depends on GENERIC_CLOCKEVENTS
+
+config GENERIC_CLOCKEVENTS_BROADCAST
+ bool
+
+config CEVT_MN10300
+ def_bool y
+ depends on GENERIC_CLOCKEVENTS
+
+config CSRC_MN10300
+ def_bool y
+ depends on GENERIC_TIME
+
config GENERIC_BUG
def_bool y
@@ -61,18 +86,12 @@ config GENERIC_HARDIRQS
config HOTPLUG_CPU
def_bool n
-config HZ
- int
- default 1000
-
-mainmenu "Matsushita MN10300/AM33 Kernel Configuration"
-
source "init/Kconfig"
source "kernel/Kconfig.freezer"
-menu "Matsushita MN10300 system setup"
+menu "Panasonic MN10300 system setup"
choice
prompt "Unit type"
@@ -87,6 +106,10 @@ config MN10300_UNIT_ASB2303
config MN10300_UNIT_ASB2305
bool "ASB2305"
+config MN10300_UNIT_ASB2364
+ bool "ASB2364"
+ select SMSC911X_ARCH_HOOKS if SMSC911X
+
endchoice
choice
@@ -99,57 +122,51 @@ choice
config MN10300_PROC_MN103E010
bool "MN103E010"
depends on MN10300_UNIT_ASB2303 || MN10300_UNIT_ASB2305
+ select AM33_2
+ select MN10300_PROC_HAS_TTYSM0
+ select MN10300_PROC_HAS_TTYSM1
+ select MN10300_PROC_HAS_TTYSM2
+
+config MN10300_PROC_MN2WS0050
+ bool "MN2WS0050"
+ depends on MN10300_UNIT_ASB2364
+ select AM34_2
select MN10300_PROC_HAS_TTYSM0
select MN10300_PROC_HAS_TTYSM1
select MN10300_PROC_HAS_TTYSM2
endchoice
-choice
- prompt "Processor core support"
- default MN10300_CPU_AM33V2
+config MN10300_HAS_ATOMIC_OPS_UNIT
+ def_bool n
help
- This option specifies the processor core for which the kernel will be
- compiled. It affects the instruction set used.
-
-config MN10300_CPU_AM33V2
- bool "AM33v2"
-
-endchoice
+ This should be enabled if the processor has an atomic ops unit
+ capable of doing LL/SC equivalent operations.
config FPU
bool "FPU present"
default y
- depends on MN10300_PROC_MN103E010
+ depends on MN10300_PROC_MN103E010 || MN10300_PROC_MN2WS0050
-choice
- prompt "CPU Caching mode"
- default MN10300_CACHE_WBACK
+config LAZY_SAVE_FPU
+ bool "Save FPU state lazily"
+ default y
+ depends on FPU && !SMP
help
- This option determines the caching mode for the kernel.
+ Enable this to be lazy in the saving of the FPU state to the owning
+ task's thread struct. This is useful if most tasks on the system
+ don't use the FPU as only those tasks that use it will pass it
+ between them, and the state needn't be saved for a task that isn't
+ using it.
- Write-Back caching mode involves the all reads and writes causing
- the affected cacheline to be read into the cache first before being
- operated upon. Memory is not then updated by a write until the cache
- is filled and a cacheline needs to be displaced from the cache to
- make room. Only at that point is it written back.
+ This can't be so easily used on SMP as the process that owns the FPU
+ state on a CPU may be currently running on another CPU, so for the
+ moment, it is disabled.
- Write-Through caching only fetches cachelines from memory on a
- read. Writes always get written directly to memory. If the affected
- cacheline is also in cache, it will be updated too.
+source "arch/mn10300/mm/Kconfig.cache"
- The final option is to turn of caching entirely.
-
-config MN10300_CACHE_WBACK
- bool "Write-Back"
-
-config MN10300_CACHE_WTHRU
- bool "Write-Through"
-
-config MN10300_CACHE_DISABLED
- bool "Disabled"
-
-endchoice
+config MN10300_TLB_USE_PIDR
+ def_bool y
menu "Memory layout options"
@@ -170,24 +187,55 @@ config KERNEL_TEXT_ADDRESS
config KERNEL_ZIMAGE_BASE_ADDRESS
hex "Base address of compressed vmlinux image"
- default "0x90700000"
+ default "0x50700000"
+
+config BOOT_STACK_OFFSET
+ hex
+ default "0xF00" if SMP
+ default "0xFF0" if !SMP
+config BOOT_STACK_SIZE
+ hex
+ depends on SMP
+ default "0x100"
endmenu
-config PREEMPT
- bool "Preemptible Kernel"
- help
- This option reduces the latency of the kernel when reacting to
- real-time or interactive events by allowing a low priority process to
- be preempted even if it is in kernel mode executing a system call.
- This allows applications to run more reliably even when the system is
- under load.
+config SMP
+ bool "Symmetric multi-processing support"
+ default y
+ depends on MN10300_PROC_MN2WS0038 || MN10300_PROC_MN2WS0050
+ ---help---
+ This enables support for systems with more than one CPU. If you have
+ a system with only one CPU, like most personal computers, say N. If
+ you have a system with more than one CPU, say Y.
- Say Y here if you are building a kernel for a desktop, embedded
- or real-time system. Say N if you are unsure.
+ If you say N here, the kernel will run on single and multiprocessor
+ machines, but will use only one CPU of a multiprocessor machine. If
+ you say Y here, the kernel will run on many, but not all,
+ singleprocessor machines. On a singleprocessor machine, the kernel
+ will run faster if you say N here.
+
+ See also <file:Documentation/i386/IO-APIC.txt>,
+ <file:Documentation/nmi_watchdog.txt> and the SMP-HOWTO available at
+ <http://www.tldp.org/docs.html#howto>.
+
+ If you don't know what to do here, say N.
+
+config NR_CPUS
+ int
+ depends on SMP
+ default "2"
+
+config USE_GENERIC_SMP_HELPERS
+ bool
+ depends on SMP
+ default y
+
+source "kernel/Kconfig.preempt"
config MN10300_CURRENT_IN_E2
bool "Hold current task address in E2 register"
+ depends on !SMP
default y
help
This option removes the E2/R2 register from the set available to gcc
@@ -209,12 +257,15 @@ config MN10300_USING_JTAG
suppresses the use of certain hardware debugging features, such as
single-stepping, which are taken over completely by the JTAG unit.
+source "kernel/Kconfig.hz"
+source "kernel/time/Kconfig"
+
config MN10300_RTC
bool "Using MN10300 RTC"
- depends on MN10300_PROC_MN103E010
+ depends on MN10300_PROC_MN103E010 || MN10300_PROC_MN2WS0050
+ select GENERIC_CMOS_UPDATE
default n
help
-
This option enables support for the RTC, thus enabling time to be
tracked, even when system is powered down. This is available on-chip
on the MN103E010.
@@ -306,14 +357,23 @@ config MN10300_TTYSM1
choice
prompt "Select the timer to supply the clock for SIF1"
- default MN10300_TTYSM0_TIMER9
+ default MN10300_TTYSM1_TIMER12 \
+ if !(AM33_2 || AM33_3)
+ default MN10300_TTYSM1_TIMER9 \
+ if AM33_2 || AM33_3
depends on MN10300_TTYSM1
+config MN10300_TTYSM1_TIMER12
+ bool "Use timer 12 (16-bit)"
+ depends on !(AM33_2 || AM33_3)
+
config MN10300_TTYSM1_TIMER9
bool "Use timer 9 (16-bit)"
+ depends on AM33_2 || AM33_3
config MN10300_TTYSM1_TIMER3
bool "Use timer 3 (8-bit)"
+ depends on AM33_2 || AM33_3
endchoice
@@ -328,17 +388,107 @@ config MN10300_TTYSM2
choice
prompt "Select the timer to supply the clock for SIF2"
- default MN10300_TTYSM0_TIMER10
+ default MN10300_TTYSM2_TIMER3 \
+ if !(AM33_2 || AM33_3)
+ default MN10300_TTYSM2_TIMER10 \
+ if AM33_2 || AM33_3
depends on MN10300_TTYSM2
+config MN10300_TTYSM2_TIMER9
+ bool "Use timer 9 (16-bit)"
+ depends on !(AM33_2 || AM33_3)
+
+config MN10300_TTYSM2_TIMER1
+ bool "Use timer 1 (8-bit)"
+ depends on !(AM33_2 || AM33_3)
+
+config MN10300_TTYSM2_TIMER3
+ bool "Use timer 3 (8-bit)"
+ depends on !(AM33_2 || AM33_3)
+
config MN10300_TTYSM2_TIMER10
bool "Use timer 10 (16-bit)"
+ depends on AM33_2 || AM33_3
endchoice
config MN10300_TTYSM2_CTS
bool "Enable the use of the CTS line /dev/ttySM2"
- depends on MN10300_TTYSM2
+ depends on MN10300_TTYSM2 && AM33_2
+
+endmenu
+
+menu "Interrupt request priority options"
+
+comment "[!] NOTE: A lower number/level indicates a higher priority (0 is highest, 6 is lowest)"
+
+comment "____Non-maskable interrupt levels____"
+comment "The following must be set to a higher priority than local_irq_disable() and on-chip serial"
+
+config GDBSTUB_IRQ_LEVEL
+ int "GDBSTUB interrupt priority"
+ depends on GDBSTUB
+ range 0 1 if LINUX_CLI_LEVEL = 2
+ range 0 2 if LINUX_CLI_LEVEL = 3
+ range 0 3 if LINUX_CLI_LEVEL = 4
+ range 0 4 if LINUX_CLI_LEVEL = 5
+ range 0 5 if LINUX_CLI_LEVEL = 6
+ default 0
+
+comment "The following must be set to a higher priority than local_irq_disable()"
+
+config MN10300_SERIAL_IRQ_LEVEL
+ int "MN10300 on-chip serial interrupt priority"
+ depends on MN10300_TTYSM
+ range 1 1 if LINUX_CLI_LEVEL = 2
+ range 1 2 if LINUX_CLI_LEVEL = 3
+ range 1 3 if LINUX_CLI_LEVEL = 4
+ range 1 4 if LINUX_CLI_LEVEL = 5
+ range 1 5 if LINUX_CLI_LEVEL = 6
+ default 1
+
+comment "-"
+comment "____Maskable interrupt levels____"
+
+config LINUX_CLI_LEVEL
+ int "The highest interrupt priority excluded by local_irq_disable() (2-6)"
+ range 2 6
+ default 2
+ help
+ local_irq_disable() doesn't actually disable maskable interrupts -
+ what it does is restrict the levels of interrupt which are permitted
+ (a lower level indicates a higher priority) by lowering the value in
+ EPSW.IM from 7. Any interrupt is permitted for which the level is
+ lower than EPSW.IM.
+
+ Certain interrupts, such as the GDBSTUB and virtual MN10300 on-chip
+ serial DMA interrupts, are allowed to interrupt normal disabled
+ sections.
+
+comment "The following must be set to a equal to or lower priority than LINUX_CLI_LEVEL"
+
+config TIMER_IRQ_LEVEL
+ int "Kernel timer interrupt priority"
+ range LINUX_CLI_LEVEL 6
+ default 4
+
+config PCI_IRQ_LEVEL
+ int "PCI interrupt priority"
+ depends on PCI
+ range LINUX_CLI_LEVEL 6
+ default 5
+
+config ETHERNET_IRQ_LEVEL
+ int "Ethernet interrupt priority"
+ depends on SMC91X || SMC911X || SMSC911X
+ range LINUX_CLI_LEVEL 6
+ default 6
+
+config EXT_SERIAL_IRQ_LEVEL
+ int "External serial port interrupt priority"
+ depends on SERIAL_8250
+ range LINUX_CLI_LEVEL 6
+ default 6
endmenu
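
The help text for LINUX_CLI_LEVEL above describes the underlying rule: an interrupt is delivered only if its level is numerically lower than the current EPSW.IM, and local_irq_disable() lowers IM to LINUX_CLI_LEVEL rather than masking everything. The toy check below applies that rule to the default priorities chosen in this menu; it is an illustration of the arithmetic, not code from the port.

#include <stdio.h>

static int irq_permitted(int irq_level, int epsw_im)
{
        return irq_level < epsw_im;     /* lower number = higher priority */
}

int main(void)
{
        int linux_cli_level = 2;        /* the default chosen above */
        int gdbstub_level = 0, serial_level = 1, timer_level = 4;

        printf("with interrupts 'disabled' (IM=%d):\n", linux_cli_level);
        printf("  GDBSTUB (level %d): %s\n", gdbstub_level,
               irq_permitted(gdbstub_level, linux_cli_level) ? "delivered" : "held off");
        printf("  on-chip serial (level %d): %s\n", serial_level,
               irq_permitted(serial_level, linux_cli_level) ? "delivered" : "held off");
        printf("  kernel timer (level %d): %s\n", timer_level,
               irq_permitted(timer_level, linux_cli_level) ? "delivered" : "held off");
        return 0;
}
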
diff --git a/arch/mn10300/Makefile b/arch/mn10300/Makefile
index ac5c6bdb2f05..7120282bf0d8 100644
--- a/arch/mn10300/Makefile
+++ b/arch/mn10300/Makefile
@@ -36,6 +36,9 @@ endif
ifeq ($(CONFIG_MN10300_PROC_MN103E010),y)
PROCESSOR := mn103e010
endif
+ifeq ($(CONFIG_MN10300_PROC_MN2WS0050),y)
+PROCESSOR := mn2ws0050
+endif
ifeq ($(CONFIG_MN10300_UNIT_ASB2303),y)
UNIT := asb2303
@@ -43,6 +46,9 @@ endif
ifeq ($(CONFIG_MN10300_UNIT_ASB2305),y)
UNIT := asb2305
endif
+ifeq ($(CONFIG_MN10300_UNIT_ASB2364),y)
+UNIT := asb2364
+endif
head-y := arch/mn10300/kernel/head.o arch/mn10300/kernel/init_task.o
diff --git a/arch/mn10300/boot/compressed/head.S b/arch/mn10300/boot/compressed/head.S
index 502e1eb56709..7b50345b9e84 100644
--- a/arch/mn10300/boot/compressed/head.S
+++ b/arch/mn10300/boot/compressed/head.S
@@ -14,10 +14,29 @@
#include <linux/linkage.h>
#include <asm/cpu-regs.h>
+#include <asm/cache.h>
+#ifdef CONFIG_SMP
+#include <proc/smp-regs.h>
+#endif
.globl startup_32
startup_32:
- # first save off parameters from bootloader
+#ifdef CONFIG_SMP
+ #
+ # Secondary CPUs jump directly to the kernel entry point
+ #
+ # Must save primary CPU's D0-D2 registers as they hold boot parameters
+ #
+ mov (CPUID), d3
+ and CPUID_MASK,d3
+ beq startup_primary
+ mov CONFIG_KERNEL_TEXT_ADDRESS,a0
+ jmp (a0)
+
+startup_primary:
+#endif /* CONFIG_SMP */
+
+ # first save parameters from bootloader
mov param_save_area,a0
mov d0,(a0)
mov d1,(4,a0)
@@ -37,8 +56,15 @@ startup_32:
mov (a0),d0
btst CHCTR_ICBUSY|CHCTR_DCBUSY,d0 # wait till not busy
lne
- mov CHCTR_ICEN|CHCTR_DCEN|CHCTR_DCWTMD,d0 # writethru dcache
+
+#ifdef CONFIG_MN10300_CACHE_ENABLED
+#ifdef CONFIG_MN10300_CACHE_WBACK
+ mov CHCTR_ICEN|CHCTR_DCEN|CHCTR_DCWTMD_WRBACK,d0
+#else
+ mov CHCTR_ICEN|CHCTR_DCEN|CHCTR_DCWTMD_WRTHROUGH,d0
+#endif /* WBACK */
movhu d0,(a0) # enable
+#endif /* ENABLED */
# clear the BSS area
mov __bss_start,a0
@@ -54,6 +80,9 @@ bssclear_end:
# decompress the kernel
call decompress_kernel[],0
+#ifdef CONFIG_MN10300_CACHE_WBACK
+ call mn10300_dcache_flush_inv[],0
+#endif
# disable caches again
mov CHCTR,a0
@@ -69,10 +98,46 @@ bssclear_end:
mov (4,a0),d1
mov (8,a0),d2
+ # jump to the kernel proper entry point
mov a3,sp
mov CONFIG_KERNEL_TEXT_ADDRESS,a0
jmp (a0)
+
+###############################################################################
+#
+# Cache flush routines
+#
+###############################################################################
+#ifdef CONFIG_MN10300_CACHE_WBACK
+mn10300_dcache_flush_inv:
+ movhu (CHCTR),d0
+ btst CHCTR_DCEN,d0
+ beq mn10300_dcache_flush_inv_end
+
+ mov L1_CACHE_NENTRIES,d1
+ clr a1
+
+mn10300_dcache_flush_inv_loop:
+ mov (DCACHE_PURGE_WAY0(0),a1),d0 # unconditional purge
+ mov (DCACHE_PURGE_WAY1(0),a1),d0 # unconditional purge
+ mov (DCACHE_PURGE_WAY2(0),a1),d0 # unconditional purge
+ mov (DCACHE_PURGE_WAY3(0),a1),d0 # unconditional purge
+
+ add L1_CACHE_BYTES,a1
+ add -1,d1
+ bne mn10300_dcache_flush_inv_loop
+
+mn10300_dcache_flush_inv_end:
+ ret [],0
+#endif /* CONFIG_MN10300_CACHE_WBACK */
+
+
+###############################################################################
+#
+# Data areas
+#
+###############################################################################
.data
.align 4
param_save_area:
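
The mn10300_dcache_flush_inv routine added above is easier to follow in C; the sketch below is an illustration only, not part of the patch, and the line size and entry count are assumed rather than taken from <asm/cache.h>.

/* assumed geometry for this sketch; the real values come from <asm/cache.h> */
#define L1_CACHE_BYTES		16
#define L1_CACHE_NENTRIES	256

static void dcache_flush_inv_model(volatile unsigned char *purge_way[4])
{
	unsigned int entry, way;
	volatile unsigned long sink;

	for (entry = 0; entry < L1_CACHE_NENTRIES; entry++)
		for (way = 0; way < 4; way++)
			/* a read from the purge region writes back and
			 * invalidates the matching data cache line */
			sink = *(volatile unsigned long *)
				(purge_way[way] + entry * L1_CACHE_BYTES);
	(void)sink;
}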
diff --git a/arch/mn10300/configs/asb2303_defconfig b/arch/mn10300/configs/asb2303_defconfig
index d80dfcb2c902..3f749b69ca71 100644
--- a/arch/mn10300/configs/asb2303_defconfig
+++ b/arch/mn10300/configs/asb2303_defconfig
@@ -12,6 +12,8 @@ CONFIG_SLAB=y
CONFIG_PROFILING=y
# CONFIG_BLOCK is not set
CONFIG_PREEMPT=y
+CONFIG_NO_HZ=y
+CONFIG_HIGH_RES_TIMERS=y
CONFIG_MN10300_RTC=y
CONFIG_MN10300_TTYSM_CONSOLE=y
CONFIG_MN10300_TTYSM0=y
diff --git a/arch/mn10300/configs/asb2364_defconfig b/arch/mn10300/configs/asb2364_defconfig
new file mode 100644
index 000000000000..83ce2f27b12a
--- /dev/null
+++ b/arch/mn10300/configs/asb2364_defconfig
@@ -0,0 +1,98 @@
+CONFIG_EXPERIMENTAL=y
+CONFIG_SYSVIPC=y
+CONFIG_POSIX_MQUEUE=y
+CONFIG_BSD_PROCESS_ACCT=y
+CONFIG_TASKSTATS=y
+CONFIG_TASK_DELAY_ACCT=y
+CONFIG_TASK_XACCT=y
+CONFIG_TASK_IO_ACCOUNTING=y
+CONFIG_LOG_BUF_SHIFT=14
+CONFIG_CGROUPS=y
+CONFIG_CGROUP_NS=y
+CONFIG_CGROUP_FREEZER=y
+CONFIG_CGROUP_DEVICE=y
+CONFIG_CGROUP_CPUACCT=y
+CONFIG_RESOURCE_COUNTERS=y
+CONFIG_RELAY=y
+# CONFIG_CC_OPTIMIZE_FOR_SIZE is not set
+CONFIG_EMBEDDED=y
+# CONFIG_KALLSYMS is not set
+# CONFIG_VM_EVENT_COUNTERS is not set
+CONFIG_SLAB=y
+CONFIG_PROFILING=y
+CONFIG_MODULES=y
+CONFIG_MODULE_UNLOAD=y
+# CONFIG_BLOCK is not set
+CONFIG_MN10300_UNIT_ASB2364=y
+CONFIG_PREEMPT=y
+# CONFIG_MN10300_USING_JTAG is not set
+CONFIG_NO_HZ=y
+CONFIG_HIGH_RES_TIMERS=y
+CONFIG_MN10300_TTYSM_CONSOLE=y
+CONFIG_MN10300_TTYSM0=y
+CONFIG_MN10300_TTYSM0_TIMER2=y
+CONFIG_MN10300_TTYSM1=y
+CONFIG_NET=y
+CONFIG_PACKET=y
+CONFIG_UNIX=y
+CONFIG_INET=y
+CONFIG_IP_MULTICAST=y
+CONFIG_IP_PNP=y
+CONFIG_IP_PNP_BOOTP=y
+# CONFIG_INET_XFRM_MODE_TRANSPORT is not set
+# CONFIG_INET_XFRM_MODE_TUNNEL is not set
+# CONFIG_INET_XFRM_MODE_BEET is not set
+# CONFIG_INET_LRO is not set
+# CONFIG_INET_DIAG is not set
+CONFIG_IPV6=y
+# CONFIG_INET6_XFRM_MODE_TRANSPORT is not set
+# CONFIG_INET6_XFRM_MODE_TUNNEL is not set
+# CONFIG_INET6_XFRM_MODE_BEET is not set
+# CONFIG_FIRMWARE_IN_KERNEL is not set
+CONFIG_CONNECTOR=y
+CONFIG_MTD=y
+CONFIG_MTD_DEBUG=y
+CONFIG_MTD_PARTITIONS=y
+CONFIG_MTD_REDBOOT_PARTS=y
+CONFIG_MTD_REDBOOT_PARTS_UNALLOCATED=y
+CONFIG_MTD_CHAR=y
+CONFIG_MTD_CFI=y
+CONFIG_MTD_JEDECPROBE=y
+CONFIG_MTD_CFI_ADV_OPTIONS=y
+CONFIG_MTD_CFI_GEOMETRY=y
+CONFIG_MTD_CFI_I4=y
+CONFIG_MTD_CFI_AMDSTD=y
+CONFIG_MTD_PHYSMAP=y
+CONFIG_NETDEVICES=y
+CONFIG_NET_ETHERNET=y
+CONFIG_SMSC911X=y
+# CONFIG_NETDEV_1000 is not set
+# CONFIG_NETDEV_10000 is not set
+# CONFIG_INPUT_MOUSEDEV is not set
+# CONFIG_INPUT_KEYBOARD is not set
+# CONFIG_INPUT_MOUSE is not set
+# CONFIG_SERIO is not set
+# CONFIG_VT is not set
+CONFIG_SERIAL_8250=y
+CONFIG_SERIAL_8250_CONSOLE=y
+CONFIG_SERIAL_8250_EXTENDED=y
+CONFIG_SERIAL_8250_SHARE_IRQ=y
+# CONFIG_HW_RANDOM is not set
+# CONFIG_HWMON is not set
+# CONFIG_HID_SUPPORT is not set
+# CONFIG_USB_SUPPORT is not set
+CONFIG_PROC_KCORE=y
+# CONFIG_PROC_PAGE_MONITOR is not set
+CONFIG_TMPFS=y
+CONFIG_TMPFS_POSIX_ACL=y
+CONFIG_JFFS2_FS=y
+CONFIG_NFS_FS=y
+CONFIG_NFS_V3=y
+CONFIG_ROOT_NFS=y
+CONFIG_MAGIC_SYSRQ=y
+CONFIG_STRIP_ASM_SYMS=y
+CONFIG_DEBUG_KERNEL=y
+CONFIG_DETECT_HUNG_TASK=y
+# CONFIG_DEBUG_BUGVERBOSE is not set
+CONFIG_DEBUG_INFO=y
+# CONFIG_RCU_CPU_STALL_DETECTOR is not set
diff --git a/arch/mn10300/include/asm/atomic.h b/arch/mn10300/include/asm/atomic.h
index f0cc1f84a72f..92d2f9298e38 100644
--- a/arch/mn10300/include/asm/atomic.h
+++ b/arch/mn10300/include/asm/atomic.h
@@ -1 +1,351 @@
+/* MN10300 Atomic counter operations
+ *
+ * Copyright (C) 2007 Red Hat, Inc. All Rights Reserved.
+ * Written by David Howells (dhowells@redhat.com)
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public Licence
+ * as published by the Free Software Foundation; either version
+ * 2 of the Licence, or (at your option) any later version.
+ */
+#ifndef _ASM_ATOMIC_H
+#define _ASM_ATOMIC_H
+
+#include <asm/irqflags.h>
+
+#ifndef __ASSEMBLY__
+
+#ifdef CONFIG_SMP
+#ifdef CONFIG_MN10300_HAS_ATOMIC_OPS_UNIT
+static inline
+unsigned long __xchg(volatile unsigned long *m, unsigned long val)
+{
+ unsigned long status;
+ unsigned long oldval;
+
+ asm volatile(
+ "1: mov %4,(_AAR,%3) \n"
+ " mov (_ADR,%3),%1 \n"
+ " mov %5,(_ADR,%3) \n"
+ " mov (_ADR,%3),%0 \n" /* flush */
+ " mov (_ASR,%3),%0 \n"
+ " or %0,%0 \n"
+ " bne 1b \n"
+ : "=&r"(status), "=&r"(oldval), "=m"(*m)
+ : "a"(ATOMIC_OPS_BASE_ADDR), "r"(m), "r"(val)
+ : "memory", "cc");
+
+ return oldval;
+}
+
+static inline unsigned long __cmpxchg(volatile unsigned long *m,
+ unsigned long old, unsigned long new)
+{
+ unsigned long status;
+ unsigned long oldval;
+
+ asm volatile(
+ "1: mov %4,(_AAR,%3) \n"
+ " mov (_ADR,%3),%1 \n"
+ " cmp %5,%1 \n"
+ " bne 2f \n"
+ " mov %6,(_ADR,%3) \n"
+ "2: mov (_ADR,%3),%0 \n" /* flush */
+ " mov (_ASR,%3),%0 \n"
+ " or %0,%0 \n"
+ " bne 1b \n"
+ : "=&r"(status), "=&r"(oldval), "=m"(*m)
+ : "a"(ATOMIC_OPS_BASE_ADDR), "r"(m),
+ "r"(old), "r"(new)
+ : "memory", "cc");
+
+ return oldval;
+}
+#else /* CONFIG_MN10300_HAS_ATOMIC_OPS_UNIT */
+#error "No SMP atomic operation support!"
+#endif /* CONFIG_MN10300_HAS_ATOMIC_OPS_UNIT */
+
+#else /* CONFIG_SMP */
+
+/*
+ * Emulate xchg for non-SMP MN10300
+ */
+struct __xchg_dummy { unsigned long a[100]; };
+#define __xg(x) ((struct __xchg_dummy *)(x))
+
+static inline
+unsigned long __xchg(volatile unsigned long *m, unsigned long val)
+{
+ unsigned long oldval;
+ unsigned long flags;
+
+ flags = arch_local_cli_save();
+ oldval = *m;
+ *m = val;
+ arch_local_irq_restore(flags);
+ return oldval;
+}
+
+/*
+ * Emulate cmpxchg for non-SMP MN10300
+ */
+static inline unsigned long __cmpxchg(volatile unsigned long *m,
+ unsigned long old, unsigned long new)
+{
+ unsigned long oldval;
+ unsigned long flags;
+
+ flags = arch_local_cli_save();
+ oldval = *m;
+ if (oldval == old)
+ *m = new;
+ arch_local_irq_restore(flags);
+ return oldval;
+}
+
+#endif /* CONFIG_SMP */
+
+#define xchg(ptr, v) \
+ ((__typeof__(*(ptr))) __xchg((unsigned long *)(ptr), \
+ (unsigned long)(v)))
+
+#define cmpxchg(ptr, o, n) \
+ ((__typeof__(*(ptr))) __cmpxchg((unsigned long *)(ptr), \
+ (unsigned long)(o), \
+ (unsigned long)(n)))
+
+#define atomic_xchg(ptr, v) (xchg(&(ptr)->counter, (v)))
+#define atomic_cmpxchg(v, old, new) (cmpxchg(&((v)->counter), (old), (new)))
+
+#endif /* !__ASSEMBLY__ */
+
+#ifndef CONFIG_SMP
#include <asm-generic/atomic.h>
+#else
+
+/*
+ * Atomic operations that C can't guarantee us. Useful for
+ * resource counting etc..
+ */
+
+#define ATOMIC_INIT(i) { (i) }
+
+#ifdef __KERNEL__
+
+/**
+ * atomic_read - read atomic variable
+ * @v: pointer of type atomic_t
+ *
+ * Atomically reads the value of @v. Note that the guaranteed
+ * useful range of an atomic_t is only 24 bits.
+ */
+#define atomic_read(v) ((v)->counter)
+
+/**
+ * atomic_set - set atomic variable
+ * @v: pointer of type atomic_t
+ * @i: required value
+ *
+ * Atomically sets the value of @v to @i. Note that the guaranteed
+ * useful range of an atomic_t is only 24 bits.
+ */
+#define atomic_set(v, i) (((v)->counter) = (i))
+
+/**
+ * atomic_add_return - add integer to atomic variable
+ * @i: integer value to add
+ * @v: pointer of type atomic_t
+ *
+ * Atomically adds @i to @v and returns the result
+ * Note that the guaranteed useful range of an atomic_t is only 24 bits.
+ */
+static inline int atomic_add_return(int i, atomic_t *v)
+{
+ int retval;
+#ifdef CONFIG_SMP
+ int status;
+
+ asm volatile(
+ "1: mov %4,(_AAR,%3) \n"
+ " mov (_ADR,%3),%1 \n"
+ " add %5,%1 \n"
+ " mov %1,(_ADR,%3) \n"
+ " mov (_ADR,%3),%0 \n" /* flush */
+ " mov (_ASR,%3),%0 \n"
+ " or %0,%0 \n"
+ " bne 1b \n"
+ : "=&r"(status), "=&r"(retval), "=m"(v->counter)
+ : "a"(ATOMIC_OPS_BASE_ADDR), "r"(&v->counter), "r"(i)
+ : "memory", "cc");
+
+#else
+ unsigned long flags;
+
+ flags = arch_local_cli_save();
+ retval = v->counter;
+ retval += i;
+ v->counter = retval;
+ arch_local_irq_restore(flags);
+#endif
+ return retval;
+}
+
+/**
+ * atomic_sub_return - subtract integer from atomic variable
+ * @i: integer value to subtract
+ * @v: pointer of type atomic_t
+ *
+ * Atomically subtracts @i from @v and returns the result
+ * Note that the guaranteed useful range of an atomic_t is only 24 bits.
+ */
+static inline int atomic_sub_return(int i, atomic_t *v)
+{
+ int retval;
+#ifdef CONFIG_SMP
+ int status;
+
+ asm volatile(
+ "1: mov %4,(_AAR,%3) \n"
+ " mov (_ADR,%3),%1 \n"
+ " sub %5,%1 \n"
+ " mov %1,(_ADR,%3) \n"
+ " mov (_ADR,%3),%0 \n" /* flush */
+ " mov (_ASR,%3),%0 \n"
+ " or %0,%0 \n"
+ " bne 1b \n"
+ : "=&r"(status), "=&r"(retval), "=m"(v->counter)
+ : "a"(ATOMIC_OPS_BASE_ADDR), "r"(&v->counter), "r"(i)
+ : "memory", "cc");
+
+#else
+ unsigned long flags;
+ flags = arch_local_cli_save();
+ retval = v->counter;
+ retval -= i;
+ v->counter = retval;
+ arch_local_irq_restore(flags);
+#endif
+ return retval;
+}
+
+static inline int atomic_add_negative(int i, atomic_t *v)
+{
+ return atomic_add_return(i, v) < 0;
+}
+
+static inline void atomic_add(int i, atomic_t *v)
+{
+ atomic_add_return(i, v);
+}
+
+static inline void atomic_sub(int i, atomic_t *v)
+{
+ atomic_sub_return(i, v);
+}
+
+static inline void atomic_inc(atomic_t *v)
+{
+ atomic_add_return(1, v);
+}
+
+static inline void atomic_dec(atomic_t *v)
+{
+ atomic_sub_return(1, v);
+}
+
+#define atomic_dec_return(v) atomic_sub_return(1, (v))
+#define atomic_inc_return(v) atomic_add_return(1, (v))
+
+#define atomic_sub_and_test(i, v) (atomic_sub_return((i), (v)) == 0)
+#define atomic_dec_and_test(v) (atomic_sub_return(1, (v)) == 0)
+#define atomic_inc_and_test(v) (atomic_add_return(1, (v)) == 0)
+
+#define atomic_add_unless(v, a, u) \
+({ \
+ int c, old; \
+ c = atomic_read(v); \
+ while (c != (u) && (old = atomic_cmpxchg((v), c, c + (a))) != c) \
+ c = old; \
+ c != (u); \
+})
+
+#define atomic_inc_not_zero(v) atomic_add_unless((v), 1, 0)
+
+/**
+ * atomic_clear_mask - Atomically clear bits in memory
+ * @mask: Mask of the bits to be cleared
+ * @v: pointer to word in memory
+ *
+ * Atomically clears the bits set in mask from the memory word specified.
+ */
+static inline void atomic_clear_mask(unsigned long mask, unsigned long *addr)
+{
+#ifdef CONFIG_SMP
+ int status;
+
+ asm volatile(
+ "1: mov %3,(_AAR,%2) \n"
+ " mov (_ADR,%2),%0 \n"
+ " and %4,%0 \n"
+ " mov %0,(_ADR,%2) \n"
+ " mov (_ADR,%2),%0 \n" /* flush */
+ " mov (_ASR,%2),%0 \n"
+ " or %0,%0 \n"
+ " bne 1b \n"
+ : "=&r"(status), "=m"(*addr)
+ : "a"(ATOMIC_OPS_BASE_ADDR), "r"(addr), "r"(~mask)
+ : "memory", "cc");
+#else
+ unsigned long flags;
+
+ mask = ~mask;
+ flags = arch_local_cli_save();
+ *addr &= mask;
+ arch_local_irq_restore(flags);
+#endif
+}
+
+/**
+ * atomic_set_mask - Atomically set bits in memory
+ * @mask: Mask of the bits to be set
+ * @v: pointer to word in memory
+ *
+ * Atomically sets the bits set in mask from the memory word specified.
+ */
+static inline void atomic_set_mask(unsigned long mask, unsigned long *addr)
+{
+#ifdef CONFIG_SMP
+ int status;
+
+ asm volatile(
+ "1: mov %3,(_AAR,%2) \n"
+ " mov (_ADR,%2),%0 \n"
+ " or %4,%0 \n"
+ " mov %0,(_ADR,%2) \n"
+ " mov (_ADR,%2),%0 \n" /* flush */
+ " mov (_ASR,%2),%0 \n"
+ " or %0,%0 \n"
+ " bne 1b \n"
+ : "=&r"(status), "=m"(*addr)
+ : "a"(ATOMIC_OPS_BASE_ADDR), "r"(addr), "r"(mask)
+ : "memory", "cc");
+#else
+ unsigned long flags;
+
+ flags = arch_local_cli_save();
+ *addr |= mask;
+ arch_local_irq_restore(flags);
+#endif
+}
+
+/* Atomic operations are already serializing on MN10300??? */
+#define smp_mb__before_atomic_dec() barrier()
+#define smp_mb__after_atomic_dec() barrier()
+#define smp_mb__before_atomic_inc() barrier()
+#define smp_mb__after_atomic_inc() barrier()
+
+#include <asm-generic/atomic-long.h>
+
+#endif /* __KERNEL__ */
+#endif /* CONFIG_SMP */
+#endif /* _ASM_ATOMIC_H */
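
The atomic_add_unless() macro above is the usual compare-and-swap retry loop; a minimal user-space rendering (illustrative only, using C11 atomics in place of the MN10300 atomic-operations unit) looks like this:

#include <stdatomic.h>
#include <stdbool.h>

static bool add_unless(atomic_int *v, int a, int u)
{
	int c = atomic_load(v);

	while (c != u) {
		/* try to swap c -> c + a; on failure c is reloaded */
		if (atomic_compare_exchange_weak(v, &c, c + a))
			return true;
	}
	return false;	/* hit the excluded value, nothing added */
}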
diff --git a/arch/mn10300/include/asm/bitops.h b/arch/mn10300/include/asm/bitops.h
index 3f50e9661076..3b8a868188f5 100644
--- a/arch/mn10300/include/asm/bitops.h
+++ b/arch/mn10300/include/asm/bitops.h
@@ -57,7 +57,7 @@
#define clear_bit(nr, addr) ___clear_bit((nr), (addr))
-static inline void __clear_bit(int nr, volatile void *addr)
+static inline void __clear_bit(unsigned long nr, volatile void *addr)
{
unsigned int *a = (unsigned int *) addr;
int mask;
@@ -70,15 +70,15 @@ static inline void __clear_bit(int nr, volatile void *addr)
/*
* test bit
*/
-static inline int test_bit(int nr, const volatile void *addr)
+static inline int test_bit(unsigned long nr, const volatile void *addr)
{
- return 1UL & (((const unsigned int *) addr)[nr >> 5] >> (nr & 31));
+ return 1UL & (((const volatile unsigned int *) addr)[nr >> 5] >> (nr & 31));
}
/*
* change bit
*/
-static inline void __change_bit(int nr, volatile void *addr)
+static inline void __change_bit(unsigned long nr, volatile void *addr)
{
int mask;
unsigned int *a = (unsigned int *) addr;
@@ -88,7 +88,7 @@ static inline void __change_bit(int nr, volatile void *addr)
*a ^= mask;
}
-extern void change_bit(int nr, volatile void *addr);
+extern void change_bit(unsigned long nr, volatile void *addr);
/*
* test and set bit
@@ -135,7 +135,7 @@ extern void change_bit(int nr, volatile void *addr);
/*
* test and change bit
*/
-static inline int __test_and_change_bit(int nr, volatile void *addr)
+static inline int __test_and_change_bit(unsigned long nr, volatile void *addr)
{
int mask, retval;
unsigned int *a = (unsigned int *)addr;
@@ -148,7 +148,7 @@ static inline int __test_and_change_bit(int nr, volatile void *addr)
return retval;
}
-extern int test_and_change_bit(int nr, volatile void *addr);
+extern int test_and_change_bit(unsigned long nr, volatile void *addr);
#include <asm-generic/bitops/lock.h>
diff --git a/arch/mn10300/include/asm/cache.h b/arch/mn10300/include/asm/cache.h
index 781bf613366d..f29cde2cfc91 100644
--- a/arch/mn10300/include/asm/cache.h
+++ b/arch/mn10300/include/asm/cache.h
@@ -43,14 +43,18 @@
/* instruction cache access registers */
#define ICACHE_DATA(WAY, ENTRY, OFF) \
- __SYSREG(0xc8000000 + (WAY) * L1_CACHE_WAYDISP + (ENTRY) * 0x10 + (OFF) * 4, u32)
+ __SYSREG(0xc8000000 + (WAY) * L1_CACHE_WAYDISP + \
+ (ENTRY) * L1_CACHE_BYTES + (OFF) * 4, u32)
#define ICACHE_TAG(WAY, ENTRY) \
- __SYSREG(0xc8100000 + (WAY) * L1_CACHE_WAYDISP + (ENTRY) * 0x10, u32)
+ __SYSREG(0xc8100000 + (WAY) * L1_CACHE_WAYDISP + \
+ (ENTRY) * L1_CACHE_BYTES, u32)
-/* instruction cache access registers */
+/* data cache access registers */
#define DCACHE_DATA(WAY, ENTRY, OFF) \
- __SYSREG(0xc8200000 + (WAY) * L1_CACHE_WAYDISP + (ENTRY) * 0x10 + (OFF) * 4, u32)
+ __SYSREG(0xc8200000 + (WAY) * L1_CACHE_WAYDISP + \
+ (ENTRY) * L1_CACHE_BYTES + (OFF) * 4, u32)
#define DCACHE_TAG(WAY, ENTRY) \
- __SYSREG(0xc8300000 + (WAY) * L1_CACHE_WAYDISP + (ENTRY) * 0x10, u32)
+ __SYSREG(0xc8300000 + (WAY) * L1_CACHE_WAYDISP + \
+ (ENTRY) * L1_CACHE_BYTES, u32)
#endif /* _ASM_CACHE_H */
diff --git a/arch/mn10300/include/asm/cacheflush.h b/arch/mn10300/include/asm/cacheflush.h
index 29e692f7f030..faed90240ded 100644
--- a/arch/mn10300/include/asm/cacheflush.h
+++ b/arch/mn10300/include/asm/cacheflush.h
@@ -17,66 +17,55 @@
#include <linux/mm.h>
/*
- * virtually-indexed cache management (our cache is physically indexed)
+ * Primitive routines
*/
-#define flush_cache_all() do {} while (0)
-#define flush_cache_mm(mm) do {} while (0)
-#define flush_cache_dup_mm(mm) do {} while (0)
-#define flush_cache_range(mm, start, end) do {} while (0)
-#define flush_cache_page(vma, vmaddr, pfn) do {} while (0)
-#define flush_cache_vmap(start, end) do {} while (0)
-#define flush_cache_vunmap(start, end) do {} while (0)
-#define ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE 0
-#define flush_dcache_page(page) do {} while (0)
-#define flush_dcache_mmap_lock(mapping) do {} while (0)
-#define flush_dcache_mmap_unlock(mapping) do {} while (0)
-
-/*
- * physically-indexed cache management
- */
-#ifndef CONFIG_MN10300_CACHE_DISABLED
-
-extern void flush_icache_range(unsigned long start, unsigned long end);
-extern void flush_icache_page(struct vm_area_struct *vma, struct page *pg);
-
-#else
-
-#define flush_icache_range(start, end) do {} while (0)
-#define flush_icache_page(vma, pg) do {} while (0)
-
-#endif
-
-#define flush_icache_user_range(vma, pg, adr, len) \
- flush_icache_range(adr, adr + len)
-
-#define copy_to_user_page(vma, page, vaddr, dst, src, len) \
- do { \
- memcpy(dst, src, len); \
- flush_icache_page(vma, page); \
- } while (0)
-
-#define copy_from_user_page(vma, page, vaddr, dst, src, len) \
- memcpy(dst, src, len)
-
-/*
- * primitive routines
- */
-#ifndef CONFIG_MN10300_CACHE_DISABLED
+#ifdef CONFIG_MN10300_CACHE_ENABLED
+extern void mn10300_local_icache_inv(void);
+extern void mn10300_local_icache_inv_page(unsigned long start);
+extern void mn10300_local_icache_inv_range(unsigned long start, unsigned long end);
+extern void mn10300_local_icache_inv_range2(unsigned long start, unsigned long size);
+extern void mn10300_local_dcache_inv(void);
+extern void mn10300_local_dcache_inv_page(unsigned long start);
+extern void mn10300_local_dcache_inv_range(unsigned long start, unsigned long end);
+extern void mn10300_local_dcache_inv_range2(unsigned long start, unsigned long size);
extern void mn10300_icache_inv(void);
+extern void mn10300_icache_inv_page(unsigned long start);
+extern void mn10300_icache_inv_range(unsigned long start, unsigned long end);
+extern void mn10300_icache_inv_range2(unsigned long start, unsigned long size);
extern void mn10300_dcache_inv(void);
-extern void mn10300_dcache_inv_page(unsigned start);
-extern void mn10300_dcache_inv_range(unsigned start, unsigned end);
-extern void mn10300_dcache_inv_range2(unsigned start, unsigned size);
+extern void mn10300_dcache_inv_page(unsigned long start);
+extern void mn10300_dcache_inv_range(unsigned long start, unsigned long end);
+extern void mn10300_dcache_inv_range2(unsigned long start, unsigned long size);
#ifdef CONFIG_MN10300_CACHE_WBACK
+extern void mn10300_local_dcache_flush(void);
+extern void mn10300_local_dcache_flush_page(unsigned long start);
+extern void mn10300_local_dcache_flush_range(unsigned long start, unsigned long end);
+extern void mn10300_local_dcache_flush_range2(unsigned long start, unsigned long size);
+extern void mn10300_local_dcache_flush_inv(void);
+extern void mn10300_local_dcache_flush_inv_page(unsigned long start);
+extern void mn10300_local_dcache_flush_inv_range(unsigned long start, unsigned long end);
+extern void mn10300_local_dcache_flush_inv_range2(unsigned long start, unsigned long size);
extern void mn10300_dcache_flush(void);
-extern void mn10300_dcache_flush_page(unsigned start);
-extern void mn10300_dcache_flush_range(unsigned start, unsigned end);
-extern void mn10300_dcache_flush_range2(unsigned start, unsigned size);
+extern void mn10300_dcache_flush_page(unsigned long start);
+extern void mn10300_dcache_flush_range(unsigned long start, unsigned long end);
+extern void mn10300_dcache_flush_range2(unsigned long start, unsigned long size);
extern void mn10300_dcache_flush_inv(void);
-extern void mn10300_dcache_flush_inv_page(unsigned start);
-extern void mn10300_dcache_flush_inv_range(unsigned start, unsigned end);
-extern void mn10300_dcache_flush_inv_range2(unsigned start, unsigned size);
+extern void mn10300_dcache_flush_inv_page(unsigned long start);
+extern void mn10300_dcache_flush_inv_range(unsigned long start, unsigned long end);
+extern void mn10300_dcache_flush_inv_range2(unsigned long start, unsigned long size);
#else
+#define mn10300_local_dcache_flush() do {} while (0)
+#define mn10300_local_dcache_flush_page(start) do {} while (0)
+#define mn10300_local_dcache_flush_range(start, end) do {} while (0)
+#define mn10300_local_dcache_flush_range2(start, size) do {} while (0)
+#define mn10300_local_dcache_flush_inv() \
+ mn10300_local_dcache_inv()
+#define mn10300_local_dcache_flush_inv_page(start) \
+ mn10300_local_dcache_inv_page(start)
+#define mn10300_local_dcache_flush_inv_range(start, end) \
+ mn10300_local_dcache_inv_range(start, end)
+#define mn10300_local_dcache_flush_inv_range2(start, size) \
+ mn10300_local_dcache_inv_range2(start, size)
#define mn10300_dcache_flush() do {} while (0)
#define mn10300_dcache_flush_page(start) do {} while (0)
#define mn10300_dcache_flush_range(start, end) do {} while (0)
@@ -90,7 +79,26 @@ extern void mn10300_dcache_flush_inv_range2(unsigned start, unsigned size);
mn10300_dcache_inv_range2((start), (size))
#endif /* CONFIG_MN10300_CACHE_WBACK */
#else
+#define mn10300_local_icache_inv() do {} while (0)
+#define mn10300_local_icache_inv_page(start) do {} while (0)
+#define mn10300_local_icache_inv_range(start, end) do {} while (0)
+#define mn10300_local_icache_inv_range2(start, size) do {} while (0)
+#define mn10300_local_dcache_inv() do {} while (0)
+#define mn10300_local_dcache_inv_page(start) do {} while (0)
+#define mn10300_local_dcache_inv_range(start, end) do {} while (0)
+#define mn10300_local_dcache_inv_range2(start, size) do {} while (0)
+#define mn10300_local_dcache_flush() do {} while (0)
+#define mn10300_local_dcache_flush_inv_page(start) do {} while (0)
+#define mn10300_local_dcache_flush_inv() do {} while (0)
+#define mn10300_local_dcache_flush_inv_range(start, end) do {} while (0)
+#define mn10300_local_dcache_flush_inv_range2(start, size) do {} while (0)
+#define mn10300_local_dcache_flush_page(start) do {} while (0)
+#define mn10300_local_dcache_flush_range(start, end) do {} while (0)
+#define mn10300_local_dcache_flush_range2(start, size) do {} while (0)
#define mn10300_icache_inv() do {} while (0)
+#define mn10300_icache_inv_page(start) do {} while (0)
+#define mn10300_icache_inv_range(start, end) do {} while (0)
+#define mn10300_icache_inv_range2(start, size) do {} while (0)
#define mn10300_dcache_inv() do {} while (0)
#define mn10300_dcache_inv_page(start) do {} while (0)
#define mn10300_dcache_inv_range(start, end) do {} while (0)
@@ -103,10 +111,56 @@ extern void mn10300_dcache_flush_inv_range2(unsigned start, unsigned size);
#define mn10300_dcache_flush_page(start) do {} while (0)
#define mn10300_dcache_flush_range(start, end) do {} while (0)
#define mn10300_dcache_flush_range2(start, size) do {} while (0)
-#endif /* CONFIG_MN10300_CACHE_DISABLED */
+#endif /* CONFIG_MN10300_CACHE_ENABLED */
+
+/*
+ * Virtually-indexed cache management (our cache is physically indexed)
+ */
+#define flush_cache_all() do {} while (0)
+#define flush_cache_mm(mm) do {} while (0)
+#define flush_cache_dup_mm(mm) do {} while (0)
+#define flush_cache_range(mm, start, end) do {} while (0)
+#define flush_cache_page(vma, vmaddr, pfn) do {} while (0)
+#define flush_cache_vmap(start, end) do {} while (0)
+#define flush_cache_vunmap(start, end) do {} while (0)
+#define ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE 0
+#define flush_dcache_page(page) do {} while (0)
+#define flush_dcache_mmap_lock(mapping) do {} while (0)
+#define flush_dcache_mmap_unlock(mapping) do {} while (0)
+
+/*
+ * Physically-indexed cache management
+ */
+#if defined(CONFIG_MN10300_CACHE_FLUSH_ICACHE)
+extern void flush_icache_page(struct vm_area_struct *vma, struct page *page);
+extern void flush_icache_range(unsigned long start, unsigned long end);
+#elif defined(CONFIG_MN10300_CACHE_INV_ICACHE)
+static inline void flush_icache_page(struct vm_area_struct *vma,
+ struct page *page)
+{
+ mn10300_icache_inv_page(page_to_phys(page));
+}
+extern void flush_icache_range(unsigned long start, unsigned long end);
+#else
+#define flush_icache_range(start, end) do {} while (0)
+#define flush_icache_page(vma, pg) do {} while (0)
+#endif
+
+
+#define flush_icache_user_range(vma, pg, adr, len) \
+ flush_icache_range(adr, adr + len)
+
+#define copy_to_user_page(vma, page, vaddr, dst, src, len) \
+ do { \
+ memcpy(dst, src, len); \
+ flush_icache_page(vma, page); \
+ } while (0)
+
+#define copy_from_user_page(vma, page, vaddr, dst, src, len) \
+ memcpy(dst, src, len)
/*
- * internal debugging function
+ * Internal debugging function
*/
#ifdef CONFIG_DEBUG_PAGEALLOC
extern void kernel_map_pages(struct page *page, int numpages, int enable);
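
A typical caller of the physically indexed icache interface above would look like the sketch below; copy_in_new_insns() is a hypothetical helper shown only to illustrate the calling convention, not something defined by this patch.

#include <linux/string.h>
#include <asm/cacheflush.h>

/* hypothetical helper, illustration only */
static void copy_in_new_insns(void *dst, const void *insn, size_t len)
{
	memcpy(dst, insn, len);
	/* make the new bytes visible to instruction fetches; start/end are
	 * plain addresses because the caches are physically indexed */
	flush_icache_range((unsigned long)dst, (unsigned long)dst + len);
}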
diff --git a/arch/mn10300/include/asm/cpu-regs.h b/arch/mn10300/include/asm/cpu-regs.h
index 757e9b5388ea..90ed4a365c97 100644
--- a/arch/mn10300/include/asm/cpu-regs.h
+++ b/arch/mn10300/include/asm/cpu-regs.h
@@ -15,7 +15,6 @@
#include <linux/types.h>
#endif
-#ifdef CONFIG_MN10300_CPU_AM33V2
/* we tell the compiler to pretend to be AM33 so that it doesn't try and use
* the FP regs, but tell the assembler that we're actually allowed AM33v2
* instructions */
@@ -24,7 +23,6 @@ asm(" .am33_2\n");
#else
.am33_2
#endif
-#endif
#ifdef __KERNEL__
@@ -58,6 +56,9 @@ asm(" .am33_2\n");
#define EPSW_nAR 0x00040000 /* register bank control */
#define EPSW_ML 0x00080000 /* monitor level */
#define EPSW_FE 0x00100000 /* FPU enable */
+#define EPSW_IM_SHIFT 8 /* bit position of the EPSW.IM interrupt mask field */
+
+#define NUM2EPSW_IM(num) ((num) << EPSW_IM_SHIFT)
/* FPU registers */
#define FPCR_EF_I 0x00000001 /* inexact result FPU exception flag */
@@ -99,9 +100,11 @@ asm(" .am33_2\n");
#define CPUREV __SYSREGC(0xc0000050, u32) /* CPU revision register */
#define CPUREV_TYPE 0x0000000f /* CPU type */
#define CPUREV_TYPE_S 0
-#define CPUREV_TYPE_AM33V1 0x00000000 /* - AM33 V1 core, AM33/1.00 arch */
-#define CPUREV_TYPE_AM33V2 0x00000001 /* - AM33 V2 core, AM33/2.00 arch */
-#define CPUREV_TYPE_AM34V1 0x00000002 /* - AM34 V1 core, AM33/2.00 arch */
+#define CPUREV_TYPE_AM33_1 0x00000000 /* - AM33-1 core, AM33/1.00 arch */
+#define CPUREV_TYPE_AM33_2 0x00000001 /* - AM33-2 core, AM33/2.00 arch */
+#define CPUREV_TYPE_AM34_1 0x00000002 /* - AM34-1 core, AM33/2.00 arch */
+#define CPUREV_TYPE_AM33_3 0x00000003 /* - AM33-3 core, AM33/2.00 arch */
+#define CPUREV_TYPE_AM34_2 0x00000004 /* - AM34-2 core, AM33/3.00 arch */
#define CPUREV_REVISION 0x000000f0 /* CPU revision */
#define CPUREV_REVISION_S 4
#define CPUREV_ICWAY 0x00000f00 /* number of instruction cache ways */
@@ -180,6 +183,21 @@ asm(" .am33_2\n");
#define CHCTR_ICWMD 0x0f00 /* instruction cache way mode */
#define CHCTR_DCWMD 0xf000 /* data cache way mode */
+#ifdef CONFIG_AM34_2
+#define ICIVCR __SYSREG(0xc0000c00, u32) /* icache area invalidate control */
+#define ICIVCR_ICIVBSY 0x00000008 /* icache area invalidate busy */
+#define ICIVCR_ICI 0x00000001 /* icache area invalidate */
+
+#define ICIVMR __SYSREG(0xc0000c04, u32) /* icache area invalidate mask */
+
+#define DCPGCR __SYSREG(0xc0000c10, u32) /* data cache area purge control */
+#define DCPGCR_DCPGBSY 0x00000008 /* data cache area purge busy */
+#define DCPGCR_DCP 0x00000002 /* data cache area purge */
+#define DCPGCR_DCI 0x00000001 /* data cache area invalidate */
+
+#define DCPGMR __SYSREG(0xc0000c14, u32) /* data cache area purge mask */
+#endif /* CONFIG_AM34_2 */
+
/* MMU control registers */
#define MMUCTR __SYSREG(0xc0000090, u32) /* MMU control register */
#define MMUCTR_IRP 0x0000003f /* instruction TLB replace pointer */
@@ -203,6 +221,9 @@ asm(" .am33_2\n");
#define MMUCTR_DTL_LOCK0_3 0x03000000 /* - entry 0-3 locked */
#define MMUCTR_DTL_LOCK0_7 0x04000000 /* - entry 0-7 locked */
#define MMUCTR_DTL_LOCK0_15 0x05000000 /* - entry 0-15 locked */
+#ifdef CONFIG_AM34_2
+#define MMUCTR_WTE 0x80000000 /* write-through cache TLB entry bit enable */
+#endif
#define PIDR __SYSREG(0xc0000094, u16) /* PID register */
#define PIDR_PID 0x00ff /* process identifier */
@@ -231,14 +252,6 @@ asm(" .am33_2\n");
#define xPTEL_PS_4Mb 0x00000c00 /* - 4Mb page */
#define xPTEL_PPN 0xfffff006 /* physical page number */
-#define xPTEL_V_BIT 0 /* bit numbers corresponding to above masks */
-#define xPTEL_UNUSED1_BIT 1
-#define xPTEL_UNUSED2_BIT 2
-#define xPTEL_C_BIT 3
-#define xPTEL_PV_BIT 4
-#define xPTEL_D_BIT 5
-#define xPTEL_G_BIT 9
-
#define IPTEU __SYSREG(0xc00000a4, u32) /* instruction TLB virtual addr */
#define DPTEU __SYSREG(0xc00000b4, u32) /* data TLB virtual addr */
#define xPTEU_VPN 0xfffffc00 /* virtual page number */
@@ -262,7 +275,16 @@ asm(" .am33_2\n");
#define xPTEL2_PS_128Kb 0x00000100 /* - 128Kb page */
#define xPTEL2_PS_1Kb 0x00000200 /* - 1Kb page */
#define xPTEL2_PS_4Mb 0x00000300 /* - 4Mb page */
-#define xPTEL2_PPN 0xfffffc00 /* physical page number */
+#define xPTEL2_CWT 0x00000400 /* cacheable write-through */
+#define xPTEL2_UNUSED1 0x00000800 /* unused bit (broadcast mask) */
+#define xPTEL2_PPN 0xfffff000 /* physical page number */
+
+#define xPTEL2_V_BIT 0 /* bit numbers corresponding to above masks */
+#define xPTEL2_C_BIT 1
+#define xPTEL2_PV_BIT 2
+#define xPTEL2_D_BIT 3
+#define xPTEL2_G_BIT 7
+#define xPTEL2_UNUSED1_BIT 11
#define MMUFCR __SYSREGC(0xc000009c, u32) /* MMU exception cause */
#define MMUFCR_IFC __SYSREGC(0xc000009c, u16) /* MMU instruction excep cause */
@@ -285,6 +307,47 @@ asm(" .am33_2\n");
#define MMUFCR_xFC_PR_RWK_RWU 0x01c0 /* - R/W kernel and R/W user */
#define MMUFCR_xFC_ILLADDR 0x0200 /* illegal address excep flag */
+#ifdef CONFIG_MN10300_HAS_ATOMIC_OPS_UNIT
+/* atomic operation registers */
+#define AAR __SYSREG(0xc0000a00, u32) /* cacheable address */
+#define AAR2 __SYSREG(0xc0000a04, u32) /* uncacheable address */
+#define ADR __SYSREG(0xc0000a08, u32) /* data */
+#define ASR __SYSREG(0xc0000a0c, u32) /* status */
+#define AARU __SYSREG(0xd400aa00, u32) /* user address */
+#define ADRU __SYSREG(0xd400aa08, u32) /* user data */
+#define ASRU __SYSREG(0xd400aa0c, u32) /* user status */
+
+#define ASR_RW 0x00000008 /* read */
+#define ASR_BW 0x00000004 /* bus error */
+#define ASR_IW 0x00000002 /* interrupt */
+#define ASR_LW 0x00000001 /* bus lock */
+
+#define ASRU_RW ASR_RW /* read */
+#define ASRU_BW ASR_BW /* bus error */
+#define ASRU_IW ASR_IW /* interrupt */
+#define ASRU_LW ASR_LW /* bus lock */
+
+/* in inline ASM, we stick the base pointer into a reg and use offsets from
+ * it */
+#define ATOMIC_OPS_BASE_ADDR 0xc0000a00
+#ifndef __ASSEMBLY__
+asm(
+ "_AAR = 0\n"
+ "_AAR2 = 4\n"
+ "_ADR = 8\n"
+ "_ASR = 12\n");
+#else
+#define _AAR 0
+#define _AAR2 4
+#define _ADR 8
+#define _ASR 12
+#endif
+
+/* physical page address for userspace atomic operations registers */
+#define USER_ATOMIC_OPS_PAGE_ADDR 0xd400a000
+
+#endif /* CONFIG_MN10300_HAS_ATOMIC_OPS_UNIT */
+
#endif /* __KERNEL__ */
#endif /* _ASM_CPU_REGS_H */
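
The AAR/ADR/ASR protocol implied by the register definitions above can be modelled in plain C roughly as follows (a sketch only, assuming a 32-bit target; the real users are the inline-assembly loops in <asm/atomic.h> earlier in this patch):

struct aou_regs {
	volatile unsigned long aar;	/* _AAR:  cacheable address    */
	volatile unsigned long aar2;	/* _AAR2: uncacheable address  */
	volatile unsigned long adr;	/* _ADR:  data                 */
	volatile unsigned long asr;	/* _ASR:  status, 0 on success */
};

static unsigned long aou_xchg_model(struct aou_regs *aou,
				    unsigned long *m, unsigned long val)
{
	unsigned long old;

	do {
		aou->aar = (unsigned long)m;	/* arm the reservation     */
		old = aou->adr;			/* fetch the current value */
		aou->adr = val;			/* attempt the store       */
		(void)aou->adr;			/* flush the write         */
	} while (aou->asr);			/* retry if interrupted,
						 * bus error or lock lost  */
	return old;
}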
diff --git a/arch/mn10300/include/asm/dmactl-regs.h b/arch/mn10300/include/asm/dmactl-regs.h
index 58a199da0f4a..80337b339c90 100644
--- a/arch/mn10300/include/asm/dmactl-regs.h
+++ b/arch/mn10300/include/asm/dmactl-regs.h
@@ -11,91 +11,6 @@
#ifndef _ASM_DMACTL_REGS_H
#define _ASM_DMACTL_REGS_H
-#include <asm/cpu-regs.h>
-
-#ifdef __KERNEL__
-
-/* DMA registers */
-#define DMxCTR(N) __SYSREG(0xd2000000 + ((N) * 0x100), u32) /* control reg */
-#define DMxCTR_BG 0x0000001f /* transfer request source */
-#define DMxCTR_BG_SOFT 0x00000000 /* - software source */
-#define DMxCTR_BG_SC0TX 0x00000002 /* - serial port 0 transmission */
-#define DMxCTR_BG_SC0RX 0x00000003 /* - serial port 0 reception */
-#define DMxCTR_BG_SC1TX 0x00000004 /* - serial port 1 transmission */
-#define DMxCTR_BG_SC1RX 0x00000005 /* - serial port 1 reception */
-#define DMxCTR_BG_SC2TX 0x00000006 /* - serial port 2 transmission */
-#define DMxCTR_BG_SC2RX 0x00000007 /* - serial port 2 reception */
-#define DMxCTR_BG_TM0UFLOW 0x00000008 /* - timer 0 underflow */
-#define DMxCTR_BG_TM1UFLOW 0x00000009 /* - timer 1 underflow */
-#define DMxCTR_BG_TM2UFLOW 0x0000000a /* - timer 2 underflow */
-#define DMxCTR_BG_TM3UFLOW 0x0000000b /* - timer 3 underflow */
-#define DMxCTR_BG_TM6ACMPCAP 0x0000000c /* - timer 6A compare/capture */
-#define DMxCTR_BG_AFE 0x0000000d /* - analogue front-end interrupt source */
-#define DMxCTR_BG_ADC 0x0000000e /* - A/D conversion end interrupt source */
-#define DMxCTR_BG_IRDA 0x0000000f /* - IrDA interrupt source */
-#define DMxCTR_BG_RTC 0x00000010 /* - RTC interrupt source */
-#define DMxCTR_BG_XIRQ0 0x00000011 /* - XIRQ0 pin interrupt source */
-#define DMxCTR_BG_XIRQ1 0x00000012 /* - XIRQ1 pin interrupt source */
-#define DMxCTR_BG_XDMR0 0x00000013 /* - external request 0 source (XDMR0 pin) */
-#define DMxCTR_BG_XDMR1 0x00000014 /* - external request 1 source (XDMR1 pin) */
-#define DMxCTR_SAM 0x000000e0 /* DMA transfer src addr mode */
-#define DMxCTR_SAM_INCR 0x00000000 /* - increment */
-#define DMxCTR_SAM_DECR 0x00000020 /* - decrement */
-#define DMxCTR_SAM_FIXED 0x00000040 /* - fixed */
-#define DMxCTR_DAM 0x00000000 /* DMA transfer dest addr mode */
-#define DMxCTR_DAM_INCR 0x00000000 /* - increment */
-#define DMxCTR_DAM_DECR 0x00000100 /* - decrement */
-#define DMxCTR_DAM_FIXED 0x00000200 /* - fixed */
-#define DMxCTR_TM 0x00001800 /* DMA transfer mode */
-#define DMxCTR_TM_BATCH 0x00000000 /* - batch transfer */
-#define DMxCTR_TM_INTERM 0x00001000 /* - intermittent transfer */
-#define DMxCTR_UT 0x00006000 /* DMA transfer unit */
-#define DMxCTR_UT_1 0x00000000 /* - 1 byte */
-#define DMxCTR_UT_2 0x00002000 /* - 2 byte */
-#define DMxCTR_UT_4 0x00004000 /* - 4 byte */
-#define DMxCTR_UT_16 0x00006000 /* - 16 byte */
-#define DMxCTR_TEN 0x00010000 /* DMA channel transfer enable */
-#define DMxCTR_RQM 0x00060000 /* external request input source mode */
-#define DMxCTR_RQM_FALLEDGE 0x00000000 /* - falling edge */
-#define DMxCTR_RQM_RISEEDGE 0x00020000 /* - rising edge */
-#define DMxCTR_RQM_LOLEVEL 0x00040000 /* - low level */
-#define DMxCTR_RQM_HILEVEL 0x00060000 /* - high level */
-#define DMxCTR_RQF 0x01000000 /* DMA transfer request flag */
-#define DMxCTR_XEND 0x80000000 /* DMA transfer end flag */
-
-#define DMxSRC(N) __SYSREG(0xd2000004 + ((N) * 0x100), u32) /* control reg */
-
-#define DMxDST(N) __SYSREG(0xd2000008 + ((N) * 0x100), u32) /* src addr reg */
-
-#define DMxSIZ(N) __SYSREG(0xd200000c + ((N) * 0x100), u32) /* dest addr reg */
-#define DMxSIZ_CT 0x000fffff /* number of bytes to transfer */
-
-#define DMxCYC(N) __SYSREG(0xd2000010 + ((N) * 0x100), u32) /* intermittent
- * size reg */
-#define DMxCYC_CYC 0x000000ff /* number of interrmittent transfers -1 */
-
-#define DM0IRQ 16 /* DMA channel 0 complete IRQ */
-#define DM1IRQ 17 /* DMA channel 1 complete IRQ */
-#define DM2IRQ 18 /* DMA channel 2 complete IRQ */
-#define DM3IRQ 19 /* DMA channel 3 complete IRQ */
-
-#define DM0ICR GxICR(DM0IRQ) /* DMA channel 0 complete intr ctrl reg */
-#define DM1ICR GxICR(DM0IR1) /* DMA channel 1 complete intr ctrl reg */
-#define DM2ICR GxICR(DM0IR2) /* DMA channel 2 complete intr ctrl reg */
-#define DM3ICR GxICR(DM0IR3) /* DMA channel 3 complete intr ctrl reg */
-
-#ifndef __ASSEMBLY__
-
-struct mn10300_dmactl_regs {
- u32 ctr;
- const void *src;
- void *dst;
- u32 siz;
- u32 cyc;
-} __attribute__((aligned(0x100)));
-
-#endif /* __ASSEMBLY__ */
-
-#endif /* __KERNEL__ */
+#include <proc/dmactl-regs.h>
#endif /* _ASM_DMACTL_REGS_H */
diff --git a/arch/mn10300/include/asm/elf.h b/arch/mn10300/include/asm/elf.h
index e5fa97cd9a14..8157c9267f42 100644
--- a/arch/mn10300/include/asm/elf.h
+++ b/arch/mn10300/include/asm/elf.h
@@ -32,6 +32,12 @@
#define R_MN10300_ALIGN 34 /* Alignment requirement. */
/*
+ * AM33/AM34 HW Capabilities
+ */
+#define HWCAP_MN10300_ATOMIC_OP_UNIT 1 /* Has AM34 Atomic Operations */
+
+
+/*
* ELF register definitions..
*/
typedef unsigned long elf_greg_t;
@@ -47,8 +53,6 @@ typedef struct {
u_int32_t fpcr;
} elf_fpregset_t;
-extern int dump_fpu(struct pt_regs *, elf_fpregset_t *);
-
/*
* This is used to ensure we don't load something for the wrong architecture
*/
@@ -130,7 +134,11 @@ do { \
* instruction set this CPU supports. This could be done in user space,
* but it's not easy, and we've already done it here.
*/
+#ifdef CONFIG_MN10300_HAS_ATOMIC_OPS_UNIT
+#define ELF_HWCAP (HWCAP_MN10300_ATOMIC_OP_UNIT)
+#else
#define ELF_HWCAP (0)
+#endif
/*
* This yields a string that ld.so will use to load implementation
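
On the user side, the HWCAP bit exported through ELF_HWCAP above would typically be tested via the ELF auxiliary vector; the program below is an illustrative example, not part of the patch.

#include <sys/auxv.h>
#include <stdio.h>

#define HWCAP_MN10300_ATOMIC_OP_UNIT 1	/* value defined in <asm/elf.h> above */

int main(void)
{
	unsigned long hwcap = getauxval(AT_HWCAP);

	printf("atomic operations unit: %s\n",
	       (hwcap & HWCAP_MN10300_ATOMIC_OP_UNIT) ? "yes" : "no");
	return 0;
}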
diff --git a/arch/mn10300/include/asm/exceptions.h b/arch/mn10300/include/asm/exceptions.h
index fa16466ef3f9..ca3e20508c77 100644
--- a/arch/mn10300/include/asm/exceptions.h
+++ b/arch/mn10300/include/asm/exceptions.h
@@ -15,8 +15,8 @@
/*
* define the breakpoint instruction opcode to use
- * - note that the JTAG unit steals 0xFF, so we want to avoid that if we can
- * (can use 0xF7)
+ * - note that the JTAG unit steals 0xFF, so you can't use JTAG and GDBSTUB at
+ * the same time.
*/
#define GDBSTUB_BKPT 0xFF
@@ -90,7 +90,6 @@ enum exception_code {
extern void __set_intr_stub(enum exception_code code, void *handler);
extern void set_intr_stub(enum exception_code code, void *handler);
-extern void set_jtag_stub(enum exception_code code, void *handler);
struct pt_regs;
@@ -102,7 +101,6 @@ extern asmlinkage void dtlb_aerror(void);
extern asmlinkage void raw_bus_error(void);
extern asmlinkage void double_fault(void);
extern asmlinkage int system_call(struct pt_regs *);
-extern asmlinkage void fpu_exception(struct pt_regs *, enum exception_code);
extern asmlinkage void nmi(struct pt_regs *, enum exception_code);
extern asmlinkage void uninitialised_exception(struct pt_regs *,
enum exception_code);
@@ -116,6 +114,8 @@ extern void die(const char *, struct pt_regs *, enum exception_code)
extern int die_if_no_fixup(const char *, struct pt_regs *, enum exception_code);
+#define NUM2EXCEP_IRQ_LEVEL(num) (EXCEP_IRQ_LEVEL0 + (num) * 8)
+
#endif /* __ASSEMBLY__ */
#endif /* _ASM_EXCEPTIONS_H */
diff --git a/arch/mn10300/include/asm/fpu.h b/arch/mn10300/include/asm/fpu.h
index 64a2b83a7a6a..b7625de8eade 100644
--- a/arch/mn10300/include/asm/fpu.h
+++ b/arch/mn10300/include/asm/fpu.h
@@ -12,74 +12,125 @@
#ifndef _ASM_FPU_H
#define _ASM_FPU_H
-#include <asm/processor.h>
+#ifndef __ASSEMBLY__
+
+#include <linux/sched.h>
+#include <asm/exceptions.h>
#include <asm/sigcontext.h>
-#include <asm/user.h>
#ifdef __KERNEL__
-/* the task that owns the FPU state */
+extern asmlinkage void fpu_disabled(void);
+
+#ifdef CONFIG_FPU
+
+#ifdef CONFIG_LAZY_SAVE_FPU
+/* the task that currently owns the FPU state */
extern struct task_struct *fpu_state_owner;
+#endif
-#define set_using_fpu(tsk) \
-do { \
- (tsk)->thread.fpu_flags |= THREAD_USING_FPU; \
-} while (0)
+#if (THREAD_USING_FPU & ~0xff)
+#error THREAD_USING_FPU must be smaller than 0x100.
+#endif
-#define clear_using_fpu(tsk) \
-do { \
- (tsk)->thread.fpu_flags &= ~THREAD_USING_FPU; \
-} while (0)
+static inline void set_using_fpu(struct task_struct *tsk)
+{
+ asm volatile(
+ "bset %0,(0,%1)"
+ :
+ : "i"(THREAD_USING_FPU), "a"(&tsk->thread.fpu_flags)
+ : "memory", "cc");
+}
-#define is_using_fpu(tsk) ((tsk)->thread.fpu_flags & THREAD_USING_FPU)
+static inline void clear_using_fpu(struct task_struct *tsk)
+{
+ asm volatile(
+ "bclr %0,(0,%1)"
+ :
+ : "i"(THREAD_USING_FPU), "a"(&tsk->thread.fpu_flags)
+ : "memory", "cc");
+}
-#define unlazy_fpu(tsk) \
-do { \
- preempt_disable(); \
- if (fpu_state_owner == (tsk)) \
- fpu_save(&tsk->thread.fpu_state); \
- preempt_enable(); \
-} while (0)
-
-#define exit_fpu() \
-do { \
- struct task_struct *__tsk = current; \
- preempt_disable(); \
- if (fpu_state_owner == __tsk) \
- fpu_state_owner = NULL; \
- preempt_enable(); \
-} while (0)
-
-#define flush_fpu() \
-do { \
- struct task_struct *__tsk = current; \
- preempt_disable(); \
- if (fpu_state_owner == __tsk) { \
- fpu_state_owner = NULL; \
- __tsk->thread.uregs->epsw &= ~EPSW_FE; \
- } \
- preempt_enable(); \
- clear_using_fpu(__tsk); \
-} while (0)
+#define is_using_fpu(tsk) ((tsk)->thread.fpu_flags & THREAD_USING_FPU)
-extern asmlinkage void fpu_init_state(void);
extern asmlinkage void fpu_kill_state(struct task_struct *);
-extern asmlinkage void fpu_disabled(struct pt_regs *, enum exception_code);
extern asmlinkage void fpu_exception(struct pt_regs *, enum exception_code);
-
-#ifdef CONFIG_FPU
+extern asmlinkage void fpu_invalid_op(struct pt_regs *, enum exception_code);
+extern asmlinkage void fpu_init_state(void);
extern asmlinkage void fpu_save(struct fpu_state_struct *);
-extern asmlinkage void fpu_restore(struct fpu_state_struct *);
-#else
-#define fpu_save(a)
-#define fpu_restore(a)
-#endif /* CONFIG_FPU */
-
-/*
- * signal frame handlers
- */
extern int fpu_setup_sigcontext(struct fpucontext *buf);
extern int fpu_restore_sigcontext(struct fpucontext *buf);
+static inline void unlazy_fpu(struct task_struct *tsk)
+{
+ preempt_disable();
+#ifndef CONFIG_LAZY_SAVE_FPU
+ if (tsk->thread.fpu_flags & THREAD_HAS_FPU) {
+ fpu_save(&tsk->thread.fpu_state);
+ tsk->thread.fpu_flags &= ~THREAD_HAS_FPU;
+ tsk->thread.uregs->epsw &= ~EPSW_FE;
+ }
+#else
+ if (fpu_state_owner == tsk)
+ fpu_save(&tsk->thread.fpu_state);
+#endif
+ preempt_enable();
+}
+
+static inline void exit_fpu(void)
+{
+#ifdef CONFIG_LAZY_SAVE_FPU
+ struct task_struct *tsk = current;
+
+ preempt_disable();
+ if (fpu_state_owner == tsk)
+ fpu_state_owner = NULL;
+ preempt_enable();
+#endif
+}
+
+static inline void flush_fpu(void)
+{
+ struct task_struct *tsk = current;
+
+ preempt_disable();
+#ifndef CONFIG_LAZY_SAVE_FPU
+ if (tsk->thread.fpu_flags & THREAD_HAS_FPU) {
+ tsk->thread.fpu_flags &= ~THREAD_HAS_FPU;
+ tsk->thread.uregs->epsw &= ~EPSW_FE;
+ }
+#else
+ if (fpu_state_owner == tsk) {
+ fpu_state_owner = NULL;
+ tsk->thread.uregs->epsw &= ~EPSW_FE;
+ }
+#endif
+ preempt_enable();
+ clear_using_fpu(tsk);
+}
+
+#else /* CONFIG_FPU */
+
+extern asmlinkage
+void unexpected_fpu_exception(struct pt_regs *, enum exception_code);
+#define fpu_invalid_op unexpected_fpu_exception
+#define fpu_exception unexpected_fpu_exception
+
+struct task_struct;
+struct fpu_state_struct;
+static inline bool is_using_fpu(struct task_struct *tsk) { return false; }
+static inline void set_using_fpu(struct task_struct *tsk) {}
+static inline void clear_using_fpu(struct task_struct *tsk) {}
+static inline void fpu_init_state(void) {}
+static inline void fpu_save(struct fpu_state_struct *s) {}
+static inline void fpu_kill_state(struct task_struct *tsk) {}
+static inline void unlazy_fpu(struct task_struct *tsk) {}
+static inline void exit_fpu(void) {}
+static inline void flush_fpu(void) {}
+static inline int fpu_setup_sigcontext(struct fpucontext *buf) { return 0; }
+static inline int fpu_restore_sigcontext(struct fpucontext *buf) { return 0; }
+#endif /* CONFIG_FPU */
+
#endif /* __KERNEL__ */
+#endif /* !__ASSEMBLY__ */
#endif /* _ASM_FPU_H */
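
The difference between the eager and lazy save policies selected by CONFIG_LAZY_SAVE_FPU can be summarised by the following user-space sketch (illustrative only; the struct and helper names are invented for the example):

struct task_model {
	int has_fpu;			/* eager policy: mirrors THREAD_HAS_FPU */
	unsigned long fpu_save[32];	/* stand-in for struct fpu_state_struct */
};

static struct task_model *fpu_state_owner;	/* lazy policy only */

static void save_fpu_regs(unsigned long *save_area)
{
	(void)save_area;		/* stand-in for the real fpu_save() */
}

static void unlazy_fpu_model(struct task_model *tsk, int lazy)
{
	if (lazy) {
		/* registers are written back only if they are still live
		 * in the hardware on behalf of this task */
		if (fpu_state_owner == tsk)
			save_fpu_regs(tsk->fpu_save);
	} else if (tsk->has_fpu) {
		/* eager policy: the task itself knows it loaded the FPU */
		save_fpu_regs(tsk->fpu_save);
		tsk->has_fpu = 0;
	}
}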
diff --git a/arch/mn10300/include/asm/frame.inc b/arch/mn10300/include/asm/frame.inc
index 5b1949bdf039..2ee58e3eb6b3 100644
--- a/arch/mn10300/include/asm/frame.inc
+++ b/arch/mn10300/include/asm/frame.inc
@@ -18,6 +18,7 @@
#ifndef __ASM_OFFSETS_H__
#include <asm/asm-offsets.h>
#endif
+#include <asm/thread_info.h>
#define pi break
@@ -37,11 +38,15 @@
movm [d2,d3,a2,a3,exreg0,exreg1,exother],(sp)
mov sp,fp # FRAME pointer in A3
add -12,sp # allow for calls to be made
- mov (__frame),a1
- mov a1,(REG_NEXT,fp)
- mov fp,(__frame)
- and ~EPSW_FE,epsw # disable the FPU inside the kernel
+ # push the exception frame onto the front of the list
+ GET_THREAD_INFO a1
+ mov (TI_frame,a1),a0
+ mov a0,(REG_NEXT,fp)
+ mov fp,(TI_frame,a1)
+
+ # disable the FPU inside the kernel
+ and ~EPSW_FE,epsw
# we may be holding current in E2
#ifdef CONFIG_MN10300_CURRENT_IN_E2
@@ -57,10 +62,11 @@
.macro RESTORE_ALL
# peel back the stack to the calling frame
# - this permits execve() to discard extra frames due to kernel syscalls
- mov (__frame),fp
+ GET_THREAD_INFO a0
+ mov (TI_frame,a0),fp
mov fp,sp
- mov (REG_NEXT,fp),d0 # userspace has regs->next == 0
- mov d0,(__frame)
+ mov (REG_NEXT,fp),d0
+ mov d0,(TI_frame,a0) # userspace has regs->next == 0
#ifndef CONFIG_MN10300_USING_JTAG
mov (REG_EPSW,fp),d0
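
In C terms, the SAVE_ALL/RESTORE_ALL changes above maintain a singly linked list of exception frames headed at thread_info->frame rather than in the old __frame global; a sketch follows (illustration only, with simplified types):

struct frame_model {
	struct frame_model *next;	/* the REG_NEXT slot of struct pt_regs */
	/* saved registers would follow here */
};

struct thread_info_model {
	struct frame_model *frame;	/* the TI_frame slot */
};

static void save_all_model(struct thread_info_model *ti, struct frame_model *new)
{
	new->next = ti->frame;		/* link to the interrupted frame */
	ti->frame = new;		/* this is now the current frame */
}

static void restore_all_model(struct thread_info_model *ti)
{
	ti->frame = ti->frame->next;	/* userspace has regs->next == 0 */
}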
diff --git a/arch/mn10300/include/asm/gdb-stub.h b/arch/mn10300/include/asm/gdb-stub.h
index 41ed26763964..f5495ad82b77 100644
--- a/arch/mn10300/include/asm/gdb-stub.h
+++ b/arch/mn10300/include/asm/gdb-stub.h
@@ -110,7 +110,7 @@ extern asmlinkage void gdbstub_exception(struct pt_regs *, enum exception_code);
extern asmlinkage void __gdbstub_bug_trap(void);
extern asmlinkage void __gdbstub_pause(void);
-#ifndef CONFIG_MN10300_CACHE_DISABLED
+#ifdef CONFIG_MN10300_CACHE_ENABLED
extern asmlinkage void gdbstub_purge_cache(void);
#else
#define gdbstub_purge_cache() do {} while (0)
diff --git a/arch/mn10300/include/asm/hardirq.h b/arch/mn10300/include/asm/hardirq.h
index 54d950117674..0000d650b55f 100644
--- a/arch/mn10300/include/asm/hardirq.h
+++ b/arch/mn10300/include/asm/hardirq.h
@@ -19,9 +19,10 @@
/* assembly code in softirq.h is sensitive to the offsets of these fields */
typedef struct {
unsigned int __softirq_pending;
- unsigned long idle_timestamp;
+#ifdef CONFIG_MN10300_WD_TIMER
unsigned int __nmi_count; /* arch dependent */
unsigned int __irq_count; /* arch dependent */
+#endif
} ____cacheline_aligned irq_cpustat_t;
#include <linux/irq_cpustat.h> /* Standard mappings for irq_cpustat_t above */
diff --git a/arch/mn10300/include/asm/highmem.h b/arch/mn10300/include/asm/highmem.h
index b0b187a29b88..bfe2d88604d9 100644
--- a/arch/mn10300/include/asm/highmem.h
+++ b/arch/mn10300/include/asm/highmem.h
@@ -70,15 +70,16 @@ static inline void kunmap(struct page *page)
* be used in IRQ contexts, so in some (very limited) cases we need
* it.
*/
-static inline unsigned long kmap_atomic(struct page *page, enum km_type type)
+static inline unsigned long __kmap_atomic(struct page *page)
{
- enum fixed_addresses idx;
unsigned long vaddr;
+ int idx, type;
+ pagefault_disable();
if (page < highmem_start_page)
return page_address(page);
- debug_kmap_atomic(type);
+ type = kmap_atomic_idx_push();
idx = type + KM_TYPE_NR * smp_processor_id();
vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
#if HIGHMEM_DEBUG
@@ -86,31 +87,42 @@ static inline unsigned long kmap_atomic(struct page *page, enum km_type type)
BUG();
#endif
set_pte(kmap_pte - idx, mk_pte(page, kmap_prot));
- __flush_tlb_one(vaddr);
+ local_flush_tlb_one(vaddr);
return vaddr;
}
-static inline void kunmap_atomic_notypecheck(unsigned long vaddr, enum km_type type)
+static inline void __kunmap_atomic(unsigned long vaddr)
{
-#if HIGHMEM_DEBUG
- enum fixed_addresses idx = type + KM_TYPE_NR * smp_processor_id();
+ int type;
- if (vaddr < FIXADDR_START) /* FIXME */
+ if (vaddr < FIXADDR_START) { /* FIXME */
+ pagefault_enable();
return;
+ }
- if (vaddr != __fix_to_virt(FIX_KMAP_BEGIN + idx))
- BUG();
+ type = kmap_atomic_idx();
- /*
- * force other mappings to Oops if they'll try to access
- * this pte without first remap it
- */
- pte_clear(kmap_pte - idx);
- __flush_tlb_one(vaddr);
+#if HIGHMEM_DEBUG
+ {
+ unsigned int idx;
+ idx = type + KM_TYPE_NR * smp_processor_id();
+
+ if (vaddr != __fix_to_virt(FIX_KMAP_BEGIN + idx))
+ BUG();
+
+ /*
+ * force other mappings to Oops if they'll try to access
+ * this pte without first remap it
+ */
+ pte_clear(kmap_pte - idx);
+ local_flush_tlb_one(vaddr);
+ }
#endif
-}
+ kmap_atomic_idx_pop();
+ pagefault_enable();
+}
#endif /* __KERNEL__ */
#endif /* _ASM_HIGHMEM_H */
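
A usage sketch of the new type-less atomic kmap API above (illustrative only; copy_highpage_model() is an invented name): maps must be released in the reverse order they were taken, since kmap_atomic_idx_push()/_pop() hand out fixmap slots in strict stack order.

#include <linux/highmem.h>
#include <linux/string.h>

static void copy_highpage_model(struct page *dst, struct page *src)
{
	unsigned long vdst, vsrc;

	vsrc = __kmap_atomic(src);	/* takes fixmap slot N   */
	vdst = __kmap_atomic(dst);	/* takes fixmap slot N+1 */
	memcpy((void *)vdst, (void *)vsrc, PAGE_SIZE);
	__kunmap_atomic(vdst);		/* slot N+1 released first */
	__kunmap_atomic(vsrc);		/* then slot N             */
}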
diff --git a/arch/mn10300/include/asm/intctl-regs.h b/arch/mn10300/include/asm/intctl-regs.h
index ba544c796c5a..585b708c2bc0 100644
--- a/arch/mn10300/include/asm/intctl-regs.h
+++ b/arch/mn10300/include/asm/intctl-regs.h
@@ -15,24 +15,19 @@
#ifdef __KERNEL__
-/* interrupt controller registers */
-#define GxICR(X) __SYSREG(0xd4000000 + (X) * 4, u16) /* group irq ctrl regs */
-
-#define IAGR __SYSREG(0xd4000100, u16) /* intr acceptance group reg */
-#define IAGR_GN 0x00fc /* group number register
- * (documentation _has_ to be wrong)
- */
+/*
+ * Interrupt controller registers
+ * - Registers 64-191 are at addresses offset from the main array
+ */
+#define GxICR(X) \
+ __SYSREG(0xd4000000 + (X) * 4 + \
+ (((X) >= 64) && ((X) < 192)) * 0xf00, u16)
-#define EXTMD __SYSREG(0xd4000200, u16) /* external pin intr spec reg */
-#define GET_XIRQ_TRIGGER(X) ((EXTMD >> ((X) * 2)) & 3)
+#define GxICR_u8(X) \
+ __SYSREG(0xd4000000 + (X) * 4 + \
+ (((X) >= 64) && ((X) < 192)) * 0xf00, u8)
-#define SET_XIRQ_TRIGGER(X,Y) \
-do { \
- u16 x = EXTMD; \
- x &= ~(3 << ((X) * 2)); \
- x |= ((Y) & 3) << ((X) * 2); \
- EXTMD = x; \
-} while (0)
+#include <proc/intctl-regs.h>
#define XIRQ_TRIGGER_LOWLEVEL 0
#define XIRQ_TRIGGER_HILEVEL 1
@@ -59,10 +54,18 @@ do { \
#define GxICR_LEVEL_5 0x5000 /* - level 5 */
#define GxICR_LEVEL_6 0x6000 /* - level 6 */
#define GxICR_LEVEL_SHIFT 12
+#define GxICR_NMI 0x8000 /* nmi request flag */
+
+#define NUM2GxICR_LEVEL(num) ((num) << GxICR_LEVEL_SHIFT)
#ifndef __ASSEMBLY__
extern void set_intr_level(int irq, u16 level);
-extern void set_intr_postackable(int irq);
+extern void mn10300_intc_set_level(unsigned int irq, unsigned int level);
+extern void mn10300_intc_clear(unsigned int irq);
+extern void mn10300_intc_set(unsigned int irq);
+extern void mn10300_intc_enable(unsigned int irq);
+extern void mn10300_intc_disable(unsigned int irq);
+extern void mn10300_set_lateack_irq_type(int irq);
#endif
/* external interrupts */
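
The banked layout the new GxICR() macro encodes can be written out as ordinary C for clarity (an illustration, not part of the patch):

#include <stdint.h>

static uintptr_t gxicr_addr_model(unsigned int x)
{
	uintptr_t addr = 0xd4000000UL + x * 4;

	/* groups 64-191 sit in a second bank 0xf00 bytes above where a
	 * flat array would put them */
	if (x >= 64 && x < 192)
		addr += 0xf00;
	return addr;
}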
diff --git a/arch/mn10300/include/asm/io.h b/arch/mn10300/include/asm/io.h
index c1a4119e6497..787255da744e 100644
--- a/arch/mn10300/include/asm/io.h
+++ b/arch/mn10300/include/asm/io.h
@@ -206,6 +206,19 @@ static inline void outsl(unsigned long addr, const void *buffer, int count)
#define iowrite32_rep(p, src, count) \
outsl((unsigned long) (p), (src), (count))
+#define readsb(p, dst, count) \
+ insb((unsigned long) (p), (dst), (count))
+#define readsw(p, dst, count) \
+ insw((unsigned long) (p), (dst), (count))
+#define readsl(p, dst, count) \
+ insl((unsigned long) (p), (dst), (count))
+
+#define writesb(p, src, count) \
+ outsb((unsigned long) (p), (src), (count))
+#define writesw(p, src, count) \
+ outsw((unsigned long) (p), (src), (count))
+#define writesl(p, src, count) \
+ outsl((unsigned long) (p), (src), (count))
#define IO_SPACE_LIMIT 0xffffffff
diff --git a/arch/mn10300/include/asm/irq.h b/arch/mn10300/include/asm/irq.h
index 25c045d16d1c..1a73fb3f60c6 100644
--- a/arch/mn10300/include/asm/irq.h
+++ b/arch/mn10300/include/asm/irq.h
@@ -21,8 +21,16 @@
/* this number is used when no interrupt has been assigned */
#define NO_IRQ INT_MAX
-/* hardware irq numbers */
-#define NR_IRQS GxICR_NUM_IRQS
+/*
+ * hardware irq numbers
+ * - the ASB2364 has an FPGA with an IRQ multiplexer on it
+ */
+#ifdef CONFIG_MN10300_UNIT_ASB2364
+#include <unit/irq.h>
+#else
+#define NR_CPU_IRQS GxICR_NUM_IRQS
+#define NR_IRQS NR_CPU_IRQS
+#endif
/* external hardware irq numbers */
#define NR_XIRQS GxICR_NUM_XIRQS
diff --git a/arch/mn10300/include/asm/irq_regs.h b/arch/mn10300/include/asm/irq_regs.h
index a848cd232eb4..97d0cb5af807 100644
--- a/arch/mn10300/include/asm/irq_regs.h
+++ b/arch/mn10300/include/asm/irq_regs.h
@@ -18,7 +18,11 @@
#define ARCH_HAS_OWN_IRQ_REGS
#ifndef __ASSEMBLY__
-#define get_irq_regs() (__frame)
+static inline __attribute__((const))
+struct pt_regs *get_irq_regs(void)
+{
+ return current_frame();
+}
#endif
#endif /* _ASM_IRQ_REGS_H */
diff --git a/arch/mn10300/include/asm/irqflags.h b/arch/mn10300/include/asm/irqflags.h
index 5e529a117cb2..7a7ae12c7119 100644
--- a/arch/mn10300/include/asm/irqflags.h
+++ b/arch/mn10300/include/asm/irqflags.h
@@ -13,6 +13,9 @@
#define _ASM_IRQFLAGS_H
#include <asm/cpu-regs.h>
+#ifndef __ASSEMBLY__
+#include <linux/smp.h>
+#endif
/*
* interrupt control
@@ -23,11 +26,7 @@
* - level 6 - timer interrupt
* - "enabled": run in IM7
*/
-#ifdef CONFIG_MN10300_TTYSM
-#define MN10300_CLI_LEVEL EPSW_IM_2
-#else
-#define MN10300_CLI_LEVEL EPSW_IM_1
-#endif
+#define MN10300_CLI_LEVEL (CONFIG_LINUX_CLI_LEVEL << EPSW_IM_SHIFT)
#ifndef __ASSEMBLY__
@@ -64,11 +63,12 @@ static inline unsigned long arch_local_irq_save(void)
/*
* we make sure arch_irq_enable() doesn't cause priority inversion
*/
-extern unsigned long __mn10300_irq_enabled_epsw;
+extern unsigned long __mn10300_irq_enabled_epsw[];
static inline void arch_local_irq_enable(void)
{
unsigned long tmp;
+ int cpu = raw_smp_processor_id();
asm volatile(
" mov epsw,%0 \n"
@@ -76,8 +76,8 @@ static inline void arch_local_irq_enable(void)
" or %2,%0 \n"
" mov %0,epsw \n"
: "=&d"(tmp)
- : "i"(~EPSW_IM), "r"(__mn10300_irq_enabled_epsw)
- : "memory");
+ : "i"(~EPSW_IM), "r"(__mn10300_irq_enabled_epsw[cpu])
+ : "memory", "cc");
}
static inline void arch_local_irq_restore(unsigned long flags)
@@ -94,7 +94,7 @@ static inline void arch_local_irq_restore(unsigned long flags)
static inline bool arch_irqs_disabled_flags(unsigned long flags)
{
- return (flags & EPSW_IM) <= MN10300_CLI_LEVEL;
+ return (flags & (EPSW_IE | EPSW_IM)) != (EPSW_IE | EPSW_IM_7);
}
static inline bool arch_irqs_disabled(void)
@@ -109,6 +109,9 @@ static inline bool arch_irqs_disabled(void)
*/
static inline void arch_safe_halt(void)
{
+#ifdef CONFIG_SMP
+ arch_local_irq_enable();
+#else
asm volatile(
" or %0,epsw \n"
" nop \n"
@@ -117,7 +120,97 @@ static inline void arch_safe_halt(void)
:
: "i"(EPSW_IE|EPSW_IM), "n"(&CPUM), "i"(CPUM_SLEEP)
: "cc");
+#endif
}
+#define __sleep_cpu() \
+do { \
+ asm volatile( \
+ " bset %1,(%0)\n" \
+ "1: btst %1,(%0)\n" \
+ " bne 1b\n" \
+ : \
+ : "i"(&CPUM), "i"(CPUM_SLEEP) \
+ : "cc" \
+ ); \
+} while (0)
+
+static inline void arch_local_cli(void)
+{
+ asm volatile(
+ " and %0,epsw \n"
+ " nop \n"
+ " nop \n"
+ " nop \n"
+ :
+ : "i"(~EPSW_IE)
+ : "memory"
+ );
+}
+
+static inline unsigned long arch_local_cli_save(void)
+{
+ unsigned long flags = arch_local_save_flags();
+ arch_local_cli();
+ return flags;
+}
+
+static inline void arch_local_sti(void)
+{
+ asm volatile(
+ " or %0,epsw \n"
+ :
+ : "i"(EPSW_IE)
+ : "memory");
+}
+
+static inline void arch_local_change_intr_mask_level(unsigned long level)
+{
+ asm volatile(
+ " and %0,epsw \n"
+ " or %1,epsw \n"
+ :
+ : "i"(~EPSW_IM), "i"(EPSW_IE | level)
+ : "cc", "memory");
+}
+
+#else /* !__ASSEMBLY__ */
+
+#define LOCAL_SAVE_FLAGS(reg) \
+ mov epsw,reg
+
+#define LOCAL_IRQ_DISABLE \
+ and ~EPSW_IM,epsw; \
+ or EPSW_IE|MN10300_CLI_LEVEL,epsw; \
+ nop; \
+ nop; \
+ nop
+
+#define LOCAL_IRQ_ENABLE \
+ or EPSW_IE|EPSW_IM_7,epsw
+
+#define LOCAL_IRQ_RESTORE(reg) \
+ mov reg,epsw
+
+#define LOCAL_CLI_SAVE(reg) \
+ mov epsw,reg; \
+ and ~EPSW_IE,epsw; \
+ nop; \
+ nop; \
+ nop
+
+#define LOCAL_CLI \
+ and ~EPSW_IE,epsw; \
+ nop; \
+ nop; \
+ nop
+
+#define LOCAL_STI \
+ or EPSW_IE,epsw
+
+#define LOCAL_CHANGE_INTR_MASK_LEVEL(level) \
+ and ~EPSW_IM,epsw; \
+ or EPSW_IE|(level),epsw
+
#endif /* __ASSEMBLY__ */
#endif /* _ASM_IRQFLAGS_H */
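
[Editorial sketch] The reworked arch_irqs_disabled_flags() above treats interrupts as enabled only when EPSW.IE is set and the interrupt mask is at level 7; a cleared IE bit or any lower mask level counts as disabled. A minimal host-side sketch of that predicate follows; the EPSW bit values are assumed placeholders, not the definitions from <asm/cpu-regs.h>.

/* sketch of the new arch_irqs_disabled_flags() test - placeholder bit values */
#include <assert.h>
#include <stdbool.h>

#define EPSW_IE    0x00000800UL   /* assumed: interrupt enable bit */
#define EPSW_IM    0x00000700UL   /* assumed: interrupt mask field */
#define EPSW_IM_7  0x00000700UL   /* assumed: mask level 7 (fully enabled) */

static bool irqs_disabled_flags_sketch(unsigned long flags)
{
	/* enabled only when IE is set AND the mask level is IM7 */
	return (flags & (EPSW_IE | EPSW_IM)) != (EPSW_IE | EPSW_IM_7);
}

int main(void)
{
	assert(!irqs_disabled_flags_sketch(EPSW_IE | EPSW_IM_7));    /* fully enabled */
	assert(irqs_disabled_flags_sketch(EPSW_IM_7));               /* IE clear -> disabled */
	assert(irqs_disabled_flags_sketch(EPSW_IE | 0x00000200UL));  /* lower mask level -> disabled */
	return 0;
}
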
diff --git a/arch/mn10300/include/asm/mmu_context.h b/arch/mn10300/include/asm/mmu_context.h
index cb294c244de3..c8f6c82672ad 100644
--- a/arch/mn10300/include/asm/mmu_context.h
+++ b/arch/mn10300/include/asm/mmu_context.h
@@ -27,28 +27,38 @@
#include <asm/tlbflush.h>
#include <asm-generic/mm_hooks.h>
+#define MMU_CONTEXT_TLBPID_NR 256
#define MMU_CONTEXT_TLBPID_MASK 0x000000ffUL
#define MMU_CONTEXT_VERSION_MASK 0xffffff00UL
#define MMU_CONTEXT_FIRST_VERSION 0x00000100UL
#define MMU_NO_CONTEXT 0x00000000UL
-
-extern unsigned long mmu_context_cache[NR_CPUS];
-#define mm_context(mm) (mm->context.tlbpid[smp_processor_id()])
+#define MMU_CONTEXT_TLBPID_LOCK_NR 0
#define enter_lazy_tlb(mm, tsk) do {} while (0)
+static inline void cpu_ran_vm(int cpu, struct mm_struct *mm)
+{
#ifdef CONFIG_SMP
-#define cpu_ran_vm(cpu, mm) \
- cpumask_set_cpu((cpu), mm_cpumask(mm))
-#define cpu_maybe_ran_vm(cpu, mm) \
- cpumask_test_and_set_cpu((cpu), mm_cpumask(mm))
+ cpumask_set_cpu(cpu, mm_cpumask(mm));
+#endif
+}
+
+static inline bool cpu_maybe_ran_vm(int cpu, struct mm_struct *mm)
+{
+#ifdef CONFIG_SMP
+ return cpumask_test_and_set_cpu(cpu, mm_cpumask(mm));
#else
-#define cpu_ran_vm(cpu, mm) do {} while (0)
-#define cpu_maybe_ran_vm(cpu, mm) true
-#endif /* CONFIG_SMP */
+ return true;
+#endif
+}
-/*
- * allocate an MMU context
+#ifdef CONFIG_MN10300_TLB_USE_PIDR
+extern unsigned long mmu_context_cache[NR_CPUS];
+#define mm_context(mm) (mm->context.tlbpid[smp_processor_id()])
+
+/**
+ * allocate_mmu_context - Allocate storage for the arch-specific MMU data
+ * @mm: The userspace VM context being set up
*/
static inline unsigned long allocate_mmu_context(struct mm_struct *mm)
{
@@ -58,7 +68,7 @@ static inline unsigned long allocate_mmu_context(struct mm_struct *mm)
if (!(mc & MMU_CONTEXT_TLBPID_MASK)) {
/* we exhausted the TLB PIDs of this version on this CPU, so we
* flush this CPU's TLB in its entirety and start new cycle */
- flush_tlb_all();
+ local_flush_tlb_all();
/* fix the TLB version if needed (we avoid version #0 so as to
* distingush MMU_NO_CONTEXT) */
@@ -101,22 +111,34 @@ static inline int init_new_context(struct task_struct *tsk,
}
/*
- * destroy context related info for an mm_struct that is about to be put to
- * rest
- */
-#define destroy_context(mm) do { } while (0)
-
-/*
* after we have set current->mm to a new value, this activates the context for
* the new mm so we see the new mappings.
*/
-static inline void activate_context(struct mm_struct *mm, int cpu)
+static inline void activate_context(struct mm_struct *mm)
{
PIDR = get_mmu_context(mm) & MMU_CONTEXT_TLBPID_MASK;
}
+#else /* CONFIG_MN10300_TLB_USE_PIDR */
-/*
- * change between virtual memory sets
+#define init_new_context(tsk, mm) (0)
+#define activate_context(mm) local_flush_tlb()
+
+#endif /* CONFIG_MN10300_TLB_USE_PIDR */
+
+/**
+ * destroy_context - Destroy mm context information
+ * @mm: The MM being destroyed.
+ *
+ * Destroy context related info for an mm_struct that is about to be put to
+ * rest
+ */
+#define destroy_context(mm) do {} while (0)
+
+/**
+ * switch_mm - Change between userspace virtual memory contexts
+ * @prev: The outgoing MM context.
+ * @next: The incoming MM context.
+ * @tsk: The incoming task.
*/
static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
struct task_struct *tsk)
@@ -124,11 +146,12 @@ static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
int cpu = smp_processor_id();
if (prev != next) {
+#ifdef CONFIG_SMP
+ per_cpu(cpu_tlbstate, cpu).active_mm = next;
+#endif
cpu_ran_vm(cpu, next);
- activate_context(next, cpu);
PTBR = (unsigned long) next->pgd;
- } else if (!cpu_maybe_ran_vm(cpu, next)) {
- activate_context(next, cpu);
+ activate_context(next);
}
}
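
[Editorial sketch] The constants above imply a per-CPU context counter holding an 8-bit TLB PID in the low byte and a 24-bit version in the upper bytes; when one version's PIDs run out, the local TLB is flushed and the version advances, skipping 0 so MMU_NO_CONTEXT stays distinguishable. A minimal host-side model of that allocation scheme follows; it is a sketch, not the kernel's allocate_mmu_context().

/* host-side model of the PID/version allocation implied by the constants above */
#include <stdio.h>

#define MMU_CONTEXT_TLBPID_MASK   0x000000ffUL
#define MMU_CONTEXT_VERSION_MASK  0xffffff00UL
#define MMU_CONTEXT_FIRST_VERSION 0x00000100UL

static unsigned long context_cache = MMU_CONTEXT_FIRST_VERSION;

static unsigned long alloc_context_sketch(void)
{
	unsigned long mc = ++context_cache;

	if (!(mc & MMU_CONTEXT_TLBPID_MASK)) {
		/* all 256 PIDs of this version used: the kernel would
		 * local_flush_tlb_all() here and start a new version,
		 * avoiding version 0 so MMU_NO_CONTEXT (0) stays unique */
		if (!(mc & MMU_CONTEXT_VERSION_MASK))
			context_cache = mc = MMU_CONTEXT_FIRST_VERSION;
	}
	return mc;
}

int main(void)
{
	for (int i = 0; i < 300; i++)
		alloc_context_sketch();
	printf("context cache now %#lx (PID %#lx)\n",
	       context_cache, context_cache & MMU_CONTEXT_TLBPID_MASK);
	return 0;
}
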
diff --git a/arch/mn10300/include/asm/pgalloc.h b/arch/mn10300/include/asm/pgalloc.h
index a19f11327cd8..146bacf193ea 100644
--- a/arch/mn10300/include/asm/pgalloc.h
+++ b/arch/mn10300/include/asm/pgalloc.h
@@ -11,7 +11,6 @@
#ifndef _ASM_PGALLOC_H
#define _ASM_PGALLOC_H
-#include <asm/processor.h>
#include <asm/page.h>
#include <linux/threads.h>
#include <linux/mm.h> /* for struct page */
diff --git a/arch/mn10300/include/asm/pgtable.h b/arch/mn10300/include/asm/pgtable.h
index 16d88577f3e0..a1e894b5f65b 100644
--- a/arch/mn10300/include/asm/pgtable.h
+++ b/arch/mn10300/include/asm/pgtable.h
@@ -90,46 +90,58 @@ extern pgd_t swapper_pg_dir[PTRS_PER_PGD];
* The vmalloc() routines also leaves a hole of 4kB between each vmalloced
* area to catch addressing errors.
*/
+#ifndef __ASSEMBLY__
+#define VMALLOC_OFFSET (8UL * 1024 * 1024)
+#define VMALLOC_START (0x70000000UL)
+#define VMALLOC_END (0x7C000000UL)
+#else
#define VMALLOC_OFFSET (8 * 1024 * 1024)
#define VMALLOC_START (0x70000000)
#define VMALLOC_END (0x7C000000)
+#endif
#ifndef __ASSEMBLY__
extern pte_t kernel_vmalloc_ptes[(VMALLOC_END - VMALLOC_START) / PAGE_SIZE];
#endif
-/* IPTEL/DPTEL bit assignments */
-#define _PAGE_BIT_VALID xPTEL_V_BIT
-#define _PAGE_BIT_ACCESSED xPTEL_UNUSED1_BIT /* mustn't be loaded into IPTEL/DPTEL */
-#define _PAGE_BIT_NX xPTEL_UNUSED2_BIT /* mustn't be loaded into IPTEL/DPTEL */
-#define _PAGE_BIT_CACHE xPTEL_C_BIT
-#define _PAGE_BIT_PRESENT xPTEL_PV_BIT
-#define _PAGE_BIT_DIRTY xPTEL_D_BIT
-#define _PAGE_BIT_GLOBAL xPTEL_G_BIT
-
-#define _PAGE_VALID xPTEL_V
-#define _PAGE_ACCESSED xPTEL_UNUSED1
-#define _PAGE_NX xPTEL_UNUSED2 /* no-execute bit */
-#define _PAGE_CACHE xPTEL_C
-#define _PAGE_PRESENT xPTEL_PV
-#define _PAGE_DIRTY xPTEL_D
-#define _PAGE_PROT xPTEL_PR
-#define _PAGE_PROT_RKNU xPTEL_PR_ROK
-#define _PAGE_PROT_WKNU xPTEL_PR_RWK
-#define _PAGE_PROT_RKRU xPTEL_PR_ROK_ROU
-#define _PAGE_PROT_WKRU xPTEL_PR_RWK_ROU
-#define _PAGE_PROT_WKWU xPTEL_PR_RWK_RWU
-#define _PAGE_GLOBAL xPTEL_G
-#define _PAGE_PSE xPTEL_PS_4Mb /* 4MB page */
-
-#define _PAGE_FILE xPTEL_UNUSED1_BIT /* set:pagecache unset:swap */
-
-#define __PAGE_PROT_UWAUX 0x040
-#define __PAGE_PROT_USER 0x080
-#define __PAGE_PROT_WRITE 0x100
+/* IPTEL2/DPTEL2 bit assignments */
+#define _PAGE_BIT_VALID xPTEL2_V_BIT
+#define _PAGE_BIT_CACHE xPTEL2_C_BIT
+#define _PAGE_BIT_PRESENT xPTEL2_PV_BIT
+#define _PAGE_BIT_DIRTY xPTEL2_D_BIT
+#define _PAGE_BIT_GLOBAL xPTEL2_G_BIT
+#define _PAGE_BIT_ACCESSED xPTEL2_UNUSED1_BIT /* mustn't be loaded into IPTEL2/DPTEL2 */
+
+#define _PAGE_VALID xPTEL2_V
+#define _PAGE_CACHE xPTEL2_C
+#define _PAGE_PRESENT xPTEL2_PV
+#define _PAGE_DIRTY xPTEL2_D
+#define _PAGE_PROT xPTEL2_PR
+#define _PAGE_PROT_RKNU xPTEL2_PR_ROK
+#define _PAGE_PROT_WKNU xPTEL2_PR_RWK
+#define _PAGE_PROT_RKRU xPTEL2_PR_ROK_ROU
+#define _PAGE_PROT_WKRU xPTEL2_PR_RWK_ROU
+#define _PAGE_PROT_WKWU xPTEL2_PR_RWK_RWU
+#define _PAGE_GLOBAL xPTEL2_G
+#define _PAGE_PS_MASK xPTEL2_PS
+#define _PAGE_PS_4Kb xPTEL2_PS_4Kb
+#define _PAGE_PS_128Kb xPTEL2_PS_128Kb
+#define _PAGE_PS_1Kb xPTEL2_PS_1Kb
+#define _PAGE_PS_4Mb xPTEL2_PS_4Mb
+#define _PAGE_PSE xPTEL2_PS_4Mb /* 4MB page */
+#define _PAGE_CACHE_WT xPTEL2_CWT
+#define _PAGE_ACCESSED xPTEL2_UNUSED1
+#define _PAGE_NX 0 /* no-execute bit */
+
+/* If _PAGE_VALID is clear, we use these: */
+#define _PAGE_FILE xPTEL2_C /* set:pagecache unset:swap */
+#define _PAGE_PROTNONE 0x000 /* If not present */
+
+#define __PAGE_PROT_UWAUX 0x010
+#define __PAGE_PROT_USER 0x020
+#define __PAGE_PROT_WRITE 0x040
#define _PAGE_PRESENTV (_PAGE_PRESENT|_PAGE_VALID)
-#define _PAGE_PROTNONE 0x000 /* If not present */
#ifndef __ASSEMBLY__
@@ -170,6 +182,9 @@ extern pte_t kernel_vmalloc_ptes[(VMALLOC_END - VMALLOC_START) / PAGE_SIZE];
#define PAGE_KERNEL_LARGE __pgprot(__PAGE_KERNEL_LARGE)
#define PAGE_KERNEL_LARGE_EXEC __pgprot(__PAGE_KERNEL_LARGE_EXEC)
+#define __PAGE_USERIO (__PAGE_KERNEL_BASE | _PAGE_PROT_WKWU | _PAGE_NX)
+#define PAGE_USERIO __pgprot(__PAGE_USERIO)
+
/*
* Whilst the MN10300 can do page protection for execute (given separate data
* and insn TLBs), we are not supporting it at the moment. Write permission,
@@ -323,11 +338,7 @@ static inline int pte_exec_kernel(pte_t pte)
return 1;
}
-/*
- * Bits 0 and 1 are taken, split up the 29 bits of offset
- * into this range:
- */
-#define PTE_FILE_MAX_BITS 29
+#define PTE_FILE_MAX_BITS 30
#define pte_to_pgoff(pte) (pte_val(pte) >> 2)
#define pgoff_to_pte(off) __pte((off) << 2 | _PAGE_FILE)
@@ -373,8 +384,13 @@ static inline void ptep_mkdirty(pte_t *ptep)
* Macro to mark a page protection value as "uncacheable". On processors which
* do not support it, this is a no-op.
*/
-#define pgprot_noncached(prot) __pgprot(pgprot_val(prot) | _PAGE_CACHE)
+#define pgprot_noncached(prot) __pgprot(pgprot_val(prot) & ~_PAGE_CACHE)
+/*
+ * Macro to mark a page protection value as "Write-Through".
+ * On processors which do not support it, this is a no-op.
+ */
+#define pgprot_through(prot) __pgprot(pgprot_val(prot) | _PAGE_CACHE_WT)
/*
* Conversion functions: convert a page and protection to a page entry,
@@ -457,9 +473,7 @@ static inline int set_kernel_exec(unsigned long vaddr, int enable)
#define pte_offset_map(dir, address) \
((pte_t *) page_address(pmd_page(*(dir))) + pte_index(address))
-#define pte_offset_map_nested(dir, address) pte_offset_map(dir, address)
#define pte_unmap(pte) do {} while (0)
-#define pte_unmap_nested(pte) do {} while (0)
/*
* The MN10300 has external MMU info in the form of a TLB: this is adapted from
diff --git a/arch/mn10300/include/asm/processor.h b/arch/mn10300/include/asm/processor.h
index f7d4b0d285e8..4c1b5cc14c19 100644
--- a/arch/mn10300/include/asm/processor.h
+++ b/arch/mn10300/include/asm/processor.h
@@ -13,10 +13,13 @@
#ifndef _ASM_PROCESSOR_H
#define _ASM_PROCESSOR_H
+#include <linux/threads.h>
+#include <linux/thread_info.h>
#include <asm/page.h>
#include <asm/ptrace.h>
#include <asm/cpu-regs.h>
-#include <linux/threads.h>
+#include <asm/uaccess.h>
+#include <asm/current.h>
/* Forward declaration, a strange C thing */
struct task_struct;
@@ -33,6 +36,8 @@ struct mm_struct;
__pc; \
})
+extern void get_mem_info(unsigned long *mem_base, unsigned long *mem_size);
+
extern void show_registers(struct pt_regs *regs);
/*
@@ -43,17 +48,22 @@ extern void show_registers(struct pt_regs *regs);
struct mn10300_cpuinfo {
int type;
- unsigned long loops_per_sec;
+ unsigned long loops_per_jiffy;
char hard_math;
- unsigned long *pgd_quick;
- unsigned long *pte_quick;
- unsigned long pgtable_cache_sz;
};
extern struct mn10300_cpuinfo boot_cpu_data;
+#ifdef CONFIG_SMP
+#if CONFIG_NR_CPUS < 2 || CONFIG_NR_CPUS > 8
+# error Sorry, NR_CPUS should be 2 to 8
+#endif
+extern struct mn10300_cpuinfo cpu_data[];
+#define current_cpu_data cpu_data[smp_processor_id()]
+#else /* CONFIG_SMP */
#define cpu_data &boot_cpu_data
#define current_cpu_data boot_cpu_data
+#endif /* CONFIG_SMP */
extern void identify_cpu(struct mn10300_cpuinfo *);
extern void print_cpu_info(struct mn10300_cpuinfo *);
@@ -76,10 +86,6 @@ extern void dodgy_tsc(void);
*/
#define TASK_UNMAPPED_BASE 0x30000000
-typedef struct {
- unsigned long seg;
-} mm_segment_t;
-
struct fpu_state_struct {
unsigned long fs[32]; /* fpu registers */
unsigned long fpcr; /* fpu control register */
@@ -92,20 +98,19 @@ struct thread_struct {
unsigned long a3; /* kernel FP */
unsigned long wchan;
unsigned long usp;
- struct pt_regs *__frame;
unsigned long fpu_flags;
#define THREAD_USING_FPU 0x00000001 /* T if this task is using the FPU */
+#define THREAD_HAS_FPU 0x00000002 /* T if this task owns the FPU right now */
struct fpu_state_struct fpu_state;
};
-#define INIT_THREAD \
-{ \
- .uregs = init_uregs, \
- .pc = 0, \
- .sp = 0, \
- .a3 = 0, \
- .wchan = 0, \
- .__frame = NULL, \
+#define INIT_THREAD \
+{ \
+ .uregs = init_uregs, \
+ .pc = 0, \
+ .sp = 0, \
+ .a3 = 0, \
+ .wchan = 0, \
}
#define INIT_MMAP \
@@ -117,13 +122,20 @@ struct thread_struct {
* - need to discard the frame stacked by the kernel thread invoking the execve
* syscall (see RESTORE_ALL macro)
*/
-#define start_thread(regs, new_pc, new_sp) do { \
- set_fs(USER_DS); \
- __frame = current->thread.uregs; \
- __frame->epsw = EPSW_nSL | EPSW_IE | EPSW_IM; \
- __frame->pc = new_pc; \
- __frame->sp = new_sp; \
-} while (0)
+static inline void start_thread(struct pt_regs *regs,
+ unsigned long new_pc, unsigned long new_sp)
+{
+ struct thread_info *ti = current_thread_info();
+ struct pt_regs *frame0;
+ set_fs(USER_DS);
+
+ frame0 = thread_info_to_uregs(ti);
+ frame0->epsw = EPSW_nSL | EPSW_IE | EPSW_IM;
+ frame0->pc = new_pc;
+ frame0->sp = new_sp;
+ ti->frame = frame0;
+}
+
/* Free all resources held by a thread. */
extern void release_thread(struct task_struct *);
@@ -157,7 +169,7 @@ unsigned long get_wchan(struct task_struct *p);
static inline void prefetch(const void *x)
{
-#ifndef CONFIG_MN10300_CACHE_DISABLED
+#ifdef CONFIG_MN10300_CACHE_ENABLED
#ifdef CONFIG_MN10300_PROC_MN103E010
asm volatile ("nop; nop; dcpf (%0)" : : "r"(x));
#else
@@ -168,7 +180,7 @@ static inline void prefetch(const void *x)
static inline void prefetchw(const void *x)
{
-#ifndef CONFIG_MN10300_CACHE_DISABLED
+#ifdef CONFIG_MN10300_CACHE_ENABLED
#ifdef CONFIG_MN10300_PROC_MN103E010
asm volatile ("nop; nop; dcpf (%0)" : : "r"(x));
#else
diff --git a/arch/mn10300/include/asm/ptrace.h b/arch/mn10300/include/asm/ptrace.h
index 7c2e911052b6..b6961811d445 100644
--- a/arch/mn10300/include/asm/ptrace.h
+++ b/arch/mn10300/include/asm/ptrace.h
@@ -40,7 +40,6 @@
#define PT_PC 26
#define NR_PTREGS 27
-#ifndef __ASSEMBLY__
/*
* This defines the way registers are stored in the event of an exception
* - the strange order is due to the MOVM instruction
@@ -75,7 +74,6 @@ struct pt_regs {
unsigned long epsw;
unsigned long pc;
};
-#endif
/* Arbitrarily choose the same ptrace numbers as used by the Sparc code. */
#define PTRACE_GETREGS 12
@@ -86,12 +84,7 @@ struct pt_regs {
/* options set using PTRACE_SETOPTIONS */
#define PTRACE_O_TRACESYSGOOD 0x00000001
-#if defined(__KERNEL__)
-
-extern struct pt_regs *__frame; /* current frame pointer */
-
-#if !defined(__ASSEMBLY__)
-struct task_struct;
+#ifdef __KERNEL__
#define user_mode(regs) (((regs)->epsw & EPSW_nSL) == EPSW_nSL)
#define instruction_pointer(regs) ((regs)->pc)
@@ -100,9 +93,7 @@ extern void show_regs(struct pt_regs *);
#define arch_has_single_step() (1)
-#endif /* !__ASSEMBLY */
-
#define profile_pc(regs) ((regs)->pc)
-#endif /* __KERNEL__ */
+#endif /* __KERNEL__ */
#endif /* _ASM_PTRACE_H */
diff --git a/arch/mn10300/include/asm/reset-regs.h b/arch/mn10300/include/asm/reset-regs.h
index 174523d50132..10c7502a113f 100644
--- a/arch/mn10300/include/asm/reset-regs.h
+++ b/arch/mn10300/include/asm/reset-regs.h
@@ -50,7 +50,7 @@ static inline void mn10300_proc_hard_reset(void)
RSTCTR |= RSTCTR_CHIPRST;
}
-extern unsigned int watchdog_alert_counter;
+extern unsigned int watchdog_alert_counter[];
extern void watchdog_go(void);
extern asmlinkage void watchdog_handler(void);
diff --git a/arch/mn10300/include/asm/rtc.h b/arch/mn10300/include/asm/rtc.h
index c295194cc703..6c14bb1d0d9b 100644
--- a/arch/mn10300/include/asm/rtc.h
+++ b/arch/mn10300/include/asm/rtc.h
@@ -15,25 +15,14 @@
#include <linux/init.h>
-extern void check_rtc_time(void);
extern void __init calibrate_clock(void);
-extern unsigned long __init get_initial_rtc_time(void);
#else /* !CONFIG_MN10300_RTC */
-static inline void check_rtc_time(void)
-{
-}
-
static inline void calibrate_clock(void)
{
}
-static inline unsigned long get_initial_rtc_time(void)
-{
- return 0;
-}
-
#endif /* !CONFIG_MN10300_RTC */
#include <asm-generic/rtc.h>
diff --git a/arch/mn10300/include/asm/rwlock.h b/arch/mn10300/include/asm/rwlock.h
new file mode 100644
index 000000000000..6d594d4a0e10
--- /dev/null
+++ b/arch/mn10300/include/asm/rwlock.h
@@ -0,0 +1,125 @@
+/*
+ * Helpers used by both rw spinlocks and rw semaphores.
+ *
+ * Based in part on code from semaphore.h and
+ * spinlock.h Copyright 1996 Linus Torvalds.
+ *
+ * Copyright 1999 Red Hat, Inc.
+ *
+ * Written by Benjamin LaHaise.
+ *
+ * Modified by Matsushita Electric Industrial Co., Ltd.
+ * Modifications:
+ * 13-Nov-2006 MEI Temporarily delete lock functions for SMP support.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the Free
+ * Software Foundation; either version 2 of the License, or (at your option)
+ * any later version.
+ */
+#ifndef _ASM_RWLOCK_H
+#define _ASM_RWLOCK_H
+
+#define RW_LOCK_BIAS 0x01000000
+
+#ifndef CONFIG_SMP
+
+typedef struct { unsigned long a[100]; } __dummy_lock_t;
+#define __dummy_lock(lock) (*(__dummy_lock_t *)(lock))
+
+#define RW_LOCK_BIAS_STR "0x01000000"
+
+#define __build_read_lock_ptr(rw, helper) \
+ do { \
+ asm volatile( \
+ " mov (%0),d3 \n" \
+ " sub 1,d3 \n" \
+ " mov d3,(%0) \n" \
+ " blt 1f \n" \
+ " bra 2f \n" \
+ "1: jmp 3f \n" \
+ "2: \n" \
+ " .section .text.lock,\"ax\" \n" \
+ "3: call "helper"[],0 \n" \
+ " jmp 2b \n" \
+ " .previous" \
+ : \
+ : "d" (rw) \
+ : "memory", "d3", "cc"); \
+ } while (0)
+
+#define __build_read_lock_const(rw, helper) \
+ do { \
+ asm volatile( \
+ " mov (%0),d3 \n" \
+ " sub 1,d3 \n" \
+ " mov d3,(%0) \n" \
+ " blt 1f \n" \
+ " bra 2f \n" \
+ "1: jmp 3f \n" \
+ "2: \n" \
+ " .section .text.lock,\"ax\" \n" \
+ "3: call "helper"[],0 \n" \
+ " jmp 2b \n" \
+ " .previous" \
+ : \
+ : "d" (rw) \
+ : "memory", "d3", "cc"); \
+ } while (0)
+
+#define __build_read_lock(rw, helper) \
+ do { \
+ if (__builtin_constant_p(rw)) \
+ __build_read_lock_const(rw, helper); \
+ else \
+ __build_read_lock_ptr(rw, helper); \
+ } while (0)
+
+#define __build_write_lock_ptr(rw, helper) \
+ do { \
+ asm volatile( \
+ " mov (%0),d3 \n" \
+ " sub 1,d3 \n" \
+ " mov d3,(%0) \n" \
+ " blt 1f \n" \
+ " bra 2f \n" \
+ "1: jmp 3f \n" \
+ "2: \n" \
+ " .section .text.lock,\"ax\" \n" \
+ "3: call "helper"[],0 \n" \
+ " jmp 2b \n" \
+ " .previous" \
+ : \
+ : "d" (rw) \
+ : "memory", "d3", "cc"); \
+ } while (0)
+
+#define __build_write_lock_const(rw, helper) \
+ do { \
+ asm volatile( \
+ " mov (%0),d3 \n" \
+ " sub 1,d3 \n" \
+ " mov d3,(%0) \n" \
+ " blt 1f \n" \
+ " bra 2f \n" \
+ "1: jmp 3f \n" \
+ "2: \n" \
+ " .section .text.lock,\"ax\" \n" \
+ "3: call "helper"[],0 \n" \
+ " jmp 2b \n" \
+ " .previous" \
+ : \
+ : "d" (rw) \
+ : "memory", "d3", "cc"); \
+ } while (0)
+
+#define __build_write_lock(rw, helper) \
+ do { \
+ if (__builtin_constant_p(rw)) \
+ __build_write_lock_const(rw, helper); \
+ else \
+ __build_write_lock_ptr(rw, helper); \
+ } while (0)
+
+#endif /* CONFIG_SMP */
+#endif /* _ASM_RWLOCK_H */
diff --git a/arch/mn10300/include/asm/serial-regs.h b/arch/mn10300/include/asm/serial-regs.h
index 6498469e93ac..8320cda32f5a 100644
--- a/arch/mn10300/include/asm/serial-regs.h
+++ b/arch/mn10300/include/asm/serial-regs.h
@@ -20,18 +20,25 @@
/* serial port 0 */
#define SC0CTR __SYSREG(0xd4002000, u16) /* control reg */
#define SC01CTR_CK 0x0007 /* clock source select */
-#define SC0CTR_CK_TM8UFLOW_8 0x0000 /* - 1/8 timer 8 underflow (serial port 0 only) */
-#define SC1CTR_CK_TM9UFLOW_8 0x0000 /* - 1/8 timer 9 underflow (serial port 1 only) */
#define SC01CTR_CK_IOCLK_8 0x0001 /* - 1/8 IOCLK */
#define SC01CTR_CK_IOCLK_32 0x0002 /* - 1/32 IOCLK */
+#define SC01CTR_CK_EXTERN_8 0x0006 /* - 1/8 external clock */
+#define SC01CTR_CK_EXTERN 0x0007 /* - external clock */
+#if defined(CONFIG_AM33_2) || defined(CONFIG_AM33_3)
+#define SC0CTR_CK_TM8UFLOW_8 0x0000 /* - 1/8 timer 8 underflow (serial port 0 only) */
#define SC0CTR_CK_TM2UFLOW_2 0x0003 /* - 1/2 timer 2 underflow (serial port 0 only) */
-#define SC1CTR_CK_TM3UFLOW_2 0x0003 /* - 1/2 timer 3 underflow (serial port 1 only) */
-#define SC0CTR_CK_TM0UFLOW_8 0x0004 /* - 1/8 timer 1 underflow (serial port 0 only) */
-#define SC1CTR_CK_TM1UFLOW_8 0x0004 /* - 1/8 timer 2 underflow (serial port 1 only) */
+#define SC0CTR_CK_TM0UFLOW_8 0x0004 /* - 1/8 timer 0 underflow (serial port 0 only) */
#define SC0CTR_CK_TM2UFLOW_8 0x0005 /* - 1/8 timer 2 underflow (serial port 0 only) */
+#define SC1CTR_CK_TM9UFLOW_8 0x0000 /* - 1/8 timer 9 underflow (serial port 1 only) */
+#define SC1CTR_CK_TM3UFLOW_2 0x0003 /* - 1/2 timer 3 underflow (serial port 1 only) */
+#define SC1CTR_CK_TM1UFLOW_8 0x0004 /* - 1/8 timer 1 underflow (serial port 1 only) */
#define SC1CTR_CK_TM3UFLOW_8 0x0005 /* - 1/8 timer 3 underflow (serial port 1 only) */
-#define SC01CTR_CK_EXTERN_8 0x0006 /* - 1/8 external closk */
-#define SC01CTR_CK_EXTERN 0x0007 /* - external closk */
+#else /* CONFIG_AM33_2 || CONFIG_AM33_3 */
+#define SC0CTR_CK_TM8UFLOW_8 0x0000 /* - 1/8 timer 8 underflow (serial port 0 only) */
+#define SC0CTR_CK_TM0UFLOW_8 0x0004 /* - 1/8 timer 0 underflow (serial port 0 only) */
+#define SC0CTR_CK_TM2UFLOW_8 0x0005 /* - 1/8 timer 2 underflow (serial port 0 only) */
+#define SC1CTR_CK_TM12UFLOW_8 0x0000 /* - 1/8 timer 12 underflow (serial port 1 only) */
+#endif /* CONFIG_AM33_2 || CONFIG_AM33_3 */
#define SC01CTR_STB 0x0008 /* stop bit select */
#define SC01CTR_STB_1BIT 0x0000 /* - 1 stop bit */
#define SC01CTR_STB_2BIT 0x0008 /* - 2 stop bits */
@@ -100,11 +107,23 @@
/* serial port 2 */
#define SC2CTR __SYSREG(0xd4002020, u16) /* control reg */
+#ifdef CONFIG_AM33_2
#define SC2CTR_CK 0x0003 /* clock source select */
#define SC2CTR_CK_TM10UFLOW 0x0000 /* - timer 10 underflow */
#define SC2CTR_CK_TM2UFLOW 0x0001 /* - timer 2 underflow */
#define SC2CTR_CK_EXTERN 0x0002 /* - external closk */
#define SC2CTR_CK_TM3UFLOW 0x0003 /* - timer 3 underflow */
+#else /* CONFIG_AM33_2 */
+#define SC2CTR_CK 0x0007 /* clock source select */
+#define SC2CTR_CK_TM9UFLOW_8 0x0000 /* - 1/8 timer 9 underflow */
+#define SC2CTR_CK_IOCLK_8 0x0001 /* - 1/8 IOCLK */
+#define SC2CTR_CK_IOCLK_32 0x0002 /* - 1/32 IOCLK */
+#define SC2CTR_CK_TM3UFLOW_2 0x0003 /* - 1/2 timer 3 underflow */
+#define SC2CTR_CK_TM1UFLOW_8 0x0004 /* - 1/8 timer 1 underflow */
+#define SC2CTR_CK_TM3UFLOW_8 0x0005 /* - 1/8 timer 3 underflow */
+#define SC2CTR_CK_EXTERN_8 0x0006 /* - 1/8 external clock */
+#define SC2CTR_CK_EXTERN 0x0007 /* - external clock */
+#endif /* CONFIG_AM33_2 */
#define SC2CTR_STB 0x0008 /* stop bit select */
#define SC2CTR_STB_1BIT 0x0000 /* - 1 stop bit */
#define SC2CTR_STB_2BIT 0x0008 /* - 2 stop bits */
@@ -134,9 +153,14 @@
#define SC2ICR_RES 0x04 /* receive error select */
#define SC2ICR_RI 0x01 /* receive interrupt cause */
-#define SC2TXB __SYSREG(0xd4002018, u8) /* transmit buffer reg */
-#define SC2RXB __SYSREG(0xd4002019, u8) /* receive buffer reg */
-#define SC2STR __SYSREG(0xd400201c, u8) /* status reg */
+#define SC2TXB __SYSREG(0xd4002028, u8) /* transmit buffer reg */
+#define SC2RXB __SYSREG(0xd4002029, u8) /* receive buffer reg */
+
+#ifdef CONFIG_AM33_2
+#define SC2STR __SYSREG(0xd400202c, u8) /* status reg */
+#else /* CONFIG_AM33_2 */
+#define SC2STR __SYSREG(0xd400202c, u16) /* status reg */
+#endif /* CONFIG_AM33_2 */
#define SC2STR_OEF 0x0001 /* overrun error found */
#define SC2STR_PEF 0x0002 /* parity error found */
#define SC2STR_FEF 0x0004 /* framing error found */
@@ -146,10 +170,17 @@
#define SC2STR_RXF 0x0040 /* receive status */
#define SC2STR_TXF 0x0080 /* transmit status */
+#ifdef CONFIG_AM33_2
#define SC2TIM __SYSREG(0xd400202d, u8) /* status reg */
+#endif
+#ifdef CONFIG_AM33_2
#define SC2RXIRQ 24 /* serial 2 Receive IRQ */
#define SC2TXIRQ 25 /* serial 2 Transmit IRQ */
+#else /* CONFIG_AM33_2 */
+#define SC2RXIRQ 68 /* serial 2 Receive IRQ */
+#define SC2TXIRQ 69 /* serial 2 Transmit IRQ */
+#endif /* CONFIG_AM33_2 */
#define SC2RXICR GxICR(SC2RXIRQ) /* serial 2 receive intr ctrl reg */
#define SC2TXICR GxICR(SC2TXIRQ) /* serial 2 transmit intr ctrl reg */
diff --git a/arch/mn10300/include/asm/serial.h b/arch/mn10300/include/asm/serial.h
index a29445cddd6f..23a799293599 100644
--- a/arch/mn10300/include/asm/serial.h
+++ b/arch/mn10300/include/asm/serial.h
@@ -9,10 +9,8 @@
* 2 of the Licence, or (at your option) any later version.
*/
-/*
- * The ASB2305 has an 18.432 MHz clock the UART
- */
-#define BASE_BAUD (18432000 / 16)
+#ifndef _ASM_SERIAL_H
+#define _ASM_SERIAL_H
/* Standard COM flags (except for COM4, because of the 8514 problem) */
#ifdef CONFIG_SERIAL_DETECT_IRQ
@@ -34,3 +32,5 @@
#endif
#include <unit/serial.h>
+
+#endif /* _ASM_SERIAL_H */
diff --git a/arch/mn10300/include/asm/smp.h b/arch/mn10300/include/asm/smp.h
index 4eb8c61b7dab..a3930e43a958 100644
--- a/arch/mn10300/include/asm/smp.h
+++ b/arch/mn10300/include/asm/smp.h
@@ -3,6 +3,16 @@
* Copyright (C) 2007 Red Hat, Inc. All Rights Reserved.
* Written by David Howells (dhowells@redhat.com)
*
+ * Modified by Matsushita Electric Industrial Co., Ltd.
+ * Modifications:
+ * 13-Nov-2006 MEI Define IPI-IRQ number and add inline/macro function
+ * for SMP support.
+ * 22-Jan-2007 MEI Add the define related to SMP_BOOT_IRQ.
+ * 23-Feb-2007 MEI Add the defines related to SMP icache invalidation.
+ * 23-Jun-2008 MEI Delete INTC_IPI.
+ * 22-Jul-2008 MEI Add smp_nmi_call_function and related defines.
+ * 04-Aug-2008 MEI Delete USE_DOIRQ_CACHE_IPI.
+ *
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public Licence
* as published by the Free Software Foundation; either version
@@ -11,8 +21,85 @@
#ifndef _ASM_SMP_H
#define _ASM_SMP_H
-#ifdef CONFIG_SMP
-#error SMP not yet supported for MN10300
+#ifndef __ASSEMBLY__
+#include <linux/threads.h>
+#include <linux/cpumask.h>
#endif
+#ifdef CONFIG_SMP
+#include <proc/smp-regs.h>
+
+#define RESCHEDULE_IPI 63
+#define CALL_FUNC_SINGLE_IPI 192
+#define LOCAL_TIMER_IPI 193
+#define FLUSH_CACHE_IPI 194
+#define CALL_FUNCTION_NMI_IPI 195
+#define GDB_NMI_IPI 196
+
+#define SMP_BOOT_IRQ 195
+
+#define RESCHEDULE_GxICR_LV GxICR_LEVEL_6
+#define CALL_FUNCTION_GxICR_LV GxICR_LEVEL_4
+#define LOCAL_TIMER_GxICR_LV GxICR_LEVEL_4
+#define FLUSH_CACHE_GxICR_LV GxICR_LEVEL_0
+#define SMP_BOOT_GxICR_LV GxICR_LEVEL_0
+
+#define TIME_OUT_COUNT_BOOT_IPI 100
+#define DELAY_TIME_BOOT_IPI 75000
+
+
+#ifndef __ASSEMBLY__
+
+/**
+ * raw_smp_processor_id - Determine the raw CPU ID of the CPU running it
+ *
+ * What we really want to do is to use the CPUID hardware CPU register to get
+ * this information, but accesses to that aren't cached, and run at system bus
+ * speed, not CPU speed. A copy of this value is, however, stored in the
+ * thread_info struct, and that can be cached.
+ *
+ * An alternate way of dealing with this could be to use the EPSW.S bits to
+ * cache this information for systems with up to four CPUs.
+ */
+#if 0
+#define raw_smp_processor_id() (CPUID)
+#else
+#define raw_smp_processor_id() (current_thread_info()->cpu)
#endif
+
+static inline int cpu_logical_map(int cpu)
+{
+ return cpu;
+}
+
+static inline int cpu_number_map(int cpu)
+{
+ return cpu;
+}
+
+
+extern cpumask_t cpu_boot_map;
+
+extern void smp_init_cpus(void);
+extern void smp_cache_interrupt(void);
+extern void send_IPI_allbutself(int irq);
+extern int smp_nmi_call_function(smp_call_func_t func, void *info, int wait);
+
+extern void arch_send_call_function_single_ipi(int cpu);
+extern void arch_send_call_function_ipi_mask(const struct cpumask *mask);
+
+#ifdef CONFIG_HOTPLUG_CPU
+extern int __cpu_disable(void);
+extern void __cpu_die(unsigned int cpu);
+#endif /* CONFIG_HOTPLUG_CPU */
+
+#endif /* __ASSEMBLY__ */
+#else /* CONFIG_SMP */
+#ifndef __ASSEMBLY__
+
+static inline void smp_init_cpus(void) {}
+
+#endif /* __ASSEMBLY__ */
+#endif /* CONFIG_SMP */
+
+#endif /* _ASM_SMP_H */
diff --git a/arch/mn10300/include/asm/smsc911x.h b/arch/mn10300/include/asm/smsc911x.h
new file mode 100644
index 000000000000..2fcd1080322b
--- /dev/null
+++ b/arch/mn10300/include/asm/smsc911x.h
@@ -0,0 +1 @@
+#include <unit/smsc911x.h>
diff --git a/arch/mn10300/include/asm/spinlock.h b/arch/mn10300/include/asm/spinlock.h
index 4bf9c8b169e0..93429154e898 100644
--- a/arch/mn10300/include/asm/spinlock.h
+++ b/arch/mn10300/include/asm/spinlock.h
@@ -11,6 +11,183 @@
#ifndef _ASM_SPINLOCK_H
#define _ASM_SPINLOCK_H
-#error SMP spinlocks not implemented for MN10300
+#include <asm/atomic.h>
+#include <asm/rwlock.h>
+#include <asm/page.h>
+/*
+ * Simple spin lock operations. There are two variants, one clears IRQ's
+ * on the local processor, one does not.
+ *
+ * We make no fairness assumptions. They have a cost.
+ */
+
+#define arch_spin_is_locked(x) (*(volatile signed char *)(&(x)->slock) != 0)
+#define arch_spin_unlock_wait(x) do { barrier(); } while (arch_spin_is_locked(x))
+
+static inline void arch_spin_unlock(arch_spinlock_t *lock)
+{
+ asm volatile(
+ " bclr 1,(0,%0) \n"
+ :
+ : "a"(&lock->slock)
+ : "memory", "cc");
+}
+
+static inline int arch_spin_trylock(arch_spinlock_t *lock)
+{
+ int ret;
+
+ asm volatile(
+ " mov 1,%0 \n"
+ " bset %0,(%1) \n"
+ " bne 1f \n"
+ " clr %0 \n"
+ "1: xor 1,%0 \n"
+ : "=d"(ret)
+ : "a"(&lock->slock)
+ : "memory", "cc");
+
+ return ret;
+}
+
+static inline void arch_spin_lock(arch_spinlock_t *lock)
+{
+ asm volatile(
+ "1: bset 1,(0,%0) \n"
+ " bne 1b \n"
+ :
+ : "a"(&lock->slock)
+ : "memory", "cc");
+}
+
+static inline void arch_spin_lock_flags(arch_spinlock_t *lock,
+ unsigned long flags)
+{
+ int temp;
+
+ asm volatile(
+ "1: bset 1,(0,%2) \n"
+ " beq 3f \n"
+ " mov %1,epsw \n"
+ "2: mov (0,%2),%0 \n"
+ " or %0,%0 \n"
+ " bne 2b \n"
+ " mov %3,%0 \n"
+ " mov %0,epsw \n"
+ " nop \n"
+ " nop \n"
+ " bra 1b\n"
+ "3: \n"
+ : "=&d" (temp)
+ : "d" (flags), "a"(&lock->slock), "i"(EPSW_IE | MN10300_CLI_LEVEL)
+ : "memory", "cc");
+}
+
+#ifdef __KERNEL__
+
+/*
+ * Read-write spinlocks, allowing multiple readers
+ * but only one writer.
+ *
+ * NOTE! it is quite common to have readers in interrupts
+ * but no interrupt writers. For those circumstances we
+ * can "mix" irq-safe locks - any writer needs to get a
+ * irq-safe write-lock, but readers can get non-irqsafe
+ * read-locks.
+ */
+
+/**
+ * read_can_lock - would read_trylock() succeed?
+ * @lock: the rwlock in question.
+ */
+#define arch_read_can_lock(x) ((int)(x)->lock > 0)
+
+/**
+ * write_can_lock - would write_trylock() succeed?
+ * @lock: the rwlock in question.
+ */
+#define arch_write_can_lock(x) ((x)->lock == RW_LOCK_BIAS)
+
+/*
+ * On mn10300, we implement read-write locks as a 32-bit counter
+ * with the high bit (sign) being the "contended" bit.
+ */
+static inline void arch_read_lock(arch_rwlock_t *rw)
+{
+#if 0 //def CONFIG_MN10300_HAS_ATOMIC_OPS_UNIT
+ __build_read_lock(rw, "__read_lock_failed");
+#else
+ {
+ atomic_t *count = (atomic_t *)rw;
+ while (atomic_dec_return(count) < 0)
+ atomic_inc(count);
+ }
+#endif
+}
+
+static inline void arch_write_lock(arch_rwlock_t *rw)
+{
+#if 0 //def CONFIG_MN10300_HAS_ATOMIC_OPS_UNIT
+ __build_write_lock(rw, "__write_lock_failed");
+#else
+ {
+ atomic_t *count = (atomic_t *)rw;
+ while (!atomic_sub_and_test(RW_LOCK_BIAS, count))
+ atomic_add(RW_LOCK_BIAS, count);
+ }
+#endif
+}
+
+static inline void arch_read_unlock(arch_rwlock_t *rw)
+{
+#if 0 //def CONFIG_MN10300_HAS_ATOMIC_OPS_UNIT
+ __build_read_unlock(rw);
+#else
+ {
+ atomic_t *count = (atomic_t *)rw;
+ atomic_inc(count);
+ }
+#endif
+}
+
+static inline void arch_write_unlock(arch_rwlock_t *rw)
+{
+#if 0 //def CONFIG_MN10300_HAS_ATOMIC_OPS_UNIT
+ __build_write_unlock(rw);
+#else
+ {
+ atomic_t *count = (atomic_t *)rw;
+ atomic_add(RW_LOCK_BIAS, count);
+ }
+#endif
+}
+
+static inline int arch_read_trylock(arch_rwlock_t *lock)
+{
+ atomic_t *count = (atomic_t *)lock;
+ atomic_dec(count);
+ if (atomic_read(count) >= 0)
+ return 1;
+ atomic_inc(count);
+ return 0;
+}
+
+static inline int arch_write_trylock(arch_rwlock_t *lock)
+{
+ atomic_t *count = (atomic_t *)lock;
+ if (atomic_sub_and_test(RW_LOCK_BIAS, count))
+ return 1;
+ atomic_add(RW_LOCK_BIAS, count);
+ return 0;
+}
+
+#define arch_read_lock_flags(lock, flags) arch_read_lock(lock)
+#define arch_write_lock_flags(lock, flags) arch_write_lock(lock)
+
+#define _raw_spin_relax(lock) cpu_relax()
+#define _raw_read_relax(lock) cpu_relax()
+#define _raw_write_relax(lock) cpu_relax()
+
+#endif /* __KERNEL__ */
#endif /* _ASM_SPINLOCK_H */
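
[Editorial sketch] The read-write locks above are a 32-bit counter biased by RW_LOCK_BIAS: each reader subtracts 1, a writer subtracts the whole bias, so a writer only succeeds when the counter sits exactly at the bias (no readers, no writer). A portable C11-atomics sketch of the same counting scheme follows; it is an illustration, not the MN10300 implementation.

/* biased-counter rwlock scheme, modelled with C11 atomics */
#include <assert.h>
#include <stdatomic.h>
#include <stdbool.h>

#define RW_LOCK_BIAS 0x01000000

typedef struct { atomic_int count; } rwlock_sketch_t;
#define RWLOCK_SKETCH_INIT { RW_LOCK_BIAS }

static bool read_trylock_sketch(rwlock_sketch_t *rw)
{
	/* take one reader slot; back out if a writer holds the lock */
	if (atomic_fetch_sub(&rw->count, 1) - 1 >= 0)
		return true;
	atomic_fetch_add(&rw->count, 1);
	return false;
}

static void read_unlock_sketch(rwlock_sketch_t *rw)
{
	atomic_fetch_add(&rw->count, 1);
}

static bool write_trylock_sketch(rwlock_sketch_t *rw)
{
	/* succeeds only if no readers and no writer were present */
	if (atomic_fetch_sub(&rw->count, RW_LOCK_BIAS) - RW_LOCK_BIAS == 0)
		return true;
	atomic_fetch_add(&rw->count, RW_LOCK_BIAS);
	return false;
}

static void write_unlock_sketch(rwlock_sketch_t *rw)
{
	atomic_fetch_add(&rw->count, RW_LOCK_BIAS);
}

int main(void)
{
	rwlock_sketch_t rw = RWLOCK_SKETCH_INIT;

	assert(read_trylock_sketch(&rw));    /* reader gets in */
	assert(!write_trylock_sketch(&rw));  /* writer blocked by reader */
	read_unlock_sketch(&rw);
	assert(write_trylock_sketch(&rw));   /* writer now succeeds */
	assert(!read_trylock_sketch(&rw));   /* reader blocked by writer */
	write_unlock_sketch(&rw);
	return 0;
}
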
diff --git a/arch/mn10300/include/asm/spinlock_types.h b/arch/mn10300/include/asm/spinlock_types.h
new file mode 100644
index 000000000000..653dc519b405
--- /dev/null
+++ b/arch/mn10300/include/asm/spinlock_types.h
@@ -0,0 +1,20 @@
+#ifndef _ASM_SPINLOCK_TYPES_H
+#define _ASM_SPINLOCK_TYPES_H
+
+#ifndef __LINUX_SPINLOCK_TYPES_H
+# error "please don't include this file directly"
+#endif
+
+typedef struct arch_spinlock {
+ unsigned int slock;
+} arch_spinlock_t;
+
+#define __ARCH_SPIN_LOCK_UNLOCKED { 0 }
+
+typedef struct {
+ unsigned int lock;
+} arch_rwlock_t;
+
+#define __ARCH_RW_LOCK_UNLOCKED { RW_LOCK_BIAS }
+
+#endif /* _ASM_SPINLOCK_TYPES_H */
diff --git a/arch/mn10300/include/asm/syscall.h b/arch/mn10300/include/asm/syscall.h
new file mode 100644
index 000000000000..b44b0bb75a01
--- /dev/null
+++ b/arch/mn10300/include/asm/syscall.h
@@ -0,0 +1,117 @@
+/* Access to user system call parameters and results
+ *
+ * See asm-generic/syscall.h for function descriptions.
+ *
+ * Copyright (C) 2010 Red Hat, Inc. All Rights Reserved.
+ * Written by David Howells (dhowells@redhat.com)
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public Licence
+ * as published by the Free Software Foundation; either version
+ * 2 of the Licence, or (at your option) any later version.
+ */
+
+#ifndef _ASM_SYSCALL_H
+#define _ASM_SYSCALL_H
+
+#include <linux/sched.h>
+#include <linux/err.h>
+
+extern const unsigned long sys_call_table[];
+
+static inline int syscall_get_nr(struct task_struct *task, struct pt_regs *regs)
+{
+ return regs->orig_d0;
+}
+
+static inline void syscall_rollback(struct task_struct *task,
+ struct pt_regs *regs)
+{
+ regs->d0 = regs->orig_d0;
+}
+
+static inline long syscall_get_error(struct task_struct *task,
+ struct pt_regs *regs)
+{
+ unsigned long error = regs->d0;
+ return IS_ERR_VALUE(error) ? error : 0;
+}
+
+static inline long syscall_get_return_value(struct task_struct *task,
+ struct pt_regs *regs)
+{
+ return regs->d0;
+}
+
+static inline void syscall_set_return_value(struct task_struct *task,
+ struct pt_regs *regs,
+ int error, long val)
+{
+ regs->d0 = (long) error ?: val;
+}
+
+static inline void syscall_get_arguments(struct task_struct *task,
+ struct pt_regs *regs,
+ unsigned int i, unsigned int n,
+ unsigned long *args)
+{
+ switch (i) {
+ case 0:
+ if (!n--) break;
+ *args++ = regs->a0;
+ case 1:
+ if (!n--) break;
+ *args++ = regs->d1;
+ case 2:
+ if (!n--) break;
+ *args++ = regs->a3;
+ case 3:
+ if (!n--) break;
+ *args++ = regs->a2;
+ case 4:
+ if (!n--) break;
+ *args++ = regs->d3;
+ case 5:
+ if (!n--) break;
+ *args++ = regs->d2;
+ case 6:
+ if (!n--) break;
+ default:
+ BUG();
+ break;
+ }
+}
+
+static inline void syscall_set_arguments(struct task_struct *task,
+ struct pt_regs *regs,
+ unsigned int i, unsigned int n,
+ const unsigned long *args)
+{
+ switch (i) {
+ case 0:
+ if (!n--) break;
+ regs->a0 = *args++;
+ case 1:
+ if (!n--) break;
+ regs->d1 = *args++;
+ case 2:
+ if (!n--) break;
+ regs->a3 = *args++;
+ case 3:
+ if (!n--) break;
+ regs->a2 = *args++;
+ case 4:
+ if (!n--) break;
+ regs->d3 = *args++;
+ case 5:
+ if (!n--) break;
+ regs->d2 = *args++;
+ case 6:
+ if (!n--) break;
+ default:
+ BUG();
+ break;
+ }
+}
+
+#endif /* _ASM_SYSCALL_H */
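
[Editorial sketch] syscall_get_arguments() and syscall_set_arguments() above copy a window of n syscall arguments starting at index i from the a0/d1/a3/a2/d3/d2 register slots, using deliberate switch fall-through. The host-side sketch below shows the same windowing with a mocked register array; the struct and names are illustrative, not the kernel's pt_regs.

/* argument-window copy modelled with a plain array instead of pt_regs */
#include <assert.h>

#define SYSCALL_MAX_ARGS 6

struct mock_regs {
	unsigned long arg[SYSCALL_MAX_ARGS];   /* stands in for a0, d1, a3, a2, d3, d2 */
};

static void get_arguments_sketch(const struct mock_regs *regs,
				 unsigned int i, unsigned int n,
				 unsigned long *args)
{
	assert(i + n <= SYSCALL_MAX_ARGS);
	for (unsigned int k = 0; k < n; k++)
		args[k] = regs->arg[i + k];
}

int main(void)
{
	struct mock_regs regs = { { 10, 11, 12, 13, 14, 15 } };
	unsigned long out[3];

	get_arguments_sketch(&regs, 2, 3, out);   /* copy arguments 2..4 */
	assert(out[0] == 12 && out[1] == 13 && out[2] == 14);
	return 0;
}
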
diff --git a/arch/mn10300/include/asm/system.h b/arch/mn10300/include/asm/system.h
index 9f7c7e17c01e..8ff3e5aaca41 100644
--- a/arch/mn10300/include/asm/system.h
+++ b/arch/mn10300/include/asm/system.h
@@ -12,12 +12,29 @@
#define _ASM_SYSTEM_H
#include <asm/cpu-regs.h>
+#include <asm/intctl-regs.h>
#ifdef __KERNEL__
#ifndef __ASSEMBLY__
#include <linux/kernel.h>
#include <linux/irqflags.h>
+#include <asm/atomic.h>
+
+#if !defined(CONFIG_LAZY_SAVE_FPU)
+struct fpu_state_struct;
+extern asmlinkage void fpu_save(struct fpu_state_struct *);
+#define switch_fpu(prev, next) \
+ do { \
+ if ((prev)->thread.fpu_flags & THREAD_HAS_FPU) { \
+ (prev)->thread.fpu_flags &= ~THREAD_HAS_FPU; \
+ (prev)->thread.uregs->epsw &= ~EPSW_FE; \
+ fpu_save(&(prev)->thread.fpu_state); \
+ } \
+ } while (0)
+#else
+#define switch_fpu(prev, next) do {} while (0)
+#endif
struct task_struct;
struct thread_struct;
@@ -30,6 +47,7 @@ struct task_struct *__switch_to(struct thread_struct *prev,
/* context switching is now performed out-of-line in switch_to.S */
#define switch_to(prev, next, last) \
do { \
+ switch_fpu(prev, next); \
current->thread.wchan = (u_long) __builtin_return_address(0); \
(last) = __switch_to(&(prev)->thread, &(next)->thread, (prev)); \
mb(); \
@@ -40,8 +58,6 @@ do { \
#define nop() asm volatile ("nop")
-#endif /* !__ASSEMBLY__ */
-
/*
* Force strict CPU ordering.
* And yes, this is required on UP too when we're talking
@@ -68,64 +84,19 @@ do { \
#define smp_mb() mb()
#define smp_rmb() rmb()
#define smp_wmb() wmb()
-#else
+#define set_mb(var, value) do { xchg(&var, value); } while (0)
+#else /* CONFIG_SMP */
#define smp_mb() barrier()
#define smp_rmb() barrier()
#define smp_wmb() barrier()
-#endif
-
#define set_mb(var, value) do { var = value; mb(); } while (0)
+#endif /* CONFIG_SMP */
+
#define set_wmb(var, value) do { var = value; wmb(); } while (0)
#define read_barrier_depends() do {} while (0)
#define smp_read_barrier_depends() do {} while (0)
-/*****************************************************************************/
-/*
- * MN10300 doesn't actually have an exchange instruction
- */
-#ifndef __ASSEMBLY__
-
-struct __xchg_dummy { unsigned long a[100]; };
-#define __xg(x) ((struct __xchg_dummy *)(x))
-
-static inline
-unsigned long __xchg(volatile unsigned long *m, unsigned long val)
-{
- unsigned long retval;
- unsigned long flags;
-
- local_irq_save(flags);
- retval = *m;
- *m = val;
- local_irq_restore(flags);
- return retval;
-}
-
-#define xchg(ptr, v) \
- ((__typeof__(*(ptr))) __xchg((unsigned long *)(ptr), \
- (unsigned long)(v)))
-
-static inline unsigned long __cmpxchg(volatile unsigned long *m,
- unsigned long old, unsigned long new)
-{
- unsigned long retval;
- unsigned long flags;
-
- local_irq_save(flags);
- retval = *m;
- if (retval == old)
- *m = new;
- local_irq_restore(flags);
- return retval;
-}
-
-#define cmpxchg(ptr, o, n) \
- ((__typeof__(*(ptr))) __cmpxchg((unsigned long *)(ptr), \
- (unsigned long)(o), \
- (unsigned long)(n)))
-
#endif /* !__ASSEMBLY__ */
-
#endif /* __KERNEL__ */
#endif /* _ASM_SYSTEM_H */
diff --git a/arch/mn10300/include/asm/thread_info.h b/arch/mn10300/include/asm/thread_info.h
index 2001cb657a95..aa07a4a5d794 100644
--- a/arch/mn10300/include/asm/thread_info.h
+++ b/arch/mn10300/include/asm/thread_info.h
@@ -16,10 +16,6 @@
#include <asm/page.h>
-#ifndef __ASSEMBLY__
-#include <asm/processor.h>
-#endif
-
#define PREEMPT_ACTIVE 0x10000000
#ifdef CONFIG_4KSTACKS
@@ -38,10 +34,14 @@
* must also be changed
*/
#ifndef __ASSEMBLY__
+typedef struct {
+ unsigned long seg;
+} mm_segment_t;
struct thread_info {
struct task_struct *task; /* main task structure */
struct exec_domain *exec_domain; /* execution domain */
+ struct pt_regs *frame; /* current exception frame */
unsigned long flags; /* low level flags */
__u32 cpu; /* current CPU */
__s32 preempt_count; /* 0 => preemptable, <0 => BUG */
@@ -55,6 +55,10 @@ struct thread_info {
__u8 supervisor_stack[0];
};
+#define thread_info_to_uregs(ti) \
+ ((struct pt_regs *) \
+ ((unsigned long)ti + THREAD_SIZE - sizeof(struct pt_regs)))
+
#else /* !__ASSEMBLY__ */
#ifndef __ASM_OFFSETS_H__
@@ -102,6 +106,12 @@ struct thread_info *current_thread_info(void)
return ti;
}
+static inline __attribute__((const))
+struct pt_regs *current_frame(void)
+{
+ return current_thread_info()->frame;
+}
+
/* how to get the current stack pointer from C */
static inline unsigned long current_stack_pointer(void)
{
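
[Editorial sketch] thread_info_to_uregs() above locates the initial exception frame at the very top of the kernel stack: the thread_info base plus THREAD_SIZE minus sizeof(struct pt_regs). The small sketch below shows that arithmetic; the 4KB stack size and 4-byte register width are assumptions for illustration (NR_PTREGS is 27 per the ptrace.h diff).

/* stack-top exception frame location, with placeholder sizes */
#include <stdio.h>

#define THREAD_SIZE_SKETCH   4096u          /* assumed 4K kernel stack (CONFIG_4KSTACKS) */
#define PT_REGS_SIZE_SKETCH  (27u * 4u)     /* NR_PTREGS registers of 4 bytes each */

static unsigned long uregs_from_thread_info(unsigned long ti_base)
{
	return ti_base + THREAD_SIZE_SKETCH - PT_REGS_SIZE_SKETCH;
}

int main(void)
{
	unsigned long ti = 0x90001000ul;   /* hypothetical thread_info address */
	printf("pt_regs frame at %#lx\n", uregs_from_thread_info(ti));
	return 0;
}
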
diff --git a/arch/mn10300/include/asm/timer-regs.h b/arch/mn10300/include/asm/timer-regs.h
index 1d883b7f94ab..c634977caf66 100644
--- a/arch/mn10300/include/asm/timer-regs.h
+++ b/arch/mn10300/include/asm/timer-regs.h
@@ -17,21 +17,27 @@
#ifdef __KERNEL__
-/* timer prescalar control */
+/*
+ * Timer prescaler control
+ */
#define TMPSCNT __SYSREG(0xd4003071, u8) /* timer prescaler control */
#define TMPSCNT_ENABLE 0x80 /* timer prescaler enable */
#define TMPSCNT_DISABLE 0x00 /* timer prescaler disable */
-/* 8 bit timers */
+/*
+ * 8-bit timers
+ */
#define TM0MD __SYSREG(0xd4003000, u8) /* timer 0 mode register */
#define TM0MD_SRC 0x07 /* timer source */
#define TM0MD_SRC_IOCLK 0x00 /* - IOCLK */
#define TM0MD_SRC_IOCLK_8 0x01 /* - 1/8 IOCLK */
#define TM0MD_SRC_IOCLK_32 0x02 /* - 1/32 IOCLK */
-#define TM0MD_SRC_TM2IO 0x03 /* - TM2IO pin input */
#define TM0MD_SRC_TM1UFLOW 0x05 /* - timer 1 underflow */
#define TM0MD_SRC_TM2UFLOW 0x06 /* - timer 2 underflow */
+#if defined(CONFIG_AM33_2)
+#define TM0MD_SRC_TM2IO 0x03 /* - TM2IO pin input */
#define TM0MD_SRC_TM0IO 0x07 /* - TM0IO pin input */
+#endif /* CONFIG_AM33_2 */
#define TM0MD_INIT_COUNTER 0x40 /* initialize TMnBC = TMnBR */
#define TM0MD_COUNT_ENABLE 0x80 /* timer count enable */
@@ -43,7 +49,9 @@
#define TM1MD_SRC_TM0CASCADE 0x03 /* - cascade with timer 0 */
#define TM1MD_SRC_TM0UFLOW 0x04 /* - timer 0 underflow */
#define TM1MD_SRC_TM2UFLOW 0x06 /* - timer 2 underflow */
+#if defined(CONFIG_AM33_2)
#define TM1MD_SRC_TM1IO 0x07 /* - TM1IO pin input */
+#endif /* CONFIG_AM33_2 */
#define TM1MD_INIT_COUNTER 0x40 /* initialize TMnBC = TMnBR */
#define TM1MD_COUNT_ENABLE 0x80 /* timer count enable */
@@ -55,7 +63,9 @@
#define TM2MD_SRC_TM1CASCADE 0x03 /* - cascade with timer 1 */
#define TM2MD_SRC_TM0UFLOW 0x04 /* - timer 0 underflow */
#define TM2MD_SRC_TM1UFLOW 0x05 /* - timer 1 underflow */
+#if defined(CONFIG_AM33_2)
#define TM2MD_SRC_TM2IO 0x07 /* - TM2IO pin input */
+#endif /* CONFIG_AM33_2 */
#define TM2MD_INIT_COUNTER 0x40 /* initialize TMnBC = TMnBR */
#define TM2MD_COUNT_ENABLE 0x80 /* timer count enable */
@@ -64,11 +74,13 @@
#define TM3MD_SRC_IOCLK 0x00 /* - IOCLK */
#define TM3MD_SRC_IOCLK_8 0x01 /* - 1/8 IOCLK */
#define TM3MD_SRC_IOCLK_32 0x02 /* - 1/32 IOCLK */
-#define TM3MD_SRC_TM1CASCADE 0x03 /* - cascade with timer 2 */
+#define TM3MD_SRC_TM2CASCADE 0x03 /* - cascade with timer 2 */
#define TM3MD_SRC_TM0UFLOW 0x04 /* - timer 0 underflow */
#define TM3MD_SRC_TM1UFLOW 0x05 /* - timer 1 underflow */
#define TM3MD_SRC_TM2UFLOW 0x06 /* - timer 2 underflow */
+#if defined(CONFIG_AM33_2)
#define TM3MD_SRC_TM3IO 0x07 /* - TM3IO pin input */
+#endif /* CONFIG_AM33_2 */
#define TM3MD_INIT_COUNTER 0x40 /* initialize TMnBC = TMnBR */
#define TM3MD_COUNT_ENABLE 0x80 /* timer count enable */
@@ -96,7 +108,9 @@
#define TM2ICR GxICR(TM2IRQ) /* timer 2 uflow intr ctrl reg */
#define TM3ICR GxICR(TM3IRQ) /* timer 3 uflow intr ctrl reg */
-/* 16-bit timers 4,5 & 7-11 */
+/*
+ * 16-bit timers 4,5 & 7-15
+ */
#define TM4MD __SYSREG(0xd4003080, u8) /* timer 4 mode register */
#define TM4MD_SRC 0x07 /* timer source */
#define TM4MD_SRC_IOCLK 0x00 /* - IOCLK */
@@ -105,7 +119,9 @@
#define TM4MD_SRC_TM0UFLOW 0x04 /* - timer 0 underflow */
#define TM4MD_SRC_TM1UFLOW 0x05 /* - timer 1 underflow */
#define TM4MD_SRC_TM2UFLOW 0x06 /* - timer 2 underflow */
+#if defined(CONFIG_AM33_2)
#define TM4MD_SRC_TM4IO 0x07 /* - TM4IO pin input */
+#endif /* CONFIG_AM33_2 */
#define TM4MD_INIT_COUNTER 0x40 /* initialize TMnBC = TMnBR */
#define TM4MD_COUNT_ENABLE 0x80 /* timer count enable */
@@ -118,7 +134,11 @@
#define TM5MD_SRC_TM0UFLOW 0x04 /* - timer 0 underflow */
#define TM5MD_SRC_TM1UFLOW 0x05 /* - timer 1 underflow */
#define TM5MD_SRC_TM2UFLOW 0x06 /* - timer 2 underflow */
+#if defined(CONFIG_AM33_2)
#define TM5MD_SRC_TM5IO 0x07 /* - TM5IO pin input */
+#else /* !CONFIG_AM33_2 */
+#define TM5MD_SRC_TM7UFLOW 0x07 /* - timer 7 underflow */
+#endif /* CONFIG_AM33_2 */
#define TM5MD_INIT_COUNTER 0x40 /* initialize TMnBC = TMnBR */
#define TM5MD_COUNT_ENABLE 0x80 /* timer count enable */
@@ -130,7 +150,9 @@
#define TM7MD_SRC_TM0UFLOW 0x04 /* - timer 0 underflow */
#define TM7MD_SRC_TM1UFLOW 0x05 /* - timer 1 underflow */
#define TM7MD_SRC_TM2UFLOW 0x06 /* - timer 2 underflow */
+#if defined(CONFIG_AM33_2)
#define TM7MD_SRC_TM7IO 0x07 /* - TM7IO pin input */
+#endif /* CONFIG_AM33_2 */
#define TM7MD_INIT_COUNTER 0x40 /* initialize TMnBC = TMnBR */
#define TM7MD_COUNT_ENABLE 0x80 /* timer count enable */
@@ -143,7 +165,11 @@
#define TM8MD_SRC_TM0UFLOW 0x04 /* - timer 0 underflow */
#define TM8MD_SRC_TM1UFLOW 0x05 /* - timer 1 underflow */
#define TM8MD_SRC_TM2UFLOW 0x06 /* - timer 2 underflow */
+#if defined(CONFIG_AM33_2)
#define TM8MD_SRC_TM8IO 0x07 /* - TM8IO pin input */
+#else /* !CONFIG_AM33_2 */
+#define TM8MD_SRC_TM7UFLOW 0x07 /* - timer 7 underflow */
+#endif /* CONFIG_AM33_2 */
#define TM8MD_INIT_COUNTER 0x40 /* initialize TMnBC = TMnBR */
#define TM8MD_COUNT_ENABLE 0x80 /* timer count enable */
@@ -156,7 +182,11 @@
#define TM9MD_SRC_TM0UFLOW 0x04 /* - timer 0 underflow */
#define TM9MD_SRC_TM1UFLOW 0x05 /* - timer 1 underflow */
#define TM9MD_SRC_TM2UFLOW 0x06 /* - timer 2 underflow */
+#if defined(CONFIG_AM33_2)
#define TM9MD_SRC_TM9IO 0x07 /* - TM9IO pin input */
+#else /* !CONFIG_AM33_2 */
+#define TM9MD_SRC_TM7UFLOW 0x07 /* - timer 7 underflow */
+#endif /* CONFIG_AM33_2 */
#define TM9MD_INIT_COUNTER 0x40 /* initialize TMnBC = TMnBR */
#define TM9MD_COUNT_ENABLE 0x80 /* timer count enable */
@@ -169,7 +199,11 @@
#define TM10MD_SRC_TM0UFLOW 0x04 /* - timer 0 underflow */
#define TM10MD_SRC_TM1UFLOW 0x05 /* - timer 1 underflow */
#define TM10MD_SRC_TM2UFLOW 0x06 /* - timer 2 underflow */
+#if defined(CONFIG_AM33_2)
#define TM10MD_SRC_TM10IO 0x07 /* - TM10IO pin input */
+#else /* !CONFIG_AM33_2 */
+#define TM10MD_SRC_TM7UFLOW 0x07 /* - timer 7 underflow */
+#endif /* CONFIG_AM33_2 */
#define TM10MD_INIT_COUNTER 0x40 /* initialize TMnBC = TMnBR */
#define TM10MD_COUNT_ENABLE 0x80 /* timer count enable */
@@ -178,32 +212,101 @@
#define TM11MD_SRC_IOCLK 0x00 /* - IOCLK */
#define TM11MD_SRC_IOCLK_8 0x01 /* - 1/8 IOCLK */
#define TM11MD_SRC_IOCLK_32 0x02 /* - 1/32 IOCLK */
-#define TM11MD_SRC_TM7CASCADE 0x03 /* - cascade with timer 7 */
#define TM11MD_SRC_TM0UFLOW 0x04 /* - timer 0 underflow */
#define TM11MD_SRC_TM1UFLOW 0x05 /* - timer 1 underflow */
#define TM11MD_SRC_TM2UFLOW 0x06 /* - timer 2 underflow */
+#if defined(CONFIG_AM33_2)
#define TM11MD_SRC_TM11IO 0x07 /* - TM11IO pin input */
+#else /* !CONFIG_AM33_2 */
+#define TM11MD_SRC_TM7UFLOW 0x07 /* - timer 7 underflow */
+#endif /* CONFIG_AM33_2 */
#define TM11MD_INIT_COUNTER 0x40 /* initialize TMnBC = TMnBR */
#define TM11MD_COUNT_ENABLE 0x80 /* timer count enable */
+#if defined(CONFIG_AM34_2)
+#define TM12MD __SYSREG(0xd4003180, u8) /* timer 12 mode register */
+#define TM12MD_SRC 0x07 /* timer source */
+#define TM12MD_SRC_IOCLK 0x00 /* - IOCLK */
+#define TM12MD_SRC_IOCLK_8 0x01 /* - 1/8 IOCLK */
+#define TM12MD_SRC_IOCLK_32 0x02 /* - 1/32 IOCLK */
+#define TM12MD_SRC_TM0UFLOW 0x04 /* - timer 0 underflow */
+#define TM12MD_SRC_TM1UFLOW 0x05 /* - timer 1 underflow */
+#define TM12MD_SRC_TM2UFLOW 0x06 /* - timer 2 underflow */
+#define TM12MD_SRC_TM7UFLOW 0x07 /* - timer 7 underflow */
+#define TM12MD_INIT_COUNTER 0x40 /* initialize TMnBC = TMnBR */
+#define TM12MD_COUNT_ENABLE 0x80 /* timer count enable */
+
+#define TM13MD __SYSREG(0xd4003182, u8) /* timer 13 mode register */
+#define TM13MD_SRC 0x07 /* timer source */
+#define TM13MD_SRC_IOCLK 0x00 /* - IOCLK */
+#define TM13MD_SRC_IOCLK_8 0x01 /* - 1/8 IOCLK */
+#define TM13MD_SRC_IOCLK_32 0x02 /* - 1/32 IOCLK */
+#define TM13MD_SRC_TM12CASCADE 0x03 /* - cascade with timer 12 */
+#define TM13MD_SRC_TM0UFLOW 0x04 /* - timer 0 underflow */
+#define TM13MD_SRC_TM1UFLOW 0x05 /* - timer 1 underflow */
+#define TM13MD_SRC_TM2UFLOW 0x06 /* - timer 2 underflow */
+#define TM13MD_SRC_TM7UFLOW 0x07 /* - timer 7 underflow */
+#define TM13MD_INIT_COUNTER 0x40 /* initialize TMnBC = TMnBR */
+#define TM13MD_COUNT_ENABLE 0x80 /* timer count enable */
+
+#define TM14MD __SYSREG(0xd4003184, u8) /* timer 14 mode register */
+#define TM14MD_SRC 0x07 /* timer source */
+#define TM14MD_SRC_IOCLK 0x00 /* - IOCLK */
+#define TM14MD_SRC_IOCLK_8 0x01 /* - 1/8 IOCLK */
+#define TM14MD_SRC_IOCLK_32 0x02 /* - 1/32 IOCLK */
+#define TM14MD_SRC_TM13CASCADE 0x03 /* - cascade with timer 13 */
+#define TM14MD_SRC_TM0UFLOW 0x04 /* - timer 0 underflow */
+#define TM14MD_SRC_TM1UFLOW 0x05 /* - timer 1 underflow */
+#define TM14MD_SRC_TM2UFLOW 0x06 /* - timer 2 underflow */
+#define TM14MD_SRC_TM7UFLOW 0x07 /* - timer 7 underflow */
+#define TM14MD_INIT_COUNTER 0x40 /* initialize TMnBC = TMnBR */
+#define TM14MD_COUNT_ENABLE 0x80 /* timer count enable */
+
+#define TM15MD __SYSREG(0xd4003186, u8) /* timer 15 mode register */
+#define TM15MD_SRC 0x07 /* timer source */
+#define TM15MD_SRC_IOCLK 0x00 /* - IOCLK */
+#define TM15MD_SRC_IOCLK_8 0x01 /* - 1/8 IOCLK */
+#define TM15MD_SRC_IOCLK_32 0x02 /* - 1/32 IOCLK */
+#define TM15MD_SRC_TM0UFLOW 0x04 /* - timer 0 underflow */
+#define TM15MD_SRC_TM1UFLOW 0x05 /* - timer 1 underflow */
+#define TM15MD_SRC_TM2UFLOW 0x06 /* - timer 2 underflow */
+#define TM15MD_SRC_TM7UFLOW 0x07 /* - timer 7 underflow */
+#define TM15MD_INIT_COUNTER 0x40 /* initialize TMnBC = TMnBR */
+#define TM15MD_COUNT_ENABLE 0x80 /* timer count enable */
+#endif /* CONFIG_AM34_2 */
+
+
#define TM4BR __SYSREG(0xd4003090, u16) /* timer 4 base register */
#define TM5BR __SYSREG(0xd4003092, u16) /* timer 5 base register */
+#define TM45BR __SYSREG(0xd4003090, u32) /* timer 4:5 base register */
#define TM7BR __SYSREG(0xd4003096, u16) /* timer 7 base register */
#define TM8BR __SYSREG(0xd4003098, u16) /* timer 8 base register */
#define TM9BR __SYSREG(0xd400309a, u16) /* timer 9 base register */
+#define TM89BR __SYSREG(0xd4003098, u32) /* timer 8:9 base register */
#define TM10BR __SYSREG(0xd400309c, u16) /* timer 10 base register */
#define TM11BR __SYSREG(0xd400309e, u16) /* timer 11 base register */
-#define TM45BR __SYSREG(0xd4003090, u32) /* timer 4:5 base register */
+#if defined(CONFIG_AM34_2)
+#define TM12BR __SYSREG(0xd4003190, u16) /* timer 12 base register */
+#define TM13BR __SYSREG(0xd4003192, u16) /* timer 13 base register */
+#define TM14BR __SYSREG(0xd4003194, u16) /* timer 14 base register */
+#define TM15BR __SYSREG(0xd4003196, u16) /* timer 15 base register */
+#endif /* CONFIG_AM34_2 */
#define TM4BC __SYSREG(0xd40030a0, u16) /* timer 4 binary counter */
#define TM5BC __SYSREG(0xd40030a2, u16) /* timer 5 binary counter */
#define TM45BC __SYSREG(0xd40030a0, u32) /* timer 4:5 binary counter */
-
#define TM7BC __SYSREG(0xd40030a6, u16) /* timer 7 binary counter */
#define TM8BC __SYSREG(0xd40030a8, u16) /* timer 8 binary counter */
#define TM9BC __SYSREG(0xd40030aa, u16) /* timer 9 binary counter */
+#define TM89BC __SYSREG(0xd40030a8, u32) /* timer 8:9 binary counter */
#define TM10BC __SYSREG(0xd40030ac, u16) /* timer 10 binary counter */
#define TM11BC __SYSREG(0xd40030ae, u16) /* timer 11 binary counter */
+#if defined(CONFIG_AM34_2)
+#define TM12BC __SYSREG(0xd40031a0, u16) /* timer 12 binary counter */
+#define TM13BC __SYSREG(0xd40031a2, u16) /* timer 13 binary counter */
+#define TM14BC __SYSREG(0xd40031a4, u16) /* timer 14 binary counter */
+#define TM15BC __SYSREG(0xd40031a6, u16) /* timer 15 binary counter */
+#endif /* CONFIG_AM34_2 */
#define TM4IRQ 6 /* timer 4 IRQ */
#define TM5IRQ 7 /* timer 5 IRQ */
@@ -212,6 +315,12 @@
#define TM9IRQ 13 /* timer 9 IRQ */
#define TM10IRQ 14 /* timer 10 IRQ */
#define TM11IRQ 15 /* timer 11 IRQ */
+#if defined(CONFIG_AM34_2)
+#define TM12IRQ 64 /* timer 12 IRQ */
+#define TM13IRQ 65 /* timer 13 IRQ */
+#define TM14IRQ 66 /* timer 14 IRQ */
+#define TM15IRQ 67 /* timer 15 IRQ */
+#endif /* CONFIG_AM34_2 */
#define TM4ICR GxICR(TM4IRQ) /* timer 4 uflow intr ctrl reg */
#define TM5ICR GxICR(TM5IRQ) /* timer 5 uflow intr ctrl reg */
@@ -220,8 +329,16 @@
#define TM9ICR GxICR(TM9IRQ) /* timer 9 uflow intr ctrl reg */
#define TM10ICR GxICR(TM10IRQ) /* timer 10 uflow intr ctrl reg */
#define TM11ICR GxICR(TM11IRQ) /* timer 11 uflow intr ctrl reg */
-
-/* 16-bit timer 6 */
+#if defined(CONFIG_AM34_2)
+#define TM12ICR GxICR(TM12IRQ) /* timer 12 uflow intr ctrl reg */
+#define TM13ICR GxICR(TM13IRQ) /* timer 13 uflow intr ctrl reg */
+#define TM14ICR GxICR(TM14IRQ) /* timer 14 uflow intr ctrl reg */
+#define TM15ICR GxICR(TM15IRQ) /* timer 15 uflow intr ctrl reg */
+#endif /* CONFIG_AM34_2 */
+
+/*
+ * 16-bit timer 6
+ */
#define TM6MD __SYSREG(0xd4003084, u16) /* timer6 mode register */
#define TM6MD_SRC 0x0007 /* timer source */
#define TM6MD_SRC_IOCLK 0x0000 /* - IOCLK */
@@ -229,10 +346,14 @@
#define TM6MD_SRC_IOCLK_32 0x0002 /* - 1/32 IOCLK */
#define TM6MD_SRC_TM0UFLOW 0x0004 /* - timer 0 underflow */
#define TM6MD_SRC_TM1UFLOW 0x0005 /* - timer 1 underflow */
-#define TM6MD_SRC_TM6IOB_BOTH 0x0006 /* - TM6IOB pin input (both edges) */
+#define TM6MD_SRC_TM2UFLOW 0x0006 /* - timer 2 underflow */
+#if defined(CONFIG_AM33_2)
+/* #define TM6MD_SRC_TM6IOB_BOTH 0x0006 */ /* - TM6IOB pin input (both edges) */
#define TM6MD_SRC_TM6IOB_SINGLE 0x0007 /* - TM6IOB pin input (single edge) */
-#define TM6MD_CLR_ENABLE 0x0010 /* clear count enable */
+#endif /* CONFIG_AM33_2 */
#define TM6MD_ONESHOT_ENABLE 0x0040 /* oneshot count */
+#define TM6MD_CLR_ENABLE 0x0010 /* clear count enable */
+#if defined(CONFIG_AM33_2)
#define TM6MD_TRIG_ENABLE 0x0080 /* TM6IOB pin trigger enable */
#define TM6MD_PWM 0x3800 /* PWM output mode */
#define TM6MD_PWM_DIS 0x0000 /* - disabled */
@@ -240,10 +361,15 @@
#define TM6MD_PWM_11BIT 0x1800 /* - 11 bits mode */
#define TM6MD_PWM_12BIT 0x3000 /* - 12 bits mode */
#define TM6MD_PWM_14BIT 0x3800 /* - 14 bits mode */
+#endif /* CONFIG_AM33_2 */
+
#define TM6MD_INIT_COUNTER 0x4000 /* initialize TMnBC to zero */
#define TM6MD_COUNT_ENABLE 0x8000 /* timer count enable */
#define TM6MDA __SYSREG(0xd40030b4, u8) /* timer6 cmp/cap A mode reg */
+#define TM6MDA_MODE_CMP_SINGLE 0x00 /* - compare, single buffer mode */
+#define TM6MDA_MODE_CMP_DOUBLE 0x40 /* - compare, double buffer mode */
+#if defined(CONFIG_AM33_2)
#define TM6MDA_OUT 0x07 /* output select */
#define TM6MDA_OUT_SETA_RESETB 0x00 /* - set at match A, reset at match B */
#define TM6MDA_OUT_SETA_RESETOV 0x01 /* - set at match A, reset at overflow */
@@ -251,30 +377,35 @@
#define TM6MDA_OUT_RESETA 0x03 /* - reset at match A */
#define TM6MDA_OUT_TOGGLE 0x04 /* - toggle on match A */
#define TM6MDA_MODE 0xc0 /* compare A register mode */
-#define TM6MDA_MODE_CMP_SINGLE 0x00 /* - compare, single buffer mode */
-#define TM6MDA_MODE_CMP_DOUBLE 0x40 /* - compare, double buffer mode */
#define TM6MDA_MODE_CAP_S_EDGE 0x80 /* - capture, single edge mode */
#define TM6MDA_MODE_CAP_D_EDGE 0xc0 /* - capture, double edge mode */
#define TM6MDA_EDGE 0x20 /* compare A edge select */
#define TM6MDA_EDGE_FALLING 0x00 /* capture on falling edge */
#define TM6MDA_EDGE_RISING 0x20 /* capture on rising edge */
#define TM6MDA_CAPTURE_ENABLE 0x10 /* capture enable */
+#else /* !CONFIG_AM33_2 */
+#define TM6MDA_MODE 0x40 /* compare A register mode */
+#endif /* CONFIG_AM33_2 */
#define TM6MDB __SYSREG(0xd40030b5, u8) /* timer6 cmp/cap B mode reg */
+#define TM6MDB_MODE_CMP_SINGLE 0x00 /* - compare, single buffer mode */
+#define TM6MDB_MODE_CMP_DOUBLE 0x40 /* - compare, double buffer mode */
+#if defined(CONFIG_AM33_2)
#define TM6MDB_OUT 0x07 /* output select */
#define TM6MDB_OUT_SETB_RESETA 0x00 /* - set at match B, reset at match A */
#define TM6MDB_OUT_SETB_RESETOV 0x01 /* - set at match B */
#define TM6MDB_OUT_RESETB 0x03 /* - reset at match B */
#define TM6MDB_OUT_TOGGLE 0x04 /* - toggle on match B */
#define TM6MDB_MODE 0xc0 /* compare B register mode */
-#define TM6MDB_MODE_CMP_SINGLE 0x00 /* - compare, single buffer mode */
-#define TM6MDB_MODE_CMP_DOUBLE 0x40 /* - compare, double buffer mode */
#define TM6MDB_MODE_CAP_S_EDGE 0x80 /* - capture, single edge mode */
#define TM6MDB_MODE_CAP_D_EDGE 0xc0 /* - capture, double edge mode */
#define TM6MDB_EDGE 0x20 /* compare B edge select */
#define TM6MDB_EDGE_FALLING 0x00 /* capture on falling edge */
#define TM6MDB_EDGE_RISING 0x20 /* capture on rising edge */
#define TM6MDB_CAPTURE_ENABLE 0x10 /* capture enable */
+#else /* !CONFIG_AM33_2 */
+#define TM6MDB_MODE 0x40 /* compare B register mode */
+#endif /* CONFIG_AM33_2 */
#define TM6CA __SYSREG(0xd40030c4, u16) /* timer6 cmp/capture reg A */
#define TM6CB __SYSREG(0xd40030d4, u16) /* timer6 cmp/capture reg B */
@@ -288,6 +419,34 @@
#define TM6AICR GxICR(TM6AIRQ) /* timer 6A intr control reg */
#define TM6BICR GxICR(TM6BIRQ) /* timer 6B intr control reg */
+#if defined(CONFIG_AM34_2)
+/*
+ * MTM: OS Tick-Timer
+ */
+#define TMTMD __SYSREG(0xd4004100, u8) /* Tick Timer mode register */
+#define TMTMD_TMTLDE 0x40 /* initialize TMTBC = TMTBR */
+#define TMTMD_TMTCNE 0x80 /* timer count enable */
+
+#define TMTBR __SYSREG(0xd4004110, u32) /* Tick Timer base reg */
+#define TMTBC __SYSREG(0xd4004120, u32) /* Tick Timer count reg */
+
+/*
+ * MTM: OS Timestamp-Timer
+ */
+#define TMSMD __SYSREG(0xd4004140, u8) /* Timestamp Timer mode register */
+#define TMSMD_TMSLDE 0x40 /* initialize TMSBC = TMSBR */
+#define TMSMD_TMSCNE 0x80 /* timer count enable */
+
+#define TMSBR __SYSREG(0xd4004150, u32) /* Timestamp Timer base reg */
+#define TMSBC __SYSREG(0xd4004160, u32) /* Timestamp Timer count reg */
+
+#define TMTIRQ 119 /* OS Tick timer IRQ */
+#define TMSIRQ 120 /* Timestamp timer IRQ */
+
+#define TMTICR GxICR(TMTIRQ) /* OS Tick timer uflow intr ctrl reg */
+#define TMSICR GxICR(TMSIRQ) /* Timestamp timer uflow intr ctrl reg */
+#endif /* CONFIG_AM34_2 */
+
#endif /* __KERNEL__ */
#endif /* _ASM_TIMER_REGS_H */
diff --git a/arch/mn10300/include/asm/timex.h b/arch/mn10300/include/asm/timex.h
index 8d031f9e117d..bd4e90dfe6c2 100644
--- a/arch/mn10300/include/asm/timex.h
+++ b/arch/mn10300/include/asm/timex.h
@@ -16,18 +16,30 @@
#define TICK_SIZE (tick_nsec / 1000)
-#define CLOCK_TICK_RATE 1193180 /* Underlying HZ - this should probably be set
- * to something appropriate, but what? */
-
-extern cycles_t cacheflush_time;
+#define CLOCK_TICK_RATE MN10300_JCCLK /* Underlying HZ */
#ifdef __KERNEL__
+extern cycles_t cacheflush_time;
+
static inline cycles_t get_cycles(void)
{
return read_timestamp_counter();
}
+extern int init_clockevents(void);
+extern int init_clocksource(void);
+
+static inline void setup_jiffies_interrupt(int irq,
+ struct irqaction *action)
+{
+ u16 tmp;
+ setup_irq(irq, action);
+ set_intr_level(irq, NUM2GxICR_LEVEL(CONFIG_TIMER_IRQ_LEVEL));
+ GxICR(irq) |= GxICR_ENABLE | GxICR_DETECT | GxICR_REQUEST;
+ tmp = GxICR(irq);
+}
+
#endif /* __KERNEL__ */
#endif /* _ASM_TIMEX_H */
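
The new setup_jiffies_interrupt() helper above writes the interrupt control register and then immediately reads it back into a throwaway variable; the read-back is what forces the posted write out before the CPU continues. A minimal standalone C sketch of that write-then-read-back pattern, with a volatile variable standing in for the real GxICR register (the names and bit values below are invented for illustration, not taken from the patch):

#include <stdint.h>
#include <stdio.h>

/* Invented bit values for the demo; not the real GxICR layout. */
#define ICR_ENABLE  0x0100u
#define ICR_DETECT  0x0200u
#define ICR_REQUEST 0x0400u

/* Stand-in for a memory-mapped interrupt control register. */
static volatile uint16_t fake_icr;

static void enable_irq_line(volatile uint16_t *icr)
{
	uint16_t tmp;

	*icr |= ICR_ENABLE | ICR_DETECT | ICR_REQUEST;  /* post the write */
	tmp = *icr;       /* read back so the write is not left buffered */
	(void)tmp;
}

int main(void)
{
	enable_irq_line(&fake_icr);
	printf("ICR is now 0x%04x\n", (unsigned)fake_icr);
	return 0;
}
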
diff --git a/arch/mn10300/include/asm/tlbflush.h b/arch/mn10300/include/asm/tlbflush.h
index 1a7e29281c5d..efddd6e1adea 100644
--- a/arch/mn10300/include/asm/tlbflush.h
+++ b/arch/mn10300/include/asm/tlbflush.h
@@ -11,24 +11,78 @@
#ifndef _ASM_TLBFLUSH_H
#define _ASM_TLBFLUSH_H
+#include <linux/mm.h>
#include <asm/processor.h>
-#define __flush_tlb() \
-do { \
- int w; \
- __asm__ __volatile__ \
- (" mov %1,%0 \n" \
- " or %2,%0 \n" \
- " mov %0,%1 \n" \
- : "=d"(w) \
- : "m"(MMUCTR), "i"(MMUCTR_IIV|MMUCTR_DIV) \
- : "cc", "memory" \
- ); \
-} while (0)
+struct tlb_state {
+ struct mm_struct *active_mm;
+ int state;
+};
+DECLARE_PER_CPU_SHARED_ALIGNED(struct tlb_state, cpu_tlbstate);
-#define __flush_tlb_all() __flush_tlb()
-#define __flush_tlb_one(addr) __flush_tlb()
+/**
+ * local_flush_tlb - Flush the current MM's entries from the local CPU's TLBs
+ */
+static inline void local_flush_tlb(void)
+{
+ int w;
+ asm volatile(
+ " mov %1,%0 \n"
+ " or %2,%0 \n"
+ " mov %0,%1 \n"
+ : "=d"(w)
+ : "m"(MMUCTR), "i"(MMUCTR_IIV|MMUCTR_DIV)
+ : "cc", "memory");
+}
+
+/**
+ * local_flush_tlb_all - Flush all entries from the local CPU's TLBs
+ */
+static inline void local_flush_tlb_all(void)
+{
+ local_flush_tlb();
+}
+/**
+ * local_flush_tlb_one - Flush one entry from the local CPU's TLBs
+ */
+static inline void local_flush_tlb_one(unsigned long addr)
+{
+ local_flush_tlb();
+}
+
+/**
+ * local_flush_tlb_page - Flush a page's entry from the local CPU's TLBs
+ * @mm: The MM to flush for
+ * @addr: The address of the target page in RAM (not its page struct)
+ */
+static inline
+void local_flush_tlb_page(struct mm_struct *mm, unsigned long addr)
+{
+ unsigned long pteu, flags, cnx;
+
+ addr &= PAGE_MASK;
+
+ local_irq_save(flags);
+
+ cnx = 1;
+#ifdef CONFIG_MN10300_TLB_USE_PIDR
+ cnx = mm->context.tlbpid[smp_processor_id()];
+#endif
+ if (cnx) {
+ pteu = addr;
+#ifdef CONFIG_MN10300_TLB_USE_PIDR
+ pteu |= cnx & xPTEU_PID;
+#endif
+ IPTEU = pteu;
+ DPTEU = pteu;
+ if (IPTEL & xPTEL_V)
+ IPTEL = 0;
+ if (DPTEL & xPTEL_V)
+ DPTEL = 0;
+ }
+ local_irq_restore(flags);
+}
/*
* TLB flushing:
@@ -40,41 +94,61 @@ do { \
* - flush_tlb_range(mm, start, end) flushes a range of pages
* - flush_tlb_pgtables(mm, start, end) flushes a range of page tables
*/
-#define flush_tlb_all() \
-do { \
- preempt_disable(); \
- __flush_tlb_all(); \
- preempt_enable(); \
-} while (0)
-
-#define flush_tlb_mm(mm) \
-do { \
- preempt_disable(); \
- __flush_tlb_all(); \
- preempt_enable(); \
-} while (0)
-
-#define flush_tlb_range(vma, start, end) \
-do { \
- unsigned long __s __attribute__((unused)) = (start); \
- unsigned long __e __attribute__((unused)) = (end); \
- preempt_disable(); \
- __flush_tlb_all(); \
- preempt_enable(); \
-} while (0)
-
-
-#define __flush_tlb_global() flush_tlb_all()
-#define flush_tlb() flush_tlb_all()
-#define flush_tlb_kernel_range(start, end) \
-do { \
- unsigned long __s __attribute__((unused)) = (start); \
- unsigned long __e __attribute__((unused)) = (end); \
- flush_tlb_all(); \
-} while (0)
-
-extern void flush_tlb_page(struct vm_area_struct *vma, unsigned long addr);
-
-#define flush_tlb_pgtables(mm, start, end) do {} while (0)
+#ifdef CONFIG_SMP
+
+#include <asm/smp.h>
+
+extern void flush_tlb_all(void);
+extern void flush_tlb_current_task(void);
+extern void flush_tlb_mm(struct mm_struct *);
+extern void flush_tlb_page(struct vm_area_struct *, unsigned long);
+
+#define flush_tlb() flush_tlb_current_task()
+
+static inline void flush_tlb_range(struct vm_area_struct *vma,
+ unsigned long start, unsigned long end)
+{
+ flush_tlb_mm(vma->vm_mm);
+}
+
+#else /* CONFIG_SMP */
+
+static inline void flush_tlb_all(void)
+{
+ preempt_disable();
+ local_flush_tlb_all();
+ preempt_enable();
+}
+
+static inline void flush_tlb_mm(struct mm_struct *mm)
+{
+ preempt_disable();
+ local_flush_tlb_all();
+ preempt_enable();
+}
+
+static inline void flush_tlb_range(struct vm_area_struct *vma,
+ unsigned long start, unsigned long end)
+{
+ preempt_disable();
+ local_flush_tlb_all();
+ preempt_enable();
+}
+
+#define flush_tlb_page(vma, addr) local_flush_tlb_page((vma)->vm_mm, addr)
+#define flush_tlb() flush_tlb_all()
+
+#endif /* CONFIG_SMP */
+
+static inline void flush_tlb_kernel_range(unsigned long start,
+ unsigned long end)
+{
+ flush_tlb_all();
+}
+
+static inline void flush_tlb_pgtables(struct mm_struct *mm,
+ unsigned long start, unsigned long end)
+{
+}
#endif /* _ASM_TLBFLUSH_H */
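
The rewritten tlbflush.h splits the interface into local_flush_tlb*() primitives and the generic flush_tlb*() wrappers: on UP builds the wrappers simply pin the task with preempt_disable() around a local flush, while SMP builds get out-of-line versions that also interrupt the other cores. A toy sketch of the UP-style wrapper shape, with a printf standing in for the MMUCTR poke (all names below are stand-ins, not kernel API):

#include <stdio.h>

/* Toy stand-ins: the real primitives poke MMUCTR and the PTEU registers. */
static void local_flush_tlb_all(void) { puts("invalidate this CPU's TLB"); }
static void preempt_disable(void)     { /* keep the task on this CPU */ }
static void preempt_enable(void)      { /* allow migration again */ }

/* UP-style wrapper shape: pin to the CPU, flush locally, unpin. */
static void flush_tlb_mm_up(void)
{
	preempt_disable();
	local_flush_tlb_all();
	preempt_enable();
}

int main(void)
{
	flush_tlb_mm_up();
	return 0;
}
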
diff --git a/arch/mn10300/include/asm/uaccess.h b/arch/mn10300/include/asm/uaccess.h
index 197a7af3dd8a..679dee0bbd08 100644
--- a/arch/mn10300/include/asm/uaccess.h
+++ b/arch/mn10300/include/asm/uaccess.h
@@ -14,9 +14,8 @@
/*
* User space memory access functions
*/
-#include <linux/sched.h>
+#include <linux/thread_info.h>
#include <asm/page.h>
-#include <asm/pgtable.h>
#include <asm/errno.h>
#define VERIFY_READ 0
@@ -29,7 +28,6 @@
*
* For historical reasons, these macros are grossly misnamed.
*/
-
#define MAKE_MM_SEG(s) ((mm_segment_t) { (s) })
#define KERNEL_XDS MAKE_MM_SEG(0xBFFFFFFF)
@@ -377,7 +375,7 @@ unsigned long __generic_copy_to_user_nocheck(void *to, const void *from,
#if 0
-#error don't use - these macros don't increment to & from pointers
+#error "don't use - these macros don't increment to & from pointers"
/* Optimize just a little bit when we know the size of the move. */
#define __constant_copy_user(to, from, size) \
do { \
diff --git a/arch/mn10300/kernel/Makefile b/arch/mn10300/kernel/Makefile
index 23f2ab67574c..8f5f1e81baf5 100644
--- a/arch/mn10300/kernel/Makefile
+++ b/arch/mn10300/kernel/Makefile
@@ -3,13 +3,16 @@
#
extra-y := head.o init_task.o vmlinux.lds
-obj-y := process.o signal.o entry.o fpu.o traps.o irq.o \
+fpu-obj-y := fpu-nofpu.o fpu-nofpu-low.o
+fpu-obj-$(CONFIG_FPU) := fpu.o fpu-low.o
+
+obj-y := process.o signal.o entry.o traps.o irq.o \
ptrace.o setup.o time.o sys_mn10300.o io.o kthread.o \
- switch_to.o mn10300_ksyms.o kernel_execve.o
+ switch_to.o mn10300_ksyms.o kernel_execve.o $(fpu-obj-y)
-obj-$(CONFIG_MN10300_WD_TIMER) += mn10300-watchdog.o mn10300-watchdog-low.o
+obj-$(CONFIG_SMP) += smp.o smp-low.o
-obj-$(CONFIG_FPU) += fpu-low.o
+obj-$(CONFIG_MN10300_WD_TIMER) += mn10300-watchdog.o mn10300-watchdog-low.o
obj-$(CONFIG_MN10300_TTYSM) += mn10300-serial.o mn10300-serial-low.o \
mn10300-debug.o
@@ -17,7 +20,7 @@ obj-$(CONFIG_GDBSTUB) += gdb-stub.o gdb-low.o
obj-$(CONFIG_GDBSTUB_ON_TTYSx) += gdb-io-serial.o gdb-io-serial-low.o
obj-$(CONFIG_GDBSTUB_ON_TTYSMx) += gdb-io-ttysm.o gdb-io-ttysm-low.o
-ifneq ($(CONFIG_MN10300_CACHE_DISABLED),y)
+ifeq ($(CONFIG_MN10300_CACHE_ENABLED),y)
obj-$(CONFIG_GDBSTUB) += gdb-cache.o
endif
@@ -25,3 +28,5 @@ obj-$(CONFIG_MN10300_RTC) += rtc.o
obj-$(CONFIG_PROFILE) += profile.o profile-low.o
obj-$(CONFIG_MODULES) += module.o
obj-$(CONFIG_KPROBES) += kprobes.o
+obj-$(CONFIG_CSRC_MN10300) += csrc-mn10300.o
+obj-$(CONFIG_CEVT_MN10300) += cevt-mn10300.o
diff --git a/arch/mn10300/kernel/asm-offsets.c b/arch/mn10300/kernel/asm-offsets.c
index 02dc7e461fef..96f24fab7de6 100644
--- a/arch/mn10300/kernel/asm-offsets.c
+++ b/arch/mn10300/kernel/asm-offsets.c
@@ -23,6 +23,7 @@ void foo(void)
OFFSET(TI_task, thread_info, task);
OFFSET(TI_exec_domain, thread_info, exec_domain);
+ OFFSET(TI_frame, thread_info, frame);
OFFSET(TI_flags, thread_info, flags);
OFFSET(TI_cpu, thread_info, cpu);
OFFSET(TI_preempt_count, thread_info, preempt_count);
@@ -66,7 +67,15 @@ void foo(void)
OFFSET(THREAD_SP, thread_struct, sp);
OFFSET(THREAD_A3, thread_struct, a3);
OFFSET(THREAD_USP, thread_struct, usp);
- OFFSET(THREAD_FRAME, thread_struct, __frame);
+#ifdef CONFIG_FPU
+ OFFSET(THREAD_FPU_FLAGS, thread_struct, fpu_flags);
+ OFFSET(THREAD_FPU_STATE, thread_struct, fpu_state);
+ DEFINE(__THREAD_USING_FPU, THREAD_USING_FPU);
+ DEFINE(__THREAD_HAS_FPU, THREAD_HAS_FPU);
+#endif /* CONFIG_FPU */
+ BLANK();
+
+ OFFSET(TASK_THREAD, task_struct, thread);
BLANK();
DEFINE(CLONE_VM_asm, CLONE_VM);
diff --git a/arch/mn10300/kernel/cevt-mn10300.c b/arch/mn10300/kernel/cevt-mn10300.c
new file mode 100644
index 000000000000..d4cb535bf786
--- /dev/null
+++ b/arch/mn10300/kernel/cevt-mn10300.c
@@ -0,0 +1,131 @@
+/* MN10300 clockevents
+ *
+ * Copyright (C) 2010 Red Hat, Inc. All Rights Reserved.
+ * Written by Mark Salter (msalter@redhat.com)
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public Licence
+ * as published by the Free Software Foundation; either version
+ * 2 of the Licence, or (at your option) any later version.
+ */
+#include <linux/clockchips.h>
+#include <linux/interrupt.h>
+#include <linux/percpu.h>
+#include <linux/smp.h>
+#include <asm/timex.h>
+#include "internal.h"
+
+#ifdef CONFIG_SMP
+#if (CONFIG_NR_CPUS > 2) && !defined(CONFIG_GENERIC_CLOCKEVENTS_BROADCAST)
+#error "This doesn't scale well! Need per-core local timers."
+#endif
+#else /* CONFIG_SMP */
+#define stop_jiffies_counter1()
+#define reload_jiffies_counter1(x)
+#define TMJC1IRQ TMJCIRQ
+#endif
+
+
+static int next_event(unsigned long delta,
+ struct clock_event_device *evt)
+{
+ unsigned int cpu = smp_processor_id();
+
+ if (cpu == 0) {
+ stop_jiffies_counter();
+ reload_jiffies_counter(delta - 1);
+ } else {
+ stop_jiffies_counter1();
+ reload_jiffies_counter1(delta - 1);
+ }
+ return 0;
+}
+
+static void set_clock_mode(enum clock_event_mode mode,
+ struct clock_event_device *evt)
+{
+ /* Nothing to do ... */
+}
+
+static DEFINE_PER_CPU(struct clock_event_device, mn10300_clockevent_device);
+static DEFINE_PER_CPU(struct irqaction, timer_irq);
+
+static irqreturn_t timer_interrupt(int irq, void *dev_id)
+{
+ struct clock_event_device *cd;
+ unsigned int cpu = smp_processor_id();
+
+ if (cpu == 0)
+ stop_jiffies_counter();
+ else
+ stop_jiffies_counter1();
+
+ cd = &per_cpu(mn10300_clockevent_device, cpu);
+ cd->event_handler(cd);
+
+ return IRQ_HANDLED;
+}
+
+static void event_handler(struct clock_event_device *dev)
+{
+}
+
+int __init init_clockevents(void)
+{
+ struct clock_event_device *cd;
+ struct irqaction *iact;
+ unsigned int cpu = smp_processor_id();
+
+ cd = &per_cpu(mn10300_clockevent_device, cpu);
+
+ if (cpu == 0) {
+ stop_jiffies_counter();
+ cd->irq = TMJCIRQ;
+ } else {
+ stop_jiffies_counter1();
+ cd->irq = TMJC1IRQ;
+ }
+
+ cd->name = "Timestamp";
+ cd->features = CLOCK_EVT_FEAT_ONESHOT;
+
+ /* Calculate the min / max delta */
+ clockevent_set_clock(cd, MN10300_JCCLK);
+
+ cd->max_delta_ns = clockevent_delta2ns(TMJCBR_MAX, cd);
+ cd->min_delta_ns = clockevent_delta2ns(100, cd);
+
+ cd->rating = 200;
+ cd->cpumask = cpumask_of(smp_processor_id());
+ cd->set_mode = set_clock_mode;
+ cd->event_handler = event_handler;
+ cd->set_next_event = next_event;
+
+ iact = &per_cpu(timer_irq, cpu);
+ iact->flags = IRQF_DISABLED | IRQF_SHARED | IRQF_TIMER;
+ iact->handler = timer_interrupt;
+
+ clockevents_register_device(cd);
+
+#if defined(CONFIG_SMP) && !defined(CONFIG_GENERIC_CLOCKEVENTS_BROADCAST)
+ /* setup timer irq affinity so it only runs on this cpu */
+ {
+ struct irq_desc *desc;
+ desc = irq_to_desc(cd->irq);
+ cpumask_copy(desc->affinity, cpumask_of(cpu));
+ iact->flags |= IRQF_NOBALANCING;
+ }
+#endif
+
+ if (cpu == 0) {
+ reload_jiffies_counter(MN10300_JC_PER_HZ - 1);
+ iact->name = "CPU0 Timer";
+ } else {
+ reload_jiffies_counter1(MN10300_JC_PER_HZ - 1);
+ iact->name = "CPU1 Timer";
+ }
+
+ setup_jiffies_interrupt(cd->irq, iact);
+
+ return 0;
+}
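
next_event() above programs the jiffies counter with delta - 1 and relies on cd->min_delta_ns / cd->max_delta_ns to keep the requested delta inside what the reload register can hold. A hedged sketch of that conversion, assuming a down-counter that fires one tick after the programmed value elapses and assuming TMJCBR_MAX is 0xffff (the clock rate and the 16-bit limit are assumptions for the demo, not taken from the patch):

#include <stdint.h>
#include <stdio.h>

#define TMJCBR_MAX 0xffffu   /* assumed 16-bit reload register */

/*
 * Convert a requested delay into a reload value for a down-counting
 * timer that fires one tick after the programmed count elapses,
 * clamping to what the hardware can represent.
 */
static uint32_t ticks_for_delay(uint64_t delay_ns, uint64_t clk_hz)
{
	uint64_t ticks = (delay_ns * clk_hz) / 1000000000ull;

	if (ticks < 1)
		ticks = 1;               /* cd->min_delta_ns guards this */
	if (ticks > TMJCBR_MAX + 1)
		ticks = TMJCBR_MAX + 1;  /* cd->max_delta_ns guards this */
	return (uint32_t)ticks - 1;      /* program delta - 1 */
}

int main(void)
{
	printf("reload for 1ms @ 33MHz: %u\n",
	       (unsigned)ticks_for_delay(1000000, 33000000));
	return 0;
}
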
diff --git a/arch/mn10300/kernel/csrc-mn10300.c b/arch/mn10300/kernel/csrc-mn10300.c
new file mode 100644
index 000000000000..ba2f0c4d6e01
--- /dev/null
+++ b/arch/mn10300/kernel/csrc-mn10300.c
@@ -0,0 +1,35 @@
+/* MN10300 clocksource
+ *
+ * Copyright (C) 2010 Red Hat, Inc. All Rights Reserved.
+ * Written by Mark Salter (msalter@redhat.com)
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public Licence
+ * as published by the Free Software Foundation; either version
+ * 2 of the Licence, or (at your option) any later version.
+ */
+#include <linux/clocksource.h>
+#include <linux/init.h>
+#include <asm/timex.h>
+#include "internal.h"
+
+static cycle_t mn10300_read(struct clocksource *cs)
+{
+ return read_timestamp_counter();
+}
+
+static struct clocksource clocksource_mn10300 = {
+ .name = "TSC",
+ .rating = 200,
+ .read = mn10300_read,
+ .mask = CLOCKSOURCE_MASK(32),
+ .flags = CLOCK_SOURCE_IS_CONTINUOUS,
+};
+
+int __init init_clocksource(void)
+{
+ startup_timestamp_counter();
+ clocksource_set_clock(&clocksource_mn10300, MN10300_TSCCLK);
+ clocksource_register(&clocksource_mn10300);
+ return 0;
+}
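
The clocksource registers read_timestamp_counter() behind a 32-bit mask, so the timekeeping core computes deltas modulo 2^32 and a counter wrap between two reads is harmless. A small sketch of that wrap-safe delta, done by hand here purely for illustration (the real arithmetic lives in the clocksource core, not in this driver):

#include <stdint.h>
#include <stdio.h>

/* Equivalent of CLOCKSOURCE_MASK(32): only the low 32 bits are valid. */
#define TSC_MASK 0xffffffffull

/*
 * Elapsed cycles between two reads of a free-running 32-bit counter.
 * The subtraction is reduced by the mask, so a single wrap between the
 * two reads still yields the right answer.
 */
static uint64_t cycles_elapsed(uint64_t last, uint64_t now)
{
	return (now - last) & TSC_MASK;
}

int main(void)
{
	/* counter wrapped from near the top back past zero: 0x20 cycles */
	printf("%llu\n",
	       (unsigned long long)cycles_elapsed(0xfffffff0ull, 0x10ull));
	return 0;
}
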
diff --git a/arch/mn10300/kernel/entry.S b/arch/mn10300/kernel/entry.S
index 3d394b4eefba..f00b9bafcd3e 100644
--- a/arch/mn10300/kernel/entry.S
+++ b/arch/mn10300/kernel/entry.S
@@ -28,25 +28,17 @@
#include <asm/asm-offsets.h>
#include <asm/frame.inc>
+#if defined(CONFIG_SMP) && defined(CONFIG_GDBSTUB)
+#include <asm/gdb-stub.h>
+#endif /* CONFIG_SMP && CONFIG_GDBSTUB */
+
#ifdef CONFIG_PREEMPT
-#define preempt_stop __cli
+#define preempt_stop LOCAL_IRQ_DISABLE
#else
#define preempt_stop
#define resume_kernel restore_all
#endif
- .macro __cli
- and ~EPSW_IM,epsw
- or EPSW_IE|MN10300_CLI_LEVEL,epsw
- nop
- nop
- nop
- .endm
- .macro __sti
- or EPSW_IE|EPSW_IM_7,epsw
- .endm
-
-
.am33_2
###############################################################################
@@ -88,7 +80,7 @@ syscall_call:
syscall_exit:
# make sure we don't miss an interrupt setting need_resched or
# sigpending between sampling and the rti
- __cli
+ LOCAL_IRQ_DISABLE
mov (TI_flags,a2),d2
btst _TIF_ALLWORK_MASK,d2
bne syscall_exit_work
@@ -105,7 +97,7 @@ restore_all:
syscall_exit_work:
btst _TIF_SYSCALL_TRACE,d2
beq work_pending
- __sti # could let syscall_trace_exit() call
+ LOCAL_IRQ_ENABLE # could let syscall_trace_exit() call
# schedule() instead
mov fp,d0
call syscall_trace_exit[],0 # do_syscall_trace(regs)
@@ -121,7 +113,7 @@ work_resched:
# make sure we don't miss an interrupt setting need_resched or
# sigpending between sampling and the rti
- __cli
+ LOCAL_IRQ_DISABLE
# is there any work to be done other than syscall tracing?
mov (TI_flags,a2),d2
@@ -168,7 +160,7 @@ ret_from_intr:
ENTRY(resume_userspace)
# make sure we don't miss an interrupt setting need_resched or
# sigpending between sampling and the rti
- __cli
+ LOCAL_IRQ_DISABLE
# is there any work to be done on int/exception return?
mov (TI_flags,a2),d2
@@ -178,7 +170,7 @@ ENTRY(resume_userspace)
#ifdef CONFIG_PREEMPT
ENTRY(resume_kernel)
- __cli
+ LOCAL_IRQ_DISABLE
mov (TI_preempt_count,a2),d0 # non-zero preempt_count ?
cmp 0,d0
bne restore_all
@@ -216,31 +208,6 @@ ENTRY(irq_handler)
###############################################################################
#
-# Monitor Signal handler entry point
-#
-###############################################################################
-ENTRY(monitor_signal)
- movbu (0xae000001),d1
- cmp 1,d1
- beq monsignal
- ret [],0
-
-monsignal:
- or EPSW_NMID,epsw
- mov d0,a0
- mov a0,sp
- mov (REG_EPSW,fp),d1
- and ~EPSW_nSL,d1
- mov d1,(REG_EPSW,fp)
- movm (sp),[d2,d3,a2,a3,exreg0,exreg1,exother]
- mov (sp),a1
- mov a1,usp
- movm (sp),[other]
- add 4,sp
-here: jmp 0x8e000008-here+0x8e000008
-
-###############################################################################
-#
# Double Fault handler entry point
# - note that there will not be a stack, D0/A0 will hold EPSW/PC as were
#
@@ -276,6 +243,10 @@ double_fault_loop:
ENTRY(raw_bus_error)
add -4,sp
mov d0,(sp)
+#if defined(CONFIG_ERRATUM_NEED_TO_RELOAD_MMUCTR)
+ mov (MMUCTR),d0
+ mov d0,(MMUCTR)
+#endif
mov (BCBERR),d0 # what
btst BCBERR_BEMR_DMA,d0 # see if it was an external bus error
beq __common_exception_aux # it wasn't
@@ -302,11 +273,88 @@ ENTRY(nmi_handler)
add -4,sp
mov d0,(sp)
mov (TBR),d0
+
+#ifdef CONFIG_SMP
+ add -4,sp
+ mov d0,(sp) # save d0(TBR)
+ movhu (NMIAGR),d0
+ and NMIAGR_GN,d0
+ lsr 0x2,d0
+ cmp CALL_FUNCTION_NMI_IPI,d0
+ bne 5f # if not call function, jump
+
+ # function call nmi ipi
+ add 4,sp # no need to store TBR
+ mov GxICR_DETECT,d0 # clear NMI request
+ movbu d0,(GxICR(CALL_FUNCTION_NMI_IPI))
+ movhu (GxICR(CALL_FUNCTION_NMI_IPI)),d0
+ and ~EPSW_NMID,epsw # enable NMI
+
+ mov (sp),d0 # restore d0
+ SAVE_ALL
+ call smp_nmi_call_function_interrupt[],0
+ RESTORE_ALL
+
+5:
+#ifdef CONFIG_GDBSTUB
+ cmp GDB_NMI_IPI,d0
+ bne 3f # if not gdb nmi ipi, jump
+
+ # gdb nmi ipi
+ add 4,sp # no need to store TBR
+ mov GxICR_DETECT,d0 # clear NMI
+ movbu d0,(GxICR(GDB_NMI_IPI))
+ movhu (GxICR(GDB_NMI_IPI)),d0
+ and ~EPSW_NMID,epsw # enable NMI
+#ifdef CONFIG_MN10300_CACHE_ENABLED
+ mov (gdbstub_nmi_opr_type),d0
+ cmp GDBSTUB_NMI_CACHE_PURGE,d0
+ bne 4f # if not gdb cache purge, jump
+
+ # gdb cache purge nmi ipi
+ add -20,sp
+ mov d1,(4,sp)
+ mov a0,(8,sp)
+ mov a1,(12,sp)
+ mov mdr,d0
+ mov d0,(16,sp)
+ call gdbstub_local_purge_cache[],0
+ mov 0x1,d0
+ mov (CPUID),d1
+ asl d1,d0
+ mov gdbstub_nmi_cpumask,a0
+ bclr d0,(a0)
+ mov (4,sp),d1
+ mov (8,sp),a0
+ mov (12,sp),a1
+ mov (16,sp),d0
+ mov d0,mdr
+ add 20,sp
+ mov (sp),d0
+ add 4,sp
+ rti
+4:
+#endif /* CONFIG_MN10300_CACHE_ENABLED */
+ # gdb wait nmi ipi
+ mov (sp),d0
+ SAVE_ALL
+ call gdbstub_nmi_wait[],0
+ RESTORE_ALL
+3:
+#endif /* CONFIG_GDBSTUB */
+ mov (sp),d0 # restore TBR to d0
+ add 4,sp
+#endif /* CONFIG_SMP */
+
bra __common_exception_nonmi
ENTRY(__common_exception)
add -4,sp
mov d0,(sp)
+#if defined(CONFIG_ERRATUM_NEED_TO_RELOAD_MMUCTR)
+ mov (MMUCTR),d0
+ mov d0,(MMUCTR)
+#endif
__common_exception_aux:
mov (TBR),d0
@@ -331,15 +379,21 @@ __common_exception_nonmi:
mov d0,(REG_ORIG_D0,fp)
#ifdef CONFIG_GDBSTUB
+#ifdef CONFIG_SMP
+ call gdbstub_busy_check[],0
+ and d0,d0 # check return value
+ beq 2f
+#else /* CONFIG_SMP */
btst 0x01,(gdbstub_busy)
beq 2f
+#endif /* CONFIG_SMP */
and ~EPSW_IE,epsw
mov fp,d0
mov a2,d1
call gdbstub_exception[],0 # gdbstub itself caused an exception
bra restore_all
2:
-#endif
+#endif /* CONFIG_GDBSTUB */
mov fp,d0 # arg 0: stacked register file
mov a2,d1 # arg 1: exception number
@@ -374,11 +428,7 @@ ENTRY(set_excp_vector)
add exception_table,d0
mov d1,(d0)
mov 4,d1
-#if defined(CONFIG_MN10300_CACHE_WBACK)
- jmp mn10300_dcache_flush_inv_range2
-#else
ret [],0
-#endif
###############################################################################
#
diff --git a/arch/mn10300/kernel/fpu-low.S b/arch/mn10300/kernel/fpu-low.S
index 96cfd47e68d5..78df25cfae29 100644
--- a/arch/mn10300/kernel/fpu-low.S
+++ b/arch/mn10300/kernel/fpu-low.S
@@ -8,25 +8,14 @@
* as published by the Free Software Foundation; either version
* 2 of the Licence, or (at your option) any later version.
*/
+#include <linux/linkage.h>
#include <asm/cpu-regs.h>
+#include <asm/smp.h>
+#include <asm/thread_info.h>
+#include <asm/asm-offsets.h>
+#include <asm/frame.inc>
-###############################################################################
-#
-# void fpu_init_state(void)
-# - initialise the FPU
-#
-###############################################################################
- .globl fpu_init_state
- .type fpu_init_state,@function
-fpu_init_state:
- mov epsw,d0
- or EPSW_FE,epsw
-
-#ifdef CONFIG_MN10300_PROC_MN103E010
- nop
- nop
- nop
-#endif
+.macro FPU_INIT_STATE_ALL
fmov 0,fs0
fmov fs0,fs1
fmov fs0,fs2
@@ -60,7 +49,100 @@ fpu_init_state:
fmov fs0,fs30
fmov fs0,fs31
fmov FPCR_INIT,fpcr
+.endm
+
+.macro FPU_SAVE_ALL areg,dreg
+ fmov fs0,(\areg+)
+ fmov fs1,(\areg+)
+ fmov fs2,(\areg+)
+ fmov fs3,(\areg+)
+ fmov fs4,(\areg+)
+ fmov fs5,(\areg+)
+ fmov fs6,(\areg+)
+ fmov fs7,(\areg+)
+ fmov fs8,(\areg+)
+ fmov fs9,(\areg+)
+ fmov fs10,(\areg+)
+ fmov fs11,(\areg+)
+ fmov fs12,(\areg+)
+ fmov fs13,(\areg+)
+ fmov fs14,(\areg+)
+ fmov fs15,(\areg+)
+ fmov fs16,(\areg+)
+ fmov fs17,(\areg+)
+ fmov fs18,(\areg+)
+ fmov fs19,(\areg+)
+ fmov fs20,(\areg+)
+ fmov fs21,(\areg+)
+ fmov fs22,(\areg+)
+ fmov fs23,(\areg+)
+ fmov fs24,(\areg+)
+ fmov fs25,(\areg+)
+ fmov fs26,(\areg+)
+ fmov fs27,(\areg+)
+ fmov fs28,(\areg+)
+ fmov fs29,(\areg+)
+ fmov fs30,(\areg+)
+ fmov fs31,(\areg+)
+ fmov fpcr,\dreg
+ mov \dreg,(\areg)
+.endm
+
+.macro FPU_RESTORE_ALL areg,dreg
+ fmov (\areg+),fs0
+ fmov (\areg+),fs1
+ fmov (\areg+),fs2
+ fmov (\areg+),fs3
+ fmov (\areg+),fs4
+ fmov (\areg+),fs5
+ fmov (\areg+),fs6
+ fmov (\areg+),fs7
+ fmov (\areg+),fs8
+ fmov (\areg+),fs9
+ fmov (\areg+),fs10
+ fmov (\areg+),fs11
+ fmov (\areg+),fs12
+ fmov (\areg+),fs13
+ fmov (\areg+),fs14
+ fmov (\areg+),fs15
+ fmov (\areg+),fs16
+ fmov (\areg+),fs17
+ fmov (\areg+),fs18
+ fmov (\areg+),fs19
+ fmov (\areg+),fs20
+ fmov (\areg+),fs21
+ fmov (\areg+),fs22
+ fmov (\areg+),fs23
+ fmov (\areg+),fs24
+ fmov (\areg+),fs25
+ fmov (\areg+),fs26
+ fmov (\areg+),fs27
+ fmov (\areg+),fs28
+ fmov (\areg+),fs29
+ fmov (\areg+),fs30
+ fmov (\areg+),fs31
+ mov (\areg),\dreg
+ fmov \dreg,fpcr
+.endm
+###############################################################################
+#
+# void fpu_init_state(void)
+# - initialise the FPU
+#
+###############################################################################
+ .globl fpu_init_state
+ .type fpu_init_state,@function
+fpu_init_state:
+ mov epsw,d0
+ or EPSW_FE,epsw
+
+#ifdef CONFIG_MN10300_PROC_MN103E010
+ nop
+ nop
+ nop
+#endif
+ FPU_INIT_STATE_ALL
#ifdef CONFIG_MN10300_PROC_MN103E010
nop
nop
@@ -89,40 +171,7 @@ fpu_save:
nop
#endif
mov d0,a0
- fmov fs0,(a0+)
- fmov fs1,(a0+)
- fmov fs2,(a0+)
- fmov fs3,(a0+)
- fmov fs4,(a0+)
- fmov fs5,(a0+)
- fmov fs6,(a0+)
- fmov fs7,(a0+)
- fmov fs8,(a0+)
- fmov fs9,(a0+)
- fmov fs10,(a0+)
- fmov fs11,(a0+)
- fmov fs12,(a0+)
- fmov fs13,(a0+)
- fmov fs14,(a0+)
- fmov fs15,(a0+)
- fmov fs16,(a0+)
- fmov fs17,(a0+)
- fmov fs18,(a0+)
- fmov fs19,(a0+)
- fmov fs20,(a0+)
- fmov fs21,(a0+)
- fmov fs22,(a0+)
- fmov fs23,(a0+)
- fmov fs24,(a0+)
- fmov fs25,(a0+)
- fmov fs26,(a0+)
- fmov fs27,(a0+)
- fmov fs28,(a0+)
- fmov fs29,(a0+)
- fmov fs30,(a0+)
- fmov fs31,(a0+)
- fmov fpcr,d0
- mov d0,(a0)
+ FPU_SAVE_ALL a0,d0
#ifdef CONFIG_MN10300_PROC_MN103E010
nop
nop
@@ -135,63 +184,75 @@ fpu_save:
###############################################################################
#
-# void fpu_restore(struct fpu_state_struct *)
-# - restore the fpu state
-# - note that an FPU Operational exception might occur during this process
+# void fpu_disabled(void)
+# - handle an exception due to the FPU being disabled
+# when CONFIG_FPU is enabled
#
###############################################################################
- .globl fpu_restore
- .type fpu_restore,@function
-fpu_restore:
- mov epsw,d1
- or EPSW_FE,epsw /* enable the FPU so we can access it */
-
-#ifdef CONFIG_MN10300_PROC_MN103E010
+ .type fpu_disabled,@function
+ .globl fpu_disabled
+fpu_disabled:
+ or EPSW_nAR|EPSW_FE,epsw
nop
nop
-#endif
- mov d0,a0
- fmov (a0+),fs0
- fmov (a0+),fs1
- fmov (a0+),fs2
- fmov (a0+),fs3
- fmov (a0+),fs4
- fmov (a0+),fs5
- fmov (a0+),fs6
- fmov (a0+),fs7
- fmov (a0+),fs8
- fmov (a0+),fs9
- fmov (a0+),fs10
- fmov (a0+),fs11
- fmov (a0+),fs12
- fmov (a0+),fs13
- fmov (a0+),fs14
- fmov (a0+),fs15
- fmov (a0+),fs16
- fmov (a0+),fs17
- fmov (a0+),fs18
- fmov (a0+),fs19
- fmov (a0+),fs20
- fmov (a0+),fs21
- fmov (a0+),fs22
- fmov (a0+),fs23
- fmov (a0+),fs24
- fmov (a0+),fs25
- fmov (a0+),fs26
- fmov (a0+),fs27
- fmov (a0+),fs28
- fmov (a0+),fs29
- fmov (a0+),fs30
- fmov (a0+),fs31
- mov (a0),d0
- fmov d0,fpcr
-#ifdef CONFIG_MN10300_PROC_MN103E010
nop
+
+ mov sp,a1
+ mov (a1),d1 /* get epsw of user context */
+ and ~(THREAD_SIZE-1),a1 /* a1: (thread_info *ti) */
+ mov (TI_task,a1),a2 /* a2: (task_struct *tsk) */
+ btst EPSW_nSL,d1
+ beq fpu_used_in_kernel
+
+ or EPSW_FE,d1
+ mov d1,(sp)
+ mov (TASK_THREAD+THREAD_FPU_FLAGS,a2),d1
+#ifndef CONFIG_LAZY_SAVE_FPU
+ or __THREAD_HAS_FPU,d1
+ mov d1,(TASK_THREAD+THREAD_FPU_FLAGS,a2)
+#else /* !CONFIG_LAZY_SAVE_FPU */
+ mov (fpu_state_owner),a0
+ cmp 0,a0
+ beq fpu_regs_save_end
+
+ mov (TASK_THREAD+THREAD_UREGS,a0),a1
+ add TASK_THREAD+THREAD_FPU_STATE,a0
+ FPU_SAVE_ALL a0,d0
+
+ mov (REG_EPSW,a1),d0
+ and ~EPSW_FE,d0
+ mov d0,(REG_EPSW,a1)
+
+fpu_regs_save_end:
+ mov a2,(fpu_state_owner)
+#endif /* !CONFIG_LAZY_SAVE_FPU */
+
+ btst __THREAD_USING_FPU,d1
+ beq fpu_regs_init
+ add TASK_THREAD+THREAD_FPU_STATE,a2
+ FPU_RESTORE_ALL a2,d0
+ rti
+
+fpu_regs_init:
+ FPU_INIT_STATE_ALL
+ add TASK_THREAD+THREAD_FPU_FLAGS,a2
+ bset __THREAD_USING_FPU,(0,a2)
+ rti
+
+fpu_used_in_kernel:
+ and ~(EPSW_nAR|EPSW_FE),epsw
nop
nop
-#endif
- mov d1,epsw
- ret [],0
+ add -4,sp
+ SAVE_ALL
+ mov -1,d0
+ mov d0,(REG_ORIG_D0,fp)
+
+ and ~EPSW_NMID,epsw
+
+ mov fp,d0
+ call fpu_disabled_in_kernel[],0
+ jmp ret_from_exception
- .size fpu_restore,.-fpu_restore
+ .size fpu_disabled,.-fpu_disabled
diff --git a/arch/mn10300/kernel/fpu-nofpu-low.S b/arch/mn10300/kernel/fpu-nofpu-low.S
new file mode 100644
index 000000000000..7ea087a549f4
--- /dev/null
+++ b/arch/mn10300/kernel/fpu-nofpu-low.S
@@ -0,0 +1,39 @@
+/* MN10300 Low level FPU management operations
+ *
+ * Copyright (C) 2007 Red Hat, Inc. All Rights Reserved.
+ * Written by David Howells (dhowells@redhat.com)
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public Licence
+ * as published by the Free Software Foundation; either version
+ * 2 of the Licence, or (at your option) any later version.
+ */
+#include <linux/linkage.h>
+#include <asm/cpu-regs.h>
+#include <asm/smp.h>
+#include <asm/thread_info.h>
+#include <asm/asm-offsets.h>
+#include <asm/frame.inc>
+
+###############################################################################
+#
+# void fpu_disabled(void)
+# - handle an exception due to the FPU being disabled
+# when CONFIG_FPU is disabled
+#
+###############################################################################
+ .type fpu_disabled,@function
+ .globl fpu_disabled
+fpu_disabled:
+ add -4,sp
+ SAVE_ALL
+ mov -1,d0
+ mov d0,(REG_ORIG_D0,fp)
+
+ and ~EPSW_NMID,epsw
+
+ mov fp,d0
+ call unexpected_fpu_exception[],0
+ jmp ret_from_exception
+
+ .size fpu_disabled,.-fpu_disabled
diff --git a/arch/mn10300/kernel/fpu-nofpu.c b/arch/mn10300/kernel/fpu-nofpu.c
new file mode 100644
index 000000000000..31c765b92c5d
--- /dev/null
+++ b/arch/mn10300/kernel/fpu-nofpu.c
@@ -0,0 +1,30 @@
+/* MN10300 FPU management
+ *
+ * Copyright (C) 2007 Red Hat, Inc. All Rights Reserved.
+ * Written by David Howells (dhowells@redhat.com)
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public Licence
+ * as published by the Free Software Foundation; either version
+ * 2 of the Licence, or (at your option) any later version.
+ */
+#include <asm/fpu.h>
+
+/*
+ * handle an FPU operational exception
+ * - there's a possibility that if the FPU is asynchronous, the signal might
+ * be meant for a process other than the current one
+ */
+asmlinkage
+void unexpected_fpu_exception(struct pt_regs *regs, enum exception_code code)
+{
+ panic("An FPU exception was received, but there's no FPU enabled.");
+}
+
+/*
+ * fill in the FPU structure for a core dump
+ */
+int dump_fpu(struct pt_regs *regs, elf_fpregset_t *fpreg)
+{
+ return 0; /* not valid */
+}
diff --git a/arch/mn10300/kernel/fpu.c b/arch/mn10300/kernel/fpu.c
index e705f25ad5ff..5f9c3fa19a85 100644
--- a/arch/mn10300/kernel/fpu.c
+++ b/arch/mn10300/kernel/fpu.c
@@ -12,56 +12,19 @@
#include <asm/fpu.h>
#include <asm/elf.h>
#include <asm/exceptions.h>
+#include <asm/system.h>
+#ifdef CONFIG_LAZY_SAVE_FPU
struct task_struct *fpu_state_owner;
+#endif
/*
- * handle an exception due to the FPU being disabled
+ * error functions in FPU disabled exception
*/
-asmlinkage void fpu_disabled(struct pt_regs *regs, enum exception_code code)
+asmlinkage void fpu_disabled_in_kernel(struct pt_regs *regs)
{
- struct task_struct *tsk = current;
-
- if (!user_mode(regs))
- die_if_no_fixup("An FPU Disabled exception happened in"
- " kernel space\n",
- regs, code);
-
-#ifdef CONFIG_FPU
- preempt_disable();
-
- /* transfer the last process's FPU state to memory */
- if (fpu_state_owner) {
- fpu_save(&fpu_state_owner->thread.fpu_state);
- fpu_state_owner->thread.uregs->epsw &= ~EPSW_FE;
- }
-
- /* the current process now owns the FPU state */
- fpu_state_owner = tsk;
- regs->epsw |= EPSW_FE;
-
- /* load the FPU with the current process's FPU state or invent a new
- * clean one if the process doesn't have one */
- if (is_using_fpu(tsk)) {
- fpu_restore(&tsk->thread.fpu_state);
- } else {
- fpu_init_state();
- set_using_fpu(tsk);
- }
-
- preempt_enable();
-#else
- {
- siginfo_t info;
-
- info.si_signo = SIGFPE;
- info.si_errno = 0;
- info.si_addr = (void *) tsk->thread.uregs->pc;
- info.si_code = FPE_FLTINV;
-
- force_sig_info(SIGFPE, &info, tsk);
- }
-#endif /* CONFIG_FPU */
+ die_if_no_fixup("An FPU Disabled exception happened in kernel space\n",
+ regs, EXCEP_FPU_DISABLED);
}
/*
@@ -71,15 +34,16 @@ asmlinkage void fpu_disabled(struct pt_regs *regs, enum exception_code code)
*/
asmlinkage void fpu_exception(struct pt_regs *regs, enum exception_code code)
{
- struct task_struct *tsk = fpu_state_owner;
+ struct task_struct *tsk = current;
siginfo_t info;
+ u32 fpcr;
if (!user_mode(regs))
die_if_no_fixup("An FPU Operation exception happened in"
" kernel space\n",
regs, code);
- if (!tsk)
+ if (!is_using_fpu(tsk))
die_if_no_fixup("An FPU Operation exception happened,"
" but the FPU is not in use",
regs, code);
@@ -89,48 +53,45 @@ asmlinkage void fpu_exception(struct pt_regs *regs, enum exception_code code)
info.si_addr = (void *) tsk->thread.uregs->pc;
info.si_code = FPE_FLTINV;
-#ifdef CONFIG_FPU
- {
- u32 fpcr;
+ unlazy_fpu(tsk);
- /* get FPCR (we need to enable the FPU whilst we do this) */
- asm volatile(" or %1,epsw \n"
-#ifdef CONFIG_MN10300_PROC_MN103E010
- " nop \n"
- " nop \n"
- " nop \n"
-#endif
- " fmov fpcr,%0 \n"
-#ifdef CONFIG_MN10300_PROC_MN103E010
- " nop \n"
- " nop \n"
- " nop \n"
-#endif
- " and %2,epsw \n"
- : "=&d"(fpcr)
- : "i"(EPSW_FE), "i"(~EPSW_FE)
- );
-
- if (fpcr & FPCR_EC_Z)
- info.si_code = FPE_FLTDIV;
- else if (fpcr & FPCR_EC_O)
- info.si_code = FPE_FLTOVF;
- else if (fpcr & FPCR_EC_U)
- info.si_code = FPE_FLTUND;
- else if (fpcr & FPCR_EC_I)
- info.si_code = FPE_FLTRES;
- }
-#endif
+ fpcr = tsk->thread.fpu_state.fpcr;
+
+ if (fpcr & FPCR_EC_Z)
+ info.si_code = FPE_FLTDIV;
+ else if (fpcr & FPCR_EC_O)
+ info.si_code = FPE_FLTOVF;
+ else if (fpcr & FPCR_EC_U)
+ info.si_code = FPE_FLTUND;
+ else if (fpcr & FPCR_EC_I)
+ info.si_code = FPE_FLTRES;
force_sig_info(SIGFPE, &info, tsk);
}
/*
+ * handle an FPU invalid_op exception
+ * - Derived from DO_EINFO() macro in arch/mn10300/kernel/traps.c
+ */
+asmlinkage void fpu_invalid_op(struct pt_regs *regs, enum exception_code code)
+{
+ siginfo_t info;
+
+ if (!user_mode(regs))
+ die_if_no_fixup("FPU invalid opcode", regs, code);
+
+ info.si_signo = SIGILL;
+ info.si_errno = 0;
+ info.si_code = ILL_COPROC;
+ info.si_addr = (void *) regs->pc;
+ force_sig_info(info.si_signo, &info, current);
+}
+
+/*
* save the FPU state to a signal context
*/
int fpu_setup_sigcontext(struct fpucontext *fpucontext)
{
-#ifdef CONFIG_FPU
struct task_struct *tsk = current;
if (!is_using_fpu(tsk))
@@ -142,11 +103,19 @@ int fpu_setup_sigcontext(struct fpucontext *fpucontext)
*/
preempt_disable();
+#ifndef CONFIG_LAZY_SAVE_FPU
+ if (tsk->thread.fpu_flags & THREAD_HAS_FPU) {
+ fpu_save(&tsk->thread.fpu_state);
+ tsk->thread.uregs->epsw &= ~EPSW_FE;
+ tsk->thread.fpu_flags &= ~THREAD_HAS_FPU;
+ }
+#else /* !CONFIG_LAZY_SAVE_FPU */
if (fpu_state_owner == tsk) {
fpu_save(&tsk->thread.fpu_state);
fpu_state_owner->thread.uregs->epsw &= ~EPSW_FE;
fpu_state_owner = NULL;
}
+#endif /* !CONFIG_LAZY_SAVE_FPU */
preempt_enable();
@@ -161,9 +130,6 @@ int fpu_setup_sigcontext(struct fpucontext *fpucontext)
return -1;
return 1;
-#else
- return 0;
-#endif
}
/*
@@ -171,17 +137,23 @@ int fpu_setup_sigcontext(struct fpucontext *fpucontext)
*/
void fpu_kill_state(struct task_struct *tsk)
{
-#ifdef CONFIG_FPU
/* disown anything left in the FPU */
preempt_disable();
+#ifndef CONFIG_LAZY_SAVE_FPU
+ if (tsk->thread.fpu_flags & THREAD_HAS_FPU) {
+ tsk->thread.uregs->epsw &= ~EPSW_FE;
+ tsk->thread.fpu_flags &= ~THREAD_HAS_FPU;
+ }
+#else /* !CONFIG_LAZY_SAVE_FPU */
if (fpu_state_owner == tsk) {
fpu_state_owner->thread.uregs->epsw &= ~EPSW_FE;
fpu_state_owner = NULL;
}
+#endif /* !CONFIG_LAZY_SAVE_FPU */
preempt_enable();
-#endif
+
/* we no longer have a valid current FPU state */
clear_using_fpu(tsk);
}
@@ -195,8 +167,7 @@ int fpu_restore_sigcontext(struct fpucontext *fpucontext)
int ret;
/* load up the old FPU state */
- ret = copy_from_user(&tsk->thread.fpu_state,
- fpucontext,
+ ret = copy_from_user(&tsk->thread.fpu_state, fpucontext,
min(sizeof(struct fpu_state_struct),
sizeof(struct fpucontext)));
if (!ret)
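
fpu.c now supports two save policies selected by CONFIG_LAZY_SAVE_FPU: the eager one tracks a per-thread THREAD_HAS_FPU flag, while the lazy one keeps a single fpu_state_owner pointer and only writes the registers back when a different task faults on an FPU instruction. A toy user-space sketch of the lazy hand-over idea (the data structures and names are invented; this is not the kernel code path):

#include <stdio.h>
#include <string.h>

struct task {
	const char *name;
	double fpu_state[4];            /* toy "FPU register file" */
};

static double hw_fpu[4];                /* toy hardware registers */
static struct task *fpu_owner;          /* plays the role of fpu_state_owner */

/* Called from the "FPU disabled" fault of @next: lazily hand over. */
static void fpu_handover(struct task *next)
{
	if (fpu_owner == next)
		return;                 /* still ours, nothing to do */
	if (fpu_owner)                  /* save the previous owner only now */
		memcpy(fpu_owner->fpu_state, hw_fpu, sizeof(hw_fpu));
	memcpy(hw_fpu, next->fpu_state, sizeof(hw_fpu));
	fpu_owner = next;
	printf("FPU now owned by %s\n", next->name);
}

int main(void)
{
	struct task a = { "a", {1, 2, 3, 4} }, b = { "b", {0} };

	fpu_handover(&a);   /* a faults and loads its state */
	fpu_handover(&b);   /* b faults: a's registers are saved only now */
	fpu_handover(&b);   /* no save/restore cost while b keeps the FPU */
	return 0;
}
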
diff --git a/arch/mn10300/kernel/gdb-io-serial-low.S b/arch/mn10300/kernel/gdb-io-serial-low.S
index 4998b24f5d3a..b1d0152e96cb 100644
--- a/arch/mn10300/kernel/gdb-io-serial-low.S
+++ b/arch/mn10300/kernel/gdb-io-serial-low.S
@@ -18,6 +18,7 @@
#include <asm/thread_info.h>
#include <asm/frame.inc>
#include <asm/intctl-regs.h>
+#include <asm/irqflags.h>
#include <unit/serial.h>
.text
@@ -69,7 +70,7 @@ gdbstub_io_rx_overflow:
bra gdbstub_io_rx_done
gdbstub_io_rx_enter:
- or EPSW_IE|EPSW_IM_1,epsw
+ LOCAL_CHANGE_INTR_MASK_LEVEL(NUM2EPSW_IM(CONFIG_GDBSTUB_IRQ_LEVEL+1))
add -4,sp
SAVE_ALL
@@ -80,7 +81,7 @@ gdbstub_io_rx_enter:
mov fp,d0
call gdbstub_rx_irq[],0 # gdbstub_rx_irq(regs,excep)
- and ~EPSW_IE,epsw
+ LOCAL_CLI
bclr 0x01,(gdbstub_busy)
.globl gdbstub_return
diff --git a/arch/mn10300/kernel/gdb-io-serial.c b/arch/mn10300/kernel/gdb-io-serial.c
index ae663dc717e9..f28dc99c6f72 100644
--- a/arch/mn10300/kernel/gdb-io-serial.c
+++ b/arch/mn10300/kernel/gdb-io-serial.c
@@ -23,6 +23,7 @@
#include <asm/exceptions.h>
#include <asm/serial-regs.h>
#include <unit/serial.h>
+#include <asm/smp.h>
/*
* initialise the GDB stub
@@ -45,22 +46,35 @@ void gdbstub_io_init(void)
XIRQxICR(GDBPORT_SERIAL_IRQ) = 0;
tmp = XIRQxICR(GDBPORT_SERIAL_IRQ);
+#if CONFIG_GDBSTUB_IRQ_LEVEL == 0
IVAR0 = EXCEP_IRQ_LEVEL0;
- set_intr_stub(EXCEP_IRQ_LEVEL0, gdbstub_io_rx_handler);
+#elif CONFIG_GDBSTUB_IRQ_LEVEL == 1
+ IVAR1 = EXCEP_IRQ_LEVEL1;
+#elif CONFIG_GDBSTUB_IRQ_LEVEL == 2
+ IVAR2 = EXCEP_IRQ_LEVEL2;
+#elif CONFIG_GDBSTUB_IRQ_LEVEL == 3
+ IVAR3 = EXCEP_IRQ_LEVEL3;
+#elif CONFIG_GDBSTUB_IRQ_LEVEL == 4
+ IVAR4 = EXCEP_IRQ_LEVEL4;
+#elif CONFIG_GDBSTUB_IRQ_LEVEL == 5
+ IVAR5 = EXCEP_IRQ_LEVEL5;
+#else
+#error "Unknown irq level for gdbstub."
+#endif
+
+ set_intr_stub(NUM2EXCEP_IRQ_LEVEL(CONFIG_GDBSTUB_IRQ_LEVEL),
+ gdbstub_io_rx_handler);
XIRQxICR(GDBPORT_SERIAL_IRQ) &= ~GxICR_REQUEST;
- XIRQxICR(GDBPORT_SERIAL_IRQ) = GxICR_ENABLE | GxICR_LEVEL_0;
+ XIRQxICR(GDBPORT_SERIAL_IRQ) =
+ GxICR_ENABLE | NUM2GxICR_LEVEL(CONFIG_GDBSTUB_IRQ_LEVEL);
tmp = XIRQxICR(GDBPORT_SERIAL_IRQ);
GDBPORT_SERIAL_IER = UART_IER_RDI | UART_IER_RLSI;
/* permit level 0 IRQs to take place */
- asm volatile(
- " and %0,epsw \n"
- " or %1,epsw \n"
- :
- : "i"(~EPSW_IM), "i"(EPSW_IE | EPSW_IM_1)
- );
+ arch_local_change_intr_mask_level(
+ NUM2EPSW_IM(CONFIG_GDBSTUB_IRQ_LEVEL + 1));
}
/*
@@ -87,6 +101,9 @@ int gdbstub_io_rx_char(unsigned char *_ch, int nonblock)
{
unsigned ix;
u8 ch, st;
+#if defined(CONFIG_MN10300_WD_TIMER)
+ int cpu;
+#endif
*_ch = 0xff;
@@ -104,8 +121,9 @@ int gdbstub_io_rx_char(unsigned char *_ch, int nonblock)
if (nonblock)
return -EAGAIN;
#ifdef CONFIG_MN10300_WD_TIMER
- watchdog_alert_counter = 0;
-#endif /* CONFIG_MN10300_WD_TIMER */
+ for (cpu = 0; cpu < NR_CPUS; cpu++)
+ watchdog_alert_counter[cpu] = 0;
+#endif
goto try_again;
}
diff --git a/arch/mn10300/kernel/gdb-io-ttysm.c b/arch/mn10300/kernel/gdb-io-ttysm.c
index a560bbc3137d..abdeea153c89 100644
--- a/arch/mn10300/kernel/gdb-io-ttysm.c
+++ b/arch/mn10300/kernel/gdb-io-ttysm.c
@@ -58,9 +58,12 @@ void __init gdbstub_io_init(void)
gdbstub_io_set_baud(115200);
/* we want to get serial receive interrupts */
- set_intr_level(gdbstub_port->rx_irq, GxICR_LEVEL_0);
- set_intr_level(gdbstub_port->tx_irq, GxICR_LEVEL_0);
- set_intr_stub(EXCEP_IRQ_LEVEL0, gdbstub_io_rx_handler);
+ set_intr_level(gdbstub_port->rx_irq,
+ NUM2GxICR_LEVEL(CONFIG_GDBSTUB_IRQ_LEVEL));
+ set_intr_level(gdbstub_port->tx_irq,
+ NUM2GxICR_LEVEL(CONFIG_GDBSTUB_IRQ_LEVEL));
+ set_intr_stub(NUM2EXCEP_IRQ_LEVEL(CONFIG_GDBSTUB_IRQ_LEVEL),
+ gdbstub_io_rx_handler);
*gdbstub_port->rx_icr |= GxICR_ENABLE;
tmp = *gdbstub_port->rx_icr;
@@ -84,12 +87,8 @@ void __init gdbstub_io_init(void)
tmp = *gdbstub_port->_control;
/* permit level 0 IRQs only */
- asm volatile(
- " and %0,epsw \n"
- " or %1,epsw \n"
- :
- : "i"(~EPSW_IM), "i"(EPSW_IE|EPSW_IM_1)
- );
+ arch_local_change_intr_mask_level(
+ NUM2EPSW_IM(CONFIG_GDBSTUB_IRQ_LEVEL + 1));
}
/*
@@ -184,6 +183,9 @@ int gdbstub_io_rx_char(unsigned char *_ch, int nonblock)
{
unsigned ix;
u8 ch, st;
+#if defined(CONFIG_MN10300_WD_TIMER)
+ int cpu;
+#endif
*_ch = 0xff;
@@ -201,8 +203,9 @@ try_again:
if (nonblock)
return -EAGAIN;
#ifdef CONFIG_MN10300_WD_TIMER
- watchdog_alert_counter = 0;
-#endif /* CONFIG_MN10300_WD_TIMER */
+ for (cpu = 0; cpu < NR_CPUS; cpu++)
+ watchdog_alert_counter[cpu] = 0;
+#endif
goto try_again;
}
diff --git a/arch/mn10300/kernel/gdb-stub.c b/arch/mn10300/kernel/gdb-stub.c
index 41b11706c8ed..b169d99d9f20 100644
--- a/arch/mn10300/kernel/gdb-stub.c
+++ b/arch/mn10300/kernel/gdb-stub.c
@@ -440,15 +440,11 @@ static const unsigned char gdbstub_insn_sizes[256] =
static int __gdbstub_mark_bp(u8 *addr, int ix)
{
- if (addr < (u8 *) 0x70000000UL)
- return 0;
- /* 70000000-7fffffff: vmalloc area */
- if (addr < (u8 *) 0x80000000UL)
+ /* vmalloc area */
+ if (((u8 *) VMALLOC_START <= addr) && (addr < (u8 *) VMALLOC_END))
goto okay;
- if (addr < (u8 *) 0x8c000000UL)
- return 0;
- /* 8c000000-93ffffff: SRAM, SDRAM */
- if (addr < (u8 *) 0x94000000UL)
+ /* SRAM, SDRAM */
+ if (((u8 *) 0x80000000UL <= addr) && (addr < (u8 *) 0xa0000000UL))
goto okay;
return 0;
@@ -1197,9 +1193,9 @@ static int gdbstub(struct pt_regs *regs, enum exception_code excep)
mn10300_set_gdbleds(1);
asm volatile("mov mdr,%0" : "=d"(mdr));
- asm volatile("mov epsw,%0" : "=d"(epsw));
- asm volatile("mov %0,epsw"
- :: "d"((epsw & ~EPSW_IM) | EPSW_IE | EPSW_IM_1));
+ local_save_flags(epsw);
+ arch_local_change_intr_mask_level(
+ NUM2EPSW_IM(CONFIG_GDBSTUB_IRQ_LEVEL + 1));
gdbstub_store_fpu();
diff --git a/arch/mn10300/kernel/head.S b/arch/mn10300/kernel/head.S
index 14f27f3bfaf4..73e00fc78072 100644
--- a/arch/mn10300/kernel/head.S
+++ b/arch/mn10300/kernel/head.S
@@ -19,6 +19,12 @@
#include <asm/frame.inc>
#include <asm/param.h>
#include <unit/serial.h>
+#ifdef CONFIG_SMP
+#include <asm/smp.h>
+#include <asm/intctl-regs.h>
+#include <asm/cpu-regs.h>
+#include <proc/smp-regs.h>
+#endif /* CONFIG_SMP */
__HEAD
@@ -30,17 +36,51 @@
.globl _start
.type _start,@function
_start:
+#ifdef CONFIG_SMP
+ #
+ # If this is a secondary CPU (AP), then deal with that elsewhere
+ #
+ mov (CPUID),d3
+ and CPUID_MASK,d3
+ bne startup_secondary
+
+ #
+ # We're dealing with the primary CPU (BP) here, then.
+ # Keep BP's D0,D1,D2 registers for boot check.
+ #
+
+ # Set up the Boot IPI for each secondary CPU
+ mov 0x1,a0
+loop_set_secondary_icr:
+ mov a0,a1
+ asl CROSS_ICR_CPU_SHIFT,a1
+ add CROSS_GxICR(SMP_BOOT_IRQ,0),a1
+ movhu (a1),d3
+ or GxICR_ENABLE|GxICR_LEVEL_0,d3
+ movhu d3,(a1)
+ movhu (a1),d3 # flush
+ inc a0
+ cmp NR_CPUS,a0
+ bne loop_set_secondary_icr
+#endif /* CONFIG_SMP */
+
# save commandline pointer
mov d0,a3
# preload the PGD pointer register
mov swapper_pg_dir,d0
mov d0,(PTBR)
+ clr d0
+ movbu d0,(PIDR)
# turn on the TLBs
mov MMUCTR_IIV|MMUCTR_DIV,d0
mov d0,(MMUCTR)
+#ifdef CONFIG_AM34_2
+ mov MMUCTR_ITE|MMUCTR_DTE|MMUCTR_CE|MMUCTR_WTE,d0
+#else
mov MMUCTR_ITE|MMUCTR_DTE|MMUCTR_CE,d0
+#endif
mov d0,(MMUCTR)
# turn on AM33v2 exception handling mode and set the trap table base
@@ -51,6 +91,11 @@ _start:
mov d0,(TBR)
# invalidate and enable both of the caches
+#ifdef CONFIG_SMP
+ mov ECHCTR,a0
+ clr d0
+ mov d0,(a0)
+#endif
mov CHCTR,a0
clr d0
movhu d0,(a0) # turn off first
@@ -61,18 +106,18 @@ _start:
btst CHCTR_ICBUSY|CHCTR_DCBUSY,d0 # wait till not busy
lne
-#ifndef CONFIG_MN10300_CACHE_DISABLED
+#ifdef CONFIG_MN10300_CACHE_ENABLED
#ifdef CONFIG_MN10300_CACHE_WBACK
#ifndef CONFIG_MN10300_CACHE_WBACK_NOWRALLOC
mov CHCTR_ICEN|CHCTR_DCEN|CHCTR_DCWTMD_WRBACK,d0
#else
mov CHCTR_ICEN|CHCTR_DCEN|CHCTR_DCWTMD_WRBACK|CHCTR_DCALMD,d0
-#endif /* CACHE_DISABLED */
+#endif /* NOWRALLOC */
#else
mov CHCTR_ICEN|CHCTR_DCEN|CHCTR_DCWTMD_WRTHROUGH,d0
#endif /* WBACK */
movhu d0,(a0) # enable
-#endif /* NOWRALLOC */
+#endif /* ENABLED */
# turn on RTS on the debug serial port if applicable
#ifdef CONFIG_MN10300_UNIT_ASB2305
@@ -206,6 +251,44 @@ __no_parameters:
call processor_init[],0
call unit_init[],0
+#ifdef CONFIG_SMP
+ # mark the primary CPU in cpu_boot_map
+ mov cpu_boot_map,a0
+ mov 0x1,d0
+ mov d0,(a0)
+
+ # signal each secondary CPU to begin booting
+ mov 0x1,d2 # CPU ID
+
+loop_request_boot_secondary:
+ mov d2,a0
+ # send SMP_BOOT_IPI to secondary CPU
+ asl CROSS_ICR_CPU_SHIFT,a0
+ add CROSS_GxICR(SMP_BOOT_IRQ,0),a0
+ movhu (a0),d0
+ or GxICR_REQUEST|GxICR_DETECT,d0
+ movhu d0,(a0)
+ movhu (a0),d0 # flush
+
+ # wait up to 100ms for AP's IPI to be received
+ clr d3
+wait_on_secondary_boot:
+ mov DELAY_TIME_BOOT_IPI,d0
+ call __delay[],0
+ inc d3
+ mov cpu_boot_map,a0
+ mov (a0),d0
+ lsr d2,d0
+ btst 0x1,d0
+ bne 1f
+ cmp TIME_OUT_COUNT_BOOT_IPI,d3
+ bne wait_on_secondary_boot
+1:
+ inc d2
+ cmp NR_CPUS,d2
+ bne loop_request_boot_secondary
+#endif /* CONFIG_SMP */
+
#ifdef CONFIG_GDBSTUB
call gdbstub_init[],0
@@ -217,7 +300,118 @@ __gdbstub_pause:
#endif
jmp start_kernel
- .size _start, _start-.
+ .size _start,.-_start
+
+###############################################################################
+#
+# Secondary CPU boot point
+#
+###############################################################################
+#ifdef CONFIG_SMP
+startup_secondary:
+ # preload the PGD pointer register
+ mov swapper_pg_dir,d0
+ mov d0,(PTBR)
+ clr d0
+ movbu d0,(PIDR)
+
+ # turn on the TLBs
+ mov MMUCTR_IIV|MMUCTR_DIV,d0
+ mov d0,(MMUCTR)
+#ifdef CONFIG_AM34_2
+ mov MMUCTR_ITE|MMUCTR_DTE|MMUCTR_CE|MMUCTR_WTE,d0
+#else
+ mov MMUCTR_ITE|MMUCTR_DTE|MMUCTR_CE,d0
+#endif
+ mov d0,(MMUCTR)
+
+ # turn on AM33v2 exception handling mode and set the trap table base
+ movhu (CPUP),d0
+ or CPUP_EXM_AM33V2,d0
+ movhu d0,(CPUP)
+
+ # set the interrupt vector table
+ mov CONFIG_INTERRUPT_VECTOR_BASE,d0
+ mov d0,(TBR)
+
+ # invalidate and enable both of the caches
+ mov ECHCTR,a0
+ clr d0
+ mov d0,(a0)
+ mov CHCTR,a0
+ clr d0
+ movhu d0,(a0) # turn off first
+ mov CHCTR_ICINV|CHCTR_DCINV,d0
+ movhu d0,(a0)
+ setlb
+ mov (a0),d0
+ btst CHCTR_ICBUSY|CHCTR_DCBUSY,d0 # wait till not busy (use CPU loop buffer)
+ lne
+
+#ifdef CONFIG_MN10300_CACHE_ENABLED
+#ifdef CONFIG_MN10300_CACHE_WBACK
+#ifndef CONFIG_MN10300_CACHE_WBACK_NOWRALLOC
+ mov CHCTR_ICEN|CHCTR_DCEN|CHCTR_DCWTMD_WRBACK,d0
+#else
+ mov CHCTR_ICEN|CHCTR_DCEN|CHCTR_DCWTMD_WRBACK|CHCTR_DCALMD,d0
+#endif /* !NOWRALLOC */
+#else
+ mov CHCTR_ICEN|CHCTR_DCEN|CHCTR_DCWTMD_WRTHROUGH,d0
+#endif /* WBACK */
+ movhu d0,(a0) # enable
+#endif /* ENABLED */
+
+ # Clear the boot IPI interrupt for this CPU
+ movhu (GxICR(SMP_BOOT_IRQ)),d0
+ and ~GxICR_REQUEST,d0
+ movhu d0,(GxICR(SMP_BOOT_IRQ))
+ movhu (GxICR(SMP_BOOT_IRQ)),d0 # flush
+
+ /* get stack */
+ mov CONFIG_INTERRUPT_VECTOR_BASE + CONFIG_BOOT_STACK_OFFSET,a0
+ mov (CPUID),d0
+ and CPUID_MASK,d0
+ mulu CONFIG_BOOT_STACK_SIZE,d0
+ sub d0,a0
+ mov a0,sp
+
+ # init interrupt for AP
+ call smp_prepare_cpu_init[],0
+
+ # mark this secondary CPU in cpu_boot_map
+ mov (CPUID),d0
+ mov 0x1,d1
+ asl d0,d1
+ mov cpu_boot_map,a0
+ bset d1,(a0)
+
+ or EPSW_IE|EPSW_IM_1,epsw # permit level 0 interrupts
+ nop
+ nop
+#ifdef CONFIG_MN10300_CACHE_WBACK
+ # flush the local cache if it's in writeback mode
+ call mn10300_local_dcache_flush_inv[],0
+ setlb
+ mov (CHCTR),d0
+ btst CHCTR_DCBUSY,d0 # wait till not busy (use CPU loop buffer)
+ lne
+#endif
+
+ # now sleep waiting for further instructions
+secondary_sleep:
+ mov CPUM_SLEEP,d0
+ movhu d0,(CPUM)
+ nop
+ nop
+ bra secondary_sleep
+ .size startup_secondary,.-startup_secondary
+#endif /* CONFIG_SMP */
+
+###############################################################################
+#
+#
+#
+###############################################################################
ENTRY(__head_end)
/*
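
The boot sequence above has the primary CPU kick each secondary with a boot IPI and then poll cpu_boot_map for up to TIME_OUT_COUNT_BOOT_IPI delay periods, while startup_secondary sets its bit once the MMU, caches and boot stack are up. A single-threaded C sketch of just the poll-with-timeout bookkeeping (the IPI and the delay are stubbed out and the constants are invented):

#include <stdio.h>

#define NR_CPUS       4
#define TIMEOUT_POLLS 100   /* stand-in for TIME_OUT_COUNT_BOOT_IPI */

static unsigned long cpu_boot_map = 0x1;   /* CPU 0 (the BP) is up */

/* Pretend the AP boots instantly and sets its own bit. */
static void send_boot_ipi(int cpu) { cpu_boot_map |= 1ul << cpu; }
static void short_delay(void)      { /* stand-in for __delay() */ }

int main(void)
{
	for (int cpu = 1; cpu < NR_CPUS; cpu++) {
		int polls = 0;

		send_boot_ipi(cpu);
		while (!(cpu_boot_map & (1ul << cpu)) &&
		       ++polls < TIMEOUT_POLLS)
			short_delay();

		printf("CPU%d %s\n", cpu,
		       (cpu_boot_map & (1ul << cpu)) ? "booted" : "timed out");
	}
	return 0;
}
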
diff --git a/arch/mn10300/kernel/internal.h b/arch/mn10300/kernel/internal.h
index eee2eee86267..6a064ab5af07 100644
--- a/arch/mn10300/kernel/internal.h
+++ b/arch/mn10300/kernel/internal.h
@@ -9,6 +9,9 @@
* 2 of the Licence, or (at your option) any later version.
*/
+struct clocksource;
+struct clock_event_device;
+
/*
* kthread.S
*/
@@ -18,3 +21,25 @@ extern int kernel_thread_helper(int);
* entry.S
*/
extern void ret_from_fork(struct task_struct *) __attribute__((noreturn));
+
+/*
+ * smp-low.S
+ */
+#ifdef CONFIG_SMP
+extern void mn10300_low_ipi_handler(void);
+#endif
+
+/*
+ * time.c
+ */
+extern irqreturn_t local_timer_interrupt(void);
+
+/*
+ * time.c
+ */
+#ifdef CONFIG_CEVT_MN10300
+extern void clockevent_set_clock(struct clock_event_device *, unsigned int);
+#endif
+#ifdef CONFIG_CSRC_MN10300
+extern void clocksource_set_clock(struct clocksource *, unsigned int);
+#endif
diff --git a/arch/mn10300/kernel/irq.c b/arch/mn10300/kernel/irq.c
index e2d5ed891f37..c2e44597c22b 100644
--- a/arch/mn10300/kernel/irq.c
+++ b/arch/mn10300/kernel/irq.c
@@ -12,11 +12,26 @@
#include <linux/interrupt.h>
#include <linux/kernel_stat.h>
#include <linux/seq_file.h>
+#include <linux/cpumask.h>
#include <asm/setup.h>
+#include <asm/serial-regs.h>
-unsigned long __mn10300_irq_enabled_epsw = EPSW_IE | EPSW_IM_7;
+unsigned long __mn10300_irq_enabled_epsw[NR_CPUS] __cacheline_aligned_in_smp = {
+ [0 ... NR_CPUS - 1] = EPSW_IE | EPSW_IM_7
+};
EXPORT_SYMBOL(__mn10300_irq_enabled_epsw);
+#ifdef CONFIG_SMP
+static char irq_affinity_online[NR_IRQS] = {
+ [0 ... NR_IRQS - 1] = 0
+};
+
+#define NR_IRQ_WORDS ((NR_IRQS + 31) / 32)
+static unsigned long irq_affinity_request[NR_IRQ_WORDS] = {
+ [0 ... NR_IRQ_WORDS - 1] = 0
+};
+#endif /* CONFIG_SMP */
+
atomic_t irq_err_count;
/*
@@ -24,30 +39,67 @@ atomic_t irq_err_count;
*/
static void mn10300_cpupic_ack(unsigned int irq)
{
+ unsigned long flags;
u16 tmp;
- *(volatile u8 *) &GxICR(irq) = GxICR_DETECT;
+
+ flags = arch_local_cli_save();
+ GxICR_u8(irq) = GxICR_DETECT;
tmp = GxICR(irq);
+ arch_local_irq_restore(flags);
}
-static void mn10300_cpupic_mask(unsigned int irq)
+static void __mask_and_set_icr(unsigned int irq,
+ unsigned int mask, unsigned int set)
{
- u16 tmp = GxICR(irq);
- GxICR(irq) = (tmp & GxICR_LEVEL);
+ unsigned long flags;
+ u16 tmp;
+
+ flags = arch_local_cli_save();
+ tmp = GxICR(irq);
+ GxICR(irq) = (tmp & mask) | set;
tmp = GxICR(irq);
+ arch_local_irq_restore(flags);
+}
+
+static void mn10300_cpupic_mask(unsigned int irq)
+{
+ __mask_and_set_icr(irq, GxICR_LEVEL, 0);
}
static void mn10300_cpupic_mask_ack(unsigned int irq)
{
- u16 tmp = GxICR(irq);
- GxICR(irq) = (tmp & GxICR_LEVEL) | GxICR_DETECT;
- tmp = GxICR(irq);
+#ifdef CONFIG_SMP
+ unsigned long flags;
+ u16 tmp;
+
+ flags = arch_local_cli_save();
+
+ if (!test_and_clear_bit(irq, irq_affinity_request)) {
+ tmp = GxICR(irq);
+ GxICR(irq) = (tmp & GxICR_LEVEL) | GxICR_DETECT;
+ tmp = GxICR(irq);
+ } else {
+ u16 tmp2;
+ tmp = GxICR(irq);
+ GxICR(irq) = (tmp & GxICR_LEVEL);
+ tmp2 = GxICR(irq);
+
+ irq_affinity_online[irq] =
+ any_online_cpu(*irq_desc[irq].affinity);
+ CROSS_GxICR(irq, irq_affinity_online[irq]) =
+ (tmp & (GxICR_LEVEL | GxICR_ENABLE)) | GxICR_DETECT;
+ tmp = CROSS_GxICR(irq, irq_affinity_online[irq]);
+ }
+
+ arch_local_irq_restore(flags);
+#else /* CONFIG_SMP */
+ __mask_and_set_icr(irq, GxICR_LEVEL, GxICR_DETECT);
+#endif /* CONFIG_SMP */
}
static void mn10300_cpupic_unmask(unsigned int irq)
{
- u16 tmp = GxICR(irq);
- GxICR(irq) = (tmp & GxICR_LEVEL) | GxICR_ENABLE;
- tmp = GxICR(irq);
+ __mask_and_set_icr(irq, GxICR_LEVEL, GxICR_ENABLE);
}
static void mn10300_cpupic_unmask_clear(unsigned int irq)
@@ -56,11 +108,89 @@ static void mn10300_cpupic_unmask_clear(unsigned int irq)
* device has ceased to assert its interrupt line and the interrupt
* channel has been disabled in the PIC, so for level-triggered
* interrupts we need to clear the request bit when we re-enable */
- u16 tmp = GxICR(irq);
- GxICR(irq) = (tmp & GxICR_LEVEL) | GxICR_ENABLE | GxICR_DETECT;
- tmp = GxICR(irq);
+#ifdef CONFIG_SMP
+ unsigned long flags;
+ u16 tmp;
+
+ flags = arch_local_cli_save();
+
+ if (!test_and_clear_bit(irq, irq_affinity_request)) {
+ tmp = GxICR(irq);
+ GxICR(irq) = (tmp & GxICR_LEVEL) | GxICR_ENABLE | GxICR_DETECT;
+ tmp = GxICR(irq);
+ } else {
+ tmp = GxICR(irq);
+
+ irq_affinity_online[irq] = any_online_cpu(*irq_desc[irq].affinity);
+ CROSS_GxICR(irq, irq_affinity_online[irq]) = (tmp & GxICR_LEVEL) | GxICR_ENABLE | GxICR_DETECT;
+ tmp = CROSS_GxICR(irq, irq_affinity_online[irq]);
+ }
+
+ arch_local_irq_restore(flags);
+#else /* CONFIG_SMP */
+ __mask_and_set_icr(irq, GxICR_LEVEL, GxICR_ENABLE | GxICR_DETECT);
+#endif /* CONFIG_SMP */
}
+#ifdef CONFIG_SMP
+static int
+mn10300_cpupic_setaffinity(unsigned int irq, const struct cpumask *mask)
+{
+ unsigned long flags;
+ int err;
+
+ flags = arch_local_cli_save();
+
+ /* check irq no */
+ switch (irq) {
+ case TMJCIRQ:
+ case RESCHEDULE_IPI:
+ case CALL_FUNC_SINGLE_IPI:
+ case LOCAL_TIMER_IPI:
+ case FLUSH_CACHE_IPI:
+ case CALL_FUNCTION_NMI_IPI:
+ case GDB_NMI_IPI:
+#ifdef CONFIG_MN10300_TTYSM0
+ case SC0RXIRQ:
+ case SC0TXIRQ:
+#ifdef CONFIG_MN10300_TTYSM0_TIMER8
+ case TM8IRQ:
+#elif CONFIG_MN10300_TTYSM0_TIMER2
+ case TM2IRQ:
+#endif /* CONFIG_MN10300_TTYSM0_TIMER8 */
+#endif /* CONFIG_MN10300_TTYSM0 */
+
+#ifdef CONFIG_MN10300_TTYSM1
+ case SC1RXIRQ:
+ case SC1TXIRQ:
+#ifdef CONFIG_MN10300_TTYSM1_TIMER12
+ case TM12IRQ:
+#elif CONFIG_MN10300_TTYSM1_TIMER9
+ case TM9IRQ:
+#elif CONFIG_MN10300_TTYSM1_TIMER3
+ case TM3IRQ:
+#endif /* CONFIG_MN10300_TTYSM1_TIMER12 */
+#endif /* CONFIG_MN10300_TTYSM1 */
+
+#ifdef CONFIG_MN10300_TTYSM2
+ case SC2RXIRQ:
+ case SC2TXIRQ:
+ case TM10IRQ:
+#endif /* CONFIG_MN10300_TTYSM2 */
+ err = -1;
+ break;
+
+ default:
+ set_bit(irq, irq_affinity_request);
+ err = 0;
+ break;
+ }
+
+ arch_local_irq_restore(flags);
+ return err;
+}
+#endif /* CONFIG_SMP */
+
/*
* MN10300 PIC level-triggered IRQ handling.
*
@@ -79,6 +209,9 @@ static struct irq_chip mn10300_cpu_pic_level = {
.mask = mn10300_cpupic_mask,
.mask_ack = mn10300_cpupic_mask,
.unmask = mn10300_cpupic_unmask_clear,
+#ifdef CONFIG_SMP
+ .set_affinity = mn10300_cpupic_setaffinity,
+#endif
};
/*
@@ -94,6 +227,9 @@ static struct irq_chip mn10300_cpu_pic_edge = {
.mask = mn10300_cpupic_mask,
.mask_ack = mn10300_cpupic_mask_ack,
.unmask = mn10300_cpupic_unmask,
+#ifdef CONFIG_SMP
+ .set_affinity = mn10300_cpupic_setaffinity,
+#endif
};
/*
@@ -111,14 +247,34 @@ void ack_bad_irq(int irq)
*/
void set_intr_level(int irq, u16 level)
{
- u16 tmp;
+ BUG_ON(in_interrupt());
- if (in_interrupt())
- BUG();
+ __mask_and_set_icr(irq, GxICR_ENABLE, level);
+}
- tmp = GxICR(irq);
- GxICR(irq) = (tmp & GxICR_ENABLE) | level;
- tmp = GxICR(irq);
+void mn10300_intc_set_level(unsigned int irq, unsigned int level)
+{
+ set_intr_level(irq, NUM2GxICR_LEVEL(level) & GxICR_LEVEL);
+}
+
+void mn10300_intc_clear(unsigned int irq)
+{
+ __mask_and_set_icr(irq, GxICR_LEVEL | GxICR_ENABLE, GxICR_DETECT);
+}
+
+void mn10300_intc_set(unsigned int irq)
+{
+ __mask_and_set_icr(irq, 0, GxICR_REQUEST | GxICR_DETECT);
+}
+
+void mn10300_intc_enable(unsigned int irq)
+{
+ mn10300_cpupic_unmask(irq);
+}
+
+void mn10300_intc_disable(unsigned int irq)
+{
+ mn10300_cpupic_mask(irq);
}
/*
@@ -126,7 +282,7 @@ void set_intr_level(int irq, u16 level)
* than before
* - see Documentation/mn10300/features.txt
*/
-void set_intr_postackable(int irq)
+void mn10300_set_lateack_irq_type(int irq)
{
set_irq_chip_and_handler(irq, &mn10300_cpu_pic_level,
handle_level_irq);
@@ -147,6 +303,7 @@ void __init init_IRQ(void)
* interrupts */
set_irq_chip_and_handler(irq, &mn10300_cpu_pic_edge,
handle_level_irq);
+
unit_init_IRQ();
}
@@ -156,20 +313,22 @@ void __init init_IRQ(void)
asmlinkage void do_IRQ(void)
{
unsigned long sp, epsw, irq_disabled_epsw, old_irq_enabled_epsw;
+ unsigned int cpu_id = smp_processor_id();
int irq;
sp = current_stack_pointer();
- if (sp - (sp & ~(THREAD_SIZE - 1)) < STACK_WARN)
- BUG();
+ BUG_ON(sp - (sp & ~(THREAD_SIZE - 1)) < STACK_WARN);
/* make sure local_irq_enable() doesn't muck up the interrupt priority
* setting in EPSW */
- old_irq_enabled_epsw = __mn10300_irq_enabled_epsw;
+ old_irq_enabled_epsw = __mn10300_irq_enabled_epsw[cpu_id];
local_save_flags(epsw);
- __mn10300_irq_enabled_epsw = EPSW_IE | (EPSW_IM & epsw);
+ __mn10300_irq_enabled_epsw[cpu_id] = EPSW_IE | (EPSW_IM & epsw);
irq_disabled_epsw = EPSW_IE | MN10300_CLI_LEVEL;
- __IRQ_STAT(smp_processor_id(), __irq_count)++;
+#ifdef CONFIG_MN10300_WD_TIMER
+ __IRQ_STAT(cpu_id, __irq_count)++;
+#endif
irq_enter();
@@ -189,7 +348,7 @@ asmlinkage void do_IRQ(void)
local_irq_restore(epsw);
}
- __mn10300_irq_enabled_epsw = old_irq_enabled_epsw;
+ __mn10300_irq_enabled_epsw[cpu_id] = old_irq_enabled_epsw;
irq_exit();
}
@@ -222,9 +381,16 @@ int show_interrupts(struct seq_file *p, void *v)
seq_printf(p, "%3d: ", i);
for_each_present_cpu(cpu)
seq_printf(p, "%10u ", kstat_irqs_cpu(i, cpu));
- seq_printf(p, " %14s.%u", irq_desc[i].chip->name,
- (GxICR(i) & GxICR_LEVEL) >>
- GxICR_LEVEL_SHIFT);
+
+ if (i < NR_CPU_IRQS)
+ seq_printf(p, " %14s.%u",
+ irq_desc[i].chip->name,
+ (GxICR(i) & GxICR_LEVEL) >>
+ GxICR_LEVEL_SHIFT);
+ else
+ seq_printf(p, " %14s",
+ irq_desc[i].chip->name);
+
seq_printf(p, " %s", action->name);
for (action = action->next;
@@ -240,11 +406,13 @@ int show_interrupts(struct seq_file *p, void *v)
/* polish off with NMI and error counters */
case NR_IRQS:
+#ifdef CONFIG_MN10300_WD_TIMER
seq_printf(p, "NMI: ");
for (j = 0; j < NR_CPUS; j++)
if (cpu_online(j))
seq_printf(p, "%10u ", nmi_count(j));
seq_putc(p, '\n');
+#endif
seq_printf(p, "ERR: %10u\n", atomic_read(&irq_err_count));
break;
@@ -252,3 +420,51 @@ int show_interrupts(struct seq_file *p, void *v)
return 0;
}
+
+#ifdef CONFIG_HOTPLUG_CPU
+void migrate_irqs(void)
+{
+ irq_desc_t *desc;
+ int irq;
+ unsigned int self, new;
+ unsigned long flags;
+
+ self = smp_processor_id();
+ for (irq = 0; irq < NR_IRQS; irq++) {
+ desc = irq_desc + irq;
+
+ if (desc->status == IRQ_PER_CPU)
+ continue;
+
+ if (cpu_isset(self, irq_desc[irq].affinity) &&
+ !cpus_intersects(irq_affinity[irq], cpu_online_map)) {
+ int cpu_id;
+ cpu_id = first_cpu(cpu_online_map);
+ cpu_set(cpu_id, irq_desc[irq].affinity);
+ }
+ /* We need to operate irq_affinity_online atomically. */
+ arch_local_cli_save(flags);
+ if (irq_affinity_online[irq] == self) {
+ u16 x, tmp;
+
+ x = GxICR(irq);
+ GxICR(irq) = x & GxICR_LEVEL;
+ tmp = GxICR(irq);
+
+ new = any_online_cpu(irq_desc[irq].affinity);
+ irq_affinity_online[irq] = new;
+
+ CROSS_GxICR(irq, new) =
+ (x & GxICR_LEVEL) | GxICR_DETECT;
+ tmp = CROSS_GxICR(irq, new);
+
+ x &= GxICR_LEVEL | GxICR_ENABLE;
+ if (GxICR(irq) & GxICR_REQUEST) {
+ x |= GxICR_REQUEST | GxICR_DETECT;
+ CROSS_GxICR(irq, new) = x;
+ tmp = CROSS_GxICR(irq, new);
+ }
+ }
+ arch_local_irq_restore(flags);
+ }
+}
+#endif /* CONFIG_HOTPLUG_CPU */
diff --git a/arch/mn10300/kernel/kprobes.c b/arch/mn10300/kernel/kprobes.c
index 67e6389d625a..0311a7fcea16 100644
--- a/arch/mn10300/kernel/kprobes.c
+++ b/arch/mn10300/kernel/kprobes.c
@@ -377,8 +377,10 @@ void __kprobes arch_arm_kprobe(struct kprobe *p)
void __kprobes arch_disarm_kprobe(struct kprobe *p)
{
+#ifndef CONFIG_MN10300_CACHE_SNOOP
mn10300_dcache_flush();
mn10300_icache_inv();
+#endif
}
void arch_remove_kprobe(struct kprobe *p)
@@ -390,8 +392,10 @@ void __kprobes disarm_kprobe(struct kprobe *p, struct pt_regs *regs)
{
*p->addr = p->opcode;
regs->pc = (unsigned long) p->addr;
+#ifndef CONFIG_MN10300_CACHE_SNOOP
mn10300_dcache_flush();
mn10300_icache_inv();
+#endif
}
static inline
diff --git a/arch/mn10300/kernel/mn10300-serial-low.S b/arch/mn10300/kernel/mn10300-serial-low.S
index 66702d256610..dfc1b6f2fa9a 100644
--- a/arch/mn10300/kernel/mn10300-serial-low.S
+++ b/arch/mn10300/kernel/mn10300-serial-low.S
@@ -39,7 +39,7 @@
###############################################################################
.balign L1_CACHE_BYTES
ENTRY(mn10300_serial_vdma_interrupt)
- or EPSW_IE,psw # permit overriding by
+# or EPSW_IE,psw # permit overriding by
# debugging interrupts
movm [d2,d3,a2,a3,exreg0],(sp)
@@ -164,7 +164,7 @@ mnsc_vdma_tx_noint:
rti
mnsc_vdma_tx_empty:
- mov +(GxICR_LEVEL_1|GxICR_DETECT),d2
+ mov +(NUM2GxICR_LEVEL(CONFIG_MN10300_SERIAL_IRQ_LEVEL)|GxICR_DETECT),d2
movhu d2,(e3) # disable the interrupt
movhu (e3),d2 # flush
@@ -175,7 +175,7 @@ mnsc_vdma_tx_break:
movhu (SCxCTR,e2),d2 # turn on break mode
or SC01CTR_BKE,d2
movhu d2,(SCxCTR,e2)
- mov +(GxICR_LEVEL_1|GxICR_DETECT),d2
+ mov +(NUM2GxICR_LEVEL(CONFIG_MN10300_SERIAL_IRQ_LEVEL)|GxICR_DETECT),d2
movhu d2,(e3) # disable transmit interrupts on this
# channel
movhu (e3),d2 # flush
diff --git a/arch/mn10300/kernel/mn10300-serial.c b/arch/mn10300/kernel/mn10300-serial.c
index db509dd80565..996384dba45d 100644
--- a/arch/mn10300/kernel/mn10300-serial.c
+++ b/arch/mn10300/kernel/mn10300-serial.c
@@ -44,6 +44,11 @@ static const char serial_revdate[] = "2007-11-06";
#include <unit/timex.h>
#include "mn10300-serial.h"
+#ifdef CONFIG_SMP
+#undef GxICR
+#define GxICR(X) CROSS_GxICR(X, 0)
+#endif /* CONFIG_SMP */
+
#define kenter(FMT, ...) \
printk(KERN_DEBUG "-->%s(" FMT ")\n", __func__, ##__VA_ARGS__)
#define _enter(FMT, ...) \
@@ -57,6 +62,11 @@ static const char serial_revdate[] = "2007-11-06";
#define _proto(FMT, ...) \
no_printk(KERN_DEBUG "### MNSERIAL " FMT " ###\n", ##__VA_ARGS__)
+#ifndef CODMSB
+/* c_cflag bit meaning */
+#define CODMSB 004000000000 /* change Transfer bit-order */
+#endif
+
#define NR_UARTS 3
#ifdef CONFIG_MN10300_TTYSM_CONSOLE
@@ -152,26 +162,35 @@ struct mn10300_serial_port mn10300_serial_port_sif0 = {
.name = "ttySM0",
._iobase = &SC0CTR,
._control = &SC0CTR,
- ._status = (volatile u8 *) &SC0STR,
+ ._status = (volatile u8 *)&SC0STR,
._intr = &SC0ICR,
._rxb = &SC0RXB,
._txb = &SC0TXB,
.rx_name = "ttySM0:Rx",
.tx_name = "ttySM0:Tx",
-#ifdef CONFIG_MN10300_TTYSM0_TIMER8
+#if defined(CONFIG_MN10300_TTYSM0_TIMER8)
.tm_name = "ttySM0:Timer8",
._tmxmd = &TM8MD,
._tmxbr = &TM8BR,
._tmicr = &TM8ICR,
.tm_irq = TM8IRQ,
.div_timer = MNSCx_DIV_TIMER_16BIT,
-#else /* CONFIG_MN10300_TTYSM0_TIMER2 */
+#elif defined(CONFIG_MN10300_TTYSM0_TIMER0)
+ .tm_name = "ttySM0:Timer0",
+ ._tmxmd = &TM0MD,
+ ._tmxbr = (volatile u16 *)&TM0BR,
+ ._tmicr = &TM0ICR,
+ .tm_irq = TM0IRQ,
+ .div_timer = MNSCx_DIV_TIMER_8BIT,
+#elif defined(CONFIG_MN10300_TTYSM0_TIMER2)
.tm_name = "ttySM0:Timer2",
._tmxmd = &TM2MD,
- ._tmxbr = (volatile u16 *) &TM2BR,
+ ._tmxbr = (volatile u16 *)&TM2BR,
._tmicr = &TM2ICR,
.tm_irq = TM2IRQ,
.div_timer = MNSCx_DIV_TIMER_8BIT,
+#else
+#error "Unknown config for ttySM0"
#endif
.rx_irq = SC0RXIRQ,
.tx_irq = SC0TXIRQ,
@@ -205,26 +224,35 @@ struct mn10300_serial_port mn10300_serial_port_sif1 = {
.name = "ttySM1",
._iobase = &SC1CTR,
._control = &SC1CTR,
- ._status = (volatile u8 *) &SC1STR,
+ ._status = (volatile u8 *)&SC1STR,
._intr = &SC1ICR,
._rxb = &SC1RXB,
._txb = &SC1TXB,
.rx_name = "ttySM1:Rx",
.tx_name = "ttySM1:Tx",
-#ifdef CONFIG_MN10300_TTYSM1_TIMER9
+#if defined(CONFIG_MN10300_TTYSM1_TIMER9)
.tm_name = "ttySM1:Timer9",
._tmxmd = &TM9MD,
._tmxbr = &TM9BR,
._tmicr = &TM9ICR,
.tm_irq = TM9IRQ,
.div_timer = MNSCx_DIV_TIMER_16BIT,
-#else /* CONFIG_MN10300_TTYSM1_TIMER3 */
+#elif defined(CONFIG_MN10300_TTYSM1_TIMER3)
.tm_name = "ttySM1:Timer3",
._tmxmd = &TM3MD,
- ._tmxbr = (volatile u16 *) &TM3BR,
+ ._tmxbr = (volatile u16 *)&TM3BR,
._tmicr = &TM3ICR,
.tm_irq = TM3IRQ,
.div_timer = MNSCx_DIV_TIMER_8BIT,
+#elif defined(CONFIG_MN10300_TTYSM1_TIMER12)
+ .tm_name = "ttySM1/Timer12",
+ ._tmxmd = &TM12MD,
+ ._tmxbr = &TM12BR,
+ ._tmicr = &TM12ICR,
+ .tm_irq = TM12IRQ,
+ .div_timer = MNSCx_DIV_TIMER_16BIT,
+#else
+#error "Unknown config for ttySM1"
#endif
.rx_irq = SC1RXIRQ,
.tx_irq = SC1TXIRQ,
@@ -260,20 +288,45 @@ struct mn10300_serial_port mn10300_serial_port_sif2 = {
.uart.lock =
__SPIN_LOCK_UNLOCKED(mn10300_serial_port_sif2.uart.lock),
.name = "ttySM2",
- .rx_name = "ttySM2:Rx",
- .tx_name = "ttySM2:Tx",
- .tm_name = "ttySM2:Timer10",
._iobase = &SC2CTR,
._control = &SC2CTR,
- ._status = &SC2STR,
+ ._status = (volatile u8 *)&SC2STR,
._intr = &SC2ICR,
._rxb = &SC2RXB,
._txb = &SC2TXB,
+ .rx_name = "ttySM2:Rx",
+ .tx_name = "ttySM2:Tx",
+#if defined(CONFIG_MN10300_TTYSM2_TIMER10)
+ .tm_name = "ttySM2/Timer10",
._tmxmd = &TM10MD,
._tmxbr = &TM10BR,
._tmicr = &TM10ICR,
.tm_irq = TM10IRQ,
.div_timer = MNSCx_DIV_TIMER_16BIT,
+#elif defined(CONFIG_MN10300_TTYSM2_TIMER9)
+ .tm_name = "ttySM2/Timer9",
+ ._tmxmd = &TM9MD,
+ ._tmxbr = &TM9BR,
+ ._tmicr = &TM9ICR,
+ .tm_irq = TM9IRQ,
+ .div_timer = MNSCx_DIV_TIMER_16BIT,
+#elif defined(CONFIG_MN10300_TTYSM2_TIMER1)
+ .tm_name = "ttySM2/Timer1",
+ ._tmxmd = &TM1MD,
+ ._tmxbr = (volatile u16 *)&TM1BR,
+ ._tmicr = &TM1ICR,
+ .tm_irq = TM1IRQ,
+ .div_timer = MNSCx_DIV_TIMER_8BIT,
+#elif defined(CONFIG_MN10300_TTYSM2_TIMER3)
+ .tm_name = "ttySM2/Timer3",
+ ._tmxmd = &TM3MD,
+ ._tmxbr = (volatile u16 *)&TM3BR,
+ ._tmicr = &TM3ICR,
+ .tm_irq = TM3IRQ,
+ .div_timer = MNSCx_DIV_TIMER_8BIT,
+#else
+#error "Unknown config for ttySM2"
+#endif
.rx_irq = SC2RXIRQ,
.tx_irq = SC2TXIRQ,
.rx_icr = &GxICR(SC2RXIRQ),
@@ -322,9 +375,13 @@ struct mn10300_serial_port *mn10300_serial_ports[NR_UARTS + 1] = {
*/
static void mn10300_serial_mask_ack(unsigned int irq)
{
+ unsigned long flags;
u16 tmp;
+
+ flags = arch_local_cli_save();
GxICR(irq) = GxICR_LEVEL_6;
tmp = GxICR(irq); /* flush write buffer */
+ arch_local_irq_restore(flags);
}
static void mn10300_serial_nop(unsigned int irq)
@@ -348,23 +405,36 @@ struct mn10300_serial_int mn10300_serial_int_tbl[NR_IRQS];
static void mn10300_serial_dis_tx_intr(struct mn10300_serial_port *port)
{
+ unsigned long flags;
u16 x;
- *port->tx_icr = GxICR_LEVEL_1 | GxICR_DETECT;
+
+ flags = arch_local_cli_save();
+ *port->tx_icr = NUM2GxICR_LEVEL(CONFIG_MN10300_SERIAL_IRQ_LEVEL);
x = *port->tx_icr;
+ arch_local_irq_restore(flags);
}
static void mn10300_serial_en_tx_intr(struct mn10300_serial_port *port)
{
+ unsigned long flags;
u16 x;
- *port->tx_icr = GxICR_LEVEL_1 | GxICR_ENABLE;
+
+ flags = arch_local_cli_save();
+ *port->tx_icr =
+ NUM2GxICR_LEVEL(CONFIG_MN10300_SERIAL_IRQ_LEVEL) | GxICR_ENABLE;
x = *port->tx_icr;
+ arch_local_irq_restore(flags);
}
static void mn10300_serial_dis_rx_intr(struct mn10300_serial_port *port)
{
+ unsigned long flags;
u16 x;
- *port->rx_icr = GxICR_LEVEL_1 | GxICR_DETECT;
+
+ flags = arch_local_cli_save();
+ *port->rx_icr = NUM2GxICR_LEVEL(CONFIG_MN10300_SERIAL_IRQ_LEVEL);
x = *port->rx_icr;
+ arch_local_irq_restore(flags);
}
/*
@@ -650,7 +720,7 @@ static unsigned int mn10300_serial_tx_empty(struct uart_port *_port)
static void mn10300_serial_set_mctrl(struct uart_port *_port,
unsigned int mctrl)
{
- struct mn10300_serial_port *port =
+ struct mn10300_serial_port *port __attribute__ ((unused)) =
container_of(_port, struct mn10300_serial_port, uart);
_enter("%s,%x", port->name, mctrl);
@@ -706,6 +776,7 @@ static void mn10300_serial_start_tx(struct uart_port *_port)
UART_XMIT_SIZE));
/* kick the virtual DMA controller */
+ arch_local_cli();
x = *port->tx_icr;
x |= GxICR_ENABLE;
@@ -716,10 +787,14 @@ static void mn10300_serial_start_tx(struct uart_port *_port)
_debug("CTR=%04hx ICR=%02hx STR=%04x TMD=%02hx TBR=%04hx ICR=%04hx",
*port->_control, *port->_intr, *port->_status,
- *port->_tmxmd, *port->_tmxbr, *port->tx_icr);
+ *port->_tmxmd,
+ (port->div_timer == MNSCx_DIV_TIMER_8BIT) ?
+ *(volatile u8 *)port->_tmxbr : *port->_tmxbr,
+ *port->tx_icr);
*port->tx_icr = x;
x = *port->tx_icr;
+ arch_local_sti();
}
/*
@@ -842,8 +917,10 @@ static int mn10300_serial_startup(struct uart_port *_port)
pint->port = port;
pint->vdma = mn10300_serial_vdma_tx_handler;
- set_intr_level(port->rx_irq, GxICR_LEVEL_1);
- set_intr_level(port->tx_irq, GxICR_LEVEL_1);
+ set_intr_level(port->rx_irq,
+ NUM2GxICR_LEVEL(CONFIG_MN10300_SERIAL_IRQ_LEVEL));
+ set_intr_level(port->tx_irq,
+ NUM2GxICR_LEVEL(CONFIG_MN10300_SERIAL_IRQ_LEVEL));
set_irq_chip(port->tm_irq, &mn10300_serial_pic);
if (request_irq(port->rx_irq, mn10300_serial_interrupt,
@@ -876,6 +953,7 @@ error:
*/
static void mn10300_serial_shutdown(struct uart_port *_port)
{
+ u16 x;
struct mn10300_serial_port *port =
container_of(_port, struct mn10300_serial_port, uart);
@@ -897,8 +975,12 @@ static void mn10300_serial_shutdown(struct uart_port *_port)
free_irq(port->rx_irq, port);
free_irq(port->tx_irq, port);
- *port->rx_icr = GxICR_LEVEL_1;
- *port->tx_icr = GxICR_LEVEL_1;
+ arch_local_cli();
+ *port->rx_icr = NUM2GxICR_LEVEL(CONFIG_MN10300_SERIAL_IRQ_LEVEL);
+ x = *port->rx_icr;
+ *port->tx_icr = NUM2GxICR_LEVEL(CONFIG_MN10300_SERIAL_IRQ_LEVEL);
+ x = *port->tx_icr;
+ arch_local_sti();
}
/*
@@ -947,11 +1029,66 @@ static void mn10300_serial_change_speed(struct mn10300_serial_port *port,
/* Determine divisor based on baud rate */
battempt = 0;
- if (div_timer == MNSCx_DIV_TIMER_16BIT)
- scxctr |= SC0CTR_CK_TM8UFLOW_8; /* ( == SC1CTR_CK_TM9UFLOW_8
- * == SC2CTR_CK_TM10UFLOW) */
- else if (div_timer == MNSCx_DIV_TIMER_8BIT)
+ switch (port->uart.line) {
+#ifdef CONFIG_MN10300_TTYSM0
+ case 0: /* ttySM0 */
+#if defined(CONFIG_MN10300_TTYSM0_TIMER8)
+ scxctr |= SC0CTR_CK_TM8UFLOW_8;
+#elif defined(CONFIG_MN10300_TTYSM0_TIMER0)
+ scxctr |= SC0CTR_CK_TM0UFLOW_8;
+#elif defined(CONFIG_MN10300_TTYSM0_TIMER2)
scxctr |= SC0CTR_CK_TM2UFLOW_8;
+#else
+#error "Unknown config for ttySM0"
+#endif
+ break;
+#endif /* CONFIG_MN10300_TTYSM0 */
+
+#ifdef CONFIG_MN10300_TTYSM1
+ case 1: /* ttySM1 */
+#if defined(CONFIG_AM33_2) || defined(CONFIG_AM33_3)
+#if defined(CONFIG_MN10300_TTYSM1_TIMER9)
+ scxctr |= SC1CTR_CK_TM9UFLOW_8;
+#elif defined(CONFIG_MN10300_TTYSM1_TIMER3)
+ scxctr |= SC1CTR_CK_TM3UFLOW_8;
+#else
+#error "Unknown config for ttySM1"
+#endif
+#else /* CONFIG_AM33_2 || CONFIG_AM33_3 */
+#if defined(CONFIG_MN10300_TTYSM1_TIMER12)
+ scxctr |= SC1CTR_CK_TM12UFLOW_8;
+#else
+#error "Unknown config for ttySM1"
+#endif
+#endif /* CONFIG_AM33_2 || CONFIG_AM33_3 */
+ break;
+#endif /* CONFIG_MN10300_TTYSM1 */
+
+#ifdef CONFIG_MN10300_TTYSM2
+ case 2: /* ttySM2 */
+#if defined(CONFIG_AM33_2)
+#if defined(CONFIG_MN10300_TTYSM2_TIMER10)
+ scxctr |= SC2CTR_CK_TM10UFLOW;
+#else
+#error "Unknown config for ttySM2"
+#endif
+#else /* CONFIG_AM33_2 */
+#if defined(CONFIG_MN10300_TTYSM2_TIMER9)
+ scxctr |= SC2CTR_CK_TM9UFLOW_8;
+#elif defined(CONFIG_MN10300_TTYSM2_TIMER1)
+ scxctr |= SC2CTR_CK_TM1UFLOW_8;
+#elif defined(CONFIG_MN10300_TTYSM2_TIMER3)
+ scxctr |= SC2CTR_CK_TM3UFLOW_8;
+#else
+#error "Unknown config for ttySM2"
+#endif
+#endif /* CONFIG_AM33_2 */
+ break;
+#endif /* CONFIG_MN10300_TTYSM2 */
+
+ default:
+ break;
+ }
try_alternative:
baud = uart_get_baud_rate(&port->uart, new, old, 0,
@@ -1195,6 +1332,12 @@ static void mn10300_serial_set_termios(struct uart_port *_port,
ctr &= ~SC2CTR_TWE;
*port->_control = ctr;
}
+
+ /* change Transfer bit-order (LSB/MSB) */
+ if (new->c_cflag & CODMSB)
+ *port->_control |= SC01CTR_OD_MSBFIRST; /* MSB MODE */
+ else
+ *port->_control &= ~SC01CTR_OD_MSBFIRST; /* LSB MODE */
}
/*
@@ -1302,11 +1445,16 @@ static int __init mn10300_serial_init(void)
printk(KERN_INFO "%s version %s (%s)\n",
serial_name, serial_version, serial_revdate);
-#ifdef CONFIG_MN10300_TTYSM2
- SC2TIM = 8; /* make the baud base of timer 2 IOCLK/8 */
+#if defined(CONFIG_MN10300_TTYSM2) && defined(CONFIG_AM33_2)
+ {
+ int tmp;
+ SC2TIM = 8; /* make the baud base of timer 2 IOCLK/8 */
+ tmp = SC2TIM;
+ }
#endif
- set_intr_stub(EXCEP_IRQ_LEVEL1, mn10300_serial_vdma_interrupt);
+ set_intr_stub(NUM2EXCEP_IRQ_LEVEL(CONFIG_MN10300_SERIAL_IRQ_LEVEL),
+ mn10300_serial_vdma_interrupt);
ret = uart_register_driver(&mn10300_serial_driver);
if (!ret) {
@@ -1366,9 +1514,11 @@ static void mn10300_serial_console_write(struct console *co,
port = mn10300_serial_ports[co->index];
/* firstly hijack the serial port from the "virtual DMA" controller */
+ arch_local_cli();
txicr = *port->tx_icr;
- *port->tx_icr = GxICR_LEVEL_1;
+ *port->tx_icr = NUM2GxICR_LEVEL(CONFIG_MN10300_SERIAL_IRQ_LEVEL);
tmp = *port->tx_icr;
+ arch_local_sti();
/* the transmitter may be disabled */
scxctr = *port->_control;
@@ -1422,8 +1572,10 @@ static void mn10300_serial_console_write(struct console *co,
if (!(scxctr & SC01CTR_TXE))
*port->_control = scxctr;
+ arch_local_cli();
*port->tx_icr = txicr;
tmp = *port->tx_icr;
+ arch_local_sti();
}
/*
diff --git a/arch/mn10300/kernel/mn10300-watchdog-low.S b/arch/mn10300/kernel/mn10300-watchdog-low.S
index 996244745cca..f2f5c9cfaabd 100644
--- a/arch/mn10300/kernel/mn10300-watchdog-low.S
+++ b/arch/mn10300/kernel/mn10300-watchdog-low.S
@@ -16,6 +16,7 @@
#include <asm/intctl-regs.h>
#include <asm/timer-regs.h>
#include <asm/frame.inc>
+#include <linux/threads.h>
.text
@@ -53,7 +54,13 @@ watchdog_handler:
.type touch_nmi_watchdog,@function
touch_nmi_watchdog:
clr d0
- mov d0,(watchdog_alert_counter)
+ clr d1
+ mov watchdog_alert_counter, a0
+ setlb
+ mov d0, (a0+)
+ inc d1
+ cmp NR_CPUS, d1
+ lne
ret [],0
.size touch_nmi_watchdog,.-touch_nmi_watchdog
diff --git a/arch/mn10300/kernel/mn10300-watchdog.c b/arch/mn10300/kernel/mn10300-watchdog.c
index f362d9d138f1..c5e12bfd9fcd 100644
--- a/arch/mn10300/kernel/mn10300-watchdog.c
+++ b/arch/mn10300/kernel/mn10300-watchdog.c
@@ -30,7 +30,7 @@
static DEFINE_SPINLOCK(watchdog_print_lock);
static unsigned int watchdog;
static unsigned int watchdog_hz = 1;
-unsigned int watchdog_alert_counter;
+unsigned int watchdog_alert_counter[NR_CPUS];
EXPORT_SYMBOL(touch_nmi_watchdog);
@@ -39,9 +39,6 @@ EXPORT_SYMBOL(touch_nmi_watchdog);
* is to check its timer makes IRQ counts. If they are not
* changing then that CPU has some problem.
*
- * as these watchdog NMI IRQs are generated on every CPU, we only
- * have to check the current processor.
- *
* since NMIs dont listen to _any_ locks, we have to be extremely
* careful not to rely on unsafe variables. The printk might lock
* up though, so we have to break up any console locks first ...
@@ -69,8 +66,8 @@ int __init check_watchdog(void)
printk(KERN_INFO "OK.\n");
- /* now that we know it works we can reduce NMI frequency to
- * something more reasonable; makes a difference in some configs
+ /* now that we know it works we can reduce NMI frequency to something
+ * more reasonable; makes a difference in some configs
*/
watchdog_hz = 1;
@@ -121,15 +118,22 @@ void __init watchdog_go(void)
}
}
+#ifdef CONFIG_SMP
+static void watchdog_dump_register(void *dummy)
+{
+ printk(KERN_ERR "--- Register Dump (CPU%d) ---\n", CPUID);
+ show_registers(current_frame());
+}
+#endif
+
asmlinkage
void watchdog_interrupt(struct pt_regs *regs, enum exception_code excep)
{
-
/*
* Since current-> is always on the stack, and we always switch
* the stack NMI-atomically, it's safe to use smp_processor_id().
*/
- int sum, cpu = smp_processor_id();
+ int sum, cpu;
int irq = NMIIRQ;
u8 wdt, tmp;
@@ -138,43 +142,61 @@ void watchdog_interrupt(struct pt_regs *regs, enum exception_code excep)
tmp = WDCTR;
NMICR = NMICR_WDIF;
- nmi_count(cpu)++;
+ nmi_count(smp_processor_id())++;
kstat_incr_irqs_this_cpu(irq, irq_to_desc(irq));
- sum = irq_stat[cpu].__irq_count;
-
- if (last_irq_sums[cpu] == sum) {
- /*
- * Ayiee, looks like this CPU is stuck ...
- * wait a few IRQs (5 seconds) before doing the oops ...
- */
- watchdog_alert_counter++;
- if (watchdog_alert_counter == 5 * watchdog_hz) {
- spin_lock(&watchdog_print_lock);
+
+ for_each_online_cpu(cpu) {
+
+ sum = irq_stat[cpu].__irq_count;
+
+ if ((last_irq_sums[cpu] == sum)
+#if defined(CONFIG_GDBSTUB) && defined(CONFIG_SMP)
+ && !(CHK_GDBSTUB_BUSY()
+ || atomic_read(&cpu_doing_single_step))
+#endif
+ ) {
/*
- * We are in trouble anyway, lets at least try
- * to get a message out.
+ * Ayiee, looks like this CPU is stuck ...
+ * wait a few IRQs (5 seconds) before doing the oops ...
*/
- bust_spinlocks(1);
- printk(KERN_ERR
- "NMI Watchdog detected LOCKUP on CPU%d,"
- " pc %08lx, registers:\n",
- cpu, regs->pc);
- show_registers(regs);
- printk("console shuts up ...\n");
- console_silent();
- spin_unlock(&watchdog_print_lock);
- bust_spinlocks(0);
+ watchdog_alert_counter[cpu]++;
+ if (watchdog_alert_counter[cpu] == 5 * watchdog_hz) {
+ spin_lock(&watchdog_print_lock);
+ /*
+ * We are in trouble anyway, lets at least try
+ * to get a message out.
+ */
+ bust_spinlocks(1);
+ printk(KERN_ERR
+ "NMI Watchdog detected LOCKUP on CPU%d,"
+ " pc %08lx, registers:\n",
+ cpu, regs->pc);
+#ifdef CONFIG_SMP
+ printk(KERN_ERR
+ "--- Register Dump (CPU%d) ---\n",
+ CPUID);
+#endif
+ show_registers(regs);
+#ifdef CONFIG_SMP
+ smp_nmi_call_function(watchdog_dump_register,
+ NULL, 1);
+#endif
+ printk(KERN_NOTICE "console shuts up ...\n");
+ console_silent();
+ spin_unlock(&watchdog_print_lock);
+ bust_spinlocks(0);
#ifdef CONFIG_GDBSTUB
- if (gdbstub_busy)
- gdbstub_exception(regs, excep);
- else
- gdbstub_intercept(regs, excep);
+ if (CHK_GDBSTUB_BUSY_AND_ACTIVE())
+ gdbstub_exception(regs, excep);
+ else
+ gdbstub_intercept(regs, excep);
#endif
- do_exit(SIGSEGV);
+ do_exit(SIGSEGV);
+ }
+ } else {
+ last_irq_sums[cpu] = sum;
+ watchdog_alert_counter[cpu] = 0;
}
- } else {
- last_irq_sums[cpu] = sum;
- watchdog_alert_counter = 0;
}
WDCTR = wdt | WDCTR_WDRST;
diff --git a/arch/mn10300/kernel/process.c b/arch/mn10300/kernel/process.c
index f48373e2bc1c..e1b14a6ed544 100644
--- a/arch/mn10300/kernel/process.c
+++ b/arch/mn10300/kernel/process.c
@@ -14,7 +14,6 @@
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/smp.h>
-#include <linux/smp_lock.h>
#include <linux/stddef.h>
#include <linux/unistd.h>
#include <linux/ptrace.h>
@@ -57,6 +56,7 @@ unsigned long thread_saved_pc(struct task_struct *tsk)
void (*pm_power_off)(void);
EXPORT_SYMBOL(pm_power_off);
+#if !defined(CONFIG_SMP) || defined(CONFIG_HOTPLUG_CPU)
/*
* we use this if we don't have any better idle routine
*/
@@ -69,6 +69,35 @@ static void default_idle(void)
local_irq_enable();
}
+#else /* !CONFIG_SMP || CONFIG_HOTPLUG_CPU */
+/*
+ * On SMP it's slightly faster (but much more power-consuming!)
+ * to poll the ->work.need_resched flag instead of waiting for the
+ * cross-CPU IPI to arrive. Use this option with caution.
+ */
+static inline void poll_idle(void)
+{
+ int oldval;
+
+ local_irq_enable();
+
+ /*
+ * Deal with another CPU just having chosen a thread to
+ * run here:
+ */
+ oldval = test_and_clear_thread_flag(TIF_NEED_RESCHED);
+
+ if (!oldval) {
+ set_thread_flag(TIF_POLLING_NRFLAG);
+ while (!need_resched())
+ cpu_relax();
+ clear_thread_flag(TIF_POLLING_NRFLAG);
+ } else {
+ set_need_resched();
+ }
+}
+#endif /* !CONFIG_SMP || CONFIG_HOTPLUG_CPU */
+
/*
* the idle thread
* - there's no useful work to be done, so just try to conserve power and have
@@ -77,8 +106,6 @@ static void default_idle(void)
*/
void cpu_idle(void)
{
- int cpu = smp_processor_id();
-
/* endless idle loop with no priority at all */
for (;;) {
while (!need_resched()) {
@@ -86,10 +113,13 @@ void cpu_idle(void)
smp_rmb();
idle = pm_idle;
- if (!idle)
+ if (!idle) {
+#if defined(CONFIG_SMP) && !defined(CONFIG_HOTPLUG_CPU)
+ idle = poll_idle;
+#else /* CONFIG_SMP && !CONFIG_HOTPLUG_CPU */
idle = default_idle;
-
- irq_stat[cpu].idle_timestamp = jiffies;
+#endif /* CONFIG_SMP && !CONFIG_HOTPLUG_CPU */
+ }
idle();
}
@@ -197,6 +227,7 @@ int copy_thread(unsigned long clone_flags,
unsigned long c_usp, unsigned long ustk_size,
struct task_struct *p, struct pt_regs *kregs)
{
+ struct thread_info *ti = task_thread_info(p);
struct pt_regs *c_uregs, *c_kregs, *uregs;
unsigned long c_ksp;
@@ -217,7 +248,7 @@ int copy_thread(unsigned long clone_flags,
/* the new TLS pointer is passed in as arg #5 to sys_clone() */
if (clone_flags & CLONE_SETTLS)
- c_uregs->e2 = __frame->d3;
+ c_uregs->e2 = current_frame()->d3;
/* set up the return kernel frame if called from kernel_thread() */
c_kregs = c_uregs;
@@ -235,7 +266,7 @@ int copy_thread(unsigned long clone_flags,
}
/* set up things up so the scheduler can start the new task */
- p->thread.__frame = c_kregs;
+ ti->frame = c_kregs;
p->thread.a3 = (unsigned long) c_kregs;
p->thread.sp = c_ksp;
p->thread.pc = (unsigned long) ret_from_fork;
@@ -247,25 +278,26 @@ int copy_thread(unsigned long clone_flags,
/*
* clone a process
- * - tlsptr is retrieved by copy_thread() from __frame->d3
+ * - tlsptr is retrieved by copy_thread() from current_frame()->d3
*/
asmlinkage long sys_clone(unsigned long clone_flags, unsigned long newsp,
int __user *parent_tidptr, int __user *child_tidptr,
int __user *tlsptr)
{
- return do_fork(clone_flags, newsp ?: __frame->sp, __frame, 0,
- parent_tidptr, child_tidptr);
+ return do_fork(clone_flags, newsp ?: current_frame()->sp,
+ current_frame(), 0, parent_tidptr, child_tidptr);
}
asmlinkage long sys_fork(void)
{
- return do_fork(SIGCHLD, __frame->sp, __frame, 0, NULL, NULL);
+ return do_fork(SIGCHLD, current_frame()->sp,
+ current_frame(), 0, NULL, NULL);
}
asmlinkage long sys_vfork(void)
{
- return do_fork(CLONE_VFORK | CLONE_VM | SIGCHLD, __frame->sp, __frame,
- 0, NULL, NULL);
+ return do_fork(CLONE_VFORK | CLONE_VM | SIGCHLD, current_frame()->sp,
+ current_frame(), 0, NULL, NULL);
}
asmlinkage long sys_execve(const char __user *name,
@@ -279,7 +311,7 @@ asmlinkage long sys_execve(const char __user *name,
error = PTR_ERR(filename);
if (IS_ERR(filename))
return error;
- error = do_execve(filename, argv, envp, __frame);
+ error = do_execve(filename, argv, envp, current_frame());
putname(filename);
return error;
}
diff --git a/arch/mn10300/kernel/profile.c b/arch/mn10300/kernel/profile.c
index 20d7d0306b16..4f342f75d00c 100644
--- a/arch/mn10300/kernel/profile.c
+++ b/arch/mn10300/kernel/profile.c
@@ -41,7 +41,7 @@ static __init int profile_init(void)
tmp = TM11ICR;
printk(KERN_INFO "Profiling initiated on timer 11, priority 0, %uHz\n",
- mn10300_ioclk / 8 / (TM11BR + 1));
+ MN10300_IOCLK / 8 / (TM11BR + 1));
printk(KERN_INFO "Profile histogram stored %p-%p\n",
prof_buffer, (u8 *)(prof_buffer + prof_len) - 1);
diff --git a/arch/mn10300/kernel/ptrace.c b/arch/mn10300/kernel/ptrace.c
index cf847dabc1bd..5c0b07e61006 100644
--- a/arch/mn10300/kernel/ptrace.c
+++ b/arch/mn10300/kernel/ptrace.c
@@ -295,31 +295,31 @@ void ptrace_disable(struct task_struct *child)
/*
* handle the arch-specific side of process tracing
*/
-long arch_ptrace(struct task_struct *child, long request, long addr, long data)
+long arch_ptrace(struct task_struct *child, long request,
+ unsigned long addr, unsigned long data)
{
unsigned long tmp;
int ret;
+ unsigned long __user *datap = (unsigned long __user *) data;
switch (request) {
/* read the word at location addr in the USER area. */
case PTRACE_PEEKUSR:
ret = -EIO;
- if ((addr & 3) || addr < 0 ||
- addr > sizeof(struct user) - 3)
+ if ((addr & 3) || addr > sizeof(struct user) - 3)
break;
tmp = 0; /* Default return condition */
if (addr < NR_PTREGS << 2)
tmp = get_stack_long(child,
ptrace_regid_to_frame[addr]);
- ret = put_user(tmp, (unsigned long *) data);
+ ret = put_user(tmp, datap);
break;
/* write the word at location addr in the USER area */
case PTRACE_POKEUSR:
ret = -EIO;
- if ((addr & 3) || addr < 0 ||
- addr > sizeof(struct user) - 3)
+ if ((addr & 3) || addr > sizeof(struct user) - 3)
break;
ret = 0;
@@ -332,25 +332,25 @@ long arch_ptrace(struct task_struct *child, long request, long addr, long data)
return copy_regset_to_user(child, &user_mn10300_native_view,
REGSET_GENERAL,
0, NR_PTREGS * sizeof(long),
- (void __user *)data);
+ datap);
case PTRACE_SETREGS: /* Set all integer regs in the child. */
return copy_regset_from_user(child, &user_mn10300_native_view,
REGSET_GENERAL,
0, NR_PTREGS * sizeof(long),
- (const void __user *)data);
+ datap);
case PTRACE_GETFPREGS: /* Get the child FPU state. */
return copy_regset_to_user(child, &user_mn10300_native_view,
REGSET_FPU,
0, sizeof(struct fpu_state_struct),
- (void __user *)data);
+ datap);
case PTRACE_SETFPREGS: /* Set the child FPU state. */
return copy_regset_from_user(child, &user_mn10300_native_view,
REGSET_FPU,
0, sizeof(struct fpu_state_struct),
- (const void __user *)data);
+ datap);
default:
ret = ptrace_request(child, request, addr, data);
diff --git a/arch/mn10300/kernel/rtc.c b/arch/mn10300/kernel/rtc.c
index 4eef0e7224f6..e9e20f9a4dd3 100644
--- a/arch/mn10300/kernel/rtc.c
+++ b/arch/mn10300/kernel/rtc.c
@@ -20,18 +20,22 @@
DEFINE_SPINLOCK(rtc_lock);
EXPORT_SYMBOL(rtc_lock);
-/* time for RTC to update itself in ioclks */
-static unsigned long mn10300_rtc_update_period;
-
+/*
+ * Read the current RTC time
+ */
void read_persistent_clock(struct timespec *ts)
{
struct rtc_time tm;
get_rtc_time(&tm);
- ts->tv_sec = mktime(tm.tm_year, tm.tm_mon, tm.tm_mday,
- tm.tm_hour, tm.tm_min, tm.tm_sec);
ts->tv_nsec = 0;
+ ts->tv_sec = mktime(tm.tm_year, tm.tm_mon, tm.tm_mday,
+ tm.tm_hour, tm.tm_min, tm.tm_sec);
+
+ /* if rtc is way off in the past, set something reasonable */
+ if (ts->tv_sec < 0)
+ ts->tv_sec = mktime(2009, 1, 1, 12, 0, 0);
}
/*
@@ -115,39 +119,14 @@ int update_persistent_clock(struct timespec now)
*/
void __init calibrate_clock(void)
{
- unsigned long count0, counth, count1;
unsigned char status;
/* make sure the RTC is running and is set to operate in 24hr mode */
status = RTSRC;
RTCRB |= RTCRB_SET;
RTCRB |= RTCRB_TM_24HR;
+ RTCRB &= ~RTCRB_DM_BINARY;
RTCRA |= RTCRA_DVR;
RTCRA &= ~RTCRA_DVR;
RTCRB &= ~RTCRB_SET;
-
- /* work out the clock speed by counting clock cycles between ends of
- * the RTC update cycle - track the RTC through one complete update
- * cycle (1 second)
- */
- startup_timestamp_counter();
-
- while (!(RTCRA & RTCRA_UIP)) {}
- while ((RTCRA & RTCRA_UIP)) {}
-
- count0 = TMTSCBC;
-
- while (!(RTCRA & RTCRA_UIP)) {}
-
- counth = TMTSCBC;
-
- while ((RTCRA & RTCRA_UIP)) {}
-
- count1 = TMTSCBC;
-
- shutdown_timestamp_counter();
-
- MN10300_TSCCLK = count0 - count1; /* the timers count down */
- mn10300_rtc_update_period = counth - count1;
- MN10300_TSC_PER_HZ = MN10300_TSCCLK / HZ;
}
diff --git a/arch/mn10300/kernel/setup.c b/arch/mn10300/kernel/setup.c
index d464affcba0e..9e7a3209a3e1 100644
--- a/arch/mn10300/kernel/setup.c
+++ b/arch/mn10300/kernel/setup.c
@@ -22,6 +22,7 @@
#include <linux/init.h>
#include <linux/bootmem.h>
#include <linux/seq_file.h>
+#include <linux/cpu.h>
#include <asm/processor.h>
#include <linux/console.h>
#include <asm/uaccess.h>
@@ -30,7 +31,6 @@
#include <asm/io.h>
#include <asm/smp.h>
#include <proc/proc.h>
-#include <asm/busctl-regs.h>
#include <asm/fpu.h>
#include <asm/sections.h>
@@ -64,11 +64,13 @@ unsigned long memory_size;
struct thread_info *__current_ti = &init_thread_union.thread_info;
struct task_struct *__current = &init_task;
-#define mn10300_known_cpus 3
+#define mn10300_known_cpus 5
static const char *const mn10300_cputypes[] = {
- "am33v1",
- "am33v2",
- "am34v1",
+ "am33-1",
+ "am33-2",
+ "am34-1",
+ "am33-3",
+ "am34-2",
"unknown"
};
@@ -123,6 +125,7 @@ void __init setup_arch(char **cmdline_p)
cpu_init();
unit_setup();
+ smp_init_cpus();
parse_mem_cmdline(cmdline_p);
init_mm.start_code = (unsigned long)&_text;
@@ -179,57 +182,55 @@ void __init setup_arch(char **cmdline_p)
void __init cpu_init(void)
{
unsigned long cpurev = CPUREV, type;
- unsigned long base, size;
type = (CPUREV & CPUREV_TYPE) >> CPUREV_TYPE_S;
if (type > mn10300_known_cpus)
type = mn10300_known_cpus;
- printk(KERN_INFO "Matsushita %s, rev %ld\n",
+ printk(KERN_INFO "Panasonic %s, rev %ld\n",
mn10300_cputypes[type],
(cpurev & CPUREV_REVISION) >> CPUREV_REVISION_S);
- /* determine the memory size and base from the memory controller regs */
- memory_size = 0;
-
- base = SDBASE(0);
- if (base & SDBASE_CE) {
- size = (base & SDBASE_CBAM) << SDBASE_CBAM_SHIFT;
- size = ~size + 1;
- base &= SDBASE_CBA;
+ get_mem_info(&phys_memory_base, &memory_size);
+ phys_memory_end = phys_memory_base + memory_size;
- printk(KERN_INFO "SDRAM[0]: %luMb @%08lx\n", size >> 20, base);
- memory_size += size;
- phys_memory_base = base;
- }
+ fpu_init_state();
+}
- base = SDBASE(1);
- if (base & SDBASE_CE) {
- size = (base & SDBASE_CBAM) << SDBASE_CBAM_SHIFT;
- size = ~size + 1;
- base &= SDBASE_CBA;
+static struct cpu cpu_devices[NR_CPUS];
- printk(KERN_INFO "SDRAM[1]: %luMb @%08lx\n", size >> 20, base);
- memory_size += size;
- if (phys_memory_base == 0)
- phys_memory_base = base;
- }
+static int __init topology_init(void)
+{
+ int i;
- phys_memory_end = phys_memory_base + memory_size;
+ for_each_present_cpu(i)
+ register_cpu(&cpu_devices[i], i);
-#ifdef CONFIG_FPU
- fpu_init_state();
-#endif
+ return 0;
}
+subsys_initcall(topology_init);
+
/*
* Get CPU information for use by the procfs.
*/
static int show_cpuinfo(struct seq_file *m, void *v)
{
+#ifdef CONFIG_SMP
+ struct mn10300_cpuinfo *c = v;
+ unsigned long cpu_id = c - cpu_data;
+ unsigned long cpurev = c->type, type, icachesz, dcachesz;
+#else /* CONFIG_SMP */
+ unsigned long cpu_id = 0;
unsigned long cpurev = CPUREV, type, icachesz, dcachesz;
+#endif /* CONFIG_SMP */
- type = (CPUREV & CPUREV_TYPE) >> CPUREV_TYPE_S;
+#ifdef CONFIG_SMP
+ if (!cpu_online(cpu_id))
+ return 0;
+#endif
+
+ type = (cpurev & CPUREV_TYPE) >> CPUREV_TYPE_S;
if (type > mn10300_known_cpus)
type = mn10300_known_cpus;
@@ -244,13 +245,14 @@ static int show_cpuinfo(struct seq_file *m, void *v)
1024;
seq_printf(m,
- "processor : 0\n"
- "vendor_id : Matsushita\n"
+ "processor : %ld\n"
+ "vendor_id : " PROCESSOR_VENDOR_NAME "\n"
"cpu core : %s\n"
"cpu rev : %lu\n"
"model name : " PROCESSOR_MODEL_NAME "\n"
"icache size: %lu\n"
"dcache size: %lu\n",
+ cpu_id,
mn10300_cputypes[type],
(cpurev & CPUREV_REVISION) >> CPUREV_REVISION_S,
icachesz,
@@ -262,8 +264,13 @@ static int show_cpuinfo(struct seq_file *m, void *v)
"bogomips : %lu.%02lu\n\n",
MN10300_IOCLK / 1000000,
(MN10300_IOCLK / 10000) % 100,
+#ifdef CONFIG_SMP
+ c->loops_per_jiffy / (500000 / HZ),
+ (c->loops_per_jiffy / (5000 / HZ)) % 100
+#else /* CONFIG_SMP */
loops_per_jiffy / (500000 / HZ),
(loops_per_jiffy / (5000 / HZ)) % 100
+#endif /* CONFIG_SMP */
);
return 0;
diff --git a/arch/mn10300/kernel/signal.c b/arch/mn10300/kernel/signal.c
index d4de05ab7864..690f4e9507d7 100644
--- a/arch/mn10300/kernel/signal.c
+++ b/arch/mn10300/kernel/signal.c
@@ -91,7 +91,7 @@ asmlinkage long sys_sigaction(int sig,
*/
asmlinkage long sys_sigaltstack(const stack_t __user *uss, stack_t *uoss)
{
- return do_sigaltstack(uss, uoss, __frame->sp);
+ return do_sigaltstack(uss, uoss, current_frame()->sp);
}
/*
@@ -156,10 +156,11 @@ badframe:
*/
asmlinkage long sys_sigreturn(void)
{
- struct sigframe __user *frame = (struct sigframe __user *) __frame->sp;
+ struct sigframe __user *frame;
sigset_t set;
long d0;
+ frame = (struct sigframe __user *) current_frame()->sp;
if (verify_area(VERIFY_READ, frame, sizeof(*frame)))
goto badframe;
if (__get_user(set.sig[0], &frame->sc.oldmask))
@@ -176,7 +177,7 @@ asmlinkage long sys_sigreturn(void)
recalc_sigpending();
spin_unlock_irq(&current->sighand->siglock);
- if (restore_sigcontext(__frame, &frame->sc, &d0))
+ if (restore_sigcontext(current_frame(), &frame->sc, &d0))
goto badframe;
return d0;
@@ -191,11 +192,11 @@ badframe:
*/
asmlinkage long sys_rt_sigreturn(void)
{
- struct rt_sigframe __user *frame =
- (struct rt_sigframe __user *) __frame->sp;
+ struct rt_sigframe __user *frame;
sigset_t set;
- unsigned long d0;
+ long d0;
+ frame = (struct rt_sigframe __user *) current_frame()->sp;
if (verify_area(VERIFY_READ, frame, sizeof(*frame)))
goto badframe;
if (__copy_from_user(&set, &frame->uc.uc_sigmask, sizeof(set)))
@@ -207,10 +208,11 @@ asmlinkage long sys_rt_sigreturn(void)
recalc_sigpending();
spin_unlock_irq(&current->sighand->siglock);
- if (restore_sigcontext(__frame, &frame->uc.uc_mcontext, &d0))
+ if (restore_sigcontext(current_frame(), &frame->uc.uc_mcontext, &d0))
goto badframe;
- if (do_sigaltstack(&frame->uc.uc_stack, NULL, __frame->sp) == -EFAULT)
+ if (do_sigaltstack(&frame->uc.uc_stack, NULL, current_frame()->sp) ==
+ -EFAULT)
goto badframe;
return d0;
@@ -572,7 +574,7 @@ asmlinkage void do_notify_resume(struct pt_regs *regs, u32 thread_info_flags)
if (thread_info_flags & _TIF_NOTIFY_RESUME) {
clear_thread_flag(TIF_NOTIFY_RESUME);
- tracehook_notify_resume(__frame);
+ tracehook_notify_resume(current_frame());
if (current->replacement_session_keyring)
key_replace_session_keyring();
}
diff --git a/arch/mn10300/kernel/smp-low.S b/arch/mn10300/kernel/smp-low.S
new file mode 100644
index 000000000000..72938cefc05e
--- /dev/null
+++ b/arch/mn10300/kernel/smp-low.S
@@ -0,0 +1,97 @@
+/* SMP IPI low-level handler
+ *
+ * Copyright (C) 2006-2007 Matsushita Electric Industrial Co., Ltd.
+ * All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ *
+ */
+
+#include <linux/sys.h>
+#include <linux/linkage.h>
+#include <asm/smp.h>
+#include <asm/system.h>
+#include <asm/thread_info.h>
+#include <asm/cpu-regs.h>
+#include <proc/smp-regs.h>
+#include <asm/asm-offsets.h>
+#include <asm/frame.inc>
+
+ .am33_2
+
+###############################################################################
+#
+# IPI interrupt handler
+#
+###############################################################################
+ .globl mn10300_low_ipi_handler
+mn10300_low_ipi_handler:
+ add -4,sp
+ mov d0,(sp)
+ movhu (IAGR),d0
+ and IAGR_GN,d0
+ lsr 0x2,d0
+#ifdef CONFIG_MN10300_CACHE_ENABLED
+ cmp FLUSH_CACHE_IPI,d0
+ beq mn10300_flush_cache_ipi
+#endif
+ cmp SMP_BOOT_IRQ,d0
+ beq mn10300_smp_boot_ipi
+ /* OTHERS */
+ mov (sp),d0
+ add 4,sp
+#ifdef CONFIG_GDBSTUB
+ jmp gdbstub_io_rx_handler
+#else
+ jmp end
+#endif
+
+###############################################################################
+#
+# Cache flush IPI interrupt handler
+#
+###############################################################################
+#ifdef CONFIG_MN10300_CACHE_ENABLED
+mn10300_flush_cache_ipi:
+ mov (sp),d0
+ add 4,sp
+
+ /* FLUSH_CACHE_IPI */
+ add -4,sp
+ SAVE_ALL
+ mov GxICR_DETECT,d2
+ movbu d2,(GxICR(FLUSH_CACHE_IPI)) # ACK the interrupt
+ movhu (GxICR(FLUSH_CACHE_IPI)),d2
+ call smp_cache_interrupt[],0
+ RESTORE_ALL
+ jmp end
+#endif
+
+###############################################################################
+#
+# SMP boot CPU IPI interrupt handler
+#
+###############################################################################
+mn10300_smp_boot_ipi:
+ /* clear interrupt */
+ movhu (GxICR(SMP_BOOT_IRQ)),d0
+ and ~GxICR_REQUEST,d0
+ movhu d0,(GxICR(SMP_BOOT_IRQ))
+ mov (sp),d0
+ add 4,sp
+
+ # get stack
+ mov (CPUID),a0
+ add -1,a0
+ add a0,a0
+ add a0,a0
+ mov (start_stack,a0),a0
+ mov a0,sp
+ jmp initialize_secondary
+
+
+# Jump here after RTI to suppress the icache lookahead
+end:
diff --git a/arch/mn10300/kernel/smp.c b/arch/mn10300/kernel/smp.c
new file mode 100644
index 000000000000..0dcd1c686ba8
--- /dev/null
+++ b/arch/mn10300/kernel/smp.c
@@ -0,0 +1,1152 @@
+/* SMP support routines.
+ *
+ * Copyright (C) 2006-2008 Panasonic Corporation
+ * All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/interrupt.h>
+#include <linux/spinlock.h>
+#include <linux/init.h>
+#include <linux/jiffies.h>
+#include <linux/cpumask.h>
+#include <linux/err.h>
+#include <linux/kernel.h>
+#include <linux/delay.h>
+#include <linux/sched.h>
+#include <linux/profile.h>
+#include <linux/smp.h>
+#include <asm/tlbflush.h>
+#include <asm/system.h>
+#include <asm/bitops.h>
+#include <asm/processor.h>
+#include <asm/bug.h>
+#include <asm/exceptions.h>
+#include <asm/hardirq.h>
+#include <asm/fpu.h>
+#include <asm/mmu_context.h>
+#include <asm/thread_info.h>
+#include <asm/cpu-regs.h>
+#include <asm/intctl-regs.h>
+#include "internal.h"
+
+#ifdef CONFIG_HOTPLUG_CPU
+#include <linux/cpu.h>
+#include <asm/cacheflush.h>
+
+static unsigned long sleep_mode[NR_CPUS];
+
+static void run_sleep_cpu(unsigned int cpu);
+static void run_wakeup_cpu(unsigned int cpu);
+#endif /* CONFIG_HOTPLUG_CPU */
+
+/*
+ * Debug Message function
+ */
+
+#undef DEBUG_SMP
+#ifdef DEBUG_SMP
+#define Dprintk(fmt, ...) printk(KERN_DEBUG fmt, ##__VA_ARGS__)
+#else
+#define Dprintk(fmt, ...) no_printk(KERN_DEBUG fmt, ##__VA_ARGS__)
+#endif
+
+/* timeout value in msec for smp_nmi_call_function. zero is no timeout. */
+#define CALL_FUNCTION_NMI_IPI_TIMEOUT 0
+
+/*
+ * Structure and data for smp_nmi_call_function().
+ */
+struct nmi_call_data_struct {
+ smp_call_func_t func;
+ void *info;
+ cpumask_t started;
+ cpumask_t finished;
+ int wait;
+ char size_alignment[0]
+ __attribute__ ((__aligned__(SMP_CACHE_BYTES)));
+} __attribute__ ((__aligned__(SMP_CACHE_BYTES)));
+
+static DEFINE_SPINLOCK(smp_nmi_call_lock);
+static struct nmi_call_data_struct *nmi_call_data;
+
+/*
+ * Data structures and variables
+ */
+static cpumask_t cpu_callin_map; /* Bitmask of callin CPUs */
+static cpumask_t cpu_callout_map; /* Bitmask of callout CPUs */
+cpumask_t cpu_boot_map; /* Bitmask of boot APs */
+unsigned long start_stack[NR_CPUS - 1];
+
+/*
+ * Per CPU parameters
+ */
+struct mn10300_cpuinfo cpu_data[NR_CPUS] __cacheline_aligned;
+
+static int cpucount; /* The count of boot CPUs */
+static cpumask_t smp_commenced_mask;
+cpumask_t cpu_initialized __initdata = CPU_MASK_NONE;
+
+/*
+ * Function Prototypes
+ */
+static int do_boot_cpu(int);
+static void smp_show_cpu_info(int cpu_id);
+static void smp_callin(void);
+static void smp_online(void);
+static void smp_store_cpu_info(int);
+static void smp_cpu_init(void);
+static void smp_tune_scheduling(void);
+static void send_IPI_mask(const cpumask_t *cpumask, int irq);
+static void init_ipi(void);
+
+/*
+ * IPI Initialization interrupt definitions
+ */
+static void mn10300_ipi_disable(unsigned int irq);
+static void mn10300_ipi_enable(unsigned int irq);
+static void mn10300_ipi_ack(unsigned int irq);
+static void mn10300_ipi_nop(unsigned int irq);
+
+static struct irq_chip mn10300_ipi_type = {
+ .name = "cpu_ipi",
+ .disable = mn10300_ipi_disable,
+ .enable = mn10300_ipi_enable,
+ .ack = mn10300_ipi_ack,
+ .eoi = mn10300_ipi_nop
+};
+
+static irqreturn_t smp_reschedule_interrupt(int irq, void *dev_id);
+static irqreturn_t smp_call_function_interrupt(int irq, void *dev_id);
+
+static struct irqaction reschedule_ipi = {
+ .handler = smp_reschedule_interrupt,
+ .name = "smp reschedule IPI"
+};
+static struct irqaction call_function_ipi = {
+ .handler = smp_call_function_interrupt,
+ .name = "smp call function IPI"
+};
+
+#if !defined(CONFIG_GENERIC_CLOCKEVENTS) || defined(CONFIG_GENERIC_CLOCKEVENTS_BROADCAST)
+static irqreturn_t smp_ipi_timer_interrupt(int irq, void *dev_id);
+static struct irqaction local_timer_ipi = {
+ .handler = smp_ipi_timer_interrupt,
+ .flags = IRQF_DISABLED,
+ .name = "smp local timer IPI"
+};
+#endif
+
+/**
+ * init_ipi - Initialise the IPI mechanism
+ */
+static void init_ipi(void)
+{
+ unsigned long flags;
+ u16 tmp16;
+
+ /* set up the reschedule IPI */
+ set_irq_chip_and_handler(RESCHEDULE_IPI,
+ &mn10300_ipi_type, handle_percpu_irq);
+ setup_irq(RESCHEDULE_IPI, &reschedule_ipi);
+ set_intr_level(RESCHEDULE_IPI, RESCHEDULE_GxICR_LV);
+ mn10300_ipi_enable(RESCHEDULE_IPI);
+
+ /* set up the call function IPI */
+ set_irq_chip_and_handler(CALL_FUNC_SINGLE_IPI,
+ &mn10300_ipi_type, handle_percpu_irq);
+ setup_irq(CALL_FUNC_SINGLE_IPI, &call_function_ipi);
+ set_intr_level(CALL_FUNC_SINGLE_IPI, CALL_FUNCTION_GxICR_LV);
+ mn10300_ipi_enable(CALL_FUNC_SINGLE_IPI);
+
+ /* set up the local timer IPI */
+#if !defined(CONFIG_GENERIC_CLOCKEVENTS) || \
+ defined(CONFIG_GENERIC_CLOCKEVENTS_BROADCAST)
+ set_irq_chip_and_handler(LOCAL_TIMER_IPI,
+ &mn10300_ipi_type, handle_percpu_irq);
+ setup_irq(LOCAL_TIMER_IPI, &local_timer_ipi);
+ set_intr_level(LOCAL_TIMER_IPI, LOCAL_TIMER_GxICR_LV);
+ mn10300_ipi_enable(LOCAL_TIMER_IPI);
+#endif
+
+#ifdef CONFIG_MN10300_CACHE_ENABLED
+ /* set up the cache flush IPI */
+ flags = arch_local_cli_save();
+ __set_intr_stub(NUM2EXCEP_IRQ_LEVEL(FLUSH_CACHE_GxICR_LV),
+ mn10300_low_ipi_handler);
+ GxICR(FLUSH_CACHE_IPI) = FLUSH_CACHE_GxICR_LV | GxICR_DETECT;
+ mn10300_ipi_enable(FLUSH_CACHE_IPI);
+ arch_local_irq_restore(flags);
+#endif
+
+ /* set up the NMI call function IPI */
+ flags = arch_local_cli_save();
+ GxICR(CALL_FUNCTION_NMI_IPI) = GxICR_NMI | GxICR_ENABLE | GxICR_DETECT;
+ tmp16 = GxICR(CALL_FUNCTION_NMI_IPI);
+ arch_local_irq_restore(flags);
+
+ /* set up the SMP boot IPI */
+ flags = arch_local_cli_save();
+ __set_intr_stub(NUM2EXCEP_IRQ_LEVEL(SMP_BOOT_GxICR_LV),
+ mn10300_low_ipi_handler);
+ arch_local_irq_restore(flags);
+}
+
+/**
+ * mn10300_ipi_shutdown - Shut down handling of an IPI
+ * @irq: The IPI to be shut down.
+ */
+static void mn10300_ipi_shutdown(unsigned int irq)
+{
+ unsigned long flags;
+ u16 tmp;
+
+ flags = arch_local_cli_save();
+
+ tmp = GxICR(irq);
+ GxICR(irq) = (tmp & GxICR_LEVEL) | GxICR_DETECT;
+ tmp = GxICR(irq);
+
+ arch_local_irq_restore(flags);
+}
+
+/**
+ * mn10300_ipi_enable - Enable an IPI
+ * @irq: The IPI to be enabled.
+ */
+static void mn10300_ipi_enable(unsigned int irq)
+{
+ unsigned long flags;
+ u16 tmp;
+
+ flags = arch_local_cli_save();
+
+ tmp = GxICR(irq);
+ GxICR(irq) = (tmp & GxICR_LEVEL) | GxICR_ENABLE;
+ tmp = GxICR(irq);
+
+ arch_local_irq_restore(flags);
+}
+
+/**
+ * mn10300_ipi_disable - Disable an IPI
+ * @irq: The IPI to be disabled.
+ */
+static void mn10300_ipi_disable(unsigned int irq)
+{
+ unsigned long flags;
+ u16 tmp;
+
+ flags = arch_local_cli_save();
+
+ tmp = GxICR(irq);
+ GxICR(irq) = tmp & GxICR_LEVEL;
+ tmp = GxICR(irq);
+
+ arch_local_irq_restore(flags);
+}
+
+/**
+ * mn10300_ipi_ack - Acknowledge an IPI interrupt in the PIC
+ * @irq: The IPI to be acknowledged.
+ *
+ * Clear the interrupt detection flag for the IPI on the appropriate interrupt
+ * channel in the PIC.
+ */
+static void mn10300_ipi_ack(unsigned int irq)
+{
+ unsigned long flags;
+ u16 tmp;
+
+ flags = arch_local_cli_save();
+ GxICR_u8(irq) = GxICR_DETECT;
+ tmp = GxICR(irq);
+ arch_local_irq_restore(flags);
+}
+
+/**
+ * mn10300_ipi_nop - Dummy IPI action
+ * @irq: The IPI to be acted upon.
+ */
+static void mn10300_ipi_nop(unsigned int irq)
+{
+}
+
+/**
+ * send_IPI_mask - Send IPIs to all CPUs in list
+ * @cpumask: The list of CPUs to target.
+ * @irq: The IPI request to be sent.
+ *
+ * Send the specified IPI to all the CPUs in the list, not waiting for them to
+ * finish before returning. The caller is responsible for synchronisation if
+ * that is needed.
+ */
+static void send_IPI_mask(const cpumask_t *cpumask, int irq)
+{
+ int i;
+ u16 tmp;
+
+ for (i = 0; i < NR_CPUS; i++) {
+ if (cpu_isset(i, *cpumask)) {
+ /* send IPI */
+ tmp = CROSS_GxICR(irq, i);
+ CROSS_GxICR(irq, i) =
+ tmp | GxICR_REQUEST | GxICR_DETECT;
+ tmp = CROSS_GxICR(irq, i); /* flush write buffer */
+ }
+ }
+}
+
+/**
+ * send_IPI_self - Send an IPI to this CPU.
+ * @irq: The IPI request to be sent.
+ *
+ * Send the specified IPI to the current CPU.
+ */
+void send_IPI_self(int irq)
+{
+ send_IPI_mask(cpumask_of(smp_processor_id()), irq);
+}
+
+/**
+ * send_IPI_allbutself - Send IPIs to all the other CPUs.
+ * @irq: The IPI request to be sent.
+ *
+ * Send the specified IPI to all CPUs in the system barring the current one,
+ * not waiting for them to finish before returning. The caller is responsible
+ * for synchronisation if that is needed.
+ */
+void send_IPI_allbutself(int irq)
+{
+ cpumask_t cpumask;
+
+ cpumask = cpu_online_map;
+ cpu_clear(smp_processor_id(), cpumask);
+ send_IPI_mask(&cpumask, irq);
+}
+
+void arch_send_call_function_ipi_mask(const struct cpumask *mask)
+{
+ BUG();
+ /*send_IPI_mask(mask, CALL_FUNCTION_IPI);*/
+}
+
+void arch_send_call_function_single_ipi(int cpu)
+{
+ send_IPI_mask(cpumask_of(cpu), CALL_FUNC_SINGLE_IPI);
+}
+
+/**
+ * smp_send_reschedule - Send reschedule IPI to a CPU
+ * @cpu: The CPU to target.
+ */
+void smp_send_reschedule(int cpu)
+{
+ send_IPI_mask(cpumask_of(cpu), RESCHEDULE_IPI);
+}
+
+/**
+ * smp_nmi_call_function - Send a call function NMI IPI to all CPUs
+ * @func: The function to ask to be run.
+ * @info: The context data to pass to that function.
+ * @wait: If true, wait (atomically) until function is run on all CPUs.
+ *
+ * Send a non-maskable request to all CPUs in the system, requesting them to
+ * run the specified function with the given context data, and, potentially, to
+ * wait for completion of that function on all CPUs.
+ *
+ * Returns 0 if successful, -ETIMEDOUT if we were asked to wait, but hit the
+ * timeout.
+ */
+int smp_nmi_call_function(smp_call_func_t func, void *info, int wait)
+{
+ struct nmi_call_data_struct data;
+ unsigned long flags;
+ unsigned int cnt;
+ int cpus, ret = 0;
+
+ cpus = num_online_cpus() - 1;
+ if (cpus < 1)
+ return 0;
+
+ data.func = func;
+ data.info = info;
+ data.started = cpu_online_map;
+ cpu_clear(smp_processor_id(), data.started);
+ data.wait = wait;
+ if (wait)
+ data.finished = data.started;
+
+ spin_lock_irqsave(&smp_nmi_call_lock, flags);
+ nmi_call_data = &data;
+ smp_mb();
+
+ /* Send a message to all other CPUs and wait for them to respond */
+ send_IPI_allbutself(CALL_FUNCTION_NMI_IPI);
+
+ /* Wait for response */
+ if (CALL_FUNCTION_NMI_IPI_TIMEOUT > 0) {
+ for (cnt = 0;
+ cnt < CALL_FUNCTION_NMI_IPI_TIMEOUT &&
+ !cpus_empty(data.started);
+ cnt++)
+ mdelay(1);
+
+ if (wait && cnt < CALL_FUNCTION_NMI_IPI_TIMEOUT) {
+ for (cnt = 0;
+ cnt < CALL_FUNCTION_NMI_IPI_TIMEOUT &&
+ !cpus_empty(data.finished);
+ cnt++)
+ mdelay(1);
+ }
+
+ if (cnt >= CALL_FUNCTION_NMI_IPI_TIMEOUT)
+ ret = -ETIMEDOUT;
+
+ } else {
+ /* If timeout value is zero, wait until cpumask has been
+ * cleared */
+ while (!cpus_empty(data.started))
+ barrier();
+ if (wait)
+ while (!cpus_empty(data.finished))
+ barrier();
+ }
+
+ spin_unlock_irqrestore(&smp_nmi_call_lock, flags);
+ return ret;
+}
+
+/**
+ * stop_this_cpu - Callback to stop a CPU.
+ * @unused: Callback context (ignored).
+ */
+void stop_this_cpu(void *unused)
+{
+ static volatile int stopflag;
+ unsigned long flags;
+
+#ifdef CONFIG_GDBSTUB
+ /* In case of single stepping smp_send_stop by other CPU,
+ * clear procindebug to avoid deadlock.
+ */
+ atomic_set(&procindebug[smp_processor_id()], 0);
+#endif /* CONFIG_GDBSTUB */
+
+ flags = arch_local_cli_save();
+ cpu_clear(smp_processor_id(), cpu_online_map);
+
+ while (!stopflag)
+ cpu_relax();
+
+ cpu_set(smp_processor_id(), cpu_online_map);
+ arch_local_irq_restore(flags);
+}
+
+/**
+ * smp_send_stop - Send a stop request to all CPUs.
+ */
+void smp_send_stop(void)
+{
+ smp_nmi_call_function(stop_this_cpu, NULL, 0);
+}
+
+/**
+ * smp_reschedule_interrupt - Reschedule IPI handler
+ * @irq: The interrupt number.
+ * @dev_id: The device ID.
+ *
+ * We need do nothing here, since the scheduling will be effected on our way
+ * back through entry.S.
+ *
+ * Returns IRQ_HANDLED to indicate we handled the interrupt successfully.
+ */
+static irqreturn_t smp_reschedule_interrupt(int irq, void *dev_id)
+{
+ /* do nothing */
+ return IRQ_HANDLED;
+}
+
+/**
+ * smp_call_function_interrupt - Call function IPI handler
+ * @irq: The interrupt number.
+ * @dev_id: The device ID.
+ *
+ * Returns IRQ_HANDLED to indicate we handled the interrupt successfully.
+ */
+static irqreturn_t smp_call_function_interrupt(int irq, void *dev_id)
+{
+ /* generic_smp_call_function_interrupt(); */
+ generic_smp_call_function_single_interrupt();
+ return IRQ_HANDLED;
+}
+
+/**
+ * smp_nmi_call_function_interrupt - Non-maskable call function IPI handler
+ */
+void smp_nmi_call_function_interrupt(void)
+{
+ smp_call_func_t func = nmi_call_data->func;
+ void *info = nmi_call_data->info;
+ int wait = nmi_call_data->wait;
+
+ /* Notify the initiating CPU that I've grabbed the data and am about to
+ * execute the function
+ */
+ smp_mb();
+ cpu_clear(smp_processor_id(), nmi_call_data->started);
+ (*func)(info);
+
+ if (wait) {
+ smp_mb();
+ cpu_clear(smp_processor_id(), nmi_call_data->finished);
+ }
+}
+
+#if !defined(CONFIG_GENERIC_CLOCKEVENTS) || \
+ defined(CONFIG_GENERIC_CLOCKEVENTS_BROADCAST)
+/**
+ * smp_ipi_timer_interrupt - Local timer IPI handler
+ * @irq: The interrupt number.
+ * @dev_id: The device ID.
+ *
+ * Returns IRQ_HANDLED to indicate we handled the interrupt successfully.
+ */
+static irqreturn_t smp_ipi_timer_interrupt(int irq, void *dev_id)
+{
+ return local_timer_interrupt();
+}
+#endif
+
+void __init smp_init_cpus(void)
+{
+ int i;
+ for (i = 0; i < NR_CPUS; i++) {
+ set_cpu_possible(i, true);
+ set_cpu_present(i, true);
+ }
+}
+
+/**
+ * smp_cpu_init - Initialise AP in start_secondary.
+ *
+ * For this Application Processor, set up init_mm, initialise FPU and set
+ * interrupt level 0-6 setting.
+ */
+static void __init smp_cpu_init(void)
+{
+ unsigned long flags;
+ int cpu_id = smp_processor_id();
+ u16 tmp16;
+
+ if (test_and_set_bit(cpu_id, &cpu_initialized)) {
+ printk(KERN_WARNING "CPU#%d already initialized!\n", cpu_id);
+ for (;;)
+ local_irq_enable();
+ }
+ printk(KERN_INFO "Initializing CPU#%d\n", cpu_id);
+
+ atomic_inc(&init_mm.mm_count);
+ current->active_mm = &init_mm;
+ BUG_ON(current->mm);
+
+ enter_lazy_tlb(&init_mm, current);
+
+ /* Force FPU initialization */
+ clear_using_fpu(current);
+
+ GxICR(CALL_FUNC_SINGLE_IPI) = CALL_FUNCTION_GxICR_LV | GxICR_DETECT;
+ mn10300_ipi_enable(CALL_FUNC_SINGLE_IPI);
+
+ GxICR(LOCAL_TIMER_IPI) = LOCAL_TIMER_GxICR_LV | GxICR_DETECT;
+ mn10300_ipi_enable(LOCAL_TIMER_IPI);
+
+ GxICR(RESCHEDULE_IPI) = RESCHEDULE_GxICR_LV | GxICR_DETECT;
+ mn10300_ipi_enable(RESCHEDULE_IPI);
+
+#ifdef CONFIG_MN10300_CACHE_ENABLED
+ GxICR(FLUSH_CACHE_IPI) = FLUSH_CACHE_GxICR_LV | GxICR_DETECT;
+ mn10300_ipi_enable(FLUSH_CACHE_IPI);
+#endif
+
+ mn10300_ipi_shutdown(SMP_BOOT_IRQ);
+
+ /* Set up the non-maskable call function IPI */
+ flags = arch_local_cli_save();
+ GxICR(CALL_FUNCTION_NMI_IPI) = GxICR_NMI | GxICR_ENABLE | GxICR_DETECT;
+ tmp16 = GxICR(CALL_FUNCTION_NMI_IPI);
+ arch_local_irq_restore(flags);
+}
+
+/**
+ * smp_prepare_cpu_init - Initialise CPU in startup_secondary
+ *
+ * Set interrupt level 0-6 setting and init ICR of gdbstub.
+ */
+void smp_prepare_cpu_init(void)
+{
+ int loop;
+
+ /* Set the interrupt vector registers */
+ IVAR0 = EXCEP_IRQ_LEVEL0;
+ IVAR1 = EXCEP_IRQ_LEVEL1;
+ IVAR2 = EXCEP_IRQ_LEVEL2;
+ IVAR3 = EXCEP_IRQ_LEVEL3;
+ IVAR4 = EXCEP_IRQ_LEVEL4;
+ IVAR5 = EXCEP_IRQ_LEVEL5;
+ IVAR6 = EXCEP_IRQ_LEVEL6;
+
+ /* Disable all interrupts and set to priority 6 (lowest) */
+ for (loop = 0; loop < GxICR_NUM_IRQS; loop++)
+ GxICR(loop) = GxICR_LEVEL_6 | GxICR_DETECT;
+
+#ifdef CONFIG_GDBSTUB
+ /* initialise GDB-stub */
+ do {
+ unsigned long flags;
+ u16 tmp16;
+
+ flags = arch_local_cli_save();
+ GxICR(GDB_NMI_IPI) = GxICR_NMI | GxICR_ENABLE | GxICR_DETECT;
+ tmp16 = GxICR(GDB_NMI_IPI);
+ arch_local_irq_restore(flags);
+ } while (0);
+#endif
+}
+
+/**
+ * start_secondary - Activate a secondary CPU (AP)
+ * @unused: Thread parameter (ignored).
+ */
+int __init start_secondary(void *unused)
+{
+ smp_cpu_init();
+ smp_callin();
+ while (!cpu_isset(smp_processor_id(), smp_commenced_mask))
+ cpu_relax();
+
+ local_flush_tlb();
+ preempt_disable();
+ smp_online();
+
+#ifdef CONFIG_GENERIC_CLOCKEVENTS
+ init_clockevents();
+#endif
+ cpu_idle();
+ return 0;
+}
+
+/**
+ * smp_prepare_cpus - Boot up secondary CPUs (APs)
+ * @max_cpus: Maximum number of CPUs to boot.
+ *
+ * Call do_boot_cpu, and boot up APs.
+ */
+void __init smp_prepare_cpus(unsigned int max_cpus)
+{
+ int phy_id;
+
+ /* Setup boot CPU information */
+ smp_store_cpu_info(0);
+ smp_tune_scheduling();
+
+ init_ipi();
+
+ /* If SMP should be disabled, then finish */
+ if (max_cpus == 0) {
+ printk(KERN_INFO "SMP mode deactivated.\n");
+ goto smp_done;
+ }
+
+ /* Boot secondary CPUs (for which phy_id > 0) */
+ for (phy_id = 0; phy_id < NR_CPUS; phy_id++) {
+ /* Don't boot primary CPU */
+ if (max_cpus <= cpucount + 1)
+ continue;
+ if (phy_id != 0)
+ do_boot_cpu(phy_id);
+ set_cpu_possible(phy_id, true);
+ smp_show_cpu_info(phy_id);
+ }
+
+smp_done:
+ Dprintk("Boot done.\n");
+}
+
+/**
+ * smp_store_cpu_info - Save a CPU's information
+ * @cpu: The CPU to save for.
+ *
+ * Save boot_cpu_data and jiffy for the specified CPU.
+ */
+static void __init smp_store_cpu_info(int cpu)
+{
+ struct mn10300_cpuinfo *ci = &cpu_data[cpu];
+
+ *ci = boot_cpu_data;
+ ci->loops_per_jiffy = loops_per_jiffy;
+ ci->type = CPUREV;
+}
+
+/**
+ * smp_tune_scheduling - Set time slice value
+ *
+ * Nothing to do here.
+ */
+static void __init smp_tune_scheduling(void)
+{
+}
+
+/**
+ * do_boot_cpu: Boot up one CPU
+ * @phy_id: Physical ID of CPU to boot.
+ *
+ * Send an IPI to a secondary CPU to boot it. Returns 0 on success, 1
+ * otherwise.
+ */
+static int __init do_boot_cpu(int phy_id)
+{
+ struct task_struct *idle;
+ unsigned long send_status, callin_status;
+ int timeout, cpu_id;
+
+ send_status = GxICR_REQUEST;
+ callin_status = 0;
+ timeout = 0;
+ cpu_id = phy_id;
+
+ cpucount++;
+
+ /* Create idle thread for this CPU */
+ idle = fork_idle(cpu_id);
+ if (IS_ERR(idle))
+ panic("Failed fork for CPU#%d.", cpu_id);
+
+ idle->thread.pc = (unsigned long)start_secondary;
+
+ printk(KERN_NOTICE "Booting CPU#%d\n", cpu_id);
+ start_stack[cpu_id - 1] = idle->thread.sp;
+
+ task_thread_info(idle)->cpu = cpu_id;
+
+ /* Send boot IPI to AP */
+ send_IPI_mask(cpumask_of(phy_id), SMP_BOOT_IRQ);
+
+ Dprintk("Waiting for send to finish...\n");
+
+ /* Wait for AP's IPI receive in 100[ms] */
+ do {
+ udelay(1000);
+ send_status =
+ CROSS_GxICR(SMP_BOOT_IRQ, phy_id) & GxICR_REQUEST;
+ } while (send_status == GxICR_REQUEST && timeout++ < 100);
+
+ Dprintk("Waiting for cpu_callin_map.\n");
+
+ if (send_status == 0) {
+ /* Allow AP to start initializing */
+ cpu_set(cpu_id, cpu_callout_map);
+
+ /* Wait for setting cpu_callin_map */
+ timeout = 0;
+ do {
+ udelay(1000);
+ callin_status = cpu_isset(cpu_id, cpu_callin_map);
+ } while (callin_status == 0 && timeout++ < 5000);
+
+ if (callin_status == 0)
+ Dprintk("Not responding.\n");
+ } else {
+ printk(KERN_WARNING "IPI not delivered.\n");
+ }
+
+ if (send_status == GxICR_REQUEST || callin_status == 0) {
+ cpu_clear(cpu_id, cpu_callout_map);
+ cpu_clear(cpu_id, cpu_callin_map);
+ cpu_clear(cpu_id, cpu_initialized);
+ cpucount--;
+ return 1;
+ }
+ return 0;
+}
+
+/**
+ * smp_show_cpu_info - Show SMP CPU information
+ * @cpu: The CPU of interest.
+ */
+static void __init smp_show_cpu_info(int cpu)
+{
+ struct mn10300_cpuinfo *ci = &cpu_data[cpu];
+
+ printk(KERN_INFO
+ "CPU#%d : ioclk speed: %lu.%02luMHz : bogomips : %lu.%02lu\n",
+ cpu,
+ MN10300_IOCLK / 1000000,
+ (MN10300_IOCLK / 10000) % 100,
+ ci->loops_per_jiffy / (500000 / HZ),
+ (ci->loops_per_jiffy / (5000 / HZ)) % 100);
+}
+
+/**
+ * smp_callin - Set cpu_callin_map of the current CPU ID
+ */
+static void __init smp_callin(void)
+{
+ unsigned long timeout;
+ int cpu;
+
+ cpu = smp_processor_id();
+ timeout = jiffies + (2 * HZ);
+
+ if (cpu_isset(cpu, cpu_callin_map)) {
+ printk(KERN_ERR "CPU#%d already present.\n", cpu);
+ BUG();
+ }
+ Dprintk("CPU#%d waiting for CALLOUT\n", cpu);
+
+ /* Wait up to 2s in total for the AP to start up */
+ while (time_before(jiffies, timeout)) {
+ if (cpu_isset(cpu, cpu_callout_map))
+ break;
+ cpu_relax();
+ }
+
+ if (!time_before(jiffies, timeout)) {
+ printk(KERN_ERR
+ "BUG: CPU#%d started up but did not get a callout!\n",
+ cpu);
+ BUG();
+ }
+
+#ifdef CONFIG_CALIBRATE_DELAY
+ calibrate_delay(); /* Get our bogomips */
+#endif
+
+ /* Save our processor parameters */
+ smp_store_cpu_info(cpu);
+
+ /* Allow the boot processor to continue */
+ cpu_set(cpu, cpu_callin_map);
+}
+
+/**
+ * smp_online - Set cpu_online_map
+ */
+static void __init smp_online(void)
+{
+ int cpu;
+
+ cpu = smp_processor_id();
+
+ local_irq_enable();
+
+ cpu_set(cpu, cpu_online_map);
+ smp_wmb();
+}
+
+/**
+ * smp_cpus_done - Complete SMP bring-up
+ * @max_cpus: Maximum CPU count.
+ *
+ * Do nothing.
+ */
+void __init smp_cpus_done(unsigned int max_cpus)
+{
+}
+
+/*
+ * smp_prepare_boot_cpu - Set up state for the boot processor.
+ *
+ * Set up the cpu_online_map, cpu_callout_map and cpu_callin_map of the boot
+ * processor (CPU 0).
+ */
+void __devinit smp_prepare_boot_cpu(void)
+{
+ cpu_set(0, cpu_callout_map);
+ cpu_set(0, cpu_callin_map);
+ current_thread_info()->cpu = 0;
+}
+
+/*
+ * initialize_secondary - Initialise a secondary CPU (Application Processor).
+ *
+ * Set SP register and jump to thread's PC address.
+ */
+void initialize_secondary(void)
+{
+ asm volatile (
+ "mov %0,sp \n"
+ "jmp (%1) \n"
+ :
+ : "a"(current->thread.sp), "a"(current->thread.pc));
+}
+
+/**
+ * __cpu_up - Set smp_commenced_mask for the nominated CPU
+ * @cpu: The target CPU.
+ */
+int __devinit __cpu_up(unsigned int cpu)
+{
+ int timeout;
+
+#ifdef CONFIG_HOTPLUG_CPU
+ if (num_online_cpus() == 1)
+ disable_hlt();
+ if (sleep_mode[cpu])
+ run_wakeup_cpu(cpu);
+#endif /* CONFIG_HOTPLUG_CPU */
+
+ cpu_set(cpu, smp_commenced_mask);
+
+ /* Wait 5s total for a response */
+ for (timeout = 0 ; timeout < 5000 ; timeout++) {
+ if (cpu_isset(cpu, cpu_online_map))
+ break;
+ udelay(1000);
+ }
+
+ BUG_ON(!cpu_isset(cpu, cpu_online_map));
+ return 0;
+}
+
+/**
+ * setup_profiling_timer - Set up the profiling timer
+ * @multiplier: The frequency multiplier to use
+ *
+ * The frequency of the profiling timer can be changed by writing a multiplier
+ * value into /proc/profile.
+ */
+int setup_profiling_timer(unsigned int multiplier)
+{
+ return -EINVAL;
+}
+
+/*
+ * CPU hotplug routines
+ */
+#ifdef CONFIG_HOTPLUG_CPU
+
+static DEFINE_PER_CPU(struct cpu, cpu_devices);
+
+static int __init topology_init(void)
+{
+ int cpu, ret;
+
+ for_each_cpu(cpu) {
+ ret = register_cpu(&per_cpu(cpu_devices, cpu), cpu, NULL);
+ if (ret)
+ printk(KERN_WARNING
+ "topology_init: register_cpu %d failed (%d)\n",
+ cpu, ret);
+ }
+ return 0;
+}
+
+subsys_initcall(topology_init);
+
+int __cpu_disable(void)
+{
+ int cpu = smp_processor_id();
+ if (cpu == 0)
+ return -EBUSY;
+
+ migrate_irqs();
+ cpu_clear(cpu, current->active_mm->cpu_vm_mask);
+ return 0;
+}
+
+void __cpu_die(unsigned int cpu)
+{
+ run_sleep_cpu(cpu);
+
+ if (num_online_cpus() == 1)
+ enable_hlt();
+}
+
+#ifdef CONFIG_MN10300_CACHE_ENABLED
+static inline void hotplug_cpu_disable_cache(void)
+{
+ int tmp;
+ asm volatile(
+ " movhu (%1),%0 \n"
+ " and %2,%0 \n"
+ " movhu %0,(%1) \n"
+ "1: movhu (%1),%0 \n"
+ " btst %3,%0 \n"
+ " bne 1b \n"
+ : "=&r"(tmp)
+ : "a"(&CHCTR),
+ "i"(~(CHCTR_ICEN | CHCTR_DCEN)),
+ "i"(CHCTR_ICBUSY | CHCTR_DCBUSY)
+ : "memory", "cc");
+}
+
+static inline void hotplug_cpu_enable_cache(void)
+{
+ int tmp;
+ asm volatile(
+ "movhu (%1),%0 \n"
+ "or %2,%0 \n"
+ "movhu %0,(%1) \n"
+ : "=&r"(tmp)
+ : "a"(&CHCTR),
+ "i"(CHCTR_ICEN | CHCTR_DCEN)
+ : "memory", "cc");
+}
+
+static inline void hotplug_cpu_invalidate_cache(void)
+{
+ int tmp;
+ asm volatile (
+ "movhu (%1),%0 \n"
+ "or %2,%0 \n"
+ "movhu %0,(%1) \n"
+ : "=&r"(tmp)
+ : "a"(&CHCTR),
+ "i"(CHCTR_ICINV | CHCTR_DCINV)
+ : "cc");
+}
+
+#else /* CONFIG_MN10300_CACHE_ENABLED */
+#define hotplug_cpu_disable_cache() do {} while (0)
+#define hotplug_cpu_enable_cache() do {} while (0)
+#define hotplug_cpu_invalidate_cache() do {} while (0)
+#endif /* CONFIG_MN10300_CACHE_ENABLED */
+
+/**
+ * hotplug_cpu_nmi_call_function - Call a function on other CPUs for hotplug
+ * @cpumask: List of target CPUs.
+ * @func: The function to call on those CPUs.
+ * @info: The context data for the function to be called.
+ * @wait: Whether to wait for the calls to complete.
+ *
+ * Non-maskably call a function on other CPUs for hotplug purposes.
+ *
+ * This function must be called with maskable interrupts disabled.
+ */
+static int hotplug_cpu_nmi_call_function(cpumask_t cpumask,
+ smp_call_func_t func, void *info,
+ int wait)
+{
+ /*
+ * The address and the size of nmi_call_func_mask_data
+ * need to be aligned on L1_CACHE_BYTES.
+ */
+ static struct nmi_call_data_struct nmi_call_func_mask_data
+ __cacheline_aligned;
+ unsigned long start, end;
+
+ start = (unsigned long)&nmi_call_func_mask_data;
+ end = start + sizeof(struct nmi_call_data_struct);
+
+ nmi_call_func_mask_data.func = func;
+ nmi_call_func_mask_data.info = info;
+ nmi_call_func_mask_data.started = cpumask;
+ nmi_call_func_mask_data.wait = wait;
+ if (wait)
+ nmi_call_func_mask_data.finished = cpumask;
+
+ spin_lock(&smp_nmi_call_lock);
+ nmi_call_data = &nmi_call_func_mask_data;
+ mn10300_local_dcache_flush_range(start, end);
+ smp_wmb();
+
+ send_IPI_mask(cpumask, CALL_FUNCTION_NMI_IPI);
+
+ do {
+ mn10300_local_dcache_inv_range(start, end);
+ barrier();
+ } while (!cpus_empty(nmi_call_func_mask_data.started));
+
+ if (wait) {
+ do {
+ mn10300_local_dcache_inv_range(start, end);
+ barrier();
+ } while (!cpus_empty(nmi_call_func_mask_data.finished));
+ }
+
+ spin_unlock(&smp_nmi_call_lock);
+ return 0;
+}
+
+static void restart_wakeup_cpu(void)
+{
+ unsigned int cpu = smp_processor_id();
+
+ cpu_set(cpu, cpu_callin_map);
+ local_flush_tlb();
+ cpu_set(cpu, cpu_online_map);
+ smp_wmb();
+}
+
+static void prepare_sleep_cpu(void *unused)
+{
+ sleep_mode[smp_processor_id()] = 1;
+ smp_mb();
+ mn10300_local_dcache_flush_inv();
+ hotplug_cpu_disable_cache();
+ hotplug_cpu_invalidate_cache();
+}
+
+/* When this function is called, IE=0 and NMID=0. */
+static void sleep_cpu(void *unused)
+{
+ unsigned int cpu_id = smp_processor_id();
+ /*
+ * CALL_FUNCTION_NMI_IPI for wakeup_cpu() must not be requested
+ * before this CPU goes into SLEEP mode.
+ */
+ do {
+ smp_mb();
+ __sleep_cpu();
+ } while (sleep_mode[cpu_id]);
+ restart_wakeup_cpu();
+}
+
+static void run_sleep_cpu(unsigned int cpu)
+{
+ unsigned long flags;
+ cpumask_t cpumask = cpumask_of(cpu);
+
+ flags = arch_local_cli_save();
+ hotplug_cpu_nmi_call_function(cpumask, prepare_sleep_cpu, NULL, 1);
+ hotplug_cpu_nmi_call_function(cpumask, sleep_cpu, NULL, 0);
+ udelay(1); /* delay for the cpu to sleep. */
+ arch_local_irq_restore(flags);
+}
+
+static void wakeup_cpu(void)
+{
+ hotplug_cpu_invalidate_cache();
+ hotplug_cpu_enable_cache();
+ smp_mb();
+ sleep_mode[smp_processor_id()] = 0;
+}
+
+static void run_wakeup_cpu(unsigned int cpu)
+{
+ unsigned long flags;
+
+ flags = arch_local_cli_save();
+#if NR_CPUS == 2
+ mn10300_local_dcache_flush_inv();
+#else
+ /*
+ * Before waking up the CPU, all online CPUs should stop and
+ * flush their D-caches so that global data is written back to RAM.
+ */
+#error NR_CPUS > 2 is not supported when CONFIG_HOTPLUG_CPU=y.
+#endif
+ hotplug_cpu_nmi_call_function(cpumask_of(cpu), wakeup_cpu, NULL, 1);
+ arch_local_irq_restore(flags);
+}
+
+#endif /* CONFIG_HOTPLUG_CPU */
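The do_boot_cpu()/smp_callin() pair earlier in this file implements a two-flag rendezvous: the boot CPU marks the AP in cpu_callout_map and polls cpu_callin_map, while the AP waits for its callout bit, finishes its local setup, then sets its callin bit. A minimal userspace sketch of the same pattern, using C11 atomics and a pthread in place of cpumasks and IPIs (illustration only; the kernel code above also bounds both waits with timeouts):

#include <stdatomic.h>
#include <stdio.h>
#include <pthread.h>

static atomic_int callout, callin;

static void *ap_thread(void *arg)
{
	(void)arg;
	while (!atomic_load(&callout))	/* wait for the boot CPU's go-ahead */
		;
	/* per-CPU setup would happen here */
	atomic_store(&callin, 1);	/* report in to the boot CPU */
	return NULL;
}

int main(void)
{
	pthread_t ap;

	pthread_create(&ap, NULL, ap_thread, NULL);
	atomic_store(&callout, 1);	/* allow the AP to start initialising */
	while (!atomic_load(&callin))	/* wait for the AP to report in */
		;
	puts("AP called in");
	pthread_join(ap, NULL);
	return 0;
}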
diff --git a/arch/mn10300/kernel/switch_to.S b/arch/mn10300/kernel/switch_to.S
index 630aad71b946..9074d0fb8788 100644
--- a/arch/mn10300/kernel/switch_to.S
+++ b/arch/mn10300/kernel/switch_to.S
@@ -15,6 +15,9 @@
#include <linux/linkage.h>
#include <asm/thread_info.h>
#include <asm/cpu-regs.h>
+#ifdef CONFIG_SMP
+#include <proc/smp-regs.h>
+#endif /* CONFIG_SMP */
.text
@@ -35,8 +38,6 @@ ENTRY(__switch_to)
mov d1,a1
# save prev context
- mov (__frame),d0
- mov d0,(THREAD_FRAME,a0)
mov __switch_back,d0
mov d0,(THREAD_PC,a0)
mov sp,a2
@@ -58,8 +59,6 @@ ENTRY(__switch_to)
mov a2,e2
#endif
- mov (THREAD_FRAME,a1),a2
- mov a2,(__frame)
mov (THREAD_PC,a1),a2
mov d2,d0 # for ret_from_fork
mov d0,a0 # for __switch_to
diff --git a/arch/mn10300/kernel/time.c b/arch/mn10300/kernel/time.c
index 8f7f6d22783d..f860a340acc9 100644
--- a/arch/mn10300/kernel/time.c
+++ b/arch/mn10300/kernel/time.c
@@ -17,29 +17,18 @@
#include <linux/smp.h>
#include <linux/profile.h>
#include <linux/cnt32_to_63.h>
+#include <linux/clocksource.h>
+#include <linux/clockchips.h>
#include <asm/irq.h>
#include <asm/div64.h>
#include <asm/processor.h>
#include <asm/intctl-regs.h>
#include <asm/rtc.h>
-
-#ifdef CONFIG_MN10300_RTC
-unsigned long mn10300_ioclk; /* system I/O clock frequency */
-unsigned long mn10300_iobclk; /* system I/O clock frequency */
-unsigned long mn10300_tsc_per_HZ; /* number of ioclks per jiffy */
-#endif /* CONFIG_MN10300_RTC */
+#include "internal.h"
static unsigned long mn10300_last_tsc; /* time-stamp counter at last time
* interrupt occurred */
-static irqreturn_t timer_interrupt(int irq, void *dev_id);
-
-static struct irqaction timer_irq = {
- .handler = timer_interrupt,
- .flags = IRQF_DISABLED | IRQF_SHARED | IRQF_TIMER,
- .name = "timer",
-};
-
static unsigned long sched_clock_multiplier;
/*
@@ -54,9 +43,12 @@ unsigned long long sched_clock(void)
unsigned long tsc, tmp;
unsigned product[3]; /* 96-bit intermediate value */
+ /* cnt32_to_63() is not safe with preemption */
+ preempt_disable();
+
/* read the TSC value
*/
- tsc = 0 - get_cycles(); /* get_cycles() counts down */
+ tsc = get_cycles();
/* expand to 64-bits.
* - sched_clock() must be called once a minute or better or the
@@ -64,6 +56,8 @@ unsigned long long sched_clock(void)
*/
tsc64.ll = cnt32_to_63(tsc) & 0x7fffffffffffffffULL;
+ preempt_enable();
+
/* scale the 64-bit TSC value to a nanosecond value via a 96-bit
* intermediate
*/
@@ -90,6 +84,20 @@ static void __init mn10300_sched_clock_init(void)
__muldiv64u(NSEC_PER_SEC, 1 << 16, MN10300_TSCCLK);
}
+/**
+ * local_timer_interrupt - Local timer interrupt handler
+ *
+ * Handle local timer interrupts for this CPU. They may have been propagated
+ * to this CPU from the CPU that actually gets them by way of an IPI.
+ */
+irqreturn_t local_timer_interrupt(void)
+{
+ profile_tick(CPU_PROFILING);
+ update_process_times(user_mode(get_irq_regs()));
+ return IRQ_HANDLED;
+}
+
+#ifndef CONFIG_GENERIC_TIME
/*
* advance the kernel's time keeping clocks (xtime and jiffies)
* - we use Timer 0 & 1 cascaded as a clock to nudge us the next time
@@ -98,27 +106,73 @@ static void __init mn10300_sched_clock_init(void)
static irqreturn_t timer_interrupt(int irq, void *dev_id)
{
unsigned tsc, elapse;
+ irqreturn_t ret;
write_seqlock(&xtime_lock);
while (tsc = get_cycles(),
- elapse = mn10300_last_tsc - tsc, /* time elapsed since last
+ elapse = tsc - mn10300_last_tsc, /* time elapsed since last
* tick */
elapse > MN10300_TSC_PER_HZ
) {
- mn10300_last_tsc -= MN10300_TSC_PER_HZ;
+ mn10300_last_tsc += MN10300_TSC_PER_HZ;
/* advance the kernel's time tracking system */
- profile_tick(CPU_PROFILING);
do_timer(1);
}
write_sequnlock(&xtime_lock);
- update_process_times(user_mode(get_irq_regs()));
+ ret = local_timer_interrupt();
+#ifdef CONFIG_SMP
+ send_IPI_allbutself(LOCAL_TIMER_IPI);
+#endif
+ return ret;
+}
- return IRQ_HANDLED;
+static struct irqaction timer_irq = {
+ .handler = timer_interrupt,
+ .flags = IRQF_DISABLED | IRQF_SHARED | IRQF_TIMER,
+ .name = "timer",
+};
+#endif /* CONFIG_GENERIC_TIME */
+
+#ifdef CONFIG_CSRC_MN10300
+void __init clocksource_set_clock(struct clocksource *cs, unsigned int clock)
+{
+ u64 temp;
+ u32 shift;
+
+ /* Find a shift value */
+ for (shift = 32; shift > 0; shift--) {
+ temp = (u64) NSEC_PER_SEC << shift;
+ do_div(temp, clock);
+ if ((temp >> 32) == 0)
+ break;
+ }
+ cs->shift = shift;
+ cs->mult = (u32) temp;
}
+#endif
+
+#ifdef CONFIG_CEVT_MN10300
+void __cpuinit clockevent_set_clock(struct clock_event_device *cd,
+ unsigned int clock)
+{
+ u64 temp;
+ u32 shift;
+
+ /* Find a shift value */
+ for (shift = 32; shift > 0; shift--) {
+ temp = (u64) clock << shift;
+ do_div(temp, NSEC_PER_SEC);
+ if ((temp >> 32) == 0)
+ break;
+ }
+ cd->shift = shift;
+ cd->mult = (u32) temp;
+}
+#endif
/*
* initialise the various timers used by the main part of the kernel
@@ -131,21 +185,25 @@ void __init time_init(void)
*/
TMPSCNT |= TMPSCNT_ENABLE;
+#ifdef CONFIG_GENERIC_TIME
+ init_clocksource();
+#else
startup_timestamp_counter();
+#endif
printk(KERN_INFO
"timestamp counter I/O clock running at %lu.%02lu"
" (calibrated against RTC)\n",
MN10300_TSCCLK / 1000000, (MN10300_TSCCLK / 10000) % 100);
- mn10300_last_tsc = TMTSCBC;
-
- /* use timer 0 & 1 cascaded to tick at as close to HZ as possible */
- setup_irq(TMJCIRQ, &timer_irq);
+ mn10300_last_tsc = read_timestamp_counter();
- set_intr_level(TMJCIRQ, TMJCICR_LEVEL);
-
- startup_jiffies_counter();
+#ifdef CONFIG_GENERIC_CLOCKEVENTS
+ init_clockevents();
+#else
+ reload_jiffies_counter(MN10300_JC_PER_HZ - 1);
+ setup_jiffies_interrupt(TMJCIRQ, &timer_irq, CONFIG_TIMER_IRQ_LEVEL);
+#endif
#ifdef CONFIG_MN10300_WD_TIMER
/* start the watchdog timer */
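A note on the mult/shift search in clocksource_set_clock() and clockevent_set_clock() above: the loop picks the largest shift for which the scaled ratio still fits in 32 bits, so that a later conversion can be done as (cycles * mult) >> shift without 64-bit division. A standalone sketch of the clocksource case, using a hypothetical 33333333 Hz clock rate purely as an example value (with that rate the search settles on shift 27, about 30 ns per cycle):

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	const uint64_t NSEC_PER_SEC = 1000000000ULL;
	const uint32_t clock = 33333333;	/* hypothetical ioclk rate in Hz */
	uint64_t temp = 0;
	uint32_t shift, mult;

	/* largest shift such that (NSEC_PER_SEC << shift) / clock fits in 32 bits */
	for (shift = 32; shift > 0; shift--) {
		temp = (NSEC_PER_SEC << shift) / clock;
		if ((temp >> 32) == 0)
			break;
	}
	mult = (uint32_t)temp;

	/* nanoseconds for 1000 cycles: (cycles * mult) >> shift */
	printf("shift=%u mult=%u -> %llu ns per 1000 cycles\n",
	       shift, mult,
	       (unsigned long long)(((uint64_t)1000 * mult) >> shift));
	return 0;
}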
diff --git a/arch/mn10300/kernel/traps.c b/arch/mn10300/kernel/traps.c
index 91365adba4f5..b90c3f160c77 100644
--- a/arch/mn10300/kernel/traps.c
+++ b/arch/mn10300/kernel/traps.c
@@ -45,9 +45,6 @@
#error "INTERRUPT_VECTOR_BASE not aligned to 16MiB boundary!"
#endif
-struct pt_regs *__frame; /* current frame pointer */
-EXPORT_SYMBOL(__frame);
-
int kstack_depth_to_print = 24;
spinlock_t die_lock = __SPIN_LOCK_UNLOCKED(die_lock);
@@ -101,7 +98,6 @@ DO_EINFO(SIGILL, {}, "invalid opcode", invalid_op, ILL_ILLOPC);
DO_EINFO(SIGILL, {}, "invalid ex opcode", invalid_exop, ILL_ILLOPC);
DO_EINFO(SIGBUS, {}, "invalid address", mem_error, BUS_ADRERR);
DO_EINFO(SIGBUS, {}, "bus error", bus_error, BUS_ADRERR);
-DO_EINFO(SIGILL, {}, "FPU invalid opcode", fpu_invalid_op, ILL_COPROC);
DO_ERROR(SIGTRAP,
#ifndef CONFIG_MN10300_USING_JTAG
@@ -222,11 +218,14 @@ void show_registers_only(struct pt_regs *regs)
printk(KERN_EMERG "threadinfo=%p task=%p)\n",
current_thread_info(), current);
- if ((unsigned long) current >= 0x90000000UL &&
- (unsigned long) current < 0x94000000UL)
+ if ((unsigned long) current >= PAGE_OFFSET &&
+ (unsigned long) current < (unsigned long)high_memory)
printk(KERN_EMERG "Process %s (pid: %d)\n",
current->comm, current->pid);
+#ifdef CONFIG_SMP
+ printk(KERN_EMERG "CPUID: %08x\n", CPUID);
+#endif
printk(KERN_EMERG "CPUP: %04hx\n", CPUP);
printk(KERN_EMERG "TBR: %08x\n", TBR);
printk(KERN_EMERG "DEAR: %08x\n", DEAR);
@@ -522,8 +521,12 @@ void __init set_intr_stub(enum exception_code code, void *handler)
{
unsigned long addr;
u8 *vector = (u8 *)(CONFIG_INTERRUPT_VECTOR_BASE + code);
+ unsigned long flags;
addr = (unsigned long) handler - (unsigned long) vector;
+
+ flags = arch_local_cli_save();
+
vector[0] = 0xdc; /* JMP handler */
vector[1] = addr;
vector[2] = addr >> 8;
@@ -533,30 +536,12 @@ void __init set_intr_stub(enum exception_code code, void *handler)
vector[6] = 0xcb;
vector[7] = 0xcb;
- mn10300_dcache_flush_inv();
- mn10300_icache_inv();
-}
-
-/*
- * set an interrupt stub to invoke the JTAG unit and then jump to a handler
- */
-void __init set_jtag_stub(enum exception_code code, void *handler)
-{
- unsigned long addr;
- u8 *vector = (u8 *)(CONFIG_INTERRUPT_VECTOR_BASE + code);
-
- addr = (unsigned long) handler - ((unsigned long) vector + 1);
- vector[0] = 0xff; /* PI to jump into JTAG debugger */
- vector[1] = 0xdc; /* jmp handler */
- vector[2] = addr;
- vector[3] = addr >> 8;
- vector[4] = addr >> 16;
- vector[5] = addr >> 24;
- vector[6] = 0xcb;
- vector[7] = 0xcb;
+ arch_local_irq_restore(flags);
+#ifndef CONFIG_MN10300_CACHE_SNOOP
mn10300_dcache_flush_inv();
- flush_icache_range((unsigned long) vector, (unsigned long) vector + 8);
+ mn10300_icache_inv();
+#endif
}
/*
@@ -581,7 +566,6 @@ void __init trap_init(void)
set_excp_vector(EXCEP_PRIVINSACC, insn_acc_error);
set_excp_vector(EXCEP_PRIVDATACC, data_acc_error);
set_excp_vector(EXCEP_DATINSACC, insn_acc_error);
- set_excp_vector(EXCEP_FPU_DISABLED, fpu_disabled);
set_excp_vector(EXCEP_FPU_UNIMPINS, fpu_invalid_op);
set_excp_vector(EXCEP_FPU_OPERATION, fpu_exception);
diff --git a/arch/mn10300/kernel/vmlinux.lds.S b/arch/mn10300/kernel/vmlinux.lds.S
index 10549dcfb610..febbeee7f2f5 100644
--- a/arch/mn10300/kernel/vmlinux.lds.S
+++ b/arch/mn10300/kernel/vmlinux.lds.S
@@ -70,7 +70,7 @@ SECTIONS
.exit.text : { EXIT_TEXT; }
.exit.data : { EXIT_DATA; }
- PERCPU(32)
+ PERCPU(PAGE_SIZE)
. = ALIGN(PAGE_SIZE);
__init_end = .;
/* freed after init ends here */
diff --git a/arch/mn10300/lib/bitops.c b/arch/mn10300/lib/bitops.c
index 440a7dcbf87b..a66c6cdaf442 100644
--- a/arch/mn10300/lib/bitops.c
+++ b/arch/mn10300/lib/bitops.c
@@ -15,7 +15,7 @@
/*
* try flipping a bit using BSET and BCLR
*/
-void change_bit(int nr, volatile void *addr)
+void change_bit(unsigned long nr, volatile void *addr)
{
if (test_bit(nr, addr))
goto try_clear_bit;
@@ -34,7 +34,7 @@ try_clear_bit:
/*
* try flipping a bit using BSET and BCLR and returning the old value
*/
-int test_and_change_bit(int nr, volatile void *addr)
+int test_and_change_bit(unsigned long nr, volatile void *addr)
{
if (test_bit(nr, addr))
goto try_clear_bit;
diff --git a/arch/mn10300/lib/delay.c b/arch/mn10300/lib/delay.c
index fdf6f710f94e..8e7ceb8ba33d 100644
--- a/arch/mn10300/lib/delay.c
+++ b/arch/mn10300/lib/delay.c
@@ -38,14 +38,14 @@ EXPORT_SYMBOL(__delay);
*/
void __udelay(unsigned long usecs)
{
- signed long ioclk, stop;
+ unsigned long start, stop, cnt;
/* usecs * CLK / 1E6 */
stop = __muldiv64u(usecs, MN10300_TSCCLK, 1000000);
- stop = TMTSCBC - stop;
+ start = TMTSCBC;
do {
- ioclk = TMTSCBC;
- } while (stop < ioclk);
+ cnt = start - TMTSCBC;
+ } while (cnt < stop);
}
EXPORT_SYMBOL(__udelay);
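The rewritten __udelay() above measures elapsed time as start - TMTSCBC rather than comparing against a precomputed stop value; because TMTSCBC is a free-running 32-bit down-counter, the unsigned (modulo 2^32) subtraction stays correct across counter wrap. A tiny standalone illustration, not kernel code:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	/* A 32-bit down-counter sampled before and after it wraps past 0:
	 * the unsigned subtraction still yields the elapsed tick count. */
	uint32_t start = 5;		/* sample taken shortly before the wrap */
	uint32_t now = 0xfffffffe;	/* counter has decremented through 0 */
	uint32_t elapsed = start - now;	/* 7 ticks, despite the wrap */

	printf("elapsed ticks: %u\n", elapsed);
	return 0;
}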
diff --git a/arch/mn10300/lib/do_csum.S b/arch/mn10300/lib/do_csum.S
index e138994e1667..1d27bba0cd8f 100644
--- a/arch/mn10300/lib/do_csum.S
+++ b/arch/mn10300/lib/do_csum.S
@@ -10,26 +10,25 @@
*/
#include <asm/cache.h>
- .section .text
- .balign L1_CACHE_BYTES
+ .section .text
+ .balign L1_CACHE_BYTES
###############################################################################
#
-# unsigned int do_csum(const unsigned char *buff, size_t len)
+# unsigned int do_csum(const unsigned char *buff, int len)
#
###############################################################################
.globl do_csum
- .type do_csum,@function
+ .type do_csum,@function
do_csum:
movm [d2,d3],(sp)
- mov d0,(12,sp)
- mov d1,(16,sp)
mov d1,d2 # count
mov d0,a0 # buff
+ mov a0,a1
clr d1 # accumulator
cmp +0,d2
- beq do_csum_done # return if zero-length buffer
+ ble do_csum_done # check for zero length or negative
# 4-byte align the buffer pointer
btst +3,a0
@@ -41,17 +40,15 @@ do_csum:
inc a0
asl +8,d0
add d0,d1
- addc +0,d1
add -1,d2
-do_csum_addr_not_odd:
+do_csum_addr_not_odd:
cmp +2,d2
bcs do_csum_fewer_than_4
btst +2,a0
beq do_csum_now_4b_aligned
movhu (a0+),d0
add d0,d1
- addc +0,d1
add -2,d2
cmp +4,d2
bcs do_csum_fewer_than_4
@@ -66,20 +63,20 @@ do_csum_now_4b_aligned:
do_csum_loop:
mov (a0+),d0
- add d0,d1
mov (a0+),e0
- addc e0,d1
mov (a0+),e1
- addc e1,d1
mov (a0+),e3
+ add d0,d1
+ addc e0,d1
+ addc e1,d1
addc e3,d1
mov (a0+),d0
- addc d0,d1
mov (a0+),e0
- addc e0,d1
mov (a0+),e1
- addc e1,d1
mov (a0+),e3
+ addc d0,d1
+ addc e0,d1
+ addc e1,d1
addc e3,d1
addc +0,d1
@@ -94,12 +91,12 @@ do_csum_remainder:
cmp +16,d2
bcs do_csum_fewer_than_16
mov (a0+),d0
- add d0,d1
mov (a0+),e0
- addc e0,d1
mov (a0+),e1
- addc e1,d1
mov (a0+),e3
+ add d0,d1
+ addc e0,d1
+ addc e1,d1
addc e3,d1
addc +0,d1
add -16,d2
@@ -131,9 +128,9 @@ do_csum_fewer_than_4:
xor_cmp d0,d0,+2,d2
bcs do_csum_fewer_than_2
movhu (a0+),d0
-do_csum_fewer_than_2:
and +1,d2
beq do_csum_add_last_bit
+do_csum_fewer_than_2:
movbu (a0),d3
add d3,d0
do_csum_add_last_bit:
@@ -142,21 +139,19 @@ do_csum_add_last_bit:
do_csum_done:
# compress the checksum down to 16 bits
- mov +0xffff0000,d2
- and d1,d2
+ mov +0xffff0000,d0
+ and d1,d0
asl +16,d1
- add d2,d1,d0
+ add d1,d0
addc +0xffff,d0
lsr +16,d0
# flip the halves of the word result if the buffer was oddly aligned
- mov (12,sp),d1
- and +1,d1
+ and +1,a1
beq do_csum_not_oddly_aligned
swaph d0,d0 # exchange bits 15:8 with 7:0
do_csum_not_oddly_aligned:
ret [d2,d3],8
-do_csum_end:
- .size do_csum, do_csum_end-do_csum
+ .size do_csum, .-do_csum
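The folding sequence at do_csum_done above compresses the 32-bit ones'-complement accumulator into 16 bits; a conventional C rendering of the same idea (illustration only; the assembly reaches the same result with shifted adds and an add-with-carry) is:

#include <stdint.h>
#include <stdio.h>

/* fold a 32-bit ones'-complement accumulator down to 16 bits */
static uint16_t csum_fold32(uint32_t sum)
{
	sum = (sum >> 16) + (sum & 0xffff);	/* add the two 16-bit halves */
	sum += sum >> 16;			/* fold any carry back in */
	return (uint16_t)sum;
}

int main(void)
{
	printf("%#x\n", (unsigned int)csum_fold32(0x1fffeu));	/* prints 0xffff */
	return 0;
}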
diff --git a/arch/mn10300/mm/Kconfig.cache b/arch/mn10300/mm/Kconfig.cache
new file mode 100644
index 000000000000..c4fd923a55a0
--- /dev/null
+++ b/arch/mn10300/mm/Kconfig.cache
@@ -0,0 +1,101 @@
+#
+# MN10300 CPU cache options
+#
+
+choice
+ prompt "CPU Caching mode"
+ default MN10300_CACHE_WBACK
+ help
+ This option determines the caching mode for the kernel.
+
+ Write-Back caching mode means that all reads and writes cause
+ the affected cacheline to be read into the cache before being
+ operated upon. Memory is then not updated by a write until the cache
+ is filled and a cacheline needs to be displaced from the cache to
+ make room. Only at that point is it written back.
+
+ Write-Through caching only fetches cachelines from memory on a
+ read. Writes always get written directly to memory. If the affected
+ cacheline is also in the cache, it will be updated too.
+
+ The final option is to turn off caching entirely.
+
+config MN10300_CACHE_WBACK
+ bool "Write-Back"
+ help
+ The dcache operates in delayed write-back mode. It must be manually
+ flushed if data is written that subsequently needs to be executed as
+ code or DMA'd by a device.
+
+config MN10300_CACHE_WTHRU
+ bool "Write-Through"
+ help
+ The dcache operates in immediate write-through mode. Writes are
+ committed to RAM immediately in addition to being stored in the
+ cache. This means that the written data is immediately available for
+ execution or DMA.
+
+ This is not available for use with an SMP kernel if cache flushing
+ and invalidation by automatic purge register is not selected.
+
+config MN10300_CACHE_DISABLED
+ bool "Disabled"
+ help
+ The icache and dcache are disabled.
+
+endchoice
+
+config MN10300_CACHE_ENABLED
+ def_bool y if !MN10300_CACHE_DISABLED
+
+
+choice
+ prompt "CPU cache flush/invalidate method"
+ default MN10300_CACHE_MANAGE_BY_TAG if !AM34_2
+ default MN10300_CACHE_MANAGE_BY_REG if AM34_2
+ depends on MN10300_CACHE_ENABLED
+ help
+ This determines the method by which CPU cache flushing and
+ invalidation is performed.
+
+config MN10300_CACHE_MANAGE_BY_TAG
+ bool "Use the cache tag registers directly"
+ depends on !(SMP && MN10300_CACHE_WTHRU)
+
+config MN10300_CACHE_MANAGE_BY_REG
+ bool "Flush areas by way of automatic purge registers (AM34 only)"
+ depends on AM34_2
+
+endchoice
+
+config MN10300_CACHE_INV_BY_TAG
+ def_bool y if MN10300_CACHE_MANAGE_BY_TAG && MN10300_CACHE_ENABLED
+
+config MN10300_CACHE_INV_BY_REG
+ def_bool y if MN10300_CACHE_MANAGE_BY_REG && MN10300_CACHE_ENABLED
+
+config MN10300_CACHE_FLUSH_BY_TAG
+ def_bool y if MN10300_CACHE_MANAGE_BY_TAG && MN10300_CACHE_WBACK
+
+config MN10300_CACHE_FLUSH_BY_REG
+ def_bool y if MN10300_CACHE_MANAGE_BY_REG && MN10300_CACHE_WBACK
+
+
+config MN10300_HAS_CACHE_SNOOP
+ def_bool n
+
+config MN10300_CACHE_SNOOP
+ bool "Use CPU Cache Snooping"
+ depends on MN10300_CACHE_ENABLED && MN10300_HAS_CACHE_SNOOP
+ default y
+
+config MN10300_CACHE_FLUSH_ICACHE
+ def_bool y if MN10300_CACHE_WBACK && !MN10300_CACHE_SNOOP
+ help
+ Set if we need to flush the dcache before the icache is invalidated.
+
+config MN10300_CACHE_INV_ICACHE
+ def_bool y if MN10300_CACHE_WTHRU && !MN10300_CACHE_SNOOP
+ help
+ Set if we need the icache to be invalidated, even if the dcache is in
+ write-through mode and doesn't need flushing.
diff --git a/arch/mn10300/mm/Makefile b/arch/mn10300/mm/Makefile
index 1557277fbc5c..203fee23f7d7 100644
--- a/arch/mn10300/mm/Makefile
+++ b/arch/mn10300/mm/Makefile
@@ -2,11 +2,21 @@
# Makefile for the MN10300-specific memory management code
#
-cacheflush-y := cache.o cache-mn10300.o
-cacheflush-$(CONFIG_MN10300_CACHE_WBACK) += cache-flush-mn10300.o
+cache-smp-wback-$(CONFIG_MN10300_CACHE_WBACK) := cache-smp-flush.o
+
+cacheflush-y := cache.o
+cacheflush-$(CONFIG_SMP) += cache-smp.o cache-smp-inv.o $(cache-smp-wback-y)
+cacheflush-$(CONFIG_MN10300_CACHE_INV_ICACHE) += cache-inv-icache.o
+cacheflush-$(CONFIG_MN10300_CACHE_FLUSH_ICACHE) += cache-flush-icache.o
+cacheflush-$(CONFIG_MN10300_CACHE_INV_BY_TAG) += cache-inv-by-tag.o
+cacheflush-$(CONFIG_MN10300_CACHE_INV_BY_REG) += cache-inv-by-reg.o
+cacheflush-$(CONFIG_MN10300_CACHE_FLUSH_BY_TAG) += cache-flush-by-tag.o
+cacheflush-$(CONFIG_MN10300_CACHE_FLUSH_BY_REG) += cache-flush-by-reg.o
cacheflush-$(CONFIG_MN10300_CACHE_DISABLED) := cache-disabled.o
obj-y := \
init.o fault.o pgtable.o extable.o tlb-mn10300.o mmu-context.o \
misalignment.o dma-alloc.o $(cacheflush-y)
+
+obj-$(CONFIG_SMP) += tlb-smp.o
diff --git a/arch/mn10300/mm/cache-flush-by-reg.S b/arch/mn10300/mm/cache-flush-by-reg.S
new file mode 100644
index 000000000000..1dcae0211671
--- /dev/null
+++ b/arch/mn10300/mm/cache-flush-by-reg.S
@@ -0,0 +1,308 @@
+/* MN10300 CPU core caching routines, using indirect regs on cache controller
+ *
+ * Copyright (C) 2007 Red Hat, Inc. All Rights Reserved.
+ * Written by David Howells (dhowells@redhat.com)
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public Licence
+ * as published by the Free Software Foundation; either version
+ * 2 of the Licence, or (at your option) any later version.
+ */
+
+#include <linux/sys.h>
+#include <linux/linkage.h>
+#include <asm/smp.h>
+#include <asm/page.h>
+#include <asm/cache.h>
+#include <asm/irqflags.h>
+
+ .am33_2
+
+#ifndef CONFIG_SMP
+ .globl mn10300_dcache_flush
+ .globl mn10300_dcache_flush_page
+ .globl mn10300_dcache_flush_range
+ .globl mn10300_dcache_flush_range2
+ .globl mn10300_dcache_flush_inv
+ .globl mn10300_dcache_flush_inv_page
+ .globl mn10300_dcache_flush_inv_range
+ .globl mn10300_dcache_flush_inv_range2
+
+mn10300_dcache_flush = mn10300_local_dcache_flush
+mn10300_dcache_flush_page = mn10300_local_dcache_flush_page
+mn10300_dcache_flush_range = mn10300_local_dcache_flush_range
+mn10300_dcache_flush_range2 = mn10300_local_dcache_flush_range2
+mn10300_dcache_flush_inv = mn10300_local_dcache_flush_inv
+mn10300_dcache_flush_inv_page = mn10300_local_dcache_flush_inv_page
+mn10300_dcache_flush_inv_range = mn10300_local_dcache_flush_inv_range
+mn10300_dcache_flush_inv_range2 = mn10300_local_dcache_flush_inv_range2
+
+#endif /* !CONFIG_SMP */
+
+###############################################################################
+#
+# void mn10300_local_dcache_flush(void)
+# Flush the entire data cache back to RAM
+#
+###############################################################################
+ ALIGN
+ .globl mn10300_local_dcache_flush
+ .type mn10300_local_dcache_flush,@function
+mn10300_local_dcache_flush:
+ movhu (CHCTR),d0
+ btst CHCTR_DCEN,d0
+ beq mn10300_local_dcache_flush_end
+
+ mov DCPGCR,a0
+
+ LOCAL_CLI_SAVE(d1)
+
+ # wait for busy bit of area purge
+ setlb
+ mov (a0),d0
+ btst DCPGCR_DCPGBSY,d0
+ lne
+
+ # set mask
+ clr d0
+ mov d0,(DCPGMR)
+
+ # area purge
+ #
+ # DCPGCR = DCPGCR_DCP
+ #
+ mov DCPGCR_DCP,d0
+ mov d0,(a0)
+
+ # wait for busy bit of area purge
+ setlb
+ mov (a0),d0
+ btst DCPGCR_DCPGBSY,d0
+ lne
+
+ LOCAL_IRQ_RESTORE(d1)
+
+mn10300_local_dcache_flush_end:
+ ret [],0
+ .size mn10300_local_dcache_flush,.-mn10300_local_dcache_flush
+
+###############################################################################
+#
+# void mn10300_local_dcache_flush_page(unsigned long start)
+# void mn10300_local_dcache_flush_range(unsigned long start, unsigned long end)
+# void mn10300_local_dcache_flush_range2(unsigned long start, unsigned long size)
+# Flush a range of addresses on a page in the dcache
+#
+###############################################################################
+ ALIGN
+ .globl mn10300_local_dcache_flush_page
+ .globl mn10300_local_dcache_flush_range
+ .globl mn10300_local_dcache_flush_range2
+ .type mn10300_local_dcache_flush_page,@function
+ .type mn10300_local_dcache_flush_range,@function
+ .type mn10300_local_dcache_flush_range2,@function
+mn10300_local_dcache_flush_page:
+ and ~(PAGE_SIZE-1),d0
+ mov PAGE_SIZE,d1
+mn10300_local_dcache_flush_range2:
+ add d0,d1
+mn10300_local_dcache_flush_range:
+ movm [d2,d3,a2],(sp)
+
+ movhu (CHCTR),d2
+ btst CHCTR_DCEN,d2
+ beq mn10300_local_dcache_flush_range_end
+
+ # calculate alignsize
+ #
+ # alignsize = L1_CACHE_BYTES;
+ # for (i = (end - start - 1) / L1_CACHE_BYTES ; i > 0; i >>= 1)
+ # alignsize <<= 1;
+ # d2 = alignsize;
+ #
+ mov L1_CACHE_BYTES,d2
+ sub d0,d1,d3
+ add -1,d3
+ lsr L1_CACHE_SHIFT,d3
+ beq 2f
+1:
+ add d2,d2
+ lsr 1,d3
+ bne 1b
+2:
+ mov d1,a1 # a1 = end
+
+ LOCAL_CLI_SAVE(d3)
+ mov DCPGCR,a0
+
+ # wait for busy bit of area purge
+ setlb
+ mov (a0),d1
+ btst DCPGCR_DCPGBSY,d1
+ lne
+
+ # determine the mask
+ mov d2,d1
+ add -1,d1
+ not d1 # d1 = mask = ~(alignsize-1)
+ mov d1,(DCPGMR)
+
+ and d1,d0,a2 # a2 = mask & start
+
+dcpgloop:
+ # area purge
+ mov a2,d0
+ or DCPGCR_DCP,d0
+ mov d0,(a0) # DCPGCR = (mask & start) | DCPGCR_DCP
+
+ # wait for busy bit of area purge
+ setlb
+ mov (a0),d1
+ btst DCPGCR_DCPGBSY,d1
+ lne
+
+ # check purge of end address
+ add d2,a2 # a2 += alignsize
+ cmp a1,a2 # if (a2 < end) goto dcpgloop
+ bns dcpgloop
+
+ LOCAL_IRQ_RESTORE(d3)
+
+mn10300_local_dcache_flush_range_end:
+ ret [d2,d3,a2],12
+
+ .size mn10300_local_dcache_flush_page,.-mn10300_local_dcache_flush_page
+ .size mn10300_local_dcache_flush_range,.-mn10300_local_dcache_flush_range
+ .size mn10300_local_dcache_flush_range2,.-mn10300_local_dcache_flush_range2
+
+###############################################################################
+#
+# void mn10300_local_dcache_flush_inv(void)
+# Flush the entire data cache and invalidate all entries
+#
+###############################################################################
+ ALIGN
+ .globl mn10300_local_dcache_flush_inv
+ .type mn10300_local_dcache_flush_inv,@function
+mn10300_local_dcache_flush_inv:
+ movhu (CHCTR),d0
+ btst CHCTR_DCEN,d0
+ beq mn10300_local_dcache_flush_inv_end
+
+ mov DCPGCR,a0
+
+ LOCAL_CLI_SAVE(d1)
+
+ # wait for busy bit of area purge & invalidate
+ setlb
+ mov (a0),d0
+ btst DCPGCR_DCPGBSY,d0
+ lne
+
+ # set the mask to cover everything
+ clr d0
+ mov d0,(DCPGMR)
+
+ # area purge & invalidate
+ mov DCPGCR_DCP|DCPGCR_DCI,d0
+ mov d0,(a0)
+
+ # wait for busy bit of area purge & invalidate
+ setlb
+ mov (a0),d0
+ btst DCPGCR_DCPGBSY,d0
+ lne
+
+ LOCAL_IRQ_RESTORE(d1)
+
+mn10300_local_dcache_flush_inv_end:
+ ret [],0
+ .size mn10300_local_dcache_flush_inv,.-mn10300_local_dcache_flush_inv
+
+###############################################################################
+#
+# void mn10300_local_dcache_flush_inv_page(unsigned long start)
+# void mn10300_local_dcache_flush_inv_range(unsigned long start, unsigned long end)
+# void mn10300_local_dcache_flush_inv_range2(unsigned long start, unsigned long size)
+# Flush and invalidate a range of addresses on a page in the dcache
+#
+###############################################################################
+ ALIGN
+ .globl mn10300_local_dcache_flush_inv_page
+ .globl mn10300_local_dcache_flush_inv_range
+ .globl mn10300_local_dcache_flush_inv_range2
+ .type mn10300_local_dcache_flush_inv_page,@function
+ .type mn10300_local_dcache_flush_inv_range,@function
+ .type mn10300_local_dcache_flush_inv_range2,@function
+mn10300_local_dcache_flush_inv_page:
+ and ~(PAGE_SIZE-1),d0
+ mov PAGE_SIZE,d1
+mn10300_local_dcache_flush_inv_range2:
+ add d0,d1
+mn10300_local_dcache_flush_inv_range:
+ movm [d2,d3,a2],(sp)
+
+ movhu (CHCTR),d2
+ btst CHCTR_DCEN,d2
+ beq mn10300_local_dcache_flush_inv_range_end
+
+ # calculate alignsize
+ #
+ # alignsize = L1_CACHE_BYTES;
+ # for (i = (end - start - 1) / L1_CACHE_BYTES; i > 0; i >>= 1)
+ # alignsize <<= 1;
+ # d2 = alignsize
+ #
+ mov L1_CACHE_BYTES,d2
+ sub d0,d1,d3
+ add -1,d3
+ lsr L1_CACHE_SHIFT,d3
+ beq 2f
+1:
+ add d2,d2
+ lsr 1,d3
+ bne 1b
+2:
+ mov d1,a1 # a1 = end
+
+ LOCAL_CLI_SAVE(d3)
+ mov DCPGCR,a0
+
+ # wait for busy bit of area purge & invalidate
+ setlb
+ mov (a0),d1
+ btst DCPGCR_DCPGBSY,d1
+ lne
+
+ # set the mask
+ mov d2,d1
+ add -1,d1
+ not d1 # d1 = mask = ~(alignsize-1)
+ mov d1,(DCPGMR)
+
+ and d1,d0,a2 # a2 = mask & start
+
+dcpgivloop:
+ # area purge & invalidate
+ mov a2,d0
+ or DCPGCR_DCP|DCPGCR_DCI,d0
+ mov d0,(a0) # DCPGCR = (mask & start)|DCPGCR_DCP|DCPGCR_DCI
+
+ # wait for busy bit of area purge & invalidate
+ setlb
+ mov (a0),d1
+ btst DCPGCR_DCPGBSY,d1
+ lne
+
+ # check purge & invalidate of end address
+ add d2,a2 # a2 += alignsize
+ cmp a1,a2 # if (a2 < end) goto dcpgivloop
+ bns dcpgivloop
+
+ LOCAL_IRQ_RESTORE(d3)
+
+mn10300_local_dcache_flush_inv_range_end:
+ ret [d2,d3,a2],12
+ .size mn10300_local_dcache_flush_inv_page,.-mn10300_local_dcache_flush_inv_page
+ .size mn10300_local_dcache_flush_inv_range,.-mn10300_local_dcache_flush_inv_range
+ .size mn10300_local_dcache_flush_inv_range2,.-mn10300_local_dcache_flush_inv_range2
diff --git a/arch/mn10300/mm/cache-flush-by-tag.S b/arch/mn10300/mm/cache-flush-by-tag.S
new file mode 100644
index 000000000000..5cd6a27dd63e
--- /dev/null
+++ b/arch/mn10300/mm/cache-flush-by-tag.S
@@ -0,0 +1,251 @@
+/* MN10300 CPU core caching routines, using direct tag flushing
+ *
+ * Copyright (C) 2007 Red Hat, Inc. All Rights Reserved.
+ * Written by David Howells (dhowells@redhat.com)
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public Licence
+ * as published by the Free Software Foundation; either version
+ * 2 of the Licence, or (at your option) any later version.
+ */
+
+#include <linux/sys.h>
+#include <linux/linkage.h>
+#include <asm/smp.h>
+#include <asm/page.h>
+#include <asm/cache.h>
+#include <asm/irqflags.h>
+
+ .am33_2
+
+#ifndef CONFIG_SMP
+ .globl mn10300_dcache_flush
+ .globl mn10300_dcache_flush_page
+ .globl mn10300_dcache_flush_range
+ .globl mn10300_dcache_flush_range2
+ .globl mn10300_dcache_flush_inv
+ .globl mn10300_dcache_flush_inv_page
+ .globl mn10300_dcache_flush_inv_range
+ .globl mn10300_dcache_flush_inv_range2
+
+mn10300_dcache_flush = mn10300_local_dcache_flush
+mn10300_dcache_flush_page = mn10300_local_dcache_flush_page
+mn10300_dcache_flush_range = mn10300_local_dcache_flush_range
+mn10300_dcache_flush_range2 = mn10300_local_dcache_flush_range2
+mn10300_dcache_flush_inv = mn10300_local_dcache_flush_inv
+mn10300_dcache_flush_inv_page = mn10300_local_dcache_flush_inv_page
+mn10300_dcache_flush_inv_range = mn10300_local_dcache_flush_inv_range
+mn10300_dcache_flush_inv_range2 = mn10300_local_dcache_flush_inv_range2
+
+#endif /* !CONFIG_SMP */
+
+###############################################################################
+#
+# void mn10300_local_dcache_flush(void)
+# Flush the entire data cache back to RAM
+#
+###############################################################################
+ ALIGN
+ .globl mn10300_local_dcache_flush
+ .type mn10300_local_dcache_flush,@function
+mn10300_local_dcache_flush:
+ movhu (CHCTR),d0
+ btst CHCTR_DCEN,d0
+ beq mn10300_local_dcache_flush_end
+
+ # read the addresses tagged in the cache's tag RAM and attempt to flush
+ # those addresses specifically
+ # - we rely on the hardware to filter out invalid tag entry addresses
+ mov DCACHE_TAG(0,0),a0 # dcache tag RAM access address
+ mov DCACHE_PURGE(0,0),a1 # dcache purge request address
+ mov L1_CACHE_NWAYS*L1_CACHE_NENTRIES,d1 # total number of entries
+
+mn10300_local_dcache_flush_loop:
+ mov (a0),d0
+ and L1_CACHE_TAG_ADDRESS|L1_CACHE_TAG_ENTRY,d0
+ or L1_CACHE_TAG_VALID,d0 # retain valid entries in the
+ # cache
+ mov d0,(a1) # conditional purge
+
+ add L1_CACHE_BYTES,a0
+ add L1_CACHE_BYTES,a1
+ add -1,d1
+ bne mn10300_local_dcache_flush_loop
+
+mn10300_local_dcache_flush_end:
+ ret [],0
+ .size mn10300_local_dcache_flush,.-mn10300_local_dcache_flush
+
+###############################################################################
+#
+# void mn10300_local_dcache_flush_page(unsigned long start)
+# void mn10300_local_dcache_flush_range(unsigned long start, unsigned long end)
+# void mn10300_local_dcache_flush_range2(unsigned long start, unsigned long size)
+# Flush a range of addresses on a page in the dcache
+#
+###############################################################################
+ ALIGN
+ .globl mn10300_local_dcache_flush_page
+ .globl mn10300_local_dcache_flush_range
+ .globl mn10300_local_dcache_flush_range2
+ .type mn10300_local_dcache_flush_page,@function
+ .type mn10300_local_dcache_flush_range,@function
+ .type mn10300_local_dcache_flush_range2,@function
+mn10300_local_dcache_flush_page:
+ and ~(PAGE_SIZE-1),d0
+ mov PAGE_SIZE,d1
+mn10300_local_dcache_flush_range2:
+ add d0,d1
+mn10300_local_dcache_flush_range:
+ movm [d2],(sp)
+
+ movhu (CHCTR),d2
+ btst CHCTR_DCEN,d2
+ beq mn10300_local_dcache_flush_range_end
+
+ sub d0,d1,a0
+ cmp MN10300_DCACHE_FLUSH_BORDER,a0
+ ble 1f
+
+ movm (sp),[d2]
+ bra mn10300_local_dcache_flush
+1:
+
+ # round start addr down
+ and L1_CACHE_TAG_ADDRESS|L1_CACHE_TAG_ENTRY,d0
+ mov d0,a1
+
+ add L1_CACHE_BYTES,d1 # round end addr up
+ and L1_CACHE_TAG_ADDRESS|L1_CACHE_TAG_ENTRY,d1
+
+ # write a request to flush all instances of an address from the cache
+ mov DCACHE_PURGE(0,0),a0
+ mov a1,d0
+ and L1_CACHE_TAG_ENTRY,d0
+ add d0,a0 # starting dcache purge control
+ # reg address
+
+ sub a1,d1
+ lsr L1_CACHE_SHIFT,d1 # total number of entries to
+ # examine
+
+ or L1_CACHE_TAG_VALID,a1 # retain valid entries in the
+ # cache
+
+mn10300_local_dcache_flush_range_loop:
+ mov a1,(L1_CACHE_WAYDISP*0,a0) # conditionally purge this line
+ # all ways
+
+ add L1_CACHE_BYTES,a0
+ add L1_CACHE_BYTES,a1
+ and ~L1_CACHE_WAYDISP,a0 # make sure we stay on way 0
+ add -1,d1
+ bne mn10300_local_dcache_flush_range_loop
+
+mn10300_local_dcache_flush_range_end:
+ ret [d2],4
+
+ .size mn10300_local_dcache_flush_page,.-mn10300_local_dcache_flush_page
+ .size mn10300_local_dcache_flush_range,.-mn10300_local_dcache_flush_range
+ .size mn10300_local_dcache_flush_range2,.-mn10300_local_dcache_flush_range2
+
+###############################################################################
+#
+# void mn10300_local_dcache_flush_inv(void)
+# Flush the entire data cache and invalidate all entries
+#
+###############################################################################
+ ALIGN
+ .globl mn10300_local_dcache_flush_inv
+ .type mn10300_local_dcache_flush_inv,@function
+mn10300_local_dcache_flush_inv:
+ movhu (CHCTR),d0
+ btst CHCTR_DCEN,d0
+ beq mn10300_local_dcache_flush_inv_end
+
+ mov L1_CACHE_NENTRIES,d1
+ clr a1
+
+mn10300_local_dcache_flush_inv_loop:
+ mov (DCACHE_PURGE_WAY0(0),a1),d0 # unconditional purge
+ mov (DCACHE_PURGE_WAY1(0),a1),d0 # unconditional purge
+ mov (DCACHE_PURGE_WAY2(0),a1),d0 # unconditional purge
+ mov (DCACHE_PURGE_WAY3(0),a1),d0 # unconditional purge
+
+ add L1_CACHE_BYTES,a1
+ add -1,d1
+ bne mn10300_local_dcache_flush_inv_loop
+
+mn10300_local_dcache_flush_inv_end:
+ ret [],0
+ .size mn10300_local_dcache_flush_inv,.-mn10300_local_dcache_flush_inv
+
+###############################################################################
+#
+# void mn10300_local_dcache_flush_inv_page(unsigned long start)
+# void mn10300_local_dcache_flush_inv_range(unsigned long start, unsigned long end)
+# void mn10300_local_dcache_flush_inv_range2(unsigned long start, unsigned long size)
+# Flush and invalidate a range of addresses on a page in the dcache
+#
+###############################################################################
+ ALIGN
+ .globl mn10300_local_dcache_flush_inv_page
+ .globl mn10300_local_dcache_flush_inv_range
+ .globl mn10300_local_dcache_flush_inv_range2
+ .type mn10300_local_dcache_flush_inv_page,@function
+ .type mn10300_local_dcache_flush_inv_range,@function
+ .type mn10300_local_dcache_flush_inv_range2,@function
+mn10300_local_dcache_flush_inv_page:
+ and ~(PAGE_SIZE-1),d0
+ mov PAGE_SIZE,d1
+mn10300_local_dcache_flush_inv_range2:
+ add d0,d1
+mn10300_local_dcache_flush_inv_range:
+ movm [d2],(sp)
+
+ movhu (CHCTR),d2
+ btst CHCTR_DCEN,d2
+ beq mn10300_local_dcache_flush_inv_range_end
+
+ sub d0,d1,a0
+ cmp MN10300_DCACHE_FLUSH_INV_BORDER,a0
+ ble 1f
+
+ movm (sp),[d2]
+ bra mn10300_local_dcache_flush_inv
+1:
+
+ and L1_CACHE_TAG_ADDRESS|L1_CACHE_TAG_ENTRY,d0 # round start
+ # addr down
+ mov d0,a1
+
+ add L1_CACHE_BYTES,d1 # round end addr up
+ and L1_CACHE_TAG_ADDRESS|L1_CACHE_TAG_ENTRY,d1
+
+ # write a request to flush and invalidate all instances of an address
+ # from the cache
+ mov DCACHE_PURGE(0,0),a0
+ mov a1,d0
+ and L1_CACHE_TAG_ENTRY,d0
+ add d0,a0 # starting dcache purge control
+ # reg address
+
+ sub a1,d1
+ lsr L1_CACHE_SHIFT,d1 # total number of entries to
+ # examine
+
+mn10300_local_dcache_flush_inv_range_loop:
+ mov a1,(L1_CACHE_WAYDISP*0,a0) # conditionally purge this line
+ # in all ways
+
+ add L1_CACHE_BYTES,a0
+ add L1_CACHE_BYTES,a1
+ and ~L1_CACHE_WAYDISP,a0 # make sure we stay on way 0
+ add -1,d1
+ bne mn10300_local_dcache_flush_inv_range_loop
+
+mn10300_local_dcache_flush_inv_range_end:
+ ret [d2],4
+ .size mn10300_local_dcache_flush_inv_page,.-mn10300_local_dcache_flush_inv_page
+ .size mn10300_local_dcache_flush_inv_range,.-mn10300_local_dcache_flush_inv_range
+ .size mn10300_local_dcache_flush_inv_range2,.-mn10300_local_dcache_flush_inv_range2
diff --git a/arch/mn10300/mm/cache-flush-icache.c b/arch/mn10300/mm/cache-flush-icache.c
new file mode 100644
index 000000000000..fdb1a9db20f0
--- /dev/null
+++ b/arch/mn10300/mm/cache-flush-icache.c
@@ -0,0 +1,155 @@
+/* Flush dcache and invalidate icache when the dcache is in writeback mode
+ *
+ * Copyright (C) 2010 Red Hat, Inc. All Rights Reserved.
+ * Written by David Howells (dhowells@redhat.com)
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public Licence
+ * as published by the Free Software Foundation; either version
+ * 2 of the Licence, or (at your option) any later version.
+ */
+#include <linux/module.h>
+#include <linux/mm.h>
+#include <asm/cacheflush.h>
+#include <asm/smp.h>
+#include "cache-smp.h"
+
+/**
+ * flush_icache_page - Flush a page from the dcache and invalidate the icache
+ * @vma: The VMA the page is part of.
+ * @page: The page to be flushed.
+ *
+ * Write a page back from the dcache and invalidate the icache so that we can
+ * run code from it that we've just written into it
+ */
+void flush_icache_page(struct vm_area_struct *vma, struct page *page)
+{
+ unsigned long start = page_to_phys(page);
+ unsigned long flags;
+
+ flags = smp_lock_cache();
+
+ mn10300_local_dcache_flush_page(start);
+ mn10300_local_icache_inv_page(start);
+
+ smp_cache_call(SMP_IDCACHE_INV_FLUSH_RANGE, start, start + PAGE_SIZE);
+ smp_unlock_cache(flags);
+}
+EXPORT_SYMBOL(flush_icache_page);
+
+/**
+ * flush_icache_page_range - Flush dcache and invalidate icache for part of a
+ * single page
+ * @start: The starting virtual address of the page part.
+ * @end: The ending virtual address of the page part.
+ *
+ * Flush the dcache and invalidate the icache for part of a single page, as
+ * determined by the virtual addresses given. The page must be in the paged
+ * area.
+ */
+static void flush_icache_page_range(unsigned long start, unsigned long end)
+{
+ unsigned long addr, size, off;
+ struct page *page;
+ pgd_t *pgd;
+ pud_t *pud;
+ pmd_t *pmd;
+ pte_t *ppte, pte;
+
+ /* work out how much of the page to flush */
+ off = start & ~PAGE_MASK;
+ size = end - start;
+
+ /* get the physical address the page is mapped to from the page
+ * tables */
+ pgd = pgd_offset(current->mm, start);
+ if (!pgd || !pgd_val(*pgd))
+ return;
+
+ pud = pud_offset(pgd, start);
+ if (!pud || !pud_val(*pud))
+ return;
+
+ pmd = pmd_offset(pud, start);
+ if (!pmd || !pmd_val(*pmd))
+ return;
+
+ ppte = pte_offset_map(pmd, start);
+ if (!ppte)
+ return;
+ pte = *ppte;
+ pte_unmap(ppte);
+
+ if (pte_none(pte))
+ return;
+
+ page = pte_page(pte);
+ if (!page)
+ return;
+
+ addr = page_to_phys(page);
+
+ /* flush the dcache and invalidate the icache coverage on that
+ * region */
+ mn10300_local_dcache_flush_range2(addr + off, size);
+ mn10300_local_icache_inv_range2(addr + off, size);
+ smp_cache_call(SMP_IDCACHE_INV_FLUSH_RANGE, start, end);
+}
+
+/**
+ * flush_icache_range - Globally flush dcache and invalidate icache for region
+ * @start: The starting virtual address of the region.
+ * @end: The ending virtual address of the region.
+ *
+ * This is used by the kernel to globally flush some code it has just written
+ * from the dcache back to RAM and then to globally invalidate the icache over
+ * that region so that that code can be run on all CPUs in the system.
+ */
+void flush_icache_range(unsigned long start, unsigned long end)
+{
+ unsigned long start_page, end_page;
+ unsigned long flags;
+
+ flags = smp_lock_cache();
+
+ if (end > 0x80000000UL) {
+ /* addresses above 0xa0000000 do not go through the cache */
+ if (end > 0xa0000000UL) {
+ end = 0xa0000000UL;
+ if (start >= end)
+ goto done;
+ }
+
+ /* kernel addresses between 0x80000000 and 0x9fffffff do not
+ * require page tables, so we just map such addresses
+ * directly */
+ start_page = (start >= 0x80000000UL) ? start : 0x80000000UL;
+ mn10300_local_dcache_flush_range(start_page, end);
+ mn10300_local_icache_inv_range(start_page, end);
+ smp_cache_call(SMP_IDCACHE_INV_FLUSH_RANGE, start_page, end);
+ if (start_page == start)
+ goto done;
+ end = start_page;
+ }
+
+ start_page = start & PAGE_MASK;
+ end_page = (end - 1) & PAGE_MASK;
+
+ if (start_page == end_page) {
+ /* the first and last bytes are on the same page */
+ flush_icache_page_range(start, end);
+ } else if (start_page + 1 == end_page) {
+ /* split over two virtually contiguous pages */
+ flush_icache_page_range(start, end_page);
+ flush_icache_page_range(end_page, end);
+ } else {
+ /* more than 2 pages; just flush the entire cache */
+ mn10300_dcache_flush();
+ mn10300_icache_inv();
+ smp_cache_call(SMP_IDCACHE_INV_FLUSH, 0, 0);
+ }
+
+done:
+ smp_unlock_cache(flags);
+}
+EXPORT_SYMBOL(flush_icache_range);
diff --git a/arch/mn10300/mm/cache-flush-mn10300.S b/arch/mn10300/mm/cache-flush-mn10300.S
deleted file mode 100644
index c8ed1cbac107..000000000000
--- a/arch/mn10300/mm/cache-flush-mn10300.S
+++ /dev/null
@@ -1,192 +0,0 @@
-/* MN10300 CPU core caching routines
- *
- * Copyright (C) 2007 Red Hat, Inc. All Rights Reserved.
- * Written by David Howells (dhowells@redhat.com)
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public Licence
- * as published by the Free Software Foundation; either version
- * 2 of the Licence, or (at your option) any later version.
- */
-
-#include <linux/sys.h>
-#include <linux/linkage.h>
-#include <asm/smp.h>
-#include <asm/page.h>
-#include <asm/cache.h>
-
- .am33_2
- .globl mn10300_dcache_flush
- .globl mn10300_dcache_flush_page
- .globl mn10300_dcache_flush_range
- .globl mn10300_dcache_flush_range2
- .globl mn10300_dcache_flush_inv
- .globl mn10300_dcache_flush_inv_page
- .globl mn10300_dcache_flush_inv_range
- .globl mn10300_dcache_flush_inv_range2
-
-###############################################################################
-#
-# void mn10300_dcache_flush(void)
-# Flush the entire data cache back to RAM
-#
-###############################################################################
- ALIGN
-mn10300_dcache_flush:
- movhu (CHCTR),d0
- btst CHCTR_DCEN,d0
- beq mn10300_dcache_flush_end
-
- # read the addresses tagged in the cache's tag RAM and attempt to flush
- # those addresses specifically
- # - we rely on the hardware to filter out invalid tag entry addresses
- mov DCACHE_TAG(0,0),a0 # dcache tag RAM access address
- mov DCACHE_PURGE(0,0),a1 # dcache purge request address
- mov L1_CACHE_NWAYS*L1_CACHE_NENTRIES,d1 # total number of entries
-
-mn10300_dcache_flush_loop:
- mov (a0),d0
- and L1_CACHE_TAG_ADDRESS|L1_CACHE_TAG_ENTRY,d0
- or L1_CACHE_TAG_VALID,d0 # retain valid entries in the
- # cache
- mov d0,(a1) # conditional purge
-
-mn10300_dcache_flush_skip:
- add L1_CACHE_BYTES,a0
- add L1_CACHE_BYTES,a1
- add -1,d1
- bne mn10300_dcache_flush_loop
-
-mn10300_dcache_flush_end:
- ret [],0
-
-###############################################################################
-#
-# void mn10300_dcache_flush_page(unsigned start)
-# void mn10300_dcache_flush_range(unsigned start, unsigned end)
-# void mn10300_dcache_flush_range2(unsigned start, unsigned size)
-# Flush a range of addresses on a page in the dcache
-#
-###############################################################################
- ALIGN
-mn10300_dcache_flush_page:
- mov PAGE_SIZE,d1
-mn10300_dcache_flush_range2:
- add d0,d1
-mn10300_dcache_flush_range:
- movm [d2,d3],(sp)
-
- movhu (CHCTR),d2
- btst CHCTR_DCEN,d2
- beq mn10300_dcache_flush_range_end
-
- # round start addr down
- and L1_CACHE_TAG_ADDRESS|L1_CACHE_TAG_ENTRY,d0
- mov d0,a1
-
- add L1_CACHE_BYTES,d1 # round end addr up
- and L1_CACHE_TAG_ADDRESS|L1_CACHE_TAG_ENTRY,d1
-
- # write a request to flush all instances of an address from the cache
- mov DCACHE_PURGE(0,0),a0
- mov a1,d0
- and L1_CACHE_TAG_ENTRY,d0
- add d0,a0 # starting dcache purge control
- # reg address
-
- sub a1,d1
- lsr L1_CACHE_SHIFT,d1 # total number of entries to
- # examine
-
- or L1_CACHE_TAG_VALID,a1 # retain valid entries in the
- # cache
-
-mn10300_dcache_flush_range_loop:
- mov a1,(L1_CACHE_WAYDISP*0,a0) # conditionally purge this line
- # all ways
-
- add L1_CACHE_BYTES,a0
- add L1_CACHE_BYTES,a1
- and ~L1_CACHE_WAYDISP,a0 # make sure way stay on way 0
- add -1,d1
- bne mn10300_dcache_flush_range_loop
-
-mn10300_dcache_flush_range_end:
- ret [d2,d3],8
-
-###############################################################################
-#
-# void mn10300_dcache_flush_inv(void)
-# Flush the entire data cache and invalidate all entries
-#
-###############################################################################
- ALIGN
-mn10300_dcache_flush_inv:
- movhu (CHCTR),d0
- btst CHCTR_DCEN,d0
- beq mn10300_dcache_flush_inv_end
-
- # hit each line in the dcache with an unconditional purge
- mov DCACHE_PURGE(0,0),a1 # dcache purge request address
- mov L1_CACHE_NWAYS*L1_CACHE_NENTRIES,d1 # total number of entries
-
-mn10300_dcache_flush_inv_loop:
- mov (a1),d0 # unconditional purge
-
- add L1_CACHE_BYTES,a1
- add -1,d1
- bne mn10300_dcache_flush_inv_loop
-
-mn10300_dcache_flush_inv_end:
- ret [],0
-
-###############################################################################
-#
-# void mn10300_dcache_flush_inv_page(unsigned start)
-# void mn10300_dcache_flush_inv_range(unsigned start, unsigned end)
-# void mn10300_dcache_flush_inv_range2(unsigned start, unsigned size)
-# Flush and invalidate a range of addresses on a page in the dcache
-#
-###############################################################################
- ALIGN
-mn10300_dcache_flush_inv_page:
- mov PAGE_SIZE,d1
-mn10300_dcache_flush_inv_range2:
- add d0,d1
-mn10300_dcache_flush_inv_range:
- movm [d2,d3],(sp)
- movhu (CHCTR),d2
- btst CHCTR_DCEN,d2
- beq mn10300_dcache_flush_inv_range_end
-
- and L1_CACHE_TAG_ADDRESS|L1_CACHE_TAG_ENTRY,d0 # round start
- # addr down
- mov d0,a1
-
- add L1_CACHE_BYTES,d1 # round end addr up
- and L1_CACHE_TAG_ADDRESS|L1_CACHE_TAG_ENTRY,d1
-
- # write a request to flush and invalidate all instances of an address
- # from the cache
- mov DCACHE_PURGE(0,0),a0
- mov a1,d0
- and L1_CACHE_TAG_ENTRY,d0
- add d0,a0 # starting dcache purge control
- # reg address
-
- sub a1,d1
- lsr L1_CACHE_SHIFT,d1 # total number of entries to
- # examine
-
-mn10300_dcache_flush_inv_range_loop:
- mov a1,(L1_CACHE_WAYDISP*0,a0) # conditionally purge this line
- # in all ways
-
- add L1_CACHE_BYTES,a0
- add L1_CACHE_BYTES,a1
- and ~L1_CACHE_WAYDISP,a0 # make sure way stay on way 0
- add -1,d1
- bne mn10300_dcache_flush_inv_range_loop
-
-mn10300_dcache_flush_inv_range_end:
- ret [d2,d3],8
diff --git a/arch/mn10300/mm/cache-inv-by-reg.S b/arch/mn10300/mm/cache-inv-by-reg.S
new file mode 100644
index 000000000000..c8950861ed77
--- /dev/null
+++ b/arch/mn10300/mm/cache-inv-by-reg.S
@@ -0,0 +1,356 @@
+/* MN10300 CPU cache invalidation routines, using automatic purge registers
+ *
+ * Copyright (C) 2007 Red Hat, Inc. All Rights Reserved.
+ * Written by David Howells (dhowells@redhat.com)
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public Licence
+ * as published by the Free Software Foundation; either version
+ * 2 of the Licence, or (at your option) any later version.
+ */
+#include <linux/sys.h>
+#include <linux/linkage.h>
+#include <asm/smp.h>
+#include <asm/page.h>
+#include <asm/cache.h>
+#include <asm/irqflags.h>
+#include <asm/cacheflush.h>
+
+#define mn10300_local_dcache_inv_range_intr_interval \
+ +((1 << MN10300_DCACHE_INV_RANGE_INTR_LOG2_INTERVAL) - 1)
+
+#if mn10300_local_dcache_inv_range_intr_interval > 0xff
+#error MN10300_DCACHE_INV_RANGE_INTR_LOG2_INTERVAL must be 8 or less
+#endif
+
+ .am33_2
+
+#ifndef CONFIG_SMP
+ .globl mn10300_icache_inv
+ .globl mn10300_icache_inv_page
+ .globl mn10300_icache_inv_range
+ .globl mn10300_icache_inv_range2
+ .globl mn10300_dcache_inv
+ .globl mn10300_dcache_inv_page
+ .globl mn10300_dcache_inv_range
+ .globl mn10300_dcache_inv_range2
+
+mn10300_icache_inv = mn10300_local_icache_inv
+mn10300_icache_inv_page = mn10300_local_icache_inv_page
+mn10300_icache_inv_range = mn10300_local_icache_inv_range
+mn10300_icache_inv_range2 = mn10300_local_icache_inv_range2
+mn10300_dcache_inv = mn10300_local_dcache_inv
+mn10300_dcache_inv_page = mn10300_local_dcache_inv_page
+mn10300_dcache_inv_range = mn10300_local_dcache_inv_range
+mn10300_dcache_inv_range2 = mn10300_local_dcache_inv_range2
+
+#endif /* !CONFIG_SMP */
+
+###############################################################################
+#
+# void mn10300_local_icache_inv(void)
+# Invalidate the entire icache
+#
+###############################################################################
+ ALIGN
+ .globl mn10300_local_icache_inv
+ .type mn10300_local_icache_inv,@function
+mn10300_local_icache_inv:
+ mov CHCTR,a0
+
+ movhu (a0),d0
+ btst CHCTR_ICEN,d0
+ beq mn10300_local_icache_inv_end
+
+ # invalidate
+ or CHCTR_ICINV,d0
+ movhu d0,(a0)
+ movhu (a0),d0
+
+mn10300_local_icache_inv_end:
+ ret [],0
+ .size mn10300_local_icache_inv,.-mn10300_local_icache_inv
+
+###############################################################################
+#
+# void mn10300_local_dcache_inv(void)
+# Invalidate the entire dcache
+#
+###############################################################################
+ ALIGN
+ .globl mn10300_local_dcache_inv
+ .type mn10300_local_dcache_inv,@function
+mn10300_local_dcache_inv:
+ mov CHCTR,a0
+
+ movhu (a0),d0
+ btst CHCTR_DCEN,d0
+ beq mn10300_local_dcache_inv_end
+
+ # invalidate
+ or CHCTR_DCINV,d0
+ movhu d0,(a0)
+ movhu (a0),d0
+
+mn10300_local_dcache_inv_end:
+ ret [],0
+ .size mn10300_local_dcache_inv,.-mn10300_local_dcache_inv
+
+###############################################################################
+#
+# void mn10300_local_dcache_inv_range(unsigned long start, unsigned long end)
+# void mn10300_local_dcache_inv_range2(unsigned long start, unsigned long size)
+# void mn10300_local_dcache_inv_page(unsigned long start)
+# Invalidate a range of addresses on a page in the dcache
+#
+###############################################################################
+ ALIGN
+ .globl mn10300_local_dcache_inv_page
+ .globl mn10300_local_dcache_inv_range
+ .globl mn10300_local_dcache_inv_range2
+ .type mn10300_local_dcache_inv_page,@function
+ .type mn10300_local_dcache_inv_range,@function
+ .type mn10300_local_dcache_inv_range2,@function
+mn10300_local_dcache_inv_page:
+ and ~(PAGE_SIZE-1),d0
+ mov PAGE_SIZE,d1
+mn10300_local_dcache_inv_range2:
+ add d0,d1
+mn10300_local_dcache_inv_range:
+ # If we are in writeback mode we check the start and end alignments,
+ # and if they're not cacheline-aligned, we must flush any bits outside
+ # the range that share cachelines with stuff inside the range
+#ifdef CONFIG_MN10300_CACHE_WBACK
+ btst ~(L1_CACHE_BYTES-1),d0
+ bne 1f
+ btst ~(L1_CACHE_BYTES-1),d1
+ beq 2f
+1:
+ bra mn10300_local_dcache_flush_inv_range
+2:
+#endif /* CONFIG_MN10300_CACHE_WBACK */
+
+ movm [d2,d3,a2],(sp)
+
+ mov CHCTR,a0
+ movhu (a0),d2
+ btst CHCTR_DCEN,d2
+ beq mn10300_local_dcache_inv_range_end
+
+ # round the addresses out to be full cachelines, unless we're in
+ # writeback mode, in which case we would be in flush and invalidate by
+ # now
+#ifndef CONFIG_MN10300_CACHE_WBACK
+ and L1_CACHE_TAG_ADDRESS|L1_CACHE_TAG_ENTRY,d0 # round start
+ # addr down
+
+ mov L1_CACHE_BYTES-1,d2
+ add d2,d1
+ and L1_CACHE_TAG_ADDRESS|L1_CACHE_TAG_ENTRY,d1 # round end addr up
+#endif /* !CONFIG_MN10300_CACHE_WBACK */
+
+ sub d0,d1,d2 # calculate the total size
+ mov d0,a2 # A2 = start address
+ mov d1,a1 # A1 = end address
+
+ LOCAL_CLI_SAVE(d3)
+
+ mov DCPGCR,a0 # make sure the purger isn't busy
+ setlb
+ mov (a0),d0
+ btst DCPGCR_DCPGBSY,d0
+ lne
+
+ # skip initial address alignment calculation if address is zero
+ mov d2,d1
+ cmp 0,a2
+ beq 1f
+
+dcivloop:
+ /* calculate alignsize
+ *
+ * alignsize = L1_CACHE_BYTES;
+ * while (!(start & alignsize)) {
+ * alignsize <<=1;
+ * }
+ * d1 = alignsize;
+ */
+ mov L1_CACHE_BYTES,d1
+ lsr 1,d1
+ setlb
+ add d1,d1
+ mov d1,d0
+ and a2,d0
+ leq
+
+1:
+ /* calculate invsize
+ *
+ * if (totalsize > alignsize) {
+ * invsize = alignsize;
+ * } else {
+ * invsize = totalsize;
+ * tmp = 0x80000000;
+ * while (!(invsize & tmp)) {
+ * tmp >>= 1;
+ * }
+ * invsize = tmp;
+ * }
+ * d1 = invsize
+ */
+ cmp d2,d1
+ bns 2f
+ mov d2,d1
+
+ mov 0x80000000,d0 # start from 31bit=1
+ setlb
+ lsr 1,d0
+ mov d0,e0
+ and d1,e0
+ leq
+ mov d0,d1
+
+2:
+ /* set mask
+ *
+ * mask = ~(invsize-1);
+ * DCPGMR = mask;
+ */
+ mov d1,d0
+ add -1,d0
+ not d0
+ mov d0,(DCPGMR)
+
+ # invalidate area
+ mov a2,d0
+ or DCPGCR_DCI,d0
+ mov d0,(a0) # DCPGCR = (mask & start) | DCPGCR_DCI
+
+ setlb # wait for the purge to complete
+ mov (a0),d0
+ btst DCPGCR_DCPGBSY,d0
+ lne
+
+ sub d1,d2 # decrease size remaining
+ add d1,a2 # increase next start address
+
+ /* check invalidating of end address
+ *
+ * a2 = a2 + invsize
+ * if (a2 < end) {
+ * goto dcivloop;
+ * } */
+ cmp a1,a2
+ bns dcivloop
+
+ LOCAL_IRQ_RESTORE(d3)
+
+mn10300_local_dcache_inv_range_end:
+ ret [d2,d3,a2],12
+ .size mn10300_local_dcache_inv_page,.-mn10300_local_dcache_inv_page
+ .size mn10300_local_dcache_inv_range,.-mn10300_local_dcache_inv_range
+ .size mn10300_local_dcache_inv_range2,.-mn10300_local_dcache_inv_range2
+
+###############################################################################
+#
+# void mn10300_local_icache_inv_page(unsigned long start)
+# void mn10300_local_icache_inv_range2(unsigned long start, unsigned long size)
+# void mn10300_local_icache_inv_range(unsigned long start, unsigned long end)
+# Invalidate a range of addresses on a page in the icache
+#
+###############################################################################
+ ALIGN
+ .globl mn10300_local_icache_inv_page
+ .globl mn10300_local_icache_inv_range
+ .globl mn10300_local_icache_inv_range2
+ .type mn10300_local_icache_inv_page,@function
+ .type mn10300_local_icache_inv_range,@function
+ .type mn10300_local_icache_inv_range2,@function
+mn10300_local_icache_inv_page:
+ and ~(PAGE_SIZE-1),d0
+ mov PAGE_SIZE,d1
+mn10300_local_icache_inv_range2:
+ add d0,d1
+mn10300_local_icache_inv_range:
+ movm [d2,d3,a2],(sp)
+
+ mov CHCTR,a0
+ movhu (a0),d2
+ btst CHCTR_ICEN,d2
+ beq mn10300_local_icache_inv_range_reg_end
+
+ /* calculate alignsize
+ *
+ * alignsize = L1_CACHE_BYTES;
+ * for (i = (end - start - 1) / L1_CACHE_BYTES ; i > 0; i >>= 1) {
+ * alignsize <<= 1;
+ * }
+ * d2 = alignsize;
+ */
+ mov L1_CACHE_BYTES,d2
+ sub d0,d1,d3
+ add -1,d3
+ lsr L1_CACHE_SHIFT,d3
+ beq 2f
+1:
+ add d2,d2
+ lsr 1,d3
+ bne 1b
+2:
+
+ /* a1 = end */
+ mov d1,a1
+
+ LOCAL_CLI_SAVE(d3)
+
+ mov ICIVCR,a0
+ /* wait for busy bit of area invalidation */
+ setlb
+ mov (a0),d1
+ btst ICIVCR_ICIVBSY,d1
+ lne
+
+ /* set mask
+ *
+ * mask = ~(alignsize-1);
+ * ICIVMR = mask;
+ */
+ mov d2,d1
+ add -1,d1
+ not d1
+ mov d1,(ICIVMR)
+ /* a2 = mask & start */
+ and d1,d0,a2
+
+icivloop:
+ /* area invalidate
+ *
+ * ICIVCR = (mask & start) | ICIVCR_ICI
+ */
+ mov a2,d0
+ or ICIVCR_ICI,d0
+ mov d0,(a0)
+
+ /* wait for busy bit of area invalidation */
+ setlb
+ mov (a0),d1
+ btst ICIVCR_ICIVBSY,d1
+ lne
+
+ /* check invalidating of end address
+ *
+ * a2 = a2 + alignsize
+ * if (a2 < end) {
+ * goto icivloop;
+ * } */
+ add d2,a2
+ cmp a1,a2
+ bns icivloop
+
+ LOCAL_IRQ_RESTORE(d3)
+
+mn10300_local_icache_inv_range_reg_end:
+ ret [d2,d3,a2],12
+ .size mn10300_local_icache_inv_page,.-mn10300_local_icache_inv_page
+ .size mn10300_local_icache_inv_range,.-mn10300_local_icache_inv_range
+ .size mn10300_local_icache_inv_range2,.-mn10300_local_icache_inv_range2
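Note (illustration only, not part of the patch): the register-based dcache purger above can only describe one naturally aligned power-of-two block per pass, because DCPGMR holds a power-of-two mask, so mn10300_local_dcache_inv_range peels the range off block by block. The host-side C model below sketches that decomposition under stated assumptions; CACHE_LINE, inv_range_model and the printf call are stand-ins for L1_CACHE_BYTES, the assembly loop and the DCPGMR/DCPGCR writes.

#include <stdio.h>

#define CACHE_LINE 32UL	/* stand-in for L1_CACHE_BYTES */

static void inv_range_model(unsigned long start, unsigned long size)
{
	while (size) {
		unsigned long chunk = CACHE_LINE;

		/* grow the block while the start address stays aligned to it */
		while (start && !(start & chunk) && chunk < 0x80000000UL)
			chunk <<= 1;

		/* cap it at the largest power of two not exceeding the remainder */
		if (chunk > size) {
			chunk = 0x80000000UL;
			while (!(size & chunk))
				chunk >>= 1;
		}

		/* the real code writes DCPGMR = ~(chunk - 1), then
		 * DCPGCR = start | DCPGCR_DCI and polls DCPGCR_DCPGBSY */
		printf("invalidate %#lx..%#lx\n", start, start + chunk - 1);

		start += chunk;
		size -= chunk;
	}
}

int main(void)
{
	inv_range_model(0x1000e0, 0x260);	/* arbitrary cacheline-aligned example */
	return 0;
}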
diff --git a/arch/mn10300/mm/cache-inv-by-tag.S b/arch/mn10300/mm/cache-inv-by-tag.S
new file mode 100644
index 000000000000..e9713b40c0ff
--- /dev/null
+++ b/arch/mn10300/mm/cache-inv-by-tag.S
@@ -0,0 +1,348 @@
+/* MN10300 CPU core caching routines
+ *
+ * Copyright (C) 2007 Red Hat, Inc. All Rights Reserved.
+ * Written by David Howells (dhowells@redhat.com)
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public Licence
+ * as published by the Free Software Foundation; either version
+ * 2 of the Licence, or (at your option) any later version.
+ */
+#include <linux/sys.h>
+#include <linux/linkage.h>
+#include <asm/smp.h>
+#include <asm/page.h>
+#include <asm/cache.h>
+#include <asm/irqflags.h>
+#include <asm/cacheflush.h>
+
+#define mn10300_local_dcache_inv_range_intr_interval \
+ +((1 << MN10300_DCACHE_INV_RANGE_INTR_LOG2_INTERVAL) - 1)
+
+#if mn10300_local_dcache_inv_range_intr_interval > 0xff
+#error MN10300_DCACHE_INV_RANGE_INTR_LOG2_INTERVAL must be 8 or less
+#endif
+
+ .am33_2
+
+ .globl mn10300_local_icache_inv_page
+ .globl mn10300_local_icache_inv_range
+ .globl mn10300_local_icache_inv_range2
+
+mn10300_local_icache_inv_page = mn10300_local_icache_inv
+mn10300_local_icache_inv_range = mn10300_local_icache_inv
+mn10300_local_icache_inv_range2 = mn10300_local_icache_inv
+
+#ifndef CONFIG_SMP
+ .globl mn10300_icache_inv
+ .globl mn10300_icache_inv_page
+ .globl mn10300_icache_inv_range
+ .globl mn10300_icache_inv_range2
+ .globl mn10300_dcache_inv
+ .globl mn10300_dcache_inv_page
+ .globl mn10300_dcache_inv_range
+ .globl mn10300_dcache_inv_range2
+
+mn10300_icache_inv = mn10300_local_icache_inv
+mn10300_icache_inv_page = mn10300_local_icache_inv_page
+mn10300_icache_inv_range = mn10300_local_icache_inv_range
+mn10300_icache_inv_range2 = mn10300_local_icache_inv_range2
+mn10300_dcache_inv = mn10300_local_dcache_inv
+mn10300_dcache_inv_page = mn10300_local_dcache_inv_page
+mn10300_dcache_inv_range = mn10300_local_dcache_inv_range
+mn10300_dcache_inv_range2 = mn10300_local_dcache_inv_range2
+
+#endif /* !CONFIG_SMP */
+
+###############################################################################
+#
+# void mn10300_local_icache_inv(void)
+# Invalidate the entire icache
+#
+###############################################################################
+ ALIGN
+ .globl mn10300_local_icache_inv
+ .type mn10300_local_icache_inv,@function
+mn10300_local_icache_inv:
+ mov CHCTR,a0
+
+ movhu (a0),d0
+ btst CHCTR_ICEN,d0
+ beq mn10300_local_icache_inv_end
+
+#if defined(CONFIG_AM33_2) || defined(CONFIG_AM33_3)
+ LOCAL_CLI_SAVE(d1)
+
+ # disable the icache
+ and ~CHCTR_ICEN,d0
+ movhu d0,(a0)
+
+ # and wait for it to calm down
+ setlb
+ movhu (a0),d0
+ btst CHCTR_ICBUSY,d0
+ lne
+
+ # invalidate
+ or CHCTR_ICINV,d0
+ movhu d0,(a0)
+
+ # wait for the cache to finish
+ mov CHCTR,a0
+ setlb
+ movhu (a0),d0
+ btst CHCTR_ICBUSY,d0
+ lne
+
+ # and reenable it
+ and ~CHCTR_ICINV,d0
+ or CHCTR_ICEN,d0
+ movhu d0,(a0)
+ movhu (a0),d0
+
+ LOCAL_IRQ_RESTORE(d1)
+#else /* CONFIG_AM33_2 || CONFIG_AM33_3 */
+ # invalidate
+ or CHCTR_ICINV,d0
+ movhu d0,(a0)
+ movhu (a0),d0
+#endif /* CONFIG_AM33_2 || CONFIG_AM33_3 */
+
+mn10300_local_icache_inv_end:
+ ret [],0
+ .size mn10300_local_icache_inv,.-mn10300_local_icache_inv
+
+###############################################################################
+#
+# void mn10300_local_dcache_inv(void)
+# Invalidate the entire dcache
+#
+###############################################################################
+ ALIGN
+ .globl mn10300_local_dcache_inv
+ .type mn10300_local_dcache_inv,@function
+mn10300_local_dcache_inv:
+ mov CHCTR,a0
+
+ movhu (a0),d0
+ btst CHCTR_DCEN,d0
+ beq mn10300_local_dcache_inv_end
+
+#if defined(CONFIG_AM33_2) || defined(CONFIG_AM33_3)
+ LOCAL_CLI_SAVE(d1)
+
+ # disable the dcache
+ and ~CHCTR_DCEN,d0
+ movhu d0,(a0)
+
+ # and wait for it to calm down
+ setlb
+ movhu (a0),d0
+ btst CHCTR_DCBUSY,d0
+ lne
+
+ # invalidate
+ or CHCTR_DCINV,d0
+ movhu d0,(a0)
+
+ # wait for the cache to finish
+ mov CHCTR,a0
+ setlb
+ movhu (a0),d0
+ btst CHCTR_DCBUSY,d0
+ lne
+
+ # and reenable it
+ and ~CHCTR_DCINV,d0
+ or CHCTR_DCEN,d0
+ movhu d0,(a0)
+ movhu (a0),d0
+
+ LOCAL_IRQ_RESTORE(d1)
+#else /* CONFIG_AM33_2 || CONFIG_AM33_3 */
+ # invalidate
+ or CHCTR_DCINV,d0
+ movhu d0,(a0)
+ movhu (a0),d0
+#endif /* CONFIG_AM33_2 || CONFIG_AM33_3 */
+
+mn10300_local_dcache_inv_end:
+ ret [],0
+ .size mn10300_local_dcache_inv,.-mn10300_local_dcache_inv
+
+###############################################################################
+#
+# void mn10300_local_dcache_inv_range(unsigned long start, unsigned long end)
+# void mn10300_local_dcache_inv_range2(unsigned long start, unsigned long size)
+# void mn10300_local_dcache_inv_page(unsigned long start)
+# Invalidate a range of addresses on a page in the dcache
+#
+###############################################################################
+ ALIGN
+ .globl mn10300_local_dcache_inv_page
+ .globl mn10300_local_dcache_inv_range
+ .globl mn10300_local_dcache_inv_range2
+ .type mn10300_local_dcache_inv_page,@function
+ .type mn10300_local_dcache_inv_range,@function
+ .type mn10300_local_dcache_inv_range2,@function
+mn10300_local_dcache_inv_page:
+ and ~(PAGE_SIZE-1),d0
+ mov PAGE_SIZE,d1
+mn10300_local_dcache_inv_range2:
+ add d0,d1
+mn10300_local_dcache_inv_range:
+ # If we are in writeback mode we check the start and end alignments,
+ # and if they're not cacheline-aligned, we must flush any bits outside
+ # the range that share cachelines with stuff inside the range
+#ifdef CONFIG_MN10300_CACHE_WBACK
+ btst ~(L1_CACHE_BYTES-1),d0
+ bne 1f
+ btst ~(L1_CACHE_BYTES-1),d1
+ beq 2f
+1:
+ bra mn10300_local_dcache_flush_inv_range
+2:
+#endif /* CONFIG_MN10300_CACHE_WBACK */
+
+ movm [d2,d3,a2],(sp)
+
+ mov CHCTR,a2
+ movhu (a2),d2
+ btst CHCTR_DCEN,d2
+ beq mn10300_local_dcache_inv_range_end
+
+#ifndef CONFIG_MN10300_CACHE_WBACK
+ and L1_CACHE_TAG_ADDRESS|L1_CACHE_TAG_ENTRY,d0 # round start
+ # addr down
+
+ add L1_CACHE_BYTES,d1 # round end addr up
+ and L1_CACHE_TAG_ADDRESS|L1_CACHE_TAG_ENTRY,d1
+#endif /* !CONFIG_MN10300_CACHE_WBACK */
+ mov d0,a1
+
+ clr d2 # we're going to clear tag RAM
+ # entries
+
+ # read the tags from the tag RAM, and if they indicate a valid dirty
+ # cache line then invalidate that line
+ mov DCACHE_TAG(0,0),a0
+ mov a1,d0
+ and L1_CACHE_TAG_ENTRY,d0
+ add d0,a0 # starting dcache tag RAM
+ # access address
+
+ sub a1,d1
+ lsr L1_CACHE_SHIFT,d1 # total number of entries to
+ # examine
+
+ and ~(L1_CACHE_DISPARITY-1),a1 # determine comparator base
+
+mn10300_local_dcache_inv_range_outer_loop:
+ LOCAL_CLI_SAVE(d3)
+
+ # disable the dcache
+ movhu (a2),d0
+ and ~CHCTR_DCEN,d0
+ movhu d0,(a2)
+
+ # and wait for it to calm down
+ setlb
+ movhu (a2),d0
+ btst CHCTR_DCBUSY,d0
+ lne
+
+mn10300_local_dcache_inv_range_loop:
+
+ # process the way 0 slot
+ mov (L1_CACHE_WAYDISP*0,a0),d0 # read the tag in the way 0 slot
+ btst L1_CACHE_TAG_VALID,d0
+ beq mn10300_local_dcache_inv_range_skip_0 # jump if this cacheline
+ # is not valid
+
+ xor a1,d0
+ lsr 12,d0
+ bne mn10300_local_dcache_inv_range_skip_0 # jump if not this cacheline
+
+ mov d2,(L1_CACHE_WAYDISP*0,a0) # kill the tag
+
+mn10300_local_dcache_inv_range_skip_0:
+
+ # process the way 1 slot
+ mov (L1_CACHE_WAYDISP*1,a0),d0 # read the tag in the way 1 slot
+ btst L1_CACHE_TAG_VALID,d0
+ beq mn10300_local_dcache_inv_range_skip_1 # jump if this cacheline
+ # is not valid
+
+ xor a1,d0
+ lsr 12,d0
+ bne mn10300_local_dcache_inv_range_skip_1 # jump if not this cacheline
+
+ mov d2,(L1_CACHE_WAYDISP*1,a0) # kill the tag
+
+mn10300_local_dcache_inv_range_skip_1:
+
+ # process the way 2 slot
+ mov (L1_CACHE_WAYDISP*2,a0),d0 # read the tag in the way 2 slot
+ btst L1_CACHE_TAG_VALID,d0
+ beq mn10300_local_dcache_inv_range_skip_2 # jump if this cacheline
+ # is not valid
+
+ xor a1,d0
+ lsr 12,d0
+ bne mn10300_local_dcache_inv_range_skip_2 # jump if not this cacheline
+
+ mov d2,(L1_CACHE_WAYDISP*2,a0) # kill the tag
+
+mn10300_local_dcache_inv_range_skip_2:
+
+ # process the way 3 slot
+ mov (L1_CACHE_WAYDISP*3,a0),d0 # read the tag in the way 3 slot
+ btst L1_CACHE_TAG_VALID,d0
+ beq mn10300_local_dcache_inv_range_skip_3 # jump if this cacheline
+ # is not valid
+
+ xor a1,d0
+ lsr 12,d0
+ bne mn10300_local_dcache_inv_range_skip_3 # jump if not this cacheline
+
+ mov d2,(L1_CACHE_WAYDISP*3,a0) # kill the tag
+
+mn10300_local_dcache_inv_range_skip_3:
+
+ # approx every N steps we re-enable the cache and see if there are any
+ # interrupts to be processed
+ # we also break out if we've reached the end of the loop
+ # (the bottom nibble of the count is zero in both cases)
+ add L1_CACHE_BYTES,a0
+ add L1_CACHE_BYTES,a1
+ and ~L1_CACHE_WAYDISP,a0
+ add -1,d1
+ btst mn10300_local_dcache_inv_range_intr_interval,d1
+ bne mn10300_local_dcache_inv_range_loop
+
+ # wait for the cache to finish what it's doing
+ setlb
+ movhu (a2),d0
+ btst CHCTR_DCBUSY,d0
+ lne
+
+ # and reenable it
+ or CHCTR_DCEN,d0
+ movhu d0,(a2)
+ movhu (a2),d0
+
+ # re-enable interrupts
+ # - we don't bother with delay NOPs as we'll have enough instructions
+ # before we disable interrupts again to give the interrupts a chance
+ # to happen
+ LOCAL_IRQ_RESTORE(d3)
+
+ # go around again if the counter hasn't yet reached zero
+ add 0,d1
+ bne mn10300_local_dcache_inv_range_outer_loop
+
+mn10300_local_dcache_inv_range_end:
+ ret [d2,d3,a2],12
+ .size mn10300_local_dcache_inv_page,.-mn10300_local_dcache_inv_page
+ .size mn10300_local_dcache_inv_range,.-mn10300_local_dcache_inv_range
+ .size mn10300_local_dcache_inv_range2,.-mn10300_local_dcache_inv_range2
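Note (illustration only, not part of the patch): the tag-walking variant above disables the dcache, then for each index in the range examines the four way slots and kills any tag whose page bits match the address being invalidated. A minimal C model of that per-index test follows; NR_WAYS, TAG_VALID and the tags[] array are assumptions standing in for the DCACHE_TAG window and L1_CACHE_TAG_VALID.

#include <stdint.h>

#define NR_WAYS		4
#define TAG_VALID	0x00000001u	/* stand-in for L1_CACHE_TAG_VALID */

/* invalidate, in one set, every way whose tag matches cmp_base */
static void inv_tags_for_index(uint32_t tags[NR_WAYS], uint32_t cmp_base)
{
	int way;

	for (way = 0; way < NR_WAYS; way++) {
		uint32_t tag = tags[way];

		if (!(tag & TAG_VALID))
			continue;		/* slot holds no cached line */

		/* same test as the xor/lsr-12 pair in the assembly: compare
		 * the page part of the tag with the comparator base */
		if ((tag ^ cmp_base) >> 12)
			continue;		/* a different line lives here */

		tags[way] = 0;			/* kill the tag */
	}
}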
diff --git a/arch/mn10300/mm/cache-inv-icache.c b/arch/mn10300/mm/cache-inv-icache.c
new file mode 100644
index 000000000000..a8933a60b2d4
--- /dev/null
+++ b/arch/mn10300/mm/cache-inv-icache.c
@@ -0,0 +1,129 @@
+/* Invalidate icache when dcache doesn't need invalidation as it's in
+ * write-through mode
+ *
+ * Copyright (C) 2010 Red Hat, Inc. All Rights Reserved.
+ * Written by David Howells (dhowells@redhat.com)
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public Licence
+ * as published by the Free Software Foundation; either version
+ * 2 of the Licence, or (at your option) any later version.
+ */
+#include <linux/module.h>
+#include <linux/mm.h>
+#include <asm/cacheflush.h>
+#include <asm/smp.h>
+#include "cache-smp.h"
+
+/**
+ * flush_icache_page_range - Flush dcache and invalidate icache for part of a
+ * single page
+ * @start: The starting virtual address of the page part.
+ * @end: The ending virtual address of the page part.
+ *
+ * Invalidate the icache for part of a single page, as determined by the
+ * virtual addresses given. The page must be in the paged area. The dcache is
+ * not flushed as the cache must be in write-through mode to get here.
+ */
+static void flush_icache_page_range(unsigned long start, unsigned long end)
+{
+ unsigned long addr, size, off;
+ struct page *page;
+ pgd_t *pgd;
+ pud_t *pud;
+ pmd_t *pmd;
+ pte_t *ppte, pte;
+
+ /* work out how much of the page to flush */
+ off = start & ~PAGE_MASK;
+ size = end - start;
+
+ /* get the physical address the page is mapped to from the page
+ * tables */
+ pgd = pgd_offset(current->mm, start);
+ if (!pgd || !pgd_val(*pgd))
+ return;
+
+ pud = pud_offset(pgd, start);
+ if (!pud || !pud_val(*pud))
+ return;
+
+ pmd = pmd_offset(pud, start);
+ if (!pmd || !pmd_val(*pmd))
+ return;
+
+ ppte = pte_offset_map(pmd, start);
+ if (!ppte)
+ return;
+ pte = *ppte;
+ pte_unmap(ppte);
+
+ if (pte_none(pte))
+ return;
+
+ page = pte_page(pte);
+ if (!page)
+ return;
+
+ addr = page_to_phys(page);
+
+ /* invalidate the icache coverage on that region */
+ mn10300_local_icache_inv_range2(addr + off, size);
+ smp_cache_call(SMP_ICACHE_INV_RANGE, start, end);
+}
+
+/**
+ * flush_icache_range - Globally flush dcache and invalidate icache for region
+ * @start: The starting virtual address of the region.
+ * @end: The ending virtual address of the region.
+ *
+ * This is used by the kernel to globally flush some code it has just written
+ * from the dcache back to RAM and then to globally invalidate the icache over
+ * that region so that that code can be run on all CPUs in the system.
+ */
+void flush_icache_range(unsigned long start, unsigned long end)
+{
+ unsigned long start_page, end_page;
+ unsigned long flags;
+
+ flags = smp_lock_cache();
+
+ if (end > 0x80000000UL) {
+ /* addresses above 0xa0000000 do not go through the cache */
+ if (end > 0xa0000000UL) {
+ end = 0xa0000000UL;
+ if (start >= end)
+ goto done;
+ }
+
+ /* kernel addresses between 0x80000000 and 0x9fffffff do not
+ * require page tables, so we just map such addresses
+ * directly */
+ start_page = (start >= 0x80000000UL) ? start : 0x80000000UL;
+ mn10300_local_icache_inv_range(start_page, end);
+ smp_cache_call(SMP_ICACHE_INV_RANGE, start, end);
+ if (start_page == start)
+ goto done;
+ end = start_page;
+ }
+
+ start_page = start & PAGE_MASK;
+ end_page = (end - 1) & PAGE_MASK;
+
+ if (start_page == end_page) {
+ /* the first and last bytes are on the same page */
+ flush_icache_page_range(start, end);
+ } else if (start_page + PAGE_SIZE == end_page) {
+ /* split over two virtually contiguous pages */
+ flush_icache_page_range(start, end_page);
+ flush_icache_page_range(end_page, end);
+ } else {
+ /* more than 2 pages; just flush the entire cache */
+ mn10300_local_icache_inv();
+ smp_cache_call(SMP_ICACHE_INV, 0, 0);
+ }
+
+done:
+ smp_unlock_cache(flags);
+}
+EXPORT_SYMBOL(flush_icache_range);
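Note (illustration only, not part of the patch): flush_icache_range() above first peels off the untranslated 0x80000000-0x9fffffff window and then dispatches on how many pages the remaining paged range touches. The standalone sketch below models just that dispatch; the 4KiB PAGE_* constants and classify() are assumptions, not kernel code.

#include <stdio.h>

#define PAGE_SHIFT	12
#define PAGE_SIZE	(1UL << PAGE_SHIFT)
#define PAGE_MASK	(~(PAGE_SIZE - 1))

static void classify(unsigned long start, unsigned long end)
{
	unsigned long start_page = start & PAGE_MASK;
	unsigned long end_page = (end - 1) & PAGE_MASK;

	if (start_page == end_page)
		printf("%#lx-%#lx: one partial page\n", start, end);
	else if (start_page + PAGE_SIZE == end_page)
		printf("%#lx-%#lx: split across two pages\n", start, end);
	else
		printf("%#lx-%#lx: more than two pages, whole icache\n", start, end);
}

int main(void)
{
	classify(0x400100, 0x400180);	/* wholly inside one page */
	classify(0x400f00, 0x401100);	/* straddles one page boundary */
	classify(0x400000, 0x404000);	/* spans many pages */
	return 0;
}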
diff --git a/arch/mn10300/mm/cache-mn10300.S b/arch/mn10300/mm/cache-mn10300.S
deleted file mode 100644
index e839d0aedd69..000000000000
--- a/arch/mn10300/mm/cache-mn10300.S
+++ /dev/null
@@ -1,289 +0,0 @@
-/* MN10300 CPU core caching routines
- *
- * Copyright (C) 2007 Red Hat, Inc. All Rights Reserved.
- * Written by David Howells (dhowells@redhat.com)
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public Licence
- * as published by the Free Software Foundation; either version
- * 2 of the Licence, or (at your option) any later version.
- */
-#include <linux/sys.h>
-#include <linux/linkage.h>
-#include <asm/smp.h>
-#include <asm/page.h>
-#include <asm/cache.h>
-
-#define mn10300_dcache_inv_range_intr_interval \
- +((1 << MN10300_DCACHE_INV_RANGE_INTR_LOG2_INTERVAL) - 1)
-
-#if mn10300_dcache_inv_range_intr_interval > 0xff
-#error MN10300_DCACHE_INV_RANGE_INTR_LOG2_INTERVAL must be 8 or less
-#endif
-
- .am33_2
-
- .globl mn10300_icache_inv
- .globl mn10300_dcache_inv
- .globl mn10300_dcache_inv_range
- .globl mn10300_dcache_inv_range2
- .globl mn10300_dcache_inv_page
-
-###############################################################################
-#
-# void mn10300_icache_inv(void)
-# Invalidate the entire icache
-#
-###############################################################################
- ALIGN
-mn10300_icache_inv:
- mov CHCTR,a0
-
- movhu (a0),d0
- btst CHCTR_ICEN,d0
- beq mn10300_icache_inv_end
-
- mov epsw,d1
- and ~EPSW_IE,epsw
- nop
- nop
-
- # disable the icache
- and ~CHCTR_ICEN,d0
- movhu d0,(a0)
-
- # and wait for it to calm down
- setlb
- movhu (a0),d0
- btst CHCTR_ICBUSY,d0
- lne
-
- # invalidate
- or CHCTR_ICINV,d0
- movhu d0,(a0)
-
- # wait for the cache to finish
- mov CHCTR,a0
- setlb
- movhu (a0),d0
- btst CHCTR_ICBUSY,d0
- lne
-
- # and reenable it
- and ~CHCTR_ICINV,d0
- or CHCTR_ICEN,d0
- movhu d0,(a0)
- movhu (a0),d0
-
- mov d1,epsw
-
-mn10300_icache_inv_end:
- ret [],0
-
-###############################################################################
-#
-# void mn10300_dcache_inv(void)
-# Invalidate the entire dcache
-#
-###############################################################################
- ALIGN
-mn10300_dcache_inv:
- mov CHCTR,a0
-
- movhu (a0),d0
- btst CHCTR_DCEN,d0
- beq mn10300_dcache_inv_end
-
- mov epsw,d1
- and ~EPSW_IE,epsw
- nop
- nop
-
- # disable the dcache
- and ~CHCTR_DCEN,d0
- movhu d0,(a0)
-
- # and wait for it to calm down
- setlb
- movhu (a0),d0
- btst CHCTR_DCBUSY,d0
- lne
-
- # invalidate
- or CHCTR_DCINV,d0
- movhu d0,(a0)
-
- # wait for the cache to finish
- mov CHCTR,a0
- setlb
- movhu (a0),d0
- btst CHCTR_DCBUSY,d0
- lne
-
- # and reenable it
- and ~CHCTR_DCINV,d0
- or CHCTR_DCEN,d0
- movhu d0,(a0)
- movhu (a0),d0
-
- mov d1,epsw
-
-mn10300_dcache_inv_end:
- ret [],0
-
-###############################################################################
-#
-# void mn10300_dcache_inv_range(unsigned start, unsigned end)
-# void mn10300_dcache_inv_range2(unsigned start, unsigned size)
-# void mn10300_dcache_inv_page(unsigned start)
-# Invalidate a range of addresses on a page in the dcache
-#
-###############################################################################
- ALIGN
-mn10300_dcache_inv_page:
- mov PAGE_SIZE,d1
-mn10300_dcache_inv_range2:
- add d0,d1
-mn10300_dcache_inv_range:
- movm [d2,d3,a2],(sp)
- mov CHCTR,a2
-
- movhu (a2),d2
- btst CHCTR_DCEN,d2
- beq mn10300_dcache_inv_range_end
-
- and L1_CACHE_TAG_ADDRESS|L1_CACHE_TAG_ENTRY,d0 # round start
- # addr down
- mov d0,a1
-
- add L1_CACHE_BYTES,d1 # round end addr up
- and L1_CACHE_TAG_ADDRESS|L1_CACHE_TAG_ENTRY,d1
-
- clr d2 # we're going to clear tag ram
- # entries
-
- # read the tags from the tag RAM, and if they indicate a valid dirty
- # cache line then invalidate that line
- mov DCACHE_TAG(0,0),a0
- mov a1,d0
- and L1_CACHE_TAG_ENTRY,d0
- add d0,a0 # starting dcache tag RAM
- # access address
-
- sub a1,d1
- lsr L1_CACHE_SHIFT,d1 # total number of entries to
- # examine
-
- and ~(L1_CACHE_DISPARITY-1),a1 # determine comparator base
-
-mn10300_dcache_inv_range_outer_loop:
- # disable interrupts
- mov epsw,d3
- and ~EPSW_IE,epsw
- nop # note that reading CHCTR and
- # AND'ing D0 occupy two delay
- # slots after disabling
- # interrupts
-
- # disable the dcache
- movhu (a2),d0
- and ~CHCTR_DCEN,d0
- movhu d0,(a2)
-
- # and wait for it to calm down
- setlb
- movhu (a2),d0
- btst CHCTR_DCBUSY,d0
- lne
-
-mn10300_dcache_inv_range_loop:
-
- # process the way 0 slot
- mov (L1_CACHE_WAYDISP*0,a0),d0 # read the tag in the way 0 slot
- btst L1_CACHE_TAG_VALID,d0
- beq mn10300_dcache_inv_range_skip_0 # jump if this cacheline is not
- # valid
-
- xor a1,d0
- lsr 12,d0
- bne mn10300_dcache_inv_range_skip_0 # jump if not this cacheline
-
- mov d2,(a0) # kill the tag
-
-mn10300_dcache_inv_range_skip_0:
-
- # process the way 1 slot
- mov (L1_CACHE_WAYDISP*1,a0),d0 # read the tag in the way 1 slot
- btst L1_CACHE_TAG_VALID,d0
- beq mn10300_dcache_inv_range_skip_1 # jump if this cacheline is not
- # valid
-
- xor a1,d0
- lsr 12,d0
- bne mn10300_dcache_inv_range_skip_1 # jump if not this cacheline
-
- mov d2,(a0) # kill the tag
-
-mn10300_dcache_inv_range_skip_1:
-
- # process the way 2 slot
- mov (L1_CACHE_WAYDISP*2,a0),d0 # read the tag in the way 2 slot
- btst L1_CACHE_TAG_VALID,d0
- beq mn10300_dcache_inv_range_skip_2 # jump if this cacheline is not
- # valid
-
- xor a1,d0
- lsr 12,d0
- bne mn10300_dcache_inv_range_skip_2 # jump if not this cacheline
-
- mov d2,(a0) # kill the tag
-
-mn10300_dcache_inv_range_skip_2:
-
- # process the way 3 slot
- mov (L1_CACHE_WAYDISP*3,a0),d0 # read the tag in the way 3 slot
- btst L1_CACHE_TAG_VALID,d0
- beq mn10300_dcache_inv_range_skip_3 # jump if this cacheline is not
- # valid
-
- xor a1,d0
- lsr 12,d0
- bne mn10300_dcache_inv_range_skip_3 # jump if not this cacheline
-
- mov d2,(a0) # kill the tag
-
-mn10300_dcache_inv_range_skip_3:
-
- # approx every N steps we re-enable the cache and see if there are any
- # interrupts to be processed
- # we also break out if we've reached the end of the loop
- # (the bottom nibble of the count is zero in both cases)
- add L1_CACHE_BYTES,a0
- add L1_CACHE_BYTES,a1
- add -1,d1
- btst mn10300_dcache_inv_range_intr_interval,d1
- bne mn10300_dcache_inv_range_loop
-
- # wait for the cache to finish what it's doing
- setlb
- movhu (a2),d0
- btst CHCTR_DCBUSY,d0
- lne
-
- # and reenable it
- or CHCTR_DCEN,d0
- movhu d0,(a2)
- movhu (a2),d0
-
- # re-enable interrupts
- # - we don't bother with delay NOPs as we'll have enough instructions
- # before we disable interrupts again to give the interrupts a chance
- # to happen
- mov d3,epsw
-
- # go around again if the counter hasn't yet reached zero
- add 0,d1
- bne mn10300_dcache_inv_range_outer_loop
-
-mn10300_dcache_inv_range_end:
- ret [d2,d3,a2],12
diff --git a/arch/mn10300/mm/cache-smp-flush.c b/arch/mn10300/mm/cache-smp-flush.c
new file mode 100644
index 000000000000..fd51af5eaf70
--- /dev/null
+++ b/arch/mn10300/mm/cache-smp-flush.c
@@ -0,0 +1,156 @@
+/* Functions for global dcache flush when writeback caching in SMP
+ *
+ * Copyright (C) 2010 Red Hat, Inc. All Rights Reserved.
+ * Written by David Howells (dhowells@redhat.com)
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public Licence
+ * as published by the Free Software Foundation; either version
+ * 2 of the Licence, or (at your option) any later version.
+ */
+#include <linux/mm.h>
+#include <asm/cacheflush.h>
+#include "cache-smp.h"
+
+/**
+ * mn10300_dcache_flush - Globally flush data cache
+ *
+ * Flush the data cache on all CPUs.
+ */
+void mn10300_dcache_flush(void)
+{
+ unsigned long flags;
+
+ flags = smp_lock_cache();
+ mn10300_local_dcache_flush();
+ smp_cache_call(SMP_DCACHE_FLUSH, 0, 0);
+ smp_unlock_cache(flags);
+}
+
+/**
+ * mn10300_dcache_flush_page - Globally flush a page of data cache
+ * @start: The address of the page of memory to be flushed.
+ *
+ * Flush a range of addresses in the data cache on all CPUs covering
+ * the page that includes the given address.
+ */
+void mn10300_dcache_flush_page(unsigned long start)
+{
+ unsigned long flags;
+
+ start &= ~(PAGE_SIZE-1);
+
+ flags = smp_lock_cache();
+ mn10300_local_dcache_flush_page(start);
+ smp_cache_call(SMP_DCACHE_FLUSH_RANGE, start, start + PAGE_SIZE);
+ smp_unlock_cache(flags);
+}
+
+/**
+ * mn10300_dcache_flush_range - Globally flush range of data cache
+ * @start: The start address of the region to be flushed.
+ * @end: The end address of the region to be flushed.
+ *
+ * Flush a range of addresses in the data cache on all CPUs, between start and
+ * end-1 inclusive.
+ */
+void mn10300_dcache_flush_range(unsigned long start, unsigned long end)
+{
+ unsigned long flags;
+
+ flags = smp_lock_cache();
+ mn10300_local_dcache_flush_range(start, end);
+ smp_cache_call(SMP_DCACHE_FLUSH_RANGE, start, end);
+ smp_unlock_cache(flags);
+}
+
+/**
+ * mn10300_dcache_flush_range2 - Globally flush range of data cache
+ * @start: The start address of the region to be flushed.
+ * @size: The size of the region to be flushed.
+ *
+ * Flush a range of addresses in the data cache on all CPUs, between start and
+ * start+size-1 inclusive.
+ */
+void mn10300_dcache_flush_range2(unsigned long start, unsigned long size)
+{
+ unsigned long flags;
+
+ flags = smp_lock_cache();
+ mn10300_local_dcache_flush_range2(start, size);
+ smp_cache_call(SMP_DCACHE_FLUSH_RANGE, start, start + size);
+ smp_unlock_cache(flags);
+}
+
+/**
+ * mn10300_dcache_flush_inv - Globally flush and invalidate data cache
+ *
+ * Flush and invalidate the data cache on all CPUs.
+ */
+void mn10300_dcache_flush_inv(void)
+{
+ unsigned long flags;
+
+ flags = smp_lock_cache();
+ mn10300_local_dcache_flush_inv();
+ smp_cache_call(SMP_DCACHE_FLUSH_INV, 0, 0);
+ smp_unlock_cache(flags);
+}
+
+/**
+ * mn10300_dcache_flush_inv_page - Globally flush and invalidate a page of data
+ * cache
+ * @start: The address of the page of memory to be flushed and invalidated.
+ *
+ * Flush and invalidate a range of addresses in the data cache on all CPUs
+ * covering the page that includes the given address.
+ */
+void mn10300_dcache_flush_inv_page(unsigned long start)
+{
+ unsigned long flags;
+
+ start &= ~(PAGE_SIZE-1);
+
+ flags = smp_lock_cache();
+ mn10300_local_dcache_flush_inv_page(start);
+ smp_cache_call(SMP_DCACHE_FLUSH_INV_RANGE, start, start + PAGE_SIZE);
+ smp_unlock_cache(flags);
+}
+
+/**
+ * mn10300_dcache_flush_inv_range - Globally flush and invalidate range of data
+ * cache
+ * @start: The start address of the region to be flushed and invalidated.
+ * @end: The end address of the region to be flushed and invalidated.
+ *
+ * Flush and invalidate a range of addresses in the data cache on all CPUs,
+ * between start and end-1 inclusive.
+ */
+void mn10300_dcache_flush_inv_range(unsigned long start, unsigned long end)
+{
+ unsigned long flags;
+
+ flags = smp_lock_cache();
+ mn10300_local_dcache_flush_inv_range(start, end);
+ smp_cache_call(SMP_DCACHE_FLUSH_INV_RANGE, start, end);
+ smp_unlock_cache(flags);
+}
+
+/**
+ * mn10300_dcache_flush_inv_range2 - Globally flush and invalidate range of data
+ * cache
+ * @start: The start address of the region to be flushed and invalidated.
+ * @size: The size of the region to be flushed and invalidated.
+ *
+ * Flush and invalidate a range of addresses in the data cache on all CPUs,
+ * between start and start+size-1 inclusive.
+ */
+void mn10300_dcache_flush_inv_range2(unsigned long start, unsigned long size)
+{
+ unsigned long flags;
+
+ flags = smp_lock_cache();
+ mn10300_local_dcache_flush_inv_range2(start, size);
+ smp_cache_call(SMP_DCACHE_FLUSH_INV_RANGE, start, start + size);
+ smp_unlock_cache(flags);
+}
diff --git a/arch/mn10300/mm/cache-smp-inv.c b/arch/mn10300/mm/cache-smp-inv.c
new file mode 100644
index 000000000000..ff1787358c8e
--- /dev/null
+++ b/arch/mn10300/mm/cache-smp-inv.c
@@ -0,0 +1,153 @@
+/* Functions for global i/dcache invalidation when caching in SMP
+ *
+ * Copyright (C) 2010 Red Hat, Inc. All Rights Reserved.
+ * Written by David Howells (dhowells@redhat.com)
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public Licence
+ * as published by the Free Software Foundation; either version
+ * 2 of the Licence, or (at your option) any later version.
+ */
+#include <linux/mm.h>
+#include <asm/cacheflush.h>
+#include "cache-smp.h"
+
+/**
+ * mn10300_icache_inv - Globally invalidate instruction cache
+ *
+ * Invalidate the instruction cache on all CPUs.
+ */
+void mn10300_icache_inv(void)
+{
+ unsigned long flags;
+
+ flags = smp_lock_cache();
+ mn10300_local_icache_inv();
+ smp_cache_call(SMP_ICACHE_INV, 0, 0);
+ smp_unlock_cache(flags);
+}
+
+/**
+ * mn10300_icache_inv_page - Globally invalidate a page of instruction cache
+ * @start: The address of the page of memory to be invalidated.
+ *
+ * Invalidate a range of addresses in the instruction cache on all CPUs
+ * covering the page that includes the given address.
+ */
+void mn10300_icache_inv_page(unsigned long start)
+{
+ unsigned long flags;
+
+ start &= ~(PAGE_SIZE-1);
+
+ flags = smp_lock_cache();
+ mn10300_local_icache_inv_page(start);
+ smp_cache_call(SMP_ICACHE_INV_RANGE, start, start + PAGE_SIZE);
+ smp_unlock_cache(flags);
+}
+
+/**
+ * mn10300_icache_inv_range - Globally invalidate range of instruction cache
+ * @start: The start address of the region to be invalidated.
+ * @end: The end address of the region to be invalidated.
+ *
+ * Invalidate a range of addresses in the instruction cache on all CPUs,
+ * between start and end-1 inclusive.
+ */
+void mn10300_icache_inv_range(unsigned long start, unsigned long end)
+{
+ unsigned long flags;
+
+ flags = smp_lock_cache();
+ mn10300_local_icache_inv_range(start, end);
+ smp_cache_call(SMP_ICACHE_INV_RANGE, start, end);
+ smp_unlock_cache(flags);
+}
+
+/**
+ * mn10300_icache_inv_range2 - Globally invalidate range of instruction cache
+ * @start: The start address of the region to be invalidated.
+ * @size: The size of the region to be invalidated.
+ *
+ * Invalidate a range of addresses in the instruction cache on all CPUs,
+ * between start and start+size-1 inclusive.
+ */
+void mn10300_icache_inv_range2(unsigned long start, unsigned long size)
+{
+ unsigned long flags;
+
+ flags = smp_lock_cache();
+ mn10300_local_icache_inv_range2(start, size);
+ smp_cache_call(SMP_ICACHE_INV_RANGE, start, start + size);
+ smp_unlock_cache(flags);
+}
+
+/**
+ * mn10300_dcache_inv - Globally invalidate data cache
+ *
+ * Invalidate the data cache on all CPUs.
+ */
+void mn10300_dcache_inv(void)
+{
+ unsigned long flags;
+
+ flags = smp_lock_cache();
+ mn10300_local_dcache_inv();
+ smp_cache_call(SMP_DCACHE_INV, 0, 0);
+ smp_unlock_cache(flags);
+}
+
+/**
+ * mn10300_dcache_inv_page - Globally invalidate a page of data cache
+ * @start: The address of the page of memory to be invalidated.
+ *
+ * Invalidate a range of addresses in the data cache on all CPUs covering the
+ * page that includes the given address.
+ */
+void mn10300_dcache_inv_page(unsigned long start)
+{
+ unsigned long flags;
+
+ start &= ~(PAGE_SIZE-1);
+
+ flags = smp_lock_cache();
+ mn10300_local_dcache_inv_page(start);
+ smp_cache_call(SMP_DCACHE_INV_RANGE, start, start + PAGE_SIZE);
+ smp_unlock_cache(flags);
+}
+
+/**
+ * mn10300_dcache_inv_range - Globally invalidate range of data cache
+ * @start: The start address of the region to be invalidated.
+ * @end: The end address of the region to be invalidated.
+ *
+ * Invalidate a range of addresses in the data cache on all CPUs, between start
+ * and end-1 inclusive.
+ */
+void mn10300_dcache_inv_range(unsigned long start, unsigned long end)
+{
+ unsigned long flags;
+
+ flags = smp_lock_cache();
+ mn10300_local_dcache_inv_range(start, end);
+ smp_cache_call(SMP_DCACHE_INV_RANGE, start, end);
+ smp_unlock_cache(flags);
+}
+
+/**
+ * mn10300_dcache_inv_range2 - Globally invalidate range of data cache
+ * @start: The start address of the region to be invalidated.
+ * @size: The size of the region to be invalidated.
+ *
+ * Invalidate a range of addresses in the data cache on all CPUs, between start
+ * and start+size-1 inclusive.
+ */
+void mn10300_dcache_inv_range2(unsigned long start, unsigned long size)
+{
+ unsigned long flags;
+
+ flags = smp_lock_cache();
+ mn10300_local_dcache_inv_range2(start, size);
+ smp_cache_call(SMP_DCACHE_INV_RANGE, start, start + size);
+ smp_unlock_cache(flags);
+}
diff --git a/arch/mn10300/mm/cache-smp.c b/arch/mn10300/mm/cache-smp.c
new file mode 100644
index 000000000000..4a6e9a4b5b27
--- /dev/null
+++ b/arch/mn10300/mm/cache-smp.c
@@ -0,0 +1,105 @@
+/* SMP global caching code
+ *
+ * Copyright (C) 2010 Red Hat, Inc. All Rights Reserved.
+ * Written by David Howells (dhowells@redhat.com)
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public Licence
+ * as published by the Free Software Foundation; either version
+ * 2 of the Licence, or (at your option) any later version.
+ */
+#include <linux/module.h>
+#include <linux/mm.h>
+#include <linux/mman.h>
+#include <linux/threads.h>
+#include <linux/interrupt.h>
+#include <asm/page.h>
+#include <asm/pgtable.h>
+#include <asm/processor.h>
+#include <asm/cacheflush.h>
+#include <asm/io.h>
+#include <asm/uaccess.h>
+#include <asm/smp.h>
+#include "cache-smp.h"
+
+DEFINE_SPINLOCK(smp_cache_lock);
+static unsigned long smp_cache_mask;
+static unsigned long smp_cache_start;
+static unsigned long smp_cache_end;
+static cpumask_t smp_cache_ipi_map; /* CPUs still to acknowledge the cache IPI */
+
+/**
+ * smp_cache_interrupt - Handle IPI request to flush caches.
+ *
+ * Handle a request delivered by IPI to flush the current CPU's
+ * caches. The parameters are stored in smp_cache_*.
+ */
+void smp_cache_interrupt(void)
+{
+ unsigned long opr_mask = smp_cache_mask;
+
+ switch ((enum smp_dcache_ops)(opr_mask & SMP_DCACHE_OP_MASK)) {
+ case SMP_DCACHE_NOP:
+ break;
+ case SMP_DCACHE_INV:
+ mn10300_local_dcache_inv();
+ break;
+ case SMP_DCACHE_INV_RANGE:
+ mn10300_local_dcache_inv_range(smp_cache_start, smp_cache_end);
+ break;
+ case SMP_DCACHE_FLUSH:
+ mn10300_local_dcache_flush();
+ break;
+ case SMP_DCACHE_FLUSH_RANGE:
+ mn10300_local_dcache_flush_range(smp_cache_start,
+ smp_cache_end);
+ break;
+ case SMP_DCACHE_FLUSH_INV:
+ mn10300_local_dcache_flush_inv();
+ break;
+ case SMP_DCACHE_FLUSH_INV_RANGE:
+ mn10300_local_dcache_flush_inv_range(smp_cache_start,
+ smp_cache_end);
+ break;
+ }
+
+ switch ((enum smp_icache_ops)(opr_mask & SMP_ICACHE_OP_MASK)) {
+ case SMP_ICACHE_NOP:
+ break;
+ case SMP_ICACHE_INV:
+ mn10300_local_icache_inv();
+ break;
+ case SMP_ICACHE_INV_RANGE:
+ mn10300_local_icache_inv_range(smp_cache_start, smp_cache_end);
+ break;
+ }
+
+ cpu_clear(smp_processor_id(), smp_cache_ipi_map);
+}
+
+/**
+ * smp_cache_call - Issue an IPI to request the other CPUs flush caches
+ * @opr_mask: Cache operation flags
+ * @start: Start address of request
+ * @end: End address of request
+ *
+ * Send cache flush IPI to other CPUs. This invokes smp_cache_interrupt()
+ * above on those other CPUs and then waits for them to finish.
+ *
+ * The caller must hold smp_cache_lock.
+ */
+void smp_cache_call(unsigned long opr_mask,
+ unsigned long start, unsigned long end)
+{
+ smp_cache_mask = opr_mask;
+ smp_cache_start = start;
+ smp_cache_end = end;
+ smp_cache_ipi_map = cpu_online_map;
+ cpu_clear(smp_processor_id(), smp_cache_ipi_map);
+
+ send_IPI_allbutself(FLUSH_CACHE_IPI);
+
+ while (!cpus_empty(smp_cache_ipi_map))
+ /* nothing. lockup detection does not belong here */
+ mb();
+}
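Note (illustration only, not part of the patch): smp_cache_call() above publishes the operation in smp_cache_* and then spins until every targeted CPU has cleared its bit from smp_cache_ipi_map in smp_cache_interrupt(). The same request/acknowledge handshake is sketched below with C11 atomics; cache_ipi_pending and the two model functions are assumptions, not kernel interfaces.

#include <stdatomic.h>

static _Atomic unsigned long cache_ipi_pending;	/* models smp_cache_ipi_map */

/* initiator: mark every target busy, fire the IPI, wait for all acks */
static void cache_call_model(unsigned long target_mask)
{
	atomic_store(&cache_ipi_pending, target_mask);
	/* send_IPI_allbutself(FLUSH_CACHE_IPI) happens here in the kernel */
	while (atomic_load(&cache_ipi_pending))
		;	/* spin until every target has cleared its bit */
}

/* responder, run from the IPI handler on each target CPU */
static void cache_interrupt_model(int cpu)
{
	/* the local cache operation decoded from smp_cache_mask goes here */
	atomic_fetch_and(&cache_ipi_pending, ~(1UL << cpu));	/* acknowledge */
}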
diff --git a/arch/mn10300/mm/cache-smp.h b/arch/mn10300/mm/cache-smp.h
new file mode 100644
index 000000000000..cb52892aa66a
--- /dev/null
+++ b/arch/mn10300/mm/cache-smp.h
@@ -0,0 +1,69 @@
+/* SMP caching definitions
+ *
+ * Copyright (C) 2010 Red Hat, Inc. All Rights Reserved.
+ * Written by David Howells (dhowells@redhat.com)
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public Licence
+ * as published by the Free Software Foundation; either version
+ * 2 of the Licence, or (at your option) any later version.
+ */
+
+
+/*
+ * Operation requests for smp_cache_call().
+ *
+ * One of smp_icache_ops and one of smp_dcache_ops can be OR'd together.
+ */
+enum smp_icache_ops {
+ SMP_ICACHE_NOP = 0x0000,
+ SMP_ICACHE_INV = 0x0001,
+ SMP_ICACHE_INV_RANGE = 0x0002,
+};
+#define SMP_ICACHE_OP_MASK 0x0003
+
+enum smp_dcache_ops {
+ SMP_DCACHE_NOP = 0x0000,
+ SMP_DCACHE_INV = 0x0004,
+ SMP_DCACHE_INV_RANGE = 0x0008,
+ SMP_DCACHE_FLUSH = 0x000c,
+ SMP_DCACHE_FLUSH_RANGE = 0x0010,
+ SMP_DCACHE_FLUSH_INV = 0x0014,
+ SMP_DCACHE_FLUSH_INV_RANGE = 0x0018,
+};
+#define SMP_DCACHE_OP_MASK 0x001c
+
+#define SMP_IDCACHE_INV_FLUSH (SMP_ICACHE_INV | SMP_DCACHE_FLUSH)
+#define SMP_IDCACHE_INV_FLUSH_RANGE (SMP_ICACHE_INV_RANGE | SMP_DCACHE_FLUSH_RANGE)
+
+/*
+ * cache-smp.c
+ */
+#ifdef CONFIG_SMP
+extern spinlock_t smp_cache_lock;
+
+extern void smp_cache_call(unsigned long opr_mask,
+ unsigned long addr, unsigned long end);
+
+static inline unsigned long smp_lock_cache(void)
+ __acquires(&smp_cache_lock)
+{
+ unsigned long flags;
+ spin_lock_irqsave(&smp_cache_lock, flags);
+ return flags;
+}
+
+static inline void smp_unlock_cache(unsigned long flags)
+ __releases(&smp_cache_lock)
+{
+ spin_unlock_irqrestore(&smp_cache_lock, flags);
+}
+
+#else
+static inline unsigned long smp_lock_cache(void) { return 0; }
+static inline void smp_unlock_cache(unsigned long flags) {}
+static inline void smp_cache_call(unsigned long opr_mask,
+ unsigned long addr, unsigned long end)
+{
+}
+#endif /* CONFIG_SMP */
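Note (illustration only, not part of the patch): one icache op and one dcache op occupy disjoint bit fields of the request word, which is why smp_cache_interrupt() can decode both with the two masks. A minimal demonstration follows; the constant values are copied from the header above, the printf harness is an assumption.

#include <stdio.h>

#define SMP_ICACHE_OP_MASK	0x0003
#define SMP_DCACHE_OP_MASK	0x001c
#define SMP_ICACHE_INV_RANGE	0x0002
#define SMP_DCACHE_FLUSH_RANGE	0x0010

int main(void)
{
	/* SMP_IDCACHE_INV_FLUSH_RANGE as composed in cache-smp.h */
	unsigned long opr_mask = SMP_ICACHE_INV_RANGE | SMP_DCACHE_FLUSH_RANGE;

	printf("icache op %#lx, dcache op %#lx\n",
	       opr_mask & SMP_ICACHE_OP_MASK,	/* 0x2: invalidate range */
	       opr_mask & SMP_DCACHE_OP_MASK);	/* 0x10: flush range */
	return 0;
}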
diff --git a/arch/mn10300/mm/cache.c b/arch/mn10300/mm/cache.c
index 9261217e8d2c..0a1f0aa92ebc 100644
--- a/arch/mn10300/mm/cache.c
+++ b/arch/mn10300/mm/cache.c
@@ -18,8 +18,13 @@
#include <asm/cacheflush.h>
#include <asm/io.h>
#include <asm/uaccess.h>
+#include <asm/smp.h>
+#include "cache-smp.h"
EXPORT_SYMBOL(mn10300_icache_inv);
+EXPORT_SYMBOL(mn10300_icache_inv_range);
+EXPORT_SYMBOL(mn10300_icache_inv_range2);
+EXPORT_SYMBOL(mn10300_icache_inv_page);
EXPORT_SYMBOL(mn10300_dcache_inv);
EXPORT_SYMBOL(mn10300_dcache_inv_range);
EXPORT_SYMBOL(mn10300_dcache_inv_range2);
@@ -37,96 +42,6 @@ EXPORT_SYMBOL(mn10300_dcache_flush_page);
#endif
/*
- * write a page back from the dcache and invalidate the icache so that we can
- * run code from it that we've just written into it
- */
-void flush_icache_page(struct vm_area_struct *vma, struct page *page)
-{
- mn10300_dcache_flush_page(page_to_phys(page));
- mn10300_icache_inv();
-}
-EXPORT_SYMBOL(flush_icache_page);
-
-/*
- * write some code we've just written back from the dcache and invalidate the
- * icache so that we can run that code
- */
-void flush_icache_range(unsigned long start, unsigned long end)
-{
-#ifdef CONFIG_MN10300_CACHE_WBACK
- unsigned long addr, size, base, off;
- struct page *page;
- pgd_t *pgd;
- pud_t *pud;
- pmd_t *pmd;
- pte_t *ppte, pte;
-
- if (end > 0x80000000UL) {
- /* addresses above 0xa0000000 do not go through the cache */
- if (end > 0xa0000000UL) {
- end = 0xa0000000UL;
- if (start >= end)
- return;
- }
-
- /* kernel addresses between 0x80000000 and 0x9fffffff do not
- * require page tables, so we just map such addresses directly */
- base = (start >= 0x80000000UL) ? start : 0x80000000UL;
- mn10300_dcache_flush_range(base, end);
- if (base == start)
- goto invalidate;
- end = base;
- }
-
- for (; start < end; start += size) {
- /* work out how much of the page to flush */
- off = start & (PAGE_SIZE - 1);
-
- size = end - start;
- if (size > PAGE_SIZE - off)
- size = PAGE_SIZE - off;
-
- /* get the physical address the page is mapped to from the page
- * tables */
- pgd = pgd_offset(current->mm, start);
- if (!pgd || !pgd_val(*pgd))
- continue;
-
- pud = pud_offset(pgd, start);
- if (!pud || !pud_val(*pud))
- continue;
-
- pmd = pmd_offset(pud, start);
- if (!pmd || !pmd_val(*pmd))
- continue;
-
- ppte = pte_offset_map(pmd, start);
- if (!ppte)
- continue;
- pte = *ppte;
- pte_unmap(ppte);
-
- if (pte_none(pte))
- continue;
-
- page = pte_page(pte);
- if (!page)
- continue;
-
- addr = page_to_phys(page);
-
- /* flush the dcache and invalidate the icache coverage on that
- * region */
- mn10300_dcache_flush_range2(addr + off, size);
- }
-#endif
-
-invalidate:
- mn10300_icache_inv();
-}
-EXPORT_SYMBOL(flush_icache_range);
-
-/*
* allow userspace to flush the instruction cache
*/
asmlinkage long sys_cacheflush(unsigned long start, unsigned long end)
diff --git a/arch/mn10300/mm/fault.c b/arch/mn10300/mm/fault.c
index 81f153fa51b4..59c3da49d9d9 100644
--- a/arch/mn10300/mm/fault.c
+++ b/arch/mn10300/mm/fault.c
@@ -39,10 +39,6 @@ void bust_spinlocks(int yes)
{
if (yes) {
oops_in_progress = 1;
-#ifdef CONFIG_SMP
- /* Many serial drivers do __global_cli() */
- global_irq_lock = 0;
-#endif
} else {
int loglevel_save = console_loglevel;
#ifdef CONFIG_VT
@@ -100,8 +96,6 @@ static void print_pagetable_entries(pgd_t *pgdir, unsigned long address)
}
#endif
-asmlinkage void monitor_signal(struct pt_regs *);
-
/*
* This routine handles page faults. It determines the address,
* and the problem, and then passes it off to one of the appropriate
@@ -279,7 +273,6 @@ good_area:
*/
bad_area:
up_read(&mm->mmap_sem);
- monitor_signal(regs);
/* User mode accesses just cause a SIGSEGV */
if ((fault_code & MMUFCR_xFC_ACCESS) == MMUFCR_xFC_ACCESS_USR) {
@@ -292,7 +285,6 @@ bad_area:
}
no_context:
- monitor_signal(regs);
/* Are we prepared to handle this kernel fault? */
if (fixup_exception(regs))
return;
@@ -338,14 +330,13 @@ no_context:
*/
out_of_memory:
up_read(&mm->mmap_sem);
- if ((fault_code & MMUFCR_xFC_ACCESS) != MMUFCR_xFC_ACCESS_USR)
- goto no_context;
- pagefault_out_of_memory();
- return;
+ printk(KERN_ALERT "VM: killing process %s\n", tsk->comm);
+ if ((fault_code & MMUFCR_xFC_ACCESS) == MMUFCR_xFC_ACCESS_USR)
+ do_exit(SIGKILL);
+ goto no_context;
do_sigbus:
up_read(&mm->mmap_sem);
- monitor_signal(regs);
/*
* Send a sigbus, regardless of whether we were in kernel
diff --git a/arch/mn10300/mm/init.c b/arch/mn10300/mm/init.c
index 6e6bc0e51521..48907cc3bdb7 100644
--- a/arch/mn10300/mm/init.c
+++ b/arch/mn10300/mm/init.c
@@ -41,6 +41,10 @@ DEFINE_PER_CPU(struct mmu_gather, mmu_gathers);
unsigned long highstart_pfn, highend_pfn;
+#ifdef CONFIG_MN10300_HAS_ATOMIC_OPS_UNIT
+static struct vm_struct user_iomap_vm;
+#endif
+
/*
* set up paging
*/
@@ -73,7 +77,24 @@ void __init paging_init(void)
/* pass the memory from the bootmem allocator to the main allocator */
free_area_init(zones_size);
- __flush_tlb_all();
+#ifdef CONFIG_MN10300_HAS_ATOMIC_OPS_UNIT
+ /* The Atomic Operation Unit registers need to be mapped to userspace
+ * for all processes. The following uses vm_area_register_early() to
+ * reserve the first page of the vmalloc area and sets the pte for that
+ * page.
+ *
+ * glibc hardcodes this virtual mapping, so we're pretty much stuck with
+ * it from now on.
+ */
+ user_iomap_vm.flags = VM_USERMAP;
+ user_iomap_vm.size = 1 << PAGE_SHIFT;
+ vm_area_register_early(&user_iomap_vm, PAGE_SIZE);
+ ppte = kernel_vmalloc_ptes;
+ set_pte(ppte, pfn_pte(USER_ATOMIC_OPS_PAGE_ADDR >> PAGE_SHIFT,
+ PAGE_USERIO));
+#endif
+
+ local_flush_tlb_all();
}
/*
@@ -84,8 +105,7 @@ void __init mem_init(void)
int codesize, reservedpages, datasize, initsize;
int tmp;
- if (!mem_map)
- BUG();
+ BUG_ON(!mem_map);
#define START_PFN (contig_page_data.bdata->node_min_pfn)
#define MAX_LOW_PFN (contig_page_data.bdata->node_low_pfn)
diff --git a/arch/mn10300/mm/misalignment.c b/arch/mn10300/mm/misalignment.c
index 6dffbf97ac26..eef989c1d0c1 100644
--- a/arch/mn10300/mm/misalignment.c
+++ b/arch/mn10300/mm/misalignment.c
@@ -449,8 +449,7 @@ found_opcode:
regs->pc, opcode, pop->opcode, pop->params[0], pop->params[1]);
tmp = format_tbl[pop->format].opsz;
- if (tmp > noc)
- BUG(); /* match was less complete than it ought to have been */
+ BUG_ON(tmp > noc); /* match was less complete than it ought to have been */
if (tmp < noc) {
tmp = noc - tmp;
diff --git a/arch/mn10300/mm/mmu-context.c b/arch/mn10300/mm/mmu-context.c
index 36ba02191d40..a4f7d3dcc6e6 100644
--- a/arch/mn10300/mm/mmu-context.c
+++ b/arch/mn10300/mm/mmu-context.c
@@ -13,40 +13,15 @@
#include <asm/mmu_context.h>
#include <asm/tlbflush.h>
+#ifdef CONFIG_MN10300_TLB_USE_PIDR
/*
* list of the MMU contexts last allocated on each CPU
*/
unsigned long mmu_context_cache[NR_CPUS] = {
- [0 ... NR_CPUS - 1] = MMU_CONTEXT_FIRST_VERSION * 2 - 1,
+ [0 ... NR_CPUS - 1] =
+ MMU_CONTEXT_FIRST_VERSION * 2 - (1 - MMU_CONTEXT_TLBPID_LOCK_NR),
};
-
-/*
- * flush the specified TLB entry
- */
-void flush_tlb_page(struct vm_area_struct *vma, unsigned long addr)
-{
- unsigned long pteu, cnx, flags;
-
- addr &= PAGE_MASK;
-
- /* make sure the context doesn't migrate and defend against
- * interference from vmalloc'd regions */
- local_irq_save(flags);
-
- cnx = mm_context(vma->vm_mm);
-
- if (cnx != MMU_NO_CONTEXT) {
- pteu = addr | (cnx & 0x000000ffUL);
- IPTEU = pteu;
- DPTEU = pteu;
- if (IPTEL & xPTEL_V)
- IPTEL = 0;
- if (DPTEL & xPTEL_V)
- DPTEL = 0;
- }
-
- local_irq_restore(flags);
-}
+#endif /* CONFIG_MN10300_TLB_USE_PIDR */
/*
* preemptively set a TLB entry
@@ -63,10 +38,16 @@ void update_mmu_cache(struct vm_area_struct *vma, unsigned long addr, pte_t *pte
* interference from vmalloc'd regions */
local_irq_save(flags);
+ cnx = ~MMU_NO_CONTEXT;
+#ifdef CONFIG_MN10300_TLB_USE_PIDR
cnx = mm_context(vma->vm_mm);
+#endif
if (cnx != MMU_NO_CONTEXT) {
- pteu = addr | (cnx & 0x000000ffUL);
+ pteu = addr;
+#ifdef CONFIG_MN10300_TLB_USE_PIDR
+ pteu |= cnx & MMU_CONTEXT_TLBPID_MASK;
+#endif
if (!(pte_val(pte) & _PAGE_NX)) {
IPTEU = pteu;
if (IPTEL & xPTEL_V)
diff --git a/arch/mn10300/mm/pgtable.c b/arch/mn10300/mm/pgtable.c
index 9c1624c9e4e9..450f7ba3f8f2 100644
--- a/arch/mn10300/mm/pgtable.c
+++ b/arch/mn10300/mm/pgtable.c
@@ -59,7 +59,7 @@ void set_pmd_pfn(unsigned long vaddr, unsigned long pfn, pgprot_t flags)
* It's enough to flush this one mapping.
* (PGE mappings get flushed as well)
*/
- __flush_tlb_one(vaddr);
+ local_flush_tlb_one(vaddr);
}
pte_t *pte_alloc_one_kernel(struct mm_struct *mm, unsigned long address)
diff --git a/arch/mn10300/mm/tlb-mn10300.S b/arch/mn10300/mm/tlb-mn10300.S
index 7095147dcb8b..b9940177d81b 100644
--- a/arch/mn10300/mm/tlb-mn10300.S
+++ b/arch/mn10300/mm/tlb-mn10300.S
@@ -27,7 +27,6 @@
###############################################################################
.type itlb_miss,@function
ENTRY(itlb_miss)
- and ~EPSW_NMID,epsw
#ifdef CONFIG_GDBSTUB
movm [d2,d3,a2],(sp)
#else
@@ -38,6 +37,12 @@ ENTRY(itlb_miss)
nop
#endif
+#if defined(CONFIG_ERRATUM_NEED_TO_RELOAD_MMUCTR)
+ mov (MMUCTR),d2
+ mov d2,(MMUCTR)
+#endif
+
+ and ~EPSW_NMID,epsw
mov (IPTEU),d3
mov (PTBR),a2
mov d3,d2
@@ -56,10 +61,16 @@ ENTRY(itlb_miss)
btst _PAGE_VALID,d2
beq itlb_miss_fault # jump if doesn't point to a page
# (might be a swap id)
+#if ((_PAGE_ACCESSED & 0xffffff00) == 0)
bset _PAGE_ACCESSED,(0,a2)
- and ~(xPTEL_UNUSED1|xPTEL_UNUSED2),d2
+#elif ((_PAGE_ACCESSED & 0xffff00ff) == 0)
+ bset +(_PAGE_ACCESSED >> 8),(1,a2)
+#else
+#error "_PAGE_ACCESSED value is out of range"
+#endif
+ and ~xPTEL2_UNUSED1,d2
itlb_miss_set:
- mov d2,(IPTEL) # change the TLB
+ mov d2,(IPTEL2) # change the TLB
#ifdef CONFIG_GDBSTUB
movm (sp),[d2,d3,a2]
#endif
@@ -79,7 +90,6 @@ itlb_miss_fault:
###############################################################################
.type dtlb_miss,@function
ENTRY(dtlb_miss)
- and ~EPSW_NMID,epsw
#ifdef CONFIG_GDBSTUB
movm [d2,d3,a2],(sp)
#else
@@ -90,6 +100,12 @@ ENTRY(dtlb_miss)
nop
#endif
+#if defined(CONFIG_ERRATUM_NEED_TO_RELOAD_MMUCTR)
+ mov (MMUCTR),d2
+ mov d2,(MMUCTR)
+#endif
+
+ and ~EPSW_NMID,epsw
mov (DPTEU),d3
mov (PTBR),a2
mov d3,d2
@@ -108,10 +124,16 @@ ENTRY(dtlb_miss)
btst _PAGE_VALID,d2
beq dtlb_miss_fault # jump if doesn't point to a page
# (might be a swap id)
+#if ((_PAGE_ACCESSED & 0xffffff00) == 0)
bset _PAGE_ACCESSED,(0,a2)
- and ~(xPTEL_UNUSED1|xPTEL_UNUSED2),d2
+#elif ((_PAGE_ACCESSED & 0xffff00ff) == 0)
+ bset +(_PAGE_ACCESSED >> 8),(1,a2)
+#else
+#error "_PAGE_ACCESSED value is out of range"
+#endif
+ and ~xPTEL2_UNUSED1,d2
dtlb_miss_set:
- mov d2,(DPTEL) # change the TLB
+ mov d2,(DPTEL2) # change the TLB
#ifdef CONFIG_GDBSTUB
movm (sp),[d2,d3,a2]
#endif
@@ -130,9 +152,15 @@ dtlb_miss_fault:
###############################################################################
.type itlb_aerror,@function
ENTRY(itlb_aerror)
- and ~EPSW_NMID,epsw
add -4,sp
SAVE_ALL
+
+#if defined(CONFIG_ERRATUM_NEED_TO_RELOAD_MMUCTR)
+ mov (MMUCTR),d1
+ mov d1,(MMUCTR)
+#endif
+
+ and ~EPSW_NMID,epsw
add -4,sp # need to pass three params
# calculate the fault code
@@ -140,15 +168,13 @@ ENTRY(itlb_aerror)
or 0x00010000,d1 # it's an instruction fetch
# determine the page address
- mov (IPTEU),a2
- mov a2,d0
+ mov (IPTEU),d0
and PAGE_MASK,d0
mov d0,(12,sp)
clr d0
- mov d0,(IPTEL)
+ mov d0,(IPTEL2)
- and ~EPSW_NMID,epsw
or EPSW_IE,epsw
mov fp,d0
call do_page_fault[],0 # do_page_fault(regs,code,addr
@@ -163,10 +189,16 @@ ENTRY(itlb_aerror)
###############################################################################
.type dtlb_aerror,@function
ENTRY(dtlb_aerror)
- and ~EPSW_NMID,epsw
add -4,sp
SAVE_ALL
+
+#if defined(CONFIG_ERRATUM_NEED_TO_RELOAD_MMUCTR)
+ mov (MMUCTR),d1
+ mov d1,(MMUCTR)
+#endif
+
add -4,sp # need to pass three params
+ and ~EPSW_NMID,epsw
# calculate the fault code
movhu (MMUFCR_DFC),d1
@@ -178,9 +210,8 @@ ENTRY(dtlb_aerror)
mov d0,(12,sp)
clr d0
- mov d0,(DPTEL)
+ mov d0,(DPTEL2)
- and ~EPSW_NMID,epsw
or EPSW_IE,epsw
mov fp,d0
call do_page_fault[],0 # do_page_fault(regs,code,addr
diff --git a/arch/mn10300/mm/tlb-smp.c b/arch/mn10300/mm/tlb-smp.c
new file mode 100644
index 000000000000..0b6a5ad1960e
--- /dev/null
+++ b/arch/mn10300/mm/tlb-smp.c
@@ -0,0 +1,214 @@
+/* SMP TLB support routines.
+ *
+ * Copyright (C) 2006-2008 Panasonic Corporation
+ * All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+#include <linux/interrupt.h>
+#include <linux/spinlock.h>
+#include <linux/init.h>
+#include <linux/jiffies.h>
+#include <linux/cpumask.h>
+#include <linux/err.h>
+#include <linux/kernel.h>
+#include <linux/delay.h>
+#include <linux/sched.h>
+#include <linux/profile.h>
+#include <linux/smp.h>
+#include <asm/tlbflush.h>
+#include <asm/system.h>
+#include <asm/bitops.h>
+#include <asm/processor.h>
+#include <asm/bug.h>
+#include <asm/exceptions.h>
+#include <asm/hardirq.h>
+#include <asm/fpu.h>
+#include <asm/mmu_context.h>
+#include <asm/thread_info.h>
+#include <asm/cpu-regs.h>
+#include <asm/intctl-regs.h>
+
+/*
+ * For flush TLB
+ */
+#define FLUSH_ALL 0xffffffff
+
+static cpumask_t flush_cpumask;
+static struct mm_struct *flush_mm;
+static unsigned long flush_va;
+static DEFINE_SPINLOCK(tlbstate_lock);
+
+DEFINE_PER_CPU_SHARED_ALIGNED(struct tlb_state, cpu_tlbstate) = {
+ &init_mm, 0
+};
+
+static void flush_tlb_others(cpumask_t cpumask, struct mm_struct *mm,
+ unsigned long va);
+static void do_flush_tlb_all(void *info);
+
+/**
+ * smp_flush_tlb - Callback to invalidate the TLB.
+ * @unused: Callback context (ignored).
+ */
+void smp_flush_tlb(void *unused)
+{
+ unsigned long cpu_id;
+
+ cpu_id = get_cpu();
+
+ if (!cpu_isset(cpu_id, flush_cpumask))
+ /* This was a BUG() but until someone can quote me the line
+ * from the intel manual that guarantees an IPI to multiple
+ * CPUs is retried _only_ on the erroring CPUs, it's staying as a
+ * return
+ *
+ * BUG();
+ */
+ goto out;
+
+ if (flush_va == FLUSH_ALL)
+ local_flush_tlb();
+ else
+ local_flush_tlb_page(flush_mm, flush_va);
+
+ smp_mb__before_clear_bit();
+ cpu_clear(cpu_id, flush_cpumask);
+ smp_mb__after_clear_bit();
+out:
+ put_cpu();
+}
+
+/**
+ * flush_tlb_others - Tell the specified CPUs to invalidate their TLBs
+ * @cpumask: The list of CPUs to target.
+ * @mm: The VM context to flush from (if va!=FLUSH_ALL).
+ * @va: Virtual address to flush or FLUSH_ALL to flush everything.
+ */
+static void flush_tlb_others(cpumask_t cpumask, struct mm_struct *mm,
+ unsigned long va)
+{
+ cpumask_t tmp;
+
+ /* A couple of sanity checks (to be removed):
+ * - mask must not be empty
+ * - current CPU must not be in mask
+ * - we do not send IPIs to as-yet unbooted CPUs.
+ */
+ BUG_ON(!mm);
+ BUG_ON(cpus_empty(cpumask));
+ BUG_ON(cpu_isset(smp_processor_id(), cpumask));
+
+ cpus_and(tmp, cpumask, cpu_online_map);
+ BUG_ON(!cpus_equal(cpumask, tmp));
+
+ /* I'm not happy about this global shared spinlock in the MM hot path,
+ * but we'll see how contended it is.
+ *
+ * Temporarily this turns IRQs off, so that lockups are detected by the
+ * NMI watchdog.
+ */
+ spin_lock(&tlbstate_lock);
+
+ flush_mm = mm;
+ flush_va = va;
+#if NR_CPUS <= BITS_PER_LONG
+ atomic_set_mask(cpumask.bits[0], &flush_cpumask.bits[0]);
+#else
+#error Not supported.
+#endif
+
+ /* FIXME: if NR_CPUS>=3, change send_IPI_mask */
+ smp_call_function(smp_flush_tlb, NULL, 1);
+
+ while (!cpus_empty(flush_cpumask))
+ /* Lockup detection does not belong here */
+ smp_mb();
+
+ flush_mm = NULL;
+ flush_va = 0;
+ spin_unlock(&tlbstate_lock);
+}
+
+/**
+ * flush_tlb_mm - Invalidate TLB of specified VM context
+ * @mm: The VM context to invalidate.
+ */
+void flush_tlb_mm(struct mm_struct *mm)
+{
+ cpumask_t cpu_mask;
+
+ preempt_disable();
+ cpu_mask = mm->cpu_vm_mask;
+ cpu_clear(smp_processor_id(), cpu_mask);
+
+ local_flush_tlb();
+ if (!cpus_empty(cpu_mask))
+ flush_tlb_others(cpu_mask, mm, FLUSH_ALL);
+
+ preempt_enable();
+}
+
+/**
+ * flush_tlb_current_task - Invalidate TLB of current task
+ */
+void flush_tlb_current_task(void)
+{
+ struct mm_struct *mm = current->mm;
+ cpumask_t cpu_mask;
+
+ preempt_disable();
+ cpu_mask = mm->cpu_vm_mask;
+ cpu_clear(smp_processor_id(), cpu_mask);
+
+ local_flush_tlb();
+ if (!cpus_empty(cpu_mask))
+ flush_tlb_others(cpu_mask, mm, FLUSH_ALL);
+
+ preempt_enable();
+}
+
+/**
+ * flush_tlb_page - Invalidate TLB of page
+ * @vma: The VM context to invalidate the page for.
+ * @va: The virtual address of the page to invalidate.
+ */
+void flush_tlb_page(struct vm_area_struct *vma, unsigned long va)
+{
+ struct mm_struct *mm = vma->vm_mm;
+ cpumask_t cpu_mask;
+
+ preempt_disable();
+ cpu_mask = mm->cpu_vm_mask;
+ cpu_clear(smp_processor_id(), cpu_mask);
+
+ local_flush_tlb_page(mm, va);
+ if (!cpus_empty(cpu_mask))
+ flush_tlb_others(cpu_mask, mm, va);
+
+ preempt_enable();
+}
+
+/**
+ * do_flush_tlb_all - Callback to completely invalidate a TLB
+ * @unused: Callback context (ignored).
+ */
+static void do_flush_tlb_all(void *unused)
+{
+ local_flush_tlb_all();
+}
+
+/**
+ * flush_tlb_all - Completely invalidate TLBs on all CPUs
+ */
+void flush_tlb_all(void)
+{
+ on_each_cpu(do_flush_tlb_all, NULL, 1);
+}
diff --git a/arch/mn10300/proc-mn103e010/include/proc/cache.h b/arch/mn10300/proc-mn103e010/include/proc/cache.h
index bdc1f9a59b4c..c1528004163c 100644
--- a/arch/mn10300/proc-mn103e010/include/proc/cache.h
+++ b/arch/mn10300/proc-mn103e010/include/proc/cache.h
@@ -30,4 +30,13 @@
*/
#define MN10300_DCACHE_INV_RANGE_INTR_LOG2_INTERVAL 4
+/*
+ * The size of range at which it becomes more economical to just flush the
+ * whole cache rather than trying to flush the specified range.
+ */
+#define MN10300_DCACHE_FLUSH_BORDER \
+ +(L1_CACHE_NWAYS * L1_CACHE_NENTRIES * L1_CACHE_BYTES)
+#define MN10300_DCACHE_FLUSH_INV_BORDER \
+ +(L1_CACHE_NWAYS * L1_CACHE_NENTRIES * L1_CACHE_BYTES)
+
#endif /* _ASM_PROC_CACHE_H */
diff --git a/arch/mn10300/proc-mn103e010/include/proc/clock.h b/arch/mn10300/proc-mn103e010/include/proc/clock.h
index aa23e147d620..704a819f1f4b 100644
--- a/arch/mn10300/proc-mn103e010/include/proc/clock.h
+++ b/arch/mn10300/proc-mn103e010/include/proc/clock.h
@@ -13,6 +13,4 @@
#include <unit/clock.h>
-#define MN10300_WDCLK MN10300_IOCLK
-
#endif /* _ASM_PROC_CLOCK_H */
diff --git a/arch/mn10300/proc-mn103e010/include/proc/dmactl-regs.h b/arch/mn10300/proc-mn103e010/include/proc/dmactl-regs.h
new file mode 100644
index 000000000000..d72d328d1f9c
--- /dev/null
+++ b/arch/mn10300/proc-mn103e010/include/proc/dmactl-regs.h
@@ -0,0 +1,102 @@
+/* MN103E010 on-board DMA controller registers
+ *
+ * Copyright (C) 2007 Red Hat, Inc. All Rights Reserved.
+ * Written by David Howells (dhowells@redhat.com)
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public Licence
+ * as published by the Free Software Foundation; either version
+ * 2 of the Licence, or (at your option) any later version.
+ */
+
+#ifndef _ASM_PROC_DMACTL_REGS_H
+#define _ASM_PROC_DMACTL_REGS_H
+
+#include <asm/cpu-regs.h>
+
+#ifdef __KERNEL__
+
+/* DMA registers */
+#define DMxCTR(N) __SYSREG(0xd2000000 + ((N) * 0x100), u32) /* control reg */
+#define DMxCTR_BG 0x0000001f /* transfer request source */
+#define DMxCTR_BG_SOFT 0x00000000 /* - software source */
+#define DMxCTR_BG_SC0TX 0x00000002 /* - serial port 0 transmission */
+#define DMxCTR_BG_SC0RX 0x00000003 /* - serial port 0 reception */
+#define DMxCTR_BG_SC1TX 0x00000004 /* - serial port 1 transmission */
+#define DMxCTR_BG_SC1RX 0x00000005 /* - serial port 1 reception */
+#define DMxCTR_BG_SC2TX 0x00000006 /* - serial port 2 transmission */
+#define DMxCTR_BG_SC2RX 0x00000007 /* - serial port 2 reception */
+#define DMxCTR_BG_TM0UFLOW 0x00000008 /* - timer 0 underflow */
+#define DMxCTR_BG_TM1UFLOW 0x00000009 /* - timer 1 underflow */
+#define DMxCTR_BG_TM2UFLOW 0x0000000a /* - timer 2 underflow */
+#define DMxCTR_BG_TM3UFLOW 0x0000000b /* - timer 3 underflow */
+#define DMxCTR_BG_TM6ACMPCAP 0x0000000c /* - timer 6A compare/capture */
+#define DMxCTR_BG_AFE 0x0000000d /* - analogue front-end interrupt source */
+#define DMxCTR_BG_ADC 0x0000000e /* - A/D conversion end interrupt source */
+#define DMxCTR_BG_IRDA 0x0000000f /* - IrDA interrupt source */
+#define DMxCTR_BG_RTC 0x00000010 /* - RTC interrupt source */
+#define DMxCTR_BG_XIRQ0 0x00000011 /* - XIRQ0 pin interrupt source */
+#define DMxCTR_BG_XIRQ1 0x00000012 /* - XIRQ1 pin interrupt source */
+#define DMxCTR_BG_XDMR0 0x00000013 /* - external request 0 source (XDMR0 pin) */
+#define DMxCTR_BG_XDMR1 0x00000014 /* - external request 1 source (XDMR1 pin) */
+#define DMxCTR_SAM 0x000000e0 /* DMA transfer src addr mode */
+#define DMxCTR_SAM_INCR 0x00000000 /* - increment */
+#define DMxCTR_SAM_DECR 0x00000020 /* - decrement */
+#define DMxCTR_SAM_FIXED 0x00000040 /* - fixed */
+#define DMxCTR_DAM 0x00000300 /* DMA transfer dest addr mode */
+#define DMxCTR_DAM_INCR 0x00000000 /* - increment */
+#define DMxCTR_DAM_DECR 0x00000100 /* - decrement */
+#define DMxCTR_DAM_FIXED 0x00000200 /* - fixed */
+#define DMxCTR_TM 0x00001800 /* DMA transfer mode */
+#define DMxCTR_TM_BATCH 0x00000000 /* - batch transfer */
+#define DMxCTR_TM_INTERM 0x00001000 /* - intermittent transfer */
+#define DMxCTR_UT 0x00006000 /* DMA transfer unit */
+#define DMxCTR_UT_1 0x00000000 /* - 1 byte */
+#define DMxCTR_UT_2 0x00002000 /* - 2 byte */
+#define DMxCTR_UT_4 0x00004000 /* - 4 byte */
+#define DMxCTR_UT_16 0x00006000 /* - 16 byte */
+#define DMxCTR_TEN 0x00010000 /* DMA channel transfer enable */
+#define DMxCTR_RQM 0x00060000 /* external request input source mode */
+#define DMxCTR_RQM_FALLEDGE 0x00000000 /* - falling edge */
+#define DMxCTR_RQM_RISEEDGE 0x00020000 /* - rising edge */
+#define DMxCTR_RQM_LOLEVEL 0x00040000 /* - low level */
+#define DMxCTR_RQM_HILEVEL 0x00060000 /* - high level */
+#define DMxCTR_RQF 0x01000000 /* DMA transfer request flag */
+#define DMxCTR_XEND 0x80000000 /* DMA transfer end flag */
+
+#define DMxSRC(N) __SYSREG(0xd2000004 + ((N) * 0x100), u32) /* source addr reg */
+
+#define DMxDST(N) __SYSREG(0xd2000008 + ((N) * 0x100), u32) /* dest addr reg */
+
+#define DMxSIZ(N) __SYSREG(0xd200000c + ((N) * 0x100), u32) /* transfer size reg */
+#define DMxSIZ_CT 0x000fffff /* number of bytes to transfer */
+
+#define DMxCYC(N) __SYSREG(0xd2000010 + ((N) * 0x100), u32) /* intermittent
+ * size reg */
+#define DMxCYC_CYC 0x000000ff /* number of intermittent transfers - 1 */
+
+#define DM0IRQ 16 /* DMA channel 0 complete IRQ */
+#define DM1IRQ 17 /* DMA channel 1 complete IRQ */
+#define DM2IRQ 18 /* DMA channel 2 complete IRQ */
+#define DM3IRQ 19 /* DMA channel 3 complete IRQ */
+
+#define DM0ICR GxICR(DM0IRQ) /* DMA channel 0 complete intr ctrl reg */
+#define DM1ICR GxICR(DM1IRQ) /* DMA channel 1 complete intr ctrl reg */
+#define DM2ICR GxICR(DM2IRQ) /* DMA channel 2 complete intr ctrl reg */
+#define DM3ICR GxICR(DM3IRQ) /* DMA channel 3 complete intr ctrl reg */
+
+#ifndef __ASSEMBLY__
+
+struct mn10300_dmactl_regs {
+ u32 ctr;
+ const void *src;
+ void *dst;
+ u32 siz;
+ u32 cyc;
+} __attribute__((aligned(0x100)));
+
+#endif /* __ASSEMBLY__ */
+
+#endif /* __KERNEL__ */
+
+#endif /* _ASM_PROC_DMACTL_REGS_H */
diff --git a/arch/mn10300/proc-mn103e010/include/proc/intctl-regs.h b/arch/mn10300/proc-mn103e010/include/proc/intctl-regs.h
new file mode 100644
index 000000000000..f537801a44ba
--- /dev/null
+++ b/arch/mn10300/proc-mn103e010/include/proc/intctl-regs.h
@@ -0,0 +1,29 @@
+#ifndef _ASM_PROC_INTCTL_REGS_H
+#define _ASM_PROC_INTCTL_REGS_H
+
+#ifndef _ASM_INTCTL_REGS_H
+# error "please don't include this file directly"
+#endif
+
+/* intr acceptance group reg */
+#define IAGR __SYSREG(0xd4000100, u16)
+
+/* group number register */
+#define IAGR_GN 0x00fc
+
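+/* each XIRQ pin has a two-bit trigger-mode field in the register given as Z;
+ * these helpers read and rewrite that field */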
+#define __GET_XIRQ_TRIGGER(X, Z) (((Z) >> ((X) * 2)) & 3)
+
+#define __SET_XIRQ_TRIGGER(X, Y, Z) \
+({ \
+ typeof(Z) x = (Z); \
+ x &= ~(3 << ((X) * 2)); \
+ x |= ((Y) & 3) << ((X) * 2); \
+ (Z) = x; \
+})
+
+/* external pin intr spec reg */
+#define EXTMD __SYSREG(0xd4000200, u16)
+#define GET_XIRQ_TRIGGER(X) __GET_XIRQ_TRIGGER(X, EXTMD)
+#define SET_XIRQ_TRIGGER(X, Y) __SET_XIRQ_TRIGGER(X, Y, EXTMD)
+
+#endif /* _ASM_PROC_INTCTL_REGS_H */
diff --git a/arch/mn10300/proc-mn103e010/include/proc/proc.h b/arch/mn10300/proc-mn103e010/include/proc/proc.h
index 22a2b93f70b7..39c4f8e7d2d3 100644
--- a/arch/mn10300/proc-mn103e010/include/proc/proc.h
+++ b/arch/mn10300/proc-mn103e010/include/proc/proc.h
@@ -12,7 +12,7 @@
#ifndef _ASM_PROC_PROC_H
#define _ASM_PROC_PROC_H
-#define PROCESSOR_VENDOR_NAME "Matsushita"
+#define PROCESSOR_VENDOR_NAME "Panasonic"
#define PROCESSOR_MODEL_NAME "mn103e010"
#endif /* _ASM_PROC_PROC_H */
diff --git a/arch/mn10300/proc-mn103e010/proc-init.c b/arch/mn10300/proc-mn103e010/proc-init.c
index 9a482efafa82..27b97980dca4 100644
--- a/arch/mn10300/proc-mn103e010/proc-init.c
+++ b/arch/mn10300/proc-mn103e010/proc-init.c
@@ -9,7 +9,9 @@
* 2 of the Licence, or (at your option) any later version.
*/
#include <linux/kernel.h>
+#include <asm/fpu.h>
#include <asm/rtc.h>
+#include <asm/busctl-regs.h>
/*
* initialise the on-silicon processor peripherals
@@ -28,6 +30,7 @@ asmlinkage void __init processor_init(void)
__set_intr_stub(EXCEP_DAERROR, dtlb_aerror);
__set_intr_stub(EXCEP_BUSERROR, raw_bus_error);
__set_intr_stub(EXCEP_DOUBLE_FAULT, double_fault);
+ __set_intr_stub(EXCEP_FPU_DISABLED, fpu_disabled);
__set_intr_stub(EXCEP_SYSCALL0, system_call);
__set_intr_stub(EXCEP_NMI, nmi_handler);
@@ -73,3 +76,37 @@ asmlinkage void __init processor_init(void)
calibrate_clock();
}
+
+/*
+ * determine the memory size and base from the memory controller regs
+ */
+void __init get_mem_info(unsigned long *mem_base, unsigned long *mem_size)
+{
+ unsigned long base, size;
+
+ *mem_base = 0;
+ *mem_size = 0;
+
+ base = SDBASE(0);
+ if (base & SDBASE_CE) {
+ size = (base & SDBASE_CBAM) << SDBASE_CBAM_SHIFT;
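+ /* negating the shifted mask (two's complement) below turns it
+ * into the bank size in bytes
+ */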
+ size = ~size + 1;
+ base &= SDBASE_CBA;
+
+ printk(KERN_INFO "SDRAM[0]: %luMB @%08lx\n", size >> 20, base);
+ *mem_size += size;
+ *mem_base = base;
+ }
+
+ base = SDBASE(1);
+ if (base & SDBASE_CE) {
+ size = (base & SDBASE_CBAM) << SDBASE_CBAM_SHIFT;
+ size = ~size + 1;
+ base &= SDBASE_CBA;
+
+ printk(KERN_INFO "SDRAM[1]: %luMB @%08lx\n", size >> 20, base);
+ *mem_size += size;
+ if (*mem_base == 0)
+ *mem_base = base;
+ }
+}
diff --git a/arch/mn10300/proc-mn2ws0050/Makefile b/arch/mn10300/proc-mn2ws0050/Makefile
new file mode 100644
index 000000000000..d4ca13309a85
--- /dev/null
+++ b/arch/mn10300/proc-mn2ws0050/Makefile
@@ -0,0 +1,5 @@
+#
+# Makefile for the linux kernel.
+#
+
+obj-y := proc-init.o
diff --git a/arch/mn10300/proc-mn2ws0050/include/proc/cache.h b/arch/mn10300/proc-mn2ws0050/include/proc/cache.h
new file mode 100644
index 000000000000..cafd7b5b55b4
--- /dev/null
+++ b/arch/mn10300/proc-mn2ws0050/include/proc/cache.h
@@ -0,0 +1,48 @@
+/* Cache specification
+ *
+ * Copyright (C) 2005 Red Hat, Inc. All Rights Reserved.
+ * Written by David Howells (dhowells@redhat.com)
+ *
+ * Modified by Matsushita Electric Industrial Co., Ltd.
+ * Modifications:
+ * 13-Nov-2006 MEI Add L1_CACHE_SHIFT_MAX definition.
+ * 29-Jul-2008 MEI Add define for MN10300_HAS_AREAPURGE_REG.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+#ifndef _ASM_PROC_CACHE_H
+#define _ASM_PROC_CACHE_H
+
+/*
+ * L1 cache
+ */
+#define L1_CACHE_NWAYS 4 /* number of ways in caches */
+#define L1_CACHE_NENTRIES 128 /* number of entries in each way */
+#define L1_CACHE_BYTES 32 /* bytes per entry */
+#define L1_CACHE_SHIFT 5 /* shift for bytes per entry */
+#define L1_CACHE_WAYDISP 0x1000 /* distance from one way to the next */
+
+#define L1_CACHE_TAG_VALID 0x00000001 /* cache tag valid bit */
+#define L1_CACHE_TAG_DIRTY 0x00000008 /* data cache tag dirty bit */
+#define L1_CACHE_TAG_ENTRY 0x00000fe0 /* cache tag entry address mask */
+#define L1_CACHE_TAG_ADDRESS 0xfffff000 /* cache tag line address mask */
+
+/*
+ * specification of the interval between interrupt checks whilst managing the
+ * cache with interrupts disabled
+ */
+#define MN10300_DCACHE_INV_RANGE_INTR_LOG2_INTERVAL 4
+
+/*
+ * The size of range at which it becomes more economical to just flush the
+ * whole cache rather than trying to flush the specified range.
+ */
+#define MN10300_DCACHE_FLUSH_BORDER \
+ +(L1_CACHE_NWAYS * L1_CACHE_NENTRIES * L1_CACHE_BYTES)
+#define MN10300_DCACHE_FLUSH_INV_BORDER \
+ +(L1_CACHE_NWAYS * L1_CACHE_NENTRIES * L1_CACHE_BYTES)
+
+#endif /* _ASM_PROC_CACHE_H */
diff --git a/arch/mn10300/proc-mn2ws0050/include/proc/clock.h b/arch/mn10300/proc-mn2ws0050/include/proc/clock.h
new file mode 100644
index 000000000000..fe4c0a4a53a2
--- /dev/null
+++ b/arch/mn10300/proc-mn2ws0050/include/proc/clock.h
@@ -0,0 +1,20 @@
+/* clock.h: proc-specific clocks
+ *
+ * Copyright (C) 2002 Red Hat, Inc. All Rights Reserved.
+ * Written by David Howells (dhowells@redhat.com)
+ *
+ * Modified by Matsushita Electric Industrial Co., Ltd.
+ * Modifications:
+ * 23-Feb-2007 MEI Delete define for watchdog timer.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+#ifndef _ASM_PROC_CLOCK_H
+#define _ASM_PROC_CLOCK_H
+
+#include <unit/clock.h>
+
+#endif /* _ASM_PROC_CLOCK_H */
diff --git a/arch/mn10300/proc-mn2ws0050/include/proc/dmactl-regs.h b/arch/mn10300/proc-mn2ws0050/include/proc/dmactl-regs.h
new file mode 100644
index 000000000000..4c4319e241d1
--- /dev/null
+++ b/arch/mn10300/proc-mn2ws0050/include/proc/dmactl-regs.h
@@ -0,0 +1,103 @@
+/* MN2WS0050 on-board DMA controller registers
+ *
+ * Copyright (C) 2002 Red Hat, Inc. All Rights Reserved.
+ * Written by David Howells (dhowells@redhat.com)
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * version 2 as published by the Free Software Foundation.
+ */
+
+#ifndef _ASM_PROC_DMACTL_REGS_H
+#define _ASM_PROC_DMACTL_REGS_H
+
+#include <asm/cpu-regs.h>
+
+#ifdef __KERNEL__
+
+/* DMA registers */
+#define DMxCTR(N) __SYSREG(0xd4005000 + ((N) * 0x100), u32) /* control reg */
+#define DMxCTR_BG 0x0000001f /* transfer request source */
+#define DMxCTR_BG_SOFT 0x00000000 /* - software source */
+#define DMxCTR_BG_SC0TX 0x00000002 /* - serial port 0 transmission */
+#define DMxCTR_BG_SC0RX 0x00000003 /* - serial port 0 reception */
+#define DMxCTR_BG_SC1TX 0x00000004 /* - serial port 1 transmission */
+#define DMxCTR_BG_SC1RX 0x00000005 /* - serial port 1 reception */
+#define DMxCTR_BG_SC2TX 0x00000006 /* - serial port 2 transmission */
+#define DMxCTR_BG_SC2RX 0x00000007 /* - serial port 2 reception */
+#define DMxCTR_BG_TM0UFLOW 0x00000008 /* - timer 0 underflow */
+#define DMxCTR_BG_TM1UFLOW 0x00000009 /* - timer 1 underflow */
+#define DMxCTR_BG_TM2UFLOW 0x0000000a /* - timer 2 underflow */
+#define DMxCTR_BG_TM3UFLOW 0x0000000b /* - timer 3 underflow */
+#define DMxCTR_BG_TM6ACMPCAP 0x0000000c /* - timer 6A compare/capture */
+#define DMxCTR_BG_RYBY 0x0000000d /* - NAND Flash RY/BY request source */
+#define DMxCTR_BG_RMC 0x0000000e /* - remote controller output */
+#define DMxCTR_BG_XIRQ12 0x00000011 /* - XIRQ12 pin interrupt source */
+#define DMxCTR_BG_XIRQ13 0x00000012 /* - XIRQ13 pin interrupt source */
+#define DMxCTR_BG_TCK 0x00000014 /* - tick timer underflow */
+#define DMxCTR_BG_SC4TX 0x00000019 /* - serial port4 transmission */
+#define DMxCTR_BG_SC4RX 0x0000001a /* - serial port4 reception */
+#define DMxCTR_BG_SC5TX 0x0000001b /* - serial port5 transmission */
+#define DMxCTR_BG_SC5RX 0x0000001c /* - serial port5 reception */
+#define DMxCTR_BG_SC6TX 0x0000001d /* - serial port6 transmission */
+#define DMxCTR_BG_SC6RX 0x0000001e /* - serial port6 reception */
+#define DMxCTR_BG_TMSUFLOW 0x0000001f /* - timestamp timer underflow */
+#define DMxCTR_SAM 0x00000060 /* DMA transfer src addr mode */
+#define DMxCTR_SAM_INCR 0x00000000 /* - increment */
+#define DMxCTR_SAM_DECR 0x00000020 /* - decrement */
+#define DMxCTR_SAM_FIXED 0x00000040 /* - fixed */
+#define DMxCTR_DAM 0x00000300 /* DMA transfer dest addr mode */
+#define DMxCTR_DAM_INCR 0x00000000 /* - increment */
+#define DMxCTR_DAM_DECR 0x00000100 /* - decrement */
+#define DMxCTR_DAM_FIXED 0x00000200 /* - fixed */
+#define DMxCTR_UT 0x00006000 /* DMA transfer unit */
+#define DMxCTR_UT_1 0x00000000 /* - 1 byte */
+#define DMxCTR_UT_2 0x00002000 /* - 2 byte */
+#define DMxCTR_UT_4 0x00004000 /* - 4 byte */
+#define DMxCTR_UT_16 0x00006000 /* - 16 byte */
+#define DMxCTR_RRE 0x00008000 /* DMA round robin enable */
+#define DMxCTR_TEN 0x00010000 /* DMA channel transfer enable */
+#define DMxCTR_RQM 0x00060000 /* external request input source mode */
+#define DMxCTR_RQM_FALLEDGE 0x00000000 /* - falling edge */
+#define DMxCTR_RQM_RISEEDGE 0x00020000 /* - rising edge */
+#define DMxCTR_RQM_LOLEVEL 0x00040000 /* - low level */
+#define DMxCTR_RQM_HILEVEL 0x00060000 /* - high level */
+#define DMxCTR_RQF 0x01000000 /* DMA transfer request flag */
+#define DMxCTR_PERR 0x40000000 /* DMA transfer parameter error flag */
+#define DMxCTR_XEND 0x80000000 /* DMA transfer end flag */
+
+#define DMxSRC(N) __SYSREG(0xd4005004 + ((N) * 0x100), u32) /* source addr reg */
+
+#define DMxDST(N) __SYSREG(0xd4005008 + ((N) * 0x100), u32) /* dest addr reg */
+
+#define DMxSIZ(N) __SYSREG(0xd400500c + ((N) * 0x100), u32) /* transfer size reg */
+#define DMxSIZ_CT 0x000fffff /* number of bytes to transfer */
+
+#define DMxCYC(N) __SYSREG(0xd4005010 + ((N) * 0x100), u32) /* intermittent size reg */
+#define DMxCYC_CYC 0x000000ff /* number of intermittent transfers - 1 */
+
+#define DM0IRQ 16 /* DMA channel 0 complete IRQ */
+#define DM1IRQ 17 /* DMA channel 1 complete IRQ */
+#define DM2IRQ 18 /* DMA channel 2 complete IRQ */
+#define DM3IRQ 19 /* DMA channel 3 complete IRQ */
+
+#define DM0ICR GxICR(DM0IRQ) /* DMA channel 0 complete intr ctrl reg */
+#define DM1ICR GxICR(DM1IRQ) /* DMA channel 1 complete intr ctrl reg */
+#define DM2ICR GxICR(DM2IRQ) /* DMA channel 2 complete intr ctrl reg */
+#define DM3ICR GxICR(DM3IRQ) /* DMA channel 3 complete intr ctrl reg */
+
+#ifndef __ASSEMBLY__
+
+struct mn10300_dmactl_regs {
+ u32 ctr;
+ const void *src;
+ void *dst;
+ u32 siz;
+ u32 cyc;
+} __attribute__((aligned(0x100)));
+
+#endif /* __ASSEMBLY__ */
+
+#endif /* __KERNEL__ */
+
+#endif /* _ASM_PROC_DMACTL_REGS_H */
diff --git a/arch/mn10300/proc-mn2ws0050/include/proc/intctl-regs.h b/arch/mn10300/proc-mn2ws0050/include/proc/intctl-regs.h
new file mode 100644
index 000000000000..a1e977273d19
--- /dev/null
+++ b/arch/mn10300/proc-mn2ws0050/include/proc/intctl-regs.h
@@ -0,0 +1,29 @@
+#ifndef _ASM_PROC_INTCTL_REGS_H
+#define _ASM_PROC_INTCTL_REGS_H
+
+#ifndef _ASM_INTCTL_REGS_H
+# error "please don't include this file directly"
+#endif
+
+/* intr acceptance group reg */
+#define IAGR __SYSREG(0xd4000100, u16)
+
+/* group number register */
+#define IAGR_GN 0x003fc
+
+#define __GET_XIRQ_TRIGGER(X, Z) (((Z) >> ((X) * 2)) & 3)
+
+#define __SET_XIRQ_TRIGGER(X, Y, Z) \
+({ \
+ typeof(Z) x = (Z); \
+ x &= ~(3 << ((X) * 2)); \
+ x |= ((Y) & 3) << ((X) * 2); \
+ (Z) = x; \
+})
+
+/* external pin intr spec reg */
+#define EXTMD0 __SYSREG(0xd4000200, u32)
+#define GET_XIRQ_TRIGGER(X) __GET_XIRQ_TRIGGER(X, EXTMD0)
+#define SET_XIRQ_TRIGGER(X, Y) __SET_XIRQ_TRIGGER(X, Y, EXTMD0)
+
+#endif /* _ASM_PROC_INTCTL_REGS_H */
diff --git a/arch/mn10300/proc-mn2ws0050/include/proc/irq.h b/arch/mn10300/proc-mn2ws0050/include/proc/irq.h
new file mode 100644
index 000000000000..37777a85ab6f
--- /dev/null
+++ b/arch/mn10300/proc-mn2ws0050/include/proc/irq.h
@@ -0,0 +1,49 @@
+/* MN2WS0050 on-board interrupt controller registers
+ *
+ * Copyright (C) 2002 Red Hat, Inc. All Rights Reserved.
+ * Written by David Howells (dhowells@redhat.com)
+ *
+ * Modified by Matsushita Electric Industrial Co., Ltd.
+ * Modifications:
+ * 13-Nov-2006 MEI Define extended IRQ number for SMP support.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+
+#ifndef _PROC_IRQ_H
+#define _PROC_IRQ_H
+
+#ifdef __KERNEL__
+
+#define GxICR_NUM_IRQS 163
+#ifdef CONFIG_SMP
+#define GxICR_NUM_EXT_IRQS 197
+#endif /* CONFIG_SMP */
+
+#define GxICR_NUM_XIRQS 16
+
+#define XIRQ0 34
+#define XIRQ1 35
+#define XIRQ2 36
+#define XIRQ3 37
+#define XIRQ4 38
+#define XIRQ5 39
+#define XIRQ6 40
+#define XIRQ7 41
+#define XIRQ8 42
+#define XIRQ9 43
+#define XIRQ10 44
+#define XIRQ11 45
+#define XIRQ12 46
+#define XIRQ13 47
+#define XIRQ14 48
+#define XIRQ15 49
+
+#define XIRQ2IRQ(num) (XIRQ0 + num)
+
+#endif /* __KERNEL__ */
+
+#endif /* _PROC_IRQ_H */
diff --git a/arch/mn10300/proc-mn2ws0050/include/proc/nand-regs.h b/arch/mn10300/proc-mn2ws0050/include/proc/nand-regs.h
new file mode 100644
index 000000000000..84448f3828b3
--- /dev/null
+++ b/arch/mn10300/proc-mn2ws0050/include/proc/nand-regs.h
@@ -0,0 +1,120 @@
+/* NAND flash interface register definitions
+ *
+ * Copyright (C) 2008-2009 Panasonic Corporation
+ * All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _PROC_NAND_REGS_H_
+#define _PROC_NAND_REGS_H_
+
+/* command register */
+#define FCOMMAND_0 __SYSREG(0xd8f00000, u8) /* fcommand[24:31] */
+#define FCOMMAND_1 __SYSREG(0xd8f00001, u8) /* fcommand[16:23] */
+#define FCOMMAND_2 __SYSREG(0xd8f00002, u8) /* fcommand[8:15] */
+#define FCOMMAND_3 __SYSREG(0xd8f00003, u8) /* fcommand[0:7] */
+
+/* for dma 16 byte trans, use FCOMMAND2 register */
+#define FCOMMAND2_0 __SYSREG(0xd8f00110, u8) /* fcommand2[24:31] */
+#define FCOMMAND2_1 __SYSREG(0xd8f00111, u8) /* fcommand2[16:23] */
+#define FCOMMAND2_2 __SYSREG(0xd8f00112, u8) /* fcommand2[8:15] */
+#define FCOMMAND2_3 __SYSREG(0xd8f00113, u8) /* fcommand2[0:7] */
+
+#define FCOMMAND_FIEN 0x80 /* nand flash I/F enable */
+#define FCOMMAND_BW_8BIT 0x00 /* 8bit bus width */
+#define FCOMMAND_BW_16BIT 0x40 /* 16bit bus width */
+#define FCOMMAND_BLOCKSZ_SMALL 0x00 /* small block */
+#define FCOMMAND_BLOCKSZ_LARGE 0x20 /* large block */
+#define FCOMMAND_DMASTART 0x10 /* dma start */
+#define FCOMMAND_RYBY 0x08 /* ready/busy flag */
+#define FCOMMAND_RYBYINTMSK 0x04 /* mask ready/busy interrupt */
+#define FCOMMAND_XFWP 0x02 /* write protect enable */
+#define FCOMMAND_XFCE 0x01 /* flash device disable */
+#define FCOMMAND_SEQKILL 0x10 /* stop seq-read */
+#define FCOMMAND_ANUM 0x07 /* address cycle */
+#define FCOMMAND_ANUM_NONE 0x00 /* address cycle none */
+#define FCOMMAND_ANUM_1CYC 0x01 /* address cycle 1cycle */
+#define FCOMMAND_ANUM_2CYC 0x02 /* address cycle 2cycle */
+#define FCOMMAND_ANUM_3CYC 0x03 /* address cycle 3cycle */
+#define FCOMMAND_ANUM_4CYC 0x04 /* address cycle 4cycle */
+#define FCOMMAND_ANUM_5CYC 0x05 /* address cycle 5cycle */
+#define FCOMMAND_FCMD_READ0 0x00 /* read1 command */
+#define FCOMMAND_FCMD_SEQIN 0x80 /* page program 1st command */
+#define FCOMMAND_FCMD_PAGEPROG 0x10 /* page program 2nd command */
+#define FCOMMAND_FCMD_RESET 0xff /* reset command */
+#define FCOMMAND_FCMD_ERASE1 0x60 /* erase 1st command */
+#define FCOMMAND_FCMD_ERASE2 0xd0 /* erase 2nd command */
+#define FCOMMAND_FCMD_STATUS 0x70 /* read status command */
+#define FCOMMAND_FCMD_READID 0x90 /* read id command */
+#define FCOMMAND_FCMD_READOOB 0x50 /* read3 command */
+/* address register */
+#define FADD __SYSREG(0xd8f00004, u32)
+/* address register 2 */
+#define FADD2 __SYSREG(0xd8f00008, u32)
+/* error judgement register */
+#define FJUDGE __SYSREG(0xd8f0000c, u32)
+#define FJUDGE_NOERR 0x0 /* no error */
+#define FJUDGE_1BITERR 0x1 /* 1bit error in data area */
+#define FJUDGE_PARITYERR 0x2 /* parity error */
+#define FJUDGE_UNCORRECTABLE 0x3 /* uncorrectable error */
+#define FJUDGE_ERRJDG_MSK 0x3 /* mask of judgement result */
+/* 1st ECC store register */
+#define FECC11 __SYSREG(0xd8f00010, u32)
+/* 2nd ECC store register */
+#define FECC12 __SYSREG(0xd8f00014, u32)
+/* 3rd ECC store register */
+#define FECC21 __SYSREG(0xd8f00018, u32)
+/* 4th ECC store register */
+#define FECC22 __SYSREG(0xd8f0001c, u32)
+/* 5th ECC store register */
+#define FECC31 __SYSREG(0xd8f00020, u32)
+/* 6th ECC store register */
+#define FECC32 __SYSREG(0xd8f00024, u32)
+/* 7th ECC store register */
+#define FECC41 __SYSREG(0xd8f00028, u32)
+/* 8th ECC store register */
+#define FECC42 __SYSREG(0xd8f0002c, u32)
+/* data register */
+#define FDATA __SYSREG(0xd8f00030, u32)
+/* access pulse register */
+#define FPWS __SYSREG(0xd8f00100, u32)
+#define FPWS_PWS1W_2CLK 0x00000000 /* write pulse width 1clock */
+#define FPWS_PWS1W_3CLK 0x01000000 /* write pulse width 2clock */
+#define FPWS_PWS1W_4CLK 0x02000000 /* write pulse width 4clock */
+#define FPWS_PWS1W_5CLK 0x03000000 /* write pulse width 5clock */
+#define FPWS_PWS1W_6CLK 0x04000000 /* write pulse width 6clock */
+#define FPWS_PWS1W_7CLK 0x05000000 /* write pulse width 7clock */
+#define FPWS_PWS1W_8CLK 0x06000000 /* write pulse width 8clock */
+#define FPWS_PWS1R_3CLK 0x00010000 /* read pulse width 3clock */
+#define FPWS_PWS1R_4CLK 0x00020000 /* read pulse width 4clock */
+#define FPWS_PWS1R_5CLK 0x00030000 /* read pulse width 5clock */
+#define FPWS_PWS1R_6CLK 0x00040000 /* read pulse width 6clock */
+#define FPWS_PWS1R_7CLK 0x00050000 /* read pulse width 7clock */
+#define FPWS_PWS1R_8CLK 0x00060000 /* read pulse width 8clock */
+#define FPWS_PWS2W_2CLK 0x00000100 /* write pulse interval 2clock */
+#define FPWS_PWS2W_3CLK 0x00000200 /* write pulse interval 3clock */
+#define FPWS_PWS2W_4CLK 0x00000300 /* write pulse interval 4clock */
+#define FPWS_PWS2W_5CLK 0x00000400 /* write pulse interval 5clock */
+#define FPWS_PWS2W_6CLK 0x00000500 /* write pulse interval 6clock */
+#define FPWS_PWS2R_2CLK 0x00000001 /* read pulse interval 2clock */
+#define FPWS_PWS2R_3CLK 0x00000002 /* read pulse interval 3clock */
+#define FPWS_PWS2R_4CLK 0x00000003 /* read pulse interval 4clock */
+#define FPWS_PWS2R_5CLK 0x00000004 /* read pulse interval 5clock */
+#define FPWS_PWS2R_6CLK 0x00000005 /* read pulse interval 6clock */
+/* command register 2 */
+#define FCOMMAND2 __SYSREG(0xd8f00110, u32)
+/* transfer frequency register */
+#define FNUM __SYSREG(0xd8f00114, u32)
+#define FSDATA_ADDR 0xd8f00400
+/* active data register */
+#define FSDATA __SYSREG(FSDATA_ADDR, u32)
+
+#endif /* _PROC_NAND_REGS_H_ */
diff --git a/arch/mn10300/proc-mn2ws0050/include/proc/proc.h b/arch/mn10300/proc-mn2ws0050/include/proc/proc.h
new file mode 100644
index 000000000000..90d5cadd05bd
--- /dev/null
+++ b/arch/mn10300/proc-mn2ws0050/include/proc/proc.h
@@ -0,0 +1,18 @@
+/* proc.h: MN2WS0050 processor description
+ *
+ * Copyright (C) 2002 Red Hat, Inc. All Rights Reserved.
+ * Written by David Howells (dhowells@redhat.com)
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+
+#ifndef _ASM_PROC_PROC_H
+#define _ASM_PROC_PROC_H
+
+#define PROCESSOR_VENDOR_NAME "Panasonic"
+#define PROCESSOR_MODEL_NAME "mn2ws0050"
+
+#endif /* _ASM_PROC_PROC_H */
diff --git a/arch/mn10300/proc-mn2ws0050/include/proc/smp-regs.h b/arch/mn10300/proc-mn2ws0050/include/proc/smp-regs.h
new file mode 100644
index 000000000000..22f277fbb4de
--- /dev/null
+++ b/arch/mn10300/proc-mn2ws0050/include/proc/smp-regs.h
@@ -0,0 +1,51 @@
+/* MN10300/AM33v2 Microcontroller SMP registers
+ *
+ * Copyright (C) 2006 Matsushita Electric Industrial Co., Ltd.
+ * All Rights Reserved.
+ * Created:
+ * 13-Nov-2006 MEI Add extended cache and atomic operation register
+ * for SMP support.
+ * 23-Feb-2007 MEI Add define for gdbstub SMP.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+
+#ifndef _ASM_PROC_SMP_REGS_H
+#define _ASM_PROC_SMP_REGS_H
+
+#ifdef __KERNEL__
+
+#ifndef __ASSEMBLY__
+#include <linux/types.h>
+#endif
+#include <asm/cpu-regs.h>
+
+/*
+ * Reference to the interrupt controllers of other CPUs
+ */
+#define CROSS_ICR_CPU_SHIFT 16
+
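+/* each remote CPU's ICRs start at a 64KiB-aligned offset (CPU << 16);
+ * IRQ numbers 64-191 sit a further 0xf00 bytes into that block
+ */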
+#define CROSS_GxICR(X, CPU) __SYSREG(0xc4000000 + (X) * 4 + \
+ ((X) >= 64 && (X) < 192) * 0xf00 + ((CPU) << CROSS_ICR_CPU_SHIFT), u16)
+#define CROSS_GxICR_u8(X, CPU) __SYSREG(0xc4000000 + (X) * 4 + \
+ (((X) >= 64) && ((X) < 192)) * 0xf00 + ((CPU) << CROSS_ICR_CPU_SHIFT), u8)
+
+/* CPU ID register */
+#define CPUID __SYSREGC(0xc0000054, u32)
+#define CPUID_MASK 0x00000007 /* CPU ID mask */
+
+/* extended cache control register */
+#define ECHCTR __SYSREG(0xc0000c20, u32)
+#define ECHCTR_IBCM 0x00000001 /* instruction cache broad cast mask */
+#define ECHCTR_DBCM 0x00000002 /* data cache broad cast mask */
+#define ECHCTR_ISPM 0x00000004 /* instruction cache snoop mask */
+#define ECHCTR_DSPM 0x00000008 /* data cache snoop mask */
+
+#define NMIAGR __SYSREG(0xd400013c, u16)
+#define NMIAGR_GN 0x03fc
+
+#endif /* __KERNEL__ */
+#endif /* _ASM_PROC_SMP_REGS_H */
diff --git a/arch/mn10300/proc-mn2ws0050/proc-init.c b/arch/mn10300/proc-mn2ws0050/proc-init.c
new file mode 100644
index 000000000000..c58249b9525a
--- /dev/null
+++ b/arch/mn10300/proc-mn2ws0050/proc-init.c
@@ -0,0 +1,134 @@
+/* MN2WS0050 processor initialisation
+ *
+ * Copyright (C) 2005 Red Hat, Inc. All Rights Reserved.
+ * Written by David Howells (dhowells@redhat.com)
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+#include <linux/sched.h>
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/delay.h>
+#include <linux/interrupt.h>
+
+#include <asm/processor.h>
+#include <asm/system.h>
+#include <asm/uaccess.h>
+#include <asm/io.h>
+#include <asm/atomic.h>
+#include <asm/smp.h>
+#include <asm/pgalloc.h>
+#include <asm/busctl-regs.h>
+#include <unit/timex.h>
+#include <asm/fpu.h>
+#include <asm/rtc.h>
+
+#define MEMCONF __SYSREGC(0xdf800400, u32)
+
+/*
+ * initialise the on-silicon processor peripherals
+ */
+asmlinkage void __init processor_init(void)
+{
+ int loop;
+
+ /* set up the exception table first */
+ for (loop = 0x000; loop < 0x400; loop += 8)
+ __set_intr_stub(loop, __common_exception);
+
+ __set_intr_stub(EXCEP_ITLBMISS, itlb_miss);
+ __set_intr_stub(EXCEP_DTLBMISS, dtlb_miss);
+ __set_intr_stub(EXCEP_IAERROR, itlb_aerror);
+ __set_intr_stub(EXCEP_DAERROR, dtlb_aerror);
+ __set_intr_stub(EXCEP_BUSERROR, raw_bus_error);
+ __set_intr_stub(EXCEP_DOUBLE_FAULT, double_fault);
+ __set_intr_stub(EXCEP_FPU_DISABLED, fpu_disabled);
+ __set_intr_stub(EXCEP_SYSCALL0, system_call);
+
+ __set_intr_stub(EXCEP_NMI, nmi_handler);
+ __set_intr_stub(EXCEP_WDT, nmi_handler);
+ __set_intr_stub(EXCEP_IRQ_LEVEL0, irq_handler);
+ __set_intr_stub(EXCEP_IRQ_LEVEL1, irq_handler);
+ __set_intr_stub(EXCEP_IRQ_LEVEL2, irq_handler);
+ __set_intr_stub(EXCEP_IRQ_LEVEL3, irq_handler);
+ __set_intr_stub(EXCEP_IRQ_LEVEL4, irq_handler);
+ __set_intr_stub(EXCEP_IRQ_LEVEL5, irq_handler);
+ __set_intr_stub(EXCEP_IRQ_LEVEL6, irq_handler);
+
+ IVAR0 = EXCEP_IRQ_LEVEL0;
+ IVAR1 = EXCEP_IRQ_LEVEL1;
+ IVAR2 = EXCEP_IRQ_LEVEL2;
+ IVAR3 = EXCEP_IRQ_LEVEL3;
+ IVAR4 = EXCEP_IRQ_LEVEL4;
+ IVAR5 = EXCEP_IRQ_LEVEL5;
+ IVAR6 = EXCEP_IRQ_LEVEL6;
+
+#ifndef CONFIG_MN10300_HAS_CACHE_SNOOP
+ mn10300_dcache_flush_inv();
+ mn10300_icache_inv();
+#endif
+
+ /* disable all interrupts and set to priority 6 (lowest) */
+#ifdef CONFIG_SMP
+ for (loop = 0; loop < GxICR_NUM_IRQS; loop++)
+ GxICR(loop) = GxICR_LEVEL_6 | GxICR_DETECT;
+#else /* !CONFIG_SMP */
+ for (loop = 0; loop < NR_IRQS; loop++)
+ GxICR(loop) = GxICR_LEVEL_6 | GxICR_DETECT;
+#endif /* !CONFIG_SMP */
+
+ /* clear the timers */
+ TM0MD = 0;
+ TM1MD = 0;
+ TM2MD = 0;
+ TM3MD = 0;
+ TM4MD = 0;
+ TM5MD = 0;
+ TM6MD = 0;
+ TM6MDA = 0;
+ TM6MDB = 0;
+ TM7MD = 0;
+ TM8MD = 0;
+ TM9MD = 0;
+ TM10MD = 0;
+ TM11MD = 0;
+ TM12MD = 0;
+ TM13MD = 0;
+ TM14MD = 0;
+ TM15MD = 0;
+
+ calibrate_clock();
+}
+
+/*
+ * determine the memory size and base from the memory controller regs
+ */
+void __init get_mem_info(unsigned long *mem_base, unsigned long *mem_size)
+{
+ unsigned long memconf = MEMCONF;
+ unsigned long size = 0; /* order: MByte */
+
+ *mem_base = 0x90000000; /* fixed address */
+
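+ /* the low two bits of MEMCONF give the fitted chip density in Mbit;
+ * dividing by 8 gives MBytes per chip
+ */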
+ switch (memconf & 0x00000003) {
+ case 0x01:
+ size = 256 / 8; /* 256 Mbit per chip */
+ break;
+ case 0x02:
+ size = 512 / 8; /* 512 Mbit per chip */
+ break;
+ case 0x03:
+ size = 1024 / 8; /* 1 Gbit per chip */
+ break;
+ default:
+ panic("Invalid SDRAM size");
+ break;
+ }
+
+ printk(KERN_INFO "DDR2-SDRAM: %luMB x 2 @%08lx\n", size, *mem_base);
+
+ *mem_size = (size * 2) << 20;
+}
diff --git a/arch/mn10300/unit-asb2303/include/unit/clock.h b/arch/mn10300/unit-asb2303/include/unit/clock.h
index 2a0bf79ab968..0316907a012e 100644
--- a/arch/mn10300/unit-asb2303/include/unit/clock.h
+++ b/arch/mn10300/unit-asb2303/include/unit/clock.h
@@ -14,32 +14,11 @@
#ifndef __ASSEMBLY__
-#ifdef CONFIG_MN10300_RTC
-
-extern unsigned long mn10300_ioclk; /* IOCLK (crystal speed) in HZ */
-extern unsigned long mn10300_iobclk;
-extern unsigned long mn10300_tsc_per_HZ;
-
-#define MN10300_IOCLK mn10300_ioclk
-/* If this processors has a another clock, uncomment the below. */
-/* #define MN10300_IOBCLK mn10300_iobclk */
-
-#else /* !CONFIG_MN10300_RTC */
-
#define MN10300_IOCLK 33333333UL
/* #define MN10300_IOBCLK 66666666UL */
-#endif /* !CONFIG_MN10300_RTC */
-
-#define MN10300_JCCLK MN10300_IOCLK
-#define MN10300_TSCCLK MN10300_IOCLK
-
-#ifdef CONFIG_MN10300_RTC
-#define MN10300_TSC_PER_HZ mn10300_tsc_per_HZ
-#else /* !CONFIG_MN10300_RTC */
-#define MN10300_TSC_PER_HZ (MN10300_TSCCLK/HZ)
-#endif /* !CONFIG_MN10300_RTC */
-
#endif /* !__ASSEMBLY__ */
+#define MN10300_WDCLK MN10300_IOCLK
+
#endif /* _ASM_UNIT_CLOCK_H */
diff --git a/arch/mn10300/unit-asb2303/include/unit/serial.h b/arch/mn10300/unit-asb2303/include/unit/serial.h
index 047566cd2e36..991e356bac5f 100644
--- a/arch/mn10300/unit-asb2303/include/unit/serial.h
+++ b/arch/mn10300/unit-asb2303/include/unit/serial.h
@@ -22,6 +22,11 @@
#define SERIAL_IRQ XIRQ0 /* Dual serial (PC16552) (Hi) */
/*
+ * The ASB2303 has an 18.432 MHz clock for the UART
+ */
+#define BASE_BAUD (18432000 / 16)
+
+/*
* dispose of the /dev/ttyS0 and /dev/ttyS1 serial ports
*/
#ifndef CONFIG_GDBSTUB_ON_TTYSx
diff --git a/arch/mn10300/unit-asb2303/include/unit/timex.h b/arch/mn10300/unit-asb2303/include/unit/timex.h
index f206b63c95b4..cc18fe7d8b90 100644
--- a/arch/mn10300/unit-asb2303/include/unit/timex.h
+++ b/arch/mn10300/unit-asb2303/include/unit/timex.h
@@ -1,6 +1,6 @@
-/* ASB2303-specific timer specifcations
+/* ASB2303-specific timer specifications
*
- * Copyright (C) 2007 Red Hat, Inc. All Rights Reserved.
+ * Copyright (C) 2007, 2010 Red Hat, Inc. All Rights Reserved.
* Written by David Howells (dhowells@redhat.com)
*
* This program is free software; you can redistribute it and/or
@@ -17,67 +17,72 @@
#include <asm/timer-regs.h>
#include <unit/clock.h>
+#include <asm/param.h>
/*
* jiffies counter specifications
*/
#define TMJCBR_MAX 0xffff
-#define TMJCBC TM01BC
-
-#define TMJCMD TM01MD
-#define TMJCBR TM01BR
#define TMJCIRQ TM1IRQ
#define TMJCICR TM1ICR
-#define TMJCICR_LEVEL GxICR_LEVEL_5
#ifndef __ASSEMBLY__
-static inline void startup_jiffies_counter(void)
+#define MN10300_SRC_IOCLK MN10300_IOCLK
+
+#ifndef HZ
+# error HZ undeclared.
+#endif /* !HZ */
+/* use as little prescaling as possible to avoid losing accuracy */
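+/* a divider is accepted only if the rounded ticks-per-HZ count still fits in
+ * the 16-bit reload register (TMJCBR_MAX)
+ */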
+#if (MN10300_SRC_IOCLK + HZ / 2) / HZ - 1 <= TMJCBR_MAX
+# define IOCLK_PRESCALE 1
+# define JC_TIMER_CLKSRC TM0MD_SRC_IOCLK
+# define TSC_TIMER_CLKSRC TM4MD_SRC_IOCLK
+#elif (MN10300_SRC_IOCLK / 8 + HZ / 2) / HZ - 1 <= TMJCBR_MAX
+# define IOCLK_PRESCALE 8
+# define JC_TIMER_CLKSRC TM0MD_SRC_IOCLK_8
+# define TSC_TIMER_CLKSRC TM4MD_SRC_IOCLK_8
+#elif (MN10300_SRC_IOCLK / 32 + HZ / 2) / HZ - 1 <= TMJCBR_MAX
+# define IOCLK_PRESCALE 32
+# define JC_TIMER_CLKSRC TM0MD_SRC_IOCLK_32
+# define TSC_TIMER_CLKSRC TM4MD_SRC_IOCLK_32
+#else
+# error You lose.
+#endif
+
+#define MN10300_JCCLK (MN10300_SRC_IOCLK / IOCLK_PRESCALE)
+#define MN10300_TSCCLK (MN10300_SRC_IOCLK / IOCLK_PRESCALE)
+
+#define MN10300_JC_PER_HZ ((MN10300_JCCLK + HZ / 2) / HZ)
+#define MN10300_TSC_PER_HZ ((MN10300_TSCCLK + HZ / 2) / HZ)
+
+static inline void stop_jiffies_counter(void)
{
- unsigned rate;
- u16 md, t16;
-
- /* use as little prescaling as possible to avoid losing accuracy */
- md = TM0MD_SRC_IOCLK;
- rate = MN10300_JCCLK / HZ;
-
- if (rate > TMJCBR_MAX) {
- md = TM0MD_SRC_IOCLK_8;
- rate = MN10300_JCCLK / 8 / HZ;
-
- if (rate > TMJCBR_MAX) {
- md = TM0MD_SRC_IOCLK_32;
- rate = MN10300_JCCLK / 32 / HZ;
-
- if (rate > TMJCBR_MAX)
- BUG();
- }
- }
+ u16 tmp;
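+ /* the read back forces the mode write out to the timer before we return */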
+ TM01MD = JC_TIMER_CLKSRC | TM1MD_SRC_TM0CASCADE << 8;
+ tmp = TM01MD;
+}
- TMJCBR = rate - 1;
- t16 = TMJCBR;
+static inline void reload_jiffies_counter(u32 cnt)
+{
+ u32 tmp;
- TMJCMD =
- md |
- TM1MD_SRC_TM0CASCADE << 8 |
- TM0MD_INIT_COUNTER |
- TM1MD_INIT_COUNTER << 8;
+ TM01BR = cnt;
+ tmp = TM01BR;
- TMJCMD =
- md |
- TM1MD_SRC_TM0CASCADE << 8 |
- TM0MD_COUNT_ENABLE |
- TM1MD_COUNT_ENABLE << 8;
+ TM01MD = JC_TIMER_CLKSRC | \
+ TM1MD_SRC_TM0CASCADE << 8 | \
+ TM0MD_INIT_COUNTER | \
+ TM1MD_INIT_COUNTER << 8;
- t16 = TMJCMD;
- TMJCICR |= GxICR_ENABLE | GxICR_DETECT | GxICR_REQUEST;
- t16 = TMJCICR;
-}
+ TM01MD = JC_TIMER_CLKSRC | \
+ TM1MD_SRC_TM0CASCADE << 8 | \
+ TM0MD_COUNT_ENABLE | \
+ TM1MD_COUNT_ENABLE << 8;
-static inline void shutdown_jiffies_counter(void)
-{
+ tmp = TM01MD;
}
#endif /* !__ASSEMBLY__ */
@@ -94,29 +99,39 @@ static inline void shutdown_jiffies_counter(void)
static inline void startup_timestamp_counter(void)
{
+ u32 t32;
+
/* set up timer 4 & 5 cascaded as a 32-bit counter to count real time
* - count down from 4Gig-1 to 0 and wrap at IOCLK rate
*/
TM45BR = TMTSCBR_MAX;
+ t32 = TM45BR;
- TM4MD = TM4MD_SRC_IOCLK;
+ TM4MD = TSC_TIMER_CLKSRC;
TM4MD |= TM4MD_INIT_COUNTER;
TM4MD &= ~TM4MD_INIT_COUNTER;
TM4ICR = 0;
+ t32 = TM4ICR;
TM5MD = TM5MD_SRC_TM4CASCADE;
TM5MD |= TM5MD_INIT_COUNTER;
TM5MD &= ~TM5MD_INIT_COUNTER;
TM5ICR = 0;
+ t32 = TM5ICR;
TM5MD |= TM5MD_COUNT_ENABLE;
TM4MD |= TM4MD_COUNT_ENABLE;
+ t32 = TM5MD;
+ t32 = TM4MD;
}
static inline void shutdown_timestamp_counter(void)
{
+ u8 t8;
TM4MD = 0;
TM5MD = 0;
+ t8 = TM4MD;
+ t8 = TM5MD;
}
/*
@@ -127,7 +142,7 @@ typedef unsigned long cycles_t;
static inline cycles_t read_timestamp_counter(void)
{
- return (cycles_t)TMTSCBC;
+ return (cycles_t)~TMTSCBC;
}
#endif /* !__ASSEMBLY__ */
diff --git a/arch/mn10300/unit-asb2303/unit-init.c b/arch/mn10300/unit-asb2303/unit-init.c
index 70e8cb4ea266..834a76aa551a 100644
--- a/arch/mn10300/unit-asb2303/unit-init.c
+++ b/arch/mn10300/unit-asb2303/unit-init.c
@@ -31,6 +31,14 @@ asmlinkage void __init unit_init(void)
SET_XIRQ_TRIGGER(3, XIRQ_TRIGGER_HILEVEL);
SET_XIRQ_TRIGGER(4, XIRQ_TRIGGER_LOWLEVEL);
SET_XIRQ_TRIGGER(5, XIRQ_TRIGGER_LOWLEVEL);
+
+#ifdef CONFIG_EXT_SERIAL_IRQ_LEVEL
+ set_intr_level(XIRQ0, NUM2GxICR_LEVEL(CONFIG_EXT_SERIAL_IRQ_LEVEL));
+#endif
+
+#ifdef CONFIG_ETHERNET_IRQ_LEVEL
+ set_intr_level(XIRQ3, NUM2GxICR_LEVEL(CONFIG_ETHERNET_IRQ_LEVEL));
+#endif
}
/*
@@ -51,7 +59,7 @@ void __init unit_init_IRQ(void)
switch (GET_XIRQ_TRIGGER(extnum)) {
case XIRQ_TRIGGER_HILEVEL:
case XIRQ_TRIGGER_LOWLEVEL:
- set_intr_postackable(XIRQ2IRQ(extnum));
+ mn10300_set_lateack_irq_type(XIRQ2IRQ(extnum));
break;
default:
break;
diff --git a/arch/mn10300/unit-asb2305/include/unit/clock.h b/arch/mn10300/unit-asb2305/include/unit/clock.h
index 67be3f2eb18e..29e3425431cf 100644
--- a/arch/mn10300/unit-asb2305/include/unit/clock.h
+++ b/arch/mn10300/unit-asb2305/include/unit/clock.h
@@ -14,32 +14,11 @@
#ifndef __ASSEMBLY__
-#ifdef CONFIG_MN10300_RTC
-
-extern unsigned long mn10300_ioclk; /* IOCLK (crystal speed) in HZ */
-extern unsigned long mn10300_iobclk;
-extern unsigned long mn10300_tsc_per_HZ;
-
-#define MN10300_IOCLK mn10300_ioclk
-/* If this processors has a another clock, uncomment the below. */
-/* #define MN10300_IOBCLK mn10300_iobclk */
-
-#else /* !CONFIG_MN10300_RTC */
-
#define MN10300_IOCLK 33333333UL
/* #define MN10300_IOBCLK 66666666UL */
-#endif /* !CONFIG_MN10300_RTC */
-
-#define MN10300_JCCLK MN10300_IOCLK
-#define MN10300_TSCCLK MN10300_IOCLK
-
-#ifdef CONFIG_MN10300_RTC
-#define MN10300_TSC_PER_HZ mn10300_tsc_per_HZ
-#else /* !CONFIG_MN10300_RTC */
-#define MN10300_TSC_PER_HZ (MN10300_TSCCLK/HZ)
-#endif /* !CONFIG_MN10300_RTC */
-
#endif /* !__ASSEMBLY__ */
+#define MN10300_WDCLK MN10300_IOCLK
+
#endif /* _ASM_UNIT_CLOCK_H */
diff --git a/arch/mn10300/unit-asb2305/include/unit/serial.h b/arch/mn10300/unit-asb2305/include/unit/serial.h
index 8086cc092cec..88c08219315f 100644
--- a/arch/mn10300/unit-asb2305/include/unit/serial.h
+++ b/arch/mn10300/unit-asb2305/include/unit/serial.h
@@ -21,6 +21,11 @@
#define SERIAL_IRQ XIRQ0 /* Dual serial (PC16552) (Hi) */
/*
+ * The ASB2305 has an 18.432 MHz clock for the UART
+ */
+#define BASE_BAUD (18432000 / 16)
+
+/*
* dispose of the /dev/ttyS0 serial port
*/
#ifndef CONFIG_GDBSTUB_ON_TTYSx
diff --git a/arch/mn10300/unit-asb2305/include/unit/timex.h b/arch/mn10300/unit-asb2305/include/unit/timex.h
index d1c72d59fa9f..758af30d1a16 100644
--- a/arch/mn10300/unit-asb2305/include/unit/timex.h
+++ b/arch/mn10300/unit-asb2305/include/unit/timex.h
@@ -1,6 +1,6 @@
-/* ASB2305 timer specifcations
+/* ASB2305-specific timer specifications
*
- * Copyright (C) 2007 Red Hat, Inc. All Rights Reserved.
+ * Copyright (C) 2007, 2010 Red Hat, Inc. All Rights Reserved.
* Written by David Howells (dhowells@redhat.com)
*
* This program is free software; you can redistribute it and/or
@@ -17,67 +17,72 @@
#include <asm/timer-regs.h>
#include <unit/clock.h>
+#include <asm/param.h>
/*
* jiffies counter specifications
*/
#define TMJCBR_MAX 0xffff
-#define TMJCBC TM01BC
-
-#define TMJCMD TM01MD
-#define TMJCBR TM01BR
#define TMJCIRQ TM1IRQ
#define TMJCICR TM1ICR
-#define TMJCICR_LEVEL GxICR_LEVEL_5
#ifndef __ASSEMBLY__
-static inline void startup_jiffies_counter(void)
+#define MN10300_SRC_IOCLK MN10300_IOCLK
+
+#ifndef HZ
+# error HZ undeclared.
+#endif /* !HZ */
+/* use as little prescaling as possible to avoid losing accuracy */
+#if (MN10300_SRC_IOCLK + HZ / 2) / HZ - 1 <= TMJCBR_MAX
+# define IOCLK_PRESCALE 1
+# define JC_TIMER_CLKSRC TM0MD_SRC_IOCLK
+# define TSC_TIMER_CLKSRC TM4MD_SRC_IOCLK
+#elif (MN10300_SRC_IOCLK / 8 + HZ / 2) / HZ - 1 <= TMJCBR_MAX
+# define IOCLK_PRESCALE 8
+# define JC_TIMER_CLKSRC TM0MD_SRC_IOCLK_8
+# define TSC_TIMER_CLKSRC TM4MD_SRC_IOCLK_8
+#elif (MN10300_SRC_IOCLK / 32 + HZ / 2) / HZ - 1 <= TMJCBR_MAX
+# define IOCLK_PRESCALE 32
+# define JC_TIMER_CLKSRC TM0MD_SRC_IOCLK_32
+# define TSC_TIMER_CLKSRC TM4MD_SRC_IOCLK_32
+#else
+# error You lose.
+#endif
+
+#define MN10300_JCCLK (MN10300_SRC_IOCLK / IOCLK_PRESCALE)
+#define MN10300_TSCCLK (MN10300_SRC_IOCLK / IOCLK_PRESCALE)
+
+#define MN10300_JC_PER_HZ ((MN10300_JCCLK + HZ / 2) / HZ)
+#define MN10300_TSC_PER_HZ ((MN10300_TSCCLK + HZ / 2) / HZ)
+
+static inline void stop_jiffies_counter(void)
{
- unsigned rate;
- u16 md, t16;
-
- /* use as little prescaling as possible to avoid losing accuracy */
- md = TM0MD_SRC_IOCLK;
- rate = MN10300_JCCLK / HZ;
-
- if (rate > TMJCBR_MAX) {
- md = TM0MD_SRC_IOCLK_8;
- rate = MN10300_JCCLK / 8 / HZ;
-
- if (rate > TMJCBR_MAX) {
- md = TM0MD_SRC_IOCLK_32;
- rate = MN10300_JCCLK / 32 / HZ;
-
- if (rate > TMJCBR_MAX)
- BUG();
- }
- }
+ u16 tmp;
+ TM01MD = JC_TIMER_CLKSRC | TM1MD_SRC_TM0CASCADE << 8;
+ tmp = TM01MD;
+}
- TMJCBR = rate - 1;
- t16 = TMJCBR;
+static inline void reload_jiffies_counter(u32 cnt)
+{
+ u32 tmp;
- TMJCMD =
- md |
- TM1MD_SRC_TM0CASCADE << 8 |
- TM0MD_INIT_COUNTER |
- TM1MD_INIT_COUNTER << 8;
+ TM01BR = cnt;
+ tmp = TM01BR;
- TMJCMD =
- md |
- TM1MD_SRC_TM0CASCADE << 8 |
- TM0MD_COUNT_ENABLE |
- TM1MD_COUNT_ENABLE << 8;
+ TM01MD = JC_TIMER_CLKSRC | \
+ TM1MD_SRC_TM0CASCADE << 8 | \
+ TM0MD_INIT_COUNTER | \
+ TM1MD_INIT_COUNTER << 8;
- t16 = TMJCMD;
- TMJCICR |= GxICR_ENABLE | GxICR_DETECT | GxICR_REQUEST;
- t16 = TMJCICR;
-}
+ TM01MD = JC_TIMER_CLKSRC | \
+ TM1MD_SRC_TM0CASCADE << 8 | \
+ TM0MD_COUNT_ENABLE | \
+ TM1MD_COUNT_ENABLE << 8;
-static inline void shutdown_jiffies_counter(void)
-{
+ tmp = TM01MD;
}
#endif /* !__ASSEMBLY__ */
@@ -94,29 +99,39 @@ static inline void shutdown_jiffies_counter(void)
static inline void startup_timestamp_counter(void)
{
+ u32 t32;
+
/* set up timer 4 & 5 cascaded as a 32-bit counter to count real time
* - count down from 4Gig-1 to 0 and wrap at IOCLK rate
*/
TM45BR = TMTSCBR_MAX;
+ t32 = TM45BR;
- TM4MD = TM4MD_SRC_IOCLK;
+ TM4MD = TSC_TIMER_CLKSRC;
TM4MD |= TM4MD_INIT_COUNTER;
TM4MD &= ~TM4MD_INIT_COUNTER;
TM4ICR = 0;
+ t32 = TM4ICR;
TM5MD = TM5MD_SRC_TM4CASCADE;
TM5MD |= TM5MD_INIT_COUNTER;
TM5MD &= ~TM5MD_INIT_COUNTER;
TM5ICR = 0;
+ t32 = TM5ICR;
TM5MD |= TM5MD_COUNT_ENABLE;
TM4MD |= TM4MD_COUNT_ENABLE;
+ t32 = TM5MD;
+ t32 = TM4MD;
}
static inline void shutdown_timestamp_counter(void)
{
+ u8 t8;
TM4MD = 0;
TM5MD = 0;
+ t8 = TM4MD;
+ t8 = TM5MD;
}
/*
@@ -127,7 +142,7 @@ typedef unsigned long cycles_t;
static inline cycles_t read_timestamp_counter(void)
{
- return (cycles_t) TMTSCBC;
+ return (cycles_t)~TMTSCBC;
}
#endif /* !__ASSEMBLY__ */
diff --git a/arch/mn10300/unit-asb2305/pci-asb2305.c b/arch/mn10300/unit-asb2305/pci-asb2305.c
index 45b40ac6c464..8e6763e6f250 100644
--- a/arch/mn10300/unit-asb2305/pci-asb2305.c
+++ b/arch/mn10300/unit-asb2305/pci-asb2305.c
@@ -93,7 +93,7 @@ static void __init pcibios_allocate_bus_resources(struct list_head *bus_list)
struct pci_bus *bus;
struct pci_dev *dev;
int idx;
- struct resource *r, *pr;
+ struct resource *r;
/* Depth-First Search on bus tree */
list_for_each_entry(bus, bus_list, node) {
@@ -105,10 +105,8 @@ static void __init pcibios_allocate_bus_resources(struct list_head *bus_list)
r = &dev->resource[idx];
if (!r->flags)
continue;
- pr = pci_find_parent_resource(dev, r);
if (!r->start ||
- !pr ||
- request_resource(pr, r) < 0) {
+ pci_claim_resource(dev, idx) < 0) {
printk(KERN_ERR "PCI:"
" Cannot allocate resource"
" region %d of bridge %s\n",
@@ -131,7 +129,7 @@ static void __init pcibios_allocate_resources(int pass)
struct pci_dev *dev = NULL;
int idx, disabled;
u16 command;
- struct resource *r, *pr;
+ struct resource *r;
for_each_pci_dev(dev) {
pci_read_config_word(dev, PCI_COMMAND, &command);
@@ -150,8 +148,7 @@ static void __init pcibios_allocate_resources(int pass)
" (f=%lx, d=%d, p=%d)\n",
pci_name(dev), r->start, r->end, r->flags,
disabled, pass);
- pr = pci_find_parent_resource(dev, r);
- if (!pr || request_resource(pr, r) < 0) {
+ if (pci_claim_resource(dev, idx) < 0) {
printk(KERN_ERR "PCI:"
" Cannot allocate resource"
" region %d of device %s\n",
@@ -184,7 +181,7 @@ static void __init pcibios_allocate_resources(int pass)
static int __init pcibios_assign_resources(void)
{
struct pci_dev *dev = NULL;
- struct resource *r, *pr;
+ struct resource *r;
if (!(pci_probe & PCI_ASSIGN_ROMS)) {
/* Try to use BIOS settings for ROMs, otherwise let
@@ -194,8 +191,7 @@ static int __init pcibios_assign_resources(void)
r = &dev->resource[PCI_ROM_RESOURCE];
if (!r->flags || !r->start)
continue;
- pr = pci_find_parent_resource(dev, r);
- if (!pr || request_resource(pr, r) < 0) {
+ if (pci_claim_resource(dev, PCI_ROM_RESOURCE) < 0) {
r->end -= r->start;
r->start = 0;
}
diff --git a/arch/mn10300/unit-asb2305/pci.c b/arch/mn10300/unit-asb2305/pci.c
index 6d8720a0a599..a4954fe82094 100644
--- a/arch/mn10300/unit-asb2305/pci.c
+++ b/arch/mn10300/unit-asb2305/pci.c
@@ -503,7 +503,7 @@ asmlinkage void __init unit_pci_init(void)
struct pci_ops *o = &pci_direct_ampci;
u32 x;
- set_intr_level(XIRQ1, GxICR_LEVEL_3);
+ set_intr_level(XIRQ1, NUM2GxICR_LEVEL(CONFIG_PCI_IRQ_LEVEL));
memset(&bus, 0, sizeof(bus));
diff --git a/arch/mn10300/unit-asb2305/unit-init.c b/arch/mn10300/unit-asb2305/unit-init.c
index a76c8e0ab90f..e1becd6b7571 100644
--- a/arch/mn10300/unit-asb2305/unit-init.c
+++ b/arch/mn10300/unit-asb2305/unit-init.c
@@ -26,8 +26,10 @@ asmlinkage void __init unit_init(void)
{
#ifndef CONFIG_GDBSTUB_ON_TTYSx
/* set the 16550 interrupt line to level 3 if not being used for GDB */
- set_intr_level(XIRQ0, GxICR_LEVEL_3);
+#ifdef CONFIG_EXT_SERIAL_IRQ_LEVEL
+ set_intr_level(XIRQ0, NUM2GxICR_LEVEL(CONFIG_EXT_SERIAL_IRQ_LEVEL));
#endif
+#endif /* CONFIG_GDBSTUB_ON_TTYSx */
}
/*
@@ -51,7 +53,7 @@ void __init unit_init_IRQ(void)
switch (GET_XIRQ_TRIGGER(extnum)) {
case XIRQ_TRIGGER_HILEVEL:
case XIRQ_TRIGGER_LOWLEVEL:
- set_intr_postackable(XIRQ2IRQ(extnum));
+ mn10300_set_lateack_irq_type(XIRQ2IRQ(extnum));
break;
default:
break;
diff --git a/arch/mn10300/unit-asb2364/Makefile b/arch/mn10300/unit-asb2364/Makefile
new file mode 100644
index 000000000000..b3263ecfc4ff
--- /dev/null
+++ b/arch/mn10300/unit-asb2364/Makefile
@@ -0,0 +1,12 @@
+#
+# Makefile for the linux kernel.
+#
+# Note! Dependencies are done automagically by 'make dep', which also
+# removes any old dependencies. DON'T put your own dependencies here
+# unless it's something special (ie not a .c file).
+#
+# Note 2! The CFLAGS definitions are now in the main makefile...
+
+obj-y := unit-init.o leds.o irq-fpga.o
+
+obj-$(CONFIG_SMSC911X) += smsc911x.o
diff --git a/arch/mn10300/unit-asb2364/include/unit/clock.h b/arch/mn10300/unit-asb2364/include/unit/clock.h
new file mode 100644
index 000000000000..d34ac9a7508b
--- /dev/null
+++ b/arch/mn10300/unit-asb2364/include/unit/clock.h
@@ -0,0 +1,29 @@
+/* clock.h: unit-specific clocks
+ *
+ * Copyright (C) 2002 Red Hat, Inc. All Rights Reserved.
+ * Written by David Howells (dhowells@redhat.com)
+ *
+ * Modified by Matsushita Electric Industrial Co., Ltd.
+ * Modifications:
+ * 23-Feb-2007 MEI Add define for watchdog timer.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+
+#ifndef _ASM_UNIT_CLOCK_H
+#define _ASM_UNIT_CLOCK_H
+
+#ifndef __ASSEMBLY__
+
+#define MN10300_IOCLK 100000000UL /* for DDR800 */
+/*#define MN10300_IOCLK 83333333UL */ /* for DDR667 */
+#define MN10300_IOBCLK MN10300_IOCLK /* IOBCLK is equal to IOCLK */
+
+#endif /* !__ASSEMBLY__ */
+
+#define MN10300_WDCLK 27000000UL
+
+#endif /* _ASM_UNIT_CLOCK_H */
diff --git a/arch/mn10300/unit-asb2364/include/unit/fpga-regs.h b/arch/mn10300/unit-asb2364/include/unit/fpga-regs.h
new file mode 100644
index 000000000000..7cf12054db65
--- /dev/null
+++ b/arch/mn10300/unit-asb2364/include/unit/fpga-regs.h
@@ -0,0 +1,52 @@
+/* ASB2364 FPGA registers
+ */
+
+#ifndef _ASM_UNIT_FPGA_REGS_H
+#define _ASM_UNIT_FPGA_REGS_H
+
+#include <asm/cpu-regs.h>
+
+#ifdef __KERNEL__
+
+#define ASB2364_FPGA_REG_RESET_LAN __SYSREG(0xa9001300, u16)
+#define ASB2364_FPGA_REG_RESET_UART __SYSREG(0xa9001304, u16)
+#define ASB2364_FPGA_REG_RESET_I2C __SYSREG(0xa9001308, u16)
+#define ASB2364_FPGA_REG_RESET_USB __SYSREG(0xa900130c, u16)
+#define ASB2364_FPGA_REG_RESET_AV __SYSREG(0xa9001310, u16)
+
+#define ASB2364_FPGA_REG_IRQ(X) __SYSREG(0xa9001590+((X)*4), u16)
+#define ASB2364_FPGA_REG_IRQ_LAN ASB2364_FPGA_REG_IRQ(0)
+#define ASB2364_FPGA_REG_IRQ_UART ASB2364_FPGA_REG_IRQ(1)
+#define ASB2364_FPGA_REG_IRQ_I2C ASB2364_FPGA_REG_IRQ(2)
+#define ASB2364_FPGA_REG_IRQ_USB ASB2364_FPGA_REG_IRQ(3)
+#define ASB2364_FPGA_REG_IRQ_FPGA ASB2364_FPGA_REG_IRQ(5)
+
+#define ASB2364_FPGA_REG_MASK(X) __SYSREG(0xa9001590+((X)*4), u16)
+#define ASB2364_FPGA_REG_MASK_LAN ASB2364_FPGA_REG_MASK(0)
+#define ASB2364_FPGA_REG_MASK_UART ASB2364_FPGA_REG_MASK(1)
+#define ASB2364_FPGA_REG_MASK_I2C ASB2364_FPGA_REG_MASK(2)
+#define ASB2364_FPGA_REG_MASK_USB ASB2364_FPGA_REG_MASK(3)
+#define ASB2364_FPGA_REG_MASK_FPGA ASB2364_FPGA_REG_MASK(5)
+
+#define ASB2364_FPGA_REG_CPLD5_SET1 __SYSREG(0xa9002500, u16)
+#define ASB2364_FPGA_REG_CPLD5_SET2 __SYSREG(0xa9002504, u16)
+#define ASB2364_FPGA_REG_CPLD6_SET1 __SYSREG(0xa9002600, u16)
+#define ASB2364_FPGA_REG_CPLD6_SET2 __SYSREG(0xa9002604, u16)
+#define ASB2364_FPGA_REG_CPLD7_SET1 __SYSREG(0xa9002700, u16)
+#define ASB2364_FPGA_REG_CPLD7_SET2 __SYSREG(0xa9002704, u16)
+#define ASB2364_FPGA_REG_CPLD8_SET1 __SYSREG(0xa9002800, u16)
+#define ASB2364_FPGA_REG_CPLD8_SET2 __SYSREG(0xa9002804, u16)
+#define ASB2364_FPGA_REG_CPLD9_SET1 __SYSREG(0xa9002900, u16)
+#define ASB2364_FPGA_REG_CPLD9_SET2 __SYSREG(0xa9002904, u16)
+#define ASB2364_FPGA_REG_CPLD10_SET1 __SYSREG(0xa9002a00, u16)
+#define ASB2364_FPGA_REG_CPLD10_SET2 __SYSREG(0xa9002a04, u16)
+
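+/* dummy read from the external bus region; callers use it after FPGA register
+ * writes so that the writes complete before execution continues
+ */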
+#define SyncExBus() \
+ do { \
+ unsigned short w; \
+ w = *(volatile short *)0xa9000000; \
+ } while (0)
+
+#endif /* __KERNEL__ */
+
+#endif /* _ASM_UNIT_FPGA_REGS_H */
diff --git a/arch/mn10300/unit-asb2364/include/unit/irq.h b/arch/mn10300/unit-asb2364/include/unit/irq.h
new file mode 100644
index 000000000000..786148e46565
--- /dev/null
+++ b/arch/mn10300/unit-asb2364/include/unit/irq.h
@@ -0,0 +1,35 @@
+/* ASB2364 FPGA irq numbers
+ *
+ * Copyright (C) 2010 Red Hat, Inc. All Rights Reserved.
+ * Written by David Howells (dhowells@redhat.com)
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public Licence
+ * as published by the Free Software Foundation; either version
+ * 2 of the Licence, or (at your option) any later version.
+ */
+#ifndef _UNIT_IRQ_H
+#define _UNIT_IRQ_H
+
+#ifndef __ASSEMBLY__
+
+#ifdef CONFIG_SMP
+#define NR_CPU_IRQS GxICR_NUM_EXT_IRQS
+#else
+#define NR_CPU_IRQS GxICR_NUM_IRQS
+#endif
+
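+/* interrupt sources cascaded through the FPGA are numbered immediately
+ * after the CPU's own IRQs
+ */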
+enum {
+ FPGA_LAN_IRQ = NR_CPU_IRQS,
+ FPGA_UART_IRQ,
+ FPGA_I2C_IRQ,
+ FPGA_USB_IRQ,
+ FPGA_RESERVED_IRQ,
+ FPGA_FPGA_IRQ,
+ NR_IRQS
+};
+
+extern void __init irq_fpga_init(void);
+
+#endif /* !__ASSEMBLY__ */
+#endif /* _UNIT_IRQ_H */
diff --git a/arch/mn10300/unit-asb2364/include/unit/leds.h b/arch/mn10300/unit-asb2364/include/unit/leds.h
new file mode 100644
index 000000000000..03a3933ad323
--- /dev/null
+++ b/arch/mn10300/unit-asb2364/include/unit/leds.h
@@ -0,0 +1,54 @@
+/* Unit-specific leds
+ *
+ * Copyright (C) 2005 Red Hat, Inc. All Rights Reserved.
+ * Written by David Howells (dhowells@redhat.com)
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+
+#ifndef _ASM_UNIT_LEDS_H
+#define _ASM_UNIT_LEDS_H
+
+#include <asm/pio-regs.h>
+#include <asm/cpu-regs.h>
+#include <asm/exceptions.h>
+
+#define MN10300_USE_7SEGLEDS 0
+
+#define ASB2364_7SEGLEDS __SYSREG(0xA9001630, u32)
+
+/*
+ * use the 7-segment LEDs to indicate states
+ */
+
+#if MN10300_USE_7SEGLEDS
+/* flip the 7-segment LEDs between "Gdb-" and "----" */
+#define mn10300_set_gdbleds(ONOFF) \
+ do { \
+ ASB2364_7SEGLEDS = (ONOFF) ? 0x8543077f : 0x7f7f7f7f; \
+ } while (0)
+#else
+#define mn10300_set_gdbleds(ONOFF) do {} while (0)
+#endif
+
+#if MN10300_USE_7SEGLEDS
+/* indicate double-fault by displaying "db-f" on the LEDs */
+#define mn10300_set_dbfleds \
+ mov 0x43077f1d,d0 ; \
+ mov d0,(ASB2364_7SEGLEDS)
+#else
+#define mn10300_set_dbfleds
+#endif
+
+#ifndef __ASSEMBLY__
+extern void peripheral_leds_display_exception(enum exception_code);
+extern void peripheral_leds_led_chase(void);
+extern void peripheral_leds7x4_display_dec(unsigned int, unsigned int);
+extern void peripheral_leds7x4_display_hex(unsigned int, unsigned int);
+extern void debug_to_serial(const char *, int);
+#endif /* __ASSEMBLY__ */
+
+#endif /* _ASM_UNIT_LEDS_H */
diff --git a/arch/mn10300/unit-asb2364/include/unit/serial.h b/arch/mn10300/unit-asb2364/include/unit/serial.h
new file mode 100644
index 000000000000..7f048bbfdfd7
--- /dev/null
+++ b/arch/mn10300/unit-asb2364/include/unit/serial.h
@@ -0,0 +1,151 @@
+/* Unit-specific 8250 serial ports
+ *
+ * Copyright (C) 2005 Red Hat, Inc. All Rights Reserved.
+ * Written by David Howells (dhowells@redhat.com)
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+
+#ifndef _ASM_UNIT_SERIAL_H
+#define _ASM_UNIT_SERIAL_H
+
+#include <asm/cpu-regs.h>
+#include <proc/irq.h>
+#include <unit/fpga-regs.h>
+#include <linux/serial_reg.h>
+
+#define SERIAL_PORT0_BASE_ADDRESS 0xA8200000
+
+#define SERIAL_IRQ XIRQ1 /* single serial (TL16C550C) (Lo) */
+
+/*
+ * The ASB2364 has a 12.288 MHz clock
+ * for your UART.
+ *
+ * It'd be nice if someone built a serial card with a 24.576 MHz
+ * clock, since the 16550A is capable of handling a top speed of 1.5
+ * megabits/second; but this requires the faster clock.
+ */
+#define BASE_BAUD (12288000 / 16)
+
+/*
+ * dispose of the /dev/ttyS0 and /dev/ttyS1 serial ports
+ */
+#ifndef CONFIG_GDBSTUB_ON_TTYSx
+
+#define SERIAL_PORT_DFNS \
+ { \
+ .baud_base = BASE_BAUD, \
+ .irq = SERIAL_IRQ, \
+ .flags = STD_COM_FLAGS, \
+ .iomem_base = (u8 *) SERIAL_PORT0_BASE_ADDRESS, \
+ .iomem_reg_shift = 1, \
+ .io_type = SERIAL_IO_MEM, \
+ },
+
+#ifndef __ASSEMBLY__
+
+static inline void __debug_to_serial(const char *p, int n)
+{
+}
+
+#endif /* !__ASSEMBLY__ */
+
+#else /* CONFIG_GDBSTUB_ON_TTYSx */
+
+#define SERIAL_PORT_DFNS /* stolen by gdb-stub */
+
+#if defined(CONFIG_GDBSTUB_ON_TTYS0)
+#define GDBPORT_SERIAL_RX __SYSREG(SERIAL_PORT0_BASE_ADDRESS + UART_RX * 4, u8)
+#define GDBPORT_SERIAL_TX __SYSREG(SERIAL_PORT0_BASE_ADDRESS + UART_TX * 4, u8)
+#define GDBPORT_SERIAL_DLL __SYSREG(SERIAL_PORT0_BASE_ADDRESS + UART_DLL * 4, u8)
+#define GDBPORT_SERIAL_DLM __SYSREG(SERIAL_PORT0_BASE_ADDRESS + UART_DLM * 4, u8)
+#define GDBPORT_SERIAL_IER __SYSREG(SERIAL_PORT0_BASE_ADDRESS + UART_IER * 4, u8)
+#define GDBPORT_SERIAL_IIR __SYSREG(SERIAL_PORT0_BASE_ADDRESS + UART_IIR * 4, u8)
+#define GDBPORT_SERIAL_FCR __SYSREG(SERIAL_PORT0_BASE_ADDRESS + UART_FCR * 4, u8)
+#define GDBPORT_SERIAL_LCR __SYSREG(SERIAL_PORT0_BASE_ADDRESS + UART_LCR * 4, u8)
+#define GDBPORT_SERIAL_MCR __SYSREG(SERIAL_PORT0_BASE_ADDRESS + UART_MCR * 4, u8)
+#define GDBPORT_SERIAL_LSR __SYSREG(SERIAL_PORT0_BASE_ADDRESS + UART_LSR * 4, u8)
+#define GDBPORT_SERIAL_MSR __SYSREG(SERIAL_PORT0_BASE_ADDRESS + UART_MSR * 4, u8)
+#define GDBPORT_SERIAL_SCR __SYSREG(SERIAL_PORT0_BASE_ADDRESS + UART_SCR * 4, u8)
+#define GDBPORT_SERIAL_IRQ SERIAL_IRQ
+
+#elif defined(CONFIG_GDBSTUB_ON_TTYS1)
+#error The ASB2364 does not have a /dev/ttyS1
+#endif
+
+#ifndef __ASSEMBLY__
+
+static inline void __debug_to_serial(const char *p, int n)
+{
+ char ch;
+
+#define LSR_WAIT_FOR(STATE) \
+ do {} while (!(GDBPORT_SERIAL_LSR & UART_LSR_##STATE))
+#define FLOWCTL_QUERY(LINE) \
+ ({ GDBPORT_SERIAL_MSR & UART_MSR_##LINE; })
+#define FLOWCTL_WAIT_FOR(LINE) \
+ do {} while (!(GDBPORT_SERIAL_MSR & UART_MSR_##LINE))
+#define FLOWCTL_CLEAR(LINE) \
+ do { GDBPORT_SERIAL_MCR &= ~UART_MCR_##LINE; } while (0)
+#define FLOWCTL_SET(LINE) \
+ do { GDBPORT_SERIAL_MCR |= UART_MCR_##LINE; } while (0)
+
+ FLOWCTL_SET(DTR);
+
+ for (; n > 0; n--) {
+ LSR_WAIT_FOR(THRE);
+ FLOWCTL_WAIT_FOR(CTS);
+
+ ch = *p++;
+ if (ch == 0x0a) {
+ GDBPORT_SERIAL_TX = 0x0d;
+ LSR_WAIT_FOR(THRE);
+ FLOWCTL_WAIT_FOR(CTS);
+ }
+ GDBPORT_SERIAL_TX = ch;
+ }
+
+ FLOWCTL_CLEAR(DTR);
+}
+
+#endif /* !__ASSEMBLY__ */
+
+#endif /* CONFIG_GDBSTUB_ON_TTYSx */
+
+#define SERIAL_INITIALIZE \
+do { \
+ /* release reset */ \
+ ASB2364_FPGA_REG_RESET_UART = 0x0001; \
+ SyncExBus(); \
+} while (0)
+
+#define SERIAL_CHECK_INTERRUPT \
+do { \
+ if ((ASB2364_FPGA_REG_IRQ_UART & 0x0001) == 0x0001) { \
+ return IRQ_NONE; \
+ } \
+} while (0)
+
+#define SERIAL_CLEAR_INTERRUPT \
+do { \
+ ASB2364_FPGA_REG_IRQ_UART = 0x0001; \
+ SyncExBus(); \
+} while (0)
+
+#define SERIAL_SET_INT_MASK \
+do { \
+ ASB2364_FPGA_REG_MASK_UART = 0x0001; \
+ SyncExBus(); \
+} while (0)
+
+#define SERIAL_CLEAR_INT_MASK \
+do { \
+ ASB2364_FPGA_REG_MASK_UART = 0x0000; \
+ SyncExBus(); \
+} while (0)
+
+#endif /* _ASM_UNIT_SERIAL_H */
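An aside on the BASE_BAUD value defined above, not part of the patch: the standard 16550 convention is the UART input clock divided by 16, so the 12.288 MHz crystal gives 768000, and a requested line speed maps to a divisor-latch value of roughly BASE_BAUD / baud. A minimal user-space sketch of that arithmetic follows; the helper name is invented for illustration.

/* sketch only: derive a 16550 divisor-latch value from BASE_BAUD */
#include <stdio.h>

#define UART_CLOCK_HZ 12288000u            /* ASB2364 UART crystal */
#define BASE_BAUD (UART_CLOCK_HZ / 16)     /* 768000, as in serial.h */

static unsigned int uart_divisor(unsigned int baud)
{
	return (BASE_BAUD + baud / 2) / baud;  /* round to nearest */
}

int main(void)
{
	printf("divisor for 9600 baud: %u\n", uart_divisor(9600));    /* 80 */
	printf("divisor for 38400 baud: %u\n", uart_divisor(38400));  /* 20 */
	return 0;
}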
diff --git a/arch/mn10300/unit-asb2364/include/unit/smsc911x.h b/arch/mn10300/unit-asb2364/include/unit/smsc911x.h
new file mode 100644
index 000000000000..4c1ede535fa9
--- /dev/null
+++ b/arch/mn10300/unit-asb2364/include/unit/smsc911x.h
@@ -0,0 +1,171 @@
+/* Support for the SMSC911x NIC
+ *
+ * Copyright (C) 2006 Matsushita Electric Industrial Co., Ltd.
+ * All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+#ifndef _ASM_UNIT_SMSC911X_H
+#define _ASM_UNIT_SMSC911X_H
+
+#include <linux/netdevice.h>
+#include <proc/irq.h>
+#include <unit/fpga-regs.h>
+
+#define MN10300_USE_EXT_EEPROM
+
+
+#define SMSC911X_BASE 0xA8000000UL
+#define SMSC911X_BASE_END 0xA8000100UL
+#define SMSC911X_IRQ FPGA_LAN_IRQ
+
+/*
+ * Allow the FPGA to be initialised by the SMSC911x driver
+ */
+#undef SMSC_INITIALIZE
+#define SMSC_INITIALIZE() \
+do { \
+ /* release reset */ \
+ ASB2364_FPGA_REG_RESET_LAN = 0x0001; \
+ SyncExBus(); \
+} while (0)
+
+#ifdef MN10300_USE_EXT_EEPROM
+#include <linux/delay.h>
+#include <unit/clock.h>
+
+#define EEPROM_ADDRESS 0xA0
+#define MAC_OFFSET 0x0008
+#define USE_IIC_CH 0 /* 0 or 1 */
+#define IIC_OFFSET (0x80000 * USE_IIC_CH)
+#define IIC_DTRM __SYSREG(0xd8400000 + IIC_OFFSET, u32)
+#define IIC_DREC __SYSREG(0xd8400004 + IIC_OFFSET, u32)
+#define IIC_MYADD __SYSREG(0xd8400008 + IIC_OFFSET, u32)
+#define IIC_CLK __SYSREG(0xd840000c + IIC_OFFSET, u32)
+#define IIC_BRST __SYSREG(0xd8400010 + IIC_OFFSET, u32)
+#define IIC_HOLD __SYSREG(0xd8400014 + IIC_OFFSET, u32)
+#define IIC_BSTS __SYSREG(0xd8400018 + IIC_OFFSET, u32)
+#define IIC_ICR __SYSREG(0xd4000080 + 4 * USE_IIC_CH, u16)
+
+#define IIC_CLK_PLS ((unsigned short)(MN10300_IOCLK / 100000 - 1))
+#define IIC_CLK_LOW ((unsigned short)(IIC_CLK_PLS / 2))
+
+#define SYS_IIC_DTRM_Bit_STA ((unsigned short)0x0400)
+#define SYS_IIC_DTRM_Bit_STO ((unsigned short)0x0200)
+#define SYS_IIC_DTRM_Bit_ACK ((unsigned short)0x0100)
+#define SYS_IIC_DTRM_Bit_DATA ((unsigned short)0x00FF)
+
+static inline void POLL_INT_REQ(volatile u16 *icr)
+{
+ unsigned long flags;
+ u16 tmp;
+
+ while (!(*icr & GxICR_REQUEST))
+ ;
+ flags = arch_local_cli_save();
+ tmp = *icr;
+ *icr = (tmp & GxICR_LEVEL) | GxICR_DETECT;
+ tmp = *icr;
+ arch_local_irq_restore(flags);
+}
+
+/*
+ * Implement the SMSC911x hook for MAC address retrieval
+ */
+#undef smsc_get_mac
+static inline int smsc_get_mac(struct net_device *dev)
+{
+ unsigned char *mac_buf = dev->dev_addr;
+ int i;
+ unsigned short value;
+ unsigned int data;
+ int mac_length = 6;
+ int check;
+ u16 orig_gicr, tmp;
+ unsigned long flags;
+
+ /* save original GnICR and clear GnICR.IE */
+ flags = arch_local_cli_save();
+ orig_gicr = IIC_ICR;
+ IIC_ICR = orig_gicr & GxICR_LEVEL;
+ tmp = IIC_ICR;
+ arch_local_irq_restore(flags);
+
+ IIC_MYADD = 0x00000008;
+ IIC_CLK = (IIC_CLK_LOW << 16) + (IIC_CLK_PLS);
+ /* bus hung recovery */
+
+ while (1) {
+ check = 0;
+ for (i = 0; i < 3; i++) {
+ if ((IIC_BSTS & 0x00000003) == 0x00000003)
+ check++;
+ udelay(3);
+ }
+
+ if (check == 3) {
+ IIC_BRST = 0x00000003;
+ break;
+ } else {
+ for (i = 0; i < 3; i++) {
+ IIC_BRST = 0x00000002;
+ udelay(8);
+ IIC_BRST = 0x00000003;
+ udelay(8);
+ }
+ }
+ }
+
+ IIC_BRST = 0x00000002;
+ IIC_BRST = 0x00000003;
+
+ value = SYS_IIC_DTRM_Bit_STA | SYS_IIC_DTRM_Bit_ACK;
+ value |= (((unsigned short)EEPROM_ADDRESS & SYS_IIC_DTRM_Bit_DATA) |
+ (unsigned short)0x0000);
+ IIC_DTRM = value;
+ POLL_INT_REQ(&IIC_ICR);
+
+ /** send offset of MAC address in EEPROM **/
+ IIC_DTRM = (unsigned char)((MAC_OFFSET & 0xFF00) >> 8);
+ POLL_INT_REQ(&IIC_ICR);
+
+ IIC_DTRM = (unsigned char)(MAC_OFFSET & 0x00FF);
+ POLL_INT_REQ(&IIC_ICR);
+
+ udelay(1000);
+
+ value = SYS_IIC_DTRM_Bit_STA;
+ value |= (((unsigned short)EEPROM_ADDRESS & SYS_IIC_DTRM_Bit_DATA) |
+ (unsigned short)0x0001);
+ IIC_DTRM = value;
+ POLL_INT_REQ(&IIC_ICR);
+
+ IIC_DTRM = 0x00000000;
+ while (mac_length > 0) {
+ POLL_INT_REQ(&IIC_ICR);
+
+ data = IIC_DREC;
+ mac_length--;
+ if (mac_length == 0)
+ value = 0x00000300; /* stop IIC bus */
+ else if (mac_length == 1)
+ value = 0x00000100; /* no ack */
+ else
+ value = 0x00000000; /* ack */
+ IIC_DTRM = value;
+ *mac_buf++ = (unsigned char)(data & 0xff);
+ }
+
+ /* restore GnICR.LV and GnICR.IE */
+ flags = arch_local_cli_save();
+ IIC_ICR = (orig_gicr & (GxICR_LEVEL | GxICR_ENABLE));
+ tmp = IIC_ICR;
+ arch_local_irq_restore(flags);
+
+ return 0;
+}
+#endif /* MN10300_USE_EXT_EEPROM */
+#endif /* _ASM_UNIT_SMSC911X_H */
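A hedged note on the EEPROM access above: the IIC_DTRM transmit word packs control bits (STA = start, STO = stop, ACK) above an 8-bit payload, and the first word written in smsc_get_mac() is START + ACK + the EEPROM slave address with the R/W bit clear. A small stand-alone sketch of that composition; the helper name is hypothetical.

/* sketch only: compose the first IIC_DTRM word used in smsc_get_mac() */
#include <stdio.h>
#include <stdint.h>

#define DTRM_STA  0x0400u   /* generate START */
#define DTRM_STO  0x0200u   /* generate STOP */
#define DTRM_ACK  0x0100u   /* drive/expect ACK */
#define DTRM_DATA 0x00ffu   /* 8-bit payload */

#define EEPROM_ADDRESS 0xa0u   /* slave address byte, R/W in bit 0 */

static uint16_t iic_start_write(uint8_t addr)
{
	return DTRM_STA | DTRM_ACK | (addr & DTRM_DATA);  /* bit 0 clear = write */
}

int main(void)
{
	/* prints 0x05a0, the same value the header builds before polling */
	printf("first DTRM word: 0x%04x\n", iic_start_write(EEPROM_ADDRESS));
	return 0;
}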
diff --git a/arch/mn10300/unit-asb2364/include/unit/timex.h b/arch/mn10300/unit-asb2364/include/unit/timex.h
new file mode 100644
index 000000000000..ddb7ed010706
--- /dev/null
+++ b/arch/mn10300/unit-asb2364/include/unit/timex.h
@@ -0,0 +1,159 @@
+/* timex.h: MN2WS0038 architecture timer specifications
+ *
+ * Copyright (C) 2002, 2010 Red Hat, Inc. All Rights Reserved.
+ * Written by David Howells (dhowells@redhat.com)
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+#ifndef _ASM_UNIT_TIMEX_H
+#define _ASM_UNIT_TIMEX_H
+
+#ifndef __ASSEMBLY__
+#include <linux/irq.h>
+#endif /* __ASSEMBLY__ */
+
+#include <asm/timer-regs.h>
+#include <unit/clock.h>
+#include <asm/param.h>
+
+/*
+ * jiffies counter specifications
+ */
+
+#define TMJCBR_MAX 0xffffff /* 24bit */
+#define TMJCIRQ TMTIRQ
+
+#ifndef __ASSEMBLY__
+
+#define MN10300_SRC_IOBCLK MN10300_IOBCLK
+
+#ifndef HZ
+# error HZ undeclared.
+#endif /* !HZ */
+
+#define MN10300_JCCLK (MN10300_SRC_IOBCLK)
+#define MN10300_TSCCLK (MN10300_SRC_IOBCLK)
+
+#define MN10300_JC_PER_HZ ((MN10300_JCCLK + HZ / 2) / HZ)
+#define MN10300_TSC_PER_HZ ((MN10300_TSCCLK + HZ / 2) / HZ)
+
+/* Check bit width of MTM interval value that sets base register */
+#if (MN10300_JC_PER_HZ - 1) > TMJCBR_MAX
+# error MTM tick timer interval value overflows the base register.
+#endif
+
+static inline void stop_jiffies_counter(void)
+{
+ u16 tmp;
+ TMTMD = 0;
+ tmp = TMTMD;
+}
+
+static inline void reload_jiffies_counter(u32 cnt)
+{
+ u32 tmp;
+
+ TMTBR = cnt;
+ tmp = TMTBR;
+
+ TMTMD = TMTMD_TMTLDE;
+ TMTMD = TMTMD_TMTCNE;
+ tmp = TMTMD;
+}
+
+#if defined(CONFIG_SMP) && defined(CONFIG_GENERIC_CLOCKEVENTS) && \
+ !defined(CONFIG_GENERIC_CLOCKEVENTS_BROADCAST)
+/*
+ * If we aren't using broadcasting, each core needs its own event timer.
+ * Since CPU0 uses the tick timer which is 24-bits, we use timer 4 & 5
+ * cascaded to 32-bits for CPU1 (but only really use 24-bits to match
+ * CPU0).
+ */
+
+#define TMJC1IRQ TM5IRQ
+
+static inline void stop_jiffies_counter1(void)
+{
+ u8 tmp;
+ TM4MD = 0;
+ TM5MD = 0;
+ tmp = TM4MD;
+ tmp = TM5MD;
+}
+
+static inline void reload_jiffies_counter1(u32 cnt)
+{
+ u32 tmp;
+
+ TM45BR = cnt;
+ tmp = TM45BR;
+
+ TM4MD = TM4MD_INIT_COUNTER;
+ tmp = TM4MD;
+
+ TM5MD = TM5MD_SRC_TM4CASCADE | TM5MD_INIT_COUNTER;
+ TM5MD = TM5MD_SRC_TM4CASCADE | TM5MD_COUNT_ENABLE;
+ tmp = TM5MD;
+
+ TM4MD = TM4MD_COUNT_ENABLE;
+ tmp = TM4MD;
+}
+#endif /* CONFIG_SMP&GENERIC_CLOCKEVENTS&!GENERIC_CLOCKEVENTS_BROADCAST */
+
+#endif /* !__ASSEMBLY__ */
+
+
+/*
+ * timestamp counter specifications
+ */
+#define TMTSCBR_MAX 0xffffffff
+
+#ifndef __ASSEMBLY__
+
+/* Use 32-bit timestamp counter */
+#define TMTSCMD TMSMD
+#define TMTSCBR TMSBR
+#define TMTSCBC TMSBC
+#define TMTSCICR TMSICR
+
+static inline void startup_timestamp_counter(void)
+{
+ u32 sync;
+
+ /* set up TMS(Timestamp) 32bit timer register to count real time
+ * - count down from 4Gig-1 to 0 and wrap at IOBCLK rate
+ */
+
+ TMTSCBR = TMTSCBR_MAX;
+ sync = TMTSCBR;
+
+ TMTSCICR = 0;
+ sync = TMTSCICR;
+
+ TMTSCMD = TMTMD_TMTLDE;
+ TMTSCMD = TMTMD_TMTCNE;
+ sync = TMTSCMD;
+}
+
+static inline void shutdown_timestamp_counter(void)
+{
+ TMTSCMD = 0;
+}
+
+/*
+ * we use a cascaded pair of 16-bit down-counting timers to count I/O
+ * clock cycles for the purposes of time keeping
+ */
+typedef unsigned long cycles_t;
+
+static inline cycles_t read_timestamp_counter(void)
+{
+ return (cycles_t)~TMTSCBC;
+}
+
+#endif /* !__ASSEMBLY__ */
+
+#endif /* _ASM_UNIT_TIMEX_H */
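One worked number for the overflow check above, offered as a sketch: MN10300_JC_PER_HZ is a rounded ticks-per-jiffy value, and the #error fires if it cannot fit the 24-bit MTM base register. The I/O clock rate below is an assumed placeholder, not the board's real figure.

/* sketch only: the rounded ticks-per-jiffy value vs. the 24-bit limit */
#include <stdio.h>

#define TMJCBR_MAX 0xffffff        /* 24-bit base register, as above */
#define IOBCLK_HZ  100000000ul     /* assumed I/O bus clock, 100 MHz */
#define HZ         100

int main(void)
{
	unsigned long jc_per_hz = (IOBCLK_HZ + HZ / 2) / HZ;  /* rounded divide */

	printf("ticks per jiffy: %lu (limit %u)\n", jc_per_hz, TMJCBR_MAX);
	if (jc_per_hz - 1 > TMJCBR_MAX)
		printf("would overflow the 24-bit timer: the #error case\n");
	return 0;
}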
diff --git a/arch/mn10300/unit-asb2364/irq-fpga.c b/arch/mn10300/unit-asb2364/irq-fpga.c
new file mode 100644
index 000000000000..fcf29754e4d1
--- /dev/null
+++ b/arch/mn10300/unit-asb2364/irq-fpga.c
@@ -0,0 +1,96 @@
+/* ASB2364 FPGA interrupt multiplexing
+ *
+ * Copyright (C) 2010 Red Hat, Inc. All Rights Reserved.
+ * Written by David Howells (dhowells@redhat.com)
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public Licence
+ * as published by the Free Software Foundation; either version
+ * 2 of the Licence, or (at your option) any later version.
+ */
+
+#include <linux/interrupt.h>
+#include <linux/init.h>
+#include <linux/irq.h>
+#include <unit/fpga-regs.h>
+
+/*
+ * FPGA PIC operations
+ */
+static void asb2364_fpga_mask(unsigned int irq)
+{
+ ASB2364_FPGA_REG_MASK(irq - NR_CPU_IRQS) = 0x0001;
+ SyncExBus();
+}
+
+static void asb2364_fpga_ack(unsigned int irq)
+{
+ ASB2364_FPGA_REG_IRQ(irq - NR_CPU_IRQS) = 0x0001;
+ SyncExBus();
+}
+
+static void asb2364_fpga_mask_ack(unsigned int irq)
+{
+ ASB2364_FPGA_REG_MASK(irq - NR_CPU_IRQS) = 0x0001;
+ SyncExBus();
+ ASB2364_FPGA_REG_IRQ(irq - NR_CPU_IRQS) = 0x0001;
+ SyncExBus();
+}
+
+static void asb2364_fpga_unmask(unsigned int irq)
+{
+ ASB2364_FPGA_REG_MASK(irq - NR_CPU_IRQS) = 0x0000;
+ SyncExBus();
+}
+
+static struct irq_chip asb2364_fpga_pic = {
+ .name = "fpga",
+ .ack = asb2364_fpga_ack,
+ .mask = asb2364_fpga_mask,
+ .mask_ack = asb2364_fpga_mask_ack,
+ .unmask = asb2364_fpga_unmask,
+};
+
+/*
+ * FPGA PIC interrupt handler
+ */
+static irqreturn_t fpga_interrupt(int irq, void *_mask)
+{
+ if ((ASB2364_FPGA_REG_IRQ_LAN & 0x0001) != 0x0001)
+ generic_handle_irq(FPGA_LAN_IRQ);
+ if ((ASB2364_FPGA_REG_IRQ_UART & 0x0001) != 0x0001)
+ generic_handle_irq(FPGA_UART_IRQ);
+ if ((ASB2364_FPGA_REG_IRQ_I2C & 0x0001) != 0x0001)
+ generic_handle_irq(FPGA_I2C_IRQ);
+ if ((ASB2364_FPGA_REG_IRQ_USB & 0x0001) != 0x0001)
+ generic_handle_irq(FPGA_USB_IRQ);
+ if ((ASB2364_FPGA_REG_IRQ_FPGA & 0x0001) != 0x0001)
+ generic_handle_irq(FPGA_FPGA_IRQ);
+
+ return IRQ_HANDLED;
+}
+
+/*
+ * Define an interrupt action for each FPGA PIC output
+ */
+static struct irqaction fpga_irq[] = {
+ [0] = {
+ .handler = fpga_interrupt,
+ .flags = IRQF_DISABLED | IRQF_SHARED,
+ .name = "fpga",
+ },
+};
+
+/*
+ * Initialise the FPGA's PIC
+ */
+void __init irq_fpga_init(void)
+{
+ int irq;
+
+ for (irq = NR_CPU_IRQS; irq < NR_IRQS; irq++)
+ set_irq_chip_and_handler(irq, &asb2364_fpga_pic, handle_level_irq);
+
+ /* the FPGA drives the XIRQ1 input on the CPU PIC */
+ setup_irq(XIRQ1, &fpga_irq[0]);
+}
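A short illustration of the demultiplexer's convention, as a stand-alone sketch with made-up register values: the FPGA status bits read 0 while a source is asserting its interrupt, so the "(reg & 0x0001) != 0x0001" tests above mean "pending, hand it to generic_handle_irq()".

/* sketch only: active-low pending bits, as tested in fpga_interrupt() */
#include <stdio.h>

int main(void)
{
	unsigned int irq_lan  = 0x0000;   /* asserted (reads low) */
	unsigned int irq_uart = 0x0001;   /* idle (reads high)    */

	if ((irq_lan & 0x0001) != 0x0001)
		printf("LAN pending -> would call generic_handle_irq(FPGA_LAN_IRQ)\n");
	if ((irq_uart & 0x0001) != 0x0001)
		printf("UART pending\n");
	else
		printf("UART idle\n");
	return 0;
}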
diff --git a/arch/mn10300/unit-asb2364/leds.c b/arch/mn10300/unit-asb2364/leds.c
new file mode 100644
index 000000000000..1ff830c372b3
--- /dev/null
+++ b/arch/mn10300/unit-asb2364/leds.c
@@ -0,0 +1,98 @@
+/* leds.c: ASB2364 peripheral 7seg LEDs x4 support
+ *
+ * Copyright (C) 2002 Red Hat, Inc. All Rights Reserved.
+ * Written by David Howells (dhowells@redhat.com)
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+
+#include <linux/kernel.h>
+#include <linux/param.h>
+#include <linux/init.h>
+
+#include <asm/io.h>
+#include <asm/processor.h>
+#include <asm/intctl-regs.h>
+#include <asm/rtc-regs.h>
+#include <unit/leds.h>
+
+#if MN10300_USE_7SEGLEDS
+static const u8 asb2364_led_hex_tbl[16] = {
+ 0x80, 0xf2, 0x48, 0x60, 0x32, 0x24, 0x04, 0xf0,
+ 0x00, 0x20, 0x10, 0x06, 0x8c, 0x42, 0x0c, 0x1c
+};
+
+static const u32 asb2364_led_chase_tbl[6] = {
+ ~0x02020202, /* top - segA */
+ ~0x04040404, /* right top - segB */
+ ~0x08080808, /* right bottom - segC */
+ ~0x10101010, /* bottom - segD */
+ ~0x20202020, /* left bottom - segE */
+ ~0x40404040, /* left top - segF */
+};
+
+static unsigned asb2364_led_chase;
+
+void peripheral_leds7x4_display_dec(unsigned int val, unsigned int points)
+{
+ u32 leds;
+
+ leds = asb2364_led_hex_tbl[(val/1000) % 10];
+ leds <<= 8;
+ leds |= asb2364_led_hex_tbl[(val/100) % 10];
+ leds <<= 8;
+ leds |= asb2364_led_hex_tbl[(val/10) % 10];
+ leds <<= 8;
+ leds |= asb2364_led_hex_tbl[val % 10];
+ leds |= points^0x01010101;
+
+ ASB2364_7SEGLEDS = leds;
+}
+
+void peripheral_leds7x4_display_hex(unsigned int val, unsigned int points)
+{
+ u32 leds;
+
+ leds = asb2364_led_hex_tbl[(val >> 12) & 0xf];
+ leds <<= 8;
+ leds |= asb2364_led_hex_tbl[(val >> 8) & 0xf];
+ leds <<= 8;
+ leds |= asb2364_led_hex_tbl[(val >> 4) & 0xf];
+ leds <<= 8;
+ leds |= asb2364_led_hex_tbl[val & 0xf];
+ leds |= points^0x01010101;
+
+ ASB2364_7SEGLEDS = leds;
+}
+
+/* display triple horizontal bar and exception code */
+void peripheral_leds_display_exception(enum exception_code code)
+{
+ u32 leds;
+
+ leds = asb2364_led_hex_tbl[(code/0x100) % 0x10];
+ leds <<= 8;
+ leds |= asb2364_led_hex_tbl[(code/0x10) % 0x10];
+ leds <<= 8;
+ leds |= asb2364_led_hex_tbl[code % 0x10];
+ leds |= 0x6d010101;
+
+ ASB2364_7SEGLEDS = leds;
+}
+
+void peripheral_leds_led_chase(void)
+{
+ ASB2364_7SEGLEDS = asb2364_led_chase_tbl[asb2364_led_chase];
+ asb2364_led_chase++;
+ if (asb2364_led_chase >= 6)
+ asb2364_led_chase = 0;
+}
+#else /* MN10300_USE_7SEGLEDS */
+void peripheral_leds7x4_display_dec(unsigned int val, unsigned int points) { }
+void peripheral_leds7x4_display_hex(unsigned int val, unsigned int points) { }
+void peripheral_leds_display_exception(enum exception_code code) { }
+void peripheral_leds_led_chase(void) { }
+#endif /* MN10300_USE_7SEGLEDS */
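The display helpers above all follow the same packing: four active-low segment patterns, most significant digit first, OR'd with the decimal-point bits. A compact user-space sketch of that packing, reusing the same lookup table; the printed value is only illustrative.

/* sketch only: pack four 7-segment patterns into one 32-bit word */
#include <stdio.h>
#include <stdint.h>

static const uint8_t seg7_hex[16] = {
	0x80, 0xf2, 0x48, 0x60, 0x32, 0x24, 0x04, 0xf0,
	0x00, 0x20, 0x10, 0x06, 0x8c, 0x42, 0x0c, 0x1c
};

static uint32_t pack_dec(unsigned int val, unsigned int points)
{
	uint32_t leds;

	leds  = seg7_hex[(val / 1000) % 10]; leds <<= 8;
	leds |= seg7_hex[(val / 100) % 10];  leds <<= 8;
	leds |= seg7_hex[(val / 10) % 10];   leds <<= 8;
	leds |= seg7_hex[val % 10];
	return leds | (points ^ 0x01010101);   /* points are active low too */
}

int main(void)
{
	printf("word for \"1234\": 0x%08x\n", (unsigned int)pack_dec(1234, 0));
	return 0;
}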
diff --git a/arch/mn10300/unit-asb2364/smsc911x.c b/arch/mn10300/unit-asb2364/smsc911x.c
new file mode 100644
index 000000000000..544a73e94c81
--- /dev/null
+++ b/arch/mn10300/unit-asb2364/smsc911x.c
@@ -0,0 +1,58 @@
+/* Specification for the SMSC911x NIC
+ *
+ * Copyright (C) 2006 Matsushita Electric Industrial Co., Ltd.
+ * All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/platform_device.h>
+#include <linux/io.h>
+#include <linux/ioport.h>
+#include <linux/smsc911x.h>
+#include <unit/smsc911x.h>
+
+static struct smsc911x_platform_config smsc911x_config = {
+ .irq_polarity = SMSC911X_IRQ_POLARITY_ACTIVE_LOW,
+ .irq_type = SMSC911X_IRQ_TYPE_OPEN_DRAIN,
+ .flags = SMSC911X_USE_32BIT,
+};
+
+static struct resource smsc911x_resources[] = {
+ [0] = {
+ .start = SMSC911X_BASE,
+ .end = SMSC911X_BASE_END,
+ .flags = IORESOURCE_MEM,
+ },
+ [1] = {
+ .start = SMSC911X_IRQ,
+ .end = SMSC911X_IRQ,
+ .flags = IORESOURCE_IRQ,
+ },
+};
+
+static struct platform_device smsc911x_device = {
+ .name = "smsc911x",
+ .id = 0,
+ .num_resources = ARRAY_SIZE(smsc911x_resources),
+ .resource = smsc911x_resources,
+ .dev = {
+ .platform_data = &smsc911x_config,
+ }
+};
+
+/*
+ * add platform devices
+ */
+static int __init unit_device_init(void)
+{
+ platform_device_register(&smsc911x_device);
+ return 0;
+}
+
+device_initcall(unit_device_init);
diff --git a/arch/mn10300/unit-asb2364/unit-init.c b/arch/mn10300/unit-asb2364/unit-init.c
new file mode 100644
index 000000000000..11440803db10
--- /dev/null
+++ b/arch/mn10300/unit-asb2364/unit-init.c
@@ -0,0 +1,88 @@
+/* ASB2364 initialisation
+ *
+ * Copyright (C) 2002 Red Hat, Inc. All Rights Reserved.
+ * Written by David Howells (dhowells@redhat.com)
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+
+#include <linux/kernel.h>
+#include <linux/param.h>
+#include <linux/init.h>
+#include <linux/device.h>
+#include <linux/delay.h>
+
+#include <asm/io.h>
+#include <asm/setup.h>
+#include <asm/processor.h>
+#include <asm/irq.h>
+#include <asm/intctl-regs.h>
+#include <unit/fpga-regs.h>
+
+/*
+ * initialise some of the unit hardware before gdbstub is set up
+ */
+asmlinkage void __init unit_init(void)
+{
+ /* set up the external interrupts */
+
+ /* XIRQ[0]: NAND RXBY */
+ /* SET_XIRQ_TRIGGER(0, XIRQ_TRIGGER_LOWLEVEL); */
+
+ /* XIRQ[1]: LAN, UART, I2C, USB, PCI, FPGA */
+ SET_XIRQ_TRIGGER(1, XIRQ_TRIGGER_LOWLEVEL);
+
+ /* XIRQ[2]: Extend Slot 1-9 */
+ /* SET_XIRQ_TRIGGER(2, XIRQ_TRIGGER_LOWLEVEL); */
+
+#if defined(CONFIG_EXT_SERIAL_IRQ_LEVEL) && \
+ defined(CONFIG_ETHERNET_IRQ_LEVEL) && \
+ (CONFIG_EXT_SERIAL_IRQ_LEVEL != CONFIG_ETHERNET_IRQ_LEVEL)
+# error CONFIG_EXT_SERIAL_IRQ_LEVEL != CONFIG_ETHERNET_IRQ_LEVEL
+#endif
+
+#if defined(CONFIG_EXT_SERIAL_IRQ_LEVEL)
+ set_intr_level(XIRQ1, NUM2GxICR_LEVEL(CONFIG_EXT_SERIAL_IRQ_LEVEL));
+#elif defined(CONFIG_ETHERNET_IRQ_LEVEL)
+ set_intr_level(XIRQ1, NUM2GxICR_LEVEL(CONFIG_ETHERNET_IRQ_LEVEL));
+#endif
+}
+
+/*
+ * initialise the rest of the unit hardware after gdbstub is ready
+ */
+asmlinkage void __init unit_setup(void)
+{
+
+}
+
+/*
+ * initialise the external interrupts used by a unit of this type
+ */
+void __init unit_init_IRQ(void)
+{
+ unsigned int extnum;
+
+ for (extnum = 0 ; extnum < NR_XIRQS ; extnum++) {
+ switch (GET_XIRQ_TRIGGER(extnum)) {
+ /* LEVEL triggered interrupts should be made
+ * post-ACK'able as they hold their lines until
+ * serviced
+ */
+ case XIRQ_TRIGGER_HILEVEL:
+ case XIRQ_TRIGGER_LOWLEVEL:
+ mn10300_set_lateack_irq_type(XIRQ2IRQ(extnum));
+ break;
+ default:
+ break;
+ }
+ }
+
+#define IRQCTL __SYSREG(0xd5000090, u32)
+ IRQCTL |= 0x02;
+
+ irq_fpga_init();
+}
diff --git a/arch/parisc/Kconfig b/arch/parisc/Kconfig
index 79a04a9394d5..0888675c98dd 100644
--- a/arch/parisc/Kconfig
+++ b/arch/parisc/Kconfig
@@ -1,10 +1,3 @@
-#
-# For a description of the syntax of this configuration file,
-# see Documentation/kbuild/kconfig-language.txt.
-#
-
-mainmenu "Linux/PA-RISC Kernel Configuration"
-
config PARISC
def_bool y
select HAVE_IDE
@@ -19,6 +12,7 @@ config PARISC
select HAVE_IRQ_WORK
select HAVE_PERF_EVENTS
select GENERIC_ATOMIC64 if !64BIT
+ select GENERIC_HARDIRQS_NO__DO_IRQ
help
The PA-RISC microprocessor is designed by Hewlett-Packard and used
in many of their workstations & servers (HP9000 700 and 800 series,
@@ -85,6 +79,9 @@ config IRQ_PER_CPU
bool
default y
+config GENERIC_HARDIRQS_NO__DO_IRQ
+ def_bool y
+
# unless you want to implement ACPI on PA-RISC ... ;-)
config PM
bool
diff --git a/arch/parisc/hpux/sys_hpux.c b/arch/parisc/hpux/sys_hpux.c
index ba430a03bc7a..30394081d9b6 100644
--- a/arch/parisc/hpux/sys_hpux.c
+++ b/arch/parisc/hpux/sys_hpux.c
@@ -28,7 +28,6 @@
#include <linux/namei.h>
#include <linux/sched.h>
#include <linux/slab.h>
-#include <linux/smp_lock.h>
#include <linux/syscalls.h>
#include <linux/utsname.h>
#include <linux/vfs.h>
diff --git a/arch/parisc/include/asm/cache.h b/arch/parisc/include/asm/cache.h
index 039880e7d2c9..47f11c707b65 100644
--- a/arch/parisc/include/asm/cache.h
+++ b/arch/parisc/include/asm/cache.h
@@ -24,8 +24,6 @@
#ifndef __ASSEMBLY__
-#define L1_CACHE_ALIGN(x) (((x)+(L1_CACHE_BYTES-1))&~(L1_CACHE_BYTES-1))
-
#define SMP_CACHE_BYTES L1_CACHE_BYTES
#define ARCH_DMA_MINALIGN L1_CACHE_BYTES
diff --git a/arch/parisc/include/asm/cacheflush.h b/arch/parisc/include/asm/cacheflush.h
index dba11aedce1b..f388a85bba11 100644
--- a/arch/parisc/include/asm/cacheflush.h
+++ b/arch/parisc/include/asm/cacheflush.h
@@ -126,20 +126,20 @@ static inline void *kmap(struct page *page)
#define kunmap(page) kunmap_parisc(page_address(page))
-static inline void *kmap_atomic(struct page *page, enum km_type idx)
+static inline void *__kmap_atomic(struct page *page)
{
pagefault_disable();
return page_address(page);
}
-static inline void kunmap_atomic_notypecheck(void *addr, enum km_type idx)
+static inline void __kunmap_atomic(void *addr)
{
kunmap_parisc(addr);
pagefault_enable();
}
-#define kmap_atomic_prot(page, idx, prot) kmap_atomic(page, idx)
-#define kmap_atomic_pfn(pfn, idx) kmap_atomic(pfn_to_page(pfn), (idx))
+#define kmap_atomic_prot(page, prot) kmap_atomic(page)
+#define kmap_atomic_pfn(pfn) kmap_atomic(pfn_to_page(pfn))
#define kmap_atomic_to_page(ptr) virt_to_page(ptr)
#endif
diff --git a/arch/parisc/include/asm/irq.h b/arch/parisc/include/asm/irq.h
index dfa26b67f919..c67dccf2e31f 100644
--- a/arch/parisc/include/asm/irq.h
+++ b/arch/parisc/include/asm/irq.h
@@ -40,7 +40,7 @@ struct irq_chip;
void no_ack_irq(unsigned int irq);
void no_end_irq(unsigned int irq);
void cpu_ack_irq(unsigned int irq);
-void cpu_end_irq(unsigned int irq);
+void cpu_eoi_irq(unsigned int irq);
extern int txn_alloc_irq(unsigned int nbits);
extern int txn_claim_irq(int);
diff --git a/arch/parisc/include/asm/pgtable.h b/arch/parisc/include/asm/pgtable.h
index 01c15035e783..865f37a8a881 100644
--- a/arch/parisc/include/asm/pgtable.h
+++ b/arch/parisc/include/asm/pgtable.h
@@ -397,9 +397,7 @@ static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
#define pte_offset_kernel(pmd, address) \
((pte_t *) pmd_page_vaddr(*(pmd)) + pte_index(address))
#define pte_offset_map(pmd, address) pte_offset_kernel(pmd, address)
-#define pte_offset_map_nested(pmd, address) pte_offset_kernel(pmd, address)
#define pte_unmap(pte) do { } while (0)
-#define pte_unmap_nested(pte) do { } while (0)
#define pte_unmap(pte) do { } while (0)
#define pte_unmap_nested(pte) do { } while (0)
diff --git a/arch/parisc/include/asm/unistd.h b/arch/parisc/include/asm/unistd.h
index 1ce7d2851d90..3eb82c2a5ec3 100644
--- a/arch/parisc/include/asm/unistd.h
+++ b/arch/parisc/include/asm/unistd.h
@@ -813,8 +813,9 @@
#define __NR_perf_event_open (__NR_Linux + 318)
#define __NR_recvmmsg (__NR_Linux + 319)
#define __NR_accept4 (__NR_Linux + 320)
+#define __NR_prlimit64 (__NR_Linux + 321)
-#define __NR_Linux_syscalls (__NR_accept4 + 1)
+#define __NR_Linux_syscalls (__NR_prlimit64 + 1)
#define __IGNORE_select /* newselect */
diff --git a/arch/parisc/kernel/irq.c b/arch/parisc/kernel/irq.c
index efbcee5d2220..d7d94b845dc2 100644
--- a/arch/parisc/kernel/irq.c
+++ b/arch/parisc/kernel/irq.c
@@ -52,7 +52,7 @@ static volatile unsigned long cpu_eiem = 0;
*/
static DEFINE_PER_CPU(unsigned long, local_ack_eiem) = ~0UL;
-static void cpu_disable_irq(unsigned int irq)
+static void cpu_mask_irq(unsigned int irq)
{
unsigned long eirr_bit = EIEM_MASK(irq);
@@ -63,7 +63,7 @@ static void cpu_disable_irq(unsigned int irq)
* then gets disabled */
}
-static void cpu_enable_irq(unsigned int irq)
+static void cpu_unmask_irq(unsigned int irq)
{
unsigned long eirr_bit = EIEM_MASK(irq);
@@ -75,15 +75,6 @@ static void cpu_enable_irq(unsigned int irq)
smp_send_all_nop();
}
-static unsigned int cpu_startup_irq(unsigned int irq)
-{
- cpu_enable_irq(irq);
- return 0;
-}
-
-void no_ack_irq(unsigned int irq) { }
-void no_end_irq(unsigned int irq) { }
-
void cpu_ack_irq(unsigned int irq)
{
unsigned long mask = EIEM_MASK(irq);
@@ -99,7 +90,7 @@ void cpu_ack_irq(unsigned int irq)
mtctl(mask, 23);
}
-void cpu_end_irq(unsigned int irq)
+void cpu_eoi_irq(unsigned int irq)
{
unsigned long mask = EIEM_MASK(irq);
int cpu = smp_processor_id();
@@ -146,12 +137,10 @@ static int cpu_set_affinity_irq(unsigned int irq, const struct cpumask *dest)
static struct irq_chip cpu_interrupt_type = {
.name = "CPU",
- .startup = cpu_startup_irq,
- .shutdown = cpu_disable_irq,
- .enable = cpu_enable_irq,
- .disable = cpu_disable_irq,
+ .mask = cpu_mask_irq,
+ .unmask = cpu_unmask_irq,
.ack = cpu_ack_irq,
- .end = cpu_end_irq,
+ .eoi = cpu_eoi_irq,
#ifdef CONFIG_SMP
.set_affinity = cpu_set_affinity_irq,
#endif
@@ -247,10 +236,11 @@ int cpu_claim_irq(unsigned int irq, struct irq_chip *type, void *data)
if (irq_desc[irq].chip != &cpu_interrupt_type)
return -EBUSY;
+ /* for iosapic interrupts */
if (type) {
- irq_desc[irq].chip = type;
- irq_desc[irq].chip_data = data;
- cpu_interrupt_type.enable(irq);
+ set_irq_chip_and_handler(irq, type, handle_percpu_irq);
+ set_irq_chip_data(irq, data);
+ cpu_unmask_irq(irq);
}
return 0;
}
@@ -368,7 +358,7 @@ void do_cpu_irq_mask(struct pt_regs *regs)
goto set_out;
}
#endif
- __do_IRQ(irq);
+ generic_handle_irq(irq);
out:
irq_exit();
@@ -398,14 +388,15 @@ static void claim_cpu_irqs(void)
{
int i;
for (i = CPU_IRQ_BASE; i <= CPU_IRQ_MAX; i++) {
- irq_desc[i].chip = &cpu_interrupt_type;
+ set_irq_chip_and_handler(i, &cpu_interrupt_type,
+ handle_percpu_irq);
}
- irq_desc[TIMER_IRQ].action = &timer_action;
- irq_desc[TIMER_IRQ].status = IRQ_PER_CPU;
+ set_irq_handler(TIMER_IRQ, handle_percpu_irq);
+ setup_irq(TIMER_IRQ, &timer_action);
#ifdef CONFIG_SMP
- irq_desc[IPI_IRQ].action = &ipi_action;
- irq_desc[IPI_IRQ].status = IRQ_PER_CPU;
+ set_irq_handler(IPI_IRQ, handle_percpu_irq);
+ setup_irq(IPI_IRQ, &ipi_action);
#endif
}
@@ -423,3 +414,4 @@ void __init init_IRQ(void)
set_eiem(cpu_eiem); /* EIEM : enable all external intr */
}
+
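For orientation, the parisc change above moves from the old __do_IRQ()/startup/enable model to mask/unmask/ack/eoi driven by flow handlers. Conceptually the CPU chip just sets or clears one bit per interrupt in a per-CPU enable word; the sketch below models only that idea, with a made-up bit layout rather than the real EIEM encoding.

/* conceptual model only: mask/unmask as one bit in an enable word */
#include <stdio.h>

static unsigned long cpu_eiem = ~0ul;      /* all external IRQs enabled */
#define EIRR_BIT(irq) (1ul << (irq))       /* hypothetical bit mapping */

static void cpu_mask_irq(unsigned int irq)   { cpu_eiem &= ~EIRR_BIT(irq); }
static void cpu_unmask_irq(unsigned int irq) { cpu_eiem |=  EIRR_BIT(irq); }

int main(void)
{
	cpu_mask_irq(5);
	printf("after mask(5):   %#lx\n", cpu_eiem);
	cpu_unmask_irq(5);
	printf("after unmask(5): %#lx\n", cpu_eiem);
	return 0;
}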
diff --git a/arch/parisc/kernel/pdc_cons.c b/arch/parisc/kernel/pdc_cons.c
index 1ff366cb9685..66d1f17fdb94 100644
--- a/arch/parisc/kernel/pdc_cons.c
+++ b/arch/parisc/kernel/pdc_cons.c
@@ -12,6 +12,7 @@
* Copyright (C) 2001 Helge Deller <deller at parisc-linux.org>
* Copyright (C) 2001 Thomas Bogendoerfer <tsbogend at parisc-linux.org>
* Copyright (C) 2002 Randolph Chung <tausq with parisc-linux.org>
+ * Copyright (C) 2010 Guy Martin <gmsoft at tuxicoman.be>
*
*
* This program is free software; you can redistribute it and/or modify
@@ -31,12 +32,11 @@
/*
* The PDC console is a simple console, which can be used for debugging
- * boot related problems on HP PA-RISC machines.
+ * boot related problems on HP PA-RISC machines. It is also useful when no
+ * other console works.
*
* This code uses the ROM (=PDC) based functions to read and write characters
* from and to PDC's boot path.
- * Since all character read from that path must be polled, this code never
- * can or will be a fully functional linux console.
*/
/* Define EARLY_BOOTUP_DEBUG to debug kernel related boot problems.
@@ -53,6 +53,7 @@
#include <asm/pdc.h> /* for iodc_call() proto and friends */
static DEFINE_SPINLOCK(pdc_console_lock);
+static struct console pdc_cons;
static void pdc_console_write(struct console *co, const char *s, unsigned count)
{
@@ -85,12 +86,138 @@ static int pdc_console_setup(struct console *co, char *options)
#if defined(CONFIG_PDC_CONSOLE)
#include <linux/vt_kern.h>
+#include <linux/tty_flip.h>
+
+#define PDC_CONS_POLL_DELAY (30 * HZ / 1000)
+
+static struct timer_list pdc_console_timer;
+
+extern struct console * console_drivers;
+
+static int pdc_console_tty_open(struct tty_struct *tty, struct file *filp)
+{
+
+ mod_timer(&pdc_console_timer, jiffies + PDC_CONS_POLL_DELAY);
+
+ return 0;
+}
+
+static void pdc_console_tty_close(struct tty_struct *tty, struct file *filp)
+{
+ if (!tty->count)
+ del_timer(&pdc_console_timer);
+}
+
+static int pdc_console_tty_write(struct tty_struct *tty, const unsigned char *buf, int count)
+{
+ pdc_console_write(NULL, buf, count);
+ return count;
+}
+
+static int pdc_console_tty_write_room(struct tty_struct *tty)
+{
+ return 32768; /* no limit, no buffer used */
+}
+
+static int pdc_console_tty_chars_in_buffer(struct tty_struct *tty)
+{
+ return 0; /* no buffer */
+}
+
+static struct tty_driver *pdc_console_tty_driver;
+
+static const struct tty_operations pdc_console_tty_ops = {
+ .open = pdc_console_tty_open,
+ .close = pdc_console_tty_close,
+ .write = pdc_console_tty_write,
+ .write_room = pdc_console_tty_write_room,
+ .chars_in_buffer = pdc_console_tty_chars_in_buffer,
+};
+
+static void pdc_console_poll(unsigned long unused)
+{
+
+ int data, count = 0;
+
+ struct tty_struct *tty = pdc_console_tty_driver->ttys[0];
+
+ if (!tty)
+ return;
+
+ while (1) {
+ data = pdc_console_poll_key(NULL);
+ if (data == -1)
+ break;
+ tty_insert_flip_char(tty, data & 0xFF, TTY_NORMAL);
+ count++;
+ }
+
+ if (count)
+ tty_flip_buffer_push(tty);
+
+ if (tty->count && (pdc_cons.flags & CON_ENABLED))
+ mod_timer(&pdc_console_timer, jiffies + PDC_CONS_POLL_DELAY);
+}
+
+static int __init pdc_console_tty_driver_init(void)
+{
+
+ int err;
+ struct tty_driver *drv;
+
+ /* Check if the console driver is still registered.
+ * It is unregistered if the pdc console was not selected as the
+ * primary console. */
+
+ struct console *tmp = console_drivers;
+
+ for (tmp = console_drivers; tmp; tmp = tmp->next)
+ if (tmp == &pdc_cons)
+ break;
+
+ if (!tmp) {
+ printk(KERN_INFO "PDC console driver not registered anymore, not creating %s\n", pdc_cons.name);
+ return -ENODEV;
+ }
+
+ printk(KERN_INFO "The PDC console driver is still registered, removing CON_BOOT flag\n");
+ pdc_cons.flags &= ~CON_BOOT;
+
+ drv = alloc_tty_driver(1);
+
+ if (!drv)
+ return -ENOMEM;
+
+ drv->driver_name = "pdc_cons";
+ drv->name = "ttyB";
+ drv->major = MUX_MAJOR;
+ drv->minor_start = 0;
+ drv->type = TTY_DRIVER_TYPE_SYSTEM;
+ drv->init_termios = tty_std_termios;
+ drv->flags = TTY_DRIVER_REAL_RAW | TTY_DRIVER_RESET_TERMIOS;
+ tty_set_operations(drv, &pdc_console_tty_ops);
+
+ err = tty_register_driver(drv);
+ if (err) {
+ printk(KERN_ERR "Unable to register the PDC console TTY driver\n");
+ return err;
+ }
+
+ pdc_console_tty_driver = drv;
+
+ /* No need to initialize the pdc_console_timer if tty isn't allocated */
+ init_timer(&pdc_console_timer);
+ pdc_console_timer.function = pdc_console_poll;
+
+ return 0;
+}
+
+module_init(pdc_console_tty_driver_init);
static struct tty_driver * pdc_console_device (struct console *c, int *index)
{
- extern struct tty_driver console_driver;
- *index = c->index ? c->index-1 : fg_console;
- return &console_driver;
+ *index = c->index;
+ return pdc_console_tty_driver;
}
#else
#define pdc_console_device NULL
@@ -101,7 +228,7 @@ static struct console pdc_cons = {
.write = pdc_console_write,
.device = pdc_console_device,
.setup = pdc_console_setup,
- .flags = CON_BOOT | CON_PRINTBUFFER | CON_ENABLED,
+ .flags = CON_BOOT | CON_PRINTBUFFER,
.index = -1,
};
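The tty glue added above is a pure polling design: a timer drains pdc_console_poll_key() into the flip buffer and re-arms itself only while the port is open and the console is enabled. The toy model below shows just that drain-push-rearm shape in user space; every function in it is a stand-in, not a kernel API.

/* toy model only: drain, push, and re-arm while the port stays open */
#include <stdio.h>
#include <stdbool.h>

static int fake_poll_key(void)              /* stands in for pdc_console_poll_key() */
{
	static const char *s = "ok\n";
	static int i;
	return s[i] ? s[i++] : -1;          /* -1 means "no character available" */
}

static bool poll_once(bool port_open)
{
	int c, count = 0;

	while ((c = fake_poll_key()) != -1) {
		putchar(c);                 /* stands in for tty_insert_flip_char() */
		count++;
	}
	if (count)
		fflush(stdout);             /* stands in for tty_flip_buffer_push() */

	return port_open;                   /* mod_timer() again only if still open */
}

int main(void)
{
	printf("re-arm: %s\n", poll_once(true) ? "yes" : "no");
	return 0;
}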
diff --git a/arch/parisc/kernel/ptrace.c b/arch/parisc/kernel/ptrace.c
index c4f49e45129d..2905b1f52d30 100644
--- a/arch/parisc/kernel/ptrace.c
+++ b/arch/parisc/kernel/ptrace.c
@@ -110,7 +110,8 @@ void user_enable_block_step(struct task_struct *task)
pa_psw(task)->l = 0;
}
-long arch_ptrace(struct task_struct *child, long request, long addr, long data)
+long arch_ptrace(struct task_struct *child, long request,
+ unsigned long addr, unsigned long data)
{
unsigned long tmp;
long ret = -EIO;
@@ -120,11 +121,11 @@ long arch_ptrace(struct task_struct *child, long request, long addr, long data)
/* Read the word at location addr in the USER area. For ptraced
processes, the kernel saves all regs on a syscall. */
case PTRACE_PEEKUSR:
- if ((addr & (sizeof(long)-1)) ||
- (unsigned long) addr >= sizeof(struct pt_regs))
+ if ((addr & (sizeof(unsigned long)-1)) ||
+ addr >= sizeof(struct pt_regs))
break;
tmp = *(unsigned long *) ((char *) task_regs(child) + addr);
- ret = put_user(tmp, (unsigned long *) data);
+ ret = put_user(tmp, (unsigned long __user *) data);
break;
/* Write the word at location addr in the USER area. This will need
@@ -151,8 +152,8 @@ long arch_ptrace(struct task_struct *child, long request, long addr, long data)
break;
}
- if ((addr & (sizeof(long)-1)) ||
- (unsigned long) addr >= sizeof(struct pt_regs))
+ if ((addr & (sizeof(unsigned long)-1)) ||
+ addr >= sizeof(struct pt_regs))
break;
if ((addr >= PT_GR1 && addr <= PT_GR31) ||
addr == PT_IAOQ0 || addr == PT_IAOQ1 ||
diff --git a/arch/parisc/kernel/signal.c b/arch/parisc/kernel/signal.c
index 35c827e94e31..609a331878e7 100644
--- a/arch/parisc/kernel/signal.c
+++ b/arch/parisc/kernel/signal.c
@@ -98,7 +98,6 @@ void
sys_rt_sigreturn(struct pt_regs *regs, int in_syscall)
{
struct rt_sigframe __user *frame;
- struct siginfo si;
sigset_t set;
unsigned long usp = (regs->gr[30] & ~(0x01UL));
unsigned long sigframe_size = PARISC_RT_SIGFRAME_SIZE;
@@ -178,13 +177,7 @@ sys_rt_sigreturn(struct pt_regs *regs, int in_syscall)
give_sigsegv:
DBG(1,"sys_rt_sigreturn: Sending SIGSEGV\n");
- si.si_signo = SIGSEGV;
- si.si_errno = 0;
- si.si_code = SI_KERNEL;
- si.si_pid = task_pid_vnr(current);
- si.si_uid = current_uid();
- si.si_addr = &frame->uc;
- force_sig_info(SIGSEGV, &si, current);
+ force_sig(SIGSEGV, current);
return;
}
diff --git a/arch/parisc/kernel/sys_parisc32.c b/arch/parisc/kernel/sys_parisc32.c
index 9779ece2b070..88a0ad14a9c9 100644
--- a/arch/parisc/kernel/sys_parisc32.c
+++ b/arch/parisc/kernel/sys_parisc32.c
@@ -20,7 +20,6 @@
#include <linux/times.h>
#include <linux/time.h>
#include <linux/smp.h>
-#include <linux/smp_lock.h>
#include <linux/sem.h>
#include <linux/msg.h>
#include <linux/shm.h>
diff --git a/arch/parisc/kernel/syscall_table.S b/arch/parisc/kernel/syscall_table.S
index 3d52c978738f..74867dfdabe5 100644
--- a/arch/parisc/kernel/syscall_table.S
+++ b/arch/parisc/kernel/syscall_table.S
@@ -419,6 +419,7 @@
ENTRY_SAME(perf_event_open)
ENTRY_COMP(recvmmsg)
ENTRY_SAME(accept4) /* 320 */
+ ENTRY_SAME(prlimit64)
/* Nothing yet */
diff --git a/arch/parisc/kernel/unaligned.c b/arch/parisc/kernel/unaligned.c
index 92d977bb5ea8..234e3682cf09 100644
--- a/arch/parisc/kernel/unaligned.c
+++ b/arch/parisc/kernel/unaligned.c
@@ -619,15 +619,12 @@ void handle_unaligned(struct pt_regs *regs)
flop=1;
ret = emulate_std(regs, R2(regs->iir),1);
break;
-
-#ifdef CONFIG_PA20
case OPCODE_LDD_L:
ret = emulate_ldd(regs, R2(regs->iir),0);
break;
case OPCODE_STD_L:
ret = emulate_std(regs, R2(regs->iir),0);
break;
-#endif
}
#endif
switch (regs->iir & OPCODE3_MASK)
diff --git a/arch/parisc/kernel/unwind.c b/arch/parisc/kernel/unwind.c
index d58eac1a8288..76ed62ed785b 100644
--- a/arch/parisc/kernel/unwind.c
+++ b/arch/parisc/kernel/unwind.c
@@ -80,8 +80,11 @@ find_unwind_entry(unsigned long addr)
if (addr >= table->start &&
addr <= table->end)
e = find_unwind_entry_in_table(table, addr);
- if (e)
+ if (e) {
+ /* Move-to-front to exploit common traces */
+ list_move(&table->list, &unwind_tables);
break;
+ }
}
return e;
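The list_move() added above is a move-to-front heuristic: after a hit, the matching unwind table is promoted so that back-to-back lookups into the same module stop scanning the whole list. A tiny array-based illustration of the same idea, unrelated to the kernel list API:

/* illustration only: move-to-front after a hit */
#include <stdio.h>
#include <string.h>

int main(void)
{
	const char *tables[] = { "kernel", "mod_a", "mod_b" };
	int hit = 2;                               /* suppose "mod_b" matched */
	const char *found = tables[hit];

	memmove(&tables[1], &tables[0], hit * sizeof(tables[0]));  /* shift down */
	tables[0] = found;                         /* promote the hit */

	printf("%s %s %s\n", tables[0], tables[1], tables[2]);
	return 0;
}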
diff --git a/arch/parisc/math-emu/Makefile b/arch/parisc/math-emu/Makefile
index 1f3f225897f5..0bd63b08a79a 100644
--- a/arch/parisc/math-emu/Makefile
+++ b/arch/parisc/math-emu/Makefile
@@ -3,7 +3,7 @@
#
# See arch/parisc/math-emu/README
-EXTRA_CFLAGS += -Wno-parentheses -Wno-implicit-function-declaration \
+ccflags-y := -Wno-parentheses -Wno-implicit-function-declaration \
-Wno-uninitialized -Wno-strict-prototypes -Wno-return-type \
-Wno-implicit-int
diff --git a/arch/powerpc/Kconfig b/arch/powerpc/Kconfig
index 4b1e521d966f..e625e9e034ae 100644
--- a/arch/powerpc/Kconfig
+++ b/arch/powerpc/Kconfig
@@ -1,15 +1,13 @@
-# For a description of the syntax of this configuration file,
-# see Documentation/kbuild/kconfig-language.txt.
-#
-
-mainmenu "Linux/PowerPC Kernel Configuration"
-
source "arch/powerpc/platforms/Kconfig.cputype"
config PPC32
bool
default y if !PPC64
+config 32BIT
+ bool
+ default y if PPC32
+
config 64BIT
bool
default y if PPC64
@@ -688,9 +686,12 @@ config 4xx_SOC
bool
config FSL_LBC
- bool
+ bool "Freescale Local Bus support"
+ depends on FSL_SOC
help
- Freescale Localbus support
+ Enables reporting of errors from the Freescale local bus
+ controller. Also contains some common code used by
+ drivers for specific local bus peripherals.
config FSL_GTM
bool
diff --git a/arch/powerpc/boot/div64.S b/arch/powerpc/boot/div64.S
index 722f360a32a9..d271ab542673 100644
--- a/arch/powerpc/boot/div64.S
+++ b/arch/powerpc/boot/div64.S
@@ -33,9 +33,10 @@ __div64_32:
cntlzw r0,r5 # we are shifting the dividend right
li r10,-1 # to make it < 2^32, and shifting
srw r10,r10,r0 # the divisor right the same amount,
- add r9,r4,r10 # rounding up (so the estimate cannot
+ addc r9,r4,r10 # rounding up (so the estimate cannot
andc r11,r6,r10 # ever be too large, only too small)
andc r9,r9,r10
+ addze r9,r9
or r11,r5,r11
rotlw r9,r9,r0
rotlw r11,r11,r0
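A side note on the addc/addze pair introduced above, sketched rather than asserted: adding the rounding value to the high dividend word can carry out of 32 bits, addc records that carry, and addze folds it back into r9 after the masking step, where a plain add would silently drop it. The C below only demonstrates the dropped-carry hazard:

/* sketch only: a 32-bit add can carry out, and the carry must be kept */
#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint32_t r4  = 0xffffffffu;   /* high word of the dividend */
	uint32_t r10 = 0x000000ffu;   /* rounding value */
	uint32_t sum = r4 + r10;                 /* what a plain add keeps */
	unsigned int carry = (sum < r4);         /* what addc records in CA */

	printf("sum=%#x carry=%u (addze adds the carry back)\n", sum, carry);
	return 0;
}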
diff --git a/arch/powerpc/boot/dts/mpc8610_hpcd.dts b/arch/powerpc/boot/dts/mpc8610_hpcd.dts
index 9535ce68caae..83c3218cb4da 100644
--- a/arch/powerpc/boot/dts/mpc8610_hpcd.dts
+++ b/arch/powerpc/boot/dts/mpc8610_hpcd.dts
@@ -286,6 +286,7 @@
ssi@16100 {
compatible = "fsl,mpc8610-ssi";
+ status = "disabled";
cell-index = <1>;
reg = <0x16100 0x100>;
interrupt-parent = <&mpic>;
diff --git a/arch/powerpc/configs/mpc85xx_defconfig b/arch/powerpc/configs/mpc85xx_defconfig
index c3b113b2ca31..3aeb5949cfef 100644
--- a/arch/powerpc/configs/mpc85xx_defconfig
+++ b/arch/powerpc/configs/mpc85xx_defconfig
@@ -124,6 +124,9 @@ CONFIG_I2C_CPM=m
CONFIG_I2C_MPC=y
# CONFIG_HWMON is not set
CONFIG_VIDEO_OUTPUT_CONTROL=y
+CONFIG_FB=y
+CONFIG_FB_FSL_DIU=y
+# CONFIG_VGA_CONSOLE is not set
CONFIG_SOUND=y
CONFIG_SND=y
# CONFIG_SND_SUPPORT_OLD_API is not set
diff --git a/arch/powerpc/configs/mpc85xx_smp_defconfig b/arch/powerpc/configs/mpc85xx_smp_defconfig
index a075da2ea3fb..d62c8016f4bc 100644
--- a/arch/powerpc/configs/mpc85xx_smp_defconfig
+++ b/arch/powerpc/configs/mpc85xx_smp_defconfig
@@ -126,6 +126,9 @@ CONFIG_I2C_CPM=m
CONFIG_I2C_MPC=y
# CONFIG_HWMON is not set
CONFIG_VIDEO_OUTPUT_CONTROL=y
+CONFIG_FB=y
+CONFIG_FB_FSL_DIU=y
+# CONFIG_VGA_CONSOLE is not set
CONFIG_SOUND=y
CONFIG_SND=y
# CONFIG_SND_SUPPORT_OLD_API is not set
diff --git a/arch/powerpc/include/asm/cputime.h b/arch/powerpc/include/asm/cputime.h
index 8bdc6a9e5773..1cf20bdfbeca 100644
--- a/arch/powerpc/include/asm/cputime.h
+++ b/arch/powerpc/include/asm/cputime.h
@@ -124,23 +124,23 @@ static inline u64 cputime64_to_jiffies64(const cputime_t ct)
}
/*
- * Convert cputime <-> milliseconds
+ * Convert cputime <-> microseconds
*/
extern u64 __cputime_msec_factor;
-static inline unsigned long cputime_to_msecs(const cputime_t ct)
+static inline unsigned long cputime_to_usecs(const cputime_t ct)
{
- return mulhdu(ct, __cputime_msec_factor);
+ return mulhdu(ct, __cputime_msec_factor) * USEC_PER_MSEC;
}
-static inline cputime_t msecs_to_cputime(const unsigned long ms)
+static inline cputime_t usecs_to_cputime(const unsigned long us)
{
cputime_t ct;
unsigned long sec;
/* have to be a little careful about overflow */
- ct = ms % 1000;
- sec = ms / 1000;
+ ct = us % 1000000;
+ sec = us / 1000000;
if (ct) {
ct *= tb_ticks_per_sec;
do_div(ct, 1000);
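The conversion above splits the microsecond count into whole seconds plus a sub-second remainder before multiplying by the timebase rate, which keeps the intermediate product inside 64 bits. A stand-alone sketch of that split with an assumed 512 MHz timebase; it mirrors the shape of the code, not its exact kernel arithmetic helpers.

/* sketch only: microseconds -> timebase ticks without overflowing */
#include <stdio.h>
#include <stdint.h>

static uint64_t usecs_to_ticks(unsigned long us, uint64_t tb_ticks_per_sec)
{
	uint64_t rem = us % 1000000;    /* sub-second part */
	uint64_t sec = us / 1000000;    /* whole seconds */

	rem = rem * tb_ticks_per_sec / 1000000;
	return rem + sec * tb_ticks_per_sec;
}

int main(void)
{
	/* 1.5 s at 512 MHz -> 768000000 ticks */
	printf("%llu\n", (unsigned long long)usecs_to_ticks(1500000, 512000000ull));
	return 0;
}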
diff --git a/arch/powerpc/include/asm/immap_86xx.h b/arch/powerpc/include/asm/fsl_guts.h
index 0f165e59c326..bebd12463ec9 100644
--- a/arch/powerpc/include/asm/immap_86xx.h
+++ b/arch/powerpc/include/asm/fsl_guts.h
@@ -1,5 +1,5 @@
/**
- * MPC86xx Internal Memory Map
+ * Freescale 85xx and 86xx Global Utilities register set
*
* Authors: Jeff Brown
* Timur Tabi <timur@freescale.com>
@@ -10,73 +10,112 @@
* under the terms of the GNU General Public License as published by the
* Free Software Foundation; either version 2 of the License, or (at your
* option) any later version.
- *
- * This header file defines structures for various 86xx SOC devices that are
- * used by multiple source files.
*/
-#ifndef __ASM_POWERPC_IMMAP_86XX_H__
-#define __ASM_POWERPC_IMMAP_86XX_H__
+#ifndef __ASM_POWERPC_FSL_GUTS_H__
+#define __ASM_POWERPC_FSL_GUTS_H__
#ifdef __KERNEL__
-/* Global Utility Registers */
-struct ccsr_guts {
+/*
+ * These #ifdefs are safe because it's not possible to build a kernel that
+ * runs on e500 and e600 cores.
+ */
+
+#if !defined(CONFIG_PPC_85xx) && !defined(CONFIG_PPC_86xx)
+#error Only 85xx and 86xx SOCs are supported
+#endif
+
+/**
+ * Global Utility Registers.
+ *
+ * Not all registers defined in this structure are available on all chips, so
+ * you are expected to know whether a given register actually exists on your
+ * chip before you access it.
+ *
+ * Also, some registers are similar on different chips but have slightly
+ * different names. In these cases, one name is chosen to avoid extraneous
+ * #ifdefs.
+ */
+#ifdef CONFIG_PPC_85xx
+struct ccsr_guts_85xx {
+#else
+struct ccsr_guts_86xx {
+#endif
__be32 porpllsr; /* 0x.0000 - POR PLL Ratio Status Register */
__be32 porbmsr; /* 0x.0004 - POR Boot Mode Status Register */
__be32 porimpscr; /* 0x.0008 - POR I/O Impedance Status and Control Register */
__be32 pordevsr; /* 0x.000c - POR I/O Device Status Register */
__be32 pordbgmsr; /* 0x.0010 - POR Debug Mode Status Register */
- u8 res1[0x20 - 0x14];
+ __be32 pordevsr2; /* 0x.0014 - POR device status register 2 */
+ u8 res018[0x20 - 0x18];
__be32 porcir; /* 0x.0020 - POR Configuration Information Register */
- u8 res2[0x30 - 0x24];
+ u8 res024[0x30 - 0x24];
__be32 gpiocr; /* 0x.0030 - GPIO Control Register */
- u8 res3[0x40 - 0x34];
+ u8 res034[0x40 - 0x34];
__be32 gpoutdr; /* 0x.0040 - General-Purpose Output Data Register */
- u8 res4[0x50 - 0x44];
+ u8 res044[0x50 - 0x44];
__be32 gpindr; /* 0x.0050 - General-Purpose Input Data Register */
- u8 res5[0x60 - 0x54];
+ u8 res054[0x60 - 0x54];
__be32 pmuxcr; /* 0x.0060 - Alternate Function Signal Multiplex Control */
- u8 res6[0x70 - 0x64];
+ __be32 pmuxcr2; /* 0x.0064 - Alternate function signal multiplex control 2 */
+ __be32 dmuxcr; /* 0x.0068 - DMA Mux Control Register */
+ u8 res06c[0x70 - 0x6c];
__be32 devdisr; /* 0x.0070 - Device Disable Control */
__be32 devdisr2; /* 0x.0074 - Device Disable Control 2 */
- u8 res7[0x80 - 0x78];
+ u8 res078[0x7c - 0x78];
+ __be32 pmjcr; /* 0x.007c - Power Management Jog Control Register */
__be32 powmgtcsr; /* 0x.0080 - Power Management Status and Control Register */
- u8 res8[0x90 - 0x84];
+ __be32 pmrccr; /* 0x.0084 - Power Management Reset Counter Configuration Register */
+ __be32 pmpdccr; /* 0x.0088 - Power Management Power Down Counter Configuration Register */
+ __be32 pmcdr; /* 0x.008c - Power management clock disable register */
__be32 mcpsumr; /* 0x.0090 - Machine Check Summary Register */
__be32 rstrscr; /* 0x.0094 - Reset Request Status and Control Register */
- u8 res9[0xA0 - 0x98];
+ __be32 ectrstcr; /* 0x.0098 - Exception reset control register */
+ __be32 autorstsr; /* 0x.009c - Automatic reset status register */
__be32 pvr; /* 0x.00a0 - Processor Version Register */
__be32 svr; /* 0x.00a4 - System Version Register */
- u8 res10[0xB0 - 0xA8];
+ u8 res0a8[0xb0 - 0xa8];
__be32 rstcr; /* 0x.00b0 - Reset Control Register */
- u8 res11[0xC0 - 0xB4];
+ u8 res0b4[0xc0 - 0xb4];
+#ifdef CONFIG_PPC_85xx
+ __be32 iovselsr; /* 0x.00c0 - I/O voltage select status register */
+#else
__be32 elbcvselcr; /* 0x.00c0 - eLBC Voltage Select Ctrl Reg */
- u8 res12[0x800 - 0xC4];
+#endif
+ u8 res0c4[0x224 - 0xc4];
+ __be32 iodelay1; /* 0x.0224 - IO delay control register 1 */
+ __be32 iodelay2; /* 0x.0228 - IO delay control register 2 */
+ u8 res22c[0x800 - 0x22c];
__be32 clkdvdr; /* 0x.0800 - Clock Divide Register */
- u8 res13[0x900 - 0x804];
+ u8 res804[0x900 - 0x804];
__be32 ircr; /* 0x.0900 - Infrared Control Register */
- u8 res14[0x908 - 0x904];
+ u8 res904[0x908 - 0x904];
__be32 dmacr; /* 0x.0908 - DMA Control Register */
- u8 res15[0x914 - 0x90C];
+ u8 res90c[0x914 - 0x90c];
__be32 elbccr; /* 0x.0914 - eLBC Control Register */
- u8 res16[0xB20 - 0x918];
+ u8 res918[0xb20 - 0x918];
__be32 ddr1clkdr; /* 0x.0b20 - DDR1 Clock Disable Register */
__be32 ddr2clkdr; /* 0x.0b24 - DDR2 Clock Disable Register */
__be32 ddrclkdr; /* 0x.0b28 - DDR Clock Disable Register */
- u8 res17[0xE00 - 0xB2C];
+ u8 resb2c[0xe00 - 0xb2c];
__be32 clkocr; /* 0x.0e00 - Clock Out Select Register */
- u8 res18[0xE10 - 0xE04];
+ u8 rese04[0xe10 - 0xe04];
__be32 ddrdllcr; /* 0x.0e10 - DDR DLL Control Register */
- u8 res19[0xE20 - 0xE14];
+ u8 rese14[0xe20 - 0xe14];
__be32 lbcdllcr; /* 0x.0e20 - LBC DLL Control Register */
- u8 res20[0xF04 - 0xE24];
+ __be32 cpfor; /* 0x.0e24 - L2 charge pump fuse override register */
+ u8 rese28[0xf04 - 0xe28];
__be32 srds1cr0; /* 0x.0f04 - SerDes1 Control Register 0 */
__be32 srds1cr1; /* 0x.0f08 - SerDes1 Control Register 0 */
- u8 res21[0xF40 - 0xF0C];
- __be32 srds2cr0; /* 0x.0f40 - SerDes1 Control Register 0 */
- __be32 srds2cr1; /* 0x.0f44 - SerDes1 Control Register 0 */
+ u8 resf0c[0xf2c - 0xf0c];
+ __be32 itcr; /* 0x.0f2c - Internal transaction control register */
+ u8 resf30[0xf40 - 0xf30];
+ __be32 srds2cr0; /* 0x.0f40 - SerDes2 Control Register 0 */
+ __be32 srds2cr1; /* 0x.0f44 - SerDes2 Control Register 1 */
} __attribute__ ((packed));
+#ifdef CONFIG_PPC_86xx
+
#define CCSR_GUTS_DMACR_DEV_SSI 0 /* DMA controller/channel set to SSI */
#define CCSR_GUTS_DMACR_DEV_IR 1 /* DMA controller/channel set to IR */
@@ -93,7 +132,7 @@ struct ccsr_guts {
* ch: The channel on the DMA controller (0, 1, 2, or 3)
* device: The device to set as the source (CCSR_GUTS_DMACR_DEV_xx)
*/
-static inline void guts_set_dmacr(struct ccsr_guts __iomem *guts,
+static inline void guts_set_dmacr(struct ccsr_guts_86xx __iomem *guts,
unsigned int co, unsigned int ch, unsigned int device)
{
unsigned int shift = 16 + (8 * (1 - co) + 2 * (3 - ch));
@@ -129,7 +168,7 @@ static inline void guts_set_dmacr(struct ccsr_guts __iomem *guts,
* ch: The channel on the DMA controller (0, 1, 2, or 3)
* value: the new value for the bit (0 or 1)
*/
-static inline void guts_set_pmuxcr_dma(struct ccsr_guts __iomem *guts,
+static inline void guts_set_pmuxcr_dma(struct ccsr_guts_86xx __iomem *guts,
unsigned int co, unsigned int ch, unsigned int value)
{
if ((ch == 0) || (ch == 3)) {
@@ -152,5 +191,7 @@ static inline void guts_set_pmuxcr_dma(struct ccsr_guts __iomem *guts,
#define CCSR_GUTS_CLKDVDR_SSICLK_MASK 0x000000FF
#define CCSR_GUTS_CLKDVDR_SSICLK(x) ((x) & CCSR_GUTS_CLKDVDR_SSICLK_MASK)
-#endif /* __ASM_POWERPC_IMMAP_86XX_H__ */
-#endif /* __KERNEL__ */
+#endif
+
+#endif
+#endif
diff --git a/arch/powerpc/include/asm/fsl_lbc.h b/arch/powerpc/include/asm/fsl_lbc.h
index 1b5a21041f9b..5c1bf3466749 100644
--- a/arch/powerpc/include/asm/fsl_lbc.h
+++ b/arch/powerpc/include/asm/fsl_lbc.h
@@ -1,9 +1,10 @@
/* Freescale Local Bus Controller
*
- * Copyright (c) 2006-2007 Freescale Semiconductor
+ * Copyright © 2006-2007, 2010 Freescale Semiconductor
*
* Authors: Nick Spence <nick.spence@freescale.com>,
* Scott Wood <scottwood@freescale.com>
+ * Jack Lan <jack.lan@freescale.com>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
@@ -26,6 +27,8 @@
#include <linux/compiler.h>
#include <linux/types.h>
#include <linux/io.h>
+#include <linux/device.h>
+#include <linux/spinlock.h>
struct fsl_lbc_bank {
__be32 br; /**< Base Register */
@@ -125,13 +128,23 @@ struct fsl_lbc_regs {
#define LTESR_ATMW 0x00800000
#define LTESR_ATMR 0x00400000
#define LTESR_CS 0x00080000
+#define LTESR_UPM 0x00000002
#define LTESR_CC 0x00000001
#define LTESR_NAND_MASK (LTESR_FCT | LTESR_PAR | LTESR_CC)
+#define LTESR_MASK (LTESR_BM | LTESR_FCT | LTESR_PAR | LTESR_WP \
+ | LTESR_ATMW | LTESR_ATMR | LTESR_CS | LTESR_UPM \
+ | LTESR_CC)
+#define LTESR_CLEAR 0xFFFFFFFF
+#define LTECCR_CLEAR 0xFFFFFFFF
+#define LTESR_STATUS LTESR_MASK
+#define LTEIR_ENABLE LTESR_MASK
+#define LTEDR_ENABLE 0x00000000
__be32 ltedr; /**< Transfer Error Disable Register */
__be32 lteir; /**< Transfer Error Interrupt Register */
__be32 lteatr; /**< Transfer Error Attributes Register */
__be32 ltear; /**< Transfer Error Address Register */
- u8 res6[0xC];
+ __be32 lteccr; /**< Transfer Error ECC Register */
+ u8 res6[0x8];
__be32 lbcr; /**< Configuration Register */
#define LBCR_LDIS 0x80000000
#define LBCR_LDIS_SHIFT 31
@@ -235,6 +248,7 @@ struct fsl_upm {
int width;
};
+extern u32 fsl_lbc_addr(phys_addr_t addr_base);
extern int fsl_lbc_find(phys_addr_t addr_base);
extern int fsl_upm_find(phys_addr_t addr_base, struct fsl_upm *upm);
@@ -265,7 +279,23 @@ static inline void fsl_upm_end_pattern(struct fsl_upm *upm)
cpu_relax();
}
+/* overview of the fsl lbc controller */
+
+struct fsl_lbc_ctrl {
+ /* device info */
+ struct device *dev;
+ struct fsl_lbc_regs __iomem *regs;
+ int irq;
+ wait_queue_head_t irq_wait;
+ spinlock_t lock;
+ void *nand;
+
+ /* status read from LTESR by irq handler */
+ unsigned int irq_status;
+};
+
extern int fsl_upm_run_pattern(struct fsl_upm *upm, void __iomem *io_base,
u32 mar);
+extern struct fsl_lbc_ctrl *fsl_lbc_ctrl_dev;
#endif /* __ASM_FSL_LBC_H */
diff --git a/arch/powerpc/include/asm/fsldma.h b/arch/powerpc/include/asm/fsldma.h
deleted file mode 100644
index debc5ed96d6e..000000000000
--- a/arch/powerpc/include/asm/fsldma.h
+++ /dev/null
@@ -1,137 +0,0 @@
-/*
- * Freescale MPC83XX / MPC85XX DMA Controller
- *
- * Copyright (c) 2009 Ira W. Snyder <iws@ovro.caltech.edu>
- *
- * This file is licensed under the terms of the GNU General Public License
- * version 2. This program is licensed "as is" without any warranty of any
- * kind, whether express or implied.
- */
-
-#ifndef __ARCH_POWERPC_ASM_FSLDMA_H__
-#define __ARCH_POWERPC_ASM_FSLDMA_H__
-
-#include <linux/slab.h>
-#include <linux/dmaengine.h>
-
-/*
- * Definitions for the Freescale DMA controller's DMA_SLAVE implemention
- *
- * The Freescale DMA_SLAVE implementation was designed to handle many-to-many
- * transfers. An example usage would be an accelerated copy between two
- * scatterlists. Another example use would be an accelerated copy from
- * multiple non-contiguous device buffers into a single scatterlist.
- *
- * A DMA_SLAVE transaction is defined by a struct fsl_dma_slave. This
- * structure contains a list of hardware addresses that should be copied
- * to/from the scatterlist passed into device_prep_slave_sg(). The structure
- * also has some fields to enable hardware-specific features.
- */
-
-/**
- * struct fsl_dma_hw_addr
- * @entry: linked list entry
- * @address: the hardware address
- * @length: length to transfer
- *
- * Holds a single physical hardware address / length pair for use
- * with the DMAEngine DMA_SLAVE API.
- */
-struct fsl_dma_hw_addr {
- struct list_head entry;
-
- dma_addr_t address;
- size_t length;
-};
-
-/**
- * struct fsl_dma_slave
- * @addresses: a linked list of struct fsl_dma_hw_addr structures
- * @request_count: value for DMA request count
- * @src_loop_size: setup and enable constant source-address DMA transfers
- * @dst_loop_size: setup and enable constant destination address DMA transfers
- * @external_start: enable externally started DMA transfers
- * @external_pause: enable externally paused DMA transfers
- *
- * Holds a list of address / length pairs for use with the DMAEngine
- * DMA_SLAVE API implementation for the Freescale DMA controller.
- */
-struct fsl_dma_slave {
-
- /* List of hardware address/length pairs */
- struct list_head addresses;
-
- /* Support for extra controller features */
- unsigned int request_count;
- unsigned int src_loop_size;
- unsigned int dst_loop_size;
- bool external_start;
- bool external_pause;
-};
-
-/**
- * fsl_dma_slave_append - add an address/length pair to a struct fsl_dma_slave
- * @slave: the &struct fsl_dma_slave to add to
- * @address: the hardware address to add
- * @length: the length of bytes to transfer from @address
- *
- * Add a hardware address/length pair to a struct fsl_dma_slave. Returns 0 on
- * success, -ERRNO otherwise.
- */
-static inline int fsl_dma_slave_append(struct fsl_dma_slave *slave,
- dma_addr_t address, size_t length)
-{
- struct fsl_dma_hw_addr *addr;
-
- addr = kzalloc(sizeof(*addr), GFP_ATOMIC);
- if (!addr)
- return -ENOMEM;
-
- INIT_LIST_HEAD(&addr->entry);
- addr->address = address;
- addr->length = length;
-
- list_add_tail(&addr->entry, &slave->addresses);
- return 0;
-}
-
-/**
- * fsl_dma_slave_free - free a struct fsl_dma_slave
- * @slave: the struct fsl_dma_slave to free
- *
- * Free a struct fsl_dma_slave and all associated address/length pairs
- */
-static inline void fsl_dma_slave_free(struct fsl_dma_slave *slave)
-{
- struct fsl_dma_hw_addr *addr, *tmp;
-
- if (slave) {
- list_for_each_entry_safe(addr, tmp, &slave->addresses, entry) {
- list_del(&addr->entry);
- kfree(addr);
- }
-
- kfree(slave);
- }
-}
-
-/**
- * fsl_dma_slave_alloc - allocate a struct fsl_dma_slave
- * @gfp: the flags to pass to kmalloc when allocating this structure
- *
- * Allocate a struct fsl_dma_slave for use by the DMA_SLAVE API. Returns a new
- * struct fsl_dma_slave on success, or NULL on failure.
- */
-static inline struct fsl_dma_slave *fsl_dma_slave_alloc(gfp_t gfp)
-{
- struct fsl_dma_slave *slave;
-
- slave = kzalloc(sizeof(*slave), gfp);
- if (!slave)
- return NULL;
-
- INIT_LIST_HEAD(&slave->addresses);
- return slave;
-}
-
-#endif /* __ARCH_POWERPC_ASM_FSLDMA_H__ */
diff --git a/arch/powerpc/include/asm/highmem.h b/arch/powerpc/include/asm/highmem.h
index d10d64a4be38..dbc264010d0b 100644
--- a/arch/powerpc/include/asm/highmem.h
+++ b/arch/powerpc/include/asm/highmem.h
@@ -60,9 +60,8 @@ extern pte_t *pkmap_page_table;
extern void *kmap_high(struct page *page);
extern void kunmap_high(struct page *page);
-extern void *kmap_atomic_prot(struct page *page, enum km_type type,
- pgprot_t prot);
-extern void kunmap_atomic_notypecheck(void *kvaddr, enum km_type type);
+extern void *kmap_atomic_prot(struct page *page, pgprot_t prot);
+extern void __kunmap_atomic(void *kvaddr);
static inline void *kmap(struct page *page)
{
@@ -80,9 +79,9 @@ static inline void kunmap(struct page *page)
kunmap_high(page);
}
-static inline void *kmap_atomic(struct page *page, enum km_type type)
+static inline void *__kmap_atomic(struct page *page)
{
- return kmap_atomic_prot(page, type, kmap_prot);
+ return kmap_atomic_prot(page, kmap_prot);
}
static inline struct page *kmap_atomic_to_page(void *ptr)
diff --git a/arch/powerpc/include/asm/kgdb.h b/arch/powerpc/include/asm/kgdb.h
index edd217006d27..9db24e77b9f4 100644
--- a/arch/powerpc/include/asm/kgdb.h
+++ b/arch/powerpc/include/asm/kgdb.h
@@ -31,6 +31,7 @@ static inline void arch_kgdb_breakpoint(void)
asm(".long 0x7d821008"); /* twge r2, r2 */
}
#define CACHE_FLUSH_IS_SAFE 1
+#define DBG_MAX_REG_NUM 70
/* The number bytes of registers we have to save depends on a few
* things. For 64bit we default to not including vector registers and
diff --git a/arch/powerpc/include/asm/pgtable-ppc32.h b/arch/powerpc/include/asm/pgtable-ppc32.h
index a7db96f2b5c3..47edde8c3556 100644
--- a/arch/powerpc/include/asm/pgtable-ppc32.h
+++ b/arch/powerpc/include/asm/pgtable-ppc32.h
@@ -308,12 +308,8 @@ static inline void __ptep_set_access_flags(pte_t *ptep, pte_t entry)
#define pte_offset_kernel(dir, addr) \
((pte_t *) pmd_page_vaddr(*(dir)) + pte_index(addr))
#define pte_offset_map(dir, addr) \
- ((pte_t *) kmap_atomic(pmd_page(*(dir)), KM_PTE0) + pte_index(addr))
-#define pte_offset_map_nested(dir, addr) \
- ((pte_t *) kmap_atomic(pmd_page(*(dir)), KM_PTE1) + pte_index(addr))
-
-#define pte_unmap(pte) kunmap_atomic(pte, KM_PTE0)
-#define pte_unmap_nested(pte) kunmap_atomic(pte, KM_PTE1)
+ ((pte_t *) kmap_atomic(pmd_page(*(dir))) + pte_index(addr))
+#define pte_unmap(pte) kunmap_atomic(pte)
/*
* Encode and decode a swap entry.
diff --git a/arch/powerpc/include/asm/pgtable-ppc64.h b/arch/powerpc/include/asm/pgtable-ppc64.h
index 49865045d56f..2b09cd522d33 100644
--- a/arch/powerpc/include/asm/pgtable-ppc64.h
+++ b/arch/powerpc/include/asm/pgtable-ppc64.h
@@ -193,9 +193,7 @@
(((pte_t *) pmd_page_vaddr(*(dir))) + (((addr) >> PAGE_SHIFT) & (PTRS_PER_PTE - 1)))
#define pte_offset_map(dir,addr) pte_offset_kernel((dir), (addr))
-#define pte_offset_map_nested(dir,addr) pte_offset_kernel((dir), (addr))
#define pte_unmap(pte) do { } while(0)
-#define pte_unmap_nested(pte) do { } while(0)
/* to find an entry in a kernel page-table-directory */
/* This now only contains the vmalloc pages */
diff --git a/arch/powerpc/kernel/ibmebus.c b/arch/powerpc/kernel/ibmebus.c
index 9b626cfffce1..f62efdfd1769 100644
--- a/arch/powerpc/kernel/ibmebus.c
+++ b/arch/powerpc/kernel/ibmebus.c
@@ -162,13 +162,10 @@ static int ibmebus_create_device(struct device_node *dn)
dev->dev.bus = &ibmebus_bus_type;
dev->dev.archdata.dma_ops = &ibmebus_dma_ops;
- ret = of_device_register(dev);
- if (ret) {
- of_device_free(dev);
- return ret;
- }
-
- return 0;
+ ret = of_device_add(dev);
+ if (ret)
+ platform_device_put(dev);
+ return ret;
}
static int ibmebus_create_devices(const struct of_device_id *matches)
diff --git a/arch/powerpc/kernel/kgdb.c b/arch/powerpc/kernel/kgdb.c
index 7f61a3ac787c..42850ee00ada 100644
--- a/arch/powerpc/kernel/kgdb.c
+++ b/arch/powerpc/kernel/kgdb.c
@@ -194,40 +194,6 @@ static int kgdb_dabr_match(struct pt_regs *regs)
ptr = (unsigned long *)ptr32; \
} while (0)
-
-void pt_regs_to_gdb_regs(unsigned long *gdb_regs, struct pt_regs *regs)
-{
- unsigned long *ptr = gdb_regs;
- int reg;
-
- memset(gdb_regs, 0, NUMREGBYTES);
-
- for (reg = 0; reg < 32; reg++)
- PACK64(ptr, regs->gpr[reg]);
-
-#ifdef CONFIG_FSL_BOOKE
-#ifdef CONFIG_SPE
- for (reg = 0; reg < 32; reg++)
- PACK64(ptr, current->thread.evr[reg]);
-#else
- ptr += 32;
-#endif
-#else
- /* fp registers not used by kernel, leave zero */
- ptr += 32 * 8 / sizeof(long);
-#endif
-
- PACK64(ptr, regs->nip);
- PACK64(ptr, regs->msr);
- PACK32(ptr, regs->ccr);
- PACK64(ptr, regs->link);
- PACK64(ptr, regs->ctr);
- PACK32(ptr, regs->xer);
-
- BUG_ON((unsigned long)ptr >
- (unsigned long)(((void *)gdb_regs) + NUMREGBYTES));
-}
-
void sleeping_thread_to_gdb_regs(unsigned long *gdb_regs, struct task_struct *p)
{
struct pt_regs *regs = (struct pt_regs *)(p->thread.ksp +
@@ -271,44 +237,140 @@ void sleeping_thread_to_gdb_regs(unsigned long *gdb_regs, struct task_struct *p)
(unsigned long)(((void *)gdb_regs) + NUMREGBYTES));
}
-#define UNPACK64(dest, ptr) do { dest = *(ptr++); } while (0)
+#define GDB_SIZEOF_REG sizeof(unsigned long)
+#define GDB_SIZEOF_REG_U32 sizeof(u32)
-#define UNPACK32(dest, ptr) do { \
- u32 *ptr32; \
- ptr32 = (u32 *)ptr; \
- dest = *(ptr32++); \
- ptr = (unsigned long *)ptr32; \
- } while (0)
+#ifdef CONFIG_FSL_BOOKE
+#define GDB_SIZEOF_FLOAT_REG sizeof(unsigned long)
+#else
+#define GDB_SIZEOF_FLOAT_REG sizeof(u64)
+#endif
-void gdb_regs_to_pt_regs(unsigned long *gdb_regs, struct pt_regs *regs)
+struct dbg_reg_def_t dbg_reg_def[DBG_MAX_REG_NUM] =
{
- unsigned long *ptr = gdb_regs;
- int reg;
-
- for (reg = 0; reg < 32; reg++)
- UNPACK64(regs->gpr[reg], ptr);
+ { "r0", GDB_SIZEOF_REG, offsetof(struct pt_regs, gpr[0]) },
+ { "r1", GDB_SIZEOF_REG, offsetof(struct pt_regs, gpr[1]) },
+ { "r2", GDB_SIZEOF_REG, offsetof(struct pt_regs, gpr[2]) },
+ { "r3", GDB_SIZEOF_REG, offsetof(struct pt_regs, gpr[3]) },
+ { "r4", GDB_SIZEOF_REG, offsetof(struct pt_regs, gpr[4]) },
+ { "r5", GDB_SIZEOF_REG, offsetof(struct pt_regs, gpr[5]) },
+ { "r6", GDB_SIZEOF_REG, offsetof(struct pt_regs, gpr[6]) },
+ { "r7", GDB_SIZEOF_REG, offsetof(struct pt_regs, gpr[7]) },
+ { "r8", GDB_SIZEOF_REG, offsetof(struct pt_regs, gpr[8]) },
+ { "r9", GDB_SIZEOF_REG, offsetof(struct pt_regs, gpr[9]) },
+ { "r10", GDB_SIZEOF_REG, offsetof(struct pt_regs, gpr[10]) },
+ { "r11", GDB_SIZEOF_REG, offsetof(struct pt_regs, gpr[11]) },
+ { "r12", GDB_SIZEOF_REG, offsetof(struct pt_regs, gpr[12]) },
+ { "r13", GDB_SIZEOF_REG, offsetof(struct pt_regs, gpr[13]) },
+ { "r14", GDB_SIZEOF_REG, offsetof(struct pt_regs, gpr[14]) },
+ { "r15", GDB_SIZEOF_REG, offsetof(struct pt_regs, gpr[15]) },
+ { "r16", GDB_SIZEOF_REG, offsetof(struct pt_regs, gpr[16]) },
+ { "r17", GDB_SIZEOF_REG, offsetof(struct pt_regs, gpr[17]) },
+ { "r18", GDB_SIZEOF_REG, offsetof(struct pt_regs, gpr[18]) },
+ { "r19", GDB_SIZEOF_REG, offsetof(struct pt_regs, gpr[19]) },
+ { "r20", GDB_SIZEOF_REG, offsetof(struct pt_regs, gpr[20]) },
+ { "r21", GDB_SIZEOF_REG, offsetof(struct pt_regs, gpr[21]) },
+ { "r22", GDB_SIZEOF_REG, offsetof(struct pt_regs, gpr[22]) },
+ { "r23", GDB_SIZEOF_REG, offsetof(struct pt_regs, gpr[23]) },
+ { "r24", GDB_SIZEOF_REG, offsetof(struct pt_regs, gpr[24]) },
+ { "r25", GDB_SIZEOF_REG, offsetof(struct pt_regs, gpr[25]) },
+ { "r26", GDB_SIZEOF_REG, offsetof(struct pt_regs, gpr[26]) },
+ { "r27", GDB_SIZEOF_REG, offsetof(struct pt_regs, gpr[27]) },
+ { "r28", GDB_SIZEOF_REG, offsetof(struct pt_regs, gpr[28]) },
+ { "r29", GDB_SIZEOF_REG, offsetof(struct pt_regs, gpr[29]) },
+ { "r30", GDB_SIZEOF_REG, offsetof(struct pt_regs, gpr[30]) },
+ { "r31", GDB_SIZEOF_REG, offsetof(struct pt_regs, gpr[31]) },
+
+ { "f0", GDB_SIZEOF_FLOAT_REG, 0 },
+ { "f1", GDB_SIZEOF_FLOAT_REG, 1 },
+ { "f2", GDB_SIZEOF_FLOAT_REG, 2 },
+ { "f3", GDB_SIZEOF_FLOAT_REG, 3 },
+ { "f4", GDB_SIZEOF_FLOAT_REG, 4 },
+ { "f5", GDB_SIZEOF_FLOAT_REG, 5 },
+ { "f6", GDB_SIZEOF_FLOAT_REG, 6 },
+ { "f7", GDB_SIZEOF_FLOAT_REG, 7 },
+ { "f8", GDB_SIZEOF_FLOAT_REG, 8 },
+ { "f9", GDB_SIZEOF_FLOAT_REG, 9 },
+ { "f10", GDB_SIZEOF_FLOAT_REG, 10 },
+ { "f11", GDB_SIZEOF_FLOAT_REG, 11 },
+ { "f12", GDB_SIZEOF_FLOAT_REG, 12 },
+ { "f13", GDB_SIZEOF_FLOAT_REG, 13 },
+ { "f14", GDB_SIZEOF_FLOAT_REG, 14 },
+ { "f15", GDB_SIZEOF_FLOAT_REG, 15 },
+ { "f16", GDB_SIZEOF_FLOAT_REG, 16 },
+ { "f17", GDB_SIZEOF_FLOAT_REG, 17 },
+ { "f18", GDB_SIZEOF_FLOAT_REG, 18 },
+ { "f19", GDB_SIZEOF_FLOAT_REG, 19 },
+ { "f20", GDB_SIZEOF_FLOAT_REG, 20 },
+ { "f21", GDB_SIZEOF_FLOAT_REG, 21 },
+ { "f22", GDB_SIZEOF_FLOAT_REG, 22 },
+ { "f23", GDB_SIZEOF_FLOAT_REG, 23 },
+ { "f24", GDB_SIZEOF_FLOAT_REG, 24 },
+ { "f25", GDB_SIZEOF_FLOAT_REG, 25 },
+ { "f26", GDB_SIZEOF_FLOAT_REG, 26 },
+ { "f27", GDB_SIZEOF_FLOAT_REG, 27 },
+ { "f28", GDB_SIZEOF_FLOAT_REG, 28 },
+ { "f29", GDB_SIZEOF_FLOAT_REG, 29 },
+ { "f30", GDB_SIZEOF_FLOAT_REG, 30 },
+ { "f31", GDB_SIZEOF_FLOAT_REG, 31 },
+
+ { "pc", GDB_SIZEOF_REG, offsetof(struct pt_regs, nip) },
+ { "msr", GDB_SIZEOF_REG, offsetof(struct pt_regs, msr) },
+ { "cr", GDB_SIZEOF_REG_U32, offsetof(struct pt_regs, ccr) },
+ { "lr", GDB_SIZEOF_REG, offsetof(struct pt_regs, link) },
+ { "ctr", GDB_SIZEOF_REG_U32, offsetof(struct pt_regs, ctr) },
+ { "xer", GDB_SIZEOF_REG, offsetof(struct pt_regs, xer) },
+};
-#ifdef CONFIG_FSL_BOOKE
-#ifdef CONFIG_SPE
- for (reg = 0; reg < 32; reg++)
- UNPACK64(current->thread.evr[reg], ptr);
+char *dbg_get_reg(int regno, void *mem, struct pt_regs *regs)
+{
+ if (regno >= DBG_MAX_REG_NUM || regno < 0)
+ return NULL;
+
+ if (regno < 32 || regno >= 64)
+ /* First 0 -> 31 gpr registers*/
+ /* pc, msr, cr, lr... registers 64 -> 69 */
+ memcpy(mem, (void *)regs + dbg_reg_def[regno].offset,
+ dbg_reg_def[regno].size);
+
+ if (regno >= 32 && regno < 64) {
+ /* FP registers 32 -> 63 */
+#if defined(CONFIG_FSL_BOOKE) && defined(CONFIG_SPE)
+ if (current)
+ memcpy(mem, &current->thread.evr[regno-32],
+ dbg_reg_def[regno].size);
#else
- ptr += 32;
+ /* fp registers not used by kernel, leave zero */
+ memset(mem, 0, dbg_reg_def[regno].size);
#endif
+ }
+
+ return dbg_reg_def[regno].name;
+}
+
+int dbg_set_reg(int regno, void *mem, struct pt_regs *regs)
+{
+ if (regno >= DBG_MAX_REG_NUM || regno < 0)
+ return -EINVAL;
+
+ if (regno < 32 || regno >= 64)
+ /* First 0 -> 31 gpr registers*/
+ /* pc, msr, cr, lr... registers 64 -> 69 */
+ memcpy((void *)regs + dbg_reg_def[regno].offset, mem,
+ dbg_reg_def[regno].size);
+
+ if (regno >= 32 && regno < 64) {
+ /* FP registers 32 -> 63 */
+#if defined(CONFIG_FSL_BOOKE) && defined(CONFIG_SPE)
+ memcpy(&current->thread.evr[regno-32], mem,
+ dbg_reg_def[regno].size);
#else
- /* fp registers not used by kernel, leave zero */
- ptr += 32 * 8 / sizeof(int);
+ /* fp registers not used by kernel, leave zero */
+ return 0;
#endif
+ }
- UNPACK64(regs->nip, ptr);
- UNPACK64(regs->msr, ptr);
- UNPACK32(regs->ccr, ptr);
- UNPACK64(regs->link, ptr);
- UNPACK64(regs->ctr, ptr);
- UNPACK32(regs->xer, ptr);
-
- BUG_ON((unsigned long)ptr >
- (unsigned long)(((void *)gdb_regs) + NUMREGBYTES));
+ return 0;
}
void kgdb_arch_set_pc(struct pt_regs *regs, unsigned long pc)
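Note on the kgdb hunk above: the hand-rolled pt_regs_to_gdb_regs()/gdb_regs_to_pt_regs() marshalling is replaced by the dbg_reg_def[] table, where entries 0-31 describe the GPRs via pt_regs offsets, 32-63 the floating-point/SPE slots (zero-filled when the kernel does not use them), and 64-69 pc, msr, cr, lr, ctr and xer. The following is only an illustrative sketch of how a caller could walk that table through the new dbg_get_reg() helper; the loop and buffer handling are not part of the patch.

/* Illustrative sketch only -- not part of the patch. */
static void example_collect_regs(struct pt_regs *regs, void *buf)
{
	unsigned char *p = buf;
	int i;

	for (i = 0; i < DBG_MAX_REG_NUM; i++) {
		/* dbg_get_reg() copies dbg_reg_def[i].size bytes into p and
		 * returns the register name, or NULL when out of range. */
		if (!dbg_get_reg(i, p, regs))
			break;
		p += dbg_reg_def[i].size;
	}
}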
diff --git a/arch/powerpc/kernel/kvm.c b/arch/powerpc/kernel/kvm.c
index 428d0e538aec..b06bdae04064 100644
--- a/arch/powerpc/kernel/kvm.c
+++ b/arch/powerpc/kernel/kvm.c
@@ -127,7 +127,7 @@ static void kvm_patch_ins_nop(u32 *inst)
static void kvm_patch_ins_b(u32 *inst, int addr)
{
-#ifdef CONFIG_RELOCATABLE
+#if defined(CONFIG_RELOCATABLE) && defined(CONFIG_PPC_BOOK3S)
/* On relocatable kernels interrupts handlers and our code
can be in different regions, so we don't patch them */
diff --git a/arch/powerpc/kernel/legacy_serial.c b/arch/powerpc/kernel/legacy_serial.c
index c1fd0f9658fd..c834757bebc0 100644
--- a/arch/powerpc/kernel/legacy_serial.c
+++ b/arch/powerpc/kernel/legacy_serial.c
@@ -52,14 +52,14 @@ static int __init add_legacy_port(struct device_node *np, int want_index,
phys_addr_t taddr, unsigned long irq,
upf_t flags, int irq_check_parent)
{
- const u32 *clk, *spd;
+ const __be32 *clk, *spd;
u32 clock = BASE_BAUD * 16;
int index;
/* get clock freq. if present */
clk = of_get_property(np, "clock-frequency", NULL);
if (clk && *clk)
- clock = *clk;
+ clock = be32_to_cpup(clk);
/* get default speed if present */
spd = of_get_property(np, "current-speed", NULL);
@@ -109,7 +109,7 @@ static int __init add_legacy_port(struct device_node *np, int want_index,
legacy_serial_infos[index].taddr = taddr;
legacy_serial_infos[index].np = of_node_get(np);
legacy_serial_infos[index].clock = clock;
- legacy_serial_infos[index].speed = spd ? *spd : 0;
+ legacy_serial_infos[index].speed = spd ? be32_to_cpup(spd) : 0;
legacy_serial_infos[index].irq_check_parent = irq_check_parent;
printk(KERN_DEBUG "Found legacy serial port %d for %s\n",
@@ -168,7 +168,7 @@ static int __init add_legacy_soc_port(struct device_node *np,
static int __init add_legacy_isa_port(struct device_node *np,
struct device_node *isa_brg)
{
- const u32 *reg;
+ const __be32 *reg;
const char *typep;
int index = -1;
u64 taddr;
@@ -181,7 +181,7 @@ static int __init add_legacy_isa_port(struct device_node *np,
return -1;
/* Verify it's an IO port, we don't support anything else */
- if (!(reg[0] & 0x00000001))
+ if (!(be32_to_cpu(reg[0]) & 0x00000001))
return -1;
/* Now look for an "ibm,aix-loc" property that gives us ordering
@@ -202,7 +202,7 @@ static int __init add_legacy_isa_port(struct device_node *np,
taddr = 0;
/* Add port, irq will be dealt with later */
- return add_legacy_port(np, index, UPIO_PORT, reg[1], taddr,
+ return add_legacy_port(np, index, UPIO_PORT, be32_to_cpu(reg[1]), taddr,
NO_IRQ, UPF_BOOT_AUTOCONF, 0);
}
@@ -251,9 +251,9 @@ static int __init add_legacy_pci_port(struct device_node *np,
* we get to their "reg" property
*/
if (np != pci_dev) {
- const u32 *reg = of_get_property(np, "reg", NULL);
- if (reg && (*reg < 4))
- index = lindex = *reg;
+ const __be32 *reg = of_get_property(np, "reg", NULL);
+ if (reg && (be32_to_cpup(reg) < 4))
+ index = lindex = be32_to_cpup(reg);
}
/* Local index means it's the Nth port in the PCI chip. Unfortunately
@@ -507,7 +507,7 @@ static int __init check_legacy_serial_console(void)
struct device_node *prom_stdout = NULL;
int i, speed = 0, offset = 0;
const char *name;
- const u32 *spd;
+ const __be32 *spd;
DBG(" -> check_legacy_serial_console()\n");
@@ -547,7 +547,7 @@ static int __init check_legacy_serial_console(void)
}
spd = of_get_property(prom_stdout, "current-speed", NULL);
if (spd)
- speed = *spd;
+ speed = be32_to_cpup(spd);
if (strcmp(name, "serial") != 0)
goto not_found;
diff --git a/arch/powerpc/kernel/prom.c b/arch/powerpc/kernel/prom.c
index c3c6a8857544..9e3132db718b 100644
--- a/arch/powerpc/kernel/prom.c
+++ b/arch/powerpc/kernel/prom.c
@@ -364,10 +364,15 @@ static int __init early_init_dt_scan_cpus(unsigned long node,
return 0;
}
-void __init early_init_dt_scan_chosen_arch(unsigned long node)
+int __init early_init_dt_scan_chosen_ppc(unsigned long node, const char *uname,
+ int depth, void *data)
{
unsigned long *lprop;
+ /* Use common scan routine to determine if this is the chosen node */
+ if (early_init_dt_scan_chosen(node, uname, depth, data) == 0)
+ return 0;
+
#ifdef CONFIG_PPC64
/* check if iommu is forced on or off */
if (of_get_flat_dt_prop(node, "linux,iommu-off", NULL) != NULL)
@@ -399,6 +404,9 @@ void __init early_init_dt_scan_chosen_arch(unsigned long node)
if (lprop)
crashk_res.end = crashk_res.start + *lprop - 1;
#endif
+
+ /* break now */
+ return 1;
}
#ifdef CONFIG_PPC_PSERIES
@@ -683,7 +691,7 @@ void __init early_init_devtree(void *params)
* device-tree, including the platform type, initrd location and
* size, TCE reserve, and more ...
*/
- of_scan_flat_dt(early_init_dt_scan_chosen, NULL);
+ of_scan_flat_dt(early_init_dt_scan_chosen_ppc, NULL);
/* Scan memory nodes and rebuild MEMBLOCKs */
memblock_init();
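For context on the return values introduced in the prom.c hunk: of_scan_flat_dt() calls its callback for every node of the flattened device tree and stops the walk as soon as the callback returns non-zero, which is why early_init_dt_scan_chosen_ppc() now returns 0 for non-matching nodes and 1 once the chosen node has been handled. A hedged sketch of that callback convention follows; the node matching is simplified compared with the real helper.

/* Sketch of the of_scan_flat_dt() callback convention (simplified). */
static int __init example_scan_chosen(unsigned long node, const char *uname,
				      int depth, void *data)
{
	if (depth != 1 || strcmp(uname, "chosen") != 0)
		return 0;		/* not the chosen node -- keep scanning */

	/* ... read properties with of_get_flat_dt_prop(node, ...) ... */

	return 1;			/* found and handled -- stop the walk */
}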
diff --git a/arch/powerpc/kernel/ptrace.c b/arch/powerpc/kernel/ptrace.c
index 286d9783d93f..a9b32967cff6 100644
--- a/arch/powerpc/kernel/ptrace.c
+++ b/arch/powerpc/kernel/ptrace.c
@@ -1406,37 +1406,42 @@ static long ppc_del_hwdebug(struct task_struct *child, long addr, long data)
* Here are the old "legacy" powerpc specific getregs/setregs ptrace calls,
* we mark them as obsolete now, they will be removed in a future version
*/
-static long arch_ptrace_old(struct task_struct *child, long request, long addr,
- long data)
+static long arch_ptrace_old(struct task_struct *child, long request,
+ unsigned long addr, unsigned long data)
{
+ void __user *datavp = (void __user *) data;
+
switch (request) {
case PPC_PTRACE_GETREGS: /* Get GPRs 0 - 31. */
return copy_regset_to_user(child, &user_ppc_native_view,
REGSET_GPR, 0, 32 * sizeof(long),
- (void __user *) data);
+ datavp);
case PPC_PTRACE_SETREGS: /* Set GPRs 0 - 31. */
return copy_regset_from_user(child, &user_ppc_native_view,
REGSET_GPR, 0, 32 * sizeof(long),
- (const void __user *) data);
+ datavp);
case PPC_PTRACE_GETFPREGS: /* Get FPRs 0 - 31. */
return copy_regset_to_user(child, &user_ppc_native_view,
REGSET_FPR, 0, 32 * sizeof(double),
- (void __user *) data);
+ datavp);
case PPC_PTRACE_SETFPREGS: /* Set FPRs 0 - 31. */
return copy_regset_from_user(child, &user_ppc_native_view,
REGSET_FPR, 0, 32 * sizeof(double),
- (const void __user *) data);
+ datavp);
}
return -EPERM;
}
-long arch_ptrace(struct task_struct *child, long request, long addr, long data)
+long arch_ptrace(struct task_struct *child, long request,
+ unsigned long addr, unsigned long data)
{
int ret = -EPERM;
+ void __user *datavp = (void __user *) data;
+ unsigned long __user *datalp = datavp;
switch (request) {
/* read the word at location addr in the USER area. */
@@ -1446,11 +1451,11 @@ long arch_ptrace(struct task_struct *child, long request, long addr, long data)
ret = -EIO;
/* convert to index and check */
#ifdef CONFIG_PPC32
- index = (unsigned long) addr >> 2;
+ index = addr >> 2;
if ((addr & 3) || (index > PT_FPSCR)
|| (child->thread.regs == NULL))
#else
- index = (unsigned long) addr >> 3;
+ index = addr >> 3;
if ((addr & 7) || (index > PT_FPSCR))
#endif
break;
@@ -1463,7 +1468,7 @@ long arch_ptrace(struct task_struct *child, long request, long addr, long data)
tmp = ((unsigned long *)child->thread.fpr)
[TS_FPRWIDTH * (index - PT_FPR0)];
}
- ret = put_user(tmp,(unsigned long __user *) data);
+ ret = put_user(tmp, datalp);
break;
}
@@ -1474,11 +1479,11 @@ long arch_ptrace(struct task_struct *child, long request, long addr, long data)
ret = -EIO;
/* convert to index and check */
#ifdef CONFIG_PPC32
- index = (unsigned long) addr >> 2;
+ index = addr >> 2;
if ((addr & 3) || (index > PT_FPSCR)
|| (child->thread.regs == NULL))
#else
- index = (unsigned long) addr >> 3;
+ index = addr >> 3;
if ((addr & 7) || (index > PT_FPSCR))
#endif
break;
@@ -1525,11 +1530,11 @@ long arch_ptrace(struct task_struct *child, long request, long addr, long data)
dbginfo.features = 0;
#endif /* CONFIG_PPC_ADV_DEBUG_REGS */
- if (!access_ok(VERIFY_WRITE, data,
+ if (!access_ok(VERIFY_WRITE, datavp,
sizeof(struct ppc_debug_info)))
return -EFAULT;
- ret = __copy_to_user((struct ppc_debug_info __user *)data,
- &dbginfo, sizeof(struct ppc_debug_info)) ?
+ ret = __copy_to_user(datavp, &dbginfo,
+ sizeof(struct ppc_debug_info)) ?
-EFAULT : 0;
break;
}
@@ -1537,11 +1542,10 @@ long arch_ptrace(struct task_struct *child, long request, long addr, long data)
case PPC_PTRACE_SETHWDEBUG: {
struct ppc_hw_breakpoint bp_info;
- if (!access_ok(VERIFY_READ, data,
+ if (!access_ok(VERIFY_READ, datavp,
sizeof(struct ppc_hw_breakpoint)))
return -EFAULT;
- ret = __copy_from_user(&bp_info,
- (struct ppc_hw_breakpoint __user *)data,
+ ret = __copy_from_user(&bp_info, datavp,
sizeof(struct ppc_hw_breakpoint)) ?
-EFAULT : 0;
if (!ret)
@@ -1560,11 +1564,9 @@ long arch_ptrace(struct task_struct *child, long request, long addr, long data)
if (addr > 0)
break;
#ifdef CONFIG_PPC_ADV_DEBUG_REGS
- ret = put_user(child->thread.dac1,
- (unsigned long __user *)data);
+ ret = put_user(child->thread.dac1, datalp);
#else
- ret = put_user(child->thread.dabr,
- (unsigned long __user *)data);
+ ret = put_user(child->thread.dabr, datalp);
#endif
break;
}
@@ -1580,7 +1582,7 @@ long arch_ptrace(struct task_struct *child, long request, long addr, long data)
return copy_regset_to_user(child, &user_ppc_native_view,
REGSET_GPR,
0, sizeof(struct pt_regs),
- (void __user *) data);
+ datavp);
#ifdef CONFIG_PPC64
case PTRACE_SETREGS64:
@@ -1589,19 +1591,19 @@ long arch_ptrace(struct task_struct *child, long request, long addr, long data)
return copy_regset_from_user(child, &user_ppc_native_view,
REGSET_GPR,
0, sizeof(struct pt_regs),
- (const void __user *) data);
+ datavp);
case PTRACE_GETFPREGS: /* Get the child FPU state (FPR0...31 + FPSCR) */
return copy_regset_to_user(child, &user_ppc_native_view,
REGSET_FPR,
0, sizeof(elf_fpregset_t),
- (void __user *) data);
+ datavp);
case PTRACE_SETFPREGS: /* Set the child FPU state (FPR0...31 + FPSCR) */
return copy_regset_from_user(child, &user_ppc_native_view,
REGSET_FPR,
0, sizeof(elf_fpregset_t),
- (const void __user *) data);
+ datavp);
#ifdef CONFIG_ALTIVEC
case PTRACE_GETVRREGS:
@@ -1609,40 +1611,40 @@ long arch_ptrace(struct task_struct *child, long request, long addr, long data)
REGSET_VMX,
0, (33 * sizeof(vector128) +
sizeof(u32)),
- (void __user *) data);
+ datavp);
case PTRACE_SETVRREGS:
return copy_regset_from_user(child, &user_ppc_native_view,
REGSET_VMX,
0, (33 * sizeof(vector128) +
sizeof(u32)),
- (const void __user *) data);
+ datavp);
#endif
#ifdef CONFIG_VSX
case PTRACE_GETVSRREGS:
return copy_regset_to_user(child, &user_ppc_native_view,
REGSET_VSX,
0, 32 * sizeof(double),
- (void __user *) data);
+ datavp);
case PTRACE_SETVSRREGS:
return copy_regset_from_user(child, &user_ppc_native_view,
REGSET_VSX,
0, 32 * sizeof(double),
- (const void __user *) data);
+ datavp);
#endif
#ifdef CONFIG_SPE
case PTRACE_GETEVRREGS:
/* Get the child spe register state. */
return copy_regset_to_user(child, &user_ppc_native_view,
REGSET_SPE, 0, 35 * sizeof(u32),
- (void __user *) data);
+ datavp);
case PTRACE_SETEVRREGS:
/* Set the child spe register state. */
return copy_regset_from_user(child, &user_ppc_native_view,
REGSET_SPE, 0, 35 * sizeof(u32),
- (const void __user *) data);
+ datavp);
#endif
/* Old reverse args ptrace callss */
diff --git a/arch/powerpc/kernel/setup_64.c b/arch/powerpc/kernel/setup_64.c
index 2a178b0ebcdf..ce6f61c6f871 100644
--- a/arch/powerpc/kernel/setup_64.c
+++ b/arch/powerpc/kernel/setup_64.c
@@ -497,9 +497,8 @@ static void __init emergency_stack_init(void)
}
/*
- * Called into from start_kernel, after lock_kernel has been called.
- * Initializes bootmem, which is unsed to manage page allocation until
- * mem_init is called.
+ * Called into from start_kernel this initializes bootmem, which is used
+ * to manage page allocation until mem_init is called.
*/
void __init setup_arch(char **cmdline_p)
{
diff --git a/arch/powerpc/kernel/sys_ppc32.c b/arch/powerpc/kernel/sys_ppc32.c
index b1b6043a56c4..4e5bf1edc0f2 100644
--- a/arch/powerpc/kernel/sys_ppc32.c
+++ b/arch/powerpc/kernel/sys_ppc32.c
@@ -23,7 +23,6 @@
#include <linux/resource.h>
#include <linux/times.h>
#include <linux/smp.h>
-#include <linux/smp_lock.h>
#include <linux/sem.h>
#include <linux/msg.h>
#include <linux/shm.h>
diff --git a/arch/powerpc/kernel/vio.c b/arch/powerpc/kernel/vio.c
index d692989a4318..441d2a722f06 100644
--- a/arch/powerpc/kernel/vio.c
+++ b/arch/powerpc/kernel/vio.c
@@ -238,9 +238,7 @@ static inline void vio_cmo_dealloc(struct vio_dev *viodev, size_t size)
* memory in this pool does not change.
*/
if (spare_needed && reserve_freed) {
- tmp = min(spare_needed, min(reserve_freed,
- (viodev->cmo.entitled -
- VIO_CMO_MIN_ENT)));
+ tmp = min3(spare_needed, reserve_freed, (viodev->cmo.entitled - VIO_CMO_MIN_ENT));
vio_cmo.spare += tmp;
viodev->cmo.entitled -= tmp;
diff --git a/arch/powerpc/kvm/booke_interrupts.S b/arch/powerpc/kvm/booke_interrupts.S
index 049846911ce4..1cc471faac2d 100644
--- a/arch/powerpc/kvm/booke_interrupts.S
+++ b/arch/powerpc/kvm/booke_interrupts.S
@@ -416,7 +416,7 @@ lightweight_exit:
lwz r3, VCPU_PC(r4)
mtsrr0 r3
lwz r3, VCPU_SHARED(r4)
- lwz r3, VCPU_SHARED_MSR(r3)
+ lwz r3, (VCPU_SHARED_MSR + 4)(r3)
oris r3, r3, KVMPPC_MSR_MASK@h
ori r3, r3, KVMPPC_MSR_MASK@l
mtsrr1 r3
diff --git a/arch/powerpc/kvm/e500.c b/arch/powerpc/kvm/e500.c
index 71750f2dd5d3..e3768ee9b595 100644
--- a/arch/powerpc/kvm/e500.c
+++ b/arch/powerpc/kvm/e500.c
@@ -138,8 +138,8 @@ void kvmppc_core_vcpu_free(struct kvm_vcpu *vcpu)
struct kvmppc_vcpu_e500 *vcpu_e500 = to_e500(vcpu);
free_page((unsigned long)vcpu->arch.shared);
- kvmppc_e500_tlb_uninit(vcpu_e500);
kvm_vcpu_uninit(vcpu);
+ kvmppc_e500_tlb_uninit(vcpu_e500);
kmem_cache_free(kvm_vcpu_cache, vcpu_e500);
}
diff --git a/arch/powerpc/kvm/powerpc.c b/arch/powerpc/kvm/powerpc.c
index 2f87a1627f6c..38f756f25053 100644
--- a/arch/powerpc/kvm/powerpc.c
+++ b/arch/powerpc/kvm/powerpc.c
@@ -617,6 +617,7 @@ long kvm_arch_vm_ioctl(struct file *filp,
switch (ioctl) {
case KVM_PPC_GET_PVINFO: {
struct kvm_ppc_pvinfo pvinfo;
+ memset(&pvinfo, 0, sizeof(pvinfo));
r = kvm_vm_ioctl_get_pvinfo(&pvinfo);
if (copy_to_user(argp, &pvinfo, sizeof(pvinfo))) {
r = -EFAULT;
diff --git a/arch/powerpc/kvm/timing.c b/arch/powerpc/kvm/timing.c
index 46fa04f12a9b..a021f5827a33 100644
--- a/arch/powerpc/kvm/timing.c
+++ b/arch/powerpc/kvm/timing.c
@@ -35,7 +35,6 @@ void kvmppc_init_timing_stats(struct kvm_vcpu *vcpu)
int i;
/* pause guest execution to avoid concurrent updates */
- local_irq_disable();
mutex_lock(&vcpu->mutex);
vcpu->arch.last_exit_type = 0xDEAD;
@@ -51,7 +50,6 @@ void kvmppc_init_timing_stats(struct kvm_vcpu *vcpu)
vcpu->arch.timing_last_enter.tv64 = 0;
mutex_unlock(&vcpu->mutex);
- local_irq_enable();
}
static void add_exit_timing(struct kvm_vcpu *vcpu, u64 duration, int type)
diff --git a/arch/powerpc/mm/hash_utils_64.c b/arch/powerpc/mm/hash_utils_64.c
index 83f534d862db..5e9584405c45 100644
--- a/arch/powerpc/mm/hash_utils_64.c
+++ b/arch/powerpc/mm/hash_utils_64.c
@@ -1123,7 +1123,7 @@ void hash_preload(struct mm_struct *mm, unsigned long ea,
else
#endif /* CONFIG_PPC_HAS_HASH_64K */
rc = __hash_page_4K(ea, access, vsid, ptep, trap, local, ssize,
- subpage_protection(pgdir, ea));
+ subpage_protection(mm, ea));
/* Dump some info in case of hash insertion failure, they should
* never happen so it is really useful to know if/when they do
diff --git a/arch/powerpc/mm/highmem.c b/arch/powerpc/mm/highmem.c
index 857d4173f9c6..e7450bdbe83a 100644
--- a/arch/powerpc/mm/highmem.c
+++ b/arch/powerpc/mm/highmem.c
@@ -29,17 +29,17 @@
* be used in IRQ contexts, so in some (very limited) cases we need
* it.
*/
-void *kmap_atomic_prot(struct page *page, enum km_type type, pgprot_t prot)
+void *kmap_atomic_prot(struct page *page, pgprot_t prot)
{
- unsigned int idx;
unsigned long vaddr;
+ int idx, type;
/* even !CONFIG_PREEMPT needs this, for in_atomic in do_page_fault */
pagefault_disable();
if (!PageHighMem(page))
return page_address(page);
- debug_kmap_atomic(type);
+ type = kmap_atomic_idx_push();
idx = type + KM_TYPE_NR*smp_processor_id();
vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
#ifdef CONFIG_DEBUG_HIGHMEM
@@ -52,26 +52,35 @@ void *kmap_atomic_prot(struct page *page, enum km_type type, pgprot_t prot)
}
EXPORT_SYMBOL(kmap_atomic_prot);
-void kunmap_atomic_notypecheck(void *kvaddr, enum km_type type)
+void __kunmap_atomic(void *kvaddr)
{
-#ifdef CONFIG_DEBUG_HIGHMEM
unsigned long vaddr = (unsigned long) kvaddr & PAGE_MASK;
- enum fixed_addresses idx = type + KM_TYPE_NR*smp_processor_id();
+ int type;
if (vaddr < __fix_to_virt(FIX_KMAP_END)) {
pagefault_enable();
return;
}
- BUG_ON(vaddr != __fix_to_virt(FIX_KMAP_BEGIN + idx));
+ type = kmap_atomic_idx();
- /*
- * force other mappings to Oops if they'll try to access
- * this pte without first remap it
- */
- pte_clear(&init_mm, vaddr, kmap_pte-idx);
- local_flush_tlb_page(NULL, vaddr);
+#ifdef CONFIG_DEBUG_HIGHMEM
+ {
+ unsigned int idx;
+
+ idx = type + KM_TYPE_NR * smp_processor_id();
+ BUG_ON(vaddr != __fix_to_virt(FIX_KMAP_BEGIN + idx));
+
+ /*
+ * force other mappings to Oops if they'll try to access
+ * this pte without first remap it
+ */
+ pte_clear(&init_mm, vaddr, kmap_pte-idx);
+ local_flush_tlb_page(NULL, vaddr);
+ }
#endif
+
+ kmap_atomic_idx_pop();
pagefault_enable();
}
-EXPORT_SYMBOL(kunmap_atomic_notypecheck);
+EXPORT_SYMBOL(__kunmap_atomic);
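The highmem conversion above drops the explicit km_type argument in favour of a small per-CPU slot stack managed by kmap_atomic_idx_push()/kmap_atomic_idx_pop(), so nested atomic mappings only need to be released in reverse order. A usage sketch under that assumption (not part of the patch, error handling omitted):

/* Sketch: nested atomic mappings with the stack-based API. */
static void example_copy_highpage(struct page *dst_page, struct page *src_page)
{
	void *src = __kmap_atomic(src_page);
	void *dst = __kmap_atomic(dst_page);	/* pushes a second slot */

	memcpy(dst, src, PAGE_SIZE);

	__kunmap_atomic(dst);			/* release in reverse order */
	__kunmap_atomic(src);
}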
diff --git a/arch/powerpc/mm/pgtable.c b/arch/powerpc/mm/pgtable.c
index 2c7e801ab20b..6a3997f98dfb 100644
--- a/arch/powerpc/mm/pgtable.c
+++ b/arch/powerpc/mm/pgtable.c
@@ -92,7 +92,7 @@ static void pte_free_rcu_callback(struct rcu_head *head)
static void pte_free_submit(struct pte_freelist_batch *batch)
{
- call_rcu(&batch->rcu, pte_free_rcu_callback);
+ call_rcu_sched(&batch->rcu, pte_free_rcu_callback);
}
void pgtable_free_tlb(struct mmu_gather *tlb, void *table, unsigned shift)
diff --git a/arch/powerpc/mm/tlb_low_64e.S b/arch/powerpc/mm/tlb_low_64e.S
index 8b04c54e596f..8526bd9d2aa3 100644
--- a/arch/powerpc/mm/tlb_low_64e.S
+++ b/arch/powerpc/mm/tlb_low_64e.S
@@ -138,8 +138,11 @@
cmpldi cr0,r15,0 /* Check for user region */
std r14,EX_TLB_ESR(r12) /* write crazy -1 to frame */
beq normal_tlb_miss
+
+ li r11,_PAGE_PRESENT|_PAGE_BAP_SX /* Base perm */
+ oris r11,r11,_PAGE_ACCESSED@h
/* XXX replace the RMW cycles with immediate loads + writes */
-1: mfspr r10,SPRN_MAS1
+ mfspr r10,SPRN_MAS1
cmpldi cr0,r15,8 /* Check for vmalloc region */
rlwinm r10,r10,0,16,1 /* Clear TID */
mtspr SPRN_MAS1,r10
diff --git a/arch/powerpc/mm/tlb_nohash.c b/arch/powerpc/mm/tlb_nohash.c
index 36c0c449a899..2a030d89bbc6 100644
--- a/arch/powerpc/mm/tlb_nohash.c
+++ b/arch/powerpc/mm/tlb_nohash.c
@@ -585,6 +585,6 @@ void setup_initial_memory_limit(phys_addr_t first_memblock_base,
ppc64_rma_size = min_t(u64, first_memblock_size, 0x40000000);
/* Finally limit subsequent allocations */
- memblock_set_current_limit(ppc64_memblock_base + ppc64_rma_size);
+ memblock_set_current_limit(first_memblock_base + ppc64_rma_size);
}
#endif /* CONFIG_PPC64 */
diff --git a/arch/powerpc/platforms/85xx/p1022_ds.c b/arch/powerpc/platforms/85xx/p1022_ds.c
index 2b390d19a1d1..7eb5c40c069f 100644
--- a/arch/powerpc/platforms/85xx/p1022_ds.c
+++ b/arch/powerpc/platforms/85xx/p1022_ds.c
@@ -8,7 +8,6 @@
* Copyright 2010 Freescale Semiconductor, Inc.
*
* This file is taken from the Freescale P1022DS BSP, with modifications:
- * 1) No DIU support (pending rewrite of DIU code)
* 2) No AMP support
* 3) No PCI endpoint support
*
@@ -20,12 +19,211 @@
#include <linux/pci.h>
#include <linux/of_platform.h>
#include <linux/memblock.h>
-
+#include <asm/div64.h>
#include <asm/mpic.h>
#include <asm/swiotlb.h>
#include <sysdev/fsl_soc.h>
#include <sysdev/fsl_pci.h>
+#include <asm/fsl_guts.h>
+
+#if defined(CONFIG_FB_FSL_DIU) || defined(CONFIG_FB_FSL_DIU_MODULE)
+
+/*
+ * Board-specific initialization of the DIU. This code should probably be
+ * executed when the DIU is opened, rather than in arch code, but the DIU
+ * driver does not have a mechanism for this (yet).
+ *
+ * This is especially problematic on the P1022DS because the local bus (eLBC)
+ * and the DIU video signals share the same pins, which means that enabling the
+ * DIU will disable access to NOR flash.
+ */
+
+/* DIU Pixel Clock bits of the CLKDVDR Global Utilities register */
+#define CLKDVDR_PXCKEN 0x80000000
+#define CLKDVDR_PXCKINV 0x10000000
+#define CLKDVDR_PXCKDLY 0x06000000
+#define CLKDVDR_PXCLK_MASK 0x00FF0000
+
+/* Some ngPIXIS register definitions */
+#define PX_BRDCFG1_DVIEN 0x80
+#define PX_BRDCFG1_DFPEN 0x40
+#define PX_BRDCFG1_BACKLIGHT 0x20
+#define PX_BRDCFG1_DDCEN 0x10
+
+/*
+ * DIU Area Descriptor
+ *
+ * Note that we need to byte-swap the value before it's written to the AD
+ * register. So even though the registers don't look like they're in the same
+ * bit positions as they are on the MPC8610, the same value is written to the
+ * AD register on the MPC8610 and on the P1022.
+ */
+#define AD_BYTE_F 0x10000000
+#define AD_ALPHA_C_MASK 0x0E000000
+#define AD_ALPHA_C_SHIFT 25
+#define AD_BLUE_C_MASK 0x01800000
+#define AD_BLUE_C_SHIFT 23
+#define AD_GREEN_C_MASK 0x00600000
+#define AD_GREEN_C_SHIFT 21
+#define AD_RED_C_MASK 0x00180000
+#define AD_RED_C_SHIFT 19
+#define AD_PALETTE 0x00040000
+#define AD_PIXEL_S_MASK 0x00030000
+#define AD_PIXEL_S_SHIFT 16
+#define AD_COMP_3_MASK 0x0000F000
+#define AD_COMP_3_SHIFT 12
+#define AD_COMP_2_MASK 0x00000F00
+#define AD_COMP_2_SHIFT 8
+#define AD_COMP_1_MASK 0x000000F0
+#define AD_COMP_1_SHIFT 4
+#define AD_COMP_0_MASK 0x0000000F
+#define AD_COMP_0_SHIFT 0
+
+#define MAKE_AD(alpha, red, blue, green, size, c0, c1, c2, c3) \
+ cpu_to_le32(AD_BYTE_F | (alpha << AD_ALPHA_C_SHIFT) | \
+ (blue << AD_BLUE_C_SHIFT) | (green << AD_GREEN_C_SHIFT) | \
+ (red << AD_RED_C_SHIFT) | (c3 << AD_COMP_3_SHIFT) | \
+ (c2 << AD_COMP_2_SHIFT) | (c1 << AD_COMP_1_SHIFT) | \
+ (c0 << AD_COMP_0_SHIFT) | (size << AD_PIXEL_S_SHIFT))
+
+/**
+ * p1022ds_get_pixel_format: return the Area Descriptor for a given pixel depth
+ *
+ * The Area Descriptor is a 32-bit value that determine which bits in each
+ * pixel are to be used for each color.
+ */
+static unsigned int p1022ds_get_pixel_format(unsigned int bits_per_pixel,
+ int monitor_port)
+{
+ switch (bits_per_pixel) {
+ case 32:
+ /* 0x88883316 */
+ return MAKE_AD(3, 2, 0, 1, 3, 8, 8, 8, 8);
+ case 24:
+ /* 0x88082219 */
+ return MAKE_AD(4, 0, 1, 2, 2, 0, 8, 8, 8);
+ case 16:
+ /* 0x65053118 */
+ return MAKE_AD(4, 2, 1, 0, 1, 5, 6, 5, 0);
+ default:
+ pr_err("fsl-diu: unsupported pixel depth %u\n", bits_per_pixel);
+ return 0;
+ }
+}
+
+/**
+ * p1022ds_set_gamma_table: update the gamma table, if necessary
+ *
+ * On some boards, the gamma table for some ports may need to be modified.
+ * This is not the case on the P1022DS, so we do nothing.
+*/
+static void p1022ds_set_gamma_table(int monitor_port, char *gamma_table_base)
+{
+}
+
+/**
+ * p1022ds_set_monitor_port: switch the output to a different monitor port
+ *
+ */
+static void p1022ds_set_monitor_port(int monitor_port)
+{
+ struct device_node *pixis_node;
+ u8 __iomem *brdcfg1;
+
+ pixis_node = of_find_compatible_node(NULL, NULL, "fsl,p1022ds-pixis");
+ if (!pixis_node) {
+ pr_err("p1022ds: missing ngPIXIS node\n");
+ return;
+ }
+
+ brdcfg1 = of_iomap(pixis_node, 0);
+ if (!brdcfg1) {
+ pr_err("p1022ds: could not map ngPIXIS registers\n");
+ return;
+ }
+ brdcfg1 += 9; /* BRDCFG1 is at offset 9 in the ngPIXIS */
+
+ switch (monitor_port) {
+ case 0: /* DVI */
+ /* Enable the DVI port, disable the DFP and the backlight */
+ clrsetbits_8(brdcfg1, PX_BRDCFG1_DFPEN | PX_BRDCFG1_BACKLIGHT,
+ PX_BRDCFG1_DVIEN);
+ break;
+ case 1: /* Single link LVDS */
+ /* Enable the DFP port, disable the DVI and the backlight */
+ clrsetbits_8(brdcfg1, PX_BRDCFG1_DVIEN | PX_BRDCFG1_BACKLIGHT,
+ PX_BRDCFG1_DFPEN);
+ break;
+ default:
+ pr_err("p1022ds: unsupported monitor port %i\n", monitor_port);
+ }
+}
+
+/**
+ * p1022ds_set_pixel_clock: program the DIU's clock
+ *
+ * @pixclock: the wavelength, in picoseconds, of the clock
+ */
+void p1022ds_set_pixel_clock(unsigned int pixclock)
+{
+ struct device_node *guts_np = NULL;
+ struct ccsr_guts_85xx __iomem *guts;
+ unsigned long freq;
+ u64 temp;
+ u32 pxclk;
+
+ /* Map the global utilities registers. */
+ guts_np = of_find_compatible_node(NULL, NULL, "fsl,p1022-guts");
+ if (!guts_np) {
+ pr_err("p1022ds: missing global utilties device node\n");
+ return;
+ }
+
+ guts = of_iomap(guts_np, 0);
+ of_node_put(guts_np);
+ if (!guts) {
+ pr_err("p1022ds: could not map global utilties device\n");
+ return;
+ }
+
+ /* Convert pixclock from a wavelength to a frequency */
+ temp = 1000000000000ULL;
+ do_div(temp, pixclock);
+ freq = temp;
+
+ /* pixclk is the ratio of the platform clock to the pixel clock */
+ pxclk = DIV_ROUND_CLOSEST(fsl_get_sys_freq(), freq);
+
+ /* Disable the pixel clock, and set it to non-inverted and no delay */
+ clrbits32(&guts->clkdvdr,
+ CLKDVDR_PXCKEN | CLKDVDR_PXCKDLY | CLKDVDR_PXCLK_MASK);
+
+ /* Enable the clock and set the pxclk */
+ setbits32(&guts->clkdvdr, CLKDVDR_PXCKEN | (pxclk << 16));
+}
+
+/**
+ * p1022ds_show_monitor_port: show the current monitor
+ *
+ * This function returns a string indicating whether the current monitor is
+ * set to DVI or LVDS.
+ */
+ssize_t p1022ds_show_monitor_port(int monitor_port, char *buf)
+{
+ return sprintf(buf, "%c0 - DVI\n%c1 - Single link LVDS\n",
+ monitor_port == 0 ? '*' : ' ', monitor_port == 1 ? '*' : ' ');
+}
+
+/**
+ * p1022ds_set_sysfs_monitor_port: set the monitor port for sysfs
+ */
+int p1022ds_set_sysfs_monitor_port(int val)
+{
+ return val < 2 ? val : 0;
+}
+
+#endif
void __init p1022_ds_pic_init(void)
{
@@ -92,6 +290,15 @@ static void __init p1022_ds_setup_arch(void)
}
#endif
+#if defined(CONFIG_FB_FSL_DIU) || defined(CONFIG_FB_FSL_DIU_MODULE)
+ diu_ops.get_pixel_format = p1022ds_get_pixel_format;
+ diu_ops.set_gamma_table = p1022ds_set_gamma_table;
+ diu_ops.set_monitor_port = p1022ds_set_monitor_port;
+ diu_ops.set_pixel_clock = p1022ds_set_pixel_clock;
+ diu_ops.show_monitor_port = p1022ds_show_monitor_port;
+ diu_ops.set_sysfs_monitor_port = p1022ds_set_sysfs_monitor_port;
+#endif
+
#ifdef CONFIG_SMP
mpc85xx_smp_init();
#endif
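On the DIU clock code added above: the pixclock argument is the clock period ("wavelength" in the comment) in picoseconds, so the driver first converts it to a frequency (10^12 / pixclock) and then derives the CLKDVDR divider as the ratio of the platform clock to that frequency. A worked sketch of the same arithmetic is shown below; the example numbers are purely illustrative (a hypothetical 533 MHz platform clock with pixclock = 15385 ps, i.e. roughly 65 MHz, would give a divider of 8).

/* Sketch of the pixel-clock arithmetic used in p1022ds_set_pixel_clock(). */
static u32 example_pxclk_divider(unsigned int pixclock)
{
	u64 temp = 1000000000000ULL;	/* picoseconds per second */
	unsigned long freq;

	do_div(temp, pixclock);		/* period (ps) -> frequency (Hz) */
	freq = temp;

	/* divider = platform (system) clock / pixel clock, rounded */
	return DIV_ROUND_CLOSEST(fsl_get_sys_freq(), freq);
}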
diff --git a/arch/powerpc/platforms/cell/spufs/inode.c b/arch/powerpc/platforms/cell/spufs/inode.c
index 5dec408d6703..3532b92de983 100644
--- a/arch/powerpc/platforms/cell/spufs/inode.c
+++ b/arch/powerpc/platforms/cell/spufs/inode.c
@@ -798,17 +798,17 @@ spufs_fill_super(struct super_block *sb, void *data, int silent)
return spufs_create_root(sb, data);
}
-static int
-spufs_get_sb(struct file_system_type *fstype, int flags,
- const char *name, void *data, struct vfsmount *mnt)
+static struct dentry *
+spufs_mount(struct file_system_type *fstype, int flags,
+ const char *name, void *data)
{
- return get_sb_single(fstype, flags, data, spufs_fill_super, mnt);
+ return mount_single(fstype, flags, data, spufs_fill_super);
}
static struct file_system_type spufs_type = {
.owner = THIS_MODULE,
.name = "spufs",
- .get_sb = spufs_get_sb,
+ .mount = spufs_mount,
.kill_sb = kill_litter_super,
};
diff --git a/arch/powerpc/platforms/pseries/Kconfig b/arch/powerpc/platforms/pseries/Kconfig
index c667f0f02c34..3139814f6439 100644
--- a/arch/powerpc/platforms/pseries/Kconfig
+++ b/arch/powerpc/platforms/pseries/Kconfig
@@ -47,6 +47,12 @@ config LPARCFG
config PPC_PSERIES_DEBUG
depends on PPC_PSERIES && PPC_EARLY_DEBUG
bool "Enable extra debug logging in platforms/pseries"
+ help
+ Say Y here if you want the pseries core to produce a bunch of
+ debug messages to the system log. Select this if you are having a
+ problem with the pseries core and want to see more of what is
+ going on. This does not enable debugging in lpar.c, which must
+ be manually done due to its verbosity.
default y
config PPC_SMLPAR
diff --git a/arch/powerpc/platforms/pseries/eeh.c b/arch/powerpc/platforms/pseries/eeh.c
index 34b7dc12e731..17a11c82e6f8 100644
--- a/arch/powerpc/platforms/pseries/eeh.c
+++ b/arch/powerpc/platforms/pseries/eeh.c
@@ -21,8 +21,6 @@
* Please address comments and feedback to Linas Vepstas <linas@austin.ibm.com>
*/
-#undef DEBUG
-
#include <linux/delay.h>
#include <linux/init.h>
#include <linux/list.h>
diff --git a/arch/powerpc/platforms/pseries/pci_dlpar.c b/arch/powerpc/platforms/pseries/pci_dlpar.c
index 4b7a062dee15..5fcc92a12d3e 100644
--- a/arch/powerpc/platforms/pseries/pci_dlpar.c
+++ b/arch/powerpc/platforms/pseries/pci_dlpar.c
@@ -25,8 +25,6 @@
* Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*/
-#undef DEBUG
-
#include <linux/pci.h>
#include <asm/pci-bridge.h>
#include <asm/ppc-pci.h>
diff --git a/arch/powerpc/sysdev/fsl_lbc.c b/arch/powerpc/sysdev/fsl_lbc.c
index dceb8d1a843d..4fcb5a4e60dd 100644
--- a/arch/powerpc/sysdev/fsl_lbc.c
+++ b/arch/powerpc/sysdev/fsl_lbc.c
@@ -1,9 +1,12 @@
/*
* Freescale LBC and UPM routines.
*
- * Copyright (c) 2007-2008 MontaVista Software, Inc.
+ * Copyright © 2007-2008 MontaVista Software, Inc.
+ * Copyright © 2010 Freescale Semiconductor
*
* Author: Anton Vorontsov <avorontsov@ru.mvista.com>
+ * Author: Jack Lan <Jack.Lan@freescale.com>
+ * Author: Roy Zang <tie-fei.zang@freescale.com>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
@@ -19,39 +22,37 @@
#include <linux/types.h>
#include <linux/io.h>
#include <linux/of.h>
+#include <linux/slab.h>
+#include <linux/platform_device.h>
+#include <linux/interrupt.h>
+#include <linux/mod_devicetable.h>
#include <asm/prom.h>
#include <asm/fsl_lbc.h>
static spinlock_t fsl_lbc_lock = __SPIN_LOCK_UNLOCKED(fsl_lbc_lock);
-static struct fsl_lbc_regs __iomem *fsl_lbc_regs;
+struct fsl_lbc_ctrl *fsl_lbc_ctrl_dev;
+EXPORT_SYMBOL(fsl_lbc_ctrl_dev);
-static char __initdata *compat_lbc[] = {
- "fsl,pq2-localbus",
- "fsl,pq2pro-localbus",
- "fsl,pq3-localbus",
- "fsl,elbc",
-};
-
-static int __init fsl_lbc_init(void)
+/**
+ * fsl_lbc_addr - convert the base address
+ * @addr_base: base address of the memory bank
+ *
+ * This function converts a base address of lbc into the right format for the
+ * BR register. If the SOC has eLBC then it returns 32bit physical address
+ * else it convers a 34bit local bus physical address to correct format of
+ * 32bit address for BR register (Example: MPC8641).
+ */
+u32 fsl_lbc_addr(phys_addr_t addr_base)
{
- struct device_node *lbus;
- int i;
+ struct device_node *np = fsl_lbc_ctrl_dev->dev->of_node;
+ u32 addr = addr_base & 0xffff8000;
- for (i = 0; i < ARRAY_SIZE(compat_lbc); i++) {
- lbus = of_find_compatible_node(NULL, NULL, compat_lbc[i]);
- if (lbus)
- goto found;
- }
- return -ENODEV;
+ if (of_device_is_compatible(np, "fsl,elbc"))
+ return addr;
-found:
- fsl_lbc_regs = of_iomap(lbus, 0);
- of_node_put(lbus);
- if (!fsl_lbc_regs)
- return -ENOMEM;
- return 0;
+ return addr | ((addr_base & 0x300000000ull) >> 19);
}
-arch_initcall(fsl_lbc_init);
+EXPORT_SYMBOL(fsl_lbc_addr);
/**
* fsl_lbc_find - find Localbus bank
@@ -65,15 +66,17 @@ arch_initcall(fsl_lbc_init);
int fsl_lbc_find(phys_addr_t addr_base)
{
int i;
+ struct fsl_lbc_regs __iomem *lbc;
- if (!fsl_lbc_regs)
+ if (!fsl_lbc_ctrl_dev || !fsl_lbc_ctrl_dev->regs)
return -ENODEV;
- for (i = 0; i < ARRAY_SIZE(fsl_lbc_regs->bank); i++) {
- __be32 br = in_be32(&fsl_lbc_regs->bank[i].br);
- __be32 or = in_be32(&fsl_lbc_regs->bank[i].or);
+ lbc = fsl_lbc_ctrl_dev->regs;
+ for (i = 0; i < ARRAY_SIZE(lbc->bank); i++) {
+ __be32 br = in_be32(&lbc->bank[i].br);
+ __be32 or = in_be32(&lbc->bank[i].or);
- if (br & BR_V && (br & or & BR_BA) == addr_base)
+ if (br & BR_V && (br & or & BR_BA) == fsl_lbc_addr(addr_base))
return i;
}
@@ -94,22 +97,27 @@ int fsl_upm_find(phys_addr_t addr_base, struct fsl_upm *upm)
{
int bank;
__be32 br;
+ struct fsl_lbc_regs __iomem *lbc;
bank = fsl_lbc_find(addr_base);
if (bank < 0)
return bank;
- br = in_be32(&fsl_lbc_regs->bank[bank].br);
+ if (!fsl_lbc_ctrl_dev || !fsl_lbc_ctrl_dev->regs)
+ return -ENODEV;
+
+ lbc = fsl_lbc_ctrl_dev->regs;
+ br = in_be32(&lbc->bank[bank].br);
switch (br & BR_MSEL) {
case BR_MS_UPMA:
- upm->mxmr = &fsl_lbc_regs->mamr;
+ upm->mxmr = &lbc->mamr;
break;
case BR_MS_UPMB:
- upm->mxmr = &fsl_lbc_regs->mbmr;
+ upm->mxmr = &lbc->mbmr;
break;
case BR_MS_UPMC:
- upm->mxmr = &fsl_lbc_regs->mcmr;
+ upm->mxmr = &lbc->mcmr;
break;
default:
return -EINVAL;
@@ -148,9 +156,12 @@ int fsl_upm_run_pattern(struct fsl_upm *upm, void __iomem *io_base, u32 mar)
int ret = 0;
unsigned long flags;
+ if (!fsl_lbc_ctrl_dev || !fsl_lbc_ctrl_dev->regs)
+ return -ENODEV;
+
spin_lock_irqsave(&fsl_lbc_lock, flags);
- out_be32(&fsl_lbc_regs->mar, mar);
+ out_be32(&fsl_lbc_ctrl_dev->regs->mar, mar);
switch (upm->width) {
case 8:
@@ -172,3 +183,166 @@ int fsl_upm_run_pattern(struct fsl_upm *upm, void __iomem *io_base, u32 mar)
return ret;
}
EXPORT_SYMBOL(fsl_upm_run_pattern);
+
+static int __devinit fsl_lbc_ctrl_init(struct fsl_lbc_ctrl *ctrl)
+{
+ struct fsl_lbc_regs __iomem *lbc = ctrl->regs;
+
+ /* clear event registers */
+ setbits32(&lbc->ltesr, LTESR_CLEAR);
+ out_be32(&lbc->lteatr, 0);
+ out_be32(&lbc->ltear, 0);
+ out_be32(&lbc->lteccr, LTECCR_CLEAR);
+ out_be32(&lbc->ltedr, LTEDR_ENABLE);
+
+ /* Enable interrupts for any detected events */
+ out_be32(&lbc->lteir, LTEIR_ENABLE);
+
+ return 0;
+}
+
+/*
+ * NOTE: This interrupt is used to report localbus events of various kinds,
+ * such as transaction errors on the chipselects.
+ */
+
+static irqreturn_t fsl_lbc_ctrl_irq(int irqno, void *data)
+{
+ struct fsl_lbc_ctrl *ctrl = data;
+ struct fsl_lbc_regs __iomem *lbc = ctrl->regs;
+ u32 status;
+
+ status = in_be32(&lbc->ltesr);
+ if (!status)
+ return IRQ_NONE;
+
+ out_be32(&lbc->ltesr, LTESR_CLEAR);
+ out_be32(&lbc->lteatr, 0);
+ out_be32(&lbc->ltear, 0);
+ ctrl->irq_status = status;
+
+ if (status & LTESR_BM)
+ dev_err(ctrl->dev, "Local bus monitor time-out: "
+ "LTESR 0x%08X\n", status);
+ if (status & LTESR_WP)
+ dev_err(ctrl->dev, "Write protect error: "
+ "LTESR 0x%08X\n", status);
+ if (status & LTESR_ATMW)
+ dev_err(ctrl->dev, "Atomic write error: "
+ "LTESR 0x%08X\n", status);
+ if (status & LTESR_ATMR)
+ dev_err(ctrl->dev, "Atomic read error: "
+ "LTESR 0x%08X\n", status);
+ if (status & LTESR_CS)
+ dev_err(ctrl->dev, "Chip select error: "
+ "LTESR 0x%08X\n", status);
+ if (status & LTESR_UPM)
+ ;
+ if (status & LTESR_FCT) {
+ dev_err(ctrl->dev, "FCM command time-out: "
+ "LTESR 0x%08X\n", status);
+ smp_wmb();
+ wake_up(&ctrl->irq_wait);
+ }
+ if (status & LTESR_PAR) {
+ dev_err(ctrl->dev, "Parity or Uncorrectable ECC error: "
+ "LTESR 0x%08X\n", status);
+ smp_wmb();
+ wake_up(&ctrl->irq_wait);
+ }
+ if (status & LTESR_CC) {
+ smp_wmb();
+ wake_up(&ctrl->irq_wait);
+ }
+ if (status & ~LTESR_MASK)
+ dev_err(ctrl->dev, "Unknown error: "
+ "LTESR 0x%08X\n", status);
+ return IRQ_HANDLED;
+}
+
+/*
+ * fsl_lbc_ctrl_probe
+ *
+ * called by device layer when it finds a device matching
+ * one that our driver can handle. This code allocates all of
+ * the resources needed for the controller only. The
+ * resources for the NAND banks themselves are allocated
+ * in the chip probe function.
+*/
+
+static int __devinit fsl_lbc_ctrl_probe(struct platform_device *dev)
+{
+ int ret;
+
+ if (!dev->dev.of_node) {
+ dev_err(&dev->dev, "Device OF-Node is NULL");
+ return -EFAULT;
+ }
+
+ fsl_lbc_ctrl_dev = kzalloc(sizeof(*fsl_lbc_ctrl_dev), GFP_KERNEL);
+ if (!fsl_lbc_ctrl_dev)
+ return -ENOMEM;
+
+ dev_set_drvdata(&dev->dev, fsl_lbc_ctrl_dev);
+
+ spin_lock_init(&fsl_lbc_ctrl_dev->lock);
+ init_waitqueue_head(&fsl_lbc_ctrl_dev->irq_wait);
+
+ fsl_lbc_ctrl_dev->regs = of_iomap(dev->dev.of_node, 0);
+ if (!fsl_lbc_ctrl_dev->regs) {
+ dev_err(&dev->dev, "failed to get memory region\n");
+ ret = -ENODEV;
+ goto err;
+ }
+
+ fsl_lbc_ctrl_dev->irq = irq_of_parse_and_map(dev->dev.of_node, 0);
+ if (fsl_lbc_ctrl_dev->irq == NO_IRQ) {
+ dev_err(&dev->dev, "failed to get irq resource\n");
+ ret = -ENODEV;
+ goto err;
+ }
+
+ fsl_lbc_ctrl_dev->dev = &dev->dev;
+
+ ret = fsl_lbc_ctrl_init(fsl_lbc_ctrl_dev);
+ if (ret < 0)
+ goto err;
+
+ ret = request_irq(fsl_lbc_ctrl_dev->irq, fsl_lbc_ctrl_irq, 0,
+ "fsl-lbc", fsl_lbc_ctrl_dev);
+ if (ret != 0) {
+ dev_err(&dev->dev, "failed to install irq (%d)\n",
+ fsl_lbc_ctrl_dev->irq);
+ ret = fsl_lbc_ctrl_dev->irq;
+ goto err;
+ }
+
+ return 0;
+
+err:
+ iounmap(fsl_lbc_ctrl_dev->regs);
+ kfree(fsl_lbc_ctrl_dev);
+ return ret;
+}
+
+static const struct of_device_id fsl_lbc_match[] = {
+ { .compatible = "fsl,elbc", },
+ { .compatible = "fsl,pq3-localbus", },
+ { .compatible = "fsl,pq2-localbus", },
+ { .compatible = "fsl,pq2pro-localbus", },
+ {},
+};
+
+static struct platform_driver fsl_lbc_ctrl_driver = {
+ .driver = {
+ .name = "fsl-lbc",
+ .of_match_table = fsl_lbc_match,
+ },
+ .probe = fsl_lbc_ctrl_probe,
+};
+
+static int __init fsl_lbc_init(void)
+{
+ return platform_driver_register(&fsl_lbc_ctrl_driver);
+}
+module_init(fsl_lbc_init);
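To make the fsl_lbc_addr() comment above concrete: on parts without an eLBC the 34-bit local bus address keeps bits 15-31 in place (the BR_BA field) and folds the two high bits (34-bit address bits 32-33) down into BR bits 13-14, which is what the "& 0x300000000ull) >> 19" expression does. A worked sketch of that packing follows; the example address is made up.

/* Sketch of the BR base-address packing done by fsl_lbc_addr(). */
static u32 example_lbc_br_addr(phys_addr_t addr_base)
{
	u32 addr = addr_base & 0xffff8000;	/* BR_BA: bits 15..31 */

	/* Fold 34-bit address bits 32..33 into BR bits 13..14,
	 * e.g. 0x3f0000000 -> 0xf0006000. */
	return addr | ((addr_base & 0x300000000ull) >> 19);
}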
diff --git a/arch/powerpc/sysdev/fsl_rio.c b/arch/powerpc/sysdev/fsl_rio.c
index 412763672d23..9725369d432a 100644
--- a/arch/powerpc/sysdev/fsl_rio.c
+++ b/arch/powerpc/sysdev/fsl_rio.c
@@ -50,6 +50,7 @@
#define RIO_ATMU_REGS_OFFSET 0x10c00
#define RIO_P_MSG_REGS_OFFSET 0x11000
#define RIO_S_MSG_REGS_OFFSET 0x13000
+#define RIO_GCCSR 0x13c
#define RIO_ESCSR 0x158
#define RIO_CCSR 0x15c
#define RIO_LTLEDCSR 0x0608
@@ -87,6 +88,9 @@
#define RIO_IPWSR_PWD 0x00000008
#define RIO_IPWSR_PWB 0x00000004
+#define RIO_EPWISR_PINT 0x80000000
+#define RIO_EPWISR_PW 0x00000001
+
#define RIO_MSG_DESC_SIZE 32
#define RIO_MSG_BUFFER_SIZE 4096
#define RIO_MIN_TX_RING_SIZE 2
@@ -1082,18 +1086,12 @@ fsl_rio_port_write_handler(int irq, void *dev_instance)
struct rio_priv *priv = port->priv;
u32 epwisr, tmp;
- ipwmr = in_be32(&priv->msg_regs->pwmr);
- ipwsr = in_be32(&priv->msg_regs->pwsr);
-
epwisr = in_be32(priv->regs_win + RIO_EPWISR);
- if (epwisr & 0x80000000) {
- tmp = in_be32(priv->regs_win + RIO_LTLEDCSR);
- pr_info("RIO_LTLEDCSR = 0x%x\n", tmp);
- out_be32(priv->regs_win + RIO_LTLEDCSR, 0);
- }
+ if (!(epwisr & RIO_EPWISR_PW))
+ goto pw_done;
- if (!(epwisr & 0x00000001))
- return IRQ_HANDLED;
+ ipwmr = in_be32(&priv->msg_regs->pwmr);
+ ipwsr = in_be32(&priv->msg_regs->pwsr);
#ifdef DEBUG_PW
pr_debug("PW Int->IPWMR: 0x%08x IPWSR: 0x%08x (", ipwmr, ipwsr);
@@ -1109,20 +1107,6 @@ fsl_rio_port_write_handler(int irq, void *dev_instance)
pr_debug(" PWB");
pr_debug(" )\n");
#endif
- out_be32(&priv->msg_regs->pwsr,
- ipwsr & (RIO_IPWSR_TE | RIO_IPWSR_QFI | RIO_IPWSR_PWD));
-
- if ((ipwmr & RIO_IPWMR_EIE) && (ipwsr & RIO_IPWSR_TE)) {
- priv->port_write_msg.err_count++;
- pr_info("RIO: Port-Write Transaction Err (%d)\n",
- priv->port_write_msg.err_count);
- }
- if (ipwsr & RIO_IPWSR_PWD) {
- priv->port_write_msg.discard_count++;
- pr_info("RIO: Port Discarded Port-Write Msg(s) (%d)\n",
- priv->port_write_msg.discard_count);
- }
-
/* Schedule deferred processing if PW was received */
if (ipwsr & RIO_IPWSR_QFI) {
/* Save PW message (if there is room in FIFO),
@@ -1134,16 +1118,43 @@ fsl_rio_port_write_handler(int irq, void *dev_instance)
RIO_PW_MSG_SIZE);
} else {
priv->port_write_msg.discard_count++;
- pr_info("RIO: ISR Discarded Port-Write Msg(s) (%d)\n",
+ pr_debug("RIO: ISR Discarded Port-Write Msg(s) (%d)\n",
priv->port_write_msg.discard_count);
}
+ /* Clear interrupt and issue Clear Queue command. This allows
+ * another port-write to be received.
+ */
+ out_be32(&priv->msg_regs->pwsr, RIO_IPWSR_QFI);
+ out_be32(&priv->msg_regs->pwmr, ipwmr | RIO_IPWMR_CQ);
+
schedule_work(&priv->pw_work);
}
- /* Issue Clear Queue command. This allows another
- * port-write to be received.
- */
- out_be32(&priv->msg_regs->pwmr, ipwmr | RIO_IPWMR_CQ);
+ if ((ipwmr & RIO_IPWMR_EIE) && (ipwsr & RIO_IPWSR_TE)) {
+ priv->port_write_msg.err_count++;
+ pr_debug("RIO: Port-Write Transaction Err (%d)\n",
+ priv->port_write_msg.err_count);
+ /* Clear Transaction Error: port-write controller should be
+ * disabled when clearing this error
+ */
+ out_be32(&priv->msg_regs->pwmr, ipwmr & ~RIO_IPWMR_PWE);
+ out_be32(&priv->msg_regs->pwsr, RIO_IPWSR_TE);
+ out_be32(&priv->msg_regs->pwmr, ipwmr);
+ }
+
+ if (ipwsr & RIO_IPWSR_PWD) {
+ priv->port_write_msg.discard_count++;
+ pr_debug("RIO: Port Discarded Port-Write Msg(s) (%d)\n",
+ priv->port_write_msg.discard_count);
+ out_be32(&priv->msg_regs->pwsr, RIO_IPWSR_PWD);
+ }
+
+pw_done:
+ if (epwisr & RIO_EPWISR_PINT) {
+ tmp = in_be32(priv->regs_win + RIO_LTLEDCSR);
+ pr_debug("RIO_LTLEDCSR = 0x%x\n", tmp);
+ out_be32(priv->regs_win + RIO_LTLEDCSR, 0);
+ }
return IRQ_HANDLED;
}
@@ -1461,6 +1472,7 @@ int fsl_rio_setup(struct platform_device *dev)
port->host_deviceid = fsl_rio_get_hdid(port->id);
port->priv = priv;
+ port->phys_efptr = 0x100;
rio_register_mport(port);
priv->regs_win = ioremap(regs.start, regs.end - regs.start + 1);
@@ -1508,6 +1520,12 @@ int fsl_rio_setup(struct platform_device *dev)
dev_info(&dev->dev, "RapidIO Common Transport System size: %d\n",
port->sys_size ? 65536 : 256);
+ if (port->host_deviceid >= 0)
+ out_be32(priv->regs_win + RIO_GCCSR, RIO_PORT_GEN_HOST |
+ RIO_PORT_GEN_MASTER | RIO_PORT_GEN_DISCOVERED);
+ else
+ out_be32(priv->regs_win + RIO_GCCSR, 0x00000000);
+
priv->atmu_regs = (struct rio_atmu_regs *)(priv->regs_win
+ RIO_ATMU_REGS_OFFSET);
priv->maint_atmu_regs = priv->atmu_regs + 1;
diff --git a/arch/s390/Kbuild b/arch/s390/Kbuild
new file mode 100644
index 000000000000..ae4b01060edd
--- /dev/null
+++ b/arch/s390/Kbuild
@@ -0,0 +1,6 @@
+obj-y += kernel/
+obj-y += mm/
+obj-y += crypto/
+obj-y += appldata/
+obj-y += hypfs/
+obj-y += kvm/
diff --git a/arch/s390/Kconfig b/arch/s390/Kconfig
index 75976a141947..e0b98e71ff47 100644
--- a/arch/s390/Kconfig
+++ b/arch/s390/Kconfig
@@ -1,8 +1,3 @@
-#
-# For a description of the syntax of this configuration file,
-# see Documentation/kbuild/kconfig-language.txt.
-#
-
config SCHED_MC
def_bool y
depends on SMP
@@ -60,6 +55,9 @@ config NO_IOMEM
config NO_DMA
def_bool y
+config ARCH_DMA_ADDR_T_64BIT
+ def_bool 64BIT
+
config GENERIC_LOCKBREAK
bool
default y
@@ -75,8 +73,6 @@ config VIRT_CPU_ACCOUNTING
config ARCH_SUPPORTS_DEBUG_PAGEALLOC
def_bool y
-mainmenu "Linux Kernel Configuration"
-
config S390
def_bool y
select USE_GENERIC_SMP_HELPERS if SMP
@@ -84,6 +80,7 @@ config S390
select HAVE_FUNCTION_TRACER
select HAVE_FUNCTION_TRACE_MCOUNT_TEST
select HAVE_FTRACE_MCOUNT_RECORD
+ select HAVE_C_RECORDMCOUNT
select HAVE_SYSCALL_TRACEPOINTS
select HAVE_DYNAMIC_FTRACE
select HAVE_FUNCTION_GRAPH_TRACER
@@ -101,6 +98,7 @@ config S390
select HAVE_KERNEL_BZIP2
select HAVE_KERNEL_LZMA
select HAVE_KERNEL_LZO
+ select HAVE_GET_USER_PAGES_FAST
select ARCH_INLINE_SPIN_TRYLOCK
select ARCH_INLINE_SPIN_TRYLOCK_BH
select ARCH_INLINE_SPIN_LOCK
@@ -147,7 +145,7 @@ source "kernel/time/Kconfig"
config 64BIT
bool "64 bit kernel"
help
- Select this option if you have a 64 bit IBM zSeries machine
+ Select this option if you have an IBM z/Architecture machine
and want to use the 64 bit addressing mode.
config 32BIT
@@ -199,9 +197,18 @@ config HOTPLUG_CPU
can be controlled through /sys/devices/system/cpu/cpu#.
Say N if you want to disable CPU hotplug.
+config SCHED_MC
+ def_bool y
+ prompt "Multi-core scheduler support"
+ depends on SMP
+ help
+ Multi-core scheduler support improves the CPU scheduler's decision
+ making when dealing with multi-core CPU chips at a cost of slightly
+ increased overhead in some places.
+
config SCHED_BOOK
bool "Book scheduler support"
- depends on SMP
+ depends on SMP && SCHED_MC
help
Book scheduler support improves the CPU scheduler's decision making
when dealing with machines that have several books.
@@ -211,7 +218,7 @@ config MATHEMU
depends on MARCH_G5
help
This option is required for IEEE compliant floating point arithmetic
- on older S/390 machines. Say Y unless you know your machine doesn't
+ on older ESA/390 machines. Say Y unless you know your machine doesn't
need this.
config COMPAT
@@ -240,8 +247,8 @@ config S390_EXEC_PROTECT
space programs and it also selects the addressing mode option above.
The kernel parameter noexec=on will enable this feature and also
switch the addressing modes, default is disabled. Enabling this (via
- kernel parameter) on machines earlier than IBM System z9-109 EC/BC
- will reduce system performance.
+ kernel parameter) on machines earlier than IBM System z9 will
+ reduce system performance.
comment "Code generation options"
@@ -250,41 +257,46 @@ choice
default MARCH_G5
config MARCH_G5
- bool "S/390 model G5 and G6"
+ bool "System/390 model G5 and G6"
depends on !64BIT
help
Select this to build a 31 bit kernel that works
- on all S/390 and zSeries machines.
+ on all ESA/390 and z/Architecture machines.
config MARCH_Z900
- bool "IBM eServer zSeries model z800 and z900"
+ bool "IBM zSeries model z800 and z900"
help
- Select this to optimize for zSeries machines. This
- will enable some optimizations that are not available
- on older 31 bit only CPUs.
+ Select this to enable optimizations for model z800/z900 (2064 and
+ 2066 series). This will enable some optimizations that are not
+ available on older ESA/390 (31 Bit) only CPUs.
config MARCH_Z990
- bool "IBM eServer zSeries model z890 and z990"
+ bool "IBM zSeries model z890 and z990"
help
- Select this enable optimizations for model z890/z990.
- This will be slightly faster but does not work on
- older machines such as the z900.
+ Select this to enable optimizations for model z890/z990 (2084 and
+ 2086 series). The kernel will be slightly faster but will not work
+ on older machines.
config MARCH_Z9_109
bool "IBM System z9"
help
- Select this to enable optimizations for IBM System z9-109, IBM
- System z9 Enterprise Class (z9 EC), and IBM System z9 Business
- Class (z9 BC). The kernel will be slightly faster but will not
- work on older machines such as the z990, z890, z900, and z800.
+ Select this to enable optimizations for IBM System z9 (2094 and
+ 2096 series). The kernel will be slightly faster but will not work
+ on older machines.
config MARCH_Z10
bool "IBM System z10"
help
- Select this to enable optimizations for IBM System z10. The
- kernel will be slightly faster but will not work on older
- machines such as the z990, z890, z900, z800, z9-109, z9-ec
- and z9-bc.
+ Select this to enable optimizations for IBM System z10 (2097 and
+ 2098 series). The kernel will be slightly faster but will not work
+ on older machines.
+
+config MARCH_Z196
+ bool "IBM zEnterprise 196"
+ help
+ Select this to enable optimizations for IBM zEnterprise 196
+ (2817 series). The kernel will be slightly faster but will not work
+ on older machines.
endchoice
diff --git a/arch/s390/Kconfig.debug b/arch/s390/Kconfig.debug
index 45e0c6199f36..05221b13ffb1 100644
--- a/arch/s390/Kconfig.debug
+++ b/arch/s390/Kconfig.debug
@@ -6,6 +6,18 @@ config TRACE_IRQFLAGS_SUPPORT
source "lib/Kconfig.debug"
+config STRICT_DEVMEM
+ def_bool y
+ prompt "Filter access to /dev/mem"
+ ---help---
+ This option restricts access to /dev/mem. If this option is
+ disabled, you allow userspace access to all memory, including
+ kernel and userspace memory. Accidental memory access is likely
+ to be disastrous.
+ Such access is only required by experts who want to debug the kernel.
+
+ If you are unsure, say Y.
+
config DEBUG_STRICT_USER_COPY_CHECKS
bool "Strict user copy size checks"
---help---
diff --git a/arch/s390/Makefile b/arch/s390/Makefile
index 0c9e6c6d2a64..d5b8a6ade525 100644
--- a/arch/s390/Makefile
+++ b/arch/s390/Makefile
@@ -40,6 +40,7 @@ cflags-$(CONFIG_MARCH_Z900) += $(call cc-option,-march=z900)
cflags-$(CONFIG_MARCH_Z990) += $(call cc-option,-march=z990)
cflags-$(CONFIG_MARCH_Z9_109) += $(call cc-option,-march=z9-109)
cflags-$(CONFIG_MARCH_Z10) += $(call cc-option,-march=z10)
+cflags-$(CONFIG_MARCH_Z196) += $(call cc-option,-march=z196)
#KBUILD_IMAGE is necessary for make rpm
KBUILD_IMAGE :=arch/s390/boot/image
@@ -94,8 +95,8 @@ head-y := arch/s390/kernel/head.o
head-y += arch/s390/kernel/$(if $(CONFIG_64BIT),head64.o,head31.o)
head-y += arch/s390/kernel/init_task.o
-core-y += arch/s390/mm/ arch/s390/kernel/ arch/s390/crypto/ \
- arch/s390/appldata/ arch/s390/hypfs/ arch/s390/kvm/
+# See arch/s390/Kbuild for content of core part of the kernel
+core-y += arch/s390/
libs-y += arch/s390/lib/
drivers-y += drivers/s390/
diff --git a/arch/s390/crypto/crypt_s390.h b/arch/s390/crypto/crypt_s390.h
index 0ef9829f2ad6..7ee9a1b4ad9f 100644
--- a/arch/s390/crypto/crypt_s390.h
+++ b/arch/s390/crypto/crypt_s390.h
@@ -297,7 +297,7 @@ static inline int crypt_s390_func_available(int func)
int ret;
/* check if CPACF facility (bit 17) is available */
- if (!(stfl() & 1ULL << (31 - 17)))
+ if (!test_facility(17))
return 0;
switch (func & CRYPT_S390_OP_MASK) {
diff --git a/arch/s390/hypfs/hypfs_diag.c b/arch/s390/hypfs/hypfs_diag.c
index 020e51c063d2..cd4a81be9cf8 100644
--- a/arch/s390/hypfs/hypfs_diag.c
+++ b/arch/s390/hypfs/hypfs_diag.c
@@ -638,18 +638,21 @@ __init int hypfs_diag_init(void)
pr_err("The hardware system does not support hypfs\n");
return -ENODATA;
}
- rc = diag224_get_name_table();
- if (rc) {
- diag204_free_buffer();
- pr_err("The hardware system does not provide all "
- "functions required by hypfs\n");
- }
if (diag204_info_type == INFO_EXT) {
rc = hypfs_dbfs_init();
if (rc)
- diag204_free_buffer();
+ return rc;
}
- return rc;
+ if (MACHINE_IS_LPAR) {
+ rc = diag224_get_name_table();
+ if (rc) {
+ pr_err("The hardware system does not provide all "
+ "functions required by hypfs\n");
+ debugfs_remove(dbfs_d204_file);
+ return rc;
+ }
+ }
+ return 0;
}
void hypfs_diag_exit(void)
diff --git a/arch/s390/hypfs/inode.c b/arch/s390/hypfs/inode.c
index 74d98670be27..47cc446dab8f 100644
--- a/arch/s390/hypfs/inode.c
+++ b/arch/s390/hypfs/inode.c
@@ -316,10 +316,10 @@ static int hypfs_fill_super(struct super_block *sb, void *data, int silent)
return 0;
}
-static int hypfs_get_super(struct file_system_type *fst, int flags,
- const char *devname, void *data, struct vfsmount *mnt)
+static struct dentry *hypfs_mount(struct file_system_type *fst, int flags,
+ const char *devname, void *data)
{
- return get_sb_single(fst, flags, data, hypfs_fill_super, mnt);
+ return mount_single(fst, flags, data, hypfs_fill_super);
}
static void hypfs_kill_super(struct super_block *sb)
@@ -455,7 +455,7 @@ static const struct file_operations hypfs_file_ops = {
static struct file_system_type hypfs_type = {
.owner = THIS_MODULE,
.name = "s390_hypfs",
- .get_sb = hypfs_get_super,
+ .mount = hypfs_mount,
.kill_sb = hypfs_kill_super
};
diff --git a/arch/s390/include/asm/ccwdev.h b/arch/s390/include/asm/ccwdev.h
index f3ba0fa98de6..e8501115eca8 100644
--- a/arch/s390/include/asm/ccwdev.h
+++ b/arch/s390/include/asm/ccwdev.h
@@ -92,6 +92,16 @@ struct ccw_device {
};
/*
+ * Possible events used by the path_event notifier.
+ */
+#define PE_NONE 0x0
+#define PE_PATH_GONE 0x1 /* A path is no longer available. */
+#define PE_PATH_AVAILABLE 0x2 /* A path has become available and
+ was successfully verified. */
+#define PE_PATHGROUP_ESTABLISHED 0x4 /* A pathgroup was reset and had
+ to be established again. */
+
+/*
* Possible CIO actions triggered by the unit check handler.
*/
enum uc_todo {
@@ -109,6 +119,7 @@ enum uc_todo {
* @set_online: called when setting device online
* @set_offline: called when setting device offline
* @notify: notify driver of device state changes
+ * @path_event: notify driver of channel path events
* @shutdown: called at device shutdown
* @prepare: prepare for pm state transition
* @complete: undo work done in @prepare
@@ -127,6 +138,7 @@ struct ccw_driver {
int (*set_online) (struct ccw_device *);
int (*set_offline) (struct ccw_device *);
int (*notify) (struct ccw_device *, int);
+ void (*path_event) (struct ccw_device *, int *);
void (*shutdown) (struct ccw_device *);
int (*prepare) (struct ccw_device *);
void (*complete) (struct ccw_device *);
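The new path_event callback lets a CCW device driver react to the channel-path changes signalled by the PE_* flags above; the real callback receives the ccw_device and a pointer to the event mask. The stand-alone sketch below only shows how such a mask could be decoded; the reactions in the printouts are illustrative assumptions, not taken from any real driver:

#include <stdio.h>

/* Event flags as introduced in <asm/ccwdev.h> above. */
#define PE_NONE				0x0
#define PE_PATH_GONE			0x1
#define PE_PATH_AVAILABLE		0x2
#define PE_PATHGROUP_ESTABLISHED	0x4

/* Stand-in for a driver's path_event body; the real callback gets a
 * struct ccw_device * and an int * holding this mask. */
static void demo_path_event(int event)
{
	if (event & PE_PATH_GONE)
		printf("a path went away: schedule path verification\n");
	if (event & PE_PATH_AVAILABLE)
		printf("a path came back and was verified: resume using it\n");
	if (event & PE_PATHGROUP_ESTABLISHED)
		printf("path group was re-established: repeat setup I/O\n");
}

int main(void)
{
	demo_path_event(PE_PATH_GONE | PE_PATHGROUP_ESTABLISHED);
	return 0;
}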
diff --git a/arch/s390/include/asm/cpu.h b/arch/s390/include/asm/cpu.h
index 471234b90574..e0b69540216f 100644
--- a/arch/s390/include/asm/cpu.h
+++ b/arch/s390/include/asm/cpu.h
@@ -20,7 +20,7 @@ struct cpuid
unsigned int ident : 24;
unsigned int machine : 16;
unsigned int unused : 16;
-} __packed;
+} __attribute__ ((packed, aligned(8)));
#endif /* __ASSEMBLY__ */
#endif /* _ASM_S390_CPU_H */
diff --git a/arch/s390/include/asm/cputime.h b/arch/s390/include/asm/cputime.h
index 8b1a52a137c5..40e2ab0fa3f0 100644
--- a/arch/s390/include/asm/cputime.h
+++ b/arch/s390/include/asm/cputime.h
@@ -73,18 +73,18 @@ cputime64_to_jiffies64(cputime64_t cputime)
}
/*
- * Convert cputime to milliseconds and back.
+ * Convert cputime to microseconds and back.
*/
static inline unsigned int
-cputime_to_msecs(const cputime_t cputime)
+cputime_to_usecs(const cputime_t cputime)
{
- return cputime_div(cputime, 4096000);
+ return cputime_div(cputime, 4096);
}
static inline cputime_t
-msecs_to_cputime(const unsigned int m)
+usecs_to_cputime(const unsigned int m)
{
- return (cputime_t) m * 4096000;
+ return (cputime_t) m * 4096;
}
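The divisor change reflects the s390 cputime unit: one cputime_t step is 1/4096 of a microsecond, so converting to microseconds divides by 4096 instead of the 4096000 that the old millisecond converters used. A minimal stand-alone sketch of the arithmetic, with a plain 64-bit integer standing in for the kernel's cputime_t:

#include <stdio.h>

/* cputime modelled as a plain counter where 4096 units equal one microsecond. */
typedef unsigned long long cputime_demo_t;

static unsigned int cputime_to_usecs_demo(cputime_demo_t ct)
{
	return ct / 4096;
}

static cputime_demo_t usecs_to_cputime_demo(unsigned int us)
{
	return (cputime_demo_t)us * 4096;
}

int main(void)
{
	cputime_demo_t one_second = usecs_to_cputime_demo(1000000);

	/* 1 s = 1,000,000 us = 4,096,000,000 cputime units */
	printf("%llu units -> %u usecs\n",
	       one_second, cputime_to_usecs_demo(one_second));
	return 0;
}

Running it prints 4096000000 units for one second and converts that back to 1000000 microseconds.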
/*
diff --git a/arch/s390/include/asm/dasd.h b/arch/s390/include/asm/dasd.h
index 218bce81ec70..b604a9186f8e 100644
--- a/arch/s390/include/asm/dasd.h
+++ b/arch/s390/include/asm/dasd.h
@@ -217,6 +217,25 @@ typedef struct dasd_symmio_parms {
int rssd_result_len;
} __attribute__ ((packed)) dasd_symmio_parms_t;
+/*
+ * Data returned by Sense Path Group ID (SNID)
+ */
+struct dasd_snid_data {
+ struct {
+ __u8 group:2;
+ __u8 reserve:2;
+ __u8 mode:1;
+ __u8 res:3;
+ } __attribute__ ((packed)) path_state;
+ __u8 pgid[11];
+} __attribute__ ((packed));
+
+struct dasd_snid_ioctl_data {
+ struct dasd_snid_data data;
+ __u8 path_mask;
+} __attribute__ ((packed));
+
+
/********************************************************************************
* SECTION: Definition of IOCTLs
*
@@ -261,25 +280,10 @@ typedef struct dasd_symmio_parms {
/* Set Attributes (cache operations) */
#define BIODASDSATTR _IOW(DASD_IOCTL_LETTER,2,attrib_data_t)
+/* Get Sense Path Group ID (SNID) data */
+#define BIODASDSNID _IOWR(DASD_IOCTL_LETTER, 1, struct dasd_snid_ioctl_data)
+
#define BIODASDSYMMIO _IOWR(DASD_IOCTL_LETTER, 240, dasd_symmio_parms_t)
#endif /* DASD_H */
-/*
- * Overrides for Emacs so that we follow Linus's tabbing style.
- * Emacs will notice this stuff at the end of the file and automatically
- * adjust the settings for this buffer only. This must remain at the end
- * of the file.
- * ---------------------------------------------------------------------------
- * Local variables:
- * c-indent-level: 4
- * c-brace-imaginary-offset: 0
- * c-brace-offset: -4
- * c-argdecl-indent: 4
- * c-label-offset: -4
- * c-continued-statement-offset: 4
- * c-continued-brace-offset: 0
- * indent-tabs-mode: nil
- * tab-width: 8
- * End:
- */
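The new BIODASDSNID ioctl returns the Sense Path Group ID (SNID) data defined above. Below is a hedged sketch of a user-space caller; it assumes the header is available as <asm/dasd.h>, that a DASD node /dev/dasda exists, and that path_mask 0x80 selects the first channel path (all three are assumptions, not shown in this patch):

#include <stdio.h>
#include <fcntl.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <asm/dasd.h>	/* BIODASDSNID, struct dasd_snid_ioctl_data */

int main(void)
{
	struct dasd_snid_ioctl_data snid = { .path_mask = 0x80 };	/* assumed: first path */
	int fd = open("/dev/dasda", O_RDONLY);	/* assumed device node */

	if (fd < 0) {
		perror("open");
		return 1;
	}
	if (ioctl(fd, BIODASDSNID, &snid) < 0) {
		perror("BIODASDSNID");
		close(fd);
		return 1;
	}
	printf("path group state: group=%u mode=%u\n",
	       (unsigned)snid.data.path_state.group,
	       (unsigned)snid.data.path_state.mode);
	close(fd);
	return 0;
}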
diff --git a/arch/s390/include/asm/hugetlb.h b/arch/s390/include/asm/hugetlb.h
index bb8343d157bc..b56403c2df28 100644
--- a/arch/s390/include/asm/hugetlb.h
+++ b/arch/s390/include/asm/hugetlb.h
@@ -37,32 +37,6 @@ static inline int prepare_hugepage_range(struct file *file,
int arch_prepare_hugepage(struct page *page);
void arch_release_hugepage(struct page *page);
-static inline pte_t pte_mkhuge(pte_t pte)
-{
- /*
- * PROT_NONE needs to be remapped from the pte type to the ste type.
- * The HW invalid bit is also different for pte and ste. The pte
- * invalid bit happens to be the same as the ste _SEGMENT_ENTRY_LARGE
- * bit, so we don't have to clear it.
- */
- if (pte_val(pte) & _PAGE_INVALID) {
- if (pte_val(pte) & _PAGE_SWT)
- pte_val(pte) |= _HPAGE_TYPE_NONE;
- pte_val(pte) |= _SEGMENT_ENTRY_INV;
- }
- /*
- * Clear SW pte bits SWT and SWX, there are no SW bits in a segment
- * table entry.
- */
- pte_val(pte) &= ~(_PAGE_SWT | _PAGE_SWX);
- /*
- * Also set the change-override bit because we don't need dirty bit
- * tracking for hugetlbfs pages.
- */
- pte_val(pte) |= (_SEGMENT_ENTRY_LARGE | _SEGMENT_ENTRY_CO);
- return pte;
-}
-
static inline pte_t huge_pte_wrprotect(pte_t pte)
{
pte_val(pte) |= _PAGE_RO;
diff --git a/arch/s390/include/asm/lowcore.h b/arch/s390/include/asm/lowcore.h
index 0f97ef2d92ac..65e172f8209d 100644
--- a/arch/s390/include/asm/lowcore.h
+++ b/arch/s390/include/asm/lowcore.h
@@ -150,9 +150,10 @@ struct _lowcore {
*/
__u32 ipib; /* 0x0e00 */
__u32 ipib_checksum; /* 0x0e04 */
+ __u8 pad_0x0e08[0x0f00-0x0e08]; /* 0x0e08 */
- /* Align to the top 1k of prefix area */
- __u8 pad_0x0e08[0x1000-0x0e08]; /* 0x0e08 */
+ /* Extended facility list */
+ __u64 stfle_fac_list[32]; /* 0x0f00 */
} __packed;
#else /* CONFIG_32BIT */
@@ -285,7 +286,11 @@ struct _lowcore {
*/
__u64 ipib; /* 0x0e00 */
__u32 ipib_checksum; /* 0x0e08 */
- __u8 pad_0x0e0c[0x11b8-0x0e0c]; /* 0x0e0c */
+ __u8 pad_0x0e0c[0x0f00-0x0e0c]; /* 0x0e0c */
+
+ /* Extended facility list */
+ __u64 stfle_fac_list[32]; /* 0x0f00 */
+ __u8 pad_0x1000[0x11b8-0x1000]; /* 0x1000 */
/* 64 bit extparam used for pfault/diag 250: defined by architecture */
__u64 ext_params2; /* 0x11B8 */
diff --git a/arch/s390/include/asm/page.h b/arch/s390/include/asm/page.h
index af650fb47206..3c987e9ec8d6 100644
--- a/arch/s390/include/asm/page.h
+++ b/arch/s390/include/asm/page.h
@@ -108,9 +108,13 @@ typedef pte_t *pgtable_t;
#define __pgprot(x) ((pgprot_t) { (x) } )
static inline void
-page_set_storage_key(unsigned long addr, unsigned int skey)
+page_set_storage_key(unsigned long addr, unsigned int skey, int mapped)
{
- asm volatile("sske %0,%1" : : "d" (skey), "a" (addr));
+ if (!mapped)
+ asm volatile(".insn rrf,0xb22b0000,%0,%1,8,0"
+ : : "d" (skey), "a" (addr));
+ else
+ asm volatile("sske %0,%1" : : "d" (skey), "a" (addr));
}
static inline unsigned int
@@ -126,6 +130,11 @@ struct page;
void arch_free_page(struct page *page, int order);
void arch_alloc_page(struct page *page, int order);
+static inline int devmem_is_allowed(unsigned long pfn)
+{
+ return 0;
+}
+
#define HAVE_ARCH_FREE_PAGE
#define HAVE_ARCH_ALLOC_PAGE
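Returning 0 from devmem_is_allowed() means that, with the new STRICT_DEVMEM option from Kconfig.debug enabled, every /dev/mem access to physical memory is refused. A hedged user-space illustration of the expected behaviour (that the rejection surfaces as EPERM is an assumption based on the generic /dev/mem filtering, not something shown in this patch):

#include <stdio.h>
#include <string.h>
#include <errno.h>
#include <fcntl.h>
#include <unistd.h>

int main(void)
{
	char buf[16];
	int fd = open("/dev/mem", O_RDONLY);	/* needs root to open at all */

	if (fd < 0) {
		perror("open /dev/mem");
		return 1;
	}
	/* With devmem_is_allowed() always returning 0, this read should fail. */
	if (read(fd, buf, sizeof(buf)) < 0)
		printf("read rejected as expected: %s\n", strerror(errno));
	close(fd);
	return 0;
}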
diff --git a/arch/s390/include/asm/pgalloc.h b/arch/s390/include/asm/pgalloc.h
index 68940d0bad91..082eb4e50e8b 100644
--- a/arch/s390/include/asm/pgalloc.h
+++ b/arch/s390/include/asm/pgalloc.h
@@ -21,9 +21,11 @@
unsigned long *crst_table_alloc(struct mm_struct *, int);
void crst_table_free(struct mm_struct *, unsigned long *);
+void crst_table_free_rcu(struct mm_struct *, unsigned long *);
unsigned long *page_table_alloc(struct mm_struct *);
void page_table_free(struct mm_struct *, unsigned long *);
+void page_table_free_rcu(struct mm_struct *, unsigned long *);
void disable_noexec(struct mm_struct *, struct task_struct *);
static inline void clear_table(unsigned long *s, unsigned long val, size_t n)
@@ -176,4 +178,6 @@ static inline void pmd_populate(struct mm_struct *mm,
#define pte_free_kernel(mm, pte) page_table_free(mm, (unsigned long *) pte)
#define pte_free(mm, pte) page_table_free(mm, (unsigned long *) pte)
+extern void rcu_table_freelist_finish(void);
+
#endif /* _S390_PGALLOC_H */
diff --git a/arch/s390/include/asm/pgtable.h b/arch/s390/include/asm/pgtable.h
index 3157441ee1da..02ace3491c51 100644
--- a/arch/s390/include/asm/pgtable.h
+++ b/arch/s390/include/asm/pgtable.h
@@ -38,6 +38,7 @@
extern pgd_t swapper_pg_dir[] __attribute__ ((aligned (4096)));
extern void paging_init(void);
extern void vmem_map_init(void);
+extern void fault_init(void);
/*
* The S390 doesn't have any external MMU info: the kernel page
@@ -46,11 +47,27 @@ extern void vmem_map_init(void);
#define update_mmu_cache(vma, address, ptep) do { } while (0)
/*
- * ZERO_PAGE is a global shared page that is always zero: used
+ * ZERO_PAGE is a global shared page that is always zero; used
* for zero-mapped memory areas etc..
*/
-extern char empty_zero_page[PAGE_SIZE];
-#define ZERO_PAGE(vaddr) (virt_to_page(empty_zero_page))
+
+extern unsigned long empty_zero_page;
+extern unsigned long zero_page_mask;
+
+#define ZERO_PAGE(vaddr) \
+ (virt_to_page((void *)(empty_zero_page + \
+ (((unsigned long)(vaddr)) & zero_page_mask))))
+
+#define is_zero_pfn is_zero_pfn
+static inline int is_zero_pfn(unsigned long pfn)
+{
+ extern unsigned long zero_pfn;
+ unsigned long offset_from_zero_pfn = pfn - zero_pfn;
+ return offset_from_zero_pfn <= (zero_page_mask >> PAGE_SHIFT);
+}
+
+#define my_zero_pfn(addr) page_to_pfn(ZERO_PAGE(addr))
+
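ZERO_PAGE() now picks one of several physical zero pages by masking low bits of the virtual address with zero_page_mask, so different mappings can be backed by different copies of the zero page and read traffic is spread out. A minimal sketch of just that selection arithmetic, with a hypothetical base address and a mask covering four pages (both values made up for illustration):

#include <stdio.h>

#define PAGE_SHIFT	12
#define PAGE_SIZE	(1UL << PAGE_SHIFT)

int main(void)
{
	/* Hypothetical values: four colored zero pages starting at 0x100000. */
	unsigned long empty_zero_page = 0x100000UL;
	unsigned long zero_page_mask = (4UL - 1) << PAGE_SHIFT;
	unsigned long vaddr;

	for (vaddr = 0x20000000UL; vaddr < 0x20000000UL + 6 * PAGE_SIZE;
	     vaddr += PAGE_SIZE) {
		/* Same selection as the ZERO_PAGE() macro, minus virt_to_page(). */
		unsigned long zero = empty_zero_page + (vaddr & zero_page_mask);

		printf("vaddr %#lx uses zero page at %#lx\n", vaddr, zero);
	}
	return 0;
}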
#endif /* !__ASSEMBLY__ */
/*
@@ -300,6 +317,7 @@ extern unsigned long VMALLOC_START;
/* Bits in the segment table entry */
#define _SEGMENT_ENTRY_ORIGIN 0x7fffffc0UL /* page table origin */
+#define _SEGMENT_ENTRY_RO 0x200 /* page protection bit */
#define _SEGMENT_ENTRY_INV 0x20 /* invalid segment table entry */
#define _SEGMENT_ENTRY_COMMON 0x10 /* common segment bit */
#define _SEGMENT_ENTRY_PTL 0x0f /* page table length */
@@ -572,7 +590,7 @@ static inline void rcp_unlock(pte_t *ptep)
}
/* forward declaration for SetPageUptodate in page-flags.h*/
-static inline void page_clear_dirty(struct page *page);
+static inline void page_clear_dirty(struct page *page, int mapped);
#include <linux/page-flags.h>
static inline void ptep_rcp_copy(pte_t *ptep)
@@ -754,6 +772,34 @@ static inline pte_t pte_mkspecial(pte_t pte)
return pte;
}
+#ifdef CONFIG_HUGETLB_PAGE
+static inline pte_t pte_mkhuge(pte_t pte)
+{
+ /*
+ * PROT_NONE needs to be remapped from the pte type to the ste type.
+ * The HW invalid bit is also different for pte and ste. The pte
+ * invalid bit happens to be the same as the ste _SEGMENT_ENTRY_LARGE
+ * bit, so we don't have to clear it.
+ */
+ if (pte_val(pte) & _PAGE_INVALID) {
+ if (pte_val(pte) & _PAGE_SWT)
+ pte_val(pte) |= _HPAGE_TYPE_NONE;
+ pte_val(pte) |= _SEGMENT_ENTRY_INV;
+ }
+ /*
+ * Clear SW pte bits SWT and SWX, there are no SW bits in a segment
+ * table entry.
+ */
+ pte_val(pte) &= ~(_PAGE_SWT | _PAGE_SWX);
+ /*
+ * Also set the change-override bit because we don't need dirty bit
+ * tracking for hugetlbfs pages.
+ */
+ pte_val(pte) |= (_SEGMENT_ENTRY_LARGE | _SEGMENT_ENTRY_CO);
+ return pte;
+}
+#endif
+
#ifdef CONFIG_PGSTE
/*
* Get (and clear) the user dirty bit for a PTE.
@@ -782,7 +828,7 @@ static inline int kvm_s390_test_and_clear_page_dirty(struct mm_struct *mm,
}
dirty = test_and_clear_bit_simple(KVM_UD_BIT, pgste);
if (skey & _PAGE_CHANGED)
- page_clear_dirty(page);
+ page_clear_dirty(page, 1);
rcp_unlock(ptep);
return dirty;
}
@@ -957,9 +1003,9 @@ static inline int page_test_dirty(struct page *page)
}
#define __HAVE_ARCH_PAGE_CLEAR_DIRTY
-static inline void page_clear_dirty(struct page *page)
+static inline void page_clear_dirty(struct page *page, int mapped)
{
- page_set_storage_key(page_to_phys(page), PAGE_DEFAULT_KEY);
+ page_set_storage_key(page_to_phys(page), PAGE_DEFAULT_KEY, mapped);
}
/*
@@ -1048,9 +1094,7 @@ static inline pmd_t *pmd_offset(pud_t *pud, unsigned long address)
#define pte_offset(pmd, addr) ((pte_t *) pmd_deref(*(pmd)) + pte_index(addr))
#define pte_offset_kernel(pmd, address) pte_offset(pmd,address)
#define pte_offset_map(pmd, address) pte_offset_kernel(pmd, address)
-#define pte_offset_map_nested(pmd, address) pte_offset_kernel(pmd, address)
#define pte_unmap(pte) do { } while (0)
-#define pte_unmap_nested(pte) do { } while (0)
/*
* 31 bit swap entry format:
diff --git a/arch/s390/include/asm/processor.h b/arch/s390/include/asm/processor.h
index 73e259834e10..8d6f87169577 100644
--- a/arch/s390/include/asm/processor.h
+++ b/arch/s390/include/asm/processor.h
@@ -82,8 +82,6 @@ struct thread_struct {
unsigned long prot_addr; /* address of protection-excep. */
unsigned int trap_no;
per_struct per_info;
- /* Used to give failing instruction back to user for ieee exceptions */
- unsigned long ieee_instruction_pointer;
/* pfault_wait is used to block the process on a pfault event */
unsigned long pfault_wait;
};
diff --git a/arch/s390/include/asm/ptrace.h b/arch/s390/include/asm/ptrace.h
index e2c218dc68a6..d9d42b1e46fa 100644
--- a/arch/s390/include/asm/ptrace.h
+++ b/arch/s390/include/asm/ptrace.h
@@ -481,8 +481,7 @@ struct user_regs_struct
* watchpoints. This is the way intel does it.
*/
per_struct per_info;
- unsigned long ieee_instruction_pointer;
- /* Used to give failing instruction back to user for ieee exceptions */
+ unsigned long ieee_instruction_pointer; /* obsolete, always 0 */
};
#ifdef __KERNEL__
diff --git a/arch/s390/include/asm/s390_ext.h b/arch/s390/include/asm/s390_ext.h
index 2afc060266a2..1a9307e70842 100644
--- a/arch/s390/include/asm/s390_ext.h
+++ b/arch/s390/include/asm/s390_ext.h
@@ -12,7 +12,7 @@
#include <linux/types.h>
-typedef void (*ext_int_handler_t)(__u16 code);
+typedef void (*ext_int_handler_t)(unsigned int, unsigned int, unsigned long);
typedef struct ext_int_info_t {
struct ext_int_info_t *next;
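The widened ext_int_handler_t signature matches the registers set up in the interrupt entry code further down: the first argument is the 32-bit word at __LC_CPU_ADDRESS, i.e. the CPU address in the upper halfword and the external interruption code in the lower halfword, followed by the 32-bit and 64-bit external parameters. A small stand-alone sketch of decoding that first argument (the value 0x1202, the external-call code, is used only as a sample):

#include <stdio.h>

/* Illustrative decode of the first argument now passed to do_extint()
 * and ext_int_handler_t. */
static void demo_ext_int(unsigned int ext_int_code,
			 unsigned int param32, unsigned long param64)
{
	unsigned int code = ext_int_code & 0xffff;	/* interruption code */
	unsigned int cpu_addr = ext_int_code >> 16;	/* originating CPU */

	printf("ext irq code %#06x from cpu %u, params %#x/%#lx\n",
	       code, cpu_addr, param32, param64);
}

int main(void)
{
	demo_ext_int((3u << 16) | 0x1202, 0, 0);
	return 0;
}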
diff --git a/arch/s390/include/asm/scatterlist.h b/arch/s390/include/asm/scatterlist.h
index 35d786fe93ae..6d45ef6c12a7 100644
--- a/arch/s390/include/asm/scatterlist.h
+++ b/arch/s390/include/asm/scatterlist.h
@@ -1 +1,3 @@
#include <asm-generic/scatterlist.h>
+
+#define ARCH_HAS_SG_CHAIN
diff --git a/arch/s390/include/asm/setup.h b/arch/s390/include/asm/setup.h
index 25e831d58e1e..d5e2ef10537d 100644
--- a/arch/s390/include/asm/setup.h
+++ b/arch/s390/include/asm/setup.h
@@ -73,6 +73,7 @@ extern unsigned int user_mode;
#define MACHINE_FLAG_PFMF (1UL << 11)
#define MACHINE_FLAG_LPAR (1UL << 12)
#define MACHINE_FLAG_SPP (1UL << 13)
+#define MACHINE_FLAG_TOPOLOGY (1UL << 14)
#define MACHINE_IS_VM (S390_lowcore.machine_flags & MACHINE_FLAG_VM)
#define MACHINE_IS_KVM (S390_lowcore.machine_flags & MACHINE_FLAG_KVM)
@@ -90,6 +91,7 @@ extern unsigned int user_mode;
#define MACHINE_HAS_HPAGE (0)
#define MACHINE_HAS_PFMF (0)
#define MACHINE_HAS_SPP (0)
+#define MACHINE_HAS_TOPOLOGY (0)
#else /* __s390x__ */
#define MACHINE_HAS_IEEE (1)
#define MACHINE_HAS_CSP (1)
@@ -100,6 +102,7 @@ extern unsigned int user_mode;
#define MACHINE_HAS_HPAGE (S390_lowcore.machine_flags & MACHINE_FLAG_HPAGE)
#define MACHINE_HAS_PFMF (S390_lowcore.machine_flags & MACHINE_FLAG_PFMF)
#define MACHINE_HAS_SPP (S390_lowcore.machine_flags & MACHINE_FLAG_SPP)
+#define MACHINE_HAS_TOPOLOGY (S390_lowcore.machine_flags & MACHINE_FLAG_TOPOLOGY)
#endif /* __s390x__ */
#define ZFCPDUMP_HSA_SIZE (32UL<<20)
diff --git a/arch/s390/include/asm/syscall.h b/arch/s390/include/asm/syscall.h
index 8429686951f9..5c0246b955d8 100644
--- a/arch/s390/include/asm/syscall.h
+++ b/arch/s390/include/asm/syscall.h
@@ -65,8 +65,6 @@ static inline void syscall_get_arguments(struct task_struct *task,
if (test_tsk_thread_flag(task, TIF_31BIT))
mask = 0xffffffff;
#endif
- if (i + n == 6)
- args[--n] = regs->args[0] & mask;
while (n-- > 0)
if (i + n > 0)
args[n] = regs->gprs[2 + i + n] & mask;
@@ -80,8 +78,6 @@ static inline void syscall_set_arguments(struct task_struct *task,
const unsigned long *args)
{
BUG_ON(i + n > 6);
- if (i + n == 6)
- regs->args[0] = args[--n];
while (n-- > 0)
if (i + n > 0)
regs->gprs[2 + i + n] = args[n];
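Dropping the regs->args[0] special case means all six system call arguments are now read uniformly from gprs 2..7 and, for 31-bit compat tasks, masked down to 32 bits. The sketch below shows only the copy-and-mask loop against a fake register file; in the real helper, argument 0 is taken from orig_gpr2 because gpr 2 is overwritten by the return value, a detail omitted here for brevity:

#include <stdio.h>

int main(void)
{
	/* Fake register file standing in for pt_regs.gprs[]. */
	unsigned long gprs[16] = {
		[2] = 0x12345678aabbccddUL,	/* argument 0 */
		[3] = 0x2,			/* argument 1 */
		[7] = 0x7,			/* argument 5, no longer special-cased */
	};
	unsigned long args[6];
	unsigned long mask = 0xffffffffUL;	/* pretend TIF_31BIT is set */
	unsigned int i = 0, n = 6;

	while (n-- > 0)
		args[n] = gprs[2 + i + n] & mask;	/* arg n comes from gpr 2+n */

	for (n = 0; n < 6; n++)
		printf("arg%u = %#lx\n", n, args[n]);
	return 0;
}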
diff --git a/arch/s390/include/asm/sysinfo.h b/arch/s390/include/asm/sysinfo.h
index 22bdb2a0ee5f..79d3d6e2e9c5 100644
--- a/arch/s390/include/asm/sysinfo.h
+++ b/arch/s390/include/asm/sysinfo.h
@@ -14,8 +14,13 @@
#ifndef __ASM_S390_SYSINFO_H
#define __ASM_S390_SYSINFO_H
+#include <asm/bitsperlong.h>
+
struct sysinfo_1_1_1 {
- char reserved_0[32];
+ unsigned short :16;
+ unsigned char ccr;
+ unsigned char cai;
+ char reserved_0[28];
char manufacturer[16];
char type[4];
char reserved_1[12];
@@ -104,6 +109,39 @@ struct sysinfo_3_2_2 {
char reserved_544[3552];
};
+#define TOPOLOGY_CPU_BITS 64
+#define TOPOLOGY_NR_MAG 6
+
+struct topology_cpu {
+ unsigned char reserved0[4];
+ unsigned char :6;
+ unsigned char pp:2;
+ unsigned char reserved1;
+ unsigned short origin;
+ unsigned long mask[TOPOLOGY_CPU_BITS / BITS_PER_LONG];
+};
+
+struct topology_container {
+ unsigned char reserved[7];
+ unsigned char id;
+};
+
+union topology_entry {
+ unsigned char nl;
+ struct topology_cpu cpu;
+ struct topology_container container;
+};
+
+struct sysinfo_15_1_x {
+ unsigned char reserved0[2];
+ unsigned short length;
+ unsigned char mag[TOPOLOGY_NR_MAG];
+ unsigned char reserved1;
+ unsigned char mnest;
+ unsigned char reserved2[4];
+ union topology_entry tle[0];
+};
+
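struct sysinfo_15_1_x describes the SYSIB 15.1.x topology block: length covers the whole block and tle[] is a variable-length list of entries that are either containers or CPU sets. The sketch below walks such a list under the assumption that an entry with nl == 0 is a 16-byte CPU entry and anything else an 8-byte container entry; that convention is how the topology code interprets the data, but it is not spelled out in this header:

#include <stdio.h>

/* Simplified copies of the structures added to <asm/sysinfo.h> above
 * (the pp bitfield is flattened into a plain byte; 64-bit longs assumed). */
struct topology_cpu {
	unsigned char reserved0[4];
	unsigned char pp;
	unsigned char reserved1;
	unsigned short origin;
	unsigned long mask[1];		/* 64 CPU bits */
};

struct topology_container {
	unsigned char reserved[7];
	unsigned char id;
};

union topology_entry {
	unsigned char nl;
	struct topology_cpu cpu;
	struct topology_container container;
};

/* Assumption: nl == 0 marks a 16-byte CPU entry, anything else an
 * 8-byte container entry. */
static union topology_entry *next_tle(union topology_entry *tle)
{
	if (!tle->nl)
		return (union topology_entry *)((char *)tle + sizeof(tle->cpu));
	return (union topology_entry *)((char *)tle + sizeof(tle->container));
}

int main(void)
{
	static unsigned long storage[3];	/* 24 zeroed, aligned bytes */
	unsigned char *buf = (unsigned char *)storage;
	union topology_entry *tle = (union topology_entry *)buf;
	union topology_entry *end = (union topology_entry *)(buf + sizeof(storage));

	buf[0] = 1;	/* first entry: container at nesting level 1 */
	/* the 16 bytes that follow stay zero: a CPU entry with nl == 0 */

	while (tle < end) {
		if (tle->nl)
			printf("container entry, id %d\n", tle->container.id);
		else
			printf("cpu entry, origin %d\n", tle->cpu.origin);
		tle = next_tle(tle);
	}
	return 0;
}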
static inline int stsi(void *sysinfo, int fc, int sel1, int sel2)
{
register int r0 asm("0") = (fc << 28) | sel1;
diff --git a/arch/s390/include/asm/system.h b/arch/s390/include/asm/system.h
index 1f2ebc4afd82..3ad16dbf622e 100644
--- a/arch/s390/include/asm/system.h
+++ b/arch/s390/include/asm/system.h
@@ -85,14 +85,16 @@ static inline void restore_access_regs(unsigned int *acrs)
asm volatile("lam 0,15,%0" : : "Q" (*acrs));
}
-#define switch_to(prev,next,last) do { \
- if (prev == next) \
- break; \
- save_fp_regs(&prev->thread.fp_regs); \
- restore_fp_regs(&next->thread.fp_regs); \
- save_access_regs(&prev->thread.acrs[0]); \
- restore_access_regs(&next->thread.acrs[0]); \
- prev = __switch_to(prev,next); \
+#define switch_to(prev,next,last) do { \
+ if (prev->mm) { \
+ save_fp_regs(&prev->thread.fp_regs); \
+ save_access_regs(&prev->thread.acrs[0]); \
+ } \
+ if (next->mm) { \
+ restore_fp_regs(&next->thread.fp_regs); \
+ restore_access_regs(&next->thread.acrs[0]); \
+ } \
+ prev = __switch_to(prev,next); \
} while (0)
extern void account_vtime(struct task_struct *, struct task_struct *);
@@ -418,30 +420,21 @@ extern void smp_ctl_clear_bit(int cr, int bit);
#endif /* CONFIG_SMP */
-static inline unsigned int stfl(void)
-{
- asm volatile(
- " .insn s,0xb2b10000,0(0)\n" /* stfl */
- "0:\n"
- EX_TABLE(0b,0b));
- return S390_lowcore.stfl_fac_list;
-}
+#define MAX_FACILITY_BIT (256*8) /* stfle_fac_list has 256 bytes */
-static inline int __stfle(unsigned long long *list, int doublewords)
+/*
+ * The test_facility function uses the bit ordering where the MSB is bit 0.
+ * That makes it easier to query facility bits with the bit number as
+ * documented in the Principles of Operation.
+ */
+static inline int test_facility(unsigned long nr)
{
- typedef struct { unsigned long long _[doublewords]; } addrtype;
- register unsigned long __nr asm("0") = doublewords - 1;
-
- asm volatile(".insn s,0xb2b00000,%0" /* stfle */
- : "=m" (*(addrtype *) list), "+d" (__nr) : : "cc");
- return __nr + 1;
-}
+ unsigned char *ptr;
-static inline int stfle(unsigned long long *list, int doublewords)
-{
- if (!(stfl() & (1UL << 24)))
- return -EOPNOTSUPP;
- return __stfle(list, doublewords);
+ if (nr >= MAX_FACILITY_BIT)
+ return 0;
+ ptr = (unsigned char *) &S390_lowcore.stfle_fac_list + (nr >> 3);
+ return (*ptr & (0x80 >> (nr & 7))) != 0;
}
static inline unsigned short stap(void)
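test_facility() indexes the stored facility list with MSB-first bit numbering, so facility bit N lives in byte N/8 behind mask 0x80 >> (N%8). A stand-alone sketch of the same lookup against a hypothetical buffer (the real code reads S390_lowcore.stfle_fac_list, which setup_facility_list() in early.c fills during boot):

#include <stdio.h>

/* Hypothetical 256-byte facility list standing in for
 * S390_lowcore.stfle_fac_list; bits are numbered MSB-first per byte. */
static unsigned char fac_list[256];

static int test_facility_demo(unsigned long nr)
{
	if (nr >= 256 * 8)	/* MAX_FACILITY_BIT */
		return 0;
	/* byte nr/8, bit nr%8 counted from the most significant bit */
	return (fac_list[nr >> 3] & (0x80 >> (nr & 7))) != 0;
}

int main(void)
{
	fac_list[2] = 0x40;	/* set facility 17: byte 2, second-highest bit */

	printf("facility 17 -> %d\n", test_facility_demo(17));	/* prints 1 */
	printf("facility 16 -> %d\n", test_facility_demo(16));	/* prints 0 */
	return 0;
}

Facility 17 is the CPACF bit queried by crypt_s390_func_available() above, which is why that check collapses to a single test_facility(17) call.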
diff --git a/arch/s390/include/asm/tlb.h b/arch/s390/include/asm/tlb.h
index fd1c00d08bf5..f1f644f2240a 100644
--- a/arch/s390/include/asm/tlb.h
+++ b/arch/s390/include/asm/tlb.h
@@ -64,10 +64,9 @@ static inline void tlb_flush_mmu(struct mmu_gather *tlb,
if (!tlb->fullmm && (tlb->nr_ptes > 0 || tlb->nr_pxds < TLB_NR_PTRS))
__tlb_flush_mm(tlb->mm);
while (tlb->nr_ptes > 0)
- pte_free(tlb->mm, tlb->array[--tlb->nr_ptes]);
+ page_table_free_rcu(tlb->mm, tlb->array[--tlb->nr_ptes]);
while (tlb->nr_pxds < TLB_NR_PTRS)
- /* pgd_free frees the pointer as region or segment table */
- pgd_free(tlb->mm, tlb->array[tlb->nr_pxds++]);
+ crst_table_free_rcu(tlb->mm, tlb->array[tlb->nr_pxds++]);
}
static inline void tlb_finish_mmu(struct mmu_gather *tlb,
@@ -75,6 +74,8 @@ static inline void tlb_finish_mmu(struct mmu_gather *tlb,
{
tlb_flush_mmu(tlb, start, end);
+ rcu_table_freelist_finish();
+
/* keep the page table cache within bounds */
check_pgt_cache();
@@ -103,7 +104,7 @@ static inline void pte_free_tlb(struct mmu_gather *tlb, pgtable_t pte,
if (tlb->nr_ptes >= tlb->nr_pxds)
tlb_flush_mmu(tlb, 0, 0);
} else
- pte_free(tlb->mm, pte);
+ page_table_free(tlb->mm, (unsigned long *) pte);
}
/*
@@ -124,7 +125,7 @@ static inline void pmd_free_tlb(struct mmu_gather *tlb, pmd_t *pmd,
if (tlb->nr_ptes >= tlb->nr_pxds)
tlb_flush_mmu(tlb, 0, 0);
} else
- pmd_free(tlb->mm, pmd);
+ crst_table_free(tlb->mm, (unsigned long *) pmd);
#endif
}
@@ -146,7 +147,7 @@ static inline void pud_free_tlb(struct mmu_gather *tlb, pud_t *pud,
if (tlb->nr_ptes >= tlb->nr_pxds)
tlb_flush_mmu(tlb, 0, 0);
} else
- pud_free(tlb->mm, pud);
+ crst_table_free(tlb->mm, (unsigned long *) pud);
#endif
}
diff --git a/arch/s390/include/asm/topology.h b/arch/s390/include/asm/topology.h
index 051107a2c5e2..c5338834ddbd 100644
--- a/arch/s390/include/asm/topology.h
+++ b/arch/s390/include/asm/topology.h
@@ -2,6 +2,7 @@
#define _ASM_S390_TOPOLOGY_H
#include <linux/cpumask.h>
+#include <asm/sysinfo.h>
extern unsigned char cpu_core_id[NR_CPUS];
extern cpumask_t cpu_core_map[NR_CPUS];
@@ -32,6 +33,7 @@ static inline const struct cpumask *cpu_book_mask(unsigned int cpu)
int topology_set_cpu_management(int fc);
void topology_schedule_update(void);
+void store_topology(struct sysinfo_15_1_x *info);
#define POLARIZATION_UNKNWN (-1)
#define POLARIZATION_HRZ (0)
diff --git a/arch/s390/kernel/asm-offsets.c b/arch/s390/kernel/asm-offsets.c
index 5232278d79ad..33982e7ce04d 100644
--- a/arch/s390/kernel/asm-offsets.c
+++ b/arch/s390/kernel/asm-offsets.c
@@ -66,9 +66,9 @@ int main(void)
DEFINE(__VDSO_ECTG_BASE, offsetof(struct vdso_per_cpu_data, ectg_timer_base));
DEFINE(__VDSO_ECTG_USER, offsetof(struct vdso_per_cpu_data, ectg_user_time));
/* constants used by the vdso */
- DEFINE(CLOCK_REALTIME, CLOCK_REALTIME);
- DEFINE(CLOCK_MONOTONIC, CLOCK_MONOTONIC);
- DEFINE(CLOCK_REALTIME_RES, MONOTONIC_RES_NSEC);
+ DEFINE(__CLOCK_REALTIME, CLOCK_REALTIME);
+ DEFINE(__CLOCK_MONOTONIC, CLOCK_MONOTONIC);
+ DEFINE(__CLOCK_REALTIME_RES, MONOTONIC_RES_NSEC);
BLANK();
/* constants for SIGP */
DEFINE(__SIGP_STOP, sigp_stop);
@@ -84,6 +84,7 @@ int main(void)
DEFINE(__LC_SVC_INT_CODE, offsetof(struct _lowcore, svc_code));
DEFINE(__LC_PGM_ILC, offsetof(struct _lowcore, pgm_ilc));
DEFINE(__LC_PGM_INT_CODE, offsetof(struct _lowcore, pgm_code));
+ DEFINE(__LC_TRANS_EXC_CODE, offsetof(struct _lowcore, trans_exc_code));
DEFINE(__LC_PER_ATMID, offsetof(struct _lowcore, per_perc_atmid));
DEFINE(__LC_PER_ADDRESS, offsetof(struct _lowcore, per_address));
DEFINE(__LC_PER_ACCESS_ID, offsetof(struct _lowcore, per_access_id));
@@ -142,10 +143,8 @@ int main(void)
DEFINE(__LC_GPREGS_SAVE_AREA, offsetof(struct _lowcore, gpregs_save_area));
DEFINE(__LC_CREGS_SAVE_AREA, offsetof(struct _lowcore, cregs_save_area));
#ifdef CONFIG_32BIT
- DEFINE(__LC_PFAULT_INTPARM, offsetof(struct _lowcore, ext_params));
DEFINE(SAVE_AREA_BASE, offsetof(struct _lowcore, extended_save_area_addr));
#else /* CONFIG_32BIT */
- DEFINE(__LC_PFAULT_INTPARM, offsetof(struct _lowcore, ext_params2));
DEFINE(__LC_EXT_PARAMS2, offsetof(struct _lowcore, ext_params2));
DEFINE(SAVE_AREA_BASE, offsetof(struct _lowcore, floating_pt_save_area));
DEFINE(__LC_PASTE, offsetof(struct _lowcore, paste));
diff --git a/arch/s390/kernel/compat_linux.c b/arch/s390/kernel/compat_linux.c
index 1e6449c79ab6..53acaa86dd94 100644
--- a/arch/s390/kernel/compat_linux.c
+++ b/arch/s390/kernel/compat_linux.c
@@ -25,7 +25,6 @@
#include <linux/resource.h>
#include <linux/times.h>
#include <linux/smp.h>
-#include <linux/smp_lock.h>
#include <linux/sem.h>
#include <linux/msg.h>
#include <linux/shm.h>
diff --git a/arch/s390/kernel/compat_ptrace.h b/arch/s390/kernel/compat_ptrace.h
index 123dd660d7fb..3141025724f4 100644
--- a/arch/s390/kernel/compat_ptrace.h
+++ b/arch/s390/kernel/compat_ptrace.h
@@ -51,8 +51,7 @@ struct user_regs_struct32
* watchpoints. This is the way intel does it.
*/
per_struct32 per_info;
- u32 ieee_instruction_pointer;
- /* Used to give failing instruction back to user for ieee exceptions */
+ u32 ieee_instruction_pointer; /* obsolete, always 0 */
};
struct user32 {
diff --git a/arch/s390/kernel/dis.c b/arch/s390/kernel/dis.c
index b39b27d68b45..c83726c9fe03 100644
--- a/arch/s390/kernel/dis.c
+++ b/arch/s390/kernel/dis.c
@@ -113,7 +113,7 @@ enum {
INSTR_INVALID,
INSTR_E,
INSTR_RIE_R0IU, INSTR_RIE_R0UU, INSTR_RIE_RRP, INSTR_RIE_RRPU,
- INSTR_RIE_RRUUU, INSTR_RIE_RUPI, INSTR_RIE_RUPU,
+ INSTR_RIE_RRUUU, INSTR_RIE_RUPI, INSTR_RIE_RUPU, INSTR_RIE_RRI0,
INSTR_RIL_RI, INSTR_RIL_RP, INSTR_RIL_RU, INSTR_RIL_UP,
INSTR_RIS_R0RDU, INSTR_RIS_R0UU, INSTR_RIS_RURDI, INSTR_RIS_RURDU,
INSTR_RI_RI, INSTR_RI_RP, INSTR_RI_RU, INSTR_RI_UP,
@@ -122,13 +122,14 @@ enum {
INSTR_RRE_RR, INSTR_RRE_RR_OPT,
INSTR_RRF_0UFF, INSTR_RRF_F0FF, INSTR_RRF_F0FF2, INSTR_RRF_F0FR,
INSTR_RRF_FFRU, INSTR_RRF_FUFF, INSTR_RRF_M0RR, INSTR_RRF_R0RR,
- INSTR_RRF_RURR, INSTR_RRF_U0FF, INSTR_RRF_U0RF, INSTR_RRF_U0RR,
- INSTR_RRF_UUFF, INSTR_RRR_F0FF, INSTR_RRS_RRRDU,
+ INSTR_RRF_R0RR2, INSTR_RRF_RURR, INSTR_RRF_U0FF, INSTR_RRF_U0RF,
+ INSTR_RRF_U0RR, INSTR_RRF_UUFF, INSTR_RRR_F0FF, INSTR_RRS_RRRDU,
INSTR_RR_FF, INSTR_RR_R0, INSTR_RR_RR, INSTR_RR_U0, INSTR_RR_UR,
INSTR_RSE_CCRD, INSTR_RSE_RRRD, INSTR_RSE_RURD,
INSTR_RSI_RRP,
INSTR_RSL_R0RD,
INSTR_RSY_AARD, INSTR_RSY_CCRD, INSTR_RSY_RRRD, INSTR_RSY_RURD,
+ INSTR_RSY_RDRM,
INSTR_RS_AARD, INSTR_RS_CCRD, INSTR_RS_R0RD, INSTR_RS_RRRD,
INSTR_RS_RURD,
INSTR_RXE_FRRD, INSTR_RXE_RRRD,
@@ -139,7 +140,7 @@ enum {
INSTR_SIY_IRD, INSTR_SIY_URD,
INSTR_SI_URD,
INSTR_SSE_RDRD,
- INSTR_SSF_RRDRD,
+ INSTR_SSF_RRDRD, INSTR_SSF_RRDRD2,
INSTR_SS_L0RDRD, INSTR_SS_LIRDRD, INSTR_SS_LLRDRD, INSTR_SS_RRRDRD,
INSTR_SS_RRRDRD2, INSTR_SS_RRRDRD3,
INSTR_S_00, INSTR_S_RD,
@@ -152,7 +153,7 @@ struct operand {
};
struct insn {
- const char name[6];
+ const char name[5];
unsigned char opfrag;
unsigned char format;
};
@@ -217,6 +218,7 @@ static const unsigned char formats[][7] = {
[INSTR_RIE_RRP] = { 0xff, R_8,R_12,J16_16,0,0,0 },
[INSTR_RIE_RRUUU] = { 0xff, R_8,R_12,U8_16,U8_24,U8_32,0 },
[INSTR_RIE_RUPI] = { 0xff, R_8,I8_32,U4_12,J16_16,0,0 },
+ [INSTR_RIE_RRI0] = { 0xff, R_8,R_12,I16_16,0,0,0 },
[INSTR_RIL_RI] = { 0x0f, R_8,I32_16,0,0,0,0 },
[INSTR_RIL_RP] = { 0x0f, R_8,J32_16,0,0,0,0 },
[INSTR_RIL_RU] = { 0x0f, R_8,U32_16,0,0,0,0 },
@@ -248,6 +250,7 @@ static const unsigned char formats[][7] = {
[INSTR_RRF_FUFF] = { 0xff, F_24,F_16,F_28,U4_20,0,0 },
[INSTR_RRF_M0RR] = { 0xff, R_24,R_28,M_16,0,0,0 },
[INSTR_RRF_R0RR] = { 0xff, R_24,R_16,R_28,0,0,0 },
+ [INSTR_RRF_R0RR2] = { 0xff, R_24,R_28,R_16,0,0,0 },
[INSTR_RRF_RURR] = { 0xff, R_24,R_28,R_16,U4_20,0,0 },
[INSTR_RRF_U0FF] = { 0xff, F_24,U4_16,F_28,0,0,0 },
[INSTR_RRF_U0RF] = { 0xff, R_24,U4_16,F_28,0,0,0 },
@@ -269,6 +272,7 @@ static const unsigned char formats[][7] = {
[INSTR_RSY_CCRD] = { 0xff, C_8,C_12,D20_20,B_16,0,0 },
[INSTR_RSY_RRRD] = { 0xff, R_8,R_12,D20_20,B_16,0,0 },
[INSTR_RSY_RURD] = { 0xff, R_8,U4_12,D20_20,B_16,0,0 },
+ [INSTR_RSY_RDRM] = { 0xff, R_8,D20_20,B_16,U4_12,0,0 },
[INSTR_RS_AARD] = { 0xff, A_8,A_12,D_20,B_16,0,0 },
[INSTR_RS_CCRD] = { 0xff, C_8,C_12,D_20,B_16,0,0 },
[INSTR_RS_R0RD] = { 0xff, R_8,D_20,B_16,0,0,0 },
@@ -290,6 +294,7 @@ static const unsigned char formats[][7] = {
[INSTR_SI_URD] = { 0xff, D_20,B_16,U8_8,0,0,0 },
[INSTR_SSE_RDRD] = { 0xff, D_20,B_16,D_36,B_32,0,0 },
[INSTR_SSF_RRDRD] = { 0x00, D_20,B_16,D_36,B_32,R_8,0 },
+ [INSTR_SSF_RRDRD2]= { 0x00, R_8,D_20,B_16,D_36,B_32,0 },
[INSTR_SS_L0RDRD] = { 0xff, D_20,L8_8,B_16,D_36,B_32,0 },
[INSTR_SS_LIRDRD] = { 0xff, D_20,L4_8,B_16,D_36,B_32,U4_12 },
[INSTR_SS_LLRDRD] = { 0xff, D_20,L4_8,B_16,D_36,L4_12,B_32 },
@@ -300,6 +305,36 @@ static const unsigned char formats[][7] = {
[INSTR_S_RD] = { 0xff, D_20,B_16,0,0,0,0 },
};
+enum {
+ LONG_INSN_ALGHSIK,
+ LONG_INSN_ALHSIK,
+ LONG_INSN_CLFHSI,
+ LONG_INSN_CLGFRL,
+ LONG_INSN_CLGHRL,
+ LONG_INSN_CLGHSI,
+ LONG_INSN_CLHHSI,
+ LONG_INSN_LLGFRL,
+ LONG_INSN_LLGHRL,
+ LONG_INSN_POPCNT,
+ LONG_INSN_RISBHG,
+ LONG_INSN_RISBLG,
+};
+
+static char *long_insn_name[] = {
+ [LONG_INSN_ALGHSIK] = "alghsik",
+ [LONG_INSN_ALHSIK] = "alhsik",
+ [LONG_INSN_CLFHSI] = "clfhsi",
+ [LONG_INSN_CLGFRL] = "clgfrl",
+ [LONG_INSN_CLGHRL] = "clghrl",
+ [LONG_INSN_CLGHSI] = "clghsi",
+ [LONG_INSN_CLHHSI] = "clhhsi",
+ [LONG_INSN_LLGFRL] = "llgfrl",
+ [LONG_INSN_LLGHRL] = "llghrl",
+ [LONG_INSN_POPCNT] = "popcnt",
+ [LONG_INSN_RISBHG] = "risbhg",
+ [LONG_INSN_RISBLG] = "risblg",
+};
+
static struct insn opcode[] = {
#ifdef CONFIG_64BIT
{ "lmd", 0xef, INSTR_SS_RRRDRD3 },
@@ -881,6 +916,35 @@ static struct insn opcode_b9[] = {
{ "pfmf", 0xaf, INSTR_RRE_RR },
{ "trte", 0xbf, INSTR_RRF_M0RR },
{ "trtre", 0xbd, INSTR_RRF_M0RR },
+ { "ahhhr", 0xc8, INSTR_RRF_R0RR2 },
+ { "shhhr", 0xc9, INSTR_RRF_R0RR2 },
+ { "alhhh", 0xca, INSTR_RRF_R0RR2 },
+ { "alhhl", 0xca, INSTR_RRF_R0RR2 },
+ { "slhhh", 0xcb, INSTR_RRF_R0RR2 },
+ { "chhr ", 0xcd, INSTR_RRE_RR },
+ { "clhhr", 0xcf, INSTR_RRE_RR },
+ { "ahhlr", 0xd8, INSTR_RRF_R0RR2 },
+ { "shhlr", 0xd9, INSTR_RRF_R0RR2 },
+ { "slhhl", 0xdb, INSTR_RRF_R0RR2 },
+ { "chlr", 0xdd, INSTR_RRE_RR },
+ { "clhlr", 0xdf, INSTR_RRE_RR },
+ { { 0, LONG_INSN_POPCNT }, 0xe1, INSTR_RRE_RR },
+ { "locgr", 0xe2, INSTR_RRF_M0RR },
+ { "ngrk", 0xe4, INSTR_RRF_R0RR2 },
+ { "ogrk", 0xe6, INSTR_RRF_R0RR2 },
+ { "xgrk", 0xe7, INSTR_RRF_R0RR2 },
+ { "agrk", 0xe8, INSTR_RRF_R0RR2 },
+ { "sgrk", 0xe9, INSTR_RRF_R0RR2 },
+ { "algrk", 0xea, INSTR_RRF_R0RR2 },
+ { "slgrk", 0xeb, INSTR_RRF_R0RR2 },
+ { "locr", 0xf2, INSTR_RRF_M0RR },
+ { "nrk", 0xf4, INSTR_RRF_R0RR2 },
+ { "ork", 0xf6, INSTR_RRF_R0RR2 },
+ { "xrk", 0xf7, INSTR_RRF_R0RR2 },
+ { "ark", 0xf8, INSTR_RRF_R0RR2 },
+ { "srk", 0xf9, INSTR_RRF_R0RR2 },
+ { "alrk", 0xfa, INSTR_RRF_R0RR2 },
+ { "slrk", 0xfb, INSTR_RRF_R0RR2 },
#endif
{ "kmac", 0x1e, INSTR_RRE_RR },
{ "lrvr", 0x1f, INSTR_RRE_RR },
@@ -949,9 +1013,9 @@ static struct insn opcode_c4[] = {
{ "lgfrl", 0x0c, INSTR_RIL_RP },
{ "lhrl", 0x05, INSTR_RIL_RP },
{ "lghrl", 0x04, INSTR_RIL_RP },
- { "llgfrl", 0x0e, INSTR_RIL_RP },
+ { { 0, LONG_INSN_LLGFRL }, 0x0e, INSTR_RIL_RP },
{ "llhrl", 0x02, INSTR_RIL_RP },
- { "llghrl", 0x06, INSTR_RIL_RP },
+ { { 0, LONG_INSN_LLGHRL }, 0x06, INSTR_RIL_RP },
{ "strl", 0x0f, INSTR_RIL_RP },
{ "stgrl", 0x0b, INSTR_RIL_RP },
{ "sthrl", 0x07, INSTR_RIL_RP },
@@ -968,9 +1032,9 @@ static struct insn opcode_c6[] = {
{ "cghrl", 0x04, INSTR_RIL_RP },
{ "clrl", 0x0f, INSTR_RIL_RP },
{ "clgrl", 0x0a, INSTR_RIL_RP },
- { "clgfrl", 0x0e, INSTR_RIL_RP },
+ { { 0, LONG_INSN_CLGFRL }, 0x0e, INSTR_RIL_RP },
{ "clhrl", 0x07, INSTR_RIL_RP },
- { "clghrl", 0x06, INSTR_RIL_RP },
+ { { 0, LONG_INSN_CLGHRL }, 0x06, INSTR_RIL_RP },
{ "pfdrl", 0x02, INSTR_RIL_UP },
{ "exrl", 0x00, INSTR_RIL_RP },
#endif
@@ -982,6 +1046,20 @@ static struct insn opcode_c8[] = {
{ "mvcos", 0x00, INSTR_SSF_RRDRD },
{ "ectg", 0x01, INSTR_SSF_RRDRD },
{ "csst", 0x02, INSTR_SSF_RRDRD },
+ { "lpd", 0x04, INSTR_SSF_RRDRD2 },
+ { "lpdg ", 0x05, INSTR_SSF_RRDRD2 },
+#endif
+ { "", 0, INSTR_INVALID }
+};
+
+static struct insn opcode_cc[] = {
+#ifdef CONFIG_64BIT
+ { "brcth", 0x06, INSTR_RIL_RP },
+ { "aih", 0x08, INSTR_RIL_RI },
+ { "alsih", 0x0a, INSTR_RIL_RI },
+ { "alsih", 0x0b, INSTR_RIL_RI },
+ { "cih", 0x0d, INSTR_RIL_RI },
+ { "clih ", 0x0f, INSTR_RIL_RI },
#endif
{ "", 0, INSTR_INVALID }
};
@@ -1063,6 +1141,16 @@ static struct insn opcode_e3[] = {
{ "mfy", 0x5c, INSTR_RXY_RRRD },
{ "mhy", 0x7c, INSTR_RXY_RRRD },
{ "pfd", 0x36, INSTR_RXY_URRD },
+ { "lbh", 0xc0, INSTR_RXY_RRRD },
+ { "llch", 0xc2, INSTR_RXY_RRRD },
+ { "stch", 0xc3, INSTR_RXY_RRRD },
+ { "lhh", 0xc4, INSTR_RXY_RRRD },
+ { "llhh", 0xc6, INSTR_RXY_RRRD },
+ { "sthh", 0xc7, INSTR_RXY_RRRD },
+ { "lfh", 0xca, INSTR_RXY_RRRD },
+ { "stfh", 0xcb, INSTR_RXY_RRRD },
+ { "chf", 0xcd, INSTR_RXY_RRRD },
+ { "clhf", 0xcf, INSTR_RXY_RRRD },
#endif
{ "lrv", 0x1e, INSTR_RXY_RRRD },
{ "lrvh", 0x1f, INSTR_RXY_RRRD },
@@ -1080,9 +1168,9 @@ static struct insn opcode_e5[] = {
{ "chhsi", 0x54, INSTR_SIL_RDI },
{ "chsi", 0x5c, INSTR_SIL_RDI },
{ "cghsi", 0x58, INSTR_SIL_RDI },
- { "clhhsi", 0x55, INSTR_SIL_RDU },
- { "clfhsi", 0x5d, INSTR_SIL_RDU },
- { "clghsi", 0x59, INSTR_SIL_RDU },
+ { { 0, LONG_INSN_CLHHSI }, 0x55, INSTR_SIL_RDU },
+ { { 0, LONG_INSN_CLFHSI }, 0x5d, INSTR_SIL_RDU },
+ { { 0, LONG_INSN_CLGHSI }, 0x59, INSTR_SIL_RDU },
{ "mvhhi", 0x44, INSTR_SIL_RDI },
{ "mvhi", 0x4c, INSTR_SIL_RDI },
{ "mvghi", 0x48, INSTR_SIL_RDI },
@@ -1137,6 +1225,24 @@ static struct insn opcode_eb[] = {
{ "alsi", 0x6e, INSTR_SIY_IRD },
{ "algsi", 0x7e, INSTR_SIY_IRD },
{ "ecag", 0x4c, INSTR_RSY_RRRD },
+ { "srak", 0xdc, INSTR_RSY_RRRD },
+ { "slak", 0xdd, INSTR_RSY_RRRD },
+ { "srlk", 0xde, INSTR_RSY_RRRD },
+ { "sllk", 0xdf, INSTR_RSY_RRRD },
+ { "locg", 0xe2, INSTR_RSY_RDRM },
+ { "stocg", 0xe3, INSTR_RSY_RDRM },
+ { "lang", 0xe4, INSTR_RSY_RRRD },
+ { "laog", 0xe6, INSTR_RSY_RRRD },
+ { "laxg", 0xe7, INSTR_RSY_RRRD },
+ { "laag", 0xe8, INSTR_RSY_RRRD },
+ { "laalg", 0xea, INSTR_RSY_RRRD },
+ { "loc", 0xf2, INSTR_RSY_RDRM },
+ { "stoc", 0xf3, INSTR_RSY_RDRM },
+ { "lan", 0xf4, INSTR_RSY_RRRD },
+ { "lao", 0xf6, INSTR_RSY_RRRD },
+ { "lax", 0xf7, INSTR_RSY_RRRD },
+ { "laa", 0xf8, INSTR_RSY_RRRD },
+ { "laal", 0xfa, INSTR_RSY_RRRD },
#endif
{ "rll", 0x1d, INSTR_RSY_RRRD },
{ "mvclu", 0x8e, INSTR_RSY_RRRD },
@@ -1172,6 +1278,12 @@ static struct insn opcode_ec[] = {
{ "rxsbg", 0x57, INSTR_RIE_RRUUU },
{ "rosbg", 0x56, INSTR_RIE_RRUUU },
{ "risbg", 0x55, INSTR_RIE_RRUUU },
+ { { 0, LONG_INSN_RISBLG }, 0x51, INSTR_RIE_RRUUU },
+ { { 0, LONG_INSN_RISBHG }, 0x5D, INSTR_RIE_RRUUU },
+ { "ahik", 0xd8, INSTR_RIE_RRI0 },
+ { "aghik", 0xd9, INSTR_RIE_RRI0 },
+ { { 0, LONG_INSN_ALHSIK }, 0xda, INSTR_RIE_RRI0 },
+ { { 0, LONG_INSN_ALGHSIK }, 0xdb, INSTR_RIE_RRI0 },
#endif
{ "", 0, INSTR_INVALID }
};
@@ -1321,6 +1433,9 @@ static struct insn *find_insn(unsigned char *code)
case 0xc8:
table = opcode_c8;
break;
+ case 0xcc:
+ table = opcode_cc;
+ break;
case 0xe3:
table = opcode_e3;
opfrag = code[5];
@@ -1367,7 +1482,11 @@ static int print_insn(char *buffer, unsigned char *code, unsigned long addr)
ptr = buffer;
insn = find_insn(code);
if (insn) {
- ptr += sprintf(ptr, "%.5s\t", insn->name);
+ if (insn->name[0] == '\0')
+ ptr += sprintf(ptr, "%s\t",
+ long_insn_name[(int) insn->name[1]]);
+ else
+ ptr += sprintf(ptr, "%.5s\t", insn->name);
/* Extract the operands. */
separator = 0;
for (ops = formats[insn->format] + 1, i = 0;
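Since struct insn only reserves five bytes for the mnemonic, names of six or more characters are stored indirectly: name[0] is a NUL byte and name[1] indexes long_insn_name[], which is exactly what the new branch in print_insn() checks. A trimmed, self-contained sketch of that lookup (only two table entries kept for brevity):

#include <stdio.h>

enum { LONG_INSN_POPCNT, LONG_INSN_RISBHG };

static const char *long_insn_name[] = {
	[LONG_INSN_POPCNT] = "popcnt",
	[LONG_INSN_RISBHG] = "risbhg",
};

struct insn {
	char name[5];		/* short mnemonic, or { 0, long-name index } */
	unsigned char opfrag;
};

static void print_mnemonic(const struct insn *insn)
{
	if (insn->name[0] == '\0')
		printf("%s\n", long_insn_name[(int)insn->name[1]]);
	else
		printf("%.5s\n", insn->name);
}

int main(void)
{
	struct insn ark = { "ark", 0xf8 };
	struct insn popcnt = { { 0, LONG_INSN_POPCNT }, 0xe1 };

	print_mnemonic(&ark);		/* prints "ark" */
	print_mnemonic(&popcnt);	/* prints "popcnt" */
	return 0;
}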
diff --git a/arch/s390/kernel/early.c b/arch/s390/kernel/early.c
index c00856ad4e5a..3b7e7dddc324 100644
--- a/arch/s390/kernel/early.c
+++ b/arch/s390/kernel/early.c
@@ -208,7 +208,8 @@ static noinline __init void init_kernel_storage_key(void)
end_pfn = PFN_UP(__pa(&_end));
for (init_pfn = 0 ; init_pfn < end_pfn; init_pfn++)
- page_set_storage_key(init_pfn << PAGE_SHIFT, PAGE_DEFAULT_KEY);
+ page_set_storage_key(init_pfn << PAGE_SHIFT,
+ PAGE_DEFAULT_KEY, 0);
}
static __initdata struct sysinfo_3_2_2 vmms __aligned(PAGE_SIZE);
@@ -255,13 +256,33 @@ static noinline __init void setup_lowcore_early(void)
s390_base_pgm_handler_fn = early_pgm_check_handler;
}
+static noinline __init void setup_facility_list(void)
+{
+ unsigned long nr;
+
+ S390_lowcore.stfl_fac_list = 0;
+ asm volatile(
+ " .insn s,0xb2b10000,0(0)\n" /* stfl */
+ "0:\n"
+ EX_TABLE(0b,0b) : "=m" (S390_lowcore.stfl_fac_list));
+ memcpy(&S390_lowcore.stfle_fac_list, &S390_lowcore.stfl_fac_list, 4);
+ nr = 4; /* # bytes stored by stfl */
+ if (test_facility(7)) {
+ /* More facility bits available with stfle */
+ register unsigned long reg0 asm("0") = MAX_FACILITY_BIT/64 - 1;
+ asm volatile(".insn s,0xb2b00000,%0" /* stfle */
+ : "=m" (S390_lowcore.stfle_fac_list), "+d" (reg0)
+ : : "cc");
+ nr = (reg0 + 1) * 8; /* # bytes stored by stfle */
+ }
+ memset((char *) S390_lowcore.stfle_fac_list + nr, 0,
+ MAX_FACILITY_BIT/8 - nr);
+}
+
static noinline __init void setup_hpage(void)
{
#ifndef CONFIG_DEBUG_PAGEALLOC
- unsigned int facilities;
-
- facilities = stfl();
- if (!(facilities & (1UL << 23)) || !(facilities & (1UL << 29)))
+ if (!test_facility(2) || !test_facility(8))
return;
S390_lowcore.machine_flags |= MACHINE_FLAG_HPAGE;
__ctl_set_bit(0, 23);
@@ -355,18 +376,15 @@ static __init void detect_diag44(void)
static __init void detect_machine_facilities(void)
{
#ifdef CONFIG_64BIT
- unsigned int facilities;
- unsigned long long facility_bits;
-
- facilities = stfl();
- if (facilities & (1 << 28))
+ if (test_facility(3))
S390_lowcore.machine_flags |= MACHINE_FLAG_IDTE;
- if (facilities & (1 << 23))
+ if (test_facility(8))
S390_lowcore.machine_flags |= MACHINE_FLAG_PFMF;
- if (facilities & (1 << 4))
+ if (test_facility(11))
+ S390_lowcore.machine_flags |= MACHINE_FLAG_TOPOLOGY;
+ if (test_facility(27))
S390_lowcore.machine_flags |= MACHINE_FLAG_MVCOS;
- if ((stfle(&facility_bits, 1) > 0) &&
- (facility_bits & (1ULL << (63 - 40))))
+ if (test_facility(40))
S390_lowcore.machine_flags |= MACHINE_FLAG_SPP;
#endif
}
@@ -447,6 +465,7 @@ void __init startup_init(void)
lockdep_off();
sort_main_extable();
setup_lowcore_early();
+ setup_facility_list();
detect_machine_type();
ipl_update_parameters();
setup_boot_command_line();
diff --git a/arch/s390/kernel/entry.S b/arch/s390/kernel/entry.S
index bea9ee37ac9d..1ecc337fb679 100644
--- a/arch/s390/kernel/entry.S
+++ b/arch/s390/kernel/entry.S
@@ -72,25 +72,9 @@ STACK_SIZE = 1 << STACK_SHIFT
l %r1,BASED(.Ltrace_irq_off_caller)
basr %r14,%r1
.endm
-
- .macro TRACE_IRQS_CHECK_ON
- tm SP_PSW(%r15),0x03 # irqs enabled?
- bz BASED(0f)
- TRACE_IRQS_ON
-0:
- .endm
-
- .macro TRACE_IRQS_CHECK_OFF
- tm SP_PSW(%r15),0x03 # irqs enabled?
- bz BASED(0f)
- TRACE_IRQS_OFF
-0:
- .endm
#else
#define TRACE_IRQS_ON
#define TRACE_IRQS_OFF
-#define TRACE_IRQS_CHECK_ON
-#define TRACE_IRQS_CHECK_OFF
#endif
#ifdef CONFIG_LOCKDEP
@@ -198,6 +182,12 @@ STACK_SIZE = 1 << STACK_SHIFT
lpsw \psworg # back to caller
.endm
+ .macro REENABLE_IRQS
+ mvc __SF_EMPTY(1,%r15),SP_PSW(%r15)
+ ni __SF_EMPTY(%r15),0xbf
+ ssm __SF_EMPTY(%r15)
+ .endm
+
/*
* Scheduler resume function, called by switch_to
* gpr2 = (task_struct *) prev
@@ -264,12 +254,11 @@ sysc_do_svc:
bnl BASED(sysc_nr_ok)
lr %r7,%r1 # copy svc number to %r7
sysc_nr_ok:
- mvc SP_ARGS(4,%r15),SP_R7(%r15)
-sysc_do_restart:
sth %r7,SP_SVCNR(%r15)
sll %r7,2 # svc number *4
l %r8,BASED(.Lsysc_table)
tm __TI_flags+2(%r9),_TIF_SYSCALL
+ mvc SP_ARGS(4,%r15),SP_R7(%r15)
l %r8,0(%r7,%r8) # get system call addr.
bnz BASED(sysc_tracesys)
basr %r14,%r8 # call sys_xxxx
@@ -357,7 +346,7 @@ sysc_restart:
l %r7,SP_R2(%r15) # load new svc number
mvc SP_R2(4,%r15),SP_ORIG_R2(%r15) # restore first argument
lm %r2,%r6,SP_R2(%r15) # load svc arguments
- b BASED(sysc_do_restart) # restart svc
+ b BASED(sysc_nr_ok) # restart svc
#
# _TIF_SINGLE_STEP is set, call do_single_step
@@ -390,6 +379,7 @@ sysc_tracesys:
l %r8,0(%r7,%r8)
sysc_tracego:
lm %r3,%r6,SP_R3(%r15)
+ mvc SP_ARGS(4,%r15),SP_R7(%r15)
l %r2,SP_ORIG_R2(%r15)
basr %r14,%r8 # call sys_xxx
st %r2,SP_R2(%r15) # store return value
@@ -440,13 +430,11 @@ kernel_execve:
br %r14
# execve succeeded.
0: stnsm __SF_EMPTY(%r15),0xfc # disable interrupts
- TRACE_IRQS_OFF
l %r15,__LC_KERNEL_STACK # load ksp
s %r15,BASED(.Lc_spsize) # make room for registers & psw
l %r9,__LC_THREAD_INFO
mvc SP_PTREGS(__PT_SIZE,%r15),0(%r12) # copy pt_regs
xc __SF_BACKCHAIN(4,%r15),__SF_BACKCHAIN(%r15)
- TRACE_IRQS_ON
stosm __SF_EMPTY(%r15),0x03 # reenable interrupts
l %r1,BASED(.Lexecve_tail)
basr %r14,%r1
@@ -483,9 +471,10 @@ pgm_check_handler:
UPDATE_VTIME __LC_LAST_UPDATE_TIMER,__LC_EXIT_TIMER,__LC_SYSTEM_TIMER
mvc __LC_LAST_UPDATE_TIMER(8),__LC_SYNC_ENTER_TIMER
pgm_no_vtime:
- TRACE_IRQS_CHECK_OFF
l %r9,__LC_THREAD_INFO # load pointer to thread_info struct
l %r3,__LC_PGM_ILC # load program interruption code
+ l %r4,__LC_TRANS_EXC_CODE
+ REENABLE_IRQS
la %r8,0x7f
nr %r8,%r3
pgm_do_call:
@@ -495,7 +484,6 @@ pgm_do_call:
la %r2,SP_PTREGS(%r15) # address of register-save area
basr %r14,%r7 # branch to interrupt-handler
pgm_exit:
- TRACE_IRQS_CHECK_ON
b BASED(sysc_return)
#
@@ -523,7 +511,6 @@ pgm_per_std:
UPDATE_VTIME __LC_LAST_UPDATE_TIMER,__LC_EXIT_TIMER,__LC_SYSTEM_TIMER
mvc __LC_LAST_UPDATE_TIMER(8),__LC_SYNC_ENTER_TIMER
pgm_no_vtime2:
- TRACE_IRQS_CHECK_OFF
l %r9,__LC_THREAD_INFO # load pointer to thread_info struct
l %r1,__TI_task(%r9)
tm SP_PSW+1(%r15),0x01 # kernel per event ?
@@ -533,6 +520,8 @@ pgm_no_vtime2:
mvc __THREAD_per+__PER_access_id(1,%r1),__LC_PER_ACCESS_ID
oi __TI_flags+3(%r9),_TIF_SINGLE_STEP # set TIF_SINGLE_STEP
l %r3,__LC_PGM_ILC # load program interruption code
+ l %r4,__LC_TRANS_EXC_CODE
+ REENABLE_IRQS
la %r8,0x7f
nr %r8,%r3 # clear per-event-bit and ilc
be BASED(pgm_exit2) # only per or per+check ?
@@ -542,8 +531,6 @@ pgm_no_vtime2:
la %r2,SP_PTREGS(%r15) # address of register-save area
basr %r14,%r7 # branch to interrupt-handler
pgm_exit2:
- TRACE_IRQS_ON
- stosm __SF_EMPTY(%r15),0x03 # reenable interrupts
b BASED(sysc_return)
#
@@ -557,13 +544,11 @@ pgm_svcper:
mvc __LC_LAST_UPDATE_TIMER(8),__LC_SYNC_ENTER_TIMER
lh %r7,0x8a # get svc number from lowcore
l %r9,__LC_THREAD_INFO # load pointer to thread_info struct
- TRACE_IRQS_OFF
l %r8,__TI_task(%r9)
mvc __THREAD_per+__PER_atmid(2,%r8),__LC_PER_ATMID
mvc __THREAD_per+__PER_address(4,%r8),__LC_PER_ADDRESS
mvc __THREAD_per+__PER_access_id(1,%r8),__LC_PER_ACCESS_ID
oi __TI_flags+3(%r9),_TIF_SINGLE_STEP # set TIF_SINGLE_STEP
- TRACE_IRQS_ON
stosm __SF_EMPTY(%r15),0x03 # reenable interrupts
lm %r2,%r6,SP_R2(%r15) # load svc arguments
b BASED(sysc_do_svc)
@@ -572,6 +557,7 @@ pgm_svcper:
# per was called from kernel, must be kprobes
#
kernel_per:
+ REENABLE_IRQS
mvi SP_SVCNR(%r15),0xff # set trap indication to pgm check
mvi SP_SVCNR+1(%r15),0xff
la %r2,SP_PTREGS(%r15) # address of register-save area
@@ -737,7 +723,8 @@ ext_no_vtime:
l %r9,__LC_THREAD_INFO # load pointer to thread_info struct
TRACE_IRQS_OFF
la %r2,SP_PTREGS(%r15) # address of register-save area
- lh %r3,__LC_EXT_INT_CODE # get interruption code
+ l %r3,__LC_CPU_ADDRESS # get cpu address + interruption code
+ l %r4,__LC_EXT_PARAMS # get external parameters
l %r1,BASED(.Ldo_extint)
basr %r14,%r1
b BASED(io_return)
diff --git a/arch/s390/kernel/entry.h b/arch/s390/kernel/entry.h
index ff579b6bde06..95c1dfc4ef31 100644
--- a/arch/s390/kernel/entry.h
+++ b/arch/s390/kernel/entry.h
@@ -5,7 +5,7 @@
#include <linux/signal.h>
#include <asm/ptrace.h>
-typedef void pgm_check_handler_t(struct pt_regs *, long);
+typedef void pgm_check_handler_t(struct pt_regs *, long, unsigned long);
extern pgm_check_handler_t *pgm_check_table[128];
pgm_check_handler_t do_protection_exception;
pgm_check_handler_t do_dat_exception;
@@ -19,7 +19,7 @@ void do_signal(struct pt_regs *regs);
int handle_signal32(unsigned long sig, struct k_sigaction *ka,
siginfo_t *info, sigset_t *oldset, struct pt_regs *regs);
-void do_extint(struct pt_regs *regs, unsigned short code);
+void do_extint(struct pt_regs *regs, unsigned int, unsigned int, unsigned long);
int __cpuinit start_secondary(void *cpuvoid);
void __init startup_init(void);
void die(const char * str, struct pt_regs * regs, long err);
diff --git a/arch/s390/kernel/entry64.S b/arch/s390/kernel/entry64.S
index 8bccec15ea90..8f3e802174db 100644
--- a/arch/s390/kernel/entry64.S
+++ b/arch/s390/kernel/entry64.S
@@ -79,25 +79,9 @@ _TIF_SYSCALL = (_TIF_SYSCALL_TRACE>>8 | _TIF_SYSCALL_AUDIT>>8 | \
basr %r2,%r0
brasl %r14,trace_hardirqs_off_caller
.endm
-
- .macro TRACE_IRQS_CHECK_ON
- tm SP_PSW(%r15),0x03 # irqs enabled?
- jz 0f
- TRACE_IRQS_ON
-0:
- .endm
-
- .macro TRACE_IRQS_CHECK_OFF
- tm SP_PSW(%r15),0x03 # irqs enabled?
- jz 0f
- TRACE_IRQS_OFF
-0:
- .endm
#else
#define TRACE_IRQS_ON
#define TRACE_IRQS_OFF
-#define TRACE_IRQS_CHECK_ON
-#define TRACE_IRQS_CHECK_OFF
#endif
#ifdef CONFIG_LOCKDEP
@@ -207,6 +191,12 @@ _TIF_SYSCALL = (_TIF_SYSCALL_TRACE>>8 | _TIF_SYSCALL_AUDIT>>8 | \
0:
.endm
+ .macro REENABLE_IRQS
+ mvc __SF_EMPTY(1,%r15),SP_PSW(%r15)
+ ni __SF_EMPTY(%r15),0xbf
+ ssm __SF_EMPTY(%r15)
+ .endm
+
/*
* Scheduler resume function, called by switch_to
* gpr2 = (task_struct *) prev
@@ -256,7 +246,6 @@ sysc_saveall:
CREATE_STACK_FRAME __LC_SAVE_AREA
mvc SP_PSW(16,%r15),__LC_SVC_OLD_PSW
mvc SP_ILC(4,%r15),__LC_SVC_ILC
- stg %r7,SP_ARGS(%r15)
lg %r12,__LC_THREAD_INFO # load pointer to thread_info struct
sysc_vtime:
UPDATE_VTIME __LC_EXIT_TIMER,__LC_SYNC_ENTER_TIMER,__LC_USER_TIMER
@@ -284,6 +273,7 @@ sysc_nr_ok:
sysc_noemu:
#endif
tm __TI_flags+6(%r12),_TIF_SYSCALL
+ mvc SP_ARGS(8,%r15),SP_R7(%r15)
lgf %r8,0(%r7,%r10) # load address of system call routine
jnz sysc_tracesys
basr %r14,%r8 # call sys_xxxx
@@ -397,6 +387,7 @@ sysc_tracesys:
lgf %r8,0(%r7,%r10)
sysc_tracego:
lmg %r3,%r6,SP_R3(%r15)
+ mvc SP_ARGS(8,%r15),SP_R7(%r15)
lg %r2,SP_ORIG_R2(%r15)
basr %r14,%r8 # call sys_xxx
stg %r2,SP_R2(%r15) # store return value
@@ -443,14 +434,12 @@ kernel_execve:
br %r14
# execve succeeded.
0: stnsm __SF_EMPTY(%r15),0xfc # disable interrupts
-# TRACE_IRQS_OFF
lg %r15,__LC_KERNEL_STACK # load ksp
aghi %r15,-SP_SIZE # make room for registers & psw
lg %r13,__LC_SVC_NEW_PSW+8
mvc SP_PTREGS(__PT_SIZE,%r15),0(%r12) # copy pt_regs
lg %r12,__LC_THREAD_INFO
xc __SF_BACKCHAIN(8,%r15),__SF_BACKCHAIN(%r15)
-# TRACE_IRQS_ON
stosm __SF_EMPTY(%r15),0x03 # reenable interrupts
brasl %r14,execve_tail
j sysc_return
@@ -490,19 +479,18 @@ pgm_check_handler:
LAST_BREAK
pgm_no_vtime:
HANDLE_SIE_INTERCEPT
- TRACE_IRQS_CHECK_OFF
stg %r11,SP_ARGS(%r15)
lgf %r3,__LC_PGM_ILC # load program interruption code
+ lg %r4,__LC_TRANS_EXC_CODE
+ REENABLE_IRQS
lghi %r8,0x7f
ngr %r8,%r3
-pgm_do_call:
sll %r8,3
larl %r1,pgm_check_table
lg %r1,0(%r8,%r1) # load address of handler routine
la %r2,SP_PTREGS(%r15) # address of register-save area
basr %r14,%r1 # branch to interrupt-handler
pgm_exit:
- TRACE_IRQS_CHECK_ON
j sysc_return
#
@@ -533,7 +521,6 @@ pgm_per_std:
LAST_BREAK
pgm_no_vtime2:
HANDLE_SIE_INTERCEPT
- TRACE_IRQS_CHECK_OFF
lg %r1,__TI_task(%r12)
tm SP_PSW+1(%r15),0x01 # kernel per event ?
jz kernel_per
@@ -542,6 +529,8 @@ pgm_no_vtime2:
mvc __THREAD_per+__PER_access_id(1,%r1),__LC_PER_ACCESS_ID
oi __TI_flags+7(%r12),_TIF_SINGLE_STEP # set TIF_SINGLE_STEP
lgf %r3,__LC_PGM_ILC # load program interruption code
+ lg %r4,__LC_TRANS_EXC_CODE
+ REENABLE_IRQS
lghi %r8,0x7f
ngr %r8,%r3 # clear per-event-bit and ilc
je pgm_exit2
@@ -551,8 +540,6 @@ pgm_no_vtime2:
la %r2,SP_PTREGS(%r15) # address of register-save area
basr %r14,%r1 # branch to interrupt-handler
pgm_exit2:
- TRACE_IRQS_ON
- stosm __SF_EMPTY(%r15),0x03 # reenable interrupts
j sysc_return
#
@@ -568,13 +555,11 @@ pgm_svcper:
UPDATE_VTIME __LC_LAST_UPDATE_TIMER,__LC_EXIT_TIMER,__LC_SYSTEM_TIMER
mvc __LC_LAST_UPDATE_TIMER(8),__LC_SYNC_ENTER_TIMER
LAST_BREAK
- TRACE_IRQS_OFF
lg %r8,__TI_task(%r12)
mvc __THREAD_per+__PER_atmid(2,%r8),__LC_PER_ATMID
mvc __THREAD_per+__PER_address(8,%r8),__LC_PER_ADDRESS
mvc __THREAD_per+__PER_access_id(1,%r8),__LC_PER_ACCESS_ID
oi __TI_flags+7(%r12),_TIF_SINGLE_STEP # set TIF_SINGLE_STEP
- TRACE_IRQS_ON
stosm __SF_EMPTY(%r15),0x03 # reenable interrupts
lmg %r2,%r6,SP_R2(%r15) # load svc arguments
j sysc_do_svc
@@ -583,6 +568,7 @@ pgm_svcper:
# per was called from kernel, must be kprobes
#
kernel_per:
+ REENABLE_IRQS
xc SP_SVCNR(2,%r15),SP_SVCNR(%r15) # clear svc number
la %r2,SP_PTREGS(%r15) # address of register-save area
brasl %r14,do_single_step
@@ -743,8 +729,11 @@ ext_int_handler:
ext_no_vtime:
HANDLE_SIE_INTERCEPT
TRACE_IRQS_OFF
+ lghi %r1,4096
la %r2,SP_PTREGS(%r15) # address of register-save area
- llgh %r3,__LC_EXT_INT_CODE # get interruption code
+ llgf %r3,__LC_CPU_ADDRESS # get cpu address + interruption code
+ llgf %r4,__LC_EXT_PARAMS # get external parameter
+ lg %r5,__LC_EXT_PARAMS2-4096(%r1) # get 64 bit external parameter
brasl %r14,do_extint
j io_return
@@ -966,7 +955,6 @@ cleanup_system_call:
CREATE_STACK_FRAME __LC_SAVE_AREA
mvc SP_PSW(16,%r15),__LC_SVC_OLD_PSW
mvc SP_ILC(4,%r15),__LC_SVC_ILC
- stg %r7,SP_ARGS(%r15)
mvc 8(8,%r12),__LC_THREAD_INFO
cleanup_vtime:
clc __LC_RETURN_PSW+8(8),BASED(cleanup_system_call_insn+24)
diff --git a/arch/s390/kernel/head.S b/arch/s390/kernel/head.S
index db1696e210af..7061398341d5 100644
--- a/arch/s390/kernel/head.S
+++ b/arch/s390/kernel/head.S
@@ -488,7 +488,9 @@ startup:
.align 16
2: .long 0x000a0000,0x8badcccc
#if defined(CONFIG_64BIT)
-#if defined(CONFIG_MARCH_Z10)
+#if defined(CONFIG_MARCH_Z196)
+ .long 0xc100efe3, 0xf46c0000
+#elif defined(CONFIG_MARCH_Z10)
.long 0xc100efe3, 0xf0680000
#elif defined(CONFIG_MARCH_Z9_109)
.long 0xc100efc3, 0x00000000
@@ -498,7 +500,9 @@ startup:
.long 0xc0000000, 0x00000000
#endif
#else
-#if defined(CONFIG_MARCH_Z10)
+#if defined(CONFIG_MARCH_Z196)
+ .long 0x8100c880, 0x00000000
+#elif defined(CONFIG_MARCH_Z10)
.long 0x8100c880, 0x00000000
#elif defined(CONFIG_MARCH_Z9_109)
.long 0x8100c880, 0x00000000
diff --git a/arch/s390/kernel/kprobes.c b/arch/s390/kernel/kprobes.c
index 2a3d2bf6f083..2564793ec2b6 100644
--- a/arch/s390/kernel/kprobes.c
+++ b/arch/s390/kernel/kprobes.c
@@ -30,6 +30,7 @@
#include <asm/sections.h>
#include <linux/module.h>
#include <linux/slab.h>
+#include <linux/hardirq.h>
DEFINE_PER_CPU(struct kprobe *, current_kprobe) = NULL;
DEFINE_PER_CPU(struct kprobe_ctlblk, kprobe_ctlblk);
@@ -212,7 +213,7 @@ static void __kprobes prepare_singlestep(struct kprobe *p, struct pt_regs *regs)
/* Set the PER control regs, turns on single step for this address */
__ctl_load(kprobe_per_regs, 9, 11);
regs->psw.mask |= PSW_MASK_PER;
- regs->psw.mask &= ~(PSW_MASK_IO | PSW_MASK_EXT | PSW_MASK_MCHECK);
+ regs->psw.mask &= ~(PSW_MASK_IO | PSW_MASK_EXT);
}
static void __kprobes save_previous_kprobe(struct kprobe_ctlblk *kcb)
@@ -239,7 +240,7 @@ static void __kprobes set_current_kprobe(struct kprobe *p, struct pt_regs *regs,
__get_cpu_var(current_kprobe) = p;
/* Save the interrupt and per flags */
kcb->kprobe_saved_imask = regs->psw.mask &
- (PSW_MASK_PER | PSW_MASK_IO | PSW_MASK_EXT | PSW_MASK_MCHECK);
+ (PSW_MASK_PER | PSW_MASK_IO | PSW_MASK_EXT);
/* Save the control regs that govern PER */
__ctl_store(kcb->kprobe_saved_ctl, 9, 11);
}
@@ -348,6 +349,7 @@ static int __kprobes trampoline_probe_handler(struct kprobe *p,
struct hlist_node *node, *tmp;
unsigned long flags, orig_ret_address = 0;
unsigned long trampoline_address = (unsigned long)&kretprobe_trampoline;
+ kprobe_opcode_t *correct_ret_addr = NULL;
INIT_HLIST_HEAD(&empty_rp);
kretprobe_hash_lock(current, &head, &flags);
@@ -370,10 +372,32 @@ static int __kprobes trampoline_probe_handler(struct kprobe *p,
/* another task is sharing our hash bucket */
continue;
- if (ri->rp && ri->rp->handler)
- ri->rp->handler(ri, regs);
+ orig_ret_address = (unsigned long)ri->ret_addr;
+
+ if (orig_ret_address != trampoline_address)
+ /*
+ * This is the real return address. Any other
+ * instances associated with this task are for
+ * other calls deeper on the call stack
+ */
+ break;
+ }
+
+ kretprobe_assert(ri, orig_ret_address, trampoline_address);
+
+ correct_ret_addr = ri->ret_addr;
+ hlist_for_each_entry_safe(ri, node, tmp, head, hlist) {
+ if (ri->task != current)
+ /* another task is sharing our hash bucket */
+ continue;
orig_ret_address = (unsigned long)ri->ret_addr;
+
+ if (ri->rp && ri->rp->handler) {
+ ri->ret_addr = correct_ret_addr;
+ ri->rp->handler(ri, regs);
+ }
+
recycle_rp_inst(ri, &empty_rp);
if (orig_ret_address != trampoline_address) {
@@ -385,7 +409,7 @@ static int __kprobes trampoline_probe_handler(struct kprobe *p,
break;
}
}
- kretprobe_assert(ri, orig_ret_address, trampoline_address);
+
regs->psw.addr = orig_ret_address | PSW_ADDR_AMODE;
reset_current_kprobe();
@@ -478,7 +502,7 @@ out:
return 1;
}
-int __kprobes kprobe_fault_handler(struct pt_regs *regs, int trapnr)
+static int __kprobes kprobe_trap_handler(struct pt_regs *regs, int trapnr)
{
struct kprobe *cur = kprobe_running();
struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();
@@ -502,8 +526,9 @@ int __kprobes kprobe_fault_handler(struct pt_regs *regs, int trapnr)
regs->psw.mask |= kcb->kprobe_saved_imask;
if (kcb->kprobe_status == KPROBE_REENTER)
restore_previous_kprobe(kcb);
- else
+ else {
reset_current_kprobe();
+ }
preempt_enable_no_resched();
break;
case KPROBE_HIT_ACTIVE:
@@ -546,6 +571,18 @@ int __kprobes kprobe_fault_handler(struct pt_regs *regs, int trapnr)
return 0;
}
+int __kprobes kprobe_fault_handler(struct pt_regs *regs, int trapnr)
+{
+ int ret;
+
+ if (regs->psw.mask & (PSW_MASK_IO | PSW_MASK_EXT))
+ local_irq_disable();
+ ret = kprobe_trap_handler(regs, trapnr);
+ if (regs->psw.mask & (PSW_MASK_IO | PSW_MASK_EXT))
+ local_irq_restore(regs->psw.mask & ~PSW_MASK_PER);
+ return ret;
+}
+
/*
* Wrapper routine for handling exceptions.
*/
@@ -553,8 +590,12 @@ int __kprobes kprobe_exceptions_notify(struct notifier_block *self,
unsigned long val, void *data)
{
struct die_args *args = (struct die_args *)data;
+ struct pt_regs *regs = args->regs;
int ret = NOTIFY_DONE;
+ if (regs->psw.mask & (PSW_MASK_IO | PSW_MASK_EXT))
+ local_irq_disable();
+
switch (val) {
case DIE_BPT:
if (kprobe_handler(args->regs))
@@ -565,16 +606,17 @@ int __kprobes kprobe_exceptions_notify(struct notifier_block *self,
ret = NOTIFY_STOP;
break;
case DIE_TRAP:
- /* kprobe_running() needs smp_processor_id() */
- preempt_disable();
- if (kprobe_running() &&
- kprobe_fault_handler(args->regs, args->trapnr))
+ if (!preemptible() && kprobe_running() &&
+ kprobe_trap_handler(args->regs, args->trapnr))
ret = NOTIFY_STOP;
- preempt_enable();
break;
default:
break;
}
+
+ if (regs->psw.mask & (PSW_MASK_IO | PSW_MASK_EXT))
+ local_irq_restore(regs->psw.mask & ~PSW_MASK_PER);
+
return ret;
}
@@ -588,6 +630,7 @@ int __kprobes setjmp_pre_handler(struct kprobe *p, struct pt_regs *regs)
/* setup return addr to the jprobe handler routine */
regs->psw.addr = (unsigned long)(jp->entry) | PSW_ADDR_AMODE;
+ regs->psw.mask &= ~(PSW_MASK_IO | PSW_MASK_EXT);
/* r14 is the function return address */
kcb->jprobe_saved_r14 = (unsigned long)regs->gprs[14];
diff --git a/arch/s390/kernel/nmi.c b/arch/s390/kernel/nmi.c
index ac151399ef34..1995c1712fc8 100644
--- a/arch/s390/kernel/nmi.c
+++ b/arch/s390/kernel/nmi.c
@@ -95,7 +95,6 @@ EXPORT_SYMBOL_GPL(s390_handle_mcck);
static int notrace s390_revalidate_registers(struct mci *mci)
{
int kill_task;
- u64 tmpclock;
u64 zero;
void *fpt_save_area, *fpt_creg_save_area;
@@ -214,11 +213,10 @@ static int notrace s390_revalidate_registers(struct mci *mci)
: "0", "cc");
#endif
/* Revalidate clock comparator register */
- asm volatile(
- " stck 0(%1)\n"
- " sckc 0(%1)"
- : "=m" (tmpclock) : "a" (&(tmpclock)) : "cc", "memory");
-
+ if (S390_lowcore.clock_comparator == -1)
+ set_clock_comparator(S390_lowcore.mcck_clock);
+ else
+ set_clock_comparator(S390_lowcore.clock_comparator);
/* Check if old PSW is valid */
if (!mci->wp)
/*
diff --git a/arch/s390/kernel/process.c b/arch/s390/kernel/process.c
index d3a2d1c6438e..ec2e03b22ead 100644
--- a/arch/s390/kernel/process.c
+++ b/arch/s390/kernel/process.c
@@ -76,17 +76,17 @@ unsigned long thread_saved_pc(struct task_struct *tsk)
static void default_idle(void)
{
/* CPU is going idle. */
- local_irq_disable();
- if (need_resched()) {
- local_irq_enable();
- return;
- }
#ifdef CONFIG_HOTPLUG_CPU
if (cpu_is_offline(smp_processor_id())) {
preempt_enable_no_resched();
cpu_die();
}
#endif
+ local_irq_disable();
+ if (need_resched()) {
+ local_irq_enable();
+ return;
+ }
local_mcck_disable();
if (test_thread_flag(TIF_MCCK_PENDING)) {
local_mcck_enable();
diff --git a/arch/s390/kernel/processor.c b/arch/s390/kernel/processor.c
index ecb2d02b02e4..644548e615c6 100644
--- a/arch/s390/kernel/processor.c
+++ b/arch/s390/kernel/processor.c
@@ -42,7 +42,7 @@ void __cpuinit print_cpu_info(void)
struct cpuid *id = &per_cpu(cpu_id, smp_processor_id());
pr_info("Processor %d started, address %d, identification %06X\n",
- S390_lowcore.cpu_nr, S390_lowcore.cpu_addr, id->ident);
+ S390_lowcore.cpu_nr, stap(), id->ident);
}
/*
diff --git a/arch/s390/kernel/ptrace.c b/arch/s390/kernel/ptrace.c
index 83339d33c4b1..019bb714db49 100644
--- a/arch/s390/kernel/ptrace.c
+++ b/arch/s390/kernel/ptrace.c
@@ -343,7 +343,8 @@ poke_user(struct task_struct *child, addr_t addr, addr_t data)
return __poke_user(child, addr, data);
}
-long arch_ptrace(struct task_struct *child, long request, long addr, long data)
+long arch_ptrace(struct task_struct *child, long request,
+ unsigned long addr, unsigned long data)
{
ptrace_area parea;
int copied, ret;
diff --git a/arch/s390/kernel/s390_ext.c b/arch/s390/kernel/s390_ext.c
index 9ce641b5291f..bd1db508e8af 100644
--- a/arch/s390/kernel/s390_ext.c
+++ b/arch/s390/kernel/s390_ext.c
@@ -113,12 +113,15 @@ int unregister_early_external_interrupt(__u16 code, ext_int_handler_t handler,
return 0;
}
-void __irq_entry do_extint(struct pt_regs *regs, unsigned short code)
+void __irq_entry do_extint(struct pt_regs *regs, unsigned int ext_int_code,
+ unsigned int param32, unsigned long param64)
{
+ struct pt_regs *old_regs;
+ unsigned short code;
ext_int_info_t *p;
int index;
- struct pt_regs *old_regs;
+ code = (unsigned short) ext_int_code;
old_regs = set_irq_regs(regs);
s390_idle_check(regs, S390_lowcore.int_clock,
S390_lowcore.async_enter_timer);
@@ -132,7 +135,7 @@ void __irq_entry do_extint(struct pt_regs *regs, unsigned short code)
index = ext_hash(code);
for (p = ext_int_hash[index]; p; p = p->next) {
if (likely(p->code == code))
- p->handler(code);
+ p->handler(ext_int_code, param32, param64);
}
irq_exit();
set_irq_regs(old_regs);
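The do_extint() hunk above is the hub of a signature change that recurs throughout this series: external-interrupt handlers no longer receive a bare __u16 code but (ext_int_code, param32, param64), so the subcode and parameter words arrive as arguments instead of being fished out of the lowcore (compare do_ext_call_interrupt, clock_comparator_interrupt, timing_alert_interrupt, do_cpu_timer_interrupt and pfault_interrupt below). A minimal sketch of a handler ported to the new convention; the handler name is illustrative and not part of the patch:

/*
 * Sketch only: a handler using the new three-argument convention.
 */
static void example_ext_handler(unsigned int ext_int_code,
				unsigned int param32, unsigned long param64)
{
	/* the low 16 bits still carry the interruption code */
	unsigned short code = (unsigned short) ext_int_code;

	/* a subcode, formerly read from S390_lowcore.cpu_addr, is now
	 * ext_int_code >> 16 (cf. pfault_interrupt below); the parameter
	 * words formerly read from the lowcore arrive as param32/param64
	 * (cf. timing_alert_interrupt) */
	(void) code;
	(void) param32;
	(void) param64;
}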
diff --git a/arch/s390/kernel/setup.c b/arch/s390/kernel/setup.c
index c8e8e1354e1d..6f6350826c81 100644
--- a/arch/s390/kernel/setup.c
+++ b/arch/s390/kernel/setup.c
@@ -409,6 +409,9 @@ setup_lowcore(void)
lc->current_task = (unsigned long) init_thread_union.thread_info.task;
lc->thread_info = (unsigned long) &init_thread_union;
lc->machine_flags = S390_lowcore.machine_flags;
+ lc->stfl_fac_list = S390_lowcore.stfl_fac_list;
+ memcpy(lc->stfle_fac_list, S390_lowcore.stfle_fac_list,
+ MAX_FACILITY_BIT/8);
#ifndef CONFIG_64BIT
if (MACHINE_HAS_IEEE) {
lc->extended_save_area_addr = (__u32)
@@ -627,7 +630,8 @@ setup_memory(void)
add_active_range(0, start_chunk, end_chunk);
pfn = max(start_chunk, start_pfn);
for (; pfn < end_chunk; pfn++)
- page_set_storage_key(PFN_PHYS(pfn), PAGE_DEFAULT_KEY);
+ page_set_storage_key(PFN_PHYS(pfn),
+ PAGE_DEFAULT_KEY, 0);
}
psw_set_key(PAGE_DEFAULT_KEY);
@@ -674,12 +678,9 @@ setup_memory(void)
static void __init setup_hwcaps(void)
{
static const int stfl_bits[6] = { 0, 2, 7, 17, 19, 21 };
- unsigned long long facility_list_extended;
- unsigned int facility_list;
struct cpuid cpu_id;
int i;
- facility_list = stfl();
/*
* The store facility list bits numbers as found in the principles
* of operation are numbered with bit 1UL<<31 as number 0 to
@@ -699,11 +700,10 @@ static void __init setup_hwcaps(void)
* HWCAP_S390_ETF3EH bit 8 (22 && 30).
*/
for (i = 0; i < 6; i++)
- if (facility_list & (1UL << (31 - stfl_bits[i])))
+ if (test_facility(stfl_bits[i]))
elf_hwcap |= 1UL << i;
- if ((facility_list & (1UL << (31 - 22)))
- && (facility_list & (1UL << (31 - 30))))
+ if (test_facility(22) && test_facility(30))
elf_hwcap |= HWCAP_S390_ETF3EH;
/*
@@ -719,12 +719,8 @@ static void __init setup_hwcaps(void)
* translated to:
* HWCAP_S390_DFP bit 6 (42 && 44).
*/
- if ((elf_hwcap & (1UL << 2)) &&
- __stfle(&facility_list_extended, 1) > 0) {
- if ((facility_list_extended & (1ULL << (63 - 42)))
- && (facility_list_extended & (1ULL << (63 - 44))))
- elf_hwcap |= HWCAP_S390_DFP;
- }
+ if ((elf_hwcap & (1UL << 2)) && test_facility(42) && test_facility(44))
+ elf_hwcap |= HWCAP_S390_DFP;
/*
* Huge page support HWCAP_S390_HPAGE is bit 7.
@@ -765,6 +761,9 @@ static void __init setup_hwcaps(void)
case 0x2098:
strcpy(elf_platform, "z10");
break;
+ case 0x2817:
+ strcpy(elf_platform, "z196");
+ break;
}
}
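The setup_hwcaps() hunks above replace the open-coded stfl()/__stfle() bit arithmetic with test_facility(), which works on the facility lists that setup_lowcore() and __cpu_up() now copy into each lowcore (stfl_fac_list/stfle_fac_list). The helper itself lives in a header outside this diff; a hedged sketch of what such a test amounts to, using an illustrative name:

static inline int example_test_facility(unsigned long nr)
{
	const unsigned char *fac =
		(const unsigned char *) &S390_lowcore.stfle_fac_list;

	if (nr >= MAX_FACILITY_BIT)
		return 0;
	/* facility bit 0 is the leftmost bit of the list */
	return (fac[nr >> 3] & (0x80 >> (nr & 7))) != 0;
}

With that, the old check "facility_list & (1UL << (31 - 22))" becomes simply test_facility(22).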
diff --git a/arch/s390/kernel/smp.c b/arch/s390/kernel/smp.c
index 8127ebd59c4d..94cf510b8fe1 100644
--- a/arch/s390/kernel/smp.c
+++ b/arch/s390/kernel/smp.c
@@ -156,7 +156,8 @@ void smp_send_stop(void)
* cpus are handled.
*/
-static void do_ext_call_interrupt(__u16 code)
+static void do_ext_call_interrupt(unsigned int ext_int_code,
+ unsigned int param32, unsigned long param64)
{
unsigned long bits;
@@ -593,6 +594,8 @@ int __cpuinit __cpu_up(unsigned int cpu)
cpu_lowcore->kernel_asce = S390_lowcore.kernel_asce;
cpu_lowcore->machine_flags = S390_lowcore.machine_flags;
cpu_lowcore->ftrace_func = S390_lowcore.ftrace_func;
+ memcpy(cpu_lowcore->stfle_fac_list, S390_lowcore.stfle_fac_list,
+ MAX_FACILITY_BIT/8);
eieio();
while (sigp(cpu, sigp_restart) == sigp_busy)
diff --git a/arch/s390/kernel/sysinfo.c b/arch/s390/kernel/sysinfo.c
index a0ffc7717ed6..5c9e439bf3f6 100644
--- a/arch/s390/kernel/sysinfo.c
+++ b/arch/s390/kernel/sysinfo.c
@@ -15,6 +15,7 @@
#include <asm/ebcdic.h>
#include <asm/sysinfo.h>
#include <asm/cpcmd.h>
+#include <asm/topology.h>
/* Sigh, math-emu. Don't ask. */
#include <asm/sfp-util.h>
@@ -74,6 +75,44 @@ static int stsi_1_1_1(struct sysinfo_1_1_1 *info, char *page, int len)
"Model Temp. Capacity: %-16.16s %08u\n",
info->model_temp_cap,
*(u32 *) info->model_temp_cap_rating);
+ if (info->cai) {
+ len += sprintf(page + len,
+ "Capacity Adj. Ind.: %d\n",
+ info->cai);
+ len += sprintf(page + len, "Capacity Ch. Reason: %d\n",
+ info->ccr);
+ }
+ return len;
+}
+
+static int stsi_15_1_x(struct sysinfo_15_1_x *info, char *page, int len)
+{
+ static int max_mnest;
+ int i, rc;
+
+ len += sprintf(page + len, "\n");
+ if (!MACHINE_HAS_TOPOLOGY)
+ return len;
+ if (max_mnest) {
+ stsi(info, 15, 1, max_mnest);
+ } else {
+ for (max_mnest = 6; max_mnest > 1; max_mnest--) {
+ rc = stsi(info, 15, 1, max_mnest);
+ if (rc != -ENOSYS)
+ break;
+ }
+ }
+ len += sprintf(page + len, "CPU Topology HW: ");
+ for (i = 0; i < TOPOLOGY_NR_MAG; i++)
+ len += sprintf(page + len, " %d", info->mag[i]);
+ len += sprintf(page + len, "\n");
+#ifdef CONFIG_SCHED_MC
+ store_topology(info);
+ len += sprintf(page + len, "CPU Topology SW: ");
+ for (i = 0; i < TOPOLOGY_NR_MAG; i++)
+ len += sprintf(page + len, " %d", info->mag[i]);
+ len += sprintf(page + len, "\n");
+#endif
return len;
}
@@ -87,7 +126,6 @@ static int stsi_1_2_2(struct sysinfo_1_2_2 *info, char *page, int len)
ext = (struct sysinfo_1_2_2_extension *)
((unsigned long) info + info->acc_offset);
- len += sprintf(page + len, "\n");
len += sprintf(page + len, "CPUs Total: %d\n",
info->cpus_total);
len += sprintf(page + len, "CPUs Configured: %d\n",
@@ -217,6 +255,9 @@ static int proc_read_sysinfo(char *page, char **start,
len = stsi_1_1_1((struct sysinfo_1_1_1 *) info, page, len);
if (level >= 1)
+ len = stsi_15_1_x((struct sysinfo_15_1_x *) info, page, len);
+
+ if (level >= 1)
len = stsi_1_2_2((struct sysinfo_1_2_2 *) info, page, len);
if (level >= 2)
diff --git a/arch/s390/kernel/time.c b/arch/s390/kernel/time.c
index 2896cac9c14a..f754a6dc4f94 100644
--- a/arch/s390/kernel/time.c
+++ b/arch/s390/kernel/time.c
@@ -155,7 +155,9 @@ void init_cpu_timer(void)
__ctl_set_bit(0, 4);
}
-static void clock_comparator_interrupt(__u16 code)
+static void clock_comparator_interrupt(unsigned int ext_int_code,
+ unsigned int param32,
+ unsigned long param64)
{
if (S390_lowcore.clock_comparator == -1ULL)
set_clock_comparator(S390_lowcore.clock_comparator);
@@ -164,14 +166,13 @@ static void clock_comparator_interrupt(__u16 code)
static void etr_timing_alert(struct etr_irq_parm *);
static void stp_timing_alert(struct stp_irq_parm *);
-static void timing_alert_interrupt(__u16 code)
+static void timing_alert_interrupt(unsigned int ext_int_code,
+ unsigned int param32, unsigned long param64)
{
- if (S390_lowcore.ext_params & 0x00c40000)
- etr_timing_alert((struct etr_irq_parm *)
- &S390_lowcore.ext_params);
- if (S390_lowcore.ext_params & 0x00038000)
- stp_timing_alert((struct stp_irq_parm *)
- &S390_lowcore.ext_params);
+ if (param32 & 0x00c40000)
+ etr_timing_alert((struct etr_irq_parm *) &param32);
+ if (param32 & 0x00038000)
+ stp_timing_alert((struct stp_irq_parm *) &param32);
}
static void etr_reset(void);
diff --git a/arch/s390/kernel/topology.c b/arch/s390/kernel/topology.c
index 13559c993847..94b06c31fc8a 100644
--- a/arch/s390/kernel/topology.c
+++ b/arch/s390/kernel/topology.c
@@ -18,55 +18,20 @@
#include <linux/cpuset.h>
#include <asm/delay.h>
#include <asm/s390_ext.h>
-#include <asm/sysinfo.h>
-
-#define CPU_BITS 64
-#define NR_MAG 6
#define PTF_HORIZONTAL (0UL)
#define PTF_VERTICAL (1UL)
#define PTF_CHECK (2UL)
-struct tl_cpu {
- unsigned char reserved0[4];
- unsigned char :6;
- unsigned char pp:2;
- unsigned char reserved1;
- unsigned short origin;
- unsigned long mask[CPU_BITS / BITS_PER_LONG];
-};
-
-struct tl_container {
- unsigned char reserved[7];
- unsigned char id;
-};
-
-union tl_entry {
- unsigned char nl;
- struct tl_cpu cpu;
- struct tl_container container;
-};
-
-struct tl_info {
- unsigned char reserved0[2];
- unsigned short length;
- unsigned char mag[NR_MAG];
- unsigned char reserved1;
- unsigned char mnest;
- unsigned char reserved2[4];
- union tl_entry tle[0];
-};
-
struct mask_info {
struct mask_info *next;
unsigned char id;
cpumask_t mask;
};
-static int topology_enabled;
+static int topology_enabled = 1;
static void topology_work_fn(struct work_struct *work);
-static struct tl_info *tl_info;
-static int machine_has_topology;
+static struct sysinfo_15_1_x *tl_info;
static struct timer_list topology_timer;
static void set_topology_timer(void);
static DECLARE_WORK(topology_work, topology_work_fn);
@@ -88,8 +53,10 @@ static cpumask_t cpu_group_map(struct mask_info *info, unsigned int cpu)
cpumask_t mask;
cpus_clear(mask);
- if (!topology_enabled || !machine_has_topology)
- return cpu_possible_map;
+ if (!topology_enabled || !MACHINE_HAS_TOPOLOGY) {
+ cpumask_copy(&mask, cpumask_of(cpu));
+ return mask;
+ }
while (info) {
if (cpu_isset(cpu, info->mask)) {
mask = info->mask;
@@ -102,18 +69,18 @@ static cpumask_t cpu_group_map(struct mask_info *info, unsigned int cpu)
return mask;
}
-static void add_cpus_to_mask(struct tl_cpu *tl_cpu, struct mask_info *book,
- struct mask_info *core)
+static void add_cpus_to_mask(struct topology_cpu *tl_cpu,
+ struct mask_info *book, struct mask_info *core)
{
unsigned int cpu;
- for (cpu = find_first_bit(&tl_cpu->mask[0], CPU_BITS);
- cpu < CPU_BITS;
- cpu = find_next_bit(&tl_cpu->mask[0], CPU_BITS, cpu + 1))
+ for (cpu = find_first_bit(&tl_cpu->mask[0], TOPOLOGY_CPU_BITS);
+ cpu < TOPOLOGY_CPU_BITS;
+ cpu = find_next_bit(&tl_cpu->mask[0], TOPOLOGY_CPU_BITS, cpu + 1))
{
unsigned int rcpu, lcpu;
- rcpu = CPU_BITS - 1 - cpu + tl_cpu->origin;
+ rcpu = TOPOLOGY_CPU_BITS - 1 - cpu + tl_cpu->origin;
for_each_present_cpu(lcpu) {
if (cpu_logical_map(lcpu) != rcpu)
continue;
@@ -146,15 +113,14 @@ static void clear_masks(void)
#endif
}
-static union tl_entry *next_tle(union tl_entry *tle)
+static union topology_entry *next_tle(union topology_entry *tle)
{
- if (tle->nl)
- return (union tl_entry *)((struct tl_container *)tle + 1);
- else
- return (union tl_entry *)((struct tl_cpu *)tle + 1);
+ if (!tle->nl)
+ return (union topology_entry *)((struct topology_cpu *)tle + 1);
+ return (union topology_entry *)((struct topology_container *)tle + 1);
}
-static void tl_to_cores(struct tl_info *info)
+static void tl_to_cores(struct sysinfo_15_1_x *info)
{
#ifdef CONFIG_SCHED_BOOK
struct mask_info *book = &book_info;
@@ -162,13 +128,13 @@ static void tl_to_cores(struct tl_info *info)
struct mask_info *book = NULL;
#endif
struct mask_info *core = &core_info;
- union tl_entry *tle, *end;
+ union topology_entry *tle, *end;
spin_lock_irq(&topology_lock);
clear_masks();
tle = info->tle;
- end = (union tl_entry *)((unsigned long)info + info->length);
+ end = (union topology_entry *)((unsigned long)info + info->length);
while (tle < end) {
switch (tle->nl) {
#ifdef CONFIG_SCHED_BOOK
@@ -186,7 +152,6 @@ static void tl_to_cores(struct tl_info *info)
break;
default:
clear_masks();
- machine_has_topology = 0;
goto out;
}
tle = next_tle(tle);
@@ -223,7 +188,7 @@ int topology_set_cpu_management(int fc)
int cpu;
int rc;
- if (!machine_has_topology)
+ if (!MACHINE_HAS_TOPOLOGY)
return -EOPNOTSUPP;
if (fc)
rc = ptf(PTF_VERTICAL);
@@ -251,7 +216,7 @@ static void update_cpu_core_map(void)
spin_unlock_irqrestore(&topology_lock, flags);
}
-static void store_topology(struct tl_info *info)
+void store_topology(struct sysinfo_15_1_x *info)
{
#ifdef CONFIG_SCHED_BOOK
int rc;
@@ -265,11 +230,11 @@ static void store_topology(struct tl_info *info)
int arch_update_cpu_topology(void)
{
- struct tl_info *info = tl_info;
+ struct sysinfo_15_1_x *info = tl_info;
struct sys_device *sysdev;
int cpu;
- if (!machine_has_topology) {
+ if (!MACHINE_HAS_TOPOLOGY) {
update_cpu_core_map();
topology_update_polarization_simple();
return 0;
@@ -311,9 +276,9 @@ static void set_topology_timer(void)
static int __init early_parse_topology(char *p)
{
- if (strncmp(p, "on", 2))
+ if (strncmp(p, "off", 3))
return 0;
- topology_enabled = 1;
+ topology_enabled = 0;
return 0;
}
early_param("topology", early_parse_topology);
@@ -323,7 +288,7 @@ static int __init init_topology_update(void)
int rc;
rc = 0;
- if (!machine_has_topology) {
+ if (!MACHINE_HAS_TOPOLOGY) {
topology_update_polarization_simple();
goto out;
}
@@ -335,13 +300,14 @@ out:
}
__initcall(init_topology_update);
-static void alloc_masks(struct tl_info *info, struct mask_info *mask, int offset)
+static void alloc_masks(struct sysinfo_15_1_x *info, struct mask_info *mask,
+ int offset)
{
int i, nr_masks;
- nr_masks = info->mag[NR_MAG - offset];
+ nr_masks = info->mag[TOPOLOGY_NR_MAG - offset];
for (i = 0; i < info->mnest - offset; i++)
- nr_masks *= info->mag[NR_MAG - offset - 1 - i];
+ nr_masks *= info->mag[TOPOLOGY_NR_MAG - offset - 1 - i];
nr_masks = max(nr_masks, 1);
for (i = 0; i < nr_masks; i++) {
mask->next = alloc_bootmem(sizeof(struct mask_info));
@@ -351,21 +317,16 @@ static void alloc_masks(struct tl_info *info, struct mask_info *mask, int offset
void __init s390_init_cpu_topology(void)
{
- unsigned long long facility_bits;
- struct tl_info *info;
+ struct sysinfo_15_1_x *info;
int i;
- if (stfle(&facility_bits, 1) <= 0)
- return;
- if (!(facility_bits & (1ULL << 52)) || !(facility_bits & (1ULL << 61)))
+ if (!MACHINE_HAS_TOPOLOGY)
return;
- machine_has_topology = 1;
-
tl_info = alloc_bootmem_pages(PAGE_SIZE);
info = tl_info;
store_topology(info);
pr_info("The CPU configuration topology of the machine is:");
- for (i = 0; i < NR_MAG; i++)
+ for (i = 0; i < TOPOLOGY_NR_MAG; i++)
printk(" %d", info->mag[i]);
printk(" / %d\n", info->mnest);
alloc_masks(info, &core_info, 2);
diff --git a/arch/s390/kernel/traps.c b/arch/s390/kernel/traps.c
index 5d8f0f3d0250..70640822621a 100644
--- a/arch/s390/kernel/traps.c
+++ b/arch/s390/kernel/traps.c
@@ -329,27 +329,19 @@ int is_valid_bugaddr(unsigned long addr)
return 1;
}
-static void __kprobes inline do_trap(long interruption_code, int signr,
- char *str, struct pt_regs *regs,
- siginfo_t *info)
+static inline void __kprobes do_trap(long pgm_int_code, int signr, char *str,
+ struct pt_regs *regs, siginfo_t *info)
{
- /*
- * We got all needed information from the lowcore and can
- * now safely switch on interrupts.
- */
- if (regs->psw.mask & PSW_MASK_PSTATE)
- local_irq_enable();
-
- if (notify_die(DIE_TRAP, str, regs, interruption_code,
- interruption_code, signr) == NOTIFY_STOP)
+ if (notify_die(DIE_TRAP, str, regs, pgm_int_code,
+ pgm_int_code, signr) == NOTIFY_STOP)
return;
if (regs->psw.mask & PSW_MASK_PSTATE) {
struct task_struct *tsk = current;
- tsk->thread.trap_no = interruption_code & 0xffff;
+ tsk->thread.trap_no = pgm_int_code & 0xffff;
force_sig_info(signr, info, tsk);
- report_user_fault(regs, interruption_code, signr);
+ report_user_fault(regs, pgm_int_code, signr);
} else {
const struct exception_table_entry *fixup;
fixup = search_exception_tables(regs->psw.addr & PSW_ADDR_INSN);
@@ -361,14 +353,16 @@ static void __kprobes inline do_trap(long interruption_code, int signr,
btt = report_bug(regs->psw.addr & PSW_ADDR_INSN, regs);
if (btt == BUG_TRAP_TYPE_WARN)
return;
- die(str, regs, interruption_code);
+ die(str, regs, pgm_int_code);
}
}
}
-static inline void __user *get_check_address(struct pt_regs *regs)
+static inline void __user *get_psw_address(struct pt_regs *regs,
+ long pgm_int_code)
{
- return (void __user *)((regs->psw.addr-S390_lowcore.pgm_ilc) & PSW_ADDR_INSN);
+ return (void __user *)
+ ((regs->psw.addr - (pgm_int_code >> 16)) & PSW_ADDR_INSN);
}
void __kprobes do_single_step(struct pt_regs *regs)
@@ -381,57 +375,57 @@ void __kprobes do_single_step(struct pt_regs *regs)
force_sig(SIGTRAP, current);
}
-static void default_trap_handler(struct pt_regs * regs, long interruption_code)
+static void default_trap_handler(struct pt_regs *regs, long pgm_int_code,
+ unsigned long trans_exc_code)
{
if (regs->psw.mask & PSW_MASK_PSTATE) {
- local_irq_enable();
- report_user_fault(regs, interruption_code, SIGSEGV);
+ report_user_fault(regs, pgm_int_code, SIGSEGV);
do_exit(SIGSEGV);
} else
- die("Unknown program exception", regs, interruption_code);
+ die("Unknown program exception", regs, pgm_int_code);
}
-#define DO_ERROR_INFO(signr, str, name, sicode, siaddr) \
-static void name(struct pt_regs * regs, long interruption_code) \
+#define DO_ERROR_INFO(name, signr, sicode, str) \
+static void name(struct pt_regs *regs, long pgm_int_code, \
+ unsigned long trans_exc_code) \
{ \
siginfo_t info; \
info.si_signo = signr; \
info.si_errno = 0; \
info.si_code = sicode; \
- info.si_addr = siaddr; \
- do_trap(interruption_code, signr, str, regs, &info); \
+ info.si_addr = get_psw_address(regs, pgm_int_code); \
+ do_trap(pgm_int_code, signr, str, regs, &info); \
}
-DO_ERROR_INFO(SIGILL, "addressing exception", addressing_exception,
- ILL_ILLADR, get_check_address(regs))
-DO_ERROR_INFO(SIGILL, "execute exception", execute_exception,
- ILL_ILLOPN, get_check_address(regs))
-DO_ERROR_INFO(SIGFPE, "fixpoint divide exception", divide_exception,
- FPE_INTDIV, get_check_address(regs))
-DO_ERROR_INFO(SIGFPE, "fixpoint overflow exception", overflow_exception,
- FPE_INTOVF, get_check_address(regs))
-DO_ERROR_INFO(SIGFPE, "HFP overflow exception", hfp_overflow_exception,
- FPE_FLTOVF, get_check_address(regs))
-DO_ERROR_INFO(SIGFPE, "HFP underflow exception", hfp_underflow_exception,
- FPE_FLTUND, get_check_address(regs))
-DO_ERROR_INFO(SIGFPE, "HFP significance exception", hfp_significance_exception,
- FPE_FLTRES, get_check_address(regs))
-DO_ERROR_INFO(SIGFPE, "HFP divide exception", hfp_divide_exception,
- FPE_FLTDIV, get_check_address(regs))
-DO_ERROR_INFO(SIGFPE, "HFP square root exception", hfp_sqrt_exception,
- FPE_FLTINV, get_check_address(regs))
-DO_ERROR_INFO(SIGILL, "operand exception", operand_exception,
- ILL_ILLOPN, get_check_address(regs))
-DO_ERROR_INFO(SIGILL, "privileged operation", privileged_op,
- ILL_PRVOPC, get_check_address(regs))
-DO_ERROR_INFO(SIGILL, "special operation exception", special_op_exception,
- ILL_ILLOPN, get_check_address(regs))
-DO_ERROR_INFO(SIGILL, "translation exception", translation_exception,
- ILL_ILLOPN, get_check_address(regs))
-
-static inline void
-do_fp_trap(struct pt_regs *regs, void __user *location,
- int fpc, long interruption_code)
+DO_ERROR_INFO(addressing_exception, SIGILL, ILL_ILLADR,
+ "addressing exception")
+DO_ERROR_INFO(execute_exception, SIGILL, ILL_ILLOPN,
+ "execute exception")
+DO_ERROR_INFO(divide_exception, SIGFPE, FPE_INTDIV,
+ "fixpoint divide exception")
+DO_ERROR_INFO(overflow_exception, SIGFPE, FPE_INTOVF,
+ "fixpoint overflow exception")
+DO_ERROR_INFO(hfp_overflow_exception, SIGFPE, FPE_FLTOVF,
+ "HFP overflow exception")
+DO_ERROR_INFO(hfp_underflow_exception, SIGFPE, FPE_FLTUND,
+ "HFP underflow exception")
+DO_ERROR_INFO(hfp_significance_exception, SIGFPE, FPE_FLTRES,
+ "HFP significance exception")
+DO_ERROR_INFO(hfp_divide_exception, SIGFPE, FPE_FLTDIV,
+ "HFP divide exception")
+DO_ERROR_INFO(hfp_sqrt_exception, SIGFPE, FPE_FLTINV,
+ "HFP square root exception")
+DO_ERROR_INFO(operand_exception, SIGILL, ILL_ILLOPN,
+ "operand exception")
+DO_ERROR_INFO(privileged_op, SIGILL, ILL_PRVOPC,
+ "privileged operation")
+DO_ERROR_INFO(special_op_exception, SIGILL, ILL_ILLOPN,
+ "special operation exception")
+DO_ERROR_INFO(translation_exception, SIGILL, ILL_ILLOPN,
+ "translation exception")
+
+static inline void do_fp_trap(struct pt_regs *regs, void __user *location,
+ int fpc, long pgm_int_code)
{
siginfo_t si;
@@ -453,26 +447,19 @@ do_fp_trap(struct pt_regs *regs, void __user *location,
else if (fpc & 0x0800) /* inexact */
si.si_code = FPE_FLTRES;
}
- current->thread.ieee_instruction_pointer = (addr_t) location;
- do_trap(interruption_code, SIGFPE,
+ do_trap(pgm_int_code, SIGFPE,
"floating point exception", regs, &si);
}
-static void illegal_op(struct pt_regs * regs, long interruption_code)
+static void illegal_op(struct pt_regs *regs, long pgm_int_code,
+ unsigned long trans_exc_code)
{
siginfo_t info;
__u8 opcode[6];
__u16 __user *location;
int signal = 0;
- location = get_check_address(regs);
-
- /*
- * We got all needed information from the lowcore and can
- * now safely switch on interrupts.
- */
- if (regs->psw.mask & PSW_MASK_PSTATE)
- local_irq_enable();
+ location = get_psw_address(regs, pgm_int_code);
if (regs->psw.mask & PSW_MASK_PSTATE) {
if (get_user(*((__u16 *) opcode), (__u16 __user *) location))
@@ -512,7 +499,7 @@ static void illegal_op(struct pt_regs * regs, long interruption_code)
* If we get an illegal op in kernel mode, send it through the
* kprobes notifier. If kprobes doesn't pick it up, SIGILL
*/
- if (notify_die(DIE_BPT, "bpt", regs, interruption_code,
+ if (notify_die(DIE_BPT, "bpt", regs, pgm_int_code,
3, SIGTRAP) != NOTIFY_STOP)
signal = SIGILL;
}
@@ -520,13 +507,13 @@ static void illegal_op(struct pt_regs * regs, long interruption_code)
#ifdef CONFIG_MATHEMU
if (signal == SIGFPE)
do_fp_trap(regs, location,
- current->thread.fp_regs.fpc, interruption_code);
+ current->thread.fp_regs.fpc, pgm_int_code);
else if (signal == SIGSEGV) {
info.si_signo = signal;
info.si_errno = 0;
info.si_code = SEGV_MAPERR;
info.si_addr = (void __user *) location;
- do_trap(interruption_code, signal,
+ do_trap(pgm_int_code, signal,
"user address fault", regs, &info);
} else
#endif
@@ -535,28 +522,22 @@ static void illegal_op(struct pt_regs * regs, long interruption_code)
info.si_errno = 0;
info.si_code = ILL_ILLOPC;
info.si_addr = (void __user *) location;
- do_trap(interruption_code, signal,
+ do_trap(pgm_int_code, signal,
"illegal operation", regs, &info);
}
}
#ifdef CONFIG_MATHEMU
-asmlinkage void
-specification_exception(struct pt_regs * regs, long interruption_code)
+asmlinkage void specification_exception(struct pt_regs *regs,
+ long pgm_int_code,
+ unsigned long trans_exc_code)
{
__u8 opcode[6];
__u16 __user *location = NULL;
int signal = 0;
- location = (__u16 __user *) get_check_address(regs);
-
- /*
- * We got all needed information from the lowcore and can
- * now safely switch on interrupts.
- */
- if (regs->psw.mask & PSW_MASK_PSTATE)
- local_irq_enable();
+ location = (__u16 __user *) get_psw_address(regs, pgm_int_code);
if (regs->psw.mask & PSW_MASK_PSTATE) {
get_user(*((__u16 *) opcode), location);
@@ -592,35 +573,29 @@ specification_exception(struct pt_regs * regs, long interruption_code)
if (signal == SIGFPE)
do_fp_trap(regs, location,
- current->thread.fp_regs.fpc, interruption_code);
+ current->thread.fp_regs.fpc, pgm_int_code);
else if (signal) {
siginfo_t info;
info.si_signo = signal;
info.si_errno = 0;
info.si_code = ILL_ILLOPN;
info.si_addr = location;
- do_trap(interruption_code, signal,
+ do_trap(pgm_int_code, signal,
"specification exception", regs, &info);
}
}
#else
-DO_ERROR_INFO(SIGILL, "specification exception", specification_exception,
- ILL_ILLOPN, get_check_address(regs));
+DO_ERROR_INFO(specification_exception, SIGILL, ILL_ILLOPN,
+ "specification exception");
#endif
-static void data_exception(struct pt_regs * regs, long interruption_code)
+static void data_exception(struct pt_regs *regs, long pgm_int_code,
+ unsigned long trans_exc_code)
{
__u16 __user *location;
int signal = 0;
- location = get_check_address(regs);
-
- /*
- * We got all needed information from the lowcore and can
- * now safely switch on interrupts.
- */
- if (regs->psw.mask & PSW_MASK_PSTATE)
- local_irq_enable();
+ location = get_psw_address(regs, pgm_int_code);
if (MACHINE_HAS_IEEE)
asm volatile("stfpc %0" : "=m" (current->thread.fp_regs.fpc));
@@ -686,19 +661,19 @@ static void data_exception(struct pt_regs * regs, long interruption_code)
signal = SIGILL;
if (signal == SIGFPE)
do_fp_trap(regs, location,
- current->thread.fp_regs.fpc, interruption_code);
+ current->thread.fp_regs.fpc, pgm_int_code);
else if (signal) {
siginfo_t info;
info.si_signo = signal;
info.si_errno = 0;
info.si_code = ILL_ILLOPN;
info.si_addr = location;
- do_trap(interruption_code, signal,
- "data exception", regs, &info);
+ do_trap(pgm_int_code, signal, "data exception", regs, &info);
}
}
-static void space_switch_exception(struct pt_regs * regs, long int_code)
+static void space_switch_exception(struct pt_regs *regs, long pgm_int_code,
+ unsigned long trans_exc_code)
{
siginfo_t info;
@@ -709,8 +684,8 @@ static void space_switch_exception(struct pt_regs * regs, long int_code)
info.si_signo = SIGILL;
info.si_errno = 0;
info.si_code = ILL_PRVOPC;
- info.si_addr = get_check_address(regs);
- do_trap(int_code, SIGILL, "space switch event", regs, &info);
+ info.si_addr = get_psw_address(regs, pgm_int_code);
+ do_trap(pgm_int_code, SIGILL, "space switch event", regs, &info);
}
asmlinkage void kernel_stack_overflow(struct pt_regs * regs)
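For reference, writing out one invocation of the reworked DO_ERROR_INFO() macro shows what each generated handler now looks like; the expansion below is done by hand from the macro in the hunk above, not code added by the patch:

static void privileged_op(struct pt_regs *regs, long pgm_int_code,
			  unsigned long trans_exc_code)
{
	siginfo_t info;

	info.si_signo = SIGILL;
	info.si_errno = 0;
	info.si_code = ILL_PRVOPC;
	info.si_addr = get_psw_address(regs, pgm_int_code);
	do_trap(pgm_int_code, SIGILL, "privileged operation", regs, &info);
}

The faulting address is now derived inside the handler via get_psw_address(regs, pgm_int_code), so the per-handler siaddr argument of the old macro is gone.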
diff --git a/arch/s390/kernel/vdso.c b/arch/s390/kernel/vdso.c
index 6b83870507d5..e3150dd2fe74 100644
--- a/arch/s390/kernel/vdso.c
+++ b/arch/s390/kernel/vdso.c
@@ -84,11 +84,7 @@ struct vdso_data *vdso_data = &vdso_data_store.data;
*/
static void vdso_init_data(struct vdso_data *vd)
{
- unsigned int facility_list;
-
- facility_list = stfl();
- vd->ectg_available =
- user_mode != HOME_SPACE_MODE && (facility_list & 1);
+ vd->ectg_available = user_mode != HOME_SPACE_MODE && test_facility(31);
}
#ifdef CONFIG_64BIT
diff --git a/arch/s390/kernel/vdso32/clock_getres.S b/arch/s390/kernel/vdso32/clock_getres.S
index 9532c4e6a9d2..36aaa25d05da 100644
--- a/arch/s390/kernel/vdso32/clock_getres.S
+++ b/arch/s390/kernel/vdso32/clock_getres.S
@@ -19,9 +19,9 @@
.type __kernel_clock_getres,@function
__kernel_clock_getres:
.cfi_startproc
- chi %r2,CLOCK_REALTIME
+ chi %r2,__CLOCK_REALTIME
je 0f
- chi %r2,CLOCK_MONOTONIC
+ chi %r2,__CLOCK_MONOTONIC
jne 3f
0: ltr %r3,%r3
jz 2f /* res == NULL */
@@ -34,6 +34,6 @@ __kernel_clock_getres:
3: lhi %r1,__NR_clock_getres /* fallback to svc */
svc 0
br %r14
-4: .long CLOCK_REALTIME_RES
+4: .long __CLOCK_REALTIME_RES
.cfi_endproc
.size __kernel_clock_getres,.-__kernel_clock_getres
diff --git a/arch/s390/kernel/vdso32/clock_gettime.S b/arch/s390/kernel/vdso32/clock_gettime.S
index 969643954273..b2224e0b974c 100644
--- a/arch/s390/kernel/vdso32/clock_gettime.S
+++ b/arch/s390/kernel/vdso32/clock_gettime.S
@@ -21,9 +21,9 @@ __kernel_clock_gettime:
.cfi_startproc
basr %r5,0
0: al %r5,21f-0b(%r5) /* get &_vdso_data */
- chi %r2,CLOCK_REALTIME
+ chi %r2,__CLOCK_REALTIME
je 10f
- chi %r2,CLOCK_MONOTONIC
+ chi %r2,__CLOCK_MONOTONIC
jne 19f
/* CLOCK_MONOTONIC */
diff --git a/arch/s390/kernel/vdso64/clock_getres.S b/arch/s390/kernel/vdso64/clock_getres.S
index 9ce8caafdb4e..176e1f75f9aa 100644
--- a/arch/s390/kernel/vdso64/clock_getres.S
+++ b/arch/s390/kernel/vdso64/clock_getres.S
@@ -19,9 +19,9 @@
.type __kernel_clock_getres,@function
__kernel_clock_getres:
.cfi_startproc
- cghi %r2,CLOCK_REALTIME
+ cghi %r2,__CLOCK_REALTIME
je 0f
- cghi %r2,CLOCK_MONOTONIC
+ cghi %r2,__CLOCK_MONOTONIC
je 0f
cghi %r2,-2 /* CLOCK_THREAD_CPUTIME_ID for this thread */
jne 2f
@@ -39,6 +39,6 @@ __kernel_clock_getres:
2: lghi %r1,__NR_clock_getres /* fallback to svc */
svc 0
br %r14
-3: .quad CLOCK_REALTIME_RES
+3: .quad __CLOCK_REALTIME_RES
.cfi_endproc
.size __kernel_clock_getres,.-__kernel_clock_getres
diff --git a/arch/s390/kernel/vdso64/clock_gettime.S b/arch/s390/kernel/vdso64/clock_gettime.S
index f40467884a03..d46c95ed5f19 100644
--- a/arch/s390/kernel/vdso64/clock_gettime.S
+++ b/arch/s390/kernel/vdso64/clock_gettime.S
@@ -20,11 +20,11 @@
__kernel_clock_gettime:
.cfi_startproc
larl %r5,_vdso_data
- cghi %r2,CLOCK_REALTIME
+ cghi %r2,__CLOCK_REALTIME
je 4f
cghi %r2,-2 /* CLOCK_THREAD_CPUTIME_ID for this thread */
je 9f
- cghi %r2,CLOCK_MONOTONIC
+ cghi %r2,__CLOCK_MONOTONIC
jne 12f
/* CLOCK_MONOTONIC */
diff --git a/arch/s390/kernel/vtime.c b/arch/s390/kernel/vtime.c
index 3479f1b0d4e0..7eff9b7347c0 100644
--- a/arch/s390/kernel/vtime.c
+++ b/arch/s390/kernel/vtime.c
@@ -19,6 +19,7 @@
#include <linux/kernel_stat.h>
#include <linux/rcupdate.h>
#include <linux/posix-timers.h>
+#include <linux/cpu.h>
#include <asm/s390_ext.h>
#include <asm/timer.h>
@@ -314,7 +315,8 @@ static void do_callbacks(struct list_head *cb_list)
/*
* Handler for the virtual CPU timer.
*/
-static void do_cpu_timer_interrupt(__u16 error_code)
+static void do_cpu_timer_interrupt(unsigned int ext_int_code,
+ unsigned int param32, unsigned long param64)
{
struct vtimer_queue *vq;
struct vtimer_list *event, *tmp;
@@ -565,6 +567,23 @@ void init_cpu_vtimer(void)
__ctl_set_bit(0,10);
}
+static int __cpuinit s390_nohz_notify(struct notifier_block *self,
+ unsigned long action, void *hcpu)
+{
+ struct s390_idle_data *idle;
+ long cpu = (long) hcpu;
+
+ idle = &per_cpu(s390_idle, cpu);
+ switch (action) {
+ case CPU_DYING:
+ case CPU_DYING_FROZEN:
+ idle->nohz_delay = 0;
+ default:
+ break;
+ }
+ return NOTIFY_OK;
+}
+
void __init vtime_init(void)
{
/* request the cpu timer external interrupt */
@@ -573,5 +592,6 @@ void __init vtime_init(void)
/* Enable cpu timer interrupts on the boot cpu. */
init_cpu_vtimer();
+ cpu_notifier(s390_nohz_notify, 0);
}
diff --git a/arch/s390/kvm/kvm-s390.c b/arch/s390/kvm/kvm-s390.c
index 4fe68650535c..985d825494f1 100644
--- a/arch/s390/kvm/kvm-s390.c
+++ b/arch/s390/kvm/kvm-s390.c
@@ -740,8 +740,8 @@ static int __init kvm_s390_init(void)
kvm_exit();
return -ENOMEM;
}
- stfle(facilities, 1);
- facilities[0] &= 0xff00fff3f0700000ULL;
+ memcpy(facilities, S390_lowcore.stfle_fac_list, 16);
+ facilities[0] &= 0xff00fff3f47c0000ULL;
return 0;
}
diff --git a/arch/s390/kvm/priv.c b/arch/s390/kvm/priv.c
index 44205507717c..9194a4b52b22 100644
--- a/arch/s390/kvm/priv.c
+++ b/arch/s390/kvm/priv.c
@@ -154,12 +154,12 @@ static int handle_chsc(struct kvm_vcpu *vcpu)
static int handle_stfl(struct kvm_vcpu *vcpu)
{
- unsigned int facility_list = stfl();
+ unsigned int facility_list;
int rc;
vcpu->stat.instruction_stfl++;
/* only pass the facility bits, which we can handle */
- facility_list &= 0xff00fff3;
+ facility_list = S390_lowcore.stfl_fac_list & 0xff00fff3;
rc = copy_to_guest(vcpu, offsetof(struct _lowcore, stfl_fac_list),
&facility_list, sizeof(facility_list));
diff --git a/arch/s390/lib/delay.c b/arch/s390/lib/delay.c
index 752b362bf651..7c37ec359ec2 100644
--- a/arch/s390/lib/delay.c
+++ b/arch/s390/lib/delay.c
@@ -29,17 +29,21 @@ static void __udelay_disabled(unsigned long long usecs)
{
unsigned long mask, cr0, cr0_saved;
u64 clock_saved;
+ u64 end;
+ mask = psw_kernel_bits | PSW_MASK_WAIT | PSW_MASK_EXT;
+ end = get_clock() + (usecs << 12);
clock_saved = local_tick_disable();
- set_clock_comparator(get_clock() + (usecs << 12));
__ctl_store(cr0_saved, 0, 0);
cr0 = (cr0_saved & 0xffff00e0) | 0x00000800;
__ctl_load(cr0 , 0, 0);
- mask = psw_kernel_bits | PSW_MASK_WAIT | PSW_MASK_EXT;
lockdep_off();
- trace_hardirqs_on();
- __load_psw_mask(mask);
- local_irq_disable();
+ do {
+ set_clock_comparator(end);
+ trace_hardirqs_on();
+ __load_psw_mask(mask);
+ local_irq_disable();
+ } while (get_clock() < end);
lockdep_on();
__ctl_load(cr0_saved, 0, 0);
local_tick_enable(clock_saved);
diff --git a/arch/s390/mm/Makefile b/arch/s390/mm/Makefile
index eec054484419..6fbc6f3fbdf2 100644
--- a/arch/s390/mm/Makefile
+++ b/arch/s390/mm/Makefile
@@ -3,6 +3,6 @@
#
obj-y := init.o fault.o extmem.o mmap.o vmem.o pgtable.o maccess.o \
- page-states.o
+ page-states.o gup.o
obj-$(CONFIG_CMM) += cmm.o
obj-$(CONFIG_HUGETLB_PAGE) += hugetlbpage.o
diff --git a/arch/s390/mm/cmm.c b/arch/s390/mm/cmm.c
index a9550dca3e4b..c66ffd8dbbb7 100644
--- a/arch/s390/mm/cmm.c
+++ b/arch/s390/mm/cmm.c
@@ -23,7 +23,10 @@
#include <asm/pgalloc.h>
#include <asm/diag.h>
-static char *sender = "VMRMSVM";
+#ifdef CONFIG_CMM_IUCV
+static char *cmm_default_sender = "VMRMSVM";
+#endif
+static char *sender;
module_param(sender, charp, 0400);
MODULE_PARM_DESC(sender,
"Guest name that may send SMSG messages (default VMRMSVM)");
@@ -440,6 +443,8 @@ static int __init cmm_init(void)
int len = strlen(sender);
while (len--)
sender[len] = toupper(sender[len]);
+ } else {
+ sender = cmm_default_sender;
}
rc = smsg_register_callback(SMSG_PREFIX, cmm_smsg_target);
diff --git a/arch/s390/mm/fault.c b/arch/s390/mm/fault.c
index 2505b2ea0ef1..fe5701e9efbf 100644
--- a/arch/s390/mm/fault.c
+++ b/arch/s390/mm/fault.c
@@ -52,6 +52,14 @@
#define VM_FAULT_BADMAP 0x020000
#define VM_FAULT_BADACCESS 0x040000
+static unsigned long store_indication;
+
+void fault_init(void)
+{
+ if (test_facility(2) && test_facility(75))
+ store_indication = 0xc00;
+}
+
static inline int notify_page_fault(struct pt_regs *regs)
{
int ret = 0;
@@ -199,14 +207,21 @@ static noinline void do_sigbus(struct pt_regs *regs, long int_code,
unsigned long trans_exc_code)
{
struct task_struct *tsk = current;
+ unsigned long address;
+ struct siginfo si;
/*
* Send a sigbus, regardless of whether we were in kernel
* or user mode.
*/
- tsk->thread.prot_addr = trans_exc_code & __FAIL_ADDR_MASK;
+ address = trans_exc_code & __FAIL_ADDR_MASK;
+ tsk->thread.prot_addr = address;
tsk->thread.trap_no = int_code;
- force_sig(SIGBUS, tsk);
+ si.si_signo = SIGBUS;
+ si.si_errno = 0;
+ si.si_code = BUS_ADRERR;
+ si.si_addr = (void __user *) address;
+ force_sig_info(SIGBUS, &si, tsk);
}
#ifdef CONFIG_S390_EXEC_PROTECT
@@ -266,10 +281,11 @@ static noinline void do_fault_error(struct pt_regs *regs, long int_code,
if (fault & VM_FAULT_OOM)
pagefault_out_of_memory();
else if (fault & VM_FAULT_SIGBUS) {
- do_sigbus(regs, int_code, trans_exc_code);
/* Kernel mode? Handle exceptions or die */
if (!(regs->psw.mask & PSW_MASK_PSTATE))
do_no_context(regs, int_code, trans_exc_code);
+ else
+ do_sigbus(regs, int_code, trans_exc_code);
} else
BUG();
break;
@@ -294,7 +310,7 @@ static inline int do_exception(struct pt_regs *regs, int access,
struct mm_struct *mm;
struct vm_area_struct *vma;
unsigned long address;
- int fault;
+ int fault, write;
if (notify_page_fault(regs))
return 0;
@@ -312,12 +328,6 @@ static inline int do_exception(struct pt_regs *regs, int access,
goto out;
address = trans_exc_code & __FAIL_ADDR_MASK;
- /*
- * When we get here, the fault happened in the current
- * task's user address space, so we can switch on the
- * interrupts again and then search the VMAs
- */
- local_irq_enable();
perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS, 1, 0, regs, address);
down_read(&mm->mmap_sem);
@@ -348,8 +358,10 @@ static inline int do_exception(struct pt_regs *regs, int access,
* make sure we exit gracefully rather than endlessly redo
* the fault.
*/
- fault = handle_mm_fault(mm, vma, address,
- (access == VM_WRITE) ? FAULT_FLAG_WRITE : 0);
+ write = (access == VM_WRITE ||
+ (trans_exc_code & store_indication) == 0x400) ?
+ FAULT_FLAG_WRITE : 0;
+ fault = handle_mm_fault(mm, vma, address, write);
if (unlikely(fault & VM_FAULT_ERROR))
goto out_up;
@@ -374,20 +386,20 @@ out:
return fault;
}
-void __kprobes do_protection_exception(struct pt_regs *regs, long int_code)
+void __kprobes do_protection_exception(struct pt_regs *regs, long pgm_int_code,
+ unsigned long trans_exc_code)
{
- unsigned long trans_exc_code = S390_lowcore.trans_exc_code;
int fault;
/* Protection exception is suppressing, decrement psw address. */
- regs->psw.addr -= (int_code >> 16);
+ regs->psw.addr -= (pgm_int_code >> 16);
/*
* Check for low-address protection. This needs to be treated
* as a special case because the translation exception code
* field is not guaranteed to contain valid data in this case.
*/
if (unlikely(!(trans_exc_code & 4))) {
- do_low_address(regs, int_code, trans_exc_code);
+ do_low_address(regs, pgm_int_code, trans_exc_code);
return;
}
fault = do_exception(regs, VM_WRITE, trans_exc_code);
@@ -395,9 +407,9 @@ void __kprobes do_protection_exception(struct pt_regs *regs, long int_code)
do_fault_error(regs, 4, trans_exc_code, fault);
}
-void __kprobes do_dat_exception(struct pt_regs *regs, long int_code)
+void __kprobes do_dat_exception(struct pt_regs *regs, long pgm_int_code,
+ unsigned long trans_exc_code)
{
- unsigned long trans_exc_code = S390_lowcore.trans_exc_code;
int access, fault;
access = VM_READ | VM_EXEC | VM_WRITE;
@@ -408,21 +420,19 @@ void __kprobes do_dat_exception(struct pt_regs *regs, long int_code)
#endif
fault = do_exception(regs, access, trans_exc_code);
if (unlikely(fault))
- do_fault_error(regs, int_code & 255, trans_exc_code, fault);
+ do_fault_error(regs, pgm_int_code & 255, trans_exc_code, fault);
}
#ifdef CONFIG_64BIT
-void __kprobes do_asce_exception(struct pt_regs *regs, long int_code)
+void __kprobes do_asce_exception(struct pt_regs *regs, long pgm_int_code,
+ unsigned long trans_exc_code)
{
- unsigned long trans_exc_code = S390_lowcore.trans_exc_code;
struct mm_struct *mm = current->mm;
struct vm_area_struct *vma;
if (unlikely(!user_space_fault(trans_exc_code) || in_atomic() || !mm))
goto no_context;
- local_irq_enable();
-
down_read(&mm->mmap_sem);
vma = find_vma(mm, trans_exc_code & __FAIL_ADDR_MASK);
up_read(&mm->mmap_sem);
@@ -434,16 +444,16 @@ void __kprobes do_asce_exception(struct pt_regs *regs, long int_code)
/* User mode accesses just cause a SIGSEGV */
if (regs->psw.mask & PSW_MASK_PSTATE) {
- do_sigsegv(regs, int_code, SEGV_MAPERR, trans_exc_code);
+ do_sigsegv(regs, pgm_int_code, SEGV_MAPERR, trans_exc_code);
return;
}
no_context:
- do_no_context(regs, int_code, trans_exc_code);
+ do_no_context(regs, pgm_int_code, trans_exc_code);
}
#endif
-int __handle_fault(unsigned long uaddr, unsigned long int_code, int write_user)
+int __handle_fault(unsigned long uaddr, unsigned long pgm_int_code, int write)
{
struct pt_regs regs;
int access, fault;
@@ -454,14 +464,14 @@ int __handle_fault(unsigned long uaddr, unsigned long int_code, int write_user)
regs.psw.addr = (unsigned long) __builtin_return_address(0);
regs.psw.addr |= PSW_ADDR_AMODE;
uaddr &= PAGE_MASK;
- access = write_user ? VM_WRITE : VM_READ;
+ access = write ? VM_WRITE : VM_READ;
fault = do_exception(&regs, access, uaddr | 2);
if (unlikely(fault)) {
if (fault & VM_FAULT_OOM) {
pagefault_out_of_memory();
fault = 0;
} else if (fault & VM_FAULT_SIGBUS)
- do_sigbus(&regs, int_code, uaddr);
+ do_sigbus(&regs, pgm_int_code, uaddr);
}
return fault ? -EFAULT : 0;
}
@@ -527,7 +537,8 @@ void pfault_fini(void)
: : "a" (&refbk), "m" (refbk) : "cc");
}
-static void pfault_interrupt(__u16 int_code)
+static void pfault_interrupt(unsigned int ext_int_code,
+ unsigned int param32, unsigned long param64)
{
struct task_struct *tsk;
__u16 subcode;
@@ -538,14 +549,18 @@ static void pfault_interrupt(__u16 int_code)
* in the 'cpu address' field associated with the
* external interrupt.
*/
- subcode = S390_lowcore.cpu_addr;
+ subcode = ext_int_code >> 16;
if ((subcode & 0xff00) != __SUBCODE_MASK)
return;
/*
* Get the token (= address of the task structure of the affected task).
*/
- tsk = *(struct task_struct **) __LC_PFAULT_INTPARM;
+#ifdef CONFIG_64BIT
+ tsk = *(struct task_struct **) param64;
+#else
+ tsk = *(struct task_struct **) param32;
+#endif
if (subcode & 0x0080) {
/* signal bit is set -> a page has been swapped in by VM */
diff --git a/arch/s390/mm/gup.c b/arch/s390/mm/gup.c
new file mode 100644
index 000000000000..45b405ca2567
--- /dev/null
+++ b/arch/s390/mm/gup.c
@@ -0,0 +1,224 @@
+/*
+ * Lockless get_user_pages_fast for s390
+ *
+ * Copyright IBM Corp. 2010
+ * Author(s): Martin Schwidefsky <schwidefsky@de.ibm.com>
+ */
+#include <linux/sched.h>
+#include <linux/mm.h>
+#include <linux/hugetlb.h>
+#include <linux/vmstat.h>
+#include <linux/pagemap.h>
+#include <linux/rwsem.h>
+#include <asm/pgtable.h>
+
+/*
+ * The performance-critical leaf functions are made noinline, otherwise gcc
+ * inlines everything into a single function, which results in too much
+ * register pressure.
+ */
+static inline int gup_pte_range(pmd_t *pmdp, pmd_t pmd, unsigned long addr,
+ unsigned long end, int write, struct page **pages, int *nr)
+{
+ unsigned long mask;
+ pte_t *ptep, pte;
+ struct page *page;
+
+ mask = (write ? _PAGE_RO : 0) | _PAGE_INVALID | _PAGE_SPECIAL;
+
+ ptep = ((pte_t *) pmd_deref(pmd)) + pte_index(addr);
+ do {
+ pte = *ptep;
+ barrier();
+ if ((pte_val(pte) & mask) != 0)
+ return 0;
+ VM_BUG_ON(!pfn_valid(pte_pfn(pte)));
+ page = pte_page(pte);
+ if (!page_cache_get_speculative(page))
+ return 0;
+ if (unlikely(pte_val(pte) != pte_val(*ptep))) {
+ put_page(page);
+ return 0;
+ }
+ pages[*nr] = page;
+ (*nr)++;
+
+ } while (ptep++, addr += PAGE_SIZE, addr != end);
+
+ return 1;
+}
+
+static inline int gup_huge_pmd(pmd_t *pmdp, pmd_t pmd, unsigned long addr,
+ unsigned long end, int write, struct page **pages, int *nr)
+{
+ unsigned long mask, result;
+ struct page *head, *page;
+ int refs;
+
+ result = write ? 0 : _SEGMENT_ENTRY_RO;
+ mask = result | _SEGMENT_ENTRY_INV;
+ if ((pmd_val(pmd) & mask) != result)
+ return 0;
+ VM_BUG_ON(!pfn_valid(pmd_val(pmd) >> PAGE_SHIFT));
+
+ refs = 0;
+ head = pmd_page(pmd);
+ page = head + ((addr & ~PMD_MASK) >> PAGE_SHIFT);
+ do {
+ VM_BUG_ON(compound_head(page) != head);
+ pages[*nr] = page;
+ (*nr)++;
+ page++;
+ refs++;
+ } while (addr += PAGE_SIZE, addr != end);
+
+ if (!page_cache_add_speculative(head, refs)) {
+ *nr -= refs;
+ return 0;
+ }
+
+ if (unlikely(pmd_val(pmd) != pmd_val(*pmdp))) {
+ *nr -= refs;
+ while (refs--)
+ put_page(head);
+ return 0;
+ }
+
+ return 1;
+}
+
+
+static inline int gup_pmd_range(pud_t *pudp, pud_t pud, unsigned long addr,
+ unsigned long end, int write, struct page **pages, int *nr)
+{
+ unsigned long next;
+ pmd_t *pmdp, pmd;
+
+ pmdp = (pmd_t *) pudp;
+#ifdef CONFIG_64BIT
+ if ((pud_val(pud) & _REGION_ENTRY_TYPE_MASK) == _REGION_ENTRY_TYPE_R3)
+ pmdp = (pmd_t *) pud_deref(pud);
+ pmdp += pmd_index(addr);
+#endif
+ do {
+ pmd = *pmdp;
+ barrier();
+ next = pmd_addr_end(addr, end);
+ if (pmd_none(pmd))
+ return 0;
+ if (unlikely(pmd_huge(pmd))) {
+ if (!gup_huge_pmd(pmdp, pmd, addr, next,
+ write, pages, nr))
+ return 0;
+ } else if (!gup_pte_range(pmdp, pmd, addr, next,
+ write, pages, nr))
+ return 0;
+ } while (pmdp++, addr = next, addr != end);
+
+ return 1;
+}
+
+static inline int gup_pud_range(pgd_t *pgdp, pgd_t pgd, unsigned long addr,
+ unsigned long end, int write, struct page **pages, int *nr)
+{
+ unsigned long next;
+ pud_t *pudp, pud;
+
+ pudp = (pud_t *) pgdp;
+#ifdef CONFIG_64BIT
+ if ((pgd_val(pgd) & _REGION_ENTRY_TYPE_MASK) == _REGION_ENTRY_TYPE_R2)
+ pudp = (pud_t *) pgd_deref(pgd);
+ pudp += pud_index(addr);
+#endif
+ do {
+ pud = *pudp;
+ barrier();
+ next = pud_addr_end(addr, end);
+ if (pud_none(pud))
+ return 0;
+ if (!gup_pmd_range(pudp, pud, addr, next, write, pages, nr))
+ return 0;
+ } while (pudp++, addr = next, addr != end);
+
+ return 1;
+}
+
+/**
+ * get_user_pages_fast() - pin user pages in memory
+ * @start: starting user address
+ * @nr_pages: number of pages from start to pin
+ * @write: whether pages will be written to
+ * @pages: array that receives pointers to the pages pinned.
+ * Should be at least nr_pages long.
+ *
+ * Attempt to pin user pages in memory without taking mm->mmap_sem.
+ * If not successful, it will fall back to taking the lock and
+ * calling get_user_pages().
+ *
+ * Returns number of pages pinned. This may be fewer than the number
+ * requested. If nr_pages is 0 or negative, returns 0. If no pages
+ * were pinned, returns -errno.
+ */
+int get_user_pages_fast(unsigned long start, int nr_pages, int write,
+ struct page **pages)
+{
+ struct mm_struct *mm = current->mm;
+ unsigned long addr, len, end;
+ unsigned long next;
+ pgd_t *pgdp, pgd;
+ int nr = 0;
+
+ start &= PAGE_MASK;
+ addr = start;
+ len = (unsigned long) nr_pages << PAGE_SHIFT;
+ end = start + len;
+ if (end < start)
+ goto slow_irqon;
+
+ /*
+ * local_irq_disable() doesn't prevent pagetable teardown, but does
+ * prevent the pagetables from being freed on s390.
+ *
+ * So long as we atomically load page table pointers versus teardown,
+ * we can follow the address down to the page and take a ref on it.
+ */
+ local_irq_disable();
+ pgdp = pgd_offset(mm, addr);
+ do {
+ pgd = *pgdp;
+ barrier();
+ next = pgd_addr_end(addr, end);
+ if (pgd_none(pgd))
+ goto slow;
+ if (!gup_pud_range(pgdp, pgd, addr, next, write, pages, &nr))
+ goto slow;
+ } while (pgdp++, addr = next, addr != end);
+ local_irq_enable();
+
+ VM_BUG_ON(nr != (end - start) >> PAGE_SHIFT);
+ return nr;
+
+ {
+ int ret;
+slow:
+ local_irq_enable();
+slow_irqon:
+ /* Try to get the remaining pages with get_user_pages */
+ start += nr << PAGE_SHIFT;
+ pages += nr;
+
+ down_read(&mm->mmap_sem);
+ ret = get_user_pages(current, mm, start,
+ (end - start) >> PAGE_SHIFT, write, 0, pages, NULL);
+ up_read(&mm->mmap_sem);
+
+ /* Have to be a bit careful with return values */
+ if (nr > 0) {
+ if (ret < 0)
+ ret = nr;
+ else
+ ret += nr;
+ }
+
+ return ret;
+ }
+}
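A short usage sketch for the get_user_pages_fast() interface documented in the kernel-doc above; the wrapper function, buffer address and write flag are illustrative only:

static int example_pin_buffer(unsigned long uaddr, int nr_pages,
			      struct page **pages)
{
	int pinned, i;

	pinned = get_user_pages_fast(uaddr, nr_pages, 1 /* write */, pages);
	if (pinned < 0)
		return pinned;		/* nothing was pinned */

	/* ... access the (possibly fewer than nr_pages) pinned pages ... */

	for (i = 0; i < pinned; i++)
		put_page(pages[i]);	/* drop the references again */
	return pinned;
}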
diff --git a/arch/s390/mm/hugetlbpage.c b/arch/s390/mm/hugetlbpage.c
index f28c43d2f61d..639cd21f2218 100644
--- a/arch/s390/mm/hugetlbpage.c
+++ b/arch/s390/mm/hugetlbpage.c
@@ -68,7 +68,7 @@ void arch_release_hugepage(struct page *page)
ptep = (pte_t *) page[1].index;
if (!ptep)
return;
- pte_free(&init_mm, ptep);
+ page_table_free(&init_mm, (unsigned long *) ptep);
page[1].index = 0;
}
diff --git a/arch/s390/mm/init.c b/arch/s390/mm/init.c
index 94b8ba2ec857..bb409332a484 100644
--- a/arch/s390/mm/init.c
+++ b/arch/s390/mm/init.c
@@ -38,13 +38,54 @@
#include <asm/tlbflush.h>
#include <asm/sections.h>
-DEFINE_PER_CPU(struct mmu_gather, mmu_gathers);
-
pgd_t swapper_pg_dir[PTRS_PER_PGD] __attribute__((__aligned__(PAGE_SIZE)));
-char empty_zero_page[PAGE_SIZE] __attribute__((__aligned__(PAGE_SIZE)));
+unsigned long empty_zero_page, zero_page_mask;
EXPORT_SYMBOL(empty_zero_page);
+static unsigned long setup_zero_pages(void)
+{
+ struct cpuid cpu_id;
+ unsigned int order;
+ unsigned long size;
+ struct page *page;
+ int i;
+
+ get_cpu_id(&cpu_id);
+ switch (cpu_id.machine) {
+ case 0x9672: /* g5 */
+ case 0x2064: /* z900 */
+ case 0x2066: /* z900 */
+ case 0x2084: /* z990 */
+ case 0x2086: /* z990 */
+ case 0x2094: /* z9-109 */
+ case 0x2096: /* z9-109 */
+ order = 0;
+ break;
+ case 0x2097: /* z10 */
+ case 0x2098: /* z10 */
+ default:
+ order = 2;
+ break;
+ }
+
+ empty_zero_page = __get_free_pages(GFP_KERNEL | __GFP_ZERO, order);
+ if (!empty_zero_page)
+ panic("Out of memory in setup_zero_pages");
+
+ page = virt_to_page((void *) empty_zero_page);
+ split_page(page, order);
+ for (i = 1 << order; i > 0; i--) {
+ SetPageReserved(page);
+ page++;
+ }
+
+ size = PAGE_SIZE << order;
+ zero_page_mask = (size - 1) & PAGE_MASK;
+
+ return 1UL << order;
+}
+
/*
* paging_init() sets up the page tables
*/
@@ -83,6 +124,7 @@ void __init paging_init(void)
#endif
max_zone_pfns[ZONE_NORMAL] = max_low_pfn;
free_area_init_nodes(max_zone_pfns);
+ fault_init();
}
void __init mem_init(void)
@@ -92,14 +134,12 @@ void __init mem_init(void)
max_mapnr = num_physpages = max_low_pfn;
high_memory = (void *) __va(max_low_pfn * PAGE_SIZE);
- /* clear the zero-page */
- memset(empty_zero_page, 0, PAGE_SIZE);
-
/* Setup guest page hinting */
cmma_init();
/* this will put all low memory onto the freelists */
totalram_pages += free_all_bootmem();
+ totalram_pages -= setup_zero_pages(); /* Setup zeroed pages. */
reservedpages = 0;
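setup_zero_pages() above allocates an order-2 block of zeroed pages on z10 and newer models and derives zero_page_mask from the block size; the intent, presumably, is that the ZERO_PAGE() lookup elsewhere in the series spreads read-only zero mappings across several cache colors instead of one hot page. A sketch under that assumption (the real ZERO_PAGE() definition is not part of this hunk):

static inline struct page *example_zero_page(unsigned long vaddr)
{
	/* pick one of the 1 << order zero pages by address bits */
	return virt_to_page((void *)(empty_zero_page +
				     (vaddr & zero_page_mask)));
}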
diff --git a/arch/s390/mm/pgtable.c b/arch/s390/mm/pgtable.c
index 8d999249d357..0c719c61972e 100644
--- a/arch/s390/mm/pgtable.c
+++ b/arch/s390/mm/pgtable.c
@@ -15,6 +15,7 @@
#include <linux/spinlock.h>
#include <linux/module.h>
#include <linux/quicklist.h>
+#include <linux/rcupdate.h>
#include <asm/system.h>
#include <asm/pgtable.h>
@@ -23,6 +24,67 @@
#include <asm/tlbflush.h>
#include <asm/mmu_context.h>
+struct rcu_table_freelist {
+ struct rcu_head rcu;
+ struct mm_struct *mm;
+ unsigned int pgt_index;
+ unsigned int crst_index;
+ unsigned long *table[0];
+};
+
+#define RCU_FREELIST_SIZE \
+ ((PAGE_SIZE - sizeof(struct rcu_table_freelist)) \
+ / sizeof(unsigned long))
+
+DEFINE_PER_CPU(struct mmu_gather, mmu_gathers);
+static DEFINE_PER_CPU(struct rcu_table_freelist *, rcu_table_freelist);
+
+static void __page_table_free(struct mm_struct *mm, unsigned long *table);
+static void __crst_table_free(struct mm_struct *mm, unsigned long *table);
+
+static struct rcu_table_freelist *rcu_table_freelist_get(struct mm_struct *mm)
+{
+ struct rcu_table_freelist **batchp = &__get_cpu_var(rcu_table_freelist);
+ struct rcu_table_freelist *batch = *batchp;
+
+ if (batch)
+ return batch;
+ batch = (struct rcu_table_freelist *) __get_free_page(GFP_ATOMIC);
+ if (batch) {
+ batch->mm = mm;
+ batch->pgt_index = 0;
+ batch->crst_index = RCU_FREELIST_SIZE;
+ *batchp = batch;
+ }
+ return batch;
+}
+
+static void rcu_table_freelist_callback(struct rcu_head *head)
+{
+ struct rcu_table_freelist *batch =
+ container_of(head, struct rcu_table_freelist, rcu);
+
+ while (batch->pgt_index > 0)
+ __page_table_free(batch->mm, batch->table[--batch->pgt_index]);
+ while (batch->crst_index < RCU_FREELIST_SIZE)
+ __crst_table_free(batch->mm, batch->table[batch->crst_index++]);
+ free_page((unsigned long) batch);
+}
+
+void rcu_table_freelist_finish(void)
+{
+ struct rcu_table_freelist *batch = __get_cpu_var(rcu_table_freelist);
+
+ if (!batch)
+ return;
+ call_rcu(&batch->rcu, rcu_table_freelist_callback);
+ __get_cpu_var(rcu_table_freelist) = NULL;
+}
+
+static void smp_sync(void *arg)
+{
+}
+
#ifndef CONFIG_64BIT
#define ALLOC_ORDER 1
#define TABLES_PER_PAGE 4
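The rcu_table_freelist introduced above batches page-table and crst-table pointers per cpu and only really frees them from an RCU callback, presumably so the interrupt-disabled lockless walk added in arch/s390/mm/gup.c can never step into a table that has already been handed back to the allocator. A minimal sketch of the intended calling pattern, using only functions from this diff (the real callers sit in the TLB-flush paths outside it):

static void example_defer_pgtable_free(struct mm_struct *mm,
				       unsigned long *table)
{
	page_table_free_rcu(mm, table);	/* queue on the per-cpu batch */
	rcu_table_freelist_finish();	/* hand the batch to call_rcu() */
}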
@@ -78,25 +140,55 @@ unsigned long *crst_table_alloc(struct mm_struct *mm, int noexec)
}
page->index = page_to_phys(shadow);
}
- spin_lock(&mm->context.list_lock);
+ spin_lock_bh(&mm->context.list_lock);
list_add(&page->lru, &mm->context.crst_list);
- spin_unlock(&mm->context.list_lock);
+ spin_unlock_bh(&mm->context.list_lock);
return (unsigned long *) page_to_phys(page);
}
-void crst_table_free(struct mm_struct *mm, unsigned long *table)
+static void __crst_table_free(struct mm_struct *mm, unsigned long *table)
{
unsigned long *shadow = get_shadow_table(table);
- struct page *page = virt_to_page(table);
- spin_lock(&mm->context.list_lock);
- list_del(&page->lru);
- spin_unlock(&mm->context.list_lock);
if (shadow)
free_pages((unsigned long) shadow, ALLOC_ORDER);
free_pages((unsigned long) table, ALLOC_ORDER);
}
+void crst_table_free(struct mm_struct *mm, unsigned long *table)
+{
+ struct page *page = virt_to_page(table);
+
+ spin_lock_bh(&mm->context.list_lock);
+ list_del(&page->lru);
+ spin_unlock_bh(&mm->context.list_lock);
+ __crst_table_free(mm, table);
+}
+
+void crst_table_free_rcu(struct mm_struct *mm, unsigned long *table)
+{
+ struct rcu_table_freelist *batch;
+ struct page *page = virt_to_page(table);
+
+ spin_lock_bh(&mm->context.list_lock);
+ list_del(&page->lru);
+ spin_unlock_bh(&mm->context.list_lock);
+ if (atomic_read(&mm->mm_users) < 2 &&
+ cpumask_equal(mm_cpumask(mm), cpumask_of(smp_processor_id()))) {
+ __crst_table_free(mm, table);
+ return;
+ }
+ batch = rcu_table_freelist_get(mm);
+ if (!batch) {
+ smp_call_function(smp_sync, NULL, 1);
+ __crst_table_free(mm, table);
+ return;
+ }
+ batch->table[--batch->crst_index] = table;
+ if (batch->pgt_index >= batch->crst_index)
+ rcu_table_freelist_finish();
+}
+
#ifdef CONFIG_64BIT
int crst_table_upgrade(struct mm_struct *mm, unsigned long limit)
{
@@ -108,7 +200,7 @@ repeat:
table = crst_table_alloc(mm, mm->context.noexec);
if (!table)
return -ENOMEM;
- spin_lock(&mm->page_table_lock);
+ spin_lock_bh(&mm->page_table_lock);
if (mm->context.asce_limit < limit) {
pgd = (unsigned long *) mm->pgd;
if (mm->context.asce_limit <= (1UL << 31)) {
@@ -130,7 +222,7 @@ repeat:
mm->task_size = mm->context.asce_limit;
table = NULL;
}
- spin_unlock(&mm->page_table_lock);
+ spin_unlock_bh(&mm->page_table_lock);
if (table)
crst_table_free(mm, table);
if (mm->context.asce_limit < limit)
@@ -182,7 +274,7 @@ unsigned long *page_table_alloc(struct mm_struct *mm)
unsigned long bits;
bits = (mm->context.noexec || mm->context.has_pgste) ? 3UL : 1UL;
- spin_lock(&mm->context.list_lock);
+ spin_lock_bh(&mm->context.list_lock);
page = NULL;
if (!list_empty(&mm->context.pgtable_list)) {
page = list_first_entry(&mm->context.pgtable_list,
@@ -191,7 +283,7 @@ unsigned long *page_table_alloc(struct mm_struct *mm)
page = NULL;
}
if (!page) {
- spin_unlock(&mm->context.list_lock);
+ spin_unlock_bh(&mm->context.list_lock);
page = alloc_page(GFP_KERNEL|__GFP_REPEAT);
if (!page)
return NULL;
@@ -202,7 +294,7 @@ unsigned long *page_table_alloc(struct mm_struct *mm)
clear_table_pgstes(table);
else
clear_table(table, _PAGE_TYPE_EMPTY, PAGE_SIZE);
- spin_lock(&mm->context.list_lock);
+ spin_lock_bh(&mm->context.list_lock);
list_add(&page->lru, &mm->context.pgtable_list);
}
table = (unsigned long *) page_to_phys(page);
@@ -213,10 +305,25 @@ unsigned long *page_table_alloc(struct mm_struct *mm)
page->flags |= bits;
if ((page->flags & FRAG_MASK) == ((1UL << TABLES_PER_PAGE) - 1))
list_move_tail(&page->lru, &mm->context.pgtable_list);
- spin_unlock(&mm->context.list_lock);
+ spin_unlock_bh(&mm->context.list_lock);
return table;
}
+static void __page_table_free(struct mm_struct *mm, unsigned long *table)
+{
+ struct page *page;
+ unsigned long bits;
+
+ bits = ((unsigned long) table) & 15;
+ table = (unsigned long *)(((unsigned long) table) ^ bits);
+ page = pfn_to_page(__pa(table) >> PAGE_SHIFT);
+ page->flags ^= bits;
+ if (!(page->flags & FRAG_MASK)) {
+ pgtable_page_dtor(page);
+ __free_page(page);
+ }
+}
+
void page_table_free(struct mm_struct *mm, unsigned long *table)
{
struct page *page;
@@ -225,7 +332,7 @@ void page_table_free(struct mm_struct *mm, unsigned long *table)
bits = (mm->context.noexec || mm->context.has_pgste) ? 3UL : 1UL;
bits <<= (__pa(table) & (PAGE_SIZE - 1)) / 256 / sizeof(unsigned long);
page = pfn_to_page(__pa(table) >> PAGE_SHIFT);
- spin_lock(&mm->context.list_lock);
+ spin_lock_bh(&mm->context.list_lock);
page->flags ^= bits;
if (page->flags & FRAG_MASK) {
/* Page now has some free pgtable fragments. */
@@ -234,18 +341,48 @@ void page_table_free(struct mm_struct *mm, unsigned long *table)
} else
/* All fragments of the 4K page have been freed. */
list_del(&page->lru);
- spin_unlock(&mm->context.list_lock);
+ spin_unlock_bh(&mm->context.list_lock);
if (page) {
pgtable_page_dtor(page);
__free_page(page);
}
}
+void page_table_free_rcu(struct mm_struct *mm, unsigned long *table)
+{
+ struct rcu_table_freelist *batch;
+ struct page *page;
+ unsigned long bits;
+
+ if (atomic_read(&mm->mm_users) < 2 &&
+ cpumask_equal(mm_cpumask(mm), cpumask_of(smp_processor_id()))) {
+ page_table_free(mm, table);
+ return;
+ }
+ batch = rcu_table_freelist_get(mm);
+ if (!batch) {
+ smp_call_function(smp_sync, NULL, 1);
+ page_table_free(mm, table);
+ return;
+ }
+ bits = (mm->context.noexec || mm->context.has_pgste) ? 3UL : 1UL;
+ bits <<= (__pa(table) & (PAGE_SIZE - 1)) / 256 / sizeof(unsigned long);
+ page = pfn_to_page(__pa(table) >> PAGE_SHIFT);
+ spin_lock_bh(&mm->context.list_lock);
+ /* Delayed freeing with rcu prevents reuse of pgtable fragments */
+ list_del_init(&page->lru);
+ spin_unlock_bh(&mm->context.list_lock);
+ table = (unsigned long *)(((unsigned long) table) | bits);
+ batch->table[batch->pgt_index++] = table;
+ if (batch->pgt_index >= batch->crst_index)
+ rcu_table_freelist_finish();
+}
+
void disable_noexec(struct mm_struct *mm, struct task_struct *tsk)
{
struct page *page;
- spin_lock(&mm->context.list_lock);
+ spin_lock_bh(&mm->context.list_lock);
/* Free shadow region and segment tables. */
list_for_each_entry(page, &mm->context.crst_list, lru)
if (page->index) {
@@ -255,7 +392,7 @@ void disable_noexec(struct mm_struct *mm, struct task_struct *tsk)
/* "Free" second halves of page tables. */
list_for_each_entry(page, &mm->context.pgtable_list, lru)
page->flags &= ~SECOND_HALVES;
- spin_unlock(&mm->context.list_lock);
+ spin_unlock_bh(&mm->context.list_lock);
mm->context.noexec = 0;
update_mm(mm, tsk);
}
@@ -312,6 +449,8 @@ int s390_enable_sie(void)
tsk->mm = tsk->active_mm = mm;
preempt_disable();
update_mm(mm, tsk);
+ atomic_inc(&mm->context.attach_count);
+ atomic_dec(&old_mm->context.attach_count);
cpumask_set_cpu(smp_processor_id(), mm_cpumask(mm));
preempt_enable();
task_unlock(tsk);
diff --git a/arch/score/Kconfig b/arch/score/Kconfig
index be4a15584751..4293fdcb5398 100644
--- a/arch/score/Kconfig
+++ b/arch/score/Kconfig
@@ -1,8 +1,3 @@
-# For a description of the syntax of this configuration file,
-# see Documentation/kbuild/kconfig-language.txt.
-
-mainmenu "Linux/SCORE Kernel Configuration"
-
menu "Machine selection"
choice
diff --git a/arch/score/include/asm/pgtable.h b/arch/score/include/asm/pgtable.h
index ccf38f06c57d..2fd469807683 100644
--- a/arch/score/include/asm/pgtable.h
+++ b/arch/score/include/asm/pgtable.h
@@ -88,10 +88,7 @@ static inline void pmd_clear(pmd_t *pmdp)
#define pte_offset_map(dir, address) \
((pte_t *)page_address(pmd_page(*(dir))) + __pte_offset(address))
-#define pte_offset_map_nested(dir, address) \
- ((pte_t *)page_address(pmd_page(*(dir))) + __pte_offset(address))
#define pte_unmap(pte) ((void)(pte))
-#define pte_unmap_nested(pte) ((void)(pte))
/*
* Bits 9(_PAGE_PRESENT) and 10(_PAGE_FILE)are taken,
diff --git a/arch/score/kernel/ptrace.c b/arch/score/kernel/ptrace.c
index 174c6422b096..55836188b217 100644
--- a/arch/score/kernel/ptrace.c
+++ b/arch/score/kernel/ptrace.c
@@ -325,7 +325,8 @@ void ptrace_disable(struct task_struct *child)
}
long
-arch_ptrace(struct task_struct *child, long request, long addr, long data)
+arch_ptrace(struct task_struct *child, long request,
+ unsigned long addr, unsigned long data)
{
int ret;
unsigned long __user *datap = (void __user *)data;
@@ -335,14 +336,14 @@ arch_ptrace(struct task_struct *child, long request, long addr, long data)
ret = copy_regset_to_user(child, &user_score_native_view,
REGSET_GENERAL,
0, sizeof(struct pt_regs),
- (void __user *)datap);
+ datap);
break;
case PTRACE_SETREGS:
ret = copy_regset_from_user(child, &user_score_native_view,
REGSET_GENERAL,
0, sizeof(struct pt_regs),
- (const void __user *)datap);
+ datap);
break;
default:
diff --git a/arch/sh/Kconfig b/arch/sh/Kconfig
index 35b6879628a0..7f217b3a50a8 100644
--- a/arch/sh/Kconfig
+++ b/arch/sh/Kconfig
@@ -1,10 +1,3 @@
-#
-# For a description of the syntax of this configuration file,
-# see Documentation/kbuild/kconfig-language.txt.
-#
-
-mainmenu "Linux/SuperH Kernel Configuration"
-
config SUPERH
def_bool y
select EMBEDDED
@@ -24,8 +17,12 @@ config SUPERH
select HAVE_KERNEL_LZMA
select HAVE_KERNEL_LZO
select HAVE_SYSCALL_TRACEPOINTS
+ select HAVE_REGS_AND_STACK_ACCESS_API
+ select HAVE_GENERIC_HARDIRQS
+ select HAVE_SPARSE_IRQ
select RTC_LIB
select GENERIC_ATOMIC64
+ select GENERIC_HARDIRQS_NO_DEPRECATED
help
The SuperH is a RISC processor targeted for use in embedded systems
and consumer electronics; it was also used in the Sega Dreamcast
@@ -46,8 +43,9 @@ config SUPERH32
select HAVE_ARCH_KGDB
select HAVE_HW_BREAKPOINT
select HAVE_MIXED_BREAKPOINTS_REGS
- select PERF_EVENTS if HAVE_HW_BREAKPOINT
+ select PERF_EVENTS
select ARCH_HIBERNATION_POSSIBLE if MMU
+ select SPARSE_IRQ
config SUPERH64
def_bool ARCH = "sh64"
@@ -77,19 +75,9 @@ config GENERIC_FIND_NEXT_BIT
config GENERIC_HWEIGHT
def_bool y
-config GENERIC_HARDIRQS
- def_bool y
-
-config GENERIC_HARDIRQS_NO__DO_IRQ
- def_bool y
-
config IRQ_PER_CPU
def_bool y
-config SPARSE_IRQ
- def_bool y
- depends on SUPERH32
-
config GENERIC_GPIO
def_bool n
@@ -205,6 +193,7 @@ config CPU_SH2
config CPU_SH2A
bool
select CPU_SH2
+ select UNCACHED_MAPPING
config CPU_SH3
bool
@@ -471,6 +460,7 @@ config CPU_SUBTYPE_SHX3
select CPU_SH4A
select CPU_SHX3
select GENERIC_CLOCKEVENTS_BROADCAST if SMP
+ select ARCH_REQUIRE_GPIOLIB
# SH4AL-DSP Processor Support
@@ -575,7 +565,7 @@ config SH_CLK_CPG
config SH_CLK_CPG_LEGACY
depends on SH_CLK_CPG
def_bool y if !CPU_SUBTYPE_SH7785 && !ARCH_SHMOBILE && \
- !CPU_SUBTYPE_SH7786
+ !CPU_SHX3 && !CPU_SUBTYPE_SH7757
config SH_CLK_MD
int "CPU Mode Pin Setting"
diff --git a/arch/sh/Makefile b/arch/sh/Makefile
index 307b3a4a790b..9c8c6e1a2a15 100644
--- a/arch/sh/Makefile
+++ b/arch/sh/Makefile
@@ -133,10 +133,7 @@ machdir-$(CONFIG_SOLUTION_ENGINE) += mach-se
machdir-$(CONFIG_SH_HP6XX) += mach-hp6xx
machdir-$(CONFIG_SH_DREAMCAST) += mach-dreamcast
machdir-$(CONFIG_SH_SH03) += mach-sh03
-machdir-$(CONFIG_SH_SECUREEDGE5410) += mach-snapgear
machdir-$(CONFIG_SH_RTS7751R2D) += mach-r2d
-machdir-$(CONFIG_SH_7751_SYSTEMH) += mach-systemh
-machdir-$(CONFIG_SH_EDOSK7705) += mach-edosk7705
machdir-$(CONFIG_SH_HIGHLANDER) += mach-highlander
machdir-$(CONFIG_SH_MIGOR) += mach-migor
machdir-$(CONFIG_SH_AP325RXA) += mach-ap325rxa
diff --git a/arch/sh/boards/Kconfig b/arch/sh/boards/Kconfig
index 07b35ca2f644..2018c7ea4c93 100644
--- a/arch/sh/boards/Kconfig
+++ b/arch/sh/boards/Kconfig
@@ -81,13 +81,6 @@ config SH_7343_SOLUTION_ENGINE
Select 7343 SolutionEngine if configuring for a Hitachi
SH7343 (SH-Mobile 3AS) evaluation board.
-config SH_7751_SYSTEMH
- bool "SystemH7751R"
- depends on CPU_SUBTYPE_SH7751R
- help
- Select SystemH if you are configuring for a Renesas SystemH
- 7751R evaluation board.
-
config SH_HP6XX
bool "HP6XX"
select SYS_SUPPORTS_APM_EMULATION
@@ -155,6 +148,8 @@ config SH_SDK7786
depends on CPU_SUBTYPE_SH7786
select SYS_SUPPORTS_PCI
select NO_IOPORT if !PCI
+ select ARCH_WANT_OPTIONAL_GPIOLIB
+ select HAVE_SRAM_POOL
help
Select SDK7786 if configuring for a Renesas Technology Europe
SH7786-65nm board.
@@ -165,6 +160,11 @@ config SH_HIGHLANDER
select SYS_SUPPORTS_PCI
select IO_TRAPPED if MMU
+config SH_SH7757LCR
+ bool "SH7757LCR"
+ depends on CPU_SUBTYPE_SH7757
+ select ARCH_REQUIRE_GPIOLIB
+
config SH_SH7785LCR
bool "SH7785LCR"
depends on CPU_SUBTYPE_SH7785
@@ -309,6 +309,17 @@ config SH_POLARIS
help
Select if configuring for an SMSC Polaris development board
+config SH_SH2007
+ bool "SH-2007 board"
+ select NO_IOPORT
+ depends on CPU_SUBTYPE_SH7780
+ help
+ SH-2007 is a single-board computer based around the SH7780 chip,
+ intended for embedded applications.
+ It has an Ethernet interface (SMC9118), a directly connected
+ Compact Flash socket, two serial ports and a PC-104 bus.
+ More information at <http://sh2000.sh-linux.org>.
+
endmenu
source "arch/sh/boards/mach-r2d/Kconfig"
diff --git a/arch/sh/boards/Makefile b/arch/sh/boards/Makefile
index 4f90f9b7a922..be7d11d04b26 100644
--- a/arch/sh/boards/Makefile
+++ b/arch/sh/boards/Makefile
@@ -2,10 +2,14 @@
# Specific board support, not covered by a mach group.
#
obj-$(CONFIG_SH_MAGIC_PANEL_R2) += board-magicpanelr2.o
+obj-$(CONFIG_SH_SECUREEDGE5410) += board-secureedge5410.o
+obj-$(CONFIG_SH_SH2007) += board-sh2007.o
obj-$(CONFIG_SH_SH7785LCR) += board-sh7785lcr.o
obj-$(CONFIG_SH_URQUELL) += board-urquell.o
obj-$(CONFIG_SH_SHMIN) += board-shmin.o
+obj-$(CONFIG_SH_EDOSK7705) += board-edosk7705.o
obj-$(CONFIG_SH_EDOSK7760) += board-edosk7760.o
obj-$(CONFIG_SH_ESPT) += board-espt.o
obj-$(CONFIG_SH_POLARIS) += board-polaris.o
obj-$(CONFIG_SH_TITAN) += board-titan.o
+obj-$(CONFIG_SH_SH7757LCR) += board-sh7757lcr.o
diff --git a/arch/sh/boards/board-edosk7705.c b/arch/sh/boards/board-edosk7705.c
new file mode 100644
index 000000000000..4cb3bb74c36f
--- /dev/null
+++ b/arch/sh/boards/board-edosk7705.c
@@ -0,0 +1,78 @@
+/*
+ * arch/sh/boards/renesas/edosk7705/setup.c
+ *
+ * Copyright (C) 2000 Kazumoto Kojima
+ *
+ * Hitachi SolutionEngine Support.
+ *
+ * Modified for edosk7705 development
+ * board by S. Dunn, 2003.
+ */
+#include <linux/init.h>
+#include <linux/irq.h>
+#include <linux/platform_device.h>
+#include <linux/interrupt.h>
+#include <linux/smc91x.h>
+#include <asm/machvec.h>
+#include <asm/sizes.h>
+
+#define SMC_IOBASE 0xA2000000
+#define SMC_IO_OFFSET 0x300
+#define SMC_IOADDR (SMC_IOBASE + SMC_IO_OFFSET)
+
+#define ETHERNET_IRQ 0x09
+
+static void __init sh_edosk7705_init_irq(void)
+{
+ make_imask_irq(ETHERNET_IRQ);
+}
+
+/* eth initialization functions */
+static struct smc91x_platdata smc91x_info = {
+ .flags = SMC91X_USE_16BIT | SMC91X_IO_SHIFT_1 | IORESOURCE_IRQ_LOWLEVEL,
+};
+
+static struct resource smc91x_res[] = {
+ [0] = {
+ .start = SMC_IOADDR,
+ .end = SMC_IOADDR + SZ_32 - 1,
+ .flags = IORESOURCE_MEM,
+ },
+ [1] = {
+ .start = ETHERNET_IRQ,
+ .end = ETHERNET_IRQ,
+ .flags = IORESOURCE_IRQ,
+ }
+};
+
+static struct platform_device smc91x_dev = {
+ .name = "smc91x",
+ .id = -1,
+ .num_resources = ARRAY_SIZE(smc91x_res),
+ .resource = smc91x_res,
+
+ .dev = {
+ .platform_data = &smc91x_info,
+ },
+};
+
+/* platform init code */
+static struct platform_device *edosk7705_devices[] __initdata = {
+ &smc91x_dev,
+};
+
+static int __init init_edosk7705_devices(void)
+{
+ return platform_add_devices(edosk7705_devices,
+ ARRAY_SIZE(edosk7705_devices));
+}
+__initcall(init_edosk7705_devices);
+
+/*
+ * The Machine Vector
+ */
+static struct sh_machine_vector mv_edosk7705 __initmv = {
+ .mv_name = "EDOSK7705",
+ .mv_nr_irqs = 80,
+ .mv_init_irq = sh_edosk7705_init_irq,
+};
diff --git a/arch/sh/boards/mach-snapgear/setup.c b/arch/sh/boards/board-secureedge5410.c
index 331745dee379..32f875e8493d 100644
--- a/arch/sh/boards/mach-snapgear/setup.c
+++ b/arch/sh/boards/board-secureedge5410.c
@@ -1,6 +1,4 @@
/*
- * linux/arch/sh/boards/snapgear/setup.c
- *
* Copyright (C) 2002 David McCullough <davidm@snapgear.com>
* Copyright (C) 2003 Paul Mundt <lethal@linux-sh.org>
*
@@ -19,18 +17,19 @@
#include <linux/module.h>
#include <linux/sched.h>
#include <asm/machvec.h>
-#include <mach/snapgear.h>
+#include <mach/secureedge5410.h>
#include <asm/irq.h>
#include <asm/io.h>
#include <cpu/timer.h>
+unsigned short secureedge5410_ioport;
+
/*
* EraseConfig handling functions
*/
-
static irqreturn_t eraseconfig_interrupt(int irq, void *dev_id)
{
- (void)__raw_readb(0xb8000000); /* dummy read */
+ ctrl_delay(); /* dummy read */
printk("SnapGear: erase switch interrupt!\n");
@@ -39,21 +38,22 @@ static irqreturn_t eraseconfig_interrupt(int irq, void *dev_id)
static int __init eraseconfig_init(void)
{
+ unsigned int irq = evt2irq(0x240);
+
printk("SnapGear: EraseConfig init\n");
+
/* Setup "EraseConfig" switch on external IRQ 0 */
- if (request_irq(IRL0_IRQ, eraseconfig_interrupt, IRQF_DISABLED,
+ if (request_irq(irq, eraseconfig_interrupt, IRQF_DISABLED,
"Erase Config", NULL))
printk("SnapGear: failed to register IRQ%d for Reset witch\n",
- IRL0_IRQ);
+ irq);
else
printk("SnapGear: registered EraseConfig switch on IRQ%d\n",
- IRL0_IRQ);
- return(0);
+ irq);
+ return 0;
}
-
module_init(eraseconfig_init);
-/****************************************************************************/
/*
* Initialize IRQ setting
*
@@ -62,7 +62,6 @@ module_init(eraseconfig_init);
* IRL2 = eth1
* IRL3 = crypto
*/
-
static void __init init_snapgear_IRQ(void)
{
printk("Setup SnapGear IRQ/IPR ...\n");
@@ -76,20 +75,5 @@ static void __init init_snapgear_IRQ(void)
static struct sh_machine_vector mv_snapgear __initmv = {
.mv_name = "SnapGear SecureEdge5410",
.mv_nr_irqs = 72,
-
- .mv_inb = snapgear_inb,
- .mv_inw = snapgear_inw,
- .mv_inl = snapgear_inl,
- .mv_outb = snapgear_outb,
- .mv_outw = snapgear_outw,
- .mv_outl = snapgear_outl,
-
- .mv_inb_p = snapgear_inb_p,
- .mv_inw_p = snapgear_inw,
- .mv_inl_p = snapgear_inl,
- .mv_outb_p = snapgear_outb_p,
- .mv_outw_p = snapgear_outw,
- .mv_outl_p = snapgear_outl,
-
.mv_init_irq = init_snapgear_IRQ,
};
diff --git a/arch/sh/boards/board-sh2007.c b/arch/sh/boards/board-sh2007.c
new file mode 100644
index 000000000000..b90b78f6a829
--- /dev/null
+++ b/arch/sh/boards/board-sh2007.c
@@ -0,0 +1,133 @@
+/*
+ * SH-2007 board support.
+ *
+ * Copyright (C) 2003, 2004 SUGIOKA Toshinobu
+ * Copyright (C) 2010 Hitoshi Mitake <mitake@dcl.info.waseda.ac.jp>
+ */
+#include <linux/init.h>
+#include <linux/irq.h>
+#include <linux/smsc911x.h>
+#include <linux/platform_device.h>
+#include <linux/ata_platform.h>
+#include <linux/io.h>
+#include <asm/machvec.h>
+#include <mach/sh2007.h>
+
+struct smsc911x_platform_config smc911x_info = {
+ .flags = SMSC911X_USE_32BIT,
+ .irq_polarity = SMSC911X_IRQ_POLARITY_ACTIVE_LOW,
+ .irq_type = SMSC911X_IRQ_TYPE_PUSH_PULL,
+};
+
+static struct resource smsc9118_0_resources[] = {
+ [0] = {
+ .start = SMC0_BASE,
+ .end = SMC0_BASE + 0xff,
+ .flags = IORESOURCE_MEM,
+ },
+ [1] = {
+ .start = evt2irq(0x240),
+ .end = evt2irq(0x240),
+ .flags = IORESOURCE_IRQ,
+ }
+};
+
+static struct resource smsc9118_1_resources[] = {
+ [0] = {
+ .start = SMC1_BASE,
+ .end = SMC1_BASE + 0xff,
+ .flags = IORESOURCE_MEM,
+ },
+ [1] = {
+ .start = evt2irq(0x280),
+ .end = evt2irq(0x280),
+ .flags = IORESOURCE_IRQ,
+ }
+};
+
+static struct platform_device smsc9118_0_device = {
+ .name = "smsc911x",
+ .id = 0,
+ .num_resources = ARRAY_SIZE(smsc9118_0_resources),
+ .resource = smsc9118_0_resources,
+ .dev = {
+ .platform_data = &smc911x_info,
+ },
+};
+
+static struct platform_device smsc9118_1_device = {
+ .name = "smsc911x",
+ .id = 1,
+ .num_resources = ARRAY_SIZE(smsc9118_1_resources),
+ .resource = smsc9118_1_resources,
+ .dev = {
+ .platform_data = &smc911x_info,
+ },
+};
+
+static struct resource cf_resources[] = {
+ [0] = {
+ .start = CF_BASE + CF_OFFSET,
+ .end = CF_BASE + CF_OFFSET + 0x0f,
+ .flags = IORESOURCE_MEM,
+ },
+ [1] = {
+ .start = CF_BASE + CF_OFFSET + 0x206,
+ .end = CF_BASE + CF_OFFSET + 0x20f,
+ .flags = IORESOURCE_MEM,
+ },
+ [2] = {
+ .start = evt2irq(0x2c0),
+ .end = evt2irq(0x2c0),
+ .flags = IORESOURCE_IRQ,
+ },
+};
+
+static struct platform_device cf_device = {
+ .name = "pata_platform",
+ .id = 0,
+ .num_resources = ARRAY_SIZE(cf_resources),
+ .resource = cf_resources,
+};
+
+static struct platform_device *sh2007_devices[] __initdata = {
+ &smsc9118_0_device,
+ &smsc9118_1_device,
+ &cf_device,
+};
+
+static int __init sh2007_io_init(void)
+{
+ platform_add_devices(sh2007_devices, ARRAY_SIZE(sh2007_devices));
+ return 0;
+}
+subsys_initcall(sh2007_io_init);
+
+static void __init sh2007_init_irq(void)
+{
+ plat_irq_setup_pins(IRQ_MODE_IRQ);
+}
+
+/*
+ * Initialize the board
+ */
+static void __init sh2007_setup(char **cmdline_p)
+{
+ printk(KERN_INFO "SH-2007 Setup...");
+
+ /* setup wait control registers for area 5 */
+ __raw_writel(CS5BCR_D, CS5BCR);
+ __raw_writel(CS5WCR_D, CS5WCR);
+ __raw_writel(CS5PCR_D, CS5PCR);
+
+ printk(KERN_INFO " done.\n");
+}
+
+/*
+ * The Machine Vector
+ */
+struct sh_machine_vector mv_sh2007 __initmv = {
+ .mv_setup = sh2007_setup,
+ .mv_name = "sh2007",
+ .mv_init_irq = sh2007_init_irq,
+};
diff --git a/arch/sh/boards/board-sh7757lcr.c b/arch/sh/boards/board-sh7757lcr.c
new file mode 100644
index 000000000000..c475f1056ab4
--- /dev/null
+++ b/arch/sh/boards/board-sh7757lcr.c
@@ -0,0 +1,374 @@
+/*
+ * Renesas R0P7757LC0012RL Support.
+ *
+ * Copyright (C) 2009 - 2010 Renesas Solutions Corp.
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ */
+
+#include <linux/init.h>
+#include <linux/platform_device.h>
+#include <linux/gpio.h>
+#include <linux/irq.h>
+#include <linux/spi/spi.h>
+#include <linux/spi/flash.h>
+#include <linux/io.h>
+#include <cpu/sh7757.h>
+#include <asm/sh_eth.h>
+#include <asm/heartbeat.h>
+
+static struct resource heartbeat_resource = {
+ .start = 0xffec005c, /* PUDR */
+ .end = 0xffec005c,
+ .flags = IORESOURCE_MEM | IORESOURCE_MEM_8BIT,
+};
+
+static unsigned char heartbeat_bit_pos[] = { 0, 1, 2, 3 };
+
+static struct heartbeat_data heartbeat_data = {
+ .bit_pos = heartbeat_bit_pos,
+ .nr_bits = ARRAY_SIZE(heartbeat_bit_pos),
+ .flags = HEARTBEAT_INVERTED,
+};
+
+static struct platform_device heartbeat_device = {
+ .name = "heartbeat",
+ .id = -1,
+ .dev = {
+ .platform_data = &heartbeat_data,
+ },
+ .num_resources = 1,
+ .resource = &heartbeat_resource,
+};
+
+/* Fast Ethernet */
+static struct resource sh_eth0_resources[] = {
+ {
+ .start = 0xfef00000,
+ .end = 0xfef001ff,
+ .flags = IORESOURCE_MEM,
+ }, {
+ .start = 84,
+ .end = 84,
+ .flags = IORESOURCE_IRQ,
+ },
+};
+
+static struct sh_eth_plat_data sh7757_eth0_pdata = {
+ .phy = 1,
+ .edmac_endian = EDMAC_LITTLE_ENDIAN,
+};
+
+static struct platform_device sh7757_eth0_device = {
+ .name = "sh-eth",
+ .resource = sh_eth0_resources,
+ .id = 0,
+ .num_resources = ARRAY_SIZE(sh_eth0_resources),
+ .dev = {
+ .platform_data = &sh7757_eth0_pdata,
+ },
+};
+
+static struct resource sh_eth1_resources[] = {
+ {
+ .start = 0xfef00800,
+ .end = 0xfef009ff,
+ .flags = IORESOURCE_MEM,
+ }, {
+ .start = 84,
+ .end = 84,
+ .flags = IORESOURCE_IRQ,
+ },
+};
+
+static struct sh_eth_plat_data sh7757_eth1_pdata = {
+ .phy = 1,
+ .edmac_endian = EDMAC_LITTLE_ENDIAN,
+};
+
+static struct platform_device sh7757_eth1_device = {
+ .name = "sh-eth",
+ .resource = sh_eth1_resources,
+ .id = 1,
+ .num_resources = ARRAY_SIZE(sh_eth1_resources),
+ .dev = {
+ .platform_data = &sh7757_eth1_pdata,
+ },
+};
+
+static struct platform_device *sh7757lcr_devices[] __initdata = {
+ &heartbeat_device,
+ &sh7757_eth0_device,
+ &sh7757_eth1_device,
+};
+
+static int __init sh7757lcr_devices_setup(void)
+{
+ /* RGMII (PTA) */
+ gpio_request(GPIO_FN_ET0_MDC, NULL);
+ gpio_request(GPIO_FN_ET0_MDIO, NULL);
+ gpio_request(GPIO_FN_ET1_MDC, NULL);
+ gpio_request(GPIO_FN_ET1_MDIO, NULL);
+
+ /* ONFI (PTB, PTZ) */
+ gpio_request(GPIO_FN_ON_NRE, NULL);
+ gpio_request(GPIO_FN_ON_NWE, NULL);
+ gpio_request(GPIO_FN_ON_NWP, NULL);
+ gpio_request(GPIO_FN_ON_NCE0, NULL);
+ gpio_request(GPIO_FN_ON_R_B0, NULL);
+ gpio_request(GPIO_FN_ON_ALE, NULL);
+ gpio_request(GPIO_FN_ON_CLE, NULL);
+
+ gpio_request(GPIO_FN_ON_DQ7, NULL);
+ gpio_request(GPIO_FN_ON_DQ6, NULL);
+ gpio_request(GPIO_FN_ON_DQ5, NULL);
+ gpio_request(GPIO_FN_ON_DQ4, NULL);
+ gpio_request(GPIO_FN_ON_DQ3, NULL);
+ gpio_request(GPIO_FN_ON_DQ2, NULL);
+ gpio_request(GPIO_FN_ON_DQ1, NULL);
+ gpio_request(GPIO_FN_ON_DQ0, NULL);
+
+ /* IRQ8 to 0 (PTB, PTC) */
+ gpio_request(GPIO_FN_IRQ8, NULL);
+ gpio_request(GPIO_FN_IRQ7, NULL);
+ gpio_request(GPIO_FN_IRQ6, NULL);
+ gpio_request(GPIO_FN_IRQ5, NULL);
+ gpio_request(GPIO_FN_IRQ4, NULL);
+ gpio_request(GPIO_FN_IRQ3, NULL);
+ gpio_request(GPIO_FN_IRQ2, NULL);
+ gpio_request(GPIO_FN_IRQ1, NULL);
+ gpio_request(GPIO_FN_IRQ0, NULL);
+
+ /* SPI0 (PTD) */
+ gpio_request(GPIO_FN_SP0_MOSI, NULL);
+ gpio_request(GPIO_FN_SP0_MISO, NULL);
+ gpio_request(GPIO_FN_SP0_SCK, NULL);
+ gpio_request(GPIO_FN_SP0_SCK_FB, NULL);
+ gpio_request(GPIO_FN_SP0_SS0, NULL);
+ gpio_request(GPIO_FN_SP0_SS1, NULL);
+ gpio_request(GPIO_FN_SP0_SS2, NULL);
+ gpio_request(GPIO_FN_SP0_SS3, NULL);
+
+ /* RMII 0/1 (PTE, PTF) */
+ gpio_request(GPIO_FN_RMII0_CRS_DV, NULL);
+ gpio_request(GPIO_FN_RMII0_TXD1, NULL);
+ gpio_request(GPIO_FN_RMII0_TXD0, NULL);
+ gpio_request(GPIO_FN_RMII0_TXEN, NULL);
+ gpio_request(GPIO_FN_RMII0_REFCLK, NULL);
+ gpio_request(GPIO_FN_RMII0_RXD1, NULL);
+ gpio_request(GPIO_FN_RMII0_RXD0, NULL);
+ gpio_request(GPIO_FN_RMII0_RX_ER, NULL);
+ gpio_request(GPIO_FN_RMII1_CRS_DV, NULL);
+ gpio_request(GPIO_FN_RMII1_TXD1, NULL);
+ gpio_request(GPIO_FN_RMII1_TXD0, NULL);
+ gpio_request(GPIO_FN_RMII1_TXEN, NULL);
+ gpio_request(GPIO_FN_RMII1_REFCLK, NULL);
+ gpio_request(GPIO_FN_RMII1_RXD1, NULL);
+ gpio_request(GPIO_FN_RMII1_RXD0, NULL);
+ gpio_request(GPIO_FN_RMII1_RX_ER, NULL);
+
+ /* eMMC (PTG) */
+ gpio_request(GPIO_FN_MMCCLK, NULL);
+ gpio_request(GPIO_FN_MMCCMD, NULL);
+ gpio_request(GPIO_FN_MMCDAT7, NULL);
+ gpio_request(GPIO_FN_MMCDAT6, NULL);
+ gpio_request(GPIO_FN_MMCDAT5, NULL);
+ gpio_request(GPIO_FN_MMCDAT4, NULL);
+ gpio_request(GPIO_FN_MMCDAT3, NULL);
+ gpio_request(GPIO_FN_MMCDAT2, NULL);
+ gpio_request(GPIO_FN_MMCDAT1, NULL);
+ gpio_request(GPIO_FN_MMCDAT0, NULL);
+
+ /* LPC (PTG, PTH, PTQ, PTU) */
+ gpio_request(GPIO_FN_SERIRQ, NULL);
+ gpio_request(GPIO_FN_LPCPD, NULL);
+ gpio_request(GPIO_FN_LDRQ, NULL);
+ gpio_request(GPIO_FN_WP, NULL);
+ gpio_request(GPIO_FN_FMS0, NULL);
+ gpio_request(GPIO_FN_LAD3, NULL);
+ gpio_request(GPIO_FN_LAD2, NULL);
+ gpio_request(GPIO_FN_LAD1, NULL);
+ gpio_request(GPIO_FN_LAD0, NULL);
+ gpio_request(GPIO_FN_LFRAME, NULL);
+ gpio_request(GPIO_FN_LRESET, NULL);
+ gpio_request(GPIO_FN_LCLK, NULL);
+ gpio_request(GPIO_FN_LGPIO7, NULL);
+ gpio_request(GPIO_FN_LGPIO6, NULL);
+ gpio_request(GPIO_FN_LGPIO5, NULL);
+ gpio_request(GPIO_FN_LGPIO4, NULL);
+
+ /* SPI1 (PTH) */
+ gpio_request(GPIO_FN_SP1_MOSI, NULL);
+ gpio_request(GPIO_FN_SP1_MISO, NULL);
+ gpio_request(GPIO_FN_SP1_SCK, NULL);
+ gpio_request(GPIO_FN_SP1_SCK_FB, NULL);
+ gpio_request(GPIO_FN_SP1_SS0, NULL);
+ gpio_request(GPIO_FN_SP1_SS1, NULL);
+
+ /* SDHI (PTI) */
+ gpio_request(GPIO_FN_SD_WP, NULL);
+ gpio_request(GPIO_FN_SD_CD, NULL);
+ gpio_request(GPIO_FN_SD_CLK, NULL);
+ gpio_request(GPIO_FN_SD_CMD, NULL);
+ gpio_request(GPIO_FN_SD_D3, NULL);
+ gpio_request(GPIO_FN_SD_D2, NULL);
+ gpio_request(GPIO_FN_SD_D1, NULL);
+ gpio_request(GPIO_FN_SD_D0, NULL);
+
+ /* SCIF3/4 (PTJ, PTW) */
+ gpio_request(GPIO_FN_RTS3, NULL);
+ gpio_request(GPIO_FN_CTS3, NULL);
+ gpio_request(GPIO_FN_TXD3, NULL);
+ gpio_request(GPIO_FN_RXD3, NULL);
+ gpio_request(GPIO_FN_RTS4, NULL);
+ gpio_request(GPIO_FN_RXD4, NULL);
+ gpio_request(GPIO_FN_TXD4, NULL);
+ gpio_request(GPIO_FN_CTS4, NULL);
+
+ /* SERMUX (PTK, PTL, PTO, PTV) */
+ gpio_request(GPIO_FN_COM2_TXD, NULL);
+ gpio_request(GPIO_FN_COM2_RXD, NULL);
+ gpio_request(GPIO_FN_COM2_RTS, NULL);
+ gpio_request(GPIO_FN_COM2_CTS, NULL);
+ gpio_request(GPIO_FN_COM2_DTR, NULL);
+ gpio_request(GPIO_FN_COM2_DSR, NULL);
+ gpio_request(GPIO_FN_COM2_DCD, NULL);
+ gpio_request(GPIO_FN_COM2_RI, NULL);
+ gpio_request(GPIO_FN_RAC_RXD, NULL);
+ gpio_request(GPIO_FN_RAC_RTS, NULL);
+ gpio_request(GPIO_FN_RAC_CTS, NULL);
+ gpio_request(GPIO_FN_RAC_DTR, NULL);
+ gpio_request(GPIO_FN_RAC_DSR, NULL);
+ gpio_request(GPIO_FN_RAC_DCD, NULL);
+ gpio_request(GPIO_FN_RAC_TXD, NULL);
+ gpio_request(GPIO_FN_COM1_TXD, NULL);
+ gpio_request(GPIO_FN_COM1_RXD, NULL);
+ gpio_request(GPIO_FN_COM1_RTS, NULL);
+ gpio_request(GPIO_FN_COM1_CTS, NULL);
+
+ writeb(0x10, 0xfe470000); /* SMR0: SerMux mode 0 */
+
+ /* IIC (PTM, PTR, PTS) */
+ gpio_request(GPIO_FN_SDA7, NULL);
+ gpio_request(GPIO_FN_SCL7, NULL);
+ gpio_request(GPIO_FN_SDA6, NULL);
+ gpio_request(GPIO_FN_SCL6, NULL);
+ gpio_request(GPIO_FN_SDA5, NULL);
+ gpio_request(GPIO_FN_SCL5, NULL);
+ gpio_request(GPIO_FN_SDA4, NULL);
+ gpio_request(GPIO_FN_SCL4, NULL);
+ gpio_request(GPIO_FN_SDA3, NULL);
+ gpio_request(GPIO_FN_SCL3, NULL);
+ gpio_request(GPIO_FN_SDA2, NULL);
+ gpio_request(GPIO_FN_SCL2, NULL);
+ gpio_request(GPIO_FN_SDA1, NULL);
+ gpio_request(GPIO_FN_SCL1, NULL);
+ gpio_request(GPIO_FN_SDA0, NULL);
+ gpio_request(GPIO_FN_SCL0, NULL);
+
+ /* USB (PTN) */
+ gpio_request(GPIO_FN_VBUS_EN, NULL);
+ gpio_request(GPIO_FN_VBUS_OC, NULL);
+
+ /* SGPIO1/0 (PTN, PTO) */
+ gpio_request(GPIO_FN_SGPIO1_CLK, NULL);
+ gpio_request(GPIO_FN_SGPIO1_LOAD, NULL);
+ gpio_request(GPIO_FN_SGPIO1_DI, NULL);
+ gpio_request(GPIO_FN_SGPIO1_DO, NULL);
+ gpio_request(GPIO_FN_SGPIO0_CLK, NULL);
+ gpio_request(GPIO_FN_SGPIO0_LOAD, NULL);
+ gpio_request(GPIO_FN_SGPIO0_DI, NULL);
+ gpio_request(GPIO_FN_SGPIO0_DO, NULL);
+
+ /* WDT (PTN) */
+ gpio_request(GPIO_FN_SUB_CLKIN, NULL);
+
+ /* System (PTT) */
+ gpio_request(GPIO_FN_STATUS1, NULL);
+ gpio_request(GPIO_FN_STATUS0, NULL);
+
+ /* PWMX (PTT) */
+ gpio_request(GPIO_FN_PWMX1, NULL);
+ gpio_request(GPIO_FN_PWMX0, NULL);
+
+ /* R-SPI (PTV) */
+ gpio_request(GPIO_FN_R_SPI_MOSI, NULL);
+ gpio_request(GPIO_FN_R_SPI_MISO, NULL);
+ gpio_request(GPIO_FN_R_SPI_RSPCK, NULL);
+ gpio_request(GPIO_FN_R_SPI_SSL0, NULL);
+ gpio_request(GPIO_FN_R_SPI_SSL1, NULL);
+
+ /* EVC (PTV, PTW) */
+ gpio_request(GPIO_FN_EVENT7, NULL);
+ gpio_request(GPIO_FN_EVENT6, NULL);
+ gpio_request(GPIO_FN_EVENT5, NULL);
+ gpio_request(GPIO_FN_EVENT4, NULL);
+ gpio_request(GPIO_FN_EVENT3, NULL);
+ gpio_request(GPIO_FN_EVENT2, NULL);
+ gpio_request(GPIO_FN_EVENT1, NULL);
+ gpio_request(GPIO_FN_EVENT0, NULL);
+
+ /* LED for heartbeat */
+ gpio_request(GPIO_PTU3, NULL);
+ gpio_direction_output(GPIO_PTU3, 1);
+ gpio_request(GPIO_PTU2, NULL);
+ gpio_direction_output(GPIO_PTU2, 1);
+ gpio_request(GPIO_PTU1, NULL);
+ gpio_direction_output(GPIO_PTU1, 1);
+ gpio_request(GPIO_PTU0, NULL);
+ gpio_direction_output(GPIO_PTU0, 1);
+
+ /* control for MDIO of Gigabit Ethernet */
+ gpio_request(GPIO_PTT4, NULL);
+ gpio_direction_output(GPIO_PTT4, 1);
+
+ /* control for eMMC */
+ gpio_request(GPIO_PTT7, NULL); /* eMMC_RST# */
+ gpio_direction_output(GPIO_PTT7, 0);
+ gpio_request(GPIO_PTT6, NULL); /* eMMC_INDEX# */
+ gpio_direction_output(GPIO_PTT6, 0);
+ gpio_request(GPIO_PTT5, NULL); /* eMMC_PRST# */
+ gpio_direction_output(GPIO_PTT5, 1);
+
+ /* General platform */
+ return platform_add_devices(sh7757lcr_devices,
+ ARRAY_SIZE(sh7757lcr_devices));
+}
+arch_initcall(sh7757lcr_devices_setup);
+
+/* Initialize IRQ setting */
+void __init init_sh7757lcr_IRQ(void)
+{
+ plat_irq_setup_pins(IRQ_MODE_IRQ7654);
+ plat_irq_setup_pins(IRQ_MODE_IRQ3210);
+}
+
+/* Initialize the board */
+static void __init sh7757lcr_setup(char **cmdline_p)
+{
+ printk(KERN_INFO "Renesas R0P7757LC0012RL support.\n");
+}
+
+static int sh7757lcr_mode_pins(void)
+{
+ int value = 0;
+
+ /* These are the factory default settings of S3 (Low active).
+ * If you change these dip switches then you will need to
+ * adjust the values below as well.
+ */
+ value |= MODE_PIN0; /* Clock Mode: 1 */
+
+ return value;
+}
+
+/* The Machine Vector */
+static struct sh_machine_vector mv_sh7757lcr __initmv = {
+ .mv_name = "SH7757LCR",
+ .mv_setup = sh7757lcr_setup,
+ .mv_init_irq = init_sh7757lcr_IRQ,
+ .mv_mode_pins = sh7757lcr_mode_pins,
+};
+
diff --git a/arch/sh/boards/mach-ap325rxa/setup.c b/arch/sh/boards/mach-ap325rxa/setup.c
index 3da116f47f01..07ea908c510d 100644
--- a/arch/sh/boards/mach-ap325rxa/setup.c
+++ b/arch/sh/boards/mach-ap325rxa/setup.c
@@ -176,6 +176,21 @@ static void ap320_wvga_power_off(void *board_data)
__raw_writew(0, FPGA_LCDREG);
}
+const static struct fb_videomode ap325rxa_lcdc_modes[] = {
+ {
+ .name = "LB070WV1",
+ .xres = 800,
+ .yres = 480,
+ .left_margin = 32,
+ .right_margin = 160,
+ .hsync_len = 8,
+ .upper_margin = 63,
+ .lower_margin = 80,
+ .vsync_len = 1,
+ .sync = 0, /* hsync and vsync are active low */
+ },
+};
+
static struct sh_mobile_lcdc_info lcdc_info = {
.clock_source = LCDC_CLK_EXTERNAL,
.ch[0] = {
@@ -183,18 +198,8 @@ static struct sh_mobile_lcdc_info lcdc_info = {
.bpp = 16,
.interface_type = RGB18,
.clock_divider = 1,
- .lcd_cfg = {
- .name = "LB070WV1",
- .xres = 800,
- .yres = 480,
- .left_margin = 32,
- .right_margin = 160,
- .hsync_len = 8,
- .upper_margin = 63,
- .lower_margin = 80,
- .vsync_len = 1,
- .sync = 0, /* hsync and vsync are active low */
- },
+ .lcd_cfg = ap325rxa_lcdc_modes,
+ .num_cfg = ARRAY_SIZE(ap325rxa_lcdc_modes),
.lcd_size_cfg = { /* 7.0 inch */
.width = 152,
.height = 91,
@@ -481,7 +486,6 @@ static struct soc_camera_link ov7725_link = {
.power = ov7725_power,
.board_info = &ap325rxa_i2c_camera[0],
.i2c_adapter_id = 0,
- .module_name = "ov772x",
.priv = &ov7725_info,
};
diff --git a/arch/sh/boards/mach-cayman/irq.c b/arch/sh/boards/mach-cayman/irq.c
index 1394b078db36..d7ac5af9d102 100644
--- a/arch/sh/boards/mach-cayman/irq.c
+++ b/arch/sh/boards/mach-cayman/irq.c
@@ -55,8 +55,9 @@ static struct irqaction cayman_action_pci2 = {
.flags = IRQF_DISABLED,
};
-static void enable_cayman_irq(unsigned int irq)
+static void enable_cayman_irq(struct irq_data *data)
{
+ unsigned int irq = data->irq;
unsigned long flags;
unsigned long mask;
unsigned int reg;
@@ -72,8 +73,9 @@ static void enable_cayman_irq(unsigned int irq)
local_irq_restore(flags);
}
-void disable_cayman_irq(unsigned int irq)
+static void disable_cayman_irq(struct irq_data *data)
{
+ unsigned int irq = data->irq;
unsigned long flags;
unsigned long mask;
unsigned int reg;
@@ -89,16 +91,10 @@ void disable_cayman_irq(unsigned int irq)
local_irq_restore(flags);
}
-static void ack_cayman_irq(unsigned int irq)
-{
- disable_cayman_irq(irq);
-}
-
struct irq_chip cayman_irq_type = {
.name = "Cayman-IRQ",
- .unmask = enable_cayman_irq,
- .mask = disable_cayman_irq,
- .mask_ack = ack_cayman_irq,
+ .irq_unmask = enable_cayman_irq,
+ .irq_mask = disable_cayman_irq,
};
int cayman_irq_demux(int evt)
diff --git a/arch/sh/boards/mach-dreamcast/irq.c b/arch/sh/boards/mach-dreamcast/irq.c
index d932667410ab..72e7ac9549da 100644
--- a/arch/sh/boards/mach-dreamcast/irq.c
+++ b/arch/sh/boards/mach-dreamcast/irq.c
@@ -60,8 +60,9 @@
*/
/* Disable the hardware event by masking its bit in its EMR */
-static inline void disable_systemasic_irq(unsigned int irq)
+static inline void disable_systemasic_irq(struct irq_data *data)
{
+ unsigned int irq = data->irq;
__u32 emr = EMR_BASE + (LEVEL(irq) << 4) + (LEVEL(irq) << 2);
__u32 mask;
@@ -71,8 +72,9 @@ static inline void disable_systemasic_irq(unsigned int irq)
}
/* Enable the hardware event by setting its bit in its EMR */
-static inline void enable_systemasic_irq(unsigned int irq)
+static inline void enable_systemasic_irq(struct irq_data *data)
{
+ unsigned int irq = data->irq;
__u32 emr = EMR_BASE + (LEVEL(irq) << 4) + (LEVEL(irq) << 2);
__u32 mask;
@@ -82,18 +84,19 @@ static inline void enable_systemasic_irq(unsigned int irq)
}
/* Acknowledge a hardware event by writing its bit back to its ESR */
-static void mask_ack_systemasic_irq(unsigned int irq)
+static void mask_ack_systemasic_irq(struct irq_data *data)
{
+ unsigned int irq = data->irq;
__u32 esr = ESR_BASE + (LEVEL(irq) << 2);
- disable_systemasic_irq(irq);
+ disable_systemasic_irq(data);
outl((1 << EVENT_BIT(irq)), esr);
}
struct irq_chip systemasic_int = {
.name = "System ASIC",
- .mask = disable_systemasic_irq,
- .mask_ack = mask_ack_systemasic_irq,
- .unmask = enable_systemasic_irq,
+ .irq_mask = disable_systemasic_irq,
+ .irq_mask_ack = mask_ack_systemasic_irq,
+ .irq_unmask = enable_systemasic_irq,
};
/*
diff --git a/arch/sh/boards/mach-ecovec24/setup.c b/arch/sh/boards/mach-ecovec24/setup.c
index 1d7b495a7db4..f48c492a68d3 100644
--- a/arch/sh/boards/mach-ecovec24/setup.c
+++ b/arch/sh/boards/mach-ecovec24/setup.c
@@ -231,14 +231,41 @@ static struct platform_device usb1_common_device = {
};
/* LCDC */
+const static struct fb_videomode ecovec_lcd_modes[] = {
+ {
+ .name = "Panel",
+ .xres = 800,
+ .yres = 480,
+ .left_margin = 220,
+ .right_margin = 110,
+ .hsync_len = 70,
+ .upper_margin = 20,
+ .lower_margin = 5,
+ .vsync_len = 5,
+ .sync = 0, /* hsync and vsync are active low */
+ },
+};
+
+const static struct fb_videomode ecovec_dvi_modes[] = {
+ {
+ .name = "DVI",
+ .xres = 1280,
+ .yres = 720,
+ .left_margin = 220,
+ .right_margin = 110,
+ .hsync_len = 40,
+ .upper_margin = 20,
+ .lower_margin = 5,
+ .vsync_len = 5,
+ .sync = 0, /* hsync and vsync are active low */
+ },
+};
+
static struct sh_mobile_lcdc_info lcdc_info = {
.ch[0] = {
.interface_type = RGB18,
.chan = LCDC_CHAN_MAINLCD,
.bpp = 16,
- .lcd_cfg = {
- .sync = 0, /* hsync and vsync are active low */
- },
.lcd_size_cfg = { /* 7.0 inch */
.width = 152,
.height = 91,
@@ -620,7 +647,6 @@ static struct soc_camera_link tw9910_link = {
.bus_id = 1,
.power = tw9910_power,
.board_info = &i2c_camera[0],
- .module_name = "tw9910",
.priv = &tw9910_info,
};
@@ -644,7 +670,6 @@ static struct soc_camera_link mt9t112_link1 = {
.power = mt9t112_power1,
.bus_id = 0,
.board_info = &i2c_camera[1],
- .module_name = "mt9t112",
.priv = &mt9t112_info1,
};
@@ -667,7 +692,6 @@ static struct soc_camera_link mt9t112_link2 = {
.power = mt9t112_power2,
.bus_id = 1,
.board_info = &i2c_camera[2],
- .module_name = "mt9t112",
.priv = &mt9t112_info2,
};
@@ -696,32 +720,6 @@ static struct platform_device camera_devices[] = {
};
/* FSI */
-/*
- * FSI-B use external clock which came from da7210.
- * So, we should change parent of fsi
- */
-#define FCLKBCR 0xa415000c
-static void fsimck_init(struct clk *clk)
-{
- u32 status = __raw_readl(clk->enable_reg);
-
- /* use external clock */
- status &= ~0x000000ff;
- status |= 0x00000080;
-
- __raw_writel(status, clk->enable_reg);
-}
-
-static struct clk_ops fsimck_clk_ops = {
- .init = fsimck_init,
-};
-
-static struct clk fsimckb_clk = {
- .ops = &fsimck_clk_ops,
- .enable_reg = (void __iomem *)FCLKBCR,
- .rate = 0, /* unknown */
-};
-
static struct sh_fsi_platform_info fsi_info = {
.portb_flags = SH_FSI_BRS_INV |
SH_FSI_OUT_SLAVE_MODE |
@@ -793,7 +791,6 @@ static struct sh_vou_pdata sh_vou_pdata = {
.flags = SH_VOU_HSYNC_LOW | SH_VOU_VSYNC_LOW,
.board_info = &ak8813,
.i2c_adap = 0,
- .module_name = "ak881x",
};
static struct resource sh_vou_resources[] = {
@@ -1079,33 +1076,18 @@ static int __init arch_setup(void)
if (gpio_get_value(GPIO_PTE6)) {
/* DVI */
lcdc_info.clock_source = LCDC_CLK_EXTERNAL;
- lcdc_info.ch[0].clock_divider = 1,
- lcdc_info.ch[0].lcd_cfg.name = "DVI";
- lcdc_info.ch[0].lcd_cfg.xres = 1280;
- lcdc_info.ch[0].lcd_cfg.yres = 720;
- lcdc_info.ch[0].lcd_cfg.left_margin = 220;
- lcdc_info.ch[0].lcd_cfg.right_margin = 110;
- lcdc_info.ch[0].lcd_cfg.hsync_len = 40;
- lcdc_info.ch[0].lcd_cfg.upper_margin = 20;
- lcdc_info.ch[0].lcd_cfg.lower_margin = 5;
- lcdc_info.ch[0].lcd_cfg.vsync_len = 5;
+ lcdc_info.ch[0].clock_divider = 1;
+ lcdc_info.ch[0].lcd_cfg = ecovec_dvi_modes;
+ lcdc_info.ch[0].num_cfg = ARRAY_SIZE(ecovec_dvi_modes);
gpio_set_value(GPIO_PTA2, 1);
gpio_set_value(GPIO_PTU1, 1);
} else {
/* Panel */
-
lcdc_info.clock_source = LCDC_CLK_PERIPHERAL;
- lcdc_info.ch[0].clock_divider = 2,
- lcdc_info.ch[0].lcd_cfg.name = "Panel";
- lcdc_info.ch[0].lcd_cfg.xres = 800;
- lcdc_info.ch[0].lcd_cfg.yres = 480;
- lcdc_info.ch[0].lcd_cfg.left_margin = 220;
- lcdc_info.ch[0].lcd_cfg.right_margin = 110;
- lcdc_info.ch[0].lcd_cfg.hsync_len = 70;
- lcdc_info.ch[0].lcd_cfg.upper_margin = 20;
- lcdc_info.ch[0].lcd_cfg.lower_margin = 5;
- lcdc_info.ch[0].lcd_cfg.vsync_len = 5;
+ lcdc_info.ch[0].clock_divider = 2;
+ lcdc_info.ch[0].lcd_cfg = ecovec_lcd_modes;
+ lcdc_info.ch[0].num_cfg = ARRAY_SIZE(ecovec_lcd_modes);
gpio_set_value(GPIO_PTR1, 1);
@@ -1248,18 +1230,18 @@ static int __init arch_setup(void)
/* set SPU2 clock to 83.4 MHz */
clk = clk_get(NULL, "spu_clk");
- if (clk) {
+ if (!IS_ERR(clk)) {
clk_set_rate(clk, clk_round_rate(clk, 83333333));
clk_put(clk);
}
/* change parent of FSI B */
clk = clk_get(NULL, "fsib_clk");
- if (clk) {
- clk_register(&fsimckb_clk);
- clk_set_parent(clk, &fsimckb_clk);
- clk_set_rate(clk, 11000);
- clk_set_rate(&fsimckb_clk, 11000);
+ if (!IS_ERR(clk)) {
+ /* 48kHz dummy clock was used to make sure 1/1 divide */
+ clk_set_rate(&sh7724_fsimckb_clk, 48000);
+ clk_set_parent(clk, &sh7724_fsimckb_clk);
+ clk_set_rate(clk, 48000);
clk_put(clk);
}
@@ -1273,7 +1255,7 @@ static int __init arch_setup(void)
/* set VPU clock to 166 MHz */
clk = clk_get(NULL, "vpu_clk");
- if (clk) {
+ if (!IS_ERR(clk)) {
clk_set_rate(clk, clk_round_rate(clk, 166000000));
clk_put(clk);
}
diff --git a/arch/sh/boards/mach-edosk7705/Makefile b/arch/sh/boards/mach-edosk7705/Makefile
deleted file mode 100644
index cd54acb51499..000000000000
--- a/arch/sh/boards/mach-edosk7705/Makefile
+++ /dev/null
@@ -1,5 +0,0 @@
-#
-# Makefile for the EDOSK7705 specific parts of the kernel
-#
-
-obj-y := setup.o io.o
diff --git a/arch/sh/boards/mach-edosk7705/io.c b/arch/sh/boards/mach-edosk7705/io.c
deleted file mode 100644
index 5b9c57c43241..000000000000
--- a/arch/sh/boards/mach-edosk7705/io.c
+++ /dev/null
@@ -1,71 +0,0 @@
-/*
- * arch/sh/boards/renesas/edosk7705/io.c
- *
- * Copyright (C) 2001 Ian da Silva, Jeremy Siegel
- * Based largely on io_se.c.
- *
- * I/O routines for Hitachi EDOSK7705 board.
- *
- */
-
-#include <linux/kernel.h>
-#include <linux/types.h>
-#include <linux/io.h>
-#include <mach/edosk7705.h>
-#include <asm/addrspace.h>
-
-#define SMC_IOADDR 0xA2000000
-
-/* Map the Ethernet addresses as if it is at 0x300 - 0x320 */
-static unsigned long sh_edosk7705_isa_port2addr(unsigned long port)
-{
- /*
- * SMC91C96 registers are 4 byte aligned rather than the
- * usual 2 byte!
- */
- if (port >= 0x300 && port < 0x320)
- return SMC_IOADDR + ((port - 0x300) * 2);
-
- maybebadio(port);
- return port;
-}
-
-/* Trying to read / write bytes on odd-byte boundaries to the Ethernet
- * registers causes problems. So we bit-shift the value and read / write
- * in 2 byte chunks. Setting the low byte to 0 does not cause problems
- * now as odd byte writes are only made on the bit mask / interrupt
- * register. This may not be the case in future Mar-2003 SJD
- */
-unsigned char sh_edosk7705_inb(unsigned long port)
-{
- if (port >= 0x300 && port < 0x320 && port & 0x01)
- return __raw_readw(port - 1) >> 8;
-
- return __raw_readb(sh_edosk7705_isa_port2addr(port));
-}
-
-void sh_edosk7705_outb(unsigned char value, unsigned long port)
-{
- if (port >= 0x300 && port < 0x320 && port & 0x01) {
- __raw_writew(((unsigned short)value << 8), port - 1);
- return;
- }
-
- __raw_writeb(value, sh_edosk7705_isa_port2addr(port));
-}
-
-void sh_edosk7705_insb(unsigned long port, void *addr, unsigned long count)
-{
- unsigned char *p = addr;
-
- while (count--)
- *p++ = sh_edosk7705_inb(port);
-}
-
-void sh_edosk7705_outsb(unsigned long port, const void *addr, unsigned long count)
-{
- unsigned char *p = (unsigned char *)addr;
-
- while (count--)
- sh_edosk7705_outb(*p++, port);
-}
diff --git a/arch/sh/boards/mach-edosk7705/setup.c b/arch/sh/boards/mach-edosk7705/setup.c
deleted file mode 100644
index d59225e26fb9..000000000000
--- a/arch/sh/boards/mach-edosk7705/setup.c
+++ /dev/null
@@ -1,36 +0,0 @@
-/*
- * arch/sh/boards/renesas/edosk7705/setup.c
- *
- * Copyright (C) 2000 Kazumoto Kojima
- *
- * Hitachi SolutionEngine Support.
- *
- * Modified for edosk7705 development
- * board by S. Dunn, 2003.
- */
-#include <linux/init.h>
-#include <linux/irq.h>
-#include <asm/machvec.h>
-#include <mach/edosk7705.h>
-
-static void __init sh_edosk7705_init_irq(void)
-{
- /* This is the Ethernet interrupt */
- make_imask_irq(0x09);
-}
-
-/*
- * The Machine Vector
- */
-static struct sh_machine_vector mv_edosk7705 __initmv = {
- .mv_name = "EDOSK7705",
- .mv_nr_irqs = 80,
-
- .mv_inb = sh_edosk7705_inb,
- .mv_outb = sh_edosk7705_outb,
-
- .mv_insb = sh_edosk7705_insb,
- .mv_outsb = sh_edosk7705_outsb,
-
- .mv_init_irq = sh_edosk7705_init_irq,
-};
diff --git a/arch/sh/boards/mach-kfr2r09/setup.c b/arch/sh/boards/mach-kfr2r09/setup.c
index 68994a163f6c..9b60eaabf8f3 100644
--- a/arch/sh/boards/mach-kfr2r09/setup.c
+++ b/arch/sh/boards/mach-kfr2r09/setup.c
@@ -126,6 +126,21 @@ static struct platform_device kfr2r09_sh_keysc_device = {
},
};
+const static struct fb_videomode kfr2r09_lcdc_modes[] = {
+ {
+ .name = "TX07D34VM0AAA",
+ .xres = 240,
+ .yres = 400,
+ .left_margin = 0,
+ .right_margin = 16,
+ .hsync_len = 8,
+ .upper_margin = 0,
+ .lower_margin = 1,
+ .vsync_len = 1,
+ .sync = FB_SYNC_HOR_HIGH_ACT | FB_SYNC_VERT_HIGH_ACT,
+ },
+};
+
static struct sh_mobile_lcdc_info kfr2r09_sh_lcdc_info = {
.clock_source = LCDC_CLK_BUS,
.ch[0] = {
@@ -134,18 +149,8 @@ static struct sh_mobile_lcdc_info kfr2r09_sh_lcdc_info = {
.interface_type = SYS18,
.clock_divider = 6,
.flags = LCDC_FLAGS_DWPOL,
- .lcd_cfg = {
- .name = "TX07D34VM0AAA",
- .xres = 240,
- .yres = 400,
- .left_margin = 0,
- .right_margin = 16,
- .hsync_len = 8,
- .upper_margin = 0,
- .lower_margin = 1,
- .vsync_len = 1,
- .sync = FB_SYNC_HOR_HIGH_ACT | FB_SYNC_VERT_HIGH_ACT,
- },
+ .lcd_cfg = kfr2r09_lcdc_modes,
+ .num_cfg = ARRAY_SIZE(kfr2r09_lcdc_modes),
.lcd_size_cfg = {
.width = 35,
.height = 58,
@@ -333,7 +338,6 @@ static struct soc_camera_link rj54n1_link = {
.power = camera_power,
.board_info = &kfr2r09_i2c_camera,
.i2c_adapter_id = 1,
- .module_name = "rj54n1cb0c",
.priv = &rj54n1_priv,
};
diff --git a/arch/sh/boards/mach-landisk/irq.c b/arch/sh/boards/mach-landisk/irq.c
index 96f38a4187d0..e79412a40490 100644
--- a/arch/sh/boards/mach-landisk/irq.c
+++ b/arch/sh/boards/mach-landisk/irq.c
@@ -18,25 +18,24 @@
#include <linux/io.h>
#include <mach-landisk/mach/iodata_landisk.h>
-static void disable_landisk_irq(unsigned int irq)
+static void disable_landisk_irq(struct irq_data *data)
{
- unsigned char mask = 0xff ^ (0x01 << (irq - 5));
+ unsigned char mask = 0xff ^ (0x01 << (data->irq - 5));
__raw_writeb(__raw_readb(PA_IMASK) & mask, PA_IMASK);
}
-static void enable_landisk_irq(unsigned int irq)
+static void enable_landisk_irq(struct irq_data *data)
{
- unsigned char value = (0x01 << (irq - 5));
+ unsigned char value = (0x01 << (data->irq - 5));
__raw_writeb(__raw_readb(PA_IMASK) | value, PA_IMASK);
}
static struct irq_chip landisk_irq_chip __read_mostly = {
.name = "LANDISK",
- .mask = disable_landisk_irq,
- .unmask = enable_landisk_irq,
- .mask_ack = disable_landisk_irq,
+ .irq_mask = disable_landisk_irq,
+ .irq_unmask = enable_landisk_irq,
};
/*
@@ -50,7 +49,7 @@ void __init init_landisk_IRQ(void)
disable_irq_nosync(i);
set_irq_chip_and_handler_name(i, &landisk_irq_chip,
handle_level_irq, "level");
- enable_landisk_irq(i);
+ enable_landisk_irq(irq_get_irq_data(i));
}
__raw_writeb(0x00, PA_PWRINT_CLR);
}
diff --git a/arch/sh/boards/mach-microdev/io.c b/arch/sh/boards/mach-microdev/io.c
index 2960c659020e..acdafb0c6404 100644
--- a/arch/sh/boards/mach-microdev/io.c
+++ b/arch/sh/boards/mach-microdev/io.c
@@ -54,7 +54,7 @@
/*
* map I/O ports to memory-mapped addresses
*/
-static unsigned long microdev_isa_port2addr(unsigned long offset)
+void __iomem *microdev_ioport_map(unsigned long offset, unsigned int len)
{
unsigned long result;
@@ -72,16 +72,6 @@ static unsigned long microdev_isa_port2addr(unsigned long offset)
* Configuration Registers
*/
result = IO_SUPERIO_PHYS + (offset << 1);
-#if 0
- } else if (offset == KBD_DATA_REG || offset == KBD_CNTL_REG ||
- offset == KBD_STATUS_REG) {
- /*
- * SMSC FDC37C93xAPM SuperIO chip
- *
- * PS/2 Keyboard + Mouse (ports 0x60 and 0x64).
- */
- result = IO_SUPERIO_PHYS + (offset << 1);
-#endif
} else if (((offset >= IO_IDE1_BASE) &&
(offset < IO_IDE1_BASE + IO_IDE_EXTENT)) ||
(offset == IO_IDE1_MISC)) {
@@ -131,237 +121,5 @@ static unsigned long microdev_isa_port2addr(unsigned long offset)
result = PVR;
}
- return result;
-}
-
-#define PORT2ADDR(x) (microdev_isa_port2addr(x))
-
-static inline void delay(void)
-{
-#if defined(CONFIG_PCI)
- /* System board present, just make a dummy SRAM access. (CS0 will be
- mapped to PCI memory, probably good to avoid it.) */
- __raw_readw(0xa6800000);
-#else
- /* CS0 will be mapped to flash, ROM etc so safe to access it. */
- __raw_readw(0xa0000000);
-#endif
-}
-
-unsigned char microdev_inb(unsigned long port)
-{
-#ifdef CONFIG_PCI
- if (port >= PCIBIOS_MIN_IO)
- return microdev_pci_inb(port);
-#endif
- return *(volatile unsigned char*)PORT2ADDR(port);
-}
-
-unsigned short microdev_inw(unsigned long port)
-{
-#ifdef CONFIG_PCI
- if (port >= PCIBIOS_MIN_IO)
- return microdev_pci_inw(port);
-#endif
- return *(volatile unsigned short*)PORT2ADDR(port);
-}
-
-unsigned int microdev_inl(unsigned long port)
-{
-#ifdef CONFIG_PCI
- if (port >= PCIBIOS_MIN_IO)
- return microdev_pci_inl(port);
-#endif
- return *(volatile unsigned int*)PORT2ADDR(port);
-}
-
-void microdev_outw(unsigned short b, unsigned long port)
-{
-#ifdef CONFIG_PCI
- if (port >= PCIBIOS_MIN_IO) {
- microdev_pci_outw(b, port);
- return;
- }
-#endif
- *(volatile unsigned short*)PORT2ADDR(port) = b;
-}
-
-void microdev_outb(unsigned char b, unsigned long port)
-{
-#ifdef CONFIG_PCI
- if (port >= PCIBIOS_MIN_IO) {
- microdev_pci_outb(b, port);
- return;
- }
-#endif
-
- /*
- * There is a board feature with the current SH4-202 MicroDev in
- * that the 2 byte enables (nBE0 and nBE1) are tied together (and
- * to the Chip Select Line (Ethernet_CS)). Due to this connectivity,
- * it is not possible to safely perform 8-bit writes to the
- * Ethernet registers, as 16-bits will be consumed from the Data
- * lines (corrupting the other byte). Hence, this function is
- * written to implement 16-bit read/modify/write for all byte-wide
- * accesses.
- *
- * Note: there is no problem with byte READS (even or odd).
- *
- * Sean McGoogan - 16th June 2003.
- */
- if ((port >= IO_LAN91C111_BASE) &&
- (port < IO_LAN91C111_BASE + IO_LAN91C111_EXTENT)) {
- /*
- * Then are trying to perform a byte-write to the
- * LAN91C111. This needs special care.
- */
- if (port % 2 == 1) { /* is the port odd ? */
- /* unset bit-0, i.e. make even */
- const unsigned long evenPort = port-1;
- unsigned short word;
-
- /*
- * do a 16-bit read/write to write to 'port',
- * preserving even byte.
- *
- * Even addresses are bits 0-7
- * Odd addresses are bits 8-15
- */
- word = microdev_inw(evenPort);
- word = (word & 0xffu) | (b << 8);
- microdev_outw(word, evenPort);
- } else {
- /* else, we are trying to do an even byte write */
- unsigned short word;
-
- /*
- * do a 16-bit read/write to write to 'port',
- * preserving odd byte.
- *
- * Even addresses are bits 0-7
- * Odd addresses are bits 8-15
- */
- word = microdev_inw(port);
- word = (word & 0xff00u) | (b);
- microdev_outw(word, port);
- }
- } else {
- *(volatile unsigned char*)PORT2ADDR(port) = b;
- }
-}
-
-void microdev_outl(unsigned int b, unsigned long port)
-{
-#ifdef CONFIG_PCI
- if (port >= PCIBIOS_MIN_IO) {
- microdev_pci_outl(b, port);
- return;
- }
-#endif
- *(volatile unsigned int*)PORT2ADDR(port) = b;
-}
-
-unsigned char microdev_inb_p(unsigned long port)
-{
- unsigned char v = microdev_inb(port);
- delay();
- return v;
-}
-
-unsigned short microdev_inw_p(unsigned long port)
-{
- unsigned short v = microdev_inw(port);
- delay();
- return v;
-}
-
-unsigned int microdev_inl_p(unsigned long port)
-{
- unsigned int v = microdev_inl(port);
- delay();
- return v;
-}
-
-void microdev_outb_p(unsigned char b, unsigned long port)
-{
- microdev_outb(b, port);
- delay();
-}
-
-void microdev_outw_p(unsigned short b, unsigned long port)
-{
- microdev_outw(b, port);
- delay();
-}
-
-void microdev_outl_p(unsigned int b, unsigned long port)
-{
- microdev_outl(b, port);
- delay();
-}
-
-void microdev_insb(unsigned long port, void *buffer, unsigned long count)
-{
- volatile unsigned char *port_addr;
- unsigned char *buf = buffer;
-
- port_addr = (volatile unsigned char *)PORT2ADDR(port);
-
- while (count--)
- *buf++ = *port_addr;
-}
-
-void microdev_insw(unsigned long port, void *buffer, unsigned long count)
-{
- volatile unsigned short *port_addr;
- unsigned short *buf = buffer;
-
- port_addr = (volatile unsigned short *)PORT2ADDR(port);
-
- while (count--)
- *buf++ = *port_addr;
-}
-
-void microdev_insl(unsigned long port, void *buffer, unsigned long count)
-{
- volatile unsigned long *port_addr;
- unsigned int *buf = buffer;
-
- port_addr = (volatile unsigned long *)PORT2ADDR(port);
-
- while (count--)
- *buf++ = *port_addr;
-}
-
-void microdev_outsb(unsigned long port, const void *buffer, unsigned long count)
-{
- volatile unsigned char *port_addr;
- const unsigned char *buf = buffer;
-
- port_addr = (volatile unsigned char *)PORT2ADDR(port);
-
- while (count--)
- *port_addr = *buf++;
-}
-
-void microdev_outsw(unsigned long port, const void *buffer, unsigned long count)
-{
- volatile unsigned short *port_addr;
- const unsigned short *buf = buffer;
-
- port_addr = (volatile unsigned short *)PORT2ADDR(port);
-
- while (count--)
- *port_addr = *buf++;
-}
-
-void microdev_outsl(unsigned long port, const void *buffer, unsigned long count)
-{
- volatile unsigned long *port_addr;
- const unsigned int *buf = buffer;
-
- port_addr = (volatile unsigned long *)PORT2ADDR(port);
-
- while (count--)
- *port_addr = *buf++;
+ return (void __iomem *)result;
}
diff --git a/arch/sh/boards/mach-microdev/irq.c b/arch/sh/boards/mach-microdev/irq.c
index a26d16669aa2..c35001fd9032 100644
--- a/arch/sh/boards/mach-microdev/irq.c
+++ b/arch/sh/boards/mach-microdev/irq.c
@@ -65,19 +65,9 @@ static const struct {
# error Inconsistency in defining the IRQ# for primary IDE!
#endif
-static void enable_microdev_irq(unsigned int irq);
-static void disable_microdev_irq(unsigned int irq);
-static void mask_and_ack_microdev(unsigned int);
-
-static struct irq_chip microdev_irq_type = {
- .name = "MicroDev-IRQ",
- .unmask = enable_microdev_irq,
- .mask = disable_microdev_irq,
- .ack = mask_and_ack_microdev,
-};
-
-static void disable_microdev_irq(unsigned int irq)
+static void disable_microdev_irq(struct irq_data *data)
{
+ unsigned int irq = data->irq;
unsigned int fpgaIrq;
if (irq >= NUM_EXTERNAL_IRQS)
@@ -91,8 +81,9 @@ static void disable_microdev_irq(unsigned int irq)
__raw_writel(MICRODEV_FPGA_INTC_MASK(fpgaIrq), MICRODEV_FPGA_INTDSB_REG);
}
-static void enable_microdev_irq(unsigned int irq)
+static void enable_microdev_irq(struct irq_data *data)
{
+ unsigned int irq = data->irq;
unsigned long priorityReg, priorities, pri;
unsigned int fpgaIrq;
@@ -116,17 +107,18 @@ static void enable_microdev_irq(unsigned int irq)
__raw_writel(MICRODEV_FPGA_INTC_MASK(fpgaIrq), MICRODEV_FPGA_INTENB_REG);
}
+static struct irq_chip microdev_irq_type = {
+ .name = "MicroDev-IRQ",
+ .irq_unmask = enable_microdev_irq,
+ .irq_mask = disable_microdev_irq,
+};
+
/* This function sets the desired irq handler to be a MicroDev type */
static void __init make_microdev_irq(unsigned int irq)
{
disable_irq_nosync(irq);
set_irq_chip_and_handler(irq, &microdev_irq_type, handle_level_irq);
- disable_microdev_irq(irq);
-}
-
-static void mask_and_ack_microdev(unsigned int irq)
-{
- disable_microdev_irq(irq);
+ disable_microdev_irq(irq_get_irq_data(irq));
}
extern void __init init_microdev_irq(void)
diff --git a/arch/sh/boards/mach-microdev/setup.c b/arch/sh/boards/mach-microdev/setup.c
index d1df2a4fb9b8..d8a747291e03 100644
--- a/arch/sh/boards/mach-microdev/setup.c
+++ b/arch/sh/boards/mach-microdev/setup.c
@@ -195,27 +195,6 @@ device_initcall(microdev_devices_setup);
static struct sh_machine_vector mv_sh4202_microdev __initmv = {
.mv_name = "SH4-202 MicroDev",
.mv_nr_irqs = 72,
-
- .mv_inb = microdev_inb,
- .mv_inw = microdev_inw,
- .mv_inl = microdev_inl,
- .mv_outb = microdev_outb,
- .mv_outw = microdev_outw,
- .mv_outl = microdev_outl,
-
- .mv_inb_p = microdev_inb_p,
- .mv_inw_p = microdev_inw_p,
- .mv_inl_p = microdev_inl_p,
- .mv_outb_p = microdev_outb_p,
- .mv_outw_p = microdev_outw_p,
- .mv_outl_p = microdev_outl_p,
-
- .mv_insb = microdev_insb,
- .mv_insw = microdev_insw,
- .mv_insl = microdev_insl,
- .mv_outsb = microdev_outsb,
- .mv_outsw = microdev_outsw,
- .mv_outsl = microdev_outsl,
-
+ .mv_ioport_map = microdev_ioport_map,
.mv_init_irq = init_microdev_irq,
};
diff --git a/arch/sh/boards/mach-migor/setup.c b/arch/sh/boards/mach-migor/setup.c
index 662debe4ead2..c8acfec98695 100644
--- a/arch/sh/boards/mach-migor/setup.c
+++ b/arch/sh/boards/mach-migor/setup.c
@@ -213,51 +213,55 @@ static struct platform_device migor_nand_flash_device = {
}
};
+const static struct fb_videomode migor_lcd_modes[] = {
+ {
+#if defined(CONFIG_SH_MIGOR_RTA_WVGA)
+ .name = "LB070WV1",
+ .xres = 800,
+ .yres = 480,
+ .left_margin = 64,
+ .right_margin = 16,
+ .hsync_len = 120,
+ .sync = 0,
+#elif defined(CONFIG_SH_MIGOR_QVGA)
+ .name = "PH240320T",
+ .xres = 320,
+ .yres = 240,
+ .left_margin = 0,
+ .right_margin = 16,
+ .hsync_len = 8,
+ .sync = FB_SYNC_HOR_HIGH_ACT,
+#endif
+ .upper_margin = 1,
+ .lower_margin = 17,
+ .vsync_len = 2,
+ },
+};
+
static struct sh_mobile_lcdc_info sh_mobile_lcdc_info = {
-#ifdef CONFIG_SH_MIGOR_RTA_WVGA
+#if defined(CONFIG_SH_MIGOR_RTA_WVGA)
.clock_source = LCDC_CLK_BUS,
.ch[0] = {
.chan = LCDC_CHAN_MAINLCD,
.bpp = 16,
.interface_type = RGB16,
.clock_divider = 2,
- .lcd_cfg = {
- .name = "LB070WV1",
- .xres = 800,
- .yres = 480,
- .left_margin = 64,
- .right_margin = 16,
- .hsync_len = 120,
- .upper_margin = 1,
- .lower_margin = 17,
- .vsync_len = 2,
- .sync = 0,
- },
+ .lcd_cfg = migor_lcd_modes,
+ .num_cfg = ARRAY_SIZE(migor_lcd_modes),
.lcd_size_cfg = { /* 7.0 inch */
.width = 152,
.height = 91,
},
}
-#endif
-#ifdef CONFIG_SH_MIGOR_QVGA
+#elif defined(CONFIG_SH_MIGOR_QVGA)
.clock_source = LCDC_CLK_PERIPHERAL,
.ch[0] = {
.chan = LCDC_CHAN_MAINLCD,
.bpp = 16,
.interface_type = SYS16A,
.clock_divider = 10,
- .lcd_cfg = {
- .name = "PH240320T",
- .xres = 320,
- .yres = 240,
- .left_margin = 0,
- .right_margin = 16,
- .hsync_len = 8,
- .upper_margin = 1,
- .lower_margin = 17,
- .vsync_len = 2,
- .sync = FB_SYNC_HOR_HIGH_ACT,
- },
+ .lcd_cfg = migor_lcd_modes,
+ .num_cfg = ARRAY_SIZE(migor_lcd_modes),
.lcd_size_cfg = { /* 2.4 inch */
.width = 49,
.height = 37,
@@ -450,7 +454,6 @@ static struct soc_camera_link ov7725_link = {
.power = ov7725_power,
.board_info = &migor_i2c_camera[0],
.i2c_adapter_id = 0,
- .module_name = "ov772x",
.priv = &ov7725_info,
};
@@ -463,7 +466,6 @@ static struct soc_camera_link tw9910_link = {
.power = tw9910_power,
.board_info = &migor_i2c_camera[1],
.i2c_adapter_id = 0,
- .module_name = "tw9910",
.priv = &tw9910_info,
};
diff --git a/arch/sh/boards/mach-sdk7786/Makefile b/arch/sh/boards/mach-sdk7786/Makefile
index a29f19e85b63..23ff7d4ac491 100644
--- a/arch/sh/boards/mach-sdk7786/Makefile
+++ b/arch/sh/boards/mach-sdk7786/Makefile
@@ -1 +1,4 @@
-obj-y := setup.o fpga.o irq.o
+obj-y := fpga.o irq.o setup.o
+
+obj-$(CONFIG_GENERIC_GPIO) += gpio.o
+obj-$(CONFIG_HAVE_SRAM_POOL) += sram.o
diff --git a/arch/sh/boards/mach-sdk7786/gpio.c b/arch/sh/boards/mach-sdk7786/gpio.c
new file mode 100644
index 000000000000..f71ce09d4e15
--- /dev/null
+++ b/arch/sh/boards/mach-sdk7786/gpio.c
@@ -0,0 +1,49 @@
+/*
+ * SDK7786 FPGA USRGPIR Support.
+ *
+ * Copyright (C) 2010 Paul Mundt
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ */
+#include <linux/init.h>
+#include <linux/interrupt.h>
+#include <linux/gpio.h>
+#include <linux/irq.h>
+#include <linux/kernel.h>
+#include <linux/spinlock.h>
+#include <linux/io.h>
+#include <mach/fpga.h>
+
+#define NR_FPGA_GPIOS 8
+
+static const char *usrgpir_gpio_names[NR_FPGA_GPIOS] = {
+ "in0", "in1", "in2", "in3", "in4", "in5", "in6", "in7",
+};
+
+static int usrgpir_gpio_direction_input(struct gpio_chip *chip, unsigned gpio)
+{
+ /* always in */
+ return 0;
+}
+
+static int usrgpir_gpio_get(struct gpio_chip *chip, unsigned gpio)
+{
+ return !!(fpga_read_reg(USRGPIR) & (1 << gpio));
+}
+
+static struct gpio_chip usrgpir_gpio_chip = {
+ .label = "sdk7786-fpga",
+ .names = usrgpir_gpio_names,
+ .direction_input = usrgpir_gpio_direction_input,
+ .get = usrgpir_gpio_get,
+ .base = -1, /* don't care */
+ .ngpio = NR_FPGA_GPIOS,
+};
+
+static int __init usrgpir_gpio_setup(void)
+{
+ return gpiochip_add(&usrgpir_gpio_chip);
+}
+device_initcall(usrgpir_gpio_setup);
diff --git a/arch/sh/boards/mach-sdk7786/setup.c b/arch/sh/boards/mach-sdk7786/setup.c
index 2ec1ea5cf8ef..7e0c4e3878e0 100644
--- a/arch/sh/boards/mach-sdk7786/setup.c
+++ b/arch/sh/boards/mach-sdk7786/setup.c
@@ -20,6 +20,8 @@
#include <asm/machvec.h>
#include <asm/heartbeat.h>
#include <asm/sizes.h>
+#include <asm/clock.h>
+#include <asm/clkdev.h>
#include <asm/reboot.h>
#include <asm/smp-ops.h>
@@ -140,6 +142,45 @@ static int sdk7786_mode_pins(void)
return fpga_read_reg(MODSWR);
}
+/*
+ * FPGA-driven PCIe clocks
+ *
+ * Historically these include the oscillator, clock B (slots 2/3/4) and
+ * clock A (slot 1 and the CPU clock). Newer revs of the PCB shove
+ * everything under a single PCIe clocks enable bit that happens to map
+ * to the same bit position as the oscillator bit for earlier FPGA
+ * versions.
+ *
+ * Given that the legacy clocks have the side-effect of shutting the CPU
+ * off through the FPGA along with the PCI slots, we simply leave them in
+ * their initial state and don't bother registering them with the clock
+ * framework.
+ */
+static int sdk7786_pcie_clk_enable(struct clk *clk)
+{
+ fpga_write_reg(fpga_read_reg(PCIECR) | PCIECR_CLKEN, PCIECR);
+ return 0;
+}
+
+static void sdk7786_pcie_clk_disable(struct clk *clk)
+{
+ fpga_write_reg(fpga_read_reg(PCIECR) & ~PCIECR_CLKEN, PCIECR);
+}
+
+static struct clk_ops sdk7786_pcie_clk_ops = {
+ .enable = sdk7786_pcie_clk_enable,
+ .disable = sdk7786_pcie_clk_disable,
+};
+
+static struct clk sdk7786_pcie_clk = {
+ .ops = &sdk7786_pcie_clk_ops,
+};
+
+static struct clk_lookup sdk7786_pcie_cl = {
+ .con_id = "pcie_plat_clk",
+ .clk = &sdk7786_pcie_clk,
+};
+
static int sdk7786_clk_init(void)
{
struct clk *clk;
@@ -158,7 +199,18 @@ static int sdk7786_clk_init(void)
ret = clk_set_rate(clk, 33333333);
clk_put(clk);
- return ret;
+ /*
+ * Setup the FPGA clocks.
+ */
+ ret = clk_register(&sdk7786_pcie_clk);
+ if (unlikely(ret)) {
+ pr_err("FPGA clock registration failed\n");
+ return ret;
+ }
+
+ clkdev_add(&sdk7786_pcie_cl);
+
+ return 0;
}
static void sdk7786_restart(char *cmd)
diff --git a/arch/sh/boards/mach-sdk7786/sram.c b/arch/sh/boards/mach-sdk7786/sram.c
new file mode 100644
index 000000000000..c81c3abbe01c
--- /dev/null
+++ b/arch/sh/boards/mach-sdk7786/sram.c
@@ -0,0 +1,72 @@
+/*
+ * SDK7786 FPGA SRAM Support.
+ *
+ * Copyright (C) 2010 Paul Mundt
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ */
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/types.h>
+#include <linux/io.h>
+#include <linux/string.h>
+#include <mach/fpga.h>
+#include <asm/sram.h>
+#include <asm/sizes.h>
+
+static int __init fpga_sram_init(void)
+{
+ unsigned long phys;
+ unsigned int area;
+ void __iomem *vaddr;
+ int ret;
+ u16 data;
+
+ /* Enable FPGA SRAM */
+ data = fpga_read_reg(LCLASR);
+ data |= LCLASR_FRAMEN;
+ fpga_write_reg(data, LCLASR);
+
+ /*
+ * FPGA_SEL determines the area mapping
+ */
+ area = (data & LCLASR_FPGA_SEL_MASK) >> LCLASR_FPGA_SEL_SHIFT;
+ if (unlikely(area == LCLASR_AREA_MASK)) {
+ pr_err("FPGA memory unmapped.\n");
+ return -ENXIO;
+ }
+
+ /*
+ * The memory itself occupies a 2KiB range at the top of the area
+ * immediately below the system registers.
+ */
+ phys = (area << 26) + SZ_64M - SZ_4K;
+
+ /*
+ * The FPGA SRAM resides in translatable physical space, so set
+ * up a mapping prior to inserting it in to the pool.
+ */
+ vaddr = ioremap(phys, SZ_2K);
+ if (unlikely(!vaddr)) {
+ pr_err("Failed remapping FPGA memory.\n");
+ return -ENXIO;
+ }
+
+ pr_info("Adding %dKiB of FPGA memory at 0x%08lx-0x%08lx "
+ "(area %d) to pool.\n",
+ SZ_2K >> 10, phys, phys + SZ_2K - 1, area);
+
+ ret = gen_pool_add(sram_pool, (unsigned long)vaddr, SZ_2K, -1);
+ if (unlikely(ret < 0)) {
+ pr_err("Failed adding memory\n");
+ iounmap(vaddr);
+ return ret;
+ }
+
+ return 0;
+}
+postcore_initcall(fpga_sram_init);
diff --git a/arch/sh/boards/mach-se/7206/Makefile b/arch/sh/boards/mach-se/7206/Makefile
index 63e7ed699f39..5c9eaa0535b9 100644
--- a/arch/sh/boards/mach-se/7206/Makefile
+++ b/arch/sh/boards/mach-se/7206/Makefile
@@ -2,4 +2,4 @@
# Makefile for the 7206 SolutionEngine specific parts of the kernel
#
-obj-y := setup.o io.o irq.o
+obj-y := setup.o irq.o
diff --git a/arch/sh/boards/mach-se/7206/io.c b/arch/sh/boards/mach-se/7206/io.c
deleted file mode 100644
index adadc77532ee..000000000000
--- a/arch/sh/boards/mach-se/7206/io.c
+++ /dev/null
@@ -1,104 +0,0 @@
-/* $Id: io.c,v 1.5 2004/02/22 23:08:43 kkojima Exp $
- *
- * linux/arch/sh/boards/se/7206/io.c
- *
- * Copyright (C) 2006 Yoshinori Sato
- *
- * I/O routine for Hitachi 7206 SolutionEngine.
- *
- */
-
-#include <linux/kernel.h>
-#include <linux/types.h>
-#include <asm/io.h>
-#include <mach-se/mach/se7206.h>
-
-
-static inline void delay(void)
-{
- __raw_readw(0x20000000); /* P2 ROM Area */
-}
-
-/* MS7750 requires special versions of in*, out* routines, since
- PC-like io ports are located at upper half byte of 16-bit word which
- can be accessed only with 16-bit wide. */
-
-static inline volatile __u16 *
-port2adr(unsigned int port)
-{
- if (port >= 0x2000 && port < 0x2020)
- return (volatile __u16 *) (PA_MRSHPC + (port - 0x2000));
- else if (port >= 0x300 && port < 0x310)
- return (volatile __u16 *) (PA_SMSC + (port - 0x300));
-
- return (volatile __u16 *)port;
-}
-
-unsigned char se7206_inb(unsigned long port)
-{
- return (*port2adr(port)) & 0xff;
-}
-
-unsigned char se7206_inb_p(unsigned long port)
-{
- unsigned long v;
-
- v = (*port2adr(port)) & 0xff;
- delay();
- return v;
-}
-
-unsigned short se7206_inw(unsigned long port)
-{
- return *port2adr(port);
-}
-
-void se7206_outb(unsigned char value, unsigned long port)
-{
- *(port2adr(port)) = value;
-}
-
-void se7206_outb_p(unsigned char value, unsigned long port)
-{
- *(port2adr(port)) = value;
- delay();
-}
-
-void se7206_outw(unsigned short value, unsigned long port)
-{
- *port2adr(port) = value;
-}
-
-void se7206_insb(unsigned long port, void *addr, unsigned long count)
-{
- volatile __u16 *p = port2adr(port);
- __u8 *ap = addr;
-
- while (count--)
- *ap++ = *p;
-}
-
-void se7206_insw(unsigned long port, void *addr, unsigned long count)
-{
- volatile __u16 *p = port2adr(port);
- __u16 *ap = addr;
- while (count--)
- *ap++ = *p;
-}
-
-void se7206_outsb(unsigned long port, const void *addr, unsigned long count)
-{
- volatile __u16 *p = port2adr(port);
- const __u8 *ap = addr;
-
- while (count--)
- *p = *ap++;
-}
-
-void se7206_outsw(unsigned long port, const void *addr, unsigned long count)
-{
- volatile __u16 *p = port2adr(port);
- const __u16 *ap = addr;
- while (count--)
- *p = *ap++;
-}
diff --git a/arch/sh/boards/mach-se/7206/irq.c b/arch/sh/boards/mach-se/7206/irq.c
index 8d82175d83ab..d961949600fd 100644
--- a/arch/sh/boards/mach-se/7206/irq.c
+++ b/arch/sh/boards/mach-se/7206/irq.c
@@ -25,8 +25,9 @@
#define INTC_IPR01 0xfffe0818
#define INTC_ICR1 0xfffe0802
-static void disable_se7206_irq(unsigned int irq)
+static void disable_se7206_irq(struct irq_data *data)
{
+ unsigned int irq = data->irq;
unsigned short val;
unsigned short mask = 0xffff ^ (0x0f << 4 * (3 - (IRQ0_IRQ - irq)));
unsigned short msk0,msk1;
@@ -55,8 +56,9 @@ static void disable_se7206_irq(unsigned int irq)
__raw_writew(msk1, INTMSK1);
}
-static void enable_se7206_irq(unsigned int irq)
+static void enable_se7206_irq(struct irq_data *data)
{
+ unsigned int irq = data->irq;
unsigned short val;
unsigned short value = (0x0001 << 4 * (3 - (IRQ0_IRQ - irq)));
unsigned short msk0,msk1;
@@ -86,13 +88,14 @@ static void enable_se7206_irq(unsigned int irq)
__raw_writew(msk1, INTMSK1);
}
-static void eoi_se7206_irq(unsigned int irq)
+static void eoi_se7206_irq(struct irq_data *data)
{
unsigned short sts0,sts1;
+ unsigned int irq = data->irq;
struct irq_desc *desc = irq_to_desc(irq);
if (!(desc->status & (IRQ_DISABLED|IRQ_INPROGRESS)))
- enable_se7206_irq(irq);
+ enable_se7206_irq(data);
/* FPGA isr clear */
sts0 = __raw_readw(INTSTS0);
sts1 = __raw_readw(INTSTS1);
@@ -115,10 +118,9 @@ static void eoi_se7206_irq(unsigned int irq)
static struct irq_chip se7206_irq_chip __read_mostly = {
.name = "SE7206-FPGA",
- .mask = disable_se7206_irq,
- .unmask = enable_se7206_irq,
- .mask_ack = disable_se7206_irq,
- .eoi = eoi_se7206_irq,
+ .irq_mask = disable_se7206_irq,
+ .irq_unmask = enable_se7206_irq,
+ .irq_eoi = eoi_se7206_irq,
};
static void make_se7206_irq(unsigned int irq)
@@ -126,7 +128,7 @@ static void make_se7206_irq(unsigned int irq)
disable_irq_nosync(irq);
set_irq_chip_and_handler_name(irq, &se7206_irq_chip,
handle_level_irq, "level");
- disable_se7206_irq(irq);
+ disable_se7206_irq(irq_get_irq_data(irq));
}
/*
@@ -137,11 +139,13 @@ void __init init_se7206_IRQ(void)
make_se7206_irq(IRQ0_IRQ); /* SMC91C111 */
make_se7206_irq(IRQ1_IRQ); /* ATA */
make_se7206_irq(IRQ3_IRQ); /* SLOT / PCM */
- __raw_writew(inw(INTC_ICR1) | 0x000b ,INTC_ICR1 ) ; /* ICR1 */
+
+ __raw_writew(__raw_readw(INTC_ICR1) | 0x000b, INTC_ICR1); /* ICR1 */
/* FPGA System register setup*/
__raw_writew(0x0000,INTSTS0); /* Clear INTSTS0 */
__raw_writew(0x0000,INTSTS1); /* Clear INTSTS1 */
+
/* IRQ0=LAN, IRQ1=ATA, IRQ3=SLT,PCM */
__raw_writew(0x0001,INTSEL);
}
diff --git a/arch/sh/boards/mach-se/7206/setup.c b/arch/sh/boards/mach-se/7206/setup.c
index 8f5c65d43d1d..7f4871c71a01 100644
--- a/arch/sh/boards/mach-se/7206/setup.c
+++ b/arch/sh/boards/mach-se/7206/setup.c
@@ -86,20 +86,5 @@ __initcall(se7206_devices_setup);
static struct sh_machine_vector mv_se __initmv = {
.mv_name = "SolutionEngine",
.mv_nr_irqs = 256,
- .mv_inb = se7206_inb,
- .mv_inw = se7206_inw,
- .mv_outb = se7206_outb,
- .mv_outw = se7206_outw,
-
- .mv_inb_p = se7206_inb_p,
- .mv_inw_p = se7206_inw,
- .mv_outb_p = se7206_outb_p,
- .mv_outw_p = se7206_outw,
-
- .mv_insb = se7206_insb,
- .mv_insw = se7206_insw,
- .mv_outsb = se7206_outsb,
- .mv_outsw = se7206_outsw,
-
.mv_init_irq = init_se7206_IRQ,
};
diff --git a/arch/sh/boards/mach-se/7343/irq.c b/arch/sh/boards/mach-se/7343/irq.c
index d4305c26e9f7..76255a19417f 100644
--- a/arch/sh/boards/mach-se/7343/irq.c
+++ b/arch/sh/boards/mach-se/7343/irq.c
@@ -18,23 +18,22 @@
unsigned int se7343_fpga_irq[SE7343_FPGA_IRQ_NR] = { 0, };
-static void disable_se7343_irq(unsigned int irq)
+static void disable_se7343_irq(struct irq_data *data)
{
- unsigned int bit = (unsigned int)get_irq_chip_data(irq);
+ unsigned int bit = (unsigned int)irq_data_get_irq_chip_data(data);
__raw_writew(__raw_readw(PA_CPLD_IMSK) | 1 << bit, PA_CPLD_IMSK);
}
-static void enable_se7343_irq(unsigned int irq)
+static void enable_se7343_irq(struct irq_data *data)
{
- unsigned int bit = (unsigned int)get_irq_chip_data(irq);
+ unsigned int bit = (unsigned int)irq_data_get_irq_chip_data(data);
__raw_writew(__raw_readw(PA_CPLD_IMSK) & ~(1 << bit), PA_CPLD_IMSK);
}
static struct irq_chip se7343_irq_chip __read_mostly = {
- .name = "SE7343-FPGA",
- .mask = disable_se7343_irq,
- .unmask = enable_se7343_irq,
- .mask_ack = disable_se7343_irq,
+ .name = "SE7343-FPGA",
+ .irq_mask = disable_se7343_irq,
+ .irq_unmask = enable_se7343_irq,
};
static void se7343_irq_demux(unsigned int irq, struct irq_desc *desc)
diff --git a/arch/sh/boards/mach-se/770x/Makefile b/arch/sh/boards/mach-se/770x/Makefile
index 8e624b06d5ea..43ea14feef51 100644
--- a/arch/sh/boards/mach-se/770x/Makefile
+++ b/arch/sh/boards/mach-se/770x/Makefile
@@ -2,4 +2,4 @@
# Makefile for the 770x SolutionEngine specific parts of the kernel
#
-obj-y := setup.o io.o irq.o
+obj-y := setup.o irq.o
diff --git a/arch/sh/boards/mach-se/770x/io.c b/arch/sh/boards/mach-se/770x/io.c
deleted file mode 100644
index 28833c8786ea..000000000000
--- a/arch/sh/boards/mach-se/770x/io.c
+++ /dev/null
@@ -1,156 +0,0 @@
-/*
- * Copyright (C) 2000 Kazumoto Kojima
- *
- * I/O routine for Hitachi SolutionEngine.
- */
-#include <linux/kernel.h>
-#include <linux/types.h>
-#include <asm/io.h>
-#include <mach-se/mach/se.h>
-
-/* MS7750 requires special versions of in*, out* routines, since
- PC-like io ports are located at upper half byte of 16-bit word which
- can be accessed only with 16-bit wide. */
-
-static inline volatile __u16 *
-port2adr(unsigned int port)
-{
- if (port & 0xff000000)
- return ( volatile __u16 *) port;
- if (port >= 0x2000)
- return (volatile __u16 *) (PA_MRSHPC + (port - 0x2000));
- else if (port >= 0x1000)
- return (volatile __u16 *) (PA_83902 + (port << 1));
- else
- return (volatile __u16 *) (PA_SUPERIO + (port << 1));
-}
-
-static inline int
-shifted_port(unsigned long port)
-{
- /* For IDE registers, value is not shifted */
- if ((0x1f0 <= port && port < 0x1f8) || port == 0x3f6)
- return 0;
- else
- return 1;
-}
-
-unsigned char se_inb(unsigned long port)
-{
- if (shifted_port(port))
- return (*port2adr(port) >> 8);
- else
- return (*port2adr(port))&0xff;
-}
-
-unsigned char se_inb_p(unsigned long port)
-{
- unsigned long v;
-
- if (shifted_port(port))
- v = (*port2adr(port) >> 8);
- else
- v = (*port2adr(port))&0xff;
- ctrl_delay();
- return v;
-}
-
-unsigned short se_inw(unsigned long port)
-{
- if (port >= 0x2000)
- return *port2adr(port);
- else
- maybebadio(port);
- return 0;
-}
-
-unsigned int se_inl(unsigned long port)
-{
- maybebadio(port);
- return 0;
-}
-
-void se_outb(unsigned char value, unsigned long port)
-{
- if (shifted_port(port))
- *(port2adr(port)) = value << 8;
- else
- *(port2adr(port)) = value;
-}
-
-void se_outb_p(unsigned char value, unsigned long port)
-{
- if (shifted_port(port))
- *(port2adr(port)) = value << 8;
- else
- *(port2adr(port)) = value;
- ctrl_delay();
-}
-
-void se_outw(unsigned short value, unsigned long port)
-{
- if (port >= 0x2000)
- *port2adr(port) = value;
- else
- maybebadio(port);
-}
-
-void se_outl(unsigned int value, unsigned long port)
-{
- maybebadio(port);
-}
-
-void se_insb(unsigned long port, void *addr, unsigned long count)
-{
- volatile __u16 *p = port2adr(port);
- __u8 *ap = addr;
-
- if (shifted_port(port)) {
- while (count--)
- *ap++ = *p >> 8;
- } else {
- while (count--)
- *ap++ = *p;
- }
-}
-
-void se_insw(unsigned long port, void *addr, unsigned long count)
-{
- volatile __u16 *p = port2adr(port);
- __u16 *ap = addr;
- while (count--)
- *ap++ = *p;
-}
-
-void se_insl(unsigned long port, void *addr, unsigned long count)
-{
- maybebadio(port);
-}
-
-void se_outsb(unsigned long port, const void *addr, unsigned long count)
-{
- volatile __u16 *p = port2adr(port);
- const __u8 *ap = addr;
-
- if (shifted_port(port)) {
- while (count--)
- *p = *ap++ << 8;
- } else {
- while (count--)
- *p = *ap++;
- }
-}
-
-void se_outsw(unsigned long port, const void *addr, unsigned long count)
-{
- volatile __u16 *p = port2adr(port);
- const __u16 *ap = addr;
-
- while (count--)
- *p = *ap++;
-}
-
-void se_outsl(unsigned long port, const void *addr, unsigned long count)
-{
- maybebadio(port);
-}
diff --git a/arch/sh/boards/mach-se/770x/setup.c b/arch/sh/boards/mach-se/770x/setup.c
index 66d39d1b0901..31330c65c0ce 100644
--- a/arch/sh/boards/mach-se/770x/setup.c
+++ b/arch/sh/boards/mach-se/770x/setup.c
@@ -195,27 +195,5 @@ static struct sh_machine_vector mv_se __initmv = {
#elif defined(CONFIG_CPU_SUBTYPE_SH7710) || defined(CONFIG_CPU_SUBTYPE_SH7712)
.mv_nr_irqs = 104,
#endif
-
- .mv_inb = se_inb,
- .mv_inw = se_inw,
- .mv_inl = se_inl,
- .mv_outb = se_outb,
- .mv_outw = se_outw,
- .mv_outl = se_outl,
-
- .mv_inb_p = se_inb_p,
- .mv_inw_p = se_inw,
- .mv_inl_p = se_inl,
- .mv_outb_p = se_outb_p,
- .mv_outw_p = se_outw,
- .mv_outl_p = se_outl,
-
- .mv_insb = se_insb,
- .mv_insw = se_insw,
- .mv_insl = se_insl,
- .mv_outsb = se_outsb,
- .mv_outsw = se_outsw,
- .mv_outsl = se_outsl,
-
.mv_init_irq = init_se_IRQ,
};
diff --git a/arch/sh/boards/mach-se/7722/irq.c b/arch/sh/boards/mach-se/7722/irq.c
index 61605db04ee6..c013f95628ed 100644
--- a/arch/sh/boards/mach-se/7722/irq.c
+++ b/arch/sh/boards/mach-se/7722/irq.c
@@ -18,23 +18,22 @@
unsigned int se7722_fpga_irq[SE7722_FPGA_IRQ_NR] = { 0, };
-static void disable_se7722_irq(unsigned int irq)
+static void disable_se7722_irq(struct irq_data *data)
{
- unsigned int bit = (unsigned int)get_irq_chip_data(irq);
+ unsigned int bit = (unsigned int)irq_data_get_irq_chip_data(data);
__raw_writew(__raw_readw(IRQ01_MASK) | 1 << bit, IRQ01_MASK);
}
-static void enable_se7722_irq(unsigned int irq)
+static void enable_se7722_irq(struct irq_data *data)
{
- unsigned int bit = (unsigned int)get_irq_chip_data(irq);
+ unsigned int bit = (unsigned int)irq_data_get_irq_chip_data(data);
__raw_writew(__raw_readw(IRQ01_MASK) & ~(1 << bit), IRQ01_MASK);
}
static struct irq_chip se7722_irq_chip __read_mostly = {
- .name = "SE7722-FPGA",
- .mask = disable_se7722_irq,
- .unmask = enable_se7722_irq,
- .mask_ack = disable_se7722_irq,
+ .name = "SE7722-FPGA",
+ .irq_mask = disable_se7722_irq,
+ .irq_unmask = enable_se7722_irq,
};
static void se7722_irq_demux(unsigned int irq, struct irq_desc *desc)
diff --git a/arch/sh/boards/mach-se/7724/irq.c b/arch/sh/boards/mach-se/7724/irq.c
index 0942be2daef6..5bd87c22b65b 100644
--- a/arch/sh/boards/mach-se/7724/irq.c
+++ b/arch/sh/boards/mach-se/7724/irq.c
@@ -68,25 +68,26 @@ static struct fpga_irq get_fpga_irq(unsigned int irq)
return set;
}
-static void disable_se7724_irq(unsigned int irq)
+static void disable_se7724_irq(struct irq_data *data)
{
+ unsigned int irq = data->irq;
struct fpga_irq set = get_fpga_irq(fpga2irq(irq));
unsigned int bit = irq - set.base;
__raw_writew(__raw_readw(set.mraddr) | 0x0001 << bit, set.mraddr);
}
-static void enable_se7724_irq(unsigned int irq)
+static void enable_se7724_irq(struct irq_data *data)
{
+ unsigned int irq = data->irq;
struct fpga_irq set = get_fpga_irq(fpga2irq(irq));
unsigned int bit = irq - set.base;
__raw_writew(__raw_readw(set.mraddr) & ~(0x0001 << bit), set.mraddr);
}
static struct irq_chip se7724_irq_chip __read_mostly = {
- .name = "SE7724-FPGA",
- .mask = disable_se7724_irq,
- .unmask = enable_se7724_irq,
- .mask_ack = disable_se7724_irq,
+ .name = "SE7724-FPGA",
+ .irq_mask = disable_se7724_irq,
+ .irq_unmask = enable_se7724_irq,
};
static void se7724_irq_demux(unsigned int irq, struct irq_desc *desc)
diff --git a/arch/sh/boards/mach-se/7724/setup.c b/arch/sh/boards/mach-se/7724/setup.c
index 552ebd9ba82b..527a0cd956b5 100644
--- a/arch/sh/boards/mach-se/7724/setup.c
+++ b/arch/sh/boards/mach-se/7724/setup.c
@@ -144,16 +144,42 @@ static struct platform_device nor_flash_device = {
};
/* LCDC */
+static const struct fb_videomode lcdc_720p_modes[] = {
+ {
+ .name = "LB070WV1",
+ .sync = 0, /* hsync and vsync are active low */
+ .xres = 1280,
+ .yres = 720,
+ .left_margin = 220,
+ .right_margin = 110,
+ .hsync_len = 40,
+ .upper_margin = 20,
+ .lower_margin = 5,
+ .vsync_len = 5,
+ },
+};
+
+static const struct fb_videomode lcdc_vga_modes[] = {
+ {
+ .name = "LB070WV1",
+ .sync = 0, /* hsync and vsync are active low */
+ .xres = 640,
+ .yres = 480,
+ .left_margin = 105,
+ .right_margin = 50,
+ .hsync_len = 96,
+ .upper_margin = 33,
+ .lower_margin = 10,
+ .vsync_len = 2,
+ },
+};
+
static struct sh_mobile_lcdc_info lcdc_info = {
.clock_source = LCDC_CLK_EXTERNAL,
.ch[0] = {
.chan = LCDC_CHAN_MAINLCD,
.bpp = 16,
.clock_divider = 1,
- .lcd_cfg = {
- .name = "LB070WV1",
- .sync = 0, /* hsync and vsync are active low */
- },
.lcd_size_cfg = { /* 7.0 inch */
.width = 152,
.height = 91,
@@ -257,31 +283,6 @@ static struct platform_device ceu1_device = {
};
/* FSI */
-/*
- * FSI-A use external clock which came from ak464x.
- * So, we should change parent of fsi
- */
-#define FCLKACR 0xa4150008
-static void fsimck_init(struct clk *clk)
-{
- u32 status = __raw_readl(clk->enable_reg);
-
- /* use external clock */
- status &= ~0x000000ff;
- status |= 0x00000080;
- __raw_writel(status, clk->enable_reg);
-}
-
-static struct clk_ops fsimck_clk_ops = {
- .init = fsimck_init,
-};
-
-static struct clk fsimcka_clk = {
- .ops = &fsimck_clk_ops,
- .enable_reg = (void __iomem *)FCLKACR,
- .rate = 0, /* unknown */
-};
-
/* change J20, J21, J22 pin to 1-2 connection to use slave mode */
static struct sh_fsi_platform_info fsi_info = {
.porta_flags = SH_FSI_BRS_INV |
@@ -550,7 +551,6 @@ static struct sh_vou_pdata sh_vou_pdata = {
.flags = SH_VOU_HSYNC_LOW | SH_VOU_VSYNC_LOW,
.board_info = &ak8813,
.i2c_adap = 0,
- .module_name = "ak881x",
};
static struct resource sh_vou_resources[] = {
@@ -827,37 +827,29 @@ static int __init devices_setup(void)
gpio_request(GPIO_FN_KEYOUT0, NULL);
/* enable FSI */
- gpio_request(GPIO_FN_FSIMCKB, NULL);
gpio_request(GPIO_FN_FSIMCKA, NULL);
+ gpio_request(GPIO_FN_FSIIASD, NULL);
gpio_request(GPIO_FN_FSIOASD, NULL);
gpio_request(GPIO_FN_FSIIABCK, NULL);
gpio_request(GPIO_FN_FSIIALRCK, NULL);
gpio_request(GPIO_FN_FSIOABCK, NULL);
gpio_request(GPIO_FN_FSIOALRCK, NULL);
gpio_request(GPIO_FN_CLKAUDIOAO, NULL);
- gpio_request(GPIO_FN_FSIIBSD, NULL);
- gpio_request(GPIO_FN_FSIOBSD, NULL);
- gpio_request(GPIO_FN_FSIIBBCK, NULL);
- gpio_request(GPIO_FN_FSIIBLRCK, NULL);
- gpio_request(GPIO_FN_FSIOBBCK, NULL);
- gpio_request(GPIO_FN_FSIOBLRCK, NULL);
- gpio_request(GPIO_FN_CLKAUDIOBO, NULL);
- gpio_request(GPIO_FN_FSIIASD, NULL);
/* set SPU2 clock to 83.4 MHz */
clk = clk_get(NULL, "spu_clk");
- if (clk) {
+ if (!IS_ERR(clk)) {
clk_set_rate(clk, clk_round_rate(clk, 83333333));
clk_put(clk);
}
/* change parent of FSI A */
clk = clk_get(NULL, "fsia_clk");
- if (clk) {
- clk_register(&fsimcka_clk);
- clk_set_parent(clk, &fsimcka_clk);
- clk_set_rate(clk, 11000);
- clk_set_rate(&fsimcka_clk, 11000);
+ if (!IS_ERR(clk)) {
+ /* use a 48 kHz dummy clock rate to ensure a 1/1 divider */
+ clk_set_rate(&sh7724_fsimcka_clk, 48000);
+ clk_set_parent(clk, &sh7724_fsimcka_clk);
+ clk_set_rate(clk, 48000);
clk_put(clk);
}
@@ -909,24 +901,12 @@ static int __init devices_setup(void)
if (sw & SW41_B) {
/* 720p */
- lcdc_info.ch[0].lcd_cfg.xres = 1280;
- lcdc_info.ch[0].lcd_cfg.yres = 720;
- lcdc_info.ch[0].lcd_cfg.left_margin = 220;
- lcdc_info.ch[0].lcd_cfg.right_margin = 110;
- lcdc_info.ch[0].lcd_cfg.hsync_len = 40;
- lcdc_info.ch[0].lcd_cfg.upper_margin = 20;
- lcdc_info.ch[0].lcd_cfg.lower_margin = 5;
- lcdc_info.ch[0].lcd_cfg.vsync_len = 5;
+ lcdc_info.ch[0].lcd_cfg = lcdc_720p_modes;
+ lcdc_info.ch[0].num_cfg = ARRAY_SIZE(lcdc_720p_modes);
} else {
/* VGA */
- lcdc_info.ch[0].lcd_cfg.xres = 640;
- lcdc_info.ch[0].lcd_cfg.yres = 480;
- lcdc_info.ch[0].lcd_cfg.left_margin = 105;
- lcdc_info.ch[0].lcd_cfg.right_margin = 50;
- lcdc_info.ch[0].lcd_cfg.hsync_len = 96;
- lcdc_info.ch[0].lcd_cfg.upper_margin = 33;
- lcdc_info.ch[0].lcd_cfg.lower_margin = 10;
- lcdc_info.ch[0].lcd_cfg.vsync_len = 2;
+ lcdc_info.ch[0].lcd_cfg = lcdc_vga_modes;
+ lcdc_info.ch[0].num_cfg = ARRAY_SIZE(lcdc_vga_modes);
}
if (sw & SW41_A) {
diff --git a/arch/sh/boards/mach-se/7751/Makefile b/arch/sh/boards/mach-se/7751/Makefile
index e6f4341bfe6e..a338fd9d5039 100644
--- a/arch/sh/boards/mach-se/7751/Makefile
+++ b/arch/sh/boards/mach-se/7751/Makefile
@@ -2,4 +2,4 @@
# Makefile for the 7751 SolutionEngine specific parts of the kernel
#
-obj-y := setup.o io.o irq.o
+obj-y := setup.o irq.o
diff --git a/arch/sh/boards/mach-se/7751/io.c b/arch/sh/boards/mach-se/7751/io.c
deleted file mode 100644
index 6e75bd4459e5..000000000000
--- a/arch/sh/boards/mach-se/7751/io.c
+++ /dev/null
@@ -1,119 +0,0 @@
-/*
- * Copyright (C) 2001 Ian da Silva, Jeremy Siegel
- * Based largely on io_se.c.
- *
- * I/O routine for Hitachi 7751 SolutionEngine.
- *
- * Initial version only to support LAN access; some
- * placeholder code from io_se.c left in with the
- * expectation of later SuperIO and PCMCIA access.
- */
-#include <linux/kernel.h>
-#include <linux/types.h>
-#include <linux/pci.h>
-#include <asm/io.h>
-#include <mach-se/mach/se7751.h>
-#include <asm/addrspace.h>
-
-static inline volatile u16 *port2adr(unsigned int port)
-{
- if (port >= 0x2000)
- return (volatile __u16 *) (PA_MRSHPC + (port - 0x2000));
- maybebadio((unsigned long)port);
- return (volatile __u16*)port;
-}
-
-/*
- * General outline: remap really low stuff [eventually] to SuperIO,
- * stuff in PCI IO space (at or above window at pci.h:PCIBIOS_MIN_IO)
- * is mapped through the PCI IO window. Stuff with high bits (PXSEG)
- * should be way beyond the window, and is used w/o translation for
- * compatibility.
- */
-unsigned char sh7751se_inb(unsigned long port)
-{
- if (PXSEG(port))
- return *(volatile unsigned char *)port;
- else
- return (*port2adr(port)) & 0xff;
-}
-
-unsigned char sh7751se_inb_p(unsigned long port)
-{
- unsigned char v;
-
- if (PXSEG(port))
- v = *(volatile unsigned char *)port;
- else
- v = (*port2adr(port)) & 0xff;
- ctrl_delay();
- return v;
-}
-
-unsigned short sh7751se_inw(unsigned long port)
-{
- if (PXSEG(port))
- return *(volatile unsigned short *)port;
- else if (port >= 0x2000)
- return *port2adr(port);
- else
- maybebadio(port);
- return 0;
-}
-
-unsigned int sh7751se_inl(unsigned long port)
-{
- if (PXSEG(port))
- return *(volatile unsigned long *)port;
- else if (port >= 0x2000)
- return *port2adr(port);
- else
- maybebadio(port);
- return 0;
-}
-
-void sh7751se_outb(unsigned char value, unsigned long port)
-{
-
- if (PXSEG(port))
- *(volatile unsigned char *)port = value;
- else
- *(port2adr(port)) = value;
-}
-
-void sh7751se_outb_p(unsigned char value, unsigned long port)
-{
- if (PXSEG(port))
- *(volatile unsigned char *)port = value;
- else
- *(port2adr(port)) = value;
- ctrl_delay();
-}
-
-void sh7751se_outw(unsigned short value, unsigned long port)
-{
- if (PXSEG(port))
- *(volatile unsigned short *)port = value;
- else if (port >= 0x2000)
- *port2adr(port) = value;
- else
- maybebadio(port);
-}
-
-void sh7751se_outl(unsigned int value, unsigned long port)
-{
- if (PXSEG(port))
- *(volatile unsigned long *)port = value;
- else
- maybebadio(port);
-}
-
-void sh7751se_insl(unsigned long port, void *addr, unsigned long count)
-{
- maybebadio(port);
-}
-
-void sh7751se_outsl(unsigned long port, const void *addr, unsigned long count)
-{
- maybebadio(port);
-}
diff --git a/arch/sh/boards/mach-se/7751/setup.c b/arch/sh/boards/mach-se/7751/setup.c
index 50572512e3e8..9fbc51beb181 100644
--- a/arch/sh/boards/mach-se/7751/setup.c
+++ b/arch/sh/boards/mach-se/7751/setup.c
@@ -56,23 +56,5 @@ __initcall(se7751_devices_setup);
static struct sh_machine_vector mv_7751se __initmv = {
.mv_name = "7751 SolutionEngine",
.mv_nr_irqs = 72,
-
- .mv_inb = sh7751se_inb,
- .mv_inw = sh7751se_inw,
- .mv_inl = sh7751se_inl,
- .mv_outb = sh7751se_outb,
- .mv_outw = sh7751se_outw,
- .mv_outl = sh7751se_outl,
-
- .mv_inb_p = sh7751se_inb_p,
- .mv_inw_p = sh7751se_inw,
- .mv_inl_p = sh7751se_inl,
- .mv_outb_p = sh7751se_outb_p,
- .mv_outw_p = sh7751se_outw,
- .mv_outl_p = sh7751se_outl,
-
- .mv_insl = sh7751se_insl,
- .mv_outsl = sh7751se_outsl,
-
.mv_init_irq = init_7751se_IRQ,
};
diff --git a/arch/sh/boards/mach-snapgear/Makefile b/arch/sh/boards/mach-snapgear/Makefile
deleted file mode 100644
index d2d2f4b6a502..000000000000
--- a/arch/sh/boards/mach-snapgear/Makefile
+++ /dev/null
@@ -1,5 +0,0 @@
-#
-# Makefile for the SnapGear specific parts of the kernel
-#
-
-obj-y := setup.o io.o
diff --git a/arch/sh/boards/mach-snapgear/io.c b/arch/sh/boards/mach-snapgear/io.c
deleted file mode 100644
index 476650e42dbc..000000000000
--- a/arch/sh/boards/mach-snapgear/io.c
+++ /dev/null
@@ -1,121 +0,0 @@
-/*
- * Copyright (C) 2002 David McCullough <davidm@snapgear.com>
- * Copyright (C) 2001 Ian da Silva, Jeremy Siegel
- * Based largely on io_se.c.
- *
- * I/O routine for Hitachi 7751 SolutionEngine.
- *
- * Initial version only to support LAN access; some
- * placeholder code from io_se.c left in with the
- * expectation of later SuperIO and PCMCIA access.
- */
-#include <linux/kernel.h>
-#include <linux/types.h>
-#include <linux/pci.h>
-#include <asm/io.h>
-#include <asm/addrspace.h>
-
-#ifdef CONFIG_SH_SECUREEDGE5410
-unsigned short secureedge5410_ioport;
-#endif
-
-static inline volatile __u16 *port2adr(unsigned int port)
-{
- maybebadio((unsigned long)port);
- return (volatile __u16*)port;
-}
-
-/*
- * General outline: remap really low stuff [eventually] to SuperIO,
- * stuff in PCI IO space (at or above window at pci.h:PCIBIOS_MIN_IO)
- * is mapped through the PCI IO window. Stuff with high bits (PXSEG)
- * should be way beyond the window, and is used w/o translation for
- * compatibility.
- */
-unsigned char snapgear_inb(unsigned long port)
-{
- if (PXSEG(port))
- return *(volatile unsigned char *)port;
- else
- return (*port2adr(port)) & 0xff;
-}
-
-unsigned char snapgear_inb_p(unsigned long port)
-{
- unsigned char v;
-
- if (PXSEG(port))
- v = *(volatile unsigned char *)port;
- else
- v = (*port2adr(port))&0xff;
- ctrl_delay();
- return v;
-}
-
-unsigned short snapgear_inw(unsigned long port)
-{
- if (PXSEG(port))
- return *(volatile unsigned short *)port;
- else if (port >= 0x2000)
- return *port2adr(port);
- else
- maybebadio(port);
- return 0;
-}
-
-unsigned int snapgear_inl(unsigned long port)
-{
- if (PXSEG(port))
- return *(volatile unsigned long *)port;
- else if (port >= 0x2000)
- return *port2adr(port);
- else
- maybebadio(port);
- return 0;
-}
-
-void snapgear_outb(unsigned char value, unsigned long port)
-{
-
- if (PXSEG(port))
- *(volatile unsigned char *)port = value;
- else
- *(port2adr(port)) = value;
-}
-
-void snapgear_outb_p(unsigned char value, unsigned long port)
-{
- if (PXSEG(port))
- *(volatile unsigned char *)port = value;
- else
- *(port2adr(port)) = value;
- ctrl_delay();
-}
-
-void snapgear_outw(unsigned short value, unsigned long port)
-{
- if (PXSEG(port))
- *(volatile unsigned short *)port = value;
- else if (port >= 0x2000)
- *port2adr(port) = value;
- else
- maybebadio(port);
-}
-
-void snapgear_outl(unsigned int value, unsigned long port)
-{
- if (PXSEG(port))
- *(volatile unsigned long *)port = value;
- else
- maybebadio(port);
-}
-
-void snapgear_insl(unsigned long port, void *addr, unsigned long count)
-{
- maybebadio(port);
-}
-
-void snapgear_outsl(unsigned long port, const void *addr, unsigned long count)
-{
- maybebadio(port);
-}
diff --git a/arch/sh/boards/mach-systemh/Makefile b/arch/sh/boards/mach-systemh/Makefile
deleted file mode 100644
index 2cc6a23d9d39..000000000000
--- a/arch/sh/boards/mach-systemh/Makefile
+++ /dev/null
@@ -1,13 +0,0 @@
-#
-# Makefile for the SystemH specific parts of the kernel
-#
-
-obj-y := setup.o irq.o io.o
-
-# XXX: This wants to be consolidated in arch/sh/drivers/pci, and more
-# importantly, with the generic sh7751_pcic_init() code. For now, we'll
-# just abuse the hell out of kbuild, because we can..
-
-obj-$(CONFIG_PCI) += pci.o
-pci-y := ../../se/7751/pci.o
-
diff --git a/arch/sh/boards/mach-systemh/io.c b/arch/sh/boards/mach-systemh/io.c
deleted file mode 100644
index 15577ff1f715..000000000000
--- a/arch/sh/boards/mach-systemh/io.c
+++ /dev/null
@@ -1,158 +0,0 @@
-/*
- * linux/arch/sh/boards/renesas/systemh/io.c
- *
- * Copyright (C) 2001 Ian da Silva, Jeremy Siegel
- * Based largely on io_se.c.
- *
- * I/O routine for Hitachi 7751 Systemh.
- */
-#include <linux/kernel.h>
-#include <linux/types.h>
-#include <linux/pci.h>
-#include <mach/systemh7751.h>
-#include <asm/addrspace.h>
-#include <asm/io.h>
-
-#define ETHER_IOMAP(adr) (0xB3000000 + (adr)) /*map to 16bits access area
- of smc lan chip*/
-static inline volatile __u16 *
-port2adr(unsigned int port)
-{
- if (port >= 0x2000)
- return (volatile __u16 *) (PA_MRSHPC + (port - 0x2000));
- maybebadio((unsigned long)port);
- return (volatile __u16*)port;
-}
-
-/*
- * General outline: remap really low stuff [eventually] to SuperIO,
- * stuff in PCI IO space (at or above window at pci.h:PCIBIOS_MIN_IO)
- * is mapped through the PCI IO window. Stuff with high bits (PXSEG)
- * should be way beyond the window, and is used w/o translation for
- * compatibility.
- */
-unsigned char sh7751systemh_inb(unsigned long port)
-{
- if (PXSEG(port))
- return *(volatile unsigned char *)port;
- else if (port <= 0x3F1)
- return *(volatile unsigned char *)ETHER_IOMAP(port);
- else
- return (*port2adr(port))&0xff;
-}
-
-unsigned char sh7751systemh_inb_p(unsigned long port)
-{
- unsigned char v;
-
- if (PXSEG(port))
- v = *(volatile unsigned char *)port;
- else if (port <= 0x3F1)
- v = *(volatile unsigned char *)ETHER_IOMAP(port);
- else
- v = (*port2adr(port))&0xff;
- ctrl_delay();
- return v;
-}
-
-unsigned short sh7751systemh_inw(unsigned long port)
-{
- if (PXSEG(port))
- return *(volatile unsigned short *)port;
- else if (port >= 0x2000)
- return *port2adr(port);
- else if (port <= 0x3F1)
- return *(volatile unsigned int *)ETHER_IOMAP(port);
- else
- maybebadio(port);
- return 0;
-}
-
-unsigned int sh7751systemh_inl(unsigned long port)
-{
- if (PXSEG(port))
- return *(volatile unsigned long *)port;
- else if (port >= 0x2000)
- return *port2adr(port);
- else if (port <= 0x3F1)
- return *(volatile unsigned int *)ETHER_IOMAP(port);
- else
- maybebadio(port);
- return 0;
-}
-
-void sh7751systemh_outb(unsigned char value, unsigned long port)
-{
-
- if (PXSEG(port))
- *(volatile unsigned char *)port = value;
- else if (port <= 0x3F1)
- *(volatile unsigned char *)ETHER_IOMAP(port) = value;
- else
- *(port2adr(port)) = value;
-}
-
-void sh7751systemh_outb_p(unsigned char value, unsigned long port)
-{
- if (PXSEG(port))
- *(volatile unsigned char *)port = value;
- else if (port <= 0x3F1)
- *(volatile unsigned char *)ETHER_IOMAP(port) = value;
- else
- *(port2adr(port)) = value;
- ctrl_delay();
-}
-
-void sh7751systemh_outw(unsigned short value, unsigned long port)
-{
- if (PXSEG(port))
- *(volatile unsigned short *)port = value;
- else if (port >= 0x2000)
- *port2adr(port) = value;
- else if (port <= 0x3F1)
- *(volatile unsigned short *)ETHER_IOMAP(port) = value;
- else
- maybebadio(port);
-}
-
-void sh7751systemh_outl(unsigned int value, unsigned long port)
-{
- if (PXSEG(port))
- *(volatile unsigned long *)port = value;
- else
- maybebadio(port);
-}
-
-void sh7751systemh_insb(unsigned long port, void *addr, unsigned long count)
-{
- unsigned char *p = addr;
- while (count--) *p++ = sh7751systemh_inb(port);
-}
-
-void sh7751systemh_insw(unsigned long port, void *addr, unsigned long count)
-{
- unsigned short *p = addr;
- while (count--) *p++ = sh7751systemh_inw(port);
-}
-
-void sh7751systemh_insl(unsigned long port, void *addr, unsigned long count)
-{
- maybebadio(port);
-}
-
-void sh7751systemh_outsb(unsigned long port, const void *addr, unsigned long count)
-{
- unsigned char *p = (unsigned char*)addr;
- while (count--) sh7751systemh_outb(*p++, port);
-}
-
-void sh7751systemh_outsw(unsigned long port, const void *addr, unsigned long count)
-{
- unsigned short *p = (unsigned short*)addr;
- while (count--) sh7751systemh_outw(*p++, port);
-}
-
-void sh7751systemh_outsl(unsigned long port, const void *addr, unsigned long count)
-{
- maybebadio(port);
-}
diff --git a/arch/sh/boards/mach-systemh/irq.c b/arch/sh/boards/mach-systemh/irq.c
deleted file mode 100644
index 523aea5dc94e..000000000000
--- a/arch/sh/boards/mach-systemh/irq.c
+++ /dev/null
@@ -1,76 +0,0 @@
-/*
- * linux/arch/sh/boards/renesas/systemh/irq.c
- *
- * Copyright (C) 2000 Kazumoto Kojima
- *
- * Hitachi SystemH Support.
- *
- * Modified for 7751 SystemH by
- * Jonathan Short.
- */
-
-#include <linux/init.h>
-#include <linux/irq.h>
-#include <linux/interrupt.h>
-#include <linux/io.h>
-
-#include <mach/systemh7751.h>
-#include <asm/smc37c93x.h>
-
-/* address of external interrupt mask register
- * address must be set prior to use these (maybe in init_XXX_irq())
- * XXX : is it better to use .config than specifying it in code? */
-static unsigned long *systemh_irq_mask_register = (unsigned long *)0xB3F10004;
-static unsigned long *systemh_irq_request_register = (unsigned long *)0xB3F10000;
-
-/* forward declaration */
-static void enable_systemh_irq(unsigned int irq);
-static void disable_systemh_irq(unsigned int irq);
-static void mask_and_ack_systemh(unsigned int);
-
-static struct irq_chip systemh_irq_type = {
- .name = " SystemH Register",
- .unmask = enable_systemh_irq,
- .mask = disable_systemh_irq,
- .ack = mask_and_ack_systemh,
-};
-
-static void disable_systemh_irq(unsigned int irq)
-{
- if (systemh_irq_mask_register) {
- unsigned long val, mask = 0x01 << 1;
-
- /* Clear the "irq"th bit in the mask and set it in the request */
- val = __raw_readl((unsigned long)systemh_irq_mask_register);
- val &= ~mask;
- __raw_writel(val, (unsigned long)systemh_irq_mask_register);
-
- val = __raw_readl((unsigned long)systemh_irq_request_register);
- val |= mask;
- __raw_writel(val, (unsigned long)systemh_irq_request_register);
- }
-}
-
-static void enable_systemh_irq(unsigned int irq)
-{
- if (systemh_irq_mask_register) {
- unsigned long val, mask = 0x01 << 1;
-
- /* Set "irq"th bit in the mask register */
- val = __raw_readl((unsigned long)systemh_irq_mask_register);
- val |= mask;
- __raw_writel(val, (unsigned long)systemh_irq_mask_register);
- }
-}
-
-static void mask_and_ack_systemh(unsigned int irq)
-{
- disable_systemh_irq(irq);
-}
-
-void make_systemh_irq(unsigned int irq)
-{
- disable_irq_nosync(irq);
- set_irq_chip_and_handler(irq, &systemh_irq_type, handle_level_irq);
- disable_systemh_irq(irq);
-}
diff --git a/arch/sh/boards/mach-systemh/setup.c b/arch/sh/boards/mach-systemh/setup.c
deleted file mode 100644
index 219fd800a43f..000000000000
--- a/arch/sh/boards/mach-systemh/setup.c
+++ /dev/null
@@ -1,57 +0,0 @@
-/*
- * linux/arch/sh/boards/renesas/systemh/setup.c
- *
- * Copyright (C) 2000 Kazumoto Kojima
- * Copyright (C) 2003 Paul Mundt
- *
- * Hitachi SystemH Support.
- *
- * Modified for 7751 SystemH by Jonathan Short.
- *
- * Rewritten for 2.6 by Paul Mundt.
- *
- * This file is subject to the terms and conditions of the GNU General Public
- * License. See the file "COPYING" in the main directory of this archive
- * for more details.
- */
-#include <linux/init.h>
-#include <asm/machvec.h>
-#include <mach/systemh7751.h>
-
-extern void make_systemh_irq(unsigned int irq);
-
-/*
- * Initialize IRQ setting
- */
-static void __init sh7751systemh_init_irq(void)
-{
- make_systemh_irq(0xb); /* Ethernet interrupt */
-}
-
-static struct sh_machine_vector mv_7751systemh __initmv = {
- .mv_name = "7751 SystemH",
- .mv_nr_irqs = 72,
-
- .mv_inb = sh7751systemh_inb,
- .mv_inw = sh7751systemh_inw,
- .mv_inl = sh7751systemh_inl,
- .mv_outb = sh7751systemh_outb,
- .mv_outw = sh7751systemh_outw,
- .mv_outl = sh7751systemh_outl,
-
- .mv_inb_p = sh7751systemh_inb_p,
- .mv_inw_p = sh7751systemh_inw,
- .mv_inl_p = sh7751systemh_inl,
- .mv_outb_p = sh7751systemh_outb_p,
- .mv_outw_p = sh7751systemh_outw,
- .mv_outl_p = sh7751systemh_outl,
-
- .mv_insb = sh7751systemh_insb,
- .mv_insw = sh7751systemh_insw,
- .mv_insl = sh7751systemh_insl,
- .mv_outsb = sh7751systemh_outsb,
- .mv_outsw = sh7751systemh_outsw,
- .mv_outsl = sh7751systemh_outsl,
-
- .mv_init_irq = sh7751systemh_init_irq,
-};
diff --git a/arch/sh/boards/mach-x3proto/Makefile b/arch/sh/boards/mach-x3proto/Makefile
index 983e4551fecf..708c21c919ff 100644
--- a/arch/sh/boards/mach-x3proto/Makefile
+++ b/arch/sh/boards/mach-x3proto/Makefile
@@ -1 +1,3 @@
obj-y += setup.o ilsel.o
+
+obj-$(CONFIG_GENERIC_GPIO) += gpio.o
diff --git a/arch/sh/boards/mach-x3proto/gpio.c b/arch/sh/boards/mach-x3proto/gpio.c
new file mode 100644
index 000000000000..239e74066253
--- /dev/null
+++ b/arch/sh/boards/mach-x3proto/gpio.c
@@ -0,0 +1,136 @@
+/*
+ * arch/sh/boards/mach-x3proto/gpio.c
+ *
+ * Renesas SH-X3 Prototype Baseboard GPIO Support.
+ *
+ * Copyright (C) 2010 Paul Mundt
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ */
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/init.h>
+#include <linux/interrupt.h>
+#include <linux/gpio.h>
+#include <linux/irq.h>
+#include <linux/kernel.h>
+#include <linux/spinlock.h>
+#include <linux/io.h>
+#include <mach/ilsel.h>
+#include <mach/hardware.h>
+
+#define KEYCTLR 0xb81c0000
+#define KEYOUTR 0xb81c0002
+#define KEYDETR 0xb81c0004
+
+static DEFINE_SPINLOCK(x3proto_gpio_lock);
+static unsigned int x3proto_gpio_irq_map[NR_BASEBOARD_GPIOS] = { 0, };
+
+static int x3proto_gpio_direction_input(struct gpio_chip *chip, unsigned gpio)
+{
+ unsigned long flags;
+ unsigned int data;
+
+ spin_lock_irqsave(&x3proto_gpio_lock, flags);
+ data = __raw_readw(KEYCTLR);
+ data |= (1 << gpio);
+ __raw_writew(data, KEYCTLR);
+ spin_unlock_irqrestore(&x3proto_gpio_lock, flags);
+
+ return 0;
+}
+
+static int x3proto_gpio_get(struct gpio_chip *chip, unsigned gpio)
+{
+ return !!(__raw_readw(KEYDETR) & (1 << gpio));
+}
+
+static int x3proto_gpio_to_irq(struct gpio_chip *chip, unsigned gpio)
+{
+ return x3proto_gpio_irq_map[gpio];
+}
+
+static void x3proto_gpio_irq_handler(unsigned int irq, struct irq_desc *desc)
+{
+ struct irq_data *data = irq_get_irq_data(irq);
+ struct irq_chip *chip = irq_data_get_irq_chip(data);
+ unsigned long mask;
+ int pin;
+
+ chip->irq_mask_ack(data);
+
+ mask = __raw_readw(KEYDETR);
+
+ for_each_set_bit(pin, &mask, NR_BASEBOARD_GPIOS)
+ generic_handle_irq(x3proto_gpio_to_irq(NULL, pin));
+
+ chip->irq_unmask(data);
+}
+
+struct gpio_chip x3proto_gpio_chip = {
+ .label = "x3proto-gpio",
+ .direction_input = x3proto_gpio_direction_input,
+ .get = x3proto_gpio_get,
+ .to_irq = x3proto_gpio_to_irq,
+ .base = -1,
+ .ngpio = NR_BASEBOARD_GPIOS,
+};
+
+int __init x3proto_gpio_setup(void)
+{
+ int ilsel;
+ int ret, i;
+
+ ilsel = ilsel_enable(ILSEL_KEY);
+ if (unlikely(ilsel < 0))
+ return ilsel;
+
+ ret = gpiochip_add(&x3proto_gpio_chip);
+ if (unlikely(ret))
+ goto err_gpio;
+
+ for (i = 0; i < NR_BASEBOARD_GPIOS; i++) {
+ unsigned long flags;
+ int irq = create_irq();
+
+ if (unlikely(irq < 0)) {
+ ret = -EINVAL;
+ goto err_irq;
+ }
+
+ spin_lock_irqsave(&x3proto_gpio_lock, flags);
+ x3proto_gpio_irq_map[i] = irq;
+ set_irq_chip_and_handler_name(irq, &dummy_irq_chip,
+ handle_simple_irq, "gpio");
+ spin_unlock_irqrestore(&x3proto_gpio_lock, flags);
+ }
+
+ pr_info("registering '%s' support, handling GPIOs %u -> %u, "
+ "bound to IRQ %u\n",
+ x3proto_gpio_chip.label, x3proto_gpio_chip.base,
+ x3proto_gpio_chip.base + x3proto_gpio_chip.ngpio,
+ ilsel);
+
+ set_irq_chained_handler(ilsel, x3proto_gpio_irq_handler);
+ set_irq_wake(ilsel, 1);
+
+ return 0;
+
+err_irq:
+ for (; i >= 0; --i)
+ if (x3proto_gpio_irq_map[i])
+ destroy_irq(x3proto_gpio_irq_map[i]);
+
+ ret = gpiochip_remove(&x3proto_gpio_chip);
+ if (unlikely(ret))
+ pr_err("Failed deregistering GPIO\n");
+
+err_gpio:
+ synchronize_irq(ilsel);
+
+ ilsel_disable(ILSEL_KEY);
+
+ return ret;
+}
diff --git a/arch/sh/boards/mach-x3proto/ilsel.c b/arch/sh/boards/mach-x3proto/ilsel.c
index 5c9842704c60..95e346139515 100644
--- a/arch/sh/boards/mach-x3proto/ilsel.c
+++ b/arch/sh/boards/mach-x3proto/ilsel.c
@@ -1,20 +1,22 @@
/*
- * arch/sh/boards/renesas/x3proto/ilsel.c
+ * arch/sh/boards/mach-x3proto/ilsel.c
*
* Helper routines for SH-X3 proto board ILSEL.
*
- * Copyright (C) 2007 Paul Mundt
+ * Copyright (C) 2007 - 2010 Paul Mundt
*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*/
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/bitmap.h>
#include <linux/io.h>
-#include <asm/ilsel.h>
+#include <mach/ilsel.h>
/*
* ILSEL is split across:
@@ -64,6 +66,8 @@ static void __ilsel_enable(ilsel_source_t set, unsigned int bit)
unsigned int tmp, shift;
unsigned long addr;
+ pr_notice("enabling ILSEL set %d\n", set);
+
addr = mk_ilsel_addr(bit);
shift = mk_ilsel_shift(bit);
@@ -92,8 +96,10 @@ int ilsel_enable(ilsel_source_t set)
{
unsigned int bit;
- /* Aliased sources must use ilsel_enable_fixed() */
- BUG_ON(set > ILSEL_KEY);
+ if (unlikely(set > ILSEL_KEY)) {
+ pr_err("Aliased sources must use ilsel_enable_fixed()\n");
+ return -EINVAL;
+ }
do {
bit = find_first_zero_bit(&ilsel_level_map, ILSEL_LEVELS);
@@ -140,6 +146,8 @@ void ilsel_disable(unsigned int irq)
unsigned long addr;
unsigned int tmp;
+ pr_notice("disabling ILSEL set %d\n", irq);
+
addr = mk_ilsel_addr(irq);
tmp = __raw_readw(addr);
diff --git a/arch/sh/boards/mach-x3proto/setup.c b/arch/sh/boards/mach-x3proto/setup.c
index 102bf56befb4..d682e2b6a856 100644
--- a/arch/sh/boards/mach-x3proto/setup.c
+++ b/arch/sh/boards/mach-x3proto/setup.c
@@ -1,9 +1,9 @@
/*
- * arch/sh/boards/renesas/x3proto/setup.c
+ * arch/sh/boards/mach-x3proto/setup.c
*
* Renesas SH-X3 Prototype Board Support.
*
- * Copyright (C) 2007 - 2008 Paul Mundt
+ * Copyright (C) 2007 - 2010 Paul Mundt
*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file "COPYING" in the main directory of this archive
@@ -16,9 +16,13 @@
#include <linux/smc91x.h>
#include <linux/irq.h>
#include <linux/interrupt.h>
+#include <linux/input.h>
#include <linux/usb/r8a66597.h>
#include <linux/usb/m66592.h>
-#include <asm/ilsel.h>
+#include <linux/gpio.h>
+#include <linux/gpio_keys.h>
+#include <mach/ilsel.h>
+#include <mach/hardware.h>
#include <asm/smp-ops.h>
static struct resource heartbeat_resources[] = {
@@ -122,15 +126,128 @@ static struct platform_device m66592_usb_peripheral_device = {
.resource = m66592_usb_peripheral_resources,
};
+static struct gpio_keys_button baseboard_buttons[NR_BASEBOARD_GPIOS] = {
+ {
+ .desc = "key44",
+ .code = KEY_POWER,
+ .active_low = 1,
+ .wakeup = 1,
+ }, {
+ .desc = "key43",
+ .code = KEY_SUSPEND,
+ .active_low = 1,
+ .wakeup = 1,
+ }, {
+ .desc = "key42",
+ .code = KEY_KATAKANAHIRAGANA,
+ .active_low = 1,
+ }, {
+ .desc = "key41",
+ .code = KEY_SWITCHVIDEOMODE,
+ .active_low = 1,
+ }, {
+ .desc = "key34",
+ .code = KEY_F12,
+ .active_low = 1,
+ }, {
+ .desc = "key33",
+ .code = KEY_F11,
+ .active_low = 1,
+ }, {
+ .desc = "key32",
+ .code = KEY_F10,
+ .active_low = 1,
+ }, {
+ .desc = "key31",
+ .code = KEY_F9,
+ .active_low = 1,
+ }, {
+ .desc = "key24",
+ .code = KEY_F8,
+ .active_low = 1,
+ }, {
+ .desc = "key23",
+ .code = KEY_F7,
+ .active_low = 1,
+ }, {
+ .desc = "key22",
+ .code = KEY_F6,
+ .active_low = 1,
+ }, {
+ .desc = "key21",
+ .code = KEY_F5,
+ .active_low = 1,
+ }, {
+ .desc = "key14",
+ .code = KEY_F4,
+ .active_low = 1,
+ }, {
+ .desc = "key13",
+ .code = KEY_F3,
+ .active_low = 1,
+ }, {
+ .desc = "key12",
+ .code = KEY_F2,
+ .active_low = 1,
+ }, {
+ .desc = "key11",
+ .code = KEY_F1,
+ .active_low = 1,
+ },
+};
+
+static struct gpio_keys_platform_data baseboard_buttons_data = {
+ .buttons = baseboard_buttons,
+ .nbuttons = ARRAY_SIZE(baseboard_buttons),
+};
+
+static struct platform_device baseboard_buttons_device = {
+ .name = "gpio-keys",
+ .id = -1,
+ .dev = {
+ .platform_data = &baseboard_buttons_data,
+ },
+};
+
static struct platform_device *x3proto_devices[] __initdata = {
&heartbeat_device,
&smc91x_device,
&r8a66597_usb_host_device,
&m66592_usb_peripheral_device,
+ &baseboard_buttons_device,
};
+static void __init x3proto_init_irq(void)
+{
+ plat_irq_setup_pins(IRQ_MODE_IRL3210);
+
+ /* Set ICR0.LVLMODE */
+ __raw_writel(__raw_readl(0xfe410000) | (1 << 21), 0xfe410000);
+}
+
static int __init x3proto_devices_setup(void)
{
+ int ret, i;
+
+ /*
+ * IRLs are only needed for ILSEL mappings, so flip over the INTC
+ * pins at a later point to enable the GPIOs to settle.
+ */
+ x3proto_init_irq();
+
+ /*
+ * Now that ILSELs are available, set up the baseboard GPIOs.
+ */
+ ret = x3proto_gpio_setup();
+ if (unlikely(ret))
+ return ret;
+
+ /*
+ * Propagate dynamic GPIOs for the baseboard button device.
+ */
+ for (i = 0; i < ARRAY_SIZE(baseboard_buttons); i++)
+ baseboard_buttons[i].gpio = x3proto_gpio_chip.base + i;
+
r8a66597_usb_host_resources[1].start =
r8a66597_usb_host_resources[1].end = ilsel_enable(ILSEL_USBH_I);
@@ -145,14 +262,6 @@ static int __init x3proto_devices_setup(void)
}
device_initcall(x3proto_devices_setup);
-static void __init x3proto_init_irq(void)
-{
- plat_irq_setup_pins(IRQ_MODE_IRL3210);
-
- /* Set ICR0.LVLMODE */
- __raw_writel(__raw_readl(0xfe410000) | (1 << 21), 0xfe410000);
-}
-
static void __init x3proto_setup(char **cmdline_p)
{
register_smp_ops(&shx3_smp_ops);
@@ -161,5 +270,4 @@ static void __init x3proto_setup(char **cmdline_p)
static struct sh_machine_vector mv_x3proto __initmv = {
.mv_name = "x3proto",
.mv_setup = x3proto_setup,
- .mv_init_irq = x3proto_init_irq,
};
diff --git a/arch/sh/boot/compressed/head_32.S b/arch/sh/boot/compressed/head_32.S
index 200c1d4f1efe..3e150326f1fd 100644
--- a/arch/sh/boot/compressed/head_32.S
+++ b/arch/sh/boot/compressed/head_32.S
@@ -91,7 +91,9 @@ bss_start_addr:
end_addr:
.long _end
init_sr:
- .long 0x400000F0 /* Privileged mode, Bank=0, Block=0, IMASK=0xF */
+ .long 0x500000F0 /* Privileged mode, Bank=0, Block=1, IMASK=0xF */
+kexec_magic:
+ .long 0x400000F0 /* magic used by kexec to parse zImage format */
init_stack_addr:
.long stack_start
decompress_kernel_addr:
diff --git a/arch/sh/cchips/hd6446x/Makefile b/arch/sh/cchips/hd6446x/Makefile
index 9682e3ab668f..59c348337bb8 100644
--- a/arch/sh/cchips/hd6446x/Makefile
+++ b/arch/sh/cchips/hd6446x/Makefile
@@ -1,3 +1,3 @@
obj-$(CONFIG_HD64461) += hd64461.o
-EXTRA_CFLAGS += -Werror
+ccflags-y := -Werror
diff --git a/arch/sh/cchips/hd6446x/hd64461.c b/arch/sh/cchips/hd6446x/hd64461.c
index bcb31ae84a51..177a10b25cad 100644
--- a/arch/sh/cchips/hd6446x/hd64461.c
+++ b/arch/sh/cchips/hd6446x/hd64461.c
@@ -17,8 +17,9 @@
/* This belongs in cpu specific */
#define INTC_ICR1 0xA4140010UL
-static void hd64461_mask_irq(unsigned int irq)
+static void hd64461_mask_irq(struct irq_data *data)
{
+ unsigned int irq = data->irq;
unsigned short nimr;
unsigned short mask = 1 << (irq - HD64461_IRQBASE);
@@ -27,8 +28,9 @@ static void hd64461_mask_irq(unsigned int irq)
__raw_writew(nimr, HD64461_NIMR);
}
-static void hd64461_unmask_irq(unsigned int irq)
+static void hd64461_unmask_irq(struct irq_data *data)
{
+ unsigned int irq = data->irq;
unsigned short nimr;
unsigned short mask = 1 << (irq - HD64461_IRQBASE);
@@ -37,20 +39,21 @@ static void hd64461_unmask_irq(unsigned int irq)
__raw_writew(nimr, HD64461_NIMR);
}
-static void hd64461_mask_and_ack_irq(unsigned int irq)
+static void hd64461_mask_and_ack_irq(struct irq_data *data)
{
- hd64461_mask_irq(irq);
+ hd64461_mask_irq(data);
+
#ifdef CONFIG_HD64461_ENABLER
- if (irq == HD64461_IRQBASE + 13)
+ if (data->irq == HD64461_IRQBASE + 13)
__raw_writeb(0x00, HD64461_PCC1CSCR);
#endif
}
static struct irq_chip hd64461_irq_chip = {
.name = "HD64461-IRQ",
- .mask = hd64461_mask_irq,
- .mask_ack = hd64461_mask_and_ack_irq,
- .unmask = hd64461_unmask_irq,
+ .irq_mask = hd64461_mask_irq,
+ .irq_mask_ack = hd64461_mask_and_ack_irq,
+ .irq_unmask = hd64461_unmask_irq,
};
static void hd64461_irq_demux(unsigned int irq, struct irq_desc *desc)
diff --git a/arch/sh/configs/ap325rxa_defconfig b/arch/sh/configs/ap325rxa_defconfig
index 238d6833ac70..e5335123b5e9 100644
--- a/arch/sh/configs/ap325rxa_defconfig
+++ b/arch/sh/configs/ap325rxa_defconfig
@@ -3,7 +3,6 @@ CONFIG_EXPERIMENTAL=y
CONFIG_SYSVIPC=y
CONFIG_BSD_PROCESS_ACCT=y
CONFIG_LOG_BUF_SHIFT=14
-CONFIG_SYSFS_DEPRECATED_V2=y
# CONFIG_KALLSYMS is not set
CONFIG_SLAB=y
CONFIG_MODULES=y
diff --git a/arch/sh/configs/cayman_defconfig b/arch/sh/configs/cayman_defconfig
index b3bf11bcf025..67e150631ea5 100644
--- a/arch/sh/configs/cayman_defconfig
+++ b/arch/sh/configs/cayman_defconfig
@@ -1,7 +1,6 @@
CONFIG_EXPERIMENTAL=y
CONFIG_POSIX_MQUEUE=y
CONFIG_LOG_BUF_SHIFT=14
-CONFIG_SYSFS_DEPRECATED_V2=y
# CONFIG_CC_OPTIMIZE_FOR_SIZE is not set
CONFIG_SLAB=y
CONFIG_MODULES=y
diff --git a/arch/sh/configs/dreamcast_defconfig b/arch/sh/configs/dreamcast_defconfig
index 3cdee4f0c184..ec243ca29529 100644
--- a/arch/sh/configs/dreamcast_defconfig
+++ b/arch/sh/configs/dreamcast_defconfig
@@ -2,7 +2,6 @@ CONFIG_EXPERIMENTAL=y
CONFIG_SYSVIPC=y
CONFIG_BSD_PROCESS_ACCT=y
CONFIG_LOG_BUF_SHIFT=14
-CONFIG_SYSFS_DEPRECATED_V2=y
# CONFIG_SYSCTL_SYSCALL is not set
CONFIG_SLAB=y
CONFIG_PROFILING=y
diff --git a/arch/sh/configs/ecovec24-romimage_defconfig b/arch/sh/configs/ecovec24-romimage_defconfig
index 021633b02835..5fcb17bff24a 100644
--- a/arch/sh/configs/ecovec24-romimage_defconfig
+++ b/arch/sh/configs/ecovec24-romimage_defconfig
@@ -5,7 +5,6 @@ CONFIG_BSD_PROCESS_ACCT=y
CONFIG_IKCONFIG=y
CONFIG_IKCONFIG_PROC=y
CONFIG_LOG_BUF_SHIFT=14
-CONFIG_SYSFS_DEPRECATED_V2=y
CONFIG_BLK_DEV_INITRD=y
# CONFIG_KALLSYMS is not set
CONFIG_SLAB=y
diff --git a/arch/sh/configs/edosk7760_defconfig b/arch/sh/configs/edosk7760_defconfig
index 365f2318e9b5..e1077a041ac3 100644
--- a/arch/sh/configs/edosk7760_defconfig
+++ b/arch/sh/configs/edosk7760_defconfig
@@ -5,7 +5,6 @@ CONFIG_POSIX_MQUEUE=y
CONFIG_BSD_PROCESS_ACCT=y
CONFIG_IKCONFIG=y
CONFIG_IKCONFIG_PROC=y
-CONFIG_SYSFS_DEPRECATED_V2=y
CONFIG_BLK_DEV_INITRD=y
CONFIG_KALLSYMS_ALL=y
CONFIG_MODULES=y
diff --git a/arch/sh/configs/espt_defconfig b/arch/sh/configs/espt_defconfig
index ca7fc1b3d567..67cb1094a033 100644
--- a/arch/sh/configs/espt_defconfig
+++ b/arch/sh/configs/espt_defconfig
@@ -3,7 +3,6 @@ CONFIG_SYSVIPC=y
CONFIG_IKCONFIG=y
CONFIG_IKCONFIG_PROC=y
CONFIG_LOG_BUF_SHIFT=14
-CONFIG_SYSFS_DEPRECATED_V2=y
CONFIG_NAMESPACES=y
CONFIG_UTS_NS=y
CONFIG_IPC_NS=y
diff --git a/arch/sh/configs/hp6xx_defconfig b/arch/sh/configs/hp6xx_defconfig
index 45c18a3830d2..496edcdf95a3 100644
--- a/arch/sh/configs/hp6xx_defconfig
+++ b/arch/sh/configs/hp6xx_defconfig
@@ -3,7 +3,6 @@ CONFIG_BSD_PROCESS_ACCT=y
CONFIG_IKCONFIG=y
CONFIG_IKCONFIG_PROC=y
CONFIG_LOG_BUF_SHIFT=14
-CONFIG_SYSFS_DEPRECATED_V2=y
# CONFIG_CC_OPTIMIZE_FOR_SIZE is not set
# CONFIG_SYSCTL_SYSCALL is not set
CONFIG_SLAB=y
diff --git a/arch/sh/configs/kfr2r09-romimage_defconfig b/arch/sh/configs/kfr2r09-romimage_defconfig
index d4268b1953bc..029a506ca325 100644
--- a/arch/sh/configs/kfr2r09-romimage_defconfig
+++ b/arch/sh/configs/kfr2r09-romimage_defconfig
@@ -5,7 +5,6 @@ CONFIG_BSD_PROCESS_ACCT=y
CONFIG_IKCONFIG=y
CONFIG_IKCONFIG_PROC=y
CONFIG_LOG_BUF_SHIFT=14
-CONFIG_SYSFS_DEPRECATED_V2=y
CONFIG_BLK_DEV_INITRD=y
# CONFIG_KALLSYMS is not set
CONFIG_SLAB=y
diff --git a/arch/sh/configs/kfr2r09_defconfig b/arch/sh/configs/kfr2r09_defconfig
index ad5d296b375f..fac13ded07b2 100644
--- a/arch/sh/configs/kfr2r09_defconfig
+++ b/arch/sh/configs/kfr2r09_defconfig
@@ -5,7 +5,6 @@ CONFIG_BSD_PROCESS_ACCT=y
CONFIG_IKCONFIG=y
CONFIG_IKCONFIG_PROC=y
CONFIG_LOG_BUF_SHIFT=14
-CONFIG_SYSFS_DEPRECATED_V2=y
CONFIG_BLK_DEV_INITRD=y
# CONFIG_KALLSYMS is not set
CONFIG_SLAB=y
diff --git a/arch/sh/configs/landisk_defconfig b/arch/sh/configs/landisk_defconfig
index 14e658e9318f..3670e937f2b7 100644
--- a/arch/sh/configs/landisk_defconfig
+++ b/arch/sh/configs/landisk_defconfig
@@ -1,7 +1,6 @@
CONFIG_EXPERIMENTAL=y
CONFIG_SYSVIPC=y
CONFIG_LOG_BUF_SHIFT=14
-CONFIG_SYSFS_DEPRECATED_V2=y
# CONFIG_SYSCTL_SYSCALL is not set
CONFIG_KALLSYMS_EXTRA_PASS=y
CONFIG_SLAB=y
diff --git a/arch/sh/configs/lboxre2_defconfig b/arch/sh/configs/lboxre2_defconfig
index 6be7eaaa8bb6..e3c0894b1bb4 100644
--- a/arch/sh/configs/lboxre2_defconfig
+++ b/arch/sh/configs/lboxre2_defconfig
@@ -1,7 +1,6 @@
CONFIG_EXPERIMENTAL=y
CONFIG_SYSVIPC=y
CONFIG_LOG_BUF_SHIFT=14
-CONFIG_SYSFS_DEPRECATED_V2=y
# CONFIG_SYSCTL_SYSCALL is not set
CONFIG_KALLSYMS_EXTRA_PASS=y
CONFIG_SLAB=y
diff --git a/arch/sh/configs/magicpanelr2_defconfig b/arch/sh/configs/magicpanelr2_defconfig
index 4d61b7711b40..9479872b1ae6 100644
--- a/arch/sh/configs/magicpanelr2_defconfig
+++ b/arch/sh/configs/magicpanelr2_defconfig
@@ -5,7 +5,6 @@ CONFIG_POSIX_MQUEUE=y
CONFIG_BSD_PROCESS_ACCT=y
CONFIG_BSD_PROCESS_ACCT_V3=y
CONFIG_AUDIT=y
-CONFIG_SYSFS_DEPRECATED_V2=y
CONFIG_RELAY=y
CONFIG_BLK_DEV_INITRD=y
# CONFIG_CC_OPTIMIZE_FOR_SIZE is not set
diff --git a/arch/sh/configs/microdev_defconfig b/arch/sh/configs/microdev_defconfig
index 0e32a24fed53..f1d2e1b5ee41 100644
--- a/arch/sh/configs/microdev_defconfig
+++ b/arch/sh/configs/microdev_defconfig
@@ -1,7 +1,6 @@
CONFIG_EXPERIMENTAL=y
CONFIG_BSD_PROCESS_ACCT=y
CONFIG_LOG_BUF_SHIFT=14
-CONFIG_SYSFS_DEPRECATED_V2=y
CONFIG_BLK_DEV_INITRD=y
# CONFIG_CC_OPTIMIZE_FOR_SIZE is not set
# CONFIG_SYSCTL_SYSCALL is not set
diff --git a/arch/sh/configs/migor_defconfig b/arch/sh/configs/migor_defconfig
index c19fcdfdee37..9ad904a110de 100644
--- a/arch/sh/configs/migor_defconfig
+++ b/arch/sh/configs/migor_defconfig
@@ -3,7 +3,6 @@ CONFIG_SYSVIPC=y
CONFIG_IKCONFIG=y
CONFIG_IKCONFIG_PROC=y
CONFIG_LOG_BUF_SHIFT=14
-CONFIG_SYSFS_DEPRECATED_V2=y
CONFIG_BLK_DEV_INITRD=y
# CONFIG_CC_OPTIMIZE_FOR_SIZE is not set
# CONFIG_SYSCTL_SYSCALL is not set
diff --git a/arch/sh/configs/polaris_defconfig b/arch/sh/configs/polaris_defconfig
index 984e3fe1ce5d..f3d5d9f76310 100644
--- a/arch/sh/configs/polaris_defconfig
+++ b/arch/sh/configs/polaris_defconfig
@@ -7,7 +7,6 @@ CONFIG_BSD_PROCESS_ACCT=y
CONFIG_BSD_PROCESS_ACCT_V3=y
CONFIG_AUDIT=y
CONFIG_LOG_BUF_SHIFT=14
-CONFIG_SYSFS_DEPRECATED_V2=y
# CONFIG_CC_OPTIMIZE_FOR_SIZE is not set
CONFIG_SLAB=y
CONFIG_MODULES=y
diff --git a/arch/sh/configs/r7780mp_defconfig b/arch/sh/configs/r7780mp_defconfig
index e8b5472e6d84..920b8471ceb7 100644
--- a/arch/sh/configs/r7780mp_defconfig
+++ b/arch/sh/configs/r7780mp_defconfig
@@ -4,7 +4,6 @@ CONFIG_BSD_PROCESS_ACCT=y
CONFIG_IKCONFIG=y
CONFIG_IKCONFIG_PROC=y
CONFIG_LOG_BUF_SHIFT=14
-CONFIG_SYSFS_DEPRECATED_V2=y
# CONFIG_SYSCTL_SYSCALL is not set
# CONFIG_FUTEX is not set
# CONFIG_EPOLL is not set
diff --git a/arch/sh/configs/r7785rp_defconfig b/arch/sh/configs/r7785rp_defconfig
index fd8848060982..c77da6be06b8 100644
--- a/arch/sh/configs/r7785rp_defconfig
+++ b/arch/sh/configs/r7785rp_defconfig
@@ -8,7 +8,6 @@ CONFIG_RCU_TRACE=y
CONFIG_IKCONFIG=y
CONFIG_IKCONFIG_PROC=y
CONFIG_LOG_BUF_SHIFT=14
-CONFIG_SYSFS_DEPRECATED_V2=y
# CONFIG_SYSCTL_SYSCALL is not set
CONFIG_SLAB=y
CONFIG_PROFILING=y
diff --git a/arch/sh/configs/rts7751r2d1_defconfig b/arch/sh/configs/rts7751r2d1_defconfig
index a42f7c22ca1a..a3d081095ce2 100644
--- a/arch/sh/configs/rts7751r2d1_defconfig
+++ b/arch/sh/configs/rts7751r2d1_defconfig
@@ -1,7 +1,6 @@
CONFIG_EXPERIMENTAL=y
CONFIG_SYSVIPC=y
CONFIG_LOG_BUF_SHIFT=14
-CONFIG_SYSFS_DEPRECATED_V2=y
# CONFIG_CC_OPTIMIZE_FOR_SIZE is not set
# CONFIG_SYSCTL_SYSCALL is not set
CONFIG_SLAB=y
diff --git a/arch/sh/configs/rts7751r2dplus_defconfig b/arch/sh/configs/rts7751r2dplus_defconfig
index 742aa61f2427..b1a04f3c598b 100644
--- a/arch/sh/configs/rts7751r2dplus_defconfig
+++ b/arch/sh/configs/rts7751r2dplus_defconfig
@@ -1,7 +1,6 @@
CONFIG_EXPERIMENTAL=y
CONFIG_SYSVIPC=y
CONFIG_LOG_BUF_SHIFT=14
-CONFIG_SYSFS_DEPRECATED_V2=y
# CONFIG_CC_OPTIMIZE_FOR_SIZE is not set
# CONFIG_SYSCTL_SYSCALL is not set
CONFIG_SLAB=y
diff --git a/arch/sh/configs/sdk7780_defconfig b/arch/sh/configs/sdk7780_defconfig
index aed394d89346..ae1115849dda 100644
--- a/arch/sh/configs/sdk7780_defconfig
+++ b/arch/sh/configs/sdk7780_defconfig
@@ -6,7 +6,6 @@ CONFIG_BSD_PROCESS_ACCT=y
CONFIG_IKCONFIG=y
CONFIG_IKCONFIG_PROC=y
CONFIG_LOG_BUF_SHIFT=18
-CONFIG_SYSFS_DEPRECATED_V2=y
CONFIG_RELAY=y
CONFIG_KALLSYMS_ALL=y
CONFIG_MODULES=y
diff --git a/arch/sh/configs/se7343_defconfig b/arch/sh/configs/se7343_defconfig
index 7a7e13853cfd..be9c474197b3 100644
--- a/arch/sh/configs/se7343_defconfig
+++ b/arch/sh/configs/se7343_defconfig
@@ -3,7 +3,6 @@ CONFIG_EXPERIMENTAL=y
CONFIG_SYSVIPC=y
CONFIG_POSIX_MQUEUE=y
CONFIG_LOG_BUF_SHIFT=14
-CONFIG_SYSFS_DEPRECATED_V2=y
# CONFIG_SYSCTL_SYSCALL is not set
# CONFIG_FUTEX is not set
# CONFIG_EPOLL is not set
diff --git a/arch/sh/configs/se7712_defconfig b/arch/sh/configs/se7712_defconfig
index 3620a7f4c821..1248635e4f88 100644
--- a/arch/sh/configs/se7712_defconfig
+++ b/arch/sh/configs/se7712_defconfig
@@ -5,7 +5,6 @@ CONFIG_SYSVIPC=y
CONFIG_POSIX_MQUEUE=y
CONFIG_BSD_PROCESS_ACCT=y
CONFIG_LOG_BUF_SHIFT=14
-CONFIG_SYSFS_DEPRECATED_V2=y
# CONFIG_CC_OPTIMIZE_FOR_SIZE is not set
CONFIG_KALLSYMS_ALL=y
# CONFIG_BUG is not set
diff --git a/arch/sh/configs/se7721_defconfig b/arch/sh/configs/se7721_defconfig
index fe22f599c0cb..c3ba6e8a9818 100644
--- a/arch/sh/configs/se7721_defconfig
+++ b/arch/sh/configs/se7721_defconfig
@@ -5,7 +5,6 @@ CONFIG_SYSVIPC=y
CONFIG_POSIX_MQUEUE=y
CONFIG_BSD_PROCESS_ACCT=y
CONFIG_LOG_BUF_SHIFT=14
-CONFIG_SYSFS_DEPRECATED_V2=y
# CONFIG_CC_OPTIMIZE_FOR_SIZE is not set
CONFIG_KALLSYMS_ALL=y
# CONFIG_BUG is not set
diff --git a/arch/sh/configs/se7722_defconfig b/arch/sh/configs/se7722_defconfig
index b9b64c38810e..ae998c7e2ee0 100644
--- a/arch/sh/configs/se7722_defconfig
+++ b/arch/sh/configs/se7722_defconfig
@@ -4,7 +4,6 @@ CONFIG_BSD_PROCESS_ACCT=y
CONFIG_IKCONFIG=y
CONFIG_IKCONFIG_PROC=y
CONFIG_LOG_BUF_SHIFT=14
-CONFIG_SYSFS_DEPRECATED_V2=y
CONFIG_BLK_DEV_INITRD=y
CONFIG_PROFILING=y
CONFIG_MODULES=y
diff --git a/arch/sh/configs/se7724_defconfig b/arch/sh/configs/se7724_defconfig
index 03e736781c2e..ed35093e3758 100644
--- a/arch/sh/configs/se7724_defconfig
+++ b/arch/sh/configs/se7724_defconfig
@@ -3,7 +3,6 @@ CONFIG_EXPERIMENTAL=y
CONFIG_SYSVIPC=y
CONFIG_BSD_PROCESS_ACCT=y
CONFIG_LOG_BUF_SHIFT=14
-CONFIG_SYSFS_DEPRECATED_V2=y
# CONFIG_KALLSYMS is not set
CONFIG_SLAB=y
CONFIG_MODULES=y
diff --git a/arch/sh/configs/se7750_defconfig b/arch/sh/configs/se7750_defconfig
index 1a686b6d5cd4..912c98590e22 100644
--- a/arch/sh/configs/se7750_defconfig
+++ b/arch/sh/configs/se7750_defconfig
@@ -5,7 +5,6 @@ CONFIG_BSD_PROCESS_ACCT=y
CONFIG_IKCONFIG=y
CONFIG_IKCONFIG_PROC=y
CONFIG_LOG_BUF_SHIFT=14
-CONFIG_SYSFS_DEPRECATED_V2=y
# CONFIG_CC_OPTIMIZE_FOR_SIZE is not set
# CONFIG_SYSCTL_SYSCALL is not set
# CONFIG_HOTPLUG is not set
diff --git a/arch/sh/configs/se7751_defconfig b/arch/sh/configs/se7751_defconfig
index 7e03451a9fad..75c92fc1876b 100644
--- a/arch/sh/configs/se7751_defconfig
+++ b/arch/sh/configs/se7751_defconfig
@@ -2,7 +2,6 @@ CONFIG_EXPERIMENTAL=y
CONFIG_SYSVIPC=y
CONFIG_BSD_PROCESS_ACCT=y
CONFIG_LOG_BUF_SHIFT=14
-CONFIG_SYSFS_DEPRECATED_V2=y
CONFIG_BLK_DEV_INITRD=y
# CONFIG_CC_OPTIMIZE_FOR_SIZE is not set
# CONFIG_SYSCTL_SYSCALL is not set
diff --git a/arch/sh/configs/se7780_defconfig b/arch/sh/configs/se7780_defconfig
index 4cfc4deff135..c8c5e7f7a68d 100644
--- a/arch/sh/configs/se7780_defconfig
+++ b/arch/sh/configs/se7780_defconfig
@@ -3,7 +3,6 @@ CONFIG_SYSVIPC=y
CONFIG_IKCONFIG=y
CONFIG_IKCONFIG_PROC=y
CONFIG_LOG_BUF_SHIFT=14
-CONFIG_SYSFS_DEPRECATED_V2=y
# CONFIG_KALLSYMS is not set
# CONFIG_HOTPLUG is not set
# CONFIG_EPOLL is not set
diff --git a/arch/sh/configs/snapgear_defconfig b/arch/sh/configs/secureedge5410_defconfig
index f38c98341f15..7eae4e59d7f0 100644
--- a/arch/sh/configs/snapgear_defconfig
+++ b/arch/sh/configs/secureedge5410_defconfig
@@ -1,7 +1,6 @@
CONFIG_EXPERIMENTAL=y
# CONFIG_SWAP is not set
CONFIG_LOG_BUF_SHIFT=14
-CONFIG_SYSFS_DEPRECATED_V2=y
CONFIG_BLK_DEV_INITRD=y
# CONFIG_SYSCTL_SYSCALL is not set
# CONFIG_HOTPLUG is not set
diff --git a/arch/sh/configs/sh03_defconfig b/arch/sh/configs/sh03_defconfig
index b95dc76b04c1..2051821724c6 100644
--- a/arch/sh/configs/sh03_defconfig
+++ b/arch/sh/configs/sh03_defconfig
@@ -3,7 +3,6 @@ CONFIG_SYSVIPC=y
CONFIG_POSIX_MQUEUE=y
CONFIG_BSD_PROCESS_ACCT=y
CONFIG_LOG_BUF_SHIFT=14
-CONFIG_SYSFS_DEPRECATED_V2=y
CONFIG_BLK_DEV_INITRD=y
# CONFIG_SYSCTL_SYSCALL is not set
CONFIG_SLAB=y
diff --git a/arch/sh/configs/sh2007_defconfig b/arch/sh/configs/sh2007_defconfig
new file mode 100644
index 000000000000..0d2f41472a19
--- /dev/null
+++ b/arch/sh/configs/sh2007_defconfig
@@ -0,0 +1,212 @@
+CONFIG_EXPERIMENTAL=y
+# CONFIG_LOCALVERSION_AUTO is not set
+CONFIG_SYSVIPC=y
+CONFIG_POSIX_MQUEUE=y
+CONFIG_BSD_PROCESS_ACCT=y
+CONFIG_AUDIT=y
+CONFIG_AUDITSYSCALL=y
+CONFIG_IKCONFIG=y
+CONFIG_LOG_BUF_SHIFT=14
+# CONFIG_CC_OPTIMIZE_FOR_SIZE is not set
+CONFIG_KALLSYMS_ALL=y
+CONFIG_SLAB=y
+# CONFIG_BLK_DEV_BSG is not set
+CONFIG_CPU_SUBTYPE_SH7780=y
+CONFIG_MEMORY_SIZE=0x08000000
+# CONFIG_VSYSCALL is not set
+CONFIG_FLATMEM_MANUAL=y
+CONFIG_SH_SH2007=y
+CONFIG_HIGH_RES_TIMERS=y
+CONFIG_SH_DMA=y
+CONFIG_SH_DMA_API=y
+CONFIG_NR_DMA_CHANNELS_BOOL=y
+CONFIG_HZ_100=y
+CONFIG_CMDLINE_OVERWRITE=y
+CONFIG_CMDLINE="console=ttySC1,115200 ip=dhcp root=/dev/nfs rw nfsroot=/nfs/rootfs,rsize=1024,wsize=1024 earlyprintk=sh-sci.1"
+CONFIG_PCCARD=y
+CONFIG_BINFMT_MISC=y
+CONFIG_PACKET=y
+CONFIG_UNIX=y
+CONFIG_XFRM_USER=y
+CONFIG_NET_KEY=y
+CONFIG_NET_KEY_MIGRATE=y
+CONFIG_INET=y
+CONFIG_IP_ADVANCED_ROUTER=y
+CONFIG_IP_MULTIPLE_TABLES=y
+CONFIG_IP_ROUTE_MULTIPATH=y
+CONFIG_IP_ROUTE_VERBOSE=y
+CONFIG_IP_PNP=y
+CONFIG_IP_PNP_DHCP=y
+CONFIG_NET_IPIP=y
+# CONFIG_INET_XFRM_MODE_TRANSPORT is not set
+# CONFIG_INET_XFRM_MODE_TUNNEL is not set
+# CONFIG_INET_XFRM_MODE_BEET is not set
+# CONFIG_INET_LRO is not set
+# CONFIG_IPV6 is not set
+CONFIG_NETWORK_SECMARK=y
+CONFIG_NET_PKTGEN=y
+CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
+CONFIG_BLK_DEV_LOOP=y
+CONFIG_BLK_DEV_RAM=y
+CONFIG_CDROM_PKTCDVD=y
+# CONFIG_MISC_DEVICES is not set
+CONFIG_RAID_ATTRS=y
+CONFIG_SCSI=y
+CONFIG_SCSI_TGT=y
+CONFIG_BLK_DEV_SD=y
+CONFIG_BLK_DEV_SR=y
+CONFIG_CHR_DEV_SG=y
+CONFIG_SCSI_MULTI_LUN=y
+CONFIG_SCSI_CONSTANTS=y
+CONFIG_SCSI_LOGGING=y
+CONFIG_SCSI_SCAN_ASYNC=y
+CONFIG_SCSI_SPI_ATTRS=y
+CONFIG_SCSI_FC_ATTRS=y
+CONFIG_SCSI_ISCSI_ATTRS=y
+CONFIG_SCSI_SRP_ATTRS=y
+# CONFIG_SCSI_LOWLEVEL is not set
+CONFIG_NETDEVICES=y
+CONFIG_DUMMY=y
+CONFIG_EQUALIZER=y
+CONFIG_TUN=y
+CONFIG_VETH=y
+CONFIG_NET_ETHERNET=y
+CONFIG_SMSC911X=y
+# CONFIG_NETDEV_1000 is not set
+# CONFIG_NETDEV_10000 is not set
+# CONFIG_WLAN is not set
+CONFIG_INPUT_FF_MEMLESS=y
+# CONFIG_INPUT_MOUSEDEV is not set
+# CONFIG_INPUT_KEYBOARD is not set
+# CONFIG_INPUT_MOUSE is not set
+# CONFIG_SERIO is not set
+CONFIG_VT_HW_CONSOLE_BINDING=y
+# CONFIG_DEVKMEM is not set
+CONFIG_SERIAL_SH_SCI=y
+CONFIG_SERIAL_SH_SCI_CONSOLE=y
+# CONFIG_LEGACY_PTYS is not set
+# CONFIG_HWMON is not set
+CONFIG_WATCHDOG=y
+CONFIG_SH_WDT=y
+CONFIG_SSB=y
+CONFIG_FB=y
+CONFIG_BACKLIGHT_LCD_SUPPORT=y
+# CONFIG_LCD_CLASS_DEVICE is not set
+CONFIG_FRAMEBUFFER_CONSOLE=y
+CONFIG_FRAMEBUFFER_CONSOLE_DETECT_PRIMARY=y
+CONFIG_LOGO=y
+# CONFIG_HID_SUPPORT is not set
+CONFIG_USB=y
+CONFIG_USB_DEVICEFS=y
+# CONFIG_USB_DEVICE_CLASS is not set
+CONFIG_USB_MON=y
+CONFIG_NEW_LEDS=y
+CONFIG_LEDS_CLASS=y
+CONFIG_LEDS_TRIGGERS=y
+CONFIG_RTC_CLASS=y
+CONFIG_RTC_INTF_DEV_UIE_EMUL=y
+CONFIG_DMADEVICES=y
+CONFIG_TIMB_DMA=y
+CONFIG_EXT3_FS=y
+CONFIG_ISO9660_FS=y
+CONFIG_JOLIET=y
+CONFIG_ZISOFS=y
+CONFIG_UDF_FS=y
+CONFIG_MSDOS_FS=y
+CONFIG_VFAT_FS=y
+CONFIG_FAT_DEFAULT_CODEPAGE=932
+CONFIG_FAT_DEFAULT_IOCHARSET="ascii"
+CONFIG_PROC_KCORE=y
+CONFIG_TMPFS=y
+CONFIG_TMPFS_POSIX_ACL=y
+CONFIG_CONFIGFS_FS=y
+# CONFIG_MISC_FILESYSTEMS is not set
+CONFIG_NFS_FS=y
+CONFIG_NFS_V3=y
+CONFIG_NFS_V3_ACL=y
+CONFIG_NFS_V4=y
+CONFIG_ROOT_NFS=y
+CONFIG_NLS_DEFAULT="utf8"
+CONFIG_NLS_CODEPAGE_437=y
+CONFIG_NLS_CODEPAGE_737=y
+CONFIG_NLS_CODEPAGE_775=y
+CONFIG_NLS_CODEPAGE_850=y
+CONFIG_NLS_CODEPAGE_852=y
+CONFIG_NLS_CODEPAGE_855=y
+CONFIG_NLS_CODEPAGE_857=y
+CONFIG_NLS_CODEPAGE_860=y
+CONFIG_NLS_CODEPAGE_861=y
+CONFIG_NLS_CODEPAGE_862=y
+CONFIG_NLS_CODEPAGE_863=y
+CONFIG_NLS_CODEPAGE_864=y
+CONFIG_NLS_CODEPAGE_865=y
+CONFIG_NLS_CODEPAGE_866=y
+CONFIG_NLS_CODEPAGE_869=y
+CONFIG_NLS_CODEPAGE_936=y
+CONFIG_NLS_CODEPAGE_950=y
+CONFIG_NLS_CODEPAGE_932=y
+CONFIG_NLS_CODEPAGE_949=y
+CONFIG_NLS_CODEPAGE_874=y
+CONFIG_NLS_ISO8859_8=y
+CONFIG_NLS_CODEPAGE_1250=y
+CONFIG_NLS_CODEPAGE_1251=y
+CONFIG_NLS_ASCII=y
+CONFIG_NLS_ISO8859_1=y
+CONFIG_NLS_ISO8859_2=y
+CONFIG_NLS_ISO8859_3=y
+CONFIG_NLS_ISO8859_4=y
+CONFIG_NLS_ISO8859_5=y
+CONFIG_NLS_ISO8859_6=y
+CONFIG_NLS_ISO8859_7=y
+CONFIG_NLS_ISO8859_9=y
+CONFIG_NLS_ISO8859_13=y
+CONFIG_NLS_ISO8859_14=y
+CONFIG_NLS_ISO8859_15=y
+CONFIG_NLS_KOI8_R=y
+CONFIG_NLS_KOI8_U=y
+CONFIG_NLS_UTF8=y
+# CONFIG_ENABLE_WARN_DEPRECATED is not set
+# CONFIG_ENABLE_MUST_CHECK is not set
+CONFIG_DEBUG_FS=y
+CONFIG_DEBUG_KERNEL=y
+# CONFIG_DETECT_SOFTLOCKUP is not set
+# CONFIG_SCHED_DEBUG is not set
+CONFIG_DEBUG_INFO=y
+CONFIG_FRAME_POINTER=y
+# CONFIG_RCU_CPU_STALL_DETECTOR is not set
+CONFIG_SH_STANDARD_BIOS=y
+CONFIG_CRYPTO_NULL=y
+CONFIG_CRYPTO_AUTHENC=y
+CONFIG_CRYPTO_ECB=y
+CONFIG_CRYPTO_LRW=y
+CONFIG_CRYPTO_PCBC=y
+CONFIG_CRYPTO_XTS=y
+CONFIG_CRYPTO_HMAC=y
+CONFIG_CRYPTO_XCBC=y
+CONFIG_CRYPTO_MD4=y
+CONFIG_CRYPTO_MICHAEL_MIC=y
+CONFIG_CRYPTO_SHA1=y
+CONFIG_CRYPTO_SHA256=y
+CONFIG_CRYPTO_SHA512=y
+CONFIG_CRYPTO_TGR192=y
+CONFIG_CRYPTO_WP512=y
+CONFIG_CRYPTO_AES=y
+CONFIG_CRYPTO_ANUBIS=y
+CONFIG_CRYPTO_ARC4=y
+CONFIG_CRYPTO_BLOWFISH=y
+CONFIG_CRYPTO_CAMELLIA=y
+CONFIG_CRYPTO_CAST5=y
+CONFIG_CRYPTO_CAST6=y
+CONFIG_CRYPTO_FCRYPT=y
+CONFIG_CRYPTO_KHAZAD=y
+CONFIG_CRYPTO_SEED=y
+CONFIG_CRYPTO_SERPENT=y
+CONFIG_CRYPTO_TEA=y
+CONFIG_CRYPTO_TWOFISH=y
+CONFIG_CRYPTO_DEFLATE=y
+CONFIG_CRYPTO_LZO=y
+# CONFIG_CRYPTO_ANSI_CPRNG is not set
+# CONFIG_CRYPTO_HW is not set
+CONFIG_CRC_CCITT=y
+CONFIG_CRC16=y
+CONFIG_LIBCRC32C=y
diff --git a/arch/sh/configs/sh7710voipgw_defconfig b/arch/sh/configs/sh7710voipgw_defconfig
index b804641c8dd2..f92ad17cd629 100644
--- a/arch/sh/configs/sh7710voipgw_defconfig
+++ b/arch/sh/configs/sh7710voipgw_defconfig
@@ -3,7 +3,6 @@ CONFIG_EXPERIMENTAL=y
CONFIG_SYSVIPC=y
CONFIG_POSIX_MQUEUE=y
CONFIG_LOG_BUF_SHIFT=14
-CONFIG_SYSFS_DEPRECATED_V2=y
# CONFIG_SYSCTL_SYSCALL is not set
# CONFIG_FUTEX is not set
# CONFIG_EPOLL is not set
diff --git a/arch/sh/configs/sh7757lcr_defconfig b/arch/sh/configs/sh7757lcr_defconfig
new file mode 100644
index 000000000000..273f3fa198f7
--- /dev/null
+++ b/arch/sh/configs/sh7757lcr_defconfig
@@ -0,0 +1,85 @@
+CONFIG_EXPERIMENTAL=y
+# CONFIG_SWAP is not set
+CONFIG_SYSVIPC=y
+CONFIG_POSIX_MQUEUE=y
+CONFIG_BSD_PROCESS_ACCT=y
+CONFIG_TASKSTATS=y
+CONFIG_TASK_DELAY_ACCT=y
+CONFIG_TASK_XACCT=y
+CONFIG_TASK_IO_ACCOUNTING=y
+CONFIG_LOG_BUF_SHIFT=14
+CONFIG_BLK_DEV_INITRD=y
+# CONFIG_CC_OPTIMIZE_FOR_SIZE is not set
+# CONFIG_SYSCTL_SYSCALL is not set
+CONFIG_KALLSYMS_ALL=y
+CONFIG_SLAB=y
+CONFIG_MODULES=y
+CONFIG_MODULE_UNLOAD=y
+# CONFIG_BLK_DEV_BSG is not set
+CONFIG_CPU_SUBTYPE_SH7757=y
+CONFIG_MEMORY_START=0x40000000
+CONFIG_MEMORY_SIZE=0x0f000000
+CONFIG_PMB=y
+CONFIG_FLATMEM_MANUAL=y
+CONFIG_SH_SH7757LCR=y
+CONFIG_HEARTBEAT=y
+CONFIG_SECCOMP=y
+CONFIG_CMDLINE_OVERWRITE=y
+CONFIG_CMDLINE="console=ttySC2,115200 root=/dev/nfs ip=dhcp"
+CONFIG_NET=y
+CONFIG_PACKET=y
+CONFIG_UNIX=y
+CONFIG_INET=y
+CONFIG_IP_MULTICAST=y
+CONFIG_IP_PNP=y
+CONFIG_IP_PNP_DHCP=y
+# CONFIG_INET_LRO is not set
+CONFIG_IPV6=y
+# CONFIG_WIRELESS is not set
+CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
+# CONFIG_FW_LOADER is not set
+CONFIG_BLK_DEV_RAM=y
+# CONFIG_MISC_DEVICES is not set
+CONFIG_NETDEVICES=y
+CONFIG_PHYLIB=y
+CONFIG_VITESSE_PHY=y
+CONFIG_MDIO_BITBANG=y
+CONFIG_NET_ETHERNET=y
+CONFIG_MII=y
+# CONFIG_NETDEV_10000 is not set
+# CONFIG_WLAN is not set
+# CONFIG_KEYBOARD_ATKBD is not set
+# CONFIG_MOUSE_PS2 is not set
+# CONFIG_SERIO is not set
+CONFIG_SERIAL_8250=y
+CONFIG_SERIAL_8250_CONSOLE=y
+CONFIG_SERIAL_8250_NR_UARTS=2
+CONFIG_SERIAL_SH_SCI=y
+CONFIG_SERIAL_SH_SCI_NR_UARTS=3
+CONFIG_SERIAL_SH_SCI_CONSOLE=y
+# CONFIG_LEGACY_PTYS is not set
+# CONFIG_HW_RANDOM is not set
+# CONFIG_HWMON is not set
+# CONFIG_USB_SUPPORT is not set
+CONFIG_EXT2_FS=y
+CONFIG_EXT3_FS=y
+CONFIG_INOTIFY=y
+CONFIG_ISO9660_FS=y
+CONFIG_VFAT_FS=y
+CONFIG_PROC_KCORE=y
+CONFIG_TMPFS=y
+CONFIG_SQUASHFS=y
+CONFIG_MINIX_FS=y
+CONFIG_NFS_FS=y
+CONFIG_ROOT_NFS=y
+CONFIG_NLS_CODEPAGE_437=y
+CONFIG_NLS_CODEPAGE_932=y
+CONFIG_NLS_ISO8859_1=y
+CONFIG_DEBUG_KERNEL=y
+# CONFIG_DETECT_SOFTLOCKUP is not set
+# CONFIG_SCHED_DEBUG is not set
+# CONFIG_DEBUG_BUGVERBOSE is not set
+CONFIG_DEBUG_INFO=y
+# CONFIG_RCU_CPU_STALL_DETECTOR is not set
+# CONFIG_FTRACE is not set
+# CONFIG_CRYPTO_ANSI_CPRNG is not set
diff --git a/arch/sh/configs/sh7763rdp_defconfig b/arch/sh/configs/sh7763rdp_defconfig
index 361876786932..479536440264 100644
--- a/arch/sh/configs/sh7763rdp_defconfig
+++ b/arch/sh/configs/sh7763rdp_defconfig
@@ -3,7 +3,6 @@ CONFIG_SYSVIPC=y
CONFIG_IKCONFIG=y
CONFIG_IKCONFIG_PROC=y
CONFIG_LOG_BUF_SHIFT=14
-CONFIG_SYSFS_DEPRECATED_V2=y
CONFIG_NAMESPACES=y
CONFIG_UTS_NS=y
CONFIG_IPC_NS=y
diff --git a/arch/sh/configs/sh7785lcr_defconfig b/arch/sh/configs/sh7785lcr_defconfig
index ee6b81f7539e..51561f5677d8 100644
--- a/arch/sh/configs/sh7785lcr_defconfig
+++ b/arch/sh/configs/sh7785lcr_defconfig
@@ -4,7 +4,6 @@ CONFIG_BSD_PROCESS_ACCT=y
CONFIG_IKCONFIG=y
CONFIG_IKCONFIG_PROC=y
CONFIG_LOG_BUF_SHIFT=14
-CONFIG_SYSFS_DEPRECATED_V2=y
CONFIG_SLAB=y
CONFIG_PROFILING=y
CONFIG_MODULES=y
diff --git a/arch/sh/configs/shx3_defconfig b/arch/sh/configs/shx3_defconfig
index bb4f60c0f866..3f92d37c6374 100644
--- a/arch/sh/configs/shx3_defconfig
+++ b/arch/sh/configs/shx3_defconfig
@@ -15,7 +15,6 @@ CONFIG_CGROUP_DEVICE=y
CONFIG_CGROUP_CPUACCT=y
CONFIG_RESOURCE_COUNTERS=y
CONFIG_CGROUP_MEM_RES_CTLR=y
-CONFIG_SYSFS_DEPRECATED_V2=y
CONFIG_RELAY=y
CONFIG_NAMESPACES=y
CONFIG_UTS_NS=y
diff --git a/arch/sh/configs/systemh_defconfig b/arch/sh/configs/systemh_defconfig
deleted file mode 100644
index 7007d00c67e0..000000000000
--- a/arch/sh/configs/systemh_defconfig
+++ /dev/null
@@ -1,29 +0,0 @@
-CONFIG_EXPERIMENTAL=y
-CONFIG_LOG_BUF_SHIFT=14
-CONFIG_SYSFS_DEPRECATED_V2=y
-CONFIG_BLK_DEV_INITRD=y
-# CONFIG_CC_OPTIMIZE_FOR_SIZE is not set
-# CONFIG_SYSCTL_SYSCALL is not set
-# CONFIG_HOTPLUG is not set
-CONFIG_SLAB=y
-CONFIG_MODULES=y
-CONFIG_MODULE_UNLOAD=y
-# CONFIG_BLK_DEV_BSG is not set
-CONFIG_CPU_SUBTYPE_SH7751R=y
-CONFIG_MEMORY_START=0x0c000000
-CONFIG_MEMORY_SIZE=0x00400000
-CONFIG_FLATMEM_MANUAL=y
-CONFIG_SH_7751_SYSTEMH=y
-CONFIG_PREEMPT=y
-# CONFIG_STANDALONE is not set
-CONFIG_BLK_DEV_RAM=y
-CONFIG_BLK_DEV_RAM_SIZE=1024
-# CONFIG_INPUT is not set
-# CONFIG_SERIO_SERPORT is not set
-# CONFIG_VT is not set
-CONFIG_HW_RANDOM=y
-CONFIG_PROC_KCORE=y
-CONFIG_TMPFS=y
-CONFIG_CRAMFS=y
-CONFIG_ROMFS_FS=y
-# CONFIG_RCU_CPU_STALL_DETECTOR is not set
diff --git a/arch/sh/configs/titan_defconfig b/arch/sh/configs/titan_defconfig
index 45c309ff447e..0f558914e760 100644
--- a/arch/sh/configs/titan_defconfig
+++ b/arch/sh/configs/titan_defconfig
@@ -5,7 +5,6 @@ CONFIG_POSIX_MQUEUE=y
CONFIG_IKCONFIG=y
CONFIG_IKCONFIG_PROC=y
CONFIG_LOG_BUF_SHIFT=16
-CONFIG_SYSFS_DEPRECATED_V2=y
CONFIG_BLK_DEV_INITRD=y
# CONFIG_CC_OPTIMIZE_FOR_SIZE is not set
# CONFIG_SYSCTL_SYSCALL is not set
diff --git a/arch/sh/configs/ul2_defconfig b/arch/sh/configs/ul2_defconfig
index e107d424acf0..2d288b887fbd 100644
--- a/arch/sh/configs/ul2_defconfig
+++ b/arch/sh/configs/ul2_defconfig
@@ -4,7 +4,6 @@ CONFIG_BSD_PROCESS_ACCT=y
CONFIG_IKCONFIG=y
CONFIG_IKCONFIG_PROC=y
CONFIG_LOG_BUF_SHIFT=14
-CONFIG_SYSFS_DEPRECATED_V2=y
CONFIG_BLK_DEV_INITRD=y
CONFIG_PROFILING=y
CONFIG_MODULES=y
diff --git a/arch/sh/drivers/dma/dma-api.c b/arch/sh/drivers/dma/dma-api.c
index 4a277224a871..f46848f088e4 100644
--- a/arch/sh/drivers/dma/dma-api.c
+++ b/arch/sh/drivers/dma/dma-api.c
@@ -412,8 +412,8 @@ EXPORT_SYMBOL(unregister_dmac);
static int __init dma_api_init(void)
{
printk(KERN_NOTICE "DMA: Registering DMA API.\n");
- create_proc_read_entry("dma", 0, 0, dma_read_proc, 0);
- return 0;
+ return create_proc_read_entry("dma", 0, 0, dma_read_proc, 0)
+ ? 0 : -ENOMEM;
}
subsys_initcall(dma_api_init);
diff --git a/arch/sh/drivers/pci/Makefile b/arch/sh/drivers/pci/Makefile
index 4a59e6890876..82f0a335fd19 100644
--- a/arch/sh/drivers/pci/Makefile
+++ b/arch/sh/drivers/pci/Makefile
@@ -19,6 +19,7 @@ obj-$(CONFIG_SH_RTS7751R2D) += fixups-rts7751r2d.o
obj-$(CONFIG_SH_SH03) += fixups-sh03.o
obj-$(CONFIG_SH_HIGHLANDER) += fixups-r7780rp.o
obj-$(CONFIG_SH_SH7785LCR) += fixups-r7780rp.o
+obj-$(CONFIG_SH_SDK7786) += fixups-sdk7786.o
obj-$(CONFIG_SH_SDK7780) += fixups-sdk7780.o
obj-$(CONFIG_SH_7780_SOLUTION_ENGINE) += fixups-sdk7780.o
obj-$(CONFIG_SH_TITAN) += fixups-titan.o
diff --git a/arch/sh/drivers/pci/fixups-sdk7786.c b/arch/sh/drivers/pci/fixups-sdk7786.c
new file mode 100644
index 000000000000..0e18ee332553
--- /dev/null
+++ b/arch/sh/drivers/pci/fixups-sdk7786.c
@@ -0,0 +1,67 @@
+/*
+ * SDK7786 FPGA PCIe mux handling
+ *
+ * Copyright (C) 2010 Paul Mundt
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ */
+#define pr_fmt(fmt) "PCI: " fmt
+
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/pci.h>
+#include <mach/fpga.h>
+
+/*
+ * The SDK7786 FPGA supports mangling of most of the slots in some way or
+ * another. Slots 3/4 are special in that only one can be supported at a
+ * time, and both appear on port 3 to the PCI bus scan. Enabling slot 4
+ * (the horizontal edge connector) will disable slot 3 entirely.
+ *
+ * Misconfigurations can be detected through the FPGA, which senses card
+ * presence via the slot resistors. Hotplug remains unsupported.
+ */
+static unsigned int slot4en __devinitdata;
+
+char *__devinit pcibios_setup(char *str)
+{
+ if (strcmp(str, "slot4en") == 0) {
+ slot4en = 1;
+ return NULL;
+ }
+
+ return str;
+}
+
+static int __init sdk7786_pci_init(void)
+{
+ u16 data = fpga_read_reg(PCIECR);
+
+ /*
+ * Enable slot #4 if it's been specified on the command line.
+ *
+ * Optionally reroute if slot #4 has a card present while slot #3
+ * does not, regardless of command line value.
+ *
+ * Card presence is logically inverted.
+ */
+ slot4en = slot4en ?: (!(data & PCIECR_PRST4) && (data & PCIECR_PRST3));
+ if (slot4en) {
+ pr_info("Activating PCIe slot#4 (disabling slot#3)\n");
+
+ data &= ~PCIECR_PCIEMUX1;
+ fpga_write_reg(data, PCIECR);
+
+ /* Warn about forced rerouting if slot#3 is occupied */
+ if ((data & PCIECR_PRST3) == 0) {
+ pr_warning("Unreachable card detected in slot#3\n");
+ return -EBUSY;
+ }
+ } else
+ pr_info("PCIe slot#4 disabled\n");
+
+ return 0;
+}
+postcore_initcall(sdk7786_pci_init);
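The presence-detect logic in the file above is easy to misread because the PRST bits are active-low: a clear bit means a card is fitted. A small user-space sketch of the slot 3/4 routing decision, with made-up bit positions standing in for the real <mach/fpga.h> definitions:

    #include <stdbool.h>
    #include <stdint.h>

    /* Assumed bit positions; the real values come from <mach/fpga.h>. */
    #define DEMO_PRST3 (1 << 3)
    #define DEMO_PRST4 (1 << 4)

    /* Presence is logically inverted: a clear bit means a card is fitted. */
    static bool route_to_slot4(uint16_t pciecr, bool cmdline_slot4en)
    {
            return cmdline_slot4en ||
                   (!(pciecr & DEMO_PRST4) && (pciecr & DEMO_PRST3));
    }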
diff --git a/arch/sh/drivers/pci/ops-sh4.c b/arch/sh/drivers/pci/ops-sh4.c
index 0b81999fb88b..b6234203e0ac 100644
--- a/arch/sh/drivers/pci/ops-sh4.c
+++ b/arch/sh/drivers/pci/ops-sh4.c
@@ -9,6 +9,7 @@
*/
#include <linux/pci.h>
#include <linux/io.h>
+#include <linux/spinlock.h>
#include <asm/addrspace.h>
#include "pci-sh4.h"
@@ -18,8 +19,6 @@
#define CONFIG_CMD(bus, devfn, where) \
(0x80000000 | (bus->number << 16) | (devfn << 8) | (where & ~3))
-static DEFINE_SPINLOCK(sh4_pci_lock);
-
/*
* Functions for accessing PCI configuration space with type 1 accesses
*/
@@ -34,10 +33,10 @@ static int sh4_pci_read(struct pci_bus *bus, unsigned int devfn,
* PCIPDR may only be accessed as 32 bit words,
* so we must do byte alignment by hand
*/
- spin_lock_irqsave(&sh4_pci_lock, flags);
+ raw_spin_lock_irqsave(&pci_config_lock, flags);
pci_write_reg(chan, CONFIG_CMD(bus, devfn, where), SH4_PCIPAR);
data = pci_read_reg(chan, SH4_PCIPDR);
- spin_unlock_irqrestore(&sh4_pci_lock, flags);
+ raw_spin_unlock_irqrestore(&pci_config_lock, flags);
switch (size) {
case 1:
@@ -69,10 +68,10 @@ static int sh4_pci_write(struct pci_bus *bus, unsigned int devfn,
int shift;
u32 data;
- spin_lock_irqsave(&sh4_pci_lock, flags);
+ raw_spin_lock_irqsave(&pci_config_lock, flags);
pci_write_reg(chan, CONFIG_CMD(bus, devfn, where), SH4_PCIPAR);
data = pci_read_reg(chan, SH4_PCIPDR);
- spin_unlock_irqrestore(&sh4_pci_lock, flags);
+ raw_spin_unlock_irqrestore(&pci_config_lock, flags);
switch (size) {
case 1:
diff --git a/arch/sh/drivers/pci/ops-sh7786.c b/arch/sh/drivers/pci/ops-sh7786.c
index 48f594b9582b..128421009e3f 100644
--- a/arch/sh/drivers/pci/ops-sh7786.c
+++ b/arch/sh/drivers/pci/ops-sh7786.c
@@ -1,7 +1,7 @@
/*
* Generic SH7786 PCI-Express operations.
*
- * Copyright (C) 2009 Paul Mundt
+ * Copyright (C) 2009 - 2010 Paul Mundt
*
* This file is subject to the terms and conditions of the GNU General Public
* License v2. See the file "COPYING" in the main directory of this archive
@@ -19,37 +19,72 @@ enum {
PCI_ACCESS_WRITE,
};
-static DEFINE_SPINLOCK(sh7786_pcie_lock);
-
static int sh7786_pcie_config_access(unsigned char access_type,
struct pci_bus *bus, unsigned int devfn, int where, u32 *data)
{
struct pci_channel *chan = bus->sysdata;
- int dev, func;
+ int dev, func, type, reg;
dev = PCI_SLOT(devfn);
func = PCI_FUNC(devfn);
+ type = !!bus->parent;
+ reg = where & ~3;
if (bus->number > 255 || dev > 31 || func > 7)
return PCIBIOS_FUNC_NOT_SUPPORTED;
- if (devfn)
- return PCIBIOS_DEVICE_NOT_FOUND;
+
+ /*
+ * While each channel has its own memory-mapped extended config
+ * space, it's generally only accessible when in endpoint mode.
+ * When in root complex mode, the controller is unable to target
+ * itself with either type 0 or type 1 accesses, and indeed, any
+ * controller initiated target transfer to its own config space
+ * results in a completer abort.
+ *
+ * Each channel effectively only supports a single device, but as
+ * the same channel <-> device access works for any PCI_SLOT()
+ * value, we cheat a bit here and bind the controller's config
+ * space to devfn 0 in order to enable self-enumeration. In this
+ * case the regular PAR/PDR path is sidelined and the mangled
+ * config access itself is initiated as a SuperHyway transaction.
+ */
+ if (pci_is_root_bus(bus)) {
+ if (dev == 0) {
+ if (access_type == PCI_ACCESS_READ)
+ *data = pci_read_reg(chan, PCI_REG(reg));
+ else
+ pci_write_reg(chan, *data, PCI_REG(reg));
+
+ return PCIBIOS_SUCCESSFUL;
+ } else if (dev > 1)
+ return PCIBIOS_DEVICE_NOT_FOUND;
+ }
+
+ /* Clear errors */
+ pci_write_reg(chan, pci_read_reg(chan, SH4A_PCIEERRFR), SH4A_PCIEERRFR);
/* Set the PIO address */
pci_write_reg(chan, (bus->number << 24) | (dev << 19) |
- (func << 16) | (where & ~3), SH4A_PCIEPAR);
+ (func << 16) | reg, SH4A_PCIEPAR);
/* Enable the configuration access */
- pci_write_reg(chan, (1 << 31), SH4A_PCIEPCTLR);
+ pci_write_reg(chan, (1 << 31) | (type << 8), SH4A_PCIEPCTLR);
+
+ /* Check for errors */
+ if (pci_read_reg(chan, SH4A_PCIEERRFR) & 0x10)
+ return PCIBIOS_DEVICE_NOT_FOUND;
+
+ /* Check for master and target aborts */
+ if (pci_read_reg(chan, SH4A_PCIEPCICONF1) & ((1 << 29) | (1 << 28)))
+ return PCIBIOS_DEVICE_NOT_FOUND;
if (access_type == PCI_ACCESS_READ)
*data = pci_read_reg(chan, SH4A_PCIEPDR);
else
pci_write_reg(chan, *data, SH4A_PCIEPDR);
- /* Check for master and target aborts */
- if (pci_read_reg(chan, SH4A_PCIEPCICONF1) & ((1 << 29) | (1 << 28)))
- return PCIBIOS_DEVICE_NOT_FOUND;
+ /* Disable the configuration access */
+ pci_write_reg(chan, 0, SH4A_PCIEPCTLR);
return PCIBIOS_SUCCESSFUL;
}
@@ -66,11 +101,13 @@ static int sh7786_pcie_read(struct pci_bus *bus, unsigned int devfn,
else if ((size == 4) && (where & 3))
return PCIBIOS_BAD_REGISTER_NUMBER;
- spin_lock_irqsave(&sh7786_pcie_lock, flags);
+ raw_spin_lock_irqsave(&pci_config_lock, flags);
ret = sh7786_pcie_config_access(PCI_ACCESS_READ, bus,
devfn, where, &data);
- if (ret != PCIBIOS_SUCCESSFUL)
+ if (ret != PCIBIOS_SUCCESSFUL) {
+ *val = 0xffffffff;
goto out;
+ }
if (size == 1)
*val = (data >> ((where & 3) << 3)) & 0xff;
@@ -84,7 +121,7 @@ static int sh7786_pcie_read(struct pci_bus *bus, unsigned int devfn,
devfn, where, size, (unsigned long)*val);
out:
- spin_unlock_irqrestore(&sh7786_pcie_lock, flags);
+ raw_spin_unlock_irqrestore(&pci_config_lock, flags);
return ret;
}
@@ -100,7 +137,7 @@ static int sh7786_pcie_write(struct pci_bus *bus, unsigned int devfn,
else if ((size == 4) && (where & 3))
return PCIBIOS_BAD_REGISTER_NUMBER;
- spin_lock_irqsave(&sh7786_pcie_lock, flags);
+ raw_spin_lock_irqsave(&pci_config_lock, flags);
ret = sh7786_pcie_config_access(PCI_ACCESS_READ, bus,
devfn, where, &data);
if (ret != PCIBIOS_SUCCESSFUL)
@@ -124,7 +161,7 @@ static int sh7786_pcie_write(struct pci_bus *bus, unsigned int devfn,
ret = sh7786_pcie_config_access(PCI_ACCESS_WRITE, bus,
devfn, where, &data);
out:
- spin_unlock_irqrestore(&sh7786_pcie_lock, flags);
+ raw_spin_unlock_irqrestore(&pci_config_lock, flags);
return ret;
}
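The sub-word handling in sh7786_pcie_read() above always performs a 32-bit PCIEPDR access and then carves the requested bytes out of the returned dword. A standalone sketch of that extraction, using plain C types instead of the kernel's:

    #include <stdint.h>

    /* 'where' is the config-space offset, 'size' is 1, 2 or 4 bytes. */
    static uint32_t extract_config_bytes(uint32_t dword, int where, int size)
    {
            int shift = (where & 3) << 3;   /* byte offset -> bit offset */

            if (size == 1)
                    return (dword >> shift) & 0xff;
            if (size == 2)
                    return (dword >> shift) & 0xffff;
            return dword;                   /* size == 4 */
    }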
diff --git a/arch/sh/drivers/pci/pci-sh7751.c b/arch/sh/drivers/pci/pci-sh7751.c
index f98141b3b7d7..86adb1e235cd 100644
--- a/arch/sh/drivers/pci/pci-sh7751.c
+++ b/arch/sh/drivers/pci/pci-sh7751.c
@@ -81,7 +81,7 @@ static int __init sh7751_pci_init(void)
unsigned int id;
u32 word, reg;
- printk(KERN_NOTICE "PCI: Starting intialization.\n");
+ printk(KERN_NOTICE "PCI: Starting initialization.\n");
chan->reg_base = 0xfe200000;
diff --git a/arch/sh/drivers/pci/pci-sh7780.c b/arch/sh/drivers/pci/pci-sh7780.c
index ffdcbf10b95e..edb7cca14882 100644
--- a/arch/sh/drivers/pci/pci-sh7780.c
+++ b/arch/sh/drivers/pci/pci-sh7780.c
@@ -246,7 +246,7 @@ static int __init sh7780_pci_init(void)
const char *type;
int ret, i;
- printk(KERN_NOTICE "PCI: Starting intialization.\n");
+ printk(KERN_NOTICE "PCI: Starting initialization.\n");
chan->reg_base = 0xfe040000;
diff --git a/arch/sh/drivers/pci/pci-sh7780.h b/arch/sh/drivers/pci/pci-sh7780.h
index 205dcbefe275..1742e2c9db7a 100644
--- a/arch/sh/drivers/pci/pci-sh7780.h
+++ b/arch/sh/drivers/pci/pci-sh7780.h
@@ -12,12 +12,6 @@
#ifndef _PCI_SH7780_H_
#define _PCI_SH7780_H_
-#define PCI_VENDOR_ID_RENESAS 0x1912
-#define PCI_DEVICE_ID_RENESAS_SH7781 0x0001
-#define PCI_DEVICE_ID_RENESAS_SH7780 0x0002
-#define PCI_DEVICE_ID_RENESAS_SH7763 0x0004
-#define PCI_DEVICE_ID_RENESAS_SH7785 0x0007
-
/* SH7780 Control Registers */
#define PCIECR 0xFE000008
#define PCIECR_ENBL 0x01
diff --git a/arch/sh/drivers/pci/pci.c b/arch/sh/drivers/pci/pci.c
index 1e9598d2bbf4..60ee09a4e121 100644
--- a/arch/sh/drivers/pci/pci.c
+++ b/arch/sh/drivers/pci/pci.c
@@ -19,6 +19,7 @@
#include <linux/dma-debug.h>
#include <linux/io.h>
#include <linux/mutex.h>
+#include <linux/spinlock.h>
unsigned long PCIBIOS_MIN_IO = 0x0000;
unsigned long PCIBIOS_MIN_MEM = 0;
@@ -56,6 +57,11 @@ static void __devinit pcibios_scanbus(struct pci_channel *hose)
}
}
+/*
+ * This interrupt-safe spinlock protects all accesses to PCI
+ * configuration space.
+ */
+DEFINE_RAW_SPINLOCK(pci_config_lock);
static DEFINE_MUTEX(pci_scan_mutex);
int __devinit register_pci_controller(struct pci_channel *hose)
@@ -233,40 +239,7 @@ void pcibios_bus_to_resource(struct pci_dev *dev, struct resource *res,
int pcibios_enable_device(struct pci_dev *dev, int mask)
{
- u16 cmd, old_cmd;
- int idx;
- struct resource *r;
-
- pci_read_config_word(dev, PCI_COMMAND, &cmd);
- old_cmd = cmd;
- for (idx=0; idx < PCI_NUM_RESOURCES; idx++) {
- /* Only set up the requested stuff */
- if (!(mask & (1<<idx)))
- continue;
-
- r = &dev->resource[idx];
- if (!(r->flags & (IORESOURCE_IO | IORESOURCE_MEM)))
- continue;
- if ((idx == PCI_ROM_RESOURCE) &&
- (!(r->flags & IORESOURCE_ROM_ENABLE)))
- continue;
- if (!r->start && r->end) {
- printk(KERN_ERR "PCI: Device %s not available "
- "because of resource collisions\n",
- pci_name(dev));
- return -EINVAL;
- }
- if (r->flags & IORESOURCE_IO)
- cmd |= PCI_COMMAND_IO;
- if (r->flags & IORESOURCE_MEM)
- cmd |= PCI_COMMAND_MEMORY;
- }
- if (cmd != old_cmd) {
- printk("PCI: Enabling device %s (%04x -> %04x)\n",
- pci_name(dev), old_cmd, cmd);
- pci_write_config_word(dev, PCI_COMMAND, cmd);
- }
- return 0;
+ return pci_enable_resources(dev, mask);
}
/*
@@ -295,7 +268,7 @@ void __init pcibios_update_irq(struct pci_dev *dev, int irq)
pci_write_config_byte(dev, PCI_INTERRUPT_LINE, irq);
}
-char * __devinit pcibios_setup(char *str)
+char * __devinit __weak pcibios_setup(char *str)
{
return str;
}
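Marking pcibios_setup() __weak in the hunk above is what lets fixups-sdk7786.c, earlier in this patch, supply its own definition without touching this file; returning NULL (as that override does for "slot4en") signals that the option was consumed. A compilable sketch of the pattern with a hypothetical symbol name; a board file providing a strong demo_pci_setup() would replace the weak default at link time:

    #include <stdio.h>

    /* Weak default: any non-weak definition of the same symbol in another
     * object file replaces this one when the final binary is linked. */
    char * __attribute__((weak)) demo_pci_setup(char *str)
    {
            return str;             /* not handled, hand the option back */
    }

    int main(void)
    {
            char opt[] = "slot4en";

            printf("%s\n", demo_pci_setup(opt) ? "not consumed" : "consumed");
            return 0;
    }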
diff --git a/arch/sh/drivers/pci/pcie-sh7786.c b/arch/sh/drivers/pci/pcie-sh7786.c
index 68cb9b0ac9d2..96e9b058aa1d 100644
--- a/arch/sh/drivers/pci/pcie-sh7786.c
+++ b/arch/sh/drivers/pci/pcie-sh7786.c
@@ -13,11 +13,14 @@
#include <linux/io.h>
#include <linux/delay.h>
#include <linux/slab.h>
+#include <linux/clk.h>
+#include <linux/sh_clk.h>
#include "pcie-sh7786.h"
#include <asm/sizes.h>
struct sh7786_pcie_port {
struct pci_channel *hose;
+ struct clk *fclk, phy_clk;
unsigned int index;
int endpoint;
int link;
@@ -51,6 +54,7 @@ static struct resource sh7786_pci0_resources[] = {
.name = "PCIe0 MEM 2",
.start = 0xfe100000,
.end = 0xfe100000 + SZ_1M - 1,
+ .flags = IORESOURCE_MEM,
},
};
@@ -74,6 +78,7 @@ static struct resource sh7786_pci1_resources[] = {
.name = "PCIe1 MEM 2",
.start = 0xfe300000,
.end = 0xfe300000 + SZ_1M - 1,
+ .flags = IORESOURCE_MEM,
},
};
@@ -82,6 +87,7 @@ static struct resource sh7786_pci2_resources[] = {
.name = "PCIe2 IO",
.start = 0xfc800000,
.end = 0xfc800000 + SZ_4M - 1,
+ .flags = IORESOURCE_IO,
}, {
.name = "PCIe2 MEM 0",
.start = 0x80000000,
@@ -96,6 +102,7 @@ static struct resource sh7786_pci2_resources[] = {
.name = "PCIe2 MEM 2",
.start = 0xfcd00000,
.end = 0xfcd00000 + SZ_1M - 1,
+ .flags = IORESOURCE_MEM,
},
};
@@ -117,7 +124,29 @@ static struct pci_channel sh7786_pci_channels[] = {
DEFINE_CONTROLLER(0xfcc00000, 2),
};
-static int phy_wait_for_ack(struct pci_channel *chan)
+static struct clk fixed_pciexclkp = {
+ .rate = 100000000, /* 100 MHz reference clock */
+};
+
+static void __devinit sh7786_pci_fixup(struct pci_dev *dev)
+{
+ /*
+ * Prevent enumeration of root complex resources.
+ */
+ if (pci_is_root_bus(dev->bus) && dev->devfn == 0) {
+ int i;
+
+ for (i = 0; i < DEVICE_COUNT_RESOURCE; i++) {
+ dev->resource[i].start = 0;
+ dev->resource[i].end = 0;
+ dev->resource[i].flags = 0;
+ }
+ }
+}
+DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_RENESAS, PCI_DEVICE_ID_RENESAS_SH7786,
+ sh7786_pci_fixup);
+
+static int __init phy_wait_for_ack(struct pci_channel *chan)
{
unsigned int timeout = 100;
@@ -131,7 +160,7 @@ static int phy_wait_for_ack(struct pci_channel *chan)
return -ETIMEDOUT;
}
-static int pci_wait_for_irq(struct pci_channel *chan, unsigned int mask)
+static int __init pci_wait_for_irq(struct pci_channel *chan, unsigned int mask)
{
unsigned int timeout = 100;
@@ -145,19 +174,14 @@ static int pci_wait_for_irq(struct pci_channel *chan, unsigned int mask)
return -ETIMEDOUT;
}
-static void phy_write_reg(struct pci_channel *chan, unsigned int addr,
- unsigned int lane, unsigned int data)
+static void __init phy_write_reg(struct pci_channel *chan, unsigned int addr,
+ unsigned int lane, unsigned int data)
{
- unsigned long phyaddr, ctrl;
+ unsigned long phyaddr;
phyaddr = (1 << BITS_CMD) + ((lane & 0xf) << BITS_LANE) +
((addr & 0xff) << BITS_ADR);
- /* Enable clock */
- ctrl = pci_read_reg(chan, SH4A_PCIEPHYCTLR);
- ctrl |= (1 << BITS_CKE);
- pci_write_reg(chan, ctrl, SH4A_PCIEPHYCTLR);
-
/* Set write data */
pci_write_reg(chan, data, SH4A_PCIEPHYDOUTR);
pci_write_reg(chan, phyaddr, SH4A_PCIEPHYADRR);
@@ -165,20 +189,74 @@ static void phy_write_reg(struct pci_channel *chan, unsigned int addr,
phy_wait_for_ack(chan);
/* Clear command */
+ pci_write_reg(chan, 0, SH4A_PCIEPHYDOUTR);
pci_write_reg(chan, 0, SH4A_PCIEPHYADRR);
phy_wait_for_ack(chan);
+}
- /* Disable clock */
- ctrl = pci_read_reg(chan, SH4A_PCIEPHYCTLR);
- ctrl &= ~(1 << BITS_CKE);
- pci_write_reg(chan, ctrl, SH4A_PCIEPHYCTLR);
+static int __init pcie_clk_init(struct sh7786_pcie_port *port)
+{
+ struct pci_channel *chan = port->hose;
+ struct clk *clk;
+ char fclk_name[16];
+ int ret;
+
+ /*
+ * First register the fixed clock
+ */
+ ret = clk_register(&fixed_pciexclkp);
+ if (unlikely(ret != 0))
+ return ret;
+
+ /*
+ * Grab the port's function clock, which the PHY clock depends
+ * on. Clock lookups don't help us much at this point, since no
+ * dev_id is available this early. Lame.
+ */
+ snprintf(fclk_name, sizeof(fclk_name), "pcie%d_fck", port->index);
+
+ port->fclk = clk_get(NULL, fclk_name);
+ if (IS_ERR(port->fclk)) {
+ ret = PTR_ERR(port->fclk);
+ goto err_fclk;
+ }
+
+ clk_enable(port->fclk);
+
+ /*
+ * And now, set up the PHY clock
+ */
+ clk = &port->phy_clk;
+
+ memset(clk, 0, sizeof(struct clk));
+
+ clk->parent = &fixed_pciexclkp;
+ clk->enable_reg = (void __iomem *)(chan->reg_base + SH4A_PCIEPHYCTLR);
+ clk->enable_bit = BITS_CKE;
+
+ ret = sh_clk_mstp32_register(clk, 1);
+ if (unlikely(ret < 0))
+ goto err_phy;
+
+ return 0;
+
+err_phy:
+ clk_disable(port->fclk);
+ clk_put(port->fclk);
+err_fclk:
+ clk_unregister(&fixed_pciexclkp);
+
+ return ret;
}
-static int phy_init(struct pci_channel *chan)
+static int __init phy_init(struct sh7786_pcie_port *port)
{
+ struct pci_channel *chan = port->hose;
unsigned int timeout = 100;
+ clk_enable(&port->phy_clk);
+
/* Initialize the phy */
phy_write_reg(chan, 0x60, 0xf, 0x004b008b);
phy_write_reg(chan, 0x61, 0xf, 0x00007b41);
@@ -187,9 +265,13 @@ static int phy_init(struct pci_channel *chan)
phy_write_reg(chan, 0x66, 0xf, 0x00000010);
phy_write_reg(chan, 0x74, 0xf, 0x0007001c);
phy_write_reg(chan, 0x79, 0xf, 0x01fc000d);
+ phy_write_reg(chan, 0xb0, 0xf, 0x00000610);
/* Deassert Standby */
- phy_write_reg(chan, 0x67, 0xf, 0x00000400);
+ phy_write_reg(chan, 0x67, 0x1, 0x00000400);
+
+ /* Disable clock */
+ clk_disable(&port->phy_clk);
while (timeout--) {
if (pci_read_reg(chan, SH4A_PCIEPHYSR))
@@ -201,22 +283,33 @@ static int phy_init(struct pci_channel *chan)
return -ETIMEDOUT;
}
-static int pcie_init(struct sh7786_pcie_port *port)
+static void __init pcie_reset(struct sh7786_pcie_port *port)
+{
+ struct pci_channel *chan = port->hose;
+
+ pci_write_reg(chan, 1, SH4A_PCIESRSTR);
+ pci_write_reg(chan, 0, SH4A_PCIETCTLR);
+ pci_write_reg(chan, 0, SH4A_PCIESRSTR);
+ pci_write_reg(chan, 0, SH4A_PCIETXVC0SR);
+}
+
+static int __init pcie_init(struct sh7786_pcie_port *port)
{
struct pci_channel *chan = port->hose;
unsigned int data;
phys_addr_t memphys;
size_t memsize;
- int ret, i;
+ int ret, i, win;
/* Begin initialization */
- pci_write_reg(chan, 0, SH4A_PCIETCTLR);
+ pcie_reset(port);
- /* Initialize as type1. */
- data = pci_read_reg(chan, SH4A_PCIEPCICONF3);
- data &= ~(0x7f << 16);
- data |= PCI_HEADER_TYPE_BRIDGE << 16;
- pci_write_reg(chan, data, SH4A_PCIEPCICONF3);
+ /*
+ * Initial header for port config space is type 1, set the device
+ * class to match. Hardware takes care of propagating the IDSETR
+ * settings, so there is no need to bother with a quirk.
+ */
+ pci_write_reg(chan, PCI_CLASS_BRIDGE_PCI << 16, SH4A_PCIEIDSETR1);
/* Initialize default capabilities. */
data = pci_read_reg(chan, SH4A_PCIEEXPCAP0);
@@ -268,30 +361,33 @@ static int pcie_init(struct sh7786_pcie_port *port)
* LAR1/LAMR1.
*/
if (memsize > SZ_512M) {
- __raw_writel(memphys + SZ_512M, chan->reg_base + SH4A_PCIELAR1);
- __raw_writel(((memsize - SZ_512M) - SZ_256) | 1,
- chan->reg_base + SH4A_PCIELAMR1);
+ pci_write_reg(chan, memphys + SZ_512M, SH4A_PCIELAR1);
+ pci_write_reg(chan, ((memsize - SZ_512M) - SZ_256) | 1,
+ SH4A_PCIELAMR1);
memsize = SZ_512M;
} else {
/*
* Otherwise just zero it out and disable it.
*/
- __raw_writel(0, chan->reg_base + SH4A_PCIELAR1);
- __raw_writel(0, chan->reg_base + SH4A_PCIELAMR1);
+ pci_write_reg(chan, 0, SH4A_PCIELAR1);
+ pci_write_reg(chan, 0, SH4A_PCIELAMR1);
}
/*
* LAR0/LAMR0 covers up to the first 512MB, which is enough to
* cover all of lowmem on most platforms.
*/
- __raw_writel(memphys, chan->reg_base + SH4A_PCIELAR0);
- __raw_writel((memsize - SZ_256) | 1, chan->reg_base + SH4A_PCIELAMR0);
+ pci_write_reg(chan, memphys, SH4A_PCIELAR0);
+ pci_write_reg(chan, (memsize - SZ_256) | 1, SH4A_PCIELAMR0);
/* Finish initialization */
data = pci_read_reg(chan, SH4A_PCIETCTLR);
data |= 0x1;
pci_write_reg(chan, data, SH4A_PCIETCTLR);
+ /* Let things settle down a bit.. */
+ mdelay(100);
+
/* Enable DL_Active Interrupt generation */
data = pci_read_reg(chan, SH4A_PCIEDLINTENR);
data |= PCIEDLINTENR_DLL_ACT_ENABLE;
@@ -302,9 +398,12 @@ static int pcie_init(struct sh7786_pcie_port *port)
data |= PCIEMACCTLR_SCR_DIS | (0xff << 16);
pci_write_reg(chan, data, SH4A_PCIEMACCTLR);
+ /*
+ * This will time out if we don't have a link, but we permit the
+ * port to register anyway in order to support hotplug on future
+ * hardware.
+ */
ret = pci_wait_for_irq(chan, MASK_INT_TX_CTRL);
- if (unlikely(ret != 0))
- return -ENODEV;
data = pci_read_reg(chan, SH4A_PCIEPCICONF1);
data &= ~(PCI_STATUS_DEVSEL_MASK << 16);
@@ -317,35 +416,48 @@ static int pcie_init(struct sh7786_pcie_port *port)
wmb();
- data = pci_read_reg(chan, SH4A_PCIEMACSR);
- printk(KERN_NOTICE "PCI: PCIe#%d link width %d\n",
- port->index, (data >> 20) & 0x3f);
-
+ if (ret == 0) {
+ data = pci_read_reg(chan, SH4A_PCIEMACSR);
+ printk(KERN_NOTICE "PCI: PCIe#%d x%d link detected\n",
+ port->index, (data >> 20) & 0x3f);
+ } else
+ printk(KERN_NOTICE "PCI: PCIe#%d link down\n",
+ port->index);
- for (i = 0; i < chan->nr_resources; i++) {
+ for (i = win = 0; i < chan->nr_resources; i++) {
struct resource *res = chan->resources + i;
resource_size_t size;
- u32 enable_mask;
+ u32 mask;
- pci_write_reg(chan, 0x00000000, SH4A_PCIEPTCTLR(i));
+ /*
+ * We can't use the 32-bit mode windows in legacy 29-bit
+ * mode, so just skip them entirely.
+ */
+ if ((res->flags & IORESOURCE_MEM_32BIT) && __in_29bit_mode())
+ continue;
- size = resource_size(res);
+ pci_write_reg(chan, 0x00000000, SH4A_PCIEPTCTLR(win));
/*
* The PAMR mask is calculated in units of 256kB, which
* keeps things pretty simple.
*/
- __raw_writel(((roundup_pow_of_two(size) / SZ_256K) - 1) << 18,
- chan->reg_base + SH4A_PCIEPAMR(i));
+ size = resource_size(res);
+ mask = (roundup_pow_of_two(size) / SZ_256K) - 1;
+ pci_write_reg(chan, mask << 18, SH4A_PCIEPAMR(win));
- pci_write_reg(chan, 0x00000000, SH4A_PCIEPARH(i));
- pci_write_reg(chan, 0x00000000, SH4A_PCIEPARL(i));
+ pci_write_reg(chan, upper_32_bits(res->start),
+ SH4A_PCIEPARH(win));
+ pci_write_reg(chan, lower_32_bits(res->start),
+ SH4A_PCIEPARL(win));
- enable_mask = MASK_PARE;
+ mask = MASK_PARE;
if (res->flags & IORESOURCE_IO)
- enable_mask |= MASK_SPC;
+ mask |= MASK_SPC;
+
+ pci_write_reg(chan, mask, SH4A_PCIEPTCTLR(win));
- pci_write_reg(chan, enable_mask, SH4A_PCIEPTCTLR(i));
+ win++;
}
return 0;
@@ -356,26 +468,33 @@ int __init pcibios_map_platform_irq(struct pci_dev *pdev, u8 slot, u8 pin)
return 71;
}
-static int sh7786_pcie_core_init(void)
+static int __init sh7786_pcie_core_init(void)
{
/* Return the number of ports */
return test_mode_pin(MODE_PIN12) ? 3 : 2;
}
-static int __devinit sh7786_pcie_init_hw(struct sh7786_pcie_port *port)
+static int __init sh7786_pcie_init_hw(struct sh7786_pcie_port *port)
{
int ret;
- ret = phy_init(port->hose);
- if (unlikely(ret < 0))
- return ret;
-
/*
* Check if we are configured in endpoint or root complex mode,
* this is a fixed pin setting that applies to all PCIe ports.
*/
port->endpoint = test_mode_pin(MODE_PIN11);
+ /*
+ * Setup clocks, needed both for PHY and PCIe registers.
+ */
+ ret = pcie_clk_init(port);
+ if (unlikely(ret < 0))
+ return ret;
+
+ ret = phy_init(port);
+ if (unlikely(ret < 0))
+ return ret;
+
ret = pcie_init(port);
if (unlikely(ret < 0))
return ret;
@@ -390,9 +509,10 @@ static struct sh7786_pcie_hwops sh7786_65nm_pcie_hwops __initdata = {
static int __init sh7786_pcie_init(void)
{
+ struct clk *platclk;
int ret = 0, i;
- printk(KERN_NOTICE "PCI: Starting intialization.\n");
+ printk(KERN_NOTICE "PCI: Starting initialization.\n");
sh7786_pcie_hwops = &sh7786_65nm_pcie_hwops;
@@ -407,6 +527,22 @@ static int __init sh7786_pcie_init(void)
if (unlikely(!sh7786_pcie_ports))
return -ENOMEM;
+ /*
+ * Fetch any optional platform clock associated with this block.
+ *
+ * This is a rather nasty hack for boards with spec-mocking FPGAs
+ * that have a secondary set of clocks outside of the on-chip
+ * ones that need to be accounted for before there is any chance
+ * of touching the existing MSTP bits or CPG clocks.
+ */
+ platclk = clk_get(NULL, "pcie_plat_clk");
+ if (IS_ERR(platclk)) {
+ /* Sane hardware should probably get a WARN_ON.. */
+ platclk = NULL;
+ }
+
+ clk_enable(platclk);
+
printk(KERN_NOTICE "PCI: probing %d ports.\n", nr_ports);
for (i = 0; i < nr_ports; i++) {
@@ -419,8 +555,11 @@ static int __init sh7786_pcie_init(void)
ret |= sh7786_pcie_hwops->port_init_hw(port);
}
- if (unlikely(ret))
+ if (unlikely(ret)) {
+ clk_disable(platclk);
+ clk_put(platclk);
return ret;
+ }
return 0;
}
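The window setup in pcie_init() above expresses each resource size as a mask in 256 kB units and shifts it up by 18 bits before writing PCIEPAMR. A small worked example of that arithmetic, with the SZ_* constants written out by hand and the kernel's roundup_pow_of_two() replaced by a simple loop:

    #include <stdio.h>

    #define DEMO_SZ_256K 0x00040000UL
    #define DEMO_SZ_64M  0x04000000UL

    static unsigned long roundup_pow2(unsigned long x)
    {
            unsigned long r = 1;

            while (r < x)
                    r <<= 1;
            return r;
    }

    int main(void)
    {
            unsigned long size = DEMO_SZ_64M;       /* example window size */
            unsigned long mask = (roundup_pow2(size) / DEMO_SZ_256K) - 1;

            /* 64 MB -> mask 0xff -> register value 0x03fc0000 */
            printf("PAMR = 0x%08lx\n", mask << 18);
            return 0;
    }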
diff --git a/arch/sh/drivers/pci/pcie-sh7786.h b/arch/sh/drivers/pci/pcie-sh7786.h
index 90a6992576b0..1ee054e47eae 100644
--- a/arch/sh/drivers/pci/pcie-sh7786.h
+++ b/arch/sh/drivers/pci/pcie-sh7786.h
@@ -55,8 +55,11 @@
#define BITS_ERRRCV (0) /* 0 ERRRCV 0 */
#define MASK_ERRRCV (1<<BITS_ERRRCV)
+/* PCIEENBLR */
+#define SH4A_PCIEENBLR (0x000008) /* R/W - 0x0000 0001 32 */
+
/* PCIEECR */
-#define SH4A_PCIEECR (0x000008) /* R/W - 0x0000 0000 32 */
+#define SH4A_PCIEECR (0x00000C) /* R/W - 0x0000 0000 32 */
#define BITS_ENBL (0) /* 0 ENBL 0 R/W */
#define MASK_ENBL (1<<BITS_ENBL)
@@ -113,6 +116,27 @@
#define BITS_MDATA (0)
#define MASK_MDATA (0xffffffff<<BITS_MDATA)
+/* PCIEUNLOCKCR */
+#define SH4A_PCIEUNLOCKCR (0x000048) /* R/W - 0x0000 0000 32 */
+
+/* PCIEIDR */
+#define SH4A_PCIEIDR (0x000060) /* R/W - 0x0101 1101 32 */
+
+/* PCIEDBGCTLR */
+#define SH4A_PCIEDBGCTLR (0x000100) /* R/W - 0x0000 0000 32 */
+
+/* PCIEINTXR */
+#define SH4A_PCIEINTXR (0x004000) /* R/W - 0x0000 0000 32 */
+
+/* PCIERMSGR */
+#define SH4A_PCIERMSGR (0x004010) /* R/W - 0x0000 0000 32 */
+
+/* PCIERSTR */
+#define SH4A_PCIERSTR(x) (0x008000 + ((x) * 0x4)) /* R/W - 0x0000 0000 32 */
+
+/* PCIESRSTR */
+#define SH4A_PCIESRSTR (0x008040) /* R/W - 0x0000 0000 32 */
+
/* PCIEPHYCTLR */
#define SH4A_PCIEPHYCTLR (0x010000) /* R/W - 0x0000 0000 32 */
#define BITS_CKE (0)
@@ -121,6 +145,9 @@
/* PCIERMSGIER */
#define SH4A_PCIERMSGIER (0x004040) /* R/W - 0x0000 0000 32 */
+/* PCIEPHYCTLR */
+#define SH4A_PCIEPHYCTLR (0x010000) /* R/W - 0x0000 0000 32 */
+
/* PCIEPHYADRR */
#define SH4A_PCIEPHYADRR (0x010004) /* R/W - 0x0000 0000 32 */
#define BITS_ACK (24) // Rev1.171
@@ -152,7 +179,7 @@
#define MASK_CFINT (1<<BITS_CFINT)
/* PCIETSTR */
-#define SH4A_PCIETSTR (0x020004) /* R/W R/W 0x0000 0000 32 */
+#define SH4A_PCIETSTR (0x020004) /* R 0x0000 0000 32 */
/* PCIEINTR */
#define SH4A_PCIEINTR (0x020008) /* R/W R/W 0x0000 0000 32 */
@@ -236,6 +263,9 @@
#define BITS_INTPM (8)
#define MASK_INTPM (1<<BITS_INTPM)
+/* PCIEEH0R */
+#define SH4A_PCIEEHR(x) (0x020010 + ((x) * 0x4)) /* R - 0x0000 0000 32 */
+
/* PCIEAIR */
#define SH4A_PCIEAIR (SH4A_PCIE_BASE + 0x020010) /* R/W R/W 0xxxxx xxxx 32 */
@@ -244,6 +274,25 @@
/* PCIEERRFR */ // Rev1.18
#define SH4A_PCIEERRFR (0x020020) /* R/W R/W 0xxxxx xxxx 32 */ // Rev1.18
+
+/* PCIEERRFER */
+#define SH4A_PCIEERRFER (0x020024) /* R/W R/W 0x0000 0000 32 */
+
+/* PCIEERRFR2 */
+#define SH4A_PCIEERRFR2 (0x020028) /* R/W R/W 0x0000 0000 32 */
+
+/* PCIEMSIR */
+#define SH4A_PCIEMSIR (0x020040) /* R/W - 0x0000 0000 32 */
+
+/* PCIEMSIFR */
+#define SH4A_PCIEMSIFR (0x020044) /* R/W R/W 0x0000 0000 32 */
+
+/* PCIEPWRCTLR */
+#define SH4A_PCIEPWRCTLR (0x020100) /* R/W - 0x0000 0000 32 */
+
+/* PCIEPCCTLR */
+#define SH4A_PCIEPCCTLR (0x020180) /* R/W - 0x0000 0000 32 */
+
// Rev1.18
/* PCIELAR0 */
#define SH4A_PCIELAR0 (0x020200) /* R/W R/W 0x0000 0000 32 */
@@ -352,6 +401,7 @@
#define SH4A_PCIEDMCCR0 (0x021120) /* R/W R/W 0x0000 0000 32 */
#define SH4A_PCIEDMCC2R0 (0x021124) /* R/W R/W 0x0000 0000 - */
#define SH4A_PCIEDMCCCR0 (0x021128) /* R/W R/W 0x0000 0000 32 */
+#define SH4A_PCIEDMCHSR0 (0x02112C) /* R/W - 0x0000 0000 32 */
#define SH4A_PCIEDMSAR1 (0x021140) /* R/W R/W 0x0000 0000 32 */
#define SH4A_PCIEDMSAHR1 (0x021144) /* R/W R/W 0x0000 0000 32 */
#define SH4A_PCIEDMDAR1 (0x021148) /* R/W R/W 0x0000 0000 32 */
@@ -363,6 +413,7 @@
#define SH4A_PCIEDMCCR1 (0x021160) /* R/W R/W 0x0000 0000 32 */
#define SH4A_PCIEDMCC2R1 (0x021164) /* R/W R/W 0x0000 0000 - */
#define SH4A_PCIEDMCCCR1 (0x021168) /* R/W R/W 0x0000 0000 32 */
+#define SH4A_PCIEDMCHSR1 (0x02116C) /* R/W - 0x0000 0000 32 */
#define SH4A_PCIEDMSAR2 (0x021180) /* R/W R/W 0x0000 0000 32 */
#define SH4A_PCIEDMSAHR2 (0x021184) /* R/W R/W 0x0000 0000 32 */
#define SH4A_PCIEDMDAR2 (0x021188) /* R/W R/W 0x0000 0000 32 */
@@ -385,6 +436,7 @@
#define SH4A_PCIEDMCCR3 (0x0211E0) /* R/W R/W 0x0000 0000 32 */
#define SH4A_PCIEDMCC2R3 (0x0211E4) /* R/W R/W 0x0000 0000 - */
#define SH4A_PCIEDMCCCR3 (0x0211E8) /* R/W R/W 0x0000 0000 32 */
+#define SH4A_PCIEDMCHSR3 (0x0211EC) /* R/W R/W 0x0000 0000 32 */
#define SH4A_PCIEPCICONF0 (0x040000) /* R R - 8/16/32 */
#define SH4A_PCIEPCICONF1 (0x040004) /* R/W R/W 0x0008 0000 8/16/32 */
#define SH4A_PCIEPCICONF2 (0x040008) /* R/W R/W 0xFF00 0000 8/16/32 */
diff --git a/arch/sh/include/asm/Kbuild b/arch/sh/include/asm/Kbuild
index 0eed47b236ab..7beb42322f60 100644
--- a/arch/sh/include/asm/Kbuild
+++ b/arch/sh/include/asm/Kbuild
@@ -5,5 +5,7 @@ header-y += cpu-features.h
header-y += hw_breakpoint.h
header-y += posix_types_32.h
header-y += posix_types_64.h
+header-y += ptrace_32.h
+header-y += ptrace_64.h
header-y += unistd_32.h
header-y += unistd_64.h
diff --git a/arch/sh/include/asm/addrspace.h b/arch/sh/include/asm/addrspace.h
index 446b3831c214..3d1ae2bfaa6f 100644
--- a/arch/sh/include/asm/addrspace.h
+++ b/arch/sh/include/asm/addrspace.h
@@ -44,10 +44,10 @@
/*
* These will never work in 32-bit, don't even bother.
*/
-#define P1SEGADDR(a) __futile_remapping_attempt
-#define P2SEGADDR(a) __futile_remapping_attempt
-#define P3SEGADDR(a) __futile_remapping_attempt
-#define P4SEGADDR(a) __futile_remapping_attempt
+#define P1SEGADDR(a) ({ (void)(a); BUG(); NULL; })
+#define P2SEGADDR(a) ({ (void)(a); BUG(); NULL; })
+#define P3SEGADDR(a) ({ (void)(a); BUG(); NULL; })
+#define P4SEGADDR(a) ({ (void)(a); BUG(); NULL; })
#endif
#endif /* P1SEG */
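The 32-bit P*SEGADDR() stubs above use a GCC statement expression so the argument is still evaluated (keeping "unused" warnings away) before the kernel traps, while the macro as a whole still yields a pointer value. A user-space analogue of the same construct, with abort() standing in for BUG():

    #include <stdlib.h>

    /* Evaluate 'a' for its side effects, then trap; the final (void *)0
     * keeps the macro usable where an address is expected. */
    #define DEMO_FUTILE_REMAP(a) ({ (void)(a); abort(); (void *)0; })

    int main(void)
    {
            void *p = DEMO_FUTILE_REMAP(0x1000);    /* never returns */

            return p != 0;
    }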
diff --git a/arch/sh/include/asm/cacheflush.h b/arch/sh/include/asm/cacheflush.h
index 1f4e562c5e8c..82e1eabeac98 100644
--- a/arch/sh/include/asm/cacheflush.h
+++ b/arch/sh/include/asm/cacheflush.h
@@ -96,7 +96,7 @@ void kmap_coherent_init(void);
void *kmap_coherent(struct page *page, unsigned long addr);
void kunmap_coherent(void *kvaddr);
-#define PG_dcache_dirty PG_arch_1
+#define PG_dcache_clean PG_arch_1
void cpu_cache_init(void);
diff --git a/arch/sh/include/asm/elf.h b/arch/sh/include/asm/elf.h
index ce830faeebbf..f38112be67d2 100644
--- a/arch/sh/include/asm/elf.h
+++ b/arch/sh/include/asm/elf.h
@@ -50,25 +50,14 @@
#define R_SH_GOTPC 167
/* FDPIC relocs */
-#define R_SH_GOT20 70
-#define R_SH_GOTOFF20 71
-#define R_SH_GOTFUNCDESC 72
-#define R_SH_GOTFUNCDESC20 73
-#define R_SH_GOTOFFFUNCDESC 74
-#define R_SH_GOTOFFFUNCDESC20 75
-#define R_SH_FUNCDESC 76
-#define R_SH_FUNCDESC_VALUE 77
-
-#if 0 /* XXX - later .. */
-#define R_SH_GOT20 198
-#define R_SH_GOTOFF20 199
-#define R_SH_GOTFUNCDESC 200
-#define R_SH_GOTFUNCDESC20 201
-#define R_SH_GOTOFFFUNCDESC 202
-#define R_SH_GOTOFFFUNCDESC20 203
-#define R_SH_FUNCDESC 204
-#define R_SH_FUNCDESC_VALUE 205
-#endif
+#define R_SH_GOT20 201
+#define R_SH_GOTOFF20 202
+#define R_SH_GOTFUNCDESC 203
+#define R_SH_GOTFUNCDESC20 204
+#define R_SH_GOTOFFFUNCDESC 205
+#define R_SH_GOTOFFFUNCDESC20 206
+#define R_SH_FUNCDESC 207
+#define R_SH_FUNCDESC_VALUE 208
/* SHmedia relocs */
#define R_SH_IMM_LOW16 246
diff --git a/arch/sh/include/asm/fixmap.h b/arch/sh/include/asm/fixmap.h
index 6e7cea453895..bd7e79a12653 100644
--- a/arch/sh/include/asm/fixmap.h
+++ b/arch/sh/include/asm/fixmap.h
@@ -58,7 +58,7 @@ enum fixed_addresses {
#ifdef CONFIG_HIGHMEM
FIX_KMAP_BEGIN, /* reserved pte's for temporary kernel mappings */
- FIX_KMAP_END = FIX_KMAP_BEGIN+(KM_TYPE_NR*NR_CPUS)-1,
+ FIX_KMAP_END = FIX_KMAP_BEGIN + (KM_TYPE_NR * NR_CPUS) - 1,
#endif
#ifdef CONFIG_IOREMAP_FIXED
@@ -69,7 +69,7 @@ enum fixed_addresses {
*/
#define FIX_N_IOREMAPS 32
FIX_IOREMAP_BEGIN,
- FIX_IOREMAP_END = FIX_IOREMAP_BEGIN + FIX_N_IOREMAPS,
+ FIX_IOREMAP_END = FIX_IOREMAP_BEGIN + FIX_N_IOREMAPS - 1,
#endif
__end_of_fixed_addresses
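The fixmap change above is an inclusive-range off-by-one: with FIX_N_IOREMAPS slots the last valid index is BEGIN + N - 1, whereas the old "+ N" reserved one entry too many. The same arithmetic in a throwaway enum:

    #include <assert.h>

    enum demo_fixed_addresses {
            DEMO_KMAP_BEGIN,
            DEMO_KMAP_END = DEMO_KMAP_BEGIN + 4 - 1,        /* 4 slots: 0..3 */
            DEMO_IOREMAP_BEGIN,                             /* == 4 */
            DEMO_IOREMAP_END = DEMO_IOREMAP_BEGIN + 32 - 1, /* 32 slots: 4..35 */
            DEMO_END_OF_FIXED_ADDRESSES,                    /* == 36 */
    };

    int main(void)
    {
            assert(DEMO_IOREMAP_END - DEMO_IOREMAP_BEGIN + 1 == 32);
            return 0;
    }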
diff --git a/arch/sh/include/asm/gpio.h b/arch/sh/include/asm/gpio.h
index f8d9a731e903..04f53d31489f 100644
--- a/arch/sh/include/asm/gpio.h
+++ b/arch/sh/include/asm/gpio.h
@@ -41,14 +41,12 @@ static inline int gpio_cansleep(unsigned gpio)
static inline int gpio_to_irq(unsigned gpio)
{
- WARN_ON(1);
- return -ENOSYS;
+ return __gpio_to_irq(gpio);
}
static inline int irq_to_gpio(unsigned int irq)
{
- WARN_ON(1);
- return -EINVAL;
+ return -ENOSYS;
}
#endif /* CONFIG_GPIOLIB */
diff --git a/arch/sh/include/asm/irq.h b/arch/sh/include/asm/irq.h
index 02c2f0102cfa..45d08b6a5ef7 100644
--- a/arch/sh/include/asm/irq.h
+++ b/arch/sh/include/asm/irq.h
@@ -9,7 +9,7 @@
* advised to cap this at the hard limit that they're interested in
* through the machvec.
*/
-#define NR_IRQS 256
+#define NR_IRQS 512
#define NR_IRQS_LEGACY 8 /* Legacy external IRQ0-7 */
/*
diff --git a/arch/sh/include/asm/kprobes.h b/arch/sh/include/asm/kprobes.h
index 036c3311233c..134f3980e44a 100644
--- a/arch/sh/include/asm/kprobes.h
+++ b/arch/sh/include/asm/kprobes.h
@@ -16,7 +16,6 @@ typedef insn_size_t kprobe_opcode_t;
? (MAX_STACK_SIZE) \
: (((unsigned long)current_thread_info()) + THREAD_SIZE - (ADDR)))
-#define regs_return_value(_regs) ((_regs)->regs[0])
#define flush_insn_slot(p) do { } while (0)
#define kretprobe_blacklist_size 0
diff --git a/arch/sh/include/asm/pci.h b/arch/sh/include/asm/pci.h
index 8bd952fcf3ba..f0efe97f1750 100644
--- a/arch/sh/include/asm/pci.h
+++ b/arch/sh/include/asm/pci.h
@@ -37,6 +37,8 @@ struct pci_channel {
};
/* arch/sh/drivers/pci/pci.c */
+extern raw_spinlock_t pci_config_lock;
+
extern int register_pci_controller(struct pci_channel *hose);
extern void pcibios_report_status(unsigned int status_mask, int warn);
diff --git a/arch/sh/include/asm/pgtable.h b/arch/sh/include/asm/pgtable.h
index 02f77450cd8f..083ea068e819 100644
--- a/arch/sh/include/asm/pgtable.h
+++ b/arch/sh/include/asm/pgtable.h
@@ -66,7 +66,6 @@ static inline unsigned long long neff_sign_extend(unsigned long val)
#define PHYS_ADDR_MASK29 0x1fffffff
#define PHYS_ADDR_MASK32 0xffffffff
-#ifdef CONFIG_PMB
static inline unsigned long phys_addr_mask(void)
{
/* Is the MMU in 29bit mode? */
@@ -75,17 +74,6 @@ static inline unsigned long phys_addr_mask(void)
return PHYS_ADDR_MASK32;
}
-#elif defined(CONFIG_32BIT)
-static inline unsigned long phys_addr_mask(void)
-{
- return PHYS_ADDR_MASK32;
-}
-#else
-static inline unsigned long phys_addr_mask(void)
-{
- return PHYS_ADDR_MASK29;
-}
-#endif
#define PTE_PHYS_MASK (phys_addr_mask() & PAGE_MASK)
#define PTE_FLAGS_MASK (~(PTE_PHYS_MASK) << PAGE_SHIFT)
@@ -169,6 +157,8 @@ extern void page_table_range_init(unsigned long start, unsigned long end,
#define HAVE_ARCH_UNMAPPED_AREA
#define HAVE_ARCH_UNMAPPED_AREA_TOPDOWN
+#define __HAVE_ARCH_PTE_SPECIAL
+
#include <asm-generic/pgtable.h>
#endif /* __ASM_SH_PGTABLE_H */
diff --git a/arch/sh/include/asm/pgtable_32.h b/arch/sh/include/asm/pgtable_32.h
index e172d696e52b..43528ec656ba 100644
--- a/arch/sh/include/asm/pgtable_32.h
+++ b/arch/sh/include/asm/pgtable_32.h
@@ -378,8 +378,6 @@ PTE_BIT_FUNC(low, mkold, &= ~_PAGE_ACCESSED);
PTE_BIT_FUNC(low, mkyoung, |= _PAGE_ACCESSED);
PTE_BIT_FUNC(low, mkspecial, |= _PAGE_SPECIAL);
-#define __HAVE_ARCH_PTE_SPECIAL
-
/*
* Macro and implementation to make a page protection as uncachable.
*/
@@ -429,10 +427,7 @@ static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
#define pte_offset_kernel(dir, address) \
((pte_t *) pmd_page_vaddr(*(dir)) + pte_index(address))
#define pte_offset_map(dir, address) pte_offset_kernel(dir, address)
-#define pte_offset_map_nested(dir, address) pte_offset_kernel(dir, address)
-
#define pte_unmap(pte) do { } while (0)
-#define pte_unmap_nested(pte) do { } while (0)
#ifdef CONFIG_X2TLB
#define pte_ERROR(e) \
diff --git a/arch/sh/include/asm/pgtable_64.h b/arch/sh/include/asm/pgtable_64.h
index 0ee46776dad6..42cb9dd52161 100644
--- a/arch/sh/include/asm/pgtable_64.h
+++ b/arch/sh/include/asm/pgtable_64.h
@@ -84,9 +84,7 @@ static __inline__ void set_pte(pte_t *pteptr, pte_t pteval)
((pte_t *) ((pmd_val(*(dir))) & PAGE_MASK) + pte_index((addr)))
#define pte_offset_map(dir,addr) pte_offset_kernel(dir, addr)
-#define pte_offset_map_nested(dir,addr) pte_offset_kernel(dir, addr)
#define pte_unmap(pte) do { } while (0)
-#define pte_unmap_nested(pte) do { } while (0)
#ifndef __ASSEMBLY__
#define IOBASE_VADDR 0xff000000
@@ -132,6 +130,7 @@ static __inline__ void set_pte(pte_t *pteptr, pte_t pteval)
* anything above the PPN field.
*/
#define _PAGE_WIRED _PAGE_EXT(0x001) /* software: wire the tlb entry */
+#define _PAGE_SPECIAL _PAGE_EXT(0x002)
#define _PAGE_CLEAR_FLAGS (_PAGE_PRESENT | _PAGE_FILE | _PAGE_SHARED | \
_PAGE_DIRTY | _PAGE_ACCESSED | _PAGE_WIRED)
@@ -175,7 +174,8 @@ static __inline__ void set_pte(pte_t *pteptr, pte_t pteval)
/* Default flags for a User page */
#define _PAGE_TABLE (_KERNPG_TABLE | _PAGE_USER)
-#define _PAGE_CHG_MASK (PTE_MASK | _PAGE_ACCESSED | _PAGE_DIRTY)
+#define _PAGE_CHG_MASK (PTE_MASK | _PAGE_ACCESSED | _PAGE_DIRTY | \
+ _PAGE_SPECIAL)
/*
* We have full permissions (Read/Write/Execute/Shared).
@@ -265,7 +265,7 @@ static inline int pte_dirty(pte_t pte) { return pte_val(pte) & _PAGE_DIRTY; }
static inline int pte_young(pte_t pte) { return pte_val(pte) & _PAGE_ACCESSED; }
static inline int pte_file(pte_t pte) { return pte_val(pte) & _PAGE_FILE; }
static inline int pte_write(pte_t pte) { return pte_val(pte) & _PAGE_WRITE; }
-static inline int pte_special(pte_t pte){ return 0; }
+static inline int pte_special(pte_t pte){ return pte_val(pte) & _PAGE_SPECIAL; }
static inline pte_t pte_wrprotect(pte_t pte) { set_pte(&pte, __pte(pte_val(pte) & ~_PAGE_WRITE)); return pte; }
static inline pte_t pte_mkclean(pte_t pte) { set_pte(&pte, __pte(pte_val(pte) & ~_PAGE_DIRTY)); return pte; }
@@ -274,8 +274,7 @@ static inline pte_t pte_mkwrite(pte_t pte) { set_pte(&pte, __pte(pte_val(pte) |
static inline pte_t pte_mkdirty(pte_t pte) { set_pte(&pte, __pte(pte_val(pte) | _PAGE_DIRTY)); return pte; }
static inline pte_t pte_mkyoung(pte_t pte) { set_pte(&pte, __pte(pte_val(pte) | _PAGE_ACCESSED)); return pte; }
static inline pte_t pte_mkhuge(pte_t pte) { set_pte(&pte, __pte(pte_val(pte) | _PAGE_SZHUGE)); return pte; }
-static inline pte_t pte_mkspecial(pte_t pte) { return pte; }
-
+static inline pte_t pte_mkspecial(pte_t pte) { set_pte(&pte, __pte(pte_val(pte) | _PAGE_SPECIAL)); return pte; }
/*
* Conversion functions: convert a page and protection to a page entry.
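The pgtable_64.h changes above give sh64 a real _PAGE_SPECIAL bit, so pte_special()/pte_mkspecial() now round-trip instead of pte_special() unconditionally returning 0. A tiny illustrative check, not part of the patch:

#include <asm/pgtable.h>

/* Illustrative only: with a real _PAGE_SPECIAL bit, marking a PTE special
 * is now observable through pte_special(). */
static inline int example_pte_special_roundtrip(pte_t pte)
{
	return pte_special(pte_mkspecial(pte));	/* non-zero after this patch */
}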
diff --git a/arch/sh/include/asm/processor.h b/arch/sh/include/asm/processor.h
index 0a58cb25a658..c9e7cbc4768a 100644
--- a/arch/sh/include/asm/processor.h
+++ b/arch/sh/include/asm/processor.h
@@ -89,6 +89,7 @@ struct sh_cpuinfo {
struct task_struct *idle;
#endif
+ unsigned int phys_bits;
unsigned long flags;
} __attribute__ ((aligned(L1_CACHE_BYTES)));
diff --git a/arch/sh/include/asm/processor_32.h b/arch/sh/include/asm/processor_32.h
index 61a445d2d02a..e3c73cdd8c90 100644
--- a/arch/sh/include/asm/processor_32.h
+++ b/arch/sh/include/asm/processor_32.h
@@ -13,7 +13,6 @@
#include <linux/linkage.h>
#include <asm/page.h>
#include <asm/types.h>
-#include <asm/ptrace.h>
#include <asm/hw_breakpoint.h>
/*
@@ -194,18 +193,19 @@ extern unsigned long get_wchan(struct task_struct *p);
#define KSTK_EIP(tsk) (task_pt_regs(tsk)->pc)
#define KSTK_ESP(tsk) (task_pt_regs(tsk)->regs[15])
-#define user_stack_pointer(_regs) ((_regs)->regs[15])
-
#if defined(CONFIG_CPU_SH2A) || defined(CONFIG_CPU_SH4)
#define PREFETCH_STRIDE L1_CACHE_BYTES
#define ARCH_HAS_PREFETCH
#define ARCH_HAS_PREFETCHW
static inline void prefetch(void *x)
{
- __asm__ __volatile__ ("pref @%0\n\t" : : "r" (x) : "memory");
+ __builtin_prefetch(x, 0, 3);
}
-#define prefetchw(x) prefetch(x)
+static inline void prefetchw(void *x)
+{
+ __builtin_prefetch(x, 1, 3);
+}
#endif
#endif /* __KERNEL__ */
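The prefetch hunk above swaps the hand-written "pref" asm for __builtin_prefetch(addr, rw, locality), where rw is 0 for read and 1 for write, and locality 3 asks for the line to be kept as close to the CPU as possible. A hedged usage sketch with made-up types:

/* Illustrative only: prefetch the next list node for read while the
 * current one is being processed. */
struct example_node {
	struct example_node *next;
	int payload;
};

static int example_walk(struct example_node *n)
{
	int sum = 0;

	while (n) {
		if (n->next)
			__builtin_prefetch(n->next, 0, 3);	/* read, high locality */
		sum += n->payload;
		n = n->next;
	}
	return sum;
}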
diff --git a/arch/sh/include/asm/processor_64.h b/arch/sh/include/asm/processor_64.h
index 621bc4618c6b..2a541ddb5a1b 100644
--- a/arch/sh/include/asm/processor_64.h
+++ b/arch/sh/include/asm/processor_64.h
@@ -17,7 +17,6 @@
#include <linux/compiler.h>
#include <asm/page.h>
#include <asm/types.h>
-#include <asm/ptrace.h>
#include <cpu/registers.h>
/*
@@ -231,7 +230,5 @@ extern unsigned long get_wchan(struct task_struct *p);
#define KSTK_EIP(tsk) ((tsk)->thread.pc)
#define KSTK_ESP(tsk) ((tsk)->thread.sp)
-#define user_stack_pointer(_regs) ((_regs)->regs[15])
-
#endif /* __ASSEMBLY__ */
#endif /* __ASM_SH_PROCESSOR_64_H */
diff --git a/arch/sh/include/asm/ptrace.h b/arch/sh/include/asm/ptrace.h
index 2168fde25611..f6edc10aa0d3 100644
--- a/arch/sh/include/asm/ptrace.h
+++ b/arch/sh/include/asm/ptrace.h
@@ -3,90 +3,7 @@
/*
* Copyright (C) 1999, 2000 Niibe Yutaka
- *
- */
-#if defined(__SH5__)
-struct pt_regs {
- unsigned long long pc;
- unsigned long long sr;
- long long syscall_nr;
- unsigned long long regs[63];
- unsigned long long tregs[8];
- unsigned long long pad[2];
-};
-#else
-/*
- * GCC defines register number like this:
- * -----------------------------
- * 0 - 15 are integer registers
- * 17 - 22 are control/special registers
- * 24 - 39 fp registers
- * 40 - 47 xd registers
- * 48 - fpscr register
- * -----------------------------
- *
- * We follows above, except:
- * 16 --- program counter (PC)
- * 22 --- syscall #
- * 23 --- floating point communication register
*/
-#define REG_REG0 0
-#define REG_REG15 15
-
-#define REG_PC 16
-
-#define REG_PR 17
-#define REG_SR 18
-#define REG_GBR 19
-#define REG_MACH 20
-#define REG_MACL 21
-
-#define REG_SYSCALL 22
-
-#define REG_FPREG0 23
-#define REG_FPREG15 38
-#define REG_XFREG0 39
-#define REG_XFREG15 54
-
-#define REG_FPSCR 55
-#define REG_FPUL 56
-
-/*
- * This struct defines the way the registers are stored on the
- * kernel stack during a system call or other kernel entry.
- */
-struct pt_regs {
- unsigned long regs[16];
- unsigned long pc;
- unsigned long pr;
- unsigned long sr;
- unsigned long gbr;
- unsigned long mach;
- unsigned long macl;
- long tra;
-};
-
-/*
- * This struct defines the way the DSP registers are stored on the
- * kernel stack during a system call or other kernel entry.
- */
-struct pt_dspregs {
- unsigned long a1;
- unsigned long a0g;
- unsigned long a1g;
- unsigned long m0;
- unsigned long m1;
- unsigned long a0;
- unsigned long x0;
- unsigned long x1;
- unsigned long y0;
- unsigned long y1;
- unsigned long dsr;
- unsigned long rs;
- unsigned long re;
- unsigned long mod;
-};
-#endif
#define PTRACE_GETREGS 12 /* General registers */
#define PTRACE_SETREGS 13
@@ -107,22 +24,102 @@ struct pt_dspregs {
#define PT_DATA_ADDR 248 /* &(struct user)->start_data */
#define PT_TEXT_LEN 252
+#if defined(__SH5__) || defined(CONFIG_CPU_SH5)
+#include "ptrace_64.h"
+#else
+#include "ptrace_32.h"
+#endif
+
#ifdef __KERNEL__
+
+#include <linux/stringify.h>
+#include <linux/stddef.h>
+#include <linux/thread_info.h>
#include <asm/addrspace.h>
#include <asm/page.h>
#include <asm/system.h>
#define user_mode(regs) (((regs)->sr & 0x40000000)==0)
+#define user_stack_pointer(regs) ((unsigned long)(regs)->regs[15])
+#define kernel_stack_pointer(regs) ((unsigned long)(regs)->regs[15])
#define instruction_pointer(regs) ((unsigned long)(regs)->pc)
extern void show_regs(struct pt_regs *);
+#define arch_has_single_step() (1)
+
/*
- * These are defined as per linux/ptrace.h.
+ * kprobe-based event tracer support
*/
-struct task_struct;
+struct pt_regs_offset {
+ const char *name;
+ int offset;
+};
-#define arch_has_single_step() (1)
+#define REG_OFFSET_NAME(r) {.name = #r, .offset = offsetof(struct pt_regs, r)}
+#define REGS_OFFSET_NAME(num) \
+ {.name = __stringify(r##num), .offset = offsetof(struct pt_regs, regs[num])}
+#define TREGS_OFFSET_NAME(num) \
+ {.name = __stringify(tr##num), .offset = offsetof(struct pt_regs, tregs[num])}
+#define REG_OFFSET_END {.name = NULL, .offset = 0}
+
+/* Query offset/name of register from its name/offset */
+extern int regs_query_register_offset(const char *name);
+extern const char *regs_query_register_name(unsigned int offset);
+
+extern const struct pt_regs_offset regoffset_table[];
+
+/**
+ * regs_get_register() - get register value from its offset
+ * @regs: pt_regs from which register value is gotten.
+ * @offset: offset number of the register.
+ *
+ * regs_get_register() returns the value of a register.  @offset is the
+ * offset of that register within the struct pt_regs pointed to by @regs.
+ * If @offset is bigger than MAX_REG_OFFSET, this returns 0.
+ */
+static inline unsigned long regs_get_register(struct pt_regs *regs,
+ unsigned int offset)
+{
+ if (unlikely(offset > MAX_REG_OFFSET))
+ return 0;
+ return *(unsigned long *)((unsigned long)regs + offset);
+}
+
+/**
+ * regs_within_kernel_stack() - check the address in the stack
+ * @regs: pt_regs which contains kernel stack pointer.
+ * @addr: address which is checked.
+ *
+ * regs_within_kernel_stack() checks whether @addr lies within the kernel
+ * stack page(s); it returns true if so and false otherwise.
+ */
+static inline int regs_within_kernel_stack(struct pt_regs *regs,
+ unsigned long addr)
+{
+ return ((addr & ~(THREAD_SIZE - 1)) ==
+ (kernel_stack_pointer(regs) & ~(THREAD_SIZE - 1)));
+}
+
+/**
+ * regs_get_kernel_stack_nth() - get Nth entry of the stack
+ * @regs: pt_regs which contains kernel stack pointer.
+ * @n: stack entry number.
+ *
+ * regs_get_kernel_stack_nth() returns the @n-th entry of the kernel stack
+ * specified by @regs.  If the @n-th entry is NOT within the kernel stack,
+ * this returns 0.
+ */
+static inline unsigned long regs_get_kernel_stack_nth(struct pt_regs *regs,
+ unsigned int n)
+{
+ unsigned long *addr = (unsigned long *)kernel_stack_pointer(regs);
+ addr += n;
+ if (regs_within_kernel_stack(regs, (unsigned long)addr))
+ return *addr;
+ else
+ return 0;
+}
struct perf_event;
struct perf_sample_data;
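The ptrace.h additions above provide the register/stack accessors that the kprobe-based event tracer relies on. A minimal sketch of how a probe handler might use them; the probe itself and the choice of register name are purely illustrative:

#include <linux/kprobes.h>
#include <linux/ptrace.h>

/* Illustrative only: dump one register and the top of the kernel stack
 * from a kprobe pre-handler. */
static int example_pre_handler(struct kprobe *p, struct pt_regs *regs)
{
	int off = regs_query_register_offset("r4");	/* hypothetical choice */

	if (off >= 0)
		pr_info("r4=%lx stack[0]=%lx\n",
			regs_get_register(regs, (unsigned int)off),
			regs_get_kernel_stack_nth(regs, 0));
	return 0;
}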
diff --git a/arch/sh/include/asm/ptrace_32.h b/arch/sh/include/asm/ptrace_32.h
new file mode 100644
index 000000000000..35d9e257558c
--- /dev/null
+++ b/arch/sh/include/asm/ptrace_32.h
@@ -0,0 +1,83 @@
+#ifndef __ASM_SH_PTRACE_32_H
+#define __ASM_SH_PTRACE_32_H
+
+/*
+ * GCC defines register number like this:
+ * -----------------------------
+ * 0 - 15 are integer registers
+ * 17 - 22 are control/special registers
+ * 24 - 39 fp registers
+ * 40 - 47 xd registers
+ * 48 - fpscr register
+ * -----------------------------
+ *
+ * We follow the above numbering, except:
+ * 16 --- program counter (PC)
+ * 22 --- syscall #
+ * 23 --- floating point communication register
+ */
+#define REG_REG0 0
+#define REG_REG15 15
+
+#define REG_PC 16
+
+#define REG_PR 17
+#define REG_SR 18
+#define REG_GBR 19
+#define REG_MACH 20
+#define REG_MACL 21
+
+#define REG_SYSCALL 22
+
+#define REG_FPREG0 23
+#define REG_FPREG15 38
+#define REG_XFREG0 39
+#define REG_XFREG15 54
+
+#define REG_FPSCR 55
+#define REG_FPUL 56
+
+/*
+ * This struct defines the way the registers are stored on the
+ * kernel stack during a system call or other kernel entry.
+ */
+struct pt_regs {
+ unsigned long regs[16];
+ unsigned long pc;
+ unsigned long pr;
+ unsigned long sr;
+ unsigned long gbr;
+ unsigned long mach;
+ unsigned long macl;
+ long tra;
+};
+
+/*
+ * This struct defines the way the DSP registers are stored on the
+ * kernel stack during a system call or other kernel entry.
+ */
+struct pt_dspregs {
+ unsigned long a1;
+ unsigned long a0g;
+ unsigned long a1g;
+ unsigned long m0;
+ unsigned long m1;
+ unsigned long a0;
+ unsigned long x0;
+ unsigned long x1;
+ unsigned long y0;
+ unsigned long y1;
+ unsigned long dsr;
+ unsigned long rs;
+ unsigned long re;
+ unsigned long mod;
+};
+
+#ifdef __KERNEL__
+
+#define MAX_REG_OFFSET offsetof(struct pt_regs, tra)
+#define regs_return_value(regs) ((regs)->regs[0])
+
+#endif /* __KERNEL__ */
+
+#endif /* __ASM_SH_PTRACE_32_H */
diff --git a/arch/sh/include/asm/ptrace_64.h b/arch/sh/include/asm/ptrace_64.h
new file mode 100644
index 000000000000..d43c1cb0bbe7
--- /dev/null
+++ b/arch/sh/include/asm/ptrace_64.h
@@ -0,0 +1,20 @@
+#ifndef __ASM_SH_PTRACE_64_H
+#define __ASM_SH_PTRACE_64_H
+
+struct pt_regs {
+ unsigned long long pc;
+ unsigned long long sr;
+ long long syscall_nr;
+ unsigned long long regs[63];
+ unsigned long long tregs[8];
+ unsigned long long pad[2];
+};
+
+#ifdef __KERNEL__
+
+#define MAX_REG_OFFSET offsetof(struct pt_regs, tregs[7])
+#define regs_return_value(regs) ((regs)->regs[3])
+
+#endif /* __KERNEL__ */
+
+#endif /* __ASM_SH_PTRACE_64_H */
diff --git a/arch/sh/include/asm/sizes.h b/arch/sh/include/asm/sizes.h
index 3a1fb97770f1..0b9fe2d5c36d 100644
--- a/arch/sh/include/asm/sizes.h
+++ b/arch/sh/include/asm/sizes.h
@@ -32,6 +32,7 @@
#define SZ_512 0x00000200
#define SZ_1K 0x00000400
+#define SZ_2K 0x00000800
#define SZ_4K 0x00001000
#define SZ_8K 0x00002000
#define SZ_16K 0x00004000
diff --git a/arch/sh/include/asm/sram.h b/arch/sh/include/asm/sram.h
new file mode 100644
index 000000000000..a2808ce4c0aa
--- /dev/null
+++ b/arch/sh/include/asm/sram.h
@@ -0,0 +1,38 @@
+#ifndef __ASM_SRAM_H
+#define __ASM_SRAM_H
+
+#ifdef CONFIG_HAVE_SRAM_POOL
+
+#include <linux/spinlock.h>
+#include <linux/genalloc.h>
+
+/* arch/sh/mm/sram.c */
+extern struct gen_pool *sram_pool;
+
+static inline unsigned long sram_alloc(size_t len)
+{
+ if (!sram_pool)
+ return 0UL;
+
+ return gen_pool_alloc(sram_pool, len);
+}
+
+static inline void sram_free(unsigned long addr, size_t len)
+{
+ return gen_pool_free(sram_pool, addr, len);
+}
+
+#else
+
+static inline unsigned long sram_alloc(size_t len)
+{
+ return 0;
+}
+
+static inline void sram_free(unsigned long addr, size_t len)
+{
+}
+
+#endif /* CONFIG_HAVE_SRAM_POOL */
+
+#endif /* __ASM_SRAM_H */
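sram.h above wraps an optional genalloc-backed on-chip SRAM pool; when no pool has been registered, sram_alloc() simply returns 0. A hedged usage sketch with a made-up caller:

#include <linux/kernel.h>
#include <asm/sram.h>

/* Illustrative only: try to place a small buffer in on-chip SRAM and
 * report whether that worked. */
static unsigned long example_get_buffer(size_t len)
{
	unsigned long buf = sram_alloc(len);

	if (!buf)
		pr_debug("no SRAM pool, caller must fall back to DRAM\n");
	return buf;	/* release later with sram_free(buf, len) */
}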
diff --git a/arch/sh/include/asm/system.h b/arch/sh/include/asm/system.h
index 0bd7a17d5e1a..10c8b1823a18 100644
--- a/arch/sh/include/asm/system.h
+++ b/arch/sh/include/asm/system.h
@@ -10,6 +10,7 @@
#include <linux/compiler.h>
#include <linux/linkage.h>
#include <asm/types.h>
+#include <asm/uncached.h>
#define AT_VECTOR_SIZE_ARCH 5 /* entries in ARCH_DLINFO */
@@ -137,11 +138,6 @@ extern unsigned int instruction_size(unsigned int insn);
#define instruction_size(insn) (4)
#endif
-extern unsigned long cached_to_uncached;
-extern unsigned long uncached_size;
-
-extern struct dentry *sh_debugfs_root;
-
void per_cpu_trap_init(void);
void default_idle(void);
void cpu_idle_wait(void);
diff --git a/arch/sh/include/asm/system_32.h b/arch/sh/include/asm/system_32.h
index 51296b36770e..a4ad1cd9bc4d 100644
--- a/arch/sh/include/asm/system_32.h
+++ b/arch/sh/include/asm/system_32.h
@@ -145,42 +145,6 @@ do { \
__restore_dsp(prev); \
} while (0)
-/*
- * Jump to uncached area.
- * When handling TLB or caches, we need to do it from an uncached area.
- */
-#define jump_to_uncached() \
-do { \
- unsigned long __dummy; \
- \
- __asm__ __volatile__( \
- "mova 1f, %0\n\t" \
- "add %1, %0\n\t" \
- "jmp @%0\n\t" \
- " nop\n\t" \
- ".balign 4\n" \
- "1:" \
- : "=&z" (__dummy) \
- : "r" (cached_to_uncached)); \
-} while (0)
-
-/*
- * Back to cached area.
- */
-#define back_to_cached() \
-do { \
- unsigned long __dummy; \
- ctrl_barrier(); \
- __asm__ __volatile__( \
- "mov.l 1f, %0\n\t" \
- "jmp @%0\n\t" \
- " nop\n\t" \
- ".balign 4\n" \
- "1: .long 2f\n" \
- "2:" \
- : "=&r" (__dummy)); \
-} while (0)
-
#ifdef CONFIG_CPU_HAS_SR_RB
#define lookup_exception_vector() \
({ \
@@ -212,17 +176,16 @@ static inline reg_size_t register_align(void *val)
}
int handle_unaligned_access(insn_size_t instruction, struct pt_regs *regs,
- struct mem_access *ma, int);
+ struct mem_access *ma, int, unsigned long address);
static inline void trigger_address_error(void)
{
- if (__in_29bit_mode())
- __asm__ __volatile__ (
- "ldc %0, sr\n\t"
- "mov.l @%1, %0"
- :
- : "r" (0x10000000), "r" (0x80000001)
- );
+ __asm__ __volatile__ (
+ "ldc %0, sr\n\t"
+ "mov.l @%1, %0"
+ :
+ : "r" (0x10000000), "r" (0x80000001)
+ );
}
asmlinkage void do_address_error(struct pt_regs *regs,
diff --git a/arch/sh/include/asm/system_64.h b/arch/sh/include/asm/system_64.h
index 36338646dfc8..8593bc8d1a4e 100644
--- a/arch/sh/include/asm/system_64.h
+++ b/arch/sh/include/asm/system_64.h
@@ -34,9 +34,6 @@ do { \
&next->thread); \
} while (0)
-#define jump_to_uncached() do { } while (0)
-#define back_to_cached() do { } while (0)
-
#define __icbi(addr) __asm__ __volatile__ ( "icbi %0, 0\n\t" : : "r" (addr))
#define __ocbp(addr) __asm__ __volatile__ ( "ocbp %0, 0\n\t" : : "r" (addr))
#define __ocbi(addr) __asm__ __volatile__ ( "ocbi %0, 0\n\t" : : "r" (addr))
diff --git a/arch/sh/include/asm/tlbflush.h b/arch/sh/include/asm/tlbflush.h
index e0ac97221ae6..0df66f0c7284 100644
--- a/arch/sh/include/asm/tlbflush.h
+++ b/arch/sh/include/asm/tlbflush.h
@@ -21,6 +21,8 @@ extern void local_flush_tlb_kernel_range(unsigned long start,
unsigned long end);
extern void local_flush_tlb_one(unsigned long asid, unsigned long page);
+extern void __flush_tlb_global(void);
+
#ifdef CONFIG_SMP
extern void flush_tlb_all(void);
diff --git a/arch/sh/include/asm/uncached.h b/arch/sh/include/asm/uncached.h
index e3419f96626a..6f8816b79cf1 100644
--- a/arch/sh/include/asm/uncached.h
+++ b/arch/sh/include/asm/uncached.h
@@ -4,15 +4,55 @@
#include <linux/bug.h>
#ifdef CONFIG_UNCACHED_MAPPING
+extern unsigned long cached_to_uncached;
+extern unsigned long uncached_size;
extern unsigned long uncached_start, uncached_end;
extern int virt_addr_uncached(unsigned long kaddr);
extern void uncached_init(void);
extern void uncached_resize(unsigned long size);
+
+/*
+ * Jump to uncached area.
+ * When handling TLB or caches, we need to do it from an uncached area.
+ */
+#define jump_to_uncached() \
+do { \
+ unsigned long __dummy; \
+ \
+ __asm__ __volatile__( \
+ "mova 1f, %0\n\t" \
+ "add %1, %0\n\t" \
+ "jmp @%0\n\t" \
+ " nop\n\t" \
+ ".balign 4\n" \
+ "1:" \
+ : "=&z" (__dummy) \
+ : "r" (cached_to_uncached)); \
+} while (0)
+
+/*
+ * Back to cached area.
+ */
+#define back_to_cached() \
+do { \
+ unsigned long __dummy; \
+ ctrl_barrier(); \
+ __asm__ __volatile__( \
+ "mov.l 1f, %0\n\t" \
+ "jmp @%0\n\t" \
+ " nop\n\t" \
+ ".balign 4\n" \
+ "1: .long 2f\n" \
+ "2:" \
+ : "=&r" (__dummy)); \
+} while (0)
#else
#define virt_addr_uncached(kaddr) (0)
#define uncached_init() do { } while (0)
#define uncached_resize(size) BUG()
+#define jump_to_uncached() do { } while (0)
+#define back_to_cached() do { } while (0)
#endif
#endif /* __ASM_SH_UNCACHED_H */
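uncached.h now owns the jump_to_uncached()/back_to_cached() pair that used to live in system_32.h, so cache and TLB code only needs this header. The usual call pattern looks roughly like this; the body is a placeholder:

#include <asm/uncached.h>

/* Illustrative only: run a cache/TLB array manipulation from the uncached
 * shadow mapping, then jump back. */
static void example_touch_cache_array(void)
{
	jump_to_uncached();
	/* ... write the memory-mapped cache/TLB arrays here ... */
	back_to_cached();
}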
diff --git a/arch/sh/include/asm/unistd_32.h b/arch/sh/include/asm/unistd_32.h
index 0e7f0fc8f086..903cd618eb74 100644
--- a/arch/sh/include/asm/unistd_32.h
+++ b/arch/sh/include/asm/unistd_32.h
@@ -345,12 +345,33 @@
#define __NR_pwritev 334
#define __NR_rt_tgsigqueueinfo 335
#define __NR_perf_event_open 336
+#define __NR_fanotify_init 337
+#define __NR_fanotify_mark 338
+#define __NR_prlimit64 339
-#define NR_syscalls 337
+/* Non-multiplexed socket family */
+#define __NR_socket 340
+#define __NR_bind 341
+#define __NR_connect 342
+#define __NR_listen 343
+#define __NR_accept 344
+#define __NR_getsockname 345
+#define __NR_getpeername 346
+#define __NR_socketpair 347
+#define __NR_send 348
+#define __NR_sendto 349
+#define __NR_recv 350
+#define __NR_recvfrom 351
+#define __NR_shutdown 352
+#define __NR_setsockopt 353
+#define __NR_getsockopt 354
+#define __NR_sendmsg 355
+#define __NR_recvmsg 356
+#define __NR_recvmmsg 357
-#ifdef __KERNEL__
+#define NR_syscalls 358
-#define __IGNORE_recvmmsg
+#ifdef __KERNEL__
#define __ARCH_WANT_IPC_PARSE_VERSION
#define __ARCH_WANT_OLD_READDIR
diff --git a/arch/sh/include/asm/unistd_64.h b/arch/sh/include/asm/unistd_64.h
index 0580c33a1e04..09aa93f9eb70 100644
--- a/arch/sh/include/asm/unistd_64.h
+++ b/arch/sh/include/asm/unistd_64.h
@@ -387,10 +387,13 @@
#define __NR_perf_event_open 364
#define __NR_recvmmsg 365
#define __NR_accept4 366
+#define __NR_fanotify_init 367
+#define __NR_fanotify_mark 368
+#define __NR_prlimit64 369
#ifdef __KERNEL__
-#define NR_syscalls 367
+#define NR_syscalls 370
#define __ARCH_WANT_IPC_PARSE_VERSION
#define __ARCH_WANT_OLD_READDIR
diff --git a/arch/sh/include/cpu-sh3/cpu/mmu_context.h b/arch/sh/include/cpu-sh3/cpu/mmu_context.h
index ab09da73ce77..0c7c735ea82a 100644
--- a/arch/sh/include/cpu-sh3/cpu/mmu_context.h
+++ b/arch/sh/include/cpu-sh3/cpu/mmu_context.h
@@ -16,6 +16,7 @@
#define MMU_TEA 0xFFFFFFFC /* TLB Exception Address */
#define MMUCR 0xFFFFFFE0 /* MMU Control Register */
+#define MMUCR_TI (1 << 2) /* TLB flush bit */
#define MMU_TLB_ADDRESS_ARRAY 0xF2000000
#define MMU_PAGE_ASSOC_BIT 0x80
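MMUCR_TI is the invalidate bit that a global TLB flush sets. A hedged sketch of the read-modify-write it enables; the real __flush_tlb_global() declared in tlbflush.h lives in the mm code:

/* Illustrative only: request a full TLB invalidate by setting MMUCR.TI. */
static inline void example_flush_tlb_global(void)
{
	unsigned long status = __raw_readl(MMUCR);

	status |= MMUCR_TI;
	__raw_writel(status, MMUCR);
}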
diff --git a/arch/sh/include/cpu-sh4/cpu/freq.h b/arch/sh/include/cpu-sh4/cpu/freq.h
index e1e90960ee9a..cffd25ed0240 100644
--- a/arch/sh/include/cpu-sh4/cpu/freq.h
+++ b/arch/sh/include/cpu-sh4/cpu/freq.h
@@ -56,7 +56,9 @@
#define FRQCR1 0xffc40004
#define FRQMR1 0xffc40014
#elif defined(CONFIG_CPU_SUBTYPE_SHX3)
-#define FRQCR 0xffc00014
+#define FRQCR0 0xffc00000
+#define FRQCR1 0xffc00004
+#define FRQMR1 0xffc00014
#else
#define FRQCR 0xffc00000
#define FRQCR_PSTBY 0x0200
diff --git a/arch/sh/include/cpu-sh4/cpu/sh7724.h b/arch/sh/include/cpu-sh4/cpu/sh7724.h
index 4c27b68789b3..7eb435999426 100644
--- a/arch/sh/include/cpu-sh4/cpu/sh7724.h
+++ b/arch/sh/include/cpu-sh4/cpu/sh7724.h
@@ -303,4 +303,7 @@ enum {
SHDMA_SLAVE_SDHI1_RX,
};
+extern struct clk sh7724_fsimcka_clk;
+extern struct clk sh7724_fsimckb_clk;
+
#endif /* __ASM_SH7724_H__ */
diff --git a/arch/sh/include/cpu-sh4/cpu/sh7757.h b/arch/sh/include/cpu-sh4/cpu/sh7757.h
index f4d267efad71..15f3de11c55a 100644
--- a/arch/sh/include/cpu-sh4/cpu/sh7757.h
+++ b/arch/sh/include/cpu-sh4/cpu/sh7757.h
@@ -3,241 +3,252 @@
enum {
/* PTA */
- GPIO_PTA7, GPIO_PTA6, GPIO_PTA5, GPIO_PTA4,
- GPIO_PTA3, GPIO_PTA2, GPIO_PTA1, GPIO_PTA0,
+ GPIO_PTA0, GPIO_PTA1, GPIO_PTA2, GPIO_PTA3,
+ GPIO_PTA4, GPIO_PTA5, GPIO_PTA6, GPIO_PTA7,
/* PTB */
- GPIO_PTB7, GPIO_PTB6, GPIO_PTB5, GPIO_PTB4,
- GPIO_PTB3, GPIO_PTB2, GPIO_PTB1, GPIO_PTB0,
+ GPIO_PTB0, GPIO_PTB1, GPIO_PTB2, GPIO_PTB3,
+ GPIO_PTB4, GPIO_PTB5, GPIO_PTB6, GPIO_PTB7,
/* PTC */
- GPIO_PTC7, GPIO_PTC6, GPIO_PTC5, GPIO_PTC4,
- GPIO_PTC3, GPIO_PTC2, GPIO_PTC1, GPIO_PTC0,
+ GPIO_PTC0, GPIO_PTC1, GPIO_PTC2, GPIO_PTC3,
+ GPIO_PTC4, GPIO_PTC5, GPIO_PTC6, GPIO_PTC7,
/* PTD */
- GPIO_PTD7, GPIO_PTD6, GPIO_PTD5, GPIO_PTD4,
- GPIO_PTD3, GPIO_PTD2, GPIO_PTD1, GPIO_PTD0,
+ GPIO_PTD0, GPIO_PTD1, GPIO_PTD2, GPIO_PTD3,
+ GPIO_PTD4, GPIO_PTD5, GPIO_PTD6, GPIO_PTD7,
/* PTE */
- GPIO_PTE7, GPIO_PTE6, GPIO_PTE5, GPIO_PTE4,
- GPIO_PTE3, GPIO_PTE2, GPIO_PTE1, GPIO_PTE0,
+ GPIO_PTE0, GPIO_PTE1, GPIO_PTE2, GPIO_PTE3,
+ GPIO_PTE4, GPIO_PTE5, GPIO_PTE6, GPIO_PTE7,
/* PTF */
- GPIO_PTF7, GPIO_PTF6, GPIO_PTF5, GPIO_PTF4,
- GPIO_PTF3, GPIO_PTF2, GPIO_PTF1, GPIO_PTF0,
+ GPIO_PTF0, GPIO_PTF1, GPIO_PTF2, GPIO_PTF3,
+ GPIO_PTF4, GPIO_PTF5, GPIO_PTF6, GPIO_PTF7,
/* PTG */
- GPIO_PTG7, GPIO_PTG6, GPIO_PTG5, GPIO_PTG4,
- GPIO_PTG3, GPIO_PTG2, GPIO_PTG1, GPIO_PTG0,
+ GPIO_PTG0, GPIO_PTG1, GPIO_PTG2, GPIO_PTG3,
+ GPIO_PTG4, GPIO_PTG5, GPIO_PTG6, GPIO_PTG7,
/* PTH */
- GPIO_PTH7, GPIO_PTH6, GPIO_PTH5, GPIO_PTH4,
- GPIO_PTH3, GPIO_PTH2, GPIO_PTH1, GPIO_PTH0,
+ GPIO_PTH0, GPIO_PTH1, GPIO_PTH2, GPIO_PTH3,
+ GPIO_PTH4, GPIO_PTH5, GPIO_PTH6, GPIO_PTH7,
/* PTI */
- GPIO_PTI7, GPIO_PTI6, GPIO_PTI5, GPIO_PTI4,
- GPIO_PTI3, GPIO_PTI2, GPIO_PTI1, GPIO_PTI0,
+ GPIO_PTI0, GPIO_PTI1, GPIO_PTI2, GPIO_PTI3,
+ GPIO_PTI4, GPIO_PTI5, GPIO_PTI6, GPIO_PTI7,
/* PTJ */
- GPIO_PTJ7, GPIO_PTJ6, GPIO_PTJ5, GPIO_PTJ4,
- GPIO_PTJ3, GPIO_PTJ2, GPIO_PTJ1, GPIO_PTJ0,
+ GPIO_PTJ0, GPIO_PTJ1, GPIO_PTJ2, GPIO_PTJ3,
+ GPIO_PTJ4, GPIO_PTJ5, GPIO_PTJ6, GPIO_PTJ7_RESV,
/* PTK */
- GPIO_PTK7, GPIO_PTK6, GPIO_PTK5, GPIO_PTK4,
- GPIO_PTK3, GPIO_PTK2, GPIO_PTK1, GPIO_PTK0,
+ GPIO_PTK0, GPIO_PTK1, GPIO_PTK2, GPIO_PTK3,
+ GPIO_PTK4, GPIO_PTK5, GPIO_PTK6, GPIO_PTK7,
/* PTL */
- GPIO_PTL7, GPIO_PTL6, GPIO_PTL5, GPIO_PTL4,
- GPIO_PTL3, GPIO_PTL2, GPIO_PTL1, GPIO_PTL0,
+ GPIO_PTL0, GPIO_PTL1, GPIO_PTL2, GPIO_PTL3,
+ GPIO_PTL4, GPIO_PTL5, GPIO_PTL6, GPIO_PTL7_RESV,
/* PTM */
- GPIO_PTM6, GPIO_PTM5, GPIO_PTM4,
- GPIO_PTM3, GPIO_PTM2, GPIO_PTM1, GPIO_PTM0,
+ GPIO_PTM0, GPIO_PTM1, GPIO_PTM2, GPIO_PTM3,
+ GPIO_PTM4, GPIO_PTM5, GPIO_PTM6, GPIO_PTM7,
/* PTN */
- GPIO_PTN7, GPIO_PTN6, GPIO_PTN5, GPIO_PTN4,
- GPIO_PTN3, GPIO_PTN2, GPIO_PTN1, GPIO_PTN0,
+ GPIO_PTN0, GPIO_PTN1, GPIO_PTN2, GPIO_PTN3,
+ GPIO_PTN4, GPIO_PTN5, GPIO_PTN6, GPIO_PTN7_RESV,
/* PTO */
- GPIO_PTO7, GPIO_PTO6, GPIO_PTO5, GPIO_PTO4,
- GPIO_PTO3, GPIO_PTO2, GPIO_PTO1, GPIO_PTO0,
+ GPIO_PTO0, GPIO_PTO1, GPIO_PTO2, GPIO_PTO3,
+ GPIO_PTO4, GPIO_PTO5, GPIO_PTO6, GPIO_PTO7,
/* PTP */
- GPIO_PTP6, GPIO_PTP5, GPIO_PTP4,
- GPIO_PTP3, GPIO_PTP2, GPIO_PTP1, GPIO_PTP0,
+ GPIO_PTP0, GPIO_PTP1, GPIO_PTP2, GPIO_PTP3,
+ GPIO_PTP4, GPIO_PTP5, GPIO_PTP6, GPIO_PTP7,
/* PTQ */
- GPIO_PTQ6, GPIO_PTQ5, GPIO_PTQ4,
- GPIO_PTQ3, GPIO_PTQ2, GPIO_PTQ1, GPIO_PTQ0,
+ GPIO_PTQ0, GPIO_PTQ1, GPIO_PTQ2, GPIO_PTQ3,
+ GPIO_PTQ4, GPIO_PTQ5, GPIO_PTQ6, GPIO_PTQ7_RESV,
/* PTR */
- GPIO_PTR7, GPIO_PTR6, GPIO_PTR5, GPIO_PTR4,
- GPIO_PTR3, GPIO_PTR2, GPIO_PTR1, GPIO_PTR0,
+ GPIO_PTR0, GPIO_PTR1, GPIO_PTR2, GPIO_PTR3,
+ GPIO_PTR4, GPIO_PTR5, GPIO_PTR6, GPIO_PTR7,
/* PTS */
- GPIO_PTS7, GPIO_PTS6, GPIO_PTS5, GPIO_PTS4,
- GPIO_PTS3, GPIO_PTS2, GPIO_PTS1, GPIO_PTS0,
+ GPIO_PTS0, GPIO_PTS1, GPIO_PTS2, GPIO_PTS3,
+ GPIO_PTS4, GPIO_PTS5, GPIO_PTS6, GPIO_PTS7,
/* PTT */
- GPIO_PTT5, GPIO_PTT4,
- GPIO_PTT3, GPIO_PTT2, GPIO_PTT1, GPIO_PTT0,
+ GPIO_PTT0, GPIO_PTT1, GPIO_PTT2, GPIO_PTT3,
+ GPIO_PTT4, GPIO_PTT5, GPIO_PTT6, GPIO_PTT7,
/* PTU */
- GPIO_PTU7, GPIO_PTU6, GPIO_PTU5, GPIO_PTU4,
- GPIO_PTU3, GPIO_PTU2, GPIO_PTU1, GPIO_PTU0,
+ GPIO_PTU0, GPIO_PTU1, GPIO_PTU2, GPIO_PTU3,
+ GPIO_PTU4, GPIO_PTU5, GPIO_PTU6, GPIO_PTU7,
/* PTV */
- GPIO_PTV7, GPIO_PTV6, GPIO_PTV5, GPIO_PTV4,
- GPIO_PTV3, GPIO_PTV2, GPIO_PTV1, GPIO_PTV0,
+ GPIO_PTV0, GPIO_PTV1, GPIO_PTV2, GPIO_PTV3,
+ GPIO_PTV4, GPIO_PTV5, GPIO_PTV6, GPIO_PTV7,
/* PTW */
- GPIO_PTW7, GPIO_PTW6, GPIO_PTW5, GPIO_PTW4,
- GPIO_PTW3, GPIO_PTW2, GPIO_PTW1, GPIO_PTW0,
+ GPIO_PTW0, GPIO_PTW1, GPIO_PTW2, GPIO_PTW3,
+ GPIO_PTW4, GPIO_PTW5, GPIO_PTW6, GPIO_PTW7,
/* PTX */
- GPIO_PTX7, GPIO_PTX6, GPIO_PTX5, GPIO_PTX4,
- GPIO_PTX3, GPIO_PTX2, GPIO_PTX1, GPIO_PTX0,
+ GPIO_PTX0, GPIO_PTX1, GPIO_PTX2, GPIO_PTX3,
+ GPIO_PTX4, GPIO_PTX5, GPIO_PTX6, GPIO_PTX7,
/* PTY */
- GPIO_PTY7, GPIO_PTY6, GPIO_PTY5, GPIO_PTY4,
- GPIO_PTY3, GPIO_PTY2, GPIO_PTY1, GPIO_PTY0,
+ GPIO_PTY0, GPIO_PTY1, GPIO_PTY2, GPIO_PTY3,
+ GPIO_PTY4, GPIO_PTY5, GPIO_PTY6, GPIO_PTY7,
/* PTZ */
- GPIO_PTZ7, GPIO_PTZ6, GPIO_PTZ5, GPIO_PTZ4,
- GPIO_PTZ3, GPIO_PTZ2, GPIO_PTZ1, GPIO_PTZ0,
+ GPIO_PTZ0, GPIO_PTZ1, GPIO_PTZ2, GPIO_PTZ3,
+ GPIO_PTZ4, GPIO_PTZ5, GPIO_PTZ6, GPIO_PTZ7,
- /* PTA (mobule: LBSC, CPG, LPC) */
+ /* PTA (mobule: LBSC, RGMII) */
GPIO_FN_BS, GPIO_FN_RDWR, GPIO_FN_WE1, GPIO_FN_RDY,
- GPIO_FN_MD10, GPIO_FN_MD9, GPIO_FN_MD8,
- GPIO_FN_LGPIO7, GPIO_FN_LGPIO6, GPIO_FN_LGPIO5, GPIO_FN_LGPIO4,
- GPIO_FN_LGPIO3, GPIO_FN_LGPIO2, GPIO_FN_LGPIO1, GPIO_FN_LGPIO0,
-
- /* PTB (mobule: LBSC, EtherC, SIM, LPC) */
- GPIO_FN_D15, GPIO_FN_D14, GPIO_FN_D13, GPIO_FN_D12,
- GPIO_FN_D11, GPIO_FN_D10, GPIO_FN_D9, GPIO_FN_D8,
- GPIO_FN_ET0_MDC, GPIO_FN_ET0_MDIO,
- GPIO_FN_ET1_MDC, GPIO_FN_ET1_MDIO,
- GPIO_FN_SIM_D, GPIO_FN_SIM_CLK, GPIO_FN_SIM_RST,
- GPIO_FN_WPSZ1, GPIO_FN_WPSZ0, GPIO_FN_FWID, GPIO_FN_FLSHSZ,
- GPIO_FN_LPC_SPIEN, GPIO_FN_BASEL,
+ GPIO_FN_ET0_MDC, GPIO_FN_ET0_MDIO,
+ GPIO_FN_ET1_MDC, GPIO_FN_ET1_MDIO,
- /* PTC (mobule: SD) */
- GPIO_FN_SD_WP, GPIO_FN_SD_CD, GPIO_FN_SD_CLK, GPIO_FN_SD_CMD,
- GPIO_FN_SD_D3, GPIO_FN_SD_D2, GPIO_FN_SD_D1, GPIO_FN_SD_D0,
+ /* PTB (mobule: INTC, ONFI, TMU) */
+ GPIO_FN_IRQ15, GPIO_FN_IRQ14, GPIO_FN_IRQ13, GPIO_FN_IRQ12,
+ GPIO_FN_IRQ11, GPIO_FN_IRQ10, GPIO_FN_IRQ9, GPIO_FN_IRQ8,
+ GPIO_FN_ON_NRE, GPIO_FN_ON_NWE, GPIO_FN_ON_NWP, GPIO_FN_ON_NCE0,
+ GPIO_FN_ON_R_B0, GPIO_FN_ON_ALE, GPIO_FN_ON_CLE,
+ GPIO_FN_TCLK,
- /* PTD (mobule: INTC, SPI0, LBSC, CPG, ADC) */
+ /* PTC (mobule: IRQ, PWMU) */
GPIO_FN_IRQ7, GPIO_FN_IRQ6, GPIO_FN_IRQ5, GPIO_FN_IRQ4,
GPIO_FN_IRQ3, GPIO_FN_IRQ2, GPIO_FN_IRQ1, GPIO_FN_IRQ0,
- GPIO_FN_MD6, GPIO_FN_MD5, GPIO_FN_MD3, GPIO_FN_MD2,
- GPIO_FN_MD1, GPIO_FN_MD0, GPIO_FN_ADTRG1, GPIO_FN_ADTRG0,
-
- /* PTE (mobule: EtherC) */
- GPIO_FN_ET0_CRS_DV, GPIO_FN_ET0_TXD1,
- GPIO_FN_ET0_TXD0, GPIO_FN_ET0_TX_EN,
- GPIO_FN_ET0_REF_CLK, GPIO_FN_ET0_RXD1,
- GPIO_FN_ET0_RXD0, GPIO_FN_ET0_RX_ER,
-
- /* PTF (mobule: EtherC) */
- GPIO_FN_ET1_CRS_DV, GPIO_FN_ET1_TXD1,
- GPIO_FN_ET1_TXD0, GPIO_FN_ET1_TX_EN,
- GPIO_FN_ET1_REF_CLK, GPIO_FN_ET1_RXD1,
- GPIO_FN_ET1_RXD0, GPIO_FN_ET1_RX_ER,
-
- /* PTG (mobule: SYSTEM, PWMX, LPC) */
- GPIO_FN_STATUS0, GPIO_FN_STATUS1,
- GPIO_FN_PWX0, GPIO_FN_PWX1, GPIO_FN_PWX2, GPIO_FN_PWX3,
- GPIO_FN_SERIRQ, GPIO_FN_CLKRUN, GPIO_FN_LPCPD, GPIO_FN_LDRQ,
-
- /* PTH (mobule: TMU, SCIF234, SPI1, SPI0) */
- GPIO_FN_TCLK, GPIO_FN_RXD4, GPIO_FN_TXD4,
+ GPIO_FN_PWMU0, GPIO_FN_PWMU1, GPIO_FN_PWMU2, GPIO_FN_PWMU3,
+ GPIO_FN_PWMU4, GPIO_FN_PWMU5,
+
+ /* PTD (mobule: SPI0, DMAC) */
+ GPIO_FN_SP0_MOSI, GPIO_FN_SP0_MISO, GPIO_FN_SP0_SCK,
+ GPIO_FN_SP0_SCK_FB, GPIO_FN_SP0_SS0, GPIO_FN_SP0_SS1,
+ GPIO_FN_SP0_SS2, GPIO_FN_SP0_SS3, GPIO_FN_DREQ0,
+ GPIO_FN_DACK0, GPIO_FN_TEND0,
+
+ /* PTE (mobule: RMII) */
+ GPIO_FN_RMII0_CRS_DV, GPIO_FN_RMII0_TXD1, GPIO_FN_RMII0_TXD0,
+ GPIO_FN_RMII0_TXEN, GPIO_FN_RMII0_REFCLK, GPIO_FN_RMII0_RXD1,
+ GPIO_FN_RMII0_RXD0, GPIO_FN_RMII0_RX_ER,
+
+ /* PTF (mobule: RMII, SerMux) */
+ GPIO_FN_RMII1_CRS_DV, GPIO_FN_RMII1_TXD1, GPIO_FN_RMII1_TXD0,
+ GPIO_FN_RMII1_TXEN, GPIO_FN_RMII1_REFCLK, GPIO_FN_RMII1_RXD1,
+ GPIO_FN_RMII1_RXD0, GPIO_FN_RMII1_RX_ER, GPIO_FN_RAC_RI,
+
+ /* PTG (mobule: system, LBSC, LPC, WDT, LPC, eMMC) */
+ GPIO_FN_BOOTFMS, GPIO_FN_BOOTWP,
+ GPIO_FN_A25, GPIO_FN_A24, GPIO_FN_SERIRQ, GPIO_FN_WDTOVF,
+ GPIO_FN_LPCPD, GPIO_FN_LDRQ, GPIO_FN_MMCCLK, GPIO_FN_MMCCMD,
+
+ /* PTH (mobule: SPI1, LPC, DMAC, ADC) */
GPIO_FN_SP1_MOSI, GPIO_FN_SP1_MISO,
GPIO_FN_SP1_SCK, GPIO_FN_SP1_SCK_FB,
GPIO_FN_SP1_SS0, GPIO_FN_SP1_SS1,
- GPIO_FN_SP0_SS1,
-
- /* PTI (mobule: INTC) */
- GPIO_FN_IRQ15, GPIO_FN_IRQ14, GPIO_FN_IRQ13, GPIO_FN_IRQ12,
- GPIO_FN_IRQ11, GPIO_FN_IRQ10, GPIO_FN_IRQ9, GPIO_FN_IRQ8,
-
- /* PTJ (mobule: SCIF234, SERMUX) */
- GPIO_FN_RXD3, GPIO_FN_TXD3, GPIO_FN_RXD2, GPIO_FN_TXD2,
- GPIO_FN_COM1_TXD, GPIO_FN_COM1_RXD,
- GPIO_FN_COM1_RTS, GPIO_FN_COM1_CTS,
-
- /* PTK (mobule: SERMUX) */
- GPIO_FN_COM2_TXD, GPIO_FN_COM2_RXD,
- GPIO_FN_COM2_RTS, GPIO_FN_COM2_CTS,
- GPIO_FN_COM2_DTR, GPIO_FN_COM2_DSR,
- GPIO_FN_COM2_DCD, GPIO_FN_COM2_RI,
+ GPIO_FN_WP, GPIO_FN_FMS0, GPIO_FN_TEND1, GPIO_FN_DREQ1,
+ GPIO_FN_DACK1, GPIO_FN_ADTRG1, GPIO_FN_ADTRG0,
- /* PTL (mobule: SERMUX) */
- GPIO_FN_RAC_TXD, GPIO_FN_RAC_RXD,
- GPIO_FN_RAC_RTS, GPIO_FN_RAC_CTS,
- GPIO_FN_RAC_DTR, GPIO_FN_RAC_DSR,
- GPIO_FN_RAC_DCD, GPIO_FN_RAC_RI,
+ /* PTI (mobule: LBSC, SDHI) */
+ GPIO_FN_D15, GPIO_FN_D14, GPIO_FN_D13, GPIO_FN_D12,
+ GPIO_FN_D11, GPIO_FN_D10, GPIO_FN_D9, GPIO_FN_D8,
+ GPIO_FN_SD_WP, GPIO_FN_SD_CD, GPIO_FN_SD_CLK, GPIO_FN_SD_CMD,
+ GPIO_FN_SD_D3, GPIO_FN_SD_D2, GPIO_FN_SD_D1, GPIO_FN_SD_D0,
- /* PTM (mobule: IIC, LPC) */
+ /* PTJ (mobule: SCIF234) */
+ GPIO_FN_RTS3, GPIO_FN_CTS3, GPIO_FN_TXD3, GPIO_FN_RXD3,
+ GPIO_FN_RTS4, GPIO_FN_RXD4, GPIO_FN_TXD4,
+
+ /* PTK (mobule: SERMUX, LBSC, SCIF) */
+ GPIO_FN_COM2_TXD, GPIO_FN_COM2_RXD, GPIO_FN_COM2_RTS,
+ GPIO_FN_COM2_CTS, GPIO_FN_COM2_DTR, GPIO_FN_COM2_DSR,
+ GPIO_FN_COM2_DCD, GPIO_FN_CLKOUT,
+ GPIO_FN_SCK2, GPIO_FN_SCK4, GPIO_FN_SCK3,
+
+ /* PTL (mobule: SERMUX, SCIF, LBSC, AUD) */
+ GPIO_FN_RAC_RXD, GPIO_FN_RAC_RTS, GPIO_FN_RAC_CTS,
+ GPIO_FN_RAC_DTR, GPIO_FN_RAC_DSR, GPIO_FN_RAC_DCD,
+ GPIO_FN_RAC_TXD, GPIO_FN_RXD2, GPIO_FN_CS5,
+ GPIO_FN_CS6, GPIO_FN_AUDSYNC, GPIO_FN_AUDCK,
+ GPIO_FN_TXD2,
+
+ /* PTM (mobule: LBSC, IIC) */
+ GPIO_FN_CS4, GPIO_FN_RD, GPIO_FN_WE0, GPIO_FN_CS0,
GPIO_FN_SDA6, GPIO_FN_SCL6, GPIO_FN_SDA7, GPIO_FN_SCL7,
- GPIO_FN_WP, GPIO_FN_FMS0, GPIO_FN_FMS1,
-
- /* PTN (mobule: SCIF234, EVC) */
- GPIO_FN_SCK2, GPIO_FN_RTS4, GPIO_FN_RTS3, GPIO_FN_RTS2,
- GPIO_FN_CTS4, GPIO_FN_CTS3, GPIO_FN_CTS2,
- GPIO_FN_EVENT7, GPIO_FN_EVENT6, GPIO_FN_EVENT5, GPIO_FN_EVENT4,
- GPIO_FN_EVENT3, GPIO_FN_EVENT2, GPIO_FN_EVENT1, GPIO_FN_EVENT0,
- /* PTO (mobule: SGPIO) */
- GPIO_FN_SGPIO0_CLK, GPIO_FN_SGPIO0_LOAD,
- GPIO_FN_SGPIO0_DI, GPIO_FN_SGPIO0_DO,
- GPIO_FN_SGPIO1_CLK, GPIO_FN_SGPIO1_LOAD,
- GPIO_FN_SGPIO1_DI, GPIO_FN_SGPIO1_DO,
+ /* PTN (mobule: USB, JMC, SGPIO, WDT) */
+ GPIO_FN_VBUS_EN, GPIO_FN_VBUS_OC, GPIO_FN_JMCTCK,
+ GPIO_FN_JMCTMS, GPIO_FN_JMCTDO, GPIO_FN_JMCTDI,
+ GPIO_FN_JMCTRST,
+ GPIO_FN_SGPIO1_CLK, GPIO_FN_SGPIO1_LOAD, GPIO_FN_SGPIO1_DI,
+ GPIO_FN_SGPIO1_DO, GPIO_FN_SUB_CLKIN,
- /* PTP (mobule: JMC, SCIF234) */
- GPIO_FN_JMCTCK, GPIO_FN_JMCTMS, GPIO_FN_JMCTDO, GPIO_FN_JMCTDI,
- GPIO_FN_JMCRST, GPIO_FN_SCK4, GPIO_FN_SCK3,
+ /* PTO (mobule: SGPIO, SerMux) */
+ GPIO_FN_SGPIO0_CLK, GPIO_FN_SGPIO0_LOAD, GPIO_FN_SGPIO0_DI,
+ GPIO_FN_SGPIO0_DO, GPIO_FN_SGPIO2_CLK, GPIO_FN_SGPIO2_LOAD,
+ GPIO_FN_SGPIO2_DI, GPIO_FN_SGPIO2_DO, GPIO_FN_COM1_TXD,
+ GPIO_FN_COM1_RXD, GPIO_FN_COM1_RTS, GPIO_FN_COM1_CTS,
/* PTQ (mobule: LPC) */
GPIO_FN_LAD3, GPIO_FN_LAD2, GPIO_FN_LAD1, GPIO_FN_LAD0,
GPIO_FN_LFRAME, GPIO_FN_LRESET, GPIO_FN_LCLK,
/* PTR (mobule: GRA, IIC) */
- GPIO_FN_DDC3, GPIO_FN_DDC2,
- GPIO_FN_SDA8, GPIO_FN_SCL8, GPIO_FN_SDA2, GPIO_FN_SCL2,
+ GPIO_FN_DDC3, GPIO_FN_DDC2, GPIO_FN_SDA2, GPIO_FN_SCL2,
GPIO_FN_SDA1, GPIO_FN_SCL1, GPIO_FN_SDA0, GPIO_FN_SCL0,
+ GPIO_FN_SDA8, GPIO_FN_SCL8,
/* PTS (mobule: GRA, IIC) */
- GPIO_FN_DDC1, GPIO_FN_DDC0,
- GPIO_FN_SDA9, GPIO_FN_SCL9, GPIO_FN_SDA5, GPIO_FN_SCL5,
+ GPIO_FN_DDC1, GPIO_FN_DDC0, GPIO_FN_SDA5, GPIO_FN_SCL5,
GPIO_FN_SDA4, GPIO_FN_SCL4, GPIO_FN_SDA3, GPIO_FN_SCL3,
+ GPIO_FN_SDA9, GPIO_FN_SCL9,
- /* PTT (mobule: SYSTEM, PWMX) */
- GPIO_FN_AUDSYNC, GPIO_FN_AUDCK,
- GPIO_FN_AUDATA3, GPIO_FN_AUDATA2,
- GPIO_FN_AUDATA1, GPIO_FN_AUDATA0,
- GPIO_FN_PWX7, GPIO_FN_PWX6, GPIO_FN_PWX5, GPIO_FN_PWX4,
+ /* PTT (mobule: PWMX, AUD) */
+ GPIO_FN_PWMX7, GPIO_FN_PWMX6, GPIO_FN_PWMX5, GPIO_FN_PWMX4,
+ GPIO_FN_PWMX3, GPIO_FN_PWMX2, GPIO_FN_PWMX1, GPIO_FN_PWMX0,
+ GPIO_FN_AUDATA3, GPIO_FN_AUDATA2, GPIO_FN_AUDATA1,
+ GPIO_FN_AUDATA0, GPIO_FN_STATUS1, GPIO_FN_STATUS0,
- /* PTU (mobule: LBSC, DMAC) */
- GPIO_FN_CS6, GPIO_FN_CS5, GPIO_FN_CS4, GPIO_FN_CS0,
- GPIO_FN_RD, GPIO_FN_WE0, GPIO_FN_A25, GPIO_FN_A24,
- GPIO_FN_DREQ0, GPIO_FN_DACK0,
+ /* PTU (mobule: LPC, APM) */
+ GPIO_FN_LGPIO7, GPIO_FN_LGPIO6, GPIO_FN_LGPIO5, GPIO_FN_LGPIO4,
+ GPIO_FN_LGPIO3, GPIO_FN_LGPIO2, GPIO_FN_LGPIO1, GPIO_FN_LGPIO0,
+ GPIO_FN_APMONCTL_O, GPIO_FN_APMPWBTOUT_O, GPIO_FN_APMSCI_O,
+ GPIO_FN_APMVDDON, GPIO_FN_APMSLPBTN, GPIO_FN_APMPWRBTN,
+ GPIO_FN_APMS5N, GPIO_FN_APMS3N,
- /* PTV (mobule: LBSC, DMAC) */
+ /* PTV (mobule: LBSC, SerMux, R-SPI, EVC, GRA) */
GPIO_FN_A23, GPIO_FN_A22, GPIO_FN_A21, GPIO_FN_A20,
GPIO_FN_A19, GPIO_FN_A18, GPIO_FN_A17, GPIO_FN_A16,
- GPIO_FN_TEND0, GPIO_FN_DREQ1, GPIO_FN_DACK1, GPIO_FN_TEND1,
+ GPIO_FN_COM2_RI, GPIO_FN_R_SPI_MOSI, GPIO_FN_R_SPI_MISO,
+ GPIO_FN_R_SPI_RSPCK, GPIO_FN_R_SPI_SSL0, GPIO_FN_R_SPI_SSL1,
+ GPIO_FN_EVENT7, GPIO_FN_EVENT6, GPIO_FN_VBIOS_DI,
+ GPIO_FN_VBIOS_DO, GPIO_FN_VBIOS_CLK, GPIO_FN_VBIOS_CS,
- /* PTW (mobule: LBSC) */
+ /* PTW (mobule: LBSC, EVC, SCIF) */
GPIO_FN_A15, GPIO_FN_A14, GPIO_FN_A13, GPIO_FN_A12,
GPIO_FN_A11, GPIO_FN_A10, GPIO_FN_A9, GPIO_FN_A8,
+ GPIO_FN_EVENT5, GPIO_FN_EVENT4, GPIO_FN_EVENT3, GPIO_FN_EVENT2,
+ GPIO_FN_EVENT1, GPIO_FN_EVENT0, GPIO_FN_CTS4, GPIO_FN_CTS2,
- /* PTX (mobule: LBSC) */
+ /* PTX (mobule: LBSC, SCIF, SIM) */
GPIO_FN_A7, GPIO_FN_A6, GPIO_FN_A5, GPIO_FN_A4,
GPIO_FN_A3, GPIO_FN_A2, GPIO_FN_A1, GPIO_FN_A0,
+ GPIO_FN_RTS2, GPIO_FN_SIM_D, GPIO_FN_SIM_CLK, GPIO_FN_SIM_RST,
/* PTY (mobule: LBSC) */
GPIO_FN_D7, GPIO_FN_D6, GPIO_FN_D5, GPIO_FN_D4,
GPIO_FN_D3, GPIO_FN_D2, GPIO_FN_D1, GPIO_FN_D0,
+
+ /* PTZ (mobule: eMMC, ONFI) */
+ GPIO_FN_MMCDAT7, GPIO_FN_MMCDAT6, GPIO_FN_MMCDAT5,
+ GPIO_FN_MMCDAT4, GPIO_FN_MMCDAT3, GPIO_FN_MMCDAT2,
+ GPIO_FN_MMCDAT1, GPIO_FN_MMCDAT0,
+ GPIO_FN_ON_DQ7, GPIO_FN_ON_DQ6, GPIO_FN_ON_DQ5, GPIO_FN_ON_DQ4,
+ GPIO_FN_ON_DQ3, GPIO_FN_ON_DQ2, GPIO_FN_ON_DQ1, GPIO_FN_ON_DQ0,
};
#endif /* __ASM_SH7757_H__ */
diff --git a/arch/sh/include/cpu-sh4/cpu/shx3.h b/arch/sh/include/cpu-sh4/cpu/shx3.h
new file mode 100644
index 000000000000..68d9080a8da9
--- /dev/null
+++ b/arch/sh/include/cpu-sh4/cpu/shx3.h
@@ -0,0 +1,64 @@
+#ifndef __CPU_SHX3_H
+#define __CPU_SHX3_H
+
+enum {
+ /* PA */
+ GPIO_PA7, GPIO_PA6, GPIO_PA5, GPIO_PA4,
+ GPIO_PA3, GPIO_PA2, GPIO_PA1, GPIO_PA0,
+
+ /* PB */
+ GPIO_PB7, GPIO_PB6, GPIO_PB5, GPIO_PB4,
+ GPIO_PB3, GPIO_PB2, GPIO_PB1, GPIO_PB0,
+
+ /* PC */
+ GPIO_PC7, GPIO_PC6, GPIO_PC5, GPIO_PC4,
+ GPIO_PC3, GPIO_PC2, GPIO_PC1, GPIO_PC0,
+
+ /* PD */
+ GPIO_PD7, GPIO_PD6, GPIO_PD5, GPIO_PD4,
+ GPIO_PD3, GPIO_PD2, GPIO_PD1, GPIO_PD0,
+
+ /* PE */
+ GPIO_PE7, GPIO_PE6, GPIO_PE5, GPIO_PE4,
+ GPIO_PE3, GPIO_PE2, GPIO_PE1, GPIO_PE0,
+
+ /* PF */
+ GPIO_PF7, GPIO_PF6, GPIO_PF5, GPIO_PF4,
+ GPIO_PF3, GPIO_PF2, GPIO_PF1, GPIO_PF0,
+
+ /* PG */
+ GPIO_PG7, GPIO_PG6, GPIO_PG5, GPIO_PG4,
+ GPIO_PG3, GPIO_PG2, GPIO_PG1, GPIO_PG0,
+
+ /* PH */
+ GPIO_PH5, GPIO_PH4,
+ GPIO_PH3, GPIO_PH2, GPIO_PH1, GPIO_PH0,
+
+ /* SCIF */
+ GPIO_FN_SCK3, GPIO_FN_TXD3, GPIO_FN_RXD3,
+ GPIO_FN_SCK2, GPIO_FN_TXD2, GPIO_FN_RXD2,
+ GPIO_FN_SCK1, GPIO_FN_TXD1, GPIO_FN_RXD1,
+ GPIO_FN_SCK0, GPIO_FN_TXD0, GPIO_FN_RXD0,
+
+ /* LBSC */
+ GPIO_FN_D31, GPIO_FN_D30, GPIO_FN_D29, GPIO_FN_D28,
+ GPIO_FN_D27, GPIO_FN_D26, GPIO_FN_D25, GPIO_FN_D24,
+ GPIO_FN_D23, GPIO_FN_D22, GPIO_FN_D21, GPIO_FN_D20,
+ GPIO_FN_D19, GPIO_FN_D18, GPIO_FN_D17, GPIO_FN_D16,
+ GPIO_FN_WE3, GPIO_FN_WE2, GPIO_FN_CS6, GPIO_FN_CS5,
+ GPIO_FN_CS4, GPIO_FN_CLKOUTENB, GPIO_FN_BREQ,
+ GPIO_FN_IOIS16, GPIO_FN_CE2B, GPIO_FN_CE2A, GPIO_FN_BACK,
+
+ /* DMAC */
+ GPIO_FN_DACK0, GPIO_FN_DREQ0, GPIO_FN_DRAK0,
+ GPIO_FN_DACK1, GPIO_FN_DREQ1, GPIO_FN_DRAK1,
+ GPIO_FN_DACK2, GPIO_FN_DREQ2, GPIO_FN_DRAK2,
+ GPIO_FN_DACK3, GPIO_FN_DREQ3, GPIO_FN_DRAK3,
+
+ /* INTC */
+ GPIO_FN_IRQ3, GPIO_FN_IRQ2, GPIO_FN_IRQ1, GPIO_FN_IRQ0,
+ GPIO_FN_IRL3, GPIO_FN_IRL2, GPIO_FN_IRL1, GPIO_FN_IRL0,
+ GPIO_FN_IRQOUT, GPIO_FN_STATUS1, GPIO_FN_STATUS0,
+};
+
+#endif /* __CPU_SHX3_H */
diff --git a/arch/sh/include/mach-common/mach/edosk7705.h b/arch/sh/include/mach-common/mach/edosk7705.h
deleted file mode 100644
index efc43b323466..000000000000
--- a/arch/sh/include/mach-common/mach/edosk7705.h
+++ /dev/null
@@ -1,7 +0,0 @@
-#ifndef __ASM_SH_EDOSK7705_H
-#define __ASM_SH_EDOSK7705_H
-
-#define __IO_PREFIX sh_edosk7705
-#include <asm/io_generic.h>
-
-#endif /* __ASM_SH_EDOSK7705_H */
diff --git a/arch/sh/include/mach-common/mach/microdev.h b/arch/sh/include/mach-common/mach/microdev.h
index 1aed15856e11..dcb05fa8c164 100644
--- a/arch/sh/include/mach-common/mach/microdev.h
+++ b/arch/sh/include/mach-common/mach/microdev.h
@@ -68,13 +68,4 @@ extern void microdev_print_fpga_intc_status(void);
#define __IO_PREFIX microdev
#include <asm/io_generic.h>
-#if defined(CONFIG_PCI)
-unsigned char microdev_pci_inb(unsigned long port);
-unsigned short microdev_pci_inw(unsigned long port);
-unsigned long microdev_pci_inl(unsigned long port);
-void microdev_pci_outb(unsigned char data, unsigned long port);
-void microdev_pci_outw(unsigned short data, unsigned long port);
-void microdev_pci_outl(unsigned long data, unsigned long port);
-#endif
-
#endif /* __ASM_SH_MICRODEV_H */
diff --git a/arch/sh/include/mach-common/mach/snapgear.h b/arch/sh/include/mach-common/mach/secureedge5410.h
index 042d95f51c4d..3653b9a4bacc 100644
--- a/arch/sh/include/mach-common/mach/snapgear.h
+++ b/arch/sh/include/mach-common/mach/secureedge5410.h
@@ -12,30 +12,9 @@
#ifndef _ASM_SH_IO_SNAPGEAR_H
#define _ASM_SH_IO_SNAPGEAR_H
-#if defined(CONFIG_CPU_SH4)
-/*
- * The external interrupt lines, these take up ints 0 - 15 inclusive
- * depending on the priority for the interrupt. In fact the priority
- * is the interrupt :-)
- */
-
-#define IRL0_IRQ 2
-#define IRL0_PRIORITY 13
-
-#define IRL1_IRQ 5
-#define IRL1_PRIORITY 10
-
-#define IRL2_IRQ 8
-#define IRL2_PRIORITY 7
-
-#define IRL3_IRQ 11
-#define IRL3_PRIORITY 4
-#endif
-
#define __IO_PREFIX snapgear
#include <asm/io_generic.h>
-#ifdef CONFIG_SH_SECUREEDGE5410
/*
* We need to remember what was written to the ioport as some bits
* are shared with other functions and you cannot read back what was
@@ -66,6 +45,5 @@ extern unsigned short secureedge5410_ioport;
((secureedge5410_ioport & ~(mask)) | ((val) & (mask)))))
#define SECUREEDGE_READ_IOPORT() \
((*SECUREEDGE_IOPORT_ADDR&0x0817) | (secureedge5410_ioport&~0x0817))
-#endif
#endif /* _ASM_SH_IO_SNAPGEAR_H */
diff --git a/arch/sh/include/mach-common/mach/sh2007.h b/arch/sh/include/mach-common/mach/sh2007.h
new file mode 100644
index 000000000000..48180b9aa03d
--- /dev/null
+++ b/arch/sh/include/mach-common/mach/sh2007.h
@@ -0,0 +1,117 @@
+#ifndef __MACH_SH2007_H
+#define __MACH_SH2007_H
+
+#define CS5BCR 0xff802050
+#define CS5WCR 0xff802058
+#define CS5PCR 0xff802070
+
+#define BUS_SZ8 1
+#define BUS_SZ16 2
+#define BUS_SZ32 3
+
+#define PCMCIA_IODYN 1
+#define PCMCIA_ATA 0
+#define PCMCIA_IO8 2
+#define PCMCIA_IO16 3
+#define PCMCIA_COMM8 4
+#define PCMCIA_COMM16 5
+#define PCMCIA_ATTR8 6
+#define PCMCIA_ATTR16 7
+
+#define TYPE_SRAM 0
+#define TYPE_PCMCIA 4
+
+/* write-read/write-write delay (0-7:0,1,2,3,4,5,6,7) */
+#define IWW5 0
+#define IWW6 3
+/* different area, read-write delay (0-7:0,1,2,3,4,5,6,7) */
+#define IWRWD5 2
+#define IWRWD6 2
+/* same area, read-write delay (0-7:0,1,2,3,4,5,6,7) */
+#define IWRWS5 2
+#define IWRWS6 2
+/* different area, read-read delay (0-7:0,1,2,3,4,5,6,7) */
+#define IWRRD5 2
+#define IWRRD6 2
+/* same area, read-read delay (0-7:0,1,2,3,4,5,6,7) */
+#define IWRRS5 0
+#define IWRRS6 2
+/* burst count (0-3:4,8,16,32) */
+#define BST5 0
+#define BST6 0
+/* bus size */
+#define SZ5 BUS_SZ16
+#define SZ6 BUS_SZ16
+/* RD hold for SRAM (0-1:0,1) */
+#define RDSPL5 0
+#define RDSPL6 0
+/* Burst pitch (0-7:0,1,2,3,4,5,6,7) */
+#define BW5 0
+#define BW6 0
+/* Multiplex (0-1:0,1) */
+#define MPX5 0
+#define MPX6 0
+/* device type */
+#define TYPE5 TYPE_PCMCIA
+#define TYPE6 TYPE_PCMCIA
+/* address setup before assert CSn for SRAM (0-7:0,1,2,3,4,5,6,7) */
+#define ADS5 0
+#define ADS6 0
+/* address hold after negate CSn for SRAM (0-7:0,1,2,3,4,5,6,7) */
+#define ADH5 0
+#define ADH6 0
+/* CSn assert to RD assert delay for SRAM (0-7:0,1,2,3,4,5,6,7) */
+#define RDS5 0
+#define RDS6 0
+/* RD negate to CSn negate delay for SRAM (0-7:0,1,2,3,4,5,6,7) */
+#define RDH5 0
+#define RDH6 0
+/* CSn assert to WE assert delay for SRAM (0-7:0,1,2,3,4,5,6,7) */
+#define WTS5 0
+#define WTS6 0
+/* WE negate to CSn negate delay for SRAM (0-7:0,1,2,3,4,5,6,7) */
+#define WTH5 0
+#define WTH6 0
+/* BS hold (0-1:1,2) */
+#define BSH5 0
+#define BSH6 0
+/* wait cycle (0-15:0,1,2,3,4,5,6,7,8,9,11,13,15,17,21,25) */
+#define IW5 6 /* 60ns PIO mode 4 */
+#define IW6 15 /* 250ns */
+
+#define SAA5 PCMCIA_IODYN /* IDE area b4000000-b5ffffff */
+#define SAB5 PCMCIA_IODYN /* CF area b6000000-b7ffffff */
+#define PCWA5 0 /* additional wait A (0-3:0,15,30,50) */
+#define PCWB5 0 /* additional wait B (0-3:0,15,30,50) */
+/* wait B (0-15:0,1,2,3,4,5,6,7,8,9,11,13,15,17,21,25) */
+#define PCIW5 12
+/* Address->OE/WE assert delay A (0-7:0,1,2,3,6,9,12,15) */
+#define TEDA5 2
+/* Address->OE/WE assert delay B (0-7:0,1,2,3,6,9,12,15) */
+#define TEDB5 4
+/* OE/WE negate->Address delay A (0-7:0,1,2,3,6,9,12,15) */
+#define TEHA5 2
+/* OE/WE negate->Address delay B (0-7:0,1,2,3,6,9,12,15) */
+#define TEHB5 3
+
+#define CS5BCR_D ((IWW5<<28)|(IWRWD5<<24)|(IWRWS5<<20)| \
+ (IWRRD5<<16)|(IWRRS5<<12)|(BST5<<10)| \
+ (SZ5<<8)|(RDSPL5<<7)|(BW5<<4)|(MPX5<<3)|TYPE5)
+#define CS5WCR_D ((ADS5<<28)|(ADH5<<24)|(RDS5<<20)| \
+ (RDH5<<16)|(WTS5<<12)|(WTH5<<8)|(BSH5<<4)|IW5)
+#define CS5PCR_D ((SAA5<<28)|(SAB5<<24)|(PCWA5<<22)| \
+ (PCWB5<<20)|(PCIW5<<16)|(TEDA5<<12)| \
+ (TEDB5<<8)|(TEHA5<<4)|TEHB5)
+
+#define SMC0_BASE 0xb0800000 /* eth0 */
+#define SMC1_BASE 0xb0900000 /* eth1 */
+#define CF_BASE 0xb6100000 /* Compact Flash (I/O area) */
+#define IDE_BASE 0xb4000000 /* IDE */
+#define PC104_IO_BASE 0xb8000000
+#define PC104_MEM_BASE 0xba000000
+#define SMC_IO_SIZE 0x100
+
+#define CF_OFFSET 0x1f0
+#define IDE_OFFSET 0x170
+
+#endif /* __MACH_SH2007_H */
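sh2007.h composes the area-5 bus controller settings (CS5BCR_D and friends) from the individual timing fields documented above. The board setup presumably writes them out roughly like this; a sketch, not the actual board file:

#include <linux/init.h>
#include <asm/io.h>
#include <mach/sh2007.h>

/* Illustrative only: program the CS5 timing registers with the values
 * composed in the header. */
static void __init example_sh2007_bus_init(void)
{
	__raw_writel(CS5BCR_D, CS5BCR);
	__raw_writel(CS5WCR_D, CS5WCR);
	__raw_writel(CS5PCR_D, CS5PCR);
}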
diff --git a/arch/sh/include/mach-common/mach/systemh7751.h b/arch/sh/include/mach-common/mach/systemh7751.h
deleted file mode 100644
index 4161122c84ef..000000000000
--- a/arch/sh/include/mach-common/mach/systemh7751.h
+++ /dev/null
@@ -1,71 +0,0 @@
-#ifndef __ASM_SH_SYSTEMH_7751SYSTEMH_H
-#define __ASM_SH_SYSTEMH_7751SYSTEMH_H
-
-/*
- * linux/include/asm-sh/systemh/7751systemh.h
- *
- * Copyright (C) 2000 Kazumoto Kojima
- *
- * Hitachi SystemH support
-
- * Modified for 7751 SystemH by
- * Jonathan Short, 2002.
- */
-
-/* Box specific addresses. */
-
-#define PA_ROM 0x00000000 /* EPROM */
-#define PA_ROM_SIZE 0x00400000 /* EPROM size 4M byte */
-#define PA_FROM 0x01000000 /* EPROM */
-#define PA_FROM_SIZE 0x00400000 /* EPROM size 4M byte */
-#define PA_EXT1 0x04000000
-#define PA_EXT1_SIZE 0x04000000
-#define PA_EXT2 0x08000000
-#define PA_EXT2_SIZE 0x04000000
-#define PA_SDRAM 0x0c000000
-#define PA_SDRAM_SIZE 0x04000000
-
-#define PA_EXT4 0x12000000
-#define PA_EXT4_SIZE 0x02000000
-#define PA_EXT5 0x14000000
-#define PA_EXT5_SIZE 0x04000000
-#define PA_PCIC 0x18000000 /* MR-SHPC-01 PCMCIA */
-
-#define PA_DIPSW0 0xb9000000 /* Dip switch 5,6 */
-#define PA_DIPSW1 0xb9000002 /* Dip switch 7,8 */
-#define PA_LED 0xba000000 /* LED */
-#define PA_BCR 0xbb000000 /* FPGA on the MS7751SE01 */
-
-#define PA_MRSHPC 0xb83fffe0 /* MR-SHPC-01 PCMCIA controller */
-#define PA_MRSHPC_MW1 0xb8400000 /* MR-SHPC-01 memory window base */
-#define PA_MRSHPC_MW2 0xb8500000 /* MR-SHPC-01 attribute window base */
-#define PA_MRSHPC_IO 0xb8600000 /* MR-SHPC-01 I/O window base */
-#define MRSHPC_MODE (PA_MRSHPC + 4)
-#define MRSHPC_OPTION (PA_MRSHPC + 6)
-#define MRSHPC_CSR (PA_MRSHPC + 8)
-#define MRSHPC_ISR (PA_MRSHPC + 10)
-#define MRSHPC_ICR (PA_MRSHPC + 12)
-#define MRSHPC_CPWCR (PA_MRSHPC + 14)
-#define MRSHPC_MW0CR1 (PA_MRSHPC + 16)
-#define MRSHPC_MW1CR1 (PA_MRSHPC + 18)
-#define MRSHPC_IOWCR1 (PA_MRSHPC + 20)
-#define MRSHPC_MW0CR2 (PA_MRSHPC + 22)
-#define MRSHPC_MW1CR2 (PA_MRSHPC + 24)
-#define MRSHPC_IOWCR2 (PA_MRSHPC + 26)
-#define MRSHPC_CDCR (PA_MRSHPC + 28)
-#define MRSHPC_PCIC_INFO (PA_MRSHPC + 30)
-
-#define BCR_ILCRA (PA_BCR + 0)
-#define BCR_ILCRB (PA_BCR + 2)
-#define BCR_ILCRC (PA_BCR + 4)
-#define BCR_ILCRD (PA_BCR + 6)
-#define BCR_ILCRE (PA_BCR + 8)
-#define BCR_ILCRF (PA_BCR + 10)
-#define BCR_ILCRG (PA_BCR + 12)
-
-#define IRQ_79C973 13
-
-#define __IO_PREFIX sh7751systemh
-#include <asm/io_generic.h>
-
-#endif /* __ASM_SH_SYSTEMH_7751SYSTEMH_H */
diff --git a/arch/sh/include/mach-sdk7786/mach/fpga.h b/arch/sh/include/mach-sdk7786/mach/fpga.h
index 416b621d94d1..40f0c2d3690c 100644
--- a/arch/sh/include/mach-sdk7786/mach/fpga.h
+++ b/arch/sh/include/mach-sdk7786/mach/fpga.h
@@ -31,11 +31,35 @@
#define EXTASR 0x110
#define SPCAR 0x120
#define INTMSR 0x130
+
#define PCIECR 0x140
+#define PCIECR_PCIEMUX1 BIT(15)
+#define PCIECR_PCIEMUX0 BIT(14)
+#define PCIECR_PRST4 BIT(12) /* slot 4 card present */
+#define PCIECR_PRST3 BIT(11) /* slot 3 card present */
+#define PCIECR_PRST2 BIT(10) /* slot 2 card present */
+#define PCIECR_PRST1 BIT(9) /* slot 1 card present */
+#define PCIECR_CLKEN BIT(4) /* oscillator enable */
+
#define FAER 0x150
#define USRGPIR 0x160
+
/* 0x170 reserved */
-#define LCLASR 0x180
+
+#define LCLASR 0x180
+#define LCLASR_FRAMEN BIT(15)
+
+#define LCLASR_FPGA_SEL_SHIFT 12
+#define LCLASR_NAND_SEL_SHIFT 8
+#define LCLASR_NORB_SEL_SHIFT 4
+#define LCLASR_NORA_SEL_SHIFT 0
+
+#define LCLASR_AREA_MASK 0x7
+
+#define LCLASR_FPGA_SEL_MASK (LCLASR_AREA_MASK << LCLASR_FPGA_SEL_SHIFT)
+#define LCLASR_NAND_SEL_MASK (LCLASR_AREA_MASK << LCLASR_NAND_SEL_SHIFT)
+#define LCLASR_NORB_SEL_MASK (LCLASR_AREA_MASK << LCLASR_NORB_SEL_SHIFT)
+#define LCLASR_NORA_SEL_MASK (LCLASR_AREA_MASK << LCLASR_NORA_SEL_SHIFT)
#define SBCR 0x190
#define SCBR_I2CMEN BIT(0) /* FPGA I2C master enable */
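The LCLASR shift/mask macros above describe the FPGA's local-bus area-select fields. A hedged sketch of updating one field; fpga_read_reg()/fpga_write_reg() are assumed to be the SDK7786 FPGA accessors and should be checked against mach/fpga.h:

#include <mach/fpga.h>

/* Illustrative only: route the NOR-A chip-select window to a given area. */
static void example_select_nora_area(unsigned int area)
{
	u16 v = fpga_read_reg(LCLASR);

	v &= ~LCLASR_NORA_SEL_MASK;
	v |= (area & LCLASR_AREA_MASK) << LCLASR_NORA_SEL_SHIFT;
	fpga_write_reg(v, LCLASR);
}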
diff --git a/arch/sh/include/mach-x3proto/mach/hardware.h b/arch/sh/include/mach-x3proto/mach/hardware.h
new file mode 100644
index 000000000000..52bca57bfeb6
--- /dev/null
+++ b/arch/sh/include/mach-x3proto/mach/hardware.h
@@ -0,0 +1,12 @@
+#ifndef __MACH_X3PROTO_HARDWARE_H
+#define __MACH_X3PROTO_HARDWARE_H
+
+struct gpio_chip;
+
+/* arch/sh/boards/mach-x3proto/gpio.c */
+int x3proto_gpio_setup(void);
+extern struct gpio_chip x3proto_gpio_chip;
+
+#define NR_BASEBOARD_GPIOS 16
+
+#endif /* __MACH_X3PROTO_HARDWARE_H */
diff --git a/arch/sh/include/asm/ilsel.h b/arch/sh/include/mach-x3proto/mach/ilsel.h
index e3d304b280f6..e3d304b280f6 100644
--- a/arch/sh/include/asm/ilsel.h
+++ b/arch/sh/include/mach-x3proto/mach/ilsel.h
diff --git a/arch/sh/kernel/Makefile b/arch/sh/kernel/Makefile
index e25f3c69525d..8eed6a485446 100644
--- a/arch/sh/kernel/Makefile
+++ b/arch/sh/kernel/Makefile
@@ -12,9 +12,9 @@ endif
CFLAGS_REMOVE_return_address.o = -pg
obj-y := clkdev.o debugtraps.o dma-nommu.o dumpstack.o \
- idle.o io.o irq.o \
- irq_$(BITS).o machvec.o nmi_debug.o process.o \
- process_$(BITS).o ptrace_$(BITS).o \
+ idle.o io.o irq.o irq_$(BITS).o kdebugfs.o \
+ machvec.o nmi_debug.o process.o \
+ process_$(BITS).o ptrace.o ptrace_$(BITS).o \
reboot.o return_address.o \
setup.o signal_$(BITS).o sys_sh.o sys_sh$(BITS).o \
syscalls_$(BITS).o time.o topology.o traps.o \
@@ -44,4 +44,4 @@ obj-$(CONFIG_HAS_IOPORT) += io_generic.o
obj-$(CONFIG_HAVE_HW_BREAKPOINT) += hw_breakpoint.o
obj-$(CONFIG_GENERIC_CLOCKEVENTS_BROADCAST) += localtimer.o
-EXTRA_CFLAGS += -Werror
+ccflags-y := -Werror
diff --git a/arch/sh/kernel/clkdev.c b/arch/sh/kernel/clkdev.c
index befc255830a4..1f800ef4a735 100644
--- a/arch/sh/kernel/clkdev.c
+++ b/arch/sh/kernel/clkdev.c
@@ -161,9 +161,11 @@ EXPORT_SYMBOL(clk_add_alias);
*/
void clkdev_drop(struct clk_lookup *cl)
{
+ struct clk_lookup_alloc *cla = container_of(cl, struct clk_lookup_alloc, cl);
+
mutex_lock(&clocks_mutex);
list_del(&cl->node);
mutex_unlock(&clocks_mutex);
- kfree(cl);
+ kfree(cla);
}
EXPORT_SYMBOL(clkdev_drop);
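The clkdev_drop() fix above matters because the clk_lookup handed back to callers is embedded inside a larger clk_lookup_alloc, so kfree() must be given the enclosing allocation. The same rule in general terms, with made-up names:

#include <linux/kernel.h>
#include <linux/list.h>
#include <linux/slab.h>

/* Illustrative only: freeing an embedded member must go through the
 * enclosing allocation, never the member pointer itself. */
struct example_outer {
	char name[16];
	struct list_head node;
};

static void example_drop(struct list_head *member)
{
	struct example_outer *outer =
		container_of(member, struct example_outer, node);

	kfree(outer);	/* correct: the pointer kmalloc() originally returned */
}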
diff --git a/arch/sh/kernel/cpu/init.c b/arch/sh/kernel/cpu/init.c
index 97661061ff20..fac742e514ee 100644
--- a/arch/sh/kernel/cpu/init.c
+++ b/arch/sh/kernel/cpu/init.c
@@ -340,6 +340,8 @@ asmlinkage void __cpuinit cpu_init(void)
*/
current_cpu_data.asid_cache = NO_CONTEXT;
+ current_cpu_data.phys_bits = __in_29bit_mode() ? 29 : 32;
+
speculative_execution_init();
expmask_init();
diff --git a/arch/sh/kernel/cpu/irq/imask.c b/arch/sh/kernel/cpu/irq/imask.c
index a351ed84eec5..32c825c9488e 100644
--- a/arch/sh/kernel/cpu/irq/imask.c
+++ b/arch/sh/kernel/cpu/irq/imask.c
@@ -51,16 +51,20 @@ static inline void set_interrupt_registers(int ip)
: "t");
}
-static void mask_imask_irq(unsigned int irq)
+static void mask_imask_irq(struct irq_data *data)
{
+ unsigned int irq = data->irq;
+
clear_bit(irq, imask_mask);
if (interrupt_priority < IMASK_PRIORITY - irq)
interrupt_priority = IMASK_PRIORITY - irq;
set_interrupt_registers(interrupt_priority);
}
-static void unmask_imask_irq(unsigned int irq)
+static void unmask_imask_irq(struct irq_data *data)
{
+ unsigned int irq = data->irq;
+
set_bit(irq, imask_mask);
interrupt_priority = IMASK_PRIORITY -
find_first_zero_bit(imask_mask, IMASK_PRIORITY);
@@ -69,9 +73,9 @@ static void unmask_imask_irq(unsigned int irq)
static struct irq_chip imask_irq_chip = {
.name = "SR.IMASK",
- .mask = mask_imask_irq,
- .unmask = unmask_imask_irq,
- .mask_ack = mask_imask_irq,
+ .irq_mask = mask_imask_irq,
+ .irq_unmask = unmask_imask_irq,
+ .irq_mask_ack = mask_imask_irq,
};
void make_imask_irq(unsigned int irq)
diff --git a/arch/sh/kernel/cpu/irq/intc-sh5.c b/arch/sh/kernel/cpu/irq/intc-sh5.c
index 96a239583948..5af48f8357e5 100644
--- a/arch/sh/kernel/cpu/irq/intc-sh5.c
+++ b/arch/sh/kernel/cpu/irq/intc-sh5.c
@@ -76,39 +76,11 @@ int intc_evt_to_irq[(0xE20/0x20)+1] = {
};
static unsigned long intc_virt;
-
-static unsigned int startup_intc_irq(unsigned int irq);
-static void shutdown_intc_irq(unsigned int irq);
-static void enable_intc_irq(unsigned int irq);
-static void disable_intc_irq(unsigned int irq);
-static void mask_and_ack_intc(unsigned int);
-static void end_intc_irq(unsigned int irq);
-
-static struct irq_chip intc_irq_type = {
- .name = "INTC",
- .startup = startup_intc_irq,
- .shutdown = shutdown_intc_irq,
- .enable = enable_intc_irq,
- .disable = disable_intc_irq,
- .ack = mask_and_ack_intc,
- .end = end_intc_irq
-};
-
static int irlm; /* IRL mode */
-static unsigned int startup_intc_irq(unsigned int irq)
-{
- enable_intc_irq(irq);
- return 0; /* never anything pending */
-}
-
-static void shutdown_intc_irq(unsigned int irq)
-{
- disable_intc_irq(irq);
-}
-
-static void enable_intc_irq(unsigned int irq)
+static void enable_intc_irq(struct irq_data *data)
{
+ unsigned int irq = data->irq;
unsigned long reg;
unsigned long bitmask;
@@ -126,8 +98,9 @@ static void enable_intc_irq(unsigned int irq)
__raw_writel(bitmask, reg);
}
-static void disable_intc_irq(unsigned int irq)
+static void disable_intc_irq(struct irq_data *data)
{
+ unsigned int irq = data->irq;
unsigned long reg;
unsigned long bitmask;
@@ -142,15 +115,11 @@ static void disable_intc_irq(unsigned int irq)
__raw_writel(bitmask, reg);
}
-static void mask_and_ack_intc(unsigned int irq)
-{
- disable_intc_irq(irq);
-}
-
-static void end_intc_irq(unsigned int irq)
-{
- enable_intc_irq(irq);
-}
+static struct irq_chip intc_irq_type = {
+ .name = "INTC",
+ .irq_enable = enable_intc_irq,
+ .irq_disable = disable_intc_irq,
+};
void __init plat_irq_setup(void)
{
diff --git a/arch/sh/kernel/cpu/irq/ipr.c b/arch/sh/kernel/cpu/irq/ipr.c
index 9282d965a1b6..7516c35ee514 100644
--- a/arch/sh/kernel/cpu/irq/ipr.c
+++ b/arch/sh/kernel/cpu/irq/ipr.c
@@ -24,25 +24,25 @@
#include <linux/module.h>
#include <linux/topology.h>
-static inline struct ipr_desc *get_ipr_desc(unsigned int irq)
+static inline struct ipr_desc *get_ipr_desc(struct irq_data *data)
{
- struct irq_chip *chip = get_irq_chip(irq);
+ struct irq_chip *chip = irq_data_get_irq_chip(data);
return container_of(chip, struct ipr_desc, chip);
}
-static void disable_ipr_irq(unsigned int irq)
+static void disable_ipr_irq(struct irq_data *data)
{
- struct ipr_data *p = get_irq_chip_data(irq);
- unsigned long addr = get_ipr_desc(irq)->ipr_offsets[p->ipr_idx];
+ struct ipr_data *p = irq_data_get_irq_chip_data(data);
+ unsigned long addr = get_ipr_desc(data)->ipr_offsets[p->ipr_idx];
/* Set the priority in IPR to 0 */
__raw_writew(__raw_readw(addr) & (0xffff ^ (0xf << p->shift)), addr);
(void)__raw_readw(addr); /* Read back to flush write posting */
}
-static void enable_ipr_irq(unsigned int irq)
+static void enable_ipr_irq(struct irq_data *data)
{
- struct ipr_data *p = get_irq_chip_data(irq);
- unsigned long addr = get_ipr_desc(irq)->ipr_offsets[p->ipr_idx];
+ struct ipr_data *p = irq_data_get_irq_chip_data(data);
+ unsigned long addr = get_ipr_desc(data)->ipr_offsets[p->ipr_idx];
/* Set priority in IPR back to original value */
__raw_writew(__raw_readw(addr) | (p->priority << p->shift), addr);
}
@@ -56,19 +56,18 @@ void register_ipr_controller(struct ipr_desc *desc)
{
int i;
- desc->chip.mask = disable_ipr_irq;
- desc->chip.unmask = enable_ipr_irq;
- desc->chip.mask_ack = disable_ipr_irq;
+ desc->chip.irq_mask = disable_ipr_irq;
+ desc->chip.irq_unmask = enable_ipr_irq;
for (i = 0; i < desc->nr_irqs; i++) {
struct ipr_data *p = desc->ipr_data + i;
- struct irq_desc *irq_desc;
+ int res;
BUG_ON(p->ipr_idx >= desc->nr_offsets);
BUG_ON(!desc->ipr_offsets[p->ipr_idx]);
- irq_desc = irq_to_desc_alloc_node(p->irq, numa_node_id());
- if (unlikely(!irq_desc)) {
+ res = irq_alloc_desc_at(p->irq, numa_node_id());
+ if (unlikely(res != p->irq && res != -EEXIST)) {
printk(KERN_INFO "can not get irq_desc for %d\n",
p->irq);
continue;
@@ -78,7 +77,7 @@ void register_ipr_controller(struct ipr_desc *desc)
set_irq_chip_and_handler_name(p->irq, &desc->chip,
handle_level_irq, "level");
set_irq_chip_data(p->irq, p);
- disable_ipr_irq(p->irq);
+ disable_ipr_irq(irq_get_irq_data(p->irq));
}
}
EXPORT_SYMBOL(register_ipr_controller);
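The imask, intc-sh5 and ipr hunks all follow the same genirq conversion: callbacks now take a struct irq_data and recover the irq number or chip data from it, and they are wired up through the irq_* members of struct irq_chip. The general shape, with hypothetical driver names:

#include <linux/io.h>
#include <linux/irq.h>

/* Illustrative only: what a converted irq_chip callback pair looks like. */
struct example_priv {
	void __iomem *mask_reg;
	unsigned int irq_base;
};

static void example_irq_mask(struct irq_data *data)
{
	struct example_priv *p = irq_data_get_irq_chip_data(data);

	__raw_writew(__raw_readw(p->mask_reg) |
		     (1 << (data->irq - p->irq_base)), p->mask_reg);
}

static void example_irq_unmask(struct irq_data *data)
{
	struct example_priv *p = irq_data_get_irq_chip_data(data);

	__raw_writew(__raw_readw(p->mask_reg) &
		     ~(1 << (data->irq - p->irq_base)), p->mask_reg);
}

static struct irq_chip example_irq_chip = {
	.name		= "example",
	.irq_mask	= example_irq_mask,
	.irq_unmask	= example_irq_unmask,
};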
diff --git a/arch/sh/kernel/cpu/sh4/clock-sh4-202.c b/arch/sh/kernel/cpu/sh4/clock-sh4-202.c
index 4eabc68cd753..b601fa3978d1 100644
--- a/arch/sh/kernel/cpu/sh4/clock-sh4-202.c
+++ b/arch/sh/kernel/cpu/sh4/clock-sh4-202.c
@@ -110,7 +110,7 @@ static int shoc_clk_verify_rate(struct clk *clk, unsigned long rate)
return 0;
}
-static int shoc_clk_set_rate(struct clk *clk, unsigned long rate, int algo_id)
+static int shoc_clk_set_rate(struct clk *clk, unsigned long rate)
{
unsigned long frqcr3;
unsigned int tmp;
diff --git a/arch/sh/kernel/cpu/sh4/perf_event.c b/arch/sh/kernel/cpu/sh4/perf_event.c
index 7f9ecc9c2d02..dbf3b4bb71fe 100644
--- a/arch/sh/kernel/cpu/sh4/perf_event.c
+++ b/arch/sh/kernel/cpu/sh4/perf_event.c
@@ -225,7 +225,7 @@ static void sh7750_pmu_enable_all(void)
}
static struct sh_pmu sh7750_pmu = {
- .name = "SH7750",
+ .name = "sh7750",
.num_events = 2,
.event_map = sh7750_event_map,
.max_events = ARRAY_SIZE(sh7750_general_events),
diff --git a/arch/sh/kernel/cpu/sh4/probe.c b/arch/sh/kernel/cpu/sh4/probe.c
index d180f16281ed..b93458f33b74 100644
--- a/arch/sh/kernel/cpu/sh4/probe.c
+++ b/arch/sh/kernel/cpu/sh4/probe.c
@@ -150,7 +150,7 @@ void __cpuinit cpu_probe(void)
boot_cpu_data.type = CPU_SH7724;
boot_cpu_data.flags |= CPU_HAS_L2_CACHE;
break;
- case 0x50:
+ case 0x10:
boot_cpu_data.type = CPU_SH7757;
break;
}
diff --git a/arch/sh/kernel/cpu/sh4a/Makefile b/arch/sh/kernel/cpu/sh4a/Makefile
index b144e8af89dc..cc122b1d3035 100644
--- a/arch/sh/kernel/cpu/sh4a/Makefile
+++ b/arch/sh/kernel/cpu/sh4a/Makefile
@@ -8,13 +8,13 @@ obj-$(CONFIG_CPU_SUBTYPE_SH7763) += setup-sh7763.o
obj-$(CONFIG_CPU_SUBTYPE_SH7770) += setup-sh7770.o
obj-$(CONFIG_CPU_SUBTYPE_SH7780) += setup-sh7780.o
obj-$(CONFIG_CPU_SUBTYPE_SH7785) += setup-sh7785.o
-obj-$(CONFIG_CPU_SUBTYPE_SH7786) += setup-sh7786.o
+obj-$(CONFIG_CPU_SUBTYPE_SH7786) += setup-sh7786.o intc-shx3.o
obj-$(CONFIG_CPU_SUBTYPE_SH7343) += setup-sh7343.o
obj-$(CONFIG_CPU_SUBTYPE_SH7722) += setup-sh7722.o
obj-$(CONFIG_CPU_SUBTYPE_SH7723) += setup-sh7723.o
obj-$(CONFIG_CPU_SUBTYPE_SH7724) += setup-sh7724.o
obj-$(CONFIG_CPU_SUBTYPE_SH7366) += setup-sh7366.o
-obj-$(CONFIG_CPU_SUBTYPE_SHX3) += setup-shx3.o
+obj-$(CONFIG_CPU_SUBTYPE_SHX3) += setup-shx3.o intc-shx3.o
# SMP setup
smp-$(CONFIG_CPU_SHX3) := smp-shx3.o
@@ -40,6 +40,7 @@ pinmux-$(CONFIG_CPU_SUBTYPE_SH7724) := pinmux-sh7724.o
pinmux-$(CONFIG_CPU_SUBTYPE_SH7757) := pinmux-sh7757.o
pinmux-$(CONFIG_CPU_SUBTYPE_SH7785) := pinmux-sh7785.o
pinmux-$(CONFIG_CPU_SUBTYPE_SH7786) := pinmux-sh7786.o
+pinmux-$(CONFIG_CPU_SUBTYPE_SHX3) := pinmux-shx3.o
obj-y += $(clock-y)
obj-$(CONFIG_SMP) += $(smp-y)
diff --git a/arch/sh/kernel/cpu/sh4a/clock-sh7724.c b/arch/sh/kernel/cpu/sh4a/clock-sh7724.c
index 2d9700c6b53a..271c0b325a9a 100644
--- a/arch/sh/kernel/cpu/sh4a/clock-sh7724.c
+++ b/arch/sh/kernel/cpu/sh4a/clock-sh7724.c
@@ -48,7 +48,7 @@ static struct clk r_clk = {
* Default rate for the root input clock, reset this with clk_set_rate()
* from the platform code.
*/
-struct clk extal_clk = {
+static struct clk extal_clk = {
.rate = 33333333,
};
@@ -111,12 +111,21 @@ static struct clk div3_clk = {
.parent = &pll_clk,
};
-struct clk *main_clks[] = {
+/* External input clock (pin name: FSIMCKA/FSIMCKB ) */
+struct clk sh7724_fsimcka_clk = {
+};
+
+struct clk sh7724_fsimckb_clk = {
+};
+
+static struct clk *main_clks[] = {
&r_clk,
&extal_clk,
&fll_clk,
&pll_clk,
&div3_clk,
+ &sh7724_fsimcka_clk,
+ &sh7724_fsimckb_clk,
};
static void div4_kick(struct clk *clk)
@@ -154,16 +163,38 @@ struct clk div4_clks[DIV4_NR] = {
[DIV4_M1] = DIV4(FRQCRB, 4, 0x2f7c, CLK_ENABLE_ON_INIT),
};
-enum { DIV6_V, DIV6_FA, DIV6_FB, DIV6_I, DIV6_S, DIV6_NR };
+enum { DIV6_V, DIV6_I, DIV6_S, DIV6_NR };
-struct clk div6_clks[DIV6_NR] = {
+static struct clk div6_clks[DIV6_NR] = {
[DIV6_V] = SH_CLK_DIV6(&div3_clk, VCLKCR, 0),
- [DIV6_FA] = SH_CLK_DIV6(&div3_clk, FCLKACR, 0),
- [DIV6_FB] = SH_CLK_DIV6(&div3_clk, FCLKBCR, 0),
[DIV6_I] = SH_CLK_DIV6(&div3_clk, IRDACLKCR, 0),
[DIV6_S] = SH_CLK_DIV6(&div3_clk, SPUCLKCR, CLK_ENABLE_ON_INIT),
};
+enum { DIV6_FA, DIV6_FB, DIV6_REPARENT_NR };
+
+/* Indices are important - they are the actual src selecting values */
+static struct clk *fclkacr_parent[] = {
+ [0] = &div3_clk,
+ [1] = NULL,
+ [2] = &sh7724_fsimcka_clk,
+ [3] = NULL,
+};
+
+static struct clk *fclkbcr_parent[] = {
+ [0] = &div3_clk,
+ [1] = NULL,
+ [2] = &sh7724_fsimckb_clk,
+ [3] = NULL,
+};
+
+static struct clk div6_reparent_clks[DIV6_REPARENT_NR] = {
+ [DIV6_FA] = SH_CLK_DIV6_EXT(&div3_clk, FCLKACR, 0,
+ fclkacr_parent, ARRAY_SIZE(fclkacr_parent), 6, 2),
+ [DIV6_FB] = SH_CLK_DIV6_EXT(&div3_clk, FCLKBCR, 0,
+ fclkbcr_parent, ARRAY_SIZE(fclkbcr_parent), 6, 2),
+};
+
static struct clk mstp_clks[HWBLK_NR] = {
SH_HWBLK_CLK(HWBLK_TLB, &div4_clks[DIV4_I], CLK_ENABLE_ON_INIT),
SH_HWBLK_CLK(HWBLK_IC, &div4_clks[DIV4_I], CLK_ENABLE_ON_INIT),
@@ -240,8 +271,8 @@ static struct clk_lookup lookups[] = {
/* DIV6 clocks */
CLKDEV_CON_ID("video_clk", &div6_clks[DIV6_V]),
- CLKDEV_CON_ID("fsia_clk", &div6_clks[DIV6_FA]),
- CLKDEV_CON_ID("fsib_clk", &div6_clks[DIV6_FB]),
+ CLKDEV_CON_ID("fsia_clk", &div6_reparent_clks[DIV6_FA]),
+ CLKDEV_CON_ID("fsib_clk", &div6_reparent_clks[DIV6_FB]),
CLKDEV_CON_ID("irda_clk", &div6_clks[DIV6_I]),
CLKDEV_CON_ID("spu_clk", &div6_clks[DIV6_S]),
@@ -376,6 +407,9 @@ int __init arch_clk_init(void)
ret = sh_clk_div6_register(div6_clks, DIV6_NR);
if (!ret)
+ ret = sh_clk_div6_reparent_register(div6_reparent_clks, DIV6_REPARENT_NR);
+
+ if (!ret)
ret = sh_hwblk_clk_register(mstp_clks, HWBLK_NR);
return ret;
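The sh7724_fsimcka_clk/sh7724_fsimckb_clk objects above carry no rate of their own, and the DIV6 reparent clocks expose the FCLKACR/FCLKBCR source-select field, with the parent array index doubling as the value written to that field. A board file would therefore be expected to report the external pin rate and switch the FSI clock parent roughly as in this sketch; the rate, function name and initcall placement are made-up examples, and the extern declaration would normally come from a CPU header.

#include <linux/clk.h>
#include <linux/err.h>
#include <linux/init.h>

extern struct clk sh7724_fsimcka_clk;	/* defined without static in the hunk above */

/* Hypothetical board setup: feed FSI port A from the FSIMCKA pin. */
static int __init myboard_fsi_clk_setup(void)
{
	struct clk *fsia;
	int ret;

	fsia = clk_get(NULL, "fsia_clk");	/* DIV6 reparent clock registered above */
	if (IS_ERR(fsia))
		return PTR_ERR(fsia);

	/* Tell the clock framework what the external FSIMCKA pin provides. */
	clk_set_rate(&sh7724_fsimcka_clk, 11000000);	/* example rate only */

	/* fclkacr_parent[2] is sh7724_fsimcka_clk, so this selects value 2
	 * in the FCLKACR source field. */
	ret = clk_set_parent(fsia, &sh7724_fsimcka_clk);

	clk_put(fsia);
	return ret;
}
device_initcall(myboard_fsi_clk_setup);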
diff --git a/arch/sh/kernel/cpu/sh4a/clock-sh7757.c b/arch/sh/kernel/cpu/sh4a/clock-sh7757.c
index 0a752bd324ac..ce39a2ae8c6c 100644
--- a/arch/sh/kernel/cpu/sh4a/clock-sh7757.c
+++ b/arch/sh/kernel/cpu/sh4a/clock-sh7757.c
@@ -3,7 +3,7 @@
*
* SH7757 support for the clock framework
*
- * Copyright (C) 2009 Renesas Solutions Corp.
+ * Copyright (C) 2009-2010 Renesas Solutions Corp.
*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file "COPYING" in the main directory of this archive
@@ -16,124 +16,147 @@
#include <asm/clock.h>
#include <asm/freq.h>
-static int ifc_divisors[] = { 2, 1, 4, 1, 1, 8, 1, 1,
- 16, 1, 1, 32, 1, 1, 1, 1 };
-static int sfc_divisors[] = { 2, 1, 4, 1, 1, 8, 1, 1,
- 16, 1, 1, 32, 1, 1, 1, 1 };
-static int bfc_divisors[] = { 2, 1, 4, 1, 1, 8, 1, 1,
- 16, 1, 1, 32, 1, 1, 1, 1 };
-static int p1fc_divisors[] = { 2, 1, 4, 1, 1, 8, 1, 1,
- 16, 1, 1, 32, 1, 1, 1, 1 };
+/*
+ * Default rate for the root input clock, reset this with clk_set_rate()
+ * from the platform code.
+ */
+static struct clk extal_clk = {
+ .rate = 48000000,
+};
-static void master_clk_init(struct clk *clk)
+static unsigned long pll_recalc(struct clk *clk)
{
- clk->rate = CONFIG_SH_PCLK_FREQ * 16;
-}
+ int multiplier;
-static struct clk_ops sh7757_master_clk_ops = {
- .init = master_clk_init,
-};
+ multiplier = test_mode_pin(MODE_PIN0) ? 24 : 16;
-static void module_clk_recalc(struct clk *clk)
-{
- int idx = __raw_readl(FRQCR) & 0x0000000f;
- clk->rate = clk->parent->rate / p1fc_divisors[idx];
+ return clk->parent->rate * multiplier;
}
-static struct clk_ops sh7757_module_clk_ops = {
- .recalc = module_clk_recalc,
+static struct clk_ops pll_clk_ops = {
+ .recalc = pll_recalc,
};
-static void bus_clk_recalc(struct clk *clk)
-{
- int idx = (__raw_readl(FRQCR) >> 8) & 0x0000000f;
- clk->rate = clk->parent->rate / bfc_divisors[idx];
-}
+static struct clk pll_clk = {
+ .ops = &pll_clk_ops,
+ .parent = &extal_clk,
+ .flags = CLK_ENABLE_ON_INIT,
+};
-static struct clk_ops sh7757_bus_clk_ops = {
- .recalc = bus_clk_recalc,
+static struct clk *clks[] = {
+ &extal_clk,
+ &pll_clk,
};
-static void cpu_clk_recalc(struct clk *clk)
-{
- int idx = (__raw_readl(FRQCR) >> 20) & 0x0000000f;
- clk->rate = clk->parent->rate / ifc_divisors[idx];
-}
+static unsigned int div2[] = { 1, 1, 2, 1, 1, 4, 1, 6,
+ 1, 1, 1, 16, 1, 24, 1, 1 };
-static struct clk_ops sh7757_cpu_clk_ops = {
- .recalc = cpu_clk_recalc,
+static struct clk_div_mult_table div4_div_mult_table = {
+ .divisors = div2,
+ .nr_divisors = ARRAY_SIZE(div2),
};
-static struct clk_ops *sh7757_clk_ops[] = {
- &sh7757_master_clk_ops,
- &sh7757_module_clk_ops,
- &sh7757_bus_clk_ops,
- &sh7757_cpu_clk_ops,
+static struct clk_div4_table div4_table = {
+ .div_mult_table = &div4_div_mult_table,
};
-void __init arch_init_clk_ops(struct clk_ops **ops, int idx)
-{
- if (idx < ARRAY_SIZE(sh7757_clk_ops))
- *ops = sh7757_clk_ops[idx];
-}
+enum { DIV4_I, DIV4_SH, DIV4_P, DIV4_NR };
-static void shyway_clk_recalc(struct clk *clk)
-{
- int idx = (__raw_readl(FRQCR) >> 12) & 0x0000000f;
- clk->rate = clk->parent->rate / sfc_divisors[idx];
-}
-
-static struct clk_ops sh7757_shyway_clk_ops = {
- .recalc = shyway_clk_recalc,
-};
+#define DIV4(_bit, _mask, _flags) \
+ SH_CLK_DIV4(&pll_clk, FRQCR, _bit, _mask, _flags)
-static struct clk sh7757_shyway_clk = {
- .flags = CLK_ENABLE_ON_INIT,
- .ops = &sh7757_shyway_clk_ops,
+struct clk div4_clks[DIV4_NR] = {
+ /*
+ * The P clock is always enabled, because some P clock modules are
+ * used by the Host PC.
+ */
+ [DIV4_P] = DIV4(0, 0x2800, CLK_ENABLE_ON_INIT),
+ [DIV4_SH] = DIV4(12, 0x00a0, CLK_ENABLE_ON_INIT),
+ [DIV4_I] = DIV4(20, 0x0004, CLK_ENABLE_ON_INIT),
};
-/*
- * Additional sh7757-specific on-chip clocks that aren't already part of the
- * clock framework
- */
-static struct clk *sh7757_onchip_clocks[] = {
- &sh7757_shyway_clk,
+#define MSTPCR0 0xffc80030
+#define MSTPCR1 0xffc80034
+
+enum { MSTP004, MSTP000, MSTP114, MSTP113, MSTP112,
+ MSTP111, MSTP110, MSTP103, MSTP102,
+ MSTP_NR };
+
+static struct clk mstp_clks[MSTP_NR] = {
+ /* MSTPCR0 */
+ [MSTP004] = SH_CLK_MSTP32(&div4_clks[DIV4_P], MSTPCR0, 4, 0),
+ [MSTP000] = SH_CLK_MSTP32(&div4_clks[DIV4_P], MSTPCR0, 0, 0),
+
+ /* MSTPCR1 */
+ [MSTP114] = SH_CLK_MSTP32(&div4_clks[DIV4_P], MSTPCR1, 14, 0),
+ [MSTP113] = SH_CLK_MSTP32(&div4_clks[DIV4_P], MSTPCR1, 13, 0),
+ [MSTP112] = SH_CLK_MSTP32(&div4_clks[DIV4_P], MSTPCR1, 12, 0),
+ [MSTP111] = SH_CLK_MSTP32(&div4_clks[DIV4_P], MSTPCR1, 11, 0),
+ [MSTP110] = SH_CLK_MSTP32(&div4_clks[DIV4_P], MSTPCR1, 10, 0),
+ [MSTP103] = SH_CLK_MSTP32(&div4_clks[DIV4_P], MSTPCR1, 3, 0),
+ [MSTP102] = SH_CLK_MSTP32(&div4_clks[DIV4_P], MSTPCR1, 2, 0),
};
#define CLKDEV_CON_ID(_id, _clk) { .con_id = _id, .clk = _clk }
static struct clk_lookup lookups[] = {
/* main clocks */
- CLKDEV_CON_ID("shyway_clk", &sh7757_shyway_clk),
+ CLKDEV_CON_ID("extal", &extal_clk),
+ CLKDEV_CON_ID("pll_clk", &pll_clk),
+
+ /* DIV4 clocks */
+ CLKDEV_CON_ID("peripheral_clk", &div4_clks[DIV4_P]),
+ CLKDEV_CON_ID("shyway_clk", &div4_clks[DIV4_SH]),
+ CLKDEV_CON_ID("cpu_clk", &div4_clks[DIV4_I]),
+
+ /* MSTP32 clocks */
+ CLKDEV_CON_ID("sdhi0", &mstp_clks[MSTP004]),
+ CLKDEV_CON_ID("riic", &mstp_clks[MSTP000]),
+ {
+ /* TMU0 */
+ .dev_id = "sh_tmu.0",
+ .con_id = "tmu_fck",
+ .clk = &mstp_clks[MSTP113],
+ }, {
+ /* TMU1 */
+ .dev_id = "sh_tmu.1",
+ .con_id = "tmu_fck",
+ .clk = &mstp_clks[MSTP114],
+ },
+ {
+ /* SCIF4 (but the platform device ID is 2) */
+ .dev_id = "sh-sci.2",
+ .con_id = "sci_fck",
+ .clk = &mstp_clks[MSTP112],
+ }, {
+ /* SCIF3 */
+ .dev_id = "sh-sci.1",
+ .con_id = "sci_fck",
+ .clk = &mstp_clks[MSTP111],
+ }, {
+ /* SCIF2 */
+ .dev_id = "sh-sci.0",
+ .con_id = "sci_fck",
+ .clk = &mstp_clks[MSTP110],
+ },
+ CLKDEV_CON_ID("usb0", &mstp_clks[MSTP102]),
};
-static int __init sh7757_clk_init(void)
+int __init arch_clk_init(void)
{
- struct clk *clk = clk_get(NULL, "master_clk");
- int i;
-
- for (i = 0; i < ARRAY_SIZE(sh7757_onchip_clocks); i++) {
- struct clk *clkp = sh7757_onchip_clocks[i];
+ int i, ret = 0;
- clkp->parent = clk;
- clk_register(clkp);
- clk_enable(clkp);
- }
+ for (i = 0; i < ARRAY_SIZE(clks); i++)
+ ret |= clk_register(clks[i]);
+ for (i = 0; i < ARRAY_SIZE(lookups); i++)
+ clkdev_add(&lookups[i]);
- /*
- * Now that we have the rest of the clocks registered, we need to
- * force the parent clock to propagate so that these clocks will
- * automatically figure out their rate. We cheat by handing the
- * parent clock its current rate and forcing child propagation.
- */
- clk_set_rate(clk, clk_get_rate(clk));
+ if (!ret)
+ ret = sh_clk_div4_register(div4_clks, ARRAY_SIZE(div4_clks),
+ &div4_table);
+ if (!ret)
+ ret = sh_clk_mstp32_register(mstp_clks, MSTP_NR);
- clk_put(clk);
-
- clkdev_add_table(lookups, ARRAY_SIZE(lookups));
-
- return 0;
+ return ret;
}
-arch_initcall(sh7757_clk_init);
-
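The new clkdev table above is the producer side of the MSTP gate clocks: each entry maps a (dev_id, con_id) pair such as "sh_tmu.0"/"tmu_fck" onto one MSTPCR bit. The consumer side, sketched below under the assumption of a generic platform driver probe (the function name and use of the TMU con_id are illustrative), simply looks the clock up by those names and enables it before touching the block's registers.

#include <linux/clk.h>
#include <linux/err.h>
#include <linux/platform_device.h>

static int my_probe(struct platform_device *pdev)
{
	struct clk *fck;

	/* For "sh_tmu.0" this resolves to mstp_clks[MSTP113] via the table above. */
	fck = clk_get(&pdev->dev, "tmu_fck");
	if (IS_ERR(fck))
		return PTR_ERR(fck);

	/* Clearing the MSTP bit ungates the block; a real driver keeps the
	 * clock enabled until remove() and balances with clk_disable()/clk_put(). */
	clk_enable(fck);

	/* ... device setup would go here ... */

	platform_set_drvdata(pdev, fck);
	return 0;
}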
diff --git a/arch/sh/kernel/cpu/sh4a/clock-shx3.c b/arch/sh/kernel/cpu/sh4a/clock-shx3.c
index 236a6282d778..4f70df6b6169 100644
--- a/arch/sh/kernel/cpu/sh4a/clock-shx3.c
+++ b/arch/sh/kernel/cpu/sh4a/clock-shx3.c
@@ -5,7 +5,7 @@
*
* Copyright (C) 2006-2007 Renesas Technology Corp.
* Copyright (C) 2006-2007 Renesas Solutions Corp.
- * Copyright (C) 2006-2007 Paul Mundt
+ * Copyright (C) 2006-2010 Paul Mundt
*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file "COPYING" in the main directory of this archive
@@ -18,120 +18,179 @@
#include <asm/clock.h>
#include <asm/freq.h>
-static int ifc_divisors[] = { 1, 2, 4 ,6 };
-static int bfc_divisors[] = { 1, 1, 1, 1, 1, 12, 16, 18, 24, 32, 36, 48 };
-static int pfc_divisors[] = { 1, 1, 1, 1, 1, 1, 1, 18, 24, 32, 36, 48 };
-static int cfc_divisors[] = { 1, 1, 4, 6 };
-
-#define IFC_POS 28
-#define IFC_MSK 0x0003
-#define BFC_MSK 0x000f
-#define PFC_MSK 0x000f
-#define CFC_MSK 0x0003
-#define BFC_POS 16
-#define PFC_POS 0
-#define CFC_POS 20
-
-static void master_clk_init(struct clk *clk)
-{
- clk->rate *= pfc_divisors[(__raw_readl(FRQCR) >> PFC_POS) & PFC_MSK];
-}
-
-static struct clk_ops shx3_master_clk_ops = {
- .init = master_clk_init,
+/*
+ * Default rate for the root input clock, reset this with clk_set_rate()
+ * from the platform code.
+ */
+static struct clk extal_clk = {
+ .rate = 16666666,
};
-static unsigned long module_clk_recalc(struct clk *clk)
+static unsigned long pll_recalc(struct clk *clk)
{
- int idx = ((__raw_readl(FRQCR) >> PFC_POS) & PFC_MSK);
- return clk->parent->rate / pfc_divisors[idx];
+ /* PLL1 has a fixed x72 multiplier. */
+ return clk->parent->rate * 72;
}
-static struct clk_ops shx3_module_clk_ops = {
- .recalc = module_clk_recalc,
+static struct clk_ops pll_clk_ops = {
+ .recalc = pll_recalc,
};
-static unsigned long bus_clk_recalc(struct clk *clk)
-{
- int idx = ((__raw_readl(FRQCR) >> BFC_POS) & BFC_MSK);
- return clk->parent->rate / bfc_divisors[idx];
-}
+static struct clk pll_clk = {
+ .ops = &pll_clk_ops,
+ .parent = &extal_clk,
+ .flags = CLK_ENABLE_ON_INIT,
+};
-static struct clk_ops shx3_bus_clk_ops = {
- .recalc = bus_clk_recalc,
+static struct clk *clks[] = {
+ &extal_clk,
+ &pll_clk,
};
-static unsigned long cpu_clk_recalc(struct clk *clk)
-{
- int idx = ((__raw_readl(FRQCR) >> IFC_POS) & IFC_MSK);
- return clk->parent->rate / ifc_divisors[idx];
-}
+static unsigned int div2[] = { 1, 2, 4, 6, 8, 12, 16, 18,
+ 24, 32, 36, 48 };
-static struct clk_ops shx3_cpu_clk_ops = {
- .recalc = cpu_clk_recalc,
+static struct clk_div_mult_table div4_div_mult_table = {
+ .divisors = div2,
+ .nr_divisors = ARRAY_SIZE(div2),
};
-static struct clk_ops *shx3_clk_ops[] = {
- &shx3_master_clk_ops,
- &shx3_module_clk_ops,
- &shx3_bus_clk_ops,
- &shx3_cpu_clk_ops,
+static struct clk_div4_table div4_table = {
+ .div_mult_table = &div4_div_mult_table,
};
-void __init arch_init_clk_ops(struct clk_ops **ops, int idx)
-{
- if (idx < ARRAY_SIZE(shx3_clk_ops))
- *ops = shx3_clk_ops[idx];
-}
+enum { DIV4_I, DIV4_SH, DIV4_B, DIV4_DDR, DIV4_SHA, DIV4_P, DIV4_NR };
-static unsigned long shyway_clk_recalc(struct clk *clk)
-{
- int idx = ((__raw_readl(FRQCR) >> CFC_POS) & CFC_MSK);
- return clk->parent->rate / cfc_divisors[idx];
-}
+#define DIV4(_bit, _mask, _flags) \
+ SH_CLK_DIV4(&pll_clk, FRQMR1, _bit, _mask, _flags)
-static struct clk_ops shx3_shyway_clk_ops = {
- .recalc = shyway_clk_recalc,
+struct clk div4_clks[DIV4_NR] = {
+ [DIV4_P] = DIV4(0, 0x0f80, 0),
+ [DIV4_SHA] = DIV4(4, 0x0ff0, 0),
+ [DIV4_DDR] = DIV4(12, 0x000c, CLK_ENABLE_ON_INIT),
+ [DIV4_B] = DIV4(16, 0x0fe0, CLK_ENABLE_ON_INIT),
+ [DIV4_SH] = DIV4(20, 0x000c, CLK_ENABLE_ON_INIT),
+ [DIV4_I] = DIV4(28, 0x000e, CLK_ENABLE_ON_INIT),
};
-static struct clk shx3_shyway_clk = {
- .flags = CLK_ENABLE_ON_INIT,
- .ops = &shx3_shyway_clk_ops,
-};
-
-/*
- * Additional SHx3-specific on-chip clocks that aren't already part of the
- * clock framework
- */
-static struct clk *shx3_onchip_clocks[] = {
- &shx3_shyway_clk,
+#define MSTPCR0 0xffc00030
+#define MSTPCR1 0xffc00034
+
+enum { MSTP027, MSTP026, MSTP025, MSTP024,
+ MSTP009, MSTP008, MSTP003, MSTP002,
+ MSTP001, MSTP000, MSTP119, MSTP105,
+ MSTP104, MSTP_NR };
+
+static struct clk mstp_clks[MSTP_NR] = {
+ /* MSTPCR0 */
+ [MSTP027] = SH_CLK_MSTP32(&div4_clks[DIV4_P], MSTPCR0, 27, 0),
+ [MSTP026] = SH_CLK_MSTP32(&div4_clks[DIV4_P], MSTPCR0, 26, 0),
+ [MSTP025] = SH_CLK_MSTP32(&div4_clks[DIV4_P], MSTPCR0, 25, 0),
+ [MSTP024] = SH_CLK_MSTP32(&div4_clks[DIV4_P], MSTPCR0, 24, 0),
+ [MSTP009] = SH_CLK_MSTP32(&div4_clks[DIV4_P], MSTPCR0, 9, 0),
+ [MSTP008] = SH_CLK_MSTP32(&div4_clks[DIV4_P], MSTPCR0, 8, 0),
+ [MSTP003] = SH_CLK_MSTP32(&div4_clks[DIV4_P], MSTPCR0, 3, 0),
+ [MSTP002] = SH_CLK_MSTP32(&div4_clks[DIV4_P], MSTPCR0, 2, 0),
+ [MSTP001] = SH_CLK_MSTP32(&div4_clks[DIV4_P], MSTPCR0, 1, 0),
+ [MSTP000] = SH_CLK_MSTP32(&div4_clks[DIV4_P], MSTPCR0, 0, 0),
+
+ /* MSTPCR1 */
+ [MSTP119] = SH_CLK_MSTP32(NULL, MSTPCR1, 19, 0),
+ [MSTP105] = SH_CLK_MSTP32(NULL, MSTPCR1, 5, 0),
+ [MSTP104] = SH_CLK_MSTP32(NULL, MSTPCR1, 4, 0),
};
#define CLKDEV_CON_ID(_id, _clk) { .con_id = _id, .clk = _clk }
static struct clk_lookup lookups[] = {
/* main clocks */
- CLKDEV_CON_ID("shyway_clk", &shx3_shyway_clk),
+ CLKDEV_CON_ID("extal", &extal_clk),
+ CLKDEV_CON_ID("pll_clk", &pll_clk),
+
+ /* DIV4 clocks */
+ CLKDEV_CON_ID("peripheral_clk", &div4_clks[DIV4_P]),
+ CLKDEV_CON_ID("shywaya_clk", &div4_clks[DIV4_SHA]),
+ CLKDEV_CON_ID("ddr_clk", &div4_clks[DIV4_DDR]),
+ CLKDEV_CON_ID("bus_clk", &div4_clks[DIV4_B]),
+ CLKDEV_CON_ID("shyway_clk", &div4_clks[DIV4_SH]),
+ CLKDEV_CON_ID("cpu_clk", &div4_clks[DIV4_I]),
+
+ /* MSTP32 clocks */
+ {
+ /* SCIF3 */
+ .dev_id = "sh-sci.3",
+ .con_id = "sci_fck",
+ .clk = &mstp_clks[MSTP027],
+ }, {
+ /* SCIF2 */
+ .dev_id = "sh-sci.2",
+ .con_id = "sci_fck",
+ .clk = &mstp_clks[MSTP026],
+ }, {
+ /* SCIF1 */
+ .dev_id = "sh-sci.1",
+ .con_id = "sci_fck",
+ .clk = &mstp_clks[MSTP025],
+ }, {
+ /* SCIF0 */
+ .dev_id = "sh-sci.0",
+ .con_id = "sci_fck",
+ .clk = &mstp_clks[MSTP024],
+ },
+ CLKDEV_CON_ID("h8ex_fck", &mstp_clks[MSTP003]),
+ CLKDEV_CON_ID("csm_fck", &mstp_clks[MSTP002]),
+ CLKDEV_CON_ID("fe1_fck", &mstp_clks[MSTP001]),
+ CLKDEV_CON_ID("fe0_fck", &mstp_clks[MSTP000]),
+ {
+ /* TMU0 */
+ .dev_id = "sh_tmu.0",
+ .con_id = "tmu_fck",
+ .clk = &mstp_clks[MSTP008],
+ }, {
+ /* TMU1 */
+ .dev_id = "sh_tmu.1",
+ .con_id = "tmu_fck",
+ .clk = &mstp_clks[MSTP008],
+ }, {
+ /* TMU2 */
+ .dev_id = "sh_tmu.2",
+ .con_id = "tmu_fck",
+ .clk = &mstp_clks[MSTP008],
+ }, {
+ /* TMU3 */
+ .dev_id = "sh_tmu.3",
+ .con_id = "tmu_fck",
+ .clk = &mstp_clks[MSTP009],
+ }, {
+ /* TMU4 */
+ .dev_id = "sh_tmu.4",
+ .con_id = "tmu_fck",
+ .clk = &mstp_clks[MSTP009],
+ }, {
+ /* TMU5 */
+ .dev_id = "sh_tmu.5",
+ .con_id = "tmu_fck",
+ .clk = &mstp_clks[MSTP009],
+ },
+ CLKDEV_CON_ID("hudi_fck", &mstp_clks[MSTP119]),
+ CLKDEV_CON_ID("dmac_11_6_fck", &mstp_clks[MSTP105]),
+ CLKDEV_CON_ID("dmac_5_0_fck", &mstp_clks[MSTP104]),
};
int __init arch_clk_init(void)
{
- struct clk *clk;
int i, ret = 0;
- cpg_clk_init();
-
- clk = clk_get(NULL, "master_clk");
- for (i = 0; i < ARRAY_SIZE(shx3_onchip_clocks); i++) {
- struct clk *clkp = shx3_onchip_clocks[i];
-
- clkp->parent = clk;
- ret |= clk_register(clkp);
- }
-
- clk_put(clk);
+ for (i = 0; i < ARRAY_SIZE(clks); i++)
+ ret |= clk_register(clks[i]);
+ for (i = 0; i < ARRAY_SIZE(lookups); i++)
+ clkdev_add(&lookups[i]);
- clkdev_add_table(lookups, ARRAY_SIZE(lookups));
+ if (!ret)
+ ret = sh_clk_div4_register(div4_clks, ARRAY_SIZE(div4_clks),
+ &div4_table);
+ if (!ret)
+ ret = sh_clk_mstp32_register(mstp_clks, MSTP_NR);
return ret;
}
diff --git a/arch/sh/kernel/cpu/sh4a/intc-shx3.c b/arch/sh/kernel/cpu/sh4a/intc-shx3.c
new file mode 100644
index 000000000000..78c971486b4e
--- /dev/null
+++ b/arch/sh/kernel/cpu/sh4a/intc-shx3.c
@@ -0,0 +1,34 @@
+/*
+ * Shared support for SH-X3 interrupt controllers.
+ *
+ * Copyright (C) 2009 - 2010 Paul Mundt
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ */
+#include <linux/irq.h>
+#include <linux/io.h>
+#include <linux/init.h>
+
+#define INTACK 0xfe4100b8
+#define INTACKCLR 0xfe4100bc
+#define INTC_USERIMASK 0xfe411000
+
+#ifdef CONFIG_INTC_BALANCING
+unsigned int irq_lookup(unsigned int irq)
+{
+ return __raw_readl(INTACK) & 1 ? irq : NO_IRQ_IGNORE;
+}
+
+void irq_finish(unsigned int irq)
+{
+ __raw_writel(irq2evt(irq), INTACKCLR);
+}
+#endif
+
+static int __init shx3_irq_setup(void)
+{
+ return register_intc_userimask(INTC_USERIMASK);
+}
+arch_initcall(shx3_irq_setup);
diff --git a/arch/sh/kernel/cpu/sh4a/perf_event.c b/arch/sh/kernel/cpu/sh4a/perf_event.c
index eddc21973fa1..580276525731 100644
--- a/arch/sh/kernel/cpu/sh4a/perf_event.c
+++ b/arch/sh/kernel/cpu/sh4a/perf_event.c
@@ -1,7 +1,7 @@
/*
* Performance events support for SH-4A performance counters
*
- * Copyright (C) 2009 Paul Mundt
+ * Copyright (C) 2009, 2010 Paul Mundt
*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file "COPYING" in the main directory of this archive
@@ -22,7 +22,25 @@
#define CCBR_CMDS (1 << 1)
#define CCBR_PPCE (1 << 0)
+#ifdef CONFIG_CPU_SHX3
+/*
+ * The PMCAT location for SH-X3 CPUs was quietly moved, while the CCBR
+ * and PMCTR locations remain tentatively constant. This change remains
+ * wholly undocumented, and was simply found through trial and error.
+ *
+ * Early cuts of SH-X3 still appear to use the SH-X/SH-X2 locations, and
+ * it's unclear when this ceased to be the case. For now we always use
+ * the new location (if future parts keep up with this trend then
+ * scanning for them at runtime also remains a viable option.)
+ *
+ * The gap in the register space also suggests that there are other
+ * undocumented counters, so this will need to be revisited at a later
+ * point in time.
+ */
+#define PPC_PMCAT 0xfc100240
+#else
#define PPC_PMCAT 0xfc100080
+#endif
#define PMCAT_OVF3 (1 << 27)
#define PMCAT_CNN3 (1 << 26)
@@ -241,7 +259,7 @@ static void sh4a_pmu_enable_all(void)
}
static struct sh_pmu sh4a_pmu = {
- .name = "SH-4A",
+ .name = "sh4a",
.num_events = 2,
.event_map = sh4a_event_map,
.max_events = ARRAY_SIZE(sh4a_general_events),
diff --git a/arch/sh/kernel/cpu/sh4a/pinmux-sh7757.c b/arch/sh/kernel/cpu/sh4a/pinmux-sh7757.c
index ed23b155c097..4c74bd04bba4 100644
--- a/arch/sh/kernel/cpu/sh4a/pinmux-sh7757.c
+++ b/arch/sh/kernel/cpu/sh4a/pinmux-sh7757.c
@@ -1,11 +1,11 @@
/*
- * SH7757 (A0 step) Pinmux
+ * SH7757 (B0 step) Pinmux
*
- * Copyright (C) 2009 Renesas Solutions Corp.
+ * Copyright (C) 2009-2010 Renesas Solutions Corp.
*
* Author : Yoshihiro Shimoda <shimoda.yoshihiro@renesas.com>
*
- * Based on SH7757 Pinmux
+ * Based on SH7723 Pinmux
* Copyright (C) 2008 Magnus Damm
*
* This file is subject to the terms and conditions of the GNU General Public
@@ -40,27 +40,27 @@ enum {
PTH3_DATA, PTH2_DATA, PTH1_DATA, PTH0_DATA,
PTI7_DATA, PTI6_DATA, PTI5_DATA, PTI4_DATA,
PTI3_DATA, PTI2_DATA, PTI1_DATA, PTI0_DATA,
- PTJ7_DATA, PTJ6_DATA, PTJ5_DATA, PTJ4_DATA,
+ PTJ6_DATA, PTJ5_DATA, PTJ4_DATA,
PTJ3_DATA, PTJ2_DATA, PTJ1_DATA, PTJ0_DATA,
PTK7_DATA, PTK6_DATA, PTK5_DATA, PTK4_DATA,
PTK3_DATA, PTK2_DATA, PTK1_DATA, PTK0_DATA,
- PTL7_DATA, PTL6_DATA, PTL5_DATA, PTL4_DATA,
+ PTL6_DATA, PTL5_DATA, PTL4_DATA,
PTL3_DATA, PTL2_DATA, PTL1_DATA, PTL0_DATA,
- PTM6_DATA, PTM5_DATA, PTM4_DATA,
+ PTM7_DATA, PTM6_DATA, PTM5_DATA, PTM4_DATA,
PTM3_DATA, PTM2_DATA, PTM1_DATA, PTM0_DATA,
- PTN7_DATA, PTN6_DATA, PTN5_DATA, PTN4_DATA,
+ PTN6_DATA, PTN5_DATA, PTN4_DATA,
PTN3_DATA, PTN2_DATA, PTN1_DATA, PTN0_DATA,
PTO7_DATA, PTO6_DATA, PTO5_DATA, PTO4_DATA,
PTO3_DATA, PTO2_DATA, PTO1_DATA, PTO0_DATA,
- PTP6_DATA, PTP5_DATA, PTP4_DATA,
+ PTP7_DATA, PTP6_DATA, PTP5_DATA, PTP4_DATA,
PTP3_DATA, PTP2_DATA, PTP1_DATA, PTP0_DATA,
- PTQ6_DATA, PTQ5_DATA, PTQ4_DATA,
+ PTQ6_DATA, PTQ5_DATA, PTQ4_DATA,
PTQ3_DATA, PTQ2_DATA, PTQ1_DATA, PTQ0_DATA,
PTR7_DATA, PTR6_DATA, PTR5_DATA, PTR4_DATA,
PTR3_DATA, PTR2_DATA, PTR1_DATA, PTR0_DATA,
PTS7_DATA, PTS6_DATA, PTS5_DATA, PTS4_DATA,
PTS3_DATA, PTS2_DATA, PTS1_DATA, PTS0_DATA,
- PTT5_DATA, PTT4_DATA,
+ PTT7_DATA, PTT6_DATA, PTT5_DATA, PTT4_DATA,
PTT3_DATA, PTT2_DATA, PTT1_DATA, PTT0_DATA,
PTU7_DATA, PTU6_DATA, PTU5_DATA, PTU4_DATA,
PTU3_DATA, PTU2_DATA, PTU1_DATA, PTU0_DATA,
@@ -95,27 +95,27 @@ enum {
PTH3_IN, PTH2_IN, PTH1_IN, PTH0_IN,
PTI7_IN, PTI6_IN, PTI5_IN, PTI4_IN,
PTI3_IN, PTI2_IN, PTI1_IN, PTI0_IN,
- PTJ7_IN, PTJ6_IN, PTJ5_IN, PTJ4_IN,
+ PTJ6_IN, PTJ5_IN, PTJ4_IN,
PTJ3_IN, PTJ2_IN, PTJ1_IN, PTJ0_IN,
PTK7_IN, PTK6_IN, PTK5_IN, PTK4_IN,
PTK3_IN, PTK2_IN, PTK1_IN, PTK0_IN,
- PTL7_IN, PTL6_IN, PTL5_IN, PTL4_IN,
+ PTL6_IN, PTL5_IN, PTL4_IN,
PTL3_IN, PTL2_IN, PTL1_IN, PTL0_IN,
- PTM6_IN, PTM5_IN, PTM4_IN,
+ PTM7_IN, PTM6_IN, PTM5_IN, PTM4_IN,
PTM3_IN, PTM2_IN, PTM1_IN, PTM0_IN,
- PTN7_IN, PTN6_IN, PTN5_IN, PTN4_IN,
+ PTN6_IN, PTN5_IN, PTN4_IN,
PTN3_IN, PTN2_IN, PTN1_IN, PTN0_IN,
PTO7_IN, PTO6_IN, PTO5_IN, PTO4_IN,
PTO3_IN, PTO2_IN, PTO1_IN, PTO0_IN,
- PTP6_IN, PTP5_IN, PTP4_IN,
+ PTP7_IN, PTP6_IN, PTP5_IN, PTP4_IN,
PTP3_IN, PTP2_IN, PTP1_IN, PTP0_IN,
- PTQ6_IN, PTQ5_IN, PTQ4_IN,
+ PTQ6_IN, PTQ5_IN, PTQ4_IN,
PTQ3_IN, PTQ2_IN, PTQ1_IN, PTQ0_IN,
PTR7_IN, PTR6_IN, PTR5_IN, PTR4_IN,
PTR3_IN, PTR2_IN, PTR1_IN, PTR0_IN,
PTS7_IN, PTS6_IN, PTS5_IN, PTS4_IN,
PTS3_IN, PTS2_IN, PTS1_IN, PTS0_IN,
- PTT5_IN, PTT4_IN,
+ PTT7_IN, PTT6_IN, PTT5_IN, PTT4_IN,
PTT3_IN, PTT2_IN, PTT1_IN, PTT0_IN,
PTU7_IN, PTU6_IN, PTU5_IN, PTU4_IN,
PTU3_IN, PTU2_IN, PTU1_IN, PTU0_IN,
@@ -132,16 +132,43 @@ enum {
PINMUX_INPUT_END,
PINMUX_INPUT_PULLUP_BEGIN,
+ PTA7_IN_PU, PTA6_IN_PU, PTA5_IN_PU, PTA4_IN_PU,
+ PTA3_IN_PU, PTA2_IN_PU, PTA1_IN_PU, PTA0_IN_PU,
+ PTD7_IN_PU, PTD6_IN_PU, PTD5_IN_PU, PTD4_IN_PU,
+ PTD3_IN_PU, PTD2_IN_PU, PTD1_IN_PU, PTD0_IN_PU,
+ PTE7_IN_PU, PTE6_IN_PU, PTE5_IN_PU, PTE4_IN_PU,
+ PTE3_IN_PU, PTE2_IN_PU, PTE1_IN_PU, PTE0_IN_PU,
+ PTF7_IN_PU, PTF6_IN_PU, PTF5_IN_PU, PTF4_IN_PU,
+ PTF3_IN_PU, PTF2_IN_PU, PTF1_IN_PU, PTF0_IN_PU,
+ PTG7_IN_PU, PTG6_IN_PU, PTG4_IN_PU,
+ PTH7_IN_PU, PTH6_IN_PU, PTH5_IN_PU, PTH4_IN_PU,
+ PTH3_IN_PU, PTH2_IN_PU, PTH1_IN_PU, PTH0_IN_PU,
+ PTI7_IN_PU, PTI6_IN_PU, PTI4_IN_PU,
+ PTI3_IN_PU, PTI2_IN_PU, PTI1_IN_PU, PTI0_IN_PU,
+ PTJ6_IN_PU, PTJ5_IN_PU, PTJ4_IN_PU,
+ PTJ3_IN_PU, PTJ2_IN_PU, PTJ1_IN_PU, PTJ0_IN_PU,
+ PTK7_IN_PU, PTK6_IN_PU, PTK5_IN_PU, PTK4_IN_PU,
+ PTK3_IN_PU, PTK2_IN_PU, PTK1_IN_PU, PTK0_IN_PU,
+ PTL6_IN_PU, PTL5_IN_PU, PTL4_IN_PU,
+ PTL3_IN_PU, PTL2_IN_PU, PTL1_IN_PU, PTL0_IN_PU,
+ PTM7_IN_PU, PTM6_IN_PU, PTM5_IN_PU, PTM4_IN_PU,
+ PTN4_IN_PU,
+ PTN3_IN_PU, PTN2_IN_PU, PTN1_IN_PU, PTN0_IN_PU,
+ PTO7_IN_PU, PTO6_IN_PU, PTO5_IN_PU, PTO4_IN_PU,
+ PTO3_IN_PU, PTO2_IN_PU, PTO1_IN_PU, PTO0_IN_PU,
+ PTT7_IN_PU, PTT6_IN_PU, PTT5_IN_PU, PTT4_IN_PU,
+ PTT3_IN_PU, PTT2_IN_PU, PTT1_IN_PU, PTT0_IN_PU,
PTU7_IN_PU, PTU6_IN_PU, PTU5_IN_PU, PTU4_IN_PU,
PTU3_IN_PU, PTU2_IN_PU, PTU1_IN_PU, PTU0_IN_PU,
PTV7_IN_PU, PTV6_IN_PU, PTV5_IN_PU, PTV4_IN_PU,
- PTV3_IN_PU, PTV2_IN_PU, PTV1_IN_PU, PTV0_IN_PU,
- PTW7_IN_PU, PTW6_IN_PU, PTW5_IN_PU, PTW4_IN_PU,
- PTW3_IN_PU, PTW2_IN_PU, PTW1_IN_PU, PTW0_IN_PU,
+ PTV3_IN_PU, PTV2_IN_PU,
+ PTW1_IN_PU, PTW0_IN_PU,
PTX7_IN_PU, PTX6_IN_PU, PTX5_IN_PU, PTX4_IN_PU,
PTX3_IN_PU, PTX2_IN_PU, PTX1_IN_PU, PTX0_IN_PU,
PTY7_IN_PU, PTY6_IN_PU, PTY5_IN_PU, PTY4_IN_PU,
PTY3_IN_PU, PTY2_IN_PU, PTY1_IN_PU, PTY0_IN_PU,
+ PTZ7_IN_PU, PTZ6_IN_PU, PTZ5_IN_PU, PTZ4_IN_PU,
+ PTZ3_IN_PU, PTZ2_IN_PU, PTZ1_IN_PU, PTZ0_IN_PU,
PINMUX_INPUT_PULLUP_END,
PINMUX_OUTPUT_BEGIN,
@@ -163,27 +190,27 @@ enum {
PTH3_OUT, PTH2_OUT, PTH1_OUT, PTH0_OUT,
PTI7_OUT, PTI6_OUT, PTI5_OUT, PTI4_OUT,
PTI3_OUT, PTI2_OUT, PTI1_OUT, PTI0_OUT,
- PTJ7_OUT, PTJ6_OUT, PTJ5_OUT, PTJ4_OUT,
+ PTJ6_OUT, PTJ5_OUT, PTJ4_OUT,
PTJ3_OUT, PTJ2_OUT, PTJ1_OUT, PTJ0_OUT,
PTK7_OUT, PTK6_OUT, PTK5_OUT, PTK4_OUT,
PTK3_OUT, PTK2_OUT, PTK1_OUT, PTK0_OUT,
- PTL7_OUT, PTL6_OUT, PTL5_OUT, PTL4_OUT,
+ PTL6_OUT, PTL5_OUT, PTL4_OUT,
PTL3_OUT, PTL2_OUT, PTL1_OUT, PTL0_OUT,
- PTM6_OUT, PTM5_OUT, PTM4_OUT,
+ PTM7_OUT, PTM6_OUT, PTM5_OUT, PTM4_OUT,
PTM3_OUT, PTM2_OUT, PTM1_OUT, PTM0_OUT,
- PTN7_OUT, PTN6_OUT, PTN5_OUT, PTN4_OUT,
+ PTN6_OUT, PTN5_OUT, PTN4_OUT,
PTN3_OUT, PTN2_OUT, PTN1_OUT, PTN0_OUT,
PTO7_OUT, PTO6_OUT, PTO5_OUT, PTO4_OUT,
PTO3_OUT, PTO2_OUT, PTO1_OUT, PTO0_OUT,
- PTP6_OUT, PTP5_OUT, PTP4_OUT,
+ PTP7_OUT, PTP6_OUT, PTP5_OUT, PTP4_OUT,
PTP3_OUT, PTP2_OUT, PTP1_OUT, PTP0_OUT,
- PTQ6_OUT, PTQ5_OUT, PTQ4_OUT,
+ PTQ6_OUT, PTQ5_OUT, PTQ4_OUT,
PTQ3_OUT, PTQ2_OUT, PTQ1_OUT, PTQ0_OUT,
PTR7_OUT, PTR6_OUT, PTR5_OUT, PTR4_OUT,
PTR3_OUT, PTR2_OUT, PTR1_OUT, PTR0_OUT,
PTS7_OUT, PTS6_OUT, PTS5_OUT, PTS4_OUT,
PTS3_OUT, PTS2_OUT, PTS1_OUT, PTS0_OUT,
- PTT5_OUT, PTT4_OUT,
+ PTT7_OUT, PTT6_OUT, PTT5_OUT, PTT4_OUT,
PTT3_OUT, PTT2_OUT, PTT1_OUT, PTT0_OUT,
PTU7_OUT, PTU6_OUT, PTU5_OUT, PTU4_OUT,
PTU3_OUT, PTU2_OUT, PTU1_OUT, PTU0_OUT,
@@ -218,27 +245,27 @@ enum {
PTH3_FN, PTH2_FN, PTH1_FN, PTH0_FN,
PTI7_FN, PTI6_FN, PTI5_FN, PTI4_FN,
PTI3_FN, PTI2_FN, PTI1_FN, PTI0_FN,
- PTJ7_FN, PTJ6_FN, PTJ5_FN, PTJ4_FN,
+ PTJ6_FN, PTJ5_FN, PTJ4_FN,
PTJ3_FN, PTJ2_FN, PTJ1_FN, PTJ0_FN,
PTK7_FN, PTK6_FN, PTK5_FN, PTK4_FN,
PTK3_FN, PTK2_FN, PTK1_FN, PTK0_FN,
- PTL7_FN, PTL6_FN, PTL5_FN, PTL4_FN,
+ PTL6_FN, PTL5_FN, PTL4_FN,
PTL3_FN, PTL2_FN, PTL1_FN, PTL0_FN,
- PTM6_FN, PTM5_FN, PTM4_FN,
+ PTM7_FN, PTM6_FN, PTM5_FN, PTM4_FN,
PTM3_FN, PTM2_FN, PTM1_FN, PTM0_FN,
- PTN7_FN, PTN6_FN, PTN5_FN, PTN4_FN,
+ PTN6_FN, PTN5_FN, PTN4_FN,
PTN3_FN, PTN2_FN, PTN1_FN, PTN0_FN,
PTO7_FN, PTO6_FN, PTO5_FN, PTO4_FN,
PTO3_FN, PTO2_FN, PTO1_FN, PTO0_FN,
- PTP6_FN, PTP5_FN, PTP4_FN,
+ PTP7_FN, PTP6_FN, PTP5_FN, PTP4_FN,
PTP3_FN, PTP2_FN, PTP1_FN, PTP0_FN,
- PTQ6_FN, PTQ5_FN, PTQ4_FN,
+ PTQ6_FN, PTQ5_FN, PTQ4_FN,
PTQ3_FN, PTQ2_FN, PTQ1_FN, PTQ0_FN,
PTR7_FN, PTR6_FN, PTR5_FN, PTR4_FN,
PTR3_FN, PTR2_FN, PTR1_FN, PTR0_FN,
PTS7_FN, PTS6_FN, PTS5_FN, PTS4_FN,
PTS3_FN, PTS2_FN, PTS1_FN, PTS0_FN,
- PTT5_FN, PTT4_FN,
+ PTT7_FN, PTT6_FN, PTT5_FN, PTT4_FN,
PTT3_FN, PTT2_FN, PTT1_FN, PTT0_FN,
PTU7_FN, PTU6_FN, PTU5_FN, PTU4_FN,
PTU3_FN, PTU2_FN, PTU1_FN, PTU0_FN,
@@ -253,181 +280,248 @@ enum {
PTZ7_FN, PTZ6_FN, PTZ5_FN, PTZ4_FN,
PTZ3_FN, PTZ2_FN, PTZ1_FN, PTZ0_FN,
- PS0_15_FN1, PS0_15_FN3,
- PS0_14_FN1, PS0_14_FN3,
- PS0_13_FN1, PS0_13_FN3,
- PS0_12_FN1, PS0_12_FN3,
+ PS0_15_FN1, PS0_15_FN2,
+ PS0_14_FN1, PS0_14_FN2,
+ PS0_13_FN1, PS0_13_FN2,
+ PS0_12_FN1, PS0_12_FN2,
+ PS0_11_FN1, PS0_11_FN2,
+ PS0_10_FN1, PS0_10_FN2,
+ PS0_9_FN1, PS0_9_FN2,
+ PS0_8_FN1, PS0_8_FN2,
PS0_7_FN1, PS0_7_FN2,
PS0_6_FN1, PS0_6_FN2,
PS0_5_FN1, PS0_5_FN2,
PS0_4_FN1, PS0_4_FN2,
PS0_3_FN1, PS0_3_FN2,
PS0_2_FN1, PS0_2_FN2,
- PS0_1_FN1, PS0_1_FN2,
- PS1_7_FN1, PS1_7_FN3,
- PS1_6_FN1, PS1_6_FN3,
+ PS1_10_FN1, PS1_10_FN2,
+ PS1_9_FN1, PS1_9_FN2,
+ PS1_8_FN1, PS1_8_FN2,
+ PS1_2_FN1, PS1_2_FN2,
+
+ PS2_13_FN1, PS2_13_FN2,
+ PS2_12_FN1, PS2_12_FN2,
+ PS2_7_FN1, PS2_7_FN2,
+ PS2_6_FN1, PS2_6_FN2,
+ PS2_5_FN1, PS2_5_FN2,
+ PS2_4_FN1, PS2_4_FN2,
+ PS2_2_FN1, PS2_2_FN2,
+
+ PS3_15_FN1, PS3_15_FN2,
+ PS3_14_FN1, PS3_14_FN2,
+ PS3_13_FN1, PS3_13_FN2,
+ PS3_12_FN1, PS3_12_FN2,
+ PS3_11_FN1, PS3_11_FN2,
+ PS3_10_FN1, PS3_10_FN2,
+ PS3_9_FN1, PS3_9_FN2,
+ PS3_8_FN1, PS3_8_FN2,
+ PS3_7_FN1, PS3_7_FN2,
+ PS3_2_FN1, PS3_2_FN2,
+ PS3_1_FN1, PS3_1_FN2,
- PS2_13_FN1, PS2_13_FN3,
- PS2_12_FN1, PS2_12_FN3,
- PS2_1_FN1, PS2_1_FN2,
- PS2_0_FN1, PS2_0_FN2,
-
- PS4_15_FN1, PS4_15_FN2,
PS4_14_FN1, PS4_14_FN2,
PS4_13_FN1, PS4_13_FN2,
PS4_12_FN1, PS4_12_FN2,
- PS4_11_FN1, PS4_11_FN2,
PS4_10_FN1, PS4_10_FN2,
PS4_9_FN1, PS4_9_FN2,
+ PS4_8_FN1, PS4_8_FN2,
+ PS4_4_FN1, PS4_4_FN2,
PS4_3_FN1, PS4_3_FN2,
PS4_2_FN1, PS4_2_FN2,
PS4_1_FN1, PS4_1_FN2,
PS4_0_FN1, PS4_0_FN2,
+ PS5_11_FN1, PS5_11_FN2,
+ PS5_10_FN1, PS5_10_FN2,
PS5_9_FN1, PS5_9_FN2,
PS5_8_FN1, PS5_8_FN2,
PS5_7_FN1, PS5_7_FN2,
PS5_6_FN1, PS5_6_FN2,
PS5_5_FN1, PS5_5_FN2,
PS5_4_FN1, PS5_4_FN2,
-
- /* AN15 to 8 : EVENT15 to 8 */
- PS6_7_FN_AN, PS6_7_FN_EV,
- PS6_6_FN_AN, PS6_6_FN_EV,
- PS6_5_FN_AN, PS6_5_FN_EV,
- PS6_4_FN_AN, PS6_4_FN_EV,
- PS6_3_FN_AN, PS6_3_FN_EV,
- PS6_2_FN_AN, PS6_2_FN_EV,
- PS6_1_FN_AN, PS6_1_FN_EV,
- PS6_0_FN_AN, PS6_0_FN_EV,
-
+ PS5_3_FN1, PS5_3_FN2,
+ PS5_2_FN1, PS5_2_FN2,
+
+ PS6_15_FN1, PS6_15_FN2,
+ PS6_14_FN1, PS6_14_FN2,
+ PS6_13_FN1, PS6_13_FN2,
+ PS6_12_FN1, PS6_12_FN2,
+ PS6_11_FN1, PS6_11_FN2,
+ PS6_10_FN1, PS6_10_FN2,
+ PS6_9_FN1, PS6_9_FN2,
+ PS6_8_FN1, PS6_8_FN2,
+ PS6_7_FN1, PS6_7_FN2,
+ PS6_6_FN1, PS6_6_FN2,
+ PS6_5_FN1, PS6_5_FN2,
+ PS6_4_FN1, PS6_4_FN2,
+ PS6_3_FN1, PS6_3_FN2,
+ PS6_2_FN1, PS6_2_FN2,
+ PS6_1_FN1, PS6_1_FN2,
+ PS6_0_FN1, PS6_0_FN2,
+
+ PS7_15_FN1, PS7_15_FN2,
+ PS7_14_FN1, PS7_14_FN2,
+ PS7_13_FN1, PS7_13_FN2,
+ PS7_12_FN1, PS7_12_FN2,
+ PS7_11_FN1, PS7_11_FN2,
+ PS7_10_FN1, PS7_10_FN2,
+ PS7_9_FN1, PS7_9_FN2,
+ PS7_8_FN1, PS7_8_FN2,
+ PS7_7_FN1, PS7_7_FN2,
+ PS7_6_FN1, PS7_6_FN2,
+ PS7_5_FN1, PS7_5_FN2,
+ PS7_4_FN1, PS7_4_FN2,
+
+ PS8_15_FN1, PS8_15_FN2,
+ PS8_14_FN1, PS8_14_FN2,
+ PS8_13_FN1, PS8_13_FN2,
+ PS8_12_FN1, PS8_12_FN2,
+ PS8_11_FN1, PS8_11_FN2,
+ PS8_10_FN1, PS8_10_FN2,
+ PS8_9_FN1, PS8_9_FN2,
+ PS8_8_FN1, PS8_8_FN2,
PINMUX_FUNCTION_END,
PINMUX_MARK_BEGIN,
- /* PTA (mobule: LBSC, CPG, LPC) */
+ /* PTA (mobule: LBSC, RGMII) */
BS_MARK, RDWR_MARK, WE1_MARK, RDY_MARK,
- MD10_MARK, MD9_MARK, MD8_MARK,
- LGPIO7_MARK, LGPIO6_MARK, LGPIO5_MARK, LGPIO4_MARK,
- LGPIO3_MARK, LGPIO2_MARK, LGPIO1_MARK, LGPIO0_MARK,
-
- /* PTB (mobule: LBSC, EtherC, SIM, LPC) */
- D15_MARK, D14_MARK, D13_MARK, D12_MARK,
- D11_MARK, D10_MARK, D9_MARK, D8_MARK,
ET0_MDC_MARK, ET0_MDIO_MARK, ET1_MDC_MARK, ET1_MDIO_MARK,
- SIM_D_MARK, SIM_CLK_MARK, SIM_RST_MARK,
- WPSZ1_MARK, WPSZ0_MARK, FWID_MARK, FLSHSZ_MARK,
- LPC_SPIEN_MARK, BASEL_MARK,
- /* PTC (mobule: SD) */
- SD_WP_MARK, SD_CD_MARK, SD_CLK_MARK, SD_CMD_MARK,
- SD_D3_MARK, SD_D2_MARK, SD_D1_MARK, SD_D0_MARK,
+ /* PTB (mobule: INTC, ONFI, TMU) */
+ IRQ15_MARK, IRQ14_MARK, IRQ13_MARK, IRQ12_MARK,
+ IRQ11_MARK, IRQ10_MARK, IRQ9_MARK, IRQ8_MARK,
+ ON_NRE_MARK, ON_NWE_MARK, ON_NWP_MARK, ON_NCE0_MARK,
+ ON_R_B0_MARK, ON_ALE_MARK, ON_CLE_MARK, TCLK_MARK,
- /* PTD (mobule: INTC, SPI0, LBSC, CPG, ADC) */
+ /* PTC (mobule: IRQ, PWMU) */
IRQ7_MARK, IRQ6_MARK, IRQ5_MARK, IRQ4_MARK,
IRQ3_MARK, IRQ2_MARK, IRQ1_MARK, IRQ0_MARK,
- MD6_MARK, MD5_MARK, MD3_MARK, MD2_MARK,
- MD1_MARK, MD0_MARK, ADTRG1_MARK, ADTRG0_MARK,
-
- /* PTE (mobule: EtherC) */
- ET0_CRS_DV_MARK, ET0_TXD1_MARK,
- ET0_TXD0_MARK, ET0_TX_EN_MARK,
- ET0_REF_CLK_MARK, ET0_RXD1_MARK,
- ET0_RXD0_MARK, ET0_RX_ER_MARK,
-
- /* PTF (mobule: EtherC) */
- ET1_CRS_DV_MARK, ET1_TXD1_MARK,
- ET1_TXD0_MARK, ET1_TX_EN_MARK,
- ET1_REF_CLK_MARK, ET1_RXD1_MARK,
- ET1_RXD0_MARK, ET1_RX_ER_MARK,
-
- /* PTG (mobule: SYSTEM, PWMX, LPC) */
- STATUS0_MARK, STATUS1_MARK,
- PWX0_MARK, PWX1_MARK, PWX2_MARK, PWX3_MARK,
- SERIRQ_MARK, CLKRUN_MARK, LPCPD_MARK, LDRQ_MARK,
-
- /* PTH (mobule: TMU, SCIF234, SPI1, SPI0) */
- TCLK_MARK, RXD4_MARK, TXD4_MARK,
+ PWMU0_MARK, PWMU1_MARK, PWMU2_MARK, PWMU3_MARK,
+ PWMU4_MARK, PWMU5_MARK,
+
+ /* PTD (mobule: SPI0, DMAC) */
+ SP0_MOSI_MARK, SP0_MISO_MARK, SP0_SCK_MARK, SP0_SCK_FB_MARK,
+ SP0_SS0_MARK, SP0_SS1_MARK, SP0_SS2_MARK, SP0_SS3_MARK,
+ DREQ0_MARK, DACK0_MARK, TEND0_MARK,
+
+ /* PTE (mobule: RMII) */
+ RMII0_CRS_DV_MARK, RMII0_TXD1_MARK,
+ RMII0_TXD0_MARK, RMII0_TXEN_MARK,
+ RMII0_REFCLK_MARK, RMII0_RXD1_MARK,
+ RMII0_RXD0_MARK, RMII0_RX_ER_MARK,
+
+ /* PTF (mobule: RMII, SerMux) */
+ RMII1_CRS_DV_MARK, RMII1_TXD1_MARK,
+ RMII1_TXD0_MARK, RMII1_TXEN_MARK,
+ RMII1_REFCLK_MARK, RMII1_RXD1_MARK,
+ RMII1_RXD0_MARK, RMII1_RX_ER_MARK,
+ RAC_RI_MARK,
+
+ /* PTG (mobule: system, LBSC, LPC, WDT, LPC, eMMC) */
+ BOOTFMS_MARK, BOOTWP_MARK, A25_MARK, A24_MARK,
+ SERIRQ_MARK, WDTOVF_MARK, LPCPD_MARK, LDRQ_MARK,
+ MMCCLK_MARK, MMCCMD_MARK,
+
+ /* PTH (mobule: SPI1, LPC, DMAC, ADC) */
SP1_MOSI_MARK, SP1_MISO_MARK, SP1_SCK_MARK, SP1_SCK_FB_MARK,
- SP1_SS0_MARK, SP1_SS1_MARK, SP0_SS1_MARK,
+ SP1_SS0_MARK, SP1_SS1_MARK, WP_MARK, FMS0_MARK,
+ TEND1_MARK, DREQ1_MARK, DACK1_MARK, ADTRG1_MARK,
+ ADTRG0_MARK,
- /* PTI (mobule: INTC) */
- IRQ15_MARK, IRQ14_MARK, IRQ13_MARK, IRQ12_MARK,
- IRQ11_MARK, IRQ10_MARK, IRQ9_MARK, IRQ8_MARK,
+ /* PTI (mobule: LBSC, SDHI) */
+ D15_MARK, D14_MARK, D13_MARK, D12_MARK,
+ D11_MARK, D10_MARK, D9_MARK, D8_MARK,
+ SD_WP_MARK, SD_CD_MARK, SD_CLK_MARK, SD_CMD_MARK,
+ SD_D3_MARK, SD_D2_MARK, SD_D1_MARK, SD_D0_MARK,
- /* PTJ (mobule: SCIF234, SERMUX) */
- RXD3_MARK, TXD3_MARK, RXD2_MARK, TXD2_MARK,
- COM1_TXD_MARK, COM1_RXD_MARK, COM1_RTS_MARK, COM1_CTS_MARK,
+ /* PTJ (mobule: SCIF234) */
+ RTS3_MARK, CTS3_MARK, TXD3_MARK, RXD3_MARK,
+ RTS4_MARK, RXD4_MARK, TXD4_MARK,
- /* PTK (mobule: SERMUX) */
+ /* PTK (mobule: SERMUX, LBSC, SCIF) */
COM2_TXD_MARK, COM2_RXD_MARK, COM2_RTS_MARK, COM2_CTS_MARK,
- COM2_DTR_MARK, COM2_DSR_MARK, COM2_DCD_MARK, COM2_RI_MARK,
+ COM2_DTR_MARK, COM2_DSR_MARK, COM2_DCD_MARK, CLKOUT_MARK,
+ SCK2_MARK, SCK4_MARK, SCK3_MARK,
- /* PTL (mobule: SERMUX) */
- RAC_TXD_MARK, RAC_RXD_MARK, RAC_RTS_MARK, RAC_CTS_MARK,
- RAC_DTR_MARK, RAC_DSR_MARK, RAC_DCD_MARK, RAC_RI_MARK,
+ /* PTL (mobule: SERMUX, SCIF, LBSC, AUD) */
+ RAC_RXD_MARK, RAC_RTS_MARK, RAC_CTS_MARK, RAC_DTR_MARK,
+ RAC_DSR_MARK, RAC_DCD_MARK, RAC_TXD_MARK, RXD2_MARK,
+ CS5_MARK, CS6_MARK, AUDSYNC_MARK, AUDCK_MARK,
+ TXD2_MARK,
- /* PTM (mobule: IIC, LPC) */
+ /* PTM (mobule: LBSC, IIC) */
+ CS4_MARK, RD_MARK, WE0_MARK, CS0_MARK,
SDA6_MARK, SCL6_MARK, SDA7_MARK, SCL7_MARK,
- WP_MARK, FMS0_MARK, FMS1_MARK,
- /* PTN (mobule: SCIF234, EVC) */
- SCK2_MARK, RTS4_MARK, RTS3_MARK, RTS2_MARK,
- CTS4_MARK, CTS3_MARK, CTS2_MARK,
- EVENT7_MARK, EVENT6_MARK, EVENT5_MARK, EVENT4_MARK,
- EVENT3_MARK, EVENT2_MARK, EVENT1_MARK, EVENT0_MARK,
+ /* PTN (mobule: USB, JMC, SGPIO, WDT) */
+ VBUS_EN_MARK, VBUS_OC_MARK, JMCTCK_MARK, JMCTMS_MARK,
+ JMCTDO_MARK, JMCTDI_MARK, JMCTRST_MARK,
+ SGPIO1_CLK_MARK, SGPIO1_LOAD_MARK, SGPIO1_DI_MARK,
+ SGPIO1_DO_MARK, SUB_CLKIN_MARK,
- /* PTO (mobule: SGPIO) */
- SGPIO0_CLK_MARK, SGPIO0_LOAD_MARK,
- SGPIO0_DI_MARK, SGPIO0_DO_MARK,
- SGPIO1_CLK_MARK, SGPIO1_LOAD_MARK,
- SGPIO1_DI_MARK, SGPIO1_DO_MARK,
-
- /* PTP (mobule: JMC, SCIF234) */
- JMCTCK_MARK, JMCTMS_MARK, JMCTDO_MARK, JMCTDI_MARK,
- JMCRST_MARK, SCK4_MARK, SCK3_MARK,
+ /* PTO (mobule: SGPIO, SerMux) */
+ SGPIO0_CLK_MARK, SGPIO0_LOAD_MARK, SGPIO0_DI_MARK,
+ SGPIO0_DO_MARK, SGPIO2_CLK_MARK, SGPIO2_LOAD_MARK,
+ SGPIO2_DI_MARK, SGPIO2_DO_MARK,
+ COM1_TXD_MARK, COM1_RXD_MARK, COM1_RTS_MARK, COM1_CTS_MARK,
/* PTQ (mobule: LPC) */
LAD3_MARK, LAD2_MARK, LAD1_MARK, LAD0_MARK,
LFRAME_MARK, LRESET_MARK, LCLK_MARK,
/* PTR (mobule: GRA, IIC) */
- DDC3_MARK, DDC2_MARK,
- SDA8_MARK, SCL8_MARK, SDA2_MARK, SCL2_MARK,
+ DDC3_MARK, DDC2_MARK, SDA2_MARK, SCL2_MARK,
SDA1_MARK, SCL1_MARK, SDA0_MARK, SCL0_MARK,
+ SDA8_MARK, SCL8_MARK,
/* PTS (mobule: GRA, IIC) */
- DDC1_MARK, DDC0_MARK,
- SDA9_MARK, SCL9_MARK, SDA5_MARK, SCL5_MARK,
+ DDC1_MARK, DDC0_MARK, SDA5_MARK, SCL5_MARK,
SDA4_MARK, SCL4_MARK, SDA3_MARK, SCL3_MARK,
+ SDA9_MARK, SCL9_MARK,
- /* PTT (mobule: SYSTEM, PWMX) */
- AUDSYNC_MARK, AUDCK_MARK,
- AUDATA3_MARK, AUDATA2_MARK,
- AUDATA1_MARK, AUDATA0_MARK,
- PWX7_MARK, PWX6_MARK, PWX5_MARK, PWX4_MARK,
+ /* PTT (mobule: PWMX, AUD) */
+ PWMX7_MARK, PWMX6_MARK, PWMX5_MARK, PWMX4_MARK,
+ PWMX3_MARK, PWMX2_MARK, PWMX1_MARK, PWMX0_MARK,
+ AUDATA3_MARK, AUDATA2_MARK, AUDATA1_MARK, AUDATA0_MARK,
+ STATUS1_MARK, STATUS0_MARK,
- /* PTU (mobule: LBSC, DMAC) */
- CS6_MARK, CS5_MARK, CS4_MARK, CS0_MARK,
- RD_MARK, WE0_MARK, A25_MARK, A24_MARK,
- DREQ0_MARK, DACK0_MARK,
+ /* PTU (mobule: LPC, APM) */
+ LGPIO7_MARK, LGPIO6_MARK, LGPIO5_MARK, LGPIO4_MARK,
+ LGPIO3_MARK, LGPIO2_MARK, LGPIO1_MARK, LGPIO0_MARK,
+ APMONCTL_O_MARK, APMPWBTOUT_O_MARK, APMSCI_O_MARK,
+ APMVDDON_MARK, APMSLPBTN_MARK, APMPWRBTN_MARK, APMS5N_MARK,
+ APMS3N_MARK,
- /* PTV (mobule: LBSC, DMAC) */
+ /* PTV (mobule: LBSC, SerMux, R-SPI, EVC, GRA) */
A23_MARK, A22_MARK, A21_MARK, A20_MARK,
A19_MARK, A18_MARK, A17_MARK, A16_MARK,
- TEND0_MARK, DREQ1_MARK, DACK1_MARK, TEND1_MARK,
+ COM2_RI_MARK, R_SPI_MOSI_MARK, R_SPI_MISO_MARK,
+ R_SPI_RSPCK_MARK, R_SPI_SSL0_MARK, R_SPI_SSL1_MARK,
+ EVENT7_MARK, EVENT6_MARK, VBIOS_DI_MARK, VBIOS_DO_MARK,
+ VBIOS_CLK_MARK, VBIOS_CS_MARK,
- /* PTW (mobule: LBSC) */
+ /* PTW (mobule: LBSC, EVC, SCIF) */
A15_MARK, A14_MARK, A13_MARK, A12_MARK,
A11_MARK, A10_MARK, A9_MARK, A8_MARK,
+ EVENT5_MARK, EVENT4_MARK, EVENT3_MARK, EVENT2_MARK,
+ EVENT1_MARK, EVENT0_MARK, CTS4_MARK, CTS2_MARK,
- /* PTX (mobule: LBSC) */
+ /* PTX (mobule: LBSC, SCIF, SIM) */
A7_MARK, A6_MARK, A5_MARK, A4_MARK,
A3_MARK, A2_MARK, A1_MARK, A0_MARK,
+ RTS2_MARK, SIM_D_MARK, SIM_CLK_MARK, SIM_RST_MARK,
/* PTY (mobule: LBSC) */
D7_MARK, D6_MARK, D5_MARK, D4_MARK,
D3_MARK, D2_MARK, D1_MARK, D0_MARK,
+
+ /* PTZ (mobule: eMMC, ONFI) */
+ MMCDAT7_MARK, MMCDAT6_MARK, MMCDAT5_MARK, MMCDAT4_MARK,
+ MMCDAT3_MARK, MMCDAT2_MARK, MMCDAT1_MARK, MMCDAT0_MARK,
+ ON_DQ7_MARK, ON_DQ6_MARK, ON_DQ5_MARK, ON_DQ4_MARK,
+ ON_DQ3_MARK, ON_DQ2_MARK, ON_DQ1_MARK, ON_DQ0_MARK,
+
PINMUX_MARK_END,
};
@@ -473,6 +567,8 @@ static pinmux_enum_t pinmux_data[] = {
PINMUX_DATA(PTD0_DATA, PTD0_IN, PTD0_OUT),
/* PTE GPIO */
+ PINMUX_DATA(PTE7_DATA, PTE7_IN, PTE7_OUT),
+ PINMUX_DATA(PTE6_DATA, PTE6_IN, PTE6_OUT),
PINMUX_DATA(PTE5_DATA, PTE5_IN, PTE5_OUT),
PINMUX_DATA(PTE4_DATA, PTE4_IN, PTE4_OUT),
PINMUX_DATA(PTE3_DATA, PTE3_IN, PTE3_OUT),
@@ -521,7 +617,6 @@ static pinmux_enum_t pinmux_data[] = {
PINMUX_DATA(PTI0_DATA, PTI0_IN, PTI0_OUT),
/* PTJ GPIO */
- PINMUX_DATA(PTJ7_DATA, PTJ7_IN, PTJ7_OUT),
PINMUX_DATA(PTJ6_DATA, PTJ6_IN, PTJ6_OUT),
PINMUX_DATA(PTJ5_DATA, PTJ5_IN, PTJ5_OUT),
PINMUX_DATA(PTJ4_DATA, PTJ4_IN, PTJ4_OUT),
@@ -541,7 +636,6 @@ static pinmux_enum_t pinmux_data[] = {
PINMUX_DATA(PTK0_DATA, PTK0_IN, PTK0_OUT),
/* PTL GPIO */
- PINMUX_DATA(PTL7_DATA, PTL7_IN, PTL7_OUT),
PINMUX_DATA(PTL6_DATA, PTL6_IN, PTL6_OUT),
PINMUX_DATA(PTL5_DATA, PTL5_IN, PTL5_OUT),
PINMUX_DATA(PTL4_DATA, PTL4_IN, PTL4_OUT),
@@ -560,7 +654,6 @@ static pinmux_enum_t pinmux_data[] = {
PINMUX_DATA(PTM0_DATA, PTM0_IN, PTM0_OUT),
/* PTN GPIO */
- PINMUX_DATA(PTN7_DATA, PTN7_IN, PTN7_OUT),
PINMUX_DATA(PTN6_DATA, PTN6_IN, PTN6_OUT),
PINMUX_DATA(PTN5_DATA, PTN5_IN, PTN5_OUT),
PINMUX_DATA(PTN4_DATA, PTN4_IN, PTN4_OUT),
@@ -609,6 +702,8 @@ static pinmux_enum_t pinmux_data[] = {
PINMUX_DATA(PTS0_DATA, PTS0_IN, PTS0_OUT),
/* PTT GPIO */
+ PINMUX_DATA(PTT7_DATA, PTT7_IN, PTT7_OUT),
+ PINMUX_DATA(PTT6_DATA, PTT6_IN, PTT6_OUT),
PINMUX_DATA(PTT5_DATA, PTT5_IN, PTT5_OUT),
PINMUX_DATA(PTT4_DATA, PTT4_IN, PTT4_OUT),
PINMUX_DATA(PTT3_DATA, PTT3_IN, PTT3_OUT),
@@ -677,186 +772,204 @@ static pinmux_enum_t pinmux_data[] = {
PINMUX_DATA(PTZ0_DATA, PTZ0_IN, PTZ0_OUT),
/* PTA FN */
- PINMUX_DATA(BS_MARK, PS0_15_FN1, PTA7_FN),
- PINMUX_DATA(LGPIO7_MARK, PS0_15_FN3, PTA7_FN),
- PINMUX_DATA(RDWR_MARK, PS0_14_FN1, PTA6_FN),
- PINMUX_DATA(LGPIO6_MARK, PS0_14_FN3, PTA6_FN),
- PINMUX_DATA(WE1_MARK, PS0_13_FN1, PTA5_FN),
- PINMUX_DATA(LGPIO5_MARK, PS0_13_FN3, PTA5_FN),
- PINMUX_DATA(RDY_MARK, PS0_12_FN1, PTA4_FN),
- PINMUX_DATA(LGPIO4_MARK, PS0_12_FN3, PTA4_FN),
- PINMUX_DATA(LGPIO3_MARK, PTA3_FN),
- PINMUX_DATA(LGPIO2_MARK, PTA2_FN),
- PINMUX_DATA(LGPIO1_MARK, PTA1_FN),
- PINMUX_DATA(LGPIO0_MARK, PTA0_FN),
+ PINMUX_DATA(BS_MARK, PTA7_FN),
+ PINMUX_DATA(RDWR_MARK, PTA6_FN),
+ PINMUX_DATA(WE1_MARK, PTA5_FN),
+ PINMUX_DATA(RDY_MARK, PTA4_FN),
+ PINMUX_DATA(ET0_MDC_MARK, PTA3_FN),
+ PINMUX_DATA(ET0_MDIO_MARK, PTA2_FN),
+ PINMUX_DATA(ET1_MDC_MARK, PTA1_FN),
+ PINMUX_DATA(ET1_MDIO_MARK, PTA0_FN),
/* PTB FN */
- PINMUX_DATA(D15_MARK, PS0_7_FN1, PTB7_FN),
- PINMUX_DATA(ET0_MDC_MARK, PS0_7_FN2, PTB7_FN),
- PINMUX_DATA(D14_MARK, PS0_6_FN1, PTB6_FN),
- PINMUX_DATA(ET0_MDIO_MARK, PS0_6_FN2, PTB6_FN),
- PINMUX_DATA(D13_MARK, PS0_5_FN1, PTB5_FN),
- PINMUX_DATA(ET1_MDC_MARK, PS0_5_FN2, PTB5_FN),
- PINMUX_DATA(D12_MARK, PS0_4_FN1, PTB4_FN),
- PINMUX_DATA(ET1_MDIO_MARK, PS0_4_FN2, PTB4_FN),
- PINMUX_DATA(D11_MARK, PS0_3_FN1, PTB3_FN),
- PINMUX_DATA(SIM_D_MARK, PS0_3_FN2, PTB3_FN),
- PINMUX_DATA(D10_MARK, PS0_2_FN1, PTB2_FN),
- PINMUX_DATA(SIM_CLK_MARK, PS0_2_FN2, PTB2_FN),
- PINMUX_DATA(D9_MARK, PS0_1_FN1, PTB1_FN),
- PINMUX_DATA(SIM_RST_MARK, PS0_1_FN2, PTB1_FN),
- PINMUX_DATA(D8_MARK, PTB0_FN),
+ PINMUX_DATA(IRQ15_MARK, PS0_15_FN1, PTB7_FN),
+ PINMUX_DATA(ON_NRE_MARK, PS0_15_FN2, PTB7_FN),
+ PINMUX_DATA(IRQ14_MARK, PS0_14_FN1, PTB6_FN),
+ PINMUX_DATA(ON_NWE_MARK, PS0_14_FN2, PTB6_FN),
+ PINMUX_DATA(IRQ13_MARK, PS0_13_FN1, PTB5_FN),
+ PINMUX_DATA(ON_NWP_MARK, PS0_13_FN2, PTB5_FN),
+ PINMUX_DATA(IRQ12_MARK, PS0_12_FN1, PTB4_FN),
+ PINMUX_DATA(ON_NCE0_MARK, PS0_12_FN2, PTB4_FN),
+ PINMUX_DATA(IRQ11_MARK, PS0_11_FN1, PTB3_FN),
+ PINMUX_DATA(ON_R_B0_MARK, PS0_11_FN2, PTB3_FN),
+ PINMUX_DATA(IRQ10_MARK, PS0_10_FN1, PTB2_FN),
+ PINMUX_DATA(ON_ALE_MARK, PS0_10_FN2, PTB2_FN),
+ PINMUX_DATA(IRQ9_MARK, PS0_9_FN1, PTB1_FN),
+ PINMUX_DATA(ON_CLE_MARK, PS0_9_FN2, PTB1_FN),
+ PINMUX_DATA(IRQ8_MARK, PS0_8_FN1, PTB0_FN),
+ PINMUX_DATA(TCLK_MARK, PS0_8_FN2, PTB0_FN),
/* PTC FN */
- PINMUX_DATA(SD_WP_MARK, PTC7_FN),
- PINMUX_DATA(SD_CD_MARK, PTC6_FN),
- PINMUX_DATA(SD_CLK_MARK, PTC5_FN),
- PINMUX_DATA(SD_CMD_MARK, PTC4_FN),
- PINMUX_DATA(SD_D3_MARK, PTC3_FN),
- PINMUX_DATA(SD_D2_MARK, PTC2_FN),
- PINMUX_DATA(SD_D1_MARK, PTC1_FN),
- PINMUX_DATA(SD_D0_MARK, PTC0_FN),
+ PINMUX_DATA(IRQ7_MARK, PS0_7_FN1, PTC7_FN),
+ PINMUX_DATA(PWMU0_MARK, PS0_7_FN2, PTC7_FN),
+ PINMUX_DATA(IRQ6_MARK, PS0_6_FN1, PTC6_FN),
+ PINMUX_DATA(PWMU1_MARK, PS0_6_FN2, PTC6_FN),
+ PINMUX_DATA(IRQ5_MARK, PS0_5_FN1, PTC5_FN),
+ PINMUX_DATA(PWMU2_MARK, PS0_5_FN2, PTC5_FN),
+ PINMUX_DATA(IRQ4_MARK, PS0_4_FN1, PTC4_FN),
+ PINMUX_DATA(PWMU3_MARK, PS0_4_FN2, PTC4_FN),
+ PINMUX_DATA(IRQ3_MARK, PS0_3_FN1, PTC3_FN),
+ PINMUX_DATA(PWMU4_MARK, PS0_3_FN2, PTC3_FN),
+ PINMUX_DATA(IRQ2_MARK, PS0_2_FN1, PTC2_FN),
+ PINMUX_DATA(PWMU5_MARK, PS0_2_FN2, PTC2_FN),
+ PINMUX_DATA(IRQ1_MARK, PTC1_FN),
+ PINMUX_DATA(IRQ0_MARK, PTC0_FN),
/* PTD FN */
- PINMUX_DATA(IRQ7_MARK, PS1_7_FN1, PTD7_FN),
- PINMUX_DATA(ADTRG1_MARK, PS1_7_FN3, PTD7_FN),
- PINMUX_DATA(IRQ6_MARK, PS1_6_FN1, PTD6_FN),
- PINMUX_DATA(ADTRG0_MARK, PS1_6_FN3, PTD6_FN),
- PINMUX_DATA(IRQ5_MARK, PTD5_FN),
- PINMUX_DATA(IRQ4_MARK, PTD4_FN),
- PINMUX_DATA(IRQ3_MARK, PTD3_FN),
- PINMUX_DATA(IRQ2_MARK, PTD2_FN),
- PINMUX_DATA(IRQ1_MARK, PTD1_FN),
- PINMUX_DATA(IRQ0_MARK, PTD0_FN),
+ PINMUX_DATA(SP0_MOSI_MARK, PTD7_FN),
+ PINMUX_DATA(SP0_MISO_MARK, PTD6_FN),
+ PINMUX_DATA(SP0_SCK_MARK, PTD5_FN),
+ PINMUX_DATA(SP0_SCK_FB_MARK, PTD4_FN),
+ PINMUX_DATA(SP0_SS0_MARK, PTD3_FN),
+ PINMUX_DATA(SP0_SS1_MARK, PS1_10_FN1, PTD2_FN),
+ PINMUX_DATA(DREQ0_MARK, PS1_10_FN2, PTD2_FN),
+ PINMUX_DATA(SP0_SS2_MARK, PS1_9_FN1, PTD1_FN),
+ PINMUX_DATA(DACK0_MARK, PS1_9_FN2, PTD1_FN),
+ PINMUX_DATA(SP0_SS3_MARK, PS1_8_FN1, PTD0_FN),
+ PINMUX_DATA(TEND0_MARK, PS1_8_FN2, PTD0_FN),
/* PTE FN */
- PINMUX_DATA(ET0_CRS_DV_MARK, PTE7_FN),
- PINMUX_DATA(ET0_TXD1_MARK, PTE6_FN),
- PINMUX_DATA(ET0_TXD0_MARK, PTE5_FN),
- PINMUX_DATA(ET0_TX_EN_MARK, PTE4_FN),
- PINMUX_DATA(ET0_REF_CLK_MARK, PTE3_FN),
- PINMUX_DATA(ET0_RXD1_MARK, PTE2_FN),
- PINMUX_DATA(ET0_RXD0_MARK, PTE1_FN),
- PINMUX_DATA(ET0_RX_ER_MARK, PTE0_FN),
+ PINMUX_DATA(RMII0_CRS_DV_MARK, PTE7_FN),
+ PINMUX_DATA(RMII0_TXD1_MARK, PTE6_FN),
+ PINMUX_DATA(RMII0_TXD0_MARK, PTE5_FN),
+ PINMUX_DATA(RMII0_TXEN_MARK, PTE4_FN),
+ PINMUX_DATA(RMII0_REFCLK_MARK, PTE3_FN),
+ PINMUX_DATA(RMII0_RXD1_MARK, PTE2_FN),
+ PINMUX_DATA(RMII0_RXD0_MARK, PTE1_FN),
+ PINMUX_DATA(RMII0_RX_ER_MARK, PTE0_FN),
/* PTF FN */
- PINMUX_DATA(ET1_CRS_DV_MARK, PTF7_FN),
- PINMUX_DATA(ET1_TXD1_MARK, PTF6_FN),
- PINMUX_DATA(ET1_TXD0_MARK, PTF5_FN),
- PINMUX_DATA(ET1_TX_EN_MARK, PTF4_FN),
- PINMUX_DATA(ET1_REF_CLK_MARK, PTF3_FN),
- PINMUX_DATA(ET1_RXD1_MARK, PTF2_FN),
- PINMUX_DATA(ET1_RXD0_MARK, PTF1_FN),
- PINMUX_DATA(ET1_RX_ER_MARK, PTF0_FN),
+ PINMUX_DATA(RMII1_CRS_DV_MARK, PTF7_FN),
+ PINMUX_DATA(RMII1_TXD1_MARK, PTF6_FN),
+ PINMUX_DATA(RMII1_TXD0_MARK, PTF5_FN),
+ PINMUX_DATA(RMII1_TXEN_MARK, PTF4_FN),
+ PINMUX_DATA(RMII1_REFCLK_MARK, PTF3_FN),
+ PINMUX_DATA(RMII1_RXD1_MARK, PS1_2_FN1, PTF2_FN),
+ PINMUX_DATA(RAC_RI_MARK, PS1_2_FN2, PTF2_FN),
+ PINMUX_DATA(RMII1_RXD0_MARK, PTF1_FN),
+ PINMUX_DATA(RMII1_RX_ER_MARK, PTF0_FN),
/* PTG FN */
- PINMUX_DATA(PWX0_MARK, PTG7_FN),
- PINMUX_DATA(PWX1_MARK, PTG6_FN),
- PINMUX_DATA(STATUS0_MARK, PS2_13_FN1, PTG5_FN),
- PINMUX_DATA(PWX2_MARK, PS2_13_FN3, PTG5_FN),
- PINMUX_DATA(STATUS1_MARK, PS2_12_FN1, PTG4_FN),
- PINMUX_DATA(PWX3_MARK, PS2_12_FN3, PTG4_FN),
+ PINMUX_DATA(BOOTFMS_MARK, PTG7_FN),
+ PINMUX_DATA(BOOTWP_MARK, PTG6_FN),
+ PINMUX_DATA(A25_MARK, PS2_13_FN1, PTG5_FN),
+ PINMUX_DATA(MMCCLK_MARK, PS2_13_FN2, PTG5_FN),
+ PINMUX_DATA(A24_MARK, PS2_12_FN1, PTG4_FN),
+ PINMUX_DATA(MMCCMD_MARK, PS2_12_FN2, PTG4_FN),
PINMUX_DATA(SERIRQ_MARK, PTG3_FN),
- PINMUX_DATA(CLKRUN_MARK, PTG2_FN),
+ PINMUX_DATA(WDTOVF_MARK, PTG2_FN),
PINMUX_DATA(LPCPD_MARK, PTG1_FN),
PINMUX_DATA(LDRQ_MARK, PTG0_FN),
/* PTH FN */
- PINMUX_DATA(SP1_MOSI_MARK, PTH7_FN),
- PINMUX_DATA(SP1_MISO_MARK, PTH6_FN),
- PINMUX_DATA(SP1_SCK_MARK, PTH5_FN),
- PINMUX_DATA(SP1_SCK_FB_MARK, PTH4_FN),
+ PINMUX_DATA(SP1_MOSI_MARK, PS2_7_FN1, PTH7_FN),
+ PINMUX_DATA(TEND1_MARK, PS2_7_FN2, PTH7_FN),
+ PINMUX_DATA(SP1_MISO_MARK, PS2_6_FN1, PTH6_FN),
+ PINMUX_DATA(DREQ1_MARK, PS2_6_FN2, PTH6_FN),
+ PINMUX_DATA(SP1_SCK_MARK, PS2_5_FN1, PTH5_FN),
+ PINMUX_DATA(DACK1_MARK, PS2_5_FN2, PTH5_FN),
+ PINMUX_DATA(SP1_SCK_FB_MARK, PS2_4_FN1, PTH4_FN),
+ PINMUX_DATA(ADTRG1_MARK, PS2_4_FN2, PTH4_FN),
PINMUX_DATA(SP1_SS0_MARK, PTH3_FN),
- PINMUX_DATA(TCLK_MARK, PTH2_FN),
- PINMUX_DATA(RXD4_MARK, PS2_1_FN1, PTH1_FN),
- PINMUX_DATA(SP1_SS1_MARK, PS2_1_FN2, PTH1_FN),
- PINMUX_DATA(TXD4_MARK, PS2_0_FN1, PTH0_FN),
- PINMUX_DATA(SP0_SS1_MARK, PS2_0_FN2, PTH0_FN),
+ PINMUX_DATA(SP1_SS1_MARK, PS2_2_FN1, PTH2_FN),
+ PINMUX_DATA(ADTRG0_MARK, PS2_2_FN2, PTH2_FN),
+ PINMUX_DATA(WP_MARK, PTH1_FN),
+ PINMUX_DATA(FMS0_MARK, PTH0_FN),
/* PTI FN */
- PINMUX_DATA(IRQ15_MARK, PTI7_FN),
- PINMUX_DATA(IRQ14_MARK, PTI6_FN),
- PINMUX_DATA(IRQ13_MARK, PTI5_FN),
- PINMUX_DATA(IRQ12_MARK, PTI4_FN),
- PINMUX_DATA(IRQ11_MARK, PTI3_FN),
- PINMUX_DATA(IRQ10_MARK, PTI2_FN),
- PINMUX_DATA(IRQ9_MARK, PTI1_FN),
- PINMUX_DATA(IRQ8_MARK, PTI0_FN),
+ PINMUX_DATA(D15_MARK, PS3_15_FN1, PTI7_FN),
+ PINMUX_DATA(SD_WP_MARK, PS3_15_FN2, PTI7_FN),
+ PINMUX_DATA(D14_MARK, PS3_14_FN1, PTI6_FN),
+ PINMUX_DATA(SD_CD_MARK, PS3_14_FN2, PTI6_FN),
+ PINMUX_DATA(D13_MARK, PS3_13_FN1, PTI5_FN),
+ PINMUX_DATA(SD_CLK_MARK, PS3_13_FN2, PTI5_FN),
+ PINMUX_DATA(D12_MARK, PS3_12_FN1, PTI4_FN),
+ PINMUX_DATA(SD_CMD_MARK, PS3_12_FN2, PTI4_FN),
+ PINMUX_DATA(D11_MARK, PS3_11_FN1, PTI3_FN),
+ PINMUX_DATA(SD_D3_MARK, PS3_11_FN2, PTI3_FN),
+ PINMUX_DATA(D10_MARK, PS3_10_FN1, PTI2_FN),
+ PINMUX_DATA(SD_D2_MARK, PS3_10_FN2, PTI2_FN),
+ PINMUX_DATA(D9_MARK, PS3_9_FN1, PTI1_FN),
+ PINMUX_DATA(SD_D1_MARK, PS3_9_FN2, PTI1_FN),
+ PINMUX_DATA(D8_MARK, PS3_8_FN1, PTI0_FN),
+ PINMUX_DATA(SD_D0_MARK, PS3_8_FN2, PTI0_FN),
/* PTJ FN */
- PINMUX_DATA(RXD3_MARK, PTJ7_FN),
- PINMUX_DATA(TXD3_MARK, PTJ6_FN),
- PINMUX_DATA(RXD2_MARK, PTJ5_FN),
- PINMUX_DATA(TXD2_MARK, PTJ4_FN),
- PINMUX_DATA(COM1_TXD_MARK, PTJ3_FN),
- PINMUX_DATA(COM1_RXD_MARK, PTJ2_FN),
- PINMUX_DATA(COM1_RTS_MARK, PTJ1_FN),
- PINMUX_DATA(COM1_CTS_MARK, PTJ0_FN),
+ PINMUX_DATA(RTS3_MARK, PTJ6_FN),
+ PINMUX_DATA(CTS3_MARK, PTJ5_FN),
+ PINMUX_DATA(TXD3_MARK, PTJ4_FN),
+ PINMUX_DATA(RXD3_MARK, PTJ3_FN),
+ PINMUX_DATA(RTS4_MARK, PTJ2_FN),
+ PINMUX_DATA(RXD4_MARK, PTJ1_FN),
+ PINMUX_DATA(TXD4_MARK, PTJ0_FN),
/* PTK FN */
- PINMUX_DATA(COM2_TXD_MARK, PTK7_FN),
+ PINMUX_DATA(COM2_TXD_MARK, PS3_7_FN1, PTK7_FN),
+ PINMUX_DATA(SCK2_MARK, PS3_7_FN2, PTK7_FN),
PINMUX_DATA(COM2_RXD_MARK, PTK6_FN),
PINMUX_DATA(COM2_RTS_MARK, PTK5_FN),
PINMUX_DATA(COM2_CTS_MARK, PTK4_FN),
PINMUX_DATA(COM2_DTR_MARK, PTK3_FN),
- PINMUX_DATA(COM2_DSR_MARK, PTK2_FN),
- PINMUX_DATA(COM2_DCD_MARK, PTK1_FN),
- PINMUX_DATA(COM2_RI_MARK, PTK0_FN),
+ PINMUX_DATA(COM2_DSR_MARK, PS3_2_FN1, PTK2_FN),
+ PINMUX_DATA(SCK4_MARK, PS3_2_FN2, PTK2_FN),
+ PINMUX_DATA(COM2_DCD_MARK, PS3_1_FN1, PTK1_FN),
+ PINMUX_DATA(SCK3_MARK, PS3_1_FN2, PTK1_FN),
+ PINMUX_DATA(CLKOUT_MARK, PTK0_FN),
/* PTL FN */
- PINMUX_DATA(RAC_TXD_MARK, PTL7_FN),
- PINMUX_DATA(RAC_RXD_MARK, PTL6_FN),
- PINMUX_DATA(RAC_RTS_MARK, PTL5_FN),
- PINMUX_DATA(RAC_CTS_MARK, PTL4_FN),
+ PINMUX_DATA(RAC_RXD_MARK, PS4_14_FN1, PTL6_FN),
+ PINMUX_DATA(RXD2_MARK, PS4_14_FN2, PTL6_FN),
+ PINMUX_DATA(RAC_RTS_MARK, PS4_13_FN1, PTL5_FN),
+ PINMUX_DATA(CS5_MARK, PS4_13_FN2, PTL5_FN),
+ PINMUX_DATA(RAC_CTS_MARK, PS4_12_FN1, PTL4_FN),
+ PINMUX_DATA(CS6_MARK, PS4_12_FN2, PTL4_FN),
PINMUX_DATA(RAC_DTR_MARK, PTL3_FN),
- PINMUX_DATA(RAC_DSR_MARK, PTL2_FN),
- PINMUX_DATA(RAC_DCD_MARK, PTL1_FN),
- PINMUX_DATA(RAC_RI_MARK, PTL0_FN),
+ PINMUX_DATA(RAC_DSR_MARK, PS4_10_FN1, PTL2_FN),
+ PINMUX_DATA(AUDSYNC_MARK, PS4_10_FN2, PTL2_FN),
+ PINMUX_DATA(RAC_DCD_MARK, PS4_9_FN1, PTL1_FN),
+ PINMUX_DATA(AUDCK_MARK, PS4_9_FN2, PTL1_FN),
+ PINMUX_DATA(RAC_TXD_MARK, PS4_8_FN1, PTL0_FN),
+ PINMUX_DATA(TXD2_MARK, PS4_8_FN2, PTL0_FN),
/* PTM FN */
- PINMUX_DATA(WP_MARK, PTM6_FN),
- PINMUX_DATA(FMS0_MARK, PTM5_FN),
- PINMUX_DATA(FMS1_MARK, PTM4_FN),
+ PINMUX_DATA(CS4_MARK, PTM7_FN),
+ PINMUX_DATA(RD_MARK, PTM6_FN),
+ PINMUX_DATA(WE0_MARK, PTM5_FN),
+ PINMUX_DATA(CS0_MARK, PTM4_FN),
PINMUX_DATA(SDA6_MARK, PTM3_FN),
PINMUX_DATA(SCL6_MARK, PTM2_FN),
PINMUX_DATA(SDA7_MARK, PTM1_FN),
PINMUX_DATA(SCL7_MARK, PTM0_FN),
/* PTN FN */
- PINMUX_DATA(SCK2_MARK, PS4_15_FN1, PTN7_FN),
- PINMUX_DATA(EVENT7_MARK, PS4_15_FN2, PTN7_FN),
- PINMUX_DATA(RTS4_MARK, PS4_14_FN1, PTN6_FN),
- PINMUX_DATA(EVENT6_MARK, PS4_14_FN2, PTN6_FN),
- PINMUX_DATA(RTS3_MARK, PS4_13_FN1, PTN5_FN),
- PINMUX_DATA(EVENT5_MARK, PS4_13_FN2, PTN5_FN),
- PINMUX_DATA(RTS2_MARK, PS4_12_FN1, PTN4_FN),
- PINMUX_DATA(EVENT4_MARK, PS4_12_FN2, PTN4_FN),
- PINMUX_DATA(CTS4_MARK, PS4_11_FN1, PTN3_FN),
- PINMUX_DATA(EVENT3_MARK, PS4_11_FN2, PTN3_FN),
- PINMUX_DATA(CTS3_MARK, PS4_10_FN1, PTN2_FN),
- PINMUX_DATA(EVENT2_MARK, PS4_10_FN2, PTN2_FN),
- PINMUX_DATA(CTS2_MARK, PS4_9_FN1, PTN1_FN),
- PINMUX_DATA(EVENT1_MARK, PS4_9_FN2, PTN1_FN),
- PINMUX_DATA(EVENT0_MARK, PTN0_FN),
+ PINMUX_DATA(VBUS_EN_MARK, PTN6_FN),
+ PINMUX_DATA(VBUS_OC_MARK, PTN5_FN),
+ PINMUX_DATA(JMCTCK_MARK, PS4_4_FN1, PTN4_FN),
+ PINMUX_DATA(SGPIO1_CLK_MARK, PS4_4_FN2, PTN4_FN),
+ PINMUX_DATA(JMCTMS_MARK, PS4_3_FN1, PTN3_FN),
+ PINMUX_DATA(SGPIO1_LOAD_MARK, PS4_3_FN2, PTN3_FN),
+ PINMUX_DATA(JMCTDO_MARK, PS4_2_FN1, PTN2_FN),
+ PINMUX_DATA(SGPIO1_DO_MARK, PS4_2_FN2, PTN2_FN),
+ PINMUX_DATA(JMCTDI_MARK, PS4_1_FN1, PTN1_FN),
+ PINMUX_DATA(SGPIO1_DI_MARK, PS4_1_FN2, PTN1_FN),
+ PINMUX_DATA(JMCTRST_MARK, PS4_0_FN1, PTN0_FN),
+ PINMUX_DATA(SUB_CLKIN_MARK, PS4_0_FN2, PTN0_FN),
/* PTO FN */
PINMUX_DATA(SGPIO0_CLK_MARK, PTO7_FN),
PINMUX_DATA(SGPIO0_LOAD_MARK, PTO6_FN),
PINMUX_DATA(SGPIO0_DI_MARK, PTO5_FN),
PINMUX_DATA(SGPIO0_DO_MARK, PTO4_FN),
- PINMUX_DATA(SGPIO1_CLK_MARK, PTO3_FN),
- PINMUX_DATA(SGPIO1_LOAD_MARK, PTO2_FN),
- PINMUX_DATA(SGPIO1_DI_MARK, PTO1_FN),
- PINMUX_DATA(SGPIO1_DO_MARK, PTO0_FN),
+ PINMUX_DATA(SGPIO2_CLK_MARK, PS5_11_FN1, PTO3_FN),
+ PINMUX_DATA(COM1_TXD_MARK, PS5_11_FN2, PTO3_FN),
+ PINMUX_DATA(SGPIO2_LOAD_MARK, PS5_10_FN1, PTO2_FN),
+ PINMUX_DATA(COM1_RXD_MARK, PS5_10_FN2, PTO2_FN),
+ PINMUX_DATA(SGPIO2_DI_MARK, PS5_9_FN1, PTO1_FN),
+ PINMUX_DATA(COM1_RTS_MARK, PS5_9_FN2, PTO1_FN),
+ PINMUX_DATA(SGPIO2_DO_MARK, PS5_8_FN1, PTO0_FN),
+ PINMUX_DATA(COM1_CTS_MARK, PS5_8_FN2, PTO0_FN),
/* PTP FN */
- PINMUX_DATA(JMCTCK_MARK, PTP6_FN),
- PINMUX_DATA(JMCTMS_MARK, PTP5_FN),
- PINMUX_DATA(JMCTDO_MARK, PTP4_FN),
- PINMUX_DATA(JMCTDI_MARK, PTP3_FN),
- PINMUX_DATA(JMCRST_MARK, PTP2_FN),
- PINMUX_DATA(SCK4_MARK, PTP1_FN),
- PINMUX_DATA(SCK3_MARK, PTP0_FN),
/* PTQ FN */
PINMUX_DATA(LAD3_MARK, PTQ6_FN),
@@ -864,8 +977,8 @@ static pinmux_enum_t pinmux_data[] = {
PINMUX_DATA(LAD1_MARK, PTQ4_FN),
PINMUX_DATA(LAD0_MARK, PTQ3_FN),
PINMUX_DATA(LFRAME_MARK, PTQ2_FN),
- PINMUX_DATA(SCK4_MARK, PTQ1_FN),
- PINMUX_DATA(SCK3_MARK, PTQ0_FN),
+ PINMUX_DATA(LRESET_MARK, PTQ1_FN),
+ PINMUX_DATA(LCLK_MARK, PTQ0_FN),
/* PTR FN */
PINMUX_DATA(SDA8_MARK, PTR7_FN), /* DDC3? */
@@ -888,58 +1001,84 @@ static pinmux_enum_t pinmux_data[] = {
PINMUX_DATA(SCL3_MARK, PTS0_FN),
/* PTT FN */
- PINMUX_DATA(AUDSYNC_MARK, PTS5_FN),
- PINMUX_DATA(AUDCK_MARK, PTS4_FN),
- PINMUX_DATA(AUDATA3_MARK, PS4_3_FN1, PTS3_FN),
- PINMUX_DATA(PWX7_MARK, PS4_3_FN2, PTS3_FN),
- PINMUX_DATA(AUDATA2_MARK, PS4_2_FN1, PTS2_FN),
- PINMUX_DATA(PWX6_MARK, PS4_2_FN2, PTS2_FN),
- PINMUX_DATA(AUDATA1_MARK, PS4_1_FN1, PTS1_FN),
- PINMUX_DATA(PWX5_MARK, PS4_1_FN2, PTS1_FN),
- PINMUX_DATA(AUDATA0_MARK, PS4_0_FN1, PTS0_FN),
- PINMUX_DATA(PWX4_MARK, PS4_0_FN2, PTS0_FN),
+ PINMUX_DATA(PWMX7_MARK, PS5_7_FN1, PTT7_FN),
+ PINMUX_DATA(AUDATA3_MARK, PS5_7_FN2, PTT7_FN),
+ PINMUX_DATA(PWMX6_MARK, PS5_6_FN1, PTT6_FN),
+ PINMUX_DATA(AUDATA2_MARK, PS5_6_FN2, PTT6_FN),
+ PINMUX_DATA(PWMX5_MARK, PS5_5_FN1, PTT5_FN),
+ PINMUX_DATA(AUDATA1_MARK, PS5_5_FN2, PTT5_FN),
+ PINMUX_DATA(PWMX4_MARK, PS5_4_FN1, PTT4_FN),
+ PINMUX_DATA(AUDATA0_MARK, PS5_4_FN2, PTT4_FN),
+ PINMUX_DATA(PWMX3_MARK, PS5_3_FN1, PTT3_FN),
+ PINMUX_DATA(STATUS1_MARK, PS5_3_FN2, PTT3_FN),
+ PINMUX_DATA(PWMX2_MARK, PS5_2_FN1, PTT2_FN),
+ PINMUX_DATA(STATUS0_MARK, PS5_2_FN2, PTT2_FN),
+ PINMUX_DATA(PWMX1_MARK, PTT1_FN),
+ PINMUX_DATA(PWMX0_MARK, PTT0_FN),
/* PTU FN */
- PINMUX_DATA(CS6_MARK, PTU7_FN),
- PINMUX_DATA(CS5_MARK, PTU6_FN),
- PINMUX_DATA(CS4_MARK, PTU5_FN),
- PINMUX_DATA(CS0_MARK, PTU4_FN),
- PINMUX_DATA(RD_MARK, PTU3_FN),
- PINMUX_DATA(WE0_MARK, PTU2_FN),
- PINMUX_DATA(A25_MARK, PS5_9_FN1, PTU1_FN),
- PINMUX_DATA(DREQ0_MARK, PS5_9_FN2, PTU1_FN),
- PINMUX_DATA(A24_MARK, PS5_8_FN1, PTU0_FN),
- PINMUX_DATA(DACK0_MARK, PS5_8_FN2, PTU0_FN),
+ PINMUX_DATA(LGPIO7_MARK, PS6_15_FN1, PTU7_FN),
+ PINMUX_DATA(APMONCTL_O_MARK, PS6_15_FN2, PTU7_FN),
+ PINMUX_DATA(LGPIO6_MARK, PS6_14_FN1, PTU6_FN),
+ PINMUX_DATA(APMPWBTOUT_O_MARK, PS6_14_FN2, PTU6_FN),
+ PINMUX_DATA(LGPIO5_MARK, PS6_13_FN1, PTU5_FN),
+ PINMUX_DATA(APMSCI_O_MARK, PS6_13_FN2, PTU5_FN),
+ PINMUX_DATA(LGPIO4_MARK, PS6_12_FN1, PTU4_FN),
+ PINMUX_DATA(APMVDDON_MARK, PS6_12_FN2, PTU4_FN),
+ PINMUX_DATA(LGPIO3_MARK, PS6_11_FN1, PTU3_FN),
+ PINMUX_DATA(APMSLPBTN_MARK, PS6_11_FN2, PTU3_FN),
+ PINMUX_DATA(LGPIO2_MARK, PS6_10_FN1, PTU2_FN),
+ PINMUX_DATA(APMPWRBTN_MARK, PS6_10_FN2, PTU2_FN),
+ PINMUX_DATA(LGPIO1_MARK, PS6_9_FN1, PTU1_FN),
+ PINMUX_DATA(APMS5N_MARK, PS6_9_FN2, PTU1_FN),
+ PINMUX_DATA(LGPIO0_MARK, PS6_8_FN1, PTU0_FN),
+ PINMUX_DATA(APMS3N_MARK, PS6_8_FN2, PTU0_FN),
/* PTV FN */
- PINMUX_DATA(A23_MARK, PS5_7_FN1, PTV7_FN),
- PINMUX_DATA(TEND0_MARK, PS5_7_FN2, PTV7_FN),
- PINMUX_DATA(A22_MARK, PS5_6_FN1, PTV6_FN),
- PINMUX_DATA(DREQ1_MARK, PS5_6_FN2, PTV6_FN),
- PINMUX_DATA(A21_MARK, PS5_5_FN1, PTV5_FN),
- PINMUX_DATA(DACK1_MARK, PS5_5_FN2, PTV5_FN),
- PINMUX_DATA(A20_MARK, PS5_4_FN1, PTV4_FN),
- PINMUX_DATA(TEND1_MARK, PS5_4_FN2, PTV4_FN),
- PINMUX_DATA(A19_MARK, PTV3_FN),
- PINMUX_DATA(A18_MARK, PTV2_FN),
- PINMUX_DATA(A17_MARK, PTV1_FN),
- PINMUX_DATA(A16_MARK, PTV0_FN),
+ PINMUX_DATA(A23_MARK, PS6_7_FN1, PTV7_FN),
+ PINMUX_DATA(COM2_RI_MARK, PS6_7_FN2, PTV7_FN),
+ PINMUX_DATA(A22_MARK, PS6_6_FN1, PTV6_FN),
+ PINMUX_DATA(R_SPI_MOSI_MARK, PS6_6_FN2, PTV6_FN),
+ PINMUX_DATA(A21_MARK, PS6_5_FN1, PTV5_FN),
+ PINMUX_DATA(R_SPI_MISO_MARK, PS6_5_FN2, PTV5_FN),
+ PINMUX_DATA(A20_MARK, PS6_4_FN1, PTV4_FN),
+ PINMUX_DATA(R_SPI_RSPCK_MARK, PS6_4_FN2, PTV4_FN),
+ PINMUX_DATA(A19_MARK, PS6_3_FN1, PTV3_FN),
+ PINMUX_DATA(R_SPI_SSL0_MARK, PS6_3_FN2, PTV3_FN),
+ PINMUX_DATA(A18_MARK, PS6_2_FN1, PTV2_FN),
+ PINMUX_DATA(R_SPI_SSL1_MARK, PS6_2_FN2, PTV2_FN),
+ PINMUX_DATA(A17_MARK, PS6_1_FN1, PTV1_FN),
+ PINMUX_DATA(EVENT7_MARK, PS6_1_FN2, PTV1_FN),
+ PINMUX_DATA(A16_MARK, PS6_0_FN1, PTV0_FN),
+ PINMUX_DATA(EVENT6_MARK, PS6_0_FN2, PTV0_FN),
/* PTW FN */
- PINMUX_DATA(A15_MARK, PTW7_FN),
- PINMUX_DATA(A14_MARK, PTW6_FN),
- PINMUX_DATA(A13_MARK, PTW5_FN),
- PINMUX_DATA(A12_MARK, PTW4_FN),
- PINMUX_DATA(A11_MARK, PTW3_FN),
- PINMUX_DATA(A10_MARK, PTW2_FN),
- PINMUX_DATA(A9_MARK, PTW1_FN),
- PINMUX_DATA(A8_MARK, PTW0_FN),
+ PINMUX_DATA(A15_MARK, PS7_15_FN1, PTW7_FN),
+ PINMUX_DATA(EVENT5_MARK, PS7_15_FN2, PTW7_FN),
+ PINMUX_DATA(A14_MARK, PS7_14_FN1, PTW6_FN),
+ PINMUX_DATA(EVENT4_MARK, PS7_14_FN2, PTW6_FN),
+ PINMUX_DATA(A13_MARK, PS7_13_FN1, PTW5_FN),
+ PINMUX_DATA(EVENT3_MARK, PS7_13_FN2, PTW5_FN),
+ PINMUX_DATA(A12_MARK, PS7_12_FN1, PTW4_FN),
+ PINMUX_DATA(EVENT2_MARK, PS7_12_FN2, PTW4_FN),
+ PINMUX_DATA(A11_MARK, PS7_11_FN1, PTW3_FN),
+ PINMUX_DATA(EVENT1_MARK, PS7_11_FN2, PTW3_FN),
+ PINMUX_DATA(A10_MARK, PS7_10_FN1, PTW2_FN),
+ PINMUX_DATA(EVENT0_MARK, PS7_10_FN2, PTW2_FN),
+ PINMUX_DATA(A9_MARK, PS7_9_FN1, PTW1_FN),
+ PINMUX_DATA(CTS4_MARK, PS7_9_FN2, PTW1_FN),
+ PINMUX_DATA(A8_MARK, PS7_8_FN1, PTW0_FN),
+ PINMUX_DATA(CTS2_MARK, PS7_8_FN2, PTW0_FN),
/* PTX FN */
- PINMUX_DATA(A7_MARK, PTX7_FN),
- PINMUX_DATA(A6_MARK, PTX6_FN),
- PINMUX_DATA(A5_MARK, PTX5_FN),
- PINMUX_DATA(A4_MARK, PTX4_FN),
+ PINMUX_DATA(A7_MARK, PS7_7_FN1, PTX7_FN),
+ PINMUX_DATA(RTS2_MARK, PS7_7_FN2, PTX7_FN),
+ PINMUX_DATA(A6_MARK, PS7_6_FN1, PTX6_FN),
+ PINMUX_DATA(SIM_D_MARK, PS7_6_FN2, PTX6_FN),
+ PINMUX_DATA(A5_MARK, PS7_5_FN1, PTX5_FN),
+ PINMUX_DATA(SIM_CLK_MARK, PS7_5_FN2, PTX5_FN),
+ PINMUX_DATA(A4_MARK, PS7_4_FN1, PTX4_FN),
+ PINMUX_DATA(SIM_RST_MARK, PS7_4_FN2, PTX4_FN),
PINMUX_DATA(A3_MARK, PTX3_FN),
PINMUX_DATA(A2_MARK, PTX2_FN),
PINMUX_DATA(A1_MARK, PTX1_FN),
@@ -954,6 +1093,24 @@ static pinmux_enum_t pinmux_data[] = {
PINMUX_DATA(D2_MARK, PTY2_FN),
PINMUX_DATA(D1_MARK, PTY1_FN),
PINMUX_DATA(D0_MARK, PTY0_FN),
+
+ /* PTZ FN */
+ PINMUX_DATA(MMCDAT7_MARK, PS8_15_FN1, PTZ7_FN),
+ PINMUX_DATA(ON_DQ7_MARK, PS8_15_FN2, PTZ7_FN),
+ PINMUX_DATA(MMCDAT6_MARK, PS8_14_FN1, PTZ6_FN),
+ PINMUX_DATA(ON_DQ6_MARK, PS8_14_FN2, PTZ6_FN),
+ PINMUX_DATA(MMCDAT5_MARK, PS8_13_FN1, PTZ5_FN),
+ PINMUX_DATA(ON_DQ5_MARK, PS8_13_FN2, PTZ5_FN),
+ PINMUX_DATA(MMCDAT4_MARK, PS8_12_FN1, PTZ4_FN),
+ PINMUX_DATA(ON_DQ4_MARK, PS8_12_FN2, PTZ4_FN),
+ PINMUX_DATA(MMCDAT3_MARK, PS8_11_FN1, PTZ3_FN),
+ PINMUX_DATA(ON_DQ3_MARK, PS8_11_FN2, PTZ3_FN),
+ PINMUX_DATA(MMCDAT2_MARK, PS8_10_FN1, PTZ2_FN),
+ PINMUX_DATA(ON_DQ2_MARK, PS8_10_FN2, PTZ2_FN),
+ PINMUX_DATA(MMCDAT1_MARK, PS8_9_FN1, PTZ1_FN),
+ PINMUX_DATA(ON_DQ1_MARK, PS8_9_FN2, PTZ1_FN),
+ PINMUX_DATA(MMCDAT0_MARK, PS8_8_FN1, PTZ0_FN),
+ PINMUX_DATA(ON_DQ0_MARK, PS8_8_FN2, PTZ0_FN),
};
static struct pinmux_gpio pinmux_gpios[] = {
@@ -1048,7 +1205,6 @@ static struct pinmux_gpio pinmux_gpios[] = {
PINMUX_GPIO(GPIO_PTI0, PTI0_DATA),
/* PTJ */
- PINMUX_GPIO(GPIO_PTJ7, PTJ7_DATA),
PINMUX_GPIO(GPIO_PTJ6, PTJ6_DATA),
PINMUX_GPIO(GPIO_PTJ5, PTJ5_DATA),
PINMUX_GPIO(GPIO_PTJ4, PTJ4_DATA),
@@ -1068,7 +1224,6 @@ static struct pinmux_gpio pinmux_gpios[] = {
PINMUX_GPIO(GPIO_PTK0, PTK0_DATA),
/* PTL */
- PINMUX_GPIO(GPIO_PTL7, PTL7_DATA),
PINMUX_GPIO(GPIO_PTL6, PTL6_DATA),
PINMUX_GPIO(GPIO_PTL5, PTL5_DATA),
PINMUX_GPIO(GPIO_PTL4, PTL4_DATA),
@@ -1078,6 +1233,7 @@ static struct pinmux_gpio pinmux_gpios[] = {
PINMUX_GPIO(GPIO_PTL0, PTL0_DATA),
/* PTM */
+ PINMUX_GPIO(GPIO_PTM7, PTM7_DATA),
PINMUX_GPIO(GPIO_PTM6, PTM6_DATA),
PINMUX_GPIO(GPIO_PTM5, PTM5_DATA),
PINMUX_GPIO(GPIO_PTM4, PTM4_DATA),
@@ -1087,7 +1243,6 @@ static struct pinmux_gpio pinmux_gpios[] = {
PINMUX_GPIO(GPIO_PTM0, PTM0_DATA),
/* PTN */
- PINMUX_GPIO(GPIO_PTN7, PTN7_DATA),
PINMUX_GPIO(GPIO_PTN6, PTN6_DATA),
PINMUX_GPIO(GPIO_PTN5, PTN5_DATA),
PINMUX_GPIO(GPIO_PTN4, PTN4_DATA),
@@ -1107,6 +1262,7 @@ static struct pinmux_gpio pinmux_gpios[] = {
PINMUX_GPIO(GPIO_PTO0, PTO0_DATA),
/* PTP */
+ PINMUX_GPIO(GPIO_PTP7, PTP7_DATA),
PINMUX_GPIO(GPIO_PTP6, PTP6_DATA),
PINMUX_GPIO(GPIO_PTP5, PTP5_DATA),
PINMUX_GPIO(GPIO_PTP4, PTP4_DATA),
@@ -1145,6 +1301,8 @@ static struct pinmux_gpio pinmux_gpios[] = {
PINMUX_GPIO(GPIO_PTS0, PTS0_DATA),
/* PTT */
+ PINMUX_GPIO(GPIO_PTT7, PTT7_DATA),
+ PINMUX_GPIO(GPIO_PTT6, PTT6_DATA),
PINMUX_GPIO(GPIO_PTT5, PTT5_DATA),
PINMUX_GPIO(GPIO_PTT4, PTT4_DATA),
PINMUX_GPIO(GPIO_PTT3, PTT3_DATA),
@@ -1212,54 +1370,35 @@ static struct pinmux_gpio pinmux_gpios[] = {
PINMUX_GPIO(GPIO_PTZ1, PTZ1_DATA),
PINMUX_GPIO(GPIO_PTZ0, PTZ0_DATA),
- /* PTA (mobule: LBSC, CPG, LPC) */
+ /* PTA (module: LBSC, RGMII) */
PINMUX_GPIO(GPIO_FN_BS, BS_MARK),
PINMUX_GPIO(GPIO_FN_RDWR, RDWR_MARK),
PINMUX_GPIO(GPIO_FN_WE1, WE1_MARK),
PINMUX_GPIO(GPIO_FN_RDY, RDY_MARK),
- PINMUX_GPIO(GPIO_FN_MD10, MD10_MARK),
- PINMUX_GPIO(GPIO_FN_MD9, MD9_MARK),
- PINMUX_GPIO(GPIO_FN_MD8, MD8_MARK),
- PINMUX_GPIO(GPIO_FN_LGPIO7, LGPIO7_MARK),
- PINMUX_GPIO(GPIO_FN_LGPIO6, LGPIO6_MARK),
- PINMUX_GPIO(GPIO_FN_LGPIO5, LGPIO5_MARK),
- PINMUX_GPIO(GPIO_FN_LGPIO4, LGPIO4_MARK),
- PINMUX_GPIO(GPIO_FN_LGPIO3, LGPIO3_MARK),
- PINMUX_GPIO(GPIO_FN_LGPIO2, LGPIO2_MARK),
- PINMUX_GPIO(GPIO_FN_LGPIO1, LGPIO1_MARK),
- PINMUX_GPIO(GPIO_FN_LGPIO0, LGPIO0_MARK),
-
- /* PTB (mobule: LBSC, EtherC, SIM, LPC) */
- PINMUX_GPIO(GPIO_FN_D15, D15_MARK),
- PINMUX_GPIO(GPIO_FN_D14, D14_MARK),
- PINMUX_GPIO(GPIO_FN_D13, D13_MARK),
- PINMUX_GPIO(GPIO_FN_D12, D12_MARK),
- PINMUX_GPIO(GPIO_FN_D11, D11_MARK),
- PINMUX_GPIO(GPIO_FN_D10, D10_MARK),
- PINMUX_GPIO(GPIO_FN_D9, D9_MARK),
- PINMUX_GPIO(GPIO_FN_D8, D8_MARK),
PINMUX_GPIO(GPIO_FN_ET0_MDC, ET0_MDC_MARK),
- PINMUX_GPIO(GPIO_FN_ET0_MDIO, ET0_MDIO_MARK),
+ PINMUX_GPIO(GPIO_FN_ET0_MDIO, ET0_MDIO_MARK),
PINMUX_GPIO(GPIO_FN_ET1_MDC, ET1_MDC_MARK),
- PINMUX_GPIO(GPIO_FN_ET1_MDIO, ET1_MDIO_MARK),
- PINMUX_GPIO(GPIO_FN_WPSZ1, WPSZ1_MARK),
- PINMUX_GPIO(GPIO_FN_WPSZ0, WPSZ0_MARK),
- PINMUX_GPIO(GPIO_FN_FWID, FWID_MARK),
- PINMUX_GPIO(GPIO_FN_FLSHSZ, FLSHSZ_MARK),
- PINMUX_GPIO(GPIO_FN_LPC_SPIEN, LPC_SPIEN_MARK),
- PINMUX_GPIO(GPIO_FN_BASEL, BASEL_MARK),
-
- /* PTC (mobule: SD) */
- PINMUX_GPIO(GPIO_FN_SD_WP, SD_WP_MARK),
- PINMUX_GPIO(GPIO_FN_SD_CD, SD_CD_MARK),
- PINMUX_GPIO(GPIO_FN_SD_CLK, SD_CLK_MARK),
- PINMUX_GPIO(GPIO_FN_SD_CMD, SD_CMD_MARK),
- PINMUX_GPIO(GPIO_FN_SD_D3, SD_D3_MARK),
- PINMUX_GPIO(GPIO_FN_SD_D2, SD_D2_MARK),
- PINMUX_GPIO(GPIO_FN_SD_D1, SD_D1_MARK),
- PINMUX_GPIO(GPIO_FN_SD_D0, SD_D0_MARK),
+ PINMUX_GPIO(GPIO_FN_ET1_MDIO, ET1_MDIO_MARK),
- /* PTD (mobule: INTC, SPI0, LBSC, CPG, ADC) */
+ /* PTB (module: INTC, ONFI, TMU) */
+ PINMUX_GPIO(GPIO_FN_IRQ15, IRQ15_MARK),
+ PINMUX_GPIO(GPIO_FN_IRQ14, IRQ14_MARK),
+ PINMUX_GPIO(GPIO_FN_IRQ13, IRQ13_MARK),
+ PINMUX_GPIO(GPIO_FN_IRQ12, IRQ12_MARK),
+ PINMUX_GPIO(GPIO_FN_IRQ11, IRQ11_MARK),
+ PINMUX_GPIO(GPIO_FN_IRQ10, IRQ10_MARK),
+ PINMUX_GPIO(GPIO_FN_IRQ9, IRQ9_MARK),
+ PINMUX_GPIO(GPIO_FN_IRQ8, IRQ8_MARK),
+ PINMUX_GPIO(GPIO_FN_ON_NRE, ON_NRE_MARK),
+ PINMUX_GPIO(GPIO_FN_ON_NWE, ON_NWE_MARK),
+ PINMUX_GPIO(GPIO_FN_ON_NWP, ON_NWP_MARK),
+ PINMUX_GPIO(GPIO_FN_ON_NCE0, ON_NCE0_MARK),
+ PINMUX_GPIO(GPIO_FN_ON_R_B0, ON_R_B0_MARK),
+ PINMUX_GPIO(GPIO_FN_ON_ALE, ON_ALE_MARK),
+ PINMUX_GPIO(GPIO_FN_ON_CLE, ON_CLE_MARK),
+ PINMUX_GPIO(GPIO_FN_TCLK, TCLK_MARK),
+
+ /* PTC (module: IRQ, PWMU) */
PINMUX_GPIO(GPIO_FN_IRQ7, IRQ7_MARK),
PINMUX_GPIO(GPIO_FN_IRQ6, IRQ6_MARK),
PINMUX_GPIO(GPIO_FN_IRQ5, IRQ5_MARK),
@@ -1268,80 +1407,102 @@ static struct pinmux_gpio pinmux_gpios[] = {
PINMUX_GPIO(GPIO_FN_IRQ2, IRQ2_MARK),
PINMUX_GPIO(GPIO_FN_IRQ1, IRQ1_MARK),
PINMUX_GPIO(GPIO_FN_IRQ0, IRQ0_MARK),
- PINMUX_GPIO(GPIO_FN_MD6, MD6_MARK),
- PINMUX_GPIO(GPIO_FN_MD5, MD5_MARK),
- PINMUX_GPIO(GPIO_FN_MD3, MD3_MARK),
- PINMUX_GPIO(GPIO_FN_MD2, MD2_MARK),
- PINMUX_GPIO(GPIO_FN_MD1, MD1_MARK),
- PINMUX_GPIO(GPIO_FN_MD0, MD0_MARK),
- PINMUX_GPIO(GPIO_FN_ADTRG1, ADTRG1_MARK),
- PINMUX_GPIO(GPIO_FN_ADTRG0, ADTRG0_MARK),
+ PINMUX_GPIO(GPIO_FN_PWMU0, PWMU0_MARK),
+ PINMUX_GPIO(GPIO_FN_PWMU1, PWMU1_MARK),
+ PINMUX_GPIO(GPIO_FN_PWMU2, PWMU2_MARK),
+ PINMUX_GPIO(GPIO_FN_PWMU3, PWMU3_MARK),
+ PINMUX_GPIO(GPIO_FN_PWMU4, PWMU4_MARK),
+ PINMUX_GPIO(GPIO_FN_PWMU5, PWMU5_MARK),
+
+ /* PTD (module: SPI0, DMAC) */
+ PINMUX_GPIO(GPIO_FN_SP0_MOSI, SP0_MOSI_MARK),
+ PINMUX_GPIO(GPIO_FN_SP0_MISO, SP0_MISO_MARK),
+ PINMUX_GPIO(GPIO_FN_SP0_SCK, SP0_SCK_MARK),
+ PINMUX_GPIO(GPIO_FN_SP0_SCK_FB, SP0_SCK_FB_MARK),
+ PINMUX_GPIO(GPIO_FN_SP0_SS0, SP0_SS0_MARK),
+ PINMUX_GPIO(GPIO_FN_SP0_SS1, SP0_SS1_MARK),
+ PINMUX_GPIO(GPIO_FN_SP0_SS2, SP0_SS2_MARK),
+ PINMUX_GPIO(GPIO_FN_SP0_SS3, SP0_SS3_MARK),
+ PINMUX_GPIO(GPIO_FN_DREQ0, DREQ0_MARK),
+ PINMUX_GPIO(GPIO_FN_DACK0, DACK0_MARK),
+ PINMUX_GPIO(GPIO_FN_TEND0, TEND0_MARK),
- /* PTE (mobule: EtherC) */
- PINMUX_GPIO(GPIO_FN_ET0_CRS_DV, ET0_CRS_DV_MARK),
- PINMUX_GPIO(GPIO_FN_ET0_TXD1, ET0_TXD1_MARK),
- PINMUX_GPIO(GPIO_FN_ET0_TXD0, ET0_TXD0_MARK),
- PINMUX_GPIO(GPIO_FN_ET0_TX_EN, ET0_TX_EN_MARK),
- PINMUX_GPIO(GPIO_FN_ET0_REF_CLK, ET0_REF_CLK_MARK),
- PINMUX_GPIO(GPIO_FN_ET0_RXD1, ET0_RXD1_MARK),
- PINMUX_GPIO(GPIO_FN_ET0_RXD0, ET0_RXD0_MARK),
- PINMUX_GPIO(GPIO_FN_ET0_RX_ER, ET0_RX_ER_MARK),
-
- /* PTF (mobule: EtherC) */
- PINMUX_GPIO(GPIO_FN_ET1_CRS_DV, ET1_CRS_DV_MARK),
- PINMUX_GPIO(GPIO_FN_ET1_TXD1, ET1_TXD1_MARK),
- PINMUX_GPIO(GPIO_FN_ET1_TXD0, ET1_TXD0_MARK),
- PINMUX_GPIO(GPIO_FN_ET1_TX_EN, ET1_TX_EN_MARK),
- PINMUX_GPIO(GPIO_FN_ET1_REF_CLK, ET1_REF_CLK_MARK),
- PINMUX_GPIO(GPIO_FN_ET1_RXD1, ET1_RXD1_MARK),
- PINMUX_GPIO(GPIO_FN_ET1_RXD0, ET1_RXD0_MARK),
- PINMUX_GPIO(GPIO_FN_ET1_RX_ER, ET1_RX_ER_MARK),
-
- /* PTG (mobule: SYSTEM, PWMX, LPC) */
- PINMUX_GPIO(GPIO_FN_STATUS0, STATUS0_MARK),
- PINMUX_GPIO(GPIO_FN_STATUS1, STATUS1_MARK),
- PINMUX_GPIO(GPIO_FN_PWX0, PWX0_MARK),
- PINMUX_GPIO(GPIO_FN_PWX1, PWX1_MARK),
- PINMUX_GPIO(GPIO_FN_PWX2, PWX2_MARK),
- PINMUX_GPIO(GPIO_FN_PWX3, PWX3_MARK),
+ /* PTE (module: RMII) */
+ PINMUX_GPIO(GPIO_FN_RMII0_CRS_DV, RMII0_CRS_DV_MARK),
+ PINMUX_GPIO(GPIO_FN_RMII0_TXD1, RMII0_TXD1_MARK),
+ PINMUX_GPIO(GPIO_FN_RMII0_TXD0, RMII0_TXD0_MARK),
+ PINMUX_GPIO(GPIO_FN_RMII0_TXEN, RMII0_TXEN_MARK),
+ PINMUX_GPIO(GPIO_FN_RMII0_REFCLK, RMII0_REFCLK_MARK),
+ PINMUX_GPIO(GPIO_FN_RMII0_RXD1, RMII0_RXD1_MARK),
+ PINMUX_GPIO(GPIO_FN_RMII0_RXD0, RMII0_RXD0_MARK),
+ PINMUX_GPIO(GPIO_FN_RMII0_RX_ER, RMII0_RX_ER_MARK),
+
+ /* PTF (module: RMII, SerMux) */
+ PINMUX_GPIO(GPIO_FN_RMII1_CRS_DV, RMII1_CRS_DV_MARK),
+ PINMUX_GPIO(GPIO_FN_RMII1_TXD1, RMII1_TXD1_MARK),
+ PINMUX_GPIO(GPIO_FN_RMII1_TXD0, RMII1_TXD0_MARK),
+ PINMUX_GPIO(GPIO_FN_RMII1_TXEN, RMII1_TXEN_MARK),
+ PINMUX_GPIO(GPIO_FN_RMII1_REFCLK, RMII1_REFCLK_MARK),
+ PINMUX_GPIO(GPIO_FN_RMII1_RXD1, RMII1_RXD1_MARK),
+ PINMUX_GPIO(GPIO_FN_RMII1_RXD0, RMII1_RXD0_MARK),
+ PINMUX_GPIO(GPIO_FN_RMII1_RX_ER, RMII1_RX_ER_MARK),
+ PINMUX_GPIO(GPIO_FN_RAC_RI, RAC_RI_MARK),
+
+ /* PTG (module: system, LBSC, LPC, WDT, eMMC) */
+ PINMUX_GPIO(GPIO_FN_BOOTFMS, BOOTFMS_MARK),
+ PINMUX_GPIO(GPIO_FN_BOOTWP, BOOTWP_MARK),
+ PINMUX_GPIO(GPIO_FN_A25, A25_MARK),
+ PINMUX_GPIO(GPIO_FN_A24, A24_MARK),
PINMUX_GPIO(GPIO_FN_SERIRQ, SERIRQ_MARK),
- PINMUX_GPIO(GPIO_FN_CLKRUN, CLKRUN_MARK),
+ PINMUX_GPIO(GPIO_FN_WDTOVF, WDTOVF_MARK),
PINMUX_GPIO(GPIO_FN_LPCPD, LPCPD_MARK),
PINMUX_GPIO(GPIO_FN_LDRQ, LDRQ_MARK),
+ PINMUX_GPIO(GPIO_FN_MMCCLK, MMCCLK_MARK),
+ PINMUX_GPIO(GPIO_FN_MMCCMD, MMCCMD_MARK),
- /* PTH (mobule: TMU, SCIF234, SPI1, SPI0) */
- PINMUX_GPIO(GPIO_FN_TCLK, TCLK_MARK),
- PINMUX_GPIO(GPIO_FN_RXD4, RXD4_MARK),
- PINMUX_GPIO(GPIO_FN_TXD4, TXD4_MARK),
+ /* PTH (module: SPI1, LPC, DMAC, ADC) */
PINMUX_GPIO(GPIO_FN_SP1_MOSI, SP1_MOSI_MARK),
PINMUX_GPIO(GPIO_FN_SP1_MISO, SP1_MISO_MARK),
PINMUX_GPIO(GPIO_FN_SP1_SCK, SP1_SCK_MARK),
PINMUX_GPIO(GPIO_FN_SP1_SCK_FB, SP1_SCK_FB_MARK),
PINMUX_GPIO(GPIO_FN_SP1_SS0, SP1_SS0_MARK),
PINMUX_GPIO(GPIO_FN_SP1_SS1, SP1_SS1_MARK),
- PINMUX_GPIO(GPIO_FN_SP0_SS1, SP0_SS1_MARK),
+ PINMUX_GPIO(GPIO_FN_WP, WP_MARK),
+ PINMUX_GPIO(GPIO_FN_FMS0, FMS0_MARK),
+ PINMUX_GPIO(GPIO_FN_TEND1, TEND1_MARK),
+ PINMUX_GPIO(GPIO_FN_DREQ1, DREQ1_MARK),
+ PINMUX_GPIO(GPIO_FN_DACK1, DACK1_MARK),
+ PINMUX_GPIO(GPIO_FN_ADTRG1, ADTRG1_MARK),
+ PINMUX_GPIO(GPIO_FN_ADTRG0, ADTRG0_MARK),
- /* PTI (mobule: INTC) */
- PINMUX_GPIO(GPIO_FN_IRQ15, IRQ15_MARK),
- PINMUX_GPIO(GPIO_FN_IRQ14, IRQ14_MARK),
- PINMUX_GPIO(GPIO_FN_IRQ13, IRQ13_MARK),
- PINMUX_GPIO(GPIO_FN_IRQ12, IRQ12_MARK),
- PINMUX_GPIO(GPIO_FN_IRQ11, IRQ11_MARK),
- PINMUX_GPIO(GPIO_FN_IRQ10, IRQ10_MARK),
- PINMUX_GPIO(GPIO_FN_IRQ9, IRQ9_MARK),
- PINMUX_GPIO(GPIO_FN_IRQ8, IRQ8_MARK),
+ /* PTI (module: LBSC, SDHI) */
+ PINMUX_GPIO(GPIO_FN_D15, D15_MARK),
+ PINMUX_GPIO(GPIO_FN_D14, D14_MARK),
+ PINMUX_GPIO(GPIO_FN_D13, D13_MARK),
+ PINMUX_GPIO(GPIO_FN_D12, D12_MARK),
+ PINMUX_GPIO(GPIO_FN_D11, D11_MARK),
+ PINMUX_GPIO(GPIO_FN_D10, D10_MARK),
+ PINMUX_GPIO(GPIO_FN_D9, D9_MARK),
+ PINMUX_GPIO(GPIO_FN_D8, D8_MARK),
+ PINMUX_GPIO(GPIO_FN_SD_WP, SD_WP_MARK),
+ PINMUX_GPIO(GPIO_FN_SD_CD, SD_CD_MARK),
+ PINMUX_GPIO(GPIO_FN_SD_CLK, SD_CLK_MARK),
+ PINMUX_GPIO(GPIO_FN_SD_CMD, SD_CMD_MARK),
+ PINMUX_GPIO(GPIO_FN_SD_D3, SD_D3_MARK),
+ PINMUX_GPIO(GPIO_FN_SD_D2, SD_D2_MARK),
+ PINMUX_GPIO(GPIO_FN_SD_D1, SD_D1_MARK),
+ PINMUX_GPIO(GPIO_FN_SD_D0, SD_D0_MARK),
/* PTJ (mobule: SCIF234, SERMUX) */
- PINMUX_GPIO(GPIO_FN_RXD3, RXD3_MARK),
+ PINMUX_GPIO(GPIO_FN_RTS3, RTS3_MARK),
+ PINMUX_GPIO(GPIO_FN_CTS3, CTS3_MARK),
PINMUX_GPIO(GPIO_FN_TXD3, TXD3_MARK),
- PINMUX_GPIO(GPIO_FN_RXD2, RXD2_MARK),
- PINMUX_GPIO(GPIO_FN_TXD2, TXD2_MARK),
- PINMUX_GPIO(GPIO_FN_COM1_TXD, COM1_TXD_MARK),
- PINMUX_GPIO(GPIO_FN_COM1_RXD, COM1_RXD_MARK),
- PINMUX_GPIO(GPIO_FN_COM1_RTS, COM1_RTS_MARK),
- PINMUX_GPIO(GPIO_FN_COM1_CTS, COM1_CTS_MARK),
+ PINMUX_GPIO(GPIO_FN_RXD3, RXD3_MARK),
+ PINMUX_GPIO(GPIO_FN_RTS4, RTS4_MARK),
+ PINMUX_GPIO(GPIO_FN_RXD4, RXD4_MARK),
+ PINMUX_GPIO(GPIO_FN_TXD4, TXD4_MARK),
- /* PTK (mobule: SERMUX) */
+ /* PTK (module: SERMUX, LBSC, SCIF) */
PINMUX_GPIO(GPIO_FN_COM2_TXD, COM2_TXD_MARK),
PINMUX_GPIO(GPIO_FN_COM2_RXD, COM2_RXD_MARK),
PINMUX_GPIO(GPIO_FN_COM2_RTS, COM2_RTS_MARK),
@@ -1349,62 +1510,65 @@ static struct pinmux_gpio pinmux_gpios[] = {
PINMUX_GPIO(GPIO_FN_COM2_DTR, COM2_DTR_MARK),
PINMUX_GPIO(GPIO_FN_COM2_DSR, COM2_DSR_MARK),
PINMUX_GPIO(GPIO_FN_COM2_DCD, COM2_DCD_MARK),
- PINMUX_GPIO(GPIO_FN_COM2_RI, COM2_RI_MARK),
+ PINMUX_GPIO(GPIO_FN_CLKOUT, CLKOUT_MARK),
+ PINMUX_GPIO(GPIO_FN_SCK2, SCK2_MARK),
+ PINMUX_GPIO(GPIO_FN_SCK4, SCK4_MARK),
+ PINMUX_GPIO(GPIO_FN_SCK3, SCK3_MARK),
- /* PTL (mobule: SERMUX) */
- PINMUX_GPIO(GPIO_FN_RAC_TXD, RAC_TXD_MARK),
+ /* PTL (module: SERMUX, SCIF, LBSC, AUD) */
PINMUX_GPIO(GPIO_FN_RAC_RXD, RAC_RXD_MARK),
PINMUX_GPIO(GPIO_FN_RAC_RTS, RAC_RTS_MARK),
PINMUX_GPIO(GPIO_FN_RAC_CTS, RAC_CTS_MARK),
PINMUX_GPIO(GPIO_FN_RAC_DTR, RAC_DTR_MARK),
PINMUX_GPIO(GPIO_FN_RAC_DSR, RAC_DSR_MARK),
PINMUX_GPIO(GPIO_FN_RAC_DCD, RAC_DCD_MARK),
- PINMUX_GPIO(GPIO_FN_RAC_RI, RAC_RI_MARK),
+ PINMUX_GPIO(GPIO_FN_RAC_TXD, RAC_TXD_MARK),
+ PINMUX_GPIO(GPIO_FN_RXD2, RXD2_MARK),
+ PINMUX_GPIO(GPIO_FN_CS5, CS5_MARK),
+ PINMUX_GPIO(GPIO_FN_CS6, CS6_MARK),
+ PINMUX_GPIO(GPIO_FN_AUDSYNC, AUDSYNC_MARK),
+ PINMUX_GPIO(GPIO_FN_AUDCK, AUDCK_MARK),
+ PINMUX_GPIO(GPIO_FN_TXD2, TXD2_MARK),
- /* PTM (mobule: IIC, LPC) */
+ /* PTM (module: LBSC, IIC) */
+ PINMUX_GPIO(GPIO_FN_CS4, CS4_MARK),
+ PINMUX_GPIO(GPIO_FN_RD, RD_MARK),
+ PINMUX_GPIO(GPIO_FN_WE0, WE0_MARK),
+ PINMUX_GPIO(GPIO_FN_CS0, CS0_MARK),
PINMUX_GPIO(GPIO_FN_SDA6, SDA6_MARK),
PINMUX_GPIO(GPIO_FN_SCL6, SCL6_MARK),
PINMUX_GPIO(GPIO_FN_SDA7, SDA7_MARK),
PINMUX_GPIO(GPIO_FN_SCL7, SCL7_MARK),
- PINMUX_GPIO(GPIO_FN_WP, WP_MARK),
- PINMUX_GPIO(GPIO_FN_FMS0, FMS0_MARK),
- PINMUX_GPIO(GPIO_FN_FMS1, FMS1_MARK),
- /* PTN (mobule: SCIF234, EVC) */
- PINMUX_GPIO(GPIO_FN_SCK2, SCK2_MARK),
- PINMUX_GPIO(GPIO_FN_RTS4, RTS4_MARK),
- PINMUX_GPIO(GPIO_FN_RTS3, RTS3_MARK),
- PINMUX_GPIO(GPIO_FN_RTS2, RTS2_MARK),
- PINMUX_GPIO(GPIO_FN_CTS4, CTS4_MARK),
- PINMUX_GPIO(GPIO_FN_CTS3, CTS3_MARK),
- PINMUX_GPIO(GPIO_FN_CTS2, CTS2_MARK),
- PINMUX_GPIO(GPIO_FN_EVENT7, EVENT7_MARK),
- PINMUX_GPIO(GPIO_FN_EVENT6, EVENT6_MARK),
- PINMUX_GPIO(GPIO_FN_EVENT5, EVENT5_MARK),
- PINMUX_GPIO(GPIO_FN_EVENT4, EVENT4_MARK),
- PINMUX_GPIO(GPIO_FN_EVENT3, EVENT3_MARK),
- PINMUX_GPIO(GPIO_FN_EVENT2, EVENT2_MARK),
- PINMUX_GPIO(GPIO_FN_EVENT1, EVENT1_MARK),
- PINMUX_GPIO(GPIO_FN_EVENT0, EVENT0_MARK),
+ /* PTN (module: USB, JMC, SGPIO, WDT) */
+ PINMUX_GPIO(GPIO_FN_VBUS_EN, VBUS_EN_MARK),
+ PINMUX_GPIO(GPIO_FN_VBUS_OC, VBUS_OC_MARK),
+ PINMUX_GPIO(GPIO_FN_JMCTCK, JMCTCK_MARK),
+ PINMUX_GPIO(GPIO_FN_JMCTMS, JMCTMS_MARK),
+ PINMUX_GPIO(GPIO_FN_JMCTDO, JMCTDO_MARK),
+ PINMUX_GPIO(GPIO_FN_JMCTDI, JMCTDI_MARK),
+ PINMUX_GPIO(GPIO_FN_JMCTRST, JMCTRST_MARK),
+ PINMUX_GPIO(GPIO_FN_SGPIO1_CLK, SGPIO1_CLK_MARK),
+ PINMUX_GPIO(GPIO_FN_SGPIO1_LOAD, SGPIO1_LOAD_MARK),
+ PINMUX_GPIO(GPIO_FN_SGPIO1_DI, SGPIO1_DI_MARK),
+ PINMUX_GPIO(GPIO_FN_SGPIO1_DO, SGPIO1_DO_MARK),
+ PINMUX_GPIO(GPIO_FN_SUB_CLKIN, SUB_CLKIN_MARK),
- /* PTO (mobule: SGPIO) */
+ /* PTO (module: SGPIO, SerMux) */
PINMUX_GPIO(GPIO_FN_SGPIO0_CLK, SGPIO0_CLK_MARK),
PINMUX_GPIO(GPIO_FN_SGPIO0_LOAD, SGPIO0_LOAD_MARK),
PINMUX_GPIO(GPIO_FN_SGPIO0_DI, SGPIO0_DI_MARK),
PINMUX_GPIO(GPIO_FN_SGPIO0_DO, SGPIO0_DO_MARK),
- PINMUX_GPIO(GPIO_FN_SGPIO1_CLK, SGPIO1_CLK_MARK),
- PINMUX_GPIO(GPIO_FN_SGPIO1_LOAD, SGPIO1_LOAD_MARK),
- PINMUX_GPIO(GPIO_FN_SGPIO1_DI, SGPIO1_DI_MARK),
- PINMUX_GPIO(GPIO_FN_SGPIO1_DO, SGPIO1_DO_MARK),
+ PINMUX_GPIO(GPIO_FN_SGPIO2_CLK, SGPIO2_CLK_MARK),
+ PINMUX_GPIO(GPIO_FN_SGPIO2_LOAD, SGPIO2_LOAD_MARK),
+ PINMUX_GPIO(GPIO_FN_SGPIO2_DI, SGPIO2_DI_MARK),
+ PINMUX_GPIO(GPIO_FN_SGPIO2_DO, SGPIO2_DO_MARK),
+ PINMUX_GPIO(GPIO_FN_COM1_TXD, COM1_TXD_MARK),
+ PINMUX_GPIO(GPIO_FN_COM1_RXD, COM1_RXD_MARK),
+ PINMUX_GPIO(GPIO_FN_COM1_RTS, COM1_RTS_MARK),
+ PINMUX_GPIO(GPIO_FN_COM1_CTS, COM1_CTS_MARK),
- /* PTP (mobule: JMC, SCIF234) */
- PINMUX_GPIO(GPIO_FN_JMCTCK, JMCTCK_MARK),
- PINMUX_GPIO(GPIO_FN_JMCTMS, JMCTMS_MARK),
- PINMUX_GPIO(GPIO_FN_JMCTDO, JMCTDO_MARK),
- PINMUX_GPIO(GPIO_FN_JMCTDI, JMCTDI_MARK),
- PINMUX_GPIO(GPIO_FN_JMCRST, JMCRST_MARK),
- PINMUX_GPIO(GPIO_FN_SCK4, SCK4_MARK),
- PINMUX_GPIO(GPIO_FN_SCK3, SCK3_MARK),
+ /* PTP (module: EVC, ADC) */
/* PTQ (mobule: LPC) */
PINMUX_GPIO(GPIO_FN_LAD3, LAD3_MARK),
@@ -1439,31 +1603,41 @@ static struct pinmux_gpio pinmux_gpios[] = {
PINMUX_GPIO(GPIO_FN_SDA3, SDA3_MARK),
PINMUX_GPIO(GPIO_FN_SCL3, SCL3_MARK),
- /* PTT (mobule: SYSTEM, PWMX) */
- PINMUX_GPIO(GPIO_FN_AUDSYNC, AUDSYNC_MARK),
- PINMUX_GPIO(GPIO_FN_AUDCK, AUDCK_MARK),
+ /* PTT (module: PWMX, AUD) */
+ PINMUX_GPIO(GPIO_FN_PWMX7, PWMX7_MARK),
+ PINMUX_GPIO(GPIO_FN_PWMX6, PWMX6_MARK),
+ PINMUX_GPIO(GPIO_FN_PWMX5, PWMX5_MARK),
+ PINMUX_GPIO(GPIO_FN_PWMX4, PWMX4_MARK),
+ PINMUX_GPIO(GPIO_FN_PWMX3, PWMX3_MARK),
+ PINMUX_GPIO(GPIO_FN_PWMX2, PWMX2_MARK),
+ PINMUX_GPIO(GPIO_FN_PWMX1, PWMX1_MARK),
+ PINMUX_GPIO(GPIO_FN_PWMX0, PWMX0_MARK),
PINMUX_GPIO(GPIO_FN_AUDATA3, AUDATA3_MARK),
PINMUX_GPIO(GPIO_FN_AUDATA2, AUDATA2_MARK),
PINMUX_GPIO(GPIO_FN_AUDATA1, AUDATA1_MARK),
PINMUX_GPIO(GPIO_FN_AUDATA0, AUDATA0_MARK),
- PINMUX_GPIO(GPIO_FN_PWX7, PWX7_MARK),
- PINMUX_GPIO(GPIO_FN_PWX6, PWX6_MARK),
- PINMUX_GPIO(GPIO_FN_PWX5, PWX5_MARK),
- PINMUX_GPIO(GPIO_FN_PWX4, PWX4_MARK),
-
- /* PTU (mobule: LBSC, DMAC) */
- PINMUX_GPIO(GPIO_FN_CS6, CS6_MARK),
- PINMUX_GPIO(GPIO_FN_CS5, CS5_MARK),
- PINMUX_GPIO(GPIO_FN_CS4, CS4_MARK),
- PINMUX_GPIO(GPIO_FN_CS0, CS0_MARK),
- PINMUX_GPIO(GPIO_FN_RD, RD_MARK),
- PINMUX_GPIO(GPIO_FN_WE0, WE0_MARK),
- PINMUX_GPIO(GPIO_FN_A25, A25_MARK),
- PINMUX_GPIO(GPIO_FN_A24, A24_MARK),
- PINMUX_GPIO(GPIO_FN_DREQ0, DREQ0_MARK),
- PINMUX_GPIO(GPIO_FN_DACK0, DACK0_MARK),
+ PINMUX_GPIO(GPIO_FN_STATUS1, STATUS1_MARK),
+ PINMUX_GPIO(GPIO_FN_STATUS0, STATUS0_MARK),
- /* PTV (mobule: LBSC, DMAC) */
+ /* PTU (module: LPC, APM) */
+ PINMUX_GPIO(GPIO_FN_LGPIO7, LGPIO7_MARK),
+ PINMUX_GPIO(GPIO_FN_LGPIO6, LGPIO6_MARK),
+ PINMUX_GPIO(GPIO_FN_LGPIO5, LGPIO5_MARK),
+ PINMUX_GPIO(GPIO_FN_LGPIO4, LGPIO4_MARK),
+ PINMUX_GPIO(GPIO_FN_LGPIO3, LGPIO3_MARK),
+ PINMUX_GPIO(GPIO_FN_LGPIO2, LGPIO2_MARK),
+ PINMUX_GPIO(GPIO_FN_LGPIO1, LGPIO1_MARK),
+ PINMUX_GPIO(GPIO_FN_LGPIO0, LGPIO0_MARK),
+ PINMUX_GPIO(GPIO_FN_APMONCTL_O, APMONCTL_O_MARK),
+ PINMUX_GPIO(GPIO_FN_APMPWBTOUT_O, APMPWBTOUT_O_MARK),
+ PINMUX_GPIO(GPIO_FN_APMSCI_O, APMSCI_O_MARK),
+ PINMUX_GPIO(GPIO_FN_APMVDDON, APMVDDON_MARK),
+ PINMUX_GPIO(GPIO_FN_APMSLPBTN, APMSLPBTN_MARK),
+ PINMUX_GPIO(GPIO_FN_APMPWRBTN, APMPWRBTN_MARK),
+ PINMUX_GPIO(GPIO_FN_APMS5N, APMS5N_MARK),
+ PINMUX_GPIO(GPIO_FN_APMS3N, APMS3N_MARK),
+
+ /* PTV (module: LBSC, SerMux, R-SPI, EVC, GRA) */
PINMUX_GPIO(GPIO_FN_A23, A23_MARK),
PINMUX_GPIO(GPIO_FN_A22, A22_MARK),
PINMUX_GPIO(GPIO_FN_A21, A21_MARK),
@@ -1472,12 +1646,20 @@ static struct pinmux_gpio pinmux_gpios[] = {
PINMUX_GPIO(GPIO_FN_A18, A18_MARK),
PINMUX_GPIO(GPIO_FN_A17, A17_MARK),
PINMUX_GPIO(GPIO_FN_A16, A16_MARK),
- PINMUX_GPIO(GPIO_FN_TEND0, TEND0_MARK),
- PINMUX_GPIO(GPIO_FN_DREQ1, DREQ1_MARK),
- PINMUX_GPIO(GPIO_FN_DACK1, DACK1_MARK),
- PINMUX_GPIO(GPIO_FN_TEND1, TEND1_MARK),
+ PINMUX_GPIO(GPIO_FN_COM2_RI, COM2_RI_MARK),
+ PINMUX_GPIO(GPIO_FN_R_SPI_MOSI, R_SPI_MOSI_MARK),
+ PINMUX_GPIO(GPIO_FN_R_SPI_MISO, R_SPI_MISO_MARK),
+ PINMUX_GPIO(GPIO_FN_R_SPI_RSPCK, R_SPI_RSPCK_MARK),
+ PINMUX_GPIO(GPIO_FN_R_SPI_SSL0, R_SPI_SSL0_MARK),
+ PINMUX_GPIO(GPIO_FN_R_SPI_SSL1, R_SPI_SSL1_MARK),
+ PINMUX_GPIO(GPIO_FN_EVENT7, EVENT7_MARK),
+ PINMUX_GPIO(GPIO_FN_EVENT6, EVENT6_MARK),
+ PINMUX_GPIO(GPIO_FN_VBIOS_DI, VBIOS_DI_MARK),
+ PINMUX_GPIO(GPIO_FN_VBIOS_DO, VBIOS_DO_MARK),
+ PINMUX_GPIO(GPIO_FN_VBIOS_CLK, VBIOS_CLK_MARK),
+ PINMUX_GPIO(GPIO_FN_VBIOS_CS, VBIOS_CS_MARK),
- /* PTW (mobule: LBSC) */
+ /* PTW (module: LBSC, EVC, SCIF) */
PINMUX_GPIO(GPIO_FN_A16, A16_MARK),
PINMUX_GPIO(GPIO_FN_A15, A15_MARK),
PINMUX_GPIO(GPIO_FN_A14, A14_MARK),
@@ -1487,6 +1669,14 @@ static struct pinmux_gpio pinmux_gpios[] = {
PINMUX_GPIO(GPIO_FN_A10, A10_MARK),
PINMUX_GPIO(GPIO_FN_A9, A9_MARK),
PINMUX_GPIO(GPIO_FN_A8, A8_MARK),
+ PINMUX_GPIO(GPIO_FN_EVENT5, EVENT5_MARK),
+ PINMUX_GPIO(GPIO_FN_EVENT4, EVENT4_MARK),
+ PINMUX_GPIO(GPIO_FN_EVENT3, EVENT3_MARK),
+ PINMUX_GPIO(GPIO_FN_EVENT2, EVENT2_MARK),
+ PINMUX_GPIO(GPIO_FN_EVENT1, EVENT1_MARK),
+ PINMUX_GPIO(GPIO_FN_EVENT0, EVENT0_MARK),
+ PINMUX_GPIO(GPIO_FN_CTS4, CTS4_MARK),
+ PINMUX_GPIO(GPIO_FN_CTS2, CTS2_MARK),
/* PTX (mobule: LBSC) */
PINMUX_GPIO(GPIO_FN_A7, A7_MARK),
@@ -1497,6 +1687,10 @@ static struct pinmux_gpio pinmux_gpios[] = {
PINMUX_GPIO(GPIO_FN_A2, A2_MARK),
PINMUX_GPIO(GPIO_FN_A1, A1_MARK),
PINMUX_GPIO(GPIO_FN_A0, A0_MARK),
+ PINMUX_GPIO(GPIO_FN_RTS2, RTS2_MARK),
+ PINMUX_GPIO(GPIO_FN_SIM_D, SIM_D_MARK),
+ PINMUX_GPIO(GPIO_FN_SIM_CLK, SIM_CLK_MARK),
+ PINMUX_GPIO(GPIO_FN_SIM_RST, SIM_RST_MARK),
/* PTY (mobule: LBSC) */
PINMUX_GPIO(GPIO_FN_D7, D7_MARK),
@@ -1507,18 +1701,36 @@ static struct pinmux_gpio pinmux_gpios[] = {
PINMUX_GPIO(GPIO_FN_D2, D2_MARK),
PINMUX_GPIO(GPIO_FN_D1, D1_MARK),
PINMUX_GPIO(GPIO_FN_D0, D0_MARK),
+
+ /* PTZ (module: eMMC, ONFI) */
+ PINMUX_GPIO(GPIO_FN_MMCDAT7, MMCDAT7_MARK),
+ PINMUX_GPIO(GPIO_FN_MMCDAT6, MMCDAT6_MARK),
+ PINMUX_GPIO(GPIO_FN_MMCDAT5, MMCDAT5_MARK),
+ PINMUX_GPIO(GPIO_FN_MMCDAT4, MMCDAT4_MARK),
+ PINMUX_GPIO(GPIO_FN_MMCDAT3, MMCDAT3_MARK),
+ PINMUX_GPIO(GPIO_FN_MMCDAT2, MMCDAT2_MARK),
+ PINMUX_GPIO(GPIO_FN_MMCDAT1, MMCDAT1_MARK),
+ PINMUX_GPIO(GPIO_FN_MMCDAT0, MMCDAT0_MARK),
+ PINMUX_GPIO(GPIO_FN_ON_DQ7, ON_DQ7_MARK),
+ PINMUX_GPIO(GPIO_FN_ON_DQ6, ON_DQ6_MARK),
+ PINMUX_GPIO(GPIO_FN_ON_DQ5, ON_DQ5_MARK),
+ PINMUX_GPIO(GPIO_FN_ON_DQ4, ON_DQ4_MARK),
+ PINMUX_GPIO(GPIO_FN_ON_DQ3, ON_DQ3_MARK),
+ PINMUX_GPIO(GPIO_FN_ON_DQ2, ON_DQ2_MARK),
+ PINMUX_GPIO(GPIO_FN_ON_DQ1, ON_DQ1_MARK),
+ PINMUX_GPIO(GPIO_FN_ON_DQ0, ON_DQ0_MARK),
};
static struct pinmux_cfg_reg pinmux_config_regs[] = {
{ PINMUX_CFG_REG("PACR", 0xffec0000, 16, 2) {
- PTA7_FN, PTA7_OUT, PTA7_IN, 0,
- PTA6_FN, PTA6_OUT, PTA6_IN, 0,
- PTA5_FN, PTA5_OUT, PTA5_IN, 0,
- PTA4_FN, PTA4_OUT, PTA4_IN, 0,
- PTA3_FN, PTA3_OUT, PTA3_IN, 0,
- PTA2_FN, PTA2_OUT, PTA2_IN, 0,
- PTA1_FN, PTA1_OUT, PTA1_IN, 0,
- PTA0_FN, PTA0_OUT, PTA0_IN, 0 }
+ PTA7_FN, PTA7_OUT, PTA7_IN, PTA7_IN_PU,
+ PTA6_FN, PTA6_OUT, PTA6_IN, PTA6_IN_PU,
+ PTA5_FN, PTA5_OUT, PTA5_IN, PTA5_IN_PU,
+ PTA4_FN, PTA4_OUT, PTA4_IN, PTA4_IN_PU,
+ PTA3_FN, PTA3_OUT, PTA3_IN, PTA3_IN_PU,
+ PTA2_FN, PTA2_OUT, PTA2_IN, PTA2_IN_PU,
+ PTA1_FN, PTA1_OUT, PTA1_IN, PTA1_IN_PU,
+ PTA0_FN, PTA0_OUT, PTA0_IN, PTA0_IN_PU }
},
{ PINMUX_CFG_REG("PBCR", 0xffec0002, 16, 2) {
PTB7_FN, PTB7_OUT, PTB7_IN, 0,
@@ -1541,125 +1753,126 @@ static struct pinmux_cfg_reg pinmux_config_regs[] = {
PTC0_FN, PTC0_OUT, PTC0_IN, 0 }
},
{ PINMUX_CFG_REG("PDCR", 0xffec0006, 16, 2) {
- PTD7_FN, PTD7_OUT, PTD7_IN, 0,
- PTD6_FN, PTD6_OUT, PTD6_IN, 0,
- PTD5_FN, PTD5_OUT, PTD5_IN, 0,
- PTD4_FN, PTD4_OUT, PTD4_IN, 0,
- PTD3_FN, PTD3_OUT, PTD3_IN, 0,
- PTD2_FN, PTD2_OUT, PTD2_IN, 0,
- PTD1_FN, PTD1_OUT, PTD1_IN, 0,
- PTD0_FN, PTD0_OUT, PTD0_IN, 0 }
+ PTD7_FN, PTD7_OUT, PTD7_IN, PTD7_IN_PU,
+ PTD6_FN, PTD6_OUT, PTD6_IN, PTD6_IN_PU,
+ PTD5_FN, PTD5_OUT, PTD5_IN, PTD5_IN_PU,
+ PTD4_FN, PTD4_OUT, PTD4_IN, PTD4_IN_PU,
+ PTD3_FN, PTD3_OUT, PTD3_IN, PTD3_IN_PU,
+ PTD2_FN, PTD2_OUT, PTD2_IN, PTD2_IN_PU,
+ PTD1_FN, PTD1_OUT, PTD1_IN, PTD1_IN_PU,
+ PTD0_FN, PTD0_OUT, PTD0_IN, PTD0_IN_PU }
},
{ PINMUX_CFG_REG("PECR", 0xffec0008, 16, 2) {
- PTE7_FN, PTE7_OUT, PTE7_IN, 0,
- PTE6_FN, PTE6_OUT, PTE6_IN, 0,
- PTE5_FN, PTE5_OUT, PTE5_IN, 0,
- PTE4_FN, PTE4_OUT, PTE4_IN, 0,
- PTE3_FN, PTE3_OUT, PTE3_IN, 0,
- PTE2_FN, PTE2_OUT, PTE2_IN, 0,
- PTE1_FN, PTE1_OUT, PTE1_IN, 0,
- PTE0_FN, PTE0_OUT, PTE0_IN, 0 }
+ PTE7_FN, PTE7_OUT, PTE7_IN, PTE7_IN_PU,
+ PTE6_FN, PTE6_OUT, PTE6_IN, PTE6_IN_PU,
+ PTE5_FN, PTE5_OUT, PTE5_IN, PTE5_IN_PU,
+ PTE4_FN, PTE4_OUT, PTE4_IN, PTE4_IN_PU,
+ PTE3_FN, PTE3_OUT, PTE3_IN, PTE3_IN_PU,
+ PTE2_FN, PTE2_OUT, PTE2_IN, PTE2_IN_PU,
+ PTE1_FN, PTE1_OUT, PTE1_IN, PTE1_IN_PU,
+ PTE0_FN, PTE0_OUT, PTE0_IN, PTE0_IN_PU }
},
{ PINMUX_CFG_REG("PFCR", 0xffec000a, 16, 2) {
- PTF7_FN, PTF7_OUT, PTF7_IN, 0,
- PTF6_FN, PTF6_OUT, PTF6_IN, 0,
- PTF5_FN, PTF5_OUT, PTF5_IN, 0,
- PTF4_FN, PTF4_OUT, PTF4_IN, 0,
- PTF3_FN, PTF3_OUT, PTF3_IN, 0,
- PTF2_FN, PTF2_OUT, PTF2_IN, 0,
- PTF1_FN, PTF1_OUT, PTF1_IN, 0,
- PTF0_FN, PTF0_OUT, PTF0_IN, 0 }
+ PTF7_FN, PTF7_OUT, PTF7_IN, PTF7_IN_PU,
+ PTF6_FN, PTF6_OUT, PTF6_IN, PTF6_IN_PU,
+ PTF5_FN, PTF5_OUT, PTF5_IN, PTF5_IN_PU,
+ PTF4_FN, PTF4_OUT, PTF4_IN, PTF4_IN_PU,
+ PTF3_FN, PTF3_OUT, PTF3_IN, PTF3_IN_PU,
+ PTF2_FN, PTF2_OUT, PTF2_IN, PTF2_IN_PU,
+ PTF1_FN, PTF1_OUT, PTF1_IN, PTF1_IN_PU,
+ PTF0_FN, PTF0_OUT, PTF0_IN, PTF0_IN_PU }
},
{ PINMUX_CFG_REG("PGCR", 0xffec000c, 16, 2) {
- PTG7_FN, PTG7_OUT, PTG7_IN, 0,
- PTG6_FN, PTG6_OUT, PTG6_IN, 0,
+ PTG7_FN, PTG7_OUT, PTG7_IN, PTG7_IN_PU,
+ PTG6_FN, PTG6_OUT, PTG6_IN, PTG6_IN_PU,
PTG5_FN, PTG5_OUT, PTG5_IN, 0,
- PTG4_FN, PTG4_OUT, PTG4_IN, 0,
+ PTG4_FN, PTG4_OUT, PTG4_IN, PTG4_IN_PU,
PTG3_FN, PTG3_OUT, PTG3_IN, 0,
PTG2_FN, PTG2_OUT, PTG2_IN, 0,
PTG1_FN, PTG1_OUT, PTG1_IN, 0,
PTG0_FN, PTG0_OUT, PTG0_IN, 0 }
},
{ PINMUX_CFG_REG("PHCR", 0xffec000e, 16, 2) {
- PTH7_FN, PTH7_OUT, PTH7_IN, 0,
- PTH6_FN, PTH6_OUT, PTH6_IN, 0,
- PTH5_FN, PTH5_OUT, PTH5_IN, 0,
- PTH4_FN, PTH4_OUT, PTH4_IN, 0,
- PTH3_FN, PTH3_OUT, PTH3_IN, 0,
- PTH2_FN, PTH2_OUT, PTH2_IN, 0,
- PTH1_FN, PTH1_OUT, PTH1_IN, 0,
- PTH0_FN, PTH0_OUT, PTH0_IN, 0 }
+ PTH7_FN, PTH7_OUT, PTH7_IN, PTH7_IN_PU,
+ PTH6_FN, PTH6_OUT, PTH6_IN, PTH6_IN_PU,
+ PTH5_FN, PTH5_OUT, PTH5_IN, PTH5_IN_PU,
+ PTH4_FN, PTH4_OUT, PTH4_IN, PTH4_IN_PU,
+ PTH3_FN, PTH3_OUT, PTH3_IN, PTH3_IN_PU,
+ PTH2_FN, PTH2_OUT, PTH2_IN, PTH2_IN_PU,
+ PTH1_FN, PTH1_OUT, PTH1_IN, PTH1_IN_PU,
+ PTH0_FN, PTH0_OUT, PTH0_IN, PTH0_IN_PU }
},
{ PINMUX_CFG_REG("PICR", 0xffec0010, 16, 2) {
- PTI7_FN, PTI7_OUT, PTI7_IN, 0,
- PTI6_FN, PTI6_OUT, PTI6_IN, 0,
+ PTI7_FN, PTI7_OUT, PTI7_IN, PTI7_IN_PU,
+ PTI6_FN, PTI6_OUT, PTI6_IN, PTI6_IN_PU,
PTI5_FN, PTI5_OUT, PTI5_IN, 0,
- PTI4_FN, PTI4_OUT, PTI4_IN, 0,
- PTI3_FN, PTI3_OUT, PTI3_IN, 0,
- PTI2_FN, PTI2_OUT, PTI2_IN, 0,
- PTI1_FN, PTI1_OUT, PTI1_IN, 0,
- PTI0_FN, PTI0_OUT, PTI0_IN, 0 }
+ PTI4_FN, PTI4_OUT, PTI4_IN, PTI4_IN_PU,
+ PTI3_FN, PTI3_OUT, PTI3_IN, PTI3_IN_PU,
+ PTI2_FN, PTI2_OUT, PTI2_IN, PTI2_IN_PU,
+ PTI1_FN, PTI1_OUT, PTI1_IN, PTI1_IN_PU,
+ PTI0_FN, PTI0_OUT, PTI0_IN, PTI0_IN_PU }
},
{ PINMUX_CFG_REG("PJCR", 0xffec0012, 16, 2) {
- PTJ7_FN, PTJ7_OUT, PTJ7_IN, 0,
- PTJ6_FN, PTJ6_OUT, PTJ6_IN, 0,
- PTJ5_FN, PTJ5_OUT, PTJ5_IN, 0,
- PTJ4_FN, PTJ4_OUT, PTJ4_IN, 0,
- PTJ3_FN, PTJ3_OUT, PTJ3_IN, 0,
- PTJ2_FN, PTJ2_OUT, PTJ2_IN, 0,
- PTJ1_FN, PTJ1_OUT, PTJ1_IN, 0,
- PTJ0_FN, PTJ0_OUT, PTJ0_IN, 0 }
+ 0, 0, 0, 0, /* reserved: always set 1 */
+ PTJ6_FN, PTJ6_OUT, PTJ6_IN, PTJ6_IN_PU,
+ PTJ5_FN, PTJ5_OUT, PTJ5_IN, PTJ5_IN_PU,
+ PTJ4_FN, PTJ4_OUT, PTJ4_IN, PTJ4_IN_PU,
+ PTJ3_FN, PTJ3_OUT, PTJ3_IN, PTJ3_IN_PU,
+ PTJ2_FN, PTJ2_OUT, PTJ2_IN, PTJ2_IN_PU,
+ PTJ1_FN, PTJ1_OUT, PTJ1_IN, PTJ1_IN_PU,
+ PTJ0_FN, PTJ0_OUT, PTJ0_IN, PTJ0_IN_PU }
},
{ PINMUX_CFG_REG("PKCR", 0xffec0014, 16, 2) {
- PTK7_FN, PTK7_OUT, PTK7_IN, 0,
- PTK6_FN, PTK6_OUT, PTK6_IN, 0,
- PTK5_FN, PTK5_OUT, PTK5_IN, 0,
- PTK4_FN, PTK4_OUT, PTK4_IN, 0,
- PTK3_FN, PTK3_OUT, PTK3_IN, 0,
- PTK2_FN, PTK2_OUT, PTK2_IN, 0,
- PTK1_FN, PTK1_OUT, PTK1_IN, 0,
- PTK0_FN, PTK0_OUT, PTK0_IN, 0 }
+ PTK7_FN, PTK7_OUT, PTK7_IN, PTK7_IN_PU,
+ PTK6_FN, PTK6_OUT, PTK6_IN, PTK6_IN_PU,
+ PTK5_FN, PTK5_OUT, PTK5_IN, PTK5_IN_PU,
+ PTK4_FN, PTK4_OUT, PTK4_IN, PTK4_IN_PU,
+ PTK3_FN, PTK3_OUT, PTK3_IN, PTK3_IN_PU,
+ PTK2_FN, PTK2_OUT, PTK2_IN, PTK2_IN_PU,
+ PTK1_FN, PTK1_OUT, PTK1_IN, PTK1_IN_PU,
+ PTK0_FN, PTK0_OUT, PTK0_IN, PTK0_IN_PU }
},
{ PINMUX_CFG_REG("PLCR", 0xffec0016, 16, 2) {
- PTL7_FN, PTL7_OUT, PTL7_IN, 0,
- PTL6_FN, PTL6_OUT, PTL6_IN, 0,
- PTL5_FN, PTL5_OUT, PTL5_IN, 0,
- PTL4_FN, PTL4_OUT, PTL4_IN, 0,
- PTL3_FN, PTL3_OUT, PTL3_IN, 0,
- PTL2_FN, PTL2_OUT, PTL2_IN, 0,
- PTL1_FN, PTL1_OUT, PTL1_IN, 0,
- PTL0_FN, PTL0_OUT, PTL0_IN, 0 }
+ 0, 0, 0, 0, /* reserved: always set 1 */
+ PTL6_FN, PTL6_OUT, PTL6_IN, PTL6_IN_PU,
+ PTL5_FN, PTL5_OUT, PTL5_IN, PTL5_IN_PU,
+ PTL4_FN, PTL4_OUT, PTL4_IN, PTL4_IN_PU,
+ PTL3_FN, PTL3_OUT, PTL3_IN, PTL3_IN_PU,
+ PTL2_FN, PTL2_OUT, PTL2_IN, PTL2_IN_PU,
+ PTL1_FN, PTL1_OUT, PTL1_IN, PTL1_IN_PU,
+ PTL0_FN, PTL0_OUT, PTL0_IN, PTL0_IN_PU }
},
{ PINMUX_CFG_REG("PMCR", 0xffec0018, 16, 2) {
- 0, 0, 0, 0, /* reserved: always set 1 */
- PTM6_FN, PTM6_OUT, PTM6_IN, 0,
- PTM5_FN, PTM5_OUT, PTM5_IN, 0,
- PTM4_FN, PTM4_OUT, PTM4_IN, 0,
+ PTM7_FN, PTM7_OUT, PTM7_IN, PTM7_IN_PU,
+ PTM6_FN, PTM6_OUT, PTM6_IN, PTM6_IN_PU,
+ PTM5_FN, PTM5_OUT, PTM5_IN, PTM5_IN_PU,
+ PTM4_FN, PTM4_OUT, PTM4_IN, PTM4_IN_PU,
PTM3_FN, PTM3_OUT, PTM3_IN, 0,
PTM2_FN, PTM2_OUT, PTM2_IN, 0,
PTM1_FN, PTM1_OUT, PTM1_IN, 0,
PTM0_FN, PTM0_OUT, PTM0_IN, 0 }
},
{ PINMUX_CFG_REG("PNCR", 0xffec001a, 16, 2) {
- PTN7_FN, PTN7_OUT, PTN7_IN, 0,
+ 0, 0, 0, 0, /* reserved: always set 1 */
PTN6_FN, PTN6_OUT, PTN6_IN, 0,
PTN5_FN, PTN5_OUT, PTN5_IN, 0,
- PTN4_FN, PTN4_OUT, PTN4_IN, 0,
- PTN3_FN, PTN3_OUT, PTN3_IN, 0,
- PTN2_FN, PTN2_OUT, PTN2_IN, 0,
- PTN1_FN, PTN1_OUT, PTN1_IN, 0,
- PTN0_FN, PTN0_OUT, PTN0_IN, 0 }
+ PTN4_FN, PTN4_OUT, PTN4_IN, PTN4_IN_PU,
+ PTN3_FN, PTN3_OUT, PTN3_IN, PTN3_IN_PU,
+ PTN2_FN, PTN2_OUT, PTN2_IN, PTN2_IN_PU,
+ PTN1_FN, PTN1_OUT, PTN1_IN, PTN1_IN_PU,
+ PTN0_FN, PTN0_OUT, PTN0_IN, PTN0_IN_PU }
},
{ PINMUX_CFG_REG("POCR", 0xffec001c, 16, 2) {
- PTO7_FN, PTO7_OUT, PTO7_IN, 0,
- PTO6_FN, PTO6_OUT, PTO6_IN, 0,
- PTO5_FN, PTO5_OUT, PTO5_IN, 0,
- PTO4_FN, PTO4_OUT, PTO4_IN, 0,
- PTO3_FN, PTO3_OUT, PTO3_IN, 0,
- PTO2_FN, PTO2_OUT, PTO2_IN, 0,
- PTO1_FN, PTO1_OUT, PTO1_IN, 0,
- PTO0_FN, PTO0_OUT, PTO0_IN, 0 }
+ PTO7_FN, PTO7_OUT, PTO7_IN, PTO7_IN_PU,
+ PTO6_FN, PTO6_OUT, PTO6_IN, PTO6_IN_PU,
+ PTO5_FN, PTO5_OUT, PTO5_IN, PTO5_IN_PU,
+ PTO4_FN, PTO4_OUT, PTO4_IN, PTO4_IN_PU,
+ PTO3_FN, PTO3_OUT, PTO3_IN, PTO3_IN_PU,
+ PTO2_FN, PTO2_OUT, PTO2_IN, PTO2_IN_PU,
+ PTO1_FN, PTO1_OUT, PTO1_IN, PTO1_IN_PU,
+ PTO0_FN, PTO0_OUT, PTO0_IN, PTO0_IN_PU }
},
+#if 0 /* FIXME: Remove it? */
{ PINMUX_CFG_REG("PPCR", 0xffec001e, 16, 2) {
0, 0, 0, 0, /* reserved: always set 1 */
PTP6_FN, PTP6_OUT, PTP6_IN, 0,
@@ -1670,6 +1883,7 @@ static struct pinmux_cfg_reg pinmux_config_regs[] = {
PTP1_FN, PTP1_OUT, PTP1_IN, 0,
PTP0_FN, PTP0_OUT, PTP0_IN, 0 }
},
+#endif
{ PINMUX_CFG_REG("PQCR", 0xffec0020, 16, 2) {
0, 0, 0, 0, /* reserved: always set 1 */
PTQ6_FN, PTQ6_OUT, PTQ6_IN, 0,
@@ -1701,14 +1915,14 @@ static struct pinmux_cfg_reg pinmux_config_regs[] = {
PTS0_FN, PTS0_OUT, PTS0_IN, 0 }
},
{ PINMUX_CFG_REG("PTCR", 0xffec0026, 16, 2) {
- 0, 0, 0, 0, /* reserved: always set 1 */
- 0, 0, 0, 0, /* reserved: always set 1 */
- PTT5_FN, PTT5_OUT, PTT5_IN, 0,
- PTT4_FN, PTT4_OUT, PTT4_IN, 0,
- PTT3_FN, PTT3_OUT, PTT3_IN, 0,
- PTT2_FN, PTT2_OUT, PTT2_IN, 0,
- PTT1_FN, PTT1_OUT, PTT1_IN, 0,
- PTT0_FN, PTT0_OUT, PTT0_IN, 0 }
+ PTT7_FN, PTT7_OUT, PTT7_IN, PTT7_IN_PU,
+ PTT6_FN, PTT6_OUT, PTT6_IN, PTT6_IN_PU,
+ PTT5_FN, PTT5_OUT, PTT5_IN, PTT5_IN_PU,
+ PTT4_FN, PTT4_OUT, PTT4_IN, PTT4_IN_PU,
+ PTT3_FN, PTT3_OUT, PTT3_IN, PTT3_IN_PU,
+ PTT2_FN, PTT2_OUT, PTT2_IN, PTT2_IN_PU,
+ PTT1_FN, PTT1_OUT, PTT1_IN, PTT1_IN_PU,
+ PTT0_FN, PTT0_OUT, PTT0_IN, PTT0_IN_PU }
},
{ PINMUX_CFG_REG("PUCR", 0xffec0028, 16, 2) {
PTU7_FN, PTU7_OUT, PTU7_IN, PTU7_IN_PU,
@@ -1727,16 +1941,16 @@ static struct pinmux_cfg_reg pinmux_config_regs[] = {
PTV4_FN, PTV4_OUT, PTV4_IN, PTV4_IN_PU,
PTV3_FN, PTV3_OUT, PTV3_IN, PTV3_IN_PU,
PTV2_FN, PTV2_OUT, PTV2_IN, PTV2_IN_PU,
- PTV1_FN, PTV1_OUT, PTV1_IN, PTV1_IN_PU,
- PTV0_FN, PTV0_OUT, PTV0_IN, PTV0_IN_PU }
+ PTV1_FN, PTV1_OUT, PTV1_IN, 0,
+ PTV0_FN, PTV0_OUT, PTV0_IN, 0 }
},
{ PINMUX_CFG_REG("PWCR", 0xffec002c, 16, 2) {
- PTW7_FN, PTW7_OUT, PTW7_IN, PTW7_IN_PU,
- PTW6_FN, PTW6_OUT, PTW6_IN, PTW6_IN_PU,
- PTW5_FN, PTW5_OUT, PTW5_IN, PTW5_IN_PU,
- PTW4_FN, PTW4_OUT, PTW4_IN, PTW4_IN_PU,
- PTW3_FN, PTW3_OUT, PTW3_IN, PTW3_IN_PU,
- PTW2_FN, PTW2_OUT, PTW2_IN, PTW2_IN_PU,
+ PTW7_FN, PTW7_OUT, PTW7_IN, 0,
+ PTW6_FN, PTW6_OUT, PTW6_IN, 0,
+ PTW5_FN, PTW5_OUT, PTW5_IN, 0,
+ PTW4_FN, PTW4_OUT, PTW4_IN, 0,
+ PTW3_FN, PTW3_OUT, PTW3_IN, 0,
+ PTW2_FN, PTW2_OUT, PTW2_IN, 0,
PTW1_FN, PTW1_OUT, PTW1_IN, PTW1_IN_PU,
PTW0_FN, PTW0_OUT, PTW0_IN, PTW0_IN_PU }
},
@@ -1761,32 +1975,32 @@ static struct pinmux_cfg_reg pinmux_config_regs[] = {
PTY0_FN, PTY0_OUT, PTY0_IN, PTY0_IN_PU }
},
{ PINMUX_CFG_REG("PZCR", 0xffec0032, 16, 2) {
- 0, PTZ7_OUT, PTZ7_IN, 0,
- 0, PTZ6_OUT, PTZ6_IN, 0,
- 0, PTZ5_OUT, PTZ5_IN, 0,
- 0, PTZ4_OUT, PTZ4_IN, 0,
- 0, PTZ3_OUT, PTZ3_IN, 0,
- 0, PTZ2_OUT, PTZ2_IN, 0,
- 0, PTZ1_OUT, PTZ1_IN, 0,
- 0, PTZ0_OUT, PTZ0_IN, 0 }
+ PTZ7_FN, PTZ7_OUT, PTZ7_IN, 0,
+ PTZ6_FN, PTZ6_OUT, PTZ6_IN, 0,
+ PTZ5_FN, PTZ5_OUT, PTZ5_IN, 0,
+ PTZ4_FN, PTZ4_OUT, PTZ4_IN, 0,
+ PTZ3_FN, PTZ3_OUT, PTZ3_IN, 0,
+ PTZ2_FN, PTZ2_OUT, PTZ2_IN, 0,
+ PTZ1_FN, PTZ1_OUT, PTZ1_IN, 0,
+ PTZ0_FN, PTZ0_OUT, PTZ0_IN, 0 }
},
{ PINMUX_CFG_REG("PSEL0", 0xffec0070, 16, 1) {
- PS0_15_FN3, PS0_15_FN1,
- PS0_14_FN3, PS0_14_FN1,
- PS0_13_FN3, PS0_13_FN1,
- PS0_12_FN3, PS0_12_FN1,
- 0, 0,
- 0, 0,
+ PS0_15_FN1, PS0_15_FN2,
+ PS0_14_FN1, PS0_14_FN2,
+ PS0_13_FN1, PS0_13_FN2,
+ PS0_12_FN1, PS0_12_FN2,
+ PS0_11_FN1, PS0_11_FN2,
+ PS0_10_FN1, PS0_10_FN2,
+ PS0_9_FN1, PS0_9_FN2,
+ PS0_8_FN1, PS0_8_FN2,
+ PS0_7_FN1, PS0_7_FN2,
+ PS0_6_FN1, PS0_6_FN2,
+ PS0_5_FN1, PS0_5_FN2,
+ PS0_4_FN1, PS0_4_FN2,
+ PS0_3_FN1, PS0_3_FN2,
+ PS0_2_FN1, PS0_2_FN2,
0, 0,
- 0, 0,
- PS0_7_FN2, PS0_7_FN1,
- PS0_6_FN2, PS0_6_FN1,
- PS0_5_FN2, PS0_5_FN1,
- PS0_4_FN2, PS0_4_FN1,
- PS0_3_FN2, PS0_3_FN1,
- PS0_2_FN2, PS0_2_FN1,
- PS0_1_FN2, PS0_1_FN1,
0, 0, }
},
{ PINMUX_CFG_REG("PSEL1", 0xffec0072, 16, 1) {
@@ -1795,73 +2009,136 @@ static struct pinmux_cfg_reg pinmux_config_regs[] = {
0, 0,
0, 0,
0, 0,
+ PS1_10_FN1, PS1_10_FN2,
+ PS1_9_FN1, PS1_9_FN2,
+ PS1_8_FN1, PS1_8_FN2,
0, 0,
0, 0,
0, 0,
- PS1_7_FN1, PS1_7_FN3,
- PS1_6_FN1, PS1_6_FN3,
- 0, 0,
- 0, 0,
0, 0,
0, 0,
+ PS1_2_FN1, PS1_2_FN2,
0, 0,
0, 0, }
},
{ PINMUX_CFG_REG("PSEL2", 0xffec0074, 16, 1) {
0, 0,
0, 0,
- PS2_13_FN3, PS2_13_FN1,
- PS2_12_FN3, PS2_12_FN1,
+ PS2_13_FN1, PS2_13_FN2,
+ PS2_12_FN1, PS2_12_FN2,
0, 0,
0, 0,
0, 0,
0, 0,
+ PS2_7_FN1, PS2_7_FN2,
+ PS2_6_FN1, PS2_6_FN2,
+ PS2_5_FN1, PS2_5_FN2,
+ PS2_4_FN1, PS2_4_FN2,
0, 0,
+ PS2_2_FN1, PS2_2_FN2,
0, 0,
+ 0, 0, }
+ },
+ { PINMUX_CFG_REG("PSEL3", 0xffec0076, 16, 1) {
+ PS3_15_FN1, PS3_15_FN2,
+ PS3_14_FN1, PS3_14_FN2,
+ PS3_13_FN1, PS3_13_FN2,
+ PS3_12_FN1, PS3_12_FN2,
+ PS3_11_FN1, PS3_11_FN2,
+ PS3_10_FN1, PS3_10_FN2,
+ PS3_9_FN1, PS3_9_FN2,
+ PS3_8_FN1, PS3_8_FN2,
+ PS3_7_FN1, PS3_7_FN2,
0, 0,
0, 0,
0, 0,
0, 0,
- PS2_1_FN1, PS2_1_FN2,
- PS2_0_FN1, PS2_0_FN2, }
+ PS3_2_FN1, PS3_2_FN2,
+ PS3_1_FN1, PS3_1_FN2,
+ 0, 0, }
},
+
{ PINMUX_CFG_REG("PSEL4", 0xffec0078, 16, 1) {
- PS4_15_FN2, PS4_15_FN1,
- PS4_14_FN2, PS4_14_FN1,
- PS4_13_FN2, PS4_13_FN1,
- PS4_12_FN2, PS4_12_FN1,
- PS4_11_FN2, PS4_11_FN1,
- PS4_10_FN2, PS4_10_FN1,
- PS4_9_FN2, PS4_9_FN1,
0, 0,
+ PS4_14_FN1, PS4_14_FN2,
+ PS4_13_FN1, PS4_13_FN2,
+ PS4_12_FN1, PS4_12_FN2,
0, 0,
+ PS4_10_FN1, PS4_10_FN2,
+ PS4_9_FN1, PS4_9_FN2,
+ PS4_8_FN1, PS4_8_FN2,
0, 0,
0, 0,
0, 0,
- PS4_3_FN2, PS4_3_FN1,
- PS4_2_FN2, PS4_2_FN1,
- PS4_1_FN2, PS4_1_FN1,
- PS4_0_FN2, PS4_0_FN1, }
+ PS4_4_FN1, PS4_4_FN2,
+ PS4_3_FN1, PS4_3_FN2,
+ PS4_2_FN1, PS4_2_FN2,
+ PS4_1_FN1, PS4_1_FN2,
+ PS4_0_FN1, PS4_0_FN2, }
},
{ PINMUX_CFG_REG("PSEL5", 0xffec007a, 16, 1) {
0, 0,
0, 0,
0, 0,
0, 0,
- 0, 0,
- 0, 0,
+ PS5_11_FN1, PS5_11_FN2,
+ PS5_10_FN1, PS5_10_FN2,
PS5_9_FN1, PS5_9_FN2,
PS5_8_FN1, PS5_8_FN2,
PS5_7_FN1, PS5_7_FN2,
PS5_6_FN1, PS5_6_FN2,
PS5_5_FN1, PS5_5_FN2,
+ PS5_4_FN1, PS5_4_FN2,
+ PS5_3_FN1, PS5_3_FN2,
+ PS5_2_FN1, PS5_2_FN2,
+ 0, 0,
+ 0, 0, }
+ },
+ { PINMUX_CFG_REG("PSEL6", 0xffec007c, 16, 1) {
+ PS6_15_FN1, PS6_15_FN2,
+ PS6_14_FN1, PS6_14_FN2,
+ PS6_13_FN1, PS6_13_FN2,
+ PS6_12_FN1, PS6_12_FN2,
+ PS6_11_FN1, PS6_11_FN2,
+ PS6_10_FN1, PS6_10_FN2,
+ PS6_9_FN1, PS6_9_FN2,
+ PS6_8_FN1, PS6_8_FN2,
+ PS6_7_FN1, PS6_7_FN2,
+ PS6_6_FN1, PS6_6_FN2,
+ PS6_5_FN1, PS6_5_FN2,
+ PS6_4_FN1, PS6_4_FN2,
+ PS6_3_FN1, PS6_3_FN2,
+ PS6_2_FN1, PS6_2_FN2,
+ PS6_1_FN1, PS6_1_FN2,
+ PS6_0_FN1, PS6_0_FN2, }
+ },
+ { PINMUX_CFG_REG("PSEL7", 0xffec0082, 16, 1) {
+ PS7_15_FN1, PS7_15_FN2,
+ PS7_14_FN1, PS7_14_FN2,
+ PS7_13_FN1, PS7_13_FN2,
+ PS7_12_FN1, PS7_12_FN2,
+ PS7_11_FN1, PS7_11_FN2,
+ PS7_10_FN1, PS7_10_FN2,
+ PS7_9_FN1, PS7_9_FN2,
+ PS7_8_FN1, PS7_8_FN2,
+ PS7_7_FN1, PS7_7_FN2,
+ PS7_6_FN1, PS7_6_FN2,
+ PS7_5_FN1, PS7_5_FN2,
0, 0,
0, 0,
0, 0,
0, 0,
0, 0, }
},
- { PINMUX_CFG_REG("PSEL6", 0xffec007c, 16, 1) {
+ { PINMUX_CFG_REG("PSEL8", 0xffec0084, 16, 1) {
+ PS8_15_FN1, PS8_15_FN2,
+ PS8_14_FN1, PS8_14_FN2,
+ PS8_13_FN1, PS8_13_FN2,
+ PS8_12_FN1, PS8_12_FN2,
+ PS8_11_FN1, PS8_11_FN2,
+ PS8_10_FN1, PS8_10_FN2,
+ PS8_9_FN1, PS8_9_FN2,
+ PS8_8_FN1, PS8_8_FN2,
0, 0,
0, 0,
0, 0,
@@ -1869,15 +2146,7 @@ static struct pinmux_cfg_reg pinmux_config_regs[] = {
0, 0,
0, 0,
0, 0,
- 0, 0,
- PS6_7_FN_AN, PS6_7_FN_EV,
- PS6_6_FN_AN, PS6_6_FN_EV,
- PS6_5_FN_AN, PS6_5_FN_EV,
- PS6_4_FN_AN, PS6_4_FN_EV,
- PS6_3_FN_AN, PS6_3_FN_EV,
- PS6_2_FN_AN, PS6_2_FN_EV,
- PS6_1_FN_AN, PS6_1_FN_EV,
- PS6_0_FN_AN, PS6_0_FN_EV, }
+ 0, 0, }
},
{}
};
@@ -1920,7 +2189,7 @@ static struct pinmux_data_reg pinmux_data_regs[] = {
PTI3_DATA, PTI2_DATA, PTI1_DATA, PTI0_DATA }
},
{ PINMUX_DATA_REG("PJDR", 0xffec0046, 8) {
- PTJ7_DATA, PTJ6_DATA, PTJ5_DATA, PTJ4_DATA,
+ 0, PTJ6_DATA, PTJ5_DATA, PTJ4_DATA,
PTJ3_DATA, PTJ2_DATA, PTJ1_DATA, PTJ0_DATA }
},
{ PINMUX_DATA_REG("PKDR", 0xffec0048, 8) {
@@ -1928,15 +2197,15 @@ static struct pinmux_data_reg pinmux_data_regs[] = {
PTK3_DATA, PTK2_DATA, PTK1_DATA, PTK0_DATA }
},
{ PINMUX_DATA_REG("PLDR", 0xffec004a, 8) {
- PTL7_DATA, PTL6_DATA, PTL5_DATA, PTL4_DATA,
+ 0, PTL6_DATA, PTL5_DATA, PTL4_DATA,
PTL3_DATA, PTL2_DATA, PTL1_DATA, PTL0_DATA }
},
{ PINMUX_DATA_REG("PMDR", 0xffec004c, 8) {
- 0, PTM6_DATA, PTM5_DATA, PTM4_DATA,
+ PTM7_DATA, PTM6_DATA, PTM5_DATA, PTM4_DATA,
PTM3_DATA, PTM2_DATA, PTM1_DATA, PTM0_DATA }
},
{ PINMUX_DATA_REG("PNDR", 0xffec004e, 8) {
- PTN7_DATA, PTN6_DATA, PTN5_DATA, PTN4_DATA,
+ 0, PTN6_DATA, PTN5_DATA, PTN4_DATA,
PTN3_DATA, PTN2_DATA, PTN1_DATA, PTN0_DATA }
},
{ PINMUX_DATA_REG("PODR", 0xffec0050, 8) {
@@ -1944,7 +2213,7 @@ static struct pinmux_data_reg pinmux_data_regs[] = {
PTO3_DATA, PTO2_DATA, PTO1_DATA, PTO0_DATA }
},
{ PINMUX_DATA_REG("PPDR", 0xffec0052, 8) {
- 0, PTP6_DATA, PTP5_DATA, PTP4_DATA,
+ PTP7_DATA, PTP6_DATA, PTP5_DATA, PTP4_DATA,
PTP3_DATA, PTP2_DATA, PTP1_DATA, PTP0_DATA }
},
{ PINMUX_DATA_REG("PQDR", 0xffec0054, 8) {
@@ -1960,7 +2229,7 @@ static struct pinmux_data_reg pinmux_data_regs[] = {
PTS3_DATA, PTS2_DATA, PTS1_DATA, PTS0_DATA }
},
{ PINMUX_DATA_REG("PTDR", 0xffec005a, 8) {
- 0, 0, PTT5_DATA, PTT4_DATA,
+ PTT7_DATA, PTT6_DATA, PTT5_DATA, PTT4_DATA,
PTT3_DATA, PTT2_DATA, PTT1_DATA, PTT0_DATA }
},
{ PINMUX_DATA_REG("PUDR", 0xffec005c, 8) {
@@ -2000,8 +2269,8 @@ static struct pinmux_info sh7757_pinmux_info = {
.mark = { PINMUX_MARK_BEGIN, PINMUX_MARK_END },
.function = { PINMUX_FUNCTION_BEGIN, PINMUX_FUNCTION_END },
- .first_gpio = GPIO_PTA7,
- .last_gpio = GPIO_FN_D0,
+ .first_gpio = GPIO_PTA0,
+ .last_gpio = GPIO_FN_ON_DQ0,
.gpios = pinmux_gpios,
.cfg_regs = pinmux_config_regs,
@@ -2015,5 +2284,4 @@ static int __init plat_pinmux_setup(void)
{
return register_pinmux(&sh7757_pinmux_info);
}
-
arch_initcall(plat_pinmux_setup);
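/*
 * Illustrative usage sketch (not part of the patch above): SH board setup
 * code selects the pin functions registered here through the generic
 * arch/sh pinmux GPIO API by requesting the corresponding GPIO_FN_* or
 * GPIO_PT* entries.  The board init function below is hypothetical; the
 * GPIO_FN_* names come from the SH7757 table in this patch, and
 * <cpu/sh7757.h> is assumed to provide the enum values.
 */
#include <linux/init.h>
#include <linux/gpio.h>
#include <cpu/sh7757.h>

static void __init example_board_pinmux_init(void)
{
	/* route the PTZ pins to the eMMC data bus (MMCDAT7..MMCDAT0) */
	gpio_request(GPIO_FN_MMCDAT7, NULL);
	gpio_request(GPIO_FN_MMCDAT6, NULL);
	gpio_request(GPIO_FN_MMCDAT5, NULL);
	gpio_request(GPIO_FN_MMCDAT4, NULL);
	gpio_request(GPIO_FN_MMCDAT3, NULL);
	gpio_request(GPIO_FN_MMCDAT2, NULL);
	gpio_request(GPIO_FN_MMCDAT1, NULL);
	gpio_request(GPIO_FN_MMCDAT0, NULL);

	/* the eMMC clock and command pins live on PTG in this table */
	gpio_request(GPIO_FN_MMCCLK, NULL);
	gpio_request(GPIO_FN_MMCCMD, NULL);

	/* a plain GPIO is still claimed by its port name, e.g. PTT0 */
	gpio_request(GPIO_PTT0, NULL);
	gpio_direction_output(GPIO_PTT0, 1);
}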
diff --git a/arch/sh/kernel/cpu/sh4a/pinmux-shx3.c b/arch/sh/kernel/cpu/sh4a/pinmux-shx3.c
new file mode 100644
index 000000000000..aaa5338abbff
--- /dev/null
+++ b/arch/sh/kernel/cpu/sh4a/pinmux-shx3.c
@@ -0,0 +1,587 @@
+/*
+ * SH-X3 prototype CPU pinmux
+ *
+ * Copyright (C) 2010 Paul Mundt
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ */
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/gpio.h>
+#include <cpu/shx3.h>
+
+enum {
+ PINMUX_RESERVED = 0,
+
+ PINMUX_DATA_BEGIN,
+ PA7_DATA, PA6_DATA, PA5_DATA, PA4_DATA,
+ PA3_DATA, PA2_DATA, PA1_DATA, PA0_DATA,
+ PB7_DATA, PB6_DATA, PB5_DATA, PB4_DATA,
+ PB3_DATA, PB2_DATA, PB1_DATA, PB0_DATA,
+ PC7_DATA, PC6_DATA, PC5_DATA, PC4_DATA,
+ PC3_DATA, PC2_DATA, PC1_DATA, PC0_DATA,
+ PD7_DATA, PD6_DATA, PD5_DATA, PD4_DATA,
+ PD3_DATA, PD2_DATA, PD1_DATA, PD0_DATA,
+ PE7_DATA, PE6_DATA, PE5_DATA, PE4_DATA,
+ PE3_DATA, PE2_DATA, PE1_DATA, PE0_DATA,
+ PF7_DATA, PF6_DATA, PF5_DATA, PF4_DATA,
+ PF3_DATA, PF2_DATA, PF1_DATA, PF0_DATA,
+ PG7_DATA, PG6_DATA, PG5_DATA, PG4_DATA,
+ PG3_DATA, PG2_DATA, PG1_DATA, PG0_DATA,
+
+ PH5_DATA, PH4_DATA,
+ PH3_DATA, PH2_DATA, PH1_DATA, PH0_DATA,
+ PINMUX_DATA_END,
+
+ PINMUX_INPUT_BEGIN,
+ PA7_IN, PA6_IN, PA5_IN, PA4_IN,
+ PA3_IN, PA2_IN, PA1_IN, PA0_IN,
+ PB7_IN, PB6_IN, PB5_IN, PB4_IN,
+ PB3_IN, PB2_IN, PB1_IN, PB0_IN,
+ PC7_IN, PC6_IN, PC5_IN, PC4_IN,
+ PC3_IN, PC2_IN, PC1_IN, PC0_IN,
+ PD7_IN, PD6_IN, PD5_IN, PD4_IN,
+ PD3_IN, PD2_IN, PD1_IN, PD0_IN,
+ PE7_IN, PE6_IN, PE5_IN, PE4_IN,
+ PE3_IN, PE2_IN, PE1_IN, PE0_IN,
+ PF7_IN, PF6_IN, PF5_IN, PF4_IN,
+ PF3_IN, PF2_IN, PF1_IN, PF0_IN,
+ PG7_IN, PG6_IN, PG5_IN, PG4_IN,
+ PG3_IN, PG2_IN, PG1_IN, PG0_IN,
+
+ PH5_IN, PH4_IN,
+ PH3_IN, PH2_IN, PH1_IN, PH0_IN,
+ PINMUX_INPUT_END,
+
+ PINMUX_INPUT_PULLUP_BEGIN,
+ PA7_IN_PU, PA6_IN_PU, PA5_IN_PU, PA4_IN_PU,
+ PA3_IN_PU, PA2_IN_PU, PA1_IN_PU, PA0_IN_PU,
+ PB7_IN_PU, PB6_IN_PU, PB5_IN_PU, PB4_IN_PU,
+ PB3_IN_PU, PB2_IN_PU, PB1_IN_PU, PB0_IN_PU,
+ PC7_IN_PU, PC6_IN_PU, PC5_IN_PU, PC4_IN_PU,
+ PC3_IN_PU, PC2_IN_PU, PC1_IN_PU, PC0_IN_PU,
+ PD7_IN_PU, PD6_IN_PU, PD5_IN_PU, PD4_IN_PU,
+ PD3_IN_PU, PD2_IN_PU, PD1_IN_PU, PD0_IN_PU,
+ PE7_IN_PU, PE6_IN_PU, PE5_IN_PU, PE4_IN_PU,
+ PE3_IN_PU, PE2_IN_PU, PE1_IN_PU, PE0_IN_PU,
+ PF7_IN_PU, PF6_IN_PU, PF5_IN_PU, PF4_IN_PU,
+ PF3_IN_PU, PF2_IN_PU, PF1_IN_PU, PF0_IN_PU,
+ PG7_IN_PU, PG6_IN_PU, PG5_IN_PU, PG4_IN_PU,
+ PG3_IN_PU, PG2_IN_PU, PG1_IN_PU, PG0_IN_PU,
+
+ PH5_IN_PU, PH4_IN_PU,
+ PH3_IN_PU, PH2_IN_PU, PH1_IN_PU, PH0_IN_PU,
+ PINMUX_INPUT_PULLUP_END,
+
+ PINMUX_OUTPUT_BEGIN,
+ PA7_OUT, PA6_OUT, PA5_OUT, PA4_OUT,
+ PA3_OUT, PA2_OUT, PA1_OUT, PA0_OUT,
+ PB7_OUT, PB6_OUT, PB5_OUT, PB4_OUT,
+ PB3_OUT, PB2_OUT, PB1_OUT, PB0_OUT,
+ PC7_OUT, PC6_OUT, PC5_OUT, PC4_OUT,
+ PC3_OUT, PC2_OUT, PC1_OUT, PC0_OUT,
+ PD7_OUT, PD6_OUT, PD5_OUT, PD4_OUT,
+ PD3_OUT, PD2_OUT, PD1_OUT, PD0_OUT,
+ PE7_OUT, PE6_OUT, PE5_OUT, PE4_OUT,
+ PE3_OUT, PE2_OUT, PE1_OUT, PE0_OUT,
+ PF7_OUT, PF6_OUT, PF5_OUT, PF4_OUT,
+ PF3_OUT, PF2_OUT, PF1_OUT, PF0_OUT,
+ PG7_OUT, PG6_OUT, PG5_OUT, PG4_OUT,
+ PG3_OUT, PG2_OUT, PG1_OUT, PG0_OUT,
+
+ PH5_OUT, PH4_OUT,
+ PH3_OUT, PH2_OUT, PH1_OUT, PH0_OUT,
+ PINMUX_OUTPUT_END,
+
+ PINMUX_FUNCTION_BEGIN,
+ PA7_FN, PA6_FN, PA5_FN, PA4_FN,
+ PA3_FN, PA2_FN, PA1_FN, PA0_FN,
+ PB7_FN, PB6_FN, PB5_FN, PB4_FN,
+ PB3_FN, PB2_FN, PB1_FN, PB0_FN,
+ PC7_FN, PC6_FN, PC5_FN, PC4_FN,
+ PC3_FN, PC2_FN, PC1_FN, PC0_FN,
+ PD7_FN, PD6_FN, PD5_FN, PD4_FN,
+ PD3_FN, PD2_FN, PD1_FN, PD0_FN,
+ PE7_FN, PE6_FN, PE5_FN, PE4_FN,
+ PE3_FN, PE2_FN, PE1_FN, PE0_FN,
+ PF7_FN, PF6_FN, PF5_FN, PF4_FN,
+ PF3_FN, PF2_FN, PF1_FN, PF0_FN,
+ PG7_FN, PG6_FN, PG5_FN, PG4_FN,
+ PG3_FN, PG2_FN, PG1_FN, PG0_FN,
+
+ PH5_FN, PH4_FN,
+ PH3_FN, PH2_FN, PH1_FN, PH0_FN,
+ PINMUX_FUNCTION_END,
+
+ PINMUX_MARK_BEGIN,
+
+ D31_MARK, D30_MARK, D29_MARK, D28_MARK, D27_MARK, D26_MARK,
+ D25_MARK, D24_MARK, D23_MARK, D22_MARK, D21_MARK, D20_MARK,
+ D19_MARK, D18_MARK, D17_MARK, D16_MARK,
+
+ BACK_MARK, BREQ_MARK,
+ WE3_MARK, WE2_MARK,
+ CS6_MARK, CS5_MARK, CS4_MARK,
+ CLKOUTENB_MARK,
+
+ DACK3_MARK, DACK2_MARK, DACK1_MARK, DACK0_MARK,
+ DREQ3_MARK, DREQ2_MARK, DREQ1_MARK, DREQ0_MARK,
+
+ IRQ3_MARK, IRQ2_MARK, IRQ1_MARK, IRQ0_MARK,
+
+ DRAK3_MARK, DRAK2_MARK, DRAK1_MARK, DRAK0_MARK,
+
+ SCK3_MARK, SCK2_MARK, SCK1_MARK, SCK0_MARK,
+ IRL3_MARK, IRL2_MARK, IRL1_MARK, IRL0_MARK,
+ TXD3_MARK, TXD2_MARK, TXD1_MARK, TXD0_MARK,
+ RXD3_MARK, RXD2_MARK, RXD1_MARK, RXD0_MARK,
+
+ CE2B_MARK, CE2A_MARK, IOIS16_MARK,
+ STATUS1_MARK, STATUS0_MARK,
+
+ IRQOUT_MARK,
+
+ PINMUX_MARK_END,
+};
+
+static pinmux_enum_t shx3_pinmux_data[] = {
+
+ /* PA GPIO */
+ PINMUX_DATA(PA7_DATA, PA7_IN, PA7_OUT, PA7_IN_PU),
+ PINMUX_DATA(PA6_DATA, PA6_IN, PA6_OUT, PA6_IN_PU),
+ PINMUX_DATA(PA5_DATA, PA5_IN, PA5_OUT, PA5_IN_PU),
+ PINMUX_DATA(PA4_DATA, PA4_IN, PA4_OUT, PA4_IN_PU),
+ PINMUX_DATA(PA3_DATA, PA3_IN, PA3_OUT, PA3_IN_PU),
+ PINMUX_DATA(PA2_DATA, PA2_IN, PA2_OUT, PA2_IN_PU),
+ PINMUX_DATA(PA1_DATA, PA1_IN, PA1_OUT, PA1_IN_PU),
+ PINMUX_DATA(PA0_DATA, PA0_IN, PA0_OUT, PA0_IN_PU),
+
+ /* PB GPIO */
+ PINMUX_DATA(PB7_DATA, PB7_IN, PB7_OUT, PB7_IN_PU),
+ PINMUX_DATA(PB6_DATA, PB6_IN, PB6_OUT, PB6_IN_PU),
+ PINMUX_DATA(PB5_DATA, PB5_IN, PB5_OUT, PB5_IN_PU),
+ PINMUX_DATA(PB4_DATA, PB4_IN, PB4_OUT, PB4_IN_PU),
+ PINMUX_DATA(PB3_DATA, PB3_IN, PB3_OUT, PB3_IN_PU),
+ PINMUX_DATA(PB2_DATA, PB2_IN, PB2_OUT, PB2_IN_PU),
+ PINMUX_DATA(PB1_DATA, PB1_IN, PB1_OUT, PB1_IN_PU),
+ PINMUX_DATA(PB0_DATA, PB0_IN, PB0_OUT, PB0_IN_PU),
+
+ /* PC GPIO */
+ PINMUX_DATA(PC7_DATA, PC7_IN, PC7_OUT, PC7_IN_PU),
+ PINMUX_DATA(PC6_DATA, PC6_IN, PC6_OUT, PC6_IN_PU),
+ PINMUX_DATA(PC5_DATA, PC5_IN, PC5_OUT, PC5_IN_PU),
+ PINMUX_DATA(PC4_DATA, PC4_IN, PC4_OUT, PC4_IN_PU),
+ PINMUX_DATA(PC3_DATA, PC3_IN, PC3_OUT, PC3_IN_PU),
+ PINMUX_DATA(PC2_DATA, PC2_IN, PC2_OUT, PC2_IN_PU),
+ PINMUX_DATA(PC1_DATA, PC1_IN, PC1_OUT, PC1_IN_PU),
+ PINMUX_DATA(PC0_DATA, PC0_IN, PC0_OUT, PC0_IN_PU),
+
+ /* PD GPIO */
+ PINMUX_DATA(PD7_DATA, PD7_IN, PD7_OUT, PD7_IN_PU),
+ PINMUX_DATA(PD6_DATA, PD6_IN, PD6_OUT, PD6_IN_PU),
+ PINMUX_DATA(PD5_DATA, PD5_IN, PD5_OUT, PD5_IN_PU),
+ PINMUX_DATA(PD4_DATA, PD4_IN, PD4_OUT, PD4_IN_PU),
+ PINMUX_DATA(PD3_DATA, PD3_IN, PD3_OUT, PD3_IN_PU),
+ PINMUX_DATA(PD2_DATA, PD2_IN, PD2_OUT, PD2_IN_PU),
+ PINMUX_DATA(PD1_DATA, PD1_IN, PD1_OUT, PD1_IN_PU),
+ PINMUX_DATA(PD0_DATA, PD0_IN, PD0_OUT, PD0_IN_PU),
+
+ /* PE GPIO */
+ PINMUX_DATA(PE7_DATA, PE7_IN, PE7_OUT, PE7_IN_PU),
+ PINMUX_DATA(PE6_DATA, PE6_IN, PE6_OUT, PE6_IN_PU),
+ PINMUX_DATA(PE5_DATA, PE5_IN, PE5_OUT, PE5_IN_PU),
+ PINMUX_DATA(PE4_DATA, PE4_IN, PE4_OUT, PE4_IN_PU),
+ PINMUX_DATA(PE3_DATA, PE3_IN, PE3_OUT, PE3_IN_PU),
+ PINMUX_DATA(PE2_DATA, PE2_IN, PE2_OUT, PE2_IN_PU),
+ PINMUX_DATA(PE1_DATA, PE1_IN, PE1_OUT, PE1_IN_PU),
+ PINMUX_DATA(PE0_DATA, PE0_IN, PE0_OUT, PE0_IN_PU),
+
+ /* PF GPIO */
+ PINMUX_DATA(PF7_DATA, PF7_IN, PF7_OUT, PF7_IN_PU),
+ PINMUX_DATA(PF6_DATA, PF6_IN, PF6_OUT, PF6_IN_PU),
+ PINMUX_DATA(PF5_DATA, PF5_IN, PF5_OUT, PF5_IN_PU),
+ PINMUX_DATA(PF4_DATA, PF4_IN, PF4_OUT, PF4_IN_PU),
+ PINMUX_DATA(PF3_DATA, PF3_IN, PF3_OUT, PF3_IN_PU),
+ PINMUX_DATA(PF2_DATA, PF2_IN, PF2_OUT, PF2_IN_PU),
+ PINMUX_DATA(PF1_DATA, PF1_IN, PF1_OUT, PF1_IN_PU),
+ PINMUX_DATA(PF0_DATA, PF0_IN, PF0_OUT, PF0_IN_PU),
+
+ /* PG GPIO */
+ PINMUX_DATA(PG7_DATA, PG7_IN, PG7_OUT, PG7_IN_PU),
+ PINMUX_DATA(PG6_DATA, PG6_IN, PG6_OUT, PG6_IN_PU),
+ PINMUX_DATA(PG5_DATA, PG5_IN, PG5_OUT, PG5_IN_PU),
+ PINMUX_DATA(PG4_DATA, PG4_IN, PG4_OUT, PG4_IN_PU),
+ PINMUX_DATA(PG3_DATA, PG3_IN, PG3_OUT, PG3_IN_PU),
+ PINMUX_DATA(PG2_DATA, PG2_IN, PG2_OUT, PG2_IN_PU),
+ PINMUX_DATA(PG1_DATA, PG1_IN, PG1_OUT, PG1_IN_PU),
+ PINMUX_DATA(PG0_DATA, PG0_IN, PG0_OUT, PG0_IN_PU),
+
+ /* PH GPIO */
+ PINMUX_DATA(PH5_DATA, PH5_IN, PH5_OUT, PH5_IN_PU),
+ PINMUX_DATA(PH4_DATA, PH4_IN, PH4_OUT, PH4_IN_PU),
+ PINMUX_DATA(PH3_DATA, PH3_IN, PH3_OUT, PH3_IN_PU),
+ PINMUX_DATA(PH2_DATA, PH2_IN, PH2_OUT, PH2_IN_PU),
+ PINMUX_DATA(PH1_DATA, PH1_IN, PH1_OUT, PH1_IN_PU),
+ PINMUX_DATA(PH0_DATA, PH0_IN, PH0_OUT, PH0_IN_PU),
+
+ /* PA FN */
+ PINMUX_DATA(D31_MARK, PA7_FN),
+ PINMUX_DATA(D30_MARK, PA6_FN),
+ PINMUX_DATA(D29_MARK, PA5_FN),
+ PINMUX_DATA(D28_MARK, PA4_FN),
+ PINMUX_DATA(D27_MARK, PA3_FN),
+ PINMUX_DATA(D26_MARK, PA2_FN),
+ PINMUX_DATA(D25_MARK, PA1_FN),
+ PINMUX_DATA(D24_MARK, PA0_FN),
+
+ /* PB FN */
+ PINMUX_DATA(D23_MARK, PB7_FN),
+ PINMUX_DATA(D22_MARK, PB6_FN),
+ PINMUX_DATA(D21_MARK, PB5_FN),
+ PINMUX_DATA(D20_MARK, PB4_FN),
+ PINMUX_DATA(D19_MARK, PB3_FN),
+ PINMUX_DATA(D18_MARK, PB2_FN),
+ PINMUX_DATA(D17_MARK, PB1_FN),
+ PINMUX_DATA(D16_MARK, PB0_FN),
+
+ /* PC FN */
+ PINMUX_DATA(BACK_MARK, PC7_FN),
+ PINMUX_DATA(BREQ_MARK, PC6_FN),
+ PINMUX_DATA(WE3_MARK, PC5_FN),
+ PINMUX_DATA(WE2_MARK, PC4_FN),
+ PINMUX_DATA(CS6_MARK, PC3_FN),
+ PINMUX_DATA(CS5_MARK, PC2_FN),
+ PINMUX_DATA(CS4_MARK, PC1_FN),
+ PINMUX_DATA(CLKOUTENB_MARK, PC0_FN),
+
+ /* PD FN */
+ PINMUX_DATA(DACK3_MARK, PD7_FN),
+ PINMUX_DATA(DACK2_MARK, PD6_FN),
+ PINMUX_DATA(DACK1_MARK, PD5_FN),
+ PINMUX_DATA(DACK0_MARK, PD4_FN),
+ PINMUX_DATA(DREQ3_MARK, PD3_FN),
+ PINMUX_DATA(DREQ2_MARK, PD2_FN),
+ PINMUX_DATA(DREQ1_MARK, PD1_FN),
+ PINMUX_DATA(DREQ0_MARK, PD0_FN),
+
+ /* PE FN */
+ PINMUX_DATA(IRQ3_MARK, PE7_FN),
+ PINMUX_DATA(IRQ2_MARK, PE6_FN),
+ PINMUX_DATA(IRQ1_MARK, PE5_FN),
+ PINMUX_DATA(IRQ0_MARK, PE4_FN),
+ PINMUX_DATA(DRAK3_MARK, PE3_FN),
+ PINMUX_DATA(DRAK2_MARK, PE2_FN),
+ PINMUX_DATA(DRAK1_MARK, PE1_FN),
+ PINMUX_DATA(DRAK0_MARK, PE0_FN),
+
+ /* PF FN */
+ PINMUX_DATA(SCK3_MARK, PF7_FN),
+ PINMUX_DATA(SCK2_MARK, PF6_FN),
+ PINMUX_DATA(SCK1_MARK, PF5_FN),
+ PINMUX_DATA(SCK0_MARK, PF4_FN),
+ PINMUX_DATA(IRL3_MARK, PF3_FN),
+ PINMUX_DATA(IRL2_MARK, PF2_FN),
+ PINMUX_DATA(IRL1_MARK, PF1_FN),
+ PINMUX_DATA(IRL0_MARK, PF0_FN),
+
+ /* PG FN */
+ PINMUX_DATA(TXD3_MARK, PG7_FN),
+ PINMUX_DATA(TXD2_MARK, PG6_FN),
+ PINMUX_DATA(TXD1_MARK, PG5_FN),
+ PINMUX_DATA(TXD0_MARK, PG4_FN),
+ PINMUX_DATA(RXD3_MARK, PG3_FN),
+ PINMUX_DATA(RXD2_MARK, PG2_FN),
+ PINMUX_DATA(RXD1_MARK, PG1_FN),
+ PINMUX_DATA(RXD0_MARK, PG0_FN),
+
+ /* PH FN */
+ PINMUX_DATA(CE2B_MARK, PH5_FN),
+ PINMUX_DATA(CE2A_MARK, PH4_FN),
+ PINMUX_DATA(IOIS16_MARK, PH3_FN),
+ PINMUX_DATA(STATUS1_MARK, PH2_FN),
+ PINMUX_DATA(STATUS0_MARK, PH1_FN),
+ PINMUX_DATA(IRQOUT_MARK, PH0_FN),
+};
+
+static struct pinmux_gpio shx3_pinmux_gpios[] = {
+ /* PA */
+ PINMUX_GPIO(GPIO_PA7, PA7_DATA),
+ PINMUX_GPIO(GPIO_PA6, PA6_DATA),
+ PINMUX_GPIO(GPIO_PA5, PA5_DATA),
+ PINMUX_GPIO(GPIO_PA4, PA4_DATA),
+ PINMUX_GPIO(GPIO_PA3, PA3_DATA),
+ PINMUX_GPIO(GPIO_PA2, PA2_DATA),
+ PINMUX_GPIO(GPIO_PA1, PA1_DATA),
+ PINMUX_GPIO(GPIO_PA0, PA0_DATA),
+
+ /* PB */
+ PINMUX_GPIO(GPIO_PB7, PB7_DATA),
+ PINMUX_GPIO(GPIO_PB6, PB6_DATA),
+ PINMUX_GPIO(GPIO_PB5, PB5_DATA),
+ PINMUX_GPIO(GPIO_PB4, PB4_DATA),
+ PINMUX_GPIO(GPIO_PB3, PB3_DATA),
+ PINMUX_GPIO(GPIO_PB2, PB2_DATA),
+ PINMUX_GPIO(GPIO_PB1, PB1_DATA),
+ PINMUX_GPIO(GPIO_PB0, PB0_DATA),
+
+ /* PC */
+ PINMUX_GPIO(GPIO_PC7, PC7_DATA),
+ PINMUX_GPIO(GPIO_PC6, PC6_DATA),
+ PINMUX_GPIO(GPIO_PC5, PC5_DATA),
+ PINMUX_GPIO(GPIO_PC4, PC4_DATA),
+ PINMUX_GPIO(GPIO_PC3, PC3_DATA),
+ PINMUX_GPIO(GPIO_PC2, PC2_DATA),
+ PINMUX_GPIO(GPIO_PC1, PC1_DATA),
+ PINMUX_GPIO(GPIO_PC0, PC0_DATA),
+
+ /* PD */
+ PINMUX_GPIO(GPIO_PD7, PD7_DATA),
+ PINMUX_GPIO(GPIO_PD6, PD6_DATA),
+ PINMUX_GPIO(GPIO_PD5, PD5_DATA),
+ PINMUX_GPIO(GPIO_PD4, PD4_DATA),
+ PINMUX_GPIO(GPIO_PD3, PD3_DATA),
+ PINMUX_GPIO(GPIO_PD2, PD2_DATA),
+ PINMUX_GPIO(GPIO_PD1, PD1_DATA),
+ PINMUX_GPIO(GPIO_PD0, PD0_DATA),
+
+ /* PE */
+ PINMUX_GPIO(GPIO_PE7, PE7_DATA),
+ PINMUX_GPIO(GPIO_PE6, PE6_DATA),
+ PINMUX_GPIO(GPIO_PE5, PE5_DATA),
+ PINMUX_GPIO(GPIO_PE4, PE4_DATA),
+ PINMUX_GPIO(GPIO_PE3, PE3_DATA),
+ PINMUX_GPIO(GPIO_PE2, PE2_DATA),
+ PINMUX_GPIO(GPIO_PE1, PE1_DATA),
+ PINMUX_GPIO(GPIO_PE0, PE0_DATA),
+
+ /* PF */
+ PINMUX_GPIO(GPIO_PF7, PF7_DATA),
+ PINMUX_GPIO(GPIO_PF6, PF6_DATA),
+ PINMUX_GPIO(GPIO_PF5, PF5_DATA),
+ PINMUX_GPIO(GPIO_PF4, PF4_DATA),
+ PINMUX_GPIO(GPIO_PF3, PF3_DATA),
+ PINMUX_GPIO(GPIO_PF2, PF2_DATA),
+ PINMUX_GPIO(GPIO_PF1, PF1_DATA),
+ PINMUX_GPIO(GPIO_PF0, PF0_DATA),
+
+ /* PG */
+ PINMUX_GPIO(GPIO_PG7, PG7_DATA),
+ PINMUX_GPIO(GPIO_PG6, PG6_DATA),
+ PINMUX_GPIO(GPIO_PG5, PG5_DATA),
+ PINMUX_GPIO(GPIO_PG4, PG4_DATA),
+ PINMUX_GPIO(GPIO_PG3, PG3_DATA),
+ PINMUX_GPIO(GPIO_PG2, PG2_DATA),
+ PINMUX_GPIO(GPIO_PG1, PG1_DATA),
+ PINMUX_GPIO(GPIO_PG0, PG0_DATA),
+
+ /* PH */
+ PINMUX_GPIO(GPIO_PH5, PH5_DATA),
+ PINMUX_GPIO(GPIO_PH4, PH4_DATA),
+ PINMUX_GPIO(GPIO_PH3, PH3_DATA),
+ PINMUX_GPIO(GPIO_PH2, PH2_DATA),
+ PINMUX_GPIO(GPIO_PH1, PH1_DATA),
+ PINMUX_GPIO(GPIO_PH0, PH0_DATA),
+
+ /* FN */
+ PINMUX_GPIO(GPIO_FN_D31, D31_MARK),
+ PINMUX_GPIO(GPIO_FN_D30, D30_MARK),
+ PINMUX_GPIO(GPIO_FN_D29, D29_MARK),
+ PINMUX_GPIO(GPIO_FN_D28, D28_MARK),
+ PINMUX_GPIO(GPIO_FN_D27, D27_MARK),
+ PINMUX_GPIO(GPIO_FN_D26, D26_MARK),
+ PINMUX_GPIO(GPIO_FN_D25, D25_MARK),
+ PINMUX_GPIO(GPIO_FN_D24, D24_MARK),
+ PINMUX_GPIO(GPIO_FN_D23, D23_MARK),
+ PINMUX_GPIO(GPIO_FN_D22, D22_MARK),
+ PINMUX_GPIO(GPIO_FN_D21, D21_MARK),
+ PINMUX_GPIO(GPIO_FN_D20, D20_MARK),
+ PINMUX_GPIO(GPIO_FN_D19, D19_MARK),
+ PINMUX_GPIO(GPIO_FN_D18, D18_MARK),
+ PINMUX_GPIO(GPIO_FN_D17, D17_MARK),
+ PINMUX_GPIO(GPIO_FN_D16, D16_MARK),
+ PINMUX_GPIO(GPIO_FN_BACK, BACK_MARK),
+ PINMUX_GPIO(GPIO_FN_BREQ, BREQ_MARK),
+ PINMUX_GPIO(GPIO_FN_WE3, WE3_MARK),
+ PINMUX_GPIO(GPIO_FN_WE2, WE2_MARK),
+ PINMUX_GPIO(GPIO_FN_CS6, CS6_MARK),
+ PINMUX_GPIO(GPIO_FN_CS5, CS5_MARK),
+ PINMUX_GPIO(GPIO_FN_CS4, CS4_MARK),
+ PINMUX_GPIO(GPIO_FN_CLKOUTENB, CLKOUTENB_MARK),
+ PINMUX_GPIO(GPIO_FN_DACK3, DACK3_MARK),
+ PINMUX_GPIO(GPIO_FN_DACK2, DACK2_MARK),
+ PINMUX_GPIO(GPIO_FN_DACK1, DACK1_MARK),
+ PINMUX_GPIO(GPIO_FN_DACK0, DACK0_MARK),
+ PINMUX_GPIO(GPIO_FN_DREQ3, DREQ3_MARK),
+ PINMUX_GPIO(GPIO_FN_DREQ2, DREQ2_MARK),
+ PINMUX_GPIO(GPIO_FN_DREQ1, DREQ1_MARK),
+ PINMUX_GPIO(GPIO_FN_DREQ0, DREQ0_MARK),
+ PINMUX_GPIO(GPIO_FN_IRQ3, IRQ3_MARK),
+ PINMUX_GPIO(GPIO_FN_IRQ2, IRQ2_MARK),
+ PINMUX_GPIO(GPIO_FN_IRQ1, IRQ1_MARK),
+ PINMUX_GPIO(GPIO_FN_IRQ0, IRQ0_MARK),
+ PINMUX_GPIO(GPIO_FN_DRAK3, DRAK3_MARK),
+ PINMUX_GPIO(GPIO_FN_DRAK2, DRAK2_MARK),
+ PINMUX_GPIO(GPIO_FN_DRAK1, DRAK1_MARK),
+ PINMUX_GPIO(GPIO_FN_DRAK0, DRAK0_MARK),
+ PINMUX_GPIO(GPIO_FN_SCK3, SCK3_MARK),
+ PINMUX_GPIO(GPIO_FN_SCK2, SCK2_MARK),
+ PINMUX_GPIO(GPIO_FN_SCK1, SCK1_MARK),
+ PINMUX_GPIO(GPIO_FN_SCK0, SCK0_MARK),
+ PINMUX_GPIO(GPIO_FN_IRL3, IRL3_MARK),
+ PINMUX_GPIO(GPIO_FN_IRL2, IRL2_MARK),
+ PINMUX_GPIO(GPIO_FN_IRL1, IRL1_MARK),
+ PINMUX_GPIO(GPIO_FN_IRL0, IRL0_MARK),
+ PINMUX_GPIO(GPIO_FN_TXD3, TXD3_MARK),
+ PINMUX_GPIO(GPIO_FN_TXD2, TXD2_MARK),
+ PINMUX_GPIO(GPIO_FN_TXD1, TXD1_MARK),
+ PINMUX_GPIO(GPIO_FN_TXD0, TXD0_MARK),
+ PINMUX_GPIO(GPIO_FN_RXD3, RXD3_MARK),
+ PINMUX_GPIO(GPIO_FN_RXD2, RXD2_MARK),
+ PINMUX_GPIO(GPIO_FN_RXD1, RXD1_MARK),
+ PINMUX_GPIO(GPIO_FN_RXD0, RXD0_MARK),
+ PINMUX_GPIO(GPIO_FN_CE2B, CE2B_MARK),
+ PINMUX_GPIO(GPIO_FN_CE2A, CE2A_MARK),
+ PINMUX_GPIO(GPIO_FN_IOIS16, IOIS16_MARK),
+ PINMUX_GPIO(GPIO_FN_STATUS1, STATUS1_MARK),
+ PINMUX_GPIO(GPIO_FN_STATUS0, STATUS0_MARK),
+ PINMUX_GPIO(GPIO_FN_IRQOUT, IRQOUT_MARK),
+};
+
+static struct pinmux_cfg_reg shx3_pinmux_config_regs[] = {
+ { PINMUX_CFG_REG("PABCR", 0xffc70000, 32, 2) {
+ PA7_FN, PA7_OUT, PA7_IN, PA7_IN_PU,
+ PA6_FN, PA6_OUT, PA6_IN, PA6_IN_PU,
+ PA5_FN, PA5_OUT, PA5_IN, PA5_IN_PU,
+ PA4_FN, PA4_OUT, PA4_IN, PA4_IN_PU,
+ PA3_FN, PA3_OUT, PA3_IN, PA3_IN_PU,
+ PA2_FN, PA2_OUT, PA2_IN, PA2_IN_PU,
+ PA1_FN, PA1_OUT, PA1_IN, PA1_IN_PU,
+ PA0_FN, PA0_OUT, PA0_IN, PA0_IN_PU,
+ PB7_FN, PB7_OUT, PB7_IN, PB7_IN_PU,
+ PB6_FN, PB6_OUT, PB6_IN, PB6_IN_PU,
+ PB5_FN, PB5_OUT, PB5_IN, PB5_IN_PU,
+ PB4_FN, PB4_OUT, PB4_IN, PB4_IN_PU,
+ PB3_FN, PB3_OUT, PB3_IN, PB3_IN_PU,
+ PB2_FN, PB2_OUT, PB2_IN, PB2_IN_PU,
+ PB1_FN, PB1_OUT, PB1_IN, PB1_IN_PU,
+ PB0_FN, PB0_OUT, PB0_IN, PB0_IN_PU, },
+ },
+ { PINMUX_CFG_REG("PCDCR", 0xffc70004, 32, 2) {
+ PC7_FN, PC7_OUT, PC7_IN, PC7_IN_PU,
+ PC6_FN, PC6_OUT, PC6_IN, PC6_IN_PU,
+ PC5_FN, PC5_OUT, PC5_IN, PC5_IN_PU,
+ PC4_FN, PC4_OUT, PC4_IN, PC4_IN_PU,
+ PC3_FN, PC3_OUT, PC3_IN, PC3_IN_PU,
+ PC2_FN, PC2_OUT, PC2_IN, PC2_IN_PU,
+ PC1_FN, PC1_OUT, PC1_IN, PC1_IN_PU,
+ PC0_FN, PC0_OUT, PC0_IN, PC0_IN_PU,
+ PD7_FN, PD7_OUT, PD7_IN, PD7_IN_PU,
+ PD6_FN, PD6_OUT, PD6_IN, PD6_IN_PU,
+ PD5_FN, PD5_OUT, PD5_IN, PD5_IN_PU,
+ PD4_FN, PD4_OUT, PD4_IN, PD4_IN_PU,
+ PD3_FN, PD3_OUT, PD3_IN, PD3_IN_PU,
+ PD2_FN, PD2_OUT, PD2_IN, PD2_IN_PU,
+ PD1_FN, PD1_OUT, PD1_IN, PD1_IN_PU,
+ PD0_FN, PD0_OUT, PD0_IN, PD0_IN_PU, },
+ },
+ { PINMUX_CFG_REG("PEFCR", 0xffc70008, 32, 2) {
+ PE7_FN, PE7_OUT, PE7_IN, PE7_IN_PU,
+ PE6_FN, PE6_OUT, PE6_IN, PE6_IN_PU,
+ PE5_FN, PE5_OUT, PE5_IN, PE5_IN_PU,
+ PE4_FN, PE4_OUT, PE4_IN, PE4_IN_PU,
+ PE3_FN, PE3_OUT, PE3_IN, PE3_IN_PU,
+ PE2_FN, PE2_OUT, PE2_IN, PE2_IN_PU,
+ PE1_FN, PE1_OUT, PE1_IN, PE1_IN_PU,
+ PE0_FN, PE0_OUT, PE0_IN, PE0_IN_PU,
+ PF7_FN, PF7_OUT, PF7_IN, PF7_IN_PU,
+ PF6_FN, PF6_OUT, PF6_IN, PF6_IN_PU,
+ PF5_FN, PF5_OUT, PF5_IN, PF5_IN_PU,
+ PF4_FN, PF4_OUT, PF4_IN, PF4_IN_PU,
+ PF3_FN, PF3_OUT, PF3_IN, PF3_IN_PU,
+ PF2_FN, PF2_OUT, PF2_IN, PF2_IN_PU,
+ PF1_FN, PF1_OUT, PF1_IN, PF1_IN_PU,
+ PF0_FN, PF0_OUT, PF0_IN, PF0_IN_PU, },
+ },
+ { PINMUX_CFG_REG("PGHCR", 0xffc7000c, 32, 2) {
+ PG7_FN, PG7_OUT, PG7_IN, PG7_IN_PU,
+ PG6_FN, PG6_OUT, PG6_IN, PG6_IN_PU,
+ PG5_FN, PG5_OUT, PG5_IN, PG5_IN_PU,
+ PG4_FN, PG4_OUT, PG4_IN, PG4_IN_PU,
+ PG3_FN, PG3_OUT, PG3_IN, PG3_IN_PU,
+ PG2_FN, PG2_OUT, PG2_IN, PG2_IN_PU,
+ PG1_FN, PG1_OUT, PG1_IN, PG1_IN_PU,
+ PG0_FN, PG0_OUT, PG0_IN, PG0_IN_PU,
+ 0, 0, 0, 0,
+ 0, 0, 0, 0,
+ PH5_FN, PH5_OUT, PH5_IN, PH5_IN_PU,
+ PH4_FN, PH4_OUT, PH4_IN, PH4_IN_PU,
+ PH3_FN, PH3_OUT, PH3_IN, PH3_IN_PU,
+ PH2_FN, PH2_OUT, PH2_IN, PH2_IN_PU,
+ PH1_FN, PH1_OUT, PH1_IN, PH1_IN_PU,
+ PH0_FN, PH0_OUT, PH0_IN, PH0_IN_PU, },
+ },
+ { },
+};
+
+static struct pinmux_data_reg shx3_pinmux_data_regs[] = {
+ { PINMUX_DATA_REG("PABDR", 0xffc70010, 32) {
+ 0, 0, 0, 0, 0, 0, 0, 0,
+ PA7_DATA, PA6_DATA, PA5_DATA, PA4_DATA,
+ PA3_DATA, PA2_DATA, PA1_DATA, PA0_DATA,
+ 0, 0, 0, 0, 0, 0, 0, 0,
+ PB7_DATA, PB6_DATA, PB5_DATA, PB4_DATA,
+ PB3_DATA, PB2_DATA, PB1_DATA, PB0_DATA, },
+ },
+ { PINMUX_DATA_REG("PCDDR", 0xffc70014, 32) {
+ 0, 0, 0, 0, 0, 0, 0, 0,
+ PC7_DATA, PC6_DATA, PC5_DATA, PC4_DATA,
+ PC3_DATA, PC2_DATA, PC1_DATA, PC0_DATA,
+ 0, 0, 0, 0, 0, 0, 0, 0,
+ PD7_DATA, PD6_DATA, PD5_DATA, PD4_DATA,
+ PD3_DATA, PD2_DATA, PD1_DATA, PD0_DATA, },
+ },
+ { PINMUX_DATA_REG("PEFDR", 0xffc70018, 32) {
+ 0, 0, 0, 0, 0, 0, 0, 0,
+ PE7_DATA, PE6_DATA, PE5_DATA, PE4_DATA,
+ PE3_DATA, PE2_DATA, PE1_DATA, PE0_DATA,
+ 0, 0, 0, 0, 0, 0, 0, 0,
+ PF7_DATA, PF6_DATA, PF5_DATA, PF4_DATA,
+ PF3_DATA, PF2_DATA, PF1_DATA, PF0_DATA, },
+ },
+ { PINMUX_DATA_REG("PGHDR", 0xffc7001c, 32) {
+ 0, 0, 0, 0, 0, 0, 0, 0,
+ PG7_DATA, PG6_DATA, PG5_DATA, PG4_DATA,
+ PG3_DATA, PG2_DATA, PG1_DATA, PG0_DATA,
+ 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, PH5_DATA, PH4_DATA,
+ PH3_DATA, PH2_DATA, PH1_DATA, PH0_DATA, },
+ },
+ { },
+};
+
+static struct pinmux_info shx3_pinmux_info = {
+ .name = "shx3_pfc",
+ .reserved_id = PINMUX_RESERVED,
+ .data = { PINMUX_DATA_BEGIN, PINMUX_DATA_END },
+ .input = { PINMUX_INPUT_BEGIN, PINMUX_INPUT_END },
+ .input_pu = { PINMUX_INPUT_PULLUP_BEGIN,
+ PINMUX_INPUT_PULLUP_END },
+ .output = { PINMUX_OUTPUT_BEGIN, PINMUX_OUTPUT_END },
+ .mark = { PINMUX_MARK_BEGIN, PINMUX_MARK_END },
+ .function = { PINMUX_FUNCTION_BEGIN, PINMUX_FUNCTION_END },
+ .first_gpio = GPIO_PA7,
+ .last_gpio = GPIO_FN_IRQOUT,
+ .gpios = shx3_pinmux_gpios,
+ .gpio_data = shx3_pinmux_data,
+ .gpio_data_size = ARRAY_SIZE(shx3_pinmux_data),
+ .cfg_regs = shx3_pinmux_config_regs,
+ .data_regs = shx3_pinmux_data_regs,
+};
+
+static int __init shx3_pinmux_setup(void)
+{
+ return register_pinmux(&shx3_pinmux_info);
+}
+arch_initcall(shx3_pinmux_setup);
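
The PINMUX_CFG_REG entries above describe 32-bit control registers carved into sixteen 2-bit fields, one per pin, and the four enums listed per pin (FN, OUT, IN, IN_PU) are the possible field values. The sketch below works through that encoding for a PABCR-style register; the MSB-first packing and the 0..3 value ordering are my reading of the sh pinmux convention, not something this patch states, so treat it purely as an illustration.

#include <stdio.h>
#include <stdint.h>

/* Illustrative only: assumes 16 pins x 2 bits, packed MSB-first, with
 * field values 0..3 meaning FN, OUT, IN, IN_PU in the order the table
 * lists them. */
enum pin_mode { MODE_FN = 0, MODE_OUT = 1, MODE_IN = 2, MODE_IN_PU = 3 };

static uint32_t set_pin_mode(uint32_t reg, unsigned int field_idx,
			     enum pin_mode mode)
{
	/* field 0 is the first table entry (e.g. PA7 in PABCR), which
	 * lands in the top two bits of the 32-bit register */
	unsigned int shift = 32 - 2 * (field_idx + 1);

	reg &= ~(0x3u << shift);
	return reg | ((uint32_t)mode << shift);
}

int main(void)
{
	/* hypothetical: drive PA7 (field 0) as output, PB0 (field 15) as
	 * pulled-up input in a PABCR-like register */
	uint32_t reg = 0;

	reg = set_pin_mode(reg, 0, MODE_OUT);
	reg = set_pin_mode(reg, 15, MODE_IN_PU);
	printf("PABCR-style value: 0x%08x\n", reg);
	return 0;
}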
diff --git a/arch/sh/kernel/cpu/sh4a/setup-sh7722.c b/arch/sh/kernel/cpu/sh4a/setup-sh7722.c
index 156ccc960015..d551ed8dea95 100644
--- a/arch/sh/kernel/cpu/sh4a/setup-sh7722.c
+++ b/arch/sh/kernel/cpu/sh4a/setup-sh7722.c
@@ -551,7 +551,7 @@ static struct resource siu_resources[] = {
};
static struct platform_device siu_device = {
- .name = "sh_siu",
+ .name = "siu-pcm-audio",
.id = -1,
.dev = {
.platform_data = &siu_platform_data,
diff --git a/arch/sh/kernel/cpu/sh4a/setup-sh7724.c b/arch/sh/kernel/cpu/sh4a/setup-sh7724.c
index 79c556e56262..828c9657eb52 100644
--- a/arch/sh/kernel/cpu/sh4a/setup-sh7724.c
+++ b/arch/sh/kernel/cpu/sh4a/setup-sh7724.c
@@ -524,6 +524,70 @@ static struct platform_device veu1_device = {
},
};
+/* BEU0 */
+static struct uio_info beu0_platform_data = {
+ .name = "BEU0",
+ .version = "0",
+ .irq = evt2irq(0x8A0),
+};
+
+static struct resource beu0_resources[] = {
+ [0] = {
+ .name = "BEU0",
+ .start = 0xfe930000,
+ .end = 0xfe933400,
+ .flags = IORESOURCE_MEM,
+ },
+ [1] = {
+ /* place holder for contiguous memory */
+ },
+};
+
+static struct platform_device beu0_device = {
+ .name = "uio_pdrv_genirq",
+ .id = 6,
+ .dev = {
+ .platform_data = &beu0_platform_data,
+ },
+ .resource = beu0_resources,
+ .num_resources = ARRAY_SIZE(beu0_resources),
+ .archdata = {
+ .hwblk_id = HWBLK_BEU0,
+ },
+};
+
+/* BEU1 */
+static struct uio_info beu1_platform_data = {
+ .name = "BEU1",
+ .version = "0",
+ .irq = evt2irq(0xA00),
+};
+
+static struct resource beu1_resources[] = {
+ [0] = {
+ .name = "BEU1",
+ .start = 0xfe940000,
+ .end = 0xfe943400,
+ .flags = IORESOURCE_MEM,
+ },
+ [1] = {
+ /* place holder for contiguous memory */
+ },
+};
+
+static struct platform_device beu1_device = {
+ .name = "uio_pdrv_genirq",
+ .id = 7,
+ .dev = {
+ .platform_data = &beu1_platform_data,
+ },
+ .resource = beu1_resources,
+ .num_resources = ARRAY_SIZE(beu1_resources),
+ .archdata = {
+ .hwblk_id = HWBLK_BEU1,
+ },
+};
+
static struct sh_timer_config cmt_platform_data = {
.channel_offset = 0x60,
.timer_bit = 5,
@@ -857,6 +921,8 @@ static struct platform_device *sh7724_devices[] __initdata = {
&vpu_device,
&veu0_device,
&veu1_device,
+ &beu0_device,
+ &beu1_device,
&jpu_device,
&spu0_device,
&spu1_device,
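
BEU0/BEU1 are exposed through uio_pdrv_genirq rather than a dedicated kernel driver, so a userspace program owns the block: it mmap()s the register window and blocks in read() until the IRQ named in the uio_info fires. A minimal sketch of that idiom follows; the /dev/uio0 node name and the assumption that map 0 is the 0xfe930000 window are illustrative, since the actual index depends on probe order.

#include <stdio.h>
#include <stdint.h>
#include <fcntl.h>
#include <unistd.h>
#include <sys/mman.h>

int main(void)
{
	/* hypothetical node; the real index depends on probe order */
	int fd = open("/dev/uio0", O_RDWR);
	if (fd < 0) {
		perror("open");
		return 1;
	}

	/* UIO convention: mmap offset N * page_size selects map N */
	volatile uint32_t *regs = mmap(NULL, getpagesize(),
				       PROT_READ | PROT_WRITE,
				       MAP_SHARED, fd, 0 * getpagesize());
	if (regs == MAP_FAILED) {
		perror("mmap");
		return 1;
	}

	/* blocking read returns the cumulative interrupt count */
	uint32_t count;
	if (read(fd, &count, sizeof(count)) == sizeof(count))
		printf("interrupt #%u, first reg = 0x%08x\n", count, regs[0]);

	return 0;
}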
diff --git a/arch/sh/kernel/cpu/sh4a/setup-sh7757.c b/arch/sh/kernel/cpu/sh4a/setup-sh7757.c
index 444aca95b20d..749c6388d5a5 100644
--- a/arch/sh/kernel/cpu/sh4a/setup-sh7757.c
+++ b/arch/sh/kernel/cpu/sh4a/setup-sh7757.c
@@ -26,7 +26,7 @@ static struct plat_sci_port scif2_platform_data = {
static struct platform_device scif2_device = {
.name = "sh-sci",
- .id = 2,
+ .id = 0,
.dev = {
.platform_data = &scif2_platform_data,
},
@@ -41,7 +41,7 @@ static struct plat_sci_port scif3_platform_data = {
static struct platform_device scif3_device = {
.name = "sh-sci",
- .id = 3,
+ .id = 1,
.dev = {
.platform_data = &scif3_platform_data,
},
@@ -56,7 +56,7 @@ static struct plat_sci_port scif4_platform_data = {
static struct platform_device scif4_device = {
.name = "sh-sci",
- .id = 4,
+ .id = 2,
.dev = {
.platform_data = &scif4_platform_data,
},
@@ -163,39 +163,23 @@ enum {
IRL4_HHLL, IRL4_HHLH, IRL4_HHHL,
IRQ0, IRQ1, IRQ2, IRQ3, IRQ4, IRQ5, IRQ6, IRQ7,
- SDHI,
- DVC,
- IRQ8, IRQ9, IRQ10,
- WDT0,
- TMU0, TMU1, TMU2, TMU2_TICPI,
+ SDHI, DVC,
+ IRQ8, IRQ9, IRQ11, IRQ10, IRQ12, IRQ13, IRQ14, IRQ15,
+ TMU0, TMU1, TMU2, TMU2_TICPI, TMU3, TMU4, TMU5,
HUDI,
-
ARC4,
- DMAC0,
- IRQ11,
- SCIF2,
- DMAC1_6,
- USB0,
- IRQ12,
+ DMAC0_5, DMAC6_7, DMAC8_11,
+ SCIF0, SCIF1, SCIF2, SCIF3, SCIF4,
+ USB0, USB1,
JMC,
- SPI1,
- IRQ13, IRQ14,
- USB1,
+ SPI0, SPI1,
TMR01, TMR23, TMR45,
- WDT1,
FRT,
- LPC,
- SCIF0, SCIF1, SCIF3,
- PECI0I, PECI1I, PECI2I,
- IRQ15,
+ LPC, LPC5, LPC6, LPC7, LPC8,
+ PECI0, PECI1, PECI2, PECI3, PECI4, PECI5,
ETHERC,
- SPI0,
- ADC1,
- DMAC1_8,
+ ADC0, ADC1,
SIM,
- TMU3, TMU4, TMU5,
- ADC0,
- SCIF4,
IIC0_0, IIC0_1, IIC0_2, IIC0_3,
IIC1_0, IIC1_1, IIC1_2, IIC1_3,
IIC2_0, IIC2_1, IIC2_2, IIC2_3,
@@ -206,9 +190,23 @@ enum {
IIC7_0, IIC7_1, IIC7_2, IIC7_3,
IIC8_0, IIC8_1, IIC8_2, IIC8_3,
IIC9_0, IIC9_1, IIC9_2, IIC9_3,
- PCIINTA,
- PCIE,
+ ONFICTL,
+ MMC1, MMC2,
+ ECCU,
+ PCIC,
+ G200,
+ RSPI,
SGPIO,
+ DMINT12, DMINT13, DMINT14, DMINT15, DMINT16, DMINT17, DMINT18, DMINT19,
+ DMINT20, DMINT21, DMINT22, DMINT23,
+ DDRECC,
+ TSIP,
+ PCIE_BRIDGE,
+ WDT0B, WDT1B, WDT2B, WDT3B, WDT4B, WDT5B, WDT6B, WDT7B, WDT8B,
+ GETHER0, GETHER1, GETHER2,
+ PBIA, PBIB, PBIC,
+ DMAE2, DMAE3,
+ SERMUX2, SERMUX3,
/* interrupt groups */
@@ -221,19 +219,18 @@ static struct intc_vect vectors[] __initdata = {
INTC_VECT(DVC, 0x4e0),
INTC_VECT(IRQ8, 0x500), INTC_VECT(IRQ9, 0x520),
INTC_VECT(IRQ10, 0x540),
- INTC_VECT(WDT0, 0x560),
INTC_VECT(TMU0, 0x580), INTC_VECT(TMU1, 0x5a0),
INTC_VECT(TMU2, 0x5c0), INTC_VECT(TMU2_TICPI, 0x5e0),
INTC_VECT(HUDI, 0x600),
INTC_VECT(ARC4, 0x620),
- INTC_VECT(DMAC0, 0x640), INTC_VECT(DMAC0, 0x660),
- INTC_VECT(DMAC0, 0x680), INTC_VECT(DMAC0, 0x6a0),
- INTC_VECT(DMAC0, 0x6c0),
+ INTC_VECT(DMAC0_5, 0x640), INTC_VECT(DMAC0_5, 0x660),
+ INTC_VECT(DMAC0_5, 0x680), INTC_VECT(DMAC0_5, 0x6a0),
+ INTC_VECT(DMAC0_5, 0x6c0),
INTC_VECT(IRQ11, 0x6e0),
INTC_VECT(SCIF2, 0x700), INTC_VECT(SCIF2, 0x720),
INTC_VECT(SCIF2, 0x740), INTC_VECT(SCIF2, 0x760),
- INTC_VECT(DMAC0, 0x780), INTC_VECT(DMAC0, 0x7a0),
- INTC_VECT(DMAC1_6, 0x7c0), INTC_VECT(DMAC1_6, 0x7e0),
+ INTC_VECT(DMAC0_5, 0x780), INTC_VECT(DMAC0_5, 0x7a0),
+ INTC_VECT(DMAC6_7, 0x7c0), INTC_VECT(DMAC6_7, 0x7e0),
INTC_VECT(USB0, 0x840),
INTC_VECT(IRQ12, 0x880),
INTC_VECT(JMC, 0x8a0),
@@ -242,7 +239,6 @@ static struct intc_vect vectors[] __initdata = {
INTC_VECT(USB1, 0x920),
INTC_VECT(TMR01, 0xa00), INTC_VECT(TMR23, 0xa20),
INTC_VECT(TMR45, 0xa40),
- INTC_VECT(WDT1, 0xa60),
INTC_VECT(FRT, 0xa80),
INTC_VECT(LPC, 0xaa0), INTC_VECT(LPC, 0xac0),
INTC_VECT(LPC, 0xae0), INTC_VECT(LPC, 0xb00),
@@ -250,14 +246,14 @@ static struct intc_vect vectors[] __initdata = {
INTC_VECT(SCIF0, 0xb40), INTC_VECT(SCIF1, 0xb60),
INTC_VECT(SCIF3, 0xb80), INTC_VECT(SCIF3, 0xba0),
INTC_VECT(SCIF3, 0xbc0), INTC_VECT(SCIF3, 0xbe0),
- INTC_VECT(PECI0I, 0xc00), INTC_VECT(PECI1I, 0xc20),
- INTC_VECT(PECI2I, 0xc40),
+ INTC_VECT(PECI0, 0xc00), INTC_VECT(PECI1, 0xc20),
+ INTC_VECT(PECI2, 0xc40),
INTC_VECT(IRQ15, 0xc60),
INTC_VECT(ETHERC, 0xc80), INTC_VECT(ETHERC, 0xca0),
INTC_VECT(SPI0, 0xcc0),
INTC_VECT(ADC1, 0xce0),
- INTC_VECT(DMAC1_8, 0xd00), INTC_VECT(DMAC1_8, 0xd20),
- INTC_VECT(DMAC1_8, 0xd40), INTC_VECT(DMAC1_8, 0xd60),
+ INTC_VECT(DMAC8_11, 0xd00), INTC_VECT(DMAC8_11, 0xd20),
+ INTC_VECT(DMAC8_11, 0xd40), INTC_VECT(DMAC8_11, 0xd60),
INTC_VECT(SIM, 0xd80), INTC_VECT(SIM, 0xda0),
INTC_VECT(SIM, 0xdc0), INTC_VECT(SIM, 0xde0),
INTC_VECT(TMU3, 0xe00), INTC_VECT(TMU4, 0xe20),
@@ -278,17 +274,47 @@ static struct intc_vect vectors[] __initdata = {
INTC_VECT(IIC5_0, 0x1860), INTC_VECT(IIC5_1, 0x1880),
INTC_VECT(IIC5_2, 0x18a0), INTC_VECT(IIC5_3, 0x18c0),
INTC_VECT(IIC6_0, 0x18e0), INTC_VECT(IIC6_1, 0x1900),
- INTC_VECT(IIC6_2, 0x1920), INTC_VECT(IIC6_3, 0x1980),
+ INTC_VECT(IIC6_2, 0x1920),
+ INTC_VECT(ONFICTL, 0x1960),
+ INTC_VECT(IIC6_3, 0x1980),
INTC_VECT(IIC7_0, 0x19a0), INTC_VECT(IIC7_1, 0x1a00),
INTC_VECT(IIC7_2, 0x1a20), INTC_VECT(IIC7_3, 0x1a40),
INTC_VECT(IIC8_0, 0x1a60), INTC_VECT(IIC8_1, 0x1a80),
INTC_VECT(IIC8_2, 0x1aa0), INTC_VECT(IIC8_3, 0x1b40),
INTC_VECT(IIC9_0, 0x1b60), INTC_VECT(IIC9_1, 0x1b80),
INTC_VECT(IIC9_2, 0x1c00), INTC_VECT(IIC9_3, 0x1c20),
- INTC_VECT(PCIINTA, 0x1ce0),
- INTC_VECT(PCIE, 0x1e00),
- INTC_VECT(SGPIO, 0x1f80),
- INTC_VECT(SGPIO, 0x1fa0),
+ INTC_VECT(MMC1, 0x1c60), INTC_VECT(MMC2, 0x1c80),
+ INTC_VECT(ECCU, 0x1cc0),
+ INTC_VECT(PCIC, 0x1ce0),
+ INTC_VECT(G200, 0x1d00),
+ INTC_VECT(RSPI, 0x1d80), INTC_VECT(RSPI, 0x1da0),
+ INTC_VECT(RSPI, 0x1dc0), INTC_VECT(RSPI, 0x1de0),
+ INTC_VECT(PECI3, 0x1ec0), INTC_VECT(PECI4, 0x1ee0),
+ INTC_VECT(PECI5, 0x1f00),
+ INTC_VECT(SGPIO, 0x1f80), INTC_VECT(SGPIO, 0x1fa0),
+ INTC_VECT(SGPIO, 0x1fc0),
+ INTC_VECT(DMINT12, 0x2400), INTC_VECT(DMINT13, 0x2420),
+ INTC_VECT(DMINT14, 0x2440), INTC_VECT(DMINT15, 0x2460),
+ INTC_VECT(DMINT16, 0x2480), INTC_VECT(DMINT17, 0x24e0),
+ INTC_VECT(DMINT18, 0x2500), INTC_VECT(DMINT19, 0x2520),
+ INTC_VECT(DMINT20, 0x2540), INTC_VECT(DMINT21, 0x2560),
+ INTC_VECT(DMINT22, 0x2580), INTC_VECT(DMINT23, 0x2600),
+ INTC_VECT(DDRECC, 0x2620),
+ INTC_VECT(TSIP, 0x2640),
+ INTC_VECT(PCIE_BRIDGE, 0x27c0),
+ INTC_VECT(WDT0B, 0x2800), INTC_VECT(WDT1B, 0x2820),
+ INTC_VECT(WDT2B, 0x2840), INTC_VECT(WDT3B, 0x2860),
+ INTC_VECT(WDT4B, 0x2880), INTC_VECT(WDT5B, 0x28a0),
+ INTC_VECT(WDT6B, 0x28c0), INTC_VECT(WDT7B, 0x28e0),
+ INTC_VECT(WDT8B, 0x2900),
+ INTC_VECT(GETHER0, 0x2960), INTC_VECT(GETHER1, 0x2980),
+ INTC_VECT(GETHER2, 0x29a0),
+ INTC_VECT(PBIA, 0x2a00), INTC_VECT(PBIB, 0x2a20),
+ INTC_VECT(PBIC, 0x2a40),
+ INTC_VECT(DMAE2, 0x2a60), INTC_VECT(DMAE3, 0x2a80),
+ INTC_VECT(SERMUX2, 0x2aa0), INTC_VECT(SERMUX3, 0x2b40),
+ INTC_VECT(LPC5, 0x2b60), INTC_VECT(LPC6, 0x2b80),
+ INTC_VECT(LPC7, 0x2c00), INTC_VECT(LPC8, 0x2c20),
};
static struct intc_group groups[] __initdata = {
@@ -312,31 +338,45 @@ static struct intc_mask_reg mask_registers[] __initdata = {
{ 0xffd40038, 0xffd4003c, 32, /* INT2MSKR / INT2MSKCR */
{ 0, 0, 0, 0, 0, 0, 0, 0,
- 0, DMAC1_8, 0, PECI0I, LPC, FRT, WDT1, TMR45,
- TMR23, TMR01, 0, 0, 0, 0, 0, DMAC0,
- HUDI, 0, WDT0, SCIF3, SCIF2, SDHI, TMU345, TMU012
+ 0, DMAC8_11, 0, PECI0, LPC, FRT, 0, TMR45,
+ TMR23, TMR01, 0, 0, 0, 0, 0, DMAC0_5,
+ HUDI, 0, 0, SCIF3, SCIF2, SDHI, TMU345, TMU012
} },
{ 0xffd400d0, 0xffd400d4, 32, /* INT2MSKR1 / INT2MSKCR1 */
{ IRQ15, IRQ14, IRQ13, IRQ12, IRQ11, IRQ10, SCIF4, ETHERC,
IRQ9, IRQ8, SCIF1, SCIF0, USB0, 0, 0, USB1,
- ADC1, 0, DMAC1_6, ADC0, SPI0, SIM, PECI2I, PECI1I,
+ ADC1, 0, DMAC6_7, ADC0, SPI0, SIM, PECI2, PECI1,
ARC4, 0, SPI1, JMC, 0, 0, 0, DVC
} },
{ 0xffd10038, 0xffd1003c, 32, /* INT2MSKR2 / INT2MSKCR2 */
- { IIC4_1, IIC4_2, IIC5_0, 0, 0, 0, SGPIO, 0,
- 0, 0, 0, IIC9_2, IIC8_2, IIC8_1, IIC8_0, IIC7_3,
+ { IIC4_1, IIC4_2, IIC5_0, ONFICTL, 0, 0, SGPIO, 0,
+ 0, G200, 0, IIC9_2, IIC8_2, IIC8_1, IIC8_0, IIC7_3,
IIC7_2, IIC7_1, IIC6_3, IIC0_0, IIC0_1, IIC0_2, IIC0_3, IIC3_1,
- IIC2_3, 0, IIC2_1, IIC9_1, IIC3_3, IIC1_0, PCIE, IIC2_2
+ IIC2_3, 0, IIC2_1, IIC9_1, IIC3_3, IIC1_0, 0, IIC2_2
} },
- { 0xffd100d0, 0xff1400d4, 32, /* INT2MSKR3 / INT2MSKCR4 */
- { 0, IIC6_1, IIC6_0, IIC5_1, IIC3_2, IIC2_0, 0, 0,
+ { 0xffd100d0, 0xffd100d4, 32, /* INT2MSKR3 / INT2MSKCR3 */
+ { MMC1, IIC6_1, IIC6_0, IIC5_1, IIC3_2, IIC2_0, PECI5, MMC2,
IIC1_3, IIC1_2, IIC9_0, IIC8_3, IIC4_3, IIC7_0, 0, IIC6_2,
- PCIINTA, 0, IIC4_0, 0, 0, 0, 0, IIC9_3,
+ PCIC, 0, IIC4_0, 0, ECCU, RSPI, 0, IIC9_3,
IIC3_0, 0, IIC5_3, IIC5_2, 0, 0, 0, IIC1_1
} },
+
+ { 0xffd20038, 0xffd2003c, 32, /* INT2MSKR4 / INT2MSKCR4 */
+ { WDT0B, WDT1B, WDT3B, GETHER0, 0, 0, 0, 0,
+ 0, 0, 0, LPC7, SERMUX2, DMAE3, DMAE2, PBIC,
+ PBIB, PBIA, GETHER1, DMINT12, DMINT13, DMINT14, DMINT15, TSIP,
+ DMINT23, 0, DMINT21, LPC6, 0, DMINT16, 0, DMINT22
+ } },
+
+ { 0xffd200d0, 0xffd200d4, 32, /* INT2MSKR5 / INT2MSKCR5 */
+ { 0, WDT8B, WDT7B, WDT4B, 0, DMINT20, 0, 0,
+ DMINT19, DMINT18, LPC5, SERMUX3, WDT2B, GETHER2, 0, 0,
+ 0, 0, PCIE_BRIDGE, 0, 0, 0, 0, LPC8,
+ DDRECC, 0, WDT6B, WDT5B, 0, 0, 0, DMINT17
+ } },
};
#define INTPRI 0xffd00010
@@ -372,6 +412,22 @@ static struct intc_mask_reg mask_registers[] __initdata = {
#define INT2PRI29 0xffd100b4
#define INT2PRI30 0xffd100b8
#define INT2PRI31 0xffd100bc
+#define INT2PRI32 0xffd20000
+#define INT2PRI33 0xffd20004
+#define INT2PRI34 0xffd20008
+#define INT2PRI35 0xffd2000c
+#define INT2PRI36 0xffd20010
+#define INT2PRI37 0xffd20014
+#define INT2PRI38 0xffd20018
+#define INT2PRI39 0xffd2001c
+#define INT2PRI40 0xffd200a0
+#define INT2PRI41 0xffd200a4
+#define INT2PRI42 0xffd200a8
+#define INT2PRI43 0xffd200ac
+#define INT2PRI44 0xffd200b0
+#define INT2PRI45 0xffd200b4
+#define INT2PRI46 0xffd200b8
+#define INT2PRI47 0xffd200bc
static struct intc_prio_reg prio_registers[] __initdata = {
{ INTPRI, 0, 32, 4, { IRQ0, IRQ1, IRQ2, IRQ3,
@@ -379,39 +435,61 @@ static struct intc_prio_reg prio_registers[] __initdata = {
{ INT2PRI0, 0, 32, 8, { TMU0, TMU1, TMU2, TMU2_TICPI } },
{ INT2PRI1, 0, 32, 8, { TMU3, TMU4, TMU5, SDHI } },
- { INT2PRI2, 0, 32, 8, { SCIF2, SCIF3, WDT0, IRQ8 } },
- { INT2PRI3, 0, 32, 8, { HUDI, DMAC0, ADC0, IRQ9 } },
+ { INT2PRI2, 0, 32, 8, { SCIF2, SCIF3, 0, IRQ8 } },
+ { INT2PRI3, 0, 32, 8, { HUDI, DMAC0_5, ADC0, IRQ9 } },
{ INT2PRI4, 0, 32, 8, { IRQ10, 0, TMR01, TMR23 } },
- { INT2PRI5, 0, 32, 8, { TMR45, WDT1, FRT, LPC } },
- { INT2PRI6, 0, 32, 8, { PECI0I, ETHERC, DMAC1_8, 0 } },
+ { INT2PRI5, 0, 32, 8, { TMR45, 0, FRT, LPC } },
+ { INT2PRI6, 0, 32, 8, { PECI0, ETHERC, DMAC8_11, 0 } },
{ INT2PRI7, 0, 32, 8, { SCIF4, 0, IRQ11, IRQ12 } },
{ INT2PRI8, 0, 32, 8, { 0, 0, 0, DVC } },
{ INT2PRI9, 0, 32, 8, { ARC4, 0, SPI1, JMC } },
- { INT2PRI10, 0, 32, 8, { SPI0, SIM, PECI2I, PECI1I } },
- { INT2PRI11, 0, 32, 8, { ADC1, IRQ13, DMAC1_6, IRQ14 } },
+ { INT2PRI10, 0, 32, 8, { SPI0, SIM, PECI2, PECI1 } },
+ { INT2PRI11, 0, 32, 8, { ADC1, IRQ13, DMAC6_7, IRQ14 } },
{ INT2PRI12, 0, 32, 8, { USB0, 0, IRQ15, USB1 } },
{ INT2PRI13, 0, 32, 8, { 0, 0, SCIF1, SCIF0 } },
{ INT2PRI16, 0, 32, 8, { IIC2_2, 0, 0, 0 } },
- { INT2PRI17, 0, 32, 8, { PCIE, 0, 0, IIC1_0 } },
+ { INT2PRI17, 0, 32, 8, { 0, 0, 0, IIC1_0 } },
{ INT2PRI18, 0, 32, 8, { IIC3_3, IIC9_1, IIC2_1, IIC1_2 } },
{ INT2PRI19, 0, 32, 8, { IIC2_3, IIC3_1, 0, IIC1_3 } },
{ INT2PRI20, 0, 32, 8, { IIC2_0, IIC6_3, IIC7_1, IIC7_2 } },
{ INT2PRI21, 0, 32, 8, { IIC7_3, IIC8_0, IIC8_1, IIC8_2 } },
- { INT2PRI22, 0, 32, 8, { IIC9_2, 0, 0, 0 } },
- { INT2PRI23, 0, 32, 8, { 0, SGPIO, IIC3_2, IIC5_1 } },
- { INT2PRI24, 0, 32, 8, { 0, 0, 0, IIC1_1 } },
+ { INT2PRI22, 0, 32, 8, { IIC9_2, MMC2, G200, 0 } },
+ { INT2PRI23, 0, 32, 8, { PECI5, SGPIO, IIC3_2, IIC5_1 } },
+ { INT2PRI24, 0, 32, 8, { PECI4, PECI3, 0, IIC1_1 } },
{ INT2PRI25, 0, 32, 8, { IIC3_0, 0, IIC5_3, IIC5_2 } },
- { INT2PRI26, 0, 32, 8, { 0, 0, 0, IIC9_3 } },
- { INT2PRI27, 0, 32, 8, { PCIINTA, IIC6_0, IIC4_0, IIC6_1 } },
- { INT2PRI28, 0, 32, 8, { IIC4_3, IIC7_0, 0, IIC6_2 } },
+ { INT2PRI26, 0, 32, 8, { ECCU, RSPI, 0, IIC9_3 } },
+ { INT2PRI27, 0, 32, 8, { PCIC, IIC6_0, IIC4_0, IIC6_1 } },
+ { INT2PRI28, 0, 32, 8, { IIC4_3, IIC7_0, MMC1, IIC6_2 } },
{ INT2PRI29, 0, 32, 8, { 0, 0, IIC9_0, IIC8_3 } },
- { INT2PRI30, 0, 32, 8, { IIC4_1, IIC4_2, IIC5_0, 0 } },
+ { INT2PRI30, 0, 32, 8, { IIC4_1, IIC4_2, IIC5_0, ONFICTL } },
{ INT2PRI31, 0, 32, 8, { IIC0_0, IIC0_1, IIC0_2, IIC0_3 } },
+ { INT2PRI32, 0, 32, 8, { DMINT22, 0, 0, 0 } },
+ { INT2PRI33, 0, 32, 8, { 0, 0, 0, DMINT16 } },
+ { INT2PRI34, 0, 32, 8, { 0, LPC6, DMINT21, DMINT18 } },
+ { INT2PRI35, 0, 32, 8, { DMINT23, TSIP, 0, DMINT19 } },
+ { INT2PRI36, 0, 32, 8, { DMINT20, GETHER1, PBIA, PBIB } },
+ { INT2PRI37, 0, 32, 8, { PBIC, DMAE2, DMAE3, SERMUX2 } },
+ { INT2PRI38, 0, 32, 8, { LPC7, 0, 0, 0 } },
+ { INT2PRI39, 0, 32, 8, { 0, 0, 0, WDT4B } },
+ { INT2PRI40, 0, 32, 8, { 0, 0, 0, DMINT17 } },
+ { INT2PRI41, 0, 32, 8, { DDRECC, 0, WDT6B, WDT5B } },
+ { INT2PRI42, 0, 32, 8, { 0, 0, 0, LPC8 } },
+ { INT2PRI43, 0, 32, 8, { 0, WDT7B, PCIE_BRIDGE, WDT8B } },
+ { INT2PRI44, 0, 32, 8, { WDT2B, GETHER2, 0, 0 } },
+ { INT2PRI45, 0, 32, 8, { 0, 0, LPC5, SERMUX3 } },
+ { INT2PRI46, 0, 32, 8, { WDT0B, WDT1B, WDT3B, GETHER0 } },
+ { INT2PRI47, 0, 32, 8, { DMINT12, DMINT13, DMINT14, DMINT15 } },
+};
+
+static struct intc_sense_reg sense_registers_irq8to15[] __initdata = {
+ { 0xffd100f8, 32, 2, /* ICR2 */ { IRQ15, IRQ14, IRQ13, IRQ12,
+ IRQ11, IRQ10, IRQ9, IRQ8 } },
};
static DECLARE_INTC_DESC(intc_desc, "sh7757", vectors, groups,
- mask_registers, prio_registers, NULL);
+ mask_registers, prio_registers,
+ sense_registers_irq8to15);
/* Support for external interrupt pins in IRQ mode */
static struct intc_vect vectors_irq0123[] __initdata = {
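
The new sense_registers_irq8to15 entry follows the struct intc_sense_reg layout visible here: { address, register width, field width, { ids } }, i.e. ICR2 at 0xffd100f8 is a 32-bit register split into 2-bit sense fields for IRQ15..IRQ8. The field placement below assumes the ids are listed MSB-first, which is my reading of the sh-intc convention rather than anything stated in the patch, so it is only a sketch.

#include <stdio.h>

/* Sketch: where each 2-bit sense field of a 32-bit ICR2-style register
 * lives, assuming the table lists fields MSB-first (IRQ15 ... IRQ8). */
int main(void)
{
	const char *ids[] = { "IRQ15", "IRQ14", "IRQ13", "IRQ12",
			      "IRQ11", "IRQ10", "IRQ9",  "IRQ8" };
	const unsigned int reg_width = 32, field_width = 2;

	for (unsigned int i = 0; i < 8; i++) {
		unsigned int shift = reg_width - field_width * (i + 1);
		printf("%-5s -> bits [%u:%u]\n", ids[i],
		       shift + field_width - 1, shift);
	}
	return 0;
}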
diff --git a/arch/sh/kernel/cpu/sh4a/setup-sh7786.c b/arch/sh/kernel/cpu/sh4a/setup-sh7786.c
index 8797723231ea..c016c0004714 100644
--- a/arch/sh/kernel/cpu/sh4a/setup-sh7786.c
+++ b/arch/sh/kernel/cpu/sh4a/setup-sh7786.c
@@ -629,33 +629,10 @@ static void __init sh7786_usb_setup(void)
}
}
-static int __init sh7786_devices_setup(void)
-{
- int ret;
-
- sh7786_usb_setup();
-
- ret = platform_add_devices(sh7786_early_devices,
- ARRAY_SIZE(sh7786_early_devices));
- if (unlikely(ret != 0))
- return ret;
-
- return platform_add_devices(sh7786_devices,
- ARRAY_SIZE(sh7786_devices));
-}
-arch_initcall(sh7786_devices_setup);
-
-void __init plat_early_device_setup(void)
-{
- early_platform_add_devices(sh7786_early_devices,
- ARRAY_SIZE(sh7786_early_devices));
-}
-
enum {
UNUSED = 0,
/* interrupt sources */
-
IRL0_LLLL, IRL0_LLLH, IRL0_LLHL, IRL0_LLHH,
IRL0_LHLL, IRL0_LHLH, IRL0_LHHL, IRL0_LHHH,
IRL0_HLLL, IRL0_HLLH, IRL0_HLHL, IRL0_HLHH,
@@ -693,9 +670,12 @@ enum {
Thermal,
INTICI0, INTICI1, INTICI2, INTICI3,
INTICI4, INTICI5, INTICI6, INTICI7,
+
+ /* Muxed sub-events */
+ TXI1, BRI1, RXI1, ERI1,
};
-static struct intc_vect vectors[] __initdata = {
+static struct intc_vect sh7786_vectors[] __initdata = {
INTC_VECT(WDT, 0x3e0),
INTC_VECT(TMU0_0, 0x400), INTC_VECT(TMU0_1, 0x420),
INTC_VECT(TMU0_2, 0x440), INTC_VECT(TMU0_3, 0x460),
@@ -756,14 +736,12 @@ static struct intc_vect vectors[] __initdata = {
#define INTDISTCR0 0xfe4100b0
#define INTDISTCR1 0xfe4100b4
-#define INTACK 0xfe4100b8
-#define INTACKCLR 0xfe4100bc
#define INT2DISTCR0 0xfe410900
#define INT2DISTCR1 0xfe410904
#define INT2DISTCR2 0xfe410908
#define INT2DISTCR3 0xfe41090c
-static struct intc_mask_reg mask_registers[] __initdata = {
+static struct intc_mask_reg sh7786_mask_registers[] __initdata = {
{ CnINTMSK0, CnINTMSKCLR0, 32,
{ IRQ0, IRQ1, IRQ2, IRQ3, IRQ4, IRQ5, IRQ6, IRQ7 },
INTC_SMP_BALANCING(INTDISTCR0) },
@@ -807,7 +785,7 @@ static struct intc_mask_reg mask_registers[] __initdata = {
0, 0, 0, 0, 0, 0, 0, 0 }, INTC_SMP_BALANCING(INT2DISTCR3) },
};
-static struct intc_prio_reg prio_registers[] __initdata = {
+static struct intc_prio_reg sh7786_prio_registers[] __initdata = {
{ 0xfe410010, 0, 32, 4, /* INTPRI */ { IRQ0, IRQ1, IRQ2, IRQ3,
IRQ4, IRQ5, IRQ6, IRQ7 } },
{ 0xfe410800, 0, 32, 8, /* INT2PRI0 */ { 0, 0, 0, WDT } },
@@ -851,11 +829,27 @@ static struct intc_prio_reg prio_registers[] __initdata = {
INTICI3, INTICI2, INTICI1, INTICI0 }, INTC_SMP(4, 2) },
};
-static DECLARE_INTC_DESC(intc_desc, "sh7786", vectors, NULL,
- mask_registers, prio_registers, NULL);
+static struct intc_subgroup sh7786_subgroups[] __initdata = {
+ { 0xfe410c20, 32, SCIF1,
+ { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, TXI1, BRI1, RXI1, ERI1 } },
+};
-/* Support for external interrupt pins in IRQ mode */
+static struct intc_desc sh7786_intc_desc __initdata = {
+ .name = "sh7786",
+ .hw = {
+ .vectors = sh7786_vectors,
+ .nr_vectors = ARRAY_SIZE(sh7786_vectors),
+ .mask_regs = sh7786_mask_registers,
+ .nr_mask_regs = ARRAY_SIZE(sh7786_mask_registers),
+ .subgroups = sh7786_subgroups,
+ .nr_subgroups = ARRAY_SIZE(sh7786_subgroups),
+ .prio_regs = sh7786_prio_registers,
+ .nr_prio_regs = ARRAY_SIZE(sh7786_prio_registers),
+ },
+};
+/* Support for external interrupt pins in IRQ mode */
static struct intc_vect vectors_irq0123[] __initdata = {
INTC_VECT(IRQ0, 0x200), INTC_VECT(IRQ1, 0x240),
INTC_VECT(IRQ2, 0x280), INTC_VECT(IRQ3, 0x2c0),
@@ -866,23 +860,25 @@ static struct intc_vect vectors_irq4567[] __initdata = {
INTC_VECT(IRQ6, 0x380), INTC_VECT(IRQ7, 0x3c0),
};
-static struct intc_sense_reg sense_registers[] __initdata = {
+static struct intc_sense_reg sh7786_sense_registers[] __initdata = {
{ 0xfe41001c, 32, 2, /* ICR1 */ { IRQ0, IRQ1, IRQ2, IRQ3,
IRQ4, IRQ5, IRQ6, IRQ7 } },
};
-static struct intc_mask_reg ack_registers[] __initdata = {
+static struct intc_mask_reg sh7786_ack_registers[] __initdata = {
{ 0xfe410024, 0, 32, /* INTREQ */
{ IRQ0, IRQ1, IRQ2, IRQ3, IRQ4, IRQ5, IRQ6, IRQ7 } },
};
static DECLARE_INTC_DESC_ACK(intc_desc_irq0123, "sh7786-irq0123",
- vectors_irq0123, NULL, mask_registers,
- prio_registers, sense_registers, ack_registers);
+ vectors_irq0123, NULL, sh7786_mask_registers,
+ sh7786_prio_registers, sh7786_sense_registers,
+ sh7786_ack_registers);
static DECLARE_INTC_DESC_ACK(intc_desc_irq4567, "sh7786-irq4567",
- vectors_irq4567, NULL, mask_registers,
- prio_registers, sense_registers, ack_registers);
+ vectors_irq4567, NULL, sh7786_mask_registers,
+ sh7786_prio_registers, sh7786_sense_registers,
+ sh7786_ack_registers);
/* External interrupt pins in IRL mode */
@@ -909,10 +905,10 @@ static struct intc_vect vectors_irl4567[] __initdata = {
};
static DECLARE_INTC_DESC(intc_desc_irl0123, "sh7786-irl0123", vectors_irl0123,
- NULL, mask_registers, NULL, NULL);
+ NULL, sh7786_mask_registers, NULL, NULL);
static DECLARE_INTC_DESC(intc_desc_irl4567, "sh7786-irl4567", vectors_irl4567,
- NULL, mask_registers, NULL, NULL);
+ NULL, sh7786_mask_registers, NULL, NULL);
#define INTC_ICR0 0xfe410000
#define INTC_INTMSK0 CnINTMSK0
@@ -920,19 +916,6 @@ static DECLARE_INTC_DESC(intc_desc_irl4567, "sh7786-irl4567", vectors_irl4567,
#define INTC_INTMSK2 INTMSK2
#define INTC_INTMSKCLR1 CnINTMSKCLR1
#define INTC_INTMSKCLR2 INTMSKCLR2
-#define INTC_USERIMASK 0xfe411000
-
-#ifdef CONFIG_INTC_BALANCING
-unsigned int irq_lookup(unsigned int irq)
-{
- return __raw_readl(INTACK) & 1 ? irq : NO_IRQ_IGNORE;
-}
-
-void irq_finish(unsigned int irq)
-{
- __raw_writel(irq2evt(irq), INTACKCLR);
-}
-#endif
void __init plat_irq_setup(void)
{
@@ -946,8 +929,7 @@ void __init plat_irq_setup(void)
/* select IRL mode for IRL3-0 + IRL7-4 */
__raw_writel(__raw_readl(INTC_ICR0) & ~0x00c00000, INTC_ICR0);
- register_intc_controller(&intc_desc);
- register_intc_userimask(INTC_USERIMASK);
+ register_intc_controller(&sh7786_intc_desc);
}
void __init plat_irq_setup_pins(int mode)
@@ -991,3 +973,39 @@ void __init plat_irq_setup_pins(int mode)
void __init plat_mem_setup(void)
{
}
+
+static int __init sh7786_devices_setup(void)
+{
+ int ret, irq;
+
+ sh7786_usb_setup();
+
+ /*
+ * De-mux SCIF1 IRQs if possible
+ */
+ irq = intc_irq_lookup(sh7786_intc_desc.name, TXI1);
+ if (irq > 0) {
+ scif1_platform_data.irqs[SCIx_TXI_IRQ] = irq;
+ scif1_platform_data.irqs[SCIx_ERI_IRQ] =
+ intc_irq_lookup(sh7786_intc_desc.name, ERI1);
+ scif1_platform_data.irqs[SCIx_BRI_IRQ] =
+ intc_irq_lookup(sh7786_intc_desc.name, BRI1);
+ scif1_platform_data.irqs[SCIx_RXI_IRQ] =
+ intc_irq_lookup(sh7786_intc_desc.name, RXI1);
+ }
+
+ ret = platform_add_devices(sh7786_early_devices,
+ ARRAY_SIZE(sh7786_early_devices));
+ if (unlikely(ret != 0))
+ return ret;
+
+ return platform_add_devices(sh7786_devices,
+ ARRAY_SIZE(sh7786_devices));
+}
+arch_initcall(sh7786_devices_setup);
+
+void __init plat_early_device_setup(void)
+{
+ early_platform_add_devices(sh7786_early_devices,
+ ARRAY_SIZE(sh7786_early_devices));
+}
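
One small observation on sh7786_devices_setup() above: only the TXI1 lookup is checked before all four SCIF1 vectors are written into scif1_platform_data, so the ERI1/BRI1/RXI1 results are assigned unconditionally. If a per-lookup check were ever wanted, a fragment along these lines could do it (scif1_demux_one is hypothetical, meant to sit in this same file, and is not part of the patch):

/* hypothetical helper, same file/kernel context as the code above */
static int __init scif1_demux_one(unsigned int idx, unsigned int id)
{
	int irq = intc_irq_lookup(sh7786_intc_desc.name, id);

	if (irq <= 0)
		return -ENOENT;

	scif1_platform_data.irqs[idx] = irq;
	return 0;
}

Whether that level of paranoia is worth it is debatable; the subgroup is registered unconditionally a few lines earlier, so the patch's single check is probably fine in practice.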
diff --git a/arch/sh/kernel/cpu/sh4a/setup-shx3.c b/arch/sh/kernel/cpu/sh4a/setup-shx3.c
index 9158bc5ea38b..013f0b144489 100644
--- a/arch/sh/kernel/cpu/sh4a/setup-shx3.c
+++ b/arch/sh/kernel/cpu/sh4a/setup-shx3.c
@@ -1,7 +1,7 @@
/*
* SH-X3 Prototype Setup
*
- * Copyright (C) 2007 - 2009 Paul Mundt
+ * Copyright (C) 2007 - 2010 Paul Mundt
*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file "COPYING" in the main directory of this archive
@@ -12,7 +12,9 @@
#include <linux/serial.h>
#include <linux/serial_sci.h>
#include <linux/io.h>
+#include <linux/gpio.h>
#include <linux/sh_timer.h>
+#include <cpu/shx3.h>
#include <asm/mmzone.h>
/*
@@ -354,6 +356,10 @@ static struct intc_group groups[] __initdata = {
DMAC1_DMINT9, DMAC1_DMINT10, DMAC1_DMINT11),
};
+#define INT2DISTCR0 0xfe4108a0
+#define INT2DISTCR1 0xfe4108a4
+#define INT2DISTCR2 0xfe4108a8
+
static struct intc_mask_reg mask_registers[] __initdata = {
{ 0xfe410030, 0xfe410050, 32, /* CnINTMSK0 / CnINTMSKCLR0 */
{ IRQ0, IRQ1, IRQ2, IRQ3 } },
@@ -363,20 +369,23 @@ static struct intc_mask_reg mask_registers[] __initdata = {
{ FE1, FE0, 0, ATAPI, VCORE0, VIN1, VIN0, IIC,
DU, GPIO3, GPIO2, GPIO1, GPIO0, PAM, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, /* HUDI bits ignored */
- 0, TMU5, TMU4, TMU3, TMU2, TMU1, TMU0, 0, } },
+ 0, TMU5, TMU4, TMU3, TMU2, TMU1, TMU0, 0, },
+ INTC_SMP_BALANCING(INT2DISTCR0) },
{ 0xfe410830, 0xfe410860, 32, /* CnINT2MSK1 / CnINT2MSKCLR1 */
{ 0, 0, 0, 0, DTU3, DTU2, DTU1, DTU0, /* IRM bits ignored */
PCII9, PCII8, PCII7, PCII6, PCII5, PCII4, PCII3, PCII2,
PCII1, PCII0, DMAC1_DMAE, DMAC1_DMINT11,
DMAC1_DMINT10, DMAC1_DMINT9, DMAC1_DMINT8, DMAC1_DMINT7,
DMAC1_DMINT6, DMAC0_DMAE, DMAC0_DMINT5, DMAC0_DMINT4,
- DMAC0_DMINT3, DMAC0_DMINT2, DMAC0_DMINT1, DMAC0_DMINT0 } },
+ DMAC0_DMINT3, DMAC0_DMINT2, DMAC0_DMINT1, DMAC0_DMINT0 },
+ INTC_SMP_BALANCING(INT2DISTCR1) },
{ 0xfe410840, 0xfe410870, 32, /* CnINT2MSK2 / CnINT2MSKCLR2 */
{ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
SCIF3_TXI, SCIF3_BRI, SCIF3_RXI, SCIF3_ERI,
SCIF2_TXI, SCIF2_BRI, SCIF2_RXI, SCIF2_ERI,
SCIF1_TXI, SCIF1_BRI, SCIF1_RXI, SCIF1_ERI,
- SCIF0_TXI, SCIF0_BRI, SCIF0_RXI, SCIF0_ERI } },
+ SCIF0_TXI, SCIF0_BRI, SCIF0_RXI, SCIF0_ERI },
+ INTC_SMP_BALANCING(INT2DISTCR2) },
};
static struct intc_prio_reg prio_registers[] __initdata = {
@@ -433,11 +442,33 @@ static DECLARE_INTC_DESC(intc_desc_irl, "shx3-irl", vectors_irl, groups,
void __init plat_irq_setup_pins(int mode)
{
+ int ret = 0;
+
switch (mode) {
case IRQ_MODE_IRQ:
+ ret |= gpio_request(GPIO_FN_IRQ3, intc_desc_irq.name);
+ ret |= gpio_request(GPIO_FN_IRQ2, intc_desc_irq.name);
+ ret |= gpio_request(GPIO_FN_IRQ1, intc_desc_irq.name);
+ ret |= gpio_request(GPIO_FN_IRQ0, intc_desc_irq.name);
+
+ if (unlikely(ret)) {
+ pr_err("Failed to set IRQ mode\n");
+ return;
+ }
+
register_intc_controller(&intc_desc_irq);
break;
case IRQ_MODE_IRL3210:
+ ret |= gpio_request(GPIO_FN_IRL3, intc_desc_irl.name);
+ ret |= gpio_request(GPIO_FN_IRL2, intc_desc_irl.name);
+ ret |= gpio_request(GPIO_FN_IRL1, intc_desc_irl.name);
+ ret |= gpio_request(GPIO_FN_IRL0, intc_desc_irl.name);
+
+ if (unlikely(ret)) {
+ pr_err("Failed to set IRL mode\n");
+ return;
+ }
+
register_intc_controller(&intc_desc_irl);
break;
default:
@@ -447,6 +478,9 @@ void __init plat_irq_setup_pins(int mode)
void __init plat_irq_setup(void)
{
+ reserve_intc_vectors(vectors_irq, ARRAY_SIZE(vectors_irq));
+ reserve_intc_vectors(vectors_irl, ARRAY_SIZE(vectors_irl));
+
register_intc_controller(&intc_desc);
}
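
In plat_irq_setup_pins() above, the gpio_request() results are accumulated with |=. That works as an "anything failed" flag, but the accumulated value is no longer a meaningful errno, which is worth remembering if the pr_err() ever grows a %d. A two-minute illustration:

#include <stdio.h>
#include <errno.h>

int main(void)
{
	/* pretend two of four requests failed with different codes */
	int ret = 0;

	ret |= 0;        /* GPIO_FN_IRQ3: ok   */
	ret |= -EBUSY;   /* GPIO_FN_IRQ2: -16  */
	ret |= 0;        /* GPIO_FN_IRQ1: ok   */
	ret |= -EINVAL;  /* GPIO_FN_IRQ0: -22  */

	/* non-zero, so the failure is detected, but the value itself
	 * (-6 here) is neither -EBUSY nor -EINVAL */
	printf("accumulated ret = %d\n", ret);
	return 0;
}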
diff --git a/arch/sh/kernel/head_32.S b/arch/sh/kernel/head_32.S
index 6e35f012cc03..7db248936b60 100644
--- a/arch/sh/kernel/head_32.S
+++ b/arch/sh/kernel/head_32.S
@@ -330,7 +330,7 @@ ENTRY(_stext)
#if defined(CONFIG_CPU_SH2)
1: .long 0x000000F0 ! IMASK=0xF
#else
-1: .long 0x400080F0 ! MD=1, RB=0, BL=0, FD=1, IMASK=0xF
+1: .long 0x500080F0 ! MD=1, RB=0, BL=1, FD=1, IMASK=0xF
#endif
ENTRY(stack_start)
2: .long init_thread_union+THREAD_SIZE
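
The head_32.S change only flips BL in the initial status register image (0x400080F0 -> 0x500080F0), presumably so exceptions stay blocked until the early CPU init code is ready for them. Decoding both constants with the usual SH-4 SR layout (MD bit 30, RB bit 29, BL bit 28, FD bit 15, IMASK bits 7:4), which is consistent with the comment in the hunk, shows BL is the only difference:

#include <stdio.h>
#include <stdint.h>

static void decode_sr(uint32_t sr)
{
	printf("SR=0x%08x: MD=%u RB=%u BL=%u FD=%u IMASK=0x%x\n", sr,
	       (sr >> 30) & 1, (sr >> 29) & 1, (sr >> 28) & 1,
	       (sr >> 15) & 1, (sr >> 4) & 0xf);
}

int main(void)
{
	decode_sr(0x400080F0);	/* old: MD=1 RB=0 BL=0 FD=1 IMASK=0xF */
	decode_sr(0x500080F0);	/* new: MD=1 RB=0 BL=1 FD=1 IMASK=0xF */
	return 0;
}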
diff --git a/arch/sh/kernel/io_trapped.c b/arch/sh/kernel/io_trapped.c
index 2947d2bd1291..32c385ef1011 100644
--- a/arch/sh/kernel/io_trapped.c
+++ b/arch/sh/kernel/io_trapped.c
@@ -291,7 +291,7 @@ int handle_trapped_io(struct pt_regs *regs, unsigned long address)
}
tmp = handle_unaligned_access(instruction, regs,
- &trapped_io_access, 1);
+ &trapped_io_access, 1, address);
set_fs(oldfs);
return tmp == 0;
}
diff --git a/arch/sh/kernel/irq.c b/arch/sh/kernel/irq.c
index ae5bac39b896..68ecbe6c881a 100644
--- a/arch/sh/kernel/irq.c
+++ b/arch/sh/kernel/irq.c
@@ -56,6 +56,8 @@ int show_interrupts(struct seq_file *p, void *v)
int i = *(loff_t *)v, j, prec;
struct irqaction *action;
struct irq_desc *desc;
+ struct irq_data *data;
+ struct irq_chip *chip;
if (i > nr_irqs)
return 0;
@@ -77,6 +79,9 @@ int show_interrupts(struct seq_file *p, void *v)
if (!desc)
return 0;
+ data = irq_get_irq_data(i);
+ chip = irq_data_get_irq_chip(data);
+
raw_spin_lock_irqsave(&desc->lock, flags);
for_each_online_cpu(j)
any_count |= kstat_irqs_cpu(i, j);
@@ -87,7 +92,7 @@ int show_interrupts(struct seq_file *p, void *v)
seq_printf(p, "%*d: ", prec, i);
for_each_online_cpu(j)
seq_printf(p, "%10u ", kstat_irqs_cpu(i, j));
- seq_printf(p, " %14s", desc->chip->name);
+ seq_printf(p, " %14s", chip->name);
seq_printf(p, "-%-8s", desc->name);
if (action) {
@@ -273,16 +278,12 @@ void __init init_IRQ(void)
{
plat_irq_setup();
- /*
- * Pin any of the legacy IRQ vectors that haven't already been
- * grabbed by the platform
- */
- reserve_irq_legacy();
-
/* Perform the machine specific initialisation */
if (sh_mv.mv_init_irq)
sh_mv.mv_init_irq();
+ intc_finalize();
+
irq_ctx_init(smp_processor_id());
}
@@ -295,13 +296,16 @@ int __init arch_probe_nr_irqs(void)
#endif
#ifdef CONFIG_HOTPLUG_CPU
-static void route_irq(struct irq_desc *desc, unsigned int irq, unsigned int cpu)
+static void route_irq(struct irq_data *data, unsigned int irq, unsigned int cpu)
{
+ struct irq_desc *desc = irq_to_desc(irq);
+ struct irq_chip *chip = irq_data_get_irq_chip(data);
+
printk(KERN_INFO "IRQ%u: moving from cpu%u to cpu%u\n",
- irq, desc->node, cpu);
+ irq, data->node, cpu);
raw_spin_lock_irq(&desc->lock);
- desc->chip->set_affinity(irq, cpumask_of(cpu));
+ chip->irq_set_affinity(data, cpumask_of(cpu), false);
raw_spin_unlock_irq(&desc->lock);
}
@@ -312,24 +316,25 @@ static void route_irq(struct irq_desc *desc, unsigned int irq, unsigned int cpu)
*/
void migrate_irqs(void)
{
- struct irq_desc *desc;
unsigned int irq, cpu = smp_processor_id();
- for_each_irq_desc(irq, desc) {
- if (desc->node == cpu) {
- unsigned int newcpu = cpumask_any_and(desc->affinity,
+ for_each_active_irq(irq) {
+ struct irq_data *data = irq_get_irq_data(irq);
+
+ if (data->node == cpu) {
+ unsigned int newcpu = cpumask_any_and(data->affinity,
cpu_online_mask);
if (newcpu >= nr_cpu_ids) {
if (printk_ratelimit())
printk(KERN_INFO "IRQ%u no longer affine to CPU%u\n",
irq, cpu);
- cpumask_setall(desc->affinity);
- newcpu = cpumask_any_and(desc->affinity,
+ cpumask_setall(data->affinity);
+ newcpu = cpumask_any_and(data->affinity,
cpu_online_mask);
}
- route_irq(desc, irq, newcpu);
+ route_irq(data, irq, newcpu);
}
}
}
diff --git a/arch/sh/kernel/irq_64.c b/arch/sh/kernel/irq_64.c
index 32365ba0e039..8fc05b997b6d 100644
--- a/arch/sh/kernel/irq_64.c
+++ b/arch/sh/kernel/irq_64.c
@@ -11,17 +11,17 @@
#include <linux/module.h>
#include <cpu/registers.h>
-void notrace raw_local_irq_restore(unsigned long flags)
+void notrace arch_local_irq_restore(unsigned long flags)
{
unsigned long long __dummy;
- if (flags == RAW_IRQ_DISABLED) {
+ if (flags == ARCH_IRQ_DISABLED) {
__asm__ __volatile__ (
"getcon " __SR ", %0\n\t"
"or %0, %1, %0\n\t"
"putcon %0, " __SR "\n\t"
: "=&r" (__dummy)
- : "r" (RAW_IRQ_DISABLED)
+ : "r" (ARCH_IRQ_DISABLED)
);
} else {
__asm__ __volatile__ (
@@ -29,13 +29,13 @@ void notrace raw_local_irq_restore(unsigned long flags)
"and %0, %1, %0\n\t"
"putcon %0, " __SR "\n\t"
: "=&r" (__dummy)
- : "r" (~RAW_IRQ_DISABLED)
+ : "r" (~ARCH_IRQ_DISABLED)
);
}
}
-EXPORT_SYMBOL(raw_local_irq_restore);
+EXPORT_SYMBOL(arch_local_irq_restore);
-unsigned long notrace __raw_local_save_flags(void)
+unsigned long notrace arch_local_save_flags(void)
{
unsigned long flags;
@@ -43,9 +43,9 @@ unsigned long notrace __raw_local_save_flags(void)
"getcon " __SR ", %0\n\t"
"and %0, %1, %0"
: "=&r" (flags)
- : "r" (RAW_IRQ_DISABLED)
+ : "r" (ARCH_IRQ_DISABLED)
);
return flags;
}
-EXPORT_SYMBOL(__raw_local_save_flags);
+EXPORT_SYMBOL(arch_local_save_flags);
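
The irq_64.c hunk is the sh64 side of the generic irqflags rename (raw_local_* -> arch_local_*); the behaviour is unchanged: saving flags masks SR down to the disable bits, restoring either ORs them back in or clears them. A plain-C model of that contract, with a fake SR word standing in for getcon/putcon and a placeholder disable mask, is below; it sketches the semantics only and is not sh64 code.

#include <stdio.h>

/* placeholder value; the real mask lives in the sh64 headers */
#define ARCH_IRQ_DISABLED	0xf0

static unsigned long fake_sr;	/* stands in for getcon SR / putcon SR */

static unsigned long arch_local_save_flags_model(void)
{
	return fake_sr & ARCH_IRQ_DISABLED;
}

static void arch_local_irq_restore_model(unsigned long flags)
{
	if (flags == ARCH_IRQ_DISABLED)
		fake_sr |= ARCH_IRQ_DISABLED;	/* mask interrupts */
	else
		fake_sr &= ~ARCH_IRQ_DISABLED;	/* unmask interrupts */
}

int main(void)
{
	unsigned long flags;

	fake_sr = 0;				/* interrupts enabled */
	flags = arch_local_save_flags_model();	/* 0 == enabled */
	arch_local_irq_restore_model(ARCH_IRQ_DISABLED);
	arch_local_irq_restore_model(flags);	/* back to enabled */
	printf("final SR model: 0x%lx\n", fake_sr);
	return 0;
}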
diff --git a/arch/sh/kernel/kdebugfs.c b/arch/sh/kernel/kdebugfs.c
new file mode 100644
index 000000000000..e11c30bb100c
--- /dev/null
+++ b/arch/sh/kernel/kdebugfs.c
@@ -0,0 +1,16 @@
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/debugfs.h>
+
+struct dentry *arch_debugfs_dir;
+EXPORT_SYMBOL(arch_debugfs_dir);
+
+static int __init arch_kdebugfs_init(void)
+{
+ arch_debugfs_dir = debugfs_create_dir("sh", NULL);
+ if (!arch_debugfs_dir)
+ return -ENOMEM;
+
+ return 0;
+}
+arch_initcall(arch_kdebugfs_init);
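
kdebugfs.c introduces arch_debugfs_dir, the directory name the generic kernel expects architectures to export (it replaces sh_debugfs_root, removed from setup.c further down). A typical consumer would hang a file off it roughly as follows; this is sketched from memory of the debugfs/seq_file API of that era, not taken from this series, and the file name is made up.

#include <linux/init.h>
#include <linux/fs.h>
#include <linux/debugfs.h>
#include <linux/seq_file.h>

/* hypothetical example file under /sys/kernel/debug/sh/ */
static int example_show(struct seq_file *m, void *unused)
{
	seq_printf(m, "hello from arch/sh\n");
	return 0;
}

static int example_open(struct inode *inode, struct file *file)
{
	return single_open(file, example_show, NULL);
}

static const struct file_operations example_fops = {
	.open		= example_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};

static int __init example_debugfs_init(void)
{
	debugfs_create_file("example", S_IRUGO, arch_debugfs_dir,
			    NULL, &example_fops);
	return 0;
}
late_initcall(example_debugfs_init);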
diff --git a/arch/sh/kernel/kprobes.c b/arch/sh/kernel/kprobes.c
index 4049d99f76e1..1208b09e95c3 100644
--- a/arch/sh/kernel/kprobes.c
+++ b/arch/sh/kernel/kprobes.c
@@ -20,9 +20,9 @@
DEFINE_PER_CPU(struct kprobe *, current_kprobe) = NULL;
DEFINE_PER_CPU(struct kprobe_ctlblk, kprobe_ctlblk);
-static struct kprobe saved_current_opcode;
-static struct kprobe saved_next_opcode;
-static struct kprobe saved_next_opcode2;
+static DEFINE_PER_CPU(struct kprobe, saved_current_opcode);
+static DEFINE_PER_CPU(struct kprobe, saved_next_opcode);
+static DEFINE_PER_CPU(struct kprobe, saved_next_opcode2);
#define OPCODE_JMP(x) (((x) & 0xF0FF) == 0x402b)
#define OPCODE_JSR(x) (((x) & 0xF0FF) == 0x400b)
@@ -102,16 +102,21 @@ int __kprobes kprobe_handle_illslot(unsigned long pc)
void __kprobes arch_remove_kprobe(struct kprobe *p)
{
- if (saved_next_opcode.addr != 0x0) {
+ struct kprobe *saved = &__get_cpu_var(saved_next_opcode);
+
+ if (saved->addr) {
arch_disarm_kprobe(p);
- arch_disarm_kprobe(&saved_next_opcode);
- saved_next_opcode.addr = 0x0;
- saved_next_opcode.opcode = 0x0;
-
- if (saved_next_opcode2.addr != 0x0) {
- arch_disarm_kprobe(&saved_next_opcode2);
- saved_next_opcode2.addr = 0x0;
- saved_next_opcode2.opcode = 0x0;
+ arch_disarm_kprobe(saved);
+
+ saved->addr = NULL;
+ saved->opcode = 0;
+
+ saved = &__get_cpu_var(saved_next_opcode2);
+ if (saved->addr) {
+ arch_disarm_kprobe(saved);
+
+ saved->addr = NULL;
+ saved->opcode = 0;
}
}
}
@@ -141,57 +146,59 @@ static void __kprobes set_current_kprobe(struct kprobe *p, struct pt_regs *regs,
*/
static void __kprobes prepare_singlestep(struct kprobe *p, struct pt_regs *regs)
{
- kprobe_opcode_t *addr = NULL;
- saved_current_opcode.addr = (kprobe_opcode_t *) (regs->pc);
- addr = saved_current_opcode.addr;
+ __get_cpu_var(saved_current_opcode).addr = (kprobe_opcode_t *)regs->pc;
if (p != NULL) {
+ struct kprobe *op1, *op2;
+
arch_disarm_kprobe(p);
+ op1 = &__get_cpu_var(saved_next_opcode);
+ op2 = &__get_cpu_var(saved_next_opcode2);
+
if (OPCODE_JSR(p->opcode) || OPCODE_JMP(p->opcode)) {
unsigned int reg_nr = ((p->opcode >> 8) & 0x000F);
- saved_next_opcode.addr =
- (kprobe_opcode_t *) regs->regs[reg_nr];
+ op1->addr = (kprobe_opcode_t *) regs->regs[reg_nr];
} else if (OPCODE_BRA(p->opcode) || OPCODE_BSR(p->opcode)) {
unsigned long disp = (p->opcode & 0x0FFF);
- saved_next_opcode.addr =
+ op1->addr =
(kprobe_opcode_t *) (regs->pc + 4 + disp * 2);
} else if (OPCODE_BRAF(p->opcode) || OPCODE_BSRF(p->opcode)) {
unsigned int reg_nr = ((p->opcode >> 8) & 0x000F);
- saved_next_opcode.addr =
+ op1->addr =
(kprobe_opcode_t *) (regs->pc + 4 +
regs->regs[reg_nr]);
} else if (OPCODE_RTS(p->opcode)) {
- saved_next_opcode.addr = (kprobe_opcode_t *) regs->pr;
+ op1->addr = (kprobe_opcode_t *) regs->pr;
} else if (OPCODE_BF(p->opcode) || OPCODE_BT(p->opcode)) {
unsigned long disp = (p->opcode & 0x00FF);
/* case 1 */
- saved_next_opcode.addr = p->addr + 1;
+ op1->addr = p->addr + 1;
/* case 2 */
- saved_next_opcode2.addr =
+ op2->addr =
(kprobe_opcode_t *) (regs->pc + 4 + disp * 2);
- saved_next_opcode2.opcode = *(saved_next_opcode2.addr);
- arch_arm_kprobe(&saved_next_opcode2);
+ op2->opcode = *(op2->addr);
+ arch_arm_kprobe(op2);
} else if (OPCODE_BF_S(p->opcode) || OPCODE_BT_S(p->opcode)) {
unsigned long disp = (p->opcode & 0x00FF);
/* case 1 */
- saved_next_opcode.addr = p->addr + 2;
+ op1->addr = p->addr + 2;
/* case 2 */
- saved_next_opcode2.addr =
+ op2->addr =
(kprobe_opcode_t *) (regs->pc + 4 + disp * 2);
- saved_next_opcode2.opcode = *(saved_next_opcode2.addr);
- arch_arm_kprobe(&saved_next_opcode2);
+ op2->opcode = *(op2->addr);
+ arch_arm_kprobe(op2);
} else {
- saved_next_opcode.addr = p->addr + 1;
+ op1->addr = p->addr + 1;
}
- saved_next_opcode.opcode = *(saved_next_opcode.addr);
- arch_arm_kprobe(&saved_next_opcode);
+ op1->opcode = *(op1->addr);
+ arch_arm_kprobe(op1);
}
}
@@ -376,21 +383,23 @@ static int __kprobes post_kprobe_handler(struct pt_regs *regs)
cur->post_handler(cur, regs, 0);
}
- if (saved_next_opcode.addr != 0x0) {
- arch_disarm_kprobe(&saved_next_opcode);
- saved_next_opcode.addr = 0x0;
- saved_next_opcode.opcode = 0x0;
+ p = &__get_cpu_var(saved_next_opcode);
+ if (p->addr) {
+ arch_disarm_kprobe(p);
+ p->addr = NULL;
+ p->opcode = 0;
- addr = saved_current_opcode.addr;
- saved_current_opcode.addr = 0x0;
+ addr = __get_cpu_var(saved_current_opcode).addr;
+ __get_cpu_var(saved_current_opcode).addr = NULL;
p = get_kprobe(addr);
arch_arm_kprobe(p);
- if (saved_next_opcode2.addr != 0x0) {
- arch_disarm_kprobe(&saved_next_opcode2);
- saved_next_opcode2.addr = 0x0;
- saved_next_opcode2.opcode = 0x0;
+ p = &__get_cpu_var(saved_next_opcode2);
+ if (p->addr) {
+ arch_disarm_kprobe(p);
+ p->addr = NULL;
+ p->opcode = 0;
}
}
@@ -572,14 +581,5 @@ static struct kprobe trampoline_p = {
int __init arch_init_kprobes(void)
{
- saved_next_opcode.addr = 0x0;
- saved_next_opcode.opcode = 0x0;
-
- saved_current_opcode.addr = 0x0;
- saved_current_opcode.opcode = 0x0;
-
- saved_next_opcode2.addr = 0x0;
- saved_next_opcode2.opcode = 0x0;
-
return register_kprobe(&trampoline_p);
}
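
The kprobes rework only moves the saved probes into per-CPU storage; the branch classification still keys off the opcode masks at the top of the file. The JMP/JSR masks and the (opcode >> 8) & 0xF register extraction are the ones shown in the hunks above; the BRA/RTS encodings and the sample opcodes are mine, per the SH-4 ISA, so take them as illustration:

#include <stdio.h>
#include <stdint.h>

/* JMP/JSR masks as in arch/sh/kernel/kprobes.c; BRA/RTS per the ISA */
#define OPCODE_JMP(x)	(((x) & 0xF0FF) == 0x402b)
#define OPCODE_JSR(x)	(((x) & 0xF0FF) == 0x400b)
#define OPCODE_BRA(x)	(((x) & 0xF000) == 0xa000)
#define OPCODE_RTS(x)	((x) == 0x000b)

int main(void)
{
	uint16_t samples[] = { 0x452b, 0x410b, 0xa010, 0x000b };

	for (unsigned int i = 0; i < 4; i++) {
		uint16_t op = samples[i];

		if (OPCODE_JMP(op) || OPCODE_JSR(op))
			printf("0x%04x: jmp/jsr @r%u\n", op,
			       (op >> 8) & 0xFu);
		else if (OPCODE_BRA(op))
			printf("0x%04x: bra, disp=%u words\n", op,
			       op & 0x0FFFu);
		else if (OPCODE_RTS(op))
			printf("0x%04x: rts\n", op);
	}
	return 0;
}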
diff --git a/arch/sh/kernel/ptrace.c b/arch/sh/kernel/ptrace.c
new file mode 100644
index 000000000000..0a05983633ca
--- /dev/null
+++ b/arch/sh/kernel/ptrace.c
@@ -0,0 +1,33 @@
+#include <linux/ptrace.h>
+
+/**
+ * regs_query_register_offset() - query register offset from its name
+ * @name: the name of a register
+ *
+ * regs_query_register_offset() returns the offset of a register in struct
+ * pt_regs from its name. If the name is invalid, this returns -EINVAL;
+ */
+int regs_query_register_offset(const char *name)
+{
+ const struct pt_regs_offset *roff;
+ for (roff = regoffset_table; roff->name != NULL; roff++)
+ if (!strcmp(roff->name, name))
+ return roff->offset;
+ return -EINVAL;
+}
+
+/**
+ * regs_query_register_name() - query register name from its offset
+ * @offset: the offset of a register in struct pt_regs.
+ *
+ * regs_query_register_name() returns the name of a register from its
+ * offset in struct pt_regs. If the @offset is invalid, this returns NULL;
+ */
+const char *regs_query_register_name(unsigned int offset)
+{
+ const struct pt_regs_offset *roff;
+ for (roff = regoffset_table; roff->name != NULL; roff++)
+ if (roff->offset == offset)
+ return roff->name;
+ return NULL;
+}
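
The two helpers in the new ptrace.c walk a name/offset table that the per-ABI files below build with REG_OFFSET_NAME()-style macros, which presumably expand to offsetof() entries terminated by a sentinel. A self-contained toy version of the same pattern, with a made-up struct instead of struct pt_regs, looks like this:

#include <stdio.h>
#include <stddef.h>
#include <string.h>

struct toy_regs {		/* stand-in for struct pt_regs */
	unsigned long regs[16];
	unsigned long pc;
	unsigned long pr;
};

struct reg_offset {
	const char *name;
	int offset;
};

#define TOY_OFFSET_NAME(r)	{ .name = #r, .offset = offsetof(struct toy_regs, r) }
#define TOY_OFFSET_END		{ .name = NULL, .offset = 0 }

static const struct reg_offset toy_table[] = {
	TOY_OFFSET_NAME(pc),
	TOY_OFFSET_NAME(pr),
	TOY_OFFSET_END,
};

static int query_offset(const char *name)
{
	for (const struct reg_offset *r = toy_table; r->name; r++)
		if (!strcmp(r->name, name))
			return r->offset;
	return -1;
}

int main(void)
{
	printf("pc lives at offset %d\n", query_offset("pc"));
	return 0;
}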
diff --git a/arch/sh/kernel/ptrace_32.c b/arch/sh/kernel/ptrace_32.c
index 6c4bbba2a675..90a15d29feeb 100644
--- a/arch/sh/kernel/ptrace_32.c
+++ b/arch/sh/kernel/ptrace_32.c
@@ -274,6 +274,33 @@ static int dspregs_active(struct task_struct *target,
}
#endif
+const struct pt_regs_offset regoffset_table[] = {
+ REGS_OFFSET_NAME(0),
+ REGS_OFFSET_NAME(1),
+ REGS_OFFSET_NAME(2),
+ REGS_OFFSET_NAME(3),
+ REGS_OFFSET_NAME(4),
+ REGS_OFFSET_NAME(5),
+ REGS_OFFSET_NAME(6),
+ REGS_OFFSET_NAME(7),
+ REGS_OFFSET_NAME(8),
+ REGS_OFFSET_NAME(9),
+ REGS_OFFSET_NAME(10),
+ REGS_OFFSET_NAME(11),
+ REGS_OFFSET_NAME(12),
+ REGS_OFFSET_NAME(13),
+ REGS_OFFSET_NAME(14),
+ REGS_OFFSET_NAME(15),
+ REG_OFFSET_NAME(pc),
+ REG_OFFSET_NAME(pr),
+ REG_OFFSET_NAME(sr),
+ REG_OFFSET_NAME(gbr),
+ REG_OFFSET_NAME(mach),
+ REG_OFFSET_NAME(macl),
+ REG_OFFSET_NAME(tra),
+ REG_OFFSET_END,
+};
+
/*
* These are our native regset flavours.
*/
@@ -338,9 +365,9 @@ const struct user_regset_view *task_user_regset_view(struct task_struct *task)
return &user_sh_native_view;
}
-long arch_ptrace(struct task_struct *child, long request, long addr, long data)
+long arch_ptrace(struct task_struct *child, long request,
+ unsigned long addr, unsigned long data)
{
- struct user * dummy = NULL;
unsigned long __user *datap = (unsigned long __user *)data;
int ret;
@@ -356,17 +383,20 @@ long arch_ptrace(struct task_struct *child, long request, long addr, long data)
if (addr < sizeof(struct pt_regs))
tmp = get_stack_long(child, addr);
- else if (addr >= (long) &dummy->fpu &&
- addr < (long) &dummy->u_fpvalid) {
+ else if (addr >= offsetof(struct user, fpu) &&
+ addr < offsetof(struct user, u_fpvalid)) {
if (!tsk_used_math(child)) {
- if (addr == (long)&dummy->fpu.fpscr)
+ if (addr == offsetof(struct user, fpu.fpscr))
tmp = FPSCR_INIT;
else
tmp = 0;
- } else
- tmp = ((long *)child->thread.xstate)
- [(addr - (long)&dummy->fpu) >> 2];
- } else if (addr == (long) &dummy->u_fpvalid)
+ } else {
+ unsigned long index;
+ index = addr - offsetof(struct user, fpu);
+ tmp = ((unsigned long *)child->thread.xstate)
+ [index >> 2];
+ }
+ } else if (addr == offsetof(struct user, u_fpvalid))
tmp = !!tsk_used_math(child);
else if (addr == PT_TEXT_ADDR)
tmp = child->mm->start_code;
@@ -390,13 +420,15 @@ long arch_ptrace(struct task_struct *child, long request, long addr, long data)
if (addr < sizeof(struct pt_regs))
ret = put_stack_long(child, addr, data);
- else if (addr >= (long) &dummy->fpu &&
- addr < (long) &dummy->u_fpvalid) {
+ else if (addr >= offsetof(struct user, fpu) &&
+ addr < offsetof(struct user, u_fpvalid)) {
+ unsigned long index;
+ index = addr - offsetof(struct user, fpu);
set_stopped_child_used_math(child);
- ((long *)child->thread.xstate)
- [(addr - (long)&dummy->fpu) >> 2] = data;
+ ((unsigned long *)child->thread.xstate)
+ [index >> 2] = data;
ret = 0;
- } else if (addr == (long) &dummy->u_fpvalid) {
+ } else if (addr == offsetof(struct user, u_fpvalid)) {
conditional_stopped_child_used_math(data, child);
ret = 0;
}
@@ -406,35 +438,35 @@ long arch_ptrace(struct task_struct *child, long request, long addr, long data)
return copy_regset_to_user(child, &user_sh_native_view,
REGSET_GENERAL,
0, sizeof(struct pt_regs),
- (void __user *)data);
+ datap);
case PTRACE_SETREGS:
return copy_regset_from_user(child, &user_sh_native_view,
REGSET_GENERAL,
0, sizeof(struct pt_regs),
- (const void __user *)data);
+ datap);
#ifdef CONFIG_SH_FPU
case PTRACE_GETFPREGS:
return copy_regset_to_user(child, &user_sh_native_view,
REGSET_FPU,
0, sizeof(struct user_fpu_struct),
- (void __user *)data);
+ datap);
case PTRACE_SETFPREGS:
return copy_regset_from_user(child, &user_sh_native_view,
REGSET_FPU,
0, sizeof(struct user_fpu_struct),
- (const void __user *)data);
+ datap);
#endif
#ifdef CONFIG_SH_DSP
case PTRACE_GETDSPREGS:
return copy_regset_to_user(child, &user_sh_native_view,
REGSET_DSP,
0, sizeof(struct pt_dspregs),
- (void __user *)data);
+ datap);
case PTRACE_SETDSPREGS:
return copy_regset_from_user(child, &user_sh_native_view,
REGSET_DSP,
0, sizeof(struct pt_dspregs),
- (const void __user *)data);
+ datap);
#endif
default:
ret = ptrace_request(child, request, addr, data);
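
The nicest part of the ptrace_32.c hunk is the removal of the struct user *dummy = NULL idiom: (long)&dummy->fpu becomes offsetof(struct user, fpu). Both compute the same number in practice, but offsetof is the well-defined way to ask for it. The toy struct below only demonstrates the equivalence; it is not struct user:

#include <stdio.h>
#include <stddef.h>

struct toy_user {		/* stand-in for struct user */
	long pad[4];
	double fpu[8];
	long u_fpvalid;
};

int main(void)
{
	struct toy_user *dummy = NULL;

	/* old style: formally undefined, even though it usually "works" */
	long old_way = (long)&dummy->fpu;

	/* new style: well-defined constant expression */
	long new_way = offsetof(struct toy_user, fpu);

	printf("dummy-pointer: %ld, offsetof: %ld\n", old_way, new_way);
	return 0;
}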
diff --git a/arch/sh/kernel/ptrace_64.c b/arch/sh/kernel/ptrace_64.c
index 5fd644da7f02..4436eacddb15 100644
--- a/arch/sh/kernel/ptrace_64.c
+++ b/arch/sh/kernel/ptrace_64.c
@@ -20,7 +20,7 @@
#include <linux/sched.h>
#include <linux/mm.h>
#include <linux/smp.h>
-#include <linux/smp_lock.h>
+#include <linux/bitops.h>
#include <linux/errno.h>
#include <linux/ptrace.h>
#include <linux/user.h>
@@ -252,6 +252,85 @@ static int fpregs_active(struct task_struct *target,
}
#endif
+const struct pt_regs_offset regoffset_table[] = {
+ REG_OFFSET_NAME(pc),
+ REG_OFFSET_NAME(sr),
+ REG_OFFSET_NAME(syscall_nr),
+ REGS_OFFSET_NAME(0),
+ REGS_OFFSET_NAME(1),
+ REGS_OFFSET_NAME(2),
+ REGS_OFFSET_NAME(3),
+ REGS_OFFSET_NAME(4),
+ REGS_OFFSET_NAME(5),
+ REGS_OFFSET_NAME(6),
+ REGS_OFFSET_NAME(7),
+ REGS_OFFSET_NAME(8),
+ REGS_OFFSET_NAME(9),
+ REGS_OFFSET_NAME(10),
+ REGS_OFFSET_NAME(11),
+ REGS_OFFSET_NAME(12),
+ REGS_OFFSET_NAME(13),
+ REGS_OFFSET_NAME(14),
+ REGS_OFFSET_NAME(15),
+ REGS_OFFSET_NAME(16),
+ REGS_OFFSET_NAME(17),
+ REGS_OFFSET_NAME(18),
+ REGS_OFFSET_NAME(19),
+ REGS_OFFSET_NAME(20),
+ REGS_OFFSET_NAME(21),
+ REGS_OFFSET_NAME(22),
+ REGS_OFFSET_NAME(23),
+ REGS_OFFSET_NAME(24),
+ REGS_OFFSET_NAME(25),
+ REGS_OFFSET_NAME(26),
+ REGS_OFFSET_NAME(27),
+ REGS_OFFSET_NAME(28),
+ REGS_OFFSET_NAME(29),
+ REGS_OFFSET_NAME(30),
+ REGS_OFFSET_NAME(31),
+ REGS_OFFSET_NAME(32),
+ REGS_OFFSET_NAME(33),
+ REGS_OFFSET_NAME(34),
+ REGS_OFFSET_NAME(35),
+ REGS_OFFSET_NAME(36),
+ REGS_OFFSET_NAME(37),
+ REGS_OFFSET_NAME(38),
+ REGS_OFFSET_NAME(39),
+ REGS_OFFSET_NAME(40),
+ REGS_OFFSET_NAME(41),
+ REGS_OFFSET_NAME(42),
+ REGS_OFFSET_NAME(43),
+ REGS_OFFSET_NAME(44),
+ REGS_OFFSET_NAME(45),
+ REGS_OFFSET_NAME(46),
+ REGS_OFFSET_NAME(47),
+ REGS_OFFSET_NAME(48),
+ REGS_OFFSET_NAME(49),
+ REGS_OFFSET_NAME(50),
+ REGS_OFFSET_NAME(51),
+ REGS_OFFSET_NAME(52),
+ REGS_OFFSET_NAME(53),
+ REGS_OFFSET_NAME(54),
+ REGS_OFFSET_NAME(55),
+ REGS_OFFSET_NAME(56),
+ REGS_OFFSET_NAME(57),
+ REGS_OFFSET_NAME(58),
+ REGS_OFFSET_NAME(59),
+ REGS_OFFSET_NAME(60),
+ REGS_OFFSET_NAME(61),
+ REGS_OFFSET_NAME(62),
+ REGS_OFFSET_NAME(63),
+ TREGS_OFFSET_NAME(0),
+ TREGS_OFFSET_NAME(1),
+ TREGS_OFFSET_NAME(2),
+ TREGS_OFFSET_NAME(3),
+ TREGS_OFFSET_NAME(4),
+ TREGS_OFFSET_NAME(5),
+ TREGS_OFFSET_NAME(6),
+ TREGS_OFFSET_NAME(7),
+ REG_OFFSET_END,
+};
+
/*
* These are our native regset flavours.
*/
@@ -304,9 +383,11 @@ const struct user_regset_view *task_user_regset_view(struct task_struct *task)
return &user_sh64_native_view;
}
-long arch_ptrace(struct task_struct *child, long request, long addr, long data)
+long arch_ptrace(struct task_struct *child, long request,
+ unsigned long addr, unsigned long data)
{
int ret;
+ unsigned long __user *datap = (unsigned long __user *) data;
switch (request) {
/* read the word at location addr in the USER area. */
@@ -321,13 +402,15 @@ long arch_ptrace(struct task_struct *child, long request, long addr, long data)
tmp = get_stack_long(child, addr);
else if ((addr >= offsetof(struct user, fpu)) &&
(addr < offsetof(struct user, u_fpvalid))) {
- tmp = get_fpu_long(child, addr - offsetof(struct user, fpu));
+ unsigned long index;
+ index = addr - offsetof(struct user, fpu);
+ tmp = get_fpu_long(child, index);
} else if (addr == offsetof(struct user, u_fpvalid)) {
tmp = !!tsk_used_math(child);
} else {
break;
}
- ret = put_user(tmp, (unsigned long *)data);
+ ret = put_user(tmp, datap);
break;
}
@@ -358,7 +441,9 @@ long arch_ptrace(struct task_struct *child, long request, long addr, long data)
}
else if ((addr >= offsetof(struct user, fpu)) &&
(addr < offsetof(struct user, u_fpvalid))) {
- ret = put_fpu_long(child, addr - offsetof(struct user, fpu), data);
+ unsigned long index;
+ index = addr - offsetof(struct user, fpu);
+ ret = put_fpu_long(child, index, data);
}
break;
@@ -366,23 +451,23 @@ long arch_ptrace(struct task_struct *child, long request, long addr, long data)
return copy_regset_to_user(child, &user_sh64_native_view,
REGSET_GENERAL,
0, sizeof(struct pt_regs),
- (void __user *)data);
+ datap);
case PTRACE_SETREGS:
return copy_regset_from_user(child, &user_sh64_native_view,
REGSET_GENERAL,
0, sizeof(struct pt_regs),
- (const void __user *)data);
+ datap);
#ifdef CONFIG_SH_FPU
case PTRACE_GETFPREGS:
return copy_regset_to_user(child, &user_sh64_native_view,
REGSET_FPU,
0, sizeof(struct user_fpu_struct),
- (void __user *)data);
+ datap);
case PTRACE_SETFPREGS:
return copy_regset_from_user(child, &user_sh64_native_view,
REGSET_FPU,
0, sizeof(struct user_fpu_struct),
- (const void __user *)data);
+ datap);
#endif
default:
ret = ptrace_request(child, request, addr, data);
@@ -392,13 +477,13 @@ long arch_ptrace(struct task_struct *child, long request, long addr, long data)
return ret;
}
-asmlinkage int sh64_ptrace(long request, long pid, long addr, long data)
+asmlinkage int sh64_ptrace(long request, long pid,
+ unsigned long addr, unsigned long data)
{
#define WPC_DBRMODE 0x0d104008
- static int first_call = 1;
+ static unsigned long first_call;
- lock_kernel();
- if (first_call) {
+ if (!test_and_set_bit(0, &first_call)) {
/* Set WPC.DBRMODE to 0. This makes all debug events get
* delivered through RESVEC, i.e. into the handlers in entry.S.
* (If the kernel was downloaded using a remote gdb, WPC.DBRMODE
@@ -408,9 +493,7 @@ asmlinkage int sh64_ptrace(long request, long pid, long addr, long data)
* the remote gdb.) */
printk("DBRMODE set to 0 to permit native debugging\n");
poke_real_address_q(WPC_DBRMODE, 0);
- first_call = 0;
}
- unlock_kernel();
return sys_ptrace(request, pid, addr, data);
}
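
sh64_ptrace() drops lock_kernel() and instead guards the one-time WPC.DBRMODE poke with test_and_set_bit() on a static word, so the setup still runs exactly once even with concurrent callers. The same shape in portable C11, with atomic_flag standing in for the bit op, purely as an illustration:

#include <stdio.h>
#include <stdatomic.h>

static atomic_flag first_call_done = ATOMIC_FLAG_INIT;

static void one_time_setup(void)
{
	/* only the first caller to win the test-and-set does the work,
	 * mirroring test_and_set_bit(0, &first_call) in sh64_ptrace() */
	if (!atomic_flag_test_and_set(&first_call_done))
		printf("DBRMODE set to 0 to permit native debugging\n");
}

int main(void)
{
	one_time_setup();	/* prints once */
	one_time_setup();	/* silent */
	return 0;
}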
diff --git a/arch/sh/kernel/reboot.c b/arch/sh/kernel/reboot.c
index b1fca66bb92e..ca6a5ca64015 100644
--- a/arch/sh/kernel/reboot.c
+++ b/arch/sh/kernel/reboot.c
@@ -9,6 +9,7 @@
#include <asm/addrspace.h>
#include <asm/reboot.h>
#include <asm/system.h>
+#include <asm/tlbflush.h>
void (*pm_power_off)(void);
EXPORT_SYMBOL(pm_power_off);
@@ -25,6 +26,9 @@ static void native_machine_restart(char * __unused)
{
local_irq_disable();
+ /* Destroy all of the TLBs in preparation for reset by MMU */
+ __flush_tlb_global();
+
/* Address error with SR.BL=1 first. */
trigger_address_error();
diff --git a/arch/sh/kernel/setup.c b/arch/sh/kernel/setup.c
index e769401a78ba..d6b018c7ebdc 100644
--- a/arch/sh/kernel/setup.c
+++ b/arch/sh/kernel/setup.c
@@ -24,7 +24,6 @@
#include <linux/module.h>
#include <linux/smp.h>
#include <linux/err.h>
-#include <linux/debugfs.h>
#include <linux/crash_dump.h>
#include <linux/mmzone.h>
#include <linux/clk.h>
@@ -42,6 +41,7 @@
#include <asm/smp.h>
#include <asm/mmu_context.h>
#include <asm/mmzone.h>
+#include <asm/sparsemem.h>
/*
* Initialize loops_per_jiffy as 10000000 (1000MIPS).
@@ -53,6 +53,7 @@ struct sh_cpuinfo cpu_data[NR_CPUS] __read_mostly = {
.type = CPU_SH_NONE,
.family = CPU_FAMILY_UNKNOWN,
.loops_per_jiffy = 10000000,
+ .phys_bits = MAX_PHYSMEM_BITS,
},
};
EXPORT_SYMBOL(cpu_data);
@@ -136,8 +137,9 @@ void __init check_for_initrd(void)
goto disable;
}
- if (unlikely(start < PAGE_OFFSET)) {
- pr_err("initrd start < PAGE_OFFSET\n");
+ if (unlikely(start < __MEMORY_START)) {
+ pr_err("initrd start (%08lx) < __MEMORY_START(%x)\n",
+ start, __MEMORY_START);
goto disable;
}
@@ -158,7 +160,7 @@ void __init check_for_initrd(void)
/*
* Address sanitization
*/
- initrd_start = (unsigned long)__va(__pa(start));
+ initrd_start = (unsigned long)__va(start);
initrd_end = initrd_start + INITRD_SIZE;
memblock_reserve(__pa(initrd_start), INITRD_SIZE);
@@ -432,6 +434,8 @@ static int show_cpuinfo(struct seq_file *m, void *v)
if (c->flags & CPU_HAS_L2_CACHE)
show_cacheinfo(m, "scache", c->scache);
+ seq_printf(m, "address sizes\t: %u bits physical\n", c->phys_bits);
+
seq_printf(m, "bogomips\t: %lu.%02lu\n",
c->loops_per_jiffy/(500000/HZ),
(c->loops_per_jiffy/(5000/HZ)) % 100);
@@ -458,17 +462,3 @@ const struct seq_operations cpuinfo_op = {
.show = show_cpuinfo,
};
#endif /* CONFIG_PROC_FS */
-
-struct dentry *sh_debugfs_root;
-
-static int __init sh_debugfs_init(void)
-{
- sh_debugfs_root = debugfs_create_dir("sh", NULL);
- if (!sh_debugfs_root)
- return -ENOMEM;
- if (IS_ERR(sh_debugfs_root))
- return PTR_ERR(sh_debugfs_root);
-
- return 0;
-}
-arch_initcall(sh_debugfs_init);
diff --git a/arch/sh/kernel/sys_sh.c b/arch/sh/kernel/sys_sh.c
index 81f58371613d..8c6a350df751 100644
--- a/arch/sh/kernel/sys_sh.c
+++ b/arch/sh/kernel/sys_sh.c
@@ -88,7 +88,7 @@ asmlinkage int sys_cacheflush(unsigned long addr, unsigned long len, int op)
}
if (op & CACHEFLUSH_I)
- flush_cache_all();
+ flush_icache_range(addr, addr+len);
up_read(&current->mm->mmap_sem);
return 0;
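Aside: a sketch of the user-visible effect, assuming the sh-specific __NR_cacheflush syscall number and the CACHEFLUSH_I flag from <asm/cachectl.h>; with the change above only the requested range is synced, rather than flushing every cache.

	#include <unistd.h>
	#include <sys/syscall.h>
	#include <asm/cachectl.h>	/* CACHEFLUSH_I */

	static void sync_jit_region(void *code, unsigned long len)
	{
		/* e.g. a JIT that just wrote `len` bytes of code at `code` */
		syscall(__NR_cacheflush, (unsigned long)code, len, CACHEFLUSH_I);
	}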
diff --git a/arch/sh/kernel/syscalls_32.S b/arch/sh/kernel/syscalls_32.S
index 19fd11dd9871..e872e81add8a 100644
--- a/arch/sh/kernel/syscalls_32.S
+++ b/arch/sh/kernel/syscalls_32.S
@@ -353,3 +353,25 @@ ENTRY(sys_call_table)
.long sys_pwritev
.long sys_rt_tgsigqueueinfo /* 335 */
.long sys_perf_event_open
+ .long sys_fanotify_init
+ .long sys_fanotify_mark
+ .long sys_prlimit64
+ /* Broken-out socket family */
+ .long sys_socket /* 340 */
+ .long sys_bind
+ .long sys_connect
+ .long sys_listen
+ .long sys_accept
+ .long sys_getsockname /* 345 */
+ .long sys_getpeername
+ .long sys_socketpair
+ .long sys_send
+ .long sys_sendto
+ .long sys_recv /* 350 */
+ .long sys_recvfrom
+ .long sys_shutdown
+ .long sys_setsockopt
+ .long sys_getsockopt
+ .long sys_sendmsg /* 355 */
+ .long sys_recvmsg
+ .long sys_recvmmsg
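Aside: a sketch of what the new entries enable, with the number taken from the table above (340 == sys_socket); a libc or test program can now issue the socket family directly instead of multiplexing through sys_socketcall.

	#include <unistd.h>
	#include <sys/syscall.h>
	#include <sys/socket.h>

	#define __NR_socket 340		/* per the table above */

	static int make_socket(void)
	{
		/* Direct syscall instead of socketcall(SYS_SOCKET, ...) */
		return syscall(__NR_socket, AF_INET, SOCK_STREAM, 0);
	}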
diff --git a/arch/sh/kernel/syscalls_64.S b/arch/sh/kernel/syscalls_64.S
index 2048a20d7c80..66585708ce90 100644
--- a/arch/sh/kernel/syscalls_64.S
+++ b/arch/sh/kernel/syscalls_64.S
@@ -393,3 +393,6 @@ sys_call_table:
.long sys_perf_event_open
.long sys_recvmmsg /* 365 */
.long sys_accept4
+ .long sys_fanotify_init
+ .long sys_fanotify_mark
+ .long sys_prlimit64
diff --git a/arch/sh/kernel/traps_32.c b/arch/sh/kernel/traps_32.c
index c3d86fa71ddf..3484c2f65aba 100644
--- a/arch/sh/kernel/traps_32.c
+++ b/arch/sh/kernel/traps_32.c
@@ -5,7 +5,7 @@
* SuperH version: Copyright (C) 1999 Niibe Yutaka
* Copyright (C) 2000 Philipp Rumpf
* Copyright (C) 2000 David Howells
- * Copyright (C) 2002 - 2007 Paul Mundt
+ * Copyright (C) 2002 - 2010 Paul Mundt
*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file "COPYING" in the main directory of this archive
@@ -26,6 +26,7 @@
#include <linux/limits.h>
#include <linux/sysfs.h>
#include <linux/uaccess.h>
+#include <linux/perf_event.h>
#include <asm/system.h>
#include <asm/alignment.h>
#include <asm/fpu.h>
@@ -369,7 +370,8 @@ static inline int handle_delayslot(struct pt_regs *regs,
#define SH_PC_12BIT_OFFSET(instr) ((((signed short)(instr<<4))>>3) + 4)
int handle_unaligned_access(insn_size_t instruction, struct pt_regs *regs,
- struct mem_access *ma, int expected)
+ struct mem_access *ma, int expected,
+ unsigned long address)
{
u_int rm;
int ret, index;
@@ -383,9 +385,18 @@ int handle_unaligned_access(insn_size_t instruction, struct pt_regs *regs,
index = (instruction>>8)&15; /* 0x0F00 */
rm = regs->regs[index];
- /* shout about fixups */
- if (!expected)
+ /*
+ * Log the unexpected fixups, and then pass them on to perf.
+ *
+ * We intentionally don't report the expected cases to perf as
+ * otherwise the trapped I/O case will skew the results too much
+ * to be useful.
+ */
+ if (!expected) {
unaligned_fixups_notify(current, instruction, regs);
+ perf_sw_event(PERF_COUNT_SW_ALIGNMENT_FAULTS, 1, 0,
+ regs, address);
+ }
ret = -EFAULT;
switch (instruction&0xF000) {
@@ -574,7 +585,8 @@ fixup:
set_fs(USER_DS);
tmp = handle_unaligned_access(instruction, regs,
- &user_mem_access, 0);
+ &user_mem_access, 0,
+ address);
set_fs(oldfs);
if (tmp == 0)
@@ -607,8 +619,8 @@ uspace_segv:
unaligned_fixups_notify(current, instruction, regs);
- handle_unaligned_access(instruction, regs,
- &user_mem_access, 0);
+ handle_unaligned_access(instruction, regs, &user_mem_access,
+ 0, address);
set_fs(oldfs);
}
}
@@ -802,6 +814,9 @@ void __cpuinit per_cpu_trap_init(void)
: /* no output */
: "r" (&vbr_base)
: "memory");
+
+ /* disable exception blocking now when the vbr has been setup */
+ clear_bl_bit();
}
void *set_exception_table_vec(unsigned int vec, void *handler)
diff --git a/arch/sh/kernel/traps_64.c b/arch/sh/kernel/traps_64.c
index e67e140bf1f6..6713ca97e553 100644
--- a/arch/sh/kernel/traps_64.c
+++ b/arch/sh/kernel/traps_64.c
@@ -24,6 +24,7 @@
#include <linux/interrupt.h>
#include <linux/sysctl.h>
#include <linux/module.h>
+#include <linux/perf_event.h>
#include <asm/system.h>
#include <asm/uaccess.h>
#include <asm/io.h>
@@ -50,7 +51,7 @@ asmlinkage void do_##name(unsigned long error_code, struct pt_regs *regs) \
do_unhandled_exception(trapnr, signr, str, __stringify(name), error_code, regs, current); \
}
-spinlock_t die_lock;
+static DEFINE_SPINLOCK(die_lock);
void die(const char * str, struct pt_regs * regs, long err)
{
@@ -433,6 +434,8 @@ static int misaligned_load(struct pt_regs *regs,
return error;
}
+ perf_sw_event(PERF_COUNT_SW_ALIGNMENT_FAULTS, 1, 0, regs, address);
+
destreg = (opcode >> 4) & 0x3f;
if (user_mode(regs)) {
__u64 buffer;
@@ -509,6 +512,8 @@ static int misaligned_store(struct pt_regs *regs,
return error;
}
+ perf_sw_event(PERF_COUNT_SW_ALIGNMENT_FAULTS, 1, 0, regs, address);
+
srcreg = (opcode >> 4) & 0x3f;
if (user_mode(regs)) {
__u64 buffer;
@@ -583,6 +588,8 @@ static int misaligned_fpu_load(struct pt_regs *regs,
return error;
}
+ perf_sw_event(PERF_COUNT_SW_EMULATION_FAULTS, 1, 0, regs, address);
+
destreg = (opcode >> 4) & 0x3f;
if (user_mode(regs)) {
__u64 buffer;
@@ -658,6 +665,8 @@ static int misaligned_fpu_store(struct pt_regs *regs,
return error;
}
+ perf_sw_event(PERF_COUNT_SW_EMULATION_FAULTS, 1, 0, regs, address);
+
srcreg = (opcode >> 4) & 0x3f;
if (user_mode(regs)) {
__u64 buffer;
diff --git a/arch/sh/kernel/vsyscall/vsyscall-trapa.S b/arch/sh/kernel/vsyscall/vsyscall-trapa.S
index 3b6eb34c43fa..3e70f851cdc6 100644
--- a/arch/sh/kernel/vsyscall/vsyscall-trapa.S
+++ b/arch/sh/kernel/vsyscall/vsyscall-trapa.S
@@ -8,9 +8,9 @@ __kernel_vsyscall:
* fill out .eh_frame -- PFM. */
.LEND_vsyscall:
.size __kernel_vsyscall,.-.LSTART_vsyscall
- .previous
.section .eh_frame,"a",@progbits
+ .previous
.LCIE:
.ualong .LCIE_end - .LCIE_start
.LCIE_start:
diff --git a/arch/sh/lib/Makefile b/arch/sh/lib/Makefile
index dab4d2129812..7b95f29e3174 100644
--- a/arch/sh/lib/Makefile
+++ b/arch/sh/lib/Makefile
@@ -30,4 +30,4 @@ lib-$(CONFIG_MMU) += copy_page.o __clear_user.o
lib-$(CONFIG_MCOUNT) += mcount.o
lib-y += $(memcpy-y) $(memset-y) $(udivsi3-y)
-EXTRA_CFLAGS += -Werror
+ccflags-y := -Werror
diff --git a/arch/sh/math-emu/math.c b/arch/sh/math-emu/math.c
index 1fcdb1220975..f76a5090d5d1 100644
--- a/arch/sh/math-emu/math.c
+++ b/arch/sh/math-emu/math.c
@@ -12,6 +12,7 @@
#include <linux/types.h>
#include <linux/sched.h>
#include <linux/signal.h>
+#include <linux/perf_event.h>
#include <asm/system.h>
#include <asm/uaccess.h>
@@ -619,6 +620,8 @@ int do_fpu_inst(unsigned short inst, struct pt_regs *regs)
struct task_struct *tsk = current;
struct sh_fpu_soft_struct *fpu = &(tsk->thread.xstate->softfpu);
+ perf_sw_event(PERF_COUNT_SW_EMULATION_FAULTS, 1, 0, regs, 0);
+
if (!(task_thread_info(tsk)->status & TS_USEDFPU)) {
/* initialize once. */
fpu_init(fpu);
diff --git a/arch/sh/mm/Kconfig b/arch/sh/mm/Kconfig
index 1445ca6257df..c3e61b366493 100644
--- a/arch/sh/mm/Kconfig
+++ b/arch/sh/mm/Kconfig
@@ -79,7 +79,7 @@ config 29BIT
config 32BIT
bool
- default y if CPU_SH5
+ default y if CPU_SH5 || !MMU
config PMB
bool "Support 32-bit physical addressing through PMB"
@@ -168,6 +168,10 @@ config IOREMAP_FIXED
config UNCACHED_MAPPING
bool
+config HAVE_SRAM_POOL
+ bool
+ select GENERIC_ALLOCATOR
+
choice
prompt "Kernel page size"
default PAGE_SIZE_4KB
diff --git a/arch/sh/mm/Makefile b/arch/sh/mm/Makefile
index 53f7c684afb2..150aa326afff 100644
--- a/arch/sh/mm/Makefile
+++ b/arch/sh/mm/Makefile
@@ -15,7 +15,7 @@ cacheops-$(CONFIG_CPU_SHX3) += cache-shx3.o
obj-y += $(cacheops-y)
mmu-y := nommu.o extable_32.o
-mmu-$(CONFIG_MMU) := extable_$(BITS).o fault_$(BITS).o \
+mmu-$(CONFIG_MMU) := extable_$(BITS).o fault_$(BITS).o gup.o \
ioremap.o kmap.o pgtable.o tlbflush_$(BITS).o
obj-y += $(mmu-y)
@@ -40,6 +40,7 @@ obj-$(CONFIG_PMB) += pmb.o
obj-$(CONFIG_NUMA) += numa.o
obj-$(CONFIG_IOREMAP_FIXED) += ioremap_fixed.o
obj-$(CONFIG_UNCACHED_MAPPING) += uncached.o
+obj-$(CONFIG_HAVE_SRAM_POOL) += sram.o
# Special flags for fault_64.o. This puts restrictions on the number of
# caller-save registers that the compiler can target when building this file.
@@ -66,4 +67,4 @@ CFLAGS_fault_64.o += -ffixed-r7 \
-ffixed-r60 -ffixed-r61 -ffixed-r62 \
-fomit-frame-pointer
-EXTRA_CFLAGS += -Werror
+ccflags-y := -Werror
diff --git a/arch/sh/mm/asids-debugfs.c b/arch/sh/mm/asids-debugfs.c
index cd8c3bf39b5a..74c03ecc4871 100644
--- a/arch/sh/mm/asids-debugfs.c
+++ b/arch/sh/mm/asids-debugfs.c
@@ -63,7 +63,7 @@ static int __init asids_debugfs_init(void)
{
struct dentry *asids_dentry;
- asids_dentry = debugfs_create_file("asids", S_IRUSR, sh_debugfs_root,
+ asids_dentry = debugfs_create_file("asids", S_IRUSR, arch_debugfs_dir,
NULL, &asids_debugfs_fops);
if (!asids_dentry)
return -ENOMEM;
diff --git a/arch/sh/mm/cache-debugfs.c b/arch/sh/mm/cache-debugfs.c
index 690ed010d002..52411462c409 100644
--- a/arch/sh/mm/cache-debugfs.c
+++ b/arch/sh/mm/cache-debugfs.c
@@ -126,25 +126,19 @@ static int __init cache_debugfs_init(void)
{
struct dentry *dcache_dentry, *icache_dentry;
- dcache_dentry = debugfs_create_file("dcache", S_IRUSR, sh_debugfs_root,
+ dcache_dentry = debugfs_create_file("dcache", S_IRUSR, arch_debugfs_dir,
(unsigned int *)CACHE_TYPE_DCACHE,
&cache_debugfs_fops);
if (!dcache_dentry)
return -ENOMEM;
- if (IS_ERR(dcache_dentry))
- return PTR_ERR(dcache_dentry);
- icache_dentry = debugfs_create_file("icache", S_IRUSR, sh_debugfs_root,
+ icache_dentry = debugfs_create_file("icache", S_IRUSR, arch_debugfs_dir,
(unsigned int *)CACHE_TYPE_ICACHE,
&cache_debugfs_fops);
if (!icache_dentry) {
debugfs_remove(dcache_dentry);
return -ENOMEM;
}
- if (IS_ERR(icache_dentry)) {
- debugfs_remove(dcache_dentry);
- return PTR_ERR(icache_dentry);
- }
return 0;
}
diff --git a/arch/sh/mm/cache-sh4.c b/arch/sh/mm/cache-sh4.c
index 2cfae81914aa..92eb98633ab0 100644
--- a/arch/sh/mm/cache-sh4.c
+++ b/arch/sh/mm/cache-sh4.c
@@ -114,7 +114,7 @@ static void sh4_flush_dcache_page(void *arg)
struct address_space *mapping = page_mapping(page);
if (mapping && !mapping_mapped(mapping))
- set_bit(PG_dcache_dirty, &page->flags);
+ clear_bit(PG_dcache_clean, &page->flags);
else
#endif
flush_cache_one(CACHE_OC_ADDRESS_ARRAY |
@@ -239,7 +239,7 @@ static void sh4_flush_cache_page(void *args)
* another ASID than the current one.
*/
map_coherent = (current_cpu_data.dcache.n_aliases &&
- !test_bit(PG_dcache_dirty, &page->flags) &&
+ test_bit(PG_dcache_clean, &page->flags) &&
page_mapped(page));
if (map_coherent)
vaddr = kmap_coherent(page, address);
diff --git a/arch/sh/mm/cache-sh7705.c b/arch/sh/mm/cache-sh7705.c
index f498da1cce7a..7729cca727eb 100644
--- a/arch/sh/mm/cache-sh7705.c
+++ b/arch/sh/mm/cache-sh7705.c
@@ -139,7 +139,7 @@ static void sh7705_flush_dcache_page(void *arg)
struct address_space *mapping = page_mapping(page);
if (mapping && !mapping_mapped(mapping))
- set_bit(PG_dcache_dirty, &page->flags);
+ clear_bit(PG_dcache_clean, &page->flags);
else
__flush_dcache_page(__pa(page_address(page)));
}
diff --git a/arch/sh/mm/cache.c b/arch/sh/mm/cache.c
index ba401d137bb9..88d3dc3d30d5 100644
--- a/arch/sh/mm/cache.c
+++ b/arch/sh/mm/cache.c
@@ -60,14 +60,14 @@ void copy_to_user_page(struct vm_area_struct *vma, struct page *page,
unsigned long len)
{
if (boot_cpu_data.dcache.n_aliases && page_mapped(page) &&
- !test_bit(PG_dcache_dirty, &page->flags)) {
+ test_bit(PG_dcache_clean, &page->flags)) {
void *vto = kmap_coherent(page, vaddr) + (vaddr & ~PAGE_MASK);
memcpy(vto, src, len);
kunmap_coherent(vto);
} else {
memcpy(dst, src, len);
if (boot_cpu_data.dcache.n_aliases)
- set_bit(PG_dcache_dirty, &page->flags);
+ clear_bit(PG_dcache_clean, &page->flags);
}
if (vma->vm_flags & VM_EXEC)
@@ -79,14 +79,14 @@ void copy_from_user_page(struct vm_area_struct *vma, struct page *page,
unsigned long len)
{
if (boot_cpu_data.dcache.n_aliases && page_mapped(page) &&
- !test_bit(PG_dcache_dirty, &page->flags)) {
+ test_bit(PG_dcache_clean, &page->flags)) {
void *vfrom = kmap_coherent(page, vaddr) + (vaddr & ~PAGE_MASK);
memcpy(dst, vfrom, len);
kunmap_coherent(vfrom);
} else {
memcpy(dst, src, len);
if (boot_cpu_data.dcache.n_aliases)
- set_bit(PG_dcache_dirty, &page->flags);
+ clear_bit(PG_dcache_clean, &page->flags);
}
}
@@ -98,7 +98,7 @@ void copy_user_highpage(struct page *to, struct page *from,
vto = kmap_atomic(to, KM_USER1);
if (boot_cpu_data.dcache.n_aliases && page_mapped(from) &&
- !test_bit(PG_dcache_dirty, &from->flags)) {
+ test_bit(PG_dcache_clean, &from->flags)) {
vfrom = kmap_coherent(from, vaddr);
copy_page(vto, vfrom);
kunmap_coherent(vfrom);
@@ -141,7 +141,7 @@ void __update_cache(struct vm_area_struct *vma,
page = pfn_to_page(pfn);
if (pfn_valid(pfn)) {
- int dirty = test_and_clear_bit(PG_dcache_dirty, &page->flags);
+ int dirty = !test_and_set_bit(PG_dcache_clean, &page->flags);
if (dirty)
__flush_purge_region(page_address(page), PAGE_SIZE);
}
@@ -153,7 +153,7 @@ void __flush_anon_page(struct page *page, unsigned long vmaddr)
if (pages_do_alias(addr, vmaddr)) {
if (boot_cpu_data.dcache.n_aliases && page_mapped(page) &&
- !test_bit(PG_dcache_dirty, &page->flags)) {
+ test_bit(PG_dcache_clean, &page->flags)) {
void *kaddr;
kaddr = kmap_coherent(page, vmaddr);
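Aside: the hunks above only invert the sense of the per-page flag (PG_dcache_dirty tracked "needs writeback", PG_dcache_clean tracks the opposite); a sketch of the two equivalences, using hypothetical helper names.

	static inline void page_dcache_mark_dirty(struct page *page)
	{
		/* Equivalent to the old set_bit(PG_dcache_dirty, &page->flags). */
		clear_bit(PG_dcache_clean, &page->flags);
	}

	static inline int page_dcache_was_dirty(struct page *page)
	{
		/*
		 * Equivalent to the old test_and_clear_bit(PG_dcache_dirty, ...):
		 * returns true exactly when the page still needed a writeback,
		 * and marks it clean as a side effect.
		 */
		return !test_and_set_bit(PG_dcache_clean, &page->flags);
	}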
diff --git a/arch/sh/mm/consistent.c b/arch/sh/mm/consistent.c
index c86a08540258..40733a952402 100644
--- a/arch/sh/mm/consistent.c
+++ b/arch/sh/mm/consistent.c
@@ -38,11 +38,12 @@ void *dma_generic_alloc_coherent(struct device *dev, size_t size,
void *ret, *ret_nocache;
int order = get_order(size);
+ gfp |= __GFP_ZERO;
+
ret = (void *)__get_free_pages(gfp, order);
if (!ret)
return NULL;
- memset(ret, 0, size);
/*
* Pages from the page allocator may have data present in
* cache. So flush the cache before using uncached memory.
@@ -78,21 +79,20 @@ void dma_generic_free_coherent(struct device *dev, size_t size,
void dma_cache_sync(struct device *dev, void *vaddr, size_t size,
enum dma_data_direction direction)
{
-#if defined(CONFIG_CPU_SH5) || defined(CONFIG_PMB)
- void *p1addr = vaddr;
-#else
- void *p1addr = (void*) P1SEGADDR((unsigned long)vaddr);
-#endif
+ void *addr;
+
+ addr = __in_29bit_mode() ?
+ (void *)P1SEGADDR((unsigned long)vaddr) : vaddr;
switch (direction) {
case DMA_FROM_DEVICE: /* invalidate only */
- __flush_invalidate_region(p1addr, size);
+ __flush_invalidate_region(addr, size);
break;
case DMA_TO_DEVICE: /* writeback only */
- __flush_wback_region(p1addr, size);
+ __flush_wback_region(addr, size);
break;
case DMA_BIDIRECTIONAL: /* writeback and invalidate */
- __flush_purge_region(p1addr, size);
+ __flush_purge_region(addr, size);
break;
default:
BUG();
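Aside: for context, a sketch of the call path dma_cache_sync() services, assuming a driver using the then-current noncoherent DMA API; the device, payload, and start_device_dma() are made up.

	static int send_buffer(struct device *dev, const void *payload, size_t len)
	{
		dma_addr_t handle;
		void *buf = dma_alloc_noncoherent(dev, len, &handle, GFP_KERNEL);

		if (!buf)
			return -ENOMEM;

		memcpy(buf, payload, len);			/* CPU fills the buffer   */
		dma_cache_sync(dev, buf, len, DMA_TO_DEVICE);	/* write back before DMA  */
		start_device_dma(dev, handle, len);		/* hypothetical           */
		return 0;
	}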
diff --git a/arch/sh/mm/gup.c b/arch/sh/mm/gup.c
new file mode 100644
index 000000000000..bf8daf9d9c9b
--- /dev/null
+++ b/arch/sh/mm/gup.c
@@ -0,0 +1,273 @@
+/*
+ * Lockless get_user_pages_fast for SuperH
+ *
+ * Copyright (C) 2009 - 2010 Paul Mundt
+ *
+ * Cloned from the x86 and PowerPC versions, by:
+ *
+ * Copyright (C) 2008 Nick Piggin
+ * Copyright (C) 2008 Novell Inc.
+ */
+#include <linux/sched.h>
+#include <linux/mm.h>
+#include <linux/vmstat.h>
+#include <linux/highmem.h>
+#include <asm/pgtable.h>
+
+static inline pte_t gup_get_pte(pte_t *ptep)
+{
+#ifndef CONFIG_X2TLB
+ return ACCESS_ONCE(*ptep);
+#else
+ /*
+ * With get_user_pages_fast, we walk down the pagetables without
+ * taking any locks. For this we would like to load the pointers
+ * atomically, but that is not possible with 64-bit PTEs. What
+ * we do have is the guarantee that a pte will only either go
+ * from not present to present, or present to not present or both
+ * -- it will not switch to a completely different present page
+ * without a TLB flush in between; something that we are blocking
+ * by holding interrupts off.
+ *
+ * Setting ptes from not present to present goes:
+ * ptep->pte_high = h;
+ * smp_wmb();
+ * ptep->pte_low = l;
+ *
+ * And present to not present goes:
+ * ptep->pte_low = 0;
+ * smp_wmb();
+ * ptep->pte_high = 0;
+ *
+ * We must ensure here that the load of pte_low sees l iff pte_high
+ * sees h. We load pte_high *after* loading pte_low, which ensures we
+ * don't see an older value of pte_high. *Then* we recheck pte_low,
+ * which ensures that we haven't picked up a changed pte high. We might
+ * have got rubbish values from pte_low and pte_high, but we are
+ * guaranteed that pte_low will not have the present bit set *unless*
+ * it is 'l'. And get_user_pages_fast only operates on present ptes, so
+ * we're safe.
+ *
+ * gup_get_pte should not be used or copied outside gup.c without being
+ * very careful -- it does not atomically load the pte or anything that
+ * is likely to be useful for you.
+ */
+ pte_t pte;
+
+retry:
+ pte.pte_low = ptep->pte_low;
+ smp_rmb();
+ pte.pte_high = ptep->pte_high;
+ smp_rmb();
+ if (unlikely(pte.pte_low != ptep->pte_low))
+ goto retry;
+
+ return pte;
+#endif
+}
+
+/*
+ * The performance critical leaf functions are made noinline otherwise gcc
+ * inlines everything into a single function which results in too much
+ * register pressure.
+ */
+static noinline int gup_pte_range(pmd_t pmd, unsigned long addr,
+ unsigned long end, int write, struct page **pages, int *nr)
+{
+ u64 mask, result;
+ pte_t *ptep;
+
+#ifdef CONFIG_X2TLB
+ result = _PAGE_PRESENT | _PAGE_EXT(_PAGE_EXT_KERN_READ | _PAGE_EXT_USER_READ);
+ if (write)
+ result |= _PAGE_EXT(_PAGE_EXT_KERN_WRITE | _PAGE_EXT_USER_WRITE);
+#elif defined(CONFIG_SUPERH64)
+ result = _PAGE_PRESENT | _PAGE_USER | _PAGE_READ;
+ if (write)
+ result |= _PAGE_WRITE;
+#else
+ result = _PAGE_PRESENT | _PAGE_USER;
+ if (write)
+ result |= _PAGE_RW;
+#endif
+
+ mask = result | _PAGE_SPECIAL;
+
+ ptep = pte_offset_map(&pmd, addr);
+ do {
+ pte_t pte = gup_get_pte(ptep);
+ struct page *page;
+
+ if ((pte_val(pte) & mask) != result) {
+ pte_unmap(ptep);
+ return 0;
+ }
+ VM_BUG_ON(!pfn_valid(pte_pfn(pte)));
+ page = pte_page(pte);
+ get_page(page);
+ pages[*nr] = page;
+ (*nr)++;
+
+ } while (ptep++, addr += PAGE_SIZE, addr != end);
+ pte_unmap(ptep - 1);
+
+ return 1;
+}
+
+static int gup_pmd_range(pud_t pud, unsigned long addr, unsigned long end,
+ int write, struct page **pages, int *nr)
+{
+ unsigned long next;
+ pmd_t *pmdp;
+
+ pmdp = pmd_offset(&pud, addr);
+ do {
+ pmd_t pmd = *pmdp;
+
+ next = pmd_addr_end(addr, end);
+ if (pmd_none(pmd))
+ return 0;
+ if (!gup_pte_range(pmd, addr, next, write, pages, nr))
+ return 0;
+ } while (pmdp++, addr = next, addr != end);
+
+ return 1;
+}
+
+static int gup_pud_range(pgd_t pgd, unsigned long addr, unsigned long end,
+ int write, struct page **pages, int *nr)
+{
+ unsigned long next;
+ pud_t *pudp;
+
+ pudp = pud_offset(&pgd, addr);
+ do {
+ pud_t pud = *pudp;
+
+ next = pud_addr_end(addr, end);
+ if (pud_none(pud))
+ return 0;
+ if (!gup_pmd_range(pud, addr, next, write, pages, nr))
+ return 0;
+ } while (pudp++, addr = next, addr != end);
+
+ return 1;
+}
+
+/*
+ * Like get_user_pages_fast() except it's IRQ-safe in that it won't fall
+ * back to the regular GUP.
+ */
+int __get_user_pages_fast(unsigned long start, int nr_pages, int write,
+ struct page **pages)
+{
+ struct mm_struct *mm = current->mm;
+ unsigned long addr, len, end;
+ unsigned long next;
+ unsigned long flags;
+ pgd_t *pgdp;
+ int nr = 0;
+
+ start &= PAGE_MASK;
+ addr = start;
+ len = (unsigned long) nr_pages << PAGE_SHIFT;
+ end = start + len;
+ if (unlikely(!access_ok(write ? VERIFY_WRITE : VERIFY_READ,
+ (void __user *)start, len)))
+ return 0;
+
+ /*
+ * This doesn't prevent pagetable teardown, but does prevent
+ * the pagetables and pages from being freed.
+ */
+ local_irq_save(flags);
+ pgdp = pgd_offset(mm, addr);
+ do {
+ pgd_t pgd = *pgdp;
+
+ next = pgd_addr_end(addr, end);
+ if (pgd_none(pgd))
+ break;
+ if (!gup_pud_range(pgd, addr, next, write, pages, &nr))
+ break;
+ } while (pgdp++, addr = next, addr != end);
+ local_irq_restore(flags);
+
+ return nr;
+}
+
+/**
+ * get_user_pages_fast() - pin user pages in memory
+ * @start: starting user address
+ * @nr_pages: number of pages from start to pin
+ * @write: whether pages will be written to
+ * @pages: array that receives pointers to the pages pinned.
+ * Should be at least nr_pages long.
+ *
+ * Attempt to pin user pages in memory without taking mm->mmap_sem.
+ * If not successful, it will fall back to taking the lock and
+ * calling get_user_pages().
+ *
+ * Returns number of pages pinned. This may be fewer than the number
+ * requested. If nr_pages is 0 or negative, returns 0. If no pages
+ * were pinned, returns -errno.
+ */
+int get_user_pages_fast(unsigned long start, int nr_pages, int write,
+ struct page **pages)
+{
+ struct mm_struct *mm = current->mm;
+ unsigned long addr, len, end;
+ unsigned long next;
+ pgd_t *pgdp;
+ int nr = 0;
+
+ start &= PAGE_MASK;
+ addr = start;
+ len = (unsigned long) nr_pages << PAGE_SHIFT;
+
+ end = start + len;
+ if (end < start)
+ goto slow_irqon;
+
+ local_irq_disable();
+ pgdp = pgd_offset(mm, addr);
+ do {
+ pgd_t pgd = *pgdp;
+
+ next = pgd_addr_end(addr, end);
+ if (pgd_none(pgd))
+ goto slow;
+ if (!gup_pud_range(pgd, addr, next, write, pages, &nr))
+ goto slow;
+ } while (pgdp++, addr = next, addr != end);
+ local_irq_enable();
+
+ VM_BUG_ON(nr != (end - start) >> PAGE_SHIFT);
+ return nr;
+
+ {
+ int ret;
+
+slow:
+ local_irq_enable();
+slow_irqon:
+ /* Try to get the remaining pages with get_user_pages */
+ start += nr << PAGE_SHIFT;
+ pages += nr;
+
+ down_read(&mm->mmap_sem);
+ ret = get_user_pages(current, mm, start,
+ (end - start) >> PAGE_SHIFT, write, 0, pages, NULL);
+ up_read(&mm->mmap_sem);
+
+ /* Have to be a bit careful with return values */
+ if (nr > 0) {
+ if (ret < 0)
+ ret = nr;
+ else
+ ret += nr;
+ }
+
+ return ret;
+ }
+}
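Aside: a sketch of how a caller drives the new fast path; the semantics match the kerneldoc above (returns the number of pages pinned, and the caller drops the references when done).

	static int pin_user_buffer(unsigned long uaddr, int nr_pages,
				   struct page **pages)
	{
		int i, got;

		/* No mmap_sem needed here; the slow path is taken internally. */
		got = get_user_pages_fast(uaddr, nr_pages, 1, pages);
		if (got <= 0)
			return got;

		/* ... operate on pages[0..got-1] ... */

		for (i = 0; i < got; i++)
			put_page(pages[i]);
		return got;
	}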
diff --git a/arch/sh/mm/init.c b/arch/sh/mm/init.c
index 552bea5113f5..3385b28acaac 100644
--- a/arch/sh/mm/init.c
+++ b/arch/sh/mm/init.c
@@ -47,7 +47,6 @@ static pte_t *__get_pte_phys(unsigned long addr)
pgd_t *pgd;
pud_t *pud;
pmd_t *pmd;
- pte_t *pte;
pgd = pgd_offset_k(addr);
if (pgd_none(*pgd)) {
@@ -67,8 +66,7 @@ static pte_t *__get_pte_phys(unsigned long addr)
return NULL;
}
- pte = pte_offset_kernel(pmd, addr);
- return pte;
+ return pte_offset_kernel(pmd, addr);
}
static void set_pte_phys(unsigned long addr, unsigned long phys, pgprot_t prot)
@@ -125,13 +123,45 @@ void __clear_fixmap(enum fixed_addresses idx, pgprot_t prot)
clear_pte_phys(address, prot);
}
+static pmd_t * __init one_md_table_init(pud_t *pud)
+{
+ if (pud_none(*pud)) {
+ pmd_t *pmd;
+
+ pmd = alloc_bootmem_pages(PAGE_SIZE);
+ pud_populate(&init_mm, pud, pmd);
+ BUG_ON(pmd != pmd_offset(pud, 0));
+ }
+
+ return pmd_offset(pud, 0);
+}
+
+static pte_t * __init one_page_table_init(pmd_t *pmd)
+{
+ if (pmd_none(*pmd)) {
+ pte_t *pte;
+
+ pte = alloc_bootmem_pages(PAGE_SIZE);
+ pmd_populate_kernel(&init_mm, pmd, pte);
+ BUG_ON(pte != pte_offset_kernel(pmd, 0));
+ }
+
+ return pte_offset_kernel(pmd, 0);
+}
+
+static pte_t * __init page_table_kmap_check(pte_t *pte, pmd_t *pmd,
+ unsigned long vaddr, pte_t *lastpte)
+{
+ return pte;
+}
+
void __init page_table_range_init(unsigned long start, unsigned long end,
pgd_t *pgd_base)
{
pgd_t *pgd;
pud_t *pud;
pmd_t *pmd;
- pte_t *pte;
+ pte_t *pte = NULL;
int i, j, k;
unsigned long vaddr;
@@ -144,19 +174,13 @@ void __init page_table_range_init(unsigned long start, unsigned long end,
for ( ; (i < PTRS_PER_PGD) && (vaddr != end); pgd++, i++) {
pud = (pud_t *)pgd;
for ( ; (j < PTRS_PER_PUD) && (vaddr != end); pud++, j++) {
-#ifdef __PAGETABLE_PMD_FOLDED
- pmd = (pmd_t *)pud;
-#else
- pmd = (pmd_t *)alloc_bootmem_low_pages(PAGE_SIZE);
- pud_populate(&init_mm, pud, pmd);
+ pmd = one_md_table_init(pud);
+#ifndef __PAGETABLE_PMD_FOLDED
pmd += k;
#endif
for (; (k < PTRS_PER_PMD) && (vaddr != end); pmd++, k++) {
- if (pmd_none(*pmd)) {
- pte = (pte_t *) alloc_bootmem_low_pages(PAGE_SIZE);
- pmd_populate_kernel(&init_mm, pmd, pte);
- BUG_ON(pte != pte_offset_kernel(pmd, 0));
- }
+ pte = page_table_kmap_check(one_page_table_init(pmd),
+ pmd, vaddr, pte);
vaddr += PMD_SIZE;
}
k = 0;
diff --git a/arch/sh/mm/kmap.c b/arch/sh/mm/kmap.c
index 15d74ea42094..ec29e14ec5a8 100644
--- a/arch/sh/mm/kmap.c
+++ b/arch/sh/mm/kmap.c
@@ -34,7 +34,7 @@ void *kmap_coherent(struct page *page, unsigned long addr)
enum fixed_addresses idx;
unsigned long vaddr;
- BUG_ON(test_bit(PG_dcache_dirty, &page->flags));
+ BUG_ON(!test_bit(PG_dcache_clean, &page->flags));
pagefault_disable();
diff --git a/arch/sh/mm/nommu.c b/arch/sh/mm/nommu.c
index 7694f50c9034..36312d254faf 100644
--- a/arch/sh/mm/nommu.c
+++ b/arch/sh/mm/nommu.c
@@ -67,6 +67,10 @@ void local_flush_tlb_kernel_range(unsigned long start, unsigned long end)
BUG();
}
+void __flush_tlb_global(void)
+{
+}
+
void __update_tlb(struct vm_area_struct *vma, unsigned long address, pte_t pte)
{
}
diff --git a/arch/sh/mm/pmb.c b/arch/sh/mm/pmb.c
index 6379091a1647..b20b1b3eee4b 100644
--- a/arch/sh/mm/pmb.c
+++ b/arch/sh/mm/pmb.c
@@ -40,7 +40,7 @@ struct pmb_entry {
unsigned long flags;
unsigned long size;
- spinlock_t lock;
+ raw_spinlock_t lock;
/*
* 0 .. NR_PMB_ENTRIES for specific entry selection, or
@@ -265,7 +265,7 @@ static struct pmb_entry *pmb_alloc(unsigned long vpn, unsigned long ppn,
memset(pmbe, 0, sizeof(struct pmb_entry));
- spin_lock_init(&pmbe->lock);
+ raw_spin_lock_init(&pmbe->lock);
pmbe->vpn = vpn;
pmbe->ppn = ppn;
@@ -327,9 +327,9 @@ static void set_pmb_entry(struct pmb_entry *pmbe)
{
unsigned long flags;
- spin_lock_irqsave(&pmbe->lock, flags);
+ raw_spin_lock_irqsave(&pmbe->lock, flags);
__set_pmb_entry(pmbe);
- spin_unlock_irqrestore(&pmbe->lock, flags);
+ raw_spin_unlock_irqrestore(&pmbe->lock, flags);
}
#endif /* CONFIG_PM */
@@ -368,7 +368,7 @@ int pmb_bolt_mapping(unsigned long vaddr, phys_addr_t phys,
return PTR_ERR(pmbe);
}
- spin_lock_irqsave(&pmbe->lock, flags);
+ raw_spin_lock_irqsave(&pmbe->lock, flags);
pmbe->size = pmb_sizes[i].size;
@@ -383,9 +383,10 @@ int pmb_bolt_mapping(unsigned long vaddr, phys_addr_t phys,
* entries for easier tear-down.
*/
if (likely(pmbp)) {
- spin_lock(&pmbp->lock);
+ raw_spin_lock_nested(&pmbp->lock,
+ SINGLE_DEPTH_NESTING);
pmbp->link = pmbe;
- spin_unlock(&pmbp->lock);
+ raw_spin_unlock(&pmbp->lock);
}
pmbp = pmbe;
@@ -398,7 +399,7 @@ int pmb_bolt_mapping(unsigned long vaddr, phys_addr_t phys,
i--;
mapped++;
- spin_unlock_irqrestore(&pmbe->lock, flags);
+ raw_spin_unlock_irqrestore(&pmbe->lock, flags);
}
} while (size >= SZ_16M);
@@ -627,15 +628,14 @@ static void __init pmb_synchronize(void)
continue;
}
- spin_lock_irqsave(&pmbe->lock, irqflags);
+ raw_spin_lock_irqsave(&pmbe->lock, irqflags);
for (j = 0; j < ARRAY_SIZE(pmb_sizes); j++)
if (pmb_sizes[j].flag == size)
pmbe->size = pmb_sizes[j].size;
if (pmbp) {
- spin_lock(&pmbp->lock);
-
+ raw_spin_lock_nested(&pmbp->lock, SINGLE_DEPTH_NESTING);
/*
* Compare the previous entry against the current one to
* see if the entries span a contiguous mapping. If so,
@@ -644,13 +644,12 @@ static void __init pmb_synchronize(void)
*/
if (pmb_can_merge(pmbp, pmbe))
pmbp->link = pmbe;
-
- spin_unlock(&pmbp->lock);
+ raw_spin_unlock(&pmbp->lock);
}
pmbp = pmbe;
- spin_unlock_irqrestore(&pmbe->lock, irqflags);
+ raw_spin_unlock_irqrestore(&pmbe->lock, irqflags);
}
}
@@ -757,7 +756,7 @@ static void __init pmb_resize(void)
/*
* Found it, now resize it.
*/
- spin_lock_irqsave(&pmbe->lock, flags);
+ raw_spin_lock_irqsave(&pmbe->lock, flags);
pmbe->size = SZ_16M;
pmbe->flags &= ~PMB_SZ_MASK;
@@ -767,7 +766,7 @@ static void __init pmb_resize(void)
__set_pmb_entry(pmbe);
- spin_unlock_irqrestore(&pmbe->lock, flags);
+ raw_spin_unlock_irqrestore(&pmbe->lock, flags);
}
read_unlock(&pmb_rwlock);
@@ -866,11 +865,9 @@ static int __init pmb_debugfs_init(void)
struct dentry *dentry;
dentry = debugfs_create_file("pmb", S_IFREG | S_IRUGO,
- sh_debugfs_root, NULL, &pmb_debugfs_fops);
+ arch_debugfs_dir, NULL, &pmb_debugfs_fops);
if (!dentry)
return -ENOMEM;
- if (IS_ERR(dentry))
- return PTR_ERR(dentry);
return 0;
}
diff --git a/arch/sh/mm/sram.c b/arch/sh/mm/sram.c
new file mode 100644
index 000000000000..bc156ec4545e
--- /dev/null
+++ b/arch/sh/mm/sram.c
@@ -0,0 +1,34 @@
+/*
+ * SRAM pool for tiny memories not otherwise managed.
+ *
+ * Copyright (C) 2010 Paul Mundt
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ */
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <asm/sram.h>
+
+/*
+ * This provides a standard SRAM pool for tiny memories that can be
+ * added either by the CPU or the platform code. Typical SRAM sizes
+ * to be inserted in to the pool will generally be less than the page
+ * size, with anything more reasonably sized handled as a NUMA memory
+ * node.
+ */
+struct gen_pool *sram_pool;
+
+static int __init sram_pool_init(void)
+{
+ /*
+ * This is a global pool, we don't care about node locality.
+ */
+ sram_pool = gen_pool_create(1, -1);
+ if (unlikely(!sram_pool))
+ return -ENOMEM;
+
+ return 0;
+}
+core_initcall(sram_pool_init);
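Aside: a sketch of how platform code might feed a small on-chip SRAM into the pool and carve pieces out of it, using the stock genalloc API; the base address, size, and allocation are made up.

	#include <linux/genalloc.h>
	#include <asm/sram.h>

	static unsigned long board_sram_base;	/* hypothetical, set by board setup */
	static unsigned long scratch;

	static int __init board_sram_init(void)
	{
		/* Hand a (hypothetical) 4KiB on-chip SRAM window to the pool. */
		int ret = gen_pool_add(sram_pool, board_sram_base, SZ_4K, -1);
		if (ret)
			return ret;

		/* Later users carve small allocations out of the pool. */
		scratch = gen_pool_alloc(sram_pool, 64);
		return scratch ? 0 : -ENOMEM;
	}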
diff --git a/arch/sh/mm/tlb-debugfs.c b/arch/sh/mm/tlb-debugfs.c
index 229bf75f28df..dea637a09246 100644
--- a/arch/sh/mm/tlb-debugfs.c
+++ b/arch/sh/mm/tlb-debugfs.c
@@ -151,15 +151,13 @@ static int __init tlb_debugfs_init(void)
{
struct dentry *itlb, *utlb;
- itlb = debugfs_create_file("itlb", S_IRUSR, sh_debugfs_root,
+ itlb = debugfs_create_file("itlb", S_IRUSR, arch_debugfs_dir,
(unsigned int *)TLB_TYPE_ITLB,
&tlb_debugfs_fops);
if (unlikely(!itlb))
return -ENOMEM;
- if (IS_ERR(itlb))
- return PTR_ERR(itlb);
- utlb = debugfs_create_file("utlb", S_IRUSR, sh_debugfs_root,
+ utlb = debugfs_create_file("utlb", S_IRUSR, arch_debugfs_dir,
(unsigned int *)TLB_TYPE_UTLB,
&tlb_debugfs_fops);
if (unlikely(!utlb)) {
@@ -167,11 +165,6 @@ static int __init tlb_debugfs_init(void)
return -ENOMEM;
}
- if (IS_ERR(utlb)) {
- debugfs_remove(itlb);
- return PTR_ERR(utlb);
- }
-
return 0;
}
module_init(tlb_debugfs_init);
diff --git a/arch/sh/mm/tlbflush_32.c b/arch/sh/mm/tlbflush_32.c
index 3fbe03ce8fe3..a6a20d6de4c0 100644
--- a/arch/sh/mm/tlbflush_32.c
+++ b/arch/sh/mm/tlbflush_32.c
@@ -119,3 +119,19 @@ void local_flush_tlb_mm(struct mm_struct *mm)
local_irq_restore(flags);
}
}
+
+void __flush_tlb_global(void)
+{
+ unsigned long flags;
+
+ local_irq_save(flags);
+
+ /*
+ * This is the most destructive of the TLB flushing options,
+ * and will tear down all of the UTLB/ITLB mappings, including
+ * wired entries.
+ */
+ __raw_writel(__raw_readl(MMUCR) | MMUCR_TI, MMUCR);
+
+ local_irq_restore(flags);
+}
diff --git a/arch/sh/mm/tlbflush_64.c b/arch/sh/mm/tlbflush_64.c
index 03db41cc1268..7f5810f5dfdc 100644
--- a/arch/sh/mm/tlbflush_64.c
+++ b/arch/sh/mm/tlbflush_64.c
@@ -455,6 +455,11 @@ void local_flush_tlb_kernel_range(unsigned long start, unsigned long end)
flush_tlb_all();
}
+void __flush_tlb_global(void)
+{
+ flush_tlb_all();
+}
+
void __update_tlb(struct vm_area_struct *vma, unsigned long address, pte_t pte)
{
}
diff --git a/arch/sh/mm/uncached.c b/arch/sh/mm/uncached.c
index 8a4eca551fc0..a7767da815e9 100644
--- a/arch/sh/mm/uncached.c
+++ b/arch/sh/mm/uncached.c
@@ -28,7 +28,7 @@ EXPORT_SYMBOL(virt_addr_uncached);
void __init uncached_init(void)
{
-#ifdef CONFIG_29BIT
+#if defined(CONFIG_29BIT) || !defined(CONFIG_MMU)
uncached_start = P2SEG;
#else
uncached_start = memory_end;
diff --git a/arch/sh/oprofile/Makefile b/arch/sh/oprofile/Makefile
index e85aae73e3dc..ce3b119021e7 100644
--- a/arch/sh/oprofile/Makefile
+++ b/arch/sh/oprofile/Makefile
@@ -1,5 +1,7 @@
obj-$(CONFIG_OPROFILE) += oprofile.o
+CFLAGS_common.o += -DUTS_MACHINE='"$(UTS_MACHINE)"'
+
DRIVER_OBJS = $(addprefix ../../../drivers/oprofile/, \
oprof.o cpu_buffer.o buffer_sync.o \
event_buffer.o oprofile_files.o \
diff --git a/arch/sh/oprofile/backtrace.c b/arch/sh/oprofile/backtrace.c
index 2bc74de23f08..37f3a75ea6cb 100644
--- a/arch/sh/oprofile/backtrace.c
+++ b/arch/sh/oprofile/backtrace.c
@@ -91,7 +91,7 @@ void sh_backtrace(struct pt_regs * const regs, unsigned int depth)
if (depth > backtrace_limit)
depth = backtrace_limit;
- stackaddr = (unsigned long *)regs->regs[15];
+ stackaddr = (unsigned long *)kernel_stack_pointer(regs);
if (!user_mode(regs)) {
if (depth)
unwind_stack(NULL, regs, stackaddr,
diff --git a/arch/sh/oprofile/common.c b/arch/sh/oprofile/common.c
index e10d89376f9b..b4c2d2b946dd 100644
--- a/arch/sh/oprofile/common.c
+++ b/arch/sh/oprofile/common.c
@@ -1,7 +1,7 @@
/*
* arch/sh/oprofile/init.c
*
- * Copyright (C) 2003 - 2008 Paul Mundt
+ * Copyright (C) 2003 - 2010 Paul Mundt
*
* Based on arch/mips/oprofile/common.c:
*
@@ -18,43 +18,46 @@
#include <linux/errno.h>
#include <linux/smp.h>
#include <linux/perf_event.h>
+#include <linux/slab.h>
#include <asm/processor.h>
-#ifdef CONFIG_HW_PERF_EVENTS
extern void sh_backtrace(struct pt_regs * const regs, unsigned int depth);
+#ifdef CONFIG_HW_PERF_EVENTS
+/*
+ * This will need to be reworked when multiple PMUs are supported.
+ */
+static char *sh_pmu_op_name;
+
char *op_name_from_perf_id(void)
{
- const char *pmu;
- char buf[20];
- int size;
-
- pmu = perf_pmu_name();
- if (!pmu)
- return NULL;
-
- size = snprintf(buf, sizeof(buf), "sh/%s", pmu);
- if (size > -1 && size < sizeof(buf))
- return buf;
-
- return NULL;
+ return sh_pmu_op_name;
}
int __init oprofile_arch_init(struct oprofile_operations *ops)
{
ops->backtrace = sh_backtrace;
+ if (perf_num_counters() == 0)
+ return -ENODEV;
+
+ sh_pmu_op_name = kasprintf(GFP_KERNEL, "%s/%s",
+ UTS_MACHINE, perf_pmu_name());
+ if (unlikely(!sh_pmu_op_name))
+ return -ENOMEM;
+
return oprofile_perf_init(ops);
}
void __exit oprofile_arch_exit(void)
{
oprofile_perf_exit();
+ kfree(sh_pmu_op_name);
}
#else
int __init oprofile_arch_init(struct oprofile_operations *ops)
{
- pr_info("oprofile: hardware counters not available\n");
+ ops->backtrace = sh_backtrace;
return -ENODEV;
}
void __exit oprofile_arch_exit(void) {}
diff --git a/arch/sh/tools/mach-types b/arch/sh/tools/mach-types
index b25aa554ee5e..0e68465e7b50 100644
--- a/arch/sh/tools/mach-types
+++ b/arch/sh/tools/mach-types
@@ -26,7 +26,6 @@ HD64461 HD64461
7724SE SH_7724_SOLUTION_ENGINE
7751SE SH_7751_SOLUTION_ENGINE
7780SE SH_7780_SOLUTION_ENGINE
-7751SYSTEMH SH_7751_SYSTEMH
HP6XX SH_HP6XX
DREAMCAST SH_DREAMCAST
SNAPGEAR SH_SECUREEDGE5410
@@ -52,6 +51,8 @@ MIGOR SH_MIGOR
RSK7201 SH_RSK7201
RSK7203 SH_RSK7203
AP325RXA SH_AP325RXA
+SH2007 SH_SH2007
+SH7757LCR SH_SH7757LCR
SH7763RDP SH_SH7763RDP
SH7785LCR SH_SH7785LCR
SH7785LCR_PT SH_SH7785LCR_PT
diff --git a/arch/sparc/Kconfig b/arch/sparc/Kconfig
index 3e9d31401fb2..45d9c87d083a 100644
--- a/arch/sparc/Kconfig
+++ b/arch/sparc/Kconfig
@@ -1,9 +1,3 @@
-# For a description of the syntax of this configuration file,
-# see Documentation/kbuild/kconfig-language.txt.
-#
-
-mainmenu "Linux/SPARC Kernel Configuration"
-
config 64BIT
bool "64-bit kernel" if ARCH = "sparc"
default ARCH = "sparc64"
@@ -19,6 +13,7 @@ config SPARC
bool
default y
select OF
+ select OF_PROMTREE
select HAVE_IDE
select HAVE_OPROFILE
select HAVE_ARCH_KGDB if !SMP || SPARC64
@@ -27,8 +22,6 @@ config SPARC
select RTC_CLASS
select RTC_DRV_M48T59
select HAVE_IRQ_WORK
- select HAVE_PERF_EVENTS
- select PERF_USE_VMALLOC
select HAVE_DMA_ATTRS
select HAVE_DMA_API_DEBUG
select HAVE_ARCH_JUMP_LABEL
@@ -55,7 +48,6 @@ config SPARC64
select RTC_DRV_BQ4802
select RTC_DRV_SUN4V
select RTC_DRV_STARFIRE
- select HAVE_IRQ_WORK
select HAVE_PERF_EVENTS
select PERF_USE_VMALLOC
diff --git a/arch/sparc/include/asm/Kbuild b/arch/sparc/include/asm/Kbuild
index deeb0fba8029..3c93f08ce187 100644
--- a/arch/sparc/include/asm/Kbuild
+++ b/arch/sparc/include/asm/Kbuild
@@ -7,7 +7,6 @@ header-y += display7seg.h
header-y += envctrl.h
header-y += fbio.h
header-y += jsflash.h
-header-y += openprom.h
header-y += openpromio.h
header-y += perfctr.h
header-y += psrcompat.h
diff --git a/arch/sparc/include/asm/floppy_32.h b/arch/sparc/include/asm/floppy_32.h
index c792830636de..86666f70322e 100644
--- a/arch/sparc/include/asm/floppy_32.h
+++ b/arch/sparc/include/asm/floppy_32.h
@@ -304,7 +304,8 @@ static struct linux_prom_registers fd_regs[2];
static int sun_floppy_init(void)
{
char state[128];
- int tnode, fd_node, num_regs;
+ phandle tnode, fd_node;
+ int num_regs;
struct resource r;
use_virtual_dma = 1;
diff --git a/arch/sparc/include/asm/highmem.h b/arch/sparc/include/asm/highmem.h
index ec23b0a87b98..3d7afbb7f4bb 100644
--- a/arch/sparc/include/asm/highmem.h
+++ b/arch/sparc/include/asm/highmem.h
@@ -70,8 +70,8 @@ static inline void kunmap(struct page *page)
kunmap_high(page);
}
-extern void *kmap_atomic(struct page *page, enum km_type type);
-extern void kunmap_atomic_notypecheck(void *kvaddr, enum km_type type);
+extern void *__kmap_atomic(struct page *page);
+extern void __kunmap_atomic(void *kvaddr);
extern struct page *kmap_atomic_to_page(void *vaddr);
#define flush_cache_kmaps() flush_cache_all()
diff --git a/arch/sparc/include/asm/io_32.h b/arch/sparc/include/asm/io_32.h
index 2889574608db..c2ced21c9dc1 100644
--- a/arch/sparc/include/asm/io_32.h
+++ b/arch/sparc/include/asm/io_32.h
@@ -208,6 +208,21 @@ _memset_io(volatile void __iomem *dst, int c, __kernel_size_t n)
#define memset_io(d,c,sz) _memset_io(d,c,sz)
static inline void
+_sbus_memcpy_fromio(void *dst, const volatile void __iomem *src,
+ __kernel_size_t n)
+{
+ char *d = dst;
+
+ while (n--) {
+ char tmp = sbus_readb(src);
+ *d++ = tmp;
+ src++;
+ }
+}
+
+#define sbus_memcpy_fromio(d, s, sz) _sbus_memcpy_fromio(d, s, sz)
+
+static inline void
_memcpy_fromio(void *dst, const volatile void __iomem *src, __kernel_size_t n)
{
char *d = dst;
@@ -222,6 +237,22 @@ _memcpy_fromio(void *dst, const volatile void __iomem *src, __kernel_size_t n)
#define memcpy_fromio(d,s,sz) _memcpy_fromio(d,s,sz)
static inline void
+_sbus_memcpy_toio(volatile void __iomem *dst, const void *src,
+ __kernel_size_t n)
+{
+ const char *s = src;
+ volatile void __iomem *d = dst;
+
+ while (n--) {
+ char tmp = *s++;
+ sbus_writeb(tmp, d);
+ d++;
+ }
+}
+
+#define sbus_memcpy_toio(d, s, sz) _sbus_memcpy_toio(d, s, sz)
+
+static inline void
_memcpy_toio(volatile void __iomem *dst, const void *src, __kernel_size_t n)
{
const char *s = src;
diff --git a/arch/sparc/include/asm/io_64.h b/arch/sparc/include/asm/io_64.h
index 9517d063c79c..9c8965415f0a 100644
--- a/arch/sparc/include/asm/io_64.h
+++ b/arch/sparc/include/asm/io_64.h
@@ -419,6 +419,21 @@ _memset_io(volatile void __iomem *dst, int c, __kernel_size_t n)
#define memset_io(d,c,sz) _memset_io(d,c,sz)
static inline void
+_sbus_memcpy_fromio(void *dst, const volatile void __iomem *src,
+ __kernel_size_t n)
+{
+ char *d = dst;
+
+ while (n--) {
+ char tmp = sbus_readb(src);
+ *d++ = tmp;
+ src++;
+ }
+}
+
+#define sbus_memcpy_fromio(d, s, sz) _sbus_memcpy_fromio(d, s, sz)
+
+static inline void
_memcpy_fromio(void *dst, const volatile void __iomem *src, __kernel_size_t n)
{
char *d = dst;
@@ -433,6 +448,22 @@ _memcpy_fromio(void *dst, const volatile void __iomem *src, __kernel_size_t n)
#define memcpy_fromio(d,s,sz) _memcpy_fromio(d,s,sz)
static inline void
+_sbus_memcpy_toio(volatile void __iomem *dst, const void *src,
+ __kernel_size_t n)
+{
+ const char *s = src;
+ volatile void __iomem *d = dst;
+
+ while (n--) {
+ char tmp = *s++;
+ sbus_writeb(tmp, d);
+ d++;
+ }
+}
+
+#define sbus_memcpy_toio(d, s, sz) _sbus_memcpy_toio(d, s, sz)
+
+static inline void
_memcpy_toio(volatile void __iomem *dst, const void *src, __kernel_size_t n)
{
const char *s = src;
diff --git a/arch/sparc/include/asm/jump_label.h b/arch/sparc/include/asm/jump_label.h
index 62e66d7b2fb6..427d4684e0d2 100644
--- a/arch/sparc/include/asm/jump_label.h
+++ b/arch/sparc/include/asm/jump_label.h
@@ -4,7 +4,6 @@
#ifdef __KERNEL__
#include <linux/types.h>
-#include <asm/system.h>
#define JUMP_LABEL_NOP_SIZE 4
@@ -14,6 +13,7 @@
"nop\n\t" \
"nop\n\t" \
".pushsection __jump_table, \"a\"\n\t"\
+ ".align 4\n\t" \
".word 1b, %l[" #label "], %c0\n\t" \
".popsection \n\t" \
: : "i" (key) : : label);\
diff --git a/arch/sparc/include/asm/openprom.h b/arch/sparc/include/asm/openprom.h
index 963e1a45c35f..81cd43432dc0 100644
--- a/arch/sparc/include/asm/openprom.h
+++ b/arch/sparc/include/asm/openprom.h
@@ -11,6 +11,8 @@
#define LINUX_OPPROM_MAGIC 0x10010407
#ifndef __ASSEMBLY__
+#include <linux/of.h>
+
/* V0 prom device operations. */
struct linux_dev_v0_funcs {
int (*v0_devopen)(char *device_str);
@@ -26,7 +28,7 @@ struct linux_dev_v0_funcs {
/* V2 and later prom device operations. */
struct linux_dev_v2_funcs {
- int (*v2_inst2pkg)(int d); /* Convert ihandle to phandle */
+ phandle (*v2_inst2pkg)(int d); /* Convert ihandle to phandle */
char * (*v2_dumb_mem_alloc)(char *va, unsigned sz);
void (*v2_dumb_mem_free)(char *va, unsigned sz);
@@ -168,12 +170,12 @@ struct linux_romvec {
/* Routines for traversing the prom device tree. */
struct linux_nodeops {
- int (*no_nextnode)(int node);
- int (*no_child)(int node);
- int (*no_proplen)(int node, const char *name);
- int (*no_getprop)(int node, const char *name, char *val);
- int (*no_setprop)(int node, const char *name, char *val, int len);
- char * (*no_nextprop)(int node, char *name);
+ phandle (*no_nextnode)(phandle node);
+ phandle (*no_child)(phandle node);
+ int (*no_proplen)(phandle node, const char *name);
+ int (*no_getprop)(phandle node, const char *name, char *val);
+ int (*no_setprop)(phandle node, const char *name, char *val, int len);
+ char * (*no_nextprop)(phandle node, char *name);
};
/* More fun PROM structures for device probing. */
diff --git a/arch/sparc/include/asm/oplib_32.h b/arch/sparc/include/asm/oplib_32.h
index 33e31ce6b31f..51296a6f5005 100644
--- a/arch/sparc/include/asm/oplib_32.h
+++ b/arch/sparc/include/asm/oplib_32.h
@@ -30,7 +30,7 @@ extern unsigned int prom_rev, prom_prev;
/* Root node of the prom device tree, this stays constant after
* initialization is complete.
*/
-extern int prom_root_node;
+extern phandle prom_root_node;
/* Pointer to prom structure containing the device tree traversal
* and usage utility functions. Only prom-lib should use these,
@@ -178,68 +178,68 @@ extern void prom_putsegment(int context, unsigned long virt_addr,
/* PROM device tree traversal functions... */
/* Get the child node of the given node, or zero if no child exists. */
-extern int prom_getchild(int parent_node);
+extern phandle prom_getchild(phandle parent_node);
/* Get the next sibling node of the given node, or zero if no further
* siblings exist.
*/
-extern int prom_getsibling(int node);
+extern phandle prom_getsibling(phandle node);
/* Get the length, at the passed node, of the given property type.
* Returns -1 on error (ie. no such property at this node).
*/
-extern int prom_getproplen(int thisnode, const char *property);
+extern int prom_getproplen(phandle thisnode, const char *property);
/* Fetch the requested property using the given buffer. Returns
* the number of bytes the prom put into your buffer or -1 on error.
*/
-extern int __must_check prom_getproperty(int thisnode, const char *property,
+extern int __must_check prom_getproperty(phandle thisnode, const char *property,
char *prop_buffer, int propbuf_size);
/* Acquire an integer property. */
-extern int prom_getint(int node, char *property);
+extern int prom_getint(phandle node, char *property);
/* Acquire an integer property, with a default value. */
-extern int prom_getintdefault(int node, char *property, int defval);
+extern int prom_getintdefault(phandle node, char *property, int defval);
/* Acquire a boolean property, 0=FALSE 1=TRUE. */
-extern int prom_getbool(int node, char *prop);
+extern int prom_getbool(phandle node, char *prop);
/* Acquire a string property, null string on error. */
-extern void prom_getstring(int node, char *prop, char *buf, int bufsize);
+extern void prom_getstring(phandle node, char *prop, char *buf, int bufsize);
/* Does the passed node have the given "name"? YES=1 NO=0 */
-extern int prom_nodematch(int thisnode, char *name);
+extern int prom_nodematch(phandle thisnode, char *name);
/* Search all siblings starting at the passed node for "name" matching
* the given string. Returns the node on success, zero on failure.
*/
-extern int prom_searchsiblings(int node_start, char *name);
+extern phandle prom_searchsiblings(phandle node_start, char *name);
/* Return the first property type, as a string, for the given node.
* Returns a null string on error.
*/
-extern char *prom_firstprop(int node, char *buffer);
+extern char *prom_firstprop(phandle node, char *buffer);
/* Returns the next property after the passed property for the given
* node. Returns null string on failure.
*/
-extern char *prom_nextprop(int node, char *prev_property, char *buffer);
+extern char *prom_nextprop(phandle node, char *prev_property, char *buffer);
/* Returns phandle of the path specified */
-extern int prom_finddevice(char *name);
+extern phandle prom_finddevice(char *name);
/* Returns 1 if the specified node has given property. */
-extern int prom_node_has_property(int node, char *property);
+extern int prom_node_has_property(phandle node, char *property);
/* Set the indicated property at the given node with the passed value.
* Returns the number of bytes of your value that the prom took.
*/
-extern int prom_setprop(int node, const char *prop_name, char *prop_value,
+extern int prom_setprop(phandle node, const char *prop_name, char *prop_value,
int value_size);
-extern int prom_pathtoinode(char *path);
-extern int prom_inst2pkg(int);
+extern phandle prom_pathtoinode(char *path);
+extern phandle prom_inst2pkg(int);
/* Dorking with Bus ranges... */
@@ -247,13 +247,13 @@ extern int prom_inst2pkg(int);
extern void prom_apply_obio_ranges(struct linux_prom_registers *obioregs, int nregs);
/* Apply ranges of any prom node (and optionally parent node as well) to registers. */
-extern void prom_apply_generic_ranges(int node, int parent,
+extern void prom_apply_generic_ranges(phandle node, phandle parent,
struct linux_prom_registers *sbusregs, int nregs);
/* CPU probing helpers. */
-int cpu_find_by_instance(int instance, int *prom_node, int *mid);
-int cpu_find_by_mid(int mid, int *prom_node);
-int cpu_get_hwmid(int prom_node);
+int cpu_find_by_instance(int instance, phandle *prom_node, int *mid);
+int cpu_find_by_mid(int mid, phandle *prom_node);
+int cpu_get_hwmid(phandle prom_node);
extern spinlock_t prom_lock;
diff --git a/arch/sparc/include/asm/oplib_64.h b/arch/sparc/include/asm/oplib_64.h
index 3e0b2d62303d..c9cc078e3e31 100644
--- a/arch/sparc/include/asm/oplib_64.h
+++ b/arch/sparc/include/asm/oplib_64.h
@@ -16,7 +16,7 @@ extern char prom_version[];
/* Root node of the prom device tree, this stays constant after
* initialization is complete.
*/
-extern int prom_root_node;
+extern phandle prom_root_node;
/* PROM stdin and stdout */
extern int prom_stdin, prom_stdout;
@@ -24,7 +24,7 @@ extern int prom_stdin, prom_stdout;
/* /chosen node of the prom device tree, this stays constant after
* initialization is complete.
*/
-extern int prom_chosen_node;
+extern phandle prom_chosen_node;
/* Helper values and strings in arch/sparc64/kernel/head.S */
extern const char prom_peer_name[];
@@ -218,68 +218,69 @@ extern void prom_unmap(unsigned long size, unsigned long vaddr);
/* PROM device tree traversal functions... */
/* Get the child node of the given node, or zero if no child exists. */
-extern int prom_getchild(int parent_node);
+extern phandle prom_getchild(phandle parent_node);
/* Get the next sibling node of the given node, or zero if no further
* siblings exist.
*/
-extern int prom_getsibling(int node);
+extern phandle prom_getsibling(phandle node);
/* Get the length, at the passed node, of the given property type.
* Returns -1 on error (ie. no such property at this node).
*/
-extern int prom_getproplen(int thisnode, const char *property);
+extern int prom_getproplen(phandle thisnode, const char *property);
/* Fetch the requested property using the given buffer. Returns
* the number of bytes the prom put into your buffer or -1 on error.
*/
-extern int prom_getproperty(int thisnode, const char *property,
+extern int prom_getproperty(phandle thisnode, const char *property,
char *prop_buffer, int propbuf_size);
/* Acquire an integer property. */
-extern int prom_getint(int node, const char *property);
+extern int prom_getint(phandle node, const char *property);
/* Acquire an integer property, with a default value. */
-extern int prom_getintdefault(int node, const char *property, int defval);
+extern int prom_getintdefault(phandle node, const char *property, int defval);
/* Acquire a boolean property, 0=FALSE 1=TRUE. */
-extern int prom_getbool(int node, const char *prop);
+extern int prom_getbool(phandle node, const char *prop);
/* Acquire a string property, null string on error. */
-extern void prom_getstring(int node, const char *prop, char *buf, int bufsize);
+extern void prom_getstring(phandle node, const char *prop, char *buf,
+ int bufsize);
/* Does the passed node have the given "name"? YES=1 NO=0 */
-extern int prom_nodematch(int thisnode, const char *name);
+extern int prom_nodematch(phandle thisnode, const char *name);
/* Search all siblings starting at the passed node for "name" matching
* the given string. Returns the node on success, zero on failure.
*/
-extern int prom_searchsiblings(int node_start, const char *name);
+extern phandle prom_searchsiblings(phandle node_start, const char *name);
/* Return the first property type, as a string, for the given node.
* Returns a null string on error. Buffer should be at least 32B long.
*/
-extern char *prom_firstprop(int node, char *buffer);
+extern char *prom_firstprop(phandle node, char *buffer);
/* Returns the next property after the passed property for the given
* node. Returns null string on failure. Buffer should be at least 32B long.
*/
-extern char *prom_nextprop(int node, const char *prev_property, char *buffer);
+extern char *prom_nextprop(phandle node, const char *prev_property, char *buf);
/* Returns 1 if the specified node has given property. */
-extern int prom_node_has_property(int node, const char *property);
+extern int prom_node_has_property(phandle node, const char *property);
/* Returns phandle of the path specified */
-extern int prom_finddevice(const char *name);
+extern phandle prom_finddevice(const char *name);
/* Set the indicated property at the given node with the passed value.
* Returns the number of bytes of your value that the prom took.
*/
-extern int prom_setprop(int node, const char *prop_name, char *prop_value,
+extern int prom_setprop(phandle node, const char *prop_name, char *prop_value,
int value_size);
-extern int prom_pathtoinode(const char *path);
-extern int prom_inst2pkg(int);
+extern phandle prom_pathtoinode(const char *path);
+extern phandle prom_inst2pkg(int);
extern int prom_service_exists(const char *service_name);
extern void prom_sun4v_guest_soft_state(void);
diff --git a/arch/sparc/include/asm/pci_64.h b/arch/sparc/include/asm/pci_64.h
index 5312782f0b5e..948b686ec089 100644
--- a/arch/sparc/include/asm/pci_64.h
+++ b/arch/sparc/include/asm/pci_64.h
@@ -38,7 +38,7 @@ static inline void pcibios_penalize_isa_irq(int irq, int active)
* types on sparc64. However, it requires that the device
* can drive enough of the 64 bits.
*/
-#define PCI64_REQUIRED_MASK (~(dma64_addr_t)0)
+#define PCI64_REQUIRED_MASK (~(u64)0)
#define PCI64_ADDR_BASE 0xfffc000000000000UL
#ifdef CONFIG_PCI
diff --git a/arch/sparc/include/asm/pgtable_32.h b/arch/sparc/include/asm/pgtable_32.h
index 0ece77f47753..303bd4dc8292 100644
--- a/arch/sparc/include/asm/pgtable_32.h
+++ b/arch/sparc/include/asm/pgtable_32.h
@@ -304,10 +304,7 @@ BTFIXUPDEF_CALL(pte_t *, pte_offset_kernel, pmd_t *, unsigned long)
* and sun4c is guaranteed to have no highmem anyway.
*/
#define pte_offset_map(d, a) pte_offset_kernel(d,a)
-#define pte_offset_map_nested(d, a) pte_offset_kernel(d,a)
-
#define pte_unmap(pte) do{}while(0)
-#define pte_unmap_nested(pte) do{}while(0)
/* Certain architectures need to do special things when pte's
* within a page table are directly modified. Thus, the following
diff --git a/arch/sparc/include/asm/pgtable_64.h b/arch/sparc/include/asm/pgtable_64.h
index f5b5fa76c02d..f8dddb7045bb 100644
--- a/arch/sparc/include/asm/pgtable_64.h
+++ b/arch/sparc/include/asm/pgtable_64.h
@@ -652,9 +652,7 @@ static inline int pte_special(pte_t pte)
((address >> PAGE_SHIFT) & (PTRS_PER_PTE - 1)))
#define pte_offset_kernel pte_index
#define pte_offset_map pte_index
-#define pte_offset_map_nested pte_index
#define pte_unmap(pte) do { } while (0)
-#define pte_unmap_nested(pte) do { } while (0)
/* Actual page table PTE updates. */
extern void tlb_batch_add(struct mm_struct *mm, unsigned long vaddr, pte_t *ptep, pte_t orig);
diff --git a/arch/sparc/include/asm/prom.h b/arch/sparc/include/asm/prom.h
index 291f12575edd..56bbaadef646 100644
--- a/arch/sparc/include/asm/prom.h
+++ b/arch/sparc/include/asm/prom.h
@@ -18,6 +18,7 @@
* 2 of the License, or (at your option) any later version.
*/
#include <linux/types.h>
+#include <linux/of_pdt.h>
#include <linux/proc_fs.h>
#include <linux/mutex.h>
#include <asm/atomic.h>
@@ -67,8 +68,8 @@ extern struct device_node *of_console_device;
extern char *of_console_path;
extern char *of_console_options;
-extern void (*prom_build_more)(struct device_node *dp, struct device_node ***nextp);
-extern char *build_full_name(struct device_node *dp);
+extern void irq_trans_init(struct device_node *dp);
+extern char *build_path_component(struct device_node *dp);
#endif /* __KERNEL__ */
#endif /* _SPARC_PROM_H */
diff --git a/arch/sparc/kernel/auxio_32.c b/arch/sparc/kernel/auxio_32.c
index ee8d214cae1e..35f48837871a 100644
--- a/arch/sparc/kernel/auxio_32.c
+++ b/arch/sparc/kernel/auxio_32.c
@@ -23,7 +23,7 @@ static DEFINE_SPINLOCK(auxio_lock);
void __init auxio_probe(void)
{
- int node, auxio_nd;
+ phandle node, auxio_nd;
struct linux_prom_registers auxregs[1];
struct resource r;
@@ -113,7 +113,7 @@ volatile unsigned char * auxio_power_register = NULL;
void __init auxio_power_probe(void)
{
struct linux_prom_registers regs;
- int node;
+ phandle node;
struct resource r;
/* Attempt to find the sun4m power control node. */
diff --git a/arch/sparc/kernel/btext.c b/arch/sparc/kernel/btext.c
index 8cc2d56ffe9a..89aa4eb20cf5 100644
--- a/arch/sparc/kernel/btext.c
+++ b/arch/sparc/kernel/btext.c
@@ -40,7 +40,7 @@ static unsigned char *dispDeviceBase __force_data;
static unsigned char vga_font[cmapsz];
-static int __init btext_initialize(unsigned int node)
+static int __init btext_initialize(phandle node)
{
unsigned int width, height, depth, pitch;
unsigned long address = 0;
@@ -309,7 +309,7 @@ static struct console btext_console = {
int __init btext_find_display(void)
{
- unsigned int node;
+ phandle node;
char type[32];
int ret;
diff --git a/arch/sparc/kernel/devices.c b/arch/sparc/kernel/devices.c
index 62dc7a021413..d2eddd6647cd 100644
--- a/arch/sparc/kernel/devices.c
+++ b/arch/sparc/kernel/devices.c
@@ -31,9 +31,9 @@ static char *cpu_mid_prop(void)
return "mid";
}
-static int check_cpu_node(int nd, int *cur_inst,
- int (*compare)(int, int, void *), void *compare_arg,
- int *prom_node, int *mid)
+static int check_cpu_node(phandle nd, int *cur_inst,
+ int (*compare)(phandle, int, void *), void *compare_arg,
+ phandle *prom_node, int *mid)
{
if (!compare(nd, *cur_inst, compare_arg)) {
if (prom_node)
@@ -51,8 +51,8 @@ static int check_cpu_node(int nd, int *cur_inst,
return -ENODEV;
}
-static int __cpu_find_by(int (*compare)(int, int, void *), void *compare_arg,
- int *prom_node, int *mid)
+static int __cpu_find_by(int (*compare)(phandle, int, void *),
+ void *compare_arg, phandle *prom_node, int *mid)
{
struct device_node *dp;
int cur_inst;
@@ -71,7 +71,7 @@ static int __cpu_find_by(int (*compare)(int, int, void *), void *compare_arg,
return -ENODEV;
}
-static int cpu_instance_compare(int nd, int instance, void *_arg)
+static int cpu_instance_compare(phandle nd, int instance, void *_arg)
{
int desired_instance = (int) _arg;
@@ -80,13 +80,13 @@ static int cpu_instance_compare(int nd, int instance, void *_arg)
return -ENODEV;
}
-int cpu_find_by_instance(int instance, int *prom_node, int *mid)
+int cpu_find_by_instance(int instance, phandle *prom_node, int *mid)
{
return __cpu_find_by(cpu_instance_compare, (void *)instance,
prom_node, mid);
}
-static int cpu_mid_compare(int nd, int instance, void *_arg)
+static int cpu_mid_compare(phandle nd, int instance, void *_arg)
{
int desired_mid = (int) _arg;
int this_mid;
@@ -98,7 +98,7 @@ static int cpu_mid_compare(int nd, int instance, void *_arg)
return -ENODEV;
}
-int cpu_find_by_mid(int mid, int *prom_node)
+int cpu_find_by_mid(int mid, phandle *prom_node)
{
return __cpu_find_by(cpu_mid_compare, (void *)mid,
prom_node, NULL);
@@ -108,7 +108,7 @@ int cpu_find_by_mid(int mid, int *prom_node)
* address (0-3). This gives us the true hardware mid, which might have
* some other bits set. On 4d hardware and software mids are the same.
*/
-int cpu_get_hwmid(int prom_node)
+int cpu_get_hwmid(phandle prom_node)
{
return prom_getintdefault(prom_node, cpu_mid_prop(), -ENODEV);
}
@@ -119,7 +119,8 @@ void __init device_scan(void)
#ifndef CONFIG_SMP
{
- int err, cpu_node;
+ phandle cpu_node;
+ int err;
err = cpu_find_by_instance(0, &cpu_node, NULL);
if (err) {
/* Probably a sun4e, Sun is trying to trick us ;-) */
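The auxio, btext, and devices.c hunks above are part of a tree-wide switch from plain int OBP node handles to the dedicated phandle type. A minimal sketch of the pattern on the caller side, modeled on cpu_get_hwmid() above; the helper name and node choice are illustrative, not part of the patch:

#include <linux/init.h>
#include <linux/errno.h>
#include <asm/oplib.h>		/* prom_getchild(), prom_getintdefault() */
#include <asm/prom.h>

/* Illustrative: read the "mid" property of the root node's first child. */
static int __init example_read_mid(void)
{
	phandle node = prom_getchild(prom_root_node);

	if (node == 0 || node == (phandle) -1)
		return -ENODEV;

	return prom_getintdefault(node, "mid", -ENODEV);
}

Using phandle instead of int makes the handle/integer distinction explicit at every prototype the later hunks in this series touch.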
diff --git a/arch/sparc/kernel/irq_32.c b/arch/sparc/kernel/irq_32.c
index 0116d8d10def..5ad6e5c5dbb3 100644
--- a/arch/sparc/kernel/irq_32.c
+++ b/arch/sparc/kernel/irq_32.c
@@ -365,7 +365,7 @@ static int request_fast_irq(unsigned int irq,
unsigned long flags;
unsigned int cpu_irq;
int ret;
-#ifdef CONFIG_SMP
+#if defined CONFIG_SMP && !defined CONFIG_SPARC_LEON
struct tt_entry *trap_table;
extern struct tt_entry trapbase_cpu1, trapbase_cpu2, trapbase_cpu3;
#endif
@@ -425,7 +425,7 @@ static int request_fast_irq(unsigned int irq,
table[SP_TRAP_IRQ1+(cpu_irq-1)].inst_four = SPARC_NOP;
INSTANTIATE(sparc_ttable)
-#ifdef CONFIG_SMP
+#if defined CONFIG_SMP && !defined CONFIG_SPARC_LEON
trap_table = &trapbase_cpu1; INSTANTIATE(trap_table)
trap_table = &trapbase_cpu2; INSTANTIATE(trap_table)
trap_table = &trapbase_cpu3; INSTANTIATE(trap_table)
diff --git a/arch/sparc/kernel/leon_kernel.c b/arch/sparc/kernel/leon_kernel.c
index 6a7b4dbc8e09..2d51527d810f 100644
--- a/arch/sparc/kernel/leon_kernel.c
+++ b/arch/sparc/kernel/leon_kernel.c
@@ -282,5 +282,5 @@ void __init leon_init_IRQ(void)
void __init leon_init(void)
{
- prom_build_more = &leon_node_init;
+ of_pdt_build_more = &leon_node_init;
}
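The one-line change above follows the rename of the prom_build_more hook to of_pdt_build_more now that tree building lives in the generic OF layer; leon_node_init() itself is assigned unchanged, so the hook keeps its two-argument signature. A sketch of how a platform registers such a hook, assuming the declaration lives in <linux/of_pdt.h> (which prom_common.c now includes); the example_* names are made up for illustration:

#include <linux/init.h>
#include <linux/of.h>
#include <linux/of_pdt.h>	/* of_pdt_build_more (assumed location) */

static void __init example_build_more(struct device_node *dp,
				      struct device_node ***nextp)
{
	/* Create platform-specific child nodes of 'dp' here, chaining
	 * them through *nextp, as the LEON hook does. */
}

static void __init example_platform_init(void)
{
	of_pdt_build_more = &example_build_more;
}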
diff --git a/arch/sparc/kernel/leon_smp.c b/arch/sparc/kernel/leon_smp.c
index e1656fc41ccb..16582d85368a 100644
--- a/arch/sparc/kernel/leon_smp.c
+++ b/arch/sparc/kernel/leon_smp.c
@@ -12,7 +12,6 @@
#include <linux/sched.h>
#include <linux/threads.h>
#include <linux/smp.h>
-#include <linux/smp_lock.h>
#include <linux/interrupt.h>
#include <linux/kernel_stat.h>
#include <linux/init.h>
@@ -56,8 +55,8 @@ void __init leon_configure_cache_smp(void);
static inline unsigned long do_swap(volatile unsigned long *ptr,
unsigned long val)
{
- __asm__ __volatile__("swapa [%1] %2, %0\n\t" : "=&r"(val)
- : "r"(ptr), "i"(ASI_LEON_DCACHE_MISS)
+ __asm__ __volatile__("swapa [%2] %3, %0\n\t" : "=&r"(val)
+ : "0"(val), "r"(ptr), "i"(ASI_LEON_DCACHE_MISS)
: "memory");
return val;
}
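The do_swap() hunk above fixes the inline-asm constraints: with val named only as the "=&r" output, nothing forced the compiler to load val into the register that swapa exchanges, so garbage could be written to *ptr. Tying the first input to operand 0 with a "0" constraint (and renumbering ptr and the ASI to %2/%3) closes that hole. The patched helper, reproduced from the hunk with a comment on the constraint:

static inline unsigned long do_swap(volatile unsigned long *ptr,
				    unsigned long val)
{
	/* "0"(val): the value to swap shares operand 0's register, so it
	 * is guaranteed to be live before the swapa executes. */
	__asm__ __volatile__("swapa [%2] %3, %0\n\t"
			     : "=&r" (val)
			     : "0" (val), "r" (ptr), "i" (ASI_LEON_DCACHE_MISS)
			     : "memory");
	return val;
}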
diff --git a/arch/sparc/kernel/pcic.c b/arch/sparc/kernel/pcic.c
index d36a8d391ca0..aeaa09a3c655 100644
--- a/arch/sparc/kernel/pcic.c
+++ b/arch/sparc/kernel/pcic.c
@@ -284,7 +284,7 @@ int __init pcic_probe(void)
struct linux_prom_registers regs[PROMREG_MAX];
struct linux_pbm_info* pbm;
char namebuf[64];
- int node;
+ phandle node;
int err;
if (pcic0_up) {
@@ -440,7 +440,7 @@ static int __devinit pdev_to_pnode(struct linux_pbm_info *pbm,
{
struct linux_prom_pci_registers regs[PROMREG_MAX];
int err;
- int node = prom_getchild(pbm->prom_node);
+ phandle node = prom_getchild(pbm->prom_node);
while(node) {
err = prom_getproperty(node, "reg",
diff --git a/arch/sparc/kernel/prom.h b/arch/sparc/kernel/prom.h
index eeb04a782ec8..cf5fe1c0b024 100644
--- a/arch/sparc/kernel/prom.h
+++ b/arch/sparc/kernel/prom.h
@@ -4,12 +4,6 @@
#include <linux/spinlock.h>
#include <asm/prom.h>
-extern void * prom_early_alloc(unsigned long size);
-extern void irq_trans_init(struct device_node *dp);
-
-extern unsigned int prom_unique_id;
-
-extern char *build_path_component(struct device_node *dp);
extern void of_console_init(void);
extern unsigned int prom_early_allocated;
diff --git a/arch/sparc/kernel/prom_common.c b/arch/sparc/kernel/prom_common.c
index 1f830da2ddf2..ed25834328f4 100644
--- a/arch/sparc/kernel/prom_common.c
+++ b/arch/sparc/kernel/prom_common.c
@@ -20,14 +20,13 @@
#include <linux/mutex.h>
#include <linux/slab.h>
#include <linux/of.h>
+#include <linux/of_pdt.h>
#include <asm/prom.h>
#include <asm/oplib.h>
#include <asm/leon.h>
#include "prom.h"
-void (*prom_build_more)(struct device_node *dp, struct device_node ***nextp);
-
struct device_node *of_console_device;
EXPORT_SYMBOL(of_console_device);
@@ -119,192 +118,47 @@ int of_find_in_proplist(const char *list, const char *match, int len)
}
EXPORT_SYMBOL(of_find_in_proplist);
-unsigned int prom_unique_id;
-
-static struct property * __init build_one_prop(phandle node, char *prev,
- char *special_name,
- void *special_val,
- int special_len)
+/*
+ * SPARC32 and SPARC64's prom_nextprop() do things differently
+ * here, despite sharing the same interface. SPARC32 doesn't fill in 'buf',
+ * returning NULL on an error. SPARC64 fills in 'buf', but sets it to an
+ * empty string upon error.
+ */
+static int __init handle_nextprop_quirks(char *buf, const char *name)
{
- static struct property *tmp = NULL;
- struct property *p;
- const char *name;
-
- if (tmp) {
- p = tmp;
- memset(p, 0, sizeof(*p) + 32);
- tmp = NULL;
- } else {
- p = prom_early_alloc(sizeof(struct property) + 32);
- p->unique_id = prom_unique_id++;
- }
-
- p->name = (char *) (p + 1);
- if (special_name) {
- strcpy(p->name, special_name);
- p->length = special_len;
- p->value = prom_early_alloc(special_len);
- memcpy(p->value, special_val, special_len);
- } else {
- if (prev == NULL) {
- name = prom_firstprop(node, p->name);
- } else {
- name = prom_nextprop(node, prev, p->name);
- }
+ if (!name || strlen(name) == 0)
+ return -1;
- if (!name || strlen(name) == 0) {
- tmp = p;
- return NULL;
- }
#ifdef CONFIG_SPARC32
- strcpy(p->name, name);
+ strcpy(buf, name);
#endif
- p->length = prom_getproplen(node, p->name);
- if (p->length <= 0) {
- p->length = 0;
- } else {
- int len;
-
- p->value = prom_early_alloc(p->length + 1);
- len = prom_getproperty(node, p->name, p->value,
- p->length);
- if (len <= 0)
- p->length = 0;
- ((unsigned char *)p->value)[p->length] = '\0';
- }
- }
- return p;
-}
-
-static struct property * __init build_prop_list(phandle node)
-{
- struct property *head, *tail;
-
- head = tail = build_one_prop(node, NULL,
- ".node", &node, sizeof(node));
-
- tail->next = build_one_prop(node, NULL, NULL, NULL, 0);
- tail = tail->next;
- while(tail) {
- tail->next = build_one_prop(node, tail->name,
- NULL, NULL, 0);
- tail = tail->next;
- }
-
- return head;
-}
-
-static char * __init get_one_property(phandle node, const char *name)
-{
- char *buf = "<NULL>";
- int len;
-
- len = prom_getproplen(node, name);
- if (len > 0) {
- buf = prom_early_alloc(len);
- len = prom_getproperty(node, name, buf, len);
- }
-
- return buf;
-}
-
-static struct device_node * __init prom_create_node(phandle node,
- struct device_node *parent)
-{
- struct device_node *dp;
-
- if (!node)
- return NULL;
-
- dp = prom_early_alloc(sizeof(*dp));
- dp->unique_id = prom_unique_id++;
- dp->parent = parent;
-
- kref_init(&dp->kref);
-
- dp->name = get_one_property(node, "name");
- dp->type = get_one_property(node, "device_type");
- dp->phandle = node;
-
- dp->properties = build_prop_list(node);
-
- irq_trans_init(dp);
-
- return dp;
-}
-
-char * __init build_full_name(struct device_node *dp)
-{
- int len, ourlen, plen;
- char *n;
-
- plen = strlen(dp->parent->full_name);
- ourlen = strlen(dp->path_component_name);
- len = ourlen + plen + 2;
-
- n = prom_early_alloc(len);
- strcpy(n, dp->parent->full_name);
- if (!of_node_is_root(dp->parent)) {
- strcpy(n + plen, "/");
- plen++;
- }
- strcpy(n + plen, dp->path_component_name);
-
- return n;
+ return 0;
}
-static struct device_node * __init prom_build_tree(struct device_node *parent,
- phandle node,
- struct device_node ***nextp)
+static int __init prom_common_nextprop(phandle node, char *prev, char *buf)
{
- struct device_node *ret = NULL, *prev_sibling = NULL;
- struct device_node *dp;
-
- while (1) {
- dp = prom_create_node(node, parent);
- if (!dp)
- break;
-
- if (prev_sibling)
- prev_sibling->sibling = dp;
-
- if (!ret)
- ret = dp;
- prev_sibling = dp;
-
- *(*nextp) = dp;
- *nextp = &dp->allnext;
-
- dp->path_component_name = build_path_component(dp);
- dp->full_name = build_full_name(dp);
-
- dp->child = prom_build_tree(dp, prom_getchild(node), nextp);
-
- if (prom_build_more)
- prom_build_more(dp, nextp);
-
- node = prom_getsibling(node);
- }
+ const char *name;
- return ret;
+ buf[0] = '\0';
+ name = prom_nextprop(node, prev, buf);
+ return handle_nextprop_quirks(buf, name);
}
unsigned int prom_early_allocated __initdata;
+static struct of_pdt_ops prom_sparc_ops __initdata = {
+ .nextprop = prom_common_nextprop,
+ .getproplen = prom_getproplen,
+ .getproperty = prom_getproperty,
+ .getchild = prom_getchild,
+ .getsibling = prom_getsibling,
+};
+
void __init prom_build_devicetree(void)
{
- struct device_node **nextp;
-
- allnodes = prom_create_node(prom_root_node, NULL);
- allnodes->path_component_name = "";
- allnodes->full_name = "/";
-
- nextp = &allnodes->allnext;
- allnodes->child = prom_build_tree(allnodes,
- prom_getchild(allnodes->phandle),
- &nextp);
+ of_pdt_build_devicetree(prom_root_node, &prom_sparc_ops);
of_console_init();
- printk("PROM: Built device tree with %u bytes of memory.\n",
- prom_early_allocated);
+ pr_info("PROM: Built device tree with %u bytes of memory.\n",
+ prom_early_allocated);
}
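The refactor above replaces the open-coded property and tree building with the generic of_pdt code, keeping only a thin ops table plus handle_nextprop_quirks() to paper over the SPARC32/SPARC64 prom_nextprop() differences. A hypothetical caller inside prom_common.c, to show the unified behaviour the wrapper provides (demo_nextprop() is not part of the patch):

/* Hypothetical: whichever prom_nextprop() flavour sits underneath, the
 * wrapper returns 0 with the next property name in 'next', or -1 when
 * there are no more properties. */
static void __init demo_nextprop(phandle node)
{
	char cur[32] = "name";	/* a property every node carries */
	char next[32];

	if (prom_common_nextprop(node, cur, next) == 0)
		pr_info("property after \"name\": %s\n", next);
	else
		pr_info("\"name\" was the last property\n");
}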
diff --git a/arch/sparc/kernel/ptrace_32.c b/arch/sparc/kernel/ptrace_32.c
index e608f397e11f..27b9e93d0121 100644
--- a/arch/sparc/kernel/ptrace_32.c
+++ b/arch/sparc/kernel/ptrace_32.c
@@ -323,18 +323,35 @@ const struct user_regset_view *task_user_regset_view(struct task_struct *task)
return &user_sparc32_view;
}
-long arch_ptrace(struct task_struct *child, long request, long addr, long data)
+struct fps {
+ unsigned long regs[32];
+ unsigned long fsr;
+ unsigned long flags;
+ unsigned long extra;
+ unsigned long fpqd;
+ struct fq {
+ unsigned long *insnaddr;
+ unsigned long insn;
+ } fpq[16];
+};
+
+long arch_ptrace(struct task_struct *child, long request,
+ unsigned long addr, unsigned long data)
{
unsigned long addr2 = current->thread.kregs->u_regs[UREG_I4];
+ void __user *addr2p;
const struct user_regset_view *view;
+ struct pt_regs __user *pregs;
+ struct fps __user *fps;
int ret;
view = task_user_regset_view(current);
+ addr2p = (void __user *) addr2;
+ pregs = (struct pt_regs __user *) addr;
+ fps = (struct fps __user *) addr;
switch(request) {
case PTRACE_GETREGS: {
- struct pt_regs __user *pregs = (struct pt_regs __user *) addr;
-
ret = copy_regset_to_user(child, view, REGSET_GENERAL,
32 * sizeof(u32),
4 * sizeof(u32),
@@ -348,8 +365,6 @@ long arch_ptrace(struct task_struct *child, long request, long addr, long data)
}
case PTRACE_SETREGS: {
- struct pt_regs __user *pregs = (struct pt_regs __user *) addr;
-
ret = copy_regset_from_user(child, view, REGSET_GENERAL,
32 * sizeof(u32),
4 * sizeof(u32),
@@ -363,19 +378,6 @@ long arch_ptrace(struct task_struct *child, long request, long addr, long data)
}
case PTRACE_GETFPREGS: {
- struct fps {
- unsigned long regs[32];
- unsigned long fsr;
- unsigned long flags;
- unsigned long extra;
- unsigned long fpqd;
- struct fq {
- unsigned long *insnaddr;
- unsigned long insn;
- } fpq[16];
- };
- struct fps __user *fps = (struct fps __user *) addr;
-
ret = copy_regset_to_user(child, view, REGSET_FP,
0 * sizeof(u32),
32 * sizeof(u32),
@@ -397,19 +399,6 @@ long arch_ptrace(struct task_struct *child, long request, long addr, long data)
}
case PTRACE_SETFPREGS: {
- struct fps {
- unsigned long regs[32];
- unsigned long fsr;
- unsigned long flags;
- unsigned long extra;
- unsigned long fpqd;
- struct fq {
- unsigned long *insnaddr;
- unsigned long insn;
- } fpq[16];
- };
- struct fps __user *fps = (struct fps __user *) addr;
-
ret = copy_regset_from_user(child, view, REGSET_FP,
0 * sizeof(u32),
32 * sizeof(u32),
@@ -424,8 +413,7 @@ long arch_ptrace(struct task_struct *child, long request, long addr, long data)
case PTRACE_READTEXT:
case PTRACE_READDATA:
- ret = ptrace_readdata(child, addr,
- (void __user *) addr2, data);
+ ret = ptrace_readdata(child, addr, addr2p, data);
if (ret == data)
ret = 0;
@@ -435,8 +423,7 @@ long arch_ptrace(struct task_struct *child, long request, long addr, long data)
case PTRACE_WRITETEXT:
case PTRACE_WRITEDATA:
- ret = ptrace_writedata(child, (void __user *) addr2,
- addr, data);
+ ret = ptrace_writedata(child, addr2p, addr, data);
if (ret == data)
ret = 0;
diff --git a/arch/sparc/kernel/ptrace_64.c b/arch/sparc/kernel/ptrace_64.c
index aa90da08bf61..9ccc812bc09e 100644
--- a/arch/sparc/kernel/ptrace_64.c
+++ b/arch/sparc/kernel/ptrace_64.c
@@ -969,16 +969,19 @@ struct fps {
unsigned long fsr;
};
-long arch_ptrace(struct task_struct *child, long request, long addr, long data)
+long arch_ptrace(struct task_struct *child, long request,
+ unsigned long addr, unsigned long data)
{
const struct user_regset_view *view = task_user_regset_view(current);
unsigned long addr2 = task_pt_regs(current)->u_regs[UREG_I4];
struct pt_regs __user *pregs;
struct fps __user *fps;
+ void __user *addr2p;
int ret;
- pregs = (struct pt_regs __user *) (unsigned long) addr;
- fps = (struct fps __user *) (unsigned long) addr;
+ pregs = (struct pt_regs __user *) addr;
+ fps = (struct fps __user *) addr;
+ addr2p = (void __user *) addr2;
switch (request) {
case PTRACE_PEEKUSR:
@@ -1029,8 +1032,7 @@ long arch_ptrace(struct task_struct *child, long request, long addr, long data)
case PTRACE_READTEXT:
case PTRACE_READDATA:
- ret = ptrace_readdata(child, addr,
- (char __user *)addr2, data);
+ ret = ptrace_readdata(child, addr, addr2p, data);
if (ret == data)
ret = 0;
else if (ret >= 0)
@@ -1039,8 +1041,7 @@ long arch_ptrace(struct task_struct *child, long request, long addr, long data)
case PTRACE_WRITETEXT:
case PTRACE_WRITEDATA:
- ret = ptrace_writedata(child, (char __user *) addr2,
- addr, data);
+ ret = ptrace_writedata(child, addr2p, addr, data);
if (ret == data)
ret = 0;
else if (ret >= 0)
diff --git a/arch/sparc/kernel/rtrap_32.S b/arch/sparc/kernel/rtrap_32.S
index 4da2e1f66290..5f5f74c2c2ca 100644
--- a/arch/sparc/kernel/rtrap_32.S
+++ b/arch/sparc/kernel/rtrap_32.S
@@ -78,9 +78,9 @@ signal_p:
call do_notify_resume
add %sp, STACKFRAME_SZ, %o0 ! pt_regs ptr
- /* Fall through. */
- ld [%sp + STACKFRAME_SZ + PT_PSR], %t_psr
- clr %l6
+ b signal_p
+ ld [%curptr + TI_FLAGS], %g2
+
ret_trap_continue:
sethi %hi(PSR_SYSCALL), %g1
andn %t_psr, %g1, %t_psr
diff --git a/arch/sparc/kernel/rtrap_64.S b/arch/sparc/kernel/rtrap_64.S
index 090b9e9ad5e3..77f1b95e0806 100644
--- a/arch/sparc/kernel/rtrap_64.S
+++ b/arch/sparc/kernel/rtrap_64.S
@@ -34,37 +34,9 @@ __handle_preemption:
__handle_user_windows:
call fault_in_user_windows
wrpr %g0, RTRAP_PSTATE, %pstate
- wrpr %g0, RTRAP_PSTATE_IRQOFF, %pstate
- /* Redo sched+sig checks */
- ldx [%g6 + TI_FLAGS], %l0
- andcc %l0, _TIF_NEED_RESCHED, %g0
-
- be,pt %xcc, 1f
- nop
- call schedule
- wrpr %g0, RTRAP_PSTATE, %pstate
- wrpr %g0, RTRAP_PSTATE_IRQOFF, %pstate
- ldx [%g6 + TI_FLAGS], %l0
-
-1: andcc %l0, _TIF_DO_NOTIFY_RESUME_MASK, %g0
- be,pt %xcc, __handle_user_windows_continue
- nop
- mov %l5, %o1
- add %sp, PTREGS_OFF, %o0
- mov %l0, %o2
-
- call do_notify_resume
- wrpr %g0, RTRAP_PSTATE, %pstate
- wrpr %g0, RTRAP_PSTATE_IRQOFF, %pstate
- /* Signal delivery can modify pt_regs tstate, so we must
- * reload it.
- */
- ldx [%sp + PTREGS_OFF + PT_V9_TSTATE], %l1
- sethi %hi(0xf << 20), %l4
- and %l1, %l4, %l4
- ba,pt %xcc, __handle_user_windows_continue
+ ba,pt %xcc, __handle_preemption_continue
+ wrpr %g0, RTRAP_PSTATE_IRQOFF, %pstate
- andn %l1, %l4, %l1
__handle_userfpu:
rd %fprs, %l5
andcc %l5, FPRS_FEF, %g0
@@ -87,7 +59,7 @@ __handle_signal:
ldx [%sp + PTREGS_OFF + PT_V9_TSTATE], %l1
sethi %hi(0xf << 20), %l4
and %l1, %l4, %l4
- ba,pt %xcc, __handle_signal_continue
+ ba,pt %xcc, __handle_preemption_continue
andn %l1, %l4, %l1
/* When returning from a NMI (%pil==15) interrupt we want to
@@ -177,11 +149,9 @@ __handle_preemption_continue:
bne,pn %xcc, __handle_preemption
andcc %l0, _TIF_DO_NOTIFY_RESUME_MASK, %g0
bne,pn %xcc, __handle_signal
-__handle_signal_continue:
ldub [%g6 + TI_WSAVED], %o2
brnz,pn %o2, __handle_user_windows
nop
-__handle_user_windows_continue:
sethi %hi(TSTATE_PEF), %o0
andcc %l1, %o0, %g0
diff --git a/arch/sparc/kernel/setup_64.c b/arch/sparc/kernel/setup_64.c
index 5f72de67588b..29bafe051bb1 100644
--- a/arch/sparc/kernel/setup_64.c
+++ b/arch/sparc/kernel/setup_64.c
@@ -315,7 +315,7 @@ void __init setup_arch(char **cmdline_p)
#ifdef CONFIG_IP_PNP
if (!ic_set_manually) {
- int chosen = prom_finddevice ("/chosen");
+ phandle chosen = prom_finddevice("/chosen");
u32 cl, sv, gw;
cl = prom_getintdefault (chosen, "client-ip", 0);
diff --git a/arch/sparc/kernel/starfire.c b/arch/sparc/kernel/starfire.c
index 060d0f3a6151..a4446c0fb7a1 100644
--- a/arch/sparc/kernel/starfire.c
+++ b/arch/sparc/kernel/starfire.c
@@ -23,7 +23,7 @@ int this_is_starfire = 0;
void check_if_starfire(void)
{
- int ssnode = prom_finddevice("/ssp-serial");
+ phandle ssnode = prom_finddevice("/ssp-serial");
if (ssnode != 0 && ssnode != -1)
this_is_starfire = 1;
}
diff --git a/arch/sparc/kernel/sys_sparc32.c b/arch/sparc/kernel/sys_sparc32.c
index e6375a750d9a..6db18c6927fb 100644
--- a/arch/sparc/kernel/sys_sparc32.c
+++ b/arch/sparc/kernel/sys_sparc32.c
@@ -17,7 +17,6 @@
#include <linux/resource.h>
#include <linux/times.h>
#include <linux/smp.h>
-#include <linux/smp_lock.h>
#include <linux/sem.h>
#include <linux/msg.h>
#include <linux/shm.h>
diff --git a/arch/sparc/kernel/sys_sparc_32.c b/arch/sparc/kernel/sys_sparc_32.c
index 675c9e11ada5..42b282fa6112 100644
--- a/arch/sparc/kernel/sys_sparc_32.c
+++ b/arch/sparc/kernel/sys_sparc_32.c
@@ -19,7 +19,6 @@
#include <linux/mman.h>
#include <linux/utsname.h>
#include <linux/smp.h>
-#include <linux/smp_lock.h>
#include <linux/ipc.h>
#include <asm/uaccess.h>
diff --git a/arch/sparc/kernel/tadpole.c b/arch/sparc/kernel/tadpole.c
index f476a5f4af6a..9aba8bd5a78b 100644
--- a/arch/sparc/kernel/tadpole.c
+++ b/arch/sparc/kernel/tadpole.c
@@ -100,7 +100,7 @@ static void swift_clockstop(void)
void __init clock_stop_probe(void)
{
- unsigned int node, clk_nd;
+ phandle node, clk_nd;
char name[20];
prom_getstring(prom_root_node, "name", name, sizeof(name));
diff --git a/arch/sparc/kernel/unaligned_32.c b/arch/sparc/kernel/unaligned_32.c
index 12b9f352595f..4491f4cb2695 100644
--- a/arch/sparc/kernel/unaligned_32.c
+++ b/arch/sparc/kernel/unaligned_32.c
@@ -16,7 +16,6 @@
#include <asm/system.h>
#include <asm/uaccess.h>
#include <linux/smp.h>
-#include <linux/smp_lock.h>
#include <linux/perf_event.h>
enum direction {
diff --git a/arch/sparc/kernel/windows.c b/arch/sparc/kernel/windows.c
index b351770cbdd6..3107381e576d 100644
--- a/arch/sparc/kernel/windows.c
+++ b/arch/sparc/kernel/windows.c
@@ -9,7 +9,6 @@
#include <linux/string.h>
#include <linux/mm.h>
#include <linux/smp.h>
-#include <linux/smp_lock.h>
#include <asm/uaccess.h>
diff --git a/arch/sparc/mm/fault_32.c b/arch/sparc/mm/fault_32.c
index bd8601601afa..5b836f5aea90 100644
--- a/arch/sparc/mm/fault_32.c
+++ b/arch/sparc/mm/fault_32.c
@@ -539,6 +539,12 @@ do_sigbus:
__do_fault_siginfo(BUS_ADRERR, SIGBUS, tsk->thread.kregs, address);
}
+static void check_stack_aligned(unsigned long sp)
+{
+ if (sp & 0x7UL)
+ force_sig(SIGILL, current);
+}
+
void window_overflow_fault(void)
{
unsigned long sp;
@@ -547,6 +553,8 @@ void window_overflow_fault(void)
if(((sp + 0x38) & PAGE_MASK) != (sp & PAGE_MASK))
force_user_fault(sp + 0x38, 1);
force_user_fault(sp, 1);
+
+ check_stack_aligned(sp);
}
void window_underflow_fault(unsigned long sp)
@@ -554,6 +562,8 @@ void window_underflow_fault(unsigned long sp)
if(((sp + 0x38) & PAGE_MASK) != (sp & PAGE_MASK))
force_user_fault(sp + 0x38, 0);
force_user_fault(sp, 0);
+
+ check_stack_aligned(sp);
}
void window_ret_fault(struct pt_regs *regs)
@@ -564,4 +574,6 @@ void window_ret_fault(struct pt_regs *regs)
if(((sp + 0x38) & PAGE_MASK) != (sp & PAGE_MASK))
force_user_fault(sp + 0x38, 0);
force_user_fault(sp, 0);
+
+ check_stack_aligned(sp);
}
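The three window-fault paths above now share check_stack_aligned(), which raises SIGILL when the user stack pointer is not doubleword aligned, the alignment SPARC32 requires for the register-window save area at %sp. A one-line sketch of the condition (values illustrative, helper name made up):

#include <linux/types.h>

/* %sp must be 8-byte aligned for a register-window spill/fill. */
static inline bool sp_is_window_aligned(unsigned long sp)
{
	return (sp & 0x7UL) == 0;	/* 0xeffff9a0 -> true, 0xeffff9a2 -> false */
}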
diff --git a/arch/sparc/mm/highmem.c b/arch/sparc/mm/highmem.c
index e139e9cbf5f7..4730eac0747b 100644
--- a/arch/sparc/mm/highmem.c
+++ b/arch/sparc/mm/highmem.c
@@ -29,17 +29,17 @@
#include <asm/tlbflush.h>
#include <asm/fixmap.h>
-void *kmap_atomic(struct page *page, enum km_type type)
+void *__kmap_atomic(struct page *page)
{
- unsigned long idx;
unsigned long vaddr;
+ long idx, type;
/* even !CONFIG_PREEMPT needs this, for in_atomic in do_page_fault */
pagefault_disable();
if (!PageHighMem(page))
return page_address(page);
- debug_kmap_atomic(type);
+ type = kmap_atomic_idx_push();
idx = type + KM_TYPE_NR*smp_processor_id();
vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
@@ -63,44 +63,52 @@ void *kmap_atomic(struct page *page, enum km_type type)
return (void*) vaddr;
}
-EXPORT_SYMBOL(kmap_atomic);
+EXPORT_SYMBOL(__kmap_atomic);
-void kunmap_atomic_notypecheck(void *kvaddr, enum km_type type)
+void __kunmap_atomic(void *kvaddr)
{
-#ifdef CONFIG_DEBUG_HIGHMEM
unsigned long vaddr = (unsigned long) kvaddr & PAGE_MASK;
- unsigned long idx = type + KM_TYPE_NR*smp_processor_id();
+ int type;
if (vaddr < FIXADDR_START) { // FIXME
pagefault_enable();
return;
}
- BUG_ON(vaddr != __fix_to_virt(FIX_KMAP_BEGIN+idx));
+ type = kmap_atomic_idx();
-/* XXX Fix - Anton */
+#ifdef CONFIG_DEBUG_HIGHMEM
+ {
+ unsigned long idx;
+
+ idx = type + KM_TYPE_NR * smp_processor_id();
+ BUG_ON(vaddr != __fix_to_virt(FIX_KMAP_BEGIN+idx));
+
+ /* XXX Fix - Anton */
#if 0
- __flush_cache_one(vaddr);
+ __flush_cache_one(vaddr);
#else
- flush_cache_all();
+ flush_cache_all();
#endif
- /*
- * force other mappings to Oops if they'll try to access
- * this pte without first remap it
- */
- pte_clear(&init_mm, vaddr, kmap_pte-idx);
-/* XXX Fix - Anton */
+ /*
+ * force other mappings to Oops if they'll try to access
+ * this pte without first remap it
+ */
+ pte_clear(&init_mm, vaddr, kmap_pte-idx);
+ /* XXX Fix - Anton */
#if 0
- __flush_tlb_one(vaddr);
+ __flush_tlb_one(vaddr);
#else
- flush_tlb_all();
+ flush_tlb_all();
#endif
+ }
#endif
+ kmap_atomic_idx_pop();
pagefault_enable();
}
-EXPORT_SYMBOL(kunmap_atomic_notypecheck);
+EXPORT_SYMBOL(__kunmap_atomic);
/* We may be fed a pagetable here by ptep_to_xxx and others. */
struct page *kmap_atomic_to_page(void *ptr)
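The highmem.c conversion above drops the caller-supplied km_type slot in favour of a small per-CPU index stack (kmap_atomic_idx_push()/kmap_atomic_idx_pop()), matching the cross-architecture kmap_atomic rework. The caller-visible pattern after the change, as a minimal sketch using the arch entry points from the hunk (the helper name is illustrative):

#include <linux/highmem.h>
#include <linux/string.h>

/* Nested atomic mappings are tracked by the per-CPU index stack, so
 * callers simply unmap in the reverse order of mapping. */
static void copy_highpage_sketch(struct page *dst_page, struct page *src_page)
{
	void *src = __kmap_atomic(src_page);
	void *dst = __kmap_atomic(dst_page);

	memcpy(dst, src, PAGE_SIZE);

	__kunmap_atomic(dst);
	__kunmap_atomic(src);
}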
diff --git a/arch/sparc/mm/init_64.c b/arch/sparc/mm/init_64.c
index 4c2572773b55..2f6ae1d1fb6b 100644
--- a/arch/sparc/mm/init_64.c
+++ b/arch/sparc/mm/init_64.c
@@ -88,7 +88,7 @@ static void __init read_obp_memory(const char *property,
struct linux_prom64_registers *regs,
int *num_ents)
{
- int node = prom_finddevice("/memory");
+ phandle node = prom_finddevice("/memory");
int prop_size = prom_getproplen(node, property);
int ents, ret, i;
diff --git a/arch/sparc/mm/srmmu.c b/arch/sparc/mm/srmmu.c
index b0b43aa5e45a..92319aa8b662 100644
--- a/arch/sparc/mm/srmmu.c
+++ b/arch/sparc/mm/srmmu.c
@@ -1262,7 +1262,8 @@ extern unsigned long bootmem_init(unsigned long *pages_avail);
void __init srmmu_paging_init(void)
{
- int i, cpunode;
+ int i;
+ phandle cpunode;
char node_str[128];
pgd_t *pgd;
pmd_t *pmd;
@@ -1398,7 +1399,8 @@ static void __init srmmu_is_bad(void)
static void __init init_vac_layout(void)
{
- int nd, cache_lines;
+ phandle nd;
+ int cache_lines;
char node_str[128];
#ifdef CONFIG_SMP
int cpu = 0;
@@ -2082,7 +2084,7 @@ static void __init get_srmmu_type(void)
/* Next check for Fujitsu Swift. */
if(psr_typ == 0 && psr_vers == 4) {
- int cpunode;
+ phandle cpunode;
char node_str[128];
/* Look if it is not a TurboSparc emulating Swift... */
diff --git a/arch/sparc/mm/sun4c.c b/arch/sparc/mm/sun4c.c
index 4289f90f8697..ddd0d86e508e 100644
--- a/arch/sparc/mm/sun4c.c
+++ b/arch/sparc/mm/sun4c.c
@@ -420,7 +420,7 @@ volatile unsigned long __iomem *sun4c_memerr_reg = NULL;
void __init sun4c_probe_memerr_reg(void)
{
- int node;
+ phandle node;
struct linux_prom_registers regs[1];
node = prom_getchild(prom_root_node);
diff --git a/arch/sparc/prom/init_32.c b/arch/sparc/prom/init_32.c
index ccb36c7f9b8c..d342dba4dd54 100644
--- a/arch/sparc/prom/init_32.c
+++ b/arch/sparc/prom/init_32.c
@@ -20,7 +20,7 @@ enum prom_major_version prom_vers;
unsigned int prom_rev, prom_prev;
/* The root node of the prom device tree. */
-int prom_root_node;
+phandle prom_root_node;
EXPORT_SYMBOL(prom_root_node);
/* Pointer to the device tree operations structure. */
diff --git a/arch/sparc/prom/init_64.c b/arch/sparc/prom/init_64.c
index 7b00f89490a4..3ff911e7d25b 100644
--- a/arch/sparc/prom/init_64.c
+++ b/arch/sparc/prom/init_64.c
@@ -19,7 +19,7 @@ char prom_version[80];
/* The root node of the prom device tree. */
int prom_stdin, prom_stdout;
-int prom_chosen_node;
+phandle prom_chosen_node;
/* You must call prom_init() before you attempt to use any of the
* routines in the prom library. It returns 0 on success, 1 on
@@ -30,7 +30,7 @@ extern void prom_cif_init(void *, void *);
void __init prom_init(void *cif_handler, void *cif_stack)
{
- int node;
+ phandle node;
prom_cif_init(cif_handler, cif_stack);
diff --git a/arch/sparc/prom/memory.c b/arch/sparc/prom/memory.c
index fac7899a29c3..3f263a64857d 100644
--- a/arch/sparc/prom/memory.c
+++ b/arch/sparc/prom/memory.c
@@ -31,7 +31,8 @@ static int __init prom_meminit_v0(void)
static int __init prom_meminit_v2(void)
{
struct linux_prom_registers reg[64];
- int node, size, num_ents, i;
+ phandle node;
+ int size, num_ents, i;
node = prom_searchsiblings(prom_getchild(prom_root_node), "memory");
size = prom_getproperty(node, "available", (char *) reg, sizeof(reg));
diff --git a/arch/sparc/prom/misc_64.c b/arch/sparc/prom/misc_64.c
index 6cb1581d6aef..d24bc44e361e 100644
--- a/arch/sparc/prom/misc_64.c
+++ b/arch/sparc/prom/misc_64.c
@@ -183,7 +183,8 @@ unsigned char prom_get_idprom(char *idbuf, int num_bytes)
int prom_get_mmu_ihandle(void)
{
- int node, ret;
+ phandle node;
+ int ret;
if (prom_mmu_ihandle_cache != 0)
return prom_mmu_ihandle_cache;
@@ -201,7 +202,8 @@ int prom_get_mmu_ihandle(void)
static int prom_get_memory_ihandle(void)
{
static int memory_ihandle_cache;
- int node, ret;
+ phandle node;
+ int ret;
if (memory_ihandle_cache != 0)
return memory_ihandle_cache;
diff --git a/arch/sparc/prom/ranges.c b/arch/sparc/prom/ranges.c
index aeff43e44e45..541fc829c207 100644
--- a/arch/sparc/prom/ranges.c
+++ b/arch/sparc/prom/ranges.c
@@ -68,7 +68,7 @@ EXPORT_SYMBOL(prom_apply_obio_ranges);
void __init prom_ranges_init(void)
{
- int node, obio_node;
+ phandle node, obio_node;
int success;
num_obio_ranges = 0;
@@ -89,8 +89,8 @@ void __init prom_ranges_init(void)
prom_printf("PROMLIB: obio_ranges %d\n", num_obio_ranges);
}
-void
-prom_apply_generic_ranges (int node, int parent, struct linux_prom_registers *regs, int nregs)
+void prom_apply_generic_ranges(phandle node, phandle parent,
+ struct linux_prom_registers *regs, int nregs)
{
int success;
int num_ranges;
diff --git a/arch/sparc/prom/tree_32.c b/arch/sparc/prom/tree_32.c
index b21592f8e3fe..63e08e149774 100644
--- a/arch/sparc/prom/tree_32.c
+++ b/arch/sparc/prom/tree_32.c
@@ -20,10 +20,10 @@ extern void restore_current(void);
static char promlib_buf[128];
/* Internal version of prom_getchild that does not alter return values. */
-int __prom_getchild(int node)
+phandle __prom_getchild(phandle node)
{
unsigned long flags;
- int cnode;
+ phandle cnode;
spin_lock_irqsave(&prom_lock, flags);
cnode = prom_nodeops->no_child(node);
@@ -36,9 +36,9 @@ int __prom_getchild(int node)
/* Return the child of node 'node' or zero if no this node has no
* direct descendent.
*/
-int prom_getchild(int node)
+phandle prom_getchild(phandle node)
{
- int cnode;
+ phandle cnode;
if (node == -1)
return 0;
@@ -52,10 +52,10 @@ int prom_getchild(int node)
EXPORT_SYMBOL(prom_getchild);
/* Internal version of prom_getsibling that does not alter return values. */
-int __prom_getsibling(int node)
+phandle __prom_getsibling(phandle node)
{
unsigned long flags;
- int cnode;
+ phandle cnode;
spin_lock_irqsave(&prom_lock, flags);
cnode = prom_nodeops->no_nextnode(node);
@@ -68,9 +68,9 @@ int __prom_getsibling(int node)
/* Return the next sibling of node 'node' or zero if no more siblings
* at this level of depth in the tree.
*/
-int prom_getsibling(int node)
+phandle prom_getsibling(phandle node)
{
- int sibnode;
+ phandle sibnode;
if (node == -1)
return 0;
@@ -86,7 +86,7 @@ EXPORT_SYMBOL(prom_getsibling);
/* Return the length in bytes of property 'prop' at node 'node'.
* Return -1 on error.
*/
-int prom_getproplen(int node, const char *prop)
+int prom_getproplen(phandle node, const char *prop)
{
int ret;
unsigned long flags;
@@ -106,7 +106,7 @@ EXPORT_SYMBOL(prom_getproplen);
* 'buffer' which has a size of 'bufsize'. If the acquisition
* was successful the length will be returned, else -1 is returned.
*/
-int prom_getproperty(int node, const char *prop, char *buffer, int bufsize)
+int prom_getproperty(phandle node, const char *prop, char *buffer, int bufsize)
{
int plen, ret;
unsigned long flags;
@@ -126,7 +126,7 @@ EXPORT_SYMBOL(prom_getproperty);
/* Acquire an integer property and return its value. Returns -1
* on failure.
*/
-int prom_getint(int node, char *prop)
+int prom_getint(phandle node, char *prop)
{
static int intprop;
@@ -140,7 +140,7 @@ EXPORT_SYMBOL(prom_getint);
/* Acquire an integer property, upon error return the passed default
* integer.
*/
-int prom_getintdefault(int node, char *property, int deflt)
+int prom_getintdefault(phandle node, char *property, int deflt)
{
int retval;
@@ -152,7 +152,7 @@ int prom_getintdefault(int node, char *property, int deflt)
EXPORT_SYMBOL(prom_getintdefault);
/* Acquire a boolean property, 1=TRUE 0=FALSE. */
-int prom_getbool(int node, char *prop)
+int prom_getbool(phandle node, char *prop)
{
int retval;
@@ -166,7 +166,7 @@ EXPORT_SYMBOL(prom_getbool);
* string on error. The char pointer is the user supplied string
* buffer.
*/
-void prom_getstring(int node, char *prop, char *user_buf, int ubuf_size)
+void prom_getstring(phandle node, char *prop, char *user_buf, int ubuf_size)
{
int len;
@@ -180,7 +180,7 @@ EXPORT_SYMBOL(prom_getstring);
/* Does the device at node 'node' have name 'name'?
* YES = 1 NO = 0
*/
-int prom_nodematch(int node, char *name)
+int prom_nodematch(phandle node, char *name)
{
int error;
@@ -194,10 +194,11 @@ int prom_nodematch(int node, char *name)
/* Search siblings at 'node_start' for a node with name
* 'nodename'. Return node if successful, zero if not.
*/
-int prom_searchsiblings(int node_start, char *nodename)
+phandle prom_searchsiblings(phandle node_start, char *nodename)
{
- int thisnode, error;
+ phandle thisnode;
+ int error;
for(thisnode = node_start; thisnode;
thisnode=prom_getsibling(thisnode)) {
@@ -213,7 +214,7 @@ int prom_searchsiblings(int node_start, char *nodename)
EXPORT_SYMBOL(prom_searchsiblings);
/* Interal version of nextprop that does not alter return values. */
-char * __prom_nextprop(int node, char * oprop)
+char *__prom_nextprop(phandle node, char * oprop)
{
unsigned long flags;
char *prop;
@@ -228,7 +229,7 @@ char * __prom_nextprop(int node, char * oprop)
/* Return the first property name for node 'node'. */
/* buffer is unused argument, but as v9 uses it, we need to have the same interface */
-char * prom_firstprop(int node, char *bufer)
+char *prom_firstprop(phandle node, char *bufer)
{
if (node == 0 || node == -1)
return "";
@@ -241,7 +242,7 @@ EXPORT_SYMBOL(prom_firstprop);
* at node 'node' . Returns empty string if no more
* property types for this node.
*/
-char * prom_nextprop(int node, char *oprop, char *buffer)
+char *prom_nextprop(phandle node, char *oprop, char *buffer)
{
if (node == 0 || node == -1)
return "";
@@ -250,11 +251,11 @@ char * prom_nextprop(int node, char *oprop, char *buffer)
}
EXPORT_SYMBOL(prom_nextprop);
-int prom_finddevice(char *name)
+phandle prom_finddevice(char *name)
{
char nbuf[128];
char *s = name, *d;
- int node = prom_root_node, node2;
+ phandle node = prom_root_node, node2;
unsigned int which_io, phys_addr;
struct linux_prom_registers reg[PROMREG_MAX];
@@ -298,7 +299,7 @@ int prom_finddevice(char *name)
}
EXPORT_SYMBOL(prom_finddevice);
-int prom_node_has_property(int node, char *prop)
+int prom_node_has_property(phandle node, char *prop)
{
char *current_property = "";
@@ -314,7 +315,7 @@ EXPORT_SYMBOL(prom_node_has_property);
/* Set property 'pname' at node 'node' to value 'value' which has a length
* of 'size' bytes. Return the number of bytes the prom accepted.
*/
-int prom_setprop(int node, const char *pname, char *value, int size)
+int prom_setprop(phandle node, const char *pname, char *value, int size)
{
unsigned long flags;
int ret;
@@ -329,9 +330,9 @@ int prom_setprop(int node, const char *pname, char *value, int size)
}
EXPORT_SYMBOL(prom_setprop);
-int prom_inst2pkg(int inst)
+phandle prom_inst2pkg(int inst)
{
- int node;
+ phandle node;
unsigned long flags;
spin_lock_irqsave(&prom_lock, flags);
@@ -345,9 +346,10 @@ int prom_inst2pkg(int inst)
/* Return 'node' assigned to a particular prom 'path'
* FIXME: Should work for v0 as well
*/
-int prom_pathtoinode(char *path)
+phandle prom_pathtoinode(char *path)
{
- int node, inst;
+ phandle node;
+ int inst;
inst = prom_devopen (path);
if (inst == -1) return 0;
diff --git a/arch/sparc/prom/tree_64.c b/arch/sparc/prom/tree_64.c
index 9d3f9137a43a..691be68932f8 100644
--- a/arch/sparc/prom/tree_64.c
+++ b/arch/sparc/prom/tree_64.c
@@ -16,7 +16,7 @@
#include <asm/oplib.h>
#include <asm/ldc.h>
-static int prom_node_to_node(const char *type, int node)
+static phandle prom_node_to_node(const char *type, phandle node)
{
unsigned long args[5];
@@ -28,20 +28,20 @@ static int prom_node_to_node(const char *type, int node)
p1275_cmd_direct(args);
- return (int) args[4];
+ return (phandle) args[4];
}
/* Return the child of node 'node' or zero if no this node has no
* direct descendent.
*/
-inline int __prom_getchild(int node)
+inline phandle __prom_getchild(phandle node)
{
return prom_node_to_node("child", node);
}
-inline int prom_getchild(int node)
+inline phandle prom_getchild(phandle node)
{
- int cnode;
+ phandle cnode;
if (node == -1)
return 0;
@@ -52,9 +52,9 @@ inline int prom_getchild(int node)
}
EXPORT_SYMBOL(prom_getchild);
-inline int prom_getparent(int node)
+inline phandle prom_getparent(phandle node)
{
- int cnode;
+ phandle cnode;
if (node == -1)
return 0;
@@ -67,14 +67,14 @@ inline int prom_getparent(int node)
/* Return the next sibling of node 'node' or zero if no more siblings
* at this level of depth in the tree.
*/
-inline int __prom_getsibling(int node)
+inline phandle __prom_getsibling(phandle node)
{
return prom_node_to_node(prom_peer_name, node);
}
-inline int prom_getsibling(int node)
+inline phandle prom_getsibling(phandle node)
{
- int sibnode;
+ phandle sibnode;
if (node == -1)
return 0;
@@ -89,7 +89,7 @@ EXPORT_SYMBOL(prom_getsibling);
/* Return the length in bytes of property 'prop' at node 'node'.
* Return -1 on error.
*/
-inline int prom_getproplen(int node, const char *prop)
+inline int prom_getproplen(phandle node, const char *prop)
{
unsigned long args[6];
@@ -113,7 +113,7 @@ EXPORT_SYMBOL(prom_getproplen);
* 'buffer' which has a size of 'bufsize'. If the acquisition
* was successful the length will be returned, else -1 is returned.
*/
-inline int prom_getproperty(int node, const char *prop,
+inline int prom_getproperty(phandle node, const char *prop,
char *buffer, int bufsize)
{
unsigned long args[8];
@@ -141,7 +141,7 @@ EXPORT_SYMBOL(prom_getproperty);
/* Acquire an integer property and return its value. Returns -1
* on failure.
*/
-inline int prom_getint(int node, const char *prop)
+inline int prom_getint(phandle node, const char *prop)
{
int intprop;
@@ -156,7 +156,7 @@ EXPORT_SYMBOL(prom_getint);
* integer.
*/
-int prom_getintdefault(int node, const char *property, int deflt)
+int prom_getintdefault(phandle node, const char *property, int deflt)
{
int retval;
@@ -169,7 +169,7 @@ int prom_getintdefault(int node, const char *property, int deflt)
EXPORT_SYMBOL(prom_getintdefault);
/* Acquire a boolean property, 1=TRUE 0=FALSE. */
-int prom_getbool(int node, const char *prop)
+int prom_getbool(phandle node, const char *prop)
{
int retval;
@@ -184,7 +184,8 @@ EXPORT_SYMBOL(prom_getbool);
* string on error. The char pointer is the user supplied string
* buffer.
*/
-void prom_getstring(int node, const char *prop, char *user_buf, int ubuf_size)
+void prom_getstring(phandle node, const char *prop, char *user_buf,
+ int ubuf_size)
{
int len;
@@ -198,7 +199,7 @@ EXPORT_SYMBOL(prom_getstring);
/* Does the device at node 'node' have name 'name'?
* YES = 1 NO = 0
*/
-int prom_nodematch(int node, const char *name)
+int prom_nodematch(phandle node, const char *name)
{
char namebuf[128];
prom_getproperty(node, "name", namebuf, sizeof(namebuf));
@@ -210,10 +211,10 @@ int prom_nodematch(int node, const char *name)
/* Search siblings at 'node_start' for a node with name
* 'nodename'. Return node if successful, zero if not.
*/
-int prom_searchsiblings(int node_start, const char *nodename)
+phandle prom_searchsiblings(phandle node_start, const char *nodename)
{
-
- int thisnode, error;
+ phandle thisnode;
+ int error;
char promlib_buf[128];
for(thisnode = node_start; thisnode;
@@ -234,7 +235,7 @@ static const char *prom_nextprop_name = "nextprop";
/* Return the first property type for node 'node'.
* buffer should be at least 32B in length
*/
-inline char *prom_firstprop(int node, char *buffer)
+inline char *prom_firstprop(phandle node, char *buffer)
{
unsigned long args[7];
@@ -260,7 +261,7 @@ EXPORT_SYMBOL(prom_firstprop);
* at node 'node' . Returns NULL string if no more
* property types for this node.
*/
-inline char *prom_nextprop(int node, const char *oprop, char *buffer)
+inline char *prom_nextprop(phandle node, const char *oprop, char *buffer)
{
unsigned long args[7];
char buf[32];
@@ -288,8 +289,7 @@ inline char *prom_nextprop(int node, const char *oprop, char *buffer)
}
EXPORT_SYMBOL(prom_nextprop);
-int
-prom_finddevice(const char *name)
+phandle prom_finddevice(const char *name)
{
unsigned long args[5];
@@ -307,7 +307,7 @@ prom_finddevice(const char *name)
}
EXPORT_SYMBOL(prom_finddevice);
-int prom_node_has_property(int node, const char *prop)
+int prom_node_has_property(phandle node, const char *prop)
{
char buf [32];
@@ -325,7 +325,7 @@ EXPORT_SYMBOL(prom_node_has_property);
* of 'size' bytes. Return the number of bytes the prom accepted.
*/
int
-prom_setprop(int node, const char *pname, char *value, int size)
+prom_setprop(phandle node, const char *pname, char *value, int size)
{
unsigned long args[8];
@@ -355,10 +355,10 @@ prom_setprop(int node, const char *pname, char *value, int size)
}
EXPORT_SYMBOL(prom_setprop);
-inline int prom_inst2pkg(int inst)
+inline phandle prom_inst2pkg(int inst)
{
unsigned long args[5];
- int node;
+ phandle node;
args[0] = (unsigned long) "instance-to-package";
args[1] = 1;
@@ -377,10 +377,10 @@ inline int prom_inst2pkg(int inst)
/* Return 'node' assigned to a particular prom 'path'
* FIXME: Should work for v0 as well
*/
-int
-prom_pathtoinode(const char *path)
+phandle prom_pathtoinode(const char *path)
{
- int node, inst;
+ phandle node;
+ int inst;
inst = prom_devopen (path);
if (inst == 0)
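With tree_32.c and tree_64.c converted, the whole PROM tree-walking API now takes and returns phandle. A short, illustrative walk over the root node's children using the prototypes above (the helper is not part of the patch):

#include <linux/init.h>
#include <linux/kernel.h>
#include <asm/oplib.h>		/* prom_getchild(), prom_getsibling(), prom_getstring() */

static void __init print_root_children(void)
{
	phandle node;
	char name[64];

	for (node = prom_getchild(prom_root_node); node != 0;
	     node = prom_getsibling(node)) {
		prom_getstring(node, "name", name, sizeof(name));
		pr_info("child node: %s\n", name);
	}
}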
diff --git a/arch/tile/Kconfig b/arch/tile/Kconfig
index 1eb308cb711a..e11b5fcb70eb 100644
--- a/arch/tile/Kconfig
+++ b/arch/tile/Kconfig
@@ -58,6 +58,9 @@ config ARCH_SUPPORTS_OPTIMIZED_INLINING
config ARCH_PHYS_ADDR_T_64BIT
def_bool y
+config ARCH_DMA_ADDR_T_64BIT
+ def_bool y
+
config LOCKDEP_SUPPORT
def_bool y
@@ -96,6 +99,7 @@ config HVC_TILE
config TILE
def_bool y
+ select HAVE_KVM if !TILEGX
select GENERIC_FIND_FIRST_BIT
select GENERIC_FIND_NEXT_BIT
select USE_GENERIC_SMP_HELPERS
@@ -113,8 +117,6 @@ config TILE
# config HUGETLB_PAGE_SIZE_VARIABLE
-mainmenu "Linux/TILE Kernel Configuration"
-
# Please note: TILE-Gx support is not yet finalized; this is
# the preliminary support. TILE-Gx drivers are only provided
# with the alpha or beta test versions for Tilera customers.
@@ -236,9 +238,9 @@ choice
If you are not absolutely sure what you are doing, leave this
option alone!
- config VMSPLIT_375G
+ config VMSPLIT_3_75G
bool "3.75G/0.25G user/kernel split (no kernel networking)"
- config VMSPLIT_35G
+ config VMSPLIT_3_5G
bool "3.5G/0.5G user/kernel split"
config VMSPLIT_3G
bool "3G/1G user/kernel split"
@@ -252,8 +254,8 @@ endchoice
config PAGE_OFFSET
hex
- default 0xF0000000 if VMSPLIT_375G
- default 0xE0000000 if VMSPLIT_35G
+ default 0xF0000000 if VMSPLIT_3_75G
+ default 0xE0000000 if VMSPLIT_3_5G
default 0xB0000000 if VMSPLIT_3G_OPT
default 0x80000000 if VMSPLIT_2G
default 0x40000000 if VMSPLIT_1G
@@ -314,10 +316,31 @@ config HARDWALL
bool "Hardwall support to allow access to user dynamic network"
default y
+config KERNEL_PL
+ int "Processor protection level for kernel"
+ range 1 2
+ default "1"
+ ---help---
+ This setting determines the processor protection level the
+ kernel will be built to run at. Generally you should use
+ the default value here.
+
endmenu # Tilera-specific configuration
menu "Bus options"
+config PCI
+ bool "PCI support"
+ default y
+ select PCI_DOMAINS
+ ---help---
+ Enable PCI root complex support, so PCIe endpoint devices can
+ be attached to the Tile chip. Many, but not all, PCI devices
+ are supported under Tilera's root complex driver.
+
+config PCI_DOMAINS
+ bool
+
config NO_IOMEM
def_bool !PCI
@@ -354,3 +377,5 @@ source "security/Kconfig"
source "crypto/Kconfig"
source "lib/Kconfig"
+
+source "arch/tile/kvm/Kconfig"
diff --git a/arch/tile/Makefile b/arch/tile/Makefile
index fd8f6bb5face..17acce70569b 100644
--- a/arch/tile/Makefile
+++ b/arch/tile/Makefile
@@ -26,8 +26,9 @@ $(error Set TILERA_ROOT or CROSS_COMPILE when building $(ARCH) on $(HOST_ARCH))
endif
endif
-
+ifneq ($(CONFIG_DEBUG_EXTRA_FLAGS),"")
KBUILD_CFLAGS += $(CONFIG_DEBUG_EXTRA_FLAGS)
+endif
LIBGCC_PATH := $(shell $(CC) $(KBUILD_CFLAGS) -print-libgcc-file-name)
@@ -49,6 +50,20 @@ head-y := arch/tile/kernel/head_$(BITS).o
libs-y += arch/tile/lib/
libs-y += $(LIBGCC_PATH)
-
# See arch/tile/Kbuild for content of core part of the kernel
core-y += arch/tile/
+
+core-$(CONFIG_KVM) += arch/tile/kvm/
+
+ifdef TILERA_ROOT
+INSTALL_PATH ?= $(TILERA_ROOT)/tile/boot
+endif
+
+install:
+ install -D -m 755 vmlinux $(INSTALL_PATH)/vmlinux-$(KERNELRELEASE)
+ install -D -m 644 .config $(INSTALL_PATH)/config-$(KERNELRELEASE)
+ install -D -m 644 System.map $(INSTALL_PATH)/System.map-$(KERNELRELEASE)
+
+define archhelp
+ echo ' install - install kernel into $(INSTALL_PATH)'
+endef
diff --git a/arch/tile/include/arch/sim.h b/arch/tile/include/arch/sim.h
new file mode 100644
index 000000000000..74b7c1624d34
--- /dev/null
+++ b/arch/tile/include/arch/sim.h
@@ -0,0 +1,619 @@
+/*
+ * Copyright 2010 Tilera Corporation. All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation, version 2.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
+ * NON INFRINGEMENT. See the GNU General Public License for
+ * more details.
+ */
+
+/**
+ * @file
+ *
+ * Provides an API for controlling the simulator at runtime.
+ */
+
+/**
+ * @addtogroup arch_sim
+ * @{
+ *
+ * An API for controlling the simulator at runtime.
+ *
+ * The simulator's behavior can be modified while it is running.
+ * For example, human-readable trace output can be enabled and disabled
+ * around code of interest.
+ *
+ * There are two ways to modify simulator behavior:
+ * programmatically, by calling various sim_* functions, and
+ * interactively, by entering commands like "sim set functional true"
+ * at the tile-monitor prompt. Typing "sim help" at that prompt provides
+ * a list of interactive commands.
+ *
+ * All interactive commands can also be executed programmatically by
+ * passing a string to the sim_command function.
+ */
+
+#ifndef __ARCH_SIM_H__
+#define __ARCH_SIM_H__
+
+#include <arch/sim_def.h>
+#include <arch/abi.h>
+
+#ifndef __ASSEMBLER__
+
+#include <arch/spr_def.h>
+
+
+/**
+ * Return true if the current program is running under a simulator,
+ * rather than on real hardware. If running on hardware, other "sim_xxx()"
+ * calls have no useful effect.
+ */
+static inline int
+sim_is_simulator(void)
+{
+ return __insn_mfspr(SPR_SIM_CONTROL) != 0;
+}
+
+
+/**
+ * Checkpoint the simulator state to a checkpoint file.
+ *
+ * The checkpoint file name is either the default or the name specified
+ * on the command line with "--checkpoint-file".
+ */
+static __inline void
+sim_checkpoint(void)
+{
+ __insn_mtspr(SPR_SIM_CONTROL, SIM_CONTROL_CHECKPOINT);
+}
+
+
+/**
+ * Report whether or not various kinds of simulator tracing are enabled.
+ *
+ * @return The bitwise OR of these values:
+ *
+ * SIM_TRACE_CYCLES (--trace-cycles),
+ * SIM_TRACE_ROUTER (--trace-router),
+ * SIM_TRACE_REGISTER_WRITES (--trace-register-writes),
+ * SIM_TRACE_DISASM (--trace-disasm),
+ * SIM_TRACE_STALL_INFO (--trace-stall-info)
+ * SIM_TRACE_MEMORY_CONTROLLER (--trace-memory-controller)
+ * SIM_TRACE_L2_CACHE (--trace-l2)
+ * SIM_TRACE_LINES (--trace-lines)
+ */
+static __inline unsigned int
+sim_get_tracing(void)
+{
+ return __insn_mfspr(SPR_SIM_CONTROL) & SIM_TRACE_FLAG_MASK;
+}
+
+
+/**
+ * Turn on or off different kinds of simulator tracing.
+ *
+ * @param mask Either one of these special values:
+ *
+ * SIM_TRACE_NONE (turns off tracing),
+ * SIM_TRACE_ALL (turns on all possible tracing).
+ *
+ * or the bitwise OR of these values:
+ *
+ * SIM_TRACE_CYCLES (--trace-cycles),
+ * SIM_TRACE_ROUTER (--trace-router),
+ * SIM_TRACE_REGISTER_WRITES (--trace-register-writes),
+ * SIM_TRACE_DISASM (--trace-disasm),
+ * SIM_TRACE_STALL_INFO (--trace-stall-info)
+ * SIM_TRACE_MEMORY_CONTROLLER (--trace-memory-controller)
+ * SIM_TRACE_L2_CACHE (--trace-l2)
+ * SIM_TRACE_LINES (--trace-lines)
+ */
+static __inline void
+sim_set_tracing(unsigned int mask)
+{
+ __insn_mtspr(SPR_SIM_CONTROL, SIM_TRACE_SPR_ARG(mask));
+}
+
+
+/**
+ * Request dumping of different kinds of simulator state.
+ *
+ * @param mask Either this special value:
+ *
+ * SIM_DUMP_ALL (dump all known state)
+ *
+ * or the bitwise OR of these values:
+ *
+ * SIM_DUMP_REGS (the register file),
+ * SIM_DUMP_SPRS (the SPRs),
+ * SIM_DUMP_ITLB (the iTLB),
+ * SIM_DUMP_DTLB (the dTLB),
+ * SIM_DUMP_L1I (the L1 I-cache),
+ * SIM_DUMP_L1D (the L1 D-cache),
+ * SIM_DUMP_L2 (the L2 cache),
+ * SIM_DUMP_SNREGS (the switch register file),
+ * SIM_DUMP_SNITLB (the switch iTLB),
+ * SIM_DUMP_SNL1I (the switch L1 I-cache),
+ * SIM_DUMP_BACKTRACE (the current backtrace)
+ */
+static __inline void
+sim_dump(unsigned int mask)
+{
+ __insn_mtspr(SPR_SIM_CONTROL, SIM_DUMP_SPR_ARG(mask));
+}
+
+
+/**
+ * Print a string to the simulator stdout.
+ *
+ * @param str The string to be written; a newline is automatically added.
+ */
+static __inline void
+sim_print_string(const char* str)
+{
+ int i;
+ for (i = 0; str[i] != 0; i++)
+ {
+ __insn_mtspr(SPR_SIM_CONTROL, SIM_CONTROL_PUTC |
+ (str[i] << _SIM_CONTROL_OPERATOR_BITS));
+ }
+ __insn_mtspr(SPR_SIM_CONTROL, SIM_CONTROL_PUTC |
+ (SIM_PUTC_FLUSH_STRING << _SIM_CONTROL_OPERATOR_BITS));
+}
+
+
+/**
+ * Execute a simulator command string.
+ *
+ * Type 'sim help' at the tile-monitor prompt to learn what commands
+ * are available. Note the use of the tile-monitor "sim" command to
+ * pass commands to the simulator.
+ *
+ * The argument to sim_command() does not include the leading "sim"
+ * prefix used at the tile-monitor prompt; for example, you might call
+ * sim_command("trace disasm").
+ */
+static __inline void
+sim_command(const char* str)
+{
+ int c;
+ do
+ {
+ c = *str++;
+ __insn_mtspr(SPR_SIM_CONTROL, SIM_CONTROL_COMMAND |
+ (c << _SIM_CONTROL_OPERATOR_BITS));
+ }
+ while (c);
+}
+
+
+
+#ifndef __DOXYGEN__
+
+/**
+ * The underlying implementation of "_sim_syscall()".
+ *
+ * We use extra "and" instructions to ensure that all the values
+ * we are passing to the simulator are actually valid in the registers
+ * (i.e. returned from memory) prior to the SIM_CONTROL spr.
+ */
+static __inline int _sim_syscall0(int val)
+{
+ long result;
+ __asm__ __volatile__ ("mtspr SIM_CONTROL, r0"
+ : "=R00" (result) : "R00" (val));
+ return result;
+}
+
+static __inline int _sim_syscall1(int val, long arg1)
+{
+ long result;
+ __asm__ __volatile__ ("{ and zero, r1, r1; mtspr SIM_CONTROL, r0 }"
+ : "=R00" (result) : "R00" (val), "R01" (arg1));
+ return result;
+}
+
+static __inline int _sim_syscall2(int val, long arg1, long arg2)
+{
+ long result;
+ __asm__ __volatile__ ("{ and zero, r1, r2; mtspr SIM_CONTROL, r0 }"
+ : "=R00" (result)
+ : "R00" (val), "R01" (arg1), "R02" (arg2));
+ return result;
+}
+
+/* Note that _sim_syscall3() and higher are technically at risk of
+ receiving an interrupt right before the mtspr bundle, in which case
+ the register values for arguments 3 and up may still be in flight
+ to the core from a stack frame reload. */
+
+static __inline int _sim_syscall3(int val, long arg1, long arg2, long arg3)
+{
+ long result;
+ __asm__ __volatile__ ("{ and zero, r3, r3 };"
+ "{ and zero, r1, r2; mtspr SIM_CONTROL, r0 }"
+ : "=R00" (result)
+ : "R00" (val), "R01" (arg1), "R02" (arg2),
+ "R03" (arg3));
+ return result;
+}
+
+static __inline int _sim_syscall4(int val, long arg1, long arg2, long arg3,
+ long arg4)
+{
+ long result;
+ __asm__ __volatile__ ("{ and zero, r3, r4 };"
+ "{ and zero, r1, r2; mtspr SIM_CONTROL, r0 }"
+ : "=R00" (result)
+ : "R00" (val), "R01" (arg1), "R02" (arg2),
+ "R03" (arg3), "R04" (arg4));
+ return result;
+}
+
+static __inline int _sim_syscall5(int val, long arg1, long arg2, long arg3,
+ long arg4, long arg5)
+{
+ long result;
+ __asm__ __volatile__ ("{ and zero, r3, r4; and zero, r5, r5 };"
+ "{ and zero, r1, r2; mtspr SIM_CONTROL, r0 }"
+ : "=R00" (result)
+ : "R00" (val), "R01" (arg1), "R02" (arg2),
+ "R03" (arg3), "R04" (arg4), "R05" (arg5));
+ return result;
+}
+
+
+/**
+ * Make a special syscall to the simulator itself, if running under
+ * simulation. This is used as the implementation of other functions
+ * and should not be used outside this file.
+ *
+ * @param syscall_num The simulator syscall number.
+ * @param nr The number of additional arguments provided.
+ *
+ * @return Varies by syscall.
+ */
+#define _sim_syscall(syscall_num, nr, args...) \
+ _sim_syscall##nr( \
+ ((syscall_num) << _SIM_CONTROL_OPERATOR_BITS) | SIM_CONTROL_SYSCALL, args)
+
+
+/* Values for the "access_mask" parameters below. */
+#define SIM_WATCHPOINT_READ 1
+#define SIM_WATCHPOINT_WRITE 2
+#define SIM_WATCHPOINT_EXECUTE 4
+
+
+static __inline int
+sim_add_watchpoint(unsigned int process_id,
+ unsigned long address,
+ unsigned long size,
+ unsigned int access_mask,
+ unsigned long user_data)
+{
+ return _sim_syscall(SIM_SYSCALL_ADD_WATCHPOINT, 5, process_id,
+ address, size, access_mask, user_data);
+}
+
+
+static __inline int
+sim_remove_watchpoint(unsigned int process_id,
+ unsigned long address,
+ unsigned long size,
+ unsigned int access_mask,
+ unsigned long user_data)
+{
+ return _sim_syscall(SIM_SYSCALL_REMOVE_WATCHPOINT, 5, process_id,
+ address, size, access_mask, user_data);
+}
+
+
+/**
+ * Return value from sim_query_watchpoint.
+ */
+struct SimQueryWatchpointStatus
+{
+ /**
+ * 0 if a watchpoint fired, 1 if no watchpoint fired, or -1 for
+ * error (meaning a bad process_id).
+ */
+ int syscall_status;
+
+ /**
+ * The address of the watchpoint that fired (this is the address
+ * passed to sim_add_watchpoint, not an address within that range
+ * that actually triggered the watchpoint).
+ */
+ unsigned long address;
+
+ /** The arbitrary user_data installed by sim_add_watchpoint. */
+ unsigned long user_data;
+};
+
+
+static __inline struct SimQueryWatchpointStatus
+sim_query_watchpoint(unsigned int process_id)
+{
+ struct SimQueryWatchpointStatus status;
+ long val = SIM_CONTROL_SYSCALL |
+ (SIM_SYSCALL_QUERY_WATCHPOINT << _SIM_CONTROL_OPERATOR_BITS);
+ __asm__ __volatile__ ("{ and zero, r1, r1; mtspr SIM_CONTROL, r0 }"
+ : "=R00" (status.syscall_status),
+ "=R01" (status.address),
+ "=R02" (status.user_data)
+ : "R00" (val), "R01" (process_id));
+ return status;
+}
+
+
+/* On the simulator, confirm lines have been evicted everywhere. */
+static __inline void
+sim_validate_lines_evicted(unsigned long long pa, unsigned long length)
+{
+#ifdef __LP64__
+ _sim_syscall(SIM_SYSCALL_VALIDATE_LINES_EVICTED, 2, pa, length);
+#else
+ _sim_syscall(SIM_SYSCALL_VALIDATE_LINES_EVICTED, 4,
+ 0 /* dummy */, (long)(pa), (long)(pa >> 32), length);
+#endif
+}
+
+
+#endif /* !__DOXYGEN__ */
+
+
+
+
+/**
+ * Modify the shaping parameters of a shim.
+ *
+ * @param shim The shim to modify. One of:
+ * SIM_CONTROL_SHAPING_GBE_0
+ * SIM_CONTROL_SHAPING_GBE_1
+ * SIM_CONTROL_SHAPING_GBE_2
+ * SIM_CONTROL_SHAPING_GBE_3
+ * SIM_CONTROL_SHAPING_XGBE_0
+ * SIM_CONTROL_SHAPING_XGBE_1
+ *
+ * @param type The type of shaping. This should be the same type of
+ * shaping that is already in place on the shim. One of:
+ * SIM_CONTROL_SHAPING_MULTIPLIER
+ * SIM_CONTROL_SHAPING_PPS
+ * SIM_CONTROL_SHAPING_BPS
+ *
+ * @param units The magnitude of the rate. One of:
+ * SIM_CONTROL_SHAPING_UNITS_SINGLE
+ * SIM_CONTROL_SHAPING_UNITS_KILO
+ * SIM_CONTROL_SHAPING_UNITS_MEGA
+ * SIM_CONTROL_SHAPING_UNITS_GIGA
+ *
+ * @param rate The rate to which to change it. This must fit in
+ * SIM_CONTROL_SHAPING_RATE_BITS bits or a warning is issued and
+ * the shaping is not changed.
+ *
+ * @return 0 if no problems were detected in the arguments to sim_set_shaping
+ * or 1 if problems were detected (for example, rate does not fit in 17 bits).
+ */
+static __inline int
+sim_set_shaping(unsigned shim,
+ unsigned type,
+ unsigned units,
+ unsigned rate)
+{
+ if ((rate & ~((1 << SIM_CONTROL_SHAPING_RATE_BITS) - 1)) != 0)
+ return 1;
+
+ __insn_mtspr(SPR_SIM_CONTROL, SIM_SHAPING_SPR_ARG(shim, type, units, rate));
+ return 0;
+}
+
+#ifdef __tilegx__
+
+/** Enable a set of mPIPE links. Pass a -1 link_mask to enable all links. */
+static __inline void
+sim_enable_mpipe_links(unsigned mpipe, unsigned long link_mask)
+{
+ __insn_mtspr(SPR_SIM_CONTROL,
+ (SIM_CONTROL_ENABLE_MPIPE_LINK_MAGIC_BYTE |
+ (mpipe << 8) | (1 << 16) | ((uint_reg_t)link_mask << 32)));
+}
+
+/** Disable a set of mPIPE links. Pass a -1 link_mask to disable all links. */
+static __inline void
+sim_disable_mpipe_links(unsigned mpipe, unsigned long link_mask)
+{
+ __insn_mtspr(SPR_SIM_CONTROL,
+ (SIM_CONTROL_ENABLE_MPIPE_LINK_MAGIC_BYTE |
+ (mpipe << 8) | (0 << 16) | ((uint_reg_t)link_mask << 32)));
+}
+
+#endif /* __tilegx__ */
+
+
+/*
+ * An API for changing "functional" mode.
+ */
+
+#ifndef __DOXYGEN__
+
+#define sim_enable_functional() \
+ __insn_mtspr(SPR_SIM_CONTROL, SIM_CONTROL_ENABLE_FUNCTIONAL)
+
+#define sim_disable_functional() \
+ __insn_mtspr(SPR_SIM_CONTROL, SIM_CONTROL_DISABLE_FUNCTIONAL)
+
+#endif /* __DOXYGEN__ */
+
+
+/*
+ * Profiler support.
+ */
+
+/**
+ * Turn profiling on for the current task.
+ *
+ * Note that this has no effect if run in an environment without
+ * profiling support (thus, the proper flags to the simulator must
+ * be supplied).
+ */
+static __inline void
+sim_profiler_enable(void)
+{
+ __insn_mtspr(SPR_SIM_CONTROL, SIM_CONTROL_PROFILER_ENABLE);
+}
+
+
+/** Turn profiling off for the current task. */
+static __inline void
+sim_profiler_disable(void)
+{
+ __insn_mtspr(SPR_SIM_CONTROL, SIM_CONTROL_PROFILER_DISABLE);
+}
+
+
+/**
+ * Turn profiling on or off for the current task.
+ *
+ * @param enabled If true, turns on profiling. If false, turns it off.
+ *
+ * Note that this has no effect if run in an environment without
+ * profiling support (thus, the proper flags to the simulator must
+ * be supplied).
+ */
+static __inline void
+sim_profiler_set_enabled(int enabled)
+{
+ int val =
+ enabled ? SIM_CONTROL_PROFILER_ENABLE : SIM_CONTROL_PROFILER_DISABLE;
+ __insn_mtspr(SPR_SIM_CONTROL, val);
+}
+
+
+/**
+ * Return true if and only if profiling is currently enabled
+ * for the current task.
+ *
+ * This returns false even if sim_profiler_enable() was called
+ * if the current execution environment does not support profiling.
+ */
+static __inline int
+sim_profiler_is_enabled(void)
+{
+ return ((__insn_mfspr(SPR_SIM_CONTROL) & SIM_PROFILER_ENABLED_MASK) != 0);
+}
+
+
+/**
+ * Reset profiling counters to zero for the current task.
+ *
+ * Resetting can be done while profiling is enabled. It does not affect
+ * the chip-wide profiling counters.
+ */
+static __inline void
+sim_profiler_clear(void)
+{
+ __insn_mtspr(SPR_SIM_CONTROL, SIM_CONTROL_PROFILER_CLEAR);
+}
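Taken together, the per-task helpers support a simple measure-a-region pattern; a minimal sketch:

static void profile_region_example(void)
{
	sim_profiler_clear();	/* start the counters from zero */
	sim_profiler_enable();

	/* ... code under measurement ... */

	sim_profiler_disable();	/* counters now cover only the region above */
}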
+
+
+/**
+ * Enable specified chip-level profiling counters.
+ *
+ * Does not affect the per-task profiling counters.
+ *
+ * @param mask Either this special value:
+ *
+ * SIM_CHIP_ALL (enables all chip-level components).
+ *
+ * or the bitwise OR of these values:
+ *
+ * SIM_CHIP_MEMCTL (enable all memory controllers)
+ * SIM_CHIP_XAUI (enable all XAUI controllers)
+ * SIM_CHIP_MPIPE (enable all MPIPE controllers)
+ */
+static __inline void
+sim_profiler_chip_enable(unsigned int mask)
+{
+ __insn_mtspr(SPR_SIM_CONTROL, SIM_PROFILER_CHIP_ENABLE_SPR_ARG(mask));
+}
+
+
+/**
+ * Disable specified chip-level profiling counters.
+ *
+ * Does not affect the per-task profiling counters.
+ *
+ * @param mask Either this special value:
+ *
+ * SIM_CHIP_ALL (disables all chip-level components).
+ *
+ * or the bitwise OR of these values:
+ *
+ * SIM_CHIP_MEMCTL (disable all memory controllers)
+ * SIM_CHIP_XAUI (disable all XAUI controllers)
+ * SIM_CHIP_MPIPE (disable all MPIPE controllers)
+ */
+static __inline void
+sim_profiler_chip_disable(unsigned int mask)
+{
+ __insn_mtspr(SPR_SIM_CONTROL, SIM_PROFILER_CHIP_DISABLE_SPR_ARG(mask));
+}
+
+
+/**
+ * Reset specified chip-level profiling counters to zero.
+ *
+ * Does not affect the per-task profiling counters.
+ *
+ * @param mask Either this special value:
+ *
+ * SIM_CHIP_ALL (clears all chip-level components).
+ *
+ * or the bitwise OR of these values:
+ *
+ * SIM_CHIP_MEMCTL (clear all memory controllers)
+ * SIM_CHIP_XAUI (clear all XAUI controllers)
+ * SIM_CHIP_MPIPE (clear all MPIPE controllers)
+ */
+static __inline void
+sim_profiler_chip_clear(unsigned int mask)
+{
+ __insn_mtspr(SPR_SIM_CONTROL, SIM_PROFILER_CHIP_CLEAR_SPR_ARG(mask));
+}
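The chip-level counterparts compose the same way; a hypothetical sketch that measures only memory-controller and mPIPE activity:

static void profile_chip_traffic_example(void)
{
	sim_profiler_chip_clear(SIM_CHIP_ALL);
	sim_profiler_chip_enable(SIM_CHIP_MEMCTL | SIM_CHIP_MPIPE);

	/* ... workload of interest ... */

	sim_profiler_chip_disable(SIM_CHIP_ALL);
}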
+
+
+/*
+ * Event support.
+ */
+
+#ifndef __DOXYGEN__
+
+static __inline void
+sim_event_begin(unsigned int x)
+{
+#if defined(__tile__) && !defined(__NO_EVENT_SPR__)
+ __insn_mtspr(SPR_EVENT_BEGIN, x);
+#endif
+}
+
+static __inline void
+sim_event_end(unsigned int x)
+{
+#if defined(__tile__) && !defined(__NO_EVENT_SPR__)
+ __insn_mtspr(SPR_EVENT_END, x);
+#endif
+}
+
+#endif /* !__DOXYGEN__ */
+
+#endif /* !__ASSEMBLER__ */
+
+#endif /* !__ARCH_SIM_H__ */
+
+/** @} */
diff --git a/arch/tile/include/arch/sim_def.h b/arch/tile/include/arch/sim_def.h
index 6418fbde063e..7a17082c3773 100644
--- a/arch/tile/include/arch/sim_def.h
+++ b/arch/tile/include/arch/sim_def.h
@@ -1,477 +1,461 @@
-// Copyright 2010 Tilera Corporation. All Rights Reserved.
-//
-// This program is free software; you can redistribute it and/or
-// modify it under the terms of the GNU General Public License
-// as published by the Free Software Foundation, version 2.
-//
-// This program is distributed in the hope that it will be useful, but
-// WITHOUT ANY WARRANTY; without even the implied warranty of
-// MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
-// NON INFRINGEMENT. See the GNU General Public License for
-// more details.
-
-//! @file
-//!
-//! Some low-level simulator definitions.
-//!
+/*
+ * Copyright 2010 Tilera Corporation. All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation, version 2.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
+ * NON INFRINGEMENT. See the GNU General Public License for
+ * more details.
+ */
+
+/**
+ * @file
+ *
+ * Some low-level simulator definitions.
+ */
#ifndef __ARCH_SIM_DEF_H__
#define __ARCH_SIM_DEF_H__
-//! Internal: the low bits of the SIM_CONTROL_* SPR values specify
-//! the operation to perform, and the remaining bits are
-//! an operation-specific parameter (often unused).
-//!
+/**
+ * Internal: the low bits of the SIM_CONTROL_* SPR values specify
+ * the operation to perform, and the remaining bits are
+ * an operation-specific parameter (often unused).
+ */
#define _SIM_CONTROL_OPERATOR_BITS 8
-//== Values which can be written to SPR_SIM_CONTROL.
+/*
+ * Values which can be written to SPR_SIM_CONTROL.
+ */
-//! If written to SPR_SIM_CONTROL, stops profiling.
-//!
+/** If written to SPR_SIM_CONTROL, stops profiling. */
#define SIM_CONTROL_PROFILER_DISABLE 0
-//! If written to SPR_SIM_CONTROL, starts profiling.
-//!
+/** If written to SPR_SIM_CONTROL, starts profiling. */
#define SIM_CONTROL_PROFILER_ENABLE 1
-//! If written to SPR_SIM_CONTROL, clears profiling counters.
-//!
+/** If written to SPR_SIM_CONTROL, clears profiling counters. */
#define SIM_CONTROL_PROFILER_CLEAR 2
-//! If written to SPR_SIM_CONTROL, checkpoints the simulator.
-//!
+/** If written to SPR_SIM_CONTROL, checkpoints the simulator. */
#define SIM_CONTROL_CHECKPOINT 3
-//! If written to SPR_SIM_CONTROL, combined with a mask (shifted by 8),
-//! sets the tracing mask to the given mask. See "sim_set_tracing()".
-//!
+/**
+ * If written to SPR_SIM_CONTROL, combined with a mask (shifted by 8),
+ * sets the tracing mask to the given mask. See "sim_set_tracing()".
+ */
#define SIM_CONTROL_SET_TRACING 4
-//! If written to SPR_SIM_CONTROL, combined with a mask (shifted by 8),
-//! dumps the requested items of machine state to the log.
-//!
+/**
+ * If written to SPR_SIM_CONTROL, combined with a mask (shifted by 8),
+ * dumps the requested items of machine state to the log.
+ */
#define SIM_CONTROL_DUMP 5
-//! If written to SPR_SIM_CONTROL, clears chip-level profiling counters.
-//!
+/** If written to SPR_SIM_CONTROL, clears chip-level profiling counters. */
#define SIM_CONTROL_PROFILER_CHIP_CLEAR 6
-//! If written to SPR_SIM_CONTROL, disables chip-level profiling.
-//!
+/** If written to SPR_SIM_CONTROL, disables chip-level profiling. */
#define SIM_CONTROL_PROFILER_CHIP_DISABLE 7
-//! If written to SPR_SIM_CONTROL, enables chip-level profiling.
-//!
+/** If written to SPR_SIM_CONTROL, enables chip-level profiling. */
#define SIM_CONTROL_PROFILER_CHIP_ENABLE 8
-//! If written to SPR_SIM_CONTROL, enables chip-level functional mode
-//!
+/** If written to SPR_SIM_CONTROL, enables chip-level functional mode. */
#define SIM_CONTROL_ENABLE_FUNCTIONAL 9
-//! If written to SPR_SIM_CONTROL, disables chip-level functional mode.
-//!
+/** If written to SPR_SIM_CONTROL, disables chip-level functional mode. */
#define SIM_CONTROL_DISABLE_FUNCTIONAL 10
-//! If written to SPR_SIM_CONTROL, enables chip-level functional mode.
-//! All tiles must perform this write for functional mode to be enabled.
-//! Ignored in naked boot mode unless --functional is specified.
-//! WARNING: Only the hypervisor startup code should use this!
-//!
+/**
+ * If written to SPR_SIM_CONTROL, enables chip-level functional mode.
+ * All tiles must perform this write for functional mode to be enabled.
+ * Ignored in naked boot mode unless --functional is specified.
+ * WARNING: Only the hypervisor startup code should use this!
+ */
#define SIM_CONTROL_ENABLE_FUNCTIONAL_BARRIER 11
-//! If written to SPR_SIM_CONTROL, combined with a character (shifted by 8),
-//! writes a string directly to the simulator output. Written to once for
-//! each character in the string, plus a final NUL. Instead of NUL,
-//! you can also use "SIM_PUTC_FLUSH_STRING" or "SIM_PUTC_FLUSH_BINARY".
-//!
-// ISSUE: Document the meaning of "newline", and the handling of NUL.
-//
+/**
+ * If written to SPR_SIM_CONTROL, combined with a character (shifted by 8),
+ * writes a string directly to the simulator output. Written to once for
+ * each character in the string, plus a final NUL. Instead of NUL,
+ * you can also use "SIM_PUTC_FLUSH_STRING" or "SIM_PUTC_FLUSH_BINARY".
+ */
+/* ISSUE: Document the meaning of "newline", and the handling of NUL. */
#define SIM_CONTROL_PUTC 12
-//! If written to SPR_SIM_CONTROL, clears the --grind-coherence state for
-//! this core. This is intended to be used before a loop that will
-//! invalidate the cache by loading new data and evicting all current data.
-//! Generally speaking, this API should only be used by system code.
-//!
+/**
+ * If written to SPR_SIM_CONTROL, clears the --grind-coherence state for
+ * this core. This is intended to be used before a loop that will
+ * invalidate the cache by loading new data and evicting all current data.
+ * Generally speaking, this API should only be used by system code.
+ */
#define SIM_CONTROL_GRINDER_CLEAR 13
-//! If written to SPR_SIM_CONTROL, shuts down the simulator.
-//!
+/** If written to SPR_SIM_CONTROL, shuts down the simulator. */
#define SIM_CONTROL_SHUTDOWN 14
-//! If written to SPR_SIM_CONTROL, combined with a pid (shifted by 8),
-//! indicates that a fork syscall just created the given process.
-//!
+/**
+ * If written to SPR_SIM_CONTROL, combined with a pid (shifted by 8),
+ * indicates that a fork syscall just created the given process.
+ */
#define SIM_CONTROL_OS_FORK 15
-//! If written to SPR_SIM_CONTROL, combined with a pid (shifted by 8),
-//! indicates that an exit syscall was just executed by the given process.
-//!
+/**
+ * If written to SPR_SIM_CONTROL, combined with a pid (shifted by 8),
+ * indicates that an exit syscall was just executed by the given process.
+ */
#define SIM_CONTROL_OS_EXIT 16
-//! If written to SPR_SIM_CONTROL, combined with a pid (shifted by 8),
-//! indicates that the OS just switched to the given process.
-//!
+/**
+ * If written to SPR_SIM_CONTROL, combined with a pid (shifted by 8),
+ * indicates that the OS just switched to the given process.
+ */
#define SIM_CONTROL_OS_SWITCH 17
-//! If written to SPR_SIM_CONTROL, combined with a character (shifted by 8),
-//! indicates that an exec syscall was just executed. Written to once for
-//! each character in the executable name, plus a final NUL.
-//!
+/**
+ * If written to SPR_SIM_CONTROL, combined with a character (shifted by 8),
+ * indicates that an exec syscall was just executed. Written to once for
+ * each character in the executable name, plus a final NUL.
+ */
#define SIM_CONTROL_OS_EXEC 18
-//! If written to SPR_SIM_CONTROL, combined with a character (shifted by 8),
-//! indicates that an interpreter (PT_INTERP) was loaded. Written to once
-//! for each character in "ADDR:PATH", plus a final NUL, where "ADDR" is a
-//! hex load address starting with "0x", and "PATH" is the executable name.
-//!
+/**
+ * If written to SPR_SIM_CONTROL, combined with a character (shifted by 8),
+ * indicates that an interpreter (PT_INTERP) was loaded. Written to once
+ * for each character in "ADDR:PATH", plus a final NUL, where "ADDR" is a
+ * hex load address starting with "0x", and "PATH" is the executable name.
+ */
#define SIM_CONTROL_OS_INTERP 19
-//! If written to SPR_SIM_CONTROL, combined with a character (shifted by 8),
-//! indicates that a dll was loaded. Written to once for each character
-//! in "ADDR:PATH", plus a final NUL, where "ADDR" is a hexadecimal load
-//! address starting with "0x", and "PATH" is the executable name.
-//!
+/**
+ * If written to SPR_SIM_CONTROL, combined with a character (shifted by 8),
+ * indicates that a dll was loaded. Written to once for each character
+ * in "ADDR:PATH", plus a final NUL, where "ADDR" is a hexadecimal load
+ * address starting with "0x", and "PATH" is the executable name.
+ */
#define SIM_CONTROL_DLOPEN 20
-//! If written to SPR_SIM_CONTROL, combined with a character (shifted by 8),
-//! indicates that a dll was unloaded. Written to once for each character
-//! in "ADDR", plus a final NUL, where "ADDR" is a hexadecimal load
-//! address starting with "0x".
-//!
+/**
+ * If written to SPR_SIM_CONTROL, combined with a character (shifted by 8),
+ * indicates that a dll was unloaded. Written to once for each character
+ * in "ADDR", plus a final NUL, where "ADDR" is a hexadecimal load
+ * address starting with "0x".
+ */
#define SIM_CONTROL_DLCLOSE 21
-//! If written to SPR_SIM_CONTROL, combined with a flag (shifted by 8),
-//! indicates whether to allow data reads to remotely-cached
-//! dirty cache lines to be cached locally without grinder warnings or
-//! assertions (used by Linux kernel fast memcpy).
-//!
+/**
+ * If written to SPR_SIM_CONTROL, combined with a flag (shifted by 8),
+ * indicates whether to allow data reads to remotely-cached
+ * dirty cache lines to be cached locally without grinder warnings or
+ * assertions (used by Linux kernel fast memcpy).
+ */
#define SIM_CONTROL_ALLOW_MULTIPLE_CACHING 22
-//! If written to SPR_SIM_CONTROL, enables memory tracing.
-//!
+/** If written to SPR_SIM_CONTROL, enables memory tracing. */
#define SIM_CONTROL_ENABLE_MEM_LOGGING 23
-//! If written to SPR_SIM_CONTROL, disables memory tracing.
-//!
+/** If written to SPR_SIM_CONTROL, disables memory tracing. */
#define SIM_CONTROL_DISABLE_MEM_LOGGING 24
-//! If written to SPR_SIM_CONTROL, changes the shaping parameters of one of
-//! the gbe or xgbe shims. Must specify the shim id, the type, the units, and
-//! the rate, as defined in SIM_SHAPING_SPR_ARG.
-//!
+/**
+ * If written to SPR_SIM_CONTROL, changes the shaping parameters of one of
+ * the gbe or xgbe shims. Must specify the shim id, the type, the units, and
+ * the rate, as defined in SIM_SHAPING_SPR_ARG.
+ */
#define SIM_CONTROL_SHAPING 25
-//! If written to SPR_SIM_CONTROL, combined with character (shifted by 8),
-//! requests that a simulator command be executed. Written to once for each
-//! character in the command, plus a final NUL.
-//!
+/**
+ * If written to SPR_SIM_CONTROL, combined with character (shifted by 8),
+ * requests that a simulator command be executed. Written to once for each
+ * character in the command, plus a final NUL.
+ */
#define SIM_CONTROL_COMMAND 26
-//! If written to SPR_SIM_CONTROL, indicates that the simulated system
-//! is panicking, to allow debugging via --debug-on-panic.
-//!
+/**
+ * If written to SPR_SIM_CONTROL, indicates that the simulated system
+ * is panicking, to allow debugging via --debug-on-panic.
+ */
#define SIM_CONTROL_PANIC 27
-//! If written to SPR_SIM_CONTROL, triggers a simulator syscall.
-//! See "sim_syscall()" for more info.
-//!
+/**
+ * If written to SPR_SIM_CONTROL, triggers a simulator syscall.
+ * See "sim_syscall()" for more info.
+ */
#define SIM_CONTROL_SYSCALL 32
-//! If written to SPR_SIM_CONTROL, combined with a pid (shifted by 8),
-//! provides the pid that subsequent SIM_CONTROL_OS_FORK writes should
-//! use as the pid, rather than the default previous SIM_CONTROL_OS_SWITCH.
-//!
+/**
+ * If written to SPR_SIM_CONTROL, combined with a pid (shifted by 8),
+ * provides the pid that subsequent SIM_CONTROL_OS_FORK writes should
+ * use as the pid, rather than the default previous SIM_CONTROL_OS_SWITCH.
+ */
#define SIM_CONTROL_OS_FORK_PARENT 33
-//! If written to SPR_SIM_CONTROL, combined with a mPIPE shim number
-//! (shifted by 8), clears the pending magic data section. The cleared
-//! pending magic data section and any subsequently appended magic bytes
-//! will only take effect when the classifier blast programmer is run.
+/**
+ * If written to SPR_SIM_CONTROL, combined with a mPIPE shim number
+ * (shifted by 8), clears the pending magic data section. The cleared
+ * pending magic data section and any subsequently appended magic bytes
+ * will only take effect when the classifier blast programmer is run.
+ */
#define SIM_CONTROL_CLEAR_MPIPE_MAGIC_BYTES 34
-//! If written to SPR_SIM_CONTROL, combined with a mPIPE shim number
-//! (shifted by 8) and a byte of data (shifted by 16), appends that byte
-//! to the shim's pending magic data section. The pending magic data
-//! section takes effect when the classifier blast programmer is run.
+/**
+ * If written to SPR_SIM_CONTROL, combined with a mPIPE shim number
+ * (shifted by 8) and a byte of data (shifted by 16), appends that byte
+ * to the shim's pending magic data section. The pending magic data
+ * section takes effect when the classifier blast programmer is run.
+ */
#define SIM_CONTROL_APPEND_MPIPE_MAGIC_BYTE 35
-//! If written to SPR_SIM_CONTROL, combined with a mPIPE shim number
-//! (shifted by 8), an enable=1/disable=0 bit (shifted by 16), and a
-//! mask of links (shifted by 32), enable or disable the corresponding
-//! mPIPE links.
+/**
+ * If written to SPR_SIM_CONTROL, combined with a mPIPE shim number
+ * (shifted by 8), an enable=1/disable=0 bit (shifted by 16), and a
+ * mask of links (shifted by 32), enable or disable the corresponding
+ * mPIPE links.
+ */
#define SIM_CONTROL_ENABLE_MPIPE_LINK_MAGIC_BYTE 36
-//== Syscall numbers for use with "sim_syscall()".
-//! Syscall number for sim_add_watchpoint().
-//!
+/*
+ * Syscall numbers for use with "sim_syscall()".
+ */
+
+/** Syscall number for sim_add_watchpoint(). */
#define SIM_SYSCALL_ADD_WATCHPOINT 2
-//! Syscall number for sim_remove_watchpoint().
-//!
+/** Syscall number for sim_remove_watchpoint(). */
#define SIM_SYSCALL_REMOVE_WATCHPOINT 3
-//! Syscall number for sim_query_watchpoint().
-//!
+/** Syscall number for sim_query_watchpoint(). */
#define SIM_SYSCALL_QUERY_WATCHPOINT 4
-//! Syscall number that asserts that the cache lines whose 64-bit PA
-//! is passed as the second argument to sim_syscall(), and over a
-//! range passed as the third argument, are no longer in cache.
-//! The simulator raises an error if this is not the case.
-//!
+/**
+ * Syscall number that asserts that the cache lines whose 64-bit PA
+ * is passed as the second argument to sim_syscall(), and over a
+ * range passed as the third argument, are no longer in cache.
+ * The simulator raises an error if this is not the case.
+ */
#define SIM_SYSCALL_VALIDATE_LINES_EVICTED 5
-//== Bit masks which can be shifted by 8, combined with
-//== SIM_CONTROL_SET_TRACING, and written to SPR_SIM_CONTROL.
+/*
+ * Bit masks which can be shifted by 8, combined with
+ * SIM_CONTROL_SET_TRACING, and written to SPR_SIM_CONTROL.
+ */
-//! @addtogroup arch_sim
-//! @{
+/**
+ * @addtogroup arch_sim
+ * @{
+ */
-//! Enable --trace-cycle when passed to simulator_set_tracing().
-//!
+/** Enable --trace-cycle when passed to simulator_set_tracing(). */
#define SIM_TRACE_CYCLES 0x01
-//! Enable --trace-router when passed to simulator_set_tracing().
-//!
+/** Enable --trace-router when passed to simulator_set_tracing(). */
#define SIM_TRACE_ROUTER 0x02
-//! Enable --trace-register-writes when passed to simulator_set_tracing().
-//!
+/** Enable --trace-register-writes when passed to simulator_set_tracing(). */
#define SIM_TRACE_REGISTER_WRITES 0x04
-//! Enable --trace-disasm when passed to simulator_set_tracing().
-//!
+/** Enable --trace-disasm when passed to simulator_set_tracing(). */
#define SIM_TRACE_DISASM 0x08
-//! Enable --trace-stall-info when passed to simulator_set_tracing().
-//!
+/** Enable --trace-stall-info when passed to simulator_set_tracing(). */
#define SIM_TRACE_STALL_INFO 0x10
-//! Enable --trace-memory-controller when passed to simulator_set_tracing().
-//!
+/** Enable --trace-memory-controller when passed to simulator_set_tracing(). */
#define SIM_TRACE_MEMORY_CONTROLLER 0x20
-//! Enable --trace-l2 when passed to simulator_set_tracing().
-//!
+/** Enable --trace-l2 when passed to simulator_set_tracing(). */
#define SIM_TRACE_L2_CACHE 0x40
-//! Enable --trace-lines when passed to simulator_set_tracing().
-//!
+/** Enable --trace-lines when passed to simulator_set_tracing(). */
#define SIM_TRACE_LINES 0x80
-//! Turn off all tracing when passed to simulator_set_tracing().
-//!
+/** Turn off all tracing when passed to simulator_set_tracing(). */
#define SIM_TRACE_NONE 0
-//! Turn on all tracing when passed to simulator_set_tracing().
-//!
+/** Turn on all tracing when passed to simulator_set_tracing(). */
#define SIM_TRACE_ALL (-1)
-//! @}
+/** @} */
-//! Computes the value to write to SPR_SIM_CONTROL to set tracing flags.
-//!
+/** Computes the value to write to SPR_SIM_CONTROL to set tracing flags. */
#define SIM_TRACE_SPR_ARG(mask) \
(SIM_CONTROL_SET_TRACING | ((mask) << _SIM_CONTROL_OPERATOR_BITS))
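For example (a sketch, not part of the patch), enabling disassembly and cycle tracing directly through the SPR would look like:

static __inline void trace_disasm_and_cycles(void)
{
	__insn_mtspr(SPR_SIM_CONTROL,
		     SIM_TRACE_SPR_ARG(SIM_TRACE_DISASM | SIM_TRACE_CYCLES));
}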
-//== Bit masks which can be shifted by 8, combined with
-//== SIM_CONTROL_DUMP, and written to SPR_SIM_CONTROL.
+/*
+ * Bit masks which can be shifted by 8, combined with
+ * SIM_CONTROL_DUMP, and written to SPR_SIM_CONTROL.
+ */
-//! @addtogroup arch_sim
-//! @{
+/**
+ * @addtogroup arch_sim
+ * @{
+ */
-//! Dump the general-purpose registers.
-//!
+/** Dump the general-purpose registers. */
#define SIM_DUMP_REGS 0x001
-//! Dump the SPRs.
-//!
+/** Dump the SPRs. */
#define SIM_DUMP_SPRS 0x002
-//! Dump the ITLB.
-//!
+/** Dump the ITLB. */
#define SIM_DUMP_ITLB 0x004
-//! Dump the DTLB.
-//!
+/** Dump the DTLB. */
#define SIM_DUMP_DTLB 0x008
-//! Dump the L1 I-cache.
-//!
+/** Dump the L1 I-cache. */
#define SIM_DUMP_L1I 0x010
-//! Dump the L1 D-cache.
-//!
+/** Dump the L1 D-cache. */
#define SIM_DUMP_L1D 0x020
-//! Dump the L2 cache.
-//!
+/** Dump the L2 cache. */
#define SIM_DUMP_L2 0x040
-//! Dump the switch registers.
-//!
+/** Dump the switch registers. */
#define SIM_DUMP_SNREGS 0x080
-//! Dump the switch ITLB.
-//!
+/** Dump the switch ITLB. */
#define SIM_DUMP_SNITLB 0x100
-//! Dump the switch L1 I-cache.
-//!
+/** Dump the switch L1 I-cache. */
#define SIM_DUMP_SNL1I 0x200
-//! Dump the current backtrace.
-//!
+/** Dump the current backtrace. */
#define SIM_DUMP_BACKTRACE 0x400
-//! Only dump valid lines in caches.
-//!
+/** Only dump valid lines in caches. */
#define SIM_DUMP_VALID_LINES 0x800
-//! Dump everything that is dumpable.
-//!
+/** Dump everything that is dumpable. */
#define SIM_DUMP_ALL (-1 & ~SIM_DUMP_VALID_LINES)
-// @}
+/** @} */
-//! Computes the value to write to SPR_SIM_CONTROL to dump machine state.
-//!
+/** Computes the value to write to SPR_SIM_CONTROL to dump machine state. */
#define SIM_DUMP_SPR_ARG(mask) \
(SIM_CONTROL_DUMP | ((mask) << _SIM_CONTROL_OPERATOR_BITS))
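Similarly, a one-shot register-and-backtrace dump could be requested like this (sketch only):

static __inline void dump_regs_and_backtrace(void)
{
	__insn_mtspr(SPR_SIM_CONTROL,
		     SIM_DUMP_SPR_ARG(SIM_DUMP_REGS | SIM_DUMP_BACKTRACE));
}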
-//== Bit masks which can be shifted by 8, combined with
-//== SIM_CONTROL_PROFILER_CHIP_xxx, and written to SPR_SIM_CONTROL.
+/*
+ * Bit masks which can be shifted by 8, combined with
+ * SIM_CONTROL_PROFILER_CHIP_xxx, and written to SPR_SIM_CONTROL.
+ */
-//! @addtogroup arch_sim
-//! @{
+/**
+ * @addtogroup arch_sim
+ * @{
+ */
-//! Use with with SIM_PROFILER_CHIP_xxx to control the memory controllers.
-//!
+/** Use with SIM_PROFILER_CHIP_xxx to control the memory controllers. */
#define SIM_CHIP_MEMCTL 0x001
-//! Use with with SIM_PROFILER_CHIP_xxx to control the XAUI interface.
-//!
+/** Use with SIM_PROFILER_CHIP_xxx to control the XAUI interface. */
#define SIM_CHIP_XAUI 0x002
-//! Use with with SIM_PROFILER_CHIP_xxx to control the PCIe interface.
-//!
+/** Use with SIM_PROFILER_CHIP_xxx to control the PCIe interface. */
#define SIM_CHIP_PCIE 0x004
-//! Use with with SIM_PROFILER_CHIP_xxx to control the MPIPE interface.
-//!
+/** Use with SIM_PROFILER_CHIP_xxx to control the MPIPE interface. */
#define SIM_CHIP_MPIPE 0x008
-//! Reference all chip devices.
-//!
+/** Use with SIM_PROFILER_CHIP_xxx to control the TRIO interface. */
+#define SIM_CHIP_TRIO 0x010
+
+/** Reference all chip devices. */
#define SIM_CHIP_ALL (-1)
-//! @}
+/** @} */
-//! Computes the value to write to SPR_SIM_CONTROL to clear chip statistics.
-//!
+/** Computes the value to write to SPR_SIM_CONTROL to clear chip statistics. */
#define SIM_PROFILER_CHIP_CLEAR_SPR_ARG(mask) \
(SIM_CONTROL_PROFILER_CHIP_CLEAR | ((mask) << _SIM_CONTROL_OPERATOR_BITS))
-//! Computes the value to write to SPR_SIM_CONTROL to disable chip statistics.
-//!
+/** Computes the value to write to SPR_SIM_CONTROL to disable chip statistics.*/
#define SIM_PROFILER_CHIP_DISABLE_SPR_ARG(mask) \
(SIM_CONTROL_PROFILER_CHIP_DISABLE | ((mask) << _SIM_CONTROL_OPERATOR_BITS))
-//! Computes the value to write to SPR_SIM_CONTROL to enable chip statistics.
-//!
+/** Computes the value to write to SPR_SIM_CONTROL to enable chip statistics. */
#define SIM_PROFILER_CHIP_ENABLE_SPR_ARG(mask) \
(SIM_CONTROL_PROFILER_CHIP_ENABLE | ((mask) << _SIM_CONTROL_OPERATOR_BITS))
-// Shim bitrate controls.
+/* Shim bitrate controls. */
-//! The number of bits used to store the shim id.
-//!
+/** The number of bits used to store the shim id. */
#define SIM_CONTROL_SHAPING_SHIM_ID_BITS 3
-//! @addtogroup arch_sim
-//! @{
+/**
+ * @addtogroup arch_sim
+ * @{
+ */
-//! Change the gbe 0 bitrate.
-//!
+/** Change the gbe 0 bitrate. */
#define SIM_CONTROL_SHAPING_GBE_0 0x0
-//! Change the gbe 1 bitrate.
-//!
+/** Change the gbe 1 bitrate. */
#define SIM_CONTROL_SHAPING_GBE_1 0x1
-//! Change the gbe 2 bitrate.
-//!
+/** Change the gbe 2 bitrate. */
#define SIM_CONTROL_SHAPING_GBE_2 0x2
-//! Change the gbe 3 bitrate.
-//!
+/** Change the gbe 3 bitrate. */
#define SIM_CONTROL_SHAPING_GBE_3 0x3
-//! Change the xgbe 0 bitrate.
-//!
+/** Change the xgbe 0 bitrate. */
#define SIM_CONTROL_SHAPING_XGBE_0 0x4
-//! Change the xgbe 1 bitrate.
-//!
+/** Change the xgbe 1 bitrate. */
#define SIM_CONTROL_SHAPING_XGBE_1 0x5
-//! The type of shaping to do.
-//!
+/** The type of shaping to do. */
#define SIM_CONTROL_SHAPING_TYPE_BITS 2
-//! Control the multiplier.
-//!
+/** Control the multiplier. */
#define SIM_CONTROL_SHAPING_MULTIPLIER 0
-//! Control the PPS.
-//!
+/** Control the PPS. */
#define SIM_CONTROL_SHAPING_PPS 1
-//! Control the BPS.
-//!
+/** Control the BPS. */
#define SIM_CONTROL_SHAPING_BPS 2
-//! The number of bits for the units for the shaping parameter.
-//!
+/** The number of bits for the units for the shaping parameter. */
#define SIM_CONTROL_SHAPING_UNITS_BITS 2
-//! Provide a number in single units.
-//!
+/** Provide a number in single units. */
#define SIM_CONTROL_SHAPING_UNITS_SINGLE 0
-//! Provide a number in kilo units.
-//!
+/** Provide a number in kilo units. */
#define SIM_CONTROL_SHAPING_UNITS_KILO 1
-//! Provide a number in mega units.
-//!
+/** Provide a number in mega units. */
#define SIM_CONTROL_SHAPING_UNITS_MEGA 2
-//! Provide a number in giga units.
-//!
+/** Provide a number in giga units. */
#define SIM_CONTROL_SHAPING_UNITS_GIGA 3
-// @}
+/** @} */
-//! How many bits are available for the rate.
-//!
+/** How many bits are available for the rate. */
#define SIM_CONTROL_SHAPING_RATE_BITS \
(32 - (_SIM_CONTROL_OPERATOR_BITS + \
SIM_CONTROL_SHAPING_SHIM_ID_BITS + \
SIM_CONTROL_SHAPING_TYPE_BITS + \
SIM_CONTROL_SHAPING_UNITS_BITS))
-//! Computes the value to write to SPR_SIM_CONTROL to change a bitrate.
-//!
+/** Computes the value to write to SPR_SIM_CONTROL to change a bitrate. */
#define SIM_SHAPING_SPR_ARG(shim, type, units, rate) \
(SIM_CONTROL_SHAPING | \
((shim) | \
@@ -483,30 +467,36 @@
SIM_CONTROL_SHAPING_UNITS_BITS))) << _SIM_CONTROL_OPERATOR_BITS)
-//== Values returned when reading SPR_SIM_CONTROL.
-// ISSUE: These names should share a longer common prefix.
+/*
+ * Values returned when reading SPR_SIM_CONTROL.
+ * ISSUE: These names should share a longer common prefix.
+ */
-//! When reading SPR_SIM_CONTROL, the mask of simulator tracing bits
-//! (SIM_TRACE_xxx values).
-//!
+/**
+ * When reading SPR_SIM_CONTROL, the mask of simulator tracing bits
+ * (SIM_TRACE_xxx values).
+ */
#define SIM_TRACE_FLAG_MASK 0xFFFF
-//! When reading SPR_SIM_CONTROL, the mask for whether profiling is enabled.
-//!
+/** When reading SPR_SIM_CONTROL, the mask for whether profiling is enabled. */
#define SIM_PROFILER_ENABLED_MASK 0x10000
-//== Special arguments for "SIM_CONTROL_PUTC".
+/*
+ * Special arguments for "SIM_CONTROL_PUTC".
+ */
-//! Flag value for forcing a PUTC string-flush, including
-//! coordinate/cycle prefix and newline.
-//!
+/**
+ * Flag value for forcing a PUTC string-flush, including
+ * coordinate/cycle prefix and newline.
+ */
#define SIM_PUTC_FLUSH_STRING 0x100
-//! Flag value for forcing a PUTC binary-data-flush, which skips the
-//! prefix and does not append a newline.
-//!
+/**
+ * Flag value for forcing a PUTC binary-data-flush, which skips the
+ * prefix and does not append a newline.
+ */
#define SIM_PUTC_FLUSH_BINARY 0x101
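Putting the PUTC pieces together, a hedged sketch of string output (one SPR write per character, then a flush value in place of the terminating NUL, as described for SIM_CONTROL_PUTC above):

static __inline void sim_print_string_example(const char *str)
{
	const char *p;

	for (p = str; *p != '\0'; p++)
		__insn_mtspr(SPR_SIM_CONTROL, SIM_CONTROL_PUTC |
			     ((unsigned char)*p << _SIM_CONTROL_OPERATOR_BITS));

	/* Flush with prefix and newline instead of writing a NUL. */
	__insn_mtspr(SPR_SIM_CONTROL, SIM_CONTROL_PUTC |
		     (SIM_PUTC_FLUSH_STRING << _SIM_CONTROL_OPERATOR_BITS));
}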
-#endif //__ARCH_SIM_DEF_H__
+#endif /* __ARCH_SIM_DEF_H__ */
diff --git a/arch/tile/include/arch/spr_def.h b/arch/tile/include/arch/spr_def.h
index c8fdbd9a45e6..442fcba0d122 100644
--- a/arch/tile/include/arch/spr_def.h
+++ b/arch/tile/include/arch/spr_def.h
@@ -12,8 +12,93 @@
* more details.
*/
+/*
+ * In addition to including the proper base SPR definition file, depending
+ * on machine architecture, this file defines several macros which allow
+ * kernel code to use protection-level dependent SPRs without worrying
+ * about which PL it's running at. In these macros, the PL that the SPR
+ * or interrupt number applies to is replaced by K.
+ */
+
+#if CONFIG_KERNEL_PL != 1 && CONFIG_KERNEL_PL != 2
+#error CONFIG_KERNEL_PL must be 1 or 2
+#endif
+
+/* Concatenate 4 strings. */
+#define __concat4(a, b, c, d) a ## b ## c ## d
+#define _concat4(a, b, c, d) __concat4(a, b, c, d)
+
#ifdef __tilegx__
#include <arch/spr_def_64.h>
+
+/* TILE-Gx dependent, protection-level dependent SPRs. */
+
+#define SPR_INTERRUPT_MASK_K \
+ _concat4(SPR_INTERRUPT_MASK_, CONFIG_KERNEL_PL,,)
+#define SPR_INTERRUPT_MASK_SET_K \
+ _concat4(SPR_INTERRUPT_MASK_SET_, CONFIG_KERNEL_PL,,)
+#define SPR_INTERRUPT_MASK_RESET_K \
+ _concat4(SPR_INTERRUPT_MASK_RESET_, CONFIG_KERNEL_PL,,)
+#define SPR_INTERRUPT_VECTOR_BASE_K \
+ _concat4(SPR_INTERRUPT_VECTOR_BASE_, CONFIG_KERNEL_PL,,)
+
+#define SPR_IPI_MASK_K \
+ _concat4(SPR_IPI_MASK_, CONFIG_KERNEL_PL,,)
+#define SPR_IPI_MASK_RESET_K \
+ _concat4(SPR_IPI_MASK_RESET_, CONFIG_KERNEL_PL,,)
+#define SPR_IPI_MASK_SET_K \
+ _concat4(SPR_IPI_MASK_SET_, CONFIG_KERNEL_PL,,)
+#define SPR_IPI_EVENT_K \
+ _concat4(SPR_IPI_EVENT_, CONFIG_KERNEL_PL,,)
+#define SPR_IPI_EVENT_RESET_K \
+ _concat4(SPR_IPI_EVENT_RESET_, CONFIG_KERNEL_PL,,)
+#define SPR_IPI_MASK_SET_K \
+ _concat4(SPR_IPI_MASK_SET_, CONFIG_KERNEL_PL,,)
+#define INT_IPI_K \
+ _concat4(INT_IPI_, CONFIG_KERNEL_PL,,)
+
+#define SPR_SINGLE_STEP_CONTROL_K \
+ _concat4(SPR_SINGLE_STEP_CONTROL_, CONFIG_KERNEL_PL,,)
+#define SPR_SINGLE_STEP_EN_K_K \
+ _concat4(SPR_SINGLE_STEP_EN_, CONFIG_KERNEL_PL, _, CONFIG_KERNEL_PL)
+#define INT_SINGLE_STEP_K \
+ _concat4(INT_SINGLE_STEP_, CONFIG_KERNEL_PL,,)
+
#else
#include <arch/spr_def_32.h>
+
+/* TILEPro dependent, protection-level dependent SPRs. */
+
+#define SPR_INTERRUPT_MASK_K_0 \
+ _concat4(SPR_INTERRUPT_MASK_, CONFIG_KERNEL_PL, _0,)
+#define SPR_INTERRUPT_MASK_K_1 \
+ _concat4(SPR_INTERRUPT_MASK_, CONFIG_KERNEL_PL, _1,)
+#define SPR_INTERRUPT_MASK_SET_K_0 \
+ _concat4(SPR_INTERRUPT_MASK_SET_, CONFIG_KERNEL_PL, _0,)
+#define SPR_INTERRUPT_MASK_SET_K_1 \
+ _concat4(SPR_INTERRUPT_MASK_SET_, CONFIG_KERNEL_PL, _1,)
+#define SPR_INTERRUPT_MASK_RESET_K_0 \
+ _concat4(SPR_INTERRUPT_MASK_RESET_, CONFIG_KERNEL_PL, _0,)
+#define SPR_INTERRUPT_MASK_RESET_K_1 \
+ _concat4(SPR_INTERRUPT_MASK_RESET_, CONFIG_KERNEL_PL, _1,)
+
#endif
+
+/* Generic protection-level dependent SPRs. */
+
+#define SPR_SYSTEM_SAVE_K_0 \
+ _concat4(SPR_SYSTEM_SAVE_, CONFIG_KERNEL_PL, _0,)
+#define SPR_SYSTEM_SAVE_K_1 \
+ _concat4(SPR_SYSTEM_SAVE_, CONFIG_KERNEL_PL, _1,)
+#define SPR_SYSTEM_SAVE_K_2 \
+ _concat4(SPR_SYSTEM_SAVE_, CONFIG_KERNEL_PL, _2,)
+#define SPR_SYSTEM_SAVE_K_3 \
+ _concat4(SPR_SYSTEM_SAVE_, CONFIG_KERNEL_PL, _3,)
+#define SPR_EX_CONTEXT_K_0 \
+ _concat4(SPR_EX_CONTEXT_, CONFIG_KERNEL_PL, _0,)
+#define SPR_EX_CONTEXT_K_1 \
+ _concat4(SPR_EX_CONTEXT_, CONFIG_KERNEL_PL, _1,)
+#define SPR_INTCTRL_K_STATUS \
+ _concat4(SPR_INTCTRL_, CONFIG_KERNEL_PL, _STATUS,)
+#define INT_INTCTRL_K \
+ _concat4(INT_INTCTRL_, CONFIG_KERNEL_PL,,)
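To make the double expansion concrete, here is how one of the generic macros resolves when CONFIG_KERNEL_PL is 2 (an illustration, not part of the patch):

/*
 * SPR_EX_CONTEXT_K_0
 *   -> _concat4(SPR_EX_CONTEXT_, CONFIG_KERNEL_PL, _0,)
 *   -> __concat4(SPR_EX_CONTEXT_, 2, _0,)   (arguments expanded first)
 *   -> SPR_EX_CONTEXT_2_0
 *
 * so code written against the _K names picks up the PL-1 or PL-2 SPR
 * numbers purely from the CONFIG_KERNEL_PL setting.
 */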
diff --git a/arch/tile/include/arch/spr_def_32.h b/arch/tile/include/arch/spr_def_32.h
index b4fc06864df6..bbc1f4c924ee 100644
--- a/arch/tile/include/arch/spr_def_32.h
+++ b/arch/tile/include/arch/spr_def_32.h
@@ -56,58 +56,93 @@
#define SPR_EX_CONTEXT_1_1__ICS_SHIFT 2
#define SPR_EX_CONTEXT_1_1__ICS_RMASK 0x1
#define SPR_EX_CONTEXT_1_1__ICS_MASK 0x4
+#define SPR_EX_CONTEXT_2_0 0x4605
+#define SPR_EX_CONTEXT_2_1 0x4606
+#define SPR_EX_CONTEXT_2_1__PL_SHIFT 0
+#define SPR_EX_CONTEXT_2_1__PL_RMASK 0x3
+#define SPR_EX_CONTEXT_2_1__PL_MASK 0x3
+#define SPR_EX_CONTEXT_2_1__ICS_SHIFT 2
+#define SPR_EX_CONTEXT_2_1__ICS_RMASK 0x1
+#define SPR_EX_CONTEXT_2_1__ICS_MASK 0x4
#define SPR_FAIL 0x4e09
#define SPR_INTCTRL_0_STATUS 0x4a07
#define SPR_INTCTRL_1_STATUS 0x4807
+#define SPR_INTCTRL_2_STATUS 0x4607
#define SPR_INTERRUPT_CRITICAL_SECTION 0x4e0a
#define SPR_INTERRUPT_MASK_0_0 0x4a08
#define SPR_INTERRUPT_MASK_0_1 0x4a09
#define SPR_INTERRUPT_MASK_1_0 0x4809
#define SPR_INTERRUPT_MASK_1_1 0x480a
+#define SPR_INTERRUPT_MASK_2_0 0x4608
+#define SPR_INTERRUPT_MASK_2_1 0x4609
#define SPR_INTERRUPT_MASK_RESET_0_0 0x4a0a
#define SPR_INTERRUPT_MASK_RESET_0_1 0x4a0b
#define SPR_INTERRUPT_MASK_RESET_1_0 0x480b
#define SPR_INTERRUPT_MASK_RESET_1_1 0x480c
+#define SPR_INTERRUPT_MASK_RESET_2_0 0x460a
+#define SPR_INTERRUPT_MASK_RESET_2_1 0x460b
#define SPR_INTERRUPT_MASK_SET_0_0 0x4a0c
#define SPR_INTERRUPT_MASK_SET_0_1 0x4a0d
#define SPR_INTERRUPT_MASK_SET_1_0 0x480d
#define SPR_INTERRUPT_MASK_SET_1_1 0x480e
+#define SPR_INTERRUPT_MASK_SET_2_0 0x460c
+#define SPR_INTERRUPT_MASK_SET_2_1 0x460d
#define SPR_MPL_DMA_CPL_SET_0 0x5800
#define SPR_MPL_DMA_CPL_SET_1 0x5801
+#define SPR_MPL_DMA_CPL_SET_2 0x5802
#define SPR_MPL_DMA_NOTIFY_SET_0 0x3800
#define SPR_MPL_DMA_NOTIFY_SET_1 0x3801
+#define SPR_MPL_DMA_NOTIFY_SET_2 0x3802
#define SPR_MPL_INTCTRL_0_SET_0 0x4a00
#define SPR_MPL_INTCTRL_0_SET_1 0x4a01
+#define SPR_MPL_INTCTRL_0_SET_2 0x4a02
#define SPR_MPL_INTCTRL_1_SET_0 0x4800
#define SPR_MPL_INTCTRL_1_SET_1 0x4801
+#define SPR_MPL_INTCTRL_1_SET_2 0x4802
+#define SPR_MPL_INTCTRL_2_SET_0 0x4600
+#define SPR_MPL_INTCTRL_2_SET_1 0x4601
+#define SPR_MPL_INTCTRL_2_SET_2 0x4602
#define SPR_MPL_SN_ACCESS_SET_0 0x0800
#define SPR_MPL_SN_ACCESS_SET_1 0x0801
+#define SPR_MPL_SN_ACCESS_SET_2 0x0802
#define SPR_MPL_SN_CPL_SET_0 0x5a00
#define SPR_MPL_SN_CPL_SET_1 0x5a01
+#define SPR_MPL_SN_CPL_SET_2 0x5a02
#define SPR_MPL_SN_FIREWALL_SET_0 0x2c00
#define SPR_MPL_SN_FIREWALL_SET_1 0x2c01
+#define SPR_MPL_SN_FIREWALL_SET_2 0x2c02
#define SPR_MPL_SN_NOTIFY_SET_0 0x2a00
#define SPR_MPL_SN_NOTIFY_SET_1 0x2a01
+#define SPR_MPL_SN_NOTIFY_SET_2 0x2a02
#define SPR_MPL_UDN_ACCESS_SET_0 0x0c00
#define SPR_MPL_UDN_ACCESS_SET_1 0x0c01
+#define SPR_MPL_UDN_ACCESS_SET_2 0x0c02
#define SPR_MPL_UDN_AVAIL_SET_0 0x4000
#define SPR_MPL_UDN_AVAIL_SET_1 0x4001
+#define SPR_MPL_UDN_AVAIL_SET_2 0x4002
#define SPR_MPL_UDN_CA_SET_0 0x3c00
#define SPR_MPL_UDN_CA_SET_1 0x3c01
+#define SPR_MPL_UDN_CA_SET_2 0x3c02
#define SPR_MPL_UDN_COMPLETE_SET_0 0x1400
#define SPR_MPL_UDN_COMPLETE_SET_1 0x1401
+#define SPR_MPL_UDN_COMPLETE_SET_2 0x1402
#define SPR_MPL_UDN_FIREWALL_SET_0 0x3000
#define SPR_MPL_UDN_FIREWALL_SET_1 0x3001
+#define SPR_MPL_UDN_FIREWALL_SET_2 0x3002
#define SPR_MPL_UDN_REFILL_SET_0 0x1000
#define SPR_MPL_UDN_REFILL_SET_1 0x1001
+#define SPR_MPL_UDN_REFILL_SET_2 0x1002
#define SPR_MPL_UDN_TIMER_SET_0 0x3600
#define SPR_MPL_UDN_TIMER_SET_1 0x3601
+#define SPR_MPL_UDN_TIMER_SET_2 0x3602
#define SPR_MPL_WORLD_ACCESS_SET_0 0x4e00
#define SPR_MPL_WORLD_ACCESS_SET_1 0x4e01
+#define SPR_MPL_WORLD_ACCESS_SET_2 0x4e02
#define SPR_PASS 0x4e0b
#define SPR_PERF_COUNT_0 0x4205
#define SPR_PERF_COUNT_1 0x4206
#define SPR_PERF_COUNT_CTL 0x4207
+#define SPR_PERF_COUNT_DN_CTL 0x4210
#define SPR_PERF_COUNT_STS 0x4208
#define SPR_PROC_STATUS 0x4f00
#define SPR_SIM_CONTROL 0x4e0c
@@ -124,6 +159,10 @@
#define SPR_SYSTEM_SAVE_1_1 0x4901
#define SPR_SYSTEM_SAVE_1_2 0x4902
#define SPR_SYSTEM_SAVE_1_3 0x4903
+#define SPR_SYSTEM_SAVE_2_0 0x4700
+#define SPR_SYSTEM_SAVE_2_1 0x4701
+#define SPR_SYSTEM_SAVE_2_2 0x4702
+#define SPR_SYSTEM_SAVE_2_3 0x4703
#define SPR_TILE_COORD 0x4c17
#define SPR_TILE_RTF_HWM 0x4e10
#define SPR_TILE_TIMER_CONTROL 0x3205
diff --git a/arch/tile/include/asm/backtrace.h b/arch/tile/include/asm/backtrace.h
index 758ca4619d50..f18887d82399 100644
--- a/arch/tile/include/asm/backtrace.h
+++ b/arch/tile/include/asm/backtrace.h
@@ -146,7 +146,10 @@ enum {
CALLER_SP_IN_R52_BASE = 4,
- CALLER_SP_OFFSET_BASE = 8
+ CALLER_SP_OFFSET_BASE = 8,
+
+ /* Marks the entry point of certain functions. */
+ ENTRY_POINT_INFO_OP = 16
};
diff --git a/arch/tile/include/asm/cacheflush.h b/arch/tile/include/asm/cacheflush.h
index c5741da4eeac..14a3f8556ace 100644
--- a/arch/tile/include/asm/cacheflush.h
+++ b/arch/tile/include/asm/cacheflush.h
@@ -137,4 +137,56 @@ static inline void finv_buffer(void *buffer, size_t size)
mb_incoherent();
}
+/*
+ * Flush & invalidate a VA range that is homed remotely on a single core,
+ * waiting until the memory controller holds the flushed values.
+ */
+static inline void finv_buffer_remote(void *buffer, size_t size)
+{
+ char *p;
+ int i;
+
+ /*
+ * Flush and invalidate the buffer out of the local L1/L2
+ * and request the home cache to flush and invalidate as well.
+ */
+ __finv_buffer(buffer, size);
+
+ /*
+ * Wait for the home cache to acknowledge that it has processed
+ * all the flush-and-invalidate requests. This does not mean
+ * that the flushed data has reached the memory controller yet,
+ * but it does mean the home cache is processing the flushes.
+ */
+ __insn_mf();
+
+ /*
+ * Issue a load to the last cache line, which can't complete
+ * until all the previously-issued flushes to the same memory
+ * controller have also completed. If we weren't striping
+ * memory, that one load would be sufficient, but since we may
+ * be, we also need to back up to the last load issued to
+ * another memory controller, which would be the point where
+ * we crossed an 8KB boundary (the granularity of striping
+ * across memory controllers). Keep backing up and doing this
+ * until we are before the beginning of the buffer, or have
+ * hit all the controllers.
+ */
+ for (i = 0, p = (char *)buffer + size - 1;
+ i < (1 << CHIP_LOG_NUM_MSHIMS()) && p >= (char *)buffer;
+ ++i) {
+ const unsigned long STRIPE_WIDTH = 8192;
+
+ /* Force a load instruction to issue. */
+ *(volatile char *)p;
+
+ /* Jump to end of previous stripe. */
+ p -= STRIPE_WIDTH;
+ p = (char *)((unsigned long)p | (STRIPE_WIDTH - 1));
+ }
+
+ /* Wait for the loads (and thus flushes) to have completed. */
+ __insn_mf();
+}
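A minimal caller sketch (illustrative; the buffer and its consumer are hypothetical):

/* Push a filled, remotely-homed buffer out to memory before a device reads it. */
static inline void hand_buffer_to_device(void *buf, size_t len)
{
	finv_buffer_remote(buf, len);
	/* buf is now out of all caches and visible at the memory controller. */
}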
+
#endif /* _ASM_TILE_CACHEFLUSH_H */
diff --git a/arch/tile/include/asm/compat.h b/arch/tile/include/asm/compat.h
index 8b60ec8b2d19..c3ae570c0a5d 100644
--- a/arch/tile/include/asm/compat.h
+++ b/arch/tile/include/asm/compat.h
@@ -216,15 +216,16 @@ struct compat_siginfo;
struct compat_sigaltstack;
long compat_sys_execve(const char __user *path,
const compat_uptr_t __user *argv,
- const compat_uptr_t __user *envp);
+ const compat_uptr_t __user *envp, struct pt_regs *);
long compat_sys_rt_sigaction(int sig, struct compat_sigaction __user *act,
struct compat_sigaction __user *oact,
size_t sigsetsize);
long compat_sys_rt_sigqueueinfo(int pid, int sig,
struct compat_siginfo __user *uinfo);
-long compat_sys_rt_sigreturn(void);
+long compat_sys_rt_sigreturn(struct pt_regs *);
long compat_sys_sigaltstack(const struct compat_sigaltstack __user *uss_ptr,
- struct compat_sigaltstack __user *uoss_ptr);
+ struct compat_sigaltstack __user *uoss_ptr,
+ struct pt_regs *);
long compat_sys_truncate64(char __user *filename, u32 dummy, u32 low, u32 high);
long compat_sys_ftruncate64(unsigned int fd, u32 dummy, u32 low, u32 high);
long compat_sys_pread64(unsigned int fd, char __user *ubuf, size_t count,
@@ -255,4 +256,12 @@ long tile_compat_sys_ptrace(compat_long_t request, compat_long_t pid,
/* Tilera Linux syscalls that don't have "compat" versions. */
#define compat_sys_flush_cache sys_flush_cache
+/* These are the intvec_64.S trampolines. */
+long _compat_sys_execve(const char __user *path,
+ const compat_uptr_t __user *argv,
+ const compat_uptr_t __user *envp);
+long _compat_sys_sigaltstack(const struct compat_sigaltstack __user *uss_ptr,
+ struct compat_sigaltstack __user *uoss_ptr);
+long _compat_sys_rt_sigreturn(void);
+
#endif /* _ASM_TILE_COMPAT_H */
diff --git a/arch/tile/include/asm/highmem.h b/arch/tile/include/asm/highmem.h
index d155db6fa9bd..b2a6c5de79ab 100644
--- a/arch/tile/include/asm/highmem.h
+++ b/arch/tile/include/asm/highmem.h
@@ -23,7 +23,6 @@
#include <linux/interrupt.h>
#include <linux/threads.h>
-#include <asm/kmap_types.h>
#include <asm/tlbflush.h>
#include <asm/homecache.h>
@@ -60,12 +59,12 @@ void *kmap_fix_kpte(struct page *page, int finished);
/* This macro is used only in map_new_virtual() to map "page". */
#define kmap_prot page_to_kpgprot(page)
-void kunmap_atomic_notypecheck(void *kvaddr, enum km_type type);
-void *kmap_atomic_pfn(unsigned long pfn, enum km_type type);
-void *kmap_atomic_prot_pfn(unsigned long pfn, enum km_type type, pgprot_t prot);
+void *__kmap_atomic(struct page *page);
+void __kunmap_atomic(void *kvaddr);
+void *kmap_atomic_pfn(unsigned long pfn);
+void *kmap_atomic_prot_pfn(unsigned long pfn, pgprot_t prot);
struct page *kmap_atomic_to_page(void *ptr);
-void *kmap_atomic_prot(struct page *page, enum km_type type, pgprot_t prot);
-void *kmap_atomic(struct page *page, enum km_type type);
+void *kmap_atomic_prot(struct page *page, pgprot_t prot);
void kmap_atomic_fix_kpte(struct page *page, int finished);
#define flush_cache_kmaps() do { } while (0)
diff --git a/arch/tile/include/asm/io.h b/arch/tile/include/asm/io.h
index ee43328713ab..d3cbb9b14cbe 100644
--- a/arch/tile/include/asm/io.h
+++ b/arch/tile/include/asm/io.h
@@ -55,9 +55,6 @@ extern void iounmap(volatile void __iomem *addr);
#define ioremap_writethrough(physaddr, size) ioremap(physaddr, size)
#define ioremap_fullcache(physaddr, size) ioremap(physaddr, size)
-void __iomem *ioport_map(unsigned long port, unsigned int len);
-extern inline void ioport_unmap(void __iomem *addr) {}
-
#define mmiowb()
/* Conversion between virtual and physical mappings. */
@@ -189,12 +186,22 @@ static inline void memcpy_toio(volatile void __iomem *dst, const void *src,
* we never run, uses them unconditionally.
*/
-static inline int ioport_panic(void)
+static inline long ioport_panic(void)
{
panic("inb/outb and friends do not exist on tile");
return 0;
}
+static inline void __iomem *ioport_map(unsigned long port, unsigned int len)
+{
+ return (void __iomem *) ioport_panic();
+}
+
+static inline void ioport_unmap(void __iomem *addr)
+{
+ ioport_panic();
+}
+
static inline u8 inb(unsigned long addr)
{
return ioport_panic();
diff --git a/arch/tile/include/asm/irqflags.h b/arch/tile/include/asm/irqflags.h
index a11d4837ee4d..641e4ff3d805 100644
--- a/arch/tile/include/asm/irqflags.h
+++ b/arch/tile/include/asm/irqflags.h
@@ -47,53 +47,53 @@
int __n = (n); \
int __mask = 1 << (__n & 0x1f); \
if (__n < 32) \
- __insn_mtspr(SPR_INTERRUPT_MASK_SET_1_0, __mask); \
+ __insn_mtspr(SPR_INTERRUPT_MASK_SET_K_0, __mask); \
else \
- __insn_mtspr(SPR_INTERRUPT_MASK_SET_1_1, __mask); \
+ __insn_mtspr(SPR_INTERRUPT_MASK_SET_K_1, __mask); \
} while (0)
#define interrupt_mask_reset(n) do { \
int __n = (n); \
int __mask = 1 << (__n & 0x1f); \
if (__n < 32) \
- __insn_mtspr(SPR_INTERRUPT_MASK_RESET_1_0, __mask); \
+ __insn_mtspr(SPR_INTERRUPT_MASK_RESET_K_0, __mask); \
else \
- __insn_mtspr(SPR_INTERRUPT_MASK_RESET_1_1, __mask); \
+ __insn_mtspr(SPR_INTERRUPT_MASK_RESET_K_1, __mask); \
} while (0)
#define interrupt_mask_check(n) ({ \
int __n = (n); \
(((__n < 32) ? \
- __insn_mfspr(SPR_INTERRUPT_MASK_1_0) : \
- __insn_mfspr(SPR_INTERRUPT_MASK_1_1)) \
+ __insn_mfspr(SPR_INTERRUPT_MASK_K_0) : \
+ __insn_mfspr(SPR_INTERRUPT_MASK_K_1)) \
>> (__n & 0x1f)) & 1; \
})
#define interrupt_mask_set_mask(mask) do { \
unsigned long long __m = (mask); \
- __insn_mtspr(SPR_INTERRUPT_MASK_SET_1_0, (unsigned long)(__m)); \
- __insn_mtspr(SPR_INTERRUPT_MASK_SET_1_1, (unsigned long)(__m>>32)); \
+ __insn_mtspr(SPR_INTERRUPT_MASK_SET_K_0, (unsigned long)(__m)); \
+ __insn_mtspr(SPR_INTERRUPT_MASK_SET_K_1, (unsigned long)(__m>>32)); \
} while (0)
#define interrupt_mask_reset_mask(mask) do { \
unsigned long long __m = (mask); \
- __insn_mtspr(SPR_INTERRUPT_MASK_RESET_1_0, (unsigned long)(__m)); \
- __insn_mtspr(SPR_INTERRUPT_MASK_RESET_1_1, (unsigned long)(__m>>32)); \
+ __insn_mtspr(SPR_INTERRUPT_MASK_RESET_K_0, (unsigned long)(__m)); \
+ __insn_mtspr(SPR_INTERRUPT_MASK_RESET_K_1, (unsigned long)(__m>>32)); \
} while (0)
#else
#define interrupt_mask_set(n) \
- __insn_mtspr(SPR_INTERRUPT_MASK_SET_1, (1UL << (n)))
+ __insn_mtspr(SPR_INTERRUPT_MASK_SET_K, (1UL << (n)))
#define interrupt_mask_reset(n) \
- __insn_mtspr(SPR_INTERRUPT_MASK_RESET_1, (1UL << (n)))
+ __insn_mtspr(SPR_INTERRUPT_MASK_RESET_K, (1UL << (n)))
#define interrupt_mask_check(n) \
- ((__insn_mfspr(SPR_INTERRUPT_MASK_1) >> (n)) & 1)
+ ((__insn_mfspr(SPR_INTERRUPT_MASK_K) >> (n)) & 1)
#define interrupt_mask_set_mask(mask) \
- __insn_mtspr(SPR_INTERRUPT_MASK_SET_1, (mask))
+ __insn_mtspr(SPR_INTERRUPT_MASK_SET_K, (mask))
#define interrupt_mask_reset_mask(mask) \
- __insn_mtspr(SPR_INTERRUPT_MASK_RESET_1, (mask))
+ __insn_mtspr(SPR_INTERRUPT_MASK_RESET_K, (mask))
#endif
/*
* The set of interrupts we want active if irqs are enabled.
* Note that in particular, the tile timer interrupt comes and goes
* from this set, since we have no other way to turn off the timer.
- * Likewise, INTCTRL_1 is removed and re-added during device
+ * Likewise, INTCTRL_K is removed and re-added during device
* interrupts, as is the the hardwall UDN_FIREWALL interrupt.
* We use a low bit (MEM_ERROR) as our sentinel value and make sure it
* is always claimed as an "active interrupt" so we can query that bit
@@ -170,14 +170,14 @@ DECLARE_PER_CPU(unsigned long long, interrupts_enabled_mask);
/* Return 0 or 1 to indicate whether interrupts are currently disabled. */
#define IRQS_DISABLED(tmp) \
- mfspr tmp, INTERRUPT_MASK_1; \
+ mfspr tmp, SPR_INTERRUPT_MASK_K; \
andi tmp, tmp, 1
/* Load up a pointer to &interrupts_enabled_mask. */
#define GET_INTERRUPTS_ENABLED_MASK_PTR(reg) \
- moveli reg, hw2_last(interrupts_enabled_mask); \
- shl16insli reg, reg, hw1(interrupts_enabled_mask); \
- shl16insli reg, reg, hw0(interrupts_enabled_mask); \
+ moveli reg, hw2_last(interrupts_enabled_mask); \
+ shl16insli reg, reg, hw1(interrupts_enabled_mask); \
+ shl16insli reg, reg, hw0(interrupts_enabled_mask); \
add reg, reg, tp
/* Disable interrupts. */
@@ -185,18 +185,18 @@ DECLARE_PER_CPU(unsigned long long, interrupts_enabled_mask);
moveli tmp0, hw2_last(LINUX_MASKABLE_INTERRUPTS); \
shl16insli tmp0, tmp0, hw1(LINUX_MASKABLE_INTERRUPTS); \
shl16insli tmp0, tmp0, hw0(LINUX_MASKABLE_INTERRUPTS); \
- mtspr INTERRUPT_MASK_SET_1, tmp0
+ mtspr SPR_INTERRUPT_MASK_SET_K, tmp0
/* Disable ALL synchronous interrupts (used by NMI entry). */
#define IRQ_DISABLE_ALL(tmp) \
movei tmp, -1; \
- mtspr INTERRUPT_MASK_SET_1, tmp
+ mtspr SPR_INTERRUPT_MASK_SET_K, tmp
/* Enable interrupts. */
#define IRQ_ENABLE(tmp0, tmp1) \
GET_INTERRUPTS_ENABLED_MASK_PTR(tmp0); \
ld tmp0, tmp0; \
- mtspr INTERRUPT_MASK_RESET_1, tmp0
+ mtspr SPR_INTERRUPT_MASK_RESET_K, tmp0
#else /* !__tilegx__ */
@@ -210,14 +210,14 @@ DECLARE_PER_CPU(unsigned long long, interrupts_enabled_mask);
* (making the original code's write of the "high" mask word idempotent).
*/
#define IRQS_DISABLED(tmp) \
- mfspr tmp, INTERRUPT_MASK_1_0; \
+ mfspr tmp, SPR_INTERRUPT_MASK_K_0; \
shri tmp, tmp, INT_MEM_ERROR; \
andi tmp, tmp, 1
/* Load up a pointer to &interrupts_enabled_mask. */
#define GET_INTERRUPTS_ENABLED_MASK_PTR(reg) \
- moveli reg, lo16(interrupts_enabled_mask); \
- auli reg, reg, ha16(interrupts_enabled_mask);\
+ moveli reg, lo16(interrupts_enabled_mask); \
+ auli reg, reg, ha16(interrupts_enabled_mask); \
add reg, reg, tp
/* Disable interrupts. */
@@ -227,16 +227,16 @@ DECLARE_PER_CPU(unsigned long long, interrupts_enabled_mask);
moveli tmp1, lo16(LINUX_MASKABLE_INTERRUPTS) \
}; \
{ \
- mtspr INTERRUPT_MASK_SET_1_0, tmp0; \
+ mtspr SPR_INTERRUPT_MASK_SET_K_0, tmp0; \
auli tmp1, tmp1, ha16(LINUX_MASKABLE_INTERRUPTS) \
}; \
- mtspr INTERRUPT_MASK_SET_1_1, tmp1
+ mtspr SPR_INTERRUPT_MASK_SET_K_1, tmp1
/* Disable ALL synchronous interrupts (used by NMI entry). */
#define IRQ_DISABLE_ALL(tmp) \
movei tmp, -1; \
- mtspr INTERRUPT_MASK_SET_1_0, tmp; \
- mtspr INTERRUPT_MASK_SET_1_1, tmp
+ mtspr SPR_INTERRUPT_MASK_SET_K_0, tmp; \
+ mtspr SPR_INTERRUPT_MASK_SET_K_1, tmp
/* Enable interrupts. */
#define IRQ_ENABLE(tmp0, tmp1) \
@@ -246,8 +246,8 @@ DECLARE_PER_CPU(unsigned long long, interrupts_enabled_mask);
addi tmp1, tmp0, 4 \
}; \
lw tmp1, tmp1; \
- mtspr INTERRUPT_MASK_RESET_1_0, tmp0; \
- mtspr INTERRUPT_MASK_RESET_1_1, tmp1
+ mtspr SPR_INTERRUPT_MASK_RESET_K_0, tmp0; \
+ mtspr SPR_INTERRUPT_MASK_RESET_K_1, tmp1
#endif
/*
diff --git a/arch/tile/include/asm/kmap_types.h b/arch/tile/include/asm/kmap_types.h
index 1480106d1c05..3d0f20246260 100644
--- a/arch/tile/include/asm/kmap_types.h
+++ b/arch/tile/include/asm/kmap_types.h
@@ -16,28 +16,42 @@
#define _ASM_TILE_KMAP_TYPES_H
/*
- * In TILE Linux each set of four of these uses another 16MB chunk of
- * address space, given 64 tiles and 64KB pages, so we only enable
- * ones that are required by the kernel configuration.
+ * In 32-bit TILE Linux we have to balance the desire to have a lot of
+ * nested atomic mappings with the fact that large page sizes and many
+ * processors chew up address space quickly. In a typical
+ * 64-processor, 64KB-page layout build, making KM_TYPE_NR one larger
+ * adds 4MB of required address-space. For now we leave KM_TYPE_NR
+ * set to depth 8.
*/
enum km_type {
+ KM_TYPE_NR = 8
+};
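For reference, the 4MB figure follows from 64 cpus each needing one 64KB page per atomic-kmap slot (64 * 64KB = 4MB), so the eight slots of KM_TYPE_NR span 32MB of address space, consistent with the old comment's 16MB per group of four slots.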
+
+/*
+ * We provide dummy definitions of all the stray values that used to be
+ * required for kmap_atomic() and no longer are.
+ */
+enum {
KM_BOUNCE_READ,
KM_SKB_SUNRPC_DATA,
KM_SKB_DATA_SOFTIRQ,
KM_USER0,
KM_USER1,
KM_BIO_SRC_IRQ,
+ KM_BIO_DST_IRQ,
+ KM_PTE0,
+ KM_PTE1,
KM_IRQ0,
KM_IRQ1,
KM_SOFTIRQ0,
KM_SOFTIRQ1,
- KM_MEMCPY0,
- KM_MEMCPY1,
-#if defined(CONFIG_HIGHPTE)
- KM_PTE0,
- KM_PTE1,
-#endif
- KM_TYPE_NR
+ KM_SYNC_ICACHE,
+ KM_SYNC_DCACHE,
+ KM_UML_USERCOPY,
+ KM_IRQ_PTE,
+ KM_NMI,
+ KM_NMI_PTE,
+ KM_KDB
};
#endif /* _ASM_TILE_KMAP_TYPES_H */
diff --git a/arch/tile/include/asm/mman.h b/arch/tile/include/asm/mman.h
index 4c6811e3e8dc..81b8fc348d63 100644
--- a/arch/tile/include/asm/mman.h
+++ b/arch/tile/include/asm/mman.h
@@ -23,6 +23,7 @@
#define MAP_POPULATE 0x0040 /* populate (prefault) pagetables */
#define MAP_NONBLOCK 0x0080 /* do not block on IO */
#define MAP_GROWSDOWN 0x0100 /* stack-like segment */
+#define MAP_STACK MAP_GROWSDOWN /* provide convenience alias */
#define MAP_LOCKED 0x0200 /* pages are locked */
#define MAP_NORESERVE 0x0400 /* don't check for reservations */
#define MAP_DENYWRITE 0x0800 /* ETXTBSY */
diff --git a/arch/tile/include/asm/page.h b/arch/tile/include/asm/page.h
index 7d90641cf18d..7979a45430d3 100644
--- a/arch/tile/include/asm/page.h
+++ b/arch/tile/include/asm/page.h
@@ -199,17 +199,17 @@ static inline __attribute_const__ int get_order(unsigned long size)
* If you want more physical memory than this then see the CONFIG_HIGHMEM
* option in the kernel configuration.
*
- * The top two 16MB chunks in the table below (VIRT and HV) are
- * unavailable to Linux. Since the kernel interrupt vectors must live
- * at 0xfd000000, we map all of the bottom of RAM at this address with
- * a huge page table entry to minimize its ITLB footprint (as well as
- * at PAGE_OFFSET). The last architected requirement is that user
- * interrupt vectors live at 0xfc000000, so we make that range of
- * memory available to user processes. The remaining regions are sized
- * as shown; after the first four addresses, we show "typical" values,
- * since the actual addresses depend on kernel #defines.
+ * The top 16MB chunk in the table below is unavailable to Linux. Since
+ * the kernel interrupt vectors must live at either 0xfe000000 or 0xfd000000
+ * (depending on whether the kernel is at PL2 or PL1), we map all of the
+ * bottom of RAM at this address with a huge page table entry to minimize
+ * its ITLB footprint (as well as at PAGE_OFFSET). The last architected
+ * requirement is that user interrupt vectors live at 0xfc000000, so we
+ * make that range of memory available to user processes. The remaining
+ * regions are sized as shown; the first four addresses use the PL 1
+ * values, and after that, we show "typical" values, since the actual
+ * addresses depend on kernel #defines.
*
- * MEM_VIRT_INTRPT 0xff000000
* MEM_HV_INTRPT 0xfe000000
* MEM_SV_INTRPT (kernel code) 0xfd000000
* MEM_USER_INTRPT (user vector) 0xfc000000
@@ -221,9 +221,14 @@ static inline __attribute_const__ int get_order(unsigned long size)
*/
#define MEM_USER_INTRPT _AC(0xfc000000, UL)
+#if CONFIG_KERNEL_PL == 1
#define MEM_SV_INTRPT _AC(0xfd000000, UL)
#define MEM_HV_INTRPT _AC(0xfe000000, UL)
-#define MEM_VIRT_INTRPT _AC(0xff000000, UL)
+#else
+#define MEM_GUEST_INTRPT _AC(0xfd000000, UL)
+#define MEM_SV_INTRPT _AC(0xfe000000, UL)
+#define MEM_HV_INTRPT _AC(0xff000000, UL)
+#endif
#define INTRPT_SIZE 0x4000
diff --git a/arch/tile/include/asm/pci-bridge.h b/arch/tile/include/asm/pci-bridge.h
deleted file mode 100644
index e853b0e2793b..000000000000
--- a/arch/tile/include/asm/pci-bridge.h
+++ /dev/null
@@ -1,117 +0,0 @@
-/*
- * Copyright 2010 Tilera Corporation. All Rights Reserved.
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * as published by the Free Software Foundation, version 2.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
- * NON INFRINGEMENT. See the GNU General Public License for
- * more details.
- */
-
-#ifndef _ASM_TILE_PCI_BRIDGE_H
-#define _ASM_TILE_PCI_BRIDGE_H
-
-#include <linux/ioport.h>
-#include <linux/pci.h>
-
-struct device_node;
-struct pci_controller;
-
-/*
- * pci_io_base returns the memory address at which you can access
- * the I/O space for PCI bus number `bus' (or NULL on error).
- */
-extern void __iomem *pci_bus_io_base(unsigned int bus);
-extern unsigned long pci_bus_io_base_phys(unsigned int bus);
-extern unsigned long pci_bus_mem_base_phys(unsigned int bus);
-
-/* Allocate a new PCI host bridge structure */
-extern struct pci_controller *pcibios_alloc_controller(void);
-
-/* Helper function for setting up resources */
-extern void pci_init_resource(struct resource *res, unsigned long start,
- unsigned long end, int flags, char *name);
-
-/* Get the PCI host controller for a bus */
-extern struct pci_controller *pci_bus_to_hose(int bus);
-
-/*
- * Structure of a PCI controller (host bridge)
- */
-struct pci_controller {
- int index; /* PCI domain number */
- struct pci_bus *root_bus;
-
- int first_busno;
- int last_busno;
-
- int hv_cfg_fd[2]; /* config{0,1} fds for this PCIe controller */
- int hv_mem_fd; /* fd to Hypervisor for MMIO operations */
-
- struct pci_ops *ops;
-
- int irq_base; /* Base IRQ from the Hypervisor */
- int plx_gen1; /* flag for PLX Gen 1 configuration */
-
- /* Address ranges that are routed to this controller/bridge. */
- struct resource mem_resources[3];
-};
-
-static inline struct pci_controller *pci_bus_to_host(struct pci_bus *bus)
-{
- return bus->sysdata;
-}
-
-extern void setup_indirect_pci_nomap(struct pci_controller *hose,
- void __iomem *cfg_addr, void __iomem *cfg_data);
-extern void setup_indirect_pci(struct pci_controller *hose,
- u32 cfg_addr, u32 cfg_data);
-extern void setup_grackle(struct pci_controller *hose);
-
-extern unsigned char common_swizzle(struct pci_dev *, unsigned char *);
-
-/*
- * The following code swizzles for exactly one bridge. The routine
- * common_swizzle below handles multiple bridges. But there are a
- * some boards that don't follow the PCI spec's suggestion so we
- * break this piece out separately.
- */
-static inline unsigned char bridge_swizzle(unsigned char pin,
- unsigned char idsel)
-{
- return (((pin-1) + idsel) % 4) + 1;
-}
-
-/*
- * The following macro is used to lookup irqs in a standard table
- * format for those PPC systems that do not already have PCI
- * interrupts properly routed.
- */
-/* FIXME - double check this */
-#define PCI_IRQ_TABLE_LOOKUP ({ \
- long _ctl_ = -1; \
- if (idsel >= min_idsel && idsel <= max_idsel && pin <= irqs_per_slot) \
- _ctl_ = pci_irq_table[idsel - min_idsel][pin-1]; \
- _ctl_; \
-})
-
-/*
- * Scan the buses below a given PCI host bridge and assign suitable
- * resources to all devices found.
- */
-extern int pciauto_bus_scan(struct pci_controller *, int);
-
-#ifdef CONFIG_PCI
-extern unsigned long pci_address_to_pio(phys_addr_t address);
-#else
-static inline unsigned long pci_address_to_pio(phys_addr_t address)
-{
- return (unsigned long)-1;
-}
-#endif
-
-#endif /* _ASM_TILE_PCI_BRIDGE_H */
diff --git a/arch/tile/include/asm/pci.h b/arch/tile/include/asm/pci.h
index b0c15da2d5d5..c3fc458a0d32 100644
--- a/arch/tile/include/asm/pci.h
+++ b/arch/tile/include/asm/pci.h
@@ -15,7 +15,29 @@
#ifndef _ASM_TILE_PCI_H
#define _ASM_TILE_PCI_H
-#include <asm/pci-bridge.h>
+#include <linux/pci.h>
+
+/*
+ * Structure of a PCI controller (host bridge)
+ */
+struct pci_controller {
+ int index; /* PCI domain number */
+ struct pci_bus *root_bus;
+
+ int first_busno;
+ int last_busno;
+
+ int hv_cfg_fd[2]; /* config{0,1} fds for this PCIe controller */
+ int hv_mem_fd; /* fd to Hypervisor for MMIO operations */
+
+ struct pci_ops *ops;
+
+ int irq_base; /* Base IRQ from the Hypervisor */
+ int plx_gen1; /* flag for PLX Gen 1 configuration */
+
+ /* Address ranges that are routed to this controller/bridge. */
+ struct resource mem_resources[3];
+};
/*
* The hypervisor maps the entirety of CPA-space as bus addresses, so
@@ -24,56 +46,12 @@
*/
#define PCI_DMA_BUS_IS_PHYS 1
-struct pci_controller *pci_bus_to_hose(int bus);
-unsigned char __init common_swizzle(struct pci_dev *dev, unsigned char *pinp);
int __init tile_pci_init(void);
-void pci_iounmap(struct pci_dev *dev, void __iomem *addr);
-void __iomem *pci_iomap(struct pci_dev *dev, int bar, unsigned long max);
-void __devinit pcibios_fixup_bus(struct pci_bus *bus);
-int __devinit _tile_cfg_read(struct pci_controller *hose,
- int bus,
- int slot,
- int function,
- int offset,
- int size,
- u32 *val);
-int __devinit _tile_cfg_write(struct pci_controller *hose,
- int bus,
- int slot,
- int function,
- int offset,
- int size,
- u32 val);
+void __iomem *pci_iomap(struct pci_dev *dev, int bar, unsigned long max);
+static inline void pci_iounmap(struct pci_dev *dev, void __iomem *addr) {}
-/*
- * These are used to to config reads and writes in the early stages of
- * setup before the driver infrastructure has been set up enough to be
- * able to do config reads and writes.
- */
-#define early_cfg_read(where, size, value) \
- _tile_cfg_read(controller, \
- current_bus, \
- pci_slot, \
- pci_fn, \
- where, \
- size, \
- value)
-
-#define early_cfg_write(where, size, value) \
- _tile_cfg_write(controller, \
- current_bus, \
- pci_slot, \
- pci_fn, \
- where, \
- size, \
- value)
-
-
-
-#define PCICFG_BYTE 1
-#define PCICFG_WORD 2
-#define PCICFG_DWORD 4
+void __devinit pcibios_fixup_bus(struct pci_bus *bus);
#define TILE_NUM_PCIE 2
@@ -88,33 +66,33 @@ static inline int pci_proc_domain(struct pci_bus *bus)
}
/*
- * I/O space is currently not supported.
+ * pcibios_assign_all_busses() tells whether or not the bus numbers
+ * should be reassigned, in case the BIOS didn't do it correctly, or
+ * in case we don't have a BIOS and we want to let Linux do it.
*/
+static inline int pcibios_assign_all_busses(void)
+{
+ return 1;
+}
-#define TILE_PCIE_LOWER_IO 0x0
-#define TILE_PCIE_UPPER_IO 0x10000
-#define TILE_PCIE_PCIE_IO_SIZE 0x0000FFFF
-
-#define _PAGE_NO_CACHE 0
-#define _PAGE_GUARDED 0
-
-
-#define pcibios_assign_all_busses() pci_assign_all_buses
-extern int pci_assign_all_buses;
-
+/*
+ * No special bus mastering setup handling.
+ */
static inline void pcibios_set_master(struct pci_dev *dev)
{
- /* No special bus mastering setup handling */
}
#define PCIBIOS_MIN_MEM 0
-#define PCIBIOS_MIN_IO TILE_PCIE_LOWER_IO
+#define PCIBIOS_MIN_IO 0
/*
* This flag tells if the platform is TILEmpower that needs
* special configuration for the PLX switch chip.
*/
-extern int blade_pci;
+extern int tile_plx_gen1;
+
+/* Use any cpu for PCI. */
+#define cpumask_of_pcibus(bus) cpu_online_mask
/* implement the pci_ DMA API in terms of the generic device dma_ one */
#include <asm-generic/pci-dma-compat.h>
@@ -122,7 +100,4 @@ extern int blade_pci;
/* generic pci stuff */
#include <asm-generic/pci.h>
-/* Use any cpu for PCI. */
-#define cpumask_of_pcibus(bus) cpu_online_mask
-
#endif /* _ASM_TILE_PCI_H */
diff --git a/arch/tile/include/asm/pgtable.h b/arch/tile/include/asm/pgtable.h
index b3367379d537..a6604e9485da 100644
--- a/arch/tile/include/asm/pgtable.h
+++ b/arch/tile/include/asm/pgtable.h
@@ -344,18 +344,11 @@ static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
#define pgd_offset_k(address) pgd_offset(&init_mm, address)
#if defined(CONFIG_HIGHPTE)
-extern pte_t *_pte_offset_map(pmd_t *, unsigned long address, enum km_type);
-#define pte_offset_map(dir, address) \
- _pte_offset_map(dir, address, KM_PTE0)
-#define pte_offset_map_nested(dir, address) \
- _pte_offset_map(dir, address, KM_PTE1)
-#define pte_unmap(pte) kunmap_atomic(pte, KM_PTE0)
-#define pte_unmap_nested(pte) kunmap_atomic(pte, KM_PTE1)
+extern pte_t *pte_offset_map(pmd_t *, unsigned long address);
+#define pte_unmap(pte) kunmap_atomic(pte)
#else
#define pte_offset_map(dir, address) pte_offset_kernel(dir, address)
-#define pte_offset_map_nested(dir, address) pte_offset_map(dir, address)
#define pte_unmap(pte) do { } while (0)
-#define pte_unmap_nested(pte) do { } while (0)
#endif
/* Clear a non-executable kernel PTE and flush it from the TLB. */
diff --git a/arch/tile/include/asm/processor.h b/arch/tile/include/asm/processor.h
index ccd5f8425688..a9e7c8760334 100644
--- a/arch/tile/include/asm/processor.h
+++ b/arch/tile/include/asm/processor.h
@@ -292,8 +292,18 @@ extern int kstack_hash;
/* Are we using huge pages in the TLB for kernel data? */
extern int kdata_huge;
+/* Support standard Linux prefetching. */
+#define ARCH_HAS_PREFETCH
+#define prefetch(x) __builtin_prefetch(x)
#define PREFETCH_STRIDE CHIP_L2_LINE_SIZE()
+/* Bring a value into the L1D, faulting the TLB if necessary. */
+#ifdef __tilegx__
+#define prefetch_L1(x) __insn_prefetch_l1_fault((void *)(x))
+#else
+#define prefetch_L1(x) __insn_prefetch_L1((void *)(x))
+#endif
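
For illustration only (generic usage, not part of this patch), the prefetch() macro defined above is typically used to start pulling the next element into cache while the current one is still being processed:

static inline long example_sum(const long *a, int n)
{
  long total = 0;
  int i;
  for (i = 0; i < n; i++) {
    if (i + 1 < n)
      prefetch(&a[i + 1]);  /* hint the cache about the next element */
    total += a[i];
  }
  return total;
}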
+
#else /* __ASSEMBLY__ */
/* Do some slow action (e.g. read a slow SPR). */
@@ -328,18 +338,21 @@ extern int kdata_huge;
* Note that assembly code assumes that USER_PL is zero.
*/
#define USER_PL 0
-#define KERNEL_PL 1
+#if CONFIG_KERNEL_PL == 2
+#define GUEST_PL 1
+#endif
+#define KERNEL_PL CONFIG_KERNEL_PL
-/* SYSTEM_SAVE_1_0 holds the current cpu number ORed with ksp0. */
+/* SYSTEM_SAVE_K_0 holds the current cpu number ORed with ksp0. */
#define CPU_LOG_MASK_VALUE 12
#define CPU_MASK_VALUE ((1 << CPU_LOG_MASK_VALUE) - 1)
#if CONFIG_NR_CPUS > CPU_MASK_VALUE
# error Too many cpus!
#endif
#define raw_smp_processor_id() \
- ((int)__insn_mfspr(SPR_SYSTEM_SAVE_1_0) & CPU_MASK_VALUE)
+ ((int)__insn_mfspr(SPR_SYSTEM_SAVE_K_0) & CPU_MASK_VALUE)
#define get_current_ksp0() \
- (__insn_mfspr(SPR_SYSTEM_SAVE_1_0) & ~CPU_MASK_VALUE)
+ (__insn_mfspr(SPR_SYSTEM_SAVE_K_0) & ~CPU_MASK_VALUE)
#define next_current_ksp0(task) ({ \
unsigned long __ksp0 = task_ksp0(task); \
int __cpu = raw_smp_processor_id(); \
diff --git a/arch/tile/include/asm/ptrace.h b/arch/tile/include/asm/ptrace.h
index 4a02bb073979..ac6d343129d3 100644
--- a/arch/tile/include/asm/ptrace.h
+++ b/arch/tile/include/asm/ptrace.h
@@ -62,8 +62,8 @@ struct pt_regs {
pt_reg_t lr; /* aliases regs[TREG_LR] */
/* Saved special registers. */
- pt_reg_t pc; /* stored in EX_CONTEXT_1_0 */
- pt_reg_t ex1; /* stored in EX_CONTEXT_1_1 (PL and ICS bit) */
+ pt_reg_t pc; /* stored in EX_CONTEXT_K_0 */
+ pt_reg_t ex1; /* stored in EX_CONTEXT_K_1 (PL and ICS bit) */
pt_reg_t faultnum; /* fault number (INT_SWINT_1 for syscall) */
pt_reg_t orig_r0; /* r0 at syscall entry, else zero */
pt_reg_t flags; /* flags (see below) */
diff --git a/arch/tile/include/asm/stat.h b/arch/tile/include/asm/stat.h
index 3dc90fa92c70..b16e5db8f0e7 100644
--- a/arch/tile/include/asm/stat.h
+++ b/arch/tile/include/asm/stat.h
@@ -1 +1,4 @@
+#ifdef CONFIG_COMPAT
+#define __ARCH_WANT_STAT64 /* Used for compat_sys_stat64() etc. */
+#endif
#include <asm-generic/stat.h>
diff --git a/arch/tile/include/asm/syscalls.h b/arch/tile/include/asm/syscalls.h
index ce99ffefeacf..3b5507c31eae 100644
--- a/arch/tile/include/asm/syscalls.h
+++ b/arch/tile/include/asm/syscalls.h
@@ -32,8 +32,9 @@ extern void *compat_sys_call_table[];
/*
* Note that by convention, any syscall which requires the current
- * register set takes an additional "struct pt_regs *" pointer; the
- * sys_xxx() function just adds the pointer and tail-calls to _sys_xxx().
+ * register set takes an additional "struct pt_regs *" pointer; a
+ * _sys_xxx() trampoline in intvec*.S just sets up the pointer and
+ * jumps to sys_xxx().
*/
/* kernel/sys.c */
@@ -43,66 +44,17 @@ long sys32_fadvise64(int fd, u32 offset_lo, u32 offset_hi,
int sys32_fadvise64_64(int fd, u32 offset_lo, u32 offset_hi,
u32 len_lo, u32 len_hi, int advice);
long sys_flush_cache(void);
-long sys_mmap2(unsigned long addr, unsigned long len,
- unsigned long prot, unsigned long flags,
- unsigned long fd, unsigned long pgoff);
-#ifdef __tilegx__
-long sys_mmap(unsigned long addr, unsigned long len,
- unsigned long prot, unsigned long flags,
- unsigned long fd, off_t pgoff);
+#ifndef __tilegx__ /* No mmap() in the 32-bit kernel. */
+#define sys_mmap sys_mmap
#endif
-/* kernel/process.c */
-long sys_clone(unsigned long clone_flags, unsigned long newsp,
- void __user *parent_tid, void __user *child_tid);
-long _sys_clone(unsigned long clone_flags, unsigned long newsp,
- void __user *parent_tid, void __user *child_tid,
- struct pt_regs *regs);
-long sys_fork(void);
-long _sys_fork(struct pt_regs *regs);
-long sys_vfork(void);
-long _sys_vfork(struct pt_regs *regs);
-long sys_execve(const char __user *filename,
- const char __user *const __user *argv,
- const char __user *const __user *envp);
-long _sys_execve(const char __user *filename,
- const char __user *const __user *argv,
- const char __user *const __user *envp, struct pt_regs *regs);
-
-/* kernel/signal.c */
-long sys_sigaltstack(const stack_t __user *, stack_t __user *);
-long _sys_sigaltstack(const stack_t __user *, stack_t __user *,
- struct pt_regs *);
-long sys_rt_sigreturn(void);
-long _sys_rt_sigreturn(struct pt_regs *regs);
-
-/* platform-independent functions */
-long sys_rt_sigsuspend(sigset_t __user *unewset, size_t sigsetsize);
-long sys_rt_sigaction(int sig, const struct sigaction __user *act,
- struct sigaction __user *oact, size_t sigsetsize);
-
#ifndef __tilegx__
/* mm/fault.c */
-int sys_cmpxchg_badaddr(unsigned long address);
-int _sys_cmpxchg_badaddr(unsigned long address, struct pt_regs *);
+long sys_cmpxchg_badaddr(unsigned long address, struct pt_regs *);
+long _sys_cmpxchg_badaddr(unsigned long address);
#endif
#ifdef CONFIG_COMPAT
-long compat_sys_execve(const char __user *path,
- const compat_uptr_t __user *argv,
- const compat_uptr_t __user *envp);
-long _compat_sys_execve(const char __user *path,
- const compat_uptr_t __user *argv,
- const compat_uptr_t __user *envp,
- struct pt_regs *regs);
-long compat_sys_sigaltstack(const struct compat_sigaltstack __user *uss_ptr,
- struct compat_sigaltstack __user *uoss_ptr);
-long _compat_sys_sigaltstack(const struct compat_sigaltstack __user *uss_ptr,
- struct compat_sigaltstack __user *uoss_ptr,
- struct pt_regs *regs);
-long compat_sys_rt_sigreturn(void);
-long _compat_sys_rt_sigreturn(struct pt_regs *regs);
-
/* These four are not defined for 64-bit, but serve as "compat" syscalls. */
long sys_fcntl64(unsigned int fd, unsigned int cmd, unsigned long arg);
long sys_fstat64(unsigned long fd, struct stat64 __user *statbuf);
@@ -110,4 +62,15 @@ long sys_truncate64(const char __user *path, loff_t length);
long sys_ftruncate64(unsigned int fd, loff_t length);
#endif
+/* These are the intvec*.S trampolines. */
+long _sys_sigaltstack(const stack_t __user *, stack_t __user *);
+long _sys_rt_sigreturn(void);
+long _sys_clone(unsigned long clone_flags, unsigned long newsp,
+ void __user *parent_tid, void __user *child_tid);
+long _sys_execve(const char __user *filename,
+ const char __user *const __user *argv,
+ const char __user *const __user *envp);
+
+#include <asm-generic/syscalls.h>
+
#endif /* _ASM_TILE_SYSCALLS_H */
diff --git a/arch/tile/include/asm/system.h b/arch/tile/include/asm/system.h
index f749be327ce0..5388850deeb2 100644
--- a/arch/tile/include/asm/system.h
+++ b/arch/tile/include/asm/system.h
@@ -89,6 +89,10 @@
#define get_cycles_low() __insn_mfspr(SPR_CYCLE) /* just get all 64 bits */
#endif
+#if !CHIP_HAS_MF_WAITS_FOR_VICTIMS()
+int __mb_incoherent(void); /* Helper routine for mb_incoherent(). */
+#endif
+
/* Fence to guarantee visibility of stores to incoherent memory. */
static inline void
mb_incoherent(void)
@@ -97,7 +101,6 @@ mb_incoherent(void)
#if !CHIP_HAS_MF_WAITS_FOR_VICTIMS()
{
- int __mb_incoherent(void);
#if CHIP_HAS_TILE_WRITE_PENDING()
const unsigned long WRITE_TIMEOUT_CYCLES = 400;
unsigned long start = get_cycles_low();
@@ -161,7 +164,7 @@ extern struct task_struct *_switch_to(struct task_struct *prev,
/* Helper function for _switch_to(). */
extern struct task_struct *__switch_to(struct task_struct *prev,
struct task_struct *next,
- unsigned long new_system_save_1_0);
+ unsigned long new_system_save_k_0);
/* Address that switched-away from tasks are at. */
extern unsigned long get_switch_to_pc(void);
@@ -214,13 +217,6 @@ int hardwall_deactivate(struct task_struct *task);
} while (0)
#endif
-/* Invoke the simulator "syscall" mechanism (see arch/tile/kernel/entry.S). */
-extern int _sim_syscall(int syscall_num, ...);
-#define sim_syscall(syscall_num, ...) \
- _sim_syscall(SIM_CONTROL_SYSCALL + \
- ((syscall_num) << _SIM_CONTROL_OPERATOR_BITS), \
- ## __VA_ARGS__)
-
/*
* Kernel threads can check to see if they need to migrate their
* stack whenever they return from a context switch; for user
diff --git a/arch/tile/include/asm/traps.h b/arch/tile/include/asm/traps.h
index 432a9c15c8a2..d06e35f57201 100644
--- a/arch/tile/include/asm/traps.h
+++ b/arch/tile/include/asm/traps.h
@@ -59,4 +59,8 @@ void do_hardwall_trap(struct pt_regs *, int fault_num);
void do_breakpoint(struct pt_regs *, int fault_num);
+#ifdef __tilegx__
+void gx_singlestep_handle(struct pt_regs *, int fault_num);
+#endif
+
#endif /* _ASM_TILE_SYSCALLS_H */
diff --git a/arch/tile/include/asm/unistd.h b/arch/tile/include/asm/unistd.h
index f2e3ff485333..b35c2db71199 100644
--- a/arch/tile/include/asm/unistd.h
+++ b/arch/tile/include/asm/unistd.h
@@ -41,6 +41,7 @@ __SYSCALL(__NR_cmpxchg_badaddr, sys_cmpxchg_badaddr)
#ifdef CONFIG_COMPAT
#define __ARCH_WANT_SYS_LLSEEK
#endif
+#define __ARCH_WANT_SYS_NEWFSTATAT
#endif
#endif /* _ASM_TILE_UNISTD_H */
diff --git a/arch/tile/include/hv/drv_xgbe_impl.h b/arch/tile/include/hv/drv_xgbe_impl.h
new file mode 100644
index 000000000000..3a73b2b44913
--- /dev/null
+++ b/arch/tile/include/hv/drv_xgbe_impl.h
@@ -0,0 +1,300 @@
+/*
+ * Copyright 2010 Tilera Corporation. All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation, version 2.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
+ * NON INFRINGEMENT. See the GNU General Public License for
+ * more details.
+ */
+
+/**
+ * @file drivers/xgbe/impl.h
+ * Implementation details for the NetIO library.
+ */
+
+#ifndef __DRV_XGBE_IMPL_H__
+#define __DRV_XGBE_IMPL_H__
+
+#include <hv/netio_errors.h>
+#include <hv/netio_intf.h>
+#include <hv/drv_xgbe_intf.h>
+
+
+/** How many groups we have (log2). */
+#define LOG2_NUM_GROUPS (12)
+/** How many groups we have. */
+#define NUM_GROUPS (1 << LOG2_NUM_GROUPS)
+
+/** Number of output requests we'll buffer per tile. */
+#define EPP_REQS_PER_TILE (32)
+
+/** Words used in an eDMA command without checksum acceleration. */
+#define EDMA_WDS_NO_CSUM 8
+/** Words used in an eDMA command with checksum acceleration. */
+#define EDMA_WDS_CSUM 10
+/** Total available words in the eDMA command FIFO. */
+#define EDMA_WDS_TOTAL 128
+
+
+/*
+ * FIXME: These definitions are internal and should have underscores!
+ * NOTE: The actual numeric values here are intentional and allow us to
+ * optimize the concept "if small ... else if large ... else ...", by
+ * checking for the low bit being set, and then for non-zero.
+ * These are used as array indices, so they must have the values (0, 1, 2)
+ * in some order.
+ */
+#define SIZE_SMALL (1) /**< Small packet queue. */
+#define SIZE_LARGE (2) /**< Large packet queue. */
+#define SIZE_JUMBO (0) /**< Jumbo packet queue. */
+
+/** The number of "SIZE_xxx" values. */
+#define NETIO_NUM_SIZES 3
+
+
+/*
+ * Default numbers of packets for IPP drivers. These values are chosen
+ * such that CIPP1 will not overflow its L2 cache.
+ */
+
+/** The default number of small packets. */
+#define NETIO_DEFAULT_SMALL_PACKETS 2750
+/** The default number of large packets. */
+#define NETIO_DEFAULT_LARGE_PACKETS 2500
+/** The default number of jumbo packets. */
+#define NETIO_DEFAULT_JUMBO_PACKETS 250
+
+
+/** Log2 of the size of a memory arena. */
+#define NETIO_ARENA_SHIFT 24 /* 16 MB */
+/** Size of a memory arena. */
+#define NETIO_ARENA_SIZE (1 << NETIO_ARENA_SHIFT)
+
+
+/** A queue of packets.
+ *
+ * This structure partially defines a queue of packets waiting to be
+ * processed. The queue as a whole is written to by an interrupt handler and
+ * read by non-interrupt code; this data structure is what's touched by the
+ * interrupt handler. The other part of the queue state, the read offset, is
+ * kept in user space, not in hypervisor space, so it is in a separate data
+ * structure.
+ *
+ * The read offset (__packet_receive_read in the user part of the queue
+ * structure) points to the next packet to be read. When the read offset is
+ * equal to the write offset, the queue is empty; therefore the queue must
+ * contain one more slot than the required maximum queue size.
+ *
+ * Here's an example of all 3 state variables and what they mean. All
+ * pointers move left to right.
+ *
+ * @code
+ * I I V V V V I I I I
+ * 0 1 2 3 4 5 6 7 8 9 10
+ * ^ ^ ^ ^
+ * | | |
+ * | | __last_packet_plus_one
+ * | __buffer_write
+ * __packet_receive_read
+ * @endcode
+ *
+ * This queue has 10 slots, and thus can hold 9 packets (_last_packet_plus_one
+ * = 10). The read pointer is at 2, and the write pointer is at 6; thus,
+ * there are valid, unread packets in slots 2, 3, 4, and 5. The remaining
+ * slots are invalid (do not contain a packet).
+ */
+typedef struct {
+ /** Byte offset of the next notify packet to be written: zero for the first
+ * packet on the queue, sizeof (netio_pkt_t) for the second packet on the
+ * queue, etc. */
+ volatile uint32_t __packet_write;
+
+ /** Offset of the packet after the last valid packet (i.e., when any
+ * pointer is incremented to this value, it wraps back to zero). */
+ uint32_t __last_packet_plus_one;
+}
+__netio_packet_queue_t;
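
As a minimal sketch of the wraparound arithmetic described above (assuming the user-side read offset and the netio_pkt_t definition kept elsewhere in this header), the number of unread packets can be derived from the two offsets like this; the helper name is hypothetical:

static inline unsigned int
example_packets_available(const __netio_packet_queue_t *q, uint32_t read_off)
{
  /* Both offsets are byte offsets scaled by sizeof(netio_pkt_t) and wrap
   * at __last_packet_plus_one, so compute the forward distance from the
   * read offset to the write offset and convert it to a packet count. */
  uint32_t write_off = q->__packet_write;
  uint32_t delta = (write_off >= read_off)
                     ? (write_off - read_off)
                     : (write_off + q->__last_packet_plus_one - read_off);
  return delta / sizeof(netio_pkt_t);
}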
+
+
+/** A queue of buffers.
+ *
+ * This structure partially defines a queue of empty buffers which have been
+ * obtained via requests to the IPP. (The elements of the queue are packet
+ * handles, which are transformed into a full netio_pkt_t when the buffer is
+ * retrieved.) The queue as a whole is written to by an interrupt handler and
+ * read by non-interrupt code; this data structure is what's touched by the
+ * interrupt handler. The other parts of the queue state, the read offset and
+ * requested write offset, are kept in user space, not in hypervisor space, so
+ * they are in a separate data structure.
+ *
+ * The read offset (__buffer_read in the user part of the queue structure)
+ * points to the next buffer to be read. When the read offset is equal to the
+ * write offset, the queue is empty; therefore the queue must contain one more
+ * slot than the required maximum queue size.
+ *
+ * The requested write offset (__buffer_requested_write in the user part of
+ * the queue structure) points to the slot which will hold the next buffer we
+ * request from the IPP, once we get around to sending such a request. When
+ * the requested write offset is equal to the write offset, no requests for
+ * new buffers are outstanding; when the requested write offset is one greater
+ * than the read offset, no more requests may be sent.
+ *
+ * Note that, unlike the packet_queue, the buffer_queue places incoming
+ * buffers at decreasing addresses. This makes the check for "is it time to
+ * wrap the buffer pointer" cheaper in the assembly code which receives new
+ * buffers, and means that the value which defines the queue size,
+ * __last_buffer, is different than in the packet queue. Also, the offset
+ * used in the packet_queue is already scaled by the size of a packet; here we
+ * use unscaled slot indices for the offsets. (These differences are
+ * historical, and in the future it's possible that the packet_queue will look
+ * more like this queue.)
+ *
+ * @code
+ * Here's an example of all 4 state variables and what they mean. Remember:
+ * all pointers move right to left.
+ *
+ * V V V I I R R V V V
+ * 0 1 2 3 4 5 6 7 8 9
+ * ^ ^ ^ ^
+ * | | | |
+ * | | | __last_buffer
+ * | | __buffer_write
+ * | __buffer_requested_write
+ * __buffer_read
+ * @endcode
+ *
+ * This queue has 10 slots, and thus can hold 9 buffers (_last_buffer = 9).
+ * The read pointer is at 2, and the write pointer is at 6; thus, there are
+ * valid, unread buffers in slots 2, 1, 0, 9, 8, and 7. The requested write
+ * pointer is at 4; thus, requests have been made to the IPP for buffers which
+ * will be placed in slots 6 and 5 when they arrive. Finally, the remaining
+ * slots are invalid (do not contain a buffer).
+ */
+typedef struct
+{
+ /** Ordinal number of the next buffer to be written: 0 for the first slot in
+ * the queue, 1 for the second slot in the queue, etc. */
+ volatile uint32_t __buffer_write;
+
+ /** Ordinal number of the last buffer (i.e., when any pointer is decremented
+ * below zero, it is reloaded with this value). */
+ uint32_t __last_buffer;
+}
+__netio_buffer_queue_t;
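
A corresponding sketch for the buffer queue: since its indices move downward and reload to __last_buffer when they pass zero (as described above), stepping any of the user-side indices looks like this; the helper name is hypothetical:

static inline uint32_t
example_buffer_index_step(const __netio_buffer_queue_t *q, uint32_t idx)
{
  /* Decrement one of the unscaled slot indices (__buffer_read or
   * __buffer_requested_write), wrapping from 0 back up to __last_buffer. */
  return (idx == 0) ? q->__last_buffer : idx - 1;
}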
+
+
+/**
+ * An object for providing Ethernet packets to a process.
+ */
+typedef struct __netio_queue_impl_t
+{
+ /** The queue of packets waiting to be received. */
+ __netio_packet_queue_t __packet_receive_queue;
+ /** The intr bit mask that IDs this device. */
+ unsigned int __intr_id;
+ /** Offset to queues of empty buffers, one per size. */
+ uint32_t __buffer_queue[NETIO_NUM_SIZES];
+ /** The address of the first EPP tile, or -1 if no EPP. */
+ /* ISSUE: Actually this is always "0" or "~0". */
+ uint32_t __epp_location;
+ /** The queue ID that this queue represents. */
+ unsigned int __queue_id;
+ /** Number of acknowledgements received. */
+ volatile uint32_t __acks_received;
+ /** Last completion number received for packet_sendv. */
+ volatile uint32_t __last_completion_rcv;
+ /** Number of packets allowed to be outstanding. */
+ uint32_t __max_outstanding;
+ /** First VA available for packets. */
+ void* __va_0;
+ /** First VA in second range available for packets. */
+ void* __va_1;
+ /** Padding to align the "__packets" field to the size of a netio_pkt_t. */
+ uint32_t __padding[3];
+ /** The packets themselves. */
+ netio_pkt_t __packets[0];
+}
+netio_queue_impl_t;
+
+
+/**
+ * An object for managing the user end of a NetIO queue.
+ */
+typedef struct __netio_queue_user_impl_t
+{
+ /** The next incoming packet to be read. */
+ uint32_t __packet_receive_read;
+ /** The next empty buffers to be read, one index per size. */
+ uint8_t __buffer_read[NETIO_NUM_SIZES];
+ /** Where the empty buffer we next request from the IPP will go, one index
+ * per size. */
+ uint8_t __buffer_requested_write[NETIO_NUM_SIZES];
+ /** PCIe interface flag. */
+ uint8_t __pcie;
+ /** Number of packets left to be received before we send a credit update. */
+ uint32_t __receive_credit_remaining;
+ /** Value placed in __receive_credit_remaining when it reaches zero. */
+ uint32_t __receive_credit_interval;
+ /** First fast I/O routine index. */
+ uint32_t __fastio_index;
+ /** Number of acknowledgements expected. */
+ uint32_t __acks_outstanding;
+ /** Last completion number requested. */
+ uint32_t __last_completion_req;
+ /** File descriptor for driver. */
+ int __fd;
+}
+netio_queue_user_impl_t;
+
+
+#define NETIO_GROUP_CHUNK_SIZE 64 /**< Max # groups in one IPP request */
+#define NETIO_BUCKET_CHUNK_SIZE 64 /**< Max # buckets in one IPP request */
+
+
+/** Internal structure used to convey packet send information to the
+ * hypervisor. FIXME: Actually, it's not used for that anymore, but
+ * netio_packet_send() still uses it internally.
+ */
+typedef struct
+{
+ uint16_t flags; /**< Packet flags (__NETIO_SEND_FLG_xxx) */
+ uint16_t transfer_size; /**< Size of packet */
+ uint32_t va; /**< VA of start of packet */
+ __netio_pkt_handle_t handle; /**< Packet handle */
+ uint32_t csum0; /**< First checksum word */
+ uint32_t csum1; /**< Second checksum word */
+}
+__netio_send_cmd_t;
+
+
+/** Flags used in two contexts:
+ * - As the "flags" member in the __netio_send_cmd_t, above; used only
+ * for netio_pkt_send_{prepare,commit}.
+ * - As part of the flags passed to the various send packet fast I/O calls.
+ */
+
+/** Need acknowledgement on this packet. Note that some code in the
+ * normal send_pkt fast I/O handler assumes that this is equal to 1. */
+#define __NETIO_SEND_FLG_ACK 0x1
+
+/** Do checksum on this packet. (Only used with the __netio_send_cmd_t;
+ * normal packet sends use a special fast I/O index to denote checksumming,
+ * and multi-segment sends test the checksum descriptor.) */
+#define __NETIO_SEND_FLG_CSUM 0x2
+
+/** Get a completion on this packet. Only used with multi-segment sends. */
+#define __NETIO_SEND_FLG_COMPLETION 0x4
+
+/** Position of the number-of-extra-segments value in the flags word.
+ Only used with multi-segment sends. */
+#define __NETIO_SEND_FLG_XSEG_SHIFT 3
+
+/** Width of the number-of-extra-segments value in the flags word. */
+#define __NETIO_SEND_FLG_XSEG_WIDTH 2
+
+#endif /* __DRV_XGBE_IMPL_H__ */
diff --git a/arch/tile/include/hv/drv_xgbe_intf.h b/arch/tile/include/hv/drv_xgbe_intf.h
new file mode 100644
index 000000000000..146e47d5334b
--- /dev/null
+++ b/arch/tile/include/hv/drv_xgbe_intf.h
@@ -0,0 +1,615 @@
+/*
+ * Copyright 2010 Tilera Corporation. All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation, version 2.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
+ * NON INFRINGEMENT. See the GNU General Public License for
+ * more details.
+ */
+
+/**
+ * @file drv_xgbe_intf.h
+ * Interface to the hypervisor XGBE driver.
+ */
+
+#ifndef __DRV_XGBE_INTF_H__
+#define __DRV_XGBE_INTF_H__
+
+/**
+ * An object for forwarding VAs and PAs to the hypervisor.
+ * @ingroup types
+ *
+ * This allows the supervisor to specify a number of areas of memory to
+ * store packet buffers.
+ */
+typedef struct
+{
+ /** The physical address of the memory. */
+ HV_PhysAddr pa;
+ /** Page table entry for the memory. This is only used to derive the
+ * memory's caching mode; the PA bits are ignored. */
+ HV_PTE pte;
+ /** The virtual address of the memory. */
+ HV_VirtAddr va;
+ /** Size (in bytes) of the memory area. */
+ int size;
+
+}
+netio_ipp_address_t;
+
+/** The various pread/pwrite offsets into the hypervisor-level driver.
+ * @ingroup types
+ */
+typedef enum
+{
+ /** Inform the Linux driver of the address of the NetIO arena memory.
+ * This offset is actually only used to convey information from netio
+ * to the Linux driver; it never makes it from there to the hypervisor.
+ * Write-only; takes a uint32_t specifying the VA address. */
+ NETIO_FIXED_ADDR = 0x5000000000000000ULL,
+
+ /** Inform the Linux driver of the size of the NetIO arena memory.
+ * This offset is actually only used to convey information from netio
+ * to the Linux driver; it never makes it from there to the hypervisor.
+ * Write-only; takes a uint32_t specifying the VA size. */
+ NETIO_FIXED_SIZE = 0x5100000000000000ULL,
+
+ /** Register current tile with IPP. Write then read: write, takes a
+ * netio_input_config_t, read returns a pointer to a netio_queue_impl_t. */
+ NETIO_IPP_INPUT_REGISTER_OFF = 0x6000000000000000ULL,
+
+ /** Unregister current tile from IPP. Write-only, takes a dummy argument. */
+ NETIO_IPP_INPUT_UNREGISTER_OFF = 0x6100000000000000ULL,
+
+ /** Start packets flowing. Write-only, takes a dummy argument. */
+ NETIO_IPP_INPUT_INIT_OFF = 0x6200000000000000ULL,
+
+ /** Stop packets flowing. Write-only, takes a dummy argument. */
+ NETIO_IPP_INPUT_UNINIT_OFF = 0x6300000000000000ULL,
+
+ /** Configure group (typically we group on VLAN). Write-only: takes an
+ * array of netio_group_t's, low 24 bits of the offset is the base group
+ * number times the size of a netio_group_t. */
+ NETIO_IPP_INPUT_GROUP_CFG_OFF = 0x6400000000000000ULL,
+
+ /** Configure bucket. Write-only: takes an array of netio_bucket_t's, low
+ * 24 bits of the offset is the base bucket number times the size of a
+ * netio_bucket_t. */
+ NETIO_IPP_INPUT_BUCKET_CFG_OFF = 0x6500000000000000ULL,
+
+ /** Get/set a parameter. Read or write: read or write data is the parameter
+ * value, low 32 bits of the offset is a __netio_getset_offset_t. */
+ NETIO_IPP_PARAM_OFF = 0x6600000000000000ULL,
+
+ /** Get fast I/O index. Read-only; returns a 4-byte base index value. */
+ NETIO_IPP_GET_FASTIO_OFF = 0x6700000000000000ULL,
+
+ /** Configure hijack IP address. Packets with this IPv4 dest address
+ * go to bucket NETIO_NUM_BUCKETS - 1. Write-only: takes an IP address
+ * in some standard form. FIXME: Define the form! */
+ NETIO_IPP_INPUT_HIJACK_CFG_OFF = 0x6800000000000000ULL,
+
+ /**
+ * Offsets beyond this point are reserved for the supervisor (although that
+ * enforcement must be done by the supervisor driver itself).
+ */
+ NETIO_IPP_USER_MAX_OFF = 0x6FFFFFFFFFFFFFFFULL,
+
+ /** Register I/O memory. Write-only, takes a netio_ipp_address_t. */
+ NETIO_IPP_IOMEM_REGISTER_OFF = 0x7000000000000000ULL,
+
+ /** Unregister I/O memory. Write-only, takes a netio_ipp_address_t. */
+ NETIO_IPP_IOMEM_UNREGISTER_OFF = 0x7100000000000000ULL,
+
+ /* Offsets greater than 0x7FFFFFFF can't be used directly from Linux
+ * userspace code due to limitations in the pread/pwrite syscalls. */
+
+ /** Drain LIPP buffers. */
+ NETIO_IPP_DRAIN_OFF = 0xFA00000000000000ULL,
+
+ /** Supply a netio_ipp_address_t to be used as shared memory for the
+ * LEPP command queue. */
+ NETIO_EPP_SHM_OFF = 0xFB00000000000000ULL,
+
+ /* 0xFC... is currently unused. */
+
+ /** Stop IPP/EPP tiles. Write-only, takes a dummy argument. */
+ NETIO_IPP_STOP_SHIM_OFF = 0xFD00000000000000ULL,
+
+ /** Start IPP/EPP tiles. Write-only, takes a dummy argument. */
+ NETIO_IPP_START_SHIM_OFF = 0xFE00000000000000ULL,
+
+ /** Supply packet arena. Write-only, takes an array of
+ * netio_ipp_address_t values. */
+ NETIO_IPP_ADDRESS_OFF = 0xFF00000000000000ULL,
+} netio_hv_offset_t;
+
+/** Extract the base offset from an offset */
+#define NETIO_BASE_OFFSET(off) ((off) & 0xFF00000000000000ULL)
+/** Extract the local offset from an offset */
+#define NETIO_LOCAL_OFFSET(off) ((off) & 0x00FFFFFFFFFFFFFFULL)
+
+
+/**
+ * Get/set offset.
+ */
+typedef union
+{
+ struct
+ {
+ uint64_t addr:48; /**< Class-specific address */
+ unsigned int class:8; /**< Class (e.g., NETIO_PARAM) */
+ unsigned int opcode:8; /**< High 8 bits of NETIO_IPP_PARAM_OFF */
+ }
+ bits; /**< Bitfields */
+ uint64_t word; /**< Aggregated value to use as the offset */
+}
+__netio_getset_offset_t;
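
A sketch of how the union above might be packed for a get/set-parameter request; the helper name is hypothetical, and the class value (e.g. NETIO_PARAM) comes from definitions outside this excerpt:

static inline uint64_t
example_param_offset(unsigned int cls, uint64_t addr)
{
  __netio_getset_offset_t off;
  off.bits.addr = addr;                        /* class-specific address */
  off.bits.class = cls;                        /* e.g. NETIO_PARAM */
  off.bits.opcode = NETIO_IPP_PARAM_OFF >> 56; /* high 8 bits of the base offset */
  return off.word;                             /* value to use as the pread/pwrite offset */
}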
+
+/**
+ * Fast I/O index offsets (must be contiguous).
+ */
+typedef enum
+{
+ NETIO_FASTIO_ALLOCATE = 0, /**< Get empty packet buffer */
+ NETIO_FASTIO_FREE_BUFFER = 1, /**< Give buffer back to IPP */
+ NETIO_FASTIO_RETURN_CREDITS = 2, /**< Give credits to IPP */
+ NETIO_FASTIO_SEND_PKT_NOCK = 3, /**< Send a packet, no checksum */
+ NETIO_FASTIO_SEND_PKT_CK = 4, /**< Send a packet, with checksum */
+ NETIO_FASTIO_SEND_PKT_VEC = 5, /**< Send a vector of packets */
+ NETIO_FASTIO_SENDV_PKT = 6, /**< Sendv one packet */
+ NETIO_FASTIO_NUM_INDEX = 7, /**< Total number of fast I/O indices */
+} netio_fastio_index_t;
+
+/** 3-word return type for Fast I/O call. */
+typedef struct
+{
+ int err; /**< Error code. */
+ uint32_t val0; /**< Value. Meaning depends upon the specific call. */
+ uint32_t val1; /**< Value. Meaning depends upon the specific call. */
+} netio_fastio_rv3_t;
+
+/** 0-argument fast I/O call */
+int __netio_fastio0(uint32_t fastio_index);
+/** 1-argument fast I/O call */
+int __netio_fastio1(uint32_t fastio_index, uint32_t arg0);
+/** 3-argument fast I/O call, 2-word return value */
+netio_fastio_rv3_t __netio_fastio3_rv3(uint32_t fastio_index, uint32_t arg0,
+ uint32_t arg1, uint32_t arg2);
+/** 4-argument fast I/O call */
+int __netio_fastio4(uint32_t fastio_index, uint32_t arg0, uint32_t arg1,
+ uint32_t arg2, uint32_t arg3);
+/** 6-argument fast I/O call */
+int __netio_fastio6(uint32_t fastio_index, uint32_t arg0, uint32_t arg1,
+ uint32_t arg2, uint32_t arg3, uint32_t arg4, uint32_t arg5);
+/** 9-argument fast I/O call */
+int __netio_fastio9(uint32_t fastio_index, uint32_t arg0, uint32_t arg1,
+ uint32_t arg2, uint32_t arg3, uint32_t arg4, uint32_t arg5,
+ uint32_t arg6, uint32_t arg7, uint32_t arg8);
+
+/** Allocate an empty packet.
+ * @param fastio_index Fast I/O index.
+ * @param size Size of the packet to allocate.
+ */
+#define __netio_fastio_allocate(fastio_index, size) \
+ __netio_fastio1((fastio_index) + NETIO_FASTIO_ALLOCATE, size)
+
+/** Free a buffer.
+ * @param fastio_index Fast I/O index.
+ * @param handle Handle for the packet to free.
+ */
+#define __netio_fastio_free_buffer(fastio_index, handle) \
+ __netio_fastio1((fastio_index) + NETIO_FASTIO_FREE_BUFFER, handle)
+
+/** Increment our receive credits.
+ * @param fastio_index Fast I/O index.
+ * @param credits Number of credits to add.
+ */
+#define __netio_fastio_return_credits(fastio_index, credits) \
+ __netio_fastio1((fastio_index) + NETIO_FASTIO_RETURN_CREDITS, credits)
+
+/** Send packet, no checksum.
+ * @param fastio_index Fast I/O index.
+ * @param ackflag Nonzero if we want an ack.
+ * @param size Size of the packet.
+ * @param va Virtual address of start of packet.
+ * @param handle Packet handle.
+ */
+#define __netio_fastio_send_pkt_nock(fastio_index, ackflag, size, va, handle) \
+ __netio_fastio4((fastio_index) + NETIO_FASTIO_SEND_PKT_NOCK, ackflag, \
+ size, va, handle)
+
+/** Send packet, calculate checksum.
+ * @param fastio_index Fast I/O index.
+ * @param ackflag Nonzero if we want an ack.
+ * @param size Size of the packet.
+ * @param va Virtual address of start of packet.
+ * @param handle Packet handle.
+ * @param csum0 Shim checksum header.
+ * @param csum1 Checksum seed.
+ */
+#define __netio_fastio_send_pkt_ck(fastio_index, ackflag, size, va, handle, \
+ csum0, csum1) \
+ __netio_fastio6((fastio_index) + NETIO_FASTIO_SEND_PKT_CK, ackflag, \
+ size, va, handle, csum0, csum1)
+
+
+/** Format for the "csum0" argument to the __netio_fastio_send routines
+ * and LEPP. Note that this is currently exactly identical to the
+ * ShimProtocolOffloadHeader.
+ */
+typedef union
+{
+ struct
+ {
+ unsigned int start_byte:7; /**< The first byte to be checksummed */
+ unsigned int count:14; /**< Number of bytes to be checksummed. */
+ unsigned int destination_byte:7; /**< The byte to write the checksum to. */
+ unsigned int reserved:4; /**< Reserved. */
+ } bits; /**< Decomposed method of access. */
+ unsigned int word; /**< To send out the IDN. */
+} __netio_checksum_header_t;
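
A hedged usage sketch tying the checksum header to the send macro above; the function name and all argument values are hypothetical placeholders:

static inline int
example_send_with_csum(uint32_t fastio_index, uint32_t size, uint32_t va,
                       uint32_t handle, unsigned int start_byte,
                       unsigned int count, unsigned int dest_byte,
                       uint32_t csum_seed)
{
  /* Build the "csum0" descriptor in the format described above, then issue
   * the checksumming send; ackflag is set to 1 to request an ack. */
  __netio_checksum_header_t csum0;
  csum0.word = 0;
  csum0.bits.start_byte = start_byte;
  csum0.bits.count = count;
  csum0.bits.destination_byte = dest_byte;
  return __netio_fastio_send_pkt_ck(fastio_index, 1, size, va, handle,
                                    csum0.word, csum_seed);
}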
+
+
+/** Sendv packet with 1 or 2 segments.
+ * @param fastio_index Fast I/O index.
+ * @param flags Ack/csum/notify flags in low 3 bits; number of segments minus
+ * 1 in next 2 bits; expected checksum in high 16 bits.
+ * @param confno Confirmation number to request, if notify flag set.
+ * @param csum0 Checksum descriptor; if zero, no checksum.
+ * @param va_F Virtual address of first segment.
+ * @param va_L Virtual address of last segment, if 2 segments.
+ * @param len_F_L Length of first segment in low 16 bits; length of last
+ * segment, if 2 segments, in high 16 bits.
+ */
+#define __netio_fastio_sendv_pkt_1_2(fastio_index, flags, confno, csum0, \
+ va_F, va_L, len_F_L) \
+ __netio_fastio6((fastio_index) + NETIO_FASTIO_SENDV_PKT, flags, confno, \
+ csum0, va_F, va_L, len_F_L)
+
+/** Send packet on PCIe interface.
+ * @param fastio_index Fast I/O index.
+ * @param flags Ack/csum/notify flags in low 3 bits.
+ * @param confno Confirmation number to request, if notify flag set.
+ * @param csum0 Checksum descriptor; Hard wired 0, not needed for PCIe.
+ * @param va_F Virtual address of the packet buffer.
+ * @param va_L Virtual address of last segment, if 2 segments. Hard wired 0.
+ * @param len_F_L Length of the packet buffer in low 16 bits.
+ */
+#define __netio_fastio_send_pcie_pkt(fastio_index, flags, confno, csum0, \
+ va_F, va_L, len_F_L) \
+ __netio_fastio6((fastio_index) + PCIE_FASTIO_SENDV_PKT, flags, confno, \
+ csum0, va_F, va_L, len_F_L)
+
+/** Sendv packet with 3 or 4 segments.
+ * @param fastio_index Fast I/O index.
+ * @param flags Ack/csum/notify flags in low 3 bits; number of segments minus
+ * 1 in next 2 bits; expected checksum in high 16 bits.
+ * @param confno Confirmation number to request, if notify flag set.
+ * @param csum0 Checksum descriptor; if zero, no checksum.
+ * @param va_F Virtual address of first segment.
+ * @param va_L Virtual address of last segment (third segment if 3 segments,
+ * fourth segment if 4 segments).
+ * @param len_F_L Length of first segment in low 16 bits; length of last
+ * segment in high 16 bits.
+ * @param va_M0 Virtual address of "middle 0" segment; this segment is sent
+ * second when there are three segments, and third if there are four.
+ * @param va_M1 Virtual address of "middle 1" segment; this segment is sent
+ * second when there are four segments.
+ * @param len_M0_M1 Length of middle 0 segment in low 16 bits; length of middle
+ * 1 segment, if 4 segments, in high 16 bits.
+ */
+#define __netio_fastio_sendv_pkt_3_4(fastio_index, flags, confno, csum0, va_F, \
+ va_L, len_F_L, va_M0, va_M1, len_M0_M1) \
+ __netio_fastio9((fastio_index) + NETIO_FASTIO_SENDV_PKT, flags, confno, \
+ csum0, va_F, va_L, len_F_L, va_M0, va_M1, len_M0_M1)
+
+/** Send vector of packets.
+ * @param fastio_index Fast I/O index.
+ * @param seqno Number of packets transmitted so far on this interface;
+ * used to decide which packets should be acknowledged.
+ * @param nentries Number of entries in vector.
+ * @param va Virtual address of start of vector entry array.
+ * @return 3-word netio_fastio_rv3_t structure. The structure's err member
+ * is an error code, or zero if no error. The val0 member is the
+ * updated value of seqno; it has been incremented by 1 for each
+ * packet sent. That increment may be less than nentries if an
+ * error occurred, or if some of the entries in the vector contain
+ * handles equal to NETIO_PKT_HANDLE_NONE. The val1 member is the
+ * updated value of nentries; it has been decremented by 1 for each
+ * vector entry processed. Again, that decrement may be less than
+ * nentries (leaving the returned value positive) if an error
+ * occurred.
+ */
+#define __netio_fastio_send_pkt_vec(fastio_index, seqno, nentries, va) \
+ __netio_fastio3_rv3((fastio_index) + NETIO_FASTIO_SEND_PKT_VEC, seqno, \
+ nentries, va)
+
+
+/** An egress DMA command for LEPP. */
+typedef struct
+{
+ /** Is this a TSO transfer?
+ *
+ * NOTE: This field is always 0, to distinguish it from
+ * lepp_tso_cmd_t. It must come first!
+ */
+ uint8_t tso : 1;
+
+ /** Unused padding bits. */
+ uint8_t _unused : 3;
+
+ /** Should this packet be sent directly from caches instead of DRAM,
+ * using hash-for-home to locate the packet data?
+ */
+ uint8_t hash_for_home : 1;
+
+ /** Should we compute a checksum? */
+ uint8_t compute_checksum : 1;
+
+ /** Is this the final buffer for this packet?
+ *
+ * A single packet can be split over several input buffers (a "gather"
+ * operation). This flag indicates that this is the last buffer
+ * in a packet.
+ */
+ uint8_t end_of_packet : 1;
+
+ /** Should LEPP advance 'comp_busy' when this DMA is fully finished? */
+ uint8_t send_completion : 1;
+
+ /** High bits of Client Physical Address of the start of the buffer
+ * to be egressed.
+ *
+ * NOTE: Only 6 bits are actually needed here, as CPAs are
+ * currently 38 bits. So two bits could be scavenged from this.
+ */
+ uint8_t cpa_hi;
+
+ /** The number of bytes to be egressed. */
+ uint16_t length;
+
+ /** Low 32 bits of Client Physical Address of the start of the buffer
+ * to be egressed.
+ */
+ uint32_t cpa_lo;
+
+ /** Checksum information (only used if 'compute_checksum'). */
+ __netio_checksum_header_t checksum_data;
+
+} lepp_cmd_t;
+
+
+/** A chunk of physical memory for a TSO egress. */
+typedef struct
+{
+ /** The low bits of the CPA. */
+ uint32_t cpa_lo;
+ /** The high bits of the CPA. */
+ uint16_t cpa_hi : 15;
+ /** Should this packet be sent directly from caches instead of DRAM,
+ * using hash-for-home to locate the packet data?
+ */
+ uint16_t hash_for_home : 1;
+ /** The length in bytes. */
+ uint16_t length;
+} lepp_frag_t;
+
+
+/** An LEPP command that handles TSO. */
+typedef struct
+{
+ /** Is this a TSO transfer?
+ *
+ * NOTE: This field is always 1, to distinguish it from
+ * lepp_cmd_t. It must come first!
+ */
+ uint8_t tso : 1;
+
+ /** Unused padding bits. */
+ uint8_t _unused : 7;
+
+ /** Size of the header[] array in bytes. It must be in the range
+ * [40, 127], which are the smallest header for a TCP packet over
+ * Ethernet and the maximum possible prepend size supported by
+ * hardware, respectively. Note that the array storage must be
+ * padded out to a multiple of four bytes so that the following
+ * LEPP command is aligned properly.
+ */
+ uint8_t header_size;
+
+ /** Byte offset of the IP header in header[]. */
+ uint8_t ip_offset;
+
+ /** Byte offset of the TCP header in header[]. */
+ uint8_t tcp_offset;
+
+ /** The number of bytes to use for the payload of each packet,
+ * except of course the last one, which may not have enough bytes.
+ * This means that each Ethernet packet except the last will have a
+ * size of header_size + payload_size.
+ */
+ uint16_t payload_size;
+
+ /** The length of the 'frags' array that follows this struct. */
+ uint16_t num_frags;
+
+ /** The actual frags. */
+ lepp_frag_t frags[0 /* Variable-sized; num_frags entries. */];
+
+ /*
+ * The packet header template logically follows frags[],
+ * but you can't declare that in C.
+ *
+ * uint32_t header[header_size_in_words_rounded_up];
+ */
+
+} lepp_tso_cmd_t;
+
+
+/** An LEPP completion ring entry. */
+typedef void* lepp_comp_t;
+
+
+/** Maximum number of frags for one TSO command. This is adapted from
+ * linux's "MAX_SKB_FRAGS", and presumably over-estimates by one, for
+ * our page size of exactly 65536. We add one for a "body" fragment.
+ */
+#define LEPP_MAX_FRAGS (65536 / HV_PAGE_SIZE_SMALL + 2 + 1)
+
+/** Total number of bytes needed for an lepp_tso_cmd_t. */
+#define LEPP_TSO_CMD_SIZE(num_frags, header_size) \
+ (sizeof(lepp_tso_cmd_t) + \
+ (num_frags) * sizeof(lepp_frag_t) + \
+ (((header_size) + 3) & -4))
+
+/** The size of the lepp "cmd" queue. */
+#define LEPP_CMD_QUEUE_BYTES \
+ (((CHIP_L2_CACHE_SIZE() - 2 * CHIP_L2_LINE_SIZE()) / \
+ (sizeof(lepp_cmd_t) + sizeof(lepp_comp_t))) * sizeof(lepp_cmd_t))
+
+/** The largest possible command that can go in lepp_queue_t::cmds[]. */
+#define LEPP_MAX_CMD_SIZE LEPP_TSO_CMD_SIZE(LEPP_MAX_FRAGS, 128)
+
+/** The largest possible value of lepp_queue_t::cmd_{head, tail} (inclusive).
+ */
+#define LEPP_CMD_LIMIT \
+ (LEPP_CMD_QUEUE_BYTES - LEPP_MAX_CMD_SIZE)
+
+/** The maximum number of completions in an LEPP queue. */
+#define LEPP_COMP_QUEUE_SIZE \
+ ((LEPP_CMD_LIMIT + sizeof(lepp_cmd_t) - 1) / sizeof(lepp_cmd_t))
+
+/** Increment an index modulo the queue size. */
+#define LEPP_QINC(var) \
+ (var = __insn_mnz(var - (LEPP_COMP_QUEUE_SIZE - 1), var + 1))
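
LEPP_QINC avoids a branch by using the tile-specific __insn_mnz intrinsic; as a sketch, the equivalent portable logic is a plain increment that wraps at LEPP_COMP_QUEUE_SIZE:

static inline unsigned int lepp_qinc_portable(unsigned int var)
{
  /* Same result as LEPP_QINC: __insn_mnz(a, b) is assumed to yield b when
   * a is non-zero and 0 otherwise, so the macro above wraps to 0 exactly
   * when var was LEPP_COMP_QUEUE_SIZE - 1. */
  return (var + 1 == LEPP_COMP_QUEUE_SIZE) ? 0 : var + 1;
}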
+
+/** A queue used to convey egress commands from the client to LEPP. */
+typedef struct
+{
+ /** Index of first completion not yet processed by user code.
+ * If this is equal to comp_busy, there are no such completions.
+ *
+ * NOTE: This is only read/written by the user.
+ */
+ unsigned int comp_head;
+
+ /** Index of first completion record not yet completed.
+ * If this is equal to comp_tail, there are no such completions.
+ * This index gets advanced (modulo LEPP_COMP_QUEUE_SIZE) whenever
+ * a command with the 'completion' bit set is finished.
+ *
+ * NOTE: This is only written by LEPP, only read by the user.
+ */
+ volatile unsigned int comp_busy;
+
+ /** Index of the first empty slot in the completion ring.
+ * Entries from this up to but not including comp_head (in ring order)
+ * can be filled in with completion data.
+ *
+ * NOTE: This is only read/written by the user.
+ */
+ unsigned int comp_tail;
+
+ /** Byte index of first command enqueued for LEPP but not yet processed.
+ *
+ * This is always divisible by sizeof(void*) and always <= LEPP_CMD_LIMIT.
+ *
+ * NOTE: LEPP advances this counter as soon as it no longer needs
+ * the cmds[] storage for this entry, but the transfer is not actually
+ * complete (i.e. the buffer pointed to by the command is no longer
+ * needed) until comp_busy advances.
+ *
+ * If this is equal to cmd_tail, the ring is empty.
+ *
+ * NOTE: This is only written by LEPP, only read by the user.
+ */
+ volatile unsigned int cmd_head;
+
+ /** Byte index of first empty slot in the command ring. This field can
+ * be incremented up to but not equal to cmd_head (because that would
+ * mean the ring is empty).
+ *
+ * This is always divisible by sizeof(void*) and always <= LEPP_CMD_LIMIT.
+ *
+ * NOTE: This is read/written by the user, only read by LEPP.
+ */
+ volatile unsigned int cmd_tail;
+
+ /** A ring of variable-sized egress DMA commands.
+ *
+ * NOTE: Only written by the user, only read by LEPP.
+ */
+ char cmds[LEPP_CMD_QUEUE_BYTES]
+ __attribute__((aligned(CHIP_L2_LINE_SIZE())));
+
+ /** A ring of user completion data.
+ * NOTE: Only read/written by the user.
+ */
+ lepp_comp_t comps[LEPP_COMP_QUEUE_SIZE]
+ __attribute__((aligned(CHIP_L2_LINE_SIZE())));
+} lepp_queue_t;
+
+
+/** An internal helper function for determining the number of entries
+ * available in a ring buffer, given that there is one sentinel.
+ */
+static inline unsigned int
+_lepp_num_free_slots(unsigned int head, unsigned int tail)
+{
+ /*
+ * One entry is reserved for use as a sentinel, to distinguish
+ * "empty" from "full". So we compute
+ * (head - tail - 1) % LEPP_COMP_QUEUE_SIZE, but without using a slow % operation.
+ */
+ return (head - tail - 1) + ((head <= tail) ? LEPP_COMP_QUEUE_SIZE : 0);
+}
+
+
+/** Returns how many new comp entries can be enqueued. */
+static inline unsigned int
+lepp_num_free_comp_slots(const lepp_queue_t* q)
+{
+ return _lepp_num_free_slots(q->comp_head, q->comp_tail);
+}
+
+static inline int
+lepp_qsub(int v1, int v2)
+{
+ int delta = v1 - v2;
+ return delta + ((delta >> 31) & LEPP_COMP_QUEUE_SIZE);
+}
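
A worked example of the sentinel arithmetic above, using a hypothetical queue size of 10:

/* With head == 3 and tail == 7, slots 7, 8, 9, 0 and 1 are free and slot 2
 * is reserved as the sentinel:
 *
 *   (head - tail - 1) + ((head <= tail) ? 10 : 0)
 *     = (3 - 7 - 1) + 10
 *     = 5
 *
 * (The unsigned intermediate value wraps modulo 2^32, but the final sum is
 * the same.)  lepp_qsub() is the matching "distance around the ring"
 * helper: the (delta >> 31) & LEPP_COMP_QUEUE_SIZE term adds the queue
 * size back in exactly when the subtraction went negative.
 */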
+
+
+/** FIXME: Check this from linux, via a new "pwrite()" call. */
+#define LIPP_VERSION 1
+
+
+/** We use exactly two bytes of alignment padding. */
+#define LIPP_PACKET_PADDING 2
+
+/** The minimum size of a "small" buffer (including the padding). */
+#define LIPP_SMALL_PACKET_SIZE 128
+
+/*
+ * NOTE: The following two values should total to less than around
+ * 13582, to keep the total size used for "lipp_state_t" below 64K.
+ */
+
+/** The maximum number of "small" buffers.
+ * This is enough for 53 network cpus with 128 credits. Note that
+ * if these are exhausted, we will fall back to using large buffers.
+ */
+#define LIPP_SMALL_BUFFERS 6785
+
+/** The maximum number of "large" buffers.
+ * This is enough for 53 network cpus with 128 credits.
+ */
+#define LIPP_LARGE_BUFFERS 6785
+
+#endif /* __DRV_XGBE_INTF_H__ */
diff --git a/arch/tile/include/hv/hypervisor.h b/arch/tile/include/hv/hypervisor.h
index 9bd303a141b2..f672544cd4f9 100644
--- a/arch/tile/include/hv/hypervisor.h
+++ b/arch/tile/include/hv/hypervisor.h
@@ -1003,37 +1003,37 @@ int hv_console_write(HV_VirtAddr bytes, int len);
* when these occur in a client's interrupt critical section, they must
* be delivered through the downcall mechanism.
*
- * A downcall is initially delivered to the client as an INTCTRL_1
- * interrupt. Upon entry to the INTCTRL_1 vector, the client must
- * immediately invoke the hv_downcall_dispatch service. This service
- * will not return; instead it will cause one of the client's actual
- * downcall-handling interrupt vectors to be entered. The EX_CONTEXT
- * registers in the client will be set so that when the client irets,
- * it will return to the code which was interrupted by the INTCTRL_1
- * interrupt.
- *
- * Under some circumstances, the firing of INTCTRL_1 can race with
+ * A downcall is initially delivered to the client as an INTCTRL_CL
+ * interrupt, where CL is the client's PL. Upon entry to the INTCTRL_CL
+ * vector, the client must immediately invoke the hv_downcall_dispatch
+ * service. This service will not return; instead it will cause one of
+ * the client's actual downcall-handling interrupt vectors to be entered.
+ * The EX_CONTEXT registers in the client will be set so that when the
+ * client irets, it will return to the code which was interrupted by the
+ * INTCTRL_CL interrupt.
+ *
+ * Under some circumstances, the firing of INTCTRL_CL can race with
* the lowering of a device interrupt. In such a case, the
* hv_downcall_dispatch service may issue an iret instruction instead
* of entering one of the client's actual downcall-handling interrupt
* vectors. This will return execution to the location that was
- * interrupted by INTCTRL_1.
+ * interrupted by INTCTRL_CL.
*
* Any saving of registers should be done by the actual handling
- * vectors; no registers should be changed by the INTCTRL_1 handler.
+ * vectors; no registers should be changed by the INTCTRL_CL handler.
* In particular, the client should not use a jal instruction to invoke
* the hv_downcall_dispatch service, as that would overwrite the client's
* lr register. Note that the hv_downcall_dispatch service may overwrite
* one or more of the client's system save registers.
*
- * The client must not modify the INTCTRL_1_STATUS SPR. The hypervisor
+ * The client must not modify the INTCTRL_CL_STATUS SPR. The hypervisor
* will set this register to cause a downcall to happen, and will clear
* it when no further downcalls are pending.
*
- * When a downcall vector is entered, the INTCTRL_1 interrupt will be
+ * When a downcall vector is entered, the INTCTRL_CL interrupt will be
* masked. When the client is done processing a downcall, and is ready
* to accept another, it must unmask this interrupt; if more downcalls
- * are pending, this will cause the INTCTRL_1 vector to be reentered.
+ * are pending, this will cause the INTCTRL_CL vector to be reentered.
* Currently the following interrupt vectors can be entered through a
* downcall:
*
diff --git a/arch/tile/include/hv/netio_errors.h b/arch/tile/include/hv/netio_errors.h
new file mode 100644
index 000000000000..e1591bff61b5
--- /dev/null
+++ b/arch/tile/include/hv/netio_errors.h
@@ -0,0 +1,122 @@
+/*
+ * Copyright 2010 Tilera Corporation. All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation, version 2.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
+ * NON INFRINGEMENT. See the GNU General Public License for
+ * more details.
+ */
+
+/**
+ * Error codes returned from NetIO routines.
+ */
+
+#ifndef __NETIO_ERRORS_H__
+#define __NETIO_ERRORS_H__
+
+/**
+ * @addtogroup error
+ *
+ * @brief The error codes returned by NetIO functions.
+ *
+ * NetIO functions return 0 (defined as ::NETIO_NO_ERROR) on success, and
+ * a negative value if an error occurs.
+ *
+ * In cases where a NetIO function failed due to an error reported by
+ * system libraries, the error code will be the negation of the
+ * system errno at the time of failure. The @ref netio_strerror()
+ * function will deliver error strings for both NetIO and system error
+ * codes.
+ *
+ * @{
+ */
+
+/** The set of all NetIO errors. */
+typedef enum
+{
+ /** Operation successfully completed. */
+ NETIO_NO_ERROR = 0,
+
+ /** A packet was successfully retrieved from an input queue. */
+ NETIO_PKT = 0,
+
+ /** Largest NetIO error number. */
+ NETIO_ERR_MAX = -701,
+
+ /** The tile is not registered with the IPP. */
+ NETIO_NOT_REGISTERED = -701,
+
+ /** No packet was available to retrieve from the input queue. */
+ NETIO_NOPKT = -702,
+
+ /** The requested function is not implemented. */
+ NETIO_NOT_IMPLEMENTED = -703,
+
+ /** On a registration operation, the target queue already has the maximum
+ * number of tiles registered for it, and no more may be added. On a
+ * packet send operation, the output queue is full and nothing more can
+ * be queued until some of the queued packets are actually transmitted. */
+ NETIO_QUEUE_FULL = -704,
+
+ /** The calling process or thread is not bound to exactly one CPU. */
+ NETIO_BAD_AFFINITY = -705,
+
+ /** Cannot allocate memory on requested controllers. */
+ NETIO_CANNOT_HOME = -706,
+
+ /** On a registration operation, the IPP specified is not configured
+ * to support the options requested; for instance, the application
+ * wants a specific type of tagged headers which the configured IPP
+ * doesn't support. Or, the supplied configuration information is
+ * not self-consistent, or is out of range; for instance, specifying
+ * both NETIO_RECV and NETIO_NO_RECV, or asking for more than
+ * NETIO_MAX_SEND_BUFFERS to be preallocated. On a VLAN or bucket
+ * configure operation, the number of items, or the base item, was
+ * out of range.
+ */
+ NETIO_BAD_CONFIG = -707,
+
+ /** Too many tiles have registered to transmit packets. */
+ NETIO_TOOMANY_XMIT = -708,
+
+ /** Packet transmission was attempted on a queue which was registered
+ with transmit disabled. */
+ NETIO_UNREG_XMIT = -709,
+
+ /** This tile is already registered with the IPP. */
+ NETIO_ALREADY_REGISTERED = -710,
+
+ /** The Ethernet link is down. The application should try again later. */
+ NETIO_LINK_DOWN = -711,
+
+ /** An invalid memory buffer has been specified. This may be an unmapped
+ * virtual address, or one which does not meet alignment requirements.
+ * For netio_input_register(), this error may be returned when multiple
+ * processes specify different memory regions to be used for NetIO
+ * buffers. That can happen if these processes specify explicit memory
+ * regions with the ::NETIO_FIXED_BUFFER_VA flag, or if tmc_cmem_init()
+ * has not been called by a common ancestor of the processes.
+ */
+ NETIO_FAULT = -712,
+
+ /** Cannot combine user-managed shared memory and cache coherence. */
+ NETIO_BAD_CACHE_CONFIG = -713,
+
+ /** Smallest NetIO error number. */
+ NETIO_ERR_MIN = -713,
+
+#ifndef __DOXYGEN__
+ /** Used internally to mean that no response is needed; never returned to
+ * an application. */
+ NETIO_NO_RESPONSE = 1
+#endif
+} netio_error_t;
+
+/** @} */
+
+#endif /* __NETIO_ERRORS_H__ */
diff --git a/arch/tile/include/hv/netio_intf.h b/arch/tile/include/hv/netio_intf.h
new file mode 100644
index 000000000000..8d20972aba2c
--- /dev/null
+++ b/arch/tile/include/hv/netio_intf.h
@@ -0,0 +1,2975 @@
+/*
+ * Copyright 2010 Tilera Corporation. All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation, version 2.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
+ * NON INFRINGEMENT. See the GNU General Public License for
+ * more details.
+ */
+
+/**
+ * NetIO interface structures and macros.
+ */
+
+#ifndef __NETIO_INTF_H__
+#define __NETIO_INTF_H__
+
+#include <hv/netio_errors.h>
+
+#ifdef __KERNEL__
+#include <linux/types.h>
+#else
+#include <stdint.h>
+#endif
+
+#if !defined(__HV__) && !defined(__BOGUX__) && !defined(__KERNEL__)
+#include <assert.h>
+#define netio_assert assert /**< Enable assertions from macros */
+#else
+#define netio_assert(...) ((void)(0)) /**< Disable assertions from macros */
+#endif
+
+/*
+ * If none of these symbols are defined, we're building libnetio in an
+ * environment where we have pthreads, so we'll enable locking.
+ */
+#if !defined(__HV__) && !defined(__BOGUX__) && !defined(__KERNEL__) && \
+ !defined(__NEWLIB__)
+#define _NETIO_PTHREAD /**< Include a mutex in netio_queue_t below */
+
+/*
+ * If NETIO_UNLOCKED is defined, we don't use per-cpu locks on
+ * per-packet NetIO operations. We still do pthread locking on things
+ * like netio_input_register, though. This is used for building
+ * libnetio_unlocked.
+ */
+#ifndef NETIO_UNLOCKED
+
+/* Avoid PLT overhead by using our own inlined per-cpu lock. */
+#include <sched.h>
+typedef int _netio_percpu_mutex_t;
+
+static __inline int
+_netio_percpu_mutex_init(_netio_percpu_mutex_t* lock)
+{
+ *lock = 0;
+ return 0;
+}
+
+static __inline int
+_netio_percpu_mutex_lock(_netio_percpu_mutex_t* lock)
+{
+ while (__builtin_expect(__insn_tns(lock), 0))
+ sched_yield();
+ return 0;
+}
+
+static __inline int
+_netio_percpu_mutex_unlock(_netio_percpu_mutex_t* lock)
+{
+ *lock = 0;
+ return 0;
+}
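+
+/*
+ * Illustrative sketch: how a per-packet operation might be guarded with the
+ * inlined test-and-set lock above.  The lock variable and the guarded work
+ * are hypothetical; libnetio keeps its own locks inside netio_queue_t.
+ *
+ *   static _netio_percpu_mutex_t example_lock;   // zero-initialized, i.e. unlocked
+ *
+ *   static void example_guarded_op(void)
+ *   {
+ *     _netio_percpu_mutex_lock(&example_lock);
+ *     // ... per-packet work that must not be interleaved on this CPU ...
+ *     _netio_percpu_mutex_unlock(&example_lock);
+ *   }
+ */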
+
+#else /* NETIO_UNLOCKED */
+
+/* Don't do any locking for per-packet NetIO operations. */
+typedef int _netio_percpu_mutex_t;
+#define _netio_percpu_mutex_init(L)
+#define _netio_percpu_mutex_lock(L)
+#define _netio_percpu_mutex_unlock(L)
+
+#endif /* NETIO_UNLOCKED */
+#endif /* !__HV__, !__BOGUX__, !__KERNEL__, !__NEWLIB__ */
+
+/** How many tiles can register for a given queue.
+ * @ingroup setup */
+#define NETIO_MAX_TILES_PER_QUEUE 64
+
+
+/** Largest permissible queue identifier.
+ * @ingroup setup */
+#define NETIO_MAX_QUEUE_ID 255
+
+
+#ifndef __DOXYGEN__
+
+/* Metadata packet checksum/ethertype flags. */
+
+/** The L4 checksum has not been calculated. */
+#define _NETIO_PKT_NO_L4_CSUM_SHIFT 0
+#define _NETIO_PKT_NO_L4_CSUM_RMASK 1
+#define _NETIO_PKT_NO_L4_CSUM_MASK \
+ (_NETIO_PKT_NO_L4_CSUM_RMASK << _NETIO_PKT_NO_L4_CSUM_SHIFT)
+
+/** The L3 checksum has not been calculated. */
+#define _NETIO_PKT_NO_L3_CSUM_SHIFT 1
+#define _NETIO_PKT_NO_L3_CSUM_RMASK 1
+#define _NETIO_PKT_NO_L3_CSUM_MASK \
+ (_NETIO_PKT_NO_L3_CSUM_RMASK << _NETIO_PKT_NO_L3_CSUM_SHIFT)
+
+/** The L3 checksum is incorrect (or perhaps has not been calculated). */
+#define _NETIO_PKT_BAD_L3_CSUM_SHIFT 2
+#define _NETIO_PKT_BAD_L3_CSUM_RMASK 1
+#define _NETIO_PKT_BAD_L3_CSUM_MASK \
+ (_NETIO_PKT_BAD_L3_CSUM_RMASK << _NETIO_PKT_BAD_L3_CSUM_SHIFT)
+
+/** The Ethernet packet type is unrecognized. */
+#define _NETIO_PKT_TYPE_UNRECOGNIZED_SHIFT 3
+#define _NETIO_PKT_TYPE_UNRECOGNIZED_RMASK 1
+#define _NETIO_PKT_TYPE_UNRECOGNIZED_MASK \
+ (_NETIO_PKT_TYPE_UNRECOGNIZED_RMASK << \
+ _NETIO_PKT_TYPE_UNRECOGNIZED_SHIFT)
+
+/* Metadata packet type flags. */
+
+/** Where the packet type bits are; this field is the index into
+ * _netio_pkt_info. */
+#define _NETIO_PKT_TYPE_SHIFT 4
+#define _NETIO_PKT_TYPE_RMASK 0x3F
+
+/** How many VLAN tags the packet has, and, if we have two, which one we
+ * actually grouped on. A VLAN within a proprietary (Marvell or Broadcom)
+ * tag is counted here. */
+#define _NETIO_PKT_VLAN_SHIFT 4
+#define _NETIO_PKT_VLAN_RMASK 0x3
+#define _NETIO_PKT_VLAN_MASK \
+ (_NETIO_PKT_VLAN_RMASK << _NETIO_PKT_VLAN_SHIFT)
+#define _NETIO_PKT_VLAN_NONE 0 /* No VLAN tag. */
+#define _NETIO_PKT_VLAN_ONE 1 /* One VLAN tag. */
+#define _NETIO_PKT_VLAN_TWO_OUTER 2 /* Two VLAN tags, outer one used. */
+#define _NETIO_PKT_VLAN_TWO_INNER 3 /* Two VLAN tags, inner one used. */
+
+/** Which proprietary tags the packet has. */
+#define _NETIO_PKT_TAG_SHIFT 6
+#define _NETIO_PKT_TAG_RMASK 0x3
+#define _NETIO_PKT_TAG_MASK \
+ (_NETIO_PKT_TAG_RMASK << _NETIO_PKT_TAG_SHIFT)
+#define _NETIO_PKT_TAG_NONE 0 /* No proprietary tags. */
+#define _NETIO_PKT_TAG_MRVL 1 /* Marvell HyperG.Stack tags. */
+#define _NETIO_PKT_TAG_MRVL_EXT 2 /* HyperG.Stack extended tags. */
+#define _NETIO_PKT_TAG_BRCM 3 /* Broadcom HiGig tags. */
+
+/** Whether a packet has an LLC + SNAP header. */
+#define _NETIO_PKT_SNAP_SHIFT 8
+#define _NETIO_PKT_SNAP_RMASK 0x1
+#define _NETIO_PKT_SNAP_MASK \
+ (_NETIO_PKT_SNAP_RMASK << _NETIO_PKT_SNAP_SHIFT)
+
+/* NOTE: Bits 9 and 10 are unused. */
+
+/** Length of any custom data before the L2 header, in words. */
+#define _NETIO_PKT_CUSTOM_LEN_SHIFT 11
+#define _NETIO_PKT_CUSTOM_LEN_RMASK 0x1F
+#define _NETIO_PKT_CUSTOM_LEN_MASK \
+ (_NETIO_PKT_CUSTOM_LEN_RMASK << _NETIO_PKT_CUSTOM_LEN_SHIFT)
+
+/** The L4 checksum is incorrect (or perhaps has not been calculated). */
+#define _NETIO_PKT_BAD_L4_CSUM_SHIFT 16
+#define _NETIO_PKT_BAD_L4_CSUM_RMASK 0x1
+#define _NETIO_PKT_BAD_L4_CSUM_MASK \
+ (_NETIO_PKT_BAD_L4_CSUM_RMASK << _NETIO_PKT_BAD_L4_CSUM_SHIFT)
+
+/** Length of the L2 header, in words. */
+#define _NETIO_PKT_L2_LEN_SHIFT 17
+#define _NETIO_PKT_L2_LEN_RMASK 0x1F
+#define _NETIO_PKT_L2_LEN_MASK \
+ (_NETIO_PKT_L2_LEN_RMASK << _NETIO_PKT_L2_LEN_SHIFT)
+
+
+/* Flags in minimal packet metadata. */
+
+/** We need an eDMA checksum on this packet. */
+#define _NETIO_PKT_NEED_EDMA_CSUM_SHIFT 0
+#define _NETIO_PKT_NEED_EDMA_CSUM_RMASK 1
+#define _NETIO_PKT_NEED_EDMA_CSUM_MASK \
+ (_NETIO_PKT_NEED_EDMA_CSUM_RMASK << _NETIO_PKT_NEED_EDMA_CSUM_SHIFT)
+
+/* Data within the packet information table. */
+
+/* Note that, for efficiency, code which uses these fields assumes that none
+ * of the shift values below are zero. See uses below for an explanation. */
+
+/** Offset within the L2 header of the innermost ethertype (in halfwords). */
+#define _NETIO_PKT_INFO_ETYPE_SHIFT 6
+#define _NETIO_PKT_INFO_ETYPE_RMASK 0x1F
+
+/** Offset within the L2 header of the VLAN tag (in halfwords). */
+#define _NETIO_PKT_INFO_VLAN_SHIFT 11
+#define _NETIO_PKT_INFO_VLAN_RMASK 0x1F
+
+#endif
+
+
+/** The size of a memory buffer representing a small packet.
+ * @ingroup egress */
+#define SMALL_PACKET_SIZE 256
+
+/** The size of a memory buffer representing a large packet.
+ * @ingroup egress */
+#define LARGE_PACKET_SIZE 2048
+
+/** The size of a memory buffer representing a jumbo packet.
+ * @ingroup egress */
+#define JUMBO_PACKET_SIZE (12 * 1024)
+
+
+/* Common ethertypes.
+ * @ingroup ingress */
+/** @{ */
+/** The ethertype of IPv4. */
+#define ETHERTYPE_IPv4 (0x0800)
+/** The ethertype of ARP. */
+#define ETHERTYPE_ARP (0x0806)
+/** The ethertype of VLANs. */
+#define ETHERTYPE_VLAN (0x8100)
+/** The ethertype of a Q-in-Q header. */
+#define ETHERTYPE_Q_IN_Q (0x9100)
+/** The ethertype of IPv6. */
+#define ETHERTYPE_IPv6 (0x86DD)
+/** The ethertype of MPLS. */
+#define ETHERTYPE_MPLS (0x8847)
+/** @} */
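+
+/*
+ * Illustrative sketch: these constants are typically compared against the
+ * value returned by NETIO_PKT_ETHERTYPE(), defined later in this header.
+ * The helper function below is hypothetical.
+ *
+ *   static int example_is_ip(netio_pkt_t* pkt)
+ *   {
+ *     unsigned short etype = NETIO_PKT_ETHERTYPE(pkt);
+ *     return etype == ETHERTYPE_IPv4 || etype == ETHERTYPE_IPv6;
+ *   }
+ */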
+
+
+/** The possible return values of NETIO_PKT_STATUS.
+ * @ingroup ingress
+ */
+typedef enum
+{
+ /** No problems were detected with this packet. */
+ NETIO_PKT_STATUS_OK,
+ /** The packet is undersized; this is expected behavior if the packet's
+ * ethertype is unrecognized, but otherwise the packet is likely corrupt. */
+ NETIO_PKT_STATUS_UNDERSIZE,
+ /** The packet is oversized and some trailing bytes have been discarded.
+ This is expected behavior for short packets, since it is impossible to
+ determine precisely how much padding was added to bring them up to the
+ minimum Ethernet packet size; the padding is therefore reported as excess
+ trailing bytes. */
+ NETIO_PKT_STATUS_OVERSIZE,
+ /** The packet was judged to be corrupt by hardware (for instance, it had
+ a bad CRC, or part of it was discarded due to lack of buffer space in
+ the I/O shim) and should be discarded. */
+ NETIO_PKT_STATUS_BAD
+} netio_pkt_status_t;
+
+
+/** Log2 of how many buckets we have. */
+#define NETIO_LOG2_NUM_BUCKETS (10)
+
+/** How many buckets we have.
+ * @ingroup ingress */
+#define NETIO_NUM_BUCKETS (1 << NETIO_LOG2_NUM_BUCKETS)
+
+
+/**
+ * @brief A group-to-bucket identifier.
+ *
+ * @ingroup setup
+ *
+ * This tells us what to do with a given group.
+ */
+typedef union {
+ /** The header broken down into bits. */
+ struct {
+ /** Whether we should balance on L4, if available */
+ unsigned int __balance_on_l4:1;
+ /** Whether we should balance on L3, if available */
+ unsigned int __balance_on_l3:1;
+ /** Whether we should balance on L2, if available */
+ unsigned int __balance_on_l2:1;
+ /** Reserved for future use */
+ unsigned int __reserved:1;
+ /** The base bucket to use to send traffic */
+ unsigned int __bucket_base:NETIO_LOG2_NUM_BUCKETS;
+ /** The mask to apply to the balancing value. This must be one less
+ * than a power of two, e.g. 0x3 or 0xFF.
+ */
+ unsigned int __bucket_mask:NETIO_LOG2_NUM_BUCKETS;
+ /** Pad to 32 bits */
+ unsigned int __padding:(32 - 4 - 2 * NETIO_LOG2_NUM_BUCKETS);
+ } bits;
+ /** To send out the IDN. */
+ unsigned int word;
+}
+netio_group_t;
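+
+/*
+ * Illustrative sketch: filling in a netio_group_t that balances on L3/L4
+ * across 16 buckets starting at bucket 32.  Note that __bucket_mask must be
+ * one less than a power of two.  How the resulting word is handed to the
+ * IPP is outside the scope of this header and is not shown.
+ *
+ *   netio_group_t g;
+ *   g.word = 0;
+ *   g.bits.__balance_on_l4 = 1;
+ *   g.bits.__balance_on_l3 = 1;
+ *   g.bits.__balance_on_l2 = 0;
+ *   g.bits.__bucket_base = 32;
+ *   g.bits.__bucket_mask = 0xF;   // 16 buckets: mask is one less than 16
+ */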
+
+
+/**
+ * @brief A VLAN-to-bucket identifier.
+ *
+ * @ingroup setup
+ *
+ * This tells us what to do with a given VLAN.
+ */
+typedef netio_group_t netio_vlan_t;
+
+
+/**
+ * A bucket-to-queue mapping.
+ * @ingroup setup
+ */
+typedef unsigned char netio_bucket_t;
+
+
+/**
+ * A packet size can always fit in a netio_size_t.
+ * @ingroup setup
+ */
+typedef unsigned int netio_size_t;
+
+
+/**
+ * @brief Ethernet standard (ingress) packet metadata.
+ *
+ * @ingroup ingress
+ *
+ * This is additional data associated with each packet.
+ * This structure is opaque and accessed through the @ref ingress.
+ *
+ * Also, the buffer population operation currently assumes that standard
+ * metadata is at least as large as minimal metadata, and will need to be
+ * modified if that is no longer the case.
+ */
+typedef struct
+{
+#ifdef __DOXYGEN__
+ /** This structure is opaque. */
+ unsigned char opaque[24];
+#else
+ /** The overall ordinal of the packet */
+ unsigned int __packet_ordinal;
+ /** The ordinal of the packet within the group */
+ unsigned int __group_ordinal;
+ /** The best flow hash IPP could compute. */
+ unsigned int __flow_hash;
+ /** Flags pertaining to checksum calculation, packet type, etc. */
+ unsigned int __flags;
+ /** The first word of "user data". */
+ unsigned int __user_data_0;
+ /** The second word of "user data". */
+ unsigned int __user_data_1;
+#endif
+}
+netio_pkt_metadata_t;
+
+
+/** To ensure that the L3 header is aligned mod 4, the L2 header should be
+ * aligned mod 4 plus 2, since every supported L2 header is 4n + 2 bytes
+ * long. The standard way to do this is to simply add 2 bytes of padding
+ * before the L2 header.
+ */
+#define NETIO_PACKET_PADDING 2
+
+
+
+/**
+ * @brief Ethernet minimal (egress) packet metadata.
+ *
+ * @ingroup egress
+ *
+ * This structure represents information about packets which have
+ * been processed by @ref netio_populate_buffer() or
+ * @ref netio_populate_prepend_buffer(). This structure is opaque
+ * and accessed through the @ref egress.
+ *
+ * @internal This structure is actually copied into the memory used by
+ * standard metadata, which is assumed to be large enough.
+ */
+typedef struct
+{
+#ifdef __DOXYGEN__
+ /** This structure is opaque. */
+ unsigned char opaque[14];
+#else
+ /** The offset of the L2 header from the start of the packet data. */
+ unsigned short l2_offset;
+ /** The offset of the L3 header from the start of the packet data. */
+ unsigned short l3_offset;
+ /** Where to write the checksum. */
+ unsigned char csum_location;
+ /** Where to start checksumming from. */
+ unsigned char csum_start;
+ /** Flags pertaining to checksum calculation etc. */
+ unsigned short flags;
+ /** The L2 length of the packet. */
+ unsigned short l2_length;
+ /** The checksum with which to seed the checksum generator. */
+ unsigned short csum_seed;
+ /** How much to checksum. */
+ unsigned short csum_length;
+#endif
+}
+netio_pkt_minimal_metadata_t;
+
+
+#ifndef __DOXYGEN__
+
+/**
+ * @brief An I/O notification header.
+ *
+ * This is the first word of data received from an I/O shim in a notification
+ * packet. It contains framing and status information.
+ */
+typedef union
+{
+ unsigned int word; /**< The whole word. */
+ /** The various fields. */
+ struct
+ {
+ unsigned int __channel:7; /**< Resource channel. */
+ unsigned int __type:4; /**< Type. */
+ unsigned int __ack:1; /**< Whether an acknowledgement is needed. */
+ unsigned int __reserved:1; /**< Reserved. */
+ unsigned int __protocol:1; /**< A protocol-specific word is added. */
+ unsigned int __status:2; /**< Status of the transfer. */
+ unsigned int __framing:2; /**< Framing of the transfer. */
+ unsigned int __transfer_size:14; /**< Transfer size in bytes (total). */
+ } bits;
+}
+__netio_pkt_notif_t;
+
+
+/**
+ * Returns the base address of the packet.
+ */
+#define _NETIO_PKT_HANDLE_BASE(p) \
+ ((unsigned char*)((p).word & 0xFFFFFFC0))
+
+/**
+ * Returns the base address of the packet.
+ */
+#define _NETIO_PKT_BASE(p) \
+ _NETIO_PKT_HANDLE_BASE(p->__packet)
+
+/**
+ * @brief An I/O notification packet (second word)
+ *
+ * This is the second word of data received from an I/O shim in a notification
+ * packet. This is the virtual address of the packet buffer, plus some flag
+ * bits. (The virtual address of the packet is always 256-byte aligned so we
+ * have room for 8 bits' worth of flags in the low 8 bits.)
+ *
+ * @internal
+ * NOTE: The low two bits must contain "__queue", so the "packet size"
+ * (SIZE_SMALL, SIZE_LARGE, or SIZE_JUMBO) can be determined quickly.
+ *
+ * If __addr or __offset are moved, _NETIO_PKT_BASE
+ * (defined right below this) must be changed.
+ */
+typedef union
+{
+ unsigned int word; /**< The whole word. */
+ /** The various fields. */
+ struct
+ {
+ /** Which queue the packet will be returned to once it is sent back to
+ the IPP. This is one of the SIZE_xxx values. */
+ unsigned int __queue:2;
+
+ /** The IPP handle of the sending IPP. */
+ unsigned int __ipp_handle:2;
+
+ /** Reserved for future use. */
+ unsigned int __reserved:1;
+
+ /** If 1, this packet has minimal (egress) metadata; otherwise, it
+ has standard (ingress) metadata. */
+ unsigned int __minimal:1;
+
+ /** Offset of the metadata within the packet. This value is multiplied
+ * by 64 and added to the base packet address to get the metadata
+ * address. Note that this field is aligned within the word such that
+ * you can easily extract the metadata address with a 26-bit mask. */
+ unsigned int __offset:2;
+
+ /** The top 24 bits of the packet's virtual address. */
+ unsigned int __addr:24;
+ } bits;
+}
+__netio_pkt_handle_t;
+
+#endif /* !__DOXYGEN__ */
+
+
+/**
+ * @brief A handle for an I/O packet's storage.
+ * @ingroup ingress
+ *
+ * netio_pkt_handle_t encodes the concept of a ::netio_pkt_t with its
+ * packet metadata removed. It is a much smaller type that exists to
+ * facilitate applications where the full ::netio_pkt_t type is too
+ * large, such as those that cache enormous numbers of packets or wish
+ * to transmit packet descriptors over the UDN.
+ *
+ * Because there is no metadata, most ::netio_pkt_t operations cannot be
+ * performed on a netio_pkt_handle_t. It supports only
+ * netio_free_handle() (to free the buffer) and
+ * NETIO_PKT_CUSTOM_DATA_H() (to access a pointer to its contents).
+ * The application must acquire any additional metadata it wants from the
+ * original ::netio_pkt_t and record it separately.
+ *
+ * A netio_pkt_handle_t can be extracted from a ::netio_pkt_t by calling
+ * NETIO_PKT_HANDLE(). An invalid handle (analogous to NULL) can be
+ * created by assigning the value ::NETIO_PKT_HANDLE_NONE. A handle can
+ * be tested for validity with NETIO_PKT_HANDLE_IS_VALID().
+ */
+typedef struct
+{
+ unsigned int word; /**< Opaque bits. */
+} netio_pkt_handle_t;
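+
+/*
+ * Illustrative sketch: caching a packet's storage handle and later testing
+ * it for validity.  The cache variable and helper functions are
+ * hypothetical; NETIO_PKT_HANDLE(), NETIO_PKT_HANDLE_NONE,
+ * NETIO_PKT_HANDLE_IS_VALID() and NETIO_PKT_CUSTOM_DATA_H() are defined
+ * later in this header.
+ *
+ *   static netio_pkt_handle_t example_cached = { 0 };  // == NETIO_PKT_HANDLE_NONE
+ *
+ *   static void example_cache(netio_pkt_t* pkt)
+ *   {
+ *     example_cached = NETIO_PKT_HANDLE(pkt);
+ *   }
+ *
+ *   static unsigned char* example_data(void)
+ *   {
+ *     if (!NETIO_PKT_HANDLE_IS_VALID(example_cached))
+ *       return 0;
+ *     return NETIO_PKT_CUSTOM_DATA_H(example_cached);
+ *   }
+ */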
+
+/**
+ * @brief A packet descriptor.
+ *
+ * @ingroup ingress
+ * @ingroup egress
+ *
+ * This data structure represents a packet. The structure is manipulated
+ * through the @ref ingress and the @ref egress.
+ *
+ * While the contents of a netio_pkt_t are opaque, the structure itself is
+ * portable. This means that it may be shared between all tiles which have
+ * done a netio_input_register() call for the interface on which the pkt_t
+ * was initially received (via netio_get_packet()) or retrieved (via
+ * netio_get_buffer()). The contents of a netio_pkt_t can be transmitted to
+ * another tile via shared memory, or via a UDN message, or by other means.
+ * The destination tile may then use the pkt_t as if it had originally been
+ * received locally; it may read or write the packet's data, read its
+ * metadata, free the packet, send the packet, transfer the netio_pkt_t to
+ * yet another tile, and so forth.
+ *
+ * Once a netio_pkt_t has been transferred to a second tile, the first tile
+ * should not reference the original copy; in particular, if more than one
+ * tile frees or sends the same netio_pkt_t, the IPP's packet free lists will
+ * become corrupted. Note also that each tile which reads or modifies
+ * packet data must obey the memory coherency rules outlined in @ref input.
+ */
+typedef struct
+{
+#ifdef __DOXYGEN__
+ /** This structure is opaque. */
+ unsigned char opaque[32];
+#else
+ /** For an ingress packet (one with standard metadata), this is the
+ * notification header we got from the I/O shim. For an egress packet
+ * (one with minimal metadata), this word is zero if the packet has not
+ * been populated, and nonzero if it has. */
+ __netio_pkt_notif_t __notif_header;
+
+ /** Virtual address of the packet buffer, plus state flags. */
+ __netio_pkt_handle_t __packet;
+
+ /** Metadata associated with the packet. */
+ netio_pkt_metadata_t __metadata;
+#endif
+}
+netio_pkt_t;
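+
+/*
+ * Illustrative sketch: handing a netio_pkt_t to another tile through
+ * application-managed shared memory.  The shared slot and any
+ * synchronization around it are hypothetical; the key point is that the
+ * sending tile copies the descriptor and then stops using its own copy.
+ *
+ *   void example_hand_off(netio_pkt_t* pkt, netio_pkt_t* shared_slot)
+ *   {
+ *     *shared_slot = *pkt;   // the descriptor is portable between tiles
+ *     // From here on, only the receiving tile may read, send or free the
+ *     // packet; this tile must not touch *pkt again.
+ *   }
+ */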
+
+
+#ifndef __DOXYGEN__
+
+#define __NETIO_PKT_NOTIF_HEADER(pkt) ((pkt)->__notif_header)
+#define __NETIO_PKT_IPP_HANDLE(pkt) ((pkt)->__packet.bits.__ipp_handle)
+#define __NETIO_PKT_QUEUE(pkt) ((pkt)->__packet.bits.__queue)
+#define __NETIO_PKT_NOTIF_HEADER_M(mda, pkt) ((pkt)->__notif_header)
+#define __NETIO_PKT_IPP_HANDLE_M(mda, pkt) ((pkt)->__packet.bits.__ipp_handle)
+#define __NETIO_PKT_MINIMAL(pkt) ((pkt)->__packet.bits.__minimal)
+#define __NETIO_PKT_QUEUE_M(mda, pkt) ((pkt)->__packet.bits.__queue)
+#define __NETIO_PKT_FLAGS_M(mda, pkt) ((mda)->__flags)
+
+/* Packet information table, used by the attribute access functions below. */
+extern const uint16_t _netio_pkt_info[];
+
+#endif /* __DOXYGEN__ */
+
+
+#ifndef __DOXYGEN__
+/* These macros are deprecated and will disappear in a future MDE release. */
+#define NETIO_PKT_GOOD_CHECKSUM(pkt) \
+ NETIO_PKT_L4_CSUM_CORRECT(pkt)
+#define NETIO_PKT_GOOD_CHECKSUM_M(mda, pkt) \
+ NETIO_PKT_L4_CSUM_CORRECT_M(mda, pkt)
+#endif /* __DOXYGEN__ */
+
+
+/* Packet attribute access functions. */
+
+/** Return a pointer to the metadata for a packet.
+ * @ingroup ingress
+ *
+ * Calling this function once and passing the result to other retrieval
+ * functions with a "_M" suffix usually improves performance. This
+ * function must be called on an 'ingress' packet (i.e. one retrieved
+ * by @ref netio_get_packet(), on which @ref netio_populate_buffer() or
+ * @ref netio_populate_prepend_buffer() have not been called). Use of this
+ * function on an 'egress' packet will cause an assertion failure.
+ *
+ * @param[in] pkt Packet on which to operate.
+ * @return A pointer to the packet's standard metadata.
+ */
+static __inline netio_pkt_metadata_t*
+NETIO_PKT_METADATA(netio_pkt_t* pkt)
+{
+ netio_assert(!pkt->__packet.bits.__minimal);
+ return &pkt->__metadata;
+}
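+
+/*
+ * Illustrative sketch: fetching the metadata pointer once and reusing it
+ * with the "_M" accessors, as suggested above.  The accessors used here are
+ * defined later in this header; the helper function is hypothetical.
+ *
+ *   void example_inspect(netio_pkt_t* pkt)
+ *   {
+ *     netio_pkt_metadata_t* mda = NETIO_PKT_METADATA(pkt);
+ *     unsigned int hash = NETIO_PKT_FLOW_HASH_M(mda, pkt);
+ *     netio_size_t l2_len = NETIO_PKT_L2_LENGTH_M(mda, pkt);
+ *     // ... use hash and l2_len ...
+ *   }
+ */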
+
+
+/** Return a pointer to the minimal metadata for a packet.
+ * @ingroup egress
+ *
+ * Calling this function once and passing the result to other retrieval
+ * functions with a "_MM" suffix usually improves performance. This
+ * function must be called on an 'egress' packet (i.e. one on which
+ * @ref netio_populate_buffer() or @ref netio_populate_prepend_buffer()
+ * have been called, or one retrieved by @ref netio_get_buffer()). Use of
+ * this function on an 'ingress' packet will cause an assertion failure.
+ *
+ * @param[in] pkt Packet on which to operate.
+ * @return A pointer to the packet's minimal metadata.
+ */
+static __inline netio_pkt_minimal_metadata_t*
+NETIO_PKT_MINIMAL_METADATA(netio_pkt_t* pkt)
+{
+ netio_assert(pkt->__packet.bits.__minimal);
+ return (netio_pkt_minimal_metadata_t*) &pkt->__metadata;
+}
+
+
+/** Determine whether a packet has 'minimal' metadata.
+ * @ingroup pktfuncs
+ *
+ * This function will return nonzero if the packet is an 'egress'
+ * packet (i.e. one on which @ref netio_populate_buffer() or
+ * @ref netio_populate_prepend_buffer() have been called, or one
+ * retrieved by @ref netio_get_buffer()), and zero if the packet
+ * is an 'ingress' packet (i.e. one retrieved by @ref netio_get_packet(),
+ * which has not been converted into an 'egress' packet).
+ *
+ * @param[in] pkt Packet on which to operate.
+ * @return Nonzero if the packet has minimal metadata.
+ */
+static __inline unsigned int
+NETIO_PKT_IS_MINIMAL(netio_pkt_t* pkt)
+{
+ return pkt->__packet.bits.__minimal;
+}
+
+
+/** Return a handle for a packet's storage.
+ * @ingroup pktfuncs
+ *
+ * @param[in] pkt Packet on which to operate.
+ * @return A handle for the packet's storage.
+ */
+static __inline netio_pkt_handle_t
+NETIO_PKT_HANDLE(netio_pkt_t* pkt)
+{
+ netio_pkt_handle_t h;
+ h.word = pkt->__packet.word;
+ return h;
+}
+
+
+/** A special reserved value indicating the absence of a packet handle.
+ *
+ * @ingroup pktfuncs
+ */
+#define NETIO_PKT_HANDLE_NONE ((netio_pkt_handle_t) { 0 })
+
+
+/** Test whether a packet handle is valid.
+ *
+ * Applications may wish to use the reserved value NETIO_PKT_HANDLE_NONE
+ * to indicate no packet at all. This function tests to see if a packet
+ * handle is a real handle, not this special reserved value.
+ *
+ * @ingroup pktfuncs
+ *
+ * @param[in] handle Handle on which to operate.
+ * @return One if the packet handle is valid, else zero.
+ */
+static __inline unsigned int
+NETIO_PKT_HANDLE_IS_VALID(netio_pkt_handle_t handle)
+{
+ return handle.word != 0;
+}
+
+
+
+/** Return a pointer to the start of the packet's custom header.
+ * A custom header may or may not be present, depending upon the IPP; its
+ * contents and alignment are also IPP-dependent. Currently, none of the
+ * standard IPPs supplied by Tilera produce a custom header. If present,
+ * the custom header precedes the L2 header in the packet buffer.
+ * @ingroup ingress
+ *
+ * @param[in] handle Handle on which to operate.
+ * @return A pointer to start of the packet.
+ */
+static __inline unsigned char*
+NETIO_PKT_CUSTOM_DATA_H(netio_pkt_handle_t handle)
+{
+ return _NETIO_PKT_HANDLE_BASE(handle) + NETIO_PACKET_PADDING;
+}
+
+
+/** Return the length of the packet's custom header.
+ * A custom header may or may not be present, depending upon the IPP; its
+ * contents and alignment are also IPP-dependent. Currently, none of the
+ * standard IPPs supplied by Tilera produce a custom header. If present,
+ * the custom header precedes the L2 header in the packet buffer.
+ *
+ * @ingroup ingress
+ *
+ * @param[in] mda Pointer to packet's standard metadata.
+ * @param[in] pkt Packet on which to operate.
+ * @return The length of the packet's custom header, in bytes.
+ */
+static __inline netio_size_t
+NETIO_PKT_CUSTOM_HEADER_LENGTH_M(netio_pkt_metadata_t* mda, netio_pkt_t* pkt)
+{
+ /*
+ * Note that we effectively need to extract a quantity from the flags word
+ * which is measured in words, and then turn it into bytes by shifting
+ * it left by 2. We do this all at once by shifting right by two fewer
+ * bits, and shifting the mask up two bits.
+ */
+ return ((mda->__flags >> (_NETIO_PKT_CUSTOM_LEN_SHIFT - 2)) &
+ (_NETIO_PKT_CUSTOM_LEN_RMASK << 2));
+}
+
+
+/** Return the length of the packet, starting with the custom header.
+ * A custom header may or may not be present, depending upon the IPP; its
+ * contents and alignment are also IPP-dependent. Currently, none of the
+ * standard IPPs supplied by Tilera produce a custom header. If present,
+ * the custom header precedes the L2 header in the packet buffer.
+ * @ingroup ingress
+ *
+ * @param[in] mda Pointer to packet's standard metadata.
+ * @param[in] pkt Packet on which to operate.
+ * @return The length of the packet, in bytes.
+ */
+static __inline netio_size_t
+NETIO_PKT_CUSTOM_LENGTH_M(netio_pkt_metadata_t* mda, netio_pkt_t* pkt)
+{
+ return (__NETIO_PKT_NOTIF_HEADER(pkt).bits.__transfer_size -
+ NETIO_PACKET_PADDING);
+}
+
+
+/** Return a pointer to the start of the packet's custom header.
+ * A custom header may or may not be present, depending upon the IPP; its
+ * contents and alignment are also IPP-dependent. Currently, none of the
+ * standard IPPs supplied by Tilera produce a custom header. If present,
+ * the custom header precedes the L2 header in the packet buffer.
+ * @ingroup ingress
+ *
+ * @param[in] mda Pointer to packet's standard metadata.
+ * @param[in] pkt Packet on which to operate.
+ * @return A pointer to start of the packet.
+ */
+static __inline unsigned char*
+NETIO_PKT_CUSTOM_DATA_M(netio_pkt_metadata_t* mda, netio_pkt_t* pkt)
+{
+ return NETIO_PKT_CUSTOM_DATA_H(NETIO_PKT_HANDLE(pkt));
+}
+
+
+/** Return the length of the packet's L2 (Ethernet plus VLAN or SNAP) header.
+ * @ingroup ingress
+ *
+ * @param[in] mda Pointer to packet's standard metadata.
+ * @param[in] pkt Packet on which to operate.
+ * @return The length of the packet's L2 header, in bytes.
+ */
+static __inline netio_size_t
+NETIO_PKT_L2_HEADER_LENGTH_M(netio_pkt_metadata_t* mda, netio_pkt_t* pkt)
+{
+ /*
+ * Note that we effectively need to extract a quantity from the flags word
+ * which is measured in words, and then turn it into bytes by shifting
+ * it left by 2. We do this all at once by shifting right by two fewer
+ * bits, and shifting the mask up two bits. We then add two bytes.
+ */
+ return ((mda->__flags >> (_NETIO_PKT_L2_LEN_SHIFT - 2)) &
+ (_NETIO_PKT_L2_LEN_RMASK << 2)) + 2;
+}
+
+
+/** Return the length of the packet, starting with the L2 (Ethernet) header.
+ * @ingroup ingress
+ *
+ * @param[in] mda Pointer to packet's standard metadata.
+ * @param[in] pkt Packet on which to operate.
+ * @return The length of the packet, in bytes.
+ */
+static __inline netio_size_t
+NETIO_PKT_L2_LENGTH_M(netio_pkt_metadata_t* mda, netio_pkt_t* pkt)
+{
+ return (NETIO_PKT_CUSTOM_LENGTH_M(mda, pkt) -
+ NETIO_PKT_CUSTOM_HEADER_LENGTH_M(mda,pkt));
+}
+
+
+/** Return a pointer to the start of the packet's L2 (Ethernet) header.
+ * @ingroup ingress
+ *
+ * @param[in] mda Pointer to packet's standard metadata.
+ * @param[in] pkt Packet on which to operate.
+ * @return A pointer to start of the packet.
+ */
+static __inline unsigned char*
+NETIO_PKT_L2_DATA_M(netio_pkt_metadata_t* mda, netio_pkt_t* pkt)
+{
+ return (NETIO_PKT_CUSTOM_DATA_M(mda, pkt) +
+ NETIO_PKT_CUSTOM_HEADER_LENGTH_M(mda, pkt));
+}
+
+
+/** Retrieve the length of the packet, starting with the L3 (generally,
+ * the IP) header.
+ * @ingroup ingress
+ *
+ * @param[in] mda Pointer to packet's standard metadata.
+ * @param[in] pkt Packet on which to operate.
+ * @return Length of the packet's L3 header and data, in bytes.
+ */
+static __inline netio_size_t
+NETIO_PKT_L3_LENGTH_M(netio_pkt_metadata_t* mda, netio_pkt_t* pkt)
+{
+ return (NETIO_PKT_L2_LENGTH_M(mda, pkt) -
+ NETIO_PKT_L2_HEADER_LENGTH_M(mda,pkt));
+}
+
+
+/** Return a pointer to the packet's L3 (generally, the IP) header.
+ * @ingroup ingress
+ *
+ * Note that we guarantee word alignment of the L3 header.
+ *
+ * @param[in] mda Pointer to packet's standard metadata.
+ * @param[in] pkt Packet on which to operate.
+ * @return A pointer to the packet's L3 header.
+ */
+static __inline unsigned char*
+NETIO_PKT_L3_DATA_M(netio_pkt_metadata_t* mda, netio_pkt_t* pkt)
+{
+ return (NETIO_PKT_L2_DATA_M(mda, pkt) +
+ NETIO_PKT_L2_HEADER_LENGTH_M(mda, pkt));
+}
+
+
+/** Return the ordinal of the packet.
+ * @ingroup ingress
+ *
+ * Each packet is given an ordinal number when it is delivered by the IPP.
+ * In the medium term, the ordinal is unique and monotonically increasing,
+ * being incremented by 1 for each packet; the ordinal of the first packet
+ * delivered after the IPP starts is zero. (Since the ordinal is of finite
+ * size, given enough input packets, it will eventually wrap around to zero;
+ * in the long term, therefore, ordinals are not unique.) The ordinals
+ * handed out by different IPPs are not disjoint, so two packets from
+ * different IPPs may have identical ordinals. Packets dropped by the
+ * IPP or by the I/O shim are not assigned ordinals.
+ *
+ * @param[in] mda Pointer to packet's standard metadata.
+ * @param[in] pkt Packet on which to operate.
+ * @return The packet's per-IPP packet ordinal.
+ */
+static __inline unsigned int
+NETIO_PKT_ORDINAL_M(netio_pkt_metadata_t* mda, netio_pkt_t* pkt)
+{
+ return mda->__packet_ordinal;
+}
+
+
+/** Return the per-group ordinal of the packet.
+ * @ingroup ingress
+ *
+ * Each packet is given a per-group ordinal number when it is
+ * delivered by the IPP. By default, the group is the packet's VLAN,
+ * although IPP can be recompiled to use different values. In
+ * the medium term, the ordinal is unique and monotonically
+ * increasing, being incremented by 1 for each packet; the ordinal of
+ * the first packet distributed to a particular group is zero.
+ * (Since the ordinal is of finite size, given enough input packets,
+ * it will eventually wrap around to zero; in the long term,
+ * therefore, ordinals are not unique.) The ordinals handed out by
+ * different IPPs are not disjoint, so two packets from different IPPs
+ * may have identical ordinals; similarly, packets distributed to
+ * different groups may have identical ordinals. Packets dropped by
+ * the IPP or by the I/O shim are not assigned ordinals.
+ *
+ * @param[in] mda Pointer to packet's standard metadata.
+ * @param[in] pkt Packet on which to operate.
+ * @return The packet's per-IPP, per-group ordinal.
+ */
+static __inline unsigned int
+NETIO_PKT_GROUP_ORDINAL_M(netio_pkt_metadata_t* mda, netio_pkt_t* pkt)
+{
+ return mda->__group_ordinal;
+}
+
+
+/** Return the VLAN ID assigned to the packet.
+ * @ingroup ingress
+ *
+ * This value is usually contained within the packet header.
+ *
+ * This value will be zero if the packet does not have a VLAN tag, or if
+ * this value was not extracted from the packet.
+ *
+ * @param[in] mda Pointer to packet's standard metadata.
+ * @param[in] pkt Packet on which to operate.
+ * @return The packet's VLAN ID.
+ */
+static __inline unsigned short
+NETIO_PKT_VLAN_ID_M(netio_pkt_metadata_t* mda, netio_pkt_t* pkt)
+{
+ int vl = (mda->__flags >> _NETIO_PKT_VLAN_SHIFT) & _NETIO_PKT_VLAN_RMASK;
+ unsigned short* pkt_p;
+ int index;
+ unsigned short val;
+
+ if (vl == _NETIO_PKT_VLAN_NONE)
+ return 0;
+
+ pkt_p = (unsigned short*) NETIO_PKT_L2_DATA_M(mda, pkt);
+ index = (mda->__flags >> _NETIO_PKT_TYPE_SHIFT) & _NETIO_PKT_TYPE_RMASK;
+
+ val = pkt_p[(_netio_pkt_info[index] >> _NETIO_PKT_INFO_VLAN_SHIFT) &
+ _NETIO_PKT_INFO_VLAN_RMASK];
+
+#ifdef __TILECC__
+ return (__insn_bytex(val) >> 16) & 0xFFF;
+#else
+ return (__builtin_bswap32(val) >> 16) & 0xFFF;
+#endif
+}
+
+
+/** Return the ethertype of the packet.
+ * @ingroup ingress
+ *
+ * This value is usually contained within the packet header.
+ *
+ * This value is reliable if @ref NETIO_PKT_ETHERTYPE_RECOGNIZED_M()
+ * returns true, and otherwise, may not be well defined.
+ *
+ * @param[in] mda Pointer to packet's standard metadata.
+ * @param[in] pkt Packet on which to operate.
+ * @return The packet's ethertype.
+ */
+static __inline unsigned short
+NETIO_PKT_ETHERTYPE_M(netio_pkt_metadata_t* mda, netio_pkt_t* pkt)
+{
+ unsigned short* pkt_p = (unsigned short*) NETIO_PKT_L2_DATA_M(mda, pkt);
+ int index = (mda->__flags >> _NETIO_PKT_TYPE_SHIFT) & _NETIO_PKT_TYPE_RMASK;
+
+ unsigned short val =
+ pkt_p[(_netio_pkt_info[index] >> _NETIO_PKT_INFO_ETYPE_SHIFT) &
+ _NETIO_PKT_INFO_ETYPE_RMASK];
+
+ return __builtin_bswap32(val) >> 16;
+}
+
+
+/** Return the flow hash computed on the packet.
+ * @ingroup ingress
+ *
+ * For TCP and UDP packets, this hash is calculated by hashing together
+ * the "5-tuple" values, specifically the source IP address, destination
+ * IP address, protocol type, source port and destination port.
+ * The hash value is intended to be helpful for millions of distinct
+ * flows.
+ *
+ * For IPv4 or IPv6 packets which are neither TCP nor UDP, the flow hash is
+ * derived by hashing together the source and destination IP addresses.
+ *
+ * For MPLS-encapsulated packets, the flow hash is derived by hashing
+ * the first MPLS label.
+ *
+ * For all other packets the flow hash is computed from the source
+ * and destination Ethernet addresses.
+ *
+ * The hash is symmetric, meaning it produces the same value if the
+ * source and destination are swapped. The only exceptions are
+ * tunneling protocols 0x04 (IP in IP Encapsulation), 0x29 (Simple
+ * Internet Protocol), 0x2F (General Routing Encapsulation) and 0x32
+ * (Encap Security Payload), which use only the destination address
+ * since the source address is not meaningful.
+ *
+ * @param[in] mda Pointer to packet's standard metadata.
+ * @param[in] pkt Packet on which to operate.
+ * @return The packet's 32-bit flow hash.
+ */
+static __inline unsigned int
+NETIO_PKT_FLOW_HASH_M(netio_pkt_metadata_t* mda, netio_pkt_t* pkt)
+{
+ return mda->__flow_hash;
+}
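+
+/*
+ * Illustrative sketch: a common use of the flow hash is to pick a worker
+ * consistently per flow.  The helper function and its num_workers parameter
+ * are hypothetical.
+ *
+ *   unsigned int example_pick_worker(netio_pkt_metadata_t* mda,
+ *                                    netio_pkt_t* pkt,
+ *                                    unsigned int num_workers)
+ *   {
+ *     return NETIO_PKT_FLOW_HASH_M(mda, pkt) % num_workers;
+ *   }
+ */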
+
+
+/** Return the first word of "user data" for the packet.
+ *
+ * The contents of the user data words depend on the IPP.
+ *
+ * When using the standard ipp1, ipp2, or ipp4 sub-drivers, the first
+ * word of user data contains the least significant bits of the 64-bit
+ * arrival cycle count (see @c get_cycle_count_low()).
+ *
+ * See the <em>System Programmer's Guide</em> for details.
+ *
+ * @ingroup ingress
+ *
+ * @param[in] mda Pointer to packet's standard metadata.
+ * @param[in] pkt Packet on which to operate.
+ * @return The packet's first word of "user data".
+ */
+static __inline unsigned int
+NETIO_PKT_USER_DATA_0_M(netio_pkt_metadata_t* mda, netio_pkt_t* pkt)
+{
+ return mda->__user_data_0;
+}
+
+
+/** Return the second word of "user data" for the packet.
+ *
+ * The contents of the user data words depend on the IPP.
+ *
+ * When using the standard ipp1, ipp2, or ipp4 sub-drivers, the second
+ * word of user data contains the most significant bits of the 64-bit
+ * arrival cycle count (see @c get_cycle_count_high()).
+ *
+ * See the <em>System Programmer's Guide</em> for details.
+ *
+ * @ingroup ingress
+ *
+ * @param[in] mda Pointer to packet's standard metadata.
+ * @param[in] pkt Packet on which to operate.
+ * @return The packet's second word of "user data".
+ */
+static __inline unsigned int
+NETIO_PKT_USER_DATA_1_M(netio_pkt_metadata_t* mda, netio_pkt_t* pkt)
+{
+ return mda->__user_data_1;
+}
+
+
+/** Determine whether the L4 (TCP/UDP) checksum was calculated.
+ * @ingroup ingress
+ *
+ * @param[in] mda Pointer to packet's standard metadata.
+ * @param[in] pkt Packet on which to operate.
+ * @return Nonzero if the L4 checksum was calculated.
+ */
+static __inline unsigned int
+NETIO_PKT_L4_CSUM_CALCULATED_M(netio_pkt_metadata_t* mda, netio_pkt_t* pkt)
+{
+ return !(mda->__flags & _NETIO_PKT_NO_L4_CSUM_MASK);
+}
+
+
+/** Determine whether the L4 (TCP/UDP) checksum was calculated and found to
+ * be correct.
+ * @ingroup ingress
+ *
+ * @param[in] mda Pointer to packet's standard metadata.
+ * @param[in] pkt Packet on which to operate.
+ * @return Nonzero if the checksum was calculated and is correct.
+ */
+static __inline unsigned int
+NETIO_PKT_L4_CSUM_CORRECT_M(netio_pkt_metadata_t* mda, netio_pkt_t* pkt)
+{
+ return !(mda->__flags &
+ (_NETIO_PKT_BAD_L4_CSUM_MASK | _NETIO_PKT_NO_L4_CSUM_MASK));
+}
+
+
+/** Determine whether the L3 (IP) checksum was calculated.
+ * @ingroup ingress
+ *
+ * @param[in] mda Pointer to packet's standard metadata.
+ * @param[in] pkt Packet on which to operate.
+ * @return Nonzero if the L3 (IP) checksum was calculated.
+*/
+static __inline unsigned int
+NETIO_PKT_L3_CSUM_CALCULATED_M(netio_pkt_metadata_t* mda, netio_pkt_t* pkt)
+{
+ return !(mda->__flags & _NETIO_PKT_NO_L3_CSUM_MASK);
+}
+
+
+/** Determine whether the L3 (IP) checksum was calculated and found to be
+ * correct.
+ * @ingroup ingress
+ *
+ * @param[in] mda Pointer to packet's standard metadata.
+ * @param[in] pkt Packet on which to operate.
+ * @return Nonzero if the checksum was calculated and is correct.
+ */
+static __inline unsigned int
+NETIO_PKT_L3_CSUM_CORRECT_M(netio_pkt_metadata_t* mda, netio_pkt_t* pkt)
+{
+ return !(mda->__flags &
+ (_NETIO_PKT_BAD_L3_CSUM_MASK | _NETIO_PKT_NO_L3_CSUM_MASK));
+}
+
+
+/** Determine whether the ethertype was recognized and L3 packet data was
+ * processed.
+ * @ingroup ingress
+ *
+ * @param[in] mda Pointer to packet's standard metadata.
+ * @param[in] pkt Packet on which to operate.
+ * @return Nonzero if the ethertype was recognized and L3 packet data was
+ * processed.
+ */
+static __inline unsigned int
+NETIO_PKT_ETHERTYPE_RECOGNIZED_M(netio_pkt_metadata_t* mda, netio_pkt_t* pkt)
+{
+ return !(mda->__flags & _NETIO_PKT_TYPE_UNRECOGNIZED_MASK);
+}
+
+
+/** Retrieve the status of a packet and any errors that may have occurred
+ * during ingress processing (length mismatches, CRC errors, etc.).
+ * @ingroup ingress
+ *
+ * Note that packets for which @ref NETIO_PKT_ETHERTYPE_RECOGNIZED()
+ * returns zero are always reported as underlength, as there is no a priori
+ * means to determine their length. Normally, applications should use
+ * @ref NETIO_PKT_BAD_M() instead of explicitly checking status with this
+ * function.
+ *
+ * @param[in] mda Pointer to packet's standard metadata.
+ * @param[in] pkt Packet on which to operate.
+ * @return The packet's status.
+ */
+static __inline netio_pkt_status_t
+NETIO_PKT_STATUS_M(netio_pkt_metadata_t* mda, netio_pkt_t* pkt)
+{
+ return (netio_pkt_status_t) __NETIO_PKT_NOTIF_HEADER(pkt).bits.__status;
+}
+
+
+/** Report whether a packet is bad (i.e., was shorter than expected based on
+ * its headers, or had a bad CRC).
+ * @ingroup ingress
+ *
+ * Note that this function does not verify L3 or L4 checksums.
+ *
+ * @param[in] mda Pointer to packet's standard metadata.
+ * @param[in] pkt Packet on which to operate.
+ * @return Nonzero if the packet is bad and should be discarded.
+ */
+static __inline unsigned int
+NETIO_PKT_BAD_M(netio_pkt_metadata_t* mda, netio_pkt_t* pkt)
+{
+ return ((NETIO_PKT_STATUS_M(mda, pkt) & 1) &&
+ (NETIO_PKT_ETHERTYPE_RECOGNIZED_M(mda, pkt) ||
+ NETIO_PKT_STATUS_M(mda, pkt) == NETIO_PKT_STATUS_BAD));
+}
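+
+/*
+ * Illustrative sketch: the usual ingress check is simply NETIO_PKT_BAD_M();
+ * NETIO_PKT_STATUS_M() is consulted only when the application cares why a
+ * packet was rejected.  The helper function below is hypothetical.
+ *
+ *   void example_ingress_check(netio_pkt_metadata_t* mda, netio_pkt_t* pkt)
+ *   {
+ *     if (NETIO_PKT_BAD_M(mda, pkt))
+ *       return;   // discard: bad CRC or shorter than its headers imply
+ *     // ... normal processing ...
+ *   }
+ */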
+
+
+/** Return the length of the packet, starting with the L2 (Ethernet) header.
+ * @ingroup egress
+ *
+ * @param[in] mmd Pointer to packet's minimal metadata.
+ * @param[in] pkt Packet on which to operate.
+ * @return The length of the packet, in bytes.
+ */
+static __inline netio_size_t
+NETIO_PKT_L2_LENGTH_MM(netio_pkt_minimal_metadata_t* mmd, netio_pkt_t* pkt)
+{
+ return mmd->l2_length;
+}
+
+
+/** Return the length of the L2 (Ethernet) header.
+ * @ingroup egress
+ *
+ * @param[in] mmd Pointer to packet's minimal metadata.
+ * @param[in] pkt Packet on which to operate.
+ * @return The length of the packet's L2 header, in bytes.
+ */
+static __inline netio_size_t
+NETIO_PKT_L2_HEADER_LENGTH_MM(netio_pkt_minimal_metadata_t* mmd,
+ netio_pkt_t* pkt)
+{
+ return mmd->l3_offset - mmd->l2_offset;
+}
+
+
+/** Return the length of the packet, starting with the L3 (IP) header.
+ * @ingroup egress
+ *
+ * @param[in] mmd Pointer to packet's minimal metadata.
+ * @param[in] pkt Packet on which to operate.
+ * @return Length of the packet's L3 header and data, in bytes.
+ */
+static __inline netio_size_t
+NETIO_PKT_L3_LENGTH_MM(netio_pkt_minimal_metadata_t* mmd, netio_pkt_t* pkt)
+{
+ return (NETIO_PKT_L2_LENGTH_MM(mmd, pkt) -
+ NETIO_PKT_L2_HEADER_LENGTH_MM(mmd, pkt));
+}
+
+
+/** Return a pointer to the packet's L3 (generally, the IP) header.
+ * @ingroup egress
+ *
+ * Note that we guarantee word alignment of the L3 header.
+ *
+ * @param[in] mmd Pointer to packet's minimal metadata.
+ * @param[in] pkt Packet on which to operate.
+ * @return A pointer to the packet's L3 header.
+ */
+static __inline unsigned char*
+NETIO_PKT_L3_DATA_MM(netio_pkt_minimal_metadata_t* mmd, netio_pkt_t* pkt)
+{
+ return _NETIO_PKT_BASE(pkt) + mmd->l3_offset;
+}
+
+
+/** Return a pointer to the packet's L2 (Ethernet) header.
+ * @ingroup egress
+ *
+ * @param[in] mmd Pointer to packet's minimal metadata.
+ * @param[in] pkt Packet on which to operate.
+ * @return A pointer to start of the packet.
+ */
+static __inline unsigned char*
+NETIO_PKT_L2_DATA_MM(netio_pkt_minimal_metadata_t* mmd, netio_pkt_t* pkt)
+{
+ return _NETIO_PKT_BASE(pkt) + mmd->l2_offset;
+}
+
+
+/** Retrieve the status of a packet and any errors that may have occurred
+ * during ingress processing (length mismatches, CRC errors, etc.).
+ * @ingroup ingress
+ *
+ * Note that packets for which @ref NETIO_PKT_ETHERTYPE_RECOGNIZED()
+ * returns zero are always reported as underlength, as there is no a priori
+ * means to determine their length. Normally, applications should use
+ * @ref NETIO_PKT_BAD() instead of explicitly checking status with this
+ * function.
+ *
+ * @param[in] pkt Packet on which to operate.
+ * @return The packet's status.
+ */
+static __inline netio_pkt_status_t
+NETIO_PKT_STATUS(netio_pkt_t* pkt)
+{
+ netio_assert(!pkt->__packet.bits.__minimal);
+
+ return (netio_pkt_status_t) __NETIO_PKT_NOTIF_HEADER(pkt).bits.__status;
+}
+
+
+/** Report whether a packet is bad (i.e., was shorter than expected based on
+ * its headers, or had a bad CRC).
+ * @ingroup ingress
+ *
+ * Note that this function does not verify L3 or L4 checksums.
+ *
+ * @param[in] pkt Packet on which to operate.
+ * @return Nonzero if the packet is bad and should be discarded.
+ */
+static __inline unsigned int
+NETIO_PKT_BAD(netio_pkt_t* pkt)
+{
+ netio_pkt_metadata_t* mda = NETIO_PKT_METADATA(pkt);
+
+ return NETIO_PKT_BAD_M(mda, pkt);
+}
+
+
+/** Return the length of the packet's custom header.
+ * A custom header may or may not be present, depending upon the IPP; its
+ * contents and alignment are also IPP-dependent. Currently, none of the
+ * standard IPPs supplied by Tilera produce a custom header. If present,
+ * the custom header precedes the L2 header in the packet buffer.
+ * @ingroup pktfuncs
+ *
+ * @param[in] pkt Packet on which to operate.
+ * @return The length of the packet's custom header, in bytes.
+ */
+static __inline netio_size_t
+NETIO_PKT_CUSTOM_HEADER_LENGTH(netio_pkt_t* pkt)
+{
+ netio_pkt_metadata_t* mda = NETIO_PKT_METADATA(pkt);
+
+ return NETIO_PKT_CUSTOM_HEADER_LENGTH_M(mda, pkt);
+}
+
+
+/** Return the length of the packet, starting with the custom header.
+ * A custom header may or may not be present, depending upon the IPP; its
+ * contents and alignment are also IPP-dependent. Currently, none of the
+ * standard IPPs supplied by Tilera produce a custom header. If present,
+ * the custom header precedes the L2 header in the packet buffer.
+ * @ingroup pktfuncs
+ *
+ * @param[in] pkt Packet on which to operate.
+ * @return The length of the packet, in bytes.
+ */
+static __inline netio_size_t
+NETIO_PKT_CUSTOM_LENGTH(netio_pkt_t* pkt)
+{
+ netio_pkt_metadata_t* mda = NETIO_PKT_METADATA(pkt);
+
+ return NETIO_PKT_CUSTOM_LENGTH_M(mda, pkt);
+}
+
+
+/** Return a pointer to the packet's custom header.
+ * A custom header may or may not be present, depending upon the IPP; its
+ * contents and alignment are also IPP-dependent. Currently, none of the
+ * standard IPPs supplied by Tilera produce a custom header. If present,
+ * the custom header precedes the L2 header in the packet buffer.
+ * @ingroup pktfuncs
+ *
+ * @param[in] pkt Packet on which to operate.
+ * @return A pointer to start of the packet.
+ */
+static __inline unsigned char*
+NETIO_PKT_CUSTOM_DATA(netio_pkt_t* pkt)
+{
+ netio_pkt_metadata_t* mda = NETIO_PKT_METADATA(pkt);
+
+ return NETIO_PKT_CUSTOM_DATA_M(mda, pkt);
+}
+
+
+/** Return the length of the packet's L2 (Ethernet plus VLAN or SNAP) header.
+ * @ingroup pktfuncs
+ *
+ * @param[in] pkt Packet on which to operate.
+ * @return The length of the packet's L2 header, in bytes.
+ */
+static __inline netio_size_t
+NETIO_PKT_L2_HEADER_LENGTH(netio_pkt_t* pkt)
+{
+ if (NETIO_PKT_IS_MINIMAL(pkt))
+ {
+ netio_pkt_minimal_metadata_t* mmd = NETIO_PKT_MINIMAL_METADATA(pkt);
+
+ return NETIO_PKT_L2_HEADER_LENGTH_MM(mmd, pkt);
+ }
+ else
+ {
+ netio_pkt_metadata_t* mda = NETIO_PKT_METADATA(pkt);
+
+ return NETIO_PKT_L2_HEADER_LENGTH_M(mda, pkt);
+ }
+}
+
+
+/** Return the length of the packet, starting with the L2 (Ethernet) header.
+ * @ingroup pktfuncs
+ *
+ * @param[in] pkt Packet on which to operate.
+ * @return The length of the packet, in bytes.
+ */
+static __inline netio_size_t
+NETIO_PKT_L2_LENGTH(netio_pkt_t* pkt)
+{
+ if (NETIO_PKT_IS_MINIMAL(pkt))
+ {
+ netio_pkt_minimal_metadata_t* mmd = NETIO_PKT_MINIMAL_METADATA(pkt);
+
+ return NETIO_PKT_L2_LENGTH_MM(mmd, pkt);
+ }
+ else
+ {
+ netio_pkt_metadata_t* mda = NETIO_PKT_METADATA(pkt);
+
+ return NETIO_PKT_L2_LENGTH_M(mda, pkt);
+ }
+}
+
+
+/** Return a pointer to the packet's L2 (Ethernet) header.
+ * @ingroup pktfuncs
+ *
+ * @param[in] pkt Packet on which to operate.
+ * @return A pointer to start of the packet.
+ */
+static __inline unsigned char*
+NETIO_PKT_L2_DATA(netio_pkt_t* pkt)
+{
+ if (NETIO_PKT_IS_MINIMAL(pkt))
+ {
+ netio_pkt_minimal_metadata_t* mmd = NETIO_PKT_MINIMAL_METADATA(pkt);
+
+ return NETIO_PKT_L2_DATA_MM(mmd, pkt);
+ }
+ else
+ {
+ netio_pkt_metadata_t* mda = NETIO_PKT_METADATA(pkt);
+
+ return NETIO_PKT_L2_DATA_M(mda, pkt);
+ }
+}
+
+
+/** Retrieve the length of the packet, starting with the L3 (generally, the IP)
+ * header.
+ * @ingroup pktfuncs
+ *
+ * @param[in] pkt Packet on which to operate.
+ * @return Length of the packet's L3 header and data, in bytes.
+ */
+static __inline netio_size_t
+NETIO_PKT_L3_LENGTH(netio_pkt_t* pkt)
+{
+ if (NETIO_PKT_IS_MINIMAL(pkt))
+ {
+ netio_pkt_minimal_metadata_t* mmd = NETIO_PKT_MINIMAL_METADATA(pkt);
+
+ return NETIO_PKT_L3_LENGTH_MM(mmd, pkt);
+ }
+ else
+ {
+ netio_pkt_metadata_t* mda = NETIO_PKT_METADATA(pkt);
+
+ return NETIO_PKT_L3_LENGTH_M(mda, pkt);
+ }
+}
+
+
+/** Return a pointer to the packet's L3 (generally, the IP) header.
+ * @ingroup pktfuncs
+ *
+ * Note that we guarantee word alignment of the L3 header.
+ *
+ * @param[in] pkt Packet on which to operate.
+ * @return A pointer to the packet's L3 header.
+ */
+static __inline unsigned char*
+NETIO_PKT_L3_DATA(netio_pkt_t* pkt)
+{
+ if (NETIO_PKT_IS_MINIMAL(pkt))
+ {
+ netio_pkt_minimal_metadata_t* mmd = NETIO_PKT_MINIMAL_METADATA(pkt);
+
+ return NETIO_PKT_L3_DATA_MM(mmd, pkt);
+ }
+ else
+ {
+ netio_pkt_metadata_t* mda = NETIO_PKT_METADATA(pkt);
+
+ return NETIO_PKT_L3_DATA_M(mda, pkt);
+ }
+}
+
+
+/** Return the ordinal of the packet.
+ * @ingroup ingress
+ *
+ * Each packet is given an ordinal number when it is delivered by the IPP.
+ * In the medium term, the ordinal is unique and monotonically increasing,
+ * being incremented by 1 for each packet; the ordinal of the first packet
+ * delivered after the IPP starts is zero. (Since the ordinal is of finite
+ * size, given enough input packets, it will eventually wrap around to zero;
+ * in the long term, therefore, ordinals are not unique.) The ordinals
+ * handed out by different IPPs are not disjoint, so two packets from
+ * different IPPs may have identical ordinals. Packets dropped by the
+ * IPP or by the I/O shim are not assigned ordinals.
+ *
+ *
+ * @param[in] pkt Packet on which to operate.
+ * @return The packet's per-IPP packet ordinal.
+ */
+static __inline unsigned int
+NETIO_PKT_ORDINAL(netio_pkt_t* pkt)
+{
+ netio_pkt_metadata_t* mda = NETIO_PKT_METADATA(pkt);
+
+ return NETIO_PKT_ORDINAL_M(mda, pkt);
+}
+
+
+/** Return the per-group ordinal of the packet.
+ * @ingroup ingress
+ *
+ * Each packet is given a per-group ordinal number when it is
+ * delivered by the IPP. By default, the group is the packet's VLAN,
+ * although the IPP can be recompiled to use different values. In
+ * the medium term, the ordinal is unique and monotonically
+ * increasing, being incremented by 1 for each packet; the ordinal of
+ * the first packet distributed to a particular group is zero.
+ * (Since the ordinal is of finite size, given enough input packets,
+ * it will eventually wrap around to zero; in the long term,
+ * therefore, ordinals are not unique.) The ordinals handed out by
+ * different IPPs are not disjoint, so two packets from different IPPs
+ * may have identical ordinals; similarly, packets distributed to
+ * different groups may have identical ordinals. Packets dropped by
+ * the IPP or by the I/O shim are not assigned ordinals.
+ *
+ * @param[in] pkt Packet on which to operate.
+ * @return The packet's per-IPP, per-group ordinal.
+ */
+static __inline unsigned int
+NETIO_PKT_GROUP_ORDINAL(netio_pkt_t* pkt)
+{
+ netio_pkt_metadata_t* mda = NETIO_PKT_METADATA(pkt);
+
+ return NETIO_PKT_GROUP_ORDINAL_M(mda, pkt);
+}
+
+
+/** Return the VLAN ID assigned to the packet.
+ * @ingroup ingress
+ *
+ * This is usually also contained within the packet header. If the packet
+ * does not have a VLAN tag, the VLAN ID returned by this function is zero.
+ *
+ * @param[in] pkt Packet on which to operate.
+ * @return The packet's VLAN ID.
+ */
+static __inline unsigned short
+NETIO_PKT_VLAN_ID(netio_pkt_t* pkt)
+{
+ netio_pkt_metadata_t* mda = NETIO_PKT_METADATA(pkt);
+
+ return NETIO_PKT_VLAN_ID_M(mda, pkt);
+}
+
+
+/** Return the ethertype of the packet.
+ * @ingroup ingress
+ *
+ * This value is reliable if @ref NETIO_PKT_ETHERTYPE_RECOGNIZED()
+ * returns true, and otherwise, may not be well defined.
+ *
+ * @param[in] pkt Packet on which to operate.
+ * @return The packet's ethertype.
+ */
+static __inline unsigned short
+NETIO_PKT_ETHERTYPE(netio_pkt_t* pkt)
+{
+ netio_pkt_metadata_t* mda = NETIO_PKT_METADATA(pkt);
+
+ return NETIO_PKT_ETHERTYPE_M(mda, pkt);
+}
+
+
+/** Return the flow hash computed on the packet.
+ * @ingroup ingress
+ *
+ * For TCP and UDP packets, this hash is calculated by hashing together
+ * the "5-tuple" values, specifically the source IP address, destination
+ * IP address, protocol type, source port and destination port.
+ * The hash value is intended to be helpful for millions of distinct
+ * flows.
+ *
+ * For IPv4 or IPv6 packets which are neither TCP nor UDP, the flow hash is
+ * derived by hashing together the source and destination IP addresses.
+ *
+ * For MPLS-encapsulated packets, the flow hash is derived by hashing
+ * the first MPLS label.
+ *
+ * For all other packets the flow hash is computed from the source
+ * and destination Ethernet addresses.
+ *
+ * The hash is symmetric, meaning it produces the same value if the
+ * source and destination are swapped. The only exceptions are
+ * tunneling protocols 0x04 (IP in IP Encapsulation), 0x29 (Simple
+ * Internet Protocol), 0x2F (General Routing Encapsulation) and 0x32
+ * (Encap Security Payload), which use only the destination address
+ * since the source address is not meaningful.
+ *
+ * @param[in] pkt Packet on which to operate.
+ * @return The packet's 32-bit flow hash.
+ */
+static __inline unsigned int
+NETIO_PKT_FLOW_HASH(netio_pkt_t* pkt)
+{
+ netio_pkt_metadata_t* mda = NETIO_PKT_METADATA(pkt);
+
+ return NETIO_PKT_FLOW_HASH_M(mda, pkt);
+}
+
+
+/** Return the first word of "user data" for the packet.
+ *
+ * The contents of the user data words depend on the IPP.
+ *
+ * When using the standard ipp1, ipp2, or ipp4 sub-drivers, the first
+ * word of user data contains the least significant bits of the 64-bit
+ * arrival cycle count (see @c get_cycle_count_low()).
+ *
+ * See the <em>System Programmer's Guide</em> for details.
+ *
+ * @ingroup ingress
+ *
+ * @param[in] pkt Packet on which to operate.
+ * @return The packet's first word of "user data".
+ */
+static __inline unsigned int
+NETIO_PKT_USER_DATA_0(netio_pkt_t* pkt)
+{
+ netio_pkt_metadata_t* mda = NETIO_PKT_METADATA(pkt);
+
+ return NETIO_PKT_USER_DATA_0_M(mda, pkt);
+}
+
+
+/** Return the second word of "user data" for the packet.
+ *
+ * The contents of the user data words depend on the IPP.
+ *
+ * When using the standard ipp1, ipp2, or ipp4 sub-drivers, the second
+ * word of user data contains the most significant bits of the 64-bit
+ * arrival cycle count (see @c get_cycle_count_high()).
+ *
+ * See the <em>System Programmer's Guide</em> for details.
+ *
+ * @ingroup ingress
+ *
+ * @param[in] pkt Packet on which to operate.
+ * @return The packet's second word of "user data".
+ */
+static __inline unsigned int
+NETIO_PKT_USER_DATA_1(netio_pkt_t* pkt)
+{
+ netio_pkt_metadata_t* mda = NETIO_PKT_METADATA(pkt);
+
+ return NETIO_PKT_USER_DATA_1_M(mda, pkt);
+}
+
+
+/** Determine whether the L4 (TCP/UDP) checksum was calculated.
+ * @ingroup ingress
+ *
+ * @param[in] pkt Packet on which to operate.
+ * @return Nonzero if the L4 checksum was calculated.
+ */
+static __inline unsigned int
+NETIO_PKT_L4_CSUM_CALCULATED(netio_pkt_t* pkt)
+{
+ netio_pkt_metadata_t* mda = NETIO_PKT_METADATA(pkt);
+
+ return NETIO_PKT_L4_CSUM_CALCULATED_M(mda, pkt);
+}
+
+
+/** Determine whether the L4 (TCP/UDP) checksum was calculated and found to
+ * be correct.
+ * @ingroup ingress
+ *
+ * @param[in] pkt Packet on which to operate.
+ * @return Nonzero if the checksum was calculated and is correct.
+ */
+static __inline unsigned int
+NETIO_PKT_L4_CSUM_CORRECT(netio_pkt_t* pkt)
+{
+ netio_pkt_metadata_t* mda = NETIO_PKT_METADATA(pkt);
+
+ return NETIO_PKT_L4_CSUM_CORRECT_M(mda, pkt);
+}
+
+
+/** Determine whether the L3 (IP) checksum was calculated.
+ * @ingroup ingress
+ *
+ * @param[in] pkt Packet on which to operate.
+ * @return Nonzero if the L3 (IP) checksum was calculated.
+*/
+static __inline unsigned int
+NETIO_PKT_L3_CSUM_CALCULATED(netio_pkt_t* pkt)
+{
+ netio_pkt_metadata_t* mda = NETIO_PKT_METADATA(pkt);
+
+ return NETIO_PKT_L3_CSUM_CALCULATED_M(mda, pkt);
+}
+
+
+/** Determine whether the L3 (IP) checksum was calculated and found to be
+ * correct.
+ * @ingroup ingress
+ *
+ * @param[in] pkt Packet on which to operate.
+ * @return Nonzero if the checksum was calculated and is correct.
+ */
+static __inline unsigned int
+NETIO_PKT_L3_CSUM_CORRECT(netio_pkt_t* pkt)
+{
+ netio_pkt_metadata_t* mda = NETIO_PKT_METADATA(pkt);
+
+ return NETIO_PKT_L3_CSUM_CORRECT_M(mda, pkt);
+}
+
+
+/** Determine whether the Ethertype was recognized and L3 packet data was
+ * processed.
+ * @ingroup ingress
+ *
+ * @param[in] pkt Packet on which to operate.
+ * @return Nonzero if the Ethertype was recognized and L3 packet data was
+ * processed.
+ */
+static __inline unsigned int
+NETIO_PKT_ETHERTYPE_RECOGNIZED(netio_pkt_t* pkt)
+{
+ netio_pkt_metadata_t* mda = NETIO_PKT_METADATA(pkt);
+
+ return NETIO_PKT_ETHERTYPE_RECOGNIZED_M(mda, pkt);
+}
+
+
+/** Set an egress packet's L2 length, using a metadata pointer to speed the
+ * computation.
+ * @ingroup egress
+ *
+ * @param[in,out] mmd Pointer to packet's minimal metadata.
+ * @param[in] pkt Packet on which to operate.
+ * @param[in] len Packet L2 length, in bytes.
+ */
+static __inline void
+NETIO_PKT_SET_L2_LENGTH_MM(netio_pkt_minimal_metadata_t* mmd, netio_pkt_t* pkt,
+ int len)
+{
+ mmd->l2_length = len;
+}
+
+
+/** Set an egress packet's L2 length.
+ * @ingroup egress
+ *
+ * @param[in,out] pkt Packet on which to operate.
+ * @param[in] len Packet L2 length, in bytes.
+ */
+static __inline void
+NETIO_PKT_SET_L2_LENGTH(netio_pkt_t* pkt, int len)
+{
+ netio_pkt_minimal_metadata_t* mmd = NETIO_PKT_MINIMAL_METADATA(pkt);
+
+ NETIO_PKT_SET_L2_LENGTH_MM(mmd, pkt, len);
+}
+
+
+/** Set an egress packet's L2 header length, using a metadata pointer to
+ * speed the computation.
+ * @ingroup egress
+ *
+ * It is not normally necessary to call this routine; only the L2 length,
+ * not the header length, is needed to transmit a packet. It may be useful if
+ * the egress packet will later be processed by code which expects to use
+ * functions like @ref NETIO_PKT_L3_DATA() to get a pointer to the L3 payload.
+ *
+ * @param[in,out] mmd Pointer to packet's minimal metadata.
+ * @param[in] pkt Packet on which to operate.
+ * @param[in] len Packet L2 header length, in bytes.
+ */
+static __inline void
+NETIO_PKT_SET_L2_HEADER_LENGTH_MM(netio_pkt_minimal_metadata_t* mmd,
+ netio_pkt_t* pkt, int len)
+{
+ mmd->l3_offset = mmd->l2_offset + len;
+}
+
+
+/** Set an egress packet's L2 header length.
+ * @ingroup egress
+ *
+ * It is not normally necessary to call this routine; only the L2 length,
+ * not the header length, is needed to transmit a packet. It may be useful if
+ * the egress packet will later be processed by code which expects to use
+ * functions like @ref NETIO_PKT_L3_DATA() to get a pointer to the L3 payload.
+ *
+ * @param[in,out] pkt Packet on which to operate.
+ * @param[in] len Packet L2 header length, in bytes.
+ */
+static __inline void
+NETIO_PKT_SET_L2_HEADER_LENGTH(netio_pkt_t* pkt, int len)
+{
+ netio_pkt_minimal_metadata_t* mmd = NETIO_PKT_MINIMAL_METADATA(pkt);
+
+ NETIO_PKT_SET_L2_HEADER_LENGTH_MM(mmd, pkt, len);
+}
+
+
+/** Set up an egress packet for hardware checksum computation, using a
+ * metadata pointer to speed the operation.
+ * @ingroup egress
+ *
+ * NetIO provides the ability to automatically calculate a standard
+ * 16-bit Internet checksum on transmitted packets. The application
+ * may specify the point in the packet where the checksum starts, the
+ * number of bytes to be checksummed, and the two bytes in the packet
+ * which will be replaced with the completed checksum. (If the range
+ * of bytes to be checksummed includes the bytes to be replaced, the
+ * initial values of those bytes will be included in the checksum.)
+ *
+ * For some protocols, the packet checksum covers data which is not present
+ * in the packet, or is at least not contiguous to the main data payload.
+ * For instance, the TCP checksum includes a "pseudo-header" which includes
+ * the source and destination IP addresses of the packet. To accommodate
+ * this, the checksum engine may be "seeded" with an initial value, which
+ * the application would need to compute based on the specific protocol's
+ * requirements. Note that the seed is given in host byte order (little-
+ * endian), not network byte order (big-endian); code written to compute a
+ * pseudo-header checksum in network byte order will need to byte-swap it
+ * before use as the seed.
+ *
+ * Note that the checksum is computed as part of the transmission process,
+ * so it will not be present in the packet upon completion of this routine.
+ *
+ * @param[in,out] mmd Pointer to packet's minimal metadata.
+ * @param[in] pkt Packet on which to operate.
+ * @param[in] start Offset within L2 packet of the first byte to include in
+ * the checksum.
+ * @param[in] length Number of bytes to include in the checksum.
+ * @param[in] location Offset within L2 packet of the first of the two bytes
+ * to be replaced with the calculated checksum.
+ * @param[in] seed Initial value of the running checksum before any of the
+ * packet data is added.
+ */
+static __inline void
+NETIO_PKT_DO_EGRESS_CSUM_MM(netio_pkt_minimal_metadata_t* mmd,
+ netio_pkt_t* pkt, int start, int length,
+ int location, uint16_t seed)
+{
+ mmd->csum_start = start;
+ mmd->csum_length = length;
+ mmd->csum_location = location;
+ mmd->csum_seed = seed;
+ mmd->flags |= _NETIO_PKT_NEED_EDMA_CSUM_MASK;
+}
+
+
+/** Set up an egress packet for hardware checksum computation.
+ * @ingroup egress
+ *
+ * NetIO provides the ability to automatically calculate a standard
+ * 16-bit Internet checksum on transmitted packets. The application
+ * may specify the point in the packet where the checksum starts, the
+ * number of bytes to be checksummed, and the two bytes in the packet
+ * which will be replaced with the completed checksum. (If the range
+ * of bytes to be checksummed includes the bytes to be replaced, the
+ * initial values of those bytes will be included in the checksum.)
+ *
+ * For some protocols, the packet checksum covers data which is not present
+ * in the packet, or is at least not contiguous to the main data payload.
+ * For instance, the TCP checksum includes a "pseudo-header" which includes
+ * the source and destination IP addresses of the packet. To accommodate
+ * this, the checksum engine may be "seeded" with an initial value, which
+ * the application would need to compute based on the specific protocol's
+ * requirements. Note that the seed is given in host byte order (little-
+ * endian), not network byte order (big-endian); code written to compute a
+ * pseudo-header checksum in network byte order will need to byte-swap it
+ * before use as the seed.
+ *
+ * Note that the checksum is computed as part of the transmission process,
+ * so it will not be present in the packet upon completion of this routine.
+ *
+ * @param[in,out] pkt Packet on which to operate.
+ * @param[in] start Offset within L2 packet of the first byte to include in
+ * the checksum.
+ * @param[in] length Number of bytes to include in the checksum.
+ * @param[in] location Offset within L2 packet of the first of the two bytes
+ * to be replaced with the calculated checksum.
+ * @param[in] seed Initial value of the running checksum before any of the
+ * packet data is added.
+ */
+static __inline void
+NETIO_PKT_DO_EGRESS_CSUM(netio_pkt_t* pkt, int start, int length,
+ int location, uint16_t seed)
+{
+ netio_pkt_minimal_metadata_t* mmd = NETIO_PKT_MINIMAL_METADATA(pkt);
+
+ NETIO_PKT_DO_EGRESS_CSUM_MM(mmd, pkt, start, length, location, seed);
+}
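+
+
+/*
+ * Illustrative sketch, not part of the original interface: one way a
+ * transmitting tile might request a hardware UDP checksum using the routine
+ * above.  The frame layout (14-byte Ethernet header, 20-byte IPv4 header
+ * with no options) and the pseudo-header helper are assumptions made only
+ * for this example.
+ *
+ * @code
+ * // Assumes the packet's L2 length was already set via
+ * // NETIO_PKT_SET_L2_LENGTH().
+ * netio_pkt_minimal_metadata_t* mmd = NETIO_PKT_MINIMAL_METADATA(pkt);
+ *
+ * // First byte of the UDP header, relative to the start of the L2 frame.
+ * int l4_start = 14 + 20;
+ *
+ * // Checksum everything from the UDP header to the end of the frame.
+ * int l4_len = NETIO_PKT_L2_LENGTH_MM(mmd, pkt) - l4_start;
+ *
+ * // The UDP checksum field sits 6 bytes into the UDP header.
+ * int csum_loc = l4_start + 6;
+ *
+ * // Hypothetical helper: folds the addresses, protocol, and UDP length of
+ * // the pseudo-header into a 16-bit sum already in host byte order.
+ * uint16_t seed = my_udp_pseudo_header_sum(pkt);
+ *
+ * NETIO_PKT_DO_EGRESS_CSUM(pkt, l4_start, l4_len, csum_loc, seed);
+ * @endcode
+ */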
+
+
+/** Return the number of bytes which could be prepended to a packet, using a
+ * metadata pointer to speed the operation.
+ * See @ref netio_populate_prepend_buffer() to get a full description of
+ * prepending.
+ *
+ * @param[in,out] mda Pointer to packet's standard metadata.
+ * @param[in] pkt Packet on which to operate.
+ */
+static __inline int
+NETIO_PKT_PREPEND_AVAIL_M(netio_pkt_metadata_t* mda, netio_pkt_t* pkt)
+{
+ return (pkt->__packet.bits.__offset << 6) +
+ NETIO_PKT_CUSTOM_HEADER_LENGTH_M(mda, pkt);
+}
+
+
+/** Return the number of bytes which could be prepended to a packet, using a
+ * metadata pointer to speed the operation.
+ * See @ref netio_populate_prepend_buffer() to get a full description of
+ * prepending.
+ * @ingroup egress
+ *
+ * @param[in,out] mmd Pointer to packet's minimal metadata.
+ * @param[in] pkt Packet on which to operate.
+ */
+static __inline int
+NETIO_PKT_PREPEND_AVAIL_MM(netio_pkt_minimal_metadata_t* mmd, netio_pkt_t* pkt)
+{
+ return (pkt->__packet.bits.__offset << 6) + mmd->l2_offset;
+}
+
+
+/** Return the number of bytes which could be prepended to a packet.
+ * See @ref netio_populate_prepend_buffer() to get a full description of
+ * prepending.
+ * @ingroup egress
+ *
+ * @param[in] pkt Packet on which to operate.
+ */
+static __inline int
+NETIO_PKT_PREPEND_AVAIL(netio_pkt_t* pkt)
+{
+ if (NETIO_PKT_IS_MINIMAL(pkt))
+ {
+ netio_pkt_minimal_metadata_t* mmd = NETIO_PKT_MINIMAL_METADATA(pkt);
+
+ return NETIO_PKT_PREPEND_AVAIL_MM(mmd, pkt);
+ }
+ else
+ {
+ netio_pkt_metadata_t* mda = NETIO_PKT_METADATA(pkt);
+
+ return NETIO_PKT_PREPEND_AVAIL_M(mda, pkt);
+ }
+}
+
+
+/** Flush a packet's minimal metadata from the cache, using a metadata pointer
+ * to speed the operation.
+ * @ingroup egress
+ *
+ * @param[in] mmd Pointer to packet's minimal metadata.
+ * @param[in] pkt Packet on which to operate.
+ */
+static __inline void
+NETIO_PKT_FLUSH_MINIMAL_METADATA_MM(netio_pkt_minimal_metadata_t* mmd,
+ netio_pkt_t* pkt)
+{
+}
+
+
+/** Invalidate a packet's minimal metadata from the cache, using a metadata
+ * pointer to speed the operation.
+ * @ingroup egress
+ *
+ * @param[in] mmd Pointer to packet's minimal metadata.
+ * @param[in] pkt Packet on which to operate.
+ */
+static __inline void
+NETIO_PKT_INV_MINIMAL_METADATA_MM(netio_pkt_minimal_metadata_t* mmd,
+ netio_pkt_t* pkt)
+{
+}
+
+
+/** Flush and then invalidate a packet's minimal metadata from the cache,
+ * using a metadata pointer to speed the operation.
+ * @ingroup egress
+ *
+ * @param[in] mmd Pointer to packet's minimal metadata.
+ * @param[in] pkt Packet on which to operate.
+ */
+static __inline void
+NETIO_PKT_FLUSH_INV_MINIMAL_METADATA_MM(netio_pkt_minimal_metadata_t* mmd,
+ netio_pkt_t* pkt)
+{
+}
+
+
+/** Flush a packet's metadata from the cache, using a metadata pointer
+ * to speed the operation.
+ * @ingroup ingress
+ *
+ * @param[in] mda Pointer to packet's metadata.
+ * @param[in] pkt Packet on which to operate.
+ */
+static __inline void
+NETIO_PKT_FLUSH_METADATA_M(netio_pkt_metadata_t* mda, netio_pkt_t* pkt)
+{
+}
+
+
+/** Invalidate a packet's metadata from the cache, using a metadata
+ * pointer to speed the operation.
+ * @ingroup ingress
+ *
+ * @param[in] mda Pointer to packet's metadata.
+ * @param[in] pkt Packet on which to operate.
+ */
+static __inline void
+NETIO_PKT_INV_METADATA_M(netio_pkt_metadata_t* mda, netio_pkt_t* pkt)
+{
+}
+
+
+/** Flush and then invalidate a packet's metadata from the cache,
+ * using a metadata pointer to speed the operation.
+ * @ingroup ingress
+ *
+ * @param[in] mda Pointer to packet's metadata.
+ * @param[in] pkt Packet on which to operate.
+ */
+static __inline void
+NETIO_PKT_FLUSH_INV_METADATA_M(netio_pkt_metadata_t* mda, netio_pkt_t* pkt)
+{
+}
+
+
+/** Flush a packet's minimal metadata from the cache.
+ * @ingroup egress
+ *
+ * @param[in] pkt Packet on which to operate.
+ */
+static __inline void
+NETIO_PKT_FLUSH_MINIMAL_METADATA(netio_pkt_t* pkt)
+{
+}
+
+
+/** Invalidate a packet's minimal metadata from the cache.
+ * @ingroup egress
+ *
+ * @param[in] pkt Packet on which to operate.
+ */
+static __inline void
+NETIO_PKT_INV_MINIMAL_METADATA(netio_pkt_t* pkt)
+{
+}
+
+
+/** Flush and then invalidate a packet's minimal metadata from the cache.
+ * @ingroup egress
+ *
+ * @param[in] pkt Packet on which to operate.
+ */
+static __inline void
+NETIO_PKT_FLUSH_INV_MINIMAL_METADATA(netio_pkt_t* pkt)
+{
+}
+
+
+/** Flush a packet's metadata from the cache.
+ * @ingroup ingress
+ *
+ * @param[in] pkt Packet on which to operate.
+ */
+static __inline void
+NETIO_PKT_FLUSH_METADATA(netio_pkt_t* pkt)
+{
+}
+
+
+/** Invalidate a packet's metadata from the cache.
+ * @ingroup ingress
+ *
+ * @param[in] pkt Packet on which to operate.
+ */
+static __inline void
+NETIO_PKT_INV_METADATA(netio_pkt_t* pkt)
+{
+}
+
+
+/** Flush and then invalidate a packet's metadata from the cache.
+ * @ingroup ingress
+ *
+ * @param[in] pkt Packet on which to operate.
+ */
+static __inline void
+NETIO_PKT_FLUSH_INV_METADATA(netio_pkt_t* pkt)
+{
+}
+
+/** Number of NUMA nodes we can distribute buffers to.
+ * @ingroup setup */
+#define NETIO_NUM_NODE_WEIGHTS 16
+
+/**
+ * @brief An object for specifying the characteristics of NetIO communication
+ * endpoint.
+ *
+ * @ingroup setup
+ *
+ * The @ref netio_input_register() function uses this structure to define
+ * how an application tile will communicate with an IPP.
+ *
+ *
+ * Future updates to NetIO may add new members to this structure,
+ * which can affect the success of the registration operation. Thus,
+ * if dynamically initializing the structure, applications are urged to
+ * zero it out first, for example:
+ *
+ * @code
+ * netio_input_config_t config;
+ * memset(&config, 0, sizeof (config));
+ * config.flags = NETIO_RECV | NETIO_XMIT_CSUM | NETIO_TAG_NONE;
+ * config.num_receive_packets = NETIO_MAX_RECEIVE_PKTS;
+ * config.queue_id = 0;
+ * .
+ * .
+ * .
+ * @endcode
+ *
+ * since that guarantees that any unused structure members, including
+ * members which did not exist when the application was first developed,
+ * will not have unexpected values.
+ *
+ * If statically initializing the structure, we strongly recommend use of
+ * C99-style named initializers, for example:
+ *
+ * @code
+ * netio_input_config_t config = {
+ * .flags = NETIO_RECV | NETIO_XMIT_CSUM | NETIO_TAG_NONE,
+ * .num_receive_packets = NETIO_MAX_RECEIVE_PKTS,
+ * .queue_id = 0,
+ * };
+ * @endcode
+ *
+ * instead of the old-style structure initialization:
+ *
+ * @code
+ * // Bad example! Currently equivalent to the above, but don't do this.
+ * netio_input_config_t config = {
+ * NETIO_RECV | NETIO_XMIT_CSUM | NETIO_TAG_NONE, NETIO_MAX_RECEIVE_PKTS, 0
+ * };
+ * @endcode
+ *
+ * since the C99 style requires no changes to the code if elements of the
+ * config structure are rearranged. (It also makes the initialization much
+ * easier to understand.)
+ *
+ * Except for items which address a particular tile's transmit or receive
+ * characteristics, such as the ::NETIO_RECV flag, applications are advised
+ * to specify the same set of configuration data on all registrations.
+ * This prevents differing results if multiple tiles happen to do their
+ * registration operations in a different order on different invocations of
+ * the application. This is particularly important for things like link
+ * management flags, and buffer size and homing specifications.
+ *
+ * Unless the ::NETIO_FIXED_BUFFER_VA flag is specified in flags, the NetIO
+ * buffer pool is automatically created and mapped into the application's
+ * virtual address space at an address chosen by the operating system,
+ * using the common memory (cmem) facility in the Tilera Multicore
+ * Components library. The cmem facility allows multiple processes to gain
+ * access to shared memory which is mapped into each process at an
+ * identical virtual address. In order for this to work, the processes
+ * must have a common ancestor, which must create the common memory using
+ * tmc_cmem_init().
+ *
+ * In programs using the iLib process creation API, or in programs which use
+ * only one process (which include programs using the pthreads library),
+ * tmc_cmem_init() is called automatically. All other applications
+ * must call it explicitly, before any child processes which might call
+ * netio_input_register() are created.
+ */
+typedef struct
+{
+ /** Registration characteristics.
+
+ This value determines several characteristics of the registration;
+ flags for different types of behavior are ORed together to make the
+ final flag value. Generally applications should specify exactly
+ one flag from each of the following categories:
+
+ - Whether the application will be receiving packets on this queue
+ (::NETIO_RECV or ::NETIO_NO_RECV).
+
+ - Whether the application will be transmitting packets on this queue,
+ and if so, whether it will request egress checksum calculation
+ (::NETIO_XMIT, ::NETIO_XMIT_CSUM, or ::NETIO_NO_XMIT). It is
+ legal to call netio_get_buffer() without one of the XMIT flags,
+ as long as ::NETIO_RECV is specified; in this case, the retrieved
+ buffers must be passed to another tile for transmission.
+
+ - Whether the application expects any vendor-specific tags in
+ its packets' L2 headers (::NETIO_TAG_NONE, ::NETIO_TAG_BRCM,
+ or ::NETIO_TAG_MRVL). This must match the configuration of the
+ target IPP.
+
+ To accommodate applications written to previous versions of the NetIO
+ interface, none of the flags above are currently required; if omitted,
+ NetIO behaves more or less as if ::NETIO_RECV | ::NETIO_XMIT_CSUM |
+ ::NETIO_TAG_NONE were used. However, explicit specification of
+ the relevant flags allows NetIO to do a better job of resource
+ allocation, allows earlier detection of certain configuration errors,
+ and may enable advanced features or higher performance in the future,
+ so their use is strongly recommended.
+
+ Note that specifying ::NETIO_NO_RECV along with ::NETIO_NO_XMIT
+ is a special case, intended primarily for use by programs which
+ retrieve network statistics or do link management operations.
+ When these flags are both specified, the resulting queue may not
+ be used with NetIO routines other than netio_get(), netio_set(),
+ and netio_input_unregister(). See @ref link for more information
+ on link management.
+
+ Other flags are optional; their use is described below.
+ */
+ int flags;
+
+ /** Interface name. This is a string which identifies the specific
+ Ethernet controller hardware to be used. The format of the string
+ is a device type and a device index, separated by a slash; so,
+ the first 10 Gigabit Ethernet controller is named "xgbe/0", while
+ the second 10/100/1000 Megabit Ethernet controller is named "gbe/1".
+ */
+ const char* interface;
+
+ /** Receive packet queue size. This specifies the maximum number
+ of ingress packets that can be received on this queue without
+ being retrieved by @ref netio_get_packet(). If the IPP's distribution
+ algorithm calls for a packet to be sent to this queue, and this
+ number of packets are already pending there, the new packet
+ will either be discarded, or sent to another tile registered
+ for the same queue_id (see @ref drops). This value must
+ be at least ::NETIO_MIN_RECEIVE_PKTS, can always be at least
+ ::NETIO_MAX_RECEIVE_PKTS, and may be larger than that on certain
+ interfaces.
+ */
+ int num_receive_packets;
+
+ /** The queue ID being requested. Legal values for this range from 0
+ to ::NETIO_MAX_QUEUE_ID, inclusive. ::NETIO_MAX_QUEUE_ID is always
+ greater than or equal to the number of tiles; this allows one queue
+ for each tile, plus at least one additional queue. Some applications
+ may wish to use the additional queue as a destination for unwanted
+ packets, since packets delivered to queues for which no tiles have
+ registered are discarded.
+ */
+ unsigned int queue_id;
+
+ /** Maximum number of small send buffers to be held in the local empty
+ buffer cache. This specifies the size of the area which holds
+ empty small egress buffers requested from the IPP but not yet
+ retrieved via @ref netio_get_buffer(). This value must be greater
+ than zero if the application will ever use @ref netio_get_buffer()
+ to allocate empty small egress buffers; it may be no larger than
+ ::NETIO_MAX_SEND_BUFFERS. See @ref epp for more details on empty
+ buffer caching.
+ */
+ int num_send_buffers_small_total;
+
+ /** Number of small send buffers to be preallocated at registration.
+ If this value is nonzero, the specified number of empty small egress
+ buffers will be requested from the IPP during the netio_input_register
+ operation; this may speed the execution of @ref netio_get_buffer().
+ This may be no larger than @ref num_send_buffers_small_total. See @ref
+ epp for more details on empty buffer caching.
+ */
+ int num_send_buffers_small_prealloc;
+
+ /** Maximum number of large send buffers to be held in the local empty
+ buffer cache. This specifies the size of the area which holds empty
+ large egress buffers requested from the IPP but not yet retrieved via
+ @ref netio_get_buffer(). This value must be greater than zero if the
+ application will ever use @ref netio_get_buffer() to allocate empty
+ large egress buffers; it may be no larger than ::NETIO_MAX_SEND_BUFFERS.
+ See @ref epp for more details on empty buffer caching.
+ */
+ int num_send_buffers_large_total;
+
+ /** Number of large send buffers to be preallocated at registration.
+ If this value is nonzero, the specified number of empty large egress
+ buffers will be requested from the IPP during the netio_input_register
+ operation; this may speed the execution of @ref netio_get_buffer().
+ This may be no larger than @ref num_send_buffers_large_total. See @ref
+ epp for more details on empty buffer caching.
+ */
+ int num_send_buffers_large_prealloc;
+
+ /** Maximum number of jumbo send buffers to be held in the local empty
+ buffer cache. This specifies the size of the area which holds empty
+ jumbo egress buffers requested from the IPP but not yet retrieved via
+ @ref netio_get_buffer(). This value must be greater than zero if the
+ application will ever use @ref netio_get_buffer() to allocate empty
+ jumbo egress buffers; it may be no larger than ::NETIO_MAX_SEND_BUFFERS.
+ See @ref epp for more details on empty buffer caching.
+ */
+ int num_send_buffers_jumbo_total;
+
+ /** Number of jumbo send buffers to be preallocated at registration.
+ If this value is nonzero, the specified number of empty jumbo egress
+ buffers will be requested from the IPP during the netio_input_register
+ operation; this may speed the execution of @ref netio_get_buffer().
+ This may be no larger than @ref num_send_buffers_jumbo_total. See @ref
+ epp for more details on empty buffer caching.
+ */
+ int num_send_buffers_jumbo_prealloc;
+
+ /** Total packet buffer size. This determines the total size, in bytes,
+ of the NetIO buffer pool. Note that the maximum number of available
+ buffers of each size is determined during hypervisor configuration
+ (see the <em>System Programmer's Guide</em> for details); this just
+ influences how much host memory is allocated for those buffers.
+
+ The buffer pool is allocated from common memory, which will be
+ automatically initialized if needed. If your buffer pool is larger
+ than 240 MB, you might need to explicitly call @c tmc_cmem_init(),
+ as described in the Application Libraries Reference Manual (UG227).
+
+ Packet buffers are currently allocated in chunks of 16 MB; this
+ value will be rounded up to the next larger multiple of 16 MB.
+ If this value is zero, a default of 32 MB will be used; this was
+ the value used by previous versions of NetIO. Note that taking this
+ default also affects the placement of buffers on Linux NUMA nodes.
+ See @ref buffer_node_weights for an explanation of buffer placement.
+
+ In order to successfully allocate packet buffers, Linux must have
+ available huge pages on the relevant Linux NUMA nodes. See the
+ <em>System Programmer's Guide</em> for information on configuring
+ huge page support in Linux.
+ */
+ uint64_t total_buffer_size;
+
+ /** Buffer placement weighting factors.
+
+ This array specifies the relative amount of buffering to place
+ on each of the available Linux NUMA nodes. This array is
+ indexed by the NUMA node, and the values in the array are
+ proportional to the amount of buffer space to allocate on that
+ node.
+
+ If memory striping is enabled in the Hypervisor, then there is
+ only one logical NUMA node (node 0). In that case, NetIO will by
+ default ignore the suggested buffer node weights, and buffers
+ will be striped across the physical memory controllers. See
+ UG209 System Programmer's Guide for a description of the
+ hypervisor option that controls memory striping.
+
+ If memory striping is disabled, then there are up to four NUMA
+ nodes, corresponding to the four DDRAM controllers in the TILE
+ processor architecture. See UG100 Tile Processor Architecture
+ Overview for a diagram showing the location of each of the DDRAM
+ controllers relative to the tile array.
+
+ For instance, if memory striping is disabled, the following
+ configuration structure:
+
+ @code
+ netio_input_config_t config = {
+ .
+ .
+ .
+ .total_buffer_size = 4 * 16 * 1024 * 1024,
+ .buffer_node_weights = { 1, 0, 1, 0 },
+ };
+ @endcode
+
+ would result in 32 MB of buffers being placed on controller 0, and
+ 32 MB on controller 2. (Since buffers are allocated in units of
+ 16 MB, some sets of weights will not be able to be matched exactly.)
+
+ For the weights to be effective, @ref total_buffer_size must be
+ nonzero. If @ref total_buffer_size is zero, causing the default
+ 32 MB of buffer space to be used, then any specified weights will
+ be ignored, and buffers will be positioned as they were in previous
+ versions of NetIO:
+
+ - For xgbe/0 and gbe/0, 16 MB of buffers will be placed on controller 1,
+ and the other 16 MB will be placed on controller 2.
+
+ - For xgbe/1 and gbe/1, 16 MB of buffers will be placed on controller 2,
+ and the other 16 MB will be placed on controller 3.
+
+ If @ref total_buffer_size is nonzero, but all weights are zero,
+ then all buffer space will be allocated on Linux NUMA node zero.
+
+ By default, the specified buffer placement is treated as a hint;
+ if sufficient free memory is not available on the specified
+ controllers, the buffers will be allocated elsewhere. However,
+ if the ::NETIO_STRICT_HOMING flag is specified in @ref flags, then a
+ failure to allocate buffer space exactly as requested will cause the
+ registration operation to fail with an error of ::NETIO_CANNOT_HOME.
+
+ Note that maximal network performance cannot be achieved with
+ only one memory controller.
+ */
+ uint8_t buffer_node_weights[NETIO_NUM_NODE_WEIGHTS];
+
+ /** Fixed virtual address for packet buffers. Only valid when
+ ::NETIO_FIXED_BUFFER_VA is specified in @ref flags; see the
+ description of that flag for details.
+ */
+ void* fixed_buffer_va;
+
+ /**
+ Maximum number of outstanding send packet requests. This value is
+ only relevant when an EPP is in use; it determines the number of
+ slots in the EPP's outgoing packet queue which this tile is allowed
+ to consume, and thus the number of packets which may be sent before
+ the sending tile must wait for an acknowledgment from the EPP.
+ Modifying this value is generally only helpful when using @ref
+ netio_send_packet_vector(), where it can help improve performance by
+ allowing a single vector send operation to process more packets.
+ Typically it is not specified, and the default, which divides the
+ outgoing packet slots evenly between all tiles on the chip, is used.
+
+ If a registration asks for more outgoing packet queue slots than are
+ available, ::NETIO_TOOMANY_XMIT will be returned. The total number
+ of packet queue slots which are available for all tiles for each EPP
+ is subject to change, but is currently ::NETIO_TOTAL_SENDS_OUTSTANDING.
+
+
+ This value is ignored if ::NETIO_XMIT is not specified in flags.
+ If you want to specify a large value here for a specific tile, you are
+ advised to specify NETIO_NO_XMIT on other, non-transmitting tiles so
+ that they do not consume a default number of packet slots. Any tile
+ transmitting is required to have at least ::NETIO_MIN_SENDS_OUTSTANDING
+ slots allocated to it; values less than that will be silently
+ increased by the NetIO library.
+ */
+ int num_sends_outstanding;
+}
+netio_input_config_t;
+
+
+/** Registration flags; used in the @ref netio_input_config_t structure.
+ * @addtogroup setup
+ */
+/** @{ */
+
+/** Fail a registration request if we can't put packet buffers
+ on the specified memory controllers. */
+#define NETIO_STRICT_HOMING 0x00000002
+
+/** This application expects no tags on its L2 headers. */
+#define NETIO_TAG_NONE 0x00000004
+
+/** This application expects Marvell extended tags on its L2 headers. */
+#define NETIO_TAG_MRVL 0x00000008
+
+/** This application expects Broadcom tags on its L2 headers. */
+#define NETIO_TAG_BRCM 0x00000010
+
+/** This registration may call routines which receive packets. */
+#define NETIO_RECV 0x00000020
+
+/** This registration may not call routines which receive packets. */
+#define NETIO_NO_RECV 0x00000040
+
+/** This registration may call routines which transmit packets. */
+#define NETIO_XMIT 0x00000080
+
+/** This registration may call routines which transmit packets with
+ checksum acceleration. */
+#define NETIO_XMIT_CSUM 0x00000100
+
+/** This registration may not call routines which transmit packets. */
+#define NETIO_NO_XMIT 0x00000200
+
+/** This registration wants NetIO buffers mapped at an application-specified
+ virtual address.
+
+ NetIO buffers are by default created by the TMC common memory facility,
+ which must be configured by a common ancestor of all processes sharing
+ a network interface. When this flag is specified, NetIO buffers are
+ instead mapped at an address chosen by the application (and specified
+ in @ref netio_input_config_t::fixed_buffer_va). This allows multiple
+ unrelated but cooperating processes to share a NetIO interface.
+ All processes sharing the same interface must specify this flag,
+ and all must specify the same fixed virtual address.
+
+ @ref netio_input_config_t::fixed_buffer_va must be a
+ multiple of 16 MB, and the packet buffers will occupy @ref
+ netio_input_config_t::total_buffer_size bytes of virtual address
+ space, beginning at that address. If any of those virtual addresses
+ are currently occupied by other memory objects, like application or
+ shared library code or data, @ref netio_input_register() will return
+ ::NETIO_FAULT. While it is impossible to provide a fixed_buffer_va
+ which will work for all applications, a good first guess might be to
+ use 0xb0000000 minus @ref netio_input_config_t::total_buffer_size.
+ If that fails, it might be helpful to consult the running application's
+ virtual address description file (/proc/<em>pid</em>/maps) to see
+ which regions of virtual address space are available.
+ */
+#define NETIO_FIXED_BUFFER_VA 0x00000400
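+
+/*
+ * Illustrative sketch, not part of the original interface: computing the
+ * "first guess" address suggested above for ::NETIO_FIXED_BUFFER_VA.  The
+ * 64 MB pool size is an arbitrary example value (a multiple of 16 MB, as
+ * required), and "config" is assumed to be a netio_input_config_t being
+ * prepared for registration.
+ *
+ * @code
+ * config.flags |= NETIO_FIXED_BUFFER_VA;
+ * config.total_buffer_size = 64 * 1024 * 1024;
+ * config.fixed_buffer_va = (void*) (0xb0000000 - config.total_buffer_size);
+ * @endcode
+ */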
+
+/** This registration call will not complete unless the network link
+ is up. The process will wait several seconds for this to happen (the
+ precise interval is link-dependent), but if the link does not come up,
+ ::NETIO_LINK_DOWN will be returned. This flag is the default if
+ ::NETIO_NOREQUIRE_LINK_UP is not specified. Note that this flag by
+ itself does not request that the link be brought up; that can be done
+ with the ::NETIO_AUTO_LINK_UPDN or ::NETIO_AUTO_LINK_UP flags (the
+ latter is the default if no NETIO_AUTO_LINK_xxx flags are specified),
+ or by explicitly setting the link's desired state via netio_set().
+ If the link is not brought up by one of those methods, and this flag
+ is specified, the registration operation will return ::NETIO_LINK_DOWN.
+ This flag is ignored if it is specified along with ::NETIO_NO_XMIT and
+ ::NETIO_NO_RECV. See @ref link for more information on link
+ management.
+ */
+#define NETIO_REQUIRE_LINK_UP 0x00000800
+
+/** This registration call will complete even if the network link is not up.
+ Whenever the link is not up, packets will not be sent or received:
+ netio_get_packet() will return ::NETIO_NOPKT once all queued packets
+ have been drained, and netio_send_packet() and similar routines will
+ return NETIO_QUEUE_FULL once the outgoing packet queue in the EPP
+ or the I/O shim is full. See @ref link for more information on link
+ management.
+ */
+#define NETIO_NOREQUIRE_LINK_UP 0x00001000
+
+#ifndef __DOXYGEN__
+/*
+ * These are part of the implementation of the NETIO_AUTO_LINK_xxx flags,
+ * but should not be used directly by applications, and are thus not
+ * documented.
+ */
+#define _NETIO_AUTO_UP 0x00002000
+#define _NETIO_AUTO_DN 0x00004000
+#define _NETIO_AUTO_PRESENT 0x00008000
+#endif
+
+/** Set the desired state of the link to up, allowing any speeds which are
+ supported by the link hardware, as part of this registration operation.
+ Do not take down the link automatically. This is the default if
+ no other NETIO_AUTO_LINK_xxx flags are specified. This flag is ignored
+ if it is specified along with ::NETIO_NO_XMIT and ::NETIO_NO_RECV.
+ See @ref link for more information on link management.
+ */
+#define NETIO_AUTO_LINK_UP (_NETIO_AUTO_PRESENT | _NETIO_AUTO_UP)
+
+/** Set the desired state of the link to up, allowing any speeds which are
+ supported by the link hardware, as part of this registration operation.
+ Set the desired state of the link to down the next time no tiles are
+ registered for packet reception or transmission. This flag is ignored
+ if it is specified along with ::NETIO_NO_XMIT and ::NETIO_NO_RECV.
+ See @ref link for more information on link management.
+ */
+#define NETIO_AUTO_LINK_UPDN (_NETIO_AUTO_PRESENT | _NETIO_AUTO_UP | \
+ _NETIO_AUTO_DN)
+
+/** Set the desired state of the link to down the next time no tiles are
+ registered for packet reception or transmission. This flag is ignored
+ if it is specified along with ::NETIO_NO_XMIT and ::NETIO_NO_RECV.
+ See @ref link for more information on link management.
+ */
+#define NETIO_AUTO_LINK_DN (_NETIO_AUTO_PRESENT | _NETIO_AUTO_DN)
+
+/** Do not bring up the link automatically as part of this registration
+ operation. Do not take down the link automatically. This flag
+ is ignored if it is specified along with ::NETIO_NO_XMIT and
+ ::NETIO_NO_RECV. See @ref link for more information on link management.
+ */
+#define NETIO_AUTO_LINK_NONE _NETIO_AUTO_PRESENT
+
+
+/** Minimum number of receive packets. */
+#define NETIO_MIN_RECEIVE_PKTS 16
+
+/** Lower bound on the maximum number of receive packets; may be higher
+ than this on some interfaces. */
+#define NETIO_MAX_RECEIVE_PKTS 128
+
+/** Maximum number of send buffers, per packet size. */
+#define NETIO_MAX_SEND_BUFFERS 16
+
+/** Number of EPP queue slots, and thus outstanding sends, per EPP. */
+#define NETIO_TOTAL_SENDS_OUTSTANDING 2015
+
+/** Minimum number of EPP queue slots, and thus outstanding sends, per
+ * transmitting tile. */
+#define NETIO_MIN_SENDS_OUTSTANDING 16
+
+
+/**@}*/
+
+#ifndef __DOXYGEN__
+
+/**
+ * An object for providing Ethernet packets to a process.
+ */
+struct __netio_queue_impl_t;
+
+/**
+ * An object for managing the user end of a NetIO queue.
+ */
+struct __netio_queue_user_impl_t;
+
+#endif /* !__DOXYGEN__ */
+
+
+/** A netio_queue_t describes a NetIO communications endpoint.
+ * @ingroup setup
+ */
+typedef struct
+{
+#ifdef __DOXYGEN__
+ uint8_t opaque[8]; /**< This is an opaque structure. */
+#else
+ struct __netio_queue_impl_t* __system_part; /**< The system part. */
+ struct __netio_queue_user_impl_t* __user_part; /**< The user part. */
+#ifdef _NETIO_PTHREAD
+ _netio_percpu_mutex_t lock; /**< Queue lock. */
+#endif
+#endif
+}
+netio_queue_t;
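+
+
+/*
+ * Illustrative sketch, not part of the original interface: a minimal
+ * registration sequence tying together the netio_input_config_t and
+ * netio_queue_t types above.  The exact prototype and success code of
+ * netio_input_register() are assumed here (a config pointer, a queue to
+ * fill in, and a zero return on success); consult the NetIO documentation
+ * for the authoritative signature.
+ *
+ * @code
+ * netio_queue_t queue;
+ * netio_input_config_t config;
+ *
+ * // Zero the structure first, as recommended above, so that members added
+ * // in later NetIO versions keep harmless default values.
+ * memset(&config, 0, sizeof (config));
+ * config.flags = NETIO_RECV | NETIO_XMIT_CSUM | NETIO_TAG_NONE;
+ * config.num_receive_packets = NETIO_MAX_RECEIVE_PKTS;
+ * config.interface = "xgbe/0";
+ * config.queue_id = 0;
+ *
+ * if (netio_input_register(&config, &queue) != 0)
+ *   handle_registration_failure();   // hypothetical error path
+ * @endcode
+ */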
+
+
+/**
+ * @brief Packet send context.
+ *
+ * @ingroup egress
+ *
+ * Packet send context for use with netio_send_packet_prepare and _commit.
+ */
+typedef struct
+{
+#ifdef __DOXYGEN__
+ uint8_t opaque[44]; /**< This is an opaque structure. */
+#else
+ uint8_t flags; /**< Defined below */
+ uint8_t datalen; /**< Number of valid words pointed to by data. */
+ uint32_t request[9]; /**< Request to be sent to the EPP or shim. Note
+ that this is smaller than the 11-word maximum
+ request size, since some constant values are
+ not saved in the context. */
+ uint32_t *data; /**< Data to be sent to the EPP or shim via IDN. */
+#endif
+}
+netio_send_pkt_context_t;
+
+
+#ifndef __DOXYGEN__
+#define SEND_PKT_CTX_USE_EPP 1 /**< We're sending to an EPP. */
+#define SEND_PKT_CTX_SEND_CSUM 2 /**< Request includes a checksum. */
+#endif
+
+/**
+ * @brief Packet vector entry.
+ *
+ * @ingroup egress
+ *
+ * This data structure is used with netio_send_packet_vector() to send multiple
+ * packets with one NetIO call. The structure should be initialized by
+ * calling netio_pkt_vector_set(), rather than by setting the fields
+ * directly.
+ *
+ * This structure is guaranteed to be a power of two in size, no
+ * bigger than one L2 cache line, and to be aligned modulo its size.
+ */
+typedef struct
+#ifndef __DOXYGEN__
+__attribute__((aligned(8)))
+#endif
+{
+ /** Reserved for use by the user application. When initialized with
+ * the netio_set_pkt_vector_entry() function, this field is guaranteed
+ * to be visible to readers only after all other fields are already
+ * visible. This way it can be used as a valid flag or generation
+ * counter. */
+ uint8_t user_data;
+
+ /* Structure members below this point should not be accessed directly by
+ * applications, as they may change in the future. */
+
+ /** Low 8 bits of the packet address to send. The high bits are
+ * acquired from the 'handle' field. */
+ uint8_t buffer_address_low;
+
+ /** Number of bytes to transmit. */
+ uint16_t size;
+
+ /** The raw handle from a netio_pkt_t. If this is NETIO_PKT_HANDLE_NONE,
+ * this vector entry will be skipped and no packet will be transmitted. */
+ netio_pkt_handle_t handle;
+}
+netio_pkt_vector_entry_t;
+
+
+/**
+ * @brief Initialize fields in a packet vector entry.
+ *
+ * @ingroup egress
+ *
+ * @param[out] v Pointer to the vector entry to be initialized.
+ * @param[in] pkt Packet to be transmitted when the vector entry is passed to
+ * netio_send_packet_vector(). Note that the packet's attributes
+ * (e.g., its L2 offset and length) are captured at the time this
+ * routine is called; subsequent changes in those attributes will not
+ * be reflected in the packet which is actually transmitted.
+ * Changes in the packet's contents, however, will be so reflected.
+ * If this is NULL, no packet will be transmitted.
+ * @param[in] user_data User data to be set in the vector entry.
+ * This function guarantees that the "user_data" field will become
+ * visible to a reader only after all other fields have become visible.
+ * This allows a structure in a ring buffer to be written and read
+ * by a polling reader without any locks or other synchronization.
+ */
+static __inline void
+netio_pkt_vector_set(volatile netio_pkt_vector_entry_t* v, netio_pkt_t* pkt,
+ uint8_t user_data)
+{
+ if (pkt)
+ {
+ if (NETIO_PKT_IS_MINIMAL(pkt))
+ {
+ netio_pkt_minimal_metadata_t* mmd =
+ (netio_pkt_minimal_metadata_t*) &pkt->__metadata;
+ v->buffer_address_low = (uintptr_t) NETIO_PKT_L2_DATA_MM(mmd, pkt) & 0xFF;
+ v->size = NETIO_PKT_L2_LENGTH_MM(mmd, pkt);
+ }
+ else
+ {
+ netio_pkt_metadata_t* mda = &pkt->__metadata;
+ v->buffer_address_low = (uintptr_t) NETIO_PKT_L2_DATA_M(mda, pkt) & 0xFF;
+ v->size = NETIO_PKT_L2_LENGTH_M(mda, pkt);
+ }
+ v->handle.word = pkt->__packet.word;
+ }
+ else
+ {
+ v->handle.word = 0; /* Set handle to NETIO_PKT_HANDLE_NONE. */
+ }
+
+ __asm__("" : : : "memory");
+
+ v->user_data = user_data;
+}
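+
+
+/*
+ * Illustrative sketch, not part of the original interface: filling a small
+ * vector for netio_send_packet_vector(), using user_data as the "valid"
+ * flag described above.  The array size and the source of the packets are
+ * assumptions for the example; the send call itself is only outlined, since
+ * its exact prototype is not reproduced here.
+ *
+ * @code
+ * netio_pkt_vector_entry_t vec[8];
+ * unsigned int i;
+ *
+ * for (i = 0; i < 8; i++)
+ * {
+ *   netio_pkt_t* pkt = next_packet_to_send();  // hypothetical source
+ *
+ *   // user_data is written last, so a polling reader which sees a nonzero
+ *   // value is guaranteed to see the address, size, and handle as well.
+ *   netio_pkt_vector_set(&vec[i], pkt, 1);
+ * }
+ *
+ * // ... pass vec (and its entry count) to netio_send_packet_vector() ...
+ * @endcode
+ */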
+
+
+/**
+ * Flags and structures for @ref netio_get() and @ref netio_set().
+ * @ingroup config
+ */
+
+/** @{ */
+/** Parameter class; addr is a NETIO_PARAM_xxx value. */
+#define NETIO_PARAM 0
+/** Interface MAC address. This address is only valid with @ref netio_get().
+ * The value is a 6-byte MAC address. Depending upon the overall system
+ * design, a MAC address may or may not be available for each interface. */
+#define NETIO_PARAM_MAC 0
+
+/** Determine whether to suspend output on the receipt of pause frames.
+ * If the value is nonzero, the I/O shim will suspend output when a pause
+ * frame is received. If the value is zero, pause frames will be ignored. */
+#define NETIO_PARAM_PAUSE_IN 1
+
+/** Determine whether to send pause frames if the I/O shim packet FIFOs are
+ * nearly full. If the value is zero, pause frames are not sent. If
+ * the value is nonzero, it is the delay value which will be sent in any
+ * pause frames which are output, in units of 512 bit times. */
+#define NETIO_PARAM_PAUSE_OUT 2
+
+/** Jumbo frame support. The value is a 4-byte integer. If the value is
+ * nonzero, the MAC will accept frames of up to 10240 bytes. If the value
+ * is zero, the MAC will only accept frames of up to 1544 bytes. */
+#define NETIO_PARAM_JUMBO 3
+
+/** I/O shim's overflow statistics register. The value is two 16-bit integers.
+ * The first 16-bit value (or the low 16 bits, if the value is treated as a
+ * 32-bit number) is the count of packets which were completely dropped and
+ * not delivered by the shim. The second 16-bit value (or the high 16 bits,
+ * if the value is treated as a 32-bit number) is the count of packets
+ * which were truncated and thus only partially delivered by the shim. This
+ * register is automatically reset to zero after it has been read.
+ */
+#define NETIO_PARAM_OVERFLOW 4
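+
+/*
+ * Illustrative sketch, not part of the original interface: splitting the
+ * ::NETIO_PARAM_OVERFLOW value into its two 16-bit counts, assuming it has
+ * already been fetched into a 32-bit variable with @ref netio_get() using
+ * class ::NETIO_PARAM.
+ *
+ * @code
+ * uint32_t overflow;   // previously filled in via NETIO_PARAM_OVERFLOW
+ *
+ * uint16_t fully_dropped = overflow & 0xffff;  // packets completely dropped
+ * uint16_t truncated = overflow >> 16;         // packets partially delivered
+ * @endcode
+ */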
+
+/** IPP statistics. This address is only valid with @ref netio_get(). The
+ * value is a netio_stat_t structure. Unlike the I/O shim statistics, the
+ * IPP statistics are not all reset to zero on read; see the description
+ * of the netio_stat_t for details. */
+#define NETIO_PARAM_STAT 5
+
+/** Possible link state. The value is a combination of "NETIO_LINK_xxx"
+ * flags. With @ref netio_get(), this will indicate which flags are
+ * actually supported by the hardware.
+ *
+ * For historical reasons, specifying this value to netio_set() will have
+ * the same behavior as using ::NETIO_PARAM_LINK_CONFIG, but this usage is
+ * discouraged.
+ */
+#define NETIO_PARAM_LINK_POSSIBLE_STATE 6
+
+/** Link configuration. The value is a combination of "NETIO_LINK_xxx" flags.
+ * With @ref netio_set(), this will attempt to immediately bring up the
+ * link using whichever of the requested flags are supported by the
+ * hardware, or take down the link if the flags are zero; if this is
+ * not possible, an error will be returned. Many programs will want
+ * to use ::NETIO_PARAM_LINK_DESIRED_STATE instead.
+ *
+ * For historical reasons, specifying this value to netio_get() will
+ * have the same behavior as using ::NETIO_PARAM_LINK_POSSIBLE_STATE,
+ * but this usage is discouraged.
+ */
+#define NETIO_PARAM_LINK_CONFIG NETIO_PARAM_LINK_POSSIBLE_STATE
+
+/** Current link state. This address is only valid with @ref netio_get().
+ * The value is zero or more of the "NETIO_LINK_xxx" flags, ORed together.
+ * If the link is down, the value ANDed with NETIO_LINK_SPEED will be
+ * zero; if the link is up, the value ANDed with NETIO_LINK_SPEED will
+ * result in exactly one of the NETIO_LINK_xxx values, indicating the
+ * current speed. */
+#define NETIO_PARAM_LINK_CURRENT_STATE 7
+
+/** Variant symbol for current state, retained for compatibility with
+ * pre-MDE-2.1 programs. */
+#define NETIO_PARAM_LINK_STATUS NETIO_PARAM_LINK_CURRENT_STATE
+
+/** Packet Coherence protocol. This address is only valid with @ref netio_get().
+ * The value is nonzero if the interface is configured for cache-coherent DMA.
+ */
+#define NETIO_PARAM_COHERENT 8
+
+/** Desired link state. The value is a combination of "NETIO_LINK_xxx"
+ * flags, which specify the desired state for the link. With @ref
+ * netio_set(), this will, in the background, attempt to bring up the link
+ * using whichever of the requested flags are reasonable, or take down the
+ * link if the flags are zero. The actual link up or down operation may
+ * happen after this call completes. If the link state changes in the
+ * future, the system will continue to try to get back to the desired link
+ * state; for instance, if the link is brought up successfully, and then
+ * the network cable is disconnected, the link will go down. However, the
+ * desired state of the link is still up, so if the cable is reconnected,
+ * the link will be brought up again.
+ *
+ * With @ref netio_get(), this will indicate the desired state for the
+ * link, as set with a previous netio_set() call, or implicitly by a
+ * netio_input_register() or netio_input_unregister() operation. This may
+ * not reflect the current state of the link; to get that, use
+ * ::NETIO_PARAM_LINK_CURRENT_STATE. */
+#define NETIO_PARAM_LINK_DESIRED_STATE 9
+
+/** NetIO statistics structure. Retrieved using the ::NETIO_PARAM_STAT
+ * address passed to @ref netio_get(). */
+typedef struct
+{
+ /** Number of packets which have been received by the IPP and forwarded
+ * to a tile's receive queue for processing. This value wraps at its
+ * maximum, and is not cleared upon read. */
+ uint32_t packets_received;
+
+ /** Number of packets which have been dropped by the IPP, because they could
+ * not be received, or could not be forwarded to a tile. The former happens
+ * when the IPP does not have a free packet buffer of suitable size for an
+ * incoming frame. The latter happens when all potential destination tiles
+ * for a packet, as defined by the group, bucket, and queue configuration,
+ * have full receive queues. This value wraps at its maximum, and is not
+ * cleared upon read. */
+ uint32_t packets_dropped;
+
+ /*
+ * Note: the #defines after each of the following four one-byte values
+ * denote their location within the third word of the netio_stat_t. They
+ * are intended for use only by the IPP implementation and are thus omitted
+ * from the Doxygen output.
+ */
+
+ /** Number of packets dropped because no worker was able to accept a new
+ * packet. This value saturates at its maximum, and is cleared upon
+ * read. */
+ uint8_t drops_no_worker;
+#ifndef __DOXYGEN__
+#define NETIO_STAT_DROPS_NO_WORKER 0
+#endif
+
+ /** Number of packets dropped because no small buffers were available.
+ * This value saturates at its maximum, and is cleared upon read. */
+ uint8_t drops_no_smallbuf;
+#ifndef __DOXYGEN__
+#define NETIO_STAT_DROPS_NO_SMALLBUF 1
+#endif
+
+ /** Number of packets dropped because no large buffers were available.
+ * This value saturates at its maximum, and is cleared upon read. */
+ uint8_t drops_no_largebuf;
+#ifndef __DOXYGEN__
+#define NETIO_STAT_DROPS_NO_LARGEBUF 2
+#endif
+
+ /** Number of packets dropped because no jumbo buffers were available.
+ * This value saturates at its maximum, and is cleared upon read. */
+ uint8_t drops_no_jumbobuf;
+#ifndef __DOXYGEN__
+#define NETIO_STAT_DROPS_NO_JUMBOBUF 3
+#endif
+}
+netio_stat_t;
+
+
+/** Link can run, should run, or is running at 10 Mbps. */
+#define NETIO_LINK_10M 0x01
+
+/** Link can run, should run, or is running at 100 Mbps. */
+#define NETIO_LINK_100M 0x02
+
+/** Link can run, should run, or is running at 1 Gbps. */
+#define NETIO_LINK_1G 0x04
+
+/** Link can run, should run, or is running at 10 Gbps. */
+#define NETIO_LINK_10G 0x08
+
+/** Link should run at the highest speed supported by the link and by
+ * the device connected to the link. Only usable as a value for
+ * the link's desired state; never returned as a value for the current
+ * or possible states. */
+#define NETIO_LINK_ANYSPEED 0x10
+
+/** All legal link speeds. */
+#define NETIO_LINK_SPEED (NETIO_LINK_10M | \
+ NETIO_LINK_100M | \
+ NETIO_LINK_1G | \
+ NETIO_LINK_10G | \
+ NETIO_LINK_ANYSPEED)
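+
+
+/*
+ * Illustrative sketch, not part of the original interface: extracting the
+ * current speed from a ::NETIO_PARAM_LINK_CURRENT_STATE value, as described
+ * above.  The state is assumed to have been fetched with @ref netio_get().
+ *
+ * @code
+ * uint32_t state;   // previously filled in via NETIO_PARAM_LINK_CURRENT_STATE
+ *
+ * uint32_t speed = state & NETIO_LINK_SPEED;
+ * int link_is_up = (speed != 0);          // zero means the link is down
+ * int link_is_10g = (speed == NETIO_LINK_10G);
+ * @endcode
+ */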
+
+
+/** MAC register class. Addr is a register offset within the MAC.
+ * Registers within the XGbE and GbE MACs are documented in the Tile
+ * Processor I/O Device Guide (UG104). MAC registers start at address
+ * 0x4000, and do not include the MAC_INTERFACE registers. */
+#define NETIO_MAC 1
+
+/** MDIO register class (IEEE 802.3 clause 22 format). Addr is the "addr"
+ * member of a netio_mdio_addr_t structure. */
+#define NETIO_MDIO 2
+
+/** MDIO register class (IEEE 802.3 clause 45 format). Addr is the "addr"
+ * member of a netio_mdio_addr_t structure. */
+#define NETIO_MDIO_CLAUSE45 3
+
+/** NetIO MDIO address type. Retrieved or provided using the ::NETIO_MDIO
+ * address passed to @ref netio_get() or @ref netio_set(). */
+typedef union
+{
+ struct
+ {
+ unsigned int reg:16; /**< MDIO register offset. For clause 22 access,
+ must be less than 32. */
+ unsigned int phy:5; /**< Which MDIO PHY to access. */
+ unsigned int dev:5; /**< Which MDIO device to access within that PHY.
+ Applicable for clause 45 access only; ignored
+ for clause 22 access. */
+ }
+ bits; /**< Container for bitfields. */
+ uint64_t addr; /**< Value to pass to @ref netio_get() or
+ * @ref netio_set(). */
+}
+netio_mdio_addr_t;
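+
+/*
+ * Illustrative sketch, not part of the original interface: building a
+ * clause-22 MDIO address for PHY 1, register 2.  Per the descriptions above,
+ * the resulting .addr value is what the ::NETIO_MDIO class expects as the
+ * address argument of @ref netio_get() or @ref netio_set().
+ *
+ * @code
+ * netio_mdio_addr_t mdio_addr;
+ *
+ * mdio_addr.addr = 0;       // clear all fields, including .bits.dev,
+ *                           // which clause 22 access ignores
+ * mdio_addr.bits.phy = 1;   // which MDIO PHY to access
+ * mdio_addr.bits.reg = 2;   // register offset; must be < 32 for clause 22
+ * @endcode
+ */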
+
+/** @} */
+
+#endif /* __NETIO_INTF_H__ */
diff --git a/arch/tile/kernel/Makefile b/arch/tile/kernel/Makefile
index 112b1e248f05..b4c8e8ec45dc 100644
--- a/arch/tile/kernel/Makefile
+++ b/arch/tile/kernel/Makefile
@@ -15,3 +15,4 @@ obj-$(CONFIG_SMP) += smpboot.o smp.o tlb.o
obj-$(CONFIG_MODULES) += module.o
obj-$(CONFIG_EARLY_PRINTK) += early_printk.o
obj-$(CONFIG_KEXEC) += machine_kexec.o relocate_kernel.o
+obj-$(CONFIG_PCI) += pci.o
diff --git a/arch/tile/kernel/backtrace.c b/arch/tile/kernel/backtrace.c
index d3c41c1ff6bd..55a6a74974b4 100644
--- a/arch/tile/kernel/backtrace.c
+++ b/arch/tile/kernel/backtrace.c
@@ -369,6 +369,10 @@ static void find_caller_pc_and_caller_sp(CallerLocation *location,
/* Weird; reserved value, ignore it. */
continue;
}
+ if (info_operand & ENTRY_POINT_INFO_OP) {
+ /* This info op is ignored by the backtracer. */
+ continue;
+ }
/* Skip info ops which are not in the
* "one_ago" mode we want right now.
diff --git a/arch/tile/kernel/compat.c b/arch/tile/kernel/compat.c
index b1e06d041555..dbc213adf5e1 100644
--- a/arch/tile/kernel/compat.c
+++ b/arch/tile/kernel/compat.c
@@ -21,7 +21,6 @@
#include <linux/kdev_t.h>
#include <linux/fs.h>
#include <linux/fcntl.h>
-#include <linux/smp_lock.h>
#include <linux/uaccess.h>
#include <linux/signal.h>
#include <asm/syscalls.h>
@@ -148,14 +147,20 @@ long tile_compat_sys_msgrcv(int msqid,
#define compat_sys_readahead sys32_readahead
#define compat_sys_sync_file_range compat_sys_sync_file_range2
-/* The native 64-bit "struct stat" matches the 32-bit "struct stat64". */
-#define compat_sys_stat64 sys_newstat
-#define compat_sys_lstat64 sys_newlstat
-#define compat_sys_fstat64 sys_newfstat
-#define compat_sys_fstatat64 sys_newfstatat
+/* We leverage the "struct stat64" type for 32-bit time_t/nsec. */
+#define compat_sys_stat64 sys_stat64
+#define compat_sys_lstat64 sys_lstat64
+#define compat_sys_fstat64 sys_fstat64
+#define compat_sys_fstatat64 sys_fstatat64
-/* Pass full 64-bit values through ptrace. */
-#define compat_sys_ptrace tile_compat_sys_ptrace
+/* The native sys_ptrace dynamically handles compat binaries. */
+#define compat_sys_ptrace sys_ptrace
+
+/* Call the trampolines to manage pt_regs where necessary. */
+#define compat_sys_execve _compat_sys_execve
+#define compat_sys_sigaltstack _compat_sys_sigaltstack
+#define compat_sys_rt_sigreturn _compat_sys_rt_sigreturn
+#define sys_clone _sys_clone
/*
* Note that we can't include <linux/unistd.h> here since the header
diff --git a/arch/tile/kernel/compat_signal.c b/arch/tile/kernel/compat_signal.c
index 9c710db43f13..543d6a33aa26 100644
--- a/arch/tile/kernel/compat_signal.c
+++ b/arch/tile/kernel/compat_signal.c
@@ -15,7 +15,6 @@
#include <linux/sched.h>
#include <linux/mm.h>
#include <linux/smp.h>
-#include <linux/smp_lock.h>
#include <linux/kernel.h>
#include <linux/signal.h>
#include <linux/errno.h>
@@ -256,9 +255,9 @@ int copy_siginfo_from_user32(siginfo_t *to, struct compat_siginfo __user *from)
return err;
}
-long _compat_sys_sigaltstack(const struct compat_sigaltstack __user *uss_ptr,
- struct compat_sigaltstack __user *uoss_ptr,
- struct pt_regs *regs)
+long compat_sys_sigaltstack(const struct compat_sigaltstack __user *uss_ptr,
+ struct compat_sigaltstack __user *uoss_ptr,
+ struct pt_regs *regs)
{
stack_t uss, uoss;
int ret;
@@ -291,7 +290,7 @@ long _compat_sys_sigaltstack(const struct compat_sigaltstack __user *uss_ptr,
return ret;
}
-long _compat_sys_rt_sigreturn(struct pt_regs *regs)
+long compat_sys_rt_sigreturn(struct pt_regs *regs)
{
struct compat_rt_sigframe __user *frame =
(struct compat_rt_sigframe __user *) compat_ptr(regs->sp);
@@ -312,7 +311,7 @@ long _compat_sys_rt_sigreturn(struct pt_regs *regs)
if (restore_sigcontext(regs, &frame->uc.uc_mcontext, &r0))
goto badframe;
- if (_compat_sys_sigaltstack(&frame->uc.uc_stack, NULL, regs) != 0)
+ if (compat_sys_sigaltstack(&frame->uc.uc_stack, NULL, regs) != 0)
goto badframe;
return r0;
diff --git a/arch/tile/kernel/early_printk.c b/arch/tile/kernel/early_printk.c
index 2c54fd43a8a0..493a0e66d916 100644
--- a/arch/tile/kernel/early_printk.c
+++ b/arch/tile/kernel/early_printk.c
@@ -54,7 +54,7 @@ void early_printk(const char *fmt, ...)
void early_panic(const char *fmt, ...)
{
va_list ap;
- raw_local_irq_disable_all();
+ arch_local_irq_disable_all();
va_start(ap, fmt);
early_printk("Kernel panic - not syncing: ");
early_vprintk(fmt, ap);
diff --git a/arch/tile/kernel/entry.S b/arch/tile/kernel/entry.S
index 3d01383b1b0e..fd8dc42abdcb 100644
--- a/arch/tile/kernel/entry.S
+++ b/arch/tile/kernel/entry.S
@@ -15,7 +15,9 @@
#include <linux/linkage.h>
#include <linux/unistd.h>
#include <asm/irqflags.h>
+#include <asm/processor.h>
#include <arch/abi.h>
+#include <arch/spr_def.h>
#ifdef __tilegx__
#define bnzt bnezt
@@ -25,28 +27,6 @@ STD_ENTRY(current_text_addr)
{ move r0, lr; jrp lr }
STD_ENDPROC(current_text_addr)
-STD_ENTRY(_sim_syscall)
- /*
- * Wait for r0-r9 to be ready (and lr on the off chance we
- * want the syscall to locate its caller), then make a magic
- * simulator syscall.
- *
- * We carefully stall until the registers are readable in case they
- * are the target of a slow load, etc. so that tile-sim will
- * definitely be able to read all of them inside the magic syscall.
- *
- * Technically this is wrong for r3-r9 and lr, since an interrupt
- * could come in and restore the registers with a slow load right
- * before executing the mtspr. We may need to modify tile-sim to
- * explicitly stall for this case, but we do not yet have
- * a way to implement such a stall.
- */
- { and zero, lr, r9 ; and zero, r8, r7 }
- { and zero, r6, r5 ; and zero, r4, r3 }
- { and zero, r2, r1 ; mtspr SIM_CONTROL, r0 }
- { jrp lr }
- STD_ENDPROC(_sim_syscall)
-
/*
* Implement execve(). The i386 code has a note that forking from kernel
* space results in no copy on write until the execve, so we should be
@@ -102,7 +82,7 @@ STD_ENTRY(KBacktraceIterator_init_current)
STD_ENTRY(cpu_idle_on_new_stack)
{
move sp, r1
- mtspr SYSTEM_SAVE_1_0, r2
+ mtspr SPR_SYSTEM_SAVE_K_0, r2
}
jal free_thread_info
j cpu_idle
@@ -124,15 +104,15 @@ STD_ENTRY(smp_nap)
STD_ENTRY(_cpu_idle)
{
lnk r0
- movei r1, 1
+ movei r1, KERNEL_PL
}
{
addli r0, r0, _cpu_idle_nap - .
mtspr INTERRUPT_CRITICAL_SECTION, r1
}
- IRQ_ENABLE(r2, r3) /* unmask, but still with ICS set */
- mtspr EX_CONTEXT_1_1, r1 /* PL1, ICS clear */
- mtspr EX_CONTEXT_1_0, r0
+ IRQ_ENABLE(r2, r3) /* unmask, but still with ICS set */
+ mtspr SPR_EX_CONTEXT_K_1, r1 /* Kernel PL, ICS clear */
+ mtspr SPR_EX_CONTEXT_K_0, r0
iret
.global _cpu_idle_nap
_cpu_idle_nap:
diff --git a/arch/tile/kernel/hardwall.c b/arch/tile/kernel/hardwall.c
index 1e54a7843410..e910530436e6 100644
--- a/arch/tile/kernel/hardwall.c
+++ b/arch/tile/kernel/hardwall.c
@@ -151,12 +151,12 @@ enum direction_protect {
static void enable_firewall_interrupts(void)
{
- raw_local_irq_unmask_now(INT_UDN_FIREWALL);
+ arch_local_irq_unmask_now(INT_UDN_FIREWALL);
}
static void disable_firewall_interrupts(void)
{
- raw_local_irq_mask_now(INT_UDN_FIREWALL);
+ arch_local_irq_mask_now(INT_UDN_FIREWALL);
}
/* Set up hardwall on this cpu based on the passed hardwall_info. */
@@ -768,13 +768,13 @@ static int hardwall_release(struct inode *inode, struct file *file)
}
static const struct file_operations dev_hardwall_fops = {
+ .open = nonseekable_open,
.unlocked_ioctl = hardwall_ioctl,
#ifdef CONFIG_COMPAT
.compat_ioctl = hardwall_compat_ioctl,
#endif
.flush = hardwall_flush,
.release = hardwall_release,
- .llseek = noop_llseek,
};
static struct cdev hardwall_dev;
diff --git a/arch/tile/kernel/head_32.S b/arch/tile/kernel/head_32.S
index 2b4f6c091701..90e7c4435693 100644
--- a/arch/tile/kernel/head_32.S
+++ b/arch/tile/kernel/head_32.S
@@ -23,6 +23,7 @@
#include <asm/asm-offsets.h>
#include <hv/hypervisor.h>
#include <arch/chip.h>
+#include <arch/spr_def.h>
/*
* This module contains the entry code for kernel images. It performs the
@@ -76,7 +77,7 @@ ENTRY(_start)
}
1:
- /* Get our processor number and save it away in SAVE_1_0. */
+ /* Get our processor number and save it away in SAVE_K_0. */
jal hv_inquire_topology
mulll_uu r4, r1, r2 /* r1 == y, r2 == width */
add r4, r4, r0 /* r0 == x, so r4 == cpu == y*width + x */
@@ -124,7 +125,7 @@ ENTRY(_start)
lw r0, r0
lw sp, r1
or r4, sp, r4
- mtspr SYSTEM_SAVE_1_0, r4 /* save ksp0 + cpu */
+ mtspr SPR_SYSTEM_SAVE_K_0, r4 /* save ksp0 + cpu */
addi sp, sp, -STACK_TOP_DELTA
{
move lr, zero /* stop backtraces in the called function */
diff --git a/arch/tile/kernel/intvec_32.S b/arch/tile/kernel/intvec_32.S
index 8f58bdff20d7..f5821626247f 100644
--- a/arch/tile/kernel/intvec_32.S
+++ b/arch/tile/kernel/intvec_32.S
@@ -32,8 +32,8 @@
# error "No support for kernel preemption currently"
#endif
-#if INT_INTCTRL_1 < 32 || INT_INTCTRL_1 >= 48
-# error INT_INTCTRL_1 coded to set high interrupt mask
+#if INT_INTCTRL_K < 32 || INT_INTCTRL_K >= 48
+# error INT_INTCTRL_K coded to set high interrupt mask
#endif
#define PTREGS_PTR(reg, ptreg) addli reg, sp, C_ABI_SAVE_AREA_SIZE + (ptreg)
@@ -132,8 +132,8 @@ intvec_\vecname:
/* Temporarily save a register so we have somewhere to work. */
- mtspr SYSTEM_SAVE_1_1, r0
- mfspr r0, EX_CONTEXT_1_1
+ mtspr SPR_SYSTEM_SAVE_K_1, r0
+ mfspr r0, SPR_EX_CONTEXT_K_1
/* The cmpxchg code clears sp to force us to reset it here on fault. */
{
@@ -167,18 +167,18 @@ intvec_\vecname:
* The page_fault handler may be downcalled directly by the
* hypervisor even when Linux is running and has ICS set.
*
- * In this case the contents of EX_CONTEXT_1_1 reflect the
+ * In this case the contents of EX_CONTEXT_K_1 reflect the
* previous fault and can't be relied on to choose whether or
* not to reinitialize the stack pointer. So we add a test
- * to see whether SYSTEM_SAVE_1_2 has the high bit set,
+ * to see whether SYSTEM_SAVE_K_2 has the high bit set,
* and if so we don't reinitialize sp, since we must be coming
* from Linux. (In fact the precise case is !(val & ~1),
* but any Linux PC has to have the high bit set.)
*
- * Note that the hypervisor *always* sets SYSTEM_SAVE_1_2 for
+ * Note that the hypervisor *always* sets SYSTEM_SAVE_K_2 for
* any path that turns into a downcall to one of our TLB handlers.
*/
- mfspr r0, SYSTEM_SAVE_1_2
+ mfspr r0, SPR_SYSTEM_SAVE_K_2
{
blz r0, 0f /* high bit in S_S_1_2 is for a PC to use */
move r0, sp
@@ -187,12 +187,12 @@ intvec_\vecname:
2:
/*
- * SYSTEM_SAVE_1_0 holds the cpu number in the low bits, and
+ * SYSTEM_SAVE_K_0 holds the cpu number in the low bits, and
* the current stack top in the higher bits. So we recover
* our stack top by just masking off the low bits, then
* point sp at the top aligned address on the actual stack page.
*/
- mfspr r0, SYSTEM_SAVE_1_0
+ mfspr r0, SPR_SYSTEM_SAVE_K_0
mm r0, r0, zero, LOG2_THREAD_SIZE, 31
0:
@@ -254,7 +254,7 @@ intvec_\vecname:
sw sp, r3
addli sp, sp, PTREGS_OFFSET_PC - PTREGS_OFFSET_REG(3)
}
- mfspr r0, EX_CONTEXT_1_0
+ mfspr r0, SPR_EX_CONTEXT_K_0
.ifc \processing,handle_syscall
/*
* Bump the saved PC by one bundle so that when we return, we won't
@@ -267,7 +267,7 @@ intvec_\vecname:
sw sp, r0
addli sp, sp, PTREGS_OFFSET_EX1 - PTREGS_OFFSET_PC
}
- mfspr r0, EX_CONTEXT_1_1
+ mfspr r0, SPR_EX_CONTEXT_K_1
{
sw sp, r0
addi sp, sp, PTREGS_OFFSET_FAULTNUM - PTREGS_OFFSET_EX1
@@ -289,7 +289,7 @@ intvec_\vecname:
.endif
addli sp, sp, PTREGS_OFFSET_REG(0) - PTREGS_OFFSET_FAULTNUM
}
- mfspr r0, SYSTEM_SAVE_1_1 /* Original r0 */
+ mfspr r0, SPR_SYSTEM_SAVE_K_1 /* Original r0 */
{
sw sp, r0
addi sp, sp, -PTREGS_OFFSET_REG(0) - 4
@@ -309,12 +309,12 @@ intvec_\vecname:
* See discussion below at "finish_interrupt_save".
*/
.ifc \c_routine, do_page_fault
- mfspr r2, SYSTEM_SAVE_1_3 /* address of page fault */
- mfspr r3, SYSTEM_SAVE_1_2 /* info about page fault */
+ mfspr r2, SPR_SYSTEM_SAVE_K_3 /* address of page fault */
+ mfspr r3, SPR_SYSTEM_SAVE_K_2 /* info about page fault */
.else
.ifc \vecnum, INT_DOUBLE_FAULT
{
- mfspr r2, SYSTEM_SAVE_1_2 /* double fault info from HV */
+ mfspr r2, SPR_SYSTEM_SAVE_K_2 /* double fault info from HV */
movei r3, 0
}
.else
@@ -467,7 +467,7 @@ intvec_\vecname:
/* Load tp with our per-cpu offset. */
#ifdef CONFIG_SMP
{
- mfspr r20, SYSTEM_SAVE_1_0
+ mfspr r20, SPR_SYSTEM_SAVE_K_0
moveli r21, lo16(__per_cpu_offset)
}
{
@@ -487,7 +487,7 @@ intvec_\vecname:
* We load flags in r32 here so we can jump to .Lrestore_regs
* directly after do_page_fault_ics() if necessary.
*/
- mfspr r32, EX_CONTEXT_1_1
+ mfspr r32, SPR_EX_CONTEXT_K_1
{
andi r32, r32, SPR_EX_CONTEXT_1_1__PL_MASK /* mask off ICS */
PTREGS_PTR(r21, PTREGS_OFFSET_FLAGS)
@@ -957,11 +957,11 @@ STD_ENTRY(interrupt_return)
pop_reg_zero r21, r3, sp, PTREGS_OFFSET_EX1 - PTREGS_OFFSET_PC
pop_reg_zero lr, r4, sp, PTREGS_OFFSET_REG(52) - PTREGS_OFFSET_EX1
{
- mtspr EX_CONTEXT_1_0, r21
+ mtspr SPR_EX_CONTEXT_K_0, r21
move r5, zero
}
{
- mtspr EX_CONTEXT_1_1, lr
+ mtspr SPR_EX_CONTEXT_K_1, lr
andi lr, lr, SPR_EX_CONTEXT_1_1__PL_MASK /* mask off ICS */
}
@@ -1020,7 +1020,7 @@ STD_ENTRY(interrupt_return)
/* Set r1 to errno if we are returning an error, otherwise zero. */
{
- moveli r29, 1024
+ moveli r29, 4096
sub r1, zero, r0
}
slt_u r29, r1, r29
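In rough C terms (a sketch only, not part of the patch; the helper name is invented), the widened constant matches the kernel's usual -1..-4095 errno window; the conditional select that consumes r29 comes just after this hunk:

static inline unsigned long syscall_errno(long r0)
{
	unsigned long neg = (unsigned long)-r0;	/* sub r1, zero, r0 */

	return neg < 4096 ? neg : 0;		/* moveli r29, 4096; slt_u */
}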
@@ -1199,7 +1199,7 @@ STD_ENTRY(interrupt_return)
STD_ENDPROC(interrupt_return)
/*
- * This interrupt variant clears the INT_INTCTRL_1 interrupt mask bit
+ * This interrupt variant clears the INT_INTCTRL_K interrupt mask bit
* before returning, so we can properly get more downcalls.
*/
.pushsection .text.handle_interrupt_downcall,"ax"
@@ -1208,11 +1208,11 @@ handle_interrupt_downcall:
check_single_stepping normal, .Ldispatch_downcall
.Ldispatch_downcall:
- /* Clear INTCTRL_1 from the set of interrupts we ever enable. */
+ /* Clear INTCTRL_K from the set of interrupts we ever enable. */
GET_INTERRUPTS_ENABLED_MASK_PTR(r30)
{
addi r30, r30, 4
- movei r31, INT_MASK(INT_INTCTRL_1)
+ movei r31, INT_MASK(INT_INTCTRL_K)
}
{
lw r20, r30
@@ -1227,7 +1227,7 @@ handle_interrupt_downcall:
}
FEEDBACK_REENTER(handle_interrupt_downcall)
- /* Allow INTCTRL_1 to be enabled next time we enable interrupts. */
+ /* Allow INTCTRL_K to be enabled next time we enable interrupts. */
lw r20, r30
or r20, r20, r31
sw r30, r20
@@ -1472,7 +1472,12 @@ handle_ill:
lw r26, r24
sw r28, r26
- /* Clear TIF_SINGLESTEP */
+ /*
+ * Clear TIF_SINGLESTEP to prevent recursion if we execute an ill.
+ * The normal non-arch flow redundantly clears TIF_SINGLESTEP, but we
+ * need to clear it here and can't really impose on all other arches.
+ * So what's another write between friends?
+ */
GET_THREAD_INFO(r0)
addi r1, r0, THREAD_INFO_FLAGS_OFFSET
@@ -1509,7 +1514,7 @@ handle_ill:
/* Various stub interrupt handlers and syscall handlers */
STD_ENTRY_LOCAL(_kernel_double_fault)
- mfspr r1, EX_CONTEXT_1_0
+ mfspr r1, SPR_EX_CONTEXT_K_0
move r2, lr
move r3, sp
move r4, r52
@@ -1518,34 +1523,29 @@ STD_ENTRY_LOCAL(_kernel_double_fault)
STD_ENDPROC(_kernel_double_fault)
STD_ENTRY_LOCAL(bad_intr)
- mfspr r2, EX_CONTEXT_1_0
+ mfspr r2, SPR_EX_CONTEXT_K_0
panic "Unhandled interrupt %#x: PC %#lx"
STD_ENDPROC(bad_intr)
/* Put address of pt_regs in reg and jump. */
#define PTREGS_SYSCALL(x, reg) \
- STD_ENTRY(x); \
+ STD_ENTRY(_##x); \
{ \
PTREGS_PTR(reg, PTREGS_OFFSET_BASE); \
- j _##x \
+ j x \
}; \
- STD_ENDPROC(x)
+ STD_ENDPROC(_##x)
PTREGS_SYSCALL(sys_execve, r3)
PTREGS_SYSCALL(sys_sigaltstack, r2)
PTREGS_SYSCALL(sys_rt_sigreturn, r0)
+PTREGS_SYSCALL(sys_cmpxchg_badaddr, r1)
-/* Save additional callee-saves to pt_regs, put address in reg and jump. */
-#define PTREGS_SYSCALL_ALL_REGS(x, reg) \
- STD_ENTRY(x); \
- push_extra_callee_saves reg; \
- j _##x; \
- STD_ENDPROC(x)
-
-PTREGS_SYSCALL_ALL_REGS(sys_fork, r0)
-PTREGS_SYSCALL_ALL_REGS(sys_vfork, r0)
-PTREGS_SYSCALL_ALL_REGS(sys_clone, r4)
-PTREGS_SYSCALL_ALL_REGS(sys_cmpxchg_badaddr, r1)
+/* Save additional callee-saves to pt_regs, put address in r4 and jump. */
+STD_ENTRY(_sys_clone)
+ push_extra_callee_saves r4
+ j sys_clone
+ STD_ENDPROC(_sys_clone)
/*
* This entrypoint is taken for the cmpxchg and atomic_update fast
@@ -1558,12 +1558,14 @@ PTREGS_SYSCALL_ALL_REGS(sys_cmpxchg_badaddr, r1)
* to be available to it on entry. It does not modify any callee-save
* registers (including "lr"). It does not check what PL it is being
* called at, so you'd better not call it other than at PL0.
+ * The <atomic.h> wrapper assumes it only clobbers r20-r29, so if
+ * it ever is necessary to use more registers, be aware.
*
* It does not use the stack, but since it might be re-interrupted by
* a page fault which would assume the stack was valid, it does
* save/restore the stack pointer and zero it out to make sure it gets reset.
* Since we always keep interrupts disabled, the hypervisor won't
- * clobber our EX_CONTEXT_1_x registers, so we don't save/restore them
+ * clobber our EX_CONTEXT_K_x registers, so we don't save/restore them
* (other than to advance the PC on return).
*
* We have to manually validate the user vs kernel address range
@@ -1769,7 +1771,7 @@ ENTRY(sys_cmpxchg)
/* Do slow mtspr here so the following "mf" waits less. */
{
move sp, r27
- mtspr EX_CONTEXT_1_0, r28
+ mtspr SPR_EX_CONTEXT_K_0, r28
}
mf
@@ -1788,7 +1790,7 @@ ENTRY(sys_cmpxchg)
}
{
move sp, r27
- mtspr EX_CONTEXT_1_0, r28
+ mtspr SPR_EX_CONTEXT_K_0, r28
}
iret
@@ -1816,7 +1818,7 @@ ENTRY(sys_cmpxchg)
#endif
/* Issue the slow SPR here while the tns result is in flight. */
- mfspr r28, EX_CONTEXT_1_0
+ mfspr r28, SPR_EX_CONTEXT_K_0
{
addi r28, r28, 8 /* return to the instruction after the swint1 */
@@ -1904,7 +1906,7 @@ ENTRY(sys_cmpxchg)
.Lcmpxchg64_mismatch:
{
move sp, r27
- mtspr EX_CONTEXT_1_0, r28
+ mtspr SPR_EX_CONTEXT_K_0, r28
}
mf
{
@@ -1985,8 +1987,13 @@ int_unalign:
int_hand INT_PERF_COUNT, PERF_COUNT, \
op_handle_perf_interrupt, handle_nmi
int_hand INT_INTCTRL_3, INTCTRL_3, bad_intr
+#if CONFIG_KERNEL_PL == 2
+ dc_dispatch INT_INTCTRL_2, INTCTRL_2
+ int_hand INT_INTCTRL_1, INTCTRL_1, bad_intr
+#else
int_hand INT_INTCTRL_2, INTCTRL_2, bad_intr
dc_dispatch INT_INTCTRL_1, INTCTRL_1
+#endif
int_hand INT_INTCTRL_0, INTCTRL_0, bad_intr
int_hand INT_MESSAGE_RCV_DWNCL, MESSAGE_RCV_DWNCL, \
hv_message_intr, handle_interrupt_downcall
diff --git a/arch/tile/kernel/irq.c b/arch/tile/kernel/irq.c
index 9a27d563fc30..128805ef8f2c 100644
--- a/arch/tile/kernel/irq.c
+++ b/arch/tile/kernel/irq.c
@@ -26,7 +26,7 @@
#define IS_HW_CLEARED 1
/*
- * The set of interrupts we enable for raw_local_irq_enable().
+ * The set of interrupts we enable for arch_local_irq_enable().
* This is initialized to have just a single interrupt that the kernel
* doesn't actually use as a sentinel. During kernel init,
* interrupts are added as the kernel gets prepared to support them.
@@ -61,9 +61,9 @@ static DEFINE_SPINLOCK(available_irqs_lock);
#if CHIP_HAS_IPI()
/* Use SPRs to manipulate device interrupts. */
-#define mask_irqs(irq_mask) __insn_mtspr(SPR_IPI_MASK_SET_1, irq_mask)
-#define unmask_irqs(irq_mask) __insn_mtspr(SPR_IPI_MASK_RESET_1, irq_mask)
-#define clear_irqs(irq_mask) __insn_mtspr(SPR_IPI_EVENT_RESET_1, irq_mask)
+#define mask_irqs(irq_mask) __insn_mtspr(SPR_IPI_MASK_SET_K, irq_mask)
+#define unmask_irqs(irq_mask) __insn_mtspr(SPR_IPI_MASK_RESET_K, irq_mask)
+#define clear_irqs(irq_mask) __insn_mtspr(SPR_IPI_EVENT_RESET_K, irq_mask)
#else
/* Use HV to manipulate device interrupts. */
#define mask_irqs(irq_mask) hv_disable_intr(irq_mask)
@@ -89,16 +89,16 @@ void tile_dev_intr(struct pt_regs *regs, int intnum)
* masked by a previous interrupt. Then, mask out the ones
* we're going to handle.
*/
- unsigned long masked = __insn_mfspr(SPR_IPI_MASK_1);
- original_irqs = __insn_mfspr(SPR_IPI_EVENT_1) & ~masked;
- __insn_mtspr(SPR_IPI_MASK_SET_1, original_irqs);
+ unsigned long masked = __insn_mfspr(SPR_IPI_MASK_K);
+ original_irqs = __insn_mfspr(SPR_IPI_EVENT_K) & ~masked;
+ __insn_mtspr(SPR_IPI_MASK_SET_K, original_irqs);
#else
/*
* Hypervisor performs the equivalent of the Gx code above and
* then puts the pending interrupt mask into a system save reg
* for us to find.
*/
- original_irqs = __insn_mfspr(SPR_SYSTEM_SAVE_1_3);
+ original_irqs = __insn_mfspr(SPR_SYSTEM_SAVE_K_3);
#endif
remaining_irqs = original_irqs;
@@ -225,7 +225,7 @@ void __cpuinit setup_irq_regs(void)
/* Enable interrupt delivery. */
unmask_irqs(~0UL);
#if CHIP_HAS_IPI()
- raw_local_irq_unmask(INT_IPI_1);
+ arch_local_irq_unmask(INT_IPI_K);
#endif
}
diff --git a/arch/tile/kernel/machine_kexec.c b/arch/tile/kernel/machine_kexec.c
index ba7a265d6179..0d8b9e933487 100644
--- a/arch/tile/kernel/machine_kexec.c
+++ b/arch/tile/kernel/machine_kexec.c
@@ -182,13 +182,13 @@ static void kexec_find_and_set_command_line(struct kimage *image)
if ((entry & IND_SOURCE)) {
void *va =
- kmap_atomic_pfn(entry >> PAGE_SHIFT, KM_USER0);
+ kmap_atomic_pfn(entry >> PAGE_SHIFT);
r = kexec_bn2cl(va);
if (r) {
command_line = r;
break;
}
- kunmap_atomic(va, KM_USER0);
+ kunmap_atomic(va);
}
}
@@ -198,7 +198,7 @@ static void kexec_find_and_set_command_line(struct kimage *image)
hverr = hv_set_command_line(
(HV_VirtAddr) command_line, strlen(command_line));
- kunmap_atomic(command_line, KM_USER0);
+ kunmap_atomic(command_line);
} else {
pr_info("%s: no command line found; making empty\n",
__func__);
diff --git a/arch/tile/kernel/messaging.c b/arch/tile/kernel/messaging.c
index 6d23ed271d10..0858ee6b520f 100644
--- a/arch/tile/kernel/messaging.c
+++ b/arch/tile/kernel/messaging.c
@@ -34,7 +34,7 @@ void __cpuinit init_messaging(void)
panic("hv_register_message_state: error %d", rc);
/* Make sure downcall interrupts will be enabled. */
- raw_local_irq_unmask(INT_INTCTRL_1);
+ arch_local_irq_unmask(INT_INTCTRL_K);
}
void hv_message_intr(struct pt_regs *regs, int intnum)
diff --git a/arch/tile/kernel/pci.c b/arch/tile/kernel/pci.c
new file mode 100644
index 000000000000..a1ee25be9ad9
--- /dev/null
+++ b/arch/tile/kernel/pci.c
@@ -0,0 +1,621 @@
+/*
+ * Copyright 2010 Tilera Corporation. All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation, version 2.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
+ * NON INFRINGEMENT. See the GNU General Public License for
+ * more details.
+ */
+
+#include <linux/kernel.h>
+#include <linux/pci.h>
+#include <linux/delay.h>
+#include <linux/string.h>
+#include <linux/init.h>
+#include <linux/capability.h>
+#include <linux/sched.h>
+#include <linux/errno.h>
+#include <linux/bootmem.h>
+#include <linux/irq.h>
+#include <linux/io.h>
+#include <linux/uaccess.h>
+
+#include <asm/processor.h>
+#include <asm/sections.h>
+#include <asm/byteorder.h>
+#include <asm/hv_driver.h>
+#include <hv/drv_pcie_rc_intf.h>
+
+
+/*
+ * Initialization flow and process
+ * -------------------------------
+ *
+ * This file contains the routines to search for PCI buses,
+ * enumerate the buses, and configure any attached devices.
+ *
+ * There are two entry points here:
+ * 1) tile_pci_init
+ * This sets up the pci_controller structs, and opens the
+ * FDs to the hypervisor. This is called from setup_arch() early
+ * in the boot process.
+ * 2) pcibios_init
+ * This probes the PCI bus(es) for any attached hardware. It's
+ * called by subsys_initcall. All of the real work is done by the
+ * generic Linux PCI layer.
+ *
+ */
+
+/*
+ * This flag indicates whether the platform is a TILEmpower board, which needs
+ * special configuration for the PLX switch chip.
+ */
+int __write_once tile_plx_gen1;
+
+static struct pci_controller controllers[TILE_NUM_PCIE];
+static int num_controllers;
+
+static struct pci_ops tile_cfg_ops;
+
+
+/*
+ * We don't need to worry about the alignment of resources.
+ */
+resource_size_t pcibios_align_resource(void *data, const struct resource *res,
+ resource_size_t size, resource_size_t align)
+{
+ return res->start;
+}
+EXPORT_SYMBOL(pcibios_align_resource);
+
+/*
+ * Open a FD to the hypervisor PCI device.
+ *
+ * controller_id is the controller number; config_type is 0 or 1 for
+ * config0 or config1 operations.
+ */
+static int __init tile_pcie_open(int controller_id, int config_type)
+{
+ char filename[32];
+ int fd;
+
+ sprintf(filename, "pcie/%d/config%d", controller_id, config_type);
+
+ fd = hv_dev_open((HV_VirtAddr)filename, 0);
+
+ return fd;
+}
+
+
+/*
+ * Get the IRQ numbers from the HV and set up the handlers for them.
+ */
+static int __init tile_init_irqs(int controller_id,
+ struct pci_controller *controller)
+{
+ char filename[32];
+ int fd;
+ int ret;
+ int x;
+ struct pcie_rc_config rc_config;
+
+ sprintf(filename, "pcie/%d/ctl", controller_id);
+ fd = hv_dev_open((HV_VirtAddr)filename, 0);
+ if (fd < 0) {
+ pr_err("PCI: hv_dev_open(%s) failed\n", filename);
+ return -1;
+ }
+ ret = hv_dev_pread(fd, 0, (HV_VirtAddr)(&rc_config),
+ sizeof(rc_config), PCIE_RC_CONFIG_MASK_OFF);
+ hv_dev_close(fd);
+ if (ret != sizeof(rc_config)) {
+ pr_err("PCI: wanted %zd bytes, got %d\n",
+ sizeof(rc_config), ret);
+ return -1;
+ }
+ /* Record irq_base so that we can map INTx to IRQ # later. */
+ controller->irq_base = rc_config.intr;
+
+ for (x = 0; x < 4; x++)
+ tile_irq_activate(rc_config.intr + x,
+ TILE_IRQ_HW_CLEAR);
+
+ if (rc_config.plx_gen1)
+ controller->plx_gen1 = 1;
+
+ return 0;
+}
+
+/*
+ * First initialization entry point, called from setup_arch().
+ *
+ * Find valid controllers and fill in pci_controller structs for each
+ * of them.
+ *
+ * Returns the number of controllers discovered.
+ */
+int __init tile_pci_init(void)
+{
+ int i;
+
+ pr_info("PCI: Searching for controllers...\n");
+
+ /* Do any configuration we need before using the PCIe */
+
+ for (i = 0; i < TILE_NUM_PCIE; i++) {
+ int hv_cfg_fd0 = -1;
+ int hv_cfg_fd1 = -1;
+ int hv_mem_fd = -1;
+ char name[32];
+ struct pci_controller *controller;
+
+ /*
+ * Open the fd to the HV. If it fails then this
+ * device doesn't exist.
+ */
+ hv_cfg_fd0 = tile_pcie_open(i, 0);
+ if (hv_cfg_fd0 < 0)
+ continue;
+ hv_cfg_fd1 = tile_pcie_open(i, 1);
+ if (hv_cfg_fd1 < 0) {
+ pr_err("PCI: Couldn't open config fd to HV "
+ "for controller %d\n", i);
+ goto err_cont;
+ }
+
+ sprintf(name, "pcie/%d/mem", i);
+ hv_mem_fd = hv_dev_open((HV_VirtAddr)name, 0);
+ if (hv_mem_fd < 0) {
+ pr_err("PCI: Could not open mem fd to HV!\n");
+ goto err_cont;
+ }
+
+ pr_info("PCI: Found PCI controller #%d\n", i);
+
+ controller = &controllers[num_controllers];
+
+ if (tile_init_irqs(i, controller)) {
+ pr_err("PCI: Could not initialize "
+ "IRQs, aborting.\n");
+ goto err_cont;
+ }
+
+ controller->index = num_controllers;
+ controller->hv_cfg_fd[0] = hv_cfg_fd0;
+ controller->hv_cfg_fd[1] = hv_cfg_fd1;
+ controller->hv_mem_fd = hv_mem_fd;
+ controller->first_busno = 0;
+ controller->last_busno = 0xff;
+ controller->ops = &tile_cfg_ops;
+
+ num_controllers++;
+ continue;
+
+err_cont:
+ if (hv_cfg_fd0 >= 0)
+ hv_dev_close(hv_cfg_fd0);
+ if (hv_cfg_fd1 >= 0)
+ hv_dev_close(hv_cfg_fd1);
+ if (hv_mem_fd >= 0)
+ hv_dev_close(hv_mem_fd);
+ continue;
+ }
+
+ /*
+ * Before using the PCIe, see if we need to do any platform-specific
+ * configuration, such as the PLX switch Gen 1 issue on TILEmpower.
+ */
+ for (i = 0; i < num_controllers; i++) {
+ struct pci_controller *controller = &controllers[i];
+
+ if (controller->plx_gen1)
+ tile_plx_gen1 = 1;
+ }
+
+ return num_controllers;
+}
+
+/*
+ * (pin - 1) converts from the PCI standard's [1:4] convention to
+ * a normal [0:3] range.
+ */
+static int tile_map_irq(struct pci_dev *dev, u8 slot, u8 pin)
+{
+ struct pci_controller *controller =
+ (struct pci_controller *)dev->sysdata;
+ return (pin - 1) + controller->irq_base;
+}
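Hypothetical driver-side usage (the function name and flags are assumptions, not from this patch): once pcibios_init() below has run pci_fixup_irqs() with tile_map_irq, dev->irq already holds irq_base + (pin - 1), so a driver simply requests that number:

/* Assumes <linux/interrupt.h> and <linux/pci.h>. */
static int example_request_intx(struct pci_dev *pdev, irq_handler_t handler)
{
	/* pdev->irq was filled in via pci_common_swizzle + tile_map_irq */
	return request_irq(pdev->irq, handler, IRQF_SHARED, "example", pdev);
}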
+
+
+static void __init fixup_read_and_payload_sizes(void)
+{
+ struct pci_dev *dev = NULL;
+ int smallest_max_payload = 0x1; /* Tile maxes out at 256 bytes. */
+ int max_read_size = 0x2; /* Limit to 512 byte reads. */
+ u16 new_values;
+
+ /* Scan for the smallest maximum payload size. */
+ while ((dev = pci_get_device(PCI_ANY_ID, PCI_ANY_ID, dev)) != NULL) {
+ int pcie_caps_offset;
+ u32 devcap;
+ int max_payload;
+
+ pcie_caps_offset = pci_find_capability(dev, PCI_CAP_ID_EXP);
+ if (pcie_caps_offset == 0)
+ continue;
+
+ pci_read_config_dword(dev, pcie_caps_offset + PCI_EXP_DEVCAP,
+ &devcap);
+ max_payload = devcap & PCI_EXP_DEVCAP_PAYLOAD;
+ if (max_payload < smallest_max_payload)
+ smallest_max_payload = max_payload;
+ }
+
+ /* Now, set the max_payload_size for all devices to that value. */
+ new_values = (max_read_size << 12) | (smallest_max_payload << 5);
+ while ((dev = pci_get_device(PCI_ANY_ID, PCI_ANY_ID, dev)) != NULL) {
+ int pcie_caps_offset;
+ u16 devctl;
+
+ pcie_caps_offset = pci_find_capability(dev, PCI_CAP_ID_EXP);
+ if (pcie_caps_offset == 0)
+ continue;
+
+ pci_read_config_word(dev, pcie_caps_offset + PCI_EXP_DEVCTL,
+ &devctl);
+ devctl &= ~(PCI_EXP_DEVCTL_PAYLOAD | PCI_EXP_DEVCTL_READRQ);
+ devctl |= new_values;
+ pci_write_config_word(dev, pcie_caps_offset + PCI_EXP_DEVCTL,
+ devctl);
+ }
+}
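A worked computation of the packed DEVCTL value, for illustration only (the helper name is made up): with the defaults above, 0x2 selects 512-byte reads and 0x1 selects a 256-byte payload.

static u16 example_devctl_bits(void)
{
	int max_read_size = 0x2;		/* 512-byte max read request */
	int smallest_max_payload = 0x1;		/* 256-byte max payload */

	/* (0x2 << 12) | (0x1 << 5) == 0x2020 */
	return (max_read_size << 12) | (smallest_max_payload << 5);
}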
+
+
+/*
+ * Second PCI initialization entry point, called by subsys_initcall.
+ *
+ * The controllers have been set up by the time we get here, by a call to
+ * tile_pci_init.
+ */
+static int __init pcibios_init(void)
+{
+ int i;
+
+ pr_info("PCI: Probing PCI hardware\n");
+
+ /*
+ * Delay a bit in case devices aren't ready. Some devices are
+ * known to require at least 20ms here, but we use a more
+ * conservative value.
+ */
+ mdelay(250);
+
+ /* Scan all of the recorded PCI controllers. */
+ for (i = 0; i < num_controllers; i++) {
+ struct pci_controller *controller = &controllers[i];
+ struct pci_bus *bus;
+
+ pr_info("PCI: initializing controller #%d\n", i);
+
+ /*
+ * This comes from the generic Linux PCI driver.
+ *
+ * It reads the PCI tree for this bus into the Linux
+ * data structures.
+ *
+ * This is inlined in linux/pci.h and calls into
+ * pci_scan_bus_parented() in probe.c.
+ */
+ bus = pci_scan_bus(0, controller->ops, controller);
+ controller->root_bus = bus;
+ controller->last_busno = bus->subordinate;
+
+ }
+
+ /* Do machine dependent PCI interrupt routing */
+ pci_fixup_irqs(pci_common_swizzle, tile_map_irq);
+
+ /*
+ * This comes from the generic Linux PCI driver.
+ *
+ * It allocates all of the resources (I/O memory, etc)
+ * associated with the devices read in above.
+ */
+
+ pci_assign_unassigned_resources();
+
+ /* Configure the max_read_size and max_payload_size values. */
+ fixup_read_and_payload_sizes();
+
+ /* Record the I/O resources in the PCI controller structure. */
+ for (i = 0; i < num_controllers; i++) {
+ struct pci_bus *root_bus = controllers[i].root_bus;
+ struct pci_bus *next_bus;
+ struct pci_dev *dev;
+
+ list_for_each_entry(dev, &root_bus->devices, bus_list) {
+ /* Find the PCI host controller, ie. the 1st bridge. */
+ if ((dev->class >> 8) == PCI_CLASS_BRIDGE_PCI &&
+ (PCI_SLOT(dev->devfn) == 0)) {
+ next_bus = dev->subordinate;
+ controllers[i].mem_resources[0] =
+ *next_bus->resource[0];
+ controllers[i].mem_resources[1] =
+ *next_bus->resource[1];
+ controllers[i].mem_resources[2] =
+ *next_bus->resource[2];
+
+ break;
+ }
+ }
+
+ }
+
+ return 0;
+}
+subsys_initcall(pcibios_init);
+
+/*
+ * No bus fixups needed.
+ */
+void __devinit pcibios_fixup_bus(struct pci_bus *bus)
+{
+ /* Nothing needs to be done. */
+}
+
+/*
+ * This can be called from the generic PCI layer, but doesn't need to
+ * do anything.
+ */
+char __devinit *pcibios_setup(char *str)
+{
+ /* Nothing needs to be done. */
+ return str;
+}
+
+/*
+ * This is called from the generic Linux layer.
+ */
+void __init pcibios_update_irq(struct pci_dev *dev, int irq)
+{
+ pci_write_config_byte(dev, PCI_INTERRUPT_LINE, irq);
+}
+
+/*
+ * Enable memory and/or address decoding, as appropriate, for the
+ * device described by the 'dev' struct.
+ *
+ * This is called from the generic PCI layer, and can be called
+ * for bridges or endpoints.
+ */
+int pcibios_enable_device(struct pci_dev *dev, int mask)
+{
+ u16 cmd, old_cmd;
+ u8 header_type;
+ int i;
+ struct resource *r;
+
+ pci_read_config_byte(dev, PCI_HEADER_TYPE, &header_type);
+
+ pci_read_config_word(dev, PCI_COMMAND, &cmd);
+ old_cmd = cmd;
+ if ((header_type & 0x7F) == PCI_HEADER_TYPE_BRIDGE) {
+ /*
+ * For bridges, we enable both memory and I/O decoding
+ * in all cases.
+ */
+ cmd |= PCI_COMMAND_IO;
+ cmd |= PCI_COMMAND_MEMORY;
+ } else {
+ /*
+ * For endpoints, we enable memory and/or I/O decoding
+ * only if they have a memory resource of that type.
+ */
+ for (i = 0; i < 6; i++) {
+ r = &dev->resource[i];
+ if (r->flags & IORESOURCE_UNSET) {
+ pr_err("PCI: Device %s not available "
+ "because of resource collisions\n",
+ pci_name(dev));
+ return -EINVAL;
+ }
+ if (r->flags & IORESOURCE_IO)
+ cmd |= PCI_COMMAND_IO;
+ if (r->flags & IORESOURCE_MEM)
+ cmd |= PCI_COMMAND_MEMORY;
+ }
+ }
+
+ /*
+ * We only write the command if it changed.
+ */
+ if (cmd != old_cmd)
+ pci_write_config_word(dev, PCI_COMMAND, cmd);
+ return 0;
+}
+
+void __iomem *pci_iomap(struct pci_dev *dev, int bar, unsigned long max)
+{
+ unsigned long start = pci_resource_start(dev, bar);
+ unsigned long len = pci_resource_len(dev, bar);
+ unsigned long flags = pci_resource_flags(dev, bar);
+
+ if (!len)
+ return NULL;
+ if (max && len > max)
+ len = max;
+
+ if (!(flags & IORESOURCE_MEM)) {
+ pr_info("PCI: Trying to map invalid resource %#lx\n", flags);
+ start = 0;
+ }
+
+ return (void __iomem *)start;
+}
+EXPORT_SYMBOL(pci_iomap);
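A hypothetical driver-side sketch (the 0x40 offset and helper name are invented): because pci_iomap() here just hands back the BAR's bus address, the readl() presumably ends up in the _tile_readl() helper defined at the end of this file.

static u32 example_read_bar0_reg(struct pci_dev *pdev)
{
	void __iomem *base = pci_iomap(pdev, 0, 0);	/* whole BAR 0 */

	if (!base)
		return 0;
	return readl(base + 0x40);	/* hypothetical register offset */
}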
+
+
+/****************************************************************
+ *
+ * Tile PCI config space read/write routines
+ *
+ ****************************************************************/
+
+/*
+ * These are the normal read and write ops.
+ * They are expanded with macros from pci_bus_read_config_byte() etc.
+ *
+ * devfn is the combined PCI slot & function.
+ *
+ * offset is in bytes, from the start of config space for the
+ * specified bus & slot.
+ */
+
+static int __devinit tile_cfg_read(struct pci_bus *bus,
+ unsigned int devfn,
+ int offset,
+ int size,
+ u32 *val)
+{
+ struct pci_controller *controller = bus->sysdata;
+ int busnum = bus->number & 0xff;
+ int slot = (devfn >> 3) & 0x1f;
+ int function = devfn & 0x7;
+ u32 addr;
+ int config_mode = 1;
+
+ /*
+ * There is no bridge between the Tile and bus 0, so we
+ * use config0 to talk to bus 0.
+ *
+ * If we're talking to a bus other than zero then we
+ * must have found a bridge.
+ */
+ if (busnum == 0) {
+ /*
+ * We fake an empty slot for (busnum == 0) && (slot > 0),
+ * since there is only one slot on bus 0.
+ */
+ if (slot) {
+ *val = 0xFFFFFFFF;
+ return 0;
+ }
+ config_mode = 0;
+ }
+
+ addr = busnum << 20; /* Bus in 27:20 */
+ addr |= slot << 15; /* Slot (device) in 19:15 */
+ addr |= function << 12; /* Function is in 14:12 */
+ addr |= (offset & 0xFFF); /* byte address in 0:11 */
+
+ return hv_dev_pread(controller->hv_cfg_fd[config_mode], 0,
+ (HV_VirtAddr)(val), size, addr);
+}
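For illustration, plugging arbitrary values into the encoding above (bus 1, device 2, function 3, offset 0x10) packs to 0x113010; a minimal sketch with an invented helper name:

static u32 example_cfg_addr(void)
{
	u32 busnum = 1, slot = 2, function = 3, offset = 0x10;

	return (busnum << 20) | (slot << 15) |
	       (function << 12) | (offset & 0xFFF);	/* == 0x113010 */
}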
+
+
+/*
+ * See tile_cfg_read() for relevant comments.
+ * Note that "val" is the value to write, not a pointer to that value.
+ */
+static int __devinit tile_cfg_write(struct pci_bus *bus,
+ unsigned int devfn,
+ int offset,
+ int size,
+ u32 val)
+{
+ struct pci_controller *controller = bus->sysdata;
+ int busnum = bus->number & 0xff;
+ int slot = (devfn >> 3) & 0x1f;
+ int function = devfn & 0x7;
+ u32 addr;
+ int config_mode = 1;
+ HV_VirtAddr valp = (HV_VirtAddr)&val;
+
+ /*
+ * For bus 0 slot 0 we use config 0 accesses.
+ */
+ if (busnum == 0) {
+ /*
+ * We fake an empty slot for (busnum == 0) && (slot > 0),
+ * since there is only one slot on bus 0.
+ */
+ if (slot)
+ return 0;
+ config_mode = 0;
+ }
+
+ addr = busnum << 20; /* Bus in 27:20 */
+ addr |= slot << 15; /* Slot (device) in 19:15 */
+ addr |= function << 12; /* Function is in 14:12 */
+ addr |= (offset & 0xFFF); /* byte address in 0:11 */
+
+#ifdef __BIG_ENDIAN
+ /* Point to the correct part of the 32-bit "val". */
+ valp += 4 - size;
+#endif
+
+ return hv_dev_pwrite(controller->hv_cfg_fd[config_mode], 0,
+ valp, size, addr);
+}
+
+
+static struct pci_ops tile_cfg_ops = {
+ .read = tile_cfg_read,
+ .write = tile_cfg_write,
+};
+
+
+/*
+ * In the following, each PCI controller's mem_resources[1]
+ * represents its (non-prefetchable) PCI memory resource.
+ * mem_resources[0] and mem_resources[2] refer to its PCI I/O and
+ * prefetchable PCI memory resources, respectively.
+ * For more details, see pci_setup_bridge() in setup-bus.c.
+ * By comparing the target PCI memory address against the
+ * end address of controller 0, we can determine the controller
+ * that should accept the PCI memory access.
+ */
+#define TILE_READ(size, type) \
+type _tile_read##size(unsigned long addr) \
+{ \
+ type val; \
+ int idx = 0; \
+ if (addr > controllers[0].mem_resources[1].end && \
+ addr > controllers[0].mem_resources[2].end) \
+ idx = 1; \
+ if (hv_dev_pread(controllers[idx].hv_mem_fd, 0, \
+ (HV_VirtAddr)(&val), sizeof(type), addr)) \
+ pr_err("PCI: read %zd bytes at 0x%lX failed\n", \
+ sizeof(type), addr); \
+ return val; \
+} \
+EXPORT_SYMBOL(_tile_read##size)
+
+TILE_READ(b, u8);
+TILE_READ(w, u16);
+TILE_READ(l, u32);
+TILE_READ(q, u64);
+
+#define TILE_WRITE(size, type) \
+void _tile_write##size(type val, unsigned long addr) \
+{ \
+ int idx = 0; \
+ if (addr > controllers[0].mem_resources[1].end && \
+ addr > controllers[0].mem_resources[2].end) \
+ idx = 1; \
+ if (hv_dev_pwrite(controllers[idx].hv_mem_fd, 0, \
+ (HV_VirtAddr)(&val), sizeof(type), addr)) \
+ pr_err("PCI: write %zd bytes at 0x%lX failed\n", \
+ sizeof(type), addr); \
+} \
+EXPORT_SYMBOL(_tile_write##size)
+
+TILE_WRITE(b, u8);
+TILE_WRITE(w, u16);
+TILE_WRITE(l, u32);
+TILE_WRITE(q, u64);
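For reference, TILE_READ(l, u32) above expands (modulo whitespace) to the function below, which presumably backs the 32-bit MMIO read path:

u32 _tile_readl(unsigned long addr)
{
	u32 val;
	int idx = 0;

	if (addr > controllers[0].mem_resources[1].end &&
	    addr > controllers[0].mem_resources[2].end)
		idx = 1;
	if (hv_dev_pread(controllers[idx].hv_mem_fd, 0,
			 (HV_VirtAddr)(&val), sizeof(u32), addr))
		pr_err("PCI: read %zd bytes at 0x%lX failed\n",
		       sizeof(u32), addr);
	return val;
}
EXPORT_SYMBOL(_tile_readl);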
diff --git a/arch/tile/kernel/process.c b/arch/tile/kernel/process.c
index 84c29111756c..8430f45daea6 100644
--- a/arch/tile/kernel/process.c
+++ b/arch/tile/kernel/process.c
@@ -214,9 +214,10 @@ int copy_thread(unsigned long clone_flags, unsigned long sp,
/*
* Copy the callee-saved registers from the passed pt_regs struct
* into the context-switch callee-saved registers area.
- * We have to restore the callee-saved registers since we may
- * be cloning a userspace task with userspace register state,
- * and we won't be unwinding the same kernel frames to restore them.
+ * This way, when we start the interrupt-return sequence, the
+ * callee-saved registers will already be in registers, which is
+ * how we assume the compiler leaves them as we start the normal
+ * return-from-interrupt path after calling C code.
* Zero out the C ABI save area to mark the top of the stack.
*/
ksp = (unsigned long) childregs;
@@ -304,15 +305,25 @@ int dump_task_regs(struct task_struct *tsk, elf_gregset_t *regs)
/* Allow user processes to access the DMA SPRs */
void grant_dma_mpls(void)
{
+#if CONFIG_KERNEL_PL == 2
+ __insn_mtspr(SPR_MPL_DMA_CPL_SET_1, 1);
+ __insn_mtspr(SPR_MPL_DMA_NOTIFY_SET_1, 1);
+#else
__insn_mtspr(SPR_MPL_DMA_CPL_SET_0, 1);
__insn_mtspr(SPR_MPL_DMA_NOTIFY_SET_0, 1);
+#endif
}
/* Forbid user processes from accessing the DMA SPRs */
void restrict_dma_mpls(void)
{
+#if CONFIG_KERNEL_PL == 2
+ __insn_mtspr(SPR_MPL_DMA_CPL_SET_2, 1);
+ __insn_mtspr(SPR_MPL_DMA_NOTIFY_SET_2, 1);
+#else
__insn_mtspr(SPR_MPL_DMA_CPL_SET_1, 1);
__insn_mtspr(SPR_MPL_DMA_NOTIFY_SET_1, 1);
+#endif
}
/* Pause the DMA engine, then save off its state registers. */
@@ -523,19 +534,14 @@ struct task_struct *__sched _switch_to(struct task_struct *prev,
* Switch kernel SP, PC, and callee-saved registers.
* In the context of the new task, return the old task pointer
* (i.e. the task that actually called __switch_to).
- * Pass the value to use for SYSTEM_SAVE_1_0 when we reset our sp.
+ * Pass the value to use for SYSTEM_SAVE_K_0 when we reset our sp.
*/
return __switch_to(prev, next, next_current_ksp0(next));
}
-long _sys_fork(struct pt_regs *regs)
-{
- return do_fork(SIGCHLD, regs->sp, regs, 0, NULL, NULL);
-}
-
-long _sys_clone(unsigned long clone_flags, unsigned long newsp,
- void __user *parent_tidptr, void __user *child_tidptr,
- struct pt_regs *regs)
+SYSCALL_DEFINE5(clone, unsigned long, clone_flags, unsigned long, newsp,
+ void __user *, parent_tidptr, void __user *, child_tidptr,
+ struct pt_regs *, regs)
{
if (!newsp)
newsp = regs->sp;
@@ -543,18 +549,13 @@ long _sys_clone(unsigned long clone_flags, unsigned long newsp,
parent_tidptr, child_tidptr);
}
-long _sys_vfork(struct pt_regs *regs)
-{
- return do_fork(CLONE_VFORK | CLONE_VM | SIGCHLD, regs->sp,
- regs, 0, NULL, NULL);
-}
-
/*
* sys_execve() executes a new program.
*/
-long _sys_execve(const char __user *path,
- const char __user *const __user *argv,
- const char __user *const __user *envp, struct pt_regs *regs)
+SYSCALL_DEFINE4(execve, const char __user *, path,
+ const char __user *const __user *, argv,
+ const char __user *const __user *, envp,
+ struct pt_regs *, regs)
{
long error;
char *filename;
@@ -570,9 +571,10 @@ out:
}
#ifdef CONFIG_COMPAT
-long _compat_sys_execve(const char __user *path,
- const compat_uptr_t __user *argv,
- const compat_uptr_t __user *envp, struct pt_regs *regs)
+long compat_sys_execve(const char __user *path,
+ const compat_uptr_t __user *argv,
+ const compat_uptr_t __user *envp,
+ struct pt_regs *regs)
{
long error;
char *filename;
diff --git a/arch/tile/kernel/ptrace.c b/arch/tile/kernel/ptrace.c
index 7161bd03d2fd..e92e40527d6d 100644
--- a/arch/tile/kernel/ptrace.c
+++ b/arch/tile/kernel/ptrace.c
@@ -32,25 +32,6 @@ void user_disable_single_step(struct task_struct *child)
}
/*
- * This routine will put a word on the process's privileged stack.
- */
-static void putreg(struct task_struct *task,
- unsigned long addr, unsigned long value)
-{
- unsigned int regno = addr / sizeof(unsigned long);
- struct pt_regs *childregs = task_pt_regs(task);
- childregs->regs[regno] = value;
- childregs->flags |= PT_FLAGS_RESTORE_REGS;
-}
-
-static unsigned long getreg(struct task_struct *task, unsigned long addr)
-{
- unsigned int regno = addr / sizeof(unsigned long);
- struct pt_regs *childregs = task_pt_regs(task);
- return childregs->regs[regno];
-}
-
-/*
* Called by kernel/ptrace.c when detaching..
*/
void ptrace_disable(struct task_struct *child)
@@ -64,61 +45,80 @@ void ptrace_disable(struct task_struct *child)
clear_tsk_thread_flag(child, TIF_SYSCALL_TRACE);
}
-long arch_ptrace(struct task_struct *child, long request, long addr, long data)
+long arch_ptrace(struct task_struct *child, long request,
+ unsigned long addr, unsigned long data)
{
- unsigned long __user *datap;
+ unsigned long __user *datap = (long __user __force *)data;
unsigned long tmp;
- int i;
long ret = -EIO;
-
-#ifdef CONFIG_COMPAT
- if (task_thread_info(current)->status & TS_COMPAT)
- data = (u32)data;
- if (task_thread_info(child)->status & TS_COMPAT)
- addr = (u32)addr;
-#endif
- datap = (unsigned long __user __force *)data;
+ char *childreg;
+ struct pt_regs copyregs;
+ int ex1_offset;
switch (request) {
case PTRACE_PEEKUSR: /* Read register from pt_regs. */
- if (addr & (sizeof(data)-1))
- break;
- if (addr < 0 || addr >= PTREGS_SIZE)
+ if (addr >= PTREGS_SIZE)
break;
- tmp = getreg(child, addr); /* Read register */
- ret = put_user(tmp, datap);
+ childreg = (char *)task_pt_regs(child) + addr;
+#ifdef CONFIG_COMPAT
+ if (is_compat_task()) {
+ if (addr & (sizeof(compat_long_t)-1))
+ break;
+ ret = put_user(*(compat_long_t *)childreg,
+ (compat_long_t __user *)datap);
+ } else
+#endif
+ {
+ if (addr & (sizeof(long)-1))
+ break;
+ ret = put_user(*(long *)childreg, datap);
+ }
break;
case PTRACE_POKEUSR: /* Write register in pt_regs. */
- if (addr & (sizeof(data)-1))
+ if (addr >= PTREGS_SIZE)
break;
- if (addr < 0 || addr >= PTREGS_SIZE)
- break;
- putreg(child, addr, data); /* Write register */
+ childreg = (char *)task_pt_regs(child) + addr;
+
+ /* Guard against overwrites of the privilege level. */
+ ex1_offset = PTREGS_OFFSET_EX1;
+#if defined(CONFIG_COMPAT) && defined(__BIG_ENDIAN)
+ if (is_compat_task()) /* point at low word */
+ ex1_offset += sizeof(compat_long_t);
+#endif
+ if (addr == ex1_offset)
+ data = PL_ICS_EX1(USER_PL, EX1_ICS(data));
+
+#ifdef CONFIG_COMPAT
+ if (is_compat_task()) {
+ if (addr & (sizeof(compat_long_t)-1))
+ break;
+ *(compat_long_t *)childreg = data;
+ } else
+#endif
+ {
+ if (addr & (sizeof(long)-1))
+ break;
+ *(long *)childreg = data;
+ }
ret = 0;
break;
case PTRACE_GETREGS: /* Get all registers from the child. */
- if (!access_ok(VERIFY_WRITE, datap, PTREGS_SIZE))
- break;
- for (i = 0; i < PTREGS_SIZE; i += sizeof(long)) {
- ret = __put_user(getreg(child, i), datap);
- if (ret != 0)
- break;
- datap++;
+ if (copy_to_user(datap, task_pt_regs(child),
+ sizeof(struct pt_regs)) == 0) {
+ ret = 0;
}
break;
case PTRACE_SETREGS: /* Set all registers in the child. */
- if (!access_ok(VERIFY_READ, datap, PTREGS_SIZE))
- break;
- for (i = 0; i < PTREGS_SIZE; i += sizeof(long)) {
- ret = __get_user(tmp, datap);
- if (ret != 0)
- break;
- putreg(child, i, tmp);
- datap++;
+ if (copy_from_user(&copyregs, datap,
+ sizeof(struct pt_regs)) == 0) {
+ copyregs.ex1 =
+ PL_ICS_EX1(USER_PL, EX1_ICS(copyregs.ex1));
+ *task_pt_regs(child) = copyregs;
+ ret = 0;
}
break;
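A hypothetical userspace sketch of the peek path above (the byte offset is arbitrary; on this port PTRACE_PEEKUSR addresses are byte offsets into pt_regs, long-sized for native tracees and compat_long_t-sized for compat ones):

#include <errno.h>
#include <sys/ptrace.h>
#include <sys/types.h>

static long peek_child_reg(pid_t pid, unsigned long byte_offset)
{
	errno = 0;
	/* glibc returns the peeked word directly for PTRACE_PEEKUSER */
	return ptrace(PTRACE_PEEKUSER, pid, (void *)byte_offset, NULL);
}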
diff --git a/arch/tile/kernel/reboot.c b/arch/tile/kernel/reboot.c
index acd86d20beba..baa3d905fee2 100644
--- a/arch/tile/kernel/reboot.c
+++ b/arch/tile/kernel/reboot.c
@@ -27,7 +27,7 @@
void machine_halt(void)
{
warn_early_printk();
- raw_local_irq_disable_all();
+ arch_local_irq_disable_all();
smp_send_stop();
hv_halt();
}
@@ -35,14 +35,14 @@ void machine_halt(void)
void machine_power_off(void)
{
warn_early_printk();
- raw_local_irq_disable_all();
+ arch_local_irq_disable_all();
smp_send_stop();
hv_power_off();
}
void machine_restart(char *cmd)
{
- raw_local_irq_disable_all();
+ arch_local_irq_disable_all();
smp_send_stop();
hv_restart((HV_VirtAddr) "vmlinux", (HV_VirtAddr) cmd);
}
diff --git a/arch/tile/kernel/regs_32.S b/arch/tile/kernel/regs_32.S
index e88d6e122783..caa13101c264 100644
--- a/arch/tile/kernel/regs_32.S
+++ b/arch/tile/kernel/regs_32.S
@@ -85,7 +85,7 @@ STD_ENTRY_SECTION(__switch_to, .sched.text)
{
/* Update sp and ksp0 simultaneously to avoid backtracer warnings. */
move sp, r13
- mtspr SYSTEM_SAVE_1_0, r2
+ mtspr SPR_SYSTEM_SAVE_K_0, r2
}
FOR_EACH_CALLEE_SAVED_REG(LOAD_REG)
.L__switch_to_pc:
diff --git a/arch/tile/kernel/setup.c b/arch/tile/kernel/setup.c
index e7d54c73d5c1..f18573643ed1 100644
--- a/arch/tile/kernel/setup.c
+++ b/arch/tile/kernel/setup.c
@@ -30,8 +30,6 @@
#include <linux/timex.h>
#include <asm/setup.h>
#include <asm/sections.h>
-#include <asm/sections.h>
-#include <asm/cacheflush.h>
#include <asm/cacheflush.h>
#include <asm/pgalloc.h>
#include <asm/mmu_context.h>
@@ -187,11 +185,11 @@ early_param("vmalloc", parse_vmalloc);
#ifdef CONFIG_HIGHMEM
/*
- * Determine for each controller where its lowmem is mapped and how
- * much of it is mapped there. On controller zero, the first few
- * megabytes are mapped at 0xfd000000 as code, so in principle we
- * could start our data mappings higher up, but for now we don't
- * bother, to avoid additional confusion.
+ * Determine for each controller where its lowmem is mapped and how much of
+ * it is mapped there. On controller zero, the first few megabytes are
+ * already mapped in as code at MEM_SV_INTRPT, so in principle we could
+ * start our data mappings higher up, but for now we don't bother, to avoid
+ * additional confusion.
*
* One question is whether, on systems with more than 768 Mb and
* controllers of different sizes, to map in a proportionate amount of
@@ -311,7 +309,7 @@ static void __init setup_memory(void)
#endif
/* We are using a char to hold the cpu_2_node[] mapping */
- BUG_ON(MAX_NUMNODES > 127);
+ BUILD_BUG_ON(MAX_NUMNODES > 127);
/* Discover the ranges of memory available to us */
for (i = 0; ; ++i) {
@@ -842,7 +840,7 @@ static int __init topology_init(void)
for_each_online_node(i)
register_one_node(i);
- for_each_present_cpu(i)
+ for (i = 0; i < smp_height * smp_width; ++i)
register_cpu(&cpu_devices[i], i);
return 0;
@@ -870,11 +868,14 @@ void __cpuinit setup_cpu(int boot)
/* Allow asynchronous TLB interrupts. */
#if CHIP_HAS_TILE_DMA()
- raw_local_irq_unmask(INT_DMATLB_MISS);
- raw_local_irq_unmask(INT_DMATLB_ACCESS);
+ arch_local_irq_unmask(INT_DMATLB_MISS);
+ arch_local_irq_unmask(INT_DMATLB_ACCESS);
#endif
#if CHIP_HAS_SN_PROC()
- raw_local_irq_unmask(INT_SNITLB_MISS);
+ arch_local_irq_unmask(INT_SNITLB_MISS);
+#endif
+#ifdef __tilegx__
+ arch_local_irq_unmask(INT_SINGLE_STEP_K);
#endif
/*
@@ -893,11 +894,12 @@ void __cpuinit setup_cpu(int boot)
#endif
/*
- * Set the MPL for interrupt control 0 to user level.
- * This includes access to the SYSTEM_SAVE and EX_CONTEXT SPRs,
- * as well as the PL 0 interrupt mask.
+ * Set the MPL for interrupt control 0 & 1 to the corresponding
+ * values. This includes access to the SYSTEM_SAVE and EX_CONTEXT
+ * SPRs, as well as the interrupt mask.
*/
__insn_mtspr(SPR_MPL_INTCTRL_0_SET_0, 1);
+ __insn_mtspr(SPR_MPL_INTCTRL_1_SET_1, 1);
/* Initialize IRQ support for this cpu. */
setup_irq_regs();
@@ -1033,7 +1035,7 @@ static void __init validate_va(void)
* In addition, make sure we CAN'T use the end of memory, since
* we use the last chunk of each pgd for the pgd_list.
*/
- int i, fc_fd_ok = 0;
+ int i, user_kernel_ok = 0;
unsigned long max_va = 0;
unsigned long list_va =
((PGD_LIST_OFFSET / sizeof(pgd_t)) << PGDIR_SHIFT);
@@ -1044,13 +1046,13 @@ static void __init validate_va(void)
break;
if (range.start <= MEM_USER_INTRPT &&
range.start + range.size >= MEM_HV_INTRPT)
- fc_fd_ok = 1;
+ user_kernel_ok = 1;
if (range.start == 0)
max_va = range.size;
BUG_ON(range.start + range.size > list_va);
}
- if (!fc_fd_ok)
- early_panic("Hypervisor not configured for VAs 0xfc/0xfd\n");
+ if (!user_kernel_ok)
+ early_panic("Hypervisor not configured for user/kernel VAs\n");
if (max_va == 0)
early_panic("Hypervisor not configured for low VAs\n");
if (max_va < KERNEL_HIGH_VADDR)
@@ -1334,6 +1336,10 @@ static void __init pcpu_fc_populate_pte(unsigned long addr)
pte_t *pte;
BUG_ON(pgd_addr_invalid(addr));
+ if (addr < VMALLOC_START || addr >= VMALLOC_END)
+ panic("PCPU addr %#lx outside vmalloc range %#lx..%#lx;"
+ " try increasing CONFIG_VMALLOC_RESERVE\n",
+ addr, VMALLOC_START, VMALLOC_END);
pgd = swapper_pg_dir + pgd_index(addr);
pud = pud_offset(pgd, addr);
diff --git a/arch/tile/kernel/signal.c b/arch/tile/kernel/signal.c
index ce183aa1492c..757407e36696 100644
--- a/arch/tile/kernel/signal.c
+++ b/arch/tile/kernel/signal.c
@@ -16,7 +16,6 @@
#include <linux/sched.h>
#include <linux/mm.h>
#include <linux/smp.h>
-#include <linux/smp_lock.h>
#include <linux/kernel.h>
#include <linux/signal.h>
#include <linux/errno.h>
@@ -41,8 +40,8 @@
#define _BLOCKABLE (~(sigmask(SIGKILL) | sigmask(SIGSTOP)))
-long _sys_sigaltstack(const stack_t __user *uss,
- stack_t __user *uoss, struct pt_regs *regs)
+SYSCALL_DEFINE3(sigaltstack, const stack_t __user *, uss,
+ stack_t __user *, uoss, struct pt_regs *, regs)
{
return do_sigaltstack(uss, uoss, regs->sp);
}
@@ -71,6 +70,9 @@ int restore_sigcontext(struct pt_regs *regs,
for (i = 0; i < sizeof(struct pt_regs)/sizeof(long); ++i)
err |= __get_user(regs->regs[i], &sc->gregs[i]);
+ /* Ensure that the PL is always set to USER_PL. */
+ regs->ex1 = PL_ICS_EX1(USER_PL, EX1_ICS(regs->ex1));
+
regs->faultnum = INT_SWINT_1_SIGRETURN;
err |= __get_user(*pr0, &sc->gregs[0]);
@@ -78,7 +80,7 @@ int restore_sigcontext(struct pt_regs *regs,
}
/* sigreturn() returns long since it restores r0 in the interrupted code. */
-long _sys_rt_sigreturn(struct pt_regs *regs)
+SYSCALL_DEFINE1(rt_sigreturn, struct pt_regs *, regs)
{
struct rt_sigframe __user *frame =
(struct rt_sigframe __user *)(regs->sp);
@@ -330,7 +332,7 @@ void do_signal(struct pt_regs *regs)
current_thread_info()->status &= ~TS_RESTORE_SIGMASK;
}
- return;
+ goto done;
}
/* Did we come from a system call? */
@@ -358,4 +360,8 @@ void do_signal(struct pt_regs *regs)
current_thread_info()->status &= ~TS_RESTORE_SIGMASK;
sigprocmask(SIG_SETMASK, &current->saved_sigmask, NULL);
}
+
+done:
+ /* Avoid double syscall restart if there are nested signals. */
+ regs->faultnum = INT_SWINT_1_SIGRETURN;
}
diff --git a/arch/tile/kernel/single_step.c b/arch/tile/kernel/single_step.c
index 5ec4b9c651f2..1eb3b39e36c7 100644
--- a/arch/tile/kernel/single_step.c
+++ b/arch/tile/kernel/single_step.c
@@ -15,7 +15,7 @@
* Derived from iLib's single-stepping code.
*/
-#ifndef __tilegx__ /* No support for single-step yet. */
+#ifndef __tilegx__ /* Hardware support for single step unavailable. */
/* These functions are only used on the TILE platform */
#include <linux/slab.h>
@@ -660,4 +660,75 @@ void single_step_once(struct pt_regs *regs)
regs->pc += 8;
}
+#else
+#include <linux/smp.h>
+#include <linux/ptrace.h>
+#include <arch/spr_def.h>
+
+static DEFINE_PER_CPU(unsigned long, ss_saved_pc);
+
+
+/*
+ * Called directly on the occasion of an interrupt.
+ *
+ * If the process doesn't have single step set, then we use this as an
+ * opportunity to turn single step off.
+ *
+ * It has been mentioned that we could conditionally turn off single stepping
+ * on each entry into the kernel and rely on single_step_once to turn it
+ * on for the processes that matter (as we already do), but this
+ * implementation is somewhat more efficient in that we muck with registers
+ * once on a bum interrupt rather than on every entry into the kernel.
+ *
+ * If SINGLE_STEP_CONTROL_K has CANCELED set, then an interrupt occurred,
+ * so we have to run through this process again before we can say that an
+ * instruction has executed.
+ *
+ * swint will set CANCELED, but it's a legitimate instruction. Fortunately
+ * it changes the PC. If it hasn't changed, then we know that the interrupt
+ * wasn't generated by swint and we'll need to run this process again before
+ * we can say an instruction has executed.
+ *
+ * If either CANCELED == 0 or the PC's changed, we send out SIGTRAPs and get
+ * on with our lives.
+ */
+
+void gx_singlestep_handle(struct pt_regs *regs, int fault_num)
+{
+ unsigned long *ss_pc = &__get_cpu_var(ss_saved_pc);
+ struct thread_info *info = (void *)current_thread_info();
+ int is_single_step = test_ti_thread_flag(info, TIF_SINGLESTEP);
+ unsigned long control = __insn_mfspr(SPR_SINGLE_STEP_CONTROL_K);
+
+ if (is_single_step == 0) {
+ __insn_mtspr(SPR_SINGLE_STEP_EN_K_K, 0);
+
+ } else if ((*ss_pc != regs->pc) ||
+ (!(control & SPR_SINGLE_STEP_CONTROL_1__CANCELED_MASK))) {
+
+ ptrace_notify(SIGTRAP);
+ control |= SPR_SINGLE_STEP_CONTROL_1__CANCELED_MASK;
+ control |= SPR_SINGLE_STEP_CONTROL_1__INHIBIT_MASK;
+ __insn_mtspr(SPR_SINGLE_STEP_CONTROL_K, control);
+ }
+}
+
+
+/*
+ * Called from need_singlestep. Set up the control registers and the enable
+ * register, then return.
+ */
+
+void single_step_once(struct pt_regs *regs)
+{
+ unsigned long *ss_pc = &__get_cpu_var(ss_saved_pc);
+ unsigned long control = __insn_mfspr(SPR_SINGLE_STEP_CONTROL_K);
+
+ *ss_pc = regs->pc;
+ control |= SPR_SINGLE_STEP_CONTROL_1__CANCELED_MASK;
+ control |= SPR_SINGLE_STEP_CONTROL_1__INHIBIT_MASK;
+ __insn_mtspr(SPR_SINGLE_STEP_CONTROL_K, control);
+ __insn_mtspr(SPR_SINGLE_STEP_EN_K_K, 1 << USER_PL);
+}
+
#endif /* !__tilegx__ */
diff --git a/arch/tile/kernel/smp.c b/arch/tile/kernel/smp.c
index 1cb5ec79de04..9575b37a8b75 100644
--- a/arch/tile/kernel/smp.c
+++ b/arch/tile/kernel/smp.c
@@ -115,7 +115,7 @@ static void smp_start_cpu_interrupt(void)
static void smp_stop_cpu_interrupt(void)
{
set_cpu_online(smp_processor_id(), 0);
- raw_local_irq_disable_all();
+ arch_local_irq_disable_all();
for (;;)
asm("nap");
}
@@ -212,7 +212,7 @@ void __init ipi_init(void)
tile.x = cpu_x(cpu);
tile.y = cpu_y(cpu);
- if (hv_get_ipi_pte(tile, 1, &pte) != 0)
+ if (hv_get_ipi_pte(tile, KERNEL_PL, &pte) != 0)
panic("Failed to initialize IPI for cpu %d\n", cpu);
offset = hv_pte_get_pfn(pte) << PAGE_SHIFT;
diff --git a/arch/tile/kernel/smpboot.c b/arch/tile/kernel/smpboot.c
index 74d62d098edf..b949edcec200 100644
--- a/arch/tile/kernel/smpboot.c
+++ b/arch/tile/kernel/smpboot.c
@@ -18,7 +18,6 @@
#include <linux/mm.h>
#include <linux/sched.h>
#include <linux/kernel_stat.h>
-#include <linux/smp_lock.h>
#include <linux/bootmem.h>
#include <linux/notifier.h>
#include <linux/cpu.h>
diff --git a/arch/tile/kernel/stack.c b/arch/tile/kernel/stack.c
index ea2e0ce28380..0d54106be3d6 100644
--- a/arch/tile/kernel/stack.c
+++ b/arch/tile/kernel/stack.c
@@ -30,6 +30,10 @@
#include <arch/abi.h>
#include <arch/interrupts.h>
+#define KBT_ONGOING 0 /* Backtrace still ongoing */
+#define KBT_DONE 1 /* Backtrace cleanly completed */
+#define KBT_RUNNING 2 /* Can't run backtrace on a running task */
+#define KBT_LOOP 3 /* Backtrace entered a loop */
/* Is address on the specified kernel stack? */
static int in_kernel_stack(struct KBacktraceIterator *kbt, VirtualAddress sp)
@@ -207,11 +211,11 @@ static int KBacktraceIterator_next_item_inclusive(
for (;;) {
do {
if (!KBacktraceIterator_is_sigreturn(kbt))
- return 1;
+ return KBT_ONGOING;
} while (backtrace_next(&kbt->it));
if (!KBacktraceIterator_restart(kbt))
- return 0;
+ return KBT_DONE;
}
}
@@ -264,7 +268,7 @@ void KBacktraceIterator_init(struct KBacktraceIterator *kbt,
kbt->pgtable = NULL;
kbt->verbose = 0; /* override in caller if desired */
kbt->profile = 0; /* override in caller if desired */
- kbt->end = 0;
+ kbt->end = KBT_ONGOING;
kbt->new_context = 0;
if (is_current) {
HV_PhysAddr pgdir_pa = hv_inquire_context().page_table;
@@ -290,7 +294,7 @@ void KBacktraceIterator_init(struct KBacktraceIterator *kbt,
if (regs == NULL) {
if (is_current || t->state == TASK_RUNNING) {
/* Can't do this; we need registers */
- kbt->end = 1;
+ kbt->end = KBT_RUNNING;
return;
}
pc = get_switch_to_pc();
@@ -305,26 +309,29 @@ void KBacktraceIterator_init(struct KBacktraceIterator *kbt,
}
backtrace_init(&kbt->it, read_memory_func, kbt, pc, lr, sp, r52);
- kbt->end = !KBacktraceIterator_next_item_inclusive(kbt);
+ kbt->end = KBacktraceIterator_next_item_inclusive(kbt);
}
EXPORT_SYMBOL(KBacktraceIterator_init);
int KBacktraceIterator_end(struct KBacktraceIterator *kbt)
{
- return kbt->end;
+ return kbt->end != KBT_ONGOING;
}
EXPORT_SYMBOL(KBacktraceIterator_end);
void KBacktraceIterator_next(struct KBacktraceIterator *kbt)
{
+ VirtualAddress old_pc = kbt->it.pc, old_sp = kbt->it.sp;
kbt->new_context = 0;
- if (!backtrace_next(&kbt->it) &&
- !KBacktraceIterator_restart(kbt)) {
- kbt->end = 1;
- return;
- }
-
- kbt->end = !KBacktraceIterator_next_item_inclusive(kbt);
+ if (!backtrace_next(&kbt->it) && !KBacktraceIterator_restart(kbt)) {
+ kbt->end = KBT_DONE;
+ return;
+ }
+ kbt->end = KBacktraceIterator_next_item_inclusive(kbt);
+ if (old_pc == kbt->it.pc && old_sp == kbt->it.sp) {
+ /* Trapped in a loop; give up. */
+ kbt->end = KBT_LOOP;
+ }
}
EXPORT_SYMBOL(KBacktraceIterator_next);
@@ -387,6 +394,8 @@ void tile_show_stack(struct KBacktraceIterator *kbt, int headers)
break;
}
}
+ if (kbt->end == KBT_LOOP)
+ pr_err("Stack dump stopped; next frame identical to this one\n");
if (headers)
pr_err("Stack dump complete\n");
}
diff --git a/arch/tile/kernel/sys.c b/arch/tile/kernel/sys.c
index f0f87eab8c39..e2187d24a9b4 100644
--- a/arch/tile/kernel/sys.c
+++ b/arch/tile/kernel/sys.c
@@ -20,7 +20,6 @@
#include <linux/sched.h>
#include <linux/mm.h>
#include <linux/smp.h>
-#include <linux/smp_lock.h>
#include <linux/syscalls.h>
#include <linux/mman.h>
#include <linux/file.h>
@@ -110,6 +109,15 @@ SYSCALL_DEFINE6(mmap, unsigned long, addr, unsigned long, len,
#define sys_sync_file_range sys_sync_file_range2
#endif
+/* Call the trampolines to manage pt_regs where necessary. */
+#define sys_execve _sys_execve
+#define sys_sigaltstack _sys_sigaltstack
+#define sys_rt_sigreturn _sys_rt_sigreturn
+#define sys_clone _sys_clone
+#ifndef __tilegx__
+#define sys_cmpxchg_badaddr _sys_cmpxchg_badaddr
+#endif
+
/*
* Note that we can't include <linux/unistd.h> here since the header
* guard will defeat us; <asm/unistd.h> checks for __SYSCALL as well.
diff --git a/arch/tile/kernel/time.c b/arch/tile/kernel/time.c
index 6bed820e1421..f2e156e44692 100644
--- a/arch/tile/kernel/time.c
+++ b/arch/tile/kernel/time.c
@@ -132,7 +132,7 @@ static int tile_timer_set_next_event(unsigned long ticks,
{
BUG_ON(ticks > MAX_TICK);
__insn_mtspr(SPR_TILE_TIMER_CONTROL, ticks);
- raw_local_irq_unmask_now(INT_TILE_TIMER);
+ arch_local_irq_unmask_now(INT_TILE_TIMER);
return 0;
}
@@ -143,7 +143,7 @@ static int tile_timer_set_next_event(unsigned long ticks,
static void tile_timer_set_mode(enum clock_event_mode mode,
struct clock_event_device *evt)
{
- raw_local_irq_mask_now(INT_TILE_TIMER);
+ arch_local_irq_mask_now(INT_TILE_TIMER);
}
/*
@@ -172,7 +172,7 @@ void __cpuinit setup_tile_timer(void)
evt->cpumask = cpumask_of(smp_processor_id());
/* Start out with timer not firing. */
- raw_local_irq_mask_now(INT_TILE_TIMER);
+ arch_local_irq_mask_now(INT_TILE_TIMER);
/* Register tile timer. */
clockevents_register_device(evt);
@@ -188,7 +188,7 @@ void do_timer_interrupt(struct pt_regs *regs, int fault_num)
* Mask the timer interrupt here, since we are a oneshot timer
* and there are now by definition no events pending.
*/
- raw_local_irq_mask(INT_TILE_TIMER);
+ arch_local_irq_mask(INT_TILE_TIMER);
/* Track time spent here in an interrupt context */
irq_enter();
diff --git a/arch/tile/kernel/traps.c b/arch/tile/kernel/traps.c
index 0f362dc2c57f..5474fc2e77e8 100644
--- a/arch/tile/kernel/traps.c
+++ b/arch/tile/kernel/traps.c
@@ -260,7 +260,7 @@ void __kprobes do_trap(struct pt_regs *regs, int fault_num,
address = regs->pc;
break;
case INT_UNALIGN_DATA:
-#ifndef __tilegx__ /* FIXME: GX: no single-step yet */
+#ifndef __tilegx__ /* Emulated support for single step debugging */
if (unaligned_fixup >= 0) {
struct single_step_state *state =
current_thread_info()->step_state;
@@ -278,7 +278,7 @@ void __kprobes do_trap(struct pt_regs *regs, int fault_num,
case INT_DOUBLE_FAULT:
/*
* For double fault, "reason" is actually passed as
- * SYSTEM_SAVE_1_2, the hypervisor's double-fault info, so
+ * SYSTEM_SAVE_K_2, the hypervisor's double-fault info, so
* we can provide the original fault number rather than
* the uninteresting "INT_DOUBLE_FAULT" so the user can
* learn what actually struck while PL0 ICS was set.
diff --git a/arch/tile/kvm/Kconfig b/arch/tile/kvm/Kconfig
new file mode 100644
index 000000000000..b88f9c047781
--- /dev/null
+++ b/arch/tile/kvm/Kconfig
@@ -0,0 +1,38 @@
+#
+# KVM configuration
+#
+
+source "virt/kvm/Kconfig"
+
+menuconfig VIRTUALIZATION
+ bool "Virtualization"
+ ---help---
+ Say Y here to get to see options for using your Linux host to run
+ other operating systems inside virtual machines (guests).
+ This option alone does not add any kernel code.
+
+ If you say N, all options in this submenu will be skipped and
+ disabled.
+
+if VIRTUALIZATION
+
+config KVM
+ tristate "Kernel-based Virtual Machine (KVM) support"
+ depends on HAVE_KVM && MODULES && EXPERIMENTAL
+ select PREEMPT_NOTIFIERS
+ select ANON_INODES
+ ---help---
+ Support hosting paravirtualized guest machines.
+
+ This module provides access to the hardware capabilities through
+ a character device node named /dev/kvm.
+
+ To compile this as a module, choose M here: the module
+ will be called kvm.
+
+ If unsure, say N.
+
+source drivers/vhost/Kconfig
+source drivers/virtio/Kconfig
+
+endif # VIRTUALIZATION
diff --git a/arch/tile/lib/Makefile b/arch/tile/lib/Makefile
index 746dc81ed3c4..93122d5b1558 100644
--- a/arch/tile/lib/Makefile
+++ b/arch/tile/lib/Makefile
@@ -3,8 +3,8 @@
#
lib-y = cacheflush.o checksum.o cpumask.o delay.o \
- mb_incoherent.o uaccess.o \
- memcpy_$(BITS).o memchr_$(BITS).o memmove_$(BITS).o memset_$(BITS).o \
+ mb_incoherent.o uaccess.o memmove.o \
+ memcpy_$(BITS).o memchr_$(BITS).o memset_$(BITS).o \
strchr_$(BITS).o strlen_$(BITS).o
ifeq ($(CONFIG_TILEGX),y)
diff --git a/arch/tile/lib/atomic_32.c b/arch/tile/lib/atomic_32.c
index 8040b42a8eea..7a5cc706ab62 100644
--- a/arch/tile/lib/atomic_32.c
+++ b/arch/tile/lib/atomic_32.c
@@ -300,7 +300,7 @@ void __init __init_atomic_per_cpu(void)
#else /* ATOMIC_LOCKS_FOUND_VIA_TABLE() */
/* Validate power-of-two and "bigger than cpus" assumption */
- BUG_ON(ATOMIC_HASH_SIZE & (ATOMIC_HASH_SIZE-1));
+ BUILD_BUG_ON(ATOMIC_HASH_SIZE & (ATOMIC_HASH_SIZE-1));
BUG_ON(ATOMIC_HASH_SIZE < nr_cpu_ids);
/*
@@ -314,17 +314,17 @@ void __init __init_atomic_per_cpu(void)
BUG_ON((unsigned long)atomic_locks % PAGE_SIZE != 0);
/* The locks must all fit on one page. */
- BUG_ON(ATOMIC_HASH_SIZE * sizeof(int) > PAGE_SIZE);
+ BUILD_BUG_ON(ATOMIC_HASH_SIZE * sizeof(int) > PAGE_SIZE);
/*
* We use the page offset of the atomic value's address as
* an index into atomic_locks, excluding the low 3 bits.
* That should not produce more indices than ATOMIC_HASH_SIZE.
*/
- BUG_ON((PAGE_SIZE >> 3) > ATOMIC_HASH_SIZE);
+ BUILD_BUG_ON((PAGE_SIZE >> 3) > ATOMIC_HASH_SIZE);
#endif /* ATOMIC_LOCKS_FOUND_VIA_TABLE() */
/* The futex code makes this assumption, so we validate it here. */
- BUG_ON(sizeof(atomic_t) != sizeof(int));
+ BUILD_BUG_ON(sizeof(atomic_t) != sizeof(int));
}
diff --git a/arch/tile/lib/exports.c b/arch/tile/lib/exports.c
index ce5dbf56578f..1509c5597653 100644
--- a/arch/tile/lib/exports.c
+++ b/arch/tile/lib/exports.c
@@ -45,6 +45,9 @@ EXPORT_SYMBOL(__copy_from_user_zeroing);
EXPORT_SYMBOL(__copy_in_user_inatomic);
#endif
+/* arch/tile/lib/mb_incoherent.S */
+EXPORT_SYMBOL(__mb_incoherent);
+
/* hypervisor glue */
#include <hv/hypervisor.h>
EXPORT_SYMBOL(hv_dev_open);
diff --git a/arch/tile/lib/memchr_32.c b/arch/tile/lib/memchr_32.c
index 6235283b4859..cc3d9badf030 100644
--- a/arch/tile/lib/memchr_32.c
+++ b/arch/tile/lib/memchr_32.c
@@ -18,12 +18,24 @@
void *memchr(const void *s, int c, size_t n)
{
+ const uint32_t *last_word_ptr;
+ const uint32_t *p;
+ const char *last_byte_ptr;
+ uintptr_t s_int;
+ uint32_t goal, before_mask, v, bits;
+ char *ret;
+
+ if (__builtin_expect(n == 0, 0)) {
+ /* Don't dereference any memory if the array is empty. */
+ return NULL;
+ }
+
/* Get an aligned pointer. */
- const uintptr_t s_int = (uintptr_t) s;
- const uint32_t *p = (const uint32_t *)(s_int & -4);
+ s_int = (uintptr_t) s;
+ p = (const uint32_t *)(s_int & -4);
/* Create four copies of the byte for which we are looking. */
- const uint32_t goal = 0x01010101 * (uint8_t) c;
+ goal = 0x01010101 * (uint8_t) c;
/* Read the first word, but munge it so that bytes before the array
* will not match goal.
@@ -31,23 +43,14 @@ void *memchr(const void *s, int c, size_t n)
* Note that this shift count expression works because we know
* shift counts are taken mod 32.
*/
- const uint32_t before_mask = (1 << (s_int << 3)) - 1;
- uint32_t v = (*p | before_mask) ^ (goal & before_mask);
+ before_mask = (1 << (s_int << 3)) - 1;
+ v = (*p | before_mask) ^ (goal & before_mask);
/* Compute the address of the last byte. */
- const char *const last_byte_ptr = (const char *)s + n - 1;
+ last_byte_ptr = (const char *)s + n - 1;
/* Compute the address of the word containing the last byte. */
- const uint32_t *const last_word_ptr =
- (const uint32_t *)((uintptr_t) last_byte_ptr & -4);
-
- uint32_t bits;
- char *ret;
-
- if (__builtin_expect(n == 0, 0)) {
- /* Don't dereference any memory if the array is empty. */
- return NULL;
- }
+ last_word_ptr = (const uint32_t *)((uintptr_t) last_byte_ptr & -4);
while ((bits = __insn_seqb(v, goal)) == 0) {
if (__builtin_expect(p == last_word_ptr, 0)) {
diff --git a/arch/tile/lib/memcpy_32.S b/arch/tile/lib/memcpy_32.S
index 30c3b7ebb55d..2a419a6122db 100644
--- a/arch/tile/lib/memcpy_32.S
+++ b/arch/tile/lib/memcpy_32.S
@@ -10,14 +10,16 @@
* MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
* NON INFRINGEMENT. See the GNU General Public License for
* more details.
- *
- * This file shares the implementation of the userspace memcpy and
- * the kernel's memcpy, copy_to_user and copy_from_user.
*/
#include <arch/chip.h>
+/*
+ * This file shares the implementation of the userspace memcpy and
+ * the kernel's memcpy, copy_to_user and copy_from_user.
+ */
+
#include <linux/linkage.h>
/* On TILE64, we wrap these functions via arch/tile/lib/memcpy_tile64.c */
@@ -53,9 +55,9 @@
*/
ENTRY(__copy_from_user_inatomic)
.type __copy_from_user_inatomic, @function
- FEEDBACK_ENTER_EXPLICIT(__copy_from_user_inatomic, \
+ FEEDBACK_ENTER_EXPLICIT(__copy_from_user_inatomic, \
.text.memcpy_common, \
- .Lend_memcpy_common - __copy_from_user_inatomic)
+ .Lend_memcpy_common - __copy_from_user_inatomic)
{ movei r29, IS_COPY_FROM_USER; j memcpy_common }
.size __copy_from_user_inatomic, . - __copy_from_user_inatomic
@@ -64,7 +66,7 @@ ENTRY(__copy_from_user_inatomic)
*/
ENTRY(__copy_from_user_zeroing)
.type __copy_from_user_zeroing, @function
- FEEDBACK_REENTER(__copy_from_user_inatomic)
+ FEEDBACK_REENTER(__copy_from_user_inatomic)
{ movei r29, IS_COPY_FROM_USER_ZEROING; j memcpy_common }
.size __copy_from_user_zeroing, . - __copy_from_user_zeroing
@@ -74,13 +76,13 @@ ENTRY(__copy_from_user_zeroing)
*/
ENTRY(__copy_to_user_inatomic)
.type __copy_to_user_inatomic, @function
- FEEDBACK_REENTER(__copy_from_user_inatomic)
+ FEEDBACK_REENTER(__copy_from_user_inatomic)
{ movei r29, IS_COPY_TO_USER; j memcpy_common }
.size __copy_to_user_inatomic, . - __copy_to_user_inatomic
ENTRY(memcpy)
.type memcpy, @function
- FEEDBACK_REENTER(__copy_from_user_inatomic)
+ FEEDBACK_REENTER(__copy_from_user_inatomic)
{ movei r29, IS_MEMCPY }
.size memcpy, . - memcpy
/* Fall through */
@@ -157,35 +159,35 @@ EX: { sw r0, r3; addi r0, r0, 4; addi r2, r2, -4 }
{ addi r3, r1, 60; andi r9, r9, -64 }
#if CHIP_HAS_WH64()
- /* No need to prefetch dst, we'll just do the wh64
- * right before we copy a line.
+ /* No need to prefetch dst, we'll just do the wh64
+ * right before we copy a line.
*/
#endif
EX: { lw r5, r3; addi r3, r3, 64; movei r4, 1 }
- /* Intentionally stall for a few cycles to leave L2 cache alone. */
- { bnzt zero, .; move r27, lr }
+ /* Intentionally stall for a few cycles to leave L2 cache alone. */
+ { bnzt zero, .; move r27, lr }
EX: { lw r6, r3; addi r3, r3, 64 }
- /* Intentionally stall for a few cycles to leave L2 cache alone. */
- { bnzt zero, . }
+ /* Intentionally stall for a few cycles to leave L2 cache alone. */
+ { bnzt zero, . }
EX: { lw r7, r3; addi r3, r3, 64 }
#if !CHIP_HAS_WH64()
- /* Prefetch the dest */
- /* Intentionally stall for a few cycles to leave L2 cache alone. */
- { bnzt zero, . }
- /* Use a real load to cause a TLB miss if necessary. We aren't using
- * r28, so this should be fine.
- */
+ /* Prefetch the dest */
+ /* Intentionally stall for a few cycles to leave L2 cache alone. */
+ { bnzt zero, . }
+ /* Use a real load to cause a TLB miss if necessary. We aren't using
+ * r28, so this should be fine.
+ */
EX: { lw r28, r9; addi r9, r9, 64 }
- /* Intentionally stall for a few cycles to leave L2 cache alone. */
- { bnzt zero, . }
- { prefetch r9; addi r9, r9, 64 }
- /* Intentionally stall for a few cycles to leave L2 cache alone. */
- { bnzt zero, . }
- { prefetch r9; addi r9, r9, 64 }
+ /* Intentionally stall for a few cycles to leave L2 cache alone. */
+ { bnzt zero, . }
+ { prefetch r9; addi r9, r9, 64 }
+ /* Intentionally stall for a few cycles to leave L2 cache alone. */
+ { bnzt zero, . }
+ { prefetch r9; addi r9, r9, 64 }
#endif
- /* Intentionally stall for a few cycles to leave L2 cache alone. */
- { bz zero, .Lbig_loop2 }
+ /* Intentionally stall for a few cycles to leave L2 cache alone. */
+ { bz zero, .Lbig_loop2 }
/* On entry to this loop:
* - r0 points to the start of dst line 0
@@ -197,7 +199,7 @@ EX: { lw r28, r9; addi r9, r9, 64 }
* to some "safe" recently loaded address.
* - r5 contains *(r1 + 60) [i.e. last word of source line 0]
* - r6 contains *(r1 + 64 + 60) [i.e. last word of source line 1]
- * - r9 contains ((r0 + 63) & -64)
+ * - r9 contains ((r0 + 63) & -64)
* [start of next dst cache line.]
*/
@@ -208,137 +210,137 @@ EX: { lw r28, r9; addi r9, r9, 64 }
/* Copy line 0, first stalling until r5 is ready. */
EX: { move r12, r5; lw r16, r1 }
{ bz r4, .Lcopy_8_check; slti_u r8, r2, 8 }
- /* Prefetch several lines ahead. */
+ /* Prefetch several lines ahead. */
EX: { lw r5, r3; addi r3, r3, 64 }
- { jal .Lcopy_line }
+ { jal .Lcopy_line }
/* Copy line 1, first stalling until r6 is ready. */
EX: { move r12, r6; lw r16, r1 }
{ bz r4, .Lcopy_8_check; slti_u r8, r2, 8 }
- /* Prefetch several lines ahead. */
+ /* Prefetch several lines ahead. */
EX: { lw r6, r3; addi r3, r3, 64 }
{ jal .Lcopy_line }
/* Copy line 2, first stalling until r7 is ready. */
EX: { move r12, r7; lw r16, r1 }
{ bz r4, .Lcopy_8_check; slti_u r8, r2, 8 }
- /* Prefetch several lines ahead. */
+ /* Prefetch several lines ahead. */
EX: { lw r7, r3; addi r3, r3, 64 }
- /* Use up a caches-busy cycle by jumping back to the top of the
- * loop. Might as well get it out of the way now.
- */
- { j .Lbig_loop }
+ /* Use up a caches-busy cycle by jumping back to the top of the
+ * loop. Might as well get it out of the way now.
+ */
+ { j .Lbig_loop }
/* On entry:
* - r0 points to the destination line.
* - r1 points to the source line.
- * - r3 is the next prefetch address.
+ * - r3 is the next prefetch address.
* - r9 holds the last address used for wh64.
* - r12 = WORD_15
- * - r16 = WORD_0.
- * - r17 == r1 + 16.
- * - r27 holds saved lr to restore.
+ * - r16 = WORD_0.
+ * - r17 == r1 + 16.
+ * - r27 holds saved lr to restore.
*
* On exit:
* - r0 is incremented by 64.
* - r1 is incremented by 64, unless that would point to a word
- * beyond the end of the source array, in which case it is redirected
- * to point to an arbitrary word already in the cache.
+ * beyond the end of the source array, in which case it is redirected
+ * to point to an arbitrary word already in the cache.
* - r2 is decremented by 64.
- * - r3 is unchanged, unless it points to a word beyond the
- * end of the source array, in which case it is redirected
- * to point to an arbitrary word already in the cache.
- * Redirecting is OK since if we are that close to the end
- * of the array we will not come back to this subroutine
- * and use the contents of the prefetched address.
+ * - r3 is unchanged, unless it points to a word beyond the
+ * end of the source array, in which case it is redirected
+ * to point to an arbitrary word already in the cache.
+ * Redirecting is OK since if we are that close to the end
+ * of the array we will not come back to this subroutine
+ * and use the contents of the prefetched address.
* - r4 is nonzero iff r2 >= 64.
- * - r9 is incremented by 64, unless it points beyond the
- * end of the last full destination cache line, in which
- * case it is redirected to a "safe address" that can be
- * clobbered (sp - 64)
+ * - r9 is incremented by 64, unless it points beyond the
+ * end of the last full destination cache line, in which
+ * case it is redirected to a "safe address" that can be
+ * clobbered (sp - 64)
* - lr contains the value in r27.
*/
/* r26 unused */
.Lcopy_line:
- /* TODO: when r3 goes past the end, we would like to redirect it
- * to prefetch the last partial cache line (if any) just once, for the
- * benefit of the final cleanup loop. But we don't want to
- * prefetch that line more than once, or subsequent prefetches
- * will go into the RTF. But then .Lbig_loop should unconditionally
- * branch to top of loop to execute final prefetch, and its
- * nop should become a conditional branch.
- */
-
- /* We need two non-memory cycles here to cover the resources
- * used by the loads initiated by the caller.
- */
- { add r15, r1, r2 }
+ /* TODO: when r3 goes past the end, we would like to redirect it
+ * to prefetch the last partial cache line (if any) just once, for the
+ * benefit of the final cleanup loop. But we don't want to
+ * prefetch that line more than once, or subsequent prefetches
+ * will go into the RTF. But then .Lbig_loop should unconditionally
+ * branch to top of loop to execute final prefetch, and its
+ * nop should become a conditional branch.
+ */
+
+ /* We need two non-memory cycles here to cover the resources
+ * used by the loads initiated by the caller.
+ */
+ { add r15, r1, r2 }
.Lcopy_line2:
- { slt_u r13, r3, r15; addi r17, r1, 16 }
+ { slt_u r13, r3, r15; addi r17, r1, 16 }
- /* NOTE: this will stall for one cycle as L1 is busy. */
+ /* NOTE: this will stall for one cycle as L1 is busy. */
- /* Fill second L1D line. */
+ /* Fill second L1D line. */
EX: { lw r17, r17; addi r1, r1, 48; mvz r3, r13, r1 } /* r17 = WORD_4 */
#if CHIP_HAS_WH64()
- /* Prepare destination line for writing. */
+ /* Prepare destination line for writing. */
EX: { wh64 r9; addi r9, r9, 64 }
#else
- /* Prefetch dest line */
+ /* Prefetch dest line */
{ prefetch r9; addi r9, r9, 64 }
#endif
- /* Load seven words that are L1D hits to cover wh64 L2 usage. */
+ /* Load seven words that are L1D hits to cover wh64 L2 usage. */
- /* Load the three remaining words from the last L1D line, which
- * we know has already filled the L1D.
- */
+ /* Load the three remaining words from the last L1D line, which
+ * we know has already filled the L1D.
+ */
EX: { lw r4, r1; addi r1, r1, 4; addi r20, r1, 16 } /* r4 = WORD_12 */
EX: { lw r8, r1; addi r1, r1, 4; slt_u r13, r20, r15 }/* r8 = WORD_13 */
EX: { lw r11, r1; addi r1, r1, -52; mvz r20, r13, r1 } /* r11 = WORD_14 */
- /* Load the three remaining words from the first L1D line, first
- * stalling until it has filled by "looking at" r16.
- */
+ /* Load the three remaining words from the first L1D line, first
+ * stalling until it has filled by "looking at" r16.
+ */
EX: { lw r13, r1; addi r1, r1, 4; move zero, r16 } /* r13 = WORD_1 */
EX: { lw r14, r1; addi r1, r1, 4 } /* r14 = WORD_2 */
EX: { lw r15, r1; addi r1, r1, 8; addi r10, r0, 60 } /* r15 = WORD_3 */
- /* Load second word from the second L1D line, first
- * stalling until it has filled by "looking at" r17.
- */
+ /* Load second word from the second L1D line, first
+ * stalling until it has filled by "looking at" r17.
+ */
EX: { lw r19, r1; addi r1, r1, 4; move zero, r17 } /* r19 = WORD_5 */
- /* Store last word to the destination line, potentially dirtying it
- * for the first time, which keeps the L2 busy for two cycles.
- */
+ /* Store last word to the destination line, potentially dirtying it
+ * for the first time, which keeps the L2 busy for two cycles.
+ */
EX: { sw r10, r12 } /* store(WORD_15) */
- /* Use two L1D hits to cover the sw L2 access above. */
+ /* Use two L1D hits to cover the sw L2 access above. */
EX: { lw r10, r1; addi r1, r1, 4 } /* r10 = WORD_6 */
EX: { lw r12, r1; addi r1, r1, 4 } /* r12 = WORD_7 */
- /* Fill third L1D line. */
+ /* Fill third L1D line. */
EX: { lw r18, r1; addi r1, r1, 4 } /* r18 = WORD_8 */
- /* Store first L1D line. */
+ /* Store first L1D line. */
EX: { sw r0, r16; addi r0, r0, 4; add r16, r0, r2 } /* store(WORD_0) */
EX: { sw r0, r13; addi r0, r0, 4; andi r16, r16, -64 } /* store(WORD_1) */
EX: { sw r0, r14; addi r0, r0, 4; slt_u r16, r9, r16 } /* store(WORD_2) */
#if CHIP_HAS_WH64()
EX: { sw r0, r15; addi r0, r0, 4; addi r13, sp, -64 } /* store(WORD_3) */
#else
- /* Back up the r9 to a cache line we are already storing to
+ /* Back up the r9 to a cache line we are already storing to
* if it gets past the end of the dest vector. Strictly speaking,
* we don't need to back up to the start of a cache line, but it's free
* and tidy, so why not?
- */
+ */
EX: { sw r0, r15; addi r0, r0, 4; andi r13, r0, -64 } /* store(WORD_3) */
#endif
- /* Store second L1D line. */
+ /* Store second L1D line. */
EX: { sw r0, r17; addi r0, r0, 4; mvz r9, r16, r13 }/* store(WORD_4) */
EX: { sw r0, r19; addi r0, r0, 4 } /* store(WORD_5) */
EX: { sw r0, r10; addi r0, r0, 4 } /* store(WORD_6) */
@@ -348,30 +350,30 @@ EX: { lw r13, r1; addi r1, r1, 4; move zero, r18 } /* r13 = WORD_9 */
EX: { lw r14, r1; addi r1, r1, 4 } /* r14 = WORD_10 */
EX: { lw r15, r1; move r1, r20 } /* r15 = WORD_11 */
- /* Store third L1D line. */
+ /* Store third L1D line. */
EX: { sw r0, r18; addi r0, r0, 4 } /* store(WORD_8) */
EX: { sw r0, r13; addi r0, r0, 4 } /* store(WORD_9) */
EX: { sw r0, r14; addi r0, r0, 4 } /* store(WORD_10) */
EX: { sw r0, r15; addi r0, r0, 4 } /* store(WORD_11) */
- /* Store rest of fourth L1D line. */
+ /* Store rest of fourth L1D line. */
EX: { sw r0, r4; addi r0, r0, 4 } /* store(WORD_12) */
- {
+ {
EX: sw r0, r8 /* store(WORD_13) */
- addi r0, r0, 4
+ addi r0, r0, 4
/* Will r2 be > 64 after we subtract 64 below? */
- shri r4, r2, 7
- }
- {
+ shri r4, r2, 7
+ }
+ {
EX: sw r0, r11 /* store(WORD_14) */
- addi r0, r0, 8
- /* Record 64 bytes successfully copied. */
- addi r2, r2, -64
- }
+ addi r0, r0, 8
+ /* Record 64 bytes successfully copied. */
+ addi r2, r2, -64
+ }
{ jrp lr; move lr, r27 }
- /* Convey to the backtrace library that the stack frame is size
+ /* Convey to the backtrace library that the stack frame is size
* zero, and the real return address is on the stack rather than
* in 'lr'.
*/
diff --git a/arch/tile/lib/memcpy_tile64.c b/arch/tile/lib/memcpy_tile64.c
index dfedea7b266b..f7d4a6ad61e8 100644
--- a/arch/tile/lib/memcpy_tile64.c
+++ b/arch/tile/lib/memcpy_tile64.c
@@ -54,7 +54,7 @@ typedef unsigned long (*memcpy_t)(void *, const void *, unsigned long);
* we must run with interrupts disabled to avoid the risk of some
* other code seeing the incoherent data in our cache. (Recall that
* our cache is indexed by PA, so even if the other code doesn't use
- * our KM_MEMCPY virtual addresses, they'll still hit in cache using
+ * our kmap_atomic virtual addresses, they'll still hit in cache using
* the normal VAs that aren't supposed to hit in cache.)
*/
static void memcpy_multicache(void *dest, const void *source,
@@ -64,6 +64,7 @@ static void memcpy_multicache(void *dest, const void *source,
unsigned long flags, newsrc, newdst;
pmd_t *pmdp;
pte_t *ptep;
+ int type0, type1;
int cpu = get_cpu();
/*
@@ -77,7 +78,8 @@ static void memcpy_multicache(void *dest, const void *source,
sim_allow_multiple_caching(1);
/* Set up the new dest mapping */
- idx = FIX_KMAP_BEGIN + (KM_TYPE_NR * cpu) + KM_MEMCPY0;
+ type0 = kmap_atomic_idx_push();
+ idx = FIX_KMAP_BEGIN + (KM_TYPE_NR * cpu) + type0;
newdst = __fix_to_virt(idx) + ((unsigned long)dest & (PAGE_SIZE-1));
pmdp = pmd_offset(pud_offset(pgd_offset_k(newdst), newdst), newdst);
ptep = pte_offset_kernel(pmdp, newdst);
@@ -87,7 +89,8 @@ static void memcpy_multicache(void *dest, const void *source,
}
/* Set up the new source mapping */
- idx += (KM_MEMCPY0 - KM_MEMCPY1);
+ type1 = kmap_atomic_idx_push();
+ idx += (type0 - type1);
src_pte = hv_pte_set_nc(src_pte);
src_pte = hv_pte_clear_writable(src_pte); /* be paranoid */
newsrc = __fix_to_virt(idx) + ((unsigned long)source & (PAGE_SIZE-1));
@@ -119,6 +122,8 @@ static void memcpy_multicache(void *dest, const void *source,
* We're done: notify the simulator that all is back to normal,
* and re-enable interrupts and pre-emption.
*/
+ kmap_atomic_idx_pop();
+ kmap_atomic_idx_pop();
sim_allow_multiple_caching(0);
local_irq_restore(flags);
put_cpu();
diff --git a/arch/tile/lib/memmove_32.c b/arch/tile/lib/memmove.c
index fd615ae6ade7..fd615ae6ade7 100644
--- a/arch/tile/lib/memmove_32.c
+++ b/arch/tile/lib/memmove.c
diff --git a/arch/tile/lib/memset_32.c b/arch/tile/lib/memset_32.c
index d014c1fbcbc2..57dbb3a5bff8 100644
--- a/arch/tile/lib/memset_32.c
+++ b/arch/tile/lib/memset_32.c
@@ -18,6 +18,7 @@
#include <linux/string.h>
#include <linux/module.h>
+#undef memset
void *memset(void *s, int c, size_t n)
{
diff --git a/arch/tile/lib/spinlock_32.c b/arch/tile/lib/spinlock_32.c
index 485e24d62c6b..5cd1c4004eca 100644
--- a/arch/tile/lib/spinlock_32.c
+++ b/arch/tile/lib/spinlock_32.c
@@ -167,23 +167,30 @@ void arch_write_lock_slow(arch_rwlock_t *rwlock, u32 val)
* when we compare them.
*/
u32 my_ticket_;
+ u32 iterations = 0;
- /* Take out the next ticket; this will also stop would-be readers. */
- if (val & 1)
- val = get_rwlock(rwlock);
- rwlock->lock = __insn_addb(val, 1 << WR_NEXT_SHIFT);
+ /*
+ * Wait until there are no readers, then bump up the next
+ * field and capture the ticket value.
+ */
+ for (;;) {
+ if (!(val & 1)) {
+ if ((val >> RD_COUNT_SHIFT) == 0)
+ break;
+ rwlock->lock = val;
+ }
+ delay_backoff(iterations++);
+ val = __insn_tns((int *)&rwlock->lock);
+ }
- /* Extract my ticket value from the original word. */
+ /* Take out the next ticket and extract my ticket value. */
+ rwlock->lock = __insn_addb(val, 1 << WR_NEXT_SHIFT);
my_ticket_ = val >> WR_NEXT_SHIFT;
- /*
- * Wait until the "current" field matches our ticket, and
- * there are no remaining readers.
- */
+ /* Wait until the "current" field matches our ticket. */
for (;;) {
u32 curr_ = val >> WR_CURR_SHIFT;
- u32 readers = val >> RD_COUNT_SHIFT;
- u32 delta = ((my_ticket_ - curr_) & WR_MASK) + !!readers;
+ u32 delta = ((my_ticket_ - curr_) & WR_MASK);
if (likely(delta == 0))
break;
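/*
 * Editor's illustration (not part of the patch): the rewritten write-lock
 * slow path above first waits for readers to drain, then takes a ticket and
 * spins only until the "current" field reaches that ticket.  Below is a
 * minimal plain ticket spinlock in C11 atomics showing the same
 * ticket/current discipline; it is an editorial sketch, not the Tile rwlock.
 */
#include <stdatomic.h>
#include <stdio.h>

struct ticket_lock {
	atomic_uint next;	/* next ticket to hand out */
	atomic_uint current;	/* ticket currently allowed to run */
};

static void ticket_lock(struct ticket_lock *l)
{
	unsigned int my = atomic_fetch_add(&l->next, 1);	/* take out the next ticket */
	while (atomic_load(&l->current) != my)
		;	/* spin until our ticket comes up */
}

static void ticket_unlock(struct ticket_lock *l)
{
	atomic_fetch_add(&l->current, 1);	/* pass the lock to the next ticket */
}

int main(void)
{
	struct ticket_lock l = { 0, 0 };

	ticket_lock(&l);
	printf("holding ticket lock\n");
	ticket_unlock(&l);
	return 0;
}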
diff --git a/arch/tile/lib/strlen_32.c b/arch/tile/lib/strlen_32.c
index f26f88e11e4a..4974292a5534 100644
--- a/arch/tile/lib/strlen_32.c
+++ b/arch/tile/lib/strlen_32.c
@@ -16,6 +16,8 @@
#include <linux/string.h>
#include <linux/module.h>
+#undef strlen
+
size_t strlen(const char *s)
{
/* Get an aligned pointer. */
diff --git a/arch/tile/mm/fault.c b/arch/tile/mm/fault.c
index 704f3e8a4385..dcebfc831cd6 100644
--- a/arch/tile/mm/fault.c
+++ b/arch/tile/mm/fault.c
@@ -24,7 +24,6 @@
#include <linux/mman.h>
#include <linux/mm.h>
#include <linux/smp.h>
-#include <linux/smp_lock.h>
#include <linux/interrupt.h>
#include <linux/init.h>
#include <linux/tty.h>
@@ -66,10 +65,10 @@ static noinline void force_sig_info_fault(int si_signo, int si_code,
#ifndef __tilegx__
/*
* Synthesize the fault a PL0 process would get by doing a word-load of
- * an unaligned address or a high kernel address. Called indirectly
- * from sys_cmpxchg() in kernel/intvec.S.
+ * an unaligned address or a high kernel address.
*/
-int _sys_cmpxchg_badaddr(unsigned long address, struct pt_regs *regs)
+SYSCALL_DEFINE2(cmpxchg_badaddr, unsigned long, address,
+ struct pt_regs *, regs)
{
if (address >= PAGE_OFFSET)
force_sig_info_fault(SIGSEGV, SEGV_MAPERR, address,
@@ -563,10 +562,10 @@ do_sigbus:
/*
* When we take an ITLB or DTLB fault or access violation in the
* supervisor while the critical section bit is set, the hypervisor is
- * reluctant to write new values into the EX_CONTEXT_1_x registers,
+ * reluctant to write new values into the EX_CONTEXT_K_x registers,
* since that might indicate we have not yet squirreled the SPR
* contents away and can thus safely take a recursive interrupt.
- * Accordingly, the hypervisor passes us the PC via SYSTEM_SAVE_1_2.
+ * Accordingly, the hypervisor passes us the PC via SYSTEM_SAVE_K_2.
*
* Note that this routine is called before homecache_tlb_defer_enter(),
* which means that we can properly unlock any atomics that might
@@ -610,7 +609,7 @@ struct intvec_state do_page_fault_ics(struct pt_regs *regs, int fault_num,
* fault. We didn't set up a kernel stack on initial entry to
* sys_cmpxchg, but instead had one set up by the fault, which
* (because sys_cmpxchg never releases ICS) came to us via the
- * SYSTEM_SAVE_1_2 mechanism, and thus EX_CONTEXT_1_[01] are
+ * SYSTEM_SAVE_K_2 mechanism, and thus EX_CONTEXT_K_[01] are
* still referencing the original user code. We release the
* atomic lock and rewrite pt_regs so that it appears that we
* came from user-space directly, and after we finish the
diff --git a/arch/tile/mm/highmem.c b/arch/tile/mm/highmem.c
index 12ab137e7d4f..31dbbd9afe47 100644
--- a/arch/tile/mm/highmem.c
+++ b/arch/tile/mm/highmem.c
@@ -56,50 +56,6 @@ void kunmap(struct page *page)
}
EXPORT_SYMBOL(kunmap);
-static void debug_kmap_atomic_prot(enum km_type type)
-{
-#ifdef CONFIG_DEBUG_HIGHMEM
- static unsigned warn_count = 10;
-
- if (unlikely(warn_count == 0))
- return;
-
- if (unlikely(in_interrupt())) {
- if (in_irq()) {
- if (type != KM_IRQ0 && type != KM_IRQ1 &&
- type != KM_BIO_SRC_IRQ &&
- /* type != KM_BIO_DST_IRQ && */
- type != KM_BOUNCE_READ) {
- WARN_ON(1);
- warn_count--;
- }
- } else if (!irqs_disabled()) { /* softirq */
- if (type != KM_IRQ0 && type != KM_IRQ1 &&
- type != KM_SOFTIRQ0 && type != KM_SOFTIRQ1 &&
- type != KM_SKB_SUNRPC_DATA &&
- type != KM_SKB_DATA_SOFTIRQ &&
- type != KM_BOUNCE_READ) {
- WARN_ON(1);
- warn_count--;
- }
- }
- }
-
- if (type == KM_IRQ0 || type == KM_IRQ1 || type == KM_BOUNCE_READ ||
- type == KM_BIO_SRC_IRQ /* || type == KM_BIO_DST_IRQ */) {
- if (!irqs_disabled()) {
- WARN_ON(1);
- warn_count--;
- }
- } else if (type == KM_SOFTIRQ0 || type == KM_SOFTIRQ1) {
- if (irq_count() == 0 && !irqs_disabled()) {
- WARN_ON(1);
- warn_count--;
- }
- }
-#endif
-}
-
/*
* Describe a single atomic mapping of a page on a given cpu at a
* given address, and allow it to be linked into a list.
@@ -240,10 +196,10 @@ void kmap_atomic_fix_kpte(struct page *page, int finished)
* When holding an atomic kmap it is not legal to sleep, so atomic
* kmaps are appropriate for short, tight code paths only.
*/
-void *kmap_atomic_prot(struct page *page, enum km_type type, pgprot_t prot)
+void *kmap_atomic_prot(struct page *page, pgprot_t prot)
{
- enum fixed_addresses idx;
unsigned long vaddr;
+ int idx, type;
pte_t *pte;
/* even !CONFIG_PREEMPT needs this, for in_atomic in do_page_fault */
@@ -255,8 +211,7 @@ void *kmap_atomic_prot(struct page *page, enum km_type type, pgprot_t prot)
if (!PageHighMem(page))
return page_address(page);
- debug_kmap_atomic_prot(type);
-
+ type = kmap_atomic_idx_push();
idx = type + KM_TYPE_NR*smp_processor_id();
vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
pte = kmap_get_pte(vaddr);
@@ -269,28 +224,35 @@ void *kmap_atomic_prot(struct page *page, enum km_type type, pgprot_t prot)
}
EXPORT_SYMBOL(kmap_atomic_prot);
-void *kmap_atomic(struct page *page, enum km_type type)
+void *__kmap_atomic(struct page *page)
{
/* PAGE_NONE is a magic value that tells us to check immutability. */
- return kmap_atomic_prot(page, type, PAGE_NONE);
+ return kmap_atomic_prot(page, PAGE_NONE);
}
-EXPORT_SYMBOL(kmap_atomic);
+EXPORT_SYMBOL(__kmap_atomic);
-void kunmap_atomic_notypecheck(void *kvaddr, enum km_type type)
+void __kunmap_atomic(void *kvaddr)
{
unsigned long vaddr = (unsigned long) kvaddr & PAGE_MASK;
- enum fixed_addresses idx = type + KM_TYPE_NR*smp_processor_id();
- /*
- * Force other mappings to Oops if they try to access this pte without
- * first remapping it. Keeping stale mappings around is a bad idea.
- */
- if (vaddr == __fix_to_virt(FIX_KMAP_BEGIN+idx)) {
+ if (vaddr >= __fix_to_virt(FIX_KMAP_END) &&
+ vaddr <= __fix_to_virt(FIX_KMAP_BEGIN)) {
pte_t *pte = kmap_get_pte(vaddr);
pte_t pteval = *pte;
+ int idx, type;
+
+ type = kmap_atomic_idx();
+ idx = type + KM_TYPE_NR*smp_processor_id();
+
+ /*
+ * Force other mappings to Oops if they try to access this pte
+ * without first remapping it. Keeping stale mappings around
+ * is a bad idea.
+ */
BUG_ON(!pte_present(pteval) && !pte_migrating(pteval));
kmap_atomic_unregister(pte_page(pteval), vaddr);
kpte_clear_flush(pte, vaddr);
+ kmap_atomic_idx_pop();
} else {
/* Must be a lowmem page */
BUG_ON(vaddr < PAGE_OFFSET);
@@ -300,19 +262,19 @@ void kunmap_atomic_notypecheck(void *kvaddr, enum km_type type)
arch_flush_lazy_mmu_mode();
pagefault_enable();
}
-EXPORT_SYMBOL(kunmap_atomic_notypecheck);
+EXPORT_SYMBOL(__kunmap_atomic);
/*
* This API is supposed to allow us to map memory without a "struct page".
* Currently we don't support this, though this may change in the future.
*/
-void *kmap_atomic_pfn(unsigned long pfn, enum km_type type)
+void *kmap_atomic_pfn(unsigned long pfn)
{
- return kmap_atomic(pfn_to_page(pfn), type);
+ return kmap_atomic(pfn_to_page(pfn));
}
-void *kmap_atomic_prot_pfn(unsigned long pfn, enum km_type type, pgprot_t prot)
+void *kmap_atomic_prot_pfn(unsigned long pfn, pgprot_t prot)
{
- return kmap_atomic_prot(pfn_to_page(pfn), type, prot);
+ return kmap_atomic_prot(pfn_to_page(pfn), prot);
}
struct page *kmap_atomic_to_page(void *ptr)
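/*
 * Editor's illustration (not part of the patch): the highmem.c changes above
 * drop the caller-supplied km_type slot and instead keep a small per-CPU
 * stack of mapping indices (kmap_atomic_idx_push/idx/pop), unwound in LIFO
 * order by __kunmap_atomic().  Minimal single-threaded sketch of that
 * push/peek/pop discipline; names and the DEPTH constant are hypothetical.
 */
#include <assert.h>
#include <stdio.h>

#define KMAP_DEPTH 4	/* hypothetical nesting limit */

static int kmap_idx_stack_top;	/* per-CPU in the kernel; one instance here */

static int idx_push(void)
{
	assert(kmap_idx_stack_top < KMAP_DEPTH);
	return kmap_idx_stack_top++;	/* slot for the new atomic mapping */
}

static int idx_peek(void)
{
	return kmap_idx_stack_top - 1;	/* slot of the most recent mapping */
}

static void idx_pop(void)
{
	assert(kmap_idx_stack_top > 0);
	kmap_idx_stack_top--;
}

int main(void)
{
	int a = idx_push();		/* e.g. destination mapping */
	int b = idx_push();		/* e.g. source mapping */

	printf("mapped slots %d and %d\n", a, b);
	/* Unmap in LIFO order, mirroring __kunmap_atomic(). */
	assert(idx_peek() == b);
	idx_pop();
	assert(idx_peek() == a);
	idx_pop();
	return 0;
}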
diff --git a/arch/tile/mm/homecache.c b/arch/tile/mm/homecache.c
index fb3b4a55cec4..d78df3a6ee15 100644
--- a/arch/tile/mm/homecache.c
+++ b/arch/tile/mm/homecache.c
@@ -37,6 +37,8 @@
#include <asm/pgalloc.h>
#include <asm/homecache.h>
+#include <arch/sim.h>
+
#include "migrate.h"
@@ -217,13 +219,6 @@ static unsigned long cache_flush_length(unsigned long length)
return (length >= CHIP_L2_CACHE_SIZE()) ? HV_FLUSH_EVICT_L2 : length;
}
-/* On the simulator, confirm lines have been evicted everywhere. */
-static void validate_lines_evicted(unsigned long pfn, size_t length)
-{
- sim_syscall(SIM_SYSCALL_VALIDATE_LINES_EVICTED,
- (HV_PhysAddr)pfn << PAGE_SHIFT, length);
-}
-
/* Flush a page out of whatever cache(s) it is in. */
void homecache_flush_cache(struct page *page, int order)
{
@@ -234,7 +229,7 @@ void homecache_flush_cache(struct page *page, int order)
homecache_mask(page, pages, &home_mask);
flush_remote(pfn, length, &home_mask, 0, 0, 0, NULL, NULL, 0);
- validate_lines_evicted(pfn, pages * PAGE_SIZE);
+ sim_validate_lines_evicted(PFN_PHYS(pfn), pages * PAGE_SIZE);
}
diff --git a/arch/tile/mm/hugetlbpage.c b/arch/tile/mm/hugetlbpage.c
index 24688b697a8d..201a582c4137 100644
--- a/arch/tile/mm/hugetlbpage.c
+++ b/arch/tile/mm/hugetlbpage.c
@@ -21,7 +21,6 @@
#include <linux/mm.h>
#include <linux/hugetlb.h>
#include <linux/pagemap.h>
-#include <linux/smp_lock.h>
#include <linux/slab.h>
#include <linux/err.h>
#include <linux/sysctl.h>
diff --git a/arch/tile/mm/init.c b/arch/tile/mm/init.c
index d89c9eacd162..0b9ce69b0ee5 100644
--- a/arch/tile/mm/init.c
+++ b/arch/tile/mm/init.c
@@ -988,8 +988,12 @@ static long __write_once initfree = 1;
/* Select whether to free (1) or mark unusable (0) the __init pages. */
static int __init set_initfree(char *str)
{
- strict_strtol(str, 0, &initfree);
- pr_info("initfree: %s free init pages\n", initfree ? "will" : "won't");
+ long val;
+ if (strict_strtol(str, 0, &val) == 0) {
+ initfree = val;
+ pr_info("initfree: %s free init pages\n",
+ initfree ? "will" : "won't");
+ }
return 1;
}
__setup("initfree=", set_initfree);
@@ -1060,7 +1064,7 @@ void free_initmem(void)
/*
* Free the pages mapped from 0xc0000000 that correspond to code
- * pages from 0xfd000000 that we won't use again after init.
+ * pages from MEM_SV_INTRPT that we won't use again after init.
*/
free_init_pages("unused kernel text",
(unsigned long)_sinittext - text_delta,
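/*
 * Editor's illustration (not part of the patch): the set_initfree() fix
 * above only commits the parsed value when the string conversion succeeds
 * (strict_strtol returns 0 on success).  A userspace equivalent with
 * strtol(); the variable names are made up for the example.
 */
#include <errno.h>
#include <stdio.h>
#include <stdlib.h>

static long initfree = 1;	/* default: free init pages */

static void set_initfree(const char *str)
{
	char *end;
	long val;

	errno = 0;
	val = strtol(str, &end, 0);
	if (errno == 0 && end != str && *end == '\0') {
		initfree = val;	/* commit only on a clean parse */
		printf("initfree: %s free init pages\n",
		       initfree ? "will" : "won't");
	}
}

int main(void)
{
	set_initfree("0");
	set_initfree("not-a-number");	/* ignored: leaves the previous value */
	return 0;
}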
diff --git a/arch/tile/mm/pgtable.c b/arch/tile/mm/pgtable.c
index 335c24621c41..1f5430c53d0d 100644
--- a/arch/tile/mm/pgtable.c
+++ b/arch/tile/mm/pgtable.c
@@ -134,9 +134,9 @@ void __set_fixmap(enum fixed_addresses idx, unsigned long phys, pgprot_t flags)
}
#if defined(CONFIG_HIGHPTE)
-pte_t *_pte_offset_map(pmd_t *dir, unsigned long address, enum km_type type)
+pte_t *_pte_offset_map(pmd_t *dir, unsigned long address)
{
- pte_t *pte = kmap_atomic(pmd_page(*dir), type) +
+ pte_t *pte = kmap_atomic(pmd_page(*dir)) +
(pmd_ptfn(*dir) << HV_LOG2_PAGE_TABLE_ALIGN) & ~PAGE_MASK;
return &pte[pte_index(address)];
}
diff --git a/arch/um/Kconfig.common b/arch/um/Kconfig.common
index 7c8e277f6d34..049d048b070d 100644
--- a/arch/um/Kconfig.common
+++ b/arch/um/Kconfig.common
@@ -19,8 +19,6 @@ config MMU
config NO_IOMEM
def_bool y
-mainmenu "Linux/Usermode Kernel Configuration"
-
config ISA
bool
diff --git a/arch/um/Kconfig.um b/arch/um/Kconfig.um
index ec2b8da1aba4..50d6aa20c353 100644
--- a/arch/um/Kconfig.um
+++ b/arch/um/Kconfig.um
@@ -120,6 +120,9 @@ config SMP
If you don't know what to do, say N.
+config GENERIC_HARDIRQS_NO__DO_IRQ
+ def_bool y
+
config NR_CPUS
int "Maximum number of CPUs (2-32)"
range 2 32
@@ -147,3 +150,6 @@ config KERNEL_STACK_ORDER
This option determines the size of UML kernel stacks. They will
be 1 << order pages. The default is OK unless you're running Valgrind
on UML, in which case, set this to 3.
+
+config NO_DMA
+ def_bool y
diff --git a/arch/um/defconfig b/arch/um/defconfig
index 6bd456f96f90..564f3de65b4a 100644
--- a/arch/um/defconfig
+++ b/arch/um/defconfig
@@ -566,7 +566,6 @@ CONFIG_CRC32=m
# CONFIG_CRC7 is not set
# CONFIG_LIBCRC32C is not set
CONFIG_PLIST=y
-CONFIG_HAS_DMA=y
#
# SCSI device support
diff --git a/arch/um/drivers/line.c b/arch/um/drivers/line.c
index 7f7338c90784..1664cce7b0ac 100644
--- a/arch/um/drivers/line.c
+++ b/arch/um/drivers/line.c
@@ -727,6 +727,9 @@ struct winch {
static void free_winch(struct winch *winch, int free_irq_ok)
{
+ if (free_irq_ok)
+ free_irq(WINCH_IRQ, winch);
+
list_del(&winch->list);
if (winch->pid != -1)
@@ -735,8 +738,6 @@ static void free_winch(struct winch *winch, int free_irq_ok)
os_close_file(winch->fd);
if (winch->stack != 0)
free_stack(winch->stack, 0);
- if (free_irq_ok)
- free_irq(WINCH_IRQ, winch);
kfree(winch);
}
diff --git a/arch/um/include/asm/dma-mapping.h b/arch/um/include/asm/dma-mapping.h
deleted file mode 100644
index 1f469e80fdd3..000000000000
--- a/arch/um/include/asm/dma-mapping.h
+++ /dev/null
@@ -1,112 +0,0 @@
-#ifndef _ASM_DMA_MAPPING_H
-#define _ASM_DMA_MAPPING_H
-
-#include <asm/scatterlist.h>
-
-static inline int
-dma_supported(struct device *dev, u64 mask)
-{
- BUG();
- return(0);
-}
-
-static inline int
-dma_set_mask(struct device *dev, u64 dma_mask)
-{
- BUG();
- return(0);
-}
-
-static inline void *
-dma_alloc_coherent(struct device *dev, size_t size, dma_addr_t *dma_handle,
- gfp_t flag)
-{
- BUG();
- return((void *) 0);
-}
-
-static inline void
-dma_free_coherent(struct device *dev, size_t size, void *cpu_addr,
- dma_addr_t dma_handle)
-{
- BUG();
-}
-
-static inline dma_addr_t
-dma_map_single(struct device *dev, void *cpu_addr, size_t size,
- enum dma_data_direction direction)
-{
- BUG();
- return(0);
-}
-
-static inline void
-dma_unmap_single(struct device *dev, dma_addr_t dma_addr, size_t size,
- enum dma_data_direction direction)
-{
- BUG();
-}
-
-static inline dma_addr_t
-dma_map_page(struct device *dev, struct page *page,
- unsigned long offset, size_t size,
- enum dma_data_direction direction)
-{
- BUG();
- return(0);
-}
-
-static inline void
-dma_unmap_page(struct device *dev, dma_addr_t dma_address, size_t size,
- enum dma_data_direction direction)
-{
- BUG();
-}
-
-static inline int
-dma_map_sg(struct device *dev, struct scatterlist *sg, int nents,
- enum dma_data_direction direction)
-{
- BUG();
- return(0);
-}
-
-static inline void
-dma_unmap_sg(struct device *dev, struct scatterlist *sg, int nhwentries,
- enum dma_data_direction direction)
-{
- BUG();
-}
-
-static inline void
-dma_sync_single_for_cpu(struct device *dev, dma_addr_t dma_handle, size_t size,
- enum dma_data_direction direction)
-{
- BUG();
-}
-
-static inline void
-dma_sync_sg_for_cpu(struct device *dev, struct scatterlist *sg, int nelems,
- enum dma_data_direction direction)
-{
- BUG();
-}
-
-#define dma_alloc_noncoherent(d, s, h, f) dma_alloc_coherent(d, s, h, f)
-#define dma_free_noncoherent(d, s, v, h) dma_free_coherent(d, s, v, h)
-
-static inline void
-dma_cache_sync(struct device *dev, void *vaddr, size_t size,
- enum dma_data_direction direction)
-{
- BUG();
-}
-
-static inline int
-dma_mapping_error(struct device *dev, dma_addr_t dma_handle)
-{
- BUG();
- return 0;
-}
-
-#endif
diff --git a/arch/um/include/asm/pgtable.h b/arch/um/include/asm/pgtable.h
index a9f7251b4a8d..41474fb5eee7 100644
--- a/arch/um/include/asm/pgtable.h
+++ b/arch/um/include/asm/pgtable.h
@@ -338,9 +338,7 @@ static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
((pte_t *) pmd_page_vaddr(*(dir)) + pte_index(address))
#define pte_offset_map(dir, address) \
((pte_t *)page_address(pmd_page(*(dir))) + pte_index(address))
-#define pte_offset_map_nested(dir, address) pte_offset_map(dir, address)
#define pte_unmap(pte) do { } while (0)
-#define pte_unmap_nested(pte) do { } while (0)
struct mm_struct;
extern pte_t *virt_to_pte(struct mm_struct *mm, unsigned long addr);
diff --git a/arch/um/include/asm/ptrace-generic.h b/arch/um/include/asm/ptrace-generic.h
index 2cd899f75a3c..b7c5bab9bd77 100644
--- a/arch/um/include/asm/ptrace-generic.h
+++ b/arch/um/include/asm/ptrace-generic.h
@@ -38,8 +38,8 @@ struct pt_regs {
struct task_struct;
-extern long subarch_ptrace(struct task_struct *child, long request, long addr,
- long data);
+extern long subarch_ptrace(struct task_struct *child, long request,
+ unsigned long addr, unsigned long data);
extern unsigned long getreg(struct task_struct *child, int regno);
extern int putreg(struct task_struct *child, int regno, unsigned long value);
extern int get_fpregs(struct user_i387_struct __user *buf,
diff --git a/arch/um/include/asm/system.h b/arch/um/include/asm/system.h
index 93af1cf0907d..68a90ecd1450 100644
--- a/arch/um/include/asm/system.h
+++ b/arch/um/include/asm/system.h
@@ -8,23 +8,38 @@ extern int set_signals(int enable);
extern void block_signals(void);
extern void unblock_signals(void);
-#define local_save_flags(flags) do { typecheck(unsigned long, flags); \
- (flags) = get_signals(); } while(0)
-#define local_irq_restore(flags) do { typecheck(unsigned long, flags); \
- set_signals(flags); } while(0)
-
-#define local_irq_save(flags) do { local_save_flags(flags); \
- local_irq_disable(); } while(0)
-
-#define local_irq_enable() unblock_signals()
-#define local_irq_disable() block_signals()
-
-#define irqs_disabled() \
-({ \
- unsigned long flags; \
- local_save_flags(flags); \
- (flags == 0); \
-})
+static inline unsigned long arch_local_save_flags(void)
+{
+ return get_signals();
+}
+
+static inline void arch_local_irq_restore(unsigned long flags)
+{
+ set_signals(flags);
+}
+
+static inline void arch_local_irq_enable(void)
+{
+ unblock_signals();
+}
+
+static inline void arch_local_irq_disable(void)
+{
+ block_signals();
+}
+
+static inline unsigned long arch_local_irq_save(void)
+{
+ unsigned long flags;
+ flags = arch_local_save_flags();
+ arch_local_irq_disable();
+ return flags;
+}
+
+static inline bool arch_irqs_disabled(void)
+{
+ return arch_local_save_flags() == 0;
+}
extern void *_switch_to(void *prev, void *next, void *last);
#define switch_to(prev, next, last) prev = _switch_to(prev, next, last)
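/*
 * Editor's illustration (not part of the patch): the UML change above turns
 * the flag-juggling macros into static inline helpers while keeping the
 * usual save/disable ... restore pairing.  Standalone sketch with a stand-in
 * "signals enabled" flag instead of UML's get_signals()/set_signals(); the
 * names below are hypothetical.
 */
#include <stdbool.h>
#include <stdio.h>

static unsigned long signals_enabled = 1;	/* stand-in for UML signal state */

static unsigned long my_local_save_flags(void)
{
	return signals_enabled;
}

static void my_local_irq_disable(void)
{
	signals_enabled = 0;	/* block_signals() in UML */
}

static void my_local_irq_restore(unsigned long flags)
{
	signals_enabled = flags;	/* set_signals() in UML */
}

static unsigned long my_local_irq_save(void)
{
	unsigned long flags = my_local_save_flags();
	my_local_irq_disable();
	return flags;
}

static bool my_irqs_disabled(void)
{
	return my_local_save_flags() == 0;
}

int main(void)
{
	unsigned long flags = my_local_irq_save();

	printf("inside critical section, irqs disabled: %d\n", my_irqs_disabled());
	my_local_irq_restore(flags);
	printf("after restore, irqs disabled: %d\n", my_irqs_disabled());
	return 0;
}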
diff --git a/arch/um/kernel/dyn.lds.S b/arch/um/kernel/dyn.lds.S
index 69268014dd8e..a3cab6d3ae02 100644
--- a/arch/um/kernel/dyn.lds.S
+++ b/arch/um/kernel/dyn.lds.S
@@ -50,8 +50,18 @@ SECTIONS
.rela.got : { *(.rela.got) }
.rel.bss : { *(.rel.bss .rel.bss.* .rel.gnu.linkonce.b.*) }
.rela.bss : { *(.rela.bss .rela.bss.* .rela.gnu.linkonce.b.*) }
- .rel.plt : { *(.rel.plt) }
- .rela.plt : { *(.rela.plt) }
+ .rel.plt : {
+ *(.rel.plt)
+ PROVIDE_HIDDEN(__rel_iplt_start = .);
+ *(.rel.iplt)
+ PROVIDE_HIDDEN(__rel_iplt_end = .);
+ }
+ .rela.plt : {
+ *(.rela.plt)
+ PROVIDE_HIDDEN(__rela_iplt_start = .);
+ *(.rela.iplt)
+ PROVIDE_HIDDEN(__rela_iplt_end = .);
+ }
.init : {
KEEP (*(.init))
} =0x90909090
diff --git a/arch/um/kernel/exec.c b/arch/um/kernel/exec.c
index 340268be00b5..09bd7b585726 100644
--- a/arch/um/kernel/exec.c
+++ b/arch/um/kernel/exec.c
@@ -5,7 +5,6 @@
#include "linux/stddef.h"
#include "linux/fs.h"
-#include "linux/smp_lock.h"
#include "linux/ptrace.h"
#include "linux/sched.h"
#include "linux/slab.h"
diff --git a/arch/um/kernel/irq.c b/arch/um/kernel/irq.c
index a746e3037a5b..3f0ac9e0c966 100644
--- a/arch/um/kernel/irq.c
+++ b/arch/um/kernel/irq.c
@@ -334,7 +334,7 @@ unsigned int do_IRQ(int irq, struct uml_pt_regs *regs)
{
struct pt_regs *old_regs = set_irq_regs((struct pt_regs *)regs);
irq_enter();
- __do_IRQ(irq);
+ generic_handle_irq(irq);
irq_exit();
set_irq_regs(old_regs);
return 1;
@@ -391,17 +391,10 @@ void __init init_IRQ(void)
{
int i;
- irq_desc[TIMER_IRQ].status = IRQ_DISABLED;
- irq_desc[TIMER_IRQ].action = NULL;
- irq_desc[TIMER_IRQ].depth = 1;
- irq_desc[TIMER_IRQ].chip = &SIGVTALRM_irq_type;
- enable_irq(TIMER_IRQ);
+ set_irq_chip_and_handler(TIMER_IRQ, &SIGVTALRM_irq_type, handle_edge_irq);
+
for (i = 1; i < NR_IRQS; i++) {
- irq_desc[i].status = IRQ_DISABLED;
- irq_desc[i].action = NULL;
- irq_desc[i].depth = 1;
- irq_desc[i].chip = &normal_irq_type;
- enable_irq(i);
+ set_irq_chip_and_handler(i, &normal_irq_type, handle_edge_irq);
}
}
diff --git a/arch/um/kernel/ptrace.c b/arch/um/kernel/ptrace.c
index e0510496596c..701b672c1122 100644
--- a/arch/um/kernel/ptrace.c
+++ b/arch/um/kernel/ptrace.c
@@ -42,10 +42,12 @@ void ptrace_disable(struct task_struct *child)
extern int peek_user(struct task_struct * child, long addr, long data);
extern int poke_user(struct task_struct * child, long addr, long data);
-long arch_ptrace(struct task_struct *child, long request, long addr, long data)
+long arch_ptrace(struct task_struct *child, long request,
+ unsigned long addr, unsigned long data)
{
int i, ret;
- unsigned long __user *p = (void __user *)(unsigned long)data;
+ unsigned long __user *p = (void __user *)data;
+ void __user *vp = p;
switch (request) {
/* read word at location addr. */
@@ -107,24 +109,20 @@ long arch_ptrace(struct task_struct *child, long request, long addr, long data)
#endif
#ifdef PTRACE_GETFPREGS
case PTRACE_GETFPREGS: /* Get the child FPU state. */
- ret = get_fpregs((struct user_i387_struct __user *) data,
- child);
+ ret = get_fpregs(vp, child);
break;
#endif
#ifdef PTRACE_SETFPREGS
case PTRACE_SETFPREGS: /* Set the child FPU state. */
- ret = set_fpregs((struct user_i387_struct __user *) data,
- child);
+ ret = set_fpregs(vp, child);
break;
#endif
case PTRACE_GET_THREAD_AREA:
- ret = ptrace_get_thread_area(child, addr,
- (struct user_desc __user *) data);
+ ret = ptrace_get_thread_area(child, addr, vp);
break;
case PTRACE_SET_THREAD_AREA:
- ret = ptrace_set_thread_area(child, addr,
- (struct user_desc __user *) data);
+ ret = ptrace_set_thread_area(child, addr, vp);
break;
case PTRACE_FAULTINFO: {
@@ -134,7 +132,8 @@ long arch_ptrace(struct task_struct *child, long request, long addr, long data)
* On i386, ptrace_faultinfo is smaller!
*/
ret = copy_to_user(p, &child->thread.arch.faultinfo,
- sizeof(struct ptrace_faultinfo));
+ sizeof(struct ptrace_faultinfo)) ?
+ -EIO : 0;
break;
}
@@ -158,7 +157,7 @@ long arch_ptrace(struct task_struct *child, long request, long addr, long data)
#ifdef PTRACE_ARCH_PRCTL
case PTRACE_ARCH_PRCTL:
/* XXX Calls ptrace on the host - needs some SMP thinking */
- ret = arch_prctl(child, data, (void *) addr);
+ ret = arch_prctl(child, data, (void __user *) addr);
break;
#endif
default:
diff --git a/arch/um/kernel/uml.lds.S b/arch/um/kernel/uml.lds.S
index ec6378550671..fbd99402d4d2 100644
--- a/arch/um/kernel/uml.lds.S
+++ b/arch/um/kernel/uml.lds.S
@@ -22,7 +22,7 @@ SECTIONS
_text = .;
_stext = .;
__init_begin = .;
- INIT_TEXT_SECTION(PAGE_SIZE)
+ INIT_TEXT_SECTION(0)
. = ALIGN(PAGE_SIZE);
.text :
@@ -43,6 +43,23 @@ SECTIONS
__syscall_stub_end = .;
}
+ /*
+ * These are needed even in a static link, even if they wind up being empty.
+ * Newer glibc needs these __rel{,a}_iplt_{start,end} symbols.
+ */
+ .rel.plt : {
+ *(.rel.plt)
+ PROVIDE_HIDDEN(__rel_iplt_start = .);
+ *(.rel.iplt)
+ PROVIDE_HIDDEN(__rel_iplt_end = .);
+ }
+ .rela.plt : {
+ *(.rela.plt)
+ PROVIDE_HIDDEN(__rela_iplt_start = .);
+ *(.rela.iplt)
+ PROVIDE_HIDDEN(__rela_iplt_end = .);
+ }
+
#include "asm/common.lds.S"
init.data : { INIT_DATA }
diff --git a/arch/um/os-Linux/time.c b/arch/um/os-Linux/time.c
index dec5678fc17f..6e3359d6a839 100644
--- a/arch/um/os-Linux/time.c
+++ b/arch/um/os-Linux/time.c
@@ -60,7 +60,7 @@ static inline long long timeval_to_ns(const struct timeval *tv)
long long disable_timer(void)
{
struct itimerval time = ((struct itimerval) { { 0, 0 }, { 0, 0 } });
- int remain, max = UM_NSEC_PER_SEC / UM_HZ;
+ long long remain, max = UM_NSEC_PER_SEC / UM_HZ;
if (setitimer(ITIMER_VIRTUAL, &time, &time) < 0)
printk(UM_KERN_ERR "disable_timer - setitimer failed, "
diff --git a/arch/um/sys-i386/ptrace.c b/arch/um/sys-i386/ptrace.c
index c9b176534d65..d23b2d3ea384 100644
--- a/arch/um/sys-i386/ptrace.c
+++ b/arch/um/sys-i386/ptrace.c
@@ -203,8 +203,8 @@ int set_fpxregs(struct user_fxsr_struct __user *buf, struct task_struct *child)
(unsigned long *) &fpregs);
}
-long subarch_ptrace(struct task_struct *child, long request, long addr,
- long data)
+long subarch_ptrace(struct task_struct *child, long request,
+ unsigned long addr, unsigned long data)
{
return -EIO;
}
diff --git a/arch/um/sys-x86_64/ptrace.c b/arch/um/sys-x86_64/ptrace.c
index f3458d7d1c5a..f43613643cdb 100644
--- a/arch/um/sys-x86_64/ptrace.c
+++ b/arch/um/sys-x86_64/ptrace.c
@@ -175,19 +175,18 @@ int set_fpregs(struct user_i387_struct __user *buf, struct task_struct *child)
return restore_fp_registers(userspace_pid[cpu], fpregs);
}
-long subarch_ptrace(struct task_struct *child, long request, long addr,
- long data)
+long subarch_ptrace(struct task_struct *child, long request,
+ unsigned long addr, unsigned long data)
{
int ret = -EIO;
+ void __user *datap = (void __user *) data;
switch (request) {
case PTRACE_GETFPXREGS: /* Get the child FPU state. */
- ret = get_fpregs((struct user_i387_struct __user *) data,
- child);
+ ret = get_fpregs(datap, child);
break;
case PTRACE_SETFPXREGS: /* Set the child FPU state. */
- ret = set_fpregs((struct user_i387_struct __user *) data,
- child);
+ ret = set_fpregs(datap, child);
break;
}
diff --git a/arch/x86/Kbuild b/arch/x86/Kbuild
index ad8ec356fb36..0e103236b754 100644
--- a/arch/x86/Kbuild
+++ b/arch/x86/Kbuild
@@ -14,3 +14,4 @@ obj-y += crypto/
obj-y += vdso/
obj-$(CONFIG_IA32_EMULATION) += ia32/
+obj-y += platform/
diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig
index dfabfefc21c4..e330da21b84f 100644
--- a/arch/x86/Kconfig
+++ b/arch/x86/Kconfig
@@ -1,6 +1,3 @@
-# x86 configuration
-mainmenu "Linux Kernel Configuration for x86"
-
# Select 32 or 64 bit
config 64BIT
bool "64-bit kernel" if ARCH = "x86"
@@ -24,7 +21,7 @@ config X86
select HAVE_UNSTABLE_SCHED_CLOCK
select HAVE_IDE
select HAVE_OPROFILE
- select HAVE_PERF_EVENTS if (!M386 && !M486)
+ select HAVE_PERF_EVENTS
select HAVE_IRQ_WORK
select HAVE_IOREMAP_PROT
select HAVE_KPROBES
@@ -347,6 +344,7 @@ endif
config X86_VSMP
bool "ScaleMP vSMP"
+ select PARAVIRT_GUEST
select PARAVIRT
depends on X86_64 && PCI
depends on X86_EXTENDED_PLATFORM
@@ -1895,6 +1893,11 @@ config PCI_OLPC
def_bool y
depends on PCI && OLPC && (PCI_GOOLPC || PCI_GOANY)
+config PCI_XEN
+ def_bool y
+ depends on PCI && XEN
+ select SWIOTLB_XEN
+
config PCI_DOMAINS
def_bool y
depends on PCI
diff --git a/arch/x86/Makefile_32.cpu b/arch/x86/Makefile_32.cpu
index 1255d953c65d..f2ee1abb1df9 100644
--- a/arch/x86/Makefile_32.cpu
+++ b/arch/x86/Makefile_32.cpu
@@ -51,7 +51,18 @@ cflags-$(CONFIG_X86_GENERIC) += $(call tune,generic,$(call tune,i686))
# prologue (push %ebp, mov %esp, %ebp) which breaks the function graph
# tracer assumptions. For i686, generic, core2 this is set by the
# compiler anyway
-cflags-$(CONFIG_FUNCTION_GRAPH_TRACER) += $(call cc-option,-maccumulate-outgoing-args)
+ifeq ($(CONFIG_FUNCTION_GRAPH_TRACER), y)
+ADD_ACCUMULATE_OUTGOING_ARGS := y
+endif
+
+# Work around a bug in the first gcc implementations of asm goto that
+# caused gcc to mess up the push and pop of the stack in some uses of
+# asm goto.
+ifeq ($(CONFIG_JUMP_LABEL), y)
+ADD_ACCUMULATE_OUTGOING_ARGS := y
+endif
+
+cflags-$(ADD_ACCUMULATE_OUTGOING_ARGS) += $(call cc-option,-maccumulate-outgoing-args)
# Bug fix for binutils: this option is required in order to keep
# binutils from generating NOPL instructions against our will.
diff --git a/arch/x86/ia32/sys_ia32.c b/arch/x86/ia32/sys_ia32.c
index 849813f398e7..5852519b2d0f 100644
--- a/arch/x86/ia32/sys_ia32.c
+++ b/arch/x86/ia32/sys_ia32.c
@@ -28,7 +28,6 @@
#include <linux/syscalls.h>
#include <linux/times.h>
#include <linux/utsname.h>
-#include <linux/smp_lock.h>
#include <linux/mm.h>
#include <linux/uio.h>
#include <linux/poll.h>
diff --git a/arch/x86/include/asm/acpi.h b/arch/x86/include/asm/acpi.h
index 92091de11113..55d106b5e31b 100644
--- a/arch/x86/include/asm/acpi.h
+++ b/arch/x86/include/asm/acpi.h
@@ -93,6 +93,9 @@ extern u8 acpi_sci_flags;
extern int acpi_sci_override_gsi;
void acpi_pic_sci_set_trigger(unsigned int, u16);
+extern int (*__acpi_register_gsi)(struct device *dev, u32 gsi,
+ int trigger, int polarity);
+
static inline void disable_acpi(void)
{
acpi_disabled = 1;
diff --git a/arch/x86/include/asm/apic.h b/arch/x86/include/asm/apic.h
index 286de34b0ed6..f6ce0bda3b98 100644
--- a/arch/x86/include/asm/apic.h
+++ b/arch/x86/include/asm/apic.h
@@ -141,13 +141,13 @@ static inline void native_apic_msr_write(u32 reg, u32 v)
static inline u32 native_apic_msr_read(u32 reg)
{
- u32 low, high;
+ u64 msr;
if (reg == APIC_DFR)
return -1;
- rdmsr(APIC_BASE_MSR + (reg >> 4), low, high);
- return low;
+ rdmsrl(APIC_BASE_MSR + (reg >> 4), msr);
+ return (u32)msr;
}
static inline void native_x2apic_wait_icr_idle(void)
@@ -181,12 +181,12 @@ extern void enable_x2apic(void);
extern void x2apic_icr_write(u32 low, u32 id);
static inline int x2apic_enabled(void)
{
- int msr, msr2;
+ u64 msr;
if (!cpu_has_x2apic)
return 0;
- rdmsr(MSR_IA32_APICBASE, msr, msr2);
+ rdmsrl(MSR_IA32_APICBASE, msr);
if (msr & X2APIC_ENABLE)
return 1;
return 0;
diff --git a/arch/x86/include/asm/fixmap.h b/arch/x86/include/asm/fixmap.h
index 4d293dced62f..9479a037419f 100644
--- a/arch/x86/include/asm/fixmap.h
+++ b/arch/x86/include/asm/fixmap.h
@@ -216,8 +216,8 @@ static inline unsigned long virt_to_fix(const unsigned long vaddr)
}
/* Return a pointer with the offset calculated */
-static inline unsigned long __set_fixmap_offset(enum fixed_addresses idx,
- phys_addr_t phys, pgprot_t flags)
+static __always_inline unsigned long
+__set_fixmap_offset(enum fixed_addresses idx, phys_addr_t phys, pgprot_t flags)
{
__set_fixmap(idx, phys, flags);
return fix_to_virt(idx) + (phys & (PAGE_SIZE - 1));
diff --git a/arch/x86/include/asm/highmem.h b/arch/x86/include/asm/highmem.h
index 8caac76ac324..3bd04022fd0c 100644
--- a/arch/x86/include/asm/highmem.h
+++ b/arch/x86/include/asm/highmem.h
@@ -59,11 +59,12 @@ extern void kunmap_high(struct page *page);
void *kmap(struct page *page);
void kunmap(struct page *page);
-void *kmap_atomic_prot(struct page *page, enum km_type type, pgprot_t prot);
-void *kmap_atomic(struct page *page, enum km_type type);
-void kunmap_atomic_notypecheck(void *kvaddr, enum km_type type);
-void *kmap_atomic_pfn(unsigned long pfn, enum km_type type);
-void *kmap_atomic_prot_pfn(unsigned long pfn, enum km_type type, pgprot_t prot);
+
+void *kmap_atomic_prot(struct page *page, pgprot_t prot);
+void *__kmap_atomic(struct page *page);
+void __kunmap_atomic(void *kvaddr);
+void *kmap_atomic_pfn(unsigned long pfn);
+void *kmap_atomic_prot_pfn(unsigned long pfn, pgprot_t prot);
struct page *kmap_atomic_to_page(void *ptr);
#define flush_cache_kmaps() do { } while (0)
diff --git a/arch/x86/include/asm/io.h b/arch/x86/include/asm/io.h
index f0203f4791a8..072273082528 100644
--- a/arch/x86/include/asm/io.h
+++ b/arch/x86/include/asm/io.h
@@ -41,6 +41,8 @@
#include <asm-generic/int-ll64.h>
#include <asm/page.h>
+#include <xen/xen.h>
+
#define build_mmio_read(name, size, type, reg, barrier) \
static inline type name(const volatile void __iomem *addr) \
{ type ret; asm volatile("mov" size " %1,%0":reg (ret) \
@@ -351,6 +353,17 @@ extern void early_iounmap(void __iomem *addr, unsigned long size);
extern void fixup_early_ioremap(void);
extern bool is_early_ioremap_ptep(pte_t *ptep);
+#ifdef CONFIG_XEN
+struct bio_vec;
+
+extern bool xen_biovec_phys_mergeable(const struct bio_vec *vec1,
+ const struct bio_vec *vec2);
+
+#define BIOVEC_PHYS_MERGEABLE(vec1, vec2) \
+ (__BIOVEC_PHYS_MERGEABLE(vec1, vec2) && \
+ (!xen_domain() || xen_biovec_phys_mergeable(vec1, vec2)))
+#endif /* CONFIG_XEN */
+
#define IO_SPACE_LIMIT 0xffff
#endif /* _ASM_X86_IO_H */
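/*
 * Editor's illustration (not part of the patch): the io.h change above
 * layers a platform veto on top of the generic mergeability test -- two
 * bio_vecs merge only if the generic check passes and, under Xen, the
 * hypervisor-aware check also agrees.  Minimal sketch of that "generic test
 * AND optional platform veto" shape; the predicates are invented for the
 * example and are not the kernel's.
 */
#include <stdbool.h>
#include <stdio.h>

struct vec { unsigned long phys; unsigned long len; };

static bool generic_mergeable(const struct vec *a, const struct vec *b)
{
	return a->phys + a->len == b->phys;	/* physically contiguous */
}

static bool platform_active;			/* e.g. running under Xen */

static bool platform_mergeable(const struct vec *a, const struct vec *b)
{
	(void)a; (void)b;
	return false;				/* pretend the platform forbids the merge */
}

static bool mergeable(const struct vec *a, const struct vec *b)
{
	return generic_mergeable(a, b) &&
	       (!platform_active || platform_mergeable(a, b));
}

int main(void)
{
	struct vec a = { 0x1000, 0x200 }, b = { 0x1200, 0x100 };

	printf("bare metal: %d\n", mergeable(&a, &b));		/* 1 */
	platform_active = true;
	printf("platform veto: %d\n", mergeable(&a, &b));	/* 0 */
	return 0;
}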
diff --git a/arch/x86/include/asm/io_apic.h b/arch/x86/include/asm/io_apic.h
index c8be4566c3d2..a6b28d017c2f 100644
--- a/arch/x86/include/asm/io_apic.h
+++ b/arch/x86/include/asm/io_apic.h
@@ -169,6 +169,7 @@ extern void mask_IO_APIC_setup(struct IO_APIC_route_entry **ioapic_entries);
extern int restore_IO_APIC_setup(struct IO_APIC_route_entry **ioapic_entries);
extern void probe_nr_irqs_gsi(void);
+extern int get_nr_irqs_gsi(void);
extern void setup_ioapic_ids_from_mpc(void);
diff --git a/arch/x86/include/asm/iomap.h b/arch/x86/include/asm/iomap.h
index c4191b3b7056..363e33eb6ec1 100644
--- a/arch/x86/include/asm/iomap.h
+++ b/arch/x86/include/asm/iomap.h
@@ -27,10 +27,10 @@
#include <asm/tlbflush.h>
void __iomem *
-iomap_atomic_prot_pfn(unsigned long pfn, enum km_type type, pgprot_t prot);
+iomap_atomic_prot_pfn(unsigned long pfn, pgprot_t prot);
void
-iounmap_atomic(void __iomem *kvaddr, enum km_type type);
+iounmap_atomic(void __iomem *kvaddr);
int
iomap_create_wc(resource_size_t base, unsigned long size, pgprot_t *prot);
diff --git a/arch/x86/include/asm/irq.h b/arch/x86/include/asm/irq.h
index 0bf5b0083650..13b0ebaa512f 100644
--- a/arch/x86/include/asm/irq.h
+++ b/arch/x86/include/asm/irq.h
@@ -21,10 +21,8 @@ static inline int irq_canonicalize(int irq)
#ifdef CONFIG_X86_32
extern void irq_ctx_init(int cpu);
-extern void irq_ctx_exit(int cpu);
#else
# define irq_ctx_init(cpu) do { } while (0)
-# define irq_ctx_exit(cpu) do { } while (0)
#endif
#define __ARCH_HAS_DO_SOFTIRQ
diff --git a/arch/x86/include/asm/msr-index.h b/arch/x86/include/asm/msr-index.h
index 83c4bb1d917d..6b89f5e86021 100644
--- a/arch/x86/include/asm/msr-index.h
+++ b/arch/x86/include/asm/msr-index.h
@@ -121,13 +121,14 @@
#define MSR_AMD64_IBSDCLINAD 0xc0011038
#define MSR_AMD64_IBSDCPHYSAD 0xc0011039
#define MSR_AMD64_IBSCTL 0xc001103a
+#define MSR_AMD64_IBSBRTARGET 0xc001103b
/* Fam 10h MSRs */
#define MSR_FAM10H_MMIO_CONF_BASE 0xc0010058
#define FAM10H_MMIO_CONF_ENABLE (1<<0)
#define FAM10H_MMIO_CONF_BUSRANGE_MASK 0xf
#define FAM10H_MMIO_CONF_BUSRANGE_SHIFT 2
-#define FAM10H_MMIO_CONF_BASE_MASK 0xfffffff
+#define FAM10H_MMIO_CONF_BASE_MASK 0xfffffffULL
#define FAM10H_MMIO_CONF_BASE_SHIFT 20
#define MSR_FAM10H_NODE_ID 0xc001100c
diff --git a/arch/x86/include/asm/olpc.h b/arch/x86/include/asm/olpc.h
index 101229b0d8ed..42a978c0c1b3 100644
--- a/arch/x86/include/asm/olpc.h
+++ b/arch/x86/include/asm/olpc.h
@@ -89,6 +89,8 @@ extern int olpc_ec_mask_unset(uint8_t bits);
/* EC commands */
#define EC_FIRMWARE_REV 0x08
+#define EC_WLAN_ENTER_RESET 0x35
+#define EC_WLAN_LEAVE_RESET 0x25
/* SCI source values */
diff --git a/arch/x86/include/asm/paravirt.h b/arch/x86/include/asm/paravirt.h
index 18e3b8a8709f..ef9975812c77 100644
--- a/arch/x86/include/asm/paravirt.h
+++ b/arch/x86/include/asm/paravirt.h
@@ -824,27 +824,27 @@ static __always_inline void arch_spin_unlock(struct arch_spinlock *lock)
#define __PV_IS_CALLEE_SAVE(func) \
((struct paravirt_callee_save) { func })
-static inline unsigned long arch_local_save_flags(void)
+static inline notrace unsigned long arch_local_save_flags(void)
{
return PVOP_CALLEE0(unsigned long, pv_irq_ops.save_fl);
}
-static inline void arch_local_irq_restore(unsigned long f)
+static inline notrace void arch_local_irq_restore(unsigned long f)
{
PVOP_VCALLEE1(pv_irq_ops.restore_fl, f);
}
-static inline void arch_local_irq_disable(void)
+static inline notrace void arch_local_irq_disable(void)
{
PVOP_VCALLEE0(pv_irq_ops.irq_disable);
}
-static inline void arch_local_irq_enable(void)
+static inline notrace void arch_local_irq_enable(void)
{
PVOP_VCALLEE0(pv_irq_ops.irq_enable);
}
-static inline unsigned long arch_local_irq_save(void)
+static inline notrace unsigned long arch_local_irq_save(void)
{
unsigned long f;
diff --git a/arch/x86/include/asm/pci.h b/arch/x86/include/asm/pci.h
index d395540ff894..ca0437c714b2 100644
--- a/arch/x86/include/asm/pci.h
+++ b/arch/x86/include/asm/pci.h
@@ -7,6 +7,7 @@
#include <linux/string.h>
#include <asm/scatterlist.h>
#include <asm/io.h>
+#include <asm/x86_init.h>
#ifdef __KERNEL__
@@ -94,8 +95,36 @@ static inline void early_quirks(void) { }
extern void pci_iommu_alloc(void);
-/* MSI arch hook */
-#define arch_setup_msi_irqs arch_setup_msi_irqs
+#ifdef CONFIG_PCI_MSI
+/* MSI arch specific hooks */
+static inline int x86_setup_msi_irqs(struct pci_dev *dev, int nvec, int type)
+{
+ return x86_msi.setup_msi_irqs(dev, nvec, type);
+}
+
+static inline void x86_teardown_msi_irqs(struct pci_dev *dev)
+{
+ x86_msi.teardown_msi_irqs(dev);
+}
+
+static inline void x86_teardown_msi_irq(unsigned int irq)
+{
+ x86_msi.teardown_msi_irq(irq);
+}
+#define arch_setup_msi_irqs x86_setup_msi_irqs
+#define arch_teardown_msi_irqs x86_teardown_msi_irqs
+#define arch_teardown_msi_irq x86_teardown_msi_irq
+/* implemented in arch/x86/kernel/apic/io_apic. */
+int native_setup_msi_irqs(struct pci_dev *dev, int nvec, int type);
+void native_teardown_msi_irq(unsigned int irq);
+/* default to the implementation in drivers/lib/msi.c */
+#define HAVE_DEFAULT_MSI_TEARDOWN_IRQS
+void default_teardown_msi_irqs(struct pci_dev *dev);
+#else
+#define native_setup_msi_irqs NULL
+#define native_teardown_msi_irq NULL
+#define default_teardown_msi_irqs NULL
+#endif
#define PCI_DMA_BUS_IS_PHYS (dma_ops->is_phys)
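/*
 * Editor's illustration (not part of the patch): the pci.h change above
 * routes the MSI arch hooks through a struct of function pointers (x86_msi)
 * so a platform such as Xen can override the native implementations at
 * boot.  Minimal userspace sketch of that pattern; all names and the "xen"
 * override are illustrative only.
 */
#include <stdio.h>

struct msi_ops {
	int (*setup_irqs)(int nvec);
	void (*teardown_irq)(unsigned int irq);
};

static int native_setup_irqs(int nvec)
{
	printf("native: setting up %d MSI irq(s)\n", nvec);
	return 0;
}

static void native_teardown_irq(unsigned int irq)
{
	printf("native: tearing down irq %u\n", irq);
}

/* Default ops; a platform may patch these pointers during early init. */
static struct msi_ops msi_ops = {
	.setup_irqs = native_setup_irqs,
	.teardown_irq = native_teardown_irq,
};

static int xen_setup_irqs(int nvec)
{
	printf("xen: routing %d MSI irq(s) through the hypervisor\n", nvec);
	return 0;
}

int main(void)
{
	msi_ops.setup_irqs(2);			/* dispatches to the native hook */

	msi_ops.setup_irqs = xen_setup_irqs;	/* platform override */
	msi_ops.setup_irqs(2);			/* now dispatches to the override */

	msi_ops.teardown_irq(42);
	return 0;
}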
diff --git a/arch/x86/include/asm/pci_x86.h b/arch/x86/include/asm/pci_x86.h
index 49c7219826f9..704526734bef 100644
--- a/arch/x86/include/asm/pci_x86.h
+++ b/arch/x86/include/asm/pci_x86.h
@@ -47,6 +47,7 @@ enum pci_bf_sort_state {
extern unsigned int pcibios_max_latency;
void pcibios_resource_survey(void);
+void pcibios_set_cache_line_size(void);
/* pci-pc.c */
diff --git a/arch/x86/include/asm/perf_event.h b/arch/x86/include/asm/perf_event.h
index 6e742cc4251b..550e26b1dbb3 100644
--- a/arch/x86/include/asm/perf_event.h
+++ b/arch/x86/include/asm/perf_event.h
@@ -111,17 +111,18 @@ union cpuid10_edx {
#define X86_PMC_IDX_FIXED_BTS (X86_PMC_IDX_FIXED + 16)
/* IbsFetchCtl bits/masks */
-#define IBS_FETCH_RAND_EN (1ULL<<57)
-#define IBS_FETCH_VAL (1ULL<<49)
-#define IBS_FETCH_ENABLE (1ULL<<48)
-#define IBS_FETCH_CNT 0xFFFF0000ULL
-#define IBS_FETCH_MAX_CNT 0x0000FFFFULL
+#define IBS_FETCH_RAND_EN (1ULL<<57)
+#define IBS_FETCH_VAL (1ULL<<49)
+#define IBS_FETCH_ENABLE (1ULL<<48)
+#define IBS_FETCH_CNT 0xFFFF0000ULL
+#define IBS_FETCH_MAX_CNT 0x0000FFFFULL
/* IbsOpCtl bits */
-#define IBS_OP_CNT_CTL (1ULL<<19)
-#define IBS_OP_VAL (1ULL<<18)
-#define IBS_OP_ENABLE (1ULL<<17)
-#define IBS_OP_MAX_CNT 0x0000FFFFULL
+#define IBS_OP_CNT_CTL (1ULL<<19)
+#define IBS_OP_VAL (1ULL<<18)
+#define IBS_OP_ENABLE (1ULL<<17)
+#define IBS_OP_MAX_CNT 0x0000FFFFULL
+#define IBS_OP_MAX_CNT_EXT 0x007FFFFFULL /* not a register bit mask */
#ifdef CONFIG_PERF_EVENTS
extern void init_hw_perf_events(void);
diff --git a/arch/x86/include/asm/pgtable_32.h b/arch/x86/include/asm/pgtable_32.h
index 8abde9ec90bf..0c92113c4cb6 100644
--- a/arch/x86/include/asm/pgtable_32.h
+++ b/arch/x86/include/asm/pgtable_32.h
@@ -49,24 +49,14 @@ extern void set_pmd_pfn(unsigned long, unsigned long, pgprot_t);
#endif
#if defined(CONFIG_HIGHPTE)
-#define __KM_PTE \
- (in_nmi() ? KM_NMI_PTE : \
- in_irq() ? KM_IRQ_PTE : \
- KM_PTE0)
#define pte_offset_map(dir, address) \
- ((pte_t *)kmap_atomic(pmd_page(*(dir)), __KM_PTE) + \
+ ((pte_t *)kmap_atomic(pmd_page(*(dir))) + \
pte_index((address)))
-#define pte_offset_map_nested(dir, address) \
- ((pte_t *)kmap_atomic(pmd_page(*(dir)), KM_PTE1) + \
- pte_index((address)))
-#define pte_unmap(pte) kunmap_atomic((pte), __KM_PTE)
-#define pte_unmap_nested(pte) kunmap_atomic((pte), KM_PTE1)
+#define pte_unmap(pte) kunmap_atomic((pte))
#else
#define pte_offset_map(dir, address) \
((pte_t *)page_address(pmd_page(*(dir))) + pte_index((address)))
-#define pte_offset_map_nested(dir, address) pte_offset_map((dir), (address))
#define pte_unmap(pte) do { } while (0)
-#define pte_unmap_nested(pte) do { } while (0)
#endif
/* Clear a kernel PTE and flush it from the TLB */
diff --git a/arch/x86/include/asm/pgtable_64.h b/arch/x86/include/asm/pgtable_64.h
index f96ac9bedf75..f86da20347f2 100644
--- a/arch/x86/include/asm/pgtable_64.h
+++ b/arch/x86/include/asm/pgtable_64.h
@@ -127,9 +127,7 @@ static inline int pgd_large(pgd_t pgd) { return 0; }
/* x86-64 always has all page tables mapped. */
#define pte_offset_map(dir, address) pte_offset_kernel((dir), (address))
-#define pte_offset_map_nested(dir, address) pte_offset_kernel((dir), (address))
#define pte_unmap(pte) ((void)(pte))/* NOP */
-#define pte_unmap_nested(pte) ((void)(pte)) /* NOP */
#define update_mmu_cache(vma, address, ptep) do { } while (0)
diff --git a/arch/x86/include/asm/pvclock.h b/arch/x86/include/asm/pvclock.h
index 7f7e577a0e39..31d84acc1512 100644
--- a/arch/x86/include/asm/pvclock.h
+++ b/arch/x86/include/asm/pvclock.h
@@ -11,6 +11,7 @@ unsigned long pvclock_tsc_khz(struct pvclock_vcpu_time_info *src);
void pvclock_read_wallclock(struct pvclock_wall_clock *wall,
struct pvclock_vcpu_time_info *vcpu,
struct timespec *ts);
+void pvclock_resume(void);
/*
* Scale a 64-bit delta by scaling and multiplying by a 32-bit fraction,
diff --git a/arch/x86/include/asm/smp.h b/arch/x86/include/asm/smp.h
index 4cfc90824068..4c2f63c7fc1b 100644
--- a/arch/x86/include/asm/smp.h
+++ b/arch/x86/include/asm/smp.h
@@ -50,7 +50,7 @@ struct smp_ops {
void (*smp_prepare_cpus)(unsigned max_cpus);
void (*smp_cpus_done)(unsigned max_cpus);
- void (*smp_send_stop)(void);
+ void (*stop_other_cpus)(int wait);
void (*smp_send_reschedule)(int cpu);
int (*cpu_up)(unsigned cpu);
@@ -73,7 +73,12 @@ extern struct smp_ops smp_ops;
static inline void smp_send_stop(void)
{
- smp_ops.smp_send_stop();
+ smp_ops.stop_other_cpus(0);
+}
+
+static inline void stop_other_cpus(void)
+{
+ smp_ops.stop_other_cpus(1);
}
static inline void smp_prepare_boot_cpu(void)
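(Editorial note: the smp_ops change turns smp_send_stop() into a thin wrapper around a stop_other_cpus(wait) callback, so callers that must know the other CPUs have actually halted, such as reboot or kexec paths, can pass wait=1. Below is a hedged userspace sketch of the same ops-table pattern; cpu_ops and native_stop_other_cpus are placeholder names, not the kernel's definitions.)

#include <stdio.h>

struct cpu_ops {
	/* wait != 0: do not return until the other CPUs acknowledge the stop */
	void (*stop_other_cpus)(int wait);
};

static void native_stop_other_cpus(int wait)
{
	printf("sending stop IPI%s\n", wait ? " and waiting for ack" : "");
	/* a real implementation would send an IPI and optionally poll here */
}

static struct cpu_ops ops = { .stop_other_cpus = native_stop_other_cpus };

/* fire-and-forget variant, as used by existing smp_send_stop() callers */
static void smp_send_stop_sketch(void)   { ops.stop_other_cpus(0); }
/* synchronous variant for callers that need the CPUs really stopped */
static void stop_other_cpus_sketch(void) { ops.stop_other_cpus(1); }

int main(void)
{
	smp_send_stop_sketch();
	stop_other_cpus_sketch();
	return 0;
}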
diff --git a/arch/x86/include/asm/uv/uv_hub.h b/arch/x86/include/asm/uv/uv_hub.h
index bf6b88ef8eeb..a501741c2335 100644
--- a/arch/x86/include/asm/uv/uv_hub.h
+++ b/arch/x86/include/asm/uv/uv_hub.h
@@ -5,7 +5,7 @@
*
* SGI UV architectural definitions
*
- * Copyright (C) 2007-2008 Silicon Graphics, Inc. All rights reserved.
+ * Copyright (C) 2007-2010 Silicon Graphics, Inc. All rights reserved.
*/
#ifndef _ASM_X86_UV_UV_HUB_H
@@ -77,7 +77,8 @@
*
* 1111110000000000
* 5432109876543210
- * pppppppppplc0cch
+ * pppppppppplc0cch Nehalem-EX
+ * ppppppppplcc0cch Westmere-EX
* sssssssssss
*
* p = pnode bits
@@ -148,12 +149,25 @@ struct uv_hub_info_s {
unsigned char m_val;
unsigned char n_val;
struct uv_scir_s scir;
+ unsigned char apic_pnode_shift;
};
DECLARE_PER_CPU(struct uv_hub_info_s, __uv_hub_info);
#define uv_hub_info (&__get_cpu_var(__uv_hub_info))
#define uv_cpu_hub_info(cpu) (&per_cpu(__uv_hub_info, cpu))
+union uvh_apicid {
+ unsigned long v;
+ struct uvh_apicid_s {
+ unsigned long local_apic_mask : 24;
+ unsigned long local_apic_shift : 5;
+ unsigned long unused1 : 3;
+ unsigned long pnode_mask : 24;
+ unsigned long pnode_shift : 5;
+ unsigned long unused2 : 3;
+ } s;
+};
+
/*
* Local & Global MMR space macros.
* Note: macros are intended to be used ONLY by inline functions
@@ -182,8 +196,11 @@ DECLARE_PER_CPU(struct uv_hub_info_s, __uv_hub_info);
#define UV_GLOBAL_MMR64_PNODE_BITS(p) \
(((unsigned long)(p)) << UV_GLOBAL_MMR64_PNODE_SHIFT)
+#define UVH_APICID 0x002D0E00L
#define UV_APIC_PNODE_SHIFT 6
+#define UV_APICID_HIBIT_MASK 0xffff0000
+
/* Local Bus from cpu's perspective */
#define LOCAL_BUS_BASE 0x1c00000
#define LOCAL_BUS_SIZE (4 * 1024 * 1024)
@@ -280,7 +297,7 @@ static inline void *uv_pnode_offset_to_vaddr(int pnode, unsigned long offset)
*/
static inline int uv_apicid_to_pnode(int apicid)
{
- return (apicid >> UV_APIC_PNODE_SHIFT);
+ return (apicid >> uv_hub_info->apic_pnode_shift);
}
/*
@@ -476,8 +493,10 @@ static inline void uv_set_cpu_scir_bits(int cpu, unsigned char value)
}
}
+extern unsigned int uv_apicid_hibits;
static unsigned long uv_hub_ipi_value(int apicid, int vector, int mode)
{
+ apicid |= uv_apicid_hibits;
return (1UL << UVH_IPI_INT_SEND_SHFT) |
((apicid) << UVH_IPI_INT_APIC_ID_SHFT) |
(mode << UVH_IPI_INT_DELIVERY_MODE_SHFT) |
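(Editorial note: the new uvh_apicid union describes how the UVH_APICID MMR encodes the local-APIC and pnode fields, and apic_pnode_shift lets uv_apicid_to_pnode() honour a BIOS-programmed shift instead of the fixed UV_APIC_PNODE_SHIFT. The sketch below is a rough userspace illustration of decoding such a register with the same bit-field layout; the sample values are invented and a 64-bit unsigned long is assumed.)

#include <stdio.h>

union uvh_apicid_sketch {
	unsigned long v;
	struct {
		unsigned long local_apic_mask  : 24;
		unsigned long local_apic_shift : 5;
		unsigned long unused1          : 3;
		unsigned long pnode_mask       : 24;
		unsigned long pnode_shift      : 5;
		unsigned long unused2          : 3;
	} s;
};

int main(void)
{
	union uvh_apicid_sketch reg;
	int apicid = 0x1234;             /* made-up APIC id */

	reg.v = 0;                       /* pretend this came from the MMR read */
	reg.s.pnode_shift = 6;           /* BIOS-chosen shift */

	/* the pnode is now derived from the per-hub shift, not a constant */
	int pnode = apicid >> reg.s.pnode_shift;
	printf("apicid 0x%x -> pnode %d (shift %lu)\n",
	       apicid, pnode, (unsigned long)reg.s.pnode_shift);
	return 0;
}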
diff --git a/arch/x86/include/asm/uv/uv_mmrs.h b/arch/x86/include/asm/uv/uv_mmrs.h
index b2f2d2e05cec..20cafeac7455 100644
--- a/arch/x86/include/asm/uv/uv_mmrs.h
+++ b/arch/x86/include/asm/uv/uv_mmrs.h
@@ -5,7 +5,7 @@
*
* SGI UV MMR definitions
*
- * Copyright (C) 2007-2008 Silicon Graphics, Inc. All rights reserved.
+ * Copyright (C) 2007-2010 Silicon Graphics, Inc. All rights reserved.
*/
#ifndef _ASM_X86_UV_UV_MMRS_H
@@ -754,6 +754,23 @@ union uvh_lb_bau_sb_descriptor_base_u {
};
/* ========================================================================= */
+/* UVH_LB_TARGET_PHYSICAL_APIC_ID_MASK */
+/* ========================================================================= */
+#define UVH_LB_TARGET_PHYSICAL_APIC_ID_MASK 0x320130UL
+#define UVH_LB_TARGET_PHYSICAL_APIC_ID_MASK_32 0x009f0
+
+#define UVH_LB_TARGET_PHYSICAL_APIC_ID_MASK_BIT_ENABLES_SHFT 0
+#define UVH_LB_TARGET_PHYSICAL_APIC_ID_MASK_BIT_ENABLES_MASK 0x00000000ffffffffUL
+
+union uvh_lb_target_physical_apic_id_mask_u {
+ unsigned long v;
+ struct uvh_lb_target_physical_apic_id_mask_s {
+ unsigned long bit_enables : 32; /* RW */
+ unsigned long rsvd_32_63 : 32; /* */
+ } s;
+};
+
+/* ========================================================================= */
/* UVH_NODE_ID */
/* ========================================================================= */
#define UVH_NODE_ID 0x0UL
@@ -806,6 +823,78 @@ union uvh_node_present_table_u {
};
/* ========================================================================= */
+/* UVH_RH_GAM_ALIAS210_OVERLAY_CONFIG_0_MMR */
+/* ========================================================================= */
+#define UVH_RH_GAM_ALIAS210_OVERLAY_CONFIG_0_MMR 0x16000c8UL
+
+#define UVH_RH_GAM_ALIAS210_OVERLAY_CONFIG_0_MMR_BASE_SHFT 24
+#define UVH_RH_GAM_ALIAS210_OVERLAY_CONFIG_0_MMR_BASE_MASK 0x00000000ff000000UL
+#define UVH_RH_GAM_ALIAS210_OVERLAY_CONFIG_0_MMR_M_ALIAS_SHFT 48
+#define UVH_RH_GAM_ALIAS210_OVERLAY_CONFIG_0_MMR_M_ALIAS_MASK 0x001f000000000000UL
+#define UVH_RH_GAM_ALIAS210_OVERLAY_CONFIG_0_MMR_ENABLE_SHFT 63
+#define UVH_RH_GAM_ALIAS210_OVERLAY_CONFIG_0_MMR_ENABLE_MASK 0x8000000000000000UL
+
+union uvh_rh_gam_alias210_overlay_config_0_mmr_u {
+ unsigned long v;
+ struct uvh_rh_gam_alias210_overlay_config_0_mmr_s {
+ unsigned long rsvd_0_23: 24; /* */
+ unsigned long base : 8; /* RW */
+ unsigned long rsvd_32_47: 16; /* */
+ unsigned long m_alias : 5; /* RW */
+ unsigned long rsvd_53_62: 10; /* */
+ unsigned long enable : 1; /* RW */
+ } s;
+};
+
+/* ========================================================================= */
+/* UVH_RH_GAM_ALIAS210_OVERLAY_CONFIG_1_MMR */
+/* ========================================================================= */
+#define UVH_RH_GAM_ALIAS210_OVERLAY_CONFIG_1_MMR 0x16000d8UL
+
+#define UVH_RH_GAM_ALIAS210_OVERLAY_CONFIG_1_MMR_BASE_SHFT 24
+#define UVH_RH_GAM_ALIAS210_OVERLAY_CONFIG_1_MMR_BASE_MASK 0x00000000ff000000UL
+#define UVH_RH_GAM_ALIAS210_OVERLAY_CONFIG_1_MMR_M_ALIAS_SHFT 48
+#define UVH_RH_GAM_ALIAS210_OVERLAY_CONFIG_1_MMR_M_ALIAS_MASK 0x001f000000000000UL
+#define UVH_RH_GAM_ALIAS210_OVERLAY_CONFIG_1_MMR_ENABLE_SHFT 63
+#define UVH_RH_GAM_ALIAS210_OVERLAY_CONFIG_1_MMR_ENABLE_MASK 0x8000000000000000UL
+
+union uvh_rh_gam_alias210_overlay_config_1_mmr_u {
+ unsigned long v;
+ struct uvh_rh_gam_alias210_overlay_config_1_mmr_s {
+ unsigned long rsvd_0_23: 24; /* */
+ unsigned long base : 8; /* RW */
+ unsigned long rsvd_32_47: 16; /* */
+ unsigned long m_alias : 5; /* RW */
+ unsigned long rsvd_53_62: 10; /* */
+ unsigned long enable : 1; /* RW */
+ } s;
+};
+
+/* ========================================================================= */
+/* UVH_RH_GAM_ALIAS210_OVERLAY_CONFIG_2_MMR */
+/* ========================================================================= */
+#define UVH_RH_GAM_ALIAS210_OVERLAY_CONFIG_2_MMR 0x16000e8UL
+
+#define UVH_RH_GAM_ALIAS210_OVERLAY_CONFIG_2_MMR_BASE_SHFT 24
+#define UVH_RH_GAM_ALIAS210_OVERLAY_CONFIG_2_MMR_BASE_MASK 0x00000000ff000000UL
+#define UVH_RH_GAM_ALIAS210_OVERLAY_CONFIG_2_MMR_M_ALIAS_SHFT 48
+#define UVH_RH_GAM_ALIAS210_OVERLAY_CONFIG_2_MMR_M_ALIAS_MASK 0x001f000000000000UL
+#define UVH_RH_GAM_ALIAS210_OVERLAY_CONFIG_2_MMR_ENABLE_SHFT 63
+#define UVH_RH_GAM_ALIAS210_OVERLAY_CONFIG_2_MMR_ENABLE_MASK 0x8000000000000000UL
+
+union uvh_rh_gam_alias210_overlay_config_2_mmr_u {
+ unsigned long v;
+ struct uvh_rh_gam_alias210_overlay_config_2_mmr_s {
+ unsigned long rsvd_0_23: 24; /* */
+ unsigned long base : 8; /* RW */
+ unsigned long rsvd_32_47: 16; /* */
+ unsigned long m_alias : 5; /* RW */
+ unsigned long rsvd_53_62: 10; /* */
+ unsigned long enable : 1; /* RW */
+ } s;
+};
+
+/* ========================================================================= */
/* UVH_RH_GAM_ALIAS210_REDIRECT_CONFIG_0_MMR */
/* ========================================================================= */
#define UVH_RH_GAM_ALIAS210_REDIRECT_CONFIG_0_MMR 0x16000d0UL
@@ -857,6 +946,29 @@ union uvh_rh_gam_alias210_redirect_config_2_mmr_u {
};
/* ========================================================================= */
+/* UVH_RH_GAM_CONFIG_MMR */
+/* ========================================================================= */
+#define UVH_RH_GAM_CONFIG_MMR 0x1600000UL
+
+#define UVH_RH_GAM_CONFIG_MMR_M_SKT_SHFT 0
+#define UVH_RH_GAM_CONFIG_MMR_M_SKT_MASK 0x000000000000003fUL
+#define UVH_RH_GAM_CONFIG_MMR_N_SKT_SHFT 6
+#define UVH_RH_GAM_CONFIG_MMR_N_SKT_MASK 0x00000000000003c0UL
+#define UVH_RH_GAM_CONFIG_MMR_MMIOL_CFG_SHFT 12
+#define UVH_RH_GAM_CONFIG_MMR_MMIOL_CFG_MASK 0x0000000000001000UL
+
+union uvh_rh_gam_config_mmr_u {
+ unsigned long v;
+ struct uvh_rh_gam_config_mmr_s {
+ unsigned long m_skt : 6; /* RW */
+ unsigned long n_skt : 4; /* RW */
+ unsigned long rsvd_10_11: 2; /* */
+ unsigned long mmiol_cfg : 1; /* RW */
+ unsigned long rsvd_13_63: 51; /* */
+ } s;
+};
+
+/* ========================================================================= */
/* UVH_RH_GAM_GRU_OVERLAY_CONFIG_MMR */
/* ========================================================================= */
#define UVH_RH_GAM_GRU_OVERLAY_CONFIG_MMR 0x1600010UL
@@ -987,97 +1099,5 @@ union uvh_rtc1_int_config_u {
} s;
};
-/* ========================================================================= */
-/* UVH_SI_ADDR_MAP_CONFIG */
-/* ========================================================================= */
-#define UVH_SI_ADDR_MAP_CONFIG 0xc80000UL
-
-#define UVH_SI_ADDR_MAP_CONFIG_M_SKT_SHFT 0
-#define UVH_SI_ADDR_MAP_CONFIG_M_SKT_MASK 0x000000000000003fUL
-#define UVH_SI_ADDR_MAP_CONFIG_N_SKT_SHFT 8
-#define UVH_SI_ADDR_MAP_CONFIG_N_SKT_MASK 0x0000000000000f00UL
-
-union uvh_si_addr_map_config_u {
- unsigned long v;
- struct uvh_si_addr_map_config_s {
- unsigned long m_skt : 6; /* RW */
- unsigned long rsvd_6_7: 2; /* */
- unsigned long n_skt : 4; /* RW */
- unsigned long rsvd_12_63: 52; /* */
- } s;
-};
-
-/* ========================================================================= */
-/* UVH_SI_ALIAS0_OVERLAY_CONFIG */
-/* ========================================================================= */
-#define UVH_SI_ALIAS0_OVERLAY_CONFIG 0xc80008UL
-
-#define UVH_SI_ALIAS0_OVERLAY_CONFIG_BASE_SHFT 24
-#define UVH_SI_ALIAS0_OVERLAY_CONFIG_BASE_MASK 0x00000000ff000000UL
-#define UVH_SI_ALIAS0_OVERLAY_CONFIG_M_ALIAS_SHFT 48
-#define UVH_SI_ALIAS0_OVERLAY_CONFIG_M_ALIAS_MASK 0x001f000000000000UL
-#define UVH_SI_ALIAS0_OVERLAY_CONFIG_ENABLE_SHFT 63
-#define UVH_SI_ALIAS0_OVERLAY_CONFIG_ENABLE_MASK 0x8000000000000000UL
-
-union uvh_si_alias0_overlay_config_u {
- unsigned long v;
- struct uvh_si_alias0_overlay_config_s {
- unsigned long rsvd_0_23: 24; /* */
- unsigned long base : 8; /* RW */
- unsigned long rsvd_32_47: 16; /* */
- unsigned long m_alias : 5; /* RW */
- unsigned long rsvd_53_62: 10; /* */
- unsigned long enable : 1; /* RW */
- } s;
-};
-
-/* ========================================================================= */
-/* UVH_SI_ALIAS1_OVERLAY_CONFIG */
-/* ========================================================================= */
-#define UVH_SI_ALIAS1_OVERLAY_CONFIG 0xc80010UL
-
-#define UVH_SI_ALIAS1_OVERLAY_CONFIG_BASE_SHFT 24
-#define UVH_SI_ALIAS1_OVERLAY_CONFIG_BASE_MASK 0x00000000ff000000UL
-#define UVH_SI_ALIAS1_OVERLAY_CONFIG_M_ALIAS_SHFT 48
-#define UVH_SI_ALIAS1_OVERLAY_CONFIG_M_ALIAS_MASK 0x001f000000000000UL
-#define UVH_SI_ALIAS1_OVERLAY_CONFIG_ENABLE_SHFT 63
-#define UVH_SI_ALIAS1_OVERLAY_CONFIG_ENABLE_MASK 0x8000000000000000UL
-
-union uvh_si_alias1_overlay_config_u {
- unsigned long v;
- struct uvh_si_alias1_overlay_config_s {
- unsigned long rsvd_0_23: 24; /* */
- unsigned long base : 8; /* RW */
- unsigned long rsvd_32_47: 16; /* */
- unsigned long m_alias : 5; /* RW */
- unsigned long rsvd_53_62: 10; /* */
- unsigned long enable : 1; /* RW */
- } s;
-};
-
-/* ========================================================================= */
-/* UVH_SI_ALIAS2_OVERLAY_CONFIG */
-/* ========================================================================= */
-#define UVH_SI_ALIAS2_OVERLAY_CONFIG 0xc80018UL
-
-#define UVH_SI_ALIAS2_OVERLAY_CONFIG_BASE_SHFT 24
-#define UVH_SI_ALIAS2_OVERLAY_CONFIG_BASE_MASK 0x00000000ff000000UL
-#define UVH_SI_ALIAS2_OVERLAY_CONFIG_M_ALIAS_SHFT 48
-#define UVH_SI_ALIAS2_OVERLAY_CONFIG_M_ALIAS_MASK 0x001f000000000000UL
-#define UVH_SI_ALIAS2_OVERLAY_CONFIG_ENABLE_SHFT 63
-#define UVH_SI_ALIAS2_OVERLAY_CONFIG_ENABLE_MASK 0x8000000000000000UL
-
-union uvh_si_alias2_overlay_config_u {
- unsigned long v;
- struct uvh_si_alias2_overlay_config_s {
- unsigned long rsvd_0_23: 24; /* */
- unsigned long base : 8; /* RW */
- unsigned long rsvd_32_47: 16; /* */
- unsigned long m_alias : 5; /* RW */
- unsigned long rsvd_53_62: 10; /* */
- unsigned long enable : 1; /* RW */
- } s;
-};
-
-#endif /* _ASM_X86_UV_UV_MMRS_H */
+#endif /* __ASM_UV_MMRS_X86_H__ */
diff --git a/arch/x86/include/asm/x86_init.h b/arch/x86/include/asm/x86_init.h
index baa579c8e038..64642ad019fb 100644
--- a/arch/x86/include/asm/x86_init.h
+++ b/arch/x86/include/asm/x86_init.h
@@ -154,9 +154,18 @@ struct x86_platform_ops {
int (*i8042_detect)(void);
};
+struct pci_dev;
+
+struct x86_msi_ops {
+ int (*setup_msi_irqs)(struct pci_dev *dev, int nvec, int type);
+ void (*teardown_msi_irq)(unsigned int irq);
+ void (*teardown_msi_irqs)(struct pci_dev *dev);
+};
+
extern struct x86_init_ops x86_init;
extern struct x86_cpuinit_ops x86_cpuinit;
extern struct x86_platform_ops x86_platform;
+extern struct x86_msi_ops x86_msi;
extern void x86_init_noop(void);
extern void x86_init_uint_noop(unsigned int unused);
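(Editorial note: x86_msi_ops routes MSI setup and teardown through overridable function pointers so a paravirtualized backend, here Xen, can replace the native IO-APIC path at boot. The following is an illustrative userspace sketch of that indirection; msi_ops, native_setup and xen_setup are placeholder names.)

#include <stdio.h>

struct fake_pci_dev { int devfn; };

struct msi_ops {
	int (*setup_msi_irqs)(struct fake_pci_dev *dev, int nvec, int type);
};

static int native_setup(struct fake_pci_dev *dev, int nvec, int type)
{
	printf("native: allocating %d MSI vectors for dev %d\n", nvec, dev->devfn);
	return 0;
}

static int xen_setup(struct fake_pci_dev *dev, int nvec, int type)
{
	printf("xen: asking the hypervisor for %d vectors for dev %d\n",
	       nvec, dev->devfn);
	return 0;
}

/* defaults to the native implementation... */
static struct msi_ops msi = { .setup_msi_irqs = native_setup };

int main(void)
{
	struct fake_pci_dev dev = { .devfn = 3 };

	msi.setup_msi_irqs(&dev, 2, 0);
	msi.setup_msi_irqs = xen_setup;  /* ...and is overridden when running on Xen */
	msi.setup_msi_irqs(&dev, 2, 0);
	return 0;
}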
diff --git a/arch/x86/include/asm/xen/hypercall.h b/arch/x86/include/asm/xen/hypercall.h
index 7fda040a76cd..a3c28ae4025b 100644
--- a/arch/x86/include/asm/xen/hypercall.h
+++ b/arch/x86/include/asm/xen/hypercall.h
@@ -200,6 +200,23 @@ extern struct { char _entry[32]; } hypercall_page[];
(type)__res; \
})
+static inline long
+privcmd_call(unsigned call,
+ unsigned long a1, unsigned long a2,
+ unsigned long a3, unsigned long a4,
+ unsigned long a5)
+{
+ __HYPERCALL_DECLS;
+ __HYPERCALL_5ARG(a1, a2, a3, a4, a5);
+
+ asm volatile("call *%[call]"
+ : __HYPERCALL_5PARAM
+ : [call] "a" (&hypercall_page[call])
+ : __HYPERCALL_CLOBBER5);
+
+ return (long)__res;
+}
+
static inline int
HYPERVISOR_set_trap_table(struct trap_info *table)
{
diff --git a/arch/x86/include/asm/xen/interface.h b/arch/x86/include/asm/xen/interface.h
index e8506c1f0c55..1c10c88ee4e1 100644
--- a/arch/x86/include/asm/xen/interface.h
+++ b/arch/x86/include/asm/xen/interface.h
@@ -61,9 +61,9 @@ DEFINE_GUEST_HANDLE(void);
#define HYPERVISOR_VIRT_START mk_unsigned_long(__HYPERVISOR_VIRT_START)
#endif
-#ifndef machine_to_phys_mapping
-#define machine_to_phys_mapping ((unsigned long *)HYPERVISOR_VIRT_START)
-#endif
+#define MACH2PHYS_VIRT_START mk_unsigned_long(__MACH2PHYS_VIRT_START)
+#define MACH2PHYS_VIRT_END mk_unsigned_long(__MACH2PHYS_VIRT_END)
+#define MACH2PHYS_NR_ENTRIES ((MACH2PHYS_VIRT_END-MACH2PHYS_VIRT_START)>>__MACH2PHYS_SHIFT)
/* Maximum number of virtual CPUs in multi-processor guests. */
#define MAX_VIRT_CPUS 32
diff --git a/arch/x86/include/asm/xen/interface_32.h b/arch/x86/include/asm/xen/interface_32.h
index 42a7e004ae5c..8413688b2571 100644
--- a/arch/x86/include/asm/xen/interface_32.h
+++ b/arch/x86/include/asm/xen/interface_32.h
@@ -32,6 +32,11 @@
/* And the trap vector is... */
#define TRAP_INSTR "int $0x82"
+#define __MACH2PHYS_VIRT_START 0xF5800000
+#define __MACH2PHYS_VIRT_END 0xF6800000
+
+#define __MACH2PHYS_SHIFT 2
+
/*
* Virtual addresses beyond this are not modifiable by guest OSes. The
* machine->physical mapping table starts at this address, read-only.
diff --git a/arch/x86/include/asm/xen/interface_64.h b/arch/x86/include/asm/xen/interface_64.h
index 100d2662b97c..839a4811cf98 100644
--- a/arch/x86/include/asm/xen/interface_64.h
+++ b/arch/x86/include/asm/xen/interface_64.h
@@ -39,18 +39,7 @@
#define __HYPERVISOR_VIRT_END 0xFFFF880000000000
#define __MACH2PHYS_VIRT_START 0xFFFF800000000000
#define __MACH2PHYS_VIRT_END 0xFFFF804000000000
-
-#ifndef HYPERVISOR_VIRT_START
-#define HYPERVISOR_VIRT_START mk_unsigned_long(__HYPERVISOR_VIRT_START)
-#define HYPERVISOR_VIRT_END mk_unsigned_long(__HYPERVISOR_VIRT_END)
-#endif
-
-#define MACH2PHYS_VIRT_START mk_unsigned_long(__MACH2PHYS_VIRT_START)
-#define MACH2PHYS_VIRT_END mk_unsigned_long(__MACH2PHYS_VIRT_END)
-#define MACH2PHYS_NR_ENTRIES ((MACH2PHYS_VIRT_END-MACH2PHYS_VIRT_START)>>3)
-#ifndef machine_to_phys_mapping
-#define machine_to_phys_mapping ((unsigned long *)HYPERVISOR_VIRT_START)
-#endif
+#define __MACH2PHYS_SHIFT 3
/*
* int HYPERVISOR_set_segment_base(unsigned int which, unsigned long base)
diff --git a/arch/x86/include/asm/xen/page.h b/arch/x86/include/asm/xen/page.h
index bf5f7d32bd08..8760cc60a21c 100644
--- a/arch/x86/include/asm/xen/page.h
+++ b/arch/x86/include/asm/xen/page.h
@@ -5,6 +5,7 @@
#include <linux/types.h>
#include <linux/spinlock.h>
#include <linux/pfn.h>
+#include <linux/mm.h>
#include <asm/uaccess.h>
#include <asm/page.h>
@@ -35,16 +36,25 @@ typedef struct xpaddr {
#define MAX_DOMAIN_PAGES \
((unsigned long)((u64)CONFIG_XEN_MAX_DOMAIN_MEMORY * 1024 * 1024 * 1024 / PAGE_SIZE))
+extern unsigned long *machine_to_phys_mapping;
+extern unsigned int machine_to_phys_order;
extern unsigned long get_phys_to_machine(unsigned long pfn);
-extern void set_phys_to_machine(unsigned long pfn, unsigned long mfn);
+extern bool set_phys_to_machine(unsigned long pfn, unsigned long mfn);
static inline unsigned long pfn_to_mfn(unsigned long pfn)
{
+ unsigned long mfn;
+
if (xen_feature(XENFEAT_auto_translated_physmap))
return pfn;
- return get_phys_to_machine(pfn) & ~FOREIGN_FRAME_BIT;
+ mfn = get_phys_to_machine(pfn);
+
+ if (mfn != INVALID_P2M_ENTRY)
+ mfn &= ~FOREIGN_FRAME_BIT;
+
+ return mfn;
}
static inline int phys_to_machine_mapping_valid(unsigned long pfn)
@@ -62,10 +72,8 @@ static inline unsigned long mfn_to_pfn(unsigned long mfn)
if (xen_feature(XENFEAT_auto_translated_physmap))
return mfn;
-#if 0
if (unlikely((mfn >> machine_to_phys_order) != 0))
- return max_mapnr;
-#endif
+ return ~0;
pfn = 0;
/*
@@ -159,6 +167,7 @@ static inline pte_t __pte_ma(pteval_t x)
#define pgd_val_ma(x) ((x).pgd)
+void xen_set_domain_pte(pte_t *ptep, pte_t pteval, unsigned domid);
xmaddr_t arbitrary_virt_to_machine(void *address);
unsigned long arbitrary_virt_to_mfn(void *vaddr);
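(Editorial note: the xen/page.h hunk makes pfn_to_mfn() preserve INVALID_P2M_ENTRY rather than masking the foreign-frame bit out of it, and mfn_to_pfn() now rejects machine frames beyond machine_to_phys_order. The snippet below is a small userspace sketch of that lookup logic; the toy p2m[] table stands in for the real phys-to-machine mapping.)

#include <stdio.h>

#define FOREIGN_FRAME_BIT  (1UL << (8 * sizeof(unsigned long) - 1))
#define INVALID_P2M_ENTRY  (~0UL)
#define NR_PFNS            4

/* toy phys-to-machine table: pfn 2 is foreign, pfn 3 is unmapped */
static unsigned long p2m[NR_PFNS] = {
	100, 101, 102 | FOREIGN_FRAME_BIT, INVALID_P2M_ENTRY,
};

static unsigned long pfn_to_mfn_sketch(unsigned long pfn)
{
	unsigned long mfn = p2m[pfn];

	/* only strip the foreign bit from valid entries, keep INVALID intact */
	if (mfn != INVALID_P2M_ENTRY)
		mfn &= ~FOREIGN_FRAME_BIT;
	return mfn;
}

int main(void)
{
	for (unsigned long pfn = 0; pfn < NR_PFNS; pfn++)
		printf("pfn %lu -> mfn %#lx\n", pfn, pfn_to_mfn_sketch(pfn));
	return 0;
}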
diff --git a/arch/x86/include/asm/xen/pci.h b/arch/x86/include/asm/xen/pci.h
new file mode 100644
index 000000000000..2329b3eaf8d3
--- /dev/null
+++ b/arch/x86/include/asm/xen/pci.h
@@ -0,0 +1,65 @@
+#ifndef _ASM_X86_XEN_PCI_H
+#define _ASM_X86_XEN_PCI_H
+
+#if defined(CONFIG_PCI_XEN)
+extern int __init pci_xen_init(void);
+extern int __init pci_xen_hvm_init(void);
+#define pci_xen 1
+#else
+#define pci_xen 0
+#define pci_xen_init (0)
+static inline int pci_xen_hvm_init(void)
+{
+ return -1;
+}
+#endif
+#if defined(CONFIG_XEN_DOM0)
+void __init xen_setup_pirqs(void);
+#else
+static inline void __init xen_setup_pirqs(void)
+{
+}
+#endif
+
+#if defined(CONFIG_PCI_MSI)
+#if defined(CONFIG_PCI_XEN)
+/* The drivers/pci/xen-pcifront.c sets this structure to
+ * its own functions.
+ */
+struct xen_pci_frontend_ops {
+ int (*enable_msi)(struct pci_dev *dev, int **vectors);
+ void (*disable_msi)(struct pci_dev *dev);
+ int (*enable_msix)(struct pci_dev *dev, int **vectors, int nvec);
+ void (*disable_msix)(struct pci_dev *dev);
+};
+
+extern struct xen_pci_frontend_ops *xen_pci_frontend;
+
+static inline int xen_pci_frontend_enable_msi(struct pci_dev *dev,
+ int **vectors)
+{
+ if (xen_pci_frontend && xen_pci_frontend->enable_msi)
+ return xen_pci_frontend->enable_msi(dev, vectors);
+ return -ENODEV;
+}
+static inline void xen_pci_frontend_disable_msi(struct pci_dev *dev)
+{
+ if (xen_pci_frontend && xen_pci_frontend->disable_msi)
+ xen_pci_frontend->disable_msi(dev);
+}
+static inline int xen_pci_frontend_enable_msix(struct pci_dev *dev,
+ int **vectors, int nvec)
+{
+ if (xen_pci_frontend && xen_pci_frontend->enable_msix)
+ return xen_pci_frontend->enable_msix(dev, vectors, nvec);
+ return -ENODEV;
+}
+static inline void xen_pci_frontend_disable_msix(struct pci_dev *dev)
+{
+ if (xen_pci_frontend && xen_pci_frontend->disable_msix)
+ xen_pci_frontend->disable_msix(dev);
+}
+#endif /* CONFIG_PCI_XEN */
+#endif /* CONFIG_PCI_MSI */
+
+#endif /* _ASM_X86_XEN_PCI_H */
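(Editorial note: the new asm/xen/pci.h wraps every call through xen_pci_frontend behind a NULL check, so MSI requests simply fail with -ENODEV until the pcifront driver has registered its ops. Below is a compact userspace sketch of that guarded-dispatch pattern; frontend_ops and enable_msi_sketch are invented names for the example.)

#include <errno.h>
#include <stdio.h>

struct fake_pci_dev { int devfn; };

struct frontend_ops {
	int (*enable_msi)(struct fake_pci_dev *dev, int **vectors);
};

static struct frontend_ops *frontend;    /* NULL until a driver registers */

static int enable_msi_sketch(struct fake_pci_dev *dev, int **vectors)
{
	if (frontend && frontend->enable_msi)
		return frontend->enable_msi(dev, vectors);
	return -ENODEV;                  /* no backend yet: fail gracefully */
}

static int real_enable_msi(struct fake_pci_dev *dev, int **vectors)
{
	printf("frontend enabling MSI for dev %d\n", dev->devfn);
	return 0;
}

static struct frontend_ops pcifront = { .enable_msi = real_enable_msi };

int main(void)
{
	struct fake_pci_dev dev = { .devfn = 7 };
	int *vectors = NULL;

	printf("before registration: %d\n", enable_msi_sketch(&dev, &vectors));
	frontend = &pcifront;            /* pcifront registers its ops */
	printf("after registration:  %d\n", enable_msi_sketch(&dev, &vectors));
	return 0;
}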
diff --git a/arch/x86/kernel/Makefile b/arch/x86/kernel/Makefile
index 2c833d8c4141..9e13763b6092 100644
--- a/arch/x86/kernel/Makefile
+++ b/arch/x86/kernel/Makefile
@@ -36,7 +36,6 @@ obj-y += traps.o irq.o irq_$(BITS).o dumpstack_$(BITS).o
obj-y += time.o ioport.o ldt.o dumpstack.o
obj-y += setup.o x86_init.o i8259.o irqinit.o jump_label.o
obj-$(CONFIG_IRQ_WORK) += irq_work.o
-obj-$(CONFIG_X86_VISWS) += visws_quirks.o
obj-$(CONFIG_X86_32) += probe_roms_32.o
obj-$(CONFIG_X86_32) += sys_i386_32.o i386_ksyms_32.o
obj-$(CONFIG_X86_64) += sys_x86_64.o x8664_ksyms_64.o
@@ -58,7 +57,6 @@ obj-$(CONFIG_INTEL_TXT) += tboot.o
obj-$(CONFIG_STACKTRACE) += stacktrace.o
obj-y += cpu/
obj-y += acpi/
-obj-$(CONFIG_SFI) += sfi.o
obj-y += reboot.o
obj-$(CONFIG_MCA) += mca_32.o
obj-$(CONFIG_X86_MSR) += msr.o
@@ -82,7 +80,6 @@ obj-$(CONFIG_KEXEC) += relocate_kernel_$(BITS).o crash.o
obj-$(CONFIG_CRASH_DUMP) += crash_dump_$(BITS).o
obj-$(CONFIG_KPROBES) += kprobes.o
obj-$(CONFIG_MODULES) += module.o
-obj-$(CONFIG_EFI) += efi.o efi_$(BITS).o efi_stub_$(BITS).o
obj-$(CONFIG_DOUBLEFAULT) += doublefault_32.o
obj-$(CONFIG_KGDB) += kgdb.o
obj-$(CONFIG_VM86) += vm86_32.o
@@ -104,14 +101,6 @@ obj-$(CONFIG_PARAVIRT_CLOCK) += pvclock.o
obj-$(CONFIG_PCSPKR_PLATFORM) += pcspeaker.o
-obj-$(CONFIG_SCx200) += scx200.o
-scx200-y += scx200_32.o
-
-obj-$(CONFIG_OLPC) += olpc.o
-obj-$(CONFIG_OLPC_XO1) += olpc-xo1.o
-obj-$(CONFIG_OLPC_OPENFIRMWARE) += olpc_ofw.o
-obj-$(CONFIG_X86_MRST) += mrst.o
-
microcode-y := microcode_core.o
microcode-$(CONFIG_MICROCODE_INTEL) += microcode_intel.o
microcode-$(CONFIG_MICROCODE_AMD) += microcode_amd.o
@@ -124,7 +113,6 @@ obj-$(CONFIG_SWIOTLB) += pci-swiotlb.o
###
# 64 bit specific files
ifeq ($(CONFIG_X86_64),y)
- obj-$(CONFIG_X86_UV) += tlb_uv.o bios_uv.o uv_irq.o uv_sysfs.o uv_time.o
obj-$(CONFIG_AUDIT) += audit_64.o
obj-$(CONFIG_GART_IOMMU) += pci-gart_64.o aperture_64.o
diff --git a/arch/x86/kernel/acpi/boot.c b/arch/x86/kernel/acpi/boot.c
index c05872aa3ce0..71232b941b6c 100644
--- a/arch/x86/kernel/acpi/boot.c
+++ b/arch/x86/kernel/acpi/boot.c
@@ -513,35 +513,62 @@ int acpi_isa_irq_to_gsi(unsigned isa_irq, u32 *gsi)
return 0;
}
-/*
- * success: return IRQ number (>=0)
- * failure: return < 0
- */
-int acpi_register_gsi(struct device *dev, u32 gsi, int trigger, int polarity)
+static int acpi_register_gsi_pic(struct device *dev, u32 gsi,
+ int trigger, int polarity)
{
- unsigned int irq;
- unsigned int plat_gsi = gsi;
-
#ifdef CONFIG_PCI
/*
* Make sure all (legacy) PCI IRQs are set as level-triggered.
*/
- if (acpi_irq_model == ACPI_IRQ_MODEL_PIC) {
- if (trigger == ACPI_LEVEL_SENSITIVE)
- eisa_set_level_irq(gsi);
- }
+ if (trigger == ACPI_LEVEL_SENSITIVE)
+ eisa_set_level_irq(gsi);
#endif
+ return gsi;
+}
+
+static int acpi_register_gsi_ioapic(struct device *dev, u32 gsi,
+ int trigger, int polarity)
+{
#ifdef CONFIG_X86_IO_APIC
- if (acpi_irq_model == ACPI_IRQ_MODEL_IOAPIC) {
- plat_gsi = mp_register_gsi(dev, gsi, trigger, polarity);
- }
+ gsi = mp_register_gsi(dev, gsi, trigger, polarity);
#endif
+
+ return gsi;
+}
+
+int (*__acpi_register_gsi)(struct device *dev, u32 gsi,
+ int trigger, int polarity) = acpi_register_gsi_pic;
+
+/*
+ * success: return IRQ number (>=0)
+ * failure: return < 0
+ */
+int acpi_register_gsi(struct device *dev, u32 gsi, int trigger, int polarity)
+{
+ unsigned int irq;
+ unsigned int plat_gsi = gsi;
+
+ plat_gsi = (*__acpi_register_gsi)(dev, gsi, trigger, polarity);
irq = gsi_to_irq(plat_gsi);
return irq;
}
+void __init acpi_set_irq_model_pic(void)
+{
+ acpi_irq_model = ACPI_IRQ_MODEL_PIC;
+ __acpi_register_gsi = acpi_register_gsi_pic;
+ acpi_ioapic = 0;
+}
+
+void __init acpi_set_irq_model_ioapic(void)
+{
+ acpi_irq_model = ACPI_IRQ_MODEL_IOAPIC;
+ __acpi_register_gsi = acpi_register_gsi_ioapic;
+ acpi_ioapic = 1;
+}
+
/*
* ACPI based hotplug support for CPU
*/
@@ -1259,8 +1286,7 @@ static void __init acpi_process_madt(void)
*/
error = acpi_parse_madt_ioapic_entries();
if (!error) {
- acpi_irq_model = ACPI_IRQ_MODEL_IOAPIC;
- acpi_ioapic = 1;
+ acpi_set_irq_model_ioapic();
smp_found_config = 1;
}
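(Editorial note: acpi_register_gsi() is split so the PIC and IO-APIC paths become separate helpers selected through the __acpi_register_gsi pointer, and acpi_set_irq_model_pic()/_ioapic() flip the pointer and acpi_irq_model together. The following is a brief userspace sketch of the same selectable-handler idea; names such as register_gsi_pic are placeholders.)

#include <stdio.h>

static int register_gsi_pic(unsigned gsi)
{
	printf("PIC model: gsi %u used as-is\n", gsi);
	return (int)gsi;
}

static int register_gsi_ioapic(unsigned gsi)
{
	printf("IO-APIC model: routing gsi %u through the IO-APIC\n", gsi);
	return (int)gsi;
}

/* defaults to the legacy PIC handler, like __acpi_register_gsi */
static int (*register_gsi)(unsigned gsi) = register_gsi_pic;

static void set_irq_model_ioapic(void)
{
	register_gsi = register_gsi_ioapic;  /* switch model and handler together */
}

int main(void)
{
	register_gsi(9);           /* before MADT parsing: PIC path */
	set_irq_model_ioapic();    /* MADT found usable IO-APIC entries */
	register_gsi(9);           /* afterwards: IO-APIC path */
	return 0;
}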
diff --git a/arch/x86/kernel/acpi/sleep.c b/arch/x86/kernel/acpi/sleep.c
index 74a847835bab..69fd72aa5594 100644
--- a/arch/x86/kernel/acpi/sleep.c
+++ b/arch/x86/kernel/acpi/sleep.c
@@ -15,7 +15,6 @@
#ifdef CONFIG_X86_32
#include <asm/pgtable.h>
-#include <asm/pgtable_32.h>
#endif
#include "realmode/wakeup.h"
diff --git a/arch/x86/kernel/alternative.c b/arch/x86/kernel/alternative.c
index a36bb90aef53..5079f24c955a 100644
--- a/arch/x86/kernel/alternative.c
+++ b/arch/x86/kernel/alternative.c
@@ -638,71 +638,32 @@ void *__kprobes text_poke_smp(void *addr, const void *opcode, size_t len)
atomic_set(&stop_machine_first, 1);
wrote_text = 0;
/* Use __stop_machine() because the caller already got online_cpus. */
- __stop_machine(stop_machine_text_poke, (void *)&tpp, NULL);
+ __stop_machine(stop_machine_text_poke, (void *)&tpp, cpu_online_mask);
return addr;
}
#if defined(CONFIG_DYNAMIC_FTRACE) || defined(HAVE_JUMP_LABEL)
-unsigned char ideal_nop5[IDEAL_NOP_SIZE_5];
+#ifdef CONFIG_X86_64
+unsigned char ideal_nop5[5] = { 0x66, 0x66, 0x66, 0x66, 0x90 };
+#else
+unsigned char ideal_nop5[5] = { 0x3e, 0x8d, 0x74, 0x26, 0x00 };
+#endif
void __init arch_init_ideal_nop5(void)
{
- extern const unsigned char ftrace_test_p6nop[];
- extern const unsigned char ftrace_test_nop5[];
- extern const unsigned char ftrace_test_jmp[];
- int faulted = 0;
-
/*
- * There is no good nop for all x86 archs.
- * We will default to using the P6_NOP5, but first we
- * will test to make sure that the nop will actually
- * work on this CPU. If it faults, we will then
- * go to a lesser efficient 5 byte nop. If that fails
- * we then just use a jmp as our nop. This isn't the most
- * efficient nop, but we can not use a multi part nop
- * since we would then risk being preempted in the middle
- * of that nop, and if we enabled tracing then, it might
- * cause a system crash.
+ * There is no good nop for all x86 archs. This selection
+ * algorithm should be unified with the one in find_nop_table(),
+ * but this should be good enough for now.
*
- * TODO: check the cpuid to determine the best nop.
+ * For cases other than the ones below, use the safe (as in
+ * always functional) defaults above.
*/
- asm volatile (
- "ftrace_test_jmp:"
- "jmp ftrace_test_p6nop\n"
- "nop\n"
- "nop\n"
- "nop\n" /* 2 byte jmp + 3 bytes */
- "ftrace_test_p6nop:"
- P6_NOP5
- "jmp 1f\n"
- "ftrace_test_nop5:"
- ".byte 0x66,0x66,0x66,0x66,0x90\n"
- "1:"
- ".section .fixup, \"ax\"\n"
- "2: movl $1, %0\n"
- " jmp ftrace_test_nop5\n"
- "3: movl $2, %0\n"
- " jmp 1b\n"
- ".previous\n"
- _ASM_EXTABLE(ftrace_test_p6nop, 2b)
- _ASM_EXTABLE(ftrace_test_nop5, 3b)
- : "=r"(faulted) : "0" (faulted));
-
- switch (faulted) {
- case 0:
- pr_info("converting mcount calls to 0f 1f 44 00 00\n");
- memcpy(ideal_nop5, ftrace_test_p6nop, IDEAL_NOP_SIZE_5);
- break;
- case 1:
- pr_info("converting mcount calls to 66 66 66 66 90\n");
- memcpy(ideal_nop5, ftrace_test_nop5, IDEAL_NOP_SIZE_5);
- break;
- case 2:
- pr_info("converting mcount calls to jmp . + 5\n");
- memcpy(ideal_nop5, ftrace_test_jmp, IDEAL_NOP_SIZE_5);
- break;
- }
-
+#ifdef CONFIG_X86_64
+ /* Don't use these on 32 bits due to broken virtualizers */
+ if (boot_cpu_data.x86_vendor == X86_VENDOR_INTEL)
+ memcpy(ideal_nop5, p6_nops[5], 5);
+#endif
}
#endif
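(Editorial note: arch_init_ideal_nop5() no longer probes for a working NOP by provoking faults at boot; ideal_nop5[] starts from safe per-arch defaults and is only upgraded to the Intel single-instruction NOP when the vendor is known to support it. The toy sketch below illustrates that select-by-vendor approach; the vendor constants and the exact use of the 0f 1f 44 00 00 bytes are illustrative only.)

#include <stdio.h>
#include <string.h>

enum { VENDOR_INTEL, VENDOR_OTHER };

/* safe default: prefix-padded NOP that every x86-64 CPU accepts */
static unsigned char ideal_nop5[5] = { 0x66, 0x66, 0x66, 0x66, 0x90 };
/* Intel's preferred 5-byte NOP: 0f 1f 44 00 00 (nopl 0x0(%rax,%rax,1)) */
static const unsigned char p6_nop5[5] = { 0x0f, 0x1f, 0x44, 0x00, 0x00 };

static void init_ideal_nop5(int vendor)
{
	if (vendor == VENDOR_INTEL)
		memcpy(ideal_nop5, p6_nop5, sizeof(ideal_nop5));
}

int main(void)
{
	init_ideal_nop5(VENDOR_INTEL);
	for (int i = 0; i < 5; i++)
		printf("%02x ", ideal_nop5[i]);
	printf("\n");
	return 0;
}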
diff --git a/arch/x86/kernel/apic/apic.c b/arch/x86/kernel/apic/apic.c
index 850657d1b0ed..3f838d537392 100644
--- a/arch/x86/kernel/apic/apic.c
+++ b/arch/x86/kernel/apic/apic.c
@@ -52,7 +52,6 @@
#include <asm/mce.h>
#include <asm/kvm_para.h>
#include <asm/tsc.h>
-#include <asm/atomic.h>
unsigned int num_processors;
diff --git a/arch/x86/kernel/apic/hw_nmi.c b/arch/x86/kernel/apic/hw_nmi.c
index cefd6942f0e9..62f6e1e55b90 100644
--- a/arch/x86/kernel/apic/hw_nmi.c
+++ b/arch/x86/kernel/apic/hw_nmi.c
@@ -17,15 +17,16 @@
#include <linux/nmi.h>
#include <linux/module.h>
-/* For reliability, we're prepared to waste bits here. */
-static DECLARE_BITMAP(backtrace_mask, NR_CPUS) __read_mostly;
-
u64 hw_nmi_get_sample_period(void)
{
return (u64)(cpu_khz) * 1000 * 60;
}
#ifdef ARCH_HAS_NMI_WATCHDOG
+
+/* For reliability, we're prepared to waste bits here. */
+static DECLARE_BITMAP(backtrace_mask, NR_CPUS) __read_mostly;
+
void arch_trigger_all_cpu_backtrace(void)
{
int i;
diff --git a/arch/x86/kernel/apic/io_apic.c b/arch/x86/kernel/apic/io_apic.c
index 8ae808d110f4..7cc0a721f628 100644
--- a/arch/x86/kernel/apic/io_apic.c
+++ b/arch/x86/kernel/apic/io_apic.c
@@ -3109,7 +3109,7 @@ void destroy_irq(unsigned int irq)
irq_set_status_flags(irq, IRQ_NOREQUEST|IRQ_NOPROBE);
- if (intr_remapping_enabled)
+ if (irq_remapped(cfg))
free_irte(irq);
raw_spin_lock_irqsave(&vector_lock, flags);
__clear_irq_vector(irq, cfg);
@@ -3331,7 +3331,7 @@ static int setup_msi_irq(struct pci_dev *dev, struct msi_desc *msidesc, int irq)
return 0;
}
-int arch_setup_msi_irqs(struct pci_dev *dev, int nvec, int type)
+int native_setup_msi_irqs(struct pci_dev *dev, int nvec, int type)
{
int node, ret, sub_handle, index = 0;
unsigned int irq, irq_want;
@@ -3389,7 +3389,7 @@ error:
return ret;
}
-void arch_teardown_msi_irq(unsigned int irq)
+void native_teardown_msi_irq(unsigned int irq)
{
destroy_irq(irq);
}
@@ -3650,6 +3650,11 @@ void __init probe_nr_irqs_gsi(void)
printk(KERN_DEBUG "nr_irqs_gsi: %d\n", nr_irqs_gsi);
}
+int get_nr_irqs_gsi(void)
+{
+ return nr_irqs_gsi;
+}
+
#ifdef CONFIG_SPARSE_IRQ
int __init arch_probe_nr_irqs(void)
{
diff --git a/arch/x86/kernel/apic/x2apic_uv_x.c b/arch/x86/kernel/apic/x2apic_uv_x.c
index f744f54cb248..c1c52c341f40 100644
--- a/arch/x86/kernel/apic/x2apic_uv_x.c
+++ b/arch/x86/kernel/apic/x2apic_uv_x.c
@@ -5,7 +5,7 @@
*
* SGI UV APIC functions (note: not an Intel compatible APIC)
*
- * Copyright (C) 2007-2009 Silicon Graphics, Inc. All rights reserved.
+ * Copyright (C) 2007-2010 Silicon Graphics, Inc. All rights reserved.
*/
#include <linux/cpumask.h>
#include <linux/hardirq.h>
@@ -41,8 +41,11 @@ DEFINE_PER_CPU(int, x2apic_extra_bits);
static enum uv_system_type uv_system_type;
static u64 gru_start_paddr, gru_end_paddr;
+static union uvh_apicid uvh_apicid;
int uv_min_hub_revision_id;
EXPORT_SYMBOL_GPL(uv_min_hub_revision_id);
+unsigned int uv_apicid_hibits;
+EXPORT_SYMBOL_GPL(uv_apicid_hibits);
static DEFINE_SPINLOCK(uv_nmi_lock);
static inline bool is_GRU_range(u64 start, u64 end)
@@ -70,12 +73,44 @@ static int early_get_nodeid(void)
return node_id.s.node_id;
}
+static void __init early_get_apic_pnode_shift(void)
+{
+ unsigned long *mmr;
+
+ mmr = early_ioremap(UV_LOCAL_MMR_BASE | UVH_APICID, sizeof(*mmr));
+ uvh_apicid.v = *mmr;
+ early_iounmap(mmr, sizeof(*mmr));
+ if (!uvh_apicid.v)
+ /*
+ * Old bios, use default value
+ */
+ uvh_apicid.s.pnode_shift = UV_APIC_PNODE_SHIFT;
+}
+
+/*
+ * Add an extra bit as dictated by bios to the destination apicid of
+ * interrupts potentially passing through the UV HUB. This prevents
+ * a deadlock between interrupts and IO port operations.
+ */
+static void __init uv_set_apicid_hibit(void)
+{
+ union uvh_lb_target_physical_apic_id_mask_u apicid_mask;
+ unsigned long *mmr;
+
+ mmr = early_ioremap(UV_LOCAL_MMR_BASE |
+ UVH_LB_TARGET_PHYSICAL_APIC_ID_MASK, sizeof(*mmr));
+ apicid_mask.v = *mmr;
+ early_iounmap(mmr, sizeof(*mmr));
+ uv_apicid_hibits = apicid_mask.s.bit_enables & UV_APICID_HIBIT_MASK;
+}
+
static int __init uv_acpi_madt_oem_check(char *oem_id, char *oem_table_id)
{
int nodeid;
if (!strcmp(oem_id, "SGI")) {
nodeid = early_get_nodeid();
+ early_get_apic_pnode_shift();
x86_platform.is_untracked_pat_range = uv_is_untracked_pat_range;
x86_platform.nmi_init = uv_nmi_init;
if (!strcmp(oem_table_id, "UVL"))
@@ -84,8 +119,9 @@ static int __init uv_acpi_madt_oem_check(char *oem_id, char *oem_table_id)
uv_system_type = UV_X2APIC;
else if (!strcmp(oem_table_id, "UVH")) {
__get_cpu_var(x2apic_extra_bits) =
- nodeid << (UV_APIC_PNODE_SHIFT - 1);
+ nodeid << (uvh_apicid.s.pnode_shift - 1);
uv_system_type = UV_NON_UNIQUE_APIC;
+ uv_set_apicid_hibit();
return 1;
}
}
@@ -139,6 +175,7 @@ static int __cpuinit uv_wakeup_secondary(int phys_apicid, unsigned long start_ri
int pnode;
pnode = uv_apicid_to_pnode(phys_apicid);
+ phys_apicid |= uv_apicid_hibits;
val = (1UL << UVH_IPI_INT_SEND_SHFT) |
(phys_apicid << UVH_IPI_INT_APIC_ID_SHFT) |
((start_rip << UVH_IPI_INT_VECTOR_SHFT) >> 12) |
@@ -220,7 +257,7 @@ static unsigned int uv_cpu_mask_to_apicid(const struct cpumask *cpumask)
int cpu = cpumask_first(cpumask);
if ((unsigned)cpu < nr_cpu_ids)
- return per_cpu(x86_cpu_to_apicid, cpu);
+ return per_cpu(x86_cpu_to_apicid, cpu) | uv_apicid_hibits;
else
return BAD_APICID;
}
@@ -239,7 +276,7 @@ uv_cpu_mask_to_apicid_and(const struct cpumask *cpumask,
if (cpumask_test_cpu(cpu, cpu_online_mask))
break;
}
- return per_cpu(x86_cpu_to_apicid, cpu);
+ return per_cpu(x86_cpu_to_apicid, cpu) | uv_apicid_hibits;
}
static unsigned int x2apic_get_apic_id(unsigned long x)
@@ -363,14 +400,14 @@ struct redir_addr {
#define DEST_SHIFT UVH_RH_GAM_ALIAS210_REDIRECT_CONFIG_0_MMR_DEST_BASE_SHFT
static __initdata struct redir_addr redir_addrs[] = {
- {UVH_RH_GAM_ALIAS210_REDIRECT_CONFIG_0_MMR, UVH_SI_ALIAS0_OVERLAY_CONFIG},
- {UVH_RH_GAM_ALIAS210_REDIRECT_CONFIG_1_MMR, UVH_SI_ALIAS1_OVERLAY_CONFIG},
- {UVH_RH_GAM_ALIAS210_REDIRECT_CONFIG_2_MMR, UVH_SI_ALIAS2_OVERLAY_CONFIG},
+ {UVH_RH_GAM_ALIAS210_REDIRECT_CONFIG_0_MMR, UVH_RH_GAM_ALIAS210_OVERLAY_CONFIG_0_MMR},
+ {UVH_RH_GAM_ALIAS210_REDIRECT_CONFIG_1_MMR, UVH_RH_GAM_ALIAS210_OVERLAY_CONFIG_1_MMR},
+ {UVH_RH_GAM_ALIAS210_REDIRECT_CONFIG_2_MMR, UVH_RH_GAM_ALIAS210_OVERLAY_CONFIG_2_MMR},
};
static __init void get_lowmem_redirect(unsigned long *base, unsigned long *size)
{
- union uvh_si_alias0_overlay_config_u alias;
+ union uvh_rh_gam_alias210_overlay_config_2_mmr_u alias;
union uvh_rh_gam_alias210_redirect_config_2_mmr_u redirect;
int i;
@@ -644,7 +681,7 @@ void uv_nmi_init(void)
void __init uv_system_init(void)
{
- union uvh_si_addr_map_config_u m_n_config;
+ union uvh_rh_gam_config_mmr_u m_n_config;
union uvh_node_id_u node_id;
unsigned long gnode_upper, lowmem_redir_base, lowmem_redir_size;
int bytes, nid, cpu, lcpu, pnode, blade, i, j, m_val, n_val;
@@ -654,7 +691,7 @@ void __init uv_system_init(void)
map_low_mmrs();
- m_n_config.v = uv_read_local_mmr(UVH_SI_ADDR_MAP_CONFIG);
+ m_n_config.v = uv_read_local_mmr(UVH_RH_GAM_CONFIG_MMR );
m_val = m_n_config.s.m_skt;
n_val = m_n_config.s.n_skt;
mmr_base =
@@ -716,6 +753,10 @@ void __init uv_system_init(void)
int apicid = per_cpu(x86_cpu_to_apicid, cpu);
nid = cpu_to_node(cpu);
+ /*
+ * apic_pnode_shift must be set before calling uv_apicid_to_pnode();
+ */
+ uv_cpu_hub_info(cpu)->apic_pnode_shift = uvh_apicid.s.pnode_shift;
pnode = uv_apicid_to_pnode(apicid);
blade = boot_pnode_to_blade(pnode);
lcpu = uv_blade_info[blade].nr_possible_cpus;
diff --git a/arch/x86/kernel/cpu/cpufreq/acpi-cpufreq.c b/arch/x86/kernel/cpu/cpufreq/acpi-cpufreq.c
index cd8da247dda1..a2baafb2fe6d 100644
--- a/arch/x86/kernel/cpu/cpufreq/acpi-cpufreq.c
+++ b/arch/x86/kernel/cpu/cpufreq/acpi-cpufreq.c
@@ -701,6 +701,7 @@ static int acpi_cpufreq_cpu_exit(struct cpufreq_policy *policy)
per_cpu(acfreq_data, policy->cpu) = NULL;
acpi_processor_unregister_performance(data->acpi_data,
policy->cpu);
+ kfree(data->freq_table);
kfree(data);
}
diff --git a/arch/x86/kernel/cpu/cpufreq/cpufreq-nforce2.c b/arch/x86/kernel/cpu/cpufreq/cpufreq-nforce2.c
index 733093d60436..141abebc4516 100644
--- a/arch/x86/kernel/cpu/cpufreq/cpufreq-nforce2.c
+++ b/arch/x86/kernel/cpu/cpufreq/cpufreq-nforce2.c
@@ -393,7 +393,7 @@ static struct cpufreq_driver nforce2_driver = {
* Detects nForce2 A2 and C1 stepping
*
*/
-static unsigned int nforce2_detect_chipset(void)
+static int nforce2_detect_chipset(void)
{
nforce2_dev = pci_get_subsys(PCI_VENDOR_ID_NVIDIA,
PCI_DEVICE_ID_NVIDIA_NFORCE2,
diff --git a/arch/x86/kernel/cpu/cpufreq/longrun.c b/arch/x86/kernel/cpu/cpufreq/longrun.c
index fc09f142d94d..d9f51367666b 100644
--- a/arch/x86/kernel/cpu/cpufreq/longrun.c
+++ b/arch/x86/kernel/cpu/cpufreq/longrun.c
@@ -35,7 +35,7 @@ static unsigned int longrun_low_freq, longrun_high_freq;
* Reads the current LongRun policy by access to MSR_TMTA_LONGRUN_FLAGS
* and MSR_TMTA_LONGRUN_CTRL
*/
-static void __init longrun_get_policy(struct cpufreq_policy *policy)
+static void __cpuinit longrun_get_policy(struct cpufreq_policy *policy)
{
u32 msr_lo, msr_hi;
@@ -165,7 +165,7 @@ static unsigned int longrun_get(unsigned int cpu)
* TMTA rules:
* performance_pctg = (target_freq - low_freq)/(high_freq - low_freq)
*/
-static unsigned int __cpuinit longrun_determine_freqs(unsigned int *low_freq,
+static int __cpuinit longrun_determine_freqs(unsigned int *low_freq,
unsigned int *high_freq)
{
u32 msr_lo, msr_hi;
diff --git a/arch/x86/kernel/cpu/intel_cacheinfo.c b/arch/x86/kernel/cpu/intel_cacheinfo.c
index 12cd823c8d03..17ad03366211 100644
--- a/arch/x86/kernel/cpu/intel_cacheinfo.c
+++ b/arch/x86/kernel/cpu/intel_cacheinfo.c
@@ -327,7 +327,7 @@ static void __cpuinit amd_calc_l3_indices(struct amd_l3_cache *l3)
l3->subcaches[3] = sc3 = !(val & BIT(12)) + !(val & BIT(13));
- l3->indices = (max(max(max(sc0, sc1), sc2), sc3) << 10) - 1;
+ l3->indices = (max(max3(sc0, sc1, sc2), sc3) << 10) - 1;
}
static struct amd_l3_cache * __cpuinit amd_init_l3_cache(int node)
diff --git a/arch/x86/kernel/cpu/perf_event.c b/arch/x86/kernel/cpu/perf_event.c
index fe73c1844a9a..6d75b9145b13 100644
--- a/arch/x86/kernel/cpu/perf_event.c
+++ b/arch/x86/kernel/cpu/perf_event.c
@@ -49,7 +49,6 @@ static unsigned long
copy_from_user_nmi(void *to, const void __user *from, unsigned long n)
{
unsigned long offset, addr = (unsigned long)from;
- int type = in_nmi() ? KM_NMI : KM_IRQ0;
unsigned long size, len = 0;
struct page *page;
void *map;
@@ -63,9 +62,9 @@ copy_from_user_nmi(void *to, const void __user *from, unsigned long n)
offset = addr & (PAGE_SIZE - 1);
size = min(PAGE_SIZE - offset, n - len);
- map = kmap_atomic(page, type);
+ map = kmap_atomic(page);
memcpy(to, map+offset, size);
- kunmap_atomic(map, type);
+ kunmap_atomic(map);
put_page(page);
len += size;
@@ -238,6 +237,7 @@ struct x86_pmu {
* Intel DebugStore bits
*/
int bts, pebs;
+ int bts_active, pebs_active;
int pebs_record_size;
void (*drain_pebs)(struct pt_regs *regs);
struct event_constraint *pebs_constraints;
@@ -381,7 +381,21 @@ static void release_pmc_hardware(void) {}
#endif
-static int reserve_ds_buffers(void);
+static bool check_hw_exists(void)
+{
+ u64 val, val_new = 0;
+ int ret = 0;
+
+ val = 0xabcdUL;
+ ret |= checking_wrmsrl(x86_pmu.perfctr, val);
+ ret |= rdmsrl_safe(x86_pmu.perfctr, &val_new);
+ if (ret || val != val_new)
+ return false;
+
+ return true;
+}
+
+static void reserve_ds_buffers(void);
static void release_ds_buffers(void);
static void hw_perf_event_destroy(struct perf_event *event)
@@ -478,7 +492,7 @@ static int x86_setup_perfctr(struct perf_event *event)
if ((attr->config == PERF_COUNT_HW_BRANCH_INSTRUCTIONS) &&
(hwc->sample_period == 1)) {
/* BTS is not supported by this architecture. */
- if (!x86_pmu.bts)
+ if (!x86_pmu.bts_active)
return -EOPNOTSUPP;
/* BTS is currently only allowed for user-mode. */
@@ -497,12 +511,13 @@ static int x86_pmu_hw_config(struct perf_event *event)
int precise = 0;
/* Support for constant skid */
- if (x86_pmu.pebs)
+ if (x86_pmu.pebs_active) {
precise++;
- /* Support for IP fixup */
- if (x86_pmu.lbr_nr)
- precise++;
+ /* Support for IP fixup */
+ if (x86_pmu.lbr_nr)
+ precise++;
+ }
if (event->attr.precise_ip > precise)
return -EOPNOTSUPP;
@@ -544,11 +559,8 @@ static int __x86_pmu_event_init(struct perf_event *event)
if (atomic_read(&active_events) == 0) {
if (!reserve_pmc_hardware())
err = -EBUSY;
- else {
- err = reserve_ds_buffers();
- if (err)
- release_pmc_hardware();
- }
+ else
+ reserve_ds_buffers();
}
if (!err)
atomic_inc(&active_events);
@@ -1374,6 +1386,12 @@ void __init init_hw_perf_events(void)
pmu_check_apic();
+ /* sanity check that the hardware exists or is emulated */
+ if (!check_hw_exists()) {
+ pr_cont("Broken PMU hardware detected, software events only.\n");
+ return;
+ }
+
pr_cont("%s PMU driver.\n", x86_pmu.name);
if (x86_pmu.quirks)
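(Editorial note: check_hw_exists() probes for a real PMU by writing a known value to the first counter MSR and reading it back; broken or absent counters, for example under some hypervisors, fail the round-trip and the PMU driver bails out early. Below is a hedged userspace sketch of that write/read-back validation against a simulated MSR.)

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

static uint64_t fake_msr;
static bool msr_works = false;  /* flip to true to simulate working hardware */

static int wrmsr_sketch(uint64_t val)  { if (msr_works) fake_msr = val; return 0; }
static int rdmsr_sketch(uint64_t *val) { *val = fake_msr; return 0; }

static bool check_hw_exists_sketch(void)
{
	uint64_t val = 0xabcdUL, val_new = 0;
	int ret = 0;

	ret |= wrmsr_sketch(val);       /* write a known pattern... */
	ret |= rdmsr_sketch(&val_new);  /* ...and see whether it sticks */
	return !ret && val == val_new;
}

int main(void)
{
	printf("PMU present: %s\n", check_hw_exists_sketch() ? "yes" : "no");
	msr_works = true;
	printf("PMU present: %s\n", check_hw_exists_sketch() ? "yes" : "no");
	return 0;
}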
diff --git a/arch/x86/kernel/cpu/perf_event_amd.c b/arch/x86/kernel/cpu/perf_event_amd.c
index 46d58448c3af..e421b8cd6944 100644
--- a/arch/x86/kernel/cpu/perf_event_amd.c
+++ b/arch/x86/kernel/cpu/perf_event_amd.c
@@ -280,11 +280,11 @@ static struct amd_nb *amd_alloc_nb(int cpu, int nb_id)
struct amd_nb *nb;
int i;
- nb = kmalloc(sizeof(struct amd_nb), GFP_KERNEL);
+ nb = kmalloc_node(sizeof(struct amd_nb), GFP_KERNEL | __GFP_ZERO,
+ cpu_to_node(cpu));
if (!nb)
return NULL;
- memset(nb, 0, sizeof(*nb));
nb->nb_id = nb_id;
/*
diff --git a/arch/x86/kernel/cpu/perf_event_intel_ds.c b/arch/x86/kernel/cpu/perf_event_intel_ds.c
index 4977f9c400e5..b7dcd9f2b8a0 100644
--- a/arch/x86/kernel/cpu/perf_event_intel_ds.c
+++ b/arch/x86/kernel/cpu/perf_event_intel_ds.c
@@ -74,6 +74,107 @@ static void fini_debug_store_on_cpu(int cpu)
wrmsr_on_cpu(cpu, MSR_IA32_DS_AREA, 0, 0);
}
+static int alloc_pebs_buffer(int cpu)
+{
+ struct debug_store *ds = per_cpu(cpu_hw_events, cpu).ds;
+ int node = cpu_to_node(cpu);
+ int max, thresh = 1; /* always use a single PEBS record */
+ void *buffer;
+
+ if (!x86_pmu.pebs)
+ return 0;
+
+ buffer = kmalloc_node(PEBS_BUFFER_SIZE, GFP_KERNEL | __GFP_ZERO, node);
+ if (unlikely(!buffer))
+ return -ENOMEM;
+
+ max = PEBS_BUFFER_SIZE / x86_pmu.pebs_record_size;
+
+ ds->pebs_buffer_base = (u64)(unsigned long)buffer;
+ ds->pebs_index = ds->pebs_buffer_base;
+ ds->pebs_absolute_maximum = ds->pebs_buffer_base +
+ max * x86_pmu.pebs_record_size;
+
+ ds->pebs_interrupt_threshold = ds->pebs_buffer_base +
+ thresh * x86_pmu.pebs_record_size;
+
+ return 0;
+}
+
+static void release_pebs_buffer(int cpu)
+{
+ struct debug_store *ds = per_cpu(cpu_hw_events, cpu).ds;
+
+ if (!ds || !x86_pmu.pebs)
+ return;
+
+ kfree((void *)(unsigned long)ds->pebs_buffer_base);
+ ds->pebs_buffer_base = 0;
+}
+
+static int alloc_bts_buffer(int cpu)
+{
+ struct debug_store *ds = per_cpu(cpu_hw_events, cpu).ds;
+ int node = cpu_to_node(cpu);
+ int max, thresh;
+ void *buffer;
+
+ if (!x86_pmu.bts)
+ return 0;
+
+ buffer = kmalloc_node(BTS_BUFFER_SIZE, GFP_KERNEL | __GFP_ZERO, node);
+ if (unlikely(!buffer))
+ return -ENOMEM;
+
+ max = BTS_BUFFER_SIZE / BTS_RECORD_SIZE;
+ thresh = max / 16;
+
+ ds->bts_buffer_base = (u64)(unsigned long)buffer;
+ ds->bts_index = ds->bts_buffer_base;
+ ds->bts_absolute_maximum = ds->bts_buffer_base +
+ max * BTS_RECORD_SIZE;
+ ds->bts_interrupt_threshold = ds->bts_absolute_maximum -
+ thresh * BTS_RECORD_SIZE;
+
+ return 0;
+}
+
+static void release_bts_buffer(int cpu)
+{
+ struct debug_store *ds = per_cpu(cpu_hw_events, cpu).ds;
+
+ if (!ds || !x86_pmu.bts)
+ return;
+
+ kfree((void *)(unsigned long)ds->bts_buffer_base);
+ ds->bts_buffer_base = 0;
+}
+
+static int alloc_ds_buffer(int cpu)
+{
+ int node = cpu_to_node(cpu);
+ struct debug_store *ds;
+
+ ds = kmalloc_node(sizeof(*ds), GFP_KERNEL | __GFP_ZERO, node);
+ if (unlikely(!ds))
+ return -ENOMEM;
+
+ per_cpu(cpu_hw_events, cpu).ds = ds;
+
+ return 0;
+}
+
+static void release_ds_buffer(int cpu)
+{
+ struct debug_store *ds = per_cpu(cpu_hw_events, cpu).ds;
+
+ if (!ds)
+ return;
+
+ per_cpu(cpu_hw_events, cpu).ds = NULL;
+ kfree(ds);
+}
+
static void release_ds_buffers(void)
{
int cpu;
@@ -82,93 +183,77 @@ static void release_ds_buffers(void)
return;
get_online_cpus();
-
for_each_online_cpu(cpu)
fini_debug_store_on_cpu(cpu);
for_each_possible_cpu(cpu) {
- struct debug_store *ds = per_cpu(cpu_hw_events, cpu).ds;
-
- if (!ds)
- continue;
-
- per_cpu(cpu_hw_events, cpu).ds = NULL;
-
- kfree((void *)(unsigned long)ds->pebs_buffer_base);
- kfree((void *)(unsigned long)ds->bts_buffer_base);
- kfree(ds);
+ release_pebs_buffer(cpu);
+ release_bts_buffer(cpu);
+ release_ds_buffer(cpu);
}
-
put_online_cpus();
}
-static int reserve_ds_buffers(void)
+static void reserve_ds_buffers(void)
{
- int cpu, err = 0;
+ int bts_err = 0, pebs_err = 0;
+ int cpu;
+
+ x86_pmu.bts_active = 0;
+ x86_pmu.pebs_active = 0;
if (!x86_pmu.bts && !x86_pmu.pebs)
- return 0;
+ return;
+
+ if (!x86_pmu.bts)
+ bts_err = 1;
+
+ if (!x86_pmu.pebs)
+ pebs_err = 1;
get_online_cpus();
for_each_possible_cpu(cpu) {
- struct debug_store *ds;
- void *buffer;
- int max, thresh;
+ if (alloc_ds_buffer(cpu)) {
+ bts_err = 1;
+ pebs_err = 1;
+ }
+
+ if (!bts_err && alloc_bts_buffer(cpu))
+ bts_err = 1;
- err = -ENOMEM;
- ds = kzalloc(sizeof(*ds), GFP_KERNEL);
- if (unlikely(!ds))
+ if (!pebs_err && alloc_pebs_buffer(cpu))
+ pebs_err = 1;
+
+ if (bts_err && pebs_err)
break;
- per_cpu(cpu_hw_events, cpu).ds = ds;
-
- if (x86_pmu.bts) {
- buffer = kzalloc(BTS_BUFFER_SIZE, GFP_KERNEL);
- if (unlikely(!buffer))
- break;
-
- max = BTS_BUFFER_SIZE / BTS_RECORD_SIZE;
- thresh = max / 16;
-
- ds->bts_buffer_base = (u64)(unsigned long)buffer;
- ds->bts_index = ds->bts_buffer_base;
- ds->bts_absolute_maximum = ds->bts_buffer_base +
- max * BTS_RECORD_SIZE;
- ds->bts_interrupt_threshold = ds->bts_absolute_maximum -
- thresh * BTS_RECORD_SIZE;
- }
+ }
- if (x86_pmu.pebs) {
- buffer = kzalloc(PEBS_BUFFER_SIZE, GFP_KERNEL);
- if (unlikely(!buffer))
- break;
-
- max = PEBS_BUFFER_SIZE / x86_pmu.pebs_record_size;
-
- ds->pebs_buffer_base = (u64)(unsigned long)buffer;
- ds->pebs_index = ds->pebs_buffer_base;
- ds->pebs_absolute_maximum = ds->pebs_buffer_base +
- max * x86_pmu.pebs_record_size;
- /*
- * Always use single record PEBS
- */
- ds->pebs_interrupt_threshold = ds->pebs_buffer_base +
- x86_pmu.pebs_record_size;
- }
+ if (bts_err) {
+ for_each_possible_cpu(cpu)
+ release_bts_buffer(cpu);
+ }
- err = 0;
+ if (pebs_err) {
+ for_each_possible_cpu(cpu)
+ release_pebs_buffer(cpu);
}
- if (err)
- release_ds_buffers();
- else {
+ if (bts_err && pebs_err) {
+ for_each_possible_cpu(cpu)
+ release_ds_buffer(cpu);
+ } else {
+ if (x86_pmu.bts && !bts_err)
+ x86_pmu.bts_active = 1;
+
+ if (x86_pmu.pebs && !pebs_err)
+ x86_pmu.pebs_active = 1;
+
for_each_online_cpu(cpu)
init_debug_store_on_cpu(cpu);
}
put_online_cpus();
-
- return err;
}
/*
@@ -233,7 +318,7 @@ static int intel_pmu_drain_bts_buffer(void)
if (!event)
return 0;
- if (!ds)
+ if (!x86_pmu.bts_active)
return 0;
at = (struct bts_record *)(unsigned long)ds->bts_buffer_base;
@@ -503,7 +588,7 @@ static void intel_pmu_drain_pebs_core(struct pt_regs *iregs)
struct pebs_record_core *at, *top;
int n;
- if (!ds || !x86_pmu.pebs)
+ if (!x86_pmu.pebs_active)
return;
at = (struct pebs_record_core *)(unsigned long)ds->pebs_buffer_base;
@@ -545,7 +630,7 @@ static void intel_pmu_drain_pebs_nhm(struct pt_regs *iregs)
u64 status = 0;
int bit, n;
- if (!ds || !x86_pmu.pebs)
+ if (!x86_pmu.pebs_active)
return;
at = (struct pebs_record_nhm *)(unsigned long)ds->pebs_buffer_base;
@@ -630,9 +715,8 @@ static void intel_ds_init(void)
#else /* CONFIG_CPU_SUP_INTEL */
-static int reserve_ds_buffers(void)
+static void reserve_ds_buffers(void)
{
- return 0;
}
static void release_ds_buffers(void)
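(Editorial note: reserve_ds_buffers() now tracks BTS and PEBS allocation failures independently: whichever buffer type cannot be allocated on every CPU is released and left inactive, while the other can still be enabled. The condensed userspace sketch below shows that partial-failure bookkeeping; NCPUS, the alloc stub and the *_active flags are invented for the example.)

#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>

#define NCPUS 4

static void *bts_buf[NCPUS], *pebs_buf[NCPUS];
static bool bts_active, pebs_active;

static int alloc_buf(void **slot, size_t size) { *slot = malloc(size); return *slot ? 0 : -1; }

static void reserve_ds_buffers_sketch(void)
{
	int bts_err = 0, pebs_err = 0;

	for (int cpu = 0; cpu < NCPUS; cpu++) {
		if (!bts_err && alloc_buf(&bts_buf[cpu], 4096))
			bts_err = 1;             /* give up on BTS only */
		if (!pebs_err && alloc_buf(&pebs_buf[cpu], 4096))
			pebs_err = 1;            /* give up on PEBS only */
	}

	if (bts_err)
		for (int cpu = 0; cpu < NCPUS; cpu++) { free(bts_buf[cpu]); bts_buf[cpu] = NULL; }
	if (pebs_err)
		for (int cpu = 0; cpu < NCPUS; cpu++) { free(pebs_buf[cpu]); pebs_buf[cpu] = NULL; }

	bts_active = !bts_err;   /* each feature is enabled independently */
	pebs_active = !pebs_err;
}

int main(void)
{
	reserve_ds_buffers_sketch();
	printf("bts_active=%d pebs_active=%d\n", bts_active, pebs_active);
	return 0;
}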
diff --git a/arch/x86/kernel/cpuid.c b/arch/x86/kernel/cpuid.c
index 1b7b31ab7d86..212a6a42527c 100644
--- a/arch/x86/kernel/cpuid.c
+++ b/arch/x86/kernel/cpuid.c
@@ -33,7 +33,6 @@
#include <linux/init.h>
#include <linux/poll.h>
#include <linux/smp.h>
-#include <linux/smp_lock.h>
#include <linux/major.h>
#include <linux/fs.h>
#include <linux/device.h>
diff --git a/arch/x86/kernel/crash_dump_32.c b/arch/x86/kernel/crash_dump_32.c
index 67414550c3cc..d5cd13945d5a 100644
--- a/arch/x86/kernel/crash_dump_32.c
+++ b/arch/x86/kernel/crash_dump_32.c
@@ -61,7 +61,7 @@ ssize_t copy_oldmem_page(unsigned long pfn, char *buf,
if (!is_crashed_pfn_valid(pfn))
return -EFAULT;
- vaddr = kmap_atomic_pfn(pfn, KM_PTE0);
+ vaddr = kmap_atomic_pfn(pfn);
if (!userbuf) {
memcpy(buf, (vaddr + offset), csize);
diff --git a/arch/x86/kernel/dumpstack_32.c b/arch/x86/kernel/dumpstack_32.c
index 0f6376ffa2d9..1bc7f75a5bda 100644
--- a/arch/x86/kernel/dumpstack_32.c
+++ b/arch/x86/kernel/dumpstack_32.c
@@ -82,11 +82,11 @@ show_stack_log_lvl(struct task_struct *task, struct pt_regs *regs,
if (kstack_end(stack))
break;
if (i && ((i % STACKSLOTS_PER_LINE) == 0))
- printk("\n%s", log_lvl);
- printk(" %08lx", *stack++);
+ printk(KERN_CONT "\n");
+ printk(KERN_CONT " %08lx", *stack++);
touch_nmi_watchdog();
}
- printk("\n");
+ printk(KERN_CONT "\n");
show_trace_log_lvl(task, regs, sp, bp, log_lvl);
}
diff --git a/arch/x86/kernel/dumpstack_64.c b/arch/x86/kernel/dumpstack_64.c
index 57a21f11c791..6a340485249a 100644
--- a/arch/x86/kernel/dumpstack_64.c
+++ b/arch/x86/kernel/dumpstack_64.c
@@ -265,20 +265,20 @@ show_stack_log_lvl(struct task_struct *task, struct pt_regs *regs,
if (stack >= irq_stack && stack <= irq_stack_end) {
if (stack == irq_stack_end) {
stack = (unsigned long *) (irq_stack_end[-1]);
- printk(" <EOI> ");
+ printk(KERN_CONT " <EOI> ");
}
} else {
if (((long) stack & (THREAD_SIZE-1)) == 0)
break;
}
if (i && ((i % STACKSLOTS_PER_LINE) == 0))
- printk("\n%s", log_lvl);
- printk(" %016lx", *stack++);
+ printk(KERN_CONT "\n");
+ printk(KERN_CONT " %016lx", *stack++);
touch_nmi_watchdog();
}
preempt_enable();
- printk("\n");
+ printk(KERN_CONT "\n");
show_trace_log_lvl(task, regs, sp, bp, log_lvl);
}
diff --git a/arch/x86/kernel/entry_32.S b/arch/x86/kernel/entry_32.S
index 59e175e89599..591e60104278 100644
--- a/arch/x86/kernel/entry_32.S
+++ b/arch/x86/kernel/entry_32.S
@@ -395,7 +395,7 @@ sysenter_past_esp:
* A tiny bit of offset fixup is necessary - 4*4 means the 4 words
* pushed above; +8 corresponds to copy_thread's esp0 setting.
*/
- pushl_cfi (TI_sysenter_return-THREAD_SIZE_asm+8+4*4)(%esp)
+ pushl_cfi ((TI_sysenter_return)-THREAD_SIZE_asm+8+4*4)(%esp)
CFI_REL_OFFSET eip, 0
pushl_cfi %eax
diff --git a/arch/x86/kernel/entry_64.S b/arch/x86/kernel/entry_64.S
index fe2690d71c0c..e3ba417e8697 100644
--- a/arch/x86/kernel/entry_64.S
+++ b/arch/x86/kernel/entry_64.S
@@ -295,6 +295,7 @@ ENDPROC(native_usergs_sysret64)
.endm
/* save partial stack frame */
+ .pushsection .kprobes.text, "ax"
ENTRY(save_args)
XCPT_FRAME
cld
@@ -334,6 +335,7 @@ ENTRY(save_args)
ret
CFI_ENDPROC
END(save_args)
+ .popsection
ENTRY(save_rest)
PARTIAL_FRAME 1 REST_SKIP+8
diff --git a/arch/x86/kernel/hpet.c b/arch/x86/kernel/hpet.c
index aff0b3c27509..ae03cab4352e 100644
--- a/arch/x86/kernel/hpet.c
+++ b/arch/x86/kernel/hpet.c
@@ -713,7 +713,7 @@ static int hpet_cpuhp_notify(struct notifier_block *n,
switch (action & 0xf) {
case CPU_ONLINE:
- INIT_DELAYED_WORK_ON_STACK(&work.work, hpet_work);
+ INIT_DELAYED_WORK_ONSTACK(&work.work, hpet_work);
init_completion(&work.complete);
/* FIXME: add schedule_work_on() */
schedule_delayed_work_on(cpu, &work.work, 0);
diff --git a/arch/x86/kernel/hw_breakpoint.c b/arch/x86/kernel/hw_breakpoint.c
index ff15c9dcc25d..42c594254507 100644
--- a/arch/x86/kernel/hw_breakpoint.c
+++ b/arch/x86/kernel/hw_breakpoint.c
@@ -433,6 +433,10 @@ static int __kprobes hw_breakpoint_handler(struct die_args *args)
dr6_p = (unsigned long *)ERR_PTR(args->err);
dr6 = *dr6_p;
+ /* If it's a single step, TRAP bits are random */
+ if (dr6 & DR_STEP)
+ return NOTIFY_DONE;
+
/* Do an early return if no trap bits are set in DR6 */
if ((dr6 & DR_TRAP_BITS) == 0)
return NOTIFY_DONE;
diff --git a/arch/x86/kernel/irq_32.c b/arch/x86/kernel/irq_32.c
index 50fbbe60e507..96656f207751 100644
--- a/arch/x86/kernel/irq_32.c
+++ b/arch/x86/kernel/irq_32.c
@@ -17,6 +17,7 @@
#include <linux/delay.h>
#include <linux/uaccess.h>
#include <linux/percpu.h>
+#include <linux/mm.h>
#include <asm/apic.h>
@@ -60,9 +61,6 @@ union irq_ctx {
static DEFINE_PER_CPU(union irq_ctx *, hardirq_ctx);
static DEFINE_PER_CPU(union irq_ctx *, softirq_ctx);
-static DEFINE_PER_CPU_MULTIPAGE_ALIGNED(union irq_ctx, hardirq_stack, THREAD_SIZE);
-static DEFINE_PER_CPU_MULTIPAGE_ALIGNED(union irq_ctx, softirq_stack, THREAD_SIZE);
-
static void call_on_stack(void *func, void *stack)
{
asm volatile("xchgl %%ebx,%%esp \n"
@@ -128,7 +126,9 @@ void __cpuinit irq_ctx_init(int cpu)
if (per_cpu(hardirq_ctx, cpu))
return;
- irqctx = &per_cpu(hardirq_stack, cpu);
+ irqctx = page_address(alloc_pages_node(cpu_to_node(cpu),
+ THREAD_FLAGS,
+ THREAD_ORDER));
irqctx->tinfo.task = NULL;
irqctx->tinfo.exec_domain = NULL;
irqctx->tinfo.cpu = cpu;
@@ -137,7 +137,9 @@ void __cpuinit irq_ctx_init(int cpu)
per_cpu(hardirq_ctx, cpu) = irqctx;
- irqctx = &per_cpu(softirq_stack, cpu);
+ irqctx = page_address(alloc_pages_node(cpu_to_node(cpu),
+ THREAD_FLAGS,
+ THREAD_ORDER));
irqctx->tinfo.task = NULL;
irqctx->tinfo.exec_domain = NULL;
irqctx->tinfo.cpu = cpu;
@@ -150,11 +152,6 @@ void __cpuinit irq_ctx_init(int cpu)
cpu, per_cpu(hardirq_ctx, cpu), per_cpu(softirq_ctx, cpu));
}
-void irq_ctx_exit(int cpu)
-{
- per_cpu(hardirq_ctx, cpu) = NULL;
-}
-
asmlinkage void do_softirq(void)
{
unsigned long flags;
diff --git a/arch/x86/kernel/kgdb.c b/arch/x86/kernel/kgdb.c
index d81cfebb848f..cd21b654dec6 100644
--- a/arch/x86/kernel/kgdb.c
+++ b/arch/x86/kernel/kgdb.c
@@ -315,14 +315,18 @@ static void kgdb_remove_all_hw_break(void)
if (!breakinfo[i].enabled)
continue;
bp = *per_cpu_ptr(breakinfo[i].pev, cpu);
- if (bp->attr.disabled == 1)
+ if (!bp->attr.disabled) {
+ arch_uninstall_hw_breakpoint(bp);
+ bp->attr.disabled = 1;
continue;
+ }
if (dbg_is_early)
early_dr7 &= ~encode_dr7(i, breakinfo[i].len,
breakinfo[i].type);
- else
- arch_uninstall_hw_breakpoint(bp);
- bp->attr.disabled = 1;
+ else if (hw_break_release_slot(i))
+ printk(KERN_ERR "KGDB: hw bpt remove failed %lx\n",
+ breakinfo[i].addr);
+ breakinfo[i].enabled = 0;
}
}
@@ -387,7 +391,7 @@ kgdb_set_hw_break(unsigned long addr, int len, enum kgdb_bptype bptype)
* disable hardware debugging while it is processing gdb packets or
* handling exception.
*/
-void kgdb_disable_hw_debug(struct pt_regs *regs)
+static void kgdb_disable_hw_debug(struct pt_regs *regs)
{
int i;
int cpu = raw_smp_processor_id();
@@ -724,6 +728,7 @@ struct kgdb_arch arch_kgdb_ops = {
.flags = KGDB_HW_BREAKPOINT,
.set_hw_breakpoint = kgdb_set_hw_break,
.remove_hw_breakpoint = kgdb_remove_hw_break,
+ .disable_hw_break = kgdb_disable_hw_debug,
.remove_all_hw_break = kgdb_remove_all_hw_break,
.correct_hw_break = kgdb_correct_hw_break,
};
diff --git a/arch/x86/kernel/microcode_amd.c b/arch/x86/kernel/microcode_amd.c
index e1af7c055c7d..ce0cb4721c9a 100644
--- a/arch/x86/kernel/microcode_amd.c
+++ b/arch/x86/kernel/microcode_amd.c
@@ -212,7 +212,7 @@ static int install_equiv_cpu_table(const u8 *buf)
return 0;
}
- equiv_cpu_table = (struct equiv_cpu_entry *) vmalloc(size);
+ equiv_cpu_table = vmalloc(size);
if (!equiv_cpu_table) {
pr_err("failed to allocate equivalent CPU table\n");
return 0;
diff --git a/arch/x86/kernel/mmconf-fam10h_64.c b/arch/x86/kernel/mmconf-fam10h_64.c
index 71825806cd44..ac861b8348e2 100644
--- a/arch/x86/kernel/mmconf-fam10h_64.c
+++ b/arch/x86/kernel/mmconf-fam10h_64.c
@@ -25,7 +25,6 @@ struct pci_hostbridge_probe {
};
static u64 __cpuinitdata fam10h_pci_mmconf_base;
-static int __cpuinitdata fam10h_pci_mmconf_base_status;
static struct pci_hostbridge_probe pci_probes[] __cpuinitdata = {
{ 0, 0x18, PCI_VENDOR_ID_AMD, 0x1200 },
@@ -44,10 +43,12 @@ static int __cpuinit cmp_range(const void *x1, const void *x2)
return start1 - start2;
}
-/*[47:0] */
-/* need to avoid (0xfd<<32) and (0xfe<<32), ht used space */
+#define MMCONF_UNIT (1ULL << FAM10H_MMIO_CONF_BASE_SHIFT)
+#define MMCONF_MASK (~(MMCONF_UNIT - 1))
+#define MMCONF_SIZE (MMCONF_UNIT << 8)
+/* need to avoid (0xfd<<32), (0xfe<<32), and (0xff<<32), ht used space */
#define FAM10H_PCI_MMCONF_BASE (0xfcULL<<32)
-#define BASE_VALID(b) ((b != (0xfdULL << 32)) && (b != (0xfeULL << 32)))
+#define BASE_VALID(b) ((b) + MMCONF_SIZE <= (0xfdULL<<32) || (b) >= (1ULL<<40))
static void __cpuinit get_fam10h_pci_mmconf_base(void)
{
int i;
@@ -64,12 +65,11 @@ static void __cpuinit get_fam10h_pci_mmconf_base(void)
struct range range[8];
/* only try to get setting from BSP */
- /* -1 or 1 */
- if (fam10h_pci_mmconf_base_status)
+ if (fam10h_pci_mmconf_base)
return;
if (!early_pci_allowed())
- goto fail;
+ return;
found = 0;
for (i = 0; i < ARRAY_SIZE(pci_probes); i++) {
@@ -91,7 +91,7 @@ static void __cpuinit get_fam10h_pci_mmconf_base(void)
}
if (!found)
- goto fail;
+ return;
/* SYS_CFG */
address = MSR_K8_SYSCFG;
@@ -99,16 +99,16 @@ static void __cpuinit get_fam10h_pci_mmconf_base(void)
/* TOP_MEM2 is not enabled? */
if (!(val & (1<<21))) {
- tom2 = 0;
+ tom2 = 1ULL << 32;
} else {
/* TOP_MEM2 */
address = MSR_K8_TOP_MEM2;
rdmsrl(address, val);
- tom2 = val & (0xffffULL<<32);
+ tom2 = max(val & 0xffffff800000ULL, 1ULL << 32);
}
if (base <= tom2)
- base = tom2 + (1ULL<<32);
+ base = (tom2 + 2 * MMCONF_UNIT - 1) & MMCONF_MASK;
/*
* need to check if the range is in the high mmio range that is
@@ -123,11 +123,11 @@ static void __cpuinit get_fam10h_pci_mmconf_base(void)
if (!(reg & 3))
continue;
- start = (((u64)reg) << 8) & (0xffULL << 32); /* 39:16 on 31:8*/
+ start = (u64)(reg & 0xffffff00) << 8; /* 39:16 on 31:8*/
reg = read_pci_config(bus, slot, 1, 0x84 + (i << 3));
- end = (((u64)reg) << 8) & (0xffULL << 32); /* 39:16 on 31:8*/
+ end = ((u64)(reg & 0xffffff00) << 8) | 0xffff; /* 39:16 on 31:8*/
- if (!end)
+ if (end < tom2)
continue;
range[hi_mmio_num].start = start;
@@ -143,32 +143,27 @@ static void __cpuinit get_fam10h_pci_mmconf_base(void)
if (range[hi_mmio_num - 1].end < base)
goto out;
- if (range[0].start > base)
+ if (range[0].start > base + MMCONF_SIZE)
goto out;
/* need to find one window */
- base = range[0].start - (1ULL << 32);
+ base = (range[0].start & MMCONF_MASK) - MMCONF_UNIT;
if ((base > tom2) && BASE_VALID(base))
goto out;
- base = range[hi_mmio_num - 1].end + (1ULL << 32);
- if ((base > tom2) && BASE_VALID(base))
+ base = (range[hi_mmio_num - 1].end + MMCONF_UNIT) & MMCONF_MASK;
+ if (BASE_VALID(base))
goto out;
/* need to find window between ranges */
- if (hi_mmio_num > 1)
- for (i = 0; i < hi_mmio_num - 1; i++) {
- if (range[i + 1].start > (range[i].end + (1ULL << 32))) {
- base = range[i].end + (1ULL << 32);
- if ((base > tom2) && BASE_VALID(base))
- goto out;
- }
+ for (i = 1; i < hi_mmio_num; i++) {
+ base = (range[i - 1].end + MMCONF_UNIT) & MMCONF_MASK;
+ val = range[i].start & MMCONF_MASK;
+ if (val >= base + MMCONF_SIZE && BASE_VALID(base))
+ goto out;
}
-
-fail:
- fam10h_pci_mmconf_base_status = -1;
return;
+
out:
fam10h_pci_mmconf_base = base;
- fam10h_pci_mmconf_base_status = 1;
}
void __cpuinit fam10h_check_enable_mmcfg(void)
@@ -190,11 +185,10 @@ void __cpuinit fam10h_check_enable_mmcfg(void)
/* only trust the one handle 256 buses, if acpi=off */
if (!acpi_pci_disabled || busnbits >= 8) {
- u64 base;
- base = val & (0xffffULL << 32);
- if (fam10h_pci_mmconf_base_status <= 0) {
+ u64 base = val & MMCONF_MASK;
+
+ if (!fam10h_pci_mmconf_base) {
fam10h_pci_mmconf_base = base;
- fam10h_pci_mmconf_base_status = 1;
return;
} else if (fam10h_pci_mmconf_base == base)
return;
@@ -206,8 +200,10 @@ void __cpuinit fam10h_check_enable_mmcfg(void)
* with 256 buses
*/
get_fam10h_pci_mmconf_base();
- if (fam10h_pci_mmconf_base_status <= 0)
+ if (!fam10h_pci_mmconf_base) {
+ pci_probe &= ~PCI_CHECK_ENABLE_AMD_MMCONF;
return;
+ }
printk(KERN_INFO "Enable MMCONFIG on AMD Family 10h\n");
val &= ~((FAM10H_MMIO_CONF_BASE_MASK<<FAM10H_MMIO_CONF_BASE_SHIFT) |
@@ -217,13 +213,13 @@ void __cpuinit fam10h_check_enable_mmcfg(void)
wrmsrl(address, val);
}
-static int __devinit set_check_enable_amd_mmconf(const struct dmi_system_id *d)
+static int __init set_check_enable_amd_mmconf(const struct dmi_system_id *d)
{
pci_probe |= PCI_CHECK_ENABLE_AMD_MMCONF;
return 0;
}
-static const struct dmi_system_id __cpuinitconst mmconf_dmi_table[] = {
+static const struct dmi_system_id __initconst mmconf_dmi_table[] = {
{
.callback = set_check_enable_amd_mmconf,
.ident = "Sun Microsystems Machine",
@@ -234,7 +230,8 @@ static const struct dmi_system_id __cpuinitconst mmconf_dmi_table[] = {
{}
};
-void __cpuinit check_enable_amd_mmconf_dmi(void)
+/* Called from a __cpuinit function, but only on the BSP. */
+void __ref check_enable_amd_mmconf_dmi(void)
{
dmi_check_system(mmconf_dmi_table);
}
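
The rewrite above replaces the old 4 GB-granular guesses with MMCONF_UNIT arithmetic: candidate bases are aligned with MMCONF_MASK, the window occupies MMCONF_SIZE (256 buses), and BASE_VALID() now rejects any window overlapping the HyperTransport hole at 0xfd-0xff << 32. A stand-alone check of that arithmetic is sketched below; the shift value of 20 (1 MiB of config space per bus) is an assumption for illustration.

/* Stand-alone check of the MMCONF window arithmetic used above.
 * The shift value is an assumption (1 MiB of config space per bus). */
#include <stdio.h>
#include <stdint.h>

#define FAM10H_MMIO_CONF_BASE_SHIFT 20			/* assumed */
#define MMCONF_UNIT (1ULL << FAM10H_MMIO_CONF_BASE_SHIFT)
#define MMCONF_MASK (~(MMCONF_UNIT - 1))
#define MMCONF_SIZE (MMCONF_UNIT << 8)			/* 256 buses */

/* window must end below the HT hole at 0xfd<<32, or start above 1<<40 */
#define BASE_VALID(b) ((b) + MMCONF_SIZE <= (0xfdULL << 32) || (b) >= (1ULL << 40))

int main(void)
{
	uint64_t tom2 = 0x140000000ULL;			/* example: TOM2 at 5 GiB */

	/* place the window just above TOM2, rounded to a unit boundary */
	uint64_t base = (tom2 + 2 * MMCONF_UNIT - 1) & MMCONF_MASK;

	printf("base = %#llx, size = %#llx, valid = %d\n",
	       (unsigned long long)base,
	       (unsigned long long)MMCONF_SIZE,
	       BASE_VALID(base));
	return 0;
}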
diff --git a/arch/x86/kernel/msr.c b/arch/x86/kernel/msr.c
index 7bf2dc4c8f70..12fcbe2c143e 100644
--- a/arch/x86/kernel/msr.c
+++ b/arch/x86/kernel/msr.c
@@ -30,7 +30,6 @@
#include <linux/init.h>
#include <linux/poll.h>
#include <linux/smp.h>
-#include <linux/smp_lock.h>
#include <linux/major.h>
#include <linux/fs.h>
#include <linux/device.h>
diff --git a/arch/x86/kernel/ptrace.c b/arch/x86/kernel/ptrace.c
index 70c4872cd8aa..45892dc4b72a 100644
--- a/arch/x86/kernel/ptrace.c
+++ b/arch/x86/kernel/ptrace.c
@@ -801,7 +801,8 @@ void ptrace_disable(struct task_struct *child)
static const struct user_regset_view user_x86_32_view; /* Initialized below. */
#endif
-long arch_ptrace(struct task_struct *child, long request, long addr, long data)
+long arch_ptrace(struct task_struct *child, long request,
+ unsigned long addr, unsigned long data)
{
int ret;
unsigned long __user *datap = (unsigned long __user *)data;
@@ -812,8 +813,7 @@ long arch_ptrace(struct task_struct *child, long request, long addr, long data)
unsigned long tmp;
ret = -EIO;
- if ((addr & (sizeof(data) - 1)) || addr < 0 ||
- addr >= sizeof(struct user))
+ if ((addr & (sizeof(data) - 1)) || addr >= sizeof(struct user))
break;
tmp = 0; /* Default return condition */
@@ -830,8 +830,7 @@ long arch_ptrace(struct task_struct *child, long request, long addr, long data)
case PTRACE_POKEUSR: /* write the word at location addr in the USER area */
ret = -EIO;
- if ((addr & (sizeof(data) - 1)) || addr < 0 ||
- addr >= sizeof(struct user))
+ if ((addr & (sizeof(data) - 1)) || addr >= sizeof(struct user))
break;
if (addr < sizeof(struct user_regs_struct))
@@ -888,17 +887,17 @@ long arch_ptrace(struct task_struct *child, long request, long addr, long data)
#if defined CONFIG_X86_32 || defined CONFIG_IA32_EMULATION
case PTRACE_GET_THREAD_AREA:
- if (addr < 0)
+ if ((int) addr < 0)
return -EIO;
ret = do_get_thread_area(child, addr,
- (struct user_desc __user *) data);
+ (struct user_desc __user *)data);
break;
case PTRACE_SET_THREAD_AREA:
- if (addr < 0)
+ if ((int) addr < 0)
return -EIO;
ret = do_set_thread_area(child, addr,
- (struct user_desc __user *) data, 0);
+ (struct user_desc __user *)data, 0);
break;
#endif
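
With addr and data now unsigned long, the `addr < 0` half of the PEEKUSR/POKEUSR range check is dead code and is dropped; only the alignment test and `addr >= sizeof(struct user)` remain. PTRACE_GET/SET_THREAD_AREA keeps its sign check by casting to int, since a negative index is still meaningful for that request. A sketch of the two checks follows; USER_AREA_SIZE is a hypothetical stand-in for sizeof(struct user).

/* Sketch of the bounds checks after the signedness change.
 * USER_AREA_SIZE stands in for sizeof(struct user). */
#include <stdio.h>

#define USER_AREA_SIZE 512UL		/* hypothetical */

static int peekusr_ok(unsigned long addr)
{
	/* unsigned: a formerly "negative" addr wraps and fails the >= test */
	return !(addr & (sizeof(unsigned long) - 1)) && addr < USER_AREA_SIZE;
}

static int thread_area_ok(unsigned long addr)
{
	/* index semantics: still reject values that were negative as longs */
	return (int)addr >= 0;
}

int main(void)
{
	printf("%d %d\n", peekusr_ok(8), peekusr_ok(-8UL));		/* 1 0 */
	printf("%d %d\n", thread_area_ok(2), thread_area_ok(-1UL));	/* 1 0 */
	return 0;
}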
diff --git a/arch/x86/kernel/pvclock.c b/arch/x86/kernel/pvclock.c
index bab3b9e6f66d..42eb3300dfc6 100644
--- a/arch/x86/kernel/pvclock.c
+++ b/arch/x86/kernel/pvclock.c
@@ -41,44 +41,6 @@ void pvclock_set_flags(u8 flags)
valid_flags = flags;
}
-/*
- * Scale a 64-bit delta by scaling and multiplying by a 32-bit fraction,
- * yielding a 64-bit result.
- */
-static inline u64 scale_delta(u64 delta, u32 mul_frac, int shift)
-{
- u64 product;
-#ifdef __i386__
- u32 tmp1, tmp2;
-#endif
-
- if (shift < 0)
- delta >>= -shift;
- else
- delta <<= shift;
-
-#ifdef __i386__
- __asm__ (
- "mul %5 ; "
- "mov %4,%%eax ; "
- "mov %%edx,%4 ; "
- "mul %5 ; "
- "xor %5,%5 ; "
- "add %4,%%eax ; "
- "adc %5,%%edx ; "
- : "=A" (product), "=r" (tmp1), "=r" (tmp2)
- : "a" ((u32)delta), "1" ((u32)(delta >> 32)), "2" (mul_frac) );
-#elif defined(__x86_64__)
- __asm__ (
- "mul %%rdx ; shrd $32,%%rdx,%%rax"
- : "=a" (product) : "0" (delta), "d" ((u64)mul_frac) );
-#else
-#error implement me!
-#endif
-
- return product;
-}
-
static u64 pvclock_get_nsec_offset(struct pvclock_shadow_time *shadow)
{
u64 delta = native_read_tsc() - shadow->tsc_timestamp;
@@ -121,6 +83,11 @@ unsigned long pvclock_tsc_khz(struct pvclock_vcpu_time_info *src)
static atomic64_t last_value = ATOMIC64_INIT(0);
+void pvclock_resume(void)
+{
+ atomic64_set(&last_value, 0);
+}
+
cycle_t pvclock_clocksource_read(struct pvclock_vcpu_time_info *src)
{
struct pvclock_shadow_time shadow;
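
The deleted scale_delta() computed (delta << shift) * mul_frac >> 32, i.e. a 32.32 fixed-point scaling of a TSC delta, with inline assembly to get the 64x32 multiply; an equivalent helper lives on in shared pvclock code, which is why it can go here. The new pvclock_resume() lets the resume path reset the last_value watermark that keeps the clock monotonic. A portable model of the removed scaling, assuming a GCC/Clang 64-bit target with unsigned __int128, is sketched below.

/* Portable model of the deleted scale_delta(): (delta << shift) * frac >> 32,
 * done without overflow by widening to 128 bits (assumes GCC/Clang, 64-bit). */
#include <stdio.h>
#include <stdint.h>

static uint64_t scale_delta(uint64_t delta, uint32_t mul_frac, int shift)
{
	if (shift < 0)
		delta >>= -shift;
	else
		delta <<= shift;

	return (uint64_t)(((unsigned __int128)delta * mul_frac) >> 32);
}

int main(void)
{
	/* e.g. convert 3,000,000 TSC cycles at 3 GHz to nanoseconds:
	 * mul_frac = 2^32 / 3, shift = 0  ->  roughly 1,000,000 ns */
	printf("%llu\n",
	       (unsigned long long)scale_delta(3000000, 0x55555555u, 0));
	return 0;
}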
diff --git a/arch/x86/kernel/reboot.c b/arch/x86/kernel/reboot.c
index f7f53dcd3e0a..c495aa8d4815 100644
--- a/arch/x86/kernel/reboot.c
+++ b/arch/x86/kernel/reboot.c
@@ -635,7 +635,7 @@ void native_machine_shutdown(void)
/* O.K Now that I'm on the appropriate processor,
* stop all of the others.
*/
- smp_send_stop();
+ stop_other_cpus();
#endif
lapic_shutdown();
diff --git a/arch/x86/kernel/setup.c b/arch/x86/kernel/setup.c
index 95a32746fbf9..21c6746338af 100644
--- a/arch/x86/kernel/setup.c
+++ b/arch/x86/kernel/setup.c
@@ -769,6 +769,8 @@ void __init setup_arch(char **cmdline_p)
x86_init.oem.arch_setup();
+ resource_alloc_from_bottom = 0;
+ iomem_resource.end = (1ULL << boot_cpu_data.x86_phys_bits) - 1;
setup_memory_map();
parse_setup_data();
/* update the e820_saved too */
diff --git a/arch/x86/kernel/smp.c b/arch/x86/kernel/smp.c
index d801210945d6..513deac7228d 100644
--- a/arch/x86/kernel/smp.c
+++ b/arch/x86/kernel/smp.c
@@ -159,10 +159,10 @@ asmlinkage void smp_reboot_interrupt(void)
irq_exit();
}
-static void native_smp_send_stop(void)
+static void native_stop_other_cpus(int wait)
{
unsigned long flags;
- unsigned long wait;
+ unsigned long timeout;
if (reboot_force)
return;
@@ -179,9 +179,12 @@ static void native_smp_send_stop(void)
if (num_online_cpus() > 1) {
apic->send_IPI_allbutself(REBOOT_VECTOR);
- /* Don't wait longer than a second */
- wait = USEC_PER_SEC;
- while (num_online_cpus() > 1 && wait--)
+ /*
+ * Don't wait longer than a second if the caller
+ * didn't ask us to wait.
+ */
+ timeout = USEC_PER_SEC;
+ while (num_online_cpus() > 1 && (wait || timeout--))
udelay(1);
}
@@ -227,7 +230,7 @@ struct smp_ops smp_ops = {
.smp_prepare_cpus = native_smp_prepare_cpus,
.smp_cpus_done = native_smp_cpus_done,
- .smp_send_stop = native_smp_send_stop,
+ .stop_other_cpus = native_stop_other_cpus,
.smp_send_reschedule = native_smp_send_reschedule,
.cpu_up = native_cpu_up,
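
Renaming smp_send_stop() to stop_other_cpus() adds a wait parameter: callers such as native_machine_shutdown() can now insist that all other CPUs really go offline, while the old behavior of giving up after roughly a second is kept when wait is 0. The loop condition is the interesting part; a compact model follows (other_cpus and udelay_1us() are made-up stand-ins).

/* Model of the wait-vs-timeout loop: with wait != 0 the timeout is ignored,
 * otherwise we poll for at most USEC_PER_SEC iterations. */
#include <stdio.h>

#define USEC_PER_SEC 1000000UL

static unsigned long other_cpus = 3;	/* pretend 3 other CPUs are online */

static void udelay_1us(void)
{
	if (other_cpus)
		other_cpus--;		/* they stop eventually */
}

static void stop_other_cpus(int wait)
{
	unsigned long timeout = USEC_PER_SEC;

	while (other_cpus > 0 && (wait || timeout--))
		udelay_1us();
}

int main(void)
{
	stop_other_cpus(1);		/* waits until every CPU is gone */
	printf("still online: %lu\n", other_cpus);
	return 0;
}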
diff --git a/arch/x86/kernel/smpboot.c b/arch/x86/kernel/smpboot.c
index 6af118511b4a..083e99d1b7df 100644
--- a/arch/x86/kernel/smpboot.c
+++ b/arch/x86/kernel/smpboot.c
@@ -747,7 +747,7 @@ static int __cpuinit do_boot_cpu(int apicid, int cpu)
.done = COMPLETION_INITIALIZER_ONSTACK(c_idle.done),
};
- INIT_WORK_ON_STACK(&c_idle.work, do_fork_idle);
+ INIT_WORK_ONSTACK(&c_idle.work, do_fork_idle);
alternatives_smp_switch(1);
@@ -1373,7 +1373,6 @@ void play_dead_common(void)
{
idle_task_exit();
reset_lazy_tlbstate();
- irq_ctx_exit(raw_smp_processor_id());
c1e_remove_cpu(raw_smp_processor_id());
mb();
diff --git a/arch/x86/kernel/x86_init.c b/arch/x86/kernel/x86_init.c
index cd6da6bf3eca..ceb2911aa439 100644
--- a/arch/x86/kernel/x86_init.c
+++ b/arch/x86/kernel/x86_init.c
@@ -6,10 +6,12 @@
#include <linux/init.h>
#include <linux/ioport.h>
#include <linux/module.h>
+#include <linux/pci.h>
#include <asm/bios_ebda.h>
#include <asm/paravirt.h>
#include <asm/pci_x86.h>
+#include <asm/pci.h>
#include <asm/mpspec.h>
#include <asm/setup.h>
#include <asm/apic.h>
@@ -99,3 +101,8 @@ struct x86_platform_ops x86_platform = {
};
EXPORT_SYMBOL_GPL(x86_platform);
+struct x86_msi_ops x86_msi = {
+ .setup_msi_irqs = native_setup_msi_irqs,
+ .teardown_msi_irq = native_teardown_msi_irq,
+ .teardown_msi_irqs = default_teardown_msi_irqs,
+};
diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c
index 908ea5464a51..fb8b376bf28c 100644
--- a/arch/x86/kvm/mmu.c
+++ b/arch/x86/kvm/mmu.c
@@ -720,7 +720,7 @@ static void rmap_remove(struct kvm *kvm, u64 *spte)
}
}
-static void set_spte_track_bits(u64 *sptep, u64 new_spte)
+static int set_spte_track_bits(u64 *sptep, u64 new_spte)
{
pfn_t pfn;
u64 old_spte = *sptep;
@@ -731,19 +731,20 @@ static void set_spte_track_bits(u64 *sptep, u64 new_spte)
old_spte = __xchg_spte(sptep, new_spte);
if (!is_rmap_spte(old_spte))
- return;
+ return 0;
pfn = spte_to_pfn(old_spte);
if (!shadow_accessed_mask || old_spte & shadow_accessed_mask)
kvm_set_pfn_accessed(pfn);
if (!shadow_dirty_mask || (old_spte & shadow_dirty_mask))
kvm_set_pfn_dirty(pfn);
+ return 1;
}
static void drop_spte(struct kvm *kvm, u64 *sptep, u64 new_spte)
{
- set_spte_track_bits(sptep, new_spte);
- rmap_remove(kvm, sptep);
+ if (set_spte_track_bits(sptep, new_spte))
+ rmap_remove(kvm, sptep);
}
static u64 *rmap_next(struct kvm *kvm, unsigned long *rmapp, u64 *spte)
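
set_spte_track_bits() now reports whether the old SPTE was actually a present, rmap'd mapping, and drop_spte() only calls rmap_remove() in that case; unlinking an entry that was never in the reverse map would corrupt the rmap list. A toy version of the "only unlink what was linked" pattern is sketched below; the names and bit layout are illustrative, not KVM's.

/* Toy model: only unlink an entry from the reverse map if the old value
 * really was a present mapping. Names are illustrative, not KVM's. */
#include <stdio.h>
#include <stdint.h>

#define SPTE_PRESENT 0x1ULL

static int set_spte(uint64_t *sptep, uint64_t new_spte)
{
	uint64_t old = *sptep;

	*sptep = new_spte;
	return (old & SPTE_PRESENT) != 0;	/* caller unlinks only then */
}

static void drop_spte(uint64_t *sptep, uint64_t new_spte)
{
	if (set_spte(sptep, new_spte))
		printf("rmap_remove()\n");
	else
		printf("nothing to unlink\n");
}

int main(void)
{
	uint64_t spte = 0;			/* never mapped */

	drop_spte(&spte, 0);			/* must not touch the rmap */
	spte = 0x1000 | SPTE_PRESENT;
	drop_spte(&spte, 0);			/* now the unlink is required */
	return 0;
}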
diff --git a/arch/x86/kvm/svm.c b/arch/x86/kvm/svm.c
index 82e144a4e514..1ca12298ffc7 100644
--- a/arch/x86/kvm/svm.c
+++ b/arch/x86/kvm/svm.c
@@ -3395,6 +3395,7 @@ static void svm_vcpu_run(struct kvm_vcpu *vcpu)
vcpu->arch.regs[VCPU_REGS_RIP] = svm->vmcb->save.rip;
load_host_msrs(vcpu);
+ kvm_load_ldt(ldt_selector);
loadsegment(fs, fs_selector);
#ifdef CONFIG_X86_64
load_gs_index(gs_selector);
@@ -3402,7 +3403,6 @@ static void svm_vcpu_run(struct kvm_vcpu *vcpu)
#else
loadsegment(gs, gs_selector);
#endif
- kvm_load_ldt(ldt_selector);
reload_tss(vcpu);
diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
index 8da0e45ff7c9..ff21fdda0c53 100644
--- a/arch/x86/kvm/vmx.c
+++ b/arch/x86/kvm/vmx.c
@@ -821,10 +821,9 @@ static void vmx_save_host_state(struct kvm_vcpu *vcpu)
#endif
#ifdef CONFIG_X86_64
- if (is_long_mode(&vmx->vcpu)) {
- rdmsrl(MSR_KERNEL_GS_BASE, vmx->msr_host_kernel_gs_base);
+ rdmsrl(MSR_KERNEL_GS_BASE, vmx->msr_host_kernel_gs_base);
+ if (is_long_mode(&vmx->vcpu))
wrmsrl(MSR_KERNEL_GS_BASE, vmx->msr_guest_kernel_gs_base);
- }
#endif
for (i = 0; i < vmx->save_nmsrs; ++i)
kvm_set_shared_msr(vmx->guest_msrs[i].index,
@@ -839,23 +838,23 @@ static void __vmx_load_host_state(struct vcpu_vmx *vmx)
++vmx->vcpu.stat.host_state_reload;
vmx->host_state.loaded = 0;
- if (vmx->host_state.fs_reload_needed)
- loadsegment(fs, vmx->host_state.fs_sel);
+#ifdef CONFIG_X86_64
+ if (is_long_mode(&vmx->vcpu))
+ rdmsrl(MSR_KERNEL_GS_BASE, vmx->msr_guest_kernel_gs_base);
+#endif
if (vmx->host_state.gs_ldt_reload_needed) {
kvm_load_ldt(vmx->host_state.ldt_sel);
#ifdef CONFIG_X86_64
load_gs_index(vmx->host_state.gs_sel);
- wrmsrl(MSR_KERNEL_GS_BASE, current->thread.gs);
#else
loadsegment(gs, vmx->host_state.gs_sel);
#endif
}
+ if (vmx->host_state.fs_reload_needed)
+ loadsegment(fs, vmx->host_state.fs_sel);
reload_tss();
#ifdef CONFIG_X86_64
- if (is_long_mode(&vmx->vcpu)) {
- rdmsrl(MSR_KERNEL_GS_BASE, vmx->msr_guest_kernel_gs_base);
- wrmsrl(MSR_KERNEL_GS_BASE, vmx->msr_host_kernel_gs_base);
- }
+ wrmsrl(MSR_KERNEL_GS_BASE, vmx->msr_host_kernel_gs_base);
#endif
if (current_thread_info()->status & TS_USEDFPU)
clts();
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index 2288ad829b32..cdac9e592aa5 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -2560,6 +2560,7 @@ static void kvm_vcpu_ioctl_x86_get_vcpu_events(struct kvm_vcpu *vcpu,
!kvm_exception_is_soft(vcpu->arch.exception.nr);
events->exception.nr = vcpu->arch.exception.nr;
events->exception.has_error_code = vcpu->arch.exception.has_error_code;
+ events->exception.pad = 0;
events->exception.error_code = vcpu->arch.exception.error_code;
events->interrupt.injected =
@@ -2573,12 +2574,14 @@ static void kvm_vcpu_ioctl_x86_get_vcpu_events(struct kvm_vcpu *vcpu,
events->nmi.injected = vcpu->arch.nmi_injected;
events->nmi.pending = vcpu->arch.nmi_pending;
events->nmi.masked = kvm_x86_ops->get_nmi_mask(vcpu);
+ events->nmi.pad = 0;
events->sipi_vector = vcpu->arch.sipi_vector;
events->flags = (KVM_VCPUEVENT_VALID_NMI_PENDING
| KVM_VCPUEVENT_VALID_SIPI_VECTOR
| KVM_VCPUEVENT_VALID_SHADOW);
+ memset(&events->reserved, 0, sizeof(events->reserved));
}
static int kvm_vcpu_ioctl_x86_set_vcpu_events(struct kvm_vcpu *vcpu,
@@ -2623,6 +2626,7 @@ static void kvm_vcpu_ioctl_x86_get_debugregs(struct kvm_vcpu *vcpu,
dbgregs->dr6 = vcpu->arch.dr6;
dbgregs->dr7 = vcpu->arch.dr7;
dbgregs->flags = 0;
+ memset(&dbgregs->reserved, 0, sizeof(dbgregs->reserved));
}
static int kvm_vcpu_ioctl_x86_set_debugregs(struct kvm_vcpu *vcpu,
@@ -3106,6 +3110,7 @@ static int kvm_vm_ioctl_get_pit2(struct kvm *kvm, struct kvm_pit_state2 *ps)
sizeof(ps->channels));
ps->flags = kvm->arch.vpit->pit_state.flags;
mutex_unlock(&kvm->arch.vpit->pit_state.lock);
+ memset(&ps->reserved, 0, sizeof(ps->reserved));
return r;
}
@@ -3169,10 +3174,6 @@ int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm,
struct kvm_memslots *slots, *old_slots;
unsigned long *dirty_bitmap;
- spin_lock(&kvm->mmu_lock);
- kvm_mmu_slot_remove_write_access(kvm, log->slot);
- spin_unlock(&kvm->mmu_lock);
-
r = -ENOMEM;
dirty_bitmap = vmalloc(n);
if (!dirty_bitmap)
@@ -3194,6 +3195,10 @@ int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm,
dirty_bitmap = old_slots->memslots[log->slot].dirty_bitmap;
kfree(old_slots);
+ spin_lock(&kvm->mmu_lock);
+ kvm_mmu_slot_remove_write_access(kvm, log->slot);
+ spin_unlock(&kvm->mmu_lock);
+
r = -EFAULT;
if (copy_to_user(log->dirty_bitmap, dirty_bitmap, n)) {
vfree(dirty_bitmap);
@@ -3486,6 +3491,7 @@ long kvm_arch_vm_ioctl(struct file *filp,
user_ns.clock = kvm->arch.kvmclock_offset + now_ns;
local_irq_enable();
user_ns.flags = 0;
+ memset(&user_ns.pad, 0, sizeof(user_ns.pad));
r = -EFAULT;
if (copy_to_user(argp, &user_ns, sizeof(user_ns)))
@@ -3972,8 +3978,10 @@ int kvm_emulate_wbinvd(struct kvm_vcpu *vcpu)
return X86EMUL_CONTINUE;
if (kvm_x86_ops->has_wbinvd_exit()) {
+ preempt_disable();
smp_call_function_many(vcpu->arch.wbinvd_dirty_mask,
wbinvd_ipi, NULL, 1);
+ preempt_enable();
cpumask_clear(vcpu->arch.wbinvd_dirty_mask);
}
wbinvd();
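
Several ioctl paths above gain explicit zeroing of pad and reserved fields before the structures are copied to user space; without it, uninitialized kernel stack bytes in those fields would leak to the controlling process. The preempt_disable()/preempt_enable() pair fixes a separate issue: smp_call_function_many() must not be called with preemption enabled. The zero-before-copy pattern in miniature, with a made-up struct layout:

/* Zero-before-copy pattern: clear reserved/padding fields explicitly so no
 * stack garbage reaches user space. The struct layout here is illustrative. */
#include <stdio.h>
#include <string.h>
#include <stdint.h>

struct events_out {
	uint32_t nr;
	uint8_t  has_error_code;
	uint8_t  pad;			/* would otherwise hold stack junk */
	uint16_t flags;
	uint32_t reserved[9];
};

static void fill_events(struct events_out *e)
{
	e->nr = 14;
	e->has_error_code = 1;
	e->pad = 0;
	e->flags = 0x7;
	memset(&e->reserved, 0, sizeof(e->reserved));
}

int main(void)
{
	struct events_out e;		/* deliberately not zero-initialized */

	fill_events(&e);
	printf("pad=%u reserved[0]=%u\n", e.pad, e.reserved[0]);
	return 0;
}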
diff --git a/arch/x86/mm/fault.c b/arch/x86/mm/fault.c
index 79b0b372d2d0..7d90ceb882a4 100644
--- a/arch/x86/mm/fault.c
+++ b/arch/x86/mm/fault.c
@@ -11,6 +11,7 @@
#include <linux/kprobes.h> /* __kprobes, ... */
#include <linux/mmiotrace.h> /* kmmio_handler, ... */
#include <linux/perf_event.h> /* perf_sw_event */
+#include <linux/hugetlb.h> /* hstate_index_to_shift */
#include <asm/traps.h> /* dotraplinkage, ... */
#include <asm/pgalloc.h> /* pgd_*(), ... */
@@ -160,15 +161,20 @@ is_prefetch(struct pt_regs *regs, unsigned long error_code, unsigned long addr)
static void
force_sig_info_fault(int si_signo, int si_code, unsigned long address,
- struct task_struct *tsk)
+ struct task_struct *tsk, int fault)
{
+ unsigned lsb = 0;
siginfo_t info;
info.si_signo = si_signo;
info.si_errno = 0;
info.si_code = si_code;
info.si_addr = (void __user *)address;
- info.si_addr_lsb = si_code == BUS_MCEERR_AR ? PAGE_SHIFT : 0;
+ if (fault & VM_FAULT_HWPOISON_LARGE)
+ lsb = hstate_index_to_shift(VM_FAULT_GET_HINDEX(fault));
+ if (fault & VM_FAULT_HWPOISON)
+ lsb = PAGE_SHIFT;
+ info.si_addr_lsb = lsb;
force_sig_info(si_signo, &info, tsk);
}
@@ -722,7 +728,7 @@ __bad_area_nosemaphore(struct pt_regs *regs, unsigned long error_code,
tsk->thread.error_code = error_code | (address >= TASK_SIZE);
tsk->thread.trap_no = 14;
- force_sig_info_fault(SIGSEGV, si_code, address, tsk);
+ force_sig_info_fault(SIGSEGV, si_code, address, tsk, 0);
return;
}
@@ -807,14 +813,14 @@ do_sigbus(struct pt_regs *regs, unsigned long error_code, unsigned long address,
tsk->thread.trap_no = 14;
#ifdef CONFIG_MEMORY_FAILURE
- if (fault & VM_FAULT_HWPOISON) {
+ if (fault & (VM_FAULT_HWPOISON|VM_FAULT_HWPOISON_LARGE)) {
printk(KERN_ERR
"MCE: Killing %s:%d due to hardware memory corruption fault at %lx\n",
tsk->comm, tsk->pid, address);
code = BUS_MCEERR_AR;
}
#endif
- force_sig_info_fault(SIGBUS, code, address, tsk);
+ force_sig_info_fault(SIGBUS, code, address, tsk, fault);
}
static noinline void
@@ -824,7 +830,8 @@ mm_fault_error(struct pt_regs *regs, unsigned long error_code,
if (fault & VM_FAULT_OOM) {
out_of_memory(regs, error_code, address);
} else {
- if (fault & (VM_FAULT_SIGBUS|VM_FAULT_HWPOISON))
+ if (fault & (VM_FAULT_SIGBUS|VM_FAULT_HWPOISON|
+ VM_FAULT_HWPOISON_LARGE))
do_sigbus(regs, error_code, address, fault);
else
BUG();
@@ -912,9 +919,9 @@ spurious_fault(unsigned long error_code, unsigned long address)
int show_unhandled_signals = 1;
static inline int
-access_error(unsigned long error_code, int write, struct vm_area_struct *vma)
+access_error(unsigned long error_code, struct vm_area_struct *vma)
{
- if (write) {
+ if (error_code & PF_WRITE) {
/* write, present and write, not present: */
if (unlikely(!(vma->vm_flags & VM_WRITE)))
return 1;
@@ -949,8 +956,10 @@ do_page_fault(struct pt_regs *regs, unsigned long error_code)
struct task_struct *tsk;
unsigned long address;
struct mm_struct *mm;
- int write;
int fault;
+ int write = error_code & PF_WRITE;
+ unsigned int flags = FAULT_FLAG_ALLOW_RETRY |
+ (write ? FAULT_FLAG_WRITE : 0);
tsk = current;
mm = tsk->mm;
@@ -1061,6 +1070,7 @@ do_page_fault(struct pt_regs *regs, unsigned long error_code)
bad_area_nosemaphore(regs, error_code, address);
return;
}
+retry:
down_read(&mm->mmap_sem);
} else {
/*
@@ -1104,9 +1114,7 @@ do_page_fault(struct pt_regs *regs, unsigned long error_code)
* we can handle it..
*/
good_area:
- write = error_code & PF_WRITE;
-
- if (unlikely(access_error(error_code, write, vma))) {
+ if (unlikely(access_error(error_code, vma))) {
bad_area_access_error(regs, error_code, address);
return;
}
@@ -1116,21 +1124,34 @@ good_area:
* make sure we exit gracefully rather than endlessly redo
* the fault:
*/
- fault = handle_mm_fault(mm, vma, address, write ? FAULT_FLAG_WRITE : 0);
+ fault = handle_mm_fault(mm, vma, address, flags);
if (unlikely(fault & VM_FAULT_ERROR)) {
mm_fault_error(regs, error_code, address, fault);
return;
}
- if (fault & VM_FAULT_MAJOR) {
- tsk->maj_flt++;
- perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MAJ, 1, 0,
- regs, address);
- } else {
- tsk->min_flt++;
- perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MIN, 1, 0,
- regs, address);
+ /*
+ * Major/minor page fault accounting is only done on the
+ * initial attempt. If we go through a retry, it is extremely
+ * likely that the page will be found in page cache at that point.
+ */
+ if (flags & FAULT_FLAG_ALLOW_RETRY) {
+ if (fault & VM_FAULT_MAJOR) {
+ tsk->maj_flt++;
+ perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MAJ, 1, 0,
+ regs, address);
+ } else {
+ tsk->min_flt++;
+ perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MIN, 1, 0,
+ regs, address);
+ }
+ if (fault & VM_FAULT_RETRY) {
+ /* Clear FAULT_FLAG_ALLOW_RETRY to avoid any risk
+ * of starvation. */
+ flags &= ~FAULT_FLAG_ALLOW_RETRY;
+ goto retry;
+ }
}
check_v8086_mode(regs, address, tsk);
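
The page-fault path now passes FAULT_FLAG_ALLOW_RETRY on the first attempt; if handle_mm_fault() returns VM_FAULT_RETRY (it dropped mmap_sem while waiting for I/O), the handler clears the flag and retries exactly once, and major/minor fault accounting is only done on that first attempt. The fault code is also plumbed into force_sig_info_fault() so a SIGBUS for a poisoned huge page reports the correct si_addr_lsb. The retry shape, reduced to its control flow with stand-in flag values:

/* Control-flow sketch of the single-retry fault loop. The fault handler
 * and flag values are stand-ins, not the kernel's. */
#include <stdio.h>

#define FLAG_ALLOW_RETRY 0x1
#define FAULT_RETRY      0x2

static int attempts;

static int handle_fault(unsigned int flags)
{
	attempts++;
	/* first attempt: pretend the page needed I/O and ask for a retry */
	return (flags & FLAG_ALLOW_RETRY) ? FAULT_RETRY : 0;
}

int main(void)
{
	unsigned int flags = FLAG_ALLOW_RETRY;
	int fault;

retry:
	fault = handle_fault(flags);
	if (fault & FAULT_RETRY) {
		flags &= ~FLAG_ALLOW_RETRY;	/* never loop more than once */
		goto retry;
	}
	printf("attempts = %d\n", attempts);	/* 2 */
	return 0;
}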
diff --git a/arch/x86/mm/highmem_32.c b/arch/x86/mm/highmem_32.c
index 5e8fa12ef861..b49962662101 100644
--- a/arch/x86/mm/highmem_32.c
+++ b/arch/x86/mm/highmem_32.c
@@ -9,6 +9,7 @@ void *kmap(struct page *page)
return page_address(page);
return kmap_high(page);
}
+EXPORT_SYMBOL(kmap);
void kunmap(struct page *page)
{
@@ -18,6 +19,7 @@ void kunmap(struct page *page)
return;
kunmap_high(page);
}
+EXPORT_SYMBOL(kunmap);
/*
* kmap_atomic/kunmap_atomic is significantly faster than kmap/kunmap because
@@ -27,10 +29,10 @@ void kunmap(struct page *page)
* However when holding an atomic kmap it is not legal to sleep, so atomic
* kmaps are appropriate for short, tight code paths only.
*/
-void *kmap_atomic_prot(struct page *page, enum km_type type, pgprot_t prot)
+void *kmap_atomic_prot(struct page *page, pgprot_t prot)
{
- enum fixed_addresses idx;
unsigned long vaddr;
+ int idx, type;
/* even !CONFIG_PREEMPT needs this, for in_atomic in do_page_fault */
pagefault_disable();
@@ -38,8 +40,7 @@ void *kmap_atomic_prot(struct page *page, enum km_type type, pgprot_t prot)
if (!PageHighMem(page))
return page_address(page);
- debug_kmap_atomic(type);
-
+ type = kmap_atomic_idx_push();
idx = type + KM_TYPE_NR*smp_processor_id();
vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
BUG_ON(!pte_none(*(kmap_pte-idx)));
@@ -47,44 +48,57 @@ void *kmap_atomic_prot(struct page *page, enum km_type type, pgprot_t prot)
return (void *)vaddr;
}
+EXPORT_SYMBOL(kmap_atomic_prot);
+
+void *__kmap_atomic(struct page *page)
+{
+ return kmap_atomic_prot(page, kmap_prot);
+}
+EXPORT_SYMBOL(__kmap_atomic);
-void *kmap_atomic(struct page *page, enum km_type type)
+/*
+ * This is the same as kmap_atomic() but can map memory that doesn't
+ * have a struct page associated with it.
+ */
+void *kmap_atomic_pfn(unsigned long pfn)
{
- return kmap_atomic_prot(page, type, kmap_prot);
+ return kmap_atomic_prot_pfn(pfn, kmap_prot);
}
+EXPORT_SYMBOL_GPL(kmap_atomic_pfn);
-void kunmap_atomic_notypecheck(void *kvaddr, enum km_type type)
+void __kunmap_atomic(void *kvaddr)
{
unsigned long vaddr = (unsigned long) kvaddr & PAGE_MASK;
- enum fixed_addresses idx = type + KM_TYPE_NR*smp_processor_id();
-
- /*
- * Force other mappings to Oops if they'll try to access this pte
- * without first remap it. Keeping stale mappings around is a bad idea
- * also, in case the page changes cacheability attributes or becomes
- * a protected page in a hypervisor.
- */
- if (vaddr == __fix_to_virt(FIX_KMAP_BEGIN+idx))
+
+ if (vaddr >= __fix_to_virt(FIX_KMAP_END) &&
+ vaddr <= __fix_to_virt(FIX_KMAP_BEGIN)) {
+ int idx, type;
+
+ type = kmap_atomic_idx();
+ idx = type + KM_TYPE_NR * smp_processor_id();
+
+#ifdef CONFIG_DEBUG_HIGHMEM
+ WARN_ON_ONCE(vaddr != __fix_to_virt(FIX_KMAP_BEGIN + idx));
+#endif
+ /*
+ * Force other mappings to Oops if they'll try to access this
+ * pte without first remap it. Keeping stale mappings around
+ * is a bad idea also, in case the page changes cacheability
+ * attributes or becomes a protected page in a hypervisor.
+ */
kpte_clear_flush(kmap_pte-idx, vaddr);
- else {
+ kmap_atomic_idx_pop();
+ }
#ifdef CONFIG_DEBUG_HIGHMEM
+ else {
BUG_ON(vaddr < PAGE_OFFSET);
BUG_ON(vaddr >= (unsigned long)high_memory);
-#endif
}
+#endif
pagefault_enable();
}
-
-/*
- * This is the same as kmap_atomic() but can map memory that doesn't
- * have a struct page associated with it.
- */
-void *kmap_atomic_pfn(unsigned long pfn, enum km_type type)
-{
- return kmap_atomic_prot_pfn(pfn, type, kmap_prot);
-}
-EXPORT_SYMBOL_GPL(kmap_atomic_pfn); /* temporarily in use by i915 GEM until vmap */
+EXPORT_SYMBOL(__kunmap_atomic);
struct page *kmap_atomic_to_page(void *ptr)
{
@@ -98,12 +112,6 @@ struct page *kmap_atomic_to_page(void *ptr)
pte = kmap_pte - (idx - FIX_KMAP_BEGIN);
return pte_page(*pte);
}
-
-EXPORT_SYMBOL(kmap);
-EXPORT_SYMBOL(kunmap);
-EXPORT_SYMBOL(kmap_atomic);
-EXPORT_SYMBOL(kunmap_atomic_notypecheck);
-EXPORT_SYMBOL(kmap_atomic_prot);
EXPORT_SYMBOL(kmap_atomic_to_page);
void __init set_highmem_pages_init(void)
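
The km_type argument disappears from the atomic kmap API above: instead of the caller naming a fixed slot, kmap_atomic_idx_push()/kmap_atomic_idx_pop() maintain a small per-CPU stack of slot indices, and __kunmap_atomic() recognizes highmem mappings by address range rather than by type. The stack discipline (strict LIFO nesting within one pagefault-disabled region) can be modeled as below; the helpers are simplified stand-ins for the real per-CPU versions.

/* Per-CPU slot stack model for the typeless kmap_atomic API.
 * KM_TYPE_NR and the push/pop helpers are simplified stand-ins. */
#include <stdio.h>
#include <assert.h>

#define KM_TYPE_NR 20

static int kmap_idx;				/* one CPU's stack depth */

static int kmap_atomic_idx_push(void)
{
	assert(kmap_idx < KM_TYPE_NR);		/* too many nested kmaps */
	return kmap_idx++;
}

static int kmap_atomic_idx(void) { return kmap_idx - 1; }

static void kmap_atomic_idx_pop(void)
{
	kmap_idx--;
	assert(kmap_idx >= 0);
}

int main(void)
{
	int a = kmap_atomic_idx_push();		/* outer mapping gets slot 0 */
	int b = kmap_atomic_idx_push();		/* nested mapping gets slot 1 */

	printf("slots %d %d, top %d\n", a, b, kmap_atomic_idx());
	kmap_atomic_idx_pop();			/* unmap in reverse order */
	kmap_atomic_idx_pop();
	return 0;
}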
diff --git a/arch/x86/mm/init_64.c b/arch/x86/mm/init_64.c
index 84346200e783..71a59296af80 100644
--- a/arch/x86/mm/init_64.c
+++ b/arch/x86/mm/init_64.c
@@ -51,7 +51,6 @@
#include <asm/numa.h>
#include <asm/cacheflush.h>
#include <asm/init.h>
-#include <linux/bootmem.h>
static int __init parse_direct_gbpages_off(char *arg)
{
diff --git a/arch/x86/mm/iomap_32.c b/arch/x86/mm/iomap_32.c
index 72fc70cf6184..7b179b499fa3 100644
--- a/arch/x86/mm/iomap_32.c
+++ b/arch/x86/mm/iomap_32.c
@@ -48,21 +48,20 @@ int iomap_create_wc(resource_size_t base, unsigned long size, pgprot_t *prot)
}
EXPORT_SYMBOL_GPL(iomap_create_wc);
-void
-iomap_free(resource_size_t base, unsigned long size)
+void iomap_free(resource_size_t base, unsigned long size)
{
io_free_memtype(base, base + size);
}
EXPORT_SYMBOL_GPL(iomap_free);
-void *kmap_atomic_prot_pfn(unsigned long pfn, enum km_type type, pgprot_t prot)
+void *kmap_atomic_prot_pfn(unsigned long pfn, pgprot_t prot)
{
- enum fixed_addresses idx;
unsigned long vaddr;
+ int idx, type;
pagefault_disable();
- debug_kmap_atomic(type);
+ type = kmap_atomic_idx_push();
idx = type + KM_TYPE_NR * smp_processor_id();
vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
set_pte(kmap_pte - idx, pfn_pte(pfn, prot));
@@ -72,10 +71,10 @@ void *kmap_atomic_prot_pfn(unsigned long pfn, enum km_type type, pgprot_t prot)
}
/*
- * Map 'pfn' using fixed map 'type' and protections 'prot'
+ * Map 'pfn' using protections 'prot'
*/
void __iomem *
-iomap_atomic_prot_pfn(unsigned long pfn, enum km_type type, pgprot_t prot)
+iomap_atomic_prot_pfn(unsigned long pfn, pgprot_t prot)
{
/*
* For non-PAT systems, promote PAGE_KERNEL_WC to PAGE_KERNEL_UC_MINUS.
@@ -86,24 +85,34 @@ iomap_atomic_prot_pfn(unsigned long pfn, enum km_type type, pgprot_t prot)
if (!pat_enabled && pgprot_val(prot) == pgprot_val(PAGE_KERNEL_WC))
prot = PAGE_KERNEL_UC_MINUS;
- return (void __force __iomem *) kmap_atomic_prot_pfn(pfn, type, prot);
+ return (void __force __iomem *) kmap_atomic_prot_pfn(pfn, prot);
}
EXPORT_SYMBOL_GPL(iomap_atomic_prot_pfn);
void
-iounmap_atomic(void __iomem *kvaddr, enum km_type type)
+iounmap_atomic(void __iomem *kvaddr)
{
unsigned long vaddr = (unsigned long) kvaddr & PAGE_MASK;
- enum fixed_addresses idx = type + KM_TYPE_NR*smp_processor_id();
- /*
- * Force other mappings to Oops if they'll try to access this pte
- * without first remap it. Keeping stale mappings around is a bad idea
- * also, in case the page changes cacheability attributes or becomes
- * a protected page in a hypervisor.
- */
- if (vaddr == __fix_to_virt(FIX_KMAP_BEGIN+idx))
+ if (vaddr >= __fix_to_virt(FIX_KMAP_END) &&
+ vaddr <= __fix_to_virt(FIX_KMAP_BEGIN)) {
+ int idx, type;
+
+ type = kmap_atomic_idx();
+ idx = type + KM_TYPE_NR * smp_processor_id();
+
+#ifdef CONFIG_DEBUG_HIGHMEM
+ WARN_ON_ONCE(vaddr != __fix_to_virt(FIX_KMAP_BEGIN + idx));
+#endif
+ /*
+ * Force other mappings to Oops if they'll try to access this
+ * pte without first remap it. Keeping stale mappings around
+ * is a bad idea also, in case the page changes cacheability
+ * attributes or becomes a protected page in a hypervisor.
+ */
kpte_clear_flush(kmap_pte-idx, vaddr);
+ kmap_atomic_idx_pop();
+ }
pagefault_enable();
}
diff --git a/arch/x86/mm/numa_64.c b/arch/x86/mm/numa_64.c
index 60f498511dd6..7ffc9b727efd 100644
--- a/arch/x86/mm/numa_64.c
+++ b/arch/x86/mm/numa_64.c
@@ -178,11 +178,8 @@ static void * __init early_node_mem(int nodeid, unsigned long start,
/* extend the search scope */
end = max_pfn_mapped << PAGE_SHIFT;
- if (end > (MAX_DMA32_PFN<<PAGE_SHIFT))
- start = MAX_DMA32_PFN<<PAGE_SHIFT;
- else
- start = MAX_DMA_PFN<<PAGE_SHIFT;
- mem = memblock_x86_find_in_range_node(nodeid, start, end, size, align);
+ start = MAX_DMA_PFN << PAGE_SHIFT;
+ mem = memblock_find_in_range(start, end, size, align);
if (mem != MEMBLOCK_ERROR)
return __va(mem);
diff --git a/arch/x86/mm/tlb.c b/arch/x86/mm/tlb.c
index 49358481c733..6acc724d5d8f 100644
--- a/arch/x86/mm/tlb.c
+++ b/arch/x86/mm/tlb.c
@@ -223,7 +223,7 @@ void native_flush_tlb_others(const struct cpumask *cpumask,
static void __cpuinit calculate_tlb_offset(void)
{
- int cpu, node, nr_node_vecs;
+ int cpu, node, nr_node_vecs, idx = 0;
/*
* we are changing tlb_vector_offset for each CPU in runtime, but this
* will not cause inconsistency, as the write is atomic under X86. we
@@ -239,7 +239,7 @@ static void __cpuinit calculate_tlb_offset(void)
nr_node_vecs = NUM_INVALIDATE_TLB_VECTORS/nr_online_nodes;
for_each_online_node(node) {
- int node_offset = (node % NUM_INVALIDATE_TLB_VECTORS) *
+ int node_offset = (idx % NUM_INVALIDATE_TLB_VECTORS) *
nr_node_vecs;
int cpu_offset = 0;
for_each_cpu(cpu, cpumask_of_node(node)) {
@@ -248,10 +248,11 @@ static void __cpuinit calculate_tlb_offset(void)
cpu_offset++;
cpu_offset = cpu_offset % nr_node_vecs;
}
+ idx++;
}
}
-static int tlb_cpuhp_notify(struct notifier_block *n,
+static int __cpuinit tlb_cpuhp_notify(struct notifier_block *n,
unsigned long action, void *hcpu)
{
switch (action & 0xf) {
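
calculate_tlb_offset() was indexing the invalidate-vector slices with the node number itself; on systems with sparse node IDs that can push a node's offset past the available vectors or pile several nodes onto the same slice. Counting online nodes with a separate idx means the i-th online node always gets the i-th slice. A miniature demonstration of why the dense counter matters:

/* Why a dense counter matters: with sparse node IDs (0 and 4 online),
 * indexing by the node number can point past the last vector. */
#include <stdio.h>

#define NUM_VECTORS 8

int main(void)
{
	int online_nodes[] = { 0, 4 };	/* sparse numbering, 2 nodes online */
	int nr_online = 2;
	int per_node = NUM_VECTORS / nr_online;
	int idx = 0;

	for (int i = 0; i < nr_online; i++) {
		int node = online_nodes[i];
		int by_node = (node % NUM_VECTORS) * per_node; /* node 4 -> 16 */
		int by_idx  = (idx % NUM_VECTORS) * per_node;  /* node 4 -> 4  */

		printf("node %d: old offset %d, new offset %d\n",
		       node, by_node, by_idx);
		idx++;
	}
	return 0;
}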
diff --git a/arch/x86/oprofile/nmi_int.c b/arch/x86/oprofile/nmi_int.c
index bd1489c3ce09..4e8baad36d37 100644
--- a/arch/x86/oprofile/nmi_int.c
+++ b/arch/x86/oprofile/nmi_int.c
@@ -726,6 +726,12 @@ int __init op_nmi_init(struct oprofile_operations *ops)
case 0x11:
cpu_type = "x86-64/family11h";
break;
+ case 0x12:
+ cpu_type = "x86-64/family12h";
+ break;
+ case 0x14:
+ cpu_type = "x86-64/family14h";
+ break;
default:
return -ENODEV;
}
diff --git a/arch/x86/oprofile/op_model_amd.c b/arch/x86/oprofile/op_model_amd.c
index 42fb46f83883..a011bcc0f943 100644
--- a/arch/x86/oprofile/op_model_amd.c
+++ b/arch/x86/oprofile/op_model_amd.c
@@ -48,17 +48,24 @@ static unsigned long reset_value[NUM_VIRT_COUNTERS];
static u32 ibs_caps;
-struct op_ibs_config {
+struct ibs_config {
unsigned long op_enabled;
unsigned long fetch_enabled;
unsigned long max_cnt_fetch;
unsigned long max_cnt_op;
unsigned long rand_en;
unsigned long dispatched_ops;
+ unsigned long branch_target;
};
-static struct op_ibs_config ibs_config;
-static u64 ibs_op_ctl;
+struct ibs_state {
+ u64 ibs_op_ctl;
+ int branch_target;
+ unsigned long sample_size;
+};
+
+static struct ibs_config ibs_config;
+static struct ibs_state ibs_state;
/*
* IBS cpuid feature detection
@@ -71,8 +78,16 @@ static u64 ibs_op_ctl;
* bit 0 is used to indicate the existence of IBS.
*/
#define IBS_CAPS_AVAIL (1U<<0)
+#define IBS_CAPS_FETCHSAM (1U<<1)
+#define IBS_CAPS_OPSAM (1U<<2)
#define IBS_CAPS_RDWROPCNT (1U<<3)
#define IBS_CAPS_OPCNT (1U<<4)
+#define IBS_CAPS_BRNTRGT (1U<<5)
+#define IBS_CAPS_OPCNTEXT (1U<<6)
+
+#define IBS_CAPS_DEFAULT (IBS_CAPS_AVAIL \
+ | IBS_CAPS_FETCHSAM \
+ | IBS_CAPS_OPSAM)
/*
* IBS APIC setup
@@ -99,12 +114,12 @@ static u32 get_ibs_caps(void)
/* check IBS cpuid feature flags */
max_level = cpuid_eax(0x80000000);
if (max_level < IBS_CPUID_FEATURES)
- return IBS_CAPS_AVAIL;
+ return IBS_CAPS_DEFAULT;
ibs_caps = cpuid_eax(IBS_CPUID_FEATURES);
if (!(ibs_caps & IBS_CAPS_AVAIL))
/* cpuid flags not valid */
- return IBS_CAPS_AVAIL;
+ return IBS_CAPS_DEFAULT;
return ibs_caps;
}
@@ -197,8 +212,8 @@ op_amd_handle_ibs(struct pt_regs * const regs,
rdmsrl(MSR_AMD64_IBSOPCTL, ctl);
if (ctl & IBS_OP_VAL) {
rdmsrl(MSR_AMD64_IBSOPRIP, val);
- oprofile_write_reserve(&entry, regs, val,
- IBS_OP_CODE, IBS_OP_SIZE);
+ oprofile_write_reserve(&entry, regs, val, IBS_OP_CODE,
+ ibs_state.sample_size);
oprofile_add_data64(&entry, val);
rdmsrl(MSR_AMD64_IBSOPDATA, val);
oprofile_add_data64(&entry, val);
@@ -210,10 +225,14 @@ op_amd_handle_ibs(struct pt_regs * const regs,
oprofile_add_data64(&entry, val);
rdmsrl(MSR_AMD64_IBSDCPHYSAD, val);
oprofile_add_data64(&entry, val);
+ if (ibs_state.branch_target) {
+ rdmsrl(MSR_AMD64_IBSBRTARGET, val);
+ oprofile_add_data(&entry, (unsigned long)val);
+ }
oprofile_write_commit(&entry);
/* reenable the IRQ */
- ctl = op_amd_randomize_ibs_op(ibs_op_ctl);
+ ctl = op_amd_randomize_ibs_op(ibs_state.ibs_op_ctl);
wrmsrl(MSR_AMD64_IBSOPCTL, ctl);
}
}
@@ -226,21 +245,32 @@ static inline void op_amd_start_ibs(void)
if (!ibs_caps)
return;
+ memset(&ibs_state, 0, sizeof(ibs_state));
+
+ /*
+ * Note: Since the max count settings may out of range we
+ * write back the actual used values so that userland can read
+ * it.
+ */
+
if (ibs_config.fetch_enabled) {
- val = (ibs_config.max_cnt_fetch >> 4) & IBS_FETCH_MAX_CNT;
+ val = ibs_config.max_cnt_fetch >> 4;
+ val = min(val, IBS_FETCH_MAX_CNT);
+ ibs_config.max_cnt_fetch = val << 4;
val |= ibs_config.rand_en ? IBS_FETCH_RAND_EN : 0;
val |= IBS_FETCH_ENABLE;
wrmsrl(MSR_AMD64_IBSFETCHCTL, val);
}
if (ibs_config.op_enabled) {
- ibs_op_ctl = ibs_config.max_cnt_op >> 4;
+ val = ibs_config.max_cnt_op >> 4;
if (!(ibs_caps & IBS_CAPS_RDWROPCNT)) {
/*
* IbsOpCurCnt not supported. See
* op_amd_randomize_ibs_op() for details.
*/
- ibs_op_ctl = clamp(ibs_op_ctl, 0x0081ULL, 0xFF80ULL);
+ val = clamp(val, 0x0081ULL, 0xFF80ULL);
+ ibs_config.max_cnt_op = val << 4;
} else {
/*
* The start value is randomized with a
@@ -248,13 +278,24 @@ static inline void op_amd_start_ibs(void)
* with the half of the randomized range. Also
* avoid underflows.
*/
- ibs_op_ctl = min(ibs_op_ctl + IBS_RANDOM_MAXCNT_OFFSET,
- IBS_OP_MAX_CNT);
+ val += IBS_RANDOM_MAXCNT_OFFSET;
+ if (ibs_caps & IBS_CAPS_OPCNTEXT)
+ val = min(val, IBS_OP_MAX_CNT_EXT);
+ else
+ val = min(val, IBS_OP_MAX_CNT);
+ ibs_config.max_cnt_op =
+ (val - IBS_RANDOM_MAXCNT_OFFSET) << 4;
+ }
+ val = ((val & ~IBS_OP_MAX_CNT) << 4) | (val & IBS_OP_MAX_CNT);
+ val |= ibs_config.dispatched_ops ? IBS_OP_CNT_CTL : 0;
+ val |= IBS_OP_ENABLE;
+ ibs_state.ibs_op_ctl = val;
+ ibs_state.sample_size = IBS_OP_SIZE;
+ if (ibs_config.branch_target) {
+ ibs_state.branch_target = 1;
+ ibs_state.sample_size++;
}
- if (ibs_caps & IBS_CAPS_OPCNT && ibs_config.dispatched_ops)
- ibs_op_ctl |= IBS_OP_CNT_CTL;
- ibs_op_ctl |= IBS_OP_ENABLE;
- val = op_amd_randomize_ibs_op(ibs_op_ctl);
+ val = op_amd_randomize_ibs_op(ibs_state.ibs_op_ctl);
wrmsrl(MSR_AMD64_IBSOPCTL, val);
}
}
@@ -281,29 +322,25 @@ static inline int eilvt_is_available(int offset)
static inline int ibs_eilvt_valid(void)
{
- u64 val;
int offset;
+ u64 val;
rdmsrl(MSR_AMD64_IBSCTL, val);
+ offset = val & IBSCTL_LVT_OFFSET_MASK;
+
if (!(val & IBSCTL_LVT_OFFSET_VALID)) {
- pr_err(FW_BUG "cpu %d, invalid IBS "
- "interrupt offset %d (MSR%08X=0x%016llx)",
- smp_processor_id(), offset,
- MSR_AMD64_IBSCTL, val);
+ pr_err(FW_BUG "cpu %d, invalid IBS interrupt offset %d (MSR%08X=0x%016llx)\n",
+ smp_processor_id(), offset, MSR_AMD64_IBSCTL, val);
return 0;
}
- offset = val & IBSCTL_LVT_OFFSET_MASK;
-
- if (eilvt_is_available(offset))
- return !0;
-
- pr_err(FW_BUG "cpu %d, IBS interrupt offset %d "
- "not available (MSR%08X=0x%016llx)",
- smp_processor_id(), offset,
- MSR_AMD64_IBSCTL, val);
+ if (!eilvt_is_available(offset)) {
+ pr_err(FW_BUG "cpu %d, IBS interrupt offset %d not available (MSR%08X=0x%016llx)\n",
+ smp_processor_id(), offset, MSR_AMD64_IBSCTL, val);
+ return 0;
+ }
- return 0;
+ return 1;
}
static inline int get_ibs_offset(void)
@@ -630,28 +667,33 @@ static int setup_ibs_files(struct super_block *sb, struct dentry *root)
/* model specific files */
/* setup some reasonable defaults */
+ memset(&ibs_config, 0, sizeof(ibs_config));
ibs_config.max_cnt_fetch = 250000;
- ibs_config.fetch_enabled = 0;
ibs_config.max_cnt_op = 250000;
- ibs_config.op_enabled = 0;
- ibs_config.dispatched_ops = 0;
-
- dir = oprofilefs_mkdir(sb, root, "ibs_fetch");
- oprofilefs_create_ulong(sb, dir, "enable",
- &ibs_config.fetch_enabled);
- oprofilefs_create_ulong(sb, dir, "max_count",
- &ibs_config.max_cnt_fetch);
- oprofilefs_create_ulong(sb, dir, "rand_enable",
- &ibs_config.rand_en);
-
- dir = oprofilefs_mkdir(sb, root, "ibs_op");
- oprofilefs_create_ulong(sb, dir, "enable",
- &ibs_config.op_enabled);
- oprofilefs_create_ulong(sb, dir, "max_count",
- &ibs_config.max_cnt_op);
- if (ibs_caps & IBS_CAPS_OPCNT)
- oprofilefs_create_ulong(sb, dir, "dispatched_ops",
- &ibs_config.dispatched_ops);
+
+ if (ibs_caps & IBS_CAPS_FETCHSAM) {
+ dir = oprofilefs_mkdir(sb, root, "ibs_fetch");
+ oprofilefs_create_ulong(sb, dir, "enable",
+ &ibs_config.fetch_enabled);
+ oprofilefs_create_ulong(sb, dir, "max_count",
+ &ibs_config.max_cnt_fetch);
+ oprofilefs_create_ulong(sb, dir, "rand_enable",
+ &ibs_config.rand_en);
+ }
+
+ if (ibs_caps & IBS_CAPS_OPSAM) {
+ dir = oprofilefs_mkdir(sb, root, "ibs_op");
+ oprofilefs_create_ulong(sb, dir, "enable",
+ &ibs_config.op_enabled);
+ oprofilefs_create_ulong(sb, dir, "max_count",
+ &ibs_config.max_cnt_op);
+ if (ibs_caps & IBS_CAPS_OPCNT)
+ oprofilefs_create_ulong(sb, dir, "dispatched_ops",
+ &ibs_config.dispatched_ops);
+ if (ibs_caps & IBS_CAPS_BRNTRGT)
+ oprofilefs_create_ulong(sb, dir, "branch_target",
+ &ibs_config.branch_target);
+ }
return 0;
}
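
op_amd_start_ibs() now clamps the requested max-count values and writes the clamped result back into ibs_config so userspace can read what was actually programmed, and the per-sample size grows by one word when branch-target sampling (IBS_CAPS_BRNTRGT) is enabled. The clamp-and-write-back step, isolated below; the constants mirror the ones used above but are hard-coded here for illustration.

/* Isolated model of the IBS op max-count clamp and write-back. */
#include <stdio.h>
#include <stdint.h>

static uint64_t clamp_u64(uint64_t v, uint64_t lo, uint64_t hi)
{
	return v < lo ? lo : (v > hi ? hi : v);
}

int main(void)
{
	unsigned long max_cnt_op = 5000000;	/* what userspace asked for */
	uint64_t val = max_cnt_op >> 4;

	/* no IbsOpCurCnt support: keep the counter in a safe window */
	val = clamp_u64(val, 0x0081ULL, 0xFF80ULL);
	max_cnt_op = (unsigned long)(val << 4);	/* write back the real value */

	printf("programmed max_cnt_op = %lu\n", max_cnt_op);
	return 0;
}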
diff --git a/arch/x86/pci/Makefile b/arch/x86/pci/Makefile
index a0207a7fdf39..effd96e33f16 100644
--- a/arch/x86/pci/Makefile
+++ b/arch/x86/pci/Makefile
@@ -4,6 +4,7 @@ obj-$(CONFIG_PCI_BIOS) += pcbios.o
obj-$(CONFIG_PCI_MMCONFIG) += mmconfig_$(BITS).o direct.o mmconfig-shared.o
obj-$(CONFIG_PCI_DIRECT) += direct.o
obj-$(CONFIG_PCI_OLPC) += olpc.o
+obj-$(CONFIG_PCI_XEN) += xen.o
obj-y += fixup.o
obj-$(CONFIG_ACPI) += acpi.o
diff --git a/arch/x86/pci/acpi.c b/arch/x86/pci/acpi.c
index 15466c096ba5..0972315c3860 100644
--- a/arch/x86/pci/acpi.c
+++ b/arch/x86/pci/acpi.c
@@ -138,7 +138,6 @@ setup_resource(struct acpi_resource *acpi_res, void *data)
struct acpi_resource_address64 addr;
acpi_status status;
unsigned long flags;
- struct resource *root, *conflict;
u64 start, end;
status = resource_to_addr(acpi_res, &addr);
@@ -146,12 +145,10 @@ setup_resource(struct acpi_resource *acpi_res, void *data)
return AE_OK;
if (addr.resource_type == ACPI_MEMORY_RANGE) {
- root = &iomem_resource;
flags = IORESOURCE_MEM;
if (addr.info.mem.caching == ACPI_PREFETCHABLE_MEMORY)
flags |= IORESOURCE_PREFETCH;
} else if (addr.resource_type == ACPI_IO_RANGE) {
- root = &ioport_resource;
flags = IORESOURCE_IO;
} else
return AE_OK;
@@ -172,25 +169,90 @@ setup_resource(struct acpi_resource *acpi_res, void *data)
return AE_OK;
}
- conflict = insert_resource_conflict(root, res);
- if (conflict) {
- dev_err(&info->bridge->dev,
- "address space collision: host bridge window %pR "
- "conflicts with %s %pR\n",
- res, conflict->name, conflict);
- } else {
- pci_bus_add_resource(info->bus, res, 0);
- info->res_num++;
- if (addr.translation_offset)
- dev_info(&info->bridge->dev, "host bridge window %pR "
- "(PCI address [%#llx-%#llx])\n",
- res, res->start - addr.translation_offset,
- res->end - addr.translation_offset);
+ info->res_num++;
+ if (addr.translation_offset)
+ dev_info(&info->bridge->dev, "host bridge window %pR "
+ "(PCI address [%#llx-%#llx])\n",
+ res, res->start - addr.translation_offset,
+ res->end - addr.translation_offset);
+ else
+ dev_info(&info->bridge->dev, "host bridge window %pR\n", res);
+
+ return AE_OK;
+}
+
+static bool resource_contains(struct resource *res, resource_size_t point)
+{
+ if (res->start <= point && point <= res->end)
+ return true;
+ return false;
+}
+
+static void coalesce_windows(struct pci_root_info *info, int type)
+{
+ int i, j;
+ struct resource *res1, *res2;
+
+ for (i = 0; i < info->res_num; i++) {
+ res1 = &info->res[i];
+ if (!(res1->flags & type))
+ continue;
+
+ for (j = i + 1; j < info->res_num; j++) {
+ res2 = &info->res[j];
+ if (!(res2->flags & type))
+ continue;
+
+ /*
+ * I don't like throwing away windows because then
+ * our resources no longer match the ACPI _CRS, but
+ * the kernel resource tree doesn't allow overlaps.
+ */
+ if (resource_contains(res1, res2->start) ||
+ resource_contains(res1, res2->end) ||
+ resource_contains(res2, res1->start) ||
+ resource_contains(res2, res1->end)) {
+ res1->start = min(res1->start, res2->start);
+ res1->end = max(res1->end, res2->end);
+ dev_info(&info->bridge->dev,
+ "host bridge window expanded to %pR; %pR ignored\n",
+ res1, res2);
+ res2->flags = 0;
+ }
+ }
+ }
+}
+
+static void add_resources(struct pci_root_info *info)
+{
+ int i;
+ struct resource *res, *root, *conflict;
+
+ if (!pci_use_crs)
+ return;
+
+ coalesce_windows(info, IORESOURCE_MEM);
+ coalesce_windows(info, IORESOURCE_IO);
+
+ for (i = 0; i < info->res_num; i++) {
+ res = &info->res[i];
+
+ if (res->flags & IORESOURCE_MEM)
+ root = &iomem_resource;
+ else if (res->flags & IORESOURCE_IO)
+ root = &ioport_resource;
else
- dev_info(&info->bridge->dev,
- "host bridge window %pR\n", res);
+ continue;
+
+ conflict = insert_resource_conflict(root, res);
+ if (conflict)
+ dev_err(&info->bridge->dev,
+ "address space collision: host bridge window %pR "
+ "conflicts with %s %pR\n",
+ res, conflict->name, conflict);
+ else
+ pci_bus_add_resource(info->bus, res, 0);
}
- return AE_OK;
}
static void
@@ -224,6 +286,7 @@ get_current_resources(struct acpi_device *device, int busnum,
acpi_walk_resources(device->handle, METHOD_NAME__CRS, setup_resource,
&info);
+ add_resources(&info);
return;
name_alloc_fail:
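
Instead of inserting each _CRS window directly (and dropping any window that collides with one already in the tree), the code above records all windows first, coalesce_windows() merges any pair of same-type windows that overlap, and add_resources() then inserts the survivors. The overlap test and merge amount to the pass sketched below, over a made-up window list.

/* Minimal overlap-and-merge pass over host bridge windows, in the spirit of
 * coalesce_windows() above; the window list here is made up. */
#include <stdio.h>

struct win { unsigned long start, end; int dead; };

static int contains(const struct win *w, unsigned long p)
{
	return w->start <= p && p <= w->end;
}

int main(void)
{
	struct win w[] = { {0xa0000000, 0xbfffffff, 0},
			   {0xb0000000, 0xcfffffff, 0} };	/* overlaps w[0] */
	int n = 2;

	for (int i = 0; i < n; i++)
		for (int j = i + 1; j < n; j++) {
			if (w[i].dead || w[j].dead)
				continue;
			if (contains(&w[i], w[j].start) || contains(&w[i], w[j].end) ||
			    contains(&w[j], w[i].start) || contains(&w[j], w[i].end)) {
				if (w[j].start < w[i].start)
					w[i].start = w[j].start;
				if (w[j].end > w[i].end)
					w[i].end = w[j].end;
				w[j].dead = 1;	/* merged into w[i] */
			}
		}

	for (int i = 0; i < n; i++)
		if (!w[i].dead)
			printf("window [%#lx-%#lx]\n", w[i].start, w[i].end);
	return 0;
}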
diff --git a/arch/x86/pci/common.c b/arch/x86/pci/common.c
index a0772af64efb..f7c8a399978c 100644
--- a/arch/x86/pci/common.c
+++ b/arch/x86/pci/common.c
@@ -421,16 +421,10 @@ struct pci_bus * __devinit pcibios_scan_root(int busnum)
return bus;
}
-
-int __init pcibios_init(void)
+void __init pcibios_set_cache_line_size(void)
{
struct cpuinfo_x86 *c = &boot_cpu_data;
- if (!raw_pci_ops) {
- printk(KERN_WARNING "PCI: System does not support PCI\n");
- return 0;
- }
-
/*
* Set PCI cacheline size to that of the CPU if the CPU has reported it.
* (For older CPUs that don't support cpuid, we se it to 32 bytes
@@ -445,7 +439,16 @@ int __init pcibios_init(void)
pci_dfl_cache_line_size = 32 >> 2;
printk(KERN_DEBUG "PCI: Unknown cacheline size. Setting to 32 bytes\n");
}
+}
+
+int __init pcibios_init(void)
+{
+ if (!raw_pci_ops) {
+ printk(KERN_WARNING "PCI: System does not support PCI\n");
+ return 0;
+ }
+ pcibios_set_cache_line_size();
pcibios_resource_survey();
if (pci_bf_sort >= pci_force_bf)
diff --git a/arch/x86/pci/i386.c b/arch/x86/pci/i386.c
index 55253095be84..c4bb261c106e 100644
--- a/arch/x86/pci/i386.c
+++ b/arch/x86/pci/i386.c
@@ -65,16 +65,21 @@ pcibios_align_resource(void *data, const struct resource *res,
resource_size_t size, resource_size_t align)
{
struct pci_dev *dev = data;
- resource_size_t start = res->start;
+ resource_size_t start = round_down(res->end - size + 1, align);
if (res->flags & IORESOURCE_IO) {
- if (skip_isa_ioresource_align(dev))
- return start;
- if (start & 0x300)
- start = (start + 0x3ff) & ~0x3ff;
+
+ /*
+ * If we're avoiding ISA aliases, the largest contiguous I/O
+ * port space is 256 bytes. Clearing bits 9 and 10 preserves
+ * all 256-byte and smaller alignments, so the result will
+ * still be correctly aligned.
+ */
+ if (!skip_isa_ioresource_align(dev))
+ start &= ~0x300;
} else if (res->flags & IORESOURCE_MEM) {
if (start < BIOS_END)
- start = BIOS_END;
+ start = res->end; /* fail; no space */
}
return start;
}
@@ -311,6 +316,8 @@ int pci_mmap_page_range(struct pci_dev *dev, struct vm_area_struct *vma,
*/
prot |= _PAGE_CACHE_UC_MINUS;
+ prot |= _PAGE_IOMAP; /* creating a mapping for IO */
+
vma->vm_page_prot = __pgprot(prot);
if (io_remap_pfn_range(vma, vma->vm_start, vma->vm_pgoff,
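
pcibios_align_resource() above switches to allocating from the top of the available range: the candidate start is round_down(res->end - size + 1, align), ISA-alias avoidance becomes masking off the 0x300 bits of the already aligned I/O start (which preserves any alignment of 256 bytes or less), and a memory request that would land below BIOS_END is failed by returning res->end. The top-down arithmetic is just masking, as sketched below for a power-of-two alignment.

/* Top-down placement: highest aligned start that still fits below 'end'.
 * round_down_pow2() assumes a power-of-two alignment, as in the kernel. */
#include <stdio.h>

static unsigned long round_down_pow2(unsigned long x, unsigned long align)
{
	return x & ~(align - 1);
}

int main(void)
{
	unsigned long end = 0x7ff, size = 0x100, align = 0x100;
	unsigned long start = round_down_pow2(end - size + 1, align);

	printf("candidate start = %#lx\n", start);	/* 0x700 */

	/* I/O case: mask off the 0x300 bits to dodge ISA aliases; 256-byte
	 * and smaller alignments survive because only higher bits change */
	printf("io start = %#lx\n", start & ~0x300UL);	/* 0x400 */
	return 0;
}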
diff --git a/arch/x86/pci/irq.c b/arch/x86/pci/irq.c
index f547ee05f715..9f9bfb705cf9 100644
--- a/arch/x86/pci/irq.c
+++ b/arch/x86/pci/irq.c
@@ -584,27 +584,28 @@ static __init int intel_router_probe(struct irq_router *r, struct pci_dev *route
case PCI_DEVICE_ID_INTEL_ICH9_3:
case PCI_DEVICE_ID_INTEL_ICH9_4:
case PCI_DEVICE_ID_INTEL_ICH9_5:
- case PCI_DEVICE_ID_INTEL_TOLAPAI_0:
+ case PCI_DEVICE_ID_INTEL_EP80579_0:
case PCI_DEVICE_ID_INTEL_ICH10_0:
case PCI_DEVICE_ID_INTEL_ICH10_1:
case PCI_DEVICE_ID_INTEL_ICH10_2:
case PCI_DEVICE_ID_INTEL_ICH10_3:
+ case PCI_DEVICE_ID_INTEL_PATSBURG_LPC:
r->name = "PIIX/ICH";
r->get = pirq_piix_get;
r->set = pirq_piix_set;
return 1;
}
- if ((device >= PCI_DEVICE_ID_INTEL_PCH_LPC_MIN) &&
- (device <= PCI_DEVICE_ID_INTEL_PCH_LPC_MAX)) {
+ if ((device >= PCI_DEVICE_ID_INTEL_5_3400_SERIES_LPC_MIN) &&
+ (device <= PCI_DEVICE_ID_INTEL_5_3400_SERIES_LPC_MAX)) {
r->name = "PIIX/ICH";
r->get = pirq_piix_get;
r->set = pirq_piix_set;
return 1;
}
- if ((device >= PCI_DEVICE_ID_INTEL_CPT_LPC_MIN) &&
- (device <= PCI_DEVICE_ID_INTEL_CPT_LPC_MAX)) {
+ if ((device >= PCI_DEVICE_ID_INTEL_COUGARPOINT_LPC_MIN) &&
+ (device <= PCI_DEVICE_ID_INTEL_COUGARPOINT_LPC_MAX)) {
r->name = "PIIX/ICH";
r->get = pirq_piix_get;
r->set = pirq_piix_set;
diff --git a/arch/x86/pci/mmconfig-shared.c b/arch/x86/pci/mmconfig-shared.c
index a918553ebc75..e282886616a0 100644
--- a/arch/x86/pci/mmconfig-shared.c
+++ b/arch/x86/pci/mmconfig-shared.c
@@ -65,7 +65,6 @@ static __init struct pci_mmcfg_region *pci_mmconfig_add(int segment, int start,
int end, u64 addr)
{
struct pci_mmcfg_region *new;
- int num_buses;
struct resource *res;
if (addr == 0)
@@ -82,10 +81,9 @@ static __init struct pci_mmcfg_region *pci_mmconfig_add(int segment, int start,
list_add_sorted(new);
- num_buses = end - start + 1;
res = &new->res;
res->start = addr + PCI_MMCFG_BUS_OFFSET(start);
- res->end = addr + PCI_MMCFG_BUS_OFFSET(num_buses) - 1;
+ res->end = addr + PCI_MMCFG_BUS_OFFSET(end + 1) - 1;
res->flags = IORESOURCE_MEM | IORESOURCE_BUSY;
snprintf(new->name, PCI_MMCFG_RESOURCE_NAME_LEN,
"PCI MMCONFIG %04x [bus %02x-%02x]", segment, start, end);
diff --git a/arch/x86/pci/xen.c b/arch/x86/pci/xen.c
new file mode 100644
index 000000000000..25cd4a07d09f
--- /dev/null
+++ b/arch/x86/pci/xen.c
@@ -0,0 +1,429 @@
+/*
+ * Xen PCI Frontend Stub - puts some "dummy" functions in to the Linux
+ * x86 PCI core to support the Xen PCI Frontend
+ *
+ * Author: Ryan Wilson <hap9@epoch.ncsc.mil>
+ */
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/pci.h>
+#include <linux/acpi.h>
+
+#include <linux/io.h>
+#include <asm/io_apic.h>
+#include <asm/pci_x86.h>
+
+#include <asm/xen/hypervisor.h>
+
+#include <xen/features.h>
+#include <xen/events.h>
+#include <asm/xen/pci.h>
+
+#ifdef CONFIG_ACPI
+static int xen_hvm_register_pirq(u32 gsi, int triggering)
+{
+ int rc, irq;
+ struct physdev_map_pirq map_irq;
+ int shareable = 0;
+ char *name;
+
+ if (!xen_hvm_domain())
+ return -1;
+
+ map_irq.domid = DOMID_SELF;
+ map_irq.type = MAP_PIRQ_TYPE_GSI;
+ map_irq.index = gsi;
+ map_irq.pirq = -1;
+
+ rc = HYPERVISOR_physdev_op(PHYSDEVOP_map_pirq, &map_irq);
+ if (rc) {
+ printk(KERN_WARNING "xen map irq failed %d\n", rc);
+ return -1;
+ }
+
+ if (triggering == ACPI_EDGE_SENSITIVE) {
+ shareable = 0;
+ name = "ioapic-edge";
+ } else {
+ shareable = 1;
+ name = "ioapic-level";
+ }
+
+ irq = xen_map_pirq_gsi(map_irq.pirq, gsi, shareable, name);
+
+ printk(KERN_DEBUG "xen: --> irq=%d, pirq=%d\n", irq, map_irq.pirq);
+
+ return irq;
+}
+
+static int acpi_register_gsi_xen_hvm(struct device *dev, u32 gsi,
+ int trigger, int polarity)
+{
+ return xen_hvm_register_pirq(gsi, trigger);
+}
+#endif
+
+#if defined(CONFIG_PCI_MSI)
+#include <linux/msi.h>
+#include <asm/msidef.h>
+
+struct xen_pci_frontend_ops *xen_pci_frontend;
+EXPORT_SYMBOL_GPL(xen_pci_frontend);
+
+#define XEN_PIRQ_MSI_DATA (MSI_DATA_TRIGGER_EDGE | \
+ MSI_DATA_LEVEL_ASSERT | (3 << 8) | MSI_DATA_VECTOR(0))
+
+static void xen_msi_compose_msg(struct pci_dev *pdev, unsigned int pirq,
+ struct msi_msg *msg)
+{
+ /* We set vector == 0 to tell the hypervisor we don't care about it,
+ * but we want a pirq setup instead.
+ * We use the dest_id field to pass the pirq that we want. */
+ msg->address_hi = MSI_ADDR_BASE_HI | MSI_ADDR_EXT_DEST_ID(pirq);
+ msg->address_lo =
+ MSI_ADDR_BASE_LO |
+ MSI_ADDR_DEST_MODE_PHYSICAL |
+ MSI_ADDR_REDIRECTION_CPU |
+ MSI_ADDR_DEST_ID(pirq);
+
+ msg->data = XEN_PIRQ_MSI_DATA;
+}
+
+static int xen_hvm_setup_msi_irqs(struct pci_dev *dev, int nvec, int type)
+{
+ int irq, pirq, ret = 0;
+ struct msi_desc *msidesc;
+ struct msi_msg msg;
+
+ list_for_each_entry(msidesc, &dev->msi_list, list) {
+ __read_msi_msg(msidesc, &msg);
+ pirq = MSI_ADDR_EXT_DEST_ID(msg.address_hi) |
+ ((msg.address_lo >> MSI_ADDR_DEST_ID_SHIFT) & 0xff);
+ if (xen_irq_from_pirq(pirq) >= 0 && msg.data == XEN_PIRQ_MSI_DATA) {
+ xen_allocate_pirq_msi((type == PCI_CAP_ID_MSIX) ?
+ "msi-x" : "msi", &irq, &pirq, XEN_ALLOC_IRQ);
+ if (irq < 0)
+ goto error;
+ ret = set_irq_msi(irq, msidesc);
+ if (ret < 0)
+ goto error_while;
+ printk(KERN_DEBUG "xen: msi already setup: msi --> irq=%d"
+ " pirq=%d\n", irq, pirq);
+ return 0;
+ }
+ xen_allocate_pirq_msi((type == PCI_CAP_ID_MSIX) ?
+ "msi-x" : "msi", &irq, &pirq, (XEN_ALLOC_IRQ | XEN_ALLOC_PIRQ));
+ if (irq < 0 || pirq < 0)
+ goto error;
+ printk(KERN_DEBUG "xen: msi --> irq=%d, pirq=%d\n", irq, pirq);
+ xen_msi_compose_msg(dev, pirq, &msg);
+ ret = set_irq_msi(irq, msidesc);
+ if (ret < 0)
+ goto error_while;
+ write_msi_msg(irq, &msg);
+ }
+ return 0;
+
+error_while:
+ unbind_from_irqhandler(irq, NULL);
+error:
+ if (ret == -ENODEV)
+ dev_err(&dev->dev, "Xen PCI frontend has not registered" \
+ " MSI/MSI-X support!\n");
+
+ return ret;
+}
+
+/*
+ * For MSI interrupts we have to use drivers/xen/event.s functions to
+ * allocate an irq_desc and setup the right */
+
+
+static int xen_setup_msi_irqs(struct pci_dev *dev, int nvec, int type)
+{
+ int irq, ret, i;
+ struct msi_desc *msidesc;
+ int *v;
+
+ v = kzalloc(sizeof(int) * max(1, nvec), GFP_KERNEL);
+ if (!v)
+ return -ENOMEM;
+
+ if (type == PCI_CAP_ID_MSIX)
+ ret = xen_pci_frontend_enable_msix(dev, &v, nvec);
+ else
+ ret = xen_pci_frontend_enable_msi(dev, &v);
+ if (ret)
+ goto error;
+ i = 0;
+ list_for_each_entry(msidesc, &dev->msi_list, list) {
+ irq = xen_allocate_pirq(v[i], 0, /* not sharable */
+ (type == PCI_CAP_ID_MSIX) ?
+ "pcifront-msi-x" : "pcifront-msi");
+ if (irq < 0) {
+ ret = -1;
+ goto free;
+ }
+
+ ret = set_irq_msi(irq, msidesc);
+ if (ret)
+ goto error_while;
+ i++;
+ }
+ kfree(v);
+ return 0;
+
+error_while:
+ unbind_from_irqhandler(irq, NULL);
+error:
+ if (ret == -ENODEV)
+ dev_err(&dev->dev, "Xen PCI frontend has not registered" \
+ " MSI/MSI-X support!\n");
+free:
+ kfree(v);
+ return ret;
+}
+
+static void xen_teardown_msi_irqs(struct pci_dev *dev)
+{
+ struct msi_desc *msidesc;
+
+ msidesc = list_entry(dev->msi_list.next, struct msi_desc, list);
+ if (msidesc->msi_attrib.is_msix)
+ xen_pci_frontend_disable_msix(dev);
+ else
+ xen_pci_frontend_disable_msi(dev);
+}
+
+static void xen_teardown_msi_irq(unsigned int irq)
+{
+ xen_destroy_irq(irq);
+}
+
+static int xen_initdom_setup_msi_irqs(struct pci_dev *dev, int nvec, int type)
+{
+ int irq, ret;
+ struct msi_desc *msidesc;
+
+ list_for_each_entry(msidesc, &dev->msi_list, list) {
+ irq = xen_create_msi_irq(dev, msidesc, type);
+ if (irq < 0)
+ return -1;
+
+ ret = set_irq_msi(irq, msidesc);
+ if (ret)
+ goto error;
+ }
+ return 0;
+
+error:
+ xen_destroy_irq(irq);
+ return ret;
+}
+#endif
+
+static int xen_pcifront_enable_irq(struct pci_dev *dev)
+{
+ int rc;
+ int share = 1;
+
+ dev_info(&dev->dev, "Xen PCI enabling IRQ: %d\n", dev->irq);
+
+ if (dev->irq < 0)
+ return -EINVAL;
+
+ if (dev->irq < NR_IRQS_LEGACY)
+ share = 0;
+
+ rc = xen_allocate_pirq(dev->irq, share, "pcifront");
+ if (rc < 0) {
+ dev_warn(&dev->dev, "Xen PCI IRQ: %d, failed to register:%d\n",
+ dev->irq, rc);
+ return rc;
+ }
+ return 0;
+}
+
+int __init pci_xen_init(void)
+{
+ if (!xen_pv_domain() || xen_initial_domain())
+ return -ENODEV;
+
+ printk(KERN_INFO "PCI: setting up Xen PCI frontend stub\n");
+
+ pcibios_set_cache_line_size();
+
+ pcibios_enable_irq = xen_pcifront_enable_irq;
+ pcibios_disable_irq = NULL;
+
+#ifdef CONFIG_ACPI
+ /* Keep ACPI out of the picture */
+ acpi_noirq = 1;
+#endif
+
+#ifdef CONFIG_PCI_MSI
+ x86_msi.setup_msi_irqs = xen_setup_msi_irqs;
+ x86_msi.teardown_msi_irq = xen_teardown_msi_irq;
+ x86_msi.teardown_msi_irqs = xen_teardown_msi_irqs;
+#endif
+ return 0;
+}
+
+int __init pci_xen_hvm_init(void)
+{
+ if (!xen_feature(XENFEAT_hvm_pirqs))
+ return 0;
+
+#ifdef CONFIG_ACPI
+ /*
+ * We don't want to change the actual ACPI delivery model,
+ * just how GSIs get registered.
+ */
+ __acpi_register_gsi = acpi_register_gsi_xen_hvm;
+#endif
+
+#ifdef CONFIG_PCI_MSI
+ x86_msi.setup_msi_irqs = xen_hvm_setup_msi_irqs;
+ x86_msi.teardown_msi_irq = xen_teardown_msi_irq;
+#endif
+ return 0;
+}
+
+#ifdef CONFIG_XEN_DOM0
+static int xen_register_pirq(u32 gsi, int triggering)
+{
+ int rc, irq;
+ struct physdev_map_pirq map_irq;
+ int shareable = 0;
+ char *name;
+
+ if (!xen_pv_domain())
+ return -1;
+
+ if (triggering == ACPI_EDGE_SENSITIVE) {
+ shareable = 0;
+ name = "ioapic-edge";
+ } else {
+ shareable = 1;
+ name = "ioapic-level";
+ }
+
+ irq = xen_allocate_pirq(gsi, shareable, name);
+
+ printk(KERN_DEBUG "xen: --> irq=%d\n", irq);
+
+ if (irq < 0)
+ goto out;
+
+ map_irq.domid = DOMID_SELF;
+ map_irq.type = MAP_PIRQ_TYPE_GSI;
+ map_irq.index = gsi;
+ map_irq.pirq = irq;
+
+ rc = HYPERVISOR_physdev_op(PHYSDEVOP_map_pirq, &map_irq);
+ if (rc) {
+ printk(KERN_WARNING "xen map irq failed %d\n", rc);
+ return -1;
+ }
+
+out:
+ return irq;
+}
+
+static int xen_register_gsi(u32 gsi, int triggering, int polarity)
+{
+ int rc, irq;
+ struct physdev_setup_gsi setup_gsi;
+
+ if (!xen_pv_domain())
+ return -1;
+
+ printk(KERN_DEBUG "xen: registering gsi %u triggering %d polarity %d\n",
+ gsi, triggering, polarity);
+
+ irq = xen_register_pirq(gsi, triggering);
+
+ setup_gsi.gsi = gsi;
+ setup_gsi.triggering = (triggering == ACPI_EDGE_SENSITIVE ? 0 : 1);
+ setup_gsi.polarity = (polarity == ACPI_ACTIVE_HIGH ? 0 : 1);
+
+ rc = HYPERVISOR_physdev_op(PHYSDEVOP_setup_gsi, &setup_gsi);
+ if (rc == -EEXIST)
+ printk(KERN_INFO "Already setup the GSI :%d\n", gsi);
+ else if (rc) {
+ printk(KERN_ERR "Failed to setup GSI :%d, err_code:%d\n",
+ gsi, rc);
+ }
+
+ return irq;
+}
+
+static __init void xen_setup_acpi_sci(void)
+{
+ int rc;
+ int trigger, polarity;
+ int gsi = acpi_sci_override_gsi;
+
+ if (!gsi)
+ return;
+
+ rc = acpi_get_override_irq(gsi, &trigger, &polarity);
+ if (rc) {
+ printk(KERN_WARNING "xen: acpi_get_override_irq failed for acpi"
+ " sci, rc=%d\n", rc);
+ return;
+ }
+ trigger = trigger ? ACPI_LEVEL_SENSITIVE : ACPI_EDGE_SENSITIVE;
+ polarity = polarity ? ACPI_ACTIVE_LOW : ACPI_ACTIVE_HIGH;
+
+ printk(KERN_INFO "xen: sci override: global_irq=%d trigger=%d "
+ "polarity=%d\n", gsi, trigger, polarity);
+
+ gsi = xen_register_gsi(gsi, trigger, polarity);
+ printk(KERN_INFO "xen: acpi sci %d\n", gsi);
+
+ return;
+}
+
+static int acpi_register_gsi_xen(struct device *dev, u32 gsi,
+ int trigger, int polarity)
+{
+ return xen_register_gsi(gsi, trigger, polarity);
+}
+
+static int __init pci_xen_initial_domain(void)
+{
+#ifdef CONFIG_PCI_MSI
+ x86_msi.setup_msi_irqs = xen_initdom_setup_msi_irqs;
+ x86_msi.teardown_msi_irq = xen_teardown_msi_irq;
+#endif
+ xen_setup_acpi_sci();
+ __acpi_register_gsi = acpi_register_gsi_xen;
+
+ return 0;
+}
+
+void __init xen_setup_pirqs(void)
+{
+ int irq;
+
+ pci_xen_initial_domain();
+
+ if (0 == nr_ioapics) {
+ for (irq = 0; irq < NR_IRQS_LEGACY; irq++)
+ xen_allocate_pirq(irq, 0, "xt-pic");
+ return;
+ }
+
+ /* Pre-allocate legacy irqs */
+ for (irq = 0; irq < NR_IRQS_LEGACY; irq++) {
+ int trigger, polarity;
+
+ if (acpi_get_override_irq(irq, &trigger, &polarity) == -1)
+ continue;
+
+ xen_register_pirq(irq,
+ trigger ? ACPI_LEVEL_SENSITIVE : ACPI_EDGE_SENSITIVE);
+ }
+}
+#endif
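
For reference, the triggering/polarity values that xen_register_gsi() feeds into PHYSDEVOP_setup_gsi follow the usual "0 = edge / active-high, 1 = level / active-low" encoding. A minimal standalone sketch of that encoding (the struct here is a simplified stand-in for struct physdev_setup_gsi, not the real ABI):

#include <stdio.h>
#include <stdbool.h>

/* simplified stand-in for struct physdev_setup_gsi (illustration only) */
struct setup_gsi_args {
	unsigned int gsi;
	unsigned int triggering;	/* 0 = edge, 1 = level */
	unsigned int polarity;		/* 0 = active-high, 1 = active-low */
};

static struct setup_gsi_args encode_gsi(unsigned int gsi, bool edge, bool active_high)
{
	struct setup_gsi_args a = {
		.gsi        = gsi,
		.triggering = edge ? 0 : 1,
		.polarity   = active_high ? 0 : 1,
	};
	return a;
}

int main(void)
{
	/* the ACPI SCI is typically level-triggered, active-low */
	struct setup_gsi_args a = encode_gsi(9, false, false);

	printf("gsi=%u triggering=%u polarity=%u\n", a.gsi, a.triggering, a.polarity);
	return 0;
}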
diff --git a/arch/x86/platform/Makefile b/arch/x86/platform/Makefile
new file mode 100644
index 000000000000..7bf70b812fa2
--- /dev/null
+++ b/arch/x86/platform/Makefile
@@ -0,0 +1,8 @@
+# Platform specific code goes here
+obj-y += efi/
+obj-y += mrst/
+obj-y += olpc/
+obj-y += scx200/
+obj-y += sfi/
+obj-y += visws/
+obj-y += uv/
diff --git a/arch/x86/platform/efi/Makefile b/arch/x86/platform/efi/Makefile
new file mode 100644
index 000000000000..73b8be0f3675
--- /dev/null
+++ b/arch/x86/platform/efi/Makefile
@@ -0,0 +1 @@
+obj-$(CONFIG_EFI) += efi.o efi_$(BITS).o efi_stub_$(BITS).o
diff --git a/arch/x86/kernel/efi.c b/arch/x86/platform/efi/efi.c
index 0fe27d7c6258..0fe27d7c6258 100644
--- a/arch/x86/kernel/efi.c
+++ b/arch/x86/platform/efi/efi.c
diff --git a/arch/x86/kernel/efi_32.c b/arch/x86/platform/efi/efi_32.c
index 5cab48ee61a4..5cab48ee61a4 100644
--- a/arch/x86/kernel/efi_32.c
+++ b/arch/x86/platform/efi/efi_32.c
diff --git a/arch/x86/kernel/efi_64.c b/arch/x86/platform/efi/efi_64.c
index ac0621a7ac3d..ac0621a7ac3d 100644
--- a/arch/x86/kernel/efi_64.c
+++ b/arch/x86/platform/efi/efi_64.c
diff --git a/arch/x86/kernel/efi_stub_32.S b/arch/x86/platform/efi/efi_stub_32.S
index fbe66e626c09..fbe66e626c09 100644
--- a/arch/x86/kernel/efi_stub_32.S
+++ b/arch/x86/platform/efi/efi_stub_32.S
diff --git a/arch/x86/kernel/efi_stub_64.S b/arch/x86/platform/efi/efi_stub_64.S
index 4c07ccab8146..4c07ccab8146 100644
--- a/arch/x86/kernel/efi_stub_64.S
+++ b/arch/x86/platform/efi/efi_stub_64.S
diff --git a/arch/x86/platform/mrst/Makefile b/arch/x86/platform/mrst/Makefile
new file mode 100644
index 000000000000..efbbc552fa95
--- /dev/null
+++ b/arch/x86/platform/mrst/Makefile
@@ -0,0 +1 @@
+obj-$(CONFIG_X86_MRST) += mrst.o
diff --git a/arch/x86/kernel/mrst.c b/arch/x86/platform/mrst/mrst.c
index 79ae68154e87..79ae68154e87 100644
--- a/arch/x86/kernel/mrst.c
+++ b/arch/x86/platform/mrst/mrst.c
diff --git a/arch/x86/platform/olpc/Makefile b/arch/x86/platform/olpc/Makefile
new file mode 100644
index 000000000000..c31b8fcb5a86
--- /dev/null
+++ b/arch/x86/platform/olpc/Makefile
@@ -0,0 +1,3 @@
+obj-$(CONFIG_OLPC) += olpc.o
+obj-$(CONFIG_OLPC_XO1) += olpc-xo1.o
+obj-$(CONFIG_OLPC_OPENFIRMWARE) += olpc_ofw.o
diff --git a/arch/x86/kernel/olpc-xo1.c b/arch/x86/platform/olpc/olpc-xo1.c
index f5442c03abc3..f5442c03abc3 100644
--- a/arch/x86/kernel/olpc-xo1.c
+++ b/arch/x86/platform/olpc/olpc-xo1.c
diff --git a/arch/x86/kernel/olpc.c b/arch/x86/platform/olpc/olpc.c
index edaf3fe8dc5e..edaf3fe8dc5e 100644
--- a/arch/x86/kernel/olpc.c
+++ b/arch/x86/platform/olpc/olpc.c
diff --git a/arch/x86/kernel/olpc_ofw.c b/arch/x86/platform/olpc/olpc_ofw.c
index 787320464379..787320464379 100644
--- a/arch/x86/kernel/olpc_ofw.c
+++ b/arch/x86/platform/olpc/olpc_ofw.c
diff --git a/arch/x86/platform/scx200/Makefile b/arch/x86/platform/scx200/Makefile
new file mode 100644
index 000000000000..762b4c7f4314
--- /dev/null
+++ b/arch/x86/platform/scx200/Makefile
@@ -0,0 +1,2 @@
+obj-$(CONFIG_SCx200) += scx200.o
+scx200-y += scx200_32.o
diff --git a/arch/x86/kernel/scx200_32.c b/arch/x86/platform/scx200/scx200_32.c
index 7e004acbe526..7e004acbe526 100644
--- a/arch/x86/kernel/scx200_32.c
+++ b/arch/x86/platform/scx200/scx200_32.c
diff --git a/arch/x86/platform/sfi/Makefile b/arch/x86/platform/sfi/Makefile
new file mode 100644
index 000000000000..cc5db1168a5e
--- /dev/null
+++ b/arch/x86/platform/sfi/Makefile
@@ -0,0 +1 @@
+obj-$(CONFIG_SFI) += sfi.o
diff --git a/arch/x86/kernel/sfi.c b/arch/x86/platform/sfi/sfi.c
index dd4c281ffe57..dd4c281ffe57 100644
--- a/arch/x86/kernel/sfi.c
+++ b/arch/x86/platform/sfi/sfi.c
diff --git a/arch/x86/platform/uv/Makefile b/arch/x86/platform/uv/Makefile
new file mode 100644
index 000000000000..6c40995fefb8
--- /dev/null
+++ b/arch/x86/platform/uv/Makefile
@@ -0,0 +1 @@
+obj-$(CONFIG_X86_UV) += tlb_uv.o bios_uv.o uv_irq.o uv_sysfs.o uv_time.o
diff --git a/arch/x86/kernel/bios_uv.c b/arch/x86/platform/uv/bios_uv.c
index 8bc57baaa9ad..8bc57baaa9ad 100644
--- a/arch/x86/kernel/bios_uv.c
+++ b/arch/x86/platform/uv/bios_uv.c
diff --git a/arch/x86/kernel/tlb_uv.c b/arch/x86/platform/uv/tlb_uv.c
index 20ea20a39e2a..ba9caa808a9c 100644
--- a/arch/x86/kernel/tlb_uv.c
+++ b/arch/x86/platform/uv/tlb_uv.c
@@ -1343,8 +1343,8 @@ uv_activation_descriptor_init(int node, int pnode)
* each bau_desc is 64 bytes; there are 8 (UV_ITEMS_PER_DESCRIPTOR)
* per cpu; and up to 32 (UV_ADP_SIZE) cpu's per uvhub
*/
- bau_desc = (struct bau_desc *)kmalloc_node(sizeof(struct bau_desc)*
- UV_ADP_SIZE*UV_ITEMS_PER_DESCRIPTOR, GFP_KERNEL, node);
+ bau_desc = kmalloc_node(sizeof(struct bau_desc) * UV_ADP_SIZE
+ * UV_ITEMS_PER_DESCRIPTOR, GFP_KERNEL, node);
BUG_ON(!bau_desc);
pa = uv_gpa(bau_desc); /* need the real nasid*/
@@ -1402,9 +1402,9 @@ uv_payload_queue_init(int node, int pnode)
struct bau_payload_queue_entry *pqp_malloc;
struct bau_control *bcp;
- pqp = (struct bau_payload_queue_entry *) kmalloc_node(
- (DEST_Q_SIZE + 1) * sizeof(struct bau_payload_queue_entry),
- GFP_KERNEL, node);
+ pqp = kmalloc_node((DEST_Q_SIZE + 1)
+ * sizeof(struct bau_payload_queue_entry),
+ GFP_KERNEL, node);
BUG_ON(!pqp);
pqp_malloc = pqp;
@@ -1455,7 +1455,7 @@ static void __init uv_init_uvhub(int uvhub, int vector)
* the below initialization can't be in firmware because the
* messaging IRQ will be determined by the OS
*/
- apicid = uvhub_to_first_apicid(uvhub);
+ apicid = uvhub_to_first_apicid(uvhub) | uv_apicid_hibits;
uv_write_global_mmr64(pnode, UVH_BAU_DATA_CONFIG,
((apicid << 32) | vector));
}
@@ -1520,8 +1520,7 @@ static void __init uv_init_per_cpu(int nuvhubs)
timeout_us = calculate_destination_timeout();
- uvhub_descs = (struct uvhub_desc *)
- kmalloc(nuvhubs * sizeof(struct uvhub_desc), GFP_KERNEL);
+ uvhub_descs = kmalloc(nuvhubs * sizeof(struct uvhub_desc), GFP_KERNEL);
memset(uvhub_descs, 0, nuvhubs * sizeof(struct uvhub_desc));
uvhub_mask = kzalloc((nuvhubs+7)/8, GFP_KERNEL);
for_each_present_cpu(cpu) {
diff --git a/arch/x86/kernel/uv_irq.c b/arch/x86/platform/uv/uv_irq.c
index 7b24460917d5..7b24460917d5 100644
--- a/arch/x86/kernel/uv_irq.c
+++ b/arch/x86/platform/uv/uv_irq.c
diff --git a/arch/x86/kernel/uv_sysfs.c b/arch/x86/platform/uv/uv_sysfs.c
index 309c70fb7759..309c70fb7759 100644
--- a/arch/x86/kernel/uv_sysfs.c
+++ b/arch/x86/platform/uv/uv_sysfs.c
diff --git a/arch/x86/kernel/uv_time.c b/arch/x86/platform/uv/uv_time.c
index 56e421bc379b..9daf5d1af9f1 100644
--- a/arch/x86/kernel/uv_time.c
+++ b/arch/x86/platform/uv/uv_time.c
@@ -89,6 +89,7 @@ static void uv_rtc_send_IPI(int cpu)
apicid = cpu_physical_id(cpu);
pnode = uv_apicid_to_pnode(apicid);
+ apicid |= uv_apicid_hibits;
val = (1UL << UVH_IPI_INT_SEND_SHFT) |
(apicid << UVH_IPI_INT_APIC_ID_SHFT) |
(X86_PLATFORM_IPI_VECTOR << UVH_IPI_INT_VECTOR_SHFT);
@@ -107,6 +108,7 @@ static int uv_intr_pending(int pnode)
static int uv_setup_intr(int cpu, u64 expires)
{
u64 val;
+ unsigned long apicid = cpu_physical_id(cpu) | uv_apicid_hibits;
int pnode = uv_cpu_to_pnode(cpu);
uv_write_global_mmr64(pnode, UVH_RTC1_INT_CONFIG,
@@ -117,7 +119,7 @@ static int uv_setup_intr(int cpu, u64 expires)
UVH_EVENT_OCCURRED0_RTC1_MASK);
val = (X86_PLATFORM_IPI_VECTOR << UVH_RTC1_INT_CONFIG_VECTOR_SHFT) |
- ((u64)cpu_physical_id(cpu) << UVH_RTC1_INT_CONFIG_APIC_ID_SHFT);
+ ((u64)apicid << UVH_RTC1_INT_CONFIG_APIC_ID_SHFT);
/* Set configuration */
uv_write_global_mmr64(pnode, UVH_RTC1_INT_CONFIG, val);
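
Both UV hunks above fold uv_apicid_hibits into the APIC id before the value is written to the interrupt-config MMR. A compact sketch of that value composition; the shift constants are placeholders standing in for the UVH_*_SHFT definitions, not the real register layout:

#include <stdio.h>
#include <stdint.h>

#define VECTOR_SHFT   0		/* placeholder for UVH_..._VECTOR_SHFT */
#define APIC_ID_SHFT 32		/* placeholder for UVH_..._APIC_ID_SHFT */

static uint64_t int_config(unsigned int vector, unsigned int apicid,
			   unsigned int hibits)
{
	apicid |= hibits;	/* same adjustment as the patched code */
	return ((uint64_t)vector << VECTOR_SHFT) |
	       ((uint64_t)apicid << APIC_ID_SHFT);
}

int main(void)
{
	printf("val=%#llx\n",
	       (unsigned long long)int_config(0xf7, 0x10, 0x800000));
	return 0;
}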
diff --git a/arch/x86/platform/visws/Makefile b/arch/x86/platform/visws/Makefile
new file mode 100644
index 000000000000..91bc17ab2fd5
--- /dev/null
+++ b/arch/x86/platform/visws/Makefile
@@ -0,0 +1 @@
+obj-$(CONFIG_X86_VISWS) += visws_quirks.o
diff --git a/arch/x86/kernel/visws_quirks.c b/arch/x86/platform/visws/visws_quirks.c
index 3371bd053b89..3371bd053b89 100644
--- a/arch/x86/kernel/visws_quirks.c
+++ b/arch/x86/platform/visws/visws_quirks.c
diff --git a/arch/x86/xen/Kconfig b/arch/x86/xen/Kconfig
index 68128a1b401a..5b54892e4bc3 100644
--- a/arch/x86/xen/Kconfig
+++ b/arch/x86/xen/Kconfig
@@ -13,21 +13,28 @@ config XEN
kernel to boot in a paravirtualized environment under the
Xen hypervisor.
+config XEN_DOM0
+ def_bool y
+ depends on XEN && PCI_XEN && SWIOTLB_XEN
+ depends on X86_LOCAL_APIC && X86_IO_APIC && ACPI && PCI
+
+# Dummy symbol since people have come to rely on the PRIVILEGED_GUEST
+# name in tools.
+config XEN_PRIVILEGED_GUEST
+ def_bool XEN_DOM0
+
config XEN_PVHVM
def_bool y
depends on XEN
depends on X86_LOCAL_APIC
config XEN_MAX_DOMAIN_MEMORY
- int "Maximum allowed size of a domain in gigabytes"
- default 8 if X86_32
- default 32 if X86_64
+ int
+ default 128
depends on XEN
help
- The pseudo-physical to machine address array is sized
- according to the maximum possible memory size of a Xen
- domain. This array uses 1 page per gigabyte, so there's no
- need to be too stingy here.
+ This only affects the sizing of some bss arrays, the unused
+ portions of which are freed.
config XEN_SAVE_RESTORE
bool
diff --git a/arch/x86/xen/enlighten.c b/arch/x86/xen/enlighten.c
index 63b83ceebd1a..44dcad43989d 100644
--- a/arch/x86/xen/enlighten.c
+++ b/arch/x86/xen/enlighten.c
@@ -46,6 +46,7 @@
#include <asm/paravirt.h>
#include <asm/apic.h>
#include <asm/page.h>
+#include <asm/xen/pci.h>
#include <asm/xen/hypercall.h>
#include <asm/xen/hypervisor.h>
#include <asm/fixmap.h>
@@ -59,7 +60,6 @@
#include <asm/pgtable.h>
#include <asm/tlbflush.h>
#include <asm/reboot.h>
-#include <asm/setup.h>
#include <asm/stackprotector.h>
#include <asm/hypervisor.h>
@@ -75,6 +75,11 @@ DEFINE_PER_CPU(struct vcpu_info, xen_vcpu_info);
enum xen_domain_type xen_domain_type = XEN_NATIVE;
EXPORT_SYMBOL_GPL(xen_domain_type);
+unsigned long *machine_to_phys_mapping = (void *)MACH2PHYS_VIRT_START;
+EXPORT_SYMBOL(machine_to_phys_mapping);
+unsigned int machine_to_phys_order;
+EXPORT_SYMBOL(machine_to_phys_order);
+
struct start_info *xen_start_info;
EXPORT_SYMBOL_GPL(xen_start_info);
@@ -136,9 +141,6 @@ static void xen_vcpu_setup(int cpu)
info.mfn = arbitrary_virt_to_mfn(vcpup);
info.offset = offset_in_page(vcpup);
- printk(KERN_DEBUG "trying to map vcpu_info %d at %p, mfn %llx, offset %d\n",
- cpu, vcpup, info.mfn, info.offset);
-
/* Check to see if the hypervisor will put the vcpu_info
structure where we want it, which allows direct access via
a percpu-variable. */
@@ -152,9 +154,6 @@ static void xen_vcpu_setup(int cpu)
/* This cpu is using the registered vcpu info, even if
later ones fail to. */
per_cpu(xen_vcpu, cpu) = vcpup;
-
- printk(KERN_DEBUG "cpu %d using vcpu_info at %p\n",
- cpu, vcpup);
}
}
@@ -243,6 +242,7 @@ static __init void xen_init_cpuid_mask(void)
cpuid_leaf1_edx_mask =
~((1 << X86_FEATURE_MCE) | /* disable MCE */
(1 << X86_FEATURE_MCA) | /* disable MCA */
+ (1 << X86_FEATURE_MTRR) | /* disable MTRR */
(1 << X86_FEATURE_ACC)); /* thermal monitoring */
if (!xen_initial_domain())
@@ -836,6 +836,11 @@ static int xen_write_msr_safe(unsigned int msr, unsigned low, unsigned high)
Xen console noise. */
break;
+ case MSR_IA32_CR_PAT:
+ if (smp_processor_id() == 0)
+ xen_set_pat(((u64)high << 32) | low);
+ break;
+
default:
ret = native_write_msr_safe(msr, low, high);
}
@@ -874,8 +879,6 @@ void xen_setup_vcpu_info_placement(void)
/* xen_vcpu_setup managed to place the vcpu_info within the
percpu area for all cpus, so make use of it */
if (have_vcpu_info_placement) {
- printk(KERN_INFO "Xen: using vcpu_info placement\n");
-
pv_irq_ops.save_fl = __PV_IS_CALLEE_SAVE(xen_save_fl_direct);
pv_irq_ops.restore_fl = __PV_IS_CALLEE_SAVE(xen_restore_fl_direct);
pv_irq_ops.irq_disable = __PV_IS_CALLEE_SAVE(xen_irq_disable_direct);
@@ -1018,10 +1021,6 @@ static void xen_reboot(int reason)
{
struct sched_shutdown r = { .reason = reason };
-#ifdef CONFIG_SMP
- smp_send_stop();
-#endif
-
if (HYPERVISOR_sched_op(SCHEDOP_shutdown, &r))
BUG();
}
@@ -1092,6 +1091,8 @@ static void __init xen_setup_stackprotector(void)
/* First C function to be called on Xen boot */
asmlinkage void __init xen_start_kernel(void)
{
+ struct physdev_set_iopl set_iopl;
+ int rc;
pgd_t *pgd;
if (!xen_start_info)
@@ -1099,6 +1100,8 @@ asmlinkage void __init xen_start_kernel(void)
xen_domain_type = XEN_PV_DOMAIN;
+ xen_setup_machphys_mapping();
+
/* Install Xen paravirt ops */
pv_info = xen_info;
pv_init_ops = xen_init_ops;
@@ -1188,8 +1191,10 @@ asmlinkage void __init xen_start_kernel(void)
xen_raw_console_write("mapping kernel into physical memory\n");
pgd = xen_setup_kernel_pagetable(pgd, xen_start_info->nr_pages);
+ xen_ident_map_ISA();
- init_mm.pgd = pgd;
+ /* Allocate and initialize top and mid mfn levels for p2m structure */
+ xen_build_mfn_list_list();
/* keep using Xen gdt for now; no urgent need to change it */
@@ -1200,10 +1205,18 @@ asmlinkage void __init xen_start_kernel(void)
#else
pv_info.kernel_rpl = 0;
#endif
-
/* set the limit of our address space */
xen_reserve_top();
+ /* We used to do this in xen_arch_setup, but that is too late on AMD,
+ * where early_cpu_init (run before ->arch_setup()) calls early_amd_init,
+ * which pokes the 0xcf8 port.
+ */
+ set_iopl.iopl = 1;
+ rc = HYPERVISOR_physdev_op(PHYSDEVOP_set_iopl, &set_iopl);
+ if (rc != 0)
+ xen_raw_printk("physdev_op failed %d\n", rc);
+
#ifdef CONFIG_X86_32
/* set up basic CPUID stuff */
cpu_detect(&new_cpu_data);
@@ -1223,6 +1236,8 @@ asmlinkage void __init xen_start_kernel(void)
add_preferred_console("xenboot", 0, NULL);
add_preferred_console("tty", 0, NULL);
add_preferred_console("hvc", 0, NULL);
+ if (pci_xen)
+ x86_init.pci.arch_init = pci_xen_init;
} else {
/* Make sure ACS will be enabled */
pci_request_acs();
diff --git a/arch/x86/xen/mmu.c b/arch/x86/xen/mmu.c
index f72d18c69221..44924e551fde 100644
--- a/arch/x86/xen/mmu.c
+++ b/arch/x86/xen/mmu.c
@@ -57,6 +57,7 @@
#include <asm/linkage.h>
#include <asm/page.h>
#include <asm/init.h>
+#include <asm/pat.h>
#include <asm/xen/hypercall.h>
#include <asm/xen/hypervisor.h>
@@ -140,7 +141,8 @@ static inline void check_zero(void)
* large enough to allocate page table pages to allocate the rest.
* Each page can map 2MB.
*/
-static pte_t level1_ident_pgt[PTRS_PER_PTE * 4] __page_aligned_bss;
+#define LEVEL1_IDENT_ENTRIES (PTRS_PER_PTE * 4)
+static RESERVE_BRK_ARRAY(pte_t, level1_ident_pgt, LEVEL1_IDENT_ENTRIES);
#ifdef CONFIG_X86_64
/* l3 pud for userspace vsyscall mapping */
@@ -171,49 +173,182 @@ DEFINE_PER_CPU(unsigned long, xen_current_cr3); /* actual vcpu cr3 */
*/
#define USER_LIMIT ((STACK_TOP_MAX + PGDIR_SIZE - 1) & PGDIR_MASK)
+/*
+ * Xen leaves the responsibility for maintaining p2m mappings to the
+ * guests themselves, but it must also access and update the p2m array
+ * during suspend/resume when all the pages are reallocated.
+ *
+ * The p2m table is logically a flat array, but we implement it as a
+ * three-level tree to allow the address space to be sparse.
+ *
+ * Xen
+ * |
+ * p2m_top p2m_top_mfn
+ * / \ / \
+ * p2m_mid p2m_mid p2m_mid_mfn p2m_mid_mfn
+ * / \ / \ / /
+ * p2m p2m p2m p2m p2m p2m p2m ...
+ *
+ * The p2m_mid_mfn pages are mapped by p2m_top_mfn_p.
+ *
+ * The p2m_top and p2m_top_mfn levels are limited to 1 page, so the
+ * maximum representable pseudo-physical address space is:
+ * P2M_TOP_PER_PAGE * P2M_MID_PER_PAGE * P2M_PER_PAGE pages
+ *
+ * P2M_PER_PAGE depends on the architecture, as a mfn is always
+ * unsigned long (8 bytes on 64-bit, 4 bytes on 32), leading to
+ * 512 and 1024 entries respectively.
+ */
-#define P2M_ENTRIES_PER_PAGE (PAGE_SIZE / sizeof(unsigned long))
-#define TOP_ENTRIES (MAX_DOMAIN_PAGES / P2M_ENTRIES_PER_PAGE)
+unsigned long xen_max_p2m_pfn __read_mostly;
-/* Placeholder for holes in the address space */
-static unsigned long p2m_missing[P2M_ENTRIES_PER_PAGE] __page_aligned_data =
- { [ 0 ... P2M_ENTRIES_PER_PAGE-1 ] = ~0UL };
+#define P2M_PER_PAGE (PAGE_SIZE / sizeof(unsigned long))
+#define P2M_MID_PER_PAGE (PAGE_SIZE / sizeof(unsigned long *))
+#define P2M_TOP_PER_PAGE (PAGE_SIZE / sizeof(unsigned long **))
- /* Array of pointers to pages containing p2m entries */
-static unsigned long *p2m_top[TOP_ENTRIES] __page_aligned_data =
- { [ 0 ... TOP_ENTRIES - 1] = &p2m_missing[0] };
+#define MAX_P2M_PFN (P2M_TOP_PER_PAGE * P2M_MID_PER_PAGE * P2M_PER_PAGE)
-/* Arrays of p2m arrays expressed in mfns used for save/restore */
-static unsigned long p2m_top_mfn[TOP_ENTRIES] __page_aligned_bss;
+/* Placeholders for holes in the address space */
+static RESERVE_BRK_ARRAY(unsigned long, p2m_missing, P2M_PER_PAGE);
+static RESERVE_BRK_ARRAY(unsigned long *, p2m_mid_missing, P2M_MID_PER_PAGE);
+static RESERVE_BRK_ARRAY(unsigned long, p2m_mid_missing_mfn, P2M_MID_PER_PAGE);
-static unsigned long p2m_top_mfn_list[TOP_ENTRIES / P2M_ENTRIES_PER_PAGE]
- __page_aligned_bss;
+static RESERVE_BRK_ARRAY(unsigned long **, p2m_top, P2M_TOP_PER_PAGE);
+static RESERVE_BRK_ARRAY(unsigned long, p2m_top_mfn, P2M_TOP_PER_PAGE);
+static RESERVE_BRK_ARRAY(unsigned long *, p2m_top_mfn_p, P2M_TOP_PER_PAGE);
+
+RESERVE_BRK(p2m_mid, PAGE_SIZE * (MAX_DOMAIN_PAGES / (P2M_PER_PAGE * P2M_MID_PER_PAGE)));
+RESERVE_BRK(p2m_mid_mfn, PAGE_SIZE * (MAX_DOMAIN_PAGES / (P2M_PER_PAGE * P2M_MID_PER_PAGE)));
static inline unsigned p2m_top_index(unsigned long pfn)
{
- BUG_ON(pfn >= MAX_DOMAIN_PAGES);
- return pfn / P2M_ENTRIES_PER_PAGE;
+ BUG_ON(pfn >= MAX_P2M_PFN);
+ return pfn / (P2M_MID_PER_PAGE * P2M_PER_PAGE);
+}
+
+static inline unsigned p2m_mid_index(unsigned long pfn)
+{
+ return (pfn / P2M_PER_PAGE) % P2M_MID_PER_PAGE;
}
static inline unsigned p2m_index(unsigned long pfn)
{
- return pfn % P2M_ENTRIES_PER_PAGE;
+ return pfn % P2M_PER_PAGE;
}
-/* Build the parallel p2m_top_mfn structures */
+static void p2m_top_init(unsigned long ***top)
+{
+ unsigned i;
+
+ for (i = 0; i < P2M_TOP_PER_PAGE; i++)
+ top[i] = p2m_mid_missing;
+}
+
+static void p2m_top_mfn_init(unsigned long *top)
+{
+ unsigned i;
+
+ for (i = 0; i < P2M_TOP_PER_PAGE; i++)
+ top[i] = virt_to_mfn(p2m_mid_missing_mfn);
+}
+
+static void p2m_top_mfn_p_init(unsigned long **top)
+{
+ unsigned i;
+
+ for (i = 0; i < P2M_TOP_PER_PAGE; i++)
+ top[i] = p2m_mid_missing_mfn;
+}
+
+static void p2m_mid_init(unsigned long **mid)
+{
+ unsigned i;
+
+ for (i = 0; i < P2M_MID_PER_PAGE; i++)
+ mid[i] = p2m_missing;
+}
+
+static void p2m_mid_mfn_init(unsigned long *mid)
+{
+ unsigned i;
+
+ for (i = 0; i < P2M_MID_PER_PAGE; i++)
+ mid[i] = virt_to_mfn(p2m_missing);
+}
+
+static void p2m_init(unsigned long *p2m)
+{
+ unsigned i;
+
+ for (i = 0; i < P2M_MID_PER_PAGE; i++)
+ p2m[i] = INVALID_P2M_ENTRY;
+}
+
+/*
+ * Build the parallel p2m_top_mfn and p2m_mid_mfn structures
+ *
+ * This is called both at boot time, and after resuming from suspend:
+ * - At boot time we're called very early, and must use extend_brk()
+ * to allocate memory.
+ *
+ * - After resume we're called from within stop_machine, but the mfn
+ * tree should already be completely allocated.
+ */
void xen_build_mfn_list_list(void)
{
- unsigned pfn, idx;
+ unsigned long pfn;
- for (pfn = 0; pfn < MAX_DOMAIN_PAGES; pfn += P2M_ENTRIES_PER_PAGE) {
- unsigned topidx = p2m_top_index(pfn);
+ /* Pre-initialize p2m_top_mfn to be completely missing */
+ if (p2m_top_mfn == NULL) {
+ p2m_mid_missing_mfn = extend_brk(PAGE_SIZE, PAGE_SIZE);
+ p2m_mid_mfn_init(p2m_mid_missing_mfn);
- p2m_top_mfn[topidx] = virt_to_mfn(p2m_top[topidx]);
+ p2m_top_mfn_p = extend_brk(PAGE_SIZE, PAGE_SIZE);
+ p2m_top_mfn_p_init(p2m_top_mfn_p);
+
+ p2m_top_mfn = extend_brk(PAGE_SIZE, PAGE_SIZE);
+ p2m_top_mfn_init(p2m_top_mfn);
+ } else {
+ /* Reinitialise: mfns all change after migration */
+ p2m_mid_mfn_init(p2m_mid_missing_mfn);
}
- for (idx = 0; idx < ARRAY_SIZE(p2m_top_mfn_list); idx++) {
- unsigned topidx = idx * P2M_ENTRIES_PER_PAGE;
- p2m_top_mfn_list[idx] = virt_to_mfn(&p2m_top_mfn[topidx]);
+ for (pfn = 0; pfn < xen_max_p2m_pfn; pfn += P2M_PER_PAGE) {
+ unsigned topidx = p2m_top_index(pfn);
+ unsigned mididx = p2m_mid_index(pfn);
+ unsigned long **mid;
+ unsigned long *mid_mfn_p;
+
+ mid = p2m_top[topidx];
+ mid_mfn_p = p2m_top_mfn_p[topidx];
+
+ /* Don't bother allocating any mfn mid levels if
+ * they're just missing; only update the stored mfn,
+ * since all of them could have changed over a migrate.
+ */
+ if (mid == p2m_mid_missing) {
+ BUG_ON(mididx);
+ BUG_ON(mid_mfn_p != p2m_mid_missing_mfn);
+ p2m_top_mfn[topidx] = virt_to_mfn(p2m_mid_missing_mfn);
+ pfn += (P2M_MID_PER_PAGE - 1) * P2M_PER_PAGE;
+ continue;
+ }
+
+ if (mid_mfn_p == p2m_mid_missing_mfn) {
+ /*
+ * XXX boot-time only! We should never find
+ * missing parts of the mfn tree after
+ * runtime. extend_brk() will BUG if we call
+ * it too late.
+ */
+ mid_mfn_p = extend_brk(PAGE_SIZE, PAGE_SIZE);
+ p2m_mid_mfn_init(mid_mfn_p);
+
+ p2m_top_mfn_p[topidx] = mid_mfn_p;
+ }
+
+ p2m_top_mfn[topidx] = virt_to_mfn(mid_mfn_p);
+ mid_mfn_p[mididx] = virt_to_mfn(mid[mididx]);
}
}
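
To make the three-level split concrete, here is a standalone sketch of the index arithmetic behind p2m_top_index(), p2m_mid_index() and p2m_index(), assuming a 4 KiB page and an 8-byte unsigned long (512 entries per level, as on 64-bit):

#include <stdio.h>

#define PAGE_SIZE	 4096UL
#define P2M_PER_PAGE	 (PAGE_SIZE / sizeof(unsigned long))	/* 512 on 64-bit */
#define P2M_MID_PER_PAGE (PAGE_SIZE / sizeof(unsigned long *))
#define P2M_TOP_PER_PAGE (PAGE_SIZE / sizeof(unsigned long **))

int main(void)
{
	unsigned long pfn = 0x12345;
	unsigned long top = pfn / (P2M_MID_PER_PAGE * P2M_PER_PAGE);
	unsigned long mid = (pfn / P2M_PER_PAGE) % P2M_MID_PER_PAGE;
	unsigned long idx = pfn % P2M_PER_PAGE;

	/* one top-level entry covers P2M_MID_PER_PAGE * P2M_PER_PAGE pfns,
	 * so the largest representable pfn is
	 * P2M_TOP_PER_PAGE * P2M_MID_PER_PAGE * P2M_PER_PAGE - 1 */
	printf("pfn %#lx -> top %lu, mid %lu, idx %lu (max pfn %#lx)\n",
	       pfn, top, mid, idx,
	       P2M_TOP_PER_PAGE * P2M_MID_PER_PAGE * P2M_PER_PAGE - 1);
	return 0;
}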
@@ -222,8 +357,8 @@ void xen_setup_mfn_list_list(void)
BUG_ON(HYPERVISOR_shared_info == &xen_dummy_shared_info);
HYPERVISOR_shared_info->arch.pfn_to_mfn_frame_list_list =
- virt_to_mfn(p2m_top_mfn_list);
- HYPERVISOR_shared_info->arch.max_pfn = xen_start_info->nr_pages;
+ virt_to_mfn(p2m_top_mfn);
+ HYPERVISOR_shared_info->arch.max_pfn = xen_max_p2m_pfn;
}
/* Set up p2m_top to point to the domain-builder provided p2m pages */
@@ -231,98 +366,176 @@ void __init xen_build_dynamic_phys_to_machine(void)
{
unsigned long *mfn_list = (unsigned long *)xen_start_info->mfn_list;
unsigned long max_pfn = min(MAX_DOMAIN_PAGES, xen_start_info->nr_pages);
- unsigned pfn;
+ unsigned long pfn;
+
+ xen_max_p2m_pfn = max_pfn;
- for (pfn = 0; pfn < max_pfn; pfn += P2M_ENTRIES_PER_PAGE) {
+ p2m_missing = extend_brk(PAGE_SIZE, PAGE_SIZE);
+ p2m_init(p2m_missing);
+
+ p2m_mid_missing = extend_brk(PAGE_SIZE, PAGE_SIZE);
+ p2m_mid_init(p2m_mid_missing);
+
+ p2m_top = extend_brk(PAGE_SIZE, PAGE_SIZE);
+ p2m_top_init(p2m_top);
+
+ /*
+ * The domain builder gives us a pre-constructed p2m array in
+ * mfn_list for all the pages initially given to us, so we just
+ * need to graft that into our tree structure.
+ */
+ for (pfn = 0; pfn < max_pfn; pfn += P2M_PER_PAGE) {
unsigned topidx = p2m_top_index(pfn);
+ unsigned mididx = p2m_mid_index(pfn);
- p2m_top[topidx] = &mfn_list[pfn];
- }
+ if (p2m_top[topidx] == p2m_mid_missing) {
+ unsigned long **mid = extend_brk(PAGE_SIZE, PAGE_SIZE);
+ p2m_mid_init(mid);
- xen_build_mfn_list_list();
+ p2m_top[topidx] = mid;
+ }
+
+ p2m_top[topidx][mididx] = &mfn_list[pfn];
+ }
}
unsigned long get_phys_to_machine(unsigned long pfn)
{
- unsigned topidx, idx;
+ unsigned topidx, mididx, idx;
- if (unlikely(pfn >= MAX_DOMAIN_PAGES))
+ if (unlikely(pfn >= MAX_P2M_PFN))
return INVALID_P2M_ENTRY;
topidx = p2m_top_index(pfn);
+ mididx = p2m_mid_index(pfn);
idx = p2m_index(pfn);
- return p2m_top[topidx][idx];
+
+ return p2m_top[topidx][mididx][idx];
}
EXPORT_SYMBOL_GPL(get_phys_to_machine);
-/* install a new p2m_top page */
-bool install_p2mtop_page(unsigned long pfn, unsigned long *p)
+static void *alloc_p2m_page(void)
{
- unsigned topidx = p2m_top_index(pfn);
- unsigned long **pfnp, *mfnp;
- unsigned i;
+ return (void *)__get_free_page(GFP_KERNEL | __GFP_REPEAT);
+}
- pfnp = &p2m_top[topidx];
- mfnp = &p2m_top_mfn[topidx];
+static void free_p2m_page(void *p)
+{
+ free_page((unsigned long)p);
+}
+
+/*
+ * Fully allocate the p2m structure for a given pfn. We need to check
+ * that both the top and mid levels are allocated, and make sure the
+ * parallel mfn tree is kept in sync. We may race with other cpus, so
+ * the new pages are installed with cmpxchg; if we lose the race then
+ * simply free the page we allocated and use the one that's there.
+ */
+static bool alloc_p2m(unsigned long pfn)
+{
+ unsigned topidx, mididx;
+ unsigned long ***top_p, **mid;
+ unsigned long *top_mfn_p, *mid_mfn;
- for (i = 0; i < P2M_ENTRIES_PER_PAGE; i++)
- p[i] = INVALID_P2M_ENTRY;
+ topidx = p2m_top_index(pfn);
+ mididx = p2m_mid_index(pfn);
- if (cmpxchg(pfnp, p2m_missing, p) == p2m_missing) {
- *mfnp = virt_to_mfn(p);
- return true;
+ top_p = &p2m_top[topidx];
+ mid = *top_p;
+
+ if (mid == p2m_mid_missing) {
+ /* Mid level is missing, allocate a new one */
+ mid = alloc_p2m_page();
+ if (!mid)
+ return false;
+
+ p2m_mid_init(mid);
+
+ if (cmpxchg(top_p, p2m_mid_missing, mid) != p2m_mid_missing)
+ free_p2m_page(mid);
}
- return false;
-}
+ top_mfn_p = &p2m_top_mfn[topidx];
+ mid_mfn = p2m_top_mfn_p[topidx];
-static void alloc_p2m(unsigned long pfn)
-{
- unsigned long *p;
+ BUG_ON(virt_to_mfn(mid_mfn) != *top_mfn_p);
+
+ if (mid_mfn == p2m_mid_missing_mfn) {
+ /* Separately check the mid mfn level */
+ unsigned long missing_mfn;
+ unsigned long mid_mfn_mfn;
+
+ mid_mfn = alloc_p2m_page();
+ if (!mid_mfn)
+ return false;
+
+ p2m_mid_mfn_init(mid_mfn);
+
+ missing_mfn = virt_to_mfn(p2m_mid_missing_mfn);
+ mid_mfn_mfn = virt_to_mfn(mid_mfn);
+ if (cmpxchg(top_mfn_p, missing_mfn, mid_mfn_mfn) != missing_mfn)
+ free_p2m_page(mid_mfn);
+ else
+ p2m_top_mfn_p[topidx] = mid_mfn;
+ }
+
+ if (p2m_top[topidx][mididx] == p2m_missing) {
+ /* p2m leaf page is missing */
+ unsigned long *p2m;
+
+ p2m = alloc_p2m_page();
+ if (!p2m)
+ return false;
- p = (void *)__get_free_page(GFP_KERNEL | __GFP_NOFAIL);
- BUG_ON(p == NULL);
+ p2m_init(p2m);
- if (!install_p2mtop_page(pfn, p))
- free_page((unsigned long)p);
+ if (cmpxchg(&mid[mididx], p2m_missing, p2m) != p2m_missing)
+ free_p2m_page(p2m);
+ else
+ mid_mfn[mididx] = virt_to_mfn(p2m);
+ }
+
+ return true;
}
/* Try to install p2m mapping; fail if intermediate bits missing */
bool __set_phys_to_machine(unsigned long pfn, unsigned long mfn)
{
- unsigned topidx, idx;
+ unsigned topidx, mididx, idx;
- if (unlikely(pfn >= MAX_DOMAIN_PAGES)) {
+ if (unlikely(pfn >= MAX_P2M_PFN)) {
BUG_ON(mfn != INVALID_P2M_ENTRY);
return true;
}
topidx = p2m_top_index(pfn);
- if (p2m_top[topidx] == p2m_missing) {
- if (mfn == INVALID_P2M_ENTRY)
- return true;
- return false;
- }
-
+ mididx = p2m_mid_index(pfn);
idx = p2m_index(pfn);
- p2m_top[topidx][idx] = mfn;
+
+ if (p2m_top[topidx][mididx] == p2m_missing)
+ return mfn == INVALID_P2M_ENTRY;
+
+ p2m_top[topidx][mididx][idx] = mfn;
return true;
}
-void set_phys_to_machine(unsigned long pfn, unsigned long mfn)
+bool set_phys_to_machine(unsigned long pfn, unsigned long mfn)
{
if (unlikely(xen_feature(XENFEAT_auto_translated_physmap))) {
BUG_ON(pfn != mfn && mfn != INVALID_P2M_ENTRY);
- return;
+ return true;
}
if (unlikely(!__set_phys_to_machine(pfn, mfn))) {
- alloc_p2m(pfn);
+ if (!alloc_p2m(pfn))
+ return false;
if (!__set_phys_to_machine(pfn, mfn))
- BUG();
+ return false;
}
+
+ return true;
}
unsigned long arbitrary_virt_to_mfn(void *vaddr)
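
alloc_p2m() above uses a common lock-free idiom: allocate a fresh page, try to publish it with cmpxchg, and free it again if another CPU won the race. A minimal userspace sketch of that pattern, with GCC's __sync_val_compare_and_swap standing in for the kernel's cmpxchg:

#include <stdio.h>
#include <stdlib.h>

static long *missing;	/* shared placeholder page, like p2m_missing */
static long *slot;	/* shared pointer, like one p2m_top[] entry */

/* install a freshly allocated level unless someone else already did */
static long *install_level(long **slotp)
{
	long *new = calloc(512, sizeof(*new));
	long *old;

	if (!new)
		return NULL;

	old = __sync_val_compare_and_swap(slotp, missing, new);
	if (old != missing) {	/* lost the race: keep the winner's page */
		free(new);
		return old;
	}
	return new;
}

int main(void)
{
	missing = calloc(512, sizeof(*missing));
	slot = missing;

	long *level = install_level(&slot);

	printf("installed %p (slot now %p)\n", (void *)level, (void *)slot);
	return 0;
}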
@@ -399,7 +612,7 @@ static bool xen_iomap_pte(pte_t pte)
return pte_flags(pte) & _PAGE_IOMAP;
}
-static void xen_set_iomap_pte(pte_t *ptep, pte_t pteval)
+void xen_set_domain_pte(pte_t *ptep, pte_t pteval, unsigned domid)
{
struct multicall_space mcs;
struct mmu_update *u;
@@ -411,10 +624,16 @@ static void xen_set_iomap_pte(pte_t *ptep, pte_t pteval)
u->ptr = arbitrary_virt_to_machine(ptep).maddr;
u->val = pte_val_ma(pteval);
- MULTI_mmu_update(mcs.mc, mcs.args, 1, NULL, DOMID_IO);
+ MULTI_mmu_update(mcs.mc, mcs.args, 1, NULL, domid);
xen_mc_issue(PARAVIRT_LAZY_MMU);
}
+EXPORT_SYMBOL_GPL(xen_set_domain_pte);
+
+static void xen_set_iomap_pte(pte_t *ptep, pte_t pteval)
+{
+ xen_set_domain_pte(ptep, pteval, DOMID_IO);
+}
static void xen_extend_mmu_update(const struct mmu_update *update)
{
@@ -561,7 +780,20 @@ static pteval_t pte_pfn_to_mfn(pteval_t val)
if (val & _PAGE_PRESENT) {
unsigned long pfn = (val & PTE_PFN_MASK) >> PAGE_SHIFT;
pteval_t flags = val & PTE_FLAGS_MASK;
- val = ((pteval_t)pfn_to_mfn(pfn) << PAGE_SHIFT) | flags;
+ unsigned long mfn = pfn_to_mfn(pfn);
+
+ /*
+ * If there's no mfn for the pfn, then just create an
+ * empty non-present pte. Unfortunately this loses
+ * information about the original pfn, so
+ * pte_mfn_to_pfn is asymmetric.
+ */
+ if (unlikely(mfn == INVALID_P2M_ENTRY)) {
+ mfn = 0;
+ flags = 0;
+ }
+
+ val = ((pteval_t)mfn << PAGE_SHIFT) | flags;
}
return val;
@@ -583,10 +815,18 @@ static pteval_t iomap_pte(pteval_t val)
pteval_t xen_pte_val(pte_t pte)
{
- if (xen_initial_domain() && (pte.pte & _PAGE_IOMAP))
- return pte.pte;
+ pteval_t pteval = pte.pte;
+
+ /* If this is a WC pte, convert back from Xen WC to Linux WC */
+ if ((pteval & (_PAGE_PAT | _PAGE_PCD | _PAGE_PWT)) == _PAGE_PAT) {
+ WARN_ON(!pat_enabled);
+ pteval = (pteval & ~_PAGE_PAT) | _PAGE_PWT;
+ }
- return pte_mfn_to_pfn(pte.pte);
+ if (xen_initial_domain() && (pteval & _PAGE_IOMAP))
+ return pteval;
+
+ return pte_mfn_to_pfn(pteval);
}
PV_CALLEE_SAVE_REGS_THUNK(xen_pte_val);
@@ -596,10 +836,48 @@ pgdval_t xen_pgd_val(pgd_t pgd)
}
PV_CALLEE_SAVE_REGS_THUNK(xen_pgd_val);
+/*
+ * Xen's PAT setup is part of its ABI, though I assume entries 6 & 7
+ * are reserved for now, to correspond to the Intel-reserved PAT
+ * types.
+ *
+ * We expect Linux's PAT set as follows:
+ *
+ * Idx PTE flags Linux Xen Default
+ * 0 WB WB WB
+ * 1 PWT WC WT WT
+ * 2 PCD UC- UC- UC-
+ * 3 PCD PWT UC UC UC
+ * 4 PAT WB WC WB
+ * 5 PAT PWT WC WP WT
+ * 6 PAT PCD UC- UC UC-
+ * 7 PAT PCD PWT UC UC UC
+ */
+
+void xen_set_pat(u64 pat)
+{
+ /* We expect Linux to use a PAT setting of
+ * UC UC- WC WB (ignoring the PAT flag) */
+ WARN_ON(pat != 0x0007010600070106ull);
+}
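
xen_pte_val() above and xen_make_pte() just below translate a write-combining mapping between Linux's PAT layout (WC at index 1, i.e. PWT only) and Xen's (WC at index 4, i.e. PAT only). A hedged sketch of that bit juggling on plain integer pte values; the flag positions are the usual x86 ones (PWT bit 3, PCD bit 4, PAT bit 7) and should be treated as assumptions here:

#include <stdio.h>
#include <stdint.h>

#define _PAGE_PWT 0x008ULL	/* bit 3 */
#define _PAGE_PCD 0x010ULL	/* bit 4 */
#define _PAGE_PAT 0x080ULL	/* bit 7 */

/* Linux WC (PWT only) -> Xen WC (PAT only), as in xen_make_pte() */
static uint64_t linux_to_xen(uint64_t pte)
{
	if ((pte & (_PAGE_PCD | _PAGE_PWT)) == _PAGE_PWT)
		pte = (pte & ~(_PAGE_PCD | _PAGE_PWT)) | _PAGE_PAT;
	return pte;
}

/* Xen WC (PAT only) -> Linux WC (PWT only), as in xen_pte_val() */
static uint64_t xen_to_linux(uint64_t pte)
{
	if ((pte & (_PAGE_PAT | _PAGE_PCD | _PAGE_PWT)) == _PAGE_PAT)
		pte = (pte & ~_PAGE_PAT) | _PAGE_PWT;
	return pte;
}

int main(void)
{
	uint64_t wc = 0x1000 | _PAGE_PWT;	/* a WC pte in Linux terms */
	uint64_t xen = linux_to_xen(wc);

	printf("linux %#llx -> xen %#llx -> linux %#llx\n",
	       (unsigned long long)wc, (unsigned long long)xen,
	       (unsigned long long)xen_to_linux(xen));
	return 0;
}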
+
pte_t xen_make_pte(pteval_t pte)
{
phys_addr_t addr = (pte & PTE_PFN_MASK);
+ /* If Linux is trying to set a WC pte, then map to the Xen WC.
+ * If _PAGE_PAT is set, then it probably means it is really
+ * _PAGE_PSE, so avoid fiddling with the PAT mapping and hope
+ * things work out OK...
+ *
+ * (We should never see kernel mappings with _PAGE_PSE set,
+ * but we could see hugetlbfs mappings, I think.)
+ */
+ if (pat_enabled && !WARN_ON(pte & _PAGE_PAT)) {
+ if ((pte & (_PAGE_PCD | _PAGE_PWT)) == _PAGE_PWT)
+ pte = (pte & ~(_PAGE_PCD | _PAGE_PWT)) | _PAGE_PAT;
+ }
+
/*
* Unprivileged domains are allowed to do IOMAPpings for
* PCI passthrough, but not map ISA space. The ISA
@@ -1697,6 +1975,7 @@ static void *m2v(phys_addr_t maddr)
return __ka(m2p(maddr));
}
+/* Set the page permissions on identity-mapped pages */
static void set_page_prot(void *addr, pgprot_t prot)
{
unsigned long pfn = __pa(addr) >> PAGE_SHIFT;
@@ -1712,6 +1991,9 @@ static __init void xen_map_identity_early(pmd_t *pmd, unsigned long max_pfn)
unsigned ident_pte;
unsigned long pfn;
+ level1_ident_pgt = extend_brk(sizeof(pte_t) * LEVEL1_IDENT_ENTRIES,
+ PAGE_SIZE);
+
ident_pte = 0;
pfn = 0;
for (pmdidx = 0; pmdidx < PTRS_PER_PMD && pfn < max_pfn; pmdidx++) {
@@ -1722,7 +2004,7 @@ static __init void xen_map_identity_early(pmd_t *pmd, unsigned long max_pfn)
pte_page = m2v(pmd[pmdidx].pmd);
else {
/* Check for free pte pages */
- if (ident_pte == ARRAY_SIZE(level1_ident_pgt))
+ if (ident_pte == LEVEL1_IDENT_ENTRIES)
break;
pte_page = &level1_ident_pgt[ident_pte];
@@ -1752,6 +2034,20 @@ static __init void xen_map_identity_early(pmd_t *pmd, unsigned long max_pfn)
set_page_prot(pmd, PAGE_KERNEL_RO);
}
+void __init xen_setup_machphys_mapping(void)
+{
+ struct xen_machphys_mapping mapping;
+ unsigned long machine_to_phys_nr_ents;
+
+ if (HYPERVISOR_memory_op(XENMEM_machphys_mapping, &mapping) == 0) {
+ machine_to_phys_mapping = (unsigned long *)mapping.v_start;
+ machine_to_phys_nr_ents = mapping.max_mfn + 1;
+ } else {
+ machine_to_phys_nr_ents = MACH2PHYS_NR_ENTRIES;
+ }
+ machine_to_phys_order = fls(machine_to_phys_nr_ents - 1);
+}
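
machine_to_phys_order is derived as fls(nr_ents - 1): the number of bits needed to index the table, rounded up to the next power of two. A tiny standalone illustration, with a local helper mirroring the kernel fls() semantics (highest set bit, 1-based, 0 for an input of 0):

#include <stdio.h>

static int fls_local(unsigned long x)
{
	int r = 0;

	while (x) {
		x >>= 1;
		r++;
	}
	return r;
}

int main(void)
{
	unsigned long nr_ents[] = { 1UL << 20, (1UL << 20) + 1, 12345 };

	for (unsigned int i = 0; i < sizeof(nr_ents) / sizeof(nr_ents[0]); i++)
		printf("nr_ents=%lu -> order=%d\n",
		       nr_ents[i], fls_local(nr_ents[i] - 1));
	return 0;
}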
+
#ifdef CONFIG_X86_64
static void convert_pfn_mfn(void *v)
{
@@ -1837,45 +2133,88 @@ __init pgd_t *xen_setup_kernel_pagetable(pgd_t *pgd,
return pgd;
}
#else /* !CONFIG_X86_64 */
-static pmd_t level2_kernel_pgt[PTRS_PER_PMD] __page_aligned_bss;
+static RESERVE_BRK_ARRAY(pmd_t, initial_kernel_pmd, PTRS_PER_PMD);
+static RESERVE_BRK_ARRAY(pmd_t, swapper_kernel_pmd, PTRS_PER_PMD);
+
+static __init void xen_write_cr3_init(unsigned long cr3)
+{
+ unsigned long pfn = PFN_DOWN(__pa(swapper_pg_dir));
+
+ BUG_ON(read_cr3() != __pa(initial_page_table));
+ BUG_ON(cr3 != __pa(swapper_pg_dir));
+
+ /*
+ * We are switching to swapper_pg_dir for the first time (from
+ * initial_page_table) and therefore need to mark that page
+ * read-only and then pin it.
+ *
+ * Xen disallows sharing of kernel PMDs for PAE
+ * guests. Therefore we must copy the kernel PMD from
+ * initial_page_table into a new kernel PMD to be used in
+ * swapper_pg_dir.
+ */
+ swapper_kernel_pmd =
+ extend_brk(sizeof(pmd_t) * PTRS_PER_PMD, PAGE_SIZE);
+ memcpy(swapper_kernel_pmd, initial_kernel_pmd,
+ sizeof(pmd_t) * PTRS_PER_PMD);
+ swapper_pg_dir[KERNEL_PGD_BOUNDARY] =
+ __pgd(__pa(swapper_kernel_pmd) | _PAGE_PRESENT);
+ set_page_prot(swapper_kernel_pmd, PAGE_KERNEL_RO);
+
+ set_page_prot(swapper_pg_dir, PAGE_KERNEL_RO);
+ xen_write_cr3(cr3);
+ pin_pagetable_pfn(MMUEXT_PIN_L3_TABLE, pfn);
+
+ pin_pagetable_pfn(MMUEXT_UNPIN_TABLE,
+ PFN_DOWN(__pa(initial_page_table)));
+ set_page_prot(initial_page_table, PAGE_KERNEL);
+ set_page_prot(initial_kernel_pmd, PAGE_KERNEL);
+
+ pv_mmu_ops.write_cr3 = &xen_write_cr3;
+}
__init pgd_t *xen_setup_kernel_pagetable(pgd_t *pgd,
unsigned long max_pfn)
{
pmd_t *kernel_pmd;
+ initial_kernel_pmd =
+ extend_brk(sizeof(pmd_t) * PTRS_PER_PMD, PAGE_SIZE);
+
max_pfn_mapped = PFN_DOWN(__pa(xen_start_info->pt_base) +
xen_start_info->nr_pt_frames * PAGE_SIZE +
512*1024);
kernel_pmd = m2v(pgd[KERNEL_PGD_BOUNDARY].pgd);
- memcpy(level2_kernel_pgt, kernel_pmd, sizeof(pmd_t) * PTRS_PER_PMD);
+ memcpy(initial_kernel_pmd, kernel_pmd, sizeof(pmd_t) * PTRS_PER_PMD);
- xen_map_identity_early(level2_kernel_pgt, max_pfn);
+ xen_map_identity_early(initial_kernel_pmd, max_pfn);
- memcpy(swapper_pg_dir, pgd, sizeof(pgd_t) * PTRS_PER_PGD);
- set_pgd(&swapper_pg_dir[KERNEL_PGD_BOUNDARY],
- __pgd(__pa(level2_kernel_pgt) | _PAGE_PRESENT));
+ memcpy(initial_page_table, pgd, sizeof(pgd_t) * PTRS_PER_PGD);
+ initial_page_table[KERNEL_PGD_BOUNDARY] =
+ __pgd(__pa(initial_kernel_pmd) | _PAGE_PRESENT);
- set_page_prot(level2_kernel_pgt, PAGE_KERNEL_RO);
- set_page_prot(swapper_pg_dir, PAGE_KERNEL_RO);
+ set_page_prot(initial_kernel_pmd, PAGE_KERNEL_RO);
+ set_page_prot(initial_page_table, PAGE_KERNEL_RO);
set_page_prot(empty_zero_page, PAGE_KERNEL_RO);
pin_pagetable_pfn(MMUEXT_UNPIN_TABLE, PFN_DOWN(__pa(pgd)));
- xen_write_cr3(__pa(swapper_pg_dir));
-
- pin_pagetable_pfn(MMUEXT_PIN_L3_TABLE, PFN_DOWN(__pa(swapper_pg_dir)));
+ pin_pagetable_pfn(MMUEXT_PIN_L3_TABLE,
+ PFN_DOWN(__pa(initial_page_table)));
+ xen_write_cr3(__pa(initial_page_table));
memblock_x86_reserve_range(__pa(xen_start_info->pt_base),
__pa(xen_start_info->pt_base +
xen_start_info->nr_pt_frames * PAGE_SIZE),
"XEN PAGETABLES");
- return swapper_pg_dir;
+ return initial_page_table;
}
#endif /* CONFIG_X86_64 */
+static unsigned char dummy_mapping[PAGE_SIZE] __page_aligned_bss;
+
static void xen_set_fixmap(unsigned idx, phys_addr_t phys, pgprot_t prot)
{
pte_t pte;
@@ -1896,15 +2235,28 @@ static void xen_set_fixmap(unsigned idx, phys_addr_t phys, pgprot_t prot)
#else
case VSYSCALL_LAST_PAGE ... VSYSCALL_FIRST_PAGE:
#endif
-#ifdef CONFIG_X86_LOCAL_APIC
- case FIX_APIC_BASE: /* maps dummy local APIC */
-#endif
case FIX_TEXT_POKE0:
case FIX_TEXT_POKE1:
/* All local page mappings */
pte = pfn_pte(phys, prot);
break;
+#ifdef CONFIG_X86_LOCAL_APIC
+ case FIX_APIC_BASE: /* maps dummy local APIC */
+ pte = pfn_pte(PFN_DOWN(__pa(dummy_mapping)), PAGE_KERNEL);
+ break;
+#endif
+
+#ifdef CONFIG_X86_IO_APIC
+ case FIX_IO_APIC_BASE_0 ... FIX_IO_APIC_BASE_END:
+ /*
+ * We just don't map the IO APIC - all access is via
+ * hypercalls. Keep the address in the pte for reference.
+ */
+ pte = pfn_pte(PFN_DOWN(__pa(dummy_mapping)), PAGE_KERNEL);
+ break;
+#endif
+
case FIX_PARAVIRT_BOOTMAP:
/* This is an MFN, but it isn't an IO mapping from the
IO domain */
@@ -1929,6 +2281,29 @@ static void xen_set_fixmap(unsigned idx, phys_addr_t phys, pgprot_t prot)
#endif
}
+__init void xen_ident_map_ISA(void)
+{
+ unsigned long pa;
+
+ /*
+ * If we're dom0, then linear map the ISA machine addresses into
+ * the kernel's address space.
+ */
+ if (!xen_initial_domain())
+ return;
+
+ xen_raw_printk("Xen: setup ISA identity maps\n");
+
+ for (pa = ISA_START_ADDRESS; pa < ISA_END_ADDRESS; pa += PAGE_SIZE) {
+ pte_t pte = mfn_pte(PFN_DOWN(pa), PAGE_KERNEL_IO);
+
+ if (HYPERVISOR_update_va_mapping(PAGE_OFFSET + pa, pte, 0))
+ BUG();
+ }
+
+ xen_flush_tlb();
+}
+
static __init void xen_post_allocator_init(void)
{
pv_mmu_ops.set_pte = xen_set_pte;
@@ -1968,7 +2343,11 @@ static const struct pv_mmu_ops xen_mmu_ops __initdata = {
.write_cr2 = xen_write_cr2,
.read_cr3 = xen_read_cr3,
+#ifdef CONFIG_X86_32
+ .write_cr3 = xen_write_cr3_init,
+#else
.write_cr3 = xen_write_cr3,
+#endif
.flush_tlb_user = xen_flush_tlb,
.flush_tlb_kernel = xen_flush_tlb,
@@ -2036,7 +2415,7 @@ void __init xen_init_mmu_ops(void)
x86_init.paging.pagetable_setup_done = xen_pagetable_setup_done;
pv_mmu_ops = xen_mmu_ops;
- vmap_lazy_unmap = false;
+ memset(dummy_mapping, 0xff, PAGE_SIZE);
}
/* Protected by xen_reservation_lock. */
@@ -2269,6 +2648,73 @@ void __init xen_hvm_init_mmu_ops(void)
}
#endif
+#define REMAP_BATCH_SIZE 16
+
+struct remap_data {
+ unsigned long mfn;
+ pgprot_t prot;
+ struct mmu_update *mmu_update;
+};
+
+static int remap_area_mfn_pte_fn(pte_t *ptep, pgtable_t token,
+ unsigned long addr, void *data)
+{
+ struct remap_data *rmd = data;
+ pte_t pte = pte_mkspecial(pfn_pte(rmd->mfn++, rmd->prot));
+
+ rmd->mmu_update->ptr = arbitrary_virt_to_machine(ptep).maddr;
+ rmd->mmu_update->val = pte_val_ma(pte);
+ rmd->mmu_update++;
+
+ return 0;
+}
+
+int xen_remap_domain_mfn_range(struct vm_area_struct *vma,
+ unsigned long addr,
+ unsigned long mfn, int nr,
+ pgprot_t prot, unsigned domid)
+{
+ struct remap_data rmd;
+ struct mmu_update mmu_update[REMAP_BATCH_SIZE];
+ int batch;
+ unsigned long range;
+ int err = 0;
+
+ prot = __pgprot(pgprot_val(prot) | _PAGE_IOMAP);
+
+ BUG_ON(!((vma->vm_flags & (VM_PFNMAP | VM_RESERVED | VM_IO)) ==
+ (VM_PFNMAP | VM_RESERVED | VM_IO)));
+
+ rmd.mfn = mfn;
+ rmd.prot = prot;
+
+ while (nr) {
+ batch = min(REMAP_BATCH_SIZE, nr);
+ range = (unsigned long)batch << PAGE_SHIFT;
+
+ rmd.mmu_update = mmu_update;
+ err = apply_to_page_range(vma->vm_mm, addr, range,
+ remap_area_mfn_pte_fn, &rmd);
+ if (err)
+ goto out;
+
+ err = -EFAULT;
+ if (HYPERVISOR_mmu_update(mmu_update, batch, NULL, domid) < 0)
+ goto out;
+
+ nr -= batch;
+ addr += range;
+ }
+
+ err = 0;
+out:
+
+ flush_tlb_all();
+
+ return err;
+}
+EXPORT_SYMBOL_GPL(xen_remap_domain_mfn_range);
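
xen_remap_domain_mfn_range() issues the MMU updates at most REMAP_BATCH_SIZE at a time. Stripped of the page-table walk and the hypercall, the batching skeleton looks roughly like this; process_batch() is a hypothetical stand-in for building and issuing one batch of mmu_update entries:

#include <stdio.h>

#define REMAP_BATCH_SIZE 16

/* hypothetical stand-in for one apply_to_page_range + HYPERVISOR_mmu_update */
static int process_batch(unsigned long addr, int count)
{
	printf("batch: %d entries starting at %#lx\n", count, addr);
	return 0;	/* 0 on success, like the real path */
}

static int remap_range(unsigned long addr, int nr)
{
	int err = 0;

	while (nr) {
		int batch = nr < REMAP_BATCH_SIZE ? nr : REMAP_BATCH_SIZE;

		err = process_batch(addr, batch);
		if (err)
			break;

		nr   -= batch;
		addr += (unsigned long)batch << 12;	/* PAGE_SHIFT assumed to be 12 */
	}
	return err;
}

int main(void)
{
	return remap_range(0x7f0000000000UL, 37);	/* dispatched as 16 + 16 + 5 */
}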
+
#ifdef CONFIG_XEN_DEBUG_FS
static struct dentry *d_mmu_debug;
diff --git a/arch/x86/xen/mmu.h b/arch/x86/xen/mmu.h
index fa938c4aa2f7..537bb9aab777 100644
--- a/arch/x86/xen/mmu.h
+++ b/arch/x86/xen/mmu.h
@@ -12,7 +12,6 @@ enum pt_level {
bool __set_phys_to_machine(unsigned long pfn, unsigned long mfn);
-bool install_p2mtop_page(unsigned long pfn, unsigned long *p);
void set_pte_mfn(unsigned long vaddr, unsigned long pfn, pgprot_t flags);
diff --git a/arch/x86/xen/pci-swiotlb-xen.c b/arch/x86/xen/pci-swiotlb-xen.c
index 22471001b74c..bfd0632fe65e 100644
--- a/arch/x86/xen/pci-swiotlb-xen.c
+++ b/arch/x86/xen/pci-swiotlb-xen.c
@@ -1,6 +1,7 @@
/* Glue code to lib/swiotlb-xen.c */
#include <linux/dma-mapping.h>
+#include <linux/pci.h>
#include <xen/swiotlb-xen.h>
#include <asm/xen/hypervisor.h>
@@ -55,6 +56,9 @@ void __init pci_xen_swiotlb_init(void)
if (xen_swiotlb) {
xen_swiotlb_init(1);
dma_ops = &xen_swiotlb_dma_ops;
+
+ /* Make sure ACS will be enabled */
+ pci_request_acs();
}
}
IOMMU_INIT_FINISH(pci_xen_swiotlb_detect,
diff --git a/arch/x86/xen/platform-pci-unplug.c b/arch/x86/xen/platform-pci-unplug.c
index 0f456386cce5..25c52f94a27c 100644
--- a/arch/x86/xen/platform-pci-unplug.c
+++ b/arch/x86/xen/platform-pci-unplug.c
@@ -68,7 +68,7 @@ static int __init check_platform_magic(void)
return 0;
}
-void __init xen_unplug_emulated_devices(void)
+void xen_unplug_emulated_devices(void)
{
int r;
diff --git a/arch/x86/xen/setup.c b/arch/x86/xen/setup.c
index 9729c903404b..b5a7f928234b 100644
--- a/arch/x86/xen/setup.c
+++ b/arch/x86/xen/setup.c
@@ -18,10 +18,11 @@
#include <asm/xen/hypervisor.h>
#include <asm/xen/hypercall.h>
+#include <xen/xen.h>
#include <xen/page.h>
#include <xen/interface/callback.h>
-#include <xen/interface/physdev.h>
#include <xen/interface/memory.h>
+#include <xen/interface/physdev.h>
#include <xen/features.h>
#include "xen-ops.h"
@@ -34,6 +35,39 @@ extern void xen_sysenter_target(void);
extern void xen_syscall_target(void);
extern void xen_syscall32_target(void);
+/* Amount of extra memory space we add to the e820 ranges */
+phys_addr_t xen_extra_mem_start, xen_extra_mem_size;
+
+/*
+ * The maximum amount of extra memory compared to the base size. The
+ * main scaling factor is the size of struct page. At extreme ratios
+ * of base:extra, all the base memory can be filled with page
+ * structures for the extra memory, leaving no space for anything
+ * else.
+ *
+ * 10x seems like a reasonable balance between scaling flexibility and
+ * leaving a practically usable system.
+ */
+#define EXTRA_MEM_RATIO (10)
+
+static __init void xen_add_extra_mem(unsigned long pages)
+{
+ u64 size = (u64)pages * PAGE_SIZE;
+ u64 extra_start = xen_extra_mem_start + xen_extra_mem_size;
+
+ if (!pages)
+ return;
+
+ e820_add_region(extra_start, size, E820_RAM);
+ sanitize_e820_map(e820.map, ARRAY_SIZE(e820.map), &e820.nr_map);
+
+ memblock_x86_reserve_range(extra_start, extra_start + size, "XEN EXTRA");
+
+ xen_extra_mem_size += size;
+
+ xen_max_p2m_pfn = PFN_DOWN(extra_start + size);
+}
+
static unsigned long __init xen_release_chunk(phys_addr_t start_addr,
phys_addr_t end_addr)
{
@@ -83,16 +117,18 @@ static unsigned long __init xen_return_unused_memory(unsigned long max_pfn,
const struct e820map *e820)
{
phys_addr_t max_addr = PFN_PHYS(max_pfn);
- phys_addr_t last_end = 0;
+ phys_addr_t last_end = ISA_END_ADDRESS;
unsigned long released = 0;
int i;
+ /* Free any unused memory above the low 1Mbyte. */
for (i = 0; i < e820->nr_map && last_end < max_addr; i++) {
phys_addr_t end = e820->map[i].addr;
end = min(max_addr, end);
- released += xen_release_chunk(last_end, end);
- last_end = e820->map[i].addr + e820->map[i].size;
+ if (last_end < end)
+ released += xen_release_chunk(last_end, end);
+ last_end = max(last_end, e820->map[i].addr + e820->map[i].size);
}
if (last_end < max_addr)
@@ -105,21 +141,72 @@ static unsigned long __init xen_return_unused_memory(unsigned long max_pfn,
/**
* machine_specific_memory_setup - Hook for machine specific memory setup.
**/
-
char * __init xen_memory_setup(void)
{
+ static struct e820entry map[E820MAX] __initdata;
+
unsigned long max_pfn = xen_start_info->nr_pages;
+ unsigned long long mem_end;
+ int rc;
+ struct xen_memory_map memmap;
+ unsigned long extra_pages = 0;
+ unsigned long extra_limit;
+ int i;
+ int op;
max_pfn = min(MAX_DOMAIN_PAGES, max_pfn);
+ mem_end = PFN_PHYS(max_pfn);
+
+ memmap.nr_entries = E820MAX;
+ set_xen_guest_handle(memmap.buffer, map);
+
+ op = xen_initial_domain() ?
+ XENMEM_machine_memory_map :
+ XENMEM_memory_map;
+ rc = HYPERVISOR_memory_op(op, &memmap);
+ if (rc == -ENOSYS) {
+ BUG_ON(xen_initial_domain());
+ memmap.nr_entries = 1;
+ map[0].addr = 0ULL;
+ map[0].size = mem_end;
+ /* 8MB slack (to balance backend allocations). */
+ map[0].size += 8ULL << 20;
+ map[0].type = E820_RAM;
+ rc = 0;
+ }
+ BUG_ON(rc);
e820.nr_map = 0;
+ xen_extra_mem_start = mem_end;
+ for (i = 0; i < memmap.nr_entries; i++) {
+ unsigned long long end = map[i].addr + map[i].size;
+
+ if (map[i].type == E820_RAM && end > mem_end) {
+ /* RAM off the end - may be partially included */
+ u64 delta = min(map[i].size, end - mem_end);
- e820_add_region(0, PFN_PHYS((u64)max_pfn), E820_RAM);
+ map[i].size -= delta;
+ end -= delta;
+
+ extra_pages += PFN_DOWN(delta);
+ }
+
+ if (map[i].size > 0 && end > xen_extra_mem_start)
+ xen_extra_mem_start = end;
+
+ /* Add region if any remains */
+ if (map[i].size > 0)
+ e820_add_region(map[i].addr, map[i].size, map[i].type);
+ }
/*
- * Even though this is normal, usable memory under Xen, reserve
- * ISA memory anyway because too many things think they can poke
+ * In domU, the ISA region is normal, usable memory, but we
+ * reserve ISA memory anyway because too many things poke
* about in there.
+ *
+ * In Dom0, the host E820 information can leave gaps in the
+ * ISA range, which would cause us to release those pages. To
+ * avoid this, we unconditionally reserve them here.
*/
e820_add_region(ISA_START_ADDRESS, ISA_END_ADDRESS - ISA_START_ADDRESS,
E820_RESERVED);
@@ -136,23 +223,30 @@ char * __init xen_memory_setup(void)
sanitize_e820_map(e820.map, ARRAY_SIZE(e820.map), &e820.nr_map);
- xen_return_unused_memory(xen_start_info->nr_pages, &e820);
+ extra_pages += xen_return_unused_memory(xen_start_info->nr_pages, &e820);
- return "Xen";
-}
+ /*
+ * Clamp the amount of extra memory to an EXTRA_MEM_RATIO
+ * factor of the base size. On non-highmem systems, the base
+ * size is the full initial memory allocation; on highmem it
+ * is limited to the max size of lowmem, so that it doesn't
+ * get completely filled.
+ *
+ * In principle there could be a problem in lowmem systems if
+ * the initial memory is also very large with respect to
+ * lowmem, but we won't try to deal with that here.
+ */
+ extra_limit = min(EXTRA_MEM_RATIO * min(max_pfn, PFN_DOWN(MAXMEM)),
+ max_pfn + extra_pages);
-static void xen_idle(void)
-{
- local_irq_disable();
-
- if (need_resched())
- local_irq_enable();
- else {
- current_thread_info()->status &= ~TS_POLLING;
- smp_mb__after_clear_bit();
- safe_halt();
- current_thread_info()->status |= TS_POLLING;
- }
+ if (extra_limit >= max_pfn)
+ extra_pages = extra_limit - max_pfn;
+ else
+ extra_pages = 0;
+
+ xen_add_extra_mem(extra_pages);
+
+ return "Xen";
}
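
The clamp above is easy to check by hand. A short sketch of the same arithmetic with made-up pfn counts; the MAXMEM stand-in is purely illustrative:

#include <stdio.h>

#define EXTRA_MEM_RATIO 10UL

static unsigned long min_ul(unsigned long a, unsigned long b)
{
	return a < b ? a : b;
}

static unsigned long clamp_extra(unsigned long max_pfn,
				 unsigned long extra_pages,
				 unsigned long maxmem_pfn)
{
	unsigned long extra_limit =
		min_ul(EXTRA_MEM_RATIO * min_ul(max_pfn, maxmem_pfn),
		       max_pfn + extra_pages);

	return extra_limit >= max_pfn ? extra_limit - max_pfn : 0;
}

int main(void)
{
	/* ~1 GiB initial allocation, ~20 GiB of RAM off the end,
	 * illustrative 64 GiB MAXMEM limit (4 KiB pages throughout) */
	unsigned long max_pfn = 262144, extra = 5242880, maxmem = 16777216;

	/* prints 2359296: the extra memory is clamped to 9 * max_pfn */
	printf("clamped extra_pages = %lu\n",
	       clamp_extra(max_pfn, extra, maxmem));
	return 0;
}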
/*
@@ -224,9 +318,6 @@ void __cpuinit xen_enable_syscall(void)
void __init xen_arch_setup(void)
{
- struct physdev_set_iopl set_iopl;
- int rc;
-
xen_panic_handler_init();
HYPERVISOR_vm_assist(VMASST_CMD_enable, VMASST_TYPE_4gb_segments);
@@ -243,11 +334,6 @@ void __init xen_arch_setup(void)
xen_enable_sysenter();
xen_enable_syscall();
- set_iopl.iopl = 1;
- rc = HYPERVISOR_physdev_op(PHYSDEVOP_set_iopl, &set_iopl);
- if (rc != 0)
- printk(KERN_INFO "physdev_op failed %d\n", rc);
-
#ifdef CONFIG_ACPI
if (!(xen_start_info->flags & SIF_INITDOMAIN)) {
printk(KERN_INFO "ACPI in unprivileged domain disabled\n");
@@ -259,9 +345,11 @@ void __init xen_arch_setup(void)
MAX_GUEST_CMDLINE > COMMAND_LINE_SIZE ?
COMMAND_LINE_SIZE : MAX_GUEST_CMDLINE);
- pm_idle = xen_idle;
-
- paravirt_disable_iospace();
+ /* Set up idle, making sure it calls safe_halt() pvop */
+#ifdef CONFIG_X86_32
+ boot_cpu_data.hlt_works_ok = 1;
+#endif
+ pm_idle = default_idle;
fiddle_vdso();
}
diff --git a/arch/x86/xen/smp.c b/arch/x86/xen/smp.c
index 25f232b18a82..72a4c7959045 100644
--- a/arch/x86/xen/smp.c
+++ b/arch/x86/xen/smp.c
@@ -28,6 +28,7 @@
#include <asm/xen/interface.h>
#include <asm/xen/hypercall.h>
+#include <xen/xen.h>
#include <xen/page.h>
#include <xen/events.h>
@@ -156,11 +157,35 @@ static void __init xen_fill_possible_map(void)
{
int i, rc;
+ if (xen_initial_domain())
+ return;
+
+ for (i = 0; i < nr_cpu_ids; i++) {
+ rc = HYPERVISOR_vcpu_op(VCPUOP_is_up, i, NULL);
+ if (rc >= 0) {
+ num_processors++;
+ set_cpu_possible(i, true);
+ }
+ }
+}
+
+static void __init xen_filter_cpu_maps(void)
+{
+ int i, rc;
+
+ if (!xen_initial_domain())
+ return;
+
+ num_processors = 0;
+ disabled_cpus = 0;
for (i = 0; i < nr_cpu_ids; i++) {
rc = HYPERVISOR_vcpu_op(VCPUOP_is_up, i, NULL);
if (rc >= 0) {
num_processors++;
set_cpu_possible(i, true);
+ } else {
+ set_cpu_possible(i, false);
+ set_cpu_present(i, false);
}
}
}
@@ -174,6 +199,7 @@ static void __init xen_smp_prepare_boot_cpu(void)
old memory can be recycled */
make_lowmem_page_readwrite(xen_initial_gdt);
+ xen_filter_cpu_maps();
xen_setup_vcpu_info_placement();
}
@@ -400,9 +426,9 @@ static void stop_self(void *v)
BUG();
}
-static void xen_smp_send_stop(void)
+static void xen_stop_other_cpus(int wait)
{
- smp_call_function(stop_self, NULL, 0);
+ smp_call_function(stop_self, NULL, wait);
}
static void xen_smp_send_reschedule(int cpu)
@@ -470,7 +496,7 @@ static const struct smp_ops xen_smp_ops __initdata = {
.cpu_disable = xen_cpu_disable,
.play_dead = xen_play_dead,
- .smp_send_stop = xen_smp_send_stop,
+ .stop_other_cpus = xen_stop_other_cpus,
.smp_send_reschedule = xen_smp_send_reschedule,
.send_call_func_ipi = xen_smp_send_call_function_ipi,
diff --git a/arch/x86/xen/suspend.c b/arch/x86/xen/suspend.c
index 1d789d56877c..9bbd63a129b5 100644
--- a/arch/x86/xen/suspend.c
+++ b/arch/x86/xen/suspend.c
@@ -31,6 +31,7 @@ void xen_hvm_post_suspend(int suspend_cancelled)
int cpu;
xen_hvm_init_shared_info();
xen_callback_vector();
+ xen_unplug_emulated_devices();
if (xen_feature(XENFEAT_hvm_safe_pvclock)) {
for_each_online_cpu(cpu) {
xen_setup_runstate_info(cpu);
diff --git a/arch/x86/xen/time.c b/arch/x86/xen/time.c
index b2bb5aa3b054..5da5e53fb94c 100644
--- a/arch/x86/xen/time.c
+++ b/arch/x86/xen/time.c
@@ -426,6 +426,8 @@ void xen_timer_resume(void)
{
int cpu;
+ pvclock_resume();
+
if (xen_clockevent != &xen_vcpuop_clockevent)
return;
diff --git a/arch/x86/xen/xen-ops.h b/arch/x86/xen/xen-ops.h
index 7c8ab86163e9..9d41bf985757 100644
--- a/arch/x86/xen/xen-ops.h
+++ b/arch/x86/xen/xen-ops.h
@@ -30,6 +30,9 @@ void xen_setup_machphys_mapping(void);
pgd_t *xen_setup_kernel_pagetable(pgd_t *pgd, unsigned long max_pfn);
void xen_ident_map_ISA(void);
void xen_reserve_top(void);
+extern unsigned long xen_max_p2m_pfn;
+
+void xen_set_pat(u64);
char * __init xen_memory_setup(void);
void __init xen_arch_setup(void);
@@ -40,7 +43,7 @@ void xen_vcpu_restore(void);
void xen_callback_vector(void);
void xen_hvm_init_shared_info(void);
-void __init xen_unplug_emulated_devices(void);
+void xen_unplug_emulated_devices(void);
void __init xen_build_dynamic_phys_to_machine(void);
diff --git a/arch/xtensa/Kconfig b/arch/xtensa/Kconfig
index 0859bfd8ae93..d373d159e75e 100644
--- a/arch/xtensa/Kconfig
+++ b/arch/xtensa/Kconfig
@@ -1,8 +1,3 @@
-# For a description of the syntax of this configuration file,
-# see Documentation/kbuild/kconfig-language.txt.
-
-mainmenu "Linux/Xtensa Kernel Configuration"
-
config FRAME_POINTER
def_bool n
diff --git a/arch/xtensa/include/asm/pgtable.h b/arch/xtensa/include/asm/pgtable.h
index 76bf35554117..b03c043ce75b 100644
--- a/arch/xtensa/include/asm/pgtable.h
+++ b/arch/xtensa/include/asm/pgtable.h
@@ -324,10 +324,7 @@ ptep_set_wrprotect(struct mm_struct *mm, unsigned long addr, pte_t *ptep)
#define pte_offset_kernel(dir,addr) \
((pte_t*) pmd_page_vaddr(*(dir)) + pte_index(addr))
#define pte_offset_map(dir,addr) pte_offset_kernel((dir),(addr))
-#define pte_offset_map_nested(dir,addr) pte_offset_kernel((dir),(addr))
-
#define pte_unmap(pte) do { } while (0)
-#define pte_unmap_nested(pte) do { } while (0)
/*
diff --git a/arch/xtensa/kernel/ptrace.c b/arch/xtensa/kernel/ptrace.c
index 9d4e1ceb3f09..c72c9473ef99 100644
--- a/arch/xtensa/kernel/ptrace.c
+++ b/arch/xtensa/kernel/ptrace.c
@@ -256,9 +256,11 @@ int ptrace_pokeusr(struct task_struct *child, long regno, long val)
return 0;
}
-long arch_ptrace(struct task_struct *child, long request, long addr, long data)
+long arch_ptrace(struct task_struct *child, long request,
+ unsigned long addr, unsigned long data)
{
int ret = -EPERM;
+ void __user *datap = (void __user *) data;
switch (request) {
case PTRACE_PEEKTEXT: /* read word at location addr. */
@@ -267,7 +269,7 @@ long arch_ptrace(struct task_struct *child, long request, long addr, long data)
break;
case PTRACE_PEEKUSR: /* read register specified by addr. */
- ret = ptrace_peekusr(child, addr, (void __user *) data);
+ ret = ptrace_peekusr(child, addr, datap);
break;
case PTRACE_POKETEXT: /* write the word at location addr. */
@@ -280,19 +282,19 @@ long arch_ptrace(struct task_struct *child, long request, long addr, long data)
break;
case PTRACE_GETREGS:
- ret = ptrace_getregs(child, (void __user *) data);
+ ret = ptrace_getregs(child, datap);
break;
case PTRACE_SETREGS:
- ret = ptrace_setregs(child, (void __user *) data);
+ ret = ptrace_setregs(child, datap);
break;
case PTRACE_GETXTREGS:
- ret = ptrace_getxregs(child, (void __user *) data);
+ ret = ptrace_getxregs(child, datap);
break;
case PTRACE_SETXTREGS:
- ret = ptrace_setxregs(child, (void __user *) data);
+ ret = ptrace_setxregs(child, datap);
break;
default:
diff --git a/block/blk-core.c b/block/blk-core.c
index 881fe44ec7da..4ce953f1b390 100644
--- a/block/blk-core.c
+++ b/block/blk-core.c
@@ -64,15 +64,13 @@ static void drive_stat_acct(struct request *rq, int new_io)
return;
cpu = part_stat_lock();
+ part = disk_map_sector_rcu(rq->rq_disk, blk_rq_pos(rq));
- if (!new_io) {
- part = rq->part;
+ if (!new_io)
part_stat_inc(cpu, part, merges[rw]);
- } else {
- part = disk_map_sector_rcu(rq->rq_disk, blk_rq_pos(rq));
+ else {
part_round_stats(cpu, part);
part_inc_in_flight(part, rw);
- rq->part = part;
}
part_stat_unlock();
@@ -130,7 +128,6 @@ void blk_rq_init(struct request_queue *q, struct request *rq)
rq->ref_count = 1;
rq->start_time = jiffies;
set_start_time_ns(rq);
- rq->part = NULL;
}
EXPORT_SYMBOL(blk_rq_init);
@@ -805,16 +802,11 @@ static struct request *get_request(struct request_queue *q, int rw_flags,
rl->starved[is_sync] = 0;
priv = !test_bit(QUEUE_FLAG_ELVSWITCH, &q->queue_flags);
- if (priv) {
+ if (priv)
rl->elvpriv++;
- /*
- * Don't do stats for non-priv requests
- */
- if (blk_queue_io_stat(q))
- rw_flags |= REQ_IO_STAT;
- }
-
+ if (blk_queue_io_stat(q))
+ rw_flags |= REQ_IO_STAT;
spin_unlock_irq(q->queue_lock);
rq = blk_alloc_request(q, rw_flags, priv, gfp_mask);
@@ -1202,13 +1194,6 @@ static int __make_request(struct request_queue *q, struct bio *bio)
int where = ELEVATOR_INSERT_SORT;
int rw_flags;
- /* REQ_HARDBARRIER is no more */
- if (WARN_ONCE(bio->bi_rw & REQ_HARDBARRIER,
- "block: HARDBARRIER is deprecated, use FLUSH/FUA instead\n")) {
- bio_endio(bio, -EOPNOTSUPP);
- return 0;
- }
-
/*
* low level driver can indicate that it wants pages above a
* certain limit bounced to low memory (ie for highmem, or even
@@ -1359,7 +1344,7 @@ static void handle_bad_sector(struct bio *bio)
bdevname(bio->bi_bdev, b),
bio->bi_rw,
(unsigned long long)bio->bi_sector + bio_sectors(bio),
- (long long)(bio->bi_bdev->bd_inode->i_size >> 9));
+ (long long)(i_size_read(bio->bi_bdev->bd_inode) >> 9));
set_bit(BIO_EOF, &bio->bi_flags);
}
@@ -1412,7 +1397,7 @@ static inline int bio_check_eod(struct bio *bio, unsigned int nr_sectors)
return 0;
/* Test device or partition size, when known. */
- maxsector = bio->bi_bdev->bd_inode->i_size >> 9;
+ maxsector = i_size_read(bio->bi_bdev->bd_inode) >> 9;
if (maxsector) {
sector_t sector = bio->bi_sector;
@@ -1791,7 +1776,7 @@ static void blk_account_io_completion(struct request *req, unsigned int bytes)
int cpu;
cpu = part_stat_lock();
- part = req->part;
+ part = disk_map_sector_rcu(req->rq_disk, blk_rq_pos(req));
part_stat_add(cpu, part, sectors[rw], bytes >> 9);
part_stat_unlock();
}
@@ -1811,7 +1796,7 @@ static void blk_account_io_done(struct request *req)
int cpu;
cpu = part_stat_lock();
- part = req->part;
+ part = disk_map_sector_rcu(req->rq_disk, blk_rq_pos(req));
part_stat_inc(cpu, part, ios[rw]);
part_stat_add(cpu, part, ticks[rw], duration);
diff --git a/block/blk-ioc.c b/block/blk-ioc.c
index d22c4c55c406..3c7a339fe381 100644
--- a/block/blk-ioc.c
+++ b/block/blk-ioc.c
@@ -153,20 +153,6 @@ struct io_context *get_io_context(gfp_t gfp_flags, int node)
}
EXPORT_SYMBOL(get_io_context);
-void copy_io_context(struct io_context **pdst, struct io_context **psrc)
-{
- struct io_context *src = *psrc;
- struct io_context *dst = *pdst;
-
- if (src) {
- BUG_ON(atomic_long_read(&src->refcount) == 0);
- atomic_long_inc(&src->refcount);
- put_io_context(dst);
- *pdst = src;
- }
-}
-EXPORT_SYMBOL(copy_io_context);
-
static int __init blk_ioc_init(void)
{
iocontext_cachep = kmem_cache_create("blkdev_ioc",
diff --git a/block/blk-map.c b/block/blk-map.c
index d4a586d8691e..5d5dbe47c228 100644
--- a/block/blk-map.c
+++ b/block/blk-map.c
@@ -205,6 +205,8 @@ int blk_rq_map_user_iov(struct request_queue *q, struct request *rq,
unaligned = 1;
break;
}
+ if (!iov[i].iov_len)
+ return -EINVAL;
}
if (unaligned || (q->dma_pad_mask & len) || map_data)
diff --git a/block/blk-merge.c b/block/blk-merge.c
index 0a2fd8a48a38..77b7c26df6b5 100644
--- a/block/blk-merge.c
+++ b/block/blk-merge.c
@@ -351,7 +351,7 @@ static void blk_account_io_merge(struct request *req)
int cpu;
cpu = part_stat_lock();
- part = req->part;
+ part = disk_map_sector_rcu(req->rq_disk, blk_rq_pos(req));
part_round_stats(cpu, part);
part_dec_in_flight(part, rq_data_dir(req));
diff --git a/block/blk-throttle.c b/block/blk-throttle.c
index 56ad4531b412..004be80fd894 100644
--- a/block/blk-throttle.c
+++ b/block/blk-throttle.c
@@ -645,7 +645,7 @@ static int throtl_dispatch_tg(struct throtl_data *td, struct throtl_grp *tg,
{
unsigned int nr_reads = 0, nr_writes = 0;
unsigned int max_nr_reads = throtl_grp_quantum*3/4;
- unsigned int max_nr_writes = throtl_grp_quantum - nr_reads;
+ unsigned int max_nr_writes = throtl_grp_quantum - max_nr_reads;
struct bio *bio;
/* Try to dispatch 75% READS and 25% WRITES */
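
The blk-throttle fix above computes the write budget from max_nr_reads rather than from nr_reads, which is still zero at that point; otherwise writes could consume the whole quantum instead of the intended 25%. A standalone arithmetic check (the quantum value of 8 is only an assumption for illustration):

#include <stdio.h>

int main(void)
{
	unsigned int quantum = 8;                        /* assumed throtl_grp_quantum */
	unsigned int nr_reads = 0;                       /* nothing dispatched yet */
	unsigned int max_nr_reads = quantum * 3 / 4;     /* 6: 75% reserved for reads */
	unsigned int buggy_writes = quantum - nr_reads;      /* 8: whole quantum */
	unsigned int fixed_writes = quantum - max_nr_reads;  /* 2: the intended 25% */

	printf("reads=%u, writes before fix=%u, writes after fix=%u\n",
	       max_nr_reads, buggy_writes, fixed_writes);
	return 0;
}
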
diff --git a/block/blk.h b/block/blk.h
index 1e675e5ade02..2db8f32838e7 100644
--- a/block/blk.h
+++ b/block/blk.h
@@ -116,6 +116,10 @@ void blk_queue_congestion_threshold(struct request_queue *q);
int blk_dev_init(void);
+void elv_quiesce_start(struct request_queue *q);
+void elv_quiesce_end(struct request_queue *q);
+
+
/*
* Return the threshold (number of used requests) at which the queue is
* considered to be congested. It includes a little hysteresis to keep the
* considered to be congested. It includes a little hysteresis to keep the
diff --git a/block/compat_ioctl.c b/block/compat_ioctl.c
index 119f07b74dc0..cc3eb78e333a 100644
--- a/block/compat_ioctl.c
+++ b/block/compat_ioctl.c
@@ -8,7 +8,6 @@
#include <linux/hdreg.h>
#include <linux/slab.h>
#include <linux/syscalls.h>
-#include <linux/smp_lock.h>
#include <linux/types.h>
#include <linux/uaccess.h>
@@ -744,13 +743,13 @@ long compat_blkdev_ioctl(struct file *file, unsigned cmd, unsigned long arg)
bdi->ra_pages = (arg * 512) / PAGE_CACHE_SIZE;
return 0;
case BLKGETSIZE:
- size = bdev->bd_inode->i_size;
+ size = i_size_read(bdev->bd_inode);
if ((size >> 9) > ~0UL)
return -EFBIG;
return compat_put_ulong(arg, size >> 9);
case BLKGETSIZE64_32:
- return compat_put_u64(arg, bdev->bd_inode->i_size);
+ return compat_put_u64(arg, i_size_read(bdev->bd_inode));
case BLKTRACESETUP32:
case BLKTRACESTART: /* compatible */
diff --git a/block/elevator.c b/block/elevator.c
index 282e8308f7e2..2569512830d3 100644
--- a/block/elevator.c
+++ b/block/elevator.c
@@ -429,7 +429,7 @@ void elv_dispatch_sort(struct request_queue *q, struct request *rq)
q->nr_sorted--;
boundary = q->end_sector;
- stop_flags = REQ_SOFTBARRIER | REQ_HARDBARRIER | REQ_STARTED;
+ stop_flags = REQ_SOFTBARRIER | REQ_STARTED;
list_for_each_prev(entry, &q->queue_head) {
struct request *pos = list_entry_rq(entry);
@@ -691,7 +691,7 @@ void elv_insert(struct request_queue *q, struct request *rq, int where)
void __elv_add_request(struct request_queue *q, struct request *rq, int where,
int plug)
{
- if (rq->cmd_flags & (REQ_SOFTBARRIER | REQ_HARDBARRIER)) {
+ if (rq->cmd_flags & REQ_SOFTBARRIER) {
/* barriers are scheduling boundary, update end_sector */
if (rq->cmd_type == REQ_TYPE_FS ||
(rq->cmd_flags & REQ_DISCARD)) {
diff --git a/block/genhd.c b/block/genhd.c
index a8adf96a4b41..5fa2b44a72ff 100644
--- a/block/genhd.c
+++ b/block/genhd.c
@@ -929,15 +929,8 @@ static void disk_free_ptbl_rcu_cb(struct rcu_head *head)
{
struct disk_part_tbl *ptbl =
container_of(head, struct disk_part_tbl, rcu_head);
- struct gendisk *disk = ptbl->disk;
- struct request_queue *q = disk->queue;
- unsigned long flags;
kfree(ptbl);
-
- spin_lock_irqsave(q->queue_lock, flags);
- elv_quiesce_end(q);
- spin_unlock_irqrestore(q->queue_lock, flags);
}
/**
@@ -955,17 +948,11 @@ static void disk_replace_part_tbl(struct gendisk *disk,
struct disk_part_tbl *new_ptbl)
{
struct disk_part_tbl *old_ptbl = disk->part_tbl;
- struct request_queue *q = disk->queue;
rcu_assign_pointer(disk->part_tbl, new_ptbl);
if (old_ptbl) {
rcu_assign_pointer(old_ptbl->last_lookup, NULL);
-
- spin_lock_irq(q->queue_lock);
- elv_quiesce_start(q);
- spin_unlock_irq(q->queue_lock);
-
call_rcu(&old_ptbl->rcu_head, disk_free_ptbl_rcu_cb);
}
}
@@ -1006,7 +993,6 @@ int disk_expand_part_tbl(struct gendisk *disk, int partno)
return -ENOMEM;
new_ptbl->len = target;
- new_ptbl->disk = disk;
for (i = 0; i < len; i++)
rcu_assign_pointer(new_ptbl->part[i], old_ptbl->part[i]);
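
With the elevator quiesce calls removed, the genhd.c hunks leave the plain RCU publish-and-defer-free pattern: publish the new table with rcu_assign_pointer() and free the old one from a call_rcu() callback once no reader can still see it. A minimal kernel-style sketch with hypothetical names (part_tbl_sketch, free_tbl_rcu, replace_tbl):

#include <linux/kernel.h>
#include <linux/rcupdate.h>
#include <linux/slab.h>

struct part_tbl_sketch {
	struct rcu_head rcu_head;
	int len;
};

/* Runs after a grace period, when no reader can hold the old pointer. */
static void free_tbl_rcu(struct rcu_head *head)
{
	kfree(container_of(head, struct part_tbl_sketch, rcu_head));
}

/* Publish @new_tbl and defer freeing @old_tbl until readers are done. */
static void replace_tbl(struct part_tbl_sketch **slot,
			struct part_tbl_sketch *old_tbl,
			struct part_tbl_sketch *new_tbl)
{
	rcu_assign_pointer(*slot, new_tbl);
	if (old_tbl)
		call_rcu(&old_tbl->rcu_head, free_tbl_rcu);
}
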
diff --git a/block/ioctl.c b/block/ioctl.c
index d724ceb1d465..a9a302eba01e 100644
--- a/block/ioctl.c
+++ b/block/ioctl.c
@@ -5,7 +5,6 @@
#include <linux/hdreg.h>
#include <linux/backing-dev.h>
#include <linux/buffer_head.h>
-#include <linux/smp_lock.h>
#include <linux/blktrace_api.h>
#include <asm/uaccess.h>
@@ -125,7 +124,7 @@ static int blk_ioctl_discard(struct block_device *bdev, uint64_t start,
start >>= 9;
len >>= 9;
- if (start + len > (bdev->bd_inode->i_size >> 9))
+ if (start + len > (i_size_read(bdev->bd_inode) >> 9))
return -EINVAL;
if (secure)
flags |= BLKDEV_DISCARD_SECURE;
@@ -242,6 +241,7 @@ int blkdev_ioctl(struct block_device *bdev, fmode_t mode, unsigned cmd,
* We need to set the startsect first, the driver may
* want to override it.
*/
+ memset(&geo, 0, sizeof(geo));
geo.start = get_start_sect(bdev);
ret = disk->fops->getgeo(bdev, &geo);
if (ret)
@@ -307,12 +307,12 @@ int blkdev_ioctl(struct block_device *bdev, fmode_t mode, unsigned cmd,
ret = blkdev_reread_part(bdev);
break;
case BLKGETSIZE:
- size = bdev->bd_inode->i_size;
+ size = i_size_read(bdev->bd_inode);
if ((size >> 9) > ~0UL)
return -EFBIG;
return put_ulong(arg, size >> 9);
case BLKGETSIZE64:
- return put_u64(arg, bdev->bd_inode->i_size);
+ return put_u64(arg, i_size_read(bdev->bd_inode));
case BLKTRACESTART:
case BLKTRACESTOP:
case BLKTRACESETUP:
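
The HDIO_GETGEO hunk above zeroes the hd_geometry structure before handing it to the driver's getgeo callback, so fields the driver never writes cannot leak stale kernel stack bytes to user space. A user-space sketch of the same defensive pattern, with hypothetical names (geometry, query_geometry):

#include <string.h>

struct geometry {
	unsigned char heads;
	unsigned char sectors;
	unsigned short cylinders;
	unsigned long start;
};

typedef int (*getgeo_fn)(struct geometry *geo);

/* Zero the struct first; a callback that fills only some fields then
 * cannot expose whatever happened to be on the stack. */
static int query_geometry(getgeo_fn getgeo, struct geometry *out)
{
	struct geometry geo;

	memset(&geo, 0, sizeof(geo));
	geo.start = 0;                  /* preset a default the callback may override */
	if (getgeo(&geo))
		return -1;
	*out = geo;
	return 0;
}
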
diff --git a/block/scsi_ioctl.c b/block/scsi_ioctl.c
index a8b5a10eb5b0..4f4230b79bb6 100644
--- a/block/scsi_ioctl.c
+++ b/block/scsi_ioctl.c
@@ -321,33 +321,47 @@ static int sg_io(struct request_queue *q, struct gendisk *bd_disk,
if (hdr->iovec_count) {
const int size = sizeof(struct sg_iovec) * hdr->iovec_count;
size_t iov_data_len;
- struct sg_iovec *iov;
+ struct sg_iovec *sg_iov;
+ struct iovec *iov;
+ int i;
- iov = kmalloc(size, GFP_KERNEL);
- if (!iov) {
+ sg_iov = kmalloc(size, GFP_KERNEL);
+ if (!sg_iov) {
ret = -ENOMEM;
goto out;
}
- if (copy_from_user(iov, hdr->dxferp, size)) {
- kfree(iov);
+ if (copy_from_user(sg_iov, hdr->dxferp, size)) {
+ kfree(sg_iov);
ret = -EFAULT;
goto out;
}
+ /*
+ * Sum up the vecs, making sure they don't overflow
+ */
+ iov = (struct iovec *) sg_iov;
+ iov_data_len = 0;
+ for (i = 0; i < hdr->iovec_count; i++) {
+ if (iov_data_len + iov[i].iov_len < iov_data_len) {
+ kfree(sg_iov);
+ ret = -EINVAL;
+ goto out;
+ }
+ iov_data_len += iov[i].iov_len;
+ }
+
/* SG_IO howto says that the shorter of the two wins */
- iov_data_len = iov_length((struct iovec *)iov,
- hdr->iovec_count);
if (hdr->dxfer_len < iov_data_len) {
- hdr->iovec_count = iov_shorten((struct iovec *)iov,
+ hdr->iovec_count = iov_shorten(iov,
hdr->iovec_count,
hdr->dxfer_len);
iov_data_len = hdr->dxfer_len;
}
- ret = blk_rq_map_user_iov(q, rq, NULL, iov, hdr->iovec_count,
+ ret = blk_rq_map_user_iov(q, rq, NULL, sg_iov, hdr->iovec_count,
iov_data_len, GFP_KERNEL);
- kfree(iov);
+ kfree(sg_iov);
} else if (hdr->dxfer_len)
ret = blk_rq_map_user(q, rq, NULL, hdr->dxferp, hdr->dxfer_len,
GFP_KERNEL);
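
The sg_io() rework above replaces iov_length() with an explicit loop that detects wraparound while summing the element lengths. A standalone sketch of that overflow check, using the hypothetical helper name total_iov_len:

#include <errno.h>
#include <stddef.h>
#include <sys/uio.h>

/* Sum the iovec lengths; fail with -EINVAL if the total would wrap,
 * since a wrapped size could describe far more data than intended. */
static int total_iov_len(const struct iovec *iov, int count, size_t *total)
{
	size_t sum = 0;
	int i;

	for (i = 0; i < count; i++) {
		if (sum + iov[i].iov_len < sum)
			return -EINVAL;
		sum += iov[i].iov_len;
	}
	*total = sum;
	return 0;
}
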
diff --git a/crypto/async_tx/Kconfig b/crypto/async_tx/Kconfig
index 5de2ed13b35d..1b11abbb5c91 100644
--- a/crypto/async_tx/Kconfig
+++ b/crypto/async_tx/Kconfig
@@ -24,19 +24,6 @@ config ASYNC_RAID6_RECOV
select ASYNC_PQ
select ASYNC_XOR
-config ASYNC_RAID6_TEST
- tristate "Self test for hardware accelerated raid6 recovery"
- depends on ASYNC_RAID6_RECOV
- select ASYNC_MEMCPY
- ---help---
- This is a one-shot self test that permutes through the
- recovery of all the possible two disk failure scenarios for a
- N-disk array. Recovery is performed with the asynchronous
- raid6 recovery routines, and will optionally use an offload
- engine if one is available.
-
- If unsure, say N.
-
config ASYNC_TX_DISABLE_PQ_VAL_DMA
bool
diff --git a/crypto/async_tx/async_memcpy.c b/crypto/async_tx/async_memcpy.c
index 0ec1fb69d4ea..518c22bd9562 100644
--- a/crypto/async_tx/async_memcpy.c
+++ b/crypto/async_tx/async_memcpy.c
@@ -83,8 +83,8 @@ async_memcpy(struct page *dest, struct page *src, unsigned int dest_offset,
memcpy(dest_buf, src_buf, len);
- kunmap_atomic(dest_buf, KM_USER0);
kunmap_atomic(src_buf, KM_USER1);
+ kunmap_atomic(dest_buf, KM_USER0);
async_tx_sync_epilog(submit);
}
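
The async_memcpy and blkcipher hunks reorder the kunmap_atomic() calls so mappings are released in the reverse order they were taken, which is what a stack-based atomic kmap implementation requires. A kernel-style sketch, assuming the pre-2.6.37 kmap_atomic(page, km_type) API and the hypothetical name copy_page_sketch:

#include <linux/highmem.h>
#include <linux/string.h>

static void copy_page_sketch(struct page *dst, struct page *src, size_t len)
{
	void *d = kmap_atomic(dst, KM_USER0);   /* mapped first  */
	void *s = kmap_atomic(src, KM_USER1);   /* mapped second */

	memcpy(d, s, len);

	kunmap_atomic(s, KM_USER1);             /* unmap in reverse (LIFO) order */
	kunmap_atomic(d, KM_USER0);
}
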
diff --git a/crypto/blkcipher.c b/crypto/blkcipher.c
index 90d26c91f4e9..7a7219266e3c 100644
--- a/crypto/blkcipher.c
+++ b/crypto/blkcipher.c
@@ -89,9 +89,9 @@ static inline unsigned int blkcipher_done_fast(struct blkcipher_walk *walk,
memcpy(walk->dst.virt.addr, walk->page, n);
blkcipher_unmap_dst(walk);
} else if (!(walk->flags & BLKCIPHER_WALK_PHYS)) {
- blkcipher_unmap_src(walk);
if (walk->flags & BLKCIPHER_WALK_DIFF)
blkcipher_unmap_dst(walk);
+ blkcipher_unmap_src(walk);
}
scatterwalk_advance(&walk->in, n);
diff --git a/crypto/pcrypt.c b/crypto/pcrypt.c
index de3078215fe6..75586f1f86e7 100644
--- a/crypto/pcrypt.c
+++ b/crypto/pcrypt.c
@@ -504,7 +504,6 @@ err:
static void pcrypt_fini_padata(struct padata_pcrypt *pcrypt)
{
- kobject_put(&pcrypt->pinst->kobj);
free_cpumask_var(pcrypt->cb_cpumask->mask);
kfree(pcrypt->cb_cpumask);
diff --git a/drivers/Makefile b/drivers/Makefile
index a2aea53a75ed..f3ebb30f1b7f 100644
--- a/drivers/Makefile
+++ b/drivers/Makefile
@@ -26,6 +26,7 @@ obj-$(CONFIG_REGULATOR) += regulator/
# char/ comes before serial/ etc so that the VT console is the boot-time
# default.
+obj-y += tty/
obj-y += char/
# gpu/ comes after char for AGP vs DRM startup
@@ -51,7 +52,6 @@ obj-y += net/
obj-$(CONFIG_ATM) += atm/
obj-$(CONFIG_FUSION) += message/
obj-y += firewire/
-obj-y += ieee1394/
obj-$(CONFIG_UIO) += uio/
obj-y += cdrom/
obj-y += auxdisplay/
@@ -92,6 +92,7 @@ obj-$(CONFIG_EISA) += eisa/
obj-y += lguest/
obj-$(CONFIG_CPU_FREQ) += cpufreq/
obj-$(CONFIG_CPU_IDLE) += cpuidle/
+obj-$(CONFIG_DMA_ENGINE) += dma/
obj-$(CONFIG_MMC) += mmc/
obj-$(CONFIG_MEMSTICK) += memstick/
obj-$(CONFIG_NEW_LEDS) += leds/
@@ -104,7 +105,6 @@ obj-$(CONFIG_ARCH_SHMOBILE) += sh/
ifndef CONFIG_ARCH_USES_GETTIMEOFFSET
obj-y += clocksource/
endif
-obj-$(CONFIG_DMA_ENGINE) += dma/
obj-$(CONFIG_DCA) += dca/
obj-$(CONFIG_HID) += hid/
obj-$(CONFIG_PPC_PS3) += ps3/
diff --git a/drivers/acpi/Kconfig b/drivers/acpi/Kconfig
index 88681aca88c5..3f3489c5ca8c 100644
--- a/drivers/acpi/Kconfig
+++ b/drivers/acpi/Kconfig
@@ -9,7 +9,6 @@ menuconfig ACPI
depends on PCI
depends on PM
select PNP
- select CPU_IDLE
default y
help
Advanced Configuration and Power Interface (ACPI) support for
@@ -66,7 +65,6 @@ config ACPI_PROCFS
config ACPI_PROCFS_POWER
bool "Deprecated power /proc/acpi directories"
depends on PROC_FS
- default y
help
For backwards compatibility, this option allows
deprecated power /proc/acpi/ directories to exist, even when
@@ -90,13 +88,6 @@ config ACPI_POWER_METER
To compile this driver as a module, choose M here:
the module will be called power-meter.
-config ACPI_SYSFS_POWER
- bool "Future power /sys interface"
- select POWER_SUPPLY
- default y
- help
- Say N to disable power /sys interface
-
config ACPI_EC_DEBUGFS
tristate "EC read/write access through /sys/kernel/debug/ec"
default n
@@ -136,6 +127,7 @@ config ACPI_PROC_EVENT
config ACPI_AC
tristate "AC Adapter"
depends on X86
+ select POWER_SUPPLY
default y
help
This driver supports the AC Adapter object, which indicates
@@ -148,6 +140,7 @@ config ACPI_AC
config ACPI_BATTERY
tristate "Battery"
depends on X86
+ select POWER_SUPPLY
default y
help
This driver adds support for battery information through
@@ -206,6 +199,7 @@ config ACPI_DOCK
config ACPI_PROCESSOR
tristate "Processor"
select THERMAL
+ select CPU_IDLE
default y
help
This driver installs ACPI as the idle handler for Linux and uses
@@ -364,6 +358,7 @@ config ACPI_HOTPLUG_MEMORY
config ACPI_SBS
tristate "Smart Battery System"
depends on X86
+ select POWER_SUPPLY
help
This driver supports the Smart Battery System, another
type of access to battery information, found on some laptops.
diff --git a/drivers/acpi/ac.c b/drivers/acpi/ac.c
index 56205a0b85df..ba9afeaa23ac 100644
--- a/drivers/acpi/ac.c
+++ b/drivers/acpi/ac.c
@@ -32,9 +32,7 @@
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#endif
-#ifdef CONFIG_ACPI_SYSFS_POWER
#include <linux/power_supply.h>
-#endif
#include <acpi/acpi_bus.h>
#include <acpi/acpi_drivers.h>
@@ -86,9 +84,7 @@ static struct acpi_driver acpi_ac_driver = {
};
struct acpi_ac {
-#ifdef CONFIG_ACPI_SYSFS_POWER
struct power_supply charger;
-#endif
struct acpi_device * device;
unsigned long long state;
};
@@ -104,7 +100,6 @@ static const struct file_operations acpi_ac_fops = {
.release = single_release,
};
#endif
-#ifdef CONFIG_ACPI_SYSFS_POWER
static int get_ac_property(struct power_supply *psy,
enum power_supply_property psp,
union power_supply_propval *val)
@@ -123,7 +118,6 @@ static int get_ac_property(struct power_supply *psy,
static enum power_supply_property ac_props[] = {
POWER_SUPPLY_PROP_ONLINE,
};
-#endif
/* --------------------------------------------------------------------------
AC Adapter Management
-------------------------------------------------------------------------- */
@@ -247,9 +241,7 @@ static void acpi_ac_notify(struct acpi_device *device, u32 event)
dev_name(&device->dev), event,
(u32) ac->state);
acpi_notifier_call_chain(device, event, (u32) ac->state);
-#ifdef CONFIG_ACPI_SYSFS_POWER
kobject_uevent(&ac->charger.dev->kobj, KOBJ_CHANGE);
-#endif
}
return;
@@ -282,14 +274,12 @@ static int acpi_ac_add(struct acpi_device *device)
#endif
if (result)
goto end;
-#ifdef CONFIG_ACPI_SYSFS_POWER
ac->charger.name = acpi_device_bid(device);
ac->charger.type = POWER_SUPPLY_TYPE_MAINS;
ac->charger.properties = ac_props;
ac->charger.num_properties = ARRAY_SIZE(ac_props);
ac->charger.get_property = get_ac_property;
power_supply_register(&ac->device->dev, &ac->charger);
-#endif
printk(KERN_INFO PREFIX "%s [%s] (%s)\n",
acpi_device_name(device), acpi_device_bid(device),
@@ -316,10 +306,8 @@ static int acpi_ac_resume(struct acpi_device *device)
old_state = ac->state;
if (acpi_ac_get_state(ac))
return 0;
-#ifdef CONFIG_ACPI_SYSFS_POWER
if (old_state != ac->state)
kobject_uevent(&ac->charger.dev->kobj, KOBJ_CHANGE);
-#endif
return 0;
}
@@ -333,10 +321,8 @@ static int acpi_ac_remove(struct acpi_device *device, int type)
ac = acpi_driver_data(device);
-#ifdef CONFIG_ACPI_SYSFS_POWER
if (ac->charger.dev)
power_supply_unregister(&ac->charger);
-#endif
#ifdef CONFIG_ACPI_PROCFS_POWER
acpi_ac_remove_fs(device);
#endif
diff --git a/drivers/acpi/acpica/Makefile b/drivers/acpi/acpica/Makefile
index d93cc06f4bf8..a7e1d1aa4107 100644
--- a/drivers/acpi/acpica/Makefile
+++ b/drivers/acpi/acpica/Makefile
@@ -21,7 +21,7 @@ acpi-y += exconfig.o exfield.o exnames.o exoparg6.o exresolv.o exstorob.o\
excreate.o exmisc.o exoparg2.o exregion.o exstore.o exutils.o \
exdump.o exmutex.o exoparg3.o exresnte.o exstoren.o exdebug.o
-acpi-y += hwacpi.o hwgpe.o hwregs.o hwsleep.o hwxface.o hwvalid.o
+acpi-y += hwacpi.o hwgpe.o hwregs.o hwsleep.o hwxface.o hwvalid.o hwpci.o
acpi-$(ACPI_FUTURE_USAGE) += hwtimer.o
@@ -44,4 +44,5 @@ acpi-y += tbxface.o tbinstal.o tbutils.o tbfind.o tbfadt.o tbxfroot.o
acpi-y += utalloc.o utdebug.o uteval.o utinit.o utmisc.o utxface.o \
utcopy.o utdelete.o utglobal.o utmath.o utobject.o \
- utstate.o utmutex.o utobject.o utresrc.o utlock.o utids.o
+ utstate.o utmutex.o utobject.o utresrc.o utlock.o utids.o \
+ utosi.o utxferror.o
diff --git a/drivers/acpi/acpica/acdebug.h b/drivers/acpi/acpica/acdebug.h
index 48faf3eba9fb..72e9d5eb083c 100644
--- a/drivers/acpi/acpica/acdebug.h
+++ b/drivers/acpi/acpica/acdebug.h
@@ -105,6 +105,8 @@ void acpi_db_set_method_data(char *type_arg, char *index_arg, char *value_arg);
acpi_status
acpi_db_display_objects(char *obj_type_arg, char *display_count_arg);
+void acpi_db_display_interfaces(char *action_arg, char *interface_name_arg);
+
acpi_status acpi_db_find_name_in_namespace(char *name_arg);
void acpi_db_set_scope(char *name);
diff --git a/drivers/acpi/acpica/acevents.h b/drivers/acpi/acpica/acevents.h
index 36867cd70eac..a6f99cc37a19 100644
--- a/drivers/acpi/acpica/acevents.h
+++ b/drivers/acpi/acpica/acevents.h
@@ -105,8 +105,9 @@ acpi_ev_create_gpe_block(struct acpi_namespace_node *gpe_device,
struct acpi_gpe_block_info **return_gpe_block);
acpi_status
-acpi_ev_initialize_gpe_block(struct acpi_namespace_node *gpe_device,
- struct acpi_gpe_block_info *gpe_block);
+acpi_ev_initialize_gpe_block(struct acpi_gpe_xrupt_info *gpe_xrupt_info,
+ struct acpi_gpe_block_info *gpe_block,
+ void *ignored);
acpi_status acpi_ev_delete_gpe_block(struct acpi_gpe_block_info *gpe_block);
diff --git a/drivers/acpi/acpica/acglobal.h b/drivers/acpi/acpica/acglobal.h
index 1d192142c691..ad88fcae4eb9 100644
--- a/drivers/acpi/acpica/acglobal.h
+++ b/drivers/acpi/acpica/acglobal.h
@@ -132,6 +132,7 @@ struct acpi_table_fadt acpi_gbl_FADT;
u32 acpi_current_gpe_count;
u32 acpi_gbl_trace_flags;
acpi_name acpi_gbl_trace_method_name;
+u8 acpi_gbl_system_awake_and_running;
#endif
@@ -187,6 +188,10 @@ ACPI_EXTERN u8 acpi_gbl_integer_bit_width;
ACPI_EXTERN u8 acpi_gbl_integer_byte_width;
ACPI_EXTERN u8 acpi_gbl_integer_nybble_width;
+/* Mutex for _OSI support */
+
+ACPI_EXTERN acpi_mutex acpi_gbl_osi_mutex;
+
/* Reader/Writer lock is used for namespace walk and dynamic table unload */
ACPI_EXTERN struct acpi_rw_lock acpi_gbl_namespace_rw_lock;
@@ -255,6 +260,7 @@ ACPI_EXTERN acpi_init_handler acpi_gbl_init_handler;
ACPI_EXTERN acpi_tbl_handler acpi_gbl_table_handler;
ACPI_EXTERN void *acpi_gbl_table_handler_context;
ACPI_EXTERN struct acpi_walk_state *acpi_gbl_breakpoint_walk;
+ACPI_EXTERN acpi_interface_handler acpi_gbl_interface_handler;
/* Owner ID support */
@@ -273,8 +279,8 @@ ACPI_EXTERN u8 acpi_gbl_debugger_configuration;
ACPI_EXTERN u8 acpi_gbl_step_to_next_call;
ACPI_EXTERN u8 acpi_gbl_acpi_hardware_present;
ACPI_EXTERN u8 acpi_gbl_events_initialized;
-ACPI_EXTERN u8 acpi_gbl_system_awake_and_running;
ACPI_EXTERN u8 acpi_gbl_osi_data;
+ACPI_EXTERN struct acpi_interface_info *acpi_gbl_supported_interfaces;
#ifndef DEFINE_ACPI_GLOBALS
@@ -364,6 +370,7 @@ ACPI_EXTERN struct acpi_fixed_event_handler
ACPI_EXTERN struct acpi_gpe_xrupt_info *acpi_gbl_gpe_xrupt_list_head;
ACPI_EXTERN struct acpi_gpe_block_info
*acpi_gbl_gpe_fadt_blocks[ACPI_MAX_GPE_BLOCKS];
+ACPI_EXTERN u8 acpi_all_gpes_initialized;
/*****************************************************************************
*
diff --git a/drivers/acpi/acpica/achware.h b/drivers/acpi/acpica/achware.h
index 120b3af56596..167470ad2d21 100644
--- a/drivers/acpi/acpica/achware.h
+++ b/drivers/acpi/acpica/achware.h
@@ -121,6 +121,13 @@ acpi_hw_enable_runtime_gpe_block(struct acpi_gpe_xrupt_info *gpe_xrupt_info,
struct acpi_gpe_block_info *gpe_block,
void *context);
+/*
+ * hwpci - PCI configuration support
+ */
+acpi_status
+acpi_hw_derive_pci_id(struct acpi_pci_id *pci_id,
+ acpi_handle root_pci_device, acpi_handle pci_region);
+
#ifdef ACPI_FUTURE_USAGE
/*
* hwtimer - ACPI Timer prototypes
diff --git a/drivers/acpi/acpica/aclocal.h b/drivers/acpi/acpica/aclocal.h
index 7dad9160f209..2ceb0c05b2d7 100644
--- a/drivers/acpi/acpica/aclocal.h
+++ b/drivers/acpi/acpica/aclocal.h
@@ -413,6 +413,7 @@ struct acpi_handler_info {
void *context; /* Context to be passed to handler */
struct acpi_namespace_node *method_node; /* Method node for this GPE level (saved) */
u8 orig_flags; /* Original misc info about this GPE */
+ u8 orig_enabled; /* Set if the GPE was originally enabled */
};
union acpi_gpe_dispatch_info {
@@ -457,6 +458,7 @@ struct acpi_gpe_block_info {
u32 register_count; /* Number of register pairs in block */
u16 gpe_count; /* Number of individual GPEs in block */
u8 block_base_number; /* Base GPE number for this block */
+ u8 initialized; /* If set, the GPE block has been initialized */
};
/* Information about GPE interrupt handlers, one per each interrupt level used for GPEs */
@@ -473,7 +475,6 @@ struct acpi_gpe_walk_info {
struct acpi_gpe_block_info *gpe_block;
u16 count;
acpi_owner_id owner_id;
- u8 enable_this_gpe;
u8 execute_by_owner_id;
};
@@ -854,7 +855,7 @@ struct acpi_bit_register_info {
ACPI_BITMASK_POWER_BUTTON_STATUS | \
ACPI_BITMASK_SLEEP_BUTTON_STATUS | \
ACPI_BITMASK_RT_CLOCK_STATUS | \
- ACPI_BITMASK_PCIEXP_WAKE_DISABLE | \
+ ACPI_BITMASK_PCIEXP_WAKE_STATUS | \
ACPI_BITMASK_WAKE_STATUS)
#define ACPI_BITMASK_TIMER_ENABLE 0x0001
@@ -909,15 +910,21 @@ struct acpi_bit_register_info {
#define ACPI_OSI_WIN_VISTA 0x07
#define ACPI_OSI_WINSRV_2008 0x08
#define ACPI_OSI_WIN_VISTA_SP1 0x09
-#define ACPI_OSI_WIN_7 0x0A
+#define ACPI_OSI_WIN_VISTA_SP2 0x0A
+#define ACPI_OSI_WIN_7 0x0B
#define ACPI_ALWAYS_ILLEGAL 0x00
struct acpi_interface_info {
char *name;
+ struct acpi_interface_info *next;
+ u8 flags;
u8 value;
};
+#define ACPI_OSI_INVALID 0x01
+#define ACPI_OSI_DYNAMIC 0x02
+
struct acpi_port_info {
char *name;
u16 start;
@@ -997,7 +1004,7 @@ struct acpi_port_info {
struct acpi_db_method_info {
acpi_handle main_thread_gate;
acpi_handle thread_complete_gate;
- u32 *threads;
+ acpi_thread_id *threads;
u32 num_threads;
u32 num_created;
u32 num_completed;
diff --git a/drivers/acpi/acpica/acmacros.h b/drivers/acpi/acpica/acmacros.h
index 9894929a2abb..8d5c9e0a495f 100644
--- a/drivers/acpi/acpica/acmacros.h
+++ b/drivers/acpi/acpica/acmacros.h
@@ -338,8 +338,8 @@
* the plist contains a set of parens to allow variable-length lists.
* These macros are used for both the debug and non-debug versions of the code.
*/
-#define ACPI_ERROR_NAMESPACE(s, e) acpi_ns_report_error (AE_INFO, s, e);
-#define ACPI_ERROR_METHOD(s, n, p, e) acpi_ns_report_method_error (AE_INFO, s, n, p, e);
+#define ACPI_ERROR_NAMESPACE(s, e) acpi_ut_namespace_error (AE_INFO, s, e);
+#define ACPI_ERROR_METHOD(s, n, p, e) acpi_ut_method_error (AE_INFO, s, n, p, e);
#define ACPI_WARN_PREDEFINED(plist) acpi_ut_predefined_warning plist
#define ACPI_INFO_PREDEFINED(plist) acpi_ut_predefined_info plist
diff --git a/drivers/acpi/acpica/acnamesp.h b/drivers/acpi/acpica/acnamesp.h
index 9f60ff002203..d44d3bc5b847 100644
--- a/drivers/acpi/acpica/acnamesp.h
+++ b/drivers/acpi/acpica/acnamesp.h
@@ -339,18 +339,6 @@ acpi_object_type acpi_ns_get_type(struct acpi_namespace_node *node);
u32 acpi_ns_local(acpi_object_type type);
void
-acpi_ns_report_error(const char *module_name,
- u32 line_number,
- const char *internal_name, acpi_status lookup_status);
-
-void
-acpi_ns_report_method_error(const char *module_name,
- u32 line_number,
- const char *message,
- struct acpi_namespace_node *node,
- const char *path, acpi_status lookup_status);
-
-void
acpi_ns_print_node_pathname(struct acpi_namespace_node *node, const char *msg);
acpi_status acpi_ns_build_internal_name(struct acpi_namestring_info *info);
diff --git a/drivers/acpi/acpica/acobject.h b/drivers/acpi/acpica/acobject.h
index 54857fa87aaf..bdbfaf22bd14 100644
--- a/drivers/acpi/acpica/acobject.h
+++ b/drivers/acpi/acpica/acobject.h
@@ -248,7 +248,7 @@ ACPI_OBJECT_COMMON_HEADER ACPI_COMMON_NOTIFY_INFO};
u32 base_byte_offset; /* Byte offset within containing object */\
u32 value; /* Value to store into the Bank or Index register */\
u8 start_field_bit_offset;/* Bit offset within first field datum (0-63) */\
- u8 access_bit_width; /* Read/Write size in bits (8-64) */
+
struct acpi_object_field_common { /* COMMON FIELD (for BUFFER, REGION, BANK, and INDEX fields) */
ACPI_OBJECT_COMMON_HEADER ACPI_COMMON_FIELD_INFO union acpi_operand_object *region_obj; /* Parent Operation Region object (REGION/BANK fields only) */
diff --git a/drivers/acpi/acpica/acutils.h b/drivers/acpi/acpica/acutils.h
index 35df755251ce..72e4183c1937 100644
--- a/drivers/acpi/acpica/acutils.h
+++ b/drivers/acpi/acpica/acutils.h
@@ -312,8 +312,6 @@ void acpi_ut_delete_internal_object_list(union acpi_operand_object **obj_list);
/*
* uteval - object evaluation
*/
-acpi_status acpi_ut_osi_implementation(struct acpi_walk_state *walk_state);
-
acpi_status
acpi_ut_evaluate_object(struct acpi_namespace_node *prefix_node,
char *path,
@@ -395,6 +393,21 @@ acpi_status
acpi_ut_get_object_size(union acpi_operand_object *obj, acpi_size * obj_length);
/*
+ * utosi - Support for the _OSI predefined control method
+ */
+acpi_status acpi_ut_initialize_interfaces(void);
+
+void acpi_ut_interface_terminate(void);
+
+acpi_status acpi_ut_install_interface(acpi_string interface_name);
+
+acpi_status acpi_ut_remove_interface(acpi_string interface_name);
+
+struct acpi_interface_info *acpi_ut_get_interface(acpi_string interface_name);
+
+acpi_status acpi_ut_osi_implementation(struct acpi_walk_state *walk_state);
+
+/*
* utstate - Generic state creation/cache routines
*/
void
@@ -473,17 +486,6 @@ u8 acpi_ut_valid_acpi_char(char character, u32 position);
acpi_status acpi_ut_strtoul64(char *string, u32 base, u64 * ret_integer);
-void ACPI_INTERNAL_VAR_XFACE
-acpi_ut_predefined_warning(const char *module_name,
- u32 line_number,
- char *pathname,
- u8 node_flags, const char *format, ...);
-
-void ACPI_INTERNAL_VAR_XFACE
-acpi_ut_predefined_info(const char *module_name,
- u32 line_number,
- char *pathname, u8 node_flags, const char *format, ...);
-
/* Values for Base above (16=Hex, 10=Decimal) */
#define ACPI_ANY_BASE 0
@@ -574,6 +576,32 @@ acpi_status
acpi_ut_create_list(char *list_name,
u16 object_size, struct acpi_memory_list **return_cache);
-#endif
+#endif /* ACPI_DBG_TRACK_ALLOCATIONS */
+
+/*
+ * utxferror - various error/warning output functions
+ */
+void ACPI_INTERNAL_VAR_XFACE
+acpi_ut_predefined_warning(const char *module_name,
+ u32 line_number,
+ char *pathname,
+ u8 node_flags, const char *format, ...);
+
+void ACPI_INTERNAL_VAR_XFACE
+acpi_ut_predefined_info(const char *module_name,
+ u32 line_number,
+ char *pathname, u8 node_flags, const char *format, ...);
+
+void
+acpi_ut_namespace_error(const char *module_name,
+ u32 line_number,
+ const char *internal_name, acpi_status lookup_status);
+
+void
+acpi_ut_method_error(const char *module_name,
+ u32 line_number,
+ const char *message,
+ struct acpi_namespace_node *node,
+ const char *path, acpi_status lookup_status);
#endif /* _ACUTILS_H */
diff --git a/drivers/acpi/acpica/dsmethod.c b/drivers/acpi/acpica/dsmethod.c
index 64750ee96e20..d94dd8974b55 100644
--- a/drivers/acpi/acpica/dsmethod.c
+++ b/drivers/acpi/acpica/dsmethod.c
@@ -573,7 +573,7 @@ acpi_ds_terminate_control_method(union acpi_operand_object *method_desc,
acpi_os_release_mutex(method_desc->method.
mutex->mutex.os_mutex);
- method_desc->method.mutex->mutex.thread_id = NULL;
+ method_desc->method.mutex->mutex.thread_id = 0;
}
}
diff --git a/drivers/acpi/acpica/dswexec.c b/drivers/acpi/acpica/dswexec.c
index d555b374e314..6b0b5d08d97a 100644
--- a/drivers/acpi/acpica/dswexec.c
+++ b/drivers/acpi/acpica/dswexec.c
@@ -300,10 +300,25 @@ acpi_ds_exec_begin_op(struct acpi_walk_state *walk_state,
* we must enter this object into the namespace. The created
* object is temporary and will be deleted upon completion of
* the execution of this method.
+ *
+ * Note 10/2010: Except for the Scope() op. This opcode does
+ * not actually create a new object, it refers to an existing
+ * object. However, for Scope(), we want to indeed open a
+ * new scope.
*/
- status = acpi_ds_load2_begin_op(walk_state, NULL);
+ if (op->common.aml_opcode != AML_SCOPE_OP) {
+ status =
+ acpi_ds_load2_begin_op(walk_state, NULL);
+ } else {
+ status =
+ acpi_ds_scope_stack_push(op->named.node,
+ op->named.node->
+ type, walk_state);
+ if (ACPI_FAILURE(status)) {
+ return_ACPI_STATUS(status);
+ }
+ }
}
-
break;
case AML_CLASS_EXECUTE:
diff --git a/drivers/acpi/acpica/evevent.c b/drivers/acpi/acpica/evevent.c
index 303618889da0..c61c3039c31a 100644
--- a/drivers/acpi/acpica/evevent.c
+++ b/drivers/acpi/acpica/evevent.c
@@ -95,47 +95,6 @@ acpi_status acpi_ev_initialize_events(void)
/*******************************************************************************
*
- * FUNCTION: acpi_ev_install_fadt_gpes
- *
- * PARAMETERS: None
- *
- * RETURN: Status
- *
- * DESCRIPTION: Completes initialization of the FADT-defined GPE blocks
- * (0 and 1). The HW must be fully initialized at this point,
- * including global lock support.
- *
- ******************************************************************************/
-
-acpi_status acpi_ev_install_fadt_gpes(void)
-{
- acpi_status status;
-
- ACPI_FUNCTION_TRACE(ev_install_fadt_gpes);
-
- /* Namespace must be locked */
-
- status = acpi_ut_acquire_mutex(ACPI_MTX_NAMESPACE);
- if (ACPI_FAILURE(status)) {
- return (status);
- }
-
- /* FADT GPE Block 0 */
-
- (void)acpi_ev_initialize_gpe_block(acpi_gbl_fadt_gpe_device,
- acpi_gbl_gpe_fadt_blocks[0]);
-
- /* FADT GPE Block 1 */
-
- (void)acpi_ev_initialize_gpe_block(acpi_gbl_fadt_gpe_device,
- acpi_gbl_gpe_fadt_blocks[1]);
-
- (void)acpi_ut_release_mutex(ACPI_MTX_NAMESPACE);
- return_ACPI_STATUS(AE_OK);
-}
-
-/*******************************************************************************
- *
* FUNCTION: acpi_ev_install_xrupt_handlers
*
* PARAMETERS: None
diff --git a/drivers/acpi/acpica/evgpeblk.c b/drivers/acpi/acpica/evgpeblk.c
index 85445fb5844e..020add3eee1c 100644
--- a/drivers/acpi/acpica/evgpeblk.c
+++ b/drivers/acpi/acpica/evgpeblk.c
@@ -363,6 +363,7 @@ acpi_ev_create_gpe_block(struct acpi_namespace_node *gpe_device,
gpe_block->gpe_count = (u16)(register_count * ACPI_GPE_REGISTER_WIDTH);
gpe_block->register_count = register_count;
gpe_block->block_base_number = gpe_block_base_number;
+ gpe_block->initialized = FALSE;
ACPI_MEMCPY(&gpe_block->block_address, gpe_block_address,
sizeof(struct acpi_generic_address));
@@ -385,11 +386,12 @@ acpi_ev_create_gpe_block(struct acpi_namespace_node *gpe_device,
return_ACPI_STATUS(status);
}
+ acpi_all_gpes_initialized = FALSE;
+
/* Find all GPE methods (_Lxx or_Exx) for this block */
walk_info.gpe_block = gpe_block;
walk_info.gpe_device = gpe_device;
- walk_info.enable_this_gpe = FALSE;
walk_info.execute_by_owner_id = FALSE;
status = acpi_ns_walk_namespace(ACPI_TYPE_METHOD, gpe_device,
@@ -434,35 +436,34 @@ acpi_ev_create_gpe_block(struct acpi_namespace_node *gpe_device,
******************************************************************************/
acpi_status
-acpi_ev_initialize_gpe_block(struct acpi_namespace_node *gpe_device,
- struct acpi_gpe_block_info *gpe_block)
+acpi_ev_initialize_gpe_block(struct acpi_gpe_xrupt_info *gpe_xrupt_info,
+ struct acpi_gpe_block_info *gpe_block,
+ void *ignored)
{
acpi_status status;
struct acpi_gpe_event_info *gpe_event_info;
u32 gpe_enabled_count;
u32 gpe_index;
- u32 gpe_number;
u32 i;
u32 j;
ACPI_FUNCTION_TRACE(ev_initialize_gpe_block);
- /* Ignore a null GPE block (e.g., if no GPE block 1 exists) */
-
- if (!gpe_block) {
+ /*
+ * Ignore a null GPE block (e.g., if no GPE block 1 exists) and
+ * GPE blocks that have been initialized already.
+ */
+ if (!gpe_block || gpe_block->initialized) {
return_ACPI_STATUS(AE_OK);
}
/*
- * Enable all GPEs that have a corresponding method. Any other GPEs
- * within this block must be enabled via the acpi_enable_gpe interface.
+ * Enable all GPEs that have a corresponding method and have the
+ * ACPI_GPE_CAN_WAKE flag unset. Any other GPEs within this block must
+ * be enabled via the acpi_enable_gpe() interface.
*/
gpe_enabled_count = 0;
- if (gpe_device == acpi_gbl_fadt_gpe_device) {
- gpe_device = NULL;
- }
-
for (i = 0; i < gpe_block->register_count; i++) {
for (j = 0; j < ACPI_GPE_REGISTER_WIDTH; j++) {
@@ -470,27 +471,19 @@ acpi_ev_initialize_gpe_block(struct acpi_namespace_node *gpe_device,
gpe_index = (i * ACPI_GPE_REGISTER_WIDTH) + j;
gpe_event_info = &gpe_block->event_info[gpe_index];
- gpe_number = gpe_index + gpe_block->block_base_number;
/* Ignore GPEs that have no corresponding _Lxx/_Exx method */
- if (!(gpe_event_info->flags & ACPI_GPE_DISPATCH_METHOD)) {
+ if (!(gpe_event_info->flags & ACPI_GPE_DISPATCH_METHOD)
+ || (gpe_event_info->flags & ACPI_GPE_CAN_WAKE)) {
continue;
}
- /*
- * If the GPE has already been enabled for runtime
- * signaling, make sure it remains enabled, but do not
- * increment its reference counter.
- */
- status = gpe_event_info->runtime_count ?
- acpi_ev_enable_gpe(gpe_event_info) :
- acpi_enable_gpe(gpe_device, gpe_number);
-
+ status = acpi_raw_enable_gpe(gpe_event_info);
if (ACPI_FAILURE(status)) {
ACPI_EXCEPTION((AE_INFO, status,
- "Could not enable GPE 0x%02X",
- gpe_number));
+ "Could not enable GPE 0x%02X",
+ gpe_index + gpe_block->block_base_number));
continue;
}
@@ -504,5 +497,7 @@ acpi_ev_initialize_gpe_block(struct acpi_namespace_node *gpe_device,
gpe_enabled_count));
}
+ gpe_block->initialized = TRUE;
+
return_ACPI_STATUS(AE_OK);
}
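
The evgpeblk.c loop above walks register_count register pairs of ACPI_GPE_REGISTER_WIDTH (8) GPEs each; the GPE number reported on failure is the flat index plus the block's base number. A standalone illustration of that index arithmetic (the block size and base number are assumptions for the example):

#include <stdio.h>

#define GPE_REGISTER_WIDTH 8    /* GPEs per status/enable register pair */

int main(void)
{
	unsigned int register_count = 2;        /* assumed block size */
	unsigned int block_base_number = 0x10;  /* assumed base GPE number */
	unsigned int i, j;

	for (i = 0; i < register_count; i++)
		for (j = 0; j < GPE_REGISTER_WIDTH; j++) {
			unsigned int gpe_index = i * GPE_REGISTER_WIDTH + j;

			printf("index %2u -> GPE 0x%02X\n",
			       gpe_index, gpe_index + block_base_number);
		}
	return 0;
}
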
diff --git a/drivers/acpi/acpica/evgpeinit.c b/drivers/acpi/acpica/evgpeinit.c
index 3084c5de1bba..2c7def95f721 100644
--- a/drivers/acpi/acpica/evgpeinit.c
+++ b/drivers/acpi/acpica/evgpeinit.c
@@ -210,8 +210,7 @@ acpi_status acpi_ev_gpe_initialize(void)
*
* DESCRIPTION: Check for new GPE methods (_Lxx/_Exx) made available as a
* result of a Load() or load_table() operation. If new GPE
- * methods have been installed, register the new methods and
- * enable and runtime GPEs that are associated with them.
+ * methods have been installed, register the new methods.
*
******************************************************************************/
@@ -239,7 +238,6 @@ void acpi_ev_update_gpes(acpi_owner_id table_owner_id)
walk_info.owner_id = table_owner_id;
walk_info.execute_by_owner_id = TRUE;
walk_info.count = 0;
- walk_info.enable_this_gpe = TRUE;
/* Walk the interrupt level descriptor list */
@@ -301,8 +299,6 @@ void acpi_ev_update_gpes(acpi_owner_id table_owner_id)
*
* If walk_info->execute_by_owner_id is TRUE, we only examine GPE methods
* with that owner.
- * If walk_info->enable_this_gpe is TRUE, the GPE that is referred to by a GPE
- * method is immediately enabled (Used for Load/load_table operators)
*
******************************************************************************/
@@ -315,8 +311,6 @@ acpi_ev_match_gpe_method(acpi_handle obj_handle,
struct acpi_gpe_walk_info *walk_info =
ACPI_CAST_PTR(struct acpi_gpe_walk_info, context);
struct acpi_gpe_event_info *gpe_event_info;
- struct acpi_namespace_node *gpe_device;
- acpi_status status;
u32 gpe_number;
char name[ACPI_NAME_SIZE + 1];
u8 type;
@@ -421,29 +415,6 @@ acpi_ev_match_gpe_method(acpi_handle obj_handle,
gpe_event_info->flags |= (u8)(type | ACPI_GPE_DISPATCH_METHOD);
gpe_event_info->dispatch.method_node = method_node;
- /*
- * Enable this GPE if requested. This only happens when during the
- * execution of a Load or load_table operator. We have found a new
- * GPE method and want to immediately enable the GPE if it is a
- * runtime GPE.
- */
- if (walk_info->enable_this_gpe) {
-
- walk_info->count++;
- gpe_device = walk_info->gpe_device;
-
- if (gpe_device == acpi_gbl_fadt_gpe_device) {
- gpe_device = NULL;
- }
-
- status = acpi_enable_gpe(gpe_device, gpe_number);
- if (ACPI_FAILURE(status)) {
- ACPI_EXCEPTION((AE_INFO, status,
- "Could not enable GPE 0x%02X",
- gpe_number));
- }
- }
-
ACPI_DEBUG_PRINT((ACPI_DB_LOAD,
"Registered GPE method %s as GPE number 0x%.2X\n",
name, gpe_number));
diff --git a/drivers/acpi/acpica/evmisc.c b/drivers/acpi/acpica/evmisc.c
index df0aea9a8cfd..fcaed9fb44ff 100644
--- a/drivers/acpi/acpica/evmisc.c
+++ b/drivers/acpi/acpica/evmisc.c
@@ -553,7 +553,7 @@ acpi_status acpi_ev_release_global_lock(void)
acpi_gbl_global_lock_acquired = FALSE;
/* Release the local GL mutex */
- acpi_ev_global_lock_thread_id = NULL;
+ acpi_ev_global_lock_thread_id = 0;
acpi_ev_global_lock_acquired = 0;
acpi_os_release_mutex(acpi_gbl_global_lock_mutex->mutex.os_mutex);
return_ACPI_STATUS(status);
diff --git a/drivers/acpi/acpica/evrgnini.c b/drivers/acpi/acpica/evrgnini.c
index f40d271bf568..0b47a6dc9290 100644
--- a/drivers/acpi/acpica/evrgnini.c
+++ b/drivers/acpi/acpica/evrgnini.c
@@ -289,8 +289,8 @@ acpi_ev_pci_config_region_setup(acpi_handle handle,
}
/*
- * Get the PCI device and function numbers from the _ADR object contained
- * in the parent's scope.
+ * Get the PCI device and function numbers from the _ADR object
+ * contained in the parent's scope.
*/
status = acpi_ut_evaluate_numeric_object(METHOD_NAME__ADR,
pci_device_node, &pci_value);
@@ -320,9 +320,15 @@ acpi_ev_pci_config_region_setup(acpi_handle handle,
pci_id->bus = ACPI_LOWORD(pci_value);
}
- /* Complete this device's pci_id */
+ /* Complete/update the PCI ID for this device */
- acpi_os_derive_pci_id(pci_root_node, region_obj->region.node, &pci_id);
+ status =
+ acpi_hw_derive_pci_id(pci_id, pci_root_node,
+ region_obj->region.node);
+ if (ACPI_FAILURE(status)) {
+ ACPI_FREE(pci_id);
+ return_ACPI_STATUS(status);
+ }
*region_context = pci_id;
return_ACPI_STATUS(AE_OK);
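
In the evrgnini.c hunk, the now status-returning acpi_hw_derive_pci_id() lets the caller free the partially built pci_id on failure instead of storing it. A user-space sketch of that allocate/complete/free-on-error shape, with hypothetical names (pci_id_sketch, derive_id, setup_pci_id):

#include <stdlib.h>

struct pci_id_sketch {
	unsigned int segment, bus, device, function;
};

/* Stand-in for the helper that fills in the remaining fields. */
static int derive_id(struct pci_id_sketch *id)
{
	(void)id;
	return 0;       /* 0 on success, non-zero on failure */
}

/* Only hand back a fully derived ID; on failure, free it so nothing leaks. */
static struct pci_id_sketch *setup_pci_id(void)
{
	struct pci_id_sketch *id = calloc(1, sizeof(*id));

	if (!id)
		return NULL;
	if (derive_id(id) != 0) {
		free(id);
		return NULL;
	}
	return id;
}
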
diff --git a/drivers/acpi/acpica/evxface.c b/drivers/acpi/acpica/evxface.c
index 14e48add32fa..36af222cac65 100644
--- a/drivers/acpi/acpica/evxface.c
+++ b/drivers/acpi/acpica/evxface.c
@@ -726,15 +726,16 @@ acpi_install_gpe_handler(acpi_handle gpe_device,
(ACPI_GPE_XRUPT_TYPE_MASK | ACPI_GPE_DISPATCH_MASK);
/*
- * If the GPE is associated with a method and it cannot wake up the
- * system from sleep states, it was enabled automatically during
- * initialization, so it has to be disabled now to avoid spurious
- * execution of the handler.
+ * If the GPE is associated with a method, it might have been enabled
+ * automatically during initialization, in which case it has to be
+ * disabled now to avoid spurious execution of the handler.
*/
if ((handler->orig_flags & ACPI_GPE_DISPATCH_METHOD)
- && !(gpe_event_info->flags & ACPI_GPE_CAN_WAKE))
+ && gpe_event_info->runtime_count) {
+ handler->orig_enabled = 1;
(void)acpi_raw_disable_gpe(gpe_event_info);
+ }
/* Install the handler */
@@ -837,13 +838,13 @@ acpi_remove_gpe_handler(acpi_handle gpe_device,
gpe_event_info->flags |= handler->orig_flags;
/*
- * If the GPE was previously associated with a method and it cannot wake
- * up the system from sleep states, it should be enabled at this point
- * to restore the post-initialization configuration.
+ * If the GPE was previously associated with a method and it was
+ * enabled, it should be enabled at this point to restore the
+ * post-initialization configuration.
*/
if ((handler->orig_flags & ACPI_GPE_DISPATCH_METHOD)
- && !(gpe_event_info->flags & ACPI_GPE_CAN_WAKE))
+ && handler->orig_enabled)
(void)acpi_raw_enable_gpe(gpe_event_info);
/* Now we can free the handler object */
diff --git a/drivers/acpi/acpica/evxfevnt.c b/drivers/acpi/acpica/evxfevnt.c
index 304825528d48..a1dabe3fd8ae 100644
--- a/drivers/acpi/acpica/evxfevnt.c
+++ b/drivers/acpi/acpica/evxfevnt.c
@@ -379,21 +379,12 @@ acpi_status acpi_gpe_can_wake(acpi_handle gpe_device, u32 gpe_number)
/* Ensure that we have a valid GPE number */
gpe_event_info = acpi_ev_get_gpe_event_info(gpe_device, gpe_number);
- if (!gpe_event_info) {
+ if (gpe_event_info) {
+ gpe_event_info->flags |= ACPI_GPE_CAN_WAKE;
+ } else {
status = AE_BAD_PARAMETER;
- goto unlock_and_exit;
- }
-
- if (gpe_event_info->flags & ACPI_GPE_CAN_WAKE) {
- goto unlock_and_exit;
}
- gpe_event_info->flags |= ACPI_GPE_CAN_WAKE;
- if (gpe_event_info->flags & ACPI_GPE_DISPATCH_METHOD) {
- (void)acpi_raw_disable_gpe(gpe_event_info);
- }
-
-unlock_and_exit:
acpi_os_release_lock(acpi_gbl_gpe_lock, flags);
return_ACPI_STATUS(status);
}
@@ -651,7 +642,7 @@ acpi_install_gpe_block(acpi_handle gpe_device,
struct acpi_generic_address *gpe_block_address,
u32 register_count, u32 interrupt_number)
{
- acpi_status status;
+ acpi_status status = AE_OK;
union acpi_operand_object *obj_desc;
struct acpi_namespace_node *node;
struct acpi_gpe_block_info *gpe_block;
@@ -715,10 +706,6 @@ acpi_install_gpe_block(acpi_handle gpe_device,
obj_desc->device.gpe_block = gpe_block;
- /* Enable the runtime GPEs in the new block */
-
- status = acpi_ev_initialize_gpe_block(node, gpe_block);
-
unlock_and_exit:
(void)acpi_ut_release_mutex(ACPI_MTX_NAMESPACE);
return_ACPI_STATUS(status);
@@ -924,3 +911,43 @@ acpi_status acpi_enable_all_runtime_gpes(void)
return_ACPI_STATUS(status);
}
+
+/******************************************************************************
+ *
+ * FUNCTION: acpi_update_gpes
+ *
+ * PARAMETERS: None
+ *
+ * RETURN: Status
+ *
+ * DESCRIPTION: Enable all GPEs that have associated _Lxx or _Exx methods and
+ * are not pointed to by any device _PRW methods indicating that
+ * these GPEs are generally intended for system or device wakeup
+ * (such GPEs have to be enabled directly when the devices whose
+ * _PRW methods point to them are set up for wakeup signaling).
+ *
+ ******************************************************************************/
+
+acpi_status acpi_update_gpes(void)
+{
+ acpi_status status;
+
+ ACPI_FUNCTION_TRACE(acpi_update_gpes);
+
+ status = acpi_ut_acquire_mutex(ACPI_MTX_EVENTS);
+ if (ACPI_FAILURE(status)) {
+ return_ACPI_STATUS(status);
+ } else if (acpi_all_gpes_initialized) {
+ goto unlock;
+ }
+
+ status = acpi_ev_walk_gpe_list(acpi_ev_initialize_gpe_block, NULL);
+ if (ACPI_SUCCESS(status)) {
+ acpi_all_gpes_initialized = TRUE;
+ }
+
+unlock:
+ (void)acpi_ut_release_mutex(ACPI_MTX_EVENTS);
+
+ return_ACPI_STATUS(status);
+}
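
acpi_update_gpes() above is a mutex-protected run-once routine: it takes the events mutex, bails out if the work was already done, and only sets the done flag when the walk succeeds. A user-space sketch of that shape using pthreads, with hypothetical names (update_blocks, initialize_all_blocks):

#include <pthread.h>
#include <stdbool.h>

static pthread_mutex_t init_lock = PTHREAD_MUTEX_INITIALIZER;
static bool all_initialized;

/* Stand-in for the expensive one-time walk. */
static int initialize_all_blocks(void)
{
	return 0;       /* 0 on success */
}

/* Safe to call repeatedly and from several threads; the work runs once. */
static int update_blocks(void)
{
	int ret = 0;

	pthread_mutex_lock(&init_lock);
	if (!all_initialized) {
		ret = initialize_all_blocks();
		if (ret == 0)
			all_initialized = true;
	}
	pthread_mutex_unlock(&init_lock);
	return ret;
}
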
diff --git a/drivers/acpi/acpica/evxfregn.c b/drivers/acpi/acpica/evxfregn.c
index 541cbc1544d5..ce9314f79451 100644
--- a/drivers/acpi/acpica/evxfregn.c
+++ b/drivers/acpi/acpica/evxfregn.c
@@ -64,6 +64,12 @@ ACPI_MODULE_NAME("evxfregn")
*
* DESCRIPTION: Install a handler for all op_regions of a given space_id.
*
+ * NOTE: This function should only be called after acpi_enable_subsystem has
+ * been called. This is because any _REG methods associated with the Space ID
+ * are executed here, and these methods can only be safely executed after
+ * the default handlers have been installed and the hardware has been
+ * initialized (via acpi_enable_subsystem).
+ *
******************************************************************************/
acpi_status
acpi_install_address_space_handler(acpi_handle device,
diff --git a/drivers/acpi/acpica/exfldio.c b/drivers/acpi/acpica/exfldio.c
index 047217303a4b..38293fd3e088 100644
--- a/drivers/acpi/acpica/exfldio.c
+++ b/drivers/acpi/acpica/exfldio.c
@@ -119,8 +119,8 @@ acpi_ex_setup_region(union acpi_operand_object *obj_desc,
}
/*
- * Exit now for SMBus or IPMI address space, it has a non-linear address space
- * and the request cannot be directly validated
+ * Exit now for SMBus or IPMI address space, it has a non-linear
+ * address space and the request cannot be directly validated
*/
if (rgn_desc->region.space_id == ACPI_ADR_SPACE_SMBUS ||
rgn_desc->region.space_id == ACPI_ADR_SPACE_IPMI) {
@@ -147,8 +147,7 @@ acpi_ex_setup_region(union acpi_operand_object *obj_desc,
* (Region length is specified in bytes)
*/
if (rgn_desc->region.length <
- (obj_desc->common_field.base_byte_offset +
- field_datum_byte_offset +
+ (obj_desc->common_field.base_byte_offset + field_datum_byte_offset +
obj_desc->common_field.access_byte_width)) {
if (acpi_gbl_enable_interpreter_slack) {
/*
@@ -680,6 +679,7 @@ acpi_ex_extract_from_field(union acpi_operand_object *obj_desc,
u32 buffer_tail_bits;
u32 datum_count;
u32 field_datum_count;
+ u32 access_bit_width;
u32 i;
ACPI_FUNCTION_TRACE(ex_extract_from_field);
@@ -694,16 +694,36 @@ acpi_ex_extract_from_field(union acpi_operand_object *obj_desc,
return_ACPI_STATUS(AE_BUFFER_OVERFLOW);
}
+
ACPI_MEMSET(buffer, 0, buffer_length);
+ access_bit_width = ACPI_MUL_8(obj_desc->common_field.access_byte_width);
+
+ /* Handle the simple case here */
+
+ if ((obj_desc->common_field.start_field_bit_offset == 0) &&
+ (obj_desc->common_field.bit_length == access_bit_width)) {
+ status = acpi_ex_field_datum_io(obj_desc, 0, buffer, ACPI_READ);
+ return_ACPI_STATUS(status);
+ }
+
+/* TBD: Move to common setup code */
+
+ /* Field algorithm is limited to sizeof(u64), truncate if needed */
+
+ if (obj_desc->common_field.access_byte_width > sizeof(u64)) {
+ obj_desc->common_field.access_byte_width = sizeof(u64);
+ access_bit_width = sizeof(u64) * 8;
+ }
/* Compute the number of datums (access width data items) */
- datum_count = ACPI_ROUND_UP_TO(obj_desc->common_field.bit_length,
- obj_desc->common_field.access_bit_width);
+ datum_count =
+ ACPI_ROUND_UP_TO(obj_desc->common_field.bit_length,
+ access_bit_width);
+
field_datum_count = ACPI_ROUND_UP_TO(obj_desc->common_field.bit_length +
obj_desc->common_field.
start_field_bit_offset,
- obj_desc->common_field.
access_bit_width);
/* Priming read from the field */
@@ -738,12 +758,11 @@ acpi_ex_extract_from_field(union acpi_operand_object *obj_desc,
* This avoids the differences in behavior between different compilers
* concerning shift values larger than the target data width.
*/
- if ((obj_desc->common_field.access_bit_width -
- obj_desc->common_field.start_field_bit_offset) <
+ if (access_bit_width -
+ obj_desc->common_field.start_field_bit_offset <
ACPI_INTEGER_BIT_SIZE) {
merged_datum |=
- raw_datum << (obj_desc->common_field.
- access_bit_width -
+ raw_datum << (access_bit_width -
obj_desc->common_field.
start_field_bit_offset);
}
@@ -765,8 +784,7 @@ acpi_ex_extract_from_field(union acpi_operand_object *obj_desc,
/* Mask off any extra bits in the last datum */
- buffer_tail_bits = obj_desc->common_field.bit_length %
- obj_desc->common_field.access_bit_width;
+ buffer_tail_bits = obj_desc->common_field.bit_length % access_bit_width;
if (buffer_tail_bits) {
merged_datum &= ACPI_MASK_BITS_ABOVE(buffer_tail_bits);
}
@@ -798,6 +816,7 @@ acpi_status
acpi_ex_insert_into_field(union acpi_operand_object *obj_desc,
void *buffer, u32 buffer_length)
{
+ void *new_buffer;
acpi_status status;
u64 mask;
u64 width_mask;
@@ -808,9 +827,9 @@ acpi_ex_insert_into_field(union acpi_operand_object *obj_desc,
u32 buffer_tail_bits;
u32 datum_count;
u32 field_datum_count;
- u32 i;
+ u32 access_bit_width;
u32 required_length;
- void *new_buffer;
+ u32 i;
ACPI_FUNCTION_TRACE(ex_insert_into_field);
@@ -844,17 +863,24 @@ acpi_ex_insert_into_field(union acpi_operand_object *obj_desc,
buffer_length = required_length;
}
+/* TBD: Move to common setup code */
+
+ /* Algo is limited to sizeof(u64), so cut the access_byte_width */
+ if (obj_desc->common_field.access_byte_width > sizeof(u64)) {
+ obj_desc->common_field.access_byte_width = sizeof(u64);
+ }
+
+ access_bit_width = ACPI_MUL_8(obj_desc->common_field.access_byte_width);
+
/*
* Create the bitmasks used for bit insertion.
* Note: This if/else is used to bypass compiler differences with the
* shift operator
*/
- if (obj_desc->common_field.access_bit_width == ACPI_INTEGER_BIT_SIZE) {
+ if (access_bit_width == ACPI_INTEGER_BIT_SIZE) {
width_mask = ACPI_UINT64_MAX;
} else {
- width_mask =
- ACPI_MASK_BITS_ABOVE(obj_desc->common_field.
- access_bit_width);
+ width_mask = ACPI_MASK_BITS_ABOVE(access_bit_width);
}
mask = width_mask &
@@ -863,12 +889,11 @@ acpi_ex_insert_into_field(union acpi_operand_object *obj_desc,
/* Compute the number of datums (access width data items) */
datum_count = ACPI_ROUND_UP_TO(obj_desc->common_field.bit_length,
- obj_desc->common_field.access_bit_width);
+ access_bit_width);
field_datum_count = ACPI_ROUND_UP_TO(obj_desc->common_field.bit_length +
obj_desc->common_field.
start_field_bit_offset,
- obj_desc->common_field.
access_bit_width);
/* Get initial Datum from the input buffer */
@@ -905,12 +930,11 @@ acpi_ex_insert_into_field(union acpi_operand_object *obj_desc,
* This avoids the differences in behavior between different compilers
* concerning shift values larger than the target data width.
*/
- if ((obj_desc->common_field.access_bit_width -
+ if ((access_bit_width -
obj_desc->common_field.start_field_bit_offset) <
ACPI_INTEGER_BIT_SIZE) {
merged_datum =
- raw_datum >> (obj_desc->common_field.
- access_bit_width -
+ raw_datum >> (access_bit_width -
obj_desc->common_field.
start_field_bit_offset);
} else {
@@ -929,6 +953,7 @@ acpi_ex_insert_into_field(union acpi_operand_object *obj_desc,
ACPI_MEMCPY(&raw_datum, ((char *)buffer) + buffer_offset,
ACPI_MIN(obj_desc->common_field.access_byte_width,
buffer_length - buffer_offset));
+
merged_datum |=
raw_datum << obj_desc->common_field.start_field_bit_offset;
}
@@ -937,7 +962,7 @@ acpi_ex_insert_into_field(union acpi_operand_object *obj_desc,
buffer_tail_bits = (obj_desc->common_field.bit_length +
obj_desc->common_field.start_field_bit_offset) %
- obj_desc->common_field.access_bit_width;
+ access_bit_width;
if (buffer_tail_bits) {
mask &= ACPI_MASK_BITS_ABOVE(buffer_tail_bits);
}
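
The exfldio.c changes drop the cached access_bit_width field and recompute it as 8 * access_byte_width, then derive the datum counts by rounding up to that width. A standalone check of the arithmetic (the field geometry values are assumptions for the example):

#include <stdio.h>

/* Equivalent of ACPI_ROUND_UP_TO(value, boundary): ceiling division. */
static unsigned int round_up_to(unsigned int value, unsigned int boundary)
{
	return (value + boundary - 1) / boundary;
}

int main(void)
{
	unsigned int access_byte_width = 4;                       /* DWord access */
	unsigned int access_bit_width = 8 * access_byte_width;    /* 32 */
	unsigned int bit_length = 62;        /* assumed field length in bits */
	unsigned int start_bit = 5;          /* assumed offset within first datum */

	printf("datums needed for the buffer: %u\n",
	       round_up_to(bit_length, access_bit_width));              /* 2 */
	printf("datums spanned in the region: %u\n",
	       round_up_to(bit_length + start_bit, access_bit_width));  /* 3 */
	return 0;
}
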
diff --git a/drivers/acpi/acpica/exmutex.c b/drivers/acpi/acpica/exmutex.c
index f73be97043c0..6af14e43f839 100644
--- a/drivers/acpi/acpica/exmutex.c
+++ b/drivers/acpi/acpica/exmutex.c
@@ -336,7 +336,7 @@ acpi_status acpi_ex_release_mutex_object(union acpi_operand_object *obj_desc)
/* Clear mutex info */
- obj_desc->mutex.thread_id = NULL;
+ obj_desc->mutex.thread_id = 0;
return_ACPI_STATUS(status);
}
@@ -393,10 +393,10 @@ acpi_ex_release_mutex(union acpi_operand_object *obj_desc,
if ((owner_thread->thread_id != walk_state->thread->thread_id) &&
(obj_desc != acpi_gbl_global_lock_mutex)) {
ACPI_ERROR((AE_INFO,
- "Thread %p cannot release Mutex [%4.4s] acquired by thread %p",
- ACPI_CAST_PTR(void, walk_state->thread->thread_id),
+ "Thread %u cannot release Mutex [%4.4s] acquired by thread %u",
+ (u32)walk_state->thread->thread_id,
acpi_ut_get_node_name(obj_desc->mutex.node),
- ACPI_CAST_PTR(void, owner_thread->thread_id)));
+ (u32)owner_thread->thread_id));
return_ACPI_STATUS(AE_AML_NOT_OWNER);
}
@@ -488,7 +488,7 @@ void acpi_ex_release_all_mutexes(struct acpi_thread_state *thread)
/* Mark mutex unowned */
obj_desc->mutex.owner_thread = NULL;
- obj_desc->mutex.thread_id = NULL;
+ obj_desc->mutex.thread_id = 0;
/* Update Thread sync_level (Last mutex is the important one) */
diff --git a/drivers/acpi/acpica/exprep.c b/drivers/acpi/acpica/exprep.c
index 98a331d2249b..7aae29f73d3f 100644
--- a/drivers/acpi/acpica/exprep.c
+++ b/drivers/acpi/acpica/exprep.c
@@ -355,12 +355,10 @@ acpi_ex_prep_common_field_object(union acpi_operand_object *obj_desc,
return_ACPI_STATUS(AE_AML_OPERAND_VALUE);
}
- /* Setup width (access granularity) fields */
+ /* Setup width (access granularity) fields (values are: 1, 2, 4, 8) */
obj_desc->common_field.access_byte_width = (u8)
- ACPI_DIV_8(access_bit_width); /* 1, 2, 4, 8 */
-
- obj_desc->common_field.access_bit_width = (u8) access_bit_width;
+ ACPI_DIV_8(access_bit_width);
/*
* base_byte_offset is the address of the start of the field within the
@@ -405,8 +403,9 @@ acpi_status acpi_ex_prep_field_value(struct acpi_create_field_info *info)
{
union acpi_operand_object *obj_desc;
union acpi_operand_object *second_desc = NULL;
- u32 type;
acpi_status status;
+ u32 access_byte_width;
+ u32 type;
ACPI_FUNCTION_TRACE(ex_prep_field_value);
@@ -421,8 +420,8 @@ acpi_status acpi_ex_prep_field_value(struct acpi_create_field_info *info)
type = acpi_ns_get_type(info->region_node);
if (type != ACPI_TYPE_REGION) {
ACPI_ERROR((AE_INFO,
- "Needed Region, found type 0x%X (%s)",
- type, acpi_ut_get_type_name(type)));
+ "Needed Region, found type 0x%X (%s)", type,
+ acpi_ut_get_type_name(type)));
return_ACPI_STATUS(AE_AML_OPERAND_TYPE);
}
@@ -438,7 +437,8 @@ acpi_status acpi_ex_prep_field_value(struct acpi_create_field_info *info)
/* Initialize areas of the object that are common to all fields */
obj_desc->common_field.node = info->field_node;
- status = acpi_ex_prep_common_field_object(obj_desc, info->field_flags,
+ status = acpi_ex_prep_common_field_object(obj_desc,
+ info->field_flags,
info->attribute,
info->field_bit_position,
info->field_bit_length);
@@ -455,26 +455,25 @@ acpi_status acpi_ex_prep_field_value(struct acpi_create_field_info *info)
obj_desc->field.region_obj =
acpi_ns_get_attached_object(info->region_node);
- /* An additional reference for the container */
+ /* Allow full data read from EC address space */
- acpi_ut_add_reference(obj_desc->field.region_obj);
+ if ((obj_desc->field.region_obj->region.space_id ==
+ ACPI_ADR_SPACE_EC)
+ && (obj_desc->common_field.bit_length > 8)) {
+ access_byte_width =
+ ACPI_ROUND_BITS_UP_TO_BYTES(obj_desc->common_field.
+ bit_length);
+
+ /* Maximum byte width supported is 255 */
- /* allow full data read from EC address space */
- if (obj_desc->field.region_obj->region.space_id ==
- ACPI_ADR_SPACE_EC) {
- if (obj_desc->common_field.bit_length > 8) {
- unsigned width =
- ACPI_ROUND_BITS_UP_TO_BYTES(
- obj_desc->common_field.bit_length);
- // access_bit_width is u8, don't overflow it
- if (width > 8)
- width = 8;
+ if (access_byte_width < 256) {
obj_desc->common_field.access_byte_width =
- width;
- obj_desc->common_field.access_bit_width =
- 8 * width;
+ (u8)access_byte_width;
}
}
+ /* An additional reference for the container */
+
+ acpi_ut_add_reference(obj_desc->field.region_obj);
ACPI_DEBUG_PRINT((ACPI_DB_BFIELD,
"RegionField: BitOff %X, Off %X, Gran %X, Region %p\n",
diff --git a/drivers/acpi/acpica/exregion.c b/drivers/acpi/acpica/exregion.c
index 8819d2ac5aee..de17e10da0ed 100644
--- a/drivers/acpi/acpica/exregion.c
+++ b/drivers/acpi/acpica/exregion.c
@@ -353,7 +353,6 @@ acpi_ex_pci_config_space_handler(u32 function,
acpi_status status = AE_OK;
struct acpi_pci_id *pci_id;
u16 pci_register;
- u32 value32;
ACPI_FUNCTION_TRACE(ex_pci_config_space_handler);
@@ -381,8 +380,7 @@ acpi_ex_pci_config_space_handler(u32 function,
case ACPI_READ:
status = acpi_os_read_pci_configuration(pci_id, pci_register,
- &value32, bit_width);
- *value = value32;
+ value, bit_width);
break;
case ACPI_WRITE:
diff --git a/drivers/acpi/acpica/hwpci.c b/drivers/acpi/acpica/hwpci.c
new file mode 100644
index 000000000000..ad21c7d8bf4f
--- /dev/null
+++ b/drivers/acpi/acpica/hwpci.c
@@ -0,0 +1,412 @@
+/*******************************************************************************
+ *
+ * Module Name: hwpci - Obtain PCI bus, device, and function numbers
+ *
+ ******************************************************************************/
+
+/*
+ * Copyright (C) 2000 - 2010, Intel Corp.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions, and the following disclaimer,
+ * without modification.
+ * 2. Redistributions in binary form must reproduce at minimum a disclaimer
+ * substantially similar to the "NO WARRANTY" disclaimer below
+ * ("Disclaimer") and any redistribution must be conditioned upon
+ * including a substantially similar Disclaimer requirement for further
+ * binary redistribution.
+ * 3. Neither the names of the above-listed copyright holders nor the names
+ * of any contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * Alternatively, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") version 2 as published by the Free
+ * Software Foundation.
+ *
+ * NO WARRANTY
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
+ * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
+ * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGES.
+ */
+
+#include <acpi/acpi.h>
+#include "accommon.h"
+
+#define _COMPONENT ACPI_NAMESPACE
+ACPI_MODULE_NAME("hwpci")
+
+/* PCI configuration space values */
+#define PCI_CFG_HEADER_TYPE_REG 0x0E
+#define PCI_CFG_PRIMARY_BUS_NUMBER_REG 0x18
+#define PCI_CFG_SECONDARY_BUS_NUMBER_REG 0x19
+/* PCI header values */
+#define PCI_HEADER_TYPE_MASK 0x7F
+#define PCI_TYPE_BRIDGE 0x01
+#define PCI_TYPE_CARDBUS_BRIDGE 0x02
+typedef struct acpi_pci_device {
+ acpi_handle device;
+ struct acpi_pci_device *next;
+
+} acpi_pci_device;
+
+/* Local prototypes */
+
+static acpi_status
+acpi_hw_build_pci_list(acpi_handle root_pci_device,
+ acpi_handle pci_region,
+ struct acpi_pci_device **return_list_head);
+
+static acpi_status
+acpi_hw_process_pci_list(struct acpi_pci_id *pci_id,
+ struct acpi_pci_device *list_head);
+
+static void acpi_hw_delete_pci_list(struct acpi_pci_device *list_head);
+
+static acpi_status
+acpi_hw_get_pci_device_info(struct acpi_pci_id *pci_id,
+ acpi_handle pci_device,
+ u16 *bus_number, u8 *is_bridge);
+
+/*******************************************************************************
+ *
+ * FUNCTION: acpi_hw_derive_pci_id
+ *
+ * PARAMETERS: pci_id - Initial values for the PCI ID. May be
+ * modified by this function.
+ * root_pci_device - A handle to a PCI device object. This
+ * object must be a PCI Root Bridge having a
+ * _HID value of either PNP0A03 or PNP0A08
+ * pci_region - A handle to a PCI configuration space
+ * Operation Region being initialized
+ *
+ * RETURN: Status
+ *
+ * DESCRIPTION: This function derives a full PCI ID for a PCI device,
+ * consisting of a Segment number, Bus number, Device number,
+ * and function code.
+ *
+ * The PCI hardware dynamically configures PCI bus numbers
+ * depending on the bus topology discovered during system
+ * initialization. This function is invoked during configuration
+ * of a PCI_Config Operation Region in order to (possibly) update
+ * the Bus/Device/Function numbers in the pci_id with the actual
+ * values as determined by the hardware and operating system
+ * configuration.
+ *
+ * The pci_id parameter is initially populated during the Operation
+ * Region initialization. This function is then called, and
+ * will make any necessary modifications to the Bus, Device, or
+ * Function number PCI ID subfields as appropriate for the
+ * current hardware and OS configuration.
+ *
+ * NOTE: Created 08/2010. Replaces the previous OSL acpi_os_derive_pci_id
+ * interface since this feature is OS-independent. This module
+ * specifically avoids any use of recursion by building a local
+ * temporary device list.
+ *
+ ******************************************************************************/
+
+acpi_status
+acpi_hw_derive_pci_id(struct acpi_pci_id *pci_id,
+ acpi_handle root_pci_device, acpi_handle pci_region)
+{
+ acpi_status status;
+ struct acpi_pci_device *list_head = NULL;
+
+ ACPI_FUNCTION_TRACE(hw_derive_pci_id);
+
+ if (!pci_id) {
+ return_ACPI_STATUS(AE_BAD_PARAMETER);
+ }
+
+ /* Build a list of PCI devices, from pci_region up to root_pci_device */
+
+ status =
+ acpi_hw_build_pci_list(root_pci_device, pci_region, &list_head);
+ if (ACPI_SUCCESS(status)) {
+
+ /* Walk the list, updating the PCI device/function/bus numbers */
+
+ status = acpi_hw_process_pci_list(pci_id, list_head);
+ }
+
+ /* Always delete the list */
+
+ acpi_hw_delete_pci_list(list_head);
+ return_ACPI_STATUS(status);
+}
+
+/*******************************************************************************
+ *
+ * FUNCTION: acpi_hw_build_pci_list
+ *
+ * PARAMETERS: root_pci_device - A handle to a PCI device object. This
+ * object is guaranteed to be a PCI Root
+ * Bridge having a _HID value of either
+ * PNP0A03 or PNP0A08
+ * pci_region - A handle to the PCI configuration space
+ * Operation Region
+ * return_list_head - Where the PCI device list is returned
+ *
+ * RETURN: Status
+ *
+ * DESCRIPTION: Builds a list of devices from the input PCI region up to the
+ * Root PCI device for this namespace subtree.
+ *
+ ******************************************************************************/
+
+static acpi_status
+acpi_hw_build_pci_list(acpi_handle root_pci_device,
+ acpi_handle pci_region,
+ struct acpi_pci_device **return_list_head)
+{
+ acpi_handle current_device;
+ acpi_handle parent_device;
+ acpi_status status;
+ struct acpi_pci_device *list_element;
+ struct acpi_pci_device *list_head = NULL;
+
+ /*
+ * Ascend namespace branch until the root_pci_device is reached, building
+ * a list of device nodes. Loop will exit when either the PCI device is
+ * found, or the root of the namespace is reached.
+ */
+ current_device = pci_region;
+ while (1) {
+ status = acpi_get_parent(current_device, &parent_device);
+ if (ACPI_FAILURE(status)) {
+ return (status);
+ }
+
+ /* Finished when we reach the PCI root device (PNP0A03 or PNP0A08) */
+
+ if (parent_device == root_pci_device) {
+ *return_list_head = list_head;
+ return (AE_OK);
+ }
+
+ list_element = ACPI_ALLOCATE(sizeof(struct acpi_pci_device));
+ if (!list_element) {
+ return (AE_NO_MEMORY);
+ }
+
+ /* Put new element at the head of the list */
+
+ list_element->next = list_head;
+ list_element->device = parent_device;
+ list_head = list_element;
+
+ current_device = parent_device;
+ }
+}
+
+/*******************************************************************************
+ *
+ * FUNCTION: acpi_hw_process_pci_list
+ *
+ * PARAMETERS: pci_id - Initial values for the PCI ID. May be
+ * modified by this function.
+ * list_head - Device list created by
+ * acpi_hw_build_pci_list
+ *
+ * RETURN: Status
+ *
+ * DESCRIPTION: Walk downward through the PCI device list, getting the device
+ * info for each, via the PCI configuration space and updating
+ * the PCI ID as necessary. Deletes the list during traversal.
+ *
+ ******************************************************************************/
+
+static acpi_status
+acpi_hw_process_pci_list(struct acpi_pci_id *pci_id,
+ struct acpi_pci_device *list_head)
+{
+ acpi_status status = AE_OK;
+ struct acpi_pci_device *info;
+ u16 bus_number;
+ u8 is_bridge = TRUE;
+
+ ACPI_FUNCTION_NAME(hw_process_pci_list);
+
+ ACPI_DEBUG_PRINT((ACPI_DB_OPREGION,
+ "Input PciId: Seg %4.4X Bus %4.4X Dev %4.4X Func %4.4X\n",
+ pci_id->segment, pci_id->bus, pci_id->device,
+ pci_id->function));
+
+ bus_number = pci_id->bus;
+
+ /*
+ * Descend down the namespace tree, collecting PCI device, function,
+ * and bus numbers. bus_number is only important for PCI bridges.
+ * Algorithm: As we descend the tree, use the last valid PCI device,
+ * function, and bus numbers that are discovered, and assign them
+ * to the PCI ID for the target device.
+ */
+ info = list_head;
+ while (info) {
+ status = acpi_hw_get_pci_device_info(pci_id, info->device,
+ &bus_number, &is_bridge);
+ if (ACPI_FAILURE(status)) {
+ return_ACPI_STATUS(status);
+ }
+
+ info = info->next;
+ }
+
+ ACPI_DEBUG_PRINT((ACPI_DB_OPREGION,
+ "Output PciId: Seg %4.4X Bus %4.4X Dev %4.4X Func %4.4X "
+ "Status %X BusNumber %X IsBridge %X\n",
+ pci_id->segment, pci_id->bus, pci_id->device,
+ pci_id->function, status, bus_number, is_bridge));
+
+ return_ACPI_STATUS(AE_OK);
+}
+
+/*******************************************************************************
+ *
+ * FUNCTION: acpi_hw_delete_pci_list
+ *
+ * PARAMETERS: list_head - Device list created by
+ * acpi_hw_build_pci_list
+ *
+ * RETURN: None
+ *
+ * DESCRIPTION: Free the entire PCI list.
+ *
+ ******************************************************************************/
+
+static void acpi_hw_delete_pci_list(struct acpi_pci_device *list_head)
+{
+ struct acpi_pci_device *next;
+ struct acpi_pci_device *previous;
+
+ next = list_head;
+ while (next) {
+ previous = next;
+ next = previous->next;
+ ACPI_FREE(previous);
+ }
+}
+
+/*******************************************************************************
+ *
+ * FUNCTION: acpi_hw_get_pci_device_info
+ *
+ * PARAMETERS: pci_id - Initial values for the PCI ID. May be
+ * modified by this function.
+ * pci_device - Handle for the PCI device object
+ * bus_number - Where a PCI bridge bus number is returned
+ * is_bridge - Return value, indicates if this PCI
+ * device is a PCI bridge
+ *
+ * RETURN: Status
+ *
+ * DESCRIPTION: Get the device info for a single PCI device object. Get the
+ * _ADR (contains PCI device and function numbers), and for PCI
+ * bridge devices, get the bus number from PCI configuration
+ * space.
+ *
+ ******************************************************************************/
+
+static acpi_status
+acpi_hw_get_pci_device_info(struct acpi_pci_id *pci_id,
+ acpi_handle pci_device,
+ u16 *bus_number, u8 *is_bridge)
+{
+ acpi_status status;
+ acpi_object_type object_type;
+ u64 return_value;
+ u64 pci_value;
+
+ /* We only care about objects of type Device */
+
+ status = acpi_get_type(pci_device, &object_type);
+ if (ACPI_FAILURE(status)) {
+ return (status);
+ }
+
+ if (object_type != ACPI_TYPE_DEVICE) {
+ return (AE_OK);
+ }
+
+ /* We need an _ADR. Ignore device if not present */
+
+ status = acpi_ut_evaluate_numeric_object(METHOD_NAME__ADR,
+ pci_device, &return_value);
+ if (ACPI_FAILURE(status)) {
+ return (AE_OK);
+ }
+
+ /*
+ * From _ADR, get the PCI Device and Function and
+ * update the PCI ID.
+ */
+ pci_id->device = ACPI_HIWORD(ACPI_LODWORD(return_value));
+ pci_id->function = ACPI_LOWORD(ACPI_LODWORD(return_value));
+
+ /*
+ * If the previous device was a bridge, use the previous
+ * device bus number
+ */
+ if (*is_bridge) {
+ pci_id->bus = *bus_number;
+ }
+
+ /*
+ * Get the bus numbers from PCI Config space:
+ *
+ * First, get the PCI header_type
+ */
+ *is_bridge = FALSE;
+ status = acpi_os_read_pci_configuration(pci_id,
+ PCI_CFG_HEADER_TYPE_REG,
+ &pci_value, 8);
+ if (ACPI_FAILURE(status)) {
+ return (status);
+ }
+
+ /* We only care about bridges (1=pci_bridge, 2=card_bus_bridge) */
+
+ pci_value &= PCI_HEADER_TYPE_MASK;
+
+ if ((pci_value != PCI_TYPE_BRIDGE) &&
+ (pci_value != PCI_TYPE_CARDBUS_BRIDGE)) {
+ return (AE_OK);
+ }
+
+ /* Bridge: Get the Primary bus_number */
+
+ status = acpi_os_read_pci_configuration(pci_id,
+ PCI_CFG_PRIMARY_BUS_NUMBER_REG,
+ &pci_value, 8);
+ if (ACPI_FAILURE(status)) {
+ return (status);
+ }
+
+ *is_bridge = TRUE;
+ pci_id->bus = (u16)pci_value;
+
+ /* Bridge: Get the Secondary bus_number */
+
+ status = acpi_os_read_pci_configuration(pci_id,
+ PCI_CFG_SECONDARY_BUS_NUMBER_REG,
+ &pci_value, 8);
+ if (ACPI_FAILURE(status)) {
+ return (status);
+ }
+
+ *bus_number = (u16)pci_value;
+ return (AE_OK);
+}
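For reference, the _ADR decode in acpi_hw_get_pci_device_info() above packs the PCI device number into the upper 16 bits and the function number into the lower 16 bits of the low dword of the _ADR value. A minimal standalone sketch of the same extraction with plain shifts and masks (hypothetical helper, not part of the patch):

static void example_decode_adr(u64 adr, u32 *device, u32 *function)
{
	u32 low_dword = (u32)(adr & 0xFFFFFFFF);

	*device = (low_dword >> 16) & 0xFFFF;	/* ACPI_HIWORD(ACPI_LODWORD(adr)) */
	*function = low_dword & 0xFFFF;		/* ACPI_LOWORD(ACPI_LODWORD(adr)) */
}

For a device at PCI address 00:1f.3, for example, _ADR evaluates to 0x001F0003, which decodes to device 0x1F and function 0x03.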
diff --git a/drivers/acpi/acpica/nsrepair2.c b/drivers/acpi/acpica/nsrepair2.c
index 4009498fbabd..4ef9f43ea926 100644
--- a/drivers/acpi/acpica/nsrepair2.c
+++ b/drivers/acpi/acpica/nsrepair2.c
@@ -74,10 +74,18 @@ acpi_ns_repair_ALR(struct acpi_predefined_data *data,
union acpi_operand_object **return_object_ptr);
static acpi_status
+acpi_ns_repair_CID(struct acpi_predefined_data *data,
+ union acpi_operand_object **return_object_ptr);
+
+static acpi_status
acpi_ns_repair_FDE(struct acpi_predefined_data *data,
union acpi_operand_object **return_object_ptr);
static acpi_status
+acpi_ns_repair_HID(struct acpi_predefined_data *data,
+ union acpi_operand_object **return_object_ptr);
+
+static acpi_status
acpi_ns_repair_PSS(struct acpi_predefined_data *data,
union acpi_operand_object **return_object_ptr);
@@ -108,8 +116,10 @@ acpi_ns_sort_list(union acpi_operand_object **elements,
* As necessary:
*
* _ALR: Sort the list ascending by ambient_illuminance
+ * _CID: Strings: uppercase all, remove any leading asterisk
* _FDE: Convert Buffer of BYTEs to a Buffer of DWORDs
* _GTM: Convert Buffer of BYTEs to a Buffer of DWORDs
+ * _HID: Strings: uppercase all, remove any leading asterisk
* _PSS: Sort the list descending by Power
* _TSS: Sort the list descending by Power
*
@@ -122,8 +132,10 @@ acpi_ns_sort_list(union acpi_operand_object **elements,
*/
static const struct acpi_repair_info acpi_ns_repairable_names[] = {
{"_ALR", acpi_ns_repair_ALR},
+ {"_CID", acpi_ns_repair_CID},
{"_FDE", acpi_ns_repair_FDE},
{"_GTM", acpi_ns_repair_FDE}, /* _GTM has same repair as _FDE */
+ {"_HID", acpi_ns_repair_HID},
{"_PSS", acpi_ns_repair_PSS},
{"_TSS", acpi_ns_repair_TSS},
{{0, 0, 0, 0}, NULL} /* Table terminator */
@@ -321,6 +333,157 @@ acpi_ns_repair_FDE(struct acpi_predefined_data *data,
/******************************************************************************
*
+ * FUNCTION: acpi_ns_repair_CID
+ *
+ * PARAMETERS: Data - Pointer to validation data structure
+ * return_object_ptr - Pointer to the object returned from the
+ * evaluation of a method or object
+ *
+ * RETURN: Status. AE_OK if object is OK or was repaired successfully
+ *
+ * DESCRIPTION: Repair for the _CID object. If a string, ensure that all
+ * letters are uppercase and that there is no leading asterisk.
+ * If a Package, ensure same for all string elements.
+ *
+ *****************************************************************************/
+
+static acpi_status
+acpi_ns_repair_CID(struct acpi_predefined_data *data,
+ union acpi_operand_object **return_object_ptr)
+{
+ acpi_status status;
+ union acpi_operand_object *return_object = *return_object_ptr;
+ union acpi_operand_object **element_ptr;
+ union acpi_operand_object *original_element;
+ u16 original_ref_count;
+ u32 i;
+
+ /* Check for _CID as a simple string */
+
+ if (return_object->common.type == ACPI_TYPE_STRING) {
+ status = acpi_ns_repair_HID(data, return_object_ptr);
+ return (status);
+ }
+
+ /* Exit if not a Package */
+
+ if (return_object->common.type != ACPI_TYPE_PACKAGE) {
+ return (AE_OK);
+ }
+
+ /* Examine each element of the _CID package */
+
+ element_ptr = return_object->package.elements;
+ for (i = 0; i < return_object->package.count; i++) {
+ original_element = *element_ptr;
+ original_ref_count = original_element->common.reference_count;
+
+ status = acpi_ns_repair_HID(data, element_ptr);
+ if (ACPI_FAILURE(status)) {
+ return (status);
+ }
+
+ /* Take care with reference counts */
+
+ if (original_element != *element_ptr) {
+
+ /* Element was replaced */
+
+ (*element_ptr)->common.reference_count =
+ original_ref_count;
+
+ acpi_ut_remove_reference(original_element);
+ }
+
+ element_ptr++;
+ }
+
+ return (AE_OK);
+}
+
+/******************************************************************************
+ *
+ * FUNCTION: acpi_ns_repair_HID
+ *
+ * PARAMETERS: Data - Pointer to validation data structure
+ * return_object_ptr - Pointer to the object returned from the
+ * evaluation of a method or object
+ *
+ * RETURN: Status. AE_OK if object is OK or was repaired successfully
+ *
+ * DESCRIPTION: Repair for the _HID object. If a string, ensure that all
+ * letters are uppercase and that there is no leading asterisk.
+ *
+ *****************************************************************************/
+
+static acpi_status
+acpi_ns_repair_HID(struct acpi_predefined_data *data,
+ union acpi_operand_object **return_object_ptr)
+{
+ union acpi_operand_object *return_object = *return_object_ptr;
+ union acpi_operand_object *new_string;
+ char *source;
+ char *dest;
+
+ ACPI_FUNCTION_NAME(ns_repair_HID);
+
+ /* We only care about string _HID objects (not integers) */
+
+ if (return_object->common.type != ACPI_TYPE_STRING) {
+ return (AE_OK);
+ }
+
+ if (return_object->string.length == 0) {
+ ACPI_WARN_PREDEFINED((AE_INFO, data->pathname, data->node_flags,
+ "Invalid zero-length _HID or _CID string"));
+
+ /* Return AE_OK anyway, let driver handle it */
+
+ data->flags |= ACPI_OBJECT_REPAIRED;
+ return (AE_OK);
+ }
+
+ /* It is simplest to always create a new string object */
+
+ new_string = acpi_ut_create_string_object(return_object->string.length);
+ if (!new_string) {
+ return (AE_NO_MEMORY);
+ }
+
+ /*
+ * Remove a leading asterisk if present. For some unknown reason, there
+ * are many machines in the field that contain IDs like this.
+ *
+ * Examples: "*PNP0C03", "*ACPI0003"
+ */
+ source = return_object->string.pointer;
+ if (*source == '*') {
+ source++;
+ new_string->string.length--;
+
+ ACPI_DEBUG_PRINT((ACPI_DB_REPAIR,
+ "%s: Removed invalid leading asterisk\n",
+ data->pathname));
+ }
+
+ /*
+ * Copy and uppercase the string. From the ACPI specification:
+ *
+ * A valid PNP ID must be of the form "AAA####" where A is an uppercase
+ * letter and # is a hex digit. A valid ACPI ID must be of the form
+ * "ACPI####" where # is a hex digit.
+ */
+ for (dest = new_string->string.pointer; *source; dest++, source++) {
+ *dest = (char)ACPI_TOUPPER(*source);
+ }
+
+ acpi_ut_remove_reference(return_object);
+ *return_object_ptr = new_string;
+ return (AE_OK);
+}
+
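As a concrete illustration of the repair above, a BIOS-provided ID such as "*pnp0c03" is rewritten to "PNP0C03": the invalid leading asterisk is dropped and the remaining characters are uppercased. A standalone sketch of that transform (hypothetical helper, not part of the patch; the real code operates on operand-object strings and uses ACPI_TOUPPER):

static void example_repair_id_string(const char *source, char *dest)
{
	char c;

	if (*source == '*') {	/* Drop the invalid leading asterisk */
		source++;
	}

	while ((c = *source++) != '\0') {	/* Copy and uppercase */
		if (c >= 'a' && c <= 'z') {
			c -= ('a' - 'A');
		}
		*dest++ = c;
	}

	*dest = '\0';
}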
+/******************************************************************************
+ *
* FUNCTION: acpi_ns_repair_TSS
*
* PARAMETERS: Data - Pointer to validation data structure
diff --git a/drivers/acpi/acpica/nsutils.c b/drivers/acpi/acpica/nsutils.c
index e1add3491b04..a7d6ad9c111b 100644
--- a/drivers/acpi/acpica/nsutils.c
+++ b/drivers/acpi/acpica/nsutils.c
@@ -60,104 +60,6 @@ acpi_name acpi_ns_find_parent_name(struct acpi_namespace_node *node_to_search);
/*******************************************************************************
*
- * FUNCTION: acpi_ns_report_error
- *
- * PARAMETERS: module_name - Caller's module name (for error output)
- * line_number - Caller's line number (for error output)
- * internal_name - Name or path of the namespace node
- * lookup_status - Exception code from NS lookup
- *
- * RETURN: None
- *
- * DESCRIPTION: Print warning message with full pathname
- *
- ******************************************************************************/
-
-void
-acpi_ns_report_error(const char *module_name,
- u32 line_number,
- const char *internal_name, acpi_status lookup_status)
-{
- acpi_status status;
- u32 bad_name;
- char *name = NULL;
-
- acpi_os_printf("ACPI Error (%s-%04d): ", module_name, line_number);
-
- if (lookup_status == AE_BAD_CHARACTER) {
-
- /* There is a non-ascii character in the name */
-
- ACPI_MOVE_32_TO_32(&bad_name,
- ACPI_CAST_PTR(u32, internal_name));
- acpi_os_printf("[0x%4.4X] (NON-ASCII)", bad_name);
- } else {
- /* Convert path to external format */
-
- status = acpi_ns_externalize_name(ACPI_UINT32_MAX,
- internal_name, NULL, &name);
-
- /* Print target name */
-
- if (ACPI_SUCCESS(status)) {
- acpi_os_printf("[%s]", name);
- } else {
- acpi_os_printf("[COULD NOT EXTERNALIZE NAME]");
- }
-
- if (name) {
- ACPI_FREE(name);
- }
- }
-
- acpi_os_printf(" Namespace lookup failure, %s\n",
- acpi_format_exception(lookup_status));
-}
-
-/*******************************************************************************
- *
- * FUNCTION: acpi_ns_report_method_error
- *
- * PARAMETERS: module_name - Caller's module name (for error output)
- * line_number - Caller's line number (for error output)
- * Message - Error message to use on failure
- * prefix_node - Prefix relative to the path
- * Path - Path to the node (optional)
- * method_status - Execution status
- *
- * RETURN: None
- *
- * DESCRIPTION: Print warning message with full pathname
- *
- ******************************************************************************/
-
-void
-acpi_ns_report_method_error(const char *module_name,
- u32 line_number,
- const char *message,
- struct acpi_namespace_node *prefix_node,
- const char *path, acpi_status method_status)
-{
- acpi_status status;
- struct acpi_namespace_node *node = prefix_node;
-
- acpi_os_printf("ACPI Error (%s-%04d): ", module_name, line_number);
-
- if (path) {
- status =
- acpi_ns_get_node(prefix_node, path, ACPI_NS_NO_UPSEARCH,
- &node);
- if (ACPI_FAILURE(status)) {
- acpi_os_printf("[Could not get node by pathname]");
- }
- }
-
- acpi_ns_print_node_pathname(node, message);
- acpi_os_printf(", %s\n", acpi_format_exception(method_status));
-}
-
-/*******************************************************************************
- *
* FUNCTION: acpi_ns_print_node_pathname
*
* PARAMETERS: Node - Object
diff --git a/drivers/acpi/acpica/tbfadt.c b/drivers/acpi/acpica/tbfadt.c
index 1728cb9bf600..d2ff4325c427 100644
--- a/drivers/acpi/acpica/tbfadt.c
+++ b/drivers/acpi/acpica/tbfadt.c
@@ -49,7 +49,7 @@
ACPI_MODULE_NAME("tbfadt")
/* Local prototypes */
-static inline void
+static ACPI_INLINE void
acpi_tb_init_generic_address(struct acpi_generic_address *generic_address,
u8 space_id, u8 byte_width, u64 address);
@@ -181,7 +181,7 @@ static struct acpi_fadt_pm_info fadt_pm_info_table[] = {
*
******************************************************************************/
-static inline void
+static ACPI_INLINE void
acpi_tb_init_generic_address(struct acpi_generic_address *generic_address,
u8 space_id, u8 byte_width, u64 address)
{
diff --git a/drivers/acpi/acpica/utdebug.c b/drivers/acpi/acpica/utdebug.c
index 983510640059..f21c486929a5 100644
--- a/drivers/acpi/acpica/utdebug.c
+++ b/drivers/acpi/acpica/utdebug.c
@@ -179,9 +179,8 @@ acpi_debug_print(u32 requested_debug_level,
if (thread_id != acpi_gbl_prev_thread_id) {
if (ACPI_LV_THREADS & acpi_dbg_level) {
acpi_os_printf
- ("\n**** Context Switch from TID %p to TID %p ****\n\n",
- ACPI_CAST_PTR(void, acpi_gbl_prev_thread_id),
- ACPI_CAST_PTR(void, thread_id));
+ ("\n**** Context Switch from TID %u to TID %u ****\n\n",
+ (u32)acpi_gbl_prev_thread_id, (u32)thread_id);
}
acpi_gbl_prev_thread_id = thread_id;
@@ -194,7 +193,7 @@ acpi_debug_print(u32 requested_debug_level,
acpi_os_printf("%8s-%04ld ", module_name, line_number);
if (ACPI_LV_THREADS & acpi_dbg_level) {
- acpi_os_printf("[%p] ", ACPI_CAST_PTR(void, thread_id));
+ acpi_os_printf("[%u] ", (u32)thread_id);
}
acpi_os_printf("[%02ld] %-22.22s: ",
diff --git a/drivers/acpi/acpica/uteval.c b/drivers/acpi/acpica/uteval.c
index 6dfdeb653490..22f59ef604e0 100644
--- a/drivers/acpi/acpica/uteval.c
+++ b/drivers/acpi/acpica/uteval.c
@@ -48,153 +48,6 @@
#define _COMPONENT ACPI_UTILITIES
ACPI_MODULE_NAME("uteval")
-/*
- * Strings supported by the _OSI predefined (internal) method.
- *
- * March 2009: Removed "Linux" as this host no longer wants to respond true
- * for this string. Basically, the only safe OS strings are windows-related
- * and in many or most cases represent the only test path within the
- * BIOS-provided ASL code.
- *
- * The second element of each entry is used to track the newest version of
- * Windows that the BIOS has requested.
- */
-static struct acpi_interface_info acpi_interfaces_supported[] = {
- /* Operating System Vendor Strings */
-
- {"Windows 2000", ACPI_OSI_WIN_2000}, /* Windows 2000 */
- {"Windows 2001", ACPI_OSI_WIN_XP}, /* Windows XP */
- {"Windows 2001 SP1", ACPI_OSI_WIN_XP_SP1}, /* Windows XP SP1 */
- {"Windows 2001.1", ACPI_OSI_WINSRV_2003}, /* Windows Server 2003 */
- {"Windows 2001 SP2", ACPI_OSI_WIN_XP_SP2}, /* Windows XP SP2 */
- {"Windows 2001.1 SP1", ACPI_OSI_WINSRV_2003_SP1}, /* Windows Server 2003 SP1 - Added 03/2006 */
- {"Windows 2006", ACPI_OSI_WIN_VISTA}, /* Windows Vista - Added 03/2006 */
- {"Windows 2006.1", ACPI_OSI_WINSRV_2008}, /* Windows Server 2008 - Added 09/2009 */
- {"Windows 2006 SP1", ACPI_OSI_WIN_VISTA_SP1}, /* Windows Vista SP1 - Added 09/2009 */
- {"Windows 2009", ACPI_OSI_WIN_7}, /* Windows 7 and Server 2008 R2 - Added 09/2009 */
-
- /* Feature Group Strings */
-
- {"Extended Address Space Descriptor", 0}
-
- /*
- * All "optional" feature group strings (features that are implemented
- * by the host) should be implemented in the host version of
- * acpi_os_validate_interface and should not be added here.
- */
-};
-
-/*******************************************************************************
- *
- * FUNCTION: acpi_ut_osi_implementation
- *
- * PARAMETERS: walk_state - Current walk state
- *
- * RETURN: Status
- *
- * DESCRIPTION: Implementation of the _OSI predefined control method
- *
- ******************************************************************************/
-
-acpi_status acpi_ut_osi_implementation(struct acpi_walk_state *walk_state)
-{
- acpi_status status;
- union acpi_operand_object *string_desc;
- union acpi_operand_object *return_desc;
- u32 return_value;
- u32 i;
-
- ACPI_FUNCTION_TRACE(ut_osi_implementation);
-
- /* Validate the string input argument */
-
- string_desc = walk_state->arguments[0].object;
- if (!string_desc || (string_desc->common.type != ACPI_TYPE_STRING)) {
- return_ACPI_STATUS(AE_TYPE);
- }
-
- /* Create a return object */
-
- return_desc = acpi_ut_create_internal_object(ACPI_TYPE_INTEGER);
- if (!return_desc) {
- return_ACPI_STATUS(AE_NO_MEMORY);
- }
-
- /* Default return value is 0, NOT SUPPORTED */
-
- return_value = 0;
-
- /* Compare input string to static table of supported interfaces */
-
- for (i = 0; i < ACPI_ARRAY_LENGTH(acpi_interfaces_supported); i++) {
- if (!ACPI_STRCMP(string_desc->string.pointer,
- acpi_interfaces_supported[i].name)) {
- /*
- * The interface is supported.
- * Update the osi_data if necessary. We keep track of the latest
- * version of Windows that has been requested by the BIOS.
- */
- if (acpi_interfaces_supported[i].value >
- acpi_gbl_osi_data) {
- acpi_gbl_osi_data =
- acpi_interfaces_supported[i].value;
- }
-
- return_value = ACPI_UINT32_MAX;
- goto exit;
- }
- }
-
- /*
- * Did not match the string in the static table, call the host OSL to
- * check for a match with one of the optional strings (such as
- * "Module Device", "3.0 Thermal Model", etc.)
- */
- status = acpi_os_validate_interface(string_desc->string.pointer);
- if (ACPI_SUCCESS(status)) {
-
- /* The interface is supported */
-
- return_value = ACPI_UINT32_MAX;
- }
-
-exit:
- ACPI_DEBUG_PRINT_RAW ((ACPI_DB_INFO,
- "ACPI: BIOS _OSI(%s) is %ssupported\n",
- string_desc->string.pointer, return_value == 0 ? "not " : ""));
-
- /* Complete the return value */
-
- return_desc->integer.value = return_value;
- walk_state->return_desc = return_desc;
- return_ACPI_STATUS (AE_OK);
-}
-
-/*******************************************************************************
- *
- * FUNCTION: acpi_osi_invalidate
- *
- * PARAMETERS: interface_string
- *
- * RETURN: Status
- *
- * DESCRIPTION: invalidate string in pre-defiend _OSI string list
- *
- ******************************************************************************/
-
-acpi_status acpi_osi_invalidate(char *interface)
-{
- int i;
-
- for (i = 0; i < ACPI_ARRAY_LENGTH(acpi_interfaces_supported); i++) {
- if (!ACPI_STRCMP(interface, acpi_interfaces_supported[i].name)) {
- *acpi_interfaces_supported[i].name = '\0';
- return AE_OK;
- }
- }
- return AE_NOT_FOUND;
-}
-
/*******************************************************************************
*
* FUNCTION: acpi_ut_evaluate_object
diff --git a/drivers/acpi/acpica/utglobal.c b/drivers/acpi/acpica/utglobal.c
index 0558747579ef..e87bc6760be6 100644
--- a/drivers/acpi/acpica/utglobal.c
+++ b/drivers/acpi/acpica/utglobal.c
@@ -154,14 +154,16 @@ ACPI_EXPORT_SYMBOL(acpi_format_exception)
* 1) _SB_ is defined to be a device to allow \_SB_._INI to be run
* during the initialization sequence.
* 2) _TZ_ is defined to be a thermal zone in order to allow ASL code to
- * perform a Notify() operation on it.
+ * perform a Notify() operation on it. 09/2010: Changed to type Device.
+ * This still allows notifies, but does not confuse host code that
+ * searches for valid thermal_zone objects.
*/
const struct acpi_predefined_names acpi_gbl_pre_defined_names[] = {
{"_GPE", ACPI_TYPE_LOCAL_SCOPE, NULL},
{"_PR_", ACPI_TYPE_LOCAL_SCOPE, NULL},
{"_SB_", ACPI_TYPE_DEVICE, NULL},
{"_SI_", ACPI_TYPE_LOCAL_SCOPE, NULL},
- {"_TZ_", ACPI_TYPE_THERMAL, NULL},
+ {"_TZ_", ACPI_TYPE_DEVICE, NULL},
{"_REV", ACPI_TYPE_INTEGER, (char *)ACPI_CA_SUPPORT_LEVEL},
{"_OS_", ACPI_TYPE_STRING, ACPI_OS_NAME},
{"_GL_", ACPI_TYPE_MUTEX, (char *)1},
@@ -766,6 +768,7 @@ acpi_status acpi_ut_init_globals(void)
acpi_gbl_gpe_fadt_blocks[0] = NULL;
acpi_gbl_gpe_fadt_blocks[1] = NULL;
acpi_current_gpe_count = 0;
+ acpi_all_gpes_initialized = FALSE;
/* Global handlers */
@@ -774,6 +777,7 @@ acpi_status acpi_ut_init_globals(void)
acpi_gbl_exception_handler = NULL;
acpi_gbl_init_handler = NULL;
acpi_gbl_table_handler = NULL;
+ acpi_gbl_interface_handler = NULL;
/* Global Lock support */
@@ -800,6 +804,7 @@ acpi_status acpi_ut_init_globals(void)
acpi_gbl_debugger_configuration = DEBUGGER_THREADING;
acpi_gbl_db_output_flags = ACPI_DB_CONSOLE_OUTPUT;
acpi_gbl_osi_data = 0;
+ acpi_gbl_osi_mutex = NULL;
/* Hardware oriented */
diff --git a/drivers/acpi/acpica/utids.c b/drivers/acpi/acpica/utids.c
index 1397fadd0d4b..d2906328535d 100644
--- a/drivers/acpi/acpica/utids.c
+++ b/drivers/acpi/acpica/utids.c
@@ -48,42 +48,6 @@
#define _COMPONENT ACPI_UTILITIES
ACPI_MODULE_NAME("utids")
-/* Local prototypes */
-static void acpi_ut_copy_id_string(char *destination, char *source);
-
-/*******************************************************************************
- *
- * FUNCTION: acpi_ut_copy_id_string
- *
- * PARAMETERS: Destination - Where to copy the string
- * Source - Source string
- *
- * RETURN: None
- *
- * DESCRIPTION: Copies an ID string for the _HID, _CID, and _UID methods.
- * Performs removal of a leading asterisk if present -- workaround
- * for a known issue on a bunch of machines.
- *
- ******************************************************************************/
-
-static void acpi_ut_copy_id_string(char *destination, char *source)
-{
-
- /*
- * Workaround for ID strings that have a leading asterisk. This construct
- * is not allowed by the ACPI specification (ID strings must be
- * alphanumeric), but enough existing machines have this embedded in their
- * ID strings that the following code is useful.
- */
- if (*source == '*') {
- source++;
- }
-
- /* Do the actual copy */
-
- ACPI_STRCPY(destination, source);
-}
-
/*******************************************************************************
*
* FUNCTION: acpi_ut_execute_HID
@@ -101,7 +65,6 @@ static void acpi_ut_copy_id_string(char *destination, char *source)
* NOTE: Internal function, no parameter validation
*
******************************************************************************/
-
acpi_status
acpi_ut_execute_HID(struct acpi_namespace_node *device_node,
struct acpica_device_id **return_id)
@@ -147,7 +110,7 @@ acpi_ut_execute_HID(struct acpi_namespace_node *device_node,
if (obj_desc->common.type == ACPI_TYPE_INTEGER) {
acpi_ex_eisa_id_to_string(hid->string, obj_desc->integer.value);
} else {
- acpi_ut_copy_id_string(hid->string, obj_desc->string.pointer);
+ ACPI_STRCPY(hid->string, obj_desc->string.pointer);
}
hid->length = length;
@@ -224,7 +187,7 @@ acpi_ut_execute_UID(struct acpi_namespace_node *device_node,
if (obj_desc->common.type == ACPI_TYPE_INTEGER) {
acpi_ex_integer_to_string(uid->string, obj_desc->integer.value);
} else {
- acpi_ut_copy_id_string(uid->string, obj_desc->string.pointer);
+ ACPI_STRCPY(uid->string, obj_desc->string.pointer);
}
uid->length = length;
@@ -357,8 +320,8 @@ acpi_ut_execute_CID(struct acpi_namespace_node *device_node,
/* Copy the String CID from the returned object */
- acpi_ut_copy_id_string(next_id_string,
- cid_objects[i]->string.pointer);
+ ACPI_STRCPY(next_id_string,
+ cid_objects[i]->string.pointer);
length = cid_objects[i]->string.length + 1;
}
diff --git a/drivers/acpi/acpica/utinit.c b/drivers/acpi/acpica/utinit.c
index a39c93dac719..c1b1c803ea9b 100644
--- a/drivers/acpi/acpica/utinit.c
+++ b/drivers/acpi/acpica/utinit.c
@@ -117,6 +117,10 @@ void acpi_ut_subsystem_shutdown(void)
/* Close the acpi_event Handling */
acpi_ev_terminate();
+
+ /* Delete any dynamic _OSI interfaces */
+
+ acpi_ut_interface_terminate();
#endif
/* Close the Namespace */
diff --git a/drivers/acpi/acpica/utmath.c b/drivers/acpi/acpica/utmath.c
index 35059a14eb72..49cf7b7fd816 100644
--- a/drivers/acpi/acpica/utmath.c
+++ b/drivers/acpi/acpica/utmath.c
@@ -48,11 +48,27 @@
ACPI_MODULE_NAME("utmath")
/*
- * Support for double-precision integer divide. This code is included here
- * in order to support kernel environments where the double-precision math
- * library is not available.
+ * Optional support for 64-bit double-precision integer divide. This code
+ * is configurable and is implemented in order to support 32-bit kernel
+ * environments where a 64-bit double-precision math library is not available.
+ *
+ * Support for a more normal 64-bit divide/modulo (with check for a divide-
+ * by-zero) appears after this optional section of code.
*/
#ifndef ACPI_USE_NATIVE_DIVIDE
+/* Structures used only for 64-bit divide */
+typedef struct uint64_struct {
+ u32 lo;
+ u32 hi;
+
+} uint64_struct;
+
+typedef union uint64_overlay {
+ u64 full;
+ struct uint64_struct part;
+
+} uint64_overlay;
+
/*******************************************************************************
*
* FUNCTION: acpi_ut_short_divide
@@ -69,6 +85,7 @@ ACPI_MODULE_NAME("utmath")
* 32-bit remainder.
*
******************************************************************************/
+
acpi_status
acpi_ut_short_divide(u64 dividend,
u32 divisor, u64 *out_quotient, u32 *out_remainder)
diff --git a/drivers/acpi/acpica/utmisc.c b/drivers/acpi/acpica/utmisc.c
index e8d0724ee403..c7d0e05ef5a4 100644
--- a/drivers/acpi/acpica/utmisc.c
+++ b/drivers/acpi/acpica/utmisc.c
@@ -50,11 +50,6 @@
#define _COMPONENT ACPI_UTILITIES
ACPI_MODULE_NAME("utmisc")
-/*
- * Common suffix for messages
- */
-#define ACPI_COMMON_MSG_SUFFIX \
- acpi_os_printf(" (%8.8X/%s-%u)\n", ACPI_CA_VERSION, module_name, line_number)
/*******************************************************************************
*
* FUNCTION: acpi_ut_validate_exception
@@ -1044,160 +1039,3 @@ acpi_ut_walk_package_tree(union acpi_operand_object * source_object,
return_ACPI_STATUS(AE_AML_INTERNAL);
}
-
-/*******************************************************************************
- *
- * FUNCTION: acpi_error, acpi_exception, acpi_warning, acpi_info
- *
- * PARAMETERS: module_name - Caller's module name (for error output)
- * line_number - Caller's line number (for error output)
- * Format - Printf format string + additional args
- *
- * RETURN: None
- *
- * DESCRIPTION: Print message with module/line/version info
- *
- ******************************************************************************/
-
-void ACPI_INTERNAL_VAR_XFACE
-acpi_error(const char *module_name, u32 line_number, const char *format, ...)
-{
- va_list args;
-
- acpi_os_printf("ACPI Error: ");
-
- va_start(args, format);
- acpi_os_vprintf(format, args);
- ACPI_COMMON_MSG_SUFFIX;
- va_end(args);
-}
-
-void ACPI_INTERNAL_VAR_XFACE
-acpi_exception(const char *module_name,
- u32 line_number, acpi_status status, const char *format, ...)
-{
- va_list args;
-
- acpi_os_printf("ACPI Exception: %s, ", acpi_format_exception(status));
-
- va_start(args, format);
- acpi_os_vprintf(format, args);
- ACPI_COMMON_MSG_SUFFIX;
- va_end(args);
-}
-
-void ACPI_INTERNAL_VAR_XFACE
-acpi_warning(const char *module_name, u32 line_number, const char *format, ...)
-{
- va_list args;
-
- acpi_os_printf("ACPI Warning: ");
-
- va_start(args, format);
- acpi_os_vprintf(format, args);
- ACPI_COMMON_MSG_SUFFIX;
- va_end(args);
-}
-
-void ACPI_INTERNAL_VAR_XFACE
-acpi_info(const char *module_name, u32 line_number, const char *format, ...)
-{
- va_list args;
-
- acpi_os_printf("ACPI: ");
-
- va_start(args, format);
- acpi_os_vprintf(format, args);
- acpi_os_printf("\n");
- va_end(args);
-}
-
-ACPI_EXPORT_SYMBOL(acpi_error)
-ACPI_EXPORT_SYMBOL(acpi_exception)
-ACPI_EXPORT_SYMBOL(acpi_warning)
-ACPI_EXPORT_SYMBOL(acpi_info)
-
-/*******************************************************************************
- *
- * FUNCTION: acpi_ut_predefined_warning
- *
- * PARAMETERS: module_name - Caller's module name (for error output)
- * line_number - Caller's line number (for error output)
- * Pathname - Full pathname to the node
- * node_flags - From Namespace node for the method/object
- * Format - Printf format string + additional args
- *
- * RETURN: None
- *
- * DESCRIPTION: Warnings for the predefined validation module. Messages are
- * only emitted the first time a problem with a particular
- * method/object is detected. This prevents a flood of error
- * messages for methods that are repeatedly evaluated.
- *
-******************************************************************************/
-
-void ACPI_INTERNAL_VAR_XFACE
-acpi_ut_predefined_warning(const char *module_name,
- u32 line_number,
- char *pathname,
- u8 node_flags, const char *format, ...)
-{
- va_list args;
-
- /*
- * Warning messages for this method/object will be disabled after the
- * first time a validation fails or an object is successfully repaired.
- */
- if (node_flags & ANOBJ_EVALUATED) {
- return;
- }
-
- acpi_os_printf("ACPI Warning for %s: ", pathname);
-
- va_start(args, format);
- acpi_os_vprintf(format, args);
- ACPI_COMMON_MSG_SUFFIX;
- va_end(args);
-}
-
-/*******************************************************************************
- *
- * FUNCTION: acpi_ut_predefined_info
- *
- * PARAMETERS: module_name - Caller's module name (for error output)
- * line_number - Caller's line number (for error output)
- * Pathname - Full pathname to the node
- * node_flags - From Namespace node for the method/object
- * Format - Printf format string + additional args
- *
- * RETURN: None
- *
- * DESCRIPTION: Info messages for the predefined validation module. Messages
- * are only emitted the first time a problem with a particular
- * method/object is detected. This prevents a flood of
- * messages for methods that are repeatedly evaluated.
- *
- ******************************************************************************/
-
-void ACPI_INTERNAL_VAR_XFACE
-acpi_ut_predefined_info(const char *module_name,
- u32 line_number,
- char *pathname, u8 node_flags, const char *format, ...)
-{
- va_list args;
-
- /*
- * Warning messages for this method/object will be disabled after the
- * first time a validation fails or an object is successfully repaired.
- */
- if (node_flags & ANOBJ_EVALUATED) {
- return;
- }
-
- acpi_os_printf("ACPI Info for %s: ", pathname);
-
- va_start(args, format);
- acpi_os_vprintf(format, args);
- ACPI_COMMON_MSG_SUFFIX;
- va_end(args);
-}
diff --git a/drivers/acpi/acpica/utmutex.c b/drivers/acpi/acpica/utmutex.c
index f5cca3a1300c..d9efa495b433 100644
--- a/drivers/acpi/acpica/utmutex.c
+++ b/drivers/acpi/acpica/utmutex.c
@@ -86,6 +86,12 @@ acpi_status acpi_ut_mutex_initialize(void)
spin_lock_init(acpi_gbl_gpe_lock);
spin_lock_init(acpi_gbl_hardware_lock);
+ /* Mutex for _OSI support */
+ status = acpi_os_create_mutex(&acpi_gbl_osi_mutex);
+ if (ACPI_FAILURE(status)) {
+ return_ACPI_STATUS(status);
+ }
+
/* Create the reader/writer lock for namespace access */
status = acpi_ut_create_rw_lock(&acpi_gbl_namespace_rw_lock);
@@ -117,6 +123,8 @@ void acpi_ut_mutex_terminate(void)
acpi_ut_delete_mutex(i);
}
+ acpi_os_delete_mutex(acpi_gbl_osi_mutex);
+
/* Delete the spinlocks */
acpi_os_delete_lock(acpi_gbl_gpe_lock);
@@ -220,18 +228,17 @@ acpi_status acpi_ut_acquire_mutex(acpi_mutex_handle mutex_id)
if (acpi_gbl_mutex_info[i].thread_id == this_thread_id) {
if (i == mutex_id) {
ACPI_ERROR((AE_INFO,
- "Mutex [%s] already acquired by this thread [%p]",
+ "Mutex [%s] already acquired by this thread [%u]",
acpi_ut_get_mutex_name
(mutex_id),
- ACPI_CAST_PTR(void,
- this_thread_id)));
+ (u32)this_thread_id));
return (AE_ALREADY_ACQUIRED);
}
ACPI_ERROR((AE_INFO,
- "Invalid acquire order: Thread %p owns [%s], wants [%s]",
- ACPI_CAST_PTR(void, this_thread_id),
+ "Invalid acquire order: Thread %u owns [%s], wants [%s]",
+ (u32)this_thread_id,
acpi_ut_get_mutex_name(i),
acpi_ut_get_mutex_name(mutex_id)));
@@ -242,24 +249,24 @@ acpi_status acpi_ut_acquire_mutex(acpi_mutex_handle mutex_id)
#endif
ACPI_DEBUG_PRINT((ACPI_DB_MUTEX,
- "Thread %p attempting to acquire Mutex [%s]\n",
- ACPI_CAST_PTR(void, this_thread_id),
+ "Thread %u attempting to acquire Mutex [%s]\n",
+ (u32)this_thread_id,
acpi_ut_get_mutex_name(mutex_id)));
status = acpi_os_acquire_mutex(acpi_gbl_mutex_info[mutex_id].mutex,
ACPI_WAIT_FOREVER);
if (ACPI_SUCCESS(status)) {
ACPI_DEBUG_PRINT((ACPI_DB_MUTEX,
- "Thread %p acquired Mutex [%s]\n",
- ACPI_CAST_PTR(void, this_thread_id),
+ "Thread %u acquired Mutex [%s]\n",
+ (u32)this_thread_id,
acpi_ut_get_mutex_name(mutex_id)));
acpi_gbl_mutex_info[mutex_id].use_count++;
acpi_gbl_mutex_info[mutex_id].thread_id = this_thread_id;
} else {
ACPI_EXCEPTION((AE_INFO, status,
- "Thread %p could not acquire Mutex [0x%X]",
- ACPI_CAST_PTR(void, this_thread_id), mutex_id));
+ "Thread %u could not acquire Mutex [0x%X]",
+ (u32)this_thread_id, mutex_id));
}
return (status);
@@ -279,10 +286,14 @@ acpi_status acpi_ut_acquire_mutex(acpi_mutex_handle mutex_id)
acpi_status acpi_ut_release_mutex(acpi_mutex_handle mutex_id)
{
+ acpi_thread_id this_thread_id;
+
ACPI_FUNCTION_NAME(ut_release_mutex);
- ACPI_DEBUG_PRINT((ACPI_DB_MUTEX, "Thread %p releasing Mutex [%s]\n",
- ACPI_CAST_PTR(void, acpi_os_get_thread_id()),
+ this_thread_id = acpi_os_get_thread_id();
+
+ ACPI_DEBUG_PRINT((ACPI_DB_MUTEX, "Thread %u releasing Mutex [%s]\n",
+ (u32)this_thread_id,
acpi_ut_get_mutex_name(mutex_id)));
if (mutex_id > ACPI_MAX_MUTEX) {
diff --git a/drivers/acpi/acpica/utosi.c b/drivers/acpi/acpica/utosi.c
new file mode 100644
index 000000000000..18c59a85fdca
--- /dev/null
+++ b/drivers/acpi/acpica/utosi.c
@@ -0,0 +1,380 @@
+/******************************************************************************
+ *
+ * Module Name: utosi - Support for the _OSI predefined control method
+ *
+ *****************************************************************************/
+
+/*
+ * Copyright (C) 2000 - 2010, Intel Corp.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions, and the following disclaimer,
+ * without modification.
+ * 2. Redistributions in binary form must reproduce at minimum a disclaimer
+ * substantially similar to the "NO WARRANTY" disclaimer below
+ * ("Disclaimer") and any redistribution must be conditioned upon
+ * including a substantially similar Disclaimer requirement for further
+ * binary redistribution.
+ * 3. Neither the names of the above-listed copyright holders nor the names
+ * of any contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * Alternatively, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") version 2 as published by the Free
+ * Software Foundation.
+ *
+ * NO WARRANTY
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
+ * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
+ * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGES.
+ */
+
+#include <acpi/acpi.h>
+#include "accommon.h"
+
+#define _COMPONENT ACPI_UTILITIES
+ACPI_MODULE_NAME("utosi")
+
+/*
+ * Strings supported by the _OSI predefined control method (which is
+ * implemented internally within this module).
+ *
+ * March 2009: Removed "Linux" as this host no longer wants to respond true
+ * for this string. Basically, the only safe OS strings are windows-related
+ * and in many or most cases represent the only test path within the
+ * BIOS-provided ASL code.
+ *
+ * The last element of each entry is used to track the newest version of
+ * Windows that the BIOS has requested.
+ */
+static struct acpi_interface_info acpi_default_supported_interfaces[] = {
+ /* Operating System Vendor Strings */
+
+ {"Windows 2000", NULL, 0, ACPI_OSI_WIN_2000}, /* Windows 2000 */
+ {"Windows 2001", NULL, 0, ACPI_OSI_WIN_XP}, /* Windows XP */
+ {"Windows 2001 SP1", NULL, 0, ACPI_OSI_WIN_XP_SP1}, /* Windows XP SP1 */
+ {"Windows 2001.1", NULL, 0, ACPI_OSI_WINSRV_2003}, /* Windows Server 2003 */
+ {"Windows 2001 SP2", NULL, 0, ACPI_OSI_WIN_XP_SP2}, /* Windows XP SP2 */
+ {"Windows 2001.1 SP1", NULL, 0, ACPI_OSI_WINSRV_2003_SP1}, /* Windows Server 2003 SP1 - Added 03/2006 */
+ {"Windows 2006", NULL, 0, ACPI_OSI_WIN_VISTA}, /* Windows Vista - Added 03/2006 */
+ {"Windows 2006.1", NULL, 0, ACPI_OSI_WINSRV_2008}, /* Windows Server 2008 - Added 09/2009 */
+ {"Windows 2006 SP1", NULL, 0, ACPI_OSI_WIN_VISTA_SP1}, /* Windows Vista SP1 - Added 09/2009 */
+ {"Windows 2006 SP2", NULL, 0, ACPI_OSI_WIN_VISTA_SP2}, /* Windows Vista SP2 - Added 09/2010 */
+ {"Windows 2009", NULL, 0, ACPI_OSI_WIN_7}, /* Windows 7 and Server 2008 R2 - Added 09/2009 */
+
+ /* Feature Group Strings */
+
+ {"Extended Address Space Descriptor", NULL, 0, 0}
+
+ /*
+ * All "optional" feature group strings (features that are implemented
+ * by the host) should be dynamically added by the host via
+ * acpi_install_interface and should not be manually added here.
+ *
+ * Examples of optional feature group strings:
+ *
+ * "Module Device"
+ * "Processor Device"
+ * "3.0 Thermal Model"
+ * "3.0 _SCP Extensions"
+ * "Processor Aggregator Device"
+ */
+};
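Each initializer above fills, in order, the name, next-link, flags, and value members of struct acpi_interface_info; the value member is what acpi_ut_osi_implementation() later compares against acpi_gbl_osi_data. A sketch of the layout implied by those initializers (the real definition lives in the ACPICA headers; the exact member types shown here are assumptions):

struct example_interface_info {
	char *name;				/* "Windows 2000", "Module Device", ... */
	struct example_interface_info *next;	/* Linked at init or install time */
	u8 flags;				/* ACPI_OSI_DYNAMIC, ACPI_OSI_INVALID */
	u8 value;				/* ACPI_OSI_WIN_* version code */
};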
+
+/*******************************************************************************
+ *
+ * FUNCTION: acpi_ut_initialize_interfaces
+ *
+ * PARAMETERS: None
+ *
+ * RETURN: Status
+ *
+ * DESCRIPTION: Initialize the global _OSI supported interfaces list
+ *
+ ******************************************************************************/
+
+acpi_status acpi_ut_initialize_interfaces(void)
+{
+ u32 i;
+
+ (void)acpi_os_acquire_mutex(acpi_gbl_osi_mutex, ACPI_WAIT_FOREVER);
+ acpi_gbl_supported_interfaces = acpi_default_supported_interfaces;
+
+ /* Link the static list of supported interfaces */
+
+ for (i = 0;
+ i < (ACPI_ARRAY_LENGTH(acpi_default_supported_interfaces) - 1);
+ i++) {
+ acpi_default_supported_interfaces[i].next =
+ &acpi_default_supported_interfaces[(acpi_size) i + 1];
+ }
+
+ acpi_os_release_mutex(acpi_gbl_osi_mutex);
+ return (AE_OK);
+}
+
+/*******************************************************************************
+ *
+ * FUNCTION: acpi_ut_interface_terminate
+ *
+ * PARAMETERS: None
+ *
+ * RETURN: None
+ *
+ * DESCRIPTION: Delete all interfaces in the global list. Sets
+ * acpi_gbl_supported_interfaces to NULL.
+ *
+ ******************************************************************************/
+
+void acpi_ut_interface_terminate(void)
+{
+ struct acpi_interface_info *next_interface;
+
+ (void)acpi_os_acquire_mutex(acpi_gbl_osi_mutex, ACPI_WAIT_FOREVER);
+ next_interface = acpi_gbl_supported_interfaces;
+
+ while (next_interface) {
+ acpi_gbl_supported_interfaces = next_interface->next;
+
+ /* Only interfaces added at runtime can be freed */
+
+ if (next_interface->flags & ACPI_OSI_DYNAMIC) {
+ ACPI_FREE(next_interface->name);
+ ACPI_FREE(next_interface);
+ }
+
+ next_interface = acpi_gbl_supported_interfaces;
+ }
+
+ acpi_os_release_mutex(acpi_gbl_osi_mutex);
+}
+
+/*******************************************************************************
+ *
+ * FUNCTION: acpi_ut_install_interface
+ *
+ * PARAMETERS: interface_name - The interface to install
+ *
+ * RETURN: Status
+ *
+ * DESCRIPTION: Install the interface into the global interface list.
+ * Caller MUST hold acpi_gbl_osi_mutex
+ *
+ ******************************************************************************/
+
+acpi_status acpi_ut_install_interface(acpi_string interface_name)
+{
+ struct acpi_interface_info *interface_info;
+
+ /* Allocate info block and space for the name string */
+
+ interface_info =
+ ACPI_ALLOCATE_ZEROED(sizeof(struct acpi_interface_info));
+ if (!interface_info) {
+ return (AE_NO_MEMORY);
+ }
+
+ interface_info->name =
+ ACPI_ALLOCATE_ZEROED(ACPI_STRLEN(interface_name) + 1);
+ if (!interface_info->name) {
+ ACPI_FREE(interface_info);
+ return (AE_NO_MEMORY);
+ }
+
+ /* Initialize new info and insert at the head of the global list */
+
+ ACPI_STRCPY(interface_info->name, interface_name);
+ interface_info->flags = ACPI_OSI_DYNAMIC;
+ interface_info->next = acpi_gbl_supported_interfaces;
+
+ acpi_gbl_supported_interfaces = interface_info;
+ return (AE_OK);
+}
+
+/*******************************************************************************
+ *
+ * FUNCTION: acpi_ut_remove_interface
+ *
+ * PARAMETERS: interface_name - The interface to remove
+ *
+ * RETURN: Status
+ *
+ * DESCRIPTION: Remove the interface from the global interface list.
+ * Caller MUST hold acpi_gbl_osi_mutex
+ *
+ ******************************************************************************/
+
+acpi_status acpi_ut_remove_interface(acpi_string interface_name)
+{
+ struct acpi_interface_info *previous_interface;
+ struct acpi_interface_info *next_interface;
+
+ previous_interface = next_interface = acpi_gbl_supported_interfaces;
+ while (next_interface) {
+ if (!ACPI_STRCMP(interface_name, next_interface->name)) {
+
+ /* Found: name is in either the static list or was added at runtime */
+
+ if (next_interface->flags & ACPI_OSI_DYNAMIC) {
+
+ /* Interface was added dynamically, remove and free it */
+
+ if (previous_interface == next_interface) {
+ acpi_gbl_supported_interfaces =
+ next_interface->next;
+ } else {
+ previous_interface->next =
+ next_interface->next;
+ }
+
+ ACPI_FREE(next_interface->name);
+ ACPI_FREE(next_interface);
+ } else {
+ /*
+ * Interface is in static list. If marked invalid, then it
+ * does not actually exist. Else, mark it invalid.
+ */
+ if (next_interface->flags & ACPI_OSI_INVALID) {
+ return (AE_NOT_EXIST);
+ }
+
+ next_interface->flags |= ACPI_OSI_INVALID;
+ }
+
+ return (AE_OK);
+ }
+
+ previous_interface = next_interface;
+ next_interface = next_interface->next;
+ }
+
+ /* Interface was not found */
+
+ return (AE_NOT_EXIST);
+}
+
+/*******************************************************************************
+ *
+ * FUNCTION: acpi_ut_get_interface
+ *
+ * PARAMETERS: interface_name - The interface to find
+ *
+ * RETURN: struct acpi_interface_info if found. NULL if not found.
+ *
+ * DESCRIPTION: Search for the specified interface name in the global list.
+ * Caller MUST hold acpi_gbl_osi_mutex
+ *
+ ******************************************************************************/
+
+struct acpi_interface_info *acpi_ut_get_interface(acpi_string interface_name)
+{
+ struct acpi_interface_info *next_interface;
+
+ next_interface = acpi_gbl_supported_interfaces;
+ while (next_interface) {
+ if (!ACPI_STRCMP(interface_name, next_interface->name)) {
+ return (next_interface);
+ }
+
+ next_interface = next_interface->next;
+ }
+
+ return (NULL);
+}
+
+/*******************************************************************************
+ *
+ * FUNCTION: acpi_ut_osi_implementation
+ *
+ * PARAMETERS: walk_state - Current walk state
+ *
+ * RETURN: Status
+ *
+ * DESCRIPTION: Implementation of the _OSI predefined control method. When
+ * an invocation of _OSI is encountered in the system AML,
+ * control is transferred to this function.
+ *
+ ******************************************************************************/
+
+acpi_status acpi_ut_osi_implementation(struct acpi_walk_state * walk_state)
+{
+ union acpi_operand_object *string_desc;
+ union acpi_operand_object *return_desc;
+ struct acpi_interface_info *interface_info;
+ acpi_interface_handler interface_handler;
+ u32 return_value;
+
+ ACPI_FUNCTION_TRACE(ut_osi_implementation);
+
+ /* Validate the string input argument (from the AML caller) */
+
+ string_desc = walk_state->arguments[0].object;
+ if (!string_desc || (string_desc->common.type != ACPI_TYPE_STRING)) {
+ return_ACPI_STATUS(AE_TYPE);
+ }
+
+ /* Create a return object */
+
+ return_desc = acpi_ut_create_internal_object(ACPI_TYPE_INTEGER);
+ if (!return_desc) {
+ return_ACPI_STATUS(AE_NO_MEMORY);
+ }
+
+ /* Default return value is 0, NOT SUPPORTED */
+
+ return_value = 0;
+ (void)acpi_os_acquire_mutex(acpi_gbl_osi_mutex, ACPI_WAIT_FOREVER);
+
+ /* Lookup the interface in the global _OSI list */
+
+ interface_info = acpi_ut_get_interface(string_desc->string.pointer);
+ if (interface_info && !(interface_info->flags & ACPI_OSI_INVALID)) {
+ /*
+ * The interface is supported.
+ * Update the osi_data if necessary. We keep track of the latest
+ * version of Windows that has been requested by the BIOS.
+ */
+ if (interface_info->value > acpi_gbl_osi_data) {
+ acpi_gbl_osi_data = interface_info->value;
+ }
+
+ return_value = ACPI_UINT32_MAX;
+ }
+
+ acpi_os_release_mutex(acpi_gbl_osi_mutex);
+
+ /*
+ * Invoke an optional _OSI interface handler. The host OS may wish
+ * to do some interface-specific handling. For example, warn about
+ * certain interfaces or override the true/false support value.
+ */
+ interface_handler = acpi_gbl_interface_handler;
+ if (interface_handler) {
+ return_value =
+ interface_handler(string_desc->string.pointer,
+ return_value);
+ }
+
+ ACPI_DEBUG_PRINT_RAW((ACPI_DB_INFO,
+ "ACPI: BIOS _OSI(\"%s\") is %ssupported\n",
+ string_desc->string.pointer,
+ return_value == 0 ? "not " : ""));
+
+ /* Complete the return object */
+
+ return_desc->integer.value = return_value;
+ walk_state->return_desc = return_desc;
+ return_ACPI_STATUS(AE_OK);
+}
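The optional interface handler invoked above lets the host log or override the answer for individual _OSI queries. A minimal sketch of such a handler, with its signature inferred from the call site (interface_handler(string, return_value)) rather than quoted from the headers:

static u32 example_osi_handler(acpi_string interface_name, u32 supported)
{
	acpi_os_printf("_OSI query for \"%s\" (default answer: %ssupported)\n",
		       interface_name, supported == 0 ? "not " : "");

	/* Example policy: refuse to claim Windows 7 support */

	if (!ACPI_STRCMP(interface_name, "Windows 2009")) {
		return (0);
	}

	return (supported);
}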
diff --git a/drivers/acpi/acpica/utxface.c b/drivers/acpi/acpica/utxface.c
index 7f8cefcb2b32..1f484c9a6888 100644
--- a/drivers/acpi/acpica/utxface.c
+++ b/drivers/acpi/acpica/utxface.c
@@ -110,6 +110,15 @@ acpi_status __init acpi_initialize_subsystem(void)
return_ACPI_STATUS(status);
}
+ /* Initialize the global OSI interfaces list with the static names */
+
+ status = acpi_ut_initialize_interfaces();
+ if (ACPI_FAILURE(status)) {
+ ACPI_EXCEPTION((AE_INFO, status,
+ "During OSI interfaces initialization"));
+ return_ACPI_STATUS(status);
+ }
+
/* If configured, initialize the AML debugger */
ACPI_DEBUGGER_EXEC(status = acpi_db_initialize());
@@ -290,19 +299,6 @@ acpi_status acpi_initialize_objects(u32 flags)
}
/*
- * Complete the GPE initialization for the GPE blocks defined in the FADT
- * (GPE block 0 and 1).
- *
- * NOTE: Currently, there seems to be no need to run the _REG methods
- * before enabling the GPEs.
- */
- if (!(flags & ACPI_NO_EVENT_INIT)) {
- status = acpi_ev_install_fadt_gpes();
- if (ACPI_FAILURE(status))
- return (status);
- }
-
- /*
* Empty the caches (delete the cached objects) on the assumption that
* the table load filled them up more than they will be at runtime --
* thus wasting non-paged memory.
@@ -506,6 +502,7 @@ acpi_install_initialization_handler(acpi_init_handler handler, u32 function)
ACPI_EXPORT_SYMBOL(acpi_install_initialization_handler)
#endif /* ACPI_FUTURE_USAGE */
+
/*****************************************************************************
*
* FUNCTION: acpi_purge_cached_objects
@@ -529,4 +526,117 @@ acpi_status acpi_purge_cached_objects(void)
}
ACPI_EXPORT_SYMBOL(acpi_purge_cached_objects)
-#endif
+
+/*****************************************************************************
+ *
+ * FUNCTION: acpi_install_interface
+ *
+ * PARAMETERS: interface_name - The interface to install
+ *
+ * RETURN: Status
+ *
+ * DESCRIPTION: Install an _OSI interface to the global list
+ *
+ ****************************************************************************/
+acpi_status acpi_install_interface(acpi_string interface_name)
+{
+ acpi_status status;
+ struct acpi_interface_info *interface_info;
+
+ /* Parameter validation */
+
+ if (!interface_name || (ACPI_STRLEN(interface_name) == 0)) {
+ return (AE_BAD_PARAMETER);
+ }
+
+ (void)acpi_os_acquire_mutex(acpi_gbl_osi_mutex, ACPI_WAIT_FOREVER);
+
+ /* Check if the interface name is already in the global list */
+
+ interface_info = acpi_ut_get_interface(interface_name);
+ if (interface_info) {
+ /*
+ * The interface already exists in the list. This is OK if the
+ * interface has been marked invalid -- just clear the bit.
+ */
+ if (interface_info->flags & ACPI_OSI_INVALID) {
+ interface_info->flags &= ~ACPI_OSI_INVALID;
+ status = AE_OK;
+ } else {
+ status = AE_ALREADY_EXISTS;
+ }
+ } else {
+ /* New interface name, install into the global list */
+
+ status = acpi_ut_install_interface(interface_name);
+ }
+
+ acpi_os_release_mutex(acpi_gbl_osi_mutex);
+ return (status);
+}
+
+ACPI_EXPORT_SYMBOL(acpi_install_interface)
+
+/*****************************************************************************
+ *
+ * FUNCTION: acpi_remove_interface
+ *
+ * PARAMETERS: interface_name - The interface to remove
+ *
+ * RETURN: Status
+ *
+ * DESCRIPTION: Remove an _OSI interface from the global list
+ *
+ ****************************************************************************/
+acpi_status acpi_remove_interface(acpi_string interface_name)
+{
+ acpi_status status;
+
+ /* Parameter validation */
+
+ if (!interface_name || (ACPI_STRLEN(interface_name) == 0)) {
+ return (AE_BAD_PARAMETER);
+ }
+
+ (void)acpi_os_acquire_mutex(acpi_gbl_osi_mutex, ACPI_WAIT_FOREVER);
+
+ status = acpi_ut_remove_interface(interface_name);
+
+ acpi_os_release_mutex(acpi_gbl_osi_mutex);
+ return (status);
+}
+
+ACPI_EXPORT_SYMBOL(acpi_remove_interface)
+
+/*****************************************************************************
+ *
+ * FUNCTION: acpi_install_interface_handler
+ *
+ * PARAMETERS: Handler - The _OSI interface handler to install
+ * NULL means "remove existing handler"
+ *
+ * RETURN: Status
+ *
+ * DESCRIPTION: Install a handler for the predefined _OSI ACPI method. The
+ * handler is invoked during execution of the internal
+ * implementation of _OSI. A NULL handler simply removes any
+ * existing handler.
+ *
+ ****************************************************************************/
+acpi_status acpi_install_interface_handler(acpi_interface_handler handler)
+{
+ acpi_status status = AE_OK;
+
+ (void)acpi_os_acquire_mutex(acpi_gbl_osi_mutex, ACPI_WAIT_FOREVER);
+
+ if (handler && acpi_gbl_interface_handler) {
+ status = AE_ALREADY_EXISTS;
+ } else {
+ acpi_gbl_interface_handler = handler;
+ }
+
+ acpi_os_release_mutex(acpi_gbl_osi_mutex);
+ return (status);
+}
+
+ACPI_EXPORT_SYMBOL(acpi_install_interface_handler)
+#endif /* !ACPI_ASL_COMPILER */
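A short usage sketch for the new public entry points (illustrative only): a host registers an optional feature group string, such as the "Module Device" example mentioned in utosi.c, and installs an _OSI query handler like the one sketched above.

static char example_feature[] = "Module Device";

acpi_status example_register_osi_extensions(acpi_interface_handler handler)
{
	acpi_status status;

	status = acpi_install_interface(example_feature);
	if (ACPI_FAILURE(status) && (status != AE_ALREADY_EXISTS)) {
		return (status);
	}

	return (acpi_install_interface_handler(handler));
}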
diff --git a/drivers/acpi/acpica/utxferror.c b/drivers/acpi/acpica/utxferror.c
new file mode 100644
index 000000000000..6f12e314fbae
--- /dev/null
+++ b/drivers/acpi/acpica/utxferror.c
@@ -0,0 +1,415 @@
+/*******************************************************************************
+ *
+ * Module Name: utxferror - Various error/warning output functions
+ *
+ ******************************************************************************/
+
+/*
+ * Copyright (C) 2000 - 2010, Intel Corp.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions, and the following disclaimer,
+ * without modification.
+ * 2. Redistributions in binary form must reproduce at minimum a disclaimer
+ * substantially similar to the "NO WARRANTY" disclaimer below
+ * ("Disclaimer") and any redistribution must be conditioned upon
+ * including a substantially similar Disclaimer requirement for further
+ * binary redistribution.
+ * 3. Neither the names of the above-listed copyright holders nor the names
+ * of any contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * Alternatively, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") version 2 as published by the Free
+ * Software Foundation.
+ *
+ * NO WARRANTY
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
+ * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
+ * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGES.
+ */
+
+#include <acpi/acpi.h>
+#include "accommon.h"
+#include "acnamesp.h"
+
+#define _COMPONENT ACPI_UTILITIES
+ACPI_MODULE_NAME("utxferror")
+
+/*
+ * This module is used for the in-kernel ACPICA as well as the ACPICA
+ * tools/applications.
+ *
+ * For the i_aSL compiler case, the output is redirected to stderr so that
+ * any of the various ACPI errors and warnings do not appear in the output
+ * files, for either the compiler or disassembler portions of the tool.
+ */
+#ifdef ACPI_ASL_COMPILER
+#include <stdio.h>
+extern FILE *acpi_gbl_output_file;
+
+#define ACPI_MSG_REDIRECT_BEGIN \
+ FILE *output_file = acpi_gbl_output_file; \
+ acpi_os_redirect_output (stderr);
+
+#define ACPI_MSG_REDIRECT_END \
+ acpi_os_redirect_output (output_file);
+
+#else
+/*
+ * non-i_aSL case - no redirection, nothing to do
+ */
+#define ACPI_MSG_REDIRECT_BEGIN
+#define ACPI_MSG_REDIRECT_END
+#endif
+/*
+ * Common message prefixes
+ */
+#define ACPI_MSG_ERROR "ACPI Error: "
+#define ACPI_MSG_EXCEPTION "ACPI Exception: "
+#define ACPI_MSG_WARNING "ACPI Warning: "
+#define ACPI_MSG_INFO "ACPI: "
+/*
+ * Common message suffix
+ */
+#define ACPI_MSG_SUFFIX \
+ acpi_os_printf (" (%8.8X/%s-%u)\n", ACPI_CA_VERSION, module_name, line_number)
+/*******************************************************************************
+ *
+ * FUNCTION: acpi_error
+ *
+ * PARAMETERS: module_name - Caller's module name (for error output)
+ * line_number - Caller's line number (for error output)
+ * Format - Printf format string + additional args
+ *
+ * RETURN: None
+ *
+ * DESCRIPTION: Print "ACPI Error" message with module/line/version info
+ *
+ ******************************************************************************/
+void ACPI_INTERNAL_VAR_XFACE
+acpi_error(const char *module_name, u32 line_number, const char *format, ...)
+{
+ va_list arg_list;
+
+ ACPI_MSG_REDIRECT_BEGIN;
+ acpi_os_printf(ACPI_MSG_ERROR);
+
+ va_start(arg_list, format);
+ acpi_os_vprintf(format, arg_list);
+ ACPI_MSG_SUFFIX;
+ va_end(arg_list);
+
+ ACPI_MSG_REDIRECT_END;
+}
+
+ACPI_EXPORT_SYMBOL(acpi_error)
+
+/*******************************************************************************
+ *
+ * FUNCTION: acpi_exception
+ *
+ * PARAMETERS: module_name - Caller's module name (for error output)
+ * line_number - Caller's line number (for error output)
+ * Status - Status to be formatted
+ * Format - Printf format string + additional args
+ *
+ * RETURN: None
+ *
+ * DESCRIPTION: Print "ACPI Exception" message with module/line/version info
+ * and decoded acpi_status.
+ *
+ ******************************************************************************/
+void ACPI_INTERNAL_VAR_XFACE
+acpi_exception(const char *module_name,
+ u32 line_number, acpi_status status, const char *format, ...)
+{
+ va_list arg_list;
+
+ ACPI_MSG_REDIRECT_BEGIN;
+ acpi_os_printf(ACPI_MSG_EXCEPTION "%s, ",
+ acpi_format_exception(status));
+
+ va_start(arg_list, format);
+ acpi_os_vprintf(format, arg_list);
+ ACPI_MSG_SUFFIX;
+ va_end(arg_list);
+
+ ACPI_MSG_REDIRECT_END;
+}
+
+ACPI_EXPORT_SYMBOL(acpi_exception)
+
+/*******************************************************************************
+ *
+ * FUNCTION: acpi_warning
+ *
+ * PARAMETERS: module_name - Caller's module name (for error output)
+ * line_number - Caller's line number (for error output)
+ * Format - Printf format string + additional args
+ *
+ * RETURN: None
+ *
+ * DESCRIPTION: Print "ACPI Warning" message with module/line/version info
+ *
+ ******************************************************************************/
+void ACPI_INTERNAL_VAR_XFACE
+acpi_warning(const char *module_name, u32 line_number, const char *format, ...)
+{
+ va_list arg_list;
+
+ ACPI_MSG_REDIRECT_BEGIN;
+ acpi_os_printf(ACPI_MSG_WARNING);
+
+ va_start(arg_list, format);
+ acpi_os_vprintf(format, arg_list);
+ ACPI_MSG_SUFFIX;
+ va_end(arg_list);
+
+ ACPI_MSG_REDIRECT_END;
+}
+
+ACPI_EXPORT_SYMBOL(acpi_warning)
+
+/*******************************************************************************
+ *
+ * FUNCTION: acpi_info
+ *
+ * PARAMETERS: module_name - Caller's module name (for error output)
+ * line_number - Caller's line number (for error output)
+ * Format - Printf format string + additional args
+ *
+ * RETURN: None
+ *
+ * DESCRIPTION: Print generic "ACPI:" information message. There is no
+ * module/line/version info in order to keep the message simple.
+ *
+ * TBD: module_name and line_number args are not needed, should be removed.
+ *
+ ******************************************************************************/
+void ACPI_INTERNAL_VAR_XFACE
+acpi_info(const char *module_name, u32 line_number, const char *format, ...)
+{
+ va_list arg_list;
+
+ ACPI_MSG_REDIRECT_BEGIN;
+ acpi_os_printf(ACPI_MSG_INFO);
+
+ va_start(arg_list, format);
+ acpi_os_vprintf(format, arg_list);
+ acpi_os_printf("\n");
+ va_end(arg_list);
+
+ ACPI_MSG_REDIRECT_END;
+}
+
+ACPI_EXPORT_SYMBOL(acpi_info)
+
+/*
+ * The remainder of this module contains internal error functions that may
+ * be configured out.
+ */
+#if !defined (ACPI_NO_ERROR_MESSAGES) && !defined (ACPI_BIN_APP)
+/*******************************************************************************
+ *
+ * FUNCTION: acpi_ut_predefined_warning
+ *
+ * PARAMETERS: module_name - Caller's module name (for error output)
+ * line_number - Caller's line number (for error output)
+ * Pathname - Full pathname to the node
+ * node_flags - From Namespace node for the method/object
+ * Format - Printf format string + additional args
+ *
+ * RETURN: None
+ *
+ * DESCRIPTION: Warnings for the predefined validation module. Messages are
+ * only emitted the first time a problem with a particular
+ * method/object is detected. This prevents a flood of error
+ * messages for methods that are repeatedly evaluated.
+ *
+ ******************************************************************************/
+void ACPI_INTERNAL_VAR_XFACE
+acpi_ut_predefined_warning(const char *module_name,
+ u32 line_number,
+ char *pathname,
+ u8 node_flags, const char *format, ...)
+{
+ va_list arg_list;
+
+ /*
+ * Warning messages for this method/object will be disabled after the
+ * first time a validation fails or an object is successfully repaired.
+ */
+ if (node_flags & ANOBJ_EVALUATED) {
+ return;
+ }
+
+ acpi_os_printf(ACPI_MSG_WARNING "For %s: ", pathname);
+
+ va_start(arg_list, format);
+ acpi_os_vprintf(format, arg_list);
+ ACPI_MSG_SUFFIX;
+ va_end(arg_list);
+}
+
+/*******************************************************************************
+ *
+ * FUNCTION: acpi_ut_predefined_info
+ *
+ * PARAMETERS: module_name - Caller's module name (for error output)
+ * line_number - Caller's line number (for error output)
+ * Pathname - Full pathname to the node
+ * node_flags - From Namespace node for the method/object
+ * Format - Printf format string + additional args
+ *
+ * RETURN: None
+ *
+ * DESCRIPTION: Info messages for the predefined validation module. Messages
+ * are only emitted the first time a problem with a particular
+ * method/object is detected. This prevents a flood of
+ * messages for methods that are repeatedly evaluated.
+ *
+ ******************************************************************************/
+
+void ACPI_INTERNAL_VAR_XFACE
+acpi_ut_predefined_info(const char *module_name,
+ u32 line_number,
+ char *pathname, u8 node_flags, const char *format, ...)
+{
+ va_list arg_list;
+
+ /*
+ * Warning messages for this method/object will be disabled after the
+ * first time a validation fails or an object is successfully repaired.
+ */
+ if (node_flags & ANOBJ_EVALUATED) {
+ return;
+ }
+
+ acpi_os_printf(ACPI_MSG_INFO "For %s: ", pathname);
+
+ va_start(arg_list, format);
+ acpi_os_vprintf(format, arg_list);
+ ACPI_MSG_SUFFIX;
+ va_end(arg_list);
+}
+
+/*******************************************************************************
+ *
+ * FUNCTION: acpi_ut_namespace_error
+ *
+ * PARAMETERS: module_name - Caller's module name (for error output)
+ * line_number - Caller's line number (for error output)
+ * internal_name - Name or path of the namespace node
+ * lookup_status - Exception code from NS lookup
+ *
+ * RETURN: None
+ *
+ * DESCRIPTION: Print error message with the full pathname for the NS node.
+ *
+ ******************************************************************************/
+
+void
+acpi_ut_namespace_error(const char *module_name,
+ u32 line_number,
+ const char *internal_name, acpi_status lookup_status)
+{
+ acpi_status status;
+ u32 bad_name;
+ char *name = NULL;
+
+ ACPI_MSG_REDIRECT_BEGIN;
+ acpi_os_printf(ACPI_MSG_ERROR);
+
+ if (lookup_status == AE_BAD_CHARACTER) {
+
+ /* There is a non-ascii character in the name */
+
+ ACPI_MOVE_32_TO_32(&bad_name,
+ ACPI_CAST_PTR(u32, internal_name));
+ acpi_os_printf("[0x%4.4X] (NON-ASCII)", bad_name);
+ } else {
+ /* Convert path to external format */
+
+ status = acpi_ns_externalize_name(ACPI_UINT32_MAX,
+ internal_name, NULL, &name);
+
+ /* Print target name */
+
+ if (ACPI_SUCCESS(status)) {
+ acpi_os_printf("[%s]", name);
+ } else {
+ acpi_os_printf("[COULD NOT EXTERNALIZE NAME]");
+ }
+
+ if (name) {
+ ACPI_FREE(name);
+ }
+ }
+
+ acpi_os_printf(" Namespace lookup failure, %s",
+ acpi_format_exception(lookup_status));
+
+ ACPI_MSG_SUFFIX;
+ ACPI_MSG_REDIRECT_END;
+}
+
+/*******************************************************************************
+ *
+ * FUNCTION: acpi_ut_method_error
+ *
+ * PARAMETERS: module_name - Caller's module name (for error output)
+ * line_number - Caller's line number (for error output)
+ * Message - Error message to use on failure
+ * prefix_node - Prefix relative to the path
+ * Path - Path to the node (optional)
+ * method_status - Execution status
+ *
+ * RETURN: None
+ *
+ * DESCRIPTION: Print error message with the full pathname for the method.
+ *
+ ******************************************************************************/
+
+void
+acpi_ut_method_error(const char *module_name,
+ u32 line_number,
+ const char *message,
+ struct acpi_namespace_node *prefix_node,
+ const char *path, acpi_status method_status)
+{
+ acpi_status status;
+ struct acpi_namespace_node *node = prefix_node;
+
+ ACPI_MSG_REDIRECT_BEGIN;
+ acpi_os_printf(ACPI_MSG_ERROR);
+
+ if (path) {
+ status =
+ acpi_ns_get_node(prefix_node, path, ACPI_NS_NO_UPSEARCH,
+ &node);
+ if (ACPI_FAILURE(status)) {
+ acpi_os_printf("[Could not get node by pathname]");
+ }
+ }
+
+ acpi_ns_print_node_pathname(node, message);
+ acpi_os_printf(", %s", acpi_format_exception(method_status));
+
+ ACPI_MSG_SUFFIX;
+ ACPI_MSG_REDIRECT_END;
+}
+
+#endif /* ACPI_NO_ERROR_MESSAGES */
diff --git a/drivers/acpi/battery.c b/drivers/acpi/battery.c
index 98417201e9ce..95649d373071 100644
--- a/drivers/acpi/battery.c
+++ b/drivers/acpi/battery.c
@@ -42,10 +42,7 @@
#include <acpi/acpi_bus.h>
#include <acpi/acpi_drivers.h>
-
-#ifdef CONFIG_ACPI_SYSFS_POWER
#include <linux/power_supply.h>
-#endif
#define PREFIX "ACPI: "
@@ -98,13 +95,12 @@ enum {
* due to bad math.
*/
ACPI_BATTERY_QUIRK_SIGNED16_CURRENT,
+ ACPI_BATTERY_QUIRK_PERCENTAGE_CAPACITY,
};
struct acpi_battery {
struct mutex lock;
-#ifdef CONFIG_ACPI_SYSFS_POWER
struct power_supply bat;
-#endif
struct acpi_device *device;
unsigned long update_time;
int rate_now;
@@ -141,7 +137,6 @@ inline int acpi_battery_present(struct acpi_battery *battery)
return battery->device->status.battery_present;
}
-#ifdef CONFIG_ACPI_SYSFS_POWER
static int acpi_battery_technology(struct acpi_battery *battery)
{
if (!strcasecmp("NiCd", battery->type))
@@ -186,6 +181,7 @@ static int acpi_battery_get_property(struct power_supply *psy,
enum power_supply_property psp,
union power_supply_propval *val)
{
+ int ret = 0;
struct acpi_battery *battery = to_acpi_battery(psy);
if (acpi_battery_present(battery)) {
@@ -214,26 +210,44 @@ static int acpi_battery_get_property(struct power_supply *psy,
val->intval = battery->cycle_count;
break;
case POWER_SUPPLY_PROP_VOLTAGE_MIN_DESIGN:
- val->intval = battery->design_voltage * 1000;
+ if (battery->design_voltage == ACPI_BATTERY_VALUE_UNKNOWN)
+ ret = -ENODEV;
+ else
+ val->intval = battery->design_voltage * 1000;
break;
case POWER_SUPPLY_PROP_VOLTAGE_NOW:
- val->intval = battery->voltage_now * 1000;
+ if (battery->voltage_now == ACPI_BATTERY_VALUE_UNKNOWN)
+ ret = -ENODEV;
+ else
+ val->intval = battery->voltage_now * 1000;
break;
case POWER_SUPPLY_PROP_CURRENT_NOW:
case POWER_SUPPLY_PROP_POWER_NOW:
- val->intval = battery->rate_now * 1000;
+ if (battery->rate_now == ACPI_BATTERY_VALUE_UNKNOWN)
+ ret = -ENODEV;
+ else
+ val->intval = battery->rate_now * 1000;
break;
case POWER_SUPPLY_PROP_CHARGE_FULL_DESIGN:
case POWER_SUPPLY_PROP_ENERGY_FULL_DESIGN:
- val->intval = battery->design_capacity * 1000;
+ if (battery->design_capacity == ACPI_BATTERY_VALUE_UNKNOWN)
+ ret = -ENODEV;
+ else
+ val->intval = battery->design_capacity * 1000;
break;
case POWER_SUPPLY_PROP_CHARGE_FULL:
case POWER_SUPPLY_PROP_ENERGY_FULL:
- val->intval = battery->full_charge_capacity * 1000;
+ if (battery->full_charge_capacity == ACPI_BATTERY_VALUE_UNKNOWN)
+ ret = -ENODEV;
+ else
+ val->intval = battery->full_charge_capacity * 1000;
break;
case POWER_SUPPLY_PROP_CHARGE_NOW:
case POWER_SUPPLY_PROP_ENERGY_NOW:
- val->intval = battery->capacity_now * 1000;
+ if (battery->capacity_now == ACPI_BATTERY_VALUE_UNKNOWN)
+ ret = -ENODEV;
+ else
+ val->intval = battery->capacity_now * 1000;
break;
case POWER_SUPPLY_PROP_MODEL_NAME:
val->strval = battery->model_number;
@@ -245,9 +259,9 @@ static int acpi_battery_get_property(struct power_supply *psy,
val->strval = battery->serial_number;
break;
default:
- return -EINVAL;
+ ret = -EINVAL;
}
- return 0;
+ return ret;
}
static enum power_supply_property charge_battery_props[] = {
@@ -281,7 +295,6 @@ static enum power_supply_property energy_battery_props[] = {
POWER_SUPPLY_PROP_MANUFACTURER,
POWER_SUPPLY_PROP_SERIAL_NUMBER,
};
-#endif
#ifdef CONFIG_ACPI_PROCFS_POWER
inline char *acpi_battery_units(struct acpi_battery *battery)
@@ -412,6 +425,8 @@ static int acpi_battery_get_info(struct acpi_battery *battery)
result = extract_package(battery, buffer.pointer,
info_offsets, ARRAY_SIZE(info_offsets));
kfree(buffer.pointer);
+ if (test_bit(ACPI_BATTERY_QUIRK_PERCENTAGE_CAPACITY, &battery->flags))
+ battery->full_charge_capacity = battery->design_capacity;
return result;
}
@@ -448,6 +463,10 @@ static int acpi_battery_get_state(struct acpi_battery *battery)
battery->rate_now != -1)
battery->rate_now = abs((s16)battery->rate_now);
+ if (test_bit(ACPI_BATTERY_QUIRK_PERCENTAGE_CAPACITY, &battery->flags)
+ && battery->capacity_now >= 0 && battery->capacity_now <= 100)
+ battery->capacity_now = (battery->capacity_now *
+ battery->full_charge_capacity) / 100;
return result;
}
@@ -492,7 +511,6 @@ static int acpi_battery_init_alarm(struct acpi_battery *battery)
return acpi_battery_set_alarm(battery);
}
-#ifdef CONFIG_ACPI_SYSFS_POWER
static ssize_t acpi_battery_alarm_show(struct device *dev,
struct device_attribute *attr,
char *buf)
@@ -552,7 +570,6 @@ static void sysfs_remove_battery(struct acpi_battery *battery)
power_supply_unregister(&battery->bat);
battery->bat.dev = NULL;
}
-#endif
static void acpi_battery_quirks(struct acpi_battery *battery)
{
@@ -561,6 +578,33 @@ static void acpi_battery_quirks(struct acpi_battery *battery)
}
}
+/*
+ * According to the ACPI spec, some kinds of primary batteries can
+ * report the remaining capacity directly to the OS as a percentage.
+ * In that case the battery reports Last Full Charged Capacity == 100
+ * and BatteryPresentRate == 0xFFFFFFFF.
+ *
+ * Some rechargeable batteries have also been seen to report a
+ * percentage remaining capacity:
+ * https://bugzilla.kernel.org/show_bug.cgi?id=15979
+ *
+ * Handle this case so that such batteries do not break userspace.
+ */
+static void acpi_battery_quirks2(struct acpi_battery *battery)
+{
+ if (test_bit(ACPI_BATTERY_QUIRK_PERCENTAGE_CAPACITY, &battery->flags))
+ return;
+
+ if (battery->full_charge_capacity == 100 &&
+ battery->rate_now == ACPI_BATTERY_VALUE_UNKNOWN &&
+ battery->capacity_now >= 0 && battery->capacity_now <= 100) {
+ set_bit(ACPI_BATTERY_QUIRK_PERCENTAGE_CAPACITY, &battery->flags);
+ battery->full_charge_capacity = battery->design_capacity;
+ battery->capacity_now = (battery->capacity_now *
+ battery->full_charge_capacity) / 100;
+ }
+}
+
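As a concrete illustration (the numbers are made up): a battery whose _BIF gives design_capacity = 4400 mAh but whose _BST reports capacity_now = 57 is handled by setting full_charge_capacity = 4400 and rescaling capacity_now to 57 * 4400 / 100 = 2508 mAh, so the values exported through power_supply stay in capacity units rather than raw percentages.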
static int acpi_battery_update(struct acpi_battery *battery)
{
int result, old_present = acpi_battery_present(battery);
@@ -568,9 +612,7 @@ static int acpi_battery_update(struct acpi_battery *battery)
if (result)
return result;
if (!acpi_battery_present(battery)) {
-#ifdef CONFIG_ACPI_SYSFS_POWER
sysfs_remove_battery(battery);
-#endif
battery->update_time = 0;
return 0;
}
@@ -582,11 +624,11 @@ static int acpi_battery_update(struct acpi_battery *battery)
acpi_battery_quirks(battery);
acpi_battery_init_alarm(battery);
}
-#ifdef CONFIG_ACPI_SYSFS_POWER
if (!battery->bat.dev)
sysfs_add_battery(battery);
-#endif
- return acpi_battery_get_state(battery);
+ result = acpi_battery_get_state(battery);
+ acpi_battery_quirks2(battery);
+ return result;
}
/* --------------------------------------------------------------------------
@@ -867,26 +909,20 @@ static void acpi_battery_remove_fs(struct acpi_device *device)
static void acpi_battery_notify(struct acpi_device *device, u32 event)
{
struct acpi_battery *battery = acpi_driver_data(device);
-#ifdef CONFIG_ACPI_SYSFS_POWER
struct device *old;
-#endif
if (!battery)
return;
-#ifdef CONFIG_ACPI_SYSFS_POWER
old = battery->bat.dev;
-#endif
acpi_battery_update(battery);
acpi_bus_generate_proc_event(device, event,
acpi_battery_present(battery));
acpi_bus_generate_netlink_event(device->pnp.device_class,
dev_name(&device->dev), event,
acpi_battery_present(battery));
-#ifdef CONFIG_ACPI_SYSFS_POWER
/* acpi_battery_update could remove power_supply object */
if (old && battery->bat.dev)
power_supply_changed(&battery->bat);
-#endif
}
static int acpi_battery_add(struct acpi_device *device)
@@ -934,9 +970,7 @@ static int acpi_battery_remove(struct acpi_device *device, int type)
#ifdef CONFIG_ACPI_PROCFS_POWER
acpi_battery_remove_fs(device);
#endif
-#ifdef CONFIG_ACPI_SYSFS_POWER
sysfs_remove_battery(battery);
-#endif
mutex_destroy(&battery->lock);
kfree(battery);
return 0;
diff --git a/drivers/acpi/bus.c b/drivers/acpi/bus.c
index 310e3b9749cb..d68bd61072bb 100644
--- a/drivers/acpi/bus.c
+++ b/drivers/acpi/bus.c
@@ -935,6 +935,12 @@ static int __init acpi_bus_init(void)
goto error1;
}
+ /*
+ * The _PDC control method may load dynamic SSDT tables, so the
+ * table handler must be installed before it is evaluated.
+ */
+ acpi_sysfs_init();
+
acpi_early_processor_set_pdc();
/*
@@ -1026,7 +1032,6 @@ static int __init acpi_init(void)
acpi_scan_init();
acpi_ec_init();
acpi_power_init();
- acpi_sysfs_init();
acpi_debugfs_init();
acpi_sleep_proc_init();
acpi_wakeup_device_init();
diff --git a/drivers/acpi/button.c b/drivers/acpi/button.c
index 1575a9b51f1d..71ef9cd0735f 100644
--- a/drivers/acpi/button.c
+++ b/drivers/acpi/button.c
@@ -338,7 +338,8 @@ static int acpi_button_add(struct acpi_device *device)
{
struct acpi_button *button;
struct input_dev *input;
- char *hid, *name, *class;
+ const char *hid = acpi_device_hid(device);
+ char *name, *class;
int error;
button = kzalloc(sizeof(struct acpi_button), GFP_KERNEL);
@@ -353,7 +354,6 @@ static int acpi_button_add(struct acpi_device *device)
goto err_free_button;
}
- hid = acpi_device_hid(device);
name = acpi_device_name(device);
class = acpi_device_class(device);
diff --git a/drivers/acpi/debugfs.c b/drivers/acpi/debugfs.c
index 6355b575ee5a..5df67f1d6c61 100644
--- a/drivers/acpi/debugfs.c
+++ b/drivers/acpi/debugfs.c
@@ -80,7 +80,7 @@ int __init acpi_debugfs_init(void)
if (!acpi_dir)
goto err;
- cm_dentry = debugfs_create_file("custom_method", S_IWUGO,
+ cm_dentry = debugfs_create_file("custom_method", S_IWUSR,
acpi_dir, NULL, &cm_fops);
if (!cm_dentry)
goto err;
diff --git a/drivers/acpi/dock.c b/drivers/acpi/dock.c
index 3fe29e992be8..81514a4918cc 100644
--- a/drivers/acpi/dock.c
+++ b/drivers/acpi/dock.c
@@ -725,6 +725,7 @@ static void dock_notify(acpi_handle handle, u32 event, void *data)
complete_dock(ds);
dock_event(ds, event, DOCK_EVENT);
dock_lock(ds, 1);
+ acpi_update_gpes();
break;
}
if (dock_present(ds) || dock_in_progress(ds))
@@ -929,7 +930,7 @@ static struct attribute_group dock_attribute_group = {
* allocated and initialize a new dock station device. Find all devices
* that are on the dock station, and register for dock event notifications.
*/
-static int dock_add(acpi_handle handle)
+static int __init dock_add(acpi_handle handle)
{
int ret, id;
struct dock_station ds, *dock_station;
@@ -1023,7 +1024,7 @@ static int dock_remove(struct dock_station *ds)
*
* This is called by acpi_walk_namespace to look for dock stations.
*/
-static acpi_status
+static __init acpi_status
find_dock(acpi_handle handle, u32 lvl, void *context, void **rv)
{
if (is_dock(handle))
@@ -1032,7 +1033,7 @@ find_dock(acpi_handle handle, u32 lvl, void *context, void **rv)
return AE_OK;
}
-static acpi_status
+static __init acpi_status
find_bay(acpi_handle handle, u32 lvl, void *context, void **rv)
{
/* If bay is a dock, it's already handled */
diff --git a/drivers/acpi/ec.c b/drivers/acpi/ec.c
index f31291ba94d0..372ff80b7b0c 100644
--- a/drivers/acpi/ec.c
+++ b/drivers/acpi/ec.c
@@ -83,6 +83,11 @@ enum {
EC_FLAGS_BLOCKED, /* Transactions are blocked */
};
+/* ec.c is compiled in acpi namespace so this shows up as acpi.ec_delay param */
+static unsigned int ec_delay __read_mostly = ACPI_EC_DELAY;
+module_param(ec_delay, uint, 0644);
+MODULE_PARM_DESC(ec_delay, "Timeout(ms) waited until an EC command completes");
+
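Since ec.c is normally built in, the timeout can now be tuned with acpi.ec_delay=<ms> on the kernel command line or, thanks to the 0644 permissions above, at runtime through /sys/module/acpi/parameters/ec_delay; acpi.ec_delay=1000 is only an example value.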
/* If we find an EC via the ECDT, we need to keep a ptr to its context */
/* External interfaces use first EC only, so remember */
typedef int (*acpi_ec_query_func) (void *data);
@@ -210,7 +215,7 @@ static int ec_poll(struct acpi_ec *ec)
int repeat = 2; /* number of command restarts */
while (repeat--) {
unsigned long delay = jiffies +
- msecs_to_jiffies(ACPI_EC_DELAY);
+ msecs_to_jiffies(ec_delay);
do {
/* don't sleep with disabled interrupts */
if (EC_FLAGS_MSI || irqs_disabled()) {
@@ -265,7 +270,7 @@ static int ec_check_ibf0(struct acpi_ec *ec)
static int ec_wait_ibf0(struct acpi_ec *ec)
{
- unsigned long delay = jiffies + msecs_to_jiffies(ACPI_EC_DELAY);
+ unsigned long delay = jiffies + msecs_to_jiffies(ec_delay);
/* interrupt wait manually if GPE mode is not active */
while (time_before(jiffies, delay))
if (wait_event_timeout(ec->wait, ec_check_ibf0(ec),
diff --git a/drivers/acpi/fan.c b/drivers/acpi/fan.c
index d94d2953c974..60049080c869 100644
--- a/drivers/acpi/fan.c
+++ b/drivers/acpi/fan.c
@@ -27,8 +27,6 @@
#include <linux/module.h>
#include <linux/init.h>
#include <linux/types.h>
-#include <linux/proc_fs.h>
-#include <linux/seq_file.h>
#include <asm/uaccess.h>
#include <linux/thermal.h>
#include <acpi/acpi_bus.h>
@@ -119,122 +117,6 @@ static struct thermal_cooling_device_ops fan_cooling_ops = {
};
/* --------------------------------------------------------------------------
- FS Interface (/proc)
- -------------------------------------------------------------------------- */
-#ifdef CONFIG_ACPI_PROCFS
-
-static struct proc_dir_entry *acpi_fan_dir;
-
-static int acpi_fan_read_state(struct seq_file *seq, void *offset)
-{
- struct acpi_device *device = seq->private;
- int state = 0;
-
-
- if (device) {
- if (acpi_bus_get_power(device->handle, &state))
- seq_printf(seq, "status: ERROR\n");
- else
- seq_printf(seq, "status: %s\n",
- !state ? "on" : "off");
- }
- return 0;
-}
-
-static int acpi_fan_state_open_fs(struct inode *inode, struct file *file)
-{
- return single_open(file, acpi_fan_read_state, PDE(inode)->data);
-}
-
-static ssize_t
-acpi_fan_write_state(struct file *file, const char __user * buffer,
- size_t count, loff_t * ppos)
-{
- int result = 0;
- struct seq_file *m = file->private_data;
- struct acpi_device *device = m->private;
- char state_string[3] = { '\0' };
-
- if (count > sizeof(state_string) - 1)
- return -EINVAL;
-
- if (copy_from_user(state_string, buffer, count))
- return -EFAULT;
-
- state_string[count] = '\0';
- if ((state_string[0] < '0') || (state_string[0] > '3'))
- return -EINVAL;
- if (state_string[1] == '\n')
- state_string[1] = '\0';
- if (state_string[1] != '\0')
- return -EINVAL;
-
- result = acpi_bus_set_power(device->handle,
- simple_strtoul(state_string, NULL, 0));
- if (result)
- return result;
-
- return count;
-}
-
-static const struct file_operations acpi_fan_state_ops = {
- .open = acpi_fan_state_open_fs,
- .read = seq_read,
- .write = acpi_fan_write_state,
- .llseek = seq_lseek,
- .release = single_release,
- .owner = THIS_MODULE,
-};
-
-static int acpi_fan_add_fs(struct acpi_device *device)
-{
- struct proc_dir_entry *entry = NULL;
-
-
- if (!device)
- return -EINVAL;
-
- if (!acpi_device_dir(device)) {
- acpi_device_dir(device) = proc_mkdir(acpi_device_bid(device),
- acpi_fan_dir);
- if (!acpi_device_dir(device))
- return -ENODEV;
- }
-
- /* 'status' [R/W] */
- entry = proc_create_data(ACPI_FAN_FILE_STATE,
- S_IFREG | S_IRUGO | S_IWUSR,
- acpi_device_dir(device),
- &acpi_fan_state_ops,
- device);
- if (!entry)
- return -ENODEV;
- return 0;
-}
-
-static int acpi_fan_remove_fs(struct acpi_device *device)
-{
-
- if (acpi_device_dir(device)) {
- remove_proc_entry(ACPI_FAN_FILE_STATE, acpi_device_dir(device));
- remove_proc_entry(acpi_device_bid(device), acpi_fan_dir);
- acpi_device_dir(device) = NULL;
- }
-
- return 0;
-}
-#else
-static int acpi_fan_add_fs(struct acpi_device *device)
-{
- return 0;
-}
-
-static int acpi_fan_remove_fs(struct acpi_device *device)
-{
- return 0;
-}
-#endif
-/* --------------------------------------------------------------------------
Driver Interface
-------------------------------------------------------------------------- */
@@ -284,10 +166,6 @@ static int acpi_fan_add(struct acpi_device *device)
dev_err(&device->dev, "Failed to create sysfs link "
"'device'\n");
- result = acpi_fan_add_fs(device);
- if (result)
- goto end;
-
printk(KERN_INFO PREFIX "%s [%s] (%s)\n",
acpi_device_name(device), acpi_device_bid(device),
!device->power.state ? "on" : "off");
@@ -303,7 +181,6 @@ static int acpi_fan_remove(struct acpi_device *device, int type)
if (!device || !cdev)
return -EINVAL;
- acpi_fan_remove_fs(device);
sysfs_remove_link(&device->dev.kobj, "thermal_cooling");
sysfs_remove_link(&cdev->device.kobj, "device");
thermal_cooling_device_unregister(cdev);
@@ -347,19 +224,9 @@ static int __init acpi_fan_init(void)
{
int result = 0;
-#ifdef CONFIG_ACPI_PROCFS
- acpi_fan_dir = proc_mkdir(ACPI_FAN_CLASS, acpi_root_dir);
- if (!acpi_fan_dir)
- return -ENODEV;
-#endif
-
result = acpi_bus_register_driver(&acpi_fan_driver);
- if (result < 0) {
-#ifdef CONFIG_ACPI_PROCFS
- remove_proc_entry(ACPI_FAN_CLASS, acpi_root_dir);
-#endif
+ if (result < 0)
return -ENODEV;
- }
return 0;
}
@@ -369,10 +236,6 @@ static void __exit acpi_fan_exit(void)
acpi_bus_unregister_driver(&acpi_fan_driver);
-#ifdef CONFIG_ACPI_PROCFS
- remove_proc_entry(ACPI_FAN_CLASS, acpi_root_dir);
-#endif
-
return;
}
diff --git a/drivers/acpi/osl.c b/drivers/acpi/osl.c
index 65b25a303b86..966feddf6b1b 100644
--- a/drivers/acpi/osl.c
+++ b/drivers/acpi/osl.c
@@ -95,8 +95,25 @@ struct acpi_res_list {
static LIST_HEAD(resource_list_head);
static DEFINE_SPINLOCK(acpi_res_lock);
+/*
+ * This list of permanent mappings is for memory that may be accessed from
+ * interrupt context, where we can't do the ioremap().
+ */
+struct acpi_ioremap {
+ struct list_head list;
+ void __iomem *virt;
+ acpi_physical_address phys;
+ acpi_size size;
+ struct kref ref;
+};
+
+static LIST_HEAD(acpi_ioremaps);
+static DEFINE_SPINLOCK(acpi_ioremap_lock);
+
#define OSI_STRING_LENGTH_MAX 64 /* arbitrary */
-static char osi_additional_string[OSI_STRING_LENGTH_MAX];
+static char osi_setup_string[OSI_STRING_LENGTH_MAX];
+
+static void __init acpi_osi_setup_late(void);
/*
* The story of _OSI(Linux)
@@ -138,6 +155,20 @@ static struct osi_linux {
unsigned int known:1;
} osi_linux = { 0, 0, 0, 0};
+static u32 acpi_osi_handler(acpi_string interface, u32 supported)
+{
+ if (!strcmp("Linux", interface)) {
+
+ printk(KERN_NOTICE FW_BUG PREFIX
+ "BIOS _OSI(Linux) query %s%s\n",
+ osi_linux.enable ? "honored" : "ignored",
+ osi_linux.cmdline ? " via cmdline" :
+ osi_linux.dmi ? " via DMI" : "");
+ }
+
+ return supported;
+}
+
static void __init acpi_request_region (struct acpi_generic_address *addr,
unsigned int length, char *desc)
{
@@ -185,36 +216,6 @@ static int __init acpi_reserve_resources(void)
}
device_initcall(acpi_reserve_resources);
-acpi_status __init acpi_os_initialize(void)
-{
- return AE_OK;
-}
-
-acpi_status acpi_os_initialize1(void)
-{
- kacpid_wq = create_workqueue("kacpid");
- kacpi_notify_wq = create_workqueue("kacpi_notify");
- kacpi_hotplug_wq = create_workqueue("kacpi_hotplug");
- BUG_ON(!kacpid_wq);
- BUG_ON(!kacpi_notify_wq);
- BUG_ON(!kacpi_hotplug_wq);
- return AE_OK;
-}
-
-acpi_status acpi_os_terminate(void)
-{
- if (acpi_irq_handler) {
- acpi_os_remove_interrupt_handler(acpi_irq_irq,
- acpi_irq_handler);
- }
-
- destroy_workqueue(kacpid_wq);
- destroy_workqueue(kacpi_notify_wq);
- destroy_workqueue(kacpi_hotplug_wq);
-
- return AE_OK;
-}
-
void acpi_os_printf(const char *fmt, ...)
{
va_list args;
@@ -260,29 +261,135 @@ acpi_physical_address __init acpi_os_get_root_pointer(void)
}
}
+/* Must be called with 'acpi_ioremap_lock' or RCU read lock held. */
+static struct acpi_ioremap *
+acpi_map_lookup(acpi_physical_address phys, acpi_size size)
+{
+ struct acpi_ioremap *map;
+
+ list_for_each_entry_rcu(map, &acpi_ioremaps, list)
+ if (map->phys <= phys &&
+ phys + size <= map->phys + map->size)
+ return map;
+
+ return NULL;
+}
+
+/* Must be called with 'acpi_ioremap_lock' or RCU read lock held. */
+static void __iomem *
+acpi_map_vaddr_lookup(acpi_physical_address phys, unsigned int size)
+{
+ struct acpi_ioremap *map;
+
+ map = acpi_map_lookup(phys, size);
+ if (map)
+ return map->virt + (phys - map->phys);
+
+ return NULL;
+}
+
+/* Must be called with 'acpi_ioremap_lock' or RCU read lock held. */
+static struct acpi_ioremap *
+acpi_map_lookup_virt(void __iomem *virt, acpi_size size)
+{
+ struct acpi_ioremap *map;
+
+ list_for_each_entry_rcu(map, &acpi_ioremaps, list)
+ if (map->virt <= virt &&
+ virt + size <= map->virt + map->size)
+ return map;
+
+ return NULL;
+}
+
void __iomem *__init_refok
acpi_os_map_memory(acpi_physical_address phys, acpi_size size)
{
+ struct acpi_ioremap *map, *tmp_map;
+ unsigned long flags, pg_sz;
+ void __iomem *virt;
+ phys_addr_t pg_off;
+
if (phys > ULONG_MAX) {
printk(KERN_ERR PREFIX "Cannot map memory that high\n");
return NULL;
}
- if (acpi_gbl_permanent_mmap)
- /*
- * ioremap checks to ensure this is in reserved space
- */
- return ioremap((unsigned long)phys, size);
- else
+
+ if (!acpi_gbl_permanent_mmap)
return __acpi_map_table((unsigned long)phys, size);
+
+ map = kzalloc(sizeof(*map), GFP_KERNEL);
+ if (!map)
+ return NULL;
+
+ pg_off = round_down(phys, PAGE_SIZE);
+ pg_sz = round_up(phys + size, PAGE_SIZE) - pg_off;
+ virt = ioremap(pg_off, pg_sz);
+ if (!virt) {
+ kfree(map);
+ return NULL;
+ }
+
+ INIT_LIST_HEAD(&map->list);
+ map->virt = virt;
+ map->phys = pg_off;
+ map->size = pg_sz;
+ kref_init(&map->ref);
+
+ spin_lock_irqsave(&acpi_ioremap_lock, flags);
+ /* Check if page has already been mapped. */
+ tmp_map = acpi_map_lookup(phys, size);
+ if (tmp_map) {
+ kref_get(&tmp_map->ref);
+ spin_unlock_irqrestore(&acpi_ioremap_lock, flags);
+ iounmap(map->virt);
+ kfree(map);
+ return tmp_map->virt + (phys - tmp_map->phys);
+ }
+ list_add_tail_rcu(&map->list, &acpi_ioremaps);
+ spin_unlock_irqrestore(&acpi_ioremap_lock, flags);
+
+ return map->virt + (phys - map->phys);
}
EXPORT_SYMBOL_GPL(acpi_os_map_memory);
+static void acpi_kref_del_iomap(struct kref *ref)
+{
+ struct acpi_ioremap *map;
+
+ map = container_of(ref, struct acpi_ioremap, ref);
+ list_del_rcu(&map->list);
+}
+
void __ref acpi_os_unmap_memory(void __iomem *virt, acpi_size size)
{
- if (acpi_gbl_permanent_mmap)
- iounmap(virt);
- else
+ struct acpi_ioremap *map;
+ unsigned long flags;
+ int del;
+
+ if (!acpi_gbl_permanent_mmap) {
__acpi_unmap_table(virt, size);
+ return;
+ }
+
+ spin_lock_irqsave(&acpi_ioremap_lock, flags);
+ map = acpi_map_lookup_virt(virt, size);
+ if (!map) {
+ spin_unlock_irqrestore(&acpi_ioremap_lock, flags);
+ printk(KERN_ERR PREFIX "%s: bad address %p\n", __func__, virt);
+ dump_stack();
+ return;
+ }
+
+ del = kref_put(&map->ref, acpi_kref_del_iomap);
+ spin_unlock_irqrestore(&acpi_ioremap_lock, flags);
+
+ if (!del)
+ return;
+
+ synchronize_rcu();
+ iounmap(map->virt);
+ kfree(map);
}
EXPORT_SYMBOL_GPL(acpi_os_unmap_memory);
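The lifetime rules behind this pairing: readers in interrupt context (acpi_os_read_memory()/acpi_os_write_memory() below) walk acpi_ioremaps under rcu_read_lock() alone, so the unmap path publishes the removal with list_del_rcu() in acpi_kref_del_iomap(), drops the spinlock, waits out existing readers with synchronize_rcu(), and only then calls iounmap() and frees the entry.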
@@ -292,6 +399,44 @@ void __init early_acpi_os_unmap_memory(void __iomem *virt, acpi_size size)
__acpi_unmap_table(virt, size);
}
+int acpi_os_map_generic_address(struct acpi_generic_address *addr)
+{
+ void __iomem *virt;
+
+ if (addr->space_id != ACPI_ADR_SPACE_SYSTEM_MEMORY)
+ return 0;
+
+ if (!addr->address || !addr->bit_width)
+ return -EINVAL;
+
+ virt = acpi_os_map_memory(addr->address, addr->bit_width / 8);
+ if (!virt)
+ return -EIO;
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(acpi_os_map_generic_address);
+
+void acpi_os_unmap_generic_address(struct acpi_generic_address *addr)
+{
+ void __iomem *virt;
+ unsigned long flags;
+ acpi_size size = addr->bit_width / 8;
+
+ if (addr->space_id != ACPI_ADR_SPACE_SYSTEM_MEMORY)
+ return;
+
+ if (!addr->address || !addr->bit_width)
+ return;
+
+ spin_lock_irqsave(&acpi_ioremap_lock, flags);
+ virt = acpi_map_vaddr_lookup(addr->address, size);
+ spin_unlock_irqrestore(&acpi_ioremap_lock, flags);
+
+ acpi_os_unmap_memory(virt, size);
+}
+EXPORT_SYMBOL_GPL(acpi_os_unmap_generic_address);
+
#ifdef ACPI_FUTURE_USAGE
acpi_status
acpi_os_get_physical_address(void *virt, acpi_physical_address * phys)
@@ -495,8 +640,15 @@ acpi_os_read_memory(acpi_physical_address phys_addr, u32 * value, u32 width)
{
u32 dummy;
void __iomem *virt_addr;
-
- virt_addr = ioremap(phys_addr, width);
+ int size = width / 8, unmap = 0;
+
+ rcu_read_lock();
+ virt_addr = acpi_map_vaddr_lookup(phys_addr, size);
+ rcu_read_unlock();
+ if (!virt_addr) {
+ virt_addr = ioremap(phys_addr, size);
+ unmap = 1;
+ }
if (!value)
value = &dummy;
@@ -514,7 +666,8 @@ acpi_os_read_memory(acpi_physical_address phys_addr, u32 * value, u32 width)
BUG();
}
- iounmap(virt_addr);
+ if (unmap)
+ iounmap(virt_addr);
return AE_OK;
}
@@ -523,8 +676,15 @@ acpi_status
acpi_os_write_memory(acpi_physical_address phys_addr, u32 value, u32 width)
{
void __iomem *virt_addr;
-
- virt_addr = ioremap(phys_addr, width);
+ int size = width / 8, unmap = 0;
+
+ rcu_read_lock();
+ virt_addr = acpi_map_vaddr_lookup(phys_addr, size);
+ rcu_read_unlock();
+ if (!virt_addr) {
+ virt_addr = ioremap(phys_addr, size);
+ unmap = 1;
+ }
switch (width) {
case 8:
@@ -540,16 +700,18 @@ acpi_os_write_memory(acpi_physical_address phys_addr, u32 value, u32 width)
BUG();
}
- iounmap(virt_addr);
+ if (unmap)
+ iounmap(virt_addr);
return AE_OK;
}
acpi_status
acpi_os_read_pci_configuration(struct acpi_pci_id * pci_id, u32 reg,
- u32 *value, u32 width)
+ u64 *value, u32 width)
{
int result, size;
+ u32 value32;
if (!value)
return AE_BAD_PARAMETER;
@@ -570,7 +732,8 @@ acpi_os_read_pci_configuration(struct acpi_pci_id * pci_id, u32 reg,
result = raw_pci_read(pci_id->segment, pci_id->bus,
PCI_DEVFN(pci_id->device, pci_id->function),
- reg, size, value);
+ reg, size, &value32);
+ *value = value32;
return (result ? AE_ERROR : AE_OK);
}
@@ -602,74 +765,6 @@ acpi_os_write_pci_configuration(struct acpi_pci_id * pci_id, u32 reg,
return (result ? AE_ERROR : AE_OK);
}
-/* TODO: Change code to take advantage of driver model more */
-static void acpi_os_derive_pci_id_2(acpi_handle rhandle, /* upper bound */
- acpi_handle chandle, /* current node */
- struct acpi_pci_id **id,
- int *is_bridge, u8 * bus_number)
-{
- acpi_handle handle;
- struct acpi_pci_id *pci_id = *id;
- acpi_status status;
- unsigned long long temp;
- acpi_object_type type;
-
- acpi_get_parent(chandle, &handle);
- if (handle != rhandle) {
- acpi_os_derive_pci_id_2(rhandle, handle, &pci_id, is_bridge,
- bus_number);
-
- status = acpi_get_type(handle, &type);
- if ((ACPI_FAILURE(status)) || (type != ACPI_TYPE_DEVICE))
- return;
-
- status = acpi_evaluate_integer(handle, METHOD_NAME__ADR, NULL,
- &temp);
- if (ACPI_SUCCESS(status)) {
- u32 val;
- pci_id->device = ACPI_HIWORD(ACPI_LODWORD(temp));
- pci_id->function = ACPI_LOWORD(ACPI_LODWORD(temp));
-
- if (*is_bridge)
- pci_id->bus = *bus_number;
-
- /* any nicer way to get bus number of bridge ? */
- status =
- acpi_os_read_pci_configuration(pci_id, 0x0e, &val,
- 8);
- if (ACPI_SUCCESS(status)
- && ((val & 0x7f) == 1 || (val & 0x7f) == 2)) {
- status =
- acpi_os_read_pci_configuration(pci_id, 0x18,
- &val, 8);
- if (!ACPI_SUCCESS(status)) {
- /* Certainly broken... FIX ME */
- return;
- }
- *is_bridge = 1;
- pci_id->bus = val;
- status =
- acpi_os_read_pci_configuration(pci_id, 0x19,
- &val, 8);
- if (ACPI_SUCCESS(status)) {
- *bus_number = val;
- }
- } else
- *is_bridge = 0;
- }
- }
-}
-
-void acpi_os_derive_pci_id(acpi_handle rhandle, /* upper bound */
- acpi_handle chandle, /* current node */
- struct acpi_pci_id **id)
-{
- int is_bridge = 1;
- u8 bus_number = (*id)->bus;
-
- acpi_os_derive_pci_id_2(rhandle, chandle, id, &is_bridge, &bus_number);
-}
-
static void acpi_os_execute_deferred(struct work_struct *work)
{
struct acpi_os_dpc *dpc = container_of(work, struct acpi_os_dpc, work);
@@ -780,16 +875,6 @@ void acpi_os_wait_events_complete(void *context)
EXPORT_SYMBOL(acpi_os_wait_events_complete);
/*
- * Allocate the memory for a spinlock and initialize it.
- */
-acpi_status acpi_os_create_lock(acpi_spinlock * handle)
-{
- spin_lock_init(*handle);
-
- return AE_OK;
-}
-
-/*
* Deallocate the memory for a spinlock.
*/
void acpi_os_delete_lock(acpi_spinlock handle)
@@ -977,6 +1062,12 @@ static void __init set_osi_linux(unsigned int enable)
printk(KERN_NOTICE PREFIX "%sed _OSI(Linux)\n",
enable ? "Add": "Delet");
}
+
+ if (osi_linux.enable)
+ acpi_osi_setup("Linux");
+ else
+ acpi_osi_setup("!Linux");
+
return;
}
@@ -1011,21 +1102,33 @@ void __init acpi_dmi_osi_linux(int enable, const struct dmi_system_id *d)
* string starting with '!' disables that string
* otherwise string is added to list, augmenting built-in strings
*/
-int __init acpi_osi_setup(char *str)
+static void __init acpi_osi_setup_late(void)
{
- if (str == NULL || *str == '\0') {
- printk(KERN_INFO PREFIX "_OSI method disabled\n");
- acpi_gbl_create_osi_method = FALSE;
- } else if (!strcmp("!Linux", str)) {
+ char *str = osi_setup_string;
+
+ if (*str == '\0')
+ return;
+
+ if (!strcmp("!Linux", str)) {
acpi_cmdline_osi_linux(0); /* !enable */
} else if (*str == '!') {
- if (acpi_osi_invalidate(++str) == AE_OK)
+ if (acpi_remove_interface(++str) == AE_OK)
printk(KERN_INFO PREFIX "Deleted _OSI(%s)\n", str);
} else if (!strcmp("Linux", str)) {
acpi_cmdline_osi_linux(1); /* enable */
- } else if (*osi_additional_string == '\0') {
- strncpy(osi_additional_string, str, OSI_STRING_LENGTH_MAX);
- printk(KERN_INFO PREFIX "Added _OSI(%s)\n", str);
+ } else {
+ if (acpi_install_interface(str) == AE_OK)
+ printk(KERN_INFO PREFIX "Added _OSI(%s)\n", str);
+ }
+}
+
+int __init acpi_osi_setup(char *str)
+{
+ if (str == NULL || *str == '\0') {
+ printk(KERN_INFO PREFIX "_OSI method disabled\n");
+ acpi_gbl_create_osi_method = FALSE;
+ } else {
+ strncpy(osi_setup_string, str, OSI_STRING_LENGTH_MAX);
}
return 1;
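The split keeps the existing acpi_osi= boot-parameter semantics while deferring the ACPICA calls: acpi_osi_setup() now only records the string, and acpi_osi_setup_late() applies it once the OSI mutex and interface list exist. For example, acpi_osi="!Windows 2006" ends up in acpi_remove_interface(), acpi_osi=FooBar in acpi_install_interface(), and an empty acpi_osi= still disables the _OSI method entirely (the interface strings here are examples only).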
@@ -1152,21 +1255,6 @@ int acpi_check_region(resource_size_t start, resource_size_t n,
}
EXPORT_SYMBOL(acpi_check_region);
-int acpi_check_mem_region(resource_size_t start, resource_size_t n,
- const char *name)
-{
- struct resource res = {
- .start = start,
- .end = start + n - 1,
- .name = name,
- .flags = IORESOURCE_MEM,
- };
-
- return acpi_check_resource_conflict(&res);
-
-}
-EXPORT_SYMBOL(acpi_check_mem_region);
-
/*
* Let drivers know whether the resource checks are effective
*/
@@ -1282,38 +1370,6 @@ acpi_status acpi_os_release_object(acpi_cache_t * cache, void *object)
return (AE_OK);
}
-/******************************************************************************
- *
- * FUNCTION: acpi_os_validate_interface
- *
- * PARAMETERS: interface - Requested interface to be validated
- *
- * RETURN: AE_OK if interface is supported, AE_SUPPORT otherwise
- *
- * DESCRIPTION: Match an interface string to the interfaces supported by the
- * host. Strings originate from an AML call to the _OSI method.
- *
- *****************************************************************************/
-
-acpi_status
-acpi_os_validate_interface (char *interface)
-{
- if (!strncmp(osi_additional_string, interface, OSI_STRING_LENGTH_MAX))
- return AE_OK;
- if (!strcmp("Linux", interface)) {
-
- printk(KERN_NOTICE PREFIX
- "BIOS _OSI(Linux) query %s%s\n",
- osi_linux.enable ? "honored" : "ignored",
- osi_linux.cmdline ? " via cmdline" :
- osi_linux.dmi ? " via DMI" : "");
-
- if (osi_linux.enable)
- return AE_OK;
- }
- return AE_SUPPORT;
-}
-
static inline int acpi_res_list_add(struct acpi_res_list *res)
{
struct acpi_res_list *res_list_elem;
@@ -1462,5 +1518,46 @@ acpi_os_validate_address (
}
return AE_OK;
}
-
#endif
+
+acpi_status __init acpi_os_initialize(void)
+{
+ acpi_os_map_generic_address(&acpi_gbl_FADT.xpm1a_event_block);
+ acpi_os_map_generic_address(&acpi_gbl_FADT.xpm1b_event_block);
+ acpi_os_map_generic_address(&acpi_gbl_FADT.xgpe0_block);
+ acpi_os_map_generic_address(&acpi_gbl_FADT.xgpe1_block);
+
+ return AE_OK;
+}
+
+acpi_status acpi_os_initialize1(void)
+{
+ kacpid_wq = create_workqueue("kacpid");
+ kacpi_notify_wq = create_workqueue("kacpi_notify");
+ kacpi_hotplug_wq = create_workqueue("kacpi_hotplug");
+ BUG_ON(!kacpid_wq);
+ BUG_ON(!kacpi_notify_wq);
+ BUG_ON(!kacpi_hotplug_wq);
+ acpi_install_interface_handler(acpi_osi_handler);
+ acpi_osi_setup_late();
+ return AE_OK;
+}
+
+acpi_status acpi_os_terminate(void)
+{
+ if (acpi_irq_handler) {
+ acpi_os_remove_interrupt_handler(acpi_irq_irq,
+ acpi_irq_handler);
+ }
+
+ acpi_os_unmap_generic_address(&acpi_gbl_FADT.xgpe1_block);
+ acpi_os_unmap_generic_address(&acpi_gbl_FADT.xgpe0_block);
+ acpi_os_unmap_generic_address(&acpi_gbl_FADT.xpm1b_event_block);
+ acpi_os_unmap_generic_address(&acpi_gbl_FADT.xpm1a_event_block);
+
+ destroy_workqueue(kacpid_wq);
+ destroy_workqueue(kacpi_notify_wq);
+ destroy_workqueue(kacpi_hotplug_wq);
+
+ return AE_OK;
+}
diff --git a/drivers/acpi/pci_irq.c b/drivers/acpi/pci_irq.c
index e4804fb05e23..f907cfbfa13c 100644
--- a/drivers/acpi/pci_irq.c
+++ b/drivers/acpi/pci_irq.c
@@ -32,7 +32,6 @@
#include <linux/module.h>
#include <linux/init.h>
#include <linux/types.h>
-#include <linux/proc_fs.h>
#include <linux/spinlock.h>
#include <linux/pm.h>
#include <linux/pci.h>
diff --git a/drivers/acpi/pci_link.c b/drivers/acpi/pci_link.c
index 8d47a5846aeb..9ff80a7e9f6a 100644
--- a/drivers/acpi/pci_link.c
+++ b/drivers/acpi/pci_link.c
@@ -34,7 +34,6 @@
#include <linux/module.h>
#include <linux/init.h>
#include <linux/types.h>
-#include <linux/proc_fs.h>
#include <linux/spinlock.h>
#include <linux/pm.h>
#include <linux/pci.h>
diff --git a/drivers/acpi/pci_root.c b/drivers/acpi/pci_root.c
index 3ba8d1f44a73..96668ad09622 100644
--- a/drivers/acpi/pci_root.c
+++ b/drivers/acpi/pci_root.c
@@ -27,7 +27,6 @@
#include <linux/module.h>
#include <linux/init.h>
#include <linux/types.h>
-#include <linux/proc_fs.h>
#include <linux/spinlock.h>
#include <linux/pm.h>
#include <linux/pm_runtime.h>
diff --git a/drivers/acpi/power.c b/drivers/acpi/power.c
index 844c155aeb0f..67dedeed144c 100644
--- a/drivers/acpi/power.c
+++ b/drivers/acpi/power.c
@@ -80,18 +80,13 @@ static struct acpi_driver acpi_power_driver = {
},
};
-struct acpi_power_reference {
- struct list_head node;
- struct acpi_device *device;
-};
-
struct acpi_power_resource {
struct acpi_device * device;
acpi_bus_id name;
u32 system_level;
u32 order;
+ unsigned int ref_count;
struct mutex resource_lock;
- struct list_head reference;
};
static struct list_head acpi_power_resource_list;
@@ -184,101 +179,89 @@ static int acpi_power_get_list_state(struct acpi_handle_list *list, int *state)
return result;
}
-static int acpi_power_on(acpi_handle handle, struct acpi_device *dev)
+static int __acpi_power_on(struct acpi_power_resource *resource)
{
- int result = 0;
- int found = 0;
acpi_status status = AE_OK;
- struct acpi_power_resource *resource = NULL;
- struct list_head *node, *next;
- struct acpi_power_reference *ref;
+ status = acpi_evaluate_object(resource->device->handle, "_ON", NULL, NULL);
+ if (ACPI_FAILURE(status))
+ return -ENODEV;
+
+ /* Update the power resource's _device_ power state */
+ resource->device->power.state = ACPI_STATE_D0;
+
+ ACPI_DEBUG_PRINT((ACPI_DB_INFO, "Power resource [%s] turned on\n",
+ resource->name));
+
+ return 0;
+}
+
+static int acpi_power_on(acpi_handle handle)
+{
+ int result = 0;
+ struct acpi_power_resource *resource = NULL;
result = acpi_power_get_context(handle, &resource);
if (result)
return result;
mutex_lock(&resource->resource_lock);
- list_for_each_safe(node, next, &resource->reference) {
- ref = container_of(node, struct acpi_power_reference, node);
- if (dev->handle == ref->device->handle) {
- ACPI_DEBUG_PRINT((ACPI_DB_INFO, "Device [%s] already referenced by resource [%s]\n",
- dev->pnp.bus_id, resource->name));
- found = 1;
- break;
- }
- }
- if (!found) {
- ref = kmalloc(sizeof (struct acpi_power_reference),
- irqs_disabled() ? GFP_ATOMIC : GFP_KERNEL);
- if (!ref) {
- ACPI_DEBUG_PRINT((ACPI_DB_INFO, "kmalloc() failed\n"));
- mutex_unlock(&resource->resource_lock);
- return -ENOMEM;
- }
- list_add_tail(&ref->node, &resource->reference);
- ref->device = dev;
- ACPI_DEBUG_PRINT((ACPI_DB_INFO, "Device [%s] added to resource [%s] references\n",
- dev->pnp.bus_id, resource->name));
+ if (resource->ref_count++) {
+ ACPI_DEBUG_PRINT((ACPI_DB_INFO,
+ "Power resource [%s] already on",
+ resource->name));
+ } else {
+ result = __acpi_power_on(resource);
}
- mutex_unlock(&resource->resource_lock);
- status = acpi_evaluate_object(resource->device->handle, "_ON", NULL, NULL);
- if (ACPI_FAILURE(status))
- return -ENODEV;
-
- /* Update the power resource's _device_ power state */
- resource->device->power.state = ACPI_STATE_D0;
+ mutex_unlock(&resource->resource_lock);
- ACPI_DEBUG_PRINT((ACPI_DB_INFO, "Resource [%s] turned on\n",
- resource->name));
return 0;
}
-static int acpi_power_off_device(acpi_handle handle, struct acpi_device *dev)
+static int acpi_power_off_device(acpi_handle handle)
{
int result = 0;
acpi_status status = AE_OK;
struct acpi_power_resource *resource = NULL;
- struct list_head *node, *next;
- struct acpi_power_reference *ref;
result = acpi_power_get_context(handle, &resource);
if (result)
return result;
mutex_lock(&resource->resource_lock);
- list_for_each_safe(node, next, &resource->reference) {
- ref = container_of(node, struct acpi_power_reference, node);
- if (dev->handle == ref->device->handle) {
- list_del(&ref->node);
- kfree(ref);
- ACPI_DEBUG_PRINT((ACPI_DB_INFO, "Device [%s] removed from resource [%s] references\n",
- dev->pnp.bus_id, resource->name));
- break;
- }
+
+ if (!resource->ref_count) {
+ ACPI_DEBUG_PRINT((ACPI_DB_INFO,
+ "Power resource [%s] already off",
+ resource->name));
+ goto unlock;
}
- if (!list_empty(&resource->reference)) {
- ACPI_DEBUG_PRINT((ACPI_DB_INFO, "Cannot turn resource [%s] off - resource is in use\n",
- resource->name));
- mutex_unlock(&resource->resource_lock);
- return 0;
+ if (--resource->ref_count) {
+ ACPI_DEBUG_PRINT((ACPI_DB_INFO,
+ "Power resource [%s] still in use\n",
+ resource->name));
+ goto unlock;
}
- mutex_unlock(&resource->resource_lock);
status = acpi_evaluate_object(resource->device->handle, "_OFF", NULL, NULL);
- if (ACPI_FAILURE(status))
- return -ENODEV;
+ if (ACPI_FAILURE(status)) {
+ result = -ENODEV;
+ } else {
+ /* Update the power resource's _device_ power state */
+ resource->device->power.state = ACPI_STATE_D3;
- /* Update the power resource's _device_ power state */
- resource->device->power.state = ACPI_STATE_D3;
+ ACPI_DEBUG_PRINT((ACPI_DB_INFO,
+ "Power resource [%s] turned off\n",
+ resource->name));
+ }
- ACPI_DEBUG_PRINT((ACPI_DB_INFO, "Resource [%s] turned off\n",
- resource->name));
+ unlock:
+ mutex_unlock(&resource->resource_lock);
- return 0;
+ return result;
}
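The per-device reference list is replaced by a plain ref_count protected by resource_lock: _ON is evaluated only on the 0 -> 1 transition (via __acpi_power_on()) and _OFF only when the count drops back to zero, which is also what lets acpi_power_resume() below re-run __acpi_power_on() under the same lock instead of chasing a reference entry.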
/**
@@ -364,7 +347,7 @@ int acpi_enable_wakeup_device_power(struct acpi_device *dev, int sleep_state)
/* Open power resource */
for (i = 0; i < dev->wakeup.resources.count; i++) {
- int ret = acpi_power_on(dev->wakeup.resources.handles[i], dev);
+ int ret = acpi_power_on(dev->wakeup.resources.handles[i]);
if (ret) {
printk(KERN_ERR PREFIX "Transition power state\n");
dev->wakeup.flags.valid = 0;
@@ -420,7 +403,7 @@ int acpi_disable_wakeup_device_power(struct acpi_device *dev)
/* Close power resource */
for (i = 0; i < dev->wakeup.resources.count; i++) {
int ret = acpi_power_off_device(
- dev->wakeup.resources.handles[i], dev);
+ dev->wakeup.resources.handles[i]);
if (ret) {
printk(KERN_ERR PREFIX "Transition power state\n");
dev->wakeup.flags.valid = 0;
@@ -500,7 +483,7 @@ int acpi_power_transition(struct acpi_device *device, int state)
* (e.g. so the device doesn't lose power while transitioning).
*/
for (i = 0; i < tl->count; i++) {
- result = acpi_power_on(tl->handles[i], device);
+ result = acpi_power_on(tl->handles[i]);
if (result)
goto end;
}
@@ -513,7 +496,7 @@ int acpi_power_transition(struct acpi_device *device, int state)
* Then we dereference all power resources used in the current list.
*/
for (i = 0; i < cl->count; i++) {
- result = acpi_power_off_device(cl->handles[i], device);
+ result = acpi_power_off_device(cl->handles[i]);
if (result)
goto end;
}
@@ -551,7 +534,6 @@ static int acpi_power_add(struct acpi_device *device)
resource->device = device;
mutex_init(&resource->resource_lock);
- INIT_LIST_HEAD(&resource->reference);
strcpy(resource->name, device->pnp.bus_id);
strcpy(acpi_device_name(device), ACPI_POWER_DEVICE_NAME);
strcpy(acpi_device_class(device), ACPI_POWER_CLASS);
@@ -594,22 +576,14 @@ static int acpi_power_add(struct acpi_device *device)
static int acpi_power_remove(struct acpi_device *device, int type)
{
- struct acpi_power_resource *resource = NULL;
- struct list_head *node, *next;
+ struct acpi_power_resource *resource;
-
- if (!device || !acpi_driver_data(device))
+ if (!device)
return -EINVAL;
resource = acpi_driver_data(device);
-
- mutex_lock(&resource->resource_lock);
- list_for_each_safe(node, next, &resource->reference) {
- struct acpi_power_reference *ref = container_of(node, struct acpi_power_reference, node);
- list_del(&ref->node);
- kfree(ref);
- }
- mutex_unlock(&resource->resource_lock);
+ if (!resource)
+ return -EINVAL;
kfree(resource);
@@ -619,29 +593,28 @@ static int acpi_power_remove(struct acpi_device *device, int type)
static int acpi_power_resume(struct acpi_device *device)
{
int result = 0, state;
- struct acpi_power_resource *resource = NULL;
- struct acpi_power_reference *ref;
+ struct acpi_power_resource *resource;
- if (!device || !acpi_driver_data(device))
+ if (!device)
return -EINVAL;
resource = acpi_driver_data(device);
+ if (!resource)
+ return -EINVAL;
+
+ mutex_lock(&resource->resource_lock);
result = acpi_power_get_state(device->handle, &state);
if (result)
- return result;
+ goto unlock;
- mutex_lock(&resource->resource_lock);
- if (state == ACPI_POWER_RESOURCE_STATE_OFF &&
- !list_empty(&resource->reference)) {
- ref = container_of(resource->reference.next, struct acpi_power_reference, node);
- mutex_unlock(&resource->resource_lock);
- result = acpi_power_on(device->handle, ref->device);
- return result;
- }
+ if (state == ACPI_POWER_RESOURCE_STATE_OFF && resource->ref_count)
+ result = __acpi_power_on(resource);
+ unlock:
mutex_unlock(&resource->resource_lock);
- return 0;
+
+ return result;
}
int __init acpi_power_init(void)
diff --git a/drivers/acpi/processor_driver.c b/drivers/acpi/processor_driver.c
index 347eb21b2353..85e48047d7b0 100644
--- a/drivers/acpi/processor_driver.c
+++ b/drivers/acpi/processor_driver.c
@@ -40,8 +40,10 @@
#include <linux/pm.h>
#include <linux/cpufreq.h>
#include <linux/cpu.h>
+#ifdef CONFIG_ACPI_PROCFS
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
+#endif
#include <linux/dmi.h>
#include <linux/moduleparam.h>
#include <linux/cpuidle.h>
@@ -244,6 +246,7 @@ static int acpi_processor_errata(struct acpi_processor *pr)
return result;
}
+#ifdef CONFIG_ACPI_PROCFS
static struct proc_dir_entry *acpi_processor_dir = NULL;
static int __cpuinit acpi_processor_add_fs(struct acpi_device *device)
@@ -280,7 +283,16 @@ static int acpi_processor_remove_fs(struct acpi_device *device)
return 0;
}
-
+#else
+static inline int acpi_processor_add_fs(struct acpi_device *device)
+{
+ return 0;
+}
+static inline int acpi_processor_remove_fs(struct acpi_device *device)
+{
+ return 0;
+}
+#endif
/* --------------------------------------------------------------------------
Driver Interface
-------------------------------------------------------------------------- */
@@ -842,9 +854,11 @@ static int __init acpi_processor_init(void)
memset(&errata, 0, sizeof(errata));
+#ifdef CONFIG_ACPI_PROCFS
acpi_processor_dir = proc_mkdir(ACPI_PROCESSOR_CLASS, acpi_root_dir);
if (!acpi_processor_dir)
return -ENOMEM;
+#endif
if (!cpuidle_register_driver(&acpi_idle_driver)) {
printk(KERN_DEBUG "ACPI: %s registered with cpuidle\n",
@@ -871,7 +885,9 @@ static int __init acpi_processor_init(void)
out_cpuidle:
cpuidle_unregister_driver(&acpi_idle_driver);
+#ifdef CONFIG_ACPI_PROCFS
remove_proc_entry(ACPI_PROCESSOR_CLASS, acpi_root_dir);
+#endif
return result;
}
@@ -891,7 +907,9 @@ static void __exit acpi_processor_exit(void)
cpuidle_unregister_driver(&acpi_idle_driver);
+#ifdef CONFIG_ACPI_PROCFS
remove_proc_entry(ACPI_PROCESSOR_CLASS, acpi_root_dir);
+#endif
return;
}
@@ -899,6 +917,4 @@ static void __exit acpi_processor_exit(void)
module_init(acpi_processor_init);
module_exit(acpi_processor_exit);
-EXPORT_SYMBOL(acpi_processor_set_thermal_limit);
-
MODULE_ALIAS("processor");
diff --git a/drivers/acpi/processor_idle.c b/drivers/acpi/processor_idle.c
index f4428e82b352..dcb38f8ddfda 100644
--- a/drivers/acpi/processor_idle.c
+++ b/drivers/acpi/processor_idle.c
@@ -64,7 +64,6 @@
#define ACPI_PROCESSOR_CLASS "processor"
#define _COMPONENT ACPI_PROCESSOR_COMPONENT
ACPI_MODULE_NAME("processor_idle");
-#define ACPI_PROCESSOR_FILE_POWER "power"
#define PM_TIMER_TICK_NS (1000000000ULL/PM_TIMER_FREQUENCY)
#define C2_OVERHEAD 1 /* 1us */
#define C3_OVERHEAD 1 /* 1us */
@@ -1013,7 +1012,6 @@ static int acpi_processor_setup_cpuidle(struct acpi_processor *pr)
strncpy(state->desc, cx->desc, CPUIDLE_DESC_LEN);
state->exit_latency = cx->latency;
state->target_residency = cx->latency * latency_factor;
- state->power_usage = cx->power;
state->flags = 0;
switch (cx->type) {
diff --git a/drivers/acpi/processor_thermal.c b/drivers/acpi/processor_thermal.c
index 953b25fb9869..fde49b9b1d99 100644
--- a/drivers/acpi/processor_thermal.c
+++ b/drivers/acpi/processor_thermal.c
@@ -44,47 +44,6 @@
#define _COMPONENT ACPI_PROCESSOR_COMPONENT
ACPI_MODULE_NAME("processor_thermal");
-/* --------------------------------------------------------------------------
- Limit Interface
- -------------------------------------------------------------------------- */
-static int acpi_processor_apply_limit(struct acpi_processor *pr)
-{
- int result = 0;
- u16 px = 0;
- u16 tx = 0;
-
-
- if (!pr)
- return -EINVAL;
-
- if (!pr->flags.limit)
- return -ENODEV;
-
- if (pr->flags.throttling) {
- if (pr->limit.user.tx > tx)
- tx = pr->limit.user.tx;
- if (pr->limit.thermal.tx > tx)
- tx = pr->limit.thermal.tx;
-
- result = acpi_processor_set_throttling(pr, tx, false);
- if (result)
- goto end;
- }
-
- pr->limit.state.px = px;
- pr->limit.state.tx = tx;
-
- ACPI_DEBUG_PRINT((ACPI_DB_INFO,
- "Processor [%d] limit set to (P%d:T%d)\n", pr->id,
- pr->limit.state.px, pr->limit.state.tx));
-
- end:
- if (result)
- printk(KERN_ERR PREFIX "Unable to set limit\n");
-
- return result;
-}
-
#ifdef CONFIG_CPU_FREQ
/* If a passive cooling situation is detected, primarily CPUfreq is used, as it
@@ -107,36 +66,6 @@ static int cpu_has_cpufreq(unsigned int cpu)
return 1;
}
-static int acpi_thermal_cpufreq_increase(unsigned int cpu)
-{
- if (!cpu_has_cpufreq(cpu))
- return -ENODEV;
-
- if (per_cpu(cpufreq_thermal_reduction_pctg, cpu) <
- CPUFREQ_THERMAL_MAX_STEP) {
- per_cpu(cpufreq_thermal_reduction_pctg, cpu)++;
- cpufreq_update_policy(cpu);
- return 0;
- }
-
- return -ERANGE;
-}
-
-static int acpi_thermal_cpufreq_decrease(unsigned int cpu)
-{
- if (!cpu_has_cpufreq(cpu))
- return -ENODEV;
-
- if (per_cpu(cpufreq_thermal_reduction_pctg, cpu) >
- (CPUFREQ_THERMAL_MIN_STEP + 1))
- per_cpu(cpufreq_thermal_reduction_pctg, cpu)--;
- else
- per_cpu(cpufreq_thermal_reduction_pctg, cpu) = 0;
- cpufreq_update_policy(cpu);
- /* We reached max freq again and can leave passive mode */
- return !per_cpu(cpufreq_thermal_reduction_pctg, cpu);
-}
-
static int acpi_thermal_cpufreq_notifier(struct notifier_block *nb,
unsigned long event, void *data)
{
@@ -238,113 +167,6 @@ static int acpi_thermal_cpufreq_decrease(unsigned int cpu)
#endif
-int acpi_processor_set_thermal_limit(acpi_handle handle, int type)
-{
- int result = 0;
- struct acpi_processor *pr = NULL;
- struct acpi_device *device = NULL;
- int tx = 0, max_tx_px = 0;
-
-
- if ((type < ACPI_PROCESSOR_LIMIT_NONE)
- || (type > ACPI_PROCESSOR_LIMIT_DECREMENT))
- return -EINVAL;
-
- result = acpi_bus_get_device(handle, &device);
- if (result)
- return result;
-
- pr = acpi_driver_data(device);
- if (!pr)
- return -ENODEV;
-
- /* Thermal limits are always relative to the current Px/Tx state. */
- if (pr->flags.throttling)
- pr->limit.thermal.tx = pr->throttling.state;
-
- /*
- * Our default policy is to only use throttling at the lowest
- * performance state.
- */
-
- tx = pr->limit.thermal.tx;
-
- switch (type) {
-
- case ACPI_PROCESSOR_LIMIT_NONE:
- do {
- result = acpi_thermal_cpufreq_decrease(pr->id);
- } while (!result);
- tx = 0;
- break;
-
- case ACPI_PROCESSOR_LIMIT_INCREMENT:
- /* if going up: P-states first, T-states later */
-
- result = acpi_thermal_cpufreq_increase(pr->id);
- if (!result)
- goto end;
- else if (result == -ERANGE)
- ACPI_DEBUG_PRINT((ACPI_DB_INFO,
- "At maximum performance state\n"));
-
- if (pr->flags.throttling) {
- if (tx == (pr->throttling.state_count - 1))
- ACPI_DEBUG_PRINT((ACPI_DB_INFO,
- "At maximum throttling state\n"));
- else
- tx++;
- }
- break;
-
- case ACPI_PROCESSOR_LIMIT_DECREMENT:
- /* if going down: T-states first, P-states later */
-
- if (pr->flags.throttling) {
- if (tx == 0) {
- max_tx_px = 1;
- ACPI_DEBUG_PRINT((ACPI_DB_INFO,
- "At minimum throttling state\n"));
- } else {
- tx--;
- goto end;
- }
- }
-
- result = acpi_thermal_cpufreq_decrease(pr->id);
- if (result) {
- /*
- * We only could get -ERANGE, 1 or 0.
- * In the first two cases we reached max freq again.
- */
- ACPI_DEBUG_PRINT((ACPI_DB_INFO,
- "At minimum performance state\n"));
- max_tx_px = 1;
- } else
- max_tx_px = 0;
-
- break;
- }
-
- end:
- if (pr->flags.throttling) {
- pr->limit.thermal.px = 0;
- pr->limit.thermal.tx = tx;
-
- result = acpi_processor_apply_limit(pr);
- if (result)
- printk(KERN_ERR PREFIX "Unable to set thermal limit\n");
-
- ACPI_DEBUG_PRINT((ACPI_DB_INFO, "Thermal limit now (P%d:T%d)\n",
- pr->limit.thermal.px, pr->limit.thermal.tx));
- } else
- result = 0;
- if (max_tx_px)
- return 1;
- else
- return result;
-}
-
int acpi_processor_get_limit_info(struct acpi_processor *pr)
{
diff --git a/drivers/acpi/processor_throttling.c b/drivers/acpi/processor_throttling.c
index 730863855ed5..ff3632717c51 100644
--- a/drivers/acpi/processor_throttling.c
+++ b/drivers/acpi/processor_throttling.c
@@ -32,8 +32,10 @@
#include <linux/init.h>
#include <linux/sched.h>
#include <linux/cpufreq.h>
+#ifdef CONFIG_ACPI_PROCFS
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
+#endif
#include <asm/io.h>
#include <asm/uaccess.h>
@@ -1214,6 +1216,7 @@ int acpi_processor_get_throttling_info(struct acpi_processor *pr)
return result;
}
+#ifdef CONFIG_ACPI_PROCFS
/* proc interface */
static int acpi_processor_throttling_seq_show(struct seq_file *seq,
void *offset)
@@ -1322,3 +1325,4 @@ const struct file_operations acpi_processor_throttling_fops = {
.llseek = seq_lseek,
.release = single_release,
};
+#endif
diff --git a/drivers/acpi/sbs.c b/drivers/acpi/sbs.c
index 4ff76e8174eb..e5dbedb16bbf 100644
--- a/drivers/acpi/sbs.c
+++ b/drivers/acpi/sbs.c
@@ -40,10 +40,7 @@
#include <linux/timer.h>
#include <linux/jiffies.h>
#include <linux/delay.h>
-
-#ifdef CONFIG_ACPI_SYSFS_POWER
#include <linux/power_supply.h>
-#endif
#include "sbshc.h"
@@ -85,9 +82,7 @@ static const struct acpi_device_id sbs_device_ids[] = {
MODULE_DEVICE_TABLE(acpi, sbs_device_ids);
struct acpi_battery {
-#ifdef CONFIG_ACPI_SYSFS_POWER
struct power_supply bat;
-#endif
struct acpi_sbs *sbs;
#ifdef CONFIG_ACPI_PROCFS_POWER
struct proc_dir_entry *proc_entry;
@@ -120,9 +115,7 @@ struct acpi_battery {
#define to_acpi_battery(x) container_of(x, struct acpi_battery, bat);
struct acpi_sbs {
-#ifdef CONFIG_ACPI_SYSFS_POWER
struct power_supply charger;
-#endif
struct acpi_device *device;
struct acpi_smb_hc *hc;
struct mutex lock;
@@ -166,7 +159,6 @@ static inline int acpi_battery_scale(struct acpi_battery *battery)
acpi_battery_ipscale(battery);
}
-#ifdef CONFIG_ACPI_SYSFS_POWER
static int sbs_get_ac_property(struct power_supply *psy,
enum power_supply_property psp,
union power_supply_propval *val)
@@ -313,7 +305,6 @@ static enum power_supply_property sbs_energy_battery_props[] = {
POWER_SUPPLY_PROP_MANUFACTURER,
};
-#endif
/* --------------------------------------------------------------------------
Smart Battery System Management
@@ -449,7 +440,6 @@ static int acpi_ac_get_present(struct acpi_sbs *sbs)
return result;
}
-#ifdef CONFIG_ACPI_SYSFS_POWER
static ssize_t acpi_battery_alarm_show(struct device *dev,
struct device_attribute *attr,
char *buf)
@@ -479,7 +469,6 @@ static struct device_attribute alarm_attr = {
.show = acpi_battery_alarm_show,
.store = acpi_battery_alarm_store,
};
-#endif
/* --------------------------------------------------------------------------
FS Interface (/proc/acpi)
@@ -798,7 +787,6 @@ static int acpi_battery_add(struct acpi_sbs *sbs, int id)
&acpi_battery_state_fops, &acpi_battery_alarm_fops,
battery);
#endif
-#ifdef CONFIG_ACPI_SYSFS_POWER
battery->bat.name = battery->name;
battery->bat.type = POWER_SUPPLY_TYPE_BATTERY;
if (!acpi_battery_mode(battery)) {
@@ -819,7 +807,6 @@ static int acpi_battery_add(struct acpi_sbs *sbs, int id)
goto end;
battery->have_sysfs_alarm = 1;
end:
-#endif
printk(KERN_INFO PREFIX "%s [%s]: Battery Slot [%s] (battery %s)\n",
ACPI_SBS_DEVICE_NAME, acpi_device_bid(sbs->device),
battery->name, battery->present ? "present" : "absent");
@@ -828,17 +815,13 @@ static int acpi_battery_add(struct acpi_sbs *sbs, int id)
static void acpi_battery_remove(struct acpi_sbs *sbs, int id)
{
-#if defined(CONFIG_ACPI_SYSFS_POWER) || defined(CONFIG_ACPI_PROCFS_POWER)
struct acpi_battery *battery = &sbs->battery[id];
-#endif
-#ifdef CONFIG_ACPI_SYSFS_POWER
if (battery->bat.dev) {
if (battery->have_sysfs_alarm)
device_remove_file(battery->bat.dev, &alarm_attr);
power_supply_unregister(&battery->bat);
}
-#endif
#ifdef CONFIG_ACPI_PROCFS_POWER
if (battery->proc_entry)
acpi_sbs_remove_fs(&battery->proc_entry, acpi_battery_dir);
@@ -859,14 +842,12 @@ static int acpi_charger_add(struct acpi_sbs *sbs)
if (result)
goto end;
#endif
-#ifdef CONFIG_ACPI_SYSFS_POWER
sbs->charger.name = "sbs-charger";
sbs->charger.type = POWER_SUPPLY_TYPE_MAINS;
sbs->charger.properties = sbs_ac_props;
sbs->charger.num_properties = ARRAY_SIZE(sbs_ac_props);
sbs->charger.get_property = sbs_get_ac_property;
power_supply_register(&sbs->device->dev, &sbs->charger);
-#endif
printk(KERN_INFO PREFIX "%s [%s]: AC Adapter [%s] (%s)\n",
ACPI_SBS_DEVICE_NAME, acpi_device_bid(sbs->device),
ACPI_AC_DIR_NAME, sbs->charger_present ? "on-line" : "off-line");
@@ -876,10 +857,8 @@ static int acpi_charger_add(struct acpi_sbs *sbs)
static void acpi_charger_remove(struct acpi_sbs *sbs)
{
-#ifdef CONFIG_ACPI_SYSFS_POWER
if (sbs->charger.dev)
power_supply_unregister(&sbs->charger);
-#endif
#ifdef CONFIG_ACPI_PROCFS_POWER
if (sbs->charger_entry)
acpi_sbs_remove_fs(&sbs->charger_entry, acpi_ac_dir);
@@ -900,9 +879,7 @@ static void acpi_sbs_callback(void *context)
ACPI_SBS_NOTIFY_STATUS,
sbs->charger_present);
#endif
-#ifdef CONFIG_ACPI_SYSFS_POWER
kobject_uevent(&sbs->charger.dev->kobj, KOBJ_CHANGE);
-#endif
}
if (sbs->manager_present) {
for (id = 0; id < MAX_SBS_BAT; ++id) {
@@ -919,9 +896,7 @@ static void acpi_sbs_callback(void *context)
ACPI_SBS_NOTIFY_STATUS,
bat->present);
#endif
-#ifdef CONFIG_ACPI_SYSFS_POWER
kobject_uevent(&bat->bat.dev->kobj, KOBJ_CHANGE);
-#endif
}
}
}
diff --git a/drivers/acpi/scan.c b/drivers/acpi/scan.c
index b23825ecfa37..2b6c21d86b98 100644
--- a/drivers/acpi/scan.c
+++ b/drivers/acpi/scan.c
@@ -26,6 +26,8 @@ extern struct acpi_device *acpi_root;
#define ACPI_IS_ROOT_DEVICE(device) (!(device)->parent)
+static const char *dummy_hid = "device";
+
static LIST_HEAD(acpi_device_list);
static LIST_HEAD(acpi_bus_id_list);
DEFINE_MUTEX(acpi_device_lock);
@@ -49,6 +51,9 @@ static int create_modalias(struct acpi_device *acpi_dev, char *modalias,
int count;
struct acpi_hardware_id *id;
+ if (list_empty(&acpi_dev->pnp.ids))
+ return 0;
+
len = snprintf(modalias, size, "acpi:");
size -= len;
@@ -202,13 +207,15 @@ static int acpi_device_setup_files(struct acpi_device *dev)
goto end;
}
- result = device_create_file(&dev->dev, &dev_attr_hid);
- if (result)
- goto end;
+ if (!list_empty(&dev->pnp.ids)) {
+ result = device_create_file(&dev->dev, &dev_attr_hid);
+ if (result)
+ goto end;
- result = device_create_file(&dev->dev, &dev_attr_modalias);
- if (result)
- goto end;
+ result = device_create_file(&dev->dev, &dev_attr_modalias);
+ if (result)
+ goto end;
+ }
/*
* If device has _EJ0, 'eject' file is created that is used to trigger
@@ -316,6 +323,9 @@ static int acpi_device_uevent(struct device *dev, struct kobj_uevent_env *env)
struct acpi_device *acpi_dev = to_acpi_device(dev);
int len;
+ if (list_empty(&acpi_dev->pnp.ids))
+ return 0;
+
if (add_uevent_var(env, "MODALIAS="))
return -ENOMEM;
len = create_modalias(acpi_dev, &env->buf[env->buflen - 1],
@@ -1010,10 +1020,13 @@ static int acpi_dock_match(struct acpi_device *device)
return acpi_get_handle(device->handle, "_DCK", &tmp);
}
-char *acpi_device_hid(struct acpi_device *device)
+const char *acpi_device_hid(struct acpi_device *device)
{
struct acpi_hardware_id *hid;
+ if (list_empty(&device->pnp.ids))
+ return dummy_hid;
+
hid = list_first_entry(&device->pnp.ids, struct acpi_hardware_id, list);
return hid->id;
}
@@ -1142,16 +1155,6 @@ static void acpi_device_set_id(struct acpi_device *device)
acpi_add_id(device, ACPI_BUTTON_HID_SLEEPF);
break;
}
-
- /*
- * We build acpi_devices for some objects that don't have _HID or _CID,
- * e.g., PCI bridges and slots. Drivers can't bind to these objects,
- * but we do use them indirectly by traversing the acpi_device tree.
- * This generic ID isn't useful for driver binding, but it provides
- * the useful property that "every acpi_device has an ID."
- */
- if (list_empty(&device->pnp.ids))
- acpi_add_id(device, "device");
}
static int acpi_device_set_context(struct acpi_device *device)
@@ -1431,6 +1434,7 @@ EXPORT_SYMBOL(acpi_bus_add);
int acpi_bus_start(struct acpi_device *device)
{
struct acpi_bus_ops ops;
+ int result;
if (!device)
return -EINVAL;
@@ -1438,7 +1442,11 @@ int acpi_bus_start(struct acpi_device *device)
memset(&ops, 0, sizeof(ops));
ops.acpi_op_start = 1;
- return acpi_bus_scan(device->handle, &ops, NULL);
+ result = acpi_bus_scan(device->handle, &ops, NULL);
+
+ acpi_update_gpes();
+
+ return result;
}
EXPORT_SYMBOL(acpi_bus_start);
@@ -1552,6 +1560,8 @@ int __init acpi_scan_init(void)
if (result)
acpi_device_unregister(acpi_root, ACPI_BUS_REMOVAL_NORMAL);
+ else
+ acpi_update_gpes();
return result;
}
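
Aside (not part of the patch): the scan.c change above stops attaching a placeholder ID to devices without _HID/_CID and instead has acpi_device_hid() return a shared fallback string when the ID list is empty. Below is a minimal userspace sketch of that fallback scheme; struct dev and dev_hid() are invented names for the example, not kernel APIs.

/*
 * Illustrative userspace sketch (not kernel code): return a shared
 * constant when a device has no hardware IDs, instead of storing a
 * placeholder ID on every such device.
 */
#include <stdio.h>

static const char *dummy_hid = "device";

struct dev {
	const char **ids;	/* hardware IDs, may be empty */
	int nr_ids;
};

static const char *dev_hid(const struct dev *d)
{
	if (d->nr_ids == 0)
		return dummy_hid;	/* no _HID/_CID: report the generic name */
	return d->ids[0];		/* the first ID plays the role of the HID */
}

int main(void)
{
	const char *ids[] = { "PNP0A03" };
	struct dev with_id = { ids, 1 };
	struct dev without_id = { NULL, 0 };

	printf("%s\n", dev_hid(&with_id));	/* PNP0A03 */
	printf("%s\n", dev_hid(&without_id));	/* device  */
	return 0;
}
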
diff --git a/drivers/acpi/sleep.c b/drivers/acpi/sleep.c
index 4754ff6e70e6..721d93b3ceee 100644
--- a/drivers/acpi/sleep.c
+++ b/drivers/acpi/sleep.c
@@ -25,7 +25,9 @@
#include "internal.h"
#include "sleep.h"
-u8 sleep_states[ACPI_S_STATE_COUNT];
+static u8 sleep_states[ACPI_S_STATE_COUNT];
+
+static u32 acpi_target_sleep_state = ACPI_STATE_S0;
static void acpi_sleep_tts_switch(u32 acpi_state)
{
@@ -79,8 +81,6 @@ static int acpi_sleep_prepare(u32 acpi_state)
}
#ifdef CONFIG_ACPI_SLEEP
-static u32 acpi_target_sleep_state = ACPI_STATE_S0;
-
/*
* The ACPI specification wants us to save NVS memory regions during hibernation
* and to restore them during the subsequent resume. Windows does that also for
@@ -419,6 +419,14 @@ static struct dmi_system_id __initdata acpisleep_dmi_table[] = {
DMI_MATCH(DMI_PRODUCT_NAME, "Everex StepNote Series"),
},
},
+ {
+ .callback = init_nvs_nosave,
+ .ident = "Sony Vaio VPCEB1Z1E",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Sony Corporation"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "VPCEB1Z1E"),
+ },
+ },
{},
};
#endif /* CONFIG_SUSPEND */
@@ -562,7 +570,7 @@ int acpi_suspend(u32 acpi_state)
return -EINVAL;
}
-#ifdef CONFIG_PM_SLEEP
+#ifdef CONFIG_PM_OPS
/**
* acpi_pm_device_sleep_state - return preferred power state of ACPI device
* in the system sleep state given by %acpi_target_sleep_state
@@ -624,7 +632,7 @@ int acpi_pm_device_sleep_state(struct device *dev, int *d_min_p)
* can wake the system. _S0W may be valid, too.
*/
if (acpi_target_sleep_state == ACPI_STATE_S0 ||
- (device_may_wakeup(dev) && adev->wakeup.state.enabled &&
+ (device_may_wakeup(dev) &&
adev->wakeup.sleep_state <= acpi_target_sleep_state)) {
acpi_status status;
@@ -632,7 +640,9 @@ int acpi_pm_device_sleep_state(struct device *dev, int *d_min_p)
status = acpi_evaluate_integer(handle, acpi_method, NULL,
&d_max);
if (ACPI_FAILURE(status)) {
- d_max = d_min;
+ if (acpi_target_sleep_state != ACPI_STATE_S0 ||
+ status != AE_NOT_FOUND)
+ d_max = d_min;
} else if (d_max < d_min) {
/* Warn the user of the broken DSDT */
printk(KERN_WARNING "ACPI: Wrong value from %s\n",
@@ -646,7 +656,9 @@ int acpi_pm_device_sleep_state(struct device *dev, int *d_min_p)
*d_min_p = d_min;
return d_max;
}
+#endif /* CONFIG_PM_OPS */
+#ifdef CONFIG_PM_SLEEP
/**
* acpi_pm_device_sleep_wake - enable or disable the system wake-up
* capability of given device
@@ -677,7 +689,7 @@ int acpi_pm_device_sleep_wake(struct device *dev, bool enable)
return error;
}
-#endif
+#endif /* CONFIG_PM_SLEEP */
static void acpi_power_off_prepare(void)
{
@@ -702,7 +714,7 @@ static void acpi_power_off(void)
* paths through the BIOS, so disable _GTS and _BFS by default,
* but do speak up and offer the option to enable them.
*/
-void __init acpi_gts_bfs_check(void)
+static void __init acpi_gts_bfs_check(void)
{
acpi_handle dummy;
diff --git a/drivers/acpi/sleep.h b/drivers/acpi/sleep.h
index d8821805c3bc..74d59c8f4678 100644
--- a/drivers/acpi/sleep.h
+++ b/drivers/acpi/sleep.h
@@ -1,5 +1,4 @@
-extern u8 sleep_states[];
extern int acpi_suspend(u32 state);
extern void acpi_enable_wakeup_devices(u8 sleep_state);
diff --git a/drivers/acpi/thermal.c b/drivers/acpi/thermal.c
index 2f8f17131d9f..5a27b0a31315 100644
--- a/drivers/acpi/thermal.c
+++ b/drivers/acpi/thermal.c
@@ -37,12 +37,6 @@
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/types.h>
-
-#ifdef CONFIG_ACPI_PROCFS
-#include <linux/proc_fs.h>
-#include <linux/seq_file.h>
-#endif
-
#include <linux/jiffies.h>
#include <linux/kmod.h>
#include <linux/reboot.h>
@@ -195,61 +189,6 @@ struct acpi_thermal {
struct mutex lock;
};
-#ifdef CONFIG_ACPI_PROCFS
-static int acpi_thermal_state_open_fs(struct inode *inode, struct file *file);
-static int acpi_thermal_temp_open_fs(struct inode *inode, struct file *file);
-static int acpi_thermal_trip_open_fs(struct inode *inode, struct file *file);
-static int acpi_thermal_cooling_open_fs(struct inode *inode, struct file *file);
-static ssize_t acpi_thermal_write_cooling_mode(struct file *,
- const char __user *, size_t,
- loff_t *);
-static int acpi_thermal_polling_open_fs(struct inode *inode, struct file *file);
-static ssize_t acpi_thermal_write_polling(struct file *, const char __user *,
- size_t, loff_t *);
-
-static const struct file_operations acpi_thermal_state_fops = {
- .owner = THIS_MODULE,
- .open = acpi_thermal_state_open_fs,
- .read = seq_read,
- .llseek = seq_lseek,
- .release = single_release,
-};
-
-static const struct file_operations acpi_thermal_temp_fops = {
- .owner = THIS_MODULE,
- .open = acpi_thermal_temp_open_fs,
- .read = seq_read,
- .llseek = seq_lseek,
- .release = single_release,
-};
-
-static const struct file_operations acpi_thermal_trip_fops = {
- .owner = THIS_MODULE,
- .open = acpi_thermal_trip_open_fs,
- .read = seq_read,
- .llseek = seq_lseek,
- .release = single_release,
-};
-
-static const struct file_operations acpi_thermal_cooling_fops = {
- .owner = THIS_MODULE,
- .open = acpi_thermal_cooling_open_fs,
- .read = seq_read,
- .write = acpi_thermal_write_cooling_mode,
- .llseek = seq_lseek,
- .release = single_release,
-};
-
-static const struct file_operations acpi_thermal_polling_fops = {
- .owner = THIS_MODULE,
- .open = acpi_thermal_polling_open_fs,
- .read = seq_read,
- .write = acpi_thermal_write_polling,
- .llseek = seq_lseek,
- .release = single_release,
-};
-#endif /* CONFIG_ACPI_PROCFS*/
-
/* --------------------------------------------------------------------------
Thermal Zone Management
-------------------------------------------------------------------------- */
@@ -958,358 +897,6 @@ static void acpi_thermal_unregister_thermal_zone(struct acpi_thermal *tz)
/* --------------------------------------------------------------------------
- FS Interface (/proc)
- -------------------------------------------------------------------------- */
-#ifdef CONFIG_ACPI_PROCFS
-static struct proc_dir_entry *acpi_thermal_dir;
-
-static int acpi_thermal_state_seq_show(struct seq_file *seq, void *offset)
-{
- struct acpi_thermal *tz = seq->private;
-
-
- if (!tz)
- goto end;
-
- seq_puts(seq, "state: ");
-
- if (!tz->state.critical && !tz->state.hot && !tz->state.passive
- && !tz->state.active)
- seq_puts(seq, "ok\n");
- else {
- if (tz->state.critical)
- seq_puts(seq, "critical ");
- if (tz->state.hot)
- seq_puts(seq, "hot ");
- if (tz->state.passive)
- seq_puts(seq, "passive ");
- if (tz->state.active)
- seq_printf(seq, "active[%d]", tz->state.active_index);
- seq_puts(seq, "\n");
- }
-
- end:
- return 0;
-}
-
-static int acpi_thermal_state_open_fs(struct inode *inode, struct file *file)
-{
- return single_open(file, acpi_thermal_state_seq_show, PDE(inode)->data);
-}
-
-static int acpi_thermal_temp_seq_show(struct seq_file *seq, void *offset)
-{
- int result = 0;
- struct acpi_thermal *tz = seq->private;
-
-
- if (!tz)
- goto end;
-
- result = acpi_thermal_get_temperature(tz);
- if (result)
- goto end;
-
- seq_printf(seq, "temperature: %ld C\n",
- KELVIN_TO_CELSIUS(tz->temperature));
-
- end:
- return 0;
-}
-
-static int acpi_thermal_temp_open_fs(struct inode *inode, struct file *file)
-{
- return single_open(file, acpi_thermal_temp_seq_show, PDE(inode)->data);
-}
-
-static int acpi_thermal_trip_seq_show(struct seq_file *seq, void *offset)
-{
- struct acpi_thermal *tz = seq->private;
- struct acpi_device *device;
- acpi_status status;
-
- int i = 0;
- int j = 0;
-
-
- if (!tz)
- goto end;
-
- if (tz->trips.critical.flags.valid)
- seq_printf(seq, "critical (S5): %ld C%s",
- KELVIN_TO_CELSIUS(tz->trips.critical.temperature),
- nocrt ? " <disabled>\n" : "\n");
-
- if (tz->trips.hot.flags.valid)
- seq_printf(seq, "hot (S4): %ld C%s",
- KELVIN_TO_CELSIUS(tz->trips.hot.temperature),
- nocrt ? " <disabled>\n" : "\n");
-
- if (tz->trips.passive.flags.valid) {
- seq_printf(seq,
- "passive: %ld C: tc1=%lu tc2=%lu tsp=%lu devices=",
- KELVIN_TO_CELSIUS(tz->trips.passive.temperature),
- tz->trips.passive.tc1, tz->trips.passive.tc2,
- tz->trips.passive.tsp);
- for (j = 0; j < tz->trips.passive.devices.count; j++) {
- status = acpi_bus_get_device(tz->trips.passive.devices.
- handles[j], &device);
- seq_printf(seq, "%4.4s ", status ? "" :
- acpi_device_bid(device));
- }
- seq_puts(seq, "\n");
- } else {
- seq_printf(seq, "passive (forced):");
- if (tz->thermal_zone->forced_passive)
- seq_printf(seq, " %i C\n",
- tz->thermal_zone->forced_passive / 1000);
- else
- seq_printf(seq, "<not set>\n");
- }
-
- for (i = 0; i < ACPI_THERMAL_MAX_ACTIVE; i++) {
- if (!(tz->trips.active[i].flags.valid))
- break;
- seq_printf(seq, "active[%d]: %ld C: devices=",
- i,
- KELVIN_TO_CELSIUS(tz->trips.active[i].temperature));
- for (j = 0; j < tz->trips.active[i].devices.count; j++){
- status = acpi_bus_get_device(tz->trips.active[i].
- devices.handles[j],
- &device);
- seq_printf(seq, "%4.4s ", status ? "" :
- acpi_device_bid(device));
- }
- seq_puts(seq, "\n");
- }
-
- end:
- return 0;
-}
-
-static int acpi_thermal_trip_open_fs(struct inode *inode, struct file *file)
-{
- return single_open(file, acpi_thermal_trip_seq_show, PDE(inode)->data);
-}
-
-static int acpi_thermal_cooling_seq_show(struct seq_file *seq, void *offset)
-{
- struct acpi_thermal *tz = seq->private;
-
-
- if (!tz)
- goto end;
-
- if (!tz->flags.cooling_mode)
- seq_puts(seq, "<setting not supported>\n");
- else
- seq_puts(seq, "0 - Active; 1 - Passive\n");
-
- end:
- return 0;
-}
-
-static int acpi_thermal_cooling_open_fs(struct inode *inode, struct file *file)
-{
- return single_open(file, acpi_thermal_cooling_seq_show,
- PDE(inode)->data);
-}
-
-static ssize_t
-acpi_thermal_write_cooling_mode(struct file *file,
- const char __user * buffer,
- size_t count, loff_t * ppos)
-{
- struct seq_file *m = file->private_data;
- struct acpi_thermal *tz = m->private;
- int result = 0;
- char mode_string[12] = { '\0' };
-
-
- if (!tz || (count > sizeof(mode_string) - 1))
- return -EINVAL;
-
- if (!tz->flags.cooling_mode)
- return -ENODEV;
-
- if (copy_from_user(mode_string, buffer, count))
- return -EFAULT;
-
- mode_string[count] = '\0';
-
- result = acpi_thermal_set_cooling_mode(tz,
- simple_strtoul(mode_string, NULL,
- 0));
- if (result)
- return result;
-
- acpi_thermal_check(tz);
-
- return count;
-}
-
-static int acpi_thermal_polling_seq_show(struct seq_file *seq, void *offset)
-{
- struct acpi_thermal *tz = seq->private;
-
-
- if (!tz)
- goto end;
-
- if (!tz->thermal_zone->polling_delay) {
- seq_puts(seq, "<polling disabled>\n");
- goto end;
- }
-
- seq_printf(seq, "polling frequency: %d seconds\n",
- (tz->thermal_zone->polling_delay / 1000));
-
- end:
- return 0;
-}
-
-static int acpi_thermal_polling_open_fs(struct inode *inode, struct file *file)
-{
- return single_open(file, acpi_thermal_polling_seq_show,
- PDE(inode)->data);
-}
-
-static int acpi_thermal_set_polling(struct acpi_thermal *tz, int seconds)
-{
- if (!tz)
- return -EINVAL;
-
- /* Convert value to deci-seconds */
- tz->polling_frequency = seconds * 10;
-
- tz->thermal_zone->polling_delay = seconds * 1000;
-
- if (tz->tz_enabled)
- thermal_zone_device_update(tz->thermal_zone);
-
- ACPI_DEBUG_PRINT((ACPI_DB_INFO,
- "Polling frequency set to %lu seconds\n",
- tz->polling_frequency/10));
-
- return 0;
-}
-
-static ssize_t
-acpi_thermal_write_polling(struct file *file,
- const char __user * buffer,
- size_t count, loff_t * ppos)
-{
- struct seq_file *m = file->private_data;
- struct acpi_thermal *tz = m->private;
- int result = 0;
- char polling_string[12] = { '\0' };
- int seconds = 0;
-
-
- if (!tz || (count > sizeof(polling_string) - 1))
- return -EINVAL;
-
- if (copy_from_user(polling_string, buffer, count))
- return -EFAULT;
-
- polling_string[count] = '\0';
-
- seconds = simple_strtoul(polling_string, NULL, 0);
-
- result = acpi_thermal_set_polling(tz, seconds);
- if (result)
- return result;
-
- acpi_thermal_check(tz);
-
- return count;
-}
-
-static int acpi_thermal_add_fs(struct acpi_device *device)
-{
- struct proc_dir_entry *entry = NULL;
-
-
- if (!acpi_device_dir(device)) {
- acpi_device_dir(device) = proc_mkdir(acpi_device_bid(device),
- acpi_thermal_dir);
- if (!acpi_device_dir(device))
- return -ENODEV;
- }
-
- /* 'state' [R] */
- entry = proc_create_data(ACPI_THERMAL_FILE_STATE,
- S_IRUGO, acpi_device_dir(device),
- &acpi_thermal_state_fops,
- acpi_driver_data(device));
- if (!entry)
- return -ENODEV;
-
- /* 'temperature' [R] */
- entry = proc_create_data(ACPI_THERMAL_FILE_TEMPERATURE,
- S_IRUGO, acpi_device_dir(device),
- &acpi_thermal_temp_fops,
- acpi_driver_data(device));
- if (!entry)
- return -ENODEV;
-
- /* 'trip_points' [R] */
- entry = proc_create_data(ACPI_THERMAL_FILE_TRIP_POINTS,
- S_IRUGO,
- acpi_device_dir(device),
- &acpi_thermal_trip_fops,
- acpi_driver_data(device));
- if (!entry)
- return -ENODEV;
-
- /* 'cooling_mode' [R/W] */
- entry = proc_create_data(ACPI_THERMAL_FILE_COOLING_MODE,
- S_IFREG | S_IRUGO | S_IWUSR,
- acpi_device_dir(device),
- &acpi_thermal_cooling_fops,
- acpi_driver_data(device));
- if (!entry)
- return -ENODEV;
-
- /* 'polling_frequency' [R/W] */
- entry = proc_create_data(ACPI_THERMAL_FILE_POLLING_FREQ,
- S_IFREG | S_IRUGO | S_IWUSR,
- acpi_device_dir(device),
- &acpi_thermal_polling_fops,
- acpi_driver_data(device));
- if (!entry)
- return -ENODEV;
- return 0;
-}
-
-static int acpi_thermal_remove_fs(struct acpi_device *device)
-{
-
- if (acpi_device_dir(device)) {
- remove_proc_entry(ACPI_THERMAL_FILE_POLLING_FREQ,
- acpi_device_dir(device));
- remove_proc_entry(ACPI_THERMAL_FILE_COOLING_MODE,
- acpi_device_dir(device));
- remove_proc_entry(ACPI_THERMAL_FILE_TRIP_POINTS,
- acpi_device_dir(device));
- remove_proc_entry(ACPI_THERMAL_FILE_TEMPERATURE,
- acpi_device_dir(device));
- remove_proc_entry(ACPI_THERMAL_FILE_STATE,
- acpi_device_dir(device));
- remove_proc_entry(acpi_device_bid(device), acpi_thermal_dir);
- acpi_device_dir(device) = NULL;
- }
-
- return 0;
-}
-#else
-static inline int acpi_thermal_add_fs(struct acpi_device *device) { return 0; }
-static inline int acpi_thermal_remove_fs(struct acpi_device *device)
-{
- return 0;
-}
-#endif /* CONFIG_ACPI_PROCFS */
-/* --------------------------------------------------------------------------
Driver Interface
-------------------------------------------------------------------------- */
@@ -1428,17 +1015,11 @@ static int acpi_thermal_add(struct acpi_device *device)
if (result)
goto free_memory;
- result = acpi_thermal_add_fs(device);
- if (result)
- goto unregister_thermal_zone;
-
printk(KERN_INFO PREFIX "%s [%s] (%ld C)\n",
acpi_device_name(device), acpi_device_bid(device),
KELVIN_TO_CELSIUS(tz->temperature));
goto end;
-unregister_thermal_zone:
- thermal_zone_device_unregister(tz->thermal_zone);
free_memory:
kfree(tz);
end:
@@ -1454,7 +1035,6 @@ static int acpi_thermal_remove(struct acpi_device *device, int type)
tz = acpi_driver_data(device);
- acpi_thermal_remove_fs(device);
acpi_thermal_unregister_thermal_zone(tz);
mutex_destroy(&tz->lock);
kfree(tz);
@@ -1580,19 +1160,9 @@ static int __init acpi_thermal_init(void)
return -ENODEV;
}
-#ifdef CONFIG_ACPI_PROCFS
- acpi_thermal_dir = proc_mkdir(ACPI_THERMAL_CLASS, acpi_root_dir);
- if (!acpi_thermal_dir)
- return -ENODEV;
-#endif
-
result = acpi_bus_register_driver(&acpi_thermal_driver);
- if (result < 0) {
-#ifdef CONFIG_ACPI_PROCFS
- remove_proc_entry(ACPI_THERMAL_CLASS, acpi_root_dir);
-#endif
+ if (result < 0)
return -ENODEV;
- }
return 0;
}
@@ -1602,10 +1172,6 @@ static void __exit acpi_thermal_exit(void)
acpi_bus_unregister_driver(&acpi_thermal_driver);
-#ifdef CONFIG_ACPI_PROCFS
- remove_proc_entry(ACPI_THERMAL_CLASS, acpi_root_dir);
-#endif
-
return;
}
diff --git a/drivers/acpi/video.c b/drivers/acpi/video.c
index 67dec0c675aa..5cd0228d2daa 100644
--- a/drivers/acpi/video.c
+++ b/drivers/acpi/video.c
@@ -30,8 +30,6 @@
#include <linux/types.h>
#include <linux/list.h>
#include <linux/mutex.h>
-#include <linux/proc_fs.h>
-#include <linux/seq_file.h>
#include <linux/input.h>
#include <linux/backlight.h>
#include <linux/thermal.h>
@@ -152,9 +150,6 @@ struct acpi_video_bus {
struct acpi_video_bus_flags flags;
struct list_head video_device_list;
struct mutex device_list_lock; /* protects video_device_list */
-#ifdef CONFIG_ACPI_PROCFS
- struct proc_dir_entry *dir;
-#endif
struct input_dev *input;
char phys[32]; /* for input device */
struct notifier_block pm_nb;
@@ -210,108 +205,6 @@ struct acpi_video_device {
struct output_device *output_dev;
};
-#ifdef CONFIG_ACPI_PROCFS
-/* bus */
-static int acpi_video_bus_info_open_fs(struct inode *inode, struct file *file);
-static const struct file_operations acpi_video_bus_info_fops = {
- .owner = THIS_MODULE,
- .open = acpi_video_bus_info_open_fs,
- .read = seq_read,
- .llseek = seq_lseek,
- .release = single_release,
-};
-
-static int acpi_video_bus_ROM_open_fs(struct inode *inode, struct file *file);
-static const struct file_operations acpi_video_bus_ROM_fops = {
- .owner = THIS_MODULE,
- .open = acpi_video_bus_ROM_open_fs,
- .read = seq_read,
- .llseek = seq_lseek,
- .release = single_release,
-};
-
-static int acpi_video_bus_POST_info_open_fs(struct inode *inode,
- struct file *file);
-static const struct file_operations acpi_video_bus_POST_info_fops = {
- .owner = THIS_MODULE,
- .open = acpi_video_bus_POST_info_open_fs,
- .read = seq_read,
- .llseek = seq_lseek,
- .release = single_release,
-};
-
-static int acpi_video_bus_POST_open_fs(struct inode *inode, struct file *file);
-static ssize_t acpi_video_bus_write_POST(struct file *file,
- const char __user *buffer, size_t count, loff_t *data);
-static const struct file_operations acpi_video_bus_POST_fops = {
- .owner = THIS_MODULE,
- .open = acpi_video_bus_POST_open_fs,
- .read = seq_read,
- .write = acpi_video_bus_write_POST,
- .llseek = seq_lseek,
- .release = single_release,
-};
-
-static int acpi_video_bus_DOS_open_fs(struct inode *inode, struct file *file);
-static ssize_t acpi_video_bus_write_DOS(struct file *file,
- const char __user *buffer, size_t count, loff_t *data);
-static const struct file_operations acpi_video_bus_DOS_fops = {
- .owner = THIS_MODULE,
- .open = acpi_video_bus_DOS_open_fs,
- .read = seq_read,
- .write = acpi_video_bus_write_DOS,
- .llseek = seq_lseek,
- .release = single_release,
-};
-
-/* device */
-static int acpi_video_device_info_open_fs(struct inode *inode,
- struct file *file);
-static const struct file_operations acpi_video_device_info_fops = {
- .owner = THIS_MODULE,
- .open = acpi_video_device_info_open_fs,
- .read = seq_read,
- .llseek = seq_lseek,
- .release = single_release,
-};
-
-static int acpi_video_device_state_open_fs(struct inode *inode,
- struct file *file);
-static ssize_t acpi_video_device_write_state(struct file *file,
- const char __user *buffer, size_t count, loff_t *data);
-static const struct file_operations acpi_video_device_state_fops = {
- .owner = THIS_MODULE,
- .open = acpi_video_device_state_open_fs,
- .read = seq_read,
- .write = acpi_video_device_write_state,
- .llseek = seq_lseek,
- .release = single_release,
-};
-
-static int acpi_video_device_brightness_open_fs(struct inode *inode,
- struct file *file);
-static ssize_t acpi_video_device_write_brightness(struct file *file,
- const char __user *buffer, size_t count, loff_t *data);
-static const struct file_operations acpi_video_device_brightness_fops = {
- .owner = THIS_MODULE,
- .open = acpi_video_device_brightness_open_fs,
- .read = seq_read,
- .write = acpi_video_device_write_brightness,
- .llseek = seq_lseek,
- .release = single_release,
-};
-
-static int acpi_video_device_EDID_open_fs(struct inode *inode,
- struct file *file);
-static const struct file_operations acpi_video_device_EDID_fops = {
- .owner = THIS_MODULE,
- .open = acpi_video_device_EDID_open_fs,
- .read = seq_read,
- .llseek = seq_lseek,
- .release = single_release,
-};
-#endif /* CONFIG_ACPI_PROCFS */
-
static const char device_decode[][30] = {
"motherboard VGA device",
"PCI VGA device",
@@ -1111,646 +1004,6 @@ static int acpi_video_bus_check(struct acpi_video_bus *video)
}
/* --------------------------------------------------------------------------
- FS Interface (/proc)
- -------------------------------------------------------------------------- */
-#ifdef CONFIG_ACPI_PROCFS
-
-static struct proc_dir_entry *acpi_video_dir;
-
-/* video devices */
-
-static int acpi_video_device_info_seq_show(struct seq_file *seq, void *offset)
-{
- struct acpi_video_device *dev = seq->private;
-
-
- if (!dev)
- goto end;
-
- seq_printf(seq, "device_id: 0x%04x\n", (u32) dev->device_id);
- seq_printf(seq, "type: ");
- if (dev->flags.crt)
- seq_printf(seq, "CRT\n");
- else if (dev->flags.lcd)
- seq_printf(seq, "LCD\n");
- else if (dev->flags.tvout)
- seq_printf(seq, "TVOUT\n");
- else if (dev->flags.dvi)
- seq_printf(seq, "DVI\n");
- else
- seq_printf(seq, "UNKNOWN\n");
-
- seq_printf(seq, "known by bios: %s\n", dev->flags.bios ? "yes" : "no");
-
- end:
- return 0;
-}
-
-static int
-acpi_video_device_info_open_fs(struct inode *inode, struct file *file)
-{
- return single_open(file, acpi_video_device_info_seq_show,
- PDE(inode)->data);
-}
-
-static int
-acpi_video_device_query(struct acpi_video_device *device,
- unsigned long long *state)
-{
- int status;
-
- status = acpi_evaluate_integer(device->dev->handle, "_DGS",
- NULL, state);
-
- return status;
-}
-
-static int acpi_video_device_state_seq_show(struct seq_file *seq, void *offset)
-{
- int status;
- struct acpi_video_device *dev = seq->private;
- unsigned long long state;
-
-
- if (!dev)
- goto end;
-
- status = acpi_video_device_get_state(dev, &state);
- seq_printf(seq, "state: ");
- if (ACPI_SUCCESS(status))
- seq_printf(seq, "0x%02llx\n", state);
- else
- seq_printf(seq, "<not supported>\n");
-
- status = acpi_video_device_query(dev, &state);
- seq_printf(seq, "query: ");
- if (ACPI_SUCCESS(status))
- seq_printf(seq, "0x%02llx\n", state);
- else
- seq_printf(seq, "<not supported>\n");
-
- end:
- return 0;
-}
-
-static int
-acpi_video_device_state_open_fs(struct inode *inode, struct file *file)
-{
- return single_open(file, acpi_video_device_state_seq_show,
- PDE(inode)->data);
-}
-
-static ssize_t
-acpi_video_device_write_state(struct file *file,
- const char __user * buffer,
- size_t count, loff_t * data)
-{
- int status;
- struct seq_file *m = file->private_data;
- struct acpi_video_device *dev = m->private;
- char str[12] = { 0 };
- u32 state = 0;
-
-
- if (!dev || count >= sizeof(str))
- return -EINVAL;
-
- if (copy_from_user(str, buffer, count))
- return -EFAULT;
-
- str[count] = 0;
- state = simple_strtoul(str, NULL, 0);
- state &= ((1ul << 31) | (1ul << 30) | (1ul << 0));
-
- status = acpi_video_device_set_state(dev, state);
-
- if (status)
- return -EFAULT;
-
- return count;
-}
-
-static int
-acpi_video_device_brightness_seq_show(struct seq_file *seq, void *offset)
-{
- struct acpi_video_device *dev = seq->private;
- int i;
-
-
- if (!dev || !dev->brightness) {
- seq_printf(seq, "<not supported>\n");
- return 0;
- }
-
- seq_printf(seq, "levels: ");
- for (i = 2; i < dev->brightness->count; i++)
- seq_printf(seq, " %d", dev->brightness->levels[i]);
- seq_printf(seq, "\ncurrent: %d\n", dev->brightness->curr);
-
- return 0;
-}
-
-static int
-acpi_video_device_brightness_open_fs(struct inode *inode, struct file *file)
-{
- return single_open(file, acpi_video_device_brightness_seq_show,
- PDE(inode)->data);
-}
-
-static ssize_t
-acpi_video_device_write_brightness(struct file *file,
- const char __user * buffer,
- size_t count, loff_t * data)
-{
- struct seq_file *m = file->private_data;
- struct acpi_video_device *dev = m->private;
- char str[5] = { 0 };
- unsigned int level = 0;
- int i;
-
-
- if (!dev || !dev->brightness || count >= sizeof(str))
- return -EINVAL;
-
- if (copy_from_user(str, buffer, count))
- return -EFAULT;
-
- str[count] = 0;
- level = simple_strtoul(str, NULL, 0);
-
- if (level > 100)
- return -EFAULT;
-
- /* validate through the list of available levels */
- for (i = 2; i < dev->brightness->count; i++)
- if (level == dev->brightness->levels[i]) {
- if (!acpi_video_device_lcd_set_level(dev, level))
- return count;
- break;
- }
-
- return -EINVAL;
-}
-
-static int acpi_video_device_EDID_seq_show(struct seq_file *seq, void *offset)
-{
- struct acpi_video_device *dev = seq->private;
- int status;
- int i;
- union acpi_object *edid = NULL;
-
-
- if (!dev)
- goto out;
-
- status = acpi_video_device_EDID(dev, &edid, 128);
- if (ACPI_FAILURE(status)) {
- status = acpi_video_device_EDID(dev, &edid, 256);
- }
-
- if (ACPI_FAILURE(status)) {
- goto out;
- }
-
- if (edid && edid->type == ACPI_TYPE_BUFFER) {
- for (i = 0; i < edid->buffer.length; i++)
- seq_putc(seq, edid->buffer.pointer[i]);
- }
-
- out:
- if (!edid)
- seq_printf(seq, "<not supported>\n");
- else
- kfree(edid);
-
- return 0;
-}
-
-static int
-acpi_video_device_EDID_open_fs(struct inode *inode, struct file *file)
-{
- return single_open(file, acpi_video_device_EDID_seq_show,
- PDE(inode)->data);
-}
-
-static int acpi_video_device_add_fs(struct acpi_device *device)
-{
- struct proc_dir_entry *entry, *device_dir;
- struct acpi_video_device *vid_dev;
-
- vid_dev = acpi_driver_data(device);
- if (!vid_dev)
- return -ENODEV;
-
- device_dir = proc_mkdir(acpi_device_bid(device),
- vid_dev->video->dir);
- if (!device_dir)
- return -ENOMEM;
-
- /* 'info' [R] */
- entry = proc_create_data("info", S_IRUGO, device_dir,
- &acpi_video_device_info_fops, acpi_driver_data(device));
- if (!entry)
- goto err_remove_dir;
-
- /* 'state' [R/W] */
- entry = proc_create_data("state", S_IFREG | S_IRUGO | S_IWUSR,
- device_dir,
- &acpi_video_device_state_fops,
- acpi_driver_data(device));
- if (!entry)
- goto err_remove_info;
-
- /* 'brightness' [R/W] */
- entry = proc_create_data("brightness", S_IFREG | S_IRUGO | S_IWUSR,
- device_dir,
- &acpi_video_device_brightness_fops,
- acpi_driver_data(device));
- if (!entry)
- goto err_remove_state;
-
- /* 'EDID' [R] */
- entry = proc_create_data("EDID", S_IRUGO, device_dir,
- &acpi_video_device_EDID_fops,
- acpi_driver_data(device));
- if (!entry)
- goto err_remove_brightness;
-
- acpi_device_dir(device) = device_dir;
-
- return 0;
-
- err_remove_brightness:
- remove_proc_entry("brightness", device_dir);
- err_remove_state:
- remove_proc_entry("state", device_dir);
- err_remove_info:
- remove_proc_entry("info", device_dir);
- err_remove_dir:
- remove_proc_entry(acpi_device_bid(device), vid_dev->video->dir);
- return -ENOMEM;
-}
-
-static int acpi_video_device_remove_fs(struct acpi_device *device)
-{
- struct acpi_video_device *vid_dev;
- struct proc_dir_entry *device_dir;
-
- vid_dev = acpi_driver_data(device);
- if (!vid_dev || !vid_dev->video || !vid_dev->video->dir)
- return -ENODEV;
-
- device_dir = acpi_device_dir(device);
- if (device_dir) {
- remove_proc_entry("info", device_dir);
- remove_proc_entry("state", device_dir);
- remove_proc_entry("brightness", device_dir);
- remove_proc_entry("EDID", device_dir);
- remove_proc_entry(acpi_device_bid(device), vid_dev->video->dir);
- acpi_device_dir(device) = NULL;
- }
-
- return 0;
-}
-
-/* video bus */
-static int acpi_video_bus_info_seq_show(struct seq_file *seq, void *offset)
-{
- struct acpi_video_bus *video = seq->private;
-
-
- if (!video)
- goto end;
-
- seq_printf(seq, "Switching heads: %s\n",
- video->flags.multihead ? "yes" : "no");
- seq_printf(seq, "Video ROM: %s\n",
- video->flags.rom ? "yes" : "no");
- seq_printf(seq, "Device to be POSTed on boot: %s\n",
- video->flags.post ? "yes" : "no");
-
- end:
- return 0;
-}
-
-static int acpi_video_bus_info_open_fs(struct inode *inode, struct file *file)
-{
- return single_open(file, acpi_video_bus_info_seq_show,
- PDE(inode)->data);
-}
-
-static int acpi_video_bus_ROM_seq_show(struct seq_file *seq, void *offset)
-{
- struct acpi_video_bus *video = seq->private;
-
-
- if (!video)
- goto end;
-
- printk(KERN_INFO PREFIX "Please implement %s\n", __func__);
- seq_printf(seq, "<TODO>\n");
-
- end:
- return 0;
-}
-
-static int acpi_video_bus_ROM_open_fs(struct inode *inode, struct file *file)
-{
- return single_open(file, acpi_video_bus_ROM_seq_show, PDE(inode)->data);
-}
-
-static int
-acpi_video_bus_POST_options(struct acpi_video_bus *video,
- unsigned long long *options)
-{
- int status;
-
- status = acpi_evaluate_integer(video->device->handle, "_VPO",
- NULL, options);
- *options &= 3;
-
- return status;
-}
-
-static int acpi_video_bus_POST_info_seq_show(struct seq_file *seq, void *offset)
-{
- struct acpi_video_bus *video = seq->private;
- unsigned long long options;
- int status;
-
-
- if (!video)
- goto end;
-
- status = acpi_video_bus_POST_options(video, &options);
- if (ACPI_SUCCESS(status)) {
- if (!(options & 1)) {
- printk(KERN_WARNING PREFIX
- "The motherboard VGA device is not listed as a possible POST device.\n");
- printk(KERN_WARNING PREFIX
- "This indicates a BIOS bug. Please contact the manufacturer.\n");
- }
- printk(KERN_WARNING "%llx\n", options);
- seq_printf(seq, "can POST: <integrated video>");
- if (options & 2)
- seq_printf(seq, " <PCI video>");
- if (options & 4)
- seq_printf(seq, " <AGP video>");
- seq_putc(seq, '\n');
- } else
- seq_printf(seq, "<not supported>\n");
- end:
- return 0;
-}
-
-static int
-acpi_video_bus_POST_info_open_fs(struct inode *inode, struct file *file)
-{
- return single_open(file, acpi_video_bus_POST_info_seq_show,
- PDE(inode)->data);
-}
-
-static int
-acpi_video_bus_get_POST(struct acpi_video_bus *video, unsigned long long *id)
-{
- int status;
-
- status = acpi_evaluate_integer(video->device->handle, "_GPD", NULL, id);
-
- return status;
-}
-
-static int acpi_video_bus_POST_seq_show(struct seq_file *seq, void *offset)
-{
- struct acpi_video_bus *video = seq->private;
- int status;
- unsigned long long id;
-
-
- if (!video)
- goto end;
-
- status = acpi_video_bus_get_POST(video, &id);
- if (!ACPI_SUCCESS(status)) {
- seq_printf(seq, "<not supported>\n");
- goto end;
- }
- seq_printf(seq, "device POSTed is <%s>\n", device_decode[id & 3]);
-
- end:
- return 0;
-}
-
-static int acpi_video_bus_DOS_seq_show(struct seq_file *seq, void *offset)
-{
- struct acpi_video_bus *video = seq->private;
-
-
- seq_printf(seq, "DOS setting: <%d>\n", video->dos_setting);
-
- return 0;
-}
-
-static int acpi_video_bus_POST_open_fs(struct inode *inode, struct file *file)
-{
- return single_open(file, acpi_video_bus_POST_seq_show,
- PDE(inode)->data);
-}
-
-static int acpi_video_bus_DOS_open_fs(struct inode *inode, struct file *file)
-{
- return single_open(file, acpi_video_bus_DOS_seq_show, PDE(inode)->data);
-}
-
-static int
-acpi_video_bus_set_POST(struct acpi_video_bus *video, unsigned long option)
-{
- int status;
- unsigned long long tmp;
- union acpi_object arg0 = { ACPI_TYPE_INTEGER };
- struct acpi_object_list args = { 1, &arg0 };
-
-
- arg0.integer.value = option;
-
- status = acpi_evaluate_integer(video->device->handle, "_SPD",
- &args, &tmp);
- if (ACPI_SUCCESS(status))
- status = tmp ? (-EINVAL) : (AE_OK);
-
- return status;
-}
-
-static ssize_t
-acpi_video_bus_write_POST(struct file *file,
- const char __user * buffer,
- size_t count, loff_t * data)
-{
- int status;
- struct seq_file *m = file->private_data;
- struct acpi_video_bus *video = m->private;
- char str[12] = { 0 };
- unsigned long long opt, options;
-
-
- if (!video || count >= sizeof(str))
- return -EINVAL;
-
- status = acpi_video_bus_POST_options(video, &options);
- if (!ACPI_SUCCESS(status))
- return -EINVAL;
-
- if (copy_from_user(str, buffer, count))
- return -EFAULT;
-
- str[count] = 0;
- opt = strtoul(str, NULL, 0);
- if (opt > 3)
- return -EFAULT;
-
- /* just in case an OEM 'forgot' the motherboard... */
- options |= 1;
-
- if (options & (1ul << opt)) {
- status = acpi_video_bus_set_POST(video, opt);
- if (!ACPI_SUCCESS(status))
- return -EFAULT;
-
- }
-
- return count;
-}
-
-static ssize_t
-acpi_video_bus_write_DOS(struct file *file,
- const char __user * buffer,
- size_t count, loff_t * data)
-{
- int status;
- struct seq_file *m = file->private_data;
- struct acpi_video_bus *video = m->private;
- char str[12] = { 0 };
- unsigned long opt;
-
-
- if (!video || count >= sizeof(str))
- return -EINVAL;
-
- if (copy_from_user(str, buffer, count))
- return -EFAULT;
-
- str[count] = 0;
- opt = strtoul(str, NULL, 0);
- if (opt > 7)
- return -EFAULT;
-
- status = acpi_video_bus_DOS(video, opt & 0x3, (opt & 0x4) >> 2);
-
- if (!ACPI_SUCCESS(status))
- return -EFAULT;
-
- return count;
-}
-
-static int acpi_video_bus_add_fs(struct acpi_device *device)
-{
- struct acpi_video_bus *video = acpi_driver_data(device);
- struct proc_dir_entry *device_dir;
- struct proc_dir_entry *entry;
-
- device_dir = proc_mkdir(acpi_device_bid(device), acpi_video_dir);
- if (!device_dir)
- return -ENOMEM;
-
- /* 'info' [R] */
- entry = proc_create_data("info", S_IRUGO, device_dir,
- &acpi_video_bus_info_fops,
- acpi_driver_data(device));
- if (!entry)
- goto err_remove_dir;
-
- /* 'ROM' [R] */
- entry = proc_create_data("ROM", S_IRUGO, device_dir,
- &acpi_video_bus_ROM_fops,
- acpi_driver_data(device));
- if (!entry)
- goto err_remove_info;
-
- /* 'POST_info' [R] */
- entry = proc_create_data("POST_info", S_IRUGO, device_dir,
- &acpi_video_bus_POST_info_fops,
- acpi_driver_data(device));
- if (!entry)
- goto err_remove_rom;
-
- /* 'POST' [R/W] */
- entry = proc_create_data("POST", S_IFREG | S_IRUGO | S_IWUSR,
- device_dir,
- &acpi_video_bus_POST_fops,
- acpi_driver_data(device));
- if (!entry)
- goto err_remove_post_info;
-
- /* 'DOS' [R/W] */
- entry = proc_create_data("DOS", S_IFREG | S_IRUGO | S_IWUSR,
- device_dir,
- &acpi_video_bus_DOS_fops,
- acpi_driver_data(device));
- if (!entry)
- goto err_remove_post;
-
- video->dir = acpi_device_dir(device) = device_dir;
- return 0;
-
- err_remove_post:
- remove_proc_entry("POST", device_dir);
- err_remove_post_info:
- remove_proc_entry("POST_info", device_dir);
- err_remove_rom:
- remove_proc_entry("ROM", device_dir);
- err_remove_info:
- remove_proc_entry("info", device_dir);
- err_remove_dir:
- remove_proc_entry(acpi_device_bid(device), acpi_video_dir);
- return -ENOMEM;
-}
-
-static int acpi_video_bus_remove_fs(struct acpi_device *device)
-{
- struct proc_dir_entry *device_dir = acpi_device_dir(device);
-
- if (device_dir) {
- remove_proc_entry("info", device_dir);
- remove_proc_entry("ROM", device_dir);
- remove_proc_entry("POST_info", device_dir);
- remove_proc_entry("POST", device_dir);
- remove_proc_entry("DOS", device_dir);
- remove_proc_entry(acpi_device_bid(device), acpi_video_dir);
- acpi_device_dir(device) = NULL;
- }
-
- return 0;
-}
-#else
-static inline int acpi_video_device_add_fs(struct acpi_device *device)
-{
- return 0;
-}
-static inline int acpi_video_device_remove_fs(struct acpi_device *device)
-{
- return 0;
-}
-static inline int acpi_video_bus_add_fs(struct acpi_device *device)
-{
- return 0;
-}
-static inline int acpi_video_bus_remove_fs(struct acpi_device *device)
-{
- return 0;
-}
-#endif /* CONFIG_ACPI_PROCFS */
-
-/* --------------------------------------------------------------------------
Driver Interface
-------------------------------------------------------------------------- */
@@ -1877,8 +1130,6 @@ acpi_video_bus_get_one_device(struct acpi_device *device,
list_add_tail(&data->entry, &video->video_device_list);
mutex_unlock(&video->device_list_lock);
- acpi_video_device_add_fs(device);
-
return 0;
}
@@ -2181,8 +1432,6 @@ static int acpi_video_bus_put_one_device(struct acpi_video_device *device)
if (!device || !device->video)
return -ENOENT;
- acpi_video_device_remove_fs(device->dev);
-
status = acpi_remove_notify_handler(device->dev->handle,
ACPI_DEVICE_NOTIFY,
acpi_video_device_notify);
@@ -2466,10 +1715,6 @@ static int acpi_video_bus_add(struct acpi_device *device)
if (error)
goto err_free_video;
- error = acpi_video_bus_add_fs(device);
- if (error)
- goto err_free_video;
-
mutex_init(&video->device_list_lock);
INIT_LIST_HEAD(&video->video_device_list);
@@ -2522,7 +1767,6 @@ static int acpi_video_bus_add(struct acpi_device *device)
acpi_video_bus_stop_devices(video);
acpi_video_bus_put_devices(video);
kfree(video->attached_array);
- acpi_video_bus_remove_fs(device);
err_free_video:
kfree(video);
device->driver_data = NULL;
@@ -2544,7 +1788,6 @@ static int acpi_video_bus_remove(struct acpi_device *device, int type)
acpi_video_bus_stop_devices(video);
acpi_video_bus_put_devices(video);
- acpi_video_bus_remove_fs(device);
input_unregister_device(video->input);
kfree(video->attached_array);
@@ -2584,17 +1827,9 @@ int acpi_video_register(void)
return 0;
}
-#ifdef CONFIG_ACPI_PROCFS
- acpi_video_dir = proc_mkdir(ACPI_VIDEO_CLASS, acpi_root_dir);
- if (!acpi_video_dir)
- return -ENODEV;
-#endif
-
result = acpi_bus_register_driver(&acpi_video_bus);
- if (result < 0) {
- remove_proc_entry(ACPI_VIDEO_CLASS, acpi_root_dir);
+ if (result < 0)
return -ENODEV;
- }
/*
* When the acpi_video_bus is loaded successfully, increase
@@ -2617,10 +1852,6 @@ void acpi_video_unregister(void)
}
acpi_bus_unregister_driver(&acpi_video_bus);
-#ifdef CONFIG_ACPI_PROCFS
- remove_proc_entry(ACPI_VIDEO_CLASS, acpi_root_dir);
-#endif
-
register_count = 0;
return;
diff --git a/drivers/ata/libata-scsi.c b/drivers/ata/libata-scsi.c
index d050e073e570..66aa4bee80a6 100644
--- a/drivers/ata/libata-scsi.c
+++ b/drivers/ata/libata-scsi.c
@@ -2552,8 +2552,11 @@ static void atapi_qc_complete(struct ata_queued_cmd *qc)
*
* If door lock fails, always clear sdev->locked to
* avoid this infinite loop.
+ *
+ * This may happen before SCSI scan is complete. Make
+ * sure qc->dev->sdev isn't NULL before dereferencing.
*/
- if (qc->cdb[0] == ALLOW_MEDIUM_REMOVAL)
+ if (qc->cdb[0] == ALLOW_MEDIUM_REMOVAL && qc->dev->sdev)
qc->dev->sdev->locked = 0;
qc->scsicmd->result = SAM_STAT_CHECK_CONDITION;
@@ -3163,8 +3166,8 @@ static inline int __ata_scsi_queuecmd(struct scsi_cmnd *scmd,
/**
* ata_scsi_queuecmd - Issue SCSI cdb to libata-managed device
+ * @shost: SCSI host of command to be sent
* @cmd: SCSI command to be sent
- * @done: Completion function, called when command is complete
*
* In some cases, this function translates SCSI commands into
* ATA taskfiles, and queues the taskfiles to be sent to
@@ -3174,37 +3177,36 @@ static inline int __ata_scsi_queuecmd(struct scsi_cmnd *scmd,
* ATA and ATAPI devices appearing as SCSI devices.
*
* LOCKING:
- * Releases scsi-layer-held lock, and obtains host lock.
+ * ATA host lock
*
* RETURNS:
* Return value from __ata_scsi_queuecmd() if @cmd can be queued,
* 0 otherwise.
*/
-int ata_scsi_queuecmd(struct scsi_cmnd *cmd, void (*done)(struct scsi_cmnd *))
+int ata_scsi_queuecmd(struct Scsi_Host *shost, struct scsi_cmnd *cmd)
{
struct ata_port *ap;
struct ata_device *dev;
struct scsi_device *scsidev = cmd->device;
- struct Scsi_Host *shost = scsidev->host;
int rc = 0;
+ unsigned long irq_flags;
ap = ata_shost_to_port(shost);
- spin_unlock(shost->host_lock);
- spin_lock(ap->lock);
+ spin_lock_irqsave(ap->lock, irq_flags);
ata_scsi_dump_cdb(ap, cmd);
dev = ata_scsi_find_dev(ap, scsidev);
if (likely(dev))
- rc = __ata_scsi_queuecmd(cmd, done, dev);
+ rc = __ata_scsi_queuecmd(cmd, cmd->scsi_done, dev);
else {
cmd->result = (DID_BAD_TARGET << 16);
- done(cmd);
+ cmd->scsi_done(cmd);
}
- spin_unlock(ap->lock);
- spin_lock(shost->host_lock);
+ spin_unlock_irqrestore(ap->lock, irq_flags);
+
return rc;
}
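
Aside (not part of the patch): the ata_scsi_queuecmd() conversion above drops the host-lock juggling and takes the port lock directly with spin_lock_irqsave()/spin_unlock_irqrestore(). Below is a loose userspace analogy of that save/acquire/release/restore shape, using a mutex plus a saved signal mask as a stand-in for the saved IRQ flags; it is an analogy only, not kernel locking code, and all names are invented.

/*
 * Illustrative userspace sketch (not kernel code): block asynchronous
 * interruption, take the lock, do the work, then restore the previous
 * state -- roughly the shape of spin_lock_irqsave()/irqrestore().
 */
#include <pthread.h>
#include <signal.h>
#include <stdio.h>

static pthread_mutex_t ap_lock = PTHREAD_MUTEX_INITIALIZER;

static int queue_command(int cmd)
{
	sigset_t all, saved;

	sigfillset(&all);
	pthread_sigmask(SIG_BLOCK, &all, &saved);	/* "irqsave" */
	pthread_mutex_lock(&ap_lock);

	/* critical section: dispatch the command under the port lock */
	printf("queued command 0x%02x\n", cmd);

	pthread_mutex_unlock(&ap_lock);
	pthread_sigmask(SIG_SETMASK, &saved, NULL);	/* "irqrestore" */
	return 0;
}

int main(void)
{
	return queue_command(0x35);
}
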
diff --git a/drivers/ata/pata_legacy.c b/drivers/ata/pata_legacy.c
index eaf194138f21..6bd9425ba5ab 100644
--- a/drivers/ata/pata_legacy.c
+++ b/drivers/ata/pata_legacy.c
@@ -142,7 +142,7 @@ static int autospeed; /* Chip present which snoops speed changes */
static int pio_mask = ATA_PIO4; /* PIO range for autospeed devices */
static int iordy_mask = 0xFFFFFFFF; /* Use iordy if available */
-#ifdef PATA_WINBOND_VLB_MODULE
+#ifdef CONFIG_PATA_WINBOND_VLB_MODULE
static int winbond = 1; /* Set to probe Winbond controllers,
give I/O port if non standard */
#else
diff --git a/drivers/ata/pata_octeon_cf.c b/drivers/ata/pata_octeon_cf.c
index 06ddd91ffeda..fa1b95a9a7ff 100644
--- a/drivers/ata/pata_octeon_cf.c
+++ b/drivers/ata/pata_octeon_cf.c
@@ -60,7 +60,7 @@ static unsigned int ns_to_tim_reg(unsigned int tim_mult, unsigned int nsecs)
* Compute # of eclock periods to get desired duration in
* nanoseconds.
*/
- val = DIV_ROUND_UP(nsecs * (octeon_get_clock_rate() / 1000000),
+ val = DIV_ROUND_UP(nsecs * (octeon_get_io_clock_rate() / 1000000),
1000 * tim_mult);
return val;
@@ -653,8 +653,6 @@ static irqreturn_t octeon_cf_interrupt(int irq, void *dev_instance)
ap = host->ports[i];
ocd = ap->dev->platform_data;
-
- ocd = ap->dev->platform_data;
cf_port = ap->private_data;
dma_int.u64 =
cvmx_read_csr(CVMX_MIO_BOOT_DMA_INTX(ocd->dma_engine));
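
Aside (not part of the patch): ns_to_tim_reg() above rounds a nanosecond duration up to a whole number of clock periods of the I/O clock. Below is a small standalone C program working through that computation; the 800 MHz rate and 150 ns duration are example values only, and the clock rate is passed in explicitly since octeon_get_io_clock_rate() is not available in userspace.

/*
 * Illustrative standalone sketch of the period computation used by
 * ns_to_tim_reg(): periods = ceil(nsecs * MHz / (1000 * tim_mult)).
 */
#include <stdio.h>

#define DIV_ROUND_UP(n, d)	(((n) + (d) - 1) / (d))

static unsigned int ns_to_periods(unsigned long long clock_hz,
				  unsigned int tim_mult, unsigned int nsecs)
{
	/* clock_hz / 1000000 gives ticks per microsecond (i.e. MHz) */
	return DIV_ROUND_UP(nsecs * (clock_hz / 1000000), 1000ULL * tim_mult);
}

int main(void)
{
	/* 150 ns at an 800 MHz clock, tim_mult = 1: ceil(150 * 800 / 1000) = 120 */
	printf("%u\n", ns_to_periods(800000000ULL, 1, 150));
	return 0;
}
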
diff --git a/drivers/ata/sata_via.c b/drivers/ata/sata_via.c
index c21589986c69..8b677bbf2d37 100644
--- a/drivers/ata/sata_via.c
+++ b/drivers/ata/sata_via.c
@@ -538,7 +538,7 @@ static int vt8251_prepare_host(struct pci_dev *pdev, struct ata_host **r_host)
return 0;
}
-static void svia_configure(struct pci_dev *pdev)
+static void svia_configure(struct pci_dev *pdev, int board_id)
{
u8 tmp8;
@@ -577,7 +577,7 @@ static void svia_configure(struct pci_dev *pdev)
}
/*
- * vt6421 has problems talking to some drives. The following
+ * vt6420/1 has problems talking to some drives. The following
* is the fix from Joseph Chan <JosephChan@via.com.tw>.
*
* When host issues HOLD, device may send up to 20DW of data
@@ -596,8 +596,9 @@ static void svia_configure(struct pci_dev *pdev)
*
* https://bugzilla.kernel.org/show_bug.cgi?id=15173
* http://article.gmane.org/gmane.linux.ide/46352
+ * http://thread.gmane.org/gmane.linux.kernel/1062139
*/
- if (pdev->device == 0x3249) {
+ if (board_id == vt6420 || board_id == vt6421) {
pci_read_config_byte(pdev, 0x52, &tmp8);
tmp8 |= 1 << 2;
pci_write_config_byte(pdev, 0x52, tmp8);
@@ -652,7 +653,7 @@ static int svia_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
if (rc)
return rc;
- svia_configure(pdev);
+ svia_configure(pdev, board_id);
pci_set_master(pdev);
return ata_host_activate(host, pdev->irq, ata_bmdma_interrupt,
diff --git a/drivers/atm/eni.c b/drivers/atm/eni.c
index 80f9f3659e4d..97c5898cd76e 100644
--- a/drivers/atm/eni.c
+++ b/drivers/atm/eni.c
@@ -1736,9 +1736,10 @@ static int __devinit eni_do_init(struct atm_dev *dev)
eprom = (base+EPROM_SIZE-sizeof(struct midway_eprom));
if (readl(&eprom->magic) != ENI155_MAGIC) {
printk("\n");
- printk(KERN_ERR KERN_ERR DEV_LABEL "(itf %d): bad "
- "magic - expected 0x%x, got 0x%x\n",dev->number,
- ENI155_MAGIC,(unsigned) readl(&eprom->magic));
+ printk(KERN_ERR DEV_LABEL
+ "(itf %d): bad magic - expected 0x%x, got 0x%x\n",
+ dev->number, ENI155_MAGIC,
+ (unsigned)readl(&eprom->magic));
error = -EINVAL;
goto unmap;
}
diff --git a/drivers/atm/solos-attrlist.c b/drivers/atm/solos-attrlist.c
index 1a9332e4efe0..9a676ee30824 100644
--- a/drivers/atm/solos-attrlist.c
+++ b/drivers/atm/solos-attrlist.c
@@ -1,6 +1,7 @@
SOLOS_ATTR_RO(DriverVersion)
SOLOS_ATTR_RO(APIVersion)
SOLOS_ATTR_RO(FirmwareVersion)
+SOLOS_ATTR_RO(Version)
// SOLOS_ATTR_RO(DspVersion)
// SOLOS_ATTR_RO(CommonHandshake)
SOLOS_ATTR_RO(Connected)
diff --git a/drivers/atm/solos-pci.c b/drivers/atm/solos-pci.c
index f46138ab38b6..2e08c996fd30 100644
--- a/drivers/atm/solos-pci.c
+++ b/drivers/atm/solos-pci.c
@@ -1161,6 +1161,14 @@ static int fpga_probe(struct pci_dev *dev, const struct pci_device_id *id)
dev_info(&dev->dev, "Solos FPGA Version %d.%02d svn-%d\n",
major_ver, minor_ver, fpga_ver);
+ if (fpga_ver < 37 && (fpga_upgrade || firmware_upgrade ||
+ db_fpga_upgrade || db_firmware_upgrade)) {
+ dev_warn(&dev->dev,
+ "FPGA too old; cannot upgrade flash. Use JTAG.\n");
+ fpga_upgrade = firmware_upgrade = 0;
+ db_fpga_upgrade = db_firmware_upgrade = 0;
+ }
+
if (card->fpga_version >= DMA_SUPPORTED){
card->using_dma = 1;
} else {
diff --git a/drivers/base/devtmpfs.c b/drivers/base/devtmpfs.c
index af0600143d1c..82bbb5967aa9 100644
--- a/drivers/base/devtmpfs.c
+++ b/drivers/base/devtmpfs.c
@@ -29,33 +29,33 @@
static struct vfsmount *dev_mnt;
#if defined CONFIG_DEVTMPFS_MOUNT
-static int dev_mount = 1;
+static int mount_dev = 1;
#else
-static int dev_mount;
+static int mount_dev;
#endif
static DEFINE_MUTEX(dirlock);
static int __init mount_param(char *str)
{
- dev_mount = simple_strtoul(str, NULL, 0);
+ mount_dev = simple_strtoul(str, NULL, 0);
return 1;
}
__setup("devtmpfs.mount=", mount_param);
-static int dev_get_sb(struct file_system_type *fs_type, int flags,
- const char *dev_name, void *data, struct vfsmount *mnt)
+static struct dentry *dev_mount(struct file_system_type *fs_type, int flags,
+ const char *dev_name, void *data)
{
#ifdef CONFIG_TMPFS
- return get_sb_single(fs_type, flags, data, shmem_fill_super, mnt);
+ return mount_single(fs_type, flags, data, shmem_fill_super);
#else
- return get_sb_single(fs_type, flags, data, ramfs_fill_super, mnt);
+ return mount_single(fs_type, flags, data, ramfs_fill_super);
#endif
}
static struct file_system_type dev_fs_type = {
.name = "devtmpfs",
- .get_sb = dev_get_sb,
+ .mount = dev_mount,
.kill_sb = kill_litter_super,
};
@@ -351,7 +351,7 @@ int devtmpfs_mount(const char *mntdir)
{
int err;
- if (!dev_mount)
+ if (!mount_dev)
return 0;
if (!dev_mnt)
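
The devtmpfs hunk above is the generic 2.6.37-era conversion from .get_sb to .mount: the callback now returns the root dentry and no longer receives a vfsmount. A minimal sketch of the same conversion for a hypothetical "examplefs" (the filesystem name and the fill_super body are illustrative, not part of the patch):

#include <linux/fs.h>
#include <linux/module.h>

/* Hypothetical fill_super; a real one must allocate the root inode and
 * set sb->s_root, which is omitted in this sketch. */
static int examplefs_fill_super(struct super_block *sb, void *data, int silent)
{
	return 0;
}

/* New-style mount callback: returns the root dentry instead of filling
 * in a vfsmount passed by the caller. */
static struct dentry *examplefs_mount(struct file_system_type *fs_type,
				      int flags, const char *dev_name, void *data)
{
	return mount_single(fs_type, flags, data, examplefs_fill_super);
}

static struct file_system_type examplefs_type = {
	.owner   = THIS_MODULE,
	.name    = "examplefs",
	.mount   = examplefs_mount,	/* was .get_sb before this conversion */
	.kill_sb = kill_litter_super,
};
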
diff --git a/drivers/base/node.c b/drivers/base/node.c
index ee53558b452f..ce012a9c6201 100644
--- a/drivers/base/node.c
+++ b/drivers/base/node.c
@@ -160,6 +160,18 @@ static ssize_t node_read_numastat(struct sys_device * dev,
}
static SYSDEV_ATTR(numastat, S_IRUGO, node_read_numastat, NULL);
+static ssize_t node_read_vmstat(struct sys_device *dev,
+ struct sysdev_attribute *attr, char *buf)
+{
+ int nid = dev->id;
+ return sprintf(buf,
+ "nr_written %lu\n"
+ "nr_dirtied %lu\n",
+ node_page_state(nid, NR_WRITTEN),
+ node_page_state(nid, NR_DIRTIED));
+}
+static SYSDEV_ATTR(vmstat, S_IRUGO, node_read_vmstat, NULL);
+
static ssize_t node_read_distance(struct sys_device * dev,
struct sysdev_attribute *attr, char * buf)
{
@@ -243,6 +255,7 @@ int register_node(struct node *node, int num, struct node *parent)
sysdev_create_file(&node->sysdev, &attr_meminfo);
sysdev_create_file(&node->sysdev, &attr_numastat);
sysdev_create_file(&node->sysdev, &attr_distance);
+ sysdev_create_file(&node->sysdev, &attr_vmstat);
scan_unevictable_register_node(node);
@@ -267,6 +280,7 @@ void unregister_node(struct node *node)
sysdev_remove_file(&node->sysdev, &attr_meminfo);
sysdev_remove_file(&node->sysdev, &attr_numastat);
sysdev_remove_file(&node->sysdev, &attr_distance);
+ sysdev_remove_file(&node->sysdev, &attr_vmstat);
scan_unevictable_unregister_node(node);
hugetlb_unregister_node(node); /* no-op, if memoryless node */
diff --git a/drivers/base/platform.c b/drivers/base/platform.c
index 3966e62ad019..f051cfff18af 100644
--- a/drivers/base/platform.c
+++ b/drivers/base/platform.c
@@ -147,6 +147,7 @@ static void platform_device_release(struct device *dev)
struct platform_object *pa = container_of(dev, struct platform_object,
pdev.dev);
+ of_device_node_put(&pa->pdev.dev);
kfree(pa->pdev.dev.platform_data);
kfree(pa->pdev.resource);
kfree(pa);
diff --git a/drivers/base/power/main.c b/drivers/base/power/main.c
index 31b526661ec4..ead3e79d6fcf 100644
--- a/drivers/base/power/main.c
+++ b/drivers/base/power/main.c
@@ -475,20 +475,33 @@ End:
*/
void dpm_resume_noirq(pm_message_t state)
{
- struct device *dev;
+ struct list_head list;
ktime_t starttime = ktime_get();
+ INIT_LIST_HEAD(&list);
mutex_lock(&dpm_list_mtx);
transition_started = false;
- list_for_each_entry(dev, &dpm_list, power.entry)
+ while (!list_empty(&dpm_list)) {
+ struct device *dev = to_device(dpm_list.next);
+
+ get_device(dev);
if (dev->power.status > DPM_OFF) {
int error;
dev->power.status = DPM_OFF;
+ mutex_unlock(&dpm_list_mtx);
+
error = device_resume_noirq(dev, state);
+
+ mutex_lock(&dpm_list_mtx);
if (error)
pm_dev_err(dev, state, " early", error);
}
+ if (!list_empty(&dev->power.entry))
+ list_move_tail(&dev->power.entry, &list);
+ put_device(dev);
+ }
+ list_splice(&list, &dpm_list);
mutex_unlock(&dpm_list_mtx);
dpm_show_time(starttime, state, "early");
resume_device_irqs();
@@ -789,20 +802,33 @@ End:
*/
int dpm_suspend_noirq(pm_message_t state)
{
- struct device *dev;
+ struct list_head list;
ktime_t starttime = ktime_get();
int error = 0;
+ INIT_LIST_HEAD(&list);
suspend_device_irqs();
mutex_lock(&dpm_list_mtx);
- list_for_each_entry_reverse(dev, &dpm_list, power.entry) {
+ while (!list_empty(&dpm_list)) {
+ struct device *dev = to_device(dpm_list.prev);
+
+ get_device(dev);
+ mutex_unlock(&dpm_list_mtx);
+
error = device_suspend_noirq(dev, state);
+
+ mutex_lock(&dpm_list_mtx);
if (error) {
pm_dev_err(dev, state, " late", error);
+ put_device(dev);
break;
}
dev->power.status = DPM_OFF_IRQ;
+ if (!list_empty(&dev->power.entry))
+ list_move(&dev->power.entry, &list);
+ put_device(dev);
}
+ list_splice_tail(&list, &dpm_list);
mutex_unlock(&dpm_list_mtx);
if (error)
dpm_resume_noirq(resume_event(state));
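
The dpm_resume_noirq()/dpm_suspend_noirq() rework above swaps a plain list_for_each_entry() for a loop that pins each device with get_device(), drops dpm_list_mtx around the (possibly sleeping) callback, and parks handled entries on a local list. A stripped-down sketch of that pattern, with my_dev, my_list, my_lock and handle_one() as illustrative stand-ins rather than anything from the patch:

#include <linux/list.h>
#include <linux/mutex.h>
#include <linux/device.h>

/* Illustrative wrapper: an embedded struct device plus a list node. */
struct my_dev {
	struct device dev;
	struct list_head entry;
};

static LIST_HEAD(my_list);
static DEFINE_MUTEX(my_lock);

/* Hypothetical per-device callback; it may sleep and the list may
 * change while it runs. */
static void handle_one(struct my_dev *d) { }

static void walk_and_handle(void)
{
	LIST_HEAD(done);

	mutex_lock(&my_lock);
	while (!list_empty(&my_list)) {
		struct my_dev *d = list_first_entry(&my_list, struct my_dev, entry);

		get_device(&d->dev);		/* keep it alive while unlocked */
		mutex_unlock(&my_lock);

		handle_one(d);			/* may sleep, may change the list */

		mutex_lock(&my_lock);
		if (!list_empty(&d->entry))	/* still linked? park it on 'done' */
			list_move_tail(&d->entry, &done);
		put_device(&d->dev);
	}
	list_splice(&done, &my_list);		/* put handled entries back */
	mutex_unlock(&my_lock);
}
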
diff --git a/drivers/base/power/runtime.c b/drivers/base/power/runtime.c
index 1dd8676d7f55..02c652be83e7 100644
--- a/drivers/base/power/runtime.c
+++ b/drivers/base/power/runtime.c
@@ -143,7 +143,7 @@ static int rpm_check_suspend_allowed(struct device *dev)
/* Pending resume requests take precedence over suspends. */
else if ((dev->power.deferred_resume
- && dev->power.status == RPM_SUSPENDING)
+ && dev->power.runtime_status == RPM_SUSPENDING)
|| (dev->power.request_pending
&& dev->power.request == RPM_REQ_RESUME))
retval = -EAGAIN;
@@ -503,7 +503,7 @@ static int rpm_resume(struct device *dev, int rpmflags)
* the resume will actually succeed.
*/
if (dev->power.no_callbacks && !parent && dev->parent) {
- spin_lock(&dev->parent->power.lock);
+ spin_lock_nested(&dev->parent->power.lock, SINGLE_DEPTH_NESTING);
if (dev->parent->power.disable_depth > 0
|| dev->parent->power.ignore_children
|| dev->parent->power.runtime_status == RPM_ACTIVE) {
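
The runtime-PM hunk takes the parent's power.lock with spin_lock_nested() so lockdep does not report the child-then-parent acquisition of two locks from the same class as recursive locking. A minimal illustration of the annotation; the node structure below is assumed, not taken from the patch:

#include <linux/spinlock.h>

struct node {
	spinlock_t lock;	/* every node's lock is in the same lock class */
	struct node *parent;
};

/* SINGLE_DEPTH_NESTING tells lockdep that taking the parent's lock while
 * holding the child's is a deliberate, bounded nesting rather than a
 * self-deadlock on one lock. */
static void touch_parent(struct node *n)
{
	spin_lock(&n->lock);
	if (n->parent) {
		spin_lock_nested(&n->parent->lock, SINGLE_DEPTH_NESTING);
		/* ... inspect or update parent state ... */
		spin_unlock(&n->parent->lock);
	}
	spin_unlock(&n->lock);
}
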
diff --git a/drivers/block/amiflop.c b/drivers/block/amiflop.c
index a1725e6488d3..7888501ad9ee 100644
--- a/drivers/block/amiflop.c
+++ b/drivers/block/amiflop.c
@@ -1341,7 +1341,7 @@ static struct request *set_next_request(void)
{
struct request_queue *q;
int cnt = FD_MAX_UNITS;
- struct request *rq;
+ struct request *rq = NULL;
/* Find next queue we can dispatch from */
fdc_queue = fdc_queue + 1;
diff --git a/drivers/block/aoe/aoeblk.c b/drivers/block/aoe/aoeblk.c
index f21c237a9e5e..528f6318ded1 100644
--- a/drivers/block/aoe/aoeblk.c
+++ b/drivers/block/aoe/aoeblk.c
@@ -4,12 +4,14 @@
* block device routines
*/
+#include <linux/kernel.h>
#include <linux/hdreg.h>
#include <linux/blkdev.h>
#include <linux/backing-dev.h>
#include <linux/fs.h>
#include <linux/ioctl.h>
#include <linux/slab.h>
+#include <linux/ratelimit.h>
#include <linux/genhd.h>
#include <linux/netdevice.h>
#include <linux/mutex.h>
@@ -178,9 +180,6 @@ aoeblk_make_request(struct request_queue *q, struct bio *bio)
BUG();
bio_endio(bio, -ENXIO);
return 0;
- } else if (bio->bi_rw & REQ_HARDBARRIER) {
- bio_endio(bio, -EOPNOTSUPP);
- return 0;
} else if (bio->bi_io_vec == NULL) {
printk(KERN_ERR "aoe: bi_io_vec is NULL\n");
BUG();
@@ -207,7 +206,7 @@ aoeblk_make_request(struct request_queue *q, struct bio *bio)
spin_lock_irqsave(&d->lock, flags);
if ((d->flags & DEVFL_UP) == 0) {
- printk(KERN_INFO "aoe: device %ld.%d is not up\n",
+ pr_info_ratelimited("aoe: device %ld.%d is not up\n",
d->aoemajor, d->aoeminor);
spin_unlock_irqrestore(&d->lock, flags);
mempool_free(buf, d->bufpool);
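
The aoeblk hunk replaces a plain printk(KERN_INFO ...) with pr_info_ratelimited(), which suppresses repeats once the default ratelimit is exceeded, so I/O against a downed device no longer floods the log. A minimal usage sketch (the helper and its arguments are illustrative):

#include <linux/kernel.h>
#include <linux/ratelimit.h>

/* Same output as pr_info(), but once the default ratelimit burst is hit
 * further messages are dropped and a "callbacks suppressed" note is
 * printed instead. */
static void report_device_down(long major, int minor)
{
	pr_info_ratelimited("aoe: device %ld.%d is not up\n", major, minor);
}
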
diff --git a/drivers/block/aoe/aoedev.c b/drivers/block/aoe/aoedev.c
index 0849280bfc1c..6b5110a47458 100644
--- a/drivers/block/aoe/aoedev.c
+++ b/drivers/block/aoe/aoedev.c
@@ -102,6 +102,7 @@ aoedev_freedev(struct aoedev *d)
{
struct aoetgt **t, **e;
+ cancel_work_sync(&d->work);
if (d->gd) {
aoedisk_rm_sysfs(d);
del_gendisk(d->gd);
@@ -135,7 +136,6 @@ aoedev_flush(const char __user *str, size_t cnt)
all = !strncmp(buf, "all", 3);
}
- flush_scheduled_work();
spin_lock_irqsave(&devlist_lock, flags);
dd = &devlist;
while ((d = *dd)) {
@@ -257,8 +257,6 @@ aoedev_exit(void)
struct aoedev *d;
ulong flags;
- flush_scheduled_work();
-
while ((d = devlist)) {
devlist = d->next;
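
The aoedev hunk drops the driver-wide flush_scheduled_work() calls in favour of a per-object cancel_work_sync() in the free path, so teardown waits only for work the object itself owns. A sketch of the pattern with illustrative names:

#include <linux/workqueue.h>
#include <linux/slab.h>

struct my_obj {
	struct work_struct work;
	/* ... state the work handler touches ... */
};

static void my_obj_work(struct work_struct *work)
{
	struct my_obj *o = container_of(work, struct my_obj, work);
	/* ... deferred processing on o ... */
	(void)o;
}

static void my_obj_free(struct my_obj *o)
{
	/* Cancel a pending work item and wait for a running one to finish,
	 * instead of flushing the whole shared workqueue. */
	cancel_work_sync(&o->work);
	kfree(o);
}
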
diff --git a/drivers/block/ataflop.c b/drivers/block/ataflop.c
index 4e4cc6c828cb..605a67e40bbf 100644
--- a/drivers/block/ataflop.c
+++ b/drivers/block/ataflop.c
@@ -1399,7 +1399,7 @@ static struct request *set_next_request(void)
{
struct request_queue *q;
int old_pos = fdc_queue;
- struct request *rq;
+ struct request *rq = NULL;
do {
q = unit[fdc_queue].disk->queue;
diff --git a/drivers/block/cciss.c b/drivers/block/cciss.c
index f09e6df15aa7..f291587d753e 100644
--- a/drivers/block/cciss.c
+++ b/drivers/block/cciss.c
@@ -66,11 +66,7 @@ MODULE_VERSION("3.6.26");
MODULE_LICENSE("GPL");
static DEFINE_MUTEX(cciss_mutex);
-static int cciss_allow_hpsa;
-module_param(cciss_allow_hpsa, int, S_IRUGO|S_IWUSR);
-MODULE_PARM_DESC(cciss_allow_hpsa,
- "Prevent cciss driver from accessing hardware known to be "
- " supported by the hpsa driver");
+static struct proc_dir_entry *proc_cciss;
#include "cciss_cmd.h"
#include "cciss.h"
@@ -98,19 +94,6 @@ static const struct pci_device_id cciss_pci_device_id[] = {
{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSD, 0x103C, 0x3215},
{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSC, 0x103C, 0x3237},
{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSC, 0x103C, 0x323D},
- {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSE, 0x103C, 0x3241},
- {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSE, 0x103C, 0x3243},
- {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSE, 0x103C, 0x3245},
- {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSE, 0x103C, 0x3247},
- {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSE, 0x103C, 0x3249},
- {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSE, 0x103C, 0x324A},
- {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSE, 0x103C, 0x324B},
- {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSF, 0x103C, 0x3350},
- {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSF, 0x103C, 0x3351},
- {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSF, 0x103C, 0x3352},
- {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSF, 0x103C, 0x3353},
- {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSF, 0x103C, 0x3354},
- {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSF, 0x103C, 0x3355},
{0,}
};
@@ -131,6 +114,8 @@ static struct board_type products[] = {
{0x409D0E11, "Smart Array 6400 EM", &SA5_access},
{0x40910E11, "Smart Array 6i", &SA5_access},
{0x3225103C, "Smart Array P600", &SA5_access},
+ {0x3223103C, "Smart Array P800", &SA5_access},
+ {0x3234103C, "Smart Array P400", &SA5_access},
{0x3235103C, "Smart Array P400i", &SA5_access},
{0x3211103C, "Smart Array E200i", &SA5_access},
{0x3212103C, "Smart Array E200", &SA5_access},
@@ -138,24 +123,9 @@ static struct board_type products[] = {
{0x3214103C, "Smart Array E200i", &SA5_access},
{0x3215103C, "Smart Array E200i", &SA5_access},
{0x3237103C, "Smart Array E500", &SA5_access},
-/* controllers below this line are also supported by the hpsa driver. */
-#define HPSA_BOUNDARY 0x3223103C
{0x3223103C, "Smart Array P800", &SA5_access},
{0x3234103C, "Smart Array P400", &SA5_access},
{0x323D103C, "Smart Array P700m", &SA5_access},
- {0x3241103C, "Smart Array P212", &SA5_access},
- {0x3243103C, "Smart Array P410", &SA5_access},
- {0x3245103C, "Smart Array P410i", &SA5_access},
- {0x3247103C, "Smart Array P411", &SA5_access},
- {0x3249103C, "Smart Array P812", &SA5_access},
- {0x324A103C, "Smart Array P712m", &SA5_access},
- {0x324B103C, "Smart Array P711m", &SA5_access},
- {0x3350103C, "Smart Array", &SA5_access},
- {0x3351103C, "Smart Array", &SA5_access},
- {0x3352103C, "Smart Array", &SA5_access},
- {0x3353103C, "Smart Array", &SA5_access},
- {0x3354103C, "Smart Array", &SA5_access},
- {0x3355103C, "Smart Array", &SA5_access},
};
/* How long to wait (in milliseconds) for board to go into simple mode */
@@ -394,8 +364,6 @@ static const char *raid_label[] = { "0", "4", "1(1+0)", "5", "5+1", "ADG",
#define ENG_GIG_FACTOR (ENG_GIG/512)
#define ENGAGE_SCSI "engage scsi"
-static struct proc_dir_entry *proc_cciss;
-
static void cciss_seq_show_header(struct seq_file *seq)
{
ctlr_info_t *h = seq->private;
@@ -1184,6 +1152,7 @@ static int cciss_ioctl32_big_passthru(struct block_device *bdev, fmode_t mode,
int err;
u32 cp;
+ memset(&arg64, 0, sizeof(arg64));
err = 0;
err |=
copy_from_user(&arg64.LUN_info, &arg32->LUN_info,
@@ -3785,7 +3754,7 @@ static void __devinit cciss_wait_for_mode_change_ack(ctlr_info_t *h)
for (i = 0; i < MAX_CONFIG_WAIT; i++) {
if (!(readl(h->vaddr + SA5_DOORBELL) & CFGTBL_ChangeReq))
break;
- msleep(10);
+ usleep_range(10000, 20000);
}
}
@@ -3969,13 +3938,9 @@ static int __devinit cciss_lookup_board_id(struct pci_dev *pdev, u32 *board_id)
*board_id = ((subsystem_device_id << 16) & 0xffff0000) |
subsystem_vendor_id;
- for (i = 0; i < ARRAY_SIZE(products); i++) {
- /* Stand aside for hpsa driver on request */
- if (cciss_allow_hpsa && products[i].board_id == HPSA_BOUNDARY)
- return -ENODEV;
+ for (i = 0; i < ARRAY_SIZE(products); i++)
if (*board_id == products[i].board_id)
return i;
- }
dev_warn(&pdev->dev, "unrecognized board ID: 0x%08x, ignoring.\n",
*board_id);
return -ENODEV;
@@ -4006,18 +3971,31 @@ static int __devinit cciss_pci_find_memory_BAR(struct pci_dev *pdev,
return -ENODEV;
}
-static int __devinit cciss_wait_for_board_ready(ctlr_info_t *h)
+static int __devinit cciss_wait_for_board_state(struct pci_dev *pdev,
+ void __iomem *vaddr, int wait_for_ready)
+#define BOARD_READY 1
+#define BOARD_NOT_READY 0
{
- int i;
+ int i, iterations;
u32 scratchpad;
- for (i = 0; i < CCISS_BOARD_READY_ITERATIONS; i++) {
- scratchpad = readl(h->vaddr + SA5_SCRATCHPAD_OFFSET);
- if (scratchpad == CCISS_FIRMWARE_READY)
- return 0;
+ if (wait_for_ready)
+ iterations = CCISS_BOARD_READY_ITERATIONS;
+ else
+ iterations = CCISS_BOARD_NOT_READY_ITERATIONS;
+
+ for (i = 0; i < iterations; i++) {
+ scratchpad = readl(vaddr + SA5_SCRATCHPAD_OFFSET);
+ if (wait_for_ready) {
+ if (scratchpad == CCISS_FIRMWARE_READY)
+ return 0;
+ } else {
+ if (scratchpad != CCISS_FIRMWARE_READY)
+ return 0;
+ }
msleep(CCISS_BOARD_READY_POLL_INTERVAL_MSECS);
}
- dev_warn(&h->pdev->dev, "board not ready, timed out.\n");
+ dev_warn(&pdev->dev, "board not ready, timed out.\n");
return -ENODEV;
}
@@ -4066,6 +4044,11 @@ static int __devinit cciss_find_cfgtables(ctlr_info_t *h)
static void __devinit cciss_get_max_perf_mode_cmds(struct ctlr_info *h)
{
h->max_commands = readl(&(h->cfgtable->MaxPerformantModeCommands));
+
+ /* Limit commands in memory limited kdump scenario. */
+ if (reset_devices && h->max_commands > 32)
+ h->max_commands = 32;
+
if (h->max_commands < 16) {
dev_warn(&h->pdev->dev, "Controller reports "
"max supported commands of %d, an obvious lie. "
@@ -4183,7 +4166,7 @@ static int __devinit cciss_pci_init(ctlr_info_t *h)
err = -ENOMEM;
goto err_out_free_res;
}
- err = cciss_wait_for_board_ready(h);
+ err = cciss_wait_for_board_state(h->pdev, h->vaddr, BOARD_READY);
if (err)
goto err_out_free_res;
err = cciss_find_cfgtables(h);
@@ -4348,36 +4331,6 @@ static __devinit int cciss_message(struct pci_dev *pdev, unsigned char opcode, u
#define cciss_soft_reset_controller(p) cciss_message(p, 1, 0)
#define cciss_noop(p) cciss_message(p, 3, 0)
-static __devinit int cciss_reset_msi(struct pci_dev *pdev)
-{
-/* the #defines are stolen from drivers/pci/msi.h. */
-#define msi_control_reg(base) (base + PCI_MSI_FLAGS)
-#define PCI_MSIX_FLAGS_ENABLE (1 << 15)
-
- int pos;
- u16 control = 0;
-
- pos = pci_find_capability(pdev, PCI_CAP_ID_MSI);
- if (pos) {
- pci_read_config_word(pdev, msi_control_reg(pos), &control);
- if (control & PCI_MSI_FLAGS_ENABLE) {
- dev_info(&pdev->dev, "resetting MSI\n");
- pci_write_config_word(pdev, msi_control_reg(pos), control & ~PCI_MSI_FLAGS_ENABLE);
- }
- }
-
- pos = pci_find_capability(pdev, PCI_CAP_ID_MSIX);
- if (pos) {
- pci_read_config_word(pdev, msi_control_reg(pos), &control);
- if (control & PCI_MSIX_FLAGS_ENABLE) {
- dev_info(&pdev->dev, "resetting MSI-X\n");
- pci_write_config_word(pdev, msi_control_reg(pos), control & ~PCI_MSIX_FLAGS_ENABLE);
- }
- }
-
- return 0;
-}
-
static int cciss_controller_hard_reset(struct pci_dev *pdev,
void * __iomem vaddr, bool use_doorbell)
{
@@ -4432,17 +4385,17 @@ static int cciss_controller_hard_reset(struct pci_dev *pdev,
* states or using the doorbell register. */
static __devinit int cciss_kdump_hard_reset_controller(struct pci_dev *pdev)
{
- u16 saved_config_space[32];
u64 cfg_offset;
u32 cfg_base_addr;
u64 cfg_base_addr_index;
void __iomem *vaddr;
unsigned long paddr;
u32 misc_fw_support, active_transport;
- int rc, i;
+ int rc;
CfgTable_struct __iomem *cfgtable;
bool use_doorbell;
u32 board_id;
+ u16 command_register;
/* For controllers as old a the p600, this is very nearly
* the same thing as
@@ -4452,14 +4405,6 @@ static __devinit int cciss_kdump_hard_reset_controller(struct pci_dev *pdev)
* pci_set_power_state(pci_dev, PCI_D0);
* pci_restore_state(pci_dev);
*
- * but we can't use these nice canned kernel routines on
- * kexec, because they also check the MSI/MSI-X state in PCI
- * configuration space and do the wrong thing when it is
- * set/cleared. Also, the pci_save/restore_state functions
- * violate the ordering requirements for restoring the
- * configuration space from the CCISS document (see the
- * comment below). So we roll our own ....
- *
* For controllers newer than the P600, the pci power state
* method of resetting doesn't work so we have another way
* using the doorbell register.
@@ -4478,8 +4423,13 @@ static __devinit int cciss_kdump_hard_reset_controller(struct pci_dev *pdev)
return -ENODEV;
}
- for (i = 0; i < 32; i++)
- pci_read_config_word(pdev, 2*i, &saved_config_space[i]);
+ /* Save the PCI command register */
+ pci_read_config_word(pdev, 4, &command_register);
+ /* Turn the board off. This is so that later pci_restore_state()
+ * won't turn the board on before the rest of config space is ready.
+ */
+ pci_disable_device(pdev);
+ pci_save_state(pdev);
/* find the first memory BAR, so we can find the cfg table */
rc = cciss_pci_find_memory_BAR(pdev, &paddr);
@@ -4514,26 +4464,32 @@ static __devinit int cciss_kdump_hard_reset_controller(struct pci_dev *pdev)
rc = cciss_controller_hard_reset(pdev, vaddr, use_doorbell);
if (rc)
goto unmap_cfgtable;
-
- /* Restore the PCI configuration space. The Open CISS
- * Specification says, "Restore the PCI Configuration
- * Registers, offsets 00h through 60h. It is important to
- * restore the command register, 16-bits at offset 04h,
- * last. Do not restore the configuration status register,
- * 16-bits at offset 06h." Note that the offset is 2*i.
- */
- for (i = 0; i < 32; i++) {
- if (i == 2 || i == 3)
- continue;
- pci_write_config_word(pdev, 2*i, saved_config_space[i]);
+ pci_restore_state(pdev);
+ rc = pci_enable_device(pdev);
+ if (rc) {
+ dev_warn(&pdev->dev, "failed to enable device.\n");
+ goto unmap_cfgtable;
}
- wmb();
- pci_write_config_word(pdev, 4, saved_config_space[2]);
+ pci_write_config_word(pdev, 4, command_register);
/* Some devices (notably the HP Smart Array 5i Controller)
need a little pause here */
msleep(CCISS_POST_RESET_PAUSE_MSECS);
+ /* Wait for board to become not ready, then ready. */
+ dev_info(&pdev->dev, "Waiting for board to become ready.\n");
+ rc = cciss_wait_for_board_state(pdev, vaddr, BOARD_NOT_READY);
+ if (rc) /* Don't bail, might be E500, etc. which can't be reset */
+ dev_warn(&pdev->dev,
+ "failed waiting for board to become not ready\n");
+ rc = cciss_wait_for_board_state(pdev, vaddr, BOARD_READY);
+ if (rc) {
+ dev_warn(&pdev->dev,
+ "failed waiting for board to become ready\n");
+ goto unmap_cfgtable;
+ }
+ dev_info(&pdev->dev, "board ready.\n");
+
/* Controller should be in simple mode at this point. If it's not,
* It means we're on one of those controllers which doesn't support
* the doorbell reset method and on which the PCI power management reset
@@ -4574,8 +4530,6 @@ static __devinit int cciss_init_reset_devices(struct pci_dev *pdev)
return 0; /* just try to do the kdump anyhow. */
if (rc)
return -ENODEV;
- if (cciss_reset_msi(pdev))
- return -ENODEV;
/* Now try to get the controller to respond to a no-op */
for (i = 0; i < CCISS_POST_RESET_NOOP_RETRIES; i++) {
@@ -4971,7 +4925,8 @@ static void __exit cciss_cleanup(void)
}
}
kthread_stop(cciss_scan_thread);
- remove_proc_entry("driver/cciss", NULL);
+ if (proc_cciss)
+ remove_proc_entry("driver/cciss", NULL);
bus_unregister(&cciss_bus_type);
}
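
The cciss kdump reset rework above replaces the hand-rolled 32-word config-space save/restore with the generic PCI helpers, keeping the one ordering requirement the patch comments on: disable the device before pci_save_state() so that a later pci_restore_state() cannot enable the board early, and rewrite PCI_COMMAND by hand last. A condensed sketch of that sequence; reset_controller() is a placeholder for the board-specific reset:

#include <linux/pci.h>

/* Placeholder for the board-specific reset (doorbell write, power-state
 * cycle, or similar). */
static int reset_controller(struct pci_dev *pdev) { return 0; }

static int hard_reset_with_state_save(struct pci_dev *pdev)
{
	u16 command_register;
	int rc;

	/* Save the command register separately; it must be restored last. */
	pci_read_config_word(pdev, PCI_COMMAND, &command_register);

	/* Turn the board off first so pci_restore_state() cannot re-enable
	 * it before the rest of config space is consistent. */
	pci_disable_device(pdev);
	pci_save_state(pdev);

	rc = reset_controller(pdev);
	if (rc)
		return rc;

	pci_restore_state(pdev);
	rc = pci_enable_device(pdev);
	if (rc)
		return rc;
	pci_write_config_word(pdev, PCI_COMMAND, command_register);
	return 0;
}
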
diff --git a/drivers/block/cciss.h b/drivers/block/cciss.h
index ae340ffc8f81..4b8933d778f1 100644
--- a/drivers/block/cciss.h
+++ b/drivers/block/cciss.h
@@ -200,10 +200,14 @@ struct ctlr_info
* the above.
*/
#define CCISS_BOARD_READY_WAIT_SECS (120)
+#define CCISS_BOARD_NOT_READY_WAIT_SECS (10)
#define CCISS_BOARD_READY_POLL_INTERVAL_MSECS (100)
#define CCISS_BOARD_READY_ITERATIONS \
((CCISS_BOARD_READY_WAIT_SECS * 1000) / \
CCISS_BOARD_READY_POLL_INTERVAL_MSECS)
+#define CCISS_BOARD_NOT_READY_ITERATIONS \
+ ((CCISS_BOARD_NOT_READY_WAIT_SECS * 1000) / \
+ CCISS_BOARD_READY_POLL_INTERVAL_MSECS)
#define CCISS_POST_RESET_PAUSE_MSECS (3000)
#define CCISS_POST_RESET_NOOP_INTERVAL_MSECS (1000)
#define CCISS_POST_RESET_NOOP_RETRIES (12)
diff --git a/drivers/block/cciss_scsi.c b/drivers/block/cciss_scsi.c
index 575495f3c4b8..727d0225b7d0 100644
--- a/drivers/block/cciss_scsi.c
+++ b/drivers/block/cciss_scsi.c
@@ -62,8 +62,8 @@ static int cciss_scsi_proc_info(
int length, /* length of data in buffer */
int func); /* 0 == read, 1 == write */
-static int cciss_scsi_queue_command (struct scsi_cmnd *cmd,
- void (* done)(struct scsi_cmnd *));
+static int cciss_scsi_queue_command (struct Scsi_Host *h,
+ struct scsi_cmnd *cmd);
static int cciss_eh_device_reset_handler(struct scsi_cmnd *);
static int cciss_eh_abort_handler(struct scsi_cmnd *);
@@ -1406,7 +1406,7 @@ static void cciss_scatter_gather(ctlr_info_t *h, CommandList_struct *c,
static int
-cciss_scsi_queue_command (struct scsi_cmnd *cmd, void (* done)(struct scsi_cmnd *))
+cciss_scsi_queue_command_lck(struct scsi_cmnd *cmd, void (*done)(struct scsi_cmnd *))
{
ctlr_info_t *h;
int rc;
@@ -1504,6 +1504,8 @@ cciss_scsi_queue_command (struct scsi_cmnd *cmd, void (* done)(struct scsi_cmnd
return 0;
}
+static DEF_SCSI_QCMD(cciss_scsi_queue_command)
+
static void cciss_unregister_scsi(ctlr_info_t *h)
{
struct cciss_scsi_adapter_data_t *sa;
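
The cciss_scsi hunk is the tree-wide host-lock push-down pattern: the existing handler keeps its old two-argument signature but is renamed with an _lck suffix, and DEF_SCSI_QCMD() generates the new (struct Scsi_Host *, struct scsi_cmnd *) entry point that takes the host lock and calls it. A minimal sketch for a hypothetical driver:

#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_host.h>

/* Old-style handler body; with the wrapper below it now runs with the
 * host lock held. */
static int mydrv_queue_command_lck(struct scsi_cmnd *cmd,
				   void (*done)(struct scsi_cmnd *))
{
	cmd->scsi_done = done;
	/* ... build and hand the command to the hardware; call
	 * cmd->scsi_done(cmd) from the completion path ... */
	return 0;
}

/* Expands to:
 *   static int mydrv_queue_command(struct Scsi_Host *shost, struct scsi_cmnd *cmd)
 * which takes shost->host_lock and calls mydrv_queue_command_lck(). */
static DEF_SCSI_QCMD(mydrv_queue_command)
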
diff --git a/drivers/block/drbd/drbd_actlog.c b/drivers/block/drbd/drbd_actlog.c
index ac04ef97eac2..ba95cba192be 100644
--- a/drivers/block/drbd/drbd_actlog.c
+++ b/drivers/block/drbd/drbd_actlog.c
@@ -78,11 +78,10 @@ static int _drbd_md_sync_page_io(struct drbd_conf *mdev,
init_completion(&md_io.event);
md_io.error = 0;
- if ((rw & WRITE) && !test_bit(MD_NO_BARRIER, &mdev->flags))
- rw |= REQ_HARDBARRIER;
+ if ((rw & WRITE) && !test_bit(MD_NO_FUA, &mdev->flags))
+ rw |= REQ_FUA;
rw |= REQ_UNPLUG | REQ_SYNC;
- retry:
bio = bio_alloc(GFP_NOIO, 1);
bio->bi_bdev = bdev->md_bdev;
bio->bi_sector = sector;
@@ -100,17 +99,6 @@ static int _drbd_md_sync_page_io(struct drbd_conf *mdev,
wait_for_completion(&md_io.event);
ok = bio_flagged(bio, BIO_UPTODATE) && md_io.error == 0;
- /* check for unsupported barrier op.
- * would rather check on EOPNOTSUPP, but that is not reliable.
- * don't try again for ANY return value != 0 */
- if (unlikely((bio->bi_rw & REQ_HARDBARRIER) && !ok)) {
- /* Try again with no barrier */
- dev_warn(DEV, "Barriers not supported on meta data device - disabling\n");
- set_bit(MD_NO_BARRIER, &mdev->flags);
- rw &= ~REQ_HARDBARRIER;
- bio_put(bio);
- goto retry;
- }
out:
bio_put(bio);
return ok;
@@ -284,18 +272,32 @@ w_al_write_transaction(struct drbd_conf *mdev, struct drbd_work *w, int unused)
u32 xor_sum = 0;
if (!get_ldev(mdev)) {
- dev_err(DEV, "get_ldev() failed in w_al_write_transaction\n");
+ dev_err(DEV,
+ "disk is %s, cannot start al transaction (-%d +%d)\n",
+ drbd_disk_str(mdev->state.disk), evicted, new_enr);
complete(&((struct update_al_work *)w)->event);
return 1;
}
/* do we have to do a bitmap write, first?
* TODO reduce maximum latency:
* submit both bios, then wait for both,
- * instead of doing two synchronous sector writes. */
+ * instead of doing two synchronous sector writes.
+ * For now, we must not write the transaction,
+ * if we cannot write out the bitmap of the evicted extent. */
if (mdev->state.conn < C_CONNECTED && evicted != LC_FREE)
drbd_bm_write_sect(mdev, evicted/AL_EXT_PER_BM_SECT);
- mutex_lock(&mdev->md_io_mutex); /* protects md_io_page, al_tr_cycle, ... */
+ /* The bitmap write may have failed, causing a state change. */
+ if (mdev->state.disk < D_INCONSISTENT) {
+ dev_err(DEV,
+ "disk is %s, cannot write al transaction (-%d +%d)\n",
+ drbd_disk_str(mdev->state.disk), evicted, new_enr);
+ complete(&((struct update_al_work *)w)->event);
+ put_ldev(mdev);
+ return 1;
+ }
+
+ mutex_lock(&mdev->md_io_mutex); /* protects md_io_buffer, al_tr_cycle, ... */
buffer = (struct al_transaction *)page_address(mdev->md_io_page);
buffer->magic = __constant_cpu_to_be32(DRBD_MAGIC);
@@ -739,7 +741,7 @@ void drbd_al_apply_to_bm(struct drbd_conf *mdev)
unsigned int enr;
unsigned long add = 0;
char ppb[10];
- int i;
+ int i, tmp;
wait_event(mdev->al_wait, lc_try_lock(mdev->act_log));
@@ -747,7 +749,9 @@ void drbd_al_apply_to_bm(struct drbd_conf *mdev)
enr = lc_element_by_index(mdev->act_log, i)->lc_number;
if (enr == LC_FREE)
continue;
- add += drbd_bm_ALe_set_all(mdev, enr);
+ tmp = drbd_bm_ALe_set_all(mdev, enr);
+ dynamic_dev_dbg(DEV, "AL: set %d bits in extent %u\n", tmp, enr);
+ add += tmp;
}
lc_unlock(mdev->act_log);
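
The drbd_actlog hunk switches metadata writes from REQ_HARDBARRIER (with its retry-without-barrier fallback) to REQ_FUA, which the block layer is expected to satisfy or emulate with a flush rather than fail with -EOPNOTSUPP. A sketch of a single synchronous FUA page write in the bio API of this kernel generation; the helper name and completion plumbing are illustrative, not from the patch:

#include <linux/bio.h>
#include <linux/blkdev.h>
#include <linux/completion.h>

struct sync_io {
	struct completion event;
	int error;
};

static void sync_io_endio(struct bio *bio, int error)
{
	struct sync_io *io = bio->bi_private;

	io->error = error;
	complete(&io->event);
}

/* Write one page at 'sector' on 'bdev' with FUA semantics and wait for it. */
static int write_page_fua(struct block_device *bdev, struct page *page,
			  sector_t sector)
{
	struct sync_io io;
	struct bio *bio = bio_alloc(GFP_NOIO, 1);
	int ok;

	init_completion(&io.event);
	io.error = 0;

	bio->bi_bdev = bdev;
	bio->bi_sector = sector;
	if (bio_add_page(bio, page, PAGE_SIZE, 0) != PAGE_SIZE) {
		bio_put(bio);
		return -EIO;
	}
	bio->bi_private = &io;
	bio->bi_end_io = sync_io_endio;

	submit_bio(WRITE | REQ_SYNC | REQ_FUA, bio);
	wait_for_completion(&io.event);

	ok = bio_flagged(bio, BIO_UPTODATE) && io.error == 0;
	bio_put(bio);
	return ok ? 0 : -EIO;
}
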
diff --git a/drivers/block/drbd/drbd_int.h b/drivers/block/drbd/drbd_int.h
index 9bdcf4393c0a..1ea1a34e78b2 100644
--- a/drivers/block/drbd/drbd_int.h
+++ b/drivers/block/drbd/drbd_int.h
@@ -114,11 +114,11 @@ struct drbd_conf;
#define D_ASSERT(exp) if (!(exp)) \
dev_err(DEV, "ASSERT( " #exp " ) in %s:%d\n", __FILE__, __LINE__)
-#define ERR_IF(exp) if (({ \
- int _b = (exp) != 0; \
- if (_b) dev_err(DEV, "%s: (%s) in %s:%d\n", \
- __func__, #exp, __FILE__, __LINE__); \
- _b; \
+#define ERR_IF(exp) if (({ \
+ int _b = (exp) != 0; \
+ if (_b) dev_err(DEV, "ASSERT FAILED: %s: (%s) in %s:%d\n", \
+ __func__, #exp, __FILE__, __LINE__); \
+ _b; \
}))
/* Defines to control fault insertion */
@@ -749,17 +749,12 @@ struct drbd_epoch {
/* drbd_epoch flag bits */
enum {
- DE_BARRIER_IN_NEXT_EPOCH_ISSUED,
- DE_BARRIER_IN_NEXT_EPOCH_DONE,
- DE_CONTAINS_A_BARRIER,
DE_HAVE_BARRIER_NUMBER,
- DE_IS_FINISHING,
};
enum epoch_event {
EV_PUT,
EV_GOT_BARRIER_NR,
- EV_BARRIER_DONE,
EV_BECAME_LAST,
EV_CLEANUP = 32, /* used as flag */
};
@@ -801,11 +796,6 @@ enum {
__EE_CALL_AL_COMPLETE_IO,
__EE_MAY_SET_IN_SYNC,
- /* This epoch entry closes an epoch using a barrier.
- * On sucessful completion, the epoch is released,
- * and the P_BARRIER_ACK send. */
- __EE_IS_BARRIER,
-
/* In case a barrier failed,
* we need to resubmit without the barrier flag. */
__EE_RESUBMITTED,
@@ -820,7 +810,6 @@ enum {
};
#define EE_CALL_AL_COMPLETE_IO (1<<__EE_CALL_AL_COMPLETE_IO)
#define EE_MAY_SET_IN_SYNC (1<<__EE_MAY_SET_IN_SYNC)
-#define EE_IS_BARRIER (1<<__EE_IS_BARRIER)
#define EE_RESUBMITTED (1<<__EE_RESUBMITTED)
#define EE_WAS_ERROR (1<<__EE_WAS_ERROR)
#define EE_HAS_DIGEST (1<<__EE_HAS_DIGEST)
@@ -843,16 +832,15 @@ enum {
* Gets cleared when the state.conn
* goes into C_CONNECTED state. */
WRITE_BM_AFTER_RESYNC, /* A kmalloc() during resync failed */
- NO_BARRIER_SUPP, /* underlying block device doesn't implement barriers */
CONSIDER_RESYNC,
- MD_NO_BARRIER, /* meta data device does not support barriers,
- so don't even try */
+ MD_NO_FUA, /* User wants us not to use FUA/FLUSH on meta data dev */
SUSPEND_IO, /* suspend application io */
BITMAP_IO, /* suspend application io;
once no more io in flight, start bitmap io */
BITMAP_IO_QUEUED, /* Started bitmap IO */
- GO_DISKLESS, /* Disk failed, local_cnt reached zero, we are going diskless */
+ GO_DISKLESS, /* Disk is being detached, on io-error or admin request. */
+ WAS_IO_ERROR, /* Local disk failed returned IO error */
RESYNC_AFTER_NEG, /* Resync after online grow after the attach&negotiate finished. */
NET_CONGESTED, /* The data socket is congested */
@@ -947,7 +935,6 @@ enum write_ordering_e {
WO_none,
WO_drain_io,
WO_bdev_flush,
- WO_bio_barrier
};
struct fifo_buffer {
@@ -1281,6 +1268,7 @@ extern int drbd_bmio_set_n_write(struct drbd_conf *mdev);
extern int drbd_bmio_clear_n_write(struct drbd_conf *mdev);
extern int drbd_bitmap_io(struct drbd_conf *mdev, int (*io_fn)(struct drbd_conf *), char *why);
extern void drbd_go_diskless(struct drbd_conf *mdev);
+extern void drbd_ldev_destroy(struct drbd_conf *mdev);
/* Meta data layout
@@ -1798,17 +1786,17 @@ static inline void __drbd_chk_io_error_(struct drbd_conf *mdev, int forcedetach,
case EP_PASS_ON:
if (!forcedetach) {
if (__ratelimit(&drbd_ratelimit_state))
- dev_err(DEV, "Local IO failed in %s."
- "Passing error on...\n", where);
+ dev_err(DEV, "Local IO failed in %s.\n", where);
break;
}
/* NOTE fall through to detach case if forcedetach set */
case EP_DETACH:
case EP_CALL_HELPER:
+ set_bit(WAS_IO_ERROR, &mdev->flags);
if (mdev->state.disk > D_FAILED) {
_drbd_set_state(_NS(mdev, disk, D_FAILED), CS_HARD, NULL);
- dev_err(DEV, "Local IO failed in %s."
- "Detaching...\n", where);
+ dev_err(DEV,
+ "Local IO failed in %s. Detaching...\n", where);
}
break;
}
@@ -1874,7 +1862,7 @@ static inline sector_t drbd_md_last_sector(struct drbd_backing_dev *bdev)
static inline sector_t drbd_get_capacity(struct block_device *bdev)
{
/* return bdev ? get_capacity(bdev->bd_disk) : 0; */
- return bdev ? bdev->bd_inode->i_size >> 9 : 0;
+ return bdev ? i_size_read(bdev->bd_inode) >> 9 : 0;
}
/**
@@ -2127,7 +2115,11 @@ static inline void put_ldev(struct drbd_conf *mdev)
__release(local);
D_ASSERT(i >= 0);
if (i == 0) {
+ if (mdev->state.disk == D_DISKLESS)
+ /* even internal references gone, safe to destroy */
+ drbd_ldev_destroy(mdev);
if (mdev->state.disk == D_FAILED)
+ /* all application IO references gone. */
drbd_go_diskless(mdev);
wake_up(&mdev->misc_wait);
}
@@ -2138,6 +2130,10 @@ static inline int _get_ldev_if_state(struct drbd_conf *mdev, enum drbd_disk_stat
{
int io_allowed;
+ /* never get a reference while D_DISKLESS */
+ if (mdev->state.disk == D_DISKLESS)
+ return 0;
+
atomic_inc(&mdev->local_cnt);
io_allowed = (mdev->state.disk >= mins);
if (!io_allowed)
@@ -2406,12 +2402,12 @@ static inline void drbd_md_flush(struct drbd_conf *mdev)
{
int r;
- if (test_bit(MD_NO_BARRIER, &mdev->flags))
+ if (test_bit(MD_NO_FUA, &mdev->flags))
return;
r = blkdev_issue_flush(mdev->ldev->md_bdev, GFP_KERNEL, NULL);
if (r) {
- set_bit(MD_NO_BARRIER, &mdev->flags);
+ set_bit(MD_NO_FUA, &mdev->flags);
dev_err(DEV, "meta data flush failed with status %d, disabling md-flushes\n", r);
}
}
diff --git a/drivers/block/drbd/drbd_main.c b/drivers/block/drbd/drbd_main.c
index c5dfe6486cf3..6be5401d0e88 100644
--- a/drivers/block/drbd/drbd_main.c
+++ b/drivers/block/drbd/drbd_main.c
@@ -835,6 +835,15 @@ static union drbd_state sanitize_state(struct drbd_conf *mdev, union drbd_state
ns.conn != C_UNCONNECTED && ns.conn != C_DISCONNECTING && ns.conn <= C_TEAR_DOWN)
ns.conn = os.conn;
+ /* we cannot fail (again) if we already detached */
+ if (ns.disk == D_FAILED && os.disk == D_DISKLESS)
+ ns.disk = D_DISKLESS;
+
+ /* if we are only D_ATTACHING yet,
+ * we can (and should) go directly to D_DISKLESS. */
+ if (ns.disk == D_FAILED && os.disk == D_ATTACHING)
+ ns.disk = D_DISKLESS;
+
/* After C_DISCONNECTING only C_STANDALONE may follow */
if (os.conn == C_DISCONNECTING && ns.conn != C_STANDALONE)
ns.conn = os.conn;
@@ -1056,7 +1065,15 @@ int __drbd_set_state(struct drbd_conf *mdev,
!test_and_set_bit(CONFIG_PENDING, &mdev->flags))
set_bit(DEVICE_DYING, &mdev->flags);
- mdev->state.i = ns.i;
+ /* if we are going -> D_FAILED or D_DISKLESS, grab one extra reference
+ * on the ldev here, to be sure the transition -> D_DISKLESS resp.
+ * drbd_ldev_destroy() won't happen before our corresponding
+ * after_state_ch works run, where we put_ldev again. */
+ if ((os.disk != D_FAILED && ns.disk == D_FAILED) ||
+ (os.disk != D_DISKLESS && ns.disk == D_DISKLESS))
+ atomic_inc(&mdev->local_cnt);
+
+ mdev->state = ns;
wake_up(&mdev->misc_wait);
wake_up(&mdev->state_wait);
@@ -1268,7 +1285,6 @@ static void after_state_ch(struct drbd_conf *mdev, union drbd_state os,
if (test_bit(NEW_CUR_UUID, &mdev->flags)) {
drbd_uuid_new_current(mdev);
clear_bit(NEW_CUR_UUID, &mdev->flags);
- drbd_md_sync(mdev);
}
spin_lock_irq(&mdev->req_lock);
_drbd_set_state(_NS(mdev, susp_fen, 0), CS_VERBOSE, NULL);
@@ -1365,63 +1381,64 @@ static void after_state_ch(struct drbd_conf *mdev, union drbd_state os,
os.disk > D_INCONSISTENT && ns.disk == D_INCONSISTENT)
drbd_queue_bitmap_io(mdev, &drbd_bmio_set_n_write, NULL, "set_n_write from invalidate");
- /* first half of local IO error */
- if (os.disk > D_FAILED && ns.disk == D_FAILED) {
- enum drbd_io_error_p eh = EP_PASS_ON;
+ /* first half of local IO error, failure to attach,
+ * or administrative detach */
+ if (os.disk != D_FAILED && ns.disk == D_FAILED) {
+ enum drbd_io_error_p eh;
+ int was_io_error;
+ /* corresponding get_ldev was in __drbd_set_state, to serialize
+ * our cleanup here with the transition to D_DISKLESS,
+ * so it is safe to dereference ldev here. */
+ eh = mdev->ldev->dc.on_io_error;
+ was_io_error = test_and_clear_bit(WAS_IO_ERROR, &mdev->flags);
+
+ /* current state still has to be D_FAILED,
+ * there is only one way out: to D_DISKLESS,
+ * and that may only happen after our put_ldev below. */
+ if (mdev->state.disk != D_FAILED)
+ dev_err(DEV,
+ "ASSERT FAILED: disk is %s during detach\n",
+ drbd_disk_str(mdev->state.disk));
if (drbd_send_state(mdev))
- dev_warn(DEV, "Notified peer that my disk is broken.\n");
+ dev_warn(DEV, "Notified peer that I am detaching my disk\n");
else
- dev_err(DEV, "Sending state for drbd_io_error() failed\n");
+ dev_err(DEV, "Sending state for detaching disk failed\n");
drbd_rs_cancel_all(mdev);
- if (get_ldev_if_state(mdev, D_FAILED)) {
- eh = mdev->ldev->dc.on_io_error;
- put_ldev(mdev);
- }
- if (eh == EP_CALL_HELPER)
+ /* In case we want to get something to stable storage still,
+ * this may be the last chance.
+ * Following put_ldev may transition to D_DISKLESS. */
+ drbd_md_sync(mdev);
+ put_ldev(mdev);
+
+ if (was_io_error && eh == EP_CALL_HELPER)
drbd_khelper(mdev, "local-io-error");
}
+ /* second half of local IO error, failure to attach,
+ * or administrative detach,
+ * after local_cnt references have reached zero again */
+ if (os.disk != D_DISKLESS && ns.disk == D_DISKLESS) {
+ /* We must still be diskless,
+ * re-attach has to be serialized with this! */
+ if (mdev->state.disk != D_DISKLESS)
+ dev_err(DEV,
+ "ASSERT FAILED: disk is %s while going diskless\n",
+ drbd_disk_str(mdev->state.disk));
- /* second half of local IO error handling,
- * after local_cnt references have reached zero: */
- if (os.disk == D_FAILED && ns.disk == D_DISKLESS) {
- mdev->rs_total = 0;
- mdev->rs_failed = 0;
- atomic_set(&mdev->rs_pending_cnt, 0);
- }
-
- if (os.disk > D_DISKLESS && ns.disk == D_DISKLESS) {
- /* We must still be diskless,
- * re-attach has to be serialized with this! */
- if (mdev->state.disk != D_DISKLESS)
- dev_err(DEV,
- "ASSERT FAILED: disk is %s while going diskless\n",
- drbd_disk_str(mdev->state.disk));
+ mdev->rs_total = 0;
+ mdev->rs_failed = 0;
+ atomic_set(&mdev->rs_pending_cnt, 0);
- /* we cannot assert local_cnt == 0 here, as get_ldev_if_state
- * will inc/dec it frequently. Since we became D_DISKLESS, no
- * one has touched the protected members anymore, though, so we
- * are safe to free them here. */
if (drbd_send_state(mdev))
- dev_warn(DEV, "Notified peer that I detached my disk.\n");
+ dev_warn(DEV, "Notified peer that I'm now diskless.\n");
else
- dev_err(DEV, "Sending state for detach failed\n");
-
- lc_destroy(mdev->resync);
- mdev->resync = NULL;
- lc_destroy(mdev->act_log);
- mdev->act_log = NULL;
- __no_warn(local,
- drbd_free_bc(mdev->ldev);
- mdev->ldev = NULL;);
-
- if (mdev->md_io_tmpp) {
- __free_page(mdev->md_io_tmpp);
- mdev->md_io_tmpp = NULL;
- }
+ dev_err(DEV, "Sending state for being diskless failed\n");
+ /* corresponding get_ldev in __drbd_set_state
+ * this may finally trigger drbd_ldev_destroy. */
+ put_ldev(mdev);
}
/* Disks got bigger while they were detached */
@@ -2772,11 +2789,6 @@ void drbd_init_set_defaults(struct drbd_conf *mdev)
drbd_set_defaults(mdev);
- /* for now, we do NOT yet support it,
- * even though we start some framework
- * to eventually support barriers */
- set_bit(NO_BARRIER_SUPP, &mdev->flags);
-
atomic_set(&mdev->ap_bio_cnt, 0);
atomic_set(&mdev->ap_pending_cnt, 0);
atomic_set(&mdev->rs_pending_cnt, 0);
@@ -2842,7 +2854,7 @@ void drbd_init_set_defaults(struct drbd_conf *mdev)
drbd_thread_init(mdev, &mdev->asender, drbd_asender);
mdev->agreed_pro_version = PRO_VERSION_MAX;
- mdev->write_ordering = WO_bio_barrier;
+ mdev->write_ordering = WO_bdev_flush;
mdev->resync_wenr = LC_FREE;
}
@@ -2899,7 +2911,6 @@ void drbd_mdev_cleanup(struct drbd_conf *mdev)
D_ASSERT(list_empty(&mdev->resync_work.list));
D_ASSERT(list_empty(&mdev->unplug_work.list));
D_ASSERT(list_empty(&mdev->go_diskless.list));
-
}
@@ -2982,7 +2993,7 @@ static int drbd_create_mempools(void)
drbd_ee_mempool = mempool_create(number,
mempool_alloc_slab, mempool_free_slab, drbd_ee_cache);
- if (drbd_request_mempool == NULL)
+ if (drbd_ee_mempool == NULL)
goto Enomem;
/* drbd's page pool */
@@ -3660,6 +3671,8 @@ void drbd_uuid_new_current(struct drbd_conf *mdev) __must_hold(local)
get_random_bytes(&val, sizeof(u64));
_drbd_uuid_set(mdev, UI_CURRENT, val);
+ /* get it to stable storage _now_ */
+ drbd_md_sync(mdev);
}
void drbd_uuid_set_bm(struct drbd_conf *mdev, u64 val) __must_hold(local)
@@ -3756,19 +3769,31 @@ static int w_bitmap_io(struct drbd_conf *mdev, struct drbd_work *w, int unused)
return 1;
}
+void drbd_ldev_destroy(struct drbd_conf *mdev)
+{
+ lc_destroy(mdev->resync);
+ mdev->resync = NULL;
+ lc_destroy(mdev->act_log);
+ mdev->act_log = NULL;
+ __no_warn(local,
+ drbd_free_bc(mdev->ldev);
+ mdev->ldev = NULL;);
+
+ if (mdev->md_io_tmpp) {
+ __free_page(mdev->md_io_tmpp);
+ mdev->md_io_tmpp = NULL;
+ }
+ clear_bit(GO_DISKLESS, &mdev->flags);
+}
+
static int w_go_diskless(struct drbd_conf *mdev, struct drbd_work *w, int unused)
{
D_ASSERT(mdev->state.disk == D_FAILED);
/* we cannot assert local_cnt == 0 here, as get_ldev_if_state will
* inc/dec it frequently. Once we are D_DISKLESS, no one will touch
- * the protected members anymore, though, so in the after_state_ch work
- * it will be safe to free them. */
+ * the protected members anymore, though, so once put_ldev reaches zero
+ * again, it will be safe to free them. */
drbd_force_state(mdev, NS(disk, D_DISKLESS));
- /* We need to wait for return of references checked out while we still
- * have been D_FAILED, though (drbd_md_sync, bitmap io). */
- wait_event(mdev->misc_wait, !atomic_read(&mdev->local_cnt));
-
- clear_bit(GO_DISKLESS, &mdev->flags);
return 1;
}
@@ -3777,9 +3802,6 @@ void drbd_go_diskless(struct drbd_conf *mdev)
D_ASSERT(mdev->state.disk == D_FAILED);
if (!test_and_set_bit(GO_DISKLESS, &mdev->flags))
drbd_queue_work(&mdev->data.work, &mdev->go_diskless);
- /* don't drbd_queue_work_front,
- * we need to serialize with the after_state_ch work
- * of the -> D_FAILED transition. */
}
/**
diff --git a/drivers/block/drbd/drbd_nl.c b/drivers/block/drbd/drbd_nl.c
index 87925e97e613..29e5c70e4e26 100644
--- a/drivers/block/drbd/drbd_nl.c
+++ b/drivers/block/drbd/drbd_nl.c
@@ -870,6 +870,11 @@ static int drbd_nl_disk_conf(struct drbd_conf *mdev, struct drbd_nl_cfg_req *nlp
retcode = ERR_DISK_CONFIGURED;
goto fail;
}
+ /* It may just now have detached because of IO error. Make sure
+ * drbd_ldev_destroy is done already, we may end up here very fast,
+ * e.g. if someone calls attach from the on-io-error handler,
+ * to realize a "hot spare" feature (not that I'd recommend that) */
+ wait_event(mdev->misc_wait, !atomic_read(&mdev->local_cnt));
/* allocation not in the IO path, cqueue thread context */
nbc = kzalloc(sizeof(struct drbd_backing_dev), GFP_KERNEL);
@@ -1098,9 +1103,9 @@ static int drbd_nl_disk_conf(struct drbd_conf *mdev, struct drbd_nl_cfg_req *nlp
/* Reset the "barriers don't work" bits here, then force meta data to
* be written, to ensure we determine if barriers are supported. */
if (nbc->dc.no_md_flush)
- set_bit(MD_NO_BARRIER, &mdev->flags);
+ set_bit(MD_NO_FUA, &mdev->flags);
else
- clear_bit(MD_NO_BARRIER, &mdev->flags);
+ clear_bit(MD_NO_FUA, &mdev->flags);
/* Point of no return reached.
* Devices and memory are no longer released by error cleanup below.
@@ -1112,8 +1117,8 @@ static int drbd_nl_disk_conf(struct drbd_conf *mdev, struct drbd_nl_cfg_req *nlp
nbc = NULL;
resync_lru = NULL;
- mdev->write_ordering = WO_bio_barrier;
- drbd_bump_write_ordering(mdev, WO_bio_barrier);
+ mdev->write_ordering = WO_bdev_flush;
+ drbd_bump_write_ordering(mdev, WO_bdev_flush);
if (drbd_md_test_flag(mdev->ldev, MDF_CRASHED_PRIMARY))
set_bit(CRASHED_PRIMARY, &mdev->flags);
@@ -1262,7 +1267,7 @@ static int drbd_nl_disk_conf(struct drbd_conf *mdev, struct drbd_nl_cfg_req *nlp
force_diskless_dec:
put_ldev(mdev);
force_diskless:
- drbd_force_state(mdev, NS(disk, D_DISKLESS));
+ drbd_force_state(mdev, NS(disk, D_FAILED));
drbd_md_sync(mdev);
release_bdev2_fail:
if (nbc)
@@ -1285,10 +1290,19 @@ static int drbd_nl_disk_conf(struct drbd_conf *mdev, struct drbd_nl_cfg_req *nlp
return 0;
}
+/* Detaching the disk is a process in multiple stages. First we need to lock
+ * out application IO, in-flight IO, IO stuck in drbd_al_begin_io.
+ * Then we transition to D_DISKLESS, and wait for put_ldev() to return all
+ * internal references as well.
+ * Only then we have finally detached. */
static int drbd_nl_detach(struct drbd_conf *mdev, struct drbd_nl_cfg_req *nlp,
struct drbd_nl_cfg_reply *reply)
{
+ drbd_suspend_io(mdev); /* so no-one is stuck in drbd_al_begin_io */
reply->ret_code = drbd_request_state(mdev, NS(disk, D_DISKLESS));
+ if (mdev->state.disk == D_DISKLESS)
+ wait_event(mdev->misc_wait, !atomic_read(&mdev->local_cnt));
+ drbd_resume_io(mdev);
return 0;
}
@@ -1953,7 +1967,6 @@ static int drbd_nl_resume_io(struct drbd_conf *mdev, struct drbd_nl_cfg_req *nlp
if (test_bit(NEW_CUR_UUID, &mdev->flags)) {
drbd_uuid_new_current(mdev);
clear_bit(NEW_CUR_UUID, &mdev->flags);
- drbd_md_sync(mdev);
}
drbd_suspend_io(mdev);
reply->ret_code = drbd_request_state(mdev, NS3(susp, 0, susp_nod, 0, susp_fen, 0));
diff --git a/drivers/block/drbd/drbd_proc.c b/drivers/block/drbd/drbd_proc.c
index ad325c5d0ce1..7e6ac307e2de 100644
--- a/drivers/block/drbd/drbd_proc.c
+++ b/drivers/block/drbd/drbd_proc.c
@@ -158,7 +158,6 @@ static int drbd_seq_show(struct seq_file *seq, void *v)
[WO_none] = 'n',
[WO_drain_io] = 'd',
[WO_bdev_flush] = 'f',
- [WO_bio_barrier] = 'b',
};
seq_printf(seq, "version: " REL_VERSION " (api:%d/proto:%d-%d)\n%s\n",
diff --git a/drivers/block/drbd/drbd_receiver.c b/drivers/block/drbd/drbd_receiver.c
index efd6169acf2f..89d8a7cc4054 100644
--- a/drivers/block/drbd/drbd_receiver.c
+++ b/drivers/block/drbd/drbd_receiver.c
@@ -36,7 +36,6 @@
#include <linux/memcontrol.h>
#include <linux/mm_inline.h>
#include <linux/slab.h>
-#include <linux/smp_lock.h>
#include <linux/pkt_sched.h>
#define __KERNEL_SYSCALLS__
#include <linux/unistd.h>
@@ -49,11 +48,6 @@
#include "drbd_vli.h"
-struct flush_work {
- struct drbd_work w;
- struct drbd_epoch *epoch;
-};
-
enum finish_epoch {
FE_STILL_LIVE,
FE_DESTROYED,
@@ -66,16 +60,6 @@ static int drbd_do_auth(struct drbd_conf *mdev);
static enum finish_epoch drbd_may_finish_epoch(struct drbd_conf *, struct drbd_epoch *, enum epoch_event);
static int e_end_block(struct drbd_conf *, struct drbd_work *, int);
-static struct drbd_epoch *previous_epoch(struct drbd_conf *mdev, struct drbd_epoch *epoch)
-{
- struct drbd_epoch *prev;
- spin_lock(&mdev->epoch_lock);
- prev = list_entry(epoch->list.prev, struct drbd_epoch, list);
- if (prev == epoch || prev == mdev->current_epoch)
- prev = NULL;
- spin_unlock(&mdev->epoch_lock);
- return prev;
-}
#define GFP_TRY (__GFP_HIGHMEM | __GFP_NOWARN)
@@ -981,7 +965,7 @@ static int drbd_recv_header(struct drbd_conf *mdev, enum drbd_packets *cmd, unsi
return TRUE;
}
-static enum finish_epoch drbd_flush_after_epoch(struct drbd_conf *mdev, struct drbd_epoch *epoch)
+static void drbd_flush(struct drbd_conf *mdev)
{
int rv;
@@ -997,24 +981,6 @@ static enum finish_epoch drbd_flush_after_epoch(struct drbd_conf *mdev, struct d
}
put_ldev(mdev);
}
-
- return drbd_may_finish_epoch(mdev, epoch, EV_BARRIER_DONE);
-}
-
-static int w_flush(struct drbd_conf *mdev, struct drbd_work *w, int cancel)
-{
- struct flush_work *fw = (struct flush_work *)w;
- struct drbd_epoch *epoch = fw->epoch;
-
- kfree(w);
-
- if (!test_and_set_bit(DE_BARRIER_IN_NEXT_EPOCH_ISSUED, &epoch->flags))
- drbd_flush_after_epoch(mdev, epoch);
-
- drbd_may_finish_epoch(mdev, epoch, EV_PUT |
- (mdev->state.conn < C_CONNECTED ? EV_CLEANUP : 0));
-
- return 1;
}
/**
@@ -1027,15 +993,13 @@ static enum finish_epoch drbd_may_finish_epoch(struct drbd_conf *mdev,
struct drbd_epoch *epoch,
enum epoch_event ev)
{
- int finish, epoch_size;
+ int epoch_size;
struct drbd_epoch *next_epoch;
- int schedule_flush = 0;
enum finish_epoch rv = FE_STILL_LIVE;
spin_lock(&mdev->epoch_lock);
do {
next_epoch = NULL;
- finish = 0;
epoch_size = atomic_read(&epoch->epoch_size);
@@ -1045,16 +1009,6 @@ static enum finish_epoch drbd_may_finish_epoch(struct drbd_conf *mdev,
break;
case EV_GOT_BARRIER_NR:
set_bit(DE_HAVE_BARRIER_NUMBER, &epoch->flags);
-
- /* Special case: If we just switched from WO_bio_barrier to
- WO_bdev_flush we should not finish the current epoch */
- if (test_bit(DE_CONTAINS_A_BARRIER, &epoch->flags) && epoch_size == 1 &&
- mdev->write_ordering != WO_bio_barrier &&
- epoch == mdev->current_epoch)
- clear_bit(DE_CONTAINS_A_BARRIER, &epoch->flags);
- break;
- case EV_BARRIER_DONE:
- set_bit(DE_BARRIER_IN_NEXT_EPOCH_DONE, &epoch->flags);
break;
case EV_BECAME_LAST:
/* nothing to do*/
@@ -1063,23 +1017,7 @@ static enum finish_epoch drbd_may_finish_epoch(struct drbd_conf *mdev,
if (epoch_size != 0 &&
atomic_read(&epoch->active) == 0 &&
- test_bit(DE_HAVE_BARRIER_NUMBER, &epoch->flags) &&
- epoch->list.prev == &mdev->current_epoch->list &&
- !test_bit(DE_IS_FINISHING, &epoch->flags)) {
- /* Nearly all conditions are met to finish that epoch... */
- if (test_bit(DE_BARRIER_IN_NEXT_EPOCH_DONE, &epoch->flags) ||
- mdev->write_ordering == WO_none ||
- (epoch_size == 1 && test_bit(DE_CONTAINS_A_BARRIER, &epoch->flags)) ||
- ev & EV_CLEANUP) {
- finish = 1;
- set_bit(DE_IS_FINISHING, &epoch->flags);
- } else if (!test_bit(DE_BARRIER_IN_NEXT_EPOCH_ISSUED, &epoch->flags) &&
- mdev->write_ordering == WO_bio_barrier) {
- atomic_inc(&epoch->active);
- schedule_flush = 1;
- }
- }
- if (finish) {
+ test_bit(DE_HAVE_BARRIER_NUMBER, &epoch->flags)) {
if (!(ev & EV_CLEANUP)) {
spin_unlock(&mdev->epoch_lock);
drbd_send_b_ack(mdev, epoch->barrier_nr, epoch_size);
@@ -1102,6 +1040,7 @@ static enum finish_epoch drbd_may_finish_epoch(struct drbd_conf *mdev,
/* atomic_set(&epoch->active, 0); is already zero */
if (rv == FE_STILL_LIVE)
rv = FE_RECYCLED;
+ wake_up(&mdev->ee_wait);
}
}
@@ -1113,22 +1052,6 @@ static enum finish_epoch drbd_may_finish_epoch(struct drbd_conf *mdev,
spin_unlock(&mdev->epoch_lock);
- if (schedule_flush) {
- struct flush_work *fw;
- fw = kmalloc(sizeof(*fw), GFP_ATOMIC);
- if (fw) {
- fw->w.cb = w_flush;
- fw->epoch = epoch;
- drbd_queue_work(&mdev->data.work, &fw->w);
- } else {
- dev_warn(DEV, "Could not kmalloc a flush_work obj\n");
- set_bit(DE_BARRIER_IN_NEXT_EPOCH_ISSUED, &epoch->flags);
- /* That is not a recursion, only one level */
- drbd_may_finish_epoch(mdev, epoch, EV_BARRIER_DONE);
- drbd_may_finish_epoch(mdev, epoch, EV_PUT);
- }
- }
-
return rv;
}
@@ -1144,19 +1067,16 @@ void drbd_bump_write_ordering(struct drbd_conf *mdev, enum write_ordering_e wo)
[WO_none] = "none",
[WO_drain_io] = "drain",
[WO_bdev_flush] = "flush",
- [WO_bio_barrier] = "barrier",
};
pwo = mdev->write_ordering;
wo = min(pwo, wo);
- if (wo == WO_bio_barrier && mdev->ldev->dc.no_disk_barrier)
- wo = WO_bdev_flush;
if (wo == WO_bdev_flush && mdev->ldev->dc.no_disk_flush)
wo = WO_drain_io;
if (wo == WO_drain_io && mdev->ldev->dc.no_disk_drain)
wo = WO_none;
mdev->write_ordering = wo;
- if (pwo != mdev->write_ordering || wo == WO_bio_barrier)
+ if (pwo != mdev->write_ordering || wo == WO_bdev_flush)
dev_info(DEV, "Method to ensure write ordering: %s\n", write_ordering_str[mdev->write_ordering]);
}
@@ -1192,7 +1112,7 @@ next_bio:
bio->bi_sector = sector;
bio->bi_bdev = mdev->ldev->backing_bdev;
/* we special case some flags in the multi-bio case, see below
- * (REQ_UNPLUG, REQ_HARDBARRIER) */
+ * (REQ_UNPLUG) */
bio->bi_rw = rw;
bio->bi_private = e;
bio->bi_end_io = drbd_endio_sec;
@@ -1226,11 +1146,6 @@ next_bio:
bio->bi_rw &= ~REQ_UNPLUG;
drbd_generic_make_request(mdev, fault_type, bio);
-
- /* strip off REQ_HARDBARRIER,
- * unless it is the first or last bio */
- if (bios && bios->bi_next)
- bios->bi_rw &= ~REQ_HARDBARRIER;
} while (bios);
maybe_kick_lo(mdev);
return 0;
@@ -1244,45 +1159,9 @@ fail:
return -ENOMEM;
}
-/**
- * w_e_reissue() - Worker callback; Resubmit a bio, without REQ_HARDBARRIER set
- * @mdev: DRBD device.
- * @w: work object.
- * @cancel: The connection will be closed anyways (unused in this callback)
- */
-int w_e_reissue(struct drbd_conf *mdev, struct drbd_work *w, int cancel) __releases(local)
-{
- struct drbd_epoch_entry *e = (struct drbd_epoch_entry *)w;
- /* We leave DE_CONTAINS_A_BARRIER and EE_IS_BARRIER in place,
- (and DE_BARRIER_IN_NEXT_EPOCH_ISSUED in the previous Epoch)
- so that we can finish that epoch in drbd_may_finish_epoch().
- That is necessary if we already have a long chain of Epochs, before
- we realize that REQ_HARDBARRIER is actually not supported */
-
- /* As long as the -ENOTSUPP on the barrier is reported immediately
- that will never trigger. If it is reported late, we will just
- print that warning and continue correctly for all future requests
- with WO_bdev_flush */
- if (previous_epoch(mdev, e->epoch))
- dev_warn(DEV, "Write ordering was not enforced (one time event)\n");
-
- /* we still have a local reference,
- * get_ldev was done in receive_Data. */
-
- e->w.cb = e_end_block;
- if (drbd_submit_ee(mdev, e, WRITE, DRBD_FAULT_DT_WR) != 0) {
- /* drbd_submit_ee fails for one reason only:
- * if was not able to allocate sufficient bios.
- * requeue, try again later. */
- e->w.cb = w_e_reissue;
- drbd_queue_work(&mdev->data.work, &e->w);
- }
- return 1;
-}
-
static int receive_Barrier(struct drbd_conf *mdev, enum drbd_packets cmd, unsigned int data_size)
{
- int rv, issue_flush;
+ int rv;
struct p_barrier *p = &mdev->data.rbuf.barrier;
struct drbd_epoch *epoch;
@@ -1300,44 +1179,40 @@ static int receive_Barrier(struct drbd_conf *mdev, enum drbd_packets cmd, unsign
* Therefore we must send the barrier_ack after the barrier request was
* completed. */
switch (mdev->write_ordering) {
- case WO_bio_barrier:
case WO_none:
if (rv == FE_RECYCLED)
return TRUE;
- break;
+
+ /* receiver context, in the writeout path of the other node.
+ * avoid potential distributed deadlock */
+ epoch = kmalloc(sizeof(struct drbd_epoch), GFP_NOIO);
+ if (epoch)
+ break;
+ else
+ dev_warn(DEV, "Allocation of an epoch failed, slowing down\n");
+ /* Fall through */
case WO_bdev_flush:
case WO_drain_io:
- if (rv == FE_STILL_LIVE) {
- set_bit(DE_BARRIER_IN_NEXT_EPOCH_ISSUED, &mdev->current_epoch->flags);
- drbd_wait_ee_list_empty(mdev, &mdev->active_ee);
- rv = drbd_flush_after_epoch(mdev, mdev->current_epoch);
- }
- if (rv == FE_RECYCLED)
- return TRUE;
-
- /* The asender will send all the ACKs and barrier ACKs out, since
- all EEs moved from the active_ee to the done_ee. We need to
- provide a new epoch object for the EEs that come in soon */
- break;
- }
-
- /* receiver context, in the writeout path of the other node.
- * avoid potential distributed deadlock */
- epoch = kmalloc(sizeof(struct drbd_epoch), GFP_NOIO);
- if (!epoch) {
- dev_warn(DEV, "Allocation of an epoch failed, slowing down\n");
- issue_flush = !test_and_set_bit(DE_BARRIER_IN_NEXT_EPOCH_ISSUED, &mdev->current_epoch->flags);
drbd_wait_ee_list_empty(mdev, &mdev->active_ee);
- if (issue_flush) {
- rv = drbd_flush_after_epoch(mdev, mdev->current_epoch);
- if (rv == FE_RECYCLED)
- return TRUE;
+ drbd_flush(mdev);
+
+ if (atomic_read(&mdev->current_epoch->epoch_size)) {
+ epoch = kmalloc(sizeof(struct drbd_epoch), GFP_NOIO);
+ if (epoch)
+ break;
}
- drbd_wait_ee_list_empty(mdev, &mdev->done_ee);
+ epoch = mdev->current_epoch;
+ wait_event(mdev->ee_wait, atomic_read(&epoch->epoch_size) == 0);
+
+ D_ASSERT(atomic_read(&epoch->active) == 0);
+ D_ASSERT(epoch->flags == 0);
return TRUE;
+ default:
+ dev_err(DEV, "Strangeness in mdev->write_ordering %d\n", mdev->write_ordering);
+ return FALSE;
}
epoch->flags = 0;
@@ -1652,15 +1527,8 @@ static int e_end_block(struct drbd_conf *mdev, struct drbd_work *w, int cancel)
{
struct drbd_epoch_entry *e = (struct drbd_epoch_entry *)w;
sector_t sector = e->sector;
- struct drbd_epoch *epoch;
int ok = 1, pcmd;
- if (e->flags & EE_IS_BARRIER) {
- epoch = previous_epoch(mdev, e->epoch);
- if (epoch)
- drbd_may_finish_epoch(mdev, epoch, EV_BARRIER_DONE + (cancel ? EV_CLEANUP : 0));
- }
-
if (mdev->net_conf->wire_protocol == DRBD_PROT_C) {
if (likely((e->flags & EE_WAS_ERROR) == 0)) {
pcmd = (mdev->state.conn >= C_SYNC_SOURCE &&
@@ -1817,27 +1685,6 @@ static int receive_Data(struct drbd_conf *mdev, enum drbd_packets cmd, unsigned
e->epoch = mdev->current_epoch;
atomic_inc(&e->epoch->epoch_size);
atomic_inc(&e->epoch->active);
-
- if (mdev->write_ordering == WO_bio_barrier && atomic_read(&e->epoch->epoch_size) == 1) {
- struct drbd_epoch *epoch;
- /* Issue a barrier if we start a new epoch, and the previous epoch
- was not a epoch containing a single request which already was
- a Barrier. */
- epoch = list_entry(e->epoch->list.prev, struct drbd_epoch, list);
- if (epoch == e->epoch) {
- set_bit(DE_CONTAINS_A_BARRIER, &e->epoch->flags);
- rw |= REQ_HARDBARRIER;
- e->flags |= EE_IS_BARRIER;
- } else {
- if (atomic_read(&epoch->epoch_size) > 1 ||
- !test_bit(DE_CONTAINS_A_BARRIER, &epoch->flags)) {
- set_bit(DE_BARRIER_IN_NEXT_EPOCH_ISSUED, &epoch->flags);
- set_bit(DE_CONTAINS_A_BARRIER, &e->epoch->flags);
- rw |= REQ_HARDBARRIER;
- e->flags |= EE_IS_BARRIER;
- }
- }
- }
spin_unlock(&mdev->epoch_lock);
dp_flags = be32_to_cpu(p->dp_flags);
@@ -1995,10 +1842,11 @@ static int receive_Data(struct drbd_conf *mdev, enum drbd_packets cmd, unsigned
break;
}
- if (mdev->state.pdsk == D_DISKLESS) {
+ if (mdev->state.pdsk < D_INCONSISTENT) {
/* In case we have the only disk of the cluster, */
drbd_set_out_of_sync(mdev, e->sector, e->size);
e->flags |= EE_CALL_AL_COMPLETE_IO;
+ e->flags &= ~EE_MAY_SET_IN_SYNC;
drbd_al_begin_io(mdev, e->sector);
}
@@ -3362,7 +3210,7 @@ static int receive_state(struct drbd_conf *mdev, enum drbd_packets cmd, unsigned
if (ns.conn == C_MASK) {
ns.conn = C_CONNECTED;
if (mdev->state.disk == D_NEGOTIATING) {
- drbd_force_state(mdev, NS(disk, D_DISKLESS));
+ drbd_force_state(mdev, NS(disk, D_FAILED));
} else if (peer_state.disk == D_NEGOTIATING) {
dev_err(DEV, "Disk attach process on the peer node was aborted.\n");
peer_state.disk = D_DISKLESS;
diff --git a/drivers/block/drbd/drbd_req.c b/drivers/block/drbd/drbd_req.c
index 9e91a2545fc8..11a75d32a2e2 100644
--- a/drivers/block/drbd/drbd_req.c
+++ b/drivers/block/drbd/drbd_req.c
@@ -258,7 +258,7 @@ void _req_may_be_done(struct drbd_request *req, struct bio_and_error *m)
if (!hlist_unhashed(&req->colision))
hlist_del(&req->colision);
else
- D_ASSERT((s & RQ_NET_MASK) == 0);
+ D_ASSERT((s & (RQ_NET_MASK & ~RQ_NET_DONE)) == 0);
/* for writes we need to do some extra housekeeping */
if (rw == WRITE)
@@ -813,7 +813,8 @@ static int drbd_make_request_common(struct drbd_conf *mdev, struct bio *bio)
mdev->state.conn >= C_CONNECTED));
if (!(local || remote) && !is_susp(mdev->state)) {
- dev_err(DEV, "IO ERROR: neither local nor remote disk\n");
+ if (__ratelimit(&drbd_ratelimit_state))
+ dev_err(DEV, "IO ERROR: neither local nor remote disk\n");
goto fail_free_complete;
}
@@ -942,12 +943,21 @@ allocate_barrier:
if (local) {
req->private_bio->bi_bdev = mdev->ldev->backing_bdev;
- if (FAULT_ACTIVE(mdev, rw == WRITE ? DRBD_FAULT_DT_WR
- : rw == READ ? DRBD_FAULT_DT_RD
- : DRBD_FAULT_DT_RA))
+ /* State may have changed since we grabbed our reference on the
+ * mdev->ldev member. Double check, and short-circuit to endio.
+ * In case the last activity log transaction failed to get on
+ * stable storage, and this is a WRITE, we may not even submit
+ * this bio. */
+ if (get_ldev(mdev)) {
+ if (FAULT_ACTIVE(mdev, rw == WRITE ? DRBD_FAULT_DT_WR
+ : rw == READ ? DRBD_FAULT_DT_RD
+ : DRBD_FAULT_DT_RA))
+ bio_endio(req->private_bio, -EIO);
+ else
+ generic_make_request(req->private_bio);
+ put_ldev(mdev);
+ } else
bio_endio(req->private_bio, -EIO);
- else
- generic_make_request(req->private_bio);
}
/* we need to plug ALWAYS since we possibly need to kick lo_dev.
@@ -1022,20 +1032,6 @@ int drbd_make_request_26(struct request_queue *q, struct bio *bio)
return 0;
}
- /* Reject barrier requests if we know the underlying device does
- * not support them.
- * XXX: Need to get this info from peer as well some how so we
- * XXX: reject if EITHER side/data/metadata area does not support them.
- *
- * because of those XXX, this is not yet enabled,
- * i.e. in drbd_init_set_defaults we set the NO_BARRIER_SUPP bit.
- */
- if (unlikely(bio->bi_rw & REQ_HARDBARRIER) && test_bit(NO_BARRIER_SUPP, &mdev->flags)) {
- /* dev_warn(DEV, "Rejecting barrier request as underlying device does not support\n"); */
- bio_endio(bio, -EOPNOTSUPP);
- return 0;
- }
-
/*
* what we "blindly" assume:
*/
diff --git a/drivers/block/drbd/drbd_worker.c b/drivers/block/drbd/drbd_worker.c
index 108d58015cd1..47d223c2409c 100644
--- a/drivers/block/drbd/drbd_worker.c
+++ b/drivers/block/drbd/drbd_worker.c
@@ -26,7 +26,6 @@
#include <linux/module.h>
#include <linux/drbd.h>
#include <linux/sched.h>
-#include <linux/smp_lock.h>
#include <linux/wait.h>
#include <linux/mm.h>
#include <linux/memcontrol.h>
@@ -102,12 +101,6 @@ void drbd_endio_read_sec_final(struct drbd_epoch_entry *e) __releases(local)
put_ldev(mdev);
}
-static int is_failed_barrier(int ee_flags)
-{
- return (ee_flags & (EE_IS_BARRIER|EE_WAS_ERROR|EE_RESUBMITTED))
- == (EE_IS_BARRIER|EE_WAS_ERROR);
-}
-
/* writes on behalf of the partner, or resync writes,
* "submitted" by the receiver, final stage. */
static void drbd_endio_write_sec_final(struct drbd_epoch_entry *e) __releases(local)
@@ -119,21 +112,6 @@ static void drbd_endio_write_sec_final(struct drbd_epoch_entry *e) __releases(lo
int is_syncer_req;
int do_al_complete_io;
- /* if this is a failed barrier request, disable use of barriers,
- * and schedule for resubmission */
- if (is_failed_barrier(e->flags)) {
- drbd_bump_write_ordering(mdev, WO_bdev_flush);
- spin_lock_irqsave(&mdev->req_lock, flags);
- list_del(&e->w.list);
- e->flags = (e->flags & ~EE_WAS_ERROR) | EE_RESUBMITTED;
- e->w.cb = w_e_reissue;
- /* put_ldev actually happens below, once we come here again. */
- __release(local);
- spin_unlock_irqrestore(&mdev->req_lock, flags);
- drbd_queue_work(&mdev->data.work, &e->w);
- return;
- }
-
D_ASSERT(e->block_id != ID_VACANT);
/* after we moved e to done_ee,
@@ -925,7 +903,7 @@ out:
drbd_md_sync(mdev);
if (test_and_clear_bit(WRITE_BM_AFTER_RESYNC, &mdev->flags)) {
- dev_warn(DEV, "Writing the whole bitmap, due to failed kmalloc\n");
+ dev_info(DEV, "Writing the whole bitmap\n");
drbd_queue_bitmap_io(mdev, &drbd_bm_write, NULL, "write from resync_finished");
}
diff --git a/drivers/block/floppy.c b/drivers/block/floppy.c
index 767107cce982..3951020e494a 100644
--- a/drivers/block/floppy.c
+++ b/drivers/block/floppy.c
@@ -4363,9 +4363,9 @@ out_unreg_blkdev:
out_put_disk:
while (dr--) {
del_timer(&motor_off_timer[dr]);
- put_disk(disks[dr]);
if (disks[dr]->queue)
blk_cleanup_queue(disks[dr]->queue);
+ put_disk(disks[dr]);
}
return err;
}
@@ -4573,8 +4573,8 @@ static void __exit floppy_module_exit(void)
device_remove_file(&floppy_device[drive].dev, &dev_attr_cmos);
platform_device_unregister(&floppy_device[drive]);
}
- put_disk(disks[drive]);
blk_cleanup_queue(disks[drive]->queue);
+ put_disk(disks[drive]);
}
del_timer_sync(&fd_timeout);
diff --git a/drivers/block/loop.c b/drivers/block/loop.c
index 6c48b3545f84..7ea0bea2f7e3 100644
--- a/drivers/block/loop.c
+++ b/drivers/block/loop.c
@@ -101,8 +101,8 @@ static int transfer_none(struct loop_device *lo, int cmd,
else
memcpy(raw_buf, loop_buf, size);
- kunmap_atomic(raw_buf, KM_USER0);
kunmap_atomic(loop_buf, KM_USER1);
+ kunmap_atomic(raw_buf, KM_USER0);
cond_resched();
return 0;
}
@@ -130,8 +130,8 @@ static int transfer_xor(struct loop_device *lo, int cmd,
for (i = 0; i < size; i++)
*out++ = *in++ ^ key[(i & 511) % keysize];
- kunmap_atomic(raw_buf, KM_USER0);
kunmap_atomic(loop_buf, KM_USER1);
+ kunmap_atomic(raw_buf, KM_USER0);
cond_resched();
return 0;
}
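The two loop.c hunks above only swap the order of the kunmap_atomic() calls: with stacked atomic kmaps, mappings have to be released in the reverse (LIFO) order they were taken. A minimal sketch of the pattern, using placeholder page arguments and the era's two-argument kmap_atomic() interface:

#include <linux/highmem.h>
#include <linux/string.h>

/* Illustration only, not loop.c code: map source first, destination
 * second, and unmap in reverse order so the atomic kmap stack unwinds
 * cleanly. */
static void copy_page_lifo(struct page *dst_page, struct page *src_page)
{
	void *src = kmap_atomic(src_page, KM_USER0);	/* mapped first  */
	void *dst = kmap_atomic(dst_page, KM_USER1);	/* mapped second */

	memcpy(dst, src, PAGE_SIZE);

	kunmap_atomic(dst, KM_USER1);	/* innermost mapping goes first */
	kunmap_atomic(src, KM_USER0);	/* outermost mapping goes last  */
}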
@@ -481,12 +481,6 @@ static int do_bio_filebacked(struct loop_device *lo, struct bio *bio)
if (bio_rw(bio) == WRITE) {
struct file *file = lo->lo_backing_file;
- /* REQ_HARDBARRIER is deprecated */
- if (bio->bi_rw & REQ_HARDBARRIER) {
- ret = -EOPNOTSUPP;
- goto out;
- }
-
if (bio->bi_rw & REQ_FLUSH) {
ret = vfs_fsync(file, 0);
if (unlikely(ret && ret != -EINVAL)) {
@@ -1049,9 +1043,9 @@ static int loop_clr_fd(struct loop_device *lo, struct block_device *bdev)
if (bdev)
invalidate_bdev(bdev);
set_capacity(lo->lo_disk, 0);
+ loop_sysfs_exit(lo);
if (bdev) {
bd_set_size(bdev, 0);
- loop_sysfs_exit(lo);
/* let user-space know about this change */
kobject_uevent(&disk_to_dev(bdev->bd_disk)->kobj, KOBJ_CHANGE);
}
diff --git a/drivers/block/rbd.c b/drivers/block/rbd.c
index 6ec9d53806c5..008d4a00b50d 100644
--- a/drivers/block/rbd.c
+++ b/drivers/block/rbd.c
@@ -21,80 +21,9 @@
- Instructions for use
- --------------------
+ For usage instructions, please refer to:
- 1) Map a Linux block device to an existing rbd image.
-
- Usage: <mon ip addr> <options> <pool name> <rbd image name> [snap name]
-
- $ echo "192.168.0.1 name=admin rbd foo" > /sys/class/rbd/add
-
- The snapshot name can be "-" or omitted to map the image read/write.
-
- 2) List all active blkdev<->object mappings.
-
- In this example, we have performed step #1 twice, creating two blkdevs,
- mapped to two separate rados objects in the rados rbd pool
-
- $ cat /sys/class/rbd/list
- #id major client_name pool name snap KB
- 0 254 client4143 rbd foo - 1024000
-
- The columns, in order, are:
- - blkdev unique id
- - blkdev assigned major
- - rados client id
- - rados pool name
- - rados block device name
- - mapped snapshot ("-" if none)
- - device size in KB
-
-
- 3) Create a snapshot.
-
- Usage: <blkdev id> <snapname>
-
- $ echo "0 mysnap" > /sys/class/rbd/snap_create
-
-
- 4) Listing a snapshot.
-
- $ cat /sys/class/rbd/snaps_list
- #id snap KB
- 0 - 1024000 (*)
- 0 foo 1024000
-
- The columns, in order, are:
- - blkdev unique id
- - snapshot name, '-' means none (active read/write version)
- - size of device at time of snapshot
- - the (*) indicates this is the active version
-
- 5) Rollback to snapshot.
-
- Usage: <blkdev id> <snapname>
-
- $ echo "0 mysnap" > /sys/class/rbd/snap_rollback
-
-
- 6) Mapping an image using snapshot.
-
- A snapshot mapping is read-only. This is being done by passing
- snap=<snapname> to the options when adding a device.
-
- $ echo "192.168.0.1 name=admin,snap=mysnap rbd foo" > /sys/class/rbd/add
-
-
- 7) Remove an active blkdev<->rbd image mapping.
-
- In this example, we remove the mapping with blkdev unique id 1.
-
- $ echo 1 > /sys/class/rbd/remove
-
-
- NOTE: The actual creation and deletion of rados objects is outside the scope
- of this driver.
+ Documentation/ABI/testing/sysfs-bus-rbd
*/
@@ -163,6 +92,14 @@ struct rbd_request {
u64 len;
};
+struct rbd_snap {
+ struct device dev;
+ const char *name;
+ size_t size;
+ struct list_head node;
+ u64 id;
+};
+
/*
* a single device
*/
@@ -193,21 +130,60 @@ struct rbd_device {
int read_only;
struct list_head node;
+
+ /* list of snapshots */
+ struct list_head snaps;
+
+ /* sysfs related */
+ struct device dev;
+};
+
+static struct bus_type rbd_bus_type = {
+ .name = "rbd",
};
static spinlock_t node_lock; /* protects client get/put */
-static struct class *class_rbd; /* /sys/class/rbd */
static DEFINE_MUTEX(ctl_mutex); /* Serialize open/close/setup/teardown */
static LIST_HEAD(rbd_dev_list); /* devices */
static LIST_HEAD(rbd_client_list); /* clients */
+static int __rbd_init_snaps_header(struct rbd_device *rbd_dev);
+static void rbd_dev_release(struct device *dev);
+static ssize_t rbd_snap_rollback(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf,
+ size_t size);
+static ssize_t rbd_snap_add(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf,
+ size_t count);
+static void __rbd_remove_snap_dev(struct rbd_device *rbd_dev,
+ struct rbd_snap *snap);
+
+
+static struct rbd_device *dev_to_rbd(struct device *dev)
+{
+ return container_of(dev, struct rbd_device, dev);
+}
+
+static struct device *rbd_get_dev(struct rbd_device *rbd_dev)
+{
+ return get_device(&rbd_dev->dev);
+}
+
+static void rbd_put_dev(struct rbd_device *rbd_dev)
+{
+ put_device(&rbd_dev->dev);
+}
static int rbd_open(struct block_device *bdev, fmode_t mode)
{
struct gendisk *disk = bdev->bd_disk;
struct rbd_device *rbd_dev = disk->private_data;
+ rbd_get_dev(rbd_dev);
+
set_device_ro(bdev, rbd_dev->read_only);
if ((mode & FMODE_WRITE) && rbd_dev->read_only)
@@ -216,9 +192,19 @@ static int rbd_open(struct block_device *bdev, fmode_t mode)
return 0;
}
+static int rbd_release(struct gendisk *disk, fmode_t mode)
+{
+ struct rbd_device *rbd_dev = disk->private_data;
+
+ rbd_put_dev(rbd_dev);
+
+ return 0;
+}
+
static const struct block_device_operations rbd_bd_ops = {
.owner = THIS_MODULE,
.open = rbd_open,
+ .release = rbd_release,
};
/*
@@ -361,7 +347,6 @@ static int rbd_header_from_disk(struct rbd_image_header *header,
int ret = -ENOMEM;
init_rwsem(&header->snap_rwsem);
-
header->snap_names_len = le64_to_cpu(ondisk->snap_names_len);
header->snapc = kmalloc(sizeof(struct ceph_snap_context) +
snap_count *
@@ -1256,10 +1241,20 @@ bad:
return -ERANGE;
}
+static void __rbd_remove_all_snaps(struct rbd_device *rbd_dev)
+{
+ struct rbd_snap *snap;
+
+ while (!list_empty(&rbd_dev->snaps)) {
+ snap = list_first_entry(&rbd_dev->snaps, struct rbd_snap, node);
+ __rbd_remove_snap_dev(rbd_dev, snap);
+ }
+}
+
/*
* only read the first part of the ondisk header, without the snaps info
*/
-static int rbd_update_snaps(struct rbd_device *rbd_dev)
+static int __rbd_update_snaps(struct rbd_device *rbd_dev)
{
int ret;
struct rbd_image_header h;
@@ -1280,12 +1275,15 @@ static int rbd_update_snaps(struct rbd_device *rbd_dev)
rbd_dev->header.total_snaps = h.total_snaps;
rbd_dev->header.snapc = h.snapc;
rbd_dev->header.snap_names = h.snap_names;
+ rbd_dev->header.snap_names_len = h.snap_names_len;
rbd_dev->header.snap_sizes = h.snap_sizes;
rbd_dev->header.snapc->seq = snap_seq;
+ ret = __rbd_init_snaps_header(rbd_dev);
+
up_write(&rbd_dev->header.snap_rwsem);
- return 0;
+ return ret;
}
static int rbd_init_disk(struct rbd_device *rbd_dev)
@@ -1300,6 +1298,11 @@ static int rbd_init_disk(struct rbd_device *rbd_dev)
if (rc)
return rc;
+ /* no need to lock here, as rbd_dev is not registered yet */
+ rc = __rbd_init_snaps_header(rbd_dev);
+ if (rc)
+ return rc;
+
rc = rbd_header_set_snap(rbd_dev, rbd_dev->snap_name, &total_size);
if (rc)
return rc;
@@ -1343,54 +1346,360 @@ out:
return rc;
}
-/********************************************************************
- * /sys/class/rbd/
- * add map rados objects to blkdev
- * remove unmap rados objects
- * list show mappings
- *******************************************************************/
+/*
+ sysfs
+*/
+
+static ssize_t rbd_size_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct rbd_device *rbd_dev = dev_to_rbd(dev);
+
+ return sprintf(buf, "%llu\n", (unsigned long long)rbd_dev->header.image_size);
+}
+
+static ssize_t rbd_major_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct rbd_device *rbd_dev = dev_to_rbd(dev);
-static void class_rbd_release(struct class *cls)
+ return sprintf(buf, "%d\n", rbd_dev->major);
+}
+
+static ssize_t rbd_client_id_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
{
- kfree(cls);
+ struct rbd_device *rbd_dev = dev_to_rbd(dev);
+
+ return sprintf(buf, "client%lld\n", ceph_client_id(rbd_dev->client));
}
-static ssize_t class_rbd_list(struct class *c,
- struct class_attribute *attr,
- char *data)
+static ssize_t rbd_pool_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
{
- int n = 0;
- struct list_head *tmp;
- int max = PAGE_SIZE;
+ struct rbd_device *rbd_dev = dev_to_rbd(dev);
+
+ return sprintf(buf, "%s\n", rbd_dev->pool_name);
+}
+
+static ssize_t rbd_name_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct rbd_device *rbd_dev = dev_to_rbd(dev);
+
+ return sprintf(buf, "%s\n", rbd_dev->obj);
+}
+
+static ssize_t rbd_snap_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct rbd_device *rbd_dev = dev_to_rbd(dev);
+
+ return sprintf(buf, "%s\n", rbd_dev->snap_name);
+}
+
+static ssize_t rbd_image_refresh(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf,
+ size_t size)
+{
+ struct rbd_device *rbd_dev = dev_to_rbd(dev);
+ int rc;
+ int ret = size;
mutex_lock_nested(&ctl_mutex, SINGLE_DEPTH_NESTING);
- n += snprintf(data, max,
- "#id\tmajor\tclient_name\tpool\tname\tsnap\tKB\n");
+ rc = __rbd_update_snaps(rbd_dev);
+ if (rc < 0)
+ ret = rc;
- list_for_each(tmp, &rbd_dev_list) {
- struct rbd_device *rbd_dev;
+ mutex_unlock(&ctl_mutex);
+ return ret;
+}
- rbd_dev = list_entry(tmp, struct rbd_device, node);
- n += snprintf(data+n, max-n,
- "%d\t%d\tclient%lld\t%s\t%s\t%s\t%lld\n",
- rbd_dev->id,
- rbd_dev->major,
- ceph_client_id(rbd_dev->client),
- rbd_dev->pool_name,
- rbd_dev->obj, rbd_dev->snap_name,
- rbd_dev->header.image_size >> 10);
- if (n == max)
+static DEVICE_ATTR(size, S_IRUGO, rbd_size_show, NULL);
+static DEVICE_ATTR(major, S_IRUGO, rbd_major_show, NULL);
+static DEVICE_ATTR(client_id, S_IRUGO, rbd_client_id_show, NULL);
+static DEVICE_ATTR(pool, S_IRUGO, rbd_pool_show, NULL);
+static DEVICE_ATTR(name, S_IRUGO, rbd_name_show, NULL);
+static DEVICE_ATTR(refresh, S_IWUSR, NULL, rbd_image_refresh);
+static DEVICE_ATTR(current_snap, S_IRUGO, rbd_snap_show, NULL);
+static DEVICE_ATTR(create_snap, S_IWUSR, NULL, rbd_snap_add);
+static DEVICE_ATTR(rollback_snap, S_IWUSR, NULL, rbd_snap_rollback);
+
+static struct attribute *rbd_attrs[] = {
+ &dev_attr_size.attr,
+ &dev_attr_major.attr,
+ &dev_attr_client_id.attr,
+ &dev_attr_pool.attr,
+ &dev_attr_name.attr,
+ &dev_attr_current_snap.attr,
+ &dev_attr_refresh.attr,
+ &dev_attr_create_snap.attr,
+ &dev_attr_rollback_snap.attr,
+ NULL
+};
+
+static struct attribute_group rbd_attr_group = {
+ .attrs = rbd_attrs,
+};
+
+static const struct attribute_group *rbd_attr_groups[] = {
+ &rbd_attr_group,
+ NULL
+};
+
+static void rbd_sysfs_dev_release(struct device *dev)
+{
+}
+
+static struct device_type rbd_device_type = {
+ .name = "rbd",
+ .groups = rbd_attr_groups,
+ .release = rbd_sysfs_dev_release,
+};
+
+
+/*
+ sysfs - snapshots
+*/
+
+static ssize_t rbd_snap_size_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct rbd_snap *snap = container_of(dev, struct rbd_snap, dev);
+
+ return sprintf(buf, "%lld\n", (long long)snap->size);
+}
+
+static ssize_t rbd_snap_id_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct rbd_snap *snap = container_of(dev, struct rbd_snap, dev);
+
+ return sprintf(buf, "%lld\n", (long long)snap->id);
+}
+
+static DEVICE_ATTR(snap_size, S_IRUGO, rbd_snap_size_show, NULL);
+static DEVICE_ATTR(snap_id, S_IRUGO, rbd_snap_id_show, NULL);
+
+static struct attribute *rbd_snap_attrs[] = {
+ &dev_attr_snap_size.attr,
+ &dev_attr_snap_id.attr,
+ NULL,
+};
+
+static struct attribute_group rbd_snap_attr_group = {
+ .attrs = rbd_snap_attrs,
+};
+
+static void rbd_snap_dev_release(struct device *dev)
+{
+ struct rbd_snap *snap = container_of(dev, struct rbd_snap, dev);
+ kfree(snap->name);
+ kfree(snap);
+}
+
+static const struct attribute_group *rbd_snap_attr_groups[] = {
+ &rbd_snap_attr_group,
+ NULL
+};
+
+static struct device_type rbd_snap_device_type = {
+ .groups = rbd_snap_attr_groups,
+ .release = rbd_snap_dev_release,
+};
+
+static void __rbd_remove_snap_dev(struct rbd_device *rbd_dev,
+ struct rbd_snap *snap)
+{
+ list_del(&snap->node);
+ device_unregister(&snap->dev);
+}
+
+static int rbd_register_snap_dev(struct rbd_device *rbd_dev,
+ struct rbd_snap *snap,
+ struct device *parent)
+{
+ struct device *dev = &snap->dev;
+ int ret;
+
+ dev->type = &rbd_snap_device_type;
+ dev->parent = parent;
+ dev->release = rbd_snap_dev_release;
+ dev_set_name(dev, "snap_%s", snap->name);
+ ret = device_register(dev);
+
+ return ret;
+}
+
+static int __rbd_add_snap_dev(struct rbd_device *rbd_dev,
+ int i, const char *name,
+ struct rbd_snap **snapp)
+{
+ int ret;
+ struct rbd_snap *snap = kzalloc(sizeof(*snap), GFP_KERNEL);
+ if (!snap)
+ return -ENOMEM;
+ snap->name = kstrdup(name, GFP_KERNEL);
+ snap->size = rbd_dev->header.snap_sizes[i];
+ snap->id = rbd_dev->header.snapc->snaps[i];
+ if (device_is_registered(&rbd_dev->dev)) {
+ ret = rbd_register_snap_dev(rbd_dev, snap,
+ &rbd_dev->dev);
+ if (ret < 0)
+ goto err;
+ }
+ *snapp = snap;
+ return 0;
+err:
+ kfree(snap->name);
+ kfree(snap);
+ return ret;
+}
+
+/*
+ * search for the previous snap in a null delimited string list
+ */
+const char *rbd_prev_snap_name(const char *name, const char *start)
+{
+ if (name < start + 2)
+ return NULL;
+
+ name -= 2;
+ while (*name) {
+ if (name == start)
+ return start;
+ name--;
+ }
+ return name + 1;
+}
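rbd_prev_snap_name() above walks a flat buffer of NUL-delimited snapshot names backwards, starting one byte past the end. A small stand-alone sketch of the same walk, using made-up example data ("new", "mid", "old" stored newest-first, as the header does):

#include <stdio.h>

/* Same walk as rbd_prev_snap_name(): step back over the previous
 * entry's terminating NUL, then scan back to the NUL (or the buffer
 * start) that precedes the entry we want.  Illustration only. */
static const char *prev_name(const char *name, const char *start)
{
	if (name < start + 2)
		return NULL;
	name -= 2;
	while (*name) {
		if (name == start)
			return start;
		name--;
	}
	return name + 1;
}

int main(void)
{
	static const char names[] = "new\0mid\0old";	/* newest first */
	const char *p = names + sizeof(names);		/* one past the final NUL */

	while ((p = prev_name(p, names)) != NULL)
		printf("%s\n", p);			/* prints old, mid, new */
	return 0;
}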
+
+/*
+ * compare the old list of snapshots that we have to what's in the header
+ * and update it accordingly. Note that the header holds the snapshots
+ * in reverse order (from newest to oldest) and we need to go from
+ * older to newer so that we don't get a duplicate snap name while
+ * doing so (e.g., a snapshot was removed and a new one recreated
+ * with the same name).
+ */
+static int __rbd_init_snaps_header(struct rbd_device *rbd_dev)
+{
+ const char *name, *first_name;
+ int i = rbd_dev->header.total_snaps;
+ struct rbd_snap *snap, *old_snap = NULL;
+ int ret;
+ struct list_head *p, *n;
+
+ first_name = rbd_dev->header.snap_names;
+ name = first_name + rbd_dev->header.snap_names_len;
+
+ list_for_each_prev_safe(p, n, &rbd_dev->snaps) {
+ u64 cur_id;
+
+ old_snap = list_entry(p, struct rbd_snap, node);
+
+ if (i)
+ cur_id = rbd_dev->header.snapc->snaps[i - 1];
+
+ if (!i || old_snap->id < cur_id) {
+ /* old_snap->id was skipped, thus was removed */
+ __rbd_remove_snap_dev(rbd_dev, old_snap);
+ continue;
+ }
+ if (old_snap->id == cur_id) {
+ /* we have this snapshot already */
+ i--;
+ name = rbd_prev_snap_name(name, first_name);
+ continue;
+ }
+ for (; i > 0;
+ i--, name = rbd_prev_snap_name(name, first_name)) {
+ if (!name) {
+ WARN_ON(1);
+ return -EINVAL;
+ }
+ cur_id = rbd_dev->header.snapc->snaps[i];
+ /* snapshot removal? handle it above */
+ if (cur_id >= old_snap->id)
+ break;
+ /* a new snapshot */
+ ret = __rbd_add_snap_dev(rbd_dev, i - 1, name, &snap);
+ if (ret < 0)
+ return ret;
+
+ /* note that we add it backwards, so we use n and not p */
+ list_add(&snap->node, n);
+ p = &snap->node;
+ }
+ }
+ /* we're done going over the old snap list, just add what's left */
+ for (; i > 0; i--) {
+ name = rbd_prev_snap_name(name, first_name);
+ if (!name) {
+ WARN_ON(1);
+ return -EINVAL;
+ }
+ ret = __rbd_add_snap_dev(rbd_dev, i - 1, name, &snap);
+ if (ret < 0)
+ return ret;
+ list_add(&snap->node, &rbd_dev->snaps);
+ }
+
+ return 0;
+}
+
+
+static void rbd_root_dev_release(struct device *dev)
+{
+}
+
+static struct device rbd_root_dev = {
+ .init_name = "rbd",
+ .release = rbd_root_dev_release,
+};
+
+static int rbd_bus_add_dev(struct rbd_device *rbd_dev)
+{
+ int ret = -ENOMEM;
+ struct device *dev;
+ struct rbd_snap *snap;
+
+ mutex_lock_nested(&ctl_mutex, SINGLE_DEPTH_NESTING);
+ dev = &rbd_dev->dev;
+
+ dev->bus = &rbd_bus_type;
+ dev->type = &rbd_device_type;
+ dev->parent = &rbd_root_dev;
+ dev->release = rbd_dev_release;
+ dev_set_name(dev, "%d", rbd_dev->id);
+ ret = device_register(dev);
+ if (ret < 0)
+ goto done_free;
+
+ list_for_each_entry(snap, &rbd_dev->snaps, node) {
+ ret = rbd_register_snap_dev(rbd_dev, snap,
+ &rbd_dev->dev);
+ if (ret < 0)
break;
}
mutex_unlock(&ctl_mutex);
- return n;
+ return 0;
+done_free:
+ mutex_unlock(&ctl_mutex);
+ return ret;
}
-static ssize_t class_rbd_add(struct class *c,
- struct class_attribute *attr,
- const char *buf, size_t count)
+static void rbd_bus_del_dev(struct rbd_device *rbd_dev)
+{
+ device_unregister(&rbd_dev->dev);
+}
+
+static ssize_t rbd_add(struct bus_type *bus, const char *buf, size_t count)
{
struct ceph_osd_client *osdc;
struct rbd_device *rbd_dev;
@@ -1419,6 +1728,7 @@ static ssize_t class_rbd_add(struct class *c,
/* static rbd_device initialization */
spin_lock_init(&rbd_dev->lock);
INIT_LIST_HEAD(&rbd_dev->node);
+ INIT_LIST_HEAD(&rbd_dev->snaps);
/* generate unique id: find highest unique id, add one */
mutex_lock_nested(&ctl_mutex, SINGLE_DEPTH_NESTING);
@@ -1478,6 +1788,9 @@ static ssize_t class_rbd_add(struct class *c,
}
rbd_dev->major = irc;
+ rc = rbd_bus_add_dev(rbd_dev);
+ if (rc)
+ goto err_out_disk;
/* set up and announce blkdev mapping */
rc = rbd_init_disk(rbd_dev);
if (rc)
@@ -1487,6 +1800,8 @@ static ssize_t class_rbd_add(struct class *c,
err_out_blkdev:
unregister_blkdev(rbd_dev->major, rbd_dev->name);
+err_out_disk:
+ rbd_free_disk(rbd_dev);
err_out_client:
rbd_put_client(rbd_dev);
mutex_lock_nested(&ctl_mutex, SINGLE_DEPTH_NESTING);
@@ -1518,35 +1833,10 @@ static struct rbd_device *__rbd_get_dev(unsigned long id)
return NULL;
}
-static ssize_t class_rbd_remove(struct class *c,
- struct class_attribute *attr,
- const char *buf,
- size_t count)
+static void rbd_dev_release(struct device *dev)
{
- struct rbd_device *rbd_dev = NULL;
- int target_id, rc;
- unsigned long ul;
-
- rc = strict_strtoul(buf, 10, &ul);
- if (rc)
- return rc;
-
- /* convert to int; abort if we lost anything in the conversion */
- target_id = (int) ul;
- if (target_id != ul)
- return -EINVAL;
-
- /* remove object from list immediately */
- mutex_lock_nested(&ctl_mutex, SINGLE_DEPTH_NESTING);
-
- rbd_dev = __rbd_get_dev(target_id);
- if (rbd_dev)
- list_del_init(&rbd_dev->node);
-
- mutex_unlock(&ctl_mutex);
-
- if (!rbd_dev)
- return -ENOENT;
+ struct rbd_device *rbd_dev =
+ container_of(dev, struct rbd_device, dev);
rbd_put_client(rbd_dev);
@@ -1557,67 +1847,11 @@ static ssize_t class_rbd_remove(struct class *c,
/* release module ref */
module_put(THIS_MODULE);
-
- return count;
}
-static ssize_t class_rbd_snaps_list(struct class *c,
- struct class_attribute *attr,
- char *data)
-{
- struct rbd_device *rbd_dev = NULL;
- struct list_head *tmp;
- struct rbd_image_header *header;
- int i, n = 0, max = PAGE_SIZE;
- int ret;
-
- mutex_lock_nested(&ctl_mutex, SINGLE_DEPTH_NESTING);
-
- n += snprintf(data, max, "#id\tsnap\tKB\n");
-
- list_for_each(tmp, &rbd_dev_list) {
- char *names, *p;
- struct ceph_snap_context *snapc;
-
- rbd_dev = list_entry(tmp, struct rbd_device, node);
- header = &rbd_dev->header;
-
- down_read(&header->snap_rwsem);
-
- names = header->snap_names;
- snapc = header->snapc;
-
- n += snprintf(data + n, max - n, "%d\t%s\t%lld%s\n",
- rbd_dev->id, RBD_SNAP_HEAD_NAME,
- header->image_size >> 10,
- (!rbd_dev->cur_snap ? " (*)" : ""));
- if (n == max)
- break;
-
- p = names;
- for (i = 0; i < header->total_snaps; i++, p += strlen(p) + 1) {
- n += snprintf(data + n, max - n, "%d\t%s\t%lld%s\n",
- rbd_dev->id, p, header->snap_sizes[i] >> 10,
- (rbd_dev->cur_snap &&
- (snap_index(header, i) == rbd_dev->cur_snap) ?
- " (*)" : ""));
- if (n == max)
- break;
- }
-
- up_read(&header->snap_rwsem);
- }
-
-
- ret = n;
- mutex_unlock(&ctl_mutex);
- return ret;
-}
-
-static ssize_t class_rbd_snaps_refresh(struct class *c,
- struct class_attribute *attr,
- const char *buf,
- size_t count)
+static ssize_t rbd_remove(struct bus_type *bus,
+ const char *buf,
+ size_t count)
{
struct rbd_device *rbd_dev = NULL;
int target_id, rc;
@@ -1641,95 +1875,70 @@ static ssize_t class_rbd_snaps_refresh(struct class *c,
goto done;
}
- rc = rbd_update_snaps(rbd_dev);
- if (rc < 0)
- ret = rc;
+ list_del_init(&rbd_dev->node);
+
+ __rbd_remove_all_snaps(rbd_dev);
+ rbd_bus_del_dev(rbd_dev);
done:
mutex_unlock(&ctl_mutex);
return ret;
}
-static ssize_t class_rbd_snap_create(struct class *c,
- struct class_attribute *attr,
- const char *buf,
- size_t count)
+static ssize_t rbd_snap_add(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf,
+ size_t count)
{
- struct rbd_device *rbd_dev = NULL;
- int target_id, ret;
- char *name;
-
- name = kmalloc(RBD_MAX_SNAP_NAME_LEN + 1, GFP_KERNEL);
+ struct rbd_device *rbd_dev = dev_to_rbd(dev);
+ int ret;
+ char *name = kmalloc(count + 1, GFP_KERNEL);
if (!name)
return -ENOMEM;
- /* parse snaps add command */
- if (sscanf(buf, "%d "
- "%" __stringify(RBD_MAX_SNAP_NAME_LEN) "s",
- &target_id,
- name) != 2) {
- ret = -EINVAL;
- goto done;
- }
+ snprintf(name, count, "%s", buf);
mutex_lock_nested(&ctl_mutex, SINGLE_DEPTH_NESTING);
- rbd_dev = __rbd_get_dev(target_id);
- if (!rbd_dev) {
- ret = -ENOENT;
- goto done_unlock;
- }
-
ret = rbd_header_add_snap(rbd_dev,
name, GFP_KERNEL);
if (ret < 0)
goto done_unlock;
- ret = rbd_update_snaps(rbd_dev);
+ ret = __rbd_update_snaps(rbd_dev);
if (ret < 0)
goto done_unlock;
ret = count;
done_unlock:
mutex_unlock(&ctl_mutex);
-done:
kfree(name);
return ret;
}
-static ssize_t class_rbd_rollback(struct class *c,
- struct class_attribute *attr,
- const char *buf,
- size_t count)
+static ssize_t rbd_snap_rollback(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf,
+ size_t count)
{
- struct rbd_device *rbd_dev = NULL;
- int target_id, ret;
+ struct rbd_device *rbd_dev = dev_to_rbd(dev);
+ int ret;
u64 snapid;
- char snap_name[RBD_MAX_SNAP_NAME_LEN];
u64 cur_ofs;
- char *seg_name;
+ char *seg_name = NULL;
+ char *snap_name = kmalloc(count + 1, GFP_KERNEL);
+ ret = -ENOMEM;
+ if (!snap_name)
+ return ret;
/* parse snaps add command */
- if (sscanf(buf, "%d "
- "%" __stringify(RBD_MAX_SNAP_NAME_LEN) "s",
- &target_id,
- snap_name) != 2) {
- return -EINVAL;
- }
-
- ret = -ENOMEM;
+ snprintf(snap_name, count, "%s", buf);
seg_name = kmalloc(RBD_MAX_SEG_NAME_LEN + 1, GFP_NOIO);
if (!seg_name)
- return ret;
+ goto done;
mutex_lock_nested(&ctl_mutex, SINGLE_DEPTH_NESTING);
- rbd_dev = __rbd_get_dev(target_id);
- if (!rbd_dev) {
- ret = -ENOENT;
- goto done_unlock;
- }
-
ret = snap_by_name(&rbd_dev->header, snap_name, &snapid, NULL);
if (ret < 0)
goto done_unlock;
@@ -1750,7 +1959,7 @@ static ssize_t class_rbd_rollback(struct class *c,
seg_name, ret);
}
- ret = rbd_update_snaps(rbd_dev);
+ ret = __rbd_update_snaps(rbd_dev);
if (ret < 0)
goto done_unlock;
@@ -1758,57 +1967,42 @@ static ssize_t class_rbd_rollback(struct class *c,
done_unlock:
mutex_unlock(&ctl_mutex);
+done:
kfree(seg_name);
+ kfree(snap_name);
return ret;
}
-static struct class_attribute class_rbd_attrs[] = {
- __ATTR(add, 0200, NULL, class_rbd_add),
- __ATTR(remove, 0200, NULL, class_rbd_remove),
- __ATTR(list, 0444, class_rbd_list, NULL),
- __ATTR(snaps_refresh, 0200, NULL, class_rbd_snaps_refresh),
- __ATTR(snap_create, 0200, NULL, class_rbd_snap_create),
- __ATTR(snaps_list, 0444, class_rbd_snaps_list, NULL),
- __ATTR(snap_rollback, 0200, NULL, class_rbd_rollback),
+static struct bus_attribute rbd_bus_attrs[] = {
+ __ATTR(add, S_IWUSR, NULL, rbd_add),
+ __ATTR(remove, S_IWUSR, NULL, rbd_remove),
__ATTR_NULL
};
/*
* create control files in sysfs
- * /sys/class/rbd/...
+ * /sys/bus/rbd/...
*/
static int rbd_sysfs_init(void)
{
- int ret = -ENOMEM;
+ int ret;
- class_rbd = kzalloc(sizeof(*class_rbd), GFP_KERNEL);
- if (!class_rbd)
- goto out;
+ rbd_bus_type.bus_attrs = rbd_bus_attrs;
- class_rbd->name = DRV_NAME;
- class_rbd->owner = THIS_MODULE;
- class_rbd->class_release = class_rbd_release;
- class_rbd->class_attrs = class_rbd_attrs;
+ ret = bus_register(&rbd_bus_type);
+ if (ret < 0)
+ return ret;
- ret = class_register(class_rbd);
- if (ret)
- goto out_class;
- return 0;
+ ret = device_register(&rbd_root_dev);
-out_class:
- kfree(class_rbd);
- class_rbd = NULL;
- pr_err(DRV_NAME ": failed to create class rbd\n");
-out:
return ret;
}
static void rbd_sysfs_cleanup(void)
{
- if (class_rbd)
- class_destroy(class_rbd);
- class_rbd = NULL;
+ device_unregister(&rbd_root_dev);
+ bus_unregister(&rbd_bus_type);
}
int __init rbd_init(void)
diff --git a/drivers/block/xen-blkfront.c b/drivers/block/xen-blkfront.c
index 4b33a18c32e0..4f9e22f29138 100644
--- a/drivers/block/xen-blkfront.c
+++ b/drivers/block/xen-blkfront.c
@@ -65,7 +65,7 @@ enum blkif_state {
struct blk_shadow {
struct blkif_request req;
- unsigned long request;
+ struct request *request;
unsigned long frame[BLKIF_MAX_SEGMENTS_PER_REQUEST];
};
@@ -136,7 +136,7 @@ static void add_id_to_freelist(struct blkfront_info *info,
unsigned long id)
{
info->shadow[id].req.id = info->shadow_free;
- info->shadow[id].request = 0;
+ info->shadow[id].request = NULL;
info->shadow_free = id;
}
@@ -245,14 +245,11 @@ static int blkif_ioctl(struct block_device *bdev, fmode_t mode,
}
/*
- * blkif_queue_request
+ * Generate a Xen blkfront IO request from a blk layer request. Reads
+ * and writes are handled as expected. Since we lack a loose flush
+ * request, we map flushes into a full ordered barrier.
*
- * request block io
- *
- * id: for guest use only.
- * operation: BLKIF_OP_{READ,WRITE,PROBE}
- * buffer: buffer to read/write into. this should be a
- * virtual address in the guest os.
+ * @req: a request struct
*/
static int blkif_queue_request(struct request *req)
{
@@ -281,7 +278,7 @@ static int blkif_queue_request(struct request *req)
/* Fill out a communications ring structure. */
ring_req = RING_GET_REQUEST(&info->ring, info->ring.req_prod_pvt);
id = get_id_from_freelist(info);
- info->shadow[id].request = (unsigned long)req;
+ info->shadow[id].request = req;
ring_req->id = id;
ring_req->sector_number = (blkif_sector_t)blk_rq_pos(req);
@@ -289,8 +286,18 @@ static int blkif_queue_request(struct request *req)
ring_req->operation = rq_data_dir(req) ?
BLKIF_OP_WRITE : BLKIF_OP_READ;
- if (req->cmd_flags & REQ_HARDBARRIER)
+
+ if (req->cmd_flags & (REQ_FLUSH | REQ_FUA)) {
+ /*
+ * Ideally we could just do an unordered
+ * flush-to-disk, but all we have is a full write
+ * barrier at the moment. However, a barrier write is
+ * a superset of FUA, so we can implement it the same
+ * way. (It's also a FLUSH+FUA, since it is
+ * guaranteed ordered WRT previous writes.)
+ */
ring_req->operation = BLKIF_OP_WRITE_BARRIER;
+ }
ring_req->nr_segments = blk_rq_map_sg(req->q, req, info->sg);
BUG_ON(ring_req->nr_segments > BLKIF_MAX_SEGMENTS_PER_REQUEST);
@@ -636,7 +643,7 @@ static irqreturn_t blkif_interrupt(int irq, void *dev_id)
bret = RING_GET_RESPONSE(&info->ring, i);
id = bret->id;
- req = (struct request *)info->shadow[id].request;
+ req = info->shadow[id].request;
blkif_completion(&info->shadow[id]);
@@ -649,6 +656,16 @@ static irqreturn_t blkif_interrupt(int irq, void *dev_id)
printk(KERN_WARNING "blkfront: %s: write barrier op failed\n",
info->gd->disk_name);
error = -EOPNOTSUPP;
+ }
+ if (unlikely(bret->status == BLKIF_RSP_ERROR &&
+ info->shadow[id].req.nr_segments == 0)) {
+ printk(KERN_WARNING "blkfront: %s: empty write barrier op failed\n",
+ info->gd->disk_name);
+ error = -EOPNOTSUPP;
+ }
+ if (unlikely(error)) {
+ if (error == -EOPNOTSUPP)
+ error = 0;
info->feature_flush = 0;
xlvbd_flush(info);
}
@@ -901,7 +918,7 @@ static int blkif_recover(struct blkfront_info *info)
/* Stage 3: Find pending requests and requeue them. */
for (i = 0; i < BLK_RING_SIZE; i++) {
/* Not in use? */
- if (copy[i].request == 0)
+ if (!copy[i].request)
continue;
/* Grab a request slot and copy shadow state into it. */
@@ -918,9 +935,7 @@ static int blkif_recover(struct blkfront_info *info)
req->seg[j].gref,
info->xbdev->otherend_id,
pfn_to_mfn(info->shadow[req->id].frame[j]),
- rq_data_dir(
- (struct request *)
- info->shadow[req->id].request));
+ rq_data_dir(info->shadow[req->id].request));
info->shadow[req->id].req = *req;
info->ring.req_prod_pvt++;
@@ -1069,14 +1084,8 @@ static void blkfront_connect(struct blkfront_info *info)
*/
info->feature_flush = 0;
- /*
- * The driver doesn't properly handled empty flushes, so
- * lets disable barrier support for now.
- */
-#if 0
if (!err && barrier)
- info->feature_flush = REQ_FLUSH;
-#endif
+ info->feature_flush = REQ_FLUSH | REQ_FUA;
err = xlvbd_alloc_gendisk(sectors, info, binfo, sector_size);
if (err) {
@@ -1112,6 +1121,8 @@ static void blkback_changed(struct xenbus_device *dev,
case XenbusStateInitialising:
case XenbusStateInitWait:
case XenbusStateInitialised:
+ case XenbusStateReconfiguring:
+ case XenbusStateReconfigured:
case XenbusStateUnknown:
case XenbusStateClosed:
break;
diff --git a/drivers/block/xsysace.c b/drivers/block/xsysace.c
index 6e968cd4893c..829161edae53 100644
--- a/drivers/block/xsysace.c
+++ b/drivers/block/xsysace.c
@@ -1225,7 +1225,8 @@ ace_of_probe(struct platform_device *op, const struct of_device_id *match)
bus_width = ACE_BUS_WIDTH_8;
/* Call the bus-independent setup code */
- return ace_alloc(&op->dev, id ? *id : 0, physaddr, irq, bus_width);
+ return ace_alloc(&op->dev, id ? be32_to_cpup(id) : 0,
+ physaddr, irq, bus_width);
}
static int __devexit ace_of_remove(struct platform_device *op)
diff --git a/drivers/block/z2ram.c b/drivers/block/z2ram.c
index dcd4cfcf4126..a22e3f895947 100644
--- a/drivers/block/z2ram.c
+++ b/drivers/block/z2ram.c
@@ -80,8 +80,10 @@ static void do_z2_request(struct request_queue *q)
int err = 0;
if (start + len > z2ram_size) {
- printk( KERN_ERR DEVICE_NAME ": bad access: block=%lu, count=%u\n",
- blk_rq_pos(req), blk_rq_cur_sectors(req));
+ pr_err(DEVICE_NAME ": bad access: block=%llu, "
+ "count=%u\n",
+ (unsigned long long)blk_rq_pos(req),
+ blk_rq_cur_sectors(req));
err = -EIO;
goto done;
}
diff --git a/drivers/bluetooth/btusb.c b/drivers/bluetooth/btusb.c
index d120a5c1c093..ab3894f742c3 100644
--- a/drivers/bluetooth/btusb.c
+++ b/drivers/bluetooth/btusb.c
@@ -68,6 +68,9 @@ static struct usb_device_id btusb_table[] = {
/* Apple MacBookPro6,2 */
{ USB_DEVICE(0x05ac, 0x8218) },
+ /* Apple MacBookAir3,1, MacBookAir3,2 */
+ { USB_DEVICE(0x05ac, 0x821b) },
+
/* AVM BlueFRITZ! USB v2.0 */
{ USB_DEVICE(0x057c, 0x3800) },
@@ -1029,6 +1032,8 @@ static int btusb_probe(struct usb_interface *intf,
usb_set_intfdata(intf, data);
+ usb_enable_autosuspend(interface_to_usbdev(intf));
+
return 0;
}
diff --git a/drivers/cdrom/gdrom.c b/drivers/cdrom/gdrom.c
index 3af6516919b7..de65915308fb 100644
--- a/drivers/cdrom/gdrom.c
+++ b/drivers/cdrom/gdrom.c
@@ -142,18 +142,18 @@ static int gdrom_hardreset(struct cdrom_device_info *cd_info);
static bool gdrom_is_busy(void)
{
- return (ctrl_inb(GDROM_ALTSTATUS_REG) & 0x80) != 0;
+ return (__raw_readb(GDROM_ALTSTATUS_REG) & 0x80) != 0;
}
static bool gdrom_data_request(void)
{
- return (ctrl_inb(GDROM_ALTSTATUS_REG) & 0x88) == 8;
+ return (__raw_readb(GDROM_ALTSTATUS_REG) & 0x88) == 8;
}
static bool gdrom_wait_clrbusy(void)
{
unsigned long timeout = jiffies + GDROM_DEFAULT_TIMEOUT;
- while ((ctrl_inb(GDROM_ALTSTATUS_REG) & 0x80) &&
+ while ((__raw_readb(GDROM_ALTSTATUS_REG) & 0x80) &&
(time_before(jiffies, timeout)))
cpu_relax();
return time_before(jiffies, timeout + 1);
@@ -181,14 +181,14 @@ static void gdrom_identifydevice(void *buf)
gdrom_getsense(NULL);
return;
}
- ctrl_outb(GDROM_COM_IDDEV, GDROM_STATUSCOMMAND_REG);
+ __raw_writeb(GDROM_COM_IDDEV, GDROM_STATUSCOMMAND_REG);
if (!gdrom_wait_busy_sleeps()) {
gdrom_getsense(NULL);
return;
}
/* now read in the data */
for (c = 0; c < 40; c++)
- data[c] = ctrl_inw(GDROM_DATA_REG);
+ data[c] = __raw_readw(GDROM_DATA_REG);
}
static void gdrom_spicommand(void *spi_string, int buflen)
@@ -197,21 +197,21 @@ static void gdrom_spicommand(void *spi_string, int buflen)
unsigned long timeout;
/* ensure IRQ_WAIT is set */
- ctrl_outb(0x08, GDROM_ALTSTATUS_REG);
+ __raw_writeb(0x08, GDROM_ALTSTATUS_REG);
/* specify how many bytes we expect back */
- ctrl_outb(buflen & 0xFF, GDROM_BCL_REG);
- ctrl_outb((buflen >> 8) & 0xFF, GDROM_BCH_REG);
+ __raw_writeb(buflen & 0xFF, GDROM_BCL_REG);
+ __raw_writeb((buflen >> 8) & 0xFF, GDROM_BCH_REG);
/* other parameters */
- ctrl_outb(0, GDROM_INTSEC_REG);
- ctrl_outb(0, GDROM_SECNUM_REG);
- ctrl_outb(0, GDROM_ERROR_REG);
+ __raw_writeb(0, GDROM_INTSEC_REG);
+ __raw_writeb(0, GDROM_SECNUM_REG);
+ __raw_writeb(0, GDROM_ERROR_REG);
/* Wait until we can go */
if (!gdrom_wait_clrbusy()) {
gdrom_getsense(NULL);
return;
}
timeout = jiffies + GDROM_DEFAULT_TIMEOUT;
- ctrl_outb(GDROM_COM_PACKET, GDROM_STATUSCOMMAND_REG);
+ __raw_writeb(GDROM_COM_PACKET, GDROM_STATUSCOMMAND_REG);
while (!gdrom_data_request() && time_before(jiffies, timeout))
cpu_relax();
if (!time_before(jiffies, timeout + 1)) {
@@ -233,10 +233,10 @@ static char gdrom_execute_diagnostic(void)
gdrom_hardreset(gd.cd_info);
if (!gdrom_wait_clrbusy())
return 0;
- ctrl_outb(GDROM_COM_EXECDIAG, GDROM_STATUSCOMMAND_REG);
+ __raw_writeb(GDROM_COM_EXECDIAG, GDROM_STATUSCOMMAND_REG);
if (!gdrom_wait_busy_sleeps())
return 0;
- return ctrl_inb(GDROM_ERROR_REG);
+ return __raw_readb(GDROM_ERROR_REG);
}
/*
@@ -385,7 +385,7 @@ static void gdrom_release(struct cdrom_device_info *cd_info)
static int gdrom_drivestatus(struct cdrom_device_info *cd_info, int ignore)
{
/* read the sense key */
- char sense = ctrl_inb(GDROM_ERROR_REG);
+ char sense = __raw_readb(GDROM_ERROR_REG);
sense &= 0xF0;
if (sense == 0)
return CDS_DISC_OK;
@@ -398,16 +398,16 @@ static int gdrom_drivestatus(struct cdrom_device_info *cd_info, int ignore)
static int gdrom_mediachanged(struct cdrom_device_info *cd_info, int ignore)
{
/* check the sense key */
- return (ctrl_inb(GDROM_ERROR_REG) & 0xF0) == 0x60;
+ return (__raw_readb(GDROM_ERROR_REG) & 0xF0) == 0x60;
}
/* reset the G1 bus */
static int gdrom_hardreset(struct cdrom_device_info *cd_info)
{
int count;
- ctrl_outl(0x1fffff, GDROM_RESET_REG);
+ __raw_writel(0x1fffff, GDROM_RESET_REG);
for (count = 0xa0000000; count < 0xa0200000; count += 4)
- ctrl_inl(count);
+ __raw_readl(count);
return 0;
}
@@ -536,7 +536,7 @@ static const struct block_device_operations gdrom_bdops = {
static irqreturn_t gdrom_command_interrupt(int irq, void *dev_id)
{
- gd.status = ctrl_inb(GDROM_STATUSCOMMAND_REG);
+ gd.status = __raw_readb(GDROM_STATUSCOMMAND_REG);
if (gd.pending != 1)
return IRQ_HANDLED;
gd.pending = 0;
@@ -546,7 +546,7 @@ static irqreturn_t gdrom_command_interrupt(int irq, void *dev_id)
static irqreturn_t gdrom_dma_interrupt(int irq, void *dev_id)
{
- gd.status = ctrl_inb(GDROM_STATUSCOMMAND_REG);
+ gd.status = __raw_readb(GDROM_STATUSCOMMAND_REG);
if (gd.transfer != 1)
return IRQ_HANDLED;
gd.transfer = 0;
@@ -600,10 +600,10 @@ static void gdrom_readdisk_dma(struct work_struct *work)
spin_unlock(&gdrom_lock);
block = blk_rq_pos(req)/GD_TO_BLK + GD_SESSION_OFFSET;
block_cnt = blk_rq_sectors(req)/GD_TO_BLK;
- ctrl_outl(virt_to_phys(req->buffer), GDROM_DMA_STARTADDR_REG);
- ctrl_outl(block_cnt * GDROM_HARD_SECTOR, GDROM_DMA_LENGTH_REG);
- ctrl_outl(1, GDROM_DMA_DIRECTION_REG);
- ctrl_outl(1, GDROM_DMA_ENABLE_REG);
+ __raw_writel(virt_to_phys(req->buffer), GDROM_DMA_STARTADDR_REG);
+ __raw_writel(block_cnt * GDROM_HARD_SECTOR, GDROM_DMA_LENGTH_REG);
+ __raw_writel(1, GDROM_DMA_DIRECTION_REG);
+ __raw_writel(1, GDROM_DMA_ENABLE_REG);
read_command->cmd[2] = (block >> 16) & 0xFF;
read_command->cmd[3] = (block >> 8) & 0xFF;
read_command->cmd[4] = block & 0xFF;
@@ -611,18 +611,18 @@ static void gdrom_readdisk_dma(struct work_struct *work)
read_command->cmd[9] = (block_cnt >> 8) & 0xFF;
read_command->cmd[10] = block_cnt & 0xFF;
/* set for DMA */
- ctrl_outb(1, GDROM_ERROR_REG);
+ __raw_writeb(1, GDROM_ERROR_REG);
/* other registers */
- ctrl_outb(0, GDROM_SECNUM_REG);
- ctrl_outb(0, GDROM_BCL_REG);
- ctrl_outb(0, GDROM_BCH_REG);
- ctrl_outb(0, GDROM_DSEL_REG);
- ctrl_outb(0, GDROM_INTSEC_REG);
+ __raw_writeb(0, GDROM_SECNUM_REG);
+ __raw_writeb(0, GDROM_BCL_REG);
+ __raw_writeb(0, GDROM_BCH_REG);
+ __raw_writeb(0, GDROM_DSEL_REG);
+ __raw_writeb(0, GDROM_INTSEC_REG);
/* Wait for registers to reset after any previous activity */
timeout = jiffies + HZ / 2;
while (gdrom_is_busy() && time_before(jiffies, timeout))
cpu_relax();
- ctrl_outb(GDROM_COM_PACKET, GDROM_STATUSCOMMAND_REG);
+ __raw_writeb(GDROM_COM_PACKET, GDROM_STATUSCOMMAND_REG);
timeout = jiffies + HZ / 2;
/* Wait for packet command to finish */
while (gdrom_is_busy() && time_before(jiffies, timeout))
@@ -632,11 +632,11 @@ static void gdrom_readdisk_dma(struct work_struct *work)
outsw(GDROM_DATA_REG, &read_command->cmd, 6);
timeout = jiffies + HZ / 2;
/* Wait for any pending DMA to finish */
- while (ctrl_inb(GDROM_DMA_STATUS_REG) &&
+ while (__raw_readb(GDROM_DMA_STATUS_REG) &&
time_before(jiffies, timeout))
cpu_relax();
/* start transfer */
- ctrl_outb(1, GDROM_DMA_STATUS_REG);
+ __raw_writeb(1, GDROM_DMA_STATUS_REG);
wait_event_interruptible_timeout(request_queue,
gd.transfer == 0, GDROM_DEFAULT_TIMEOUT);
err = gd.transfer ? -EIO : 0;
@@ -714,11 +714,11 @@ free_id:
/* set the default mode for DMA transfer */
static int __devinit gdrom_init_dma_mode(void)
{
- ctrl_outb(0x13, GDROM_ERROR_REG);
- ctrl_outb(0x22, GDROM_INTSEC_REG);
+ __raw_writeb(0x13, GDROM_ERROR_REG);
+ __raw_writeb(0x22, GDROM_INTSEC_REG);
if (!gdrom_wait_clrbusy())
return -EBUSY;
- ctrl_outb(0xEF, GDROM_STATUSCOMMAND_REG);
+ __raw_writeb(0xEF, GDROM_STATUSCOMMAND_REG);
if (!gdrom_wait_busy_sleeps())
return -EBUSY;
/* Memory protection setting for GDROM DMA
@@ -728,8 +728,8 @@ static int __devinit gdrom_init_dma_mode(void)
* Bits 6 - 0 end of transfer range in 1 MB blocks OR'ed with 0x80
* (0x40 | 0x80) = start range at 0x0C000000
* (0x7F | 0x80) = end range at 0x0FFFFFFF */
- ctrl_outl(0x8843407F, GDROM_DMA_ACCESS_CTRL_REG);
- ctrl_outl(9, GDROM_DMA_WAIT_REG); /* DMA word setting */
+ __raw_writel(0x8843407F, GDROM_DMA_ACCESS_CTRL_REG);
+ __raw_writel(9, GDROM_DMA_WAIT_REG); /* DMA word setting */
return 0;
}
diff --git a/drivers/char/Makefile b/drivers/char/Makefile
index 3a9c01416839..ba53ec956c95 100644
--- a/drivers/char/Makefile
+++ b/drivers/char/Makefile
@@ -2,24 +2,10 @@
# Makefile for the kernel character device drivers.
#
-#
-# This file contains the font map for the default (hardware) font
-#
-FONTMAPFILE = cp437.uni
-
-obj-y += mem.o random.o tty_io.o n_tty.o tty_ioctl.o tty_ldisc.o tty_buffer.o tty_port.o
-
-obj-y += tty_mutex.o
-obj-$(CONFIG_LEGACY_PTYS) += pty.o
-obj-$(CONFIG_UNIX98_PTYS) += pty.o
+obj-y += mem.o random.o
obj-$(CONFIG_TTY_PRINTK) += ttyprintk.o
obj-y += misc.o
-obj-$(CONFIG_VT) += vt_ioctl.o vc_screen.o selection.o keyboard.o
obj-$(CONFIG_BFIN_JTAG_COMM) += bfin_jtag_comm.o
-obj-$(CONFIG_CONSOLE_TRANSLATIONS) += consolemap.o consolemap_deftbl.o
-obj-$(CONFIG_HW_CONSOLE) += vt.o defkeymap.o
-obj-$(CONFIG_AUDIT) += tty_audit.o
-obj-$(CONFIG_MAGIC_SYSRQ) += sysrq.o
obj-$(CONFIG_MVME147_SCC) += generic_serial.o vme_scc.o
obj-$(CONFIG_MVME162_SCC) += generic_serial.o vme_scc.o
obj-$(CONFIG_BVME6000_SCC) += generic_serial.o vme_scc.o
@@ -41,8 +27,6 @@ obj-$(CONFIG_ISI) += isicom.o
obj-$(CONFIG_SYNCLINK) += synclink.o
obj-$(CONFIG_SYNCLINKMP) += synclinkmp.o
obj-$(CONFIG_SYNCLINK_GT) += synclink_gt.o
-obj-$(CONFIG_N_HDLC) += n_hdlc.o
-obj-$(CONFIG_N_GSM) += n_gsm.o
obj-$(CONFIG_AMIGA_BUILTIN_SERIAL) += amiserial.o
obj-$(CONFIG_SX) += sx.o generic_serial.o
obj-$(CONFIG_RIO) += rio/ generic_serial.o
@@ -74,7 +58,6 @@ obj-$(CONFIG_PRINTER) += lp.o
obj-$(CONFIG_APM_EMULATION) += apm-emulation.o
obj-$(CONFIG_DTLK) += dtlk.o
-obj-$(CONFIG_R3964) += n_r3964.o
obj-$(CONFIG_APPLICOM) += applicom.o
obj-$(CONFIG_SONYPI) += sonypi.o
obj-$(CONFIG_RTC) += rtc.o
@@ -115,28 +98,3 @@ obj-$(CONFIG_RAMOOPS) += ramoops.o
obj-$(CONFIG_JS_RTC) += js-rtc.o
js-rtc-y = rtc.o
-
-# Files generated that shall be removed upon make clean
-clean-files := consolemap_deftbl.c defkeymap.c
-
-quiet_cmd_conmk = CONMK $@
- cmd_conmk = scripts/conmakehash $< > $@
-
-$(obj)/consolemap_deftbl.c: $(src)/$(FONTMAPFILE)
- $(call cmd,conmk)
-
-$(obj)/defkeymap.o: $(obj)/defkeymap.c
-
-# Uncomment if you're changing the keymap and have an appropriate
-# loadkeys version for the map. By default, we'll use the shipped
-# versions.
-# GENERATE_KEYMAP := 1
-
-ifdef GENERATE_KEYMAP
-
-$(obj)/defkeymap.c: $(obj)/%.c: $(src)/%.map
- loadkeys --mktable $< > $@.tmp
- sed -e 's/^static *//' $@.tmp > $@
- rm $@.tmp
-
-endif
diff --git a/drivers/char/agp/Makefile b/drivers/char/agp/Makefile
index 627f542827c7..8eb56e273e75 100644
--- a/drivers/char/agp/Makefile
+++ b/drivers/char/agp/Makefile
@@ -13,6 +13,7 @@ obj-$(CONFIG_AGP_HP_ZX1) += hp-agp.o
obj-$(CONFIG_AGP_PARISC) += parisc-agp.o
obj-$(CONFIG_AGP_I460) += i460-agp.o
obj-$(CONFIG_AGP_INTEL) += intel-agp.o
+obj-$(CONFIG_AGP_INTEL) += intel-gtt.o
obj-$(CONFIG_AGP_NVIDIA) += nvidia-agp.o
obj-$(CONFIG_AGP_SGI_TIOCA) += sgi-agp.o
obj-$(CONFIG_AGP_SIS) += sis-agp.o
diff --git a/drivers/char/agp/agp.h b/drivers/char/agp/agp.h
index 120490949997..5259065f3c79 100644
--- a/drivers/char/agp/agp.h
+++ b/drivers/char/agp/agp.h
@@ -121,11 +121,6 @@ struct agp_bridge_driver {
void (*agp_destroy_pages)(struct agp_memory *);
int (*agp_type_to_mask_type) (struct agp_bridge_data *, int);
void (*chipset_flush)(struct agp_bridge_data *);
-
- int (*agp_map_page)(struct page *page, dma_addr_t *ret);
- void (*agp_unmap_page)(struct page *page, dma_addr_t dma);
- int (*agp_map_memory)(struct agp_memory *mem);
- void (*agp_unmap_memory)(struct agp_memory *mem);
};
struct agp_bridge_data {
diff --git a/drivers/char/agp/amd-k7-agp.c b/drivers/char/agp/amd-k7-agp.c
index b6b1568314c8..b1b4362bc648 100644
--- a/drivers/char/agp/amd-k7-agp.c
+++ b/drivers/char/agp/amd-k7-agp.c
@@ -309,7 +309,8 @@ static int amd_insert_memory(struct agp_memory *mem, off_t pg_start, int type)
num_entries = A_SIZE_LVL2(agp_bridge->current_size)->num_entries;
- if (type != 0 || mem->type != 0)
+ if (type != mem->type ||
+ agp_bridge->driver->agp_type_to_mask_type(agp_bridge, type))
return -EINVAL;
if ((pg_start + mem->page_count) > num_entries)
@@ -348,7 +349,8 @@ static int amd_remove_memory(struct agp_memory *mem, off_t pg_start, int type)
unsigned long __iomem *cur_gatt;
unsigned long addr;
- if (type != 0 || mem->type != 0)
+ if (type != mem->type ||
+ agp_bridge->driver->agp_type_to_mask_type(agp_bridge, type))
return -EINVAL;
for (i = pg_start; i < (mem->page_count + pg_start); i++) {
diff --git a/drivers/char/agp/backend.c b/drivers/char/agp/backend.c
index ee4f855611b6..f27d0d0816d3 100644
--- a/drivers/char/agp/backend.c
+++ b/drivers/char/agp/backend.c
@@ -151,17 +151,7 @@ static int agp_backend_initialize(struct agp_bridge_data *bridge)
}
bridge->scratch_page_page = page;
- if (bridge->driver->agp_map_page) {
- if (bridge->driver->agp_map_page(page,
- &bridge->scratch_page_dma)) {
- dev_err(&bridge->dev->dev,
- "unable to dma-map scratch page\n");
- rc = -ENOMEM;
- goto err_out_nounmap;
- }
- } else {
- bridge->scratch_page_dma = page_to_phys(page);
- }
+ bridge->scratch_page_dma = page_to_phys(page);
bridge->scratch_page = bridge->driver->mask_memory(bridge,
bridge->scratch_page_dma, 0);
@@ -204,12 +194,6 @@ static int agp_backend_initialize(struct agp_bridge_data *bridge)
return 0;
err_out:
- if (bridge->driver->needs_scratch_page &&
- bridge->driver->agp_unmap_page) {
- bridge->driver->agp_unmap_page(bridge->scratch_page_page,
- bridge->scratch_page_dma);
- }
-err_out_nounmap:
if (bridge->driver->needs_scratch_page) {
void *va = page_address(bridge->scratch_page_page);
@@ -240,10 +224,6 @@ static void agp_backend_cleanup(struct agp_bridge_data *bridge)
bridge->driver->needs_scratch_page) {
void *va = page_address(bridge->scratch_page_page);
- if (bridge->driver->agp_unmap_page)
- bridge->driver->agp_unmap_page(bridge->scratch_page_page,
- bridge->scratch_page_dma);
-
bridge->driver->agp_destroy_page(va, AGP_PAGE_DESTROY_UNMAP);
bridge->driver->agp_destroy_page(va, AGP_PAGE_DESTROY_FREE);
}
diff --git a/drivers/char/agp/frontend.c b/drivers/char/agp/frontend.c
index 43412c03969e..3cb4539a98b2 100644
--- a/drivers/char/agp/frontend.c
+++ b/drivers/char/agp/frontend.c
@@ -39,7 +39,6 @@
#include <linux/mm.h>
#include <linux/fs.h>
#include <linux/sched.h>
-#include <linux/smp_lock.h>
#include <asm/uaccess.h>
#include <asm/pgtable.h>
#include "agp.h"
diff --git a/drivers/char/agp/generic.c b/drivers/char/agp/generic.c
index 64255cef8a7d..4956f1c8f9d5 100644
--- a/drivers/char/agp/generic.c
+++ b/drivers/char/agp/generic.c
@@ -437,11 +437,6 @@ int agp_bind_memory(struct agp_memory *curr, off_t pg_start)
curr->is_flushed = true;
}
- if (curr->bridge->driver->agp_map_memory) {
- ret_val = curr->bridge->driver->agp_map_memory(curr);
- if (ret_val)
- return ret_val;
- }
ret_val = curr->bridge->driver->insert_memory(curr, pg_start, curr->type);
if (ret_val != 0)
@@ -483,9 +478,6 @@ int agp_unbind_memory(struct agp_memory *curr)
if (ret_val != 0)
return ret_val;
- if (curr->bridge->driver->agp_unmap_memory)
- curr->bridge->driver->agp_unmap_memory(curr);
-
curr->is_bound = false;
curr->pg_start = 0;
spin_lock(&curr->bridge->mapped_lock);
diff --git a/drivers/char/agp/intel-agp.c b/drivers/char/agp/intel-agp.c
index cd18493c9527..e72f49d52202 100644
--- a/drivers/char/agp/intel-agp.c
+++ b/drivers/char/agp/intel-agp.c
@@ -12,9 +12,6 @@
#include <asm/smp.h>
#include "agp.h"
#include "intel-agp.h"
-#include <linux/intel-gtt.h>
-
-#include "intel-gtt.c"
int intel_agp_enabled;
EXPORT_SYMBOL(intel_agp_enabled);
@@ -703,179 +700,37 @@ static const struct agp_bridge_driver intel_7505_driver = {
.agp_type_to_mask_type = agp_generic_type_to_mask_type,
};
-static int find_gmch(u16 device)
-{
- struct pci_dev *gmch_device;
-
- gmch_device = pci_get_device(PCI_VENDOR_ID_INTEL, device, NULL);
- if (gmch_device && PCI_FUNC(gmch_device->devfn) != 0) {
- gmch_device = pci_get_device(PCI_VENDOR_ID_INTEL,
- device, gmch_device);
- }
-
- if (!gmch_device)
- return 0;
-
- intel_private.pcidev = gmch_device;
- return 1;
-}
-
/* Table to describe Intel GMCH and AGP/PCIE GART drivers. At least one of
* driver and gmch_driver must be non-null, and find_gmch will determine
* which one should be used if a gmch_chip_id is present.
*/
-static const struct intel_driver_description {
+static const struct intel_agp_driver_description {
unsigned int chip_id;
- unsigned int gmch_chip_id;
char *name;
const struct agp_bridge_driver *driver;
- const struct agp_bridge_driver *gmch_driver;
} intel_agp_chipsets[] = {
- { PCI_DEVICE_ID_INTEL_82443LX_0, 0, "440LX", &intel_generic_driver, NULL },
- { PCI_DEVICE_ID_INTEL_82443BX_0, 0, "440BX", &intel_generic_driver, NULL },
- { PCI_DEVICE_ID_INTEL_82443GX_0, 0, "440GX", &intel_generic_driver, NULL },
- { PCI_DEVICE_ID_INTEL_82810_MC1, PCI_DEVICE_ID_INTEL_82810_IG1, "i810",
- NULL, &intel_810_driver },
- { PCI_DEVICE_ID_INTEL_82810_MC3, PCI_DEVICE_ID_INTEL_82810_IG3, "i810",
- NULL, &intel_810_driver },
- { PCI_DEVICE_ID_INTEL_82810E_MC, PCI_DEVICE_ID_INTEL_82810E_IG, "i810",
- NULL, &intel_810_driver },
- { PCI_DEVICE_ID_INTEL_82815_MC, PCI_DEVICE_ID_INTEL_82815_CGC, "i815",
- &intel_815_driver, &intel_810_driver },
- { PCI_DEVICE_ID_INTEL_82820_HB, 0, "i820", &intel_820_driver, NULL },
- { PCI_DEVICE_ID_INTEL_82820_UP_HB, 0, "i820", &intel_820_driver, NULL },
- { PCI_DEVICE_ID_INTEL_82830_HB, PCI_DEVICE_ID_INTEL_82830_CGC, "830M",
- &intel_830mp_driver, &intel_830_driver },
- { PCI_DEVICE_ID_INTEL_82840_HB, 0, "i840", &intel_840_driver, NULL },
- { PCI_DEVICE_ID_INTEL_82845_HB, 0, "845G", &intel_845_driver, NULL },
- { PCI_DEVICE_ID_INTEL_82845G_HB, PCI_DEVICE_ID_INTEL_82845G_IG, "830M",
- &intel_845_driver, &intel_830_driver },
- { PCI_DEVICE_ID_INTEL_82850_HB, 0, "i850", &intel_850_driver, NULL },
- { PCI_DEVICE_ID_INTEL_82854_HB, PCI_DEVICE_ID_INTEL_82854_IG, "854",
- &intel_845_driver, &intel_830_driver },
- { PCI_DEVICE_ID_INTEL_82855PM_HB, 0, "855PM", &intel_845_driver, NULL },
- { PCI_DEVICE_ID_INTEL_82855GM_HB, PCI_DEVICE_ID_INTEL_82855GM_IG, "855GM",
- &intel_845_driver, &intel_830_driver },
- { PCI_DEVICE_ID_INTEL_82860_HB, 0, "i860", &intel_860_driver, NULL },
- { PCI_DEVICE_ID_INTEL_82865_HB, PCI_DEVICE_ID_INTEL_82865_IG, "865",
- &intel_845_driver, &intel_830_driver },
- { PCI_DEVICE_ID_INTEL_82875_HB, 0, "i875", &intel_845_driver, NULL },
- { PCI_DEVICE_ID_INTEL_E7221_HB, PCI_DEVICE_ID_INTEL_E7221_IG, "E7221 (i915)",
- NULL, &intel_915_driver },
- { PCI_DEVICE_ID_INTEL_82915G_HB, PCI_DEVICE_ID_INTEL_82915G_IG, "915G",
- NULL, &intel_915_driver },
- { PCI_DEVICE_ID_INTEL_82915GM_HB, PCI_DEVICE_ID_INTEL_82915GM_IG, "915GM",
- NULL, &intel_915_driver },
- { PCI_DEVICE_ID_INTEL_82945G_HB, PCI_DEVICE_ID_INTEL_82945G_IG, "945G",
- NULL, &intel_915_driver },
- { PCI_DEVICE_ID_INTEL_82945GM_HB, PCI_DEVICE_ID_INTEL_82945GM_IG, "945GM",
- NULL, &intel_915_driver },
- { PCI_DEVICE_ID_INTEL_82945GME_HB, PCI_DEVICE_ID_INTEL_82945GME_IG, "945GME",
- NULL, &intel_915_driver },
- { PCI_DEVICE_ID_INTEL_82946GZ_HB, PCI_DEVICE_ID_INTEL_82946GZ_IG, "946GZ",
- NULL, &intel_i965_driver },
- { PCI_DEVICE_ID_INTEL_82G35_HB, PCI_DEVICE_ID_INTEL_82G35_IG, "G35",
- NULL, &intel_i965_driver },
- { PCI_DEVICE_ID_INTEL_82965Q_HB, PCI_DEVICE_ID_INTEL_82965Q_IG, "965Q",
- NULL, &intel_i965_driver },
- { PCI_DEVICE_ID_INTEL_82965G_HB, PCI_DEVICE_ID_INTEL_82965G_IG, "965G",
- NULL, &intel_i965_driver },
- { PCI_DEVICE_ID_INTEL_82965GM_HB, PCI_DEVICE_ID_INTEL_82965GM_IG, "965GM",
- NULL, &intel_i965_driver },
- { PCI_DEVICE_ID_INTEL_82965GME_HB, PCI_DEVICE_ID_INTEL_82965GME_IG, "965GME/GLE",
- NULL, &intel_i965_driver },
- { PCI_DEVICE_ID_INTEL_7505_0, 0, "E7505", &intel_7505_driver, NULL },
- { PCI_DEVICE_ID_INTEL_7205_0, 0, "E7205", &intel_7505_driver, NULL },
- { PCI_DEVICE_ID_INTEL_G33_HB, PCI_DEVICE_ID_INTEL_G33_IG, "G33",
- NULL, &intel_g33_driver },
- { PCI_DEVICE_ID_INTEL_Q35_HB, PCI_DEVICE_ID_INTEL_Q35_IG, "Q35",
- NULL, &intel_g33_driver },
- { PCI_DEVICE_ID_INTEL_Q33_HB, PCI_DEVICE_ID_INTEL_Q33_IG, "Q33",
- NULL, &intel_g33_driver },
- { PCI_DEVICE_ID_INTEL_PINEVIEW_M_HB, PCI_DEVICE_ID_INTEL_PINEVIEW_M_IG, "GMA3150",
- NULL, &intel_g33_driver },
- { PCI_DEVICE_ID_INTEL_PINEVIEW_HB, PCI_DEVICE_ID_INTEL_PINEVIEW_IG, "GMA3150",
- NULL, &intel_g33_driver },
- { PCI_DEVICE_ID_INTEL_GM45_HB, PCI_DEVICE_ID_INTEL_GM45_IG,
- "GM45", NULL, &intel_i965_driver },
- { PCI_DEVICE_ID_INTEL_EAGLELAKE_HB, PCI_DEVICE_ID_INTEL_EAGLELAKE_IG,
- "Eaglelake", NULL, &intel_i965_driver },
- { PCI_DEVICE_ID_INTEL_Q45_HB, PCI_DEVICE_ID_INTEL_Q45_IG,
- "Q45/Q43", NULL, &intel_i965_driver },
- { PCI_DEVICE_ID_INTEL_G45_HB, PCI_DEVICE_ID_INTEL_G45_IG,
- "G45/G43", NULL, &intel_i965_driver },
- { PCI_DEVICE_ID_INTEL_B43_HB, PCI_DEVICE_ID_INTEL_B43_IG,
- "B43", NULL, &intel_i965_driver },
- { PCI_DEVICE_ID_INTEL_B43_1_HB, PCI_DEVICE_ID_INTEL_B43_1_IG,
- "B43", NULL, &intel_i965_driver },
- { PCI_DEVICE_ID_INTEL_G41_HB, PCI_DEVICE_ID_INTEL_G41_IG,
- "G41", NULL, &intel_i965_driver },
- { PCI_DEVICE_ID_INTEL_IRONLAKE_D_HB, PCI_DEVICE_ID_INTEL_IRONLAKE_D_IG,
- "HD Graphics", NULL, &intel_i965_driver },
- { PCI_DEVICE_ID_INTEL_IRONLAKE_M_HB, PCI_DEVICE_ID_INTEL_IRONLAKE_M_IG,
- "HD Graphics", NULL, &intel_i965_driver },
- { PCI_DEVICE_ID_INTEL_IRONLAKE_MA_HB, PCI_DEVICE_ID_INTEL_IRONLAKE_M_IG,
- "HD Graphics", NULL, &intel_i965_driver },
- { PCI_DEVICE_ID_INTEL_IRONLAKE_MC2_HB, PCI_DEVICE_ID_INTEL_IRONLAKE_M_IG,
- "HD Graphics", NULL, &intel_i965_driver },
- { PCI_DEVICE_ID_INTEL_SANDYBRIDGE_HB, PCI_DEVICE_ID_INTEL_SANDYBRIDGE_GT1_IG,
- "Sandybridge", NULL, &intel_gen6_driver },
- { PCI_DEVICE_ID_INTEL_SANDYBRIDGE_HB, PCI_DEVICE_ID_INTEL_SANDYBRIDGE_GT2_IG,
- "Sandybridge", NULL, &intel_gen6_driver },
- { PCI_DEVICE_ID_INTEL_SANDYBRIDGE_HB, PCI_DEVICE_ID_INTEL_SANDYBRIDGE_GT2_PLUS_IG,
- "Sandybridge", NULL, &intel_gen6_driver },
- { PCI_DEVICE_ID_INTEL_SANDYBRIDGE_M_HB, PCI_DEVICE_ID_INTEL_SANDYBRIDGE_M_GT1_IG,
- "Sandybridge", NULL, &intel_gen6_driver },
- { PCI_DEVICE_ID_INTEL_SANDYBRIDGE_M_HB, PCI_DEVICE_ID_INTEL_SANDYBRIDGE_M_GT2_IG,
- "Sandybridge", NULL, &intel_gen6_driver },
- { PCI_DEVICE_ID_INTEL_SANDYBRIDGE_M_HB, PCI_DEVICE_ID_INTEL_SANDYBRIDGE_M_GT2_PLUS_IG,
- "Sandybridge", NULL, &intel_gen6_driver },
- { PCI_DEVICE_ID_INTEL_SANDYBRIDGE_S_HB, PCI_DEVICE_ID_INTEL_SANDYBRIDGE_S_IG,
- "Sandybridge", NULL, &intel_gen6_driver },
- { 0, 0, NULL, NULL, NULL }
+ { PCI_DEVICE_ID_INTEL_82443LX_0, "440LX", &intel_generic_driver },
+ { PCI_DEVICE_ID_INTEL_82443BX_0, "440BX", &intel_generic_driver },
+ { PCI_DEVICE_ID_INTEL_82443GX_0, "440GX", &intel_generic_driver },
+ { PCI_DEVICE_ID_INTEL_82815_MC, "i815", &intel_815_driver },
+ { PCI_DEVICE_ID_INTEL_82820_HB, "i820", &intel_820_driver },
+ { PCI_DEVICE_ID_INTEL_82820_UP_HB, "i820", &intel_820_driver },
+ { PCI_DEVICE_ID_INTEL_82830_HB, "830M", &intel_830mp_driver },
+ { PCI_DEVICE_ID_INTEL_82840_HB, "i840", &intel_840_driver },
+ { PCI_DEVICE_ID_INTEL_82845_HB, "845G", &intel_845_driver },
+ { PCI_DEVICE_ID_INTEL_82845G_HB, "830M", &intel_845_driver },
+ { PCI_DEVICE_ID_INTEL_82850_HB, "i850", &intel_850_driver },
+ { PCI_DEVICE_ID_INTEL_82854_HB, "854", &intel_845_driver },
+ { PCI_DEVICE_ID_INTEL_82855PM_HB, "855PM", &intel_845_driver },
+ { PCI_DEVICE_ID_INTEL_82855GM_HB, "855GM", &intel_845_driver },
+ { PCI_DEVICE_ID_INTEL_82860_HB, "i860", &intel_860_driver },
+ { PCI_DEVICE_ID_INTEL_82865_HB, "865", &intel_845_driver },
+ { PCI_DEVICE_ID_INTEL_82875_HB, "i875", &intel_845_driver },
+ { PCI_DEVICE_ID_INTEL_7505_0, "E7505", &intel_7505_driver },
+ { PCI_DEVICE_ID_INTEL_7205_0, "E7205", &intel_7505_driver },
+ { 0, NULL, NULL }
};
-static int __devinit intel_gmch_probe(struct pci_dev *pdev,
- struct agp_bridge_data *bridge)
-{
- int i, mask;
-
- bridge->driver = NULL;
-
- for (i = 0; intel_agp_chipsets[i].name != NULL; i++) {
- if ((intel_agp_chipsets[i].gmch_chip_id != 0) &&
- find_gmch(intel_agp_chipsets[i].gmch_chip_id)) {
- bridge->driver =
- intel_agp_chipsets[i].gmch_driver;
- break;
- }
- }
-
- if (!bridge->driver)
- return 0;
-
- bridge->dev_private_data = &intel_private;
- bridge->dev = pdev;
-
- dev_info(&pdev->dev, "Intel %s Chipset\n", intel_agp_chipsets[i].name);
-
- if (bridge->driver->mask_memory == intel_gen6_mask_memory)
- mask = 40;
- else if (bridge->driver->mask_memory == intel_i965_mask_memory)
- mask = 36;
- else
- mask = 32;
-
- if (pci_set_dma_mask(intel_private.pcidev, DMA_BIT_MASK(mask)))
- dev_err(&intel_private.pcidev->dev,
- "set gfx device dma mask %d-bit failed!\n", mask);
- else
- pci_set_consistent_dma_mask(intel_private.pcidev,
- DMA_BIT_MASK(mask));
-
- return 1;
-}
-
static int __devinit agp_intel_probe(struct pci_dev *pdev,
const struct pci_device_id *ent)
{
@@ -905,7 +760,7 @@ static int __devinit agp_intel_probe(struct pci_dev *pdev,
}
}
- if (intel_agp_chipsets[i].name == NULL) {
+ if (!bridge->driver) {
if (cap_ptr)
dev_warn(&pdev->dev, "unsupported Intel chipset [%04x/%04x]\n",
pdev->vendor, pdev->device);
@@ -913,14 +768,6 @@ static int __devinit agp_intel_probe(struct pci_dev *pdev,
return -ENODEV;
}
- if (!bridge->driver) {
- if (cap_ptr)
- dev_warn(&pdev->dev, "can't find bridge device (chip_id: %04x)\n",
- intel_agp_chipsets[i].gmch_chip_id);
- agp_put_bridge(bridge);
- return -ENODEV;
- }
-
bridge->dev = pdev;
bridge->dev_private_data = NULL;
@@ -972,8 +819,7 @@ static void __devexit agp_intel_remove(struct pci_dev *pdev)
agp_remove_bridge(bridge);
- if (intel_private.pcidev)
- pci_dev_put(intel_private.pcidev);
+ intel_gmch_remove(pdev);
agp_put_bridge(bridge);
}
@@ -1049,6 +895,7 @@ static struct pci_device_id agp_intel_pci_table[] = {
ID(PCI_DEVICE_ID_INTEL_G45_HB),
ID(PCI_DEVICE_ID_INTEL_G41_HB),
ID(PCI_DEVICE_ID_INTEL_B43_HB),
+ ID(PCI_DEVICE_ID_INTEL_B43_1_HB),
ID(PCI_DEVICE_ID_INTEL_IRONLAKE_D_HB),
ID(PCI_DEVICE_ID_INTEL_IRONLAKE_M_HB),
ID(PCI_DEVICE_ID_INTEL_IRONLAKE_MA_HB),
diff --git a/drivers/char/agp/intel-agp.h b/drivers/char/agp/intel-agp.h
index d09b1ab7e8ab..90539df02504 100644
--- a/drivers/char/agp/intel-agp.h
+++ b/drivers/char/agp/intel-agp.h
@@ -215,44 +215,7 @@
#define PCI_DEVICE_ID_INTEL_SANDYBRIDGE_S_HB 0x0108 /* Server */
#define PCI_DEVICE_ID_INTEL_SANDYBRIDGE_S_IG 0x010A
-/* cover 915 and 945 variants */
-#define IS_I915 (agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_E7221_HB || \
- agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_82915G_HB || \
- agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_82915GM_HB || \
- agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_82945G_HB || \
- agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_82945GM_HB || \
- agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_82945GME_HB)
-
-#define IS_I965 (agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_82946GZ_HB || \
- agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_82G35_HB || \
- agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_82965Q_HB || \
- agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_82965G_HB || \
- agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_82965GM_HB || \
- agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_82965GME_HB)
-
-#define IS_G33 (agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_G33_HB || \
- agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_Q35_HB || \
- agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_Q33_HB || \
- agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_PINEVIEW_M_HB || \
- agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_PINEVIEW_HB)
-
-#define IS_PINEVIEW (agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_PINEVIEW_M_HB || \
- agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_PINEVIEW_HB)
-
-#define IS_SNB (agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_SANDYBRIDGE_HB || \
- agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_SANDYBRIDGE_M_HB || \
- agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_SANDYBRIDGE_S_HB)
-
-#define IS_G4X (agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_EAGLELAKE_HB || \
- agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_Q45_HB || \
- agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_G45_HB || \
- agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_GM45_HB || \
- agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_G41_HB || \
- agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_B43_HB || \
- agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_IRONLAKE_D_HB || \
- agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_IRONLAKE_M_HB || \
- agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_IRONLAKE_MA_HB || \
- agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_IRONLAKE_MC2_HB || \
- IS_SNB)
-
+int intel_gmch_probe(struct pci_dev *pdev,
+ struct agp_bridge_data *bridge);
+void intel_gmch_remove(struct pci_dev *pdev);
#endif
diff --git a/drivers/char/agp/intel-gtt.c b/drivers/char/agp/intel-gtt.c
index 75e0a3497888..16a2847b7cdb 100644
--- a/drivers/char/agp/intel-gtt.c
+++ b/drivers/char/agp/intel-gtt.c
@@ -15,6 +15,18 @@
* /fairy-tale-mode off
*/
+#include <linux/module.h>
+#include <linux/pci.h>
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/pagemap.h>
+#include <linux/agp_backend.h>
+#include <asm/smp.h>
+#include "agp.h"
+#include "intel-agp.h"
+#include <linux/intel-gtt.h>
+#include <drm/intel-gtt.h>
+
/*
* If we have Intel graphics, we're not going to have anything other than
* an Intel IOMMU. So make the correct use of the PCI DMA API contingent
@@ -23,11 +35,12 @@
*/
#ifdef CONFIG_DMAR
#define USE_PCI_DMA_API 1
+#else
+#define USE_PCI_DMA_API 0
#endif
/* Max amount of stolen space, anything above will be returned to Linux */
int intel_max_stolen = 32 * 1024 * 1024;
-EXPORT_SYMBOL(intel_max_stolen);
static const struct aper_size_info_fixed intel_i810_sizes[] =
{
@@ -55,32 +68,36 @@ static struct gatt_mask intel_i810_masks[] =
#define INTEL_AGP_CACHED_MEMORY_LLC_MLC 3
#define INTEL_AGP_CACHED_MEMORY_LLC_MLC_GFDT 4
-static struct gatt_mask intel_gen6_masks[] =
-{
- {.mask = I810_PTE_VALID | GEN6_PTE_UNCACHED,
- .type = INTEL_AGP_UNCACHED_MEMORY },
- {.mask = I810_PTE_VALID | GEN6_PTE_LLC,
- .type = INTEL_AGP_CACHED_MEMORY_LLC },
- {.mask = I810_PTE_VALID | GEN6_PTE_LLC | GEN6_PTE_GFDT,
- .type = INTEL_AGP_CACHED_MEMORY_LLC_GFDT },
- {.mask = I810_PTE_VALID | GEN6_PTE_LLC_MLC,
- .type = INTEL_AGP_CACHED_MEMORY_LLC_MLC },
- {.mask = I810_PTE_VALID | GEN6_PTE_LLC_MLC | GEN6_PTE_GFDT,
- .type = INTEL_AGP_CACHED_MEMORY_LLC_MLC_GFDT },
+struct intel_gtt_driver {
+ unsigned int gen : 8;
+ unsigned int is_g33 : 1;
+ unsigned int is_pineview : 1;
+ unsigned int is_ironlake : 1;
+ unsigned int dma_mask_size : 8;
+ /* Chipset specific GTT setup */
+ int (*setup)(void);
+ /* This should undo anything done in ->setup() except the unmapping
+ * of the mmio register file; that is done in the generic code. */
+ void (*cleanup)(void);
+ void (*write_entry)(dma_addr_t addr, unsigned int entry, unsigned int flags);
+ /* Flags is a more or less chipset specific opaque value.
+ * For chipsets that need to support old ums (non-gem) code, this
+ * needs to be identical to the various supported agp memory types! */
+ bool (*check_flags)(unsigned int flags);
+ void (*chipset_flush)(void);
};
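The struct above is the core of this refactor: instead of one agp_bridge_driver variant per chipset, each chipset now supplies a small vtable of GTT hooks that the generic code drives. A minimal sketch of that dispatch pattern, with the wrapper name and loop being illustrative rather than code from this patch:

        /* Sketch only: how the generic layer can drive any chipset through the hooks. */
        static int example_bind_pages(const struct intel_gtt_driver *drv,
                                      struct page **pages, unsigned int count,
                                      unsigned int first_entry, unsigned int flags)
        {
                unsigned int i;

                if (!drv->check_flags(flags))          /* chipset-specific type check */
                        return -EINVAL;
                for (i = 0; i < count; i++)            /* one PTE per 4 KiB page */
                        drv->write_entry(page_to_phys(pages[i]),
                                         first_entry + i, flags);
                return 0;
        }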
static struct _intel_private {
+ struct intel_gtt base;
+ const struct intel_gtt_driver *driver;
struct pci_dev *pcidev; /* device one */
+ struct pci_dev *bridge_dev;
u8 __iomem *registers;
+ phys_addr_t gtt_bus_addr;
+ phys_addr_t gma_bus_addr;
+ phys_addr_t pte_bus_addr;
u32 __iomem *gtt; /* I915G */
int num_dcache_entries;
- /* gtt_entries is the number of gtt entries that are already mapped
- * to stolen memory. Stolen memory is larger than the memory mapped
- * through gtt_entries, as it includes some reserved space for the BIOS
- * popup and for the GTT.
- */
- int gtt_entries; /* i830+ */
- int gtt_total_size;
union {
void __iomem *i9xx_flush_page;
void *i8xx_flush_page;
@@ -88,23 +105,14 @@ static struct _intel_private {
struct page *i8xx_page;
struct resource ifp_resource;
int resource_valid;
+ struct page *scratch_page;
+ dma_addr_t scratch_page_dma;
} intel_private;
-#ifdef USE_PCI_DMA_API
-static int intel_agp_map_page(struct page *page, dma_addr_t *ret)
-{
- *ret = pci_map_page(intel_private.pcidev, page, 0,
- PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
- if (pci_dma_mapping_error(intel_private.pcidev, *ret))
- return -EINVAL;
- return 0;
-}
-
-static void intel_agp_unmap_page(struct page *page, dma_addr_t dma)
-{
- pci_unmap_page(intel_private.pcidev, dma,
- PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
-}
+#define INTEL_GTT_GEN intel_private.driver->gen
+#define IS_G33 intel_private.driver->is_g33
+#define IS_PINEVIEW intel_private.driver->is_pineview
+#define IS_IRONLAKE intel_private.driver->is_ironlake
static void intel_agp_free_sglist(struct agp_memory *mem)
{
@@ -125,6 +133,9 @@ static int intel_agp_map_memory(struct agp_memory *mem)
struct scatterlist *sg;
int i;
+ if (mem->sg_list)
+ return 0; /* already mapped (e.g. for resume) */
+
DBG("try mapping %lu pages\n", (unsigned long)mem->page_count);
if (sg_alloc_table(&st, mem->page_count, GFP_KERNEL))
@@ -156,70 +167,17 @@ static void intel_agp_unmap_memory(struct agp_memory *mem)
intel_agp_free_sglist(mem);
}
-static void intel_agp_insert_sg_entries(struct agp_memory *mem,
- off_t pg_start, int mask_type)
-{
- struct scatterlist *sg;
- int i, j;
-
- j = pg_start;
-
- WARN_ON(!mem->num_sg);
-
- if (mem->num_sg == mem->page_count) {
- for_each_sg(mem->sg_list, sg, mem->page_count, i) {
- writel(agp_bridge->driver->mask_memory(agp_bridge,
- sg_dma_address(sg), mask_type),
- intel_private.gtt+j);
- j++;
- }
- } else {
- /* sg may merge pages, but we have to separate
- * per-page addr for GTT */
- unsigned int len, m;
-
- for_each_sg(mem->sg_list, sg, mem->num_sg, i) {
- len = sg_dma_len(sg) / PAGE_SIZE;
- for (m = 0; m < len; m++) {
- writel(agp_bridge->driver->mask_memory(agp_bridge,
- sg_dma_address(sg) + m * PAGE_SIZE,
- mask_type),
- intel_private.gtt+j);
- j++;
- }
- }
- }
- readl(intel_private.gtt+j-1);
-}
-
-#else
-
-static void intel_agp_insert_sg_entries(struct agp_memory *mem,
- off_t pg_start, int mask_type)
-{
- int i, j;
-
- for (i = 0, j = pg_start; i < mem->page_count; i++, j++) {
- writel(agp_bridge->driver->mask_memory(agp_bridge,
- page_to_phys(mem->pages[i]), mask_type),
- intel_private.gtt+j);
- }
-
- readl(intel_private.gtt+j-1);
-}
-
-#endif
-
static int intel_i810_fetch_size(void)
{
u32 smram_miscc;
struct aper_size_info_fixed *values;
- pci_read_config_dword(agp_bridge->dev, I810_SMRAM_MISCC, &smram_miscc);
+ pci_read_config_dword(intel_private.bridge_dev,
+ I810_SMRAM_MISCC, &smram_miscc);
values = A_SIZE_FIX(agp_bridge->driver->aperture_sizes);
if ((smram_miscc & I810_GMS) == I810_GMS_DISABLE) {
- dev_warn(&agp_bridge->dev->dev, "i810 is disabled\n");
+ dev_warn(&intel_private.bridge_dev->dev, "i810 is disabled\n");
return 0;
}
if ((smram_miscc & I810_GFX_MEM_WIN_SIZE) == I810_GFX_MEM_WIN_32M) {
@@ -284,7 +242,7 @@ static void intel_i810_cleanup(void)
iounmap(intel_private.registers);
}
-static void intel_i810_agp_enable(struct agp_bridge_data *bridge, u32 mode)
+static void intel_fake_agp_enable(struct agp_bridge_data *bridge, u32 mode)
{
return;
}
@@ -319,34 +277,6 @@ static void i8xx_destroy_pages(struct page *page)
atomic_dec(&agp_bridge->current_memory_agp);
}
-static int intel_i830_type_to_mask_type(struct agp_bridge_data *bridge,
- int type)
-{
- if (type < AGP_USER_TYPES)
- return type;
- else if (type == AGP_USER_CACHED_MEMORY)
- return INTEL_AGP_CACHED_MEMORY;
- else
- return 0;
-}
-
-static int intel_gen6_type_to_mask_type(struct agp_bridge_data *bridge,
- int type)
-{
- unsigned int type_mask = type & ~AGP_USER_CACHED_MEMORY_GFDT;
- unsigned int gfdt = type & AGP_USER_CACHED_MEMORY_GFDT;
-
- if (type_mask == AGP_USER_UNCACHED_MEMORY)
- return INTEL_AGP_UNCACHED_MEMORY;
- else if (type_mask == AGP_USER_CACHED_MEMORY_LLC_MLC)
- return gfdt ? INTEL_AGP_CACHED_MEMORY_LLC_MLC_GFDT :
- INTEL_AGP_CACHED_MEMORY_LLC_MLC;
- else /* set 'normal'/'cached' to LLC by default */
- return gfdt ? INTEL_AGP_CACHED_MEMORY_LLC_GFDT :
- INTEL_AGP_CACHED_MEMORY_LLC;
-}
-
-
static int intel_i810_insert_entries(struct agp_memory *mem, off_t pg_start,
int type)
{
@@ -514,8 +444,33 @@ static unsigned long intel_i810_mask_memory(struct agp_bridge_data *bridge,
return addr | bridge->driver->masks[type].mask;
}
-static struct aper_size_info_fixed intel_i830_sizes[] =
+static int intel_gtt_setup_scratch_page(void)
{
+ struct page *page;
+ dma_addr_t dma_addr;
+
+ page = alloc_page(GFP_KERNEL | GFP_DMA32 | __GFP_ZERO);
+ if (page == NULL)
+ return -ENOMEM;
+ get_page(page);
+ set_pages_uc(page, 1);
+
+ if (USE_PCI_DMA_API && INTEL_GTT_GEN > 2) {
+ dma_addr = pci_map_page(intel_private.pcidev, page, 0,
+ PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
+ if (pci_dma_mapping_error(intel_private.pcidev, dma_addr))
+ return -EINVAL;
+
+ intel_private.scratch_page_dma = dma_addr;
+ } else
+ intel_private.scratch_page_dma = page_to_phys(page);
+
+ intel_private.scratch_page = page;
+
+ return 0;
+}
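The scratch page set up above backs every GTT entry that has no real object bound: unbound entries are pointed at this single zeroed, uncached page so stray GPU accesses hit harmless memory. Condensed from how the rest of this patch uses it in intel_fake_agp_configure() and intel_fake_agp_remove_entries(), shown here only as a sketch:

        /* Point a range of GTT entries at the scratch page (sketch). */
        for (i = start; i < end; i++)
                intel_private.driver->write_entry(intel_private.scratch_page_dma,
                                                  i, 0);
        readl(intel_private.gtt + i - 1);      /* posting read to flush the PCI writes */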
+
+static const struct aper_size_info_fixed intel_fake_agp_sizes[] = {
{128, 32768, 5},
/* The 64M mode still requires a 128k gatt */
{64, 16384, 5},
@@ -523,102 +478,49 @@ static struct aper_size_info_fixed intel_i830_sizes[] =
{512, 131072, 7},
};
-static void intel_i830_init_gtt_entries(void)
+static unsigned int intel_gtt_stolen_entries(void)
{
u16 gmch_ctrl;
- int gtt_entries = 0;
u8 rdct;
int local = 0;
static const int ddt[4] = { 0, 16, 32, 64 };
- int size; /* reserved space (in kb) at the top of stolen memory */
+ unsigned int overhead_entries, stolen_entries;
+ unsigned int stolen_size = 0;
- pci_read_config_word(agp_bridge->dev, I830_GMCH_CTRL, &gmch_ctrl);
+ pci_read_config_word(intel_private.bridge_dev,
+ I830_GMCH_CTRL, &gmch_ctrl);
- if (IS_I965) {
- u32 pgetbl_ctl;
- pgetbl_ctl = readl(intel_private.registers+I810_PGETBL_CTL);
+ if (INTEL_GTT_GEN > 4 || IS_PINEVIEW)
+ overhead_entries = 0;
+ else
+ overhead_entries = intel_private.base.gtt_mappable_entries
+ / 1024;
- /* The 965 has a field telling us the size of the GTT,
- * which may be larger than what is necessary to map the
- * aperture.
- */
- switch (pgetbl_ctl & I965_PGETBL_SIZE_MASK) {
- case I965_PGETBL_SIZE_128KB:
- size = 128;
- break;
- case I965_PGETBL_SIZE_256KB:
- size = 256;
- break;
- case I965_PGETBL_SIZE_512KB:
- size = 512;
- break;
- case I965_PGETBL_SIZE_1MB:
- size = 1024;
- break;
- case I965_PGETBL_SIZE_2MB:
- size = 2048;
- break;
- case I965_PGETBL_SIZE_1_5MB:
- size = 1024 + 512;
- break;
- default:
- dev_info(&intel_private.pcidev->dev,
- "unknown page table size, assuming 512KB\n");
- size = 512;
- }
- size += 4; /* add in BIOS popup space */
- } else if (IS_G33 && !IS_PINEVIEW) {
- /* G33's GTT size defined in gmch_ctrl */
- switch (gmch_ctrl & G33_PGETBL_SIZE_MASK) {
- case G33_PGETBL_SIZE_1M:
- size = 1024;
- break;
- case G33_PGETBL_SIZE_2M:
- size = 2048;
- break;
- default:
- dev_info(&agp_bridge->dev->dev,
- "unknown page table size 0x%x, assuming 512KB\n",
- (gmch_ctrl & G33_PGETBL_SIZE_MASK));
- size = 512;
- }
- size += 4;
- } else if (IS_G4X || IS_PINEVIEW) {
- /* On 4 series hardware, GTT stolen is separate from graphics
- * stolen, ignore it in stolen gtt entries counting. However,
- * 4KB of the stolen memory doesn't get mapped to the GTT.
- */
- size = 4;
- } else {
- /* On previous hardware, the GTT size was just what was
- * required to map the aperture.
- */
- size = agp_bridge->driver->fetch_size() + 4;
- }
+ overhead_entries += 1; /* BIOS popup */
- if (agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_82830_HB ||
- agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_82845G_HB) {
+ if (intel_private.bridge_dev->device == PCI_DEVICE_ID_INTEL_82830_HB ||
+ intel_private.bridge_dev->device == PCI_DEVICE_ID_INTEL_82845G_HB) {
switch (gmch_ctrl & I830_GMCH_GMS_MASK) {
case I830_GMCH_GMS_STOLEN_512:
- gtt_entries = KB(512) - KB(size);
+ stolen_size = KB(512);
break;
case I830_GMCH_GMS_STOLEN_1024:
- gtt_entries = MB(1) - KB(size);
+ stolen_size = MB(1);
break;
case I830_GMCH_GMS_STOLEN_8192:
- gtt_entries = MB(8) - KB(size);
+ stolen_size = MB(8);
break;
case I830_GMCH_GMS_LOCAL:
rdct = readb(intel_private.registers+I830_RDRAM_CHANNEL_TYPE);
- gtt_entries = (I830_RDRAM_ND(rdct) + 1) *
+ stolen_size = (I830_RDRAM_ND(rdct) + 1) *
MB(ddt[I830_RDRAM_DDT(rdct)]);
local = 1;
break;
default:
- gtt_entries = 0;
+ stolen_size = 0;
break;
}
- } else if (IS_SNB) {
+ } else if (INTEL_GTT_GEN == 6) {
/*
* SandyBridge has new memory control reg at 0x50.w
*/
@@ -626,149 +528,294 @@ static void intel_i830_init_gtt_entries(void)
pci_read_config_word(intel_private.pcidev, SNB_GMCH_CTRL, &snb_gmch_ctl);
switch (snb_gmch_ctl & SNB_GMCH_GMS_STOLEN_MASK) {
case SNB_GMCH_GMS_STOLEN_32M:
- gtt_entries = MB(32) - KB(size);
+ stolen_size = MB(32);
break;
case SNB_GMCH_GMS_STOLEN_64M:
- gtt_entries = MB(64) - KB(size);
+ stolen_size = MB(64);
break;
case SNB_GMCH_GMS_STOLEN_96M:
- gtt_entries = MB(96) - KB(size);
+ stolen_size = MB(96);
break;
case SNB_GMCH_GMS_STOLEN_128M:
- gtt_entries = MB(128) - KB(size);
+ stolen_size = MB(128);
break;
case SNB_GMCH_GMS_STOLEN_160M:
- gtt_entries = MB(160) - KB(size);
+ stolen_size = MB(160);
break;
case SNB_GMCH_GMS_STOLEN_192M:
- gtt_entries = MB(192) - KB(size);
+ stolen_size = MB(192);
break;
case SNB_GMCH_GMS_STOLEN_224M:
- gtt_entries = MB(224) - KB(size);
+ stolen_size = MB(224);
break;
case SNB_GMCH_GMS_STOLEN_256M:
- gtt_entries = MB(256) - KB(size);
+ stolen_size = MB(256);
break;
case SNB_GMCH_GMS_STOLEN_288M:
- gtt_entries = MB(288) - KB(size);
+ stolen_size = MB(288);
break;
case SNB_GMCH_GMS_STOLEN_320M:
- gtt_entries = MB(320) - KB(size);
+ stolen_size = MB(320);
break;
case SNB_GMCH_GMS_STOLEN_352M:
- gtt_entries = MB(352) - KB(size);
+ stolen_size = MB(352);
break;
case SNB_GMCH_GMS_STOLEN_384M:
- gtt_entries = MB(384) - KB(size);
+ stolen_size = MB(384);
break;
case SNB_GMCH_GMS_STOLEN_416M:
- gtt_entries = MB(416) - KB(size);
+ stolen_size = MB(416);
break;
case SNB_GMCH_GMS_STOLEN_448M:
- gtt_entries = MB(448) - KB(size);
+ stolen_size = MB(448);
break;
case SNB_GMCH_GMS_STOLEN_480M:
- gtt_entries = MB(480) - KB(size);
+ stolen_size = MB(480);
break;
case SNB_GMCH_GMS_STOLEN_512M:
- gtt_entries = MB(512) - KB(size);
+ stolen_size = MB(512);
break;
}
} else {
switch (gmch_ctrl & I855_GMCH_GMS_MASK) {
case I855_GMCH_GMS_STOLEN_1M:
- gtt_entries = MB(1) - KB(size);
+ stolen_size = MB(1);
break;
case I855_GMCH_GMS_STOLEN_4M:
- gtt_entries = MB(4) - KB(size);
+ stolen_size = MB(4);
break;
case I855_GMCH_GMS_STOLEN_8M:
- gtt_entries = MB(8) - KB(size);
+ stolen_size = MB(8);
break;
case I855_GMCH_GMS_STOLEN_16M:
- gtt_entries = MB(16) - KB(size);
+ stolen_size = MB(16);
break;
case I855_GMCH_GMS_STOLEN_32M:
- gtt_entries = MB(32) - KB(size);
+ stolen_size = MB(32);
break;
case I915_GMCH_GMS_STOLEN_48M:
- /* Check it's really I915G */
- if (IS_I915 || IS_I965 || IS_G33 || IS_G4X)
- gtt_entries = MB(48) - KB(size);
- else
- gtt_entries = 0;
+ stolen_size = MB(48);
break;
case I915_GMCH_GMS_STOLEN_64M:
- /* Check it's really I915G */
- if (IS_I915 || IS_I965 || IS_G33 || IS_G4X)
- gtt_entries = MB(64) - KB(size);
- else
- gtt_entries = 0;
+ stolen_size = MB(64);
break;
case G33_GMCH_GMS_STOLEN_128M:
- if (IS_G33 || IS_I965 || IS_G4X)
- gtt_entries = MB(128) - KB(size);
- else
- gtt_entries = 0;
+ stolen_size = MB(128);
break;
case G33_GMCH_GMS_STOLEN_256M:
- if (IS_G33 || IS_I965 || IS_G4X)
- gtt_entries = MB(256) - KB(size);
- else
- gtt_entries = 0;
+ stolen_size = MB(256);
break;
case INTEL_GMCH_GMS_STOLEN_96M:
- if (IS_I965 || IS_G4X)
- gtt_entries = MB(96) - KB(size);
- else
- gtt_entries = 0;
+ stolen_size = MB(96);
break;
case INTEL_GMCH_GMS_STOLEN_160M:
- if (IS_I965 || IS_G4X)
- gtt_entries = MB(160) - KB(size);
- else
- gtt_entries = 0;
+ stolen_size = MB(160);
break;
case INTEL_GMCH_GMS_STOLEN_224M:
- if (IS_I965 || IS_G4X)
- gtt_entries = MB(224) - KB(size);
- else
- gtt_entries = 0;
+ stolen_size = MB(224);
break;
case INTEL_GMCH_GMS_STOLEN_352M:
- if (IS_I965 || IS_G4X)
- gtt_entries = MB(352) - KB(size);
- else
- gtt_entries = 0;
+ stolen_size = MB(352);
break;
default:
- gtt_entries = 0;
+ stolen_size = 0;
break;
}
}
- if (!local && gtt_entries > intel_max_stolen) {
- dev_info(&agp_bridge->dev->dev,
+
+ if (!local && stolen_size > intel_max_stolen) {
+ dev_info(&intel_private.bridge_dev->dev,
"detected %dK stolen memory, trimming to %dK\n",
- gtt_entries / KB(1), intel_max_stolen / KB(1));
- gtt_entries = intel_max_stolen / KB(4);
- } else if (gtt_entries > 0) {
- dev_info(&agp_bridge->dev->dev, "detected %dK %s memory\n",
- gtt_entries / KB(1), local ? "local" : "stolen");
- gtt_entries /= KB(4);
+ stolen_size / KB(1), intel_max_stolen / KB(1));
+ stolen_size = intel_max_stolen;
+ } else if (stolen_size > 0) {
+ dev_info(&intel_private.bridge_dev->dev, "detected %dK %s memory\n",
+ stolen_size / KB(1), local ? "local" : "stolen");
} else {
- dev_info(&agp_bridge->dev->dev,
+ dev_info(&intel_private.bridge_dev->dev,
"no pre-allocated video memory detected\n");
- gtt_entries = 0;
+ stolen_size = 0;
+ }
+
+ stolen_entries = stolen_size/KB(4) - overhead_entries;
+
+ return stolen_entries;
+}
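To make the arithmetic concrete (illustrative numbers, not taken from the patch): each GTT entry maps one 4 KiB page, so on a gen2 part with a 128 MB aperture and 8 MB of stolen memory

        stolen entries     = MB(8) / KB(4)                  = 2048
        overhead entries   = gtt_mappable_entries/1024 + 1  = 32768/1024 + 1 = 33
        gtt_stolen_entries = 2048 - 33                      = 2015

where the overhead covers the pages of stolen memory occupied by the GTT itself (4 bytes per entry) plus the one-page BIOS popup.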
+
+static unsigned int intel_gtt_total_entries(void)
+{
+ int size;
+
+ if (IS_G33 || INTEL_GTT_GEN == 4 || INTEL_GTT_GEN == 5) {
+ u32 pgetbl_ctl;
+ pgetbl_ctl = readl(intel_private.registers+I810_PGETBL_CTL);
+
+ switch (pgetbl_ctl & I965_PGETBL_SIZE_MASK) {
+ case I965_PGETBL_SIZE_128KB:
+ size = KB(128);
+ break;
+ case I965_PGETBL_SIZE_256KB:
+ size = KB(256);
+ break;
+ case I965_PGETBL_SIZE_512KB:
+ size = KB(512);
+ break;
+ case I965_PGETBL_SIZE_1MB:
+ size = KB(1024);
+ break;
+ case I965_PGETBL_SIZE_2MB:
+ size = KB(2048);
+ break;
+ case I965_PGETBL_SIZE_1_5MB:
+ size = KB(1024 + 512);
+ break;
+ default:
+ dev_info(&intel_private.pcidev->dev,
+ "unknown page table size, assuming 512KB\n");
+ size = KB(512);
+ }
+
+ return size/4;
+ } else if (INTEL_GTT_GEN == 6) {
+ u16 snb_gmch_ctl;
+
+ pci_read_config_word(intel_private.pcidev, SNB_GMCH_CTRL, &snb_gmch_ctl);
+ switch (snb_gmch_ctl & SNB_GTT_SIZE_MASK) {
+ default:
+ case SNB_GTT_SIZE_0M:
+ printk(KERN_ERR "Bad GTT size mask: 0x%04x.\n", snb_gmch_ctl);
+ size = MB(0);
+ break;
+ case SNB_GTT_SIZE_1M:
+ size = MB(1);
+ break;
+ case SNB_GTT_SIZE_2M:
+ size = MB(2);
+ break;
+ }
+ return size/4;
+ } else {
+ /* On previous hardware, the GTT size was just what was
+ * required to map the aperture.
+ */
+ return intel_private.base.gtt_mappable_entries;
+ }
+}
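Each GTT entry is a 32-bit PTE, hence the size/4 above: a 512 KB page table holds KB(512)/4 = 131072 entries and can therefore map 131072 * 4 KiB = 512 MB of GPU address space.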
+
+static unsigned int intel_gtt_mappable_entries(void)
+{
+ unsigned int aperture_size;
+
+ if (INTEL_GTT_GEN == 2) {
+ u16 gmch_ctrl;
+
+ pci_read_config_word(intel_private.bridge_dev,
+ I830_GMCH_CTRL, &gmch_ctrl);
+
+ if ((gmch_ctrl & I830_GMCH_MEM_MASK) == I830_GMCH_MEM_64M)
+ aperture_size = MB(64);
+ else
+ aperture_size = MB(128);
+ } else {
+ /* 9xx supports large sizes, just look at the length */
+ aperture_size = pci_resource_len(intel_private.pcidev, 2);
}
- intel_private.gtt_entries = gtt_entries;
+ return aperture_size >> PAGE_SHIFT;
+}
+
+static void intel_gtt_teardown_scratch_page(void)
+{
+ set_pages_wb(intel_private.scratch_page, 1);
+ pci_unmap_page(intel_private.pcidev, intel_private.scratch_page_dma,
+ PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
+ put_page(intel_private.scratch_page);
+ __free_page(intel_private.scratch_page);
}
-static void intel_i830_fini_flush(void)
+static void intel_gtt_cleanup(void)
{
- kunmap(intel_private.i8xx_page);
- intel_private.i8xx_flush_page = NULL;
- unmap_page_from_agp(intel_private.i8xx_page);
+ intel_private.driver->cleanup();
+
+ iounmap(intel_private.gtt);
+ iounmap(intel_private.registers);
+
+ intel_gtt_teardown_scratch_page();
+}
+
+static int intel_gtt_init(void)
+{
+ u32 gtt_map_size;
+ int ret;
+
+ ret = intel_private.driver->setup();
+ if (ret != 0)
+ return ret;
+
+ intel_private.base.gtt_mappable_entries = intel_gtt_mappable_entries();
+ intel_private.base.gtt_total_entries = intel_gtt_total_entries();
+
+ dev_info(&intel_private.bridge_dev->dev,
+ "detected gtt size: %dK total, %dK mappable\n",
+ intel_private.base.gtt_total_entries * 4,
+ intel_private.base.gtt_mappable_entries * 4);
+
+ gtt_map_size = intel_private.base.gtt_total_entries * 4;
+
+ intel_private.gtt = ioremap(intel_private.gtt_bus_addr,
+ gtt_map_size);
+ if (!intel_private.gtt) {
+ intel_private.driver->cleanup();
+ iounmap(intel_private.registers);
+ return -ENOMEM;
+ }
+
+ global_cache_flush(); /* FIXME: ? */
+
+ /* we have to call this as early as possible after the MMIO base address is known */
+ intel_private.base.gtt_stolen_entries = intel_gtt_stolen_entries();
+ if (intel_private.base.gtt_stolen_entries == 0) {
+ intel_private.driver->cleanup();
+ iounmap(intel_private.registers);
+ iounmap(intel_private.gtt);
+ return -ENOMEM;
+ }
+
+ ret = intel_gtt_setup_scratch_page();
+ if (ret != 0) {
+ intel_gtt_cleanup();
+ return ret;
+ }
+
+ return 0;
+}
+
+static int intel_fake_agp_fetch_size(void)
+{
+ int num_sizes = ARRAY_SIZE(intel_fake_agp_sizes);
+ unsigned int aper_size;
+ int i;
+
+ aper_size = (intel_private.base.gtt_mappable_entries << PAGE_SHIFT)
+ / MB(1);
+
+ for (i = 0; i < num_sizes; i++) {
+ if (aper_size == intel_fake_agp_sizes[i].size) {
+ agp_bridge->current_size =
+ (void *) (intel_fake_agp_sizes + i);
+ return aper_size;
+ }
+ }
+
+ return 0;
+}
+
+static void i830_cleanup(void)
+{
+ if (intel_private.i8xx_flush_page) {
+ kunmap(intel_private.i8xx_flush_page);
+ intel_private.i8xx_flush_page = NULL;
+ }
__free_page(intel_private.i8xx_page);
intel_private.i8xx_page = NULL;
@@ -780,13 +827,13 @@ static void intel_i830_setup_flush(void)
if (intel_private.i8xx_page)
return;
- intel_private.i8xx_page = alloc_page(GFP_KERNEL | __GFP_ZERO | GFP_DMA32);
+ intel_private.i8xx_page = alloc_page(GFP_KERNEL);
if (!intel_private.i8xx_page)
return;
intel_private.i8xx_flush_page = kmap(intel_private.i8xx_page);
if (!intel_private.i8xx_flush_page)
- intel_i830_fini_flush();
+ i830_cleanup();
}
/* The chipset_flush interface needs to get data that has already been
@@ -799,7 +846,7 @@ static void intel_i830_setup_flush(void)
* that buffer out, we just fill 1KB and clflush it out, on the assumption
* that it'll push whatever was in there out. It appears to work.
*/
-static void intel_i830_chipset_flush(struct agp_bridge_data *bridge)
+static void i830_chipset_flush(void)
{
unsigned int *pg = intel_private.i8xx_flush_page;
@@ -811,169 +858,184 @@ static void intel_i830_chipset_flush(struct agp_bridge_data *bridge)
printk(KERN_ERR "Timed out waiting for cache flush.\n");
}
-/* The intel i830 automatically initializes the agp aperture during POST.
- * Use the memory already set aside for in the GTT.
- */
-static int intel_i830_create_gatt_table(struct agp_bridge_data *bridge)
+static void i830_write_entry(dma_addr_t addr, unsigned int entry,
+ unsigned int flags)
{
- int page_order;
- struct aper_size_info_fixed *size;
- int num_entries;
- u32 temp;
+ u32 pte_flags = I810_PTE_VALID;
+
+ switch (flags) {
+ case AGP_DCACHE_MEMORY:
+ pte_flags |= I810_PTE_LOCAL;
+ break;
+ case AGP_USER_CACHED_MEMORY:
+ pte_flags |= I830_PTE_SYSTEM_CACHED;
+ break;
+ }
- size = agp_bridge->current_size;
- page_order = size->page_order;
- num_entries = size->num_entries;
- agp_bridge->gatt_table_real = NULL;
+ writel(addr | pte_flags, intel_private.gtt + entry);
+}
- pci_read_config_dword(intel_private.pcidev, I810_MMADDR, &temp);
- temp &= 0xfff80000;
+static void intel_enable_gtt(void)
+{
+ u32 gma_addr;
+ u16 gmch_ctrl;
- intel_private.registers = ioremap(temp, 128 * 4096);
- if (!intel_private.registers)
- return -ENOMEM;
+ if (INTEL_GTT_GEN == 2)
+ pci_read_config_dword(intel_private.pcidev, I810_GMADDR,
+ &gma_addr);
+ else
+ pci_read_config_dword(intel_private.pcidev, I915_GMADDR,
+ &gma_addr);
- temp = readl(intel_private.registers+I810_PGETBL_CTL) & 0xfffff000;
- global_cache_flush(); /* FIXME: ?? */
+ intel_private.gma_bus_addr = (gma_addr & PCI_BASE_ADDRESS_MEM_MASK);
- /* we have to call this as early as possible after the MMIO base address is known */
- intel_i830_init_gtt_entries();
- if (intel_private.gtt_entries == 0) {
- iounmap(intel_private.registers);
+ pci_read_config_word(intel_private.bridge_dev, I830_GMCH_CTRL, &gmch_ctrl);
+ gmch_ctrl |= I830_GMCH_ENABLED;
+ pci_write_config_word(intel_private.bridge_dev, I830_GMCH_CTRL, gmch_ctrl);
+
+ writel(intel_private.pte_bus_addr|I810_PGETBL_ENABLED,
+ intel_private.registers+I810_PGETBL_CTL);
+ readl(intel_private.registers+I810_PGETBL_CTL); /* PCI Posting. */
+}
+
+static int i830_setup(void)
+{
+ u32 reg_addr;
+
+ pci_read_config_dword(intel_private.pcidev, I810_MMADDR, &reg_addr);
+ reg_addr &= 0xfff80000;
+
+ intel_private.registers = ioremap(reg_addr, KB(64));
+ if (!intel_private.registers)
return -ENOMEM;
- }
- agp_bridge->gatt_table = NULL;
+ intel_private.gtt_bus_addr = reg_addr + I810_PTE_BASE;
+ intel_private.pte_bus_addr =
+ readl(intel_private.registers+I810_PGETBL_CTL) & 0xfffff000;
- agp_bridge->gatt_bus_addr = temp;
+ intel_i830_setup_flush();
return 0;
}
-/* Return the gatt table to a sane state. Use the top of stolen
- * memory for the GTT.
- */
-static int intel_i830_free_gatt_table(struct agp_bridge_data *bridge)
+static int intel_fake_agp_create_gatt_table(struct agp_bridge_data *bridge)
{
+ agp_bridge->gatt_table_real = NULL;
+ agp_bridge->gatt_table = NULL;
+ agp_bridge->gatt_bus_addr = 0;
+
return 0;
}
-static int intel_i830_fetch_size(void)
+static int intel_fake_agp_free_gatt_table(struct agp_bridge_data *bridge)
{
- u16 gmch_ctrl;
- struct aper_size_info_fixed *values;
-
- values = A_SIZE_FIX(agp_bridge->driver->aperture_sizes);
-
- if (agp_bridge->dev->device != PCI_DEVICE_ID_INTEL_82830_HB &&
- agp_bridge->dev->device != PCI_DEVICE_ID_INTEL_82845G_HB) {
- /* 855GM/852GM/865G has 128MB aperture size */
- agp_bridge->current_size = (void *) values;
- agp_bridge->aperture_size_idx = 0;
- return values[0].size;
- }
-
- pci_read_config_word(agp_bridge->dev, I830_GMCH_CTRL, &gmch_ctrl);
-
- if ((gmch_ctrl & I830_GMCH_MEM_MASK) == I830_GMCH_MEM_128M) {
- agp_bridge->current_size = (void *) values;
- agp_bridge->aperture_size_idx = 0;
- return values[0].size;
- } else {
- agp_bridge->current_size = (void *) (values + 1);
- agp_bridge->aperture_size_idx = 1;
- return values[1].size;
- }
-
return 0;
}
-static int intel_i830_configure(void)
+static int intel_fake_agp_configure(void)
{
- struct aper_size_info_fixed *current_size;
- u32 temp;
- u16 gmch_ctrl;
int i;
- current_size = A_SIZE_FIX(agp_bridge->current_size);
+ intel_enable_gtt();
- pci_read_config_dword(intel_private.pcidev, I810_GMADDR, &temp);
- agp_bridge->gart_bus_addr = (temp & PCI_BASE_ADDRESS_MEM_MASK);
-
- pci_read_config_word(agp_bridge->dev, I830_GMCH_CTRL, &gmch_ctrl);
- gmch_ctrl |= I830_GMCH_ENABLED;
- pci_write_config_word(agp_bridge->dev, I830_GMCH_CTRL, gmch_ctrl);
-
- writel(agp_bridge->gatt_bus_addr|I810_PGETBL_ENABLED, intel_private.registers+I810_PGETBL_CTL);
- readl(intel_private.registers+I810_PGETBL_CTL); /* PCI Posting. */
+ agp_bridge->gart_bus_addr = intel_private.gma_bus_addr;
- if (agp_bridge->driver->needs_scratch_page) {
- for (i = intel_private.gtt_entries; i < current_size->num_entries; i++) {
- writel(agp_bridge->scratch_page, intel_private.registers+I810_PTE_BASE+(i*4));
- }
- readl(intel_private.registers+I810_PTE_BASE+((i-1)*4)); /* PCI Posting. */
+ for (i = intel_private.base.gtt_stolen_entries;
+ i < intel_private.base.gtt_total_entries; i++) {
+ intel_private.driver->write_entry(intel_private.scratch_page_dma,
+ i, 0);
}
+ readl(intel_private.gtt+i-1); /* PCI Posting. */
global_cache_flush();
- intel_i830_setup_flush();
return 0;
}
-static void intel_i830_cleanup(void)
+static bool i830_check_flags(unsigned int flags)
{
- iounmap(intel_private.registers);
+ switch (flags) {
+ case 0:
+ case AGP_PHYS_MEMORY:
+ case AGP_USER_CACHED_MEMORY:
+ case AGP_USER_MEMORY:
+ return true;
+ }
+
+ return false;
}
-static int intel_i830_insert_entries(struct agp_memory *mem, off_t pg_start,
- int type)
+static void intel_gtt_insert_sg_entries(struct scatterlist *sg_list,
+ unsigned int sg_len,
+ unsigned int pg_start,
+ unsigned int flags)
{
- int i, j, num_entries;
- void *temp;
+ struct scatterlist *sg;
+ unsigned int len, m;
+ int i, j;
+
+ j = pg_start;
+
+ /* sg may merge pages, but we have to separate
+ * per-page addr for GTT */
+ for_each_sg(sg_list, sg, sg_len, i) {
+ len = sg_dma_len(sg) >> PAGE_SHIFT;
+ for (m = 0; m < len; m++) {
+ dma_addr_t addr = sg_dma_address(sg) + (m << PAGE_SHIFT);
+ intel_private.driver->write_entry(addr,
+ j, flags);
+ j++;
+ }
+ }
+ readl(intel_private.gtt+j-1);
+}
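As the comment notes, a scatterlist element may cover several physically contiguous pages while the GTT needs one entry per page. For example (illustrative addresses), an element with sg_dma_len() == 16384 at DMA address 0x80000000 produces four consecutive PTE writes for 0x80000000, 0x80001000, 0x80002000 and 0x80003000 at GTT indices j through j+3.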
+
+static int intel_fake_agp_insert_entries(struct agp_memory *mem,
+ off_t pg_start, int type)
+{
+ int i, j;
int ret = -EINVAL;
- int mask_type;
if (mem->page_count == 0)
goto out;
- temp = agp_bridge->current_size;
- num_entries = A_SIZE_FIX(temp)->num_entries;
-
- if (pg_start < intel_private.gtt_entries) {
+ if (pg_start < intel_private.base.gtt_stolen_entries) {
dev_printk(KERN_DEBUG, &intel_private.pcidev->dev,
- "pg_start == 0x%.8lx, intel_private.gtt_entries == 0x%.8x\n",
- pg_start, intel_private.gtt_entries);
+ "pg_start == 0x%.8lx, gtt_stolen_entries == 0x%.8x\n",
+ pg_start, intel_private.base.gtt_stolen_entries);
dev_info(&intel_private.pcidev->dev,
"trying to insert into local/stolen memory\n");
goto out_err;
}
- if ((pg_start + mem->page_count) > num_entries)
+ if ((pg_start + mem->page_count) > intel_private.base.gtt_total_entries)
goto out_err;
- /* The i830 can't check the GTT for entries since its read only,
- * depend on the caller to make the correct offset decisions.
- */
-
if (type != mem->type)
goto out_err;
- mask_type = agp_bridge->driver->agp_type_to_mask_type(agp_bridge, type);
-
- if (mask_type != 0 && mask_type != AGP_PHYS_MEMORY &&
- mask_type != INTEL_AGP_CACHED_MEMORY)
+ if (!intel_private.driver->check_flags(type))
goto out_err;
if (!mem->is_flushed)
global_cache_flush();
- for (i = 0, j = pg_start; i < mem->page_count; i++, j++) {
- writel(agp_bridge->driver->mask_memory(agp_bridge,
- page_to_phys(mem->pages[i]), mask_type),
- intel_private.registers+I810_PTE_BASE+(j*4));
+ if (USE_PCI_DMA_API && INTEL_GTT_GEN > 2) {
+ ret = intel_agp_map_memory(mem);
+ if (ret != 0)
+ return ret;
+
+ intel_gtt_insert_sg_entries(mem->sg_list, mem->num_sg,
+ pg_start, type);
+ } else {
+ for (i = 0, j = pg_start; i < mem->page_count; i++, j++) {
+ dma_addr_t addr = page_to_phys(mem->pages[i]);
+ intel_private.driver->write_entry(addr,
+ j, type);
+ }
+ readl(intel_private.gtt+j-1);
}
- readl(intel_private.registers+I810_PTE_BASE+((j-1)*4));
out:
ret = 0;
@@ -982,29 +1044,39 @@ out_err:
return ret;
}
-static int intel_i830_remove_entries(struct agp_memory *mem, off_t pg_start,
- int type)
+static int intel_fake_agp_remove_entries(struct agp_memory *mem,
+ off_t pg_start, int type)
{
int i;
if (mem->page_count == 0)
return 0;
- if (pg_start < intel_private.gtt_entries) {
+ if (pg_start < intel_private.base.gtt_stolen_entries) {
dev_info(&intel_private.pcidev->dev,
"trying to disable local/stolen memory\n");
return -EINVAL;
}
+ if (USE_PCI_DMA_API && INTEL_GTT_GEN > 2)
+ intel_agp_unmap_memory(mem);
+
for (i = pg_start; i < (mem->page_count + pg_start); i++) {
- writel(agp_bridge->scratch_page, intel_private.registers+I810_PTE_BASE+(i*4));
+ intel_private.driver->write_entry(intel_private.scratch_page_dma,
+ i, 0);
}
- readl(intel_private.registers+I810_PTE_BASE+((i-1)*4));
+ readl(intel_private.gtt+i-1);
return 0;
}
-static struct agp_memory *intel_i830_alloc_by_type(size_t pg_count, int type)
+static void intel_fake_agp_chipset_flush(struct agp_bridge_data *bridge)
+{
+ intel_private.driver->chipset_flush();
+}
+
+static struct agp_memory *intel_fake_agp_alloc_by_type(size_t pg_count,
+ int type)
{
if (type == AGP_PHYS_MEMORY)
return alloc_agpphysmem_i8xx(pg_count, type);
@@ -1015,9 +1087,9 @@ static struct agp_memory *intel_i830_alloc_by_type(size_t pg_count, int type)
static int intel_alloc_chipset_flush_resource(void)
{
int ret;
- ret = pci_bus_alloc_resource(agp_bridge->dev->bus, &intel_private.ifp_resource, PAGE_SIZE,
+ ret = pci_bus_alloc_resource(intel_private.bridge_dev->bus, &intel_private.ifp_resource, PAGE_SIZE,
PAGE_SIZE, PCIBIOS_MIN_MEM, 0,
- pcibios_align_resource, agp_bridge->dev);
+ pcibios_align_resource, intel_private.bridge_dev);
return ret;
}
@@ -1027,11 +1099,11 @@ static void intel_i915_setup_chipset_flush(void)
int ret;
u32 temp;
- pci_read_config_dword(agp_bridge->dev, I915_IFPADDR, &temp);
+ pci_read_config_dword(intel_private.bridge_dev, I915_IFPADDR, &temp);
if (!(temp & 0x1)) {
intel_alloc_chipset_flush_resource();
intel_private.resource_valid = 1;
- pci_write_config_dword(agp_bridge->dev, I915_IFPADDR, (intel_private.ifp_resource.start & 0xffffffff) | 0x1);
+ pci_write_config_dword(intel_private.bridge_dev, I915_IFPADDR, (intel_private.ifp_resource.start & 0xffffffff) | 0x1);
} else {
temp &= ~1;
@@ -1050,17 +1122,17 @@ static void intel_i965_g33_setup_chipset_flush(void)
u32 temp_hi, temp_lo;
int ret;
- pci_read_config_dword(agp_bridge->dev, I965_IFPADDR + 4, &temp_hi);
- pci_read_config_dword(agp_bridge->dev, I965_IFPADDR, &temp_lo);
+ pci_read_config_dword(intel_private.bridge_dev, I965_IFPADDR + 4, &temp_hi);
+ pci_read_config_dword(intel_private.bridge_dev, I965_IFPADDR, &temp_lo);
if (!(temp_lo & 0x1)) {
intel_alloc_chipset_flush_resource();
intel_private.resource_valid = 1;
- pci_write_config_dword(agp_bridge->dev, I965_IFPADDR + 4,
+ pci_write_config_dword(intel_private.bridge_dev, I965_IFPADDR + 4,
upper_32_bits(intel_private.ifp_resource.start));
- pci_write_config_dword(agp_bridge->dev, I965_IFPADDR, (intel_private.ifp_resource.start & 0xffffffff) | 0x1);
+ pci_write_config_dword(intel_private.bridge_dev, I965_IFPADDR, (intel_private.ifp_resource.start & 0xffffffff) | 0x1);
} else {
u64 l64;
@@ -1083,7 +1155,7 @@ static void intel_i9xx_setup_flush(void)
if (intel_private.ifp_resource.start)
return;
- if (IS_SNB)
+ if (INTEL_GTT_GEN == 6)
return;
/* setup a resource for this object */
@@ -1091,7 +1163,7 @@ static void intel_i9xx_setup_flush(void)
intel_private.ifp_resource.flags = IORESOURCE_MEM;
/* Setup chipset flush for 915 */
- if (IS_I965 || IS_G33 || IS_G4X) {
+ if (IS_G33 || INTEL_GTT_GEN >= 4) {
intel_i965_g33_setup_chipset_flush();
} else {
intel_i915_setup_chipset_flush();
@@ -1104,41 +1176,7 @@ static void intel_i9xx_setup_flush(void)
"can't ioremap flush page - no chipset flushing\n");
}
-static int intel_i9xx_configure(void)
-{
- struct aper_size_info_fixed *current_size;
- u32 temp;
- u16 gmch_ctrl;
- int i;
-
- current_size = A_SIZE_FIX(agp_bridge->current_size);
-
- pci_read_config_dword(intel_private.pcidev, I915_GMADDR, &temp);
-
- agp_bridge->gart_bus_addr = (temp & PCI_BASE_ADDRESS_MEM_MASK);
-
- pci_read_config_word(agp_bridge->dev, I830_GMCH_CTRL, &gmch_ctrl);
- gmch_ctrl |= I830_GMCH_ENABLED;
- pci_write_config_word(agp_bridge->dev, I830_GMCH_CTRL, gmch_ctrl);
-
- writel(agp_bridge->gatt_bus_addr|I810_PGETBL_ENABLED, intel_private.registers+I810_PGETBL_CTL);
- readl(intel_private.registers+I810_PGETBL_CTL); /* PCI Posting. */
-
- if (agp_bridge->driver->needs_scratch_page) {
- for (i = intel_private.gtt_entries; i < intel_private.gtt_total_size; i++) {
- writel(agp_bridge->scratch_page, intel_private.gtt+i);
- }
- readl(intel_private.gtt+i-1); /* PCI Posting. */
- }
-
- global_cache_flush();
-
- intel_i9xx_setup_flush();
-
- return 0;
-}
-
-static void intel_i915_cleanup(void)
+static void i9xx_cleanup(void)
{
if (intel_private.i9xx_flush_page)
iounmap(intel_private.i9xx_flush_page);
@@ -1146,320 +1184,93 @@ static void intel_i915_cleanup(void)
release_resource(&intel_private.ifp_resource);
intel_private.ifp_resource.start = 0;
intel_private.resource_valid = 0;
- iounmap(intel_private.gtt);
- iounmap(intel_private.registers);
}
-static void intel_i915_chipset_flush(struct agp_bridge_data *bridge)
+static void i9xx_chipset_flush(void)
{
if (intel_private.i9xx_flush_page)
writel(1, intel_private.i9xx_flush_page);
}
-static int intel_i915_insert_entries(struct agp_memory *mem, off_t pg_start,
- int type)
+static void i965_write_entry(dma_addr_t addr, unsigned int entry,
+ unsigned int flags)
{
- int num_entries;
- void *temp;
- int ret = -EINVAL;
- int mask_type;
-
- if (mem->page_count == 0)
- goto out;
-
- temp = agp_bridge->current_size;
- num_entries = A_SIZE_FIX(temp)->num_entries;
-
- if (pg_start < intel_private.gtt_entries) {
- dev_printk(KERN_DEBUG, &intel_private.pcidev->dev,
- "pg_start == 0x%.8lx, intel_private.gtt_entries == 0x%.8x\n",
- pg_start, intel_private.gtt_entries);
-
- dev_info(&intel_private.pcidev->dev,
- "trying to insert into local/stolen memory\n");
- goto out_err;
- }
-
- if ((pg_start + mem->page_count) > num_entries)
- goto out_err;
-
- /* The i915 can't check the GTT for entries since it's read only;
- * depend on the caller to make the correct offset decisions.
- */
-
- if (type != mem->type)
- goto out_err;
-
- mask_type = agp_bridge->driver->agp_type_to_mask_type(agp_bridge, type);
-
- if (!IS_SNB && mask_type != 0 && mask_type != AGP_PHYS_MEMORY &&
- mask_type != INTEL_AGP_CACHED_MEMORY)
- goto out_err;
-
- if (!mem->is_flushed)
- global_cache_flush();
-
- intel_agp_insert_sg_entries(mem, pg_start, mask_type);
-
- out:
- ret = 0;
- out_err:
- mem->is_flushed = true;
- return ret;
+ /* Shift high bits down */
+ addr |= (addr >> 28) & 0xf0;
+ writel(addr | I810_PTE_VALID, intel_private.gtt + entry);
}
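i965-class parts keep a 32-bit PTE but support 36-bit physical addresses by folding address bits 35:32 into PTE bits 7:4, which is what (addr >> 28) & 0xf0 does. Worked example with an illustrative address: for addr = 0x323456000, (addr >> 28) & 0xf0 = 0x30, so the 32-bit value written is 0x23456030 | I810_PTE_VALID.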
-static int intel_i915_remove_entries(struct agp_memory *mem, off_t pg_start,
- int type)
+static bool gen6_check_flags(unsigned int flags)
{
- int i;
-
- if (mem->page_count == 0)
- return 0;
-
- if (pg_start < intel_private.gtt_entries) {
- dev_info(&intel_private.pcidev->dev,
- "trying to disable local/stolen memory\n");
- return -EINVAL;
- }
-
- for (i = pg_start; i < (mem->page_count + pg_start); i++)
- writel(agp_bridge->scratch_page, intel_private.gtt+i);
-
- readl(intel_private.gtt+i-1);
-
- return 0;
+ return true;
}
-/* Return the aperture size by just checking the resource length. The effect
- * described in the spec of the MSAC registers is just changing of the
- * resource size.
- */
-static int intel_i9xx_fetch_size(void)
+static void gen6_write_entry(dma_addr_t addr, unsigned int entry,
+ unsigned int flags)
{
- int num_sizes = ARRAY_SIZE(intel_i830_sizes);
- int aper_size; /* size in megabytes */
- int i;
-
- aper_size = pci_resource_len(intel_private.pcidev, 2) / MB(1);
-
- for (i = 0; i < num_sizes; i++) {
- if (aper_size == intel_i830_sizes[i].size) {
- agp_bridge->current_size = intel_i830_sizes + i;
- return aper_size;
- }
+ unsigned int type_mask = flags & ~AGP_USER_CACHED_MEMORY_GFDT;
+ unsigned int gfdt = flags & AGP_USER_CACHED_MEMORY_GFDT;
+ u32 pte_flags;
+
+ if (type_mask == AGP_USER_MEMORY)
+ pte_flags = GEN6_PTE_UNCACHED | I810_PTE_VALID;
+ else if (type_mask == AGP_USER_CACHED_MEMORY_LLC_MLC) {
+ pte_flags = GEN6_PTE_LLC_MLC | I810_PTE_VALID;
+ if (gfdt)
+ pte_flags |= GEN6_PTE_GFDT;
+ } else { /* set 'normal'/'cached' to LLC by default */
+ pte_flags = GEN6_PTE_LLC | I810_PTE_VALID;
+ if (gfdt)
+ pte_flags |= GEN6_PTE_GFDT;
}
- return 0;
+ /* gen6 has bit11-4 for physical addr bit39-32 */
+ addr |= (addr >> 28) & 0xff0;
+ writel(addr | pte_flags, intel_private.gtt + entry);
}
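Sandybridge widens the same trick: physical address bits 39:32 land in PTE bits 11:4 via (addr >> 28) & 0xff0, and the cacheability bits now come straight from the AGP memory type rather than a mask table. For an illustrative addr = 0x4800000000, (addr >> 28) & 0xff0 = 0x480, so the PTE written is 0x00000480 | pte_flags.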
-static int intel_i915_get_gtt_size(void)
+static void gen6_cleanup(void)
{
- int size;
-
- if (IS_G33) {
- u16 gmch_ctrl;
-
- /* G33's GTT size defined in gmch_ctrl */
- pci_read_config_word(agp_bridge->dev, I830_GMCH_CTRL, &gmch_ctrl);
- switch (gmch_ctrl & I830_GMCH_GMS_MASK) {
- case I830_GMCH_GMS_STOLEN_512:
- size = 512;
- break;
- case I830_GMCH_GMS_STOLEN_1024:
- size = 1024;
- break;
- case I830_GMCH_GMS_STOLEN_8192:
- size = 8*1024;
- break;
- default:
- dev_info(&agp_bridge->dev->dev,
- "unknown page table size 0x%x, assuming 512KB\n",
- (gmch_ctrl & I830_GMCH_GMS_MASK));
- size = 512;
- }
- } else {
- /* On previous hardware, the GTT size was just what was
- * required to map the aperture.
- */
- size = agp_bridge->driver->fetch_size();
- }
-
- return KB(size);
}
-/* The intel i915 automatically initializes the agp aperture during POST.
- * Use the memory already set aside for in the GTT.
- */
-static int intel_i915_create_gatt_table(struct agp_bridge_data *bridge)
+static int i9xx_setup(void)
{
- int page_order;
- struct aper_size_info_fixed *size;
- int num_entries;
- u32 temp, temp2;
- int gtt_map_size;
-
- size = agp_bridge->current_size;
- page_order = size->page_order;
- num_entries = size->num_entries;
- agp_bridge->gatt_table_real = NULL;
-
- pci_read_config_dword(intel_private.pcidev, I915_MMADDR, &temp);
- pci_read_config_dword(intel_private.pcidev, I915_PTEADDR, &temp2);
-
- gtt_map_size = intel_i915_get_gtt_size();
+ u32 reg_addr;
- intel_private.gtt = ioremap(temp2, gtt_map_size);
- if (!intel_private.gtt)
- return -ENOMEM;
-
- intel_private.gtt_total_size = gtt_map_size / 4;
+ pci_read_config_dword(intel_private.pcidev, I915_MMADDR, &reg_addr);
- temp &= 0xfff80000;
+ reg_addr &= 0xfff80000;
- intel_private.registers = ioremap(temp, 128 * 4096);
- if (!intel_private.registers) {
- iounmap(intel_private.gtt);
- return -ENOMEM;
- }
-
- temp = readl(intel_private.registers+I810_PGETBL_CTL) & 0xfffff000;
- global_cache_flush(); /* FIXME: ? */
-
- /* we have to call this as early as possible after the MMIO base address is known */
- intel_i830_init_gtt_entries();
- if (intel_private.gtt_entries == 0) {
- iounmap(intel_private.gtt);
- iounmap(intel_private.registers);
+ intel_private.registers = ioremap(reg_addr, 128 * 4096);
+ if (!intel_private.registers)
return -ENOMEM;
- }
-
- agp_bridge->gatt_table = NULL;
-
- agp_bridge->gatt_bus_addr = temp;
-
- return 0;
-}
-/*
- * The i965 supports 36-bit physical addresses, but to keep
- * the format of the GTT the same, the bits that don't fit
- * in a 32-bit word are shifted down to bits 4..7.
- *
- * Gcc is smart enough to notice that "(addr >> 28) & 0xf0"
- * is always zero on 32-bit architectures, so no need to make
- * this conditional.
- */
-static unsigned long intel_i965_mask_memory(struct agp_bridge_data *bridge,
- dma_addr_t addr, int type)
-{
- /* Shift high bits down */
- addr |= (addr >> 28) & 0xf0;
+ if (INTEL_GTT_GEN == 3) {
+ u32 gtt_addr;
- /* Type checking must be done elsewhere */
- return addr | bridge->driver->masks[type].mask;
-}
-
-static unsigned long intel_gen6_mask_memory(struct agp_bridge_data *bridge,
- dma_addr_t addr, int type)
-{
- /* gen6 has bit11-4 for physical addr bit39-32 */
- addr |= (addr >> 28) & 0xff0;
-
- /* Type checking must be done elsewhere */
- return addr | bridge->driver->masks[type].mask;
-}
-
-static void intel_i965_get_gtt_range(int *gtt_offset, int *gtt_size)
-{
- u16 snb_gmch_ctl;
-
- switch (agp_bridge->dev->device) {
- case PCI_DEVICE_ID_INTEL_GM45_HB:
- case PCI_DEVICE_ID_INTEL_EAGLELAKE_HB:
- case PCI_DEVICE_ID_INTEL_Q45_HB:
- case PCI_DEVICE_ID_INTEL_G45_HB:
- case PCI_DEVICE_ID_INTEL_G41_HB:
- case PCI_DEVICE_ID_INTEL_B43_HB:
- case PCI_DEVICE_ID_INTEL_IRONLAKE_D_HB:
- case PCI_DEVICE_ID_INTEL_IRONLAKE_M_HB:
- case PCI_DEVICE_ID_INTEL_IRONLAKE_MA_HB:
- case PCI_DEVICE_ID_INTEL_IRONLAKE_MC2_HB:
- *gtt_offset = *gtt_size = MB(2);
- break;
- case PCI_DEVICE_ID_INTEL_SANDYBRIDGE_HB:
- case PCI_DEVICE_ID_INTEL_SANDYBRIDGE_M_HB:
- case PCI_DEVICE_ID_INTEL_SANDYBRIDGE_S_HB:
- *gtt_offset = MB(2);
+ pci_read_config_dword(intel_private.pcidev,
+ I915_PTEADDR, &gtt_addr);
+ intel_private.gtt_bus_addr = gtt_addr;
+ } else {
+ u32 gtt_offset;
- pci_read_config_word(intel_private.pcidev, SNB_GMCH_CTRL, &snb_gmch_ctl);
- switch (snb_gmch_ctl & SNB_GTT_SIZE_MASK) {
- default:
- case SNB_GTT_SIZE_0M:
- printk(KERN_ERR "Bad GTT size mask: 0x%04x.\n", snb_gmch_ctl);
- *gtt_size = MB(0);
+ switch (INTEL_GTT_GEN) {
+ case 5:
+ case 6:
+ gtt_offset = MB(2);
break;
- case SNB_GTT_SIZE_1M:
- *gtt_size = MB(1);
- break;
- case SNB_GTT_SIZE_2M:
- *gtt_size = MB(2);
+ case 4:
+ default:
+ gtt_offset = KB(512);
break;
}
- break;
- default:
- *gtt_offset = *gtt_size = KB(512);
- }
-}
-
-/* The intel i965 automatically initializes the agp aperture during POST.
- * Use the memory already set aside for in the GTT.
- */
-static int intel_i965_create_gatt_table(struct agp_bridge_data *bridge)
-{
- int page_order;
- struct aper_size_info_fixed *size;
- int num_entries;
- u32 temp;
- int gtt_offset, gtt_size;
-
- size = agp_bridge->current_size;
- page_order = size->page_order;
- num_entries = size->num_entries;
- agp_bridge->gatt_table_real = NULL;
-
- pci_read_config_dword(intel_private.pcidev, I915_MMADDR, &temp);
-
- temp &= 0xfff00000;
-
- intel_i965_get_gtt_range(&gtt_offset, &gtt_size);
-
- intel_private.gtt = ioremap((temp + gtt_offset) , gtt_size);
-
- if (!intel_private.gtt)
- return -ENOMEM;
-
- intel_private.gtt_total_size = gtt_size / 4;
-
- intel_private.registers = ioremap(temp, 128 * 4096);
- if (!intel_private.registers) {
- iounmap(intel_private.gtt);
- return -ENOMEM;
+ intel_private.gtt_bus_addr = reg_addr + gtt_offset;
}
- temp = readl(intel_private.registers+I810_PGETBL_CTL) & 0xfffff000;
- global_cache_flush(); /* FIXME: ? */
-
- /* we have to call this as early as possible after the MMIO base address is known */
- intel_i830_init_gtt_entries();
- if (intel_private.gtt_entries == 0) {
- iounmap(intel_private.gtt);
- iounmap(intel_private.registers);
- return -ENOMEM;
- }
-
- agp_bridge->gatt_table = NULL;
+ intel_private.pte_bus_addr =
+ readl(intel_private.registers+I810_PGETBL_CTL) & 0xfffff000;
- agp_bridge->gatt_bus_addr = temp;
+ intel_i9xx_setup_flush();
return 0;
}
@@ -1475,7 +1286,7 @@ static const struct agp_bridge_driver intel_810_driver = {
.cleanup = intel_i810_cleanup,
.mask_memory = intel_i810_mask_memory,
.masks = intel_i810_masks,
- .agp_enable = intel_i810_agp_enable,
+ .agp_enable = intel_fake_agp_enable,
.cache_flush = global_cache_flush,
.create_gatt_table = agp_generic_create_gatt_table,
.free_gatt_table = agp_generic_free_gatt_table,
@@ -1490,161 +1301,282 @@ static const struct agp_bridge_driver intel_810_driver = {
.agp_type_to_mask_type = agp_generic_type_to_mask_type,
};
-static const struct agp_bridge_driver intel_830_driver = {
+static const struct agp_bridge_driver intel_fake_agp_driver = {
.owner = THIS_MODULE,
- .aperture_sizes = intel_i830_sizes,
.size_type = FIXED_APER_SIZE,
- .num_aperture_sizes = 4,
- .needs_scratch_page = true,
- .configure = intel_i830_configure,
- .fetch_size = intel_i830_fetch_size,
- .cleanup = intel_i830_cleanup,
- .mask_memory = intel_i810_mask_memory,
- .masks = intel_i810_masks,
- .agp_enable = intel_i810_agp_enable,
+ .aperture_sizes = intel_fake_agp_sizes,
+ .num_aperture_sizes = ARRAY_SIZE(intel_fake_agp_sizes),
+ .configure = intel_fake_agp_configure,
+ .fetch_size = intel_fake_agp_fetch_size,
+ .cleanup = intel_gtt_cleanup,
+ .agp_enable = intel_fake_agp_enable,
.cache_flush = global_cache_flush,
- .create_gatt_table = intel_i830_create_gatt_table,
- .free_gatt_table = intel_i830_free_gatt_table,
- .insert_memory = intel_i830_insert_entries,
- .remove_memory = intel_i830_remove_entries,
- .alloc_by_type = intel_i830_alloc_by_type,
+ .create_gatt_table = intel_fake_agp_create_gatt_table,
+ .free_gatt_table = intel_fake_agp_free_gatt_table,
+ .insert_memory = intel_fake_agp_insert_entries,
+ .remove_memory = intel_fake_agp_remove_entries,
+ .alloc_by_type = intel_fake_agp_alloc_by_type,
.free_by_type = intel_i810_free_by_type,
.agp_alloc_page = agp_generic_alloc_page,
.agp_alloc_pages = agp_generic_alloc_pages,
.agp_destroy_page = agp_generic_destroy_page,
.agp_destroy_pages = agp_generic_destroy_pages,
- .agp_type_to_mask_type = intel_i830_type_to_mask_type,
- .chipset_flush = intel_i830_chipset_flush,
+ .chipset_flush = intel_fake_agp_chipset_flush,
};
-static const struct agp_bridge_driver intel_915_driver = {
- .owner = THIS_MODULE,
- .aperture_sizes = intel_i830_sizes,
- .size_type = FIXED_APER_SIZE,
- .num_aperture_sizes = 4,
- .needs_scratch_page = true,
- .configure = intel_i9xx_configure,
- .fetch_size = intel_i9xx_fetch_size,
- .cleanup = intel_i915_cleanup,
- .mask_memory = intel_i810_mask_memory,
- .masks = intel_i810_masks,
- .agp_enable = intel_i810_agp_enable,
- .cache_flush = global_cache_flush,
- .create_gatt_table = intel_i915_create_gatt_table,
- .free_gatt_table = intel_i830_free_gatt_table,
- .insert_memory = intel_i915_insert_entries,
- .remove_memory = intel_i915_remove_entries,
- .alloc_by_type = intel_i830_alloc_by_type,
- .free_by_type = intel_i810_free_by_type,
- .agp_alloc_page = agp_generic_alloc_page,
- .agp_alloc_pages = agp_generic_alloc_pages,
- .agp_destroy_page = agp_generic_destroy_page,
- .agp_destroy_pages = agp_generic_destroy_pages,
- .agp_type_to_mask_type = intel_i830_type_to_mask_type,
- .chipset_flush = intel_i915_chipset_flush,
-#ifdef USE_PCI_DMA_API
- .agp_map_page = intel_agp_map_page,
- .agp_unmap_page = intel_agp_unmap_page,
- .agp_map_memory = intel_agp_map_memory,
- .agp_unmap_memory = intel_agp_unmap_memory,
-#endif
+static const struct intel_gtt_driver i81x_gtt_driver = {
+ .gen = 1,
+ .dma_mask_size = 32,
};
-
-static const struct agp_bridge_driver intel_i965_driver = {
- .owner = THIS_MODULE,
- .aperture_sizes = intel_i830_sizes,
- .size_type = FIXED_APER_SIZE,
- .num_aperture_sizes = 4,
- .needs_scratch_page = true,
- .configure = intel_i9xx_configure,
- .fetch_size = intel_i9xx_fetch_size,
- .cleanup = intel_i915_cleanup,
- .mask_memory = intel_i965_mask_memory,
- .masks = intel_i810_masks,
- .agp_enable = intel_i810_agp_enable,
- .cache_flush = global_cache_flush,
- .create_gatt_table = intel_i965_create_gatt_table,
- .free_gatt_table = intel_i830_free_gatt_table,
- .insert_memory = intel_i915_insert_entries,
- .remove_memory = intel_i915_remove_entries,
- .alloc_by_type = intel_i830_alloc_by_type,
- .free_by_type = intel_i810_free_by_type,
- .agp_alloc_page = agp_generic_alloc_page,
- .agp_alloc_pages = agp_generic_alloc_pages,
- .agp_destroy_page = agp_generic_destroy_page,
- .agp_destroy_pages = agp_generic_destroy_pages,
- .agp_type_to_mask_type = intel_i830_type_to_mask_type,
- .chipset_flush = intel_i915_chipset_flush,
-#ifdef USE_PCI_DMA_API
- .agp_map_page = intel_agp_map_page,
- .agp_unmap_page = intel_agp_unmap_page,
- .agp_map_memory = intel_agp_map_memory,
- .agp_unmap_memory = intel_agp_unmap_memory,
-#endif
+static const struct intel_gtt_driver i8xx_gtt_driver = {
+ .gen = 2,
+ .setup = i830_setup,
+ .cleanup = i830_cleanup,
+ .write_entry = i830_write_entry,
+ .dma_mask_size = 32,
+ .check_flags = i830_check_flags,
+ .chipset_flush = i830_chipset_flush,
};
-
-static const struct agp_bridge_driver intel_gen6_driver = {
- .owner = THIS_MODULE,
- .aperture_sizes = intel_i830_sizes,
- .size_type = FIXED_APER_SIZE,
- .num_aperture_sizes = 4,
- .needs_scratch_page = true,
- .configure = intel_i9xx_configure,
- .fetch_size = intel_i9xx_fetch_size,
- .cleanup = intel_i915_cleanup,
- .mask_memory = intel_gen6_mask_memory,
- .masks = intel_gen6_masks,
- .agp_enable = intel_i810_agp_enable,
- .cache_flush = global_cache_flush,
- .create_gatt_table = intel_i965_create_gatt_table,
- .free_gatt_table = intel_i830_free_gatt_table,
- .insert_memory = intel_i915_insert_entries,
- .remove_memory = intel_i915_remove_entries,
- .alloc_by_type = intel_i830_alloc_by_type,
- .free_by_type = intel_i810_free_by_type,
- .agp_alloc_page = agp_generic_alloc_page,
- .agp_alloc_pages = agp_generic_alloc_pages,
- .agp_destroy_page = agp_generic_destroy_page,
- .agp_destroy_pages = agp_generic_destroy_pages,
- .agp_type_to_mask_type = intel_gen6_type_to_mask_type,
- .chipset_flush = intel_i915_chipset_flush,
-#ifdef USE_PCI_DMA_API
- .agp_map_page = intel_agp_map_page,
- .agp_unmap_page = intel_agp_unmap_page,
- .agp_map_memory = intel_agp_map_memory,
- .agp_unmap_memory = intel_agp_unmap_memory,
-#endif
+static const struct intel_gtt_driver i915_gtt_driver = {
+ .gen = 3,
+ .setup = i9xx_setup,
+ .cleanup = i9xx_cleanup,
+ /* i945 is the last gpu to need phys mem (for overlay and cursors). */
+ .write_entry = i830_write_entry,
+ .dma_mask_size = 32,
+ .check_flags = i830_check_flags,
+ .chipset_flush = i9xx_chipset_flush,
+};
+static const struct intel_gtt_driver g33_gtt_driver = {
+ .gen = 3,
+ .is_g33 = 1,
+ .setup = i9xx_setup,
+ .cleanup = i9xx_cleanup,
+ .write_entry = i965_write_entry,
+ .dma_mask_size = 36,
+ .check_flags = i830_check_flags,
+ .chipset_flush = i9xx_chipset_flush,
+};
+static const struct intel_gtt_driver pineview_gtt_driver = {
+ .gen = 3,
+ .is_pineview = 1, .is_g33 = 1,
+ .setup = i9xx_setup,
+ .cleanup = i9xx_cleanup,
+ .write_entry = i965_write_entry,
+ .dma_mask_size = 36,
+ .check_flags = i830_check_flags,
+ .chipset_flush = i9xx_chipset_flush,
+};
+static const struct intel_gtt_driver i965_gtt_driver = {
+ .gen = 4,
+ .setup = i9xx_setup,
+ .cleanup = i9xx_cleanup,
+ .write_entry = i965_write_entry,
+ .dma_mask_size = 36,
+ .check_flags = i830_check_flags,
+ .chipset_flush = i9xx_chipset_flush,
+};
+static const struct intel_gtt_driver g4x_gtt_driver = {
+ .gen = 5,
+ .setup = i9xx_setup,
+ .cleanup = i9xx_cleanup,
+ .write_entry = i965_write_entry,
+ .dma_mask_size = 36,
+ .check_flags = i830_check_flags,
+ .chipset_flush = i9xx_chipset_flush,
+};
+static const struct intel_gtt_driver ironlake_gtt_driver = {
+ .gen = 5,
+ .is_ironlake = 1,
+ .setup = i9xx_setup,
+ .cleanup = i9xx_cleanup,
+ .write_entry = i965_write_entry,
+ .dma_mask_size = 36,
+ .check_flags = i830_check_flags,
+ .chipset_flush = i9xx_chipset_flush,
+};
+static const struct intel_gtt_driver sandybridge_gtt_driver = {
+ .gen = 6,
+ .setup = i9xx_setup,
+ .cleanup = gen6_cleanup,
+ .write_entry = gen6_write_entry,
+ .dma_mask_size = 40,
+ .check_flags = gen6_check_flags,
+ .chipset_flush = i9xx_chipset_flush,
};
-static const struct agp_bridge_driver intel_g33_driver = {
- .owner = THIS_MODULE,
- .aperture_sizes = intel_i830_sizes,
- .size_type = FIXED_APER_SIZE,
- .num_aperture_sizes = 4,
- .needs_scratch_page = true,
- .configure = intel_i9xx_configure,
- .fetch_size = intel_i9xx_fetch_size,
- .cleanup = intel_i915_cleanup,
- .mask_memory = intel_i965_mask_memory,
- .masks = intel_i810_masks,
- .agp_enable = intel_i810_agp_enable,
- .cache_flush = global_cache_flush,
- .create_gatt_table = intel_i915_create_gatt_table,
- .free_gatt_table = intel_i830_free_gatt_table,
- .insert_memory = intel_i915_insert_entries,
- .remove_memory = intel_i915_remove_entries,
- .alloc_by_type = intel_i830_alloc_by_type,
- .free_by_type = intel_i810_free_by_type,
- .agp_alloc_page = agp_generic_alloc_page,
- .agp_alloc_pages = agp_generic_alloc_pages,
- .agp_destroy_page = agp_generic_destroy_page,
- .agp_destroy_pages = agp_generic_destroy_pages,
- .agp_type_to_mask_type = intel_i830_type_to_mask_type,
- .chipset_flush = intel_i915_chipset_flush,
-#ifdef USE_PCI_DMA_API
- .agp_map_page = intel_agp_map_page,
- .agp_unmap_page = intel_agp_unmap_page,
- .agp_map_memory = intel_agp_map_memory,
- .agp_unmap_memory = intel_agp_unmap_memory,
-#endif
+/* Table to describe Intel GMCH and AGP/PCIE GART drivers. At least one of
+ * driver and gmch_driver must be non-null, and find_gmch will determine
+ * which one should be used if a gmch_chip_id is present.
+ */
+static const struct intel_gtt_driver_description {
+ unsigned int gmch_chip_id;
+ char *name;
+ const struct agp_bridge_driver *gmch_driver;
+ const struct intel_gtt_driver *gtt_driver;
+} intel_gtt_chipsets[] = {
+ { PCI_DEVICE_ID_INTEL_82810_IG1, "i810", &intel_810_driver,
+ &i81x_gtt_driver},
+ { PCI_DEVICE_ID_INTEL_82810_IG3, "i810", &intel_810_driver,
+ &i81x_gtt_driver},
+ { PCI_DEVICE_ID_INTEL_82810E_IG, "i810", &intel_810_driver,
+ &i81x_gtt_driver},
+ { PCI_DEVICE_ID_INTEL_82815_CGC, "i815", &intel_810_driver,
+ &i81x_gtt_driver},
+ { PCI_DEVICE_ID_INTEL_82830_CGC, "830M",
+ &intel_fake_agp_driver, &i8xx_gtt_driver},
+ { PCI_DEVICE_ID_INTEL_82845G_IG, "830M",
+ &intel_fake_agp_driver, &i8xx_gtt_driver},
+ { PCI_DEVICE_ID_INTEL_82854_IG, "854",
+ &intel_fake_agp_driver, &i8xx_gtt_driver},
+ { PCI_DEVICE_ID_INTEL_82855GM_IG, "855GM",
+ &intel_fake_agp_driver, &i8xx_gtt_driver},
+ { PCI_DEVICE_ID_INTEL_82865_IG, "865",
+ &intel_fake_agp_driver, &i8xx_gtt_driver},
+ { PCI_DEVICE_ID_INTEL_E7221_IG, "E7221 (i915)",
+ &intel_fake_agp_driver, &i915_gtt_driver },
+ { PCI_DEVICE_ID_INTEL_82915G_IG, "915G",
+ &intel_fake_agp_driver, &i915_gtt_driver },
+ { PCI_DEVICE_ID_INTEL_82915GM_IG, "915GM",
+ &intel_fake_agp_driver, &i915_gtt_driver },
+ { PCI_DEVICE_ID_INTEL_82945G_IG, "945G",
+ &intel_fake_agp_driver, &i915_gtt_driver },
+ { PCI_DEVICE_ID_INTEL_82945GM_IG, "945GM",
+ &intel_fake_agp_driver, &i915_gtt_driver },
+ { PCI_DEVICE_ID_INTEL_82945GME_IG, "945GME",
+ &intel_fake_agp_driver, &i915_gtt_driver },
+ { PCI_DEVICE_ID_INTEL_82946GZ_IG, "946GZ",
+ &intel_fake_agp_driver, &i965_gtt_driver },
+ { PCI_DEVICE_ID_INTEL_82G35_IG, "G35",
+ &intel_fake_agp_driver, &i965_gtt_driver },
+ { PCI_DEVICE_ID_INTEL_82965Q_IG, "965Q",
+ &intel_fake_agp_driver, &i965_gtt_driver },
+ { PCI_DEVICE_ID_INTEL_82965G_IG, "965G",
+ &intel_fake_agp_driver, &i965_gtt_driver },
+ { PCI_DEVICE_ID_INTEL_82965GM_IG, "965GM",
+ &intel_fake_agp_driver, &i965_gtt_driver },
+ { PCI_DEVICE_ID_INTEL_82965GME_IG, "965GME/GLE",
+ &intel_fake_agp_driver, &i965_gtt_driver },
+ { PCI_DEVICE_ID_INTEL_G33_IG, "G33",
+ &intel_fake_agp_driver, &g33_gtt_driver },
+ { PCI_DEVICE_ID_INTEL_Q35_IG, "Q35",
+ &intel_fake_agp_driver, &g33_gtt_driver },
+ { PCI_DEVICE_ID_INTEL_Q33_IG, "Q33",
+ &intel_fake_agp_driver, &g33_gtt_driver },
+ { PCI_DEVICE_ID_INTEL_PINEVIEW_M_IG, "GMA3150",
+ &intel_fake_agp_driver, &pineview_gtt_driver },
+ { PCI_DEVICE_ID_INTEL_PINEVIEW_IG, "GMA3150",
+ &intel_fake_agp_driver, &pineview_gtt_driver },
+ { PCI_DEVICE_ID_INTEL_GM45_IG, "GM45",
+ &intel_fake_agp_driver, &g4x_gtt_driver },
+ { PCI_DEVICE_ID_INTEL_EAGLELAKE_IG, "Eaglelake",
+ &intel_fake_agp_driver, &g4x_gtt_driver },
+ { PCI_DEVICE_ID_INTEL_Q45_IG, "Q45/Q43",
+ &intel_fake_agp_driver, &g4x_gtt_driver },
+ { PCI_DEVICE_ID_INTEL_G45_IG, "G45/G43",
+ &intel_fake_agp_driver, &g4x_gtt_driver },
+ { PCI_DEVICE_ID_INTEL_B43_IG, "B43",
+ &intel_fake_agp_driver, &g4x_gtt_driver },
+ { PCI_DEVICE_ID_INTEL_B43_1_IG, "B43",
+ &intel_fake_agp_driver, &g4x_gtt_driver },
+ { PCI_DEVICE_ID_INTEL_G41_IG, "G41",
+ &intel_fake_agp_driver, &g4x_gtt_driver },
+ { PCI_DEVICE_ID_INTEL_IRONLAKE_D_IG,
+ "HD Graphics", &intel_fake_agp_driver, &ironlake_gtt_driver },
+ { PCI_DEVICE_ID_INTEL_IRONLAKE_M_IG,
+ "HD Graphics", &intel_fake_agp_driver, &ironlake_gtt_driver },
+ { PCI_DEVICE_ID_INTEL_SANDYBRIDGE_GT1_IG,
+ "Sandybridge", &intel_fake_agp_driver, &sandybridge_gtt_driver },
+ { PCI_DEVICE_ID_INTEL_SANDYBRIDGE_GT2_IG,
+ "Sandybridge", &intel_fake_agp_driver, &sandybridge_gtt_driver },
+ { PCI_DEVICE_ID_INTEL_SANDYBRIDGE_GT2_PLUS_IG,
+ "Sandybridge", &intel_fake_agp_driver, &sandybridge_gtt_driver },
+ { PCI_DEVICE_ID_INTEL_SANDYBRIDGE_M_GT1_IG,
+ "Sandybridge", &intel_fake_agp_driver, &sandybridge_gtt_driver },
+ { PCI_DEVICE_ID_INTEL_SANDYBRIDGE_M_GT2_IG,
+ "Sandybridge", &intel_fake_agp_driver, &sandybridge_gtt_driver },
+ { PCI_DEVICE_ID_INTEL_SANDYBRIDGE_M_GT2_PLUS_IG,
+ "Sandybridge", &intel_fake_agp_driver, &sandybridge_gtt_driver },
+ { PCI_DEVICE_ID_INTEL_SANDYBRIDGE_S_IG,
+ "Sandybridge", &intel_fake_agp_driver, &sandybridge_gtt_driver },
+ { 0, NULL, NULL }
};
+
+static int find_gmch(u16 device)
+{
+ struct pci_dev *gmch_device;
+
+ gmch_device = pci_get_device(PCI_VENDOR_ID_INTEL, device, NULL);
+ if (gmch_device && PCI_FUNC(gmch_device->devfn) != 0) {
+ gmch_device = pci_get_device(PCI_VENDOR_ID_INTEL,
+ device, gmch_device);
+ }
+
+ if (!gmch_device)
+ return 0;
+
+ intel_private.pcidev = gmch_device;
+ return 1;
+}
+
+int intel_gmch_probe(struct pci_dev *pdev,
+ struct agp_bridge_data *bridge)
+{
+ int i, mask;
+ bridge->driver = NULL;
+
+ for (i = 0; intel_gtt_chipsets[i].name != NULL; i++) {
+ if (find_gmch(intel_gtt_chipsets[i].gmch_chip_id)) {
+ bridge->driver =
+ intel_gtt_chipsets[i].gmch_driver;
+ intel_private.driver =
+ intel_gtt_chipsets[i].gtt_driver;
+ break;
+ }
+ }
+
+ if (!bridge->driver)
+ return 0;
+
+ bridge->dev_private_data = &intel_private;
+ bridge->dev = pdev;
+
+ intel_private.bridge_dev = pci_dev_get(pdev);
+
+ dev_info(&pdev->dev, "Intel %s Chipset\n", intel_gtt_chipsets[i].name);
+
+ mask = intel_private.driver->dma_mask_size;
+ if (pci_set_dma_mask(intel_private.pcidev, DMA_BIT_MASK(mask)))
+ dev_err(&intel_private.pcidev->dev,
+ "set gfx device dma mask %d-bit failed!\n", mask);
+ else
+ pci_set_consistent_dma_mask(intel_private.pcidev,
+ DMA_BIT_MASK(mask));
+
+ if (bridge->driver == &intel_810_driver)
+ return 1;
+
+ if (intel_gtt_init() != 0)
+ return 0;
+
+ return 1;
+}
+EXPORT_SYMBOL(intel_gmch_probe);
+
+struct intel_gtt *intel_gtt_get(void)
+{
+ return &intel_private.base;
+}
+EXPORT_SYMBOL(intel_gtt_get);
+
+void intel_gmch_remove(struct pci_dev *pdev)
+{
+ if (intel_private.pcidev)
+ pci_dev_put(intel_private.pcidev);
+ if (intel_private.bridge_dev)
+ pci_dev_put(intel_private.bridge_dev);
+}
+EXPORT_SYMBOL(intel_gmch_remove);
+
+MODULE_AUTHOR("Dave Jones <davej@redhat.com>");
+MODULE_LICENSE("GPL and additional rights");
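The probe path added above is table driven: intel_gmch_probe() walks intel_gtt_chipsets[] until find_gmch() locates the matching integrated-graphics PCI function, then records that entry's gmch_driver and gtt_driver and applies the gtt_driver's DMA mask. A minimal standalone sketch of the same id-to-descriptor lookup follows; the names and ids in it are illustrative, not kernel code.

#include <stdio.h>

struct chip_desc {
	unsigned int chip_id;       /* analogous to gmch_chip_id */
	const char *name;           /* analogous to the chipset name */
	int dma_mask_size;          /* analogous to gtt_driver->dma_mask_size */
};

static const struct chip_desc chips[] = {
	{ 0x0102, "example-gen6", 40 },
	{ 0x2a42, "example-gm45", 36 },
	{ 0, NULL, 0 }              /* sentinel, like the { 0, NULL, NULL } entry */
};

static const struct chip_desc *find_desc(unsigned int id)
{
	int i;

	for (i = 0; chips[i].name != NULL; i++)
		if (chips[i].chip_id == id)
			return &chips[i];
	return NULL;                /* no match: the probe would return 0 */
}

int main(void)
{
	const struct chip_desc *d = find_desc(0x2a42);

	if (d)
		printf("matched %s, %d-bit DMA mask\n", d->name, d->dma_mask_size);
	return 0;
}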
diff --git a/drivers/char/agp/parisc-agp.c b/drivers/char/agp/parisc-agp.c
index 1c129211302d..94821ab01c6d 100644
--- a/drivers/char/agp/parisc-agp.c
+++ b/drivers/char/agp/parisc-agp.c
@@ -19,6 +19,7 @@
#include <linux/klist.h>
#include <linux/agp_backend.h>
#include <linux/log2.h>
+#include <linux/slab.h>
#include <asm/parisc-device.h>
#include <asm/ropes.h>
@@ -358,8 +359,12 @@ parisc_agp_setup(void __iomem *ioc_hpa, void __iomem *lba_hpa)
bridge->dev = fake_bridge_dev;
error = agp_add_bridge(bridge);
+ if (error)
+ goto fail;
+ return 0;
fail:
+ kfree(fake_bridge_dev);
return error;
}
diff --git a/drivers/char/amiserial.c b/drivers/char/amiserial.c
index b0a70461a12c..6ee3348bc3e4 100644
--- a/drivers/char/amiserial.c
+++ b/drivers/char/amiserial.c
@@ -81,7 +81,6 @@ static char *serial_version = "4.30";
#include <linux/mm.h>
#include <linux/seq_file.h>
#include <linux/slab.h>
-#include <linux/smp_lock.h>
#include <linux/init.h>
#include <linux/bitops.h>
#include <linux/platform_device.h>
@@ -1299,7 +1298,6 @@ static int rs_ioctl(struct tty_struct *tty, struct file * file,
{
struct async_struct * info = tty->driver_data;
struct async_icount cprev, cnow; /* kernel counter temps */
- struct serial_icounter_struct icount;
void __user *argp = (void __user *)arg;
unsigned long flags;
diff --git a/drivers/char/applicom.c b/drivers/char/applicom.c
index e7ba774beda6..25373df1dcf8 100644
--- a/drivers/char/applicom.c
+++ b/drivers/char/applicom.c
@@ -566,6 +566,7 @@ static ssize_t ac_read (struct file *filp, char __user *buf, size_t count, loff_
struct mailbox mailbox;
/* Got a packet for us */
+ memset(&st_loc, 0, sizeof(st_loc));
ret = do_ac_read(i, buf, &st_loc, &mailbox);
spin_unlock_irqrestore(&apbs[i].mutex, flags);
set_current_state(TASK_RUNNING);
diff --git a/drivers/char/briq_panel.c b/drivers/char/briq_panel.c
index f6718f05dad4..095ab90535ce 100644
--- a/drivers/char/briq_panel.c
+++ b/drivers/char/briq_panel.c
@@ -6,7 +6,6 @@
#include <linux/module.h>
-#include <linux/smp_lock.h>
#include <linux/types.h>
#include <linux/errno.h>
#include <linux/tty.h>
diff --git a/drivers/char/hpet.c b/drivers/char/hpet.c
index a4eee324eb1e..7066e801b9d3 100644
--- a/drivers/char/hpet.c
+++ b/drivers/char/hpet.c
@@ -14,7 +14,6 @@
#include <linux/interrupt.h>
#include <linux/module.h>
#include <linux/kernel.h>
-#include <linux/smp_lock.h>
#include <linux/types.h>
#include <linux/miscdevice.h>
#include <linux/major.h>
@@ -32,12 +31,12 @@
#include <linux/bitops.h>
#include <linux/compat.h>
#include <linux/clocksource.h>
+#include <linux/uaccess.h>
#include <linux/slab.h>
+#include <linux/io.h>
#include <asm/current.h>
-#include <asm/uaccess.h>
#include <asm/system.h>
-#include <asm/io.h>
#include <asm/irq.h>
#include <asm/div64.h>
@@ -81,13 +80,13 @@ static cycle_t read_hpet(struct clocksource *cs)
}
static struct clocksource clocksource_hpet = {
- .name = "hpet",
- .rating = 250,
- .read = read_hpet,
- .mask = CLOCKSOURCE_MASK(64),
- .mult = 0, /* to be calculated */
- .shift = 10,
- .flags = CLOCK_SOURCE_IS_CONTINUOUS,
+ .name = "hpet",
+ .rating = 250,
+ .read = read_hpet,
+ .mask = CLOCKSOURCE_MASK(64),
+ .mult = 0, /* to be calculated */
+ .shift = 10,
+ .flags = CLOCK_SOURCE_IS_CONTINUOUS,
};
static struct clocksource *hpet_clocksource;
#endif
@@ -465,6 +464,21 @@ static int hpet_ioctl_ieon(struct hpet_dev *devp)
if (irq) {
unsigned long irq_flags;
+ if (devp->hd_flags & HPET_SHARED_IRQ) {
+ /*
+ * To prevent the interrupt handler from seeing an
+ * unwanted interrupt status bit, program the timer
+ * so that it will not fire in the near future ...
+ */
+ writel(readl(&timer->hpet_config) & ~Tn_TYPE_CNF_MASK,
+ &timer->hpet_config);
+ write_counter(read_counter(&hpet->hpet_mc),
+ &timer->hpet_compare);
+ /* ... and clear any left-over status. */
+ isr = 1 << (devp - devp->hd_hpets->hp_dev);
+ writel(isr, &hpet->hpet_isr);
+ }
+
sprintf(devp->hd_name, "hpet%d", (int)(devp - hpetp->hp_dev));
irq_flags = devp->hd_flags & HPET_SHARED_IRQ
? IRQF_SHARED : IRQF_DISABLED;
@@ -581,11 +595,10 @@ hpet_ioctl_common(struct hpet_dev *devp, int cmd, unsigned long arg,
break;
case HPET_INFO:
{
+ memset(info, 0, sizeof(*info));
if (devp->hd_ireqfreq)
info->hi_ireqfreq =
hpet_time_div(hpetp, devp->hd_ireqfreq);
- else
- info->hi_ireqfreq = 0;
info->hi_flags =
readq(&timer->hpet_config) & Tn_PER_INT_CAP_MASK;
info->hi_hpet = hpetp->hp_which;
@@ -811,7 +824,7 @@ int hpet_alloc(struct hpet_data *hdp)
struct hpets *hpetp;
size_t siz;
struct hpet __iomem *hpet;
- static struct hpets *last = NULL;
+ static struct hpets *last;
unsigned long period;
unsigned long long temp;
u32 remainder;
@@ -1000,6 +1013,8 @@ static int hpet_acpi_add(struct acpi_device *device)
return -ENODEV;
if (!data.hd_address || !data.hd_nirqs) {
+ if (data.hd_address)
+ iounmap(data.hd_address);
printk("%s: no address or irqs in _CRS\n", __func__);
return -ENODEV;
}
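The HPET_INFO hunk above zeroes the whole reply structure up front instead of clearing individual fields, so padding and members that are never written cannot leak stale kernel stack data when the caller later copies the structure to userspace. A minimal sketch of that zero-then-fill pattern, with hypothetical field names and memcpy() standing in for copy_to_user():

#include <string.h>

struct reply {                    /* hypothetical reply layout */
	unsigned long freq;
	unsigned long flags;
	unsigned short which;
};

static void fill_reply(struct reply *out, unsigned long freq)
{
	struct reply r;

	memset(&r, 0, sizeof(r)); /* everything not set below stays zero */
	if (freq)
		r.freq = freq;    /* an explicit "else r.freq = 0" is now redundant */
	memcpy(out, &r, sizeof(r));
}

int main(void)
{
	struct reply out;

	fill_reply(&out, 64);
	return (int)out.flags;    /* flags and which are guaranteed zero */
}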
diff --git a/drivers/char/hvc_console.c b/drivers/char/hvc_console.c
index 3afd62e856eb..e9cba13ee800 100644
--- a/drivers/char/hvc_console.c
+++ b/drivers/char/hvc_console.c
@@ -713,7 +713,6 @@ static int khvcd(void *unused)
struct hvc_struct *hp;
set_freezable();
- __set_current_state(TASK_RUNNING);
do {
poll_mask = 0;
hvc_kicked = 0;
diff --git a/drivers/char/hvc_iucv.c b/drivers/char/hvc_iucv.c
index 7b01bc609de3..c3425bb3a1f6 100644
--- a/drivers/char/hvc_iucv.c
+++ b/drivers/char/hvc_iucv.c
@@ -1303,13 +1303,11 @@ static int __init hvc_iucv_init(void)
if (rc) {
pr_err("Registering IUCV handlers failed with error code=%d\n",
rc);
- goto out_error_iucv;
+ goto out_error_hvc;
}
return 0;
-out_error_iucv:
- iucv_unregister(&hvc_iucv_handler, 0);
out_error_hvc:
for (i = 0; i < hvc_iucv_devices; i++)
if (hvc_iucv_table[i])
diff --git a/drivers/char/hvc_tile.c b/drivers/char/hvc_tile.c
index c4efb55cbc03..7a84a0595477 100644
--- a/drivers/char/hvc_tile.c
+++ b/drivers/char/hvc_tile.c
@@ -61,7 +61,8 @@ console_initcall(hvc_tile_console_init);
static int __init hvc_tile_init(void)
{
- hvc_alloc(0, 0, &hvc_tile_get_put_ops, 128);
- return 0;
+ struct hvc_struct *s;
+ s = hvc_alloc(0, 0, &hvc_tile_get_put_ops, 128);
+ return IS_ERR(s) ? PTR_ERR(s) : 0;
}
device_initcall(hvc_tile_init);
diff --git a/drivers/char/hvc_xen.c b/drivers/char/hvc_xen.c
index 60446f82a3fc..3740e327f180 100644
--- a/drivers/char/hvc_xen.c
+++ b/drivers/char/hvc_xen.c
@@ -74,11 +74,12 @@ static int __write_console(const char *data, int len)
wmb(); /* write ring before updating pointer */
intf->out_prod = prod;
- notify_daemon();
+ if (sent)
+ notify_daemon();
return sent;
}
-static int write_console(uint32_t vtermno, const char *data, int len)
+static int domU_write_console(uint32_t vtermno, const char *data, int len)
{
int ret = len;
@@ -101,7 +102,7 @@ static int write_console(uint32_t vtermno, const char *data, int len)
return ret;
}
-static int read_console(uint32_t vtermno, char *buf, int len)
+static int domU_read_console(uint32_t vtermno, char *buf, int len)
{
struct xencons_interface *intf = xencons_interface();
XENCONS_RING_IDX cons, prod;
@@ -122,28 +123,62 @@ static int read_console(uint32_t vtermno, char *buf, int len)
return recv;
}
-static const struct hv_ops hvc_ops = {
- .get_chars = read_console,
- .put_chars = write_console,
+static struct hv_ops domU_hvc_ops = {
+ .get_chars = domU_read_console,
+ .put_chars = domU_write_console,
.notifier_add = notifier_add_irq,
.notifier_del = notifier_del_irq,
.notifier_hangup = notifier_hangup_irq,
};
-static int __init xen_init(void)
+static int dom0_read_console(uint32_t vtermno, char *buf, int len)
+{
+ return HYPERVISOR_console_io(CONSOLEIO_read, len, buf);
+}
+
+/*
+ * Used either by dom0 to write to the system console, or by a domU with a
+ * debug version of Xen.
+ */
+static int dom0_write_console(uint32_t vtermno, const char *str, int len)
+{
+ int rc = HYPERVISOR_console_io(CONSOLEIO_write, len, (char *)str);
+ if (rc < 0)
+ return 0;
+
+ return len;
+}
+
+static struct hv_ops dom0_hvc_ops = {
+ .get_chars = dom0_read_console,
+ .put_chars = dom0_write_console,
+ .notifier_add = notifier_add_irq,
+ .notifier_del = notifier_del_irq,
+ .notifier_hangup = notifier_hangup_irq,
+};
+
+static int __init xen_hvc_init(void)
{
struct hvc_struct *hp;
+ struct hv_ops *ops;
- if (!xen_pv_domain() ||
- xen_initial_domain() ||
- !xen_start_info->console.domU.evtchn)
+ if (!xen_pv_domain())
return -ENODEV;
- xencons_irq = bind_evtchn_to_irq(xen_start_info->console.domU.evtchn);
+ if (xen_initial_domain()) {
+ ops = &dom0_hvc_ops;
+ xencons_irq = bind_virq_to_irq(VIRQ_CONSOLE, 0);
+ } else {
+ if (!xen_start_info->console.domU.evtchn)
+ return -ENODEV;
+
+ ops = &domU_hvc_ops;
+ xencons_irq = bind_evtchn_to_irq(xen_start_info->console.domU.evtchn);
+ }
if (xencons_irq < 0)
xencons_irq = 0; /* NO_IRQ */
- hp = hvc_alloc(HVC_COOKIE, xencons_irq, &hvc_ops, 256);
+ hp = hvc_alloc(HVC_COOKIE, xencons_irq, ops, 256);
if (IS_ERR(hp))
return PTR_ERR(hp);
@@ -160,7 +195,7 @@ void xen_console_resume(void)
rebind_evtchn_irq(xen_start_info->console.domU.evtchn, xencons_irq);
}
-static void __exit xen_fini(void)
+static void __exit xen_hvc_fini(void)
{
if (hvc)
hvc_remove(hvc);
@@ -168,29 +203,24 @@ static void __exit xen_fini(void)
static int xen_cons_init(void)
{
+ struct hv_ops *ops;
+
if (!xen_pv_domain())
return 0;
- hvc_instantiate(HVC_COOKIE, 0, &hvc_ops);
+ if (xen_initial_domain())
+ ops = &dom0_hvc_ops;
+ else
+ ops = &domU_hvc_ops;
+
+ hvc_instantiate(HVC_COOKIE, 0, ops);
return 0;
}
-module_init(xen_init);
-module_exit(xen_fini);
+module_init(xen_hvc_init);
+module_exit(xen_hvc_fini);
console_initcall(xen_cons_init);
-static void raw_console_write(const char *str, int len)
-{
- while(len > 0) {
- int rc = HYPERVISOR_console_io(CONSOLEIO_write, len, (char *)str);
- if (rc <= 0)
- break;
-
- str += rc;
- len -= rc;
- }
-}
-
#ifdef CONFIG_EARLY_PRINTK
static void xenboot_write_console(struct console *console, const char *string,
unsigned len)
@@ -198,19 +228,22 @@ static void xenboot_write_console(struct console *console, const char *string,
unsigned int linelen, off = 0;
const char *pos;
- raw_console_write(string, len);
+ dom0_write_console(0, string, len);
+
+ if (xen_initial_domain())
+ return;
- write_console(0, "(early) ", 8);
+ domU_write_console(0, "(early) ", 8);
while (off < len && NULL != (pos = strchr(string+off, '\n'))) {
linelen = pos-string+off;
if (off + linelen > len)
break;
- write_console(0, string+off, linelen);
- write_console(0, "\r\n", 2);
+ domU_write_console(0, string+off, linelen);
+ domU_write_console(0, "\r\n", 2);
off += linelen + 1;
}
if (off < len)
- write_console(0, string+off, len-off);
+ domU_write_console(0, string+off, len-off);
}
struct console xenboot_console = {
@@ -222,7 +255,7 @@ struct console xenboot_console = {
void xen_raw_console_write(const char *str)
{
- raw_console_write(str, strlen(str));
+ dom0_write_console(0, str, strlen(str));
}
void xen_raw_printk(const char *fmt, ...)
diff --git a/drivers/char/hw_random/core.c b/drivers/char/hw_random/core.c
index 788da05190cc..2016aad85203 100644
--- a/drivers/char/hw_random/core.c
+++ b/drivers/char/hw_random/core.c
@@ -37,7 +37,6 @@
#include <linux/kernel.h>
#include <linux/fs.h>
#include <linux/sched.h>
-#include <linux/smp_lock.h>
#include <linux/init.h>
#include <linux/miscdevice.h>
#include <linux/delay.h>
diff --git a/drivers/char/i8k.c b/drivers/char/i8k.c
index 3bc0eef88717..d72433f2d310 100644
--- a/drivers/char/i8k.c
+++ b/drivers/char/i8k.c
@@ -120,7 +120,7 @@ static int i8k_smm(struct smm_regs *regs)
int eax = regs->eax;
#if defined(CONFIG_X86_64)
- asm("pushq %%rax\n\t"
+ asm volatile("pushq %%rax\n\t"
"movl 0(%%rax),%%edx\n\t"
"pushq %%rdx\n\t"
"movl 4(%%rax),%%ebx\n\t"
@@ -146,7 +146,7 @@ static int i8k_smm(struct smm_regs *regs)
: "a"(regs)
: "%ebx", "%ecx", "%edx", "%esi", "%edi", "memory");
#else
- asm("pushl %%eax\n\t"
+ asm volatile("pushl %%eax\n\t"
"movl 0(%%eax),%%edx\n\t"
"push %%edx\n\t"
"movl 4(%%eax),%%ebx\n\t"
@@ -167,7 +167,8 @@ static int i8k_smm(struct smm_regs *regs)
"movl %%edx,0(%%eax)\n\t"
"lahf\n\t"
"shrl $8,%%eax\n\t"
- "andl $1,%%eax\n":"=a"(rc)
+ "andl $1,%%eax\n"
+ :"=a"(rc)
: "a"(regs)
: "%ebx", "%ecx", "%edx", "%esi", "%edi", "memory");
#endif
diff --git a/drivers/char/ip2/Makefile b/drivers/char/ip2/Makefile
index bc397d92b499..7b78e0dfc5b0 100644
--- a/drivers/char/ip2/Makefile
+++ b/drivers/char/ip2/Makefile
@@ -4,5 +4,5 @@
obj-$(CONFIG_COMPUTONE) += ip2.o
-ip2-objs := ip2main.o
+ip2-y := ip2main.o
diff --git a/drivers/char/ipmi/Makefile b/drivers/char/ipmi/Makefile
index eb8a1a8c188e..16a93648d54e 100644
--- a/drivers/char/ipmi/Makefile
+++ b/drivers/char/ipmi/Makefile
@@ -2,7 +2,7 @@
# Makefile for the ipmi drivers.
#
-ipmi_si-objs := ipmi_si_intf.o ipmi_kcs_sm.o ipmi_smic_sm.o ipmi_bt_sm.o
+ipmi_si-y := ipmi_si_intf.o ipmi_kcs_sm.o ipmi_smic_sm.o ipmi_bt_sm.o
obj-$(CONFIG_IPMI_HANDLER) += ipmi_msghandler.o
obj-$(CONFIG_IPMI_DEVICE_INTERFACE) += ipmi_devintf.o
diff --git a/drivers/char/ipmi/ipmi_devintf.c b/drivers/char/ipmi/ipmi_devintf.c
index 1fc8876af1f5..2aa3977aae5e 100644
--- a/drivers/char/ipmi/ipmi_devintf.c
+++ b/drivers/char/ipmi/ipmi_devintf.c
@@ -916,7 +916,7 @@ static struct ipmi_smi_watcher smi_watcher =
.smi_gone = ipmi_smi_gone,
};
-static __init int init_ipmi_devintf(void)
+static int __init init_ipmi_devintf(void)
{
int rv;
@@ -954,7 +954,7 @@ static __init int init_ipmi_devintf(void)
}
module_init(init_ipmi_devintf);
-static __exit void cleanup_ipmi(void)
+static void __exit cleanup_ipmi(void)
{
struct ipmi_reg_list *entry, *entry2;
mutex_lock(&reg_list_mutex);
diff --git a/drivers/char/ipmi/ipmi_msghandler.c b/drivers/char/ipmi/ipmi_msghandler.c
index 4f3f8c9ec262..2fe72f8edf44 100644
--- a/drivers/char/ipmi/ipmi_msghandler.c
+++ b/drivers/char/ipmi/ipmi_msghandler.c
@@ -4442,13 +4442,13 @@ static int ipmi_init_msghandler(void)
return 0;
}
-static __init int ipmi_init_msghandler_mod(void)
+static int __init ipmi_init_msghandler_mod(void)
{
ipmi_init_msghandler();
return 0;
}
-static __exit void cleanup_ipmi(void)
+static void __exit cleanup_ipmi(void)
{
int count;
diff --git a/drivers/char/ipmi/ipmi_si_intf.c b/drivers/char/ipmi/ipmi_si_intf.c
index e537610d2f09..035da9e64a17 100644
--- a/drivers/char/ipmi/ipmi_si_intf.c
+++ b/drivers/char/ipmi/ipmi_si_intf.c
@@ -1665,6 +1665,17 @@ static int check_hotmod_int_op(const char *curr, const char *option,
return 0;
}
+static struct smi_info *smi_info_alloc(void)
+{
+ struct smi_info *info = kzalloc(sizeof(*info), GFP_KERNEL);
+
+ if (info) {
+ spin_lock_init(&info->si_lock);
+ spin_lock_init(&info->msg_lock);
+ }
+ return info;
+}
+
static int hotmod_handler(const char *val, struct kernel_param *kp)
{
char *str = kstrdup(val, GFP_KERNEL);
@@ -1779,7 +1790,7 @@ static int hotmod_handler(const char *val, struct kernel_param *kp)
}
if (op == HM_ADD) {
- info = kzalloc(sizeof(*info), GFP_KERNEL);
+ info = smi_info_alloc();
if (!info) {
rv = -ENOMEM;
goto out;
@@ -1835,7 +1846,7 @@ static int hotmod_handler(const char *val, struct kernel_param *kp)
return rv;
}
-static __devinit void hardcode_find_bmc(void)
+static void __devinit hardcode_find_bmc(void)
{
int i;
struct smi_info *info;
@@ -1844,7 +1855,7 @@ static __devinit void hardcode_find_bmc(void)
if (!ports[i] && !addrs[i])
continue;
- info = kzalloc(sizeof(*info), GFP_KERNEL);
+ info = smi_info_alloc();
if (!info)
return;
@@ -2018,7 +2029,7 @@ struct SPMITable {
s8 spmi_id[1]; /* A '\0' terminated array starts here. */
};
-static __devinit int try_init_spmi(struct SPMITable *spmi)
+static int __devinit try_init_spmi(struct SPMITable *spmi)
{
struct smi_info *info;
@@ -2027,7 +2038,7 @@ static __devinit int try_init_spmi(struct SPMITable *spmi)
return -ENODEV;
}
- info = kzalloc(sizeof(*info), GFP_KERNEL);
+ info = smi_info_alloc();
if (!info) {
printk(KERN_ERR PFX "Could not allocate SI data (3)\n");
return -ENOMEM;
@@ -2101,7 +2112,7 @@ static __devinit int try_init_spmi(struct SPMITable *spmi)
return 0;
}
-static __devinit void spmi_find_bmc(void)
+static void __devinit spmi_find_bmc(void)
{
acpi_status status;
struct SPMITable *spmi;
@@ -2137,7 +2148,7 @@ static int __devinit ipmi_pnp_probe(struct pnp_dev *dev,
if (!acpi_dev)
return -ENODEV;
- info = kzalloc(sizeof(*info), GFP_KERNEL);
+ info = smi_info_alloc();
if (!info)
return -ENOMEM;
@@ -2314,11 +2325,11 @@ static int __devinit decode_dmi(const struct dmi_header *dm,
return 0;
}
-static __devinit void try_init_dmi(struct dmi_ipmi_data *ipmi_data)
+static void __devinit try_init_dmi(struct dmi_ipmi_data *ipmi_data)
{
struct smi_info *info;
- info = kzalloc(sizeof(*info), GFP_KERNEL);
+ info = smi_info_alloc();
if (!info) {
printk(KERN_ERR PFX "Could not allocate SI data\n");
return;
@@ -2425,7 +2436,7 @@ static int __devinit ipmi_pci_probe(struct pci_dev *pdev,
int class_type = pdev->class & PCI_ERMC_CLASSCODE_TYPE_MASK;
struct smi_info *info;
- info = kzalloc(sizeof(*info), GFP_KERNEL);
+ info = smi_info_alloc();
if (!info)
return -ENOMEM;
@@ -2566,7 +2577,7 @@ static int __devinit ipmi_of_probe(struct platform_device *dev,
return -EINVAL;
}
- info = kzalloc(sizeof(*info), GFP_KERNEL);
+ info = smi_info_alloc();
if (!info) {
dev_err(&dev->dev,
@@ -3001,7 +3012,7 @@ static __devinitdata struct ipmi_default_vals
{ .port = 0 }
};
-static __devinit void default_find_bmc(void)
+static void __devinit default_find_bmc(void)
{
struct smi_info *info;
int i;
@@ -3013,7 +3024,7 @@ static __devinit void default_find_bmc(void)
if (check_legacy_ioport(ipmi_defaults[i].port))
continue;
#endif
- info = kzalloc(sizeof(*info), GFP_KERNEL);
+ info = smi_info_alloc();
if (!info)
return;
@@ -3138,9 +3149,6 @@ static int try_smi_init(struct smi_info *new_smi)
goto out_err;
}
- spin_lock_init(&(new_smi->si_lock));
- spin_lock_init(&(new_smi->msg_lock));
-
/* Do low-level detection first. */
if (new_smi->handlers->detect(new_smi->si_sm)) {
if (new_smi->addr_source)
@@ -3304,7 +3312,7 @@ static int try_smi_init(struct smi_info *new_smi)
return rv;
}
-static __devinit int init_ipmi_si(void)
+static int __devinit init_ipmi_si(void)
{
int i;
char *str;
@@ -3517,7 +3525,7 @@ static void cleanup_one_si(struct smi_info *to_clean)
kfree(to_clean);
}
-static __exit void cleanup_ipmi_si(void)
+static void __exit cleanup_ipmi_si(void)
{
struct smi_info *e, *tmp_e;
diff --git a/drivers/char/istallion.c b/drivers/char/istallion.c
index 667abd23ad6a..7c6de4c92458 100644
--- a/drivers/char/istallion.c
+++ b/drivers/char/istallion.c
@@ -21,7 +21,6 @@
#include <linux/module.h>
#include <linux/sched.h>
#include <linux/slab.h>
-#include <linux/smp_lock.h>
#include <linux/interrupt.h>
#include <linux/tty.h>
#include <linux/tty_flip.h>
diff --git a/drivers/char/mem.c b/drivers/char/mem.c
index e985b1c2730e..1256454b2d43 100644
--- a/drivers/char/mem.c
+++ b/drivers/char/mem.c
@@ -876,6 +876,10 @@ static int memory_open(struct inode *inode, struct file *filp)
if (dev->dev_info)
filp->f_mapping->backing_dev_info = dev->dev_info;
+ /* Is /dev/mem or /dev/kmem ? */
+ if (dev->dev_info == &directly_mappable_cdev_bdi)
+ filp->f_mode |= FMODE_UNSIGNED_OFFSET;
+
if (dev->fops->open)
return dev->fops->open(inode, filp);
diff --git a/drivers/char/mmtimer.c b/drivers/char/mmtimer.c
index c070b53984e4..e6d75627c6c8 100644
--- a/drivers/char/mmtimer.c
+++ b/drivers/char/mmtimer.c
@@ -176,9 +176,9 @@ static void mmtimer_setup_int_2(int cpu, u64 expires)
* in order to ensure that the setup succeeds in a deterministic time frame.
* It will check if the interrupt setup succeeded.
*/
-static int mmtimer_setup(int cpu, int comparator, unsigned long expires)
+static int mmtimer_setup(int cpu, int comparator, unsigned long expires,
+ u64 *set_completion_time)
{
-
switch (comparator) {
case 0:
mmtimer_setup_int_0(cpu, expires);
@@ -191,7 +191,8 @@ static int mmtimer_setup(int cpu, int comparator, unsigned long expires)
break;
}
/* We might've missed our expiration time */
- if (rtc_time() <= expires)
+ *set_completion_time = rtc_time();
+ if (*set_completion_time <= expires)
return 1;
/*
@@ -227,6 +228,8 @@ static int mmtimer_disable_int(long nasid, int comparator)
#define TIMER_OFF 0xbadcabLL /* Timer is not setup */
#define TIMER_SET 0 /* Comparator is set for this timer */
+#define MMTIMER_INTERVAL_RETRY_INCREMENT_DEFAULT 40
+
/* There is one of these for each timer */
struct mmtimer {
struct rb_node list;
@@ -242,6 +245,11 @@ struct mmtimer_node {
};
static struct mmtimer_node *timers;
+static unsigned mmtimer_interval_retry_increment =
+ MMTIMER_INTERVAL_RETRY_INCREMENT_DEFAULT;
+module_param(mmtimer_interval_retry_increment, uint, 0644);
+MODULE_PARM_DESC(mmtimer_interval_retry_increment,
+ "RTC ticks to add to expiration on interval retry (default 40)");
/*
* Add a new mmtimer struct to the node's mmtimer list.
@@ -289,7 +297,8 @@ static void mmtimer_set_next_timer(int nodeid)
struct mmtimer_node *n = &timers[nodeid];
struct mmtimer *x;
struct k_itimer *t;
- int o;
+ u64 expires, exp, set_completion_time;
+ int i;
restart:
if (n->next == NULL)
@@ -300,7 +309,8 @@ restart:
if (!t->it.mmtimer.incr) {
/* Not an interval timer */
if (!mmtimer_setup(x->cpu, COMPARATOR,
- t->it.mmtimer.expires)) {
+ t->it.mmtimer.expires,
+ &set_completion_time)) {
/* Late setup, fire now */
tasklet_schedule(&n->tasklet);
}
@@ -308,14 +318,23 @@ restart:
}
/* Interval timer */
- o = 0;
- while (!mmtimer_setup(x->cpu, COMPARATOR, t->it.mmtimer.expires)) {
- unsigned long e, e1;
- struct rb_node *next;
- t->it.mmtimer.expires += t->it.mmtimer.incr << o;
- t->it_overrun += 1 << o;
- o++;
- if (o > 20) {
+ i = 0;
+ expires = exp = t->it.mmtimer.expires;
+ while (!mmtimer_setup(x->cpu, COMPARATOR, expires,
+ &set_completion_time)) {
+ int to;
+
+ i++;
+ expires = set_completion_time +
+ mmtimer_interval_retry_increment + (1 << i);
+ /* Calculate overruns as we go. */
+ to = ((u64)(expires - exp) / t->it.mmtimer.incr);
+ if (to) {
+ t->it_overrun += to;
+ t->it.mmtimer.expires += t->it.mmtimer.incr * to;
+ exp = t->it.mmtimer.expires;
+ }
+ if (i > 20) {
printk(KERN_ALERT "mmtimer: cannot reschedule timer\n");
t->it.mmtimer.clock = TIMER_OFF;
n->next = rb_next(&x->list);
@@ -323,21 +342,6 @@ restart:
kfree(x);
goto restart;
}
-
- e = t->it.mmtimer.expires;
- next = rb_next(&x->list);
-
- if (next == NULL)
- continue;
-
- e1 = rb_entry(next, struct mmtimer, list)->
- timer->it.mmtimer.expires;
- if (e > e1) {
- n->next = next;
- rb_erase(&x->list, &n->timer_head);
- mmtimer_add_list(x);
- goto restart;
- }
}
}
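The interval-retry rework above derives the overrun count arithmetically from how far the retried expiration has drifted past the last accounted one, instead of doubling an increment shift. For example, with incr = 300 RTC ticks, exp = 1000 and a retried expires of 1950, to = (1950 - 1000) / 300 = 3, so it_overrun grows by 3 and the stored expiration advances by 900 ticks to 1900. The same arithmetic as a standalone sketch with made-up values:

#include <stdio.h>

int main(void)
{
	unsigned long long incr = 300;     /* timer interval in RTC ticks */
	unsigned long long exp = 1000;     /* last accounted expiration */
	unsigned long long expires = 1950; /* retry target after a missed setup */
	unsigned long long to = (expires - exp) / incr;

	/* to == 3: three intervals were skipped, expiration advances by 900 */
	printf("overruns=%llu new_exp=%llu\n", to, exp + to * incr);
	return 0;
}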
diff --git a/drivers/char/mwave/Makefile b/drivers/char/mwave/Makefile
index 754c9e2058ed..26b4fce217b6 100644
--- a/drivers/char/mwave/Makefile
+++ b/drivers/char/mwave/Makefile
@@ -6,10 +6,10 @@
obj-$(CONFIG_MWAVE) += mwave.o
-mwave-objs := mwavedd.o smapi.o tp3780i.o 3780i.o
+mwave-y := mwavedd.o smapi.o tp3780i.o 3780i.o
# To have the mwave driver disable other uarts if necessary
# EXTRA_CFLAGS += -DMWAVE_FUTZ_WITH_OTHER_DEVICES
# To compile in lots (~20 KiB) of run-time enablable printk()s for debugging:
-EXTRA_CFLAGS += -DMW_TRACE
+ccflags-y := -DMW_TRACE
diff --git a/drivers/char/mxser.c b/drivers/char/mxser.c
index 463df27494bd..dd9d75351cd6 100644
--- a/drivers/char/mxser.c
+++ b/drivers/char/mxser.c
@@ -303,6 +303,7 @@ static void mxser_enable_must_enchance_mode(unsigned long baseio)
outb(oldlcr, baseio + UART_LCR);
}
+#ifdef CONFIG_PCI
static void mxser_disable_must_enchance_mode(unsigned long baseio)
{
u8 oldlcr;
@@ -317,6 +318,7 @@ static void mxser_disable_must_enchance_mode(unsigned long baseio)
outb(efr, baseio + MOXA_MUST_EFR_REGISTER);
outb(oldlcr, baseio + UART_LCR);
}
+#endif
static void mxser_set_must_xon1_value(unsigned long baseio, u8 value)
{
@@ -388,6 +390,7 @@ static void mxser_set_must_enum_value(unsigned long baseio, u8 value)
outb(oldlcr, baseio + UART_LCR);
}
+#ifdef CONFIG_PCI
static void mxser_get_must_hardware_id(unsigned long baseio, u8 *pId)
{
u8 oldlcr;
@@ -404,6 +407,7 @@ static void mxser_get_must_hardware_id(unsigned long baseio, u8 *pId)
*pId = inb(baseio + MOXA_MUST_HWID_REGISTER);
outb(oldlcr, baseio + UART_LCR);
}
+#endif
static void SET_MOXA_MUST_NO_SOFTWARE_FLOW_CONTROL(unsigned long baseio)
{
diff --git a/drivers/char/nozomi.c b/drivers/char/nozomi.c
index dd3f9b1f11b4..294d03e8c61a 100644
--- a/drivers/char/nozomi.c
+++ b/drivers/char/nozomi.c
@@ -1828,7 +1828,6 @@ static int ntty_ioctl(struct tty_struct *tty, struct file *file,
unsigned int cmd, unsigned long arg)
{
struct port *port = tty->driver_data;
- void __user *argp = (void __user *)arg;
int rval = -ENOIOCTLCMD;
DBG1("******** IOCTL, cmd: %d", cmd);
diff --git a/drivers/char/pcmcia/cm4000_cs.c b/drivers/char/pcmcia/cm4000_cs.c
index d962f25dcc2a..777181a2e603 100644
--- a/drivers/char/pcmcia/cm4000_cs.c
+++ b/drivers/char/pcmcia/cm4000_cs.c
@@ -979,8 +979,9 @@ static ssize_t cmm_read(struct file *filp, __user char *buf, size_t count,
if (dev->flags0 & 1) {
set_bit(IS_CMM_ABSENT, &dev->flags);
rc = -ENODEV;
+ } else {
+ rc = -EIO;
}
- rc = -EIO;
goto release_io;
}
diff --git a/drivers/char/pcmcia/ipwireless/Makefile b/drivers/char/pcmcia/ipwireless/Makefile
index b71eb593643d..db80873d7f20 100644
--- a/drivers/char/pcmcia/ipwireless/Makefile
+++ b/drivers/char/pcmcia/ipwireless/Makefile
@@ -6,5 +6,5 @@
obj-$(CONFIG_IPWIRELESS) += ipwireless.o
-ipwireless-objs := hardware.o main.o network.o tty.o
+ipwireless-y := hardware.o main.o network.o tty.o
diff --git a/drivers/char/pcmcia/synclink_cs.c b/drivers/char/pcmcia/synclink_cs.c
index bfc10f89d951..eaa41992fbe2 100644
--- a/drivers/char/pcmcia/synclink_cs.c
+++ b/drivers/char/pcmcia/synclink_cs.c
@@ -2796,6 +2796,7 @@ static const struct tty_operations mgslpc_ops = {
.hangup = mgslpc_hangup,
.tiocmget = tiocmget,
.tiocmset = tiocmset,
+ .get_icount = mgslpc_get_icount,
.proc_fops = &mgslpc_proc_fops,
};
diff --git a/drivers/char/ppdev.c b/drivers/char/ppdev.c
index 723152d978a9..f176dbaeb15a 100644
--- a/drivers/char/ppdev.c
+++ b/drivers/char/ppdev.c
@@ -613,6 +613,7 @@ static int pp_do_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
case PPGETTIME:
to_jiffies = pp->pdev->timeout;
+ memset(&par_timeout, 0, sizeof(par_timeout));
par_timeout.tv_sec = to_jiffies / HZ;
par_timeout.tv_usec = (to_jiffies % (long)HZ) * (1000000/HZ);
if (copy_to_user (argp, &par_timeout, sizeof(struct timeval)))
diff --git a/drivers/char/ramoops.c b/drivers/char/ramoops.c
index 74f00b5ffa36..73dcb0ee41fd 100644
--- a/drivers/char/ramoops.c
+++ b/drivers/char/ramoops.c
@@ -25,6 +25,8 @@
#include <linux/time.h>
#include <linux/io.h>
#include <linux/ioport.h>
+#include <linux/platform_device.h>
+#include <linux/ramoops.h>
#define RAMOOPS_KERNMSG_HDR "===="
#define RAMOOPS_HEADER_SIZE (5 + sizeof(struct timeval))
@@ -91,11 +93,17 @@ static void ramoops_do_dump(struct kmsg_dumper *dumper,
cxt->count = (cxt->count + 1) % cxt->max_count;
}
-static int __init ramoops_init(void)
+static int __init ramoops_probe(struct platform_device *pdev)
{
+ struct ramoops_platform_data *pdata = pdev->dev.platform_data;
struct ramoops_context *cxt = &oops_cxt;
int err = -EINVAL;
+ if (pdata) {
+ mem_size = pdata->mem_size;
+ mem_address = pdata->mem_address;
+ }
+
if (!mem_size) {
printk(KERN_ERR "ramoops: invalid size specification");
goto fail3;
@@ -142,7 +150,7 @@ fail3:
return err;
}
-static void __exit ramoops_exit(void)
+static int __exit ramoops_remove(struct platform_device *pdev)
{
struct ramoops_context *cxt = &oops_cxt;
@@ -151,8 +159,26 @@ static void __exit ramoops_exit(void)
iounmap(cxt->virt_addr);
release_mem_region(cxt->phys_addr, cxt->size);
+ return 0;
}
+static struct platform_driver ramoops_driver = {
+ .remove = __exit_p(ramoops_remove),
+ .driver = {
+ .name = "ramoops",
+ .owner = THIS_MODULE,
+ },
+};
+
+static int __init ramoops_init(void)
+{
+ return platform_driver_probe(&ramoops_driver, ramoops_probe);
+}
+
+static void __exit ramoops_exit(void)
+{
+ platform_driver_unregister(&ramoops_driver);
+}
module_init(ramoops_init);
module_exit(ramoops_exit);
diff --git a/drivers/char/rio/Makefile b/drivers/char/rio/Makefile
index 2d1c5a7cba7d..1661875883fb 100644
--- a/drivers/char/rio/Makefile
+++ b/drivers/char/rio/Makefile
@@ -8,5 +8,5 @@
obj-$(CONFIG_RIO) += rio.o
-rio-objs := rio_linux.o rioinit.o rioboot.o riocmd.o rioctrl.o riointr.o \
+rio-y := rio_linux.o rioinit.o rioboot.o riocmd.o rioctrl.o riointr.o \
rioparam.o rioroute.o riotable.o riotty.o
diff --git a/drivers/char/rocket.c b/drivers/char/rocket.c
index 7c79d243acc9..86308830ac42 100644
--- a/drivers/char/rocket.c
+++ b/drivers/char/rocket.c
@@ -2345,7 +2345,7 @@ static int __init rp_init(void)
ret = tty_register_driver(rocket_driver);
if (ret < 0) {
printk(KERN_ERR "Couldn't install tty RocketPort driver\n");
- goto err_tty;
+ goto err_controller;
}
#ifdef ROCKET_DEBUG_OPEN
@@ -2380,6 +2380,9 @@ static int __init rp_init(void)
return 0;
err_ttyu:
tty_unregister_driver(rocket_driver);
+err_controller:
+ if (controller)
+ release_region(controller, 4);
err_tty:
put_tty_driver(rocket_driver);
err:
diff --git a/drivers/char/serial167.c b/drivers/char/serial167.c
index f646725bd567..748c3b0ecd89 100644
--- a/drivers/char/serial167.c
+++ b/drivers/char/serial167.c
@@ -52,7 +52,6 @@
#include <linux/interrupt.h>
#include <linux/serial.h>
#include <linux/serialP.h>
-#include <linux/smp_lock.h>
#include <linux/string.h>
#include <linux/fcntl.h>
#include <linux/ptrace.h>
diff --git a/drivers/char/specialix.c b/drivers/char/specialix.c
index 9f8495b4fc8f..a7616d226a49 100644
--- a/drivers/char/specialix.c
+++ b/drivers/char/specialix.c
@@ -87,7 +87,6 @@
#include <linux/tty_flip.h>
#include <linux/mm.h>
#include <linux/serial.h>
-#include <linux/smp_lock.h>
#include <linux/fcntl.h>
#include <linux/major.h>
#include <linux/delay.h>
diff --git a/drivers/char/stallion.c b/drivers/char/stallion.c
index 4bef6ab83622..461a5a045517 100644
--- a/drivers/char/stallion.c
+++ b/drivers/char/stallion.c
@@ -40,7 +40,6 @@
#include <linux/stallion.h>
#include <linux/ioport.h>
#include <linux/init.h>
-#include <linux/smp_lock.h>
#include <linux/device.h>
#include <linux/delay.h>
#include <linux/ctype.h>
diff --git a/drivers/char/sx.c b/drivers/char/sx.c
index e53f16865397..a786326cea2f 100644
--- a/drivers/char/sx.c
+++ b/drivers/char/sx.c
@@ -216,7 +216,6 @@
#include <linux/eisa.h>
#include <linux/pci.h>
#include <linux/slab.h>
-#include <linux/smp_lock.h>
#include <linux/init.h>
#include <linux/miscdevice.h>
#include <linux/bitops.h>
diff --git a/drivers/char/synclink_gt.c b/drivers/char/synclink_gt.c
index 1746d91205f7..d01fffeac951 100644
--- a/drivers/char/synclink_gt.c
+++ b/drivers/char/synclink_gt.c
@@ -301,6 +301,8 @@ struct slgt_info {
unsigned int rx_pio;
unsigned int if_mode;
unsigned int base_clock;
+ unsigned int xsync;
+ unsigned int xctrl;
/* device status */
@@ -405,6 +407,8 @@ static MGSL_PARAMS default_params = {
#define TDCSR 0x94 /* tx DMA control/status */
#define RDDAR 0x98 /* rx DMA descriptor address */
#define TDDAR 0x9c /* tx DMA descriptor address */
+#define XSR 0x40 /* extended sync pattern */
+#define XCR 0x44 /* extended control */
#define RXIDLE BIT14
#define RXBREAK BIT14
@@ -517,6 +521,10 @@ static int set_interface(struct slgt_info *info, int if_mode);
static int set_gpio(struct slgt_info *info, struct gpio_desc __user *gpio);
static int get_gpio(struct slgt_info *info, struct gpio_desc __user *gpio);
static int wait_gpio(struct slgt_info *info, struct gpio_desc __user *gpio);
+static int get_xsync(struct slgt_info *info, int __user *if_mode);
+static int set_xsync(struct slgt_info *info, int if_mode);
+static int get_xctrl(struct slgt_info *info, int __user *if_mode);
+static int set_xctrl(struct slgt_info *info, int if_mode);
/*
* driver functions
@@ -1056,6 +1064,14 @@ static int ioctl(struct tty_struct *tty, struct file *file,
return get_gpio(info, argp);
case MGSL_IOCWAITGPIO:
return wait_gpio(info, argp);
+ case MGSL_IOCGXSYNC:
+ return get_xsync(info, argp);
+ case MGSL_IOCSXSYNC:
+ return set_xsync(info, (int)arg);
+ case MGSL_IOCGXCTRL:
+ return get_xctrl(info, argp);
+ case MGSL_IOCSXCTRL:
+ return set_xctrl(info, (int)arg);
}
mutex_lock(&info->port.mutex);
switch (cmd) {
@@ -1132,6 +1148,7 @@ static long get_params32(struct slgt_info *info, struct MGSL_PARAMS32 __user *us
struct MGSL_PARAMS32 tmp_params;
DBGINFO(("%s get_params32\n", info->device_name));
+ memset(&tmp_params, 0, sizeof(tmp_params));
tmp_params.mode = (compat_ulong_t)info->params.mode;
tmp_params.loopback = info->params.loopback;
tmp_params.flags = info->params.flags;
@@ -1212,12 +1229,16 @@ static long slgt_compat_ioctl(struct tty_struct *tty, struct file *file,
case MGSL_IOCSGPIO:
case MGSL_IOCGGPIO:
case MGSL_IOCWAITGPIO:
+ case MGSL_IOCGXSYNC:
+ case MGSL_IOCGXCTRL:
case MGSL_IOCSTXIDLE:
case MGSL_IOCTXENABLE:
case MGSL_IOCRXENABLE:
case MGSL_IOCTXABORT:
case TIOCMIWAIT:
case MGSL_IOCSIF:
+ case MGSL_IOCSXSYNC:
+ case MGSL_IOCSXCTRL:
rc = ioctl(tty, file, cmd, arg);
break;
}
@@ -1617,6 +1638,8 @@ static int hdlcdev_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
if (cmd != SIOCWANDEV)
return hdlc_ioctl(dev, ifr, cmd);
+ memset(&new_line, 0, sizeof(new_line));
+
switch(ifr->ifr_settings.type) {
case IF_GET_IFACE: /* return current sync_serial_settings */
@@ -1958,6 +1981,7 @@ static void bh_handler(struct work_struct *work)
case MGSL_MODE_RAW:
case MGSL_MODE_MONOSYNC:
case MGSL_MODE_BISYNC:
+ case MGSL_MODE_XSYNC:
while(rx_get_buf(info));
break;
}
@@ -2357,26 +2381,27 @@ static irqreturn_t slgt_interrupt(int dummy, void *dev_id)
DBGISR(("slgt_interrupt irq=%d entry\n", info->irq_level));
- spin_lock(&info->lock);
-
while((gsr = rd_reg32(info, GSR) & 0xffffff00)) {
DBGISR(("%s gsr=%08x\n", info->device_name, gsr));
info->irq_occurred = true;
for(i=0; i < info->port_count ; i++) {
if (info->port_array[i] == NULL)
continue;
+ spin_lock(&info->port_array[i]->lock);
if (gsr & (BIT8 << i))
isr_serial(info->port_array[i]);
if (gsr & (BIT16 << (i*2)))
isr_rdma(info->port_array[i]);
if (gsr & (BIT17 << (i*2)))
isr_tdma(info->port_array[i]);
+ spin_unlock(&info->port_array[i]->lock);
}
}
if (info->gpio_present) {
unsigned int state;
unsigned int changed;
+ spin_lock(&info->lock);
while ((changed = rd_reg32(info, IOSR)) != 0) {
DBGISR(("%s iosr=%08x\n", info->device_name, changed));
/* read latched state of GPIO signals */
@@ -2388,22 +2413,24 @@ static irqreturn_t slgt_interrupt(int dummy, void *dev_id)
isr_gpio(info->port_array[i], changed, state);
}
}
+ spin_unlock(&info->lock);
}
for(i=0; i < info->port_count ; i++) {
struct slgt_info *port = info->port_array[i];
-
- if (port && (port->port.count || port->netcount) &&
+ if (port == NULL)
+ continue;
+ spin_lock(&port->lock);
+ if ((port->port.count || port->netcount) &&
port->pending_bh && !port->bh_running &&
!port->bh_requested) {
DBGISR(("%s bh queued\n", port->device_name));
schedule_work(&port->task);
port->bh_requested = true;
}
+ spin_unlock(&port->lock);
}
- spin_unlock(&info->lock);
-
DBGISR(("slgt_interrupt irq=%d exit\n", info->irq_level));
return IRQ_HANDLED;
}
@@ -2883,6 +2910,69 @@ static int set_interface(struct slgt_info *info, int if_mode)
return 0;
}
+static int get_xsync(struct slgt_info *info, int __user *xsync)
+{
+ DBGINFO(("%s get_xsync=%x\n", info->device_name, info->xsync));
+ if (put_user(info->xsync, xsync))
+ return -EFAULT;
+ return 0;
+}
+
+/*
+ * set extended sync pattern (1 to 4 bytes) for extended sync mode
+ *
+ * sync pattern is contained in least significant bytes of value
+ * most significant byte of sync pattern is oldest (1st sent/detected)
+ */
+static int set_xsync(struct slgt_info *info, int xsync)
+{
+ unsigned long flags;
+
+ DBGINFO(("%s set_xsync=%x)\n", info->device_name, xsync));
+ spin_lock_irqsave(&info->lock, flags);
+ info->xsync = xsync;
+ wr_reg32(info, XSR, xsync);
+ spin_unlock_irqrestore(&info->lock, flags);
+ return 0;
+}
+
+static int get_xctrl(struct slgt_info *info, int __user *xctrl)
+{
+ DBGINFO(("%s get_xctrl=%x\n", info->device_name, info->xctrl));
+ if (put_user(info->xctrl, xctrl))
+ return -EFAULT;
+ return 0;
+}
+
+/*
+ * set extended control options
+ *
+ * xctrl[31:19] reserved, must be zero
+ * xctrl[18:17] extended sync pattern length in bytes
+ * 00 = 1 byte in xsr[7:0]
+ * 01 = 2 bytes in xsr[15:0]
+ * 10 = 3 bytes in xsr[23:0]
+ * 11 = 4 bytes in xsr[31:0]
+ * xctrl[16] 1 = enable terminal count, 0=disabled
+ * xctrl[15:0] receive terminal count for fixed length packets
+ * value is count minus one (0 = 1 byte packet)
+ * when terminal count is reached, receiver
+ * automatically returns to hunt mode and receive
+ * FIFO contents are flushed to DMA buffers with
+ * end of frame (EOF) status
+ */
+static int set_xctrl(struct slgt_info *info, int xctrl)
+{
+ unsigned long flags;
+
+ DBGINFO(("%s set_xctrl=%x)\n", info->device_name, xctrl));
+ spin_lock_irqsave(&info->lock, flags);
+ info->xctrl = xctrl;
+ wr_reg32(info, XCR, xctrl);
+ spin_unlock_irqrestore(&info->lock, flags);
+ return 0;
+}
+
/*
* set general purpose IO pin state and direction
*
@@ -2906,7 +2996,7 @@ static int set_gpio(struct slgt_info *info, struct gpio_desc __user *user_gpio)
info->device_name, gpio.state, gpio.smask,
gpio.dir, gpio.dmask));
- spin_lock_irqsave(&info->lock,flags);
+ spin_lock_irqsave(&info->port_array[0]->lock, flags);
if (gpio.dmask) {
data = rd_reg32(info, IODR);
data |= gpio.dmask & gpio.dir;
@@ -2919,7 +3009,7 @@ static int set_gpio(struct slgt_info *info, struct gpio_desc __user *user_gpio)
data &= ~(gpio.smask & ~gpio.state);
wr_reg32(info, IOVR, data);
}
- spin_unlock_irqrestore(&info->lock,flags);
+ spin_unlock_irqrestore(&info->port_array[0]->lock, flags);
return 0;
}
@@ -3020,7 +3110,7 @@ static int wait_gpio(struct slgt_info *info, struct gpio_desc __user *user_gpio)
return -EINVAL;
init_cond_wait(&wait, gpio.smask);
- spin_lock_irqsave(&info->lock, flags);
+ spin_lock_irqsave(&info->port_array[0]->lock, flags);
/* enable interrupts for watched pins */
wr_reg32(info, IOER, rd_reg32(info, IOER) | gpio.smask);
/* get current pin states */
@@ -3032,20 +3122,20 @@ static int wait_gpio(struct slgt_info *info, struct gpio_desc __user *user_gpio)
} else {
/* wait for target state */
add_cond_wait(&info->gpio_wait_q, &wait);
- spin_unlock_irqrestore(&info->lock, flags);
+ spin_unlock_irqrestore(&info->port_array[0]->lock, flags);
schedule();
if (signal_pending(current))
rc = -ERESTARTSYS;
else
gpio.state = wait.data;
- spin_lock_irqsave(&info->lock, flags);
+ spin_lock_irqsave(&info->port_array[0]->lock, flags);
remove_cond_wait(&info->gpio_wait_q, &wait);
}
/* disable all GPIO interrupts if no waiting processes */
if (info->gpio_wait_q == NULL)
wr_reg32(info, IOER, 0);
- spin_unlock_irqrestore(&info->lock,flags);
+ spin_unlock_irqrestore(&info->port_array[0]->lock, flags);
if ((rc == 0) && copy_to_user(user_gpio, &gpio, sizeof(gpio)))
rc = -EFAULT;
@@ -3578,7 +3668,6 @@ static void device_init(int adapter_num, struct pci_dev *pdev)
/* copy resource information from first port to others */
for (i = 1; i < port_count; ++i) {
- port_array[i]->lock = port_array[0]->lock;
port_array[i]->irq_level = port_array[0]->irq_level;
port_array[i]->reg_addr = port_array[0]->reg_addr;
alloc_dma_bufs(port_array[i]);
@@ -3763,7 +3852,9 @@ module_exit(slgt_exit);
#define CALC_REGADDR() \
unsigned long reg_addr = ((unsigned long)info->reg_addr) + addr; \
if (addr >= 0x80) \
- reg_addr += (info->port_num) * 32;
+ reg_addr += (info->port_num) * 32; \
+ else if (addr >= 0x40) \
+ reg_addr += (info->port_num) * 16;
static __u8 rd_reg8(struct slgt_info *info, unsigned int addr)
{
@@ -4182,7 +4273,13 @@ static void sync_mode(struct slgt_info *info)
/* TCR (tx control)
*
- * 15..13 mode, 000=HDLC 001=raw 010=async 011=monosync 100=bisync
+ * 15..13 mode
+ * 000=HDLC/SDLC
+ * 001=raw bit synchronous
+ * 010=asynchronous/isochronous
+ * 011=monosync byte synchronous
+ * 100=bisync byte synchronous
+ * 101=xsync byte synchronous
* 12..10 encoding
* 09 CRC enable
* 08 CRC32
@@ -4197,6 +4294,9 @@ static void sync_mode(struct slgt_info *info)
val = BIT2;
switch(info->params.mode) {
+ case MGSL_MODE_XSYNC:
+ val |= BIT15 + BIT13;
+ break;
case MGSL_MODE_MONOSYNC: val |= BIT14 + BIT13; break;
case MGSL_MODE_BISYNC: val |= BIT15; break;
case MGSL_MODE_RAW: val |= BIT13; break;
@@ -4251,7 +4351,13 @@ static void sync_mode(struct slgt_info *info)
/* RCR (rx control)
*
- * 15..13 mode, 000=HDLC 001=raw 010=async 011=monosync 100=bisync
+ * 15..13 mode
+ * 000=HDLC/SDLC
+ * 001=raw bit synchronous
+ * 010=asynchronous/isochronous
+ * 011=monosync byte synchronous
+ * 100=bisync byte synchronous
+ * 101=xsync byte synchronous
* 12..10 encoding
* 09 CRC enable
* 08 CRC32
@@ -4263,6 +4369,9 @@ static void sync_mode(struct slgt_info *info)
val = 0;
switch(info->params.mode) {
+ case MGSL_MODE_XSYNC:
+ val |= BIT15 + BIT13;
+ break;
case MGSL_MODE_MONOSYNC: val |= BIT14 + BIT13; break;
case MGSL_MODE_BISYNC: val |= BIT15; break;
case MGSL_MODE_RAW: val |= BIT13; break;
@@ -4679,6 +4788,7 @@ static bool rx_get_buf(struct slgt_info *info)
switch(info->params.mode) {
case MGSL_MODE_MONOSYNC:
case MGSL_MODE_BISYNC:
+ case MGSL_MODE_XSYNC:
/* ignore residue in byte synchronous modes */
if (desc_residue(info->rbufs[i]))
count--;
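The new MGSL_IOCSXSYNC/MGSL_IOCSXCTRL ioctls added above take plain integer arguments, so a caller packs the extended-control word itself: bits 18:17 select the sync-pattern length, bit 16 enables the terminal count, and bits 15:0 hold the byte count minus one. A hedged userspace sketch follows; the device node is hypothetical, and the MGSL_IOC* constants are assumed to come from <linux/synclink.h> on a kernel that carries this patch.

#include <fcntl.h>
#include <stdio.h>
#include <sys/ioctl.h>
#include <linux/synclink.h>

int main(void)
{
	/* /dev/ttySLG0 is a hypothetical adapter node */
	int fd = open("/dev/ttySLG0", O_RDWR);
	int xsync = 0x7e7e7e7e;               /* oldest pattern byte is most significant */
	int xctrl = (3 << 17)                 /* xctrl[18:17] = 11: 4-byte sync pattern */
		  | (1 << 16)                 /* xctrl[16]: enable terminal count */
		  | (128 - 1);                /* xctrl[15:0]: 128-byte frames, count minus one */

	if (fd < 0)
		return 1;
	if (ioctl(fd, MGSL_IOCSXSYNC, xsync) < 0 ||
	    ioctl(fd, MGSL_IOCSXCTRL, xctrl) < 0)
		perror("MGSL_IOCSX*");
	return 0;
}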
diff --git a/drivers/char/tpm/tpm_tis.c b/drivers/char/tpm/tpm_tis.c
index 1030f8420137..c17a305ecb28 100644
--- a/drivers/char/tpm/tpm_tis.c
+++ b/drivers/char/tpm/tpm_tis.c
@@ -25,6 +25,7 @@
#include <linux/slab.h>
#include <linux/interrupt.h>
#include <linux/wait.h>
+#include <linux/acpi.h>
#include "tpm.h"
#define TPM_HEADER_SIZE 10
@@ -78,6 +79,26 @@ enum tis_defaults {
static LIST_HEAD(tis_chips);
static DEFINE_SPINLOCK(tis_lock);
+#ifdef CONFIG_ACPI
+static int is_itpm(struct pnp_dev *dev)
+{
+ struct acpi_device *acpi = pnp_acpi_device(dev);
+ struct acpi_hardware_id *id;
+
+ list_for_each_entry(id, &acpi->pnp.ids, list) {
+ if (!strcmp("INTC0102", id->id))
+ return 1;
+ }
+
+ return 0;
+}
+#else
+static int is_itpm(struct pnp_dev *dev)
+{
+ return 0;
+}
+#endif
+
static int check_locality(struct tpm_chip *chip, int l)
{
if ((ioread8(chip->vendor.iobase + TPM_ACCESS(l)) &
@@ -472,6 +493,9 @@ static int tpm_tis_init(struct device *dev, resource_size_t start,
"1.2 TPM (device-id 0x%X, rev-id %d)\n",
vendor >> 16, ioread8(chip->vendor.iobase + TPM_RID(0)));
+ if (is_itpm(to_pnp_dev(dev)))
+ itpm = 1;
+
if (itpm)
dev_info(dev, "Intel iTPM workaround enabled\n");
diff --git a/drivers/char/uv_mmtimer.c b/drivers/char/uv_mmtimer.c
index 493b47a0d511..956ebe2080a5 100644
--- a/drivers/char/uv_mmtimer.c
+++ b/drivers/char/uv_mmtimer.c
@@ -23,7 +23,6 @@
#include <linux/interrupt.h>
#include <linux/time.h>
#include <linux/math64.h>
-#include <linux/smp_lock.h>
#include <asm/genapic.h>
#include <asm/uv/uv_hub.h>
diff --git a/drivers/char/virtio_console.c b/drivers/char/virtio_console.c
index 6c1b676643a9..896a2ced1d27 100644
--- a/drivers/char/virtio_console.c
+++ b/drivers/char/virtio_console.c
@@ -1547,31 +1547,16 @@ static int init_vqs(struct ports_device *portdev)
nr_queues = use_multiport(portdev) ? (nr_ports + 1) * 2 : 2;
vqs = kmalloc(nr_queues * sizeof(struct virtqueue *), GFP_KERNEL);
- if (!vqs) {
- err = -ENOMEM;
- goto fail;
- }
io_callbacks = kmalloc(nr_queues * sizeof(vq_callback_t *), GFP_KERNEL);
- if (!io_callbacks) {
- err = -ENOMEM;
- goto free_vqs;
- }
io_names = kmalloc(nr_queues * sizeof(char *), GFP_KERNEL);
- if (!io_names) {
- err = -ENOMEM;
- goto free_callbacks;
- }
portdev->in_vqs = kmalloc(nr_ports * sizeof(struct virtqueue *),
GFP_KERNEL);
- if (!portdev->in_vqs) {
- err = -ENOMEM;
- goto free_names;
- }
portdev->out_vqs = kmalloc(nr_ports * sizeof(struct virtqueue *),
GFP_KERNEL);
- if (!portdev->out_vqs) {
+ if (!vqs || !io_callbacks || !io_names || !portdev->in_vqs ||
+ !portdev->out_vqs) {
err = -ENOMEM;
- goto free_invqs;
+ goto free;
}
/*
@@ -1605,7 +1590,7 @@ static int init_vqs(struct ports_device *portdev)
io_callbacks,
(const char **)io_names);
if (err)
- goto free_outvqs;
+ goto free;
j = 0;
portdev->in_vqs[0] = vqs[0];
@@ -1621,23 +1606,19 @@ static int init_vqs(struct ports_device *portdev)
portdev->out_vqs[i] = vqs[j + 1];
}
}
- kfree(io_callbacks);
kfree(io_names);
+ kfree(io_callbacks);
kfree(vqs);
return 0;
-free_names:
- kfree(io_names);
-free_callbacks:
- kfree(io_callbacks);
-free_outvqs:
+free:
kfree(portdev->out_vqs);
-free_invqs:
kfree(portdev->in_vqs);
-free_vqs:
+ kfree(io_names);
+ kfree(io_callbacks);
kfree(vqs);
-fail:
+
return err;
}
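The init_vqs() cleanup above collapses the ladder of error labels into one, which is safe because kfree(NULL) is a no-op and every pointer can therefore be freed unconditionally whichever allocation failed. A small userspace sketch of the same idiom using free(), which likewise accepts NULL:

#include <stdlib.h>

static int demo(size_t n)
{
	int *a = malloc(n * sizeof(*a));
	int *b = malloc(n * sizeof(*b));
	int *c = malloc(n * sizeof(*c));
	int err = 0;

	if (!a || !b || !c) {
		err = -1;             /* stands in for -ENOMEM */
	} else {
		/* ... use a, b and c here ... */
	}

	/* single cleanup path: free(NULL) does nothing, so neither the order
	 * nor which allocation failed matters */
	free(c);
	free(b);
	free(a);
	return err;
}

int main(void)
{
	return demo(16) ? 1 : 0;
}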
diff --git a/drivers/clocksource/sh_cmt.c b/drivers/clocksource/sh_cmt.c
index 717305d30444..d68d3aa1814b 100644
--- a/drivers/clocksource/sh_cmt.c
+++ b/drivers/clocksource/sh_cmt.c
@@ -308,7 +308,7 @@ static irqreturn_t sh_cmt_interrupt(int irq, void *dev_id)
* isr before we end up here.
*/
if (p->flags & FLAG_CLOCKSOURCE)
- p->total_cycles += p->match_value;
+ p->total_cycles += p->match_value + 1;
if (!(p->flags & FLAG_REPROGRAM))
p->next_match_value = p->max_match_value;
@@ -403,7 +403,7 @@ static cycle_t sh_cmt_clocksource_read(struct clocksource *cs)
raw = sh_cmt_get_counter(p, &has_wrapped);
if (unlikely(has_wrapped))
- raw += p->match_value;
+ raw += p->match_value + 1;
spin_unlock_irqrestore(&p->lock, flags);
return value + raw;
@@ -445,7 +445,7 @@ static int sh_cmt_register_clocksource(struct sh_cmt_priv *p,
/* clk_get_rate() needs an enabled clock */
clk_enable(p->clk);
- p->rate = clk_get_rate(p->clk) / (p->width == 16) ? 512 : 8;
+ p->rate = clk_get_rate(p->clk) / ((p->width == 16) ? 512 : 8);
clk_disable(p->clk);
/* TODO: calculate good shift from rate and counter bit width */
@@ -478,7 +478,7 @@ static void sh_cmt_clock_event_start(struct sh_cmt_priv *p, int periodic)
ced->min_delta_ns = clockevent_delta2ns(0x1f, ced);
if (periodic)
- sh_cmt_set_next(p, (p->rate + HZ/2) / HZ);
+ sh_cmt_set_next(p, ((p->rate + HZ/2) / HZ) - 1);
else
sh_cmt_set_next(p, p->max_match_value);
}
@@ -523,9 +523,9 @@ static int sh_cmt_clock_event_next(unsigned long delta,
BUG_ON(ced->mode != CLOCK_EVT_MODE_ONESHOT);
if (likely(p->flags & FLAG_IRQCONTEXT))
- p->next_match_value = delta;
+ p->next_match_value = delta - 1;
else
- sh_cmt_set_next(p, delta);
+ sh_cmt_set_next(p, delta - 1);
return 0;
}
@@ -616,13 +616,9 @@ static int sh_cmt_setup(struct sh_cmt_priv *p, struct platform_device *pdev)
/* get hold of clock */
p->clk = clk_get(&p->pdev->dev, "cmt_fck");
if (IS_ERR(p->clk)) {
- dev_warn(&p->pdev->dev, "using deprecated clock lookup\n");
- p->clk = clk_get(&p->pdev->dev, cfg->clk);
- if (IS_ERR(p->clk)) {
- dev_err(&p->pdev->dev, "cannot get clock\n");
- ret = PTR_ERR(p->clk);
- goto err1;
- }
+ dev_err(&p->pdev->dev, "cannot get clock\n");
+ ret = PTR_ERR(p->clk);
+ goto err1;
}
if (resource_size(res) == 6) {
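The sh_cmt hunks above account for the compare-match counter covering match_value + 1 input ticks per period (it counts 0 through match_value inclusive), so a wrap now adds match_value + 1 cycles and the periodic/one-shot programming subtracts one from the desired tick count. A small sketch of that arithmetic, assuming an illustrative 1 MHz input clock and HZ = 100:

#include <stdio.h>

int main(void)
{
	unsigned long rate = 1000000, hz = 100;
	unsigned long period_ticks = (rate + hz / 2) / hz;  /* 10000 ticks per period */
	unsigned long match_value = period_ticks - 1;       /* program 9999 */

	/* each wrap contributes match_value + 1 ticks to the running count */
	printf("match=%lu ticks/period=%lu\n", match_value, match_value + 1);
	return 0;
}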
diff --git a/drivers/clocksource/sh_mtu2.c b/drivers/clocksource/sh_mtu2.c
index ef7a5be8a09f..40630cb98237 100644
--- a/drivers/clocksource/sh_mtu2.c
+++ b/drivers/clocksource/sh_mtu2.c
@@ -287,13 +287,9 @@ static int sh_mtu2_setup(struct sh_mtu2_priv *p, struct platform_device *pdev)
/* get hold of clock */
p->clk = clk_get(&p->pdev->dev, "mtu2_fck");
if (IS_ERR(p->clk)) {
- dev_warn(&p->pdev->dev, "using deprecated clock lookup\n");
- p->clk = clk_get(&p->pdev->dev, cfg->clk);
- if (IS_ERR(p->clk)) {
- dev_err(&p->pdev->dev, "cannot get clock\n");
- ret = PTR_ERR(p->clk);
- goto err1;
- }
+ dev_err(&p->pdev->dev, "cannot get clock\n");
+ ret = PTR_ERR(p->clk);
+ goto err1;
}
return sh_mtu2_register(p, (char *)dev_name(&p->pdev->dev),
diff --git a/drivers/clocksource/sh_tmu.c b/drivers/clocksource/sh_tmu.c
index de715901b82a..36aba9923060 100644
--- a/drivers/clocksource/sh_tmu.c
+++ b/drivers/clocksource/sh_tmu.c
@@ -393,13 +393,9 @@ static int sh_tmu_setup(struct sh_tmu_priv *p, struct platform_device *pdev)
/* get hold of clock */
p->clk = clk_get(&p->pdev->dev, "tmu_fck");
if (IS_ERR(p->clk)) {
- dev_warn(&p->pdev->dev, "using deprecated clock lookup\n");
- p->clk = clk_get(&p->pdev->dev, cfg->clk);
- if (IS_ERR(p->clk)) {
- dev_err(&p->pdev->dev, "cannot get clock\n");
- ret = PTR_ERR(p->clk);
- goto err1;
- }
+ dev_err(&p->pdev->dev, "cannot get clock\n");
+ ret = PTR_ERR(p->clk);
+ goto err1;
}
return sh_tmu_register(p, (char *)dev_name(&p->pdev->dev),
diff --git a/drivers/connector/cn_queue.c b/drivers/connector/cn_queue.c
index 210338ea222f..81270d221e5a 100644
--- a/drivers/connector/cn_queue.c
+++ b/drivers/connector/cn_queue.c
@@ -31,48 +31,6 @@
#include <linux/connector.h>
#include <linux/delay.h>
-
-/*
- * This job is sent to the kevent workqueue.
- * While no event is once sent to any callback, the connector workqueue
- * is not created to avoid a useless waiting kernel task.
- * Once the first event is received, we create this dedicated workqueue which
- * is necessary because the flow of data can be high and we don't want
- * to encumber keventd with that.
- */
-static void cn_queue_create(struct work_struct *work)
-{
- struct cn_queue_dev *dev;
-
- dev = container_of(work, struct cn_queue_dev, wq_creation);
-
- dev->cn_queue = create_singlethread_workqueue(dev->name);
- /* If we fail, we will use keventd for all following connector jobs */
- WARN_ON(!dev->cn_queue);
-}
-
-/*
- * Queue a data sent to a callback.
- * If the connector workqueue is already created, we queue the job on it.
- * Otherwise, we queue the job to kevent and queue the connector workqueue
- * creation too.
- */
-int queue_cn_work(struct cn_callback_entry *cbq, struct work_struct *work)
-{
- struct cn_queue_dev *pdev = cbq->pdev;
-
- if (likely(pdev->cn_queue))
- return queue_work(pdev->cn_queue, work);
-
- /* Don't create the connector workqueue twice */
- if (atomic_inc_return(&pdev->wq_requested) == 1)
- schedule_work(&pdev->wq_creation);
- else
- atomic_dec(&pdev->wq_requested);
-
- return schedule_work(work);
-}
-
void cn_queue_wrapper(struct work_struct *work)
{
struct cn_callback_entry *cbq =
@@ -111,11 +69,7 @@ cn_queue_alloc_callback_entry(char *name, struct cb_id *id,
static void cn_queue_free_callback(struct cn_callback_entry *cbq)
{
- /* The first jobs have been sent to kevent, flush them too */
- flush_scheduled_work();
- if (cbq->pdev->cn_queue)
- flush_workqueue(cbq->pdev->cn_queue);
-
+ flush_workqueue(cbq->pdev->cn_queue);
kfree(cbq);
}
@@ -193,11 +147,14 @@ struct cn_queue_dev *cn_queue_alloc_dev(char *name, struct sock *nls)
atomic_set(&dev->refcnt, 0);
INIT_LIST_HEAD(&dev->queue_list);
spin_lock_init(&dev->queue_lock);
- init_waitqueue_head(&dev->wq_created);
dev->nls = nls;
- INIT_WORK(&dev->wq_creation, cn_queue_create);
+ dev->cn_queue = alloc_ordered_workqueue(dev->name, 0);
+ if (!dev->cn_queue) {
+ kfree(dev);
+ return NULL;
+ }
return dev;
}
@@ -205,25 +162,9 @@ struct cn_queue_dev *cn_queue_alloc_dev(char *name, struct sock *nls)
void cn_queue_free_dev(struct cn_queue_dev *dev)
{
struct cn_callback_entry *cbq, *n;
- long timeout;
- DEFINE_WAIT(wait);
-
- /* Flush the first pending jobs queued on kevent */
- flush_scheduled_work();
-
- /* If the connector workqueue creation is still pending, wait for it */
- prepare_to_wait(&dev->wq_created, &wait, TASK_UNINTERRUPTIBLE);
- if (atomic_read(&dev->wq_requested) && !dev->cn_queue) {
- timeout = schedule_timeout(HZ * 2);
- if (!timeout && !dev->cn_queue)
- WARN_ON(1);
- }
- finish_wait(&dev->wq_created, &wait);
- if (dev->cn_queue) {
- flush_workqueue(dev->cn_queue);
- destroy_workqueue(dev->cn_queue);
- }
+ flush_workqueue(dev->cn_queue);
+ destroy_workqueue(dev->cn_queue);
spin_lock_bh(&dev->queue_lock);
list_for_each_entry_safe(cbq, n, &dev->queue_list, callback_entry)
diff --git a/drivers/connector/connector.c b/drivers/connector/connector.c
index 1d48f40342cb..e16c3fa8d2e3 100644
--- a/drivers/connector/connector.c
+++ b/drivers/connector/connector.c
@@ -133,7 +133,8 @@ static int cn_call_callback(struct sk_buff *skb)
__cbq->data.skb == NULL)) {
__cbq->data.skb = skb;
- if (queue_cn_work(__cbq, &__cbq->work))
+ if (queue_work(dev->cbdev->cn_queue,
+ &__cbq->work))
err = 0;
else
err = -EINVAL;
@@ -148,13 +149,11 @@ static int cn_call_callback(struct sk_buff *skb)
d->callback = __cbq->data.callback;
d->free = __new_cbq;
- __new_cbq->pdev = __cbq->pdev;
-
INIT_WORK(&__new_cbq->work,
&cn_queue_wrapper);
- if (queue_cn_work(__new_cbq,
- &__new_cbq->work))
+ if (queue_work(dev->cbdev->cn_queue,
+ &__new_cbq->work))
err = 0;
else {
kfree(__new_cbq);
diff --git a/drivers/cpufreq/cpufreq.c b/drivers/cpufreq/cpufreq.c
index 199dcb9f0b83..c63a43823744 100644
--- a/drivers/cpufreq/cpufreq.c
+++ b/drivers/cpufreq/cpufreq.c
@@ -918,8 +918,8 @@ static int cpufreq_add_dev_interface(unsigned int cpu,
spin_lock_irqsave(&cpufreq_driver_lock, flags);
for_each_cpu(j, policy->cpus) {
- if (!cpu_online(j))
- continue;
+ if (!cpu_online(j))
+ continue;
per_cpu(cpufreq_cpu_data, j) = policy;
per_cpu(cpufreq_policy_cpu, j) = policy->cpu;
}
diff --git a/drivers/cpufreq/cpufreq_ondemand.c b/drivers/cpufreq/cpufreq_ondemand.c
index 7b5093664e49..c631f27a3dcc 100644
--- a/drivers/cpufreq/cpufreq_ondemand.c
+++ b/drivers/cpufreq/cpufreq_ondemand.c
@@ -30,6 +30,8 @@
#define DEF_FREQUENCY_DOWN_DIFFERENTIAL (10)
#define DEF_FREQUENCY_UP_THRESHOLD (80)
+#define DEF_SAMPLING_DOWN_FACTOR (1)
+#define MAX_SAMPLING_DOWN_FACTOR (100000)
#define MICRO_FREQUENCY_DOWN_DIFFERENTIAL (3)
#define MICRO_FREQUENCY_UP_THRESHOLD (95)
#define MICRO_FREQUENCY_MIN_SAMPLE_RATE (10000)
@@ -82,6 +84,7 @@ struct cpu_dbs_info_s {
unsigned int freq_lo;
unsigned int freq_lo_jiffies;
unsigned int freq_hi_jiffies;
+ unsigned int rate_mult;
int cpu;
unsigned int sample_type:1;
/*
@@ -108,10 +111,12 @@ static struct dbs_tuners {
unsigned int up_threshold;
unsigned int down_differential;
unsigned int ignore_nice;
+ unsigned int sampling_down_factor;
unsigned int powersave_bias;
unsigned int io_is_busy;
} dbs_tuners_ins = {
.up_threshold = DEF_FREQUENCY_UP_THRESHOLD,
+ .sampling_down_factor = DEF_SAMPLING_DOWN_FACTOR,
.down_differential = DEF_FREQUENCY_DOWN_DIFFERENTIAL,
.ignore_nice = 0,
.powersave_bias = 0,
@@ -259,6 +264,7 @@ static ssize_t show_##file_name \
show_one(sampling_rate, sampling_rate);
show_one(io_is_busy, io_is_busy);
show_one(up_threshold, up_threshold);
+show_one(sampling_down_factor, sampling_down_factor);
show_one(ignore_nice_load, ignore_nice);
show_one(powersave_bias, powersave_bias);
@@ -340,6 +346,29 @@ static ssize_t store_up_threshold(struct kobject *a, struct attribute *b,
return count;
}
+static ssize_t store_sampling_down_factor(struct kobject *a,
+ struct attribute *b, const char *buf, size_t count)
+{
+ unsigned int input, j;
+ int ret;
+ ret = sscanf(buf, "%u", &input);
+
+ if (ret != 1 || input > MAX_SAMPLING_DOWN_FACTOR || input < 1)
+ return -EINVAL;
+ mutex_lock(&dbs_mutex);
+ dbs_tuners_ins.sampling_down_factor = input;
+
+ /* Reset down sampling multiplier in case it was active */
+ for_each_online_cpu(j) {
+ struct cpu_dbs_info_s *dbs_info;
+ dbs_info = &per_cpu(od_cpu_dbs_info, j);
+ dbs_info->rate_mult = 1;
+ }
+ mutex_unlock(&dbs_mutex);
+
+ return count;
+}
+
static ssize_t store_ignore_nice_load(struct kobject *a, struct attribute *b,
const char *buf, size_t count)
{
@@ -401,6 +430,7 @@ static ssize_t store_powersave_bias(struct kobject *a, struct attribute *b,
define_one_global_rw(sampling_rate);
define_one_global_rw(io_is_busy);
define_one_global_rw(up_threshold);
+define_one_global_rw(sampling_down_factor);
define_one_global_rw(ignore_nice_load);
define_one_global_rw(powersave_bias);
@@ -409,6 +439,7 @@ static struct attribute *dbs_attributes[] = {
&sampling_rate_min.attr,
&sampling_rate.attr,
&up_threshold.attr,
+ &sampling_down_factor.attr,
&ignore_nice_load.attr,
&powersave_bias.attr,
&io_is_busy.attr,
@@ -562,6 +593,10 @@ static void dbs_check_cpu(struct cpu_dbs_info_s *this_dbs_info)
/* Check for frequency increase */
if (max_load_freq > dbs_tuners_ins.up_threshold * policy->cur) {
+ /* If switching to max speed, apply sampling_down_factor */
+ if (policy->cur < policy->max)
+ this_dbs_info->rate_mult =
+ dbs_tuners_ins.sampling_down_factor;
dbs_freq_increase(policy, policy->max);
return;
}
@@ -584,6 +619,9 @@ static void dbs_check_cpu(struct cpu_dbs_info_s *this_dbs_info)
(dbs_tuners_ins.up_threshold -
dbs_tuners_ins.down_differential);
+ /* No longer fully busy, reset rate_mult */
+ this_dbs_info->rate_mult = 1;
+
if (freq_next < policy->min)
freq_next = policy->min;
@@ -607,7 +645,8 @@ static void do_dbs_timer(struct work_struct *work)
int sample_type = dbs_info->sample_type;
/* We want all CPUs to do sampling nearly on same jiffy */
- int delay = usecs_to_jiffies(dbs_tuners_ins.sampling_rate);
+ int delay = usecs_to_jiffies(dbs_tuners_ins.sampling_rate
+ * dbs_info->rate_mult);
if (num_online_cpus() > 1)
delay -= jiffies % delay;
@@ -711,6 +750,7 @@ static int cpufreq_governor_dbs(struct cpufreq_policy *policy,
}
}
this_dbs_info->cpu = cpu;
+ this_dbs_info->rate_mult = 1;
ondemand_powersave_bias_init_cpu(cpu);
/*
* Start the timerschedule work, when this governor
diff --git a/drivers/crypto/hifn_795x.c b/drivers/crypto/hifn_795x.c
index 0eac3da566ba..a84250a5dd51 100644
--- a/drivers/crypto/hifn_795x.c
+++ b/drivers/crypto/hifn_795x.c
@@ -1467,7 +1467,7 @@ static int ablkcipher_add(unsigned int *drestp, struct scatterlist *dst,
return -EINVAL;
while (size) {
- copy = min(drest, min(size, dst->length));
+ copy = min3(drest, size, dst->length);
size -= copy;
drest -= copy;
@@ -1729,7 +1729,7 @@ static int ablkcipher_get(void *saddr, unsigned int *srestp, unsigned int offset
return -EINVAL;
while (size) {
- copy = min(srest, min(dst->length, size));
+ copy = min3(srest, dst->length, size);
daddr = kmap_atomic(sg_page(dst), KM_IRQ0);
memcpy(daddr + dst->offset + offset, saddr, copy);
diff --git a/drivers/crypto/n2_core.c b/drivers/crypto/n2_core.c
index 88ee01510ec0..76141262ea1d 100644
--- a/drivers/crypto/n2_core.c
+++ b/drivers/crypto/n2_core.c
@@ -1832,7 +1832,7 @@ static int __devinit get_irq_props(struct mdesc_handle *mdesc, u64 node,
return -ENODEV;
ino = mdesc_get_property(mdesc, node, "ino", &ino_len);
- if (!intr)
+ if (!ino)
return -ENODEV;
if (intr_len != ino_len)
diff --git a/drivers/crypto/padlock-aes.c b/drivers/crypto/padlock-aes.c
index 2e992bc8015b..8a515baa38f7 100644
--- a/drivers/crypto/padlock-aes.c
+++ b/drivers/crypto/padlock-aes.c
@@ -286,7 +286,7 @@ static inline u8 *padlock_xcrypt_cbc(const u8 *input, u8 *output, void *key,
if (initial)
asm volatile (".byte 0xf3,0x0f,0xa7,0xd0" /* rep xcryptcbc */
: "+S" (input), "+D" (output), "+a" (iv)
- : "d" (control_word), "b" (key), "c" (count));
+ : "d" (control_word), "b" (key), "c" (initial));
asm volatile (".byte 0xf3,0x0f,0xa7,0xd0" /* rep xcryptcbc */
: "+S" (input), "+D" (output), "+a" (iv)
diff --git a/drivers/dma/Kconfig b/drivers/dma/Kconfig
index 9520cf02edc8..6ee23592700a 100644
--- a/drivers/dma/Kconfig
+++ b/drivers/dma/Kconfig
@@ -46,15 +46,22 @@ config INTEL_MID_DMAC
If unsure, say N.
-config ASYNC_TX_DISABLE_CHANNEL_SWITCH
+config ASYNC_TX_ENABLE_CHANNEL_SWITCH
bool
+config AMBA_PL08X
+ bool "ARM PrimeCell PL080 or PL081 support"
+ depends on ARM_AMBA && EXPERIMENTAL
+ select DMA_ENGINE
+ help
+ Platform has a PL08x DMAC device
+ which can provide DMA engine support
+
config INTEL_IOATDMA
tristate "Intel I/OAT DMA support"
depends on PCI && X86
select DMA_ENGINE
select DCA
- select ASYNC_TX_DISABLE_CHANNEL_SWITCH
select ASYNC_TX_DISABLE_PQ_VAL_DMA
select ASYNC_TX_DISABLE_XOR_VAL_DMA
help
@@ -69,6 +76,7 @@ config INTEL_IOP_ADMA
tristate "Intel IOP ADMA support"
depends on ARCH_IOP32X || ARCH_IOP33X || ARCH_IOP13XX
select DMA_ENGINE
+ select ASYNC_TX_ENABLE_CHANNEL_SWITCH
help
Enable support for the Intel(R) IOP Series RAID engines.
@@ -93,6 +101,7 @@ config FSL_DMA
tristate "Freescale Elo and Elo Plus DMA support"
depends on FSL_SOC
select DMA_ENGINE
+ select ASYNC_TX_ENABLE_CHANNEL_SWITCH
---help---
Enable support for the Freescale Elo and Elo Plus DMA controllers.
The Elo is the DMA controller on some 82xx and 83xx parts, and the
@@ -109,6 +118,7 @@ config MV_XOR
bool "Marvell XOR engine support"
depends on PLAT_ORION
select DMA_ENGINE
+ select ASYNC_TX_ENABLE_CHANNEL_SWITCH
---help---
Enable support for the Marvell XOR engine.
@@ -166,6 +176,7 @@ config AMCC_PPC440SPE_ADMA
depends on 440SPe || 440SP
select DMA_ENGINE
select ARCH_HAS_ASYNC_TX_FIND_CHANNEL
+ select ASYNC_TX_ENABLE_CHANNEL_SWITCH
help
Enable support for the AMCC PPC440SPe RAID engines.
@@ -189,11 +200,27 @@ config PL330_DMA
platform_data for a dma-pl330 device.
config PCH_DMA
- tristate "Topcliff PCH DMA support"
+ tristate "Topcliff (Intel EG20T) PCH DMA support"
depends on PCI && X86
select DMA_ENGINE
help
- Enable support for the Topcliff PCH DMA engine.
+ Enable support for the Topcliff (Intel EG20T) PCH DMA engine.
+
+config IMX_SDMA
+ tristate "i.MX SDMA support"
+ depends on ARCH_MX25 || ARCH_MX3 || ARCH_MX5
+ select DMA_ENGINE
+ help
+ Support the i.MX SDMA engine. This engine is integrated into
+ Freescale i.MX25/31/35/51 chips.
+
+config IMX_DMA
+ tristate "i.MX DMA support"
+ depends on ARCH_MX1 || ARCH_MX21 || MACH_MX27
+ select DMA_ENGINE
+ help
+ Support the i.MX DMA engine. This engine is integrated into
+ Freescale i.MX1/21/27 chips.
config DMA_ENGINE
bool
diff --git a/drivers/dma/Makefile b/drivers/dma/Makefile
index 72bd70384d8a..a8a84f4587f2 100644
--- a/drivers/dma/Makefile
+++ b/drivers/dma/Makefile
@@ -21,7 +21,10 @@ obj-$(CONFIG_TXX9_DMAC) += txx9dmac.o
obj-$(CONFIG_SH_DMAE) += shdma.o
obj-$(CONFIG_COH901318) += coh901318.o coh901318_lli.o
obj-$(CONFIG_AMCC_PPC440SPE_ADMA) += ppc4xx/
+obj-$(CONFIG_IMX_SDMA) += imx-sdma.o
+obj-$(CONFIG_IMX_DMA) += imx-dma.o
obj-$(CONFIG_TIMB_DMA) += timb_dma.o
obj-$(CONFIG_STE_DMA40) += ste_dma40.o ste_dma40_ll.o
obj-$(CONFIG_PL330_DMA) += pl330.o
obj-$(CONFIG_PCH_DMA) += pch_dma.o
+obj-$(CONFIG_AMBA_PL08X) += amba-pl08x.o
diff --git a/drivers/dma/amba-pl08x.c b/drivers/dma/amba-pl08x.c
new file mode 100644
index 000000000000..b605cc9ac3a2
--- /dev/null
+++ b/drivers/dma/amba-pl08x.c
@@ -0,0 +1,2167 @@
+/*
+ * Copyright (c) 2006 ARM Ltd.
+ * Copyright (c) 2010 ST-Ericsson SA
+ *
+ * Author: Peter Pearse <peter.pearse@arm.com>
+ * Author: Linus Walleij <linus.walleij@stericsson.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the Free
+ * Software Foundation; either version 2 of the License, or (at your option)
+ * any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc., 59
+ * Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ *
+ * The full GNU General Public License is in this distribution in the
+ * file called COPYING.
+ *
+ * Documentation: ARM DDI 0196G == PL080
+ * Documentation: ARM DDI 0218E == PL081
+ *
+ * PL080 & PL081 both have 16 sets of DMA signals that can be routed to
+ * any channel.
+ *
+ * The PL080 has 8 channels available for simultaneous use, and the PL081
+ * has only two channels. So on these DMA controllers the number of channels
+ * and the number of incoming DMA signals are two totally different things.
+ * It is usually not possible to theoretically handle all physical signals,
+ * so a multiplexing scheme with possible denial of use is necessary.
+ *
+ * The PL080 has a dual bus master, PL081 has a single master.
+ *
+ * Memory to peripheral transfer may be visualized as
+ * Get data from memory to DMAC
+ * Until no data left
+ * On burst request from peripheral
+ * Destination burst from DMAC to peripheral
+ * Clear burst request
+ * Raise terminal count interrupt
+ *
+ * For peripherals with a FIFO:
+ * Source burst size == half the depth of the peripheral FIFO
+ * Destination burst size == the depth of the peripheral FIFO
+ *
+ * (Bursts are irrelevant for mem to mem transfers - there are no burst
+ * signals, the DMA controller will simply facilitate its AHB master.)
+ *
+ * ASSUMES default (little) endianness for DMA transfers
+ *
+ * Only DMAC flow control is implemented
+ *
+ * Global TODO:
+ * - Break out common code from arch/arm/mach-s3c64xx and share
+ */
+#include <linux/device.h>
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/pci.h>
+#include <linux/interrupt.h>
+#include <linux/slab.h>
+#include <linux/dmapool.h>
+#include <linux/amba/bus.h>
+#include <linux/dmaengine.h>
+#include <linux/amba/pl08x.h>
+#include <linux/debugfs.h>
+#include <linux/seq_file.h>
+
+#include <asm/hardware/pl080.h>
+#include <asm/dma.h>
+#include <asm/mach/dma.h>
+#include <asm/atomic.h>
+#include <asm/processor.h>
+#include <asm/cacheflush.h>
+
+#define DRIVER_NAME "pl08xdmac"
+
+/**
+ * struct vendor_data - vendor-specific config parameters
+ * for PL08x derivatives
+ * @name: the name of this specific variant
+ * @channels: the number of channels available in this variant
+ * @dualmaster: whether this version supports dual AHB masters
+ * or not.
+ */
+struct vendor_data {
+ char *name;
+ u8 channels;
+ bool dualmaster;
+};
+
+/*
+ * PL08X private data structures
+ * An LLI struct - see pl08x TRM
+ * Note that next uses bit[0] as a bus bit,
+ * start & end do not - their bus bit info
+ * is in cctl
+ */
+struct lli {
+ dma_addr_t src;
+ dma_addr_t dst;
+ dma_addr_t next;
+ u32 cctl;
+};
+
+/**
+ * struct pl08x_driver_data - the local state holder for the PL08x
+ * @slave: slave engine for this instance
+ * @memcpy: memcpy engine for this instance
+ * @base: virtual memory base (remapped) for the PL08x
+ * @adev: the corresponding AMBA (PrimeCell) bus entry
+ * @vd: vendor data for this PL08x variant
+ * @pd: platform data passed in from the platform/machine
+ * @phy_chans: array of data for the physical channels
+ * @pool: a pool for the LLI descriptors
+ * @pool_ctr: counter of LLIs in the pool
+ * @lock: a spinlock for this struct
+ */
+struct pl08x_driver_data {
+ struct dma_device slave;
+ struct dma_device memcpy;
+ void __iomem *base;
+ struct amba_device *adev;
+ struct vendor_data *vd;
+ struct pl08x_platform_data *pd;
+ struct pl08x_phy_chan *phy_chans;
+ struct dma_pool *pool;
+ int pool_ctr;
+ spinlock_t lock;
+};
+
+/*
+ * PL08X specific defines
+ */
+
+/*
+ * Memory boundaries: the manual for PL08x says that the controller
+ * cannot read past a 1KiB boundary, so these defines are used to
+ * create transfer LLIs that do not cross such boundaries.
+ */
+#define PL08X_BOUNDARY_SHIFT (10) /* 1KB 0x400 */
+#define PL08X_BOUNDARY_SIZE (1 << PL08X_BOUNDARY_SHIFT)
+
+/* Minimum period between work queue runs */
+#define PL08X_WQ_PERIODMIN 20
+
+/* Size (bytes) of each LLI buffer allocated for one transfer */
+# define PL08X_LLI_TSFR_SIZE 0x2000
+
+/* Maximum number of times we call dma_pool_alloc on this pool without freeing */
+#define PL08X_MAX_ALLOCS 0x40
+#define MAX_NUM_TSFR_LLIS (PL08X_LLI_TSFR_SIZE/sizeof(struct lli))
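+/*
+ * Illustrative sizing, assuming a 32-bit dma_addr_t: struct lli is
+ * then 16 bytes, so a 0x2000-byte LLI buffer gives room for up to
+ * 512 LLIs per transfer.
+ */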
+#define PL08X_ALIGN 8
+
+static inline struct pl08x_dma_chan *to_pl08x_chan(struct dma_chan *chan)
+{
+ return container_of(chan, struct pl08x_dma_chan, chan);
+}
+
+/*
+ * Physical channel handling
+ */
+
+/* Whether a certain channel is busy or not */
+static int pl08x_phy_channel_busy(struct pl08x_phy_chan *ch)
+{
+ unsigned int val;
+
+ val = readl(ch->base + PL080_CH_CONFIG);
+ return val & PL080_CONFIG_ACTIVE;
+}
+
+/*
+ * Set the initial DMA register values i.e. those for the first LLI
+ * The next lli pointer and the configuration interrupt bit have
+ * been set when the LLIs were constructed
+ */
+static void pl08x_set_cregs(struct pl08x_driver_data *pl08x,
+ struct pl08x_phy_chan *ch)
+{
+ /* Wait for channel inactive */
+ while (pl08x_phy_channel_busy(ch))
+ ;
+
+ dev_vdbg(&pl08x->adev->dev,
+ "WRITE channel %d: csrc=%08x, cdst=%08x, "
+ "cctl=%08x, clli=%08x, ccfg=%08x\n",
+ ch->id,
+ ch->csrc,
+ ch->cdst,
+ ch->cctl,
+ ch->clli,
+ ch->ccfg);
+
+ writel(ch->csrc, ch->base + PL080_CH_SRC_ADDR);
+ writel(ch->cdst, ch->base + PL080_CH_DST_ADDR);
+ writel(ch->clli, ch->base + PL080_CH_LLI);
+ writel(ch->cctl, ch->base + PL080_CH_CONTROL);
+ writel(ch->ccfg, ch->base + PL080_CH_CONFIG);
+}
+
+static inline void pl08x_config_phychan_for_txd(struct pl08x_dma_chan *plchan)
+{
+ struct pl08x_channel_data *cd = plchan->cd;
+ struct pl08x_phy_chan *phychan = plchan->phychan;
+ struct pl08x_txd *txd = plchan->at;
+
+ /* Copy the basic control register calculated at transfer config */
+ phychan->csrc = txd->csrc;
+ phychan->cdst = txd->cdst;
+ phychan->clli = txd->clli;
+ phychan->cctl = txd->cctl;
+
+ /* Assign the signal to the proper control registers */
+ phychan->ccfg = cd->ccfg;
+ phychan->ccfg &= ~PL080_CONFIG_SRC_SEL_MASK;
+ phychan->ccfg &= ~PL080_CONFIG_DST_SEL_MASK;
+ /* If it wasn't set from AMBA, ignore it */
+ if (txd->direction == DMA_TO_DEVICE)
+ /* Select signal as destination */
+ phychan->ccfg |=
+ (phychan->signal << PL080_CONFIG_DST_SEL_SHIFT);
+ else if (txd->direction == DMA_FROM_DEVICE)
+ /* Select signal as source */
+ phychan->ccfg |=
+ (phychan->signal << PL080_CONFIG_SRC_SEL_SHIFT);
+ /* Always enable error interrupts */
+ phychan->ccfg |= PL080_CONFIG_ERR_IRQ_MASK;
+ /* Always enable terminal interrupts */
+ phychan->ccfg |= PL080_CONFIG_TC_IRQ_MASK;
+}
+
+/*
+ * Enable the DMA channel
+ * Assumes all other configuration bits have been set
+ * as desired before this code is called
+ */
+static void pl08x_enable_phy_chan(struct pl08x_driver_data *pl08x,
+ struct pl08x_phy_chan *ch)
+{
+ u32 val;
+
+ /*
+ * Do not access config register until channel shows as disabled
+ */
+ while (readl(pl08x->base + PL080_EN_CHAN) & (1 << ch->id))
+ ;
+
+ /*
+ * Do not access config register until channel shows as inactive
+ */
+ val = readl(ch->base + PL080_CH_CONFIG);
+ while ((val & PL080_CONFIG_ACTIVE) || (val & PL080_CONFIG_ENABLE))
+ val = readl(ch->base + PL080_CH_CONFIG);
+
+ writel(val | PL080_CONFIG_ENABLE, ch->base + PL080_CH_CONFIG);
+}
+
+/*
+ * Overall DMAC remains enabled always.
+ *
+ * Disabling individual channels could lose data.
+ *
+ * Disable the peripheral DMA after disabling the DMAC
+ * in order to allow the DMAC FIFO to drain, and
+ * hence allow the channel to show inactive
+ *
+ */
+static void pl08x_pause_phy_chan(struct pl08x_phy_chan *ch)
+{
+ u32 val;
+
+ /* Set the HALT bit and wait for the FIFO to drain */
+ val = readl(ch->base + PL080_CH_CONFIG);
+ val |= PL080_CONFIG_HALT;
+ writel(val, ch->base + PL080_CH_CONFIG);
+
+ /* Wait for channel inactive */
+ while (pl08x_phy_channel_busy(ch))
+ ;
+}
+
+static void pl08x_resume_phy_chan(struct pl08x_phy_chan *ch)
+{
+ u32 val;
+
+ /* Clear the HALT bit */
+ val = readl(ch->base + PL080_CH_CONFIG);
+ val &= ~PL080_CONFIG_HALT;
+ writel(val, ch->base + PL080_CH_CONFIG);
+}
+
+
+/* Stops the channel */
+static void pl08x_stop_phy_chan(struct pl08x_phy_chan *ch)
+{
+ u32 val;
+
+ pl08x_pause_phy_chan(ch);
+
+ /* Disable channel */
+ val = readl(ch->base + PL080_CH_CONFIG);
+ val &= ~PL080_CONFIG_ENABLE;
+ val &= ~PL080_CONFIG_ERR_IRQ_MASK;
+ val &= ~PL080_CONFIG_TC_IRQ_MASK;
+ writel(val, ch->base + PL080_CH_CONFIG);
+}
+
+static inline u32 get_bytes_in_cctl(u32 cctl)
+{
+ /* The source width defines the number of bytes */
+ u32 bytes = cctl & PL080_CONTROL_TRANSFER_SIZE_MASK;
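+ /*
+ * e.g. a transfer size field of 0x100 combined with a 32-bit source
+ * width accounts for 1024 bytes (illustrative figures).
+ */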
+
+ switch (cctl >> PL080_CONTROL_SWIDTH_SHIFT) {
+ case PL080_WIDTH_8BIT:
+ break;
+ case PL080_WIDTH_16BIT:
+ bytes *= 2;
+ break;
+ case PL080_WIDTH_32BIT:
+ bytes *= 4;
+ break;
+ }
+ return bytes;
+}
+
+/* The channel should be paused when calling this */
+static u32 pl08x_getbytes_chan(struct pl08x_dma_chan *plchan)
+{
+ struct pl08x_phy_chan *ch;
+ struct pl08x_txd *txdi = NULL;
+ struct pl08x_txd *txd;
+ unsigned long flags;
+ u32 bytes = 0;
+
+ spin_lock_irqsave(&plchan->lock, flags);
+
+ ch = plchan->phychan;
+ txd = plchan->at;
+
+ /*
+ * Next follow the LLIs to get the number of pending bytes in the
+ * currently active transaction.
+ */
+ if (ch && txd) {
+ struct lli *llis_va = txd->llis_va;
+ struct lli *llis_bus = (struct lli *) txd->llis_bus;
+ u32 clli = readl(ch->base + PL080_CH_LLI);
+
+ /* First get the bytes in the current active LLI */
+ bytes = get_bytes_in_cctl(readl(ch->base + PL080_CH_CONTROL));
+
+ if (clli) {
+ int i = 0;
+
+ /* Forward to the LLI pointed to by clli */
+ while ((clli != (u32) &(llis_bus[i])) &&
+ (i < MAX_NUM_TSFR_LLIS))
+ i++;
+
+ while (clli) {
+ bytes += get_bytes_in_cctl(llis_va[i].cctl);
+ /*
+ * A clli of 0x00000000 will terminate the
+ * LLI list
+ */
+ clli = llis_va[i].next;
+ i++;
+ }
+ }
+ }
+
+ /* Sum up all queued transactions */
+ if (!list_empty(&plchan->desc_list)) {
+ list_for_each_entry(txdi, &plchan->desc_list, node) {
+ bytes += txdi->len;
+ }
+
+ }
+
+ spin_unlock_irqrestore(&plchan->lock, flags);
+
+ return bytes;
+}
+
+/*
+ * Allocate a physical channel for a virtual channel
+ */
+static struct pl08x_phy_chan *
+pl08x_get_phy_channel(struct pl08x_driver_data *pl08x,
+ struct pl08x_dma_chan *virt_chan)
+{
+ struct pl08x_phy_chan *ch = NULL;
+ unsigned long flags;
+ int i;
+
+ /*
+ * Try to locate a physical channel to be used for
+ * this transfer. If all are taken return NULL and
+ * the requester will have to cope by using some fallback
+ * PIO mode or retrying later.
+ */
+ for (i = 0; i < pl08x->vd->channels; i++) {
+ ch = &pl08x->phy_chans[i];
+
+ spin_lock_irqsave(&ch->lock, flags);
+
+ if (!ch->serving) {
+ ch->serving = virt_chan;
+ ch->signal = -1;
+ spin_unlock_irqrestore(&ch->lock, flags);
+ break;
+ }
+
+ spin_unlock_irqrestore(&ch->lock, flags);
+ }
+
+ if (i == pl08x->vd->channels) {
+ /* No physical channel available, cope with it */
+ return NULL;
+ }
+
+ return ch;
+}
+
+static inline void pl08x_put_phy_channel(struct pl08x_driver_data *pl08x,
+ struct pl08x_phy_chan *ch)
+{
+ unsigned long flags;
+
+ /* Stop the channel and clear its interrupts */
+ pl08x_stop_phy_chan(ch);
+ writel((1 << ch->id), pl08x->base + PL080_ERR_CLEAR);
+ writel((1 << ch->id), pl08x->base + PL080_TC_CLEAR);
+
+ /* Mark it as free */
+ spin_lock_irqsave(&ch->lock, flags);
+ ch->serving = NULL;
+ spin_unlock_irqrestore(&ch->lock, flags);
+}
+
+/*
+ * LLI handling
+ */
+
+static inline unsigned int pl08x_get_bytes_for_cctl(unsigned int coded)
+{
+ switch (coded) {
+ case PL080_WIDTH_8BIT:
+ return 1;
+ case PL080_WIDTH_16BIT:
+ return 2;
+ case PL080_WIDTH_32BIT:
+ return 4;
+ default:
+ break;
+ }
+ BUG();
+ return 0;
+}
+
+static inline u32 pl08x_cctl_bits(u32 cctl, u8 srcwidth, u8 dstwidth,
+ u32 tsize)
+{
+ u32 retbits = cctl;
+
+ /* Remove all src, dst and transfersize bits */
+ retbits &= ~PL080_CONTROL_DWIDTH_MASK;
+ retbits &= ~PL080_CONTROL_SWIDTH_MASK;
+ retbits &= ~PL080_CONTROL_TRANSFER_SIZE_MASK;
+
+ /* Then set the bits according to the parameters */
+ switch (srcwidth) {
+ case 1:
+ retbits |= PL080_WIDTH_8BIT << PL080_CONTROL_SWIDTH_SHIFT;
+ break;
+ case 2:
+ retbits |= PL080_WIDTH_16BIT << PL080_CONTROL_SWIDTH_SHIFT;
+ break;
+ case 4:
+ retbits |= PL080_WIDTH_32BIT << PL080_CONTROL_SWIDTH_SHIFT;
+ break;
+ default:
+ BUG();
+ break;
+ }
+
+ switch (dstwidth) {
+ case 1:
+ retbits |= PL080_WIDTH_8BIT << PL080_CONTROL_DWIDTH_SHIFT;
+ break;
+ case 2:
+ retbits |= PL080_WIDTH_16BIT << PL080_CONTROL_DWIDTH_SHIFT;
+ break;
+ case 4:
+ retbits |= PL080_WIDTH_32BIT << PL080_CONTROL_DWIDTH_SHIFT;
+ break;
+ default:
+ BUG();
+ break;
+ }
+
+ retbits |= tsize << PL080_CONTROL_TRANSFER_SIZE_SHIFT;
+ return retbits;
+}
+
+/*
+ * Autoselect a master bus to use for the transfer.
+ * This prefers the destination bus if both are available;
+ * if there is a fixed address on one bus, the other will be chosen.
+ */
+void pl08x_choose_master_bus(struct pl08x_bus_data *src_bus,
+ struct pl08x_bus_data *dst_bus, struct pl08x_bus_data **mbus,
+ struct pl08x_bus_data **sbus, u32 cctl)
+{
+ if (!(cctl & PL080_CONTROL_DST_INCR)) {
+ *mbus = src_bus;
+ *sbus = dst_bus;
+ } else if (!(cctl & PL080_CONTROL_SRC_INCR)) {
+ *mbus = dst_bus;
+ *sbus = src_bus;
+ } else {
+ if (dst_bus->buswidth == 4) {
+ *mbus = dst_bus;
+ *sbus = src_bus;
+ } else if (src_bus->buswidth == 4) {
+ *mbus = src_bus;
+ *sbus = dst_bus;
+ } else if (dst_bus->buswidth == 2) {
+ *mbus = dst_bus;
+ *sbus = src_bus;
+ } else if (src_bus->buswidth == 2) {
+ *mbus = src_bus;
+ *sbus = dst_bus;
+ } else {
+ /* src_bus->buswidth == 1 */
+ *mbus = dst_bus;
+ *sbus = src_bus;
+ }
+ }
+}
+
+/*
+ * Fills in one LLI for a certain transfer descriptor
+ * and advances the counter
+ */
+int pl08x_fill_lli_for_desc(struct pl08x_driver_data *pl08x,
+ struct pl08x_txd *txd, int num_llis, int len,
+ u32 cctl, u32 *remainder)
+{
+ struct lli *llis_va = txd->llis_va;
+ struct lli *llis_bus = (struct lli *) txd->llis_bus;
+
+ BUG_ON(num_llis >= MAX_NUM_TSFR_LLIS);
+
+ llis_va[num_llis].cctl = cctl;
+ llis_va[num_llis].src = txd->srcbus.addr;
+ llis_va[num_llis].dst = txd->dstbus.addr;
+
+ /*
+ * On versions with dual masters, you can optionally AND on
+ * PL080_LLI_LM_AHB2 to the LLI to tell the hardware to read
+ * in new LLIs with that controller, but we always try to
+ * choose AHB1 to point into memory. The idea is to have AHB2
+ * fixed on the peripheral and AHB1 messing around in the
+ * memory. So we don't manipulate this bit currently.
+ */
+
+ llis_va[num_llis].next =
+ (dma_addr_t)((u32) &(llis_bus[num_llis + 1]));
+
+ if (cctl & PL080_CONTROL_SRC_INCR)
+ txd->srcbus.addr += len;
+ if (cctl & PL080_CONTROL_DST_INCR)
+ txd->dstbus.addr += len;
+
+ *remainder -= len;
+
+ return num_llis + 1;
+}
+
+/*
+ * Return number of bytes to fill to boundary, or len
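+ * (e.g. addr 0x3f8 and len 16: the next 1 KiB boundary is 0x400, so
+ * 8 bytes are returned; illustrative values only)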
+ */
+static inline u32 pl08x_pre_boundary(u32 addr, u32 len)
+{
+ u32 boundary;
+
+ boundary = ((addr >> PL08X_BOUNDARY_SHIFT) + 1)
+ << PL08X_BOUNDARY_SHIFT;
+
+ if (boundary < addr + len)
+ return boundary - addr;
+ else
+ return len;
+}
+
+/*
+ * This fills in the table of LLIs for the transfer descriptor
+ * Note that we assume we never have to change the burst sizes
+ * Return 0 for error
+ */
+static int pl08x_fill_llis_for_desc(struct pl08x_driver_data *pl08x,
+ struct pl08x_txd *txd)
+{
+ struct pl08x_channel_data *cd = txd->cd;
+ struct pl08x_bus_data *mbus, *sbus;
+ u32 remainder;
+ int num_llis = 0;
+ u32 cctl;
+ int max_bytes_per_lli;
+ int total_bytes = 0;
+ struct lli *llis_va;
+ struct lli *llis_bus;
+
+ if (!txd) {
+ dev_err(&pl08x->adev->dev, "%s no descriptor\n", __func__);
+ return 0;
+ }
+
+ txd->llis_va = dma_pool_alloc(pl08x->pool, GFP_NOWAIT,
+ &txd->llis_bus);
+ if (!txd->llis_va) {
+ dev_err(&pl08x->adev->dev, "%s no memory for llis\n", __func__);
+ return 0;
+ }
+
+ pl08x->pool_ctr++;
+
+ /*
+ * Initialize bus values for this transfer
+ * from the passed optimal values
+ */
+ if (!cd) {
+ dev_err(&pl08x->adev->dev, "%s no channel data\n", __func__);
+ return 0;
+ }
+
+ /* Get the default CCTL from the platform data */
+ cctl = cd->cctl;
+
+ /*
+ * On the PL080 we have two bus masters and we
+ * should select one for source and one for
+ * destination. We try to use AHB2 for the
+ * bus which does not increment (typically the
+ * peripheral) else we just choose something.
+ */
+ cctl &= ~(PL080_CONTROL_DST_AHB2 | PL080_CONTROL_SRC_AHB2);
+ if (pl08x->vd->dualmaster) {
+ if (cctl & PL080_CONTROL_SRC_INCR)
+ /* Source increments, use AHB2 for destination */
+ cctl |= PL080_CONTROL_DST_AHB2;
+ else if (cctl & PL080_CONTROL_DST_INCR)
+ /* Destination increments, use AHB2 for source */
+ cctl |= PL080_CONTROL_SRC_AHB2;
+ else
+ /* Just pick something, source AHB1 dest AHB2 */
+ cctl |= PL080_CONTROL_DST_AHB2;
+ }
+
+ /* Find maximum width of the source bus */
+ txd->srcbus.maxwidth =
+ pl08x_get_bytes_for_cctl((cctl & PL080_CONTROL_SWIDTH_MASK) >>
+ PL080_CONTROL_SWIDTH_SHIFT);
+
+ /* Find maximum width of the destination bus */
+ txd->dstbus.maxwidth =
+ pl08x_get_bytes_for_cctl((cctl & PL080_CONTROL_DWIDTH_MASK) >>
+ PL080_CONTROL_DWIDTH_SHIFT);
+
+ /* Set up the bus widths to the maximum */
+ txd->srcbus.buswidth = txd->srcbus.maxwidth;
+ txd->dstbus.buswidth = txd->dstbus.maxwidth;
+ dev_vdbg(&pl08x->adev->dev,
+ "%s source bus is %d bytes wide, dest bus is %d bytes wide\n",
+ __func__, txd->srcbus.buswidth, txd->dstbus.buswidth);
+
+
+ /*
+ * Bytes transferred == tsize * MIN(buswidths), not max(buswidths)
+ */
+ max_bytes_per_lli = min(txd->srcbus.buswidth, txd->dstbus.buswidth) *
+ PL080_CONTROL_TRANSFER_SIZE_MASK;
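+ /*
+ * Illustrative example: a 4-byte wide source with a 1-byte wide
+ * destination gives a 1-byte transfer unit, so a single LLI can
+ * cover at most PL080_CONTROL_TRANSFER_SIZE_MASK bytes.
+ */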
+ dev_vdbg(&pl08x->adev->dev,
+ "%s max bytes per lli = %d\n",
+ __func__, max_bytes_per_lli);
+
+ /* We need to count this down to zero */
+ remainder = txd->len;
+ dev_vdbg(&pl08x->adev->dev,
+ "%s remainder = %d\n",
+ __func__, remainder);
+
+ /*
+ * Choose bus to align to
+ * - prefers destination bus if both available
+ * - if fixed address on one bus chooses other
+ * - modifies cctl to choose an appropriate master
+ */
+ pl08x_choose_master_bus(&txd->srcbus, &txd->dstbus,
+ &mbus, &sbus, cctl);
+
+
+ /*
+ * The lowest bit of the LLI register
+ * is also used to indicate which master to
+ * use for reading the LLIs.
+ */
+
+ if (txd->len < mbus->buswidth) {
+ /*
+ * Less than a bus width available
+ * - send as single bytes
+ */
+ while (remainder) {
+ dev_vdbg(&pl08x->adev->dev,
+ "%s single byte LLIs for a transfer of "
+ "less than a bus width (remain %08x)\n",
+ __func__, remainder);
+ cctl = pl08x_cctl_bits(cctl, 1, 1, 1);
+ num_llis =
+ pl08x_fill_lli_for_desc(pl08x, txd, num_llis, 1,
+ cctl, &remainder);
+ total_bytes++;
+ }
+ } else {
+ /*
+ * Make one byte LLIs until master bus is aligned
+ * - slave will then be aligned also
+ */
+ while ((mbus->addr) % (mbus->buswidth)) {
+ dev_vdbg(&pl08x->adev->dev,
+ "%s adjustment lli for less than bus width "
+ "(remain %08x)\n",
+ __func__, remainder);
+ cctl = pl08x_cctl_bits(cctl, 1, 1, 1);
+ num_llis = pl08x_fill_lli_for_desc
+ (pl08x, txd, num_llis, 1, cctl, &remainder);
+ total_bytes++;
+ }
+
+ /*
+ * Master now aligned
+ * - if slave is not then we must set its width down
+ */
+ if (sbus->addr % sbus->buswidth) {
+ dev_dbg(&pl08x->adev->dev,
+ "%s set down bus width to one byte\n",
+ __func__);
+
+ sbus->buswidth = 1;
+ }
+
+ /*
+ * Make largest possible LLIs until less than one bus
+ * width left
+ */
+ while (remainder > (mbus->buswidth - 1)) {
+ int lli_len, target_len;
+ int tsize;
+ int odd_bytes;
+
+ /*
+ * If enough left try to send max possible,
+ * otherwise try to send the remainder
+ */
+ target_len = remainder;
+ if (remainder > max_bytes_per_lli)
+ target_len = max_bytes_per_lli;
+
+ /*
+ * Set bus lengths for incrementing buses
+ * to number of bytes which fill to next memory
+ * boundary
+ */
+ if (cctl & PL080_CONTROL_SRC_INCR)
+ txd->srcbus.fill_bytes =
+ pl08x_pre_boundary(
+ txd->srcbus.addr,
+ remainder);
+ else
+ txd->srcbus.fill_bytes =
+ max_bytes_per_lli;
+
+ if (cctl & PL080_CONTROL_DST_INCR)
+ txd->dstbus.fill_bytes =
+ pl08x_pre_boundary(
+ txd->dstbus.addr,
+ remainder);
+ else
+ txd->dstbus.fill_bytes =
+ max_bytes_per_lli;
+
+ /*
+ * Find the nearest boundary (the smaller of the two fill lengths)
+ */
+ lli_len = min(txd->srcbus.fill_bytes,
+ txd->dstbus.fill_bytes);
+
+ BUG_ON(lli_len > remainder);
+
+ if (lli_len <= 0) {
+ dev_err(&pl08x->adev->dev,
+ "%s lli_len is %d, <= 0\n",
+ __func__, lli_len);
+ return 0;
+ }
+
+ if (lli_len == target_len) {
+ /*
+ * Can send what we wanted
+ */
+ /*
+ * Maintain alignment
+ */
+ lli_len = (lli_len/mbus->buswidth) *
+ mbus->buswidth;
+ odd_bytes = 0;
+ } else {
+ /*
+ * So now we know how many bytes to transfer
+ * to get to the nearest boundary
+ * The next LLI will go past the boundary
+ * - however we may be working to a boundary
+ * on the slave bus
+ * We need to ensure the master stays aligned
+ */
+ odd_bytes = lli_len % mbus->buswidth;
+ /*
+ * - and that we are working in multiples
+ * of the bus widths
+ */
+ lli_len -= odd_bytes;
+
+ }
+
+ if (lli_len) {
+ /*
+ * Check against minimum bus alignment:
+ * Calculate actual transfer size in relation
+ * to bus width and get a maximum remainder of
+ * the smallest bus width - 1
+ */
+ /* FIXME: use round_down()? */
+ tsize = lli_len / min(mbus->buswidth,
+ sbus->buswidth);
+ lli_len = tsize * min(mbus->buswidth,
+ sbus->buswidth);
+
+ if (target_len != lli_len) {
+ dev_vdbg(&pl08x->adev->dev,
+ "%s can't send what we want. Desired %08x, lli of %08x bytes in txd of %08x\n",
+ __func__, target_len, lli_len, txd->len);
+ }
+
+ cctl = pl08x_cctl_bits(cctl,
+ txd->srcbus.buswidth,
+ txd->dstbus.buswidth,
+ tsize);
+
+ dev_vdbg(&pl08x->adev->dev,
+ "%s fill lli with single lli chunk of size %08x (remainder %08x)\n",
+ __func__, lli_len, remainder);
+ num_llis = pl08x_fill_lli_for_desc(pl08x, txd,
+ num_llis, lli_len, cctl,
+ &remainder);
+ total_bytes += lli_len;
+ }
+
+
+ if (odd_bytes) {
+ /*
+ * Creep past the boundary,
+ * maintaining master alignment
+ */
+ int j;
+ for (j = 0; (j < mbus->buswidth)
+ && (remainder); j++) {
+ cctl = pl08x_cctl_bits(cctl, 1, 1, 1);
+ dev_vdbg(&pl08x->adev->dev,
+ "%s align with boundardy, single byte (remain %08x)\n",
+ __func__, remainder);
+ num_llis =
+ pl08x_fill_lli_for_desc(pl08x,
+ txd, num_llis, 1,
+ cctl, &remainder);
+ total_bytes++;
+ }
+ }
+ }
+
+ /*
+ * Send any odd bytes
+ */
+ if (remainder < 0) {
+ dev_err(&pl08x->adev->dev, "%s remainder not fitted 0x%08x bytes\n",
+ __func__, remainder);
+ return 0;
+ }
+
+ while (remainder) {
+ cctl = pl08x_cctl_bits(cctl, 1, 1, 1);
+ dev_vdbg(&pl08x->adev->dev,
+ "%s align with boundardy, single odd byte (remain %d)\n",
+ __func__, remainder);
+ num_llis = pl08x_fill_lli_for_desc(pl08x, txd, num_llis,
+ 1, cctl, &remainder);
+ total_bytes++;
+ }
+ }
+ if (total_bytes != txd->len) {
+ dev_err(&pl08x->adev->dev,
+ "%s size of encoded lli:s don't match total txd, transferred 0x%08x from size 0x%08x\n",
+ __func__, total_bytes, txd->len);
+ return 0;
+ }
+
+ if (num_llis >= MAX_NUM_TSFR_LLIS) {
+ dev_err(&pl08x->adev->dev,
+ "%s need to increase MAX_NUM_TSFR_LLIS from 0x%08x\n",
+ __func__, (u32) MAX_NUM_TSFR_LLIS);
+ return 0;
+ }
+ /*
+ * Decide whether this is a loop or a terminated transfer
+ */
+ llis_va = txd->llis_va;
+ llis_bus = (struct lli *) txd->llis_bus;
+
+ if (cd->circular_buffer) {
+ /*
+ * Loop the circular buffer so that the next element
+ * points back to the beginning of the LLI.
+ */
+ llis_va[num_llis - 1].next =
+ (dma_addr_t)((unsigned int)&(llis_bus[0]));
+ } else {
+ /*
+ * On non-circular buffers, the final LLI terminates
+ * the LLI list.
+ */
+ llis_va[num_llis - 1].next = 0;
+ /*
+ * The final LLI element shall also fire an interrupt
+ */
+ llis_va[num_llis - 1].cctl |= PL080_CONTROL_TC_IRQ_EN;
+ }
+
+ /* Now store the channel register values */
+ txd->csrc = llis_va[0].src;
+ txd->cdst = llis_va[0].dst;
+ if (num_llis > 1)
+ txd->clli = llis_va[0].next;
+ else
+ txd->clli = 0;
+
+ txd->cctl = llis_va[0].cctl;
+ /* ccfg will be set at physical channel allocation time */
+
+#ifdef VERBOSE_DEBUG
+ {
+ int i;
+
+ for (i = 0; i < num_llis; i++) {
+ dev_vdbg(&pl08x->adev->dev,
+ "lli %d @%p: csrc=%08x, cdst=%08x, cctl=%08x, clli=%08x\n",
+ i,
+ &llis_va[i],
+ llis_va[i].src,
+ llis_va[i].dst,
+ llis_va[i].cctl,
+ llis_va[i].next
+ );
+ }
+ }
+#endif
+
+ return num_llis;
+}
+
+/* You should call this with the struct pl08x lock held */
+static void pl08x_free_txd(struct pl08x_driver_data *pl08x,
+ struct pl08x_txd *txd)
+{
+ if (!txd)
+ dev_err(&pl08x->adev->dev,
+ "%s no descriptor to free\n",
+ __func__);
+
+ /* Free the LLI */
+ dma_pool_free(pl08x->pool, txd->llis_va,
+ txd->llis_bus);
+
+ pl08x->pool_ctr--;
+
+ kfree(txd);
+}
+
+static void pl08x_free_txd_list(struct pl08x_driver_data *pl08x,
+ struct pl08x_dma_chan *plchan)
+{
+ struct pl08x_txd *txdi = NULL;
+ struct pl08x_txd *next;
+
+ if (!list_empty(&plchan->desc_list)) {
+ list_for_each_entry_safe(txdi,
+ next, &plchan->desc_list, node) {
+ list_del(&txdi->node);
+ pl08x_free_txd(pl08x, txdi);
+ }
+
+ }
+}
+
+/*
+ * The DMA ENGINE API
+ */
+static int pl08x_alloc_chan_resources(struct dma_chan *chan)
+{
+ return 0;
+}
+
+static void pl08x_free_chan_resources(struct dma_chan *chan)
+{
+}
+
+/*
+ * This should be called with the channel plchan->lock held
+ */
+static int prep_phy_channel(struct pl08x_dma_chan *plchan,
+ struct pl08x_txd *txd)
+{
+ struct pl08x_driver_data *pl08x = plchan->host;
+ struct pl08x_phy_chan *ch;
+ int ret;
+
+ /* Check if we already have a channel */
+ if (plchan->phychan)
+ return 0;
+
+ ch = pl08x_get_phy_channel(pl08x, plchan);
+ if (!ch) {
+ /* No physical channel available, cope with it */
+ dev_dbg(&pl08x->adev->dev, "no physical channel available for xfer on %s\n", plchan->name);
+ return -EBUSY;
+ }
+
+ /*
+ * OK we have a physical channel: for memcpy() this is all we
+ * need, but for slaves the physical signals may be muxed!
+ * Can the platform allow us to use this channel?
+ */
+ if (plchan->slave &&
+ ch->signal < 0 &&
+ pl08x->pd->get_signal) {
+ ret = pl08x->pd->get_signal(plchan);
+ if (ret < 0) {
+ dev_dbg(&pl08x->adev->dev,
+ "unable to use physical channel %d for transfer on %s due to platform restrictions\n",
+ ch->id, plchan->name);
+ /* Release physical channel & return */
+ pl08x_put_phy_channel(pl08x, ch);
+ return -EBUSY;
+ }
+ ch->signal = ret;
+ }
+
+ dev_dbg(&pl08x->adev->dev, "allocated physical channel %d and signal %d for xfer on %s\n",
+ ch->id,
+ ch->signal,
+ plchan->name);
+
+ plchan->phychan = ch;
+
+ return 0;
+}
+
+static dma_cookie_t pl08x_tx_submit(struct dma_async_tx_descriptor *tx)
+{
+ struct pl08x_dma_chan *plchan = to_pl08x_chan(tx->chan);
+
+ atomic_inc(&plchan->last_issued);
+ tx->cookie = atomic_read(&plchan->last_issued);
+ /* This unlock follows the lock in the prep() function */
+ spin_unlock_irqrestore(&plchan->lock, plchan->lockflags);
+
+ return tx->cookie;
+}
+
+static struct dma_async_tx_descriptor *pl08x_prep_dma_interrupt(
+ struct dma_chan *chan, unsigned long flags)
+{
+ struct dma_async_tx_descriptor *retval = NULL;
+
+ return retval;
+}
+
+/*
+ * Code accessing dma_async_is_complete() in a tight loop
+ * may give problems - could schedule where indicated.
+ * If slaves are relying on interrupts to signal completion this
+ * function must not be called with interrupts disabled
+ */
+static enum dma_status
+pl08x_dma_tx_status(struct dma_chan *chan,
+ dma_cookie_t cookie,
+ struct dma_tx_state *txstate)
+{
+ struct pl08x_dma_chan *plchan = to_pl08x_chan(chan);
+ dma_cookie_t last_used;
+ dma_cookie_t last_complete;
+ enum dma_status ret;
+ u32 bytesleft = 0;
+
+ last_used = atomic_read(&plchan->last_issued);
+ last_complete = plchan->lc;
+
+ ret = dma_async_is_complete(cookie, last_complete, last_used);
+ if (ret == DMA_SUCCESS) {
+ dma_set_tx_state(txstate, last_complete, last_used, 0);
+ return ret;
+ }
+
+ /*
+ * schedule(); could be inserted here
+ */
+
+ /*
+ * This cookie not complete yet
+ */
+ last_used = atomic_read(&plchan->last_issued);
+ last_complete = plchan->lc;
+
+ /* Get number of bytes left in the active transactions and queue */
+ bytesleft = pl08x_getbytes_chan(plchan);
+
+ dma_set_tx_state(txstate, last_complete, last_used,
+ bytesleft);
+
+ if (plchan->state == PL08X_CHAN_PAUSED)
+ return DMA_PAUSED;
+
+ /* Whether waiting or running, we're in progress */
+ return DMA_IN_PROGRESS;
+}
+
+/* PrimeCell DMA extension */
+struct burst_table {
+ int burstwords;
+ u32 reg;
+};
+
+static const struct burst_table burst_sizes[] = {
+ {
+ .burstwords = 256,
+ .reg = (PL080_BSIZE_256 << PL080_CONTROL_SB_SIZE_SHIFT) |
+ (PL080_BSIZE_256 << PL080_CONTROL_DB_SIZE_SHIFT),
+ },
+ {
+ .burstwords = 128,
+ .reg = (PL080_BSIZE_128 << PL080_CONTROL_SB_SIZE_SHIFT) |
+ (PL080_BSIZE_128 << PL080_CONTROL_DB_SIZE_SHIFT),
+ },
+ {
+ .burstwords = 64,
+ .reg = (PL080_BSIZE_64 << PL080_CONTROL_SB_SIZE_SHIFT) |
+ (PL080_BSIZE_64 << PL080_CONTROL_DB_SIZE_SHIFT),
+ },
+ {
+ .burstwords = 32,
+ .reg = (PL080_BSIZE_32 << PL080_CONTROL_SB_SIZE_SHIFT) |
+ (PL080_BSIZE_32 << PL080_CONTROL_DB_SIZE_SHIFT),
+ },
+ {
+ .burstwords = 16,
+ .reg = (PL080_BSIZE_16 << PL080_CONTROL_SB_SIZE_SHIFT) |
+ (PL080_BSIZE_16 << PL080_CONTROL_DB_SIZE_SHIFT),
+ },
+ {
+ .burstwords = 8,
+ .reg = (PL080_BSIZE_8 << PL080_CONTROL_SB_SIZE_SHIFT) |
+ (PL080_BSIZE_8 << PL080_CONTROL_DB_SIZE_SHIFT),
+ },
+ {
+ .burstwords = 4,
+ .reg = (PL080_BSIZE_4 << PL080_CONTROL_SB_SIZE_SHIFT) |
+ (PL080_BSIZE_4 << PL080_CONTROL_DB_SIZE_SHIFT),
+ },
+ {
+ .burstwords = 1,
+ .reg = (PL080_BSIZE_1 << PL080_CONTROL_SB_SIZE_SHIFT) |
+ (PL080_BSIZE_1 << PL080_CONTROL_DB_SIZE_SHIFT),
+ },
+};
+
+static void dma_set_runtime_config(struct dma_chan *chan,
+ struct dma_slave_config *config)
+{
+ struct pl08x_dma_chan *plchan = to_pl08x_chan(chan);
+ struct pl08x_driver_data *pl08x = plchan->host;
+ struct pl08x_channel_data *cd = plchan->cd;
+ enum dma_slave_buswidth addr_width;
+ u32 maxburst;
+ u32 cctl = 0;
+ /* Mask out all except src and dst channel */
+ u32 ccfg = cd->ccfg & 0x000003DEU;
+ int i = 0;
+
+ /* Transfer direction */
+ plchan->runtime_direction = config->direction;
+ if (config->direction == DMA_TO_DEVICE) {
+ plchan->runtime_addr = config->dst_addr;
+ cctl |= PL080_CONTROL_SRC_INCR;
+ ccfg |= PL080_FLOW_MEM2PER << PL080_CONFIG_FLOW_CONTROL_SHIFT;
+ addr_width = config->dst_addr_width;
+ maxburst = config->dst_maxburst;
+ } else if (config->direction == DMA_FROM_DEVICE) {
+ plchan->runtime_addr = config->src_addr;
+ cctl |= PL080_CONTROL_DST_INCR;
+ ccfg |= PL080_FLOW_PER2MEM << PL080_CONFIG_FLOW_CONTROL_SHIFT;
+ addr_width = config->src_addr_width;
+ maxburst = config->src_maxburst;
+ } else {
+ dev_err(&pl08x->adev->dev,
+ "bad runtime_config: alien transfer direction\n");
+ return;
+ }
+
+ switch (addr_width) {
+ case DMA_SLAVE_BUSWIDTH_1_BYTE:
+ cctl |= (PL080_WIDTH_8BIT << PL080_CONTROL_SWIDTH_SHIFT) |
+ (PL080_WIDTH_8BIT << PL080_CONTROL_DWIDTH_SHIFT);
+ break;
+ case DMA_SLAVE_BUSWIDTH_2_BYTES:
+ cctl |= (PL080_WIDTH_16BIT << PL080_CONTROL_SWIDTH_SHIFT) |
+ (PL080_WIDTH_16BIT << PL080_CONTROL_DWIDTH_SHIFT);
+ break;
+ case DMA_SLAVE_BUSWIDTH_4_BYTES:
+ cctl |= (PL080_WIDTH_32BIT << PL080_CONTROL_SWIDTH_SHIFT) |
+ (PL080_WIDTH_32BIT << PL080_CONTROL_DWIDTH_SHIFT);
+ break;
+ default:
+ dev_err(&pl08x->adev->dev,
+ "bad runtime_config: alien address width\n");
+ return;
+ }
+
+ /*
+ * Now decide on a maxburst:
+ * If this channel will only request single transfers, set
+ * this down to ONE element.
+ */
+ if (plchan->cd->single) {
+ cctl |= (PL080_BSIZE_1 << PL080_CONTROL_SB_SIZE_SHIFT) |
+ (PL080_BSIZE_1 << PL080_CONTROL_DB_SIZE_SHIFT);
+ } else {
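+ /*
+ * Scan the table for the largest burst size that does not
+ * exceed maxburst, e.g. a maxburst of 100 words would pick the
+ * 64-word entry (illustrative value).
+ */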
+ while (i < ARRAY_SIZE(burst_sizes)) {
+ if (burst_sizes[i].burstwords <= maxburst)
+ break;
+ i++;
+ }
+ cctl |= burst_sizes[i].reg;
+ }
+
+ /* Access the cell in privileged mode, non-bufferable, non-cacheable */
+ cctl &= ~PL080_CONTROL_PROT_MASK;
+ cctl |= PL080_CONTROL_PROT_SYS;
+
+ /* Modify the default channel data to fit PrimeCell request */
+ cd->cctl = cctl;
+ cd->ccfg = ccfg;
+
+ dev_dbg(&pl08x->adev->dev,
+ "configured channel %s (%s) for %s, data width %d, "
+ "maxburst %d words, LE, CCTL=%08x, CCFG=%08x\n",
+ dma_chan_name(chan), plchan->name,
+ (config->direction == DMA_FROM_DEVICE) ? "RX" : "TX",
+ addr_width,
+ maxburst,
+ cctl, ccfg);
+}
+
+/*
+ * Slave transactions callback to the slave device to allow
+ * synchronization of slave DMA signals with the DMAC enable
+ */
+static void pl08x_issue_pending(struct dma_chan *chan)
+{
+ struct pl08x_dma_chan *plchan = to_pl08x_chan(chan);
+ struct pl08x_driver_data *pl08x = plchan->host;
+ unsigned long flags;
+
+ spin_lock_irqsave(&plchan->lock, flags);
+ /* Something is already active */
+ if (plchan->at) {
+ spin_unlock_irqrestore(&plchan->lock, flags);
+ return;
+ }
+
+ /* Didn't get a physical channel so waiting for it ... */
+ if (plchan->state == PL08X_CHAN_WAITING)
+ return;
+
+ /* Take the first element in the queue and execute it */
+ if (!list_empty(&plchan->desc_list)) {
+ struct pl08x_txd *next;
+
+ next = list_first_entry(&plchan->desc_list,
+ struct pl08x_txd,
+ node);
+ list_del(&next->node);
+ plchan->at = next;
+ plchan->state = PL08X_CHAN_RUNNING;
+
+ /* Configure the physical channel for the active txd */
+ pl08x_config_phychan_for_txd(plchan);
+ pl08x_set_cregs(pl08x, plchan->phychan);
+ pl08x_enable_phy_chan(pl08x, plchan->phychan);
+ }
+
+ spin_unlock_irqrestore(&plchan->lock, flags);
+}
+
+static int pl08x_prep_channel_resources(struct pl08x_dma_chan *plchan,
+ struct pl08x_txd *txd)
+{
+ int num_llis;
+ struct pl08x_driver_data *pl08x = plchan->host;
+ int ret;
+
+ num_llis = pl08x_fill_llis_for_desc(pl08x, txd);
+
+ if (!num_llis)
+ return -EINVAL;
+
+ spin_lock_irqsave(&plchan->lock, plchan->lockflags);
+
+ /*
+ * If this device is not using a circular buffer then
+ * queue this new descriptor for transfer.
+ * The descriptor for a circular buffer continues
+ * to be used until the channel is freed.
+ */
+ if (txd->cd->circular_buffer)
+ dev_err(&pl08x->adev->dev,
+ "%s attempting to queue a circular buffer\n",
+ __func__);
+ else
+ list_add_tail(&txd->node,
+ &plchan->desc_list);
+
+ /*
+ * See if we already have a physical channel allocated,
+ * else this is the time to try to get one.
+ */
+ ret = prep_phy_channel(plchan, txd);
+ if (ret) {
+ /*
+ * No physical channel available, we will
+ * stack up the memcpy channels until there is a channel
+ * available to handle it, whereas slave transfers may
+ * have been denied due to platform channel muxing restrictions.
+ * Since there is no guarantee that this will ever be
+ * resolved, and since the signal must be acquired AFTER
+ * acquiring the physical channel, we will let them be NACKed
+ * with -EBUSY here. The drivers can always retry the prep()
+ * call if they are eager to do this using DMA.
+ */
+ if (plchan->slave) {
+ pl08x_free_txd_list(pl08x, plchan);
+ spin_unlock_irqrestore(&plchan->lock, plchan->lockflags);
+ return -EBUSY;
+ }
+ /* Do this memcpy whenever there is a channel ready */
+ plchan->state = PL08X_CHAN_WAITING;
+ plchan->waiting = txd;
+ } else
+ /*
+ * Else we're all set, paused and ready to roll,
+ * status will switch to PL08X_CHAN_RUNNING when
+ * we call issue_pending(). If there is something
+ * running on the channel already we don't change
+ * its state.
+ */
+ if (plchan->state == PL08X_CHAN_IDLE)
+ plchan->state = PL08X_CHAN_PAUSED;
+
+ /*
+ * Notice that we leave plchan->lock locked on purpose:
+ * it will be unlocked in the subsequent tx_submit()
+ * call. This is a consequence of the current API.
+ */
+
+ return 0;
+}
+
+/*
+ * Initialize a descriptor to be used by memcpy submit
+ */
+static struct dma_async_tx_descriptor *pl08x_prep_dma_memcpy(
+ struct dma_chan *chan, dma_addr_t dest, dma_addr_t src,
+ size_t len, unsigned long flags)
+{
+ struct pl08x_dma_chan *plchan = to_pl08x_chan(chan);
+ struct pl08x_driver_data *pl08x = plchan->host;
+ struct pl08x_txd *txd;
+ int ret;
+
+ txd = kzalloc(sizeof(struct pl08x_txd), GFP_NOWAIT);
+ if (!txd) {
+ dev_err(&pl08x->adev->dev,
+ "%s no memory for descriptor\n", __func__);
+ return NULL;
+ }
+
+ dma_async_tx_descriptor_init(&txd->tx, chan);
+ txd->direction = DMA_NONE;
+ txd->srcbus.addr = src;
+ txd->dstbus.addr = dest;
+
+ /* Set platform data for m2m */
+ txd->cd = &pl08x->pd->memcpy_channel;
+ /* Both to be incremented or the code will break */
+ txd->cd->cctl |= PL080_CONTROL_SRC_INCR | PL080_CONTROL_DST_INCR;
+ txd->tx.tx_submit = pl08x_tx_submit;
+ txd->tx.callback = NULL;
+ txd->tx.callback_param = NULL;
+ txd->len = len;
+
+ INIT_LIST_HEAD(&txd->node);
+ ret = pl08x_prep_channel_resources(plchan, txd);
+ if (ret)
+ return NULL;
+ /*
+ * NB: the channel lock is held at this point so tx_submit()
+ * must be called in direct succession.
+ */
+
+ return &txd->tx;
+}
+
+struct dma_async_tx_descriptor *pl08x_prep_slave_sg(
+ struct dma_chan *chan, struct scatterlist *sgl,
+ unsigned int sg_len, enum dma_data_direction direction,
+ unsigned long flags)
+{
+ struct pl08x_dma_chan *plchan = to_pl08x_chan(chan);
+ struct pl08x_driver_data *pl08x = plchan->host;
+ struct pl08x_txd *txd;
+ int ret;
+
+ /*
+ * Current implementation ASSUMES only one sg
+ */
+ if (sg_len != 1) {
+ dev_err(&pl08x->adev->dev, "%s prepared too long sglist\n",
+ __func__);
+ BUG();
+ }
+
+ dev_dbg(&pl08x->adev->dev, "%s prepare transaction of %d bytes from %s\n",
+ __func__, sgl->length, plchan->name);
+
+ txd = kzalloc(sizeof(struct pl08x_txd), GFP_NOWAIT);
+ if (!txd) {
+ dev_err(&pl08x->adev->dev, "%s no txd\n", __func__);
+ return NULL;
+ }
+
+ dma_async_tx_descriptor_init(&txd->tx, chan);
+
+ if (direction != plchan->runtime_direction)
+ dev_err(&pl08x->adev->dev, "%s DMA setup does not match "
+ "the direction configured for the PrimeCell\n",
+ __func__);
+
+ /*
+ * Set up addresses, the PrimeCell configured address
+ * will take precedence since this may configure the
+ * channel target address dynamically at runtime.
+ */
+ txd->direction = direction;
+ if (direction == DMA_TO_DEVICE) {
+ txd->srcbus.addr = sgl->dma_address;
+ if (plchan->runtime_addr)
+ txd->dstbus.addr = plchan->runtime_addr;
+ else
+ txd->dstbus.addr = plchan->cd->addr;
+ } else if (direction == DMA_FROM_DEVICE) {
+ if (plchan->runtime_addr)
+ txd->srcbus.addr = plchan->runtime_addr;
+ else
+ txd->srcbus.addr = plchan->cd->addr;
+ txd->dstbus.addr = sgl->dma_address;
+ } else {
+ dev_err(&pl08x->adev->dev,
+ "%s direction unsupported\n", __func__);
+ return NULL;
+ }
+ txd->cd = plchan->cd;
+ txd->tx.tx_submit = pl08x_tx_submit;
+ txd->tx.callback = NULL;
+ txd->tx.callback_param = NULL;
+ txd->len = sgl->length;
+ INIT_LIST_HEAD(&txd->node);
+
+ ret = pl08x_prep_channel_resources(plchan, txd);
+ if (ret)
+ return NULL;
+ /*
+ * NB: the channel lock is held at this point so tx_submit()
+ * must be called in direct succession.
+ */
+
+ return &txd->tx;
+}
+
+static int pl08x_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd,
+ unsigned long arg)
+{
+ struct pl08x_dma_chan *plchan = to_pl08x_chan(chan);
+ struct pl08x_driver_data *pl08x = plchan->host;
+ unsigned long flags;
+ int ret = 0;
+
+ /* Controls applicable to inactive channels */
+ if (cmd == DMA_SLAVE_CONFIG) {
+ dma_set_runtime_config(chan,
+ (struct dma_slave_config *)
+ arg);
+ return 0;
+ }
+
+ /*
+ * Anything succeeds on channels with no physical allocation and
+ * no queued transfers.
+ */
+ spin_lock_irqsave(&plchan->lock, flags);
+ if (!plchan->phychan && !plchan->at) {
+ spin_unlock_irqrestore(&plchan->lock, flags);
+ return 0;
+ }
+
+ switch (cmd) {
+ case DMA_TERMINATE_ALL:
+ plchan->state = PL08X_CHAN_IDLE;
+
+ if (plchan->phychan) {
+ pl08x_stop_phy_chan(plchan->phychan);
+
+ /*
+ * Mark physical channel as free and free any slave
+ * signal
+ */
+ if ((plchan->phychan->signal >= 0) &&
+ pl08x->pd->put_signal) {
+ pl08x->pd->put_signal(plchan);
+ plchan->phychan->signal = -1;
+ }
+ pl08x_put_phy_channel(pl08x, plchan->phychan);
+ plchan->phychan = NULL;
+ }
+ /* Stop any pending tasklet */
+ tasklet_disable(&plchan->tasklet);
+ /* Dequeue jobs and free LLIs */
+ if (plchan->at) {
+ pl08x_free_txd(pl08x, plchan->at);
+ plchan->at = NULL;
+ }
+ /* Dequeue jobs not yet fired as well */
+ pl08x_free_txd_list(pl08x, plchan);
+ break;
+ case DMA_PAUSE:
+ pl08x_pause_phy_chan(plchan->phychan);
+ plchan->state = PL08X_CHAN_PAUSED;
+ break;
+ case DMA_RESUME:
+ pl08x_resume_phy_chan(plchan->phychan);
+ plchan->state = PL08X_CHAN_RUNNING;
+ break;
+ default:
+ /* Unknown command */
+ ret = -ENXIO;
+ break;
+ }
+
+ spin_unlock_irqrestore(&plchan->lock, flags);
+
+ return ret;
+}
+
+bool pl08x_filter_id(struct dma_chan *chan, void *chan_id)
+{
+ struct pl08x_dma_chan *plchan = to_pl08x_chan(chan);
+ char *name = chan_id;
+
+ /* Check that the channel is not taken! */
+ if (!strcmp(plchan->name, name))
+ return true;
+
+ return false;
+}
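+
+/*
+ * Hypothetical example (not built into this driver): requesting one of
+ * these channels by name from client code using the filter above. The
+ * channel name "uart0_tx" is an assumption; real names come from the
+ * bus_id strings in the platform data.
+ */
+static struct dma_chan *example_pl08x_request_channel(void)
+{
+ dma_cap_mask_t mask;
+
+ dma_cap_zero(mask);
+ dma_cap_set(DMA_SLAVE, mask);
+
+ /* pl08x_filter_id() compares against the virtual channel name */
+ return dma_request_channel(mask, pl08x_filter_id, (void *) "uart0_tx");
+}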
+
+/*
+ * Just check that the device is there and active.
+ * TODO: turn this bit on/off depending on the number of
+ * physical channels actually in use; if it is zero, shut
+ * it off to save some power, and cut the clock at the
+ * same time.
+ */
+static void pl08x_ensure_on(struct pl08x_driver_data *pl08x)
+{
+ u32 val;
+
+ val = readl(pl08x->base + PL080_CONFIG);
+ val &= ~(PL080_CONFIG_M2_BE | PL080_CONFIG_M1_BE | PL080_CONFIG_ENABLE);
+ /* We implicitly clear bit 1 and that means little-endian mode */
+ val |= PL080_CONFIG_ENABLE;
+ writel(val, pl08x->base + PL080_CONFIG);
+}
+
+static void pl08x_tasklet(unsigned long data)
+{
+ struct pl08x_dma_chan *plchan = (struct pl08x_dma_chan *) data;
+ struct pl08x_phy_chan *phychan = plchan->phychan;
+ struct pl08x_driver_data *pl08x = plchan->host;
+
+ if (!plchan)
+ BUG();
+
+ spin_lock(&plchan->lock);
+
+ if (plchan->at) {
+ dma_async_tx_callback callback =
+ plchan->at->tx.callback;
+ void *callback_param =
+ plchan->at->tx.callback_param;
+
+ /*
+ * Update last completed
+ */
+ plchan->lc =
+ (plchan->at->tx.cookie);
+
+ /*
+ * Callback to signal completion
+ */
+ if (callback)
+ callback(callback_param);
+
+ /*
+ * Device callbacks should NOT clear
+ * the current transaction on the channel
+ * Linus: sometimes they should?
+ */
+ if (!plchan->at)
+ BUG();
+
+ /*
+ * Free the descriptor if it's not for a device
+ * using a circular buffer
+ */
+ if (!plchan->at->cd->circular_buffer) {
+ pl08x_free_txd(pl08x, plchan->at);
+ plchan->at = NULL;
+ }
+ /*
+ * else the descriptor for circular buffers is
+ * only freed once the client has disabled DMA
+ */
+ }
+ /*
+ * If a new descriptor is queued, set it up
+ * plchan->at is NULL here
+ */
+ if (!list_empty(&plchan->desc_list)) {
+ struct pl08x_txd *next;
+
+ next = list_first_entry(&plchan->desc_list,
+ struct pl08x_txd,
+ node);
+ list_del(&next->node);
+ plchan->at = next;
+ /* Configure the physical channel for the next txd */
+ pl08x_config_phychan_for_txd(plchan);
+ pl08x_set_cregs(pl08x, plchan->phychan);
+ pl08x_enable_phy_chan(pl08x, plchan->phychan);
+ } else {
+ struct pl08x_dma_chan *waiting = NULL;
+
+ /*
+ * No more jobs, so free up the physical channel
+ * Free any allocated signal on slave transfers too
+ */
+ if ((phychan->signal >= 0) && pl08x->pd->put_signal) {
+ pl08x->pd->put_signal(plchan);
+ phychan->signal = -1;
+ }
+ pl08x_put_phy_channel(pl08x, phychan);
+ plchan->phychan = NULL;
+ plchan->state = PL08X_CHAN_IDLE;
+
+ /*
+ * And NOW, before anyone else can grab that freed-up
+ * physical channel, see if there is some memcpy pending
+ * that seriously needs to start because it was stacked
+ * up while we were hogging the physical channels with
+ * data.
+ */
+ list_for_each_entry(waiting, &pl08x->memcpy.channels,
+ chan.device_node) {
+ if (waiting->state == PL08X_CHAN_WAITING &&
+ waiting->waiting != NULL) {
+ int ret;
+
+ /* This should REALLY not fail now */
+ ret = prep_phy_channel(waiting,
+ waiting->waiting);
+ BUG_ON(ret);
+ waiting->state = PL08X_CHAN_RUNNING;
+ waiting->waiting = NULL;
+ pl08x_issue_pending(&waiting->chan);
+ break;
+ }
+ }
+ }
+
+ spin_unlock(&plchan->lock);
+}
+
+static irqreturn_t pl08x_irq(int irq, void *dev)
+{
+ struct pl08x_driver_data *pl08x = dev;
+ u32 mask = 0;
+ u32 val;
+ int i;
+
+ val = readl(pl08x->base + PL080_ERR_STATUS);
+ if (val) {
+ /*
+ * An error interrupt (on one or more channels)
+ */
+ dev_err(&pl08x->adev->dev,
+ "%s error interrupt, register value 0x%08x\n",
+ __func__, val);
+ /*
+ * Simply clear ALL PL08X error interrupts,
+ * regardless of channel and cause
+ * FIXME: should be 0x00000003 on PL081 really.
+ */
+ writel(0x000000FF, pl08x->base + PL080_ERR_CLEAR);
+ }
+ val = readl(pl08x->base + PL080_INT_STATUS);
+ for (i = 0; i < pl08x->vd->channels; i++) {
+ if ((1 << i) & val) {
+ /* Locate physical channel */
+ struct pl08x_phy_chan *phychan = &pl08x->phy_chans[i];
+ struct pl08x_dma_chan *plchan = phychan->serving;
+
+ /* Schedule tasklet on this channel */
+ tasklet_schedule(&plchan->tasklet);
+
+ mask |= (1 << i);
+ }
+ }
+ /*
+ * Clear only the terminal interrupts on channels we processed
+ */
+ writel(mask, pl08x->base + PL080_TC_CLEAR);
+
+ return mask ? IRQ_HANDLED : IRQ_NONE;
+}
+
+/*
+ * Initialise the DMAC memcpy/slave channels.
+ * Make a local wrapper to hold required data
+ */
+static int pl08x_dma_init_virtual_channels(struct pl08x_driver_data *pl08x,
+ struct dma_device *dmadev,
+ unsigned int channels,
+ bool slave)
+{
+ struct pl08x_dma_chan *chan;
+ int i;
+
+ INIT_LIST_HEAD(&dmadev->channels);
+ /*
+ * Register as many memcpy channels as we have physical channels;
+ * we won't always be able to use them all, but the code will have
+ * to cope with that situation.
+ */
+ for (i = 0; i < channels; i++) {
+ chan = kzalloc(sizeof(struct pl08x_dma_chan), GFP_KERNEL);
+ if (!chan) {
+ dev_err(&pl08x->adev->dev,
+ "%s no memory for channel\n", __func__);
+ return -ENOMEM;
+ }
+
+ chan->host = pl08x;
+ chan->state = PL08X_CHAN_IDLE;
+
+ if (slave) {
+ chan->slave = true;
+ chan->name = pl08x->pd->slave_channels[i].bus_id;
+ chan->cd = &pl08x->pd->slave_channels[i];
+ } else {
+ chan->cd = &pl08x->pd->memcpy_channel;
+ chan->name = kasprintf(GFP_KERNEL, "memcpy%d", i);
+ if (!chan->name) {
+ kfree(chan);
+ return -ENOMEM;
+ }
+ }
+ dev_info(&pl08x->adev->dev,
+ "initialize virtual channel \"%s\"\n",
+ chan->name);
+
+ chan->chan.device = dmadev;
+ atomic_set(&chan->last_issued, 0);
+ chan->lc = atomic_read(&chan->last_issued);
+
+ spin_lock_init(&chan->lock);
+ INIT_LIST_HEAD(&chan->desc_list);
+ tasklet_init(&chan->tasklet, pl08x_tasklet,
+ (unsigned long) chan);
+
+ list_add_tail(&chan->chan.device_node, &dmadev->channels);
+ }
+ dev_info(&pl08x->adev->dev, "initialized %d virtual %s channels\n",
+ i, slave ? "slave" : "memcpy");
+ return i;
+}
+
+static void pl08x_free_virtual_channels(struct dma_device *dmadev)
+{
+ struct pl08x_dma_chan *chan = NULL;
+ struct pl08x_dma_chan *next;
+
+ list_for_each_entry_safe(chan,
+ next, &dmadev->channels, chan.device_node) {
+ list_del(&chan->chan.device_node);
+ kfree(chan);
+ }
+}
+
+#ifdef CONFIG_DEBUG_FS
+static const char *pl08x_state_str(enum pl08x_dma_chan_state state)
+{
+ switch (state) {
+ case PL08X_CHAN_IDLE:
+ return "idle";
+ case PL08X_CHAN_RUNNING:
+ return "running";
+ case PL08X_CHAN_PAUSED:
+ return "paused";
+ case PL08X_CHAN_WAITING:
+ return "waiting";
+ default:
+ break;
+ }
+ return "UNKNOWN STATE";
+}
+
+static int pl08x_debugfs_show(struct seq_file *s, void *data)
+{
+ struct pl08x_driver_data *pl08x = s->private;
+ struct pl08x_dma_chan *chan;
+ struct pl08x_phy_chan *ch;
+ unsigned long flags;
+ int i;
+
+ seq_printf(s, "PL08x physical channels:\n");
+ seq_printf(s, "CHANNEL:\tUSER:\n");
+ seq_printf(s, "--------\t-----\n");
+ for (i = 0; i < pl08x->vd->channels; i++) {
+ struct pl08x_dma_chan *virt_chan;
+
+ ch = &pl08x->phy_chans[i];
+
+ spin_lock_irqsave(&ch->lock, flags);
+ virt_chan = ch->serving;
+
+ seq_printf(s, "%d\t\t%s\n",
+ ch->id, virt_chan ? virt_chan->name : "(none)");
+
+ spin_unlock_irqrestore(&ch->lock, flags);
+ }
+
+ seq_printf(s, "\nPL08x virtual memcpy channels:\n");
+ seq_printf(s, "CHANNEL:\tSTATE:\n");
+ seq_printf(s, "--------\t------\n");
+ list_for_each_entry(chan, &pl08x->memcpy.channels, chan.device_node) {
+ seq_printf(s, "%s\t\t\%s\n", chan->name,
+ pl08x_state_str(chan->state));
+ }
+
+ seq_printf(s, "\nPL08x virtual slave channels:\n");
+ seq_printf(s, "CHANNEL:\tSTATE:\n");
+ seq_printf(s, "--------\t------\n");
+ list_for_each_entry(chan, &pl08x->slave.channels, chan.device_node) {
+ seq_printf(s, "%s\t\t\%s\n", chan->name,
+ pl08x_state_str(chan->state));
+ }
+
+ return 0;
+}
+
+static int pl08x_debugfs_open(struct inode *inode, struct file *file)
+{
+ return single_open(file, pl08x_debugfs_show, inode->i_private);
+}
+
+static const struct file_operations pl08x_debugfs_operations = {
+ .open = pl08x_debugfs_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = single_release,
+};
+
+static void init_pl08x_debugfs(struct pl08x_driver_data *pl08x)
+{
+ /* Expose a simple debugfs interface to view channel assignments and states */
+ (void) debugfs_create_file(dev_name(&pl08x->adev->dev), S_IFREG | S_IRUGO,
+ NULL, pl08x,
+ &pl08x_debugfs_operations);
+}
+
+#else
+static inline void init_pl08x_debugfs(struct pl08x_driver_data *pl08x)
+{
+}
+#endif
+
+static int pl08x_probe(struct amba_device *adev, struct amba_id *id)
+{
+ struct pl08x_driver_data *pl08x;
+ struct vendor_data *vd = id->data;
+ int ret = 0;
+ int i;
+
+ ret = amba_request_regions(adev, NULL);
+ if (ret)
+ return ret;
+
+ /* Create the driver state holder */
+ pl08x = kzalloc(sizeof(struct pl08x_driver_data), GFP_KERNEL);
+ if (!pl08x) {
+ ret = -ENOMEM;
+ goto out_no_pl08x;
+ }
+
+ /* Initialize memcpy engine */
+ dma_cap_set(DMA_MEMCPY, pl08x->memcpy.cap_mask);
+ pl08x->memcpy.dev = &adev->dev;
+ pl08x->memcpy.device_alloc_chan_resources = pl08x_alloc_chan_resources;
+ pl08x->memcpy.device_free_chan_resources = pl08x_free_chan_resources;
+ pl08x->memcpy.device_prep_dma_memcpy = pl08x_prep_dma_memcpy;
+ pl08x->memcpy.device_prep_dma_interrupt = pl08x_prep_dma_interrupt;
+ pl08x->memcpy.device_tx_status = pl08x_dma_tx_status;
+ pl08x->memcpy.device_issue_pending = pl08x_issue_pending;
+ pl08x->memcpy.device_control = pl08x_control;
+
+ /* Initialize slave engine */
+ dma_cap_set(DMA_SLAVE, pl08x->slave.cap_mask);
+ pl08x->slave.dev = &adev->dev;
+ pl08x->slave.device_alloc_chan_resources = pl08x_alloc_chan_resources;
+ pl08x->slave.device_free_chan_resources = pl08x_free_chan_resources;
+ pl08x->slave.device_prep_dma_interrupt = pl08x_prep_dma_interrupt;
+ pl08x->slave.device_tx_status = pl08x_dma_tx_status;
+ pl08x->slave.device_issue_pending = pl08x_issue_pending;
+ pl08x->slave.device_prep_slave_sg = pl08x_prep_slave_sg;
+ pl08x->slave.device_control = pl08x_control;
+
+ /* Get the platform data */
+ pl08x->pd = dev_get_platdata(&adev->dev);
+ if (!pl08x->pd) {
+ dev_err(&adev->dev, "no platform data supplied\n");
+ goto out_no_platdata;
+ }
+
+ /* Assign useful pointers to the driver state */
+ pl08x->adev = adev;
+ pl08x->vd = vd;
+
+ /* A DMA memory pool for LLIs, align on 1-byte boundary */
+ pl08x->pool = dma_pool_create(DRIVER_NAME, &pl08x->adev->dev,
+ PL08X_LLI_TSFR_SIZE, PL08X_ALIGN, 0);
+ if (!pl08x->pool) {
+ ret = -ENOMEM;
+ goto out_no_lli_pool;
+ }
+
+ spin_lock_init(&pl08x->lock);
+
+ pl08x->base = ioremap(adev->res.start, resource_size(&adev->res));
+ if (!pl08x->base) {
+ ret = -ENOMEM;
+ goto out_no_ioremap;
+ }
+
+ /* Turn on the PL08x */
+ pl08x_ensure_on(pl08x);
+
+ /*
+ * Attach the interrupt handler
+ */
+ writel(0x000000FF, pl08x->base + PL080_ERR_CLEAR);
+ writel(0x000000FF, pl08x->base + PL080_TC_CLEAR);
+
+ ret = request_irq(adev->irq[0], pl08x_irq, IRQF_DISABLED,
+ vd->name, pl08x);
+ if (ret) {
+ dev_err(&adev->dev, "%s failed to request interrupt %d\n",
+ __func__, adev->irq[0]);
+ goto out_no_irq;
+ }
+
+ /* Initialize physical channels */
+ pl08x->phy_chans = kmalloc((vd->channels * sizeof(struct pl08x_phy_chan)),
+ GFP_KERNEL);
+ if (!pl08x->phy_chans) {
+ dev_err(&adev->dev, "%s failed to allocate "
+ "physical channel holders\n",
+ __func__);
+ goto out_no_phychans;
+ }
+
+ for (i = 0; i < vd->channels; i++) {
+ struct pl08x_phy_chan *ch = &pl08x->phy_chans[i];
+
+ ch->id = i;
+ ch->base = pl08x->base + PL080_Cx_BASE(i);
+ spin_lock_init(&ch->lock);
+ ch->serving = NULL;
+ ch->signal = -1;
+ dev_info(&adev->dev,
+ "physical channel %d is %s\n", i,
+ pl08x_phy_channel_busy(ch) ? "BUSY" : "FREE");
+ }
+
+ /* Register as many memcpy channels as there are physical channels */
+ ret = pl08x_dma_init_virtual_channels(pl08x, &pl08x->memcpy,
+ pl08x->vd->channels, false);
+ if (ret <= 0) {
+ dev_warn(&pl08x->adev->dev,
+ "%s failed to enumerate memcpy channels - %d\n",
+ __func__, ret);
+ goto out_no_memcpy;
+ }
+ pl08x->memcpy.chancnt = ret;
+
+ /* Register slave channels */
+ ret = pl08x_dma_init_virtual_channels(pl08x, &pl08x->slave,
+ pl08x->pd->num_slave_channels,
+ true);
+ if (ret <= 0) {
+ dev_warn(&pl08x->adev->dev,
+ "%s failed to enumerate slave channels - %d\n",
+ __func__, ret);
+ goto out_no_slave;
+ }
+ pl08x->slave.chancnt = ret;
+
+ ret = dma_async_device_register(&pl08x->memcpy);
+ if (ret) {
+ dev_warn(&pl08x->adev->dev,
+ "%s failed to register memcpy as an async device - %d\n",
+ __func__, ret);
+ goto out_no_memcpy_reg;
+ }
+
+ ret = dma_async_device_register(&pl08x->slave);
+ if (ret) {
+ dev_warn(&pl08x->adev->dev,
+ "%s failed to register slave as an async device - %d\n",
+ __func__, ret);
+ goto out_no_slave_reg;
+ }
+
+ amba_set_drvdata(adev, pl08x);
+ init_pl08x_debugfs(pl08x);
+ dev_info(&pl08x->adev->dev, "ARM(R) %s DMA block initialized @%08x\n",
+ vd->name, adev->res.start);
+ return 0;
+
+out_no_slave_reg:
+ dma_async_device_unregister(&pl08x->memcpy);
+out_no_memcpy_reg:
+ pl08x_free_virtual_channels(&pl08x->slave);
+out_no_slave:
+ pl08x_free_virtual_channels(&pl08x->memcpy);
+out_no_memcpy:
+ kfree(pl08x->phy_chans);
+out_no_phychans:
+ free_irq(adev->irq[0], pl08x);
+out_no_irq:
+ iounmap(pl08x->base);
+out_no_ioremap:
+ dma_pool_destroy(pl08x->pool);
+out_no_lli_pool:
+out_no_platdata:
+ kfree(pl08x);
+out_no_pl08x:
+ amba_release_regions(adev);
+ return ret;
+}
+
+/* The PL080 has 8 channels and the PL081 has just 2 */
+static struct vendor_data vendor_pl080 = {
+ .name = "PL080",
+ .channels = 8,
+ .dualmaster = true,
+};
+
+static struct vendor_data vendor_pl081 = {
+ .name = "PL081",
+ .channels = 2,
+ .dualmaster = false,
+};
+
+static struct amba_id pl08x_ids[] = {
+ /* PL080 */
+ {
+ .id = 0x00041080,
+ .mask = 0x000fffff,
+ .data = &vendor_pl080,
+ },
+ /* PL081 */
+ {
+ .id = 0x00041081,
+ .mask = 0x000fffff,
+ .data = &vendor_pl081,
+ },
+ /* Nomadik 8815 PL080 variant */
+ {
+ .id = 0x00280880,
+ .mask = 0x00ffffff,
+ .data = &vendor_pl080,
+ },
+ { 0, 0 },
+};
+
+static struct amba_driver pl08x_amba_driver = {
+ .drv.name = DRIVER_NAME,
+ .id_table = pl08x_ids,
+ .probe = pl08x_probe,
+};
+
+static int __init pl08x_init(void)
+{
+ int retval;
+ retval = amba_driver_register(&pl08x_amba_driver);
+ if (retval)
+ printk(KERN_WARNING DRIVER_NAME
+ "failed to register as an amba device (%d)\n",
+ retval);
+ return retval;
+}
+subsys_initcall(pl08x_init);
diff --git a/drivers/dma/coh901318.c b/drivers/dma/coh901318.c
index ae2b8714d190..a6656834f0ff 100644
--- a/drivers/dma/coh901318.c
+++ b/drivers/dma/coh901318.c
@@ -1610,7 +1610,7 @@ int __init coh901318_init(void)
{
return platform_driver_probe(&coh901318_driver, coh901318_probe);
}
-subsys_initcall(coh901318_init);
+arch_initcall(coh901318_init);
void __exit coh901318_exit(void)
{
diff --git a/drivers/dma/dmaengine.c b/drivers/dma/dmaengine.c
index 9d31d5eb95c1..8bcb15fb959d 100644
--- a/drivers/dma/dmaengine.c
+++ b/drivers/dma/dmaengine.c
@@ -690,8 +690,12 @@ int dma_async_device_register(struct dma_device *device)
!device->device_prep_dma_memset);
BUG_ON(dma_has_cap(DMA_INTERRUPT, device->cap_mask) &&
!device->device_prep_dma_interrupt);
+ BUG_ON(dma_has_cap(DMA_SG, device->cap_mask) &&
+ !device->device_prep_dma_sg);
BUG_ON(dma_has_cap(DMA_SLAVE, device->cap_mask) &&
!device->device_prep_slave_sg);
+ BUG_ON(dma_has_cap(DMA_CYCLIC, device->cap_mask) &&
+ !device->device_prep_dma_cyclic);
BUG_ON(dma_has_cap(DMA_SLAVE, device->cap_mask) &&
!device->device_control);
@@ -702,7 +706,7 @@ int dma_async_device_register(struct dma_device *device)
BUG_ON(!device->dev);
/* note: this only matters in the
- * CONFIG_ASYNC_TX_DISABLE_CHANNEL_SWITCH=y case
+ * CONFIG_ASYNC_TX_ENABLE_CHANNEL_SWITCH=n case
*/
if (device_has_all_tx_types(device))
dma_cap_set(DMA_ASYNC_TX, device->cap_mask);
@@ -976,7 +980,7 @@ void dma_async_tx_descriptor_init(struct dma_async_tx_descriptor *tx,
struct dma_chan *chan)
{
tx->chan = chan;
- #ifndef CONFIG_ASYNC_TX_DISABLE_CHANNEL_SWITCH
+ #ifdef CONFIG_ASYNC_TX_ENABLE_CHANNEL_SWITCH
spin_lock_init(&tx->lock);
#endif
}
diff --git a/drivers/dma/fsldma.c b/drivers/dma/fsldma.c
index cea08bed9cf9..286c3ac6bdcc 100644
--- a/drivers/dma/fsldma.c
+++ b/drivers/dma/fsldma.c
@@ -35,9 +35,10 @@
#include <linux/dmapool.h>
#include <linux/of_platform.h>
-#include <asm/fsldma.h>
#include "fsldma.h"
+static const char msg_ld_oom[] = "No free memory for link descriptor\n";
+
static void dma_init(struct fsldma_chan *chan)
{
/* Reset the channel */
@@ -499,7 +500,7 @@ fsl_dma_prep_interrupt(struct dma_chan *dchan, unsigned long flags)
new = fsl_dma_alloc_descriptor(chan);
if (!new) {
- dev_err(chan->dev, "No free memory for link descriptor\n");
+ dev_err(chan->dev, msg_ld_oom);
return NULL;
}
@@ -536,8 +537,7 @@ static struct dma_async_tx_descriptor *fsl_dma_prep_memcpy(
/* Allocate the link descriptor from DMA pool */
new = fsl_dma_alloc_descriptor(chan);
if (!new) {
- dev_err(chan->dev,
- "No free memory for link descriptor\n");
+ dev_err(chan->dev, msg_ld_oom);
goto fail;
}
#ifdef FSL_DMA_LD_DEBUG
@@ -583,223 +583,205 @@ fail:
return NULL;
}
-/**
- * fsl_dma_prep_slave_sg - prepare descriptors for a DMA_SLAVE transaction
- * @chan: DMA channel
- * @sgl: scatterlist to transfer to/from
- * @sg_len: number of entries in @scatterlist
- * @direction: DMA direction
- * @flags: DMAEngine flags
- *
- * Prepare a set of descriptors for a DMA_SLAVE transaction. Following the
- * DMA_SLAVE API, this gets the device-specific information from the
- * chan->private variable.
- */
-static struct dma_async_tx_descriptor *fsl_dma_prep_slave_sg(
- struct dma_chan *dchan, struct scatterlist *sgl, unsigned int sg_len,
- enum dma_data_direction direction, unsigned long flags)
+static struct dma_async_tx_descriptor *fsl_dma_prep_sg(struct dma_chan *dchan,
+ struct scatterlist *dst_sg, unsigned int dst_nents,
+ struct scatterlist *src_sg, unsigned int src_nents,
+ unsigned long flags)
{
- struct fsldma_chan *chan;
struct fsl_desc_sw *first = NULL, *prev = NULL, *new = NULL;
- struct fsl_dma_slave *slave;
- size_t copy;
-
- int i;
- struct scatterlist *sg;
- size_t sg_used;
- size_t hw_used;
- struct fsl_dma_hw_addr *hw;
- dma_addr_t dma_dst, dma_src;
+ struct fsldma_chan *chan = to_fsl_chan(dchan);
+ size_t dst_avail, src_avail;
+ dma_addr_t dst, src;
+ size_t len;
- if (!dchan)
+ /* basic sanity checks */
+ if (dst_nents == 0 || src_nents == 0)
return NULL;
- if (!dchan->private)
+ if (dst_sg == NULL || src_sg == NULL)
return NULL;
- chan = to_fsl_chan(dchan);
- slave = dchan->private;
+ /*
+ * TODO: should we check that both scatterlists have the same
+ * number of bytes in total? Is that really an error?
+ */
- if (list_empty(&slave->addresses))
- return NULL;
+ /* get prepared for the loop */
+ dst_avail = sg_dma_len(dst_sg);
+ src_avail = sg_dma_len(src_sg);
- hw = list_first_entry(&slave->addresses, struct fsl_dma_hw_addr, entry);
- hw_used = 0;
+ /* run until we are out of scatterlist entries */
+ while (true) {
- /*
- * Build the hardware transaction to copy from the scatterlist to
- * the hardware, or from the hardware to the scatterlist
- *
- * If you are copying from the hardware to the scatterlist and it
- * takes two hardware entries to fill an entire page, then both
- * hardware entries will be coalesced into the same page
- *
- * If you are copying from the scatterlist to the hardware and a
- * single page can fill two hardware entries, then the data will
- * be read out of the page into the first hardware entry, and so on
- */
- for_each_sg(sgl, sg, sg_len, i) {
- sg_used = 0;
-
- /* Loop until the entire scatterlist entry is used */
- while (sg_used < sg_dma_len(sg)) {
-
- /*
- * If we've used up the current hardware address/length
- * pair, we need to load a new one
- *
- * This is done in a while loop so that descriptors with
- * length == 0 will be skipped
- */
- while (hw_used >= hw->length) {
-
- /*
- * If the current hardware entry is the last
- * entry in the list, we're finished
- */
- if (list_is_last(&hw->entry, &slave->addresses))
- goto finished;
-
- /* Get the next hardware address/length pair */
- hw = list_entry(hw->entry.next,
- struct fsl_dma_hw_addr, entry);
- hw_used = 0;
- }
-
- /* Allocate the link descriptor from DMA pool */
- new = fsl_dma_alloc_descriptor(chan);
- if (!new) {
- dev_err(chan->dev, "No free memory for "
- "link descriptor\n");
- goto fail;
- }
+ /* create the largest transaction possible */
+ len = min_t(size_t, src_avail, dst_avail);
+ len = min_t(size_t, len, FSL_DMA_BCR_MAX_CNT);
+ if (len == 0)
+ goto fetch;
+
+ dst = sg_dma_address(dst_sg) + sg_dma_len(dst_sg) - dst_avail;
+ src = sg_dma_address(src_sg) + sg_dma_len(src_sg) - src_avail;
+
+ /* allocate and populate the descriptor */
+ new = fsl_dma_alloc_descriptor(chan);
+ if (!new) {
+ dev_err(chan->dev, msg_ld_oom);
+ goto fail;
+ }
#ifdef FSL_DMA_LD_DEBUG
- dev_dbg(chan->dev, "new link desc alloc %p\n", new);
+ dev_dbg(chan->dev, "new link desc alloc %p\n", new);
#endif
- /*
- * Calculate the maximum number of bytes to transfer,
- * making sure it is less than the DMA controller limit
- */
- copy = min_t(size_t, sg_dma_len(sg) - sg_used,
- hw->length - hw_used);
- copy = min_t(size_t, copy, FSL_DMA_BCR_MAX_CNT);
-
- /*
- * DMA_FROM_DEVICE
- * from the hardware to the scatterlist
- *
- * DMA_TO_DEVICE
- * from the scatterlist to the hardware
- */
- if (direction == DMA_FROM_DEVICE) {
- dma_src = hw->address + hw_used;
- dma_dst = sg_dma_address(sg) + sg_used;
- } else {
- dma_src = sg_dma_address(sg) + sg_used;
- dma_dst = hw->address + hw_used;
- }
-
- /* Fill in the descriptor */
- set_desc_cnt(chan, &new->hw, copy);
- set_desc_src(chan, &new->hw, dma_src);
- set_desc_dst(chan, &new->hw, dma_dst);
-
- /*
- * If this is not the first descriptor, chain the
- * current descriptor after the previous descriptor
- */
- if (!first) {
- first = new;
- } else {
- set_desc_next(chan, &prev->hw,
- new->async_tx.phys);
- }
-
- new->async_tx.cookie = 0;
- async_tx_ack(&new->async_tx);
-
- prev = new;
- sg_used += copy;
- hw_used += copy;
-
- /* Insert the link descriptor into the LD ring */
- list_add_tail(&new->node, &first->tx_list);
- }
- }
+ set_desc_cnt(chan, &new->hw, len);
+ set_desc_src(chan, &new->hw, src);
+ set_desc_dst(chan, &new->hw, dst);
-finished:
+ if (!first)
+ first = new;
+ else
+ set_desc_next(chan, &prev->hw, new->async_tx.phys);
- /* All of the hardware address/length pairs had length == 0 */
- if (!first || !new)
- return NULL;
+ new->async_tx.cookie = 0;
+ async_tx_ack(&new->async_tx);
+ prev = new;
- new->async_tx.flags = flags;
- new->async_tx.cookie = -EBUSY;
+ /* Insert the link descriptor into the LD ring */
+ list_add_tail(&new->node, &first->tx_list);
- /* Set End-of-link to the last link descriptor of new list */
- set_ld_eol(chan, new);
+ /* update metadata */
+ dst_avail -= len;
+ src_avail -= len;
+
+fetch:
+ /* fetch the next dst scatterlist entry */
+ if (dst_avail == 0) {
+
+ /* no more entries: we're done */
+ if (dst_nents == 0)
+ break;
+
+ /* fetch the next entry: if there are no more: done */
+ dst_sg = sg_next(dst_sg);
+ if (dst_sg == NULL)
+ break;
+
+ dst_nents--;
+ dst_avail = sg_dma_len(dst_sg);
+ }
- /* Enable extra controller features */
- if (chan->set_src_loop_size)
- chan->set_src_loop_size(chan, slave->src_loop_size);
+ /* fetch the next src scatterlist entry */
+ if (src_avail == 0) {
- if (chan->set_dst_loop_size)
- chan->set_dst_loop_size(chan, slave->dst_loop_size);
+ /* no more entries: we're done */
+ if (src_nents == 0)
+ break;
- if (chan->toggle_ext_start)
- chan->toggle_ext_start(chan, slave->external_start);
+ /* fetch the next entry: if there are no more: done */
+ src_sg = sg_next(src_sg);
+ if (src_sg == NULL)
+ break;
- if (chan->toggle_ext_pause)
- chan->toggle_ext_pause(chan, slave->external_pause);
+ src_nents--;
+ src_avail = sg_dma_len(src_sg);
+ }
+ }
- if (chan->set_request_count)
- chan->set_request_count(chan, slave->request_count);
+ new->async_tx.flags = flags; /* client is in control of this ack */
+ new->async_tx.cookie = -EBUSY;
+
+ /* Set End-of-link to the last link descriptor of new list */
+ set_ld_eol(chan, new);
return &first->async_tx;
fail:
- /* If first was not set, then we failed to allocate the very first
- * descriptor, and we're done */
if (!first)
return NULL;
+ fsldma_free_desc_list_reverse(chan, &first->tx_list);
+ return NULL;
+}
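+
+/*
+ * Hypothetical example (not built into this driver): how a client could
+ * use the new DMA_SG capability above to copy between two already
+ * DMA-mapped scatterlists. The scatterlists and their nent counts are
+ * assumed to be provided by the caller.
+ */
+static dma_cookie_t example_fsl_sg_copy(struct dma_chan *chan,
+ struct scatterlist *dst_sg, unsigned int dst_nents,
+ struct scatterlist *src_sg, unsigned int src_nents)
+{
+ struct dma_async_tx_descriptor *tx;
+
+ tx = chan->device->device_prep_dma_sg(chan, dst_sg, dst_nents,
+ src_sg, src_nents,
+ DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
+ if (!tx)
+ return -ENOMEM;
+
+ /* Submit and let the caller issue_pending() when ready */
+ return tx->tx_submit(tx);
+}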
+
+/**
+ * fsl_dma_prep_slave_sg - prepare descriptors for a DMA_SLAVE transaction
+ * @chan: DMA channel
+ * @sgl: scatterlist to transfer to/from
+ * @sg_len: number of entries in @scatterlist
+ * @direction: DMA direction
+ * @flags: DMAEngine flags
+ *
+ * Prepare a set of descriptors for a DMA_SLAVE transaction. Following the
+ * DMA_SLAVE API, this gets the device-specific information from the
+ * chan->private variable.
+ */
+static struct dma_async_tx_descriptor *fsl_dma_prep_slave_sg(
+ struct dma_chan *dchan, struct scatterlist *sgl, unsigned int sg_len,
+ enum dma_data_direction direction, unsigned long flags)
+{
/*
- * First is set, so all of the descriptors we allocated have been added
- * to first->tx_list, INCLUDING "first" itself. Therefore we
- * must traverse the list backwards freeing each descriptor in turn
+ * This operation is not supported on the Freescale DMA controller
*
- * We're re-using variables for the loop, oh well
+ * However, we need to provide the function pointer to allow the
+ * device_control() method to work.
*/
- fsldma_free_desc_list_reverse(chan, &first->tx_list);
return NULL;
}
static int fsl_dma_device_control(struct dma_chan *dchan,
enum dma_ctrl_cmd cmd, unsigned long arg)
{
+ struct dma_slave_config *config;
struct fsldma_chan *chan;
unsigned long flags;
-
- /* Only supports DMA_TERMINATE_ALL */
- if (cmd != DMA_TERMINATE_ALL)
- return -ENXIO;
+ int size;
if (!dchan)
return -EINVAL;
chan = to_fsl_chan(dchan);
- /* Halt the DMA engine */
- dma_halt(chan);
+ switch (cmd) {
+ case DMA_TERMINATE_ALL:
+ /* Halt the DMA engine */
+ dma_halt(chan);
- spin_lock_irqsave(&chan->desc_lock, flags);
+ spin_lock_irqsave(&chan->desc_lock, flags);
- /* Remove and free all of the descriptors in the LD queue */
- fsldma_free_desc_list(chan, &chan->ld_pending);
- fsldma_free_desc_list(chan, &chan->ld_running);
+ /* Remove and free all of the descriptors in the LD queue */
+ fsldma_free_desc_list(chan, &chan->ld_pending);
+ fsldma_free_desc_list(chan, &chan->ld_running);
- spin_unlock_irqrestore(&chan->desc_lock, flags);
+ spin_unlock_irqrestore(&chan->desc_lock, flags);
+ return 0;
+
+ case DMA_SLAVE_CONFIG:
+ config = (struct dma_slave_config *)arg;
+
+ /* make sure the channel supports setting burst size */
+ if (!chan->set_request_count)
+ return -ENXIO;
+
+ /* we set the controller burst size depending on direction */
+ if (config->direction == DMA_TO_DEVICE)
+ size = config->dst_addr_width * config->dst_maxburst;
+ else
+ size = config->src_addr_width * config->src_maxburst;
+
+ chan->set_request_count(chan, size);
+ return 0;
+
+ case FSLDMA_EXTERNAL_START:
+
+ /* make sure the channel supports external start */
+ if (!chan->toggle_ext_start)
+ return -ENXIO;
+
+ chan->toggle_ext_start(chan, arg);
+ return 0;
+
+ default:
+ return -ENXIO;
+ }
return 0;
}
@@ -1327,11 +1309,13 @@ static int __devinit fsldma_of_probe(struct platform_device *op,
dma_cap_set(DMA_MEMCPY, fdev->common.cap_mask);
dma_cap_set(DMA_INTERRUPT, fdev->common.cap_mask);
+ dma_cap_set(DMA_SG, fdev->common.cap_mask);
dma_cap_set(DMA_SLAVE, fdev->common.cap_mask);
fdev->common.device_alloc_chan_resources = fsl_dma_alloc_chan_resources;
fdev->common.device_free_chan_resources = fsl_dma_free_chan_resources;
fdev->common.device_prep_dma_interrupt = fsl_dma_prep_interrupt;
fdev->common.device_prep_dma_memcpy = fsl_dma_prep_memcpy;
+ fdev->common.device_prep_dma_sg = fsl_dma_prep_sg;
fdev->common.device_tx_status = fsl_tx_status;
fdev->common.device_issue_pending = fsl_dma_memcpy_issue_pending;
fdev->common.device_prep_slave_sg = fsl_dma_prep_slave_sg;
diff --git a/drivers/dma/imx-dma.c b/drivers/dma/imx-dma.c
new file mode 100644
index 000000000000..f629e4961af5
--- /dev/null
+++ b/drivers/dma/imx-dma.c
@@ -0,0 +1,424 @@
+/*
+ * drivers/dma/imx-dma.c
+ *
+ * This file contains a driver for the Freescale i.MX DMA engine
+ * found on i.MX1/21/27
+ *
+ * Copyright 2010 Sascha Hauer, Pengutronix <s.hauer@pengutronix.de>
+ *
+ * The code contained herein is licensed under the GNU General Public
+ * License. You may obtain a copy of the GNU General Public License
+ * Version 2 or later at the following locations:
+ *
+ * http://www.opensource.org/licenses/gpl-license.html
+ * http://www.gnu.org/copyleft/gpl.html
+ */
+#include <linux/init.h>
+#include <linux/types.h>
+#include <linux/mm.h>
+#include <linux/interrupt.h>
+#include <linux/spinlock.h>
+#include <linux/device.h>
+#include <linux/dma-mapping.h>
+#include <linux/slab.h>
+#include <linux/platform_device.h>
+#include <linux/dmaengine.h>
+
+#include <asm/irq.h>
+#include <mach/dma-v1.h>
+#include <mach/hardware.h>
+
+struct imxdma_channel {
+ struct imxdma_engine *imxdma;
+ unsigned int channel;
+ unsigned int imxdma_channel;
+
+ enum dma_slave_buswidth word_size;
+ dma_addr_t per_address;
+ u32 watermark_level;
+ struct dma_chan chan;
+ spinlock_t lock;
+ struct dma_async_tx_descriptor desc;
+ dma_cookie_t last_completed;
+ enum dma_status status;
+ int dma_request;
+ struct scatterlist *sg_list;
+};
+
+#define MAX_DMA_CHANNELS 8
+
+struct imxdma_engine {
+ struct device *dev;
+ struct dma_device dma_device;
+ struct imxdma_channel channel[MAX_DMA_CHANNELS];
+};
+
+static struct imxdma_channel *to_imxdma_chan(struct dma_chan *chan)
+{
+ return container_of(chan, struct imxdma_channel, chan);
+}
+
+static void imxdma_handle(struct imxdma_channel *imxdmac)
+{
+ if (imxdmac->desc.callback)
+ imxdmac->desc.callback(imxdmac->desc.callback_param);
+ imxdmac->last_completed = imxdmac->desc.cookie;
+}
+
+static void imxdma_irq_handler(int channel, void *data)
+{
+ struct imxdma_channel *imxdmac = data;
+
+ imxdmac->status = DMA_SUCCESS;
+ imxdma_handle(imxdmac);
+}
+
+static void imxdma_err_handler(int channel, void *data, int error)
+{
+ struct imxdma_channel *imxdmac = data;
+
+ imxdmac->status = DMA_ERROR;
+ imxdma_handle(imxdmac);
+}
+
+static void imxdma_progression(int channel, void *data,
+ struct scatterlist *sg)
+{
+ struct imxdma_channel *imxdmac = data;
+
+ imxdmac->status = DMA_SUCCESS;
+ imxdma_handle(imxdmac);
+}
+
+static int imxdma_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd,
+ unsigned long arg)
+{
+ struct imxdma_channel *imxdmac = to_imxdma_chan(chan);
+ struct dma_slave_config *dmaengine_cfg = (void *)arg;
+ int ret;
+ unsigned int mode = 0;
+
+ switch (cmd) {
+ case DMA_TERMINATE_ALL:
+ imxdmac->status = DMA_ERROR;
+ imx_dma_disable(imxdmac->imxdma_channel);
+ return 0;
+ case DMA_SLAVE_CONFIG:
+ if (dmaengine_cfg->direction == DMA_FROM_DEVICE) {
+ imxdmac->per_address = dmaengine_cfg->src_addr;
+ imxdmac->watermark_level = dmaengine_cfg->src_maxburst;
+ imxdmac->word_size = dmaengine_cfg->src_addr_width;
+ } else {
+ imxdmac->per_address = dmaengine_cfg->dst_addr;
+ imxdmac->watermark_level = dmaengine_cfg->dst_maxburst;
+ imxdmac->word_size = dmaengine_cfg->dst_addr_width;
+ }
+
+ switch (imxdmac->word_size) {
+ case DMA_SLAVE_BUSWIDTH_1_BYTE:
+ mode = IMX_DMA_MEMSIZE_8;
+ break;
+ case DMA_SLAVE_BUSWIDTH_2_BYTES:
+ mode = IMX_DMA_MEMSIZE_16;
+ break;
+ default:
+ case DMA_SLAVE_BUSWIDTH_4_BYTES:
+ mode = IMX_DMA_MEMSIZE_32;
+ break;
+ }
+ ret = imx_dma_config_channel(imxdmac->imxdma_channel,
+ mode | IMX_DMA_TYPE_FIFO,
+ IMX_DMA_MEMSIZE_32 | IMX_DMA_TYPE_LINEAR,
+ imxdmac->dma_request, 1);
+
+ if (ret)
+ return ret;
+
+ imx_dma_config_burstlen(imxdmac->imxdma_channel, imxdmac->watermark_level);
+
+ return 0;
+ default:
+ return -ENOSYS;
+ }
+
+ return -EINVAL;
+}
+
+static enum dma_status imxdma_tx_status(struct dma_chan *chan,
+ dma_cookie_t cookie,
+ struct dma_tx_state *txstate)
+{
+ struct imxdma_channel *imxdmac = to_imxdma_chan(chan);
+ dma_cookie_t last_used;
+ enum dma_status ret;
+
+ last_used = chan->cookie;
+
+ ret = dma_async_is_complete(cookie, imxdmac->last_completed, last_used);
+ dma_set_tx_state(txstate, imxdmac->last_completed, last_used, 0);
+
+ return ret;
+}
+
+static dma_cookie_t imxdma_assign_cookie(struct imxdma_channel *imxdma)
+{
+ dma_cookie_t cookie = imxdma->chan.cookie;
+
+ if (++cookie < 0)
+ cookie = 1;
+
+ imxdma->chan.cookie = cookie;
+ imxdma->desc.cookie = cookie;
+
+ return cookie;
+}
+
+static dma_cookie_t imxdma_tx_submit(struct dma_async_tx_descriptor *tx)
+{
+ struct imxdma_channel *imxdmac = to_imxdma_chan(tx->chan);
+ dma_cookie_t cookie;
+
+ spin_lock_irq(&imxdmac->lock);
+
+ cookie = imxdma_assign_cookie(imxdmac);
+
+ imx_dma_enable(imxdmac->imxdma_channel);
+
+ spin_unlock_irq(&imxdmac->lock);
+
+ return cookie;
+}
+
+static int imxdma_alloc_chan_resources(struct dma_chan *chan)
+{
+ struct imxdma_channel *imxdmac = to_imxdma_chan(chan);
+ struct imx_dma_data *data = chan->private;
+
+ imxdmac->dma_request = data->dma_request;
+
+ dma_async_tx_descriptor_init(&imxdmac->desc, chan);
+ imxdmac->desc.tx_submit = imxdma_tx_submit;
+ /* txd.flags will be overwritten in prep funcs */
+ imxdmac->desc.flags = DMA_CTRL_ACK;
+
+ imxdmac->status = DMA_SUCCESS;
+
+ return 0;
+}
+
+static void imxdma_free_chan_resources(struct dma_chan *chan)
+{
+ struct imxdma_channel *imxdmac = to_imxdma_chan(chan);
+
+ imx_dma_disable(imxdmac->imxdma_channel);
+
+ if (imxdmac->sg_list) {
+ kfree(imxdmac->sg_list);
+ imxdmac->sg_list = NULL;
+ }
+}
+
+static struct dma_async_tx_descriptor *imxdma_prep_slave_sg(
+ struct dma_chan *chan, struct scatterlist *sgl,
+ unsigned int sg_len, enum dma_data_direction direction,
+ unsigned long flags)
+{
+ struct imxdma_channel *imxdmac = to_imxdma_chan(chan);
+ struct scatterlist *sg;
+ int i, ret, dma_length = 0;
+ unsigned int dmamode;
+
+ if (imxdmac->status == DMA_IN_PROGRESS)
+ return NULL;
+
+ imxdmac->status = DMA_IN_PROGRESS;
+
+ for_each_sg(sgl, sg, sg_len, i) {
+ dma_length += sg->length;
+ }
+
+ if (direction == DMA_FROM_DEVICE)
+ dmamode = DMA_MODE_READ;
+ else
+ dmamode = DMA_MODE_WRITE;
+
+ ret = imx_dma_setup_sg(imxdmac->imxdma_channel, sgl, sg_len,
+ dma_length, imxdmac->per_address, dmamode);
+ if (ret)
+ return NULL;
+
+ return &imxdmac->desc;
+}
+
+static struct dma_async_tx_descriptor *imxdma_prep_dma_cyclic(
+ struct dma_chan *chan, dma_addr_t dma_addr, size_t buf_len,
+ size_t period_len, enum dma_data_direction direction)
+{
+ struct imxdma_channel *imxdmac = to_imxdma_chan(chan);
+ struct imxdma_engine *imxdma = imxdmac->imxdma;
+ int i, ret;
+ unsigned int periods = buf_len / period_len;
+ unsigned int dmamode;
+
+ dev_dbg(imxdma->dev, "%s channel: %d buf_len=%d period_len=%d\n",
+ __func__, imxdmac->channel, buf_len, period_len);
+
+ if (imxdmac->status == DMA_IN_PROGRESS)
+ return NULL;
+ imxdmac->status = DMA_IN_PROGRESS;
+
+ ret = imx_dma_setup_progression_handler(imxdmac->imxdma_channel,
+ imxdma_progression);
+ if (ret) {
+ dev_err(imxdma->dev, "Failed to setup the DMA handler\n");
+ return NULL;
+ }
+
+ if (imxdmac->sg_list)
+ kfree(imxdmac->sg_list);
+
+ imxdmac->sg_list = kcalloc(periods + 1,
+ sizeof(struct scatterlist), GFP_KERNEL);
+ if (!imxdmac->sg_list)
+ return NULL;
+
+ sg_init_table(imxdmac->sg_list, periods);
+
+ for (i = 0; i < periods; i++) {
+ imxdmac->sg_list[i].page_link = 0;
+ imxdmac->sg_list[i].offset = 0;
+ imxdmac->sg_list[i].dma_address = dma_addr;
+ imxdmac->sg_list[i].length = period_len;
+ dma_addr += period_len;
+ }
+
+ /* close the loop */
+ imxdmac->sg_list[periods].offset = 0;
+ imxdmac->sg_list[periods].length = 0;
+ imxdmac->sg_list[periods].page_link =
+ ((unsigned long)imxdmac->sg_list | 0x01) & ~0x02;
+
+ if (direction == DMA_FROM_DEVICE)
+ dmamode = DMA_MODE_READ;
+ else
+ dmamode = DMA_MODE_WRITE;
+
+ ret = imx_dma_setup_sg(imxdmac->imxdma_channel, imxdmac->sg_list, periods,
+ IMX_DMA_LENGTH_LOOP, imxdmac->per_address, dmamode);
+ if (ret)
+ return NULL;
+
+ return &imxdmac->desc;
+}
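+
+/*
+ * Hypothetical example (not built into this driver): a typical
+ * audio-style user of the cyclic API above. The buffer, period length
+ * and DMA_TO_DEVICE direction are assumptions for the example.
+ */
+static int example_imxdma_start_cyclic(struct dma_chan *chan,
+ dma_addr_t buf, size_t buf_len, size_t period_len)
+{
+ struct dma_async_tx_descriptor *desc;
+
+ desc = chan->device->device_prep_dma_cyclic(chan, buf, buf_len,
+ period_len, DMA_TO_DEVICE);
+ if (!desc)
+ return -EINVAL;
+
+ /* Submit the single descriptor and kick the engine */
+ desc->tx_submit(desc);
+ chan->device->device_issue_pending(chan);
+
+ return 0;
+}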
+
+static void imxdma_issue_pending(struct dma_chan *chan)
+{
+ /*
+ * Nothing to do. We only have a single descriptor
+ */
+}
+
+static int __init imxdma_probe(struct platform_device *pdev)
+{
+ struct imxdma_engine *imxdma;
+ int ret, i;
+
+ imxdma = kzalloc(sizeof(*imxdma), GFP_KERNEL);
+ if (!imxdma)
+ return -ENOMEM;
+
+ INIT_LIST_HEAD(&imxdma->dma_device.channels);
+
+ /* Initialize channel parameters */
+ for (i = 0; i < MAX_DMA_CHANNELS; i++) {
+ struct imxdma_channel *imxdmac = &imxdma->channel[i];
+
+ imxdmac->imxdma_channel = imx_dma_request_by_prio("dmaengine",
+ DMA_PRIO_MEDIUM);
+ if ((int)imxdmac->imxdma_channel < 0) {
+ ret = -ENODEV;
+ goto err_init;
+ }
+
+ imx_dma_setup_handlers(imxdmac->imxdma_channel,
+ imxdma_irq_handler, imxdma_err_handler, imxdmac);
+
+ imxdmac->imxdma = imxdma;
+ spin_lock_init(&imxdmac->lock);
+
+ dma_cap_set(DMA_SLAVE, imxdma->dma_device.cap_mask);
+ dma_cap_set(DMA_CYCLIC, imxdma->dma_device.cap_mask);
+
+ imxdmac->chan.device = &imxdma->dma_device;
+ imxdmac->chan.chan_id = i;
+ imxdmac->channel = i;
+
+ /* Add the channel to the DMAC list */
+ list_add_tail(&imxdmac->chan.device_node, &imxdma->dma_device.channels);
+ }
+
+ imxdma->dev = &pdev->dev;
+ imxdma->dma_device.dev = &pdev->dev;
+
+ imxdma->dma_device.device_alloc_chan_resources = imxdma_alloc_chan_resources;
+ imxdma->dma_device.device_free_chan_resources = imxdma_free_chan_resources;
+ imxdma->dma_device.device_tx_status = imxdma_tx_status;
+ imxdma->dma_device.device_prep_slave_sg = imxdma_prep_slave_sg;
+ imxdma->dma_device.device_prep_dma_cyclic = imxdma_prep_dma_cyclic;
+ imxdma->dma_device.device_control = imxdma_control;
+ imxdma->dma_device.device_issue_pending = imxdma_issue_pending;
+
+ platform_set_drvdata(pdev, imxdma);
+
+ ret = dma_async_device_register(&imxdma->dma_device);
+ if (ret) {
+ dev_err(&pdev->dev, "unable to register\n");
+ goto err_init;
+ }
+
+ return 0;
+
+err_init:
+ while (--i >= 0) {
+ struct imxdma_channel *imxdmac = &imxdma->channel[i];
+ imx_dma_free(imxdmac->imxdma_channel);
+ }
+
+ kfree(imxdma);
+ return ret;
+}
+
+static int __exit imxdma_remove(struct platform_device *pdev)
+{
+ struct imxdma_engine *imxdma = platform_get_drvdata(pdev);
+ int i;
+
+ dma_async_device_unregister(&imxdma->dma_device);
+
+ for (i = 0; i < MAX_DMA_CHANNELS; i++) {
+ struct imxdma_channel *imxdmac = &imxdma->channel[i];
+
+ imx_dma_free(imxdmac->imxdma_channel);
+ }
+
+ kfree(imxdma);
+
+ return 0;
+}
+
+static struct platform_driver imxdma_driver = {
+ .driver = {
+ .name = "imx-dma",
+ },
+ .remove = __exit_p(imxdma_remove),
+};
+
+static int __init imxdma_module_init(void)
+{
+ return platform_driver_probe(&imxdma_driver, imxdma_probe);
+}
+subsys_initcall(imxdma_module_init);
+
+MODULE_AUTHOR("Sascha Hauer, Pengutronix <s.hauer@pengutronix.de>");
+MODULE_DESCRIPTION("i.MX dma driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/dma/imx-sdma.c b/drivers/dma/imx-sdma.c
new file mode 100644
index 000000000000..0834323a0599
--- /dev/null
+++ b/drivers/dma/imx-sdma.c
@@ -0,0 +1,1392 @@
+/*
+ * drivers/dma/imx-sdma.c
+ *
+ * This file contains a driver for the Freescale Smart DMA engine
+ *
+ * Copyright 2010 Sascha Hauer, Pengutronix <s.hauer@pengutronix.de>
+ *
+ * Based on code from Freescale:
+ *
+ * Copyright 2004-2009 Freescale Semiconductor, Inc. All Rights Reserved.
+ *
+ * The code contained herein is licensed under the GNU General Public
+ * License. You may obtain a copy of the GNU General Public License
+ * Version 2 or later at the following locations:
+ *
+ * http://www.opensource.org/licenses/gpl-license.html
+ * http://www.gnu.org/copyleft/gpl.html
+ */
+
+#include <linux/init.h>
+#include <linux/types.h>
+#include <linux/mm.h>
+#include <linux/interrupt.h>
+#include <linux/clk.h>
+#include <linux/wait.h>
+#include <linux/sched.h>
+#include <linux/semaphore.h>
+#include <linux/spinlock.h>
+#include <linux/device.h>
+#include <linux/dma-mapping.h>
+#include <linux/firmware.h>
+#include <linux/slab.h>
+#include <linux/platform_device.h>
+#include <linux/dmaengine.h>
+
+#include <asm/irq.h>
+#include <mach/sdma.h>
+#include <mach/dma.h>
+#include <mach/hardware.h>
+
+/* SDMA registers */
+#define SDMA_H_C0PTR 0x000
+#define SDMA_H_INTR 0x004
+#define SDMA_H_STATSTOP 0x008
+#define SDMA_H_START 0x00c
+#define SDMA_H_EVTOVR 0x010
+#define SDMA_H_DSPOVR 0x014
+#define SDMA_H_HOSTOVR 0x018
+#define SDMA_H_EVTPEND 0x01c
+#define SDMA_H_DSPENBL 0x020
+#define SDMA_H_RESET 0x024
+#define SDMA_H_EVTERR 0x028
+#define SDMA_H_INTRMSK 0x02c
+#define SDMA_H_PSW 0x030
+#define SDMA_H_EVTERRDBG 0x034
+#define SDMA_H_CONFIG 0x038
+#define SDMA_ONCE_ENB 0x040
+#define SDMA_ONCE_DATA 0x044
+#define SDMA_ONCE_INSTR 0x048
+#define SDMA_ONCE_STAT 0x04c
+#define SDMA_ONCE_CMD 0x050
+#define SDMA_EVT_MIRROR 0x054
+#define SDMA_ILLINSTADDR 0x058
+#define SDMA_CHN0ADDR 0x05c
+#define SDMA_ONCE_RTB 0x060
+#define SDMA_XTRIG_CONF1 0x070
+#define SDMA_XTRIG_CONF2 0x074
+#define SDMA_CHNENBL0_V2 0x200
+#define SDMA_CHNENBL0_V1 0x080
+#define SDMA_CHNPRI_0 0x100
+
+/*
+ * Buffer descriptor status values.
+ */
+#define BD_DONE 0x01
+#define BD_WRAP 0x02
+#define BD_CONT 0x04
+#define BD_INTR 0x08
+#define BD_RROR 0x10
+#define BD_LAST 0x20
+#define BD_EXTD 0x80
+
+/*
+ * Data Node descriptor status values.
+ */
+#define DND_END_OF_FRAME 0x80
+#define DND_END_OF_XFER 0x40
+#define DND_DONE 0x20
+#define DND_UNUSED 0x01
+
+/*
+ * IPCV2 descriptor status values.
+ */
+#define BD_IPCV2_END_OF_FRAME 0x40
+
+#define IPCV2_MAX_NODES 50
+/*
+ * Error bit set in the CCB status field by the SDMA,
+ * in setbd routine, in case of a transfer error
+ */
+#define DATA_ERROR 0x10000000
+
+/*
+ * Buffer descriptor commands.
+ */
+#define C0_ADDR 0x01
+#define C0_LOAD 0x02
+#define C0_DUMP 0x03
+#define C0_SETCTX 0x07
+#define C0_GETCTX 0x03
+#define C0_SETDM 0x01
+#define C0_SETPM 0x04
+#define C0_GETDM 0x02
+#define C0_GETPM 0x08
+/*
+ * Change endianness indicator in the BD command field
+ */
+#define CHANGE_ENDIANNESS 0x80
+
+/*
+ * Mode/Count of data node descriptors - IPCv2
+ */
+struct sdma_mode_count {
+ u32 count : 16; /* size of the buffer pointed by this BD */
+ u32 status : 8; /* E,R,I,C,W,D status bits stored here */
+ u32 command : 8; /* command mostly used for channel 0 */
+};
+
+/*
+ * Buffer descriptor
+ */
+struct sdma_buffer_descriptor {
+ struct sdma_mode_count mode;
+ u32 buffer_addr; /* address of the buffer described */
+ u32 ext_buffer_addr; /* extended buffer address */
+} __attribute__ ((packed));
+
+/**
+ * struct sdma_channel_control - Channel control Block
+ *
+ * @current_bd_ptr current buffer descriptor processed
+ * @base_bd_ptr first element of buffer descriptor array
+ * @unused padding. The SDMA engine expects an array of 128 byte
+ * control blocks
+ */
+struct sdma_channel_control {
+ u32 current_bd_ptr;
+ u32 base_bd_ptr;
+ u32 unused[2];
+} __attribute__ ((packed));
+
+/**
+ * struct sdma_state_registers - SDMA context for a channel
+ *
+ * @pc: program counter
+ * @t: test bit: status of arithmetic & test instruction
+ * @rpc: return program counter
+ * @sf: source fault while loading data
+ * @spc: loop start program counter
+ * @df: destination fault while storing data
+ * @epc: loop end program counter
+ * @lm: loop mode
+ */
+struct sdma_state_registers {
+ u32 pc :14;
+ u32 unused1: 1;
+ u32 t : 1;
+ u32 rpc :14;
+ u32 unused0: 1;
+ u32 sf : 1;
+ u32 spc :14;
+ u32 unused2: 1;
+ u32 df : 1;
+ u32 epc :14;
+ u32 lm : 2;
+} __attribute__ ((packed));
+
+/**
+ * struct sdma_context_data - sdma context specific to a channel
+ *
+ * @channel_state: channel state bits
+ * @gReg: general registers
+ * @mda: burst dma destination address register
+ * @msa: burst dma source address register
+ * @ms: burst dma status register
+ * @md: burst dma data register
+ * @pda: peripheral dma destination address register
+ * @psa: peripheral dma source address register
+ * @ps: peripheral dma status register
+ * @pd: peripheral dma data register
+ * @ca: CRC polynomial register
+ * @cs: CRC accumulator register
+ * @dda: dedicated core destination address register
+ * @dsa: dedicated core source address register
+ * @ds: dedicated core status register
+ * @dd: dedicated core data register
+ */
+struct sdma_context_data {
+ struct sdma_state_registers channel_state;
+ u32 gReg[8];
+ u32 mda;
+ u32 msa;
+ u32 ms;
+ u32 md;
+ u32 pda;
+ u32 psa;
+ u32 ps;
+ u32 pd;
+ u32 ca;
+ u32 cs;
+ u32 dda;
+ u32 dsa;
+ u32 ds;
+ u32 dd;
+ u32 scratch0;
+ u32 scratch1;
+ u32 scratch2;
+ u32 scratch3;
+ u32 scratch4;
+ u32 scratch5;
+ u32 scratch6;
+ u32 scratch7;
+} __attribute__ ((packed));
+
+#define NUM_BD (int)(PAGE_SIZE / sizeof(struct sdma_buffer_descriptor))
+
+struct sdma_engine;
+
+/**
+ * struct sdma_channel - housekeeping for a SDMA channel
+ *
+ * @sdma pointer to the SDMA engine for this channel
+ * @channel the channel number, matches dmaengine chan_id
+ * @direction transfer type. Needed for setting SDMA script
+ * @peripheral_type Peripheral type. Needed for setting SDMA script
+ * @event_id0 aka dma request line
+ * @event_id1 for channels that use 2 events
+ * @word_size peripheral access size
+ * @buf_tail ID of the buffer that was processed
+ * @done channel completion
+ * @num_bd max NUM_BD. number of descriptors currently being handled
+ */
+struct sdma_channel {
+ struct sdma_engine *sdma;
+ unsigned int channel;
+ enum dma_data_direction direction;
+ enum sdma_peripheral_type peripheral_type;
+ unsigned int event_id0;
+ unsigned int event_id1;
+ enum dma_slave_buswidth word_size;
+ unsigned int buf_tail;
+ struct completion done;
+ unsigned int num_bd;
+ struct sdma_buffer_descriptor *bd;
+ dma_addr_t bd_phys;
+ unsigned int pc_from_device, pc_to_device;
+ unsigned long flags;
+ dma_addr_t per_address;
+ u32 event_mask0, event_mask1;
+ u32 watermark_level;
+ u32 shp_addr, per_addr;
+ struct dma_chan chan;
+ spinlock_t lock;
+ struct dma_async_tx_descriptor desc;
+ dma_cookie_t last_completed;
+ enum dma_status status;
+};
+
+#define IMX_DMA_SG_LOOP (1 << 0)
+
+#define MAX_DMA_CHANNELS 32
+#define MXC_SDMA_DEFAULT_PRIORITY 1
+#define MXC_SDMA_MIN_PRIORITY 1
+#define MXC_SDMA_MAX_PRIORITY 7
+
+/**
+ * struct sdma_script_start_addrs - SDMA script start pointers
+ *
+ * start addresses of the different functions in the physical
+ * address space of the SDMA engine.
+ */
+struct sdma_script_start_addrs {
+ u32 ap_2_ap_addr;
+ u32 ap_2_bp_addr;
+ u32 ap_2_ap_fixed_addr;
+ u32 bp_2_ap_addr;
+ u32 loopback_on_dsp_side_addr;
+ u32 mcu_interrupt_only_addr;
+ u32 firi_2_per_addr;
+ u32 firi_2_mcu_addr;
+ u32 per_2_firi_addr;
+ u32 mcu_2_firi_addr;
+ u32 uart_2_per_addr;
+ u32 uart_2_mcu_addr;
+ u32 per_2_app_addr;
+ u32 mcu_2_app_addr;
+ u32 per_2_per_addr;
+ u32 uartsh_2_per_addr;
+ u32 uartsh_2_mcu_addr;
+ u32 per_2_shp_addr;
+ u32 mcu_2_shp_addr;
+ u32 ata_2_mcu_addr;
+ u32 mcu_2_ata_addr;
+ u32 app_2_per_addr;
+ u32 app_2_mcu_addr;
+ u32 shp_2_per_addr;
+ u32 shp_2_mcu_addr;
+ u32 mshc_2_mcu_addr;
+ u32 mcu_2_mshc_addr;
+ u32 spdif_2_mcu_addr;
+ u32 mcu_2_spdif_addr;
+ u32 asrc_2_mcu_addr;
+ u32 ext_mem_2_ipu_addr;
+ u32 descrambler_addr;
+ u32 dptc_dvfs_addr;
+ u32 utra_addr;
+ u32 ram_code_start_addr;
+};
+
+#define SDMA_FIRMWARE_MAGIC 0x414d4453
+
+/**
+ * struct sdma_firmware_header - Layout of the firmware image
+ *
+ * @magic "SDMA"
+ * @version_major increased whenever layout of struct sdma_script_start_addrs
+ * changes.
+ * @version_minor firmware minor version (for binary compatible changes)
+ * @script_addrs_start offset of struct sdma_script_start_addrs in this image
+ * @num_script_addrs Number of script addresses in this image
+ * @ram_code_start offset of SDMA ram image in this firmware image
+ * @ram_code_size size of SDMA ram image
+ * @script_addrs Stores the start address of the SDMA scripts
+ * (in SDMA memory space)
+ */
+struct sdma_firmware_header {
+ u32 magic;
+ u32 version_major;
+ u32 version_minor;
+ u32 script_addrs_start;
+ u32 num_script_addrs;
+ u32 ram_code_start;
+ u32 ram_code_size;
+};
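+
+/*
+ * Hypothetical example (not built into this driver): a minimal sanity
+ * check of a firmware blob laid out as described above, before any of
+ * its scripts are loaded into the SDMA engine.
+ */
+static int example_sdma_check_firmware(const struct firmware *fw)
+{
+ const struct sdma_firmware_header *header;
+
+ if (fw->size < sizeof(*header))
+ return -EINVAL;
+
+ header = (const struct sdma_firmware_header *) fw->data;
+
+ if (header->magic != SDMA_FIRMWARE_MAGIC)
+ return -EINVAL;
+ if (header->ram_code_start + header->ram_code_size > fw->size)
+ return -EINVAL;
+
+ return 0;
+}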
+
+struct sdma_engine {
+ struct device *dev;
+ struct sdma_channel channel[MAX_DMA_CHANNELS];
+ struct sdma_channel_control *channel_control;
+ void __iomem *regs;
+ unsigned int version;
+ unsigned int num_events;
+ struct sdma_context_data *context;
+ dma_addr_t context_phys;
+ struct dma_device dma_device;
+ struct clk *clk;
+ struct sdma_script_start_addrs *script_addrs;
+};
+
+#define SDMA_H_CONFIG_DSPDMA (1 << 12) /* indicates if the DSPDMA is used */
+#define SDMA_H_CONFIG_RTD_PINS (1 << 11) /* indicates if Real-Time Debug pins are enabled */
+#define SDMA_H_CONFIG_ACR (1 << 4) /* indicates if AHB freq /core freq = 2 or 1 */
+#define SDMA_H_CONFIG_CSM (3) /* indicates which context switch mode is selected*/
+
+static inline u32 chnenbl_ofs(struct sdma_engine *sdma, unsigned int event)
+{
+ u32 chnenbl0 = (sdma->version == 2 ? SDMA_CHNENBL0_V2 : SDMA_CHNENBL0_V1);
+
+ return chnenbl0 + event * 4;
+}
+
+static int sdma_config_ownership(struct sdma_channel *sdmac,
+ bool event_override, bool mcu_override, bool dsp_override)
+{
+ struct sdma_engine *sdma = sdmac->sdma;
+ int channel = sdmac->channel;
+ u32 evt, mcu, dsp;
+
+ if (event_override && mcu_override && dsp_override)
+ return -EINVAL;
+
+ evt = __raw_readl(sdma->regs + SDMA_H_EVTOVR);
+ mcu = __raw_readl(sdma->regs + SDMA_H_HOSTOVR);
+ dsp = __raw_readl(sdma->regs + SDMA_H_DSPOVR);
+
+ if (dsp_override)
+ dsp &= ~(1 << channel);
+ else
+ dsp |= (1 << channel);
+
+ if (event_override)
+ evt &= ~(1 << channel);
+ else
+ evt |= (1 << channel);
+
+ if (mcu_override)
+ mcu &= ~(1 << channel);
+ else
+ mcu |= (1 << channel);
+
+ __raw_writel(evt, sdma->regs + SDMA_H_EVTOVR);
+ __raw_writel(mcu, sdma->regs + SDMA_H_HOSTOVR);
+ __raw_writel(dsp, sdma->regs + SDMA_H_DSPOVR);
+
+ return 0;
+}
+
+/*
+ * sdma_run_channel - run a channel and wait till it's done
+ */
+static int sdma_run_channel(struct sdma_channel *sdmac)
+{
+ struct sdma_engine *sdma = sdmac->sdma;
+ int channel = sdmac->channel;
+ int ret;
+
+ init_completion(&sdmac->done);
+
+ __raw_writel(1 << channel, sdma->regs + SDMA_H_START);
+
+ ret = wait_for_completion_timeout(&sdmac->done, HZ);
+
+ return ret ? 0 : -ETIMEDOUT;
+}
+
+static int sdma_load_script(struct sdma_engine *sdma, void *buf, int size,
+ u32 address)
+{
+ struct sdma_buffer_descriptor *bd0 = sdma->channel[0].bd;
+ void *buf_virt;
+ dma_addr_t buf_phys;
+ int ret;
+
+ buf_virt = dma_alloc_coherent(NULL,
+ size,
+ &buf_phys, GFP_KERNEL);
+ if (!buf_virt)
+ return -ENOMEM;
+
+ bd0->mode.command = C0_SETPM;
+ bd0->mode.status = BD_DONE | BD_INTR | BD_WRAP | BD_EXTD;
+ bd0->mode.count = size / 2;
+ bd0->buffer_addr = buf_phys;
+ bd0->ext_buffer_addr = address;
+
+ memcpy(buf_virt, buf, size);
+
+ ret = sdma_run_channel(&sdma->channel[0]);
+
+ dma_free_coherent(NULL, size, buf_virt, buf_phys);
+
+ return ret;
+}
+
+static void sdma_event_enable(struct sdma_channel *sdmac, unsigned int event)
+{
+ struct sdma_engine *sdma = sdmac->sdma;
+ int channel = sdmac->channel;
+ u32 val;
+ u32 chnenbl = chnenbl_ofs(sdma, event);
+
+ val = __raw_readl(sdma->regs + chnenbl);
+ val |= (1 << channel);
+ __raw_writel(val, sdma->regs + chnenbl);
+}
+
+static void sdma_event_disable(struct sdma_channel *sdmac, unsigned int event)
+{
+ struct sdma_engine *sdma = sdmac->sdma;
+ int channel = sdmac->channel;
+ u32 chnenbl = chnenbl_ofs(sdma, event);
+ u32 val;
+
+ val = __raw_readl(sdma->regs + chnenbl);
+ val &= ~(1 << channel);
+ __raw_writel(val, sdma->regs + chnenbl);
+}
+
+static void sdma_handle_channel_loop(struct sdma_channel *sdmac)
+{
+ struct sdma_buffer_descriptor *bd;
+
+ /*
+ * loop mode. Iterate over the descriptors, set them up again and
+ * call the callback function.
+ */
+ while (1) {
+ bd = &sdmac->bd[sdmac->buf_tail];
+
+ if (bd->mode.status & BD_DONE)
+ break;
+
+ if (bd->mode.status & BD_RROR)
+ sdmac->status = DMA_ERROR;
+ else
+ sdmac->status = DMA_SUCCESS;
+
+ bd->mode.status |= BD_DONE;
+ sdmac->buf_tail++;
+ sdmac->buf_tail %= sdmac->num_bd;
+
+ if (sdmac->desc.callback)
+ sdmac->desc.callback(sdmac->desc.callback_param);
+ }
+}
+
+static void mxc_sdma_handle_channel_normal(struct sdma_channel *sdmac)
+{
+ struct sdma_buffer_descriptor *bd;
+ int i, error = 0;
+
+ /*
+ * non-loop mode. Iterate over all descriptors, collect
+ * errors and call the callback function
+ */
+ for (i = 0; i < sdmac->num_bd; i++) {
+ bd = &sdmac->bd[i];
+
+ if (bd->mode.status & (BD_DONE | BD_RROR))
+ error = -EIO;
+ }
+
+ if (error)
+ sdmac->status = DMA_ERROR;
+ else
+ sdmac->status = DMA_SUCCESS;
+
+ if (sdmac->desc.callback)
+ sdmac->desc.callback(sdmac->desc.callback_param);
+ sdmac->last_completed = sdmac->desc.cookie;
+}
+
+static void mxc_sdma_handle_channel(struct sdma_channel *sdmac)
+{
+ complete(&sdmac->done);
+
+ /* not interested in channel 0 interrupts */
+ if (sdmac->channel == 0)
+ return;
+
+ if (sdmac->flags & IMX_DMA_SG_LOOP)
+ sdma_handle_channel_loop(sdmac);
+ else
+ mxc_sdma_handle_channel_normal(sdmac);
+}
+
+static irqreturn_t sdma_int_handler(int irq, void *dev_id)
+{
+ struct sdma_engine *sdma = dev_id;
+ u32 stat;
+
+ stat = __raw_readl(sdma->regs + SDMA_H_INTR);
+ __raw_writel(stat, sdma->regs + SDMA_H_INTR);
+
+ while (stat) {
+ int channel = fls(stat) - 1;
+ struct sdma_channel *sdmac = &sdma->channel[channel];
+
+ mxc_sdma_handle_channel(sdmac);
+
+ stat &= ~(1 << channel);
+ }
+
+ return IRQ_HANDLED;
+}
+
+/*
+ * sets the pc of SDMA script according to the peripheral type
+ */
+static void sdma_get_pc(struct sdma_channel *sdmac,
+ enum sdma_peripheral_type peripheral_type)
+{
+ struct sdma_engine *sdma = sdmac->sdma;
+ int per_2_emi = 0, emi_2_per = 0;
+ /*
+ * These are needed once we start to support transfers between
+ * two peripherals or memory-to-memory transfers
+ */
+ int per_2_per = 0, emi_2_emi = 0;
+
+ sdmac->pc_from_device = 0;
+ sdmac->pc_to_device = 0;
+
+ switch (peripheral_type) {
+ case IMX_DMATYPE_MEMORY:
+ emi_2_emi = sdma->script_addrs->ap_2_ap_addr;
+ break;
+ case IMX_DMATYPE_DSP:
+ emi_2_per = sdma->script_addrs->bp_2_ap_addr;
+ per_2_emi = sdma->script_addrs->ap_2_bp_addr;
+ break;
+ case IMX_DMATYPE_FIRI:
+ per_2_emi = sdma->script_addrs->firi_2_mcu_addr;
+ emi_2_per = sdma->script_addrs->mcu_2_firi_addr;
+ break;
+ case IMX_DMATYPE_UART:
+ per_2_emi = sdma->script_addrs->uart_2_mcu_addr;
+ emi_2_per = sdma->script_addrs->mcu_2_app_addr;
+ break;
+ case IMX_DMATYPE_UART_SP:
+ per_2_emi = sdma->script_addrs->uartsh_2_mcu_addr;
+ emi_2_per = sdma->script_addrs->mcu_2_shp_addr;
+ break;
+ case IMX_DMATYPE_ATA:
+ per_2_emi = sdma->script_addrs->ata_2_mcu_addr;
+ emi_2_per = sdma->script_addrs->mcu_2_ata_addr;
+ break;
+ case IMX_DMATYPE_CSPI:
+ case IMX_DMATYPE_EXT:
+ case IMX_DMATYPE_SSI:
+ per_2_emi = sdma->script_addrs->app_2_mcu_addr;
+ emi_2_per = sdma->script_addrs->mcu_2_app_addr;
+ break;
+ case IMX_DMATYPE_SSI_SP:
+ case IMX_DMATYPE_MMC:
+ case IMX_DMATYPE_SDHC:
+ case IMX_DMATYPE_CSPI_SP:
+ case IMX_DMATYPE_ESAI:
+ case IMX_DMATYPE_MSHC_SP:
+ per_2_emi = sdma->script_addrs->shp_2_mcu_addr;
+ emi_2_per = sdma->script_addrs->mcu_2_shp_addr;
+ break;
+ case IMX_DMATYPE_ASRC:
+ per_2_emi = sdma->script_addrs->asrc_2_mcu_addr;
+ emi_2_per = sdma->script_addrs->asrc_2_mcu_addr;
+ per_2_per = sdma->script_addrs->per_2_per_addr;
+ break;
+ case IMX_DMATYPE_MSHC:
+ per_2_emi = sdma->script_addrs->mshc_2_mcu_addr;
+ emi_2_per = sdma->script_addrs->mcu_2_mshc_addr;
+ break;
+ case IMX_DMATYPE_CCM:
+ per_2_emi = sdma->script_addrs->dptc_dvfs_addr;
+ break;
+ case IMX_DMATYPE_SPDIF:
+ per_2_emi = sdma->script_addrs->spdif_2_mcu_addr;
+ emi_2_per = sdma->script_addrs->mcu_2_spdif_addr;
+ break;
+ case IMX_DMATYPE_IPU_MEMORY:
+ emi_2_per = sdma->script_addrs->ext_mem_2_ipu_addr;
+ break;
+ default:
+ break;
+ }
+
+ sdmac->pc_from_device = per_2_emi;
+ sdmac->pc_to_device = emi_2_per;
+}
+
+static int sdma_load_context(struct sdma_channel *sdmac)
+{
+ struct sdma_engine *sdma = sdmac->sdma;
+ int channel = sdmac->channel;
+ int load_address;
+ struct sdma_context_data *context = sdma->context;
+ struct sdma_buffer_descriptor *bd0 = sdma->channel[0].bd;
+ int ret;
+
+ if (sdmac->direction == DMA_FROM_DEVICE) {
+ load_address = sdmac->pc_from_device;
+ } else {
+ load_address = sdmac->pc_to_device;
+ }
+
+ if (load_address < 0)
+ return load_address;
+
+ dev_dbg(sdma->dev, "load_address = %d\n", load_address);
+ dev_dbg(sdma->dev, "wml = 0x%08x\n", sdmac->watermark_level);
+ dev_dbg(sdma->dev, "shp_addr = 0x%08x\n", sdmac->shp_addr);
+ dev_dbg(sdma->dev, "per_addr = 0x%08x\n", sdmac->per_addr);
+ dev_dbg(sdma->dev, "event_mask0 = 0x%08x\n", sdmac->event_mask0);
+ dev_dbg(sdma->dev, "event_mask1 = 0x%08x\n", sdmac->event_mask1);
+
+ memset(context, 0, sizeof(*context));
+ context->channel_state.pc = load_address;
+
+ /* Send the event mask, peripheral base address and
+ * watermark level to the script via the channel context
+ */
+ context->gReg[0] = sdmac->event_mask1;
+ context->gReg[1] = sdmac->event_mask0;
+ context->gReg[2] = sdmac->per_addr;
+ context->gReg[6] = sdmac->shp_addr;
+ context->gReg[7] = sdmac->watermark_level;
+
+ bd0->mode.command = C0_SETDM;
+ bd0->mode.status = BD_DONE | BD_INTR | BD_WRAP | BD_EXTD;
+ bd0->mode.count = sizeof(*context) / 4;
+ bd0->buffer_addr = sdma->context_phys;
+ bd0->ext_buffer_addr = 2048 + (sizeof(*context) / 4) * channel;
+
+ ret = sdma_run_channel(&sdma->channel[0]);
+
+ return ret;
+}
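The ext_buffer_addr arithmetic above is where the per-channel context ends up inside the SDMA engine's internal RAM: contexts are stored one per channel starting at RAM word 2048, and the C0_SETDM command takes word addresses. A minimal sketch restating that calculation (helper name is illustrative, not part of the patch):

static u32 sdma_context_ram_word(int channel, size_t context_bytes)
{
    /* mirrors bd0->ext_buffer_addr = 2048 + (sizeof(*context) / 4) * channel;
     * e.g. channel 3 with a 128-byte context: 2048 + 32 * 3 = 2144
     */
    return 2048 + (context_bytes / 4) * channel;
}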
+
+static void sdma_disable_channel(struct sdma_channel *sdmac)
+{
+ struct sdma_engine *sdma = sdmac->sdma;
+ int channel = sdmac->channel;
+
+ __raw_writel(1 << channel, sdma->regs + SDMA_H_STATSTOP);
+ sdmac->status = DMA_ERROR;
+}
+
+static int sdma_config_channel(struct sdma_channel *sdmac)
+{
+ int ret;
+
+ sdma_disable_channel(sdmac);
+
+ sdmac->event_mask0 = 0;
+ sdmac->event_mask1 = 0;
+ sdmac->shp_addr = 0;
+ sdmac->per_addr = 0;
+
+ if (sdmac->event_id0) {
+ if (sdmac->event_id0 > 32)
+ return -EINVAL;
+ sdma_event_enable(sdmac, sdmac->event_id0);
+ }
+
+ switch (sdmac->peripheral_type) {
+ case IMX_DMATYPE_DSP:
+ sdma_config_ownership(sdmac, false, true, true);
+ break;
+ case IMX_DMATYPE_MEMORY:
+ sdma_config_ownership(sdmac, false, true, false);
+ break;
+ default:
+ sdma_config_ownership(sdmac, true, true, false);
+ break;
+ }
+
+ sdma_get_pc(sdmac, sdmac->peripheral_type);
+
+ if ((sdmac->peripheral_type != IMX_DMATYPE_MEMORY) &&
+ (sdmac->peripheral_type != IMX_DMATYPE_DSP)) {
+ /* Handle multiple event channels differently */
+ if (sdmac->event_id1) {
+ sdmac->event_mask1 = 1 << (sdmac->event_id1 % 32);
+ if (sdmac->event_id1 > 31)
+ sdmac->watermark_level |= 1 << 31;
+ sdmac->event_mask0 = 1 << (sdmac->event_id0 % 32);
+ if (sdmac->event_id0 > 31)
+ sdmac->watermark_level |= 1 << 30;
+ } else {
+ sdmac->event_mask0 = 1 << sdmac->event_id0;
+ sdmac->event_mask1 = 1 << (sdmac->event_id0 - 32);
+ }
+ /* Watermark Level */
+ sdmac->watermark_level |= sdmac->watermark_level;
+ /* Address */
+ sdmac->shp_addr = sdmac->per_address;
+ } else {
+ sdmac->watermark_level = 0; /* FIXME: M3_BASE_ADDRESS */
+ }
+
+ ret = sdma_load_context(sdmac);
+
+ return ret;
+}
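For channels driven by two DMA request lines, the masks built above split the (up to 48) event lines across two 32-bit registers, with the top two watermark bits telling the script which half each event belongs to. A small worked sketch of that encoding (struct and function names are illustrative only):

struct sdma_event_masks { u32 mask0, mask1, wml_flags; };

static struct sdma_event_masks sdma_encode_events(unsigned int ev0, unsigned int ev1)
{
    struct sdma_event_masks m = {
        .mask0 = 1 << (ev0 % 32),
        .mask1 = 1 << (ev1 % 32),
    };

    if (ev0 > 31)
        m.wml_flags |= 1 << 30;    /* event 0 lives in the upper event bank */
    if (ev1 > 31)
        m.wml_flags |= 1 << 31;    /* event 1 lives in the upper event bank */

    /* ev0 = 1, ev1 = 40  ->  mask0 = 0x2, mask1 = 0x100, wml_flags = 1 << 31 */
    return m;
}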
+
+static int sdma_set_channel_priority(struct sdma_channel *sdmac,
+ unsigned int priority)
+{
+ struct sdma_engine *sdma = sdmac->sdma;
+ int channel = sdmac->channel;
+
+ if (priority < MXC_SDMA_MIN_PRIORITY
+ || priority > MXC_SDMA_MAX_PRIORITY) {
+ return -EINVAL;
+ }
+
+ __raw_writel(priority, sdma->regs + SDMA_CHNPRI_0 + 4 * channel);
+
+ return 0;
+}
+
+static int sdma_request_channel(struct sdma_channel *sdmac)
+{
+ struct sdma_engine *sdma = sdmac->sdma;
+ int channel = sdmac->channel;
+ int ret = -EBUSY;
+
+ sdmac->bd = dma_alloc_coherent(NULL, PAGE_SIZE, &sdmac->bd_phys, GFP_KERNEL);
+ if (!sdmac->bd) {
+ ret = -ENOMEM;
+ goto out;
+ }
+
+ memset(sdmac->bd, 0, PAGE_SIZE);
+
+ sdma->channel_control[channel].base_bd_ptr = sdmac->bd_phys;
+ sdma->channel_control[channel].current_bd_ptr = sdmac->bd_phys;
+
+ clk_enable(sdma->clk);
+
+ sdma_set_channel_priority(sdmac, MXC_SDMA_DEFAULT_PRIORITY);
+
+ init_completion(&sdmac->done);
+
+ sdmac->buf_tail = 0;
+
+ return 0;
+out:
+
+ return ret;
+}
+
+static void sdma_enable_channel(struct sdma_engine *sdma, int channel)
+{
+ __raw_writel(1 << channel, sdma->regs + SDMA_H_START);
+}
+
+static dma_cookie_t sdma_assign_cookie(struct sdma_channel *sdma)
+{
+ dma_cookie_t cookie = sdma->chan.cookie;
+
+ if (++cookie < 0)
+ cookie = 1;
+
+ sdma->chan.cookie = cookie;
+ sdma->desc.cookie = cookie;
+
+ return cookie;
+}
+
+static struct sdma_channel *to_sdma_chan(struct dma_chan *chan)
+{
+ return container_of(chan, struct sdma_channel, chan);
+}
+
+static dma_cookie_t sdma_tx_submit(struct dma_async_tx_descriptor *tx)
+{
+ struct sdma_channel *sdmac = to_sdma_chan(tx->chan);
+ struct sdma_engine *sdma = sdmac->sdma;
+ dma_cookie_t cookie;
+
+ spin_lock_irq(&sdmac->lock);
+
+ cookie = sdma_assign_cookie(sdmac);
+
+ sdma_enable_channel(sdma, tx->chan->chan_id);
+
+ spin_unlock_irq(&sdmac->lock);
+
+ return cookie;
+}
+
+static int sdma_alloc_chan_resources(struct dma_chan *chan)
+{
+ struct sdma_channel *sdmac = to_sdma_chan(chan);
+ struct imx_dma_data *data = chan->private;
+ int prio, ret;
+
+ /* No need to execute this for internal channel 0 */
+ if (chan->chan_id == 0)
+ return 0;
+
+ if (!data)
+ return -EINVAL;
+
+ switch (data->priority) {
+ case DMA_PRIO_HIGH:
+ prio = 3;
+ break;
+ case DMA_PRIO_MEDIUM:
+ prio = 2;
+ break;
+ case DMA_PRIO_LOW:
+ default:
+ prio = 1;
+ break;
+ }
+
+ sdmac->peripheral_type = data->peripheral_type;
+ sdmac->event_id0 = data->dma_request;
+ ret = sdma_set_channel_priority(sdmac, prio);
+ if (ret)
+ return ret;
+
+ ret = sdma_request_channel(sdmac);
+ if (ret)
+ return ret;
+
+ dma_async_tx_descriptor_init(&sdmac->desc, chan);
+ sdmac->desc.tx_submit = sdma_tx_submit;
+ /* txd.flags will be overwritten in prep funcs */
+ sdmac->desc.flags = DMA_CTRL_ACK;
+
+ return 0;
+}
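chan->private is expected to carry a struct imx_dma_data describing the peripheral, which is what feeds the priority, event line and script selection above. A hedged sketch of how a client might request a channel (the filter function and the field values are invented for illustration; a real filter would also verify the channel belongs to the SDMA device):

static bool sdma_example_filter(struct dma_chan *chan, void *param)
{
    chan->private = param;    /* consumed by sdma_alloc_chan_resources() */
    return true;
}

static struct dma_chan *sdma_example_request(void)
{
    static struct imx_dma_data data = {
        .peripheral_type = IMX_DMATYPE_UART,    /* picks uart_2_mcu/mcu_2_app scripts */
        .dma_request     = 25,                  /* hypothetical SDMA event line */
        .priority        = DMA_PRIO_HIGH,       /* mapped to priority 3 above */
    };
    dma_cap_mask_t mask;

    dma_cap_zero(mask);
    dma_cap_set(DMA_SLAVE, mask);
    return dma_request_channel(mask, sdma_example_filter, &data);
}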
+
+static void sdma_free_chan_resources(struct dma_chan *chan)
+{
+ struct sdma_channel *sdmac = to_sdma_chan(chan);
+ struct sdma_engine *sdma = sdmac->sdma;
+
+ sdma_disable_channel(sdmac);
+
+ if (sdmac->event_id0)
+ sdma_event_disable(sdmac, sdmac->event_id0);
+ if (sdmac->event_id1)
+ sdma_event_disable(sdmac, sdmac->event_id1);
+
+ sdmac->event_id0 = 0;
+ sdmac->event_id1 = 0;
+
+ sdma_set_channel_priority(sdmac, 0);
+
+ dma_free_coherent(NULL, PAGE_SIZE, sdmac->bd, sdmac->bd_phys);
+
+ clk_disable(sdma->clk);
+}
+
+static struct dma_async_tx_descriptor *sdma_prep_slave_sg(
+ struct dma_chan *chan, struct scatterlist *sgl,
+ unsigned int sg_len, enum dma_data_direction direction,
+ unsigned long flags)
+{
+ struct sdma_channel *sdmac = to_sdma_chan(chan);
+ struct sdma_engine *sdma = sdmac->sdma;
+ int ret, i, count;
+ int channel = chan->chan_id;
+ struct scatterlist *sg;
+
+ if (sdmac->status == DMA_IN_PROGRESS)
+ return NULL;
+ sdmac->status = DMA_IN_PROGRESS;
+
+ sdmac->flags = 0;
+
+ dev_dbg(sdma->dev, "setting up %d entries for channel %d.\n",
+ sg_len, channel);
+
+ sdmac->direction = direction;
+ ret = sdma_load_context(sdmac);
+ if (ret)
+ goto err_out;
+
+ if (sg_len > NUM_BD) {
+ dev_err(sdma->dev, "SDMA channel %d: maximum number of sg exceeded: %d > %d\n",
+ channel, sg_len, NUM_BD);
+ ret = -EINVAL;
+ goto err_out;
+ }
+
+ for_each_sg(sgl, sg, sg_len, i) {
+ struct sdma_buffer_descriptor *bd = &sdmac->bd[i];
+ int param;
+
+ bd->buffer_addr = sg->dma_address;
+
+ count = sg->length;
+
+ if (count > 0xffff) {
+ dev_err(sdma->dev, "SDMA channel %d: maximum bytes for sg entry exceeded: %d > %d\n",
+ channel, count, 0xffff);
+ ret = -EINVAL;
+ goto err_out;
+ }
+
+ bd->mode.count = count;
+
+ if (sdmac->word_size > DMA_SLAVE_BUSWIDTH_4_BYTES) {
+ ret = -EINVAL;
+ goto err_out;
+ }
+ if (sdmac->word_size == DMA_SLAVE_BUSWIDTH_4_BYTES)
+ bd->mode.command = 0;
+ else
+ bd->mode.command = sdmac->word_size;
+
+ param = BD_DONE | BD_EXTD | BD_CONT;
+
+ if (sdmac->flags & IMX_DMA_SG_LOOP) {
+ param |= BD_INTR;
+ if (i + 1 == sg_len)
+ param |= BD_WRAP;
+ }
+
+ if (i + 1 == sg_len)
+ param |= BD_INTR;
+
+ dev_dbg(sdma->dev, "entry %d: count: %d dma: 0x%08x %s%s\n",
+ i, count, sg->dma_address,
+ param & BD_WRAP ? "wrap" : "",
+ param & BD_INTR ? " intr" : "");
+
+ bd->mode.status = param;
+ }
+
+ sdmac->num_bd = sg_len;
+ sdma->channel_control[channel].current_bd_ptr = sdmac->bd_phys;
+
+ return &sdmac->desc;
+err_out:
+ return NULL;
+}
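A short note on the mode.command values chosen in the loop above; this is a reading of the code, which relies on the DMA_SLAVE_BUSWIDTH_* enum values matching the byte counts:

/*
 *   DMA_SLAVE_BUSWIDTH_1_BYTE  (1) -> bd->mode.command = 1  (8-bit accesses)
 *   DMA_SLAVE_BUSWIDTH_2_BYTES (2) -> bd->mode.command = 2  (16-bit accesses)
 *   DMA_SLAVE_BUSWIDTH_4_BYTES (4) -> bd->mode.command = 0  (32-bit accesses)
 *   anything wider                 -> rejected with -EINVAL
 */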
+
+static struct dma_async_tx_descriptor *sdma_prep_dma_cyclic(
+ struct dma_chan *chan, dma_addr_t dma_addr, size_t buf_len,
+ size_t period_len, enum dma_data_direction direction)
+{
+ struct sdma_channel *sdmac = to_sdma_chan(chan);
+ struct sdma_engine *sdma = sdmac->sdma;
+ int num_periods = buf_len / period_len;
+ int channel = chan->chan_id;
+ int ret, i = 0, buf = 0;
+
+ dev_dbg(sdma->dev, "%s channel: %d\n", __func__, channel);
+
+ if (sdmac->status == DMA_IN_PROGRESS)
+ return NULL;
+
+ sdmac->status = DMA_IN_PROGRESS;
+
+ sdmac->flags |= IMX_DMA_SG_LOOP;
+ sdmac->direction = direction;
+ ret = sdma_load_context(sdmac);
+ if (ret)
+ goto err_out;
+
+ if (num_periods > NUM_BD) {
+ dev_err(sdma->dev, "SDMA channel %d: maximum number of sg exceeded: %d > %d\n",
+ channel, num_periods, NUM_BD);
+ goto err_out;
+ }
+
+ if (period_len > 0xffff) {
+ dev_err(sdma->dev, "SDMA channel %d: maximum period size exceeded: %d > %d\n",
+ channel, period_len, 0xffff);
+ goto err_out;
+ }
+
+ while (buf < buf_len) {
+ struct sdma_buffer_descriptor *bd = &sdmac->bd[i];
+ int param;
+
+ bd->buffer_addr = dma_addr;
+
+ bd->mode.count = period_len;
+
+ if (sdmac->word_size > DMA_SLAVE_BUSWIDTH_4_BYTES)
+ goto err_out;
+ if (sdmac->word_size == DMA_SLAVE_BUSWIDTH_4_BYTES)
+ bd->mode.command = 0;
+ else
+ bd->mode.command = sdmac->word_size;
+
+ param = BD_DONE | BD_EXTD | BD_CONT | BD_INTR;
+ if (i + 1 == num_periods)
+ param |= BD_WRAP;
+
+ dev_dbg(sdma->dev, "entry %d: count: %d dma: 0x%08x %s%s\n",
+ i, period_len, dma_addr,
+ param & BD_WRAP ? "wrap" : "",
+ param & BD_INTR ? " intr" : "");
+
+ bd->mode.status = param;
+
+ dma_addr += period_len;
+ buf += period_len;
+
+ i++;
+ }
+
+ sdmac->num_bd = num_periods;
+ sdma->channel_control[channel].current_bd_ptr = sdmac->bd_phys;
+
+ return &sdmac->desc;
+err_out:
+ sdmac->status = DMA_ERROR;
+ return NULL;
+}
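A hedged usage sketch of the cyclic path, e.g. for an audio ring buffer; the buffer sizes are made up, and the call goes through the raw device_prep_dma_cyclic hook since this kernel generation has no wrapper for it:

static struct dma_async_tx_descriptor *sdma_example_cyclic(struct dma_chan *chan,
                                                           dma_addr_t buf)
{
    size_t buf_len = 16 * 1024, period_len = 4 * 1024;

    /* 16 KiB / 4 KiB = 4 descriptors; each gets BD_INTR, the last also BD_WRAP */
    return chan->device->device_prep_dma_cyclic(chan, buf, buf_len,
                                                period_len, DMA_FROM_DEVICE);
}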
+
+static int sdma_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd,
+ unsigned long arg)
+{
+ struct sdma_channel *sdmac = to_sdma_chan(chan);
+ struct dma_slave_config *dmaengine_cfg = (void *)arg;
+
+ switch (cmd) {
+ case DMA_TERMINATE_ALL:
+ sdma_disable_channel(sdmac);
+ return 0;
+ case DMA_SLAVE_CONFIG:
+ if (dmaengine_cfg->direction == DMA_FROM_DEVICE) {
+ sdmac->per_address = dmaengine_cfg->src_addr;
+ sdmac->watermark_level = dmaengine_cfg->src_maxburst;
+ sdmac->word_size = dmaengine_cfg->src_addr_width;
+ } else {
+ sdmac->per_address = dmaengine_cfg->dst_addr;
+ sdmac->watermark_level = dmaengine_cfg->dst_maxburst;
+ sdmac->word_size = dmaengine_cfg->dst_addr_width;
+ }
+ return sdma_config_channel(sdmac);
+ default:
+ return -ENOSYS;
+ }
+
+ return -EINVAL;
+}
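Putting the control and prep paths together, a client-side sequence might look like the sketch below. The peripheral FIFO address and burst size are invented, and the calls go through the raw device_control/device_prep_slave_sg hooks as elsewhere in this patch:

static int sdma_example_start_rx(struct dma_chan *chan,
                                 struct scatterlist *sgl, int nents)
{
    struct dma_slave_config cfg = {
        .direction      = DMA_FROM_DEVICE,
        .src_addr       = 0x43f90000,                   /* hypothetical RX FIFO */
        .src_addr_width = DMA_SLAVE_BUSWIDTH_1_BYTE,
        .src_maxburst   = 8,                            /* becomes watermark_level */
    };
    struct dma_async_tx_descriptor *desc;
    int ret;

    ret = chan->device->device_control(chan, DMA_SLAVE_CONFIG,
                                       (unsigned long)&cfg);
    if (ret)
        return ret;

    desc = chan->device->device_prep_slave_sg(chan, sgl, nents,
                                              DMA_FROM_DEVICE, 0);
    if (!desc)
        return -EINVAL;

    desc->tx_submit(desc);    /* sdma_tx_submit() assigns a cookie and starts the channel */
    return 0;
}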
+
+static enum dma_status sdma_tx_status(struct dma_chan *chan,
+ dma_cookie_t cookie,
+ struct dma_tx_state *txstate)
+{
+ struct sdma_channel *sdmac = to_sdma_chan(chan);
+ dma_cookie_t last_used;
+ enum dma_status ret;
+
+ last_used = chan->cookie;
+
+ ret = dma_async_is_complete(cookie, sdmac->last_completed, last_used);
+ dma_set_tx_state(txstate, sdmac->last_completed, last_used, 0);
+
+ return ret;
+}
+
+static void sdma_issue_pending(struct dma_chan *chan)
+{
+ /*
+ * Nothing to do. We only have a single descriptor
+ */
+}
+
+static int __init sdma_init(struct sdma_engine *sdma,
+ void *ram_code, int ram_code_size)
+{
+ int i, ret;
+ dma_addr_t ccb_phys;
+
+ switch (sdma->version) {
+ case 1:
+ sdma->num_events = 32;
+ break;
+ case 2:
+ sdma->num_events = 48;
+ break;
+ default:
+ dev_err(sdma->dev, "Unknown version %d. aborting\n", sdma->version);
+ return -ENODEV;
+ }
+
+ clk_enable(sdma->clk);
+
+ /* Be sure SDMA has not started yet */
+ __raw_writel(0, sdma->regs + SDMA_H_C0PTR);
+
+ sdma->channel_control = dma_alloc_coherent(NULL,
+ MAX_DMA_CHANNELS * sizeof (struct sdma_channel_control) +
+ sizeof(struct sdma_context_data),
+ &ccb_phys, GFP_KERNEL);
+
+ if (!sdma->channel_control) {
+ ret = -ENOMEM;
+ goto err_dma_alloc;
+ }
+
+ sdma->context = (void *)sdma->channel_control +
+ MAX_DMA_CHANNELS * sizeof (struct sdma_channel_control);
+ sdma->context_phys = ccb_phys +
+ MAX_DMA_CHANNELS * sizeof (struct sdma_channel_control);
+
+ /* Zero-out the CCB structures array just allocated */
+ memset(sdma->channel_control, 0,
+ MAX_DMA_CHANNELS * sizeof (struct sdma_channel_control));
+
+ /* disable all channels */
+ for (i = 0; i < sdma->num_events; i++)
+ __raw_writel(0, sdma->regs + chnenbl_ofs(sdma, i));
+
+ /* All channels have priority 0 */
+ for (i = 0; i < MAX_DMA_CHANNELS; i++)
+ __raw_writel(0, sdma->regs + SDMA_CHNPRI_0 + i * 4);
+
+ ret = sdma_request_channel(&sdma->channel[0]);
+ if (ret)
+ goto err_dma_alloc;
+
+ sdma_config_ownership(&sdma->channel[0], false, true, false);
+
+ /* Set Command Channel (Channel Zero) */
+ __raw_writel(0x4050, sdma->regs + SDMA_CHN0ADDR);
+
+ /* Set bits of CONFIG register but with static context switching */
+ /* FIXME: Check whether to set ACR bit depending on clock ratios */
+ __raw_writel(0, sdma->regs + SDMA_H_CONFIG);
+
+ __raw_writel(ccb_phys, sdma->regs + SDMA_H_C0PTR);
+
+ /* download the RAM image for SDMA */
+ sdma_load_script(sdma, ram_code,
+ ram_code_size,
+ sdma->script_addrs->ram_code_start_addr);
+
+ /* Set bits of CONFIG register with given context switching mode */
+ __raw_writel(SDMA_H_CONFIG_CSM, sdma->regs + SDMA_H_CONFIG);
+
+ /* Initialize channel 0 priority */
+ sdma_set_channel_priority(&sdma->channel[0], 7);
+
+ clk_disable(sdma->clk);
+
+ return 0;
+
+err_dma_alloc:
+ clk_disable(sdma->clk);
+ dev_err(sdma->dev, "initialisation failed with %d\n", ret);
+ return ret;
+}
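The single coherent allocation above carries both the channel control blocks and the shared context staging area; the layout implied by the pointer arithmetic is sketched here, with nothing added beyond what the code states:

/*
 *   ccb_phys                                               -> channel_control[0]
 *   ...                                                    -> channel_control[MAX_DMA_CHANNELS - 1]
 *   ccb_phys + MAX_DMA_CHANNELS * sizeof(channel_control)  -> one sdma_context_data, reused by
 *                                                             sdma_load_context() as the
 *                                                             C0_SETDM source buffer
 */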
+
+static int __init sdma_probe(struct platform_device *pdev)
+{
+ int ret;
+ const struct firmware *fw;
+ const struct sdma_firmware_header *header;
+ const struct sdma_script_start_addrs *addr;
+ int irq;
+ unsigned short *ram_code;
+ struct resource *iores;
+ struct sdma_platform_data *pdata = pdev->dev.platform_data;
+ char *fwname;
+ int i;
+ dma_cap_mask_t mask;
+ struct sdma_engine *sdma;
+
+ sdma = kzalloc(sizeof(*sdma), GFP_KERNEL);
+ if (!sdma)
+ return -ENOMEM;
+
+ sdma->dev = &pdev->dev;
+
+ iores = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ irq = platform_get_irq(pdev, 0);
+ if (!iores || irq < 0 || !pdata) {
+ ret = -EINVAL;
+ goto err_irq;
+ }
+
+ if (!request_mem_region(iores->start, resource_size(iores), pdev->name)) {
+ ret = -EBUSY;
+ goto err_request_region;
+ }
+
+ sdma->clk = clk_get(&pdev->dev, NULL);
+ if (IS_ERR(sdma->clk)) {
+ ret = PTR_ERR(sdma->clk);
+ goto err_clk;
+ }
+
+ sdma->regs = ioremap(iores->start, resource_size(iores));
+ if (!sdma->regs) {
+ ret = -ENOMEM;
+ goto err_ioremap;
+ }
+
+ ret = request_irq(irq, sdma_int_handler, 0, "sdma", sdma);
+ if (ret)
+ goto err_request_irq;
+
+ fwname = kasprintf(GFP_KERNEL, "sdma-%s-to%d.bin",
+ pdata->cpu_name, pdata->to_version);
+ if (!fwname) {
+ ret = -ENOMEM;
+ goto err_cputype;
+ }
+
+ ret = request_firmware(&fw, fwname, &pdev->dev);
+ if (ret) {
+ dev_err(&pdev->dev, "request firmware \"%s\" failed with %d\n",
+ fwname, ret);
+ kfree(fwname);
+ goto err_cputype;
+ }
+ kfree(fwname);
+
+ if (fw->size < sizeof(*header))
+ goto err_firmware;
+
+ header = (struct sdma_firmware_header *)fw->data;
+
+ if (header->magic != SDMA_FIRMWARE_MAGIC)
+ goto err_firmware;
+ if (header->ram_code_start + header->ram_code_size > fw->size)
+ goto err_firmware;
+
+ addr = (void *)header + header->script_addrs_start;
+ ram_code = (void *)header + header->ram_code_start;
+ sdma->script_addrs = kmalloc(sizeof(*addr), GFP_KERNEL);
+ if (!sdma->script_addrs)
+ goto err_firmware;
+ memcpy(sdma->script_addrs, addr, sizeof(*addr));
+
+ sdma->version = pdata->sdma_version;
+
+ INIT_LIST_HEAD(&sdma->dma_device.channels);
+ /* Initialize channel parameters */
+ for (i = 0; i < MAX_DMA_CHANNELS; i++) {
+ struct sdma_channel *sdmac = &sdma->channel[i];
+
+ sdmac->sdma = sdma;
+ spin_lock_init(&sdmac->lock);
+
+ dma_cap_set(DMA_SLAVE, sdma->dma_device.cap_mask);
+ dma_cap_set(DMA_CYCLIC, sdma->dma_device.cap_mask);
+
+ sdmac->chan.device = &sdma->dma_device;
+ sdmac->chan.chan_id = i;
+ sdmac->channel = i;
+
+ /* Add the channel to the DMAC list */
+ list_add_tail(&sdmac->chan.device_node, &sdma->dma_device.channels);
+ }
+
+ ret = sdma_init(sdma, ram_code, header->ram_code_size);
+ if (ret)
+ goto err_init;
+
+ sdma->dma_device.dev = &pdev->dev;
+
+ sdma->dma_device.device_alloc_chan_resources = sdma_alloc_chan_resources;
+ sdma->dma_device.device_free_chan_resources = sdma_free_chan_resources;
+ sdma->dma_device.device_tx_status = sdma_tx_status;
+ sdma->dma_device.device_prep_slave_sg = sdma_prep_slave_sg;
+ sdma->dma_device.device_prep_dma_cyclic = sdma_prep_dma_cyclic;
+ sdma->dma_device.device_control = sdma_control;
+ sdma->dma_device.device_issue_pending = sdma_issue_pending;
+
+ ret = dma_async_device_register(&sdma->dma_device);
+ if (ret) {
+ dev_err(&pdev->dev, "unable to register\n");
+ goto err_init;
+ }
+
+ dev_info(&pdev->dev, "initialized (firmware %d.%d)\n",
+ header->version_major,
+ header->version_minor);
+
+ /* request channel 0. This is an internal control channel
+ * to the SDMA engine and not available to clients.
+ */
+ dma_cap_zero(mask);
+ dma_cap_set(DMA_SLAVE, mask);
+ dma_request_channel(mask, NULL, NULL);
+
+ release_firmware(fw);
+
+ return 0;
+
+err_init:
+ kfree(sdma->script_addrs);
+err_firmware:
+ release_firmware(fw);
+err_cputype:
+ free_irq(irq, sdma);
+err_request_irq:
+ iounmap(sdma->regs);
+err_ioremap:
+ clk_put(sdma->clk);
+err_clk:
+ release_mem_region(iores->start, resource_size(iores));
+err_request_region:
+err_irq:
+ kfree(sdma);
+ return ret;
+}
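The firmware parsing above implies the image layout sketched below; only the fields actually dereferenced in sdma_probe() are shown, and everything else about the format is left open:

/*
 *   offset 0                          : struct sdma_firmware_header
 *                                       (magic, version_major/minor,
 *                                        script_addrs_start, ram_code_start,
 *                                        ram_code_size, ...)
 *   offset header->script_addrs_start : struct sdma_script_start_addrs,
 *                                       copied into sdma->script_addrs
 *   offset header->ram_code_start     : ram_code_size bytes of RAM image,
 *                                       loaded by sdma_load_script()
 */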
+
+static int __exit sdma_remove(struct platform_device *pdev)
+{
+ return -EBUSY;
+}
+
+static struct platform_driver sdma_driver = {
+ .driver = {
+ .name = "imx-sdma",
+ },
+ .remove = __exit_p(sdma_remove),
+};
+
+static int __init sdma_module_init(void)
+{
+ return platform_driver_probe(&sdma_driver, sdma_probe);
+}
+subsys_initcall(sdma_module_init);
+
+MODULE_AUTHOR("Sascha Hauer, Pengutronix <s.hauer@pengutronix.de>");
+MODULE_DESCRIPTION("i.MX SDMA driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/dma/intel_mid_dma.c b/drivers/dma/intel_mid_dma.c
index c2591e8d9b6e..338bc4eed1f3 100644
--- a/drivers/dma/intel_mid_dma.c
+++ b/drivers/dma/intel_mid_dma.c
@@ -25,6 +25,7 @@
*/
#include <linux/pci.h>
#include <linux/interrupt.h>
+#include <linux/pm_runtime.h>
#include <linux/intel_mid_dma.h>
#define MAX_CHAN 4 /*max ch across controllers*/
@@ -91,13 +92,13 @@ static int get_block_ts(int len, int tx_width, int block_size)
int byte_width = 0, block_ts = 0;
switch (tx_width) {
- case LNW_DMA_WIDTH_8BIT:
+ case DMA_SLAVE_BUSWIDTH_1_BYTE:
byte_width = 1;
break;
- case LNW_DMA_WIDTH_16BIT:
+ case DMA_SLAVE_BUSWIDTH_2_BYTES:
byte_width = 2;
break;
- case LNW_DMA_WIDTH_32BIT:
+ case DMA_SLAVE_BUSWIDTH_4_BYTES:
default:
byte_width = 4;
break;
@@ -247,16 +248,17 @@ static void midc_dostart(struct intel_mid_dma_chan *midc,
struct middma_device *mid = to_middma_device(midc->chan.device);
/* channel is idle */
- if (midc->in_use && test_ch_en(midc->dma_base, midc->ch_id)) {
+ if (midc->busy && test_ch_en(midc->dma_base, midc->ch_id)) {
/*error*/
pr_err("ERR_MDMA: channel is busy in start\n");
/* The tasklet will hopefully advance the queue... */
return;
}
-
+ midc->busy = true;
/*write registers and en*/
iowrite32(first->sar, midc->ch_regs + SAR);
iowrite32(first->dar, midc->ch_regs + DAR);
+ iowrite32(first->lli_phys, midc->ch_regs + LLP);
iowrite32(first->cfg_hi, midc->ch_regs + CFG_HIGH);
iowrite32(first->cfg_lo, midc->ch_regs + CFG_LOW);
iowrite32(first->ctl_lo, midc->ch_regs + CTL_LOW);
@@ -264,9 +266,9 @@ static void midc_dostart(struct intel_mid_dma_chan *midc,
pr_debug("MDMA:TX SAR %x,DAR %x,CFGL %x,CFGH %x,CTLH %x, CTLL %x\n",
(int)first->sar, (int)first->dar, first->cfg_hi,
first->cfg_lo, first->ctl_hi, first->ctl_lo);
+ first->status = DMA_IN_PROGRESS;
iowrite32(ENABLE_CHANNEL(midc->ch_id), mid->dma_base + DMA_CHAN_EN);
- first->status = DMA_IN_PROGRESS;
}
/**
@@ -283,20 +285,36 @@ static void midc_descriptor_complete(struct intel_mid_dma_chan *midc,
{
struct dma_async_tx_descriptor *txd = &desc->txd;
dma_async_tx_callback callback_txd = NULL;
+ struct intel_mid_dma_lli *llitem;
void *param_txd = NULL;
midc->completed = txd->cookie;
callback_txd = txd->callback;
param_txd = txd->callback_param;
- list_move(&desc->desc_node, &midc->free_list);
-
+ if (desc->lli != NULL) {
+ /*clear the DONE bit of completed LLI in memory*/
+ llitem = desc->lli + desc->current_lli;
+ llitem->ctl_hi &= CLEAR_DONE;
+ if (desc->current_lli < desc->lli_length-1)
+ (desc->current_lli)++;
+ else
+ desc->current_lli = 0;
+ }
spin_unlock_bh(&midc->lock);
if (callback_txd) {
pr_debug("MDMA: TXD callback set ... calling\n");
callback_txd(param_txd);
- spin_lock_bh(&midc->lock);
- return;
+ }
+ if (midc->raw_tfr) {
+ desc->status = DMA_SUCCESS;
+ if (desc->lli != NULL) {
+ pci_pool_free(desc->lli_pool, desc->lli,
+ desc->lli_phys);
+ pci_pool_destroy(desc->lli_pool);
+ }
+ list_move(&desc->desc_node, &midc->free_list);
+ midc->busy = false;
}
spin_lock_bh(&midc->lock);
@@ -317,14 +335,89 @@ static void midc_scan_descriptors(struct middma_device *mid,
/*tx is complete*/
list_for_each_entry_safe(desc, _desc, &midc->active_list, desc_node) {
- if (desc->status == DMA_IN_PROGRESS) {
- desc->status = DMA_SUCCESS;
+ if (desc->status == DMA_IN_PROGRESS)
midc_descriptor_complete(midc, desc);
- }
}
return;
-}
+ }
+/**
+ * midc_lli_fill_sg - Helper function to convert
+ * SG list to Linked List Items.
+ * @midc: Channel
+ * @desc: DMA descriptor
+ * @sglist: Pointer to SG list
+ * @sglen: SG list length
+ * @flags: DMA transaction flags
+ *
+ * Walk through the SG list and convert the SG list into Linked
+ * List Items (LLI).
+ */
+static int midc_lli_fill_sg(struct intel_mid_dma_chan *midc,
+ struct intel_mid_dma_desc *desc,
+ struct scatterlist *sglist,
+ unsigned int sglen,
+ unsigned int flags)
+{
+ struct intel_mid_dma_slave *mids;
+ struct scatterlist *sg;
+ dma_addr_t lli_next, sg_phy_addr;
+ struct intel_mid_dma_lli *lli_bloc_desc;
+ union intel_mid_dma_ctl_lo ctl_lo;
+ union intel_mid_dma_ctl_hi ctl_hi;
+ int i;
+ pr_debug("MDMA: Entered midc_lli_fill_sg\n");
+ mids = midc->mid_slave;
+
+ lli_bloc_desc = desc->lli;
+ lli_next = desc->lli_phys;
+
+ ctl_lo.ctl_lo = desc->ctl_lo;
+ ctl_hi.ctl_hi = desc->ctl_hi;
+ for_each_sg(sglist, sg, sglen, i) {
+ /*Populate CTL_LOW and LLI values*/
+ if (i != sglen - 1) {
+ lli_next = lli_next +
+ sizeof(struct intel_mid_dma_lli);
+ } else {
+ /*Check for circular list, otherwise terminate the LLI chain with zero*/
+ if (flags & DMA_PREP_CIRCULAR_LIST) {
+ pr_debug("MDMA: LLI is configured in circular mode\n");
+ lli_next = desc->lli_phys;
+ } else {
+ lli_next = 0;
+ ctl_lo.ctlx.llp_dst_en = 0;
+ ctl_lo.ctlx.llp_src_en = 0;
+ }
+ }
+ /*Populate CTL_HI values*/
+ ctl_hi.ctlx.block_ts = get_block_ts(sg->length,
+ desc->width,
+ midc->dma->block_size);
+ /*Populate SAR and DAR values*/
+ sg_phy_addr = sg_phys(sg);
+ if (desc->dirn == DMA_TO_DEVICE) {
+ lli_bloc_desc->sar = sg_phy_addr;
+ lli_bloc_desc->dar = mids->dma_slave.dst_addr;
+ } else if (desc->dirn == DMA_FROM_DEVICE) {
+ lli_bloc_desc->sar = mids->dma_slave.src_addr;
+ lli_bloc_desc->dar = sg_phy_addr;
+ }
+ /*Copy values into block descriptor in system memory*/
+ lli_bloc_desc->llp = lli_next;
+ lli_bloc_desc->ctl_lo = ctl_lo.ctl_lo;
+ lli_bloc_desc->ctl_hi = ctl_hi.ctl_hi;
+
+ lli_bloc_desc++;
+ }
+ /*Copy very first LLI values to descriptor*/
+ desc->ctl_lo = desc->lli->ctl_lo;
+ desc->ctl_hi = desc->lli->ctl_hi;
+ desc->sar = desc->lli->sar;
+ desc->dar = desc->lli->dar;
+
+ return 0;
+}
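For a non-circular three-entry SG list, the loop above produces a chain like the one sketched here (addresses shown symbolically; in the circular case the last llp instead points back at desc->lli_phys):

/*
 *   desc->lli_phys + 0 * sizeof(lli) : { sar/dar for sg[0], llp = lli_phys + 1 * sizeof(lli) }
 *   desc->lli_phys + 1 * sizeof(lli) : { sar/dar for sg[1], llp = lli_phys + 2 * sizeof(lli) }
 *   desc->lli_phys + 2 * sizeof(lli) : { sar/dar for sg[2], llp = 0,
 *                                        llp_src_en = llp_dst_en = 0 -> chain ends }
 */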
/*****************************************************************************
DMA engine callback Functions*/
/**
@@ -349,12 +442,12 @@ static dma_cookie_t intel_mid_dma_tx_submit(struct dma_async_tx_descriptor *tx)
desc->txd.cookie = cookie;
- if (list_empty(&midc->active_list)) {
- midc_dostart(midc, desc);
+ if (list_empty(&midc->active_list))
list_add_tail(&desc->desc_node, &midc->active_list);
- } else {
+ else
list_add_tail(&desc->desc_node, &midc->queue);
- }
+
+ midc_dostart(midc, desc);
spin_unlock_bh(&midc->lock);
return cookie;
@@ -414,6 +507,23 @@ static enum dma_status intel_mid_dma_tx_status(struct dma_chan *chan,
return ret;
}
+static int dma_slave_control(struct dma_chan *chan, unsigned long arg)
+{
+ struct intel_mid_dma_chan *midc = to_intel_mid_dma_chan(chan);
+ struct dma_slave_config *slave = (struct dma_slave_config *)arg;
+ struct intel_mid_dma_slave *mid_slave;
+
+ BUG_ON(!midc);
+ BUG_ON(!slave);
+ pr_debug("MDMA: slave control called\n");
+
+ mid_slave = to_intel_mid_dma_slave(slave);
+
+ BUG_ON(!mid_slave);
+
+ midc->mid_slave = mid_slave;
+ return 0;
+}
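dma_slave_control() only works because clients pass the dma_slave_config that is embedded inside their intel_mid_dma_slave, letting to_intel_mid_dma_slave() recover the outer structure with container_of(). A hedged client-side sketch (field values invented for illustration):

static int mid_example_configure(struct dma_chan *chan,
                                 struct intel_mid_dma_slave *mids)
{
    mids->dma_slave.direction      = DMA_TO_DEVICE;
    mids->dma_slave.dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
    mids->dma_slave.dst_maxburst   = 16;

    /* must pass the embedded config, not a standalone dma_slave_config */
    return chan->device->device_control(chan, DMA_SLAVE_CONFIG,
                                        (unsigned long)&mids->dma_slave);
}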
/**
* intel_mid_dma_device_control - DMA device control
* @chan: chan for DMA control
@@ -428,49 +538,41 @@ static int intel_mid_dma_device_control(struct dma_chan *chan,
struct intel_mid_dma_chan *midc = to_intel_mid_dma_chan(chan);
struct middma_device *mid = to_middma_device(chan->device);
struct intel_mid_dma_desc *desc, *_desc;
- LIST_HEAD(list);
+ union intel_mid_dma_cfg_lo cfg_lo;
+
+ if (cmd == DMA_SLAVE_CONFIG)
+ return dma_slave_control(chan, arg);
if (cmd != DMA_TERMINATE_ALL)
return -ENXIO;
spin_lock_bh(&midc->lock);
- if (midc->in_use == false) {
+ if (midc->busy == false) {
spin_unlock_bh(&midc->lock);
return 0;
}
- list_splice_init(&midc->free_list, &list);
- midc->descs_allocated = 0;
- midc->slave = NULL;
-
+ /*Suspend and disable the channel*/
+ cfg_lo.cfg_lo = ioread32(midc->ch_regs + CFG_LOW);
+ cfg_lo.cfgx.ch_susp = 1;
+ iowrite32(cfg_lo.cfg_lo, midc->ch_regs + CFG_LOW);
+ iowrite32(DISABLE_CHANNEL(midc->ch_id), mid->dma_base + DMA_CHAN_EN);
+ midc->busy = false;
/* Disable interrupts */
disable_dma_interrupt(midc);
+ midc->descs_allocated = 0;
spin_unlock_bh(&midc->lock);
- list_for_each_entry_safe(desc, _desc, &list, desc_node) {
- pr_debug("MDMA: freeing descriptor %p\n", desc);
- pci_pool_free(mid->dma_pool, desc, desc->txd.phys);
+ list_for_each_entry_safe(desc, _desc, &midc->active_list, desc_node) {
+ if (desc->lli != NULL) {
+ pci_pool_free(desc->lli_pool, desc->lli,
+ desc->lli_phys);
+ pci_pool_destroy(desc->lli_pool);
+ }
+ list_move(&desc->desc_node, &midc->free_list);
}
return 0;
}
-/**
- * intel_mid_dma_prep_slave_sg - Prep slave sg txn
- * @chan: chan for DMA transfer
- * @sgl: scatter gather list
- * @sg_len: length of sg txn
- * @direction: DMA transfer dirtn
- * @flags: DMA flags
- *
- * Do DMA sg txn: NOT supported now
- */
-static struct dma_async_tx_descriptor *intel_mid_dma_prep_slave_sg(
- struct dma_chan *chan, struct scatterlist *sgl,
- unsigned int sg_len, enum dma_data_direction direction,
- unsigned long flags)
-{
- /*not supported now*/
- return NULL;
-}
/**
* intel_mid_dma_prep_memcpy - Prep memcpy txn
@@ -495,23 +597,24 @@ static struct dma_async_tx_descriptor *intel_mid_dma_prep_memcpy(
union intel_mid_dma_ctl_hi ctl_hi;
union intel_mid_dma_cfg_lo cfg_lo;
union intel_mid_dma_cfg_hi cfg_hi;
- enum intel_mid_dma_width width = 0;
+ enum dma_slave_buswidth width;
pr_debug("MDMA: Prep for memcpy\n");
- WARN_ON(!chan);
+ BUG_ON(!chan);
if (!len)
return NULL;
- mids = chan->private;
- WARN_ON(!mids);
-
midc = to_intel_mid_dma_chan(chan);
- WARN_ON(!midc);
+ BUG_ON(!midc);
+
+ mids = midc->mid_slave;
+ BUG_ON(!mids);
pr_debug("MDMA:called for DMA %x CH %d Length %zu\n",
midc->dma->pci_id, midc->ch_id, len);
pr_debug("MDMA:Cfg passed Mode %x, Dirn %x, HS %x, Width %x\n",
- mids->cfg_mode, mids->dirn, mids->hs_mode, mids->src_width);
+ mids->cfg_mode, mids->dma_slave.direction,
+ mids->hs_mode, mids->dma_slave.src_addr_width);
/*calculate CFG_LO*/
if (mids->hs_mode == LNW_DMA_SW_HS) {
@@ -530,13 +633,13 @@ static struct dma_async_tx_descriptor *intel_mid_dma_prep_memcpy(
if (midc->dma->pimr_mask) {
cfg_hi.cfgx.protctl = 0x0; /*default value*/
cfg_hi.cfgx.fifo_mode = 1;
- if (mids->dirn == DMA_TO_DEVICE) {
+ if (mids->dma_slave.direction == DMA_TO_DEVICE) {
cfg_hi.cfgx.src_per = 0;
if (mids->device_instance == 0)
cfg_hi.cfgx.dst_per = 3;
if (mids->device_instance == 1)
cfg_hi.cfgx.dst_per = 1;
- } else if (mids->dirn == DMA_FROM_DEVICE) {
+ } else if (mids->dma_slave.direction == DMA_FROM_DEVICE) {
if (mids->device_instance == 0)
cfg_hi.cfgx.src_per = 2;
if (mids->device_instance == 1)
@@ -552,7 +655,8 @@ static struct dma_async_tx_descriptor *intel_mid_dma_prep_memcpy(
/*calculate CTL_HI*/
ctl_hi.ctlx.reser = 0;
- width = mids->src_width;
+ ctl_hi.ctlx.done = 0;
+ width = mids->dma_slave.src_addr_width;
ctl_hi.ctlx.block_ts = get_block_ts(len, width, midc->dma->block_size);
pr_debug("MDMA:calc len %d for block size %d\n",
@@ -560,21 +664,21 @@ static struct dma_async_tx_descriptor *intel_mid_dma_prep_memcpy(
/*calculate CTL_LO*/
ctl_lo.ctl_lo = 0;
ctl_lo.ctlx.int_en = 1;
- ctl_lo.ctlx.dst_tr_width = mids->dst_width;
- ctl_lo.ctlx.src_tr_width = mids->src_width;
- ctl_lo.ctlx.dst_msize = mids->src_msize;
- ctl_lo.ctlx.src_msize = mids->dst_msize;
+ ctl_lo.ctlx.dst_tr_width = mids->dma_slave.dst_addr_width;
+ ctl_lo.ctlx.src_tr_width = mids->dma_slave.src_addr_width;
+ ctl_lo.ctlx.dst_msize = mids->dma_slave.src_maxburst;
+ ctl_lo.ctlx.src_msize = mids->dma_slave.dst_maxburst;
if (mids->cfg_mode == LNW_DMA_MEM_TO_MEM) {
ctl_lo.ctlx.tt_fc = 0;
ctl_lo.ctlx.sinc = 0;
ctl_lo.ctlx.dinc = 0;
} else {
- if (mids->dirn == DMA_TO_DEVICE) {
+ if (mids->dma_slave.direction == DMA_TO_DEVICE) {
ctl_lo.ctlx.sinc = 0;
ctl_lo.ctlx.dinc = 2;
ctl_lo.ctlx.tt_fc = 1;
- } else if (mids->dirn == DMA_FROM_DEVICE) {
+ } else if (mids->dma_slave.direction == DMA_FROM_DEVICE) {
ctl_lo.ctlx.sinc = 2;
ctl_lo.ctlx.dinc = 0;
ctl_lo.ctlx.tt_fc = 2;
@@ -597,7 +701,10 @@ static struct dma_async_tx_descriptor *intel_mid_dma_prep_memcpy(
desc->ctl_lo = ctl_lo.ctl_lo;
desc->ctl_hi = ctl_hi.ctl_hi;
desc->width = width;
- desc->dirn = mids->dirn;
+ desc->dirn = mids->dma_slave.direction;
+ desc->lli_phys = 0;
+ desc->lli = NULL;
+ desc->lli_pool = NULL;
return &desc->txd;
err_desc_get:
@@ -605,6 +712,85 @@ err_desc_get:
midc_desc_put(midc, desc);
return NULL;
}
+/**
+ * intel_mid_dma_prep_slave_sg - Prep slave sg txn
+ * @chan: chan for DMA transfer
+ * @sgl: scatter gather list
+ * @sg_len: length of sg txn
+ * @direction: DMA transfer direction
+ * @flags: DMA flags
+ *
+ * Prepares LLI based peripheral transfer
+ */
+static struct dma_async_tx_descriptor *intel_mid_dma_prep_slave_sg(
+ struct dma_chan *chan, struct scatterlist *sgl,
+ unsigned int sg_len, enum dma_data_direction direction,
+ unsigned long flags)
+{
+ struct intel_mid_dma_chan *midc = NULL;
+ struct intel_mid_dma_slave *mids = NULL;
+ struct intel_mid_dma_desc *desc = NULL;
+ struct dma_async_tx_descriptor *txd = NULL;
+ union intel_mid_dma_ctl_lo ctl_lo;
+
+ pr_debug("MDMA: Prep for slave SG\n");
+
+ if (!sg_len) {
+ pr_err("MDMA: Invalid SG length\n");
+ return NULL;
+ }
+ midc = to_intel_mid_dma_chan(chan);
+ BUG_ON(!midc);
+
+ mids = midc->mid_slave;
+ BUG_ON(!mids);
+
+ if (!midc->dma->pimr_mask) {
+ pr_debug("MDMA: SG list is not supported by this controller\n");
+ return NULL;
+ }
+
+ pr_debug("MDMA: SG Length = %d, direction = %d, Flags = %#lx\n",
+ sg_len, direction, flags);
+
+ txd = intel_mid_dma_prep_memcpy(chan, 0, 0, sgl->length, flags);
+ if (NULL == txd) {
+ pr_err("MDMA: Prep memcpy failed\n");
+ return NULL;
+ }
+ desc = to_intel_mid_dma_desc(txd);
+ desc->dirn = direction;
+ ctl_lo.ctl_lo = desc->ctl_lo;
+ ctl_lo.ctlx.llp_dst_en = 1;
+ ctl_lo.ctlx.llp_src_en = 1;
+ desc->ctl_lo = ctl_lo.ctl_lo;
+ desc->lli_length = sg_len;
+ desc->current_lli = 0;
+ /* DMA coherent memory pool for LLI descriptors*/
+ desc->lli_pool = pci_pool_create("intel_mid_dma_lli_pool",
+ midc->dma->pdev,
+ (sizeof(struct intel_mid_dma_lli)*sg_len),
+ 32, 0);
+ if (NULL == desc->lli_pool) {
+ pr_err("MID_DMA:LLI pool create failed\n");
+ return NULL;
+ }
+
+ desc->lli = pci_pool_alloc(desc->lli_pool, GFP_KERNEL, &desc->lli_phys);
+ if (!desc->lli) {
+ pr_err("MID_DMA: LLI alloc failed\n");
+ pci_pool_destroy(desc->lli_pool);
+ return NULL;
+ }
+
+ midc_lli_fill_sg(midc, desc, sgl, sg_len, flags);
+ if (flags & DMA_PREP_INTERRUPT) {
+ iowrite32(UNMASK_INTR_REG(midc->ch_id),
+ midc->dma_base + MASK_BLOCK);
+ pr_debug("MDMA:Enabled Block interrupt\n");
+ }
+ return &desc->txd;
+}
/**
* intel_mid_dma_free_chan_resources - Frees dma resources
@@ -618,11 +804,11 @@ static void intel_mid_dma_free_chan_resources(struct dma_chan *chan)
struct middma_device *mid = to_middma_device(chan->device);
struct intel_mid_dma_desc *desc, *_desc;
- if (true == midc->in_use) {
+ if (true == midc->busy) {
/*trying to free ch in use!!!!!*/
pr_err("ERR_MDMA: trying to free ch in use\n");
}
-
+ pm_runtime_put(&mid->pdev->dev);
spin_lock_bh(&midc->lock);
midc->descs_allocated = 0;
list_for_each_entry_safe(desc, _desc, &midc->active_list, desc_node) {
@@ -639,6 +825,7 @@ static void intel_mid_dma_free_chan_resources(struct dma_chan *chan)
}
spin_unlock_bh(&midc->lock);
midc->in_use = false;
+ midc->busy = false;
/* Disable CH interrupts */
iowrite32(MASK_INTR_REG(midc->ch_id), mid->dma_base + MASK_BLOCK);
iowrite32(MASK_INTR_REG(midc->ch_id), mid->dma_base + MASK_ERR);
@@ -659,11 +846,20 @@ static int intel_mid_dma_alloc_chan_resources(struct dma_chan *chan)
dma_addr_t phys;
int i = 0;
+ pm_runtime_get_sync(&mid->pdev->dev);
+
+ if (mid->state == SUSPENDED) {
+ if (dma_resume(mid->pdev)) {
+ pr_err("ERR_MDMA: resume failed");
+ return -EFAULT;
+ }
+ }
/* ASSERT: channel is idle */
if (test_ch_en(mid->dma_base, midc->ch_id)) {
/*ch is not idle*/
pr_err("ERR_MDMA: ch not idle\n");
+ pm_runtime_put(&mid->pdev->dev);
return -EIO;
}
midc->completed = chan->cookie = 1;
@@ -674,6 +870,7 @@ static int intel_mid_dma_alloc_chan_resources(struct dma_chan *chan)
desc = pci_pool_alloc(mid->dma_pool, GFP_KERNEL, &phys);
if (!desc) {
pr_err("ERR_MDMA: desc failed\n");
+ pm_runtime_put(&mid->pdev->dev);
return -ENOMEM;
/*check*/
}
@@ -686,7 +883,8 @@ static int intel_mid_dma_alloc_chan_resources(struct dma_chan *chan)
list_add_tail(&desc->desc_node, &midc->free_list);
}
spin_unlock_bh(&midc->lock);
- midc->in_use = false;
+ midc->in_use = true;
+ midc->busy = false;
pr_debug("MID_DMA: Desc alloc done ret: %d desc\n", i);
return i;
}
@@ -715,7 +913,7 @@ static void dma_tasklet(unsigned long data)
{
struct middma_device *mid = NULL;
struct intel_mid_dma_chan *midc = NULL;
- u32 status;
+ u32 status, raw_tfr, raw_block;
int i;
mid = (struct middma_device *)data;
@@ -724,8 +922,9 @@ static void dma_tasklet(unsigned long data)
return;
}
pr_debug("MDMA: in tasklet for device %x\n", mid->pci_id);
- status = ioread32(mid->dma_base + RAW_TFR);
- pr_debug("MDMA:RAW_TFR %x\n", status);
+ raw_tfr = ioread32(mid->dma_base + RAW_TFR);
+ raw_block = ioread32(mid->dma_base + RAW_BLOCK);
+ status = raw_tfr | raw_block;
status &= mid->intr_mask;
while (status) {
/*txn interrupt*/
@@ -741,15 +940,23 @@ static void dma_tasklet(unsigned long data)
}
pr_debug("MDMA:Tx complete interrupt %x, Ch No %d Index %d\n",
status, midc->ch_id, i);
+ midc->raw_tfr = raw_tfr;
+ midc->raw_block = raw_block;
+ spin_lock_bh(&midc->lock);
/*clearing this interrupts first*/
iowrite32((1 << midc->ch_id), mid->dma_base + CLEAR_TFR);
- iowrite32((1 << midc->ch_id), mid->dma_base + CLEAR_BLOCK);
-
- spin_lock_bh(&midc->lock);
+ if (raw_block) {
+ iowrite32((1 << midc->ch_id),
+ mid->dma_base + CLEAR_BLOCK);
+ }
midc_scan_descriptors(mid, midc);
pr_debug("MDMA:Scan of desc... complete, unmasking\n");
iowrite32(UNMASK_INTR_REG(midc->ch_id),
mid->dma_base + MASK_TFR);
+ if (raw_block) {
+ iowrite32(UNMASK_INTR_REG(midc->ch_id),
+ mid->dma_base + MASK_BLOCK);
+ }
spin_unlock_bh(&midc->lock);
}
@@ -804,9 +1011,14 @@ static void dma_tasklet2(unsigned long data)
static irqreturn_t intel_mid_dma_interrupt(int irq, void *data)
{
struct middma_device *mid = data;
- u32 status;
+ u32 tfr_status, err_status;
int call_tasklet = 0;
+ tfr_status = ioread32(mid->dma_base + RAW_TFR);
+ err_status = ioread32(mid->dma_base + RAW_ERR);
+ if (!tfr_status && !err_status)
+ return IRQ_NONE;
+
/*DMA Interrupt*/
pr_debug("MDMA:Got an interrupt on irq %d\n", irq);
if (!mid) {
@@ -814,19 +1026,18 @@ static irqreturn_t intel_mid_dma_interrupt(int irq, void *data)
return -EINVAL;
}
- status = ioread32(mid->dma_base + RAW_TFR);
- pr_debug("MDMA: Status %x, Mask %x\n", status, mid->intr_mask);
- status &= mid->intr_mask;
- if (status) {
+ pr_debug("MDMA: Status %x, Mask %x\n", tfr_status, mid->intr_mask);
+ tfr_status &= mid->intr_mask;
+ if (tfr_status) {
/*need to disable intr*/
- iowrite32((status << 8), mid->dma_base + MASK_TFR);
- pr_debug("MDMA: Calling tasklet %x\n", status);
+ iowrite32((tfr_status << INT_MASK_WE), mid->dma_base + MASK_TFR);
+ iowrite32((tfr_status << INT_MASK_WE), mid->dma_base + MASK_BLOCK);
+ pr_debug("MDMA: Calling tasklet %x\n", tfr_status);
call_tasklet = 1;
}
- status = ioread32(mid->dma_base + RAW_ERR);
- status &= mid->intr_mask;
- if (status) {
- iowrite32(MASK_INTR_REG(status), mid->dma_base + MASK_ERR);
+ err_status &= mid->intr_mask;
+ if (err_status) {
+ iowrite32(MASK_INTR_REG(err_status), mid->dma_base + MASK_ERR);
call_tasklet = 1;
}
if (call_tasklet)
@@ -856,7 +1067,6 @@ static int mid_setup_dma(struct pci_dev *pdev)
{
struct middma_device *dma = pci_get_drvdata(pdev);
int err, i;
- unsigned int irq_level;
/* DMA coherent memory pool for DMA descriptor allocations */
dma->dma_pool = pci_pool_create("intel_mid_dma_desc_pool", pdev,
@@ -884,6 +1094,7 @@ static int mid_setup_dma(struct pci_dev *pdev)
pr_debug("MDMA:Adding %d channel for this controller\n", dma->max_chan);
/*init CH structures*/
dma->intr_mask = 0;
+ dma->state = RUNNING;
for (i = 0; i < dma->max_chan; i++) {
struct intel_mid_dma_chan *midch = &dma->ch[i];
@@ -943,7 +1154,6 @@ static int mid_setup_dma(struct pci_dev *pdev)
/*register irq */
if (dma->pimr_mask) {
- irq_level = IRQF_SHARED;
pr_debug("MDMA:Requesting irq shared for DMAC1\n");
err = request_irq(pdev->irq, intel_mid_dma_interrupt1,
IRQF_SHARED, "INTEL_MID_DMAC1", dma);
@@ -951,10 +1161,9 @@ static int mid_setup_dma(struct pci_dev *pdev)
goto err_irq;
} else {
dma->intr_mask = 0x03;
- irq_level = 0;
pr_debug("MDMA:Requesting irq for DMAC2\n");
err = request_irq(pdev->irq, intel_mid_dma_interrupt2,
- 0, "INTEL_MID_DMAC2", dma);
+ IRQF_SHARED, "INTEL_MID_DMAC2", dma);
if (0 != err)
goto err_irq;
}
@@ -1070,6 +1279,9 @@ static int __devinit intel_mid_dma_probe(struct pci_dev *pdev,
if (err)
goto err_dma;
+ pm_runtime_set_active(&pdev->dev);
+ pm_runtime_enable(&pdev->dev);
+ pm_runtime_allow(&pdev->dev);
return 0;
err_dma:
@@ -1104,6 +1316,85 @@ static void __devexit intel_mid_dma_remove(struct pci_dev *pdev)
pci_disable_device(pdev);
}
+/* Power Management */
+/**
+* dma_suspend - PCI suspend function
+*
+* @pci: PCI device structure
+* @state: PM message
+*
+* This function is called by OS when a power event occurs
+*/
+int dma_suspend(struct pci_dev *pci, pm_message_t state)
+{
+ int i;
+ struct middma_device *device = pci_get_drvdata(pci);
+ pr_debug("MDMA: dma_suspend called\n");
+
+ for (i = 0; i < device->max_chan; i++) {
+ if (device->ch[i].in_use)
+ return -EAGAIN;
+ }
+ device->state = SUSPENDED;
+ pci_set_drvdata(pci, device);
+ pci_save_state(pci);
+ pci_disable_device(pci);
+ pci_set_power_state(pci, PCI_D3hot);
+ return 0;
+}
+
+/**
+* dma_resume - PCI resume function
+*
+* @pci: PCI device structure
+*
+* This function is called by OS when a power event occurs
+*/
+int dma_resume(struct pci_dev *pci)
+{
+ int ret;
+ struct middma_device *device = pci_get_drvdata(pci);
+
+ pr_debug("MDMA: dma_resume called\n");
+ pci_set_power_state(pci, PCI_D0);
+ pci_restore_state(pci);
+ ret = pci_enable_device(pci);
+ if (ret) {
+ pr_err("MDMA: device cant be enabled for %x\n", pci->device);
+ return ret;
+ }
+ device->state = RUNNING;
+ iowrite32(REG_BIT0, device->dma_base + DMA_CFG);
+ pci_set_drvdata(pci, device);
+ return 0;
+}
+
+static int dma_runtime_suspend(struct device *dev)
+{
+ struct pci_dev *pci_dev = to_pci_dev(dev);
+ return dma_suspend(pci_dev, PMSG_SUSPEND);
+}
+
+static int dma_runtime_resume(struct device *dev)
+{
+ struct pci_dev *pci_dev = to_pci_dev(dev);
+ return dma_resume(pci_dev);
+}
+
+static int dma_runtime_idle(struct device *dev)
+{
+ struct pci_dev *pdev = to_pci_dev(dev);
+ struct middma_device *device = pci_get_drvdata(pdev);
+ int i;
+
+ for (i = 0; i < device->max_chan; i++) {
+ if (device->ch[i].in_use)
+ return -EAGAIN;
+ }
+
+ return pm_schedule_suspend(dev, 0);
+}
+
/******************************************************************************
* PCI stuff
*/
@@ -1116,11 +1407,24 @@ static struct pci_device_id intel_mid_dma_ids[] = {
};
MODULE_DEVICE_TABLE(pci, intel_mid_dma_ids);
+static const struct dev_pm_ops intel_mid_dma_pm = {
+ .runtime_suspend = dma_runtime_suspend,
+ .runtime_resume = dma_runtime_resume,
+ .runtime_idle = dma_runtime_idle,
+};
+
static struct pci_driver intel_mid_dma_pci = {
.name = "Intel MID DMA",
.id_table = intel_mid_dma_ids,
.probe = intel_mid_dma_probe,
.remove = __devexit_p(intel_mid_dma_remove),
+#ifdef CONFIG_PM
+ .suspend = dma_suspend,
+ .resume = dma_resume,
+ .driver = {
+ .pm = &intel_mid_dma_pm,
+ },
+#endif
};
static int __init intel_mid_dma_init(void)
diff --git a/drivers/dma/intel_mid_dma_regs.h b/drivers/dma/intel_mid_dma_regs.h
index d81aa658ab09..709fecbdde79 100644
--- a/drivers/dma/intel_mid_dma_regs.h
+++ b/drivers/dma/intel_mid_dma_regs.h
@@ -29,11 +29,12 @@
#include <linux/dmapool.h>
#include <linux/pci_ids.h>
-#define INTEL_MID_DMA_DRIVER_VERSION "1.0.5"
+#define INTEL_MID_DMA_DRIVER_VERSION "1.1.0"
#define REG_BIT0 0x00000001
#define REG_BIT8 0x00000100
-
+#define INT_MASK_WE 0x8
+#define CLEAR_DONE 0xFFFFEFFF
#define UNMASK_INTR_REG(chan_num) \
((REG_BIT0 << chan_num) | (REG_BIT8 << chan_num))
#define MASK_INTR_REG(chan_num) (REG_BIT8 << chan_num)
@@ -41,6 +42,9 @@
#define ENABLE_CHANNEL(chan_num) \
((REG_BIT0 << chan_num) | (REG_BIT8 << chan_num))
+#define DISABLE_CHANNEL(chan_num) \
+ (REG_BIT8 << chan_num)
+
#define DESCS_PER_CHANNEL 16
/*DMA Registers*/
/*registers associated with channel programming*/
@@ -50,6 +54,7 @@
/*CH X REG = (DMA_CH_SIZE)*CH_NO + REG*/
#define SAR 0x00 /* Source Address Register*/
#define DAR 0x08 /* Destination Address Register*/
+#define LLP 0x10 /* Linked List Pointer Register*/
#define CTL_LOW 0x18 /* Control Register*/
#define CTL_HIGH 0x1C /* Control Register*/
#define CFG_LOW 0x40 /* Configuration Register Low*/
@@ -112,8 +117,8 @@ union intel_mid_dma_ctl_lo {
union intel_mid_dma_ctl_hi {
struct {
u32 block_ts:12; /*block transfer size*/
- /*configured by DMAC*/
- u32 reser:20;
+ u32 done:1; /*Done - updated by DMAC*/
+ u32 reser:19; /*configured by DMAC*/
} ctlx;
u32 ctl_hi;
@@ -152,6 +157,7 @@ union intel_mid_dma_cfg_hi {
u32 cfg_hi;
};
+
/**
* struct intel_mid_dma_chan - internal mid representation of a DMA channel
* @chan: dma_chan strcture represetation for mid chan
@@ -166,7 +172,10 @@ union intel_mid_dma_cfg_hi {
* @slave: dma slave struture
* @descs_allocated: total number of decsiptors allocated
* @dma: dma device struture pointer
+ * @busy: bool representing if ch is busy (active txn) or not
* @in_use: bool representing if ch is in use or not
+ * @raw_tfr: raw tfr interrupt received
+ * @raw_block: raw block interrupt received
*/
struct intel_mid_dma_chan {
struct dma_chan chan;
@@ -178,10 +187,13 @@ struct intel_mid_dma_chan {
struct list_head active_list;
struct list_head queue;
struct list_head free_list;
- struct intel_mid_dma_slave *slave;
unsigned int descs_allocated;
struct middma_device *dma;
+ bool busy;
bool in_use;
+ u32 raw_tfr;
+ u32 raw_block;
+ struct intel_mid_dma_slave *mid_slave;
};
static inline struct intel_mid_dma_chan *to_intel_mid_dma_chan(
@@ -190,6 +202,10 @@ static inline struct intel_mid_dma_chan *to_intel_mid_dma_chan(
return container_of(chan, struct intel_mid_dma_chan, chan);
}
+enum intel_mid_dma_state {
+ RUNNING = 0,
+ SUSPENDED,
+};
/**
* struct middma_device - internal representation of a DMA device
* @pdev: PCI device
@@ -205,6 +221,7 @@ static inline struct intel_mid_dma_chan *to_intel_mid_dma_chan(
* @max_chan: max number of chs supported (from drv_data)
* @block_size: Block size of DMA transfer supported (from drv_data)
* @pimr_mask: MMIO register addr for periphral interrupt (from drv_data)
+ * @state: dma PM device state
*/
struct middma_device {
struct pci_dev *pdev;
@@ -220,6 +237,7 @@ struct middma_device {
int max_chan;
int block_size;
unsigned int pimr_mask;
+ enum intel_mid_dma_state state;
};
static inline struct middma_device *to_middma_device(struct dma_device *common)
@@ -238,14 +256,27 @@ struct intel_mid_dma_desc {
u32 cfg_lo;
u32 ctl_lo;
u32 ctl_hi;
+ struct pci_pool *lli_pool;
+ struct intel_mid_dma_lli *lli;
+ dma_addr_t lli_phys;
+ unsigned int lli_length;
+ unsigned int current_lli;
dma_addr_t next;
enum dma_data_direction dirn;
enum dma_status status;
- enum intel_mid_dma_width width; /*width of DMA txn*/
+ enum dma_slave_buswidth width; /*width of DMA txn*/
enum intel_mid_dma_mode cfg_mode; /*mode configuration*/
};
+struct intel_mid_dma_lli {
+ dma_addr_t sar;
+ dma_addr_t dar;
+ dma_addr_t llp;
+ u32 ctl_lo;
+ u32 ctl_hi;
+} __attribute__ ((packed));
+
static inline int test_ch_en(void __iomem *dma, u32 ch_no)
{
u32 en_reg = ioread32(dma + DMA_CHAN_EN);
@@ -257,4 +288,14 @@ static inline struct intel_mid_dma_desc *to_intel_mid_dma_desc
{
return container_of(txd, struct intel_mid_dma_desc, txd);
}
+
+static inline struct intel_mid_dma_slave *to_intel_mid_dma_slave
+ (struct dma_slave_config *slave)
+{
+ return container_of(slave, struct intel_mid_dma_slave, dma_slave);
+}
+
+
+int dma_resume(struct pci_dev *pci);
+
#endif /*__INTEL_MID_DMAC_REGS_H__*/
diff --git a/drivers/dma/pch_dma.c b/drivers/dma/pch_dma.c
index 3533948b88ba..92b679024fed 100644
--- a/drivers/dma/pch_dma.c
+++ b/drivers/dma/pch_dma.c
@@ -926,6 +926,7 @@ static void __devexit pch_dma_remove(struct pci_dev *pdev)
static const struct pci_device_id pch_dma_id_table[] = {
{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_PCH_DMA_8CH), 8 },
{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_PCH_DMA_4CH), 4 },
+ { 0, },
};
static struct pci_driver pch_dma_driver = {
diff --git a/drivers/dma/shdma.c b/drivers/dma/shdma.c
index eb6b54dbb806..85ffd5e38c50 100644
--- a/drivers/dma/shdma.c
+++ b/drivers/dma/shdma.c
@@ -1213,3 +1213,4 @@ module_exit(sh_dmae_exit);
MODULE_AUTHOR("Nobuhiro Iwamatsu <iwamatsu.nobuhiro@renesas.com>");
MODULE_DESCRIPTION("Renesas SH DMA Engine driver");
MODULE_LICENSE("GPL");
+MODULE_ALIAS("platform:sh-dma-engine");
diff --git a/drivers/dma/ste_dma40.c b/drivers/dma/ste_dma40.c
index 17e2600a00cf..fab68a553205 100644
--- a/drivers/dma/ste_dma40.c
+++ b/drivers/dma/ste_dma40.c
@@ -1,11 +1,8 @@
/*
- * driver/dma/ste_dma40.c
- *
- * Copyright (C) ST-Ericsson 2007-2010
+ * Copyright (C) ST-Ericsson SA 2007-2010
+ * Author: Per Forlin <per.forlin@stericsson.com> for ST-Ericsson
+ * Author: Jonas Aaberg <jonas.aberg@stericsson.com> for ST-Ericsson
* License terms: GNU General Public License (GPL) version 2
- * Author: Per Friden <per.friden@stericsson.com>
- * Author: Jonas Aaberg <jonas.aberg@stericsson.com>
- *
*/
#include <linux/kernel.h>
@@ -14,6 +11,7 @@
#include <linux/platform_device.h>
#include <linux/clk.h>
#include <linux/delay.h>
+#include <linux/err.h>
#include <plat/ste_dma40.h>
@@ -32,6 +30,11 @@
/* Hardware requirement on LCLA alignment */
#define LCLA_ALIGNMENT 0x40000
+
+/* Max number of links per event group */
+#define D40_LCLA_LINK_PER_EVENT_GRP 128
+#define D40_LCLA_END D40_LCLA_LINK_PER_EVENT_GRP
+
/* Attempts before giving up to trying to get pages that are aligned */
#define MAX_LCLA_ALLOC_ATTEMPTS 256
@@ -41,7 +44,7 @@
#define D40_ALLOC_LOG_FREE 0
/* Hardware designer of the block */
-#define D40_PERIPHID2_DESIGNER 0x8
+#define D40_HW_DESIGNER 0x8
/**
* enum 40_command - The different commands and/or statuses.
@@ -84,18 +87,17 @@ struct d40_lli_pool {
* @lli_log: Same as above but for logical channels.
* @lli_pool: The pool with two entries pre-allocated.
* @lli_len: Number of llis of current descriptor.
- * @lli_count: Number of transfered llis.
- * @lli_tx_len: Max number of LLIs per transfer, there can be
- * many transfer for one descriptor.
+ * @lli_current: Number of transferred llis.
+ * @lcla_alloc: Number of LCLA entries allocated.
* @txd: DMA engine struct. Used for among other things for communication
* during a transfer.
* @node: List entry.
- * @dir: The transfer direction of this job.
* @is_in_client_list: true if the client owns this descriptor.
+ * @is_hw_linked: true if this job is hardware-linked to the previous one
+ * and will be continued automatically.
*
* This descriptor is used for both logical and physical transfers.
*/
-
struct d40_desc {
/* LLI physical */
struct d40_phy_lli_bidir lli_phy;
@@ -104,14 +106,14 @@ struct d40_desc {
struct d40_lli_pool lli_pool;
int lli_len;
- int lli_count;
- u32 lli_tx_len;
+ int lli_current;
+ int lcla_alloc;
struct dma_async_tx_descriptor txd;
struct list_head node;
- enum dma_data_direction dir;
bool is_in_client_list;
+ bool is_hw_linked;
};
/**
@@ -123,17 +125,14 @@ struct d40_desc {
* @pages: The number of pages needed for all physical channels.
* Only used later for clean-up on error
* @lock: Lock to protect the content in this struct.
- * @alloc_map: Bitmap mapping between physical channel and LCLA entries.
- * @num_blocks: The number of entries of alloc_map. Equals to the
- * number of physical channels.
+ * @alloc_map: big map of which LCLA entry is owned by which job.
*/
struct d40_lcla_pool {
void *base;
void *base_unaligned;
int pages;
spinlock_t lock;
- u32 *alloc_map;
- int num_blocks;
+ struct d40_desc **alloc_map;
};
/**
@@ -146,9 +145,7 @@ struct d40_lcla_pool {
* this physical channel. Can also be free or physically allocated.
* @allocated_dst: Same as for src but is dst.
* allocated_dst and allocated_src uses the D40_ALLOC* defines as well as
- * event line number. Both allocated_src and allocated_dst can not be
- * allocated to a physical channel, since the interrupt handler has then
- * no way of figure out which one the interrupt belongs to.
+ * event line number.
*/
struct d40_phy_res {
spinlock_t lock;
@@ -178,6 +175,7 @@ struct d40_base;
* @active: Active descriptor.
* @queue: Queued jobs.
* @dma_cfg: The client configuration of this dma channel.
+ * @configured: whether the dma_cfg configuration is valid
* @base: Pointer to the device instance struct.
* @src_def_cfg: Default cfg register setting for src.
* @dst_def_cfg: Default cfg register setting for dst.
@@ -201,12 +199,12 @@ struct d40_chan {
struct list_head active;
struct list_head queue;
struct stedma40_chan_cfg dma_cfg;
+ bool configured;
struct d40_base *base;
/* Default register configurations */
u32 src_def_cfg;
u32 dst_def_cfg;
struct d40_def_lcsp log_def;
- struct d40_lcla_elem lcla;
struct d40_log_lli_full *lcpa;
/* Runtime reconfiguration */
dma_addr_t runtime_addr;
@@ -234,7 +232,6 @@ struct d40_chan {
* @dma_both: dma_device channels that can do both memcpy and slave transfers.
* @dma_slave: dma_device channels that can do only do slave transfers.
* @dma_memcpy: dma_device channels that can do only do memcpy transfers.
- * @phy_chans: Room for all possible physical channels in system.
* @log_chans: Room for all possible logical channels in system.
* @lookup_log_chans: Used to map interrupt number to logical channel. Points
* to log_chans entries.
@@ -340,9 +337,6 @@ static int d40_pool_lli_alloc(struct d40_desc *d40d,
align);
d40d->lli_phy.dst = PTR_ALIGN(d40d->lli_phy.src + lli_len,
align);
-
- d40d->lli_phy.src_addr = virt_to_phys(d40d->lli_phy.src);
- d40d->lli_phy.dst_addr = virt_to_phys(d40d->lli_phy.dst);
}
return 0;
@@ -357,22 +351,67 @@ static void d40_pool_lli_free(struct d40_desc *d40d)
d40d->lli_log.dst = NULL;
d40d->lli_phy.src = NULL;
d40d->lli_phy.dst = NULL;
- d40d->lli_phy.src_addr = 0;
- d40d->lli_phy.dst_addr = 0;
}
-static dma_cookie_t d40_assign_cookie(struct d40_chan *d40c,
- struct d40_desc *desc)
+static int d40_lcla_alloc_one(struct d40_chan *d40c,
+ struct d40_desc *d40d)
{
- dma_cookie_t cookie = d40c->chan.cookie;
+ unsigned long flags;
+ int i;
+ int ret = -EINVAL;
+ int p;
- if (++cookie < 0)
- cookie = 1;
+ spin_lock_irqsave(&d40c->base->lcla_pool.lock, flags);
+
+ p = d40c->phy_chan->num * D40_LCLA_LINK_PER_EVENT_GRP;
- d40c->chan.cookie = cookie;
- desc->txd.cookie = cookie;
+ /*
+ * Src and dst entries are allocated at the same time, so only half of
+ * the entries are handed out. Start at 1, since 0 can't be used: zero
+ * is reserved as the end-of-chain marker.
+ */
+ for (i = 1 ; i < D40_LCLA_LINK_PER_EVENT_GRP / 2; i++) {
+ if (!d40c->base->lcla_pool.alloc_map[p + i]) {
+ d40c->base->lcla_pool.alloc_map[p + i] = d40d;
+ d40d->lcla_alloc++;
+ ret = i;
+ break;
+ }
+ }
+
+ spin_unlock_irqrestore(&d40c->base->lcla_pool.lock, flags);
+
+ return ret;
+}
+
+static int d40_lcla_free_all(struct d40_chan *d40c,
+ struct d40_desc *d40d)
+{
+ unsigned long flags;
+ int i;
+ int ret = -EINVAL;
+
+ if (d40c->log_num == D40_PHY_CHAN)
+ return 0;
+
+ spin_lock_irqsave(&d40c->base->lcla_pool.lock, flags);
+
+ for (i = 1 ; i < D40_LCLA_LINK_PER_EVENT_GRP / 2; i++) {
+ if (d40c->base->lcla_pool.alloc_map[d40c->phy_chan->num *
+ D40_LCLA_LINK_PER_EVENT_GRP + i] == d40d) {
+ d40c->base->lcla_pool.alloc_map[d40c->phy_chan->num *
+ D40_LCLA_LINK_PER_EVENT_GRP + i] = NULL;
+ d40d->lcla_alloc--;
+ if (d40d->lcla_alloc == 0) {
+ ret = 0;
+ break;
+ }
+ }
+ }
+
+ spin_unlock_irqrestore(&d40c->base->lcla_pool.lock, flags);
+
+ return ret;
- return cookie;
}
static void d40_desc_remove(struct d40_desc *d40d)
@@ -382,28 +421,35 @@ static void d40_desc_remove(struct d40_desc *d40d)
static struct d40_desc *d40_desc_get(struct d40_chan *d40c)
{
- struct d40_desc *d;
- struct d40_desc *_d;
+ struct d40_desc *desc = NULL;
if (!list_empty(&d40c->client)) {
+ struct d40_desc *d;
+ struct d40_desc *_d;
+
list_for_each_entry_safe(d, _d, &d40c->client, node)
if (async_tx_test_ack(&d->txd)) {
d40_pool_lli_free(d);
d40_desc_remove(d);
+ desc = d;
+ memset(desc, 0, sizeof(*desc));
break;
}
- } else {
- d = kmem_cache_alloc(d40c->base->desc_slab, GFP_NOWAIT);
- if (d != NULL) {
- memset(d, 0, sizeof(struct d40_desc));
- INIT_LIST_HEAD(&d->node);
- }
}
- return d;
+
+ if (!desc)
+ desc = kmem_cache_zalloc(d40c->base->desc_slab, GFP_NOWAIT);
+
+ if (desc)
+ INIT_LIST_HEAD(&desc->node);
+
+ return desc;
}
static void d40_desc_free(struct d40_chan *d40c, struct d40_desc *d40d)
{
+
+ d40_lcla_free_all(d40c, d40d);
kmem_cache_free(d40c->base->desc_slab, d40d);
}
@@ -412,6 +458,59 @@ static void d40_desc_submit(struct d40_chan *d40c, struct d40_desc *desc)
list_add_tail(&desc->node, &d40c->active);
}
+static void d40_desc_load(struct d40_chan *d40c, struct d40_desc *d40d)
+{
+ int curr_lcla = -EINVAL, next_lcla;
+
+ if (d40c->log_num == D40_PHY_CHAN) {
+ d40_phy_lli_write(d40c->base->virtbase,
+ d40c->phy_chan->num,
+ d40d->lli_phy.dst,
+ d40d->lli_phy.src);
+ d40d->lli_current = d40d->lli_len;
+ } else {
+
+ if ((d40d->lli_len - d40d->lli_current) > 1)
+ curr_lcla = d40_lcla_alloc_one(d40c, d40d);
+
+ d40_log_lli_lcpa_write(d40c->lcpa,
+ &d40d->lli_log.dst[d40d->lli_current],
+ &d40d->lli_log.src[d40d->lli_current],
+ curr_lcla);
+
+ d40d->lli_current++;
+ for (; d40d->lli_current < d40d->lli_len; d40d->lli_current++) {
+ struct d40_log_lli *lcla;
+
+ if (d40d->lli_current + 1 < d40d->lli_len)
+ next_lcla = d40_lcla_alloc_one(d40c, d40d);
+ else
+ next_lcla = -EINVAL;
+
+ lcla = d40c->base->lcla_pool.base +
+ d40c->phy_chan->num * 1024 +
+ 8 * curr_lcla * 2;
+
+ d40_log_lli_lcla_write(lcla,
+ &d40d->lli_log.dst[d40d->lli_current],
+ &d40d->lli_log.src[d40d->lli_current],
+ next_lcla);
+
+ (void) dma_map_single(d40c->base->dev, lcla,
+ 2 * sizeof(struct d40_log_lli),
+ DMA_TO_DEVICE);
+
+ curr_lcla = next_lcla;
+
+ if (curr_lcla == -EINVAL) {
+ d40d->lli_current++;
+ break;
+ }
+
+ }
+ }
+}
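The lcla pointer arithmetic in the logical-channel branch above encodes the LCLA memory map: each physical channel owns a 1 KiB slice, every logical LLI is 8 bytes, and src/dst entries are interleaved in pairs, which is where the factor of two comes from. A small helper restating that calculation for illustration (not part of the patch):

static struct d40_log_lli *d40_example_lcla_entry(void *lcla_base,
                                                  int phy_chan_num, int lcla_idx)
{
    /* mirrors: lcla_pool.base + phy_chan->num * 1024 + 8 * curr_lcla * 2 */
    return lcla_base + phy_chan_num * 1024 + 8 * lcla_idx * 2;
}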
+
static struct d40_desc *d40_first_active_get(struct d40_chan *d40c)
{
struct d40_desc *d;
@@ -443,68 +542,26 @@ static struct d40_desc *d40_first_queued(struct d40_chan *d40c)
return d;
}
-/* Support functions for logical channels */
-
-static int d40_lcla_id_get(struct d40_chan *d40c)
+static struct d40_desc *d40_last_queued(struct d40_chan *d40c)
{
- int src_id = 0;
- int dst_id = 0;
- struct d40_log_lli *lcla_lidx_base =
- d40c->base->lcla_pool.base + d40c->phy_chan->num * 1024;
- int i;
- int lli_per_log = d40c->base->plat_data->llis_per_log;
- unsigned long flags;
-
- if (d40c->lcla.src_id >= 0 && d40c->lcla.dst_id >= 0)
- return 0;
-
- if (d40c->base->lcla_pool.num_blocks > 32)
- return -EINVAL;
-
- spin_lock_irqsave(&d40c->base->lcla_pool.lock, flags);
-
- for (i = 0; i < d40c->base->lcla_pool.num_blocks; i++) {
- if (!(d40c->base->lcla_pool.alloc_map[d40c->phy_chan->num] &
- (0x1 << i))) {
- d40c->base->lcla_pool.alloc_map[d40c->phy_chan->num] |=
- (0x1 << i);
- break;
- }
- }
- src_id = i;
- if (src_id >= d40c->base->lcla_pool.num_blocks)
- goto err;
+ struct d40_desc *d;
- for (; i < d40c->base->lcla_pool.num_blocks; i++) {
- if (!(d40c->base->lcla_pool.alloc_map[d40c->phy_chan->num] &
- (0x1 << i))) {
- d40c->base->lcla_pool.alloc_map[d40c->phy_chan->num] |=
- (0x1 << i);
+ if (list_empty(&d40c->queue))
+ return NULL;
+ list_for_each_entry(d, &d40c->queue, node)
+ if (list_is_last(&d->node, &d40c->queue))
break;
- }
- }
-
- dst_id = i;
- if (dst_id == src_id)
- goto err;
-
- d40c->lcla.src_id = src_id;
- d40c->lcla.dst_id = dst_id;
- d40c->lcla.dst = lcla_lidx_base + dst_id * lli_per_log + 1;
- d40c->lcla.src = lcla_lidx_base + src_id * lli_per_log + 1;
-
- spin_unlock_irqrestore(&d40c->base->lcla_pool.lock, flags);
- return 0;
-err:
- spin_unlock_irqrestore(&d40c->base->lcla_pool.lock, flags);
- return -EINVAL;
+ return d;
}
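Note (not part of the patch): d40_last_queued() walks the whole list just to reach its tail. Because struct list_head is circular, the same element can be fetched in O(1) from queue.prev; an equivalent sketch:

static struct d40_desc *d40_last_queued_sketch(struct d40_chan *d40c)
{
        if (list_empty(&d40c->queue))
                return NULL;
        /* The list is circular, so head->prev is the last queued node. */
        return list_entry(d40c->queue.prev, struct d40_desc, node);
}
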
+/* Support functions for logical channels */
+
static int d40_channel_execute_command(struct d40_chan *d40c,
enum d40_command command)
{
- int status, i;
+ u32 status;
+ int i;
void __iomem *active_reg;
int ret = 0;
unsigned long flags;
@@ -567,35 +624,19 @@ done:
static void d40_term_all(struct d40_chan *d40c)
{
struct d40_desc *d40d;
- unsigned long flags;
/* Release active descriptors */
while ((d40d = d40_first_active_get(d40c))) {
d40_desc_remove(d40d);
-
- /* Return desc to free-list */
d40_desc_free(d40c, d40d);
}
/* Release queued descriptors waiting for transfer */
while ((d40d = d40_first_queued(d40c))) {
d40_desc_remove(d40d);
-
- /* Return desc to free-list */
d40_desc_free(d40c, d40d);
}
- spin_lock_irqsave(&d40c->base->lcla_pool.lock, flags);
-
- d40c->base->lcla_pool.alloc_map[d40c->phy_chan->num] &=
- (~(0x1 << d40c->lcla.dst_id));
- d40c->base->lcla_pool.alloc_map[d40c->phy_chan->num] &=
- (~(0x1 << d40c->lcla.src_id));
-
- d40c->lcla.src_id = -1;
- d40c->lcla.dst_id = -1;
-
- spin_unlock_irqrestore(&d40c->base->lcla_pool.lock, flags);
d40c->pending_tx = 0;
d40c->busy = false;
@@ -640,45 +681,47 @@ static void d40_config_set_event(struct d40_chan *d40c, bool do_enable)
static u32 d40_chan_has_events(struct d40_chan *d40c)
{
- u32 val = 0;
+ u32 val;
- /* If SSLNK or SDLNK is zero all events are disabled */
- if ((d40c->dma_cfg.dir == STEDMA40_PERIPH_TO_MEM) ||
- (d40c->dma_cfg.dir == STEDMA40_PERIPH_TO_PERIPH))
- val = readl(d40c->base->virtbase + D40_DREG_PCBASE +
- d40c->phy_chan->num * D40_DREG_PCDELTA +
- D40_CHAN_REG_SSLNK);
-
- if (d40c->dma_cfg.dir != STEDMA40_PERIPH_TO_MEM)
- val = readl(d40c->base->virtbase + D40_DREG_PCBASE +
- d40c->phy_chan->num * D40_DREG_PCDELTA +
- D40_CHAN_REG_SDLNK);
+ val = readl(d40c->base->virtbase + D40_DREG_PCBASE +
+ d40c->phy_chan->num * D40_DREG_PCDELTA +
+ D40_CHAN_REG_SSLNK);
+
+ val |= readl(d40c->base->virtbase + D40_DREG_PCBASE +
+ d40c->phy_chan->num * D40_DREG_PCDELTA +
+ D40_CHAN_REG_SDLNK);
return val;
}
-static void d40_config_enable_lidx(struct d40_chan *d40c)
+static u32 d40_get_prmo(struct d40_chan *d40c)
{
- /* Set LIDX for lcla */
- writel((d40c->phy_chan->num << D40_SREG_ELEM_LOG_LIDX_POS) &
- D40_SREG_ELEM_LOG_LIDX_MASK,
- d40c->base->virtbase + D40_DREG_PCBASE +
- d40c->phy_chan->num * D40_DREG_PCDELTA + D40_CHAN_REG_SDELT);
-
- writel((d40c->phy_chan->num << D40_SREG_ELEM_LOG_LIDX_POS) &
- D40_SREG_ELEM_LOG_LIDX_MASK,
- d40c->base->virtbase + D40_DREG_PCBASE +
- d40c->phy_chan->num * D40_DREG_PCDELTA + D40_CHAN_REG_SSELT);
+ static const unsigned int phy_map[] = {
+ [STEDMA40_PCHAN_BASIC_MODE]
+ = D40_DREG_PRMO_PCHAN_BASIC,
+ [STEDMA40_PCHAN_MODULO_MODE]
+ = D40_DREG_PRMO_PCHAN_MODULO,
+ [STEDMA40_PCHAN_DOUBLE_DST_MODE]
+ = D40_DREG_PRMO_PCHAN_DOUBLE_DST,
+ };
+ static const unsigned int log_map[] = {
+ [STEDMA40_LCHAN_SRC_PHY_DST_LOG]
+ = D40_DREG_PRMO_LCHAN_SRC_PHY_DST_LOG,
+ [STEDMA40_LCHAN_SRC_LOG_DST_PHY]
+ = D40_DREG_PRMO_LCHAN_SRC_LOG_DST_PHY,
+ [STEDMA40_LCHAN_SRC_LOG_DST_LOG]
+ = D40_DREG_PRMO_LCHAN_SRC_LOG_DST_LOG,
+ };
+
+ if (d40c->log_num == D40_PHY_CHAN)
+ return phy_map[d40c->dma_cfg.mode_opt];
+ else
+ return log_map[d40c->dma_cfg.mode_opt];
}
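
Note (not part of the patch): d40_get_prmo() replaces open-coded bit twiddling with designated-initializer lookup tables indexed directly by the enum value. The same idiom reduced to a self-contained example with made-up names:

enum xfer_mode { XFER_BASIC, XFER_MODULO, XFER_DOUBLE_DST };

static unsigned int mode_to_prmo(enum xfer_mode mode)
{
        static const unsigned int map[] = {
                [XFER_BASIC]      = 0x1,
                [XFER_MODULO]     = 0x2,
                [XFER_DOUBLE_DST] = 0x3,
        };

        return map[mode];       /* adding a mode is one new table entry */
}
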
-static int d40_config_write(struct d40_chan *d40c)
+static void d40_config_write(struct d40_chan *d40c)
{
u32 addr_base;
u32 var;
- int res;
-
- res = d40_channel_execute_command(d40c, D40_DMA_SUSPEND_REQ);
- if (res)
- return res;
/* Odd addresses are even addresses + 4 */
addr_base = (d40c->phy_chan->num % 2) * 4;
@@ -688,8 +731,7 @@ static int d40_config_write(struct d40_chan *d40c)
writel(var, d40c->base->virtbase + D40_DREG_PRMSE + addr_base);
/* Setup operational mode option register */
- var = ((d40c->dma_cfg.channel_type >> STEDMA40_INFO_CH_MODE_OPT_POS) &
- 0x3) << D40_CHAN_POS(d40c->phy_chan->num);
+ var = d40_get_prmo(d40c) << D40_CHAN_POS(d40c->phy_chan->num);
writel(var, d40c->base->virtbase + D40_DREG_PRMOE + addr_base);
@@ -704,41 +746,181 @@ static int d40_config_write(struct d40_chan *d40c)
d40c->phy_chan->num * D40_DREG_PCDELTA +
D40_CHAN_REG_SDCFG);
- d40_config_enable_lidx(d40c);
+ /* Set LIDX for lcla */
+ writel((d40c->phy_chan->num << D40_SREG_ELEM_LOG_LIDX_POS) &
+ D40_SREG_ELEM_LOG_LIDX_MASK,
+ d40c->base->virtbase + D40_DREG_PCBASE +
+ d40c->phy_chan->num * D40_DREG_PCDELTA +
+ D40_CHAN_REG_SDELT);
+
+ writel((d40c->phy_chan->num << D40_SREG_ELEM_LOG_LIDX_POS) &
+ D40_SREG_ELEM_LOG_LIDX_MASK,
+ d40c->base->virtbase + D40_DREG_PCBASE +
+ d40c->phy_chan->num * D40_DREG_PCDELTA +
+ D40_CHAN_REG_SSELT);
+
+ }
+}
+
+static u32 d40_residue(struct d40_chan *d40c)
+{
+ u32 num_elt;
+
+ if (d40c->log_num != D40_PHY_CHAN)
+ num_elt = (readl(&d40c->lcpa->lcsp2) & D40_MEM_LCSP2_ECNT_MASK)
+ >> D40_MEM_LCSP2_ECNT_POS;
+ else
+ num_elt = (readl(d40c->base->virtbase + D40_DREG_PCBASE +
+ d40c->phy_chan->num * D40_DREG_PCDELTA +
+ D40_CHAN_REG_SDELT) &
+ D40_SREG_ELEM_PHY_ECNT_MASK) >>
+ D40_SREG_ELEM_PHY_ECNT_POS;
+ return num_elt * (1 << d40c->dma_cfg.dst_info.data_width);
+}
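Note (not part of the patch): the residue formula converts the element count read back from the hardware into bytes, assuming the usual STEDMA40 encoding where data_width is the element size as a power of two (0 = byte, 1 = halfword, 2 = word, 3 = doubleword). A quick worked example:

u32 num_elt = 10;                               /* elements still pending */
u32 data_width = 2;                             /* 32-bit elements        */
u32 bytes_left = num_elt * (1 << data_width);   /* 10 * 4 = 40 bytes      */
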
+
+static bool d40_tx_is_linked(struct d40_chan *d40c)
+{
+ bool is_link;
+
+ if (d40c->log_num != D40_PHY_CHAN)
+ is_link = readl(&d40c->lcpa->lcsp3) & D40_MEM_LCSP3_DLOS_MASK;
+ else
+ is_link = readl(d40c->base->virtbase + D40_DREG_PCBASE +
+ d40c->phy_chan->num * D40_DREG_PCDELTA +
+ D40_CHAN_REG_SDLNK) &
+ D40_SREG_LNK_PHYS_LNK_MASK;
+ return is_link;
+}
+
+static int d40_pause(struct dma_chan *chan)
+{
+ struct d40_chan *d40c =
+ container_of(chan, struct d40_chan, chan);
+ int res = 0;
+ unsigned long flags;
+
+ if (!d40c->busy)
+ return 0;
+
+ spin_lock_irqsave(&d40c->lock, flags);
+
+ res = d40_channel_execute_command(d40c, D40_DMA_SUSPEND_REQ);
+ if (res == 0) {
+ if (d40c->log_num != D40_PHY_CHAN) {
+ d40_config_set_event(d40c, false);
+ /* Resume the other logical channels if any */
+ if (d40_chan_has_events(d40c))
+ res = d40_channel_execute_command(d40c,
+ D40_DMA_RUN);
+ }
}
+
+ spin_unlock_irqrestore(&d40c->lock, flags);
return res;
}
-static void d40_desc_load(struct d40_chan *d40c, struct d40_desc *d40d)
+static int d40_resume(struct dma_chan *chan)
{
- if (d40d->lli_phy.dst && d40d->lli_phy.src) {
- d40_phy_lli_write(d40c->base->virtbase,
- d40c->phy_chan->num,
- d40d->lli_phy.dst,
- d40d->lli_phy.src);
- } else if (d40d->lli_log.dst && d40d->lli_log.src) {
- struct d40_log_lli *src = d40d->lli_log.src;
- struct d40_log_lli *dst = d40d->lli_log.dst;
- int s;
-
- src += d40d->lli_count;
- dst += d40d->lli_count;
- s = d40_log_lli_write(d40c->lcpa,
- d40c->lcla.src, d40c->lcla.dst,
- dst, src,
- d40c->base->plat_data->llis_per_log);
-
- /* If s equals to zero, the job is not linked */
- if (s > 0) {
- (void) dma_map_single(d40c->base->dev, d40c->lcla.src,
- s * sizeof(struct d40_log_lli),
- DMA_TO_DEVICE);
- (void) dma_map_single(d40c->base->dev, d40c->lcla.dst,
- s * sizeof(struct d40_log_lli),
- DMA_TO_DEVICE);
+ struct d40_chan *d40c =
+ container_of(chan, struct d40_chan, chan);
+ int res = 0;
+ unsigned long flags;
+
+ if (!d40c->busy)
+ return 0;
+
+ spin_lock_irqsave(&d40c->lock, flags);
+
+ if (d40c->base->rev == 0)
+ if (d40c->log_num != D40_PHY_CHAN) {
+ res = d40_channel_execute_command(d40c,
+ D40_DMA_SUSPEND_REQ);
+ goto no_suspend;
}
+
+ /* If there are bytes left to transfer or a linked tx, resume the job */
+ if (d40_residue(d40c) || d40_tx_is_linked(d40c)) {
+
+ if (d40c->log_num != D40_PHY_CHAN)
+ d40_config_set_event(d40c, true);
+
+ res = d40_channel_execute_command(d40c, D40_DMA_RUN);
+ }
+
+no_suspend:
+ spin_unlock_irqrestore(&d40c->lock, flags);
+ return res;
+}
+
+static void d40_tx_submit_log(struct d40_chan *d40c, struct d40_desc *d40d)
+{
+ /* TODO: Write */
+}
+
+static void d40_tx_submit_phy(struct d40_chan *d40c, struct d40_desc *d40d)
+{
+ struct d40_desc *d40d_prev = NULL;
+ int i;
+ u32 val;
+
+ if (!list_empty(&d40c->queue))
+ d40d_prev = d40_last_queued(d40c);
+ else if (!list_empty(&d40c->active))
+ d40d_prev = d40_first_active_get(d40c);
+
+ if (!d40d_prev)
+ return;
+
+ /* Here we try to join this job with previous jobs */
+ val = readl(d40c->base->virtbase + D40_DREG_PCBASE +
+ d40c->phy_chan->num * D40_DREG_PCDELTA +
+ D40_CHAN_REG_SSLNK);
+
+ /* Figure out which link we're currently transmitting */
+ for (i = 0; i < d40d_prev->lli_len; i++)
+ if (val == d40d_prev->lli_phy.src[i].reg_lnk)
+ break;
+
+ val = readl(d40c->base->virtbase + D40_DREG_PCBASE +
+ d40c->phy_chan->num * D40_DREG_PCDELTA +
+ D40_CHAN_REG_SSELT) >> D40_SREG_ELEM_LOG_ECNT_POS;
+
+ if (i == (d40d_prev->lli_len - 1) && val > 0) {
+ /* Change the current one */
+ writel(virt_to_phys(d40d->lli_phy.src),
+ d40c->base->virtbase + D40_DREG_PCBASE +
+ d40c->phy_chan->num * D40_DREG_PCDELTA +
+ D40_CHAN_REG_SSLNK);
+ writel(virt_to_phys(d40d->lli_phy.dst),
+ d40c->base->virtbase + D40_DREG_PCBASE +
+ d40c->phy_chan->num * D40_DREG_PCDELTA +
+ D40_CHAN_REG_SDLNK);
+
+ d40d->is_hw_linked = true;
+
+ } else if (i < d40d_prev->lli_len) {
+ (void) dma_unmap_single(d40c->base->dev,
+ virt_to_phys(d40d_prev->lli_phy.src),
+ d40d_prev->lli_pool.size,
+ DMA_TO_DEVICE);
+
+ /* Keep the settings */
+ val = d40d_prev->lli_phy.src[d40d_prev->lli_len - 1].reg_lnk &
+ ~D40_SREG_LNK_PHYS_LNK_MASK;
+ d40d_prev->lli_phy.src[d40d_prev->lli_len - 1].reg_lnk =
+ val | virt_to_phys(d40d->lli_phy.src);
+
+ val = d40d_prev->lli_phy.dst[d40d_prev->lli_len - 1].reg_lnk &
+ ~D40_SREG_LNK_PHYS_LNK_MASK;
+ d40d_prev->lli_phy.dst[d40d_prev->lli_len - 1].reg_lnk =
+ val | virt_to_phys(d40d->lli_phy.dst);
+
+ (void) dma_map_single(d40c->base->dev,
+ d40d_prev->lli_phy.src,
+ d40d_prev->lli_pool.size,
+ DMA_TO_DEVICE);
+ d40d->is_hw_linked = true;
}
- d40d->lli_count += d40d->lli_tx_len;
}
static dma_cookie_t d40_tx_submit(struct dma_async_tx_descriptor *tx)
@@ -749,14 +931,28 @@ static dma_cookie_t d40_tx_submit(struct dma_async_tx_descriptor *tx)
struct d40_desc *d40d = container_of(tx, struct d40_desc, txd);
unsigned long flags;
+ (void) d40_pause(&d40c->chan);
+
spin_lock_irqsave(&d40c->lock, flags);
- tx->cookie = d40_assign_cookie(d40c, d40d);
+ d40c->chan.cookie++;
+
+ if (d40c->chan.cookie < 0)
+ d40c->chan.cookie = 1;
+
+ d40d->txd.cookie = d40c->chan.cookie;
+
+ if (d40c->log_num == D40_PHY_CHAN)
+ d40_tx_submit_phy(d40c, d40d);
+ else
+ d40_tx_submit_log(d40c, d40d);
d40_desc_queue(d40c, d40d);
spin_unlock_irqrestore(&d40c->lock, flags);
+ (void) d40_resume(&d40c->chan);
+
return tx->cookie;
}
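
Note (not part of the patch): the open-coded cookie bump in d40_tx_submit() relies on dma_cookie_t being a signed int — once the counter wraps past INT_MAX it is forced back to 1, since zero and negative values are reserved for "not submitted" and error states. A standalone sketch of the same assignment, assuming <linux/dmaengine.h>:

static dma_cookie_t assign_cookie_sketch(struct dma_chan *chan,
                                         struct dma_async_tx_descriptor *txd)
{
        chan->cookie++;
        if (chan->cookie < 0)   /* signed wrap-around */
                chan->cookie = 1;

        txd->cookie = chan->cookie;
        return txd->cookie;
}
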
@@ -796,14 +992,21 @@ static struct d40_desc *d40_queue_start(struct d40_chan *d40c)
/* Add to active queue */
d40_desc_submit(d40c, d40d);
- /* Initiate DMA job */
- d40_desc_load(d40c, d40d);
+ /*
+ * If this job is already linked in hw,
+ * do not submit it.
+ */
- /* Start dma job */
- err = d40_start(d40c);
+ if (!d40d->is_hw_linked) {
+ /* Initiate DMA job */
+ d40_desc_load(d40c, d40d);
- if (err)
- return NULL;
+ /* Start dma job */
+ err = d40_start(d40c);
+
+ if (err)
+ return NULL;
+ }
}
return d40d;
@@ -814,17 +1017,15 @@ static void dma_tc_handle(struct d40_chan *d40c)
{
struct d40_desc *d40d;
- if (!d40c->phy_chan)
- return;
-
/* Get first active entry from list */
d40d = d40_first_active_get(d40c);
if (d40d == NULL)
return;
- if (d40d->lli_count < d40d->lli_len) {
+ d40_lcla_free_all(d40c, d40d);
+ if (d40d->lli_current < d40d->lli_len) {
d40_desc_load(d40c, d40d);
/* Start dma job */
(void) d40_start(d40c);
@@ -842,7 +1043,7 @@ static void dma_tc_handle(struct d40_chan *d40c)
static void dma_tasklet(unsigned long data)
{
struct d40_chan *d40c = (struct d40_chan *) data;
- struct d40_desc *d40d_fin;
+ struct d40_desc *d40d;
unsigned long flags;
dma_async_tx_callback callback;
void *callback_param;
@@ -850,12 +1051,12 @@ static void dma_tasklet(unsigned long data)
spin_lock_irqsave(&d40c->lock, flags);
/* Get first active entry from list */
- d40d_fin = d40_first_active_get(d40c);
+ d40d = d40_first_active_get(d40c);
- if (d40d_fin == NULL)
+ if (d40d == NULL)
goto err;
- d40c->completed = d40d_fin->txd.cookie;
+ d40c->completed = d40d->txd.cookie;
/*
* If terminating a channel pending_tx is set to zero.
@@ -867,19 +1068,19 @@ static void dma_tasklet(unsigned long data)
}
/* Callback to client */
- callback = d40d_fin->txd.callback;
- callback_param = d40d_fin->txd.callback_param;
-
- if (async_tx_test_ack(&d40d_fin->txd)) {
- d40_pool_lli_free(d40d_fin);
- d40_desc_remove(d40d_fin);
- /* Return desc to free-list */
- d40_desc_free(d40c, d40d_fin);
+ callback = d40d->txd.callback;
+ callback_param = d40d->txd.callback_param;
+
+ if (async_tx_test_ack(&d40d->txd)) {
+ d40_pool_lli_free(d40d);
+ d40_desc_remove(d40d);
+ d40_desc_free(d40c, d40d);
} else {
- if (!d40d_fin->is_in_client_list) {
- d40_desc_remove(d40d_fin);
- list_add_tail(&d40d_fin->node, &d40c->client);
- d40d_fin->is_in_client_list = true;
+ if (!d40d->is_in_client_list) {
+ d40_desc_remove(d40d);
+ d40_lcla_free_all(d40c, d40d);
+ list_add_tail(&d40d->node, &d40c->client);
+ d40d->is_in_client_list = true;
}
}
@@ -890,7 +1091,7 @@ static void dma_tasklet(unsigned long data)
spin_unlock_irqrestore(&d40c->lock, flags);
- if (callback)
+ if (callback && (d40d->txd.flags & DMA_PREP_INTERRUPT))
callback(callback_param);
return;
@@ -919,7 +1120,6 @@ static irqreturn_t d40_handle_interrupt(int irq, void *data)
int i;
u32 regs[ARRAY_SIZE(il)];
- u32 tmp;
u32 idx;
u32 row;
long chan = -1;
@@ -946,9 +1146,7 @@ static irqreturn_t d40_handle_interrupt(int irq, void *data)
idx = chan & (BITS_PER_LONG - 1);
/* ACK interrupt */
- tmp = readl(base->virtbase + il[row].clr);
- tmp |= 1 << idx;
- writel(tmp, base->virtbase + il[row].clr);
+ writel(1 << idx, base->virtbase + il[row].clr);
if (il[row].offset == D40_PHY_CHAN)
d40c = base->lookup_phy_chans[idx];
@@ -971,24 +1169,47 @@ static irqreturn_t d40_handle_interrupt(int irq, void *data)
return IRQ_HANDLED;
}
-
static int d40_validate_conf(struct d40_chan *d40c,
struct stedma40_chan_cfg *conf)
{
int res = 0;
u32 dst_event_group = D40_TYPE_TO_GROUP(conf->dst_dev_type);
u32 src_event_group = D40_TYPE_TO_GROUP(conf->src_dev_type);
- bool is_log = (conf->channel_type & STEDMA40_CHANNEL_IN_OPER_MODE)
- == STEDMA40_CHANNEL_IN_LOG_MODE;
+ bool is_log = conf->mode == STEDMA40_MODE_LOGICAL;
+
+ if (!conf->dir) {
+ dev_err(&d40c->chan.dev->device, "[%s] Invalid direction.\n",
+ __func__);
+ res = -EINVAL;
+ }
+
+ if (conf->dst_dev_type != STEDMA40_DEV_DST_MEMORY &&
+ d40c->base->plat_data->dev_tx[conf->dst_dev_type] == 0 &&
+ d40c->runtime_addr == 0) {
+
+ dev_err(&d40c->chan.dev->device,
+ "[%s] Invalid TX channel address (%d)\n",
+ __func__, conf->dst_dev_type);
+ res = -EINVAL;
+ }
+
+ if (conf->src_dev_type != STEDMA40_DEV_SRC_MEMORY &&
+ d40c->base->plat_data->dev_rx[conf->src_dev_type] == 0 &&
+ d40c->runtime_addr == 0) {
+ dev_err(&d40c->chan.dev->device,
+ "[%s] Invalid RX channel address (%d)\n",
+ __func__, conf->src_dev_type);
+ res = -EINVAL;
+ }
- if (d40c->dma_cfg.dir == STEDMA40_MEM_TO_PERIPH &&
+ if (conf->dir == STEDMA40_MEM_TO_PERIPH &&
dst_event_group == STEDMA40_DEV_DST_MEMORY) {
dev_err(&d40c->chan.dev->device, "[%s] Invalid dst\n",
__func__);
res = -EINVAL;
}
- if (d40c->dma_cfg.dir == STEDMA40_PERIPH_TO_MEM &&
+ if (conf->dir == STEDMA40_PERIPH_TO_MEM &&
src_event_group == STEDMA40_DEV_SRC_MEMORY) {
dev_err(&d40c->chan.dev->device, "[%s] Invalid src\n",
__func__);
@@ -1082,7 +1303,6 @@ static bool d40_alloc_mask_free(struct d40_phy_res *phy, bool is_src,
spin_lock_irqsave(&phy->lock, flags);
if (!log_event_line) {
- /* Physical interrupts are masked per physical full channel */
phy->allocated_dst = D40_ALLOC_FREE;
phy->allocated_src = D40_ALLOC_FREE;
is_free = true;
@@ -1119,10 +1339,7 @@ static int d40_allocate_channel(struct d40_chan *d40c)
int j;
int log_num;
bool is_src;
- bool is_log = (d40c->dma_cfg.channel_type &
- STEDMA40_CHANNEL_IN_OPER_MODE)
- == STEDMA40_CHANNEL_IN_LOG_MODE;
-
+ bool is_log = d40c->dma_cfg.mode == STEDMA40_MODE_LOGICAL;
phys = d40c->base->phy_res;
@@ -1251,7 +1468,6 @@ static int d40_free_dma(struct d40_chan *d40c)
list_for_each_entry_safe(d, _d, &d40c->client, node) {
d40_pool_lli_free(d);
d40_desc_remove(d);
- /* Return desc to free-list */
d40_desc_free(d40c, d);
}
@@ -1324,37 +1540,12 @@ static int d40_free_dma(struct d40_chan *d40c)
return res;
}
d40c->phy_chan = NULL;
- /* Invalidate channel type */
- d40c->dma_cfg.channel_type = 0;
+ d40c->configured = false;
d40c->base->lookup_phy_chans[phy->num] = NULL;
return 0;
}
-static int d40_pause(struct dma_chan *chan)
-{
- struct d40_chan *d40c =
- container_of(chan, struct d40_chan, chan);
- int res;
- unsigned long flags;
-
- spin_lock_irqsave(&d40c->lock, flags);
-
- res = d40_channel_execute_command(d40c, D40_DMA_SUSPEND_REQ);
- if (res == 0) {
- if (d40c->log_num != D40_PHY_CHAN) {
- d40_config_set_event(d40c, false);
- /* Resume the other logical channels if any */
- if (d40_chan_has_events(d40c))
- res = d40_channel_execute_command(d40c,
- D40_DMA_RUN);
- }
- }
-
- spin_unlock_irqrestore(&d40c->lock, flags);
- return res;
-}
-
static bool d40_is_paused(struct d40_chan *d40c)
{
bool is_paused = false;
@@ -1381,16 +1572,22 @@ static bool d40_is_paused(struct d40_chan *d40c)
}
if (d40c->dma_cfg.dir == STEDMA40_MEM_TO_PERIPH ||
- d40c->dma_cfg.dir == STEDMA40_MEM_TO_MEM)
+ d40c->dma_cfg.dir == STEDMA40_MEM_TO_MEM) {
event = D40_TYPE_TO_EVENT(d40c->dma_cfg.dst_dev_type);
- else if (d40c->dma_cfg.dir == STEDMA40_PERIPH_TO_MEM)
+ status = readl(d40c->base->virtbase + D40_DREG_PCBASE +
+ d40c->phy_chan->num * D40_DREG_PCDELTA +
+ D40_CHAN_REG_SDLNK);
+ } else if (d40c->dma_cfg.dir == STEDMA40_PERIPH_TO_MEM) {
event = D40_TYPE_TO_EVENT(d40c->dma_cfg.src_dev_type);
- else {
+ status = readl(d40c->base->virtbase + D40_DREG_PCBASE +
+ d40c->phy_chan->num * D40_DREG_PCDELTA +
+ D40_CHAN_REG_SSLNK);
+ } else {
dev_err(&d40c->chan.dev->device,
"[%s] Unknown direction\n", __func__);
goto _exit;
}
- status = d40_chan_has_events(d40c);
+
status = (status & D40_EVENTLINE_MASK(event)) >>
D40_EVENTLINE_POS(event);
@@ -1403,64 +1600,6 @@ _exit:
}
-static bool d40_tx_is_linked(struct d40_chan *d40c)
-{
- bool is_link;
-
- if (d40c->log_num != D40_PHY_CHAN)
- is_link = readl(&d40c->lcpa->lcsp3) & D40_MEM_LCSP3_DLOS_MASK;
- else
- is_link = readl(d40c->base->virtbase + D40_DREG_PCBASE +
- d40c->phy_chan->num * D40_DREG_PCDELTA +
- D40_CHAN_REG_SDLNK) &
- D40_SREG_LNK_PHYS_LNK_MASK;
- return is_link;
-}
-
-static u32 d40_residue(struct d40_chan *d40c)
-{
- u32 num_elt;
-
- if (d40c->log_num != D40_PHY_CHAN)
- num_elt = (readl(&d40c->lcpa->lcsp2) & D40_MEM_LCSP2_ECNT_MASK)
- >> D40_MEM_LCSP2_ECNT_POS;
- else
- num_elt = (readl(d40c->base->virtbase + D40_DREG_PCBASE +
- d40c->phy_chan->num * D40_DREG_PCDELTA +
- D40_CHAN_REG_SDELT) &
- D40_SREG_ELEM_PHY_ECNT_MASK) >>
- D40_SREG_ELEM_PHY_ECNT_POS;
- return num_elt * (1 << d40c->dma_cfg.dst_info.data_width);
-}
-
-static int d40_resume(struct dma_chan *chan)
-{
- struct d40_chan *d40c =
- container_of(chan, struct d40_chan, chan);
- int res = 0;
- unsigned long flags;
-
- spin_lock_irqsave(&d40c->lock, flags);
-
- if (d40c->base->rev == 0)
- if (d40c->log_num != D40_PHY_CHAN) {
- res = d40_channel_execute_command(d40c,
- D40_DMA_SUSPEND_REQ);
- goto no_suspend;
- }
-
- /* If bytes left to transfer or linked tx resume job */
- if (d40_residue(d40c) || d40_tx_is_linked(d40c)) {
- if (d40c->log_num != D40_PHY_CHAN)
- d40_config_set_event(d40c, true);
- res = d40_channel_execute_command(d40c, D40_DMA_RUN);
- }
-
-no_suspend:
- spin_unlock_irqrestore(&d40c->lock, flags);
- return res;
-}
-
static u32 stedma40_residue(struct dma_chan *chan)
{
struct d40_chan *d40c =
@@ -1475,51 +1614,6 @@ static u32 stedma40_residue(struct dma_chan *chan)
return bytes_left;
}
-/* Public DMA functions in addition to the DMA engine framework */
-
-int stedma40_set_psize(struct dma_chan *chan,
- int src_psize,
- int dst_psize)
-{
- struct d40_chan *d40c =
- container_of(chan, struct d40_chan, chan);
- unsigned long flags;
-
- spin_lock_irqsave(&d40c->lock, flags);
-
- if (d40c->log_num != D40_PHY_CHAN) {
- d40c->log_def.lcsp1 &= ~D40_MEM_LCSP1_SCFG_PSIZE_MASK;
- d40c->log_def.lcsp3 &= ~D40_MEM_LCSP1_SCFG_PSIZE_MASK;
- d40c->log_def.lcsp1 |= src_psize <<
- D40_MEM_LCSP1_SCFG_PSIZE_POS;
- d40c->log_def.lcsp3 |= dst_psize <<
- D40_MEM_LCSP1_SCFG_PSIZE_POS;
- goto out;
- }
-
- if (src_psize == STEDMA40_PSIZE_PHY_1)
- d40c->src_def_cfg &= ~(1 << D40_SREG_CFG_PHY_PEN_POS);
- else {
- d40c->src_def_cfg |= 1 << D40_SREG_CFG_PHY_PEN_POS;
- d40c->src_def_cfg &= ~(STEDMA40_PSIZE_PHY_16 <<
- D40_SREG_CFG_PSIZE_POS);
- d40c->src_def_cfg |= src_psize << D40_SREG_CFG_PSIZE_POS;
- }
-
- if (dst_psize == STEDMA40_PSIZE_PHY_1)
- d40c->dst_def_cfg &= ~(1 << D40_SREG_CFG_PHY_PEN_POS);
- else {
- d40c->dst_def_cfg |= 1 << D40_SREG_CFG_PHY_PEN_POS;
- d40c->dst_def_cfg &= ~(STEDMA40_PSIZE_PHY_16 <<
- D40_SREG_CFG_PSIZE_POS);
- d40c->dst_def_cfg |= dst_psize << D40_SREG_CFG_PSIZE_POS;
- }
-out:
- spin_unlock_irqrestore(&d40c->lock, flags);
- return 0;
-}
-EXPORT_SYMBOL(stedma40_set_psize);
-
struct dma_async_tx_descriptor *stedma40_memcpy_sg(struct dma_chan *chan,
struct scatterlist *sgl_dst,
struct scatterlist *sgl_src,
@@ -1545,21 +1639,10 @@ struct dma_async_tx_descriptor *stedma40_memcpy_sg(struct dma_chan *chan,
goto err;
d40d->lli_len = sgl_len;
- d40d->lli_tx_len = d40d->lli_len;
+ d40d->lli_current = 0;
d40d->txd.flags = dma_flags;
if (d40c->log_num != D40_PHY_CHAN) {
- if (d40d->lli_len > d40c->base->plat_data->llis_per_log)
- d40d->lli_tx_len = d40c->base->plat_data->llis_per_log;
-
- if (sgl_len > 1)
- /*
- * Check if there is space available in lcla. If not,
- * split list into 1-length and run only in lcpa
- * space.
- */
- if (d40_lcla_id_get(d40c) != 0)
- d40d->lli_tx_len = 1;
if (d40_pool_lli_alloc(d40d, sgl_len, true) < 0) {
dev_err(&d40c->chan.dev->device,
@@ -1567,27 +1650,17 @@ struct dma_async_tx_descriptor *stedma40_memcpy_sg(struct dma_chan *chan,
goto err;
}
- (void) d40_log_sg_to_lli(d40c->lcla.src_id,
- sgl_src,
+ (void) d40_log_sg_to_lli(sgl_src,
sgl_len,
d40d->lli_log.src,
d40c->log_def.lcsp1,
- d40c->dma_cfg.src_info.data_width,
- dma_flags & DMA_PREP_INTERRUPT,
- d40d->lli_tx_len,
- d40c->base->plat_data->llis_per_log);
+ d40c->dma_cfg.src_info.data_width);
- (void) d40_log_sg_to_lli(d40c->lcla.dst_id,
- sgl_dst,
+ (void) d40_log_sg_to_lli(sgl_dst,
sgl_len,
d40d->lli_log.dst,
d40c->log_def.lcsp3,
- d40c->dma_cfg.dst_info.data_width,
- dma_flags & DMA_PREP_INTERRUPT,
- d40d->lli_tx_len,
- d40c->base->plat_data->llis_per_log);
-
-
+ d40c->dma_cfg.dst_info.data_width);
} else {
if (d40_pool_lli_alloc(d40d, sgl_len, false) < 0) {
dev_err(&d40c->chan.dev->device,
@@ -1599,11 +1672,10 @@ struct dma_async_tx_descriptor *stedma40_memcpy_sg(struct dma_chan *chan,
sgl_len,
0,
d40d->lli_phy.src,
- d40d->lli_phy.src_addr,
+ virt_to_phys(d40d->lli_phy.src),
d40c->src_def_cfg,
d40c->dma_cfg.src_info.data_width,
- d40c->dma_cfg.src_info.psize,
- true);
+ d40c->dma_cfg.src_info.psize);
if (res < 0)
goto err;
@@ -1612,11 +1684,10 @@ struct dma_async_tx_descriptor *stedma40_memcpy_sg(struct dma_chan *chan,
sgl_len,
0,
d40d->lli_phy.dst,
- d40d->lli_phy.dst_addr,
+ virt_to_phys(d40d->lli_phy.dst),
d40c->dst_def_cfg,
d40c->dma_cfg.dst_info.data_width,
- d40c->dma_cfg.dst_info.psize,
- true);
+ d40c->dma_cfg.dst_info.psize);
if (res < 0)
goto err;
@@ -1633,6 +1704,8 @@ struct dma_async_tx_descriptor *stedma40_memcpy_sg(struct dma_chan *chan,
return &d40d->txd;
err:
+ if (d40d)
+ d40_desc_free(d40c, d40d);
spin_unlock_irqrestore(&d40c->lock, flags);
return NULL;
}
@@ -1652,6 +1725,9 @@ bool stedma40_filter(struct dma_chan *chan, void *data)
} else
err = d40_config_memcpy(d40c);
+ if (!err)
+ d40c->configured = true;
+
return err == 0;
}
EXPORT_SYMBOL(stedma40_filter);
@@ -1668,11 +1744,8 @@ static int d40_alloc_chan_resources(struct dma_chan *chan)
d40c->completed = chan->cookie = 1;
- /*
- * If no dma configuration is set (channel_type == 0)
- * use default configuration (memcpy)
- */
- if (d40c->dma_cfg.channel_type == 0) {
+ /* If no dma configuration is set, use the default configuration (memcpy) */
+ if (!d40c->configured) {
err = d40_config_memcpy(d40c);
if (err) {
dev_err(&d40c->chan.dev->device,
@@ -1712,14 +1785,8 @@ static int d40_alloc_chan_resources(struct dma_chan *chan)
* resource is free. In case of multiple logical channels
* on the same physical resource, only the first write is necessary.
*/
- if (is_free_phy) {
- err = d40_config_write(d40c);
- if (err) {
- dev_err(&d40c->chan.dev->device,
- "[%s] Failed to configure channel\n",
- __func__);
- }
- }
+ if (is_free_phy)
+ d40_config_write(d40c);
fail:
spin_unlock_irqrestore(&d40c->lock, flags);
return err;
@@ -1790,23 +1857,21 @@ static struct dma_async_tx_descriptor *d40_prep_memcpy(struct dma_chan *chan,
goto err;
}
d40d->lli_len = 1;
- d40d->lli_tx_len = 1;
+ d40d->lli_current = 0;
d40_log_fill_lli(d40d->lli_log.src,
src,
size,
- 0,
d40c->log_def.lcsp1,
d40c->dma_cfg.src_info.data_width,
- false, true);
+ true);
d40_log_fill_lli(d40d->lli_log.dst,
dst,
size,
- 0,
d40c->log_def.lcsp3,
d40c->dma_cfg.dst_info.data_width,
- true, true);
+ true);
} else {
@@ -1851,12 +1916,25 @@ static struct dma_async_tx_descriptor *d40_prep_memcpy(struct dma_chan *chan,
err_fill_lli:
dev_err(&d40c->chan.dev->device,
"[%s] Failed filling in PHY LLI\n", __func__);
- d40_pool_lli_free(d40d);
err:
+ if (d40d)
+ d40_desc_free(d40c, d40d);
spin_unlock_irqrestore(&d40c->lock, flags);
return NULL;
}
+static struct dma_async_tx_descriptor *
+d40_prep_sg(struct dma_chan *chan,
+ struct scatterlist *dst_sg, unsigned int dst_nents,
+ struct scatterlist *src_sg, unsigned int src_nents,
+ unsigned long dma_flags)
+{
+ if (dst_nents != src_nents)
+ return NULL;
+
+ return stedma40_memcpy_sg(chan, dst_sg, src_sg, dst_nents, dma_flags);
+}
+
static int d40_prep_slave_sg_log(struct d40_desc *d40d,
struct d40_chan *d40c,
struct scatterlist *sgl,
@@ -1874,19 +1952,7 @@ static int d40_prep_slave_sg_log(struct d40_desc *d40d,
}
d40d->lli_len = sg_len;
- if (d40d->lli_len <= d40c->base->plat_data->llis_per_log)
- d40d->lli_tx_len = d40d->lli_len;
- else
- d40d->lli_tx_len = d40c->base->plat_data->llis_per_log;
-
- if (sg_len > 1)
- /*
- * Check if there is space available in lcla.
- * If not, split list into 1-length and run only
- * in lcpa space.
- */
- if (d40_lcla_id_get(d40c) != 0)
- d40d->lli_tx_len = 1;
+ d40d->lli_current = 0;
if (direction == DMA_FROM_DEVICE)
if (d40c->runtime_addr)
@@ -1902,16 +1968,13 @@ static int d40_prep_slave_sg_log(struct d40_desc *d40d,
else
return -EINVAL;
- total_size = d40_log_sg_to_dev(&d40c->lcla,
- sgl, sg_len,
+ total_size = d40_log_sg_to_dev(sgl, sg_len,
&d40d->lli_log,
&d40c->log_def,
d40c->dma_cfg.src_info.data_width,
d40c->dma_cfg.dst_info.data_width,
direction,
- dma_flags & DMA_PREP_INTERRUPT,
- dev_addr, d40d->lli_tx_len,
- d40c->base->plat_data->llis_per_log);
+ dev_addr);
if (total_size < 0)
return -EINVAL;
@@ -1937,7 +2000,7 @@ static int d40_prep_slave_sg_phy(struct d40_desc *d40d,
}
d40d->lli_len = sgl_len;
- d40d->lli_tx_len = sgl_len;
+ d40d->lli_current = 0;
if (direction == DMA_FROM_DEVICE) {
dst_dev_addr = 0;
@@ -1958,11 +2021,10 @@ static int d40_prep_slave_sg_phy(struct d40_desc *d40d,
sgl_len,
src_dev_addr,
d40d->lli_phy.src,
- d40d->lli_phy.src_addr,
+ virt_to_phys(d40d->lli_phy.src),
d40c->src_def_cfg,
d40c->dma_cfg.src_info.data_width,
- d40c->dma_cfg.src_info.psize,
- true);
+ d40c->dma_cfg.src_info.psize);
if (res < 0)
return res;
@@ -1970,11 +2032,10 @@ static int d40_prep_slave_sg_phy(struct d40_desc *d40d,
sgl_len,
dst_dev_addr,
d40d->lli_phy.dst,
- d40d->lli_phy.dst_addr,
+ virt_to_phys(d40d->lli_phy.dst),
d40c->dst_def_cfg,
d40c->dma_cfg.dst_info.data_width,
- d40c->dma_cfg.dst_info.psize,
- true);
+ d40c->dma_cfg.dst_info.psize);
if (res < 0)
return res;
@@ -2001,17 +2062,11 @@ static struct dma_async_tx_descriptor *d40_prep_slave_sg(struct dma_chan *chan,
return ERR_PTR(-EINVAL);
}
- if (d40c->dma_cfg.pre_transfer)
- d40c->dma_cfg.pre_transfer(chan,
- d40c->dma_cfg.pre_transfer_data,
- sg_dma_len(sgl));
-
spin_lock_irqsave(&d40c->lock, flags);
d40d = d40_desc_get(d40c);
- spin_unlock_irqrestore(&d40c->lock, flags);
if (d40d == NULL)
- return NULL;
+ goto err;
if (d40c->log_num != D40_PHY_CHAN)
err = d40_prep_slave_sg_log(d40d, d40c, sgl, sg_len,
@@ -2024,7 +2079,7 @@ static struct dma_async_tx_descriptor *d40_prep_slave_sg(struct dma_chan *chan,
"[%s] Failed to prepare %s slave sg job: %d\n",
__func__,
d40c->log_num != D40_PHY_CHAN ? "log" : "phy", err);
- return NULL;
+ goto err;
}
d40d->txd.flags = dma_flags;
@@ -2033,7 +2088,14 @@ static struct dma_async_tx_descriptor *d40_prep_slave_sg(struct dma_chan *chan,
d40d->txd.tx_submit = d40_tx_submit;
+ spin_unlock_irqrestore(&d40c->lock, flags);
return &d40d->txd;
+
+err:
+ if (d40d)
+ d40_desc_free(d40c, d40d);
+ spin_unlock_irqrestore(&d40c->lock, flags);
+ return NULL;
}
static enum dma_status d40_tx_status(struct dma_chan *chan,
@@ -2166,25 +2228,43 @@ static void d40_set_runtime_config(struct dma_chan *chan,
return;
}
- if (config_maxburst >= 16)
- psize = STEDMA40_PSIZE_LOG_16;
- else if (config_maxburst >= 8)
- psize = STEDMA40_PSIZE_LOG_8;
- else if (config_maxburst >= 4)
- psize = STEDMA40_PSIZE_LOG_4;
- else
- psize = STEDMA40_PSIZE_LOG_1;
+ if (d40c->log_num != D40_PHY_CHAN) {
+ if (config_maxburst >= 16)
+ psize = STEDMA40_PSIZE_LOG_16;
+ else if (config_maxburst >= 8)
+ psize = STEDMA40_PSIZE_LOG_8;
+ else if (config_maxburst >= 4)
+ psize = STEDMA40_PSIZE_LOG_4;
+ else
+ psize = STEDMA40_PSIZE_LOG_1;
+ } else {
+ if (config_maxburst >= 16)
+ psize = STEDMA40_PSIZE_PHY_16;
+ else if (config_maxburst >= 8)
+ psize = STEDMA40_PSIZE_PHY_8;
+ else if (config_maxburst >= 4)
+ psize = STEDMA40_PSIZE_PHY_4;
+ else
+ psize = STEDMA40_PSIZE_PHY_1;
+ }
/* Set up all the endpoint configs */
cfg->src_info.data_width = addr_width;
cfg->src_info.psize = psize;
- cfg->src_info.endianess = STEDMA40_LITTLE_ENDIAN;
+ cfg->src_info.big_endian = false;
cfg->src_info.flow_ctrl = STEDMA40_NO_FLOW_CTRL;
cfg->dst_info.data_width = addr_width;
cfg->dst_info.psize = psize;
- cfg->dst_info.endianess = STEDMA40_LITTLE_ENDIAN;
+ cfg->dst_info.big_endian = false;
cfg->dst_info.flow_ctrl = STEDMA40_NO_FLOW_CTRL;
+ /* Fill in register values */
+ if (d40c->log_num != D40_PHY_CHAN)
+ d40_log_cfg(cfg, &d40c->log_def.lcsp1, &d40c->log_def.lcsp3);
+ else
+ d40_phy_cfg(cfg, &d40c->src_def_cfg,
+ &d40c->dst_def_cfg, false);
+
/* These settings will take precedence later */
d40c->runtime_addr = config_addr;
d40c->runtime_direction = config->direction;
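
Note (not part of the patch): d40_set_runtime_config() is reached through the dmaengine DMA_SLAVE_CONFIG control path, so the psize buckets above are driven by the maxburst a client requests. Roughly how a client would exercise it — a hedged sketch, with a made-up peripheral address, assuming the device_control() interface of this kernel generation:

struct dma_slave_config cfg = {
        .direction      = DMA_TO_DEVICE,
        .dst_addr       = 0x80120000,   /* hypothetical peripheral FIFO      */
        .dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES,
        .dst_maxburst   = 8,            /* >= 8 and < 16 -> PSIZE_*_8 bucket */
};

chan->device->device_control(chan, DMA_SLAVE_CONFIG, (unsigned long)&cfg);
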
@@ -2247,10 +2327,6 @@ static void __init d40_chan_init(struct d40_base *base, struct dma_device *dma,
d40c->base = base;
d40c->chan.device = dma;
- /* Invalidate lcla element */
- d40c->lcla.src_id = -1;
- d40c->lcla.dst_id = -1;
-
spin_lock_init(&d40c->lock);
d40c->log_num = D40_PHY_CHAN;
@@ -2281,6 +2357,7 @@ static int __init d40_dmaengine_init(struct d40_base *base,
base->dma_slave.device_alloc_chan_resources = d40_alloc_chan_resources;
base->dma_slave.device_free_chan_resources = d40_free_chan_resources;
base->dma_slave.device_prep_dma_memcpy = d40_prep_memcpy;
+ base->dma_slave.device_prep_dma_sg = d40_prep_sg;
base->dma_slave.device_prep_slave_sg = d40_prep_slave_sg;
base->dma_slave.device_tx_status = d40_tx_status;
base->dma_slave.device_issue_pending = d40_issue_pending;
@@ -2301,10 +2378,12 @@ static int __init d40_dmaengine_init(struct d40_base *base,
dma_cap_zero(base->dma_memcpy.cap_mask);
dma_cap_set(DMA_MEMCPY, base->dma_memcpy.cap_mask);
+ dma_cap_set(DMA_SG, base->dma_slave.cap_mask);
base->dma_memcpy.device_alloc_chan_resources = d40_alloc_chan_resources;
base->dma_memcpy.device_free_chan_resources = d40_free_chan_resources;
base->dma_memcpy.device_prep_dma_memcpy = d40_prep_memcpy;
+ base->dma_slave.device_prep_dma_sg = d40_prep_sg;
base->dma_memcpy.device_prep_slave_sg = d40_prep_slave_sg;
base->dma_memcpy.device_tx_status = d40_tx_status;
base->dma_memcpy.device_issue_pending = d40_issue_pending;
@@ -2331,10 +2410,12 @@ static int __init d40_dmaengine_init(struct d40_base *base,
dma_cap_zero(base->dma_both.cap_mask);
dma_cap_set(DMA_SLAVE, base->dma_both.cap_mask);
dma_cap_set(DMA_MEMCPY, base->dma_both.cap_mask);
+ dma_cap_set(DMA_SG, base->dma_slave.cap_mask);
base->dma_both.device_alloc_chan_resources = d40_alloc_chan_resources;
base->dma_both.device_free_chan_resources = d40_free_chan_resources;
base->dma_both.device_prep_dma_memcpy = d40_prep_memcpy;
+ base->dma_slave.device_prep_dma_sg = d40_prep_sg;
base->dma_both.device_prep_slave_sg = d40_prep_slave_sg;
base->dma_both.device_tx_status = d40_tx_status;
base->dma_both.device_issue_pending = d40_issue_pending;
@@ -2387,9 +2468,11 @@ static int __init d40_phy_res_init(struct d40_base *base)
/* Mark disabled channels as occupied */
for (i = 0; base->plat_data->disabled_channels[i] != -1; i++) {
- base->phy_res[i].allocated_src = D40_ALLOC_PHY;
- base->phy_res[i].allocated_dst = D40_ALLOC_PHY;
- num_phy_chans_avail--;
+ int chan = base->plat_data->disabled_channels[i];
+
+ base->phy_res[chan].allocated_src = D40_ALLOC_PHY;
+ base->phy_res[chan].allocated_dst = D40_ALLOC_PHY;
+ num_phy_chans_avail--;
}
dev_info(base->dev, "%d of %d physical DMA channels available\n",
@@ -2441,6 +2524,7 @@ static struct d40_base * __init d40_hw_detect_init(struct platform_device *pdev)
int num_phy_chans;
int i;
u32 val;
+ u32 rev;
clk = clk_get(&pdev->dev, NULL);
@@ -2479,21 +2563,26 @@ static struct d40_base * __init d40_hw_detect_init(struct platform_device *pdev)
}
}
- /* Get silicon revision */
+ /* Get silicon revision and designer */
val = readl(virtbase + D40_DREG_PERIPHID2);
- if ((val & 0xf) != D40_PERIPHID2_DESIGNER) {
+ if ((val & D40_DREG_PERIPHID2_DESIGNER_MASK) !=
+ D40_HW_DESIGNER) {
dev_err(&pdev->dev,
"[%s] Unknown designer! Got %x wanted %x\n",
- __func__, val & 0xf, D40_PERIPHID2_DESIGNER);
+ __func__, val & D40_DREG_PERIPHID2_DESIGNER_MASK,
+ D40_HW_DESIGNER);
goto failure;
}
+ rev = (val & D40_DREG_PERIPHID2_REV_MASK) >>
+ D40_DREG_PERIPHID2_REV_POS;
+
/* The number of physical channels on this HW */
num_phy_chans = 4 * (readl(virtbase + D40_DREG_ICFG) & 0x7) + 4;
dev_info(&pdev->dev, "hardware revision: %d @ 0x%x\n",
- (val >> 4) & 0xf, res->start);
+ rev, res->start);
plat_data = pdev->dev.platform_data;
@@ -2515,7 +2604,7 @@ static struct d40_base * __init d40_hw_detect_init(struct platform_device *pdev)
goto failure;
}
- base->rev = (val >> 4) & 0xf;
+ base->rev = rev;
base->clk = clk;
base->num_phy_chans = num_phy_chans;
base->num_log_chans = num_log_chans;
@@ -2549,7 +2638,10 @@ static struct d40_base * __init d40_hw_detect_init(struct platform_device *pdev)
if (!base->lookup_log_chans)
goto failure;
}
- base->lcla_pool.alloc_map = kzalloc(num_phy_chans * sizeof(u32),
+
+ base->lcla_pool.alloc_map = kzalloc(num_phy_chans *
+ sizeof(struct d40_desc *) *
+ D40_LCLA_LINK_PER_EVENT_GRP,
GFP_KERNEL);
if (!base->lcla_pool.alloc_map)
goto failure;
@@ -2563,7 +2655,7 @@ static struct d40_base * __init d40_hw_detect_init(struct platform_device *pdev)
return base;
failure:
- if (clk) {
+ if (!IS_ERR(clk)) {
clk_disable(clk);
clk_put(clk);
}
@@ -2700,8 +2792,10 @@ static int __init d40_lcla_allocate(struct d40_base *base)
if (i < MAX_LCLA_ALLOC_ATTEMPTS) {
base->lcla_pool.base = (void *)page_list[i];
} else {
- /* After many attempts, no succees with finding the correct
- * alignment try with allocating a big buffer */
+ /*
+ * After many attempts with no success finding the correct
+ * alignment, try allocating a big buffer.
+ */
dev_warn(base->dev,
"[%s] Failed to get %d pages @ 18 bit align.\n",
__func__, base->lcla_pool.pages);
@@ -2794,8 +2888,6 @@ static int __init d40_probe(struct platform_device *pdev)
spin_lock_init(&base->lcla_pool.lock);
- base->lcla_pool.num_blocks = base->num_phy_chans;
-
base->irq = platform_get_irq(pdev, 0);
ret = request_irq(base->irq, d40_handle_interrupt, 0, D40_NAME, base);
@@ -2823,8 +2915,9 @@ failure:
if (!base->lcla_pool.base_unaligned && base->lcla_pool.base)
free_pages((unsigned long)base->lcla_pool.base,
base->lcla_pool.pages);
- if (base->lcla_pool.base_unaligned)
- kfree(base->lcla_pool.base_unaligned);
+
+ kfree(base->lcla_pool.base_unaligned);
+
if (base->phy_lcpa)
release_mem_region(base->phy_lcpa,
base->lcpa_size);
diff --git a/drivers/dma/ste_dma40_ll.c b/drivers/dma/ste_dma40_ll.c
index d937f76d6e2e..8557cb88b255 100644
--- a/drivers/dma/ste_dma40_ll.c
+++ b/drivers/dma/ste_dma40_ll.c
@@ -1,10 +1,8 @@
/*
- * driver/dma/ste_dma40_ll.c
- *
- * Copyright (C) ST-Ericsson 2007-2010
+ * Copyright (C) ST-Ericsson SA 2007-2010
+ * Author: Per Friden <per.friden@stericsson.com> for ST-Ericsson
+ * Author: Jonas Aaberg <jonas.aberg@stericsson.com> for ST-Ericsson
* License terms: GNU General Public License (GPL) version 2
- * Author: Per Friden <per.friden@stericsson.com>
- * Author: Jonas Aaberg <jonas.aberg@stericsson.com>
*/
#include <linux/kernel.h>
@@ -39,16 +37,13 @@ void d40_log_cfg(struct stedma40_chan_cfg *cfg,
cfg->dir == STEDMA40_PERIPH_TO_PERIPH)
l3 |= 1 << D40_MEM_LCSP3_DCFG_MST_POS;
- l3 |= 1 << D40_MEM_LCSP3_DCFG_TIM_POS;
l3 |= 1 << D40_MEM_LCSP3_DCFG_EIM_POS;
l3 |= cfg->dst_info.psize << D40_MEM_LCSP3_DCFG_PSIZE_POS;
l3 |= cfg->dst_info.data_width << D40_MEM_LCSP3_DCFG_ESIZE_POS;
- l3 |= 1 << D40_MEM_LCSP3_DTCP_POS;
l1 |= 1 << D40_MEM_LCSP1_SCFG_EIM_POS;
l1 |= cfg->src_info.psize << D40_MEM_LCSP1_SCFG_PSIZE_POS;
l1 |= cfg->src_info.data_width << D40_MEM_LCSP1_SCFG_ESIZE_POS;
- l1 |= 1 << D40_MEM_LCSP1_STCP_POS;
*lcsp1 = l1;
*lcsp3 = l3;
@@ -113,13 +108,15 @@ void d40_phy_cfg(struct stedma40_chan_cfg *cfg,
src |= 1 << D40_SREG_CFG_LOG_GIM_POS;
}
- if (cfg->channel_type & STEDMA40_HIGH_PRIORITY_CHANNEL) {
+ if (cfg->high_priority) {
src |= 1 << D40_SREG_CFG_PRI_POS;
dst |= 1 << D40_SREG_CFG_PRI_POS;
}
- src |= cfg->src_info.endianess << D40_SREG_CFG_LBE_POS;
- dst |= cfg->dst_info.endianess << D40_SREG_CFG_LBE_POS;
+ if (cfg->src_info.big_endian)
+ src |= 1 << D40_SREG_CFG_LBE_POS;
+ if (cfg->dst_info.big_endian)
+ dst |= 1 << D40_SREG_CFG_LBE_POS;
*src_cfg = src;
*dst_cfg = dst;
@@ -197,8 +194,7 @@ int d40_phy_sg_to_lli(struct scatterlist *sg,
dma_addr_t lli_phys,
u32 reg_cfg,
u32 data_width,
- int psize,
- bool term_int)
+ int psize)
{
int total_size = 0;
int i;
@@ -238,7 +234,7 @@ int d40_phy_sg_to_lli(struct scatterlist *sg,
}
return total_size;
- err:
+err:
return err;
}
@@ -271,11 +267,59 @@ void d40_phy_lli_write(void __iomem *virtbase,
/* DMA logical lli operations */
+static void d40_log_lli_link(struct d40_log_lli *lli_dst,
+ struct d40_log_lli *lli_src,
+ int next)
+{
+ u32 slos = 0;
+ u32 dlos = 0;
+
+ if (next != -EINVAL) {
+ slos = next * 2;
+ dlos = next * 2 + 1;
+ } else {
+ lli_dst->lcsp13 |= D40_MEM_LCSP1_SCFG_TIM_MASK;
+ lli_dst->lcsp13 |= D40_MEM_LCSP3_DTCP_MASK;
+ }
+
+ lli_src->lcsp13 = (lli_src->lcsp13 & ~D40_MEM_LCSP1_SLOS_MASK) |
+ (slos << D40_MEM_LCSP1_SLOS_POS);
+
+ lli_dst->lcsp13 = (lli_dst->lcsp13 & ~D40_MEM_LCSP1_SLOS_MASK) |
+ (dlos << D40_MEM_LCSP1_SLOS_POS);
+}
+
+void d40_log_lli_lcpa_write(struct d40_log_lli_full *lcpa,
+ struct d40_log_lli *lli_dst,
+ struct d40_log_lli *lli_src,
+ int next)
+{
+ d40_log_lli_link(lli_dst, lli_src, next);
+
+ writel(lli_src->lcsp02, &lcpa[0].lcsp0);
+ writel(lli_src->lcsp13, &lcpa[0].lcsp1);
+ writel(lli_dst->lcsp02, &lcpa[0].lcsp2);
+ writel(lli_dst->lcsp13, &lcpa[0].lcsp3);
+}
+
+void d40_log_lli_lcla_write(struct d40_log_lli *lcla,
+ struct d40_log_lli *lli_dst,
+ struct d40_log_lli *lli_src,
+ int next)
+{
+ d40_log_lli_link(lli_dst, lli_src, next);
+
+ writel(lli_src->lcsp02, &lcla[0].lcsp02);
+ writel(lli_src->lcsp13, &lcla[0].lcsp13);
+ writel(lli_dst->lcsp02, &lcla[1].lcsp02);
+ writel(lli_dst->lcsp13, &lcla[1].lcsp13);
+}
+
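Note (not part of the patch): d40_log_lli_link() encodes the chain position rather than a pointer — LCLA slot n stores the src lli at entry 2*n and the dst lli at 2*n + 1, while a next of -EINVAL terminates the chain by setting the TIM/TCP bits instead. A small illustration of the index encoding:

int next = 3;                   /* next LCLA slot, or -EINVAL to terminate */
u32 slos = next * 2;            /* src link offset -> entry 6              */
u32 dlos = next * 2 + 1;        /* dst link offset -> entry 7              */
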
void d40_log_fill_lli(struct d40_log_lli *lli,
dma_addr_t data, u32 data_size,
- u32 lli_next_off, u32 reg_cfg,
+ u32 reg_cfg,
u32 data_width,
- bool term_int, bool addr_inc)
+ bool addr_inc)
{
lli->lcsp13 = reg_cfg;
@@ -290,165 +334,69 @@ void d40_log_fill_lli(struct d40_log_lli *lli,
if (addr_inc)
lli->lcsp13 |= D40_MEM_LCSP1_SCFG_INCR_MASK;
- lli->lcsp13 |= D40_MEM_LCSP3_DTCP_MASK;
- /* If this scatter list entry is the last one, no next link */
- lli->lcsp13 |= (lli_next_off << D40_MEM_LCSP1_SLOS_POS) &
- D40_MEM_LCSP1_SLOS_MASK;
-
- if (term_int)
- lli->lcsp13 |= D40_MEM_LCSP1_SCFG_TIM_MASK;
- else
- lli->lcsp13 &= ~D40_MEM_LCSP1_SCFG_TIM_MASK;
}
-int d40_log_sg_to_dev(struct d40_lcla_elem *lcla,
- struct scatterlist *sg,
+int d40_log_sg_to_dev(struct scatterlist *sg,
int sg_len,
struct d40_log_lli_bidir *lli,
struct d40_def_lcsp *lcsp,
u32 src_data_width,
u32 dst_data_width,
enum dma_data_direction direction,
- bool term_int, dma_addr_t dev_addr, int max_len,
- int llis_per_log)
+ dma_addr_t dev_addr)
{
int total_size = 0;
struct scatterlist *current_sg = sg;
int i;
- u32 next_lli_off_dst = 0;
- u32 next_lli_off_src = 0;
for_each_sg(sg, current_sg, sg_len, i) {
total_size += sg_dma_len(current_sg);
- /*
- * If this scatter list entry is the last one or
- * max length, terminate link.
- */
- if (sg_len - 1 == i || ((i+1) % max_len == 0)) {
- next_lli_off_src = 0;
- next_lli_off_dst = 0;
- } else {
- if (next_lli_off_dst == 0 &&
- next_lli_off_src == 0) {
- /* The first lli will be at next_lli_off */
- next_lli_off_dst = (lcla->dst_id *
- llis_per_log + 1);
- next_lli_off_src = (lcla->src_id *
- llis_per_log + 1);
- } else {
- next_lli_off_dst++;
- next_lli_off_src++;
- }
- }
-
if (direction == DMA_TO_DEVICE) {
d40_log_fill_lli(&lli->src[i],
sg_phys(current_sg),
sg_dma_len(current_sg),
- next_lli_off_src,
lcsp->lcsp1, src_data_width,
- false,
true);
d40_log_fill_lli(&lli->dst[i],
dev_addr,
sg_dma_len(current_sg),
- next_lli_off_dst,
lcsp->lcsp3, dst_data_width,
- /* No next == terminal interrupt */
- term_int && !next_lli_off_dst,
false);
} else {
d40_log_fill_lli(&lli->dst[i],
sg_phys(current_sg),
sg_dma_len(current_sg),
- next_lli_off_dst,
lcsp->lcsp3, dst_data_width,
- /* No next == terminal interrupt */
- term_int && !next_lli_off_dst,
true);
d40_log_fill_lli(&lli->src[i],
dev_addr,
sg_dma_len(current_sg),
- next_lli_off_src,
lcsp->lcsp1, src_data_width,
- false,
false);
}
}
return total_size;
}
-int d40_log_sg_to_lli(int lcla_id,
- struct scatterlist *sg,
+int d40_log_sg_to_lli(struct scatterlist *sg,
int sg_len,
struct d40_log_lli *lli_sg,
u32 lcsp13, /* src or dst*/
- u32 data_width,
- bool term_int, int max_len, int llis_per_log)
+ u32 data_width)
{
int total_size = 0;
struct scatterlist *current_sg = sg;
int i;
- u32 next_lli_off = 0;
for_each_sg(sg, current_sg, sg_len, i) {
total_size += sg_dma_len(current_sg);
- /*
- * If this scatter list entry is the last one or
- * max length, terminate link.
- */
- if (sg_len - 1 == i || ((i+1) % max_len == 0))
- next_lli_off = 0;
- else {
- if (next_lli_off == 0)
- /* The first lli will be at next_lli_off */
- next_lli_off = lcla_id * llis_per_log + 1;
- else
- next_lli_off++;
- }
-
d40_log_fill_lli(&lli_sg[i],
sg_phys(current_sg),
sg_dma_len(current_sg),
- next_lli_off,
lcsp13, data_width,
- term_int && !next_lli_off,
true);
}
return total_size;
}
-
-int d40_log_lli_write(struct d40_log_lli_full *lcpa,
- struct d40_log_lli *lcla_src,
- struct d40_log_lli *lcla_dst,
- struct d40_log_lli *lli_dst,
- struct d40_log_lli *lli_src,
- int llis_per_log)
-{
- u32 slos;
- u32 dlos;
- int i;
-
- writel(lli_src->lcsp02, &lcpa->lcsp0);
- writel(lli_src->lcsp13, &lcpa->lcsp1);
- writel(lli_dst->lcsp02, &lcpa->lcsp2);
- writel(lli_dst->lcsp13, &lcpa->lcsp3);
-
- slos = lli_src->lcsp13 & D40_MEM_LCSP1_SLOS_MASK;
- dlos = lli_dst->lcsp13 & D40_MEM_LCSP3_DLOS_MASK;
-
- for (i = 0; (i < llis_per_log) && slos && dlos; i++) {
- writel(lli_src[i + 1].lcsp02, &lcla_src[i].lcsp02);
- writel(lli_src[i + 1].lcsp13, &lcla_src[i].lcsp13);
- writel(lli_dst[i + 1].lcsp02, &lcla_dst[i].lcsp02);
- writel(lli_dst[i + 1].lcsp13, &lcla_dst[i].lcsp13);
-
- slos = lli_src[i + 1].lcsp13 & D40_MEM_LCSP1_SLOS_MASK;
- dlos = lli_dst[i + 1].lcsp13 & D40_MEM_LCSP3_DLOS_MASK;
- }
-
- return i;
-
-}
diff --git a/drivers/dma/ste_dma40_ll.h b/drivers/dma/ste_dma40_ll.h
index 9c0fa2f5fe57..9e419b907544 100644
--- a/drivers/dma/ste_dma40_ll.h
+++ b/drivers/dma/ste_dma40_ll.h
@@ -1,10 +1,8 @@
/*
- * driver/dma/ste_dma40_ll.h
- *
- * Copyright (C) ST-Ericsson 2007-2010
+ * Copyright (C) ST-Ericsson SA 2007-2010
+ * Author: Per Friden <per.friden@stericsson.com> for ST-Ericsson SA
+ * Author: Jonas Aaberg <jonas.aberg@stericsson.com> for ST-Ericsson SA
* License terms: GNU General Public License (GPL) version 2
- * Author: Per Friden <per.friden@stericsson.com>
- * Author: Jonas Aaberg <jonas.aberg@stericsson.com>
*/
#ifndef STE_DMA40_LL_H
#define STE_DMA40_LL_H
@@ -132,6 +130,13 @@
#define D40_DREG_PRMSO 0x014
#define D40_DREG_PRMOE 0x018
#define D40_DREG_PRMOO 0x01C
+#define D40_DREG_PRMO_PCHAN_BASIC 0x1
+#define D40_DREG_PRMO_PCHAN_MODULO 0x2
+#define D40_DREG_PRMO_PCHAN_DOUBLE_DST 0x3
+#define D40_DREG_PRMO_LCHAN_SRC_PHY_DST_LOG 0x1
+#define D40_DREG_PRMO_LCHAN_SRC_LOG_DST_PHY 0x2
+#define D40_DREG_PRMO_LCHAN_SRC_LOG_DST_LOG 0x3
+
#define D40_DREG_LCPA 0x020
#define D40_DREG_LCLA 0x024
#define D40_DREG_ACTIVE 0x050
@@ -163,6 +168,9 @@
#define D40_DREG_PERIPHID0 0xFE0
#define D40_DREG_PERIPHID1 0xFE4
#define D40_DREG_PERIPHID2 0xFE8
+#define D40_DREG_PERIPHID2_REV_POS 4
+#define D40_DREG_PERIPHID2_REV_MASK (0xf << D40_DREG_PERIPHID2_REV_POS)
+#define D40_DREG_PERIPHID2_DESIGNER_MASK 0xf
#define D40_DREG_PERIPHID3 0xFEC
#define D40_DREG_CELLID0 0xFF0
#define D40_DREG_CELLID1 0xFF4
@@ -199,8 +207,6 @@ struct d40_phy_lli {
*
* @src: Register settings for src channel.
* @dst: Register settings for dst channel.
- * @dst_addr: Physical destination address.
- * @src_addr: Physical source address.
*
* All DMA transfers have a source and a destination.
*/
@@ -208,8 +214,6 @@ struct d40_phy_lli {
struct d40_phy_lli_bidir {
struct d40_phy_lli *src;
struct d40_phy_lli *dst;
- dma_addr_t dst_addr;
- dma_addr_t src_addr;
};
@@ -271,29 +275,16 @@ struct d40_def_lcsp {
u32 lcsp1;
};
-/**
- * struct d40_lcla_elem - Info for one LCA element.
- *
- * @src_id: logical channel src id
- * @dst_id: logical channel dst id
- * @src: LCPA formated src parameters
- * @dst: LCPA formated dst parameters
- *
- */
-struct d40_lcla_elem {
- int src_id;
- int dst_id;
- struct d40_log_lli *src;
- struct d40_log_lli *dst;
-};
-
/* Physical channels */
void d40_phy_cfg(struct stedma40_chan_cfg *cfg,
- u32 *src_cfg, u32 *dst_cfg, bool is_log);
+ u32 *src_cfg,
+ u32 *dst_cfg,
+ bool is_log);
void d40_log_cfg(struct stedma40_chan_cfg *cfg,
- u32 *lcsp1, u32 *lcsp2);
+ u32 *lcsp1,
+ u32 *lcsp2);
int d40_phy_sg_to_lli(struct scatterlist *sg,
int sg_len,
@@ -302,8 +293,7 @@ int d40_phy_sg_to_lli(struct scatterlist *sg,
dma_addr_t lli_phys,
u32 reg_cfg,
u32 data_width,
- int psize,
- bool term_int);
+ int psize);
int d40_phy_fill_lli(struct d40_phy_lli *lli,
dma_addr_t data,
@@ -323,35 +313,35 @@ void d40_phy_lli_write(void __iomem *virtbase,
/* Logical channels */
void d40_log_fill_lli(struct d40_log_lli *lli,
- dma_addr_t data, u32 data_size,
- u32 lli_next_off, u32 reg_cfg,
+ dma_addr_t data,
+ u32 data_size,
+ u32 reg_cfg,
u32 data_width,
- bool term_int, bool addr_inc);
+ bool addr_inc);
-int d40_log_sg_to_dev(struct d40_lcla_elem *lcla,
- struct scatterlist *sg,
+int d40_log_sg_to_dev(struct scatterlist *sg,
int sg_len,
struct d40_log_lli_bidir *lli,
struct d40_def_lcsp *lcsp,
u32 src_data_width,
u32 dst_data_width,
enum dma_data_direction direction,
- bool term_int, dma_addr_t dev_addr, int max_len,
- int llis_per_log);
-
-int d40_log_lli_write(struct d40_log_lli_full *lcpa,
- struct d40_log_lli *lcla_src,
- struct d40_log_lli *lcla_dst,
- struct d40_log_lli *lli_dst,
- struct d40_log_lli *lli_src,
- int llis_per_log);
-
-int d40_log_sg_to_lli(int lcla_id,
- struct scatterlist *sg,
+ dma_addr_t dev_addr);
+
+int d40_log_sg_to_lli(struct scatterlist *sg,
int sg_len,
struct d40_log_lli *lli_sg,
u32 lcsp13, /* src or dst*/
- u32 data_width,
- bool term_int, int max_len, int llis_per_log);
+ u32 data_width);
+
+void d40_log_lli_lcpa_write(struct d40_log_lli_full *lcpa,
+ struct d40_log_lli *lli_dst,
+ struct d40_log_lli *lli_src,
+ int next);
+
+void d40_log_lli_lcla_write(struct d40_log_lli *lcla,
+ struct d40_log_lli *lli_dst,
+ struct d40_log_lli *lli_src,
+ int next);
#endif /* STE_DMA40_LLI_H */
diff --git a/drivers/dma/timb_dma.c b/drivers/dma/timb_dma.c
index 2ec1ed56f204..3b88a4e7c98a 100644
--- a/drivers/dma/timb_dma.c
+++ b/drivers/dma/timb_dma.c
@@ -759,7 +759,7 @@ static int __devinit td_probe(struct platform_device *pdev)
pdata->channels + i;
/* even channels are RX, odd are TX */
- if (((i % 2) && pchan->rx) || (!(i % 2) && !pchan->rx)) {
+ if ((i % 2) == pchan->rx) {
dev_err(&pdev->dev, "Wrong channel configuration\n");
err = -EINVAL;
goto err_tasklet_kill;
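
Note (not part of the patch): the simplified channel check in timb_dma is equivalent to the old compound expression as long as pchan->rx is a strict 0/1 bool; both flag an error exactly when a channel's parity contradicts the "even = RX, odd = TX" rule:

/*
 *  i % 2 | pchan->rx | old error expr | (i % 2) == pchan->rx
 *    0   |     0     |       1        |       1   (even channel not RX)
 *    0   |     1     |       0        |       0
 *    1   |     0     |       0        |       0
 *    1   |     1     |       1        |       1   (odd channel marked RX)
 */
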
diff --git a/drivers/edac/Makefile b/drivers/edac/Makefile
index b3781399b38a..ba2898b3639b 100644
--- a/drivers/edac/Makefile
+++ b/drivers/edac/Makefile
@@ -10,16 +10,16 @@ obj-$(CONFIG_EDAC) := edac_stub.o
obj-$(CONFIG_EDAC_MM_EDAC) += edac_core.o
obj-$(CONFIG_EDAC_MCE) += edac_mce.o
-edac_core-objs := edac_mc.o edac_device.o edac_mc_sysfs.o edac_pci_sysfs.o
-edac_core-objs += edac_module.o edac_device_sysfs.o
+edac_core-y := edac_mc.o edac_device.o edac_mc_sysfs.o edac_pci_sysfs.o
+edac_core-y += edac_module.o edac_device_sysfs.o
ifdef CONFIG_PCI
-edac_core-objs += edac_pci.o edac_pci_sysfs.o
+edac_core-y += edac_pci.o edac_pci_sysfs.o
endif
obj-$(CONFIG_EDAC_MCE_INJ) += mce_amd_inj.o
-edac_mce_amd-objs := mce_amd.o
+edac_mce_amd-y := mce_amd.o
obj-$(CONFIG_EDAC_DECODE_MCE) += edac_mce_amd.o
obj-$(CONFIG_EDAC_AMD76X) += amd76x_edac.o
diff --git a/drivers/edac/edac_core.h b/drivers/edac/edac_core.h
index ce7146677e9b..d7ca43a828bd 100644
--- a/drivers/edac/edac_core.h
+++ b/drivers/edac/edac_core.h
@@ -42,8 +42,10 @@
#if PAGE_SHIFT < 20
#define PAGES_TO_MiB( pages ) ( ( pages ) >> ( 20 - PAGE_SHIFT ) )
+#define MiB_TO_PAGES(mb) ((mb) << (20 - PAGE_SHIFT))
#else /* PAGE_SHIFT > 20 */
#define PAGES_TO_MiB( pages ) ( ( pages ) << ( PAGE_SHIFT - 20 ) )
+#define MiB_TO_PAGES(mb) ((mb) >> (PAGE_SHIFT - 20))
#endif
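
Note (not part of the patch): with the usual 4 KiB pages (PAGE_SHIFT = 12) the two macros are inverse shifts by 20 - 12 = 8 bits, i.e. one MiB is 256 pages. A quick round trip as a sanity check:

/* PAGE_SHIFT = 12 assumed for the example. */
unsigned long pages = MiB_TO_PAGES(16);         /* 16 << 8 = 4096 pages */
unsigned long mib   = PAGES_TO_MiB(pages);      /* 4096 >> 8 = 16 MiB   */
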
#define edac_printk(level, prefix, fmt, arg...) \
@@ -328,7 +330,7 @@ struct csrow_info {
struct mcidev_sysfs_group {
const char *name; /* group name */
- struct mcidev_sysfs_attribute *mcidev_attr; /* group attributes */
+ const struct mcidev_sysfs_attribute *mcidev_attr; /* group attributes */
};
struct mcidev_sysfs_group_kobj {
@@ -336,7 +338,7 @@ struct mcidev_sysfs_group_kobj {
struct kobject kobj; /* kobj for the group */
- struct mcidev_sysfs_group *grp; /* group description table */
+ const struct mcidev_sysfs_group *grp; /* group description table */
struct mem_ctl_info *mci; /* the parent */
};
@@ -347,7 +349,7 @@ struct mcidev_sysfs_group_kobj {
struct mcidev_sysfs_attribute {
/* It should use either attr or grp */
struct attribute attr;
- struct mcidev_sysfs_group *grp; /* Points to a group of attributes */
+ const struct mcidev_sysfs_group *grp; /* Points to a group of attributes */
/* Ops for show/store values at the attribute - not used on group */
ssize_t (*show)(struct mem_ctl_info *,char *);
@@ -440,7 +442,7 @@ struct mem_ctl_info {
* If attributes are desired, then set to array of attributes
* If no attributes are desired, leave NULL
*/
- struct mcidev_sysfs_attribute *mc_driver_sysfs_attributes;
+ const struct mcidev_sysfs_attribute *mc_driver_sysfs_attributes;
/* work struct for this MC */
struct delayed_work work;
@@ -810,6 +812,7 @@ extern struct mem_ctl_info *edac_mc_alloc(unsigned sz_pvt, unsigned nr_csrows,
extern int edac_mc_add_mc(struct mem_ctl_info *mci);
extern void edac_mc_free(struct mem_ctl_info *mci);
extern struct mem_ctl_info *edac_mc_find(int idx);
+extern struct mem_ctl_info *find_mci_by_dev(struct device *dev);
extern struct mem_ctl_info *edac_mc_del_mc(struct device *dev);
extern int edac_mc_find_csrow_by_page(struct mem_ctl_info *mci,
unsigned long page);
diff --git a/drivers/edac/edac_mc.c b/drivers/edac/edac_mc.c
index 6b21e25f7a84..ba6586a69ccc 100644
--- a/drivers/edac/edac_mc.c
+++ b/drivers/edac/edac_mc.c
@@ -207,6 +207,7 @@ struct mem_ctl_info *edac_mc_alloc(unsigned sz_pvt, unsigned nr_csrows,
}
mci->op_state = OP_ALLOC;
+ INIT_LIST_HEAD(&mci->grp_kobj_list);
/*
* Initialize the 'root' kobj for the edac_mc controller
@@ -234,18 +235,24 @@ EXPORT_SYMBOL_GPL(edac_mc_alloc);
*/
void edac_mc_free(struct mem_ctl_info *mci)
{
+ debugf1("%s()\n", __func__);
+
edac_mc_unregister_sysfs_main_kobj(mci);
+
+ /* free the mci instance memory here */
+ kfree(mci);
}
EXPORT_SYMBOL_GPL(edac_mc_free);
-/*
+/**
* find_mci_by_dev
*
* scan list of controllers looking for the one that manages
* the 'dev' device
+ * @dev: pointer to a struct device related to the MCI
*/
-static struct mem_ctl_info *find_mci_by_dev(struct device *dev)
+struct mem_ctl_info *find_mci_by_dev(struct device *dev)
{
struct mem_ctl_info *mci;
struct list_head *item;
@@ -261,6 +268,7 @@ static struct mem_ctl_info *find_mci_by_dev(struct device *dev)
return NULL;
}
+EXPORT_SYMBOL_GPL(find_mci_by_dev);
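
Note (not part of the patch): exporting find_mci_by_dev() lets other EDAC modules translate a struct device back into its memory controller instance. A hypothetical caller — the error-reporting call is only an example of something that takes an mci, assuming edac_core.h and linux/platform_device.h:

static void example_report(struct platform_device *pdev)
{
        struct mem_ctl_info *mci = find_mci_by_dev(&pdev->dev);

        if (mci)
                edac_mc_handle_ce_no_info(mci, "example corrected-error log");
}
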
/*
* handler for EDAC to check if NMI type handler has asserted interrupt
diff --git a/drivers/edac/edac_mc_sysfs.c b/drivers/edac/edac_mc_sysfs.c
index a4135860149b..dce61f7ba38b 100644
--- a/drivers/edac/edac_mc_sysfs.c
+++ b/drivers/edac/edac_mc_sysfs.c
@@ -631,9 +631,6 @@ static void edac_mci_control_release(struct kobject *kobj)
/* decrement the module ref count */
module_put(mci->owner);
-
- /* free the mci instance memory here */
- kfree(mci);
}
static struct kobj_type ktype_mci = {
@@ -713,6 +710,8 @@ fail_out:
*/
void edac_mc_unregister_sysfs_main_kobj(struct mem_ctl_info *mci)
{
+ debugf1("%s()\n", __func__);
+
/* delete the kobj from the mc_kset */
kobject_put(&mci->edac_mci_kobj);
}
@@ -760,8 +759,6 @@ static void edac_inst_grp_release(struct kobject *kobj)
grp = container_of(kobj, struct mcidev_sysfs_group_kobj, kobj);
mci = grp->mci;
-
- kobject_put(&mci->edac_mci_kobj);
}
/* Intermediate show/store table */
@@ -784,7 +781,7 @@ static struct kobj_type ktype_inst_grp = {
* object tree.
*/
static int edac_create_mci_instance_attributes(struct mem_ctl_info *mci,
- struct mcidev_sysfs_attribute *sysfs_attrib,
+ const struct mcidev_sysfs_attribute *sysfs_attrib,
struct kobject *kobj)
{
int err;
@@ -792,6 +789,7 @@ static int edac_create_mci_instance_attributes(struct mem_ctl_info *mci,
debugf1("%s()\n", __func__);
while (sysfs_attrib) {
+ debugf1("%s() sysfs_attrib = %p\n",__func__, sysfs_attrib);
if (sysfs_attrib->grp) {
struct mcidev_sysfs_group_kobj *grp_kobj;
@@ -799,10 +797,9 @@ static int edac_create_mci_instance_attributes(struct mem_ctl_info *mci,
if (!grp_kobj)
return -ENOMEM;
- list_add_tail(&grp_kobj->list, &mci->grp_kobj_list);
-
grp_kobj->grp = sysfs_attrib->grp;
grp_kobj->mci = mci;
+ list_add_tail(&grp_kobj->list, &mci->grp_kobj_list);
debugf0("%s() grp %s, mci %p\n", __func__,
sysfs_attrib->grp->name, mci);
@@ -811,26 +808,28 @@ static int edac_create_mci_instance_attributes(struct mem_ctl_info *mci,
&ktype_inst_grp,
&mci->edac_mci_kobj,
sysfs_attrib->grp->name);
- if (err)
+ if (err < 0) {
+ printk(KERN_ERR "kobject_init_and_add failed: %d\n", err);
return err;
-
+ }
err = edac_create_mci_instance_attributes(mci,
grp_kobj->grp->mcidev_attr,
&grp_kobj->kobj);
- if (err)
+ if (err < 0)
return err;
} else if (sysfs_attrib->attr.name) {
debugf0("%s() file %s\n", __func__,
sysfs_attrib->attr.name);
err = sysfs_create_file(kobj, &sysfs_attrib->attr);
+ if (err < 0) {
+ printk(KERN_ERR "sysfs_create_file failed: %d\n", err);
+ return err;
+ }
} else
break;
- if (err) {
- return err;
- }
sysfs_attrib++;
}
@@ -843,7 +842,7 @@ static int edac_create_mci_instance_attributes(struct mem_ctl_info *mci,
* directory of this mci instance.
*/
static void edac_remove_mci_instance_attributes(struct mem_ctl_info *mci,
- struct mcidev_sysfs_attribute *sysfs_attrib,
+ const struct mcidev_sysfs_attribute *sysfs_attrib,
struct kobject *kobj, int count)
{
struct mcidev_sysfs_group_kobj *grp_kobj, *tmp;
@@ -855,13 +854,24 @@ static void edac_remove_mci_instance_attributes(struct mem_ctl_info *mci,
* Remove first all the attributes
*/
while (sysfs_attrib) {
+ debugf1("%s() sysfs_attrib = %p\n",__func__, sysfs_attrib);
if (sysfs_attrib->grp) {
- list_for_each_entry(grp_kobj, &mci->grp_kobj_list,
- list)
- if (grp_kobj->grp == sysfs_attrib->grp)
+ debugf1("%s() seeking for group %s\n",
+ __func__, sysfs_attrib->grp->name);
+ list_for_each_entry(grp_kobj,
+ &mci->grp_kobj_list, list) {
+ debugf1("%s() grp_kobj->grp = %p\n",__func__, grp_kobj->grp);
+ if (grp_kobj->grp == sysfs_attrib->grp) {
edac_remove_mci_instance_attributes(mci,
grp_kobj->grp->mcidev_attr,
&grp_kobj->kobj, count + 1);
+ debugf0("%s() group %s\n", __func__,
+ sysfs_attrib->grp->name);
+ kobject_put(&grp_kobj->kobj);
+ }
+ }
+ debugf1("%s() end of seeking for group %s\n",
+ __func__, sysfs_attrib->grp->name);
} else if (sysfs_attrib->attr.name) {
debugf0("%s() file %s\n", __func__,
sysfs_attrib->attr.name);
@@ -871,15 +881,14 @@ static void edac_remove_mci_instance_attributes(struct mem_ctl_info *mci,
sysfs_attrib++;
}
- /*
- * Now that all attributes got removed, it is save to remove all groups
- */
- if (!count)
- list_for_each_entry_safe(grp_kobj, tmp, &mci->grp_kobj_list,
- list) {
- debugf0("%s() grp %s\n", __func__, grp_kobj->grp->name);
- kobject_put(&grp_kobj->kobj);
- }
+ /* Remove the group objects */
+ if (count)
+ return;
+ list_for_each_entry_safe(grp_kobj, tmp,
+ &mci->grp_kobj_list, list) {
+ list_del(&grp_kobj->list);
+ kfree(grp_kobj);
+ }
}
@@ -971,6 +980,7 @@ void edac_remove_sysfs_mci_device(struct mem_ctl_info *mci)
debugf0("%s()\n", __func__);
/* remove all csrow kobjects */
+ debugf0("%s() unregister this mci kobj\n", __func__);
for (i = 0; i < mci->nr_csrows; i++) {
if (mci->csrows[i].nr_pages > 0) {
debugf0("%s() unreg csrow-%d\n", __func__, i);
@@ -978,20 +988,20 @@ void edac_remove_sysfs_mci_device(struct mem_ctl_info *mci)
}
}
- debugf0("%s() remove_link\n", __func__);
+ /* remove this mci instance's attributes */
+ if (mci->mc_driver_sysfs_attributes) {
+ debugf0("%s() unregister mci private attributes\n", __func__);
+ edac_remove_mci_instance_attributes(mci,
+ mci->mc_driver_sysfs_attributes,
+ &mci->edac_mci_kobj, 0);
+ }
/* remove the symlink */
+ debugf0("%s() remove_link\n", __func__);
sysfs_remove_link(&mci->edac_mci_kobj, EDAC_DEVICE_SYMLINK);
- debugf0("%s() remove_mci_instance\n", __func__);
-
- /* remove this mci instance's attribtes */
- edac_remove_mci_instance_attributes(mci,
- mci->mc_driver_sysfs_attributes,
- &mci->edac_mci_kobj, 0);
- debugf0("%s() unregister this mci kobj\n", __func__);
-
/* unregister this instance's kobject */
+ debugf0("%s() remove_mci_instance\n", __func__);
kobject_put(&mci->edac_mci_kobj);
}
diff --git a/drivers/edac/i7core_edac.c b/drivers/edac/i7core_edac.c
index 0fd5b85a0f75..362861c15779 100644
--- a/drivers/edac/i7core_edac.c
+++ b/drivers/edac/i7core_edac.c
@@ -39,6 +39,14 @@
#include "edac_core.h"
+/* Static vars */
+static LIST_HEAD(i7core_edac_list);
+static DEFINE_MUTEX(i7core_edac_lock);
+static int probed;
+
+static int use_pci_fixup;
+module_param(use_pci_fixup, int, 0444);
+MODULE_PARM_DESC(use_pci_fixup, "Enable PCI fixup to probe for hidden devices");
/*
* This is used for Nehalem-EP and Nehalem-EX devices, where the non-core
* registers start at bus 255, and are not reported by BIOS.
@@ -212,8 +220,8 @@ struct pci_id_descr {
};
struct pci_id_table {
- struct pci_id_descr *descr;
- int n_devs;
+ const struct pci_id_descr *descr;
+ int n_devs;
};
struct i7core_dev {
@@ -235,8 +243,6 @@ struct i7core_pvt {
struct i7core_inject inject;
struct i7core_channel channel[NUM_CHANS];
- int channels; /* Number of active channels */
-
int ce_count_available;
int csrow_map[NUM_CHANS][MAX_DIMMS];
@@ -261,22 +267,22 @@ struct i7core_pvt {
/* Count indicator to show errors not got */
unsigned mce_overrun;
-};
-/* Static vars */
-static LIST_HEAD(i7core_edac_list);
-static DEFINE_MUTEX(i7core_edac_lock);
+ /* Struct to control EDAC polling */
+ struct edac_pci_ctl_info *i7core_pci;
+};
#define PCI_DESCR(device, function, device_id) \
.dev = (device), \
.func = (function), \
.dev_id = (device_id)
-struct pci_id_descr pci_dev_descr_i7core_nehalem[] = {
+static const struct pci_id_descr pci_dev_descr_i7core_nehalem[] = {
/* Memory controller */
{ PCI_DESCR(3, 0, PCI_DEVICE_ID_INTEL_I7_MCR) },
{ PCI_DESCR(3, 1, PCI_DEVICE_ID_INTEL_I7_MC_TAD) },
- /* Exists only for RDIMM */
+
+ /* Exists only for RDIMM */
{ PCI_DESCR(3, 2, PCI_DEVICE_ID_INTEL_I7_MC_RAS), .optional = 1 },
{ PCI_DESCR(3, 4, PCI_DEVICE_ID_INTEL_I7_MC_TEST) },
@@ -297,19 +303,9 @@ struct pci_id_descr pci_dev_descr_i7core_nehalem[] = {
{ PCI_DESCR(6, 1, PCI_DEVICE_ID_INTEL_I7_MC_CH2_ADDR) },
{ PCI_DESCR(6, 2, PCI_DEVICE_ID_INTEL_I7_MC_CH2_RANK) },
{ PCI_DESCR(6, 3, PCI_DEVICE_ID_INTEL_I7_MC_CH2_TC) },
-
- /* Generic Non-core registers */
- /*
- * This is the PCI device on i7core and on Xeon 35xx (8086:2c41)
- * On Xeon 55xx, however, it has a different id (8086:2c40). So,
- * the probing code needs to test for the other address in case of
- * failure of this one
- */
- { PCI_DESCR(0, 0, PCI_DEVICE_ID_INTEL_I7_NONCORE) },
-
};
-struct pci_id_descr pci_dev_descr_lynnfield[] = {
+static const struct pci_id_descr pci_dev_descr_lynnfield[] = {
{ PCI_DESCR( 3, 0, PCI_DEVICE_ID_INTEL_LYNNFIELD_MCR) },
{ PCI_DESCR( 3, 1, PCI_DEVICE_ID_INTEL_LYNNFIELD_MC_TAD) },
{ PCI_DESCR( 3, 4, PCI_DEVICE_ID_INTEL_LYNNFIELD_MC_TEST) },
@@ -323,15 +319,9 @@ struct pci_id_descr pci_dev_descr_lynnfield[] = {
{ PCI_DESCR( 5, 1, PCI_DEVICE_ID_INTEL_LYNNFIELD_MC_CH1_ADDR) },
{ PCI_DESCR( 5, 2, PCI_DEVICE_ID_INTEL_LYNNFIELD_MC_CH1_RANK) },
{ PCI_DESCR( 5, 3, PCI_DEVICE_ID_INTEL_LYNNFIELD_MC_CH1_TC) },
-
- /*
- * This is the PCI device has an alternate address on some
- * processors like Core i7 860
- */
- { PCI_DESCR( 0, 0, PCI_DEVICE_ID_INTEL_LYNNFIELD_NONCORE) },
};
-struct pci_id_descr pci_dev_descr_i7core_westmere[] = {
+static const struct pci_id_descr pci_dev_descr_i7core_westmere[] = {
/* Memory controller */
{ PCI_DESCR(3, 0, PCI_DEVICE_ID_INTEL_LYNNFIELD_MCR_REV2) },
{ PCI_DESCR(3, 1, PCI_DEVICE_ID_INTEL_LYNNFIELD_MC_TAD_REV2) },
@@ -356,17 +346,14 @@ struct pci_id_descr pci_dev_descr_i7core_westmere[] = {
{ PCI_DESCR(6, 1, PCI_DEVICE_ID_INTEL_LYNNFIELD_MC_CH2_ADDR_REV2) },
{ PCI_DESCR(6, 2, PCI_DEVICE_ID_INTEL_LYNNFIELD_MC_CH2_RANK_REV2) },
{ PCI_DESCR(6, 3, PCI_DEVICE_ID_INTEL_LYNNFIELD_MC_CH2_TC_REV2) },
-
- /* Generic Non-core registers */
- { PCI_DESCR(0, 0, PCI_DEVICE_ID_INTEL_LYNNFIELD_NONCORE_REV2) },
-
};
-#define PCI_ID_TABLE_ENTRY(A) { A, ARRAY_SIZE(A) }
-struct pci_id_table pci_dev_table[] = {
+#define PCI_ID_TABLE_ENTRY(A) { .descr=A, .n_devs = ARRAY_SIZE(A) }
+static const struct pci_id_table pci_dev_table[] = {
PCI_ID_TABLE_ENTRY(pci_dev_descr_i7core_nehalem),
PCI_ID_TABLE_ENTRY(pci_dev_descr_lynnfield),
PCI_ID_TABLE_ENTRY(pci_dev_descr_i7core_westmere),
+ {0,} /* 0 terminated list. */
};
/*
@@ -378,8 +365,6 @@ static const struct pci_device_id i7core_pci_tbl[] __devinitdata = {
{0,} /* 0 terminated list. */
};
-static struct edac_pci_ctl_info *i7core_pci;
-
/****************************************************************************
Anciliary status routines
****************************************************************************/
@@ -442,6 +427,36 @@ static struct i7core_dev *get_i7core_dev(u8 socket)
return NULL;
}
+static struct i7core_dev *alloc_i7core_dev(u8 socket,
+ const struct pci_id_table *table)
+{
+ struct i7core_dev *i7core_dev;
+
+ i7core_dev = kzalloc(sizeof(*i7core_dev), GFP_KERNEL);
+ if (!i7core_dev)
+ return NULL;
+
+ i7core_dev->pdev = kzalloc(sizeof(*i7core_dev->pdev) * table->n_devs,
+ GFP_KERNEL);
+ if (!i7core_dev->pdev) {
+ kfree(i7core_dev);
+ return NULL;
+ }
+
+ i7core_dev->socket = socket;
+ i7core_dev->n_devs = table->n_devs;
+ list_add_tail(&i7core_dev->list, &i7core_edac_list);
+
+ return i7core_dev;
+}
+
+static void free_i7core_dev(struct i7core_dev *i7core_dev)
+{
+ list_del(&i7core_dev->list);
+ kfree(i7core_dev->pdev);
+ kfree(i7core_dev);
+}
+
/****************************************************************************
Memory check routines
****************************************************************************/
@@ -484,7 +499,7 @@ static struct pci_dev *get_pdev_slot_func(u8 socket, unsigned slot,
* to add a fake description for csrows.
* So, this driver is attributing one DIMM memory for one csrow.
*/
-static int i7core_get_active_channels(u8 socket, unsigned *channels,
+static int i7core_get_active_channels(const u8 socket, unsigned *channels,
unsigned *csrows)
{
struct pci_dev *pdev = NULL;
@@ -545,12 +560,13 @@ static int i7core_get_active_channels(u8 socket, unsigned *channels,
return 0;
}
-static int get_dimm_config(struct mem_ctl_info *mci, int *csrow)
+static int get_dimm_config(const struct mem_ctl_info *mci)
{
struct i7core_pvt *pvt = mci->pvt_info;
struct csrow_info *csr;
struct pci_dev *pdev;
int i, j;
+ int csrow = 0;
unsigned long last_page = 0;
enum edac_type mode;
enum mem_type mtype;
@@ -664,13 +680,9 @@ static int get_dimm_config(struct mem_ctl_info *mci, int *csrow)
RANKOFFSET(dimm_dod[j]),
banks, ranks, rows, cols);
-#if PAGE_SHIFT > 20
- npages = size >> (PAGE_SHIFT - 20);
-#else
- npages = size << (20 - PAGE_SHIFT);
-#endif
+ npages = MiB_TO_PAGES(size);
- csr = &mci->csrows[*csrow];
+ csr = &mci->csrows[csrow];
csr->first_page = last_page + 1;
last_page += npages;
csr->last_page = last_page;
@@ -678,13 +690,13 @@ static int get_dimm_config(struct mem_ctl_info *mci, int *csrow)
csr->page_mask = 0;
csr->grain = 8;
- csr->csrow_idx = *csrow;
+ csr->csrow_idx = csrow;
csr->nr_channels = 1;
csr->channels[0].chan_idx = i;
csr->channels[0].ce_count = 0;
- pvt->csrow_map[i][j] = *csrow;
+ pvt->csrow_map[i][j] = csrow;
switch (banks) {
case 4:
@@ -703,7 +715,7 @@ static int get_dimm_config(struct mem_ctl_info *mci, int *csrow)
csr->edac_mode = mode;
csr->mtype = mtype;
- (*csrow)++;
+ csrow++;
}
pci_read_config_dword(pdev, MC_SAG_CH_0, &value[0]);
@@ -736,7 +748,7 @@ static int get_dimm_config(struct mem_ctl_info *mci, int *csrow)
we're disabling error injection on all write calls to the sysfs nodes that
controls the error code injection.
*/
-static int disable_inject(struct mem_ctl_info *mci)
+static int disable_inject(const struct mem_ctl_info *mci)
{
struct i7core_pvt *pvt = mci->pvt_info;
@@ -921,7 +933,7 @@ DECLARE_ADDR_MATCH(bank, 32);
DECLARE_ADDR_MATCH(page, 0x10000);
DECLARE_ADDR_MATCH(col, 0x4000);
-static int write_and_test(struct pci_dev *dev, int where, u32 val)
+static int write_and_test(struct pci_dev *dev, const int where, const u32 val)
{
u32 read;
int count;
@@ -1120,35 +1132,34 @@ DECLARE_COUNTER(2);
* Sysfs struct
*/
-
-static struct mcidev_sysfs_attribute i7core_addrmatch_attrs[] = {
+static const struct mcidev_sysfs_attribute i7core_addrmatch_attrs[] = {
ATTR_ADDR_MATCH(channel),
ATTR_ADDR_MATCH(dimm),
ATTR_ADDR_MATCH(rank),
ATTR_ADDR_MATCH(bank),
ATTR_ADDR_MATCH(page),
ATTR_ADDR_MATCH(col),
- { .attr = { .name = NULL } }
+ { } /* End of list */
};
-static struct mcidev_sysfs_group i7core_inject_addrmatch = {
+static const struct mcidev_sysfs_group i7core_inject_addrmatch = {
.name = "inject_addrmatch",
.mcidev_attr = i7core_addrmatch_attrs,
};
-static struct mcidev_sysfs_attribute i7core_udimm_counters_attrs[] = {
+static const struct mcidev_sysfs_attribute i7core_udimm_counters_attrs[] = {
ATTR_COUNTER(0),
ATTR_COUNTER(1),
ATTR_COUNTER(2),
{ .attr = { .name = NULL } }
};
-static struct mcidev_sysfs_group i7core_udimm_counters = {
+static const struct mcidev_sysfs_group i7core_udimm_counters = {
.name = "all_channel_counts",
.mcidev_attr = i7core_udimm_counters_attrs,
};
-static struct mcidev_sysfs_attribute i7core_sysfs_attrs[] = {
+static const struct mcidev_sysfs_attribute i7core_sysfs_rdimm_attrs[] = {
{
.attr = {
.name = "inject_section",
@@ -1180,8 +1191,44 @@ static struct mcidev_sysfs_attribute i7core_sysfs_attrs[] = {
.show = i7core_inject_enable_show,
.store = i7core_inject_enable_store,
},
- { .attr = { .name = NULL } }, /* Reserved for udimm counters */
- { .attr = { .name = NULL } }
+ { } /* End of list */
+};
+
+static const struct mcidev_sysfs_attribute i7core_sysfs_udimm_attrs[] = {
+ {
+ .attr = {
+ .name = "inject_section",
+ .mode = (S_IRUGO | S_IWUSR)
+ },
+ .show = i7core_inject_section_show,
+ .store = i7core_inject_section_store,
+ }, {
+ .attr = {
+ .name = "inject_type",
+ .mode = (S_IRUGO | S_IWUSR)
+ },
+ .show = i7core_inject_type_show,
+ .store = i7core_inject_type_store,
+ }, {
+ .attr = {
+ .name = "inject_eccmask",
+ .mode = (S_IRUGO | S_IWUSR)
+ },
+ .show = i7core_inject_eccmask_show,
+ .store = i7core_inject_eccmask_store,
+ }, {
+ .grp = &i7core_inject_addrmatch,
+ }, {
+ .attr = {
+ .name = "inject_enable",
+ .mode = (S_IRUGO | S_IWUSR)
+ },
+ .show = i7core_inject_enable_show,
+ .store = i7core_inject_enable_store,
+ }, {
+ .grp = &i7core_udimm_counters,
+ },
+ { } /* End of list */
};
/****************************************************************************
@@ -1189,7 +1236,7 @@ static struct mcidev_sysfs_attribute i7core_sysfs_attrs[] = {
****************************************************************************/
/*
- * i7core_put_devices 'put' all the devices that we have
+ * i7core_put_all_devices 'put' all the devices that we have
* reserved via 'get'
*/
static void i7core_put_devices(struct i7core_dev *i7core_dev)
@@ -1206,23 +1253,23 @@ static void i7core_put_devices(struct i7core_dev *i7core_dev)
PCI_SLOT(pdev->devfn), PCI_FUNC(pdev->devfn));
pci_dev_put(pdev);
}
- kfree(i7core_dev->pdev);
- list_del(&i7core_dev->list);
- kfree(i7core_dev);
}
static void i7core_put_all_devices(void)
{
struct i7core_dev *i7core_dev, *tmp;
- list_for_each_entry_safe(i7core_dev, tmp, &i7core_edac_list, list)
+ list_for_each_entry_safe(i7core_dev, tmp, &i7core_edac_list, list) {
i7core_put_devices(i7core_dev);
+ free_i7core_dev(i7core_dev);
+ }
}
-static void __init i7core_xeon_pci_fixup(struct pci_id_table *table)
+static void __init i7core_xeon_pci_fixup(const struct pci_id_table *table)
{
struct pci_dev *pdev = NULL;
int i;
+
/*
* On Xeon 55xx, the Intel Quckpath Arch Generic Non-core pci buses
* aren't announced by acpi. So, we need to use a legacy scan probing
@@ -1257,16 +1304,18 @@ static unsigned i7core_pci_lastbus(void)
}
/*
- * i7core_get_devices Find and perform 'get' operation on the MCH's
+ * i7core_get_all_devices Find and perform 'get' operation on the MCH's
* device/functions we want to reference for this driver
*
* Need to 'get' device 16 func 1 and func 2
*/
-int i7core_get_onedevice(struct pci_dev **prev, int devno,
- struct pci_id_descr *dev_descr, unsigned n_devs,
- unsigned last_bus)
+static int i7core_get_onedevice(struct pci_dev **prev,
+ const struct pci_id_table *table,
+ const unsigned devno,
+ const unsigned last_bus)
{
struct i7core_dev *i7core_dev;
+ const struct pci_id_descr *dev_descr = &table->descr[devno];
struct pci_dev *pdev = NULL;
u8 bus = 0;
@@ -1275,20 +1324,6 @@ int i7core_get_onedevice(struct pci_dev **prev, int devno,
pdev = pci_get_device(PCI_VENDOR_ID_INTEL,
dev_descr->dev_id, *prev);
- /*
- * On Xeon 55xx, the Intel Quckpath Arch Generic Non-core regs
- * is at addr 8086:2c40, instead of 8086:2c41. So, we need
- * to probe for the alternate address in case of failure
- */
- if (dev_descr->dev_id == PCI_DEVICE_ID_INTEL_I7_NONCORE && !pdev)
- pdev = pci_get_device(PCI_VENDOR_ID_INTEL,
- PCI_DEVICE_ID_INTEL_I7_NONCORE_ALT, *prev);
-
- if (dev_descr->dev_id == PCI_DEVICE_ID_INTEL_LYNNFIELD_NONCORE && !pdev)
- pdev = pci_get_device(PCI_VENDOR_ID_INTEL,
- PCI_DEVICE_ID_INTEL_LYNNFIELD_NONCORE_ALT,
- *prev);
-
if (!pdev) {
if (*prev) {
*prev = pdev;
@@ -1315,18 +1350,11 @@ int i7core_get_onedevice(struct pci_dev **prev, int devno,
i7core_dev = get_i7core_dev(socket);
if (!i7core_dev) {
- i7core_dev = kzalloc(sizeof(*i7core_dev), GFP_KERNEL);
- if (!i7core_dev)
- return -ENOMEM;
- i7core_dev->pdev = kzalloc(sizeof(*i7core_dev->pdev) * n_devs,
- GFP_KERNEL);
- if (!i7core_dev->pdev) {
- kfree(i7core_dev);
+ i7core_dev = alloc_i7core_dev(socket, table);
+ if (!i7core_dev) {
+ pci_dev_put(pdev);
return -ENOMEM;
}
- i7core_dev->socket = socket;
- i7core_dev->n_devs = n_devs;
- list_add_tail(&i7core_dev->list, &i7core_edac_list);
}
if (i7core_dev->pdev[devno]) {
@@ -1368,27 +1396,31 @@ int i7core_get_onedevice(struct pci_dev **prev, int devno,
dev_descr->func,
PCI_VENDOR_ID_INTEL, dev_descr->dev_id);
+ /*
+ * As documented in drivers/pci/search.c, the reference count of
+ * @from is always decremented if it is not %NULL. Since we need to
+ * walk all devices up to NULL, take an extra reference on each
+ * device that we keep.
+ */
+ pci_dev_get(pdev);
+
*prev = pdev;
return 0;
}
-static int i7core_get_devices(struct pci_id_table *table)
+static int i7core_get_all_devices(void)
{
int i, rc, last_bus;
struct pci_dev *pdev = NULL;
- struct pci_id_descr *dev_descr;
+ const struct pci_id_table *table = pci_dev_table;
last_bus = i7core_pci_lastbus();
while (table && table->descr) {
- dev_descr = table->descr;
for (i = 0; i < table->n_devs; i++) {
pdev = NULL;
do {
- rc = i7core_get_onedevice(&pdev, i,
- &dev_descr[i],
- table->n_devs,
+ rc = i7core_get_onedevice(&pdev, table, i,
last_bus);
if (rc < 0) {
if (i == 0) {
@@ -1404,7 +1436,6 @@ static int i7core_get_devices(struct pci_id_table *table)
}
return 0;
- return 0;
}
static int mci_bind_devs(struct mem_ctl_info *mci,
@@ -1414,10 +1445,6 @@ static int mci_bind_devs(struct mem_ctl_info *mci,
struct pci_dev *pdev;
int i, func, slot;
- /* Associates i7core_dev and mci for future usage */
- pvt->i7core_dev = i7core_dev;
- i7core_dev->mci = mci;
-
pvt->is_registered = 0;
for (i = 0; i < i7core_dev->n_devs; i++) {
pdev = i7core_dev->pdev[i];
@@ -1448,15 +1475,6 @@ static int mci_bind_devs(struct mem_ctl_info *mci,
pvt->is_registered = 1;
}
- /*
- * Add extra nodes to count errors on udimm
- * For registered memory, this is not needed, since the counters
- * are already displayed at the standard locations
- */
- if (!pvt->is_registered)
- i7core_sysfs_attrs[ARRAY_SIZE(i7core_sysfs_attrs)-2].grp =
- &i7core_udimm_counters;
-
return 0;
error:
@@ -1470,7 +1488,9 @@ error:
Error check routines
****************************************************************************/
static void i7core_rdimm_update_csrow(struct mem_ctl_info *mci,
- int chan, int dimm, int add)
+ const int chan,
+ const int dimm,
+ const int add)
{
char *msg;
struct i7core_pvt *pvt = mci->pvt_info;
@@ -1487,7 +1507,10 @@ static void i7core_rdimm_update_csrow(struct mem_ctl_info *mci,
}
static void i7core_rdimm_update_ce_count(struct mem_ctl_info *mci,
- int chan, int new0, int new1, int new2)
+ const int chan,
+ const int new0,
+ const int new1,
+ const int new2)
{
struct i7core_pvt *pvt = mci->pvt_info;
int add0 = 0, add1 = 0, add2 = 0;
@@ -1641,7 +1664,7 @@ static void i7core_udimm_check_mc_ecc_err(struct mem_ctl_info *mci)
* fields
*/
static void i7core_mce_output_error(struct mem_ctl_info *mci,
- struct mce *m)
+ const struct mce *m)
{
struct i7core_pvt *pvt = mci->pvt_info;
char *type, *optype, *err, *msg;
@@ -1845,28 +1868,85 @@ static int i7core_mce_check_error(void *priv, struct mce *mce)
return 1;
}
-static int i7core_register_mci(struct i7core_dev *i7core_dev,
- int num_channels, int num_csrows)
+static void i7core_pci_ctl_create(struct i7core_pvt *pvt)
+{
+ pvt->i7core_pci = edac_pci_create_generic_ctl(
+ &pvt->i7core_dev->pdev[0]->dev,
+ EDAC_MOD_STR);
+ if (unlikely(!pvt->i7core_pci))
+ pr_warn("Unable to setup PCI error report via EDAC\n");
+}
+
+static void i7core_pci_ctl_release(struct i7core_pvt *pvt)
+{
+ if (likely(pvt->i7core_pci))
+ edac_pci_release_generic_ctl(pvt->i7core_pci);
+ else
+ i7core_printk(KERN_ERR,
+ "Couldn't find mem_ctl_info for socket %d\n",
+ pvt->i7core_dev->socket);
+ pvt->i7core_pci = NULL;
+}
+
+static void i7core_unregister_mci(struct i7core_dev *i7core_dev)
+{
+ struct mem_ctl_info *mci = i7core_dev->mci;
+ struct i7core_pvt *pvt;
+
+ if (unlikely(!mci || !mci->pvt_info)) {
+ debugf0("MC: " __FILE__ ": %s(): dev = %p\n",
+ __func__, &i7core_dev->pdev[0]->dev);
+
+ i7core_printk(KERN_ERR, "Couldn't find mci handler\n");
+ return;
+ }
+
+ pvt = mci->pvt_info;
+
+ debugf0("MC: " __FILE__ ": %s(): mci = %p, dev = %p\n",
+ __func__, mci, &i7core_dev->pdev[0]->dev);
+
+ /* Disable MCE NMI handler */
+ edac_mce_unregister(&pvt->edac_mce);
+
+ /* Disable EDAC polling */
+ i7core_pci_ctl_release(pvt);
+
+ /* Remove MC sysfs nodes */
+ edac_mc_del_mc(mci->dev);
+
+ debugf1("%s: free mci struct\n", mci->ctl_name);
+ kfree(mci->ctl_name);
+ edac_mc_free(mci);
+ i7core_dev->mci = NULL;
+}
+
+static int i7core_register_mci(struct i7core_dev *i7core_dev)
{
struct mem_ctl_info *mci;
struct i7core_pvt *pvt;
- int csrow = 0;
- int rc;
+ int rc, channels, csrows;
+
+ /* Check the number of active and not disabled channels */
+ rc = i7core_get_active_channels(i7core_dev->socket, &channels, &csrows);
+ if (unlikely(rc < 0))
+ return rc;
/* allocate a new MC control structure */
- mci = edac_mc_alloc(sizeof(*pvt), num_csrows, num_channels,
- i7core_dev->socket);
+ mci = edac_mc_alloc(sizeof(*pvt), csrows, channels, i7core_dev->socket);
if (unlikely(!mci))
return -ENOMEM;
- debugf0("MC: " __FILE__ ": %s(): mci = %p\n", __func__, mci);
-
- /* record ptr to the generic device */
- mci->dev = &i7core_dev->pdev[0]->dev;
+ debugf0("MC: " __FILE__ ": %s(): mci = %p, dev = %p\n",
+ __func__, mci, &i7core_dev->pdev[0]->dev);
pvt = mci->pvt_info;
memset(pvt, 0, sizeof(*pvt));
+ /* Associates i7core_dev and mci for future usage */
+ pvt->i7core_dev = i7core_dev;
+ i7core_dev->mci = mci;
+
/*
* FIXME: how to handle RDDR3 at MCI level? It is possible to have
* Mixed RDDR3/UDDR3 with Nehalem, provided that they are on different
@@ -1881,17 +1961,23 @@ static int i7core_register_mci(struct i7core_dev *i7core_dev,
i7core_dev->socket);
mci->dev_name = pci_name(i7core_dev->pdev[0]);
mci->ctl_page_to_phys = NULL;
- mci->mc_driver_sysfs_attributes = i7core_sysfs_attrs;
- /* Set the function pointer to an actual operation function */
- mci->edac_check = i7core_check_error;
/* Store pci devices at mci for faster access */
rc = mci_bind_devs(mci, i7core_dev);
if (unlikely(rc < 0))
- goto fail;
+ goto fail0;
+
+ if (pvt->is_registered)
+ mci->mc_driver_sysfs_attributes = i7core_sysfs_rdimm_attrs;
+ else
+ mci->mc_driver_sysfs_attributes = i7core_sysfs_udimm_attrs;
/* Get dimm basic config */
- get_dimm_config(mci, &csrow);
+ get_dimm_config(mci);
+ /* record ptr to the generic device */
+ mci->dev = &i7core_dev->pdev[0]->dev;
+ /* Set the function pointer to an actual operation function */
+ mci->edac_check = i7core_check_error;
/* add this new MC control structure to EDAC's list of MCs */
if (unlikely(edac_mc_add_mc(mci))) {
@@ -1902,19 +1988,7 @@ static int i7core_register_mci(struct i7core_dev *i7core_dev,
*/
rc = -EINVAL;
- goto fail;
- }
-
- /* allocating generic PCI control info */
- i7core_pci = edac_pci_create_generic_ctl(&i7core_dev->pdev[0]->dev,
- EDAC_MOD_STR);
- if (unlikely(!i7core_pci)) {
- printk(KERN_WARNING
- "%s(): Unable to create PCI control\n",
- __func__);
- printk(KERN_WARNING
- "%s(): PCI error report via EDAC not setup\n",
- __func__);
+ goto fail0;
}
/* Default error mask is any memory */
@@ -1925,19 +1999,28 @@ static int i7core_register_mci(struct i7core_dev *i7core_dev,
pvt->inject.page = -1;
pvt->inject.col = -1;
+ /* allocating generic PCI control info */
+ i7core_pci_ctl_create(pvt);
+
/* Registers on edac_mce in order to receive memory errors */
pvt->edac_mce.priv = mci;
pvt->edac_mce.check_error = i7core_mce_check_error;
-
rc = edac_mce_register(&pvt->edac_mce);
if (unlikely(rc < 0)) {
debugf0("MC: " __FILE__
": %s(): failed edac_mce_register()\n", __func__);
+ goto fail1;
}
-fail:
- if (rc < 0)
- edac_mc_free(mci);
+ return 0;
+
+fail1:
+ i7core_pci_ctl_release(pvt);
+ edac_mc_del_mc(mci->dev);
+fail0:
+ kfree(mci->ctl_name);
+ edac_mc_free(mci);
+ i7core_dev->mci = NULL;
return rc;
}
@@ -1949,8 +2032,6 @@ fail:
* < 0 for error code
*/
-static int probed = 0;
-
static int __devinit i7core_probe(struct pci_dev *pdev,
const struct pci_device_id *id)
{
@@ -1965,25 +2046,16 @@ static int __devinit i7core_probe(struct pci_dev *pdev,
*/
if (unlikely(probed >= 1)) {
mutex_unlock(&i7core_edac_lock);
- return -EINVAL;
+ return -ENODEV;
}
probed++;
- rc = i7core_get_devices(pci_dev_table);
+ rc = i7core_get_all_devices();
if (unlikely(rc < 0))
goto fail0;
list_for_each_entry(i7core_dev, &i7core_edac_list, list) {
- int channels;
- int csrows;
-
- /* Check the number of active and not disabled channels */
- rc = i7core_get_active_channels(i7core_dev->socket,
- &channels, &csrows);
- if (unlikely(rc < 0))
- goto fail1;
-
- rc = i7core_register_mci(i7core_dev, channels, csrows);
+ rc = i7core_register_mci(i7core_dev);
if (unlikely(rc < 0))
goto fail1;
}
@@ -1994,6 +2066,9 @@ static int __devinit i7core_probe(struct pci_dev *pdev,
return 0;
fail1:
+ list_for_each_entry(i7core_dev, &i7core_edac_list, list)
+ i7core_unregister_mci(i7core_dev);
+
i7core_put_all_devices();
fail0:
mutex_unlock(&i7core_edac_lock);
@@ -2006,14 +2081,10 @@ fail0:
*/
static void __devexit i7core_remove(struct pci_dev *pdev)
{
- struct mem_ctl_info *mci;
- struct i7core_dev *i7core_dev, *tmp;
+ struct i7core_dev *i7core_dev;
debugf0(__FILE__ ": %s()\n", __func__);
- if (i7core_pci)
- edac_pci_release_generic_ctl(i7core_pci);
-
/*
* we have a trouble here: pdev value for removal will be wrong, since
* it will point to the X58 register used to detect that the machine
@@ -2023,22 +2094,18 @@ static void __devexit i7core_remove(struct pci_dev *pdev)
*/
mutex_lock(&i7core_edac_lock);
- list_for_each_entry_safe(i7core_dev, tmp, &i7core_edac_list, list) {
- mci = edac_mc_del_mc(&i7core_dev->pdev[0]->dev);
- if (mci) {
- struct i7core_pvt *pvt = mci->pvt_info;
-
- i7core_dev = pvt->i7core_dev;
- edac_mce_unregister(&pvt->edac_mce);
- kfree(mci->ctl_name);
- edac_mc_free(mci);
- i7core_put_devices(i7core_dev);
- } else {
- i7core_printk(KERN_ERR,
- "Couldn't find mci for socket %d\n",
- i7core_dev->socket);
- }
+
+ if (unlikely(!probed)) {
+ mutex_unlock(&i7core_edac_lock);
+ return;
}
+
+ list_for_each_entry(i7core_dev, &i7core_edac_list, list)
+ i7core_unregister_mci(i7core_dev);
+
+ /* Release PCI resources */
+ i7core_put_all_devices();
+
probed--;
mutex_unlock(&i7core_edac_lock);
@@ -2070,7 +2137,8 @@ static int __init i7core_init(void)
/* Ensure that the OPSTATE is set correctly for POLL or NMI */
opstate_init();
- i7core_xeon_pci_fixup(pci_dev_table);
+ if (use_pci_fixup)
+ i7core_xeon_pci_fixup(pci_dev_table);
pci_rc = pci_register_driver(&i7core_driver);
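A minimal sketch (editor's illustration, not part of the patch) of the pci_get_device() reference-count rule that the new comment in i7core_get_onedevice() relies on: each call drops the reference held on the 'from' cursor and returns the next match with its own count raised, so a driver that wants to keep every matched device must take an extra reference with pci_dev_get() and release it later with pci_dev_put().

#include <linux/pci.h>

static void hold_all_matches(unsigned int vendor, unsigned int device)
{
	struct pci_dev *pdev = NULL;

	while ((pdev = pci_get_device(vendor, device, pdev)) != NULL) {
		/*
		 * The next pci_get_device() call will drop the reference
		 * it returned on pdev, so take one of our own before
		 * stashing the pointer; balance it with pci_dev_put()
		 * when the device is no longer needed.
		 */
		pci_dev_get(pdev);
		/* ... save pdev in a driver-private list here ... */
	}
}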
diff --git a/drivers/edac/mce_amd_inj.c b/drivers/edac/mce_amd_inj.c
index 8d0688f36d4c..39faded3cadd 100644
--- a/drivers/edac/mce_amd_inj.c
+++ b/drivers/edac/mce_amd_inj.c
@@ -139,7 +139,7 @@ static int __init edac_init_mce_inject(void)
return 0;
err_sysfs_create:
- while (i-- >= 0)
+ while (--i >= 0)
sysfs_remove_file(mce_kobj, &sysfs_attrs[i]->attr);
kobject_del(mce_kobj);
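The one-character fix above turns a post-decrement cleanup loop into a pre-decrement one. As a generic sketch (illustrative names, not the injector's own types): if creation failed at index i, then entries 0..i-1 were created, and "--i >= 0" walks back exactly those, while the old "i-- >= 0" made one extra pass and touched index -1.

#include <linux/kobject.h>
#include <linux/sysfs.h>

/* Unwind the sysfs files created before a failure at index i. */
static void unwind_created_files(struct kobject *kobj,
				 const struct attribute **attrs, int i)
{
	while (--i >= 0)		/* stops before reaching index -1 */
		sysfs_remove_file(kobj, attrs[i]);
}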
diff --git a/drivers/firewire/Kconfig b/drivers/firewire/Kconfig
index fcf3ea28340b..40a222e19b2d 100644
--- a/drivers/firewire/Kconfig
+++ b/drivers/firewire/Kconfig
@@ -3,9 +3,6 @@ menu "IEEE 1394 (FireWire) support"
# firewire-core does not depend on PCI but is
# not useful without PCI controller driver
-comment "You can enable one or both FireWire driver stacks."
-comment "The newer stack is recommended."
-
config FIREWIRE
tristate "FireWire driver stack"
select CRC_ITU_T
@@ -64,8 +61,6 @@ config FIREWIRE_NET
To compile this driver as a module, say M here: The module will be
called firewire-net.
-source "drivers/ieee1394/Kconfig"
-
config FIREWIRE_NOSY
tristate "Nosy - a FireWire traffic sniffer for PCILynx cards"
depends on PCI
diff --git a/drivers/firewire/Makefile b/drivers/firewire/Makefile
index 3c6a7fb20aa7..e3870d5c43dd 100644
--- a/drivers/firewire/Makefile
+++ b/drivers/firewire/Makefile
@@ -13,3 +13,4 @@ obj-$(CONFIG_FIREWIRE_OHCI) += firewire-ohci.o
obj-$(CONFIG_FIREWIRE_SBP2) += firewire-sbp2.o
obj-$(CONFIG_FIREWIRE_NET) += firewire-net.o
obj-$(CONFIG_FIREWIRE_NOSY) += nosy.o
+obj-$(CONFIG_PROVIDE_OHCI1394_DMA_INIT) += init_ohci1394_dma.o
diff --git a/drivers/ieee1394/init_ohci1394_dma.c b/drivers/firewire/init_ohci1394_dma.c
index ddaab6eb8ace..a9a347adb353 100644
--- a/drivers/ieee1394/init_ohci1394_dma.c
+++ b/drivers/firewire/init_ohci1394_dma.c
@@ -32,23 +32,41 @@
* Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
*/
-#include <linux/interrupt.h> /* for ohci1394.h */
#include <linux/delay.h>
+#include <linux/io.h>
+#include <linux/kernel.h>
#include <linux/pci.h> /* for PCI defines */
-#include <linux/init_ohci1394_dma.h>
+#include <linux/string.h>
+
#include <asm/pci-direct.h> /* for direct PCI config space access */
#include <asm/fixmap.h>
-#include "ieee1394_types.h"
-#include "ohci1394.h"
+#include <linux/init_ohci1394_dma.h>
+#include "ohci.h"
int __initdata init_ohci1394_dma_early;
+struct ohci {
+ void __iomem *registers;
+};
+
+static inline void reg_write(const struct ohci *ohci, int offset, u32 data)
+{
+ writel(data, ohci->registers + offset);
+}
+
+static inline u32 reg_read(const struct ohci *ohci, int offset)
+{
+ return readl(ohci->registers + offset);
+}
+
+#define OHCI_LOOP_COUNT 100 /* Number of loops for reg read waits */
+
/* Reads a PHY register of an OHCI-1394 controller */
-static inline u8 __init get_phy_reg(struct ti_ohci *ohci, u8 addr)
+static inline u8 __init get_phy_reg(struct ohci *ohci, u8 addr)
{
int i;
- quadlet_t r;
+ u32 r;
reg_write(ohci, OHCI1394_PhyControl, (addr << 8) | 0x00008000);
@@ -63,22 +81,22 @@ static inline u8 __init get_phy_reg(struct ti_ohci *ohci, u8 addr)
}
/* Writes to a PHY register of an OHCI-1394 controller */
-static inline void __init set_phy_reg(struct ti_ohci *ohci, u8 addr, u8 data)
+static inline void __init set_phy_reg(struct ohci *ohci, u8 addr, u8 data)
{
int i;
reg_write(ohci, OHCI1394_PhyControl, (addr << 8) | data | 0x00004000);
for (i = 0; i < OHCI_LOOP_COUNT; i++) {
- u32 r = reg_read(ohci, OHCI1394_PhyControl);
- if (!(r & 0x00004000))
+ if (!(reg_read(ohci, OHCI1394_PhyControl) & 0x00004000))
break;
mdelay(1);
}
}
/* Resets an OHCI-1394 controller (for sane state before initialization) */
-static inline void __init init_ohci1394_soft_reset(struct ti_ohci *ohci) {
+static inline void __init init_ohci1394_soft_reset(struct ohci *ohci)
+{
int i;
reg_write(ohci, OHCI1394_HCControlSet, OHCI1394_HCControl_softReset);
@@ -91,10 +109,14 @@ static inline void __init init_ohci1394_soft_reset(struct ti_ohci *ohci) {
}
}
+#define OHCI1394_MAX_AT_REQ_RETRIES 0xf
+#define OHCI1394_MAX_AT_RESP_RETRIES 0x2
+#define OHCI1394_MAX_PHYS_RESP_RETRIES 0x8
+
/* Basic OHCI-1394 register and port inititalization */
-static inline void __init init_ohci1394_initialize(struct ti_ohci *ohci)
+static inline void __init init_ohci1394_initialize(struct ohci *ohci)
{
- quadlet_t bus_options;
+ u32 bus_options;
int num_ports, i;
/* Put some defaults to these undefined bus options */
@@ -116,7 +138,7 @@ static inline void __init init_ohci1394_initialize(struct ti_ohci *ohci)
/* enable phys */
reg_write(ohci, OHCI1394_LinkControlSet,
- OHCI1394_LinkControl_RcvPhyPkt);
+ OHCI1394_LinkControl_rcvPhyPkt);
/* Don't accept phy packets into AR request context */
reg_write(ohci, OHCI1394_LinkControlClear, 0x00000400);
@@ -128,7 +150,7 @@ static inline void __init init_ohci1394_initialize(struct ti_ohci *ohci)
reg_write(ohci, OHCI1394_IsoXmitIntEventClear, 0xffffffff);
/* Accept asyncronous transfer requests from all nodes for now */
- reg_write(ohci,OHCI1394_AsReqFilterHiSet, 0x80000000);
+ reg_write(ohci, OHCI1394_AsReqFilterHiSet, 0x80000000);
/* Specify asyncronous transfer retries */
reg_write(ohci, OHCI1394_ATRetries,
@@ -137,7 +159,8 @@ static inline void __init init_ohci1394_initialize(struct ti_ohci *ohci)
(OHCI1394_MAX_PHYS_RESP_RETRIES<<8));
/* We don't want hardware swapping */
- reg_write(ohci, OHCI1394_HCControlClear, OHCI1394_HCControl_noByteSwap);
+ reg_write(ohci, OHCI1394_HCControlClear,
+ OHCI1394_HCControl_noByteSwapData);
/* Enable link */
reg_write(ohci, OHCI1394_HCControlSet, OHCI1394_HCControl_linkEnable);
@@ -164,11 +187,11 @@ static inline void __init init_ohci1394_initialize(struct ti_ohci *ohci)
* has to be enabled after each bus reset when needed. We resort
* to polling here because on early boot, we have no interrupts.
*/
-static inline void __init init_ohci1394_wait_for_busresets(struct ti_ohci *ohci)
+static inline void __init init_ohci1394_wait_for_busresets(struct ohci *ohci)
{
int i, events;
- for (i=0; i < 9; i++) {
+ for (i = 0; i < 9; i++) {
mdelay(200);
events = reg_read(ohci, OHCI1394_IntEventSet);
if (events & OHCI1394_busReset)
@@ -182,18 +205,18 @@ static inline void __init init_ohci1394_wait_for_busresets(struct ti_ohci *ohci)
* This enables remote DMA access over IEEE1394 from every host for the low
* 4GB of address space. DMA accesses above 4GB are not available currently.
*/
-static inline void __init init_ohci1394_enable_physical_dma(struct ti_ohci *hci)
+static inline void __init init_ohci1394_enable_physical_dma(struct ohci *ohci)
{
- reg_write(hci, OHCI1394_PhyReqFilterHiSet, 0xffffffff);
- reg_write(hci, OHCI1394_PhyReqFilterLoSet, 0xffffffff);
- reg_write(hci, OHCI1394_PhyUpperBound, 0xffff0000);
+ reg_write(ohci, OHCI1394_PhyReqFilterHiSet, 0xffffffff);
+ reg_write(ohci, OHCI1394_PhyReqFilterLoSet, 0xffffffff);
+ reg_write(ohci, OHCI1394_PhyUpperBound, 0xffff0000);
}
/**
* init_ohci1394_reset_and_init_dma - init controller and enable DMA
* This initializes the given controller and enables physical DMA engine in it.
*/
-static inline void __init init_ohci1394_reset_and_init_dma(struct ti_ohci *ohci)
+static inline void __init init_ohci1394_reset_and_init_dma(struct ohci *ohci)
{
/* Start off with a soft reset, clears everything to a sane state. */
init_ohci1394_soft_reset(ohci);
@@ -225,7 +248,7 @@ static inline void __init init_ohci1394_reset_and_init_dma(struct ti_ohci *ohci)
static inline void __init init_ohci1394_controller(int num, int slot, int func)
{
unsigned long ohci_base;
- struct ti_ohci ohci;
+ struct ohci ohci;
printk(KERN_INFO "init_ohci1394_dma: initializing OHCI-1394"
" at %02x:%02x.%x\n", num, slot, func);
@@ -235,7 +258,7 @@ static inline void __init init_ohci1394_controller(int num, int slot, int func)
set_fixmap_nocache(FIX_OHCI1394_BASE, ohci_base);
- ohci.registers = (void *)fix_to_virt(FIX_OHCI1394_BASE);
+ ohci.registers = (void __iomem *)fix_to_virt(FIX_OHCI1394_BASE);
init_ohci1394_reset_and_init_dma(&ohci);
}
@@ -247,6 +270,7 @@ static inline void __init init_ohci1394_controller(int num, int slot, int func)
void __init init_ohci1394_dma_on_all_controllers(void)
{
int num, slot, func;
+ u32 class;
if (!early_pci_allowed())
return;
@@ -255,9 +279,9 @@ void __init init_ohci1394_dma_on_all_controllers(void)
for (num = 0; num < 32; num++) {
for (slot = 0; slot < 32; slot++) {
for (func = 0; func < 8; func++) {
- u32 class = read_pci_config(num,slot,func,
+ class = read_pci_config(num, slot, func,
PCI_CLASS_REVISION);
- if ((class == 0xffffffff))
+ if (class == 0xffffffff)
continue; /* No device at this func */
if (class>>8 != PCI_CLASS_SERIAL_FIREWIRE_OHCI)
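The rewritten helpers above poll controller registers with a bounded loop because, as the file notes, this code runs at early boot with no interrupts available. A generic sketch of that pattern, assuming only readl() and mdelay() (the OHCI register offsets and masks are the file's own):

#include <linux/delay.h>
#include <linux/errno.h>
#include <linux/io.h>
#include <linux/types.h>

/* Poll until the masked bit clears or the iteration cap is reached. */
static int poll_bit_clear(void __iomem *reg, u32 mask, int max_loops)
{
	int i;

	for (i = 0; i < max_loops; i++) {
		if (!(readl(reg) & mask))
			return 0;	/* bit cleared, done */
		mdelay(1);
	}
	return -ETIMEDOUT;
}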
diff --git a/drivers/firewire/net.c b/drivers/firewire/net.c
index 18fdd9703b48..1a467a91fb0b 100644
--- a/drivers/firewire/net.c
+++ b/drivers/firewire/net.c
@@ -7,6 +7,7 @@
*/
#include <linux/bug.h>
+#include <linux/delay.h>
#include <linux/device.h>
#include <linux/firewire.h>
#include <linux/firewire-constants.h>
@@ -26,8 +27,14 @@
#include <asm/unaligned.h>
#include <net/arp.h>
-#define FWNET_MAX_FRAGMENTS 25 /* arbitrary limit */
-#define FWNET_ISO_PAGE_COUNT (PAGE_SIZE < 16 * 1024 ? 4 : 2)
+/* rx limits */
+#define FWNET_MAX_FRAGMENTS 30 /* arbitrary, > TX queue depth */
+#define FWNET_ISO_PAGE_COUNT (PAGE_SIZE < 16*1024 ? 4 : 2)
+
+/* tx limits */
+#define FWNET_MAX_QUEUED_DATAGRAMS 20 /* < 64 = number of tlabels */
+#define FWNET_MIN_QUEUED_DATAGRAMS 10 /* should keep AT DMA busy enough */
+#define FWNET_TX_QUEUE_LEN FWNET_MAX_QUEUED_DATAGRAMS /* ? */
#define IEEE1394_BROADCAST_CHANNEL 31
#define IEEE1394_ALL_NODES (0xffc0 | 0x003f)
@@ -169,15 +176,8 @@ struct fwnet_device {
struct fw_address_handler handler;
u64 local_fifo;
- /* List of packets to be sent */
- struct list_head packet_list;
- /*
- * List of packets that were broadcasted. When we get an ISO interrupt
- * one of them has been sent
- */
- struct list_head broadcasted_list;
- /* List of packets that have been sent but not yet acked */
- struct list_head sent_list;
+ /* Number of tx datagrams that have been queued but not yet acked */
+ int queued_datagrams;
struct list_head peer_list;
struct fw_card *card;
@@ -195,7 +195,7 @@ struct fwnet_peer {
unsigned pdg_size; /* pd_list size */
u16 datagram_label; /* outgoing datagram label */
- unsigned max_payload; /* includes RFC2374_FRAG_HDR_SIZE overhead */
+ u16 max_payload; /* includes RFC2374_FRAG_HDR_SIZE overhead */
int node_id;
int generation;
unsigned speed;
@@ -203,22 +203,18 @@ struct fwnet_peer {
/* This is our task struct. It's used for the packet complete callback. */
struct fwnet_packet_task {
- /*
- * ptask can actually be on dev->packet_list, dev->broadcasted_list,
- * or dev->sent_list depending on its current state.
- */
- struct list_head pt_link;
struct fw_transaction transaction;
struct rfc2734_header hdr;
struct sk_buff *skb;
struct fwnet_device *dev;
int outstanding_pkts;
- unsigned max_payload;
u64 fifo_addr;
u16 dest_node;
+ u16 max_payload;
u8 generation;
u8 speed;
+ u8 enqueued;
};
/*
@@ -650,8 +646,6 @@ static int fwnet_finish_incoming_packet(struct net_device *net,
net->stats.rx_packets++;
net->stats.rx_bytes += skb->len;
}
- if (netif_queue_stopped(net))
- netif_wake_queue(net);
return 0;
@@ -660,8 +654,6 @@ static int fwnet_finish_incoming_packet(struct net_device *net,
net->stats.rx_dropped++;
dev_kfree_skb_any(skb);
- if (netif_queue_stopped(net))
- netif_wake_queue(net);
return -ENOENT;
}
@@ -793,15 +785,10 @@ static int fwnet_incoming_packet(struct fwnet_device *dev, __be32 *buf, int len,
* Datagram is not complete, we're done for the
* moment.
*/
- spin_unlock_irqrestore(&dev->lock, flags);
-
- return 0;
+ retval = 0;
fail:
spin_unlock_irqrestore(&dev->lock, flags);
- if (netif_queue_stopped(net))
- netif_wake_queue(net);
-
return retval;
}
@@ -901,11 +888,19 @@ static void fwnet_free_ptask(struct fwnet_packet_task *ptask)
kmem_cache_free(fwnet_packet_task_cache, ptask);
}
+/* Caller must hold dev->lock. */
+static void dec_queued_datagrams(struct fwnet_device *dev)
+{
+ if (--dev->queued_datagrams == FWNET_MIN_QUEUED_DATAGRAMS)
+ netif_wake_queue(dev->netdev);
+}
+
static int fwnet_send_packet(struct fwnet_packet_task *ptask);
static void fwnet_transmit_packet_done(struct fwnet_packet_task *ptask)
{
struct fwnet_device *dev = ptask->dev;
+ struct sk_buff *skb = ptask->skb;
unsigned long flags;
bool free;
@@ -914,10 +909,14 @@ static void fwnet_transmit_packet_done(struct fwnet_packet_task *ptask)
ptask->outstanding_pkts--;
/* Check whether we or the networking TX soft-IRQ is last user. */
- free = (ptask->outstanding_pkts == 0 && !list_empty(&ptask->pt_link));
+ free = (ptask->outstanding_pkts == 0 && ptask->enqueued);
+ if (free)
+ dec_queued_datagrams(dev);
- if (ptask->outstanding_pkts == 0)
- list_del(&ptask->pt_link);
+ if (ptask->outstanding_pkts == 0) {
+ dev->netdev->stats.tx_packets++;
+ dev->netdev->stats.tx_bytes += skb->len;
+ }
spin_unlock_irqrestore(&dev->lock, flags);
@@ -926,7 +925,6 @@ static void fwnet_transmit_packet_done(struct fwnet_packet_task *ptask)
u16 fg_off;
u16 datagram_label;
u16 lf;
- struct sk_buff *skb;
/* Update the ptask to point to the next fragment and send it */
lf = fwnet_get_hdr_lf(&ptask->hdr);
@@ -953,7 +951,7 @@ static void fwnet_transmit_packet_done(struct fwnet_packet_task *ptask)
datagram_label = fwnet_get_hdr_dgl(&ptask->hdr);
break;
}
- skb = ptask->skb;
+
skb_pull(skb, ptask->max_payload);
if (ptask->outstanding_pkts > 1) {
fwnet_make_sf_hdr(&ptask->hdr, RFC2374_HDR_INTFRAG,
@@ -970,6 +968,31 @@ static void fwnet_transmit_packet_done(struct fwnet_packet_task *ptask)
fwnet_free_ptask(ptask);
}
+static void fwnet_transmit_packet_failed(struct fwnet_packet_task *ptask)
+{
+ struct fwnet_device *dev = ptask->dev;
+ unsigned long flags;
+ bool free;
+
+ spin_lock_irqsave(&dev->lock, flags);
+
+ /* One fragment failed; don't try to send remaining fragments. */
+ ptask->outstanding_pkts = 0;
+
+ /* Check whether we or the networking TX soft-IRQ is last user. */
+ free = ptask->enqueued;
+ if (free)
+ dec_queued_datagrams(dev);
+
+ dev->netdev->stats.tx_dropped++;
+ dev->netdev->stats.tx_errors++;
+
+ spin_unlock_irqrestore(&dev->lock, flags);
+
+ if (free)
+ fwnet_free_ptask(ptask);
+}
+
static void fwnet_write_complete(struct fw_card *card, int rcode,
void *payload, size_t length, void *data)
{
@@ -977,11 +1000,12 @@ static void fwnet_write_complete(struct fw_card *card, int rcode,
ptask = data;
- if (rcode == RCODE_COMPLETE)
+ if (rcode == RCODE_COMPLETE) {
fwnet_transmit_packet_done(ptask);
- else
+ } else {
fw_error("fwnet_write_complete: failed: %x\n", rcode);
- /* ??? error recovery */
+ fwnet_transmit_packet_failed(ptask);
+ }
}
static int fwnet_send_packet(struct fwnet_packet_task *ptask)
@@ -1039,9 +1063,11 @@ static int fwnet_send_packet(struct fwnet_packet_task *ptask)
spin_lock_irqsave(&dev->lock, flags);
/* If the AT tasklet already ran, we may be last user. */
- free = (ptask->outstanding_pkts == 0 && list_empty(&ptask->pt_link));
+ free = (ptask->outstanding_pkts == 0 && !ptask->enqueued);
if (!free)
- list_add_tail(&ptask->pt_link, &dev->broadcasted_list);
+ ptask->enqueued = true;
+ else
+ dec_queued_datagrams(dev);
spin_unlock_irqrestore(&dev->lock, flags);
@@ -1056,9 +1082,11 @@ static int fwnet_send_packet(struct fwnet_packet_task *ptask)
spin_lock_irqsave(&dev->lock, flags);
/* If the AT tasklet already ran, we may be last user. */
- free = (ptask->outstanding_pkts == 0 && list_empty(&ptask->pt_link));
+ free = (ptask->outstanding_pkts == 0 && !ptask->enqueued);
if (!free)
- list_add_tail(&ptask->pt_link, &dev->sent_list);
+ ptask->enqueued = true;
+ else
+ dec_queued_datagrams(dev);
spin_unlock_irqrestore(&dev->lock, flags);
@@ -1224,6 +1252,15 @@ static netdev_tx_t fwnet_tx(struct sk_buff *skb, struct net_device *net)
struct fwnet_peer *peer;
unsigned long flags;
+ spin_lock_irqsave(&dev->lock, flags);
+
+ /* Can this happen? */
+ if (netif_queue_stopped(dev->netdev)) {
+ spin_unlock_irqrestore(&dev->lock, flags);
+
+ return NETDEV_TX_BUSY;
+ }
+
ptask = kmem_cache_alloc(fwnet_packet_task_cache, GFP_ATOMIC);
if (ptask == NULL)
goto fail;
@@ -1242,9 +1279,6 @@ static netdev_tx_t fwnet_tx(struct sk_buff *skb, struct net_device *net)
proto = hdr_buf.h_proto;
dg_size = skb->len;
- /* serialize access to peer, including peer->datagram_label */
- spin_lock_irqsave(&dev->lock, flags);
-
/*
* Set the transmission type for the packet. ARP packets and IP
* broadcast packets are sent via GASP.
@@ -1266,7 +1300,7 @@ static netdev_tx_t fwnet_tx(struct sk_buff *skb, struct net_device *net)
peer = fwnet_peer_find_by_guid(dev, be64_to_cpu(guid));
if (!peer || peer->fifo == FWNET_NO_FIFO_ADDR)
- goto fail_unlock;
+ goto fail;
generation = peer->generation;
dest_node = peer->node_id;
@@ -1320,18 +1354,21 @@ static netdev_tx_t fwnet_tx(struct sk_buff *skb, struct net_device *net)
max_payload += RFC2374_FRAG_HDR_SIZE;
}
+ if (++dev->queued_datagrams == FWNET_MAX_QUEUED_DATAGRAMS)
+ netif_stop_queue(dev->netdev);
+
spin_unlock_irqrestore(&dev->lock, flags);
ptask->max_payload = max_payload;
- INIT_LIST_HEAD(&ptask->pt_link);
+ ptask->enqueued = 0;
fwnet_send_packet(ptask);
return NETDEV_TX_OK;
- fail_unlock:
- spin_unlock_irqrestore(&dev->lock, flags);
fail:
+ spin_unlock_irqrestore(&dev->lock, flags);
+
if (ptask)
kmem_cache_free(fwnet_packet_task_cache, ptask);
@@ -1377,7 +1414,7 @@ static void fwnet_init_dev(struct net_device *net)
net->addr_len = FWNET_ALEN;
net->hard_header_len = FWNET_HLEN;
net->type = ARPHRD_IEEE1394;
- net->tx_queue_len = 10;
+ net->tx_queue_len = FWNET_TX_QUEUE_LEN;
}
/* caller must hold fwnet_device_mutex */
@@ -1457,14 +1494,9 @@ static int fwnet_probe(struct device *_dev)
dev->broadcast_rcv_context = NULL;
dev->broadcast_xmt_max_payload = 0;
dev->broadcast_xmt_datagramlabel = 0;
-
dev->local_fifo = FWNET_NO_FIFO_ADDR;
-
- INIT_LIST_HEAD(&dev->packet_list);
- INIT_LIST_HEAD(&dev->broadcasted_list);
- INIT_LIST_HEAD(&dev->sent_list);
+ dev->queued_datagrams = 0;
INIT_LIST_HEAD(&dev->peer_list);
-
dev->card = card;
dev->netdev = net;
@@ -1522,7 +1554,7 @@ static int fwnet_remove(struct device *_dev)
struct fwnet_peer *peer = dev_get_drvdata(_dev);
struct fwnet_device *dev = peer->dev;
struct net_device *net;
- struct fwnet_packet_task *ptask, *pt_next;
+ int i;
mutex_lock(&fwnet_device_mutex);
@@ -1540,21 +1572,9 @@ static int fwnet_remove(struct device *_dev)
dev->card);
fw_iso_context_destroy(dev->broadcast_rcv_context);
}
- list_for_each_entry_safe(ptask, pt_next,
- &dev->packet_list, pt_link) {
- dev_kfree_skb_any(ptask->skb);
- kmem_cache_free(fwnet_packet_task_cache, ptask);
- }
- list_for_each_entry_safe(ptask, pt_next,
- &dev->broadcasted_list, pt_link) {
- dev_kfree_skb_any(ptask->skb);
- kmem_cache_free(fwnet_packet_task_cache, ptask);
- }
- list_for_each_entry_safe(ptask, pt_next,
- &dev->sent_list, pt_link) {
- dev_kfree_skb_any(ptask->skb);
- kmem_cache_free(fwnet_packet_task_cache, ptask);
- }
+ for (i = 0; dev->queued_datagrams && i < 5; i++)
+ ssleep(1);
+ WARN_ON(dev->queued_datagrams);
list_del(&dev->dev_link);
free_netdev(net);
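The net.c changes replace the three per-state packet lists with a single queued_datagrams counter that throttles the TX queue. A simplified sketch of that scheme with illustrative names and watermarks (fwnet uses dev->lock and its own FWNET_* constants):

#include <linux/netdevice.h>
#include <linux/spinlock.h>

#define TX_HIGH_WATER	20	/* stop the queue at this many in flight */
#define TX_LOW_WATER	10	/* wake it once completions drain to this */

struct txq_ctl {
	spinlock_t lock;
	int queued;
	struct net_device *netdev;
};

/* Called from ndo_start_xmit after a datagram is handed to the hardware. */
static void txq_account_xmit(struct txq_ctl *c)
{
	unsigned long flags;

	spin_lock_irqsave(&c->lock, flags);
	if (++c->queued == TX_HIGH_WATER)
		netif_stop_queue(c->netdev);
	spin_unlock_irqrestore(&c->lock, flags);
}

/* Called from the transmit-completion path for each finished datagram. */
static void txq_account_complete(struct txq_ctl *c)
{
	unsigned long flags;

	spin_lock_irqsave(&c->lock, flags);
	if (--c->queued == TX_LOW_WATER)
		netif_wake_queue(c->netdev);
	spin_unlock_irqrestore(&c->lock, flags);
}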
diff --git a/drivers/firewire/ohci.c b/drivers/firewire/ohci.c
index 9dcb17d51aee..84eb607d6c03 100644
--- a/drivers/firewire/ohci.c
+++ b/drivers/firewire/ohci.c
@@ -577,17 +577,11 @@ static int ohci_update_phy_reg(struct fw_card *card, int addr,
return ret;
}
-static int ar_context_add_page(struct ar_context *ctx)
+static void ar_context_link_page(struct ar_context *ctx,
+ struct ar_buffer *ab, dma_addr_t ab_bus)
{
- struct device *dev = ctx->ohci->card.device;
- struct ar_buffer *ab;
- dma_addr_t uninitialized_var(ab_bus);
size_t offset;
- ab = dma_alloc_coherent(dev, PAGE_SIZE, &ab_bus, GFP_ATOMIC);
- if (ab == NULL)
- return -ENOMEM;
-
ab->next = NULL;
memset(&ab->descriptor, 0, sizeof(ab->descriptor));
ab->descriptor.control = cpu_to_le16(DESCRIPTOR_INPUT_MORE |
@@ -606,6 +600,19 @@ static int ar_context_add_page(struct ar_context *ctx)
reg_write(ctx->ohci, CONTROL_SET(ctx->regs), CONTEXT_WAKE);
flush_writes(ctx->ohci);
+}
+
+static int ar_context_add_page(struct ar_context *ctx)
+{
+ struct device *dev = ctx->ohci->card.device;
+ struct ar_buffer *ab;
+ dma_addr_t uninitialized_var(ab_bus);
+
+ ab = dma_alloc_coherent(dev, PAGE_SIZE, &ab_bus, GFP_ATOMIC);
+ if (ab == NULL)
+ return -ENOMEM;
+
+ ar_context_link_page(ctx, ab, ab_bus);
return 0;
}
@@ -730,16 +737,17 @@ static __le32 *handle_ar_packet(struct ar_context *ctx, __le32 *buffer)
static void ar_context_tasklet(unsigned long data)
{
struct ar_context *ctx = (struct ar_context *)data;
- struct fw_ohci *ohci = ctx->ohci;
struct ar_buffer *ab;
struct descriptor *d;
void *buffer, *end;
+ __le16 res_count;
ab = ctx->current_buffer;
d = &ab->descriptor;
- if (d->res_count == 0) {
- size_t size, rest, offset;
+ res_count = ACCESS_ONCE(d->res_count);
+ if (res_count == 0) {
+ size_t size, size2, rest, pktsize, size3, offset;
dma_addr_t start_bus;
void *start;
@@ -750,29 +758,63 @@ static void ar_context_tasklet(unsigned long data)
*/
offset = offsetof(struct ar_buffer, data);
- start = buffer = ab;
+ start = ab;
start_bus = le32_to_cpu(ab->descriptor.data_address) - offset;
+ buffer = ab->data;
ab = ab->next;
d = &ab->descriptor;
- size = buffer + PAGE_SIZE - ctx->pointer;
+ size = start + PAGE_SIZE - ctx->pointer;
+ /* valid buffer data in the next page */
rest = le16_to_cpu(d->req_count) - le16_to_cpu(d->res_count);
+ /* what actually fits in this page */
+ size2 = min(rest, (size_t)PAGE_SIZE - offset - size);
memmove(buffer, ctx->pointer, size);
- memcpy(buffer + size, ab->data, rest);
- ctx->current_buffer = ab;
- ctx->pointer = (void *) ab->data + rest;
- end = buffer + size + rest;
+ memcpy(buffer + size, ab->data, size2);
+
+ while (size > 0) {
+ void *next = handle_ar_packet(ctx, buffer);
+ pktsize = next - buffer;
+ if (pktsize >= size) {
+ /*
+ * We have handled all the data that was
+ * originally in this page, so we can now
+ * continue in the next page.
+ */
+ buffer = next;
+ break;
+ }
+ /* move the next packet to the start of the buffer */
+ memmove(buffer, next, size + size2 - pktsize);
+ size -= pktsize;
+ /* fill up this page again */
+ size3 = min(rest - size2,
+ (size_t)PAGE_SIZE - offset - size - size2);
+ memcpy(buffer + size + size2,
+ (void *) ab->data + size2, size3);
+ size2 += size3;
+ }
- while (buffer < end)
- buffer = handle_ar_packet(ctx, buffer);
+ if (rest > 0) {
+ /* handle the packets that are fully in the next page */
+ buffer = (void *) ab->data +
+ (buffer - (start + offset + size));
+ end = (void *) ab->data + rest;
+
+ while (buffer < end)
+ buffer = handle_ar_packet(ctx, buffer);
- dma_free_coherent(ohci->card.device, PAGE_SIZE,
- start, start_bus);
- ar_context_add_page(ctx);
+ ctx->current_buffer = ab;
+ ctx->pointer = end;
+
+ ar_context_link_page(ctx, start, start_bus);
+ } else {
+ ctx->pointer = start + PAGE_SIZE;
+ }
} else {
buffer = ctx->pointer;
ctx->pointer = end =
- (void *) ab + PAGE_SIZE - le16_to_cpu(d->res_count);
+ (void *) ab + PAGE_SIZE - le16_to_cpu(res_count);
while (buffer < end)
buffer = handle_ar_packet(ctx, buffer);
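The ohci.c hunk reads d->res_count through ACCESS_ONCE() because the controller updates that descriptor field by DMA while the tasklet runs. A small sketch of the idiom, using an illustrative descriptor layout rather than the driver's real one:

#include <asm/byteorder.h>
#include <linux/compiler.h>
#include <linux/types.h>

struct demo_descriptor {
	__le16 req_count;
	__le16 res_count;	/* written back by the DMA engine */
};

/* Read the DMA-updated field exactly once so every later use agrees. */
static size_t bytes_filled(struct demo_descriptor *d)
{
	__le16 res = ACCESS_ONCE(d->res_count);

	return le16_to_cpu(d->req_count) - le16_to_cpu(res);
}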
diff --git a/drivers/firewire/sbp2.c b/drivers/firewire/sbp2.c
index bfae4b309791..afa576a75a8e 100644
--- a/drivers/firewire/sbp2.c
+++ b/drivers/firewire/sbp2.c
@@ -1468,7 +1468,7 @@ static int sbp2_map_scatterlist(struct sbp2_command_orb *orb,
/* SCSI stack integration */
-static int sbp2_scsi_queuecommand(struct scsi_cmnd *cmd, scsi_done_fn_t done)
+static int sbp2_scsi_queuecommand_lck(struct scsi_cmnd *cmd, scsi_done_fn_t done)
{
struct sbp2_logical_unit *lu = cmd->device->hostdata;
struct fw_device *device = target_device(lu->tgt);
@@ -1534,6 +1534,8 @@ static int sbp2_scsi_queuecommand(struct scsi_cmnd *cmd, scsi_done_fn_t done)
return retval;
}
+static DEF_SCSI_QCMD(sbp2_scsi_queuecommand)
+
static int sbp2_scsi_slave_alloc(struct scsi_device *sdev)
{
struct sbp2_logical_unit *lu = sdev->hostdata;
diff --git a/drivers/firmware/dmi_scan.c b/drivers/firmware/dmi_scan.c
index b3d22d659990..e28e41668177 100644
--- a/drivers/firmware/dmi_scan.c
+++ b/drivers/firmware/dmi_scan.c
@@ -2,6 +2,7 @@
#include <linux/string.h>
#include <linux/init.h>
#include <linux/module.h>
+#include <linux/ctype.h>
#include <linux/dmi.h>
#include <linux/efi.h>
#include <linux/bootmem.h>
@@ -361,6 +362,33 @@ static void __init dmi_decode(const struct dmi_header *dm, void *dummy)
}
}
+static void __init print_filtered(const char *info)
+{
+ const char *p;
+
+ if (!info)
+ return;
+
+ for (p = info; *p; p++)
+ if (isprint(*p))
+ printk(KERN_CONT "%c", *p);
+ else
+ printk(KERN_CONT "\\x%02x", *p & 0xff);
+}
+
+static void __init dmi_dump_ids(void)
+{
+ printk(KERN_DEBUG "DMI: ");
+ print_filtered(dmi_get_system_info(DMI_BOARD_NAME));
+ printk(KERN_CONT "/");
+ print_filtered(dmi_get_system_info(DMI_PRODUCT_NAME));
+ printk(KERN_CONT ", BIOS ");
+ print_filtered(dmi_get_system_info(DMI_BIOS_VERSION));
+ printk(KERN_CONT " ");
+ print_filtered(dmi_get_system_info(DMI_BIOS_DATE));
+ printk(KERN_CONT "\n");
+}
+
static int __init dmi_present(const char __iomem *p)
{
u8 buf[15];
@@ -381,8 +409,10 @@ static int __init dmi_present(const char __iomem *p)
buf[14] >> 4, buf[14] & 0xF);
else
printk(KERN_INFO "DMI present.\n");
- if (dmi_walk_early(dmi_decode) == 0)
+ if (dmi_walk_early(dmi_decode) == 0) {
+ dmi_dump_ids();
return 0;
+ }
}
return 1;
}
diff --git a/drivers/gpio/74x164.c b/drivers/gpio/74x164.c
new file mode 100644
index 000000000000..d91ff4c282e9
--- /dev/null
+++ b/drivers/gpio/74x164.c
@@ -0,0 +1,182 @@
+/*
+ * 74Hx164 - Generic serial-in/parallel-out 8-bit shift register GPIO driver
+ *
+ * Copyright (C) 2010 Gabor Juhos <juhosg@openwrt.org>
+ * Copyright (C) 2010 Miguel Gaio <miguel.gaio@efixo.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/init.h>
+#include <linux/mutex.h>
+#include <linux/spi/spi.h>
+#include <linux/spi/74x164.h>
+#include <linux/gpio.h>
+#include <linux/slab.h>
+
+#define GEN_74X164_GPIO_COUNT 8
+
+
+struct gen_74x164_chip {
+ struct spi_device *spi;
+ struct gpio_chip gpio_chip;
+ struct mutex lock;
+ u8 port_config;
+};
+
+static void gen_74x164_set_value(struct gpio_chip *, unsigned, int);
+
+static struct gen_74x164_chip *gpio_to_chip(struct gpio_chip *gc)
+{
+ return container_of(gc, struct gen_74x164_chip, gpio_chip);
+}
+
+static int __gen_74x164_write_config(struct gen_74x164_chip *chip)
+{
+ return spi_write(chip->spi,
+ &chip->port_config, sizeof(chip->port_config));
+}
+
+static int gen_74x164_direction_output(struct gpio_chip *gc,
+ unsigned offset, int val)
+{
+ gen_74x164_set_value(gc, offset, val);
+ return 0;
+}
+
+static int gen_74x164_get_value(struct gpio_chip *gc, unsigned offset)
+{
+ struct gen_74x164_chip *chip = gpio_to_chip(gc);
+ int ret;
+
+ mutex_lock(&chip->lock);
+ ret = (chip->port_config >> offset) & 0x1;
+ mutex_unlock(&chip->lock);
+
+ return ret;
+}
+
+static void gen_74x164_set_value(struct gpio_chip *gc,
+ unsigned offset, int val)
+{
+ struct gen_74x164_chip *chip = gpio_to_chip(gc);
+
+ mutex_lock(&chip->lock);
+ if (val)
+ chip->port_config |= (1 << offset);
+ else
+ chip->port_config &= ~(1 << offset);
+
+ __gen_74x164_write_config(chip);
+ mutex_unlock(&chip->lock);
+}
+
+static int __devinit gen_74x164_probe(struct spi_device *spi)
+{
+ struct gen_74x164_chip *chip;
+ struct gen_74x164_chip_platform_data *pdata;
+ int ret;
+
+ pdata = spi->dev.platform_data;
+ if (!pdata || !pdata->base) {
+ dev_dbg(&spi->dev, "incorrect or missing platform data\n");
+ return -EINVAL;
+ }
+
+ /*
+ * bits_per_word cannot be configured in platform data
+ */
+ spi->bits_per_word = 8;
+
+ ret = spi_setup(spi);
+ if (ret < 0)
+ return ret;
+
+ chip = kzalloc(sizeof(*chip), GFP_KERNEL);
+ if (!chip)
+ return -ENOMEM;
+
+ mutex_init(&chip->lock);
+
+ dev_set_drvdata(&spi->dev, chip);
+
+ chip->spi = spi;
+
+ chip->gpio_chip.label = GEN_74X164_DRIVER_NAME,
+ chip->gpio_chip.direction_output = gen_74x164_direction_output;
+ chip->gpio_chip.get = gen_74x164_get_value;
+ chip->gpio_chip.set = gen_74x164_set_value;
+ chip->gpio_chip.base = pdata->base;
+ chip->gpio_chip.ngpio = GEN_74X164_GPIO_COUNT;
+ chip->gpio_chip.can_sleep = 1;
+ chip->gpio_chip.dev = &spi->dev;
+ chip->gpio_chip.owner = THIS_MODULE;
+
+ ret = __gen_74x164_write_config(chip);
+ if (ret) {
+ dev_err(&spi->dev, "Failed writing: %d\n", ret);
+ goto exit_destroy;
+ }
+
+ ret = gpiochip_add(&chip->gpio_chip);
+ if (ret)
+ goto exit_destroy;
+
+ return ret;
+
+exit_destroy:
+ dev_set_drvdata(&spi->dev, NULL);
+ mutex_destroy(&chip->lock);
+ kfree(chip);
+ return ret;
+}
+
+static int gen_74x164_remove(struct spi_device *spi)
+{
+ struct gen_74x164_chip *chip;
+ int ret;
+
+ chip = dev_get_drvdata(&spi->dev);
+ if (chip == NULL)
+ return -ENODEV;
+
+ dev_set_drvdata(&spi->dev, NULL);
+
+ ret = gpiochip_remove(&chip->gpio_chip);
+ if (!ret) {
+ mutex_destroy(&chip->lock);
+ kfree(chip);
+ } else
+ dev_err(&spi->dev, "Failed to remove the GPIO controller: %d\n",
+ ret);
+
+ return ret;
+}
+
+static struct spi_driver gen_74x164_driver = {
+ .driver = {
+ .name = GEN_74X164_DRIVER_NAME,
+ .owner = THIS_MODULE,
+ },
+ .probe = gen_74x164_probe,
+ .remove = __devexit_p(gen_74x164_remove),
+};
+
+static int __init gen_74x164_init(void)
+{
+ return spi_register_driver(&gen_74x164_driver);
+}
+subsys_initcall(gen_74x164_init);
+
+static void __exit gen_74x164_exit(void)
+{
+ spi_unregister_driver(&gen_74x164_driver);
+}
+module_exit(gen_74x164_exit);
+
+MODULE_AUTHOR("Gabor Juhos <juhosg@openwrt.org>");
+MODULE_AUTHOR("Miguel Gaio <miguel.gaio@efixo.com>");
+MODULE_DESCRIPTION("GPIO expander driver for 74X164 8-bit shift register");
+MODULE_LICENSE("GPL v2");
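One way the new shift-register driver could be wired up from board code, shown purely as a hypothetical usage sketch: the .base field is the platform data checked in gen_74x164_probe() above, and the "74x164" modalias is assumed to match GEN_74X164_DRIVER_NAME, whose literal value lives in <linux/spi/74x164.h> and is not shown in this patch.

#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/spi/spi.h>
#include <linux/spi/74x164.h>

static struct gen_74x164_chip_platform_data board_74x164_pdata = {
	.base	= 200,		/* first GPIO number for the 8 outputs */
};

static struct spi_board_info board_spi_devices[] __initdata = {
	{
		.modalias	= "74x164",	/* assumed driver name */
		.max_speed_hz	= 1000000,
		.bus_num	= 1,
		.chip_select	= 2,
		.platform_data	= &board_74x164_pdata,
	},
};

/* Registered once from the board's init code. */
static int __init board_register_spi(void)
{
	return spi_register_board_info(board_spi_devices,
				       ARRAY_SIZE(board_spi_devices));
}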
diff --git a/drivers/gpio/Kconfig b/drivers/gpio/Kconfig
index 510aa2054544..3143ac795eb0 100644
--- a/drivers/gpio/Kconfig
+++ b/drivers/gpio/Kconfig
@@ -70,6 +70,11 @@ config GPIO_MAX730X
comment "Memory mapped GPIO expanders:"
+config GPIO_BASIC_MMIO
+ tristate "Basic memory-mapped GPIO controllers support"
+ help
+ Say yes here to support basic memory-mapped GPIO controllers.
+
config GPIO_IT8761E
tristate "IT8761E GPIO support"
depends on GPIOLIB
@@ -111,6 +116,18 @@ config GPIO_SCH
This driver can also be built as a module. If so, the module
will be called sch-gpio.
+config GPIO_VX855
+ tristate "VIA VX855/VX875 GPIO"
+ depends on GPIOLIB
+ select MFD_CORE
+ select MFD_VX855
+ help
+ Support access to the VX855/VX875 GPIO lines through the gpio library.
+
+ This driver provides common support for accessing the device;
+ additional drivers must be enabled in order to use the
+ functionality of the device.
+
comment "I2C GPIO expanders:"
config GPIO_MAX7300
@@ -267,6 +284,13 @@ config GPIO_ADP5588
To compile this driver as a module, choose M here: the module will be
called adp5588-gpio.
+config GPIO_ADP5588_IRQ
+ bool "Interrupt controller support for ADP5588"
+ depends on GPIO_ADP5588=y
+ help
+ Say yes here to enable the adp5588 to be used as an interrupt
+ controller. It requires the driver to be built into the kernel.
+
comment "PCI GPIO expanders:"
config GPIO_CS5535
@@ -301,6 +325,14 @@ config GPIO_LANGWELL
help
Say Y here to support Intel Langwell/Penwell GPIO.
+config GPIO_PCH
+ tristate "PCH GPIO of Intel Topcliff"
+ depends on PCI
+ help
+ This driver is for the PCH (Platform Controller Hub) GPIO found on the
+ Intel Topcliff, which is an IOH (Input/Output Hub) for x86 embedded
+ processors. It provides access to the PCH GPIO device.
+
config GPIO_TIMBERDALE
bool "Support for timberdale GPIO IP"
depends on MFD_TIMBERDALE && GPIOLIB && HAS_IOMEM
@@ -339,6 +371,14 @@ config GPIO_MC33880
SPI driver for Freescale MC33880 high-side/low-side switch.
This provides GPIO interface supporting inputs and outputs.
+config GPIO_74X164
+ tristate "74x164 serial-in/parallel-out 8-bits shift register"
+ depends on SPI_MASTER
+ help
+ Driver for 74x164-compatible serial-in/parallel-out 8-output
+ shift registers. It can be used to provide access to additional
+ GPIO outputs.
+
comment "AC97 GPIO expanders:"
config GPIO_UCB1400
diff --git a/drivers/gpio/Makefile b/drivers/gpio/Makefile
index fc6019d93720..bdf3ddec0652 100644
--- a/drivers/gpio/Makefile
+++ b/drivers/gpio/Makefile
@@ -10,6 +10,7 @@ obj-$(CONFIG_GPIOLIB) += gpiolib.o
obj-$(CONFIG_GPIO_ADP5520) += adp5520-gpio.o
obj-$(CONFIG_GPIO_ADP5588) += adp5588-gpio.o
+obj-$(CONFIG_GPIO_BASIC_MMIO) += basic_mmio_gpio.o
obj-$(CONFIG_GPIO_LANGWELL) += langwell_gpio.o
obj-$(CONFIG_GPIO_MAX730X) += max730x.o
obj-$(CONFIG_GPIO_MAX7300) += max7300.o
@@ -17,8 +18,10 @@ obj-$(CONFIG_GPIO_MAX7301) += max7301.o
obj-$(CONFIG_GPIO_MAX732X) += max732x.o
obj-$(CONFIG_GPIO_MC33880) += mc33880.o
obj-$(CONFIG_GPIO_MCP23S08) += mcp23s08.o
+obj-$(CONFIG_GPIO_74X164) += 74x164.o
obj-$(CONFIG_GPIO_PCA953X) += pca953x.o
obj-$(CONFIG_GPIO_PCF857X) += pcf857x.o
+obj-$(CONFIG_GPIO_PCH) += pch_gpio.o
obj-$(CONFIG_GPIO_PL061) += pl061.o
obj-$(CONFIG_GPIO_STMPE) += stmpe-gpio.o
obj-$(CONFIG_GPIO_TC35892) += tc35892-gpio.o
@@ -37,3 +40,4 @@ obj-$(CONFIG_GPIO_SCH) += sch_gpio.o
obj-$(CONFIG_GPIO_RDC321X) += rdc321x-gpio.o
obj-$(CONFIG_GPIO_JANZ_TTL) += janz-ttl.o
obj-$(CONFIG_GPIO_SX150X) += sx150x.o
+obj-$(CONFIG_GPIO_VX855) += vx855_gpio.o
diff --git a/drivers/gpio/adp5588-gpio.c b/drivers/gpio/adp5588-gpio.c
index 2e8e9e24f887..0871f78af593 100644
--- a/drivers/gpio/adp5588-gpio.c
+++ b/drivers/gpio/adp5588-gpio.c
@@ -1,8 +1,8 @@
/*
* GPIO Chip driver for Analog Devices
- * ADP5588 I/O Expander and QWERTY Keypad Controller
+ * ADP5588/ADP5587 I/O Expander and QWERTY Keypad Controller
*
- * Copyright 2009 Analog Devices Inc.
+ * Copyright 2009-2010 Analog Devices Inc.
*
* Licensed under the GPL-2 or later.
*/
@@ -13,21 +13,34 @@
#include <linux/init.h>
#include <linux/i2c.h>
#include <linux/gpio.h>
+#include <linux/interrupt.h>
+#include <linux/irq.h>
#include <linux/i2c/adp5588.h>
-#define DRV_NAME "adp5588-gpio"
-#define MAXGPIO 18
-#define ADP_BANK(offs) ((offs) >> 3)
-#define ADP_BIT(offs) (1u << ((offs) & 0x7))
+#define DRV_NAME "adp5588-gpio"
+
+/*
+ * Early, pre-4.0 silicon requires the readout to be delayed by at
+ * least 25ms, since the Event Counter Register is updated 25ms after
+ * the interrupt is asserted.
+ */
+#define WA_DELAYED_READOUT_REVID(rev) ((rev) < 4)
struct adp5588_gpio {
struct i2c_client *client;
struct gpio_chip gpio_chip;
struct mutex lock; /* protect cached dir, dat_out */
+ /* protect serialized access to the interrupt controller bus */
+ struct mutex irq_lock;
unsigned gpio_start;
+ unsigned irq_base;
uint8_t dat_out[3];
uint8_t dir[3];
+ uint8_t int_lvl[3];
+ uint8_t int_en[3];
+ uint8_t irq_mask[3];
+ uint8_t irq_stat[3];
};
static int adp5588_gpio_read(struct i2c_client *client, u8 reg)
@@ -55,8 +68,8 @@ static int adp5588_gpio_get_value(struct gpio_chip *chip, unsigned off)
struct adp5588_gpio *dev =
container_of(chip, struct adp5588_gpio, gpio_chip);
- return !!(adp5588_gpio_read(dev->client, GPIO_DAT_STAT1 + ADP_BANK(off))
- & ADP_BIT(off));
+ return !!(adp5588_gpio_read(dev->client,
+ GPIO_DAT_STAT1 + ADP5588_BANK(off)) & ADP5588_BIT(off));
}
static void adp5588_gpio_set_value(struct gpio_chip *chip,
@@ -66,8 +79,8 @@ static void adp5588_gpio_set_value(struct gpio_chip *chip,
struct adp5588_gpio *dev =
container_of(chip, struct adp5588_gpio, gpio_chip);
- bank = ADP_BANK(off);
- bit = ADP_BIT(off);
+ bank = ADP5588_BANK(off);
+ bit = ADP5588_BIT(off);
mutex_lock(&dev->lock);
if (val)
@@ -87,10 +100,10 @@ static int adp5588_gpio_direction_input(struct gpio_chip *chip, unsigned off)
struct adp5588_gpio *dev =
container_of(chip, struct adp5588_gpio, gpio_chip);
- bank = ADP_BANK(off);
+ bank = ADP5588_BANK(off);
mutex_lock(&dev->lock);
- dev->dir[bank] &= ~ADP_BIT(off);
+ dev->dir[bank] &= ~ADP5588_BIT(off);
ret = adp5588_gpio_write(dev->client, GPIO_DIR1 + bank, dev->dir[bank]);
mutex_unlock(&dev->lock);
@@ -105,8 +118,8 @@ static int adp5588_gpio_direction_output(struct gpio_chip *chip,
struct adp5588_gpio *dev =
container_of(chip, struct adp5588_gpio, gpio_chip);
- bank = ADP_BANK(off);
- bit = ADP_BIT(off);
+ bank = ADP5588_BANK(off);
+ bit = ADP5588_BIT(off);
mutex_lock(&dev->lock);
dev->dir[bank] |= bit;
@@ -125,6 +138,213 @@ static int adp5588_gpio_direction_output(struct gpio_chip *chip,
return ret;
}
+#ifdef CONFIG_GPIO_ADP5588_IRQ
+static int adp5588_gpio_to_irq(struct gpio_chip *chip, unsigned off)
+{
+ struct adp5588_gpio *dev =
+ container_of(chip, struct adp5588_gpio, gpio_chip);
+ return dev->irq_base + off;
+}
+
+static void adp5588_irq_bus_lock(unsigned int irq)
+{
+ struct adp5588_gpio *dev = get_irq_chip_data(irq);
+ mutex_lock(&dev->irq_lock);
+}
+
+ /*
+ * genirq core code can issue chip->mask/unmask from atomic context.
+ * This doesn't work for slow busses where an access needs to sleep.
+ * bus_sync_unlock() is therefore called outside the atomic context,
+ * syncs the current irq mask state with the slow external controller
+ * and unlocks the bus.
+ */
+
+static void adp5588_irq_bus_sync_unlock(unsigned int irq)
+{
+ struct adp5588_gpio *dev = get_irq_chip_data(irq);
+ int i;
+
+ for (i = 0; i <= ADP5588_BANK(ADP5588_MAXGPIO); i++)
+ if (dev->int_en[i] ^ dev->irq_mask[i]) {
+ dev->int_en[i] = dev->irq_mask[i];
+ adp5588_gpio_write(dev->client, GPIO_INT_EN1 + i,
+ dev->int_en[i]);
+ }
+
+ mutex_unlock(&dev->irq_lock);
+}
+
+static void adp5588_irq_mask(unsigned int irq)
+{
+ struct adp5588_gpio *dev = get_irq_chip_data(irq);
+ unsigned gpio = irq - dev->irq_base;
+
+ dev->irq_mask[ADP5588_BANK(gpio)] &= ~ADP5588_BIT(gpio);
+}
+
+static void adp5588_irq_unmask(unsigned int irq)
+{
+ struct adp5588_gpio *dev = get_irq_chip_data(irq);
+ unsigned gpio = irq - dev->irq_base;
+
+ dev->irq_mask[ADP5588_BANK(gpio)] |= ADP5588_BIT(gpio);
+}
+
+static int adp5588_irq_set_type(unsigned int irq, unsigned int type)
+{
+ struct adp5588_gpio *dev = get_irq_chip_data(irq);
+ uint16_t gpio = irq - dev->irq_base;
+ unsigned bank, bit;
+
+ if ((type & IRQ_TYPE_EDGE_BOTH)) {
+ dev_err(&dev->client->dev, "irq %d: unsupported type %d\n",
+ irq, type);
+ return -EINVAL;
+ }
+
+ bank = ADP5588_BANK(gpio);
+ bit = ADP5588_BIT(gpio);
+
+ if (type & IRQ_TYPE_LEVEL_HIGH)
+ dev->int_lvl[bank] |= bit;
+ else if (type & IRQ_TYPE_LEVEL_LOW)
+ dev->int_lvl[bank] &= ~bit;
+ else
+ return -EINVAL;
+
+ adp5588_gpio_direction_input(&dev->gpio_chip, gpio);
+ adp5588_gpio_write(dev->client, GPIO_INT_LVL1 + bank,
+ dev->int_lvl[bank]);
+
+ return 0;
+}
+
+static struct irq_chip adp5588_irq_chip = {
+ .name = "adp5588",
+ .mask = adp5588_irq_mask,
+ .unmask = adp5588_irq_unmask,
+ .bus_lock = adp5588_irq_bus_lock,
+ .bus_sync_unlock = adp5588_irq_bus_sync_unlock,
+ .set_type = adp5588_irq_set_type,
+};
+
+static int adp5588_gpio_read_intstat(struct i2c_client *client, u8 *buf)
+{
+ int ret = i2c_smbus_read_i2c_block_data(client, GPIO_INT_STAT1, 3, buf);
+
+ if (ret < 0)
+ dev_err(&client->dev, "Read INT_STAT Error\n");
+
+ return ret;
+}
+
+static irqreturn_t adp5588_irq_handler(int irq, void *devid)
+{
+ struct adp5588_gpio *dev = devid;
+ unsigned status, bank, bit, pending;
+ int ret;
+ status = adp5588_gpio_read(dev->client, INT_STAT);
+
+ if (status & ADP5588_GPI_INT) {
+ ret = adp5588_gpio_read_intstat(dev->client, dev->irq_stat);
+ if (ret < 0)
+ memset(dev->irq_stat, 0, sizeof(dev->irq_stat));
+
+ for (bank = 0, bit = 0; bank <= ADP5588_BANK(ADP5588_MAXGPIO);
+ bank++, bit = 0) {
+ pending = dev->irq_stat[bank] & dev->irq_mask[bank];
+
+ while (pending) {
+ if (pending & (1 << bit)) {
+ handle_nested_irq(dev->irq_base +
+ (bank << 3) + bit);
+ pending &= ~(1 << bit);
+
+ }
+ bit++;
+ }
+ }
+ }
+
+ adp5588_gpio_write(dev->client, INT_STAT, status); /* Status is W1C */
+
+ return IRQ_HANDLED;
+}
+
+static int adp5588_irq_setup(struct adp5588_gpio *dev)
+{
+ struct i2c_client *client = dev->client;
+ struct adp5588_gpio_platform_data *pdata = client->dev.platform_data;
+ unsigned gpio;
+ int ret;
+
+ adp5588_gpio_write(client, CFG, ADP5588_AUTO_INC);
+ adp5588_gpio_write(client, INT_STAT, -1); /* status is W1C */
+ adp5588_gpio_read_intstat(client, dev->irq_stat); /* read to clear */
+
+ dev->irq_base = pdata->irq_base;
+ mutex_init(&dev->irq_lock);
+
+ for (gpio = 0; gpio < dev->gpio_chip.ngpio; gpio++) {
+ int irq = gpio + dev->irq_base;
+ set_irq_chip_data(irq, dev);
+ set_irq_chip_and_handler(irq, &adp5588_irq_chip,
+ handle_level_irq);
+ set_irq_nested_thread(irq, 1);
+#ifdef CONFIG_ARM
+ /*
+ * ARM needs us to explicitly flag the IRQ as VALID,
+ * once we do so, it will also set the noprobe.
+ */
+ set_irq_flags(irq, IRQF_VALID);
+#else
+ set_irq_noprobe(irq);
+#endif
+ }
+
+ ret = request_threaded_irq(client->irq,
+ NULL,
+ adp5588_irq_handler,
+ IRQF_TRIGGER_FALLING | IRQF_ONESHOT,
+ dev_name(&client->dev), dev);
+ if (ret) {
+ dev_err(&client->dev, "failed to request irq %d\n",
+ client->irq);
+ goto out;
+ }
+
+ dev->gpio_chip.to_irq = adp5588_gpio_to_irq;
+ adp5588_gpio_write(client, CFG,
+ ADP5588_AUTO_INC | ADP5588_INT_CFG | ADP5588_GPI_INT);
+
+ return 0;
+
+out:
+ dev->irq_base = 0;
+ return ret;
+}
+
+static void adp5588_irq_teardown(struct adp5588_gpio *dev)
+{
+ if (dev->irq_base)
+ free_irq(dev->client->irq, dev);
+}
+
+#else
+static int adp5588_irq_setup(struct adp5588_gpio *dev)
+{
+ struct i2c_client *client = dev->client;
+ dev_warn(&client->dev, "interrupt support not compiled in\n");
+
+ return 0;
+}
+
+static void adp5588_irq_teardown(struct adp5588_gpio *dev)
+{
+}
+#endif /* CONFIG_GPIO_ADP5588_IRQ */
+
static int __devinit adp5588_gpio_probe(struct i2c_client *client,
const struct i2c_device_id *id)
{
@@ -160,37 +380,46 @@ static int __devinit adp5588_gpio_probe(struct i2c_client *client,
gc->can_sleep = 1;
gc->base = pdata->gpio_start;
- gc->ngpio = MAXGPIO;
+ gc->ngpio = ADP5588_MAXGPIO;
gc->label = client->name;
gc->owner = THIS_MODULE;
mutex_init(&dev->lock);
-
ret = adp5588_gpio_read(dev->client, DEV_ID);
if (ret < 0)
goto err;
revid = ret & ADP5588_DEVICE_ID_MASK;
- for (i = 0, ret = 0; i <= ADP_BANK(MAXGPIO); i++) {
+ for (i = 0, ret = 0; i <= ADP5588_BANK(ADP5588_MAXGPIO); i++) {
dev->dat_out[i] = adp5588_gpio_read(client, GPIO_DAT_OUT1 + i);
dev->dir[i] = adp5588_gpio_read(client, GPIO_DIR1 + i);
ret |= adp5588_gpio_write(client, KP_GPIO1 + i, 0);
ret |= adp5588_gpio_write(client, GPIO_PULL1 + i,
(pdata->pullup_dis_mask >> (8 * i)) & 0xFF);
-
+ ret |= adp5588_gpio_write(client, GPIO_INT_EN1 + i, 0);
if (ret)
goto err;
}
+ if (pdata->irq_base) {
+ if (WA_DELAYED_READOUT_REVID(revid)) {
+ dev_warn(&client->dev, "GPIO int not supported\n");
+ } else {
+ ret = adp5588_irq_setup(dev);
+ if (ret)
+ goto err;
+ }
+ }
+
ret = gpiochip_add(&dev->gpio_chip);
if (ret)
- goto err;
+ goto err_irq;
- dev_info(&client->dev, "gpios %d..%d on a %s Rev. %d\n",
+ dev_info(&client->dev, "gpios %d..%d (IRQ Base %d) on a %s Rev. %d\n",
gc->base, gc->base + gc->ngpio - 1,
- client->name, revid);
+ pdata->irq_base, client->name, revid);
if (pdata->setup) {
ret = pdata->setup(client, gc->base, gc->ngpio, pdata->context);
@@ -199,8 +428,11 @@ static int __devinit adp5588_gpio_probe(struct i2c_client *client,
}
i2c_set_clientdata(client, dev);
+
return 0;
+err_irq:
+ adp5588_irq_teardown(dev);
err:
kfree(dev);
return ret;
@@ -222,6 +454,9 @@ static int __devexit adp5588_gpio_remove(struct i2c_client *client)
}
}
+ if (dev->irq_base)
+ free_irq(dev->client->irq, dev);
+
ret = gpiochip_remove(&dev->gpio_chip);
if (ret) {
dev_err(&client->dev, "gpiochip_remove failed %d\n", ret);
diff --git a/drivers/gpio/basic_mmio_gpio.c b/drivers/gpio/basic_mmio_gpio.c
new file mode 100644
index 000000000000..3addea65894e
--- /dev/null
+++ b/drivers/gpio/basic_mmio_gpio.c
@@ -0,0 +1,297 @@
+/*
+ * Driver for basic memory-mapped GPIO controllers.
+ *
+ * Copyright 2008 MontaVista Software, Inc.
+ * Copyright 2008,2010 Anton Vorontsov <cbouatmailru@gmail.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ *
+ * ....``.```~~~~````.`.`.`.`.```````'',,,.........`````......`.......
+ * ...`` ```````..
+ * ..The simplest form of a GPIO controller that the driver supports is``
+ * `.just a single "data" register, where GPIO state can be read and/or `
+ * `,..written. ,,..``~~~~ .....``.`.`.~~.```.`.........``````.```````
+ * `````````
+ ___
+_/~~|___/~| . ```~~~~~~ ___/___\___ ,~.`.`.`.`````.~~...,,,,...
+__________|~$@~~~ %~ /o*o*o*o*o*o\ .. Implementing such a GPIO .
+o ` ~~~~\___/~~~~ ` controller in FPGA is ,.`
+ `....trivial..'~`.```.```
+ * ```````
+ * .```````~~~~`..`.``.``.
+ * . The driver supports `... ,..```.`~~~```````````````....````.``,,
+ * . big-endian notation, just`. .. A bit more sophisticated controllers ,
+ * . register the device with -be`. .with a pair of set/clear-bit registers ,
+ * `.. suffix. ```~~`````....`.` . affecting the data register and the .`
+ * ``.`.``...``` ```.. output pins are also supported.`
+ * ^^ `````.`````````.,``~``~``~~``````
+ * . ^^
+ * ,..`.`.`...````````````......`.`.`.`.`.`..`.`.`..
+ * .. The expectation is that in at least some cases . ,-~~~-,
+ * .this will be used with roll-your-own ASIC/FPGA .` \ /
+ * .logic in Verilog or VHDL. ~~~`````````..`````~~` \ /
+ * ..````````......``````````` \o_
+ * |
+ * ^^ / \
+ *
+ * ...`````~~`.....``.`..........``````.`.``.```........``.
+ * ` 8, 16, 32 and 64 bits registers are supported, and``.
+ * . the number of GPIOs is determined by the width of ~
+ * .. the registers. ,............```.`.`..`.`.~~~.`.`.`~
+ * `.......````.```
+ */
+
+#include <linux/init.h>
+#include <linux/bug.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/spinlock.h>
+#include <linux/compiler.h>
+#include <linux/types.h>
+#include <linux/errno.h>
+#include <linux/log2.h>
+#include <linux/ioport.h>
+#include <linux/io.h>
+#include <linux/gpio.h>
+#include <linux/slab.h>
+#include <linux/platform_device.h>
+#include <linux/mod_devicetable.h>
+#include <linux/basic_mmio_gpio.h>
+
+struct bgpio_chip {
+ struct gpio_chip gc;
+ void __iomem *reg_dat;
+ void __iomem *reg_set;
+ void __iomem *reg_clr;
+
+ /* Number of bits (GPIOs): <register width> * 8. */
+ int bits;
+
+ /*
+ * Some GPIO controllers work with the big-endian bits notation,
+ * e.g. in an 8-bit register, GPIO7 is the least significant bit.
+ */
+ int big_endian_bits;
+
+ /*
+ * Used to lock bgpio_chip->data. Also, this is needed to keep
+ * shadowed and real data register writes together.
+ */
+ spinlock_t lock;
+
+ /* Shadowed data register to clear/set bits safely. */
+ unsigned long data;
+};
+
+static struct bgpio_chip *to_bgpio_chip(struct gpio_chip *gc)
+{
+ return container_of(gc, struct bgpio_chip, gc);
+}
+
+static unsigned long bgpio_in(struct bgpio_chip *bgc)
+{
+ switch (bgc->bits) {
+ case 8:
+ return __raw_readb(bgc->reg_dat);
+ case 16:
+ return __raw_readw(bgc->reg_dat);
+ case 32:
+ return __raw_readl(bgc->reg_dat);
+#if BITS_PER_LONG >= 64
+ case 64:
+ return __raw_readq(bgc->reg_dat);
+#endif
+ }
+ return -EINVAL;
+}
+
+static void bgpio_out(struct bgpio_chip *bgc, void __iomem *reg,
+ unsigned long data)
+{
+ switch (bgc->bits) {
+ case 8:
+ __raw_writeb(data, reg);
+ return;
+ case 16:
+ __raw_writew(data, reg);
+ return;
+ case 32:
+ __raw_writel(data, reg);
+ return;
+#if BITS_PER_LONG >= 64
+ case 64:
+ __raw_writeq(data, reg);
+ return;
+#endif
+ }
+}
+
+static unsigned long bgpio_pin2mask(struct bgpio_chip *bgc, unsigned int pin)
+{
+ if (bgc->big_endian_bits)
+ return 1 << (bgc->bits - 1 - pin);
+ else
+ return 1 << pin;
+}
+
+static int bgpio_get(struct gpio_chip *gc, unsigned int gpio)
+{
+ struct bgpio_chip *bgc = to_bgpio_chip(gc);
+
+ return bgpio_in(bgc) & bgpio_pin2mask(bgc, gpio);
+}
+
+static void bgpio_set(struct gpio_chip *gc, unsigned int gpio, int val)
+{
+ struct bgpio_chip *bgc = to_bgpio_chip(gc);
+ unsigned long mask = bgpio_pin2mask(bgc, gpio);
+ unsigned long flags;
+
+ if (bgc->reg_set) {
+ if (val)
+ bgpio_out(bgc, bgc->reg_set, mask);
+ else
+ bgpio_out(bgc, bgc->reg_clr, mask);
+ return;
+ }
+
+ spin_lock_irqsave(&bgc->lock, flags);
+
+ if (val)
+ bgc->data |= mask;
+ else
+ bgc->data &= ~mask;
+
+ bgpio_out(bgc, bgc->reg_dat, bgc->data);
+
+ spin_unlock_irqrestore(&bgc->lock, flags);
+}
+
+static int bgpio_dir_in(struct gpio_chip *gc, unsigned int gpio)
+{
+ return 0;
+}
+
+static int bgpio_dir_out(struct gpio_chip *gc, unsigned int gpio, int val)
+{
+ bgpio_set(gc, gpio, val);
+ return 0;
+}
+
+static int __devinit bgpio_probe(struct platform_device *pdev)
+{
+ const struct platform_device_id *platid = platform_get_device_id(pdev);
+ struct device *dev = &pdev->dev;
+ struct bgpio_pdata *pdata = dev_get_platdata(dev);
+ struct bgpio_chip *bgc;
+ struct resource *res_dat;
+ struct resource *res_set;
+ struct resource *res_clr;
+ resource_size_t dat_sz;
+ int bits;
+ int ret;
+
+ res_dat = platform_get_resource_byname(pdev, IORESOURCE_MEM, "dat");
+ if (!res_dat)
+ return -EINVAL;
+
+ dat_sz = resource_size(res_dat);
+ if (!is_power_of_2(dat_sz))
+ return -EINVAL;
+
+ bits = dat_sz * 8;
+ if (bits > BITS_PER_LONG)
+ return -EINVAL;
+
+ bgc = devm_kzalloc(dev, sizeof(*bgc), GFP_KERNEL);
+ if (!bgc)
+ return -ENOMEM;
+
+ bgc->reg_dat = devm_ioremap(dev, res_dat->start, dat_sz);
+ if (!bgc->reg_dat)
+ return -ENOMEM;
+
+ res_set = platform_get_resource_byname(pdev, IORESOURCE_MEM, "set");
+ res_clr = platform_get_resource_byname(pdev, IORESOURCE_MEM, "clr");
+ if (res_set && res_clr) {
+ if (resource_size(res_set) != resource_size(res_clr) ||
+ resource_size(res_set) != dat_sz)
+ return -EINVAL;
+
+ bgc->reg_set = devm_ioremap(dev, res_set->start, dat_sz);
+ bgc->reg_clr = devm_ioremap(dev, res_clr->start, dat_sz);
+ if (!bgc->reg_set || !bgc->reg_clr)
+ return -ENOMEM;
+ } else if (res_set || res_clr) {
+ return -EINVAL;
+ }
+
+ spin_lock_init(&bgc->lock);
+
+ bgc->bits = bits;
+ bgc->big_endian_bits = !strcmp(platid->name, "basic-mmio-gpio-be");
+ bgc->data = bgpio_in(bgc);
+
+ bgc->gc.ngpio = bits;
+ bgc->gc.direction_input = bgpio_dir_in;
+ bgc->gc.direction_output = bgpio_dir_out;
+ bgc->gc.get = bgpio_get;
+ bgc->gc.set = bgpio_set;
+ bgc->gc.dev = dev;
+ bgc->gc.label = dev_name(dev);
+
+ if (pdata)
+ bgc->gc.base = pdata->base;
+ else
+ bgc->gc.base = -1;
+
+ dev_set_drvdata(dev, bgc);
+
+ ret = gpiochip_add(&bgc->gc);
+ if (ret)
+ dev_err(dev, "gpiochip_add() failed: %d\n", ret);
+
+ return ret;
+}
+
+static int __devexit bgpio_remove(struct platform_device *pdev)
+{
+ struct bgpio_chip *bgc = dev_get_drvdata(&pdev->dev);
+
+ return gpiochip_remove(&bgc->gc);
+}
+
+static const struct platform_device_id bgpio_id_table[] = {
+ { "basic-mmio-gpio", },
+ { "basic-mmio-gpio-be", },
+ {},
+};
+MODULE_DEVICE_TABLE(platform, bgpio_id_table);
+
+static struct platform_driver bgpio_driver = {
+ .driver = {
+ .name = "basic-mmio-gpio",
+ },
+ .id_table = bgpio_id_table,
+ .probe = bgpio_probe,
+ .remove = __devexit_p(bgpio_remove),
+};
+
+static int __init bgpio_init(void)
+{
+ return platform_driver_register(&bgpio_driver);
+}
+module_init(bgpio_init);
+
+static void __exit bgpio_exit(void)
+{
+ platform_driver_unregister(&bgpio_driver);
+}
+module_exit(bgpio_exit);
+
+MODULE_DESCRIPTION("Driver for basic memory-mapped GPIO controllers");
+MODULE_AUTHOR("Anton Vorontsov <cbouatmailru@gmail.com>");
+MODULE_LICENSE("GPL");
diff --git a/drivers/gpio/cs5535-gpio.c b/drivers/gpio/cs5535-gpio.c
index e23c06893d19..599f6c9e0fbf 100644
--- a/drivers/gpio/cs5535-gpio.c
+++ b/drivers/gpio/cs5535-gpio.c
@@ -56,6 +56,18 @@ static struct cs5535_gpio_chip {
* registers, see include/linux/cs5535.h.
*/
+static void errata_outl(u32 val, unsigned long addr)
+{
+ /*
+ * According to the CS5536 errata (#36), after suspend
+ * a write to the high bank GPIO register will clear all
+ * non-selected bits; the recommended workaround is a
+ * read-modify-write operation.
+ */
+ val |= inl(addr);
+ outl(val, addr);
+}
+
static void __cs5535_gpio_set(struct cs5535_gpio_chip *chip, unsigned offset,
unsigned int reg)
{
@@ -64,7 +76,7 @@ static void __cs5535_gpio_set(struct cs5535_gpio_chip *chip, unsigned offset,
outl(1 << offset, chip->base + reg);
else
/* high bank register */
- outl(1 << (offset - 16), chip->base + 0x80 + reg);
+ errata_outl(1 << (offset - 16), chip->base + 0x80 + reg);
}
void cs5535_gpio_set(unsigned offset, unsigned int reg)
@@ -86,7 +98,7 @@ static void __cs5535_gpio_clear(struct cs5535_gpio_chip *chip, unsigned offset,
outl(1 << (offset + 16), chip->base + reg);
else
/* high bank register */
- outl(1 << offset, chip->base + 0x80 + reg);
+ errata_outl(1 << offset, chip->base + 0x80 + reg);
}
void cs5535_gpio_clear(unsigned offset, unsigned int reg)
diff --git a/drivers/gpio/langwell_gpio.c b/drivers/gpio/langwell_gpio.c
index 8383a8d7f994..64db9dc3a275 100644
--- a/drivers/gpio/langwell_gpio.c
+++ b/drivers/gpio/langwell_gpio.c
@@ -18,10 +18,12 @@
/* Supports:
* Moorestown platform Langwell chip.
* Medfield platform Penwell chip.
+ * Whitney point.
*/
#include <linux/module.h>
#include <linux/pci.h>
+#include <linux/platform_device.h>
#include <linux/kernel.h>
#include <linux/delay.h>
#include <linux/stddef.h>
@@ -158,15 +160,15 @@ static int lnw_irq_type(unsigned irq, unsigned type)
spin_unlock_irqrestore(&lnw->lock, flags);
return 0;
-};
+}
static void lnw_irq_unmask(unsigned irq)
{
-};
+}
static void lnw_irq_mask(unsigned irq)
{
-};
+}
static struct irq_chip lnw_irqchip = {
.name = "LNW-GPIO",
@@ -300,9 +302,88 @@ static struct pci_driver lnw_gpio_driver = {
.probe = lnw_gpio_probe,
};
+
+static int __devinit wp_gpio_probe(struct platform_device *pdev)
+{
+ struct lnw_gpio *lnw;
+ struct gpio_chip *gc;
+ struct resource *rc;
+ int retval = 0;
+
+ rc = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (!rc)
+ return -EINVAL;
+
+ lnw = kzalloc(sizeof(struct lnw_gpio), GFP_KERNEL);
+ if (!lnw) {
+ dev_err(&pdev->dev,
+ "can't allocate whitneypoint_gpio chip data\n");
+ return -ENOMEM;
+ }
+ lnw->reg_base = ioremap_nocache(rc->start, resource_size(rc));
+ if (lnw->reg_base == NULL) {
+ retval = -EINVAL;
+ goto err_kmalloc;
+ }
+ spin_lock_init(&lnw->lock);
+ gc = &lnw->chip;
+ gc->label = dev_name(&pdev->dev);
+ gc->owner = THIS_MODULE;
+ gc->direction_input = lnw_gpio_direction_input;
+ gc->direction_output = lnw_gpio_direction_output;
+ gc->get = lnw_gpio_get;
+ gc->set = lnw_gpio_set;
+ gc->to_irq = NULL;
+ gc->base = 0;
+ gc->ngpio = 64;
+ gc->can_sleep = 0;
+ retval = gpiochip_add(gc);
+ if (retval) {
+ dev_err(&pdev->dev, "whitneypoint gpiochip_add error %d\n",
+ retval);
+ goto err_ioremap;
+ }
+ platform_set_drvdata(pdev, lnw);
+ return 0;
+err_ioremap:
+ iounmap(lnw->reg_base);
+err_kmalloc:
+ kfree(lnw);
+ return retval;
+}
+
+static int __devexit wp_gpio_remove(struct platform_device *pdev)
+{
+ struct lnw_gpio *lnw = platform_get_drvdata(pdev);
+ int err;
+ err = gpiochip_remove(&lnw->chip);
+ if (err)
+ dev_err(&pdev->dev, "failed to remove gpio_chip.\n");
+ iounmap(lnw->reg_base);
+ kfree(lnw);
+ platform_set_drvdata(pdev, NULL);
+ return 0;
+}
+
+static struct platform_driver wp_gpio_driver = {
+ .probe = wp_gpio_probe,
+ .remove = __devexit_p(wp_gpio_remove),
+ .driver = {
+ .name = "wp_gpio",
+ .owner = THIS_MODULE,
+ },
+};
+
static int __init lnw_gpio_init(void)
{
- return pci_register_driver(&lnw_gpio_driver);
+ int ret;
+ ret = pci_register_driver(&lnw_gpio_driver);
+ if (ret < 0)
+ return ret;
+ ret = platform_driver_register(&wp_gpio_driver);
+ if (ret < 0)
+ pci_unregister_driver(&lnw_gpio_driver);
+ return ret;
}
device_initcall(lnw_gpio_init);
diff --git a/drivers/gpio/pca953x.c b/drivers/gpio/pca953x.c
index a2b12aa1f2b9..501866662e05 100644
--- a/drivers/gpio/pca953x.c
+++ b/drivers/gpio/pca953x.c
@@ -345,7 +345,7 @@ static irqreturn_t pca953x_irq_handler(int irq, void *devid)
do {
level = __ffs(pending);
- handle_nested_irq(level + chip->irq_base);
+ generic_handle_irq(level + chip->irq_base);
pending &= ~(1 << level);
} while (pending);
@@ -360,7 +360,8 @@ static int pca953x_irq_setup(struct pca953x_chip *chip,
struct pca953x_platform_data *pdata = client->dev.platform_data;
int ret;
- if (pdata->irq_base && (id->driver_data & PCA953X_INT)) {
+ if (pdata->irq_base != -1
+ && (id->driver_data & PCA953X_INT)) {
int lvl;
ret = pca953x_read_reg(chip, PCA953X_INPUT,
@@ -383,7 +384,6 @@ static int pca953x_irq_setup(struct pca953x_chip *chip,
set_irq_chip_data(irq, chip);
set_irq_chip_and_handler(irq, &pca953x_irq_chip,
handle_edge_irq);
- set_irq_nested_thread(irq, 1);
#ifdef CONFIG_ARM
set_irq_flags(irq, IRQF_VALID);
#else
@@ -394,6 +394,7 @@ static int pca953x_irq_setup(struct pca953x_chip *chip,
ret = request_threaded_irq(client->irq,
NULL,
pca953x_irq_handler,
+ IRQF_TRIGGER_RISING |
IRQF_TRIGGER_FALLING | IRQF_ONESHOT,
dev_name(&client->dev), chip);
if (ret) {
@@ -408,13 +409,13 @@ static int pca953x_irq_setup(struct pca953x_chip *chip,
return 0;
out_failed:
- chip->irq_base = 0;
+ chip->irq_base = -1;
return ret;
}
static void pca953x_irq_teardown(struct pca953x_chip *chip)
{
- if (chip->irq_base)
+ if (chip->irq_base != -1)
free_irq(chip->client->irq, chip);
}
#else /* CONFIG_GPIO_PCA953X_IRQ */
@@ -424,7 +425,7 @@ static int pca953x_irq_setup(struct pca953x_chip *chip,
struct i2c_client *client = chip->client;
struct pca953x_platform_data *pdata = client->dev.platform_data;
- if (pdata->irq_base && (id->driver_data & PCA953X_INT))
+ if (pdata->irq_base != -1 && (id->driver_data & PCA953X_INT))
dev_warn(&client->dev, "interrupt support not compiled in\n");
return 0;
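
With this change, irq_base == -1 (rather than 0) is what disables the interrupt machinery, so 0 becomes a usable IRQ base. A board that does not want PCA953X interrupts would now pass something like the following sketch (field names as in the pca953x platform data header; the GPIO base is a placeholder):

#include <linux/i2c/pca953x.h>

static struct pca953x_platform_data expander_pdata = {
	.gpio_base	= 200,	/* placeholder GPIO base */
	.irq_base	= -1,	/* -1 now means "no interrupt support" */
};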
diff --git a/drivers/gpio/pch_gpio.c b/drivers/gpio/pch_gpio.c
new file mode 100644
index 000000000000..0eba0a75c804
--- /dev/null
+++ b/drivers/gpio/pch_gpio.c
@@ -0,0 +1,312 @@
+/*
+ * Copyright (C) 2010 OKI SEMICONDUCTOR Co., LTD.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; version 2 of the License.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA.
+ */
+#include <linux/kernel.h>
+#include <linux/pci.h>
+#include <linux/gpio.h>
+
+#define PCH_GPIO_ALL_PINS 0xfff /* Mask for GPIO pins 0 to 11 */
+#define GPIO_NUM_PINS 12 /* Specifies number of GPIO PINS GPIO0-GPIO11 */
+
+struct pch_regs {
+ u32 ien;
+ u32 istatus;
+ u32 idisp;
+ u32 iclr;
+ u32 imask;
+ u32 imaskclr;
+ u32 po;
+ u32 pi;
+ u32 pm;
+ u32 im0;
+ u32 im1;
+ u32 reserved[4];
+ u32 reset;
+};
+
+/**
+ * struct pch_gpio_reg_data - The register store data.
+ * @po_reg: To store contents of PO register.
+ * @pm_reg: To store contents of PM register.
+ */
+struct pch_gpio_reg_data {
+ u32 po_reg;
+ u32 pm_reg;
+};
+
+/**
+ * struct pch_gpio - GPIO private data structure.
+ * @base: PCI base address of Memory mapped I/O register.
+ * @reg: Memory mapped PCH GPIO register list.
+ * @dev: Pointer to device structure.
+ * @gpio: Data for GPIO infrastructure.
+ * @pch_gpio_reg: Memory mapped register data is saved here
+ * on suspend.
+ */
+struct pch_gpio {
+ void __iomem *base;
+ struct pch_regs __iomem *reg;
+ struct device *dev;
+ struct gpio_chip gpio;
+ struct pch_gpio_reg_data pch_gpio_reg;
+ struct mutex lock;
+};
+
+static void pch_gpio_set(struct gpio_chip *gpio, unsigned nr, int val)
+{
+ u32 reg_val;
+ struct pch_gpio *chip = container_of(gpio, struct pch_gpio, gpio);
+
+ mutex_lock(&chip->lock);
+ reg_val = ioread32(&chip->reg->po);
+ if (val)
+ reg_val |= (1 << nr);
+ else
+ reg_val &= ~(1 << nr);
+
+ iowrite32(reg_val, &chip->reg->po);
+ mutex_unlock(&chip->lock);
+}
+
+static int pch_gpio_get(struct gpio_chip *gpio, unsigned nr)
+{
+ struct pch_gpio *chip = container_of(gpio, struct pch_gpio, gpio);
+
+ return ioread32(&chip->reg->pi) & (1 << nr);
+}
+
+static int pch_gpio_direction_output(struct gpio_chip *gpio, unsigned nr,
+ int val)
+{
+ struct pch_gpio *chip = container_of(gpio, struct pch_gpio, gpio);
+ u32 pm;
+ u32 reg_val;
+
+ mutex_lock(&chip->lock);
+ pm = ioread32(&chip->reg->pm) & PCH_GPIO_ALL_PINS;
+ pm |= (1 << nr);
+ iowrite32(pm, &chip->reg->pm);
+
+ reg_val = ioread32(&chip->reg->po);
+ if (val)
+ reg_val |= (1 << nr);
+ else
+ reg_val &= ~(1 << nr);
+
+ mutex_unlock(&chip->lock);
+
+ return 0;
+}
+
+static int pch_gpio_direction_input(struct gpio_chip *gpio, unsigned nr)
+{
+ struct pch_gpio *chip = container_of(gpio, struct pch_gpio, gpio);
+ u32 pm;
+
+ mutex_lock(&chip->lock);
+ pm = ioread32(&chip->reg->pm) & PCH_GPIO_ALL_PINS; /*bits 0-11*/
+ pm &= ~(1 << nr);
+ iowrite32(pm, &chip->reg->pm);
+ mutex_unlock(&chip->lock);
+
+ return 0;
+}
+
+/*
+ * Save the current PO and PM register configuration.
+ */
+static void pch_gpio_save_reg_conf(struct pch_gpio *chip)
+{
+ chip->pch_gpio_reg.po_reg = ioread32(&chip->reg->po);
+ chip->pch_gpio_reg.pm_reg = ioread32(&chip->reg->pm);
+}
+
+/*
+ * This function restores the register configuration of the GPIO device.
+ */
+static void pch_gpio_restore_reg_conf(struct pch_gpio *chip)
+{
+ /* restore contents of PO register */
+ iowrite32(chip->pch_gpio_reg.po_reg, &chip->reg->po);
+ /* restore contents of PM register */
+ iowrite32(chip->pch_gpio_reg.pm_reg, &chip->reg->pm);
+}
+
+static void pch_gpio_setup(struct pch_gpio *chip)
+{
+ struct gpio_chip *gpio = &chip->gpio;
+
+ gpio->label = dev_name(chip->dev);
+ gpio->owner = THIS_MODULE;
+ gpio->direction_input = pch_gpio_direction_input;
+ gpio->get = pch_gpio_get;
+ gpio->direction_output = pch_gpio_direction_output;
+ gpio->set = pch_gpio_set;
+ gpio->dbg_show = NULL;
+ gpio->base = -1;
+ gpio->ngpio = GPIO_NUM_PINS;
+ gpio->can_sleep = 0;
+}
+
+static int __devinit pch_gpio_probe(struct pci_dev *pdev,
+ const struct pci_device_id *id)
+{
+ s32 ret;
+ struct pch_gpio *chip;
+
+ chip = kzalloc(sizeof(*chip), GFP_KERNEL);
+ if (chip == NULL)
+ return -ENOMEM;
+
+ chip->dev = &pdev->dev;
+ ret = pci_enable_device(pdev);
+ if (ret) {
+ dev_err(&pdev->dev, "%s : pci_enable_device FAILED", __func__);
+ goto err_pci_enable;
+ }
+
+ ret = pci_request_regions(pdev, KBUILD_MODNAME);
+ if (ret) {
+ dev_err(&pdev->dev, "pci_request_regions FAILED-%d", ret);
+ goto err_request_regions;
+ }
+
+ chip->base = pci_iomap(pdev, 1, 0);
+ if (chip->base == 0) {
+ dev_err(&pdev->dev, "%s : pci_iomap FAILED", __func__);
+ ret = -ENOMEM;
+ goto err_iomap;
+ }
+
+ chip->reg = chip->base;
+ pci_set_drvdata(pdev, chip);
+ mutex_init(&chip->lock);
+ pch_gpio_setup(chip);
+ ret = gpiochip_add(&chip->gpio);
+ if (ret) {
+ dev_err(&pdev->dev, "PCH gpio: Failed to register GPIO\n");
+ goto err_gpiochip_add;
+ }
+
+ return 0;
+
+err_gpiochip_add:
+ pci_iounmap(pdev, chip->base);
+
+err_iomap:
+ pci_release_regions(pdev);
+
+err_request_regions:
+ pci_disable_device(pdev);
+
+err_pci_enable:
+ kfree(chip);
+ dev_err(&pdev->dev, "%s Failed returns %d\n", __func__, ret);
+ return ret;
+}
+
+static void __devexit pch_gpio_remove(struct pci_dev *pdev)
+{
+ int err;
+ struct pch_gpio *chip = pci_get_drvdata(pdev);
+
+ err = gpiochip_remove(&chip->gpio);
+ if (err)
+ dev_err(&pdev->dev, "Failed gpiochip_remove\n");
+
+ pci_iounmap(pdev, chip->base);
+ pci_release_regions(pdev);
+ pci_disable_device(pdev);
+ kfree(chip);
+}
+
+#ifdef CONFIG_PM
+static int pch_gpio_suspend(struct pci_dev *pdev, pm_message_t state)
+{
+ s32 ret;
+ struct pch_gpio *chip = pci_get_drvdata(pdev);
+
+ pch_gpio_save_reg_conf(chip);
+ pch_gpio_restore_reg_conf(chip);
+
+ ret = pci_save_state(pdev);
+ if (ret) {
+ dev_err(&pdev->dev, "pci_save_state Failed-%d\n", ret);
+ return ret;
+ }
+ pci_disable_device(pdev);
+ pci_set_power_state(pdev, PCI_D0);
+ ret = pci_enable_wake(pdev, PCI_D0, 1);
+ if (ret)
+ dev_err(&pdev->dev, "pci_enable_wake Failed -%d\n", ret);
+
+ return 0;
+}
+
+static int pch_gpio_resume(struct pci_dev *pdev)
+{
+ s32 ret;
+ struct pch_gpio *chip = pci_get_drvdata(pdev);
+
+ ret = pci_enable_wake(pdev, PCI_D0, 0);
+
+ pci_set_power_state(pdev, PCI_D0);
+ ret = pci_enable_device(pdev);
+ if (ret) {
+ dev_err(&pdev->dev, "pci_enable_device Failed-%d ", ret);
+ return ret;
+ }
+ pci_restore_state(pdev);
+
+ iowrite32(0x01, &chip->reg->reset);
+ iowrite32(0x00, &chip->reg->reset);
+ pch_gpio_restore_reg_conf(chip);
+
+ return 0;
+}
+#else
+#define pch_gpio_suspend NULL
+#define pch_gpio_resume NULL
+#endif
+
+static DEFINE_PCI_DEVICE_TABLE(pch_gpio_pcidev_id) = {
+ { PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x8803) },
+ { 0, }
+};
+
+static struct pci_driver pch_gpio_driver = {
+ .name = "pch_gpio",
+ .id_table = pch_gpio_pcidev_id,
+ .probe = pch_gpio_probe,
+ .remove = __devexit_p(pch_gpio_remove),
+ .suspend = pch_gpio_suspend,
+ .resume = pch_gpio_resume
+};
+
+static int __init pch_gpio_pci_init(void)
+{
+ return pci_register_driver(&pch_gpio_driver);
+}
+module_init(pch_gpio_pci_init);
+
+static void __exit pch_gpio_pci_exit(void)
+{
+ pci_unregister_driver(&pch_gpio_driver);
+}
+module_exit(pch_gpio_pci_exit);
+
+MODULE_DESCRIPTION("PCH GPIO PCI Driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/gpio/stmpe-gpio.c b/drivers/gpio/stmpe-gpio.c
index 4e1f1b9d5e67..7c9e6a052c45 100644
--- a/drivers/gpio/stmpe-gpio.c
+++ b/drivers/gpio/stmpe-gpio.c
@@ -30,6 +30,7 @@ struct stmpe_gpio {
struct mutex irq_lock;
int irq_base;
+ unsigned norequest_mask;
/* Caches of interrupt control registers for bus_lock */
u8 regs[CACHE_NR_REGS][CACHE_NR_BANKS];
@@ -103,6 +104,9 @@ static int stmpe_gpio_request(struct gpio_chip *chip, unsigned offset)
struct stmpe_gpio *stmpe_gpio = to_stmpe_gpio(chip);
struct stmpe *stmpe = stmpe_gpio->stmpe;
+ if (stmpe_gpio->norequest_mask & (1 << offset))
+ return -EINVAL;
+
return stmpe_set_altfunc(stmpe, 1 << offset, STMPE_BLOCK_GPIO);
}
@@ -287,8 +291,6 @@ static int __devinit stmpe_gpio_probe(struct platform_device *pdev)
int irq;
pdata = stmpe->pdata->gpio;
- if (!pdata)
- return -ENODEV;
irq = platform_get_irq(pdev, 0);
if (irq < 0)
@@ -302,6 +304,7 @@ static int __devinit stmpe_gpio_probe(struct platform_device *pdev)
stmpe_gpio->dev = &pdev->dev;
stmpe_gpio->stmpe = stmpe;
+ stmpe_gpio->norequest_mask = pdata ? pdata->norequest_mask : 0;
stmpe_gpio->chip = template_chip;
stmpe_gpio->chip.ngpio = stmpe->num_gpios;
@@ -312,11 +315,11 @@ static int __devinit stmpe_gpio_probe(struct platform_device *pdev)
ret = stmpe_enable(stmpe, STMPE_BLOCK_GPIO);
if (ret)
- return ret;
+ goto out_free;
ret = stmpe_gpio_irq_init(stmpe_gpio);
if (ret)
- goto out_free;
+ goto out_disable;
ret = request_threaded_irq(irq, NULL, stmpe_gpio_irq, IRQF_ONESHOT,
"stmpe-gpio", stmpe_gpio);
@@ -342,6 +345,8 @@ out_freeirq:
free_irq(irq, stmpe_gpio);
out_removeirq:
stmpe_gpio_irq_remove(stmpe_gpio);
+out_disable:
+ stmpe_disable(stmpe, STMPE_BLOCK_GPIO);
out_free:
kfree(stmpe_gpio);
return ret;
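
The new norequest_mask lets platform data fence off pins that must stay on their alternate function; gpio_request() on those offsets now fails with -EINVAL. A sketch of the board side (the platform data struct name and header are assumed; the mask value is a placeholder):

#include <linux/mfd/stmpe.h>	/* assumed location of the gpio platform data */

static struct stmpe_gpio_platform_data stmpe_gpio_pdata = {
	.norequest_mask	= (1 << 2) | (1 << 3),	/* pins 2 and 3 stay on altfunc */
};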
diff --git a/drivers/gpio/timbgpio.c b/drivers/gpio/timbgpio.c
index ddd053108a13..45293662e950 100644
--- a/drivers/gpio/timbgpio.c
+++ b/drivers/gpio/timbgpio.c
@@ -47,6 +47,7 @@ struct timbgpio {
spinlock_t lock; /* mutual exclusion */
struct gpio_chip gpio;
int irq_base;
+ unsigned long last_ier;
};
static int timbgpio_update_bit(struct gpio_chip *gpio, unsigned index,
@@ -112,16 +113,24 @@ static void timbgpio_irq_disable(unsigned irq)
{
struct timbgpio *tgpio = get_irq_chip_data(irq);
int offset = irq - tgpio->irq_base;
+ unsigned long flags;
- timbgpio_update_bit(&tgpio->gpio, offset, TGPIO_IER, 0);
+ spin_lock_irqsave(&tgpio->lock, flags);
+ tgpio->last_ier &= ~(1 << offset);
+ iowrite32(tgpio->last_ier, tgpio->membase + TGPIO_IER);
+ spin_unlock_irqrestore(&tgpio->lock, flags);
}
static void timbgpio_irq_enable(unsigned irq)
{
struct timbgpio *tgpio = get_irq_chip_data(irq);
int offset = irq - tgpio->irq_base;
+ unsigned long flags;
- timbgpio_update_bit(&tgpio->gpio, offset, TGPIO_IER, 1);
+ spin_lock_irqsave(&tgpio->lock, flags);
+ tgpio->last_ier |= 1 << offset;
+ iowrite32(tgpio->last_ier, tgpio->membase + TGPIO_IER);
+ spin_unlock_irqrestore(&tgpio->lock, flags);
}
static int timbgpio_irq_type(unsigned irq, unsigned trigger)
@@ -194,8 +203,16 @@ static void timbgpio_irq(unsigned int irq, struct irq_desc *desc)
ipr = ioread32(tgpio->membase + TGPIO_IPR);
iowrite32(ipr, tgpio->membase + TGPIO_ICR);
+ /*
+ * Some versions of the hardware trash the IER register if more than
+ * one interrupt is received simultaneously.
+ */
+ iowrite32(0, tgpio->membase + TGPIO_IER);
+
for_each_set_bit(offset, &ipr, tgpio->gpio.ngpio)
generic_handle_irq(timbgpio_to_irq(&tgpio->gpio, offset));
+
+ iowrite32(tgpio->last_ier, tgpio->membase + TGPIO_IER);
}
static struct irq_chip timbgpio_irqchip = {
diff --git a/drivers/gpio/vx855_gpio.c b/drivers/gpio/vx855_gpio.c
new file mode 100644
index 000000000000..8a98ee5d5f6c
--- /dev/null
+++ b/drivers/gpio/vx855_gpio.c
@@ -0,0 +1,332 @@
+/*
+ * Linux GPIOlib driver for the VIA VX855 integrated southbridge GPIO
+ *
+ * Copyright (C) 2009 VIA Technologies, Inc.
+ * Copyright (C) 2010 One Laptop per Child
+ * Author: Harald Welte <HaraldWelte@viatech.com>
+ * All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation; either version 2 of
+ * the License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston,
+ * MA 02111-1307 USA
+ *
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/gpio.h>
+#include <linux/device.h>
+#include <linux/platform_device.h>
+#include <linux/pci.h>
+#include <linux/io.h>
+
+#define MODULE_NAME "vx855_gpio"
+
+/* The VX855 south bridge has the following GPIO pins:
+ * GPI 0...13 General Purpose Input
+ * GPO 0...12 General Purpose Output
+ * GPIO 0...14 General Purpose I/O (Open-Drain)
+ */
+
+#define NR_VX855_GPI 14
+#define NR_VX855_GPO 13
+#define NR_VX855_GPIO 15
+
+#define NR_VX855_GPInO (NR_VX855_GPI + NR_VX855_GPO)
+#define NR_VX855_GP (NR_VX855_GPI + NR_VX855_GPO + NR_VX855_GPIO)
+
+struct vx855_gpio {
+ struct gpio_chip gpio;
+ spinlock_t lock;
+ u32 io_gpi;
+ u32 io_gpo;
+ bool gpi_reserved;
+ bool gpo_reserved;
+};
+
+/* resolve a GPIx into the corresponding bit position */
+static inline u_int32_t gpi_i_bit(int i)
+{
+ if (i < 10)
+ return 1 << i;
+ else
+ return 1 << (i + 14);
+}
+
+static inline u_int32_t gpo_o_bit(int i)
+{
+ if (i < 11)
+ return 1 << i;
+ else
+ return 1 << (i + 14);
+}
+
+static inline u_int32_t gpio_i_bit(int i)
+{
+ if (i < 14)
+ return 1 << (i + 10);
+ else
+ return 1 << (i + 14);
+}
+
+static inline u_int32_t gpio_o_bit(int i)
+{
+ if (i < 14)
+ return 1 << (i + 11);
+ else
+ return 1 << (i + 13);
+}
+
+/* Mapping between the numeric GPIO ID and the actual GPIO hardware numbering:
+ * 0..13 GPI 0..13
+ * 14..26 GPO 0..12
+ * 27..41 GPIO 0..14
+ */
+
+static int vx855gpio_direction_input(struct gpio_chip *gpio,
+ unsigned int nr)
+{
+ struct vx855_gpio *vg = container_of(gpio, struct vx855_gpio, gpio);
+ unsigned long flags;
+ u_int32_t reg_out;
+
+ /* Real GPI bits are always in input direction */
+ if (nr < NR_VX855_GPI)
+ return 0;
+
+ /* Real GPO bits are output-only and cannot be switched to input */
+ if (nr < NR_VX855_GPInO)
+ return -EINVAL;
+
+ /* Open-drain GPIOs have to be set to one */
+ spin_lock_irqsave(&vg->lock, flags);
+ reg_out = inl(vg->io_gpo);
+ reg_out |= gpio_o_bit(nr - NR_VX855_GPInO);
+ outl(reg_out, vg->io_gpo);
+ spin_unlock_irqrestore(&vg->lock, flags);
+
+ return 0;
+}
+
+static int vx855gpio_get(struct gpio_chip *gpio, unsigned int nr)
+{
+ struct vx855_gpio *vg = container_of(gpio, struct vx855_gpio, gpio);
+ u_int32_t reg_in;
+ int ret = 0;
+
+ if (nr < NR_VX855_GPI) {
+ reg_in = inl(vg->io_gpi);
+ if (reg_in & gpi_i_bit(nr))
+ ret = 1;
+ } else if (nr < NR_VX855_GPInO) {
+ /* GPOs don't have an input bit; we need to read the value
+ * back from the output register */
+ reg_in = inl(vg->io_gpo);
+ if (reg_in & gpo_o_bit(nr - NR_VX855_GPI))
+ ret = 1;
+ } else {
+ reg_in = inl(vg->io_gpi);
+ if (reg_in & gpio_i_bit(nr - NR_VX855_GPInO))
+ ret = 1;
+ }
+
+ return ret;
+}
+
+static void vx855gpio_set(struct gpio_chip *gpio, unsigned int nr,
+ int val)
+{
+ struct vx855_gpio *vg = container_of(gpio, struct vx855_gpio, gpio);
+ unsigned long flags;
+ u_int32_t reg_out;
+
+ /* True GPI cannot be switched to output mode */
+ if (nr < NR_VX855_GPI)
+ return;
+
+ spin_lock_irqsave(&vg->lock, flags);
+ reg_out = inl(vg->io_gpo);
+ if (nr < NR_VX855_GPInO) {
+ if (val)
+ reg_out |= gpo_o_bit(nr - NR_VX855_GPI);
+ else
+ reg_out &= ~gpo_o_bit(nr - NR_VX855_GPI);
+ } else {
+ if (val)
+ reg_out |= gpio_o_bit(nr - NR_VX855_GPInO);
+ else
+ reg_out &= ~gpio_o_bit(nr - NR_VX855_GPInO);
+ }
+ outl(reg_out, vg->io_gpo);
+ spin_unlock_irqrestore(&vg->lock, flags);
+}
+
+static int vx855gpio_direction_output(struct gpio_chip *gpio,
+ unsigned int nr, int val)
+{
+ /* True GPI cannot be switched to output mode */
+ if (nr < NR_VX855_GPI)
+ return -EINVAL;
+
+ /* True GPOs don't need to be switched to output mode,
+ * and GPIOs are open-drain, i.e. they also need no switching,
+ * so all we do is set the level */
+ vx855gpio_set(gpio, nr, val);
+
+ return 0;
+}
+
+static const char *vx855gpio_names[NR_VX855_GP] = {
+ "VX855_GPI0", "VX855_GPI1", "VX855_GPI2", "VX855_GPI3", "VX855_GPI4",
+ "VX855_GPI5", "VX855_GPI6", "VX855_GPI7", "VX855_GPI8", "VX855_GPI9",
+ "VX855_GPI10", "VX855_GPI11", "VX855_GPI12", "VX855_GPI13",
+ "VX855_GPO0", "VX855_GPO1", "VX855_GPO2", "VX855_GPO3", "VX855_GPO4",
+ "VX855_GPO5", "VX855_GPO6", "VX855_GPO7", "VX855_GPO8", "VX855_GPO9",
+ "VX855_GPO10", "VX855_GPO11", "VX855_GPO12",
+ "VX855_GPIO0", "VX855_GPIO1", "VX855_GPIO2", "VX855_GPIO3",
+ "VX855_GPIO4", "VX855_GPIO5", "VX855_GPIO6", "VX855_GPIO7",
+ "VX855_GPIO8", "VX855_GPIO9", "VX855_GPIO10", "VX855_GPIO11",
+ "VX855_GPIO12", "VX855_GPIO13", "VX855_GPIO14"
+};
+
+static void vx855gpio_gpio_setup(struct vx855_gpio *vg)
+{
+ struct gpio_chip *c = &vg->gpio;
+
+ c->label = "VX855 South Bridge";
+ c->owner = THIS_MODULE;
+ c->direction_input = vx855gpio_direction_input;
+ c->direction_output = vx855gpio_direction_output;
+ c->get = vx855gpio_get;
+ c->set = vx855gpio_set;
+ c->dbg_show = NULL;
+ c->base = 0;
+ c->ngpio = NR_VX855_GP;
+ c->can_sleep = 0;
+ c->names = vx855gpio_names;
+}
+
+/* This platform device is ordinarily registered by the vx855 mfd driver */
+static __devinit int vx855gpio_probe(struct platform_device *pdev)
+{
+ struct resource *res_gpi;
+ struct resource *res_gpo;
+ struct vx855_gpio *vg;
+ int ret;
+
+ res_gpi = platform_get_resource(pdev, IORESOURCE_IO, 0);
+ res_gpo = platform_get_resource(pdev, IORESOURCE_IO, 1);
+ if (!res_gpi || !res_gpo)
+ return -EBUSY;
+
+ vg = kzalloc(sizeof(*vg), GFP_KERNEL);
+ if (!vg)
+ return -ENOMEM;
+
+ platform_set_drvdata(pdev, vg);
+
+ dev_info(&pdev->dev, "found VX855 GPIO controller\n");
+ vg->io_gpi = res_gpi->start;
+ vg->io_gpo = res_gpo->start;
+ spin_lock_init(&vg->lock);
+
+ /*
+ * A single byte is used to control various GPIO ports on the VX855,
+ * and in the case of the OLPC XO-1.5, some of those ports are used
+ * for switches that are interpreted and exposed through ACPI. ACPI
+ * will have reserved the region, so our own reservation will not
+ * succeed. Ignore and continue.
+ */
+
+ if (!request_region(res_gpi->start, resource_size(res_gpi),
+ MODULE_NAME "_gpi"))
+ dev_warn(&pdev->dev,
+ "GPI I/O resource busy, probably claimed by ACPI\n");
+ else
+ vg->gpi_reserved = true;
+
+ if (!request_region(res_gpo->start, resource_size(res_gpo),
+ MODULE_NAME "_gpo"))
+ dev_warn(&pdev->dev,
+ "GPO I/O resource busy, probably claimed by ACPI\n");
+ else
+ vg->gpo_reserved = true;
+
+ vx855gpio_gpio_setup(vg);
+
+ ret = gpiochip_add(&vg->gpio);
+ if (ret) {
+ dev_err(&pdev->dev, "failed to register GPIOs\n");
+ goto out_release;
+ }
+
+ return 0;
+
+out_release:
+ if (vg->gpi_reserved)
+ release_region(res_gpi->start, resource_size(res_gpi));
+ if (vg->gpo_reserved)
+ release_region(res_gpo->start, resource_size(res_gpo));
+ platform_set_drvdata(pdev, NULL);
+ kfree(vg);
+ return ret;
+}
+
+static int __devexit vx855gpio_remove(struct platform_device *pdev)
+{
+ struct vx855_gpio *vg = platform_get_drvdata(pdev);
+ struct resource *res;
+
+ if (gpiochip_remove(&vg->gpio))
+ dev_err(&pdev->dev, "unable to remove gpio_chip?\n");
+
+ if (vg->gpi_reserved) {
+ res = platform_get_resource(pdev, IORESOURCE_IO, 0);
+ release_region(res->start, resource_size(res));
+ }
+ if (vg->gpo_reserved) {
+ res = platform_get_resource(pdev, IORESOURCE_IO, 1);
+ release_region(res->start, resource_size(res));
+ }
+
+ platform_set_drvdata(pdev, NULL);
+ kfree(vg);
+ return 0;
+}
+
+static struct platform_driver vx855gpio_driver = {
+ .driver = {
+ .name = MODULE_NAME,
+ .owner = THIS_MODULE,
+ },
+ .probe = vx855gpio_probe,
+ .remove = __devexit_p(vx855gpio_remove),
+};
+
+static int __init vx855gpio_init(void)
+{
+ return platform_driver_register(&vx855gpio_driver);
+}
+module_init(vx855gpio_init);
+
+static void __exit vx855gpio_exit(void)
+{
+ platform_driver_unregister(&vx855gpio_driver);
+}
+module_exit(vx855gpio_exit);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Harald Welte <HaraldWelte@viatech.com>");
+MODULE_DESCRIPTION("GPIO driver for the VIA VX855 chipset");
+MODULE_ALIAS("platform:vx855_gpio");
diff --git a/drivers/gpio/wm8994-gpio.c b/drivers/gpio/wm8994-gpio.c
index 2ac9a16d3daa..618398e4ed8e 100644
--- a/drivers/gpio/wm8994-gpio.c
+++ b/drivers/gpio/wm8994-gpio.c
@@ -140,6 +140,7 @@ static struct gpio_chip template_chip = {
.get = wm8994_gpio_get,
.direction_output = wm8994_gpio_direction_out,
.set = wm8994_gpio_set,
+ .to_irq = wm8994_gpio_to_irq,
.dbg_show = wm8994_gpio_dbg_show,
.can_sleep = 1,
};
diff --git a/drivers/gpio/xilinx_gpio.c b/drivers/gpio/xilinx_gpio.c
index 709690995d0d..846fbd5e31bf 100644
--- a/drivers/gpio/xilinx_gpio.c
+++ b/drivers/gpio/xilinx_gpio.c
@@ -171,13 +171,13 @@ static int __devinit xgpio_of_probe(struct device_node *np)
/* Update GPIO state shadow register with default value */
tree_info = of_get_property(np, "xlnx,dout-default", NULL);
if (tree_info)
- chip->gpio_state = *tree_info;
+ chip->gpio_state = be32_to_cpup(tree_info);
/* Update GPIO direction shadow register with default value */
chip->gpio_dir = 0xFFFFFFFF; /* By default, all pins are inputs */
tree_info = of_get_property(np, "xlnx,tri-default", NULL);
if (tree_info)
- chip->gpio_dir = *tree_info;
+ chip->gpio_dir = be32_to_cpup(tree_info);
/* Check device node and parent device node for device width */
chip->mmchip.gc.ngpio = 32; /* By default assume full GPIO controller */
@@ -186,7 +186,7 @@ static int __devinit xgpio_of_probe(struct device_node *np)
tree_info = of_get_property(np->parent,
"xlnx,gpio-width", NULL);
if (tree_info)
- chip->mmchip.gc.ngpio = *tree_info;
+ chip->mmchip.gc.ngpio = be32_to_cpup(tree_info);
spin_lock_init(&chip->gpio_lock);
diff --git a/drivers/gpu/Makefile b/drivers/gpu/Makefile
index 30879df3daea..cc9277885dd0 100644
--- a/drivers/gpu/Makefile
+++ b/drivers/gpu/Makefile
@@ -1 +1 @@
-obj-y += drm/ vga/
+obj-y += drm/ vga/ stub/
diff --git a/drivers/gpu/drm/Makefile b/drivers/gpu/drm/Makefile
index f3a23a329f4e..997c43d04909 100644
--- a/drivers/gpu/drm/Makefile
+++ b/drivers/gpu/drm/Makefile
@@ -5,7 +5,7 @@
ccflags-y := -Iinclude/drm
drm-y := drm_auth.o drm_buffer.o drm_bufs.o drm_cache.o \
- drm_context.o drm_dma.o drm_drawable.o \
+ drm_context.o drm_dma.o \
drm_drv.o drm_fops.o drm_gem.o drm_ioctl.o drm_irq.o \
drm_lock.o drm_memory.o drm_proc.o drm_stub.o drm_vm.o \
drm_agpsupport.o drm_scatter.o ati_pcigart.o drm_pci.o \
diff --git a/drivers/gpu/drm/drm_agpsupport.c b/drivers/gpu/drm/drm_agpsupport.c
index ba38e0147220..252fdb98b73a 100644
--- a/drivers/gpu/drm/drm_agpsupport.c
+++ b/drivers/gpu/drm/drm_agpsupport.c
@@ -193,7 +193,7 @@ int drm_agp_enable_ioctl(struct drm_device *dev, void *data,
* \return zero on success or a negative number on failure.
*
* Verifies the AGP device is present and has been acquired, allocates the
- * memory via alloc_agp() and creates a drm_agp_mem entry for it.
+ * memory via agp_allocate_memory() and creates a drm_agp_mem entry for it.
*/
int drm_agp_alloc(struct drm_device *dev, struct drm_agp_buffer *request)
{
@@ -211,7 +211,7 @@ int drm_agp_alloc(struct drm_device *dev, struct drm_agp_buffer *request)
pages = (request->size + PAGE_SIZE - 1) / PAGE_SIZE;
type = (u32) request->type;
- if (!(memory = drm_alloc_agp(dev, pages, type))) {
+ if (!(memory = agp_allocate_memory(dev->agp->bridge, pages, type))) {
kfree(entry);
return -ENOMEM;
}
@@ -423,38 +423,6 @@ struct drm_agp_head *drm_agp_init(struct drm_device *dev)
return head;
}
-/** Calls agp_allocate_memory() */
-DRM_AGP_MEM *drm_agp_allocate_memory(struct agp_bridge_data * bridge,
- size_t pages, u32 type)
-{
- return agp_allocate_memory(bridge, pages, type);
-}
-
-/** Calls agp_free_memory() */
-int drm_agp_free_memory(DRM_AGP_MEM * handle)
-{
- if (!handle)
- return 0;
- agp_free_memory(handle);
- return 1;
-}
-
-/** Calls agp_bind_memory() */
-int drm_agp_bind_memory(DRM_AGP_MEM * handle, off_t start)
-{
- if (!handle)
- return -EINVAL;
- return agp_bind_memory(handle, start);
-}
-
-/** Calls agp_unbind_memory() */
-int drm_agp_unbind_memory(DRM_AGP_MEM * handle)
-{
- if (!handle)
- return -EINVAL;
- return agp_unbind_memory(handle);
-}
-
/**
* Binds a collection of pages into AGP memory at the given offset, returning
* the AGP memory structure containing them.
@@ -474,7 +442,7 @@ drm_agp_bind_pages(struct drm_device *dev,
DRM_DEBUG("\n");
- mem = drm_agp_allocate_memory(dev->agp->bridge, num_pages,
+ mem = agp_allocate_memory(dev->agp->bridge, num_pages,
type);
if (mem == NULL) {
DRM_ERROR("Failed to allocate memory for %ld pages\n",
@@ -487,7 +455,7 @@ drm_agp_bind_pages(struct drm_device *dev,
mem->page_count = num_pages;
mem->is_flushed = true;
- ret = drm_agp_bind_memory(mem, gtt_offset / PAGE_SIZE);
+ ret = agp_bind_memory(mem, gtt_offset / PAGE_SIZE);
if (ret != 0) {
DRM_ERROR("Failed to bind AGP memory: %d\n", ret);
agp_free_memory(mem);
diff --git a/drivers/gpu/drm/drm_context.c b/drivers/gpu/drm/drm_context.c
index 2607753a320b..6d440fb894cf 100644
--- a/drivers/gpu/drm/drm_context.c
+++ b/drivers/gpu/drm/drm_context.c
@@ -333,14 +333,6 @@ int drm_addctx(struct drm_device *dev, void *data,
return -ENOMEM;
}
- if (ctx->handle != DRM_KERNEL_CONTEXT) {
- if (dev->driver->context_ctor)
- if (!dev->driver->context_ctor(dev, ctx->handle)) {
- DRM_DEBUG("Running out of ctxs or memory.\n");
- return -ENOMEM;
- }
- }
-
ctx_entry = kmalloc(sizeof(*ctx_entry), GFP_KERNEL);
if (!ctx_entry) {
DRM_DEBUG("out of memory\n");
diff --git a/drivers/gpu/drm/drm_crtc.c b/drivers/gpu/drm/drm_crtc.c
index 37e0b4fa482a..6985cb1da72c 100644
--- a/drivers/gpu/drm/drm_crtc.c
+++ b/drivers/gpu/drm/drm_crtc.c
@@ -1854,7 +1854,8 @@ int drm_mode_dirtyfb_ioctl(struct drm_device *dev,
}
if (fb->funcs->dirty) {
- ret = fb->funcs->dirty(fb, flags, r->color, clips, num_clips);
+ ret = fb->funcs->dirty(fb, file_priv, flags, r->color,
+ clips, num_clips);
} else {
ret = -ENOSYS;
goto out_err2;
diff --git a/drivers/gpu/drm/drm_crtc_helper.c b/drivers/gpu/drm/drm_crtc_helper.c
index dcbeb98f195a..bede10a03407 100644
--- a/drivers/gpu/drm/drm_crtc_helper.c
+++ b/drivers/gpu/drm/drm_crtc_helper.c
@@ -241,7 +241,7 @@ void drm_helper_disable_unused_functions(struct drm_device *dev)
}
list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
- if (!drm_helper_encoder_in_use(encoder)) {
+ if (encoder->crtc && !drm_helper_encoder_in_use(encoder)) {
drm_encoder_disable(encoder);
/* disconnector encoder from any connector */
encoder->crtc = NULL;
@@ -276,7 +276,7 @@ static bool drm_encoder_crtc_ok(struct drm_encoder *encoder,
struct drm_crtc *tmp;
int crtc_mask = 1;
- WARN(!crtc, "checking null crtc?");
+ WARN(!crtc, "checking null crtc?\n");
dev = crtc->dev;
@@ -471,6 +471,7 @@ int drm_crtc_helper_set_config(struct drm_mode_set *set)
int count = 0, ro, fail = 0;
struct drm_crtc_helper_funcs *crtc_funcs;
int ret = 0;
+ int i;
DRM_DEBUG_KMS("\n");
@@ -666,6 +667,12 @@ int drm_crtc_helper_set_config(struct drm_mode_set *set)
if (ret != 0)
goto fail;
}
+ DRM_DEBUG_KMS("Setting connector DPMS state to on\n");
+ for (i = 0; i < set->num_connectors; i++) {
+ DRM_DEBUG_KMS("\t[CONNECTOR:%d:%s] set DPMS on\n", set->connectors[i]->base.id,
+ drm_get_connector_name(set->connectors[i]));
+ set->connectors[i]->dpms = DRM_MODE_DPMS_ON;
+ }
kfree(save_connectors);
kfree(save_encoders);
@@ -841,7 +848,7 @@ static void output_poll_execute(struct work_struct *work)
struct delayed_work *delayed_work = to_delayed_work(work);
struct drm_device *dev = container_of(delayed_work, struct drm_device, mode_config.output_poll_work);
struct drm_connector *connector;
- enum drm_connector_status old_status, status;
+ enum drm_connector_status old_status;
bool repoll = false, changed = false;
if (!drm_kms_helper_poll)
@@ -866,8 +873,9 @@ static void output_poll_execute(struct work_struct *work)
!(connector->polled & DRM_CONNECTOR_POLL_HPD))
continue;
- status = connector->funcs->detect(connector, false);
- if (old_status != status)
+ connector->status = connector->funcs->detect(connector, false);
+ DRM_DEBUG_KMS("connector status updated to %d\n", connector->status);
+ if (old_status != connector->status)
changed = true;
}
diff --git a/drivers/gpu/drm/drm_debugfs.c b/drivers/gpu/drm/drm_debugfs.c
index 677b275fa721..9d8c892d07c9 100644
--- a/drivers/gpu/drm/drm_debugfs.c
+++ b/drivers/gpu/drm/drm_debugfs.c
@@ -48,7 +48,6 @@ static struct drm_info_list drm_debugfs_list[] = {
{"queues", drm_queues_info, 0},
{"bufs", drm_bufs_info, 0},
{"gem_names", drm_gem_name_info, DRIVER_GEM},
- {"gem_objects", drm_gem_object_info, DRIVER_GEM},
#if DRM_DEBUG_CODE
{"vma", drm_vma_info, 0},
#endif
diff --git a/drivers/gpu/drm/drm_drawable.c b/drivers/gpu/drm/drm_drawable.c
deleted file mode 100644
index c53c9768cc11..000000000000
--- a/drivers/gpu/drm/drm_drawable.c
+++ /dev/null
@@ -1,198 +0,0 @@
-/**
- * \file drm_drawable.c
- * IOCTLs for drawables
- *
- * \author Rickard E. (Rik) Faith <faith@valinux.com>
- * \author Gareth Hughes <gareth@valinux.com>
- * \author Michel Dänzer <michel@tungstengraphics.com>
- */
-
-/*
- * Created: Tue Feb 2 08:37:54 1999 by faith@valinux.com
- *
- * Copyright 1999 Precision Insight, Inc., Cedar Park, Texas.
- * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California.
- * Copyright 2006 Tungsten Graphics, Inc., Bismarck, North Dakota.
- * All Rights Reserved.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice (including the next
- * paragraph) shall be included in all copies or substantial portions of the
- * Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
- * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
- * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
- * OTHER DEALINGS IN THE SOFTWARE.
- */
-
-#include "drmP.h"
-
-/**
- * Allocate drawable ID and memory to store information about it.
- */
-int drm_adddraw(struct drm_device *dev, void *data, struct drm_file *file_priv)
-{
- unsigned long irqflags;
- struct drm_draw *draw = data;
- int new_id = 0;
- int ret;
-
-again:
- if (idr_pre_get(&dev->drw_idr, GFP_KERNEL) == 0) {
- DRM_ERROR("Out of memory expanding drawable idr\n");
- return -ENOMEM;
- }
-
- spin_lock_irqsave(&dev->drw_lock, irqflags);
- ret = idr_get_new_above(&dev->drw_idr, NULL, 1, &new_id);
- if (ret == -EAGAIN) {
- spin_unlock_irqrestore(&dev->drw_lock, irqflags);
- goto again;
- }
-
- spin_unlock_irqrestore(&dev->drw_lock, irqflags);
-
- draw->handle = new_id;
-
- DRM_DEBUG("%d\n", draw->handle);
-
- return 0;
-}
-
-/**
- * Free drawable ID and memory to store information about it.
- */
-int drm_rmdraw(struct drm_device *dev, void *data, struct drm_file *file_priv)
-{
- struct drm_draw *draw = data;
- unsigned long irqflags;
- struct drm_drawable_info *info;
-
- spin_lock_irqsave(&dev->drw_lock, irqflags);
-
- info = drm_get_drawable_info(dev, draw->handle);
- if (info == NULL) {
- spin_unlock_irqrestore(&dev->drw_lock, irqflags);
- return -EINVAL;
- }
- kfree(info->rects);
- kfree(info);
-
- idr_remove(&dev->drw_idr, draw->handle);
-
- spin_unlock_irqrestore(&dev->drw_lock, irqflags);
- DRM_DEBUG("%d\n", draw->handle);
- return 0;
-}
-
-int drm_update_drawable_info(struct drm_device *dev, void *data, struct drm_file *file_priv)
-{
- struct drm_update_draw *update = data;
- unsigned long irqflags;
- struct drm_clip_rect *rects;
- struct drm_drawable_info *info;
- int err;
-
- info = idr_find(&dev->drw_idr, update->handle);
- if (!info) {
- info = kzalloc(sizeof(*info), GFP_KERNEL);
- if (!info)
- return -ENOMEM;
- if (IS_ERR(idr_replace(&dev->drw_idr, info, update->handle))) {
- DRM_ERROR("No such drawable %d\n", update->handle);
- kfree(info);
- return -EINVAL;
- }
- }
-
- switch (update->type) {
- case DRM_DRAWABLE_CLIPRECTS:
- if (update->num == 0)
- rects = NULL;
- else if (update->num != info->num_rects) {
- rects = kmalloc(update->num *
- sizeof(struct drm_clip_rect),
- GFP_KERNEL);
- } else
- rects = info->rects;
-
- if (update->num && !rects) {
- DRM_ERROR("Failed to allocate cliprect memory\n");
- err = -ENOMEM;
- goto error;
- }
-
- if (update->num && DRM_COPY_FROM_USER(rects,
- (struct drm_clip_rect __user *)
- (unsigned long)update->data,
- update->num *
- sizeof(*rects))) {
- DRM_ERROR("Failed to copy cliprects from userspace\n");
- err = -EFAULT;
- goto error;
- }
-
- spin_lock_irqsave(&dev->drw_lock, irqflags);
-
- if (rects != info->rects) {
- kfree(info->rects);
- }
-
- info->rects = rects;
- info->num_rects = update->num;
-
- spin_unlock_irqrestore(&dev->drw_lock, irqflags);
-
- DRM_DEBUG("Updated %d cliprects for drawable %d\n",
- info->num_rects, update->handle);
- break;
- default:
- DRM_ERROR("Invalid update type %d\n", update->type);
- return -EINVAL;
- }
-
- return 0;
-
-error:
- if (rects != info->rects)
- kfree(rects);
-
- return err;
-}
-
-/**
- * Caller must hold the drawable spinlock!
- */
-struct drm_drawable_info *drm_get_drawable_info(struct drm_device *dev, drm_drawable_t id)
-{
- return idr_find(&dev->drw_idr, id);
-}
-EXPORT_SYMBOL(drm_get_drawable_info);
-
-static int drm_drawable_free(int idr, void *p, void *data)
-{
- struct drm_drawable_info *info = p;
-
- if (info) {
- kfree(info->rects);
- kfree(info);
- }
-
- return 0;
-}
-
-void drm_drawable_free_all(struct drm_device *dev)
-{
- idr_for_each(&dev->drw_idr, drm_drawable_free, NULL);
- idr_remove_all(&dev->drw_idr);
-}
diff --git a/drivers/gpu/drm/drm_drv.c b/drivers/gpu/drm/drm_drv.c
index ff6690f4fc87..271835a71570 100644
--- a/drivers/gpu/drm/drm_drv.c
+++ b/drivers/gpu/drm/drm_drv.c
@@ -91,8 +91,8 @@ static struct drm_ioctl_desc drm_ioctls[] = {
DRM_IOCTL_DEF(DRM_IOCTL_NEW_CTX, drm_newctx, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
DRM_IOCTL_DEF(DRM_IOCTL_RES_CTX, drm_resctx, DRM_AUTH),
- DRM_IOCTL_DEF(DRM_IOCTL_ADD_DRAW, drm_adddraw, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
- DRM_IOCTL_DEF(DRM_IOCTL_RM_DRAW, drm_rmdraw, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
+ DRM_IOCTL_DEF(DRM_IOCTL_ADD_DRAW, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
+ DRM_IOCTL_DEF(DRM_IOCTL_RM_DRAW, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
DRM_IOCTL_DEF(DRM_IOCTL_LOCK, drm_lock, DRM_AUTH),
DRM_IOCTL_DEF(DRM_IOCTL_UNLOCK, drm_unlock, DRM_AUTH),
@@ -127,7 +127,7 @@ static struct drm_ioctl_desc drm_ioctls[] = {
DRM_IOCTL_DEF(DRM_IOCTL_MODESET_CTL, drm_modeset_ctl, 0),
- DRM_IOCTL_DEF(DRM_IOCTL_UPDATE_DRAW, drm_update_drawable_info, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
+ DRM_IOCTL_DEF(DRM_IOCTL_UPDATE_DRAW, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
DRM_IOCTL_DEF(DRM_IOCTL_GEM_CLOSE, drm_gem_close_ioctl, DRM_UNLOCKED),
DRM_IOCTL_DEF(DRM_IOCTL_GEM_FLINK, drm_gem_flink_ioctl, DRM_AUTH|DRM_UNLOCKED),
@@ -180,10 +180,6 @@ int drm_lastclose(struct drm_device * dev)
mutex_lock(&dev->struct_mutex);
- /* Free drawable information memory */
- drm_drawable_free_all(dev);
- del_timer(&dev->timer);
-
/* Clear AGP information */
if (drm_core_has_AGP(dev) && dev->agp &&
!drm_core_check_feature(dev, DRIVER_MODESET)) {
diff --git a/drivers/gpu/drm/drm_edid.c b/drivers/gpu/drm/drm_edid.c
index 96e963108225..a245d17165ae 100644
--- a/drivers/gpu/drm/drm_edid.c
+++ b/drivers/gpu/drm/drm_edid.c
@@ -30,7 +30,6 @@
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/i2c.h>
-#include <linux/i2c-algo-bit.h>
#include "drmP.h"
#include "drm_edid.h"
#include "drm_edid_modes.h"
@@ -241,7 +240,7 @@ drm_do_probe_ddc_edid(struct i2c_adapter *adapter, unsigned char *buf,
.addr = DDC_ADDR,
.flags = I2C_M_RD,
.len = len,
- .buf = buf + start,
+ .buf = buf,
}
};
@@ -254,7 +253,7 @@ drm_do_probe_ddc_edid(struct i2c_adapter *adapter, unsigned char *buf,
static u8 *
drm_do_get_edid(struct drm_connector *connector, struct i2c_adapter *adapter)
{
- int i, j = 0;
+ int i, j = 0, valid_extensions = 0;
u8 *block, *new;
if ((block = kmalloc(EDID_LENGTH, GFP_KERNEL)) == NULL)
@@ -281,14 +280,28 @@ drm_do_get_edid(struct drm_connector *connector, struct i2c_adapter *adapter)
for (j = 1; j <= block[0x7e]; j++) {
for (i = 0; i < 4; i++) {
- if (drm_do_probe_ddc_edid(adapter, block, j,
- EDID_LENGTH))
+ if (drm_do_probe_ddc_edid(adapter,
+ block + (valid_extensions + 1) * EDID_LENGTH,
+ j, EDID_LENGTH))
goto out;
- if (drm_edid_block_valid(block + j * EDID_LENGTH))
+ if (drm_edid_block_valid(block + (valid_extensions + 1) * EDID_LENGTH)) {
+ valid_extensions++;
break;
+ }
}
if (i == 4)
- goto carp;
+ dev_warn(connector->dev->dev,
+ "%s: Ignoring invalid EDID block %d.\n",
+ drm_get_connector_name(connector), j);
+ }
+
+ if (valid_extensions != block[0x7e]) {
+ block[EDID_LENGTH-1] += block[0x7e] - valid_extensions;
+ block[0x7e] = valid_extensions;
+ new = krealloc(block, (valid_extensions + 1) * EDID_LENGTH, GFP_KERNEL);
+ if (!new)
+ goto out;
+ block = new;
}
return block;
@@ -1268,34 +1281,51 @@ add_detailed_modes(struct drm_connector *connector, struct edid *edid,
}
#define HDMI_IDENTIFIER 0x000C03
+#define AUDIO_BLOCK 0x01
#define VENDOR_BLOCK 0x03
+#define EDID_BASIC_AUDIO (1 << 6)
+
/**
- * drm_detect_hdmi_monitor - detect whether monitor is hdmi.
- * @edid: monitor EDID information
- *
- * Parse the CEA extension according to CEA-861-B.
- * Return true if HDMI, false if not or unknown.
+ * Search EDID for CEA extension block.
*/
-bool drm_detect_hdmi_monitor(struct edid *edid)
+static u8 *drm_find_cea_extension(struct edid *edid)
{
- char *edid_ext = NULL;
- int i, hdmi_id;
- int start_offset, end_offset;
- bool is_hdmi = false;
+ u8 *edid_ext = NULL;
+ int i;
/* No EDID or EDID extensions */
if (edid == NULL || edid->extensions == 0)
- goto end;
+ return NULL;
/* Find CEA extension */
for (i = 0; i < edid->extensions; i++) {
- edid_ext = (char *)edid + EDID_LENGTH * (i + 1);
- /* This block is CEA extension */
- if (edid_ext[0] == 0x02)
+ edid_ext = (u8 *)edid + EDID_LENGTH * (i + 1);
+ if (edid_ext[0] == CEA_EXT)
break;
}
if (i == edid->extensions)
+ return NULL;
+
+ return edid_ext;
+}
+
+/**
+ * drm_detect_hdmi_monitor - detect whether monitor is hdmi.
+ * @edid: monitor EDID information
+ *
+ * Parse the CEA extension according to CEA-861-B.
+ * Return true if HDMI, false if not or unknown.
+ */
+bool drm_detect_hdmi_monitor(struct edid *edid)
+{
+ u8 *edid_ext;
+ int i, hdmi_id;
+ int start_offset, end_offset;
+ bool is_hdmi = false;
+
+ edid_ext = drm_find_cea_extension(edid);
+ if (!edid_ext)
goto end;
/* Data block offset in CEA extension block */
@@ -1326,6 +1356,53 @@ end:
EXPORT_SYMBOL(drm_detect_hdmi_monitor);
/**
+ * drm_detect_monitor_audio - check monitor audio capability
+ *
+ * Monitor should have CEA extension block.
+ * If monitor has 'basic audio', but no CEA audio blocks, it's 'basic
+ * audio' only. If there is any audio extension block and supported
+ * audio format, assume at least 'basic audio' support, even if 'basic
+ * audio' is not defined in EDID.
+ *
+ */
+bool drm_detect_monitor_audio(struct edid *edid)
+{
+ u8 *edid_ext;
+ int i, j;
+ bool has_audio = false;
+ int start_offset, end_offset;
+
+ edid_ext = drm_find_cea_extension(edid);
+ if (!edid_ext)
+ goto end;
+
+ has_audio = ((edid_ext[3] & EDID_BASIC_AUDIO) != 0);
+
+ if (has_audio) {
+ DRM_DEBUG_KMS("Monitor has basic audio support\n");
+ goto end;
+ }
+
+ /* Data block offset in CEA extension block */
+ start_offset = 4;
+ end_offset = edid_ext[2];
+
+ for (i = start_offset; i < end_offset;
+ i += ((edid_ext[i] & 0x1f) + 1)) {
+ if ((edid_ext[i] >> 5) == AUDIO_BLOCK) {
+ has_audio = true;
+ for (j = 1; j < (edid_ext[i] & 0x1f); j += 3)
+ DRM_DEBUG_KMS("CEA audio format %d\n",
+ (edid_ext[i + j] >> 3) & 0xf);
+ goto end;
+ }
+ }
+end:
+ return has_audio;
+}
+EXPORT_SYMBOL(drm_detect_monitor_audio);
+
+/**
* drm_add_edid_modes - add modes from EDID data, if available
* @connector: connector we're probing
* @edid: edid data
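
For reference, the data-block walk used by drm_detect_hdmi_monitor() and drm_detect_monitor_audio() above follows the CEA-861 layout: byte 2 of the extension gives the offset at which detailed timings begin (so data blocks occupy bytes 4 up to that offset), and every data block starts with a header byte whose top three bits are the tag (0x01 audio, 0x03 vendor) and whose low five bits are the payload length. A minimal standalone sketch of that walk; the helper name is hypothetical and not part of the patch:

/* Illustrative sketch only: walk the CEA-861 data blocks in a 128-byte
 * CEA extension and report whether an Audio Data Block (tag 0x01) is
 * present.  Layout per CEA-861-B, matching the parsing above. */
static int cea_has_audio_block(const unsigned char *cea)
{
	int start = 4;          /* data blocks start at byte 4 */
	int end = cea[2];       /* byte 2: offset of detailed timings */
	int i;

	for (i = start; i < end; i += (cea[i] & 0x1f) + 1) {
		unsigned char tag = cea[i] >> 5;     /* bits 7:5 */
		unsigned char len = cea[i] & 0x1f;   /* bits 4:0 */

		if (tag == 0x01 && len >= 3)         /* Audio Data Block */
			return 1;
	}
	return 0;
}
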
diff --git a/drivers/gpu/drm/drm_fb_helper.c b/drivers/gpu/drm/drm_fb_helper.c
index 6a5e403f9aa1..d2849e4ea4d0 100644
--- a/drivers/gpu/drm/drm_fb_helper.c
+++ b/drivers/gpu/drm/drm_fb_helper.c
@@ -242,6 +242,30 @@ static int drm_fb_helper_parse_command_line(struct drm_fb_helper *fb_helper)
return 0;
}
+static void drm_fb_helper_save_lut_atomic(struct drm_crtc *crtc, struct drm_fb_helper *helper)
+{
+ uint16_t *r_base, *g_base, *b_base;
+ int i;
+
+ r_base = crtc->gamma_store;
+ g_base = r_base + crtc->gamma_size;
+ b_base = g_base + crtc->gamma_size;
+
+ for (i = 0; i < crtc->gamma_size; i++)
+ helper->funcs->gamma_get(crtc, &r_base[i], &g_base[i], &b_base[i], i);
+}
+
+static void drm_fb_helper_restore_lut_atomic(struct drm_crtc *crtc)
+{
+ uint16_t *r_base, *g_base, *b_base;
+
+ r_base = crtc->gamma_store;
+ g_base = r_base + crtc->gamma_size;
+ b_base = g_base + crtc->gamma_size;
+
+ crtc->funcs->gamma_set(crtc, r_base, g_base, b_base, 0, crtc->gamma_size);
+}
+
int drm_fb_helper_debug_enter(struct fb_info *info)
{
struct drm_fb_helper *helper = info->par;
@@ -260,11 +284,12 @@ int drm_fb_helper_debug_enter(struct fb_info *info)
continue;
funcs = mode_set->crtc->helper_private;
+ drm_fb_helper_save_lut_atomic(mode_set->crtc, helper);
funcs->mode_set_base_atomic(mode_set->crtc,
mode_set->fb,
mode_set->x,
- mode_set->y);
-
+ mode_set->y,
+ ENTER_ATOMIC_MODE_SET);
}
}
@@ -308,8 +333,9 @@ int drm_fb_helper_debug_leave(struct fb_info *info)
continue;
}
+ drm_fb_helper_restore_lut_atomic(mode_set->crtc);
funcs->mode_set_base_atomic(mode_set->crtc, fb, crtc->x,
- crtc->y);
+ crtc->y, LEAVE_ATOMIC_MODE_SET);
}
return 0;
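
The two LUT helpers added above assume that crtc->gamma_store is a single buffer of 3 * gamma_size 16-bit entries laid out as the red ramp, then green, then blue; saving reads each entry back through gamma_get(), and restoring pushes the whole buffer in one gamma_set() call starting at index 0. A small sketch of that layout assumption, illustrative only and reusing the field names from the patch:

/* Sketch: split crtc->gamma_store into its three channel ramps.
 * Assumed layout: [ R0..Rn-1 | G0..Gn-1 | B0..Bn-1 ], n = gamma_size. */
static void lut_channel_bases(struct drm_crtc *crtc,
			      uint16_t **r, uint16_t **g, uint16_t **b)
{
	*r = crtc->gamma_store;
	*g = *r + crtc->gamma_size;
	*b = *g + crtc->gamma_size;
}
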
diff --git a/drivers/gpu/drm/drm_fops.c b/drivers/gpu/drm/drm_fops.c
index b744dad5c237..a39794bac04b 100644
--- a/drivers/gpu/drm/drm_fops.c
+++ b/drivers/gpu/drm/drm_fops.c
@@ -37,7 +37,6 @@
#include "drmP.h"
#include <linux/poll.h>
#include <linux/slab.h>
-#include <linux/smp_lock.h>
/* from BKL pushdown: note that nothing else serializes idr_find() */
DEFINE_MUTEX(drm_global_mutex);
diff --git a/drivers/gpu/drm/drm_gem.c b/drivers/gpu/drm/drm_gem.c
index 5663d2719063..ea1c4b019ebf 100644
--- a/drivers/gpu/drm/drm_gem.c
+++ b/drivers/gpu/drm/drm_gem.c
@@ -92,12 +92,6 @@ drm_gem_init(struct drm_device *dev)
spin_lock_init(&dev->object_name_lock);
idr_init(&dev->object_name_idr);
- atomic_set(&dev->object_count, 0);
- atomic_set(&dev->object_memory, 0);
- atomic_set(&dev->pin_count, 0);
- atomic_set(&dev->pin_memory, 0);
- atomic_set(&dev->gtt_count, 0);
- atomic_set(&dev->gtt_memory, 0);
mm = kzalloc(sizeof(struct drm_gem_mm), GFP_KERNEL);
if (!mm) {
@@ -151,9 +145,6 @@ int drm_gem_object_init(struct drm_device *dev,
atomic_set(&obj->handle_count, 0);
obj->size = size;
- atomic_inc(&dev->object_count);
- atomic_add(obj->size, &dev->object_memory);
-
return 0;
}
EXPORT_SYMBOL(drm_gem_object_init);
@@ -180,8 +171,6 @@ drm_gem_object_alloc(struct drm_device *dev, size_t size)
return obj;
fput:
/* Object_init mangles the global counters - readjust them. */
- atomic_dec(&dev->object_count);
- atomic_sub(obj->size, &dev->object_memory);
fput(obj->filp);
free:
kfree(obj);
@@ -436,10 +425,7 @@ drm_gem_release(struct drm_device *dev, struct drm_file *file_private)
void
drm_gem_object_release(struct drm_gem_object *obj)
{
- struct drm_device *dev = obj->dev;
fput(obj->filp);
- atomic_dec(&dev->object_count);
- atomic_sub(obj->size, &dev->object_memory);
}
EXPORT_SYMBOL(drm_gem_object_release);
diff --git a/drivers/gpu/drm/drm_info.c b/drivers/gpu/drm/drm_info.c
index 974e970ce3f8..3cdbaf379bb5 100644
--- a/drivers/gpu/drm/drm_info.c
+++ b/drivers/gpu/drm/drm_info.c
@@ -270,20 +270,6 @@ int drm_gem_name_info(struct seq_file *m, void *data)
return 0;
}
-int drm_gem_object_info(struct seq_file *m, void* data)
-{
- struct drm_info_node *node = (struct drm_info_node *) m->private;
- struct drm_device *dev = node->minor->dev;
-
- seq_printf(m, "%d objects\n", atomic_read(&dev->object_count));
- seq_printf(m, "%d object bytes\n", atomic_read(&dev->object_memory));
- seq_printf(m, "%d pinned\n", atomic_read(&dev->pin_count));
- seq_printf(m, "%d pin bytes\n", atomic_read(&dev->pin_memory));
- seq_printf(m, "%d gtt bytes\n", atomic_read(&dev->gtt_memory));
- seq_printf(m, "%d gtt total\n", dev->gtt_total);
- return 0;
-}
-
#if DRM_DEBUG_CODE
int drm_vma_info(struct seq_file *m, void *data)
diff --git a/drivers/gpu/drm/drm_irq.c b/drivers/gpu/drm/drm_irq.c
index 9d3a5030b6e1..722700d5d73e 100644
--- a/drivers/gpu/drm/drm_irq.c
+++ b/drivers/gpu/drm/drm_irq.c
@@ -585,10 +585,13 @@ static int drm_queue_vblank_event(struct drm_device *dev, int pipe,
struct timeval now;
unsigned long flags;
unsigned int seq;
+ int ret;
e = kzalloc(sizeof *e, GFP_KERNEL);
- if (e == NULL)
- return -ENOMEM;
+ if (e == NULL) {
+ ret = -ENOMEM;
+ goto err_put;
+ }
e->pipe = pipe;
e->base.pid = current->pid;
@@ -603,9 +606,8 @@ static int drm_queue_vblank_event(struct drm_device *dev, int pipe,
spin_lock_irqsave(&dev->event_lock, flags);
if (file_priv->event_space < sizeof e->event) {
- spin_unlock_irqrestore(&dev->event_lock, flags);
- kfree(e);
- return -ENOMEM;
+ ret = -EBUSY;
+ goto err_unlock;
}
file_priv->event_space -= sizeof e->event;
@@ -638,6 +640,13 @@ static int drm_queue_vblank_event(struct drm_device *dev, int pipe,
spin_unlock_irqrestore(&dev->event_lock, flags);
return 0;
+
+err_unlock:
+ spin_unlock_irqrestore(&dev->event_lock, flags);
+ kfree(e);
+err_put:
+ drm_vblank_put(dev, e->pipe);
+ return ret;
}
/**
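
The drm_queue_vblank_event() hunk above converts two early returns into a single unwind ladder: resources are taken in order, and each failure jumps to a label that releases only what was already acquired, in reverse order. A stripped-down sketch of the pattern; the stub helpers merely stand in for drm_vblank_get()/drm_vblank_put() and the event allocation and are not DRM APIs:

#include <errno.h>

static int  acquire_ref(void)  { return 0; }  /* stands in for drm_vblank_get() */
static void release_ref(void)  { }            /* stands in for drm_vblank_put() */
static void *alloc_event(void) { return 0; }  /* returns NULL to show the unwind */

static int queue_event_sketch(void)
{
	void *e;
	int ret;

	ret = acquire_ref();            /* first resource */
	if (ret)
		return ret;             /* nothing to undo yet */

	e = alloc_event();              /* second resource */
	if (!e) {
		ret = -ENOMEM;
		goto err_put;           /* undo only the first resource */
	}

	/* ... fill in and queue the event here ... */
	return 0;

err_put:
	release_ref();                  /* release in reverse order */
	return ret;
}
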
diff --git a/drivers/gpu/drm/drm_lock.c b/drivers/gpu/drm/drm_lock.c
index 9bf93bc9a32c..632ae243ede0 100644
--- a/drivers/gpu/drm/drm_lock.c
+++ b/drivers/gpu/drm/drm_lock.c
@@ -37,6 +37,8 @@
static int drm_notifier(void *priv);
+static int drm_lock_take(struct drm_lock_data *lock_data, unsigned int context);
+
/**
* Lock ioctl.
*
@@ -124,9 +126,6 @@ int drm_lock(struct drm_device *dev, void *data, struct drm_file *file_priv)
block_all_signals(drm_notifier, &dev->sigdata, &dev->sigmask);
}
- if (dev->driver->dma_ready && (lock->flags & _DRM_LOCK_READY))
- dev->driver->dma_ready(dev);
-
if (dev->driver->dma_quiescent && (lock->flags & _DRM_LOCK_QUIESCENT))
{
if (dev->driver->dma_quiescent(dev)) {
@@ -136,12 +135,6 @@ int drm_lock(struct drm_device *dev, void *data, struct drm_file *file_priv)
}
}
- if (dev->driver->kernel_context_switch &&
- dev->last_context != lock->context) {
- dev->driver->kernel_context_switch(dev, dev->last_context,
- lock->context);
- }
-
return 0;
}
@@ -169,15 +162,8 @@ int drm_unlock(struct drm_device *dev, void *data, struct drm_file *file_priv)
atomic_inc(&dev->counts[_DRM_STAT_UNLOCKS]);
- /* kernel_context_switch isn't used by any of the x86 drm
- * modules but is required by the Sparc driver.
- */
- if (dev->driver->kernel_context_switch_unlock)
- dev->driver->kernel_context_switch_unlock(dev);
- else {
- if (drm_lock_free(&master->lock, lock->context)) {
- /* FIXME: Should really bail out here. */
- }
+ if (drm_lock_free(&master->lock, lock->context)) {
+ /* FIXME: Should really bail out here. */
}
unblock_all_signals();
@@ -193,6 +179,7 @@ int drm_unlock(struct drm_device *dev, void *data, struct drm_file *file_priv)
*
* Attempt to mark the lock as held by the given context, via the \p cmpxchg instruction.
*/
+static
int drm_lock_take(struct drm_lock_data *lock_data,
unsigned int context)
{
@@ -229,7 +216,6 @@ int drm_lock_take(struct drm_lock_data *lock_data,
}
return 0;
}
-EXPORT_SYMBOL(drm_lock_take);
/**
* This takes a lock forcibly and hands it to context. Should ONLY be used
@@ -297,7 +283,6 @@ int drm_lock_free(struct drm_lock_data *lock_data, unsigned int context)
wake_up_interruptible(&lock_data->lock_queue);
return 0;
}
-EXPORT_SYMBOL(drm_lock_free);
/**
* If we get here, it means that the process has called DRM_IOCTL_LOCK
@@ -360,7 +345,6 @@ void drm_idlelock_take(struct drm_lock_data *lock_data)
}
spin_unlock_bh(&lock_data->spinlock);
}
-EXPORT_SYMBOL(drm_idlelock_take);
void drm_idlelock_release(struct drm_lock_data *lock_data)
{
@@ -380,8 +364,6 @@ void drm_idlelock_release(struct drm_lock_data *lock_data)
}
spin_unlock_bh(&lock_data->spinlock);
}
-EXPORT_SYMBOL(drm_idlelock_release);
-
int drm_i_have_hw_lock(struct drm_device *dev, struct drm_file *file_priv)
{
@@ -390,5 +372,3 @@ int drm_i_have_hw_lock(struct drm_device *dev, struct drm_file *file_priv)
_DRM_LOCK_IS_HELD(master->lock.hw_lock->lock) &&
master->lock.file_priv == file_priv);
}
-
-EXPORT_SYMBOL(drm_i_have_hw_lock);
diff --git a/drivers/gpu/drm/drm_memory.c b/drivers/gpu/drm/drm_memory.c
index 7732268eced2..c9b805000a11 100644
--- a/drivers/gpu/drm/drm_memory.c
+++ b/drivers/gpu/drm/drm_memory.c
@@ -99,29 +99,23 @@ static void *agp_remap(unsigned long offset, unsigned long size,
return addr;
}
-/** Wrapper around agp_allocate_memory() */
-DRM_AGP_MEM *drm_alloc_agp(struct drm_device * dev, int pages, u32 type)
-{
- return drm_agp_allocate_memory(dev->agp->bridge, pages, type);
-}
-
/** Wrapper around agp_free_memory() */
-int drm_free_agp(DRM_AGP_MEM * handle, int pages)
+void drm_free_agp(DRM_AGP_MEM * handle, int pages)
{
- return drm_agp_free_memory(handle) ? 0 : -EINVAL;
+ agp_free_memory(handle);
}
EXPORT_SYMBOL(drm_free_agp);
/** Wrapper around agp_bind_memory() */
int drm_bind_agp(DRM_AGP_MEM * handle, unsigned int start)
{
- return drm_agp_bind_memory(handle, start);
+ return agp_bind_memory(handle, start);
}
/** Wrapper around agp_unbind_memory() */
int drm_unbind_agp(DRM_AGP_MEM * handle)
{
- return drm_agp_unbind_memory(handle);
+ return agp_unbind_memory(handle);
}
EXPORT_SYMBOL(drm_unbind_agp);
diff --git a/drivers/gpu/drm/drm_proc.c b/drivers/gpu/drm/drm_proc.c
index a9ba6b69ad35..9e5b07efebb7 100644
--- a/drivers/gpu/drm/drm_proc.c
+++ b/drivers/gpu/drm/drm_proc.c
@@ -55,7 +55,6 @@ static struct drm_info_list drm_proc_list[] = {
{"queues", drm_queues_info, 0},
{"bufs", drm_bufs_info, 0},
{"gem_names", drm_gem_name_info, DRIVER_GEM},
- {"gem_objects", drm_gem_object_info, DRIVER_GEM},
#if DRM_DEBUG_CODE
{"vma", drm_vma_info, 0},
#endif
@@ -151,7 +150,6 @@ fail:
int drm_proc_init(struct drm_minor *minor, int minor_id,
struct proc_dir_entry *root)
{
- struct drm_device *dev = minor->dev;
char name[64];
int ret;
@@ -172,14 +170,6 @@ int drm_proc_init(struct drm_minor *minor, int minor_id,
return ret;
}
- if (dev->driver->proc_init) {
- ret = dev->driver->proc_init(minor);
- if (ret) {
- DRM_ERROR("DRM: Driver failed to initialize "
- "/proc/dri.\n");
- return ret;
- }
- }
return 0;
}
@@ -216,15 +206,11 @@ int drm_proc_remove_files(struct drm_info_list *files, int count,
*/
int drm_proc_cleanup(struct drm_minor *minor, struct proc_dir_entry *root)
{
- struct drm_device *dev = minor->dev;
char name[64];
if (!root || !minor->proc_root)
return 0;
- if (dev->driver->proc_cleanup)
- dev->driver->proc_cleanup(minor);
-
drm_proc_remove_files(drm_proc_list, DRM_PROC_ENTRIES, minor);
sprintf(name, "%d", minor->index);
diff --git a/drivers/gpu/drm/drm_scatter.c b/drivers/gpu/drm/drm_scatter.c
index 9034c4c6100d..d15e09b0ae0b 100644
--- a/drivers/gpu/drm/drm_scatter.c
+++ b/drivers/gpu/drm/drm_scatter.c
@@ -184,8 +184,6 @@ int drm_sg_alloc(struct drm_device *dev, struct drm_scatter_gather * request)
drm_sg_cleanup(entry);
return -ENOMEM;
}
-EXPORT_SYMBOL(drm_sg_alloc);
-
int drm_sg_alloc_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_priv)
diff --git a/drivers/gpu/drm/drm_stub.c b/drivers/gpu/drm/drm_stub.c
index d1ad57450df1..cdc89ee042cc 100644
--- a/drivers/gpu/drm/drm_stub.c
+++ b/drivers/gpu/drm/drm_stub.c
@@ -240,14 +240,10 @@ int drm_fill_in_dev(struct drm_device *dev,
INIT_LIST_HEAD(&dev->vblank_event_list);
spin_lock_init(&dev->count_lock);
- spin_lock_init(&dev->drw_lock);
spin_lock_init(&dev->event_lock);
- init_timer(&dev->timer);
mutex_init(&dev->struct_mutex);
mutex_init(&dev->ctxlist_mutex);
- idr_init(&dev->drw_idr);
-
if (drm_ht_create(&dev->map_hash, 12)) {
return -ENOMEM;
}
diff --git a/drivers/gpu/drm/drm_vm.c b/drivers/gpu/drm/drm_vm.c
index 5df450683aab..2c3fcbdfd8ff 100644
--- a/drivers/gpu/drm/drm_vm.c
+++ b/drivers/gpu/drm/drm_vm.c
@@ -523,14 +523,7 @@ static int drm_mmap_dma(struct file *filp, struct vm_area_struct *vma)
return 0;
}
-resource_size_t drm_core_get_map_ofs(struct drm_local_map * map)
-{
- return map->offset;
-}
-
-EXPORT_SYMBOL(drm_core_get_map_ofs);
-
-resource_size_t drm_core_get_reg_ofs(struct drm_device *dev)
+static resource_size_t drm_core_get_reg_ofs(struct drm_device *dev)
{
#ifdef __alpha__
return dev->hose->dense_mem_base - dev->hose->mem_space->start;
@@ -539,8 +532,6 @@ resource_size_t drm_core_get_reg_ofs(struct drm_device *dev)
#endif
}
-EXPORT_SYMBOL(drm_core_get_reg_ofs);
-
/**
* mmap DMA memory.
*
@@ -627,7 +618,7 @@ int drm_mmap_locked(struct file *filp, struct vm_area_struct *vma)
#endif
case _DRM_FRAME_BUFFER:
case _DRM_REGISTERS:
- offset = dev->driver->get_reg_ofs(dev);
+ offset = drm_core_get_reg_ofs(dev);
vma->vm_flags |= VM_IO; /* not in core dump */
vma->vm_page_prot = drm_io_prot(map->type, vma);
#if !defined(__arm__)
diff --git a/drivers/gpu/drm/i810/i810_drv.c b/drivers/gpu/drm/i810/i810_drv.c
index fe69914ce507..88bcd331e7c5 100644
--- a/drivers/gpu/drm/i810/i810_drv.c
+++ b/drivers/gpu/drm/i810/i810_drv.c
@@ -52,8 +52,6 @@ static struct drm_driver driver = {
.device_is_agp = i810_driver_device_is_agp,
.reclaim_buffers_locked = i810_driver_reclaim_buffers_locked,
.dma_quiescent = i810_driver_dma_quiescent,
- .get_map_ofs = drm_core_get_map_ofs,
- .get_reg_ofs = drm_core_get_reg_ofs,
.ioctls = i810_ioctls,
.fops = {
.owner = THIS_MODULE,
diff --git a/drivers/gpu/drm/i830/i830_drv.c b/drivers/gpu/drm/i830/i830_drv.c
index 5b6298b24e24..f655ab7977da 100644
--- a/drivers/gpu/drm/i830/i830_drv.c
+++ b/drivers/gpu/drm/i830/i830_drv.c
@@ -57,8 +57,6 @@ static struct drm_driver driver = {
.device_is_agp = i830_driver_device_is_agp,
.reclaim_buffers_locked = i830_driver_reclaim_buffers_locked,
.dma_quiescent = i830_driver_dma_quiescent,
- .get_map_ofs = drm_core_get_map_ofs,
- .get_reg_ofs = drm_core_get_reg_ofs,
#if USE_IRQS
.irq_preinstall = i830_driver_irq_preinstall,
.irq_postinstall = i830_driver_irq_postinstall,
diff --git a/drivers/gpu/drm/i915/Makefile b/drivers/gpu/drm/i915/Makefile
index 5c8e53458edb..fdc833d5cc7b 100644
--- a/drivers/gpu/drm/i915/Makefile
+++ b/drivers/gpu/drm/i915/Makefile
@@ -26,15 +26,17 @@ i915-y := i915_drv.o i915_dma.o i915_irq.o i915_mem.o \
intel_dvo.o \
intel_ringbuffer.o \
intel_overlay.o \
+ intel_opregion.o \
dvo_ch7xxx.o \
dvo_ch7017.o \
dvo_ivch.o \
dvo_tfp410.o \
dvo_sil164.o
-i915-$(CONFIG_ACPI) += i915_opregion.o
i915-$(CONFIG_COMPAT) += i915_ioc32.o
+i915-$(CONFIG_ACPI) += intel_acpi.o
+
obj-$(CONFIG_DRM_I915) += i915.o
CFLAGS_i915_trace_points.o := -I$(src)
diff --git a/drivers/gpu/drm/i915/dvo_ch7017.c b/drivers/gpu/drm/i915/dvo_ch7017.c
index 14d59804acd7..af70337567ce 100644
--- a/drivers/gpu/drm/i915/dvo_ch7017.c
+++ b/drivers/gpu/drm/i915/dvo_ch7017.c
@@ -165,67 +165,44 @@ struct ch7017_priv {
static void ch7017_dump_regs(struct intel_dvo_device *dvo);
static void ch7017_dpms(struct intel_dvo_device *dvo, int mode);
-static bool ch7017_read(struct intel_dvo_device *dvo, int addr, uint8_t *val)
+static bool ch7017_read(struct intel_dvo_device *dvo, u8 addr, u8 *val)
{
- struct i2c_adapter *adapter = dvo->i2c_bus;
- struct intel_i2c_chan *i2cbus = container_of(adapter, struct intel_i2c_chan, adapter);
- u8 out_buf[2];
- u8 in_buf[2];
-
struct i2c_msg msgs[] = {
{
.addr = dvo->slave_addr,
.flags = 0,
.len = 1,
- .buf = out_buf,
+ .buf = &addr,
},
{
.addr = dvo->slave_addr,
.flags = I2C_M_RD,
.len = 1,
- .buf = in_buf,
+ .buf = val,
}
};
-
- out_buf[0] = addr;
- out_buf[1] = 0;
-
- if (i2c_transfer(&i2cbus->adapter, msgs, 2) == 2) {
- *val= in_buf[0];
- return true;
- };
-
- return false;
+ return i2c_transfer(dvo->i2c_bus, msgs, 2) == 2;
}
-static bool ch7017_write(struct intel_dvo_device *dvo, int addr, uint8_t val)
+static bool ch7017_write(struct intel_dvo_device *dvo, u8 addr, u8 val)
{
- struct i2c_adapter *adapter = dvo->i2c_bus;
- struct intel_i2c_chan *i2cbus = container_of(adapter, struct intel_i2c_chan, adapter);
- uint8_t out_buf[2];
+ uint8_t buf[2] = { addr, val };
struct i2c_msg msg = {
.addr = dvo->slave_addr,
.flags = 0,
.len = 2,
- .buf = out_buf,
+ .buf = buf,
};
-
- out_buf[0] = addr;
- out_buf[1] = val;
-
- if (i2c_transfer(&i2cbus->adapter, &msg, 1) == 1)
- return true;
-
- return false;
+ return i2c_transfer(dvo->i2c_bus, &msg, 1) == 1;
}
/** Probes for a CH7017 on the given bus and slave address. */
static bool ch7017_init(struct intel_dvo_device *dvo,
struct i2c_adapter *adapter)
{
- struct intel_i2c_chan *i2cbus = container_of(adapter, struct intel_i2c_chan, adapter);
struct ch7017_priv *priv;
- uint8_t val;
+ const char *str;
+ u8 val;
priv = kzalloc(sizeof(struct ch7017_priv), GFP_KERNEL);
if (priv == NULL)
@@ -237,16 +214,27 @@ static bool ch7017_init(struct intel_dvo_device *dvo,
if (!ch7017_read(dvo, CH7017_DEVICE_ID, &val))
goto fail;
- if (val != CH7017_DEVICE_ID_VALUE &&
- val != CH7018_DEVICE_ID_VALUE &&
- val != CH7019_DEVICE_ID_VALUE) {
+ switch (val) {
+ case CH7017_DEVICE_ID_VALUE:
+ str = "ch7017";
+ break;
+ case CH7018_DEVICE_ID_VALUE:
+ str = "ch7018";
+ break;
+ case CH7019_DEVICE_ID_VALUE:
+ str = "ch7019";
+ break;
+ default:
DRM_DEBUG_KMS("ch701x not detected, got %d: from %s "
- "Slave %d.\n",
- val, i2cbus->adapter.name,dvo->slave_addr);
+ "slave %d.\n",
+ val, adapter->name,dvo->slave_addr);
goto fail;
}
+ DRM_DEBUG_KMS("%s detected on %s, addr %d\n",
+ str, adapter->name, dvo->slave_addr);
return true;
+
fail:
kfree(priv);
return false;
@@ -368,7 +356,7 @@ static void ch7017_dpms(struct intel_dvo_device *dvo, int mode)
}
/* XXX: Should actually wait for update power status somehow */
- udelay(20000);
+ msleep(20);
}
static void ch7017_dump_regs(struct intel_dvo_device *dvo)
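
The ch7017 conversion above (and the matching ch7xxx, ivch, sil164 and tfp410 changes that follow) drops the container_of() detour through intel_i2c_chan and issues the transfer directly on the i2c_adapter: a register read becomes one write message carrying the register address followed by one read message for the value. A hedged sketch of that idiom; the helper name is illustrative, while i2c_transfer() and struct i2c_msg are the regular kernel I2C API:

#include <linux/i2c.h>

/* Sketch: read one 8-bit register from an I2C slave, write-then-read. */
static bool dvo_read_reg(struct i2c_adapter *adapter, u16 slave_addr,
			 u8 reg, u8 *val)
{
	struct i2c_msg msgs[] = {
		{
			.addr  = slave_addr,
			.flags = 0,            /* write: register address */
			.len   = 1,
			.buf   = &reg,
		},
		{
			.addr  = slave_addr,
			.flags = I2C_M_RD,     /* read: register value */
			.len   = 1,
			.buf   = val,
		},
	};

	/* i2c_transfer() returns the number of messages processed. */
	return i2c_transfer(adapter, msgs, 2) == 2;
}
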
diff --git a/drivers/gpu/drm/i915/dvo_ch7xxx.c b/drivers/gpu/drm/i915/dvo_ch7xxx.c
index 6f1944b24441..7eaa94e4ff06 100644
--- a/drivers/gpu/drm/i915/dvo_ch7xxx.c
+++ b/drivers/gpu/drm/i915/dvo_ch7xxx.c
@@ -113,7 +113,6 @@ static bool ch7xxx_readb(struct intel_dvo_device *dvo, int addr, uint8_t *ch)
{
struct ch7xxx_priv *ch7xxx= dvo->dev_priv;
struct i2c_adapter *adapter = dvo->i2c_bus;
- struct intel_i2c_chan *i2cbus = container_of(adapter, struct intel_i2c_chan, adapter);
u8 out_buf[2];
u8 in_buf[2];
@@ -135,14 +134,14 @@ static bool ch7xxx_readb(struct intel_dvo_device *dvo, int addr, uint8_t *ch)
out_buf[0] = addr;
out_buf[1] = 0;
- if (i2c_transfer(&i2cbus->adapter, msgs, 2) == 2) {
+ if (i2c_transfer(adapter, msgs, 2) == 2) {
*ch = in_buf[0];
return true;
};
if (!ch7xxx->quiet) {
DRM_DEBUG_KMS("Unable to read register 0x%02x from %s:%02x.\n",
- addr, i2cbus->adapter.name, dvo->slave_addr);
+ addr, adapter->name, dvo->slave_addr);
}
return false;
}
@@ -152,7 +151,6 @@ static bool ch7xxx_writeb(struct intel_dvo_device *dvo, int addr, uint8_t ch)
{
struct ch7xxx_priv *ch7xxx = dvo->dev_priv;
struct i2c_adapter *adapter = dvo->i2c_bus;
- struct intel_i2c_chan *i2cbus = container_of(adapter, struct intel_i2c_chan, adapter);
uint8_t out_buf[2];
struct i2c_msg msg = {
.addr = dvo->slave_addr,
@@ -164,12 +162,12 @@ static bool ch7xxx_writeb(struct intel_dvo_device *dvo, int addr, uint8_t ch)
out_buf[0] = addr;
out_buf[1] = ch;
- if (i2c_transfer(&i2cbus->adapter, &msg, 1) == 1)
+ if (i2c_transfer(adapter, &msg, 1) == 1)
return true;
if (!ch7xxx->quiet) {
DRM_DEBUG_KMS("Unable to write register 0x%02x to %s:%d.\n",
- addr, i2cbus->adapter.name, dvo->slave_addr);
+ addr, adapter->name, dvo->slave_addr);
}
return false;
diff --git a/drivers/gpu/drm/i915/dvo_ivch.c b/drivers/gpu/drm/i915/dvo_ivch.c
index a2ec3f487202..a12ed9414cc7 100644
--- a/drivers/gpu/drm/i915/dvo_ivch.c
+++ b/drivers/gpu/drm/i915/dvo_ivch.c
@@ -167,7 +167,6 @@ static bool ivch_read(struct intel_dvo_device *dvo, int addr, uint16_t *data)
{
struct ivch_priv *priv = dvo->dev_priv;
struct i2c_adapter *adapter = dvo->i2c_bus;
- struct intel_i2c_chan *i2cbus = container_of(adapter, struct intel_i2c_chan, adapter);
u8 out_buf[1];
u8 in_buf[2];
@@ -193,7 +192,7 @@ static bool ivch_read(struct intel_dvo_device *dvo, int addr, uint16_t *data)
out_buf[0] = addr;
- if (i2c_transfer(&i2cbus->adapter, msgs, 3) == 3) {
+ if (i2c_transfer(adapter, msgs, 3) == 3) {
*data = (in_buf[1] << 8) | in_buf[0];
return true;
};
@@ -201,7 +200,7 @@ static bool ivch_read(struct intel_dvo_device *dvo, int addr, uint16_t *data)
if (!priv->quiet) {
DRM_DEBUG_KMS("Unable to read register 0x%02x from "
"%s:%02x.\n",
- addr, i2cbus->adapter.name, dvo->slave_addr);
+ addr, adapter->name, dvo->slave_addr);
}
return false;
}
@@ -211,7 +210,6 @@ static bool ivch_write(struct intel_dvo_device *dvo, int addr, uint16_t data)
{
struct ivch_priv *priv = dvo->dev_priv;
struct i2c_adapter *adapter = dvo->i2c_bus;
- struct intel_i2c_chan *i2cbus = container_of(adapter, struct intel_i2c_chan, adapter);
u8 out_buf[3];
struct i2c_msg msg = {
.addr = dvo->slave_addr,
@@ -224,12 +222,12 @@ static bool ivch_write(struct intel_dvo_device *dvo, int addr, uint16_t data)
out_buf[1] = data & 0xff;
out_buf[2] = data >> 8;
- if (i2c_transfer(&i2cbus->adapter, &msg, 1) == 1)
+ if (i2c_transfer(adapter, &msg, 1) == 1)
return true;
if (!priv->quiet) {
DRM_DEBUG_KMS("Unable to write register 0x%02x to %s:%d.\n",
- addr, i2cbus->adapter.name, dvo->slave_addr);
+ addr, adapter->name, dvo->slave_addr);
}
return false;
diff --git a/drivers/gpu/drm/i915/dvo_sil164.c b/drivers/gpu/drm/i915/dvo_sil164.c
index 9b8e6765cf26..e4b4091df942 100644
--- a/drivers/gpu/drm/i915/dvo_sil164.c
+++ b/drivers/gpu/drm/i915/dvo_sil164.c
@@ -69,7 +69,6 @@ static bool sil164_readb(struct intel_dvo_device *dvo, int addr, uint8_t *ch)
{
struct sil164_priv *sil = dvo->dev_priv;
struct i2c_adapter *adapter = dvo->i2c_bus;
- struct intel_i2c_chan *i2cbus = container_of(adapter, struct intel_i2c_chan, adapter);
u8 out_buf[2];
u8 in_buf[2];
@@ -91,14 +90,14 @@ static bool sil164_readb(struct intel_dvo_device *dvo, int addr, uint8_t *ch)
out_buf[0] = addr;
out_buf[1] = 0;
- if (i2c_transfer(&i2cbus->adapter, msgs, 2) == 2) {
+ if (i2c_transfer(adapter, msgs, 2) == 2) {
*ch = in_buf[0];
return true;
};
if (!sil->quiet) {
DRM_DEBUG_KMS("Unable to read register 0x%02x from %s:%02x.\n",
- addr, i2cbus->adapter.name, dvo->slave_addr);
+ addr, adapter->name, dvo->slave_addr);
}
return false;
}
@@ -107,7 +106,6 @@ static bool sil164_writeb(struct intel_dvo_device *dvo, int addr, uint8_t ch)
{
struct sil164_priv *sil= dvo->dev_priv;
struct i2c_adapter *adapter = dvo->i2c_bus;
- struct intel_i2c_chan *i2cbus = container_of(adapter, struct intel_i2c_chan, adapter);
uint8_t out_buf[2];
struct i2c_msg msg = {
.addr = dvo->slave_addr,
@@ -119,12 +117,12 @@ static bool sil164_writeb(struct intel_dvo_device *dvo, int addr, uint8_t ch)
out_buf[0] = addr;
out_buf[1] = ch;
- if (i2c_transfer(&i2cbus->adapter, &msg, 1) == 1)
+ if (i2c_transfer(adapter, &msg, 1) == 1)
return true;
if (!sil->quiet) {
DRM_DEBUG_KMS("Unable to write register 0x%02x to %s:%d.\n",
- addr, i2cbus->adapter.name, dvo->slave_addr);
+ addr, adapter->name, dvo->slave_addr);
}
return false;
diff --git a/drivers/gpu/drm/i915/dvo_tfp410.c b/drivers/gpu/drm/i915/dvo_tfp410.c
index 56f66426207f..8ab2855bb544 100644
--- a/drivers/gpu/drm/i915/dvo_tfp410.c
+++ b/drivers/gpu/drm/i915/dvo_tfp410.c
@@ -94,7 +94,6 @@ static bool tfp410_readb(struct intel_dvo_device *dvo, int addr, uint8_t *ch)
{
struct tfp410_priv *tfp = dvo->dev_priv;
struct i2c_adapter *adapter = dvo->i2c_bus;
- struct intel_i2c_chan *i2cbus = container_of(adapter, struct intel_i2c_chan, adapter);
u8 out_buf[2];
u8 in_buf[2];
@@ -116,14 +115,14 @@ static bool tfp410_readb(struct intel_dvo_device *dvo, int addr, uint8_t *ch)
out_buf[0] = addr;
out_buf[1] = 0;
- if (i2c_transfer(&i2cbus->adapter, msgs, 2) == 2) {
+ if (i2c_transfer(adapter, msgs, 2) == 2) {
*ch = in_buf[0];
return true;
};
if (!tfp->quiet) {
DRM_DEBUG_KMS("Unable to read register 0x%02x from %s:%02x.\n",
- addr, i2cbus->adapter.name, dvo->slave_addr);
+ addr, adapter->name, dvo->slave_addr);
}
return false;
}
@@ -132,7 +131,6 @@ static bool tfp410_writeb(struct intel_dvo_device *dvo, int addr, uint8_t ch)
{
struct tfp410_priv *tfp = dvo->dev_priv;
struct i2c_adapter *adapter = dvo->i2c_bus;
- struct intel_i2c_chan *i2cbus = container_of(adapter, struct intel_i2c_chan, adapter);
uint8_t out_buf[2];
struct i2c_msg msg = {
.addr = dvo->slave_addr,
@@ -144,12 +142,12 @@ static bool tfp410_writeb(struct intel_dvo_device *dvo, int addr, uint8_t ch)
out_buf[0] = addr;
out_buf[1] = ch;
- if (i2c_transfer(&i2cbus->adapter, &msg, 1) == 1)
+ if (i2c_transfer(adapter, &msg, 1) == 1)
return true;
if (!tfp->quiet) {
DRM_DEBUG_KMS("Unable to write register 0x%02x to %s:%d.\n",
- addr, i2cbus->adapter.name, dvo->slave_addr);
+ addr, adapter->name, dvo->slave_addr);
}
return false;
diff --git a/drivers/gpu/drm/i915/i915_debugfs.c b/drivers/gpu/drm/i915/i915_debugfs.c
index 048149748fdc..1f4f3ceb63c7 100644
--- a/drivers/gpu/drm/i915/i915_debugfs.c
+++ b/drivers/gpu/drm/i915/i915_debugfs.c
@@ -40,9 +40,51 @@
#if defined(CONFIG_DEBUG_FS)
-#define ACTIVE_LIST 1
-#define FLUSHING_LIST 2
-#define INACTIVE_LIST 3
+enum {
+ ACTIVE_LIST,
+ FLUSHING_LIST,
+ INACTIVE_LIST,
+ PINNED_LIST,
+ DEFERRED_FREE_LIST,
+};
+
+static const char *yesno(int v)
+{
+ return v ? "yes" : "no";
+}
+
+static int i915_capabilities(struct seq_file *m, void *data)
+{
+ struct drm_info_node *node = (struct drm_info_node *) m->private;
+ struct drm_device *dev = node->minor->dev;
+ const struct intel_device_info *info = INTEL_INFO(dev);
+
+ seq_printf(m, "gen: %d\n", info->gen);
+#define B(x) seq_printf(m, #x ": %s\n", yesno(info->x))
+ B(is_mobile);
+ B(is_i85x);
+ B(is_i915g);
+ B(is_i945gm);
+ B(is_g33);
+ B(need_gfx_hws);
+ B(is_g4x);
+ B(is_pineview);
+ B(is_broadwater);
+ B(is_crestline);
+ B(has_fbc);
+ B(has_rc6);
+ B(has_pipe_cxsr);
+ B(has_hotplug);
+ B(cursor_needs_physical);
+ B(has_overlay);
+ B(overlay_needs_physical);
+ B(supports_tv);
+ B(has_bsd_ring);
+ B(has_blt_ring);
+#undef B
+
+ return 0;
+}
static const char *get_pin_flag(struct drm_i915_gem_object *obj_priv)
{
@@ -64,6 +106,29 @@ static const char *get_tiling_flag(struct drm_i915_gem_object *obj_priv)
}
}
+static void
+describe_obj(struct seq_file *m, struct drm_i915_gem_object *obj)
+{
+ seq_printf(m, "%p: %s%s %8zd %08x %08x %d%s%s",
+ &obj->base,
+ get_pin_flag(obj),
+ get_tiling_flag(obj),
+ obj->base.size,
+ obj->base.read_domains,
+ obj->base.write_domain,
+ obj->last_rendering_seqno,
+ obj->dirty ? " dirty" : "",
+ obj->madv == I915_MADV_DONTNEED ? " purgeable" : "");
+ if (obj->base.name)
+ seq_printf(m, " (name: %d)", obj->base.name);
+ if (obj->fence_reg != I915_FENCE_REG_NONE)
+ seq_printf(m, " (fence: %d)", obj->fence_reg);
+ if (obj->gtt_space != NULL)
+ seq_printf(m, " (gtt_offset: %08x)", obj->gtt_offset);
+ if (obj->ring != NULL)
+ seq_printf(m, " (%s)", obj->ring->name);
+}
+
static int i915_gem_object_list_info(struct seq_file *m, void *data)
{
struct drm_info_node *node = (struct drm_info_node *) m->private;
@@ -72,56 +137,80 @@ static int i915_gem_object_list_info(struct seq_file *m, void *data)
struct drm_device *dev = node->minor->dev;
drm_i915_private_t *dev_priv = dev->dev_private;
struct drm_i915_gem_object *obj_priv;
- spinlock_t *lock = NULL;
+ size_t total_obj_size, total_gtt_size;
+ int count, ret;
+
+ ret = mutex_lock_interruptible(&dev->struct_mutex);
+ if (ret)
+ return ret;
switch (list) {
case ACTIVE_LIST:
seq_printf(m, "Active:\n");
- lock = &dev_priv->mm.active_list_lock;
- head = &dev_priv->render_ring.active_list;
+ head = &dev_priv->mm.active_list;
break;
case INACTIVE_LIST:
seq_printf(m, "Inactive:\n");
head = &dev_priv->mm.inactive_list;
break;
+ case PINNED_LIST:
+ seq_printf(m, "Pinned:\n");
+ head = &dev_priv->mm.pinned_list;
+ break;
case FLUSHING_LIST:
seq_printf(m, "Flushing:\n");
head = &dev_priv->mm.flushing_list;
break;
+ case DEFERRED_FREE_LIST:
+ seq_printf(m, "Deferred free:\n");
+ head = &dev_priv->mm.deferred_free_list;
+ break;
default:
- DRM_INFO("Ooops, unexpected list\n");
- return 0;
+ mutex_unlock(&dev->struct_mutex);
+ return -EINVAL;
}
- if (lock)
- spin_lock(lock);
- list_for_each_entry(obj_priv, head, list)
- {
- seq_printf(m, " %p: %s %8zd %08x %08x %d%s%s",
- &obj_priv->base,
- get_pin_flag(obj_priv),
- obj_priv->base.size,
- obj_priv->base.read_domains,
- obj_priv->base.write_domain,
- obj_priv->last_rendering_seqno,
- obj_priv->dirty ? " dirty" : "",
- obj_priv->madv == I915_MADV_DONTNEED ? " purgeable" : "");
-
- if (obj_priv->base.name)
- seq_printf(m, " (name: %d)", obj_priv->base.name);
- if (obj_priv->fence_reg != I915_FENCE_REG_NONE)
- seq_printf(m, " (fence: %d)", obj_priv->fence_reg);
- if (obj_priv->gtt_space != NULL)
- seq_printf(m, " (gtt_offset: %08x)", obj_priv->gtt_offset);
-
+ total_obj_size = total_gtt_size = count = 0;
+ list_for_each_entry(obj_priv, head, mm_list) {
+ seq_printf(m, " ");
+ describe_obj(m, obj_priv);
seq_printf(m, "\n");
+ total_obj_size += obj_priv->base.size;
+ total_gtt_size += obj_priv->gtt_space->size;
+ count++;
}
+ mutex_unlock(&dev->struct_mutex);
- if (lock)
- spin_unlock(lock);
+ seq_printf(m, "Total %d objects, %zu bytes, %zu GTT size\n",
+ count, total_obj_size, total_gtt_size);
return 0;
}
+static int i915_gem_object_info(struct seq_file *m, void* data)
+{
+ struct drm_info_node *node = (struct drm_info_node *) m->private;
+ struct drm_device *dev = node->minor->dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ int ret;
+
+ ret = mutex_lock_interruptible(&dev->struct_mutex);
+ if (ret)
+ return ret;
+
+ seq_printf(m, "%u objects\n", dev_priv->mm.object_count);
+ seq_printf(m, "%zu object bytes\n", dev_priv->mm.object_memory);
+ seq_printf(m, "%u pinned\n", dev_priv->mm.pin_count);
+ seq_printf(m, "%zu pin bytes\n", dev_priv->mm.pin_memory);
+ seq_printf(m, "%u objects in gtt\n", dev_priv->mm.gtt_count);
+ seq_printf(m, "%zu gtt bytes\n", dev_priv->mm.gtt_memory);
+ seq_printf(m, "%zu gtt total\n", dev_priv->mm.gtt_total);
+
+ mutex_unlock(&dev->struct_mutex);
+
+ return 0;
+}
+
+
static int i915_gem_pageflip_info(struct seq_file *m, void *data)
{
struct drm_info_node *node = (struct drm_info_node *) m->private;
@@ -176,6 +265,11 @@ static int i915_gem_request_info(struct seq_file *m, void *data)
struct drm_device *dev = node->minor->dev;
drm_i915_private_t *dev_priv = dev->dev_private;
struct drm_i915_gem_request *gem_request;
+ int ret;
+
+ ret = mutex_lock_interruptible(&dev->struct_mutex);
+ if (ret)
+ return ret;
seq_printf(m, "Request:\n");
list_for_each_entry(gem_request, &dev_priv->render_ring.request_list,
@@ -184,6 +278,8 @@ static int i915_gem_request_info(struct seq_file *m, void *data)
gem_request->seqno,
(int) (jiffies - gem_request->emitted_jiffies));
}
+ mutex_unlock(&dev->struct_mutex);
+
return 0;
}
@@ -192,16 +288,24 @@ static int i915_gem_seqno_info(struct seq_file *m, void *data)
struct drm_info_node *node = (struct drm_info_node *) m->private;
struct drm_device *dev = node->minor->dev;
drm_i915_private_t *dev_priv = dev->dev_private;
+ int ret;
+
+ ret = mutex_lock_interruptible(&dev->struct_mutex);
+ if (ret)
+ return ret;
if (dev_priv->render_ring.status_page.page_addr != NULL) {
seq_printf(m, "Current sequence: %d\n",
- i915_get_gem_seqno(dev, &dev_priv->render_ring));
+ dev_priv->render_ring.get_seqno(dev, &dev_priv->render_ring));
} else {
seq_printf(m, "Current sequence: hws uninitialized\n");
}
seq_printf(m, "Waiter sequence: %d\n",
dev_priv->mm.waiting_gem_seqno);
seq_printf(m, "IRQ sequence: %d\n", dev_priv->mm.irq_gem_seqno);
+
+ mutex_unlock(&dev->struct_mutex);
+
return 0;
}
@@ -211,6 +315,11 @@ static int i915_interrupt_info(struct seq_file *m, void *data)
struct drm_info_node *node = (struct drm_info_node *) m->private;
struct drm_device *dev = node->minor->dev;
drm_i915_private_t *dev_priv = dev->dev_private;
+ int ret;
+
+ ret = mutex_lock_interruptible(&dev->struct_mutex);
+ if (ret)
+ return ret;
if (!HAS_PCH_SPLIT(dev)) {
seq_printf(m, "Interrupt enable: %08x\n",
@@ -247,7 +356,7 @@ static int i915_interrupt_info(struct seq_file *m, void *data)
atomic_read(&dev_priv->irq_received));
if (dev_priv->render_ring.status_page.page_addr != NULL) {
seq_printf(m, "Current sequence: %d\n",
- i915_get_gem_seqno(dev, &dev_priv->render_ring));
+ dev_priv->render_ring.get_seqno(dev, &dev_priv->render_ring));
} else {
seq_printf(m, "Current sequence: hws uninitialized\n");
}
@@ -255,6 +364,8 @@ static int i915_interrupt_info(struct seq_file *m, void *data)
dev_priv->mm.waiting_gem_seqno);
seq_printf(m, "IRQ sequence: %d\n",
dev_priv->mm.irq_gem_seqno);
+ mutex_unlock(&dev->struct_mutex);
+
return 0;
}
@@ -263,7 +374,11 @@ static int i915_gem_fence_regs_info(struct seq_file *m, void *data)
struct drm_info_node *node = (struct drm_info_node *) m->private;
struct drm_device *dev = node->minor->dev;
drm_i915_private_t *dev_priv = dev->dev_private;
- int i;
+ int i, ret;
+
+ ret = mutex_lock_interruptible(&dev->struct_mutex);
+ if (ret)
+ return ret;
seq_printf(m, "Reserved fences = %d\n", dev_priv->fence_reg_start);
seq_printf(m, "Total fences = %d\n", dev_priv->num_fence_regs);
@@ -289,6 +404,7 @@ static int i915_gem_fence_regs_info(struct seq_file *m, void *data)
seq_printf(m, "\n");
}
}
+ mutex_unlock(&dev->struct_mutex);
return 0;
}
@@ -313,16 +429,19 @@ static int i915_hws_info(struct seq_file *m, void *data)
return 0;
}
-static void i915_dump_pages(struct seq_file *m, struct page **pages, int page_count)
+static void i915_dump_object(struct seq_file *m,
+ struct io_mapping *mapping,
+ struct drm_i915_gem_object *obj_priv)
{
- int page, i;
- uint32_t *mem;
+ int page, page_count, i;
+ page_count = obj_priv->base.size / PAGE_SIZE;
for (page = 0; page < page_count; page++) {
- mem = kmap_atomic(pages[page], KM_USER0);
+ u32 *mem = io_mapping_map_wc(mapping,
+ obj_priv->gtt_offset + page * PAGE_SIZE);
for (i = 0; i < PAGE_SIZE; i += 4)
seq_printf(m, "%08x : %08x\n", i, mem[i / 4]);
- kunmap_atomic(mem, KM_USER0);
+ io_mapping_unmap(mem);
}
}
@@ -335,27 +454,20 @@ static int i915_batchbuffer_info(struct seq_file *m, void *data)
struct drm_i915_gem_object *obj_priv;
int ret;
- spin_lock(&dev_priv->mm.active_list_lock);
+ ret = mutex_lock_interruptible(&dev->struct_mutex);
+ if (ret)
+ return ret;
- list_for_each_entry(obj_priv, &dev_priv->render_ring.active_list,
- list) {
+ list_for_each_entry(obj_priv, &dev_priv->mm.active_list, mm_list) {
obj = &obj_priv->base;
if (obj->read_domains & I915_GEM_DOMAIN_COMMAND) {
- ret = i915_gem_object_get_pages(obj, 0);
- if (ret) {
- DRM_ERROR("Failed to get pages: %d\n", ret);
- spin_unlock(&dev_priv->mm.active_list_lock);
- return ret;
- }
-
- seq_printf(m, "--- gtt_offset = 0x%08x\n", obj_priv->gtt_offset);
- i915_dump_pages(m, obj_priv->pages, obj->size / PAGE_SIZE);
-
- i915_gem_object_put_pages(obj);
+ seq_printf(m, "--- gtt_offset = 0x%08x\n",
+ obj_priv->gtt_offset);
+ i915_dump_object(m, dev_priv->mm.gtt_mapping, obj_priv);
}
}
- spin_unlock(&dev_priv->mm.active_list_lock);
+ mutex_unlock(&dev->struct_mutex);
return 0;
}
@@ -365,20 +477,24 @@ static int i915_ringbuffer_data(struct seq_file *m, void *data)
struct drm_info_node *node = (struct drm_info_node *) m->private;
struct drm_device *dev = node->minor->dev;
drm_i915_private_t *dev_priv = dev->dev_private;
- u8 *virt;
- uint32_t *ptr, off;
+ int ret;
+
+ ret = mutex_lock_interruptible(&dev->struct_mutex);
+ if (ret)
+ return ret;
if (!dev_priv->render_ring.gem_object) {
seq_printf(m, "No ringbuffer setup\n");
- return 0;
- }
-
- virt = dev_priv->render_ring.virtual_start;
+ } else {
+ u8 *virt = dev_priv->render_ring.virtual_start;
+ uint32_t off;
- for (off = 0; off < dev_priv->render_ring.size; off += 4) {
- ptr = (uint32_t *)(virt + off);
- seq_printf(m, "%08x : %08x\n", off, *ptr);
+ for (off = 0; off < dev_priv->render_ring.size; off += 4) {
+ uint32_t *ptr = (uint32_t *)(virt + off);
+ seq_printf(m, "%08x : %08x\n", off, *ptr);
+ }
}
+ mutex_unlock(&dev->struct_mutex);
return 0;
}
@@ -396,7 +512,7 @@ static int i915_ringbuffer_info(struct seq_file *m, void *data)
seq_printf(m, "RingHead : %08x\n", head);
seq_printf(m, "RingTail : %08x\n", tail);
seq_printf(m, "RingSize : %08lx\n", dev_priv->render_ring.size);
- seq_printf(m, "Acthd : %08x\n", I915_READ(IS_I965G(dev) ? ACTHD_I965 : ACTHD));
+ seq_printf(m, "Acthd : %08x\n", I915_READ(INTEL_INFO(dev)->gen >= 4 ? ACTHD_I965 : ACTHD));
return 0;
}
@@ -458,7 +574,7 @@ static int i915_error_state(struct seq_file *m, void *unused)
seq_printf(m, " IPEHR: 0x%08x\n", error->ipehr);
seq_printf(m, " INSTDONE: 0x%08x\n", error->instdone);
seq_printf(m, " ACTHD: 0x%08x\n", error->acthd);
- if (IS_I965G(dev)) {
+ if (INTEL_INFO(dev)->gen >= 4) {
seq_printf(m, " INSTPS: 0x%08x\n", error->instps);
seq_printf(m, " INSTDONE1: 0x%08x\n", error->instdone1);
}
@@ -642,6 +758,9 @@ static int i915_fbc_status(struct seq_file *m, void *unused)
} else {
seq_printf(m, "FBC disabled: ");
switch (dev_priv->no_fbc_reason) {
+ case FBC_NO_OUTPUT:
+ seq_printf(m, "no outputs");
+ break;
case FBC_STOLEN_TOO_SMALL:
seq_printf(m, "not enough stolen memory");
break;
@@ -675,15 +794,17 @@ static int i915_sr_status(struct seq_file *m, void *unused)
drm_i915_private_t *dev_priv = dev->dev_private;
bool sr_enabled = false;
- if (IS_I965GM(dev) || IS_I945G(dev) || IS_I945GM(dev))
+ if (IS_GEN5(dev))
+ sr_enabled = I915_READ(WM1_LP_ILK) & WM1_LP_SR_EN;
+ else if (IS_CRESTLINE(dev) || IS_I945G(dev) || IS_I945GM(dev))
sr_enabled = I915_READ(FW_BLC_SELF) & FW_BLC_SELF_EN;
else if (IS_I915GM(dev))
sr_enabled = I915_READ(INSTPM) & INSTPM_SELF_EN;
else if (IS_PINEVIEW(dev))
sr_enabled = I915_READ(DSPFW3) & PINEVIEW_SELF_REFRESH_EN;
- seq_printf(m, "self-refresh: %s\n", sr_enabled ? "enabled" :
- "disabled");
+ seq_printf(m, "self-refresh: %s\n",
+ sr_enabled ? "enabled" : "disabled");
return 0;
}
@@ -694,10 +815,16 @@ static int i915_emon_status(struct seq_file *m, void *unused)
struct drm_device *dev = node->minor->dev;
drm_i915_private_t *dev_priv = dev->dev_private;
unsigned long temp, chipset, gfx;
+ int ret;
+
+ ret = mutex_lock_interruptible(&dev->struct_mutex);
+ if (ret)
+ return ret;
temp = i915_mch_val(dev_priv);
chipset = i915_chipset_val(dev_priv);
gfx = i915_gfx_val(dev_priv);
+ mutex_unlock(&dev->struct_mutex);
seq_printf(m, "GMCH temp: %ld\n", temp);
seq_printf(m, "Chipset power: %ld\n", chipset);
@@ -718,6 +845,68 @@ static int i915_gfxec(struct seq_file *m, void *unused)
return 0;
}
+static int i915_opregion(struct seq_file *m, void *unused)
+{
+ struct drm_info_node *node = (struct drm_info_node *) m->private;
+ struct drm_device *dev = node->minor->dev;
+ drm_i915_private_t *dev_priv = dev->dev_private;
+ struct intel_opregion *opregion = &dev_priv->opregion;
+ int ret;
+
+ ret = mutex_lock_interruptible(&dev->struct_mutex);
+ if (ret)
+ return ret;
+
+ if (opregion->header)
+ seq_write(m, opregion->header, OPREGION_SIZE);
+
+ mutex_unlock(&dev->struct_mutex);
+
+ return 0;
+}
+
+static int i915_gem_framebuffer_info(struct seq_file *m, void *data)
+{
+ struct drm_info_node *node = (struct drm_info_node *) m->private;
+ struct drm_device *dev = node->minor->dev;
+ drm_i915_private_t *dev_priv = dev->dev_private;
+ struct intel_fbdev *ifbdev;
+ struct intel_framebuffer *fb;
+ int ret;
+
+ ret = mutex_lock_interruptible(&dev->mode_config.mutex);
+ if (ret)
+ return ret;
+
+ ifbdev = dev_priv->fbdev;
+ fb = to_intel_framebuffer(ifbdev->helper.fb);
+
+ seq_printf(m, "fbcon size: %d x %d, depth %d, %d bpp, obj ",
+ fb->base.width,
+ fb->base.height,
+ fb->base.depth,
+ fb->base.bits_per_pixel);
+ describe_obj(m, to_intel_bo(fb->obj));
+ seq_printf(m, "\n");
+
+ list_for_each_entry(fb, &dev->mode_config.fb_list, base.head) {
+ if (&fb->base == ifbdev->helper.fb)
+ continue;
+
+ seq_printf(m, "user size: %d x %d, depth %d, %d bpp, obj ",
+ fb->base.width,
+ fb->base.height,
+ fb->base.depth,
+ fb->base.bits_per_pixel);
+ describe_obj(m, to_intel_bo(fb->obj));
+ seq_printf(m, "\n");
+ }
+
+ mutex_unlock(&dev->mode_config.mutex);
+
+ return 0;
+}
+
static int
i915_wedged_open(struct inode *inode,
struct file *filp)
@@ -741,6 +930,9 @@ i915_wedged_read(struct file *filp,
"wedged : %d\n",
atomic_read(&dev_priv->mm.wedged));
+ if (len > sizeof (buf))
+ len = sizeof (buf);
+
return simple_read_from_buffer(ubuf, max, ppos, buf, len);
}
@@ -770,7 +962,7 @@ i915_wedged_write(struct file *filp,
atomic_set(&dev_priv->mm.wedged, val);
if (val) {
- DRM_WAKEUP(&dev_priv->irq_queue);
+ wake_up_all(&dev_priv->irq_queue);
queue_work(dev_priv->wq, &dev_priv->error_work);
}
@@ -824,9 +1016,13 @@ static int i915_wedged_create(struct dentry *root, struct drm_minor *minor)
}
static struct drm_info_list i915_debugfs_list[] = {
+ {"i915_capabilities", i915_capabilities, 0, 0},
+ {"i915_gem_objects", i915_gem_object_info, 0},
{"i915_gem_active", i915_gem_object_list_info, 0, (void *) ACTIVE_LIST},
{"i915_gem_flushing", i915_gem_object_list_info, 0, (void *) FLUSHING_LIST},
{"i915_gem_inactive", i915_gem_object_list_info, 0, (void *) INACTIVE_LIST},
+ {"i915_gem_pinned", i915_gem_object_list_info, 0, (void *) PINNED_LIST},
+ {"i915_gem_deferred_free", i915_gem_object_list_info, 0, (void *) DEFERRED_FREE_LIST},
{"i915_gem_pageflip", i915_gem_pageflip_info, 0},
{"i915_gem_request", i915_gem_request_info, 0},
{"i915_gem_seqno", i915_gem_seqno_info, 0},
@@ -846,6 +1042,8 @@ static struct drm_info_list i915_debugfs_list[] = {
{"i915_gfxec", i915_gfxec, 0},
{"i915_fbc_status", i915_fbc_status, 0},
{"i915_sr_status", i915_sr_status, 0},
+ {"i915_opregion", i915_opregion, 0},
+ {"i915_gem_framebuffer", i915_gem_framebuffer_info, 0},
};
#define I915_DEBUGFS_ENTRIES ARRAY_SIZE(i915_debugfs_list)
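
A recurring change in the debugfs readers above is taking dev->struct_mutex with mutex_lock_interruptible() and propagating the error, instead of dumping state unlocked; a reader blocked behind a wedged GPU can then be interrupted rather than hanging. A minimal sketch of that shape, where the show function and its output line are placeholders rather than entries from the patch:

#include "drmP.h"
#include <linux/seq_file.h>

static int example_debugfs_show(struct seq_file *m, void *data)
{
	struct drm_info_node *node = (struct drm_info_node *) m->private;
	struct drm_device *dev = node->minor->dev;
	int ret;

	ret = mutex_lock_interruptible(&dev->struct_mutex);
	if (ret)
		return ret;             /* typically -EINTR */

	seq_printf(m, "state dumped under the lock\n");

	mutex_unlock(&dev->struct_mutex);
	return 0;
}
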
diff --git a/drivers/gpu/drm/i915/i915_dma.c b/drivers/gpu/drm/i915/i915_dma.c
index 2dd2c93ebfa3..e6800819bca8 100644
--- a/drivers/gpu/drm/i915/i915_dma.c
+++ b/drivers/gpu/drm/i915/i915_dma.c
@@ -40,8 +40,7 @@
#include <linux/pnp.h>
#include <linux/vga_switcheroo.h>
#include <linux/slab.h>
-
-extern int intel_max_stolen; /* from AGP driver */
+#include <acpi/video.h>
/**
* Sets up the hardware status page for devices that need a physical address
@@ -64,7 +63,7 @@ static int i915_init_phys_hws(struct drm_device *dev)
memset(dev_priv->render_ring.status_page.page_addr, 0, PAGE_SIZE);
- if (IS_I965G(dev))
+ if (INTEL_INFO(dev)->gen >= 4)
dev_priv->dma_status_page |= (dev_priv->dma_status_page >> 28) &
0xf0;
@@ -133,8 +132,8 @@ static int i915_dma_cleanup(struct drm_device * dev)
mutex_lock(&dev->struct_mutex);
intel_cleanup_ring_buffer(dev, &dev_priv->render_ring);
- if (HAS_BSD(dev))
- intel_cleanup_ring_buffer(dev, &dev_priv->bsd_ring);
+ intel_cleanup_ring_buffer(dev, &dev_priv->bsd_ring);
+ intel_cleanup_ring_buffer(dev, &dev_priv->blt_ring);
mutex_unlock(&dev->struct_mutex);
/* Clear the HWS virtual address at teardown */
@@ -222,7 +221,7 @@ static int i915_dma_resume(struct drm_device * dev)
DRM_DEBUG_DRIVER("hw status page @ %p\n",
ring->status_page.page_addr);
if (ring->status_page.gfx_addr != 0)
- ring->setup_status_page(dev, ring);
+ intel_ring_setup_status_page(dev, ring);
else
I915_WRITE(HWS_PGA, dev_priv->dma_status_page);
@@ -377,7 +376,7 @@ i915_emit_box(struct drm_device *dev,
return -EINVAL;
}
- if (IS_I965G(dev)) {
+ if (INTEL_INFO(dev)->gen >= 4) {
BEGIN_LP_RING(4);
OUT_RING(GFX_OP_DRAWRECT_INFO_I965);
OUT_RING((box.x1 & 0xffff) | (box.y1 << 16));
@@ -481,7 +480,7 @@ static int i915_dispatch_batchbuffer(struct drm_device * dev,
if (!IS_I830(dev) && !IS_845G(dev)) {
BEGIN_LP_RING(2);
- if (IS_I965G(dev)) {
+ if (INTEL_INFO(dev)->gen >= 4) {
OUT_RING(MI_BATCH_BUFFER_START | (2 << 6) | MI_BATCH_NON_SECURE_I965);
OUT_RING(batch->start);
} else {
@@ -500,7 +499,7 @@ static int i915_dispatch_batchbuffer(struct drm_device * dev,
}
- if (IS_G4X(dev) || IS_IRONLAKE(dev)) {
+ if (IS_G4X(dev) || IS_GEN5(dev)) {
BEGIN_LP_RING(2);
OUT_RING(MI_FLUSH | MI_NO_WRITE_FLUSH | MI_INVALIDATE_ISP);
OUT_RING(MI_NOOP);
@@ -765,6 +764,12 @@ static int i915_getparam(struct drm_device *dev, void *data,
case I915_PARAM_HAS_BSD:
value = HAS_BSD(dev);
break;
+ case I915_PARAM_HAS_BLT:
+ value = HAS_BLT(dev);
+ break;
+ case I915_PARAM_HAS_COHERENT_RINGS:
+ value = 1;
+ break;
default:
DRM_DEBUG_DRIVER("Unknown parameter %d\n",
param->param);
@@ -888,12 +893,12 @@ static int
intel_alloc_mchbar_resource(struct drm_device *dev)
{
drm_i915_private_t *dev_priv = dev->dev_private;
- int reg = IS_I965G(dev) ? MCHBAR_I965 : MCHBAR_I915;
+ int reg = INTEL_INFO(dev)->gen >= 4 ? MCHBAR_I965 : MCHBAR_I915;
u32 temp_lo, temp_hi = 0;
u64 mchbar_addr;
int ret;
- if (IS_I965G(dev))
+ if (INTEL_INFO(dev)->gen >= 4)
pci_read_config_dword(dev_priv->bridge_dev, reg + 4, &temp_hi);
pci_read_config_dword(dev_priv->bridge_dev, reg, &temp_lo);
mchbar_addr = ((u64)temp_hi << 32) | temp_lo;
@@ -920,7 +925,7 @@ intel_alloc_mchbar_resource(struct drm_device *dev)
return ret;
}
- if (IS_I965G(dev))
+ if (INTEL_INFO(dev)->gen >= 4)
pci_write_config_dword(dev_priv->bridge_dev, reg + 4,
upper_32_bits(dev_priv->mch_res.start));
@@ -934,7 +939,7 @@ static void
intel_setup_mchbar(struct drm_device *dev)
{
drm_i915_private_t *dev_priv = dev->dev_private;
- int mchbar_reg = IS_I965G(dev) ? MCHBAR_I965 : MCHBAR_I915;
+ int mchbar_reg = INTEL_INFO(dev)->gen >= 4 ? MCHBAR_I965 : MCHBAR_I915;
u32 temp;
bool enabled;
@@ -971,7 +976,7 @@ static void
intel_teardown_mchbar(struct drm_device *dev)
{
drm_i915_private_t *dev_priv = dev->dev_private;
- int mchbar_reg = IS_I965G(dev) ? MCHBAR_I965 : MCHBAR_I915;
+ int mchbar_reg = INTEL_INFO(dev)->gen >= 4 ? MCHBAR_I965 : MCHBAR_I915;
u32 temp;
if (dev_priv->mchbar_need_disable) {
@@ -990,174 +995,6 @@ intel_teardown_mchbar(struct drm_device *dev)
release_resource(&dev_priv->mch_res);
}
-/**
- * i915_probe_agp - get AGP bootup configuration
- * @pdev: PCI device
- * @aperture_size: returns AGP aperture configured size
- * @preallocated_size: returns size of BIOS preallocated AGP space
- *
- * Since Intel integrated graphics are UMA, the BIOS has to set aside
- * some RAM for the framebuffer at early boot. This code figures out
- * how much was set aside so we can use it for our own purposes.
- */
-static int i915_probe_agp(struct drm_device *dev, uint32_t *aperture_size,
- uint32_t *preallocated_size,
- uint32_t *start)
-{
- struct drm_i915_private *dev_priv = dev->dev_private;
- u16 tmp = 0;
- unsigned long overhead;
- unsigned long stolen;
-
- /* Get the fb aperture size and "stolen" memory amount. */
- pci_read_config_word(dev_priv->bridge_dev, INTEL_GMCH_CTRL, &tmp);
-
- *aperture_size = 1024 * 1024;
- *preallocated_size = 1024 * 1024;
-
- switch (dev->pdev->device) {
- case PCI_DEVICE_ID_INTEL_82830_CGC:
- case PCI_DEVICE_ID_INTEL_82845G_IG:
- case PCI_DEVICE_ID_INTEL_82855GM_IG:
- case PCI_DEVICE_ID_INTEL_82865_IG:
- if ((tmp & INTEL_GMCH_MEM_MASK) == INTEL_GMCH_MEM_64M)
- *aperture_size *= 64;
- else
- *aperture_size *= 128;
- break;
- default:
- /* 9xx supports large sizes, just look at the length */
- *aperture_size = pci_resource_len(dev->pdev, 2);
- break;
- }
-
- /*
- * Some of the preallocated space is taken by the GTT
- * and popup. GTT is 1K per MB of aperture size, and popup is 4K.
- */
- if (IS_G4X(dev) || IS_PINEVIEW(dev) || IS_IRONLAKE(dev) || IS_GEN6(dev))
- overhead = 4096;
- else
- overhead = (*aperture_size / 1024) + 4096;
-
- if (IS_GEN6(dev)) {
- /* SNB has memory control reg at 0x50.w */
- pci_read_config_word(dev->pdev, SNB_GMCH_CTRL, &tmp);
-
- switch (tmp & SNB_GMCH_GMS_STOLEN_MASK) {
- case INTEL_855_GMCH_GMS_DISABLED:
- DRM_ERROR("video memory is disabled\n");
- return -1;
- case SNB_GMCH_GMS_STOLEN_32M:
- stolen = 32 * 1024 * 1024;
- break;
- case SNB_GMCH_GMS_STOLEN_64M:
- stolen = 64 * 1024 * 1024;
- break;
- case SNB_GMCH_GMS_STOLEN_96M:
- stolen = 96 * 1024 * 1024;
- break;
- case SNB_GMCH_GMS_STOLEN_128M:
- stolen = 128 * 1024 * 1024;
- break;
- case SNB_GMCH_GMS_STOLEN_160M:
- stolen = 160 * 1024 * 1024;
- break;
- case SNB_GMCH_GMS_STOLEN_192M:
- stolen = 192 * 1024 * 1024;
- break;
- case SNB_GMCH_GMS_STOLEN_224M:
- stolen = 224 * 1024 * 1024;
- break;
- case SNB_GMCH_GMS_STOLEN_256M:
- stolen = 256 * 1024 * 1024;
- break;
- case SNB_GMCH_GMS_STOLEN_288M:
- stolen = 288 * 1024 * 1024;
- break;
- case SNB_GMCH_GMS_STOLEN_320M:
- stolen = 320 * 1024 * 1024;
- break;
- case SNB_GMCH_GMS_STOLEN_352M:
- stolen = 352 * 1024 * 1024;
- break;
- case SNB_GMCH_GMS_STOLEN_384M:
- stolen = 384 * 1024 * 1024;
- break;
- case SNB_GMCH_GMS_STOLEN_416M:
- stolen = 416 * 1024 * 1024;
- break;
- case SNB_GMCH_GMS_STOLEN_448M:
- stolen = 448 * 1024 * 1024;
- break;
- case SNB_GMCH_GMS_STOLEN_480M:
- stolen = 480 * 1024 * 1024;
- break;
- case SNB_GMCH_GMS_STOLEN_512M:
- stolen = 512 * 1024 * 1024;
- break;
- default:
- DRM_ERROR("unexpected GMCH_GMS value: 0x%02x\n",
- tmp & SNB_GMCH_GMS_STOLEN_MASK);
- return -1;
- }
- } else {
- switch (tmp & INTEL_GMCH_GMS_MASK) {
- case INTEL_855_GMCH_GMS_DISABLED:
- DRM_ERROR("video memory is disabled\n");
- return -1;
- case INTEL_855_GMCH_GMS_STOLEN_1M:
- stolen = 1 * 1024 * 1024;
- break;
- case INTEL_855_GMCH_GMS_STOLEN_4M:
- stolen = 4 * 1024 * 1024;
- break;
- case INTEL_855_GMCH_GMS_STOLEN_8M:
- stolen = 8 * 1024 * 1024;
- break;
- case INTEL_855_GMCH_GMS_STOLEN_16M:
- stolen = 16 * 1024 * 1024;
- break;
- case INTEL_855_GMCH_GMS_STOLEN_32M:
- stolen = 32 * 1024 * 1024;
- break;
- case INTEL_915G_GMCH_GMS_STOLEN_48M:
- stolen = 48 * 1024 * 1024;
- break;
- case INTEL_915G_GMCH_GMS_STOLEN_64M:
- stolen = 64 * 1024 * 1024;
- break;
- case INTEL_GMCH_GMS_STOLEN_128M:
- stolen = 128 * 1024 * 1024;
- break;
- case INTEL_GMCH_GMS_STOLEN_256M:
- stolen = 256 * 1024 * 1024;
- break;
- case INTEL_GMCH_GMS_STOLEN_96M:
- stolen = 96 * 1024 * 1024;
- break;
- case INTEL_GMCH_GMS_STOLEN_160M:
- stolen = 160 * 1024 * 1024;
- break;
- case INTEL_GMCH_GMS_STOLEN_224M:
- stolen = 224 * 1024 * 1024;
- break;
- case INTEL_GMCH_GMS_STOLEN_352M:
- stolen = 352 * 1024 * 1024;
- break;
- default:
- DRM_ERROR("unexpected GMCH_GMS value: 0x%02x\n",
- tmp & INTEL_GMCH_GMS_MASK);
- return -1;
- }
- }
-
- *preallocated_size = stolen - overhead;
- *start = overhead;
-
- return 0;
-}
-
#define PTE_ADDRESS_MASK 0xfffff000
#define PTE_ADDRESS_MASK_HIGH 0x000000f0 /* i915+ */
#define PTE_MAPPING_TYPE_UNCACHED (0 << 1)
@@ -1181,11 +1018,11 @@ static unsigned long i915_gtt_to_phys(struct drm_device *dev,
{
unsigned long *gtt;
unsigned long entry, phys;
- int gtt_bar = IS_I9XX(dev) ? 0 : 1;
+ int gtt_bar = IS_GEN2(dev) ? 1 : 0;
int gtt_offset, gtt_size;
- if (IS_I965G(dev)) {
- if (IS_G4X(dev) || IS_IRONLAKE(dev) || IS_GEN6(dev)) {
+ if (INTEL_INFO(dev)->gen >= 4) {
+ if (IS_G4X(dev) || INTEL_INFO(dev)->gen > 4) {
gtt_offset = 2*1024*1024;
gtt_size = 2*1024*1024;
} else {
@@ -1210,10 +1047,8 @@ static unsigned long i915_gtt_to_phys(struct drm_device *dev,
DRM_DEBUG_DRIVER("GTT addr: 0x%08lx, PTE: 0x%08lx\n", gtt_addr, entry);
/* Mask out these reserved bits on this hardware. */
- if (!IS_I9XX(dev) || IS_I915G(dev) || IS_I915GM(dev) ||
- IS_I945G(dev) || IS_I945GM(dev)) {
+ if (INTEL_INFO(dev)->gen < 4 && !IS_G33(dev))
entry &= ~PTE_ADDRESS_MASK_HIGH;
- }
/* If it's not a mapping type we know, then bail. */
if ((entry & PTE_MAPPING_TYPE_MASK) != PTE_MAPPING_TYPE_UNCACHED &&
@@ -1252,7 +1087,7 @@ static void i915_setup_compression(struct drm_device *dev, int size)
unsigned long ll_base = 0;
/* Leave 1M for line length buffer & misc. */
- compressed_fb = drm_mm_search_free(&dev_priv->vram, size, 4096, 0);
+ compressed_fb = drm_mm_search_free(&dev_priv->mm.vram, size, 4096, 0);
if (!compressed_fb) {
dev_priv->no_fbc_reason = FBC_STOLEN_TOO_SMALL;
i915_warn_stolen(dev);
@@ -1273,7 +1108,7 @@ static void i915_setup_compression(struct drm_device *dev, int size)
}
if (!(IS_GM45(dev) || IS_IRONLAKE_M(dev))) {
- compressed_llb = drm_mm_search_free(&dev_priv->vram, 4096,
+ compressed_llb = drm_mm_search_free(&dev_priv->mm.vram, 4096,
4096, 0);
if (!compressed_llb) {
i915_warn_stolen(dev);
@@ -1343,10 +1178,8 @@ static void i915_switcheroo_set_state(struct pci_dev *pdev, enum vga_switcheroo_
/* i915 resume handler doesn't set to D0 */
pci_set_power_state(dev->pdev, PCI_D0);
i915_resume(dev);
- drm_kms_helper_poll_enable(dev);
} else {
printk(KERN_ERR "i915: switched off\n");
- drm_kms_helper_poll_disable(dev);
i915_suspend(dev, pmm);
}
}
@@ -1363,23 +1196,14 @@ static bool i915_switcheroo_can_switch(struct pci_dev *pdev)
}
static int i915_load_modeset_init(struct drm_device *dev,
- unsigned long prealloc_start,
unsigned long prealloc_size,
unsigned long agp_size)
{
struct drm_i915_private *dev_priv = dev->dev_private;
- int fb_bar = IS_I9XX(dev) ? 2 : 0;
int ret = 0;
- dev->mode_config.fb_base = pci_resource_start(dev->pdev, fb_bar) &
- 0xff000000;
-
- /* Basic memrange allocator for stolen space (aka vram) */
- drm_mm_init(&dev_priv->vram, 0, prealloc_size);
- DRM_INFO("set up %ldM of stolen space\n", prealloc_size / (1024*1024));
-
- /* We're off and running w/KMS */
- dev_priv->mm.suspended = 0;
+ /* Basic memrange allocator for stolen space (aka mm.vram) */
+ drm_mm_init(&dev_priv->mm.vram, 0, prealloc_size);
/* Let GEM Manage from end of prealloc space to end of aperture.
*
@@ -1414,7 +1238,7 @@ static int i915_load_modeset_init(struct drm_device *dev,
*/
dev_priv->allow_batchbuffer = 1;
- ret = intel_init_bios(dev);
+ ret = intel_parse_bios(dev);
if (ret)
DRM_INFO("failed to find VBIOS tables\n");
@@ -1423,6 +1247,8 @@ static int i915_load_modeset_init(struct drm_device *dev,
if (ret)
goto cleanup_ringbuffer;
+ intel_register_dsm_handler();
+
ret = vga_switcheroo_register_client(dev->pdev,
i915_switcheroo_set_state,
i915_switcheroo_can_switch);
@@ -1443,17 +1269,15 @@ static int i915_load_modeset_init(struct drm_device *dev,
/* FIXME: do pre/post-mode set stuff in core KMS code */
dev->vblank_disable_allowed = 1;
- /*
- * Initialize the hardware status page IRQ location.
- */
-
- I915_WRITE(INSTPM, (1 << 5) | (1 << 21));
-
ret = intel_fbdev_init(dev);
if (ret)
goto cleanup_irq;
drm_kms_helper_poll_init(dev);
+
+ /* We're off and running w/KMS */
+ dev_priv->mm.suspended = 0;
+
return 0;
cleanup_irq:
@@ -1907,7 +1731,7 @@ static struct drm_i915_private *i915_mch_dev;
* - dev_priv->fmax
* - dev_priv->gpu_busy
*/
-DEFINE_SPINLOCK(mchdev_lock);
+static DEFINE_SPINLOCK(mchdev_lock);
/**
* i915_read_mch_val - return value for IPS use
@@ -2062,7 +1886,7 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags)
struct drm_i915_private *dev_priv;
resource_size_t base, size;
int ret = 0, mmio_bar;
- uint32_t agp_size, prealloc_size, prealloc_start;
+ uint32_t agp_size, prealloc_size;
/* i915 has 4 more counters */
dev->counters += 4;
dev->types[6] = _DRM_STAT_IRQ;
@@ -2079,7 +1903,7 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags)
dev_priv->info = (struct intel_device_info *) flags;
/* Add register map (needed for suspend/resume) */
- mmio_bar = IS_I9XX(dev) ? 0 : 1;
+ mmio_bar = IS_GEN2(dev) ? 1 : 0;
base = pci_resource_start(dev->pdev, mmio_bar);
size = pci_resource_len(dev->pdev, mmio_bar);
@@ -2121,17 +1945,32 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags)
"performance may suffer.\n");
}
- ret = i915_probe_agp(dev, &agp_size, &prealloc_size, &prealloc_start);
- if (ret)
+ dev_priv->mm.gtt = intel_gtt_get();
+ if (!dev_priv->mm.gtt) {
+ DRM_ERROR("Failed to initialize GTT\n");
+ ret = -ENODEV;
goto out_iomapfree;
-
- if (prealloc_size > intel_max_stolen) {
- DRM_INFO("detected %dM stolen memory, trimming to %dM\n",
- prealloc_size >> 20, intel_max_stolen >> 20);
- prealloc_size = intel_max_stolen;
}
- dev_priv->wq = create_singlethread_workqueue("i915");
+ prealloc_size = dev_priv->mm.gtt->gtt_stolen_entries << PAGE_SHIFT;
+ agp_size = dev_priv->mm.gtt->gtt_mappable_entries << PAGE_SHIFT;
+
+ /* The i915 workqueue is primarily used for batched retirement of
+ * requests (and thus managing bo) once the task has been completed
+ * by the GPU. i915_gem_retire_requests() is called directly when we
+ * need high-priority retirement, such as waiting for an explicit
+ * bo.
+ *
+ * It is also used for periodic low-priority events, such as
+ * idle-timers and hangcheck.
+ *
+ * All tasks on the workqueue are expected to acquire the dev mutex
+ * so there is no point in running more than one instance of the
+ * workqueue at any time: max_active = 1 and NON_REENTRANT.
+ */
+ dev_priv->wq = alloc_workqueue("i915",
+ WQ_UNBOUND | WQ_NON_REENTRANT,
+ 1);
if (dev_priv->wq == NULL) {
DRM_ERROR("Failed to create our workqueue.\n");
ret = -ENOMEM;
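/*
 * Editorial sketch, not part of the patch: the comment above holds because
 * every item queued on dev_priv->wq serialises on dev->struct_mutex anyway,
 * so a single unbound, non-reentrant worker is enough. Shape of a typical
 * handler (names and back-pointer use are illustrative):
 */
static void i915_example_wq_handler(struct work_struct *work)
{
	drm_i915_private_t *dev_priv =
		container_of(work, drm_i915_private_t, mm.retire_work.work);
	struct drm_device *dev = dev_priv->dev;

	mutex_lock(&dev->struct_mutex);		/* serialises all wq items */
	i915_gem_retire_requests(dev);
	mutex_unlock(&dev->struct_mutex);
}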
@@ -2159,13 +1998,18 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags)
dev->driver->get_vblank_counter = i915_get_vblank_counter;
dev->max_vblank_count = 0xffffff; /* only 24 bits of frame count */
- if (IS_G4X(dev) || IS_IRONLAKE(dev) || IS_GEN6(dev)) {
+ if (IS_G4X(dev) || IS_GEN5(dev) || IS_GEN6(dev)) {
dev->max_vblank_count = 0xffffffff; /* full 32 bit counter */
dev->driver->get_vblank_counter = gm45_get_vblank_counter;
}
/* Try to make sure MCHBAR is enabled before poking at it */
intel_setup_mchbar(dev);
+ intel_setup_gmbus(dev);
+ intel_opregion_setup(dev);
+
+ /* Make sure the bios did its job and set up vital registers */
+ intel_setup_bios(dev);
i915_gem_load(dev);
@@ -2178,7 +2022,7 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags)
if (IS_PINEVIEW(dev))
i915_pineview_get_mem_freq(dev);
- else if (IS_IRONLAKE(dev))
+ else if (IS_GEN5(dev))
i915_ironlake_get_mem_freq(dev);
/* On the 945G/GM, the chipset reports the MSI capability on the
@@ -2212,8 +2056,7 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags)
intel_detect_pch(dev);
if (drm_core_check_feature(dev, DRIVER_MODESET)) {
- ret = i915_load_modeset_init(dev, prealloc_start,
- prealloc_size, agp_size);
+ ret = i915_load_modeset_init(dev, prealloc_size, agp_size);
if (ret < 0) {
DRM_ERROR("failed to init modeset\n");
goto out_workqueue_free;
@@ -2221,7 +2064,8 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags)
}
/* Must be done after probing outputs */
- intel_opregion_init(dev, 0);
+ intel_opregion_init(dev);
+ acpi_video_register();
setup_timer(&dev_priv->hangcheck_timer, i915_hangcheck_elapsed,
(unsigned long) dev);
@@ -2231,9 +2075,6 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags)
dev_priv->mchdev_lock = &mchdev_lock;
spin_unlock(&mchdev_lock);
- /* XXX Prevent module unload due to memory corruption bugs. */
- __module_get(THIS_MODULE);
-
return 0;
out_workqueue_free:
@@ -2252,15 +2093,20 @@ free_priv:
int i915_driver_unload(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
-
- i915_destroy_error_state(dev);
+ int ret;
spin_lock(&mchdev_lock);
i915_mch_dev = NULL;
spin_unlock(&mchdev_lock);
- destroy_workqueue(dev_priv->wq);
- del_timer_sync(&dev_priv->hangcheck_timer);
+ mutex_lock(&dev->struct_mutex);
+ ret = i915_gpu_idle(dev);
+ if (ret)
+ DRM_ERROR("failed to idle hardware: %d\n", ret);
+ mutex_unlock(&dev->struct_mutex);
+
+ /* Cancel the retire work handler, which should be idle now. */
+ cancel_delayed_work_sync(&dev_priv->mm.retire_work);
io_mapping_free(dev_priv->mm.gtt_mapping);
if (dev_priv->mm.gtt_mtrr >= 0) {
@@ -2269,7 +2115,10 @@ int i915_driver_unload(struct drm_device *dev)
dev_priv->mm.gtt_mtrr = -1;
}
+ acpi_video_unregister();
+
if (drm_core_check_feature(dev, DRIVER_MODESET)) {
+ intel_fbdev_fini(dev);
intel_modeset_cleanup(dev);
/*
@@ -2281,20 +2130,25 @@ int i915_driver_unload(struct drm_device *dev)
dev_priv->child_dev = NULL;
dev_priv->child_dev_num = 0;
}
- drm_irq_uninstall(dev);
+
vga_switcheroo_unregister_client(dev->pdev);
vga_client_register(dev->pdev, NULL, NULL, NULL);
}
+ /* Free error state after interrupts are fully disabled. */
+ del_timer_sync(&dev_priv->hangcheck_timer);
+ cancel_work_sync(&dev_priv->error_work);
+ i915_destroy_error_state(dev);
+
if (dev->pdev->msi_enabled)
pci_disable_msi(dev->pdev);
- if (dev_priv->regs != NULL)
- iounmap(dev_priv->regs);
-
- intel_opregion_free(dev, 0);
+ intel_opregion_fini(dev);
if (drm_core_check_feature(dev, DRIVER_MODESET)) {
+ /* Flush any outstanding unpin_work. */
+ flush_workqueue(dev_priv->wq);
+
i915_gem_free_all_phys_object(dev);
mutex_lock(&dev->struct_mutex);
@@ -2302,34 +2156,41 @@ int i915_driver_unload(struct drm_device *dev)
mutex_unlock(&dev->struct_mutex);
if (I915_HAS_FBC(dev) && i915_powersave)
i915_cleanup_compression(dev);
- drm_mm_takedown(&dev_priv->vram);
- i915_gem_lastclose(dev);
+ drm_mm_takedown(&dev_priv->mm.vram);
intel_cleanup_overlay(dev);
+
+ if (!I915_NEED_GFX_HWS(dev))
+ i915_free_hws(dev);
}
+ if (dev_priv->regs != NULL)
+ iounmap(dev_priv->regs);
+
+ intel_teardown_gmbus(dev);
intel_teardown_mchbar(dev);
+ destroy_workqueue(dev_priv->wq);
+
pci_dev_put(dev_priv->bridge_dev);
kfree(dev->dev_private);
return 0;
}
-int i915_driver_open(struct drm_device *dev, struct drm_file *file_priv)
+int i915_driver_open(struct drm_device *dev, struct drm_file *file)
{
- struct drm_i915_file_private *i915_file_priv;
+ struct drm_i915_file_private *file_priv;
DRM_DEBUG_DRIVER("\n");
- i915_file_priv = (struct drm_i915_file_private *)
- kmalloc(sizeof(*i915_file_priv), GFP_KERNEL);
-
- if (!i915_file_priv)
+ file_priv = kmalloc(sizeof(*file_priv), GFP_KERNEL);
+ if (!file_priv)
return -ENOMEM;
- file_priv->driver_priv = i915_file_priv;
+ file->driver_priv = file_priv;
- INIT_LIST_HEAD(&i915_file_priv->mm.request_list);
+ spin_lock_init(&file_priv->mm.lock);
+ INIT_LIST_HEAD(&file_priv->mm.request_list);
return 0;
}
@@ -2372,11 +2233,11 @@ void i915_driver_preclose(struct drm_device * dev, struct drm_file *file_priv)
i915_mem_release(dev, file_priv, dev_priv->agp_heap);
}
-void i915_driver_postclose(struct drm_device *dev, struct drm_file *file_priv)
+void i915_driver_postclose(struct drm_device *dev, struct drm_file *file)
{
- struct drm_i915_file_private *i915_file_priv = file_priv->driver_priv;
+ struct drm_i915_file_private *file_priv = file->driver_priv;
- kfree(i915_file_priv);
+ kfree(file_priv);
}
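/*
 * Editorial sketch, not part of the patch: the spinlock initialised in
 * i915_driver_open() above guards file_priv->mm.request_list, so a client's
 * outstanding requests can be walked without taking dev->struct_mutex.
 * Illustrative consumer:
 */
struct drm_i915_gem_request *request;

spin_lock(&file_priv->mm.lock);
list_for_each_entry(request, &file_priv->mm.request_list, client_list)
	;	/* inspect or age out the client's outstanding requests */
spin_unlock(&file_priv->mm.lock);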
struct drm_ioctl_desc i915_ioctls[] = {
diff --git a/drivers/gpu/drm/i915/i915_drv.c b/drivers/gpu/drm/i915/i915_drv.c
index 895ab896e336..f737960712e6 100644
--- a/drivers/gpu/drm/i915/i915_drv.c
+++ b/drivers/gpu/drm/i915/i915_drv.c
@@ -32,6 +32,7 @@
#include "drm.h"
#include "i915_drm.h"
#include "i915_drv.h"
+#include "intel_drv.h"
#include <linux/console.h>
#include "drm_crtc_helper.h"
@@ -43,7 +44,7 @@ unsigned int i915_fbpercrtc = 0;
module_param_named(fbpercrtc, i915_fbpercrtc, int, 0400);
unsigned int i915_powersave = 1;
-module_param_named(powersave, i915_powersave, int, 0400);
+module_param_named(powersave, i915_powersave, int, 0600);
unsigned int i915_lvds_downclock = 0;
module_param_named(lvds_downclock, i915_lvds_downclock, int, 0400);
@@ -61,86 +62,111 @@ extern int intel_agp_enabled;
.driver_data = (unsigned long) info }
static const struct intel_device_info intel_i830_info = {
- .gen = 2, .is_i8xx = 1, .is_mobile = 1, .cursor_needs_physical = 1,
+ .gen = 2, .is_mobile = 1, .cursor_needs_physical = 1,
+ .has_overlay = 1, .overlay_needs_physical = 1,
};
static const struct intel_device_info intel_845g_info = {
- .gen = 2, .is_i8xx = 1,
+ .gen = 2,
+ .has_overlay = 1, .overlay_needs_physical = 1,
};
static const struct intel_device_info intel_i85x_info = {
- .gen = 2, .is_i8xx = 1, .is_i85x = 1, .is_mobile = 1,
+ .gen = 2, .is_i85x = 1, .is_mobile = 1,
.cursor_needs_physical = 1,
+ .has_overlay = 1, .overlay_needs_physical = 1,
};
static const struct intel_device_info intel_i865g_info = {
- .gen = 2, .is_i8xx = 1,
+ .gen = 2,
+ .has_overlay = 1, .overlay_needs_physical = 1,
};
static const struct intel_device_info intel_i915g_info = {
- .gen = 3, .is_i915g = 1, .is_i9xx = 1, .cursor_needs_physical = 1,
+ .gen = 3, .is_i915g = 1, .cursor_needs_physical = 1,
+ .has_overlay = 1, .overlay_needs_physical = 1,
};
static const struct intel_device_info intel_i915gm_info = {
- .gen = 3, .is_i9xx = 1, .is_mobile = 1,
+ .gen = 3, .is_mobile = 1,
.cursor_needs_physical = 1,
+ .has_overlay = 1, .overlay_needs_physical = 1,
+ .supports_tv = 1,
};
static const struct intel_device_info intel_i945g_info = {
- .gen = 3, .is_i9xx = 1, .has_hotplug = 1, .cursor_needs_physical = 1,
+ .gen = 3, .has_hotplug = 1, .cursor_needs_physical = 1,
+ .has_overlay = 1, .overlay_needs_physical = 1,
};
static const struct intel_device_info intel_i945gm_info = {
- .gen = 3, .is_i945gm = 1, .is_i9xx = 1, .is_mobile = 1,
+ .gen = 3, .is_i945gm = 1, .is_mobile = 1,
.has_hotplug = 1, .cursor_needs_physical = 1,
+ .has_overlay = 1, .overlay_needs_physical = 1,
+ .supports_tv = 1,
};
static const struct intel_device_info intel_i965g_info = {
- .gen = 4, .is_broadwater = 1, .is_i965g = 1, .is_i9xx = 1,
+ .gen = 4, .is_broadwater = 1,
.has_hotplug = 1,
+ .has_overlay = 1,
};
static const struct intel_device_info intel_i965gm_info = {
- .gen = 4, .is_crestline = 1, .is_i965g = 1, .is_i965gm = 1, .is_i9xx = 1,
+ .gen = 4, .is_crestline = 1,
.is_mobile = 1, .has_fbc = 1, .has_rc6 = 1, .has_hotplug = 1,
+ .has_overlay = 1,
+ .supports_tv = 1,
};
static const struct intel_device_info intel_g33_info = {
- .gen = 3, .is_g33 = 1, .is_i9xx = 1,
+ .gen = 3, .is_g33 = 1,
.need_gfx_hws = 1, .has_hotplug = 1,
+ .has_overlay = 1,
};
static const struct intel_device_info intel_g45_info = {
- .gen = 4, .is_i965g = 1, .is_g4x = 1, .is_i9xx = 1, .need_gfx_hws = 1,
+ .gen = 4, .is_g4x = 1, .need_gfx_hws = 1,
.has_pipe_cxsr = 1, .has_hotplug = 1,
+ .has_bsd_ring = 1,
};
static const struct intel_device_info intel_gm45_info = {
- .gen = 4, .is_i965g = 1, .is_g4x = 1, .is_i9xx = 1,
+ .gen = 4, .is_g4x = 1,
.is_mobile = 1, .need_gfx_hws = 1, .has_fbc = 1, .has_rc6 = 1,
.has_pipe_cxsr = 1, .has_hotplug = 1,
+ .supports_tv = 1,
+ .has_bsd_ring = 1,
};
static const struct intel_device_info intel_pineview_info = {
- .gen = 3, .is_g33 = 1, .is_pineview = 1, .is_mobile = 1, .is_i9xx = 1,
+ .gen = 3, .is_g33 = 1, .is_pineview = 1, .is_mobile = 1,
.need_gfx_hws = 1, .has_hotplug = 1,
+ .has_overlay = 1,
};
static const struct intel_device_info intel_ironlake_d_info = {
- .gen = 5, .is_ironlake = 1, .is_i965g = 1, .is_i9xx = 1,
+ .gen = 5,
.need_gfx_hws = 1, .has_pipe_cxsr = 1, .has_hotplug = 1,
+ .has_bsd_ring = 1,
};
static const struct intel_device_info intel_ironlake_m_info = {
- .gen = 5, .is_ironlake = 1, .is_mobile = 1, .is_i965g = 1, .is_i9xx = 1,
- .need_gfx_hws = 1, .has_fbc = 1, .has_rc6 = 1, .has_hotplug = 1,
+ .gen = 5, .is_mobile = 1,
+ .need_gfx_hws = 1, .has_rc6 = 1, .has_hotplug = 1,
+ .has_fbc = 0, /* disabled due to buggy hardware */
+ .has_bsd_ring = 1,
};
static const struct intel_device_info intel_sandybridge_d_info = {
- .gen = 6, .is_i965g = 1, .is_i9xx = 1,
+ .gen = 6,
.need_gfx_hws = 1, .has_hotplug = 1,
+ .has_bsd_ring = 1,
+ .has_blt_ring = 1,
};
static const struct intel_device_info intel_sandybridge_m_info = {
- .gen = 6, .is_i965g = 1, .is_mobile = 1, .is_i9xx = 1,
+ .gen = 6, .is_mobile = 1,
.need_gfx_hws = 1, .has_hotplug = 1,
+ .has_bsd_ring = 1,
+ .has_blt_ring = 1,
};
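/*
 * Editorial sketch, not part of the patch: the capability bits added to the
 * tables above are consumed through the HAS_*() macros reworked in i915_drv.h
 * below, so ring bring-up becomes table-driven. The intel_init_*_ring_buffer()
 * names here are illustrative only:
 */
if (HAS_BSD(dev))
	ret = intel_init_bsd_ring_buffer(dev);
if (ret == 0 && HAS_BLT(dev))
	ret = intel_init_blt_ring_buffer(dev);	/* Sandybridge sets .has_blt_ring */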
static const struct pci_device_id pciidlist[] = { /* aka */
@@ -237,7 +263,7 @@ static int i915_drm_freeze(struct drm_device *dev)
i915_save_state(dev);
- intel_opregion_free(dev, 1);
+ intel_opregion_fini(dev);
/* Modeset on resume, not lid events */
dev_priv->modeset_on_lid = 0;
@@ -258,6 +284,8 @@ int i915_suspend(struct drm_device *dev, pm_message_t state)
if (state.event == PM_EVENT_PRETHAW)
return 0;
+ drm_kms_helper_poll_disable(dev);
+
error = i915_drm_freeze(dev);
if (error)
return error;
@@ -277,8 +305,7 @@ static int i915_drm_thaw(struct drm_device *dev)
int error = 0;
i915_restore_state(dev);
-
- intel_opregion_init(dev, 1);
+ intel_opregion_setup(dev);
/* KMS EnterVT equivalent */
if (drm_core_check_feature(dev, DRIVER_MODESET)) {
@@ -294,6 +321,8 @@ static int i915_drm_thaw(struct drm_device *dev)
drm_helper_resume_force_mode(dev);
}
+ intel_opregion_init(dev);
+
dev_priv->modeset_on_lid = 0;
return error;
@@ -301,12 +330,79 @@ static int i915_drm_thaw(struct drm_device *dev)
int i915_resume(struct drm_device *dev)
{
+ int ret;
+
if (pci_enable_device(dev->pdev))
return -EIO;
pci_set_master(dev->pdev);
- return i915_drm_thaw(dev);
+ ret = i915_drm_thaw(dev);
+ if (ret)
+ return ret;
+
+ drm_kms_helper_poll_enable(dev);
+ return 0;
+}
+
+static int i8xx_do_reset(struct drm_device *dev, u8 flags)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+
+ if (IS_I85X(dev))
+ return -ENODEV;
+
+ I915_WRITE(D_STATE, I915_READ(D_STATE) | DSTATE_GFX_RESET_I830);
+ POSTING_READ(D_STATE);
+
+ if (IS_I830(dev) || IS_845G(dev)) {
+ I915_WRITE(DEBUG_RESET_I830,
+ DEBUG_RESET_DISPLAY |
+ DEBUG_RESET_RENDER |
+ DEBUG_RESET_FULL);
+ POSTING_READ(DEBUG_RESET_I830);
+ msleep(1);
+
+ I915_WRITE(DEBUG_RESET_I830, 0);
+ POSTING_READ(DEBUG_RESET_I830);
+ }
+
+ msleep(1);
+
+ I915_WRITE(D_STATE, I915_READ(D_STATE) & ~DSTATE_GFX_RESET_I830);
+ POSTING_READ(D_STATE);
+
+ return 0;
+}
+
+static int i965_reset_complete(struct drm_device *dev)
+{
+ u8 gdrst;
+ pci_read_config_byte(dev->pdev, I965_GDRST, &gdrst);
+ return gdrst & 0x1;
+}
+
+static int i965_do_reset(struct drm_device *dev, u8 flags)
+{
+ u8 gdrst;
+
+ /*
+ * Set the domains we want to reset (GRDOM/bits 2 and 3) as
+ * well as the reset bit (GR/bit 0). Setting the GR bit
+ * triggers the reset; when done, the hardware will clear it.
+ */
+ pci_read_config_byte(dev->pdev, I965_GDRST, &gdrst);
+ pci_write_config_byte(dev->pdev, I965_GDRST, gdrst | flags | 0x1);
+
+ return wait_for(i965_reset_complete(dev), 500);
+}
+
+static int ironlake_do_reset(struct drm_device *dev, u8 flags)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ u32 gdrst = I915_READ(MCHBAR_MIRROR_BASE + ILK_GDSR);
+ I915_WRITE(MCHBAR_MIRROR_BASE + ILK_GDSR, gdrst | flags | 0x1);
+ return wait_for(I915_READ(MCHBAR_MIRROR_BASE + ILK_GDSR) & 0x1, 500);
}
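/*
 * Editorial sketch, not part of the patch: the wait_for(COND, TIMEOUT_MS)
 * helper used by the reset paths above amounts to a bounded poll. Spelled out
 * with jiffies (function name illustrative):
 */
static int i915_example_poll_reset(struct drm_device *dev, unsigned int timeout_ms)
{
	unsigned long deadline = jiffies + msecs_to_jiffies(timeout_ms);

	do {
		if (i965_reset_complete(dev))
			return 0;
		msleep(1);			/* back off instead of busy-waiting */
	} while (time_before(jiffies, deadline));

	return -ETIMEDOUT;
}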
/**
@@ -325,54 +421,39 @@ int i915_resume(struct drm_device *dev)
* - re-init interrupt state
* - re-init display
*/
-int i965_reset(struct drm_device *dev, u8 flags)
+int i915_reset(struct drm_device *dev, u8 flags)
{
drm_i915_private_t *dev_priv = dev->dev_private;
- unsigned long timeout;
- u8 gdrst;
/*
* We really should only reset the display subsystem if we actually
* need to
*/
bool need_display = true;
+ int ret;
mutex_lock(&dev->struct_mutex);
- /*
- * Clear request list
- */
- i915_gem_retire_requests(dev);
-
- if (need_display)
- i915_save_display(dev);
-
- if (IS_I965G(dev) || IS_G4X(dev)) {
- /*
- * Set the domains we want to reset, then the reset bit (bit 0).
- * Clear the reset bit after a while and wait for hardware status
- * bit (bit 1) to be set
- */
- pci_read_config_byte(dev->pdev, GDRST, &gdrst);
- pci_write_config_byte(dev->pdev, GDRST, gdrst | flags | ((flags == GDRST_FULL) ? 0x1 : 0x0));
- udelay(50);
- pci_write_config_byte(dev->pdev, GDRST, gdrst & 0xfe);
-
- /* ...we don't want to loop forever though, 500ms should be plenty */
- timeout = jiffies + msecs_to_jiffies(500);
- do {
- udelay(100);
- pci_read_config_byte(dev->pdev, GDRST, &gdrst);
- } while ((gdrst & 0x1) && time_after(timeout, jiffies));
-
- if (gdrst & 0x1) {
- WARN(true, "i915: Failed to reset chip\n");
- mutex_unlock(&dev->struct_mutex);
- return -EIO;
- }
- } else {
- DRM_ERROR("Error occurred. Don't know how to reset this chip.\n");
+ i915_gem_reset(dev);
+
+ ret = -ENODEV;
+ if (get_seconds() - dev_priv->last_gpu_reset < 5) {
+ DRM_ERROR("GPU hanging too fast, declaring wedged!\n");
+ } else switch (INTEL_INFO(dev)->gen) {
+ case 5:
+ ret = ironlake_do_reset(dev, flags);
+ break;
+ case 4:
+ ret = i965_do_reset(dev, flags);
+ break;
+ case 2:
+ ret = i8xx_do_reset(dev, flags);
+ break;
+ }
+ dev_priv->last_gpu_reset = get_seconds();
+ if (ret) {
+ DRM_ERROR("Failed to reset chip.\n");
mutex_unlock(&dev->struct_mutex);
- return -ENODEV;
+ return ret;
}
/* Ok, now get things going again... */
@@ -400,13 +481,19 @@ int i965_reset(struct drm_device *dev, u8 flags)
mutex_lock(&dev->struct_mutex);
}
+ mutex_unlock(&dev->struct_mutex);
+
/*
- * Display needs restore too...
+ * Perform a full modeset as on later generations, e.g. Ironlake, we may
+ * need to retrain the display link and cannot just restore the register
+ * values.
*/
- if (need_display)
- i915_restore_display(dev);
+ if (need_display) {
+ mutex_lock(&dev->mode_config.mutex);
+ drm_helper_resume_force_mode(dev);
+ mutex_unlock(&dev->mode_config.mutex);
+ }
- mutex_unlock(&dev->struct_mutex);
return 0;
}
@@ -524,8 +611,6 @@ static struct drm_driver driver = {
.irq_uninstall = i915_driver_irq_uninstall,
.irq_handler = i915_driver_irq_handler,
.reclaim_buffers = drm_core_reclaim_buffers,
- .get_map_ofs = drm_core_get_map_ofs,
- .get_reg_ofs = drm_core_get_reg_ofs,
.master_create = i915_master_create,
.master_destroy = i915_master_destroy,
#if defined(CONFIG_DEBUG_FS)
diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index af4a263cf257..409826da3099 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -34,6 +34,8 @@
#include "intel_bios.h"
#include "intel_ringbuffer.h"
#include <linux/io-mapping.h>
+#include <linux/i2c.h>
+#include <drm/intel-gtt.h>
/* General customization:
*/
@@ -73,11 +75,9 @@ enum plane {
#define DRIVER_PATCHLEVEL 0
#define WATCH_COHERENCY 0
-#define WATCH_BUF 0
#define WATCH_EXEC 0
-#define WATCH_LRU 0
#define WATCH_RELOC 0
-#define WATCH_INACTIVE 0
+#define WATCH_LISTS 0
#define WATCH_PWRITE 0
#define I915_GEM_PHYS_CURSOR_0 1
@@ -110,8 +110,9 @@ struct intel_opregion {
struct opregion_acpi *acpi;
struct opregion_swsci *swsci;
struct opregion_asle *asle;
- int enabled;
+ void *vbt;
};
+#define OPREGION_SIZE (8*1024)
struct intel_overlay;
struct intel_overlay_error_state;
@@ -125,13 +126,16 @@ struct drm_i915_master_private {
struct drm_i915_fence_reg {
struct drm_gem_object *obj;
struct list_head lru_list;
+ bool gpu;
};
struct sdvo_device_mapping {
+ u8 initialized;
u8 dvo_port;
u8 slave_addr;
u8 dvo_wiring;
- u8 initialized;
+ u8 i2c_pin;
+ u8 i2c_speed;
u8 ddc_pin;
};
@@ -193,28 +197,29 @@ struct drm_i915_display_funcs {
struct intel_device_info {
u8 gen;
u8 is_mobile : 1;
- u8 is_i8xx : 1;
u8 is_i85x : 1;
u8 is_i915g : 1;
- u8 is_i9xx : 1;
u8 is_i945gm : 1;
- u8 is_i965g : 1;
- u8 is_i965gm : 1;
u8 is_g33 : 1;
u8 need_gfx_hws : 1;
u8 is_g4x : 1;
u8 is_pineview : 1;
u8 is_broadwater : 1;
u8 is_crestline : 1;
- u8 is_ironlake : 1;
u8 has_fbc : 1;
u8 has_rc6 : 1;
u8 has_pipe_cxsr : 1;
u8 has_hotplug : 1;
u8 cursor_needs_physical : 1;
+ u8 has_overlay : 1;
+ u8 overlay_needs_physical : 1;
+ u8 supports_tv : 1;
+ u8 has_bsd_ring : 1;
+ u8 has_blt_ring : 1;
};
enum no_fbc_reason {
+ FBC_NO_OUTPUT, /* no outputs enabled to compress */
FBC_STOLEN_TOO_SMALL, /* not enough space to hold compressed buffers */
FBC_UNSUPPORTED_MODE, /* interlace or doublescanned mode */
FBC_MODE_TOO_LARGE, /* mode too large for compression */
@@ -241,9 +246,16 @@ typedef struct drm_i915_private {
void __iomem *regs;
+ struct intel_gmbus {
+ struct i2c_adapter adapter;
+ struct i2c_adapter *force_bit;
+ u32 reg0;
+ } *gmbus;
+
struct pci_dev *bridge_dev;
struct intel_ring_buffer render_ring;
struct intel_ring_buffer bsd_ring;
+ struct intel_ring_buffer blt_ring;
uint32_t next_seqno;
drm_dma_handle_t *status_page_dmah;
@@ -263,6 +275,9 @@ typedef struct drm_i915_private {
int front_offset;
int current_page;
int page_flipping;
+#define I915_DEBUG_READ (1<<0)
+#define I915_DEBUG_WRITE (1<<1)
+ unsigned long debug_flags;
wait_queue_head_t irq_queue;
atomic_t irq_received;
@@ -289,24 +304,21 @@ typedef struct drm_i915_private {
unsigned int sr01, adpa, ppcr, dvob, dvoc, lvds;
int vblank_pipe;
int num_pipe;
- u32 flush_rings;
-#define FLUSH_RENDER_RING 0x1
-#define FLUSH_BSD_RING 0x2
/* For hangcheck timer */
-#define DRM_I915_HANGCHECK_PERIOD 75 /* in jiffies */
+#define DRM_I915_HANGCHECK_PERIOD 250 /* in ms */
struct timer_list hangcheck_timer;
int hangcheck_count;
uint32_t last_acthd;
uint32_t last_instdone;
uint32_t last_instdone1;
- struct drm_mm vram;
-
unsigned long cfb_size;
unsigned long cfb_pitch;
+ unsigned long cfb_offset;
int cfb_fence;
int cfb_plane;
+ int cfb_y;
int irq_enabled;
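/*
 * Editorial note, not part of the patch: with DRM_I915_HANGCHECK_PERIOD now
 * expressed in milliseconds rather than jiffies, re-arming the timer converts
 * explicitly, e.g.:
 *
 *	mod_timer(&dev_priv->hangcheck_timer,
 *		  jiffies + msecs_to_jiffies(DRM_I915_HANGCHECK_PERIOD));
 */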
@@ -316,8 +328,7 @@ typedef struct drm_i915_private {
struct intel_overlay *overlay;
/* LVDS info */
- int backlight_duty_cycle; /* restore backlight to this value */
- bool panel_wants_dither;
+ int backlight_level; /* restore backlight to this value */
struct drm_display_mode *panel_fixed_mode;
struct drm_display_mode *lfp_lvds_vbt_mode; /* if any */
struct drm_display_mode *sdvo_lvds_vbt_mode; /* if any */
@@ -328,13 +339,23 @@ typedef struct drm_i915_private {
unsigned int lvds_vbt:1;
unsigned int int_crt_support:1;
unsigned int lvds_use_ssc:1;
- unsigned int edp_support:1;
int lvds_ssc_freq;
- int edp_bpp;
+ struct {
+ int rate;
+ int lanes;
+ int preemphasis;
+ int vswing;
+
+ bool initialized;
+ bool support;
+ int bpp;
+ struct edp_power_seq pps;
+ } edp;
+ bool no_aux_handshake;
struct notifier_block lid_notifier;
- int crt_ddc_bus; /* 0 = unknown, else GPIO to use for CRT DDC */
+ int crt_ddc_pin;
struct drm_i915_fence_reg fence_regs[16]; /* assume 965 */
int fence_reg_start; /* 4 if userland hasn't ioctl'd us yet */
int num_fence_regs; /* 8 on pre-965, 16 otherwise */
@@ -344,6 +365,7 @@ typedef struct drm_i915_private {
spinlock_t error_lock;
struct drm_i915_error_state *first_error;
struct work_struct error_work;
+ struct completion error_completion;
struct workqueue_struct *wq;
/* Display functions */
@@ -507,6 +529,11 @@ typedef struct drm_i915_private {
u32 saveMCHBAR_RENDER_STANDBY;
struct {
+ /** Bridge to intel-gtt-ko */
+ struct intel_gtt *gtt;
+ /** Memory allocator for GTT stolen memory */
+ struct drm_mm vram;
+ /** Memory allocator for GTT */
struct drm_mm gtt_space;
struct io_mapping *gtt_mapping;
@@ -521,7 +548,16 @@ typedef struct drm_i915_private {
*/
struct list_head shrink_list;
- spinlock_t active_list_lock;
+ /**
+ * List of objects currently involved in rendering.
+ *
+ * Includes buffers having the contents of their GPU caches
+ * flushed, not necessarily primitives. last_rendering_seqno
+ * represents when the rendering involved will be completed.
+ *
+ * A reference is held on the buffer while on this list.
+ */
+ struct list_head active_list;
/**
* List of objects which are not in the ringbuffer but which
@@ -535,15 +571,6 @@ typedef struct drm_i915_private {
struct list_head flushing_list;
/**
- * List of objects currently pending a GPU write flush.
- *
- * All elements on this list will belong to either the
- * active_list or flushing_list, last_rendering_seqno can
- * be used to differentiate between the two elements.
- */
- struct list_head gpu_write_list;
-
- /**
* LRU list of objects which are not in the ringbuffer and
* are ready to unbind, but are still in the GTT.
*
@@ -555,6 +582,12 @@ typedef struct drm_i915_private {
*/
struct list_head inactive_list;
+ /**
+ * LRU list of objects which are not in the ringbuffer but
+ * are still pinned in the GTT.
+ */
+ struct list_head pinned_list;
+
/** LRU list of objects with fence regs on them. */
struct list_head fence_list;
@@ -611,6 +644,17 @@ typedef struct drm_i915_private {
/* storage for physical objects */
struct drm_i915_gem_phys_object *phys_objs[I915_MAX_PHYS_OBJECT];
+
+ uint32_t flush_rings;
+
+ /* accounting, useful for userland debugging */
+ size_t object_memory;
+ size_t pin_memory;
+ size_t gtt_memory;
+ size_t gtt_total;
+ u32 object_count;
+ u32 pin_count;
+ u32 gtt_count;
} mm;
struct sdvo_device_mapping sdvo_mappings[2];
/* indicate whether the LVDS_BORDER should be enabled or not */
@@ -626,8 +670,6 @@ typedef struct drm_i915_private {
/* Reclocking support */
bool render_reclock_avail;
bool lvds_downclock_avail;
- /* indicate whether the LVDS EDID is OK */
- bool lvds_edid_good;
/* indicates the reduced downclock for LVDS*/
int lvds_downclock;
struct work_struct idle_work;
@@ -661,6 +703,8 @@ typedef struct drm_i915_private {
struct drm_mm_node *compressed_fb;
struct drm_mm_node *compressed_llb;
+ unsigned long last_gpu_reset;
+
/* list of fbdev register on this device */
struct intel_fbdev *fbdev;
} drm_i915_private_t;
@@ -673,7 +717,8 @@ struct drm_i915_gem_object {
struct drm_mm_node *gtt_space;
/** This object's place on the active/flushing/inactive lists */
- struct list_head list;
+ struct list_head ring_list;
+ struct list_head mm_list;
/** This object's place on GPU write list */
struct list_head gpu_write_list;
/** This object's place on eviction list */
@@ -816,12 +861,14 @@ struct drm_i915_gem_request {
/** global list entry for this request */
struct list_head list;
+ struct drm_i915_file_private *file_priv;
/** file_priv list entry for this request */
struct list_head client_list;
};
struct drm_i915_file_private {
struct {
+ struct spinlock lock;
struct list_head request_list;
} mm;
};
@@ -862,7 +909,7 @@ extern long i915_compat_ioctl(struct file *filp, unsigned int cmd,
extern int i915_emit_box(struct drm_device *dev,
struct drm_clip_rect *boxes,
int i, int DR1, int DR4);
-extern int i965_reset(struct drm_device *dev, u8 flags);
+extern int i915_reset(struct drm_device *dev, u8 flags);
extern unsigned long i915_chipset_val(struct drm_i915_private *dev_priv);
extern unsigned long i915_mch_val(struct drm_i915_private *dev_priv);
extern unsigned long i915_gfx_val(struct drm_i915_private *dev_priv);
@@ -871,7 +918,6 @@ extern void i915_update_gfx_val(struct drm_i915_private *dev_priv);
/* i915_irq.c */
void i915_hangcheck_elapsed(unsigned long data);
-void i915_destroy_error_state(struct drm_device *dev);
extern int i915_irq_emit(struct drm_device *dev, void *data,
struct drm_file *file_priv);
extern int i915_irq_wait(struct drm_device *dev, void *data,
@@ -908,6 +954,12 @@ i915_disable_pipestat(drm_i915_private_t *dev_priv, int pipe, u32 mask);
void intel_enable_asle (struct drm_device *dev);
+#ifdef CONFIG_DEBUG_FS
+extern void i915_destroy_error_state(struct drm_device *dev);
+#else
+#define i915_destroy_error_state(x)
+#endif
+
/* i915_mem.c */
extern int i915_mem_alloc(struct drm_device *dev, void *data,
@@ -922,6 +974,7 @@ extern void i915_mem_takedown(struct mem_block **heap);
extern void i915_mem_release(struct drm_device * dev,
struct drm_file *file_priv, struct mem_block *heap);
/* i915_gem.c */
+int i915_gem_check_is_wedged(struct drm_device *dev);
int i915_gem_init_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_priv);
int i915_gem_create_ioctl(struct drm_device *dev, void *data,
@@ -972,17 +1025,28 @@ void i915_gem_object_unpin(struct drm_gem_object *obj);
int i915_gem_object_unbind(struct drm_gem_object *obj);
void i915_gem_release_mmap(struct drm_gem_object *obj);
void i915_gem_lastclose(struct drm_device *dev);
-uint32_t i915_get_gem_seqno(struct drm_device *dev,
- struct intel_ring_buffer *ring);
-bool i915_seqno_passed(uint32_t seq1, uint32_t seq2);
-int i915_gem_object_get_fence_reg(struct drm_gem_object *obj);
-int i915_gem_object_put_fence_reg(struct drm_gem_object *obj);
+
+/**
+ * Returns true if seq1 is later than seq2.
+ */
+static inline bool
+i915_seqno_passed(uint32_t seq1, uint32_t seq2)
+{
+ return (int32_t)(seq1 - seq2) >= 0;
+}
+
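/*
 * Editorial illustration, not part of the patch: the subtraction above is
 * wrap-safe as long as the two sequence numbers are less than 2^31 apart.
 * Worked example around the 32-bit wrap:
 *
 *	i915_seqno_passed(5, 0xfffffff0)  -> (int32_t)0x00000015 >= 0 -> true
 *	i915_seqno_passed(0xfffffff0, 5)  -> (int32_t)0xffffffeb <  0 -> false
 *
 * so a request issued just after the counter wraps still compares as "later"
 * than one issued just before it.
 */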
+int i915_gem_object_get_fence_reg(struct drm_gem_object *obj,
+ bool interruptible);
+int i915_gem_object_put_fence_reg(struct drm_gem_object *obj,
+ bool interruptible);
void i915_gem_retire_requests(struct drm_device *dev);
-void i915_gem_retire_work_handler(struct work_struct *work);
+void i915_gem_reset(struct drm_device *dev);
void i915_gem_clflush_object(struct drm_gem_object *obj);
int i915_gem_object_set_domain(struct drm_gem_object *obj,
uint32_t read_domains,
uint32_t write_domain);
+int i915_gem_object_flush_gpu(struct drm_i915_gem_object *obj,
+ bool interruptible);
int i915_gem_init_ringbuffer(struct drm_device *dev);
void i915_gem_cleanup_ringbuffer(struct drm_device *dev);
int i915_gem_do_init(struct drm_device *dev, unsigned long start,
@@ -990,16 +1054,18 @@ int i915_gem_do_init(struct drm_device *dev, unsigned long start,
int i915_gpu_idle(struct drm_device *dev);
int i915_gem_idle(struct drm_device *dev);
uint32_t i915_add_request(struct drm_device *dev,
- struct drm_file *file_priv,
- uint32_t flush_domains,
- struct intel_ring_buffer *ring);
+ struct drm_file *file_priv,
+ struct drm_i915_gem_request *request,
+ struct intel_ring_buffer *ring);
int i915_do_wait_request(struct drm_device *dev,
- uint32_t seqno, int interruptible,
- struct intel_ring_buffer *ring);
+ uint32_t seqno,
+ bool interruptible,
+ struct intel_ring_buffer *ring);
int i915_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf);
int i915_gem_object_set_to_gtt_domain(struct drm_gem_object *obj,
int write);
-int i915_gem_object_set_to_display_plane(struct drm_gem_object *obj);
+int i915_gem_object_set_to_display_plane(struct drm_gem_object *obj,
+ bool pipelined);
int i915_gem_attach_phys_object(struct drm_device *dev,
struct drm_gem_object *obj,
int id,
@@ -1007,10 +1073,7 @@ int i915_gem_attach_phys_object(struct drm_device *dev,
void i915_gem_detach_phys_object(struct drm_device *dev,
struct drm_gem_object *obj);
void i915_gem_free_all_phys_object(struct drm_device *dev);
-int i915_gem_object_get_pages(struct drm_gem_object *obj, gfp_t gfpmask);
-void i915_gem_object_put_pages(struct drm_gem_object *obj);
void i915_gem_release(struct drm_device * dev, struct drm_file *file_priv);
-int i915_gem_object_flush_write_domain(struct drm_gem_object *obj);
void i915_gem_shrinker_init(void);
void i915_gem_shrinker_exit(void);
@@ -1032,15 +1095,14 @@ bool i915_gem_object_fence_offset_ok(struct drm_gem_object *obj,
/* i915_gem_debug.c */
void i915_gem_dump_object(struct drm_gem_object *obj, int len,
const char *where, uint32_t mark);
-#if WATCH_INACTIVE
-void i915_verify_inactive(struct drm_device *dev, char *file, int line);
+#if WATCH_LISTS
+int i915_verify_lists(struct drm_device *dev);
#else
-#define i915_verify_inactive(dev, file, line)
+#define i915_verify_lists(dev) 0
#endif
void i915_gem_object_check_coherency(struct drm_gem_object *obj, int handle);
void i915_gem_dump_object(struct drm_gem_object *obj, int len,
const char *where, uint32_t mark);
-void i915_dump_lru(struct drm_device *dev, const char *where);
/* i915_debugfs.c */
int i915_debugfs_init(struct drm_minor *minor);
@@ -1054,21 +1116,42 @@ extern int i915_restore_state(struct drm_device *dev);
extern int i915_save_state(struct drm_device *dev);
extern int i915_restore_state(struct drm_device *dev);
+/* intel_i2c.c */
+extern int intel_setup_gmbus(struct drm_device *dev);
+extern void intel_teardown_gmbus(struct drm_device *dev);
+extern void intel_gmbus_set_speed(struct i2c_adapter *adapter, int speed);
+extern void intel_gmbus_force_bit(struct i2c_adapter *adapter, bool force_bit);
+extern inline bool intel_gmbus_is_forced_bit(struct i2c_adapter *adapter)
+{
+ return container_of(adapter, struct intel_gmbus, adapter)->force_bit;
+}
+extern void intel_i2c_reset(struct drm_device *dev);
+
+/* intel_opregion.c */
+extern int intel_opregion_setup(struct drm_device *dev);
#ifdef CONFIG_ACPI
-/* i915_opregion.c */
-extern int intel_opregion_init(struct drm_device *dev, int resume);
-extern void intel_opregion_free(struct drm_device *dev, int suspend);
-extern void opregion_asle_intr(struct drm_device *dev);
-extern void ironlake_opregion_gse_intr(struct drm_device *dev);
-extern void opregion_enable_asle(struct drm_device *dev);
+extern void intel_opregion_init(struct drm_device *dev);
+extern void intel_opregion_fini(struct drm_device *dev);
+extern void intel_opregion_asle_intr(struct drm_device *dev);
+extern void intel_opregion_gse_intr(struct drm_device *dev);
+extern void intel_opregion_enable_asle(struct drm_device *dev);
#else
-static inline int intel_opregion_init(struct drm_device *dev, int resume) { return 0; }
-static inline void intel_opregion_free(struct drm_device *dev, int suspend) { return; }
-static inline void opregion_asle_intr(struct drm_device *dev) { return; }
-static inline void ironlake_opregion_gse_intr(struct drm_device *dev) { return; }
-static inline void opregion_enable_asle(struct drm_device *dev) { return; }
+static inline void intel_opregion_init(struct drm_device *dev) { return; }
+static inline void intel_opregion_fini(struct drm_device *dev) { return; }
+static inline void intel_opregion_asle_intr(struct drm_device *dev) { return; }
+static inline void intel_opregion_gse_intr(struct drm_device *dev) { return; }
+static inline void intel_opregion_enable_asle(struct drm_device *dev) { return; }
#endif
+/* intel_acpi.c */
+#ifdef CONFIG_ACPI
+extern void intel_register_dsm_handler(void);
+extern void intel_unregister_dsm_handler(void);
+#else
+static inline void intel_register_dsm_handler(void) { return; }
+static inline void intel_unregister_dsm_handler(void) { return; }
+#endif /* CONFIG_ACPI */
+
/* modesetting */
extern void intel_modeset_init(struct drm_device *dev);
extern void intel_modeset_cleanup(struct drm_device *dev);
@@ -1084,8 +1167,10 @@ extern void intel_detect_pch (struct drm_device *dev);
extern int intel_trans_dp_port_sel (struct drm_crtc *crtc);
/* overlay */
+#ifdef CONFIG_DEBUG_FS
extern struct intel_overlay_error_state *intel_overlay_capture_error_state(struct drm_device *dev);
extern void intel_overlay_print_error_state(struct seq_file *m, struct intel_overlay_error_state *error);
+#endif
/**
* Lock test for when it's just for synchronization of ring access.
@@ -1099,8 +1184,26 @@ extern void intel_overlay_print_error_state(struct seq_file *m, struct intel_ove
LOCK_TEST_WITH_RETURN(dev, file_priv); \
} while (0)
-#define I915_READ(reg) readl(dev_priv->regs + (reg))
-#define I915_WRITE(reg, val) writel(val, dev_priv->regs + (reg))
+static inline u32 i915_read(struct drm_i915_private *dev_priv, u32 reg)
+{
+ u32 val;
+
+ val = readl(dev_priv->regs + reg);
+ if (dev_priv->debug_flags & I915_DEBUG_READ)
+ printk(KERN_ERR "read 0x%08x from 0x%08x\n", val, reg);
+ return val;
+}
+
+static inline void i915_write(struct drm_i915_private *dev_priv, u32 reg,
+ u32 val)
+{
+ writel(val, dev_priv->regs + reg);
+ if (dev_priv->debug_flags & I915_DEBUG_WRITE)
+ printk(KERN_ERR "wrote 0x%08x to 0x%08x\n", val, reg);
+}
+
+#define I915_READ(reg) i915_read(dev_priv, (reg))
+#define I915_WRITE(reg, val) i915_write(dev_priv, (reg), (val))
#define I915_READ16(reg) readw(dev_priv->regs + (reg))
#define I915_WRITE16(reg, val) writel(val, dev_priv->regs + (reg))
#define I915_READ8(reg) readb(dev_priv->regs + (reg))
@@ -1110,6 +1213,11 @@ extern void intel_overlay_print_error_state(struct seq_file *m, struct intel_ove
#define POSTING_READ(reg) (void)I915_READ(reg)
#define POSTING_READ16(reg) (void)I915_READ16(reg)
+#define I915_DEBUG_ENABLE_IO() (dev_priv->debug_flags |= I915_DEBUG_READ | \
+ I915_DEBUG_WRITE)
+#define I915_DEBUG_DISABLE_IO() (dev_priv->debug_flags &= ~(I915_DEBUG_READ | \
+ I915_DEBUG_WRITE))
+
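/*
 * Editorial sketch, not part of the patch: the flags above gate the printk
 * tracing inside i915_read()/i915_write(), so MMIO traffic around a suspect
 * register sequence can be logged temporarily:
 *
 *	I915_DEBUG_ENABLE_IO();
 *	I915_WRITE(HWS_PGA, dev_priv->dma_status_page);
 *	(void)I915_READ(HWS_PGA);
 *	I915_DEBUG_DISABLE_IO();
 */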
#define I915_VERBOSE 0
#define BEGIN_LP_RING(n) do { \
@@ -1166,8 +1274,6 @@ extern void intel_overlay_print_error_state(struct seq_file *m, struct intel_ove
#define IS_I915GM(dev) ((dev)->pci_device == 0x2592)
#define IS_I945G(dev) ((dev)->pci_device == 0x2772)
#define IS_I945GM(dev) (INTEL_INFO(dev)->is_i945gm)
-#define IS_I965G(dev) (INTEL_INFO(dev)->is_i965g)
-#define IS_I965GM(dev) (INTEL_INFO(dev)->is_i965gm)
#define IS_BROADWATER(dev) (INTEL_INFO(dev)->is_broadwater)
#define IS_CRESTLINE(dev) (INTEL_INFO(dev)->is_crestline)
#define IS_GM45(dev) ((dev)->pci_device == 0x2A42)
@@ -1178,8 +1284,6 @@ extern void intel_overlay_print_error_state(struct seq_file *m, struct intel_ove
#define IS_G33(dev) (INTEL_INFO(dev)->is_g33)
#define IS_IRONLAKE_D(dev) ((dev)->pci_device == 0x0042)
#define IS_IRONLAKE_M(dev) ((dev)->pci_device == 0x0046)
-#define IS_IRONLAKE(dev) (INTEL_INFO(dev)->is_ironlake)
-#define IS_I9XX(dev) (INTEL_INFO(dev)->is_i9xx)
#define IS_MOBILE(dev) (INTEL_INFO(dev)->is_mobile)
#define IS_GEN2(dev) (INTEL_INFO(dev)->gen == 2)
@@ -1188,36 +1292,38 @@ extern void intel_overlay_print_error_state(struct seq_file *m, struct intel_ove
#define IS_GEN5(dev) (INTEL_INFO(dev)->gen == 5)
#define IS_GEN6(dev) (INTEL_INFO(dev)->gen == 6)
-#define HAS_BSD(dev) (IS_IRONLAKE(dev) || IS_G4X(dev))
+#define HAS_BSD(dev) (INTEL_INFO(dev)->has_bsd_ring)
+#define HAS_BLT(dev) (INTEL_INFO(dev)->has_blt_ring)
#define I915_NEED_GFX_HWS(dev) (INTEL_INFO(dev)->need_gfx_hws)
+#define HAS_OVERLAY(dev) (INTEL_INFO(dev)->has_overlay)
+#define OVERLAY_NEEDS_PHYSICAL(dev) (INTEL_INFO(dev)->overlay_needs_physical)
+
/* With the 945 and later, Y tiling got adjusted so that it was 32 128-byte
* rows, which changed the alignment requirements and fence programming.
*/
-#define HAS_128_BYTE_Y_TILING(dev) (IS_I9XX(dev) && !(IS_I915G(dev) || \
+#define HAS_128_BYTE_Y_TILING(dev) (!IS_GEN2(dev) && !(IS_I915G(dev) || \
IS_I915GM(dev)))
-#define SUPPORTS_DIGITAL_OUTPUTS(dev) (IS_I9XX(dev) && !IS_PINEVIEW(dev))
-#define SUPPORTS_INTEGRATED_HDMI(dev) (IS_G4X(dev) || IS_IRONLAKE(dev))
-#define SUPPORTS_INTEGRATED_DP(dev) (IS_G4X(dev) || IS_IRONLAKE(dev))
+#define SUPPORTS_DIGITAL_OUTPUTS(dev) (!IS_GEN2(dev) && !IS_PINEVIEW(dev))
+#define SUPPORTS_INTEGRATED_HDMI(dev) (IS_G4X(dev) || IS_GEN5(dev))
+#define SUPPORTS_INTEGRATED_DP(dev) (IS_G4X(dev) || IS_GEN5(dev))
#define SUPPORTS_EDP(dev) (IS_IRONLAKE_M(dev))
-#define SUPPORTS_TV(dev) (IS_I9XX(dev) && IS_MOBILE(dev) && \
- !IS_IRONLAKE(dev) && !IS_PINEVIEW(dev) && \
- !IS_GEN6(dev))
+#define SUPPORTS_TV(dev) (INTEL_INFO(dev)->supports_tv)
#define I915_HAS_HOTPLUG(dev) (INTEL_INFO(dev)->has_hotplug)
/* dsparb controlled by hw only */
#define DSPARB_HWCONTROL(dev) (IS_G4X(dev) || IS_IRONLAKE(dev))
-#define HAS_FW_BLC(dev) (IS_I9XX(dev) || IS_G4X(dev) || IS_IRONLAKE(dev))
+#define HAS_FW_BLC(dev) (INTEL_INFO(dev)->gen > 2)
#define HAS_PIPE_CXSR(dev) (INTEL_INFO(dev)->has_pipe_cxsr)
#define I915_HAS_FBC(dev) (INTEL_INFO(dev)->has_fbc)
#define I915_HAS_RC6(dev) (INTEL_INFO(dev)->has_rc6)
-#define HAS_PCH_SPLIT(dev) (IS_IRONLAKE(dev) || \
- IS_GEN6(dev))
-#define HAS_PIPE_CONTROL(dev) (IS_IRONLAKE(dev) || IS_GEN6(dev))
+#define HAS_PCH_SPLIT(dev) (IS_GEN5(dev) || IS_GEN6(dev))
+#define HAS_PIPE_CONTROL(dev) (IS_GEN5(dev) || IS_GEN6(dev))
#define INTEL_PCH_TYPE(dev) (((struct drm_i915_private *)(dev)->dev_private)->pch_type)
#define HAS_PCH_CPT(dev) (INTEL_PCH_TYPE(dev) == PCH_CPT)
+#define HAS_PCH_IBX(dev) (INTEL_PCH_TYPE(dev) == PCH_IBX)
#define PRIMARY_RINGBUFFER_SIZE (128*1024)
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index 90b1d6753b9d..275ec6ed43ae 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -37,6 +37,7 @@
#include <linux/intel-gtt.h>
static uint32_t i915_gem_get_gtt_alignment(struct drm_gem_object *obj);
+
static int i915_gem_object_flush_gpu_write_domain(struct drm_gem_object *obj);
static void i915_gem_object_flush_gtt_write_domain(struct drm_gem_object *obj);
static void i915_gem_object_flush_cpu_write_domain(struct drm_gem_object *obj);
@@ -46,7 +47,8 @@ static int i915_gem_object_set_cpu_read_domain_range(struct drm_gem_object *obj,
uint64_t offset,
uint64_t size);
static void i915_gem_object_set_to_full_cpu_read_domain(struct drm_gem_object *obj);
-static int i915_gem_object_wait_rendering(struct drm_gem_object *obj);
+static int i915_gem_object_wait_rendering(struct drm_gem_object *obj,
+ bool interruptible);
static int i915_gem_object_bind_to_gtt(struct drm_gem_object *obj,
unsigned alignment);
static void i915_gem_clear_fence_reg(struct drm_gem_object *obj);
@@ -55,9 +57,111 @@ static int i915_gem_phys_pwrite(struct drm_device *dev, struct drm_gem_object *o
struct drm_file *file_priv);
static void i915_gem_free_object_tail(struct drm_gem_object *obj);
+static int
+i915_gem_object_get_pages(struct drm_gem_object *obj,
+ gfp_t gfpmask);
+
+static void
+i915_gem_object_put_pages(struct drm_gem_object *obj);
+
static LIST_HEAD(shrink_list);
static DEFINE_SPINLOCK(shrink_list_lock);
+/* some bookkeeping */
+static void i915_gem_info_add_obj(struct drm_i915_private *dev_priv,
+ size_t size)
+{
+ dev_priv->mm.object_count++;
+ dev_priv->mm.object_memory += size;
+}
+
+static void i915_gem_info_remove_obj(struct drm_i915_private *dev_priv,
+ size_t size)
+{
+ dev_priv->mm.object_count--;
+ dev_priv->mm.object_memory -= size;
+}
+
+static void i915_gem_info_add_gtt(struct drm_i915_private *dev_priv,
+ size_t size)
+{
+ dev_priv->mm.gtt_count++;
+ dev_priv->mm.gtt_memory += size;
+}
+
+static void i915_gem_info_remove_gtt(struct drm_i915_private *dev_priv,
+ size_t size)
+{
+ dev_priv->mm.gtt_count--;
+ dev_priv->mm.gtt_memory -= size;
+}
+
+static void i915_gem_info_add_pin(struct drm_i915_private *dev_priv,
+ size_t size)
+{
+ dev_priv->mm.pin_count++;
+ dev_priv->mm.pin_memory += size;
+}
+
+static void i915_gem_info_remove_pin(struct drm_i915_private *dev_priv,
+ size_t size)
+{
+ dev_priv->mm.pin_count--;
+ dev_priv->mm.pin_memory -= size;
+}
+
+int
+i915_gem_check_is_wedged(struct drm_device *dev)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct completion *x = &dev_priv->error_completion;
+ unsigned long flags;
+ int ret;
+
+ if (!atomic_read(&dev_priv->mm.wedged))
+ return 0;
+
+ ret = wait_for_completion_interruptible(x);
+ if (ret)
+ return ret;
+
+ /* Success, we reset the GPU! */
+ if (!atomic_read(&dev_priv->mm.wedged))
+ return 0;
+
+ /* GPU is hung, bump the completion count to account for
+ * the token we just consumed so that we never hit zero and
+ * end up waiting upon a subsequent completion event that
+ * will never happen.
+ */
+ spin_lock_irqsave(&x->wait.lock, flags);
+ x->done++;
+ spin_unlock_irqrestore(&x->wait.lock, flags);
+ return -EIO;
+}
+
+static int i915_mutex_lock_interruptible(struct drm_device *dev)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ int ret;
+
+ ret = i915_gem_check_is_wedged(dev);
+ if (ret)
+ return ret;
+
+ ret = mutex_lock_interruptible(&dev->struct_mutex);
+ if (ret)
+ return ret;
+
+ if (atomic_read(&dev_priv->mm.wedged)) {
+ mutex_unlock(&dev->struct_mutex);
+ return -EAGAIN;
+ }
+
+ WARN_ON(i915_verify_lists(dev));
+ return 0;
+}
+
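/*
 * Editorial sketch, not part of the patch: how an ioctl entry point uses the
 * helper above, mirroring the reworked i915_gem_pread_ioctl() later in this
 * series (function name illustrative, body elided):
 */
static int i915_example_ioctl(struct drm_device *dev, void *data,
			      struct drm_file *file_priv)
{
	int ret;

	ret = i915_mutex_lock_interruptible(dev);
	if (ret)
		return ret;	/* -EIO once wedged, or an interrupted-sleep error */

	/* ... look up objects and operate under struct_mutex ... */

	mutex_unlock(&dev->struct_mutex);
	return 0;
}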
static inline bool
i915_gem_object_is_inactive(struct drm_i915_gem_object *obj_priv)
{
@@ -66,7 +170,8 @@ i915_gem_object_is_inactive(struct drm_i915_gem_object *obj_priv)
obj_priv->pin_count == 0;
}
-int i915_gem_do_init(struct drm_device *dev, unsigned long start,
+int i915_gem_do_init(struct drm_device *dev,
+ unsigned long start,
unsigned long end)
{
drm_i915_private_t *dev_priv = dev->dev_private;
@@ -80,7 +185,7 @@ int i915_gem_do_init(struct drm_device *dev, unsigned long start,
drm_mm_init(&dev_priv->mm.gtt_space, start,
end - start);
- dev->gtt_total = (uint32_t) (end - start);
+ dev_priv->mm.gtt_total = end - start;
return 0;
}
@@ -103,14 +208,16 @@ int
i915_gem_get_aperture_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_priv)
{
+ struct drm_i915_private *dev_priv = dev->dev_private;
struct drm_i915_gem_get_aperture *args = data;
if (!(dev->driver->driver_features & DRIVER_GEM))
return -ENODEV;
- args->aper_size = dev->gtt_total;
- args->aper_available_size = (args->aper_size -
- atomic_read(&dev->pin_memory));
+ mutex_lock(&dev->struct_mutex);
+ args->aper_size = dev_priv->mm.gtt_total;
+ args->aper_available_size = args->aper_size - dev_priv->mm.pin_memory;
+ mutex_unlock(&dev->struct_mutex);
return 0;
}
@@ -136,12 +243,17 @@ i915_gem_create_ioctl(struct drm_device *dev, void *data,
return -ENOMEM;
ret = drm_gem_handle_create(file_priv, obj, &handle);
- /* drop reference from allocate - handle holds it now */
- drm_gem_object_unreference_unlocked(obj);
if (ret) {
+ drm_gem_object_release(obj);
+ i915_gem_info_remove_obj(dev->dev_private, obj->size);
+ kfree(obj);
return ret;
}
+ /* drop reference from allocate - handle holds it now */
+ drm_gem_object_unreference(obj);
+ trace_i915_gem_object_create(obj);
+
args->handle = handle;
return 0;
}
@@ -152,19 +264,14 @@ fast_shmem_read(struct page **pages,
char __user *data,
int length)
{
- char __iomem *vaddr;
- int unwritten;
-
- vaddr = kmap_atomic(pages[page_base >> PAGE_SHIFT], KM_USER0);
- if (vaddr == NULL)
- return -ENOMEM;
- unwritten = __copy_to_user_inatomic(data, vaddr + page_offset, length);
- kunmap_atomic(vaddr, KM_USER0);
+ char *vaddr;
+ int ret;
- if (unwritten)
- return -EFAULT;
+ vaddr = kmap_atomic(pages[page_base >> PAGE_SHIFT]);
+ ret = __copy_to_user_inatomic(data, vaddr + page_offset, length);
+ kunmap_atomic(vaddr);
- return 0;
+ return ret;
}
static int i915_gem_object_needs_bit17_swizzle(struct drm_gem_object *obj)
@@ -258,22 +365,10 @@ i915_gem_shmem_pread_fast(struct drm_device *dev, struct drm_gem_object *obj,
loff_t offset, page_base;
char __user *user_data;
int page_offset, page_length;
- int ret;
user_data = (char __user *) (uintptr_t) args->data_ptr;
remain = args->size;
- mutex_lock(&dev->struct_mutex);
-
- ret = i915_gem_object_get_pages(obj, 0);
- if (ret != 0)
- goto fail_unlock;
-
- ret = i915_gem_object_set_cpu_read_domain_range(obj, args->offset,
- args->size);
- if (ret != 0)
- goto fail_put_pages;
-
obj_priv = to_intel_bo(obj);
offset = args->offset;
@@ -290,23 +385,17 @@ i915_gem_shmem_pread_fast(struct drm_device *dev, struct drm_gem_object *obj,
if ((page_offset + remain) > PAGE_SIZE)
page_length = PAGE_SIZE - page_offset;
- ret = fast_shmem_read(obj_priv->pages,
- page_base, page_offset,
- user_data, page_length);
- if (ret)
- goto fail_put_pages;
+ if (fast_shmem_read(obj_priv->pages,
+ page_base, page_offset,
+ user_data, page_length))
+ return -EFAULT;
remain -= page_length;
user_data += page_length;
offset += page_length;
}
-fail_put_pages:
- i915_gem_object_put_pages(obj);
-fail_unlock:
- mutex_unlock(&dev->struct_mutex);
-
- return ret;
+ return 0;
}
static int
@@ -367,31 +456,28 @@ i915_gem_shmem_pread_slow(struct drm_device *dev, struct drm_gem_object *obj,
last_data_page = (data_ptr + args->size - 1) / PAGE_SIZE;
num_pages = last_data_page - first_data_page + 1;
- user_pages = drm_calloc_large(num_pages, sizeof(struct page *));
+ user_pages = drm_malloc_ab(num_pages, sizeof(struct page *));
if (user_pages == NULL)
return -ENOMEM;
+ mutex_unlock(&dev->struct_mutex);
down_read(&mm->mmap_sem);
pinned_pages = get_user_pages(current, mm, (uintptr_t)args->data_ptr,
num_pages, 1, 0, user_pages, NULL);
up_read(&mm->mmap_sem);
+ mutex_lock(&dev->struct_mutex);
if (pinned_pages < num_pages) {
ret = -EFAULT;
- goto fail_put_user_pages;
+ goto out;
}
- do_bit17_swizzling = i915_gem_object_needs_bit17_swizzle(obj);
-
- mutex_lock(&dev->struct_mutex);
-
- ret = i915_gem_object_get_pages_or_evict(obj);
+ ret = i915_gem_object_set_cpu_read_domain_range(obj,
+ args->offset,
+ args->size);
if (ret)
- goto fail_unlock;
+ goto out;
- ret = i915_gem_object_set_cpu_read_domain_range(obj, args->offset,
- args->size);
- if (ret != 0)
- goto fail_put_pages;
+ do_bit17_swizzling = i915_gem_object_needs_bit17_swizzle(obj);
obj_priv = to_intel_bo(obj);
offset = args->offset;
@@ -436,11 +522,7 @@ i915_gem_shmem_pread_slow(struct drm_device *dev, struct drm_gem_object *obj,
offset += page_length;
}
-fail_put_pages:
- i915_gem_object_put_pages(obj);
-fail_unlock:
- mutex_unlock(&dev->struct_mutex);
-fail_put_user_pages:
+out:
for (i = 0; i < pinned_pages; i++) {
SetPageDirty(user_pages[i]);
page_cache_release(user_pages[i]);
@@ -462,37 +544,60 @@ i915_gem_pread_ioctl(struct drm_device *dev, void *data,
struct drm_i915_gem_pread *args = data;
struct drm_gem_object *obj;
struct drm_i915_gem_object *obj_priv;
- int ret;
+ int ret = 0;
+
+ if (args->size == 0)
+ return 0;
+
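+ /* Validate and prefault the user buffer before taking struct_mutex. */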
+ if (!access_ok(VERIFY_WRITE,
+ (char __user *)(uintptr_t)args->data_ptr,
+ args->size))
+ return -EFAULT;
+
+ ret = fault_in_pages_writeable((char __user *)(uintptr_t)args->data_ptr,
+ args->size);
+ if (ret)
+ return -EFAULT;
+
+ ret = i915_mutex_lock_interruptible(dev);
+ if (ret)
+ return ret;
obj = drm_gem_object_lookup(dev, file_priv, args->handle);
- if (obj == NULL)
- return -ENOENT;
+ if (obj == NULL) {
+ ret = -ENOENT;
+ goto unlock;
+ }
obj_priv = to_intel_bo(obj);
/* Bounds check source. */
if (args->offset > obj->size || args->size > obj->size - args->offset) {
ret = -EINVAL;
- goto err;
+ goto out;
}
- if (!access_ok(VERIFY_WRITE,
- (char __user *)(uintptr_t)args->data_ptr,
- args->size)) {
- ret = -EFAULT;
- goto err;
- }
+ ret = i915_gem_object_get_pages_or_evict(obj);
+ if (ret)
+ goto out;
- if (i915_gem_object_needs_bit17_swizzle(obj)) {
- ret = i915_gem_shmem_pread_slow(dev, obj, args, file_priv);
- } else {
+ ret = i915_gem_object_set_cpu_read_domain_range(obj,
+ args->offset,
+ args->size);
+ if (ret)
+ goto out_put;
+
+ ret = -EFAULT;
+ if (!i915_gem_object_needs_bit17_swizzle(obj))
ret = i915_gem_shmem_pread_fast(dev, obj, args, file_priv);
- if (ret != 0)
- ret = i915_gem_shmem_pread_slow(dev, obj, args,
- file_priv);
- }
+ if (ret == -EFAULT)
+ ret = i915_gem_shmem_pread_slow(dev, obj, args, file_priv);
-err:
- drm_gem_object_unreference_unlocked(obj);
+out_put:
+ i915_gem_object_put_pages(obj);
+out:
+ drm_gem_object_unreference(obj);
+unlock:
+ mutex_unlock(&dev->struct_mutex);
return ret;
}
@@ -509,13 +614,11 @@ fast_user_write(struct io_mapping *mapping,
char *vaddr_atomic;
unsigned long unwritten;
- vaddr_atomic = io_mapping_map_atomic_wc(mapping, page_base, KM_USER0);
+ vaddr_atomic = io_mapping_map_atomic_wc(mapping, page_base);
unwritten = __copy_from_user_inatomic_nocache(vaddr_atomic + page_offset,
user_data, length);
- io_mapping_unmap_atomic(vaddr_atomic, KM_USER0);
- if (unwritten)
- return -EFAULT;
- return 0;
+ io_mapping_unmap_atomic(vaddr_atomic);
+ return unwritten;
}
/* Here's the write path which can sleep for
@@ -548,18 +651,14 @@ fast_shmem_write(struct page **pages,
char __user *data,
int length)
{
- char __iomem *vaddr;
- unsigned long unwritten;
+ char *vaddr;
+ int ret;
- vaddr = kmap_atomic(pages[page_base >> PAGE_SHIFT], KM_USER0);
- if (vaddr == NULL)
- return -ENOMEM;
- unwritten = __copy_from_user_inatomic(vaddr + page_offset, data, length);
- kunmap_atomic(vaddr, KM_USER0);
+ vaddr = kmap_atomic(pages[page_base >> PAGE_SHIFT]);
+ ret = __copy_from_user_inatomic(vaddr + page_offset, data, length);
+ kunmap_atomic(vaddr);
- if (unwritten)
- return -EFAULT;
- return 0;
+ return ret;
}
/**
@@ -577,22 +676,10 @@ i915_gem_gtt_pwrite_fast(struct drm_device *dev, struct drm_gem_object *obj,
loff_t offset, page_base;
char __user *user_data;
int page_offset, page_length;
- int ret;
user_data = (char __user *) (uintptr_t) args->data_ptr;
remain = args->size;
-
- mutex_lock(&dev->struct_mutex);
- ret = i915_gem_object_pin(obj, 0);
- if (ret) {
- mutex_unlock(&dev->struct_mutex);
- return ret;
- }
- ret = i915_gem_object_set_to_gtt_domain(obj, 1);
- if (ret)
- goto fail;
-
obj_priv = to_intel_bo(obj);
offset = obj_priv->gtt_offset + args->offset;
@@ -609,26 +696,21 @@ i915_gem_gtt_pwrite_fast(struct drm_device *dev, struct drm_gem_object *obj,
if ((page_offset + remain) > PAGE_SIZE)
page_length = PAGE_SIZE - page_offset;
- ret = fast_user_write (dev_priv->mm.gtt_mapping, page_base,
- page_offset, user_data, page_length);
-
/* If we get a fault while copying data, then (presumably) our
* source page isn't available. Return the error and we'll
* retry in the slow path.
*/
- if (ret)
- goto fail;
+ if (fast_user_write(dev_priv->mm.gtt_mapping, page_base,
+ page_offset, user_data, page_length))
+ return -EFAULT;
remain -= page_length;
user_data += page_length;
offset += page_length;
}
-fail:
- i915_gem_object_unpin(obj);
- mutex_unlock(&dev->struct_mutex);
-
- return ret;
+ return 0;
}
/**
@@ -665,27 +747,24 @@ i915_gem_gtt_pwrite_slow(struct drm_device *dev, struct drm_gem_object *obj,
last_data_page = (data_ptr + args->size - 1) / PAGE_SIZE;
num_pages = last_data_page - first_data_page + 1;
- user_pages = drm_calloc_large(num_pages, sizeof(struct page *));
+ user_pages = drm_malloc_ab(num_pages, sizeof(struct page *));
if (user_pages == NULL)
return -ENOMEM;
+ mutex_unlock(&dev->struct_mutex);
down_read(&mm->mmap_sem);
pinned_pages = get_user_pages(current, mm, (uintptr_t)args->data_ptr,
num_pages, 0, 0, user_pages, NULL);
up_read(&mm->mmap_sem);
+ mutex_lock(&dev->struct_mutex);
if (pinned_pages < num_pages) {
ret = -EFAULT;
goto out_unpin_pages;
}
- mutex_lock(&dev->struct_mutex);
- ret = i915_gem_object_pin(obj, 0);
- if (ret)
- goto out_unlock;
-
ret = i915_gem_object_set_to_gtt_domain(obj, 1);
if (ret)
- goto out_unpin_object;
+ goto out_unpin_pages;
obj_priv = to_intel_bo(obj);
offset = obj_priv->gtt_offset + args->offset;
@@ -721,10 +800,6 @@ i915_gem_gtt_pwrite_slow(struct drm_device *dev, struct drm_gem_object *obj,
data_ptr += page_length;
}
-out_unpin_object:
- i915_gem_object_unpin(obj);
-out_unlock:
- mutex_unlock(&dev->struct_mutex);
out_unpin_pages:
for (i = 0; i < pinned_pages; i++)
page_cache_release(user_pages[i]);
@@ -747,21 +822,10 @@ i915_gem_shmem_pwrite_fast(struct drm_device *dev, struct drm_gem_object *obj,
loff_t offset, page_base;
char __user *user_data;
int page_offset, page_length;
- int ret;
user_data = (char __user *) (uintptr_t) args->data_ptr;
remain = args->size;
- mutex_lock(&dev->struct_mutex);
-
- ret = i915_gem_object_get_pages(obj, 0);
- if (ret != 0)
- goto fail_unlock;
-
- ret = i915_gem_object_set_to_cpu_domain(obj, 1);
- if (ret != 0)
- goto fail_put_pages;
-
obj_priv = to_intel_bo(obj);
offset = args->offset;
obj_priv->dirty = 1;
@@ -779,23 +843,17 @@ i915_gem_shmem_pwrite_fast(struct drm_device *dev, struct drm_gem_object *obj,
if ((page_offset + remain) > PAGE_SIZE)
page_length = PAGE_SIZE - page_offset;
- ret = fast_shmem_write(obj_priv->pages,
+ if (fast_shmem_write(obj_priv->pages,
page_base, page_offset,
- user_data, page_length);
- if (ret)
- goto fail_put_pages;
+ user_data, page_length))
+ return -EFAULT;
remain -= page_length;
user_data += page_length;
offset += page_length;
}
-fail_put_pages:
- i915_gem_object_put_pages(obj);
-fail_unlock:
- mutex_unlock(&dev->struct_mutex);
-
- return ret;
+ return 0;
}
/**
@@ -833,30 +891,26 @@ i915_gem_shmem_pwrite_slow(struct drm_device *dev, struct drm_gem_object *obj,
last_data_page = (data_ptr + args->size - 1) / PAGE_SIZE;
num_pages = last_data_page - first_data_page + 1;
- user_pages = drm_calloc_large(num_pages, sizeof(struct page *));
+ user_pages = drm_malloc_ab(num_pages, sizeof(struct page *));
if (user_pages == NULL)
return -ENOMEM;
+ mutex_unlock(&dev->struct_mutex);
down_read(&mm->mmap_sem);
pinned_pages = get_user_pages(current, mm, (uintptr_t)args->data_ptr,
num_pages, 0, 0, user_pages, NULL);
up_read(&mm->mmap_sem);
+ mutex_lock(&dev->struct_mutex);
if (pinned_pages < num_pages) {
ret = -EFAULT;
- goto fail_put_user_pages;
+ goto out;
}
- do_bit17_swizzling = i915_gem_object_needs_bit17_swizzle(obj);
-
- mutex_lock(&dev->struct_mutex);
-
- ret = i915_gem_object_get_pages_or_evict(obj);
+ ret = i915_gem_object_set_to_cpu_domain(obj, 1);
if (ret)
- goto fail_unlock;
+ goto out;
- ret = i915_gem_object_set_to_cpu_domain(obj, 1);
- if (ret != 0)
- goto fail_put_pages;
+ do_bit17_swizzling = i915_gem_object_needs_bit17_swizzle(obj);
obj_priv = to_intel_bo(obj);
offset = args->offset;
@@ -902,11 +956,7 @@ i915_gem_shmem_pwrite_slow(struct drm_device *dev, struct drm_gem_object *obj,
offset += page_length;
}
-fail_put_pages:
- i915_gem_object_put_pages(obj);
-fail_unlock:
- mutex_unlock(&dev->struct_mutex);
-fail_put_user_pages:
+out:
for (i = 0; i < pinned_pages; i++)
page_cache_release(user_pages[i]);
drm_free_large(user_pages);
@@ -921,29 +971,41 @@ fail_put_user_pages:
*/
int
i915_gem_pwrite_ioctl(struct drm_device *dev, void *data,
- struct drm_file *file_priv)
+ struct drm_file *file)
{
struct drm_i915_gem_pwrite *args = data;
struct drm_gem_object *obj;
struct drm_i915_gem_object *obj_priv;
- int ret = 0;
+ int ret;
- obj = drm_gem_object_lookup(dev, file_priv, args->handle);
- if (obj == NULL)
- return -ENOENT;
+ if (args->size == 0)
+ return 0;
+
+ if (!access_ok(VERIFY_READ,
+ (char __user *)(uintptr_t)args->data_ptr,
+ args->size))
+ return -EFAULT;
+
+ ret = fault_in_pages_readable((char __user *)(uintptr_t)args->data_ptr,
+ args->size);
+ if (ret)
+ return -EFAULT;
+
+ ret = i915_mutex_lock_interruptible(dev);
+ if (ret)
+ return ret;
+
+ obj = drm_gem_object_lookup(dev, file, args->handle);
+ if (obj == NULL) {
+ ret = -ENOENT;
+ goto unlock;
+ }
obj_priv = to_intel_bo(obj);
/* Bounds check destination. */
if (args->offset > obj->size || args->size > obj->size - args->offset) {
ret = -EINVAL;
- goto err;
- }
-
- if (!access_ok(VERIFY_READ,
- (char __user *)(uintptr_t)args->data_ptr,
- args->size)) {
- ret = -EFAULT;
- goto err;
+ goto out;
}
/* We can only do the GTT pwrite on untiled buffers, as otherwise
@@ -953,32 +1015,47 @@ i915_gem_pwrite_ioctl(struct drm_device *dev, void *data,
* perspective, requiring manual detiling by the client.
*/
if (obj_priv->phys_obj)
- ret = i915_gem_phys_pwrite(dev, obj, args, file_priv);
+ ret = i915_gem_phys_pwrite(dev, obj, args, file);
else if (obj_priv->tiling_mode == I915_TILING_NONE &&
- dev->gtt_total != 0 &&
+ obj_priv->gtt_space &&
obj->write_domain != I915_GEM_DOMAIN_CPU) {
- ret = i915_gem_gtt_pwrite_fast(dev, obj, args, file_priv);
- if (ret == -EFAULT) {
- ret = i915_gem_gtt_pwrite_slow(dev, obj, args,
- file_priv);
- }
- } else if (i915_gem_object_needs_bit17_swizzle(obj)) {
- ret = i915_gem_shmem_pwrite_slow(dev, obj, args, file_priv);
+ ret = i915_gem_object_pin(obj, 0);
+ if (ret)
+ goto out;
+
+ ret = i915_gem_object_set_to_gtt_domain(obj, 1);
+ if (ret)
+ goto out_unpin;
+
+ ret = i915_gem_gtt_pwrite_fast(dev, obj, args, file);
+ if (ret == -EFAULT)
+ ret = i915_gem_gtt_pwrite_slow(dev, obj, args, file);
+
+out_unpin:
+ i915_gem_object_unpin(obj);
} else {
- ret = i915_gem_shmem_pwrite_fast(dev, obj, args, file_priv);
- if (ret == -EFAULT) {
- ret = i915_gem_shmem_pwrite_slow(dev, obj, args,
- file_priv);
- }
- }
+ ret = i915_gem_object_get_pages_or_evict(obj);
+ if (ret)
+ goto out;
-#if WATCH_PWRITE
- if (ret)
- DRM_INFO("pwrite failed %d\n", ret);
-#endif
+ ret = i915_gem_object_set_to_cpu_domain(obj, 1);
+ if (ret)
+ goto out_put;
-err:
- drm_gem_object_unreference_unlocked(obj);
+ ret = -EFAULT;
+ if (!i915_gem_object_needs_bit17_swizzle(obj))
+ ret = i915_gem_shmem_pwrite_fast(dev, obj, args, file);
+ if (ret == -EFAULT)
+ ret = i915_gem_shmem_pwrite_slow(dev, obj, args, file);
+
+out_put:
+ i915_gem_object_put_pages(obj);
+ }
+
+out:
+ drm_gem_object_unreference(obj);
+unlock:
+ mutex_unlock(&dev->struct_mutex);
return ret;
}
@@ -1014,19 +1091,19 @@ i915_gem_set_domain_ioctl(struct drm_device *dev, void *data,
if (write_domain != 0 && read_domains != write_domain)
return -EINVAL;
+ ret = i915_mutex_lock_interruptible(dev);
+ if (ret)
+ return ret;
+
obj = drm_gem_object_lookup(dev, file_priv, args->handle);
- if (obj == NULL)
- return -ENOENT;
+ if (obj == NULL) {
+ ret = -ENOENT;
+ goto unlock;
+ }
obj_priv = to_intel_bo(obj);
- mutex_lock(&dev->struct_mutex);
-
intel_mark_busy(dev, obj);
-#if WATCH_BUF
- DRM_INFO("set_domain_ioctl %p(%zd), %08x %08x\n",
- obj, obj->size, read_domains, write_domain);
-#endif
if (read_domains & I915_GEM_DOMAIN_GTT) {
ret = i915_gem_object_set_to_gtt_domain(obj, write_domain != 0);
@@ -1050,12 +1127,12 @@ i915_gem_set_domain_ioctl(struct drm_device *dev, void *data,
ret = i915_gem_object_set_to_cpu_domain(obj, write_domain != 0);
}
-
/* Maintain LRU order of "inactive" objects */
if (ret == 0 && i915_gem_object_is_inactive(obj_priv))
- list_move_tail(&obj_priv->list, &dev_priv->mm.inactive_list);
+ list_move_tail(&obj_priv->mm_list, &dev_priv->mm.inactive_list);
drm_gem_object_unreference(obj);
+unlock:
mutex_unlock(&dev->struct_mutex);
return ret;
}
@@ -1069,30 +1146,27 @@ i915_gem_sw_finish_ioctl(struct drm_device *dev, void *data,
{
struct drm_i915_gem_sw_finish *args = data;
struct drm_gem_object *obj;
- struct drm_i915_gem_object *obj_priv;
int ret = 0;
if (!(dev->driver->driver_features & DRIVER_GEM))
return -ENODEV;
- mutex_lock(&dev->struct_mutex);
+ ret = i915_mutex_lock_interruptible(dev);
+ if (ret)
+ return ret;
+
obj = drm_gem_object_lookup(dev, file_priv, args->handle);
if (obj == NULL) {
- mutex_unlock(&dev->struct_mutex);
- return -ENOENT;
+ ret = -ENOENT;
+ goto unlock;
}
-#if WATCH_BUF
- DRM_INFO("%s: sw_finish %d (%p %zd)\n",
- __func__, args->handle, obj, obj->size);
-#endif
- obj_priv = to_intel_bo(obj);
-
/* Pinned buffers may be scanout, so flush the cache */
- if (obj_priv->pin_count)
+ if (to_intel_bo(obj)->pin_count)
i915_gem_object_flush_cpu_write_domain(obj);
drm_gem_object_unreference(obj);
+unlock:
mutex_unlock(&dev->struct_mutex);
return ret;
}
@@ -1181,13 +1255,13 @@ int i915_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
/* Need a new fence register? */
if (obj_priv->tiling_mode != I915_TILING_NONE) {
- ret = i915_gem_object_get_fence_reg(obj);
+ ret = i915_gem_object_get_fence_reg(obj, true);
if (ret)
goto unlock;
}
if (i915_gem_object_is_inactive(obj_priv))
- list_move_tail(&obj_priv->list, &dev_priv->mm.inactive_list);
+ list_move_tail(&obj_priv->mm_list, &dev_priv->mm.inactive_list);
pfn = ((dev->agp->base + obj_priv->gtt_offset) >> PAGE_SHIFT) +
page_offset;
@@ -1246,7 +1320,7 @@ i915_gem_create_mmap_offset(struct drm_gem_object *obj)
obj->size / PAGE_SIZE, 0, 0);
if (!list->file_offset_node) {
DRM_ERROR("failed to allocate offset for bo %d\n", obj->name);
- ret = -ENOMEM;
+ ret = -ENOSPC;
goto out_free_list;
}
@@ -1258,9 +1332,9 @@ i915_gem_create_mmap_offset(struct drm_gem_object *obj)
}
list->hash.key = list->file_offset_node->start;
- if (drm_ht_insert_item(&mm->offset_hash, &list->hash)) {
+ ret = drm_ht_insert_item(&mm->offset_hash, &list->hash);
+ if (ret) {
DRM_ERROR("failed to add to map hash\n");
- ret = -ENOMEM;
goto out_free_mm;
}
@@ -1345,14 +1419,14 @@ i915_gem_get_gtt_alignment(struct drm_gem_object *obj)
* Minimum alignment is 4k (GTT page size), but might be greater
* if a fence register is needed for the object.
*/
- if (IS_I965G(dev) || obj_priv->tiling_mode == I915_TILING_NONE)
+ if (INTEL_INFO(dev)->gen >= 4 || obj_priv->tiling_mode == I915_TILING_NONE)
return 4096;
/*
* Previous chips need to be aligned to the size of the smallest
* fence register that can contain the object.
*/
- if (IS_I9XX(dev))
+ if (INTEL_INFO(dev)->gen == 3)
start = 1024*1024;
else
start = 512*1024;
@@ -1390,29 +1464,27 @@ i915_gem_mmap_gtt_ioctl(struct drm_device *dev, void *data,
if (!(dev->driver->driver_features & DRIVER_GEM))
return -ENODEV;
- obj = drm_gem_object_lookup(dev, file_priv, args->handle);
- if (obj == NULL)
- return -ENOENT;
-
- mutex_lock(&dev->struct_mutex);
+ ret = i915_mutex_lock_interruptible(dev);
+ if (ret)
+ return ret;
+ obj = drm_gem_object_lookup(dev, file_priv, args->handle);
+ if (obj == NULL) {
+ ret = -ENOENT;
+ goto unlock;
+ }
obj_priv = to_intel_bo(obj);
if (obj_priv->madv != I915_MADV_WILLNEED) {
DRM_ERROR("Attempting to mmap a purgeable buffer\n");
- drm_gem_object_unreference(obj);
- mutex_unlock(&dev->struct_mutex);
- return -EINVAL;
+ ret = -EINVAL;
+ goto out;
}
-
if (!obj_priv->mmap_offset) {
ret = i915_gem_create_mmap_offset(obj);
- if (ret) {
- drm_gem_object_unreference(obj);
- mutex_unlock(&dev->struct_mutex);
- return ret;
- }
+ if (ret)
+ goto out;
}
args->offset = obj_priv->mmap_offset;
@@ -1423,20 +1495,18 @@ i915_gem_mmap_gtt_ioctl(struct drm_device *dev, void *data,
*/
if (!obj_priv->agp_mem) {
ret = i915_gem_object_bind_to_gtt(obj, 0);
- if (ret) {
- drm_gem_object_unreference(obj);
- mutex_unlock(&dev->struct_mutex);
- return ret;
- }
+ if (ret)
+ goto out;
}
+out:
drm_gem_object_unreference(obj);
+unlock:
mutex_unlock(&dev->struct_mutex);
-
- return 0;
+ return ret;
}
-void
+static void
i915_gem_object_put_pages(struct drm_gem_object *obj)
{
struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
@@ -1470,13 +1540,25 @@ i915_gem_object_put_pages(struct drm_gem_object *obj)
obj_priv->pages = NULL;
}
+static uint32_t
+i915_gem_next_request_seqno(struct drm_device *dev,
+ struct intel_ring_buffer *ring)
+{
+ drm_i915_private_t *dev_priv = dev->dev_private;
+
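+ /* Marking the lazy request as outstanding defers emitting it: the
+ * caller uses the next seqno now, and i915_do_wait_request() adds
+ * the real request before any wait on that seqno.
+ */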
+ ring->outstanding_lazy_request = true;
+ return dev_priv->next_seqno;
+}
+
static void
-i915_gem_object_move_to_active(struct drm_gem_object *obj, uint32_t seqno,
+i915_gem_object_move_to_active(struct drm_gem_object *obj,
struct intel_ring_buffer *ring)
{
struct drm_device *dev = obj->dev;
- drm_i915_private_t *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = dev->dev_private;
struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
+ uint32_t seqno = i915_gem_next_request_seqno(dev, ring);
+
BUG_ON(ring == NULL);
obj_priv->ring = ring;
@@ -1485,10 +1567,10 @@ i915_gem_object_move_to_active(struct drm_gem_object *obj, uint32_t seqno,
drm_gem_object_reference(obj);
obj_priv->active = 1;
}
+
/* Move from whatever list we were on to the tail of execution. */
- spin_lock(&dev_priv->mm.active_list_lock);
- list_move_tail(&obj_priv->list, &ring->active_list);
- spin_unlock(&dev_priv->mm.active_list_lock);
+ list_move_tail(&obj_priv->mm_list, &dev_priv->mm.active_list);
+ list_move_tail(&obj_priv->ring_list, &ring->active_list);
obj_priv->last_rendering_seqno = seqno;
}
@@ -1500,7 +1582,8 @@ i915_gem_object_move_to_flushing(struct drm_gem_object *obj)
struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
BUG_ON(!obj_priv->active);
- list_move_tail(&obj_priv->list, &dev_priv->mm.flushing_list);
+ list_move_tail(&obj_priv->mm_list, &dev_priv->mm.flushing_list);
+ list_del_init(&obj_priv->ring_list);
obj_priv->last_rendering_seqno = 0;
}
@@ -1538,11 +1621,11 @@ i915_gem_object_move_to_inactive(struct drm_gem_object *obj)
drm_i915_private_t *dev_priv = dev->dev_private;
struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
- i915_verify_inactive(dev, __FILE__, __LINE__);
if (obj_priv->pin_count != 0)
- list_del_init(&obj_priv->list);
+ list_move_tail(&obj_priv->mm_list, &dev_priv->mm.pinned_list);
else
- list_move_tail(&obj_priv->list, &dev_priv->mm.inactive_list);
+ list_move_tail(&obj_priv->mm_list, &dev_priv->mm.inactive_list);
+ list_del_init(&obj_priv->ring_list);
BUG_ON(!list_empty(&obj_priv->gpu_write_list));
@@ -1552,30 +1635,28 @@ i915_gem_object_move_to_inactive(struct drm_gem_object *obj)
obj_priv->active = 0;
drm_gem_object_unreference(obj);
}
- i915_verify_inactive(dev, __FILE__, __LINE__);
+ WARN_ON(i915_verify_lists(dev));
}
static void
i915_gem_process_flushing_list(struct drm_device *dev,
- uint32_t flush_domains, uint32_t seqno,
+ uint32_t flush_domains,
struct intel_ring_buffer *ring)
{
drm_i915_private_t *dev_priv = dev->dev_private;
struct drm_i915_gem_object *obj_priv, *next;
list_for_each_entry_safe(obj_priv, next,
- &dev_priv->mm.gpu_write_list,
+ &ring->gpu_write_list,
gpu_write_list) {
struct drm_gem_object *obj = &obj_priv->base;
- if ((obj->write_domain & flush_domains) ==
- obj->write_domain &&
- obj_priv->ring->ring_flag == ring->ring_flag) {
+ if (obj->write_domain & flush_domains) {
uint32_t old_write_domain = obj->write_domain;
obj->write_domain = 0;
list_del_init(&obj_priv->gpu_write_list);
- i915_gem_object_move_to_active(obj, seqno, ring);
+ i915_gem_object_move_to_active(obj, ring);
/* update the fence lru list */
if (obj_priv->fence_reg != I915_FENCE_REG_NONE) {
@@ -1593,23 +1674,27 @@ i915_gem_process_flushing_list(struct drm_device *dev,
}
uint32_t
-i915_add_request(struct drm_device *dev, struct drm_file *file_priv,
- uint32_t flush_domains, struct intel_ring_buffer *ring)
+i915_add_request(struct drm_device *dev,
+ struct drm_file *file,
+ struct drm_i915_gem_request *request,
+ struct intel_ring_buffer *ring)
{
drm_i915_private_t *dev_priv = dev->dev_private;
- struct drm_i915_file_private *i915_file_priv = NULL;
- struct drm_i915_gem_request *request;
+ struct drm_i915_file_private *file_priv = NULL;
uint32_t seqno;
int was_empty;
- if (file_priv != NULL)
- i915_file_priv = file_priv->driver_priv;
+ if (file != NULL)
+ file_priv = file->driver_priv;
- request = kzalloc(sizeof(*request), GFP_KERNEL);
- if (request == NULL)
- return 0;
+ if (request == NULL) {
+ request = kzalloc(sizeof(*request), GFP_KERNEL);
+ if (request == NULL)
+ return 0;
+ }
- seqno = ring->add_request(dev, ring, file_priv, flush_domains);
+ seqno = ring->add_request(dev, ring, 0);
+ ring->outstanding_lazy_request = false;
request->seqno = seqno;
request->ring = ring;
@@ -1617,23 +1702,20 @@ i915_add_request(struct drm_device *dev, struct drm_file *file_priv,
was_empty = list_empty(&ring->request_list);
list_add_tail(&request->list, &ring->request_list);
- if (i915_file_priv) {
+ if (file_priv) {
+ spin_lock(&file_priv->mm.lock);
+ request->file_priv = file_priv;
list_add_tail(&request->client_list,
- &i915_file_priv->mm.request_list);
- } else {
- INIT_LIST_HEAD(&request->client_list);
+ &file_priv->mm.request_list);
+ spin_unlock(&file_priv->mm.lock);
}
- /* Associate any objects on the flushing list matching the write
- * domain we're flushing with our flush.
- */
- if (flush_domains != 0)
- i915_gem_process_flushing_list(dev, flush_domains, seqno, ring);
-
if (!dev_priv->mm.suspended) {
- mod_timer(&dev_priv->hangcheck_timer, jiffies + DRM_I915_HANGCHECK_PERIOD);
+ mod_timer(&dev_priv->hangcheck_timer,
+ jiffies + msecs_to_jiffies(DRM_I915_HANGCHECK_PERIOD));
if (was_empty)
- queue_delayed_work(dev_priv->wq, &dev_priv->mm.retire_work, HZ);
+ queue_delayed_work(dev_priv->wq,
+ &dev_priv->mm.retire_work, HZ);
}
return seqno;
}
@@ -1644,91 +1726,105 @@ i915_add_request(struct drm_device *dev, struct drm_file *file_priv,
* Ensures that all commands in the ring are finished
* before signalling the CPU
*/
-static uint32_t
+static void
i915_retire_commands(struct drm_device *dev, struct intel_ring_buffer *ring)
{
uint32_t flush_domains = 0;
/* The sampler always gets flushed on i965 (sigh) */
- if (IS_I965G(dev))
+ if (INTEL_INFO(dev)->gen >= 4)
flush_domains |= I915_GEM_DOMAIN_SAMPLER;
ring->flush(dev, ring,
I915_GEM_DOMAIN_COMMAND, flush_domains);
- return flush_domains;
}
-/**
- * Moves buffers associated only with the given active seqno from the active
- * to inactive list, potentially freeing them.
- */
-static void
-i915_gem_retire_request(struct drm_device *dev,
- struct drm_i915_gem_request *request)
+static inline void
+i915_gem_request_remove_from_client(struct drm_i915_gem_request *request)
{
- drm_i915_private_t *dev_priv = dev->dev_private;
+ struct drm_i915_file_private *file_priv = request->file_priv;
- trace_i915_gem_request_retire(dev, request->seqno);
+ if (!file_priv)
+ return;
- /* Move any buffers on the active list that are no longer referenced
- * by the ringbuffer to the flushing/inactive lists as appropriate.
- */
- spin_lock(&dev_priv->mm.active_list_lock);
- while (!list_empty(&request->ring->active_list)) {
- struct drm_gem_object *obj;
- struct drm_i915_gem_object *obj_priv;
+ spin_lock(&file_priv->mm.lock);
+ list_del(&request->client_list);
+ request->file_priv = NULL;
+ spin_unlock(&file_priv->mm.lock);
+}
- obj_priv = list_first_entry(&request->ring->active_list,
- struct drm_i915_gem_object,
- list);
- obj = &obj_priv->base;
+static void i915_gem_reset_ring_lists(struct drm_i915_private *dev_priv,
+ struct intel_ring_buffer *ring)
+{
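+ /* After a reset none of the pending requests will ever complete, so
+ * drop them and move any objects still active on this ring back to
+ * the inactive list.
+ */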
+ while (!list_empty(&ring->request_list)) {
+ struct drm_i915_gem_request *request;
- /* If the seqno being retired doesn't match the oldest in the
- * list, then the oldest in the list must still be newer than
- * this seqno.
- */
- if (obj_priv->last_rendering_seqno != request->seqno)
- goto out;
+ request = list_first_entry(&ring->request_list,
+ struct drm_i915_gem_request,
+ list);
-#if WATCH_LRU
- DRM_INFO("%s: retire %d moves to inactive list %p\n",
- __func__, request->seqno, obj);
-#endif
+ list_del(&request->list);
+ i915_gem_request_remove_from_client(request);
+ kfree(request);
+ }
- if (obj->write_domain != 0)
- i915_gem_object_move_to_flushing(obj);
- else {
- /* Take a reference on the object so it won't be
- * freed while the spinlock is held. The list
- * protection for this spinlock is safe when breaking
- * the lock like this since the next thing we do
- * is just get the head of the list again.
- */
- drm_gem_object_reference(obj);
- i915_gem_object_move_to_inactive(obj);
- spin_unlock(&dev_priv->mm.active_list_lock);
- drm_gem_object_unreference(obj);
- spin_lock(&dev_priv->mm.active_list_lock);
- }
+ while (!list_empty(&ring->active_list)) {
+ struct drm_i915_gem_object *obj_priv;
+
+ obj_priv = list_first_entry(&ring->active_list,
+ struct drm_i915_gem_object,
+ ring_list);
+
+ obj_priv->base.write_domain = 0;
+ list_del_init(&obj_priv->gpu_write_list);
+ i915_gem_object_move_to_inactive(&obj_priv->base);
}
-out:
- spin_unlock(&dev_priv->mm.active_list_lock);
}
-/**
- * Returns true if seq1 is later than seq2.
- */
-bool
-i915_seqno_passed(uint32_t seq1, uint32_t seq2)
+void i915_gem_reset(struct drm_device *dev)
{
- return (int32_t)(seq1 - seq2) >= 0;
-}
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_gem_object *obj_priv;
+ int i;
-uint32_t
-i915_get_gem_seqno(struct drm_device *dev,
- struct intel_ring_buffer *ring)
-{
- return ring->get_gem_seqno(dev, ring);
+ i915_gem_reset_ring_lists(dev_priv, &dev_priv->render_ring);
+ i915_gem_reset_ring_lists(dev_priv, &dev_priv->bsd_ring);
+ i915_gem_reset_ring_lists(dev_priv, &dev_priv->blt_ring);
+
+ /* Remove anything from the flushing lists. The GPU cache is likely
+ * to be lost on reset along with the data, so simply move the
+ * lost bo to the inactive list.
+ */
+ while (!list_empty(&dev_priv->mm.flushing_list)) {
+ obj_priv = list_first_entry(&dev_priv->mm.flushing_list,
+ struct drm_i915_gem_object,
+ mm_list);
+
+ obj_priv->base.write_domain = 0;
+ list_del_init(&obj_priv->gpu_write_list);
+ i915_gem_object_move_to_inactive(&obj_priv->base);
+ }
+
+ /* Move everything out of the GPU domains to ensure we do any
+ * necessary invalidation upon reuse.
+ */
+ list_for_each_entry(obj_priv,
+ &dev_priv->mm.inactive_list,
+ mm_list)
+ {
+ obj_priv->base.read_domains &= ~I915_GEM_GPU_DOMAINS;
+ }
+
+ /* The fence registers are invalidated so clear them out */
+ for (i = 0; i < 16; i++) {
+ struct drm_i915_fence_reg *reg;
+
+ reg = &dev_priv->fence_regs[i];
+ if (!reg->obj)
+ continue;
+
+ i915_gem_clear_fence_reg(reg->obj);
+ }
}
/**
@@ -1741,38 +1837,58 @@ i915_gem_retire_requests_ring(struct drm_device *dev,
drm_i915_private_t *dev_priv = dev->dev_private;
uint32_t seqno;
- if (!ring->status_page.page_addr
- || list_empty(&ring->request_list))
+ if (!ring->status_page.page_addr ||
+ list_empty(&ring->request_list))
return;
- seqno = i915_get_gem_seqno(dev, ring);
+ WARN_ON(i915_verify_lists(dev));
+ seqno = ring->get_seqno(dev, ring);
while (!list_empty(&ring->request_list)) {
struct drm_i915_gem_request *request;
- uint32_t retiring_seqno;
request = list_first_entry(&ring->request_list,
struct drm_i915_gem_request,
list);
- retiring_seqno = request->seqno;
- if (i915_seqno_passed(seqno, retiring_seqno) ||
- atomic_read(&dev_priv->mm.wedged)) {
- i915_gem_retire_request(dev, request);
+ if (!i915_seqno_passed(seqno, request->seqno))
+ break;
+
+ trace_i915_gem_request_retire(dev, request->seqno);
+
+ list_del(&request->list);
+ i915_gem_request_remove_from_client(request);
+ kfree(request);
+ }
- list_del(&request->list);
- list_del(&request->client_list);
- kfree(request);
- } else
+ /* Move any buffers on the active list that are no longer referenced
+ * by the ringbuffer to the flushing/inactive lists as appropriate.
+ */
+ while (!list_empty(&ring->active_list)) {
+ struct drm_gem_object *obj;
+ struct drm_i915_gem_object *obj_priv;
+
+ obj_priv = list_first_entry(&ring->active_list,
+ struct drm_i915_gem_object,
+ ring_list);
+
+ if (!i915_seqno_passed(seqno, obj_priv->last_rendering_seqno))
break;
+
+ obj = &obj_priv->base;
+ if (obj->write_domain != 0)
+ i915_gem_object_move_to_flushing(obj);
+ else
+ i915_gem_object_move_to_inactive(obj);
}
if (unlikely (dev_priv->trace_irq_seqno &&
i915_seqno_passed(dev_priv->trace_irq_seqno, seqno))) {
-
ring->user_irq_put(dev, ring);
dev_priv->trace_irq_seqno = 0;
}
+
+ WARN_ON(i915_verify_lists(dev));
}
void
@@ -1790,16 +1906,16 @@ i915_gem_retire_requests(struct drm_device *dev)
*/
list_for_each_entry_safe(obj_priv, tmp,
&dev_priv->mm.deferred_free_list,
- list)
+ mm_list)
i915_gem_free_object_tail(&obj_priv->base);
}
i915_gem_retire_requests_ring(dev, &dev_priv->render_ring);
- if (HAS_BSD(dev))
- i915_gem_retire_requests_ring(dev, &dev_priv->bsd_ring);
+ i915_gem_retire_requests_ring(dev, &dev_priv->bsd_ring);
+ i915_gem_retire_requests_ring(dev, &dev_priv->blt_ring);
}
-void
+static void
i915_gem_retire_work_handler(struct work_struct *work)
{
drm_i915_private_t *dev_priv;
@@ -1809,20 +1925,25 @@ i915_gem_retire_work_handler(struct work_struct *work)
mm.retire_work.work);
dev = dev_priv->dev;
- mutex_lock(&dev->struct_mutex);
+ /* Come back later if the device is busy... */
+ if (!mutex_trylock(&dev->struct_mutex)) {
+ queue_delayed_work(dev_priv->wq, &dev_priv->mm.retire_work, HZ);
+ return;
+ }
+
i915_gem_retire_requests(dev);
if (!dev_priv->mm.suspended &&
(!list_empty(&dev_priv->render_ring.request_list) ||
- (HAS_BSD(dev) &&
- !list_empty(&dev_priv->bsd_ring.request_list))))
+ !list_empty(&dev_priv->bsd_ring.request_list) ||
+ !list_empty(&dev_priv->blt_ring.request_list)))
queue_delayed_work(dev_priv->wq, &dev_priv->mm.retire_work, HZ);
mutex_unlock(&dev->struct_mutex);
}
int
i915_do_wait_request(struct drm_device *dev, uint32_t seqno,
- int interruptible, struct intel_ring_buffer *ring)
+ bool interruptible, struct intel_ring_buffer *ring)
{
drm_i915_private_t *dev_priv = dev->dev_private;
u32 ier;
@@ -1831,9 +1952,16 @@ i915_do_wait_request(struct drm_device *dev, uint32_t seqno,
BUG_ON(seqno == 0);
if (atomic_read(&dev_priv->mm.wedged))
- return -EIO;
+ return -EAGAIN;
- if (!i915_seqno_passed(ring->get_gem_seqno(dev, ring), seqno)) {
+ if (ring->outstanding_lazy_request) {
+ seqno = i915_add_request(dev, NULL, NULL, ring);
+ if (seqno == 0)
+ return -ENOMEM;
+ }
+ BUG_ON(seqno == dev_priv->next_seqno);
+
+ if (!i915_seqno_passed(ring->get_seqno(dev, ring), seqno)) {
if (HAS_PCH_SPLIT(dev))
ier = I915_READ(DEIER) | I915_READ(GTIER);
else
@@ -1852,12 +1980,12 @@ i915_do_wait_request(struct drm_device *dev, uint32_t seqno,
if (interruptible)
ret = wait_event_interruptible(ring->irq_queue,
i915_seqno_passed(
- ring->get_gem_seqno(dev, ring), seqno)
+ ring->get_seqno(dev, ring), seqno)
|| atomic_read(&dev_priv->mm.wedged));
else
wait_event(ring->irq_queue,
i915_seqno_passed(
- ring->get_gem_seqno(dev, ring), seqno)
+ ring->get_seqno(dev, ring), seqno)
|| atomic_read(&dev_priv->mm.wedged));
ring->user_irq_put(dev, ring);
@@ -1866,11 +1994,12 @@ i915_do_wait_request(struct drm_device *dev, uint32_t seqno,
trace_i915_gem_request_wait_end(dev, seqno);
}
if (atomic_read(&dev_priv->mm.wedged))
- ret = -EIO;
+ ret = -EAGAIN;
if (ret && ret != -ERESTARTSYS)
- DRM_ERROR("%s returns %d (awaiting %d at %d)\n",
- __func__, ret, seqno, ring->get_gem_seqno(dev, ring));
+ DRM_ERROR("%s returns %d (awaiting %d at %d, next %d)\n",
+ __func__, ret, seqno, ring->get_seqno(dev, ring),
+ dev_priv->next_seqno);
/* Directly dispatch request retiring. While we have the work queue
* to handle this, the waiter on a request often wants an associated
@@ -1889,27 +2018,48 @@ i915_do_wait_request(struct drm_device *dev, uint32_t seqno,
*/
static int
i915_wait_request(struct drm_device *dev, uint32_t seqno,
- struct intel_ring_buffer *ring)
+ struct intel_ring_buffer *ring)
{
return i915_do_wait_request(dev, seqno, 1, ring);
}
static void
+i915_gem_flush_ring(struct drm_device *dev,
+ struct drm_file *file_priv,
+ struct intel_ring_buffer *ring,
+ uint32_t invalidate_domains,
+ uint32_t flush_domains)
+{
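+ /* Emit the flush on this ring, then retire the flushed write domains
+ * by moving the affected objects onto the ring's active list.
+ */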
+ ring->flush(dev, ring, invalidate_domains, flush_domains);
+ i915_gem_process_flushing_list(dev, flush_domains, ring);
+}
+
+static void
i915_gem_flush(struct drm_device *dev,
+ struct drm_file *file_priv,
uint32_t invalidate_domains,
- uint32_t flush_domains)
+ uint32_t flush_domains,
+ uint32_t flush_rings)
{
drm_i915_private_t *dev_priv = dev->dev_private;
+
if (flush_domains & I915_GEM_DOMAIN_CPU)
drm_agp_chipset_flush(dev);
- dev_priv->render_ring.flush(dev, &dev_priv->render_ring,
- invalidate_domains,
- flush_domains);
- if (HAS_BSD(dev))
- dev_priv->bsd_ring.flush(dev, &dev_priv->bsd_ring,
- invalidate_domains,
- flush_domains);
+ if ((flush_domains | invalidate_domains) & I915_GEM_GPU_DOMAINS) {
+ if (flush_rings & RING_RENDER)
+ i915_gem_flush_ring(dev, file_priv,
+ &dev_priv->render_ring,
+ invalidate_domains, flush_domains);
+ if (flush_rings & RING_BSD)
+ i915_gem_flush_ring(dev, file_priv,
+ &dev_priv->bsd_ring,
+ invalidate_domains, flush_domains);
+ if (flush_rings & RING_BLT)
+ i915_gem_flush_ring(dev, file_priv,
+ &dev_priv->blt_ring,
+ invalidate_domains, flush_domains);
+ }
}
/**
@@ -1917,7 +2067,8 @@ i915_gem_flush(struct drm_device *dev,
* safe to unbind from the GTT or access from the CPU.
*/
static int
-i915_gem_object_wait_rendering(struct drm_gem_object *obj)
+i915_gem_object_wait_rendering(struct drm_gem_object *obj,
+ bool interruptible)
{
struct drm_device *dev = obj->dev;
struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
@@ -1932,13 +2083,11 @@ i915_gem_object_wait_rendering(struct drm_gem_object *obj)
* it.
*/
if (obj_priv->active) {
-#if WATCH_BUF
- DRM_INFO("%s: object %p wait for seqno %08x\n",
- __func__, obj, obj_priv->last_rendering_seqno);
-#endif
- ret = i915_wait_request(dev,
- obj_priv->last_rendering_seqno, obj_priv->ring);
- if (ret != 0)
+ ret = i915_do_wait_request(dev,
+ obj_priv->last_rendering_seqno,
+ interruptible,
+ obj_priv->ring);
+ if (ret)
return ret;
}
@@ -1952,14 +2101,10 @@ int
i915_gem_object_unbind(struct drm_gem_object *obj)
{
struct drm_device *dev = obj->dev;
- drm_i915_private_t *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = dev->dev_private;
struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
int ret = 0;
-#if WATCH_BUF
- DRM_INFO("%s:%d %p\n", __func__, __LINE__, obj);
- DRM_INFO("gtt_space %p\n", obj_priv->gtt_space);
-#endif
if (obj_priv->gtt_space == NULL)
return 0;
@@ -1984,33 +2129,27 @@ i915_gem_object_unbind(struct drm_gem_object *obj)
* should be safe and we need to cleanup or else we might
* cause memory corruption through use-after-free.
*/
+ if (ret) {
+ i915_gem_clflush_object(obj);
+ obj->read_domains = obj->write_domain = I915_GEM_DOMAIN_CPU;
+ }
/* release the fence reg _after_ flushing */
if (obj_priv->fence_reg != I915_FENCE_REG_NONE)
i915_gem_clear_fence_reg(obj);
- if (obj_priv->agp_mem != NULL) {
- drm_unbind_agp(obj_priv->agp_mem);
- drm_free_agp(obj_priv->agp_mem, obj->size / PAGE_SIZE);
- obj_priv->agp_mem = NULL;
- }
+ drm_unbind_agp(obj_priv->agp_mem);
+ drm_free_agp(obj_priv->agp_mem, obj->size / PAGE_SIZE);
i915_gem_object_put_pages(obj);
BUG_ON(obj_priv->pages_refcount);
- if (obj_priv->gtt_space) {
- atomic_dec(&dev->gtt_count);
- atomic_sub(obj->size, &dev->gtt_memory);
-
- drm_mm_put_block(obj_priv->gtt_space);
- obj_priv->gtt_space = NULL;
- }
+ i915_gem_info_remove_gtt(dev_priv, obj->size);
+ list_del_init(&obj_priv->mm_list);
- /* Remove ourselves from the LRU list if present. */
- spin_lock(&dev_priv->mm.active_list_lock);
- if (!list_empty(&obj_priv->list))
- list_del_init(&obj_priv->list);
- spin_unlock(&dev_priv->mm.active_list_lock);
+ drm_mm_put_block(obj_priv->gtt_space);
+ obj_priv->gtt_space = NULL;
+ obj_priv->gtt_offset = 0;
if (i915_gem_object_is_purgeable(obj_priv))
i915_gem_object_truncate(obj);
@@ -2020,48 +2159,48 @@ i915_gem_object_unbind(struct drm_gem_object *obj)
return ret;
}
+static int i915_ring_idle(struct drm_device *dev,
+ struct intel_ring_buffer *ring)
+{
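+ /* Nothing to do if the ring has no pending writes and no active
+ * objects; otherwise flush the GPU domains and wait for the
+ * resulting request to retire.
+ */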
+ if (list_empty(&ring->gpu_write_list) && list_empty(&ring->active_list))
+ return 0;
+
+ i915_gem_flush_ring(dev, NULL, ring,
+ I915_GEM_GPU_DOMAINS, I915_GEM_GPU_DOMAINS);
+ return i915_wait_request(dev,
+ i915_gem_next_request_seqno(dev, ring),
+ ring);
+}
+
int
i915_gpu_idle(struct drm_device *dev)
{
drm_i915_private_t *dev_priv = dev->dev_private;
bool lists_empty;
- uint32_t seqno1, seqno2;
int ret;
- spin_lock(&dev_priv->mm.active_list_lock);
lists_empty = (list_empty(&dev_priv->mm.flushing_list) &&
- list_empty(&dev_priv->render_ring.active_list) &&
- (!HAS_BSD(dev) ||
- list_empty(&dev_priv->bsd_ring.active_list)));
- spin_unlock(&dev_priv->mm.active_list_lock);
-
+ list_empty(&dev_priv->mm.active_list));
if (lists_empty)
return 0;
/* Flush everything onto the inactive list. */
- i915_gem_flush(dev, I915_GEM_GPU_DOMAINS, I915_GEM_GPU_DOMAINS);
- seqno1 = i915_add_request(dev, NULL, I915_GEM_GPU_DOMAINS,
- &dev_priv->render_ring);
- if (seqno1 == 0)
- return -ENOMEM;
- ret = i915_wait_request(dev, seqno1, &dev_priv->render_ring);
-
- if (HAS_BSD(dev)) {
- seqno2 = i915_add_request(dev, NULL, I915_GEM_GPU_DOMAINS,
- &dev_priv->bsd_ring);
- if (seqno2 == 0)
- return -ENOMEM;
+ ret = i915_ring_idle(dev, &dev_priv->render_ring);
+ if (ret)
+ return ret;
- ret = i915_wait_request(dev, seqno2, &dev_priv->bsd_ring);
- if (ret)
- return ret;
- }
+ ret = i915_ring_idle(dev, &dev_priv->bsd_ring);
+ if (ret)
+ return ret;
+ ret = i915_ring_idle(dev, &dev_priv->blt_ring);
+ if (ret)
+ return ret;
- return ret;
+ return 0;
}
-int
+static int
i915_gem_object_get_pages(struct drm_gem_object *obj,
gfp_t gfpmask)
{
@@ -2241,7 +2380,8 @@ static void i830_write_fence_reg(struct drm_i915_fence_reg *reg)
I915_WRITE(FENCE_REG_830_0 + (regnum * 4), val);
}
-static int i915_find_fence_reg(struct drm_device *dev)
+static int i915_find_fence_reg(struct drm_device *dev,
+ bool interruptible)
{
struct drm_i915_fence_reg *reg = NULL;
struct drm_i915_gem_object *obj_priv = NULL;
@@ -2286,7 +2426,7 @@ static int i915_find_fence_reg(struct drm_device *dev)
* private reference to obj like the other callers of put_fence_reg
* (set_tiling ioctl) do. */
drm_gem_object_reference(obj);
- ret = i915_gem_object_put_fence_reg(obj);
+ ret = i915_gem_object_put_fence_reg(obj, interruptible);
drm_gem_object_unreference(obj);
if (ret != 0)
return ret;
@@ -2308,7 +2448,8 @@ static int i915_find_fence_reg(struct drm_device *dev)
* and tiling format.
*/
int
-i915_gem_object_get_fence_reg(struct drm_gem_object *obj)
+i915_gem_object_get_fence_reg(struct drm_gem_object *obj,
+ bool interruptible)
{
struct drm_device *dev = obj->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
@@ -2343,7 +2484,7 @@ i915_gem_object_get_fence_reg(struct drm_gem_object *obj)
break;
}
- ret = i915_find_fence_reg(dev);
+ ret = i915_find_fence_reg(dev, interruptible);
if (ret < 0)
return ret;
@@ -2421,15 +2562,19 @@ i915_gem_clear_fence_reg(struct drm_gem_object *obj)
* i915_gem_object_put_fence_reg - waits on outstanding fenced access
* to the buffer to finish, and then resets the fence register.
* @obj: tiled object holding a fence register.
+ * @interruptible: whether the wait upon the fence is interruptible
*
* Zeroes out the fence register itself and clears out the associated
* data structures in dev_priv and obj_priv.
*/
int
-i915_gem_object_put_fence_reg(struct drm_gem_object *obj)
+i915_gem_object_put_fence_reg(struct drm_gem_object *obj,
+ bool interruptible)
{
struct drm_device *dev = obj->dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
+ struct drm_i915_fence_reg *reg;
if (obj_priv->fence_reg == I915_FENCE_REG_NONE)
return 0;
@@ -2444,20 +2589,23 @@ i915_gem_object_put_fence_reg(struct drm_gem_object *obj)
* therefore we must wait for any outstanding access to complete
* before clearing the fence.
*/
- if (!IS_I965G(dev)) {
+ reg = &dev_priv->fence_regs[obj_priv->fence_reg];
+ if (reg->gpu) {
int ret;
ret = i915_gem_object_flush_gpu_write_domain(obj);
- if (ret != 0)
+ if (ret)
return ret;
- ret = i915_gem_object_wait_rendering(obj);
- if (ret != 0)
+ ret = i915_gem_object_wait_rendering(obj, interruptible);
+ if (ret)
return ret;
+
+ reg->gpu = false;
}
i915_gem_object_flush_gtt_write_domain(obj);
- i915_gem_clear_fence_reg (obj);
+ i915_gem_clear_fence_reg(obj);
return 0;
}
@@ -2490,7 +2638,7 @@ i915_gem_object_bind_to_gtt(struct drm_gem_object *obj, unsigned alignment)
/* If the object is bigger than the entire aperture, reject it early
* before evicting everything in a vain attempt to find space.
*/
- if (obj->size > dev->gtt_total) {
+ if (obj->size > dev_priv->mm.gtt_total) {
DRM_ERROR("Attempting to bind an object larger than the aperture\n");
return -E2BIG;
}
@@ -2498,19 +2646,13 @@ i915_gem_object_bind_to_gtt(struct drm_gem_object *obj, unsigned alignment)
search_free:
free_space = drm_mm_search_free(&dev_priv->mm.gtt_space,
obj->size, alignment, 0);
- if (free_space != NULL) {
+ if (free_space != NULL)
obj_priv->gtt_space = drm_mm_get_block(free_space, obj->size,
alignment);
- if (obj_priv->gtt_space != NULL)
- obj_priv->gtt_offset = obj_priv->gtt_space->start;
- }
if (obj_priv->gtt_space == NULL) {
/* If the gtt is empty and we're still having trouble
* fitting our object in, we're out of memory.
*/
-#if WATCH_LRU
- DRM_INFO("%s: GTT full, evicting something\n", __func__);
-#endif
ret = i915_gem_evict_something(dev, obj->size, alignment);
if (ret)
return ret;
@@ -2518,10 +2660,6 @@ i915_gem_object_bind_to_gtt(struct drm_gem_object *obj, unsigned alignment)
goto search_free;
}
-#if WATCH_BUF
- DRM_INFO("Binding object of size %zd at 0x%08x\n",
- obj->size, obj_priv->gtt_offset);
-#endif
ret = i915_gem_object_get_pages(obj, gfpmask);
if (ret) {
drm_mm_put_block(obj_priv->gtt_space);
@@ -2553,7 +2691,7 @@ i915_gem_object_bind_to_gtt(struct drm_gem_object *obj, unsigned alignment)
obj_priv->agp_mem = drm_agp_bind_pages(dev,
obj_priv->pages,
obj->size >> PAGE_SHIFT,
- obj_priv->gtt_offset,
+ obj_priv->gtt_space->start,
obj_priv->agp_type);
if (obj_priv->agp_mem == NULL) {
i915_gem_object_put_pages(obj);
@@ -2566,11 +2704,10 @@ i915_gem_object_bind_to_gtt(struct drm_gem_object *obj, unsigned alignment)
goto search_free;
}
- atomic_inc(&dev->gtt_count);
- atomic_add(obj->size, &dev->gtt_memory);
/* keep track of the bound object by adding it to the inactive list */
- list_add_tail(&obj_priv->list, &dev_priv->mm.inactive_list);
+ list_add_tail(&obj_priv->mm_list, &dev_priv->mm.inactive_list);
+ i915_gem_info_add_gtt(dev_priv, obj->size);
/* Assert that the object is not currently in any GPU domain. As it
* wasn't in the GTT, there shouldn't be any way it could have been in
@@ -2579,6 +2716,7 @@ i915_gem_object_bind_to_gtt(struct drm_gem_object *obj, unsigned alignment)
BUG_ON(obj->read_domains & I915_GEM_GPU_DOMAINS);
BUG_ON(obj->write_domain & I915_GEM_GPU_DOMAINS);
+ obj_priv->gtt_offset = obj_priv->gtt_space->start;
trace_i915_gem_object_bind(obj, obj_priv->gtt_offset);
return 0;
@@ -2607,20 +2745,21 @@ i915_gem_object_flush_gpu_write_domain(struct drm_gem_object *obj)
{
struct drm_device *dev = obj->dev;
uint32_t old_write_domain;
- struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
if ((obj->write_domain & I915_GEM_GPU_DOMAINS) == 0)
return 0;
/* Queue the GPU write cache flushing we need. */
old_write_domain = obj->write_domain;
- i915_gem_flush(dev, 0, obj->write_domain);
- if (i915_add_request(dev, NULL, obj->write_domain, obj_priv->ring) == 0)
- return -ENOMEM;
+ i915_gem_flush_ring(dev, NULL,
+ to_intel_bo(obj)->ring,
+ 0, obj->write_domain);
+ BUG_ON(obj->write_domain);
trace_i915_gem_object_change_domain(obj,
obj->read_domains,
old_write_domain);
+
return 0;
}
@@ -2665,26 +2804,6 @@ i915_gem_object_flush_cpu_write_domain(struct drm_gem_object *obj)
old_write_domain);
}
-int
-i915_gem_object_flush_write_domain(struct drm_gem_object *obj)
-{
- int ret = 0;
-
- switch (obj->write_domain) {
- case I915_GEM_DOMAIN_GTT:
- i915_gem_object_flush_gtt_write_domain(obj);
- break;
- case I915_GEM_DOMAIN_CPU:
- i915_gem_object_flush_cpu_write_domain(obj);
- break;
- default:
- ret = i915_gem_object_flush_gpu_write_domain(obj);
- break;
- }
-
- return ret;
-}
-
/**
* Moves a single object to the GTT read, and possibly write domain.
*
@@ -2705,29 +2824,22 @@ i915_gem_object_set_to_gtt_domain(struct drm_gem_object *obj, int write)
ret = i915_gem_object_flush_gpu_write_domain(obj);
if (ret != 0)
return ret;
-
- /* Wait on any GPU rendering and flushing to occur. */
- ret = i915_gem_object_wait_rendering(obj);
- if (ret != 0)
+ ret = i915_gem_object_wait_rendering(obj, true);
+ if (ret)
return ret;
+ i915_gem_object_flush_cpu_write_domain(obj);
+
old_write_domain = obj->write_domain;
old_read_domains = obj->read_domains;
- /* If we're writing through the GTT domain, then CPU and GPU caches
- * will need to be invalidated at next use.
- */
- if (write)
- obj->read_domains &= I915_GEM_DOMAIN_GTT;
-
- i915_gem_object_flush_cpu_write_domain(obj);
-
/* It should now be out of any other write domains, and we can update
* the domain values for our changes.
*/
BUG_ON((obj->write_domain & ~I915_GEM_DOMAIN_GTT) != 0);
obj->read_domains |= I915_GEM_DOMAIN_GTT;
if (write) {
+ obj->read_domains = I915_GEM_DOMAIN_GTT;
obj->write_domain = I915_GEM_DOMAIN_GTT;
obj_priv->dirty = 1;
}
@@ -2744,11 +2856,11 @@ i915_gem_object_set_to_gtt_domain(struct drm_gem_object *obj, int write)
* wait, as during the modesetting process we're not supposed to be interrupted.
*/
int
-i915_gem_object_set_to_display_plane(struct drm_gem_object *obj)
+i915_gem_object_set_to_display_plane(struct drm_gem_object *obj,
+ bool pipelined)
{
- struct drm_device *dev = obj->dev;
struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
- uint32_t old_write_domain, old_read_domains;
+ uint32_t old_read_domains;
int ret;
/* Not valid to be called on unbound objects. */
@@ -2759,40 +2871,39 @@ i915_gem_object_set_to_display_plane(struct drm_gem_object *obj)
if (ret)
return ret;
- /* Wait on any GPU rendering and flushing to occur. */
- if (obj_priv->active) {
-#if WATCH_BUF
- DRM_INFO("%s: object %p wait for seqno %08x\n",
- __func__, obj, obj_priv->last_rendering_seqno);
-#endif
- ret = i915_do_wait_request(dev,
- obj_priv->last_rendering_seqno,
- 0,
- obj_priv->ring);
- if (ret != 0)
+ /* Currently, we are always called from a non-interruptible context. */
+ if (!pipelined) {
+ ret = i915_gem_object_wait_rendering(obj, false);
+ if (ret)
return ret;
}
i915_gem_object_flush_cpu_write_domain(obj);
- old_write_domain = obj->write_domain;
old_read_domains = obj->read_domains;
-
- /* It should now be out of any other write domains, and we can update
- * the domain values for our changes.
- */
- BUG_ON((obj->write_domain & ~I915_GEM_DOMAIN_GTT) != 0);
- obj->read_domains = I915_GEM_DOMAIN_GTT;
- obj->write_domain = I915_GEM_DOMAIN_GTT;
- obj_priv->dirty = 1;
+ obj->read_domains |= I915_GEM_DOMAIN_GTT;
trace_i915_gem_object_change_domain(obj,
old_read_domains,
- old_write_domain);
+ obj->write_domain);
return 0;
}
+int
+i915_gem_object_flush_gpu(struct drm_i915_gem_object *obj,
+ bool interruptible)
+{
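+ /* Flush any outstanding GPU write domain for this object and wait
+ * for its last rendering to complete.
+ */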
+ if (!obj->active)
+ return 0;
+
+ if (obj->base.write_domain & I915_GEM_GPU_DOMAINS)
+ i915_gem_flush_ring(obj->base.dev, NULL, obj->ring,
+ 0, obj->base.write_domain);
+
+ return i915_gem_object_wait_rendering(&obj->base, interruptible);
+}
+
/**
* Moves a single object to the CPU read, and possibly write domain.
*
@@ -2806,13 +2917,11 @@ i915_gem_object_set_to_cpu_domain(struct drm_gem_object *obj, int write)
int ret;
ret = i915_gem_object_flush_gpu_write_domain(obj);
- if (ret)
- return ret;
-
- /* Wait on any GPU rendering and flushing to occur. */
- ret = i915_gem_object_wait_rendering(obj);
if (ret != 0)
return ret;
+ ret = i915_gem_object_wait_rendering(obj, true);
+ if (ret)
+ return ret;
i915_gem_object_flush_gtt_write_domain(obj);
@@ -2840,7 +2949,7 @@ i915_gem_object_set_to_cpu_domain(struct drm_gem_object *obj, int write)
* need to be invalidated at next use.
*/
if (write) {
- obj->read_domains &= I915_GEM_DOMAIN_CPU;
+ obj->read_domains = I915_GEM_DOMAIN_CPU;
obj->write_domain = I915_GEM_DOMAIN_CPU;
}
@@ -2963,26 +3072,18 @@ i915_gem_object_set_to_cpu_domain(struct drm_gem_object *obj, int write)
* drm_agp_chipset_flush
*/
static void
-i915_gem_object_set_to_gpu_domain(struct drm_gem_object *obj)
+i915_gem_object_set_to_gpu_domain(struct drm_gem_object *obj,
+ struct intel_ring_buffer *ring)
{
struct drm_device *dev = obj->dev;
- drm_i915_private_t *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = dev->dev_private;
struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
uint32_t invalidate_domains = 0;
uint32_t flush_domains = 0;
uint32_t old_read_domains;
- BUG_ON(obj->pending_read_domains & I915_GEM_DOMAIN_CPU);
- BUG_ON(obj->pending_write_domain == I915_GEM_DOMAIN_CPU);
-
intel_mark_busy(dev, obj);
-#if WATCH_BUF
- DRM_INFO("%s: object %p read %08x -> %08x write %08x -> %08x\n",
- __func__, obj,
- obj->read_domains, obj->pending_read_domains,
- obj->write_domain, obj->pending_write_domain);
-#endif
/*
* If the object isn't moving to a new write domain,
* let the object stay in multiple read domains
@@ -2999,7 +3100,8 @@ i915_gem_object_set_to_gpu_domain(struct drm_gem_object *obj)
* write domain
*/
if (obj->write_domain &&
- obj->write_domain != obj->pending_read_domains) {
+ (obj->write_domain != obj->pending_read_domains ||
+ obj_priv->ring != ring)) {
flush_domains |= obj->write_domain;
invalidate_domains |=
obj->pending_read_domains & ~obj->write_domain;
@@ -3009,13 +3111,8 @@ i915_gem_object_set_to_gpu_domain(struct drm_gem_object *obj)
* stale data. That is, any new read domains.
*/
invalidate_domains |= obj->pending_read_domains & ~obj->read_domains;
- if ((flush_domains | invalidate_domains) & I915_GEM_DOMAIN_CPU) {
-#if WATCH_BUF
- DRM_INFO("%s: CPU domain flush %08x invalidate %08x\n",
- __func__, flush_domains, invalidate_domains);
-#endif
+ if ((flush_domains | invalidate_domains) & I915_GEM_DOMAIN_CPU)
i915_gem_clflush_object(obj);
- }
old_read_domains = obj->read_domains;
@@ -3029,21 +3126,12 @@ i915_gem_object_set_to_gpu_domain(struct drm_gem_object *obj)
obj->pending_write_domain = obj->write_domain;
obj->read_domains = obj->pending_read_domains;
- if (flush_domains & I915_GEM_GPU_DOMAINS) {
- if (obj_priv->ring == &dev_priv->render_ring)
- dev_priv->flush_rings |= FLUSH_RENDER_RING;
- else if (obj_priv->ring == &dev_priv->bsd_ring)
- dev_priv->flush_rings |= FLUSH_BSD_RING;
- }
-
dev->invalidate_domains |= invalidate_domains;
dev->flush_domains |= flush_domains;
-#if WATCH_BUF
- DRM_INFO("%s: read %08x write %08x invalidate %08x flush %08x\n",
- __func__,
- obj->read_domains, obj->write_domain,
- dev->invalidate_domains, dev->flush_domains);
-#endif
+ if (flush_domains & I915_GEM_GPU_DOMAINS)
+ dev_priv->mm.flush_rings |= obj_priv->ring->id;
+ if (invalidate_domains & I915_GEM_GPU_DOMAINS)
+ dev_priv->mm.flush_rings |= ring->id;
trace_i915_gem_object_change_domain(obj,
old_read_domains,
@@ -3107,13 +3195,12 @@ i915_gem_object_set_cpu_read_domain_range(struct drm_gem_object *obj,
return i915_gem_object_set_to_cpu_domain(obj, 0);
ret = i915_gem_object_flush_gpu_write_domain(obj);
+ if (ret != 0)
+ return ret;
+ ret = i915_gem_object_wait_rendering(obj, true);
if (ret)
return ret;
- /* Wait on any GPU rendering and flushing to occur. */
- ret = i915_gem_object_wait_rendering(obj);
- if (ret != 0)
- return ret;
i915_gem_object_flush_gtt_write_domain(obj);
/* If we're already fully in the CPU read domain, we're done. */
@@ -3160,339 +3247,478 @@ i915_gem_object_set_cpu_read_domain_range(struct drm_gem_object *obj,
return 0;
}
-/**
- * Pin an object to the GTT and evaluate the relocations landing in it.
- */
static int
-i915_gem_object_pin_and_relocate(struct drm_gem_object *obj,
- struct drm_file *file_priv,
- struct drm_i915_gem_exec_object2 *entry,
- struct drm_i915_gem_relocation_entry *relocs)
+i915_gem_execbuffer_relocate_entry(struct drm_i915_gem_object *obj,
+ struct drm_file *file_priv,
+ struct drm_i915_gem_exec_object2 *entry,
+ struct drm_i915_gem_relocation_entry *reloc)
{
- struct drm_device *dev = obj->dev;
- drm_i915_private_t *dev_priv = dev->dev_private;
- struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
- int i, ret;
- void __iomem *reloc_page;
- bool need_fence;
+ struct drm_device *dev = obj->base.dev;
+ struct drm_gem_object *target_obj;
+ uint32_t target_offset;
+ int ret = -EINVAL;
+
+ target_obj = drm_gem_object_lookup(dev, file_priv,
+ reloc->target_handle);
+ if (target_obj == NULL)
+ return -ENOENT;
- need_fence = entry->flags & EXEC_OBJECT_NEEDS_FENCE &&
- obj_priv->tiling_mode != I915_TILING_NONE;
+ target_offset = to_intel_bo(target_obj)->gtt_offset;
- /* Check fence reg constraints and rebind if necessary */
- if (need_fence &&
- !i915_gem_object_fence_offset_ok(obj,
- obj_priv->tiling_mode)) {
- ret = i915_gem_object_unbind(obj);
- if (ret)
- return ret;
+#if WATCH_RELOC
+ DRM_INFO("%s: obj %p offset %08x target %d "
+ "read %08x write %08x gtt %08x "
+ "presumed %08x delta %08x\n",
+ __func__,
+ obj,
+ (int) reloc->offset,
+ (int) reloc->target_handle,
+ (int) reloc->read_domains,
+ (int) reloc->write_domain,
+ (int) target_offset,
+ (int) reloc->presumed_offset,
+ reloc->delta);
+#endif
+
+ /* The target buffer should have appeared before us in the
+ * exec_object list, so it should have a GTT space bound by now.
+ */
+ if (target_offset == 0) {
+ DRM_ERROR("No GTT space found for object %d\n",
+ reloc->target_handle);
+ goto err;
}
- /* Choose the GTT offset for our buffer and put it there. */
- ret = i915_gem_object_pin(obj, (uint32_t) entry->alignment);
- if (ret)
- return ret;
+ /* Validate that the target is in a valid r/w GPU domain */
+ if (reloc->write_domain & (reloc->write_domain - 1)) {
+ DRM_ERROR("reloc with multiple write domains: "
+ "obj %p target %d offset %d "
+ "read %08x write %08x",
+ obj, reloc->target_handle,
+ (int) reloc->offset,
+ reloc->read_domains,
+ reloc->write_domain);
+ goto err;
+ }
+ if (reloc->write_domain & I915_GEM_DOMAIN_CPU ||
+ reloc->read_domains & I915_GEM_DOMAIN_CPU) {
+ DRM_ERROR("reloc with read/write CPU domains: "
+ "obj %p target %d offset %d "
+ "read %08x write %08x",
+ obj, reloc->target_handle,
+ (int) reloc->offset,
+ reloc->read_domains,
+ reloc->write_domain);
+ goto err;
+ }
+ if (reloc->write_domain && target_obj->pending_write_domain &&
+ reloc->write_domain != target_obj->pending_write_domain) {
+ DRM_ERROR("Write domain conflict: "
+ "obj %p target %d offset %d "
+ "new %08x old %08x\n",
+ obj, reloc->target_handle,
+ (int) reloc->offset,
+ reloc->write_domain,
+ target_obj->pending_write_domain);
+ goto err;
+ }
- /*
- * Pre-965 chips need a fence register set up in order to
- * properly handle blits to/from tiled surfaces.
+ target_obj->pending_read_domains |= reloc->read_domains;
+ target_obj->pending_write_domain |= reloc->write_domain;
+
+ /* If the relocation already has the right value in it, no
+ * more work needs to be done.
*/
- if (need_fence) {
- ret = i915_gem_object_get_fence_reg(obj);
- if (ret != 0) {
- i915_gem_object_unpin(obj);
- return ret;
- }
+ if (target_offset == reloc->presumed_offset)
+ goto out;
+
+ /* Check that the relocation address is valid... */
+ if (reloc->offset > obj->base.size - 4) {
+ DRM_ERROR("Relocation beyond object bounds: "
+ "obj %p target %d offset %d size %d.\n",
+ obj, reloc->target_handle,
+ (int) reloc->offset,
+ (int) obj->base.size);
+ goto err;
+ }
+ if (reloc->offset & 3) {
+ DRM_ERROR("Relocation not 4-byte aligned: "
+ "obj %p target %d offset %d.\n",
+ obj, reloc->target_handle,
+ (int) reloc->offset);
+ goto err;
+ }
+
+ /* and points to somewhere within the target object. */
+ if (reloc->delta >= target_obj->size) {
+ DRM_ERROR("Relocation beyond target object bounds: "
+ "obj %p target %d delta %d size %d.\n",
+ obj, reloc->target_handle,
+ (int) reloc->delta,
+ (int) target_obj->size);
+ goto err;
}
- entry->offset = obj_priv->gtt_offset;
+ reloc->delta += target_offset;
+ if (obj->base.write_domain == I915_GEM_DOMAIN_CPU) {
+ uint32_t page_offset = reloc->offset & ~PAGE_MASK;
+ char *vaddr;
- /* Apply the relocations, using the GTT aperture to avoid cache
- * flushing requirements.
- */
- for (i = 0; i < entry->relocation_count; i++) {
- struct drm_i915_gem_relocation_entry *reloc= &relocs[i];
- struct drm_gem_object *target_obj;
- struct drm_i915_gem_object *target_obj_priv;
- uint32_t reloc_val, reloc_offset;
+ vaddr = kmap_atomic(obj->pages[reloc->offset >> PAGE_SHIFT]);
+ *(uint32_t *)(vaddr + page_offset) = reloc->delta;
+ kunmap_atomic(vaddr);
+ } else {
+ struct drm_i915_private *dev_priv = dev->dev_private;
uint32_t __iomem *reloc_entry;
+ void __iomem *reloc_page;
- target_obj = drm_gem_object_lookup(obj->dev, file_priv,
- reloc->target_handle);
- if (target_obj == NULL) {
- i915_gem_object_unpin(obj);
- return -ENOENT;
- }
- target_obj_priv = to_intel_bo(target_obj);
+ ret = i915_gem_object_set_to_gtt_domain(&obj->base, 1);
+ if (ret)
+ goto err;
-#if WATCH_RELOC
- DRM_INFO("%s: obj %p offset %08x target %d "
- "read %08x write %08x gtt %08x "
- "presumed %08x delta %08x\n",
- __func__,
- obj,
- (int) reloc->offset,
- (int) reloc->target_handle,
- (int) reloc->read_domains,
- (int) reloc->write_domain,
- (int) target_obj_priv->gtt_offset,
- (int) reloc->presumed_offset,
- reloc->delta);
-#endif
+ /* Map the page containing the relocation we're going to perform. */
+ reloc->offset += obj->gtt_offset;
+ reloc_page = io_mapping_map_atomic_wc(dev_priv->mm.gtt_mapping,
+ reloc->offset & PAGE_MASK);
+ reloc_entry = (uint32_t __iomem *)
+ (reloc_page + (reloc->offset & ~PAGE_MASK));
+ iowrite32(reloc->delta, reloc_entry);
+ io_mapping_unmap_atomic(reloc_page);
+ }
- /* The target buffer should have appeared before us in the
- * exec_object list, so it should have a GTT space bound by now.
- */
- if (target_obj_priv->gtt_space == NULL) {
- DRM_ERROR("No GTT space found for object %d\n",
- reloc->target_handle);
- drm_gem_object_unreference(target_obj);
- i915_gem_object_unpin(obj);
- return -EINVAL;
- }
+ /* and update the user's relocation entry */
+ reloc->presumed_offset = target_offset;
- /* Validate that the target is in a valid r/w GPU domain */
- if (reloc->write_domain & (reloc->write_domain - 1)) {
- DRM_ERROR("reloc with multiple write domains: "
- "obj %p target %d offset %d "
- "read %08x write %08x",
- obj, reloc->target_handle,
- (int) reloc->offset,
- reloc->read_domains,
- reloc->write_domain);
- drm_gem_object_unreference(target_obj);
- i915_gem_object_unpin(obj);
- return -EINVAL;
- }
- if (reloc->write_domain & I915_GEM_DOMAIN_CPU ||
- reloc->read_domains & I915_GEM_DOMAIN_CPU) {
- DRM_ERROR("reloc with read/write CPU domains: "
- "obj %p target %d offset %d "
- "read %08x write %08x",
- obj, reloc->target_handle,
- (int) reloc->offset,
- reloc->read_domains,
- reloc->write_domain);
- drm_gem_object_unreference(target_obj);
- i915_gem_object_unpin(obj);
- return -EINVAL;
- }
- if (reloc->write_domain && target_obj->pending_write_domain &&
- reloc->write_domain != target_obj->pending_write_domain) {
- DRM_ERROR("Write domain conflict: "
- "obj %p target %d offset %d "
- "new %08x old %08x\n",
- obj, reloc->target_handle,
- (int) reloc->offset,
- reloc->write_domain,
- target_obj->pending_write_domain);
- drm_gem_object_unreference(target_obj);
- i915_gem_object_unpin(obj);
- return -EINVAL;
- }
+out:
+ ret = 0;
+err:
+ drm_gem_object_unreference(target_obj);
+ return ret;
+}
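/*
 * Illustrative sketch (not part of the patch): the validate-then-patch steps
 * that i915_gem_execbuffer_relocate_entry performs above, reduced to plain
 * memory. Every name below is invented for the example; only the checks
 * (offset within bounds, 4-byte aligned, delta inside the target) and the
 * final 32-bit write mirror the hunk.
 */
#include <stdint.h>
#include <stdio.h>
#include <string.h>

struct fake_reloc {
	uint32_t offset;          /* where in the batch to write */
	uint32_t delta;           /* offset inside the target object */
	uint32_t presumed_offset; /* what userspace thought the target address was */
};

static int apply_reloc(uint8_t *batch, uint32_t batch_size,
		       uint32_t target_offset, uint32_t target_size,
		       struct fake_reloc *r)
{
	uint32_t value;

	if (target_offset == r->presumed_offset)
		return 0;			/* already correct, nothing to do */
	if (r->offset > batch_size - 4)
		return -1;			/* relocation beyond object bounds */
	if (r->offset & 3)
		return -1;			/* not 4-byte aligned */
	if (r->delta >= target_size)
		return -1;			/* points outside the target object */

	value = target_offset + r->delta;
	memcpy(batch + r->offset, &value, sizeof(value));
	r->presumed_offset = target_offset;	/* reported back to "userspace" */
	return 0;
}

int main(void)
{
	uint8_t batch[64] = {0};
	struct fake_reloc r = { .offset = 8, .delta = 16, .presumed_offset = 0 };

	if (apply_reloc(batch, sizeof(batch), 0x1000u, 4096u, &r) == 0)
		printf("patched dword at %u with %#x\n", r.offset, 0x1000u + 16);
	return 0;
}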
- target_obj->pending_read_domains |= reloc->read_domains;
- target_obj->pending_write_domain |= reloc->write_domain;
+static int
+i915_gem_execbuffer_relocate_object(struct drm_i915_gem_object *obj,
+ struct drm_file *file_priv,
+ struct drm_i915_gem_exec_object2 *entry)
+{
+ struct drm_i915_gem_relocation_entry __user *user_relocs;
+ int i, ret;
- /* If the relocation already has the right value in it, no
- * more work needs to be done.
- */
- if (target_obj_priv->gtt_offset == reloc->presumed_offset) {
- drm_gem_object_unreference(target_obj);
- continue;
- }
+ user_relocs = (void __user *)(uintptr_t)entry->relocs_ptr;
+ for (i = 0; i < entry->relocation_count; i++) {
+ struct drm_i915_gem_relocation_entry reloc;
- /* Check that the relocation address is valid... */
- if (reloc->offset > obj->size - 4) {
- DRM_ERROR("Relocation beyond object bounds: "
- "obj %p target %d offset %d size %d.\n",
- obj, reloc->target_handle,
- (int) reloc->offset, (int) obj->size);
- drm_gem_object_unreference(target_obj);
- i915_gem_object_unpin(obj);
- return -EINVAL;
- }
- if (reloc->offset & 3) {
- DRM_ERROR("Relocation not 4-byte aligned: "
- "obj %p target %d offset %d.\n",
- obj, reloc->target_handle,
- (int) reloc->offset);
- drm_gem_object_unreference(target_obj);
- i915_gem_object_unpin(obj);
- return -EINVAL;
- }
+ if (__copy_from_user_inatomic(&reloc,
+ user_relocs+i,
+ sizeof(reloc)))
+ return -EFAULT;
- /* and points to somewhere within the target object. */
- if (reloc->delta >= target_obj->size) {
- DRM_ERROR("Relocation beyond target object bounds: "
- "obj %p target %d delta %d size %d.\n",
- obj, reloc->target_handle,
- (int) reloc->delta, (int) target_obj->size);
- drm_gem_object_unreference(target_obj);
- i915_gem_object_unpin(obj);
- return -EINVAL;
- }
+ ret = i915_gem_execbuffer_relocate_entry(obj, file_priv, entry, &reloc);
+ if (ret)
+ return ret;
- ret = i915_gem_object_set_to_gtt_domain(obj, 1);
- if (ret != 0) {
- drm_gem_object_unreference(target_obj);
- i915_gem_object_unpin(obj);
- return -EINVAL;
- }
+ if (__copy_to_user_inatomic(&user_relocs[i].presumed_offset,
+ &reloc.presumed_offset,
+ sizeof(reloc.presumed_offset)))
+ return -EFAULT;
+ }
- /* Map the page containing the relocation we're going to
- * perform.
- */
- reloc_offset = obj_priv->gtt_offset + reloc->offset;
- reloc_page = io_mapping_map_atomic_wc(dev_priv->mm.gtt_mapping,
- (reloc_offset &
- ~(PAGE_SIZE - 1)),
- KM_USER0);
- reloc_entry = (uint32_t __iomem *)(reloc_page +
- (reloc_offset & (PAGE_SIZE - 1)));
- reloc_val = target_obj_priv->gtt_offset + reloc->delta;
-
-#if WATCH_BUF
- DRM_INFO("Applied relocation: %p@0x%08x %08x -> %08x\n",
- obj, (unsigned int) reloc->offset,
- readl(reloc_entry), reloc_val);
-#endif
- writel(reloc_val, reloc_entry);
- io_mapping_unmap_atomic(reloc_page, KM_USER0);
+ return 0;
+}
- /* The updated presumed offset for this entry will be
- * copied back out to the user.
- */
- reloc->presumed_offset = target_obj_priv->gtt_offset;
+static int
+i915_gem_execbuffer_relocate_object_slow(struct drm_i915_gem_object *obj,
+ struct drm_file *file_priv,
+ struct drm_i915_gem_exec_object2 *entry,
+ struct drm_i915_gem_relocation_entry *relocs)
+{
+ int i, ret;
- drm_gem_object_unreference(target_obj);
+ for (i = 0; i < entry->relocation_count; i++) {
+ ret = i915_gem_execbuffer_relocate_entry(obj, file_priv, entry, &relocs[i]);
+ if (ret)
+ return ret;
}
-#if WATCH_BUF
- if (0)
- i915_gem_dump_object(obj, 128, __func__, ~0);
-#endif
return 0;
}
-/* Throttle our rendering by waiting until the ring has completed our requests
- * emitted over 20 msec ago.
- *
- * Note that if we were to use the current jiffies each time around the loop,
- * we wouldn't escape the function with any frames outstanding if the time to
- * render a frame was over 20ms.
- *
- * This should get us reasonable parallelism between CPU and GPU but also
- * relatively low latency when blocking on a particular request to finish.
- */
static int
-i915_gem_ring_throttle(struct drm_device *dev, struct drm_file *file_priv)
+i915_gem_execbuffer_relocate(struct drm_device *dev,
+ struct drm_file *file,
+ struct drm_gem_object **object_list,
+ struct drm_i915_gem_exec_object2 *exec_list,
+ int count)
{
- struct drm_i915_file_private *i915_file_priv = file_priv->driver_priv;
- int ret = 0;
- unsigned long recent_enough = jiffies - msecs_to_jiffies(20);
+ int i, ret;
- mutex_lock(&dev->struct_mutex);
- while (!list_empty(&i915_file_priv->mm.request_list)) {
- struct drm_i915_gem_request *request;
+ for (i = 0; i < count; i++) {
+ struct drm_i915_gem_object *obj = to_intel_bo(object_list[i]);
+ obj->base.pending_read_domains = 0;
+ obj->base.pending_write_domain = 0;
+ ret = i915_gem_execbuffer_relocate_object(obj, file,
+ &exec_list[i]);
+ if (ret)
+ return ret;
+ }
- request = list_first_entry(&i915_file_priv->mm.request_list,
- struct drm_i915_gem_request,
- client_list);
+ return 0;
+}
- if (time_after_eq(request->emitted_jiffies, recent_enough))
- break;
+static int
+i915_gem_execbuffer_reserve(struct drm_device *dev,
+ struct drm_file *file,
+ struct drm_gem_object **object_list,
+ struct drm_i915_gem_exec_object2 *exec_list,
+ int count)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ int ret, i, retry;
+
+ /* attempt to pin all of the buffers into the GTT */
+ for (retry = 0; retry < 2; retry++) {
+ ret = 0;
+ for (i = 0; i < count; i++) {
+ struct drm_i915_gem_exec_object2 *entry = &exec_list[i];
+ struct drm_i915_gem_object *obj = to_intel_bo(object_list[i]);

+ bool need_fence =
+ entry->flags & EXEC_OBJECT_NEEDS_FENCE &&
+ obj->tiling_mode != I915_TILING_NONE;
+
+ /* Check fence reg constraints and rebind if necessary */
+ if (need_fence &&
+ !i915_gem_object_fence_offset_ok(&obj->base,
+ obj->tiling_mode)) {
+ ret = i915_gem_object_unbind(&obj->base);
+ if (ret)
+ break;
+ }
+
+ ret = i915_gem_object_pin(&obj->base, entry->alignment);
+ if (ret)
+ break;
+
+ /*
+ * Pre-965 chips need a fence register set up in order
+ * to properly handle blits to/from tiled surfaces.
+ */
+ if (need_fence) {
+ ret = i915_gem_object_get_fence_reg(&obj->base, true);
+ if (ret) {
+ i915_gem_object_unpin(&obj->base);
+ break;
+ }
+
+ dev_priv->fence_regs[obj->fence_reg].gpu = true;
+ }
+
+ entry->offset = obj->gtt_offset;
+ }
- ret = i915_wait_request(dev, request->seqno, request->ring);
- if (ret != 0)
+ while (i--)
+ i915_gem_object_unpin(object_list[i]);
+
+ if (ret == 0)
break;
+
+ if (ret != -ENOSPC || retry)
+ return ret;
+
+ ret = i915_gem_evict_everything(dev);
+ if (ret)
+ return ret;
}
- mutex_unlock(&dev->struct_mutex);
- return ret;
+ return 0;
}
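/*
 * Illustrative sketch (not part of the patch): the reserve/evict/retry shape
 * used by i915_gem_execbuffer_reserve above -- try to place every buffer, drop
 * the per-pass holds exactly as the "while (i--)" unwind does, and treat
 * -ENOSPC on the first pass as recoverable by evicting and retrying once.
 * reserve_one(), release_one() and evict_all() are stand-ins, not kernel API.
 */
#include <errno.h>
#include <stdio.h>

static int reserve_one(int idx, int pass)
{
	/* Pretend buffer 2 only fits after an eviction pass. */
	return (idx == 2 && pass == 0) ? -ENOSPC : 0;
}

static void release_one(int idx) { printf("release %d\n", idx); }
static int evict_all(void)       { printf("evict everything\n"); return 0; }

static int reserve_all(int count)
{
	int ret, i, retry;

	for (retry = 0; retry < 2; retry++) {
		ret = 0;
		for (i = 0; i < count; i++) {
			ret = reserve_one(i, retry);
			if (ret)
				break;
		}

		/* Unwind whatever this pass managed to reserve. */
		while (i--)
			release_one(i);

		if (ret == 0)
			break;
		if (ret != -ENOSPC || retry)
			return ret;	/* hard failure, or already retried */

		ret = evict_all();
		if (ret)
			return ret;
	}
	return 0;
}

int main(void)
{
	return reserve_all(4) ? 1 : 0;
}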
static int
-i915_gem_get_relocs_from_user(struct drm_i915_gem_exec_object2 *exec_list,
- uint32_t buffer_count,
- struct drm_i915_gem_relocation_entry **relocs)
+i915_gem_execbuffer_relocate_slow(struct drm_device *dev,
+ struct drm_file *file,
+ struct drm_gem_object **object_list,
+ struct drm_i915_gem_exec_object2 *exec_list,
+ int count)
{
- uint32_t reloc_count = 0, reloc_index = 0, i;
- int ret;
+ struct drm_i915_gem_relocation_entry *reloc;
+ int i, total, ret;
- *relocs = NULL;
- for (i = 0; i < buffer_count; i++) {
- if (reloc_count + exec_list[i].relocation_count < reloc_count)
- return -EINVAL;
- reloc_count += exec_list[i].relocation_count;
+ for (i = 0; i < count; i++) {
+ struct drm_i915_gem_object *obj = to_intel_bo(object_list[i]);
+ obj->in_execbuffer = false;
}
- *relocs = drm_calloc_large(reloc_count, sizeof(**relocs));
- if (*relocs == NULL) {
- DRM_ERROR("failed to alloc relocs, count %d\n", reloc_count);
+ mutex_unlock(&dev->struct_mutex);
+
+ total = 0;
+ for (i = 0; i < count; i++)
+ total += exec_list[i].relocation_count;
+
+ reloc = drm_malloc_ab(total, sizeof(*reloc));
+ if (reloc == NULL) {
+ mutex_lock(&dev->struct_mutex);
return -ENOMEM;
}
- for (i = 0; i < buffer_count; i++) {
+ total = 0;
+ for (i = 0; i < count; i++) {
struct drm_i915_gem_relocation_entry __user *user_relocs;
user_relocs = (void __user *)(uintptr_t)exec_list[i].relocs_ptr;
- ret = copy_from_user(&(*relocs)[reloc_index],
- user_relocs,
- exec_list[i].relocation_count *
- sizeof(**relocs));
- if (ret != 0) {
- drm_free_large(*relocs);
- *relocs = NULL;
- return -EFAULT;
+ if (copy_from_user(reloc+total, user_relocs,
+ exec_list[i].relocation_count *
+ sizeof(*reloc))) {
+ ret = -EFAULT;
+ mutex_lock(&dev->struct_mutex);
+ goto err;
}
- reloc_index += exec_list[i].relocation_count;
+ total += exec_list[i].relocation_count;
}
- return 0;
+ ret = i915_mutex_lock_interruptible(dev);
+ if (ret) {
+ mutex_lock(&dev->struct_mutex);
+ goto err;
+ }
+
+ ret = i915_gem_execbuffer_reserve(dev, file,
+ object_list, exec_list,
+ count);
+ if (ret)
+ goto err;
+
+ total = 0;
+ for (i = 0; i < count; i++) {
+ struct drm_i915_gem_object *obj = to_intel_bo(object_list[i]);
+ obj->base.pending_read_domains = 0;
+ obj->base.pending_write_domain = 0;
+ ret = i915_gem_execbuffer_relocate_object_slow(obj, file,
+ &exec_list[i],
+ reloc + total);
+ if (ret)
+ goto err;
+
+ total += exec_list[i].relocation_count;
+ }
+
+ /* Leave the user relocations as they are; this is the painfully slow path,
+ * and we want to avoid the complication of dropping the lock whilst
+ * having buffers reserved in the aperture and so causing spurious
+ * ENOSPC for random operations.
+ */
+
+err:
+ drm_free_large(reloc);
+ return ret;
}
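/*
 * Illustrative sketch (not part of the patch): the fast-path/slow-path split
 * around i915_gem_execbuffer_relocate_slow. Under the big lock only
 * non-faulting user copies are allowed; if one faults, back out, drop the
 * lock, pull the data in with a faulting copy, then retake the lock and redo
 * the work from the prefetched array. The helpers and the pthread mutex are
 * stand-ins for struct_mutex and the copy_from_user variants.
 */
#include <errno.h>
#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t big_lock = PTHREAD_MUTEX_INITIALIZER;

static int relocate_fast(void)
{
	/* Pretend the user pages were not resident: a non-faulting copy fails. */
	return -EFAULT;
}

static int relocate_slow(const int *prefetched, int n)
{
	for (int i = 0; i < n; i++)
		printf("apply reloc %d from prefetched copy\n", prefetched[i]);
	return 0;
}

int main(void)
{
	int relocs[3] = { 10, 20, 30 };
	int ret;

	pthread_mutex_lock(&big_lock);
	ret = relocate_fast();
	if (ret == -EFAULT) {
		/* Faulting copies are not allowed under the lock: drop it,
		 * fetch the relocations while unlocked, then retry. */
		pthread_mutex_unlock(&big_lock);
		/* (a real implementation would copy_from_user() here) */
		pthread_mutex_lock(&big_lock);
		ret = relocate_slow(relocs, 3);
	}
	pthread_mutex_unlock(&big_lock);
	return ret ? 1 : 0;
}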
static int
-i915_gem_put_relocs_to_user(struct drm_i915_gem_exec_object2 *exec_list,
- uint32_t buffer_count,
- struct drm_i915_gem_relocation_entry *relocs)
+i915_gem_execbuffer_move_to_gpu(struct drm_device *dev,
+ struct drm_file *file,
+ struct intel_ring_buffer *ring,
+ struct drm_gem_object **objects,
+ int count)
{
- uint32_t reloc_count = 0, i;
- int ret = 0;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ int ret, i;
- if (relocs == NULL)
- return 0;
+ /* Zero the global flush/invalidate flags. These
+ * will be modified as new domains are computed
+ * for each object
+ */
+ dev->invalidate_domains = 0;
+ dev->flush_domains = 0;
+ dev_priv->mm.flush_rings = 0;
+ for (i = 0; i < count; i++)
+ i915_gem_object_set_to_gpu_domain(objects[i], ring);
- for (i = 0; i < buffer_count; i++) {
- struct drm_i915_gem_relocation_entry __user *user_relocs;
- int unwritten;
+ if (dev->invalidate_domains | dev->flush_domains) {
+#if WATCH_EXEC
+ DRM_INFO("%s: invalidate_domains %08x flush_domains %08x\n",
+ __func__,
+ dev->invalidate_domains,
+ dev->flush_domains);
+#endif
+ i915_gem_flush(dev, file,
+ dev->invalidate_domains,
+ dev->flush_domains,
+ dev_priv->mm.flush_rings);
+ }
- user_relocs = (void __user *)(uintptr_t)exec_list[i].relocs_ptr;
+ for (i = 0; i < count; i++) {
+ struct drm_i915_gem_object *obj = to_intel_bo(objects[i]);
+ /* XXX replace with semaphores */
+ if (obj->ring && ring != obj->ring) {
+ ret = i915_gem_object_wait_rendering(&obj->base, true);
+ if (ret)
+ return ret;
+ }
+ }
- unwritten = copy_to_user(user_relocs,
- &relocs[reloc_count],
- exec_list[i].relocation_count *
- sizeof(*relocs));
+ return 0;
+}
- if (unwritten) {
- ret = -EFAULT;
- goto err;
- }
+/* Throttle our rendering by waiting until the ring has completed our requests
+ * emitted over 20 msec ago.
+ *
+ * Note that if we were to use the current jiffies each time around the loop,
+ * we wouldn't escape the function with any frames outstanding if the time to
+ * render a frame was over 20ms.
+ *
+ * This should get us reasonable parallelism between CPU and GPU but also
+ * relatively low latency when blocking on a particular request to finish.
+ */
+static int
+i915_gem_ring_throttle(struct drm_device *dev, struct drm_file *file)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_file_private *file_priv = file->driver_priv;
+ unsigned long recent_enough = jiffies - msecs_to_jiffies(20);
+ struct drm_i915_gem_request *request;
+ struct intel_ring_buffer *ring = NULL;
+ u32 seqno = 0;
+ int ret;
- reloc_count += exec_list[i].relocation_count;
+ spin_lock(&file_priv->mm.lock);
+ list_for_each_entry(request, &file_priv->mm.request_list, client_list) {
+ if (time_after_eq(request->emitted_jiffies, recent_enough))
+ break;
+
+ ring = request->ring;
+ seqno = request->seqno;
}
+ spin_unlock(&file_priv->mm.lock);
-err:
- drm_free_large(relocs);
+ if (seqno == 0)
+ return 0;
+
+ ret = 0;
+ if (!i915_seqno_passed(ring->get_seqno(dev, ring), seqno)) {
+ /* And wait for the seqno passing without holding any locks and
+ * causing extra latency for others. This is safe as the irq
+ * generation is designed to be run atomically and so is
+ * lockless.
+ */
+ ring->user_irq_get(dev, ring);
+ ret = wait_event_interruptible(ring->irq_queue,
+ i915_seqno_passed(ring->get_seqno(dev, ring), seqno)
+ || atomic_read(&dev_priv->mm.wedged));
+ ring->user_irq_put(dev, ring);
+
+ if (ret == 0 && atomic_read(&dev_priv->mm.wedged))
+ ret = -EIO;
+ }
+
+ if (ret == 0)
+ queue_delayed_work(dev_priv->wq, &dev_priv->mm.retire_work, 0);
return ret;
}
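/*
 * Illustrative sketch (not part of the patch): how i915_gem_ring_throttle
 * above picks the request to wait on. Requests are kept oldest-first; remember
 * the newest request that is already older than the 20ms window and wait for
 * that one, so at most ~20ms of work stays queued per client. Plain integers
 * stand in for jiffies and seqnos.
 */
#include <stdio.h>

struct req { unsigned long emitted; unsigned int seqno; };

static unsigned int pick_seqno(const struct req *reqs, int n,
			       unsigned long now, unsigned long window)
{
	unsigned long recent_enough = now - window;
	unsigned int seqno = 0;

	for (int i = 0; i < n; i++) {
		if (reqs[i].emitted >= recent_enough)
			break;			/* this one is still "fresh" */
		seqno = reqs[i].seqno;		/* old enough: candidate to wait on */
	}
	return seqno;				/* 0 means nothing to wait for */
}

int main(void)
{
	struct req reqs[] = {
		{ .emitted = 100, .seqno = 1 },
		{ .emitted = 110, .seqno = 2 },
		{ .emitted = 130, .seqno = 3 },	/* inside the window at now=140 */
	};

	printf("wait for seqno %u\n", pick_seqno(reqs, 3, 140, 20));
	return 0;
}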
static int
-i915_gem_check_execbuffer (struct drm_i915_gem_execbuffer2 *exec,
- uint64_t exec_offset)
+i915_gem_check_execbuffer(struct drm_i915_gem_execbuffer2 *exec,
+ uint64_t exec_offset)
{
uint32_t exec_start, exec_len;
@@ -3509,44 +3735,39 @@ i915_gem_check_execbuffer (struct drm_i915_gem_execbuffer2 *exec,
}
static int
-i915_gem_wait_for_pending_flip(struct drm_device *dev,
- struct drm_gem_object **object_list,
- int count)
+validate_exec_list(struct drm_i915_gem_exec_object2 *exec,
+ int count)
{
- drm_i915_private_t *dev_priv = dev->dev_private;
- struct drm_i915_gem_object *obj_priv;
- DEFINE_WAIT(wait);
- int i, ret = 0;
+ int i;
- for (;;) {
- prepare_to_wait(&dev_priv->pending_flip_queue,
- &wait, TASK_INTERRUPTIBLE);
- for (i = 0; i < count; i++) {
- obj_priv = to_intel_bo(object_list[i]);
- if (atomic_read(&obj_priv->pending_flip) > 0)
- break;
- }
- if (i == count)
- break;
+ for (i = 0; i < count; i++) {
+ char __user *ptr = (char __user *)(uintptr_t)exec[i].relocs_ptr;
+ int length; /* limited by fault_in_pages_readable() */
- if (!signal_pending(current)) {
- mutex_unlock(&dev->struct_mutex);
- schedule();
- mutex_lock(&dev->struct_mutex);
- continue;
- }
- ret = -ERESTARTSYS;
- break;
+ /* First check for malicious input causing overflow */
+ if (exec[i].relocation_count >
+ INT_MAX / sizeof(struct drm_i915_gem_relocation_entry))
+ return -EINVAL;
+
+ length = exec[i].relocation_count *
+ sizeof(struct drm_i915_gem_relocation_entry);
+ if (!access_ok(VERIFY_READ, ptr, length))
+ return -EFAULT;
+
+ /* we may also need to update the presumed offsets */
+ if (!access_ok(VERIFY_WRITE, ptr, length))
+ return -EFAULT;
+
+ if (fault_in_pages_readable(ptr, length))
+ return -EFAULT;
}
- finish_wait(&dev_priv->pending_flip_queue, &wait);
- return ret;
+ return 0;
}
-
-int
+static int
i915_gem_do_execbuffer(struct drm_device *dev, void *data,
- struct drm_file *file_priv,
+ struct drm_file *file,
struct drm_i915_gem_execbuffer2 *args,
struct drm_i915_gem_exec_object2 *exec_list)
{
@@ -3555,26 +3776,47 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
struct drm_gem_object *batch_obj;
struct drm_i915_gem_object *obj_priv;
struct drm_clip_rect *cliprects = NULL;
- struct drm_i915_gem_relocation_entry *relocs = NULL;
- int ret = 0, ret2, i, pinned = 0;
+ struct drm_i915_gem_request *request = NULL;
+ int ret, i, flips;
uint64_t exec_offset;
- uint32_t seqno, flush_domains, reloc_index;
- int pin_tries, flips;
struct intel_ring_buffer *ring = NULL;
+ ret = i915_gem_check_is_wedged(dev);
+ if (ret)
+ return ret;
+
+ ret = validate_exec_list(exec_list, args->buffer_count);
+ if (ret)
+ return ret;
+
#if WATCH_EXEC
DRM_INFO("buffers_ptr %d buffer_count %d len %08x\n",
(int) args->buffers_ptr, args->buffer_count, args->batch_len);
#endif
- if (args->flags & I915_EXEC_BSD) {
+ switch (args->flags & I915_EXEC_RING_MASK) {
+ case I915_EXEC_DEFAULT:
+ case I915_EXEC_RENDER:
+ ring = &dev_priv->render_ring;
+ break;
+ case I915_EXEC_BSD:
if (!HAS_BSD(dev)) {
- DRM_ERROR("execbuf with wrong flag\n");
+ DRM_ERROR("execbuf with invalid ring (BSD)\n");
return -EINVAL;
}
ring = &dev_priv->bsd_ring;
- } else {
- ring = &dev_priv->render_ring;
+ break;
+ case I915_EXEC_BLT:
+ if (!HAS_BLT(dev)) {
+ DRM_ERROR("execbuf with invalid ring (BLT)\n");
+ return -EINVAL;
+ }
+ ring = &dev_priv->blt_ring;
+ break;
+ default:
+ DRM_ERROR("execbuf with unknown ring: %d\n",
+ (int)(args->flags & I915_EXEC_RING_MASK));
+ return -EINVAL;
}
if (args->buffer_count < 1) {
@@ -3609,20 +3851,15 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
}
}
- ret = i915_gem_get_relocs_from_user(exec_list, args->buffer_count,
- &relocs);
- if (ret != 0)
+ request = kzalloc(sizeof(*request), GFP_KERNEL);
+ if (request == NULL) {
+ ret = -ENOMEM;
goto pre_mutex_err;
+ }
- mutex_lock(&dev->struct_mutex);
-
- i915_verify_inactive(dev, __FILE__, __LINE__);
-
- if (atomic_read(&dev_priv->mm.wedged)) {
- mutex_unlock(&dev->struct_mutex);
- ret = -EIO;
+ ret = i915_mutex_lock_interruptible(dev);
+ if (ret)
goto pre_mutex_err;
- }
if (dev_priv->mm.suspended) {
mutex_unlock(&dev->struct_mutex);
@@ -3631,9 +3868,8 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
}
/* Look up object handles */
- flips = 0;
for (i = 0; i < args->buffer_count; i++) {
- object_list[i] = drm_gem_object_lookup(dev, file_priv,
+ object_list[i] = drm_gem_object_lookup(dev, file,
exec_list[i].handle);
if (object_list[i] == NULL) {
DRM_ERROR("Invalid object handle %d at index %d\n",
@@ -3654,75 +3890,28 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
goto err;
}
obj_priv->in_execbuffer = true;
- flips += atomic_read(&obj_priv->pending_flip);
- }
-
- if (flips > 0) {
- ret = i915_gem_wait_for_pending_flip(dev, object_list,
- args->buffer_count);
- if (ret)
- goto err;
}
- /* Pin and relocate */
- for (pin_tries = 0; ; pin_tries++) {
- ret = 0;
- reloc_index = 0;
-
- for (i = 0; i < args->buffer_count; i++) {
- object_list[i]->pending_read_domains = 0;
- object_list[i]->pending_write_domain = 0;
- ret = i915_gem_object_pin_and_relocate(object_list[i],
- file_priv,
- &exec_list[i],
- &relocs[reloc_index]);
- if (ret)
- break;
- pinned = i + 1;
- reloc_index += exec_list[i].relocation_count;
- }
- /* success */
- if (ret == 0)
- break;
+ /* Move the objects en-masse into the GTT, evicting if necessary. */
+ ret = i915_gem_execbuffer_reserve(dev, file,
+ object_list, exec_list,
+ args->buffer_count);
+ if (ret)
+ goto err;
- /* error other than GTT full, or we've already tried again */
- if (ret != -ENOSPC || pin_tries >= 1) {
- if (ret != -ERESTARTSYS) {
- unsigned long long total_size = 0;
- int num_fences = 0;
- for (i = 0; i < args->buffer_count; i++) {
- obj_priv = to_intel_bo(object_list[i]);
-
- total_size += object_list[i]->size;
- num_fences +=
- exec_list[i].flags & EXEC_OBJECT_NEEDS_FENCE &&
- obj_priv->tiling_mode != I915_TILING_NONE;
- }
- DRM_ERROR("Failed to pin buffer %d of %d, total %llu bytes, %d fences: %d\n",
- pinned+1, args->buffer_count,
- total_size, num_fences,
- ret);
- DRM_ERROR("%d objects [%d pinned], "
- "%d object bytes [%d pinned], "
- "%d/%d gtt bytes\n",
- atomic_read(&dev->object_count),
- atomic_read(&dev->pin_count),
- atomic_read(&dev->object_memory),
- atomic_read(&dev->pin_memory),
- atomic_read(&dev->gtt_memory),
- dev->gtt_total);
- }
- goto err;
+ /* The objects are in their final locations, apply the relocations. */
+ ret = i915_gem_execbuffer_relocate(dev, file,
+ object_list, exec_list,
+ args->buffer_count);
+ if (ret) {
+ if (ret == -EFAULT) {
+ ret = i915_gem_execbuffer_relocate_slow(dev, file,
+ object_list,
+ exec_list,
+ args->buffer_count);
+ BUG_ON(!mutex_is_locked(&dev->struct_mutex));
}
-
- /* unpin all of our buffers */
- for (i = 0; i < pinned; i++)
- i915_gem_object_unpin(object_list[i]);
- pinned = 0;
-
- /* evict everyone we can from the aperture */
- ret = i915_gem_evict_everything(dev);
- if (ret && ret != -ENOSPC)
+ if (ret)
goto err;
}
@@ -3735,72 +3924,28 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
}
batch_obj->pending_read_domains |= I915_GEM_DOMAIN_COMMAND;
- /* Sanity check the batch buffer, prior to moving objects */
- exec_offset = exec_list[args->buffer_count - 1].offset;
- ret = i915_gem_check_execbuffer (args, exec_offset);
+ /* Sanity check the batch buffer */
+ exec_offset = to_intel_bo(batch_obj)->gtt_offset;
+ ret = i915_gem_check_execbuffer(args, exec_offset);
if (ret != 0) {
DRM_ERROR("execbuf with invalid offset/length\n");
goto err;
}
- i915_verify_inactive(dev, __FILE__, __LINE__);
-
- /* Zero the global flush/invalidate flags. These
- * will be modified as new domains are computed
- * for each object
- */
- dev->invalidate_domains = 0;
- dev->flush_domains = 0;
- dev_priv->flush_rings = 0;
-
- for (i = 0; i < args->buffer_count; i++) {
- struct drm_gem_object *obj = object_list[i];
-
- /* Compute new gpu domains and update invalidate/flush */
- i915_gem_object_set_to_gpu_domain(obj);
- }
-
- i915_verify_inactive(dev, __FILE__, __LINE__);
-
- if (dev->invalidate_domains | dev->flush_domains) {
-#if WATCH_EXEC
- DRM_INFO("%s: invalidate_domains %08x flush_domains %08x\n",
- __func__,
- dev->invalidate_domains,
- dev->flush_domains);
-#endif
- i915_gem_flush(dev,
- dev->invalidate_domains,
- dev->flush_domains);
- if (dev_priv->flush_rings & FLUSH_RENDER_RING)
- (void)i915_add_request(dev, file_priv,
- dev->flush_domains,
- &dev_priv->render_ring);
- if (dev_priv->flush_rings & FLUSH_BSD_RING)
- (void)i915_add_request(dev, file_priv,
- dev->flush_domains,
- &dev_priv->bsd_ring);
- }
+ ret = i915_gem_execbuffer_move_to_gpu(dev, file, ring,
+ object_list, args->buffer_count);
+ if (ret)
+ goto err;
for (i = 0; i < args->buffer_count; i++) {
struct drm_gem_object *obj = object_list[i];
- struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
uint32_t old_write_domain = obj->write_domain;
-
obj->write_domain = obj->pending_write_domain;
- if (obj->write_domain)
- list_move_tail(&obj_priv->gpu_write_list,
- &dev_priv->mm.gpu_write_list);
- else
- list_del_init(&obj_priv->gpu_write_list);
-
trace_i915_gem_object_change_domain(obj,
obj->read_domains,
old_write_domain);
}
- i915_verify_inactive(dev, __FILE__, __LINE__);
-
#if WATCH_COHERENCY
for (i = 0; i < args->buffer_count; i++) {
i915_gem_object_check_coherency(object_list[i],
@@ -3815,9 +3960,38 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
~0);
#endif
+ /* Check for any pending flips. As we only maintain a flip queue depth
+ * of 1, we can simply insert a WAIT for the next display flip prior
+ * to executing the batch and avoid stalling the CPU.
+ */
+ flips = 0;
+ for (i = 0; i < args->buffer_count; i++) {
+ if (object_list[i]->write_domain)
+ flips |= atomic_read(&to_intel_bo(object_list[i])->pending_flip);
+ }
+ if (flips) {
+ int plane, flip_mask;
+
+ for (plane = 0; flips >> plane; plane++) {
+ if (((flips >> plane) & 1) == 0)
+ continue;
+
+ if (plane)
+ flip_mask = MI_WAIT_FOR_PLANE_B_FLIP;
+ else
+ flip_mask = MI_WAIT_FOR_PLANE_A_FLIP;
+
+ intel_ring_begin(dev, ring, 2);
+ intel_ring_emit(dev, ring,
+ MI_WAIT_FOR_EVENT | flip_mask);
+ intel_ring_emit(dev, ring, MI_NOOP);
+ intel_ring_advance(dev, ring);
+ }
+ }
+
/* Exec the batchbuffer */
ret = ring->dispatch_gem_execbuffer(dev, ring, args,
- cliprects, exec_offset);
+ cliprects, exec_offset);
if (ret) {
DRM_ERROR("dispatch failed %d\n", ret);
goto err;
@@ -3827,38 +4001,21 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
* Ensure that the commands in the batch buffer are
* finished before the interrupt fires
*/
- flush_domains = i915_retire_commands(dev, ring);
+ i915_retire_commands(dev, ring);
- i915_verify_inactive(dev, __FILE__, __LINE__);
-
- /*
- * Get a seqno representing the execution of the current buffer,
- * which we can wait on. We would like to mitigate these interrupts,
- * likely by only creating seqnos occasionally (so that we have
- * *some* interrupts representing completion of buffers that we can
- * wait on when trying to clear up gtt space).
- */
- seqno = i915_add_request(dev, file_priv, flush_domains, ring);
- BUG_ON(seqno == 0);
for (i = 0; i < args->buffer_count; i++) {
struct drm_gem_object *obj = object_list[i];
- obj_priv = to_intel_bo(obj);
- i915_gem_object_move_to_active(obj, seqno, ring);
-#if WATCH_LRU
- DRM_INFO("%s: move to exec list %p\n", __func__, obj);
-#endif
+ i915_gem_object_move_to_active(obj, ring);
+ if (obj->write_domain)
+ list_move_tail(&to_intel_bo(obj)->gpu_write_list,
+ &ring->gpu_write_list);
}
-#if WATCH_LRU
- i915_dump_lru(dev, __func__);
-#endif
- i915_verify_inactive(dev, __FILE__, __LINE__);
+ i915_add_request(dev, file, request, ring);
+ request = NULL;
err:
- for (i = 0; i < pinned; i++)
- i915_gem_object_unpin(object_list[i]);
-
for (i = 0; i < args->buffer_count; i++) {
if (object_list[i]) {
obj_priv = to_intel_bo(object_list[i]);
@@ -3870,22 +4027,9 @@ err:
mutex_unlock(&dev->struct_mutex);
pre_mutex_err:
- /* Copy the updated relocations out regardless of current error
- * state. Failure to update the relocs would mean that the next
- * time userland calls execbuf, it would do so with presumed offset
- * state that didn't match the actual object state.
- */
- ret2 = i915_gem_put_relocs_to_user(exec_list, args->buffer_count,
- relocs);
- if (ret2 != 0) {
- DRM_ERROR("Failed to copy relocations back out: %d\n", ret2);
-
- if (ret == 0)
- ret = ret2;
- }
-
drm_free_large(object_list);
kfree(cliprects);
+ kfree(request);
return ret;
}
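/*
 * Illustrative sketch (not part of the patch): walking the pending-flip
 * bitmask the way the new code in i915_gem_do_execbuffer does -- one bit per
 * display plane, emitting one wait per plane that still has a flip
 * outstanding. emit_wait_for_plane() is a stand-in for the MI_WAIT_FOR_EVENT
 * ring emission.
 */
#include <stdio.h>

static void emit_wait_for_plane(int plane)
{
	printf("emit WAIT_FOR_EVENT for plane %c\n", 'A' + plane);
}

int main(void)
{
	unsigned int flips = 0x5;	/* planes 0 and 2 have flips pending */

	for (int plane = 0; flips >> plane; plane++) {
		if (((flips >> plane) & 1) == 0)
			continue;
		emit_wait_for_plane(plane);
	}
	return 0;
}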
@@ -3942,7 +4086,7 @@ i915_gem_execbuffer(struct drm_device *dev, void *data,
exec2_list[i].relocs_ptr = exec_list[i].relocs_ptr;
exec2_list[i].alignment = exec_list[i].alignment;
exec2_list[i].offset = exec_list[i].offset;
- if (!IS_I965G(dev))
+ if (INTEL_INFO(dev)->gen < 4)
exec2_list[i].flags = EXEC_OBJECT_NEEDS_FENCE;
else
exec2_list[i].flags = 0;
@@ -4039,20 +4183,19 @@ int
i915_gem_object_pin(struct drm_gem_object *obj, uint32_t alignment)
{
struct drm_device *dev = obj->dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
int ret;
BUG_ON(obj_priv->pin_count == DRM_I915_GEM_OBJECT_MAX_PIN_COUNT);
-
- i915_verify_inactive(dev, __FILE__, __LINE__);
+ WARN_ON(i915_verify_lists(dev));
if (obj_priv->gtt_space != NULL) {
if (alignment == 0)
alignment = i915_gem_get_gtt_alignment(obj);
if (obj_priv->gtt_offset & (alignment - 1)) {
WARN(obj_priv->pin_count,
- "bo is already pinned with incorrect alignment:"
- " offset=%x, req.alignment=%x\n",
+ "bo is already pinned with incorrect alignment: offset=%x, req.alignment=%x\n",
obj_priv->gtt_offset, alignment);
ret = i915_gem_object_unbind(obj);
if (ret)
@@ -4072,14 +4215,13 @@ i915_gem_object_pin(struct drm_gem_object *obj, uint32_t alignment)
* remove it from the inactive list
*/
if (obj_priv->pin_count == 1) {
- atomic_inc(&dev->pin_count);
- atomic_add(obj->size, &dev->pin_memory);
- if (!obj_priv->active &&
- (obj->write_domain & I915_GEM_GPU_DOMAINS) == 0)
- list_del_init(&obj_priv->list);
+ i915_gem_info_add_pin(dev_priv, obj->size);
+ if (!obj_priv->active)
+ list_move_tail(&obj_priv->mm_list,
+ &dev_priv->mm.pinned_list);
}
- i915_verify_inactive(dev, __FILE__, __LINE__);
+ WARN_ON(i915_verify_lists(dev));
return 0;
}
@@ -4090,7 +4232,7 @@ i915_gem_object_unpin(struct drm_gem_object *obj)
drm_i915_private_t *dev_priv = dev->dev_private;
struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
- i915_verify_inactive(dev, __FILE__, __LINE__);
+ WARN_ON(i915_verify_lists(dev));
obj_priv->pin_count--;
BUG_ON(obj_priv->pin_count < 0);
BUG_ON(obj_priv->gtt_space == NULL);
@@ -4100,14 +4242,12 @@ i915_gem_object_unpin(struct drm_gem_object *obj)
* the inactive list
*/
if (obj_priv->pin_count == 0) {
- if (!obj_priv->active &&
- (obj->write_domain & I915_GEM_GPU_DOMAINS) == 0)
- list_move_tail(&obj_priv->list,
+ if (!obj_priv->active)
+ list_move_tail(&obj_priv->mm_list,
&dev_priv->mm.inactive_list);
- atomic_dec(&dev->pin_count);
- atomic_sub(obj->size, &dev->pin_memory);
+ i915_gem_info_remove_pin(dev_priv, obj->size);
}
- i915_verify_inactive(dev, __FILE__, __LINE__);
+ WARN_ON(i915_verify_lists(dev));
}
int
@@ -4119,41 +4259,36 @@ i915_gem_pin_ioctl(struct drm_device *dev, void *data,
struct drm_i915_gem_object *obj_priv;
int ret;
- mutex_lock(&dev->struct_mutex);
+ ret = i915_mutex_lock_interruptible(dev);
+ if (ret)
+ return ret;
obj = drm_gem_object_lookup(dev, file_priv, args->handle);
if (obj == NULL) {
- DRM_ERROR("Bad handle in i915_gem_pin_ioctl(): %d\n",
- args->handle);
- mutex_unlock(&dev->struct_mutex);
- return -ENOENT;
+ ret = -ENOENT;
+ goto unlock;
}
obj_priv = to_intel_bo(obj);
if (obj_priv->madv != I915_MADV_WILLNEED) {
DRM_ERROR("Attempting to pin a purgeable buffer\n");
- drm_gem_object_unreference(obj);
- mutex_unlock(&dev->struct_mutex);
- return -EINVAL;
+ ret = -EINVAL;
+ goto out;
}
if (obj_priv->pin_filp != NULL && obj_priv->pin_filp != file_priv) {
DRM_ERROR("Already pinned in i915_gem_pin_ioctl(): %d\n",
args->handle);
- drm_gem_object_unreference(obj);
- mutex_unlock(&dev->struct_mutex);
- return -EINVAL;
+ ret = -EINVAL;
+ goto out;
}
obj_priv->user_pin_count++;
obj_priv->pin_filp = file_priv;
if (obj_priv->user_pin_count == 1) {
ret = i915_gem_object_pin(obj, args->alignment);
- if (ret != 0) {
- drm_gem_object_unreference(obj);
- mutex_unlock(&dev->struct_mutex);
- return ret;
- }
+ if (ret)
+ goto out;
}
/* XXX - flush the CPU caches for pinned objects
@@ -4161,10 +4296,11 @@ i915_gem_pin_ioctl(struct drm_device *dev, void *data,
*/
i915_gem_object_flush_cpu_write_domain(obj);
args->offset = obj_priv->gtt_offset;
+out:
drm_gem_object_unreference(obj);
+unlock:
mutex_unlock(&dev->struct_mutex);
-
- return 0;
+ return ret;
}
int
@@ -4174,24 +4310,24 @@ i915_gem_unpin_ioctl(struct drm_device *dev, void *data,
struct drm_i915_gem_pin *args = data;
struct drm_gem_object *obj;
struct drm_i915_gem_object *obj_priv;
+ int ret;
- mutex_lock(&dev->struct_mutex);
+ ret = i915_mutex_lock_interruptible(dev);
+ if (ret)
+ return ret;
obj = drm_gem_object_lookup(dev, file_priv, args->handle);
if (obj == NULL) {
- DRM_ERROR("Bad handle in i915_gem_unpin_ioctl(): %d\n",
- args->handle);
- mutex_unlock(&dev->struct_mutex);
- return -ENOENT;
+ ret = -ENOENT;
+ goto unlock;
}
-
obj_priv = to_intel_bo(obj);
+
if (obj_priv->pin_filp != file_priv) {
DRM_ERROR("Not pinned by caller in i915_gem_pin_ioctl(): %d\n",
args->handle);
- drm_gem_object_unreference(obj);
- mutex_unlock(&dev->struct_mutex);
- return -EINVAL;
+ ret = -EINVAL;
+ goto out;
}
obj_priv->user_pin_count--;
if (obj_priv->user_pin_count == 0) {
@@ -4199,9 +4335,11 @@ i915_gem_unpin_ioctl(struct drm_device *dev, void *data,
i915_gem_object_unpin(obj);
}
+out:
drm_gem_object_unreference(obj);
+unlock:
mutex_unlock(&dev->struct_mutex);
- return 0;
+ return ret;
}
int
@@ -4211,22 +4349,24 @@ i915_gem_busy_ioctl(struct drm_device *dev, void *data,
struct drm_i915_gem_busy *args = data;
struct drm_gem_object *obj;
struct drm_i915_gem_object *obj_priv;
+ int ret;
+
+ ret = i915_mutex_lock_interruptible(dev);
+ if (ret)
+ return ret;
obj = drm_gem_object_lookup(dev, file_priv, args->handle);
if (obj == NULL) {
- DRM_ERROR("Bad handle in i915_gem_busy_ioctl(): %d\n",
- args->handle);
- return -ENOENT;
+ ret = -ENOENT;
+ goto unlock;
}
-
- mutex_lock(&dev->struct_mutex);
+ obj_priv = to_intel_bo(obj);
/* Count all active objects as busy, even if they are currently not used
* by the gpu. Users of this interface expect objects to eventually
* become non-busy without any further actions, therefore emit any
* necessary flushes here.
*/
- obj_priv = to_intel_bo(obj);
args->busy = obj_priv->active;
if (args->busy) {
/* Unconditionally flush objects, even when the gpu still uses this
@@ -4234,9 +4374,19 @@ i915_gem_busy_ioctl(struct drm_device *dev, void *data,
* use this buffer rather sooner than later, so issuing the required
* flush earlier is beneficial.
*/
- if (obj->write_domain) {
- i915_gem_flush(dev, 0, obj->write_domain);
- (void)i915_add_request(dev, file_priv, obj->write_domain, obj_priv->ring);
+ if (obj->write_domain & I915_GEM_GPU_DOMAINS) {
+ i915_gem_flush_ring(dev, file_priv,
+ obj_priv->ring,
+ 0, obj->write_domain);
+ } else if (obj_priv->ring->outstanding_lazy_request) {
+ /* This ring is not being cleared by active usage,
+ * so emit a request to do so.
+ */
+ u32 seqno = i915_add_request(dev,
+ NULL, NULL,
+ obj_priv->ring);
+ if (seqno == 0)
+ ret = -ENOMEM;
}
/* Update the active list for the hardware's current position.
@@ -4250,8 +4400,9 @@ i915_gem_busy_ioctl(struct drm_device *dev, void *data,
}
drm_gem_object_unreference(obj);
+unlock:
mutex_unlock(&dev->struct_mutex);
- return 0;
+ return ret;
}
int
@@ -4268,6 +4419,7 @@ i915_gem_madvise_ioctl(struct drm_device *dev, void *data,
struct drm_i915_gem_madvise *args = data;
struct drm_gem_object *obj;
struct drm_i915_gem_object *obj_priv;
+ int ret;
switch (args->madv) {
case I915_MADV_DONTNEED:
@@ -4277,22 +4429,20 @@ i915_gem_madvise_ioctl(struct drm_device *dev, void *data,
return -EINVAL;
}
+ ret = i915_mutex_lock_interruptible(dev);
+ if (ret)
+ return ret;
+
obj = drm_gem_object_lookup(dev, file_priv, args->handle);
if (obj == NULL) {
- DRM_ERROR("Bad handle in i915_gem_madvise_ioctl(): %d\n",
- args->handle);
- return -ENOENT;
+ ret = -ENOENT;
+ goto unlock;
}
-
- mutex_lock(&dev->struct_mutex);
obj_priv = to_intel_bo(obj);
if (obj_priv->pin_count) {
- drm_gem_object_unreference(obj);
- mutex_unlock(&dev->struct_mutex);
-
- DRM_ERROR("Attempted i915_gem_madvise_ioctl() on a pinned object\n");
- return -EINVAL;
+ ret = -EINVAL;
+ goto out;
}
if (obj_priv->madv != __I915_MADV_PURGED)
@@ -4305,15 +4455,17 @@ i915_gem_madvise_ioctl(struct drm_device *dev, void *data,
args->retained = obj_priv->madv != __I915_MADV_PURGED;
+out:
drm_gem_object_unreference(obj);
+unlock:
mutex_unlock(&dev->struct_mutex);
-
- return 0;
+ return ret;
}
struct drm_gem_object * i915_gem_alloc_object(struct drm_device *dev,
size_t size)
{
+ struct drm_i915_private *dev_priv = dev->dev_private;
struct drm_i915_gem_object *obj;
obj = kzalloc(sizeof(*obj), GFP_KERNEL);
@@ -4325,18 +4477,19 @@ struct drm_gem_object * i915_gem_alloc_object(struct drm_device *dev,
return NULL;
}
+ i915_gem_info_add_obj(dev_priv, size);
+
obj->base.write_domain = I915_GEM_DOMAIN_CPU;
obj->base.read_domains = I915_GEM_DOMAIN_CPU;
obj->agp_type = AGP_USER_MEMORY;
obj->base.driver_private = NULL;
obj->fence_reg = I915_FENCE_REG_NONE;
- INIT_LIST_HEAD(&obj->list);
+ INIT_LIST_HEAD(&obj->mm_list);
+ INIT_LIST_HEAD(&obj->ring_list);
INIT_LIST_HEAD(&obj->gpu_write_list);
obj->madv = I915_MADV_WILLNEED;
- trace_i915_gem_object_create(&obj->base);
-
return &obj->base;
}
@@ -4356,7 +4509,7 @@ static void i915_gem_free_object_tail(struct drm_gem_object *obj)
ret = i915_gem_object_unbind(obj);
if (ret == -ERESTARTSYS) {
- list_move(&obj_priv->list,
+ list_move(&obj_priv->mm_list,
&dev_priv->mm.deferred_free_list);
return;
}
@@ -4365,6 +4518,7 @@ static void i915_gem_free_object_tail(struct drm_gem_object *obj)
i915_gem_free_mmap_offset(obj);
drm_gem_object_release(obj);
+ i915_gem_info_remove_obj(dev_priv, obj->size);
kfree(obj_priv->page_cpu_valid);
kfree(obj_priv->bit_17);
@@ -4395,10 +4549,7 @@ i915_gem_idle(struct drm_device *dev)
mutex_lock(&dev->struct_mutex);
- if (dev_priv->mm.suspended ||
- (dev_priv->render_ring.gem_object == NULL) ||
- (HAS_BSD(dev) &&
- dev_priv->bsd_ring.gem_object == NULL)) {
+ if (dev_priv->mm.suspended) {
mutex_unlock(&dev->struct_mutex);
return 0;
}
@@ -4423,7 +4574,7 @@ i915_gem_idle(struct drm_device *dev)
* And not confound mm.suspended!
*/
dev_priv->mm.suspended = 1;
- del_timer(&dev_priv->hangcheck_timer);
+ del_timer_sync(&dev_priv->hangcheck_timer);
i915_kernel_lost_context(dev);
i915_gem_cleanup_ringbuffer(dev);
@@ -4503,36 +4654,34 @@ i915_gem_init_ringbuffer(struct drm_device *dev)
drm_i915_private_t *dev_priv = dev->dev_private;
int ret;
- dev_priv->render_ring = render_ring;
-
- if (!I915_NEED_GFX_HWS(dev)) {
- dev_priv->render_ring.status_page.page_addr
- = dev_priv->status_page_dmah->vaddr;
- memset(dev_priv->render_ring.status_page.page_addr,
- 0, PAGE_SIZE);
- }
-
if (HAS_PIPE_CONTROL(dev)) {
ret = i915_gem_init_pipe_control(dev);
if (ret)
return ret;
}
- ret = intel_init_ring_buffer(dev, &dev_priv->render_ring);
+ ret = intel_init_render_ring_buffer(dev);
if (ret)
goto cleanup_pipe_control;
if (HAS_BSD(dev)) {
- dev_priv->bsd_ring = bsd_ring;
- ret = intel_init_ring_buffer(dev, &dev_priv->bsd_ring);
+ ret = intel_init_bsd_ring_buffer(dev);
if (ret)
goto cleanup_render_ring;
}
+ if (HAS_BLT(dev)) {
+ ret = intel_init_blt_ring_buffer(dev);
+ if (ret)
+ goto cleanup_bsd_ring;
+ }
+
dev_priv->next_seqno = 1;
return 0;
+cleanup_bsd_ring:
+ intel_cleanup_ring_buffer(dev, &dev_priv->bsd_ring);
cleanup_render_ring:
intel_cleanup_ring_buffer(dev, &dev_priv->render_ring);
cleanup_pipe_control:
@@ -4547,8 +4696,8 @@ i915_gem_cleanup_ringbuffer(struct drm_device *dev)
drm_i915_private_t *dev_priv = dev->dev_private;
intel_cleanup_ring_buffer(dev, &dev_priv->render_ring);
- if (HAS_BSD(dev))
- intel_cleanup_ring_buffer(dev, &dev_priv->bsd_ring);
+ intel_cleanup_ring_buffer(dev, &dev_priv->bsd_ring);
+ intel_cleanup_ring_buffer(dev, &dev_priv->blt_ring);
if (HAS_PIPE_CONTROL(dev))
i915_gem_cleanup_pipe_control(dev);
}
@@ -4577,15 +4726,15 @@ i915_gem_entervt_ioctl(struct drm_device *dev, void *data,
return ret;
}
- spin_lock(&dev_priv->mm.active_list_lock);
+ BUG_ON(!list_empty(&dev_priv->mm.active_list));
BUG_ON(!list_empty(&dev_priv->render_ring.active_list));
- BUG_ON(HAS_BSD(dev) && !list_empty(&dev_priv->bsd_ring.active_list));
- spin_unlock(&dev_priv->mm.active_list_lock);
-
+ BUG_ON(!list_empty(&dev_priv->bsd_ring.active_list));
+ BUG_ON(!list_empty(&dev_priv->blt_ring.active_list));
BUG_ON(!list_empty(&dev_priv->mm.flushing_list));
BUG_ON(!list_empty(&dev_priv->mm.inactive_list));
BUG_ON(!list_empty(&dev_priv->render_ring.request_list));
- BUG_ON(HAS_BSD(dev) && !list_empty(&dev_priv->bsd_ring.request_list));
+ BUG_ON(!list_empty(&dev_priv->bsd_ring.request_list));
+ BUG_ON(!list_empty(&dev_priv->blt_ring.request_list));
mutex_unlock(&dev->struct_mutex);
ret = drm_irq_install(dev);
@@ -4627,28 +4776,34 @@ i915_gem_lastclose(struct drm_device *dev)
DRM_ERROR("failed to idle hardware: %d\n", ret);
}
+static void
+init_ring_lists(struct intel_ring_buffer *ring)
+{
+ INIT_LIST_HEAD(&ring->active_list);
+ INIT_LIST_HEAD(&ring->request_list);
+ INIT_LIST_HEAD(&ring->gpu_write_list);
+}
+
void
i915_gem_load(struct drm_device *dev)
{
int i;
drm_i915_private_t *dev_priv = dev->dev_private;
- spin_lock_init(&dev_priv->mm.active_list_lock);
+ INIT_LIST_HEAD(&dev_priv->mm.active_list);
INIT_LIST_HEAD(&dev_priv->mm.flushing_list);
- INIT_LIST_HEAD(&dev_priv->mm.gpu_write_list);
INIT_LIST_HEAD(&dev_priv->mm.inactive_list);
+ INIT_LIST_HEAD(&dev_priv->mm.pinned_list);
INIT_LIST_HEAD(&dev_priv->mm.fence_list);
INIT_LIST_HEAD(&dev_priv->mm.deferred_free_list);
- INIT_LIST_HEAD(&dev_priv->render_ring.active_list);
- INIT_LIST_HEAD(&dev_priv->render_ring.request_list);
- if (HAS_BSD(dev)) {
- INIT_LIST_HEAD(&dev_priv->bsd_ring.active_list);
- INIT_LIST_HEAD(&dev_priv->bsd_ring.request_list);
- }
+ init_ring_lists(&dev_priv->render_ring);
+ init_ring_lists(&dev_priv->bsd_ring);
+ init_ring_lists(&dev_priv->blt_ring);
for (i = 0; i < 16; i++)
INIT_LIST_HEAD(&dev_priv->fence_regs[i].lru_list);
INIT_DELAYED_WORK(&dev_priv->mm.retire_work,
i915_gem_retire_work_handler);
+ init_completion(&dev_priv->error_completion);
spin_lock(&shrink_list_lock);
list_add(&dev_priv->mm.shrink_list, &shrink_list);
spin_unlock(&shrink_list_lock);
@@ -4667,21 +4822,30 @@ i915_gem_load(struct drm_device *dev)
if (!drm_core_check_feature(dev, DRIVER_MODESET))
dev_priv->fence_reg_start = 3;
- if (IS_I965G(dev) || IS_I945G(dev) || IS_I945GM(dev) || IS_G33(dev))
+ if (INTEL_INFO(dev)->gen >= 4 || IS_I945G(dev) || IS_I945GM(dev) || IS_G33(dev))
dev_priv->num_fence_regs = 16;
else
dev_priv->num_fence_regs = 8;
/* Initialize fence registers to zero */
- if (IS_I965G(dev)) {
+ switch (INTEL_INFO(dev)->gen) {
+ case 6:
+ for (i = 0; i < 16; i++)
+ I915_WRITE64(FENCE_REG_SANDYBRIDGE_0 + (i * 8), 0);
+ break;
+ case 5:
+ case 4:
for (i = 0; i < 16; i++)
I915_WRITE64(FENCE_REG_965_0 + (i * 8), 0);
- } else {
- for (i = 0; i < 8; i++)
- I915_WRITE(FENCE_REG_830_0 + (i * 4), 0);
+ break;
+ case 3:
if (IS_I945G(dev) || IS_I945GM(dev) || IS_G33(dev))
for (i = 0; i < 8; i++)
I915_WRITE(FENCE_REG_945_8 + (i * 4), 0);
+ case 2:
+ for (i = 0; i < 8; i++)
+ I915_WRITE(FENCE_REG_830_0 + (i * 4), 0);
+ break;
}
i915_gem_detect_bit_6_swizzle(dev);
init_waitqueue_head(&dev_priv->pending_flip_queue);
@@ -4691,8 +4855,8 @@ i915_gem_load(struct drm_device *dev)
* Create a physically contiguous memory object for this object
* e.g. for cursor + overlay regs
*/
-int i915_gem_init_phys_object(struct drm_device *dev,
- int id, int size, int align)
+static int i915_gem_init_phys_object(struct drm_device *dev,
+ int id, int size, int align)
{
drm_i915_private_t *dev_priv = dev->dev_private;
struct drm_i915_gem_phys_object *phys_obj;
@@ -4724,7 +4888,7 @@ kfree_obj:
return ret;
}
-void i915_gem_free_phys_object(struct drm_device *dev, int id)
+static void i915_gem_free_phys_object(struct drm_device *dev, int id)
{
drm_i915_private_t *dev_priv = dev->dev_private;
struct drm_i915_gem_phys_object *phys_obj;
@@ -4772,11 +4936,11 @@ void i915_gem_detach_phys_object(struct drm_device *dev,
page_count = obj->size / PAGE_SIZE;
for (i = 0; i < page_count; i++) {
- char *dst = kmap_atomic(obj_priv->pages[i], KM_USER0);
+ char *dst = kmap_atomic(obj_priv->pages[i]);
char *src = obj_priv->phys_obj->handle->vaddr + (i * PAGE_SIZE);
memcpy(dst, src, PAGE_SIZE);
- kunmap_atomic(dst, KM_USER0);
+ kunmap_atomic(dst);
}
drm_clflush_pages(obj_priv->pages, page_count);
drm_agp_chipset_flush(dev);
@@ -4833,11 +4997,11 @@ i915_gem_attach_phys_object(struct drm_device *dev,
page_count = obj->size / PAGE_SIZE;
for (i = 0; i < page_count; i++) {
- char *src = kmap_atomic(obj_priv->pages[i], KM_USER0);
+ char *src = kmap_atomic(obj_priv->pages[i]);
char *dst = obj_priv->phys_obj->handle->vaddr + (i * PAGE_SIZE);
memcpy(dst, src, PAGE_SIZE);
- kunmap_atomic(src, KM_USER0);
+ kunmap_atomic(src);
}
i915_gem_object_put_pages(obj);
@@ -4853,34 +5017,48 @@ i915_gem_phys_pwrite(struct drm_device *dev, struct drm_gem_object *obj,
struct drm_file *file_priv)
{
struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
- void *obj_addr;
- int ret;
- char __user *user_data;
+ void *vaddr = obj_priv->phys_obj->handle->vaddr + args->offset;
+ char __user *user_data = (char __user *) (uintptr_t) args->data_ptr;
- user_data = (char __user *) (uintptr_t) args->data_ptr;
- obj_addr = obj_priv->phys_obj->handle->vaddr + args->offset;
+ DRM_DEBUG_DRIVER("vaddr %p, %lld\n", vaddr, args->size);
- DRM_DEBUG_DRIVER("obj_addr %p, %lld\n", obj_addr, args->size);
- ret = copy_from_user(obj_addr, user_data, args->size);
- if (ret)
- return -EFAULT;
+ if (__copy_from_user_inatomic_nocache(vaddr, user_data, args->size)) {
+ unsigned long unwritten;
+
+ /* The physical object once assigned is fixed for the lifetime
+ * of the obj, so we can safely drop the lock and continue
+ * to access vaddr.
+ */
+ mutex_unlock(&dev->struct_mutex);
+ unwritten = copy_from_user(vaddr, user_data, args->size);
+ mutex_lock(&dev->struct_mutex);
+ if (unwritten)
+ return -EFAULT;
+ }
drm_agp_chipset_flush(dev);
return 0;
}
-void i915_gem_release(struct drm_device * dev, struct drm_file *file_priv)
+void i915_gem_release(struct drm_device *dev, struct drm_file *file)
{
- struct drm_i915_file_private *i915_file_priv = file_priv->driver_priv;
+ struct drm_i915_file_private *file_priv = file->driver_priv;
/* Clean up our request list when the client is going away, so that
* later retire_requests won't dereference our soon-to-be-gone
* file_priv.
*/
- mutex_lock(&dev->struct_mutex);
- while (!list_empty(&i915_file_priv->mm.request_list))
- list_del_init(i915_file_priv->mm.request_list.next);
- mutex_unlock(&dev->struct_mutex);
+ spin_lock(&file_priv->mm.lock);
+ while (!list_empty(&file_priv->mm.request_list)) {
+ struct drm_i915_gem_request *request;
+
+ request = list_first_entry(&file_priv->mm.request_list,
+ struct drm_i915_gem_request,
+ client_list);
+ list_del(&request->client_list);
+ request->file_priv = NULL;
+ }
+ spin_unlock(&file_priv->mm.lock);
}
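/*
 * Illustrative sketch (not part of the patch): detaching a departing client's
 * requests the way the reworked i915_gem_release does -- unlink each entry
 * from the per-file list and clear its back-pointer so later retire
 * processing never dereferences the freed file private data. The structures
 * below are invented for the example.
 */
#include <stdio.h>

struct client;

struct request {
	struct request *next;	/* per-client list link */
	struct client *owner;	/* back-pointer cleared on release */
	int id;
};

struct client {
	struct request *requests;
};

static void client_release(struct client *c)
{
	while (c->requests) {
		struct request *r = c->requests;

		c->requests = r->next;	/* list_del() equivalent */
		r->next = NULL;
		r->owner = NULL;	/* retire must not touch the client */
		printf("detached request %d\n", r->id);
	}
}

int main(void)
{
	struct request r2 = { .next = NULL, .id = 2 };
	struct request r1 = { .next = &r2, .id = 1 };
	struct client c = { .requests = &r1 };

	r1.owner = r2.owner = &c;
	client_release(&c);
	return 0;
}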
static int
@@ -4889,12 +5067,8 @@ i915_gpu_is_active(struct drm_device *dev)
drm_i915_private_t *dev_priv = dev->dev_private;
int lists_empty;
- spin_lock(&dev_priv->mm.active_list_lock);
lists_empty = list_empty(&dev_priv->mm.flushing_list) &&
- list_empty(&dev_priv->render_ring.active_list);
- if (HAS_BSD(dev))
- lists_empty &= list_empty(&dev_priv->bsd_ring.active_list);
- spin_unlock(&dev_priv->mm.active_list_lock);
+ list_empty(&dev_priv->mm.active_list);
return !lists_empty;
}
@@ -4916,7 +5090,7 @@ i915_gem_shrink(struct shrinker *shrink, int nr_to_scan, gfp_t gfp_mask)
if (mutex_trylock(&dev->struct_mutex)) {
list_for_each_entry(obj_priv,
&dev_priv->mm.inactive_list,
- list)
+ mm_list)
cnt++;
mutex_unlock(&dev->struct_mutex);
}
@@ -4942,7 +5116,7 @@ rescan:
list_for_each_entry_safe(obj_priv, next_obj,
&dev_priv->mm.inactive_list,
- list) {
+ mm_list) {
if (i915_gem_object_is_purgeable(obj_priv)) {
i915_gem_object_unbind(&obj_priv->base);
if (--nr_to_scan <= 0)
@@ -4971,7 +5145,7 @@ rescan:
list_for_each_entry_safe(obj_priv, next_obj,
&dev_priv->mm.inactive_list,
- list) {
+ mm_list) {
if (nr_to_scan > 0) {
i915_gem_object_unbind(&obj_priv->base);
nr_to_scan--;
diff --git a/drivers/gpu/drm/i915/i915_gem_debug.c b/drivers/gpu/drm/i915/i915_gem_debug.c
index 80f380b1d951..48644b840a8d 100644
--- a/drivers/gpu/drm/i915/i915_gem_debug.c
+++ b/drivers/gpu/drm/i915/i915_gem_debug.c
@@ -30,29 +30,112 @@
#include "i915_drm.h"
#include "i915_drv.h"
-#if WATCH_INACTIVE
-void
-i915_verify_inactive(struct drm_device *dev, char *file, int line)
+#if WATCH_LISTS
+int
+i915_verify_lists(struct drm_device *dev)
{
+ static int warned;
drm_i915_private_t *dev_priv = dev->dev_private;
- struct drm_gem_object *obj;
- struct drm_i915_gem_object *obj_priv;
-
- list_for_each_entry(obj_priv, &dev_priv->mm.inactive_list, list) {
- obj = &obj_priv->base;
- if (obj_priv->pin_count || obj_priv->active ||
- (obj->write_domain & ~(I915_GEM_DOMAIN_CPU |
- I915_GEM_DOMAIN_GTT)))
- DRM_ERROR("inactive %p (p %d a %d w %x) %s:%d\n",
+ struct drm_i915_gem_object *obj;
+ int err = 0;
+
+ if (warned)
+ return 0;
+
+ list_for_each_entry(obj, &dev_priv->render_ring.active_list, list) {
+ if (obj->base.dev != dev ||
+ !atomic_read(&obj->base.refcount.refcount)) {
+ DRM_ERROR("freed render active %p\n", obj);
+ err++;
+ break;
+ } else if (!obj->active ||
+ (obj->base.read_domains & I915_GEM_GPU_DOMAINS) == 0) {
+ DRM_ERROR("invalid render active %p (a %d r %x)\n",
+ obj,
+ obj->active,
+ obj->base.read_domains);
+ err++;
+ } else if (obj->base.write_domain && list_empty(&obj->gpu_write_list)) {
+ DRM_ERROR("invalid render active %p (w %x, gwl %d)\n",
+ obj,
+ obj->base.write_domain,
+ !list_empty(&obj->gpu_write_list));
+ err++;
+ }
+ }
+
+ list_for_each_entry(obj, &dev_priv->mm.flushing_list, list) {
+ if (obj->base.dev != dev ||
+ !atomic_read(&obj->base.refcount.refcount)) {
+ DRM_ERROR("freed flushing %p\n", obj);
+ err++;
+ break;
+ } else if (!obj->active ||
+ (obj->base.write_domain & I915_GEM_GPU_DOMAINS) == 0 ||
+ list_empty(&obj->gpu_write_list)){
+ DRM_ERROR("invalid flushing %p (a %d w %x gwl %d)\n",
obj,
- obj_priv->pin_count, obj_priv->active,
- obj->write_domain, file, line);
+ obj->active,
+ obj->base.write_domain,
+ !list_empty(&obj->gpu_write_list));
+ err++;
+ }
+ }
+
+ list_for_each_entry(obj, &dev_priv->mm.gpu_write_list, gpu_write_list) {
+ if (obj->base.dev != dev ||
+ !atomic_read(&obj->base.refcount.refcount)) {
+ DRM_ERROR("freed gpu write %p\n", obj);
+ err++;
+ break;
+ } else if (!obj->active ||
+ (obj->base.write_domain & I915_GEM_GPU_DOMAINS) == 0) {
+ DRM_ERROR("invalid gpu write %p (a %d w %x)\n",
+ obj,
+ obj->active,
+ obj->base.write_domain);
+ err++;
+ }
+ }
+
+ list_for_each_entry(obj, &dev_priv->mm.inactive_list, list) {
+ if (obj->base.dev != dev ||
+ !atomic_read(&obj->base.refcount.refcount)) {
+ DRM_ERROR("freed inactive %p\n", obj);
+ err++;
+ break;
+ } else if (obj->pin_count || obj->active ||
+ (obj->base.write_domain & I915_GEM_GPU_DOMAINS)) {
+ DRM_ERROR("invalid inactive %p (p %d a %d w %x)\n",
+ obj,
+ obj->pin_count, obj->active,
+ obj->base.write_domain);
+ err++;
+ }
}
+
+ list_for_each_entry(obj, &dev_priv->mm.pinned_list, list) {
+ if (obj->base.dev != dev ||
+ !atomic_read(&obj->base.refcount.refcount)) {
+ DRM_ERROR("freed pinned %p\n", obj);
+ err++;
+ break;
+ } else if (!obj->pin_count || obj->active ||
+ (obj->base.write_domain & I915_GEM_GPU_DOMAINS)) {
+ DRM_ERROR("invalid pinned %p (p %d a %d w %x)\n",
+ obj,
+ obj->pin_count, obj->active,
+ obj->base.write_domain);
+ err++;
+ }
+ }
+
+ return warned = err;
}
#endif /* WATCH_INACTIVE */
-#if WATCH_BUF | WATCH_EXEC | WATCH_PWRITE
+#if WATCH_EXEC | WATCH_PWRITE
static void
i915_gem_dump_page(struct page *page, uint32_t start, uint32_t end,
uint32_t bias, uint32_t mark)
@@ -97,41 +180,6 @@ i915_gem_dump_object(struct drm_gem_object *obj, int len,
}
#endif
-#if WATCH_LRU
-void
-i915_dump_lru(struct drm_device *dev, const char *where)
-{
- drm_i915_private_t *dev_priv = dev->dev_private;
- struct drm_i915_gem_object *obj_priv;
-
- DRM_INFO("active list %s {\n", where);
- spin_lock(&dev_priv->mm.active_list_lock);
- list_for_each_entry(obj_priv, &dev_priv->mm.active_list,
- list)
- {
- DRM_INFO(" %p: %08x\n", obj_priv,
- obj_priv->last_rendering_seqno);
- }
- spin_unlock(&dev_priv->mm.active_list_lock);
- DRM_INFO("}\n");
- DRM_INFO("flushing list %s {\n", where);
- list_for_each_entry(obj_priv, &dev_priv->mm.flushing_list,
- list)
- {
- DRM_INFO(" %p: %08x\n", obj_priv,
- obj_priv->last_rendering_seqno);
- }
- DRM_INFO("}\n");
- DRM_INFO("inactive %s {\n", where);
- list_for_each_entry(obj_priv, &dev_priv->mm.inactive_list, list) {
- DRM_INFO(" %p: %08x\n", obj_priv,
- obj_priv->last_rendering_seqno);
- }
- DRM_INFO("}\n");
-}
-#endif
-
-
#if WATCH_COHERENCY
void
i915_gem_object_check_coherency(struct drm_gem_object *obj, int handle)
diff --git a/drivers/gpu/drm/i915/i915_gem_evict.c b/drivers/gpu/drm/i915/i915_gem_evict.c
index 5c428fa3e0b3..d8ae7d1d0cc6 100644
--- a/drivers/gpu/drm/i915/i915_gem_evict.c
+++ b/drivers/gpu/drm/i915/i915_gem_evict.c
@@ -31,49 +31,6 @@
#include "i915_drv.h"
#include "i915_drm.h"
-static struct drm_i915_gem_object *
-i915_gem_next_active_object(struct drm_device *dev,
- struct list_head **render_iter,
- struct list_head **bsd_iter)
-{
- drm_i915_private_t *dev_priv = dev->dev_private;
- struct drm_i915_gem_object *render_obj = NULL, *bsd_obj = NULL;
-
- if (*render_iter != &dev_priv->render_ring.active_list)
- render_obj = list_entry(*render_iter,
- struct drm_i915_gem_object,
- list);
-
- if (HAS_BSD(dev)) {
- if (*bsd_iter != &dev_priv->bsd_ring.active_list)
- bsd_obj = list_entry(*bsd_iter,
- struct drm_i915_gem_object,
- list);
-
- if (render_obj == NULL) {
- *bsd_iter = (*bsd_iter)->next;
- return bsd_obj;
- }
-
- if (bsd_obj == NULL) {
- *render_iter = (*render_iter)->next;
- return render_obj;
- }
-
- /* XXX can we handle seqno wrapping? */
- if (render_obj->last_rendering_seqno < bsd_obj->last_rendering_seqno) {
- *render_iter = (*render_iter)->next;
- return render_obj;
- } else {
- *bsd_iter = (*bsd_iter)->next;
- return bsd_obj;
- }
- } else {
- *render_iter = (*render_iter)->next;
- return render_obj;
- }
-}
-
static bool
mark_free(struct drm_i915_gem_object *obj_priv,
struct list_head *unwind)
@@ -83,18 +40,12 @@ mark_free(struct drm_i915_gem_object *obj_priv,
return drm_mm_scan_add_block(obj_priv->gtt_space);
}
-#define i915_for_each_active_object(OBJ, R, B) \
- *(R) = dev_priv->render_ring.active_list.next; \
- *(B) = dev_priv->bsd_ring.active_list.next; \
- while (((OBJ) = i915_gem_next_active_object(dev, (R), (B))) != NULL)
-
int
i915_gem_evict_something(struct drm_device *dev, int min_size, unsigned alignment)
{
drm_i915_private_t *dev_priv = dev->dev_private;
struct list_head eviction_list, unwind_list;
struct drm_i915_gem_object *obj_priv;
- struct list_head *render_iter, *bsd_iter;
int ret = 0;
i915_gem_retire_requests(dev);
@@ -131,13 +82,13 @@ i915_gem_evict_something(struct drm_device *dev, int min_size, unsigned alignmen
drm_mm_init_scan(&dev_priv->mm.gtt_space, min_size, alignment);
/* First see if there is a large enough contiguous idle region... */
- list_for_each_entry(obj_priv, &dev_priv->mm.inactive_list, list) {
+ list_for_each_entry(obj_priv, &dev_priv->mm.inactive_list, mm_list) {
if (mark_free(obj_priv, &unwind_list))
goto found;
}
/* Now merge in the soon-to-be-expired objects... */
- i915_for_each_active_object(obj_priv, &render_iter, &bsd_iter) {
+ list_for_each_entry(obj_priv, &dev_priv->mm.active_list, mm_list) {
/* Does the object require an outstanding flush? */
if (obj_priv->base.write_domain || obj_priv->pin_count)
continue;
@@ -147,14 +98,14 @@ i915_gem_evict_something(struct drm_device *dev, int min_size, unsigned alignmen
}
/* Finally add anything with a pending flush (in order of retirement) */
- list_for_each_entry(obj_priv, &dev_priv->mm.flushing_list, list) {
+ list_for_each_entry(obj_priv, &dev_priv->mm.flushing_list, mm_list) {
if (obj_priv->pin_count)
continue;
if (mark_free(obj_priv, &unwind_list))
goto found;
}
- i915_for_each_active_object(obj_priv, &render_iter, &bsd_iter) {
+ list_for_each_entry(obj_priv, &dev_priv->mm.active_list, mm_list) {
if (! obj_priv->base.write_domain || obj_priv->pin_count)
continue;
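/*
 * Summary of the four scan phases above, in the order they run: candidates
 * come from (1) the inactive list, (2) active objects with no pending GPU
 * write and no pin, (3) unpinned objects on the flushing list, and finally
 * (4) active objects that still have a pending write, i.e. roughly in
 * increasing order of how expensive they are to evict.
 */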
@@ -212,14 +163,9 @@ i915_gem_evict_everything(struct drm_device *dev)
int ret;
bool lists_empty;
- spin_lock(&dev_priv->mm.active_list_lock);
lists_empty = (list_empty(&dev_priv->mm.inactive_list) &&
list_empty(&dev_priv->mm.flushing_list) &&
- list_empty(&dev_priv->render_ring.active_list) &&
- (!HAS_BSD(dev)
- || list_empty(&dev_priv->bsd_ring.active_list)));
- spin_unlock(&dev_priv->mm.active_list_lock);
-
+ list_empty(&dev_priv->mm.active_list));
if (lists_empty)
return -ENOSPC;
@@ -234,13 +180,9 @@ i915_gem_evict_everything(struct drm_device *dev)
if (ret)
return ret;
- spin_lock(&dev_priv->mm.active_list_lock);
lists_empty = (list_empty(&dev_priv->mm.inactive_list) &&
list_empty(&dev_priv->mm.flushing_list) &&
- list_empty(&dev_priv->render_ring.active_list) &&
- (!HAS_BSD(dev)
- || list_empty(&dev_priv->bsd_ring.active_list)));
- spin_unlock(&dev_priv->mm.active_list_lock);
+ list_empty(&dev_priv->mm.active_list));
BUG_ON(!lists_empty);
return 0;
@@ -258,7 +200,7 @@ i915_gem_evict_inactive(struct drm_device *dev)
obj = &list_first_entry(&dev_priv->mm.inactive_list,
struct drm_i915_gem_object,
- list)->base;
+ mm_list)->base;
ret = i915_gem_object_unbind(obj);
if (ret != 0) {
diff --git a/drivers/gpu/drm/i915/i915_gem_tiling.c b/drivers/gpu/drm/i915/i915_gem_tiling.c
index 710eca70b323..af352de70be1 100644
--- a/drivers/gpu/drm/i915/i915_gem_tiling.c
+++ b/drivers/gpu/drm/i915/i915_gem_tiling.c
@@ -92,13 +92,13 @@ i915_gem_detect_bit_6_swizzle(struct drm_device *dev)
uint32_t swizzle_x = I915_BIT_6_SWIZZLE_UNKNOWN;
uint32_t swizzle_y = I915_BIT_6_SWIZZLE_UNKNOWN;
- if (IS_IRONLAKE(dev) || IS_GEN6(dev)) {
+ if (IS_GEN5(dev) || IS_GEN6(dev)) {
/* On Ironlake, whatever the DRAM config, the GPU always does the
* same swizzling setup.
*/
swizzle_x = I915_BIT_6_SWIZZLE_9_10;
swizzle_y = I915_BIT_6_SWIZZLE_9;
- } else if (!IS_I9XX(dev)) {
+ } else if (IS_GEN2(dev)) {
/* As far as we know, the 865 doesn't have these bit 6
* swizzling issues.
*/
@@ -190,19 +190,19 @@ i915_tiling_ok(struct drm_device *dev, int stride, int size, int tiling_mode)
if (tiling_mode == I915_TILING_NONE)
return true;
- if (!IS_I9XX(dev) ||
+ if (IS_GEN2(dev) ||
(tiling_mode == I915_TILING_Y && HAS_128_BYTE_Y_TILING(dev)))
tile_width = 128;
else
tile_width = 512;
/* check maximum stride & object size */
- if (IS_I965G(dev)) {
+ if (INTEL_INFO(dev)->gen >= 4) {
/* i965 stores the end address of the gtt mapping in the fence
* reg, so don't bother to check the size */
if (stride / 128 > I965_FENCE_MAX_PITCH_VAL)
return false;
- } else if (IS_GEN3(dev) || IS_GEN2(dev)) {
+ } else {
if (stride > 8192)
return false;
@@ -216,7 +216,7 @@ i915_tiling_ok(struct drm_device *dev, int stride, int size, int tiling_mode)
}
/* 965+ just needs multiples of tile width */
- if (IS_I965G(dev)) {
+ if (INTEL_INFO(dev)->gen >= 4) {
if (stride & (tile_width - 1))
return false;
return true;
@@ -244,16 +244,18 @@ i915_gem_object_fence_offset_ok(struct drm_gem_object *obj, int tiling_mode)
if (tiling_mode == I915_TILING_NONE)
return true;
- if (!IS_I965G(dev)) {
- if (obj_priv->gtt_offset & (obj->size - 1))
+ if (INTEL_INFO(dev)->gen >= 4)
+ return true;
+
+ if (obj_priv->gtt_offset & (obj->size - 1))
+ return false;
+
+ if (IS_GEN3(dev)) {
+ if (obj_priv->gtt_offset & ~I915_FENCE_START_MASK)
+ return false;
+ } else {
+ if (obj_priv->gtt_offset & ~I830_FENCE_START_MASK)
return false;
- if (IS_I9XX(dev)) {
- if (obj_priv->gtt_offset & ~I915_FENCE_START_MASK)
- return false;
- } else {
- if (obj_priv->gtt_offset & ~I830_FENCE_START_MASK)
- return false;
- }
}
return true;
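/*
 * Summary of the rewritten check above: gen4+ places no constraint on the
 * fenced offset; on older parts the GTT offset must be aligned to the object
 * size and must also fit the per-generation fence start mask
 * (I915_FENCE_START_MASK on gen3, I830_FENCE_START_MASK before that).
 */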
@@ -271,7 +273,11 @@ i915_gem_set_tiling(struct drm_device *dev, void *data,
drm_i915_private_t *dev_priv = dev->dev_private;
struct drm_gem_object *obj;
struct drm_i915_gem_object *obj_priv;
- int ret = 0;
+ int ret;
+
+ ret = i915_gem_check_is_wedged(dev);
+ if (ret)
+ return ret;
obj = drm_gem_object_lookup(dev, file_priv, args->handle);
if (obj == NULL)
@@ -328,7 +334,7 @@ i915_gem_set_tiling(struct drm_device *dev, void *data,
if (!i915_gem_object_fence_offset_ok(obj, args->tiling_mode))
ret = i915_gem_object_unbind(obj);
else if (obj_priv->fence_reg != I915_FENCE_REG_NONE)
- ret = i915_gem_object_put_fence_reg(obj);
+ ret = i915_gem_object_put_fence_reg(obj, true);
else
i915_gem_release_mmap(obj);
@@ -399,16 +405,14 @@ i915_gem_get_tiling(struct drm_device *dev, void *data,
* bit 17 of its physical address and therefore being interpreted differently
* by the GPU.
*/
-static int
+static void
i915_gem_swizzle_page(struct page *page)
{
+ char temp[64];
char *vaddr;
int i;
- char temp[64];
vaddr = kmap(page);
- if (vaddr == NULL)
- return -ENOMEM;
for (i = 0; i < PAGE_SIZE; i += 128) {
memcpy(temp, &vaddr[i], 64);
@@ -417,8 +421,6 @@ i915_gem_swizzle_page(struct page *page)
}
kunmap(page);
-
- return 0;
}
void
@@ -440,11 +442,7 @@ i915_gem_object_do_bit_17_swizzle(struct drm_gem_object *obj)
char new_bit_17 = page_to_phys(obj_priv->pages[i]) >> 17;
if ((new_bit_17 & 0x1) !=
(test_bit(i, obj_priv->bit_17) != 0)) {
- int ret = i915_gem_swizzle_page(obj_priv->pages[i]);
- if (ret != 0) {
- DRM_ERROR("Failed to swizzle page\n");
- return;
- }
+ i915_gem_swizzle_page(obj_priv->pages[i]);
set_page_dirty(obj_priv->pages[i]);
}
}
diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c
index 744225ebb4b2..729fd0c91d7b 100644
--- a/drivers/gpu/drm/i915/i915_irq.c
+++ b/drivers/gpu/drm/i915/i915_irq.c
@@ -85,7 +85,7 @@ ironlake_disable_graphics_irq(drm_i915_private_t *dev_priv, u32 mask)
}
/* For display hotplug interrupt */
-void
+static void
ironlake_enable_display_irq(drm_i915_private_t *dev_priv, u32 mask)
{
if ((dev_priv->irq_mask_reg & mask) != 0) {
@@ -172,7 +172,7 @@ void intel_enable_asle (struct drm_device *dev)
else {
i915_enable_pipestat(dev_priv, 1,
PIPE_LEGACY_BLC_EVENT_ENABLE);
- if (IS_I965G(dev))
+ if (INTEL_INFO(dev)->gen >= 4)
i915_enable_pipestat(dev_priv, 0,
PIPE_LEGACY_BLC_EVENT_ENABLE);
}
@@ -191,12 +191,7 @@ static int
i915_pipe_enabled(struct drm_device *dev, int pipe)
{
drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
- unsigned long pipeconf = pipe ? PIPEBCONF : PIPEACONF;
-
- if (I915_READ(pipeconf) & PIPEACONF_ENABLE)
- return 1;
-
- return 0;
+ return I915_READ(PIPECONF(pipe)) & PIPECONF_ENABLE;
}
/* Called from drm generic code, passed a 'crtc', which
@@ -207,10 +202,7 @@ u32 i915_get_vblank_counter(struct drm_device *dev, int pipe)
drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
unsigned long high_frame;
unsigned long low_frame;
- u32 high1, high2, low, count;
-
- high_frame = pipe ? PIPEBFRAMEHIGH : PIPEAFRAMEHIGH;
- low_frame = pipe ? PIPEBFRAMEPIXEL : PIPEAFRAMEPIXEL;
+ u32 high1, high2, low;
if (!i915_pipe_enabled(dev, pipe)) {
DRM_DEBUG_DRIVER("trying to get vblank count for disabled "
@@ -218,23 +210,23 @@ u32 i915_get_vblank_counter(struct drm_device *dev, int pipe)
return 0;
}
+ high_frame = pipe ? PIPEBFRAMEHIGH : PIPEAFRAMEHIGH;
+ low_frame = pipe ? PIPEBFRAMEPIXEL : PIPEAFRAMEPIXEL;
+
/*
* High & low register fields aren't synchronized, so make sure
* we get a low value that's stable across two reads of the high
* register.
*/
do {
- high1 = ((I915_READ(high_frame) & PIPE_FRAME_HIGH_MASK) >>
- PIPE_FRAME_HIGH_SHIFT);
- low = ((I915_READ(low_frame) & PIPE_FRAME_LOW_MASK) >>
- PIPE_FRAME_LOW_SHIFT);
- high2 = ((I915_READ(high_frame) & PIPE_FRAME_HIGH_MASK) >>
- PIPE_FRAME_HIGH_SHIFT);
+ high1 = I915_READ(high_frame) & PIPE_FRAME_HIGH_MASK;
+ low = I915_READ(low_frame) & PIPE_FRAME_LOW_MASK;
+ high2 = I915_READ(high_frame) & PIPE_FRAME_HIGH_MASK;
} while (high1 != high2);
- count = (high1 << 8) | low;
-
- return count;
+ high1 >>= PIPE_FRAME_HIGH_SHIFT;
+ low >>= PIPE_FRAME_LOW_SHIFT;
+ return (high1 << 8) | low;
}
u32 gm45_get_vblank_counter(struct drm_device *dev, int pipe)
@@ -260,16 +252,12 @@ static void i915_hotplug_work_func(struct work_struct *work)
hotplug_work);
struct drm_device *dev = dev_priv->dev;
struct drm_mode_config *mode_config = &dev->mode_config;
- struct drm_encoder *encoder;
-
- if (mode_config->num_encoder) {
- list_for_each_entry(encoder, &mode_config->encoder_list, head) {
- struct intel_encoder *intel_encoder = enc_to_intel_encoder(encoder);
-
- if (intel_encoder->hot_plug)
- (*intel_encoder->hot_plug) (intel_encoder);
- }
- }
+ struct intel_encoder *encoder;
+
+ list_for_each_entry(encoder, &mode_config->encoder_list, base.head)
+ if (encoder->hot_plug)
+ encoder->hot_plug(encoder);
+
/* Just fire off a uevent and let userspace tell us what to do */
drm_helper_hpd_irq_event(dev);
}
@@ -305,13 +293,30 @@ static void i915_handle_rps_change(struct drm_device *dev)
return;
}
-irqreturn_t ironlake_irq_handler(struct drm_device *dev)
+static void notify_ring(struct drm_device *dev,
+ struct intel_ring_buffer *ring)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ u32 seqno = ring->get_seqno(dev, ring);
+ ring->irq_gem_seqno = seqno;
+ trace_i915_gem_request_complete(dev, seqno);
+ wake_up_all(&ring->irq_queue);
+ dev_priv->hangcheck_count = 0;
+ mod_timer(&dev_priv->hangcheck_timer,
+ jiffies + msecs_to_jiffies(DRM_I915_HANGCHECK_PERIOD));
+}
+
+static irqreturn_t ironlake_irq_handler(struct drm_device *dev)
{
drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
int ret = IRQ_NONE;
u32 de_iir, gt_iir, de_ier, pch_iir;
+ u32 hotplug_mask;
struct drm_i915_master_private *master_priv;
- struct intel_ring_buffer *render_ring = &dev_priv->render_ring;
+ u32 bsd_usr_interrupt = GT_BSD_USER_INTERRUPT;
+
+ if (IS_GEN6(dev))
+ bsd_usr_interrupt = GT_GEN6_BSD_USER_INTERRUPT;
/* disable master interrupt before clearing iir */
de_ier = I915_READ(DEIER);
@@ -325,6 +330,11 @@ irqreturn_t ironlake_irq_handler(struct drm_device *dev)
if (de_iir == 0 && gt_iir == 0 && pch_iir == 0)
goto done;
+ if (HAS_PCH_CPT(dev))
+ hotplug_mask = SDE_HOTPLUG_MASK_CPT;
+ else
+ hotplug_mask = SDE_HOTPLUG_MASK;
+
ret = IRQ_HANDLED;
if (dev->primary->master) {
@@ -334,29 +344,24 @@ irqreturn_t ironlake_irq_handler(struct drm_device *dev)
READ_BREADCRUMB(dev_priv);
}
- if (gt_iir & GT_PIPE_NOTIFY) {
- u32 seqno = render_ring->get_gem_seqno(dev, render_ring);
- render_ring->irq_gem_seqno = seqno;
- trace_i915_gem_request_complete(dev, seqno);
- DRM_WAKEUP(&dev_priv->render_ring.irq_queue);
- dev_priv->hangcheck_count = 0;
- mod_timer(&dev_priv->hangcheck_timer, jiffies + DRM_I915_HANGCHECK_PERIOD);
- }
- if (gt_iir & GT_BSD_USER_INTERRUPT)
- DRM_WAKEUP(&dev_priv->bsd_ring.irq_queue);
-
+ if (gt_iir & GT_PIPE_NOTIFY)
+ notify_ring(dev, &dev_priv->render_ring);
+ if (gt_iir & bsd_usr_interrupt)
+ notify_ring(dev, &dev_priv->bsd_ring);
+ if (HAS_BLT(dev) && gt_iir & GT_BLT_USER_INTERRUPT)
+ notify_ring(dev, &dev_priv->blt_ring);
if (de_iir & DE_GSE)
- ironlake_opregion_gse_intr(dev);
+ intel_opregion_gse_intr(dev);
if (de_iir & DE_PLANEA_FLIP_DONE) {
intel_prepare_page_flip(dev, 0);
- intel_finish_page_flip(dev, 0);
+ intel_finish_page_flip_plane(dev, 0);
}
if (de_iir & DE_PLANEB_FLIP_DONE) {
intel_prepare_page_flip(dev, 1);
- intel_finish_page_flip(dev, 1);
+ intel_finish_page_flip_plane(dev, 1);
}
if (de_iir & DE_PIPEA_VBLANK)
@@ -366,10 +371,8 @@ irqreturn_t ironlake_irq_handler(struct drm_device *dev)
drm_handle_vblank(dev, 1);
/* check event from PCH */
- if ((de_iir & DE_PCH_EVENT) &&
- (pch_iir & SDE_HOTPLUG_MASK)) {
+ if ((de_iir & DE_PCH_EVENT) && (pch_iir & hotplug_mask))
queue_work(dev_priv->wq, &dev_priv->hotplug_work);
- }
if (de_iir & DE_PCU_EVENT) {
I915_WRITE16(MEMINTRSTS, I915_READ(MEMINTRSTS));
@@ -404,23 +407,20 @@ static void i915_error_work_func(struct work_struct *work)
char *reset_event[] = { "RESET=1", NULL };
char *reset_done_event[] = { "ERROR=0", NULL };
- DRM_DEBUG_DRIVER("generating error event\n");
kobject_uevent_env(&dev->primary->kdev.kobj, KOBJ_CHANGE, error_event);
if (atomic_read(&dev_priv->mm.wedged)) {
- if (IS_I965G(dev)) {
- DRM_DEBUG_DRIVER("resetting chip\n");
- kobject_uevent_env(&dev->primary->kdev.kobj, KOBJ_CHANGE, reset_event);
- if (!i965_reset(dev, GDRST_RENDER)) {
- atomic_set(&dev_priv->mm.wedged, 0);
- kobject_uevent_env(&dev->primary->kdev.kobj, KOBJ_CHANGE, reset_done_event);
- }
- } else {
- DRM_DEBUG_DRIVER("reboot required\n");
+ DRM_DEBUG_DRIVER("resetting chip\n");
+ kobject_uevent_env(&dev->primary->kdev.kobj, KOBJ_CHANGE, reset_event);
+ if (!i915_reset(dev, GRDOM_RENDER)) {
+ atomic_set(&dev_priv->mm.wedged, 0);
+ kobject_uevent_env(&dev->primary->kdev.kobj, KOBJ_CHANGE, reset_done_event);
}
+ complete_all(&dev_priv->error_completion);
}
}
+#ifdef CONFIG_DEBUG_FS
static struct drm_i915_error_object *
i915_error_object_create(struct drm_device *dev,
struct drm_gem_object *src)
@@ -456,10 +456,9 @@ i915_error_object_create(struct drm_device *dev,
local_irq_save(flags);
s = io_mapping_map_atomic_wc(dev_priv->mm.gtt_mapping,
- reloc_offset,
- KM_IRQ0);
+ reloc_offset);
memcpy_fromio(d, s, PAGE_SIZE);
- io_mapping_unmap_atomic(s, KM_IRQ0);
+ io_mapping_unmap_atomic(s);
local_irq_restore(flags);
dst->pages[page] = d;
@@ -511,7 +510,7 @@ i915_get_bbaddr(struct drm_device *dev, u32 *ring)
if (IS_I830(dev) || IS_845G(dev))
cmd = MI_BATCH_BUFFER;
- else if (IS_I965G(dev))
+ else if (INTEL_INFO(dev)->gen >= 4)
cmd = (MI_BATCH_BUFFER_START | (2 << 6) |
MI_BATCH_NON_SECURE_I965);
else
@@ -584,13 +583,16 @@ static void i915_capture_error_state(struct drm_device *dev)
return;
}
- error->seqno = i915_get_gem_seqno(dev, &dev_priv->render_ring);
+ DRM_DEBUG_DRIVER("generating error event\n");
+
+ error->seqno =
+ dev_priv->render_ring.get_seqno(dev, &dev_priv->render_ring);
error->eir = I915_READ(EIR);
error->pgtbl_er = I915_READ(PGTBL_ER);
error->pipeastat = I915_READ(PIPEASTAT);
error->pipebstat = I915_READ(PIPEBSTAT);
error->instpm = I915_READ(INSTPM);
- if (!IS_I965G(dev)) {
+ if (INTEL_INFO(dev)->gen < 4) {
error->ipeir = I915_READ(IPEIR);
error->ipehr = I915_READ(IPEHR);
error->instdone = I915_READ(INSTDONE);
@@ -612,9 +614,7 @@ static void i915_capture_error_state(struct drm_device *dev)
batchbuffer[0] = NULL;
batchbuffer[1] = NULL;
count = 0;
- list_for_each_entry(obj_priv,
- &dev_priv->render_ring.active_list, list) {
-
+ list_for_each_entry(obj_priv, &dev_priv->mm.active_list, mm_list) {
struct drm_gem_object *obj = &obj_priv->base;
if (batchbuffer[0] == NULL &&
@@ -631,7 +631,7 @@ static void i915_capture_error_state(struct drm_device *dev)
}
/* Scan the other lists for completeness for those bizarre errors. */
if (batchbuffer[0] == NULL || batchbuffer[1] == NULL) {
- list_for_each_entry(obj_priv, &dev_priv->mm.flushing_list, list) {
+ list_for_each_entry(obj_priv, &dev_priv->mm.flushing_list, mm_list) {
struct drm_gem_object *obj = &obj_priv->base;
if (batchbuffer[0] == NULL &&
@@ -649,7 +649,7 @@ static void i915_capture_error_state(struct drm_device *dev)
}
}
if (batchbuffer[0] == NULL || batchbuffer[1] == NULL) {
- list_for_each_entry(obj_priv, &dev_priv->mm.inactive_list, list) {
+ list_for_each_entry(obj_priv, &dev_priv->mm.inactive_list, mm_list) {
struct drm_gem_object *obj = &obj_priv->base;
if (batchbuffer[0] == NULL &&
@@ -668,7 +668,7 @@ static void i915_capture_error_state(struct drm_device *dev)
}
/* We need to copy these to an anonymous buffer as the simplest
- * method to avoid being overwritten by userpace.
+ * method to avoid being overwritten by userspace.
*/
error->batchbuffer[0] = i915_error_object_create(dev, batchbuffer[0]);
if (batchbuffer[1] != batchbuffer[0])
@@ -690,8 +690,7 @@ static void i915_capture_error_state(struct drm_device *dev)
if (error->active_bo) {
int i = 0;
- list_for_each_entry(obj_priv,
- &dev_priv->render_ring.active_list, list) {
+ list_for_each_entry(obj_priv, &dev_priv->mm.active_list, mm_list) {
struct drm_gem_object *obj = &obj_priv->base;
error->active_bo[i].size = obj->size;
@@ -744,6 +743,9 @@ void i915_destroy_error_state(struct drm_device *dev)
if (error)
i915_error_state_free(dev, error);
}
+#else
+#define i915_capture_error_state(x)
+#endif
static void i915_report_and_clear_eir(struct drm_device *dev)
{
@@ -785,7 +787,7 @@ static void i915_report_and_clear_eir(struct drm_device *dev)
}
}
- if (IS_I9XX(dev)) {
+ if (!IS_GEN2(dev)) {
if (eir & I915_ERROR_PAGE_TABLE) {
u32 pgtbl_err = I915_READ(PGTBL_ER);
printk(KERN_ERR "page table error\n");
@@ -811,7 +813,7 @@ static void i915_report_and_clear_eir(struct drm_device *dev)
printk(KERN_ERR "instruction error\n");
printk(KERN_ERR " INSTPM: 0x%08x\n",
I915_READ(INSTPM));
- if (!IS_I965G(dev)) {
+ if (INTEL_INFO(dev)->gen < 4) {
u32 ipeir = I915_READ(IPEIR);
printk(KERN_ERR " IPEIR: 0x%08x\n",
@@ -876,12 +878,17 @@ static void i915_handle_error(struct drm_device *dev, bool wedged)
i915_report_and_clear_eir(dev);
if (wedged) {
+ INIT_COMPLETION(dev_priv->error_completion);
atomic_set(&dev_priv->mm.wedged, 1);
/*
* Wakeup waiting processes so they don't hang
*/
- DRM_WAKEUP(&dev_priv->render_ring.irq_queue);
+ wake_up_all(&dev_priv->render_ring.irq_queue);
+ if (HAS_BSD(dev))
+ wake_up_all(&dev_priv->bsd_ring.irq_queue);
+ if (HAS_BLT(dev))
+ wake_up_all(&dev_priv->blt_ring.irq_queue);
}
queue_work(dev_priv->wq, &dev_priv->error_work);
@@ -912,7 +919,7 @@ static void i915_pageflip_stall_check(struct drm_device *dev, int pipe)
/* Potential stall - if we see that the flip has happened, assume a missed interrupt */
obj_priv = to_intel_bo(work->pending_flip_obj);
- if(IS_I965G(dev)) {
+ if (INTEL_INFO(dev)->gen >= 4) {
int dspsurf = intel_crtc->plane == 0 ? DSPASURF : DSPBSURF;
stall_detected = I915_READ(dspsurf) == obj_priv->gtt_offset;
} else {
@@ -942,7 +949,6 @@ irqreturn_t i915_driver_irq_handler(DRM_IRQ_ARGS)
unsigned long irqflags;
int irq_received;
int ret = IRQ_NONE;
- struct intel_ring_buffer *render_ring = &dev_priv->render_ring;
atomic_inc(&dev_priv->irq_received);
@@ -951,7 +957,7 @@ irqreturn_t i915_driver_irq_handler(DRM_IRQ_ARGS)
iir = I915_READ(IIR);
- if (IS_I965G(dev))
+ if (INTEL_INFO(dev)->gen >= 4)
vblank_status = PIPE_START_VBLANK_INTERRUPT_STATUS;
else
vblank_status = PIPE_VBLANK_INTERRUPT_STATUS;
@@ -1019,18 +1025,10 @@ irqreturn_t i915_driver_irq_handler(DRM_IRQ_ARGS)
READ_BREADCRUMB(dev_priv);
}
- if (iir & I915_USER_INTERRUPT) {
- u32 seqno =
- render_ring->get_gem_seqno(dev, render_ring);
- render_ring->irq_gem_seqno = seqno;
- trace_i915_gem_request_complete(dev, seqno);
- DRM_WAKEUP(&dev_priv->render_ring.irq_queue);
- dev_priv->hangcheck_count = 0;
- mod_timer(&dev_priv->hangcheck_timer, jiffies + DRM_I915_HANGCHECK_PERIOD);
- }
-
+ if (iir & I915_USER_INTERRUPT)
+ notify_ring(dev, &dev_priv->render_ring);
if (HAS_BSD(dev) && (iir & I915_BSD_USER_INTERRUPT))
- DRM_WAKEUP(&dev_priv->bsd_ring.irq_queue);
+ notify_ring(dev, &dev_priv->bsd_ring);
if (iir & I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT) {
intel_prepare_page_flip(dev, 0);
@@ -1065,7 +1063,7 @@ irqreturn_t i915_driver_irq_handler(DRM_IRQ_ARGS)
if ((pipea_stats & PIPE_LEGACY_BLC_EVENT_STATUS) ||
(pipeb_stats & PIPE_LEGACY_BLC_EVENT_STATUS) ||
(iir & I915_ASLE_INTERRUPT))
- opregion_asle_intr(dev);
+ intel_opregion_asle_intr(dev);
/* With MSI, interrupts are only generated when iir
* transitions from zero to nonzero. If another bit got
@@ -1207,18 +1205,15 @@ int i915_enable_vblank(struct drm_device *dev, int pipe)
{
drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
unsigned long irqflags;
- int pipeconf_reg = (pipe == 0) ? PIPEACONF : PIPEBCONF;
- u32 pipeconf;
- pipeconf = I915_READ(pipeconf_reg);
- if (!(pipeconf & PIPEACONF_ENABLE))
+ if (!i915_pipe_enabled(dev, pipe))
return -EINVAL;
spin_lock_irqsave(&dev_priv->user_irq_lock, irqflags);
if (HAS_PCH_SPLIT(dev))
ironlake_enable_display_irq(dev_priv, (pipe == 0) ?
DE_PIPEA_VBLANK: DE_PIPEB_VBLANK);
- else if (IS_I965G(dev))
+ else if (INTEL_INFO(dev)->gen >= 4)
i915_enable_pipestat(dev_priv, pipe,
PIPE_START_VBLANK_INTERRUPT_ENABLE);
else
@@ -1252,7 +1247,7 @@ void i915_enable_interrupt (struct drm_device *dev)
struct drm_i915_private *dev_priv = dev->dev_private;
if (!HAS_PCH_SPLIT(dev))
- opregion_enable_asle(dev);
+ intel_opregion_enable_asle(dev);
dev_priv->irq_enabled = 1;
}
@@ -1311,7 +1306,7 @@ int i915_vblank_swap(struct drm_device *dev, void *data,
return -EINVAL;
}
-struct drm_i915_gem_request *
+static struct drm_i915_gem_request *
i915_get_tail_request(struct drm_device *dev)
{
drm_i915_private_t *dev_priv = dev->dev_private;
@@ -1331,11 +1326,7 @@ void i915_hangcheck_elapsed(unsigned long data)
drm_i915_private_t *dev_priv = dev->dev_private;
uint32_t acthd, instdone, instdone1;
- /* No reset support on this chip yet. */
- if (IS_GEN6(dev))
- return;
-
- if (!IS_I965G(dev)) {
+ if (INTEL_INFO(dev)->gen < 4) {
acthd = I915_READ(ACTHD);
instdone = I915_READ(INSTDONE);
instdone1 = 0;
@@ -1347,9 +1338,8 @@ void i915_hangcheck_elapsed(unsigned long data)
/* If all work is done then ACTHD clearly hasn't advanced. */
if (list_empty(&dev_priv->render_ring.request_list) ||
- i915_seqno_passed(i915_get_gem_seqno(dev,
- &dev_priv->render_ring),
- i915_get_tail_request(dev)->seqno)) {
+ i915_seqno_passed(dev_priv->render_ring.get_seqno(dev, &dev_priv->render_ring),
+ i915_get_tail_request(dev)->seqno)) {
bool missed_wakeup = false;
dev_priv->hangcheck_count = 0;
@@ -1357,13 +1347,19 @@ void i915_hangcheck_elapsed(unsigned long data)
/* Issue a wake-up to catch stuck h/w. */
if (dev_priv->render_ring.waiting_gem_seqno &&
waitqueue_active(&dev_priv->render_ring.irq_queue)) {
- DRM_WAKEUP(&dev_priv->render_ring.irq_queue);
+ wake_up_all(&dev_priv->render_ring.irq_queue);
missed_wakeup = true;
}
if (dev_priv->bsd_ring.waiting_gem_seqno &&
waitqueue_active(&dev_priv->bsd_ring.irq_queue)) {
- DRM_WAKEUP(&dev_priv->bsd_ring.irq_queue);
+ wake_up_all(&dev_priv->bsd_ring.irq_queue);
+ missed_wakeup = true;
+ }
+
+ if (dev_priv->blt_ring.waiting_gem_seqno &&
+ waitqueue_active(&dev_priv->blt_ring.irq_queue)) {
+ wake_up_all(&dev_priv->blt_ring.irq_queue);
missed_wakeup = true;
}
@@ -1377,6 +1373,21 @@ void i915_hangcheck_elapsed(unsigned long data)
dev_priv->last_instdone1 == instdone1) {
if (dev_priv->hangcheck_count++ > 1) {
DRM_ERROR("Hangcheck timer elapsed... GPU hung\n");
+
+ if (!IS_GEN2(dev)) {
+ /* Is the chip hanging on a WAIT_FOR_EVENT?
+ * If so we can simply poke the RB_WAIT bit
+ * and break the hang. This should work on
+ * all but the second generation chipsets.
+ */
+ u32 tmp = I915_READ(PRB0_CTL);
+ if (tmp & RING_WAIT) {
+ I915_WRITE(PRB0_CTL, tmp);
+ POSTING_READ(PRB0_CTL);
+ goto out;
+ }
+ }
+
i915_handle_error(dev, true);
return;
}
@@ -1388,8 +1399,10 @@ void i915_hangcheck_elapsed(unsigned long data)
dev_priv->last_instdone1 = instdone1;
}
+out:
/* Reset the timer in case the chip hangs without another request being added */
- mod_timer(&dev_priv->hangcheck_timer, jiffies + DRM_I915_HANGCHECK_PERIOD);
+ mod_timer(&dev_priv->hangcheck_timer,
+ jiffies + msecs_to_jiffies(DRM_I915_HANGCHECK_PERIOD));
}
/* drm_dma.h hooks
@@ -1424,8 +1437,7 @@ static int ironlake_irq_postinstall(struct drm_device *dev)
u32 display_mask = DE_MASTER_IRQ_CONTROL | DE_GSE | DE_PCH_EVENT |
DE_PLANEA_FLIP_DONE | DE_PLANEB_FLIP_DONE;
u32 render_mask = GT_PIPE_NOTIFY | GT_BSD_USER_INTERRUPT;
- u32 hotplug_mask = SDE_CRT_HOTPLUG | SDE_PORTB_HOTPLUG |
- SDE_PORTC_HOTPLUG | SDE_PORTD_HOTPLUG;
+ u32 hotplug_mask;
dev_priv->irq_mask_reg = ~display_mask;
dev_priv->de_irq_enable_reg = display_mask | DE_PIPEA_VBLANK | DE_PIPEB_VBLANK;
@@ -1436,20 +1448,35 @@ static int ironlake_irq_postinstall(struct drm_device *dev)
I915_WRITE(DEIER, dev_priv->de_irq_enable_reg);
(void) I915_READ(DEIER);
- /* Gen6 only needs render pipe_control now */
- if (IS_GEN6(dev))
- render_mask = GT_PIPE_NOTIFY;
+ if (IS_GEN6(dev)) {
+ render_mask =
+ GT_PIPE_NOTIFY |
+ GT_GEN6_BSD_USER_INTERRUPT |
+ GT_BLT_USER_INTERRUPT;
+ }
dev_priv->gt_irq_mask_reg = ~render_mask;
dev_priv->gt_irq_enable_reg = render_mask;
I915_WRITE(GTIIR, I915_READ(GTIIR));
I915_WRITE(GTIMR, dev_priv->gt_irq_mask_reg);
- if (IS_GEN6(dev))
+ if (IS_GEN6(dev)) {
I915_WRITE(GEN6_RENDER_IMR, ~GEN6_RENDER_PIPE_CONTROL_NOTIFY_INTERRUPT);
+ I915_WRITE(GEN6_BSD_IMR, ~GEN6_BSD_IMR_USER_INTERRUPT);
+ I915_WRITE(GEN6_BLITTER_IMR, ~GEN6_BLITTER_USER_INTERRUPT);
+ }
+
I915_WRITE(GTIER, dev_priv->gt_irq_enable_reg);
(void) I915_READ(GTIER);
+ if (HAS_PCH_CPT(dev)) {
+ hotplug_mask = SDE_CRT_HOTPLUG_CPT | SDE_PORTB_HOTPLUG_CPT |
+ SDE_PORTC_HOTPLUG_CPT | SDE_PORTD_HOTPLUG_CPT;
+ } else {
+ hotplug_mask = SDE_CRT_HOTPLUG | SDE_PORTB_HOTPLUG |
+ SDE_PORTC_HOTPLUG | SDE_PORTD_HOTPLUG;
+ }
+
dev_priv->pch_irq_mask_reg = ~hotplug_mask;
dev_priv->pch_irq_enable_reg = hotplug_mask;
@@ -1506,9 +1533,10 @@ int i915_driver_irq_postinstall(struct drm_device *dev)
u32 error_mask;
DRM_INIT_WAITQUEUE(&dev_priv->render_ring.irq_queue);
-
if (HAS_BSD(dev))
DRM_INIT_WAITQUEUE(&dev_priv->bsd_ring.irq_queue);
+ if (HAS_BLT(dev))
+ DRM_INIT_WAITQUEUE(&dev_priv->blt_ring.irq_queue);
dev_priv->vblank_pipe = DRM_I915_VBLANK_PIPE_A | DRM_I915_VBLANK_PIPE_B;
@@ -1578,7 +1606,7 @@ int i915_driver_irq_postinstall(struct drm_device *dev)
I915_WRITE(PORT_HOTPLUG_EN, hotplug_en);
}
- opregion_enable_asle(dev);
+ intel_opregion_enable_asle(dev);
return 0;
}
diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h
index 4f5e15577e89..878fc766a12c 100644
--- a/drivers/gpu/drm/i915/i915_reg.h
+++ b/drivers/gpu/drm/i915/i915_reg.h
@@ -25,52 +25,16 @@
#ifndef _I915_REG_H_
#define _I915_REG_H_
+#define _PIPE(pipe, a, b) ((a) + (pipe)*((b)-(a)))
+
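/*
 * A stand-alone sketch (not part of the patch) of the arithmetic behind
 * _PIPE(): it interpolates linearly between the pipe A and pipe B offsets,
 * which is what the per-pipe macros added below (DPLL(pipe), PIPECONF(pipe),
 * FDI_TX_CTL(pipe), ...) rely on.  The DPLL_A/DPLL_B values are taken from
 * this header.
 */
#include <assert.h>

#define _PIPE(pipe, a, b) ((a) + (pipe)*((b)-(a)))

int main(void)
{
	assert(_PIPE(0, 0x06014, 0x06018) == 0x06014); /* DPLL(0) -> DPLL_A */
	assert(_PIPE(1, 0x06014, 0x06018) == 0x06018); /* DPLL(1) -> DPLL_B */
	return 0;
}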
/*
* The Bridge device's PCI config space has information about the
* fb aperture size and the amount of pre-reserved memory.
+ * This is all handled in the intel-gtt.ko module. i915.ko only
+ * cares about the vga bit for the vga arbiter.
*/
#define INTEL_GMCH_CTRL 0x52
#define INTEL_GMCH_VGA_DISABLE (1 << 1)
-#define INTEL_GMCH_ENABLED 0x4
-#define INTEL_GMCH_MEM_MASK 0x1
-#define INTEL_GMCH_MEM_64M 0x1
-#define INTEL_GMCH_MEM_128M 0
-
-#define INTEL_GMCH_GMS_MASK (0xf << 4)
-#define INTEL_855_GMCH_GMS_DISABLED (0x0 << 4)
-#define INTEL_855_GMCH_GMS_STOLEN_1M (0x1 << 4)
-#define INTEL_855_GMCH_GMS_STOLEN_4M (0x2 << 4)
-#define INTEL_855_GMCH_GMS_STOLEN_8M (0x3 << 4)
-#define INTEL_855_GMCH_GMS_STOLEN_16M (0x4 << 4)
-#define INTEL_855_GMCH_GMS_STOLEN_32M (0x5 << 4)
-
-#define INTEL_915G_GMCH_GMS_STOLEN_48M (0x6 << 4)
-#define INTEL_915G_GMCH_GMS_STOLEN_64M (0x7 << 4)
-#define INTEL_GMCH_GMS_STOLEN_128M (0x8 << 4)
-#define INTEL_GMCH_GMS_STOLEN_256M (0x9 << 4)
-#define INTEL_GMCH_GMS_STOLEN_96M (0xa << 4)
-#define INTEL_GMCH_GMS_STOLEN_160M (0xb << 4)
-#define INTEL_GMCH_GMS_STOLEN_224M (0xc << 4)
-#define INTEL_GMCH_GMS_STOLEN_352M (0xd << 4)
-
-#define SNB_GMCH_CTRL 0x50
-#define SNB_GMCH_GMS_STOLEN_MASK 0xF8
-#define SNB_GMCH_GMS_STOLEN_32M (1 << 3)
-#define SNB_GMCH_GMS_STOLEN_64M (2 << 3)
-#define SNB_GMCH_GMS_STOLEN_96M (3 << 3)
-#define SNB_GMCH_GMS_STOLEN_128M (4 << 3)
-#define SNB_GMCH_GMS_STOLEN_160M (5 << 3)
-#define SNB_GMCH_GMS_STOLEN_192M (6 << 3)
-#define SNB_GMCH_GMS_STOLEN_224M (7 << 3)
-#define SNB_GMCH_GMS_STOLEN_256M (8 << 3)
-#define SNB_GMCH_GMS_STOLEN_288M (9 << 3)
-#define SNB_GMCH_GMS_STOLEN_320M (0xa << 3)
-#define SNB_GMCH_GMS_STOLEN_352M (0xb << 3)
-#define SNB_GMCH_GMS_STOLEN_384M (0xc << 3)
-#define SNB_GMCH_GMS_STOLEN_416M (0xd << 3)
-#define SNB_GMCH_GMS_STOLEN_448M (0xe << 3)
-#define SNB_GMCH_GMS_STOLEN_480M (0xf << 3)
-#define SNB_GMCH_GMS_STOLEN_512M (0x10 << 3)
/* PCI config space */
@@ -106,10 +70,13 @@
#define I915_GC_RENDER_CLOCK_200_MHZ (1 << 0)
#define I915_GC_RENDER_CLOCK_333_MHZ (4 << 0)
#define LBB 0xf4
-#define GDRST 0xc0
-#define GDRST_FULL (0<<2)
-#define GDRST_RENDER (1<<2)
-#define GDRST_MEDIA (3<<2)
+
+/* Graphics reset regs */
+#define I965_GDRST 0xc0 /* PCI config register */
+#define ILK_GDSR 0x2ca4 /* MCHBAR offset */
+#define GRDOM_FULL (0<<2)
+#define GRDOM_RENDER (1<<2)
+#define GRDOM_MEDIA (3<<2)
/* VGA stuff */
@@ -192,11 +159,11 @@
#define MI_STORE_DWORD_INDEX MI_INSTR(0x21, 1)
#define MI_STORE_DWORD_INDEX_SHIFT 2
#define MI_LOAD_REGISTER_IMM MI_INSTR(0x22, 1)
+#define MI_FLUSH_DW MI_INSTR(0x26, 2) /* for GEN6 */
#define MI_BATCH_BUFFER MI_INSTR(0x30, 1)
#define MI_BATCH_NON_SECURE (1)
#define MI_BATCH_NON_SECURE_I965 (1<<8)
#define MI_BATCH_BUFFER_START MI_INSTR(0x31, 0)
-
/*
* 3D instructions used by the kernel
*/
@@ -249,6 +216,16 @@
#define PIPE_CONTROL_GLOBAL_GTT (1<<2) /* in addr dword */
#define PIPE_CONTROL_STALL_EN (1<<1) /* in addr word, Ironlake+ only */
+
+/*
+ * Reset registers
+ */
+#define DEBUG_RESET_I830 0x6070
+#define DEBUG_RESET_FULL (1<<7)
+#define DEBUG_RESET_RENDER (1<<8)
+#define DEBUG_RESET_DISPLAY (1<<9)
+
+
/*
* Fence registers
*/
@@ -283,6 +260,17 @@
#define PRB0_HEAD 0x02034
#define PRB0_START 0x02038
#define PRB0_CTL 0x0203c
+#define RENDER_RING_BASE 0x02000
+#define BSD_RING_BASE 0x04000
+#define GEN6_BSD_RING_BASE 0x12000
+#define BLT_RING_BASE 0x22000
+#define RING_TAIL(base) ((base)+0x30)
+#define RING_HEAD(base) ((base)+0x34)
+#define RING_START(base) ((base)+0x38)
+#define RING_CTL(base) ((base)+0x3c)
+#define RING_HWS_PGA(base) ((base)+0x80)
+#define RING_HWS_PGA_GEN6(base) ((base)+0x2080)
+#define RING_ACTHD(base) ((base)+0x74)
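/*
 * A stand-alone sketch (not part of the patch) checking that the new
 * base-relative ring macros reproduce the fixed BSD_RING_* addresses that
 * are deleted further down in this file.
 */
#include <assert.h>

#define BSD_RING_BASE		0x04000
#define RING_TAIL(base)		((base)+0x30)
#define RING_HEAD(base)		((base)+0x34)
#define RING_ACTHD(base)	((base)+0x74)
#define RING_HWS_PGA(base)	((base)+0x80)

int main(void)
{
	assert(RING_TAIL(BSD_RING_BASE)    == 0x04030); /* old BSD_RING_TAIL */
	assert(RING_HEAD(BSD_RING_BASE)    == 0x04034); /* old BSD_RING_HEAD */
	assert(RING_ACTHD(BSD_RING_BASE)   == 0x04074); /* old BSD_RING_ACTHD */
	assert(RING_HWS_PGA(BSD_RING_BASE) == 0x04080); /* old BSD_HWS_PGA */
	return 0;
}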
#define TAIL_ADDR 0x001FFFF8
#define HEAD_WRAP_COUNT 0xFFE00000
#define HEAD_WRAP_ONE 0x00200000
@@ -295,6 +283,8 @@
#define RING_VALID_MASK 0x00000001
#define RING_VALID 0x00000001
#define RING_INVALID 0x00000000
+#define RING_WAIT_I8XX (1<<0) /* gen2, PRBx_HEAD */
+#define RING_WAIT (1<<11) /* gen3+, PRBx_CTL */
#define PRB1_TAIL 0x02040 /* 915+ only */
#define PRB1_HEAD 0x02044 /* 915+ only */
#define PRB1_START 0x02048 /* 915+ only */
@@ -306,7 +296,6 @@
#define INSTDONE1 0x0207c /* 965+ only */
#define ACTHD_I965 0x02074
#define HWS_PGA 0x02080
-#define HWS_PGA_GEN6 0x04080
#define HWS_ADDRESS_MASK 0xfffff000
#define HWS_START_ADDRESS_SHIFT 4
#define PWRCTXA 0x2088 /* 965GM+ only */
@@ -464,17 +453,17 @@
#define GEN6_BLITTER_COMMAND_PARSER_MASTER_ERROR (1 << 25)
#define GEN6_BLITTER_SYNC_STATUS (1 << 24)
#define GEN6_BLITTER_USER_INTERRUPT (1 << 22)
-/*
- * BSD (bit stream decoder instruction and interrupt control register defines
- * (G4X and Ironlake only)
- */
-#define BSD_RING_TAIL 0x04030
-#define BSD_RING_HEAD 0x04034
-#define BSD_RING_START 0x04038
-#define BSD_RING_CTL 0x0403c
-#define BSD_RING_ACTHD 0x04074
-#define BSD_HWS_PGA 0x04080
+#define GEN6_BSD_SLEEP_PSMI_CONTROL 0x12050
+#define GEN6_BSD_SLEEP_PSMI_CONTROL_RC_ILDL_MESSAGE_MODIFY_MASK (1 << 16)
+#define GEN6_BSD_SLEEP_PSMI_CONTROL_RC_ILDL_MESSAGE_DISABLE (1 << 0)
+#define GEN6_BSD_SLEEP_PSMI_CONTROL_RC_ILDL_MESSAGE_ENABLE 0
+#define GEN6_BSD_SLEEP_PSMI_CONTROL_IDLE_INDICATOR (1 << 3)
+
+#define GEN6_BSD_IMR 0x120a8
+#define GEN6_BSD_IMR_USER_INTERRUPT (1 << 12)
+
+#define GEN6_BSD_RNCID 0x12198
/*
* Framebuffer compression (915+ only)
@@ -579,12 +568,51 @@
# define GPIO_DATA_VAL_IN (1 << 12)
# define GPIO_DATA_PULLUP_DISABLE (1 << 13)
-#define GMBUS0 0x5100
-#define GMBUS1 0x5104
-#define GMBUS2 0x5108
-#define GMBUS3 0x510c
-#define GMBUS4 0x5110
-#define GMBUS5 0x5120
+#define GMBUS0 0x5100 /* clock/port select */
+#define GMBUS_RATE_100KHZ (0<<8)
+#define GMBUS_RATE_50KHZ (1<<8)
+#define GMBUS_RATE_400KHZ (2<<8) /* reserved on Pineview */
+#define GMBUS_RATE_1MHZ (3<<8) /* reserved on Pineview */
+#define GMBUS_HOLD_EXT (1<<7) /* 300ns hold time, rsvd on Pineview */
+#define GMBUS_PORT_DISABLED 0
+#define GMBUS_PORT_SSC 1
+#define GMBUS_PORT_VGADDC 2
+#define GMBUS_PORT_PANEL 3
+#define GMBUS_PORT_DPC 4 /* HDMIC */
+#define GMBUS_PORT_DPB 5 /* SDVO, HDMIB */
+ /* 6 reserved */
+#define GMBUS_PORT_DPD 7 /* HDMID */
+#define GMBUS_NUM_PORTS 8
+#define GMBUS1 0x5104 /* command/status */
+#define GMBUS_SW_CLR_INT (1<<31)
+#define GMBUS_SW_RDY (1<<30)
+#define GMBUS_ENT (1<<29) /* enable timeout */
+#define GMBUS_CYCLE_NONE (0<<25)
+#define GMBUS_CYCLE_WAIT (1<<25)
+#define GMBUS_CYCLE_INDEX (2<<25)
+#define GMBUS_CYCLE_STOP (4<<25)
+#define GMBUS_BYTE_COUNT_SHIFT 16
+#define GMBUS_SLAVE_INDEX_SHIFT 8
+#define GMBUS_SLAVE_ADDR_SHIFT 1
+#define GMBUS_SLAVE_READ (1<<0)
+#define GMBUS_SLAVE_WRITE (0<<0)
+#define GMBUS2 0x5108 /* status */
+#define GMBUS_INUSE (1<<15)
+#define GMBUS_HW_WAIT_PHASE (1<<14)
+#define GMBUS_STALL_TIMEOUT (1<<13)
+#define GMBUS_INT (1<<12)
+#define GMBUS_HW_RDY (1<<11)
+#define GMBUS_SATOER (1<<10)
+#define GMBUS_ACTIVE (1<<9)
+#define GMBUS3 0x510c /* data buffer bytes 3-0 */
+#define GMBUS4 0x5110 /* interrupt mask (Pineview+) */
+#define GMBUS_SLAVE_TIMEOUT_EN (1<<4)
+#define GMBUS_NAK_EN (1<<3)
+#define GMBUS_IDLE_EN (1<<2)
+#define GMBUS_HW_WAIT_EN (1<<1)
+#define GMBUS_HW_RDY_EN (1<<0)
+#define GMBUS5 0x5120 /* byte index */
+#define GMBUS_2BYTE_INDEX_EN (1<<31)
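/*
 * Illustrative stand-alone sketch only: one way a GMBUS1 command word could
 * be composed from the fields documented above.  This is not the driver's
 * GMBUS code, and the slave address and transfer length are made-up example
 * values.
 */
#include <stdio.h>

#define GMBUS_SW_RDY			(1u << 30)
#define GMBUS_CYCLE_WAIT		(1u << 25)
#define GMBUS_CYCLE_STOP		(4u << 25)
#define GMBUS_BYTE_COUNT_SHIFT		16
#define GMBUS_SLAVE_ADDR_SHIFT		1
#define GMBUS_SLAVE_READ		(1u << 0)

int main(void)
{
	unsigned int addr = 0x50;	/* hypothetical 7-bit slave address */
	unsigned int len = 128;		/* hypothetical transfer length */
	unsigned int cmd = GMBUS_SW_RDY | GMBUS_CYCLE_WAIT | GMBUS_CYCLE_STOP |
			   (len << GMBUS_BYTE_COUNT_SHIFT) |
			   (addr << GMBUS_SLAVE_ADDR_SHIFT) |
			   GMBUS_SLAVE_READ;
	printf("GMBUS1 command word: 0x%08x\n", cmd);
	return 0;
}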
/*
* Clock control & power management
@@ -603,6 +631,7 @@
#define VGA1_PD_P1_MASK (0x1f << 8)
#define DPLL_A 0x06014
#define DPLL_B 0x06018
+#define DPLL(pipe) _PIPE(pipe, DPLL_A, DPLL_B)
#define DPLL_VCO_ENABLE (1 << 31)
#define DPLL_DVO_HIGH_SPEED (1 << 30)
#define DPLL_SYNCLOCK_ENABLE (1 << 29)
@@ -633,31 +662,6 @@
#define LVDS 0x61180
#define LVDS_ON (1<<31)
-#define ADPA 0x61100
-#define ADPA_DPMS_MASK (~(3<<10))
-#define ADPA_DPMS_ON (0<<10)
-#define ADPA_DPMS_SUSPEND (1<<10)
-#define ADPA_DPMS_STANDBY (2<<10)
-#define ADPA_DPMS_OFF (3<<10)
-
-#define RING_TAIL 0x00
-#define TAIL_ADDR 0x001FFFF8
-#define RING_HEAD 0x04
-#define HEAD_WRAP_COUNT 0xFFE00000
-#define HEAD_WRAP_ONE 0x00200000
-#define HEAD_ADDR 0x001FFFFC
-#define RING_START 0x08
-#define START_ADDR 0xFFFFF000
-#define RING_LEN 0x0C
-#define RING_NR_PAGES 0x001FF000
-#define RING_REPORT_MASK 0x00000006
-#define RING_REPORT_64K 0x00000002
-#define RING_REPORT_128K 0x00000004
-#define RING_NO_REPORT 0x00000000
-#define RING_VALID_MASK 0x00000001
-#define RING_VALID 0x00000001
-#define RING_INVALID 0x00000000
-
/* Scratch pad debug 0 reg:
*/
#define DPLL_FPA01_P1_POST_DIV_MASK_I830 0x001f0000
@@ -736,10 +740,13 @@
#define DPLL_MD_VGA_UDI_MULTIPLIER_MASK 0x0000003f
#define DPLL_MD_VGA_UDI_MULTIPLIER_SHIFT 0
#define DPLL_B_MD 0x06020 /* 965+ only */
+#define DPLL_MD(pipe) _PIPE(pipe, DPLL_A_MD, DPLL_B_MD)
#define FPA0 0x06040
#define FPA1 0x06044
#define FPB0 0x06048
#define FPB1 0x0604c
+#define FP0(pipe) _PIPE(pipe, FPA0, FPB0)
+#define FP1(pipe) _PIPE(pipe, FPA1, FPB1)
#define FP_N_DIV_MASK 0x003f0000
#define FP_N_PINEVIEW_DIV_MASK 0x00ff0000
#define FP_N_DIV_SHIFT 16
@@ -760,6 +767,7 @@
#define DPLLA_TEST_M_BYPASS (1 << 2)
#define DPLLA_INPUT_BUFFER_ENABLE (1 << 0)
#define D_STATE 0x6104
+#define DSTATE_GFX_RESET_I830 (1<<6)
#define DSTATE_PLL_D3_OFF (1<<3)
#define DSTATE_GFX_CLOCK_GATING (1<<1)
#define DSTATE_DOT_CLOCK_GATING (1<<0)
@@ -926,6 +934,8 @@
#define CLKCFG_MEM_800 (3 << 4)
#define CLKCFG_MEM_MASK (7 << 4)
+#define TSC1 0x11001
+#define TSE (1<<0)
#define TR1 0x11006
#define TSFS 0x11020
#define TSFS_SLOPE_MASK 0x0000ff00
@@ -1070,6 +1080,8 @@
#define MEMSTAT_SRC_CTL_STDBY 3
#define RCPREVBSYTUPAVG 0x113b8
#define RCPREVBSYTDNAVG 0x113bc
+#define PMMISC 0x11214
+#define MCPPCE_EN (1<<0) /* enable PM_MSG from PCH->MPC */
#define SDEW 0x1124c
#define CSIEW0 0x11250
#define CSIEW1 0x11254
@@ -1150,6 +1162,15 @@
#define PIPEBSRC 0x6101c
#define BCLRPAT_B 0x61020
+#define HTOTAL(pipe) _PIPE(pipe, HTOTAL_A, HTOTAL_B)
+#define HBLANK(pipe) _PIPE(pipe, HBLANK_A, HBLANK_B)
+#define HSYNC(pipe) _PIPE(pipe, HSYNC_A, HSYNC_B)
+#define VTOTAL(pipe) _PIPE(pipe, VTOTAL_A, VTOTAL_B)
+#define VBLANK(pipe) _PIPE(pipe, VBLANK_A, VBLANK_B)
+#define VSYNC(pipe) _PIPE(pipe, VSYNC_A, VSYNC_B)
+#define PIPESRC(pipe) _PIPE(pipe, PIPEASRC, PIPEBSRC)
+#define BCLRPAT(pipe) _PIPE(pipe, BCLRPAT_A, BCLRPAT_B)
+
/* VGA port control */
#define ADPA 0x61100
#define ADPA_DAC_ENABLE (1<<31)
@@ -1173,6 +1194,7 @@
#define ADPA_DPMS_STANDBY (2<<10)
#define ADPA_DPMS_OFF (3<<10)
+
/* Hotplug control (945+ only) */
#define PORT_HOTPLUG_EN 0x61110
#define HDMIB_HOTPLUG_INT_EN (1 << 29)
@@ -1331,6 +1353,22 @@
#define LVDS_B0B3_POWER_DOWN (0 << 2)
#define LVDS_B0B3_POWER_UP (3 << 2)
+/* Video Data Island Packet control */
+#define VIDEO_DIP_DATA 0x61178
+#define VIDEO_DIP_CTL 0x61170
+#define VIDEO_DIP_ENABLE (1 << 31)
+#define VIDEO_DIP_PORT_B (1 << 29)
+#define VIDEO_DIP_PORT_C (2 << 29)
+#define VIDEO_DIP_ENABLE_AVI (1 << 21)
+#define VIDEO_DIP_ENABLE_VENDOR (2 << 21)
+#define VIDEO_DIP_ENABLE_SPD (8 << 21)
+#define VIDEO_DIP_SELECT_AVI (0 << 19)
+#define VIDEO_DIP_SELECT_VENDOR (1 << 19)
+#define VIDEO_DIP_SELECT_SPD (3 << 19)
+#define VIDEO_DIP_FREQ_ONCE (0 << 16)
+#define VIDEO_DIP_FREQ_VSYNC (1 << 16)
+#define VIDEO_DIP_FREQ_2VSYNC (2 << 16)
+
/* Panel power sequencing */
#define PP_STATUS 0x61200
#define PP_ON (1 << 31)
@@ -1346,6 +1384,9 @@
#define PP_SEQUENCE_ON (1 << 28)
#define PP_SEQUENCE_OFF (2 << 28)
#define PP_SEQUENCE_MASK 0x30000000
+#define PP_CYCLE_DELAY_ACTIVE (1 << 27)
+#define PP_SEQUENCE_STATE_ON_IDLE (1 << 3)
+#define PP_SEQUENCE_STATE_MASK 0x0000000f
#define PP_CONTROL 0x61204
#define POWER_TARGET_ON (1 << 0)
#define PP_ON_DELAYS 0x61208
@@ -1481,6 +1522,7 @@
# define TV_TEST_MODE_MASK (7 << 0)
#define TV_DAC 0x68004
+# define TV_DAC_SAVE 0x00ffff00
/**
* Reports that DAC state change logic has reported change (RO).
*
@@ -2075,29 +2117,35 @@
/* Display & cursor control */
-/* dithering flag on Ironlake */
-#define PIPE_ENABLE_DITHER (1 << 4)
-#define PIPE_DITHER_TYPE_MASK (3 << 2)
-#define PIPE_DITHER_TYPE_SPATIAL (0 << 2)
-#define PIPE_DITHER_TYPE_ST01 (1 << 2)
/* Pipe A */
#define PIPEADSL 0x70000
-#define DSL_LINEMASK 0x00000fff
+#define DSL_LINEMASK 0x00000fff
#define PIPEACONF 0x70008
-#define PIPEACONF_ENABLE (1<<31)
-#define PIPEACONF_DISABLE 0
-#define PIPEACONF_DOUBLE_WIDE (1<<30)
+#define PIPECONF_ENABLE (1<<31)
+#define PIPECONF_DISABLE 0
+#define PIPECONF_DOUBLE_WIDE (1<<30)
#define I965_PIPECONF_ACTIVE (1<<30)
-#define PIPEACONF_SINGLE_WIDE 0
-#define PIPEACONF_PIPE_UNLOCKED 0
-#define PIPEACONF_PIPE_LOCKED (1<<25)
-#define PIPEACONF_PALETTE 0
-#define PIPEACONF_GAMMA (1<<24)
+#define PIPECONF_SINGLE_WIDE 0
+#define PIPECONF_PIPE_UNLOCKED 0
+#define PIPECONF_PIPE_LOCKED (1<<25)
+#define PIPECONF_PALETTE 0
+#define PIPECONF_GAMMA (1<<24)
#define PIPECONF_FORCE_BORDER (1<<25)
#define PIPECONF_PROGRESSIVE (0 << 21)
#define PIPECONF_INTERLACE_W_FIELD_INDICATION (6 << 21)
#define PIPECONF_INTERLACE_FIELD_0_ONLY (7 << 21)
#define PIPECONF_CXSR_DOWNCLOCK (1<<16)
+#define PIPECONF_BPP_MASK (0x000000e0)
+#define PIPECONF_BPP_8 (0<<5)
+#define PIPECONF_BPP_10 (1<<5)
+#define PIPECONF_BPP_6 (2<<5)
+#define PIPECONF_BPP_12 (3<<5)
+#define PIPECONF_DITHER_EN (1<<4)
+#define PIPECONF_DITHER_TYPE_MASK (0x0000000c)
+#define PIPECONF_DITHER_TYPE_SP (0<<2)
+#define PIPECONF_DITHER_TYPE_ST1 (1<<2)
+#define PIPECONF_DITHER_TYPE_ST2 (2<<2)
+#define PIPECONF_DITHER_TYPE_TEMP (3<<2)
#define PIPEASTAT 0x70024
#define PIPE_FIFO_UNDERRUN_STATUS (1UL<<31)
#define PIPE_CRC_ERROR_ENABLE (1UL<<29)
@@ -2128,12 +2176,15 @@
#define PIPE_START_VBLANK_INTERRUPT_STATUS (1UL<<2) /* 965 or later */
#define PIPE_VBLANK_INTERRUPT_STATUS (1UL<<1)
#define PIPE_OVERLAY_UPDATED_STATUS (1UL<<0)
-#define PIPE_BPC_MASK (7 << 5) /* Ironlake */
+#define PIPE_BPC_MASK (7 << 5) /* Ironlake */
#define PIPE_8BPC (0 << 5)
#define PIPE_10BPC (1 << 5)
#define PIPE_6BPC (2 << 5)
#define PIPE_12BPC (3 << 5)
+#define PIPECONF(pipe) _PIPE(pipe, PIPEACONF, PIPEBCONF)
+#define PIPEDSL(pipe) _PIPE(pipe, PIPEADSL, PIPEBDSL)
+
#define DSPARB 0x70030
#define DSPARB_CSTART_MASK (0x7f << 7)
#define DSPARB_CSTART_SHIFT 7
@@ -2206,8 +2257,8 @@
#define WM1_LP_SR_EN (1<<31)
#define WM1_LP_LATENCY_SHIFT 24
#define WM1_LP_LATENCY_MASK (0x7f<<24)
-#define WM1_LP_FBC_LP1_MASK (0xf<<20)
-#define WM1_LP_FBC_LP1_SHIFT 20
+#define WM1_LP_FBC_MASK (0xf<<20)
+#define WM1_LP_FBC_SHIFT 20
#define WM1_LP_SR_MASK (0x1ff<<8)
#define WM1_LP_SR_SHIFT 8
#define WM1_LP_CURSOR_MASK (0x3f)
@@ -2333,6 +2384,14 @@
#define DSPASURF 0x7019C /* 965+ only */
#define DSPATILEOFF 0x701A4 /* 965+ only */
+#define DSPCNTR(plane) _PIPE(plane, DSPACNTR, DSPBCNTR)
+#define DSPADDR(plane) _PIPE(plane, DSPAADDR, DSPBADDR)
+#define DSPSTRIDE(plane) _PIPE(plane, DSPASTRIDE, DSPBSTRIDE)
+#define DSPPOS(plane) _PIPE(plane, DSPAPOS, DSPBPOS)
+#define DSPSIZE(plane) _PIPE(plane, DSPASIZE, DSPBSIZE)
+#define DSPSURF(plane) _PIPE(plane, DSPASURF, DSPBSURF)
+#define DSPTILEOFF(plane) _PIPE(plane, DSPATILEOFF, DSPBTILEOFF)
+
/* VBIOS flags */
#define SWF00 0x71410
#define SWF01 0x71414
@@ -2397,6 +2456,7 @@
#define RR_HW_HIGH_POWER_FRAMES_MASK 0xff00
#define FDI_PLL_BIOS_0 0x46000
+#define FDI_PLL_FB_CLOCK_MASK 0xff
#define FDI_PLL_BIOS_1 0x46004
#define FDI_PLL_BIOS_2 0x46008
#define DISPLAY_PORT_PLL_BIOS_0 0x4600c
@@ -2420,46 +2480,47 @@
#define PIPEA_DATA_M1 0x60030
#define TU_SIZE(x) (((x)-1) << 25) /* default size 64 */
#define TU_SIZE_MASK 0x7e000000
-#define PIPEA_DATA_M1_OFFSET 0
+#define PIPE_DATA_M1_OFFSET 0
#define PIPEA_DATA_N1 0x60034
-#define PIPEA_DATA_N1_OFFSET 0
+#define PIPE_DATA_N1_OFFSET 0
#define PIPEA_DATA_M2 0x60038
-#define PIPEA_DATA_M2_OFFSET 0
+#define PIPE_DATA_M2_OFFSET 0
#define PIPEA_DATA_N2 0x6003c
-#define PIPEA_DATA_N2_OFFSET 0
+#define PIPE_DATA_N2_OFFSET 0
#define PIPEA_LINK_M1 0x60040
-#define PIPEA_LINK_M1_OFFSET 0
+#define PIPE_LINK_M1_OFFSET 0
#define PIPEA_LINK_N1 0x60044
-#define PIPEA_LINK_N1_OFFSET 0
+#define PIPE_LINK_N1_OFFSET 0
#define PIPEA_LINK_M2 0x60048
-#define PIPEA_LINK_M2_OFFSET 0
+#define PIPE_LINK_M2_OFFSET 0
#define PIPEA_LINK_N2 0x6004c
-#define PIPEA_LINK_N2_OFFSET 0
+#define PIPE_LINK_N2_OFFSET 0
/* PIPEB timing regs are same start from 0x61000 */
#define PIPEB_DATA_M1 0x61030
-#define PIPEB_DATA_M1_OFFSET 0
#define PIPEB_DATA_N1 0x61034
-#define PIPEB_DATA_N1_OFFSET 0
#define PIPEB_DATA_M2 0x61038
-#define PIPEB_DATA_M2_OFFSET 0
#define PIPEB_DATA_N2 0x6103c
-#define PIPEB_DATA_N2_OFFSET 0
#define PIPEB_LINK_M1 0x61040
-#define PIPEB_LINK_M1_OFFSET 0
#define PIPEB_LINK_N1 0x61044
-#define PIPEB_LINK_N1_OFFSET 0
#define PIPEB_LINK_M2 0x61048
-#define PIPEB_LINK_M2_OFFSET 0
#define PIPEB_LINK_N2 0x6104c
-#define PIPEB_LINK_N2_OFFSET 0
+
+#define PIPE_DATA_M1(pipe) _PIPE(pipe, PIPEA_DATA_M1, PIPEB_DATA_M1)
+#define PIPE_DATA_N1(pipe) _PIPE(pipe, PIPEA_DATA_N1, PIPEB_DATA_N1)
+#define PIPE_DATA_M2(pipe) _PIPE(pipe, PIPEA_DATA_M2, PIPEB_DATA_M2)
+#define PIPE_DATA_N2(pipe) _PIPE(pipe, PIPEA_DATA_N2, PIPEB_DATA_N2)
+#define PIPE_LINK_M1(pipe) _PIPE(pipe, PIPEA_LINK_M1, PIPEB_LINK_M1)
+#define PIPE_LINK_N1(pipe) _PIPE(pipe, PIPEA_LINK_N1, PIPEB_LINK_N1)
+#define PIPE_LINK_M2(pipe) _PIPE(pipe, PIPEA_LINK_M2, PIPEB_LINK_M2)
+#define PIPE_LINK_N2(pipe) _PIPE(pipe, PIPEA_LINK_N2, PIPEB_LINK_N2)
/* CPU panel fitter */
#define PFA_CTL_1 0x68080
@@ -2516,7 +2577,8 @@
#define GT_SYNC_STATUS (1 << 2)
#define GT_USER_INTERRUPT (1 << 0)
#define GT_BSD_USER_INTERRUPT (1 << 5)
-
+#define GT_GEN6_BSD_USER_INTERRUPT (1 << 12)
+#define GT_BLT_USER_INTERRUPT (1 << 22)
#define GTISR 0x44010
#define GTIMR 0x44014
@@ -2551,6 +2613,10 @@
#define SDE_PORTD_HOTPLUG_CPT (1 << 23)
#define SDE_PORTC_HOTPLUG_CPT (1 << 22)
#define SDE_PORTB_HOTPLUG_CPT (1 << 21)
+#define SDE_HOTPLUG_MASK_CPT (SDE_CRT_HOTPLUG_CPT | \
+ SDE_PORTD_HOTPLUG_CPT | \
+ SDE_PORTC_HOTPLUG_CPT | \
+ SDE_PORTB_HOTPLUG_CPT)
#define SDEISR 0xc4000
#define SDEIMR 0xc4004
@@ -2600,11 +2666,14 @@
#define PCH_DPLL_A 0xc6014
#define PCH_DPLL_B 0xc6018
+#define PCH_DPLL(pipe) _PIPE(pipe, PCH_DPLL_A, PCH_DPLL_B)
#define PCH_FPA0 0xc6040
#define PCH_FPA1 0xc6044
#define PCH_FPB0 0xc6048
#define PCH_FPB1 0xc604c
+#define PCH_FP0(pipe) _PIPE(pipe, PCH_FPA0, PCH_FPB0)
+#define PCH_FP1(pipe) _PIPE(pipe, PCH_FPA1, PCH_FPB1)
#define PCH_DPLL_TEST 0xc606c
@@ -2690,6 +2759,13 @@
#define TRANS_VBLANK_B 0xe1010
#define TRANS_VSYNC_B 0xe1014
+#define TRANS_HTOTAL(pipe) _PIPE(pipe, TRANS_HTOTAL_A, TRANS_HTOTAL_B)
+#define TRANS_HBLANK(pipe) _PIPE(pipe, TRANS_HBLANK_A, TRANS_HBLANK_B)
+#define TRANS_HSYNC(pipe) _PIPE(pipe, TRANS_HSYNC_A, TRANS_HSYNC_B)
+#define TRANS_VTOTAL(pipe) _PIPE(pipe, TRANS_VTOTAL_A, TRANS_VTOTAL_B)
+#define TRANS_VBLANK(pipe) _PIPE(pipe, TRANS_VBLANK_A, TRANS_VBLANK_B)
+#define TRANS_VSYNC(pipe) _PIPE(pipe, TRANS_VSYNC_A, TRANS_VSYNC_B)
+
#define TRANSB_DATA_M1 0xe1030
#define TRANSB_DATA_N1 0xe1034
#define TRANSB_DATA_M2 0xe1038
@@ -2701,6 +2777,7 @@
#define TRANSACONF 0xf0008
#define TRANSBCONF 0xf1008
+#define TRANSCONF(plane) _PIPE(plane, TRANSACONF, TRANSBCONF)
#define TRANS_DISABLE (0<<31)
#define TRANS_ENABLE (1<<31)
#define TRANS_STATE_MASK (1<<30)
@@ -2721,10 +2798,15 @@
#define FDI_RXA_CHICKEN 0xc200c
#define FDI_RXB_CHICKEN 0xc2010
#define FDI_RX_PHASE_SYNC_POINTER_ENABLE (1)
+#define FDI_RX_CHICKEN(pipe) _PIPE(pipe, FDI_RXA_CHICKEN, FDI_RXB_CHICKEN)
+
+#define SOUTH_DSPCLK_GATE_D 0xc2020
+#define PCH_DPLSUNIT_CLOCK_GATE_DISABLE (1<<29)
/* CPU: FDI_TX */
#define FDI_TXA_CTL 0x60100
#define FDI_TXB_CTL 0x61100
+#define FDI_TX_CTL(pipe) _PIPE(pipe, FDI_TXA_CTL, FDI_TXB_CTL)
#define FDI_TX_DISABLE (0<<31)
#define FDI_TX_ENABLE (1<<31)
#define FDI_LINK_TRAIN_PATTERN_1 (0<<28)
@@ -2766,8 +2848,8 @@
/* FDI_RX, FDI_X is hard-wired to Transcoder_X */
#define FDI_RXA_CTL 0xf000c
#define FDI_RXB_CTL 0xf100c
+#define FDI_RX_CTL(pipe) _PIPE(pipe, FDI_RXA_CTL, FDI_RXB_CTL)
#define FDI_RX_ENABLE (1<<31)
-#define FDI_RX_DISABLE (0<<31)
/* train, dp width same as FDI_TX */
#define FDI_DP_PORT_WIDTH_X8 (7<<19)
#define FDI_8BPC (0<<16)
@@ -2782,8 +2864,7 @@
#define FDI_FS_ERR_REPORT_ENABLE (1<<9)
#define FDI_FE_ERR_REPORT_ENABLE (1<<8)
#define FDI_RX_ENHANCE_FRAME_ENABLE (1<<6)
-#define FDI_SEL_RAWCLK (0<<4)
-#define FDI_SEL_PCDCLK (1<<4)
+#define FDI_PCDCLK (1<<4)
/* CPT */
#define FDI_AUTO_TRAINING (1<<10)
#define FDI_LINK_TRAIN_PATTERN_1_CPT (0<<8)
@@ -2798,6 +2879,9 @@
#define FDI_RXA_TUSIZE2 0xf0038
#define FDI_RXB_TUSIZE1 0xf1030
#define FDI_RXB_TUSIZE2 0xf1038
+#define FDI_RX_MISC(pipe) _PIPE(pipe, FDI_RXA_MISC, FDI_RXB_MISC)
+#define FDI_RX_TUSIZE1(pipe) _PIPE(pipe, FDI_RXA_TUSIZE1, FDI_RXB_TUSIZE1)
+#define FDI_RX_TUSIZE2(pipe) _PIPE(pipe, FDI_RXA_TUSIZE2, FDI_RXB_TUSIZE2)
/* FDI_RX interrupt register format */
#define FDI_RX_INTER_LANE_ALIGN (1<<10)
@@ -2816,6 +2900,8 @@
#define FDI_RXA_IMR 0xf0018
#define FDI_RXB_IIR 0xf1014
#define FDI_RXB_IMR 0xf1018
+#define FDI_RX_IIR(pipe) _PIPE(pipe, FDI_RXA_IIR, FDI_RXB_IIR)
+#define FDI_RX_IMR(pipe) _PIPE(pipe, FDI_RXA_IMR, FDI_RXB_IMR)
#define FDI_PLL_CTL_1 0xfe000
#define FDI_PLL_CTL_2 0xfe004
@@ -2935,6 +3021,7 @@
#define TRANS_DP_CTL_A 0xe0300
#define TRANS_DP_CTL_B 0xe1300
#define TRANS_DP_CTL_C 0xe2300
+#define TRANS_DP_CTL(pipe) (TRANS_DP_CTL_A + (pipe) * 0x01000)
#define TRANS_DP_OUTPUT_ENABLE (1<<31)
#define TRANS_DP_PORT_SEL_B (0<<29)
#define TRANS_DP_PORT_SEL_C (1<<29)
@@ -2946,6 +3033,7 @@
#define TRANS_DP_10BPC (1<<9)
#define TRANS_DP_6BPC (2<<9)
#define TRANS_DP_12BPC (3<<9)
+#define TRANS_DP_BPC_MASK (3<<9)
#define TRANS_DP_VSYNC_ACTIVE_HIGH (1<<4)
#define TRANS_DP_VSYNC_ACTIVE_LOW 0
#define TRANS_DP_HSYNC_ACTIVE_HIGH (1<<3)
diff --git a/drivers/gpu/drm/i915/i915_suspend.c b/drivers/gpu/drm/i915/i915_suspend.c
index 31f08581e93a..42729d25da58 100644
--- a/drivers/gpu/drm/i915/i915_suspend.c
+++ b/drivers/gpu/drm/i915/i915_suspend.c
@@ -239,6 +239,16 @@ static void i915_save_modeset_reg(struct drm_device *dev)
if (drm_core_check_feature(dev, DRIVER_MODESET))
return;
+ /* Cursor state */
+ dev_priv->saveCURACNTR = I915_READ(CURACNTR);
+ dev_priv->saveCURAPOS = I915_READ(CURAPOS);
+ dev_priv->saveCURABASE = I915_READ(CURABASE);
+ dev_priv->saveCURBCNTR = I915_READ(CURBCNTR);
+ dev_priv->saveCURBPOS = I915_READ(CURBPOS);
+ dev_priv->saveCURBBASE = I915_READ(CURBBASE);
+ if (IS_GEN2(dev))
+ dev_priv->saveCURSIZE = I915_READ(CURSIZE);
+
if (HAS_PCH_SPLIT(dev)) {
dev_priv->savePCH_DREF_CONTROL = I915_READ(PCH_DREF_CONTROL);
dev_priv->saveDISP_ARB_CTL = I915_READ(DISP_ARB_CTL);
@@ -256,7 +266,7 @@ static void i915_save_modeset_reg(struct drm_device *dev)
dev_priv->saveFPA1 = I915_READ(FPA1);
dev_priv->saveDPLL_A = I915_READ(DPLL_A);
}
- if (IS_I965G(dev) && !HAS_PCH_SPLIT(dev))
+ if (INTEL_INFO(dev)->gen >= 4 && !HAS_PCH_SPLIT(dev))
dev_priv->saveDPLL_A_MD = I915_READ(DPLL_A_MD);
dev_priv->saveHTOTAL_A = I915_READ(HTOTAL_A);
dev_priv->saveHBLANK_A = I915_READ(HBLANK_A);
@@ -294,7 +304,7 @@ static void i915_save_modeset_reg(struct drm_device *dev)
dev_priv->saveDSPASIZE = I915_READ(DSPASIZE);
dev_priv->saveDSPAPOS = I915_READ(DSPAPOS);
dev_priv->saveDSPAADDR = I915_READ(DSPAADDR);
- if (IS_I965G(dev)) {
+ if (INTEL_INFO(dev)->gen >= 4) {
dev_priv->saveDSPASURF = I915_READ(DSPASURF);
dev_priv->saveDSPATILEOFF = I915_READ(DSPATILEOFF);
}
@@ -313,7 +323,7 @@ static void i915_save_modeset_reg(struct drm_device *dev)
dev_priv->saveFPB1 = I915_READ(FPB1);
dev_priv->saveDPLL_B = I915_READ(DPLL_B);
}
- if (IS_I965G(dev) && !HAS_PCH_SPLIT(dev))
+ if (INTEL_INFO(dev)->gen >= 4 && !HAS_PCH_SPLIT(dev))
dev_priv->saveDPLL_B_MD = I915_READ(DPLL_B_MD);
dev_priv->saveHTOTAL_B = I915_READ(HTOTAL_B);
dev_priv->saveHBLANK_B = I915_READ(HBLANK_B);
@@ -351,7 +361,7 @@ static void i915_save_modeset_reg(struct drm_device *dev)
dev_priv->saveDSPBSIZE = I915_READ(DSPBSIZE);
dev_priv->saveDSPBPOS = I915_READ(DSPBPOS);
dev_priv->saveDSPBADDR = I915_READ(DSPBADDR);
- if (IS_I965GM(dev) || IS_GM45(dev)) {
+ if (INTEL_INFO(dev)->gen >= 4) {
dev_priv->saveDSPBSURF = I915_READ(DSPBSURF);
dev_priv->saveDSPBTILEOFF = I915_READ(DSPBTILEOFF);
}
@@ -404,7 +414,7 @@ static void i915_restore_modeset_reg(struct drm_device *dev)
I915_WRITE(dpll_a_reg, dev_priv->saveDPLL_A);
POSTING_READ(dpll_a_reg);
udelay(150);
- if (IS_I965G(dev) && !HAS_PCH_SPLIT(dev)) {
+ if (INTEL_INFO(dev)->gen >= 4 && !HAS_PCH_SPLIT(dev)) {
I915_WRITE(DPLL_A_MD, dev_priv->saveDPLL_A_MD);
POSTING_READ(DPLL_A_MD);
}
@@ -448,7 +458,7 @@ static void i915_restore_modeset_reg(struct drm_device *dev)
I915_WRITE(PIPEASRC, dev_priv->savePIPEASRC);
I915_WRITE(DSPAADDR, dev_priv->saveDSPAADDR);
I915_WRITE(DSPASTRIDE, dev_priv->saveDSPASTRIDE);
- if (IS_I965G(dev)) {
+ if (INTEL_INFO(dev)->gen >= 4) {
I915_WRITE(DSPASURF, dev_priv->saveDSPASURF);
I915_WRITE(DSPATILEOFF, dev_priv->saveDSPATILEOFF);
}
@@ -473,7 +483,7 @@ static void i915_restore_modeset_reg(struct drm_device *dev)
I915_WRITE(dpll_b_reg, dev_priv->saveDPLL_B);
POSTING_READ(dpll_b_reg);
udelay(150);
- if (IS_I965G(dev) && !HAS_PCH_SPLIT(dev)) {
+ if (INTEL_INFO(dev)->gen >= 4 && !HAS_PCH_SPLIT(dev)) {
I915_WRITE(DPLL_B_MD, dev_priv->saveDPLL_B_MD);
POSTING_READ(DPLL_B_MD);
}
@@ -517,7 +527,7 @@ static void i915_restore_modeset_reg(struct drm_device *dev)
I915_WRITE(PIPEBSRC, dev_priv->savePIPEBSRC);
I915_WRITE(DSPBADDR, dev_priv->saveDSPBADDR);
I915_WRITE(DSPBSTRIDE, dev_priv->saveDSPBSTRIDE);
- if (IS_I965G(dev)) {
+ if (INTEL_INFO(dev)->gen >= 4) {
I915_WRITE(DSPBSURF, dev_priv->saveDSPBSURF);
I915_WRITE(DSPBTILEOFF, dev_priv->saveDSPBTILEOFF);
}
@@ -529,6 +539,16 @@ static void i915_restore_modeset_reg(struct drm_device *dev)
I915_WRITE(DSPBCNTR, dev_priv->saveDSPBCNTR);
I915_WRITE(DSPBADDR, I915_READ(DSPBADDR));
+ /* Cursor state */
+ I915_WRITE(CURAPOS, dev_priv->saveCURAPOS);
+ I915_WRITE(CURACNTR, dev_priv->saveCURACNTR);
+ I915_WRITE(CURABASE, dev_priv->saveCURABASE);
+ I915_WRITE(CURBPOS, dev_priv->saveCURBPOS);
+ I915_WRITE(CURBCNTR, dev_priv->saveCURBCNTR);
+ I915_WRITE(CURBBASE, dev_priv->saveCURBBASE);
+ if (IS_GEN2(dev))
+ I915_WRITE(CURSIZE, dev_priv->saveCURSIZE);
+
return;
}
@@ -543,16 +563,6 @@ void i915_save_display(struct drm_device *dev)
/* Don't save them in KMS mode */
i915_save_modeset_reg(dev);
- /* Cursor state */
- dev_priv->saveCURACNTR = I915_READ(CURACNTR);
- dev_priv->saveCURAPOS = I915_READ(CURAPOS);
- dev_priv->saveCURABASE = I915_READ(CURABASE);
- dev_priv->saveCURBCNTR = I915_READ(CURBCNTR);
- dev_priv->saveCURBPOS = I915_READ(CURBPOS);
- dev_priv->saveCURBBASE = I915_READ(CURBBASE);
- if (!IS_I9XX(dev))
- dev_priv->saveCURSIZE = I915_READ(CURSIZE);
-
/* CRT state */
if (HAS_PCH_SPLIT(dev)) {
dev_priv->saveADPA = I915_READ(PCH_ADPA);
@@ -573,7 +583,7 @@ void i915_save_display(struct drm_device *dev)
dev_priv->savePFIT_PGM_RATIOS = I915_READ(PFIT_PGM_RATIOS);
dev_priv->saveBLC_PWM_CTL = I915_READ(BLC_PWM_CTL);
dev_priv->saveBLC_HIST_CTL = I915_READ(BLC_HIST_CTL);
- if (IS_I965G(dev))
+ if (INTEL_INFO(dev)->gen >= 4)
dev_priv->saveBLC_PWM_CTL2 = I915_READ(BLC_PWM_CTL2);
if (IS_MOBILE(dev) && !IS_I830(dev))
dev_priv->saveLVDS = I915_READ(LVDS);
@@ -657,16 +667,6 @@ void i915_restore_display(struct drm_device *dev)
/* Don't restore them in KMS mode */
i915_restore_modeset_reg(dev);
- /* Cursor state */
- I915_WRITE(CURAPOS, dev_priv->saveCURAPOS);
- I915_WRITE(CURACNTR, dev_priv->saveCURACNTR);
- I915_WRITE(CURABASE, dev_priv->saveCURABASE);
- I915_WRITE(CURBPOS, dev_priv->saveCURBPOS);
- I915_WRITE(CURBCNTR, dev_priv->saveCURBCNTR);
- I915_WRITE(CURBBASE, dev_priv->saveCURBBASE);
- if (!IS_I9XX(dev))
- I915_WRITE(CURSIZE, dev_priv->saveCURSIZE);
-
/* CRT state */
if (HAS_PCH_SPLIT(dev))
I915_WRITE(PCH_ADPA, dev_priv->saveADPA);
@@ -674,7 +674,7 @@ void i915_restore_display(struct drm_device *dev)
I915_WRITE(ADPA, dev_priv->saveADPA);
/* LVDS state */
- if (IS_I965G(dev) && !HAS_PCH_SPLIT(dev))
+ if (INTEL_INFO(dev)->gen >= 4 && !HAS_PCH_SPLIT(dev))
I915_WRITE(BLC_PWM_CTL2, dev_priv->saveBLC_PWM_CTL2);
if (HAS_PCH_SPLIT(dev)) {
@@ -862,8 +862,10 @@ int i915_restore_state(struct drm_device *dev)
/* Clock gating state */
intel_init_clock_gating(dev);
- if (HAS_PCH_SPLIT(dev))
+ if (HAS_PCH_SPLIT(dev)) {
ironlake_enable_drps(dev);
+ intel_init_emon(dev);
+ }
/* Cache mode state */
I915_WRITE (CACHE_MODE_0, dev_priv->saveCACHE_MODE_0 | 0xffff0000);
@@ -878,9 +880,7 @@ int i915_restore_state(struct drm_device *dev)
for (i = 0; i < 3; i++)
I915_WRITE(SWF30 + (i << 2), dev_priv->saveSWF2[i]);
- /* I2C state */
- intel_i2c_reset_gmbus(dev);
+ intel_i2c_reset(dev);
return 0;
}
-
diff --git a/drivers/gpu/drm/i915/intel_acpi.c b/drivers/gpu/drm/i915/intel_acpi.c
new file mode 100644
index 000000000000..2cb8e0b9f1ee
--- /dev/null
+++ b/drivers/gpu/drm/i915/intel_acpi.c
@@ -0,0 +1,252 @@
+/*
+ * Intel ACPI functions
+ *
+ * _DSM related code stolen from nouveau_acpi.c.
+ */
+#include <linux/pci.h>
+#include <linux/acpi.h>
+#include <linux/vga_switcheroo.h>
+#include <acpi/acpi_drivers.h>
+
+#include "drmP.h"
+
+#define INTEL_DSM_REVISION_ID 1 /* For Calpella anyway... */
+
+#define INTEL_DSM_FN_SUPPORTED_FUNCTIONS 0 /* No args */
+#define INTEL_DSM_FN_PLATFORM_MUX_INFO 1 /* No args */
+
+static struct intel_dsm_priv {
+ acpi_handle dhandle;
+} intel_dsm_priv;
+
+static const u8 intel_dsm_guid[] = {
+ 0xd3, 0x73, 0xd8, 0x7e,
+ 0xd0, 0xc2,
+ 0x4f, 0x4e,
+ 0xa8, 0x54,
+ 0x0f, 0x13, 0x17, 0xb0, 0x1c, 0x2c
+};
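/*
 * A stand-alone sketch (not part of the patch): if this buffer follows the
 * usual ACPI convention of packing the first three UUID fields little-endian
 * (an assumption, not something this file states), it decodes to the textual
 * _DSM UUID shown in the comment below.
 */
#include <stdio.h>

static const unsigned char guid[] = {
	0xd3, 0x73, 0xd8, 0x7e, 0xd0, 0xc2, 0x4f, 0x4e,
	0xa8, 0x54, 0x0f, 0x13, 0x17, 0xb0, 0x1c, 0x2c
};

int main(void)
{
	const unsigned char *g = guid;
	/* prints 7ed873d3-c2d0-4e4f-a854-0f1317b01c2c under this convention */
	printf("%02x%02x%02x%02x-%02x%02x-%02x%02x-%02x%02x-%02x%02x%02x%02x%02x%02x\n",
	       g[3], g[2], g[1], g[0], g[5], g[4], g[7], g[6],
	       g[8], g[9], g[10], g[11], g[12], g[13], g[14], g[15]);
	return 0;
}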
+
+static int intel_dsm(acpi_handle handle, int func, int arg)
+{
+ struct acpi_buffer output = { ACPI_ALLOCATE_BUFFER, NULL };
+ struct acpi_object_list input;
+ union acpi_object params[4];
+ union acpi_object *obj;
+ u32 result;
+ int ret = 0;
+
+ input.count = 4;
+ input.pointer = params;
+ params[0].type = ACPI_TYPE_BUFFER;
+ params[0].buffer.length = sizeof(intel_dsm_guid);
+ params[0].buffer.pointer = (char *)intel_dsm_guid;
+ params[1].type = ACPI_TYPE_INTEGER;
+ params[1].integer.value = INTEL_DSM_REVISION_ID;
+ params[2].type = ACPI_TYPE_INTEGER;
+ params[2].integer.value = func;
+ params[3].type = ACPI_TYPE_INTEGER;
+ params[3].integer.value = arg;
+
+ ret = acpi_evaluate_object(handle, "_DSM", &input, &output);
+ if (ret) {
+ DRM_DEBUG_DRIVER("failed to evaluate _DSM: %d\n", ret);
+ return ret;
+ }
+
+ obj = (union acpi_object *)output.pointer;
+
+ result = 0;
+ switch (obj->type) {
+ case ACPI_TYPE_INTEGER:
+ result = obj->integer.value;
+ break;
+
+ case ACPI_TYPE_BUFFER:
+ if (obj->buffer.length == 4) {
+ result = (obj->buffer.pointer[0] |
+ (obj->buffer.pointer[1] << 8) |
+ (obj->buffer.pointer[2] << 16) |
+ (obj->buffer.pointer[3] << 24));
+ break;
+ }
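+ /* a buffer of any other length deliberately falls through to the error case */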
+ default:
+ ret = -EINVAL;
+ break;
+ }
+ if (result == 0x80000002)
+ ret = -ENODEV;
+
+ kfree(output.pointer);
+ return ret;
+}
+
+static char *intel_dsm_port_name(u8 id)
+{
+ switch (id) {
+ case 0:
+ return "Reserved";
+ case 1:
+ return "Analog VGA";
+ case 2:
+ return "LVDS";
+ case 3:
+ return "Reserved";
+ case 4:
+ return "HDMI/DVI_B";
+ case 5:
+ return "HDMI/DVI_C";
+ case 6:
+ return "HDMI/DVI_D";
+ case 7:
+ return "DisplayPort_A";
+ case 8:
+ return "DisplayPort_B";
+ case 9:
+ return "DisplayPort_C";
+ case 0xa:
+ return "DisplayPort_D";
+ case 0xb:
+ case 0xc:
+ case 0xd:
+ return "Reserved";
+ case 0xe:
+ return "WiDi";
+ default:
+ return "bad type";
+ }
+}
+
+static char *intel_dsm_mux_type(u8 type)
+{
+ switch (type) {
+ case 0:
+ return "unknown";
+ case 1:
+ return "No MUX, iGPU only";
+ case 2:
+ return "No MUX, dGPU only";
+ case 3:
+ return "MUXed between iGPU and dGPU";
+ default:
+ return "bad type";
+ }
+}
+
+static void intel_dsm_platform_mux_info(void)
+{
+ struct acpi_buffer output = { ACPI_ALLOCATE_BUFFER, NULL };
+ struct acpi_object_list input;
+ union acpi_object params[4];
+ union acpi_object *pkg;
+ int i, ret;
+
+ input.count = 4;
+ input.pointer = params;
+ params[0].type = ACPI_TYPE_BUFFER;
+ params[0].buffer.length = sizeof(intel_dsm_guid);
+ params[0].buffer.pointer = (char *)intel_dsm_guid;
+ params[1].type = ACPI_TYPE_INTEGER;
+ params[1].integer.value = INTEL_DSM_REVISION_ID;
+ params[2].type = ACPI_TYPE_INTEGER;
+ params[2].integer.value = INTEL_DSM_FN_PLATFORM_MUX_INFO;
+ params[3].type = ACPI_TYPE_INTEGER;
+ params[3].integer.value = 0;
+
+ ret = acpi_evaluate_object(intel_dsm_priv.dhandle, "_DSM", &input,
+ &output);
+ if (ret) {
+ DRM_DEBUG_DRIVER("failed to evaluate _DSM: %d\n", ret);
+ goto out;
+ }
+
+ pkg = (union acpi_object *)output.pointer;
+
+ if (pkg->type == ACPI_TYPE_PACKAGE) {
+ union acpi_object *connector_count = &pkg->package.elements[0];
+ DRM_DEBUG_DRIVER("MUX info connectors: %lld\n",
+ (unsigned long long)connector_count->integer.value);
+ for (i = 1; i < pkg->package.count; i++) {
+ union acpi_object *obj = &pkg->package.elements[i];
+ union acpi_object *connector_id =
+ &obj->package.elements[0];
+ union acpi_object *info = &obj->package.elements[1];
+ DRM_DEBUG_DRIVER("Connector id: 0x%016llx\n",
+ (unsigned long long)connector_id->integer.value);
+ DRM_DEBUG_DRIVER(" port id: %s\n",
+ intel_dsm_port_name(info->buffer.pointer[0]));
+ DRM_DEBUG_DRIVER(" display mux info: %s\n",
+ intel_dsm_mux_type(info->buffer.pointer[1]));
+ DRM_DEBUG_DRIVER(" aux/dc mux info: %s\n",
+ intel_dsm_mux_type(info->buffer.pointer[2]));
+ DRM_DEBUG_DRIVER(" hpd mux info: %s\n",
+ intel_dsm_mux_type(info->buffer.pointer[3]));
+ }
+ } else {
+ DRM_ERROR("MUX INFO call failed\n");
+ }
+
+out:
+ kfree(output.pointer);
+}
+
+static bool intel_dsm_pci_probe(struct pci_dev *pdev)
+{
+ acpi_handle dhandle, intel_handle;
+ acpi_status status;
+ int ret;
+
+ dhandle = DEVICE_ACPI_HANDLE(&pdev->dev);
+ if (!dhandle)
+ return false;
+
+ status = acpi_get_handle(dhandle, "_DSM", &intel_handle);
+ if (ACPI_FAILURE(status)) {
+ DRM_DEBUG_KMS("no _DSM method for intel device\n");
+ return false;
+ }
+
+ ret = intel_dsm(dhandle, INTEL_DSM_FN_SUPPORTED_FUNCTIONS, 0);
+ if (ret < 0) {
+ DRM_ERROR("failed to get supported _DSM functions\n");
+ return false;
+ }
+
+ intel_dsm_priv.dhandle = dhandle;
+
+ intel_dsm_platform_mux_info();
+ return true;
+}
+
+static bool intel_dsm_detect(void)
+{
+ char acpi_method_name[255] = { 0 };
+ struct acpi_buffer buffer = {sizeof(acpi_method_name), acpi_method_name};
+ struct pci_dev *pdev = NULL;
+ bool has_dsm = false;
+ int vga_count = 0;
+
+ while ((pdev = pci_get_class(PCI_CLASS_DISPLAY_VGA << 8, pdev)) != NULL) {
+ vga_count++;
+ has_dsm |= intel_dsm_pci_probe(pdev);
+ }
+
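+ /* Two VGA-class devices plus a working _DSM suggests a switchable iGPU/dGPU platform */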
+ if (vga_count == 2 && has_dsm) {
+ acpi_get_name(intel_dsm_priv.dhandle, ACPI_FULL_PATHNAME, &buffer);
+ DRM_DEBUG_DRIVER("VGA switcheroo: detected DSM switching method %s handle\n",
+ acpi_method_name);
+ return true;
+ }
+
+ return false;
+}
+
+void intel_register_dsm_handler(void)
+{
+ if (!intel_dsm_detect())
+ return;
+}
+
+void intel_unregister_dsm_handler(void)
+{
+}
diff --git a/drivers/gpu/drm/i915/intel_bios.c b/drivers/gpu/drm/i915/intel_bios.c
index 96f75d7f6633..b0b1200ed650 100644
--- a/drivers/gpu/drm/i915/intel_bios.c
+++ b/drivers/gpu/drm/i915/intel_bios.c
@@ -24,6 +24,7 @@
* Eric Anholt <eric@anholt.net>
*
*/
+#include <drm/drm_dp_helper.h>
#include "drmP.h"
#include "drm.h"
#include "i915_drm.h"
@@ -129,10 +130,6 @@ parse_lfp_panel_data(struct drm_i915_private *dev_priv,
int i, temp_downclock;
struct drm_display_mode *temp_mode;
- /* Defaults if we can't find VBT info */
- dev_priv->lvds_dither = 0;
- dev_priv->lvds_vbt = 0;
-
lvds_options = find_section(bdb, BDB_LVDS_OPTIONS);
if (!lvds_options)
return;
@@ -140,6 +137,7 @@ parse_lfp_panel_data(struct drm_i915_private *dev_priv,
dev_priv->lvds_dither = lvds_options->pixel_dither;
if (lvds_options->panel_type == 0xff)
return;
+
panel_type = lvds_options->panel_type;
lvds_lfp_data = find_section(bdb, BDB_LVDS_LFP_DATA);
@@ -169,6 +167,8 @@ parse_lfp_panel_data(struct drm_i915_private *dev_priv,
((unsigned char *)entry + dvo_timing_offset);
panel_fixed_mode = kzalloc(sizeof(*panel_fixed_mode), GFP_KERNEL);
+ if (!panel_fixed_mode)
+ return;
fill_detail_timing_data(panel_fixed_mode, dvo_timing);
@@ -230,8 +230,6 @@ parse_sdvo_panel_data(struct drm_i915_private *dev_priv,
struct lvds_dvo_timing *dvo_timing;
struct drm_display_mode *panel_fixed_mode;
- dev_priv->sdvo_lvds_vbt_mode = NULL;
-
sdvo_lvds_options = find_section(bdb, BDB_SDVO_LVDS_OPTIONS);
if (!sdvo_lvds_options)
return;
@@ -260,10 +258,6 @@ parse_general_features(struct drm_i915_private *dev_priv,
struct drm_device *dev = dev_priv->dev;
struct bdb_general_features *general;
- /* Set sensible defaults in case we can't find the general block */
- dev_priv->int_tv_support = 1;
- dev_priv->int_crt_support = 1;
-
general = find_section(bdb, BDB_GENERAL_FEATURES);
if (general) {
dev_priv->int_tv_support = general->int_tv_support;
@@ -271,10 +265,10 @@ parse_general_features(struct drm_i915_private *dev_priv,
dev_priv->lvds_use_ssc = general->enable_ssc;
if (dev_priv->lvds_use_ssc) {
- if (IS_I85X(dev_priv->dev))
+ if (IS_I85X(dev))
dev_priv->lvds_ssc_freq =
general->ssc_freq ? 66 : 48;
- else if (IS_IRONLAKE(dev_priv->dev) || IS_GEN6(dev))
+ else if (IS_GEN5(dev) || IS_GEN6(dev))
dev_priv->lvds_ssc_freq =
general->ssc_freq ? 100 : 120;
else
@@ -289,14 +283,6 @@ parse_general_definitions(struct drm_i915_private *dev_priv,
struct bdb_header *bdb)
{
struct bdb_general_definitions *general;
- const int crt_bus_map_table[] = {
- GPIOB,
- GPIOA,
- GPIOC,
- GPIOD,
- GPIOE,
- GPIOF,
- };
general = find_section(bdb, BDB_GENERAL_DEFINITIONS);
if (general) {
@@ -304,10 +290,8 @@ parse_general_definitions(struct drm_i915_private *dev_priv,
if (block_size >= sizeof(*general)) {
int bus_pin = general->crt_ddc_gmbus_pin;
DRM_DEBUG_KMS("crt_ddc_bus_pin: %d\n", bus_pin);
- if ((bus_pin >= 1) && (bus_pin <= 6)) {
- dev_priv->crt_ddc_bus =
- crt_bus_map_table[bus_pin-1];
- }
+ if (bus_pin >= 1 && bus_pin <= 6)
+ dev_priv->crt_ddc_pin = bus_pin;
} else {
DRM_DEBUG_KMS("BDB_GD too small (%d). Invalid.\n",
block_size);
@@ -317,7 +301,7 @@ parse_general_definitions(struct drm_i915_private *dev_priv,
static void
parse_sdvo_device_mapping(struct drm_i915_private *dev_priv,
- struct bdb_header *bdb)
+ struct bdb_header *bdb)
{
struct sdvo_device_mapping *p_mapping;
struct bdb_general_definitions *p_defs;
@@ -327,7 +311,7 @@ parse_sdvo_device_mapping(struct drm_i915_private *dev_priv,
p_defs = find_section(bdb, BDB_GENERAL_DEFINITIONS);
if (!p_defs) {
- DRM_DEBUG_KMS("No general definition block is found\n");
+ DRM_DEBUG_KMS("No general definition block is found, unable to construct sdvo mapping.\n");
return;
}
/* judge whether the size of child device meets the requirements.
@@ -377,7 +361,16 @@ parse_sdvo_device_mapping(struct drm_i915_private *dev_priv,
p_mapping->slave_addr = p_child->slave_addr;
p_mapping->dvo_wiring = p_child->dvo_wiring;
p_mapping->ddc_pin = p_child->ddc_pin;
+ p_mapping->i2c_pin = p_child->i2c_pin;
+ p_mapping->i2c_speed = p_child->i2c_speed;
p_mapping->initialized = 1;
+ DRM_DEBUG_KMS("SDVO device: dvo=%x, addr=%x, wiring=%d, ddc_pin=%d, i2c_pin=%d, i2c_speed=%d\n",
+ p_mapping->dvo_port,
+ p_mapping->slave_addr,
+ p_mapping->dvo_wiring,
+ p_mapping->ddc_pin,
+ p_mapping->i2c_pin,
+ p_mapping->i2c_speed);
} else {
DRM_DEBUG_KMS("Maybe one SDVO port is shared by "
"two SDVO device.\n");
@@ -409,14 +402,11 @@ parse_driver_features(struct drm_i915_private *dev_priv,
if (!driver)
return;
- if (driver && SUPPORTS_EDP(dev) &&
- driver->lvds_config == BDB_DRIVER_FEATURE_EDP) {
- dev_priv->edp_support = 1;
- } else {
- dev_priv->edp_support = 0;
- }
+ if (SUPPORTS_EDP(dev) &&
+ driver->lvds_config == BDB_DRIVER_FEATURE_EDP)
+ dev_priv->edp.support = 1;
- if (driver && driver->dual_frequency)
+ if (driver->dual_frequency)
dev_priv->render_reclock_avail = true;
}
@@ -424,27 +414,78 @@ static void
parse_edp(struct drm_i915_private *dev_priv, struct bdb_header *bdb)
{
struct bdb_edp *edp;
+ struct edp_power_seq *edp_pps;
+ struct edp_link_params *edp_link_params;
edp = find_section(bdb, BDB_EDP);
if (!edp) {
- if (SUPPORTS_EDP(dev_priv->dev) && dev_priv->edp_support) {
+ if (SUPPORTS_EDP(dev_priv->dev) && dev_priv->edp.support) {
DRM_DEBUG_KMS("No eDP BDB found but eDP panel "
- "supported, assume 18bpp panel color "
- "depth.\n");
- dev_priv->edp_bpp = 18;
+ "supported, assume %dbpp panel color "
+ "depth.\n",
+ dev_priv->edp.bpp);
}
return;
}
switch ((edp->color_depth >> (panel_type * 2)) & 3) {
case EDP_18BPP:
- dev_priv->edp_bpp = 18;
+ dev_priv->edp.bpp = 18;
break;
case EDP_24BPP:
- dev_priv->edp_bpp = 24;
+ dev_priv->edp.bpp = 24;
break;
case EDP_30BPP:
- dev_priv->edp_bpp = 30;
+ dev_priv->edp.bpp = 30;
+ break;
+ }
+
+ /* Get the eDP sequencing and link info */
+ edp_pps = &edp->power_seqs[panel_type];
+ edp_link_params = &edp->link_params[panel_type];
+
+ dev_priv->edp.pps = *edp_pps;
+
+ dev_priv->edp.rate = edp_link_params->rate ? DP_LINK_BW_2_7 :
+ DP_LINK_BW_1_62;
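+ /* VBT lane-count encoding: 0 -> 1 lane, 1 -> 2 lanes, 3 (and anything else) -> 4 lanes */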
+ switch (edp_link_params->lanes) {
+ case 0:
+ dev_priv->edp.lanes = 1;
+ break;
+ case 1:
+ dev_priv->edp.lanes = 2;
+ break;
+ case 3:
+ default:
+ dev_priv->edp.lanes = 4;
+ break;
+ }
+ switch (edp_link_params->preemphasis) {
+ case 0:
+ dev_priv->edp.preemphasis = DP_TRAIN_PRE_EMPHASIS_0;
+ break;
+ case 1:
+ dev_priv->edp.preemphasis = DP_TRAIN_PRE_EMPHASIS_3_5;
+ break;
+ case 2:
+ dev_priv->edp.preemphasis = DP_TRAIN_PRE_EMPHASIS_6;
+ break;
+ case 3:
+ dev_priv->edp.preemphasis = DP_TRAIN_PRE_EMPHASIS_9_5;
+ break;
+ }
+ switch (edp_link_params->vswing) {
+ case 0:
+ dev_priv->edp.vswing = DP_TRAIN_VOLTAGE_SWING_400;
+ break;
+ case 1:
+ dev_priv->edp.vswing = DP_TRAIN_VOLTAGE_SWING_600;
+ break;
+ case 2:
+ dev_priv->edp.vswing = DP_TRAIN_VOLTAGE_SWING_800;
+ break;
+ case 3:
+ dev_priv->edp.vswing = DP_TRAIN_VOLTAGE_SWING_1200;
break;
}
}
@@ -460,7 +501,7 @@ parse_device_mapping(struct drm_i915_private *dev_priv,
p_defs = find_section(bdb, BDB_GENERAL_DEFINITIONS);
if (!p_defs) {
- DRM_DEBUG_KMS("No general definition block is found\n");
+ DRM_DEBUG_KMS("No general definition block is found, no devices defined.\n");
return;
}
/* judge whether the size of child device meets the requirements.
@@ -513,50 +554,83 @@ parse_device_mapping(struct drm_i915_private *dev_priv,
}
return;
}
+
+static void
+init_vbt_defaults(struct drm_i915_private *dev_priv)
+{
+ dev_priv->crt_ddc_pin = GMBUS_PORT_VGADDC;
+
+ /* LFP panel data */
+ dev_priv->lvds_dither = 1;
+ dev_priv->lvds_vbt = 0;
+
+ /* SDVO panel data */
+ dev_priv->sdvo_lvds_vbt_mode = NULL;
+
+ /* general features */
+ dev_priv->int_tv_support = 1;
+ dev_priv->int_crt_support = 1;
+ dev_priv->lvds_use_ssc = 0;
+
+ /* eDP data */
+ dev_priv->edp.bpp = 18;
+}
+
/**
- * intel_init_bios - initialize VBIOS settings & find VBT
+ * intel_parse_bios - find VBT and initialize settings from the BIOS
* @dev: DRM device
*
* Loads the Video BIOS and checks that the VBT exists. Sets scratch registers
* to appropriate values.
*
- * VBT existence is a sanity check that is relied on by other i830_bios.c code.
- * Note that it would be better to use a BIOS call to get the VBT, as BIOSes may
- * feed an updated VBT back through that, compared to what we'll fetch using
- * this method of groping around in the BIOS data.
- *
* Returns 0 on success, nonzero on failure.
*/
bool
-intel_init_bios(struct drm_device *dev)
+intel_parse_bios(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
struct pci_dev *pdev = dev->pdev;
- struct vbt_header *vbt = NULL;
- struct bdb_header *bdb;
- u8 __iomem *bios;
- size_t size;
- int i;
-
- bios = pci_map_rom(pdev, &size);
- if (!bios)
- return -1;
-
- /* Scour memory looking for the VBT signature */
- for (i = 0; i + 4 < size; i++) {
- if (!memcmp(bios + i, "$VBT", 4)) {
- vbt = (struct vbt_header *)(bios + i);
- break;
- }
+ struct bdb_header *bdb = NULL;
+ u8 __iomem *bios = NULL;
+
+ init_vbt_defaults(dev_priv);
+
+ /* XXX Should this validation be moved to intel_opregion.c? */
+ if (dev_priv->opregion.vbt) {
+ struct vbt_header *vbt = dev_priv->opregion.vbt;
+ if (memcmp(vbt->signature, "$VBT", 4) == 0) {
+ DRM_DEBUG_DRIVER("Using VBT from OpRegion: %20s\n",
+ vbt->signature);
+ bdb = (struct bdb_header *)((char *)vbt + vbt->bdb_offset);
+ } else
+ dev_priv->opregion.vbt = NULL;
}
- if (!vbt) {
- DRM_ERROR("VBT signature missing\n");
- pci_unmap_rom(pdev, bios);
- return -1;
- }
+ if (bdb == NULL) {
+ struct vbt_header *vbt = NULL;
+ size_t size;
+ int i;
- bdb = (struct bdb_header *)(bios + i + vbt->bdb_offset);
+ bios = pci_map_rom(pdev, &size);
+ if (!bios)
+ return -1;
+
+ /* Scour memory looking for the VBT signature */
+ for (i = 0; i + 4 < size; i++) {
+ if (!memcmp(bios + i, "$VBT", 4)) {
+ vbt = (struct vbt_header *)(bios + i);
+ break;
+ }
+ }
+
+ if (!vbt) {
+ DRM_ERROR("VBT signature missing\n");
+ pci_unmap_rom(pdev, bios);
+ return -1;
+ }
+
+ bdb = (struct bdb_header *)(bios + i + vbt->bdb_offset);
+ }
/* Grab useful general definitions */
parse_general_features(dev_priv, bdb);
@@ -568,7 +642,25 @@ intel_init_bios(struct drm_device *dev)
parse_driver_features(dev_priv, bdb);
parse_edp(dev_priv, bdb);
- pci_unmap_rom(pdev, bios);
+ if (bios)
+ pci_unmap_rom(pdev, bios);
return 0;
}
+
+/* Ensure that vital registers have been initialised, even if the BIOS
+ * is absent or just failing to do its job.
+ */
+void intel_setup_bios(struct drm_device *dev)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+
+ /* Set the Panel Power On/Off timings if uninitialized. */
+ if ((I915_READ(PP_ON_DELAYS) == 0) && (I915_READ(PP_OFF_DELAYS) == 0)) {
+ /* Set T2 to 40ms and T5 to 200ms */
+ I915_WRITE(PP_ON_DELAYS, 0x019007d0);
+
+ /* Set T3 to 35ms and Tx to 200ms */
+ I915_WRITE(PP_OFF_DELAYS, 0x015e07d0);
+ }
+}
diff --git a/drivers/gpu/drm/i915/intel_bios.h b/drivers/gpu/drm/i915/intel_bios.h
index 4c18514f6f80..5f8e4edcbbb9 100644
--- a/drivers/gpu/drm/i915/intel_bios.h
+++ b/drivers/gpu/drm/i915/intel_bios.h
@@ -197,7 +197,8 @@ struct bdb_general_features {
struct child_device_config {
u16 handle;
u16 device_type;
- u8 device_id[10]; /* See DEVICE_TYPE_* above */
+ u8 i2c_speed;
+ u8 rsvd[9];
u16 addin_offset;
u8 dvo_port; /* See Device_PORT_* above */
u8 i2c_pin;
@@ -466,7 +467,8 @@ struct bdb_edp {
struct edp_link_params link_params[16];
} __attribute__ ((packed));
-bool intel_init_bios(struct drm_device *dev);
+void intel_setup_bios(struct drm_device *dev);
+bool intel_parse_bios(struct drm_device *dev);
/*
* Driver<->VBIOS interaction occurs through scratch bits in
diff --git a/drivers/gpu/drm/i915/intel_crt.c b/drivers/gpu/drm/i915/intel_crt.c
index 197d4f32585a..8df574316063 100644
--- a/drivers/gpu/drm/i915/intel_crt.c
+++ b/drivers/gpu/drm/i915/intel_crt.c
@@ -34,6 +34,25 @@
#include "i915_drm.h"
#include "i915_drv.h"
+/* Here's the desired hotplug mode */
+#define ADPA_HOTPLUG_BITS (ADPA_CRT_HOTPLUG_PERIOD_128 | \
+ ADPA_CRT_HOTPLUG_WARMUP_10MS | \
+ ADPA_CRT_HOTPLUG_SAMPLE_4S | \
+ ADPA_CRT_HOTPLUG_VOLTAGE_50 | \
+ ADPA_CRT_HOTPLUG_VOLREF_325MV | \
+ ADPA_CRT_HOTPLUG_ENABLE)
+
+struct intel_crt {
+ struct intel_encoder base;
+ bool force_hotplug_required;
+};
+
+static struct intel_crt *intel_attached_crt(struct drm_connector *connector)
+{
+ return container_of(intel_attached_encoder(connector),
+ struct intel_crt, base);
+}
+
static void intel_crt_dpms(struct drm_encoder *encoder, int mode)
{
struct drm_device *dev = encoder->dev;
@@ -79,7 +98,7 @@ static int intel_crt_mode_valid(struct drm_connector *connector,
if (mode->clock < 25000)
return MODE_CLOCK_LOW;
- if (!IS_I9XX(dev))
+ if (IS_GEN2(dev))
max_clock = 350000;
else
max_clock = 400000;
@@ -123,13 +142,13 @@ static void intel_crt_mode_set(struct drm_encoder *encoder,
* Disable separate mode multiplier used when cloning SDVO to CRT
* XXX this needs to be adjusted when we really are cloning
*/
- if (IS_I965G(dev) && !HAS_PCH_SPLIT(dev)) {
+ if (INTEL_INFO(dev)->gen >= 4 && !HAS_PCH_SPLIT(dev)) {
dpll_md = I915_READ(dpll_md_reg);
I915_WRITE(dpll_md_reg,
dpll_md & ~DPLL_MD_UDI_MULTIPLIER_MASK);
}
- adpa = 0;
+ adpa = ADPA_HOTPLUG_BITS;
if (adjusted_mode->flags & DRM_MODE_FLAG_PHSYNC)
adpa |= ADPA_HSYNC_ACTIVE_HIGH;
if (adjusted_mode->flags & DRM_MODE_FLAG_PVSYNC)
@@ -157,52 +176,44 @@ static void intel_crt_mode_set(struct drm_encoder *encoder,
static bool intel_ironlake_crt_detect_hotplug(struct drm_connector *connector)
{
struct drm_device *dev = connector->dev;
+ struct intel_crt *crt = intel_attached_crt(connector);
struct drm_i915_private *dev_priv = dev->dev_private;
- u32 adpa, temp;
+ u32 adpa;
bool ret;
- bool turn_off_dac = false;
- temp = adpa = I915_READ(PCH_ADPA);
+ /* The first time through, trigger an explicit detection cycle */
+ if (crt->force_hotplug_required) {
+ bool turn_off_dac = HAS_PCH_SPLIT(dev);
+ u32 save_adpa;
- if (HAS_PCH_SPLIT(dev))
- turn_off_dac = true;
-
- adpa &= ~ADPA_CRT_HOTPLUG_MASK;
- if (turn_off_dac)
- adpa &= ~ADPA_DAC_ENABLE;
-
- /* disable HPD first */
- I915_WRITE(PCH_ADPA, adpa);
- (void)I915_READ(PCH_ADPA);
-
- adpa |= (ADPA_CRT_HOTPLUG_PERIOD_128 |
- ADPA_CRT_HOTPLUG_WARMUP_10MS |
- ADPA_CRT_HOTPLUG_SAMPLE_4S |
- ADPA_CRT_HOTPLUG_VOLTAGE_50 | /* default */
- ADPA_CRT_HOTPLUG_VOLREF_325MV |
- ADPA_CRT_HOTPLUG_ENABLE |
- ADPA_CRT_HOTPLUG_FORCE_TRIGGER);
-
- DRM_DEBUG_KMS("pch crt adpa 0x%x", adpa);
- I915_WRITE(PCH_ADPA, adpa);
-
- if (wait_for((I915_READ(PCH_ADPA) & ADPA_CRT_HOTPLUG_FORCE_TRIGGER) == 0,
- 1000, 1))
- DRM_DEBUG_KMS("timed out waiting for FORCE_TRIGGER");
-
- if (turn_off_dac) {
- I915_WRITE(PCH_ADPA, temp);
- (void)I915_READ(PCH_ADPA);
+ crt->force_hotplug_required = 0;
+
+ save_adpa = adpa = I915_READ(PCH_ADPA);
+ DRM_DEBUG_KMS("trigger hotplug detect cycle: adpa=0x%x\n", adpa);
+
+ adpa |= ADPA_CRT_HOTPLUG_FORCE_TRIGGER;
+ if (turn_off_dac)
+ adpa &= ~ADPA_DAC_ENABLE;
+
+ I915_WRITE(PCH_ADPA, adpa);
+
+ if (wait_for((I915_READ(PCH_ADPA) & ADPA_CRT_HOTPLUG_FORCE_TRIGGER) == 0,
+ 1000))
+ DRM_DEBUG_KMS("timed out waiting for FORCE_TRIGGER");
+
+ if (turn_off_dac) {
+ I915_WRITE(PCH_ADPA, save_adpa);
+ POSTING_READ(PCH_ADPA);
+ }
}
/* Check the status to see if both blue and green are on now */
adpa = I915_READ(PCH_ADPA);
- adpa &= ADPA_CRT_HOTPLUG_MONITOR_MASK;
- if ((adpa == ADPA_CRT_HOTPLUG_MONITOR_COLOR) ||
- (adpa == ADPA_CRT_HOTPLUG_MONITOR_MONO))
+ if ((adpa & ADPA_CRT_HOTPLUG_MONITOR_MASK) != 0)
ret = true;
else
ret = false;
+ DRM_DEBUG_KMS("ironlake hotplug adpa=0x%x, result %d\n", adpa, ret);
return ret;
}
@@ -244,7 +255,7 @@ static bool intel_crt_detect_hotplug(struct drm_connector *connector)
/* wait for FORCE_DETECT to go off */
if (wait_for((I915_READ(PORT_HOTPLUG_EN) &
CRT_HOTPLUG_FORCE_DETECT) == 0,
- 1000, 1))
+ 1000))
DRM_DEBUG_KMS("timed out waiting for FORCE_DETECT to go off");
}
@@ -261,21 +272,46 @@ static bool intel_crt_detect_hotplug(struct drm_connector *connector)
return ret;
}
-static bool intel_crt_detect_ddc(struct drm_encoder *encoder)
+static bool intel_crt_ddc_probe(struct drm_i915_private *dev_priv, int ddc_bus)
+{
+ u8 buf;
+ struct i2c_msg msgs[] = {
+ {
+ .addr = 0xA0,
+ .flags = 0,
+ .len = 1,
+ .buf = &buf,
+ },
+ };
+ /* DDC monitor detect: Does it ACK a write to 0xA0? */
+ return i2c_transfer(&dev_priv->gmbus[ddc_bus].adapter, msgs, 1) == 1;
+}
+
+static bool intel_crt_detect_ddc(struct intel_crt *crt)
{
- struct intel_encoder *intel_encoder = enc_to_intel_encoder(encoder);
+ struct drm_i915_private *dev_priv = crt->base.base.dev->dev_private;
/* CRT should always be at 0, but check anyway */
- if (intel_encoder->type != INTEL_OUTPUT_ANALOG)
+ if (crt->base.type != INTEL_OUTPUT_ANALOG)
return false;
- return intel_ddc_probe(intel_encoder);
+ if (intel_crt_ddc_probe(dev_priv, dev_priv->crt_ddc_pin)) {
+ DRM_DEBUG_KMS("CRT detected via DDC:0xa0\n");
+ return true;
+ }
+
+ if (intel_ddc_probe(&crt->base, dev_priv->crt_ddc_pin)) {
+ DRM_DEBUG_KMS("CRT detected via DDC:0x50 [EDID]\n");
+ return true;
+ }
+
+ return false;
}
static enum drm_connector_status
-intel_crt_load_detect(struct drm_crtc *crtc, struct intel_encoder *intel_encoder)
+intel_crt_load_detect(struct drm_crtc *crtc, struct intel_crt *crt)
{
- struct drm_encoder *encoder = &intel_encoder->enc;
+ struct drm_encoder *encoder = &crt->base.base;
struct drm_device *dev = encoder->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
@@ -295,6 +331,8 @@ intel_crt_load_detect(struct drm_crtc *crtc, struct intel_encoder *intel_encoder
uint8_t st00;
enum drm_connector_status status;
+ DRM_DEBUG_KMS("starting load-detect on CRT\n");
+
if (pipe == 0) {
bclrpat_reg = BCLRPAT_A;
vtotal_reg = VTOTAL_A;
@@ -324,9 +362,10 @@ intel_crt_load_detect(struct drm_crtc *crtc, struct intel_encoder *intel_encoder
/* Set the border color to purple. */
I915_WRITE(bclrpat_reg, 0x500050);
- if (IS_I9XX(dev)) {
+ if (!IS_GEN2(dev)) {
uint32_t pipeconf = I915_READ(pipeconf_reg);
I915_WRITE(pipeconf_reg, pipeconf | PIPECONF_FORCE_BORDER);
+ POSTING_READ(pipeconf_reg);
/* Wait for next Vblank to substitute
* border color for Color info */
intel_wait_for_vblank(dev, pipe);
@@ -404,34 +443,40 @@ static enum drm_connector_status
intel_crt_detect(struct drm_connector *connector, bool force)
{
struct drm_device *dev = connector->dev;
- struct drm_encoder *encoder = intel_attached_encoder(connector);
- struct intel_encoder *intel_encoder = enc_to_intel_encoder(encoder);
+ struct intel_crt *crt = intel_attached_crt(connector);
struct drm_crtc *crtc;
int dpms_mode;
enum drm_connector_status status;
- if (IS_I9XX(dev) && !IS_I915G(dev) && !IS_I915GM(dev)) {
- if (intel_crt_detect_hotplug(connector))
+ if (I915_HAS_HOTPLUG(dev)) {
+ if (intel_crt_detect_hotplug(connector)) {
+ DRM_DEBUG_KMS("CRT detected via hotplug\n");
return connector_status_connected;
- else
+ } else {
+ DRM_DEBUG_KMS("CRT not detected via hotplug\n");
return connector_status_disconnected;
+ }
}
- if (intel_crt_detect_ddc(encoder))
+ if (intel_crt_detect_ddc(crt))
return connector_status_connected;
if (!force)
return connector->status;
/* for pre-945g platforms use load detect */
- if (encoder->crtc && encoder->crtc->enabled) {
- status = intel_crt_load_detect(encoder->crtc, intel_encoder);
+ crtc = crt->base.base.crtc;
+ if (crtc && crtc->enabled) {
+ status = intel_crt_load_detect(crtc, crt);
} else {
- crtc = intel_get_load_detect_pipe(intel_encoder, connector,
+ crtc = intel_get_load_detect_pipe(&crt->base, connector,
NULL, &dpms_mode);
if (crtc) {
- status = intel_crt_load_detect(crtc, intel_encoder);
- intel_release_load_detect_pipe(intel_encoder,
+ if (intel_crt_detect_ddc(crt))
+ status = connector_status_connected;
+ else
+ status = intel_crt_load_detect(crtc, crt);
+ intel_release_load_detect_pipe(&crt->base,
connector, dpms_mode);
} else
status = connector_status_unknown;
@@ -449,32 +494,18 @@ static void intel_crt_destroy(struct drm_connector *connector)
static int intel_crt_get_modes(struct drm_connector *connector)
{
- int ret;
- struct drm_encoder *encoder = intel_attached_encoder(connector);
- struct intel_encoder *intel_encoder = enc_to_intel_encoder(encoder);
- struct i2c_adapter *ddc_bus;
struct drm_device *dev = connector->dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ int ret;
-
- ret = intel_ddc_get_modes(connector, intel_encoder->ddc_bus);
+ ret = intel_ddc_get_modes(connector,
+ &dev_priv->gmbus[dev_priv->crt_ddc_pin].adapter);
if (ret || !IS_G4X(dev))
- goto end;
+ return ret;
/* Try to probe digital port for output in DVI-I -> VGA mode. */
- ddc_bus = intel_i2c_create(connector->dev, GPIOD, "CRTDDC_D");
-
- if (!ddc_bus) {
- dev_printk(KERN_ERR, &connector->dev->pdev->dev,
- "DDC bus registration failed for CRTDDC_D.\n");
- goto end;
- }
- /* Try to get modes by GPIOD port */
- ret = intel_ddc_get_modes(connector, ddc_bus);
- intel_i2c_destroy(ddc_bus);
-
-end:
- return ret;
-
+ return intel_ddc_get_modes(connector,
+ &dev_priv->gmbus[GMBUS_PORT_DPB].adapter);
}
static int intel_crt_set_property(struct drm_connector *connector,
@@ -507,7 +538,7 @@ static const struct drm_connector_funcs intel_crt_connector_funcs = {
static const struct drm_connector_helper_funcs intel_crt_connector_helper_funcs = {
.mode_valid = intel_crt_mode_valid,
.get_modes = intel_crt_get_modes,
- .best_encoder = intel_attached_encoder,
+ .best_encoder = intel_best_encoder,
};
static const struct drm_encoder_funcs intel_crt_enc_funcs = {
@@ -517,18 +548,17 @@ static const struct drm_encoder_funcs intel_crt_enc_funcs = {
void intel_crt_init(struct drm_device *dev)
{
struct drm_connector *connector;
- struct intel_encoder *intel_encoder;
+ struct intel_crt *crt;
struct intel_connector *intel_connector;
struct drm_i915_private *dev_priv = dev->dev_private;
- u32 i2c_reg;
- intel_encoder = kzalloc(sizeof(struct intel_encoder), GFP_KERNEL);
- if (!intel_encoder)
+ crt = kzalloc(sizeof(struct intel_crt), GFP_KERNEL);
+ if (!crt)
return;
intel_connector = kzalloc(sizeof(struct intel_connector), GFP_KERNEL);
if (!intel_connector) {
- kfree(intel_encoder);
+ kfree(crt);
return;
}
@@ -536,37 +566,20 @@ void intel_crt_init(struct drm_device *dev)
drm_connector_init(dev, &intel_connector->base,
&intel_crt_connector_funcs, DRM_MODE_CONNECTOR_VGA);
- drm_encoder_init(dev, &intel_encoder->enc, &intel_crt_enc_funcs,
+ drm_encoder_init(dev, &crt->base.base, &intel_crt_enc_funcs,
DRM_MODE_ENCODER_DAC);
- drm_mode_connector_attach_encoder(&intel_connector->base,
- &intel_encoder->enc);
-
- /* Set up the DDC bus. */
- if (HAS_PCH_SPLIT(dev))
- i2c_reg = PCH_GPIOA;
- else {
- i2c_reg = GPIOA;
- /* Use VBT information for CRT DDC if available */
- if (dev_priv->crt_ddc_bus != 0)
- i2c_reg = dev_priv->crt_ddc_bus;
- }
- intel_encoder->ddc_bus = intel_i2c_create(dev, i2c_reg, "CRTDDC_A");
- if (!intel_encoder->ddc_bus) {
- dev_printk(KERN_ERR, &dev->pdev->dev, "DDC bus registration "
- "failed.\n");
- return;
- }
+ intel_connector_attach_encoder(intel_connector, &crt->base);
- intel_encoder->type = INTEL_OUTPUT_ANALOG;
- intel_encoder->clone_mask = (1 << INTEL_SDVO_NON_TV_CLONE_BIT) |
- (1 << INTEL_ANALOG_CLONE_BIT) |
- (1 << INTEL_SDVO_LVDS_CLONE_BIT);
- intel_encoder->crtc_mask = (1 << 0) | (1 << 1);
+ crt->base.type = INTEL_OUTPUT_ANALOG;
+ crt->base.clone_mask = (1 << INTEL_SDVO_NON_TV_CLONE_BIT |
+ 1 << INTEL_ANALOG_CLONE_BIT |
+ 1 << INTEL_SDVO_LVDS_CLONE_BIT);
+ crt->base.crtc_mask = (1 << 0) | (1 << 1);
connector->interlace_allowed = 1;
connector->doublescan_allowed = 0;
- drm_encoder_helper_add(&intel_encoder->enc, &intel_crt_helper_funcs);
+ drm_encoder_helper_add(&crt->base.base, &intel_crt_helper_funcs);
drm_connector_helper_add(connector, &intel_crt_connector_helper_funcs);
drm_sysfs_connector_add(connector);
@@ -576,5 +589,22 @@ void intel_crt_init(struct drm_device *dev)
else
connector->polled = DRM_CONNECTOR_POLL_CONNECT;
+ /*
+ * Configure the automatic hotplug detection stuff
+ */
+ crt->force_hotplug_required = 0;
+ if (HAS_PCH_SPLIT(dev)) {
+ u32 adpa;
+
+ adpa = I915_READ(PCH_ADPA);
+ adpa &= ~ADPA_CRT_HOTPLUG_MASK;
+ adpa |= ADPA_HOTPLUG_BITS;
+ I915_WRITE(PCH_ADPA, adpa);
+ POSTING_READ(PCH_ADPA);
+
+ DRM_DEBUG_KMS("pch crt adpa set to 0x%x\n", adpa);
+ crt->force_hotplug_required = 1;
+ }
+
dev_priv->hotplug_supported_mask |= CRT_HOTPLUG_INT_STATUS;
}
diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c
index 979228594599..d9b7092439ef 100644
--- a/drivers/gpu/drm/i915/intel_display.c
+++ b/drivers/gpu/drm/i915/intel_display.c
@@ -43,8 +43,8 @@
bool intel_pipe_has_type (struct drm_crtc *crtc, int type);
static void intel_update_watermarks(struct drm_device *dev);
-static void intel_increase_pllclock(struct drm_crtc *crtc, bool schedule);
-static void intel_crtc_update_cursor(struct drm_crtc *crtc);
+static void intel_increase_pllclock(struct drm_crtc *crtc);
+static void intel_crtc_update_cursor(struct drm_crtc *crtc, bool on);
typedef struct {
/* given values */
@@ -342,6 +342,16 @@ static bool
intel_find_pll_ironlake_dp(const intel_limit_t *, struct drm_crtc *crtc,
int target, int refclk, intel_clock_t *best_clock);
+static inline u32 /* units of 100MHz */
+intel_fdi_link_freq(struct drm_device *dev)
+{
+ if (IS_GEN5(dev)) {
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ return (I915_READ(FDI_PLL_BIOS_0) & FDI_PLL_FB_CLOCK_MASK) + 2;
+ } else
+ return 27;
+}
+
static const intel_limit_t intel_limits_i8xx_dvo = {
.dot = { .min = I8XX_DOT_MIN, .max = I8XX_DOT_MAX },
.vco = { .min = I8XX_VCO_MIN, .max = I8XX_VCO_MAX },
@@ -701,16 +711,16 @@ static const intel_limit_t *intel_limit(struct drm_crtc *crtc)
limit = intel_ironlake_limit(crtc);
else if (IS_G4X(dev)) {
limit = intel_g4x_limit(crtc);
- } else if (IS_I9XX(dev) && !IS_PINEVIEW(dev)) {
- if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS))
- limit = &intel_limits_i9xx_lvds;
- else
- limit = &intel_limits_i9xx_sdvo;
} else if (IS_PINEVIEW(dev)) {
if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS))
limit = &intel_limits_pineview_lvds;
else
limit = &intel_limits_pineview_sdvo;
+ } else if (!IS_GEN2(dev)) {
+ if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS))
+ limit = &intel_limits_i9xx_lvds;
+ else
+ limit = &intel_limits_i9xx_sdvo;
} else {
if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS))
limit = &intel_limits_i8xx_lvds;
@@ -744,20 +754,17 @@ static void intel_clock(struct drm_device *dev, int refclk, intel_clock_t *clock
/**
* Returns whether any output on the specified pipe is of the specified type
*/
-bool intel_pipe_has_type (struct drm_crtc *crtc, int type)
+bool intel_pipe_has_type(struct drm_crtc *crtc, int type)
{
- struct drm_device *dev = crtc->dev;
- struct drm_mode_config *mode_config = &dev->mode_config;
- struct drm_encoder *l_entry;
+ struct drm_device *dev = crtc->dev;
+ struct drm_mode_config *mode_config = &dev->mode_config;
+ struct intel_encoder *encoder;
+
+ list_for_each_entry(encoder, &mode_config->encoder_list, base.head)
+ if (encoder->base.crtc == crtc && encoder->type == type)
+ return true;
- list_for_each_entry(l_entry, &mode_config->encoder_list, head) {
- if (l_entry && l_entry->crtc == crtc) {
- struct intel_encoder *intel_encoder = enc_to_intel_encoder(l_entry);
- if (intel_encoder->type == type)
- return true;
- }
- }
- return false;
+ return false;
}
#define INTELPllInvalid(s) do { /* DRM_DEBUG(s); */ return false; } while (0)
@@ -928,10 +935,6 @@ intel_find_pll_ironlake_dp(const intel_limit_t *limit, struct drm_crtc *crtc,
struct drm_device *dev = crtc->dev;
intel_clock_t clock;
- /* return directly when it is eDP */
- if (HAS_eDP)
- return true;
-
if (target < 200000) {
clock.n = 1;
clock.p1 = 2;
@@ -955,26 +958,26 @@ static bool
intel_find_pll_g4x_dp(const intel_limit_t *limit, struct drm_crtc *crtc,
int target, int refclk, intel_clock_t *best_clock)
{
- intel_clock_t clock;
- if (target < 200000) {
- clock.p1 = 2;
- clock.p2 = 10;
- clock.n = 2;
- clock.m1 = 23;
- clock.m2 = 8;
- } else {
- clock.p1 = 1;
- clock.p2 = 10;
- clock.n = 1;
- clock.m1 = 14;
- clock.m2 = 2;
- }
- clock.m = 5 * (clock.m1 + 2) + (clock.m2 + 2);
- clock.p = (clock.p1 * clock.p2);
- clock.dot = 96000 * clock.m / (clock.n + 2) / clock.p;
- clock.vco = 0;
- memcpy(best_clock, &clock, sizeof(intel_clock_t));
- return true;
+ intel_clock_t clock;
+ if (target < 200000) {
+ clock.p1 = 2;
+ clock.p2 = 10;
+ clock.n = 2;
+ clock.m1 = 23;
+ clock.m2 = 8;
+ } else {
+ clock.p1 = 1;
+ clock.p2 = 10;
+ clock.n = 1;
+ clock.m1 = 14;
+ clock.m2 = 2;
+ }
+ clock.m = 5 * (clock.m1 + 2) + (clock.m2 + 2);
+ clock.p = (clock.p1 * clock.p2);
+ clock.dot = 96000 * clock.m / (clock.n + 2) / clock.p;
+ clock.vco = 0;
+ memcpy(best_clock, &clock, sizeof(intel_clock_t));
+ return true;
}
/**
@@ -1007,9 +1010,9 @@ void intel_wait_for_vblank(struct drm_device *dev, int pipe)
I915_READ(pipestat_reg) | PIPE_VBLANK_INTERRUPT_STATUS);
/* Wait for vblank interrupt bit to set */
- if (wait_for((I915_READ(pipestat_reg) &
- PIPE_VBLANK_INTERRUPT_STATUS),
- 50, 0))
+ if (wait_for(I915_READ(pipestat_reg) &
+ PIPE_VBLANK_INTERRUPT_STATUS,
+ 50))
DRM_DEBUG_KMS("vblank wait timed out\n");
}
@@ -1028,36 +1031,35 @@ void intel_wait_for_vblank(struct drm_device *dev, int pipe)
* Otherwise:
* wait for the display line value to settle (it usually
* ends up stopping at the start of the next frame).
- *
+ *
*/
-static void intel_wait_for_pipe_off(struct drm_device *dev, int pipe)
+void intel_wait_for_pipe_off(struct drm_device *dev, int pipe)
{
struct drm_i915_private *dev_priv = dev->dev_private;
if (INTEL_INFO(dev)->gen >= 4) {
- int pipeconf_reg = (pipe == 0 ? PIPEACONF : PIPEBCONF);
+ int reg = PIPECONF(pipe);
/* Wait for the Pipe State to go off */
- if (wait_for((I915_READ(pipeconf_reg) & I965_PIPECONF_ACTIVE) == 0,
- 100, 0))
+ if (wait_for((I915_READ(reg) & I965_PIPECONF_ACTIVE) == 0,
+ 100))
DRM_DEBUG_KMS("pipe_off wait timed out\n");
} else {
u32 last_line;
- int pipedsl_reg = (pipe == 0 ? PIPEADSL : PIPEBDSL);
+ int reg = PIPEDSL(pipe);
unsigned long timeout = jiffies + msecs_to_jiffies(100);
/* Wait for the display line to settle */
do {
- last_line = I915_READ(pipedsl_reg) & DSL_LINEMASK;
+ last_line = I915_READ(reg) & DSL_LINEMASK;
mdelay(5);
- } while (((I915_READ(pipedsl_reg) & DSL_LINEMASK) != last_line) &&
+ } while (((I915_READ(reg) & DSL_LINEMASK) != last_line) &&
time_after(timeout, jiffies));
if (time_after(jiffies, timeout))
DRM_DEBUG_KMS("pipe_off wait timed out\n");
}
}
-/* Parameters have changed, update FBC info */
static void i8xx_enable_fbc(struct drm_crtc *crtc, unsigned long interval)
{
struct drm_device *dev = crtc->dev;
@@ -1069,6 +1071,14 @@ static void i8xx_enable_fbc(struct drm_crtc *crtc, unsigned long interval)
int plane, i;
u32 fbc_ctl, fbc_ctl2;
+ if (fb->pitch == dev_priv->cfb_pitch &&
+ obj_priv->fence_reg == dev_priv->cfb_fence &&
+ intel_crtc->plane == dev_priv->cfb_plane &&
+ I915_READ(FBC_CONTROL) & FBC_CTL_EN)
+ return;
+
+ i8xx_disable_fbc(dev);
+
dev_priv->cfb_pitch = dev_priv->cfb_size / FBC_LL_SIZE;
if (fb->pitch < dev_priv->cfb_pitch)
@@ -1102,7 +1112,7 @@ static void i8xx_enable_fbc(struct drm_crtc *crtc, unsigned long interval)
I915_WRITE(FBC_CONTROL, fbc_ctl);
DRM_DEBUG_KMS("enabled FBC, pitch %ld, yoff %d, plane %d, ",
- dev_priv->cfb_pitch, crtc->y, dev_priv->cfb_plane);
+ dev_priv->cfb_pitch, crtc->y, dev_priv->cfb_plane);
}
void i8xx_disable_fbc(struct drm_device *dev)
@@ -1110,19 +1120,16 @@ void i8xx_disable_fbc(struct drm_device *dev)
struct drm_i915_private *dev_priv = dev->dev_private;
u32 fbc_ctl;
- if (!I915_HAS_FBC(dev))
- return;
-
- if (!(I915_READ(FBC_CONTROL) & FBC_CTL_EN))
- return; /* Already off, just return */
-
/* Disable compression */
fbc_ctl = I915_READ(FBC_CONTROL);
+ if ((fbc_ctl & FBC_CTL_EN) == 0)
+ return;
+
fbc_ctl &= ~FBC_CTL_EN;
I915_WRITE(FBC_CONTROL, fbc_ctl);
/* Wait for compressing bit to clear */
- if (wait_for((I915_READ(FBC_STATUS) & FBC_STAT_COMPRESSING) == 0, 10, 0)) {
+ if (wait_for((I915_READ(FBC_STATUS) & FBC_STAT_COMPRESSING) == 0, 10)) {
DRM_DEBUG_KMS("FBC idle timed out\n");
return;
}
@@ -1145,14 +1152,27 @@ static void g4x_enable_fbc(struct drm_crtc *crtc, unsigned long interval)
struct intel_framebuffer *intel_fb = to_intel_framebuffer(fb);
struct drm_i915_gem_object *obj_priv = to_intel_bo(intel_fb->obj);
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
- int plane = (intel_crtc->plane == 0 ? DPFC_CTL_PLANEA :
- DPFC_CTL_PLANEB);
+ int plane = intel_crtc->plane == 0 ? DPFC_CTL_PLANEA : DPFC_CTL_PLANEB;
unsigned long stall_watermark = 200;
u32 dpfc_ctl;
+ dpfc_ctl = I915_READ(DPFC_CONTROL);
+ if (dpfc_ctl & DPFC_CTL_EN) {
+ if (dev_priv->cfb_pitch == dev_priv->cfb_pitch / 64 - 1 &&
+ dev_priv->cfb_fence == obj_priv->fence_reg &&
+ dev_priv->cfb_plane == intel_crtc->plane &&
+ dev_priv->cfb_y == crtc->y)
+ return;
+
+ I915_WRITE(DPFC_CONTROL, dpfc_ctl & ~DPFC_CTL_EN);
+ POSTING_READ(DPFC_CONTROL);
+ intel_wait_for_vblank(dev, intel_crtc->pipe);
+ }
+
dev_priv->cfb_pitch = (dev_priv->cfb_pitch / 64) - 1;
dev_priv->cfb_fence = obj_priv->fence_reg;
dev_priv->cfb_plane = intel_crtc->plane;
+ dev_priv->cfb_y = crtc->y;
dpfc_ctl = plane | DPFC_SR_EN | DPFC_CTL_LIMIT_1X;
if (obj_priv->tiling_mode != I915_TILING_NONE) {
@@ -1162,7 +1182,6 @@ static void g4x_enable_fbc(struct drm_crtc *crtc, unsigned long interval)
I915_WRITE(DPFC_CHICKEN, ~DPFC_HT_MODIFY);
}
- I915_WRITE(DPFC_CONTROL, dpfc_ctl);
I915_WRITE(DPFC_RECOMP_CTL, DPFC_RECOMP_STALL_EN |
(stall_watermark << DPFC_RECOMP_STALL_WM_SHIFT) |
(interval << DPFC_RECOMP_TIMER_COUNT_SHIFT));
@@ -1181,10 +1200,12 @@ void g4x_disable_fbc(struct drm_device *dev)
/* Disable compression */
dpfc_ctl = I915_READ(DPFC_CONTROL);
- dpfc_ctl &= ~DPFC_CTL_EN;
- I915_WRITE(DPFC_CONTROL, dpfc_ctl);
+ if (dpfc_ctl & DPFC_CTL_EN) {
+ dpfc_ctl &= ~DPFC_CTL_EN;
+ I915_WRITE(DPFC_CONTROL, dpfc_ctl);
- DRM_DEBUG_KMS("disabled FBC\n");
+ DRM_DEBUG_KMS("disabled FBC\n");
+ }
}
static bool g4x_fbc_enabled(struct drm_device *dev)
@@ -1202,16 +1223,30 @@ static void ironlake_enable_fbc(struct drm_crtc *crtc, unsigned long interval)
struct intel_framebuffer *intel_fb = to_intel_framebuffer(fb);
struct drm_i915_gem_object *obj_priv = to_intel_bo(intel_fb->obj);
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
- int plane = (intel_crtc->plane == 0) ? DPFC_CTL_PLANEA :
- DPFC_CTL_PLANEB;
+ int plane = intel_crtc->plane == 0 ? DPFC_CTL_PLANEA : DPFC_CTL_PLANEB;
unsigned long stall_watermark = 200;
u32 dpfc_ctl;
+ dpfc_ctl = I915_READ(ILK_DPFC_CONTROL);
+ if (dpfc_ctl & DPFC_CTL_EN) {
+ if (dev_priv->cfb_pitch == dev_priv->cfb_pitch / 64 - 1 &&
+ dev_priv->cfb_fence == obj_priv->fence_reg &&
+ dev_priv->cfb_plane == intel_crtc->plane &&
+ dev_priv->cfb_offset == obj_priv->gtt_offset &&
+ dev_priv->cfb_y == crtc->y)
+ return;
+
+ I915_WRITE(ILK_DPFC_CONTROL, dpfc_ctl & ~DPFC_CTL_EN);
+ POSTING_READ(ILK_DPFC_CONTROL);
+ intel_wait_for_vblank(dev, intel_crtc->pipe);
+ }
+
dev_priv->cfb_pitch = (dev_priv->cfb_pitch / 64) - 1;
dev_priv->cfb_fence = obj_priv->fence_reg;
dev_priv->cfb_plane = intel_crtc->plane;
+ dev_priv->cfb_offset = obj_priv->gtt_offset;
+ dev_priv->cfb_y = crtc->y;
- dpfc_ctl = I915_READ(ILK_DPFC_CONTROL);
dpfc_ctl &= DPFC_RESERVED;
dpfc_ctl |= (plane | DPFC_CTL_LIMIT_1X);
if (obj_priv->tiling_mode != I915_TILING_NONE) {
@@ -1221,15 +1256,13 @@ static void ironlake_enable_fbc(struct drm_crtc *crtc, unsigned long interval)
I915_WRITE(ILK_DPFC_CHICKEN, ~DPFC_HT_MODIFY);
}
- I915_WRITE(ILK_DPFC_CONTROL, dpfc_ctl);
I915_WRITE(ILK_DPFC_RECOMP_CTL, DPFC_RECOMP_STALL_EN |
(stall_watermark << DPFC_RECOMP_STALL_WM_SHIFT) |
(interval << DPFC_RECOMP_TIMER_COUNT_SHIFT));
I915_WRITE(ILK_DPFC_FENCE_YOFF, crtc->y);
I915_WRITE(ILK_FBC_RT_BASE, obj_priv->gtt_offset | ILK_FBC_RT_VALID);
/* enable it... */
- I915_WRITE(ILK_DPFC_CONTROL, I915_READ(ILK_DPFC_CONTROL) |
- DPFC_CTL_EN);
+ I915_WRITE(ILK_DPFC_CONTROL, dpfc_ctl | DPFC_CTL_EN);
DRM_DEBUG_KMS("enabled fbc on plane %d\n", intel_crtc->plane);
}
@@ -1241,10 +1274,12 @@ void ironlake_disable_fbc(struct drm_device *dev)
/* Disable compression */
dpfc_ctl = I915_READ(ILK_DPFC_CONTROL);
- dpfc_ctl &= ~DPFC_CTL_EN;
- I915_WRITE(ILK_DPFC_CONTROL, dpfc_ctl);
+ if (dpfc_ctl & DPFC_CTL_EN) {
+ dpfc_ctl &= ~DPFC_CTL_EN;
+ I915_WRITE(ILK_DPFC_CONTROL, dpfc_ctl);
- DRM_DEBUG_KMS("disabled FBC\n");
+ DRM_DEBUG_KMS("disabled FBC\n");
+ }
}
static bool ironlake_fbc_enabled(struct drm_device *dev)
@@ -1286,8 +1321,7 @@ void intel_disable_fbc(struct drm_device *dev)
/**
* intel_update_fbc - enable/disable FBC as needed
- * @crtc: CRTC to point the compressor at
- * @mode: mode in use
+ * @dev: the drm_device
*
* Set up the framebuffer compression hardware at mode set time. We
* enable it if possible:
@@ -1304,18 +1338,14 @@ void intel_disable_fbc(struct drm_device *dev)
*
* We need to enable/disable FBC on a global basis.
*/
-static void intel_update_fbc(struct drm_crtc *crtc,
- struct drm_display_mode *mode)
+static void intel_update_fbc(struct drm_device *dev)
{
- struct drm_device *dev = crtc->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
- struct drm_framebuffer *fb = crtc->fb;
+ struct drm_crtc *crtc = NULL, *tmp_crtc;
+ struct intel_crtc *intel_crtc;
+ struct drm_framebuffer *fb;
struct intel_framebuffer *intel_fb;
struct drm_i915_gem_object *obj_priv;
- struct drm_crtc *tmp_crtc;
- struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
- int plane = intel_crtc->plane;
- int crtcs_enabled = 0;
DRM_DEBUG_KMS("\n");
@@ -1325,12 +1355,6 @@ static void intel_update_fbc(struct drm_crtc *crtc,
if (!I915_HAS_FBC(dev))
return;
- if (!crtc->fb)
- return;
-
- intel_fb = to_intel_framebuffer(fb);
- obj_priv = to_intel_bo(intel_fb->obj);
-
/*
* If FBC is already on, we just have to verify that we can
* keep it that way...
@@ -1341,35 +1365,47 @@ static void intel_update_fbc(struct drm_crtc *crtc,
* - going to an unsupported config (interlace, pixel multiply, etc.)
*/
list_for_each_entry(tmp_crtc, &dev->mode_config.crtc_list, head) {
- if (tmp_crtc->enabled)
- crtcs_enabled++;
+ if (tmp_crtc->enabled) {
+ if (crtc) {
+ DRM_DEBUG_KMS("more than one pipe active, disabling compression\n");
+ dev_priv->no_fbc_reason = FBC_MULTIPLE_PIPES;
+ goto out_disable;
+ }
+ crtc = tmp_crtc;
+ }
}
- DRM_DEBUG_KMS("%d pipes active\n", crtcs_enabled);
- if (crtcs_enabled > 1) {
- DRM_DEBUG_KMS("more than one pipe active, disabling compression\n");
- dev_priv->no_fbc_reason = FBC_MULTIPLE_PIPES;
+
+ if (!crtc || crtc->fb == NULL) {
+ DRM_DEBUG_KMS("no output, disabling\n");
+ dev_priv->no_fbc_reason = FBC_NO_OUTPUT;
goto out_disable;
}
+
+ intel_crtc = to_intel_crtc(crtc);
+ fb = crtc->fb;
+ intel_fb = to_intel_framebuffer(fb);
+ obj_priv = to_intel_bo(intel_fb->obj);
+
if (intel_fb->obj->size > dev_priv->cfb_size) {
DRM_DEBUG_KMS("framebuffer too large, disabling "
- "compression\n");
+ "compression\n");
dev_priv->no_fbc_reason = FBC_STOLEN_TOO_SMALL;
goto out_disable;
}
- if ((mode->flags & DRM_MODE_FLAG_INTERLACE) ||
- (mode->flags & DRM_MODE_FLAG_DBLSCAN)) {
+ if ((crtc->mode.flags & DRM_MODE_FLAG_INTERLACE) ||
+ (crtc->mode.flags & DRM_MODE_FLAG_DBLSCAN)) {
DRM_DEBUG_KMS("mode incompatible with compression, "
- "disabling\n");
+ "disabling\n");
dev_priv->no_fbc_reason = FBC_UNSUPPORTED_MODE;
goto out_disable;
}
- if ((mode->hdisplay > 2048) ||
- (mode->vdisplay > 1536)) {
+ if ((crtc->mode.hdisplay > 2048) ||
+ (crtc->mode.vdisplay > 1536)) {
DRM_DEBUG_KMS("mode too large for compression, disabling\n");
dev_priv->no_fbc_reason = FBC_MODE_TOO_LARGE;
goto out_disable;
}
- if ((IS_I915GM(dev) || IS_I945GM(dev)) && plane != 0) {
+ if ((IS_I915GM(dev) || IS_I945GM(dev)) && intel_crtc->plane != 0) {
DRM_DEBUG_KMS("plane not 0, disabling compression\n");
dev_priv->no_fbc_reason = FBC_BAD_PLANE;
goto out_disable;
@@ -1384,18 +1420,7 @@ static void intel_update_fbc(struct drm_crtc *crtc,
if (in_dbg_master())
goto out_disable;
- if (intel_fbc_enabled(dev)) {
- /* We can re-enable it in this case, but need to update pitch */
- if ((fb->pitch > dev_priv->cfb_pitch) ||
- (obj_priv->fence_reg != dev_priv->cfb_fence) ||
- (plane != dev_priv->cfb_plane))
- intel_disable_fbc(dev);
- }
-
- /* Now try to turn it back on if possible */
- if (!intel_fbc_enabled(dev))
- intel_enable_fbc(crtc, 500);
-
+ intel_enable_fbc(crtc, 500);
return;
out_disable:
@@ -1407,7 +1432,9 @@ out_disable:
}
int
-intel_pin_and_fence_fb_obj(struct drm_device *dev, struct drm_gem_object *obj)
+intel_pin_and_fence_fb_obj(struct drm_device *dev,
+ struct drm_gem_object *obj,
+ bool pipelined)
{
struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
u32 alignment;
@@ -1417,7 +1444,7 @@ intel_pin_and_fence_fb_obj(struct drm_device *dev, struct drm_gem_object *obj)
case I915_TILING_NONE:
if (IS_BROADWATER(dev) || IS_CRESTLINE(dev))
alignment = 128 * 1024;
- else if (IS_I965G(dev))
+ else if (INTEL_INFO(dev)->gen >= 4)
alignment = 4 * 1024;
else
alignment = 64 * 1024;
@@ -1435,9 +1462,13 @@ intel_pin_and_fence_fb_obj(struct drm_device *dev, struct drm_gem_object *obj)
}
ret = i915_gem_object_pin(obj, alignment);
- if (ret != 0)
+ if (ret)
return ret;
+ ret = i915_gem_object_set_to_display_plane(obj, pipelined);
+ if (ret)
+ goto err_unpin;
+
/* Install a fence for tiled scan-out. Pre-i965 always needs a
* fence, whereas 965+ only requires a fence if using
* framebuffer compression. For simplicity, we always install
@@ -1445,20 +1476,22 @@ intel_pin_and_fence_fb_obj(struct drm_device *dev, struct drm_gem_object *obj)
*/
if (obj_priv->fence_reg == I915_FENCE_REG_NONE &&
obj_priv->tiling_mode != I915_TILING_NONE) {
- ret = i915_gem_object_get_fence_reg(obj);
- if (ret != 0) {
- i915_gem_object_unpin(obj);
- return ret;
- }
+ ret = i915_gem_object_get_fence_reg(obj, false);
+ if (ret)
+ goto err_unpin;
}
return 0;
+
+err_unpin:
+ i915_gem_object_unpin(obj);
+ return ret;
}
/* Assume fb object is pinned & idle & fenced and just update base pointers */
static int
intel_pipe_set_base_atomic(struct drm_crtc *crtc, struct drm_framebuffer *fb,
- int x, int y)
+ int x, int y, enum mode_set_atomic state)
{
struct drm_device *dev = crtc->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
@@ -1468,12 +1501,8 @@ intel_pipe_set_base_atomic(struct drm_crtc *crtc, struct drm_framebuffer *fb,
struct drm_gem_object *obj;
int plane = intel_crtc->plane;
unsigned long Start, Offset;
- int dspbase = (plane == 0 ? DSPAADDR : DSPBADDR);
- int dspsurf = (plane == 0 ? DSPASURF : DSPBSURF);
- int dspstride = (plane == 0) ? DSPASTRIDE : DSPBSTRIDE;
- int dsptileoff = (plane == 0 ? DSPATILEOFF : DSPBTILEOFF);
- int dspcntr_reg = (plane == 0) ? DSPACNTR : DSPBCNTR;
u32 dspcntr;
+ u32 reg;
switch (plane) {
case 0:
@@ -1488,7 +1517,8 @@ intel_pipe_set_base_atomic(struct drm_crtc *crtc, struct drm_framebuffer *fb,
obj = intel_fb->obj;
obj_priv = to_intel_bo(obj);
- dspcntr = I915_READ(dspcntr_reg);
+ reg = DSPCNTR(plane);
+ dspcntr = I915_READ(reg);
/* Mask out pixel format bits in case we change it */
dspcntr &= ~DISPPLANE_PIXFORMAT_MASK;
switch (fb->bits_per_pixel) {
@@ -1509,7 +1539,7 @@ intel_pipe_set_base_atomic(struct drm_crtc *crtc, struct drm_framebuffer *fb,
DRM_ERROR("Unknown color depth\n");
return -EINVAL;
}
- if (IS_I965G(dev)) {
+ if (INTEL_INFO(dev)->gen >= 4) {
if (obj_priv->tiling_mode != I915_TILING_NONE)
dspcntr |= DISPPLANE_TILED;
else
@@ -1520,28 +1550,24 @@ intel_pipe_set_base_atomic(struct drm_crtc *crtc, struct drm_framebuffer *fb,
/* must disable */
dspcntr |= DISPPLANE_TRICKLE_FEED_DISABLE;
- I915_WRITE(dspcntr_reg, dspcntr);
+ I915_WRITE(reg, dspcntr);
Start = obj_priv->gtt_offset;
Offset = y * fb->pitch + x * (fb->bits_per_pixel / 8);
DRM_DEBUG_KMS("Writing base %08lX %08lX %d %d %d\n",
Start, Offset, x, y, fb->pitch);
- I915_WRITE(dspstride, fb->pitch);
- if (IS_I965G(dev)) {
- I915_WRITE(dspsurf, Start);
- I915_WRITE(dsptileoff, (y << 16) | x);
- I915_WRITE(dspbase, Offset);
- } else {
- I915_WRITE(dspbase, Start + Offset);
- }
- POSTING_READ(dspbase);
-
- if (IS_I965G(dev) || plane == 0)
- intel_update_fbc(crtc, &crtc->mode);
+ I915_WRITE(DSPSTRIDE(plane), fb->pitch);
+ if (INTEL_INFO(dev)->gen >= 4) {
+ I915_WRITE(DSPSURF(plane), Start);
+ I915_WRITE(DSPTILEOFF(plane), (y << 16) | x);
+ I915_WRITE(DSPADDR(plane), Offset);
+ } else
+ I915_WRITE(DSPADDR(plane), Start + Offset);
+ POSTING_READ(reg);
- intel_wait_for_vblank(dev, intel_crtc->pipe);
- intel_increase_pllclock(crtc, true);
+ intel_update_fbc(dev);
+ intel_increase_pllclock(crtc);
return 0;
}
@@ -1553,11 +1579,6 @@ intel_pipe_set_base(struct drm_crtc *crtc, int x, int y,
struct drm_device *dev = crtc->dev;
struct drm_i915_master_private *master_priv;
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
- struct intel_framebuffer *intel_fb;
- struct drm_i915_gem_object *obj_priv;
- struct drm_gem_object *obj;
- int pipe = intel_crtc->pipe;
- int plane = intel_crtc->plane;
int ret;
/* no fb bound */
@@ -1566,45 +1587,54 @@ intel_pipe_set_base(struct drm_crtc *crtc, int x, int y,
return 0;
}
- switch (plane) {
+ switch (intel_crtc->plane) {
case 0:
case 1:
break;
default:
- DRM_ERROR("Can't update plane %d in SAREA\n", plane);
return -EINVAL;
}
- intel_fb = to_intel_framebuffer(crtc->fb);
- obj = intel_fb->obj;
- obj_priv = to_intel_bo(obj);
-
mutex_lock(&dev->struct_mutex);
- ret = intel_pin_and_fence_fb_obj(dev, obj);
+ ret = intel_pin_and_fence_fb_obj(dev,
+ to_intel_framebuffer(crtc->fb)->obj,
+ false);
if (ret != 0) {
mutex_unlock(&dev->struct_mutex);
return ret;
}
- ret = i915_gem_object_set_to_display_plane(obj);
- if (ret != 0) {
- i915_gem_object_unpin(obj);
- mutex_unlock(&dev->struct_mutex);
- return ret;
+ if (old_fb) {
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_gem_object *obj = to_intel_framebuffer(old_fb)->obj;
+ struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
+
+ wait_event(dev_priv->pending_flip_queue,
+ atomic_read(&obj_priv->pending_flip) == 0);
+
+ /* Big Hammer, we also need to ensure that any pending
+ * MI_WAIT_FOR_EVENT inside a user batch buffer on the
+ * current scanout is retired before unpinning the old
+ * framebuffer.
+ */
+ ret = i915_gem_object_flush_gpu(obj_priv, false);
+ if (ret) {
+ i915_gem_object_unpin(to_intel_framebuffer(crtc->fb)->obj);
+ mutex_unlock(&dev->struct_mutex);
+ return ret;
+ }
}
- ret = intel_pipe_set_base_atomic(crtc, crtc->fb, x, y);
+ ret = intel_pipe_set_base_atomic(crtc, crtc->fb, x, y,
+ LEAVE_ATOMIC_MODE_SET);
if (ret) {
- i915_gem_object_unpin(obj);
+ i915_gem_object_unpin(to_intel_framebuffer(crtc->fb)->obj);
mutex_unlock(&dev->struct_mutex);
return ret;
}
- if (old_fb) {
- intel_fb = to_intel_framebuffer(old_fb);
- obj_priv = to_intel_bo(intel_fb->obj);
- i915_gem_object_unpin(intel_fb->obj);
- }
+ if (old_fb)
+ i915_gem_object_unpin(to_intel_framebuffer(old_fb)->obj);
mutex_unlock(&dev->struct_mutex);
@@ -1615,7 +1645,7 @@ intel_pipe_set_base(struct drm_crtc *crtc, int x, int y,
if (!master_priv->sarea_priv)
return 0;
- if (pipe) {
+ if (intel_crtc->pipe) {
master_priv->sarea_priv->pipeB_x = x;
master_priv->sarea_priv->pipeB_y = y;
} else {
@@ -1626,7 +1656,7 @@ intel_pipe_set_base(struct drm_crtc *crtc, int x, int y,
return 0;
}
-static void ironlake_set_pll_edp (struct drm_crtc *crtc, int clock)
+static void ironlake_set_pll_edp(struct drm_crtc *crtc, int clock)
{
struct drm_device *dev = crtc->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
@@ -1659,9 +1689,41 @@ static void ironlake_set_pll_edp (struct drm_crtc *crtc, int clock)
}
I915_WRITE(DP_A, dpa_ctl);
+ POSTING_READ(DP_A);
udelay(500);
}
+static void intel_fdi_normal_train(struct drm_crtc *crtc)
+{
+ struct drm_device *dev = crtc->dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+ int pipe = intel_crtc->pipe;
+ u32 reg, temp;
+
+ /* enable normal train */
+ reg = FDI_TX_CTL(pipe);
+ temp = I915_READ(reg);
+ temp &= ~FDI_LINK_TRAIN_NONE;
+ temp |= FDI_LINK_TRAIN_NONE | FDI_TX_ENHANCE_FRAME_ENABLE;
+ I915_WRITE(reg, temp);
+
+ reg = FDI_RX_CTL(pipe);
+ temp = I915_READ(reg);
+ if (HAS_PCH_CPT(dev)) {
+ temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT;
+ temp |= FDI_LINK_TRAIN_NORMAL_CPT;
+ } else {
+ temp &= ~FDI_LINK_TRAIN_NONE;
+ temp |= FDI_LINK_TRAIN_NONE;
+ }
+ I915_WRITE(reg, temp | FDI_RX_ENHANCE_FRAME_ENABLE);
+
+ /* wait one idle pattern time */
+ POSTING_READ(reg);
+ udelay(1000);
+}
+
/* The FDI link training functions for ILK/Ibexpeak. */
static void ironlake_fdi_link_train(struct drm_crtc *crtc)
{
@@ -1669,84 +1731,88 @@ static void ironlake_fdi_link_train(struct drm_crtc *crtc)
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
int pipe = intel_crtc->pipe;
- int fdi_tx_reg = (pipe == 0) ? FDI_TXA_CTL : FDI_TXB_CTL;
- int fdi_rx_reg = (pipe == 0) ? FDI_RXA_CTL : FDI_RXB_CTL;
- int fdi_rx_iir_reg = (pipe == 0) ? FDI_RXA_IIR : FDI_RXB_IIR;
- int fdi_rx_imr_reg = (pipe == 0) ? FDI_RXA_IMR : FDI_RXB_IMR;
- u32 temp, tries = 0;
+ u32 reg, temp, tries;
/* Train 1: umask FDI RX Interrupt symbol_lock and bit_lock bit
for train result */
- temp = I915_READ(fdi_rx_imr_reg);
+ reg = FDI_RX_IMR(pipe);
+ temp = I915_READ(reg);
temp &= ~FDI_RX_SYMBOL_LOCK;
temp &= ~FDI_RX_BIT_LOCK;
- I915_WRITE(fdi_rx_imr_reg, temp);
- I915_READ(fdi_rx_imr_reg);
+ I915_WRITE(reg, temp);
+ I915_READ(reg);
udelay(150);
/* enable CPU FDI TX and PCH FDI RX */
- temp = I915_READ(fdi_tx_reg);
- temp |= FDI_TX_ENABLE;
+ reg = FDI_TX_CTL(pipe);
+ temp = I915_READ(reg);
temp &= ~(7 << 19);
temp |= (intel_crtc->fdi_lanes - 1) << 19;
temp &= ~FDI_LINK_TRAIN_NONE;
temp |= FDI_LINK_TRAIN_PATTERN_1;
- I915_WRITE(fdi_tx_reg, temp);
- I915_READ(fdi_tx_reg);
+ I915_WRITE(reg, temp | FDI_TX_ENABLE);
- temp = I915_READ(fdi_rx_reg);
+ reg = FDI_RX_CTL(pipe);
+ temp = I915_READ(reg);
temp &= ~FDI_LINK_TRAIN_NONE;
temp |= FDI_LINK_TRAIN_PATTERN_1;
- I915_WRITE(fdi_rx_reg, temp | FDI_RX_ENABLE);
- I915_READ(fdi_rx_reg);
+ I915_WRITE(reg, temp | FDI_RX_ENABLE);
+
+ POSTING_READ(reg);
udelay(150);
+ /* Ironlake workaround, enable clock pointer after FDI enable */
+ I915_WRITE(FDI_RX_CHICKEN(pipe), FDI_RX_PHASE_SYNC_POINTER_ENABLE);
+
+ reg = FDI_RX_IIR(pipe);
for (tries = 0; tries < 5; tries++) {
- temp = I915_READ(fdi_rx_iir_reg);
+ temp = I915_READ(reg);
DRM_DEBUG_KMS("FDI_RX_IIR 0x%x\n", temp);
if ((temp & FDI_RX_BIT_LOCK)) {
DRM_DEBUG_KMS("FDI train 1 done.\n");
- I915_WRITE(fdi_rx_iir_reg,
- temp | FDI_RX_BIT_LOCK);
+ I915_WRITE(reg, temp | FDI_RX_BIT_LOCK);
break;
}
}
if (tries == 5)
- DRM_DEBUG_KMS("FDI train 1 fail!\n");
+ DRM_ERROR("FDI train 1 fail!\n");
/* Train 2 */
- temp = I915_READ(fdi_tx_reg);
+ reg = FDI_TX_CTL(pipe);
+ temp = I915_READ(reg);
temp &= ~FDI_LINK_TRAIN_NONE;
temp |= FDI_LINK_TRAIN_PATTERN_2;
- I915_WRITE(fdi_tx_reg, temp);
+ I915_WRITE(reg, temp);
- temp = I915_READ(fdi_rx_reg);
+ reg = FDI_RX_CTL(pipe);
+ temp = I915_READ(reg);
temp &= ~FDI_LINK_TRAIN_NONE;
temp |= FDI_LINK_TRAIN_PATTERN_2;
- I915_WRITE(fdi_rx_reg, temp);
- udelay(150);
+ I915_WRITE(reg, temp);
- tries = 0;
+ POSTING_READ(reg);
+ udelay(150);
+ reg = FDI_RX_IIR(pipe);
for (tries = 0; tries < 5; tries++) {
- temp = I915_READ(fdi_rx_iir_reg);
+ temp = I915_READ(reg);
DRM_DEBUG_KMS("FDI_RX_IIR 0x%x\n", temp);
if (temp & FDI_RX_SYMBOL_LOCK) {
- I915_WRITE(fdi_rx_iir_reg,
- temp | FDI_RX_SYMBOL_LOCK);
+ I915_WRITE(reg, temp | FDI_RX_SYMBOL_LOCK);
DRM_DEBUG_KMS("FDI train 2 done.\n");
break;
}
}
if (tries == 5)
- DRM_DEBUG_KMS("FDI train 2 fail!\n");
+ DRM_ERROR("FDI train 2 fail!\n");
DRM_DEBUG_KMS("FDI train done\n");
+
}
-static int snb_b_fdi_train_param [] = {
+static const int snb_b_fdi_train_param[] = {
FDI_LINK_TRAIN_400MV_0DB_SNB_B,
FDI_LINK_TRAIN_400MV_6DB_SNB_B,
FDI_LINK_TRAIN_600MV_3_5DB_SNB_B,
@@ -1760,24 +1826,22 @@ static void gen6_fdi_link_train(struct drm_crtc *crtc)
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
int pipe = intel_crtc->pipe;
- int fdi_tx_reg = (pipe == 0) ? FDI_TXA_CTL : FDI_TXB_CTL;
- int fdi_rx_reg = (pipe == 0) ? FDI_RXA_CTL : FDI_RXB_CTL;
- int fdi_rx_iir_reg = (pipe == 0) ? FDI_RXA_IIR : FDI_RXB_IIR;
- int fdi_rx_imr_reg = (pipe == 0) ? FDI_RXA_IMR : FDI_RXB_IMR;
- u32 temp, i;
+ u32 reg, temp, i;
/* Train 1: unmask FDI RX Interrupt symbol_lock and bit_lock bit
for train result */
- temp = I915_READ(fdi_rx_imr_reg);
+ reg = FDI_RX_IMR(pipe);
+ temp = I915_READ(reg);
temp &= ~FDI_RX_SYMBOL_LOCK;
temp &= ~FDI_RX_BIT_LOCK;
- I915_WRITE(fdi_rx_imr_reg, temp);
- I915_READ(fdi_rx_imr_reg);
+ I915_WRITE(reg, temp);
+
+ POSTING_READ(reg);
udelay(150);
/* enable CPU FDI TX and PCH FDI RX */
- temp = I915_READ(fdi_tx_reg);
- temp |= FDI_TX_ENABLE;
+ reg = FDI_TX_CTL(pipe);
+ temp = I915_READ(reg);
temp &= ~(7 << 19);
temp |= (intel_crtc->fdi_lanes - 1) << 19;
temp &= ~FDI_LINK_TRAIN_NONE;
@@ -1785,10 +1849,10 @@ static void gen6_fdi_link_train(struct drm_crtc *crtc)
temp &= ~FDI_LINK_TRAIN_VOL_EMP_MASK;
/* SNB-B */
temp |= FDI_LINK_TRAIN_400MV_0DB_SNB_B;
- I915_WRITE(fdi_tx_reg, temp);
- I915_READ(fdi_tx_reg);
+ I915_WRITE(reg, temp | FDI_TX_ENABLE);
- temp = I915_READ(fdi_rx_reg);
+ reg = FDI_RX_CTL(pipe);
+ temp = I915_READ(reg);
if (HAS_PCH_CPT(dev)) {
temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT;
temp |= FDI_LINK_TRAIN_PATTERN_1_CPT;
@@ -1796,32 +1860,37 @@ static void gen6_fdi_link_train(struct drm_crtc *crtc)
temp &= ~FDI_LINK_TRAIN_NONE;
temp |= FDI_LINK_TRAIN_PATTERN_1;
}
- I915_WRITE(fdi_rx_reg, temp | FDI_RX_ENABLE);
- I915_READ(fdi_rx_reg);
+ I915_WRITE(reg, temp | FDI_RX_ENABLE);
+
+ POSTING_READ(reg);
udelay(150);
for (i = 0; i < 4; i++ ) {
- temp = I915_READ(fdi_tx_reg);
+ reg = FDI_TX_CTL(pipe);
+ temp = I915_READ(reg);
temp &= ~FDI_LINK_TRAIN_VOL_EMP_MASK;
temp |= snb_b_fdi_train_param[i];
- I915_WRITE(fdi_tx_reg, temp);
+ I915_WRITE(reg, temp);
+
+ POSTING_READ(reg);
udelay(500);
- temp = I915_READ(fdi_rx_iir_reg);
+ reg = FDI_RX_IIR(pipe);
+ temp = I915_READ(reg);
DRM_DEBUG_KMS("FDI_RX_IIR 0x%x\n", temp);
if (temp & FDI_RX_BIT_LOCK) {
- I915_WRITE(fdi_rx_iir_reg,
- temp | FDI_RX_BIT_LOCK);
+ I915_WRITE(reg, temp | FDI_RX_BIT_LOCK);
DRM_DEBUG_KMS("FDI train 1 done.\n");
break;
}
}
if (i == 4)
- DRM_DEBUG_KMS("FDI train 1 fail!\n");
+ DRM_ERROR("FDI train 1 fail!\n");
/* Train 2 */
- temp = I915_READ(fdi_tx_reg);
+ reg = FDI_TX_CTL(pipe);
+ temp = I915_READ(reg);
temp &= ~FDI_LINK_TRAIN_NONE;
temp |= FDI_LINK_TRAIN_PATTERN_2;
if (IS_GEN6(dev)) {
@@ -1829,9 +1898,10 @@ static void gen6_fdi_link_train(struct drm_crtc *crtc)
/* SNB-B */
temp |= FDI_LINK_TRAIN_400MV_0DB_SNB_B;
}
- I915_WRITE(fdi_tx_reg, temp);
+ I915_WRITE(reg, temp);
- temp = I915_READ(fdi_rx_reg);
+ reg = FDI_RX_CTL(pipe);
+ temp = I915_READ(reg);
if (HAS_PCH_CPT(dev)) {
temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT;
temp |= FDI_LINK_TRAIN_PATTERN_2_CPT;
@@ -1839,535 +1909,598 @@ static void gen6_fdi_link_train(struct drm_crtc *crtc)
temp &= ~FDI_LINK_TRAIN_NONE;
temp |= FDI_LINK_TRAIN_PATTERN_2;
}
- I915_WRITE(fdi_rx_reg, temp);
+ I915_WRITE(reg, temp);
+
+ POSTING_READ(reg);
udelay(150);
for (i = 0; i < 4; i++ ) {
- temp = I915_READ(fdi_tx_reg);
+ reg = FDI_TX_CTL(pipe);
+ temp = I915_READ(reg);
temp &= ~FDI_LINK_TRAIN_VOL_EMP_MASK;
temp |= snb_b_fdi_train_param[i];
- I915_WRITE(fdi_tx_reg, temp);
+ I915_WRITE(reg, temp);
+
+ POSTING_READ(reg);
udelay(500);
- temp = I915_READ(fdi_rx_iir_reg);
+ reg = FDI_RX_IIR(pipe);
+ temp = I915_READ(reg);
DRM_DEBUG_KMS("FDI_RX_IIR 0x%x\n", temp);
if (temp & FDI_RX_SYMBOL_LOCK) {
- I915_WRITE(fdi_rx_iir_reg,
- temp | FDI_RX_SYMBOL_LOCK);
+ I915_WRITE(reg, temp | FDI_RX_SYMBOL_LOCK);
DRM_DEBUG_KMS("FDI train 2 done.\n");
break;
}
}
if (i == 4)
- DRM_DEBUG_KMS("FDI train 2 fail!\n");
+ DRM_ERROR("FDI train 2 fail!\n");
DRM_DEBUG_KMS("FDI train done.\n");
}
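For illustration only: both trainers above follow the same shape — program a training pattern, delay, then poll FDI_RX_IIR for a lock bit, with the SNB path additionally stepping through four voltage-swing/pre-emphasis settings. A minimal standalone C model of that retry loop (the table values and the lock condition are stand-ins, not i915 code):

#include <stdio.h>
#include <stdbool.h>

/* stand-in voltage-swing/pre-emphasis table (values are arbitrary) */
static const int train_param[] = { 0, 6, 35, 60 };

/* pretend the receiver only achieves bit lock at the third setting */
static bool rx_reports_bit_lock(int setting)
{
        return setting == train_param[2];
}

int main(void)
{
        int i;

        for (i = 0; i < 4; i++) {
                /* program TX with train_param[i], wait, then poll RX status */
                if (rx_reports_bit_lock(train_param[i])) {
                        printf("bit lock at setting index %d\n", i);
                        break;
                }
        }
        if (i == 4)
                printf("FDI train 1 fail\n");
        return 0;
}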
-static void ironlake_crtc_dpms(struct drm_crtc *crtc, int mode)
+static void ironlake_fdi_enable(struct drm_crtc *crtc)
{
struct drm_device *dev = crtc->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
int pipe = intel_crtc->pipe;
- int plane = intel_crtc->plane;
- int pch_dpll_reg = (pipe == 0) ? PCH_DPLL_A : PCH_DPLL_B;
- int pipeconf_reg = (pipe == 0) ? PIPEACONF : PIPEBCONF;
- int dspcntr_reg = (plane == 0) ? DSPACNTR : DSPBCNTR;
- int dspbase_reg = (plane == 0) ? DSPAADDR : DSPBADDR;
- int fdi_tx_reg = (pipe == 0) ? FDI_TXA_CTL : FDI_TXB_CTL;
- int fdi_rx_reg = (pipe == 0) ? FDI_RXA_CTL : FDI_RXB_CTL;
- int transconf_reg = (pipe == 0) ? TRANSACONF : TRANSBCONF;
- int cpu_htot_reg = (pipe == 0) ? HTOTAL_A : HTOTAL_B;
- int cpu_hblank_reg = (pipe == 0) ? HBLANK_A : HBLANK_B;
- int cpu_hsync_reg = (pipe == 0) ? HSYNC_A : HSYNC_B;
- int cpu_vtot_reg = (pipe == 0) ? VTOTAL_A : VTOTAL_B;
- int cpu_vblank_reg = (pipe == 0) ? VBLANK_A : VBLANK_B;
- int cpu_vsync_reg = (pipe == 0) ? VSYNC_A : VSYNC_B;
- int trans_htot_reg = (pipe == 0) ? TRANS_HTOTAL_A : TRANS_HTOTAL_B;
- int trans_hblank_reg = (pipe == 0) ? TRANS_HBLANK_A : TRANS_HBLANK_B;
- int trans_hsync_reg = (pipe == 0) ? TRANS_HSYNC_A : TRANS_HSYNC_B;
- int trans_vtot_reg = (pipe == 0) ? TRANS_VTOTAL_A : TRANS_VTOTAL_B;
- int trans_vblank_reg = (pipe == 0) ? TRANS_VBLANK_A : TRANS_VBLANK_B;
- int trans_vsync_reg = (pipe == 0) ? TRANS_VSYNC_A : TRANS_VSYNC_B;
- int trans_dpll_sel = (pipe == 0) ? 0 : 1;
- u32 temp;
- u32 pipe_bpc;
-
- temp = I915_READ(pipeconf_reg);
- pipe_bpc = temp & PIPE_BPC_MASK;
+ u32 reg, temp;
- /* XXX: When our outputs are all unaware of DPMS modes other than off
- * and on, we should map those modes to DRM_MODE_DPMS_OFF in the CRTC.
- */
- switch (mode) {
- case DRM_MODE_DPMS_ON:
- case DRM_MODE_DPMS_STANDBY:
- case DRM_MODE_DPMS_SUSPEND:
- DRM_DEBUG_KMS("crtc %d/%d dpms on\n", pipe, plane);
+ /* Write the TU size bits so error detection works */
+ I915_WRITE(FDI_RX_TUSIZE1(pipe),
+ I915_READ(PIPE_DATA_M1(pipe)) & TU_SIZE_MASK);
- if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS)) {
- temp = I915_READ(PCH_LVDS);
- if ((temp & LVDS_PORT_EN) == 0) {
- I915_WRITE(PCH_LVDS, temp | LVDS_PORT_EN);
- POSTING_READ(PCH_LVDS);
- }
- }
+ /* enable PCH FDI RX PLL, wait warmup plus DMI latency */
+ reg = FDI_RX_CTL(pipe);
+ temp = I915_READ(reg);
+ temp &= ~((0x7 << 19) | (0x7 << 16));
+ temp |= (intel_crtc->fdi_lanes - 1) << 19;
+ temp |= (I915_READ(PIPECONF(pipe)) & PIPE_BPC_MASK) << 11;
+ I915_WRITE(reg, temp | FDI_RX_PLL_ENABLE);
- if (!HAS_eDP) {
+ POSTING_READ(reg);
+ udelay(200);
- /* enable PCH FDI RX PLL, wait warmup plus DMI latency */
- temp = I915_READ(fdi_rx_reg);
- /*
- * make the BPC in FDI Rx be consistent with that in
- * pipeconf reg.
- */
- temp &= ~(0x7 << 16);
- temp |= (pipe_bpc << 11);
- temp &= ~(7 << 19);
- temp |= (intel_crtc->fdi_lanes - 1) << 19;
- I915_WRITE(fdi_rx_reg, temp | FDI_RX_PLL_ENABLE);
- I915_READ(fdi_rx_reg);
- udelay(200);
+ /* Switch from Rawclk to PCDclk */
+ temp = I915_READ(reg);
+ I915_WRITE(reg, temp | FDI_PCDCLK);
- /* Switch from Rawclk to PCDclk */
- temp = I915_READ(fdi_rx_reg);
- I915_WRITE(fdi_rx_reg, temp | FDI_SEL_PCDCLK);
- I915_READ(fdi_rx_reg);
- udelay(200);
+ POSTING_READ(reg);
+ udelay(200);
- /* Enable CPU FDI TX PLL, always on for Ironlake */
- temp = I915_READ(fdi_tx_reg);
- if ((temp & FDI_TX_PLL_ENABLE) == 0) {
- I915_WRITE(fdi_tx_reg, temp | FDI_TX_PLL_ENABLE);
- I915_READ(fdi_tx_reg);
- udelay(100);
- }
- }
+ /* Enable CPU FDI TX PLL, always on for Ironlake */
+ reg = FDI_TX_CTL(pipe);
+ temp = I915_READ(reg);
+ if ((temp & FDI_TX_PLL_ENABLE) == 0) {
+ I915_WRITE(reg, temp | FDI_TX_PLL_ENABLE);
- /* Enable panel fitting for LVDS */
- if (dev_priv->pch_pf_size &&
- (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS)
- || HAS_eDP || intel_pch_has_edp(crtc))) {
- /* Force use of hard-coded filter coefficients
- * as some pre-programmed values are broken,
- * e.g. x201.
- */
- I915_WRITE(pipe ? PFB_CTL_1 : PFA_CTL_1,
- PF_ENABLE | PF_FILTER_MED_3x3);
- I915_WRITE(pipe ? PFB_WIN_POS : PFA_WIN_POS,
- dev_priv->pch_pf_pos);
- I915_WRITE(pipe ? PFB_WIN_SZ : PFA_WIN_SZ,
- dev_priv->pch_pf_size);
- }
+ POSTING_READ(reg);
+ udelay(100);
+ }
+}
- /* Enable CPU pipe */
- temp = I915_READ(pipeconf_reg);
- if ((temp & PIPEACONF_ENABLE) == 0) {
- I915_WRITE(pipeconf_reg, temp | PIPEACONF_ENABLE);
- I915_READ(pipeconf_reg);
- udelay(100);
- }
+static void intel_flush_display_plane(struct drm_device *dev,
+ int plane)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ u32 reg = DSPADDR(plane);
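+ /* writing the base address back unchanged flushes pending plane changes, as the old "Flush the plane changes" writes did */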
+ I915_WRITE(reg, I915_READ(reg));
+}
- /* configure and enable CPU plane */
- temp = I915_READ(dspcntr_reg);
- if ((temp & DISPLAY_PLANE_ENABLE) == 0) {
- I915_WRITE(dspcntr_reg, temp | DISPLAY_PLANE_ENABLE);
- /* Flush the plane changes */
- I915_WRITE(dspbase_reg, I915_READ(dspbase_reg));
- }
+/*
+ * When we disable a pipe, we need to clear any pending scanline wait events
+ * to avoid hanging the ring, which we assume we are waiting on.
+ */
+static void intel_clear_scanline_wait(struct drm_device *dev)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ u32 tmp;
- if (!HAS_eDP) {
- /* For PCH output, training FDI link */
- if (IS_GEN6(dev))
- gen6_fdi_link_train(crtc);
- else
- ironlake_fdi_link_train(crtc);
+ if (IS_GEN2(dev))
+ /* Can't break the hang on i8xx */
+ return;
- /* enable PCH DPLL */
- temp = I915_READ(pch_dpll_reg);
- if ((temp & DPLL_VCO_ENABLE) == 0) {
- I915_WRITE(pch_dpll_reg, temp | DPLL_VCO_ENABLE);
- I915_READ(pch_dpll_reg);
- }
- udelay(200);
+ tmp = I915_READ(PRB0_CTL);
+ if (tmp & RING_WAIT) {
+ I915_WRITE(PRB0_CTL, tmp);
+ POSTING_READ(PRB0_CTL);
+ }
+}
- if (HAS_PCH_CPT(dev)) {
- /* Be sure PCH DPLL SEL is set */
- temp = I915_READ(PCH_DPLL_SEL);
- if (trans_dpll_sel == 0 &&
- (temp & TRANSA_DPLL_ENABLE) == 0)
- temp |= (TRANSA_DPLL_ENABLE | TRANSA_DPLLA_SEL);
- else if (trans_dpll_sel == 1 &&
- (temp & TRANSB_DPLL_ENABLE) == 0)
- temp |= (TRANSB_DPLL_ENABLE | TRANSB_DPLLB_SEL);
- I915_WRITE(PCH_DPLL_SEL, temp);
- I915_READ(PCH_DPLL_SEL);
- }
+static void intel_crtc_wait_for_pending_flips(struct drm_crtc *crtc)
+{
+ struct drm_i915_gem_object *obj_priv;
+ struct drm_i915_private *dev_priv;
- /* set transcoder timing */
- I915_WRITE(trans_htot_reg, I915_READ(cpu_htot_reg));
- I915_WRITE(trans_hblank_reg, I915_READ(cpu_hblank_reg));
- I915_WRITE(trans_hsync_reg, I915_READ(cpu_hsync_reg));
-
- I915_WRITE(trans_vtot_reg, I915_READ(cpu_vtot_reg));
- I915_WRITE(trans_vblank_reg, I915_READ(cpu_vblank_reg));
- I915_WRITE(trans_vsync_reg, I915_READ(cpu_vsync_reg));
-
- /* enable normal train */
- temp = I915_READ(fdi_tx_reg);
- temp &= ~FDI_LINK_TRAIN_NONE;
- I915_WRITE(fdi_tx_reg, temp | FDI_LINK_TRAIN_NONE |
- FDI_TX_ENHANCE_FRAME_ENABLE);
- I915_READ(fdi_tx_reg);
-
- temp = I915_READ(fdi_rx_reg);
- if (HAS_PCH_CPT(dev)) {
- temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT;
- temp |= FDI_LINK_TRAIN_NORMAL_CPT;
- } else {
- temp &= ~FDI_LINK_TRAIN_NONE;
- temp |= FDI_LINK_TRAIN_NONE;
- }
- I915_WRITE(fdi_rx_reg, temp | FDI_RX_ENHANCE_FRAME_ENABLE);
- I915_READ(fdi_rx_reg);
+ if (crtc->fb == NULL)
+ return;
- /* wait one idle pattern time */
- udelay(100);
+ obj_priv = to_intel_bo(to_intel_framebuffer(crtc->fb)->obj);
+ dev_priv = crtc->dev->dev_private;
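+ /* sleep until every outstanding flip on this framebuffer has completed (pending_flip drops to zero) */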
+ wait_event(dev_priv->pending_flip_queue,
+ atomic_read(&obj_priv->pending_flip) == 0);
+}
- /* For PCH DP, enable TRANS_DP_CTL */
- if (HAS_PCH_CPT(dev) &&
- intel_pipe_has_type(crtc, INTEL_OUTPUT_DISPLAYPORT)) {
- int trans_dp_ctl = (pipe == 0) ? TRANS_DP_CTL_A : TRANS_DP_CTL_B;
- int reg;
-
- reg = I915_READ(trans_dp_ctl);
- reg &= ~(TRANS_DP_PORT_SEL_MASK |
- TRANS_DP_SYNC_MASK);
- reg |= (TRANS_DP_OUTPUT_ENABLE |
- TRANS_DP_ENH_FRAMING);
-
- if (crtc->mode.flags & DRM_MODE_FLAG_PHSYNC)
- reg |= TRANS_DP_HSYNC_ACTIVE_HIGH;
- if (crtc->mode.flags & DRM_MODE_FLAG_PVSYNC)
- reg |= TRANS_DP_VSYNC_ACTIVE_HIGH;
-
- switch (intel_trans_dp_port_sel(crtc)) {
- case PCH_DP_B:
- reg |= TRANS_DP_PORT_SEL_B;
- break;
- case PCH_DP_C:
- reg |= TRANS_DP_PORT_SEL_C;
- break;
- case PCH_DP_D:
- reg |= TRANS_DP_PORT_SEL_D;
- break;
- default:
- DRM_DEBUG_KMS("Wrong PCH DP port return. Guess port B\n");
- reg |= TRANS_DP_PORT_SEL_B;
- break;
- }
+static void ironlake_crtc_enable(struct drm_crtc *crtc)
+{
+ struct drm_device *dev = crtc->dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+ int pipe = intel_crtc->pipe;
+ int plane = intel_crtc->plane;
+ u32 reg, temp;
- I915_WRITE(trans_dp_ctl, reg);
- POSTING_READ(trans_dp_ctl);
- }
+ if (intel_crtc->active)
+ return;
- /* enable PCH transcoder */
- temp = I915_READ(transconf_reg);
- /*
- * make the BPC in transcoder be consistent with
- * that in pipeconf reg.
- */
- temp &= ~PIPE_BPC_MASK;
- temp |= pipe_bpc;
- I915_WRITE(transconf_reg, temp | TRANS_ENABLE);
- I915_READ(transconf_reg);
+ intel_crtc->active = true;
+ intel_update_watermarks(dev);
- if (wait_for(I915_READ(transconf_reg) & TRANS_STATE_ENABLE, 100, 1))
- DRM_ERROR("failed to enable transcoder\n");
- }
+ if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS)) {
+ temp = I915_READ(PCH_LVDS);
+ if ((temp & LVDS_PORT_EN) == 0)
+ I915_WRITE(PCH_LVDS, temp | LVDS_PORT_EN);
+ }
- intel_crtc_load_lut(crtc);
+ ironlake_fdi_enable(crtc);
- intel_update_fbc(crtc, &crtc->mode);
- break;
+ /* Enable panel fitting for LVDS */
+ if (dev_priv->pch_pf_size &&
+ (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS) || HAS_eDP)) {
+ /* Force use of hard-coded filter coefficients
+ * as some pre-programmed values are broken,
+ * e.g. x201.
+ */
+ I915_WRITE(pipe ? PFB_CTL_1 : PFA_CTL_1,
+ PF_ENABLE | PF_FILTER_MED_3x3);
+ I915_WRITE(pipe ? PFB_WIN_POS : PFA_WIN_POS,
+ dev_priv->pch_pf_pos);
+ I915_WRITE(pipe ? PFB_WIN_SZ : PFA_WIN_SZ,
+ dev_priv->pch_pf_size);
+ }
+
+ /* Enable CPU pipe */
+ reg = PIPECONF(pipe);
+ temp = I915_READ(reg);
+ if ((temp & PIPECONF_ENABLE) == 0) {
+ I915_WRITE(reg, temp | PIPECONF_ENABLE);
+ POSTING_READ(reg);
+ intel_wait_for_vblank(dev, intel_crtc->pipe);
+ }
+
+ /* configure and enable CPU plane */
+ reg = DSPCNTR(plane);
+ temp = I915_READ(reg);
+ if ((temp & DISPLAY_PLANE_ENABLE) == 0) {
+ I915_WRITE(reg, temp | DISPLAY_PLANE_ENABLE);
+ intel_flush_display_plane(dev, plane);
+ }
+
+ /* For PCH output, training FDI link */
+ if (IS_GEN6(dev))
+ gen6_fdi_link_train(crtc);
+ else
+ ironlake_fdi_link_train(crtc);
+
+ /* enable PCH DPLL */
+ reg = PCH_DPLL(pipe);
+ temp = I915_READ(reg);
+ if ((temp & DPLL_VCO_ENABLE) == 0) {
+ I915_WRITE(reg, temp | DPLL_VCO_ENABLE);
+ POSTING_READ(reg);
+ udelay(200);
+ }
- case DRM_MODE_DPMS_OFF:
- DRM_DEBUG_KMS("crtc %d/%d dpms off\n", pipe, plane);
+ if (HAS_PCH_CPT(dev)) {
+ /* Be sure PCH DPLL SEL is set */
+ temp = I915_READ(PCH_DPLL_SEL);
+ if (pipe == 0 && (temp & TRANSA_DPLL_ENABLE) == 0)
+ temp |= (TRANSA_DPLL_ENABLE | TRANSA_DPLLA_SEL);
+ else if (pipe == 1 && (temp & TRANSB_DPLL_ENABLE) == 0)
+ temp |= (TRANSB_DPLL_ENABLE | TRANSB_DPLLB_SEL);
+ I915_WRITE(PCH_DPLL_SEL, temp);
+ }
- drm_vblank_off(dev, pipe);
- /* Disable display plane */
- temp = I915_READ(dspcntr_reg);
- if ((temp & DISPLAY_PLANE_ENABLE) != 0) {
- I915_WRITE(dspcntr_reg, temp & ~DISPLAY_PLANE_ENABLE);
- /* Flush the plane changes */
- I915_WRITE(dspbase_reg, I915_READ(dspbase_reg));
- I915_READ(dspbase_reg);
+ /* set transcoder timing */
+ I915_WRITE(TRANS_HTOTAL(pipe), I915_READ(HTOTAL(pipe)));
+ I915_WRITE(TRANS_HBLANK(pipe), I915_READ(HBLANK(pipe)));
+ I915_WRITE(TRANS_HSYNC(pipe), I915_READ(HSYNC(pipe)));
+
+ I915_WRITE(TRANS_VTOTAL(pipe), I915_READ(VTOTAL(pipe)));
+ I915_WRITE(TRANS_VBLANK(pipe), I915_READ(VBLANK(pipe)));
+ I915_WRITE(TRANS_VSYNC(pipe), I915_READ(VSYNC(pipe)));
+
+ intel_fdi_normal_train(crtc);
+
+ /* For PCH DP, enable TRANS_DP_CTL */
+ if (HAS_PCH_CPT(dev) &&
+ intel_pipe_has_type(crtc, INTEL_OUTPUT_DISPLAYPORT)) {
+ reg = TRANS_DP_CTL(pipe);
+ temp = I915_READ(reg);
+ temp &= ~(TRANS_DP_PORT_SEL_MASK |
+ TRANS_DP_SYNC_MASK |
+ TRANS_DP_BPC_MASK);
+ temp |= (TRANS_DP_OUTPUT_ENABLE |
+ TRANS_DP_ENH_FRAMING);
+ temp |= TRANS_DP_8BPC;
+
+ if (crtc->mode.flags & DRM_MODE_FLAG_PHSYNC)
+ temp |= TRANS_DP_HSYNC_ACTIVE_HIGH;
+ if (crtc->mode.flags & DRM_MODE_FLAG_PVSYNC)
+ temp |= TRANS_DP_VSYNC_ACTIVE_HIGH;
+
+ switch (intel_trans_dp_port_sel(crtc)) {
+ case PCH_DP_B:
+ temp |= TRANS_DP_PORT_SEL_B;
+ break;
+ case PCH_DP_C:
+ temp |= TRANS_DP_PORT_SEL_C;
+ break;
+ case PCH_DP_D:
+ temp |= TRANS_DP_PORT_SEL_D;
+ break;
+ default:
+ DRM_DEBUG_KMS("Wrong PCH DP port return. Guess port B\n");
+ temp |= TRANS_DP_PORT_SEL_B;
+ break;
}
- if (dev_priv->cfb_plane == plane &&
- dev_priv->display.disable_fbc)
- dev_priv->display.disable_fbc(dev);
+ I915_WRITE(reg, temp);
+ }
- /* disable cpu pipe, disable after all planes disabled */
- temp = I915_READ(pipeconf_reg);
- if ((temp & PIPEACONF_ENABLE) != 0) {
- I915_WRITE(pipeconf_reg, temp & ~PIPEACONF_ENABLE);
+ /* enable PCH transcoder */
+ reg = TRANSCONF(pipe);
+ temp = I915_READ(reg);
+ /*
+ * make the BPC in transcoder be consistent with
+ * that in pipeconf reg.
+ */
+ temp &= ~PIPE_BPC_MASK;
+ temp |= I915_READ(PIPECONF(pipe)) & PIPE_BPC_MASK;
+ I915_WRITE(reg, temp | TRANS_ENABLE);
+ if (wait_for(I915_READ(reg) & TRANS_STATE_ENABLE, 100))
+ DRM_ERROR("failed to enable transcoder %d\n", pipe);
- /* wait for cpu pipe off, pipe state */
- if (wait_for((I915_READ(pipeconf_reg) & I965_PIPECONF_ACTIVE) == 0, 50, 1))
- DRM_ERROR("failed to turn off cpu pipe\n");
- } else
- DRM_DEBUG_KMS("crtc %d is disabled\n", pipe);
+ intel_crtc_load_lut(crtc);
+ intel_update_fbc(dev);
+ intel_crtc_update_cursor(crtc, true);
+}
- udelay(100);
+static void ironlake_crtc_disable(struct drm_crtc *crtc)
+{
+ struct drm_device *dev = crtc->dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+ int pipe = intel_crtc->pipe;
+ int plane = intel_crtc->plane;
+ u32 reg, temp;
- /* Disable PF */
- I915_WRITE(pipe ? PFB_CTL_1 : PFA_CTL_1, 0);
- I915_WRITE(pipe ? PFB_WIN_SZ : PFA_WIN_SZ, 0);
+ if (!intel_crtc->active)
+ return;
- /* disable CPU FDI tx and PCH FDI rx */
- temp = I915_READ(fdi_tx_reg);
- I915_WRITE(fdi_tx_reg, temp & ~FDI_TX_ENABLE);
- I915_READ(fdi_tx_reg);
+ intel_crtc_wait_for_pending_flips(crtc);
+ drm_vblank_off(dev, pipe);
+ intel_crtc_update_cursor(crtc, false);
- temp = I915_READ(fdi_rx_reg);
- /* BPC in FDI rx is consistent with that in pipeconf */
- temp &= ~(0x07 << 16);
- temp |= (pipe_bpc << 11);
- I915_WRITE(fdi_rx_reg, temp & ~FDI_RX_ENABLE);
- I915_READ(fdi_rx_reg);
+ /* Disable display plane */
+ reg = DSPCNTR(plane);
+ temp = I915_READ(reg);
+ if (temp & DISPLAY_PLANE_ENABLE) {
+ I915_WRITE(reg, temp & ~DISPLAY_PLANE_ENABLE);
+ intel_flush_display_plane(dev, plane);
+ }
- udelay(100);
+ if (dev_priv->cfb_plane == plane &&
+ dev_priv->display.disable_fbc)
+ dev_priv->display.disable_fbc(dev);
- /* still set train pattern 1 */
- temp = I915_READ(fdi_tx_reg);
+ /* disable cpu pipe, disable after all planes disabled */
+ reg = PIPECONF(pipe);
+ temp = I915_READ(reg);
+ if (temp & PIPECONF_ENABLE) {
+ I915_WRITE(reg, temp & ~PIPECONF_ENABLE);
+ POSTING_READ(reg);
+ /* wait for cpu pipe off, pipe state */
+ intel_wait_for_pipe_off(dev, intel_crtc->pipe);
+ }
+
+ /* Disable PF */
+ I915_WRITE(pipe ? PFB_CTL_1 : PFA_CTL_1, 0);
+ I915_WRITE(pipe ? PFB_WIN_SZ : PFA_WIN_SZ, 0);
+
+ /* disable CPU FDI tx and PCH FDI rx */
+ reg = FDI_TX_CTL(pipe);
+ temp = I915_READ(reg);
+ I915_WRITE(reg, temp & ~FDI_TX_ENABLE);
+ POSTING_READ(reg);
+
+ reg = FDI_RX_CTL(pipe);
+ temp = I915_READ(reg);
+ temp &= ~(0x7 << 16);
+ temp |= (I915_READ(PIPECONF(pipe)) & PIPE_BPC_MASK) << 11;
+ I915_WRITE(reg, temp & ~FDI_RX_ENABLE);
+
+ POSTING_READ(reg);
+ udelay(100);
+
+ /* Ironlake workaround, disable clock pointer after downing FDI */
+ if (HAS_PCH_IBX(dev))
+ I915_WRITE(FDI_RX_CHICKEN(pipe),
+ I915_READ(FDI_RX_CHICKEN(pipe)) &
+ ~FDI_RX_PHASE_SYNC_POINTER_ENABLE);
+
+ /* still set train pattern 1 */
+ reg = FDI_TX_CTL(pipe);
+ temp = I915_READ(reg);
+ temp &= ~FDI_LINK_TRAIN_NONE;
+ temp |= FDI_LINK_TRAIN_PATTERN_1;
+ I915_WRITE(reg, temp);
+
+ reg = FDI_RX_CTL(pipe);
+ temp = I915_READ(reg);
+ if (HAS_PCH_CPT(dev)) {
+ temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT;
+ temp |= FDI_LINK_TRAIN_PATTERN_1_CPT;
+ } else {
temp &= ~FDI_LINK_TRAIN_NONE;
temp |= FDI_LINK_TRAIN_PATTERN_1;
- I915_WRITE(fdi_tx_reg, temp);
- POSTING_READ(fdi_tx_reg);
-
- temp = I915_READ(fdi_rx_reg);
- if (HAS_PCH_CPT(dev)) {
- temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT;
- temp |= FDI_LINK_TRAIN_PATTERN_1_CPT;
- } else {
- temp &= ~FDI_LINK_TRAIN_NONE;
- temp |= FDI_LINK_TRAIN_PATTERN_1;
- }
- I915_WRITE(fdi_rx_reg, temp);
- POSTING_READ(fdi_rx_reg);
+ }
+ /* BPC in FDI rx is consistent with that in PIPECONF */
+ temp &= ~(0x07 << 16);
+ temp |= (I915_READ(PIPECONF(pipe)) & PIPE_BPC_MASK) << 11;
+ I915_WRITE(reg, temp);
- udelay(100);
+ POSTING_READ(reg);
+ udelay(100);
- if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS)) {
- temp = I915_READ(PCH_LVDS);
+ if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS)) {
+ temp = I915_READ(PCH_LVDS);
+ if (temp & LVDS_PORT_EN) {
I915_WRITE(PCH_LVDS, temp & ~LVDS_PORT_EN);
- I915_READ(PCH_LVDS);
+ POSTING_READ(PCH_LVDS);
udelay(100);
}
+ }
- /* disable PCH transcoder */
- temp = I915_READ(transconf_reg);
- if ((temp & TRANS_ENABLE) != 0) {
- I915_WRITE(transconf_reg, temp & ~TRANS_ENABLE);
+ /* disable PCH transcoder */
+ reg = TRANSCONF(pipe);
+ temp = I915_READ(reg);
+ if (temp & TRANS_ENABLE) {
+ I915_WRITE(reg, temp & ~TRANS_ENABLE);
+ /* wait for PCH transcoder off, transcoder state */
+ if (wait_for((I915_READ(reg) & TRANS_STATE_ENABLE) == 0, 50))
+ DRM_ERROR("failed to disable transcoder\n");
+ }
- /* wait for PCH transcoder off, transcoder state */
- if (wait_for((I915_READ(transconf_reg) & TRANS_STATE_ENABLE) == 0, 50, 1))
- DRM_ERROR("failed to disable transcoder\n");
- }
+ if (HAS_PCH_CPT(dev)) {
+ /* disable TRANS_DP_CTL */
+ reg = TRANS_DP_CTL(pipe);
+ temp = I915_READ(reg);
+ temp &= ~(TRANS_DP_OUTPUT_ENABLE | TRANS_DP_PORT_SEL_MASK);
+ I915_WRITE(reg, temp);
- temp = I915_READ(transconf_reg);
- /* BPC in transcoder is consistent with that in pipeconf */
- temp &= ~PIPE_BPC_MASK;
- temp |= pipe_bpc;
- I915_WRITE(transconf_reg, temp);
- I915_READ(transconf_reg);
- udelay(100);
+ /* disable DPLL_SEL */
+ temp = I915_READ(PCH_DPLL_SEL);
+ if (pipe == 0)
+ temp &= ~(TRANSA_DPLL_ENABLE | TRANSA_DPLLB_SEL);
+ else
+ temp &= ~(TRANSB_DPLL_ENABLE | TRANSB_DPLLB_SEL);
+ I915_WRITE(PCH_DPLL_SEL, temp);
+ }
- if (HAS_PCH_CPT(dev)) {
- /* disable TRANS_DP_CTL */
- int trans_dp_ctl = (pipe == 0) ? TRANS_DP_CTL_A : TRANS_DP_CTL_B;
- int reg;
+ /* disable PCH DPLL */
+ reg = PCH_DPLL(pipe);
+ temp = I915_READ(reg);
+ I915_WRITE(reg, temp & ~DPLL_VCO_ENABLE);
- reg = I915_READ(trans_dp_ctl);
- reg &= ~(TRANS_DP_OUTPUT_ENABLE | TRANS_DP_PORT_SEL_MASK);
- I915_WRITE(trans_dp_ctl, reg);
- POSTING_READ(trans_dp_ctl);
+ /* Switch from PCDclk to Rawclk */
+ reg = FDI_RX_CTL(pipe);
+ temp = I915_READ(reg);
+ I915_WRITE(reg, temp & ~FDI_PCDCLK);
- /* disable DPLL_SEL */
- temp = I915_READ(PCH_DPLL_SEL);
- if (trans_dpll_sel == 0)
- temp &= ~(TRANSA_DPLL_ENABLE | TRANSA_DPLLB_SEL);
- else
- temp &= ~(TRANSB_DPLL_ENABLE | TRANSB_DPLLB_SEL);
- I915_WRITE(PCH_DPLL_SEL, temp);
- I915_READ(PCH_DPLL_SEL);
+ /* Disable CPU FDI TX PLL */
+ reg = FDI_TX_CTL(pipe);
+ temp = I915_READ(reg);
+ I915_WRITE(reg, temp & ~FDI_TX_PLL_ENABLE);
- }
+ POSTING_READ(reg);
+ udelay(100);
- /* disable PCH DPLL */
- temp = I915_READ(pch_dpll_reg);
- I915_WRITE(pch_dpll_reg, temp & ~DPLL_VCO_ENABLE);
- I915_READ(pch_dpll_reg);
-
- /* Switch from PCDclk to Rawclk */
- temp = I915_READ(fdi_rx_reg);
- temp &= ~FDI_SEL_PCDCLK;
- I915_WRITE(fdi_rx_reg, temp);
- I915_READ(fdi_rx_reg);
-
- /* Disable CPU FDI TX PLL */
- temp = I915_READ(fdi_tx_reg);
- I915_WRITE(fdi_tx_reg, temp & ~FDI_TX_PLL_ENABLE);
- I915_READ(fdi_tx_reg);
- udelay(100);
+ reg = FDI_RX_CTL(pipe);
+ temp = I915_READ(reg);
+ I915_WRITE(reg, temp & ~FDI_RX_PLL_ENABLE);
- temp = I915_READ(fdi_rx_reg);
- temp &= ~FDI_RX_PLL_ENABLE;
- I915_WRITE(fdi_rx_reg, temp);
- I915_READ(fdi_rx_reg);
+ /* Wait for the clocks to turn off. */
+ POSTING_READ(reg);
+ udelay(100);
- /* Wait for the clocks to turn off. */
- udelay(100);
+ intel_crtc->active = false;
+ intel_update_watermarks(dev);
+ intel_update_fbc(dev);
+ intel_clear_scanline_wait(dev);
+}
+
+static void ironlake_crtc_dpms(struct drm_crtc *crtc, int mode)
+{
+ struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+ int pipe = intel_crtc->pipe;
+ int plane = intel_crtc->plane;
+
+ /* XXX: When our outputs are all unaware of DPMS modes other than off
+ * and on, we should map those modes to DRM_MODE_DPMS_OFF in the CRTC.
+ */
+ switch (mode) {
+ case DRM_MODE_DPMS_ON:
+ case DRM_MODE_DPMS_STANDBY:
+ case DRM_MODE_DPMS_SUSPEND:
+ DRM_DEBUG_KMS("crtc %d/%d dpms on\n", pipe, plane);
+ ironlake_crtc_enable(crtc);
+ break;
+
+ case DRM_MODE_DPMS_OFF:
+ DRM_DEBUG_KMS("crtc %d/%d dpms off\n", pipe, plane);
+ ironlake_crtc_disable(crtc);
break;
}
}
static void intel_crtc_dpms_overlay(struct intel_crtc *intel_crtc, bool enable)
{
- struct intel_overlay *overlay;
- int ret;
-
if (!enable && intel_crtc->overlay) {
- overlay = intel_crtc->overlay;
- mutex_lock(&overlay->dev->struct_mutex);
- for (;;) {
- ret = intel_overlay_switch_off(overlay);
- if (ret == 0)
- break;
+ struct drm_device *dev = intel_crtc->base.dev;
- ret = intel_overlay_recover_from_interrupt(overlay, 0);
- if (ret != 0) {
- /* overlay doesn't react anymore. Usually
- * results in a black screen and an unkillable
- * X server. */
- BUG();
- overlay->hw_wedged = HW_WEDGED;
- break;
- }
- }
- mutex_unlock(&overlay->dev->struct_mutex);
+ mutex_lock(&dev->struct_mutex);
+ (void) intel_overlay_switch_off(intel_crtc->overlay, false);
+ mutex_unlock(&dev->struct_mutex);
}
- /* Let userspace switch the overlay on again. In most cases userspace
- * has to recompute where to put it anyway. */
- return;
+ /* Let userspace switch the overlay on again. In most cases userspace
+ * has to recompute where to put it anyway.
+ */
}
-static void i9xx_crtc_dpms(struct drm_crtc *crtc, int mode)
+static void i9xx_crtc_enable(struct drm_crtc *crtc)
{
struct drm_device *dev = crtc->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
int pipe = intel_crtc->pipe;
int plane = intel_crtc->plane;
- int dpll_reg = (pipe == 0) ? DPLL_A : DPLL_B;
- int dspcntr_reg = (plane == 0) ? DSPACNTR : DSPBCNTR;
- int dspbase_reg = (plane == 0) ? DSPAADDR : DSPBADDR;
- int pipeconf_reg = (pipe == 0) ? PIPEACONF : PIPEBCONF;
- u32 temp;
+ u32 reg, temp;
- /* XXX: When our outputs are all unaware of DPMS modes other than off
- * and on, we should map those modes to DRM_MODE_DPMS_OFF in the CRTC.
- */
- switch (mode) {
- case DRM_MODE_DPMS_ON:
- case DRM_MODE_DPMS_STANDBY:
- case DRM_MODE_DPMS_SUSPEND:
- /* Enable the DPLL */
- temp = I915_READ(dpll_reg);
- if ((temp & DPLL_VCO_ENABLE) == 0) {
- I915_WRITE(dpll_reg, temp);
- I915_READ(dpll_reg);
- /* Wait for the clocks to stabilize. */
- udelay(150);
- I915_WRITE(dpll_reg, temp | DPLL_VCO_ENABLE);
- I915_READ(dpll_reg);
- /* Wait for the clocks to stabilize. */
- udelay(150);
- I915_WRITE(dpll_reg, temp | DPLL_VCO_ENABLE);
- I915_READ(dpll_reg);
- /* Wait for the clocks to stabilize. */
- udelay(150);
- }
+ if (intel_crtc->active)
+ return;
- /* Enable the pipe */
- temp = I915_READ(pipeconf_reg);
- if ((temp & PIPEACONF_ENABLE) == 0)
- I915_WRITE(pipeconf_reg, temp | PIPEACONF_ENABLE);
-
- /* Enable the plane */
- temp = I915_READ(dspcntr_reg);
- if ((temp & DISPLAY_PLANE_ENABLE) == 0) {
- I915_WRITE(dspcntr_reg, temp | DISPLAY_PLANE_ENABLE);
- /* Flush the plane changes */
- I915_WRITE(dspbase_reg, I915_READ(dspbase_reg));
- }
+ intel_crtc->active = true;
+ intel_update_watermarks(dev);
- intel_crtc_load_lut(crtc);
+ /* Enable the DPLL */
+ reg = DPLL(pipe);
+ temp = I915_READ(reg);
+ if ((temp & DPLL_VCO_ENABLE) == 0) {
+ I915_WRITE(reg, temp);
- if ((IS_I965G(dev) || plane == 0))
- intel_update_fbc(crtc, &crtc->mode);
+ /* Wait for the clocks to stabilize. */
+ POSTING_READ(reg);
+ udelay(150);
- /* Give the overlay scaler a chance to enable if it's on this pipe */
- intel_crtc_dpms_overlay(intel_crtc, true);
- break;
- case DRM_MODE_DPMS_OFF:
- /* Give the overlay scaler a chance to disable if it's on this pipe */
- intel_crtc_dpms_overlay(intel_crtc, false);
- drm_vblank_off(dev, pipe);
-
- if (dev_priv->cfb_plane == plane &&
- dev_priv->display.disable_fbc)
- dev_priv->display.disable_fbc(dev);
-
- /* Disable display plane */
- temp = I915_READ(dspcntr_reg);
- if ((temp & DISPLAY_PLANE_ENABLE) != 0) {
- I915_WRITE(dspcntr_reg, temp & ~DISPLAY_PLANE_ENABLE);
- /* Flush the plane changes */
- I915_WRITE(dspbase_reg, I915_READ(dspbase_reg));
- I915_READ(dspbase_reg);
- }
+ I915_WRITE(reg, temp | DPLL_VCO_ENABLE);
- /* Don't disable pipe A or pipe A PLLs if needed */
- if (pipeconf_reg == PIPEACONF &&
- (dev_priv->quirks & QUIRK_PIPEA_FORCE)) {
- /* Wait for vblank for the disable to take effect */
+ /* Wait for the clocks to stabilize. */
+ POSTING_READ(reg);
+ udelay(150);
+
+ I915_WRITE(reg, temp | DPLL_VCO_ENABLE);
+
+ /* Wait for the clocks to stabilize. */
+ POSTING_READ(reg);
+ udelay(150);
+ }
+
+ /* Enable the pipe */
+ reg = PIPECONF(pipe);
+ temp = I915_READ(reg);
+ if ((temp & PIPECONF_ENABLE) == 0)
+ I915_WRITE(reg, temp | PIPECONF_ENABLE);
+
+ /* Enable the plane */
+ reg = DSPCNTR(plane);
+ temp = I915_READ(reg);
+ if ((temp & DISPLAY_PLANE_ENABLE) == 0) {
+ I915_WRITE(reg, temp | DISPLAY_PLANE_ENABLE);
+ intel_flush_display_plane(dev, plane);
+ }
+
+ intel_crtc_load_lut(crtc);
+ intel_update_fbc(dev);
+
+ /* Give the overlay scaler a chance to enable if it's on this pipe */
+ intel_crtc_dpms_overlay(intel_crtc, true);
+ intel_crtc_update_cursor(crtc, true);
+}
+
+static void i9xx_crtc_disable(struct drm_crtc *crtc)
+{
+ struct drm_device *dev = crtc->dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+ int pipe = intel_crtc->pipe;
+ int plane = intel_crtc->plane;
+ u32 reg, temp;
+
+ if (!intel_crtc->active)
+ return;
+
+ /* Give the overlay scaler a chance to disable if it's on this pipe */
+ intel_crtc_wait_for_pending_flips(crtc);
+ drm_vblank_off(dev, pipe);
+ intel_crtc_dpms_overlay(intel_crtc, false);
+ intel_crtc_update_cursor(crtc, false);
+
+ if (dev_priv->cfb_plane == plane &&
+ dev_priv->display.disable_fbc)
+ dev_priv->display.disable_fbc(dev);
+
+ /* Disable display plane */
+ reg = DSPCNTR(plane);
+ temp = I915_READ(reg);
+ if (temp & DISPLAY_PLANE_ENABLE) {
+ I915_WRITE(reg, temp & ~DISPLAY_PLANE_ENABLE);
+ /* Flush the plane changes */
+ intel_flush_display_plane(dev, plane);
+
+ /* Wait for vblank for the disable to take effect */
+ if (IS_GEN2(dev))
intel_wait_for_vblank(dev, pipe);
- goto skip_pipe_off;
- }
+ }
- /* Next, disable display pipes */
- temp = I915_READ(pipeconf_reg);
- if ((temp & PIPEACONF_ENABLE) != 0) {
- I915_WRITE(pipeconf_reg, temp & ~PIPEACONF_ENABLE);
- I915_READ(pipeconf_reg);
- }
+ /* Don't disable pipe A or pipe A PLLs if needed */
+ if (pipe == 0 && (dev_priv->quirks & QUIRK_PIPEA_FORCE))
+ goto done;
+
+ /* Next, disable display pipes */
+ reg = PIPECONF(pipe);
+ temp = I915_READ(reg);
+ if (temp & PIPECONF_ENABLE) {
+ I915_WRITE(reg, temp & ~PIPECONF_ENABLE);
/* Wait for the pipe to turn off */
+ POSTING_READ(reg);
intel_wait_for_pipe_off(dev, pipe);
+ }
+
+ reg = DPLL(pipe);
+ temp = I915_READ(reg);
+ if (temp & DPLL_VCO_ENABLE) {
+ I915_WRITE(reg, temp & ~DPLL_VCO_ENABLE);
- temp = I915_READ(dpll_reg);
- if ((temp & DPLL_VCO_ENABLE) != 0) {
- I915_WRITE(dpll_reg, temp & ~DPLL_VCO_ENABLE);
- I915_READ(dpll_reg);
- }
- skip_pipe_off:
/* Wait for the clocks to turn off. */
+ POSTING_READ(reg);
udelay(150);
+ }
+
+done:
+ intel_crtc->active = false;
+ intel_update_fbc(dev);
+ intel_update_watermarks(dev);
+ intel_clear_scanline_wait(dev);
+}
+
+static void i9xx_crtc_dpms(struct drm_crtc *crtc, int mode)
+{
+ /* XXX: When our outputs are all unaware of DPMS modes other than off
+ * and on, we should map those modes to DRM_MODE_DPMS_OFF in the CRTC.
+ */
+ switch (mode) {
+ case DRM_MODE_DPMS_ON:
+ case DRM_MODE_DPMS_STANDBY:
+ case DRM_MODE_DPMS_SUSPEND:
+ i9xx_crtc_enable(crtc);
+ break;
+ case DRM_MODE_DPMS_OFF:
+ i9xx_crtc_disable(crtc);
break;
}
}
@@ -2388,26 +2521,9 @@ static void intel_crtc_dpms(struct drm_crtc *crtc, int mode)
return;
intel_crtc->dpms_mode = mode;
- intel_crtc->cursor_on = mode == DRM_MODE_DPMS_ON;
-
- /* When switching on the display, ensure that SR is disabled
- * with multiple pipes prior to enabling to new pipe.
- *
- * When switching off the display, make sure the cursor is
- * properly hidden prior to disabling the pipe.
- */
- if (mode == DRM_MODE_DPMS_ON)
- intel_update_watermarks(dev);
- else
- intel_crtc_update_cursor(crtc);
dev_priv->display.dpms(crtc, mode);
- if (mode == DRM_MODE_DPMS_ON)
- intel_crtc_update_cursor(crtc);
- else
- intel_update_watermarks(dev);
-
if (!dev->primary->master)
return;
@@ -2432,16 +2548,46 @@ static void intel_crtc_dpms(struct drm_crtc *crtc, int mode)
}
}
-static void intel_crtc_prepare (struct drm_crtc *crtc)
+static void intel_crtc_disable(struct drm_crtc *crtc)
{
struct drm_crtc_helper_funcs *crtc_funcs = crtc->helper_private;
+ struct drm_device *dev = crtc->dev;
+
crtc_funcs->dpms(crtc, DRM_MODE_DPMS_OFF);
+
+ if (crtc->fb) {
+ mutex_lock(&dev->struct_mutex);
+ i915_gem_object_unpin(to_intel_framebuffer(crtc->fb)->obj);
+ mutex_unlock(&dev->struct_mutex);
+ }
+}
+
+/* Prepare for a mode set.
+ *
+ * Note we could be a lot smarter here. We need to figure out which outputs
+ * will be enabled, which disabled (in short, how the config will change)
+ * and perform the minimum necessary steps to accomplish that, e.g. updating
+ * watermarks, FBC configuration, making sure PLLs are programmed correctly,
+ * panel fitting is in the proper state, etc.
+ */
+static void i9xx_crtc_prepare(struct drm_crtc *crtc)
+{
+ i9xx_crtc_disable(crtc);
}
-static void intel_crtc_commit (struct drm_crtc *crtc)
+static void i9xx_crtc_commit(struct drm_crtc *crtc)
{
- struct drm_crtc_helper_funcs *crtc_funcs = crtc->helper_private;
- crtc_funcs->dpms(crtc, DRM_MODE_DPMS_ON);
+ i9xx_crtc_enable(crtc);
+}
+
+static void ironlake_crtc_prepare(struct drm_crtc *crtc)
+{
+ ironlake_crtc_disable(crtc);
+}
+
+static void ironlake_crtc_commit(struct drm_crtc *crtc)
+{
+ ironlake_crtc_enable(crtc);
}
void intel_encoder_prepare (struct drm_encoder *encoder)
@@ -2460,13 +2606,7 @@ void intel_encoder_commit (struct drm_encoder *encoder)
void intel_encoder_destroy(struct drm_encoder *encoder)
{
- struct intel_encoder *intel_encoder = enc_to_intel_encoder(encoder);
-
- if (intel_encoder->ddc_bus)
- intel_i2c_destroy(intel_encoder->ddc_bus);
-
- if (intel_encoder->i2c_bus)
- intel_i2c_destroy(intel_encoder->i2c_bus);
+ struct intel_encoder *intel_encoder = to_intel_encoder(encoder);
drm_encoder_cleanup(encoder);
kfree(intel_encoder);
@@ -2557,33 +2697,6 @@ static int i830_get_display_clock_speed(struct drm_device *dev)
return 133000;
}
-/**
- * Return the pipe currently connected to the panel fitter,
- * or -1 if the panel fitter is not present or not in use
- */
-int intel_panel_fitter_pipe (struct drm_device *dev)
-{
- struct drm_i915_private *dev_priv = dev->dev_private;
- u32 pfit_control;
-
- /* i830 doesn't have a panel fitter */
- if (IS_I830(dev))
- return -1;
-
- pfit_control = I915_READ(PFIT_CONTROL);
-
- /* See if the panel fitter is in use */
- if ((pfit_control & PFIT_ENABLE) == 0)
- return -1;
-
- /* 965 can place panel fitter on either pipe */
- if (IS_I965G(dev))
- return (pfit_control >> 29) & 0x3;
-
- /* older chips can only use pipe 1 */
- return 1;
-}
-
struct fdi_m_n {
u32 tu;
u32 gmch_m;
@@ -2601,27 +2714,19 @@ fdi_reduce_ratio(u32 *num, u32 *den)
}
}
-#define DATA_N 0x800000
-#define LINK_N 0x80000
-
static void
ironlake_compute_m_n(int bits_per_pixel, int nlanes, int pixel_clock,
int link_clock, struct fdi_m_n *m_n)
{
- u64 temp;
-
m_n->tu = 64; /* default size */
- temp = (u64) DATA_N * pixel_clock;
- temp = div_u64(temp, link_clock);
- m_n->gmch_m = div_u64(temp * bits_per_pixel, nlanes);
- m_n->gmch_m >>= 3; /* convert to bytes_per_pixel */
- m_n->gmch_n = DATA_N;
+ /* BUG_ON(pixel_clock > INT_MAX / 36); */
+ m_n->gmch_m = bits_per_pixel * pixel_clock;
+ m_n->gmch_n = link_clock * nlanes * 8;
fdi_reduce_ratio(&m_n->gmch_m, &m_n->gmch_n);
- temp = (u64) LINK_N * pixel_clock;
- m_n->link_m = div_u64(temp, link_clock);
- m_n->link_n = LINK_N;
+ m_n->link_m = pixel_clock;
+ m_n->link_n = link_clock;
fdi_reduce_ratio(&m_n->link_m, &m_n->link_n);
}
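For illustration only: the rewritten ironlake_compute_m_n() keeps the data ratio as bits_per_pixel * pixel_clock : 8 * nlanes * link_clock and the link ratio as pixel_clock : link_clock before reducing each. A standalone rerun of that arithmetic for a hypothetical 148500 kHz mode on 4 lanes at 270000 kHz, using a plain gcd reduction (the driver's fdi_reduce_ratio may clamp to the register field width instead):

#include <stdio.h>

static unsigned int gcd(unsigned int a, unsigned int b)
{
        while (b) {
                unsigned int t = a % b;
                a = b;
                b = t;
        }
        return a;
}

static void reduce(unsigned int *num, unsigned int *den)
{
        unsigned int g = gcd(*num, *den);
        *num /= g;
        *den /= g;
}

int main(void)
{
        unsigned int bpp = 24, nlanes = 4;
        unsigned int pixel_clock = 148500, link_clock = 270000; /* kHz */
        unsigned int gmch_m = bpp * pixel_clock;                /* data M */
        unsigned int gmch_n = link_clock * nlanes * 8;          /* data N */
        unsigned int link_m = pixel_clock, link_n = link_clock;

        reduce(&gmch_m, &gmch_n);
        reduce(&link_m, &link_n);
        printf("data M/N = %u/%u, link M/N = %u/%u\n",
               gmch_m, gmch_n, link_m, link_n);
        return 0;
}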
@@ -2902,7 +3007,7 @@ static int i9xx_get_fifo_size(struct drm_device *dev, int plane)
size = ((dsparb >> DSPARB_CSTART_SHIFT) & 0x7f) - size;
DRM_DEBUG_KMS("FIFO size - (0x%08x) %s: %d\n", dsparb,
- plane ? "B" : "A", size);
+ plane ? "B" : "A", size);
return size;
}
@@ -2919,7 +3024,7 @@ static int i85x_get_fifo_size(struct drm_device *dev, int plane)
size >>= 1; /* Convert to cachelines */
DRM_DEBUG_KMS("FIFO size - (0x%08x) %s: %d\n", dsparb,
- plane ? "B" : "A", size);
+ plane ? "B" : "A", size);
return size;
}
@@ -2934,8 +3039,8 @@ static int i845_get_fifo_size(struct drm_device *dev, int plane)
size >>= 2; /* Convert to cachelines */
DRM_DEBUG_KMS("FIFO size - (0x%08x) %s: %d\n", dsparb,
- plane ? "B" : "A",
- size);
+ plane ? "B" : "A",
+ size);
return size;
}
@@ -2950,14 +3055,14 @@ static int i830_get_fifo_size(struct drm_device *dev, int plane)
size >>= 1; /* Convert to cachelines */
DRM_DEBUG_KMS("FIFO size - (0x%08x) %s: %d\n", dsparb,
- plane ? "B" : "A", size);
+ plane ? "B" : "A", size);
return size;
}
static void pineview_update_wm(struct drm_device *dev, int planea_clock,
- int planeb_clock, int sr_hdisplay, int unused,
- int pixel_size)
+ int planeb_clock, int sr_hdisplay, int unused,
+ int pixel_size)
{
struct drm_i915_private *dev_priv = dev->dev_private;
const struct cxsr_latency *latency;
@@ -3069,13 +3174,13 @@ static void g4x_update_wm(struct drm_device *dev, int planea_clock,
/* Use ns/us then divide to preserve precision */
sr_entries = (((sr_latency_ns / line_time_us) + 1000) / 1000) *
- pixel_size * sr_hdisplay;
+ pixel_size * sr_hdisplay;
sr_entries = DIV_ROUND_UP(sr_entries, cacheline_size);
entries_required = (((sr_latency_ns / line_time_us) +
1000) / 1000) * pixel_size * 64;
entries_required = DIV_ROUND_UP(entries_required,
- g4x_cursor_wm_info.cacheline_size);
+ g4x_cursor_wm_info.cacheline_size);
cursor_sr = entries_required + g4x_cursor_wm_info.guard_size;
if (cursor_sr > g4x_cursor_wm_info.max_wm)
@@ -3087,7 +3192,7 @@ static void g4x_update_wm(struct drm_device *dev, int planea_clock,
} else {
/* Turn off self refresh if both pipes are enabled */
I915_WRITE(FW_BLC_SELF, I915_READ(FW_BLC_SELF)
- & ~FW_BLC_SELF_EN);
+ & ~FW_BLC_SELF_EN);
}
DRM_DEBUG("Setting FIFO watermarks - A: %d, B: %d, SR %d\n",
@@ -3125,7 +3230,7 @@ static void i965_update_wm(struct drm_device *dev, int planea_clock,
/* Use ns/us then divide to preserve precision */
sr_entries = (((sr_latency_ns / line_time_us) + 1000) / 1000) *
- pixel_size * sr_hdisplay;
+ pixel_size * sr_hdisplay;
sr_entries = DIV_ROUND_UP(sr_entries, I915_FIFO_LINE_SIZE);
DRM_DEBUG("self-refresh entries: %d\n", sr_entries);
srwm = I965_FIFO_SIZE - sr_entries;
@@ -3134,11 +3239,11 @@ static void i965_update_wm(struct drm_device *dev, int planea_clock,
srwm &= 0x1ff;
sr_entries = (((sr_latency_ns / line_time_us) + 1000) / 1000) *
- pixel_size * 64;
+ pixel_size * 64;
sr_entries = DIV_ROUND_UP(sr_entries,
i965_cursor_wm_info.cacheline_size);
cursor_sr = i965_cursor_wm_info.fifo_size -
- (sr_entries + i965_cursor_wm_info.guard_size);
+ (sr_entries + i965_cursor_wm_info.guard_size);
if (cursor_sr > i965_cursor_wm_info.max_wm)
cursor_sr = i965_cursor_wm_info.max_wm;
@@ -3146,11 +3251,11 @@ static void i965_update_wm(struct drm_device *dev, int planea_clock,
DRM_DEBUG_KMS("self-refresh watermark: display plane %d "
"cursor %d\n", srwm, cursor_sr);
- if (IS_I965GM(dev))
+ if (IS_CRESTLINE(dev))
I915_WRITE(FW_BLC_SELF, FW_BLC_SELF_EN);
} else {
/* Turn off self refresh if both pipes are enabled */
- if (IS_I965GM(dev))
+ if (IS_CRESTLINE(dev))
I915_WRITE(FW_BLC_SELF, I915_READ(FW_BLC_SELF)
& ~FW_BLC_SELF_EN);
}
@@ -3180,9 +3285,9 @@ static void i9xx_update_wm(struct drm_device *dev, int planea_clock,
int sr_clock, sr_entries = 0;
/* Create copies of the base settings for each pipe */
- if (IS_I965GM(dev) || IS_I945GM(dev))
+ if (IS_CRESTLINE(dev) || IS_I945GM(dev))
planea_params = planeb_params = i945_wm_info;
- else if (IS_I9XX(dev))
+ else if (!IS_GEN2(dev))
planea_params = planeb_params = i915_wm_info;
else
planea_params = planeb_params = i855_wm_info;
@@ -3217,7 +3322,7 @@ static void i9xx_update_wm(struct drm_device *dev, int planea_clock,
/* Use ns/us then divide to preserve precision */
sr_entries = (((sr_latency_ns / line_time_us) + 1000) / 1000) *
- pixel_size * sr_hdisplay;
+ pixel_size * sr_hdisplay;
sr_entries = DIV_ROUND_UP(sr_entries, cacheline_size);
DRM_DEBUG_KMS("self-refresh entries: %d\n", sr_entries);
srwm = total_size - sr_entries;
@@ -3242,7 +3347,7 @@ static void i9xx_update_wm(struct drm_device *dev, int planea_clock,
}
DRM_DEBUG_KMS("Setting FIFO watermarks - A: %d, B: %d, C: %d, SR %d\n",
- planea_wm, planeb_wm, cwm, srwm);
+ planea_wm, planeb_wm, cwm, srwm);
fwater_lo = ((planeb_wm & 0x3f) << 16) | (planea_wm & 0x3f);
fwater_hi = (cwm & 0x1f);
@@ -3276,146 +3381,130 @@ static void i830_update_wm(struct drm_device *dev, int planea_clock, int unused,
#define ILK_LP0_PLANE_LATENCY 700
#define ILK_LP0_CURSOR_LATENCY 1300
-static void ironlake_update_wm(struct drm_device *dev, int planea_clock,
- int planeb_clock, int sr_hdisplay, int sr_htotal,
- int pixel_size)
+static bool ironlake_compute_wm0(struct drm_device *dev,
+ int pipe,
+ int *plane_wm,
+ int *cursor_wm)
{
- struct drm_i915_private *dev_priv = dev->dev_private;
- int planea_wm, planeb_wm, cursora_wm, cursorb_wm;
- int sr_wm, cursor_wm;
- unsigned long line_time_us;
- int sr_clock, entries_required;
- u32 reg_value;
- int line_count;
- int planea_htotal = 0, planeb_htotal = 0;
struct drm_crtc *crtc;
+ int htotal, hdisplay, clock, pixel_size = 0;
+ int line_time_us, line_count, entries;
- /* Need htotal for all active display plane */
- list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
- struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
- if (intel_crtc->dpms_mode == DRM_MODE_DPMS_ON) {
- if (intel_crtc->plane == 0)
- planea_htotal = crtc->mode.htotal;
- else
- planeb_htotal = crtc->mode.htotal;
- }
- }
-
- /* Calculate and update the watermark for plane A */
- if (planea_clock) {
- entries_required = ((planea_clock / 1000) * pixel_size *
- ILK_LP0_PLANE_LATENCY) / 1000;
- entries_required = DIV_ROUND_UP(entries_required,
- ironlake_display_wm_info.cacheline_size);
- planea_wm = entries_required +
- ironlake_display_wm_info.guard_size;
-
- if (planea_wm > (int)ironlake_display_wm_info.max_wm)
- planea_wm = ironlake_display_wm_info.max_wm;
-
- /* Use the large buffer method to calculate cursor watermark */
- line_time_us = (planea_htotal * 1000) / planea_clock;
-
- /* Use ns/us then divide to preserve precision */
- line_count = (ILK_LP0_CURSOR_LATENCY / line_time_us + 1000) / 1000;
-
- /* calculate the cursor watermark for cursor A */
- entries_required = line_count * 64 * pixel_size;
- entries_required = DIV_ROUND_UP(entries_required,
- ironlake_cursor_wm_info.cacheline_size);
- cursora_wm = entries_required + ironlake_cursor_wm_info.guard_size;
- if (cursora_wm > ironlake_cursor_wm_info.max_wm)
- cursora_wm = ironlake_cursor_wm_info.max_wm;
-
- reg_value = I915_READ(WM0_PIPEA_ILK);
- reg_value &= ~(WM0_PIPE_PLANE_MASK | WM0_PIPE_CURSOR_MASK);
- reg_value |= (planea_wm << WM0_PIPE_PLANE_SHIFT) |
- (cursora_wm & WM0_PIPE_CURSOR_MASK);
- I915_WRITE(WM0_PIPEA_ILK, reg_value);
- DRM_DEBUG_KMS("FIFO watermarks For pipe A - plane %d, "
- "cursor: %d\n", planea_wm, cursora_wm);
- }
- /* Calculate and update the watermark for plane B */
- if (planeb_clock) {
- entries_required = ((planeb_clock / 1000) * pixel_size *
- ILK_LP0_PLANE_LATENCY) / 1000;
- entries_required = DIV_ROUND_UP(entries_required,
- ironlake_display_wm_info.cacheline_size);
- planeb_wm = entries_required +
- ironlake_display_wm_info.guard_size;
-
- if (planeb_wm > (int)ironlake_display_wm_info.max_wm)
- planeb_wm = ironlake_display_wm_info.max_wm;
+ crtc = intel_get_crtc_for_pipe(dev, pipe);
+ if (crtc->fb == NULL || !crtc->enabled)
+ return false;
- /* Use the large buffer method to calculate cursor watermark */
- line_time_us = (planeb_htotal * 1000) / planeb_clock;
+ htotal = crtc->mode.htotal;
+ hdisplay = crtc->mode.hdisplay;
+ clock = crtc->mode.clock;
+ pixel_size = crtc->fb->bits_per_pixel / 8;
+
+ /* Use the small buffer method to calculate plane watermark */
+ entries = ((clock * pixel_size / 1000) * ILK_LP0_PLANE_LATENCY) / 1000;
+ entries = DIV_ROUND_UP(entries,
+ ironlake_display_wm_info.cacheline_size);
+ *plane_wm = entries + ironlake_display_wm_info.guard_size;
+ if (*plane_wm > (int)ironlake_display_wm_info.max_wm)
+ *plane_wm = ironlake_display_wm_info.max_wm;
+
+ /* Use the large buffer method to calculate cursor watermark */
+ line_time_us = ((htotal * 1000) / clock);
+ line_count = (ILK_LP0_CURSOR_LATENCY / line_time_us + 1000) / 1000;
+ entries = line_count * 64 * pixel_size;
+ entries = DIV_ROUND_UP(entries,
+ ironlake_cursor_wm_info.cacheline_size);
+ *cursor_wm = entries + ironlake_cursor_wm_info.guard_size;
+ if (*cursor_wm > ironlake_cursor_wm_info.max_wm)
+ *cursor_wm = ironlake_cursor_wm_info.max_wm;
- /* Use ns/us then divide to preserve precision */
- line_count = (ILK_LP0_CURSOR_LATENCY / line_time_us + 1000) / 1000;
+ return true;
+}
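For illustration only: ironlake_compute_wm0() above sizes the LP0 watermarks from the mode clock, pixel size and the fixed LP0 latencies. A standalone rerun of the same arithmetic with placeholder FIFO parameters (cacheline 64, guard 2 — hypothetical values, not the driver's ironlake_*_wm_info tables):

#include <stdio.h>

#define ILK_LP0_PLANE_LATENCY  700   /* ns, as in the code above */
#define ILK_LP0_CURSOR_LATENCY 1300  /* ns, as in the code above */
#define DIV_ROUND_UP(n, d)     (((n) + (d) - 1) / (d))

int main(void)
{
        /* hypothetical 1920x1080-ish mode with a 32bpp framebuffer */
        int clock = 148500, htotal = 2200, pixel_size = 4;
        /* placeholder FIFO parameters, not the real wm_info values */
        int cacheline = 64, guard = 2;
        int entries, plane_wm, cursor_wm, line_time_us, line_count;

        /* small buffer method for the plane watermark */
        entries = ((clock * pixel_size / 1000) * ILK_LP0_PLANE_LATENCY) / 1000;
        entries = DIV_ROUND_UP(entries, cacheline);
        plane_wm = entries + guard;

        /* large buffer method for the cursor watermark */
        line_time_us = (htotal * 1000) / clock;
        line_count = (ILK_LP0_CURSOR_LATENCY / line_time_us + 1000) / 1000;
        entries = line_count * 64 * pixel_size;
        entries = DIV_ROUND_UP(entries, cacheline);
        cursor_wm = entries + guard;

        printf("plane_wm = %d, cursor_wm = %d\n", plane_wm, cursor_wm);
        return 0;
}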
- /* calculate the cursor watermark for cursor B */
- entries_required = line_count * 64 * pixel_size;
- entries_required = DIV_ROUND_UP(entries_required,
- ironlake_cursor_wm_info.cacheline_size);
- cursorb_wm = entries_required + ironlake_cursor_wm_info.guard_size;
- if (cursorb_wm > ironlake_cursor_wm_info.max_wm)
- cursorb_wm = ironlake_cursor_wm_info.max_wm;
+static void ironlake_update_wm(struct drm_device *dev,
+ int planea_clock, int planeb_clock,
+ int sr_hdisplay, int sr_htotal,
+ int pixel_size)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ int plane_wm, cursor_wm, enabled;
+ int tmp;
+
+ enabled = 0;
+ if (ironlake_compute_wm0(dev, 0, &plane_wm, &cursor_wm)) {
+ I915_WRITE(WM0_PIPEA_ILK,
+ (plane_wm << WM0_PIPE_PLANE_SHIFT) | cursor_wm);
+ DRM_DEBUG_KMS("FIFO watermarks For pipe A -"
+ " plane %d, " "cursor: %d\n",
+ plane_wm, cursor_wm);
+ enabled++;
+ }
- reg_value = I915_READ(WM0_PIPEB_ILK);
- reg_value &= ~(WM0_PIPE_PLANE_MASK | WM0_PIPE_CURSOR_MASK);
- reg_value |= (planeb_wm << WM0_PIPE_PLANE_SHIFT) |
- (cursorb_wm & WM0_PIPE_CURSOR_MASK);
- I915_WRITE(WM0_PIPEB_ILK, reg_value);
- DRM_DEBUG_KMS("FIFO watermarks For pipe B - plane %d, "
- "cursor: %d\n", planeb_wm, cursorb_wm);
+ if (ironlake_compute_wm0(dev, 1, &plane_wm, &cursor_wm)) {
+ I915_WRITE(WM0_PIPEB_ILK,
+ (plane_wm << WM0_PIPE_PLANE_SHIFT) | cursor_wm);
+ DRM_DEBUG_KMS("FIFO watermarks For pipe B -"
+ " plane %d, cursor: %d\n",
+ plane_wm, cursor_wm);
+ enabled++;
}
/*
* Calculate and update the self-refresh watermark only when one
* display plane is used.
*/
- if (!planea_clock || !planeb_clock) {
-
+ tmp = 0;
+ if (enabled == 1 && /* XXX disabled due to buggy implementation? */ 0) {
+ unsigned long line_time_us;
+ int small, large, plane_fbc;
+ int sr_clock, entries;
+ int line_count, line_size;
/* Read the self-refresh latency. The unit is 0.5us */
int ilk_sr_latency = I915_READ(MLTR_ILK) & ILK_SRLT_MASK;
sr_clock = planea_clock ? planea_clock : planeb_clock;
- line_time_us = ((sr_htotal * 1000) / sr_clock);
+ line_time_us = (sr_htotal * 1000) / sr_clock;
/* Use ns/us then divide to preserve precision */
line_count = ((ilk_sr_latency * 500) / line_time_us + 1000)
- / 1000;
+ / 1000;
+ line_size = sr_hdisplay * pixel_size;
- /* calculate the self-refresh watermark for display plane */
- entries_required = line_count * sr_hdisplay * pixel_size;
- entries_required = DIV_ROUND_UP(entries_required,
- ironlake_display_srwm_info.cacheline_size);
- sr_wm = entries_required +
- ironlake_display_srwm_info.guard_size;
+ /* Use the minimum of the small and large buffer method for primary */
+ small = ((sr_clock * pixel_size / 1000) * (ilk_sr_latency * 500)) / 1000;
+ large = line_count * line_size;
- /* calculate the self-refresh watermark for display cursor */
- entries_required = line_count * pixel_size * 64;
- entries_required = DIV_ROUND_UP(entries_required,
- ironlake_cursor_srwm_info.cacheline_size);
- cursor_wm = entries_required +
- ironlake_cursor_srwm_info.guard_size;
+ entries = DIV_ROUND_UP(min(small, large),
+ ironlake_display_srwm_info.cacheline_size);
- /* configure watermark and enable self-refresh */
- reg_value = I915_READ(WM1_LP_ILK);
- reg_value &= ~(WM1_LP_LATENCY_MASK | WM1_LP_SR_MASK |
- WM1_LP_CURSOR_MASK);
- reg_value |= (ilk_sr_latency << WM1_LP_LATENCY_SHIFT) |
- (sr_wm << WM1_LP_SR_SHIFT) | cursor_wm;
+ plane_fbc = entries * 64;
+ plane_fbc = DIV_ROUND_UP(plane_fbc, line_size);
- I915_WRITE(WM1_LP_ILK, reg_value);
- DRM_DEBUG_KMS("self-refresh watermark: display plane %d "
- "cursor %d\n", sr_wm, cursor_wm);
+ plane_wm = entries + ironlake_display_srwm_info.guard_size;
+ if (plane_wm > (int)ironlake_display_srwm_info.max_wm)
+ plane_wm = ironlake_display_srwm_info.max_wm;
- } else {
- /* Turn off self refresh if both pipes are enabled */
- I915_WRITE(WM1_LP_ILK, I915_READ(WM1_LP_ILK) & ~WM1_LP_SR_EN);
- }
+ /* calculate the self-refresh watermark for display cursor */
+ entries = line_count * pixel_size * 64;
+ entries = DIV_ROUND_UP(entries,
+ ironlake_cursor_srwm_info.cacheline_size);
+
+ cursor_wm = entries + ironlake_cursor_srwm_info.guard_size;
+ if (cursor_wm > (int)ironlake_cursor_srwm_info.max_wm)
+ cursor_wm = ironlake_cursor_srwm_info.max_wm;
+
+ /* configure watermark and enable self-refresh */
+ tmp = (WM1_LP_SR_EN |
+ (ilk_sr_latency << WM1_LP_LATENCY_SHIFT) |
+ (plane_fbc << WM1_LP_FBC_SHIFT) |
+ (plane_wm << WM1_LP_SR_SHIFT) |
+ cursor_wm);
+ DRM_DEBUG_KMS("self-refresh watermark: display plane %d, fbc lines %d,"
+ " cursor %d\n", plane_wm, plane_fbc, cursor_wm);
+ }
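+ /* tmp is still 0 unless the self-refresh branch above ran, so this write also clears WM1_LP_SR_EN when SR does not apply */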
+ I915_WRITE(WM1_LP_ILK, tmp);
+ /* XXX setup WM2 and WM3 */
}
+
/**
* intel_update_watermarks - update FIFO watermark values based on current modes
*
@@ -3447,7 +3536,7 @@ static void ironlake_update_wm(struct drm_device *dev, int planea_clock,
*
* We don't use the sprite, so we can ignore that. And on Crestline we have
* to set the non-SR watermarks to 8.
- */
+ */
static void intel_update_watermarks(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
@@ -3463,15 +3552,15 @@ static void intel_update_watermarks(struct drm_device *dev)
/* Get the clock config from both planes */
list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
- if (intel_crtc->dpms_mode == DRM_MODE_DPMS_ON) {
+ if (intel_crtc->active) {
enabled++;
if (intel_crtc->plane == 0) {
DRM_DEBUG_KMS("plane A (pipe %d) clock: %d\n",
- intel_crtc->pipe, crtc->mode.clock);
+ intel_crtc->pipe, crtc->mode.clock);
planea_clock = crtc->mode.clock;
} else {
DRM_DEBUG_KMS("plane B (pipe %d) clock: %d\n",
- intel_crtc->pipe, crtc->mode.clock);
+ intel_crtc->pipe, crtc->mode.clock);
planeb_clock = crtc->mode.clock;
}
sr_hdisplay = crtc->mode.hdisplay;
@@ -3502,62 +3591,35 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc,
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
int pipe = intel_crtc->pipe;
int plane = intel_crtc->plane;
- int fp_reg = (pipe == 0) ? FPA0 : FPB0;
- int dpll_reg = (pipe == 0) ? DPLL_A : DPLL_B;
- int dpll_md_reg = (intel_crtc->pipe == 0) ? DPLL_A_MD : DPLL_B_MD;
- int dspcntr_reg = (plane == 0) ? DSPACNTR : DSPBCNTR;
- int pipeconf_reg = (pipe == 0) ? PIPEACONF : PIPEBCONF;
- int htot_reg = (pipe == 0) ? HTOTAL_A : HTOTAL_B;
- int hblank_reg = (pipe == 0) ? HBLANK_A : HBLANK_B;
- int hsync_reg = (pipe == 0) ? HSYNC_A : HSYNC_B;
- int vtot_reg = (pipe == 0) ? VTOTAL_A : VTOTAL_B;
- int vblank_reg = (pipe == 0) ? VBLANK_A : VBLANK_B;
- int vsync_reg = (pipe == 0) ? VSYNC_A : VSYNC_B;
- int dspsize_reg = (plane == 0) ? DSPASIZE : DSPBSIZE;
- int dsppos_reg = (plane == 0) ? DSPAPOS : DSPBPOS;
- int pipesrc_reg = (pipe == 0) ? PIPEASRC : PIPEBSRC;
+ u32 fp_reg, dpll_reg;
int refclk, num_connectors = 0;
intel_clock_t clock, reduced_clock;
- u32 dpll = 0, fp = 0, fp2 = 0, dspcntr, pipeconf;
+ u32 dpll, fp = 0, fp2 = 0, dspcntr, pipeconf;
bool ok, has_reduced_clock = false, is_sdvo = false, is_dvo = false;
bool is_crt = false, is_lvds = false, is_tv = false, is_dp = false;
struct intel_encoder *has_edp_encoder = NULL;
struct drm_mode_config *mode_config = &dev->mode_config;
- struct drm_encoder *encoder;
+ struct intel_encoder *encoder;
const intel_limit_t *limit;
int ret;
struct fdi_m_n m_n = {0};
- int data_m1_reg = (pipe == 0) ? PIPEA_DATA_M1 : PIPEB_DATA_M1;
- int data_n1_reg = (pipe == 0) ? PIPEA_DATA_N1 : PIPEB_DATA_N1;
- int link_m1_reg = (pipe == 0) ? PIPEA_LINK_M1 : PIPEB_LINK_M1;
- int link_n1_reg = (pipe == 0) ? PIPEA_LINK_N1 : PIPEB_LINK_N1;
- int pch_fp_reg = (pipe == 0) ? PCH_FPA0 : PCH_FPB0;
- int pch_dpll_reg = (pipe == 0) ? PCH_DPLL_A : PCH_DPLL_B;
- int fdi_rx_reg = (pipe == 0) ? FDI_RXA_CTL : FDI_RXB_CTL;
- int fdi_tx_reg = (pipe == 0) ? FDI_TXA_CTL : FDI_TXB_CTL;
- int trans_dpll_sel = (pipe == 0) ? 0 : 1;
- int lvds_reg = LVDS;
- u32 temp;
- int sdvo_pixel_multiply;
+ u32 reg, temp;
int target_clock;
drm_vblank_pre_modeset(dev, pipe);
- list_for_each_entry(encoder, &mode_config->encoder_list, head) {
- struct intel_encoder *intel_encoder;
-
- if (encoder->crtc != crtc)
+ list_for_each_entry(encoder, &mode_config->encoder_list, base.head) {
+ if (encoder->base.crtc != crtc)
continue;
- intel_encoder = enc_to_intel_encoder(encoder);
- switch (intel_encoder->type) {
+ switch (encoder->type) {
case INTEL_OUTPUT_LVDS:
is_lvds = true;
break;
case INTEL_OUTPUT_SDVO:
case INTEL_OUTPUT_HDMI:
is_sdvo = true;
- if (intel_encoder->needs_tv_clock)
+ if (encoder->needs_tv_clock)
is_tv = true;
break;
case INTEL_OUTPUT_DVO:
@@ -3573,7 +3635,7 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc,
is_dp = true;
break;
case INTEL_OUTPUT_EDP:
- has_edp_encoder = intel_encoder;
+ has_edp_encoder = encoder;
break;
}
@@ -3583,15 +3645,15 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc,
if (is_lvds && dev_priv->lvds_use_ssc && num_connectors < 2) {
refclk = dev_priv->lvds_ssc_freq * 1000;
DRM_DEBUG_KMS("using SSC reference clock of %d MHz\n",
- refclk / 1000);
- } else if (IS_I9XX(dev)) {
+ refclk / 1000);
+ } else if (!IS_GEN2(dev)) {
refclk = 96000;
- if (HAS_PCH_SPLIT(dev))
+ if (HAS_PCH_SPLIT(dev) &&
+ (!has_edp_encoder || intel_encoder_is_pch_edp(&has_edp_encoder->base)))
refclk = 120000; /* 120Mhz refclk */
} else {
refclk = 48000;
}
-
/*
* Returns a set of divisors for the desired target clock with the given
@@ -3607,13 +3669,13 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc,
}
/* Ensure that the cursor is valid for the new mode before changing... */
- intel_crtc_update_cursor(crtc);
+ intel_crtc_update_cursor(crtc, true);
if (is_lvds && dev_priv->lvds_downclock_avail) {
has_reduced_clock = limit->find_pll(limit, crtc,
- dev_priv->lvds_downclock,
- refclk,
- &reduced_clock);
+ dev_priv->lvds_downclock,
+ refclk,
+ &reduced_clock);
if (has_reduced_clock && (clock.p != reduced_clock.p)) {
/*
* If the different P is found, it means that we can't
@@ -3622,7 +3684,7 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc,
* feature.
*/
DRM_DEBUG_KMS("Different P is found for "
- "LVDS clock/downclock\n");
+ "LVDS clock/downclock\n");
has_reduced_clock = 0;
}
}
@@ -3630,14 +3692,14 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc,
this mirrors vbios setting. */
if (is_sdvo && is_tv) {
if (adjusted_mode->clock >= 100000
- && adjusted_mode->clock < 140500) {
+ && adjusted_mode->clock < 140500) {
clock.p1 = 2;
clock.p2 = 10;
clock.n = 3;
clock.m1 = 16;
clock.m2 = 8;
} else if (adjusted_mode->clock >= 140500
- && adjusted_mode->clock <= 200000) {
+ && adjusted_mode->clock <= 200000) {
clock.p1 = 1;
clock.p2 = 10;
clock.n = 6;
@@ -3648,35 +3710,43 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc,
/* FDI link */
if (HAS_PCH_SPLIT(dev)) {
+ int pixel_multiplier = intel_mode_get_pixel_multiplier(adjusted_mode);
int lane = 0, link_bw, bpp;
- /* eDP doesn't require FDI link, so just set DP M/N
+ /* CPU eDP doesn't require FDI link, so just set DP M/N
according to current link config */
- if (has_edp_encoder) {
+ if (has_edp_encoder && !intel_encoder_is_pch_edp(&has_edp_encoder->base)) {
target_clock = mode->clock;
intel_edp_link_config(has_edp_encoder,
&lane, &link_bw);
} else {
- /* DP over FDI requires target mode clock
+ /* [e]DP over FDI requires target mode clock
instead of link clock */
- if (is_dp)
+ if (is_dp || intel_encoder_is_pch_edp(&has_edp_encoder->base))
target_clock = mode->clock;
else
target_clock = adjusted_mode->clock;
- link_bw = 270000;
+
+ /* FDI is a binary signal running at ~2.7GHz, encoding
+ * each output octet as 10 bits. The actual frequency
+ * is stored as a divider into a 100MHz clock, and the
+ * mode pixel clock is stored in units of 1KHz.
+ * Hence the bw of each lane in terms of the mode signal
+ * is:
+ */
+ link_bw = intel_fdi_link_freq(dev) * MHz(100)/KHz(1)/10;
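+ /* e.g. a reported divider of 27 (a 2.7GHz link) gives 27 * 100000kHz / 10 = 270000kHz
+ * per lane, i.e. the value that used to be hard-coded here (illustrative arithmetic). */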
}
/* determine panel color depth */
- temp = I915_READ(pipeconf_reg);
+ temp = I915_READ(PIPECONF(pipe));
temp &= ~PIPE_BPC_MASK;
if (is_lvds) {
- int lvds_reg = I915_READ(PCH_LVDS);
/* the BPC will be 6 if it is 18-bit LVDS panel */
- if ((lvds_reg & LVDS_A3_POWER_MASK) == LVDS_A3_POWER_UP)
+ if ((I915_READ(PCH_LVDS) & LVDS_A3_POWER_MASK) == LVDS_A3_POWER_UP)
temp |= PIPE_8BPC;
else
temp |= PIPE_6BPC;
- } else if (has_edp_encoder || (is_dp && intel_pch_has_edp(crtc))) {
- switch (dev_priv->edp_bpp/3) {
+ } else if (has_edp_encoder) {
+ switch (dev_priv->edp.bpp/3) {
case 8:
temp |= PIPE_8BPC;
break;
@@ -3692,8 +3762,7 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc,
}
} else
temp |= PIPE_8BPC;
- I915_WRITE(pipeconf_reg, temp);
- I915_READ(pipeconf_reg);
+ I915_WRITE(PIPECONF(pipe), temp);
switch (temp & PIPE_BPC_MASK) {
case PIPE_8BPC:
@@ -3725,6 +3794,8 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc,
intel_crtc->fdi_lanes = lane;
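+ /* An SDVO/HDMI pixel multiplier runs the port clock at a multiple of the mode
+ * clock, so scale the per-lane budget to match before computing M/N (inferred rationale). */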
+ if (pixel_multiplier > 1)
+ link_bw *= pixel_multiplier;
ironlake_compute_m_n(bpp, lane, target_clock, link_bw, &m_n);
}
@@ -3738,33 +3809,39 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc,
/* Always enable nonspread source */
temp &= ~DREF_NONSPREAD_SOURCE_MASK;
temp |= DREF_NONSPREAD_SOURCE_ENABLE;
- I915_WRITE(PCH_DREF_CONTROL, temp);
- POSTING_READ(PCH_DREF_CONTROL);
-
temp &= ~DREF_SSC_SOURCE_MASK;
temp |= DREF_SSC_SOURCE_ENABLE;
I915_WRITE(PCH_DREF_CONTROL, temp);
- POSTING_READ(PCH_DREF_CONTROL);
+ POSTING_READ(PCH_DREF_CONTROL);
udelay(200);
if (has_edp_encoder) {
if (dev_priv->lvds_use_ssc) {
temp |= DREF_SSC1_ENABLE;
I915_WRITE(PCH_DREF_CONTROL, temp);
- POSTING_READ(PCH_DREF_CONTROL);
-
- udelay(200);
- temp &= ~DREF_CPU_SOURCE_OUTPUT_MASK;
- temp |= DREF_CPU_SOURCE_OUTPUT_DOWNSPREAD;
- I915_WRITE(PCH_DREF_CONTROL, temp);
POSTING_READ(PCH_DREF_CONTROL);
+ udelay(200);
+ }
+ temp &= ~DREF_CPU_SOURCE_OUTPUT_MASK;
+
+ /* Enable CPU source on CPU attached eDP */
+ if (!intel_encoder_is_pch_edp(&has_edp_encoder->base)) {
+ if (dev_priv->lvds_use_ssc)
+ temp |= DREF_CPU_SOURCE_OUTPUT_DOWNSPREAD;
+ else
+ temp |= DREF_CPU_SOURCE_OUTPUT_NONSPREAD;
} else {
- temp |= DREF_CPU_SOURCE_OUTPUT_NONSPREAD;
- I915_WRITE(PCH_DREF_CONTROL, temp);
- POSTING_READ(PCH_DREF_CONTROL);
+ /* Enable SSC on PCH eDP if needed */
+ if (dev_priv->lvds_use_ssc) {
+ DRM_ERROR("enabling SSC on PCH\n");
+ temp |= DREF_SUPERSPREAD_SOURCE_ENABLE;
+ }
}
+ I915_WRITE(PCH_DREF_CONTROL, temp);
+ POSTING_READ(PCH_DREF_CONTROL);
+ udelay(200);
}
}
@@ -3780,23 +3857,26 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc,
reduced_clock.m2;
}
+ dpll = 0;
if (!HAS_PCH_SPLIT(dev))
dpll = DPLL_VGA_MODE_DIS;
- if (IS_I9XX(dev)) {
+ if (!IS_GEN2(dev)) {
if (is_lvds)
dpll |= DPLLB_MODE_LVDS;
else
dpll |= DPLLB_MODE_DAC_SERIAL;
if (is_sdvo) {
+ int pixel_multiplier = intel_mode_get_pixel_multiplier(adjusted_mode);
+ if (pixel_multiplier > 1) {
+ if (IS_I945G(dev) || IS_I945GM(dev) || IS_G33(dev))
+ dpll |= (pixel_multiplier - 1) << SDVO_MULTIPLIER_SHIFT_HIRES;
+ else if (HAS_PCH_SPLIT(dev))
+ dpll |= (pixel_multiplier - 1) << PLL_REF_SDVO_HDMI_MULTIPLIER_SHIFT;
+ }
dpll |= DPLL_DVO_HIGH_SPEED;
- sdvo_pixel_multiply = adjusted_mode->clock / mode->clock;
- if (IS_I945G(dev) || IS_I945GM(dev) || IS_G33(dev))
- dpll |= (sdvo_pixel_multiply - 1) << SDVO_MULTIPLIER_SHIFT_HIRES;
- else if (HAS_PCH_SPLIT(dev))
- dpll |= (sdvo_pixel_multiply - 1) << PLL_REF_SDVO_HDMI_MULTIPLIER_SHIFT;
}
- if (is_dp)
+ if (is_dp || intel_encoder_is_pch_edp(&has_edp_encoder->base))
dpll |= DPLL_DVO_HIGH_SPEED;
/* compute bitmask from p1 value */
@@ -3824,7 +3904,7 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc,
dpll |= DPLLB_LVDS_P2_CLOCK_DIV_14;
break;
}
- if (IS_I965G(dev) && !HAS_PCH_SPLIT(dev))
+ if (INTEL_INFO(dev)->gen >= 4 && !HAS_PCH_SPLIT(dev))
dpll |= (6 << PLL_LOAD_PULSE_PHASE_SHIFT);
} else {
if (is_lvds) {
@@ -3851,7 +3931,7 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc,
dpll |= PLL_REF_INPUT_DREFCLK;
/* setup pipeconf */
- pipeconf = I915_READ(pipeconf_reg);
+ pipeconf = I915_READ(PIPECONF(pipe));
/* Set up the display plane register */
dspcntr = DISPPLANE_GAMMA_ENABLE;
@@ -3865,7 +3945,7 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc,
dspcntr |= DISPPLANE_SEL_PIPE_B;
}
- if (pipe == 0 && !IS_I965G(dev)) {
+ if (pipe == 0 && INTEL_INFO(dev)->gen < 4) {
/* Enable pixel doubling when the dot clock is > 90% of the (display)
* core speed.
*
@@ -3874,51 +3954,47 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc,
*/
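/* e.g. a 340MHz dot clock on a 360MHz core clock is above the 90% threshold and
 * needs double-wide mode (illustrative figures only). */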
if (mode->clock >
dev_priv->display.get_display_clock_speed(dev) * 9 / 10)
- pipeconf |= PIPEACONF_DOUBLE_WIDE;
+ pipeconf |= PIPECONF_DOUBLE_WIDE;
else
- pipeconf &= ~PIPEACONF_DOUBLE_WIDE;
+ pipeconf &= ~PIPECONF_DOUBLE_WIDE;
}
dspcntr |= DISPLAY_PLANE_ENABLE;
- pipeconf |= PIPEACONF_ENABLE;
+ pipeconf |= PIPECONF_ENABLE;
dpll |= DPLL_VCO_ENABLE;
-
- /* Disable the panel fitter if it was on our pipe */
- if (!HAS_PCH_SPLIT(dev) && intel_panel_fitter_pipe(dev) == pipe)
- I915_WRITE(PFIT_CONTROL, 0);
-
DRM_DEBUG_KMS("Mode for pipe %c:\n", pipe == 0 ? 'A' : 'B');
drm_mode_debug_printmodeline(mode);
/* assign to Ironlake registers */
if (HAS_PCH_SPLIT(dev)) {
- fp_reg = pch_fp_reg;
- dpll_reg = pch_dpll_reg;
+ fp_reg = PCH_FP0(pipe);
+ dpll_reg = PCH_DPLL(pipe);
+ } else {
+ fp_reg = FP0(pipe);
+ dpll_reg = DPLL(pipe);
}
- if (!has_edp_encoder) {
+ /* PCH eDP needs FDI, but CPU eDP does not */
+ if (!has_edp_encoder || intel_encoder_is_pch_edp(&has_edp_encoder->base)) {
I915_WRITE(fp_reg, fp);
I915_WRITE(dpll_reg, dpll & ~DPLL_VCO_ENABLE);
- I915_READ(dpll_reg);
+
+ POSTING_READ(dpll_reg);
udelay(150);
}
/* enable transcoder DPLL */
if (HAS_PCH_CPT(dev)) {
temp = I915_READ(PCH_DPLL_SEL);
- if (trans_dpll_sel == 0)
- temp |= (TRANSA_DPLL_ENABLE | TRANSA_DPLLA_SEL);
+ if (pipe == 0)
+ temp |= TRANSA_DPLL_ENABLE | TRANSA_DPLLA_SEL;
else
- temp |= (TRANSB_DPLL_ENABLE | TRANSB_DPLLB_SEL);
+ temp |= TRANSB_DPLL_ENABLE | TRANSB_DPLLB_SEL;
I915_WRITE(PCH_DPLL_SEL, temp);
- I915_READ(PCH_DPLL_SEL);
- udelay(150);
- }
- if (HAS_PCH_SPLIT(dev)) {
- pipeconf &= ~PIPE_ENABLE_DITHER;
- pipeconf &= ~PIPE_DITHER_TYPE_MASK;
+ POSTING_READ(PCH_DPLL_SEL);
+ udelay(150);
}
/* The LVDS pin pair needs to be on before the DPLLs are enabled.
@@ -3926,58 +4002,60 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc,
* things on.
*/
if (is_lvds) {
- u32 lvds;
-
+ reg = LVDS;
if (HAS_PCH_SPLIT(dev))
- lvds_reg = PCH_LVDS;
+ reg = PCH_LVDS;
- lvds = I915_READ(lvds_reg);
- lvds |= LVDS_PORT_EN | LVDS_A0A2_CLKA_POWER_UP;
+ temp = I915_READ(reg);
+ temp |= LVDS_PORT_EN | LVDS_A0A2_CLKA_POWER_UP;
if (pipe == 1) {
if (HAS_PCH_CPT(dev))
- lvds |= PORT_TRANS_B_SEL_CPT;
+ temp |= PORT_TRANS_B_SEL_CPT;
else
- lvds |= LVDS_PIPEB_SELECT;
+ temp |= LVDS_PIPEB_SELECT;
} else {
if (HAS_PCH_CPT(dev))
- lvds &= ~PORT_TRANS_SEL_MASK;
+ temp &= ~PORT_TRANS_SEL_MASK;
else
- lvds &= ~LVDS_PIPEB_SELECT;
+ temp &= ~LVDS_PIPEB_SELECT;
}
/* set the corresponding LVDS_BORDER bit */
- lvds |= dev_priv->lvds_border_bits;
+ temp |= dev_priv->lvds_border_bits;
/* Set the B0-B3 data pairs corresponding to whether we're going to
* set the DPLLs for dual-channel mode or not.
*/
if (clock.p2 == 7)
- lvds |= LVDS_B0B3_POWER_UP | LVDS_CLKB_POWER_UP;
+ temp |= LVDS_B0B3_POWER_UP | LVDS_CLKB_POWER_UP;
else
- lvds &= ~(LVDS_B0B3_POWER_UP | LVDS_CLKB_POWER_UP);
+ temp &= ~(LVDS_B0B3_POWER_UP | LVDS_CLKB_POWER_UP);
/* It would be nice to set 24 vs 18-bit mode (LVDS_A3_POWER_UP)
* appropriately here, but we need to look more thoroughly into how
* panels behave in the two modes.
*/
- /* set the dithering flag */
- if (IS_I965G(dev)) {
- if (dev_priv->lvds_dither) {
- if (HAS_PCH_SPLIT(dev)) {
- pipeconf |= PIPE_ENABLE_DITHER;
- pipeconf |= PIPE_DITHER_TYPE_ST01;
- } else
- lvds |= LVDS_ENABLE_DITHER;
- } else {
- if (!HAS_PCH_SPLIT(dev)) {
- lvds &= ~LVDS_ENABLE_DITHER;
- }
- }
+ /* set the dithering flag on non-PCH LVDS as needed */
+ if (INTEL_INFO(dev)->gen >= 4 && !HAS_PCH_SPLIT(dev)) {
+ if (dev_priv->lvds_dither)
+ temp |= LVDS_ENABLE_DITHER;
+ else
+ temp &= ~LVDS_ENABLE_DITHER;
+ }
+ I915_WRITE(reg, temp);
+ }
+
+ /* set the dithering flag and clear for anything other than a panel. */
+ if (HAS_PCH_SPLIT(dev)) {
+ pipeconf &= ~PIPECONF_DITHER_EN;
+ pipeconf &= ~PIPECONF_DITHER_TYPE_MASK;
+ if (dev_priv->lvds_dither && (is_lvds || has_edp_encoder)) {
+ pipeconf |= PIPECONF_DITHER_EN;
+ pipeconf |= PIPECONF_DITHER_TYPE_ST1;
}
- I915_WRITE(lvds_reg, lvds);
- I915_READ(lvds_reg);
}
- if (is_dp)
+
+ if (is_dp || intel_encoder_is_pch_edp(&has_edp_encoder->base)) {
intel_dp_set_m_n(crtc, mode, adjusted_mode);
- else if (HAS_PCH_SPLIT(dev)) {
+ } else if (HAS_PCH_SPLIT(dev)) {
/* For non-DP output, clear any trans DP clock recovery setting.*/
if (pipe == 0) {
I915_WRITE(TRANSA_DATA_M1, 0);
@@ -3992,29 +4070,35 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc,
}
}
- if (!has_edp_encoder) {
+ if (!has_edp_encoder || intel_encoder_is_pch_edp(&has_edp_encoder->base)) {
I915_WRITE(fp_reg, fp);
I915_WRITE(dpll_reg, dpll);
- I915_READ(dpll_reg);
+
/* Wait for the clocks to stabilize. */
+ POSTING_READ(dpll_reg);
udelay(150);
- if (IS_I965G(dev) && !HAS_PCH_SPLIT(dev)) {
+ if (INTEL_INFO(dev)->gen >= 4 && !HAS_PCH_SPLIT(dev)) {
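+ /* DPLL_MD takes the SDVO multiplier encoded as (multiplier - 1);
+ * writing 0 below means no multiplication. */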
+ temp = 0;
if (is_sdvo) {
- sdvo_pixel_multiply = adjusted_mode->clock / mode->clock;
- I915_WRITE(dpll_md_reg, (0 << DPLL_MD_UDI_DIVIDER_SHIFT) |
- ((sdvo_pixel_multiply - 1) << DPLL_MD_UDI_MULTIPLIER_SHIFT));
- } else
- I915_WRITE(dpll_md_reg, 0);
+ temp = intel_mode_get_pixel_multiplier(adjusted_mode);
+ if (temp > 1)
+ temp = (temp - 1) << DPLL_MD_UDI_MULTIPLIER_SHIFT;
+ else
+ temp = 0;
+ }
+ I915_WRITE(DPLL_MD(pipe), temp);
} else {
/* write it again -- the BIOS does, after all */
I915_WRITE(dpll_reg, dpll);
}
- I915_READ(dpll_reg);
+
/* Wait for the clocks to stabilize. */
+ POSTING_READ(dpll_reg);
udelay(150);
}
+ intel_crtc->lowfreq_avail = false;
if (is_lvds && has_reduced_clock && i915_powersave) {
I915_WRITE(fp_reg + 4, fp2);
intel_crtc->lowfreq_avail = true;
@@ -4024,7 +4108,6 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc,
}
} else {
I915_WRITE(fp_reg + 4, fp);
- intel_crtc->lowfreq_avail = false;
if (HAS_PIPE_CXSR(dev)) {
DRM_DEBUG_KMS("disabling CxSR downclocking\n");
pipeconf &= ~PIPECONF_CXSR_DOWNCLOCK;
@@ -4043,70 +4126,62 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc,
} else
pipeconf &= ~PIPECONF_INTERLACE_W_FIELD_INDICATION; /* progressive */
- I915_WRITE(htot_reg, (adjusted_mode->crtc_hdisplay - 1) |
+ I915_WRITE(HTOTAL(pipe),
+ (adjusted_mode->crtc_hdisplay - 1) |
((adjusted_mode->crtc_htotal - 1) << 16));
- I915_WRITE(hblank_reg, (adjusted_mode->crtc_hblank_start - 1) |
+ I915_WRITE(HBLANK(pipe),
+ (adjusted_mode->crtc_hblank_start - 1) |
((adjusted_mode->crtc_hblank_end - 1) << 16));
- I915_WRITE(hsync_reg, (adjusted_mode->crtc_hsync_start - 1) |
+ I915_WRITE(HSYNC(pipe),
+ (adjusted_mode->crtc_hsync_start - 1) |
((adjusted_mode->crtc_hsync_end - 1) << 16));
- I915_WRITE(vtot_reg, (adjusted_mode->crtc_vdisplay - 1) |
+
+ I915_WRITE(VTOTAL(pipe),
+ (adjusted_mode->crtc_vdisplay - 1) |
((adjusted_mode->crtc_vtotal - 1) << 16));
- I915_WRITE(vblank_reg, (adjusted_mode->crtc_vblank_start - 1) |
+ I915_WRITE(VBLANK(pipe),
+ (adjusted_mode->crtc_vblank_start - 1) |
((adjusted_mode->crtc_vblank_end - 1) << 16));
- I915_WRITE(vsync_reg, (adjusted_mode->crtc_vsync_start - 1) |
+ I915_WRITE(VSYNC(pipe),
+ (adjusted_mode->crtc_vsync_start - 1) |
((adjusted_mode->crtc_vsync_end - 1) << 16));
- /* pipesrc and dspsize control the size that is scaled from, which should
- * always be the user's requested size.
+
+ /* pipesrc and dspsize control the size that is scaled from,
+ * which should always be the user's requested size.
*/
if (!HAS_PCH_SPLIT(dev)) {
- I915_WRITE(dspsize_reg, ((mode->vdisplay - 1) << 16) |
- (mode->hdisplay - 1));
- I915_WRITE(dsppos_reg, 0);
+ I915_WRITE(DSPSIZE(plane),
+ ((mode->vdisplay - 1) << 16) |
+ (mode->hdisplay - 1));
+ I915_WRITE(DSPPOS(plane), 0);
}
- I915_WRITE(pipesrc_reg, ((mode->hdisplay - 1) << 16) | (mode->vdisplay - 1));
+ I915_WRITE(PIPESRC(pipe),
+ ((mode->hdisplay - 1) << 16) | (mode->vdisplay - 1));
if (HAS_PCH_SPLIT(dev)) {
- I915_WRITE(data_m1_reg, TU_SIZE(m_n.tu) | m_n.gmch_m);
- I915_WRITE(data_n1_reg, TU_SIZE(m_n.tu) | m_n.gmch_n);
- I915_WRITE(link_m1_reg, m_n.link_m);
- I915_WRITE(link_n1_reg, m_n.link_n);
+ I915_WRITE(PIPE_DATA_M1(pipe), TU_SIZE(m_n.tu) | m_n.gmch_m);
+ I915_WRITE(PIPE_DATA_N1(pipe), m_n.gmch_n);
+ I915_WRITE(PIPE_LINK_M1(pipe), m_n.link_m);
+ I915_WRITE(PIPE_LINK_N1(pipe), m_n.link_n);
- if (has_edp_encoder) {
+ if (has_edp_encoder && !intel_encoder_is_pch_edp(&has_edp_encoder->base)) {
ironlake_set_pll_edp(crtc, adjusted_mode->clock);
- } else {
- /* enable FDI RX PLL too */
- temp = I915_READ(fdi_rx_reg);
- I915_WRITE(fdi_rx_reg, temp | FDI_RX_PLL_ENABLE);
- I915_READ(fdi_rx_reg);
- udelay(200);
-
- /* enable FDI TX PLL too */
- temp = I915_READ(fdi_tx_reg);
- I915_WRITE(fdi_tx_reg, temp | FDI_TX_PLL_ENABLE);
- I915_READ(fdi_tx_reg);
-
- /* enable FDI RX PCDCLK */
- temp = I915_READ(fdi_rx_reg);
- I915_WRITE(fdi_rx_reg, temp | FDI_SEL_PCDCLK);
- I915_READ(fdi_rx_reg);
- udelay(200);
}
}
- I915_WRITE(pipeconf_reg, pipeconf);
- I915_READ(pipeconf_reg);
+ I915_WRITE(PIPECONF(pipe), pipeconf);
+ POSTING_READ(PIPECONF(pipe));
intel_wait_for_vblank(dev, pipe);
- if (IS_IRONLAKE(dev)) {
+ if (IS_GEN5(dev)) {
/* enable address swizzle for tiling buffer */
temp = I915_READ(DISP_ARB_CTL);
I915_WRITE(DISP_ARB_CTL, temp | DISP_TILE_SURFACE_SWIZZLING);
}
- I915_WRITE(dspcntr_reg, dspcntr);
+ I915_WRITE(DSPCNTR(plane), dspcntr);
- /* Flush the plane changes */
ret = intel_pipe_set_base(crtc, x, y, old_fb);
intel_update_watermarks(dev);
@@ -4199,7 +4274,8 @@ static void i9xx_update_cursor(struct drm_crtc *crtc, u32 base)
}
/* If no-part of the cursor is visible on the framebuffer, then the GPU may hang... */
-static void intel_crtc_update_cursor(struct drm_crtc *crtc)
+static void intel_crtc_update_cursor(struct drm_crtc *crtc,
+ bool on)
{
struct drm_device *dev = crtc->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
@@ -4212,7 +4288,7 @@ static void intel_crtc_update_cursor(struct drm_crtc *crtc)
pos = 0;
- if (intel_crtc->cursor_on && crtc->fb) {
+ if (on && crtc->enabled && crtc->fb) {
base = intel_crtc->cursor_addr;
if (x > (int) crtc->fb->width)
base = 0;
@@ -4324,7 +4400,7 @@ static int intel_crtc_cursor_set(struct drm_crtc *crtc,
addr = obj_priv->phys_obj->handle->busaddr;
}
- if (!IS_I9XX(dev))
+ if (IS_GEN2(dev))
I915_WRITE(CURSIZE, (height << 12) | width);
finish:
@@ -4344,7 +4420,7 @@ static int intel_crtc_cursor_set(struct drm_crtc *crtc,
intel_crtc->cursor_width = width;
intel_crtc->cursor_height = height;
- intel_crtc_update_cursor(crtc);
+ intel_crtc_update_cursor(crtc, true);
return 0;
fail_unpin:
@@ -4363,7 +4439,7 @@ static int intel_crtc_cursor_move(struct drm_crtc *crtc, int x, int y)
intel_crtc->cursor_x = x;
intel_crtc->cursor_y = y;
- intel_crtc_update_cursor(crtc);
+ intel_crtc_update_cursor(crtc, true);
return 0;
}
@@ -4432,7 +4508,7 @@ struct drm_crtc *intel_get_load_detect_pipe(struct intel_encoder *intel_encoder,
struct intel_crtc *intel_crtc;
struct drm_crtc *possible_crtc;
struct drm_crtc *supported_crtc =NULL;
- struct drm_encoder *encoder = &intel_encoder->enc;
+ struct drm_encoder *encoder = &intel_encoder->base;
struct drm_crtc *crtc = NULL;
struct drm_device *dev = encoder->dev;
struct drm_encoder_helper_funcs *encoder_funcs = encoder->helper_private;
@@ -4513,7 +4589,7 @@ struct drm_crtc *intel_get_load_detect_pipe(struct intel_encoder *intel_encoder,
void intel_release_load_detect_pipe(struct intel_encoder *intel_encoder,
struct drm_connector *connector, int dpms_mode)
{
- struct drm_encoder *encoder = &intel_encoder->enc;
+ struct drm_encoder *encoder = &intel_encoder->base;
struct drm_device *dev = encoder->dev;
struct drm_crtc *crtc = encoder->crtc;
struct drm_encoder_helper_funcs *encoder_funcs = encoder->helper_private;
@@ -4559,7 +4635,7 @@ static int intel_crtc_clock_get(struct drm_device *dev, struct drm_crtc *crtc)
clock.m2 = (fp & FP_M2_DIV_MASK) >> FP_M2_DIV_SHIFT;
}
- if (IS_I9XX(dev)) {
+ if (!IS_GEN2(dev)) {
if (IS_PINEVIEW(dev))
clock.p1 = ffs((dpll & DPLL_FPA01_P1_POST_DIV_MASK_PINEVIEW) >>
DPLL_FPA01_P1_POST_DIV_SHIFT_PINEVIEW);
@@ -4663,8 +4739,6 @@ static void intel_gpu_idle_timer(unsigned long arg)
struct drm_device *dev = (struct drm_device *)arg;
drm_i915_private_t *dev_priv = dev->dev_private;
- DRM_DEBUG_DRIVER("idle timer fired, downclocking\n");
-
dev_priv->busy = false;
queue_work(dev_priv->wq, &dev_priv->idle_work);
@@ -4678,14 +4752,12 @@ static void intel_crtc_idle_timer(unsigned long arg)
struct drm_crtc *crtc = &intel_crtc->base;
drm_i915_private_t *dev_priv = crtc->dev->dev_private;
- DRM_DEBUG_DRIVER("idle timer fired, downclocking\n");
-
intel_crtc->busy = false;
queue_work(dev_priv->wq, &dev_priv->idle_work);
}
-static void intel_increase_pllclock(struct drm_crtc *crtc, bool schedule)
+static void intel_increase_pllclock(struct drm_crtc *crtc)
{
struct drm_device *dev = crtc->dev;
drm_i915_private_t *dev_priv = dev->dev_private;
@@ -4720,9 +4792,8 @@ static void intel_increase_pllclock(struct drm_crtc *crtc, bool schedule)
}
/* Schedule downclock */
- if (schedule)
- mod_timer(&intel_crtc->idle_timer, jiffies +
- msecs_to_jiffies(CRTC_IDLE_TIMEOUT));
+ mod_timer(&intel_crtc->idle_timer, jiffies +
+ msecs_to_jiffies(CRTC_IDLE_TIMEOUT));
}
static void intel_decrease_pllclock(struct drm_crtc *crtc)
@@ -4858,7 +4929,7 @@ void intel_mark_busy(struct drm_device *dev, struct drm_gem_object *obj)
I915_WRITE(FW_BLC_SELF, fw_blc_self | FW_BLC_SELF_EN_MASK);
}
/* Non-busy -> busy, upclock */
- intel_increase_pllclock(crtc, true);
+ intel_increase_pllclock(crtc);
intel_crtc->busy = true;
} else {
/* Busy -> busy, put off timer */
@@ -4872,8 +4943,22 @@ void intel_mark_busy(struct drm_device *dev, struct drm_gem_object *obj)
static void intel_crtc_destroy(struct drm_crtc *crtc)
{
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+ struct drm_device *dev = crtc->dev;
+ struct intel_unpin_work *work;
+ unsigned long flags;
+
+ spin_lock_irqsave(&dev->event_lock, flags);
+ work = intel_crtc->unpin_work;
+ intel_crtc->unpin_work = NULL;
+ spin_unlock_irqrestore(&dev->event_lock, flags);
+
+ if (work) {
+ cancel_work_sync(&work->work);
+ kfree(work);
+ }
drm_crtc_cleanup(crtc);
+
kfree(intel_crtc);
}
@@ -4928,12 +5013,11 @@ static void do_intel_finish_page_flip(struct drm_device *dev,
spin_unlock_irqrestore(&dev->event_lock, flags);
- obj_priv = to_intel_bo(work->pending_flip_obj);
-
- /* Initial scanout buffer will have a 0 pending flip count */
- if ((atomic_read(&obj_priv->pending_flip) == 0) ||
- atomic_dec_and_test(&obj_priv->pending_flip))
- DRM_WAKEUP(&dev_priv->pending_flip_queue);
+ obj_priv = to_intel_bo(work->old_fb_obj);
+ atomic_clear_mask(1 << intel_crtc->plane,
+ &obj_priv->pending_flip.counter);
+ if (atomic_read(&obj_priv->pending_flip) == 0)
+ wake_up(&dev_priv->pending_flip_queue);
schedule_work(&work->work);
trace_i915_flip_complete(intel_crtc->plane, work->pending_flip_obj);
@@ -5014,7 +5098,7 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc,
obj = intel_fb->obj;
mutex_lock(&dev->struct_mutex);
- ret = intel_pin_and_fence_fb_obj(dev, obj);
+ ret = intel_pin_and_fence_fb_obj(dev, obj, true);
if (ret)
goto cleanup_work;
@@ -5023,29 +5107,33 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc,
drm_gem_object_reference(obj);
crtc->fb = fb;
- ret = i915_gem_object_flush_write_domain(obj);
- if (ret)
- goto cleanup_objs;
ret = drm_vblank_get(dev, intel_crtc->pipe);
if (ret)
goto cleanup_objs;
- obj_priv = to_intel_bo(obj);
- atomic_inc(&obj_priv->pending_flip);
+ /* Block clients from rendering to the new back buffer until
+ * the flip occurs and the object is no longer visible.
+ */
+ atomic_add(1 << intel_crtc->plane,
+ &to_intel_bo(work->old_fb_obj)->pending_flip);
+
work->pending_flip_obj = obj;
+ obj_priv = to_intel_bo(obj);
if (IS_GEN3(dev) || IS_GEN2(dev)) {
u32 flip_mask;
+ /* Can't queue multiple flips, so wait for the previous
+ * one to finish before executing the next.
+ */
+ BEGIN_LP_RING(2);
if (intel_crtc->plane)
flip_mask = MI_WAIT_FOR_PLANE_B_FLIP;
else
flip_mask = MI_WAIT_FOR_PLANE_A_FLIP;
-
- BEGIN_LP_RING(2);
OUT_RING(MI_WAIT_FOR_EVENT | flip_mask);
- OUT_RING(0);
+ OUT_RING(MI_NOOP);
ADVANCE_LP_RING();
}
@@ -5126,15 +5214,14 @@ cleanup_work:
return ret;
}
-static const struct drm_crtc_helper_funcs intel_helper_funcs = {
+static struct drm_crtc_helper_funcs intel_helper_funcs = {
.dpms = intel_crtc_dpms,
.mode_fixup = intel_crtc_mode_fixup,
.mode_set = intel_crtc_mode_set,
.mode_set_base = intel_pipe_set_base,
.mode_set_base_atomic = intel_pipe_set_base_atomic,
- .prepare = intel_crtc_prepare,
- .commit = intel_crtc_commit,
.load_lut = intel_crtc_load_lut,
+ .disable = intel_crtc_disable,
};
static const struct drm_crtc_funcs intel_crtc_funcs = {
@@ -5146,6 +5233,55 @@ static const struct drm_crtc_funcs intel_crtc_funcs = {
.page_flip = intel_crtc_page_flip,
};
+static void intel_sanitize_modesetting(struct drm_device *dev,
+ int pipe, int plane)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ u32 reg, val;
+
+ if (HAS_PCH_SPLIT(dev))
+ return;
+
+ /* Who knows what state these registers were left in by the BIOS or
+ * grub?
+ *
+ * If we leave the registers in a conflicting state (e.g. with the
+ * display plane reading from the other pipe than the one we intend
+ * to use) then when we attempt to teardown the active mode, we will
+ * not disable the pipes and planes in the correct order -- leaving
+ * a plane reading from a disabled pipe and possibly leading to
+ * undefined behaviour.
+ */
+
+ reg = DSPCNTR(plane);
+ val = I915_READ(reg);
+
+ if ((val & DISPLAY_PLANE_ENABLE) == 0)
+ return;
+ if (!!(val & DISPPLANE_SEL_PIPE_MASK) == pipe)
+ return;
+
+ /* This display plane is active and attached to the other CPU pipe. */
+ pipe = !pipe;
+
+ /* Disable the plane and wait for it to stop reading from the pipe. */
+ I915_WRITE(reg, val & ~DISPLAY_PLANE_ENABLE);
+ intel_flush_display_plane(dev, plane);
+
+ if (IS_GEN2(dev))
+ intel_wait_for_vblank(dev, pipe);
+
+ if (pipe == 0 && (dev_priv->quirks & QUIRK_PIPEA_FORCE))
+ return;
+
+ /* Switch off the pipe. */
+ reg = PIPECONF(pipe);
+ val = I915_READ(reg);
+ if (val & PIPECONF_ENABLE) {
+ I915_WRITE(reg, val & ~PIPECONF_ENABLE);
+ intel_wait_for_pipe_off(dev, pipe);
+ }
+}
static void intel_crtc_init(struct drm_device *dev, int pipe)
{
@@ -5160,8 +5296,6 @@ static void intel_crtc_init(struct drm_device *dev, int pipe)
drm_crtc_init(dev, &intel_crtc->base, &intel_crtc_funcs);
drm_mode_crtc_set_gamma_size(&intel_crtc->base, 256);
- intel_crtc->pipe = pipe;
- intel_crtc->plane = pipe;
for (i = 0; i < 256; i++) {
intel_crtc->lut_r[i] = i;
intel_crtc->lut_g[i] = i;
@@ -5171,9 +5305,9 @@ static void intel_crtc_init(struct drm_device *dev, int pipe)
/* Swap pipes & planes for FBC on pre-965 */
intel_crtc->pipe = pipe;
intel_crtc->plane = pipe;
- if (IS_MOBILE(dev) && (IS_I9XX(dev) && !IS_I965G(dev))) {
+ if (IS_MOBILE(dev) && IS_GEN3(dev)) {
DRM_DEBUG_KMS("swapping pipes & planes for FBC\n");
- intel_crtc->plane = ((pipe == 0) ? 1 : 0);
+ intel_crtc->plane = !pipe;
}
BUG_ON(pipe >= ARRAY_SIZE(dev_priv->plane_to_crtc_mapping) ||
@@ -5183,12 +5317,24 @@ static void intel_crtc_init(struct drm_device *dev, int pipe)
intel_crtc->cursor_addr = 0;
intel_crtc->dpms_mode = -1;
+ intel_crtc->active = true; /* force the pipe off on setup_init_config */
+
+ if (HAS_PCH_SPLIT(dev)) {
+ intel_helper_funcs.prepare = ironlake_crtc_prepare;
+ intel_helper_funcs.commit = ironlake_crtc_commit;
+ } else {
+ intel_helper_funcs.prepare = i9xx_crtc_prepare;
+ intel_helper_funcs.commit = i9xx_crtc_commit;
+ }
+
drm_crtc_helper_add(&intel_crtc->base, &intel_helper_funcs);
intel_crtc->busy = false;
setup_timer(&intel_crtc->idle_timer, intel_crtc_idle_timer,
(unsigned long)intel_crtc);
+
+ intel_sanitize_modesetting(dev, intel_crtc->pipe, intel_crtc->plane);
}
int intel_get_pipe_from_crtc_id(struct drm_device *dev, void *data,
@@ -5218,42 +5364,34 @@ int intel_get_pipe_from_crtc_id(struct drm_device *dev, void *data,
return 0;
}
-struct drm_crtc *intel_get_crtc_from_pipe(struct drm_device *dev, int pipe)
-{
- struct drm_crtc *crtc = NULL;
-
- list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
- struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
- if (intel_crtc->pipe == pipe)
- break;
- }
- return crtc;
-}
-
static int intel_encoder_clones(struct drm_device *dev, int type_mask)
{
+ struct intel_encoder *encoder;
int index_mask = 0;
- struct drm_encoder *encoder;
int entry = 0;
- list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
- struct intel_encoder *intel_encoder = enc_to_intel_encoder(encoder);
- if (type_mask & intel_encoder->clone_mask)
+ list_for_each_entry(encoder, &dev->mode_config.encoder_list, base.head) {
+ if (type_mask & encoder->clone_mask)
index_mask |= (1 << entry);
entry++;
}
+
return index_mask;
}
-
static void intel_setup_outputs(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
- struct drm_encoder *encoder;
+ struct intel_encoder *encoder;
bool dpd_is_edp = false;
+ bool has_lvds = false;
if (IS_MOBILE(dev) && !IS_I830(dev))
- intel_lvds_init(dev);
+ has_lvds = intel_lvds_init(dev);
+ if (!has_lvds && !HAS_PCH_SPLIT(dev)) {
+ /* disable the panel fitter on everything but LVDS */
+ I915_WRITE(PFIT_CONTROL, 0);
+ }
if (HAS_PCH_SPLIT(dev)) {
dpd_is_edp = intel_dpd_is_edp(dev);
@@ -5338,12 +5476,10 @@ static void intel_setup_outputs(struct drm_device *dev)
if (SUPPORTS_TV(dev))
intel_tv_init(dev);
- list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
- struct intel_encoder *intel_encoder = enc_to_intel_encoder(encoder);
-
- encoder->possible_crtcs = intel_encoder->crtc_mask;
- encoder->possible_clones = intel_encoder_clones(dev,
- intel_encoder->clone_mask);
+ list_for_each_entry(encoder, &dev->mode_config.encoder_list, base.head) {
+ encoder->base.possible_crtcs = encoder->crtc_mask;
+ encoder->base.possible_clones =
+ intel_encoder_clones(dev, encoder->clone_mask);
}
}
@@ -5377,8 +5513,25 @@ int intel_framebuffer_init(struct drm_device *dev,
struct drm_mode_fb_cmd *mode_cmd,
struct drm_gem_object *obj)
{
+ struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
int ret;
+ if (obj_priv->tiling_mode == I915_TILING_Y)
+ return -EINVAL;
+
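+ /* The pitch & 63 test below rejects strides that are not 64-byte aligned,
+ * which the display engine requires for scanout (descriptive note). */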
+ if (mode_cmd->pitch & 63)
+ return -EINVAL;
+
+ switch (mode_cmd->bpp) {
+ case 8:
+ case 16:
+ case 24:
+ case 32:
+ break;
+ default:
+ return -EINVAL;
+ }
+
ret = drm_framebuffer_init(dev, &intel_fb->base, &intel_fb_funcs);
if (ret) {
DRM_ERROR("framebuffer init failed %d\n", ret);
@@ -5487,6 +5640,10 @@ void ironlake_enable_drps(struct drm_device *dev)
u32 rgvmodectl = I915_READ(MEMMODECTL);
u8 fmax, fmin, fstart, vstart;
+ /* Enable temp reporting */
+ I915_WRITE16(PMMISC, I915_READ(PMMISC) | MCPPCE_EN);
+ I915_WRITE16(TSC1, I915_READ(TSC1) | TSE);
+
/* 100ms RC evaluation intervals */
I915_WRITE(RCUPEI, 100000);
I915_WRITE(RCDNEI, 100000);
@@ -5502,20 +5659,19 @@ void ironlake_enable_drps(struct drm_device *dev)
fmin = (rgvmodectl & MEMMODE_FMIN_MASK);
fstart = (rgvmodectl & MEMMODE_FSTART_MASK) >>
MEMMODE_FSTART_SHIFT;
- fstart = fmax;
vstart = (I915_READ(PXVFREQ_BASE + (fstart * 4)) & PXVFREQ_PX_MASK) >>
PXVFREQ_PX_SHIFT;
- dev_priv->fmax = fstart; /* IPS callback will increase this */
+ dev_priv->fmax = fmax; /* IPS callback will increase this */
dev_priv->fstart = fstart;
- dev_priv->max_delay = fmax;
+ dev_priv->max_delay = fstart;
dev_priv->min_delay = fmin;
dev_priv->cur_delay = fstart;
- DRM_DEBUG_DRIVER("fmax: %d, fmin: %d, fstart: %d\n", fmax, fmin,
- fstart);
+ DRM_DEBUG_DRIVER("fmax: %d, fmin: %d, fstart: %d\n",
+ fmax, fmin, fstart);
I915_WRITE(MEMINTREN, MEMINT_CX_SUPR_EN | MEMINT_EVAL_CHG_EN);
@@ -5529,7 +5685,7 @@ void ironlake_enable_drps(struct drm_device *dev)
rgvmodectl |= MEMMODE_SWMODE_EN;
I915_WRITE(MEMMODECTL, rgvmodectl);
- if (wait_for((I915_READ(MEMSWCTL) & MEMCTL_CMD_STS) == 0, 1, 0))
+ if (wait_for((I915_READ(MEMSWCTL) & MEMCTL_CMD_STS) == 0, 10))
DRM_ERROR("stuck trying to change perf mode\n");
msleep(1);
@@ -5660,7 +5816,7 @@ void intel_init_clock_gating(struct drm_device *dev)
if (HAS_PCH_SPLIT(dev)) {
uint32_t dspclk_gate = VRHUNIT_CLOCK_GATE_DISABLE;
- if (IS_IRONLAKE(dev)) {
+ if (IS_GEN5(dev)) {
/* Required for FBC */
dspclk_gate |= DPFDUNIT_CLOCK_GATE_DISABLE;
/* Required for CxSR */
@@ -5674,13 +5830,20 @@ void intel_init_clock_gating(struct drm_device *dev)
I915_WRITE(PCH_DSPCLK_GATE_D, dspclk_gate);
/*
+ * On Ibex Peak and Cougar Point, we need to disable clock
+ * gating for the panel power sequencer or it will fail to
+ * start up when no ports are active.
+ */
+ I915_WRITE(SOUTH_DSPCLK_GATE_D, PCH_DPLSUNIT_CLOCK_GATE_DISABLE);
+
+ /*
* According to the spec the following bits should be set in
* order to enable memory self-refresh
* The bit 22/21 of 0x42004
* The bit 5 of 0x42020
* The bit 15 of 0x45000
*/
- if (IS_IRONLAKE(dev)) {
+ if (IS_GEN5(dev)) {
I915_WRITE(ILK_DISPLAY_CHICKEN2,
(I915_READ(ILK_DISPLAY_CHICKEN2) |
ILK_DPARB_GATE | ILK_VSDPFD_FULL));
@@ -5728,20 +5891,20 @@ void intel_init_clock_gating(struct drm_device *dev)
if (IS_GM45(dev))
dspclk_gate |= DSSUNIT_CLOCK_GATE_DISABLE;
I915_WRITE(DSPCLK_GATE_D, dspclk_gate);
- } else if (IS_I965GM(dev)) {
+ } else if (IS_CRESTLINE(dev)) {
I915_WRITE(RENCLK_GATE_D1, I965_RCC_CLOCK_GATE_DISABLE);
I915_WRITE(RENCLK_GATE_D2, 0);
I915_WRITE(DSPCLK_GATE_D, 0);
I915_WRITE(RAMCLK_GATE_D, 0);
I915_WRITE16(DEUC, 0);
- } else if (IS_I965G(dev)) {
+ } else if (IS_BROADWATER(dev)) {
I915_WRITE(RENCLK_GATE_D1, I965_RCZ_CLOCK_GATE_DISABLE |
I965_RCC_CLOCK_GATE_DISABLE |
I965_RCPB_CLOCK_GATE_DISABLE |
I965_ISC_CLOCK_GATE_DISABLE |
I965_FBC_CLOCK_GATE_DISABLE);
I915_WRITE(RENCLK_GATE_D2, 0);
- } else if (IS_I9XX(dev)) {
+ } else if (IS_GEN3(dev)) {
u32 dstate = I915_READ(D_STATE);
dstate |= DSTATE_PLL_D3_OFF | DSTATE_GFX_CLOCK_GATING |
@@ -5823,7 +5986,7 @@ static void intel_init_display(struct drm_device *dev)
dev_priv->display.fbc_enabled = g4x_fbc_enabled;
dev_priv->display.enable_fbc = g4x_enable_fbc;
dev_priv->display.disable_fbc = g4x_disable_fbc;
- } else if (IS_I965GM(dev)) {
+ } else if (IS_CRESTLINE(dev)) {
dev_priv->display.fbc_enabled = i8xx_fbc_enabled;
dev_priv->display.enable_fbc = i8xx_enable_fbc;
dev_priv->display.disable_fbc = i8xx_disable_fbc;
@@ -5856,7 +6019,7 @@ static void intel_init_display(struct drm_device *dev)
/* For FIFO watermark updates */
if (HAS_PCH_SPLIT(dev)) {
- if (IS_IRONLAKE(dev)) {
+ if (IS_GEN5(dev)) {
if (I915_READ(MLTR_ILK) & ILK_SRLT_MASK)
dev_priv->display.update_wm = ironlake_update_wm;
else {
@@ -5883,9 +6046,9 @@ static void intel_init_display(struct drm_device *dev)
dev_priv->display.update_wm = pineview_update_wm;
} else if (IS_G4X(dev))
dev_priv->display.update_wm = g4x_update_wm;
- else if (IS_I965G(dev))
+ else if (IS_GEN4(dev))
dev_priv->display.update_wm = i965_update_wm;
- else if (IS_I9XX(dev)) {
+ else if (IS_GEN3(dev)) {
dev_priv->display.update_wm = i9xx_update_wm;
dev_priv->display.get_fifo_size = i9xx_get_fifo_size;
} else if (IS_I85X(dev)) {
@@ -5999,24 +6162,24 @@ void intel_modeset_init(struct drm_device *dev)
intel_init_display(dev);
- if (IS_I965G(dev)) {
- dev->mode_config.max_width = 8192;
- dev->mode_config.max_height = 8192;
- } else if (IS_I9XX(dev)) {
+ if (IS_GEN2(dev)) {
+ dev->mode_config.max_width = 2048;
+ dev->mode_config.max_height = 2048;
+ } else if (IS_GEN3(dev)) {
dev->mode_config.max_width = 4096;
dev->mode_config.max_height = 4096;
} else {
- dev->mode_config.max_width = 2048;
- dev->mode_config.max_height = 2048;
+ dev->mode_config.max_width = 8192;
+ dev->mode_config.max_height = 8192;
}
/* set memory base */
- if (IS_I9XX(dev))
- dev->mode_config.fb_base = pci_resource_start(dev->pdev, 2);
- else
+ if (IS_GEN2(dev))
dev->mode_config.fb_base = pci_resource_start(dev->pdev, 0);
+ else
+ dev->mode_config.fb_base = pci_resource_start(dev->pdev, 2);
- if (IS_MOBILE(dev) || IS_I9XX(dev))
+ if (IS_MOBILE(dev) || !IS_GEN2(dev))
dev_priv->num_pipe = 2;
else
dev_priv->num_pipe = 1;
@@ -6052,10 +6215,11 @@ void intel_modeset_cleanup(struct drm_device *dev)
struct drm_crtc *crtc;
struct intel_crtc *intel_crtc;
+ drm_kms_helper_poll_fini(dev);
mutex_lock(&dev->struct_mutex);
- drm_kms_helper_poll_fini(dev);
- intel_fbdev_fini(dev);
+ intel_unregister_dsm_handler();
+
list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
/* Skip inactive CRTCs */
@@ -6063,12 +6227,9 @@ void intel_modeset_cleanup(struct drm_device *dev)
continue;
intel_crtc = to_intel_crtc(crtc);
- intel_increase_pllclock(crtc, false);
- del_timer_sync(&intel_crtc->idle_timer);
+ intel_increase_pllclock(crtc);
}
- del_timer_sync(&dev_priv->idle_timer);
-
if (dev_priv->display.disable_fbc)
dev_priv->display.disable_fbc(dev);
@@ -6097,33 +6258,36 @@ void intel_modeset_cleanup(struct drm_device *dev)
mutex_unlock(&dev->struct_mutex);
+ /* Disable the irq before mode object teardown, for the irq might
+ * enqueue unpin/hotplug work. */
+ drm_irq_uninstall(dev);
+ cancel_work_sync(&dev_priv->hotplug_work);
+
+ /* Shut off idle work before the crtcs get freed. */
+ list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
+ intel_crtc = to_intel_crtc(crtc);
+ del_timer_sync(&intel_crtc->idle_timer);
+ }
+ del_timer_sync(&dev_priv->idle_timer);
+ cancel_work_sync(&dev_priv->idle_work);
+
drm_mode_config_cleanup(dev);
}
-
/*
* Return which encoder is currently attached for connector.
*/
-struct drm_encoder *intel_attached_encoder (struct drm_connector *connector)
+struct drm_encoder *intel_best_encoder(struct drm_connector *connector)
{
- struct drm_mode_object *obj;
- struct drm_encoder *encoder;
- int i;
-
- for (i = 0; i < DRM_CONNECTOR_MAX_ENCODER; i++) {
- if (connector->encoder_ids[i] == 0)
- break;
-
- obj = drm_mode_object_find(connector->dev,
- connector->encoder_ids[i],
- DRM_MODE_OBJECT_ENCODER);
- if (!obj)
- continue;
+ return &intel_attached_encoder(connector)->base;
+}
- encoder = obj_to_encoder(obj);
- return encoder;
- }
- return NULL;
+void intel_connector_attach_encoder(struct intel_connector *connector,
+ struct intel_encoder *encoder)
+{
+ connector->encoder = encoder;
+ drm_mode_connector_attach_encoder(&connector->base,
+ &encoder->base);
}
/*
diff --git a/drivers/gpu/drm/i915/intel_dp.c b/drivers/gpu/drm/i915/intel_dp.c
index 9ab8708ac6ba..df648cb4c296 100644
--- a/drivers/gpu/drm/i915/intel_dp.c
+++ b/drivers/gpu/drm/i915/intel_dp.c
@@ -42,15 +42,13 @@
#define DP_LINK_CONFIGURATION_SIZE 9
-#define IS_eDP(i) ((i)->base.type == INTEL_OUTPUT_EDP)
-#define IS_PCH_eDP(i) ((i)->is_pch_edp)
-
struct intel_dp {
struct intel_encoder base;
uint32_t output_reg;
uint32_t DP;
uint8_t link_configuration[DP_LINK_CONFIGURATION_SIZE];
bool has_audio;
+ int force_audio;
int dpms_mode;
uint8_t link_bw;
uint8_t lane_count;
@@ -58,14 +56,69 @@ struct intel_dp {
struct i2c_adapter adapter;
struct i2c_algo_dp_aux_data algo;
bool is_pch_edp;
+ uint8_t train_set[4];
+ uint8_t link_status[DP_LINK_STATUS_SIZE];
+
+ struct drm_property *force_audio_property;
};
+/**
+ * is_edp - is the given port attached to an eDP panel (either CPU or PCH)
+ * @intel_dp: DP struct
+ *
+ * If a CPU or PCH DP output is attached to an eDP panel, this function
+ * will return true, and false otherwise.
+ */
+static bool is_edp(struct intel_dp *intel_dp)
+{
+ return intel_dp->base.type == INTEL_OUTPUT_EDP;
+}
+
+/**
+ * is_pch_edp - is the port on the PCH and attached to an eDP panel?
+ * @intel_dp: DP struct
+ *
+ * Returns true if the given DP struct corresponds to a PCH DP port attached
+ * to an eDP panel, false otherwise. Helpful for determining whether we
+ * may need FDI resources for a given DP output or not.
+ */
+static bool is_pch_edp(struct intel_dp *intel_dp)
+{
+ return intel_dp->is_pch_edp;
+}
+
static struct intel_dp *enc_to_intel_dp(struct drm_encoder *encoder)
{
- return container_of(enc_to_intel_encoder(encoder), struct intel_dp, base);
+ return container_of(encoder, struct intel_dp, base.base);
+}
+
+static struct intel_dp *intel_attached_dp(struct drm_connector *connector)
+{
+ return container_of(intel_attached_encoder(connector),
+ struct intel_dp, base);
}
-static void intel_dp_link_train(struct intel_dp *intel_dp);
+/**
+ * intel_encoder_is_pch_edp - is the given encoder a PCH attached eDP?
+ * @encoder: DRM encoder
+ *
+ * Return true if @encoder corresponds to a PCH attached eDP panel. Needed
+ * by intel_display.c.
+ */
+bool intel_encoder_is_pch_edp(struct drm_encoder *encoder)
+{
+ struct intel_dp *intel_dp;
+
+ if (!encoder)
+ return false;
+
+ intel_dp = enc_to_intel_dp(encoder);
+
+ return is_pch_edp(intel_dp);
+}
+
+static void intel_dp_start_link_train(struct intel_dp *intel_dp);
+static void intel_dp_complete_link_train(struct intel_dp *intel_dp);
static void intel_dp_link_down(struct intel_dp *intel_dp);
void
@@ -129,8 +182,8 @@ intel_dp_link_required(struct drm_device *dev, struct intel_dp *intel_dp, int pi
{
struct drm_i915_private *dev_priv = dev->dev_private;
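+ /* edp.bpp is the panel bpp (typically parsed from the VBT); the + 7 rounds the bit
+ * total up to whole bytes, while non-eDP assumes 24bpp, i.e. 3 bytes per pixel. */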
- if (IS_eDP(intel_dp) || IS_PCH_eDP(intel_dp))
- return (pixel_clock * dev_priv->edp_bpp) / 8;
+ if (is_edp(intel_dp))
+ return (pixel_clock * dev_priv->edp.bpp + 7) / 8;
else
return pixel_clock * 3;
}
@@ -145,15 +198,13 @@ static int
intel_dp_mode_valid(struct drm_connector *connector,
struct drm_display_mode *mode)
{
- struct drm_encoder *encoder = intel_attached_encoder(connector);
- struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
+ struct intel_dp *intel_dp = intel_attached_dp(connector);
struct drm_device *dev = connector->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
int max_link_clock = intel_dp_link_clock(intel_dp_max_link_bw(intel_dp));
int max_lanes = intel_dp_max_lane_count(intel_dp);
- if ((IS_eDP(intel_dp) || IS_PCH_eDP(intel_dp)) &&
- dev_priv->panel_fixed_mode) {
+ if (is_edp(intel_dp) && dev_priv->panel_fixed_mode) {
if (mode->hdisplay > dev_priv->panel_fixed_mode->hdisplay)
return MODE_PANEL;
@@ -163,7 +214,7 @@ intel_dp_mode_valid(struct drm_connector *connector,
/* only refuse the mode on non-eDP since we have seen some weird eDP panels
which are outside spec tolerances but somehow work by magic */
- if (!IS_eDP(intel_dp) &&
+ if (!is_edp(intel_dp) &&
(intel_dp_link_required(connector->dev, intel_dp, mode->clock)
> intel_dp_max_data_rate(max_link_clock, max_lanes)))
return MODE_CLOCK_HIGH;
@@ -233,7 +284,7 @@ intel_dp_aux_ch(struct intel_dp *intel_dp,
uint8_t *recv, int recv_size)
{
uint32_t output_reg = intel_dp->output_reg;
- struct drm_device *dev = intel_dp->base.enc.dev;
+ struct drm_device *dev = intel_dp->base.base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
uint32_t ch_ctl = output_reg + 0x10;
uint32_t ch_data = ch_ctl + 4;
@@ -246,8 +297,11 @@ intel_dp_aux_ch(struct intel_dp *intel_dp,
/* The clock divider is based off the hrawclk,
* and would like to run at 2MHz. So, take the
* hrawclk value and divide by 2 and use that
+ *
+ * Note that PCH attached eDP panels should use a 125MHz input
+ * clock divider.
*/
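+ /* e.g. the SNB eDP case below divides a 400MHz input by 200 to hit 2MHz; a 125MHz
+ * PCH reference would want a divider of roughly 62 (illustrative numbers). */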
- if (IS_eDP(intel_dp)) {
+ if (is_edp(intel_dp) && !is_pch_edp(intel_dp)) {
if (IS_GEN6(dev))
aux_clock_divider = 200; /* SNB eDP input clock at 400Mhz */
else
@@ -519,8 +573,7 @@ intel_dp_mode_fixup(struct drm_encoder *encoder, struct drm_display_mode *mode,
int max_clock = intel_dp_max_link_bw(intel_dp) == DP_LINK_BW_2_7 ? 1 : 0;
static int bws[2] = { DP_LINK_BW_1_62, DP_LINK_BW_2_7 };
- if ((IS_eDP(intel_dp) || IS_PCH_eDP(intel_dp)) &&
- dev_priv->panel_fixed_mode) {
+ if (is_edp(intel_dp) && dev_priv->panel_fixed_mode) {
intel_fixed_panel_mode(dev_priv->panel_fixed_mode, adjusted_mode);
intel_pch_panel_fitting(dev, DRM_MODE_SCALE_FULLSCREEN,
mode, adjusted_mode);
@@ -549,7 +602,7 @@ intel_dp_mode_fixup(struct drm_encoder *encoder, struct drm_display_mode *mode,
}
}
- if (IS_eDP(intel_dp) || IS_PCH_eDP(intel_dp)) {
+ if (is_edp(intel_dp)) {
/* okay we failed just pick the highest */
intel_dp->lane_count = max_lane_count;
intel_dp->link_bw = bws[max_clock];
@@ -598,25 +651,6 @@ intel_dp_compute_m_n(int bpp,
intel_reduce_ratio(&m_n->link_m, &m_n->link_n);
}
-bool intel_pch_has_edp(struct drm_crtc *crtc)
-{
- struct drm_device *dev = crtc->dev;
- struct drm_mode_config *mode_config = &dev->mode_config;
- struct drm_encoder *encoder;
-
- list_for_each_entry(encoder, &mode_config->encoder_list, head) {
- struct intel_dp *intel_dp;
-
- if (encoder->crtc != crtc)
- continue;
-
- intel_dp = enc_to_intel_dp(encoder);
- if (intel_dp->base.type == INTEL_OUTPUT_DISPLAYPORT)
- return intel_dp->is_pch_edp;
- }
- return false;
-}
-
void
intel_dp_set_m_n(struct drm_crtc *crtc, struct drm_display_mode *mode,
struct drm_display_mode *adjusted_mode)
@@ -641,8 +675,10 @@ intel_dp_set_m_n(struct drm_crtc *crtc, struct drm_display_mode *mode,
intel_dp = enc_to_intel_dp(encoder);
if (intel_dp->base.type == INTEL_OUTPUT_DISPLAYPORT) {
lane_count = intel_dp->lane_count;
- if (IS_PCH_eDP(intel_dp))
- bpp = dev_priv->edp_bpp;
+ break;
+ } else if (is_edp(intel_dp)) {
+ lane_count = dev_priv->edp.lanes;
+ bpp = dev_priv->edp.bpp;
break;
}
}
@@ -698,7 +734,7 @@ intel_dp_mode_set(struct drm_encoder *encoder, struct drm_display_mode *mode,
{
struct drm_device *dev = encoder->dev;
struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
- struct drm_crtc *crtc = intel_dp->base.enc.crtc;
+ struct drm_crtc *crtc = intel_dp->base.base.crtc;
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
intel_dp->DP = (DP_VOLTAGE_0_4 |
@@ -709,7 +745,7 @@ intel_dp_mode_set(struct drm_encoder *encoder, struct drm_display_mode *mode,
if (adjusted_mode->flags & DRM_MODE_FLAG_PVSYNC)
intel_dp->DP |= DP_SYNC_VS_HIGH;
- if (HAS_PCH_CPT(dev) && !IS_eDP(intel_dp))
+ if (HAS_PCH_CPT(dev) && !is_edp(intel_dp))
intel_dp->DP |= DP_LINK_TRAIN_OFF_CPT;
else
intel_dp->DP |= DP_LINK_TRAIN_OFF;
@@ -744,7 +780,7 @@ intel_dp_mode_set(struct drm_encoder *encoder, struct drm_display_mode *mode,
if (intel_crtc->pipe == 1 && !HAS_PCH_CPT(dev))
intel_dp->DP |= DP_PIPEB_SELECT;
- if (IS_eDP(intel_dp)) {
+ if (is_edp(intel_dp) && !is_pch_edp(intel_dp)) {
/* don't miss out required setting for eDP */
intel_dp->DP |= DP_PLL_ENABLE;
if (adjusted_mode->clock < 200000)
@@ -754,13 +790,15 @@ intel_dp_mode_set(struct drm_encoder *encoder, struct drm_display_mode *mode,
}
}
-static void ironlake_edp_panel_on (struct drm_device *dev)
+/* Returns true if the panel was already on when called */
+static bool ironlake_edp_panel_on (struct intel_dp *intel_dp)
{
+ struct drm_device *dev = intel_dp->base.base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
- u32 pp;
+ u32 pp, idle_on_mask = PP_ON | PP_SEQUENCE_STATE_ON_IDLE;
if (I915_READ(PCH_PP_STATUS) & PP_ON)
- return;
+ return true;
pp = I915_READ(PCH_PP_CONTROL);
@@ -771,21 +809,30 @@ static void ironlake_edp_panel_on (struct drm_device *dev)
pp |= PANEL_UNLOCK_REGS | POWER_TARGET_ON;
I915_WRITE(PCH_PP_CONTROL, pp);
+ POSTING_READ(PCH_PP_CONTROL);
+
+ /* Ouch. We need to wait here for some panels, like Dell e6510
+ * https://bugs.freedesktop.org/show_bug.cgi?id=29278i
+ */
+ msleep(300);
- if (wait_for(I915_READ(PCH_PP_STATUS) & PP_ON, 5000, 10))
+ if (wait_for((I915_READ(PCH_PP_STATUS) & idle_on_mask) == idle_on_mask,
+ 5000))
DRM_ERROR("panel on wait timed out: 0x%08x\n",
I915_READ(PCH_PP_STATUS));
- pp &= ~(PANEL_UNLOCK_REGS | EDP_FORCE_VDD);
pp |= PANEL_POWER_RESET; /* restore panel reset bit */
I915_WRITE(PCH_PP_CONTROL, pp);
POSTING_READ(PCH_PP_CONTROL);
+
+ return false;
}
static void ironlake_edp_panel_off (struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
- u32 pp;
+ u32 pp, idle_off_mask = PP_ON | PP_SEQUENCE_MASK |
+ PP_CYCLE_DELAY_ACTIVE | PP_SEQUENCE_STATE_MASK;
pp = I915_READ(PCH_PP_CONTROL);
@@ -796,15 +843,20 @@ static void ironlake_edp_panel_off (struct drm_device *dev)
pp &= ~POWER_TARGET_ON;
I915_WRITE(PCH_PP_CONTROL, pp);
+ POSTING_READ(PCH_PP_CONTROL);
- if (wait_for((I915_READ(PCH_PP_STATUS) & PP_ON) == 0, 5000, 10))
+ if (wait_for((I915_READ(PCH_PP_STATUS) & idle_off_mask) == 0, 5000))
DRM_ERROR("panel off wait timed out: 0x%08x\n",
I915_READ(PCH_PP_STATUS));
- /* Make sure VDD is enabled so DP AUX will work */
- pp |= EDP_FORCE_VDD | PANEL_POWER_RESET; /* restore panel reset bit */
+ pp |= PANEL_POWER_RESET; /* restore panel reset bit */
I915_WRITE(PCH_PP_CONTROL, pp);
POSTING_READ(PCH_PP_CONTROL);
+
+ /* Ouch. We need to wait here for some panels, like Dell e6510
+ * https://bugs.freedesktop.org/show_bug.cgi?id=29278i
+ */
+ msleep(300);
}
static void ironlake_edp_backlight_on (struct drm_device *dev)
@@ -813,6 +865,13 @@ static void ironlake_edp_backlight_on (struct drm_device *dev)
u32 pp;
DRM_DEBUG_KMS("\n");
+ /*
+ * If we enable the backlight right away following a panel power
+ * on, we may see slight flicker as the panel syncs with the eDP
+ * link. So delay a bit to make sure the image is solid before
+ * allowing it to appear.
+ */
+ msleep(300);
pp = I915_READ(PCH_PP_CONTROL);
pp |= EDP_BLC_ENABLE;
I915_WRITE(PCH_PP_CONTROL, pp);
@@ -837,8 +896,10 @@ static void ironlake_edp_pll_on(struct drm_encoder *encoder)
DRM_DEBUG_KMS("\n");
dpa_ctl = I915_READ(DP_A);
- dpa_ctl &= ~DP_PLL_ENABLE;
+ dpa_ctl |= DP_PLL_ENABLE;
I915_WRITE(DP_A, dpa_ctl);
+ POSTING_READ(DP_A);
+ udelay(200);
}
static void ironlake_edp_pll_off(struct drm_encoder *encoder)
@@ -848,8 +909,9 @@ static void ironlake_edp_pll_off(struct drm_encoder *encoder)
u32 dpa_ctl;
dpa_ctl = I915_READ(DP_A);
- dpa_ctl |= DP_PLL_ENABLE;
+ dpa_ctl &= ~DP_PLL_ENABLE;
I915_WRITE(DP_A, dpa_ctl);
+ POSTING_READ(DP_A);
udelay(200);
}
@@ -857,29 +919,31 @@ static void intel_dp_prepare(struct drm_encoder *encoder)
{
struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
struct drm_device *dev = encoder->dev;
- struct drm_i915_private *dev_priv = dev->dev_private;
- uint32_t dp_reg = I915_READ(intel_dp->output_reg);
- if (IS_eDP(intel_dp)) {
+ if (is_edp(intel_dp)) {
ironlake_edp_backlight_off(dev);
- ironlake_edp_panel_on(dev);
- ironlake_edp_pll_on(encoder);
+ ironlake_edp_panel_on(intel_dp);
+ if (!is_pch_edp(intel_dp))
+ ironlake_edp_pll_on(encoder);
+ else
+ ironlake_edp_pll_off(encoder);
}
- if (dp_reg & DP_PORT_EN)
- intel_dp_link_down(intel_dp);
+ intel_dp_link_down(intel_dp);
}
static void intel_dp_commit(struct drm_encoder *encoder)
{
struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
struct drm_device *dev = encoder->dev;
- struct drm_i915_private *dev_priv = dev->dev_private;
- uint32_t dp_reg = I915_READ(intel_dp->output_reg);
- if (!(dp_reg & DP_PORT_EN)) {
- intel_dp_link_train(intel_dp);
- }
- if (IS_eDP(intel_dp) || IS_PCH_eDP(intel_dp))
+ intel_dp_start_link_train(intel_dp);
+
+ if (is_edp(intel_dp))
+ ironlake_edp_panel_on(intel_dp);
+
+ intel_dp_complete_link_train(intel_dp);
+
+ if (is_edp(intel_dp))
ironlake_edp_backlight_on(dev);
}
@@ -892,22 +956,22 @@ intel_dp_dpms(struct drm_encoder *encoder, int mode)
uint32_t dp_reg = I915_READ(intel_dp->output_reg);
if (mode != DRM_MODE_DPMS_ON) {
- if (IS_eDP(intel_dp) || IS_PCH_eDP(intel_dp)) {
+ if (is_edp(intel_dp))
ironlake_edp_backlight_off(dev);
+ intel_dp_link_down(intel_dp);
+ if (is_edp(intel_dp))
ironlake_edp_panel_off(dev);
- }
- if (dp_reg & DP_PORT_EN)
- intel_dp_link_down(intel_dp);
- if (IS_eDP(intel_dp) || IS_PCH_eDP(intel_dp))
+ if (is_edp(intel_dp) && !is_pch_edp(intel_dp))
ironlake_edp_pll_off(encoder);
} else {
+ if (is_edp(intel_dp))
+ ironlake_edp_panel_on(intel_dp);
if (!(dp_reg & DP_PORT_EN)) {
- if (IS_eDP(intel_dp) || IS_PCH_eDP(intel_dp))
- ironlake_edp_panel_on(dev);
- intel_dp_link_train(intel_dp);
- if (IS_eDP(intel_dp) || IS_PCH_eDP(intel_dp))
- ironlake_edp_backlight_on(dev);
+ intel_dp_start_link_train(intel_dp);
+ intel_dp_complete_link_train(intel_dp);
}
+ if (is_edp(intel_dp))
+ ironlake_edp_backlight_on(dev);
}
intel_dp->dpms_mode = mode;
}
@@ -917,14 +981,13 @@ intel_dp_dpms(struct drm_encoder *encoder, int mode)
* link status information
*/
static bool
-intel_dp_get_link_status(struct intel_dp *intel_dp,
- uint8_t link_status[DP_LINK_STATUS_SIZE])
+intel_dp_get_link_status(struct intel_dp *intel_dp)
{
int ret;
ret = intel_dp_aux_native_read(intel_dp,
DP_LANE0_1_STATUS,
- link_status, DP_LINK_STATUS_SIZE);
+ intel_dp->link_status, DP_LINK_STATUS_SIZE);
if (ret != DP_LINK_STATUS_SIZE)
return false;
return true;
@@ -999,18 +1062,15 @@ intel_dp_pre_emphasis_max(uint8_t voltage_swing)
}
static void
-intel_get_adjust_train(struct intel_dp *intel_dp,
- uint8_t link_status[DP_LINK_STATUS_SIZE],
- int lane_count,
- uint8_t train_set[4])
+intel_get_adjust_train(struct intel_dp *intel_dp)
{
uint8_t v = 0;
uint8_t p = 0;
int lane;
- for (lane = 0; lane < lane_count; lane++) {
- uint8_t this_v = intel_get_adjust_request_voltage(link_status, lane);
- uint8_t this_p = intel_get_adjust_request_pre_emphasis(link_status, lane);
+ for (lane = 0; lane < intel_dp->lane_count; lane++) {
+ uint8_t this_v = intel_get_adjust_request_voltage(intel_dp->link_status, lane);
+ uint8_t this_p = intel_get_adjust_request_pre_emphasis(intel_dp->link_status, lane);
if (this_v > v)
v = this_v;
@@ -1025,7 +1085,7 @@ intel_get_adjust_train(struct intel_dp *intel_dp,
p = intel_dp_pre_emphasis_max(v) | DP_TRAIN_MAX_PRE_EMPHASIS_REACHED;
for (lane = 0; lane < 4; lane++)
- train_set[lane] = v | p;
+ intel_dp->train_set[lane] = v | p;
}
static uint32_t
@@ -1116,18 +1176,18 @@ intel_clock_recovery_ok(uint8_t link_status[DP_LINK_STATUS_SIZE], int lane_count
DP_LANE_CHANNEL_EQ_DONE|\
DP_LANE_SYMBOL_LOCKED)
static bool
-intel_channel_eq_ok(uint8_t link_status[DP_LINK_STATUS_SIZE], int lane_count)
+intel_channel_eq_ok(struct intel_dp *intel_dp)
{
uint8_t lane_align;
uint8_t lane_status;
int lane;
- lane_align = intel_dp_link_status(link_status,
+ lane_align = intel_dp_link_status(intel_dp->link_status,
DP_LANE_ALIGN_STATUS_UPDATED);
if ((lane_align & DP_INTERLANE_ALIGN_DONE) == 0)
return false;
- for (lane = 0; lane < lane_count; lane++) {
- lane_status = intel_get_lane_status(link_status, lane);
+ for (lane = 0; lane < intel_dp->lane_count; lane++) {
+ lane_status = intel_get_lane_status(intel_dp->link_status, lane);
if ((lane_status & CHANNEL_EQ_BITS) != CHANNEL_EQ_BITS)
return false;
}
@@ -1137,10 +1197,9 @@ intel_channel_eq_ok(uint8_t link_status[DP_LINK_STATUS_SIZE], int lane_count)
static bool
intel_dp_set_link_train(struct intel_dp *intel_dp,
uint32_t dp_reg_value,
- uint8_t dp_train_pat,
- uint8_t train_set[4])
+ uint8_t dp_train_pat)
{
- struct drm_device *dev = intel_dp->base.enc.dev;
+ struct drm_device *dev = intel_dp->base.base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
int ret;
@@ -1152,28 +1211,27 @@ intel_dp_set_link_train(struct intel_dp *intel_dp,
dp_train_pat);
ret = intel_dp_aux_native_write(intel_dp,
- DP_TRAINING_LANE0_SET, train_set, 4);
+ DP_TRAINING_LANE0_SET,
+ intel_dp->train_set, 4);
if (ret != 4)
return false;
return true;
}
+/* Enable corresponding port and start training pattern 1 */
static void
-intel_dp_link_train(struct intel_dp *intel_dp)
+intel_dp_start_link_train(struct intel_dp *intel_dp)
{
- struct drm_device *dev = intel_dp->base.enc.dev;
+ struct drm_device *dev = intel_dp->base.base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
- uint8_t train_set[4];
- uint8_t link_status[DP_LINK_STATUS_SIZE];
+ struct intel_crtc *intel_crtc = to_intel_crtc(intel_dp->base.base.crtc);
int i;
uint8_t voltage;
bool clock_recovery = false;
- bool channel_eq = false;
int tries;
u32 reg;
uint32_t DP = intel_dp->DP;
- struct intel_crtc *intel_crtc = to_intel_crtc(intel_dp->base.enc.crtc);
/* Enable output, wait for it to become active */
I915_WRITE(intel_dp->output_reg, intel_dp->DP);
@@ -1186,94 +1244,107 @@ intel_dp_link_train(struct intel_dp *intel_dp)
DP_LINK_CONFIGURATION_SIZE);
DP |= DP_PORT_EN;
- if (HAS_PCH_CPT(dev) && !IS_eDP(intel_dp))
+ if (HAS_PCH_CPT(dev) && !is_edp(intel_dp))
DP &= ~DP_LINK_TRAIN_MASK_CPT;
else
DP &= ~DP_LINK_TRAIN_MASK;
- memset(train_set, 0, 4);
+ memset(intel_dp->train_set, 0, 4);
voltage = 0xff;
tries = 0;
clock_recovery = false;
for (;;) {
- /* Use train_set[0] to set the voltage and pre emphasis values */
+ /* Use intel_dp->train_set[0] to set the voltage and pre emphasis values */
uint32_t signal_levels;
- if (IS_GEN6(dev) && IS_eDP(intel_dp)) {
- signal_levels = intel_gen6_edp_signal_levels(train_set[0]);
+ if (IS_GEN6(dev) && is_edp(intel_dp)) {
+ signal_levels = intel_gen6_edp_signal_levels(intel_dp->train_set[0]);
DP = (DP & ~EDP_LINK_TRAIN_VOL_EMP_MASK_SNB) | signal_levels;
} else {
- signal_levels = intel_dp_signal_levels(train_set[0], intel_dp->lane_count);
+ signal_levels = intel_dp_signal_levels(intel_dp->train_set[0], intel_dp->lane_count);
DP = (DP & ~(DP_VOLTAGE_MASK|DP_PRE_EMPHASIS_MASK)) | signal_levels;
}
- if (HAS_PCH_CPT(dev) && !IS_eDP(intel_dp))
+ if (HAS_PCH_CPT(dev) && !is_edp(intel_dp))
reg = DP | DP_LINK_TRAIN_PAT_1_CPT;
else
reg = DP | DP_LINK_TRAIN_PAT_1;
if (!intel_dp_set_link_train(intel_dp, reg,
- DP_TRAINING_PATTERN_1, train_set))
+ DP_TRAINING_PATTERN_1))
break;
/* Set training pattern 1 */
udelay(100);
- if (!intel_dp_get_link_status(intel_dp, link_status))
+ if (!intel_dp_get_link_status(intel_dp))
break;
- if (intel_clock_recovery_ok(link_status, intel_dp->lane_count)) {
+ if (intel_clock_recovery_ok(intel_dp->link_status, intel_dp->lane_count)) {
clock_recovery = true;
break;
}
/* Check to see if we've tried the max voltage */
for (i = 0; i < intel_dp->lane_count; i++)
- if ((train_set[i] & DP_TRAIN_MAX_SWING_REACHED) == 0)
+ if ((intel_dp->train_set[i] & DP_TRAIN_MAX_SWING_REACHED) == 0)
break;
if (i == intel_dp->lane_count)
break;
/* Check to see if we've tried the same voltage 5 times */
- if ((train_set[0] & DP_TRAIN_VOLTAGE_SWING_MASK) == voltage) {
+ if ((intel_dp->train_set[0] & DP_TRAIN_VOLTAGE_SWING_MASK) == voltage) {
++tries;
if (tries == 5)
break;
} else
tries = 0;
- voltage = train_set[0] & DP_TRAIN_VOLTAGE_SWING_MASK;
+ voltage = intel_dp->train_set[0] & DP_TRAIN_VOLTAGE_SWING_MASK;
- /* Compute new train_set as requested by target */
- intel_get_adjust_train(intel_dp, link_status, intel_dp->lane_count, train_set);
+ /* Compute new intel_dp->train_set as requested by target */
+ intel_get_adjust_train(intel_dp);
}
+ intel_dp->DP = DP;
+}
+
+static void
+intel_dp_complete_link_train(struct intel_dp *intel_dp)
+{
+ struct drm_device *dev = intel_dp->base.base.dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ bool channel_eq = false;
+ int tries;
+ u32 reg;
+ uint32_t DP = intel_dp->DP;
+
/* channel equalization */
tries = 0;
channel_eq = false;
for (;;) {
- /* Use train_set[0] to set the voltage and pre emphasis values */
+ /* Use intel_dp->train_set[0] to set the voltage and pre emphasis values */
uint32_t signal_levels;
- if (IS_GEN6(dev) && IS_eDP(intel_dp)) {
- signal_levels = intel_gen6_edp_signal_levels(train_set[0]);
+ if (IS_GEN6(dev) && is_edp(intel_dp)) {
+ signal_levels = intel_gen6_edp_signal_levels(intel_dp->train_set[0]);
DP = (DP & ~EDP_LINK_TRAIN_VOL_EMP_MASK_SNB) | signal_levels;
} else {
- signal_levels = intel_dp_signal_levels(train_set[0], intel_dp->lane_count);
+ signal_levels = intel_dp_signal_levels(intel_dp->train_set[0], intel_dp->lane_count);
DP = (DP & ~(DP_VOLTAGE_MASK|DP_PRE_EMPHASIS_MASK)) | signal_levels;
}
- if (HAS_PCH_CPT(dev) && !IS_eDP(intel_dp))
+ if (HAS_PCH_CPT(dev) && !is_edp(intel_dp))
reg = DP | DP_LINK_TRAIN_PAT_2_CPT;
else
reg = DP | DP_LINK_TRAIN_PAT_2;
/* channel eq pattern */
if (!intel_dp_set_link_train(intel_dp, reg,
- DP_TRAINING_PATTERN_2, train_set))
+ DP_TRAINING_PATTERN_2))
break;
udelay(400);
- if (!intel_dp_get_link_status(intel_dp, link_status))
+ if (!intel_dp_get_link_status(intel_dp))
break;
- if (intel_channel_eq_ok(link_status, intel_dp->lane_count)) {
+ if (intel_channel_eq_ok(intel_dp)) {
channel_eq = true;
break;
}
@@ -1282,12 +1353,12 @@ intel_dp_link_train(struct intel_dp *intel_dp)
if (tries > 5)
break;
- /* Compute new train_set as requested by target */
- intel_get_adjust_train(intel_dp, link_status, intel_dp->lane_count, train_set);
+ /* Compute new intel_dp->train_set as requested by target */
+ intel_get_adjust_train(intel_dp);
++tries;
}
- if (HAS_PCH_CPT(dev) && !IS_eDP(intel_dp))
+ if (HAS_PCH_CPT(dev) && !is_edp(intel_dp))
reg = DP | DP_LINK_TRAIN_OFF_CPT;
else
reg = DP | DP_LINK_TRAIN_OFF;
@@ -1301,33 +1372,57 @@ intel_dp_link_train(struct intel_dp *intel_dp)
static void
intel_dp_link_down(struct intel_dp *intel_dp)
{
- struct drm_device *dev = intel_dp->base.enc.dev;
+ struct drm_device *dev = intel_dp->base.base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
uint32_t DP = intel_dp->DP;
+ if ((I915_READ(intel_dp->output_reg) & DP_PORT_EN) == 0)
+ return;
+
DRM_DEBUG_KMS("\n");
- if (IS_eDP(intel_dp)) {
+ if (is_edp(intel_dp)) {
DP &= ~DP_PLL_ENABLE;
I915_WRITE(intel_dp->output_reg, DP);
POSTING_READ(intel_dp->output_reg);
udelay(100);
}
- if (HAS_PCH_CPT(dev) && !IS_eDP(intel_dp)) {
+ if (HAS_PCH_CPT(dev) && !is_edp(intel_dp)) {
DP &= ~DP_LINK_TRAIN_MASK_CPT;
I915_WRITE(intel_dp->output_reg, DP | DP_LINK_TRAIN_PAT_IDLE_CPT);
- POSTING_READ(intel_dp->output_reg);
} else {
DP &= ~DP_LINK_TRAIN_MASK;
I915_WRITE(intel_dp->output_reg, DP | DP_LINK_TRAIN_PAT_IDLE);
- POSTING_READ(intel_dp->output_reg);
}
+ POSTING_READ(intel_dp->output_reg);
- udelay(17000);
+ msleep(17);
- if (IS_eDP(intel_dp))
+ if (is_edp(intel_dp))
DP |= DP_LINK_TRAIN_OFF;
+
+ if (!HAS_PCH_CPT(dev) &&
+ I915_READ(intel_dp->output_reg) & DP_PIPEB_SELECT) {
+ struct intel_crtc *intel_crtc = to_intel_crtc(intel_dp->base.base.crtc);
+ /* Hardware workaround: leaving our transcoder select
+ * set to transcoder B while it's off will prevent the
+ * corresponding HDMI output on transcoder A from working.
+ *
+ * Combine this with another hardware workaround:
+ * transcoder select bit can only be cleared while the
+ * port is enabled.
+ */
+ DP &= ~DP_PIPEB_SELECT;
+ I915_WRITE(intel_dp->output_reg, DP);
+
+ /* Changes to the enable or select bits take effect on the vblank
+ * after they are written.
+ */
+ intel_wait_for_vblank(intel_dp->base.base.dev,
+ intel_crtc->pipe);
+ }
+
I915_WRITE(intel_dp->output_reg, DP & ~DP_PORT_EN);
POSTING_READ(intel_dp->output_reg);
}
@@ -1344,32 +1439,34 @@ intel_dp_link_down(struct intel_dp *intel_dp)
static void
intel_dp_check_link_status(struct intel_dp *intel_dp)
{
- uint8_t link_status[DP_LINK_STATUS_SIZE];
-
- if (!intel_dp->base.enc.crtc)
+ if (!intel_dp->base.base.crtc)
return;
- if (!intel_dp_get_link_status(intel_dp, link_status)) {
+ if (!intel_dp_get_link_status(intel_dp)) {
intel_dp_link_down(intel_dp);
return;
}
- if (!intel_channel_eq_ok(link_status, intel_dp->lane_count))
- intel_dp_link_train(intel_dp);
+ if (!intel_channel_eq_ok(intel_dp)) {
+ intel_dp_start_link_train(intel_dp);
+ intel_dp_complete_link_train(intel_dp);
+ }
}
static enum drm_connector_status
-ironlake_dp_detect(struct drm_connector *connector)
+ironlake_dp_detect(struct intel_dp *intel_dp)
{
- struct drm_encoder *encoder = intel_attached_encoder(connector);
- struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
enum drm_connector_status status;
+ /* Can't disconnect eDP */
+ if (is_edp(intel_dp))
+ return connector_status_connected;
+
status = connector_status_disconnected;
if (intel_dp_aux_native_read(intel_dp,
0x000, intel_dp->dpcd,
- sizeof (intel_dp->dpcd)) == sizeof (intel_dp->dpcd))
- {
+ sizeof (intel_dp->dpcd))
+ == sizeof(intel_dp->dpcd)) {
if (intel_dp->dpcd[0] != 0)
status = connector_status_connected;
}
@@ -1378,26 +1475,13 @@ ironlake_dp_detect(struct drm_connector *connector)
return status;
}
-/**
- * Uses CRT_HOTPLUG_EN and CRT_HOTPLUG_STAT to detect DP connection.
- *
- * \return true if DP port is connected.
- * \return false if DP port is disconnected.
- */
static enum drm_connector_status
-intel_dp_detect(struct drm_connector *connector, bool force)
+g4x_dp_detect(struct intel_dp *intel_dp)
{
- struct drm_encoder *encoder = intel_attached_encoder(connector);
- struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
- struct drm_device *dev = intel_dp->base.enc.dev;
+ struct drm_device *dev = intel_dp->base.base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
- uint32_t temp, bit;
enum drm_connector_status status;
-
- intel_dp->has_audio = false;
-
- if (HAS_PCH_SPLIT(dev))
- return ironlake_dp_detect(connector);
+ uint32_t temp, bit;
switch (intel_dp->output_reg) {
case DP_B:
@@ -1419,31 +1503,66 @@ intel_dp_detect(struct drm_connector *connector, bool force)
return connector_status_disconnected;
status = connector_status_disconnected;
- if (intel_dp_aux_native_read(intel_dp,
- 0x000, intel_dp->dpcd,
+ if (intel_dp_aux_native_read(intel_dp, 0x000, intel_dp->dpcd,
sizeof (intel_dp->dpcd)) == sizeof (intel_dp->dpcd))
{
if (intel_dp->dpcd[0] != 0)
status = connector_status_connected;
}
+
return status;
}
+/**
+ * Uses CRT_HOTPLUG_EN and CRT_HOTPLUG_STAT to detect DP connection.
+ *
+ * \return connector_status_connected if a DP sink is detected.
+ * \return connector_status_disconnected otherwise.
+ */
+static enum drm_connector_status
+intel_dp_detect(struct drm_connector *connector, bool force)
+{
+ struct intel_dp *intel_dp = intel_attached_dp(connector);
+ struct drm_device *dev = intel_dp->base.base.dev;
+ enum drm_connector_status status;
+ struct edid *edid = NULL;
+
+ intel_dp->has_audio = false;
+
+ if (HAS_PCH_SPLIT(dev))
+ status = ironlake_dp_detect(intel_dp);
+ else
+ status = g4x_dp_detect(intel_dp);
+ if (status != connector_status_connected)
+ return status;
+
+ if (intel_dp->force_audio) {
+ intel_dp->has_audio = intel_dp->force_audio > 0;
+ } else {
+ edid = drm_get_edid(connector, &intel_dp->adapter);
+ if (edid) {
+ intel_dp->has_audio = drm_detect_monitor_audio(edid);
+ connector->display_info.raw_edid = NULL;
+ kfree(edid);
+ }
+ }
+
+ return connector_status_connected;
+}
+
static int intel_dp_get_modes(struct drm_connector *connector)
{
- struct drm_encoder *encoder = intel_attached_encoder(connector);
- struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
- struct drm_device *dev = intel_dp->base.enc.dev;
+ struct intel_dp *intel_dp = intel_attached_dp(connector);
+ struct drm_device *dev = intel_dp->base.base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
int ret;
/* We should parse the EDID data and find out if it has an audio sink
*/
- ret = intel_ddc_get_modes(connector, intel_dp->base.ddc_bus);
+ ret = intel_ddc_get_modes(connector, &intel_dp->adapter);
if (ret) {
- if ((IS_eDP(intel_dp) || IS_PCH_eDP(intel_dp)) &&
- !dev_priv->panel_fixed_mode) {
+ if (is_edp(intel_dp) && !dev_priv->panel_fixed_mode) {
struct drm_display_mode *newmode;
list_for_each_entry(newmode, &connector->probed_modes,
head) {
@@ -1459,7 +1578,7 @@ static int intel_dp_get_modes(struct drm_connector *connector)
}
/* if eDP has no EDID, try to use fixed panel mode from VBT */
- if (IS_eDP(intel_dp) || IS_PCH_eDP(intel_dp)) {
+ if (is_edp(intel_dp)) {
if (dev_priv->panel_fixed_mode != NULL) {
struct drm_display_mode *mode;
mode = drm_mode_duplicate(dev, dev_priv->panel_fixed_mode);
@@ -1470,6 +1589,46 @@ static int intel_dp_get_modes(struct drm_connector *connector)
return 0;
}
+static int
+intel_dp_set_property(struct drm_connector *connector,
+ struct drm_property *property,
+ uint64_t val)
+{
+ struct intel_dp *intel_dp = intel_attached_dp(connector);
+ int ret;
+
+ ret = drm_connector_property_set_value(connector, property, val);
+ if (ret)
+ return ret;
+
+ if (property == intel_dp->force_audio_property) {
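+ /* force_audio: -1 forces audio off, 1 forces it on, 0 falls back to EDID-based detection. */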
+ if (val == intel_dp->force_audio)
+ return 0;
+
+ intel_dp->force_audio = val;
+
+ if (val > 0 && intel_dp->has_audio)
+ return 0;
+ if (val < 0 && !intel_dp->has_audio)
+ return 0;
+
+ intel_dp->has_audio = val > 0;
+ goto done;
+ }
+
+ return -EINVAL;
+
+done:
+ if (intel_dp->base.base.crtc) {
+ struct drm_crtc *crtc = intel_dp->base.base.crtc;
+ drm_crtc_helper_set_mode(crtc, &crtc->mode,
+ crtc->x, crtc->y,
+ crtc->fb);
+ }
+
+ return 0;
+}
+
static void
intel_dp_destroy (struct drm_connector *connector)
{
@@ -1478,6 +1637,15 @@ intel_dp_destroy (struct drm_connector *connector)
kfree(connector);
}
+static void intel_dp_encoder_destroy(struct drm_encoder *encoder)
+{
+ struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
+
+ i2c_del_adapter(&intel_dp->adapter);
+ drm_encoder_cleanup(encoder);
+ kfree(intel_dp);
+}
+
static const struct drm_encoder_helper_funcs intel_dp_helper_funcs = {
.dpms = intel_dp_dpms,
.mode_fixup = intel_dp_mode_fixup,
@@ -1490,20 +1658,21 @@ static const struct drm_connector_funcs intel_dp_connector_funcs = {
.dpms = drm_helper_connector_dpms,
.detect = intel_dp_detect,
.fill_modes = drm_helper_probe_single_connector_modes,
+ .set_property = intel_dp_set_property,
.destroy = intel_dp_destroy,
};
static const struct drm_connector_helper_funcs intel_dp_connector_helper_funcs = {
.get_modes = intel_dp_get_modes,
.mode_valid = intel_dp_mode_valid,
- .best_encoder = intel_attached_encoder,
+ .best_encoder = intel_best_encoder,
};
static const struct drm_encoder_funcs intel_dp_enc_funcs = {
- .destroy = intel_encoder_destroy,
+ .destroy = intel_dp_encoder_destroy,
};
-void
+static void
intel_dp_hot_plug(struct intel_encoder *intel_encoder)
{
struct intel_dp *intel_dp = container_of(intel_encoder, struct intel_dp, base);
@@ -1554,6 +1723,20 @@ bool intel_dpd_is_edp(struct drm_device *dev)
return false;
}
+static void
+intel_dp_add_properties(struct intel_dp *intel_dp, struct drm_connector *connector)
+{
+ struct drm_device *dev = connector->dev;
+
+ intel_dp->force_audio_property =
+ drm_property_create(dev, DRM_MODE_PROP_RANGE, "force_audio", 2);
+ if (intel_dp->force_audio_property) {
+ intel_dp->force_audio_property->values[0] = -1;
+ intel_dp->force_audio_property->values[1] = 1;
+ drm_connector_attach_property(connector, intel_dp->force_audio_property, 0);
+ }
+}
+
void
intel_dp_init(struct drm_device *dev, int output_reg)
{
@@ -1580,7 +1763,7 @@ intel_dp_init(struct drm_device *dev, int output_reg)
if (intel_dpd_is_edp(dev))
intel_dp->is_pch_edp = true;
- if (output_reg == DP_A || IS_PCH_eDP(intel_dp)) {
+ if (output_reg == DP_A || is_pch_edp(intel_dp)) {
type = DRM_MODE_CONNECTOR_eDP;
intel_encoder->type = INTEL_OUTPUT_EDP;
} else {
@@ -1601,7 +1784,7 @@ intel_dp_init(struct drm_device *dev, int output_reg)
else if (output_reg == DP_D || output_reg == PCH_DP_D)
intel_encoder->clone_mask = (1 << INTEL_DP_D_CLONE_BIT);
- if (IS_eDP(intel_dp))
+ if (is_edp(intel_dp))
intel_encoder->clone_mask = (1 << INTEL_EDP_CLONE_BIT);
intel_encoder->crtc_mask = (1 << 0) | (1 << 1);
@@ -1612,12 +1795,11 @@ intel_dp_init(struct drm_device *dev, int output_reg)
intel_dp->has_audio = false;
intel_dp->dpms_mode = DRM_MODE_DPMS_ON;
- drm_encoder_init(dev, &intel_encoder->enc, &intel_dp_enc_funcs,
+ drm_encoder_init(dev, &intel_encoder->base, &intel_dp_enc_funcs,
DRM_MODE_ENCODER_TMDS);
- drm_encoder_helper_add(&intel_encoder->enc, &intel_dp_helper_funcs);
+ drm_encoder_helper_add(&intel_encoder->base, &intel_dp_helper_funcs);
- drm_mode_connector_attach_encoder(&intel_connector->base,
- &intel_encoder->enc);
+ intel_connector_attach_encoder(intel_connector, intel_encoder);
drm_sysfs_connector_add(connector);
/* Set up the DDC bus. */
@@ -1647,10 +1829,29 @@ intel_dp_init(struct drm_device *dev, int output_reg)
intel_dp_i2c_init(intel_dp, intel_connector, name);
- intel_encoder->ddc_bus = &intel_dp->adapter;
+ /* Cache some DPCD data in the eDP case */
+ if (is_edp(intel_dp)) {
+ int ret;
+ bool was_on;
+
+ was_on = ironlake_edp_panel_on(intel_dp);
+ ret = intel_dp_aux_native_read(intel_dp, DP_DPCD_REV,
+ intel_dp->dpcd,
+ sizeof(intel_dp->dpcd));
+ if (ret == sizeof(intel_dp->dpcd)) {
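+ /* DPCD revision 1.1 and newer report in byte 3 (MAX_DOWNSPREAD) whether link training may skip the AUX handshake. */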
+ if (intel_dp->dpcd[0] >= 0x11)
+ dev_priv->no_aux_handshake = intel_dp->dpcd[3] &
+ DP_NO_AUX_HANDSHAKE_LINK_TRAINING;
+ } else {
+ DRM_ERROR("failed to retrieve link info\n");
+ }
+ if (!was_on)
+ ironlake_edp_panel_off(dev);
+ }
+
intel_encoder->hot_plug = intel_dp_hot_plug;
- if (output_reg == DP_A || IS_PCH_eDP(intel_dp)) {
+ if (is_edp(intel_dp)) {
/* initialize panel mode from VBT if available for eDP */
if (dev_priv->lfp_lvds_vbt_mode) {
dev_priv->panel_fixed_mode =
@@ -1662,6 +1863,8 @@ intel_dp_init(struct drm_device *dev, int output_reg)
}
}
+ intel_dp_add_properties(intel_dp, connector);
+
/* For G4X desktop chip, PEG_BAND_GAP_DATA 3:0 must first be written
* 0xd. Failure to do so will result in spurious interrupts being
* generated on the port when a cable is not attached.
diff --git a/drivers/gpu/drm/i915/intel_drv.h b/drivers/gpu/drm/i915/intel_drv.h
index 8828b3ac6414..e52c6125bb1f 100644
--- a/drivers/gpu/drm/i915/intel_drv.h
+++ b/drivers/gpu/drm/i915/intel_drv.h
@@ -26,14 +26,12 @@
#define __INTEL_DRV_H__
#include <linux/i2c.h>
-#include <linux/i2c-id.h>
-#include <linux/i2c-algo-bit.h>
#include "i915_drv.h"
#include "drm_crtc.h"
-
#include "drm_crtc_helper.h"
+#include "drm_fb_helper.h"
-#define wait_for(COND, MS, W) ({ \
+#define _wait_for(COND, MS, W) ({ \
unsigned long timeout__ = jiffies + msecs_to_jiffies(MS); \
int ret__ = 0; \
while (! (COND)) { \
@@ -41,11 +39,24 @@
ret__ = -ETIMEDOUT; \
break; \
} \
- if (W) msleep(W); \
+ if (W && !in_dbg_master()) msleep(W); \
} \
ret__; \
})
+#define wait_for(COND, MS) _wait_for(COND, MS, 1)
+#define wait_for_atomic(COND, MS) _wait_for(COND, MS, 0)
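+/* For reference: wait_for(I915_READ(PP_STATUS) & PP_ON, 1000) polls the
+ * condition for up to 1000 ms, sleeping ~1 ms between reads, while
+ * wait_for_atomic() spins without sleeping for contexts where msleep()
+ * is not allowed.
+ */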
+
+#define MSLEEP(x) do { \
+ if (in_dbg_master()) \
+ mdelay(x); \
+ else \
+ msleep(x); \
+} while(0)
+
+#define KHz(x) (1000*x)
+#define MHz(x) KHz(1000*x)
+
/*
* Display related stuff
*/
@@ -96,24 +107,39 @@
#define INTEL_DVO_CHIP_TMDS 2
#define INTEL_DVO_CHIP_TVOUT 4
-struct intel_i2c_chan {
- struct drm_device *drm_dev; /* for getting at dev. private (mmio etc.) */
- u32 reg; /* GPIO reg */
- struct i2c_adapter adapter;
- struct i2c_algo_bit_data algo;
-};
+/* drm_display_mode->private_flags */
+#define INTEL_MODE_PIXEL_MULTIPLIER_SHIFT (0x0)
+#define INTEL_MODE_PIXEL_MULTIPLIER_MASK (0xf << INTEL_MODE_PIXEL_MULTIPLIER_SHIFT)
+
+static inline void
+intel_mode_set_pixel_multiplier(struct drm_display_mode *mode,
+ int multiplier)
+{
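+ /* The multiplier field starts at bit 0, so the raw value can be OR-ed straight into private_flags; the adjusted dot clock is scaled to match. */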
+ mode->clock *= multiplier;
+ mode->private_flags |= multiplier;
+}
+
+static inline int
+intel_mode_get_pixel_multiplier(const struct drm_display_mode *mode)
+{
+ return (mode->private_flags & INTEL_MODE_PIXEL_MULTIPLIER_MASK) >> INTEL_MODE_PIXEL_MULTIPLIER_SHIFT;
+}
struct intel_framebuffer {
struct drm_framebuffer base;
struct drm_gem_object *obj;
};
+struct intel_fbdev {
+ struct drm_fb_helper helper;
+ struct intel_framebuffer ifb;
+ struct list_head fbdev_list;
+ struct drm_display_mode *our_mode;
+};
struct intel_encoder {
- struct drm_encoder enc;
+ struct drm_encoder base;
int type;
- struct i2c_adapter *i2c_bus;
- struct i2c_adapter *ddc_bus;
bool load_detect_temp;
bool needs_tv_clock;
void (*hot_plug)(struct intel_encoder *);
@@ -123,32 +149,7 @@ struct intel_encoder {
struct intel_connector {
struct drm_connector base;
-};
-
-struct intel_crtc;
-struct intel_overlay {
- struct drm_device *dev;
- struct intel_crtc *crtc;
- struct drm_i915_gem_object *vid_bo;
- struct drm_i915_gem_object *old_vid_bo;
- int active;
- int pfit_active;
- u32 pfit_vscale_ratio; /* shifted-point number, (1<<12) == 1.0 */
- u32 color_key;
- u32 brightness, contrast, saturation;
- u32 old_xscale, old_yscale;
- /* register access */
- u32 flip_addr;
- struct drm_i915_gem_object *reg_bo;
- void *virt_addr;
- /* flip handling */
- uint32_t last_flip_req;
- int hw_wedged;
-#define HW_WEDGED 1
-#define NEEDS_WAIT_FOR_FLIP 2
-#define RELEASE_OLD_VID 3
-#define SWITCH_OFF_STAGE_1 4
-#define SWITCH_OFF_STAGE_2 5
+ struct intel_encoder *encoder;
};
struct intel_crtc {
@@ -157,6 +158,7 @@ struct intel_crtc {
enum plane plane;
u8 lut_r[256], lut_g[256], lut_b[256];
int dpms_mode;
+ bool active; /* is the crtc on? independent of the dpms mode */
bool busy; /* is scanout buffer being updated frequently? */
struct timer_list idle_timer;
bool lowfreq_avail;
@@ -168,14 +170,53 @@ struct intel_crtc {
uint32_t cursor_addr;
int16_t cursor_x, cursor_y;
int16_t cursor_width, cursor_height;
- bool cursor_visible, cursor_on;
+ bool cursor_visible;
};
#define to_intel_crtc(x) container_of(x, struct intel_crtc, base)
#define to_intel_connector(x) container_of(x, struct intel_connector, base)
-#define enc_to_intel_encoder(x) container_of(x, struct intel_encoder, enc)
+#define to_intel_encoder(x) container_of(x, struct intel_encoder, base)
#define to_intel_framebuffer(x) container_of(x, struct intel_framebuffer, base)
+#define DIP_TYPE_AVI 0x82
+#define DIP_VERSION_AVI 0x2
+#define DIP_LEN_AVI 13
+
+struct dip_infoframe {
+ uint8_t type; /* HB0 */
+ uint8_t ver; /* HB1 */
+ uint8_t len; /* HB2 - body len, not including checksum */
+ uint8_t ecc; /* Header ECC */
+ uint8_t checksum; /* PB0 */
+ union {
+ struct {
+ /* PB1 - Y 6:5, A 4:4, B 3:2, S 1:0 */
+ uint8_t Y_A_B_S;
+ /* PB2 - C 7:6, M 5:4, R 3:0 */
+ uint8_t C_M_R;
+ /* PB3 - ITC 7:7, EC 6:4, Q 3:2, SC 1:0 */
+ uint8_t ITC_EC_Q_SC;
+ /* PB4 - VIC 6:0 */
+ uint8_t VIC;
+ /* PB5 - PR 3:0 */
+ uint8_t PR;
+ /* PB6 to PB13 */
+ uint16_t top_bar_end;
+ uint16_t bottom_bar_start;
+ uint16_t left_bar_end;
+ uint16_t right_bar_start;
+ } avi;
+ uint8_t payload[27];
+ } __attribute__ ((packed)) body;
+} __attribute__((packed));
+
+static inline struct drm_crtc *
+intel_get_crtc_for_pipe(struct drm_device *dev, int pipe)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ return dev_priv->pipe_to_crtc_mapping[pipe];
+}
+
struct intel_unpin_work {
struct work_struct work;
struct drm_device *dev;
@@ -186,51 +227,56 @@ struct intel_unpin_work {
bool enable_stall_check;
};
-struct i2c_adapter *intel_i2c_create(struct drm_device *dev, const u32 reg,
- const char *name);
-void intel_i2c_destroy(struct i2c_adapter *adapter);
int intel_ddc_get_modes(struct drm_connector *c, struct i2c_adapter *adapter);
-extern bool intel_ddc_probe(struct intel_encoder *intel_encoder);
-void intel_i2c_quirk_set(struct drm_device *dev, bool enable);
-void intel_i2c_reset_gmbus(struct drm_device *dev);
+extern bool intel_ddc_probe(struct intel_encoder *intel_encoder, int ddc_bus);
extern void intel_crt_init(struct drm_device *dev);
extern void intel_hdmi_init(struct drm_device *dev, int sdvox_reg);
+void intel_dip_infoframe_csum(struct dip_infoframe *avi_if);
extern bool intel_sdvo_init(struct drm_device *dev, int output_device);
extern void intel_dvo_init(struct drm_device *dev);
extern void intel_tv_init(struct drm_device *dev);
extern void intel_mark_busy(struct drm_device *dev, struct drm_gem_object *obj);
-extern void intel_lvds_init(struct drm_device *dev);
+extern bool intel_lvds_init(struct drm_device *dev);
extern void intel_dp_init(struct drm_device *dev, int dp_reg);
void
intel_dp_set_m_n(struct drm_crtc *crtc, struct drm_display_mode *mode,
struct drm_display_mode *adjusted_mode);
-extern bool intel_pch_has_edp(struct drm_crtc *crtc);
extern bool intel_dpd_is_edp(struct drm_device *dev);
extern void intel_edp_link_config (struct intel_encoder *, int *, int *);
+extern bool intel_encoder_is_pch_edp(struct drm_encoder *encoder);
-
+/* intel_panel.c */
extern void intel_fixed_panel_mode(struct drm_display_mode *fixed_mode,
struct drm_display_mode *adjusted_mode);
extern void intel_pch_panel_fitting(struct drm_device *dev,
int fitting_mode,
struct drm_display_mode *mode,
struct drm_display_mode *adjusted_mode);
+extern u32 intel_panel_get_max_backlight(struct drm_device *dev);
+extern u32 intel_panel_get_backlight(struct drm_device *dev);
+extern void intel_panel_set_backlight(struct drm_device *dev, u32 level);
-extern int intel_panel_fitter_pipe (struct drm_device *dev);
extern void intel_crtc_load_lut(struct drm_crtc *crtc);
extern void intel_encoder_prepare (struct drm_encoder *encoder);
extern void intel_encoder_commit (struct drm_encoder *encoder);
extern void intel_encoder_destroy(struct drm_encoder *encoder);
-extern struct drm_encoder *intel_attached_encoder(struct drm_connector *connector);
+static inline struct intel_encoder *intel_attached_encoder(struct drm_connector *connector)
+{
+ return to_intel_connector(connector)->encoder;
+}
+
+extern void intel_connector_attach_encoder(struct intel_connector *connector,
+ struct intel_encoder *encoder);
+extern struct drm_encoder *intel_best_encoder(struct drm_connector *connector);
extern struct drm_display_mode *intel_crtc_mode_get(struct drm_device *dev,
struct drm_crtc *crtc);
int intel_get_pipe_from_crtc_id(struct drm_device *dev, void *data,
struct drm_file *file_priv);
extern void intel_wait_for_vblank(struct drm_device *dev, int pipe);
-extern struct drm_crtc *intel_get_crtc_from_pipe(struct drm_device *dev, int pipe);
+extern void intel_wait_for_pipe_off(struct drm_device *dev, int pipe);
extern struct drm_crtc *intel_get_load_detect_pipe(struct intel_encoder *intel_encoder,
struct drm_connector *connector,
struct drm_display_mode *mode,
@@ -250,9 +296,11 @@ extern void intel_crtc_fb_gamma_get(struct drm_crtc *crtc, u16 *red, u16 *green,
extern void intel_init_clock_gating(struct drm_device *dev);
extern void ironlake_enable_drps(struct drm_device *dev);
extern void ironlake_disable_drps(struct drm_device *dev);
+extern void intel_init_emon(struct drm_device *dev);
extern int intel_pin_and_fence_fb_obj(struct drm_device *dev,
- struct drm_gem_object *obj);
+ struct drm_gem_object *obj,
+ bool pipelined);
extern int intel_framebuffer_init(struct drm_device *dev,
struct intel_framebuffer *ifb,
@@ -267,9 +315,8 @@ extern void intel_finish_page_flip_plane(struct drm_device *dev, int plane);
extern void intel_setup_overlay(struct drm_device *dev);
extern void intel_cleanup_overlay(struct drm_device *dev);
-extern int intel_overlay_switch_off(struct intel_overlay *overlay);
-extern int intel_overlay_recover_from_interrupt(struct intel_overlay *overlay,
- int interruptible);
+extern int intel_overlay_switch_off(struct intel_overlay *overlay,
+ bool interruptible);
extern int intel_overlay_put_image(struct drm_device *dev, void *data,
struct drm_file *file_priv);
extern int intel_overlay_attrs(struct drm_device *dev, void *data,
diff --git a/drivers/gpu/drm/i915/intel_dvo.c b/drivers/gpu/drm/i915/intel_dvo.c
index 7c9ec1472d46..ea373283c93b 100644
--- a/drivers/gpu/drm/i915/intel_dvo.c
+++ b/drivers/gpu/drm/i915/intel_dvo.c
@@ -72,7 +72,7 @@ static const struct intel_dvo_device intel_dvo_devices[] = {
.name = "ch7017",
.dvo_reg = DVOC,
.slave_addr = 0x75,
- .gpio = GPIOE,
+ .gpio = GMBUS_PORT_DPB,
.dev_ops = &ch7017_ops,
}
};
@@ -88,7 +88,13 @@ struct intel_dvo {
static struct intel_dvo *enc_to_intel_dvo(struct drm_encoder *encoder)
{
- return container_of(enc_to_intel_encoder(encoder), struct intel_dvo, base);
+ return container_of(encoder, struct intel_dvo, base.base);
+}
+
+static struct intel_dvo *intel_attached_dvo(struct drm_connector *connector)
+{
+ return container_of(intel_attached_encoder(connector),
+ struct intel_dvo, base);
}
static void intel_dvo_dpms(struct drm_encoder *encoder, int mode)
@@ -112,8 +118,7 @@ static void intel_dvo_dpms(struct drm_encoder *encoder, int mode)
static int intel_dvo_mode_valid(struct drm_connector *connector,
struct drm_display_mode *mode)
{
- struct drm_encoder *encoder = intel_attached_encoder(connector);
- struct intel_dvo *intel_dvo = enc_to_intel_dvo(encoder);
+ struct intel_dvo *intel_dvo = intel_attached_dvo(connector);
if (mode->flags & DRM_MODE_FLAG_DBLSCAN)
return MODE_NO_DBLESCAN;
@@ -224,23 +229,22 @@ static void intel_dvo_mode_set(struct drm_encoder *encoder,
static enum drm_connector_status
intel_dvo_detect(struct drm_connector *connector, bool force)
{
- struct drm_encoder *encoder = intel_attached_encoder(connector);
- struct intel_dvo *intel_dvo = enc_to_intel_dvo(encoder);
-
+ struct intel_dvo *intel_dvo = intel_attached_dvo(connector);
return intel_dvo->dev.dev_ops->detect(&intel_dvo->dev);
}
static int intel_dvo_get_modes(struct drm_connector *connector)
{
- struct drm_encoder *encoder = intel_attached_encoder(connector);
- struct intel_dvo *intel_dvo = enc_to_intel_dvo(encoder);
+ struct intel_dvo *intel_dvo = intel_attached_dvo(connector);
+ struct drm_i915_private *dev_priv = connector->dev->dev_private;
/* We should probably have an i2c driver get_modes function for those
* devices which will have a fixed set of modes determined by the chip
* (TV-out, for example), but for now with just TMDS and LVDS,
* that's not the case.
*/
- intel_ddc_get_modes(connector, intel_dvo->base.ddc_bus);
+ intel_ddc_get_modes(connector,
+ &dev_priv->gmbus[GMBUS_PORT_DPC].adapter);
if (!list_empty(&connector->probed_modes))
return 1;
@@ -281,7 +285,7 @@ static const struct drm_connector_funcs intel_dvo_connector_funcs = {
static const struct drm_connector_helper_funcs intel_dvo_connector_helper_funcs = {
.mode_valid = intel_dvo_mode_valid,
.get_modes = intel_dvo_get_modes,
- .best_encoder = intel_attached_encoder,
+ .best_encoder = intel_best_encoder,
};
static void intel_dvo_enc_destroy(struct drm_encoder *encoder)
@@ -311,8 +315,7 @@ intel_dvo_get_current_mode(struct drm_connector *connector)
{
struct drm_device *dev = connector->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
- struct drm_encoder *encoder = intel_attached_encoder(connector);
- struct intel_dvo *intel_dvo = enc_to_intel_dvo(encoder);
+ struct intel_dvo *intel_dvo = intel_attached_dvo(connector);
uint32_t dvo_val = I915_READ(intel_dvo->dev.dvo_reg);
struct drm_display_mode *mode = NULL;
@@ -323,7 +326,7 @@ intel_dvo_get_current_mode(struct drm_connector *connector)
struct drm_crtc *crtc;
int pipe = (dvo_val & DVO_PIPE_B_SELECT) ? 1 : 0;
- crtc = intel_get_crtc_from_pipe(dev, pipe);
+ crtc = intel_get_crtc_for_pipe(dev, pipe);
if (crtc) {
mode = intel_crtc_mode_get(dev, crtc);
if (mode) {
@@ -341,11 +344,10 @@ intel_dvo_get_current_mode(struct drm_connector *connector)
void intel_dvo_init(struct drm_device *dev)
{
+ struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_encoder *intel_encoder;
struct intel_dvo *intel_dvo;
struct intel_connector *intel_connector;
- struct i2c_adapter *i2cbus = NULL;
- int ret = 0;
int i;
int encoder_type = DRM_MODE_ENCODER_NONE;
@@ -360,16 +362,14 @@ void intel_dvo_init(struct drm_device *dev)
}
intel_encoder = &intel_dvo->base;
-
- /* Set up the DDC bus */
- intel_encoder->ddc_bus = intel_i2c_create(dev, GPIOD, "DVODDC_D");
- if (!intel_encoder->ddc_bus)
- goto free_intel;
+ drm_encoder_init(dev, &intel_encoder->base,
+ &intel_dvo_enc_funcs, encoder_type);
/* Now, try to find a controller */
for (i = 0; i < ARRAY_SIZE(intel_dvo_devices); i++) {
struct drm_connector *connector = &intel_connector->base;
const struct intel_dvo_device *dvo = &intel_dvo_devices[i];
+ struct i2c_adapter *i2c;
int gpio;
/* Allow the I2C driver info to specify the GPIO to be used in
@@ -379,24 +379,18 @@ void intel_dvo_init(struct drm_device *dev)
if (dvo->gpio != 0)
gpio = dvo->gpio;
else if (dvo->type == INTEL_DVO_CHIP_LVDS)
- gpio = GPIOB;
+ gpio = GMBUS_PORT_SSC;
else
- gpio = GPIOE;
+ gpio = GMBUS_PORT_DPB;
/* Set up the I2C bus necessary for the chip we're probing.
* It appears that everything is on GPIOE except for panels
* on i830 laptops, which are on GPIOB (DVOA).
*/
- if (i2cbus != NULL)
- intel_i2c_destroy(i2cbus);
- if (!(i2cbus = intel_i2c_create(dev, gpio,
- gpio == GPIOB ? "DVOI2C_B" : "DVOI2C_E"))) {
- continue;
- }
+ i2c = &dev_priv->gmbus[gpio].adapter;
intel_dvo->dev = *dvo;
- ret = dvo->dev_ops->init(&intel_dvo->dev, i2cbus);
- if (!ret)
+ if (!dvo->dev_ops->init(&intel_dvo->dev, i2c))
continue;
intel_encoder->type = INTEL_OUTPUT_DVO;
@@ -427,13 +421,10 @@ void intel_dvo_init(struct drm_device *dev)
connector->interlace_allowed = false;
connector->doublescan_allowed = false;
- drm_encoder_init(dev, &intel_encoder->enc,
- &intel_dvo_enc_funcs, encoder_type);
- drm_encoder_helper_add(&intel_encoder->enc,
+ drm_encoder_helper_add(&intel_encoder->base,
&intel_dvo_helper_funcs);
- drm_mode_connector_attach_encoder(&intel_connector->base,
- &intel_encoder->enc);
+ intel_connector_attach_encoder(intel_connector, intel_encoder);
if (dvo->type == INTEL_DVO_CHIP_LVDS) {
/* For our LVDS chipsets, we should hopefully be able
* to dig the fixed panel mode out of the BIOS data.
@@ -451,11 +442,7 @@ void intel_dvo_init(struct drm_device *dev)
return;
}
- intel_i2c_destroy(intel_encoder->ddc_bus);
- /* Didn't find a chip, so tear down. */
- if (i2cbus != NULL)
- intel_i2c_destroy(i2cbus);
-free_intel:
+ drm_encoder_cleanup(&intel_encoder->base);
kfree(intel_dvo);
kfree(intel_connector);
}
diff --git a/drivers/gpu/drm/i915/intel_fb.c b/drivers/gpu/drm/i915/intel_fb.c
index b61966c126d3..af2a1dddc28e 100644
--- a/drivers/gpu/drm/i915/intel_fb.c
+++ b/drivers/gpu/drm/i915/intel_fb.c
@@ -44,13 +44,6 @@
#include "i915_drm.h"
#include "i915_drv.h"
-struct intel_fbdev {
- struct drm_fb_helper helper;
- struct intel_framebuffer ifb;
- struct list_head fbdev_list;
- struct drm_display_mode *our_mode;
-};
-
static struct fb_ops intelfb_ops = {
.owner = THIS_MODULE,
.fb_check_var = drm_fb_helper_check_var,
@@ -75,7 +68,7 @@ static int intelfb_create(struct intel_fbdev *ifbdev,
struct drm_gem_object *fbo = NULL;
struct drm_i915_gem_object *obj_priv;
struct device *device = &dev->pdev->dev;
- int size, ret, mmio_bar = IS_I9XX(dev) ? 0 : 1;
+ int size, ret, mmio_bar = IS_GEN2(dev) ? 1 : 0;
/* we don't do packed 24bpp */
if (sizes->surface_bpp == 24)
@@ -100,19 +93,13 @@ static int intelfb_create(struct intel_fbdev *ifbdev,
mutex_lock(&dev->struct_mutex);
- ret = intel_pin_and_fence_fb_obj(dev, fbo);
+ /* Flush everything out, we'll be doing GTT only from now on */
+ ret = intel_pin_and_fence_fb_obj(dev, fbo, false);
if (ret) {
DRM_ERROR("failed to pin fb: %d\n", ret);
goto out_unref;
}
- /* Flush everything out, we'll be doing GTT only from now on */
- ret = i915_gem_object_set_to_gtt_domain(fbo, 1);
- if (ret) {
- DRM_ERROR("failed to bind fb: %d.\n", ret);
- goto out_unpin;
- }
-
info = framebuffer_alloc(0, device);
if (!info) {
ret = -ENOMEM;
@@ -142,7 +129,7 @@ static int intelfb_create(struct intel_fbdev *ifbdev,
goto out_unpin;
}
info->apertures->ranges[0].base = dev->mode_config.fb_base;
- if (IS_I9XX(dev))
+ if (!IS_GEN2(dev))
info->apertures->ranges[0].size = pci_resource_len(dev->pdev, 2);
else
info->apertures->ranges[0].size = pci_resource_len(dev->pdev, 0);
@@ -219,8 +206,8 @@ static struct drm_fb_helper_funcs intel_fb_helper_funcs = {
.fb_probe = intel_fb_find_or_create_single,
};
-int intel_fbdev_destroy(struct drm_device *dev,
- struct intel_fbdev *ifbdev)
+static void intel_fbdev_destroy(struct drm_device *dev,
+ struct intel_fbdev *ifbdev)
{
struct fb_info *info;
struct intel_framebuffer *ifb = &ifbdev->ifb;
@@ -238,11 +225,9 @@ int intel_fbdev_destroy(struct drm_device *dev,
drm_framebuffer_cleanup(&ifb->base);
if (ifb->obj) {
- drm_gem_object_unreference(ifb->obj);
+ drm_gem_object_unreference_unlocked(ifb->obj);
ifb->obj = NULL;
}
-
- return 0;
}
int intel_fbdev_init(struct drm_device *dev)
diff --git a/drivers/gpu/drm/i915/intel_hdmi.c b/drivers/gpu/drm/i915/intel_hdmi.c
index 926934a482ec..0d0273e7b029 100644
--- a/drivers/gpu/drm/i915/intel_hdmi.c
+++ b/drivers/gpu/drm/i915/intel_hdmi.c
@@ -40,12 +40,76 @@
struct intel_hdmi {
struct intel_encoder base;
u32 sdvox_reg;
+ int ddc_bus;
bool has_hdmi_sink;
+ bool has_audio;
+ int force_audio;
+ struct drm_property *force_audio_property;
};
static struct intel_hdmi *enc_to_intel_hdmi(struct drm_encoder *encoder)
{
- return container_of(enc_to_intel_encoder(encoder), struct intel_hdmi, base);
+ return container_of(encoder, struct intel_hdmi, base.base);
+}
+
+static struct intel_hdmi *intel_attached_hdmi(struct drm_connector *connector)
+{
+ return container_of(intel_attached_encoder(connector),
+ struct intel_hdmi, base);
+}
+
+void intel_dip_infoframe_csum(struct dip_infoframe *avi_if)
+{
+ uint8_t *data = (uint8_t *)avi_if;
+ uint8_t sum = 0;
+ unsigned i;
+
+ avi_if->checksum = 0;
+ avi_if->ecc = 0;
+
+ for (i = 0; i < sizeof(*avi_if); i++)
+ sum += data[i];
+
+ avi_if->checksum = 0x100 - sum;
+}
+
+static void intel_hdmi_set_avi_infoframe(struct drm_encoder *encoder)
+{
+ struct dip_infoframe avi_if = {
+ .type = DIP_TYPE_AVI,
+ .ver = DIP_VERSION_AVI,
+ .len = DIP_LEN_AVI,
+ };
+ uint32_t *data = (uint32_t *)&avi_if;
+ struct drm_device *dev = encoder->dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(encoder);
+ u32 port;
+ unsigned i;
+
+ if (!intel_hdmi->has_hdmi_sink)
+ return;
+
+ /* XXX first guess at handling video port, is this correct? */
+ if (intel_hdmi->sdvox_reg == SDVOB)
+ port = VIDEO_DIP_PORT_B;
+ else if (intel_hdmi->sdvox_reg == SDVOC)
+ port = VIDEO_DIP_PORT_C;
+ else
+ return;
+
+ I915_WRITE(VIDEO_DIP_CTL, VIDEO_DIP_ENABLE | port |
+ VIDEO_DIP_SELECT_AVI | VIDEO_DIP_FREQ_VSYNC);
+
+ intel_dip_infoframe_csum(&avi_if);
+ for (i = 0; i < sizeof(avi_if); i += 4) {
+ I915_WRITE(VIDEO_DIP_DATA, *data);
+ data++;
+ }
+
+ I915_WRITE(VIDEO_DIP_CTL, VIDEO_DIP_ENABLE | port |
+ VIDEO_DIP_SELECT_AVI | VIDEO_DIP_FREQ_VSYNC |
+ VIDEO_DIP_ENABLE_AVI);
}
static void intel_hdmi_mode_set(struct drm_encoder *encoder,
@@ -65,10 +129,13 @@ static void intel_hdmi_mode_set(struct drm_encoder *encoder,
if (adjusted_mode->flags & DRM_MODE_FLAG_PHSYNC)
sdvox |= SDVO_HSYNC_ACTIVE_HIGH;
- if (intel_hdmi->has_hdmi_sink) {
+ /* Required on CPT */
+ if (intel_hdmi->has_hdmi_sink && HAS_PCH_CPT(dev))
+ sdvox |= HDMI_MODE_SELECT;
+
+ if (intel_hdmi->has_audio) {
sdvox |= SDVO_AUDIO_ENABLE;
- if (HAS_PCH_CPT(dev))
- sdvox |= HDMI_MODE_SELECT;
+ sdvox |= SDVO_NULL_PACKETS_DURING_VSYNC;
}
if (intel_crtc->pipe == 1) {
@@ -80,6 +147,8 @@ static void intel_hdmi_mode_set(struct drm_encoder *encoder,
I915_WRITE(intel_hdmi->sdvox_reg, sdvox);
POSTING_READ(intel_hdmi->sdvox_reg);
+
+ intel_hdmi_set_avi_infoframe(encoder);
}
static void intel_hdmi_dpms(struct drm_encoder *encoder, int mode)
@@ -141,36 +210,85 @@ static bool intel_hdmi_mode_fixup(struct drm_encoder *encoder,
static enum drm_connector_status
intel_hdmi_detect(struct drm_connector *connector, bool force)
{
- struct drm_encoder *encoder = intel_attached_encoder(connector);
- struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(encoder);
- struct edid *edid = NULL;
+ struct intel_hdmi *intel_hdmi = intel_attached_hdmi(connector);
+ struct drm_i915_private *dev_priv = connector->dev->dev_private;
+ struct edid *edid;
enum drm_connector_status status = connector_status_disconnected;
intel_hdmi->has_hdmi_sink = false;
- edid = drm_get_edid(connector, intel_hdmi->base.ddc_bus);
+ intel_hdmi->has_audio = false;
+ edid = drm_get_edid(connector,
+ &dev_priv->gmbus[intel_hdmi->ddc_bus].adapter);
if (edid) {
if (edid->input & DRM_EDID_INPUT_DIGITAL) {
status = connector_status_connected;
intel_hdmi->has_hdmi_sink = drm_detect_hdmi_monitor(edid);
+ intel_hdmi->has_audio = drm_detect_monitor_audio(edid);
}
connector->display_info.raw_edid = NULL;
kfree(edid);
}
+ if (status == connector_status_connected) {
+ if (intel_hdmi->force_audio)
+ intel_hdmi->has_audio = intel_hdmi->force_audio > 0;
+ }
+
return status;
}
static int intel_hdmi_get_modes(struct drm_connector *connector)
{
- struct drm_encoder *encoder = intel_attached_encoder(connector);
- struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(encoder);
+ struct intel_hdmi *intel_hdmi = intel_attached_hdmi(connector);
+ struct drm_i915_private *dev_priv = connector->dev->dev_private;
/* We should parse the EDID data and find out if it's an HDMI sink so
* we can send audio to it.
*/
- return intel_ddc_get_modes(connector, intel_hdmi->base.ddc_bus);
+ return intel_ddc_get_modes(connector,
+ &dev_priv->gmbus[intel_hdmi->ddc_bus].adapter);
+}
+
+static int
+intel_hdmi_set_property(struct drm_connector *connector,
+ struct drm_property *property,
+ uint64_t val)
+{
+ struct intel_hdmi *intel_hdmi = intel_attached_hdmi(connector);
+ int ret;
+
+ ret = drm_connector_property_set_value(connector, property, val);
+ if (ret)
+ return ret;
+
+ if (property == intel_hdmi->force_audio_property) {
+ if (val == intel_hdmi->force_audio)
+ return 0;
+
+ intel_hdmi->force_audio = val;
+
+ if (val > 0 && intel_hdmi->has_audio)
+ return 0;
+ if (val < 0 && !intel_hdmi->has_audio)
+ return 0;
+
+ intel_hdmi->has_audio = val > 0;
+ goto done;
+ }
+
+ return -EINVAL;
+
+done:
+ if (intel_hdmi->base.base.crtc) {
+ struct drm_crtc *crtc = intel_hdmi->base.base.crtc;
+ drm_crtc_helper_set_mode(crtc, &crtc->mode,
+ crtc->x, crtc->y,
+ crtc->fb);
+ }
+
+ return 0;
}
static void intel_hdmi_destroy(struct drm_connector *connector)
@@ -192,19 +310,34 @@ static const struct drm_connector_funcs intel_hdmi_connector_funcs = {
.dpms = drm_helper_connector_dpms,
.detect = intel_hdmi_detect,
.fill_modes = drm_helper_probe_single_connector_modes,
+ .set_property = intel_hdmi_set_property,
.destroy = intel_hdmi_destroy,
};
static const struct drm_connector_helper_funcs intel_hdmi_connector_helper_funcs = {
.get_modes = intel_hdmi_get_modes,
.mode_valid = intel_hdmi_mode_valid,
- .best_encoder = intel_attached_encoder,
+ .best_encoder = intel_best_encoder,
};
static const struct drm_encoder_funcs intel_hdmi_enc_funcs = {
.destroy = intel_encoder_destroy,
};
+static void
+intel_hdmi_add_properties(struct intel_hdmi *intel_hdmi, struct drm_connector *connector)
+{
+ struct drm_device *dev = connector->dev;
+
+ intel_hdmi->force_audio_property =
+ drm_property_create(dev, DRM_MODE_PROP_RANGE, "force_audio", 2);
+ if (intel_hdmi->force_audio_property) {
+ intel_hdmi->force_audio_property->values[0] = -1;
+ intel_hdmi->force_audio_property->values[1] = 1;
+ drm_connector_attach_property(connector, intel_hdmi->force_audio_property, 0);
+ }
+}
+
void intel_hdmi_init(struct drm_device *dev, int sdvox_reg)
{
struct drm_i915_private *dev_priv = dev->dev_private;
@@ -224,6 +357,9 @@ void intel_hdmi_init(struct drm_device *dev, int sdvox_reg)
}
intel_encoder = &intel_hdmi->base;
+ drm_encoder_init(dev, &intel_encoder->base, &intel_hdmi_enc_funcs,
+ DRM_MODE_ENCODER_TMDS);
+
connector = &intel_connector->base;
drm_connector_init(dev, connector, &intel_hdmi_connector_funcs,
DRM_MODE_CONNECTOR_HDMIA);
@@ -239,39 +375,33 @@ void intel_hdmi_init(struct drm_device *dev, int sdvox_reg)
/* Set up the DDC bus. */
if (sdvox_reg == SDVOB) {
intel_encoder->clone_mask = (1 << INTEL_HDMIB_CLONE_BIT);
- intel_encoder->ddc_bus = intel_i2c_create(dev, GPIOE, "HDMIB");
+ intel_hdmi->ddc_bus = GMBUS_PORT_DPB;
dev_priv->hotplug_supported_mask |= HDMIB_HOTPLUG_INT_STATUS;
} else if (sdvox_reg == SDVOC) {
intel_encoder->clone_mask = (1 << INTEL_HDMIC_CLONE_BIT);
- intel_encoder->ddc_bus = intel_i2c_create(dev, GPIOD, "HDMIC");
+ intel_hdmi->ddc_bus = GMBUS_PORT_DPC;
dev_priv->hotplug_supported_mask |= HDMIC_HOTPLUG_INT_STATUS;
} else if (sdvox_reg == HDMIB) {
intel_encoder->clone_mask = (1 << INTEL_HDMID_CLONE_BIT);
- intel_encoder->ddc_bus = intel_i2c_create(dev, PCH_GPIOE,
- "HDMIB");
+ intel_hdmi->ddc_bus = GMBUS_PORT_DPB;
dev_priv->hotplug_supported_mask |= HDMIB_HOTPLUG_INT_STATUS;
} else if (sdvox_reg == HDMIC) {
intel_encoder->clone_mask = (1 << INTEL_HDMIE_CLONE_BIT);
- intel_encoder->ddc_bus = intel_i2c_create(dev, PCH_GPIOD,
- "HDMIC");
+ intel_hdmi->ddc_bus = GMBUS_PORT_DPC;
dev_priv->hotplug_supported_mask |= HDMIC_HOTPLUG_INT_STATUS;
} else if (sdvox_reg == HDMID) {
intel_encoder->clone_mask = (1 << INTEL_HDMIF_CLONE_BIT);
- intel_encoder->ddc_bus = intel_i2c_create(dev, PCH_GPIOF,
- "HDMID");
+ intel_hdmi->ddc_bus = GMBUS_PORT_DPD;
dev_priv->hotplug_supported_mask |= HDMID_HOTPLUG_INT_STATUS;
}
- if (!intel_encoder->ddc_bus)
- goto err_connector;
intel_hdmi->sdvox_reg = sdvox_reg;
- drm_encoder_init(dev, &intel_encoder->enc, &intel_hdmi_enc_funcs,
- DRM_MODE_ENCODER_TMDS);
- drm_encoder_helper_add(&intel_encoder->enc, &intel_hdmi_helper_funcs);
+ drm_encoder_helper_add(&intel_encoder->base, &intel_hdmi_helper_funcs);
+
+ intel_hdmi_add_properties(intel_hdmi, connector);
- drm_mode_connector_attach_encoder(&intel_connector->base,
- &intel_encoder->enc);
+ intel_connector_attach_encoder(intel_connector, intel_encoder);
drm_sysfs_connector_add(connector);
/* For G4X desktop chip, PEG_BAND_GAP_DATA 3:0 must first be written
@@ -282,13 +412,4 @@ void intel_hdmi_init(struct drm_device *dev, int sdvox_reg)
u32 temp = I915_READ(PEG_BAND_GAP_DATA);
I915_WRITE(PEG_BAND_GAP_DATA, (temp & ~0xf) | 0xd);
}
-
- return;
-
-err_connector:
- drm_connector_cleanup(connector);
- kfree(intel_hdmi);
- kfree(intel_connector);
-
- return;
}
diff --git a/drivers/gpu/drm/i915/intel_i2c.c b/drivers/gpu/drm/i915/intel_i2c.c
index c2649c7df14c..3dba086e7eea 100644
--- a/drivers/gpu/drm/i915/intel_i2c.c
+++ b/drivers/gpu/drm/i915/intel_i2c.c
@@ -1,6 +1,6 @@
/*
* Copyright (c) 2006 Dave Airlie <airlied@linux.ie>
- * Copyright © 2006-2008 Intel Corporation
+ * Copyright © 2006-2008,2010 Intel Corporation
* Jesse Barnes <jesse.barnes@intel.com>
*
* Permission is hereby granted, free of charge, to any person obtaining a
@@ -24,10 +24,9 @@
*
* Authors:
* Eric Anholt <eric@anholt.net>
+ * Chris Wilson <chris@chris-wilson.co.uk>
*/
#include <linux/i2c.h>
-#include <linux/slab.h>
-#include <linux/i2c-id.h>
#include <linux/i2c-algo-bit.h>
#include "drmP.h"
#include "drm.h"
@@ -35,79 +34,106 @@
#include "i915_drm.h"
#include "i915_drv.h"
-void intel_i2c_quirk_set(struct drm_device *dev, bool enable)
+/* Intel GPIO access functions */
+
+#define I2C_RISEFALL_TIME 20
+
+static inline struct intel_gmbus *
+to_intel_gmbus(struct i2c_adapter *i2c)
+{
+ return container_of(i2c, struct intel_gmbus, adapter);
+}
+
+struct intel_gpio {
+ struct i2c_adapter adapter;
+ struct i2c_algo_bit_data algo;
+ struct drm_i915_private *dev_priv;
+ u32 reg;
+};
+
+void
+intel_i2c_reset(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
+ if (HAS_PCH_SPLIT(dev))
+ I915_WRITE(PCH_GMBUS0, 0);
+ else
+ I915_WRITE(GMBUS0, 0);
+}
+
+static void intel_i2c_quirk_set(struct drm_i915_private *dev_priv, bool enable)
+{
+ u32 val;
/* When using bit bashing for I2C, this bit needs to be set to 1 */
- if (!IS_PINEVIEW(dev))
+ if (!IS_PINEVIEW(dev_priv->dev))
return;
+
+ val = I915_READ(DSPCLK_GATE_D);
if (enable)
- I915_WRITE(DSPCLK_GATE_D,
- I915_READ(DSPCLK_GATE_D) | DPCUNIT_CLOCK_GATE_DISABLE);
+ val |= DPCUNIT_CLOCK_GATE_DISABLE;
else
- I915_WRITE(DSPCLK_GATE_D,
- I915_READ(DSPCLK_GATE_D) & (~DPCUNIT_CLOCK_GATE_DISABLE));
+ val &= ~DPCUNIT_CLOCK_GATE_DISABLE;
+ I915_WRITE(DSPCLK_GATE_D, val);
}
-/*
- * Intel GPIO access functions
- */
+static u32 get_reserved(struct intel_gpio *gpio)
+{
+ struct drm_i915_private *dev_priv = gpio->dev_priv;
+ struct drm_device *dev = dev_priv->dev;
+ u32 reserved = 0;
-#define I2C_RISEFALL_TIME 20
+ /* On most chips, these bits must be preserved in software. */
+ if (!IS_I830(dev) && !IS_845G(dev))
+ reserved = I915_READ(gpio->reg) & (GPIO_DATA_PULLUP_DISABLE |
+ GPIO_CLOCK_PULLUP_DISABLE);
+
+ return reserved;
+}
static int get_clock(void *data)
{
- struct intel_i2c_chan *chan = data;
- struct drm_i915_private *dev_priv = chan->drm_dev->dev_private;
- u32 val;
-
- val = I915_READ(chan->reg);
- return ((val & GPIO_CLOCK_VAL_IN) != 0);
+ struct intel_gpio *gpio = data;
+ struct drm_i915_private *dev_priv = gpio->dev_priv;
+ u32 reserved = get_reserved(gpio);
+ I915_WRITE(gpio->reg, reserved | GPIO_CLOCK_DIR_MASK);
+ I915_WRITE(gpio->reg, reserved);
+ return (I915_READ(gpio->reg) & GPIO_CLOCK_VAL_IN) != 0;
}
static int get_data(void *data)
{
- struct intel_i2c_chan *chan = data;
- struct drm_i915_private *dev_priv = chan->drm_dev->dev_private;
- u32 val;
-
- val = I915_READ(chan->reg);
- return ((val & GPIO_DATA_VAL_IN) != 0);
+ struct intel_gpio *gpio = data;
+ struct drm_i915_private *dev_priv = gpio->dev_priv;
+ u32 reserved = get_reserved(gpio);
+ I915_WRITE(gpio->reg, reserved | GPIO_DATA_DIR_MASK);
+ I915_WRITE(gpio->reg, reserved);
+ return (I915_READ(gpio->reg) & GPIO_DATA_VAL_IN) != 0;
}
static void set_clock(void *data, int state_high)
{
- struct intel_i2c_chan *chan = data;
- struct drm_device *dev = chan->drm_dev;
- struct drm_i915_private *dev_priv = chan->drm_dev->dev_private;
- u32 reserved = 0, clock_bits;
-
- /* On most chips, these bits must be preserved in software. */
- if (!IS_I830(dev) && !IS_845G(dev))
- reserved = I915_READ(chan->reg) & (GPIO_DATA_PULLUP_DISABLE |
- GPIO_CLOCK_PULLUP_DISABLE);
+ struct intel_gpio *gpio = data;
+ struct drm_i915_private *dev_priv = gpio->dev_priv;
+ u32 reserved = get_reserved(gpio);
+ u32 clock_bits;
if (state_high)
clock_bits = GPIO_CLOCK_DIR_IN | GPIO_CLOCK_DIR_MASK;
else
clock_bits = GPIO_CLOCK_DIR_OUT | GPIO_CLOCK_DIR_MASK |
GPIO_CLOCK_VAL_MASK;
- I915_WRITE(chan->reg, reserved | clock_bits);
- udelay(I2C_RISEFALL_TIME); /* wait for the line to change state */
+
+ I915_WRITE(gpio->reg, reserved | clock_bits);
+ POSTING_READ(gpio->reg);
}
static void set_data(void *data, int state_high)
{
- struct intel_i2c_chan *chan = data;
- struct drm_device *dev = chan->drm_dev;
- struct drm_i915_private *dev_priv = chan->drm_dev->dev_private;
- u32 reserved = 0, data_bits;
-
- /* On most chips, these bits must be preserved in software. */
- if (!IS_I830(dev) && !IS_845G(dev))
- reserved = I915_READ(chan->reg) & (GPIO_DATA_PULLUP_DISABLE |
- GPIO_CLOCK_PULLUP_DISABLE);
+ struct intel_gpio *gpio = data;
+ struct drm_i915_private *dev_priv = gpio->dev_priv;
+ u32 reserved = get_reserved(gpio);
+ u32 data_bits;
if (state_high)
data_bits = GPIO_DATA_DIR_IN | GPIO_DATA_DIR_MASK;
@@ -115,109 +141,314 @@ static void set_data(void *data, int state_high)
data_bits = GPIO_DATA_DIR_OUT | GPIO_DATA_DIR_MASK |
GPIO_DATA_VAL_MASK;
- I915_WRITE(chan->reg, reserved | data_bits);
- udelay(I2C_RISEFALL_TIME); /* wait for the line to change state */
+ I915_WRITE(gpio->reg, reserved | data_bits);
+ POSTING_READ(gpio->reg);
}
-/* Clears the GMBUS setup. Our driver doesn't make use of the GMBUS I2C
- * engine, but if the BIOS leaves it enabled, then that can break our use
- * of the bit-banging I2C interfaces. This is notably the case with the
- * Mac Mini in EFI mode.
- */
-void
-intel_i2c_reset_gmbus(struct drm_device *dev)
+static struct i2c_adapter *
+intel_gpio_create(struct drm_i915_private *dev_priv, u32 pin)
{
- struct drm_i915_private *dev_priv = dev->dev_private;
+ static const int map_pin_to_reg[] = {
+ 0,
+ GPIOB,
+ GPIOA,
+ GPIOC,
+ GPIOD,
+ GPIOE,
+ 0,
+ GPIOF,
+ };
+ struct intel_gpio *gpio;
- if (HAS_PCH_SPLIT(dev)) {
- I915_WRITE(PCH_GMBUS0, 0);
- } else {
- I915_WRITE(GMBUS0, 0);
+ if (pin >= ARRAY_SIZE(map_pin_to_reg) || !map_pin_to_reg[pin])
+ return NULL;
+
+ gpio = kzalloc(sizeof(struct intel_gpio), GFP_KERNEL);
+ if (gpio == NULL)
+ return NULL;
+
+ gpio->reg = map_pin_to_reg[pin];
+ if (HAS_PCH_SPLIT(dev_priv->dev))
+ gpio->reg += PCH_GPIOA - GPIOA;
+ gpio->dev_priv = dev_priv;
+
+ snprintf(gpio->adapter.name, sizeof(gpio->adapter.name),
+ "i915 GPIO%c", "?BACDE?F"[pin]);
+ gpio->adapter.owner = THIS_MODULE;
+ gpio->adapter.algo_data = &gpio->algo;
+ gpio->adapter.dev.parent = &dev_priv->dev->pdev->dev;
+ gpio->algo.setsda = set_data;
+ gpio->algo.setscl = set_clock;
+ gpio->algo.getsda = get_data;
+ gpio->algo.getscl = get_clock;
+ gpio->algo.udelay = I2C_RISEFALL_TIME;
+ gpio->algo.timeout = usecs_to_jiffies(2200);
+ gpio->algo.data = gpio;
+
+ if (i2c_bit_add_bus(&gpio->adapter))
+ goto out_free;
+
+ return &gpio->adapter;
+
+out_free:
+ kfree(gpio);
+ return NULL;
+}
+
+static int
+intel_i2c_quirk_xfer(struct drm_i915_private *dev_priv,
+ struct i2c_adapter *adapter,
+ struct i2c_msg *msgs,
+ int num)
+{
+ struct intel_gpio *gpio = container_of(adapter,
+ struct intel_gpio,
+ adapter);
+ int ret;
+
+ intel_i2c_reset(dev_priv->dev);
+
+ intel_i2c_quirk_set(dev_priv, true);
+ set_data(gpio, 1);
+ set_clock(gpio, 1);
+ udelay(I2C_RISEFALL_TIME);
+
+ ret = adapter->algo->master_xfer(adapter, msgs, num);
+
+ set_data(gpio, 1);
+ set_clock(gpio, 1);
+ intel_i2c_quirk_set(dev_priv, false);
+
+ return ret;
+}
+
+static int
+gmbus_xfer(struct i2c_adapter *adapter,
+ struct i2c_msg *msgs,
+ int num)
+{
+ struct intel_gmbus *bus = container_of(adapter,
+ struct intel_gmbus,
+ adapter);
+ struct drm_i915_private *dev_priv = adapter->algo_data;
+ int i, reg_offset;
+
+ if (bus->force_bit)
+ return intel_i2c_quirk_xfer(dev_priv,
+ bus->force_bit, msgs, num);
+
+ reg_offset = HAS_PCH_SPLIT(dev_priv->dev) ? PCH_GMBUS0 - GMBUS0 : 0;
+
+ I915_WRITE(GMBUS0 + reg_offset, bus->reg0);
+
+ for (i = 0; i < num; i++) {
+ u16 len = msgs[i].len;
+ u8 *buf = msgs[i].buf;
+
+ if (msgs[i].flags & I2C_M_RD) {
+ I915_WRITE(GMBUS1 + reg_offset,
+ GMBUS_CYCLE_WAIT | (i + 1 == num ? GMBUS_CYCLE_STOP : 0) |
+ (len << GMBUS_BYTE_COUNT_SHIFT) |
+ (msgs[i].addr << GMBUS_SLAVE_ADDR_SHIFT) |
+ GMBUS_SLAVE_READ | GMBUS_SW_RDY);
+ POSTING_READ(GMBUS2+reg_offset);
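+ /* GMBUS3 carries up to four bytes per handshake; unpack them least-significant byte first. */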
+ do {
+ u32 val, loop = 0;
+
+ if (wait_for(I915_READ(GMBUS2 + reg_offset) & (GMBUS_SATOER | GMBUS_HW_RDY), 50))
+ goto timeout;
+ if (I915_READ(GMBUS2 + reg_offset) & GMBUS_SATOER)
+ return 0;
+
+ val = I915_READ(GMBUS3 + reg_offset);
+ do {
+ *buf++ = val & 0xff;
+ val >>= 8;
+ } while (--len && ++loop < 4);
+ } while (len);
+ } else {
+ u32 val, loop;
+
+ val = loop = 0;
+ do {
+ val |= *buf++ << (8 * loop);
+ } while (--len && ++loop < 4);
+
+ I915_WRITE(GMBUS3 + reg_offset, val);
+ I915_WRITE(GMBUS1 + reg_offset,
+ (i + 1 == num ? GMBUS_CYCLE_STOP : GMBUS_CYCLE_WAIT) |
+ (msgs[i].len << GMBUS_BYTE_COUNT_SHIFT) |
+ (msgs[i].addr << GMBUS_SLAVE_ADDR_SHIFT) |
+ GMBUS_SLAVE_WRITE | GMBUS_SW_RDY);
+ POSTING_READ(GMBUS2+reg_offset);
+
+ while (len) {
+ if (wait_for(I915_READ(GMBUS2 + reg_offset) & (GMBUS_SATOER | GMBUS_HW_RDY), 50))
+ goto timeout;
+ if (I915_READ(GMBUS2 + reg_offset) & GMBUS_SATOER)
+ return 0;
+
+ val = loop = 0;
+ do {
+ val |= *buf++ << (8 * loop);
+ } while (--len && ++loop < 4);
+
+ I915_WRITE(GMBUS3 + reg_offset, val);
+ POSTING_READ(GMBUS2+reg_offset);
+ }
+ }
+
+ if (i + 1 < num && wait_for(I915_READ(GMBUS2 + reg_offset) & (GMBUS_SATOER | GMBUS_HW_WAIT_PHASE), 50))
+ goto timeout;
+ if (I915_READ(GMBUS2 + reg_offset) & GMBUS_SATOER)
+ return 0;
}
+
+ return num;
+
+timeout:
+ DRM_INFO("GMBUS timed out, falling back to bit banging on pin %d [%s]\n",
+ bus->reg0 & 0xff, bus->adapter.name);
+ /* The hardware may not support GMBUS on these pins, so fall back to GPIO bit-banging. */
+ bus->force_bit = intel_gpio_create(dev_priv, bus->reg0 & 0xff);
+ if (!bus->force_bit)
+ return -ENOMEM;
+
+ return intel_i2c_quirk_xfer(dev_priv, bus->force_bit, msgs, num);
}
+static u32 gmbus_func(struct i2c_adapter *adapter)
+{
+ struct intel_gmbus *bus = container_of(adapter,
+ struct intel_gmbus,
+ adapter);
+
+ if (bus->force_bit)
+ bus->force_bit->algo->functionality(bus->force_bit);
+
+ return (I2C_FUNC_I2C | I2C_FUNC_SMBUS_EMUL |
+ /* I2C_FUNC_10BIT_ADDR | */
+ I2C_FUNC_SMBUS_READ_BLOCK_DATA |
+ I2C_FUNC_SMBUS_BLOCK_PROC_CALL);
+}
+
+static const struct i2c_algorithm gmbus_algorithm = {
+ .master_xfer = gmbus_xfer,
+ .functionality = gmbus_func
+};
+
/**
- * intel_i2c_create - instantiate an Intel i2c bus using the specified GPIO reg
+ * intel_setup_gmbus - instantiate all Intel i2c GMBuses
* @dev: DRM device
- * @output: driver specific output device
- * @reg: GPIO reg to use
- * @name: name for this bus
- * @slave_addr: slave address (if fixed)
- *
- * Creates and registers a new i2c bus with the Linux i2c layer, for use
- * in output probing and control (e.g. DDC or SDVO control functions).
- *
- * Possible values for @reg include:
- * %GPIOA
- * %GPIOB
- * %GPIOC
- * %GPIOD
- * %GPIOE
- * %GPIOF
- * %GPIOG
- * %GPIOH
- * see PRM for details on how these different busses are used.
*/
-struct i2c_adapter *intel_i2c_create(struct drm_device *dev, const u32 reg,
- const char *name)
+int intel_setup_gmbus(struct drm_device *dev)
{
- struct intel_i2c_chan *chan;
+ static const char *names[GMBUS_NUM_PORTS] = {
+ "disabled",
+ "ssc",
+ "vga",
+ "panel",
+ "dpc",
+ "dpb",
+ "reserved",
+ "dpd",
+ };
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ int ret, i;
- chan = kzalloc(sizeof(struct intel_i2c_chan), GFP_KERNEL);
- if (!chan)
- goto out_free;
+ dev_priv->gmbus = kcalloc(sizeof(struct intel_gmbus), GMBUS_NUM_PORTS,
+ GFP_KERNEL);
+ if (dev_priv->gmbus == NULL)
+ return -ENOMEM;
- chan->drm_dev = dev;
- chan->reg = reg;
- snprintf(chan->adapter.name, I2C_NAME_SIZE, "intel drm %s", name);
- chan->adapter.owner = THIS_MODULE;
- chan->adapter.algo_data = &chan->algo;
- chan->adapter.dev.parent = &dev->pdev->dev;
- chan->algo.setsda = set_data;
- chan->algo.setscl = set_clock;
- chan->algo.getsda = get_data;
- chan->algo.getscl = get_clock;
- chan->algo.udelay = 20;
- chan->algo.timeout = usecs_to_jiffies(2200);
- chan->algo.data = chan;
-
- i2c_set_adapdata(&chan->adapter, chan);
-
- if(i2c_bit_add_bus(&chan->adapter))
- goto out_free;
+ for (i = 0; i < GMBUS_NUM_PORTS; i++) {
+ struct intel_gmbus *bus = &dev_priv->gmbus[i];
- intel_i2c_reset_gmbus(dev);
+ bus->adapter.owner = THIS_MODULE;
+ bus->adapter.class = I2C_CLASS_DDC;
+ snprintf(bus->adapter.name,
+ sizeof(bus->adapter.name),
+ "i915 gmbus %s",
+ names[i]);
- /* JJJ: raise SCL and SDA? */
- intel_i2c_quirk_set(dev, true);
- set_data(chan, 1);
- set_clock(chan, 1);
- intel_i2c_quirk_set(dev, false);
- udelay(20);
+ bus->adapter.dev.parent = &dev->pdev->dev;
+ bus->adapter.algo_data = dev_priv;
- return &chan->adapter;
+ bus->adapter.algo = &gmbus_algorithm;
+ ret = i2c_add_adapter(&bus->adapter);
+ if (ret)
+ goto err;
-out_free:
- kfree(chan);
- return NULL;
+ /* By default use a conservative clock rate */
+ bus->reg0 = i | GMBUS_RATE_100KHZ;
+
+ /* XXX force bit banging until GMBUS is fully debugged */
+ bus->force_bit = intel_gpio_create(dev_priv, i);
+ }
+
+ intel_i2c_reset(dev_priv->dev);
+
+ return 0;
+
+err:
+ while (--i) {
+ struct intel_gmbus *bus = &dev_priv->gmbus[i];
+ i2c_del_adapter(&bus->adapter);
+ }
+ kfree(dev_priv->gmbus);
+ dev_priv->gmbus = NULL;
+ return ret;
}
-/**
- * intel_i2c_destroy - unregister and free i2c bus resources
- * @output: channel to free
- *
- * Unregister the adapter from the i2c layer, then free the structure.
- */
-void intel_i2c_destroy(struct i2c_adapter *adapter)
+void intel_gmbus_set_speed(struct i2c_adapter *adapter, int speed)
+{
+ struct intel_gmbus *bus = to_intel_gmbus(adapter);
+
+ /* speed:
+ * 0x0 = 100 KHz
+ * 0x1 = 50 KHz
+ * 0x2 = 400 KHz
+ * 0x3 = 1000 KHz
+ */
+ bus->reg0 = (bus->reg0 & ~(0x3 << 8)) | (speed << 8);
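+ /* e.g. intel_gmbus_set_speed(adapter, 2) selects 400 KHz per the table above. */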
+}
+
+void intel_gmbus_force_bit(struct i2c_adapter *adapter, bool force_bit)
+{
+ struct intel_gmbus *bus = to_intel_gmbus(adapter);
+
+ if (force_bit) {
+ if (bus->force_bit == NULL) {
+ struct drm_i915_private *dev_priv = adapter->algo_data;
+ bus->force_bit = intel_gpio_create(dev_priv,
+ bus->reg0 & 0xff);
+ }
+ } else {
+ if (bus->force_bit) {
+ i2c_del_adapter(bus->force_bit);
+ kfree(bus->force_bit);
+ bus->force_bit = NULL;
+ }
+ }
+}
+
+void intel_teardown_gmbus(struct drm_device *dev)
{
- struct intel_i2c_chan *chan;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ int i;
- if (!adapter)
+ if (dev_priv->gmbus == NULL)
return;
- chan = container_of(adapter,
- struct intel_i2c_chan,
- adapter);
- i2c_del_adapter(&chan->adapter);
- kfree(chan);
+ for (i = 0; i < GMBUS_NUM_PORTS; i++) {
+ struct intel_gmbus *bus = &dev_priv->gmbus[i];
+ if (bus->force_bit) {
+ i2c_del_adapter(bus->force_bit);
+ kfree(bus->force_bit);
+ }
+ i2c_del_adapter(&bus->adapter);
+ }
+
+ kfree(dev_priv->gmbus);
+ dev_priv->gmbus = NULL;
}
diff --git a/drivers/gpu/drm/i915/intel_lvds.c b/drivers/gpu/drm/i915/intel_lvds.c
index 6ec39a86ed06..25bcedf386fd 100644
--- a/drivers/gpu/drm/i915/intel_lvds.c
+++ b/drivers/gpu/drm/i915/intel_lvds.c
@@ -43,102 +43,111 @@
/* Private structure for the integrated LVDS support */
struct intel_lvds {
struct intel_encoder base;
+
+ struct edid *edid;
+
int fitting_mode;
u32 pfit_control;
u32 pfit_pgm_ratios;
+ bool pfit_dirty;
+
+ struct drm_display_mode *fixed_mode;
};
-static struct intel_lvds *enc_to_intel_lvds(struct drm_encoder *encoder)
+static struct intel_lvds *to_intel_lvds(struct drm_encoder *encoder)
{
- return container_of(enc_to_intel_encoder(encoder), struct intel_lvds, base);
+ return container_of(encoder, struct intel_lvds, base.base);
}
-/**
- * Sets the backlight level.
- *
- * \param level backlight level, from 0 to intel_lvds_get_max_backlight().
- */
-static void intel_lvds_set_backlight(struct drm_device *dev, int level)
+static struct intel_lvds *intel_attached_lvds(struct drm_connector *connector)
{
- struct drm_i915_private *dev_priv = dev->dev_private;
- u32 blc_pwm_ctl, reg;
-
- if (HAS_PCH_SPLIT(dev))
- reg = BLC_PWM_CPU_CTL;
- else
- reg = BLC_PWM_CTL;
-
- blc_pwm_ctl = I915_READ(reg) & ~BACKLIGHT_DUTY_CYCLE_MASK;
- I915_WRITE(reg, (blc_pwm_ctl |
- (level << BACKLIGHT_DUTY_CYCLE_SHIFT)));
+ return container_of(intel_attached_encoder(connector),
+ struct intel_lvds, base);
}
/**
- * Returns the maximum level of the backlight duty cycle field.
+ * Sets the power state for the panel.
*/
-static u32 intel_lvds_get_max_backlight(struct drm_device *dev)
+static void intel_lvds_enable(struct intel_lvds *intel_lvds)
{
+ struct drm_device *dev = intel_lvds->base.base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
- u32 reg;
+ u32 ctl_reg, lvds_reg;
- if (HAS_PCH_SPLIT(dev))
- reg = BLC_PWM_PCH_CTL2;
- else
- reg = BLC_PWM_CTL;
+ if (HAS_PCH_SPLIT(dev)) {
+ ctl_reg = PCH_PP_CONTROL;
+ lvds_reg = PCH_LVDS;
+ } else {
+ ctl_reg = PP_CONTROL;
+ lvds_reg = LVDS;
+ }
+
+ I915_WRITE(lvds_reg, I915_READ(lvds_reg) | LVDS_PORT_EN);
+
+ if (intel_lvds->pfit_dirty) {
+ /*
+ * Enable automatic panel scaling so that non-native modes
+ * fill the screen. The panel fitter should only be
+ * adjusted whilst the pipe is disabled, according to
+ * register description and PRM.
+ */
+ DRM_DEBUG_KMS("applying panel-fitter: %x, %x\n",
+ intel_lvds->pfit_control,
+ intel_lvds->pfit_pgm_ratios);
+ if (wait_for((I915_READ(PP_STATUS) & PP_ON) == 0, 1000)) {
+ DRM_ERROR("timed out waiting for panel to power off\n");
+ } else {
+ I915_WRITE(PFIT_PGM_RATIOS, intel_lvds->pfit_pgm_ratios);
+ I915_WRITE(PFIT_CONTROL, intel_lvds->pfit_control);
+ intel_lvds->pfit_dirty = false;
+ }
+ }
+
+ I915_WRITE(ctl_reg, I915_READ(ctl_reg) | POWER_TARGET_ON);
+ POSTING_READ(lvds_reg);
- return ((I915_READ(reg) & BACKLIGHT_MODULATION_FREQ_MASK) >>
- BACKLIGHT_MODULATION_FREQ_SHIFT) * 2;
+ intel_panel_set_backlight(dev, dev_priv->backlight_level);
}
-/**
- * Sets the power state for the panel.
- */
-static void intel_lvds_set_power(struct drm_device *dev, bool on)
+static void intel_lvds_disable(struct intel_lvds *intel_lvds)
{
+ struct drm_device *dev = intel_lvds->base.base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
- u32 ctl_reg, status_reg, lvds_reg;
+ u32 ctl_reg, lvds_reg;
if (HAS_PCH_SPLIT(dev)) {
ctl_reg = PCH_PP_CONTROL;
- status_reg = PCH_PP_STATUS;
lvds_reg = PCH_LVDS;
} else {
ctl_reg = PP_CONTROL;
- status_reg = PP_STATUS;
lvds_reg = LVDS;
}
- if (on) {
- I915_WRITE(lvds_reg, I915_READ(lvds_reg) | LVDS_PORT_EN);
- POSTING_READ(lvds_reg);
+ dev_priv->backlight_level = intel_panel_get_backlight(dev);
+ intel_panel_set_backlight(dev, 0);
- I915_WRITE(ctl_reg, I915_READ(ctl_reg) |
- POWER_TARGET_ON);
- if (wait_for(I915_READ(status_reg) & PP_ON, 1000, 0))
- DRM_ERROR("timed out waiting to enable LVDS pipe");
+ I915_WRITE(ctl_reg, I915_READ(ctl_reg) & ~POWER_TARGET_ON);
- intel_lvds_set_backlight(dev, dev_priv->backlight_duty_cycle);
- } else {
- intel_lvds_set_backlight(dev, 0);
+ if (intel_lvds->pfit_control) {
+ if (wait_for((I915_READ(PP_STATUS) & PP_ON) == 0, 1000))
+ DRM_ERROR("timed out waiting for panel to power off\n");
- I915_WRITE(ctl_reg, I915_READ(ctl_reg) &
- ~POWER_TARGET_ON);
- if (wait_for((I915_READ(status_reg) & PP_ON) == 0, 1000, 0))
- DRM_ERROR("timed out waiting for LVDS pipe to turn off");
-
- I915_WRITE(lvds_reg, I915_READ(lvds_reg) & ~LVDS_PORT_EN);
- POSTING_READ(lvds_reg);
+ I915_WRITE(PFIT_CONTROL, 0);
+ intel_lvds->pfit_dirty = true;
}
+
+ I915_WRITE(lvds_reg, I915_READ(lvds_reg) & ~LVDS_PORT_EN);
+ POSTING_READ(lvds_reg);
}
static void intel_lvds_dpms(struct drm_encoder *encoder, int mode)
{
- struct drm_device *dev = encoder->dev;
+ struct intel_lvds *intel_lvds = to_intel_lvds(encoder);
if (mode == DRM_MODE_DPMS_ON)
- intel_lvds_set_power(dev, true);
+ intel_lvds_enable(intel_lvds);
else
- intel_lvds_set_power(dev, false);
+ intel_lvds_disable(intel_lvds);
/* XXX: We never power down the LVDS pairs. */
}
@@ -146,16 +155,13 @@ static void intel_lvds_dpms(struct drm_encoder *encoder, int mode)
static int intel_lvds_mode_valid(struct drm_connector *connector,
struct drm_display_mode *mode)
{
- struct drm_device *dev = connector->dev;
- struct drm_i915_private *dev_priv = dev->dev_private;
- struct drm_display_mode *fixed_mode = dev_priv->panel_fixed_mode;
+ struct intel_lvds *intel_lvds = intel_attached_lvds(connector);
+ struct drm_display_mode *fixed_mode = intel_lvds->fixed_mode;
- if (fixed_mode) {
- if (mode->hdisplay > fixed_mode->hdisplay)
- return MODE_PANEL;
- if (mode->vdisplay > fixed_mode->vdisplay)
- return MODE_PANEL;
- }
+ if (mode->hdisplay > fixed_mode->hdisplay)
+ return MODE_PANEL;
+ if (mode->vdisplay > fixed_mode->vdisplay)
+ return MODE_PANEL;
return MODE_OK;
}
@@ -223,12 +229,12 @@ static bool intel_lvds_mode_fixup(struct drm_encoder *encoder,
struct drm_device *dev = encoder->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_crtc *intel_crtc = to_intel_crtc(encoder->crtc);
- struct intel_lvds *intel_lvds = enc_to_intel_lvds(encoder);
+ struct intel_lvds *intel_lvds = to_intel_lvds(encoder);
struct drm_encoder *tmp_encoder;
u32 pfit_control = 0, pfit_pgm_ratios = 0, border = 0;
/* Should never happen!! */
- if (!IS_I965G(dev) && intel_crtc->pipe == 0) {
+ if (INTEL_INFO(dev)->gen < 4 && intel_crtc->pipe == 0) {
DRM_ERROR("Can't support LVDS on pipe A\n");
return false;
}
@@ -241,9 +247,6 @@ static bool intel_lvds_mode_fixup(struct drm_encoder *encoder,
return false;
}
}
- /* If we don't have a panel mode, there is nothing we can do */
- if (dev_priv->panel_fixed_mode == NULL)
- return true;
/*
* We have timings from the BIOS for the panel, put them in
@@ -251,7 +254,7 @@ static bool intel_lvds_mode_fixup(struct drm_encoder *encoder,
* with the panel scaling set up to source from the H/VDisplay
* of the original mode.
*/
- intel_fixed_panel_mode(dev_priv->panel_fixed_mode, adjusted_mode);
+ intel_fixed_panel_mode(intel_lvds->fixed_mode, adjusted_mode);
if (HAS_PCH_SPLIT(dev)) {
intel_pch_panel_fitting(dev, intel_lvds->fitting_mode,
@@ -260,8 +263,8 @@ static bool intel_lvds_mode_fixup(struct drm_encoder *encoder,
}
/* Make sure pre-965s set dither correctly */
- if (!IS_I965G(dev)) {
- if (dev_priv->panel_wants_dither || dev_priv->lvds_dither)
+ if (INTEL_INFO(dev)->gen < 4) {
+ if (dev_priv->lvds_dither)
pfit_control |= PANEL_8TO6_DITHER_ENABLE;
}
@@ -271,7 +274,7 @@ static bool intel_lvds_mode_fixup(struct drm_encoder *encoder,
goto out;
/* 965+ wants fuzzy fitting */
- if (IS_I965G(dev))
+ if (INTEL_INFO(dev)->gen >= 4)
pfit_control |= ((intel_crtc->pipe << PFIT_PIPE_SHIFT) |
PFIT_FILTER_FUZZY);
@@ -297,7 +300,7 @@ static bool intel_lvds_mode_fixup(struct drm_encoder *encoder,
case DRM_MODE_SCALE_ASPECT:
/* Scale but preserve the aspect ratio */
- if (IS_I965G(dev)) {
+ if (INTEL_INFO(dev)->gen >= 4) {
u32 scaled_width = adjusted_mode->hdisplay * mode->vdisplay;
u32 scaled_height = mode->hdisplay * adjusted_mode->vdisplay;
@@ -356,7 +359,7 @@ static bool intel_lvds_mode_fixup(struct drm_encoder *encoder,
* Fortunately this is all done for us in hw.
*/
pfit_control |= PFIT_ENABLE;
- if (IS_I965G(dev))
+ if (INTEL_INFO(dev)->gen >= 4)
pfit_control |= PFIT_SCALING_AUTO;
else
pfit_control |= (VERT_AUTO_SCALE | HORIZ_AUTO_SCALE |
@@ -369,8 +372,12 @@ static bool intel_lvds_mode_fixup(struct drm_encoder *encoder,
}
out:
- intel_lvds->pfit_control = pfit_control;
- intel_lvds->pfit_pgm_ratios = pfit_pgm_ratios;
+ if (pfit_control != intel_lvds->pfit_control ||
+ pfit_pgm_ratios != intel_lvds->pfit_pgm_ratios) {
+ intel_lvds->pfit_control = pfit_control;
+ intel_lvds->pfit_pgm_ratios = pfit_pgm_ratios;
+ intel_lvds->pfit_dirty = true;
+ }
dev_priv->lvds_border_bits = border;
/*
@@ -386,56 +393,71 @@ static void intel_lvds_prepare(struct drm_encoder *encoder)
{
struct drm_device *dev = encoder->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
- u32 reg;
-
- if (HAS_PCH_SPLIT(dev))
- reg = BLC_PWM_CPU_CTL;
- else
- reg = BLC_PWM_CTL;
-
- dev_priv->saveBLC_PWM_CTL = I915_READ(reg);
- dev_priv->backlight_duty_cycle = (dev_priv->saveBLC_PWM_CTL &
- BACKLIGHT_DUTY_CYCLE_MASK);
+ struct intel_lvds *intel_lvds = to_intel_lvds(encoder);
+
+ dev_priv->backlight_level = intel_panel_get_backlight(dev);
+
+ /* We try to do the minimum that is necessary in order to unlock
+ * the registers for mode setting.
+ *
+ * On Ironlake, this is quite simple as we just set the unlock key
+ * and ignore all subtleties. (This may cause some issues...)
+ *
+ * Prior to Ironlake, we must disable the pipe if we want to adjust
+ * the panel fitter. However at all other times we can just reset
+ * the registers regardless.
+ */
- intel_lvds_set_power(dev, false);
+ if (HAS_PCH_SPLIT(dev)) {
+ I915_WRITE(PCH_PP_CONTROL,
+ I915_READ(PCH_PP_CONTROL) | PANEL_UNLOCK_REGS);
+ } else if (intel_lvds->pfit_dirty) {
+ I915_WRITE(PP_CONTROL,
+ (I915_READ(PP_CONTROL) | PANEL_UNLOCK_REGS)
+ & ~POWER_TARGET_ON);
+ } else {
+ I915_WRITE(PP_CONTROL,
+ I915_READ(PP_CONTROL) | PANEL_UNLOCK_REGS);
+ }
}
-static void intel_lvds_commit( struct drm_encoder *encoder)
+static void intel_lvds_commit(struct drm_encoder *encoder)
{
struct drm_device *dev = encoder->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
+ struct intel_lvds *intel_lvds = to_intel_lvds(encoder);
+
+ if (dev_priv->backlight_level == 0)
+ dev_priv->backlight_level = intel_panel_get_max_backlight(dev);
- if (dev_priv->backlight_duty_cycle == 0)
- dev_priv->backlight_duty_cycle =
- intel_lvds_get_max_backlight(dev);
+ /* Undo any unlocking done in prepare to prevent accidental
+ * adjustment of the registers.
+ */
+ if (HAS_PCH_SPLIT(dev)) {
+ u32 val = I915_READ(PCH_PP_CONTROL);
+ if ((val & PANEL_UNLOCK_REGS) == PANEL_UNLOCK_REGS)
+ I915_WRITE(PCH_PP_CONTROL, val & 0x3);
+ } else {
+ u32 val = I915_READ(PP_CONTROL);
+ if ((val & PANEL_UNLOCK_REGS) == PANEL_UNLOCK_REGS)
+ I915_WRITE(PP_CONTROL, val & 0x3);
+ }
- intel_lvds_set_power(dev, true);
+ /* Always do a full power on as we do not know what state
+ * we were left in.
+ */
+ intel_lvds_enable(intel_lvds);
}
static void intel_lvds_mode_set(struct drm_encoder *encoder,
struct drm_display_mode *mode,
struct drm_display_mode *adjusted_mode)
{
- struct drm_device *dev = encoder->dev;
- struct drm_i915_private *dev_priv = dev->dev_private;
- struct intel_lvds *intel_lvds = enc_to_intel_lvds(encoder);
-
/*
* The LVDS pin pair will already have been turned on in the
* intel_crtc_mode_set since it has a large impact on the DPLL
* settings.
*/
-
- if (HAS_PCH_SPLIT(dev))
- return;
-
- /*
- * Enable automatic panel scaling so that non-native modes fill the
- * screen. Should be enabled before the pipe is enabled, according to
- * register description and PRM.
- */
- I915_WRITE(PFIT_PGM_RATIOS, intel_lvds->pfit_pgm_ratios);
- I915_WRITE(PFIT_CONTROL, intel_lvds->pfit_control);
}
/**
@@ -465,38 +487,19 @@ intel_lvds_detect(struct drm_connector *connector, bool force)
*/
static int intel_lvds_get_modes(struct drm_connector *connector)
{
+ struct intel_lvds *intel_lvds = intel_attached_lvds(connector);
struct drm_device *dev = connector->dev;
- struct drm_encoder *encoder = intel_attached_encoder(connector);
- struct intel_encoder *intel_encoder = enc_to_intel_encoder(encoder);
- struct drm_i915_private *dev_priv = dev->dev_private;
- int ret = 0;
+ struct drm_display_mode *mode;
- if (dev_priv->lvds_edid_good) {
- ret = intel_ddc_get_modes(connector, intel_encoder->ddc_bus);
+ if (intel_lvds->edid)
+ return drm_add_edid_modes(connector, intel_lvds->edid);
- if (ret)
- return ret;
- }
-
- /* Didn't get an EDID, so
- * Set wide sync ranges so we get all modes
- * handed to valid_mode for checking
- */
- connector->display_info.min_vfreq = 0;
- connector->display_info.max_vfreq = 200;
- connector->display_info.min_hfreq = 0;
- connector->display_info.max_hfreq = 200;
-
- if (dev_priv->panel_fixed_mode != NULL) {
- struct drm_display_mode *mode;
-
- mode = drm_mode_duplicate(dev, dev_priv->panel_fixed_mode);
- drm_mode_probed_add(connector, mode);
-
- return 1;
- }
+ mode = drm_mode_duplicate(dev, intel_lvds->fixed_mode);
+ if (mode == NULL)
+ return 0;
- return 0;
+ drm_mode_probed_add(connector, mode);
+ return 1;
}
static int intel_no_modeset_on_lid_dmi_callback(const struct dmi_system_id *id)
@@ -587,18 +590,17 @@ static int intel_lvds_set_property(struct drm_connector *connector,
struct drm_property *property,
uint64_t value)
{
+ struct intel_lvds *intel_lvds = intel_attached_lvds(connector);
struct drm_device *dev = connector->dev;
- if (property == dev->mode_config.scaling_mode_property &&
- connector->encoder) {
- struct drm_crtc *crtc = connector->encoder->crtc;
- struct drm_encoder *encoder = connector->encoder;
- struct intel_lvds *intel_lvds = enc_to_intel_lvds(encoder);
+ if (property == dev->mode_config.scaling_mode_property) {
+ struct drm_crtc *crtc = intel_lvds->base.base.crtc;
if (value == DRM_MODE_SCALE_NONE) {
DRM_DEBUG_KMS("no scaling not supported\n");
- return 0;
+ return -EINVAL;
}
+
if (intel_lvds->fitting_mode == value) {
/* the LVDS scaling property is not changed */
return 0;
@@ -628,7 +630,7 @@ static const struct drm_encoder_helper_funcs intel_lvds_helper_funcs = {
static const struct drm_connector_helper_funcs intel_lvds_connector_helper_funcs = {
.get_modes = intel_lvds_get_modes,
.mode_valid = intel_lvds_mode_valid,
- .best_encoder = intel_attached_encoder,
+ .best_encoder = intel_best_encoder,
};
static const struct drm_connector_funcs intel_lvds_connector_funcs = {
@@ -726,16 +728,14 @@ static const struct dmi_system_id intel_no_lvds[] = {
* Find the reduced downclock for LVDS in EDID.
*/
static void intel_find_lvds_downclock(struct drm_device *dev,
- struct drm_connector *connector)
+ struct drm_display_mode *fixed_mode,
+ struct drm_connector *connector)
{
struct drm_i915_private *dev_priv = dev->dev_private;
- struct drm_display_mode *scan, *panel_fixed_mode;
+ struct drm_display_mode *scan;
int temp_downclock;
- panel_fixed_mode = dev_priv->panel_fixed_mode;
- temp_downclock = panel_fixed_mode->clock;
-
- mutex_lock(&dev->mode_config.mutex);
+ temp_downclock = fixed_mode->clock;
list_for_each_entry(scan, &connector->probed_modes, head) {
/*
* If one mode has the same resolution with the fixed_panel
@@ -744,14 +744,14 @@ static void intel_find_lvds_downclock(struct drm_device *dev,
* case we can set the different FPx0/1 to dynamically select
* between low and high frequency.
*/
- if (scan->hdisplay == panel_fixed_mode->hdisplay &&
- scan->hsync_start == panel_fixed_mode->hsync_start &&
- scan->hsync_end == panel_fixed_mode->hsync_end &&
- scan->htotal == panel_fixed_mode->htotal &&
- scan->vdisplay == panel_fixed_mode->vdisplay &&
- scan->vsync_start == panel_fixed_mode->vsync_start &&
- scan->vsync_end == panel_fixed_mode->vsync_end &&
- scan->vtotal == panel_fixed_mode->vtotal) {
+ if (scan->hdisplay == fixed_mode->hdisplay &&
+ scan->hsync_start == fixed_mode->hsync_start &&
+ scan->hsync_end == fixed_mode->hsync_end &&
+ scan->htotal == fixed_mode->htotal &&
+ scan->vdisplay == fixed_mode->vdisplay &&
+ scan->vsync_start == fixed_mode->vsync_start &&
+ scan->vsync_end == fixed_mode->vsync_end &&
+ scan->vtotal == fixed_mode->vtotal) {
if (scan->clock < temp_downclock) {
/*
* The downclock is already found. But we
@@ -761,17 +761,14 @@ static void intel_find_lvds_downclock(struct drm_device *dev,
}
}
}
- mutex_unlock(&dev->mode_config.mutex);
- if (temp_downclock < panel_fixed_mode->clock &&
- i915_lvds_downclock) {
+ if (temp_downclock < fixed_mode->clock && i915_lvds_downclock) {
/* We found the downclock for LVDS. */
dev_priv->lvds_downclock_avail = 1;
dev_priv->lvds_downclock = temp_downclock;
DRM_DEBUG_KMS("LVDS downclock is found in EDID. "
- "Normal clock %dKhz, downclock %dKhz\n",
- panel_fixed_mode->clock, temp_downclock);
+ "Normal clock %dKhz, downclock %dKhz\n",
+ fixed_mode->clock, temp_downclock);
}
- return;
}
/*
@@ -780,38 +777,67 @@ static void intel_find_lvds_downclock(struct drm_device *dev,
* If it is present, return 1.
* If it is not present, return false.
* If no child dev is parsed from VBT, it assumes that the LVDS is present.
- * Note: The addin_offset should also be checked for LVDS panel.
- * Only when it is non-zero, it is assumed that it is present.
*/
-static int lvds_is_present_in_vbt(struct drm_device *dev)
+static bool lvds_is_present_in_vbt(struct drm_device *dev,
+ u8 *i2c_pin)
{
struct drm_i915_private *dev_priv = dev->dev_private;
- struct child_device_config *p_child;
- int i, ret;
+ int i;
if (!dev_priv->child_dev_num)
- return 1;
+ return true;
- ret = 0;
for (i = 0; i < dev_priv->child_dev_num; i++) {
- p_child = dev_priv->child_dev + i;
- /*
- * If the device type is not LFP, continue.
- * If the device type is 0x22, it is also regarded as LFP.
+ struct child_device_config *child = dev_priv->child_dev + i;
+
+ /* If the device type is not LFP, continue.
+ * We have to check both the new identifiers as well as the
+ * old for compatibility with some BIOSes.
*/
- if (p_child->device_type != DEVICE_TYPE_INT_LFP &&
- p_child->device_type != DEVICE_TYPE_LFP)
+ if (child->device_type != DEVICE_TYPE_INT_LFP &&
+ child->device_type != DEVICE_TYPE_LFP)
continue;
- /* The addin_offset should be checked. Only when it is
- * non-zero, it is regarded as present.
+ if (child->i2c_pin)
+ *i2c_pin = child->i2c_pin;
+
+ /* However, we cannot trust the BIOS writers to populate
+ * the VBT correctly. Since LVDS requires additional
+ * information from AIM blocks, a non-zero addin offset is
+ * a good indicator that the LVDS is actually present.
*/
- if (p_child->addin_offset) {
- ret = 1;
- break;
- }
+ if (child->addin_offset)
+ return true;
+
+ /* But even then some BIOS writers perform some black magic
+ * and instantiate the device without reference to any
+ * additional data. Trust that if the VBT was written into
+ * the OpRegion then they have validated the LVDS's existence.
+ */
+ if (dev_priv->opregion.vbt)
+ return true;
}
- return ret;
+
+ return false;
+}
+
+static bool intel_lvds_ddc_probe(struct drm_device *dev, u8 pin)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ u8 buf = 0;
+ struct i2c_msg msgs[] = {
+ {
+ .addr = 0x50, /* DDC/EDID EEPROM, 7-bit address */
+ .flags = 0,
+ .len = 1,
+ .buf = &buf,
+ },
+ };
+ struct i2c_adapter *i2c = &dev_priv->gmbus[pin].adapter;
+ /* XXX this only appears to work when using GMBUS */
+ if (intel_gmbus_is_forced_bit(i2c))
+ return true;
+ return i2c_transfer(i2c, msgs, 1) == 1;
}
/**
@@ -821,7 +847,7 @@ static int lvds_is_present_in_vbt(struct drm_device *dev)
* Create the connector, register the LVDS DDC bus, and try to figure out what
* modes we can display on the LVDS panel (if present).
*/
-void intel_lvds_init(struct drm_device *dev)
+bool intel_lvds_init(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_lvds *intel_lvds;
@@ -832,48 +858,58 @@ void intel_lvds_init(struct drm_device *dev)
struct drm_display_mode *scan; /* *modes, *bios_mode; */
struct drm_crtc *crtc;
u32 lvds;
- int pipe, gpio = GPIOC;
+ int pipe;
+ u8 pin;
/* Skip init on machines we know falsely report LVDS */
if (dmi_check_system(intel_no_lvds))
- return;
+ return false;
- if (!lvds_is_present_in_vbt(dev)) {
+ pin = GMBUS_PORT_PANEL;
+ if (!lvds_is_present_in_vbt(dev, &pin)) {
DRM_DEBUG_KMS("LVDS is not present in VBT\n");
- return;
+ return false;
}
if (HAS_PCH_SPLIT(dev)) {
if ((I915_READ(PCH_LVDS) & LVDS_DETECTED) == 0)
- return;
- if (dev_priv->edp_support) {
+ return false;
+ if (dev_priv->edp.support) {
DRM_DEBUG_KMS("disable LVDS for eDP support\n");
- return;
+ return false;
}
- gpio = PCH_GPIOC;
+ }
+
+ if (!intel_lvds_ddc_probe(dev, pin)) {
+ DRM_DEBUG_KMS("LVDS did not respond to DDC probe\n");
+ return false;
}
intel_lvds = kzalloc(sizeof(struct intel_lvds), GFP_KERNEL);
if (!intel_lvds) {
- return;
+ return false;
}
intel_connector = kzalloc(sizeof(struct intel_connector), GFP_KERNEL);
if (!intel_connector) {
kfree(intel_lvds);
- return;
+ return false;
+ }
+
+ if (!HAS_PCH_SPLIT(dev)) {
+ intel_lvds->pfit_control = I915_READ(PFIT_CONTROL);
}
intel_encoder = &intel_lvds->base;
- encoder = &intel_encoder->enc;
+ encoder = &intel_encoder->base;
connector = &intel_connector->base;
drm_connector_init(dev, &intel_connector->base, &intel_lvds_connector_funcs,
DRM_MODE_CONNECTOR_LVDS);
- drm_encoder_init(dev, &intel_encoder->enc, &intel_lvds_enc_funcs,
+ drm_encoder_init(dev, &intel_encoder->base, &intel_lvds_enc_funcs,
DRM_MODE_ENCODER_LVDS);
- drm_mode_connector_attach_encoder(&intel_connector->base, &intel_encoder->enc);
+ intel_connector_attach_encoder(intel_connector, intel_encoder);
intel_encoder->type = INTEL_OUTPUT_LVDS;
intel_encoder->clone_mask = (1 << INTEL_LVDS_CLONE_BIT);
@@ -904,43 +940,50 @@ void intel_lvds_init(struct drm_device *dev)
* if closed, act like it's not there for now
*/
- /* Set up the DDC bus. */
- intel_encoder->ddc_bus = intel_i2c_create(dev, gpio, "LVDSDDC_C");
- if (!intel_encoder->ddc_bus) {
- dev_printk(KERN_ERR, &dev->pdev->dev, "DDC bus registration "
- "failed.\n");
- goto failed;
- }
-
/*
* Attempt to get the fixed panel mode from DDC. Assume that the
* preferred mode is the right one.
*/
- dev_priv->lvds_edid_good = true;
-
- if (!intel_ddc_get_modes(connector, intel_encoder->ddc_bus))
- dev_priv->lvds_edid_good = false;
+ intel_lvds->edid = drm_get_edid(connector,
+ &dev_priv->gmbus[pin].adapter);
+ if (intel_lvds->edid) {
+ if (drm_add_edid_modes(connector,
+ intel_lvds->edid)) {
+ drm_mode_connector_update_edid_property(connector,
+ intel_lvds->edid);
+ } else {
+ kfree(intel_lvds->edid);
+ intel_lvds->edid = NULL;
+ }
+ }
+ if (!intel_lvds->edid) {
+ /* Didn't get an EDID, so
+ * Set wide sync ranges so we get all modes
+ * handed to valid_mode for checking
+ */
+ connector->display_info.min_vfreq = 0;
+ connector->display_info.max_vfreq = 200;
+ connector->display_info.min_hfreq = 0;
+ connector->display_info.max_hfreq = 200;
+ }
list_for_each_entry(scan, &connector->probed_modes, head) {
- mutex_lock(&dev->mode_config.mutex);
if (scan->type & DRM_MODE_TYPE_PREFERRED) {
- dev_priv->panel_fixed_mode =
+ intel_lvds->fixed_mode =
drm_mode_duplicate(dev, scan);
- mutex_unlock(&dev->mode_config.mutex);
- intel_find_lvds_downclock(dev, connector);
+ intel_find_lvds_downclock(dev,
+ intel_lvds->fixed_mode,
+ connector);
goto out;
}
- mutex_unlock(&dev->mode_config.mutex);
}
/* Failed to get EDID, what about VBT? */
if (dev_priv->lfp_lvds_vbt_mode) {
- mutex_lock(&dev->mode_config.mutex);
- dev_priv->panel_fixed_mode =
+ intel_lvds->fixed_mode =
drm_mode_duplicate(dev, dev_priv->lfp_lvds_vbt_mode);
- mutex_unlock(&dev->mode_config.mutex);
- if (dev_priv->panel_fixed_mode) {
- dev_priv->panel_fixed_mode->type |=
+ if (intel_lvds->fixed_mode) {
+ intel_lvds->fixed_mode->type |=
DRM_MODE_TYPE_PREFERRED;
goto out;
}
@@ -958,19 +1001,19 @@ void intel_lvds_init(struct drm_device *dev)
lvds = I915_READ(LVDS);
pipe = (lvds & LVDS_PIPEB_SELECT) ? 1 : 0;
- crtc = intel_get_crtc_from_pipe(dev, pipe);
+ crtc = intel_get_crtc_for_pipe(dev, pipe);
if (crtc && (lvds & LVDS_PORT_EN)) {
- dev_priv->panel_fixed_mode = intel_crtc_mode_get(dev, crtc);
- if (dev_priv->panel_fixed_mode) {
- dev_priv->panel_fixed_mode->type |=
+ intel_lvds->fixed_mode = intel_crtc_mode_get(dev, crtc);
+ if (intel_lvds->fixed_mode) {
+ intel_lvds->fixed_mode->type |=
DRM_MODE_TYPE_PREFERRED;
goto out;
}
}
/* If we still don't have a mode after all that, give up. */
- if (!dev_priv->panel_fixed_mode)
+ if (!intel_lvds->fixed_mode)
goto failed;
out:
@@ -993,14 +1036,13 @@ out:
/* keep the LVDS connector */
dev_priv->int_lvds_connector = connector;
drm_sysfs_connector_add(connector);
- return;
+ return true;
failed:
DRM_DEBUG_KMS("No LVDS modes found, disabling.\n");
- if (intel_encoder->ddc_bus)
- intel_i2c_destroy(intel_encoder->ddc_bus);
drm_connector_cleanup(connector);
drm_encoder_cleanup(encoder);
kfree(intel_lvds);
kfree(intel_connector);
+ return false;
}
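
intel_lvds_init() above settles on a fixed panel mode by trying, in order, the EDID preferred mode, the VBT mode supplied by the BIOS, and finally the timings already programmed on the LVDS pipe. The sketch below models only that fallback order; the stub "sources" are hypothetical stand-ins for DDC, VBT and register reads.

#include <stddef.h>
#include <stdio.h>

struct panel_mode { const char *name; };

/* Hypothetical stand-ins for the three sources intel_lvds_init() consults. */
static struct panel_mode *edid_preferred_mode(void) { return NULL; }
static struct panel_mode *vbt_mode(void)
{ static struct panel_mode m = { "VBT 1280x800" }; return &m; }
static struct panel_mode *current_pipe_mode(void)
{ static struct panel_mode m = { "current pipe timings" }; return &m; }

/* Only the fallback order is modelled; everything else is elided. */
static struct panel_mode *pick_fixed_mode(void)
{
	struct panel_mode *m;

	if ((m = edid_preferred_mode()) != NULL)	/* 1. EDID preferred mode */
		return m;
	if ((m = vbt_mode()) != NULL)			/* 2. BIOS-provided VBT mode */
		return m;
	return current_pipe_mode();			/* 3. whatever is on the pipe now */
}

int main(void)
{
	struct panel_mode *m = pick_fixed_mode();

	printf("fixed mode: %s\n", m ? m->name : "none, init fails");
	return 0;
}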
diff --git a/drivers/gpu/drm/i915/intel_modes.c b/drivers/gpu/drm/i915/intel_modes.c
index 4b1fd3d9c73c..f70b7cf32bff 100644
--- a/drivers/gpu/drm/i915/intel_modes.c
+++ b/drivers/gpu/drm/i915/intel_modes.c
@@ -1,6 +1,6 @@
/*
* Copyright (c) 2007 Dave Airlie <airlied@linux.ie>
- * Copyright (c) 2007 Intel Corporation
+ * Copyright (c) 2007, 2010 Intel Corporation
* Jesse Barnes <jesse.barnes@intel.com>
*
* Permission is hereby granted, free of charge, to any person obtaining a
@@ -34,11 +34,11 @@
* intel_ddc_probe
*
*/
-bool intel_ddc_probe(struct intel_encoder *intel_encoder)
+bool intel_ddc_probe(struct intel_encoder *intel_encoder, int ddc_bus)
{
+ struct drm_i915_private *dev_priv = intel_encoder->base.dev->dev_private;
u8 out_buf[] = { 0x0, 0x0};
u8 buf[2];
- int ret;
struct i2c_msg msgs[] = {
{
.addr = 0x50,
@@ -54,13 +54,7 @@ bool intel_ddc_probe(struct intel_encoder *intel_encoder)
}
};
- intel_i2c_quirk_set(intel_encoder->enc.dev, true);
- ret = i2c_transfer(intel_encoder->ddc_bus, msgs, 2);
- intel_i2c_quirk_set(intel_encoder->enc.dev, false);
- if (ret == 2)
- return true;
-
- return false;
+ return i2c_transfer(&dev_priv->gmbus[ddc_bus].adapter, msgs, 2) == 2;
}
/**
@@ -76,9 +70,7 @@ int intel_ddc_get_modes(struct drm_connector *connector,
struct edid *edid;
int ret = 0;
- intel_i2c_quirk_set(connector->dev, true);
edid = drm_get_edid(connector, adapter);
- intel_i2c_quirk_set(connector->dev, false);
if (edid) {
drm_mode_connector_update_edid_property(connector, edid);
ret = drm_add_edid_modes(connector, edid);
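
intel_ddc_probe() above declares a sink present when a two-message transaction to the EDID EEPROM at 7-bit address 0x50 completes. A rough userspace analogue using the i2c-dev interface is sketched below; the /dev/i2c-1 node is an assumption, and the check is only that both messages were ACKed.

#include <fcntl.h>
#include <stdint.h>
#include <stdio.h>
#include <sys/ioctl.h>
#include <unistd.h>
#include <linux/i2c.h>
#include <linux/i2c-dev.h>

/* Returns 1 if something ACKs a write+read at the EDID address, 0 otherwise. */
static int ddc_probe(const char *dev)
{
	uint8_t out = 0x00, in = 0;
	struct i2c_msg msgs[2] = {
		{ .addr = 0x50, .flags = 0,        .len = 1, .buf = &out },
		{ .addr = 0x50, .flags = I2C_M_RD, .len = 1, .buf = &in  },
	};
	struct i2c_rdwr_ioctl_data xfer = { .msgs = msgs, .nmsgs = 2 };
	int fd, ok;

	fd = open(dev, O_RDWR);
	if (fd < 0)
		return 0;
	/* The ioctl reports the number of messages completed, like i2c_transfer(). */
	ok = ioctl(fd, I2C_RDWR, &xfer) == 2;
	close(fd);
	return ok;
}

int main(void)
{
	const char *dev = "/dev/i2c-1";	/* assumed adapter node */

	printf("%s: EDID device %s\n", dev,
	       ddc_probe(dev) ? "responded" : "did not respond");
	return 0;
}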
diff --git a/drivers/gpu/drm/i915/i915_opregion.c b/drivers/gpu/drm/i915/intel_opregion.c
index ea5d3fea4b61..9b0d9a867aea 100644
--- a/drivers/gpu/drm/i915/i915_opregion.c
+++ b/drivers/gpu/drm/i915/intel_opregion.c
@@ -31,17 +31,16 @@
#include "drmP.h"
#include "i915_drm.h"
#include "i915_drv.h"
+#include "intel_drv.h"
#define PCI_ASLE 0xe4
-#define PCI_LBPC 0xf4
#define PCI_ASLS 0xfc
-#define OPREGION_SZ (8*1024)
#define OPREGION_HEADER_OFFSET 0
#define OPREGION_ACPI_OFFSET 0x100
#define OPREGION_SWSCI_OFFSET 0x200
#define OPREGION_ASLE_OFFSET 0x300
-#define OPREGION_VBT_OFFSET 0x1000
+#define OPREGION_VBT_OFFSET 0x400
#define OPREGION_SIGNATURE "IntelGraphicsMem"
#define MBOX_ACPI (1<<0)
@@ -143,40 +142,22 @@ struct opregion_asle {
#define ACPI_DIGITAL_OUTPUT (3<<8)
#define ACPI_LVDS_OUTPUT (4<<8)
+#ifdef CONFIG_ACPI
static u32 asle_set_backlight(struct drm_device *dev, u32 bclp)
{
struct drm_i915_private *dev_priv = dev->dev_private;
struct opregion_asle *asle = dev_priv->opregion.asle;
- u32 blc_pwm_ctl, blc_pwm_ctl2;
- u32 max_backlight, level, shift;
+ u32 max;
if (!(bclp & ASLE_BCLP_VALID))
return ASLE_BACKLIGHT_FAILED;
bclp &= ASLE_BCLP_MSK;
- if (bclp < 0 || bclp > 255)
+ if (bclp > 255)
return ASLE_BACKLIGHT_FAILED;
- blc_pwm_ctl = I915_READ(BLC_PWM_CTL);
- blc_pwm_ctl2 = I915_READ(BLC_PWM_CTL2);
-
- if (IS_I965G(dev) && (blc_pwm_ctl2 & BLM_COMBINATION_MODE))
- pci_write_config_dword(dev->pdev, PCI_LBPC, bclp);
- else {
- if (IS_PINEVIEW(dev)) {
- blc_pwm_ctl &= ~(BACKLIGHT_DUTY_CYCLE_MASK - 1);
- max_backlight = (blc_pwm_ctl & BACKLIGHT_MODULATION_FREQ_MASK) >>
- BACKLIGHT_MODULATION_FREQ_SHIFT;
- shift = BACKLIGHT_DUTY_CYCLE_SHIFT + 1;
- } else {
- blc_pwm_ctl &= ~BACKLIGHT_DUTY_CYCLE_MASK;
- max_backlight = ((blc_pwm_ctl & BACKLIGHT_MODULATION_FREQ_MASK) >>
- BACKLIGHT_MODULATION_FREQ_SHIFT) * 2;
- shift = BACKLIGHT_DUTY_CYCLE_SHIFT;
- }
- level = (bclp * max_backlight) / 255;
- I915_WRITE(BLC_PWM_CTL, blc_pwm_ctl | (level << shift));
- }
+ max = intel_panel_get_max_backlight(dev);
+ intel_panel_set_backlight(dev, bclp * max / 255);
asle->cblv = (bclp*0x64)/0xff | ASLE_CBLV_VALID;
return 0;
@@ -211,7 +192,7 @@ static u32 asle_set_pfit(struct drm_device *dev, u32 pfit)
return 0;
}
-void opregion_asle_intr(struct drm_device *dev)
+void intel_opregion_asle_intr(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
struct opregion_asle *asle = dev_priv->opregion.asle;
@@ -243,37 +224,8 @@ void opregion_asle_intr(struct drm_device *dev)
asle->aslc = asle_stat;
}
-static u32 asle_set_backlight_ironlake(struct drm_device *dev, u32 bclp)
-{
- struct drm_i915_private *dev_priv = dev->dev_private;
- struct opregion_asle *asle = dev_priv->opregion.asle;
- u32 cpu_pwm_ctl, pch_pwm_ctl2;
- u32 max_backlight, level;
-
- if (!(bclp & ASLE_BCLP_VALID))
- return ASLE_BACKLIGHT_FAILED;
-
- bclp &= ASLE_BCLP_MSK;
- if (bclp < 0 || bclp > 255)
- return ASLE_BACKLIGHT_FAILED;
-
- cpu_pwm_ctl = I915_READ(BLC_PWM_CPU_CTL);
- pch_pwm_ctl2 = I915_READ(BLC_PWM_PCH_CTL2);
- /* get the max PWM frequency */
- max_backlight = (pch_pwm_ctl2 >> 16) & BACKLIGHT_DUTY_CYCLE_MASK;
- /* calculate the expected PMW frequency */
- level = (bclp * max_backlight) / 255;
- /* reserve the high 16 bits */
- cpu_pwm_ctl &= ~(BACKLIGHT_DUTY_CYCLE_MASK);
- /* write the updated PWM frequency */
- I915_WRITE(BLC_PWM_CPU_CTL, cpu_pwm_ctl | level);
-
- asle->cblv = (bclp*0x64)/0xff | ASLE_CBLV_VALID;
-
- return 0;
-}
-
-void ironlake_opregion_gse_intr(struct drm_device *dev)
+/* Only present on Ironlake+ */
+void intel_opregion_gse_intr(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
struct opregion_asle *asle = dev_priv->opregion.asle;
@@ -296,7 +248,7 @@ void ironlake_opregion_gse_intr(struct drm_device *dev)
}
if (asle_req & ASLE_SET_BACKLIGHT)
- asle_stat |= asle_set_backlight_ironlake(dev, asle->bclp);
+ asle_stat |= asle_set_backlight(dev, asle->bclp);
if (asle_req & ASLE_SET_PFIT) {
DRM_DEBUG_DRIVER("Pfit is not supported\n");
@@ -315,7 +267,7 @@ void ironlake_opregion_gse_intr(struct drm_device *dev)
#define ASLE_PFIT_EN (1<<2)
#define ASLE_PFMB_EN (1<<3)
-void opregion_enable_asle(struct drm_device *dev)
+void intel_opregion_enable_asle(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
struct opregion_asle *asle = dev_priv->opregion.asle;
@@ -464,7 +416,58 @@ blind_set:
goto end;
}
-int intel_opregion_init(struct drm_device *dev, int resume)
+void intel_opregion_init(struct drm_device *dev)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct intel_opregion *opregion = &dev_priv->opregion;
+
+ if (!opregion->header)
+ return;
+
+ if (opregion->acpi) {
+ if (drm_core_check_feature(dev, DRIVER_MODESET))
+ intel_didl_outputs(dev);
+
+ /* Notify BIOS we are ready to handle ACPI video ext notifs.
+ * Right now, all the events are handled by the ACPI video module.
+ * We don't actually need to do anything with them. */
+ opregion->acpi->csts = 0;
+ opregion->acpi->drdy = 1;
+
+ system_opregion = opregion;
+ register_acpi_notifier(&intel_opregion_notifier);
+ }
+
+ if (opregion->asle)
+ intel_opregion_enable_asle(dev);
+}
+
+void intel_opregion_fini(struct drm_device *dev)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct intel_opregion *opregion = &dev_priv->opregion;
+
+ if (!opregion->header)
+ return;
+
+ if (opregion->acpi) {
+ opregion->acpi->drdy = 0;
+
+ system_opregion = NULL;
+ unregister_acpi_notifier(&intel_opregion_notifier);
+ }
+
+ /* just clear all opregion memory pointers now */
+ iounmap(opregion->header);
+ opregion->header = NULL;
+ opregion->acpi = NULL;
+ opregion->swsci = NULL;
+ opregion->asle = NULL;
+ opregion->vbt = NULL;
+}
+#endif
+
+int intel_opregion_setup(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_opregion *opregion = &dev_priv->opregion;
@@ -479,29 +482,23 @@ int intel_opregion_init(struct drm_device *dev, int resume)
return -ENOTSUPP;
}
- base = ioremap(asls, OPREGION_SZ);
+ base = ioremap(asls, OPREGION_SIZE);
if (!base)
return -ENOMEM;
- opregion->header = base;
- if (memcmp(opregion->header->signature, OPREGION_SIGNATURE, 16)) {
+ if (memcmp(base, OPREGION_SIGNATURE, 16)) {
DRM_DEBUG_DRIVER("opregion signature mismatch\n");
err = -EINVAL;
goto err_out;
}
+ opregion->header = base;
+ opregion->vbt = base + OPREGION_VBT_OFFSET;
mboxes = opregion->header->mboxes;
if (mboxes & MBOX_ACPI) {
DRM_DEBUG_DRIVER("Public ACPI methods supported\n");
opregion->acpi = base + OPREGION_ACPI_OFFSET;
- if (drm_core_check_feature(dev, DRIVER_MODESET))
- intel_didl_outputs(dev);
- } else {
- DRM_DEBUG_DRIVER("Public ACPI methods not supported\n");
- err = -ENOTSUPP;
- goto err_out;
}
- opregion->enabled = 1;
if (mboxes & MBOX_SWSCI) {
DRM_DEBUG_DRIVER("SWSCI supported\n");
@@ -510,53 +507,11 @@ int intel_opregion_init(struct drm_device *dev, int resume)
if (mboxes & MBOX_ASLE) {
DRM_DEBUG_DRIVER("ASLE supported\n");
opregion->asle = base + OPREGION_ASLE_OFFSET;
- opregion_enable_asle(dev);
}
- if (!resume)
- acpi_video_register();
-
-
- /* Notify BIOS we are ready to handle ACPI video ext notifs.
- * Right now, all the events are handled by the ACPI video module.
- * We don't actually need to do anything with them. */
- opregion->acpi->csts = 0;
- opregion->acpi->drdy = 1;
-
- system_opregion = opregion;
- register_acpi_notifier(&intel_opregion_notifier);
-
return 0;
err_out:
- iounmap(opregion->header);
- opregion->header = NULL;
- acpi_video_register();
+ iounmap(base);
return err;
}
-
-void intel_opregion_free(struct drm_device *dev, int suspend)
-{
- struct drm_i915_private *dev_priv = dev->dev_private;
- struct intel_opregion *opregion = &dev_priv->opregion;
-
- if (!opregion->enabled)
- return;
-
- if (!suspend)
- acpi_video_unregister();
-
- opregion->acpi->drdy = 0;
-
- system_opregion = NULL;
- unregister_acpi_notifier(&intel_opregion_notifier);
-
- /* just clear all opregion memory pointers now */
- iounmap(opregion->header);
- opregion->header = NULL;
- opregion->acpi = NULL;
- opregion->swsci = NULL;
- opregion->asle = NULL;
-
- opregion->enabled = 0;
-}
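
intel_opregion_setup() above maps the ACPI OpRegion once and then derives the mailbox pointers from fixed offsets, keyed off the mboxes bitmask in the header. The sketch below shows only that carving step, using a fake in-memory buffer and a trimmed-down header layout (the real opregion_header has many more fields).

#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define OPREGION_ACPI_OFFSET  0x100
#define OPREGION_SWSCI_OFFSET 0x200
#define OPREGION_ASLE_OFFSET  0x300
#define OPREGION_VBT_OFFSET   0x400

#define MBOX_ACPI  (1 << 0)
#define MBOX_SWSCI (1 << 1)
#define MBOX_ASLE  (1 << 2)

/* Simplified header: just the signature and the mailbox bitmask. */
struct opregion_header { char signature[16]; uint32_t mboxes; };

struct opregion {
	struct opregion_header *header;
	void *acpi, *swsci, *asle, *vbt;
};

static int opregion_setup(struct opregion *o, uint8_t *base)
{
	struct opregion_header *hdr = (struct opregion_header *)base;

	if (memcmp(hdr->signature, "IntelGraphicsMem", 16) != 0)
		return -1;				/* signature mismatch */

	o->header = hdr;
	o->vbt = base + OPREGION_VBT_OFFSET;		/* VBT follows the mailboxes */
	if (hdr->mboxes & MBOX_ACPI)
		o->acpi = base + OPREGION_ACPI_OFFSET;
	if (hdr->mboxes & MBOX_SWSCI)
		o->swsci = base + OPREGION_SWSCI_OFFSET;
	if (hdr->mboxes & MBOX_ASLE)
		o->asle = base + OPREGION_ASLE_OFFSET;
	return 0;
}

int main(void)
{
	static uint8_t fake[8192];			/* stand-in for the ioremapped region */
	struct opregion o = { 0 };
	struct opregion_header *hdr = (struct opregion_header *)fake;

	memcpy(hdr->signature, "IntelGraphicsMem", 16);
	hdr->mboxes = MBOX_ACPI | MBOX_ASLE;

	if (opregion_setup(&o, fake) == 0)
		printf("acpi=%p swsci=%p asle=%p vbt=%p\n",
		       o.acpi, o.swsci, o.asle, o.vbt);
	return 0;
}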
diff --git a/drivers/gpu/drm/i915/intel_overlay.c b/drivers/gpu/drm/i915/intel_overlay.c
index 1d306a458be6..02ff0a481f47 100644
--- a/drivers/gpu/drm/i915/intel_overlay.c
+++ b/drivers/gpu/drm/i915/intel_overlay.c
@@ -170,57 +170,143 @@ struct overlay_registers {
u16 RESERVEDG[0x100 / 2 - N_HORIZ_UV_TAPS * N_PHASES];
};
-/* overlay flip addr flag */
-#define OFC_UPDATE 0x1
-
-#define OVERLAY_NONPHYSICAL(dev) (IS_G33(dev) || IS_I965G(dev))
-#define OVERLAY_EXISTS(dev) (!IS_G4X(dev) && !IS_IRONLAKE(dev) && !IS_GEN6(dev))
-
+struct intel_overlay {
+ struct drm_device *dev;
+ struct intel_crtc *crtc;
+ struct drm_i915_gem_object *vid_bo;
+ struct drm_i915_gem_object *old_vid_bo;
+ int active;
+ int pfit_active;
+ u32 pfit_vscale_ratio; /* shifted-point number, (1<<12) == 1.0 */
+ u32 color_key;
+ u32 brightness, contrast, saturation;
+ u32 old_xscale, old_yscale;
+ /* register access */
+ u32 flip_addr;
+ struct drm_i915_gem_object *reg_bo;
+ /* flip handling */
+ uint32_t last_flip_req;
+ void (*flip_tail)(struct intel_overlay *);
+};
-static struct overlay_registers *intel_overlay_map_regs_atomic(struct intel_overlay *overlay)
+static struct overlay_registers *
+intel_overlay_map_regs(struct intel_overlay *overlay)
{
drm_i915_private_t *dev_priv = overlay->dev->dev_private;
struct overlay_registers *regs;
- /* no recursive mappings */
- BUG_ON(overlay->virt_addr);
+ if (OVERLAY_NEEDS_PHYSICAL(overlay->dev))
+ regs = overlay->reg_bo->phys_obj->handle->vaddr;
+ else
+ regs = io_mapping_map_wc(dev_priv->mm.gtt_mapping,
+ overlay->reg_bo->gtt_offset);
- if (OVERLAY_NONPHYSICAL(overlay->dev)) {
- regs = io_mapping_map_atomic_wc(dev_priv->mm.gtt_mapping,
- overlay->reg_bo->gtt_offset,
- KM_USER0);
+ return regs;
+}
- if (!regs) {
- DRM_ERROR("failed to map overlay regs in GTT\n");
- return NULL;
- }
- } else
- regs = overlay->reg_bo->phys_obj->handle->vaddr;
+static void intel_overlay_unmap_regs(struct intel_overlay *overlay,
+ struct overlay_registers *regs)
+{
+ if (!OVERLAY_NEEDS_PHYSICAL(overlay->dev))
+ io_mapping_unmap(regs);
+}
+
+static int intel_overlay_do_wait_request(struct intel_overlay *overlay,
+ struct drm_i915_gem_request *request,
+ bool interruptible,
+ void (*tail)(struct intel_overlay *))
+{
+ struct drm_device *dev = overlay->dev;
+ drm_i915_private_t *dev_priv = dev->dev_private;
+ int ret;
- return overlay->virt_addr = regs;
+ BUG_ON(overlay->last_flip_req);
+ overlay->last_flip_req =
+ i915_add_request(dev, NULL, request, &dev_priv->render_ring);
+ if (overlay->last_flip_req == 0)
+ return -ENOMEM;
+
+ overlay->flip_tail = tail;
+ ret = i915_do_wait_request(dev,
+ overlay->last_flip_req, true,
+ &dev_priv->render_ring);
+ if (ret)
+ return ret;
+
+ overlay->last_flip_req = 0;
+ return 0;
}
-static void intel_overlay_unmap_regs_atomic(struct intel_overlay *overlay)
+/* Workaround for i830 bug where pipe a must be enabled to change control regs */
+static int
+i830_activate_pipe_a(struct drm_device *dev)
{
- if (OVERLAY_NONPHYSICAL(overlay->dev))
- io_mapping_unmap_atomic(overlay->virt_addr, KM_USER0);
+ drm_i915_private_t *dev_priv = dev->dev_private;
+ struct intel_crtc *crtc;
+ struct drm_crtc_helper_funcs *crtc_funcs;
+ struct drm_display_mode vesa_640x480 = {
+ DRM_MODE("640x480", DRM_MODE_TYPE_DRIVER, 25175, 640, 656,
+ 752, 800, 0, 480, 489, 492, 525, 0,
+ DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC)
+ }, *mode;
+
+ crtc = to_intel_crtc(dev_priv->pipe_to_crtc_mapping[0]);
+ if (crtc->dpms_mode == DRM_MODE_DPMS_ON)
+ return 0;
- overlay->virt_addr = NULL;
+ /* most i8xx have pipe a forced on, so don't trust dpms mode */
+ if (I915_READ(PIPEACONF) & PIPECONF_ENABLE)
+ return 0;
- return;
+ crtc_funcs = crtc->base.helper_private;
+ if (crtc_funcs->dpms == NULL)
+ return 0;
+
+ DRM_DEBUG_DRIVER("Enabling pipe A in order to enable overlay\n");
+
+ mode = drm_mode_duplicate(dev, &vesa_640x480);
+ drm_mode_set_crtcinfo(mode, CRTC_INTERLACE_HALVE_V);
+ if (!drm_crtc_helper_set_mode(&crtc->base, mode,
+ crtc->base.x, crtc->base.y,
+ crtc->base.fb))
+ return 0;
+
+ crtc_funcs->dpms(&crtc->base, DRM_MODE_DPMS_ON);
+ return 1;
+}
+
+static void
+i830_deactivate_pipe_a(struct drm_device *dev)
+{
+ drm_i915_private_t *dev_priv = dev->dev_private;
+ struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[0];
+ struct drm_crtc_helper_funcs *crtc_funcs = crtc->helper_private;
+
+ crtc_funcs->dpms(crtc, DRM_MODE_DPMS_OFF);
}
/* overlay needs to be disabled in OCMD reg */
static int intel_overlay_on(struct intel_overlay *overlay)
{
struct drm_device *dev = overlay->dev;
+ struct drm_i915_gem_request *request;
+ int pipe_a_quirk = 0;
int ret;
- drm_i915_private_t *dev_priv = dev->dev_private;
BUG_ON(overlay->active);
-
overlay->active = 1;
- overlay->hw_wedged = NEEDS_WAIT_FOR_FLIP;
+
+ if (IS_I830(dev)) {
+ pipe_a_quirk = i830_activate_pipe_a(dev);
+ if (pipe_a_quirk < 0)
+ return pipe_a_quirk;
+ }
+
+ request = kzalloc(sizeof(*request), GFP_KERNEL);
+ if (request == NULL) {
+ ret = -ENOMEM;
+ goto out;
+ }
BEGIN_LP_RING(4);
OUT_RING(MI_OVERLAY_FLIP | MI_OVERLAY_ON);
@@ -229,32 +315,30 @@ static int intel_overlay_on(struct intel_overlay *overlay)
OUT_RING(MI_NOOP);
ADVANCE_LP_RING();
- overlay->last_flip_req =
- i915_add_request(dev, NULL, 0, &dev_priv->render_ring);
- if (overlay->last_flip_req == 0)
- return -ENOMEM;
-
- ret = i915_do_wait_request(dev,
- overlay->last_flip_req, 1, &dev_priv->render_ring);
- if (ret != 0)
- return ret;
+ ret = intel_overlay_do_wait_request(overlay, request, true, NULL);
+out:
+ if (pipe_a_quirk)
+ i830_deactivate_pipe_a(dev);
- overlay->hw_wedged = 0;
- overlay->last_flip_req = 0;
- return 0;
+ return ret;
}
/* overlay needs to be enabled in OCMD reg */
-static void intel_overlay_continue(struct intel_overlay *overlay,
- bool load_polyphase_filter)
+static int intel_overlay_continue(struct intel_overlay *overlay,
+ bool load_polyphase_filter)
{
struct drm_device *dev = overlay->dev;
drm_i915_private_t *dev_priv = dev->dev_private;
+ struct drm_i915_gem_request *request;
u32 flip_addr = overlay->flip_addr;
u32 tmp;
BUG_ON(!overlay->active);
+ request = kzalloc(sizeof(*request), GFP_KERNEL);
+ if (request == NULL)
+ return -ENOMEM;
+
if (load_polyphase_filter)
flip_addr |= OFC_UPDATE;
@@ -269,220 +353,132 @@ static void intel_overlay_continue(struct intel_overlay *overlay,
ADVANCE_LP_RING();
overlay->last_flip_req =
- i915_add_request(dev, NULL, 0, &dev_priv->render_ring);
+ i915_add_request(dev, NULL, request, &dev_priv->render_ring);
+ return 0;
}
-static int intel_overlay_wait_flip(struct intel_overlay *overlay)
+static void intel_overlay_release_old_vid_tail(struct intel_overlay *overlay)
{
- struct drm_device *dev = overlay->dev;
- drm_i915_private_t *dev_priv = dev->dev_private;
- int ret;
- u32 tmp;
+ struct drm_gem_object *obj = &overlay->old_vid_bo->base;
- if (overlay->last_flip_req != 0) {
- ret = i915_do_wait_request(dev, overlay->last_flip_req,
- 1, &dev_priv->render_ring);
- if (ret == 0) {
- overlay->last_flip_req = 0;
-
- tmp = I915_READ(ISR);
-
- if (!(tmp & I915_OVERLAY_PLANE_FLIP_PENDING_INTERRUPT))
- return 0;
- }
- }
+ i915_gem_object_unpin(obj);
+ drm_gem_object_unreference(obj);
- /* synchronous slowpath */
- overlay->hw_wedged = RELEASE_OLD_VID;
+ overlay->old_vid_bo = NULL;
+}
- BEGIN_LP_RING(2);
- OUT_RING(MI_WAIT_FOR_EVENT | MI_WAIT_FOR_OVERLAY_FLIP);
- OUT_RING(MI_NOOP);
- ADVANCE_LP_RING();
+static void intel_overlay_off_tail(struct intel_overlay *overlay)
+{
+ struct drm_gem_object *obj;
- overlay->last_flip_req =
- i915_add_request(dev, NULL, 0, &dev_priv->render_ring);
- if (overlay->last_flip_req == 0)
- return -ENOMEM;
+ /* never have the overlay hw on without showing a frame */
+ BUG_ON(!overlay->vid_bo);
+ obj = &overlay->vid_bo->base;
- ret = i915_do_wait_request(dev, overlay->last_flip_req,
- 1, &dev_priv->render_ring);
- if (ret != 0)
- return ret;
+ i915_gem_object_unpin(obj);
+ drm_gem_object_unreference(obj);
+ overlay->vid_bo = NULL;
- overlay->hw_wedged = 0;
- overlay->last_flip_req = 0;
- return 0;
+ overlay->crtc->overlay = NULL;
+ overlay->crtc = NULL;
+ overlay->active = 0;
}
/* overlay needs to be disabled in OCMD reg */
-static int intel_overlay_off(struct intel_overlay *overlay)
+static int intel_overlay_off(struct intel_overlay *overlay,
+ bool interruptible)
{
- u32 flip_addr = overlay->flip_addr;
struct drm_device *dev = overlay->dev;
- drm_i915_private_t *dev_priv = dev->dev_private;
- int ret;
+ u32 flip_addr = overlay->flip_addr;
+ struct drm_i915_gem_request *request;
BUG_ON(!overlay->active);
+ request = kzalloc(sizeof(*request), GFP_KERNEL);
+ if (request == NULL)
+ return -ENOMEM;
+
/* According to intel docs the overlay hw may hang (when switching
* off) without loading the filter coeffs. It is however unclear whether
* this applies to the disabling of the overlay or to the switching off
* of the hw. Do it in both cases */
flip_addr |= OFC_UPDATE;
+ BEGIN_LP_RING(6);
/* wait for overlay to go idle */
- overlay->hw_wedged = SWITCH_OFF_STAGE_1;
-
- BEGIN_LP_RING(4);
OUT_RING(MI_OVERLAY_FLIP | MI_OVERLAY_CONTINUE);
OUT_RING(flip_addr);
- OUT_RING(MI_WAIT_FOR_EVENT | MI_WAIT_FOR_OVERLAY_FLIP);
- OUT_RING(MI_NOOP);
- ADVANCE_LP_RING();
-
- overlay->last_flip_req =
- i915_add_request(dev, NULL, 0, &dev_priv->render_ring);
- if (overlay->last_flip_req == 0)
- return -ENOMEM;
-
- ret = i915_do_wait_request(dev, overlay->last_flip_req,
- 1, &dev_priv->render_ring);
- if (ret != 0)
- return ret;
-
+ OUT_RING(MI_WAIT_FOR_EVENT | MI_WAIT_FOR_OVERLAY_FLIP);
/* turn overlay off */
- overlay->hw_wedged = SWITCH_OFF_STAGE_2;
-
- BEGIN_LP_RING(4);
- OUT_RING(MI_OVERLAY_FLIP | MI_OVERLAY_OFF);
+ OUT_RING(MI_OVERLAY_FLIP | MI_OVERLAY_OFF);
OUT_RING(flip_addr);
- OUT_RING(MI_WAIT_FOR_EVENT | MI_WAIT_FOR_OVERLAY_FLIP);
- OUT_RING(MI_NOOP);
+ OUT_RING(MI_WAIT_FOR_EVENT | MI_WAIT_FOR_OVERLAY_FLIP);
ADVANCE_LP_RING();
- overlay->last_flip_req =
- i915_add_request(dev, NULL, 0, &dev_priv->render_ring);
- if (overlay->last_flip_req == 0)
- return -ENOMEM;
-
- ret = i915_do_wait_request(dev, overlay->last_flip_req,
- 1, &dev_priv->render_ring);
- if (ret != 0)
- return ret;
-
- overlay->hw_wedged = 0;
- overlay->last_flip_req = 0;
- return ret;
-}
-
-static void intel_overlay_off_tail(struct intel_overlay *overlay)
-{
- struct drm_gem_object *obj;
-
- /* never have the overlay hw on without showing a frame */
- BUG_ON(!overlay->vid_bo);
- obj = &overlay->vid_bo->base;
-
- i915_gem_object_unpin(obj);
- drm_gem_object_unreference(obj);
- overlay->vid_bo = NULL;
-
- overlay->crtc->overlay = NULL;
- overlay->crtc = NULL;
- overlay->active = 0;
+ return intel_overlay_do_wait_request(overlay, request, interruptible,
+ intel_overlay_off_tail);
}
/* recover from an interruption due to a signal
* We have to be careful not to repeat work forever and make forward progress. */
-int intel_overlay_recover_from_interrupt(struct intel_overlay *overlay,
- int interruptible)
+static int intel_overlay_recover_from_interrupt(struct intel_overlay *overlay,
+ bool interruptible)
{
struct drm_device *dev = overlay->dev;
- struct drm_gem_object *obj;
drm_i915_private_t *dev_priv = dev->dev_private;
- u32 flip_addr;
int ret;
- if (overlay->hw_wedged == HW_WEDGED)
- return -EIO;
-
- if (overlay->last_flip_req == 0) {
- overlay->last_flip_req =
- i915_add_request(dev, NULL, 0, &dev_priv->render_ring);
- if (overlay->last_flip_req == 0)
- return -ENOMEM;
- }
+ if (overlay->last_flip_req == 0)
+ return 0;
ret = i915_do_wait_request(dev, overlay->last_flip_req,
- interruptible, &dev_priv->render_ring);
- if (ret != 0)
+ interruptible, &dev_priv->render_ring);
+ if (ret)
return ret;
- switch (overlay->hw_wedged) {
- case RELEASE_OLD_VID:
- obj = &overlay->old_vid_bo->base;
- i915_gem_object_unpin(obj);
- drm_gem_object_unreference(obj);
- overlay->old_vid_bo = NULL;
- break;
- case SWITCH_OFF_STAGE_1:
- flip_addr = overlay->flip_addr;
- flip_addr |= OFC_UPDATE;
-
- overlay->hw_wedged = SWITCH_OFF_STAGE_2;
-
- BEGIN_LP_RING(4);
- OUT_RING(MI_OVERLAY_FLIP | MI_OVERLAY_OFF);
- OUT_RING(flip_addr);
- OUT_RING(MI_WAIT_FOR_EVENT | MI_WAIT_FOR_OVERLAY_FLIP);
- OUT_RING(MI_NOOP);
- ADVANCE_LP_RING();
-
- overlay->last_flip_req = i915_add_request(dev, NULL,
- 0, &dev_priv->render_ring);
- if (overlay->last_flip_req == 0)
- return -ENOMEM;
-
- ret = i915_do_wait_request(dev, overlay->last_flip_req,
- interruptible, &dev_priv->render_ring);
- if (ret != 0)
- return ret;
-
- case SWITCH_OFF_STAGE_2:
- intel_overlay_off_tail(overlay);
- break;
- default:
- BUG_ON(overlay->hw_wedged != NEEDS_WAIT_FOR_FLIP);
- }
+ if (overlay->flip_tail)
+ overlay->flip_tail(overlay);
- overlay->hw_wedged = 0;
overlay->last_flip_req = 0;
return 0;
}
/* Wait for pending overlay flip and release old frame.
* Needs to be called before the overlay register are changed
- * via intel_overlay_(un)map_regs_atomic */
+ * via intel_overlay_(un)map_regs
+ */
static int intel_overlay_release_old_vid(struct intel_overlay *overlay)
{
+ struct drm_device *dev = overlay->dev;
+ drm_i915_private_t *dev_priv = dev->dev_private;
int ret;
- struct drm_gem_object *obj;
- /* only wait if there is actually an old frame to release to
- * guarantee forward progress */
+ /* Only wait if there is actually an old frame to release to
+ * guarantee forward progress.
+ */
if (!overlay->old_vid_bo)
return 0;
- ret = intel_overlay_wait_flip(overlay);
- if (ret != 0)
- return ret;
+ if (I915_READ(ISR) & I915_OVERLAY_PLANE_FLIP_PENDING_INTERRUPT) {
+ struct drm_i915_gem_request *request;
- obj = &overlay->old_vid_bo->base;
- i915_gem_object_unpin(obj);
- drm_gem_object_unreference(obj);
- overlay->old_vid_bo = NULL;
+ /* synchronous slowpath */
+ request = kzalloc(sizeof(*request), GFP_KERNEL);
+ if (request == NULL)
+ return -ENOMEM;
+
+ BEGIN_LP_RING(2);
+ OUT_RING(MI_WAIT_FOR_EVENT | MI_WAIT_FOR_OVERLAY_FLIP);
+ OUT_RING(MI_NOOP);
+ ADVANCE_LP_RING();
+
+ ret = intel_overlay_do_wait_request(overlay, request, true,
+ intel_overlay_release_old_vid_tail);
+ if (ret)
+ return ret;
+ }
+ intel_overlay_release_old_vid_tail(overlay);
return 0;
}
@@ -506,65 +502,65 @@ struct put_image_params {
static int packed_depth_bytes(u32 format)
{
switch (format & I915_OVERLAY_DEPTH_MASK) {
- case I915_OVERLAY_YUV422:
- return 4;
- case I915_OVERLAY_YUV411:
- /* return 6; not implemented */
- default:
- return -EINVAL;
+ case I915_OVERLAY_YUV422:
+ return 4;
+ case I915_OVERLAY_YUV411:
+ /* return 6; not implemented */
+ default:
+ return -EINVAL;
}
}
static int packed_width_bytes(u32 format, short width)
{
switch (format & I915_OVERLAY_DEPTH_MASK) {
- case I915_OVERLAY_YUV422:
- return width << 1;
- default:
- return -EINVAL;
+ case I915_OVERLAY_YUV422:
+ return width << 1;
+ default:
+ return -EINVAL;
}
}
static int uv_hsubsampling(u32 format)
{
switch (format & I915_OVERLAY_DEPTH_MASK) {
- case I915_OVERLAY_YUV422:
- case I915_OVERLAY_YUV420:
- return 2;
- case I915_OVERLAY_YUV411:
- case I915_OVERLAY_YUV410:
- return 4;
- default:
- return -EINVAL;
+ case I915_OVERLAY_YUV422:
+ case I915_OVERLAY_YUV420:
+ return 2;
+ case I915_OVERLAY_YUV411:
+ case I915_OVERLAY_YUV410:
+ return 4;
+ default:
+ return -EINVAL;
}
}
static int uv_vsubsampling(u32 format)
{
switch (format & I915_OVERLAY_DEPTH_MASK) {
- case I915_OVERLAY_YUV420:
- case I915_OVERLAY_YUV410:
- return 2;
- case I915_OVERLAY_YUV422:
- case I915_OVERLAY_YUV411:
- return 1;
- default:
- return -EINVAL;
+ case I915_OVERLAY_YUV420:
+ case I915_OVERLAY_YUV410:
+ return 2;
+ case I915_OVERLAY_YUV422:
+ case I915_OVERLAY_YUV411:
+ return 1;
+ default:
+ return -EINVAL;
}
}
static u32 calc_swidthsw(struct drm_device *dev, u32 offset, u32 width)
{
u32 mask, shift, ret;
- if (IS_I9XX(dev)) {
- mask = 0x3f;
- shift = 6;
- } else {
+ if (IS_GEN2(dev)) {
mask = 0x1f;
shift = 5;
+ } else {
+ mask = 0x3f;
+ shift = 6;
}
ret = ((offset + width + mask) >> shift) - (offset >> shift);
- if (IS_I9XX(dev))
+ if (!IS_GEN2(dev))
ret <<= 1;
ret -=1;
return ret << 2;
@@ -587,7 +583,9 @@ static const u16 y_static_hcoeffs[N_HORIZ_Y_TAPS * N_PHASES] = {
0x3020, 0xb340, 0x1fb8, 0x34a0, 0xb060,
0x3020, 0xb240, 0x1fe0, 0x32e0, 0xb040,
0x3020, 0xb140, 0x1ff8, 0x3160, 0xb020,
- 0xb000, 0x3000, 0x0800, 0x3000, 0xb000};
+ 0xb000, 0x3000, 0x0800, 0x3000, 0xb000
+};
+
static const u16 uv_static_hcoeffs[N_HORIZ_UV_TAPS * N_PHASES] = {
0x3000, 0x1800, 0x1800, 0xb000, 0x18d0, 0x2e60,
0xb000, 0x1990, 0x2ce0, 0xb020, 0x1a68, 0x2b40,
@@ -597,7 +595,8 @@ static const u16 uv_static_hcoeffs[N_HORIZ_UV_TAPS * N_PHASES] = {
0xb100, 0x1eb8, 0x3620, 0xb100, 0x1f18, 0x34a0,
0xb100, 0x1f68, 0x3360, 0xb0e0, 0x1fa8, 0x3240,
0xb0c0, 0x1fe0, 0x3140, 0xb060, 0x1ff0, 0x30a0,
- 0x3000, 0x0800, 0x3000};
+ 0x3000, 0x0800, 0x3000
+};
static void update_polyphase_filter(struct overlay_registers *regs)
{
@@ -630,29 +629,31 @@ static bool update_scaling_factors(struct intel_overlay *overlay,
yscale = 1 << FP_SHIFT;
/*if (params->format & I915_OVERLAY_YUV_PLANAR) {*/
- xscale_UV = xscale/uv_hscale;
- yscale_UV = yscale/uv_vscale;
- /* make the Y scale to UV scale ratio an exact multiply */
- xscale = xscale_UV * uv_hscale;
- yscale = yscale_UV * uv_vscale;
+ xscale_UV = xscale/uv_hscale;
+ yscale_UV = yscale/uv_vscale;
+ /* make the Y scale to UV scale ratio an exact multiply */
+ xscale = xscale_UV * uv_hscale;
+ yscale = yscale_UV * uv_vscale;
/*} else {
- xscale_UV = 0;
- yscale_UV = 0;
- }*/
+ xscale_UV = 0;
+ yscale_UV = 0;
+ }*/
if (xscale != overlay->old_xscale || yscale != overlay->old_yscale)
scale_changed = true;
overlay->old_xscale = xscale;
overlay->old_yscale = yscale;
- regs->YRGBSCALE = ((yscale & FRACT_MASK) << 20)
- | ((xscale >> FP_SHIFT) << 16)
- | ((xscale & FRACT_MASK) << 3);
- regs->UVSCALE = ((yscale_UV & FRACT_MASK) << 20)
- | ((xscale_UV >> FP_SHIFT) << 16)
- | ((xscale_UV & FRACT_MASK) << 3);
- regs->UVSCALEV = ((yscale >> FP_SHIFT) << 16)
- | ((yscale_UV >> FP_SHIFT) << 0);
+ regs->YRGBSCALE = (((yscale & FRACT_MASK) << 20) |
+ ((xscale >> FP_SHIFT) << 16) |
+ ((xscale & FRACT_MASK) << 3));
+
+ regs->UVSCALE = (((yscale_UV & FRACT_MASK) << 20) |
+ ((xscale_UV >> FP_SHIFT) << 16) |
+ ((xscale_UV & FRACT_MASK) << 3));
+
+ regs->UVSCALEV = ((((yscale >> FP_SHIFT) << 16) |
+ ((yscale_UV >> FP_SHIFT) << 0)));
if (scale_changed)
update_polyphase_filter(regs);
@@ -664,22 +665,28 @@ static void update_colorkey(struct intel_overlay *overlay,
struct overlay_registers *regs)
{
u32 key = overlay->color_key;
+
switch (overlay->crtc->base.fb->bits_per_pixel) {
- case 8:
- regs->DCLRKV = 0;
- regs->DCLRKM = CLK_RGB8I_MASK | DST_KEY_ENABLE;
- case 16:
- if (overlay->crtc->base.fb->depth == 15) {
- regs->DCLRKV = RGB15_TO_COLORKEY(key);
- regs->DCLRKM = CLK_RGB15_MASK | DST_KEY_ENABLE;
- } else {
- regs->DCLRKV = RGB16_TO_COLORKEY(key);
- regs->DCLRKM = CLK_RGB16_MASK | DST_KEY_ENABLE;
- }
- case 24:
- case 32:
- regs->DCLRKV = key;
- regs->DCLRKM = CLK_RGB24_MASK | DST_KEY_ENABLE;
+ case 8:
+ regs->DCLRKV = 0;
+ regs->DCLRKM = CLK_RGB8I_MASK | DST_KEY_ENABLE;
+ break;
+
+ case 16:
+ if (overlay->crtc->base.fb->depth == 15) {
+ regs->DCLRKV = RGB15_TO_COLORKEY(key);
+ regs->DCLRKM = CLK_RGB15_MASK | DST_KEY_ENABLE;
+ } else {
+ regs->DCLRKV = RGB16_TO_COLORKEY(key);
+ regs->DCLRKM = CLK_RGB16_MASK | DST_KEY_ENABLE;
+ }
+ break;
+
+ case 24:
+ case 32:
+ regs->DCLRKV = key;
+ regs->DCLRKM = CLK_RGB24_MASK | DST_KEY_ENABLE;
+ break;
}
}
@@ -689,48 +696,48 @@ static u32 overlay_cmd_reg(struct put_image_params *params)
if (params->format & I915_OVERLAY_YUV_PLANAR) {
switch (params->format & I915_OVERLAY_DEPTH_MASK) {
- case I915_OVERLAY_YUV422:
- cmd |= OCMD_YUV_422_PLANAR;
- break;
- case I915_OVERLAY_YUV420:
- cmd |= OCMD_YUV_420_PLANAR;
- break;
- case I915_OVERLAY_YUV411:
- case I915_OVERLAY_YUV410:
- cmd |= OCMD_YUV_410_PLANAR;
- break;
+ case I915_OVERLAY_YUV422:
+ cmd |= OCMD_YUV_422_PLANAR;
+ break;
+ case I915_OVERLAY_YUV420:
+ cmd |= OCMD_YUV_420_PLANAR;
+ break;
+ case I915_OVERLAY_YUV411:
+ case I915_OVERLAY_YUV410:
+ cmd |= OCMD_YUV_410_PLANAR;
+ break;
}
} else { /* YUV packed */
switch (params->format & I915_OVERLAY_DEPTH_MASK) {
- case I915_OVERLAY_YUV422:
- cmd |= OCMD_YUV_422_PACKED;
- break;
- case I915_OVERLAY_YUV411:
- cmd |= OCMD_YUV_411_PACKED;
- break;
+ case I915_OVERLAY_YUV422:
+ cmd |= OCMD_YUV_422_PACKED;
+ break;
+ case I915_OVERLAY_YUV411:
+ cmd |= OCMD_YUV_411_PACKED;
+ break;
}
switch (params->format & I915_OVERLAY_SWAP_MASK) {
- case I915_OVERLAY_NO_SWAP:
- break;
- case I915_OVERLAY_UV_SWAP:
- cmd |= OCMD_UV_SWAP;
- break;
- case I915_OVERLAY_Y_SWAP:
- cmd |= OCMD_Y_SWAP;
- break;
- case I915_OVERLAY_Y_AND_UV_SWAP:
- cmd |= OCMD_Y_AND_UV_SWAP;
- break;
+ case I915_OVERLAY_NO_SWAP:
+ break;
+ case I915_OVERLAY_UV_SWAP:
+ cmd |= OCMD_UV_SWAP;
+ break;
+ case I915_OVERLAY_Y_SWAP:
+ cmd |= OCMD_Y_SWAP;
+ break;
+ case I915_OVERLAY_Y_AND_UV_SWAP:
+ cmd |= OCMD_Y_AND_UV_SWAP;
+ break;
}
}
return cmd;
}
-int intel_overlay_do_put_image(struct intel_overlay *overlay,
- struct drm_gem_object *new_bo,
- struct put_image_params *params)
+static int intel_overlay_do_put_image(struct intel_overlay *overlay,
+ struct drm_gem_object *new_bo,
+ struct put_image_params *params)
{
int ret, tmp_width;
struct overlay_registers *regs;
@@ -755,24 +762,24 @@ int intel_overlay_do_put_image(struct intel_overlay *overlay,
goto out_unpin;
if (!overlay->active) {
- regs = intel_overlay_map_regs_atomic(overlay);
+ regs = intel_overlay_map_regs(overlay);
if (!regs) {
ret = -ENOMEM;
goto out_unpin;
}
regs->OCONFIG = OCONF_CC_OUT_8BIT;
- if (IS_I965GM(overlay->dev))
+ if (IS_GEN4(overlay->dev))
regs->OCONFIG |= OCONF_CSC_MODE_BT709;
regs->OCONFIG |= overlay->crtc->pipe == 0 ?
OCONF_PIPE_A : OCONF_PIPE_B;
- intel_overlay_unmap_regs_atomic(overlay);
+ intel_overlay_unmap_regs(overlay, regs);
ret = intel_overlay_on(overlay);
if (ret != 0)
goto out_unpin;
}
- regs = intel_overlay_map_regs_atomic(overlay);
+ regs = intel_overlay_map_regs(overlay);
if (!regs) {
ret = -ENOMEM;
goto out_unpin;
@@ -788,7 +795,7 @@ int intel_overlay_do_put_image(struct intel_overlay *overlay,
regs->SWIDTH = params->src_w;
regs->SWIDTHSW = calc_swidthsw(overlay->dev,
- params->offset_Y, tmp_width);
+ params->offset_Y, tmp_width);
regs->SHEIGHT = params->src_h;
regs->OBUF_0Y = bo_priv->gtt_offset + params-> offset_Y;
regs->OSTRIDE = params->stride_Y;
@@ -799,9 +806,9 @@ int intel_overlay_do_put_image(struct intel_overlay *overlay,
u32 tmp_U, tmp_V;
regs->SWIDTH |= (params->src_w/uv_hscale) << 16;
tmp_U = calc_swidthsw(overlay->dev, params->offset_U,
- params->src_w/uv_hscale);
+ params->src_w/uv_hscale);
tmp_V = calc_swidthsw(overlay->dev, params->offset_V,
- params->src_w/uv_hscale);
+ params->src_w/uv_hscale);
regs->SWIDTHSW |= max_t(u32, tmp_U, tmp_V) << 16;
regs->SHEIGHT |= (params->src_h/uv_vscale) << 16;
regs->OBUF_0U = bo_priv->gtt_offset + params->offset_U;
@@ -815,9 +822,11 @@ int intel_overlay_do_put_image(struct intel_overlay *overlay,
regs->OCMD = overlay_cmd_reg(params);
- intel_overlay_unmap_regs_atomic(overlay);
+ intel_overlay_unmap_regs(overlay, regs);
- intel_overlay_continue(overlay, scale_changed);
+ ret = intel_overlay_continue(overlay, scale_changed);
+ if (ret)
+ goto out_unpin;
overlay->old_vid_bo = overlay->vid_bo;
overlay->vid_bo = to_intel_bo(new_bo);
@@ -829,20 +838,19 @@ out_unpin:
return ret;
}
-int intel_overlay_switch_off(struct intel_overlay *overlay)
+int intel_overlay_switch_off(struct intel_overlay *overlay,
+ bool interruptible)
{
- int ret;
struct overlay_registers *regs;
struct drm_device *dev = overlay->dev;
+ int ret;
BUG_ON(!mutex_is_locked(&dev->struct_mutex));
BUG_ON(!mutex_is_locked(&dev->mode_config.mutex));
- if (overlay->hw_wedged) {
- ret = intel_overlay_recover_from_interrupt(overlay, 1);
- if (ret != 0)
- return ret;
- }
+ ret = intel_overlay_recover_from_interrupt(overlay, interruptible);
+ if (ret != 0)
+ return ret;
if (!overlay->active)
return 0;
@@ -851,33 +859,29 @@ int intel_overlay_switch_off(struct intel_overlay *overlay)
if (ret != 0)
return ret;
- regs = intel_overlay_map_regs_atomic(overlay);
+ regs = intel_overlay_map_regs(overlay);
regs->OCMD = 0;
- intel_overlay_unmap_regs_atomic(overlay);
+ intel_overlay_unmap_regs(overlay, regs);
- ret = intel_overlay_off(overlay);
+ ret = intel_overlay_off(overlay, interruptible);
if (ret != 0)
return ret;
intel_overlay_off_tail(overlay);
-
return 0;
}
static int check_overlay_possible_on_crtc(struct intel_overlay *overlay,
struct intel_crtc *crtc)
{
- drm_i915_private_t *dev_priv = overlay->dev->dev_private;
- u32 pipeconf;
- int pipeconf_reg = (crtc->pipe == 0) ? PIPEACONF : PIPEBCONF;
+ drm_i915_private_t *dev_priv = overlay->dev->dev_private;
- if (!crtc->base.enabled || crtc->dpms_mode != DRM_MODE_DPMS_ON)
+ if (!crtc->active)
return -EINVAL;
- pipeconf = I915_READ(pipeconf_reg);
-
/* can't use the overlay with double wide pipe */
- if (!IS_I965G(overlay->dev) && pipeconf & PIPEACONF_DOUBLE_WIDE)
+ if (INTEL_INFO(overlay->dev)->gen < 4 &&
+ (I915_READ(PIPECONF(crtc->pipe)) & (PIPECONF_DOUBLE_WIDE | PIPECONF_ENABLE)) != PIPECONF_ENABLE)
return -EINVAL;
return 0;
@@ -886,20 +890,22 @@ static int check_overlay_possible_on_crtc(struct intel_overlay *overlay,
static void update_pfit_vscale_ratio(struct intel_overlay *overlay)
{
struct drm_device *dev = overlay->dev;
- drm_i915_private_t *dev_priv = dev->dev_private;
- u32 ratio;
+ drm_i915_private_t *dev_priv = dev->dev_private;
u32 pfit_control = I915_READ(PFIT_CONTROL);
+ u32 ratio;
/* XXX: This is not the same logic as in the xorg driver, but more in
- * line with the intel documentation for the i965 */
- if (!IS_I965G(dev) && (pfit_control & VERT_AUTO_SCALE)) {
- ratio = I915_READ(PFIT_AUTO_RATIOS) >> PFIT_VERT_SCALE_SHIFT;
- } else { /* on i965 use the PGM reg to read out the autoscaler values */
- ratio = I915_READ(PFIT_PGM_RATIOS);
- if (IS_I965G(dev))
- ratio >>= PFIT_VERT_SCALE_SHIFT_965;
+ * line with the intel documentation for the i965
+ */
+ if (INTEL_INFO(dev)->gen >= 4) {
+ /* on i965 use the PGM reg to read out the autoscaler values */
+ ratio = I915_READ(PFIT_PGM_RATIOS) >> PFIT_VERT_SCALE_SHIFT_965;
+ } else {
+ if (pfit_control & VERT_AUTO_SCALE)
+ ratio = I915_READ(PFIT_AUTO_RATIOS);
else
- ratio >>= PFIT_VERT_SCALE_SHIFT;
+ ratio = I915_READ(PFIT_PGM_RATIOS);
+ ratio >>= PFIT_VERT_SCALE_SHIFT;
}
overlay->pfit_vscale_ratio = ratio;
@@ -910,12 +916,10 @@ static int check_overlay_dst(struct intel_overlay *overlay,
{
struct drm_display_mode *mode = &overlay->crtc->base.mode;
- if ((rec->dst_x < mode->crtc_hdisplay)
- && (rec->dst_x + rec->dst_width
- <= mode->crtc_hdisplay)
- && (rec->dst_y < mode->crtc_vdisplay)
- && (rec->dst_y + rec->dst_height
- <= mode->crtc_vdisplay))
+ if (rec->dst_x < mode->crtc_hdisplay &&
+ rec->dst_x + rec->dst_width <= mode->crtc_hdisplay &&
+ rec->dst_y < mode->crtc_vdisplay &&
+ rec->dst_y + rec->dst_height <= mode->crtc_vdisplay)
return 0;
else
return -EINVAL;
@@ -940,53 +944,59 @@ static int check_overlay_src(struct drm_device *dev,
struct drm_intel_overlay_put_image *rec,
struct drm_gem_object *new_bo)
{
- u32 stride_mask;
- int depth;
int uv_hscale = uv_hsubsampling(rec->flags);
int uv_vscale = uv_vsubsampling(rec->flags);
- size_t tmp;
+ u32 stride_mask;
+ int depth;
+ u32 tmp;
/* check src dimensions */
if (IS_845G(dev) || IS_I830(dev)) {
- if (rec->src_height > IMAGE_MAX_HEIGHT_LEGACY
- || rec->src_width > IMAGE_MAX_WIDTH_LEGACY)
+ if (rec->src_height > IMAGE_MAX_HEIGHT_LEGACY ||
+ rec->src_width > IMAGE_MAX_WIDTH_LEGACY)
return -EINVAL;
} else {
- if (rec->src_height > IMAGE_MAX_HEIGHT
- || rec->src_width > IMAGE_MAX_WIDTH)
+ if (rec->src_height > IMAGE_MAX_HEIGHT ||
+ rec->src_width > IMAGE_MAX_WIDTH)
return -EINVAL;
}
+
/* better safe than sorry, use 4 as the maximal subsampling ratio */
- if (rec->src_height < N_VERT_Y_TAPS*4
- || rec->src_width < N_HORIZ_Y_TAPS*4)
+ if (rec->src_height < N_VERT_Y_TAPS*4 ||
+ rec->src_width < N_HORIZ_Y_TAPS*4)
return -EINVAL;
/* check alignment constraints */
switch (rec->flags & I915_OVERLAY_TYPE_MASK) {
- case I915_OVERLAY_RGB:
- /* not implemented */
+ case I915_OVERLAY_RGB:
+ /* not implemented */
+ return -EINVAL;
+
+ case I915_OVERLAY_YUV_PACKED:
+ if (uv_vscale != 1)
return -EINVAL;
- case I915_OVERLAY_YUV_PACKED:
- depth = packed_depth_bytes(rec->flags);
- if (uv_vscale != 1)
- return -EINVAL;
- if (depth < 0)
- return depth;
- /* ignore UV planes */
- rec->stride_UV = 0;
- rec->offset_U = 0;
- rec->offset_V = 0;
- /* check pixel alignment */
- if (rec->offset_Y % depth)
- return -EINVAL;
- break;
- case I915_OVERLAY_YUV_PLANAR:
- if (uv_vscale < 0 || uv_hscale < 0)
- return -EINVAL;
- /* no offset restrictions for planar formats */
- break;
- default:
+
+ depth = packed_depth_bytes(rec->flags);
+ if (depth < 0)
+ return depth;
+
+ /* ignore UV planes */
+ rec->stride_UV = 0;
+ rec->offset_U = 0;
+ rec->offset_V = 0;
+ /* check pixel alignment */
+ if (rec->offset_Y % depth)
+ return -EINVAL;
+ break;
+
+ case I915_OVERLAY_YUV_PLANAR:
+ if (uv_vscale < 0 || uv_hscale < 0)
return -EINVAL;
+ /* no offset restrictions for planar formats */
+ break;
+
+ default:
+ return -EINVAL;
}
if (rec->src_width % uv_hscale)
@@ -1000,47 +1010,74 @@ static int check_overlay_src(struct drm_device *dev,
if (rec->stride_Y & stride_mask || rec->stride_UV & stride_mask)
return -EINVAL;
- if (IS_I965G(dev) && rec->stride_Y < 512)
+ if (IS_GEN4(dev) && rec->stride_Y < 512)
return -EINVAL;
tmp = (rec->flags & I915_OVERLAY_TYPE_MASK) == I915_OVERLAY_YUV_PLANAR ?
- 4 : 8;
- if (rec->stride_Y > tmp*1024 || rec->stride_UV > 2*1024)
+ 4096 : 8192;
+ if (rec->stride_Y > tmp || rec->stride_UV > 2*1024)
return -EINVAL;
/* check buffer dimensions */
switch (rec->flags & I915_OVERLAY_TYPE_MASK) {
- case I915_OVERLAY_RGB:
- case I915_OVERLAY_YUV_PACKED:
- /* always 4 Y values per depth pixels */
- if (packed_width_bytes(rec->flags, rec->src_width)
- > rec->stride_Y)
- return -EINVAL;
-
- tmp = rec->stride_Y*rec->src_height;
- if (rec->offset_Y + tmp > new_bo->size)
- return -EINVAL;
- break;
- case I915_OVERLAY_YUV_PLANAR:
- if (rec->src_width > rec->stride_Y)
- return -EINVAL;
- if (rec->src_width/uv_hscale > rec->stride_UV)
- return -EINVAL;
-
- tmp = rec->stride_Y*rec->src_height;
- if (rec->offset_Y + tmp > new_bo->size)
- return -EINVAL;
- tmp = rec->stride_UV*rec->src_height;
- tmp /= uv_vscale;
- if (rec->offset_U + tmp > new_bo->size
- || rec->offset_V + tmp > new_bo->size)
- return -EINVAL;
- break;
+ case I915_OVERLAY_RGB:
+ case I915_OVERLAY_YUV_PACKED:
+ /* always 4 Y values per depth pixels */
+ if (packed_width_bytes(rec->flags, rec->src_width) > rec->stride_Y)
+ return -EINVAL;
+
+ tmp = rec->stride_Y*rec->src_height;
+ if (rec->offset_Y + tmp > new_bo->size)
+ return -EINVAL;
+ break;
+
+ case I915_OVERLAY_YUV_PLANAR:
+ if (rec->src_width > rec->stride_Y)
+ return -EINVAL;
+ if (rec->src_width/uv_hscale > rec->stride_UV)
+ return -EINVAL;
+
+ tmp = rec->stride_Y * rec->src_height;
+ if (rec->offset_Y + tmp > new_bo->size)
+ return -EINVAL;
+
+ tmp = rec->stride_UV * (rec->src_height / uv_vscale);
+ if (rec->offset_U + tmp > new_bo->size ||
+ rec->offset_V + tmp > new_bo->size)
+ return -EINVAL;
+ break;
}
return 0;
}
+/**
+ * Return the pipe currently connected to the panel fitter,
+ * or -1 if the panel fitter is not present or not in use
+ */
+static int intel_panel_fitter_pipe(struct drm_device *dev)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ u32 pfit_control;
+
+ /* i830 doesn't have a panel fitter */
+ if (IS_I830(dev))
+ return -1;
+
+ pfit_control = I915_READ(PFIT_CONTROL);
+
+ /* See if the panel fitter is in use */
+ if ((pfit_control & PFIT_ENABLE) == 0)
+ return -1;
+
+ /* 965 can place panel fitter on either pipe */
+ if (IS_GEN4(dev))
+ return (pfit_control >> 29) & 0x3;
+
+ /* older chips can only use pipe 1 */
+ return 1;
+}
+
int intel_overlay_put_image(struct drm_device *dev, void *data,
struct drm_file *file_priv)
{
@@ -1068,7 +1105,7 @@ int intel_overlay_put_image(struct drm_device *dev, void *data,
mutex_lock(&dev->mode_config.mutex);
mutex_lock(&dev->struct_mutex);
- ret = intel_overlay_switch_off(overlay);
+ ret = intel_overlay_switch_off(overlay, true);
mutex_unlock(&dev->struct_mutex);
mutex_unlock(&dev->mode_config.mutex);
@@ -1081,7 +1118,7 @@ int intel_overlay_put_image(struct drm_device *dev, void *data,
return -ENOMEM;
drmmode_obj = drm_mode_object_find(dev, put_image_rec->crtc_id,
- DRM_MODE_OBJECT_CRTC);
+ DRM_MODE_OBJECT_CRTC);
if (!drmmode_obj) {
ret = -ENOENT;
goto out_free;
@@ -1089,7 +1126,7 @@ int intel_overlay_put_image(struct drm_device *dev, void *data,
crtc = to_intel_crtc(obj_to_crtc(drmmode_obj));
new_bo = drm_gem_object_lookup(dev, file_priv,
- put_image_rec->bo_handle);
+ put_image_rec->bo_handle);
if (!new_bo) {
ret = -ENOENT;
goto out_free;
@@ -1098,15 +1135,13 @@ int intel_overlay_put_image(struct drm_device *dev, void *data,
mutex_lock(&dev->mode_config.mutex);
mutex_lock(&dev->struct_mutex);
- if (overlay->hw_wedged) {
- ret = intel_overlay_recover_from_interrupt(overlay, 1);
- if (ret != 0)
- goto out_unlock;
- }
+ ret = intel_overlay_recover_from_interrupt(overlay, true);
+ if (ret != 0)
+ goto out_unlock;
if (overlay->crtc != crtc) {
struct drm_display_mode *mode = &crtc->base.mode;
- ret = intel_overlay_switch_off(overlay);
+ ret = intel_overlay_switch_off(overlay, true);
if (ret != 0)
goto out_unlock;
@@ -1117,9 +1152,9 @@ int intel_overlay_put_image(struct drm_device *dev, void *data,
overlay->crtc = crtc;
crtc->overlay = overlay;
- if (intel_panel_fitter_pipe(dev) == crtc->pipe
- /* and line to wide, i.e. one-line-mode */
- && mode->hdisplay > 1024) {
+ /* line too wide, i.e. one-line-mode */
+ if (mode->hdisplay > 1024 &&
+ intel_panel_fitter_pipe(dev) == crtc->pipe) {
overlay->pfit_active = 1;
update_pfit_vscale_ratio(overlay);
} else
@@ -1132,10 +1167,10 @@ int intel_overlay_put_image(struct drm_device *dev, void *data,
if (overlay->pfit_active) {
params->dst_y = ((((u32)put_image_rec->dst_y) << 12) /
- overlay->pfit_vscale_ratio);
+ overlay->pfit_vscale_ratio);
/* shifting right rounds downwards, so add 1 */
params->dst_h = ((((u32)put_image_rec->dst_height) << 12) /
- overlay->pfit_vscale_ratio) + 1;
+ overlay->pfit_vscale_ratio) + 1;
} else {
params->dst_y = put_image_rec->dst_y;
params->dst_h = put_image_rec->dst_height;
@@ -1147,8 +1182,8 @@ int intel_overlay_put_image(struct drm_device *dev, void *data,
params->src_h = put_image_rec->src_height;
params->src_scan_w = put_image_rec->src_scan_width;
params->src_scan_h = put_image_rec->src_scan_height;
- if (params->src_scan_h > params->src_h
- || params->src_scan_w > params->src_w) {
+ if (params->src_scan_h > params->src_h ||
+ params->src_scan_w > params->src_w) {
ret = -EINVAL;
goto out_unlock;
}
@@ -1204,7 +1239,7 @@ static bool check_gamma_bounds(u32 gamma1, u32 gamma2)
return false;
for (i = 0; i < 3; i++) {
- if (((gamma1 >> i * 8) & 0xff) >= ((gamma2 >> i*8) & 0xff))
+ if (((gamma1 >> i*8) & 0xff) >= ((gamma2 >> i*8) & 0xff))
return false;
}
@@ -1225,16 +1260,18 @@ static bool check_gamma5_errata(u32 gamma5)
static int check_gamma(struct drm_intel_overlay_attrs *attrs)
{
- if (!check_gamma_bounds(0, attrs->gamma0)
- || !check_gamma_bounds(attrs->gamma0, attrs->gamma1)
- || !check_gamma_bounds(attrs->gamma1, attrs->gamma2)
- || !check_gamma_bounds(attrs->gamma2, attrs->gamma3)
- || !check_gamma_bounds(attrs->gamma3, attrs->gamma4)
- || !check_gamma_bounds(attrs->gamma4, attrs->gamma5)
- || !check_gamma_bounds(attrs->gamma5, 0x00ffffff))
+ if (!check_gamma_bounds(0, attrs->gamma0) ||
+ !check_gamma_bounds(attrs->gamma0, attrs->gamma1) ||
+ !check_gamma_bounds(attrs->gamma1, attrs->gamma2) ||
+ !check_gamma_bounds(attrs->gamma2, attrs->gamma3) ||
+ !check_gamma_bounds(attrs->gamma3, attrs->gamma4) ||
+ !check_gamma_bounds(attrs->gamma4, attrs->gamma5) ||
+ !check_gamma_bounds(attrs->gamma5, 0x00ffffff))
return -EINVAL;
+
if (!check_gamma5_errata(attrs->gamma5))
return -EINVAL;
+
return 0;
}
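
The gamma checks above only accept a ramp whose six programmed entries rise strictly per colour channel, each channel being one byte of a 0x00RRGGBB word, with 0 below the first entry and 0x00ffffff above the last. A small stand-alone sketch of that per-channel comparison follows; the ramp values in main() are invented test data, not anything the driver programs.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Return true if every 8-bit channel of g1 is strictly below the
 * corresponding channel of g2 -- the same per-byte test applied
 * between consecutive gamma ramp entries above. */
static bool gamma_channels_increase(uint32_t g1, uint32_t g2)
{
        for (int i = 0; i < 3; i++)
                if (((g1 >> i*8) & 0xff) >= ((g2 >> i*8) & 0xff))
                        return false;
        return true;
}

int main(void)
{
        /* made-up six-entry ramp; must rise channel-wise from 0 to 0x00ffffff */
        uint32_t ramp[6] = { 0x00101010, 0x00202020, 0x00404040,
                             0x00808080, 0x00c0c0c0, 0x00f0f0f0 };
        uint32_t prev = 0;
        bool ok = true;

        for (int i = 0; i < 6; i++) {
                ok &= gamma_channels_increase(prev, ramp[i]);
                prev = ramp[i];
        }
        ok &= gamma_channels_increase(prev, 0x00ffffff);

        printf("ramp %s\n", ok ? "accepted" : "rejected");
        return 0;
}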
@@ -1261,13 +1298,14 @@ int intel_overlay_attrs(struct drm_device *dev, void *data,
mutex_lock(&dev->mode_config.mutex);
mutex_lock(&dev->struct_mutex);
+ ret = -EINVAL;
if (!(attrs->flags & I915_OVERLAY_UPDATE_ATTRS)) {
- attrs->color_key = overlay->color_key;
+ attrs->color_key = overlay->color_key;
attrs->brightness = overlay->brightness;
- attrs->contrast = overlay->contrast;
+ attrs->contrast = overlay->contrast;
attrs->saturation = overlay->saturation;
- if (IS_I9XX(dev)) {
+ if (!IS_GEN2(dev)) {
attrs->gamma0 = I915_READ(OGAMC0);
attrs->gamma1 = I915_READ(OGAMC1);
attrs->gamma2 = I915_READ(OGAMC2);
@@ -1275,29 +1313,20 @@ int intel_overlay_attrs(struct drm_device *dev, void *data,
attrs->gamma4 = I915_READ(OGAMC4);
attrs->gamma5 = I915_READ(OGAMC5);
}
- ret = 0;
} else {
- overlay->color_key = attrs->color_key;
- if (attrs->brightness >= -128 && attrs->brightness <= 127) {
- overlay->brightness = attrs->brightness;
- } else {
- ret = -EINVAL;
+ if (attrs->brightness < -128 || attrs->brightness > 127)
goto out_unlock;
- }
- if (attrs->contrast <= 255) {
- overlay->contrast = attrs->contrast;
- } else {
- ret = -EINVAL;
+ if (attrs->contrast > 255)
goto out_unlock;
- }
- if (attrs->saturation <= 1023) {
- overlay->saturation = attrs->saturation;
- } else {
- ret = -EINVAL;
+ if (attrs->saturation > 1023)
goto out_unlock;
- }
- regs = intel_overlay_map_regs_atomic(overlay);
+ overlay->color_key = attrs->color_key;
+ overlay->brightness = attrs->brightness;
+ overlay->contrast = attrs->contrast;
+ overlay->saturation = attrs->saturation;
+
+ regs = intel_overlay_map_regs(overlay);
if (!regs) {
ret = -ENOMEM;
goto out_unlock;
@@ -1305,13 +1334,11 @@ int intel_overlay_attrs(struct drm_device *dev, void *data,
update_reg_attrs(overlay, regs);
- intel_overlay_unmap_regs_atomic(overlay);
+ intel_overlay_unmap_regs(overlay, regs);
if (attrs->flags & I915_OVERLAY_UPDATE_GAMMA) {
- if (!IS_I9XX(dev)) {
- ret = -EINVAL;
+ if (IS_GEN2(dev))
goto out_unlock;
- }
if (overlay->active) {
ret = -EBUSY;
@@ -1319,7 +1346,7 @@ int intel_overlay_attrs(struct drm_device *dev, void *data,
}
ret = check_gamma(attrs);
- if (ret != 0)
+ if (ret)
goto out_unlock;
I915_WRITE(OGAMC0, attrs->gamma0);
@@ -1329,9 +1356,9 @@ int intel_overlay_attrs(struct drm_device *dev, void *data,
I915_WRITE(OGAMC4, attrs->gamma4);
I915_WRITE(OGAMC5, attrs->gamma5);
}
- ret = 0;
}
+ ret = 0;
out_unlock:
mutex_unlock(&dev->struct_mutex);
mutex_unlock(&dev->mode_config.mutex);
@@ -1347,7 +1374,7 @@ void intel_setup_overlay(struct drm_device *dev)
struct overlay_registers *regs;
int ret;
- if (!OVERLAY_EXISTS(dev))
+ if (!HAS_OVERLAY(dev))
return;
overlay = kzalloc(sizeof(struct intel_overlay), GFP_KERNEL);
@@ -1360,22 +1387,28 @@ void intel_setup_overlay(struct drm_device *dev)
goto out_free;
overlay->reg_bo = to_intel_bo(reg_bo);
- if (OVERLAY_NONPHYSICAL(dev)) {
- ret = i915_gem_object_pin(reg_bo, PAGE_SIZE);
- if (ret) {
- DRM_ERROR("failed to pin overlay register bo\n");
- goto out_free_bo;
- }
- overlay->flip_addr = overlay->reg_bo->gtt_offset;
- } else {
+ if (OVERLAY_NEEDS_PHYSICAL(dev)) {
ret = i915_gem_attach_phys_object(dev, reg_bo,
I915_GEM_PHYS_OVERLAY_REGS,
- 0);
+ PAGE_SIZE);
if (ret) {
DRM_ERROR("failed to attach phys overlay regs\n");
goto out_free_bo;
}
overlay->flip_addr = overlay->reg_bo->phys_obj->handle->busaddr;
+ } else {
+ ret = i915_gem_object_pin(reg_bo, PAGE_SIZE);
+ if (ret) {
+ DRM_ERROR("failed to pin overlay register bo\n");
+ goto out_free_bo;
+ }
+ overlay->flip_addr = overlay->reg_bo->gtt_offset;
+
+ ret = i915_gem_object_set_to_gtt_domain(reg_bo, true);
+ if (ret) {
+ DRM_ERROR("failed to move overlay register bo into the GTT\n");
+ goto out_unpin_bo;
+ }
}
/* init all values */
@@ -1384,21 +1417,22 @@ void intel_setup_overlay(struct drm_device *dev)
overlay->contrast = 75;
overlay->saturation = 146;
- regs = intel_overlay_map_regs_atomic(overlay);
+ regs = intel_overlay_map_regs(overlay);
if (!regs)
goto out_free_bo;
memset(regs, 0, sizeof(struct overlay_registers));
update_polyphase_filter(regs);
-
update_reg_attrs(overlay, regs);
- intel_overlay_unmap_regs_atomic(overlay);
+ intel_overlay_unmap_regs(overlay, regs);
dev_priv->overlay = overlay;
DRM_INFO("initialized overlay support\n");
return;
+out_unpin_bo:
+ i915_gem_object_unpin(reg_bo);
out_free_bo:
drm_gem_object_unreference(reg_bo);
out_free:
@@ -1408,18 +1442,23 @@ out_free:
void intel_cleanup_overlay(struct drm_device *dev)
{
- drm_i915_private_t *dev_priv = dev->dev_private;
+ drm_i915_private_t *dev_priv = dev->dev_private;
+
+ if (!dev_priv->overlay)
+ return;
- if (dev_priv->overlay) {
- /* The bo's should be free'd by the generic code already.
- * Furthermore modesetting teardown happens beforehand so the
- * hardware should be off already */
- BUG_ON(dev_priv->overlay->active);
+ /* The bo's should be free'd by the generic code already.
+ * Furthermore modesetting teardown happens beforehand so the
+ * hardware should be off already */
+ BUG_ON(dev_priv->overlay->active);
- kfree(dev_priv->overlay);
- }
+ drm_gem_object_unreference_unlocked(&dev_priv->overlay->reg_bo->base);
+ kfree(dev_priv->overlay);
}
+#ifdef CONFIG_DEBUG_FS
+#include <linux/seq_file.h>
+
struct intel_overlay_error_state {
struct overlay_registers regs;
unsigned long base;
@@ -1427,6 +1466,29 @@ struct intel_overlay_error_state {
u32 isr;
};
+static struct overlay_registers *
+intel_overlay_map_regs_atomic(struct intel_overlay *overlay)
+{
+ drm_i915_private_t *dev_priv = overlay->dev->dev_private;
+ struct overlay_registers *regs;
+
+ if (OVERLAY_NEEDS_PHYSICAL(overlay->dev))
+ regs = overlay->reg_bo->phys_obj->handle->vaddr;
+ else
+ regs = io_mapping_map_atomic_wc(dev_priv->mm.gtt_mapping,
+ overlay->reg_bo->gtt_offset);
+
+ return regs;
+}
+
+static void intel_overlay_unmap_regs_atomic(struct intel_overlay *overlay,
+ struct overlay_registers *regs)
+{
+ if (!OVERLAY_NEEDS_PHYSICAL(overlay->dev))
+ io_mapping_unmap_atomic(regs);
+}
+
+
struct intel_overlay_error_state *
intel_overlay_capture_error_state(struct drm_device *dev)
{
@@ -1444,17 +1506,17 @@ intel_overlay_capture_error_state(struct drm_device *dev)
error->dovsta = I915_READ(DOVSTA);
error->isr = I915_READ(ISR);
- if (OVERLAY_NONPHYSICAL(overlay->dev))
- error->base = (long) overlay->reg_bo->gtt_offset;
- else
+ if (OVERLAY_NEEDS_PHYSICAL(overlay->dev))
error->base = (long) overlay->reg_bo->phys_obj->handle->vaddr;
+ else
+ error->base = (long) overlay->reg_bo->gtt_offset;
regs = intel_overlay_map_regs_atomic(overlay);
if (!regs)
goto err;
memcpy_fromio(&error->regs, regs, sizeof(struct overlay_registers));
- intel_overlay_unmap_regs_atomic(overlay);
+ intel_overlay_unmap_regs_atomic(overlay, regs);
return error;
@@ -1515,3 +1577,4 @@ intel_overlay_print_error_state(struct seq_file *m, struct intel_overlay_error_s
P(UVSCALEV);
#undef P
}
+#endif
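
When the panel fitter is active, the put_image path above rescales the overlay destination vertically with a fixed-point ratio carrying 12 fractional bits: dst_y becomes (dst_y << 12) / ratio, and dst_h gets an extra +1 because the integer division rounds down. A minimal stand-alone sketch of that arithmetic, with an invented ratio value purely for illustration:

#include <stdint.h>
#include <stdio.h>

/* Rescale an overlay destination vertically by a panel-fitter ratio
 * with 12 fractional bits, mirroring the dst_y/dst_h maths used when
 * pfit_active is set.  The ratio below is an assumed example value. */
int main(void)
{
        uint32_t ratio = 0x1800;        /* 1.5 in .12 fixed point (assumed) */
        uint32_t dst_y = 300, dst_h = 600;

        uint32_t scaled_y = (dst_y << 12) / ratio;
        /* the division rounds down, so add 1 to avoid losing a line */
        uint32_t scaled_h = (dst_h << 12) / ratio + 1;

        printf("dst_y %u -> %u, dst_h %u -> %u\n",
               dst_y, scaled_y, dst_h, scaled_h);
        return 0;
}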
diff --git a/drivers/gpu/drm/i915/intel_panel.c b/drivers/gpu/drm/i915/intel_panel.c
index e7f5299d9d57..92ff8f385278 100644
--- a/drivers/gpu/drm/i915/intel_panel.c
+++ b/drivers/gpu/drm/i915/intel_panel.c
@@ -30,6 +30,8 @@
#include "intel_drv.h"
+#define PCI_LBPC 0xf4 /* legacy/combination backlight modes */
+
void
intel_fixed_panel_mode(struct drm_display_mode *fixed_mode,
struct drm_display_mode *adjusted_mode)
@@ -109,3 +111,110 @@ done:
dev_priv->pch_pf_pos = (x << 16) | y;
dev_priv->pch_pf_size = (width << 16) | height;
}
+
+static int is_backlight_combination_mode(struct drm_device *dev)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+
+ if (INTEL_INFO(dev)->gen >= 4)
+ return I915_READ(BLC_PWM_CTL2) & BLM_COMBINATION_MODE;
+
+ if (IS_GEN2(dev))
+ return I915_READ(BLC_PWM_CTL) & BLM_LEGACY_MODE;
+
+ return 0;
+}
+
+u32 intel_panel_get_max_backlight(struct drm_device *dev)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ u32 max;
+
+ if (HAS_PCH_SPLIT(dev)) {
+ max = I915_READ(BLC_PWM_PCH_CTL2) >> 16;
+ } else {
+ max = I915_READ(BLC_PWM_CTL);
+ if (IS_PINEVIEW(dev)) {
+ max >>= 17;
+ } else {
+ max >>= 16;
+ if (INTEL_INFO(dev)->gen < 4)
+ max &= ~1;
+ }
+
+ if (is_backlight_combination_mode(dev))
+ max *= 0xff;
+ }
+
+ if (max == 0) {
+ /* XXX add code here to query mode clock or hardware clock
+ * and program max PWM appropriately.
+ */
+ DRM_ERROR("fixme: max PWM is zero.\n");
+ max = 1;
+ }
+
+ DRM_DEBUG_DRIVER("max backlight PWM = %d\n", max);
+ return max;
+}
+
+u32 intel_panel_get_backlight(struct drm_device *dev)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ u32 val;
+
+ if (HAS_PCH_SPLIT(dev)) {
+ val = I915_READ(BLC_PWM_CPU_CTL) & BACKLIGHT_DUTY_CYCLE_MASK;
+ } else {
+ val = I915_READ(BLC_PWM_CTL) & BACKLIGHT_DUTY_CYCLE_MASK;
+ if (IS_PINEVIEW(dev))
+ val >>= 1;
+
+ if (is_backlight_combination_mode(dev)){
+ u8 lbpc;
+
+ val &= ~1;
+ pci_read_config_byte(dev->pdev, PCI_LBPC, &lbpc);
+ val *= lbpc;
+ val >>= 1;
+ }
+ }
+
+ DRM_DEBUG_DRIVER("get backlight PWM = %d\n", val);
+ return val;
+}
+
+static void intel_pch_panel_set_backlight(struct drm_device *dev, u32 level)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ u32 val = I915_READ(BLC_PWM_CPU_CTL) & ~BACKLIGHT_DUTY_CYCLE_MASK;
+ I915_WRITE(BLC_PWM_CPU_CTL, val | level);
+}
+
+void intel_panel_set_backlight(struct drm_device *dev, u32 level)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ u32 tmp;
+
+ DRM_DEBUG_DRIVER("set backlight PWM = %d\n", level);
+
+ if (HAS_PCH_SPLIT(dev))
+ return intel_pch_panel_set_backlight(dev, level);
+
+ if (is_backlight_combination_mode(dev)){
+ u32 max = intel_panel_get_max_backlight(dev);
+ u8 lpbc;
+
+ lpbc = level * 0xfe / max + 1;
+ level /= lpbc;
+ pci_write_config_byte(dev->pdev, PCI_LBPC, lpbc);
+ }
+
+ tmp = I915_READ(BLC_PWM_CTL);
+ if (IS_PINEVIEW(dev)) {
+ tmp &= ~(BACKLIGHT_DUTY_CYCLE_MASK - 1);
+ level <<= 1;
+ } else
+ tmp &= ~BACKLIGHT_DUTY_CYCLE_MASK;
+ I915_WRITE(BLC_PWM_CTL, tmp | level);
+}
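
In combination mode the backlight code above splits brightness between two controls: the PWM duty-cycle range reported by intel_panel_get_max_backlight() is multiplied by 0xff, and a requested level is then factored into a coarse LBPC byte (level * 0xfe / max + 1) written to PCI config space plus the residual PWM duty cycle (level / lbpc). A self-contained sketch of that factoring; max_pwm and the requested level are made-up example numbers:

#include <stdint.h>
#include <stdio.h>

/* Split a requested backlight level into the legacy LBPC byte and the
 * residual PWM duty cycle, following the combination-mode arithmetic
 * in the patch above.  The input values are invented for the example. */
int main(void)
{
        uint32_t max_pwm = 0x4000;          /* PWM duty-cycle range (assumed) */
        uint32_t max = max_pwm * 0xff;      /* combined range in combination mode */
        uint32_t level = max / 2;           /* ask for 50% brightness */

        uint8_t lbpc = level * 0xfe / max + 1;   /* coarse legacy backlight byte */
        uint32_t pwm = level / lbpc;             /* fine-grained PWM remainder */

        printf("level %u -> lbpc 0x%02x, pwm duty %u (of %u)\n",
               level, lbpc, pwm, max_pwm);
        return 0;
}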
diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.c b/drivers/gpu/drm/i915/intel_ringbuffer.c
index cb3508f78bc3..89a65be8a3f3 100644
--- a/drivers/gpu/drm/i915/intel_ringbuffer.c
+++ b/drivers/gpu/drm/i915/intel_ringbuffer.c
@@ -32,6 +32,7 @@
#include "i915_drv.h"
#include "i915_drm.h"
#include "i915_trace.h"
+#include "intel_drv.h"
static u32 i915_gem_get_seqno(struct drm_device *dev)
{
@@ -49,9 +50,9 @@ static u32 i915_gem_get_seqno(struct drm_device *dev)
static void
render_ring_flush(struct drm_device *dev,
- struct intel_ring_buffer *ring,
- u32 invalidate_domains,
- u32 flush_domains)
+ struct intel_ring_buffer *ring,
+ u32 invalidate_domains,
+ u32 flush_domains)
{
drm_i915_private_t *dev_priv = dev->dev_private;
u32 cmd;
@@ -97,7 +98,7 @@ render_ring_flush(struct drm_device *dev,
if ((invalidate_domains|flush_domains) &
I915_GEM_DOMAIN_RENDER)
cmd &= ~MI_NO_WRITE_FLUSH;
- if (!IS_I965G(dev)) {
+ if (INTEL_INFO(dev)->gen < 4) {
/*
* On the 965, the sampler cache always gets flushed
* and this bit is reserved.
@@ -118,38 +119,26 @@ render_ring_flush(struct drm_device *dev,
}
}
-static unsigned int render_ring_get_head(struct drm_device *dev,
- struct intel_ring_buffer *ring)
-{
- drm_i915_private_t *dev_priv = dev->dev_private;
- return I915_READ(PRB0_HEAD) & HEAD_ADDR;
-}
-
-static unsigned int render_ring_get_tail(struct drm_device *dev,
- struct intel_ring_buffer *ring)
+static void ring_write_tail(struct drm_device *dev,
+ struct intel_ring_buffer *ring,
+ u32 value)
{
drm_i915_private_t *dev_priv = dev->dev_private;
- return I915_READ(PRB0_TAIL) & TAIL_ADDR;
+ I915_WRITE_TAIL(ring, value);
}
-static unsigned int render_ring_get_active_head(struct drm_device *dev,
- struct intel_ring_buffer *ring)
+u32 intel_ring_get_active_head(struct drm_device *dev,
+ struct intel_ring_buffer *ring)
{
drm_i915_private_t *dev_priv = dev->dev_private;
- u32 acthd_reg = IS_I965G(dev) ? ACTHD_I965 : ACTHD;
+ u32 acthd_reg = INTEL_INFO(dev)->gen >= 4 ?
+ RING_ACTHD(ring->mmio_base) : ACTHD;
return I915_READ(acthd_reg);
}
-static void render_ring_advance_ring(struct drm_device *dev,
- struct intel_ring_buffer *ring)
-{
- drm_i915_private_t *dev_priv = dev->dev_private;
- I915_WRITE(PRB0_TAIL, ring->tail);
-}
-
static int init_ring_common(struct drm_device *dev,
- struct intel_ring_buffer *ring)
+ struct intel_ring_buffer *ring)
{
u32 head;
drm_i915_private_t *dev_priv = dev->dev_private;
@@ -157,57 +146,59 @@ static int init_ring_common(struct drm_device *dev,
obj_priv = to_intel_bo(ring->gem_object);
/* Stop the ring if it's running. */
- I915_WRITE(ring->regs.ctl, 0);
- I915_WRITE(ring->regs.head, 0);
- I915_WRITE(ring->regs.tail, 0);
+ I915_WRITE_CTL(ring, 0);
+ I915_WRITE_HEAD(ring, 0);
+ ring->write_tail(dev, ring, 0);
/* Initialize the ring. */
- I915_WRITE(ring->regs.start, obj_priv->gtt_offset);
- head = ring->get_head(dev, ring);
+ I915_WRITE_START(ring, obj_priv->gtt_offset);
+ head = I915_READ_HEAD(ring) & HEAD_ADDR;
/* G45 ring initialization fails to reset head to zero */
if (head != 0) {
- DRM_ERROR("%s head not reset to zero "
- "ctl %08x head %08x tail %08x start %08x\n",
- ring->name,
- I915_READ(ring->regs.ctl),
- I915_READ(ring->regs.head),
- I915_READ(ring->regs.tail),
- I915_READ(ring->regs.start));
-
- I915_WRITE(ring->regs.head, 0);
-
- DRM_ERROR("%s head forced to zero "
- "ctl %08x head %08x tail %08x start %08x\n",
- ring->name,
- I915_READ(ring->regs.ctl),
- I915_READ(ring->regs.head),
- I915_READ(ring->regs.tail),
- I915_READ(ring->regs.start));
+ DRM_DEBUG_KMS("%s head not reset to zero "
+ "ctl %08x head %08x tail %08x start %08x\n",
+ ring->name,
+ I915_READ_CTL(ring),
+ I915_READ_HEAD(ring),
+ I915_READ_TAIL(ring),
+ I915_READ_START(ring));
+
+ I915_WRITE_HEAD(ring, 0);
+
+ if (I915_READ_HEAD(ring) & HEAD_ADDR) {
+ DRM_ERROR("failed to set %s head to zero "
+ "ctl %08x head %08x tail %08x start %08x\n",
+ ring->name,
+ I915_READ_CTL(ring),
+ I915_READ_HEAD(ring),
+ I915_READ_TAIL(ring),
+ I915_READ_START(ring));
+ }
}
- I915_WRITE(ring->regs.ctl,
+ I915_WRITE_CTL(ring,
((ring->gem_object->size - PAGE_SIZE) & RING_NR_PAGES)
- | RING_NO_REPORT | RING_VALID);
+ | RING_REPORT_64K | RING_VALID);
- head = I915_READ(ring->regs.head) & HEAD_ADDR;
+ head = I915_READ_HEAD(ring) & HEAD_ADDR;
/* If the head is still not zero, the ring is dead */
if (head != 0) {
DRM_ERROR("%s initialization failed "
"ctl %08x head %08x tail %08x start %08x\n",
ring->name,
- I915_READ(ring->regs.ctl),
- I915_READ(ring->regs.head),
- I915_READ(ring->regs.tail),
- I915_READ(ring->regs.start));
+ I915_READ_CTL(ring),
+ I915_READ_HEAD(ring),
+ I915_READ_TAIL(ring),
+ I915_READ_START(ring));
return -EIO;
}
if (!drm_core_check_feature(dev, DRIVER_MODESET))
i915_kernel_lost_context(dev);
else {
- ring->head = ring->get_head(dev, ring);
- ring->tail = ring->get_tail(dev, ring);
+ ring->head = I915_READ_HEAD(ring) & HEAD_ADDR;
+ ring->tail = I915_READ_TAIL(ring) & TAIL_ADDR;
ring->space = ring->head - (ring->tail + 8);
if (ring->space < 0)
ring->space += ring->size;
@@ -216,13 +207,13 @@ static int init_ring_common(struct drm_device *dev,
}
static int init_render_ring(struct drm_device *dev,
- struct intel_ring_buffer *ring)
+ struct intel_ring_buffer *ring)
{
drm_i915_private_t *dev_priv = dev->dev_private;
int ret = init_ring_common(dev, ring);
int mode;
- if (IS_I9XX(dev) && !IS_GEN3(dev)) {
+ if (INTEL_INFO(dev)->gen > 3) {
mode = VS_TIMER_DISPATCH << 16 | VS_TIMER_DISPATCH;
if (IS_GEN6(dev))
mode |= MI_FLUSH_ENABLE << 16 | MI_FLUSH_ENABLE;
@@ -250,9 +241,8 @@ do { \
*/
static u32
render_ring_add_request(struct drm_device *dev,
- struct intel_ring_buffer *ring,
- struct drm_file *file_priv,
- u32 flush_domains)
+ struct intel_ring_buffer *ring,
+ u32 flush_domains)
{
drm_i915_private_t *dev_priv = dev->dev_private;
u32 seqno;
@@ -315,8 +305,8 @@ render_ring_add_request(struct drm_device *dev,
}
static u32
-render_ring_get_gem_seqno(struct drm_device *dev,
- struct intel_ring_buffer *ring)
+render_ring_get_seqno(struct drm_device *dev,
+ struct intel_ring_buffer *ring)
{
drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
if (HAS_PIPE_CONTROL(dev))
@@ -327,7 +317,7 @@ render_ring_get_gem_seqno(struct drm_device *dev,
static void
render_ring_get_user_irq(struct drm_device *dev,
- struct intel_ring_buffer *ring)
+ struct intel_ring_buffer *ring)
{
drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
unsigned long irqflags;
@@ -344,7 +334,7 @@ render_ring_get_user_irq(struct drm_device *dev,
static void
render_ring_put_user_irq(struct drm_device *dev,
- struct intel_ring_buffer *ring)
+ struct intel_ring_buffer *ring)
{
drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
unsigned long irqflags;
@@ -360,21 +350,23 @@ render_ring_put_user_irq(struct drm_device *dev,
spin_unlock_irqrestore(&dev_priv->user_irq_lock, irqflags);
}
-static void render_setup_status_page(struct drm_device *dev,
- struct intel_ring_buffer *ring)
+void intel_ring_setup_status_page(struct drm_device *dev,
+ struct intel_ring_buffer *ring)
{
drm_i915_private_t *dev_priv = dev->dev_private;
if (IS_GEN6(dev)) {
- I915_WRITE(HWS_PGA_GEN6, ring->status_page.gfx_addr);
- I915_READ(HWS_PGA_GEN6); /* posting read */
+ I915_WRITE(RING_HWS_PGA_GEN6(ring->mmio_base),
+ ring->status_page.gfx_addr);
+ I915_READ(RING_HWS_PGA_GEN6(ring->mmio_base)); /* posting read */
} else {
- I915_WRITE(HWS_PGA, ring->status_page.gfx_addr);
- I915_READ(HWS_PGA); /* posting read */
+ I915_WRITE(RING_HWS_PGA(ring->mmio_base),
+ ring->status_page.gfx_addr);
+ I915_READ(RING_HWS_PGA(ring->mmio_base)); /* posting read */
}
}
-void
+static void
bsd_ring_flush(struct drm_device *dev,
struct intel_ring_buffer *ring,
u32 invalidate_domains,
@@ -386,45 +378,16 @@ bsd_ring_flush(struct drm_device *dev,
intel_ring_advance(dev, ring);
}
-static inline unsigned int bsd_ring_get_head(struct drm_device *dev,
- struct intel_ring_buffer *ring)
-{
- drm_i915_private_t *dev_priv = dev->dev_private;
- return I915_READ(BSD_RING_HEAD) & HEAD_ADDR;
-}
-
-static inline unsigned int bsd_ring_get_tail(struct drm_device *dev,
- struct intel_ring_buffer *ring)
-{
- drm_i915_private_t *dev_priv = dev->dev_private;
- return I915_READ(BSD_RING_TAIL) & TAIL_ADDR;
-}
-
-static inline unsigned int bsd_ring_get_active_head(struct drm_device *dev,
- struct intel_ring_buffer *ring)
-{
- drm_i915_private_t *dev_priv = dev->dev_private;
- return I915_READ(BSD_RING_ACTHD);
-}
-
-static inline void bsd_ring_advance_ring(struct drm_device *dev,
- struct intel_ring_buffer *ring)
-{
- drm_i915_private_t *dev_priv = dev->dev_private;
- I915_WRITE(BSD_RING_TAIL, ring->tail);
-}
-
static int init_bsd_ring(struct drm_device *dev,
- struct intel_ring_buffer *ring)
+ struct intel_ring_buffer *ring)
{
return init_ring_common(dev, ring);
}
static u32
-bsd_ring_add_request(struct drm_device *dev,
- struct intel_ring_buffer *ring,
- struct drm_file *file_priv,
- u32 flush_domains)
+ring_add_request(struct drm_device *dev,
+ struct intel_ring_buffer *ring,
+ u32 flush_domains)
{
u32 seqno;
@@ -443,40 +406,32 @@ bsd_ring_add_request(struct drm_device *dev,
return seqno;
}
-static void bsd_setup_status_page(struct drm_device *dev,
- struct intel_ring_buffer *ring)
-{
- drm_i915_private_t *dev_priv = dev->dev_private;
- I915_WRITE(BSD_HWS_PGA, ring->status_page.gfx_addr);
- I915_READ(BSD_HWS_PGA);
-}
-
static void
bsd_ring_get_user_irq(struct drm_device *dev,
- struct intel_ring_buffer *ring)
+ struct intel_ring_buffer *ring)
{
/* do nothing */
}
static void
bsd_ring_put_user_irq(struct drm_device *dev,
- struct intel_ring_buffer *ring)
+ struct intel_ring_buffer *ring)
{
/* do nothing */
}
static u32
-bsd_ring_get_gem_seqno(struct drm_device *dev,
- struct intel_ring_buffer *ring)
+ring_status_page_get_seqno(struct drm_device *dev,
+ struct intel_ring_buffer *ring)
{
return intel_read_status_page(ring, I915_GEM_HWS_INDEX);
}
static int
-bsd_ring_dispatch_gem_execbuffer(struct drm_device *dev,
- struct intel_ring_buffer *ring,
- struct drm_i915_gem_execbuffer2 *exec,
- struct drm_clip_rect *cliprects,
- uint64_t exec_offset)
+ring_dispatch_gem_execbuffer(struct drm_device *dev,
+ struct intel_ring_buffer *ring,
+ struct drm_i915_gem_execbuffer2 *exec,
+ struct drm_clip_rect *cliprects,
+ uint64_t exec_offset)
{
uint32_t exec_start;
exec_start = (uint32_t) exec_offset + exec->batch_start_offset;
@@ -488,13 +443,12 @@ bsd_ring_dispatch_gem_execbuffer(struct drm_device *dev,
return 0;
}
-
static int
render_ring_dispatch_gem_execbuffer(struct drm_device *dev,
- struct intel_ring_buffer *ring,
- struct drm_i915_gem_execbuffer2 *exec,
- struct drm_clip_rect *cliprects,
- uint64_t exec_offset)
+ struct intel_ring_buffer *ring,
+ struct drm_i915_gem_execbuffer2 *exec,
+ struct drm_clip_rect *cliprects,
+ uint64_t exec_offset)
{
drm_i915_private_t *dev_priv = dev->dev_private;
int nbox = exec->num_cliprects;
@@ -523,8 +477,8 @@ render_ring_dispatch_gem_execbuffer(struct drm_device *dev,
intel_ring_emit(dev, ring, exec_start + exec_len - 4);
intel_ring_emit(dev, ring, 0);
} else {
- intel_ring_begin(dev, ring, 4);
- if (IS_I965G(dev)) {
+ intel_ring_begin(dev, ring, 2);
+ if (INTEL_INFO(dev)->gen >= 4) {
intel_ring_emit(dev, ring,
MI_BATCH_BUFFER_START | (2 << 6)
| MI_BATCH_NON_SECURE_I965);
@@ -539,7 +493,7 @@ render_ring_dispatch_gem_execbuffer(struct drm_device *dev,
intel_ring_advance(dev, ring);
}
- if (IS_G4X(dev) || IS_IRONLAKE(dev)) {
+ if (IS_G4X(dev) || IS_GEN5(dev)) {
intel_ring_begin(dev, ring, 2);
intel_ring_emit(dev, ring, MI_FLUSH |
MI_NO_WRITE_FLUSH |
@@ -553,7 +507,7 @@ render_ring_dispatch_gem_execbuffer(struct drm_device *dev,
}
static void cleanup_status_page(struct drm_device *dev,
- struct intel_ring_buffer *ring)
+ struct intel_ring_buffer *ring)
{
drm_i915_private_t *dev_priv = dev->dev_private;
struct drm_gem_object *obj;
@@ -573,7 +527,7 @@ static void cleanup_status_page(struct drm_device *dev,
}
static int init_status_page(struct drm_device *dev,
- struct intel_ring_buffer *ring)
+ struct intel_ring_buffer *ring)
{
drm_i915_private_t *dev_priv = dev->dev_private;
struct drm_gem_object *obj;
@@ -603,7 +557,7 @@ static int init_status_page(struct drm_device *dev,
ring->status_page.obj = obj;
memset(ring->status_page.page_addr, 0, PAGE_SIZE);
- ring->setup_status_page(dev, ring);
+ intel_ring_setup_status_page(dev, ring);
DRM_DEBUG_DRIVER("%s hws offset: 0x%08x\n",
ring->name, ring->status_page.gfx_addr);
@@ -617,15 +571,18 @@ err:
return ret;
}
-
int intel_init_ring_buffer(struct drm_device *dev,
- struct intel_ring_buffer *ring)
+ struct intel_ring_buffer *ring)
{
+ struct drm_i915_private *dev_priv = dev->dev_private;
struct drm_i915_gem_object *obj_priv;
struct drm_gem_object *obj;
int ret;
ring->dev = dev;
+ INIT_LIST_HEAD(&ring->active_list);
+ INIT_LIST_HEAD(&ring->request_list);
+ INIT_LIST_HEAD(&ring->gpu_write_list);
if (I915_NEED_GFX_HWS(dev)) {
ret = init_status_page(dev, ring);
@@ -642,7 +599,7 @@ int intel_init_ring_buffer(struct drm_device *dev,
ring->gem_object = obj;
- ret = i915_gem_object_pin(obj, ring->alignment);
+ ret = i915_gem_object_pin(obj, PAGE_SIZE);
if (ret)
goto err_unref;
@@ -668,14 +625,12 @@ int intel_init_ring_buffer(struct drm_device *dev,
if (!drm_core_check_feature(dev, DRIVER_MODESET))
i915_kernel_lost_context(dev);
else {
- ring->head = ring->get_head(dev, ring);
- ring->tail = ring->get_tail(dev, ring);
+ ring->head = I915_READ_HEAD(ring) & HEAD_ADDR;
+ ring->tail = I915_READ_TAIL(ring) & TAIL_ADDR;
ring->space = ring->head - (ring->tail + 8);
if (ring->space < 0)
ring->space += ring->size;
}
- INIT_LIST_HEAD(&ring->active_list);
- INIT_LIST_HEAD(&ring->request_list);
return ret;
err_unmap:
@@ -691,7 +646,7 @@ err_hws:
}
void intel_cleanup_ring_buffer(struct drm_device *dev,
- struct intel_ring_buffer *ring)
+ struct intel_ring_buffer *ring)
{
if (ring->gem_object == NULL)
return;
@@ -701,11 +656,15 @@ void intel_cleanup_ring_buffer(struct drm_device *dev,
i915_gem_object_unpin(ring->gem_object);
drm_gem_object_unreference(ring->gem_object);
ring->gem_object = NULL;
+
+ if (ring->cleanup)
+ ring->cleanup(ring);
+
cleanup_status_page(dev, ring);
}
-int intel_wrap_ring_buffer(struct drm_device *dev,
- struct intel_ring_buffer *ring)
+static int intel_wrap_ring_buffer(struct drm_device *dev,
+ struct intel_ring_buffer *ring)
{
unsigned int *virt;
int rem;
@@ -731,14 +690,26 @@ int intel_wrap_ring_buffer(struct drm_device *dev,
}
int intel_wait_ring_buffer(struct drm_device *dev,
- struct intel_ring_buffer *ring, int n)
+ struct intel_ring_buffer *ring, int n)
{
unsigned long end;
+ drm_i915_private_t *dev_priv = dev->dev_private;
+ u32 head;
+
+ head = intel_read_status_page(ring, 4);
+ if (head) {
+ ring->head = head & HEAD_ADDR;
+ ring->space = ring->head - (ring->tail + 8);
+ if (ring->space < 0)
+ ring->space += ring->size;
+ if (ring->space >= n)
+ return 0;
+ }
trace_i915_ring_wait_begin (dev);
end = jiffies + 3 * HZ;
do {
- ring->head = ring->get_head(dev, ring);
+ ring->head = I915_READ_HEAD(ring) & HEAD_ADDR;
ring->space = ring->head - (ring->tail + 8);
if (ring->space < 0)
ring->space += ring->size;
@@ -753,14 +724,15 @@ int intel_wait_ring_buffer(struct drm_device *dev,
master_priv->sarea_priv->perf_boxes |= I915_BOX_WAIT;
}
- yield();
+ msleep(1);
} while (!time_after(jiffies, end));
trace_i915_ring_wait_end (dev);
return -EBUSY;
}
void intel_ring_begin(struct drm_device *dev,
- struct intel_ring_buffer *ring, int num_dwords)
+ struct intel_ring_buffer *ring,
+ int num_dwords)
{
int n = 4*num_dwords;
if (unlikely(ring->tail + n > ring->size))
@@ -772,97 +744,287 @@ void intel_ring_begin(struct drm_device *dev,
}
void intel_ring_advance(struct drm_device *dev,
- struct intel_ring_buffer *ring)
+ struct intel_ring_buffer *ring)
{
ring->tail &= ring->size - 1;
- ring->advance_ring(dev, ring);
+ ring->write_tail(dev, ring, ring->tail);
}
-void intel_fill_struct(struct drm_device *dev,
- struct intel_ring_buffer *ring,
- void *data,
- unsigned int len)
-{
- unsigned int *virt = ring->virtual_start + ring->tail;
- BUG_ON((len&~(4-1)) != 0);
- intel_ring_begin(dev, ring, len/4);
- memcpy(virt, data, len);
- ring->tail += len;
- ring->tail &= ring->size - 1;
- ring->space -= len;
- intel_ring_advance(dev, ring);
-}
-
-struct intel_ring_buffer render_ring = {
+static const struct intel_ring_buffer render_ring = {
.name = "render ring",
- .regs = {
- .ctl = PRB0_CTL,
- .head = PRB0_HEAD,
- .tail = PRB0_TAIL,
- .start = PRB0_START
- },
- .ring_flag = I915_EXEC_RENDER,
+ .id = RING_RENDER,
+ .mmio_base = RENDER_RING_BASE,
.size = 32 * PAGE_SIZE,
- .alignment = PAGE_SIZE,
- .virtual_start = NULL,
- .dev = NULL,
- .gem_object = NULL,
- .head = 0,
- .tail = 0,
- .space = 0,
- .user_irq_refcount = 0,
- .irq_gem_seqno = 0,
- .waiting_gem_seqno = 0,
- .setup_status_page = render_setup_status_page,
.init = init_render_ring,
- .get_head = render_ring_get_head,
- .get_tail = render_ring_get_tail,
- .get_active_head = render_ring_get_active_head,
- .advance_ring = render_ring_advance_ring,
+ .write_tail = ring_write_tail,
.flush = render_ring_flush,
.add_request = render_ring_add_request,
- .get_gem_seqno = render_ring_get_gem_seqno,
+ .get_seqno = render_ring_get_seqno,
.user_irq_get = render_ring_get_user_irq,
.user_irq_put = render_ring_put_user_irq,
.dispatch_gem_execbuffer = render_ring_dispatch_gem_execbuffer,
- .status_page = {NULL, 0, NULL},
- .map = {0,}
};
/* ring buffer for bit-stream decoder */
-struct intel_ring_buffer bsd_ring = {
+static const struct intel_ring_buffer bsd_ring = {
.name = "bsd ring",
- .regs = {
- .ctl = BSD_RING_CTL,
- .head = BSD_RING_HEAD,
- .tail = BSD_RING_TAIL,
- .start = BSD_RING_START
- },
- .ring_flag = I915_EXEC_BSD,
+ .id = RING_BSD,
+ .mmio_base = BSD_RING_BASE,
.size = 32 * PAGE_SIZE,
- .alignment = PAGE_SIZE,
- .virtual_start = NULL,
- .dev = NULL,
- .gem_object = NULL,
- .head = 0,
- .tail = 0,
- .space = 0,
- .user_irq_refcount = 0,
- .irq_gem_seqno = 0,
- .waiting_gem_seqno = 0,
- .setup_status_page = bsd_setup_status_page,
.init = init_bsd_ring,
- .get_head = bsd_ring_get_head,
- .get_tail = bsd_ring_get_tail,
- .get_active_head = bsd_ring_get_active_head,
- .advance_ring = bsd_ring_advance_ring,
+ .write_tail = ring_write_tail,
.flush = bsd_ring_flush,
- .add_request = bsd_ring_add_request,
- .get_gem_seqno = bsd_ring_get_gem_seqno,
+ .add_request = ring_add_request,
+ .get_seqno = ring_status_page_get_seqno,
.user_irq_get = bsd_ring_get_user_irq,
.user_irq_put = bsd_ring_put_user_irq,
- .dispatch_gem_execbuffer = bsd_ring_dispatch_gem_execbuffer,
- .status_page = {NULL, 0, NULL},
- .map = {0,}
+ .dispatch_gem_execbuffer = ring_dispatch_gem_execbuffer,
+};
+
+
+static void gen6_bsd_ring_write_tail(struct drm_device *dev,
+ struct intel_ring_buffer *ring,
+ u32 value)
+{
+ drm_i915_private_t *dev_priv = dev->dev_private;
+
+ /* Every tail move must follow the sequence below */
+ I915_WRITE(GEN6_BSD_SLEEP_PSMI_CONTROL,
+ GEN6_BSD_SLEEP_PSMI_CONTROL_RC_ILDL_MESSAGE_MODIFY_MASK |
+ GEN6_BSD_SLEEP_PSMI_CONTROL_RC_ILDL_MESSAGE_DISABLE);
+ I915_WRITE(GEN6_BSD_RNCID, 0x0);
+
+ if (wait_for((I915_READ(GEN6_BSD_SLEEP_PSMI_CONTROL) &
+ GEN6_BSD_SLEEP_PSMI_CONTROL_IDLE_INDICATOR) == 0,
+ 50))
+ DRM_ERROR("timed out waiting for IDLE Indicator\n");
+
+ I915_WRITE_TAIL(ring, value);
+ I915_WRITE(GEN6_BSD_SLEEP_PSMI_CONTROL,
+ GEN6_BSD_SLEEP_PSMI_CONTROL_RC_ILDL_MESSAGE_MODIFY_MASK |
+ GEN6_BSD_SLEEP_PSMI_CONTROL_RC_ILDL_MESSAGE_ENABLE);
+}
+
+static void gen6_ring_flush(struct drm_device *dev,
+ struct intel_ring_buffer *ring,
+ u32 invalidate_domains,
+ u32 flush_domains)
+{
+ intel_ring_begin(dev, ring, 4);
+ intel_ring_emit(dev, ring, MI_FLUSH_DW);
+ intel_ring_emit(dev, ring, 0);
+ intel_ring_emit(dev, ring, 0);
+ intel_ring_emit(dev, ring, 0);
+ intel_ring_advance(dev, ring);
+}
+
+static int
+gen6_ring_dispatch_gem_execbuffer(struct drm_device *dev,
+ struct intel_ring_buffer *ring,
+ struct drm_i915_gem_execbuffer2 *exec,
+ struct drm_clip_rect *cliprects,
+ uint64_t exec_offset)
+{
+ uint32_t exec_start;
+
+ exec_start = (uint32_t) exec_offset + exec->batch_start_offset;
+
+ intel_ring_begin(dev, ring, 2);
+ intel_ring_emit(dev, ring,
+ MI_BATCH_BUFFER_START | MI_BATCH_NON_SECURE_I965);
+ /* bit0-7 is the length on GEN6+ */
+ intel_ring_emit(dev, ring, exec_start);
+ intel_ring_advance(dev, ring);
+
+ return 0;
+}
+
+/* ring buffer for Video Codec for Gen6+ */
+static const struct intel_ring_buffer gen6_bsd_ring = {
+ .name = "gen6 bsd ring",
+ .id = RING_BSD,
+ .mmio_base = GEN6_BSD_RING_BASE,
+ .size = 32 * PAGE_SIZE,
+ .init = init_bsd_ring,
+ .write_tail = gen6_bsd_ring_write_tail,
+ .flush = gen6_ring_flush,
+ .add_request = ring_add_request,
+ .get_seqno = ring_status_page_get_seqno,
+ .user_irq_get = bsd_ring_get_user_irq,
+ .user_irq_put = bsd_ring_put_user_irq,
+ .dispatch_gem_execbuffer = gen6_ring_dispatch_gem_execbuffer,
};
+
+/* Blitter support (SandyBridge+) */
+
+static void
+blt_ring_get_user_irq(struct drm_device *dev,
+ struct intel_ring_buffer *ring)
+{
+ /* do nothing */
+}
+static void
+blt_ring_put_user_irq(struct drm_device *dev,
+ struct intel_ring_buffer *ring)
+{
+ /* do nothing */
+}
+
+
+/* Workaround for some stepping of SNB,
+ * each time when BLT engine ring tail moved,
+ * the first command in the ring to be parsed
+ * should be MI_BATCH_BUFFER_START
+ */
+#define NEED_BLT_WORKAROUND(dev) \
+ (IS_GEN6(dev) && (dev->pdev->revision < 8))
+
+static inline struct drm_i915_gem_object *
+to_blt_workaround(struct intel_ring_buffer *ring)
+{
+ return ring->private;
+}
+
+static int blt_ring_init(struct drm_device *dev,
+ struct intel_ring_buffer *ring)
+{
+ if (NEED_BLT_WORKAROUND(dev)) {
+ struct drm_i915_gem_object *obj;
+ u32 __iomem *ptr;
+ int ret;
+
+ obj = to_intel_bo(i915_gem_alloc_object(dev, 4096));
+ if (obj == NULL)
+ return -ENOMEM;
+
+ ret = i915_gem_object_pin(&obj->base, 4096);
+ if (ret) {
+ drm_gem_object_unreference(&obj->base);
+ return ret;
+ }
+
+ ptr = kmap(obj->pages[0]);
+ iowrite32(MI_BATCH_BUFFER_END, ptr);
+ iowrite32(MI_NOOP, ptr+1);
+ kunmap(obj->pages[0]);
+
+ ret = i915_gem_object_set_to_gtt_domain(&obj->base, false);
+ if (ret) {
+ i915_gem_object_unpin(&obj->base);
+ drm_gem_object_unreference(&obj->base);
+ return ret;
+ }
+
+ ring->private = obj;
+ }
+
+ return init_ring_common(dev, ring);
+}
+
+static void blt_ring_begin(struct drm_device *dev,
+ struct intel_ring_buffer *ring,
+ int num_dwords)
+{
+ if (ring->private) {
+ intel_ring_begin(dev, ring, num_dwords+2);
+ intel_ring_emit(dev, ring, MI_BATCH_BUFFER_START);
+ intel_ring_emit(dev, ring, to_blt_workaround(ring)->gtt_offset);
+ } else
+ intel_ring_begin(dev, ring, 4);
+}
+
+static void blt_ring_flush(struct drm_device *dev,
+ struct intel_ring_buffer *ring,
+ u32 invalidate_domains,
+ u32 flush_domains)
+{
+ blt_ring_begin(dev, ring, 4);
+ intel_ring_emit(dev, ring, MI_FLUSH_DW);
+ intel_ring_emit(dev, ring, 0);
+ intel_ring_emit(dev, ring, 0);
+ intel_ring_emit(dev, ring, 0);
+ intel_ring_advance(dev, ring);
+}
+
+static u32
+blt_ring_add_request(struct drm_device *dev,
+ struct intel_ring_buffer *ring,
+ u32 flush_domains)
+{
+ u32 seqno = i915_gem_get_seqno(dev);
+
+ blt_ring_begin(dev, ring, 4);
+ intel_ring_emit(dev, ring, MI_STORE_DWORD_INDEX);
+ intel_ring_emit(dev, ring,
+ I915_GEM_HWS_INDEX << MI_STORE_DWORD_INDEX_SHIFT);
+ intel_ring_emit(dev, ring, seqno);
+ intel_ring_emit(dev, ring, MI_USER_INTERRUPT);
+ intel_ring_advance(dev, ring);
+
+ DRM_DEBUG_DRIVER("%s %d\n", ring->name, seqno);
+ return seqno;
+}
+
+static void blt_ring_cleanup(struct intel_ring_buffer *ring)
+{
+ if (!ring->private)
+ return;
+
+ i915_gem_object_unpin(ring->private);
+ drm_gem_object_unreference(ring->private);
+ ring->private = NULL;
+}
+
+static const struct intel_ring_buffer gen6_blt_ring = {
+ .name = "blt ring",
+ .id = RING_BLT,
+ .mmio_base = BLT_RING_BASE,
+ .size = 32 * PAGE_SIZE,
+ .init = blt_ring_init,
+ .write_tail = ring_write_tail,
+ .flush = blt_ring_flush,
+ .add_request = blt_ring_add_request,
+ .get_seqno = ring_status_page_get_seqno,
+ .user_irq_get = blt_ring_get_user_irq,
+ .user_irq_put = blt_ring_put_user_irq,
+ .dispatch_gem_execbuffer = gen6_ring_dispatch_gem_execbuffer,
+ .cleanup = blt_ring_cleanup,
+};
+
+int intel_init_render_ring_buffer(struct drm_device *dev)
+{
+ drm_i915_private_t *dev_priv = dev->dev_private;
+
+ dev_priv->render_ring = render_ring;
+
+ if (!I915_NEED_GFX_HWS(dev)) {
+ dev_priv->render_ring.status_page.page_addr
+ = dev_priv->status_page_dmah->vaddr;
+ memset(dev_priv->render_ring.status_page.page_addr,
+ 0, PAGE_SIZE);
+ }
+
+ return intel_init_ring_buffer(dev, &dev_priv->render_ring);
+}
+
+int intel_init_bsd_ring_buffer(struct drm_device *dev)
+{
+ drm_i915_private_t *dev_priv = dev->dev_private;
+
+ if (IS_GEN6(dev))
+ dev_priv->bsd_ring = gen6_bsd_ring;
+ else
+ dev_priv->bsd_ring = bsd_ring;
+
+ return intel_init_ring_buffer(dev, &dev_priv->bsd_ring);
+}
+
+int intel_init_blt_ring_buffer(struct drm_device *dev)
+{
+ drm_i915_private_t *dev_priv = dev->dev_private;
+
+ dev_priv->blt_ring = gen6_blt_ring;
+
+ return intel_init_ring_buffer(dev, &dev_priv->blt_ring);
+}
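
Both init_ring_common() and intel_wait_ring_buffer() above recompute free ring space as head minus (tail + 8), adding the ring size back when the result goes negative; the 8-byte slack keeps the tail from advancing right up to the head. A tiny sketch of that wrap-around arithmetic with invented head/tail offsets:

#include <stdio.h>

/* Free bytes between tail and head in a circular ring, mirroring the
 * head - (tail + 8) computation used by the ring code above. */
static int ring_space(int head, int tail, int size)
{
        int space = head - (tail + 8);
        if (space < 0)
                space += size;
        return space;
}

int main(void)
{
        int size = 32 * 4096;   /* 32 pages, as in the ring definitions above */

        /* invented offsets: head ahead of tail ... */
        printf("space = %d\n", ring_space(0x2000, 0x1000, size));
        /* ... and tail has wrapped past the head */
        printf("space = %d\n", ring_space(0x1000, 0x2000, size));
        return 0;
}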
diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.h b/drivers/gpu/drm/i915/intel_ringbuffer.h
index 525e7d3edda8..3126c2681983 100644
--- a/drivers/gpu/drm/i915/intel_ringbuffer.h
+++ b/drivers/gpu/drm/i915/intel_ringbuffer.h
@@ -7,25 +7,32 @@ struct intel_hw_status_page {
struct drm_gem_object *obj;
};
+#define I915_READ_TAIL(ring) I915_READ(RING_TAIL(ring->mmio_base))
+#define I915_WRITE_TAIL(ring, val) I915_WRITE(RING_TAIL(ring->mmio_base), val)
+#define I915_READ_START(ring) I915_READ(RING_START(ring->mmio_base))
+#define I915_WRITE_START(ring, val) I915_WRITE(RING_START(ring->mmio_base), val)
+#define I915_READ_HEAD(ring) I915_READ(RING_HEAD(ring->mmio_base))
+#define I915_WRITE_HEAD(ring, val) I915_WRITE(RING_HEAD(ring->mmio_base), val)
+#define I915_READ_CTL(ring) I915_READ(RING_CTL(ring->mmio_base))
+#define I915_WRITE_CTL(ring, val) I915_WRITE(RING_CTL(ring->mmio_base), val)
+
struct drm_i915_gem_execbuffer2;
struct intel_ring_buffer {
const char *name;
- struct ring_regs {
- u32 ctl;
- u32 head;
- u32 tail;
- u32 start;
- } regs;
- unsigned int ring_flag;
+ enum intel_ring_id {
+ RING_RENDER = 0x1,
+ RING_BSD = 0x2,
+ RING_BLT = 0x4,
+ } id;
+ u32 mmio_base;
unsigned long size;
- unsigned int alignment;
void *virtual_start;
struct drm_device *dev;
struct drm_gem_object *gem_object;
unsigned int head;
unsigned int tail;
- unsigned int space;
+ int space;
struct intel_hw_status_page status_page;
u32 irq_gem_seqno; /* last seq seem at irq time */
@@ -35,35 +42,28 @@ struct intel_ring_buffer {
struct intel_ring_buffer *ring);
void (*user_irq_put)(struct drm_device *dev,
struct intel_ring_buffer *ring);
- void (*setup_status_page)(struct drm_device *dev,
- struct intel_ring_buffer *ring);
int (*init)(struct drm_device *dev,
struct intel_ring_buffer *ring);
- unsigned int (*get_head)(struct drm_device *dev,
- struct intel_ring_buffer *ring);
- unsigned int (*get_tail)(struct drm_device *dev,
- struct intel_ring_buffer *ring);
- unsigned int (*get_active_head)(struct drm_device *dev,
- struct intel_ring_buffer *ring);
- void (*advance_ring)(struct drm_device *dev,
- struct intel_ring_buffer *ring);
+ void (*write_tail)(struct drm_device *dev,
+ struct intel_ring_buffer *ring,
+ u32 value);
void (*flush)(struct drm_device *dev,
struct intel_ring_buffer *ring,
u32 invalidate_domains,
u32 flush_domains);
u32 (*add_request)(struct drm_device *dev,
struct intel_ring_buffer *ring,
- struct drm_file *file_priv,
u32 flush_domains);
- u32 (*get_gem_seqno)(struct drm_device *dev,
- struct intel_ring_buffer *ring);
+ u32 (*get_seqno)(struct drm_device *dev,
+ struct intel_ring_buffer *ring);
int (*dispatch_gem_execbuffer)(struct drm_device *dev,
struct intel_ring_buffer *ring,
struct drm_i915_gem_execbuffer2 *exec,
struct drm_clip_rect *cliprects,
uint64_t exec_offset);
+ void (*cleanup)(struct intel_ring_buffer *ring);
/**
* List of objects currently involved in rendering from the
@@ -83,8 +83,24 @@ struct intel_ring_buffer {
*/
struct list_head request_list;
+ /**
+ * List of objects currently pending a GPU write flush.
+ *
+ * All elements on this list will belong to either the
+ * active_list or flushing_list, last_rendering_seqno can
+ * be used to differentiate between the two elements.
+ */
+ struct list_head gpu_write_list;
+
+ /**
+ * Do we have some not yet emitted requests outstanding?
+ */
+ bool outstanding_lazy_request;
+
wait_queue_head_t irq_queue;
drm_local_map_t map;
+
+ void *private;
};
static inline u32
@@ -96,15 +112,13 @@ intel_read_status_page(struct intel_ring_buffer *ring,
}
int intel_init_ring_buffer(struct drm_device *dev,
- struct intel_ring_buffer *ring);
+ struct intel_ring_buffer *ring);
void intel_cleanup_ring_buffer(struct drm_device *dev,
- struct intel_ring_buffer *ring);
+ struct intel_ring_buffer *ring);
int intel_wait_ring_buffer(struct drm_device *dev,
- struct intel_ring_buffer *ring, int n);
-int intel_wrap_ring_buffer(struct drm_device *dev,
- struct intel_ring_buffer *ring);
+ struct intel_ring_buffer *ring, int n);
void intel_ring_begin(struct drm_device *dev,
- struct intel_ring_buffer *ring, int n);
+ struct intel_ring_buffer *ring, int n);
static inline void intel_ring_emit(struct drm_device *dev,
struct intel_ring_buffer *ring,
@@ -115,17 +129,19 @@ static inline void intel_ring_emit(struct drm_device *dev,
ring->tail += 4;
}
-void intel_fill_struct(struct drm_device *dev,
- struct intel_ring_buffer *ring,
- void *data,
- unsigned int len);
void intel_ring_advance(struct drm_device *dev,
struct intel_ring_buffer *ring);
u32 intel_ring_get_seqno(struct drm_device *dev,
struct intel_ring_buffer *ring);
-extern struct intel_ring_buffer render_ring;
-extern struct intel_ring_buffer bsd_ring;
+int intel_init_render_ring_buffer(struct drm_device *dev);
+int intel_init_bsd_ring_buffer(struct drm_device *dev);
+int intel_init_blt_ring_buffer(struct drm_device *dev);
+
+u32 intel_ring_get_active_head(struct drm_device *dev,
+ struct intel_ring_buffer *ring);
+void intel_ring_setup_status_page(struct drm_device *dev,
+ struct intel_ring_buffer *ring);
#endif /* _INTEL_RINGBUFFER_H_ */
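
The header rework above drops the per-ring register struct and getter hooks in favour of a single mmio_base plus a handful of function pointers (write_tail, flush, add_request, get_seqno, ...), so common code can drive the render, BSD and blitter engines through one interface. Below is a stripped-down illustration of that dispatch pattern; the struct layout, hook names and register bases are simplified stand-ins, not the driver's real definitions:

#include <stdint.h>
#include <stdio.h>

/* Illustrative mini-version of the ring vtable: callers go through
 * write_tail() without knowing which engine they are driving. */
struct ring {
        const char *name;
        uint32_t mmio_base;
        uint32_t tail;
        void (*write_tail)(struct ring *ring, uint32_t value);
};

static void plain_write_tail(struct ring *ring, uint32_t value)
{
        /* a real implementation would write the TAIL register at mmio_base */
        ring->tail = value;
        printf("%s: tail <- 0x%x (reg base 0x%x)\n",
               ring->name, value, ring->mmio_base);
}

static void fancy_write_tail(struct ring *ring, uint32_t value)
{
        /* stand-in for a ring that needs extra work around tail moves,
         * like the gen6 BSD sequence in the patch above */
        printf("%s: pre-tail handshake\n", ring->name);
        plain_write_tail(ring, value);
}

int main(void)
{
        struct ring render = { "render", 0x2000,  0, plain_write_tail };
        struct ring bsd    = { "bsd",    0x12000, 0, fancy_write_tail };

        render.write_tail(&render, 0x40);
        bsd.write_tail(&bsd, 0x80);
        return 0;
}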
diff --git a/drivers/gpu/drm/i915/intel_sdvo.c b/drivers/gpu/drm/i915/intel_sdvo.c
index ee73e428a84a..d97e6cb52d34 100644
--- a/drivers/gpu/drm/i915/intel_sdvo.c
+++ b/drivers/gpu/drm/i915/intel_sdvo.c
@@ -65,8 +65,11 @@ static const char *tv_format_names[] = {
struct intel_sdvo {
struct intel_encoder base;
+ struct i2c_adapter *i2c;
u8 slave_addr;
+ struct i2c_adapter ddc;
+
/* Register for the SDVO device: SDVOB or SDVOC */
int sdvo_reg;
@@ -104,34 +107,25 @@ struct intel_sdvo {
* This is set if we treat the device as HDMI, instead of DVI.
*/
bool is_hdmi;
+ bool has_hdmi_monitor;
+ bool has_hdmi_audio;
/**
- * This is set if we detect output of sdvo device as LVDS.
+ * This is set if we detect output of sdvo device as LVDS and
+ * have a valid fixed mode to use with the panel.
*/
bool is_lvds;
/**
- * This is sdvo flags for input timing.
- */
- uint8_t sdvo_flags;
-
- /**
* This is sdvo fixed pannel mode pointer
*/
struct drm_display_mode *sdvo_lvds_fixed_mode;
- /*
- * supported encoding mode, used to determine whether HDMI is
- * supported
- */
- struct intel_sdvo_encode encode;
-
/* DDC bus used by this SDVO encoder */
uint8_t ddc_bus;
- /* Mac mini hack -- use the same DDC as the analog connector */
- struct i2c_adapter *analog_ddc_bus;
-
+ /* Input timings for adjusted_mode */
+ struct intel_sdvo_dtd input_dtd;
};
struct intel_sdvo_connector {
@@ -140,11 +134,15 @@ struct intel_sdvo_connector {
/* Mark the type of connector */
uint16_t output_flag;
+ int force_audio;
+
/* This contains all current supported TV format */
u8 tv_format_supported[TV_FORMAT_NUM];
int format_supported_num;
struct drm_property *tv_format;
+ struct drm_property *force_audio_property;
+
/* add the property for the SDVO-TV */
struct drm_property *left;
struct drm_property *right;
@@ -186,9 +184,15 @@ struct intel_sdvo_connector {
u32 cur_dot_crawl, max_dot_crawl;
};
-static struct intel_sdvo *enc_to_intel_sdvo(struct drm_encoder *encoder)
+static struct intel_sdvo *to_intel_sdvo(struct drm_encoder *encoder)
+{
+ return container_of(encoder, struct intel_sdvo, base.base);
+}
+
+static struct intel_sdvo *intel_attached_sdvo(struct drm_connector *connector)
{
- return container_of(enc_to_intel_encoder(encoder), struct intel_sdvo, base);
+ return container_of(intel_attached_encoder(connector),
+ struct intel_sdvo, base);
}
static struct intel_sdvo_connector *to_intel_sdvo_connector(struct drm_connector *connector)
@@ -213,7 +217,7 @@ intel_sdvo_create_enhance_property(struct intel_sdvo *intel_sdvo,
*/
static void intel_sdvo_write_sdvox(struct intel_sdvo *intel_sdvo, u32 val)
{
- struct drm_device *dev = intel_sdvo->base.enc.dev;
+ struct drm_device *dev = intel_sdvo->base.base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
u32 bval = val, cval = val;
int i;
@@ -245,49 +249,29 @@ static void intel_sdvo_write_sdvox(struct intel_sdvo *intel_sdvo, u32 val)
static bool intel_sdvo_read_byte(struct intel_sdvo *intel_sdvo, u8 addr, u8 *ch)
{
- u8 out_buf[2] = { addr, 0 };
- u8 buf[2];
struct i2c_msg msgs[] = {
{
- .addr = intel_sdvo->slave_addr >> 1,
+ .addr = intel_sdvo->slave_addr,
.flags = 0,
.len = 1,
- .buf = out_buf,
+ .buf = &addr,
},
{
- .addr = intel_sdvo->slave_addr >> 1,
+ .addr = intel_sdvo->slave_addr,
.flags = I2C_M_RD,
.len = 1,
- .buf = buf,
+ .buf = ch,
}
};
int ret;
- if ((ret = i2c_transfer(intel_sdvo->base.i2c_bus, msgs, 2)) == 2)
- {
- *ch = buf[0];
+ if ((ret = i2c_transfer(intel_sdvo->i2c, msgs, 2)) == 2)
return true;
- }
DRM_DEBUG_KMS("i2c transfer returned %d\n", ret);
return false;
}
-static bool intel_sdvo_write_byte(struct intel_sdvo *intel_sdvo, int addr, u8 ch)
-{
- u8 out_buf[2] = { addr, ch };
- struct i2c_msg msgs[] = {
- {
- .addr = intel_sdvo->slave_addr >> 1,
- .flags = 0,
- .len = 2,
- .buf = out_buf,
- }
- };
-
- return i2c_transfer(intel_sdvo->base.i2c_bus, msgs, 1) == 1;
-}
-
#define SDVO_CMD_NAME_ENTRY(cmd) {cmd, #cmd}
/** Mapping of command numbers to names, for debug output */
static const struct _sdvo_cmd_name {
@@ -432,22 +416,6 @@ static void intel_sdvo_debug_write(struct intel_sdvo *intel_sdvo, u8 cmd,
DRM_LOG_KMS("\n");
}
-static bool intel_sdvo_write_cmd(struct intel_sdvo *intel_sdvo, u8 cmd,
- const void *args, int args_len)
-{
- int i;
-
- intel_sdvo_debug_write(intel_sdvo, cmd, args, args_len);
-
- for (i = 0; i < args_len; i++) {
- if (!intel_sdvo_write_byte(intel_sdvo, SDVO_I2C_ARG_0 - i,
- ((u8*)args)[i]))
- return false;
- }
-
- return intel_sdvo_write_byte(intel_sdvo, SDVO_I2C_OPCODE, cmd);
-}
-
static const char *cmd_status_names[] = {
"Power on",
"Success",
@@ -458,54 +426,115 @@ static const char *cmd_status_names[] = {
"Scaling not supported"
};
-static void intel_sdvo_debug_response(struct intel_sdvo *intel_sdvo,
- void *response, int response_len,
- u8 status)
+static bool intel_sdvo_write_cmd(struct intel_sdvo *intel_sdvo, u8 cmd,
+ const void *args, int args_len)
{
- int i;
+ u8 buf[args_len*2 + 2], status;
+ struct i2c_msg msgs[args_len + 3];
+ int i, ret;
- DRM_DEBUG_KMS("%s: R: ", SDVO_NAME(intel_sdvo));
- for (i = 0; i < response_len; i++)
- DRM_LOG_KMS("%02X ", ((u8 *)response)[i]);
- for (; i < 8; i++)
- DRM_LOG_KMS(" ");
- if (status <= SDVO_CMD_STATUS_SCALING_NOT_SUPP)
- DRM_LOG_KMS("(%s)", cmd_status_names[status]);
- else
- DRM_LOG_KMS("(??? %d)", status);
- DRM_LOG_KMS("\n");
+ intel_sdvo_debug_write(intel_sdvo, cmd, args, args_len);
+
+ for (i = 0; i < args_len; i++) {
+ msgs[i].addr = intel_sdvo->slave_addr;
+ msgs[i].flags = 0;
+ msgs[i].len = 2;
+ msgs[i].buf = buf + 2*i;
+ buf[2*i + 0] = SDVO_I2C_ARG_0 - i;
+ buf[2*i + 1] = ((u8*)args)[i];
+ }
+ msgs[i].addr = intel_sdvo->slave_addr;
+ msgs[i].flags = 0;
+ msgs[i].len = 2;
+ msgs[i].buf = buf + 2*i;
+ buf[2*i + 0] = SDVO_I2C_OPCODE;
+ buf[2*i + 1] = cmd;
+
+ /* the following two are to read the response */
+ status = SDVO_I2C_CMD_STATUS;
+ msgs[i+1].addr = intel_sdvo->slave_addr;
+ msgs[i+1].flags = 0;
+ msgs[i+1].len = 1;
+ msgs[i+1].buf = &status;
+
+ msgs[i+2].addr = intel_sdvo->slave_addr;
+ msgs[i+2].flags = I2C_M_RD;
+ msgs[i+2].len = 1;
+ msgs[i+2].buf = &status;
+
+ ret = i2c_transfer(intel_sdvo->i2c, msgs, i+3);
+ if (ret < 0) {
+ DRM_DEBUG_KMS("I2c transfer returned %d\n", ret);
+ return false;
+ }
+ if (ret != i+3) {
+ /* failure in I2C transfer */
+ DRM_DEBUG_KMS("I2c transfer returned %d/%d\n", ret, i+3);
+ return false;
+ }
+
+ i = 3;
+ while (status == SDVO_CMD_STATUS_PENDING && i--) {
+ if (!intel_sdvo_read_byte(intel_sdvo,
+ SDVO_I2C_CMD_STATUS,
+ &status))
+ return false;
+ }
+ if (status != SDVO_CMD_STATUS_SUCCESS) {
+ DRM_DEBUG_KMS("command returns response %s [%d]\n",
+ status <= SDVO_CMD_STATUS_SCALING_NOT_SUPP ? cmd_status_names[status] : "???",
+ status);
+ return false;
+ }
+
+ return true;
}
static bool intel_sdvo_read_response(struct intel_sdvo *intel_sdvo,
void *response, int response_len)
{
- int i;
+ u8 retry = 5;
u8 status;
- u8 retry = 50;
-
- while (retry--) {
- /* Read the command response */
- for (i = 0; i < response_len; i++) {
- if (!intel_sdvo_read_byte(intel_sdvo,
- SDVO_I2C_RETURN_0 + i,
- &((u8 *)response)[i]))
- return false;
- }
+ int i;
- /* read the return status */
- if (!intel_sdvo_read_byte(intel_sdvo, SDVO_I2C_CMD_STATUS,
+ /*
+ * The documentation states that all commands will be
+ * processed within 15µs, and that we need only poll
+ * the status byte a maximum of 3 times in order for the
+ * command to be complete.
+ *
+ * Check 5 times in case the hardware failed to read the docs.
+ */
+ do {
+ if (!intel_sdvo_read_byte(intel_sdvo,
+ SDVO_I2C_CMD_STATUS,
&status))
return false;
+ } while (status == SDVO_CMD_STATUS_PENDING && --retry);
- intel_sdvo_debug_response(intel_sdvo, response, response_len,
- status);
- if (status != SDVO_CMD_STATUS_PENDING)
- break;
+ DRM_DEBUG_KMS("%s: R: ", SDVO_NAME(intel_sdvo));
+ if (status <= SDVO_CMD_STATUS_SCALING_NOT_SUPP)
+ DRM_LOG_KMS("(%s)", cmd_status_names[status]);
+ else
+ DRM_LOG_KMS("(??? %d)", status);
- mdelay(50);
+ if (status != SDVO_CMD_STATUS_SUCCESS)
+ goto log_fail;
+
+ /* Read the command response */
+ for (i = 0; i < response_len; i++) {
+ if (!intel_sdvo_read_byte(intel_sdvo,
+ SDVO_I2C_RETURN_0 + i,
+ &((u8 *)response)[i]))
+ goto log_fail;
+ DRM_LOG_KMS(" %02X", ((u8 *)response)[i]);
}
+ DRM_LOG_KMS("\n");
+ return true;
- return status == SDVO_CMD_STATUS_SUCCESS;
+log_fail:
+ DRM_LOG_KMS("\n");
+ return false;
}
static int intel_sdvo_get_pixel_multiplier(struct drm_display_mode *mode)
@@ -518,71 +547,17 @@ static int intel_sdvo_get_pixel_multiplier(struct drm_display_mode *mode)
return 4;
}
-/**
- * Try to read the response after issuie the DDC switch command. But it
- * is noted that we must do the action of reading response and issuing DDC
- * switch command in one I2C transaction. Otherwise when we try to start
- * another I2C transaction after issuing the DDC bus switch, it will be
- * switched to the internal SDVO register.
- */
-static void intel_sdvo_set_control_bus_switch(struct intel_sdvo *intel_sdvo,
- u8 target)
+static bool intel_sdvo_set_control_bus_switch(struct intel_sdvo *intel_sdvo,
+ u8 ddc_bus)
{
- u8 out_buf[2], cmd_buf[2], ret_value[2], ret;
- struct i2c_msg msgs[] = {
- {
- .addr = intel_sdvo->slave_addr >> 1,
- .flags = 0,
- .len = 2,
- .buf = out_buf,
- },
- /* the following two are to read the response */
- {
- .addr = intel_sdvo->slave_addr >> 1,
- .flags = 0,
- .len = 1,
- .buf = cmd_buf,
- },
- {
- .addr = intel_sdvo->slave_addr >> 1,
- .flags = I2C_M_RD,
- .len = 1,
- .buf = ret_value,
- },
- };
-
- intel_sdvo_debug_write(intel_sdvo, SDVO_CMD_SET_CONTROL_BUS_SWITCH,
- &target, 1);
- /* write the DDC switch command argument */
- intel_sdvo_write_byte(intel_sdvo, SDVO_I2C_ARG_0, target);
-
- out_buf[0] = SDVO_I2C_OPCODE;
- out_buf[1] = SDVO_CMD_SET_CONTROL_BUS_SWITCH;
- cmd_buf[0] = SDVO_I2C_CMD_STATUS;
- cmd_buf[1] = 0;
- ret_value[0] = 0;
- ret_value[1] = 0;
-
- ret = i2c_transfer(intel_sdvo->base.i2c_bus, msgs, 3);
- if (ret != 3) {
- /* failure in I2C transfer */
- DRM_DEBUG_KMS("I2c transfer returned %d\n", ret);
- return;
- }
- if (ret_value[0] != SDVO_CMD_STATUS_SUCCESS) {
- DRM_DEBUG_KMS("DDC switch command returns response %d\n",
- ret_value[0]);
- return;
- }
- return;
+ return intel_sdvo_write_cmd(intel_sdvo,
+ SDVO_CMD_SET_CONTROL_BUS_SWITCH,
+ &ddc_bus, 1);
}
static bool intel_sdvo_set_value(struct intel_sdvo *intel_sdvo, u8 cmd, const void *data, int len)
{
- if (!intel_sdvo_write_cmd(intel_sdvo, cmd, data, len))
- return false;
-
- return intel_sdvo_read_response(intel_sdvo, NULL, 0);
+ return intel_sdvo_write_cmd(intel_sdvo, cmd, data, len);
}
static bool
@@ -819,17 +794,13 @@ static void intel_sdvo_get_mode_from_dtd(struct drm_display_mode * mode,
mode->flags |= DRM_MODE_FLAG_PVSYNC;
}
-static bool intel_sdvo_get_supp_encode(struct intel_sdvo *intel_sdvo,
- struct intel_sdvo_encode *encode)
+static bool intel_sdvo_check_supp_encode(struct intel_sdvo *intel_sdvo)
{
- if (intel_sdvo_get_value(intel_sdvo,
- SDVO_CMD_GET_SUPP_ENCODE,
- encode, sizeof(*encode)))
- return true;
+ struct intel_sdvo_encode encode;
- /* non-support means DVI */
- memset(encode, 0, sizeof(*encode));
- return false;
+ return intel_sdvo_get_value(intel_sdvo,
+ SDVO_CMD_GET_SUPP_ENCODE,
+ &encode, sizeof(encode));
}
static bool intel_sdvo_set_encode(struct intel_sdvo *intel_sdvo,
@@ -874,115 +845,33 @@ static void intel_sdvo_dump_hdmi_buf(struct intel_sdvo *intel_sdvo)
}
#endif
-static bool intel_sdvo_set_hdmi_buf(struct intel_sdvo *intel_sdvo,
- int index,
- uint8_t *data, int8_t size, uint8_t tx_rate)
-{
- uint8_t set_buf_index[2];
-
- set_buf_index[0] = index;
- set_buf_index[1] = 0;
-
- if (!intel_sdvo_write_cmd(intel_sdvo, SDVO_CMD_SET_HBUF_INDEX,
- set_buf_index, 2))
- return false;
-
- for (; size > 0; size -= 8) {
- if (!intel_sdvo_write_cmd(intel_sdvo, SDVO_CMD_SET_HBUF_DATA, data, 8))
- return false;
-
- data += 8;
- }
-
- return intel_sdvo_write_cmd(intel_sdvo, SDVO_CMD_SET_HBUF_TXRATE, &tx_rate, 1);
-}
-
-static uint8_t intel_sdvo_calc_hbuf_csum(uint8_t *data, uint8_t size)
-{
- uint8_t csum = 0;
- int i;
-
- for (i = 0; i < size; i++)
- csum += data[i];
-
- return 0x100 - csum;
-}
-
-#define DIP_TYPE_AVI 0x82
-#define DIP_VERSION_AVI 0x2
-#define DIP_LEN_AVI 13
-
-struct dip_infoframe {
- uint8_t type;
- uint8_t version;
- uint8_t len;
- uint8_t checksum;
- union {
- struct {
- /* Packet Byte #1 */
- uint8_t S:2;
- uint8_t B:2;
- uint8_t A:1;
- uint8_t Y:2;
- uint8_t rsvd1:1;
- /* Packet Byte #2 */
- uint8_t R:4;
- uint8_t M:2;
- uint8_t C:2;
- /* Packet Byte #3 */
- uint8_t SC:2;
- uint8_t Q:2;
- uint8_t EC:3;
- uint8_t ITC:1;
- /* Packet Byte #4 */
- uint8_t VIC:7;
- uint8_t rsvd2:1;
- /* Packet Byte #5 */
- uint8_t PR:4;
- uint8_t rsvd3:4;
- /* Packet Byte #6~13 */
- uint16_t top_bar_end;
- uint16_t bottom_bar_start;
- uint16_t left_bar_end;
- uint16_t right_bar_start;
- } avi;
- struct {
- /* Packet Byte #1 */
- uint8_t channel_count:3;
- uint8_t rsvd1:1;
- uint8_t coding_type:4;
- /* Packet Byte #2 */
- uint8_t sample_size:2; /* SS0, SS1 */
- uint8_t sample_frequency:3;
- uint8_t rsvd2:3;
- /* Packet Byte #3 */
- uint8_t coding_type_private:5;
- uint8_t rsvd3:3;
- /* Packet Byte #4 */
- uint8_t channel_allocation;
- /* Packet Byte #5 */
- uint8_t rsvd4:3;
- uint8_t level_shift:4;
- uint8_t downmix_inhibit:1;
- } audio;
- uint8_t payload[28];
- } __attribute__ ((packed)) u;
-} __attribute__((packed));
-
-static bool intel_sdvo_set_avi_infoframe(struct intel_sdvo *intel_sdvo,
- struct drm_display_mode * mode)
+static bool intel_sdvo_set_avi_infoframe(struct intel_sdvo *intel_sdvo)
{
struct dip_infoframe avi_if = {
.type = DIP_TYPE_AVI,
- .version = DIP_VERSION_AVI,
+ .ver = DIP_VERSION_AVI,
.len = DIP_LEN_AVI,
};
+ uint8_t tx_rate = SDVO_HBUF_TX_VSYNC;
+ uint8_t set_buf_index[2] = { 1, 0 };
+ uint64_t *data = (uint64_t *)&avi_if;
+ unsigned i;
+
+ intel_dip_infoframe_csum(&avi_if);
- avi_if.checksum = intel_sdvo_calc_hbuf_csum((uint8_t *)&avi_if,
- 4 + avi_if.len);
- return intel_sdvo_set_hdmi_buf(intel_sdvo, 1, (uint8_t *)&avi_if,
- 4 + avi_if.len,
- SDVO_HBUF_TX_VSYNC);
+ if (!intel_sdvo_write_cmd(intel_sdvo, SDVO_CMD_SET_HBUF_INDEX,
+ set_buf_index, 2))
+ return false;
+
+ for (i = 0; i < sizeof(avi_if); i += 8) {
+ if (!intel_sdvo_write_cmd(intel_sdvo, SDVO_CMD_SET_HBUF_DATA,
+ data, 8))
+ return false;
+ data++;
+ }
+
+ return intel_sdvo_write_cmd(intel_sdvo, SDVO_CMD_SET_HBUF_TXRATE,
+ &tx_rate, 1);
}
static bool intel_sdvo_set_tv_format(struct intel_sdvo *intel_sdvo)
@@ -1022,8 +911,6 @@ intel_sdvo_set_input_timings_for_mode(struct intel_sdvo *intel_sdvo,
struct drm_display_mode *mode,
struct drm_display_mode *adjusted_mode)
{
- struct intel_sdvo_dtd input_dtd;
-
/* Reset the input timing to the screen. Assume always input 0. */
if (!intel_sdvo_set_target_input(intel_sdvo))
return false;
@@ -1035,14 +922,12 @@ intel_sdvo_set_input_timings_for_mode(struct intel_sdvo *intel_sdvo,
return false;
if (!intel_sdvo_get_preferred_input_timing(intel_sdvo,
- &input_dtd))
+ &intel_sdvo->input_dtd))
return false;
- intel_sdvo_get_mode_from_dtd(adjusted_mode, &input_dtd);
- intel_sdvo->sdvo_flags = input_dtd.part2.sdvo_flags;
+ intel_sdvo_get_mode_from_dtd(adjusted_mode, &intel_sdvo->input_dtd);
drm_mode_set_crtcinfo(adjusted_mode, 0);
- mode->clock = adjusted_mode->clock;
return true;
}
@@ -1050,7 +935,8 @@ static bool intel_sdvo_mode_fixup(struct drm_encoder *encoder,
struct drm_display_mode *mode,
struct drm_display_mode *adjusted_mode)
{
- struct intel_sdvo *intel_sdvo = enc_to_intel_sdvo(encoder);
+ struct intel_sdvo *intel_sdvo = to_intel_sdvo(encoder);
+ int multiplier;
/* We need to construct preferred input timings based on our
* output timings. To do that, we have to set the output
@@ -1065,10 +951,8 @@ static bool intel_sdvo_mode_fixup(struct drm_encoder *encoder,
mode,
adjusted_mode);
} else if (intel_sdvo->is_lvds) {
- drm_mode_set_crtcinfo(intel_sdvo->sdvo_lvds_fixed_mode, 0);
-
if (!intel_sdvo_set_output_timings_from_mode(intel_sdvo,
- intel_sdvo->sdvo_lvds_fixed_mode))
+ intel_sdvo->sdvo_lvds_fixed_mode))
return false;
(void) intel_sdvo_set_input_timings_for_mode(intel_sdvo,
@@ -1077,9 +961,10 @@ static bool intel_sdvo_mode_fixup(struct drm_encoder *encoder,
}
/* Make the CRTC code factor in the SDVO pixel multiplier. The
- * SDVO device will be told of the multiplier during mode_set.
+ * SDVO device will factor out the multiplier during mode_set.
*/
- adjusted_mode->clock *= intel_sdvo_get_pixel_multiplier(mode);
+ multiplier = intel_sdvo_get_pixel_multiplier(adjusted_mode);
+ intel_mode_set_pixel_multiplier(adjusted_mode, multiplier);
return true;
}
@@ -1092,11 +977,12 @@ static void intel_sdvo_mode_set(struct drm_encoder *encoder,
struct drm_i915_private *dev_priv = dev->dev_private;
struct drm_crtc *crtc = encoder->crtc;
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
- struct intel_sdvo *intel_sdvo = enc_to_intel_sdvo(encoder);
- u32 sdvox = 0;
- int sdvo_pixel_multiply, rate;
+ struct intel_sdvo *intel_sdvo = to_intel_sdvo(encoder);
+ u32 sdvox;
struct intel_sdvo_in_out_map in_out;
struct intel_sdvo_dtd input_dtd;
+ int pixel_multiplier = intel_mode_get_pixel_multiplier(adjusted_mode);
+ int rate;
if (!mode)
return;
@@ -1114,28 +1000,23 @@ static void intel_sdvo_mode_set(struct drm_encoder *encoder,
SDVO_CMD_SET_IN_OUT_MAP,
&in_out, sizeof(in_out));
- if (intel_sdvo->is_hdmi) {
- if (!intel_sdvo_set_avi_infoframe(intel_sdvo, mode))
- return;
-
- sdvox |= SDVO_AUDIO_ENABLE;
- }
+ /* Set the output timings to the screen */
+ if (!intel_sdvo_set_target_output(intel_sdvo,
+ intel_sdvo->attached_output))
+ return;
/* We have tried to get input timing in mode_fixup, and filled into
- adjusted_mode */
- intel_sdvo_get_dtd_from_mode(&input_dtd, adjusted_mode);
- if (intel_sdvo->is_tv || intel_sdvo->is_lvds)
- input_dtd.part2.sdvo_flags = intel_sdvo->sdvo_flags;
-
- /* If it's a TV, we already set the output timing in mode_fixup.
- * Otherwise, the output timing is equal to the input timing.
+ * adjusted_mode.
*/
- if (!intel_sdvo->is_tv && !intel_sdvo->is_lvds) {
+ if (intel_sdvo->is_tv || intel_sdvo->is_lvds) {
+ input_dtd = intel_sdvo->input_dtd;
+ } else {
/* Set the output timing to the screen */
if (!intel_sdvo_set_target_output(intel_sdvo,
intel_sdvo->attached_output))
return;
+ intel_sdvo_get_dtd_from_mode(&input_dtd, adjusted_mode);
(void) intel_sdvo_set_output_timing(intel_sdvo, &input_dtd);
}
@@ -1143,31 +1024,18 @@ static void intel_sdvo_mode_set(struct drm_encoder *encoder,
if (!intel_sdvo_set_target_input(intel_sdvo))
return;
- if (intel_sdvo->is_tv) {
- if (!intel_sdvo_set_tv_format(intel_sdvo))
- return;
- }
+ if (intel_sdvo->has_hdmi_monitor &&
+ !intel_sdvo_set_avi_infoframe(intel_sdvo))
+ return;
- /* We would like to use intel_sdvo_create_preferred_input_timing() to
- * provide the device with a timing it can support, if it supports that
- * feature. However, presumably we would need to adjust the CRTC to
- * output the preferred timing, and we don't support that currently.
- */
-#if 0
- success = intel_sdvo_create_preferred_input_timing(encoder, clock,
- width, height);
- if (success) {
- struct intel_sdvo_dtd *input_dtd;
+ if (intel_sdvo->is_tv &&
+ !intel_sdvo_set_tv_format(intel_sdvo))
+ return;
- intel_sdvo_get_preferred_input_timing(encoder, &input_dtd);
- intel_sdvo_set_input_timing(encoder, &input_dtd);
- }
-#else
(void) intel_sdvo_set_input_timing(intel_sdvo, &input_dtd);
-#endif
- sdvo_pixel_multiply = intel_sdvo_get_pixel_multiplier(mode);
- switch (sdvo_pixel_multiply) {
+ switch (pixel_multiplier) {
+ default:
case 1: rate = SDVO_CLOCK_RATE_MULT_1X; break;
case 2: rate = SDVO_CLOCK_RATE_MULT_2X; break;
case 4: rate = SDVO_CLOCK_RATE_MULT_4X; break;
@@ -1176,14 +1044,14 @@ static void intel_sdvo_mode_set(struct drm_encoder *encoder,
return;
/* Set the SDVO control regs. */
- if (IS_I965G(dev)) {
- sdvox |= SDVO_BORDER_ENABLE;
+ if (INTEL_INFO(dev)->gen >= 4) {
+ sdvox = SDVO_BORDER_ENABLE;
if (adjusted_mode->flags & DRM_MODE_FLAG_PVSYNC)
sdvox |= SDVO_VSYNC_ACTIVE_HIGH;
if (adjusted_mode->flags & DRM_MODE_FLAG_PHSYNC)
sdvox |= SDVO_HSYNC_ACTIVE_HIGH;
} else {
- sdvox |= I915_READ(intel_sdvo->sdvo_reg);
+ sdvox = I915_READ(intel_sdvo->sdvo_reg);
switch (intel_sdvo->sdvo_reg) {
case SDVOB:
sdvox &= SDVOB_PRESERVE_MASK;
@@ -1196,16 +1064,18 @@ static void intel_sdvo_mode_set(struct drm_encoder *encoder,
}
if (intel_crtc->pipe == 1)
sdvox |= SDVO_PIPE_B_SELECT;
+ if (intel_sdvo->has_hdmi_audio)
+ sdvox |= SDVO_AUDIO_ENABLE;
- if (IS_I965G(dev)) {
+ if (INTEL_INFO(dev)->gen >= 4) {
/* done in crtc_mode_set as the dpll_md reg must be written early */
} else if (IS_I945G(dev) || IS_I945GM(dev) || IS_G33(dev)) {
/* done in crtc_mode_set as it lives inside the dpll register */
} else {
- sdvox |= (sdvo_pixel_multiply - 1) << SDVO_PORT_MULTIPLY_SHIFT;
+ sdvox |= (pixel_multiplier - 1) << SDVO_PORT_MULTIPLY_SHIFT;
}
- if (intel_sdvo->sdvo_flags & SDVO_NEED_TO_STALL)
+ if (input_dtd.part2.sdvo_flags & SDVO_NEED_TO_STALL)
sdvox |= SDVO_STALL_SELECT;
intel_sdvo_write_sdvox(intel_sdvo, sdvox);
}
@@ -1214,7 +1084,7 @@ static void intel_sdvo_dpms(struct drm_encoder *encoder, int mode)
{
struct drm_device *dev = encoder->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
- struct intel_sdvo *intel_sdvo = enc_to_intel_sdvo(encoder);
+ struct intel_sdvo *intel_sdvo = to_intel_sdvo(encoder);
struct intel_crtc *intel_crtc = to_intel_crtc(encoder->crtc);
u32 temp;
@@ -1260,8 +1130,7 @@ static void intel_sdvo_dpms(struct drm_encoder *encoder, int mode)
static int intel_sdvo_mode_valid(struct drm_connector *connector,
struct drm_display_mode *mode)
{
- struct drm_encoder *encoder = intel_attached_encoder(connector);
- struct intel_sdvo *intel_sdvo = enc_to_intel_sdvo(encoder);
+ struct intel_sdvo *intel_sdvo = intel_attached_sdvo(connector);
if (mode->flags & DRM_MODE_FLAG_DBLSCAN)
return MODE_NO_DBLESCAN;
@@ -1285,7 +1154,38 @@ static int intel_sdvo_mode_valid(struct drm_connector *connector,
static bool intel_sdvo_get_capabilities(struct intel_sdvo *intel_sdvo, struct intel_sdvo_caps *caps)
{
- return intel_sdvo_get_value(intel_sdvo, SDVO_CMD_GET_DEVICE_CAPS, caps, sizeof(*caps));
+ if (!intel_sdvo_get_value(intel_sdvo,
+ SDVO_CMD_GET_DEVICE_CAPS,
+ caps, sizeof(*caps)))
+ return false;
+
+ DRM_DEBUG_KMS("SDVO capabilities:\n"
+ " vendor_id: %d\n"
+ " device_id: %d\n"
+ " device_rev_id: %d\n"
+ " sdvo_version_major: %d\n"
+ " sdvo_version_minor: %d\n"
+ " sdvo_inputs_mask: %d\n"
+ " smooth_scaling: %d\n"
+ " sharp_scaling: %d\n"
+ " up_scaling: %d\n"
+ " down_scaling: %d\n"
+ " stall_support: %d\n"
+ " output_flags: %d\n",
+ caps->vendor_id,
+ caps->device_id,
+ caps->device_rev_id,
+ caps->sdvo_version_major,
+ caps->sdvo_version_minor,
+ caps->sdvo_inputs_mask,
+ caps->smooth_scaling,
+ caps->sharp_scaling,
+ caps->up_scaling,
+ caps->down_scaling,
+ caps->stall_support,
+ caps->output_flags);
+
+ return true;
}
/* No use! */
@@ -1389,99 +1289,79 @@ intel_sdvo_multifunc_encoder(struct intel_sdvo *intel_sdvo)
return (caps > 1);
}
-static struct drm_connector *
-intel_find_analog_connector(struct drm_device *dev)
+static struct edid *
+intel_sdvo_get_edid(struct drm_connector *connector)
{
- struct drm_connector *connector;
- struct drm_encoder *encoder;
- struct intel_sdvo *intel_sdvo;
-
- list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
- intel_sdvo = enc_to_intel_sdvo(encoder);
- if (intel_sdvo->base.type == INTEL_OUTPUT_ANALOG) {
- list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
- if (encoder == intel_attached_encoder(connector))
- return connector;
- }
- }
- }
- return NULL;
+ struct intel_sdvo *sdvo = intel_attached_sdvo(connector);
+ return drm_get_edid(connector, &sdvo->ddc);
}
-static int
-intel_analog_is_connected(struct drm_device *dev)
+/* Mac mini hack -- use the same DDC as the analog connector */
+static struct edid *
+intel_sdvo_get_analog_edid(struct drm_connector *connector)
{
- struct drm_connector *analog_connector;
-
- analog_connector = intel_find_analog_connector(dev);
- if (!analog_connector)
- return false;
-
- if (analog_connector->funcs->detect(analog_connector, false) ==
- connector_status_disconnected)
- return false;
+ struct drm_i915_private *dev_priv = connector->dev->dev_private;
- return true;
+ return drm_get_edid(connector,
+ &dev_priv->gmbus[dev_priv->crt_ddc_pin].adapter);
}
enum drm_connector_status
intel_sdvo_hdmi_sink_detect(struct drm_connector *connector)
{
- struct drm_encoder *encoder = intel_attached_encoder(connector);
- struct intel_sdvo *intel_sdvo = enc_to_intel_sdvo(encoder);
- struct intel_sdvo_connector *intel_sdvo_connector = to_intel_sdvo_connector(connector);
- enum drm_connector_status status = connector_status_connected;
- struct edid *edid = NULL;
+ struct intel_sdvo *intel_sdvo = intel_attached_sdvo(connector);
+ enum drm_connector_status status;
+ struct edid *edid;
- edid = drm_get_edid(connector, intel_sdvo->base.ddc_bus);
+ edid = intel_sdvo_get_edid(connector);
- /* This is only applied to SDVO cards with multiple outputs */
if (edid == NULL && intel_sdvo_multifunc_encoder(intel_sdvo)) {
- uint8_t saved_ddc, temp_ddc;
- saved_ddc = intel_sdvo->ddc_bus;
- temp_ddc = intel_sdvo->ddc_bus >> 1;
+ u8 ddc, saved_ddc = intel_sdvo->ddc_bus;
+
/*
* Don't use the 1 as the argument of DDC bus switch to get
* the EDID. It is used for SDVO SPD ROM.
*/
- while(temp_ddc > 1) {
- intel_sdvo->ddc_bus = temp_ddc;
- edid = drm_get_edid(connector, intel_sdvo->base.ddc_bus);
- if (edid) {
- /*
- * When we can get the EDID, maybe it is the
- * correct DDC bus. Update it.
- */
- intel_sdvo->ddc_bus = temp_ddc;
+ for (ddc = intel_sdvo->ddc_bus >> 1; ddc > 1; ddc >>= 1) {
+ intel_sdvo->ddc_bus = ddc;
+ edid = intel_sdvo_get_edid(connector);
+ if (edid)
break;
- }
- temp_ddc >>= 1;
}
+ /*
+ * If we found the EDID on the other bus,
+ * assume that is the correct DDC bus.
+ */
if (edid == NULL)
intel_sdvo->ddc_bus = saved_ddc;
}
- /* when there is no edid and no monitor is connected with VGA
- * port, try to use the CRT ddc to read the EDID for DVI-connector
+
+ /*
+ * When there is no edid and no monitor is connected with VGA
+ * port, try to use the CRT ddc to read the EDID for DVI-connector.
*/
- if (edid == NULL && intel_sdvo->analog_ddc_bus &&
- !intel_analog_is_connected(connector->dev))
- edid = drm_get_edid(connector, intel_sdvo->analog_ddc_bus);
+ if (edid == NULL)
+ edid = intel_sdvo_get_analog_edid(connector);
+ status = connector_status_unknown;
if (edid != NULL) {
- bool is_digital = !!(edid->input & DRM_EDID_INPUT_DIGITAL);
- bool need_digital = !!(intel_sdvo_connector->output_flag & SDVO_TMDS_MASK);
-
/* DDC bus is shared, match EDID to connector type */
- if (is_digital && need_digital)
- intel_sdvo->is_hdmi = drm_detect_hdmi_monitor(edid);
- else if (is_digital != need_digital)
- status = connector_status_disconnected;
-
+ if (edid->input & DRM_EDID_INPUT_DIGITAL) {
+ status = connector_status_connected;
+ if (intel_sdvo->is_hdmi) {
+ intel_sdvo->has_hdmi_monitor = drm_detect_hdmi_monitor(edid);
+ intel_sdvo->has_hdmi_audio = drm_detect_monitor_audio(edid);
+ }
+ }
connector->display_info.raw_edid = NULL;
- } else
- status = connector_status_disconnected;
-
- kfree(edid);
+ kfree(edid);
+ }
+
+ if (status == connector_status_connected) {
+ struct intel_sdvo_connector *intel_sdvo_connector = to_intel_sdvo_connector(connector);
+ if (intel_sdvo_connector->force_audio)
+ intel_sdvo->has_hdmi_audio = intel_sdvo_connector->force_audio > 0;
+ }
return status;
}
@@ -1490,22 +1370,25 @@ static enum drm_connector_status
intel_sdvo_detect(struct drm_connector *connector, bool force)
{
uint16_t response;
- struct drm_encoder *encoder = intel_attached_encoder(connector);
- struct intel_sdvo *intel_sdvo = enc_to_intel_sdvo(encoder);
+ struct intel_sdvo *intel_sdvo = intel_attached_sdvo(connector);
struct intel_sdvo_connector *intel_sdvo_connector = to_intel_sdvo_connector(connector);
enum drm_connector_status ret;
if (!intel_sdvo_write_cmd(intel_sdvo,
- SDVO_CMD_GET_ATTACHED_DISPLAYS, NULL, 0))
+ SDVO_CMD_GET_ATTACHED_DISPLAYS, NULL, 0))
return connector_status_unknown;
- if (intel_sdvo->is_tv) {
- /* add 30ms delay when the output type is SDVO-TV */
+
+ /* add 30ms delay when the output type might be TV */
+ if (intel_sdvo->caps.output_flags &
+ (SDVO_OUTPUT_SVID0 | SDVO_OUTPUT_CVBS0))
mdelay(30);
- }
+
if (!intel_sdvo_read_response(intel_sdvo, &response, 2))
return connector_status_unknown;
- DRM_DEBUG_KMS("SDVO response %d %d\n", response & 0xff, response >> 8);
+ DRM_DEBUG_KMS("SDVO response %d %d [%x]\n",
+ response & 0xff, response >> 8,
+ intel_sdvo_connector->output_flag);
if (response == 0)
return connector_status_disconnected;
@@ -1538,12 +1421,10 @@ intel_sdvo_detect(struct drm_connector *connector, bool force)
static void intel_sdvo_get_ddc_modes(struct drm_connector *connector)
{
- struct drm_encoder *encoder = intel_attached_encoder(connector);
- struct intel_sdvo *intel_sdvo = enc_to_intel_sdvo(encoder);
- int num_modes;
+ struct edid *edid;
/* set the bus switch and get the modes */
- num_modes = intel_ddc_get_modes(connector, intel_sdvo->base.ddc_bus);
+ edid = intel_sdvo_get_edid(connector);
/*
* Mac mini hack. On this device, the DVI-I connector shares one DDC
@@ -1551,12 +1432,16 @@ static void intel_sdvo_get_ddc_modes(struct drm_connector *connector)
* DDC fails, check to see if the analog output is disconnected, in
* which case we'll look there for the digital DDC data.
*/
- if (num_modes == 0 &&
- intel_sdvo->analog_ddc_bus &&
- !intel_analog_is_connected(connector->dev)) {
- /* Switch to the analog ddc bus and try that
- */
- (void) intel_ddc_get_modes(connector, intel_sdvo->analog_ddc_bus);
+ if (edid == NULL)
+ edid = intel_sdvo_get_analog_edid(connector);
+
+ if (edid != NULL) {
+ if (edid->input & DRM_EDID_INPUT_DIGITAL) {
+ drm_mode_connector_update_edid_property(connector, edid);
+ drm_add_edid_modes(connector, edid);
+ }
+ connector->display_info.raw_edid = NULL;
+ kfree(edid);
}
}
@@ -1627,8 +1512,7 @@ struct drm_display_mode sdvo_tv_modes[] = {
static void intel_sdvo_get_tv_modes(struct drm_connector *connector)
{
- struct drm_encoder *encoder = intel_attached_encoder(connector);
- struct intel_sdvo *intel_sdvo = enc_to_intel_sdvo(encoder);
+ struct intel_sdvo *intel_sdvo = intel_attached_sdvo(connector);
struct intel_sdvo_sdtv_resolution_request tv_res;
uint32_t reply = 0, format_map = 0;
int i;
@@ -1644,7 +1528,8 @@ static void intel_sdvo_get_tv_modes(struct drm_connector *connector)
return;
BUILD_BUG_ON(sizeof(tv_res) != 3);
- if (!intel_sdvo_write_cmd(intel_sdvo, SDVO_CMD_GET_SDTV_RESOLUTION_SUPPORT,
+ if (!intel_sdvo_write_cmd(intel_sdvo,
+ SDVO_CMD_GET_SDTV_RESOLUTION_SUPPORT,
&tv_res, sizeof(tv_res)))
return;
if (!intel_sdvo_read_response(intel_sdvo, &reply, 3))
@@ -1662,8 +1547,7 @@ static void intel_sdvo_get_tv_modes(struct drm_connector *connector)
static void intel_sdvo_get_lvds_modes(struct drm_connector *connector)
{
- struct drm_encoder *encoder = intel_attached_encoder(connector);
- struct intel_sdvo *intel_sdvo = enc_to_intel_sdvo(encoder);
+ struct intel_sdvo *intel_sdvo = intel_attached_sdvo(connector);
struct drm_i915_private *dev_priv = connector->dev->dev_private;
struct drm_display_mode *newmode;
@@ -1672,7 +1556,7 @@ static void intel_sdvo_get_lvds_modes(struct drm_connector *connector)
* Assume that the preferred modes are
* arranged in priority order.
*/
- intel_ddc_get_modes(connector, intel_sdvo->base.ddc_bus);
+ intel_ddc_get_modes(connector, intel_sdvo->i2c);
if (list_empty(&connector->probed_modes) == false)
goto end;
@@ -1693,6 +1577,10 @@ end:
if (newmode->type & DRM_MODE_TYPE_PREFERRED) {
intel_sdvo->sdvo_lvds_fixed_mode =
drm_mode_duplicate(connector->dev, newmode);
+
+ drm_mode_set_crtcinfo(intel_sdvo->sdvo_lvds_fixed_mode,
+ 0);
+
intel_sdvo->is_lvds = true;
break;
}
@@ -1775,8 +1663,7 @@ intel_sdvo_set_property(struct drm_connector *connector,
struct drm_property *property,
uint64_t val)
{
- struct drm_encoder *encoder = intel_attached_encoder(connector);
- struct intel_sdvo *intel_sdvo = enc_to_intel_sdvo(encoder);
+ struct intel_sdvo *intel_sdvo = intel_attached_sdvo(connector);
struct intel_sdvo_connector *intel_sdvo_connector = to_intel_sdvo_connector(connector);
uint16_t temp_value;
uint8_t cmd;
@@ -1786,6 +1673,21 @@ intel_sdvo_set_property(struct drm_connector *connector,
if (ret)
return ret;
+ if (property == intel_sdvo_connector->force_audio_property) {
+ if (val == intel_sdvo_connector->force_audio)
+ return 0;
+
+ intel_sdvo_connector->force_audio = val;
+
+ if (val > 0 && intel_sdvo->has_hdmi_audio)
+ return 0;
+ if (val < 0 && !intel_sdvo->has_hdmi_audio)
+ return 0;
+
+ intel_sdvo->has_hdmi_audio = val > 0;
+ goto done;
+ }
+
#define CHECK_PROPERTY(name, NAME) \
if (intel_sdvo_connector->name == property) { \
if (intel_sdvo_connector->cur_##name == temp_value) return 0; \
@@ -1879,9 +1781,8 @@ set_value:
done:
- if (encoder->crtc) {
- struct drm_crtc *crtc = encoder->crtc;
-
+ if (intel_sdvo->base.base.crtc) {
+ struct drm_crtc *crtc = intel_sdvo->base.base.crtc;
drm_crtc_helper_set_mode(crtc, &crtc->mode, crtc->x,
crtc->y, crtc->fb);
}
@@ -1909,20 +1810,18 @@ static const struct drm_connector_funcs intel_sdvo_connector_funcs = {
static const struct drm_connector_helper_funcs intel_sdvo_connector_helper_funcs = {
.get_modes = intel_sdvo_get_modes,
.mode_valid = intel_sdvo_mode_valid,
- .best_encoder = intel_attached_encoder,
+ .best_encoder = intel_best_encoder,
};
static void intel_sdvo_enc_destroy(struct drm_encoder *encoder)
{
- struct intel_sdvo *intel_sdvo = enc_to_intel_sdvo(encoder);
-
- if (intel_sdvo->analog_ddc_bus)
- intel_i2c_destroy(intel_sdvo->analog_ddc_bus);
+ struct intel_sdvo *intel_sdvo = to_intel_sdvo(encoder);
if (intel_sdvo->sdvo_lvds_fixed_mode != NULL)
drm_mode_destroy(encoder->dev,
intel_sdvo->sdvo_lvds_fixed_mode);
+ i2c_del_adapter(&intel_sdvo->ddc);
intel_encoder_destroy(encoder);
}
@@ -1990,54 +1889,49 @@ intel_sdvo_select_ddc_bus(struct drm_i915_private *dev_priv,
intel_sdvo_guess_ddc_bus(sdvo);
}
-static bool
-intel_sdvo_get_digital_encoding_mode(struct intel_sdvo *intel_sdvo, int device)
+static void
+intel_sdvo_select_i2c_bus(struct drm_i915_private *dev_priv,
+ struct intel_sdvo *sdvo, u32 reg)
{
- return intel_sdvo_set_target_output(intel_sdvo,
- device == 0 ? SDVO_OUTPUT_TMDS0 : SDVO_OUTPUT_TMDS1) &&
- intel_sdvo_get_value(intel_sdvo, SDVO_CMD_GET_ENCODE,
- &intel_sdvo->is_hdmi, 1);
-}
+ struct sdvo_device_mapping *mapping;
+ u8 pin, speed;
-static struct intel_sdvo *
-intel_sdvo_chan_to_intel_sdvo(struct intel_i2c_chan *chan)
-{
- struct drm_device *dev = chan->drm_dev;
- struct drm_encoder *encoder;
+ if (IS_SDVOB(reg))
+ mapping = &dev_priv->sdvo_mappings[0];
+ else
+ mapping = &dev_priv->sdvo_mappings[1];
- list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
- struct intel_sdvo *intel_sdvo = enc_to_intel_sdvo(encoder);
- if (intel_sdvo->base.ddc_bus == &chan->adapter)
- return intel_sdvo;
+ pin = GMBUS_PORT_DPB;
+ speed = GMBUS_RATE_1MHZ >> 8;
+ if (mapping->initialized) {
+ pin = mapping->i2c_pin;
+ speed = mapping->i2c_speed;
}
- return NULL;
+ sdvo->i2c = &dev_priv->gmbus[pin].adapter;
+ intel_gmbus_set_speed(sdvo->i2c, speed);
+ intel_gmbus_force_bit(sdvo->i2c, true);
}
-static int intel_sdvo_master_xfer(struct i2c_adapter *i2c_adap,
- struct i2c_msg msgs[], int num)
+static bool
+intel_sdvo_is_hdmi_connector(struct intel_sdvo *intel_sdvo, int device)
{
- struct intel_sdvo *intel_sdvo;
- struct i2c_algo_bit_data *algo_data;
- const struct i2c_algorithm *algo;
+ int is_hdmi;
+
+ if (!intel_sdvo_check_supp_encode(intel_sdvo))
+ return false;
- algo_data = (struct i2c_algo_bit_data *)i2c_adap->algo_data;
- intel_sdvo =
- intel_sdvo_chan_to_intel_sdvo((struct intel_i2c_chan *)
- (algo_data->data));
- if (intel_sdvo == NULL)
- return -EINVAL;
+ if (!intel_sdvo_set_target_output(intel_sdvo,
+ device == 0 ? SDVO_OUTPUT_TMDS0 : SDVO_OUTPUT_TMDS1))
+ return false;
- algo = intel_sdvo->base.i2c_bus->algo;
+ is_hdmi = 0;
+ if (!intel_sdvo_get_value(intel_sdvo, SDVO_CMD_GET_ENCODE, &is_hdmi, 1))
+ return false;
- intel_sdvo_set_control_bus_switch(intel_sdvo, intel_sdvo->ddc_bus);
- return algo->master_xfer(i2c_adap, msgs, num);
+ return !!is_hdmi;
}
-static struct i2c_algorithm intel_sdvo_i2c_bit_algo = {
- .master_xfer = intel_sdvo_master_xfer,
-};
-
static u8
intel_sdvo_get_slave_addr(struct drm_device *dev, int sdvo_reg)
{
@@ -2076,26 +1970,44 @@ intel_sdvo_get_slave_addr(struct drm_device *dev, int sdvo_reg)
}
static void
-intel_sdvo_connector_init(struct drm_encoder *encoder,
- struct drm_connector *connector)
+intel_sdvo_connector_init(struct intel_sdvo_connector *connector,
+ struct intel_sdvo *encoder)
{
- drm_connector_init(encoder->dev, connector, &intel_sdvo_connector_funcs,
- connector->connector_type);
+ drm_connector_init(encoder->base.base.dev,
+ &connector->base.base,
+ &intel_sdvo_connector_funcs,
+ connector->base.base.connector_type);
+
+ drm_connector_helper_add(&connector->base.base,
+ &intel_sdvo_connector_helper_funcs);
- drm_connector_helper_add(connector, &intel_sdvo_connector_helper_funcs);
+ connector->base.base.interlace_allowed = 0;
+ connector->base.base.doublescan_allowed = 0;
+ connector->base.base.display_info.subpixel_order = SubPixelHorizontalRGB;
- connector->interlace_allowed = 0;
- connector->doublescan_allowed = 0;
- connector->display_info.subpixel_order = SubPixelHorizontalRGB;
+ intel_connector_attach_encoder(&connector->base, &encoder->base);
+ drm_sysfs_connector_add(&connector->base.base);
+}
+
+static void
+intel_sdvo_add_hdmi_properties(struct intel_sdvo_connector *connector)
+{
+ struct drm_device *dev = connector->base.base.dev;
- drm_mode_connector_attach_encoder(connector, encoder);
- drm_sysfs_connector_add(connector);
+ connector->force_audio_property =
+ drm_property_create(dev, DRM_MODE_PROP_RANGE, "force_audio", 2);
+ if (connector->force_audio_property) {
+ connector->force_audio_property->values[0] = -1;
+ connector->force_audio_property->values[1] = 1;
+ drm_connector_attach_property(&connector->base.base,
+ connector->force_audio_property, 0);
+ }
}
static bool
intel_sdvo_dvi_init(struct intel_sdvo *intel_sdvo, int device)
{
- struct drm_encoder *encoder = &intel_sdvo->base.enc;
+ struct drm_encoder *encoder = &intel_sdvo->base.base;
struct drm_connector *connector;
struct intel_connector *intel_connector;
struct intel_sdvo_connector *intel_sdvo_connector;
@@ -2118,19 +2030,20 @@ intel_sdvo_dvi_init(struct intel_sdvo *intel_sdvo, int device)
encoder->encoder_type = DRM_MODE_ENCODER_TMDS;
connector->connector_type = DRM_MODE_CONNECTOR_DVID;
- if (intel_sdvo_get_supp_encode(intel_sdvo, &intel_sdvo->encode)
- && intel_sdvo_get_digital_encoding_mode(intel_sdvo, device)
- && intel_sdvo->is_hdmi) {
+ if (intel_sdvo_is_hdmi_connector(intel_sdvo, device)) {
/* enable hdmi encoding mode if supported */
intel_sdvo_set_encode(intel_sdvo, SDVO_ENCODE_HDMI);
intel_sdvo_set_colorimetry(intel_sdvo,
SDVO_COLORIMETRY_RGB256);
connector->connector_type = DRM_MODE_CONNECTOR_HDMIA;
+
+ intel_sdvo_add_hdmi_properties(intel_sdvo_connector);
+ intel_sdvo->is_hdmi = true;
}
intel_sdvo->base.clone_mask = ((1 << INTEL_SDVO_NON_TV_CLONE_BIT) |
(1 << INTEL_ANALOG_CLONE_BIT));
- intel_sdvo_connector_init(encoder, connector);
+ intel_sdvo_connector_init(intel_sdvo_connector, intel_sdvo);
return true;
}
@@ -2138,36 +2051,36 @@ intel_sdvo_dvi_init(struct intel_sdvo *intel_sdvo, int device)
static bool
intel_sdvo_tv_init(struct intel_sdvo *intel_sdvo, int type)
{
- struct drm_encoder *encoder = &intel_sdvo->base.enc;
- struct drm_connector *connector;
- struct intel_connector *intel_connector;
- struct intel_sdvo_connector *intel_sdvo_connector;
+ struct drm_encoder *encoder = &intel_sdvo->base.base;
+ struct drm_connector *connector;
+ struct intel_connector *intel_connector;
+ struct intel_sdvo_connector *intel_sdvo_connector;
intel_sdvo_connector = kzalloc(sizeof(struct intel_sdvo_connector), GFP_KERNEL);
if (!intel_sdvo_connector)
return false;
intel_connector = &intel_sdvo_connector->base;
- connector = &intel_connector->base;
- encoder->encoder_type = DRM_MODE_ENCODER_TVDAC;
- connector->connector_type = DRM_MODE_CONNECTOR_SVIDEO;
+ connector = &intel_connector->base;
+ encoder->encoder_type = DRM_MODE_ENCODER_TVDAC;
+ connector->connector_type = DRM_MODE_CONNECTOR_SVIDEO;
- intel_sdvo->controlled_output |= type;
- intel_sdvo_connector->output_flag = type;
+ intel_sdvo->controlled_output |= type;
+ intel_sdvo_connector->output_flag = type;
- intel_sdvo->is_tv = true;
- intel_sdvo->base.needs_tv_clock = true;
- intel_sdvo->base.clone_mask = 1 << INTEL_SDVO_TV_CLONE_BIT;
+ intel_sdvo->is_tv = true;
+ intel_sdvo->base.needs_tv_clock = true;
+ intel_sdvo->base.clone_mask = 1 << INTEL_SDVO_TV_CLONE_BIT;
- intel_sdvo_connector_init(encoder, connector);
+ intel_sdvo_connector_init(intel_sdvo_connector, intel_sdvo);
- if (!intel_sdvo_tv_create_property(intel_sdvo, intel_sdvo_connector, type))
+ if (!intel_sdvo_tv_create_property(intel_sdvo, intel_sdvo_connector, type))
goto err;
- if (!intel_sdvo_create_enhance_property(intel_sdvo, intel_sdvo_connector))
+ if (!intel_sdvo_create_enhance_property(intel_sdvo, intel_sdvo_connector))
goto err;
- return true;
+ return true;
err:
intel_sdvo_destroy(connector);
@@ -2177,43 +2090,44 @@ err:
static bool
intel_sdvo_analog_init(struct intel_sdvo *intel_sdvo, int device)
{
- struct drm_encoder *encoder = &intel_sdvo->base.enc;
- struct drm_connector *connector;
- struct intel_connector *intel_connector;
- struct intel_sdvo_connector *intel_sdvo_connector;
+ struct drm_encoder *encoder = &intel_sdvo->base.base;
+ struct drm_connector *connector;
+ struct intel_connector *intel_connector;
+ struct intel_sdvo_connector *intel_sdvo_connector;
intel_sdvo_connector = kzalloc(sizeof(struct intel_sdvo_connector), GFP_KERNEL);
if (!intel_sdvo_connector)
return false;
intel_connector = &intel_sdvo_connector->base;
- connector = &intel_connector->base;
+ connector = &intel_connector->base;
connector->polled = DRM_CONNECTOR_POLL_CONNECT;
- encoder->encoder_type = DRM_MODE_ENCODER_DAC;
- connector->connector_type = DRM_MODE_CONNECTOR_VGA;
-
- if (device == 0) {
- intel_sdvo->controlled_output |= SDVO_OUTPUT_RGB0;
- intel_sdvo_connector->output_flag = SDVO_OUTPUT_RGB0;
- } else if (device == 1) {
- intel_sdvo->controlled_output |= SDVO_OUTPUT_RGB1;
- intel_sdvo_connector->output_flag = SDVO_OUTPUT_RGB1;
- }
-
- intel_sdvo->base.clone_mask = ((1 << INTEL_SDVO_NON_TV_CLONE_BIT) |
+ encoder->encoder_type = DRM_MODE_ENCODER_DAC;
+ connector->connector_type = DRM_MODE_CONNECTOR_VGA;
+
+ if (device == 0) {
+ intel_sdvo->controlled_output |= SDVO_OUTPUT_RGB0;
+ intel_sdvo_connector->output_flag = SDVO_OUTPUT_RGB0;
+ } else if (device == 1) {
+ intel_sdvo->controlled_output |= SDVO_OUTPUT_RGB1;
+ intel_sdvo_connector->output_flag = SDVO_OUTPUT_RGB1;
+ }
+
+ intel_sdvo->base.clone_mask = ((1 << INTEL_SDVO_NON_TV_CLONE_BIT) |
(1 << INTEL_ANALOG_CLONE_BIT));
- intel_sdvo_connector_init(encoder, connector);
- return true;
+ intel_sdvo_connector_init(intel_sdvo_connector,
+ intel_sdvo);
+ return true;
}
static bool
intel_sdvo_lvds_init(struct intel_sdvo *intel_sdvo, int device)
{
- struct drm_encoder *encoder = &intel_sdvo->base.enc;
- struct drm_connector *connector;
- struct intel_connector *intel_connector;
- struct intel_sdvo_connector *intel_sdvo_connector;
+ struct drm_encoder *encoder = &intel_sdvo->base.base;
+ struct drm_connector *connector;
+ struct intel_connector *intel_connector;
+ struct intel_sdvo_connector *intel_sdvo_connector;
intel_sdvo_connector = kzalloc(sizeof(struct intel_sdvo_connector), GFP_KERNEL);
if (!intel_sdvo_connector)
@@ -2221,22 +2135,22 @@ intel_sdvo_lvds_init(struct intel_sdvo *intel_sdvo, int device)
intel_connector = &intel_sdvo_connector->base;
connector = &intel_connector->base;
- encoder->encoder_type = DRM_MODE_ENCODER_LVDS;
- connector->connector_type = DRM_MODE_CONNECTOR_LVDS;
-
- if (device == 0) {
- intel_sdvo->controlled_output |= SDVO_OUTPUT_LVDS0;
- intel_sdvo_connector->output_flag = SDVO_OUTPUT_LVDS0;
- } else if (device == 1) {
- intel_sdvo->controlled_output |= SDVO_OUTPUT_LVDS1;
- intel_sdvo_connector->output_flag = SDVO_OUTPUT_LVDS1;
- }
-
- intel_sdvo->base.clone_mask = ((1 << INTEL_ANALOG_CLONE_BIT) |
+ encoder->encoder_type = DRM_MODE_ENCODER_LVDS;
+ connector->connector_type = DRM_MODE_CONNECTOR_LVDS;
+
+ if (device == 0) {
+ intel_sdvo->controlled_output |= SDVO_OUTPUT_LVDS0;
+ intel_sdvo_connector->output_flag = SDVO_OUTPUT_LVDS0;
+ } else if (device == 1) {
+ intel_sdvo->controlled_output |= SDVO_OUTPUT_LVDS1;
+ intel_sdvo_connector->output_flag = SDVO_OUTPUT_LVDS1;
+ }
+
+ intel_sdvo->base.clone_mask = ((1 << INTEL_ANALOG_CLONE_BIT) |
(1 << INTEL_SDVO_LVDS_CLONE_BIT));
- intel_sdvo_connector_init(encoder, connector);
- if (!intel_sdvo_create_enhance_property(intel_sdvo, intel_sdvo_connector))
+ intel_sdvo_connector_init(intel_sdvo_connector, intel_sdvo);
+ if (!intel_sdvo_create_enhance_property(intel_sdvo, intel_sdvo_connector))
goto err;
return true;
@@ -2307,7 +2221,7 @@ static bool intel_sdvo_tv_create_property(struct intel_sdvo *intel_sdvo,
struct intel_sdvo_connector *intel_sdvo_connector,
int type)
{
- struct drm_device *dev = intel_sdvo->base.enc.dev;
+ struct drm_device *dev = intel_sdvo->base.base.dev;
struct intel_sdvo_tv_format format;
uint32_t format_map, i;
@@ -2373,7 +2287,7 @@ intel_sdvo_create_enhance_property_tv(struct intel_sdvo *intel_sdvo,
struct intel_sdvo_connector *intel_sdvo_connector,
struct intel_sdvo_enhancements_reply enhancements)
{
- struct drm_device *dev = intel_sdvo->base.enc.dev;
+ struct drm_device *dev = intel_sdvo->base.base.dev;
struct drm_connector *connector = &intel_sdvo_connector->base.base;
uint16_t response, data_value[2];
@@ -2502,7 +2416,7 @@ intel_sdvo_create_enhance_property_lvds(struct intel_sdvo *intel_sdvo,
struct intel_sdvo_connector *intel_sdvo_connector,
struct intel_sdvo_enhancements_reply enhancements)
{
- struct drm_device *dev = intel_sdvo->base.enc.dev;
+ struct drm_device *dev = intel_sdvo->base.base.dev;
struct drm_connector *connector = &intel_sdvo_connector->base.base;
uint16_t response, data_value[2];
@@ -2535,7 +2449,43 @@ static bool intel_sdvo_create_enhance_property(struct intel_sdvo *intel_sdvo,
return intel_sdvo_create_enhance_property_lvds(intel_sdvo, intel_sdvo_connector, enhancements.reply);
else
return true;
+}
+static int intel_sdvo_ddc_proxy_xfer(struct i2c_adapter *adapter,
+ struct i2c_msg *msgs,
+ int num)
+{
+ struct intel_sdvo *sdvo = adapter->algo_data;
+
+ if (!intel_sdvo_set_control_bus_switch(sdvo, sdvo->ddc_bus))
+ return -EIO;
+
+ return sdvo->i2c->algo->master_xfer(sdvo->i2c, msgs, num);
+}
+
+static u32 intel_sdvo_ddc_proxy_func(struct i2c_adapter *adapter)
+{
+ struct intel_sdvo *sdvo = adapter->algo_data;
+ return sdvo->i2c->algo->functionality(sdvo->i2c);
+}
+
+static const struct i2c_algorithm intel_sdvo_ddc_proxy = {
+ .master_xfer = intel_sdvo_ddc_proxy_xfer,
+ .functionality = intel_sdvo_ddc_proxy_func
+};
+
+static bool
+intel_sdvo_init_ddc_proxy(struct intel_sdvo *sdvo,
+ struct drm_device *dev)
+{
+ sdvo->ddc.owner = THIS_MODULE;
+ sdvo->ddc.class = I2C_CLASS_DDC;
+ snprintf(sdvo->ddc.name, I2C_NAME_SIZE, "SDVO DDC proxy");
+ sdvo->ddc.dev.parent = &dev->pdev->dev;
+ sdvo->ddc.algo_data = sdvo;
+ sdvo->ddc.algo = &intel_sdvo_ddc_proxy;
+
+ return i2c_add_adapter(&sdvo->ddc) == 0;
}
bool intel_sdvo_init(struct drm_device *dev, int sdvo_reg)
@@ -2543,95 +2493,66 @@ bool intel_sdvo_init(struct drm_device *dev, int sdvo_reg)
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_encoder *intel_encoder;
struct intel_sdvo *intel_sdvo;
- u8 ch[0x40];
int i;
- u32 i2c_reg, ddc_reg, analog_ddc_reg;
intel_sdvo = kzalloc(sizeof(struct intel_sdvo), GFP_KERNEL);
if (!intel_sdvo)
return false;
+ if (!intel_sdvo_init_ddc_proxy(intel_sdvo, dev)) {
+ kfree(intel_sdvo);
+ return false;
+ }
+
intel_sdvo->sdvo_reg = sdvo_reg;
intel_encoder = &intel_sdvo->base;
intel_encoder->type = INTEL_OUTPUT_SDVO;
+ /* encoder type will be decided later */
+ drm_encoder_init(dev, &intel_encoder->base, &intel_sdvo_enc_funcs, 0);
- if (HAS_PCH_SPLIT(dev)) {
- i2c_reg = PCH_GPIOE;
- ddc_reg = PCH_GPIOE;
- analog_ddc_reg = PCH_GPIOA;
- } else {
- i2c_reg = GPIOE;
- ddc_reg = GPIOE;
- analog_ddc_reg = GPIOA;
- }
-
- /* setup the DDC bus. */
- if (IS_SDVOB(sdvo_reg))
- intel_encoder->i2c_bus = intel_i2c_create(dev, i2c_reg, "SDVOCTRL_E for SDVOB");
- else
- intel_encoder->i2c_bus = intel_i2c_create(dev, i2c_reg, "SDVOCTRL_E for SDVOC");
-
- if (!intel_encoder->i2c_bus)
- goto err_inteloutput;
-
- intel_sdvo->slave_addr = intel_sdvo_get_slave_addr(dev, sdvo_reg);
-
- /* Save the bit-banging i2c functionality for use by the DDC wrapper */
- intel_sdvo_i2c_bit_algo.functionality = intel_encoder->i2c_bus->algo->functionality;
+ intel_sdvo->slave_addr = intel_sdvo_get_slave_addr(dev, sdvo_reg) >> 1;
+ intel_sdvo_select_i2c_bus(dev_priv, intel_sdvo, sdvo_reg);
/* Read the regs to test if we can talk to the device */
for (i = 0; i < 0x40; i++) {
- if (!intel_sdvo_read_byte(intel_sdvo, i, &ch[i])) {
+ u8 byte;
+
+ if (!intel_sdvo_read_byte(intel_sdvo, i, &byte)) {
DRM_DEBUG_KMS("No SDVO device found on SDVO%c\n",
IS_SDVOB(sdvo_reg) ? 'B' : 'C');
- goto err_i2c;
+ goto err;
}
}
- /* setup the DDC bus. */
- if (IS_SDVOB(sdvo_reg)) {
- intel_encoder->ddc_bus = intel_i2c_create(dev, ddc_reg, "SDVOB DDC BUS");
- intel_sdvo->analog_ddc_bus = intel_i2c_create(dev, analog_ddc_reg,
- "SDVOB/VGA DDC BUS");
+ if (IS_SDVOB(sdvo_reg))
dev_priv->hotplug_supported_mask |= SDVOB_HOTPLUG_INT_STATUS;
- } else {
- intel_encoder->ddc_bus = intel_i2c_create(dev, ddc_reg, "SDVOC DDC BUS");
- intel_sdvo->analog_ddc_bus = intel_i2c_create(dev, analog_ddc_reg,
- "SDVOC/VGA DDC BUS");
+ else
dev_priv->hotplug_supported_mask |= SDVOC_HOTPLUG_INT_STATUS;
- }
- if (intel_encoder->ddc_bus == NULL || intel_sdvo->analog_ddc_bus == NULL)
- goto err_i2c;
- /* Wrap with our custom algo which switches to DDC mode */
- intel_encoder->ddc_bus->algo = &intel_sdvo_i2c_bit_algo;
-
- /* encoder type will be decided later */
- drm_encoder_init(dev, &intel_encoder->enc, &intel_sdvo_enc_funcs, 0);
- drm_encoder_helper_add(&intel_encoder->enc, &intel_sdvo_helper_funcs);
+ drm_encoder_helper_add(&intel_encoder->base, &intel_sdvo_helper_funcs);
/* In default case sdvo lvds is false */
if (!intel_sdvo_get_capabilities(intel_sdvo, &intel_sdvo->caps))
- goto err_enc;
+ goto err;
if (intel_sdvo_output_setup(intel_sdvo,
intel_sdvo->caps.output_flags) != true) {
DRM_DEBUG_KMS("SDVO output failed to setup on SDVO%c\n",
IS_SDVOB(sdvo_reg) ? 'B' : 'C');
- goto err_enc;
+ goto err;
}
intel_sdvo_select_ddc_bus(dev_priv, intel_sdvo, sdvo_reg);
/* Set the input timing to the screen. Assume always input 0. */
if (!intel_sdvo_set_target_input(intel_sdvo))
- goto err_enc;
+ goto err;
if (!intel_sdvo_get_input_pixel_clock_range(intel_sdvo,
&intel_sdvo->pixel_clock_min,
&intel_sdvo->pixel_clock_max))
- goto err_enc;
+ goto err;
DRM_DEBUG_KMS("%s device VID/DID: %02X:%02X.%02X, "
"clock range %dMHz - %dMHz, "
@@ -2651,16 +2572,9 @@ bool intel_sdvo_init(struct drm_device *dev, int sdvo_reg)
(SDVO_OUTPUT_TMDS1 | SDVO_OUTPUT_RGB1) ? 'Y' : 'N');
return true;
-err_enc:
- drm_encoder_cleanup(&intel_encoder->enc);
-err_i2c:
- if (intel_sdvo->analog_ddc_bus != NULL)
- intel_i2c_destroy(intel_sdvo->analog_ddc_bus);
- if (intel_encoder->ddc_bus != NULL)
- intel_i2c_destroy(intel_encoder->ddc_bus);
- if (intel_encoder->i2c_bus != NULL)
- intel_i2c_destroy(intel_encoder->i2c_bus);
-err_inteloutput:
+err:
+ drm_encoder_cleanup(&intel_encoder->base);
+ i2c_del_adapter(&intel_sdvo->ddc);
kfree(intel_sdvo);
return false;
diff --git a/drivers/gpu/drm/i915/intel_tv.c b/drivers/gpu/drm/i915/intel_tv.c
index 4a117e318a73..2f7681989316 100644
--- a/drivers/gpu/drm/i915/intel_tv.c
+++ b/drivers/gpu/drm/i915/intel_tv.c
@@ -48,7 +48,7 @@ struct intel_tv {
struct intel_encoder base;
int type;
- char *tv_format;
+ const char *tv_format;
int margin[4];
u32 save_TV_H_CTL_1;
u32 save_TV_H_CTL_2;
@@ -350,7 +350,7 @@ static const struct video_levels component_levels = {
struct tv_mode {
- char *name;
+ const char *name;
int clock;
int refresh; /* in millihertz (for precision) */
u32 oversample;
@@ -900,7 +900,14 @@ static const struct tv_mode tv_modes[] = {
static struct intel_tv *enc_to_intel_tv(struct drm_encoder *encoder)
{
- return container_of(enc_to_intel_encoder(encoder), struct intel_tv, base);
+ return container_of(encoder, struct intel_tv, base.base);
+}
+
+static struct intel_tv *intel_attached_tv(struct drm_connector *connector)
+{
+ return container_of(intel_attached_encoder(connector),
+ struct intel_tv,
+ base);
}
static void
@@ -922,7 +929,7 @@ intel_tv_dpms(struct drm_encoder *encoder, int mode)
}
static const struct tv_mode *
-intel_tv_mode_lookup (char *tv_format)
+intel_tv_mode_lookup(const char *tv_format)
{
int i;
@@ -936,22 +943,23 @@ intel_tv_mode_lookup (char *tv_format)
}
static const struct tv_mode *
-intel_tv_mode_find (struct intel_tv *intel_tv)
+intel_tv_mode_find(struct intel_tv *intel_tv)
{
return intel_tv_mode_lookup(intel_tv->tv_format);
}
static enum drm_mode_status
-intel_tv_mode_valid(struct drm_connector *connector, struct drm_display_mode *mode)
+intel_tv_mode_valid(struct drm_connector *connector,
+ struct drm_display_mode *mode)
{
- struct drm_encoder *encoder = intel_attached_encoder(connector);
- struct intel_tv *intel_tv = enc_to_intel_tv(encoder);
+ struct intel_tv *intel_tv = intel_attached_tv(connector);
const struct tv_mode *tv_mode = intel_tv_mode_find(intel_tv);
/* Ensure TV refresh is close to desired refresh */
if (tv_mode && abs(tv_mode->refresh - drm_mode_vrefresh(mode) * 1000)
< 1000)
return MODE_OK;
+
return MODE_CLOCK_RANGE;
}
@@ -1131,7 +1139,7 @@ intel_tv_mode_set(struct drm_encoder *encoder, struct drm_display_mode *mode,
color_conversion->av);
}
- if (IS_I965G(dev))
+ if (INTEL_INFO(dev)->gen >= 4)
I915_WRITE(TV_CLR_KNOBS, 0x00404000);
else
I915_WRITE(TV_CLR_KNOBS, 0x00606000);
@@ -1157,12 +1165,12 @@ intel_tv_mode_set(struct drm_encoder *encoder, struct drm_display_mode *mode,
I915_WRITE(dspbase_reg, I915_READ(dspbase_reg));
/* Wait for vblank for the disable to take effect */
- if (!IS_I9XX(dev))
+ if (IS_GEN2(dev))
intel_wait_for_vblank(dev, intel_crtc->pipe);
- I915_WRITE(pipeconf_reg, pipeconf & ~PIPEACONF_ENABLE);
+ I915_WRITE(pipeconf_reg, pipeconf & ~PIPECONF_ENABLE);
/* Wait for vblank for the disable to take effect. */
- intel_wait_for_vblank(dev, intel_crtc->pipe);
+ intel_wait_for_pipe_off(dev, intel_crtc->pipe);
/* Filter ctl must be set before TV_WIN_SIZE */
I915_WRITE(TV_FILTER_CTL_1, TV_AUTO_SCALE);
@@ -1196,7 +1204,7 @@ intel_tv_mode_set(struct drm_encoder *encoder, struct drm_display_mode *mode,
I915_WRITE(TV_V_LUMA_0 + (i<<2), tv_mode->filter_table[j++]);
for (i = 0; i < 43; i++)
I915_WRITE(TV_V_CHROMA_0 + (i<<2), tv_mode->filter_table[j++]);
- I915_WRITE(TV_DAC, 0);
+ I915_WRITE(TV_DAC, I915_READ(TV_DAC) & TV_DAC_SAVE);
I915_WRITE(TV_CTL, tv_ctl);
}
@@ -1228,15 +1236,13 @@ static const struct drm_display_mode reported_modes[] = {
static int
intel_tv_detect_type (struct intel_tv *intel_tv)
{
- struct drm_encoder *encoder = &intel_tv->base.enc;
+ struct drm_encoder *encoder = &intel_tv->base.base;
struct drm_device *dev = encoder->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
unsigned long irqflags;
u32 tv_ctl, save_tv_ctl;
u32 tv_dac, save_tv_dac;
- int type = DRM_MODE_CONNECTOR_Unknown;
-
- tv_dac = I915_READ(TV_DAC);
+ int type;
/* Disable TV interrupts around load detect or we'll recurse */
spin_lock_irqsave(&dev_priv->user_irq_lock, irqflags);
@@ -1244,19 +1250,14 @@ intel_tv_detect_type (struct intel_tv *intel_tv)
PIPE_HOTPLUG_TV_INTERRUPT_ENABLE);
spin_unlock_irqrestore(&dev_priv->user_irq_lock, irqflags);
- /*
- * Detect TV by polling)
- */
- save_tv_dac = tv_dac;
- tv_ctl = I915_READ(TV_CTL);
- save_tv_ctl = tv_ctl;
- tv_ctl &= ~TV_ENC_ENABLE;
- tv_ctl &= ~TV_TEST_MODE_MASK;
+ save_tv_dac = tv_dac = I915_READ(TV_DAC);
+ save_tv_ctl = tv_ctl = I915_READ(TV_CTL);
+
+ /* Poll for TV detection */
+ tv_ctl &= ~(TV_ENC_ENABLE | TV_TEST_MODE_MASK);
tv_ctl |= TV_TEST_MODE_MONITOR_DETECT;
- tv_dac &= ~TVDAC_SENSE_MASK;
- tv_dac &= ~DAC_A_MASK;
- tv_dac &= ~DAC_B_MASK;
- tv_dac &= ~DAC_C_MASK;
+
+ tv_dac &= ~(TVDAC_SENSE_MASK | DAC_A_MASK | DAC_B_MASK | DAC_C_MASK);
tv_dac |= (TVDAC_STATE_CHG_EN |
TVDAC_A_SENSE_CTL |
TVDAC_B_SENSE_CTL |
@@ -1265,37 +1266,40 @@ intel_tv_detect_type (struct intel_tv *intel_tv)
DAC_A_0_7_V |
DAC_B_0_7_V |
DAC_C_0_7_V);
+
I915_WRITE(TV_CTL, tv_ctl);
I915_WRITE(TV_DAC, tv_dac);
POSTING_READ(TV_DAC);
- msleep(20);
- tv_dac = I915_READ(TV_DAC);
- I915_WRITE(TV_DAC, save_tv_dac);
- I915_WRITE(TV_CTL, save_tv_ctl);
- POSTING_READ(TV_CTL);
- msleep(20);
+ intel_wait_for_vblank(intel_tv->base.base.dev,
+ to_intel_crtc(intel_tv->base.base.crtc)->pipe);
- /*
- * A B C
- * 0 1 1 Composite
- * 1 0 X svideo
- * 0 0 0 Component
- */
- if ((tv_dac & TVDAC_SENSE_MASK) == (TVDAC_B_SENSE | TVDAC_C_SENSE)) {
- DRM_DEBUG_KMS("Detected Composite TV connection\n");
- type = DRM_MODE_CONNECTOR_Composite;
- } else if ((tv_dac & (TVDAC_A_SENSE|TVDAC_B_SENSE)) == TVDAC_A_SENSE) {
- DRM_DEBUG_KMS("Detected S-Video TV connection\n");
- type = DRM_MODE_CONNECTOR_SVIDEO;
- } else if ((tv_dac & TVDAC_SENSE_MASK) == 0) {
- DRM_DEBUG_KMS("Detected Component TV connection\n");
- type = DRM_MODE_CONNECTOR_Component;
- } else {
- DRM_DEBUG_KMS("No TV connection detected\n");
- type = -1;
+ type = -1;
+ if (wait_for((tv_dac = I915_READ(TV_DAC)) & TVDAC_STATE_CHG, 20) == 0) {
+ DRM_DEBUG_KMS("TV detected: %x, %x\n", tv_ctl, tv_dac);
+ /*
+ * A B C
+ * 0 1 1 Composite
+ * 1 0 X svideo
+ * 0 0 0 Component
+ */
+ if ((tv_dac & TVDAC_SENSE_MASK) == (TVDAC_B_SENSE | TVDAC_C_SENSE)) {
+ DRM_DEBUG_KMS("Detected Composite TV connection\n");
+ type = DRM_MODE_CONNECTOR_Composite;
+ } else if ((tv_dac & (TVDAC_A_SENSE|TVDAC_B_SENSE)) == TVDAC_A_SENSE) {
+ DRM_DEBUG_KMS("Detected S-Video TV connection\n");
+ type = DRM_MODE_CONNECTOR_SVIDEO;
+ } else if ((tv_dac & TVDAC_SENSE_MASK) == 0) {
+ DRM_DEBUG_KMS("Detected Component TV connection\n");
+ type = DRM_MODE_CONNECTOR_Component;
+ } else {
+ DRM_DEBUG_KMS("Unrecognised TV connection\n");
+ }
}
+ I915_WRITE(TV_DAC, save_tv_dac & ~TVDAC_STATE_CHG_EN);
+ I915_WRITE(TV_CTL, save_tv_ctl);
+
/* Restore interrupt config */
spin_lock_irqsave(&dev_priv->user_irq_lock, irqflags);
i915_enable_pipestat(dev_priv, 0, PIPE_HOTPLUG_INTERRUPT_ENABLE |
@@ -1311,8 +1315,7 @@ intel_tv_detect_type (struct intel_tv *intel_tv)
*/
static void intel_tv_find_better_format(struct drm_connector *connector)
{
- struct drm_encoder *encoder = intel_attached_encoder(connector);
- struct intel_tv *intel_tv = enc_to_intel_tv(encoder);
+ struct intel_tv *intel_tv = intel_attached_tv(connector);
const struct tv_mode *tv_mode = intel_tv_mode_find(intel_tv);
int i;
@@ -1344,14 +1347,13 @@ static enum drm_connector_status
intel_tv_detect(struct drm_connector *connector, bool force)
{
struct drm_display_mode mode;
- struct drm_encoder *encoder = intel_attached_encoder(connector);
- struct intel_tv *intel_tv = enc_to_intel_tv(encoder);
+ struct intel_tv *intel_tv = intel_attached_tv(connector);
int type;
mode = reported_modes[0];
drm_mode_set_crtcinfo(&mode, CRTC_INTERLACE_HALVE_V);
- if (encoder->crtc && encoder->crtc->enabled) {
+ if (intel_tv->base.base.crtc && intel_tv->base.base.crtc->enabled) {
type = intel_tv_detect_type(intel_tv);
} else if (force) {
struct drm_crtc *crtc;
@@ -1375,11 +1377,10 @@ intel_tv_detect(struct drm_connector *connector, bool force)
return connector_status_connected;
}
-static struct input_res {
- char *name;
+static const struct input_res {
+ const char *name;
int w, h;
-} input_res_table[] =
-{
+} input_res_table[] = {
{"640x480", 640, 480},
{"800x600", 800, 600},
{"1024x768", 1024, 768},
@@ -1396,8 +1397,7 @@ static void
intel_tv_chose_preferred_modes(struct drm_connector *connector,
struct drm_display_mode *mode_ptr)
{
- struct drm_encoder *encoder = intel_attached_encoder(connector);
- struct intel_tv *intel_tv = enc_to_intel_tv(encoder);
+ struct intel_tv *intel_tv = intel_attached_tv(connector);
const struct tv_mode *tv_mode = intel_tv_mode_find(intel_tv);
if (tv_mode->nbr_end < 480 && mode_ptr->vdisplay == 480)
@@ -1422,15 +1422,14 @@ static int
intel_tv_get_modes(struct drm_connector *connector)
{
struct drm_display_mode *mode_ptr;
- struct drm_encoder *encoder = intel_attached_encoder(connector);
- struct intel_tv *intel_tv = enc_to_intel_tv(encoder);
+ struct intel_tv *intel_tv = intel_attached_tv(connector);
const struct tv_mode *tv_mode = intel_tv_mode_find(intel_tv);
int j, count = 0;
u64 tmp;
for (j = 0; j < ARRAY_SIZE(input_res_table);
j++) {
- struct input_res *input = &input_res_table[j];
+ const struct input_res *input = &input_res_table[j];
unsigned int hactive_s = input->w;
unsigned int vactive_s = input->h;
@@ -1488,9 +1487,8 @@ intel_tv_set_property(struct drm_connector *connector, struct drm_property *prop
uint64_t val)
{
struct drm_device *dev = connector->dev;
- struct drm_encoder *encoder = intel_attached_encoder(connector);
- struct intel_tv *intel_tv = enc_to_intel_tv(encoder);
- struct drm_crtc *crtc = encoder->crtc;
+ struct intel_tv *intel_tv = intel_attached_tv(connector);
+ struct drm_crtc *crtc = intel_tv->base.base.crtc;
int ret = 0;
bool changed = false;
@@ -1555,7 +1553,7 @@ static const struct drm_connector_funcs intel_tv_connector_funcs = {
static const struct drm_connector_helper_funcs intel_tv_connector_helper_funcs = {
.mode_valid = intel_tv_mode_valid,
.get_modes = intel_tv_get_modes,
- .best_encoder = intel_attached_encoder,
+ .best_encoder = intel_best_encoder,
};
static const struct drm_encoder_funcs intel_tv_enc_funcs = {
@@ -1607,7 +1605,7 @@ intel_tv_init(struct drm_device *dev)
struct intel_encoder *intel_encoder;
struct intel_connector *intel_connector;
u32 tv_dac_on, tv_dac_off, save_tv_dac;
- char **tv_format_names;
+ char *tv_format_names[ARRAY_SIZE(tv_modes)];
int i, initial_mode = 0;
if ((I915_READ(TV_CTL) & TV_FUSE_STATE_MASK) == TV_FUSE_STATE_DISABLED)
@@ -1661,15 +1659,15 @@ intel_tv_init(struct drm_device *dev)
drm_connector_init(dev, connector, &intel_tv_connector_funcs,
DRM_MODE_CONNECTOR_SVIDEO);
- drm_encoder_init(dev, &intel_encoder->enc, &intel_tv_enc_funcs,
+ drm_encoder_init(dev, &intel_encoder->base, &intel_tv_enc_funcs,
DRM_MODE_ENCODER_TVDAC);
- drm_mode_connector_attach_encoder(&intel_connector->base, &intel_encoder->enc);
+ intel_connector_attach_encoder(intel_connector, intel_encoder);
intel_encoder->type = INTEL_OUTPUT_TVOUT;
intel_encoder->crtc_mask = (1 << 0) | (1 << 1);
intel_encoder->clone_mask = (1 << INTEL_TV_CLONE_BIT);
- intel_encoder->enc.possible_crtcs = ((1 << 0) | (1 << 1));
- intel_encoder->enc.possible_clones = (1 << INTEL_OUTPUT_TVOUT);
+ intel_encoder->base.possible_crtcs = ((1 << 0) | (1 << 1));
+ intel_encoder->base.possible_clones = (1 << INTEL_OUTPUT_TVOUT);
intel_tv->type = DRM_MODE_CONNECTOR_Unknown;
/* BIOS margin values */
@@ -1678,21 +1676,19 @@ intel_tv_init(struct drm_device *dev)
intel_tv->margin[TV_MARGIN_RIGHT] = 46;
intel_tv->margin[TV_MARGIN_BOTTOM] = 37;
- intel_tv->tv_format = kstrdup(tv_modes[initial_mode].name, GFP_KERNEL);
+ intel_tv->tv_format = tv_modes[initial_mode].name;
- drm_encoder_helper_add(&intel_encoder->enc, &intel_tv_helper_funcs);
+ drm_encoder_helper_add(&intel_encoder->base, &intel_tv_helper_funcs);
drm_connector_helper_add(connector, &intel_tv_connector_helper_funcs);
connector->interlace_allowed = false;
connector->doublescan_allowed = false;
/* Create TV properties then attach current values */
- tv_format_names = kmalloc(sizeof(char *) * ARRAY_SIZE(tv_modes),
- GFP_KERNEL);
- if (!tv_format_names)
- goto out;
for (i = 0; i < ARRAY_SIZE(tv_modes); i++)
- tv_format_names[i] = tv_modes[i].name;
- drm_mode_create_tv_properties(dev, ARRAY_SIZE(tv_modes), tv_format_names);
+ tv_format_names[i] = (char *)tv_modes[i].name;
+ drm_mode_create_tv_properties(dev,
+ ARRAY_SIZE(tv_modes),
+ tv_format_names);
drm_connector_attach_property(connector, dev->mode_config.tv_mode_property,
initial_mode);
@@ -1708,6 +1704,5 @@ intel_tv_init(struct drm_device *dev)
drm_connector_attach_property(connector,
dev->mode_config.tv_bottom_margin_property,
intel_tv->margin[TV_MARGIN_BOTTOM]);
-out:
drm_sysfs_connector_add(connector);
}
diff --git a/drivers/gpu/drm/mga/mga_drv.c b/drivers/gpu/drm/mga/mga_drv.c
index ac64f0b0392e..0aaf5f67a436 100644
--- a/drivers/gpu/drm/mga/mga_drv.c
+++ b/drivers/gpu/drm/mga/mga_drv.c
@@ -60,8 +60,6 @@ static struct drm_driver driver = {
.irq_uninstall = mga_driver_irq_uninstall,
.irq_handler = mga_driver_irq_handler,
.reclaim_buffers = drm_core_reclaim_buffers,
- .get_map_ofs = drm_core_get_map_ofs,
- .get_reg_ofs = drm_core_get_reg_ofs,
.ioctls = mga_ioctls,
.dma_ioctl = mga_dma_buffers,
.fops = {
diff --git a/drivers/gpu/drm/nouveau/Kconfig b/drivers/gpu/drm/nouveau/Kconfig
index d2d28048efb2..72730e9ca06c 100644
--- a/drivers/gpu/drm/nouveau/Kconfig
+++ b/drivers/gpu/drm/nouveau/Kconfig
@@ -10,6 +10,7 @@ config DRM_NOUVEAU
select FB
select FRAMEBUFFER_CONSOLE if !EMBEDDED
select FB_BACKLIGHT if DRM_NOUVEAU_BACKLIGHT
+ select ACPI_VIDEO if ACPI
help
Choose this option for open-source nVidia support.
diff --git a/drivers/gpu/drm/nouveau/Makefile b/drivers/gpu/drm/nouveau/Makefile
index e9b06e4ef2a2..23fa82d667d6 100644
--- a/drivers/gpu/drm/nouveau/Makefile
+++ b/drivers/gpu/drm/nouveau/Makefile
@@ -9,7 +9,8 @@ nouveau-y := nouveau_drv.o nouveau_state.o nouveau_channel.o nouveau_mem.o \
nouveau_bo.o nouveau_fence.o nouveau_gem.o nouveau_ttm.o \
nouveau_hw.o nouveau_calc.o nouveau_bios.o nouveau_i2c.o \
nouveau_display.o nouveau_connector.o nouveau_fbcon.o \
- nouveau_dp.o \
+ nouveau_dp.o nouveau_ramht.o \
+ nouveau_pm.o nouveau_volt.o nouveau_perf.o nouveau_temp.o \
nv04_timer.o \
nv04_mc.o nv40_mc.o nv50_mc.o \
nv04_fb.o nv10_fb.o nv30_fb.o nv40_fb.o nv50_fb.o nvc0_fb.o \
@@ -23,7 +24,8 @@ nouveau-y := nouveau_drv.o nouveau_state.o nouveau_channel.o nouveau_mem.o \
nv04_dac.o nv04_dfp.o nv04_tv.o nv17_tv.o nv17_tv_modes.o \
nv04_crtc.o nv04_display.o nv04_cursor.o nv04_fbcon.o \
nv10_gpio.o nv50_gpio.o \
- nv50_calc.o
+ nv50_calc.o \
+ nv04_pm.o nv50_pm.o nva3_pm.o
nouveau-$(CONFIG_DRM_NOUVEAU_DEBUG) += nouveau_debugfs.o
nouveau-$(CONFIG_COMPAT) += nouveau_ioc32.o
diff --git a/drivers/gpu/drm/nouveau/nouveau_acpi.c b/drivers/gpu/drm/nouveau/nouveau_acpi.c
index c17a055ee3e5..119152606e4c 100644
--- a/drivers/gpu/drm/nouveau/nouveau_acpi.c
+++ b/drivers/gpu/drm/nouveau/nouveau_acpi.c
@@ -292,6 +292,6 @@ nouveau_acpi_edid(struct drm_device *dev, struct drm_connector *connector)
if (ret < 0)
return ret;
- nv_connector->edid = edid;
+ nv_connector->edid = kmemdup(edid, EDID_LENGTH, GFP_KERNEL);
return 0;
}
diff --git a/drivers/gpu/drm/nouveau/nouveau_backlight.c b/drivers/gpu/drm/nouveau/nouveau_backlight.c
index 406228f4a2a0..b14c81110575 100644
--- a/drivers/gpu/drm/nouveau/nouveau_backlight.c
+++ b/drivers/gpu/drm/nouveau/nouveau_backlight.c
@@ -31,6 +31,7 @@
*/
#include <linux/backlight.h>
+#include <linux/acpi.h>
#include "drmP.h"
#include "nouveau_drv.h"
@@ -136,6 +137,14 @@ int nouveau_backlight_init(struct drm_device *dev)
{
struct drm_nouveau_private *dev_priv = dev->dev_private;
+#ifdef CONFIG_ACPI
+ if (acpi_video_backlight_support()) {
+ NV_INFO(dev, "ACPI backlight interface available, "
+ "not registering our own\n");
+ return 0;
+ }
+#endif
+
switch (dev_priv->card_type) {
case NV_40:
return nouveau_nv40_backlight_init(dev);
diff --git a/drivers/gpu/drm/nouveau/nouveau_bios.c b/drivers/gpu/drm/nouveau/nouveau_bios.c
index 974b0f8ae048..b2293576f278 100644
--- a/drivers/gpu/drm/nouveau/nouveau_bios.c
+++ b/drivers/gpu/drm/nouveau/nouveau_bios.c
@@ -43,9 +43,6 @@
#define BIOSLOG(sip, fmt, arg...) NV_DEBUG(sip->dev, fmt, ##arg)
#define LOG_OLD_VALUE(x)
-#define ROM16(x) le16_to_cpu(*(uint16_t *)&(x))
-#define ROM32(x) le32_to_cpu(*(uint32_t *)&(x))
-
struct init_exec {
bool execute;
bool repeat;
@@ -272,12 +269,6 @@ struct init_tbl_entry {
int (*handler)(struct nvbios *, uint16_t, struct init_exec *);
};
-struct bit_entry {
- uint8_t id[2];
- uint16_t length;
- uint16_t offset;
-};
-
static int parse_init_table(struct nvbios *, unsigned int, struct init_exec *);
#define MACRO_INDEX_SIZE 2
@@ -1231,7 +1222,7 @@ init_dp_condition(struct nvbios *bios, uint16_t offset, struct init_exec *iexec)
return 3;
}
- if (cond & 1)
+ if (!(cond & 1))
iexec->execute = false;
}
break;
@@ -2167,11 +2158,11 @@ peek_fb(struct drm_device *dev, struct io_mapping *fb,
if (off < pci_resource_len(dev->pdev, 1)) {
uint8_t __iomem *p =
- io_mapping_map_atomic_wc(fb, off & PAGE_MASK, KM_USER0);
+ io_mapping_map_atomic_wc(fb, off & PAGE_MASK);
val = ioread32(p + (off & ~PAGE_MASK));
- io_mapping_unmap_atomic(p, KM_USER0);
+ io_mapping_unmap_atomic(p);
}
return val;
@@ -2183,12 +2174,12 @@ poke_fb(struct drm_device *dev, struct io_mapping *fb,
{
if (off < pci_resource_len(dev->pdev, 1)) {
uint8_t __iomem *p =
- io_mapping_map_atomic_wc(fb, off & PAGE_MASK, KM_USER0);
+ io_mapping_map_atomic_wc(fb, off & PAGE_MASK);
iowrite32(val, p + (off & ~PAGE_MASK));
wmb();
- io_mapping_unmap_atomic(p, KM_USER0);
+ io_mapping_unmap_atomic(p);
}
}
@@ -4675,6 +4666,92 @@ int run_tmds_table(struct drm_device *dev, struct dcb_entry *dcbent, int head, i
return 0;
}
+struct pll_mapping {
+ u8 type;
+ u32 reg;
+};
+
+static struct pll_mapping nv04_pll_mapping[] = {
+ { PLL_CORE , NV_PRAMDAC_NVPLL_COEFF },
+ { PLL_MEMORY, NV_PRAMDAC_MPLL_COEFF },
+ { PLL_VPLL0 , NV_PRAMDAC_VPLL_COEFF },
+ { PLL_VPLL1 , NV_RAMDAC_VPLL2 },
+ {}
+};
+
+static struct pll_mapping nv40_pll_mapping[] = {
+ { PLL_CORE , 0x004000 },
+ { PLL_MEMORY, 0x004020 },
+ { PLL_VPLL0 , NV_PRAMDAC_VPLL_COEFF },
+ { PLL_VPLL1 , NV_RAMDAC_VPLL2 },
+ {}
+};
+
+static struct pll_mapping nv50_pll_mapping[] = {
+ { PLL_CORE , 0x004028 },
+ { PLL_SHADER, 0x004020 },
+ { PLL_UNK03 , 0x004000 },
+ { PLL_MEMORY, 0x004008 },
+ { PLL_UNK40 , 0x00e810 },
+ { PLL_UNK41 , 0x00e818 },
+ { PLL_UNK42 , 0x00e824 },
+ { PLL_VPLL0 , 0x614100 },
+ { PLL_VPLL1 , 0x614900 },
+ {}
+};
+
+static struct pll_mapping nv84_pll_mapping[] = {
+ { PLL_CORE , 0x004028 },
+ { PLL_SHADER, 0x004020 },
+ { PLL_MEMORY, 0x004008 },
+ { PLL_UNK05 , 0x004030 },
+ { PLL_UNK41 , 0x00e818 },
+ { PLL_VPLL0 , 0x614100 },
+ { PLL_VPLL1 , 0x614900 },
+ {}
+};
+
+u32
+get_pll_register(struct drm_device *dev, enum pll_types type)
+{
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ struct nvbios *bios = &dev_priv->vbios;
+ struct pll_mapping *map;
+ int i;
+
+ if (dev_priv->card_type < NV_40)
+ map = nv04_pll_mapping;
+ else
+ if (dev_priv->card_type < NV_50)
+ map = nv40_pll_mapping;
+ else {
+ u8 *plim = &bios->data[bios->pll_limit_tbl_ptr];
+
+ if (plim[0] >= 0x30) {
+ u8 *entry = plim + plim[1];
+ for (i = 0; i < plim[3]; i++, entry += plim[2]) {
+ if (entry[0] == type)
+ return ROM32(entry[3]);
+ }
+
+ return 0;
+ }
+
+ if (dev_priv->chipset == 0x50)
+ map = nv50_pll_mapping;
+ else
+ map = nv84_pll_mapping;
+ }
+
+ while (map->reg) {
+ if (map->type == type)
+ return map->reg;
+ map++;
+ }
+
+ return 0;
+}
+
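A hedged caller sketch for the new type-based PLL lookup: get_pll_register() resolves a PLL_* type to the chipset-specific register, and get_pll_limits() below performs the same resolution internally when handed a type rather than a register. The PLL_CORE choice and variable names are illustrative, and error handling is abbreviated.

	struct pll_lims lims;
	u32 reg;
	int ret;

	reg = get_pll_register(dev, PLL_CORE);
	if (!reg)
		return -ENOENT;		/* no mapping known for this chipset */

	ret = get_pll_limits(dev, PLL_CORE, &lims);
	if (ret == 0)
		NV_DEBUG(dev, "core PLL reg 0x%08x, refclk %dkHz\n",
			 lims.reg, lims.refclk);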
int get_pll_limits(struct drm_device *dev, uint32_t limit_match, struct pll_lims *pll_lim)
{
/*
@@ -4750,6 +4827,17 @@ int get_pll_limits(struct drm_device *dev, uint32_t limit_match, struct pll_lims
/* initialize all members to zero */
memset(pll_lim, 0, sizeof(struct pll_lims));
+ /* if we were passed a type rather than a register, figure
+ * out the register and store it
+ */
+ if (limit_match > PLL_MAX)
+ pll_lim->reg = limit_match;
+ else {
+ pll_lim->reg = get_pll_register(dev, limit_match);
+ if (!pll_lim->reg)
+ return -ENOENT;
+ }
+
if (pll_lim_ver == 0x10 || pll_lim_ver == 0x11) {
uint8_t *pll_rec = &bios->data[bios->pll_limit_tbl_ptr + headerlen + recordlen * pllindex];
@@ -4785,7 +4873,6 @@ int get_pll_limits(struct drm_device *dev, uint32_t limit_match, struct pll_lims
pll_lim->max_usable_log2p = 0x6;
} else if (pll_lim_ver == 0x20 || pll_lim_ver == 0x21) {
uint16_t plloffs = bios->pll_limit_tbl_ptr + headerlen;
- uint32_t reg = 0; /* default match */
uint8_t *pll_rec;
int i;
@@ -4797,37 +4884,22 @@ int get_pll_limits(struct drm_device *dev, uint32_t limit_match, struct pll_lims
NV_WARN(dev, "Default PLL limit entry has non-zero "
"register field\n");
- if (limit_match > MAX_PLL_TYPES)
- /* we've been passed a reg as the match */
- reg = limit_match;
- else /* limit match is a pll type */
- for (i = 1; i < entries && !reg; i++) {
- uint32_t cmpreg = ROM32(bios->data[plloffs + recordlen * i]);
-
- if (limit_match == NVPLL &&
- (cmpreg == NV_PRAMDAC_NVPLL_COEFF || cmpreg == 0x4000))
- reg = cmpreg;
- if (limit_match == MPLL &&
- (cmpreg == NV_PRAMDAC_MPLL_COEFF || cmpreg == 0x4020))
- reg = cmpreg;
- if (limit_match == VPLL1 &&
- (cmpreg == NV_PRAMDAC_VPLL_COEFF || cmpreg == 0x4010))
- reg = cmpreg;
- if (limit_match == VPLL2 &&
- (cmpreg == NV_RAMDAC_VPLL2 || cmpreg == 0x4018))
- reg = cmpreg;
- }
-
for (i = 1; i < entries; i++)
- if (ROM32(bios->data[plloffs + recordlen * i]) == reg) {
+ if (ROM32(bios->data[plloffs + recordlen * i]) == pll_lim->reg) {
pllindex = i;
break;
}
+ if ((dev_priv->card_type >= NV_50) && (pllindex == 0)) {
+ NV_ERROR(dev, "Register 0x%08x not found in PLL "
+ "limits table", pll_lim->reg);
+ return -ENOENT;
+ }
+
pll_rec = &bios->data[plloffs + recordlen * pllindex];
BIOSLOG(bios, "Loading PLL limits for reg 0x%08x\n",
- pllindex ? reg : 0);
+ pllindex ? pll_lim->reg : 0);
/*
* Frequencies are stored in tables in MHz, kHz are more
@@ -4877,8 +4949,8 @@ int get_pll_limits(struct drm_device *dev, uint32_t limit_match, struct pll_lims
if (cv == 0x51 && !pll_lim->refclk) {
uint32_t sel_clk = bios_rd32(bios, NV_PRAMDAC_SEL_CLK);
- if (((limit_match == NV_PRAMDAC_VPLL_COEFF || limit_match == VPLL1) && sel_clk & 0x20) ||
- ((limit_match == NV_RAMDAC_VPLL2 || limit_match == VPLL2) && sel_clk & 0x80)) {
+ if ((pll_lim->reg == NV_PRAMDAC_VPLL_COEFF && sel_clk & 0x20) ||
+ (pll_lim->reg == NV_RAMDAC_VPLL2 && sel_clk & 0x80)) {
if (bios_idxprt_rd(bios, NV_CIO_CRX__COLOR, NV_CIO_CRE_CHIP_ID_INDEX) < 0xa3)
pll_lim->refclk = 200000;
else
@@ -4891,10 +4963,10 @@ int get_pll_limits(struct drm_device *dev, uint32_t limit_match, struct pll_lims
int i;
BIOSLOG(bios, "Loading PLL limits for register 0x%08x\n",
- limit_match);
+ pll_lim->reg);
for (i = 0; i < entries; i++, entry += recordlen) {
- if (ROM32(entry[3]) == limit_match) {
+ if (ROM32(entry[3]) == pll_lim->reg) {
record = &bios->data[ROM16(entry[1])];
break;
}
@@ -4902,7 +4974,7 @@ int get_pll_limits(struct drm_device *dev, uint32_t limit_match, struct pll_lims
if (!record) {
NV_ERROR(dev, "Register 0x%08x not found in PLL "
- "limits table", limit_match);
+ "limits table", pll_lim->reg);
return -ENOENT;
}
@@ -4931,10 +5003,10 @@ int get_pll_limits(struct drm_device *dev, uint32_t limit_match, struct pll_lims
int i;
BIOSLOG(bios, "Loading PLL limits for register 0x%08x\n",
- limit_match);
+ pll_lim->reg);
for (i = 0; i < entries; i++, entry += recordlen) {
- if (ROM32(entry[3]) == limit_match) {
+ if (ROM32(entry[3]) == pll_lim->reg) {
record = &bios->data[ROM16(entry[1])];
break;
}
@@ -4942,7 +5014,7 @@ int get_pll_limits(struct drm_device *dev, uint32_t limit_match, struct pll_lims
if (!record) {
NV_ERROR(dev, "Register 0x%08x not found in PLL "
- "limits table", limit_match);
+ "limits table", pll_lim->reg);
return -ENOENT;
}
@@ -5293,7 +5365,7 @@ parse_bit_M_tbl_entry(struct drm_device *dev, struct nvbios *bios,
if (bitentry->length < 0x5)
return 0;
- if (bitentry->id[1] < 2) {
+ if (bitentry->version < 2) {
bios->ram_restrict_group_count = bios->data[bitentry->offset + 2];
bios->ram_restrict_tbl_ptr = ROM16(bios->data[bitentry->offset + 3]);
} else {
@@ -5403,27 +5475,40 @@ struct bit_table {
#define BIT_TABLE(id, funcid) ((struct bit_table){ id, parse_bit_##funcid##_tbl_entry })
+int
+bit_table(struct drm_device *dev, u8 id, struct bit_entry *bit)
+{
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ struct nvbios *bios = &dev_priv->vbios;
+ u8 entries, *entry;
+
+ entries = bios->data[bios->offset + 10];
+ entry = &bios->data[bios->offset + 12];
+ while (entries--) {
+ if (entry[0] == id) {
+ bit->id = entry[0];
+ bit->version = entry[1];
+ bit->length = ROM16(entry[2]);
+ bit->offset = ROM16(entry[4]);
+ bit->data = ROMPTR(bios, entry[4]);
+ return 0;
+ }
+
+ entry += bios->data[bios->offset + 9];
+ }
+
+ return -ENOENT;
+}
+
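A short usage sketch for the new bit_table() helper: look up one BIT table by its id byte and report where it lives in the image. The 'P' id is only an example.

	struct bit_entry P;

	if (bit_table(dev, 'P', &P) == 0)
		NV_DEBUG(dev, "BIT 'P' v%d, %d bytes at 0x%04x\n",
			 P.version, P.length, P.offset);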
static int
parse_bit_table(struct nvbios *bios, const uint16_t bitoffset,
struct bit_table *table)
{
struct drm_device *dev = bios->dev;
- uint8_t maxentries = bios->data[bitoffset + 4];
- int i, offset;
struct bit_entry bitentry;
- for (i = 0, offset = bitoffset + 6; i < maxentries; i++, offset += 6) {
- bitentry.id[0] = bios->data[offset];
-
- if (bitentry.id[0] != table->id)
- continue;
-
- bitentry.id[1] = bios->data[offset + 1];
- bitentry.length = ROM16(bios->data[offset + 2]);
- bitentry.offset = ROM16(bios->data[offset + 4]);
-
+ if (bit_table(dev, table->id, &bitentry) == 0)
return table->parse_fn(dev, bios, &bitentry);
- }
NV_INFO(dev, "BIT table '%c' not found\n", table->id);
return -ENOSYS;
@@ -5683,8 +5768,14 @@ static uint16_t findstr(uint8_t *data, int n, const uint8_t *str, int len)
static struct dcb_gpio_entry *
new_gpio_entry(struct nvbios *bios)
{
+ struct drm_device *dev = bios->dev;
struct dcb_gpio_table *gpio = &bios->dcb.gpio;
+ if (gpio->entries >= DCB_MAX_NUM_GPIO_ENTRIES) {
+ NV_ERROR(dev, "exceeded maximum number of gpio entries!!\n");
+ return NULL;
+ }
+
return &gpio->entry[gpio->entries++];
}
@@ -5706,113 +5797,90 @@ nouveau_bios_gpio_entry(struct drm_device *dev, enum dcb_gpio_tag tag)
}
static void
-parse_dcb30_gpio_entry(struct nvbios *bios, uint16_t offset)
-{
- struct dcb_gpio_entry *gpio;
- uint16_t ent = ROM16(bios->data[offset]);
- uint8_t line = ent & 0x1f,
- tag = ent >> 5 & 0x3f,
- flags = ent >> 11 & 0x1f;
-
- if (tag == 0x3f)
- return;
-
- gpio = new_gpio_entry(bios);
-
- gpio->tag = tag;
- gpio->line = line;
- gpio->invert = flags != 4;
- gpio->entry = ent;
-}
-
-static void
-parse_dcb40_gpio_entry(struct nvbios *bios, uint16_t offset)
-{
- uint32_t entry = ROM32(bios->data[offset]);
- struct dcb_gpio_entry *gpio;
-
- if ((entry & 0x0000ff00) == 0x0000ff00)
- return;
-
- gpio = new_gpio_entry(bios);
- gpio->tag = (entry & 0x0000ff00) >> 8;
- gpio->line = (entry & 0x0000001f) >> 0;
- gpio->state_default = (entry & 0x01000000) >> 24;
- gpio->state[0] = (entry & 0x18000000) >> 27;
- gpio->state[1] = (entry & 0x60000000) >> 29;
- gpio->entry = entry;
-}
-
-static void
parse_dcb_gpio_table(struct nvbios *bios)
{
struct drm_device *dev = bios->dev;
- uint16_t gpio_table_ptr = bios->dcb.gpio_table_ptr;
- uint8_t *gpio_table = &bios->data[gpio_table_ptr];
- int header_len = gpio_table[1],
- entries = gpio_table[2],
- entry_len = gpio_table[3];
- void (*parse_entry)(struct nvbios *, uint16_t) = NULL;
+ struct dcb_gpio_entry *e;
+ u8 headerlen, entries, recordlen;
+ u8 *dcb, *gpio = NULL, *entry;
int i;
- if (bios->dcb.version >= 0x40) {
- if (gpio_table_ptr && entry_len != 4) {
- NV_WARN(dev, "Invalid DCB GPIO table entry length.\n");
- return;
- }
+ dcb = ROMPTR(bios, bios->data[0x36]);
+ if (dcb[0] >= 0x30) {
+ gpio = ROMPTR(bios, dcb[10]);
+ if (!gpio)
+ goto no_table;
- parse_entry = parse_dcb40_gpio_entry;
+ headerlen = gpio[1];
+ entries = gpio[2];
+ recordlen = gpio[3];
+ } else
+ if (dcb[0] >= 0x22 && dcb[-1] >= 0x13) {
+ gpio = ROMPTR(bios, dcb[-15]);
+ if (!gpio)
+ goto no_table;
+
+ headerlen = 3;
+ entries = gpio[2];
+ recordlen = gpio[1];
+ } else
+ if (dcb[0] >= 0x22) {
+ /* No GPIO table present, parse the TVDAC GPIO data. */
+ uint8_t *tvdac_gpio = &dcb[-5];
- } else if (bios->dcb.version >= 0x30) {
- if (gpio_table_ptr && entry_len != 2) {
- NV_WARN(dev, "Invalid DCB GPIO table entry length.\n");
- return;
+ if (tvdac_gpio[0] & 1) {
+ e = new_gpio_entry(bios);
+ e->tag = DCB_GPIO_TVDAC0;
+ e->line = tvdac_gpio[1] >> 4;
+ e->invert = tvdac_gpio[0] & 2;
}
- parse_entry = parse_dcb30_gpio_entry;
-
- } else if (bios->dcb.version >= 0x22) {
- /*
- * DCBs older than v3.0 don't really have a GPIO
- * table, instead they keep some GPIO info at fixed
- * locations.
- */
- uint16_t dcbptr = ROM16(bios->data[0x36]);
- uint8_t *tvdac_gpio = &bios->data[dcbptr - 5];
+ goto no_table;
+ } else {
+ NV_DEBUG(dev, "no/unknown gpio table on DCB 0x%02x\n", dcb[0]);
+ goto no_table;
+ }
- if (tvdac_gpio[0] & 1) {
- struct dcb_gpio_entry *gpio = new_gpio_entry(bios);
+ entry = gpio + headerlen;
+ for (i = 0; i < entries; i++, entry += recordlen) {
+ e = new_gpio_entry(bios);
+ if (!e)
+ break;
- gpio->tag = DCB_GPIO_TVDAC0;
- gpio->line = tvdac_gpio[1] >> 4;
- gpio->invert = tvdac_gpio[0] & 2;
- }
- } else {
- /*
- * No systematic way to store GPIO info on pre-v2.2
- * DCBs, try to match the PCI device IDs.
- */
+ if (gpio[0] < 0x40) {
+ e->entry = ROM16(entry[0]);
+ e->tag = (e->entry & 0x07e0) >> 5;
+ if (e->tag == 0x3f) {
+ bios->dcb.gpio.entries--;
+ continue;
+ }
- /* Apple iMac G4 NV18 */
- if (nv_match_device(dev, 0x0189, 0x10de, 0x0010)) {
- struct dcb_gpio_entry *gpio = new_gpio_entry(bios);
+ e->line = (e->entry & 0x001f);
+ e->invert = ((e->entry & 0xf800) >> 11) != 4;
+ } else {
+ e->entry = ROM32(entry[0]);
+ e->tag = (e->entry & 0x0000ff00) >> 8;
+ if (e->tag == 0xff) {
+ bios->dcb.gpio.entries--;
+ continue;
+ }
- gpio->tag = DCB_GPIO_TVDAC0;
- gpio->line = 4;
+ e->line = (e->entry & 0x0000001f) >> 0;
+ e->state_default = (e->entry & 0x01000000) >> 24;
+ e->state[0] = (e->entry & 0x18000000) >> 27;
+ e->state[1] = (e->entry & 0x60000000) >> 29;
}
-
}
- if (!gpio_table_ptr)
- return;
-
- if (entries > DCB_MAX_NUM_GPIO_ENTRIES) {
- NV_WARN(dev, "Too many entries in the DCB GPIO table.\n");
- entries = DCB_MAX_NUM_GPIO_ENTRIES;
+no_table:
+ /* Apple iMac G4 NV18 */
+ if (nv_match_device(dev, 0x0189, 0x10de, 0x0010)) {
+ e = new_gpio_entry(bios);
+ if (e) {
+ e->tag = DCB_GPIO_TVDAC0;
+ e->line = 4;
+ }
}
-
- for (i = 0; i < entries; i++)
- parse_entry(bios, gpio_table_ptr + header_len + entry_len * i);
}
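A worked decode of a hypothetical pre-v4.0 (16-bit) DCB GPIO entry, using the same bit layout as the loop above; the value 0x2087 is made up for illustration.

	u16 ent   = 0x2087;			/* illustrative, not from a real VBIOS  */
	u8  line  = ent & 0x001f;		/* 0x07                                  */
	u8  tag   = (ent & 0x07e0) >> 5;	/* 0x04                                  */
	u8  flags = (ent & 0xf800) >> 11;	/* 0x04: flags == 4 means not inverted   */
	bool invert = flags != 4;		/* false here; tag 0x3f would mean skip  */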
struct dcb_connector_table_entry *
@@ -6680,6 +6748,8 @@ static int nouveau_parse_vbios_struct(struct drm_device *dev)
bit_signature, sizeof(bit_signature));
if (offset) {
NV_TRACE(dev, "BIT BIOS found\n");
+ bios->type = NVBIOS_BIT;
+ bios->offset = offset;
return parse_bit_structure(bios, offset + 6);
}
@@ -6687,6 +6757,8 @@ static int nouveau_parse_vbios_struct(struct drm_device *dev)
bmp_signature, sizeof(bmp_signature));
if (offset) {
NV_TRACE(dev, "BMP BIOS found\n");
+ bios->type = NVBIOS_BMP;
+ bios->offset = offset;
return parse_bmp_structure(dev, bios, offset);
}
@@ -6757,7 +6829,7 @@ nouveau_bios_posted(struct drm_device *dev)
struct drm_nouveau_private *dev_priv = dev->dev_private;
unsigned htotal;
- if (dev_priv->chipset >= NV_50) {
+ if (dev_priv->card_type >= NV_50) {
if (NVReadVgaCrtc(dev, 0, 0x00) == 0 &&
NVReadVgaCrtc(dev, 0, 0x1a) == 0)
return false;
@@ -6806,6 +6878,8 @@ nouveau_bios_init(struct drm_device *dev)
"running VBIOS init tables.\n");
bios->execute = true;
}
+ if (nouveau_force_post)
+ bios->execute = true;
ret = nouveau_run_vbios_init(dev);
if (ret)
diff --git a/drivers/gpu/drm/nouveau/nouveau_bios.h b/drivers/gpu/drm/nouveau/nouveau_bios.h
index c1de2f3fcb0e..50a648e01c49 100644
--- a/drivers/gpu/drm/nouveau/nouveau_bios.h
+++ b/drivers/gpu/drm/nouveau/nouveau_bios.h
@@ -34,6 +34,20 @@
#define DCB_LOC_ON_CHIP 0
+#define ROM16(x) le16_to_cpu(*(uint16_t *)&(x))
+#define ROM32(x) le32_to_cpu(*(uint32_t *)&(x))
+#define ROMPTR(bios, x) (ROM16(x) ? &(bios)->data[ROM16(x)] : NULL)
+
+struct bit_entry {
+ uint8_t id;
+ uint8_t version;
+ uint16_t length;
+ uint16_t offset;
+ uint8_t *data;
+};
+
+int bit_table(struct drm_device *, u8 id, struct bit_entry *);
+
struct dcb_i2c_entry {
uint32_t entry;
uint8_t port_type;
@@ -170,16 +184,28 @@ enum LVDS_script {
LVDS_PANEL_OFF
};
-/* changing these requires matching changes to reg tables in nv_get_clock */
-#define MAX_PLL_TYPES 4
+/* these match types in pll limits table version 0x40,
+ * nouveau uses them on all chipsets internally where a
+ * specific pll needs to be referenced, but the exact
+ * register isn't known.
+ */
enum pll_types {
- NVPLL,
- MPLL,
- VPLL1,
- VPLL2
+ PLL_CORE = 0x01,
+ PLL_SHADER = 0x02,
+ PLL_UNK03 = 0x03,
+ PLL_MEMORY = 0x04,
+ PLL_UNK05 = 0x05,
+ PLL_UNK40 = 0x40,
+ PLL_UNK41 = 0x41,
+ PLL_UNK42 = 0x42,
+ PLL_VPLL0 = 0x80,
+ PLL_VPLL1 = 0x81,
+ PLL_MAX = 0xff
};
struct pll_lims {
+ u32 reg;
+
struct {
int minfreq;
int maxfreq;
@@ -212,6 +238,11 @@ struct pll_lims {
struct nvbios {
struct drm_device *dev;
+ enum {
+ NVBIOS_BMP,
+ NVBIOS_BIT
+ } type;
+ uint16_t offset;
uint8_t chip_version;
diff --git a/drivers/gpu/drm/nouveau/nouveau_bo.c b/drivers/gpu/drm/nouveau/nouveau_bo.c
index f6f44779d82f..c41e1c200ef5 100644
--- a/drivers/gpu/drm/nouveau/nouveau_bo.c
+++ b/drivers/gpu/drm/nouveau/nouveau_bo.c
@@ -36,21 +36,6 @@
#include <linux/log2.h>
#include <linux/slab.h>
-int
-nouveau_bo_sync_gpu(struct nouveau_bo *nvbo, struct nouveau_channel *chan)
-{
- struct nouveau_fence *prev_fence = nvbo->bo.sync_obj;
- int ret;
-
- if (!prev_fence || nouveau_fence_channel(prev_fence) == chan)
- return 0;
-
- spin_lock(&nvbo->bo.lock);
- ret = ttm_bo_wait(&nvbo->bo, false, false, false);
- spin_unlock(&nvbo->bo.lock);
- return ret;
-}
-
static void
nouveau_bo_del_ttm(struct ttm_buffer_object *bo)
{
@@ -58,8 +43,6 @@ nouveau_bo_del_ttm(struct ttm_buffer_object *bo)
struct drm_device *dev = dev_priv->dev;
struct nouveau_bo *nvbo = nouveau_bo(bo);
- ttm_bo_kunmap(&nvbo->kmap);
-
if (unlikely(nvbo->gem))
DRM_ERROR("bo %p still attached to GEM object\n", bo);
@@ -160,12 +143,12 @@ nouveau_bo_new(struct drm_device *dev, struct nouveau_channel *chan,
nvbo->no_vm = no_vm;
nvbo->tile_mode = tile_mode;
nvbo->tile_flags = tile_flags;
+ nvbo->bo.bdev = &dev_priv->ttm.bdev;
- nouveau_bo_fixup_align(dev, tile_mode, tile_flags, &align, &size);
+ nouveau_bo_fixup_align(dev, tile_mode, nouveau_bo_tile_layout(nvbo),
+ &align, &size);
align >>= PAGE_SHIFT;
- nvbo->placement.fpfn = 0;
- nvbo->placement.lpfn = mappable ? dev_priv->fb_mappable_pages : 0;
nouveau_bo_placement_set(nvbo, flags, 0);
nvbo->channel = chan;
@@ -195,6 +178,31 @@ set_placement_list(uint32_t *pl, unsigned *n, uint32_t type, uint32_t flags)
pl[(*n)++] = TTM_PL_FLAG_SYSTEM | flags;
}
+static void
+set_placement_range(struct nouveau_bo *nvbo, uint32_t type)
+{
+ struct drm_nouveau_private *dev_priv = nouveau_bdev(nvbo->bo.bdev);
+
+ if (dev_priv->card_type == NV_10 &&
+ nvbo->tile_mode && (type & TTM_PL_FLAG_VRAM)) {
+ /*
+ * Make sure that the color and depth buffers are handled
+ * by independent memory controller units. Up to a 9x
+ * speed up when alpha-blending and depth-test are enabled
+ * at the same time.
+ */
+ int vram_pages = dev_priv->vram_size >> PAGE_SHIFT;
+
+ if (nvbo->tile_flags & NOUVEAU_GEM_TILE_ZETA) {
+ nvbo->placement.fpfn = vram_pages / 2;
+ nvbo->placement.lpfn = ~0;
+ } else {
+ nvbo->placement.fpfn = 0;
+ nvbo->placement.lpfn = vram_pages / 2;
+ }
+ }
+}
+
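Illustrative arithmetic for the NV10 colour/depth split above, assuming a 64 MiB board with 4 KiB pages (numbers are examples only):

	int vram_pages = (64 << 20) >> PAGE_SHIFT;	/* 16384 pages                             */
	int zeta_fpfn  = vram_pages / 2;		/* 8192: depth buffers in the upper half   */
	int color_lpfn = vram_pages / 2;		/* 8192: colour buffers in the lower half  */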
void
nouveau_bo_placement_set(struct nouveau_bo *nvbo, uint32_t type, uint32_t busy)
{
@@ -209,6 +217,8 @@ nouveau_bo_placement_set(struct nouveau_bo *nvbo, uint32_t type, uint32_t busy)
pl->busy_placement = nvbo->busy_placements;
set_placement_list(nvbo->busy_placements, &pl->num_busy_placement,
type | busy, flags);
+
+ set_placement_range(nvbo, type);
}
int
@@ -305,7 +315,8 @@ nouveau_bo_map(struct nouveau_bo *nvbo)
void
nouveau_bo_unmap(struct nouveau_bo *nvbo)
{
- ttm_bo_kunmap(&nvbo->kmap);
+ if (nvbo)
+ ttm_bo_kunmap(&nvbo->kmap);
}
u16
@@ -399,14 +410,19 @@ nouveau_bo_init_mem_type(struct ttm_bo_device *bdev, uint32_t type,
man->default_caching = TTM_PL_FLAG_CACHED;
break;
case TTM_PL_VRAM:
+ man->func = &ttm_bo_manager_func;
man->flags = TTM_MEMTYPE_FLAG_FIXED |
TTM_MEMTYPE_FLAG_MAPPABLE;
man->available_caching = TTM_PL_FLAG_UNCACHED |
TTM_PL_FLAG_WC;
man->default_caching = TTM_PL_FLAG_WC;
- man->gpu_offset = dev_priv->vm_vram_base;
+ if (dev_priv->card_type == NV_50)
+ man->gpu_offset = 0x40000000;
+ else
+ man->gpu_offset = 0;
break;
case TTM_PL_TT:
+ man->func = &ttm_bo_manager_func;
switch (dev_priv->gart_info.type) {
case NOUVEAU_GART_AGP:
man->flags = TTM_MEMTYPE_FLAG_MAPPABLE;
@@ -469,19 +485,26 @@ nouveau_bo_move_accel_cleanup(struct nouveau_channel *chan,
if (ret)
return ret;
- ret = ttm_bo_move_accel_cleanup(&nvbo->bo, fence, NULL,
- evict || (nvbo->channel &&
- nvbo->channel != chan),
+ if (nvbo->channel) {
+ ret = nouveau_fence_sync(fence, nvbo->channel);
+ if (ret)
+ goto out;
+ }
+
+ ret = ttm_bo_move_accel_cleanup(&nvbo->bo, fence, NULL, evict,
no_wait_reserve, no_wait_gpu, new_mem);
+out:
nouveau_fence_unref((void *)&fence);
return ret;
}
static inline uint32_t
-nouveau_bo_mem_ctxdma(struct nouveau_bo *nvbo, struct nouveau_channel *chan,
- struct ttm_mem_reg *mem)
+nouveau_bo_mem_ctxdma(struct ttm_buffer_object *bo,
+ struct nouveau_channel *chan, struct ttm_mem_reg *mem)
{
- if (chan == nouveau_bdev(nvbo->bo.bdev)->channel) {
+ struct nouveau_bo *nvbo = nouveau_bo(bo);
+
+ if (nvbo->no_vm) {
if (mem->mem_type == TTM_PL_TT)
return NvDmaGART;
return NvDmaVRAM;
@@ -493,86 +516,183 @@ nouveau_bo_mem_ctxdma(struct nouveau_bo *nvbo, struct nouveau_channel *chan,
}
static int
-nouveau_bo_move_m2mf(struct ttm_buffer_object *bo, int evict, bool intr,
- bool no_wait_reserve, bool no_wait_gpu,
- struct ttm_mem_reg *new_mem)
+nv50_bo_move_m2mf(struct nouveau_channel *chan, struct ttm_buffer_object *bo,
+ struct ttm_mem_reg *old_mem, struct ttm_mem_reg *new_mem)
{
- struct nouveau_bo *nvbo = nouveau_bo(bo);
struct drm_nouveau_private *dev_priv = nouveau_bdev(bo->bdev);
- struct ttm_mem_reg *old_mem = &bo->mem;
- struct nouveau_channel *chan;
- uint64_t src_offset, dst_offset;
- uint32_t page_count;
+ struct nouveau_bo *nvbo = nouveau_bo(bo);
+ u64 length = (new_mem->num_pages << PAGE_SHIFT);
+ u64 src_offset, dst_offset;
int ret;
- chan = nvbo->channel;
- if (!chan || nvbo->tile_flags || nvbo->no_vm)
- chan = dev_priv->channel;
-
- src_offset = old_mem->mm_node->start << PAGE_SHIFT;
- dst_offset = new_mem->mm_node->start << PAGE_SHIFT;
- if (chan != dev_priv->channel) {
- if (old_mem->mem_type == TTM_PL_TT)
- src_offset += dev_priv->vm_gart_base;
- else
+ src_offset = old_mem->start << PAGE_SHIFT;
+ dst_offset = new_mem->start << PAGE_SHIFT;
+ if (!nvbo->no_vm) {
+ if (old_mem->mem_type == TTM_PL_VRAM)
src_offset += dev_priv->vm_vram_base;
-
- if (new_mem->mem_type == TTM_PL_TT)
- dst_offset += dev_priv->vm_gart_base;
else
+ src_offset += dev_priv->vm_gart_base;
+
+ if (new_mem->mem_type == TTM_PL_VRAM)
dst_offset += dev_priv->vm_vram_base;
+ else
+ dst_offset += dev_priv->vm_gart_base;
}
ret = RING_SPACE(chan, 3);
if (ret)
return ret;
- BEGIN_RING(chan, NvSubM2MF, NV_MEMORY_TO_MEMORY_FORMAT_DMA_SOURCE, 2);
- OUT_RING(chan, nouveau_bo_mem_ctxdma(nvbo, chan, old_mem));
- OUT_RING(chan, nouveau_bo_mem_ctxdma(nvbo, chan, new_mem));
- if (dev_priv->card_type >= NV_50) {
- ret = RING_SPACE(chan, 4);
+ BEGIN_RING(chan, NvSubM2MF, 0x0184, 2);
+ OUT_RING (chan, nouveau_bo_mem_ctxdma(bo, chan, old_mem));
+ OUT_RING (chan, nouveau_bo_mem_ctxdma(bo, chan, new_mem));
+
+ while (length) {
+ u32 amount, stride, height;
+
+ amount = min(length, (u64)(4 * 1024 * 1024));
+ stride = 16 * 4;
+ height = amount / stride;
+
+ if (new_mem->mem_type == TTM_PL_VRAM &&
+ nouveau_bo_tile_layout(nvbo)) {
+ ret = RING_SPACE(chan, 8);
+ if (ret)
+ return ret;
+
+ BEGIN_RING(chan, NvSubM2MF, 0x0200, 7);
+ OUT_RING (chan, 0);
+ OUT_RING (chan, 0);
+ OUT_RING (chan, stride);
+ OUT_RING (chan, height);
+ OUT_RING (chan, 1);
+ OUT_RING (chan, 0);
+ OUT_RING (chan, 0);
+ } else {
+ ret = RING_SPACE(chan, 2);
+ if (ret)
+ return ret;
+
+ BEGIN_RING(chan, NvSubM2MF, 0x0200, 1);
+ OUT_RING (chan, 1);
+ }
+ if (old_mem->mem_type == TTM_PL_VRAM &&
+ nouveau_bo_tile_layout(nvbo)) {
+ ret = RING_SPACE(chan, 8);
+ if (ret)
+ return ret;
+
+ BEGIN_RING(chan, NvSubM2MF, 0x021c, 7);
+ OUT_RING (chan, 0);
+ OUT_RING (chan, 0);
+ OUT_RING (chan, stride);
+ OUT_RING (chan, height);
+ OUT_RING (chan, 1);
+ OUT_RING (chan, 0);
+ OUT_RING (chan, 0);
+ } else {
+ ret = RING_SPACE(chan, 2);
+ if (ret)
+ return ret;
+
+ BEGIN_RING(chan, NvSubM2MF, 0x021c, 1);
+ OUT_RING (chan, 1);
+ }
+
+ ret = RING_SPACE(chan, 14);
if (ret)
return ret;
- BEGIN_RING(chan, NvSubM2MF, 0x0200, 1);
- OUT_RING(chan, 1);
- BEGIN_RING(chan, NvSubM2MF, 0x021c, 1);
- OUT_RING(chan, 1);
+
+ BEGIN_RING(chan, NvSubM2MF, 0x0238, 2);
+ OUT_RING (chan, upper_32_bits(src_offset));
+ OUT_RING (chan, upper_32_bits(dst_offset));
+ BEGIN_RING(chan, NvSubM2MF, 0x030c, 8);
+ OUT_RING (chan, lower_32_bits(src_offset));
+ OUT_RING (chan, lower_32_bits(dst_offset));
+ OUT_RING (chan, stride);
+ OUT_RING (chan, stride);
+ OUT_RING (chan, stride);
+ OUT_RING (chan, height);
+ OUT_RING (chan, 0x00000101);
+ OUT_RING (chan, 0x00000000);
+ BEGIN_RING(chan, NvSubM2MF, NV_MEMORY_TO_MEMORY_FORMAT_NOP, 1);
+ OUT_RING (chan, 0);
+
+ length -= amount;
+ src_offset += amount;
+ dst_offset += amount;
}
+ return 0;
+}
+
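Illustrative numbers for a single pass of the NV50 copy loop above; a 1 MiB copy fits in one iteration:

	u64 length = 1ULL << 20;			/* 1 MiB left to copy            */
	u32 amount = min_t(u64, length, 4 << 20);	/* 1048576: capped at 4 MiB/pass */
	u32 stride = 16 * 4;				/* 64-byte lines                 */
	u32 height = amount / stride;			/* 16384 lines handed to M2MF    */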
+static int
+nv04_bo_move_m2mf(struct nouveau_channel *chan, struct ttm_buffer_object *bo,
+ struct ttm_mem_reg *old_mem, struct ttm_mem_reg *new_mem)
+{
+ u32 src_offset = old_mem->start << PAGE_SHIFT;
+ u32 dst_offset = new_mem->start << PAGE_SHIFT;
+ u32 page_count = new_mem->num_pages;
+ int ret;
+
+ ret = RING_SPACE(chan, 3);
+ if (ret)
+ return ret;
+
+ BEGIN_RING(chan, NvSubM2MF, NV_MEMORY_TO_MEMORY_FORMAT_DMA_SOURCE, 2);
+ OUT_RING (chan, nouveau_bo_mem_ctxdma(bo, chan, old_mem));
+ OUT_RING (chan, nouveau_bo_mem_ctxdma(bo, chan, new_mem));
+
page_count = new_mem->num_pages;
while (page_count) {
int line_count = (page_count > 2047) ? 2047 : page_count;
- if (dev_priv->card_type >= NV_50) {
- ret = RING_SPACE(chan, 3);
- if (ret)
- return ret;
- BEGIN_RING(chan, NvSubM2MF, 0x0238, 2);
- OUT_RING(chan, upper_32_bits(src_offset));
- OUT_RING(chan, upper_32_bits(dst_offset));
- }
ret = RING_SPACE(chan, 11);
if (ret)
return ret;
+
BEGIN_RING(chan, NvSubM2MF,
NV_MEMORY_TO_MEMORY_FORMAT_OFFSET_IN, 8);
- OUT_RING(chan, lower_32_bits(src_offset));
- OUT_RING(chan, lower_32_bits(dst_offset));
- OUT_RING(chan, PAGE_SIZE); /* src_pitch */
- OUT_RING(chan, PAGE_SIZE); /* dst_pitch */
- OUT_RING(chan, PAGE_SIZE); /* line_length */
- OUT_RING(chan, line_count);
- OUT_RING(chan, (1<<8)|(1<<0));
- OUT_RING(chan, 0);
+ OUT_RING (chan, src_offset);
+ OUT_RING (chan, dst_offset);
+ OUT_RING (chan, PAGE_SIZE); /* src_pitch */
+ OUT_RING (chan, PAGE_SIZE); /* dst_pitch */
+ OUT_RING (chan, PAGE_SIZE); /* line_length */
+ OUT_RING (chan, line_count);
+ OUT_RING (chan, 0x00000101);
+ OUT_RING (chan, 0x00000000);
BEGIN_RING(chan, NvSubM2MF, NV_MEMORY_TO_MEMORY_FORMAT_NOP, 1);
- OUT_RING(chan, 0);
+ OUT_RING (chan, 0);
page_count -= line_count;
src_offset += (PAGE_SIZE * line_count);
dst_offset += (PAGE_SIZE * line_count);
}
+ return 0;
+}
+
+static int
+nouveau_bo_move_m2mf(struct ttm_buffer_object *bo, int evict, bool intr,
+ bool no_wait_reserve, bool no_wait_gpu,
+ struct ttm_mem_reg *new_mem)
+{
+ struct drm_nouveau_private *dev_priv = nouveau_bdev(bo->bdev);
+ struct nouveau_bo *nvbo = nouveau_bo(bo);
+ struct nouveau_channel *chan;
+ int ret;
+
+ chan = nvbo->channel;
+ if (!chan || nvbo->no_vm)
+ chan = dev_priv->channel;
+
+ if (dev_priv->card_type < NV_50)
+ ret = nv04_bo_move_m2mf(chan, bo, &bo->mem, new_mem);
+ else
+ ret = nv50_bo_move_m2mf(chan, bo, &bo->mem, new_mem);
+ if (ret)
+ return ret;
+
return nouveau_bo_move_accel_cleanup(chan, nvbo, evict, no_wait_reserve, no_wait_gpu, new_mem);
}
@@ -606,12 +726,7 @@ nouveau_bo_move_flipd(struct ttm_buffer_object *bo, bool evict, bool intr,
ret = ttm_bo_move_ttm(bo, evict, no_wait_reserve, no_wait_gpu, new_mem);
out:
- if (tmp_mem.mm_node) {
- spin_lock(&bo->bdev->glob->lru_lock);
- drm_mm_put_block(tmp_mem.mm_node);
- spin_unlock(&bo->bdev->glob->lru_lock);
- }
-
+ ttm_bo_mem_put(bo, &tmp_mem);
return ret;
}
@@ -644,12 +759,7 @@ nouveau_bo_move_flips(struct ttm_buffer_object *bo, bool evict, bool intr,
goto out;
out:
- if (tmp_mem.mm_node) {
- spin_lock(&bo->bdev->glob->lru_lock);
- drm_mm_put_block(tmp_mem.mm_node);
- spin_unlock(&bo->bdev->glob->lru_lock);
- }
-
+ ttm_bo_mem_put(bo, &tmp_mem);
return ret;
}
@@ -669,12 +779,13 @@ nouveau_bo_vm_bind(struct ttm_buffer_object *bo, struct ttm_mem_reg *new_mem,
return 0;
}
- offset = new_mem->mm_node->start << PAGE_SHIFT;
+ offset = new_mem->start << PAGE_SHIFT;
if (dev_priv->card_type == NV_50) {
ret = nv50_mem_vm_bind_linear(dev,
offset + dev_priv->vm_vram_base,
- new_mem->size, nvbo->tile_flags,
+ new_mem->size,
+ nouveau_bo_tile_layout(nvbo),
offset);
if (ret)
return ret;
@@ -719,12 +830,6 @@ nouveau_bo_move(struct ttm_buffer_object *bo, bool evict, bool intr,
if (ret)
return ret;
- /* Software copy if the card isn't up and running yet. */
- if (!dev_priv->channel) {
- ret = ttm_bo_move_memcpy(bo, evict, no_wait_reserve, no_wait_gpu, new_mem);
- goto out;
- }
-
/* Fake bo copy. */
if (old_mem->mem_type == TTM_PL_SYSTEM && !bo->ttm) {
BUG_ON(bo->mem.mm_node != NULL);
@@ -733,6 +838,12 @@ nouveau_bo_move(struct ttm_buffer_object *bo, bool evict, bool intr,
goto out;
}
+ /* Software copy if the card isn't up and running yet. */
+ if (!dev_priv->channel) {
+ ret = ttm_bo_move_memcpy(bo, evict, no_wait_reserve, no_wait_gpu, new_mem);
+ goto out;
+ }
+
/* Hardware assisted copy. */
if (new_mem->mem_type == TTM_PL_SYSTEM)
ret = nouveau_bo_move_flipd(bo, evict, intr, no_wait_reserve, no_wait_gpu, new_mem);
@@ -783,14 +894,14 @@ nouveau_ttm_io_mem_reserve(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem)
case TTM_PL_TT:
#if __OS_HAS_AGP
if (dev_priv->gart_info.type == NOUVEAU_GART_AGP) {
- mem->bus.offset = mem->mm_node->start << PAGE_SHIFT;
+ mem->bus.offset = mem->start << PAGE_SHIFT;
mem->bus.base = dev_priv->gart_info.aper_base;
mem->bus.is_iomem = true;
}
#endif
break;
case TTM_PL_VRAM:
- mem->bus.offset = mem->mm_node->start << PAGE_SHIFT;
+ mem->bus.offset = mem->start << PAGE_SHIFT;
mem->bus.base = pci_resource_start(dev->pdev, 1);
mem->bus.is_iomem = true;
break;
@@ -808,7 +919,27 @@ nouveau_ttm_io_mem_free(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem)
static int
nouveau_ttm_fault_reserve_notify(struct ttm_buffer_object *bo)
{
- return 0;
+ struct drm_nouveau_private *dev_priv = nouveau_bdev(bo->bdev);
+ struct nouveau_bo *nvbo = nouveau_bo(bo);
+
+ /* as long as the bo isn't in vram, and isn't tiled, we've got
+ * nothing to do here.
+ */
+ if (bo->mem.mem_type != TTM_PL_VRAM) {
+ if (dev_priv->card_type < NV_50 ||
+ !nouveau_bo_tile_layout(nvbo))
+ return 0;
+ }
+
+ /* make sure bo is in mappable vram */
+ if (bo->mem.start + bo->mem.num_pages < dev_priv->fb_mappable_pages)
+ return 0;
+
+
+ nvbo->placement.fpfn = 0;
+ nvbo->placement.lpfn = dev_priv->fb_mappable_pages;
+ nouveau_bo_placement_set(nvbo, TTM_PL_VRAM, 0);
+ return ttm_bo_validate(bo, &nvbo->placement, false, true, false);
}
struct ttm_bo_driver nouveau_bo_driver = {
diff --git a/drivers/gpu/drm/nouveau/nouveau_calc.c b/drivers/gpu/drm/nouveau/nouveau_calc.c
index ca85da784846..dad96cce5e39 100644
--- a/drivers/gpu/drm/nouveau/nouveau_calc.c
+++ b/drivers/gpu/drm/nouveau/nouveau_calc.c
@@ -198,8 +198,8 @@ nv04_update_arb(struct drm_device *dev, int VClk, int bpp,
struct drm_nouveau_private *dev_priv = dev->dev_private;
struct nv_fifo_info fifo_data;
struct nv_sim_state sim_data;
- int MClk = nouveau_hw_get_clock(dev, MPLL);
- int NVClk = nouveau_hw_get_clock(dev, NVPLL);
+ int MClk = nouveau_hw_get_clock(dev, PLL_MEMORY);
+ int NVClk = nouveau_hw_get_clock(dev, PLL_CORE);
uint32_t cfg1 = nvReadFB(dev, NV04_PFB_CFG1);
sim_data.pclk_khz = VClk;
@@ -234,7 +234,7 @@ nv04_update_arb(struct drm_device *dev, int VClk, int bpp,
}
static void
-nv30_update_arb(int *burst, int *lwm)
+nv20_update_arb(int *burst, int *lwm)
{
unsigned int fifo_size, burst_size, graphics_lwm;
@@ -251,14 +251,14 @@ nouveau_calc_arb(struct drm_device *dev, int vclk, int bpp, int *burst, int *lwm
{
struct drm_nouveau_private *dev_priv = dev->dev_private;
- if (dev_priv->card_type < NV_30)
+ if (dev_priv->card_type < NV_20)
nv04_update_arb(dev, vclk, bpp, burst, lwm);
else if ((dev->pci_device & 0xfff0) == 0x0240 /*CHIPSET_C51*/ ||
(dev->pci_device & 0xfff0) == 0x03d0 /*CHIPSET_C512*/) {
*burst = 128;
*lwm = 0x0480;
} else
- nv30_update_arb(burst, lwm);
+ nv20_update_arb(burst, lwm);
}
static int
diff --git a/drivers/gpu/drm/nouveau/nouveau_channel.c b/drivers/gpu/drm/nouveau/nouveau_channel.c
index 0480f064f2c1..373950e34814 100644
--- a/drivers/gpu/drm/nouveau/nouveau_channel.c
+++ b/drivers/gpu/drm/nouveau/nouveau_channel.c
@@ -48,14 +48,14 @@ nouveau_channel_pushbuf_ctxdma_init(struct nouveau_channel *chan)
dev_priv->gart_info.aper_size,
NV_DMA_ACCESS_RO, &pushbuf,
NULL);
- chan->pushbuf_base = pb->bo.mem.mm_node->start << PAGE_SHIFT;
+ chan->pushbuf_base = pb->bo.mem.start << PAGE_SHIFT;
} else
if (dev_priv->card_type != NV_04) {
ret = nouveau_gpuobj_dma_new(chan, NV_CLASS_DMA_IN_MEMORY, 0,
dev_priv->fb_available_size,
NV_DMA_ACCESS_RO,
NV_DMA_TARGET_VIDMEM, &pushbuf);
- chan->pushbuf_base = pb->bo.mem.mm_node->start << PAGE_SHIFT;
+ chan->pushbuf_base = pb->bo.mem.start << PAGE_SHIFT;
} else {
/* NV04 cmdbuf hack, from original ddx.. not sure of its
* exact reason for existing :) PCI access to cmdbuf in
@@ -67,17 +67,11 @@ nouveau_channel_pushbuf_ctxdma_init(struct nouveau_channel *chan)
dev_priv->fb_available_size,
NV_DMA_ACCESS_RO,
NV_DMA_TARGET_PCI, &pushbuf);
- chan->pushbuf_base = pb->bo.mem.mm_node->start << PAGE_SHIFT;
- }
-
- ret = nouveau_gpuobj_ref_add(dev, chan, 0, pushbuf, &chan->pushbuf);
- if (ret) {
- NV_ERROR(dev, "Error referencing pushbuf ctxdma: %d\n", ret);
- if (pushbuf != dev_priv->gart_info.sg_ctxdma)
- nouveau_gpuobj_del(dev, &pushbuf);
- return ret;
+ chan->pushbuf_base = pb->bo.mem.start << PAGE_SHIFT;
}
+ nouveau_gpuobj_ref(pushbuf, &chan->pushbuf);
+ nouveau_gpuobj_ref(NULL, &pushbuf);
return 0;
}
@@ -229,7 +223,7 @@ nouveau_channel_alloc(struct drm_device *dev, struct nouveau_channel **chan_ret,
ret = nouveau_dma_init(chan);
if (!ret)
- ret = nouveau_fence_init(chan);
+ ret = nouveau_fence_channel_init(chan);
if (ret) {
nouveau_channel_free(chan);
return ret;
@@ -276,7 +270,7 @@ nouveau_channel_free(struct nouveau_channel *chan)
* above attempts at idling were OK, but if we failed this'll tell TTM
* we're done with the buffers.
*/
- nouveau_fence_fini(chan);
+ nouveau_fence_channel_fini(chan);
/* This will prevent pfifo from switching channels. */
pfifo->reassign(dev, false);
@@ -308,8 +302,9 @@ nouveau_channel_free(struct nouveau_channel *chan)
spin_unlock_irqrestore(&dev_priv->context_switch_lock, flags);
/* Release the channel's resources */
- nouveau_gpuobj_ref_del(dev, &chan->pushbuf);
+ nouveau_gpuobj_ref(NULL, &chan->pushbuf);
if (chan->pushbuf_bo) {
+ nouveau_bo_unmap(chan->pushbuf_bo);
nouveau_bo_unpin(chan->pushbuf_bo);
nouveau_bo_ref(NULL, &chan->pushbuf_bo);
}
diff --git a/drivers/gpu/drm/nouveau/nouveau_connector.c b/drivers/gpu/drm/nouveau/nouveau_connector.c
index fc737037f751..52c356e9a3d1 100644
--- a/drivers/gpu/drm/nouveau/nouveau_connector.c
+++ b/drivers/gpu/drm/nouveau/nouveau_connector.c
@@ -76,6 +76,22 @@ nouveau_encoder_connector_get(struct nouveau_encoder *encoder)
return NULL;
}
+/*TODO: This could use improvement, and learn to handle the fixed
+ * BIOS tables etc. It's fine currently, for its only user.
+ */
+int
+nouveau_connector_bpp(struct drm_connector *connector)
+{
+ struct nouveau_connector *nv_connector = nouveau_connector(connector);
+
+ if (nv_connector->edid && nv_connector->edid->revision >= 4) {
+ u8 bpc = ((nv_connector->edid->input & 0x70) >> 3) + 4;
+ if (bpc > 4)
+ return bpc;
+ }
+
+ return 18;
+}
static void
nouveau_connector_destroy(struct drm_connector *drm_connector)
@@ -130,6 +146,36 @@ nouveau_connector_ddc_detect(struct drm_connector *connector,
return NULL;
}
+static struct nouveau_encoder *
+nouveau_connector_of_detect(struct drm_connector *connector)
+{
+#ifdef __powerpc__
+ struct drm_device *dev = connector->dev;
+ struct nouveau_connector *nv_connector = nouveau_connector(connector);
+ struct nouveau_encoder *nv_encoder;
+ struct device_node *cn, *dn = pci_device_to_OF_node(dev->pdev);
+
+ if (!dn ||
+ !((nv_encoder = find_encoder_by_type(connector, OUTPUT_TMDS)) ||
+ (nv_encoder = find_encoder_by_type(connector, OUTPUT_ANALOG))))
+ return NULL;
+
+ for_each_child_of_node(dn, cn) {
+ const char *name = of_get_property(cn, "name", NULL);
+ const void *edid = of_get_property(cn, "EDID", NULL);
+ int idx = name ? name[strlen(name) - 1] - 'A' : 0;
+
+ if (nv_encoder->dcb->i2c_index == idx && edid) {
+ nv_connector->edid =
+ kmemdup(edid, EDID_LENGTH, GFP_KERNEL);
+ of_node_put(cn);
+ return nv_encoder;
+ }
+ }
+#endif
+ return NULL;
+}
+
static void
nouveau_connector_set_encoder(struct drm_connector *connector,
struct nouveau_encoder *nv_encoder)
@@ -225,11 +271,17 @@ nouveau_connector_detect(struct drm_connector *connector, bool force)
return connector_status_connected;
}
+ nv_encoder = nouveau_connector_of_detect(connector);
+ if (nv_encoder) {
+ nouveau_connector_set_encoder(connector, nv_encoder);
+ return connector_status_connected;
+ }
+
detect_analog:
nv_encoder = find_encoder_by_type(connector, OUTPUT_ANALOG);
if (!nv_encoder && !nouveau_tv_disable)
nv_encoder = find_encoder_by_type(connector, OUTPUT_TV);
- if (nv_encoder) {
+ if (nv_encoder && force) {
struct drm_encoder *encoder = to_drm_encoder(nv_encoder);
struct drm_encoder_helper_funcs *helper =
encoder->helper_private;
@@ -589,11 +641,28 @@ nouveau_connector_get_modes(struct drm_connector *connector)
return ret;
}
+static unsigned
+get_tmds_link_bandwidth(struct drm_connector *connector)
+{
+ struct nouveau_connector *nv_connector = nouveau_connector(connector);
+ struct drm_nouveau_private *dev_priv = connector->dev->dev_private;
+ struct dcb_entry *dcb = nv_connector->detected_encoder->dcb;
+
+ if (dcb->location != DCB_LOC_ON_CHIP ||
+ dev_priv->chipset >= 0x46)
+ return 165000;
+ else if (dev_priv->chipset >= 0x40)
+ return 155000;
+ else if (dev_priv->chipset >= 0x18)
+ return 135000;
+ else
+ return 112000;
+}
+
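A worked example of the new TMDS limit (chipset chosen for illustration): an on-chip encoder on an NV44 (chipset 0x44) falls in the 0x40..0x45 range and gets 155000 kHz of single-link bandwidth, doubled when dual-link is possible and allowed:

	unsigned max_clock = 155000;	/* DCB_LOC_ON_CHIP, 0x40 <= chipset < 0x46    */
	bool duallink = true;		/* nouveau_duallink && dcb->duallink_possible */

	if (duallink)
		max_clock *= 2;		/* 310000 kHz */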
static int
nouveau_connector_mode_valid(struct drm_connector *connector,
struct drm_display_mode *mode)
{
- struct drm_nouveau_private *dev_priv = connector->dev->dev_private;
struct nouveau_connector *nv_connector = nouveau_connector(connector);
struct nouveau_encoder *nv_encoder = nv_connector->detected_encoder;
struct drm_encoder *encoder = to_drm_encoder(nv_encoder);
@@ -611,11 +680,9 @@ nouveau_connector_mode_valid(struct drm_connector *connector,
max_clock = 400000;
break;
case OUTPUT_TMDS:
- if ((dev_priv->card_type >= NV_50 && !nouveau_duallink) ||
- !nv_encoder->dcb->duallink_possible)
- max_clock = 165000;
- else
- max_clock = 330000;
+ max_clock = get_tmds_link_bandwidth(connector);
+ if (nouveau_duallink && nv_encoder->dcb->duallink_possible)
+ max_clock *= 2;
break;
case OUTPUT_ANALOG:
max_clock = nv_encoder->dcb->crtconf.maxfreq;
@@ -630,7 +697,7 @@ nouveau_connector_mode_valid(struct drm_connector *connector,
else
max_clock = nv_encoder->dp.link_nr * 162000;
- clock *= 3;
+ clock = clock * nouveau_connector_bpp(connector) / 8;
break;
default:
BUG_ON(1);
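Worked arithmetic for the DisplayPort branch above (mode chosen for illustration): four 1.62 Gb/s lanes give a 648000 limit, and a 148500 kHz mode at the default 18 bits per pixel needs 148500 * 18 / 8 = 334125, so it validates:

	int max_clock = 4 * 162000;	/* 648000                              */
	int clock = 148500 * 18 / 8;	/* 334125: 1080p60 pixel clock, 18 bpp */
	bool ok = clock <= max_clock;	/* true -> mode accepted               */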
@@ -657,44 +724,6 @@ nouveau_connector_best_encoder(struct drm_connector *connector)
return NULL;
}
-void
-nouveau_connector_set_polling(struct drm_connector *connector)
-{
- struct drm_device *dev = connector->dev;
- struct drm_nouveau_private *dev_priv = dev->dev_private;
- struct drm_crtc *crtc;
- bool spare_crtc = false;
-
- list_for_each_entry(crtc, &dev->mode_config.crtc_list, head)
- spare_crtc |= !crtc->enabled;
-
- connector->polled = 0;
-
- switch (connector->connector_type) {
- case DRM_MODE_CONNECTOR_VGA:
- case DRM_MODE_CONNECTOR_TV:
- if (dev_priv->card_type >= NV_50 ||
- (nv_gf4_disp_arch(dev) && spare_crtc))
- connector->polled = DRM_CONNECTOR_POLL_CONNECT;
- break;
-
- case DRM_MODE_CONNECTOR_DVII:
- case DRM_MODE_CONNECTOR_DVID:
- case DRM_MODE_CONNECTOR_HDMIA:
- case DRM_MODE_CONNECTOR_DisplayPort:
- case DRM_MODE_CONNECTOR_eDP:
- if (dev_priv->card_type >= NV_50)
- connector->polled = DRM_CONNECTOR_POLL_HPD;
- else if (connector->connector_type == DRM_MODE_CONNECTOR_DVID ||
- spare_crtc)
- connector->polled = DRM_CONNECTOR_POLL_CONNECT;
- break;
-
- default:
- break;
- }
-}
-
static const struct drm_connector_helper_funcs
nouveau_connector_helper_funcs = {
.get_modes = nouveau_connector_get_modes,
@@ -820,6 +849,7 @@ nouveau_connector_create(struct drm_device *dev, int index)
dev->mode_config.scaling_mode_property,
nv_connector->scaling_mode);
}
+ connector->polled = DRM_CONNECTOR_POLL_CONNECT;
/* fall-through */
case DCB_CONNECTOR_TV_0:
case DCB_CONNECTOR_TV_1:
@@ -836,11 +866,16 @@ nouveau_connector_create(struct drm_device *dev, int index)
dev->mode_config.dithering_mode_property,
nv_connector->use_dithering ?
DRM_MODE_DITHERING_ON : DRM_MODE_DITHERING_OFF);
+
+ if (dcb->type != DCB_CONNECTOR_LVDS) {
+ if (dev_priv->card_type >= NV_50)
+ connector->polled = DRM_CONNECTOR_POLL_HPD;
+ else
+ connector->polled = DRM_CONNECTOR_POLL_CONNECT;
+ }
break;
}
- nouveau_connector_set_polling(connector);
-
drm_sysfs_connector_add(connector);
dcb->drm = connector;
return dcb->drm;
diff --git a/drivers/gpu/drm/nouveau/nouveau_connector.h b/drivers/gpu/drm/nouveau/nouveau_connector.h
index 0d2e668ccfe5..711b1e9203af 100644
--- a/drivers/gpu/drm/nouveau/nouveau_connector.h
+++ b/drivers/gpu/drm/nouveau/nouveau_connector.h
@@ -52,7 +52,7 @@ static inline struct nouveau_connector *nouveau_connector(
struct drm_connector *
nouveau_connector_create(struct drm_device *, int index);
-void
-nouveau_connector_set_polling(struct drm_connector *);
+int
+nouveau_connector_bpp(struct drm_connector *);
#endif /* __NOUVEAU_CONNECTOR_H__ */
diff --git a/drivers/gpu/drm/nouveau/nouveau_debugfs.c b/drivers/gpu/drm/nouveau/nouveau_debugfs.c
index 7933de4aff2e..8e1592368cce 100644
--- a/drivers/gpu/drm/nouveau/nouveau_debugfs.c
+++ b/drivers/gpu/drm/nouveau/nouveau_debugfs.c
@@ -157,7 +157,23 @@ nouveau_debugfs_vbios_image(struct seq_file *m, void *data)
return 0;
}
+static int
+nouveau_debugfs_evict_vram(struct seq_file *m, void *data)
+{
+ struct drm_info_node *node = (struct drm_info_node *) m->private;
+ struct drm_nouveau_private *dev_priv = node->minor->dev->dev_private;
+ int ret;
+
+ ret = ttm_bo_evict_mm(&dev_priv->ttm.bdev, TTM_PL_VRAM);
+ if (ret)
+ seq_printf(m, "failed: %d", ret);
+ else
+ seq_printf(m, "succeeded\n");
+ return 0;
+}
+
static struct drm_info_list nouveau_debugfs_list[] = {
+ { "evict_vram", nouveau_debugfs_evict_vram, 0, NULL },
{ "chipset", nouveau_debugfs_chipset_info, 0, NULL },
{ "memory", nouveau_debugfs_memory_info, 0, NULL },
{ "vbios.rom", nouveau_debugfs_vbios_image, 0, NULL },
diff --git a/drivers/gpu/drm/nouveau/nouveau_dma.c b/drivers/gpu/drm/nouveau/nouveau_dma.c
index 2e3c6caa97ee..82581e600dcd 100644
--- a/drivers/gpu/drm/nouveau/nouveau_dma.c
+++ b/drivers/gpu/drm/nouveau/nouveau_dma.c
@@ -28,6 +28,7 @@
#include "drm.h"
#include "nouveau_drv.h"
#include "nouveau_dma.h"
+#include "nouveau_ramht.h"
void
nouveau_dma_pre_init(struct nouveau_channel *chan)
@@ -58,26 +59,17 @@ nouveau_dma_init(struct nouveau_channel *chan)
{
struct drm_device *dev = chan->dev;
struct drm_nouveau_private *dev_priv = dev->dev_private;
- struct nouveau_gpuobj *m2mf = NULL;
- struct nouveau_gpuobj *nvsw = NULL;
+ struct nouveau_gpuobj *obj = NULL;
int ret, i;
/* Create NV_MEMORY_TO_MEMORY_FORMAT for buffer moves */
ret = nouveau_gpuobj_gr_new(chan, dev_priv->card_type < NV_50 ?
- 0x0039 : 0x5039, &m2mf);
+ 0x0039 : 0x5039, &obj);
if (ret)
return ret;
- ret = nouveau_gpuobj_ref_add(dev, chan, NvM2MF, m2mf, NULL);
- if (ret)
- return ret;
-
- /* Create an NV_SW object for various sync purposes */
- ret = nouveau_gpuobj_sw_new(chan, NV_SW, &nvsw);
- if (ret)
- return ret;
-
- ret = nouveau_gpuobj_ref_add(dev, chan, NvSw, nvsw, NULL);
+ ret = nouveau_ramht_insert(chan, NvM2MF, obj);
+ nouveau_gpuobj_ref(NULL, &obj);
if (ret)
return ret;
@@ -91,11 +83,6 @@ nouveau_dma_init(struct nouveau_channel *chan)
if (ret)
return ret;
- /* Map M2MF notifier object - fbcon. */
- ret = nouveau_bo_map(chan->notifier_bo);
- if (ret)
- return ret;
-
/* Insert NOPS for NOUVEAU_DMA_SKIPS */
ret = RING_SPACE(chan, NOUVEAU_DMA_SKIPS);
if (ret)
@@ -113,13 +100,6 @@ nouveau_dma_init(struct nouveau_channel *chan)
BEGIN_RING(chan, NvSubM2MF, NV_MEMORY_TO_MEMORY_FORMAT_DMA_NOTIFY, 1);
OUT_RING(chan, NvNotify0);
- /* Initialise NV_SW */
- ret = RING_SPACE(chan, 2);
- if (ret)
- return ret;
- BEGIN_RING(chan, NvSubSw, 0, 1);
- OUT_RING(chan, NvSw);
-
/* Sit back and pray the channel works.. */
FIRE_RING(chan);
@@ -217,7 +197,7 @@ nv50_dma_push_wait(struct nouveau_channel *chan, int count)
chan->dma.ib_free = get - chan->dma.ib_put;
if (chan->dma.ib_free <= 0)
- chan->dma.ib_free += chan->dma.ib_max + 1;
+ chan->dma.ib_free += chan->dma.ib_max;
}
return 0;
diff --git a/drivers/gpu/drm/nouveau/nouveau_dma.h b/drivers/gpu/drm/nouveau/nouveau_dma.h
index 8b05c15866d5..d578c21d3c8d 100644
--- a/drivers/gpu/drm/nouveau/nouveau_dma.h
+++ b/drivers/gpu/drm/nouveau/nouveau_dma.h
@@ -72,6 +72,7 @@ enum {
NvGdiRect = 0x8000000c,
NvImageBlit = 0x8000000d,
NvSw = 0x8000000e,
+ NvSema = 0x8000000f,
/* G80+ display objects */
NvEvoVRAM = 0x01000000,
diff --git a/drivers/gpu/drm/nouveau/nouveau_dp.c b/drivers/gpu/drm/nouveau/nouveau_dp.c
index 8a1b188b4cd1..4562f309ae3d 100644
--- a/drivers/gpu/drm/nouveau/nouveau_dp.c
+++ b/drivers/gpu/drm/nouveau/nouveau_dp.c
@@ -317,7 +317,8 @@ train:
return false;
config[0] = nv_encoder->dp.link_nr;
- if (nv_encoder->dp.dpcd_version >= 0x11)
+ if (nv_encoder->dp.dpcd_version >= 0x11 &&
+ nv_encoder->dp.enhanced_frame)
config[0] |= DP_LANE_COUNT_ENHANCED_FRAME_EN;
ret = nouveau_dp_lane_count_set(encoder, config[0]);
@@ -468,10 +469,12 @@ nouveau_dp_detect(struct drm_encoder *encoder)
!nv_encoder->dcb->dpconf.link_bw)
nv_encoder->dp.link_bw = DP_LINK_BW_1_62;
- nv_encoder->dp.link_nr = dpcd[2] & 0xf;
+ nv_encoder->dp.link_nr = dpcd[2] & DP_MAX_LANE_COUNT_MASK;
if (nv_encoder->dp.link_nr > nv_encoder->dcb->dpconf.link_nr)
nv_encoder->dp.link_nr = nv_encoder->dcb->dpconf.link_nr;
+ nv_encoder->dp.enhanced_frame = (dpcd[2] & DP_ENHANCED_FRAME_CAP);
+
return true;
}
@@ -524,7 +527,8 @@ nouveau_dp_auxch(struct nouveau_i2c_chan *auxch, int cmd, int addr,
nv_wr32(dev, NV50_AUXCH_CTRL(index), ctrl | 0x80000000);
nv_wr32(dev, NV50_AUXCH_CTRL(index), ctrl);
nv_wr32(dev, NV50_AUXCH_CTRL(index), ctrl | 0x00010000);
- if (!nv_wait(NV50_AUXCH_CTRL(index), 0x00010000, 0x00000000)) {
+ if (!nv_wait(dev, NV50_AUXCH_CTRL(index),
+ 0x00010000, 0x00000000)) {
NV_ERROR(dev, "expected bit 16 == 0, got 0x%08x\n",
nv_rd32(dev, NV50_AUXCH_CTRL(index)));
ret = -EBUSY;
diff --git a/drivers/gpu/drm/nouveau/nouveau_drv.c b/drivers/gpu/drm/nouveau/nouveau_drv.c
index eb15345162a0..90875494a65a 100644
--- a/drivers/gpu/drm/nouveau/nouveau_drv.c
+++ b/drivers/gpu/drm/nouveau/nouveau_drv.c
@@ -31,13 +31,14 @@
#include "nouveau_hw.h"
#include "nouveau_fb.h"
#include "nouveau_fbcon.h"
+#include "nouveau_pm.h"
#include "nv50_display.h"
#include "drm_pciids.h"
-MODULE_PARM_DESC(noagp, "Disable AGP");
-int nouveau_noagp;
-module_param_named(noagp, nouveau_noagp, int, 0400);
+MODULE_PARM_DESC(agpmode, "AGP mode (0 to disable AGP)");
+int nouveau_agpmode = -1;
+module_param_named(agpmode, nouveau_agpmode, int, 0400);
MODULE_PARM_DESC(modeset, "Enable kernel modesetting");
static int nouveau_modeset = -1; /* kms */
@@ -79,6 +80,10 @@ MODULE_PARM_DESC(nofbaccel, "Disable fbcon acceleration");
int nouveau_nofbaccel = 0;
module_param_named(nofbaccel, nouveau_nofbaccel, int, 0400);
+MODULE_PARM_DESC(force_post, "Force POST");
+int nouveau_force_post = 0;
+module_param_named(force_post, nouveau_force_post, int, 0400);
+
MODULE_PARM_DESC(override_conntype, "Ignore DCB connector type");
int nouveau_override_conntype = 0;
module_param_named(override_conntype, nouveau_override_conntype, int, 0400);
@@ -102,6 +107,14 @@ MODULE_PARM_DESC(reg_debug, "Register access debug bitmask:\n"
int nouveau_reg_debug;
module_param_named(reg_debug, nouveau_reg_debug, int, 0600);
+MODULE_PARM_DESC(perflvl, "Performance level (default: boot)\n");
+char *nouveau_perflvl;
+module_param_named(perflvl, nouveau_perflvl, charp, 0400);
+
+MODULE_PARM_DESC(perflvl_wr, "Allow perflvl changes (warning: dangerous!)\n");
+int nouveau_perflvl_wr;
+module_param_named(perflvl_wr, nouveau_perflvl_wr, int, 0400);
+
int nouveau_fbpercrtc;
#if 0
module_param_named(fbpercrtc, nouveau_fbpercrtc, int, 0400);
@@ -271,6 +284,8 @@ nouveau_pci_resume(struct pci_dev *pdev)
if (ret)
return ret;
+ nouveau_pm_resume(dev);
+
if (dev_priv->gart_info.type == NOUVEAU_GART_AGP) {
ret = nouveau_mem_init_agp(dev);
if (ret) {
@@ -379,8 +394,6 @@ static struct drm_driver driver = {
.irq_uninstall = nouveau_irq_uninstall,
.irq_handler = nouveau_irq_handler,
.reclaim_buffers = drm_core_reclaim_buffers,
- .get_map_ofs = drm_core_get_map_ofs,
- .get_reg_ofs = drm_core_get_reg_ofs,
.ioctls = nouveau_ioctls,
.fops = {
.owner = THIS_MODULE,
diff --git a/drivers/gpu/drm/nouveau/nouveau_drv.h b/drivers/gpu/drm/nouveau/nouveau_drv.h
index b1be617373b6..1c7db64c03bf 100644
--- a/drivers/gpu/drm/nouveau/nouveau_drv.h
+++ b/drivers/gpu/drm/nouveau/nouveau_drv.h
@@ -100,6 +100,9 @@ struct nouveau_bo {
int pin_refcnt;
};
+#define nouveau_bo_tile_layout(nvbo) \
+ ((nvbo)->tile_flags & NOUVEAU_GEM_TILE_LAYOUT_MASK)
+
static inline struct nouveau_bo *
nouveau_bo(struct ttm_buffer_object *bo)
{
@@ -133,22 +136,24 @@ enum nouveau_flags {
#define NVOBJ_ENGINE_DISPLAY 2
#define NVOBJ_ENGINE_INT 0xdeadbeef
-#define NVOBJ_FLAG_ALLOW_NO_REFS (1 << 0)
#define NVOBJ_FLAG_ZERO_ALLOC (1 << 1)
#define NVOBJ_FLAG_ZERO_FREE (1 << 2)
-#define NVOBJ_FLAG_FAKE (1 << 3)
struct nouveau_gpuobj {
+ struct drm_device *dev;
+ struct kref refcount;
struct list_head list;
- struct nouveau_channel *im_channel;
struct drm_mm_node *im_pramin;
struct nouveau_bo *im_backing;
- uint32_t im_backing_start;
uint32_t *im_backing_suspend;
int im_bound;
uint32_t flags;
- int refcount;
+
+ u32 size;
+ u32 pinst;
+ u32 cinst;
+ u64 vinst;
uint32_t engine;
uint32_t class;
@@ -157,16 +162,6 @@ struct nouveau_gpuobj {
void *priv;
};
-struct nouveau_gpuobj_ref {
- struct list_head list;
-
- struct nouveau_gpuobj *gpuobj;
- uint32_t instance;
-
- struct nouveau_channel *channel;
- int handle;
-};
-
struct nouveau_channel {
struct drm_device *dev;
int id;
@@ -192,33 +187,32 @@ struct nouveau_channel {
} fence;
/* DMA push buffer */
- struct nouveau_gpuobj_ref *pushbuf;
- struct nouveau_bo *pushbuf_bo;
- uint32_t pushbuf_base;
+ struct nouveau_gpuobj *pushbuf;
+ struct nouveau_bo *pushbuf_bo;
+ uint32_t pushbuf_base;
/* Notifier memory */
struct nouveau_bo *notifier_bo;
struct drm_mm notifier_heap;
/* PFIFO context */
- struct nouveau_gpuobj_ref *ramfc;
- struct nouveau_gpuobj_ref *cache;
+ struct nouveau_gpuobj *ramfc;
+ struct nouveau_gpuobj *cache;
/* PGRAPH context */
/* XXX may be merge 2 pointers as private data ??? */
- struct nouveau_gpuobj_ref *ramin_grctx;
+ struct nouveau_gpuobj *ramin_grctx;
void *pgraph_ctx;
/* NV50 VM */
- struct nouveau_gpuobj *vm_pd;
- struct nouveau_gpuobj_ref *vm_gart_pt;
- struct nouveau_gpuobj_ref *vm_vram_pt[NV50_VM_VRAM_NR];
+ struct nouveau_gpuobj *vm_pd;
+ struct nouveau_gpuobj *vm_gart_pt;
+ struct nouveau_gpuobj *vm_vram_pt[NV50_VM_VRAM_NR];
/* Objects */
- struct nouveau_gpuobj_ref *ramin; /* Private instmem */
- struct drm_mm ramin_heap; /* Private PRAMIN heap */
- struct nouveau_gpuobj_ref *ramht; /* Hash table */
- struct list_head ramht_refs; /* Objects referenced by RAMHT */
+ struct nouveau_gpuobj *ramin; /* Private instmem */
+ struct drm_mm ramin_heap; /* Private PRAMIN heap */
+ struct nouveau_ramht *ramht; /* Hash table */
/* GPU object info for stuff used in-kernel (mm_enabled) */
uint32_t m2mf_ntfy;
@@ -296,7 +290,7 @@ struct nouveau_fb_engine {
struct nouveau_fifo_engine {
int channels;
- struct nouveau_gpuobj_ref *playlist[2];
+ struct nouveau_gpuobj *playlist[2];
int cur_playlist;
int (*init)(struct drm_device *);
@@ -305,7 +299,6 @@ struct nouveau_fifo_engine {
void (*disable)(struct drm_device *);
void (*enable)(struct drm_device *);
bool (*reassign)(struct drm_device *, bool enable);
- bool (*cache_flush)(struct drm_device *dev);
bool (*cache_pull)(struct drm_device *dev, bool enable);
int (*channel_id)(struct drm_device *);
@@ -314,6 +307,7 @@ struct nouveau_fifo_engine {
void (*destroy_context)(struct nouveau_channel *);
int (*load_context)(struct nouveau_channel *);
int (*unload_context)(struct drm_device *);
+ void (*tlb_flush)(struct drm_device *dev);
};
struct nouveau_pgraph_object_method {
@@ -334,7 +328,7 @@ struct nouveau_pgraph_engine {
int grctx_size;
/* NV2x/NV3x context table (0x400780) */
- struct nouveau_gpuobj_ref *ctx_table;
+ struct nouveau_gpuobj *ctx_table;
int (*init)(struct drm_device *);
void (*takedown)(struct drm_device *);
@@ -346,6 +340,7 @@ struct nouveau_pgraph_engine {
void (*destroy_context)(struct nouveau_channel *);
int (*load_context)(struct nouveau_channel *);
int (*unload_context)(struct drm_device *);
+ void (*tlb_flush)(struct drm_device *dev);
void (*set_region_tiling)(struct drm_device *dev, int i, uint32_t addr,
uint32_t size, uint32_t pitch);
@@ -369,6 +364,91 @@ struct nouveau_gpio_engine {
void (*irq_enable)(struct drm_device *, enum dcb_gpio_tag, bool on);
};
+struct nouveau_pm_voltage_level {
+ u8 voltage;
+ u8 vid;
+};
+
+struct nouveau_pm_voltage {
+ bool supported;
+ u8 vid_mask;
+
+ struct nouveau_pm_voltage_level *level;
+ int nr_level;
+};
+
+#define NOUVEAU_PM_MAX_LEVEL 8
+struct nouveau_pm_level {
+ struct device_attribute dev_attr;
+ char name[32];
+ int id;
+
+ u32 core;
+ u32 memory;
+ u32 shader;
+ u32 unk05;
+
+ u8 voltage;
+ u8 fanspeed;
+
+ u16 memscript;
+};
+
+struct nouveau_pm_temp_sensor_constants {
+ u16 offset_constant;
+ s16 offset_mult;
+ u16 offset_div;
+ u16 slope_mult;
+ u16 slope_div;
+};
+
+struct nouveau_pm_threshold_temp {
+ s16 critical;
+ s16 down_clock;
+ s16 fan_boost;
+};
+
+struct nouveau_pm_memtiming {
+ u32 reg_100220;
+ u32 reg_100224;
+ u32 reg_100228;
+ u32 reg_10022c;
+ u32 reg_100230;
+ u32 reg_100234;
+ u32 reg_100238;
+ u32 reg_10023c;
+};
+
+struct nouveau_pm_memtimings {
+ bool supported;
+ struct nouveau_pm_memtiming *timing;
+ int nr_timing;
+};
+
+struct nouveau_pm_engine {
+ struct nouveau_pm_voltage voltage;
+ struct nouveau_pm_level perflvl[NOUVEAU_PM_MAX_LEVEL];
+ int nr_perflvl;
+ struct nouveau_pm_memtimings memtimings;
+ struct nouveau_pm_temp_sensor_constants sensor_constants;
+ struct nouveau_pm_threshold_temp threshold_temp;
+
+ struct nouveau_pm_level boot;
+ struct nouveau_pm_level *cur;
+
+ struct device *hwmon;
+
+ int (*clock_get)(struct drm_device *, u32 id);
+ void *(*clock_pre)(struct drm_device *, struct nouveau_pm_level *,
+ u32 id, int khz);
+ void (*clock_set)(struct drm_device *, void *);
+ int (*voltage_get)(struct drm_device *);
+ int (*voltage_set)(struct drm_device *, int voltage);
+ int (*fanspeed_get)(struct drm_device *);
+ int (*fanspeed_set)(struct drm_device *, int fanspeed);
+ int (*temp_get)(struct drm_device *);
+};
+
struct nouveau_engine {
struct nouveau_instmem_engine instmem;
struct nouveau_mc_engine mc;
@@ -378,6 +458,7 @@ struct nouveau_engine {
struct nouveau_fifo_engine fifo;
struct nouveau_display_engine display;
struct nouveau_gpio_engine gpio;
+ struct nouveau_pm_engine pm;
};
struct nouveau_pll_vals {
@@ -409,13 +490,13 @@ enum nv04_fp_display_regs {
};
struct nv04_crtc_reg {
- unsigned char MiscOutReg; /* */
+ unsigned char MiscOutReg;
uint8_t CRTC[0xa0];
uint8_t CR58[0x10];
uint8_t Sequencer[5];
uint8_t Graphics[9];
uint8_t Attribute[21];
- unsigned char DAC[768]; /* Internal Colorlookuptable */
+ unsigned char DAC[768];
/* PCRTC regs */
uint32_t fb_start;
@@ -463,43 +544,9 @@ struct nv04_output_reg {
};
struct nv04_mode_state {
- uint32_t bpp;
- uint32_t width;
- uint32_t height;
- uint32_t interlace;
- uint32_t repaint0;
- uint32_t repaint1;
- uint32_t screen;
- uint32_t scale;
- uint32_t dither;
- uint32_t extra;
- uint32_t fifo;
- uint32_t pixel;
- uint32_t horiz;
- int arbitration0;
- int arbitration1;
- uint32_t pll;
- uint32_t pllB;
- uint32_t vpll;
- uint32_t vpll2;
- uint32_t vpllB;
- uint32_t vpll2B;
+ struct nv04_crtc_reg crtc_reg[2];
uint32_t pllsel;
uint32_t sel_clk;
- uint32_t general;
- uint32_t crtcOwner;
- uint32_t head;
- uint32_t head2;
- uint32_t cursorConfig;
- uint32_t cursor0;
- uint32_t cursor1;
- uint32_t cursor2;
- uint32_t timingH;
- uint32_t timingV;
- uint32_t displayV;
- uint32_t crtcSync;
-
- struct nv04_crtc_reg crtc_reg[2];
};
enum nouveau_card_type {
@@ -522,8 +569,14 @@ struct drm_nouveau_private {
int flags;
void __iomem *mmio;
+
+ spinlock_t ramin_lock;
void __iomem *ramin;
- uint32_t ramin_size;
+ u32 ramin_size;
+ u32 ramin_base;
+ bool ramin_available;
+ struct drm_mm ramin_heap;
+ struct list_head gpuobj_list;
struct nouveau_bo *vga_ram;
@@ -531,6 +584,12 @@ struct drm_nouveau_private {
struct work_struct irq_work;
struct work_struct hpd_work;
+ struct {
+ spinlock_t lock;
+ uint32_t hpd0_bits;
+ uint32_t hpd1_bits;
+ } hpd_state;
+
struct list_head vbl_waiting;
struct {
@@ -540,6 +599,12 @@ struct drm_nouveau_private {
atomic_t validate_sequence;
} ttm;
+ struct {
+ spinlock_t lock;
+ struct drm_mm heap;
+ struct nouveau_bo *bo;
+ } fence;
+
int fifo_alloc_count;
struct nouveau_channel *fifos[NOUVEAU_MAX_CHANNEL_NR];
@@ -550,15 +615,11 @@ struct drm_nouveau_private {
spinlock_t context_switch_lock;
/* RAMIN configuration, RAMFC, RAMHT and RAMRO offsets */
- struct nouveau_gpuobj *ramht;
+ struct nouveau_ramht *ramht;
+ struct nouveau_gpuobj *ramfc;
+ struct nouveau_gpuobj *ramro;
+
uint32_t ramin_rsvd_vram;
- uint32_t ramht_offset;
- uint32_t ramht_size;
- uint32_t ramht_bits;
- uint32_t ramfc_offset;
- uint32_t ramfc_size;
- uint32_t ramro_offset;
- uint32_t ramro_size;
struct {
enum {
@@ -576,14 +637,12 @@ struct drm_nouveau_private {
} gart_info;
/* nv10-nv40 tiling regions */
- struct {
- struct nouveau_tile_reg reg[NOUVEAU_MAX_TILE_NR];
- spinlock_t lock;
- } tile;
+ struct nouveau_tile_reg tile[NOUVEAU_MAX_TILE_NR];
/* VRAM/fb configuration */
uint64_t vram_size;
uint64_t vram_sys_base;
+ u32 vram_rblock_size;
uint64_t fb_phys;
uint64_t fb_available_size;
@@ -600,10 +659,6 @@ struct drm_nouveau_private {
struct nouveau_gpuobj *vm_vram_pt[NV50_VM_VRAM_NR];
int vm_vram_pt_nr;
- struct drm_mm ramin_heap;
-
- struct list_head gpuobj_list;
-
struct nvbios vbios;
struct nv04_mode_state mode_reg;
@@ -634,6 +689,12 @@ struct drm_nouveau_private {
};
static inline struct drm_nouveau_private *
+nouveau_private(struct drm_device *dev)
+{
+ return dev->dev_private;
+}
+
+static inline struct drm_nouveau_private *
nouveau_bdev(struct ttm_bo_device *bd)
{
return container_of(bd, struct drm_nouveau_private, ttm.bdev);
@@ -669,7 +730,7 @@ nouveau_bo_ref(struct nouveau_bo *ref, struct nouveau_bo **pnvbo)
} while (0)
/* nouveau_drv.c */
-extern int nouveau_noagp;
+extern int nouveau_agpmode;
extern int nouveau_duallink;
extern int nouveau_uscript_lvds;
extern int nouveau_uscript_tmds;
@@ -683,7 +744,10 @@ extern char *nouveau_vbios;
extern int nouveau_ignorelid;
extern int nouveau_nofbaccel;
extern int nouveau_noaccel;
+extern int nouveau_force_post;
extern int nouveau_override_conntype;
+extern char *nouveau_perflvl;
+extern int nouveau_perflvl_wr;
extern int nouveau_pci_suspend(struct pci_dev *pdev, pm_message_t pm_state);
extern int nouveau_pci_resume(struct pci_dev *pdev);
@@ -704,8 +768,10 @@ extern bool nouveau_wait_for_idle(struct drm_device *);
extern int nouveau_card_init(struct drm_device *);
/* nouveau_mem.c */
-extern int nouveau_mem_detect(struct drm_device *dev);
-extern int nouveau_mem_init(struct drm_device *);
+extern int nouveau_mem_vram_init(struct drm_device *);
+extern void nouveau_mem_vram_fini(struct drm_device *);
+extern int nouveau_mem_gart_init(struct drm_device *);
+extern void nouveau_mem_gart_fini(struct drm_device *);
extern int nouveau_mem_init_agp(struct drm_device *);
extern int nouveau_mem_reset_agp(struct drm_device *);
extern void nouveau_mem_close(struct drm_device *);
@@ -749,7 +815,6 @@ extern void nouveau_channel_free(struct nouveau_channel *);
extern int nouveau_gpuobj_early_init(struct drm_device *);
extern int nouveau_gpuobj_init(struct drm_device *);
extern void nouveau_gpuobj_takedown(struct drm_device *);
-extern void nouveau_gpuobj_late_takedown(struct drm_device *);
extern int nouveau_gpuobj_suspend(struct drm_device *dev);
extern void nouveau_gpuobj_suspend_cleanup(struct drm_device *dev);
extern void nouveau_gpuobj_resume(struct drm_device *dev);
@@ -759,24 +824,11 @@ extern void nouveau_gpuobj_channel_takedown(struct nouveau_channel *);
extern int nouveau_gpuobj_new(struct drm_device *, struct nouveau_channel *,
uint32_t size, int align, uint32_t flags,
struct nouveau_gpuobj **);
-extern int nouveau_gpuobj_del(struct drm_device *, struct nouveau_gpuobj **);
-extern int nouveau_gpuobj_ref_add(struct drm_device *, struct nouveau_channel *,
- uint32_t handle, struct nouveau_gpuobj *,
- struct nouveau_gpuobj_ref **);
-extern int nouveau_gpuobj_ref_del(struct drm_device *,
- struct nouveau_gpuobj_ref **);
-extern int nouveau_gpuobj_ref_find(struct nouveau_channel *, uint32_t handle,
- struct nouveau_gpuobj_ref **ref_ret);
-extern int nouveau_gpuobj_new_ref(struct drm_device *,
- struct nouveau_channel *alloc_chan,
- struct nouveau_channel *ref_chan,
- uint32_t handle, uint32_t size, int align,
- uint32_t flags, struct nouveau_gpuobj_ref **);
-extern int nouveau_gpuobj_new_fake(struct drm_device *,
- uint32_t p_offset, uint32_t b_offset,
- uint32_t size, uint32_t flags,
- struct nouveau_gpuobj **,
- struct nouveau_gpuobj_ref**);
+extern void nouveau_gpuobj_ref(struct nouveau_gpuobj *,
+ struct nouveau_gpuobj **);
+extern int nouveau_gpuobj_new_fake(struct drm_device *, u32 pinst, u64 vinst,
+ u32 size, u32 flags,
+ struct nouveau_gpuobj **);
extern int nouveau_gpuobj_dma_new(struct nouveau_channel *, int class,
uint64_t offset, uint64_t size, int access,
int target, struct nouveau_gpuobj **);
@@ -879,6 +931,7 @@ extern struct dcb_gpio_entry *nouveau_bios_gpio_entry(struct drm_device *,
enum dcb_gpio_tag);
extern struct dcb_connector_table_entry *
nouveau_bios_connector_entry(struct drm_device *, int index);
+extern u32 get_pll_register(struct drm_device *, enum pll_types);
extern int get_pll_limits(struct drm_device *, uint32_t limit_match,
struct pll_lims *);
extern int nouveau_bios_run_display_table(struct drm_device *,
@@ -925,10 +978,10 @@ extern int nv40_fb_init(struct drm_device *);
extern void nv40_fb_takedown(struct drm_device *);
extern void nv40_fb_set_region_tiling(struct drm_device *, int, uint32_t,
uint32_t, uint32_t);
-
/* nv50_fb.c */
extern int nv50_fb_init(struct drm_device *);
extern void nv50_fb_takedown(struct drm_device *);
+extern void nv50_fb_vm_trap(struct drm_device *, int display, const char *);
/* nvc0_fb.c */
extern int nvc0_fb_init(struct drm_device *);
@@ -939,7 +992,6 @@ extern int nv04_fifo_init(struct drm_device *);
extern void nv04_fifo_disable(struct drm_device *);
extern void nv04_fifo_enable(struct drm_device *);
extern bool nv04_fifo_reassign(struct drm_device *, bool);
-extern bool nv04_fifo_cache_flush(struct drm_device *);
extern bool nv04_fifo_cache_pull(struct drm_device *, bool);
extern int nv04_fifo_channel_id(struct drm_device *);
extern int nv04_fifo_create_context(struct nouveau_channel *);
@@ -970,6 +1022,7 @@ extern int nv50_fifo_create_context(struct nouveau_channel *);
extern void nv50_fifo_destroy_context(struct nouveau_channel *);
extern int nv50_fifo_load_context(struct nouveau_channel *);
extern int nv50_fifo_unload_context(struct drm_device *);
+extern void nv50_fifo_tlb_flush(struct drm_device *dev);
/* nvc0_fifo.c */
extern int nvc0_fifo_init(struct drm_device *);
@@ -977,7 +1030,6 @@ extern void nvc0_fifo_takedown(struct drm_device *);
extern void nvc0_fifo_disable(struct drm_device *);
extern void nvc0_fifo_enable(struct drm_device *);
extern bool nvc0_fifo_reassign(struct drm_device *, bool);
-extern bool nvc0_fifo_cache_flush(struct drm_device *);
extern bool nvc0_fifo_cache_pull(struct drm_device *, bool);
extern int nvc0_fifo_channel_id(struct drm_device *);
extern int nvc0_fifo_create_context(struct nouveau_channel *);
@@ -1048,6 +1100,8 @@ extern int nv50_graph_load_context(struct nouveau_channel *);
extern int nv50_graph_unload_context(struct drm_device *);
extern void nv50_graph_context_switch(struct drm_device *);
extern int nv50_grctx_init(struct nouveau_grctx *);
+extern void nv50_graph_tlb_flush(struct drm_device *dev);
+extern void nv86_graph_tlb_flush(struct drm_device *dev);
/* nvc0_graph.c */
extern int nvc0_graph_init(struct drm_device *);
@@ -1165,19 +1219,24 @@ extern u16 nouveau_bo_rd16(struct nouveau_bo *nvbo, unsigned index);
extern void nouveau_bo_wr16(struct nouveau_bo *nvbo, unsigned index, u16 val);
extern u32 nouveau_bo_rd32(struct nouveau_bo *nvbo, unsigned index);
extern void nouveau_bo_wr32(struct nouveau_bo *nvbo, unsigned index, u32 val);
-extern int nouveau_bo_sync_gpu(struct nouveau_bo *, struct nouveau_channel *);
/* nouveau_fence.c */
struct nouveau_fence;
-extern int nouveau_fence_init(struct nouveau_channel *);
-extern void nouveau_fence_fini(struct nouveau_channel *);
+extern int nouveau_fence_init(struct drm_device *);
+extern void nouveau_fence_fini(struct drm_device *);
+extern int nouveau_fence_channel_init(struct nouveau_channel *);
+extern void nouveau_fence_channel_fini(struct nouveau_channel *);
extern void nouveau_fence_update(struct nouveau_channel *);
extern int nouveau_fence_new(struct nouveau_channel *, struct nouveau_fence **,
bool emit);
extern int nouveau_fence_emit(struct nouveau_fence *);
+extern void nouveau_fence_work(struct nouveau_fence *fence,
+ void (*work)(void *priv, bool signalled),
+ void *priv);
struct nouveau_channel *nouveau_fence_channel(struct nouveau_fence *);
extern bool nouveau_fence_signalled(void *obj, void *arg);
extern int nouveau_fence_wait(void *obj, void *arg, bool lazy, bool intr);
+extern int nouveau_fence_sync(struct nouveau_fence *, struct nouveau_channel *);
extern int nouveau_fence_flush(void *obj, void *arg);
extern void nouveau_fence_unref(void **obj);
extern void *nouveau_fence_ref(void *obj);
@@ -1255,12 +1314,11 @@ static inline void nv_wr32(struct drm_device *dev, unsigned reg, u32 val)
iowrite32_native(val, dev_priv->mmio + reg);
}
-static inline void nv_mask(struct drm_device *dev, u32 reg, u32 mask, u32 val)
+static inline u32 nv_mask(struct drm_device *dev, u32 reg, u32 mask, u32 val)
{
u32 tmp = nv_rd32(dev, reg);
- tmp &= ~mask;
- tmp |= val;
- nv_wr32(dev, reg, tmp);
+ nv_wr32(dev, reg, (tmp & ~mask) | val);
+ return tmp;
}
static inline u8 nv_rd08(struct drm_device *dev, unsigned reg)
@@ -1275,7 +1333,7 @@ static inline void nv_wr08(struct drm_device *dev, unsigned reg, u8 val)
iowrite8(val, dev_priv->mmio + reg);
}
-#define nv_wait(reg, mask, val) \
+#define nv_wait(dev, reg, mask, val) \
nouveau_wait_until(dev, 2000000000ULL, (reg), (mask), (val))
/* PRAMIN access */
@@ -1292,17 +1350,8 @@ static inline void nv_wi32(struct drm_device *dev, unsigned offset, u32 val)
}
/* object access */
-static inline u32 nv_ro32(struct drm_device *dev, struct nouveau_gpuobj *obj,
- unsigned index)
-{
- return nv_ri32(dev, obj->im_pramin->start + index * 4);
-}
-
-static inline void nv_wo32(struct drm_device *dev, struct nouveau_gpuobj *obj,
- unsigned index, u32 val)
-{
- nv_wi32(dev, obj->im_pramin->start + index * 4, val);
-}
+extern u32 nv_ro32(struct nouveau_gpuobj *, u32 offset);
+extern void nv_wo32(struct nouveau_gpuobj *, u32 offset, u32 val);
/*
* Logging
@@ -1403,6 +1452,7 @@ nv_match_device(struct drm_device *dev, unsigned device,
#define NV_SW_SEMAPHORE_OFFSET 0x00000064
#define NV_SW_SEMAPHORE_ACQUIRE 0x00000068
#define NV_SW_SEMAPHORE_RELEASE 0x0000006c
+#define NV_SW_YIELD 0x00000080
#define NV_SW_DMA_VBLSEM 0x0000018c
#define NV_SW_VBLSEM_OFFSET 0x00000400
#define NV_SW_VBLSEM_RELEASE_VALUE 0x00000404
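
(Illustrative sketch, not part of the patch: among the nouveau_drv.h changes above, nv_mask() now returns the register's previous contents instead of void, so callers can modify a field and restore it afterwards. The register offset below is only a placeholder.)

/* Illustrative only: save/modify/restore using the new nv_mask() return
 * value.  0x1700 is a placeholder register, not taken from the patch. */
static void example_save_modify_restore(struct drm_device *dev)
{
	u32 saved;

	/* set bits 3:0 to 5, remembering what was there before */
	saved = nv_mask(dev, 0x1700, 0x0000000f, 0x00000005);

	/* ... work that needs the temporary setting ... */

	/* put the original field value back */
	nv_mask(dev, 0x1700, 0x0000000f, saved & 0x0000000f);
}
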
diff --git a/drivers/gpu/drm/nouveau/nouveau_encoder.h b/drivers/gpu/drm/nouveau/nouveau_encoder.h
index 7c82d68bc155..ae69b61d93db 100644
--- a/drivers/gpu/drm/nouveau/nouveau_encoder.h
+++ b/drivers/gpu/drm/nouveau/nouveau_encoder.h
@@ -55,6 +55,7 @@ struct nouveau_encoder {
int dpcd_version;
int link_nr;
int link_bw;
+ bool enhanced_frame;
} dp;
};
};
diff --git a/drivers/gpu/drm/nouveau/nouveau_fbcon.c b/drivers/gpu/drm/nouveau/nouveau_fbcon.c
index dbd30b2e43fd..02a4d1fd4845 100644
--- a/drivers/gpu/drm/nouveau/nouveau_fbcon.c
+++ b/drivers/gpu/drm/nouveau/nouveau_fbcon.c
@@ -104,6 +104,8 @@ static struct fb_ops nouveau_fbcon_ops = {
.fb_pan_display = drm_fb_helper_pan_display,
.fb_blank = drm_fb_helper_blank,
.fb_setcmap = drm_fb_helper_setcmap,
+ .fb_debug_enter = drm_fb_helper_debug_enter,
+ .fb_debug_leave = drm_fb_helper_debug_leave,
};
static struct fb_ops nv04_fbcon_ops = {
@@ -117,6 +119,8 @@ static struct fb_ops nv04_fbcon_ops = {
.fb_pan_display = drm_fb_helper_pan_display,
.fb_blank = drm_fb_helper_blank,
.fb_setcmap = drm_fb_helper_setcmap,
+ .fb_debug_enter = drm_fb_helper_debug_enter,
+ .fb_debug_leave = drm_fb_helper_debug_leave,
};
static struct fb_ops nv50_fbcon_ops = {
@@ -130,6 +134,8 @@ static struct fb_ops nv50_fbcon_ops = {
.fb_pan_display = drm_fb_helper_pan_display,
.fb_blank = drm_fb_helper_blank,
.fb_setcmap = drm_fb_helper_setcmap,
+ .fb_debug_enter = drm_fb_helper_debug_enter,
+ .fb_debug_leave = drm_fb_helper_debug_leave,
};
static void nouveau_fbcon_gamma_set(struct drm_crtc *crtc, u16 red, u16 green,
diff --git a/drivers/gpu/drm/nouveau/nouveau_fence.c b/drivers/gpu/drm/nouveau/nouveau_fence.c
index 87ac21ec23d2..ab1bbfbf266e 100644
--- a/drivers/gpu/drm/nouveau/nouveau_fence.c
+++ b/drivers/gpu/drm/nouveau/nouveau_fence.c
@@ -28,9 +28,11 @@
#include "drm.h"
#include "nouveau_drv.h"
+#include "nouveau_ramht.h"
#include "nouveau_dma.h"
-#define USE_REFCNT (dev_priv->card_type >= NV_10)
+#define USE_REFCNT(dev) (nouveau_private(dev)->chipset >= 0x10)
+#define USE_SEMA(dev) (nouveau_private(dev)->chipset >= 0x17)
struct nouveau_fence {
struct nouveau_channel *channel;
@@ -39,6 +41,15 @@ struct nouveau_fence {
uint32_t sequence;
bool signalled;
+
+ void (*work)(void *priv, bool signalled);
+ void *priv;
+};
+
+struct nouveau_semaphore {
+ struct kref ref;
+ struct drm_device *dev;
+ struct drm_mm_node *mem;
};
static inline struct nouveau_fence *
@@ -59,14 +70,13 @@ nouveau_fence_del(struct kref *ref)
void
nouveau_fence_update(struct nouveau_channel *chan)
{
- struct drm_nouveau_private *dev_priv = chan->dev->dev_private;
- struct list_head *entry, *tmp;
- struct nouveau_fence *fence;
+ struct drm_device *dev = chan->dev;
+ struct nouveau_fence *tmp, *fence;
uint32_t sequence;
spin_lock(&chan->fence.lock);
- if (USE_REFCNT)
+ if (USE_REFCNT(dev))
sequence = nvchan_rd32(chan, 0x48);
else
sequence = atomic_read(&chan->fence.last_sequence_irq);
@@ -75,12 +85,14 @@ nouveau_fence_update(struct nouveau_channel *chan)
goto out;
chan->fence.sequence_ack = sequence;
- list_for_each_safe(entry, tmp, &chan->fence.pending) {
- fence = list_entry(entry, struct nouveau_fence, entry);
-
+ list_for_each_entry_safe(fence, tmp, &chan->fence.pending, entry) {
sequence = fence->sequence;
fence->signalled = true;
list_del(&fence->entry);
+
+ if (unlikely(fence->work))
+ fence->work(fence->priv, true);
+
kref_put(&fence->refcount, nouveau_fence_del);
if (sequence == chan->fence.sequence_ack)
@@ -121,8 +133,8 @@ nouveau_fence_channel(struct nouveau_fence *fence)
int
nouveau_fence_emit(struct nouveau_fence *fence)
{
- struct drm_nouveau_private *dev_priv = fence->channel->dev->dev_private;
struct nouveau_channel *chan = fence->channel;
+ struct drm_device *dev = chan->dev;
int ret;
ret = RING_SPACE(chan, 2);
@@ -143,7 +155,7 @@ nouveau_fence_emit(struct nouveau_fence *fence)
list_add_tail(&fence->entry, &chan->fence.pending);
spin_unlock(&chan->fence.lock);
- BEGIN_RING(chan, NvSubSw, USE_REFCNT ? 0x0050 : 0x0150, 1);
+ BEGIN_RING(chan, NvSubSw, USE_REFCNT(dev) ? 0x0050 : 0x0150, 1);
OUT_RING(chan, fence->sequence);
FIRE_RING(chan);
@@ -151,6 +163,25 @@ nouveau_fence_emit(struct nouveau_fence *fence)
}
void
+nouveau_fence_work(struct nouveau_fence *fence,
+ void (*work)(void *priv, bool signalled),
+ void *priv)
+{
+ BUG_ON(fence->work);
+
+ spin_lock(&fence->channel->fence.lock);
+
+ if (fence->signalled) {
+ work(priv, true);
+ } else {
+ fence->work = work;
+ fence->priv = priv;
+ }
+
+ spin_unlock(&fence->channel->fence.lock);
+}
+
+void
nouveau_fence_unref(void **sync_obj)
{
struct nouveau_fence *fence = nouveau_fence(*sync_obj);
@@ -213,6 +244,167 @@ nouveau_fence_wait(void *sync_obj, void *sync_arg, bool lazy, bool intr)
return ret;
}
+static struct nouveau_semaphore *
+alloc_semaphore(struct drm_device *dev)
+{
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ struct nouveau_semaphore *sema;
+ int ret;
+
+ if (!USE_SEMA(dev))
+ return NULL;
+
+ sema = kmalloc(sizeof(*sema), GFP_KERNEL);
+ if (!sema)
+ goto fail;
+
+ ret = drm_mm_pre_get(&dev_priv->fence.heap);
+ if (ret)
+ goto fail;
+
+ spin_lock(&dev_priv->fence.lock);
+ sema->mem = drm_mm_search_free(&dev_priv->fence.heap, 4, 0, 0);
+ if (sema->mem)
+ sema->mem = drm_mm_get_block_atomic(sema->mem, 4, 0);
+ spin_unlock(&dev_priv->fence.lock);
+
+ if (!sema->mem)
+ goto fail;
+
+ kref_init(&sema->ref);
+ sema->dev = dev;
+ nouveau_bo_wr32(dev_priv->fence.bo, sema->mem->start / 4, 0);
+
+ return sema;
+fail:
+ kfree(sema);
+ return NULL;
+}
+
+static void
+free_semaphore(struct kref *ref)
+{
+ struct nouveau_semaphore *sema =
+ container_of(ref, struct nouveau_semaphore, ref);
+ struct drm_nouveau_private *dev_priv = sema->dev->dev_private;
+
+ spin_lock(&dev_priv->fence.lock);
+ drm_mm_put_block(sema->mem);
+ spin_unlock(&dev_priv->fence.lock);
+
+ kfree(sema);
+}
+
+static void
+semaphore_work(void *priv, bool signalled)
+{
+ struct nouveau_semaphore *sema = priv;
+ struct drm_nouveau_private *dev_priv = sema->dev->dev_private;
+
+ if (unlikely(!signalled))
+ nouveau_bo_wr32(dev_priv->fence.bo, sema->mem->start / 4, 1);
+
+ kref_put(&sema->ref, free_semaphore);
+}
+
+static int
+emit_semaphore(struct nouveau_channel *chan, int method,
+ struct nouveau_semaphore *sema)
+{
+ struct drm_nouveau_private *dev_priv = sema->dev->dev_private;
+ struct nouveau_fence *fence;
+ bool smart = (dev_priv->card_type >= NV_50);
+ int ret;
+
+ ret = RING_SPACE(chan, smart ? 8 : 4);
+ if (ret)
+ return ret;
+
+ if (smart) {
+ BEGIN_RING(chan, NvSubSw, NV_SW_DMA_SEMAPHORE, 1);
+ OUT_RING(chan, NvSema);
+ }
+ BEGIN_RING(chan, NvSubSw, NV_SW_SEMAPHORE_OFFSET, 1);
+ OUT_RING(chan, sema->mem->start);
+
+ if (smart && method == NV_SW_SEMAPHORE_ACQUIRE) {
+ /*
+ * NV50 tries to be too smart and context-switch
+ * between semaphores instead of doing a "first come,
+ * first served" strategy like previous cards
+ * do.
+ *
+ * That's bad because the ACQUIRE latency can get as
+ * large as the PFIFO context time slice in the
+ * typical DRI2 case where you have several
+ * outstanding semaphores at the same moment.
+ *
+ * If we're going to ACQUIRE, force the card to
+ * context switch before, just in case the matching
+ * RELEASE is already scheduled to be executed in
+ * another channel.
+ */
+ BEGIN_RING(chan, NvSubSw, NV_SW_YIELD, 1);
+ OUT_RING(chan, 0);
+ }
+
+ BEGIN_RING(chan, NvSubSw, method, 1);
+ OUT_RING(chan, 1);
+
+ if (smart && method == NV_SW_SEMAPHORE_RELEASE) {
+ /*
+ * Force the card to context switch, there may be
+ * another channel waiting for the semaphore we just
+ * released.
+ */
+ BEGIN_RING(chan, NvSubSw, NV_SW_YIELD, 1);
+ OUT_RING(chan, 0);
+ }
+
+ /* Delay semaphore destruction until its work is done */
+ ret = nouveau_fence_new(chan, &fence, true);
+ if (ret)
+ return ret;
+
+ kref_get(&sema->ref);
+ nouveau_fence_work(fence, semaphore_work, sema);
+ nouveau_fence_unref((void *)&fence);
+
+ return 0;
+}
+
+int
+nouveau_fence_sync(struct nouveau_fence *fence,
+ struct nouveau_channel *wchan)
+{
+ struct nouveau_channel *chan = nouveau_fence_channel(fence);
+ struct drm_device *dev = wchan->dev;
+ struct nouveau_semaphore *sema;
+ int ret;
+
+ if (likely(!fence || chan == wchan ||
+ nouveau_fence_signalled(fence, NULL)))
+ return 0;
+
+ sema = alloc_semaphore(dev);
+ if (!sema) {
+ /* Early card or broken userspace, fall back to
+ * software sync. */
+ return nouveau_fence_wait(fence, NULL, false, false);
+ }
+
+ /* Make wchan wait until it gets signalled */
+ ret = emit_semaphore(wchan, NV_SW_SEMAPHORE_ACQUIRE, sema);
+ if (ret)
+ goto out;
+
+ /* Signal the semaphore from chan */
+ ret = emit_semaphore(chan, NV_SW_SEMAPHORE_RELEASE, sema);
+out:
+ kref_put(&sema->ref, free_semaphore);
+ return ret;
+}
+
int
nouveau_fence_flush(void *sync_obj, void *sync_arg)
{
@@ -220,26 +412,123 @@ nouveau_fence_flush(void *sync_obj, void *sync_arg)
}
int
-nouveau_fence_init(struct nouveau_channel *chan)
+nouveau_fence_channel_init(struct nouveau_channel *chan)
{
+ struct drm_device *dev = chan->dev;
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ struct nouveau_gpuobj *obj = NULL;
+ int ret;
+
+ /* Create an NV_SW object for various sync purposes */
+ ret = nouveau_gpuobj_sw_new(chan, NV_SW, &obj);
+ if (ret)
+ return ret;
+
+ ret = nouveau_ramht_insert(chan, NvSw, obj);
+ nouveau_gpuobj_ref(NULL, &obj);
+ if (ret)
+ return ret;
+
+ ret = RING_SPACE(chan, 2);
+ if (ret)
+ return ret;
+ BEGIN_RING(chan, NvSubSw, 0, 1);
+ OUT_RING(chan, NvSw);
+
+ /* Create a DMA object for the shared cross-channel sync area. */
+ if (USE_SEMA(dev)) {
+ struct drm_mm_node *mem = dev_priv->fence.bo->bo.mem.mm_node;
+
+ ret = nouveau_gpuobj_dma_new(chan, NV_CLASS_DMA_IN_MEMORY,
+ mem->start << PAGE_SHIFT,
+ mem->size << PAGE_SHIFT,
+ NV_DMA_ACCESS_RW,
+ NV_DMA_TARGET_VIDMEM, &obj);
+ if (ret)
+ return ret;
+
+ ret = nouveau_ramht_insert(chan, NvSema, obj);
+ nouveau_gpuobj_ref(NULL, &obj);
+ if (ret)
+ return ret;
+
+ ret = RING_SPACE(chan, 2);
+ if (ret)
+ return ret;
+ BEGIN_RING(chan, NvSubSw, NV_SW_DMA_SEMAPHORE, 1);
+ OUT_RING(chan, NvSema);
+ }
+
+ FIRE_RING(chan);
+
INIT_LIST_HEAD(&chan->fence.pending);
spin_lock_init(&chan->fence.lock);
atomic_set(&chan->fence.last_sequence_irq, 0);
+
return 0;
}
void
-nouveau_fence_fini(struct nouveau_channel *chan)
+nouveau_fence_channel_fini(struct nouveau_channel *chan)
{
- struct list_head *entry, *tmp;
- struct nouveau_fence *fence;
-
- list_for_each_safe(entry, tmp, &chan->fence.pending) {
- fence = list_entry(entry, struct nouveau_fence, entry);
+ struct nouveau_fence *tmp, *fence;
+ list_for_each_entry_safe(fence, tmp, &chan->fence.pending, entry) {
fence->signalled = true;
list_del(&fence->entry);
+
+ if (unlikely(fence->work))
+ fence->work(fence->priv, false);
+
kref_put(&fence->refcount, nouveau_fence_del);
}
}
+int
+nouveau_fence_init(struct drm_device *dev)
+{
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ int ret;
+
+ /* Create a shared VRAM heap for cross-channel sync. */
+ if (USE_SEMA(dev)) {
+ ret = nouveau_bo_new(dev, NULL, 4096, 0, TTM_PL_FLAG_VRAM,
+ 0, 0, false, true, &dev_priv->fence.bo);
+ if (ret)
+ return ret;
+
+ ret = nouveau_bo_pin(dev_priv->fence.bo, TTM_PL_FLAG_VRAM);
+ if (ret)
+ goto fail;
+
+ ret = nouveau_bo_map(dev_priv->fence.bo);
+ if (ret)
+ goto fail;
+
+ ret = drm_mm_init(&dev_priv->fence.heap, 0,
+ dev_priv->fence.bo->bo.mem.size);
+ if (ret)
+ goto fail;
+
+ spin_lock_init(&dev_priv->fence.lock);
+ }
+
+ return 0;
+fail:
+ nouveau_bo_unmap(dev_priv->fence.bo);
+ nouveau_bo_ref(NULL, &dev_priv->fence.bo);
+ return ret;
+}
+
+void
+nouveau_fence_fini(struct drm_device *dev)
+{
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+
+ if (USE_SEMA(dev)) {
+ drm_mm_takedown(&dev_priv->fence.heap);
+ nouveau_bo_unmap(dev_priv->fence.bo);
+ nouveau_bo_unpin(dev_priv->fence.bo);
+ nouveau_bo_ref(NULL, &dev_priv->fence.bo);
+ }
+}
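
(Illustrative sketch, not part of the patch: the new nouveau_fence_work() hook registers a callback that runs once the fence signals, or with signalled == false if the channel is torn down first. This is exactly how emit_semaphore() above defers freeing a semaphore slot; a hypothetical caller could defer any cleanup the same way.)

struct my_resource;					/* hypothetical */
extern void my_resource_free(struct my_resource *);	/* hypothetical */

/* Illustrative only: release a resource once the GPU has passed the
 * fence, mirroring the semaphore_work() pattern above. */
static void example_cleanup(void *priv, bool signalled)
{
	struct my_resource *res = priv;

	/* 'signalled' is false only on channel teardown */
	my_resource_free(res);
}

static int example_defer_free(struct nouveau_channel *chan,
			      struct my_resource *res)
{
	struct nouveau_fence *fence;
	int ret;

	ret = nouveau_fence_new(chan, &fence, true);	/* emit immediately */
	if (ret)
		return ret;

	nouveau_fence_work(fence, example_cleanup, res);
	nouveau_fence_unref((void *)&fence);
	return 0;
}
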
diff --git a/drivers/gpu/drm/nouveau/nouveau_gem.c b/drivers/gpu/drm/nouveau/nouveau_gem.c
index 19620a6709f5..9a1fdcf400c2 100644
--- a/drivers/gpu/drm/nouveau/nouveau_gem.c
+++ b/drivers/gpu/drm/nouveau/nouveau_gem.c
@@ -107,23 +107,29 @@ nouveau_gem_info(struct drm_gem_object *gem, struct drm_nouveau_gem_info *rep)
}
static bool
-nouveau_gem_tile_flags_valid(struct drm_device *dev, uint32_t tile_flags) {
- switch (tile_flags) {
- case 0x0000:
- case 0x1800:
- case 0x2800:
- case 0x4800:
- case 0x7000:
- case 0x7400:
- case 0x7a00:
- case 0xe000:
- break;
- default:
- NV_ERROR(dev, "bad page flags: 0x%08x\n", tile_flags);
- return false;
+nouveau_gem_tile_flags_valid(struct drm_device *dev, uint32_t tile_flags)
+{
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+
+ if (dev_priv->card_type >= NV_50) {
+ switch (tile_flags & NOUVEAU_GEM_TILE_LAYOUT_MASK) {
+ case 0x0000:
+ case 0x1800:
+ case 0x2800:
+ case 0x4800:
+ case 0x7000:
+ case 0x7400:
+ case 0x7a00:
+ case 0xe000:
+ return true;
+ }
+ } else {
+ if (!(tile_flags & NOUVEAU_GEM_TILE_LAYOUT_MASK))
+ return true;
}
- return true;
+ NV_ERROR(dev, "bad page flags: 0x%08x\n", tile_flags);
+ return false;
}
int
@@ -362,7 +368,7 @@ validate_list(struct nouveau_channel *chan, struct list_head *list,
list_for_each_entry(nvbo, list, entry) {
struct drm_nouveau_gem_pushbuf_bo *b = &pbbo[nvbo->pbbo_index];
- ret = nouveau_bo_sync_gpu(nvbo, chan);
+ ret = nouveau_fence_sync(nvbo->bo.sync_obj, chan);
if (unlikely(ret)) {
NV_ERROR(dev, "fail pre-validate sync\n");
return ret;
@@ -385,7 +391,7 @@ validate_list(struct nouveau_channel *chan, struct list_head *list,
return ret;
}
- ret = nouveau_bo_sync_gpu(nvbo, chan);
+ ret = nouveau_fence_sync(nvbo->bo.sync_obj, chan);
if (unlikely(ret)) {
NV_ERROR(dev, "fail post-validate sync\n");
return ret;
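
(Illustrative sketch, not part of the patch: with only the layout bits of tile_flags validated here on NV50+, later code can test a buffer's tiling through the nouveau_bo_tile_layout() helper added in nouveau_drv.h above.)

/* Illustrative only: check whether a BO uses a tiled layout. */
static bool example_bo_is_tiled(struct nouveau_bo *nvbo)
{
	return nouveau_bo_tile_layout(nvbo) != 0;
}
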
diff --git a/drivers/gpu/drm/nouveau/nouveau_grctx.h b/drivers/gpu/drm/nouveau/nouveau_grctx.h
index 5d39c4ce8006..4a8ad1307fa4 100644
--- a/drivers/gpu/drm/nouveau/nouveau_grctx.h
+++ b/drivers/gpu/drm/nouveau/nouveau_grctx.h
@@ -126,7 +126,7 @@ gr_def(struct nouveau_grctx *ctx, uint32_t reg, uint32_t val)
reg = (reg - 0x00400000) / 4;
reg = (reg - ctx->ctxprog_reg) + ctx->ctxvals_base;
- nv_wo32(ctx->dev, ctx->data, reg, val);
+ nv_wo32(ctx->data, reg * 4, val);
}
#endif
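
(Illustrative sketch, not part of the patch: the object accessors changed signature in this series. The old inline nv_wo32(dev, obj, index, val) took a 32-bit word index and multiplied by 4 internally; the new nv_wo32(obj, offset, val) takes a byte offset and drops the device argument, which is why gr_def() above now passes reg * 4.)

/* Illustrative only: writing word 'word' of a gpuobj with the new
 * byte-offset interface. */
static void example_write_word(struct nouveau_gpuobj *obj, u32 word, u32 val)
{
	/* the accessors now address bytes, so scale the word index by 4 */
	nv_wo32(obj, word * 4, val);
}
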
diff --git a/drivers/gpu/drm/nouveau/nouveau_hw.c b/drivers/gpu/drm/nouveau/nouveau_hw.c
index 7b613682e400..b9672a05c411 100644
--- a/drivers/gpu/drm/nouveau/nouveau_hw.c
+++ b/drivers/gpu/drm/nouveau/nouveau_hw.c
@@ -305,7 +305,7 @@ setPLL_double_lowregs(struct drm_device *dev, uint32_t NMNMreg,
bool mpll = Preg == 0x4020;
uint32_t oldPval = nvReadMC(dev, Preg);
uint32_t NMNM = pv->NM2 << 16 | pv->NM1;
- uint32_t Pval = (oldPval & (mpll ? ~(0x11 << 16) : ~(1 << 16))) |
+ uint32_t Pval = (oldPval & (mpll ? ~(0x77 << 16) : ~(7 << 16))) |
0xc << 28 | pv->log2P << 16;
uint32_t saved4600 = 0;
/* some cards have different maskc040s */
@@ -427,22 +427,12 @@ nouveau_hw_get_pllvals(struct drm_device *dev, enum pll_types plltype,
struct nouveau_pll_vals *pllvals)
{
struct drm_nouveau_private *dev_priv = dev->dev_private;
- const uint32_t nv04_regs[MAX_PLL_TYPES] = { NV_PRAMDAC_NVPLL_COEFF,
- NV_PRAMDAC_MPLL_COEFF,
- NV_PRAMDAC_VPLL_COEFF,
- NV_RAMDAC_VPLL2 };
- const uint32_t nv40_regs[MAX_PLL_TYPES] = { 0x4000,
- 0x4020,
- NV_PRAMDAC_VPLL_COEFF,
- NV_RAMDAC_VPLL2 };
- uint32_t reg1, pll1, pll2 = 0;
+ uint32_t reg1 = get_pll_register(dev, plltype), pll1, pll2 = 0;
struct pll_lims pll_lim;
int ret;
- if (dev_priv->card_type < NV_40)
- reg1 = nv04_regs[plltype];
- else
- reg1 = nv40_regs[plltype];
+ if (reg1 == 0)
+ return -ENOENT;
pll1 = nvReadMC(dev, reg1);
@@ -491,8 +481,10 @@ int
nouveau_hw_get_clock(struct drm_device *dev, enum pll_types plltype)
{
struct nouveau_pll_vals pllvals;
+ int ret;
- if (plltype == MPLL && (dev->pci_device & 0x0ff0) == CHIPSET_NFORCE) {
+ if (plltype == PLL_MEMORY &&
+ (dev->pci_device & 0x0ff0) == CHIPSET_NFORCE) {
uint32_t mpllP;
pci_read_config_dword(pci_get_bus_and_slot(0, 3), 0x6c, &mpllP);
@@ -501,14 +493,17 @@ nouveau_hw_get_clock(struct drm_device *dev, enum pll_types plltype)
return 400000 / mpllP;
} else
- if (plltype == MPLL && (dev->pci_device & 0xff0) == CHIPSET_NFORCE2) {
+ if (plltype == PLL_MEMORY &&
+ (dev->pci_device & 0xff0) == CHIPSET_NFORCE2) {
uint32_t clock;
pci_read_config_dword(pci_get_bus_and_slot(0, 5), 0x4c, &clock);
return clock;
}
- nouveau_hw_get_pllvals(dev, plltype, &pllvals);
+ ret = nouveau_hw_get_pllvals(dev, plltype, &pllvals);
+ if (ret)
+ return ret;
return nouveau_hw_pllvals_to_clk(&pllvals);
}
@@ -524,11 +519,11 @@ nouveau_hw_fix_bad_vpll(struct drm_device *dev, int head)
struct pll_lims pll_lim;
struct nouveau_pll_vals pv;
- uint32_t pllreg = head ? NV_RAMDAC_VPLL2 : NV_PRAMDAC_VPLL_COEFF;
+ enum pll_types pll = head ? PLL_VPLL1 : PLL_VPLL0;
- if (get_pll_limits(dev, head ? VPLL2 : VPLL1, &pll_lim))
+ if (get_pll_limits(dev, pll, &pll_lim))
return;
- nouveau_hw_get_pllvals(dev, head ? VPLL2 : VPLL1, &pv);
+ nouveau_hw_get_pllvals(dev, pll, &pv);
if (pv.M1 >= pll_lim.vco1.min_m && pv.M1 <= pll_lim.vco1.max_m &&
pv.N1 >= pll_lim.vco1.min_n && pv.N1 <= pll_lim.vco1.max_n &&
@@ -541,7 +536,7 @@ nouveau_hw_fix_bad_vpll(struct drm_device *dev, int head)
pv.M1 = pll_lim.vco1.max_m;
pv.N1 = pll_lim.vco1.min_n;
pv.log2P = pll_lim.max_usable_log2p;
- nouveau_hw_setpll(dev, pllreg, &pv);
+ nouveau_hw_setpll(dev, pll_lim.reg, &pv);
}
/*
@@ -661,7 +656,7 @@ nv_save_state_ramdac(struct drm_device *dev, int head,
if (dev_priv->card_type >= NV_10)
regp->nv10_cursync = NVReadRAMDAC(dev, head, NV_RAMDAC_NV10_CURSYNC);
- nouveau_hw_get_pllvals(dev, head ? VPLL2 : VPLL1, &regp->pllvals);
+ nouveau_hw_get_pllvals(dev, head ? PLL_VPLL1 : PLL_VPLL0, &regp->pllvals);
state->pllsel = NVReadRAMDAC(dev, 0, NV_PRAMDAC_PLL_COEFF_SELECT);
if (nv_two_heads(dev))
state->sel_clk = NVReadRAMDAC(dev, 0, NV_PRAMDAC_SEL_CLK);
@@ -866,10 +861,11 @@ nv_save_state_ext(struct drm_device *dev, int head,
rd_cio_state(dev, head, regp, NV_CIO_CRE_FFLWM__INDEX);
rd_cio_state(dev, head, regp, NV_CIO_CRE_21);
- if (dev_priv->card_type >= NV_30) {
+ if (dev_priv->card_type >= NV_20)
rd_cio_state(dev, head, regp, NV_CIO_CRE_47);
+
+ if (dev_priv->card_type >= NV_30)
rd_cio_state(dev, head, regp, 0x9f);
- }
rd_cio_state(dev, head, regp, NV_CIO_CRE_49);
rd_cio_state(dev, head, regp, NV_CIO_CRE_HCUR_ADDR0_INDEX);
@@ -976,10 +972,11 @@ nv_load_state_ext(struct drm_device *dev, int head,
wr_cio_state(dev, head, regp, NV_CIO_CRE_FF_INDEX);
wr_cio_state(dev, head, regp, NV_CIO_CRE_FFLWM__INDEX);
- if (dev_priv->card_type >= NV_30) {
+ if (dev_priv->card_type >= NV_20)
wr_cio_state(dev, head, regp, NV_CIO_CRE_47);
+
+ if (dev_priv->card_type >= NV_30)
wr_cio_state(dev, head, regp, 0x9f);
- }
wr_cio_state(dev, head, regp, NV_CIO_CRE_49);
wr_cio_state(dev, head, regp, NV_CIO_CRE_HCUR_ADDR0_INDEX);
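
(Illustrative sketch, not part of the patch: the setPLL_double_lowregs() hunk above widens the P-divider mask from 0x11 to 0x77 (and 1 to 7 for the non-MPLL case), which suggests each log2P field spans 3 bits; clearing only the low bit of each field left stale divider bits behind. The resulting P-register update looks like this.)

/* Illustrative only: clear the full 3-bit log2P field(s) before writing
 * the new divider, as the corrected mask above does. */
static u32 example_clear_p_fields(u32 oldPval, bool mpll, u32 log2P)
{
	u32 mask = (mpll ? 0x77 : 0x7) << 16;	/* bits 18:16, plus 22:20 for MPLL */

	return (oldPval & ~mask) | (0xc << 28) | (log2P << 16);
}
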
diff --git a/drivers/gpu/drm/nouveau/nouveau_hw.h b/drivers/gpu/drm/nouveau/nouveau_hw.h
index 869130f83602..2989090b9434 100644
--- a/drivers/gpu/drm/nouveau/nouveau_hw.h
+++ b/drivers/gpu/drm/nouveau/nouveau_hw.h
@@ -416,6 +416,25 @@ nv_fix_nv40_hw_cursor(struct drm_device *dev, int head)
}
static inline void
+nv_set_crtc_base(struct drm_device *dev, int head, uint32_t offset)
+{
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+
+ NVWriteCRTC(dev, head, NV_PCRTC_START, offset);
+
+ if (dev_priv->card_type == NV_04) {
+ /*
+ * Hilarious, the 24th bit doesn't want to stick to
+ * PCRTC_START...
+ */
+ int cre_heb = NVReadVgaCrtc(dev, head, NV_CIO_CRE_HEB__INDEX);
+
+ NVWriteVgaCrtc(dev, head, NV_CIO_CRE_HEB__INDEX,
+ (cre_heb & ~0x40) | ((offset >> 18) & 0x40));
+ }
+}
+
+static inline void
nv_show_cursor(struct drm_device *dev, int head, bool show)
{
struct drm_nouveau_private *dev_priv = dev->dev_private;
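
(Illustrative sketch, not part of the patch: in nv_set_crtc_base() above, bit 24 of the scanout offset does not fit in PCRTC_START on NV04 and is carried in bit 6 of CRE_HEB instead, hence the shift by 18, since 24 - 18 = 6.)

/* Illustrative only: fold bit 24 of the offset into CRE_HEB bit 6. */
static int example_cre_heb(int cre_heb, u32 offset)
{
	return (cre_heb & ~0x40) | ((offset >> 18) & 0x40);
}
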
diff --git a/drivers/gpu/drm/nouveau/nouveau_i2c.c b/drivers/gpu/drm/nouveau/nouveau_i2c.c
index 84614858728b..cb389d014326 100644
--- a/drivers/gpu/drm/nouveau/nouveau_i2c.c
+++ b/drivers/gpu/drm/nouveau/nouveau_i2c.c
@@ -256,7 +256,7 @@ nouveau_i2c_find(struct drm_device *dev, int index)
if (index >= DCB_MAX_NUM_I2C_ENTRIES)
return NULL;
- if (dev_priv->chipset >= NV_50 && (i2c->entry & 0x00000100)) {
+ if (dev_priv->card_type >= NV_50 && (i2c->entry & 0x00000100)) {
uint32_t reg = 0xe500, val;
if (i2c->port_type == 6) {
@@ -299,7 +299,10 @@ nouveau_probe_i2c_addr(struct nouveau_i2c_chan *i2c, int addr)
int
nouveau_i2c_identify(struct drm_device *dev, const char *what,
- struct i2c_board_info *info, int index)
+ struct i2c_board_info *info,
+ bool (*match)(struct nouveau_i2c_chan *,
+ struct i2c_board_info *),
+ int index)
{
struct nouveau_i2c_chan *i2c = nouveau_i2c_find(dev, index);
int i;
@@ -307,7 +310,8 @@ nouveau_i2c_identify(struct drm_device *dev, const char *what,
NV_DEBUG(dev, "Probing %ss on I2C bus: %d\n", what, index);
for (i = 0; info[i].addr; i++) {
- if (nouveau_probe_i2c_addr(i2c, info[i].addr)) {
+ if (nouveau_probe_i2c_addr(i2c, info[i].addr) &&
+ (!match || match(i2c, &info[i]))) {
NV_INFO(dev, "Detected %s: %s\n", what, info[i].type);
return i;
}
diff --git a/drivers/gpu/drm/nouveau/nouveau_i2c.h b/drivers/gpu/drm/nouveau/nouveau_i2c.h
index cfe7c8426d1d..422b62fd8272 100644
--- a/drivers/gpu/drm/nouveau/nouveau_i2c.h
+++ b/drivers/gpu/drm/nouveau/nouveau_i2c.h
@@ -43,7 +43,10 @@ void nouveau_i2c_fini(struct drm_device *, struct dcb_i2c_entry *);
struct nouveau_i2c_chan *nouveau_i2c_find(struct drm_device *, int index);
bool nouveau_probe_i2c_addr(struct nouveau_i2c_chan *i2c, int addr);
int nouveau_i2c_identify(struct drm_device *dev, const char *what,
- struct i2c_board_info *info, int index);
+ struct i2c_board_info *info,
+ bool (*match)(struct nouveau_i2c_chan *,
+ struct i2c_board_info *),
+ int index);
extern const struct i2c_algorithm nouveau_dp_i2c_algo;
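
(Illustrative sketch, not part of the patch: nouveau_i2c_identify() now accepts an optional match() callback that is called after the address probe succeeds, letting a caller verify the device, for example by reading an ID register, before accepting it. Everything below is hypothetical.)

/* Illustrative only: a no-op match callback and a caller using the
 * extended nouveau_i2c_identify() signature. */
static bool example_match(struct nouveau_i2c_chan *i2c,
			  struct i2c_board_info *info)
{
	/* probe-specific verification would go here; accept by default */
	return true;
}

static int example_identify(struct drm_device *dev,
			    struct i2c_board_info *probe_list, int bus)
{
	return nouveau_i2c_identify(dev, "example device", probe_list,
				    example_match, bus);
}
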
diff --git a/drivers/gpu/drm/nouveau/nouveau_irq.c b/drivers/gpu/drm/nouveau/nouveau_irq.c
index 794b0ee30cf6..7bfd9e6c9d67 100644
--- a/drivers/gpu/drm/nouveau/nouveau_irq.c
+++ b/drivers/gpu/drm/nouveau/nouveau_irq.c
@@ -35,12 +35,20 @@
#include "nouveau_drm.h"
#include "nouveau_drv.h"
#include "nouveau_reg.h"
+#include "nouveau_ramht.h"
#include <linux/ratelimit.h>
/* needed for hotplug irq */
#include "nouveau_connector.h"
#include "nv50_display.h"
+static DEFINE_RATELIMIT_STATE(nouveau_ratelimit_state, 3 * HZ, 20);
+
+static int nouveau_ratelimit(void)
+{
+ return __ratelimit(&nouveau_ratelimit_state);
+}
+
void
nouveau_irq_preinstall(struct drm_device *dev)
{
@@ -52,6 +60,7 @@ nouveau_irq_preinstall(struct drm_device *dev)
if (dev_priv->card_type >= NV_50) {
INIT_WORK(&dev_priv->irq_work, nv50_display_irq_handler_bh);
INIT_WORK(&dev_priv->hpd_work, nv50_display_irq_hotplug_bh);
+ spin_lock_init(&dev_priv->hpd_state.lock);
INIT_LIST_HEAD(&dev_priv->vbl_waiting);
}
}
@@ -106,15 +115,16 @@ nouveau_fifo_swmthd(struct nouveau_channel *chan, uint32_t addr, uint32_t data)
const int mthd = addr & 0x1ffc;
if (mthd == 0x0000) {
- struct nouveau_gpuobj_ref *ref = NULL;
+ struct nouveau_gpuobj *gpuobj;
- if (nouveau_gpuobj_ref_find(chan, data, &ref))
+ gpuobj = nouveau_ramht_find(chan, data);
+ if (!gpuobj)
return false;
- if (ref->gpuobj->engine != NVOBJ_ENGINE_SW)
+ if (gpuobj->engine != NVOBJ_ENGINE_SW)
return false;
- chan->sw_subchannel[subc] = ref->gpuobj->class;
+ chan->sw_subchannel[subc] = gpuobj->class;
nv_wr32(dev, NV04_PFIFO_CACHE1_ENGINE, nv_rd32(dev,
NV04_PFIFO_CACHE1_ENGINE) & ~(0xf << subc * 4));
return true;
@@ -200,16 +210,47 @@ nouveau_fifo_irq_handler(struct drm_device *dev)
}
if (status & NV_PFIFO_INTR_DMA_PUSHER) {
- NV_INFO(dev, "PFIFO_DMA_PUSHER - Ch %d\n", chid);
+ u32 dma_get = nv_rd32(dev, 0x003244);
+ u32 dma_put = nv_rd32(dev, 0x003240);
+ u32 push = nv_rd32(dev, 0x003220);
+ u32 state = nv_rd32(dev, 0x003228);
+
+ if (dev_priv->card_type == NV_50) {
+ u32 ho_get = nv_rd32(dev, 0x003328);
+ u32 ho_put = nv_rd32(dev, 0x003320);
+ u32 ib_get = nv_rd32(dev, 0x003334);
+ u32 ib_put = nv_rd32(dev, 0x003330);
+
+ if (nouveau_ratelimit())
+ NV_INFO(dev, "PFIFO_DMA_PUSHER - Ch %d Get 0x%02x%08x "
+ "Put 0x%02x%08x IbGet 0x%08x IbPut 0x%08x "
+ "State 0x%08x Push 0x%08x\n",
+ chid, ho_get, dma_get, ho_put,
+ dma_put, ib_get, ib_put, state,
+ push);
+
+ /* METHOD_COUNT, in DMA_STATE on earlier chipsets */
+ nv_wr32(dev, 0x003364, 0x00000000);
+ if (dma_get != dma_put || ho_get != ho_put) {
+ nv_wr32(dev, 0x003244, dma_put);
+ nv_wr32(dev, 0x003328, ho_put);
+ } else
+ if (ib_get != ib_put) {
+ nv_wr32(dev, 0x003334, ib_put);
+ }
+ } else {
+ NV_INFO(dev, "PFIFO_DMA_PUSHER - Ch %d Get 0x%08x "
+ "Put 0x%08x State 0x%08x Push 0x%08x\n",
+ chid, dma_get, dma_put, state, push);
- status &= ~NV_PFIFO_INTR_DMA_PUSHER;
- nv_wr32(dev, NV03_PFIFO_INTR_0,
- NV_PFIFO_INTR_DMA_PUSHER);
+ if (dma_get != dma_put)
+ nv_wr32(dev, 0x003244, dma_put);
+ }
- nv_wr32(dev, NV04_PFIFO_CACHE1_DMA_STATE, 0x00000000);
- if (nv_rd32(dev, NV04_PFIFO_CACHE1_DMA_PUT) != get)
- nv_wr32(dev, NV04_PFIFO_CACHE1_DMA_GET,
- get + 4);
+ nv_wr32(dev, 0x003228, 0x00000000);
+ nv_wr32(dev, 0x003220, 0x00000001);
+ nv_wr32(dev, 0x002100, NV_PFIFO_INTR_DMA_PUSHER);
+ status &= ~NV_PFIFO_INTR_DMA_PUSHER;
}
if (status & NV_PFIFO_INTR_SEMAPHORE) {
@@ -226,9 +267,18 @@ nouveau_fifo_irq_handler(struct drm_device *dev)
nv_wr32(dev, NV04_PFIFO_CACHE1_PULL0, 1);
}
+ if (dev_priv->card_type == NV_50) {
+ if (status & 0x00000010) {
+ nv50_fb_vm_trap(dev, 1, "PFIFO_BAR_FAULT");
+ status &= ~0x00000010;
+ nv_wr32(dev, 0x002100, 0x00000010);
+ }
+ }
+
if (status) {
- NV_INFO(dev, "PFIFO_INTR 0x%08x - Ch %d\n",
- status, chid);
+ if (nouveau_ratelimit())
+ NV_INFO(dev, "PFIFO_INTR 0x%08x - Ch %d\n",
+ status, chid);
nv_wr32(dev, NV03_PFIFO_INTR_0, status);
status = 0;
}
@@ -357,7 +407,7 @@ nouveau_graph_chid_from_grctx(struct drm_device *dev)
if (!chan || !chan->ramin_grctx)
continue;
- if (inst == chan->ramin_grctx->instance)
+ if (inst == chan->ramin_grctx->pinst)
break;
}
} else {
@@ -369,7 +419,7 @@ nouveau_graph_chid_from_grctx(struct drm_device *dev)
if (!chan || !chan->ramin)
continue;
- if (inst == chan->ramin->instance)
+ if (inst == chan->ramin->vinst)
break;
}
}
@@ -505,13 +555,6 @@ nouveau_pgraph_intr_notify(struct drm_device *dev, uint32_t nsource)
nouveau_graph_dump_trap_info(dev, "PGRAPH_NOTIFY", &trap);
}
-static DEFINE_RATELIMIT_STATE(nouveau_ratelimit_state, 3 * HZ, 20);
-
-static int nouveau_ratelimit(void)
-{
- return __ratelimit(&nouveau_ratelimit_state);
-}
-
static inline void
nouveau_pgraph_intr_error(struct drm_device *dev, uint32_t nsource)
@@ -605,40 +648,6 @@ nouveau_pgraph_irq_handler(struct drm_device *dev)
nv_wr32(dev, NV03_PMC_INTR_0, NV_PMC_INTR_0_PGRAPH_PENDING);
}
-static void
-nv50_pfb_vm_trap(struct drm_device *dev, int display, const char *name)
-{
- struct drm_nouveau_private *dev_priv = dev->dev_private;
- uint32_t trap[6];
- int i, ch;
- uint32_t idx = nv_rd32(dev, 0x100c90);
- if (idx & 0x80000000) {
- idx &= 0xffffff;
- if (display) {
- for (i = 0; i < 6; i++) {
- nv_wr32(dev, 0x100c90, idx | i << 24);
- trap[i] = nv_rd32(dev, 0x100c94);
- }
- for (ch = 0; ch < dev_priv->engine.fifo.channels; ch++) {
- struct nouveau_channel *chan = dev_priv->fifos[ch];
-
- if (!chan || !chan->ramin)
- continue;
-
- if (trap[1] == chan->ramin->instance >> 12)
- break;
- }
- NV_INFO(dev, "%s - VM: Trapped %s at %02x%04x%04x status %08x %08x channel %d\n",
- name, (trap[5]&0x100?"read":"write"),
- trap[5]&0xff, trap[4]&0xffff,
- trap[3]&0xffff, trap[0], trap[2], ch);
- }
- nv_wr32(dev, 0x100c90, idx | 0x80000000);
- } else if (display) {
- NV_INFO(dev, "%s - no VM fault?\n", name);
- }
-}
-
static struct nouveau_enum_names nv50_mp_exec_error_names[] =
{
{ 3, "STACK_UNDERFLOW" },
@@ -711,7 +720,7 @@ nv50_pgraph_tp_trap(struct drm_device *dev, int type, uint32_t ustatus_old,
tps++;
switch (type) {
case 6: /* texture error... unknown for now */
- nv50_pfb_vm_trap(dev, display, name);
+ nv50_fb_vm_trap(dev, display, name);
if (display) {
NV_ERROR(dev, "magic set %d:\n", i);
for (r = ustatus_addr + 4; r <= ustatus_addr + 0x10; r += 4)
@@ -734,7 +743,7 @@ nv50_pgraph_tp_trap(struct drm_device *dev, int type, uint32_t ustatus_old,
uint32_t e1c = nv_rd32(dev, ustatus_addr + 0x14);
uint32_t e20 = nv_rd32(dev, ustatus_addr + 0x18);
uint32_t e24 = nv_rd32(dev, ustatus_addr + 0x1c);
- nv50_pfb_vm_trap(dev, display, name);
+ nv50_fb_vm_trap(dev, display, name);
/* 2d engine destination */
if (ustatus & 0x00000010) {
if (display) {
@@ -817,7 +826,7 @@ nv50_pgraph_trap_handler(struct drm_device *dev)
/* Known to be triggered by screwed up NOTIFY and COND... */
if (ustatus & 0x00000001) {
- nv50_pfb_vm_trap(dev, display, "PGRAPH_TRAP_DISPATCH_FAULT");
+ nv50_fb_vm_trap(dev, display, "PGRAPH_TRAP_DISPATCH_FAULT");
nv_wr32(dev, 0x400500, 0);
if (nv_rd32(dev, 0x400808) & 0x80000000) {
if (display) {
@@ -842,7 +851,7 @@ nv50_pgraph_trap_handler(struct drm_device *dev)
ustatus &= ~0x00000001;
}
if (ustatus & 0x00000002) {
- nv50_pfb_vm_trap(dev, display, "PGRAPH_TRAP_DISPATCH_QUERY");
+ nv50_fb_vm_trap(dev, display, "PGRAPH_TRAP_DISPATCH_QUERY");
nv_wr32(dev, 0x400500, 0);
if (nv_rd32(dev, 0x40084c) & 0x80000000) {
if (display) {
@@ -884,15 +893,15 @@ nv50_pgraph_trap_handler(struct drm_device *dev)
NV_INFO(dev, "PGRAPH_TRAP_M2MF - no ustatus?\n");
}
if (ustatus & 0x00000001) {
- nv50_pfb_vm_trap(dev, display, "PGRAPH_TRAP_M2MF_NOTIFY");
+ nv50_fb_vm_trap(dev, display, "PGRAPH_TRAP_M2MF_NOTIFY");
ustatus &= ~0x00000001;
}
if (ustatus & 0x00000002) {
- nv50_pfb_vm_trap(dev, display, "PGRAPH_TRAP_M2MF_IN");
+ nv50_fb_vm_trap(dev, display, "PGRAPH_TRAP_M2MF_IN");
ustatus &= ~0x00000002;
}
if (ustatus & 0x00000004) {
- nv50_pfb_vm_trap(dev, display, "PGRAPH_TRAP_M2MF_OUT");
+ nv50_fb_vm_trap(dev, display, "PGRAPH_TRAP_M2MF_OUT");
ustatus &= ~0x00000004;
}
NV_INFO (dev, "PGRAPH_TRAP_M2MF - %08x %08x %08x %08x\n",
@@ -917,7 +926,7 @@ nv50_pgraph_trap_handler(struct drm_device *dev)
NV_INFO(dev, "PGRAPH_TRAP_VFETCH - no ustatus?\n");
}
if (ustatus & 0x00000001) {
- nv50_pfb_vm_trap(dev, display, "PGRAPH_TRAP_VFETCH_FAULT");
+ nv50_fb_vm_trap(dev, display, "PGRAPH_TRAP_VFETCH_FAULT");
NV_INFO (dev, "PGRAPH_TRAP_VFETCH_FAULT - %08x %08x %08x %08x\n",
nv_rd32(dev, 0x400c00),
nv_rd32(dev, 0x400c08),
@@ -939,7 +948,7 @@ nv50_pgraph_trap_handler(struct drm_device *dev)
NV_INFO(dev, "PGRAPH_TRAP_STRMOUT - no ustatus?\n");
}
if (ustatus & 0x00000001) {
- nv50_pfb_vm_trap(dev, display, "PGRAPH_TRAP_STRMOUT_FAULT");
+ nv50_fb_vm_trap(dev, display, "PGRAPH_TRAP_STRMOUT_FAULT");
NV_INFO (dev, "PGRAPH_TRAP_STRMOUT_FAULT - %08x %08x %08x %08x\n",
nv_rd32(dev, 0x401804),
nv_rd32(dev, 0x401808),
@@ -964,7 +973,7 @@ nv50_pgraph_trap_handler(struct drm_device *dev)
NV_INFO(dev, "PGRAPH_TRAP_CCACHE - no ustatus?\n");
}
if (ustatus & 0x00000001) {
- nv50_pfb_vm_trap(dev, display, "PGRAPH_TRAP_CCACHE_FAULT");
+ nv50_fb_vm_trap(dev, display, "PGRAPH_TRAP_CCACHE_FAULT");
NV_INFO (dev, "PGRAPH_TRAP_CCACHE_FAULT - %08x %08x %08x %08x %08x %08x %08x\n",
nv_rd32(dev, 0x405800),
nv_rd32(dev, 0x405804),
@@ -986,7 +995,7 @@ nv50_pgraph_trap_handler(struct drm_device *dev)
* remaining, so try to handle it anyway. Perhaps related to that
* unknown DMA slot on tesla? */
if (status & 0x20) {
- nv50_pfb_vm_trap(dev, display, "PGRAPH_TRAP_UNKC04");
+ nv50_fb_vm_trap(dev, display, "PGRAPH_TRAP_UNKC04");
ustatus = nv_rd32(dev, 0x402000) & 0x7fffffff;
if (display)
NV_INFO(dev, "PGRAPH_TRAP_UNKC04 - Unhandled ustatus 0x%08x\n", ustatus);
diff --git a/drivers/gpu/drm/nouveau/nouveau_mem.c b/drivers/gpu/drm/nouveau/nouveau_mem.c
index 9689d4147686..fe4a30dc4b42 100644
--- a/drivers/gpu/drm/nouveau/nouveau_mem.c
+++ b/drivers/gpu/drm/nouveau/nouveau_mem.c
@@ -33,7 +33,9 @@
#include "drmP.h"
#include "drm.h"
#include "drm_sarea.h"
+
#include "nouveau_drv.h"
+#include "nouveau_pm.h"
/*
* NV10-NV40 tiling helpers
@@ -47,18 +49,14 @@ nv10_mem_set_region_tiling(struct drm_device *dev, int i, uint32_t addr,
struct nouveau_fifo_engine *pfifo = &dev_priv->engine.fifo;
struct nouveau_fb_engine *pfb = &dev_priv->engine.fb;
struct nouveau_pgraph_engine *pgraph = &dev_priv->engine.graph;
- struct nouveau_tile_reg *tile = &dev_priv->tile.reg[i];
+ struct nouveau_tile_reg *tile = &dev_priv->tile[i];
tile->addr = addr;
tile->size = size;
tile->used = !!pitch;
nouveau_fence_unref((void **)&tile->fence);
- if (!pfifo->cache_flush(dev))
- return;
-
pfifo->reassign(dev, false);
- pfifo->cache_flush(dev);
pfifo->cache_pull(dev, false);
nouveau_wait_for_idle(dev);
@@ -76,34 +74,36 @@ nv10_mem_set_tiling(struct drm_device *dev, uint32_t addr, uint32_t size,
{
struct drm_nouveau_private *dev_priv = dev->dev_private;
struct nouveau_fb_engine *pfb = &dev_priv->engine.fb;
- struct nouveau_tile_reg *tile = dev_priv->tile.reg, *found = NULL;
- int i;
+ struct nouveau_tile_reg *found = NULL;
+ unsigned long i, flags;
- spin_lock(&dev_priv->tile.lock);
+ spin_lock_irqsave(&dev_priv->context_switch_lock, flags);
for (i = 0; i < pfb->num_tiles; i++) {
- if (tile[i].used)
+ struct nouveau_tile_reg *tile = &dev_priv->tile[i];
+
+ if (tile->used)
/* Tile region in use. */
continue;
- if (tile[i].fence &&
- !nouveau_fence_signalled(tile[i].fence, NULL))
+ if (tile->fence &&
+ !nouveau_fence_signalled(tile->fence, NULL))
/* Pending tile region. */
continue;
- if (max(tile[i].addr, addr) <
- min(tile[i].addr + tile[i].size, addr + size))
+ if (max(tile->addr, addr) <
+ min(tile->addr + tile->size, addr + size))
/* Kill an intersecting tile region. */
nv10_mem_set_region_tiling(dev, i, 0, 0, 0);
if (pitch && !found) {
/* Free tile region. */
nv10_mem_set_region_tiling(dev, i, addr, size, pitch);
- found = &tile[i];
+ found = tile;
}
}
- spin_unlock(&dev_priv->tile.lock);
+ spin_unlock_irqrestore(&dev_priv->context_switch_lock, flags);
return found;
}
@@ -169,16 +169,16 @@ nv50_mem_vm_bind_linear(struct drm_device *dev, uint64_t virt, uint32_t size,
virt += (end - pte);
while (pte < end) {
- nv_wo32(dev, pgt, pte++, offset_l);
- nv_wo32(dev, pgt, pte++, offset_h);
+ nv_wo32(pgt, (pte * 4) + 0, offset_l);
+ nv_wo32(pgt, (pte * 4) + 4, offset_h);
+ pte += 2;
}
}
}
- dev_priv->engine.instmem.flush(dev);
- nv50_vm_flush(dev, 5);
- nv50_vm_flush(dev, 0);
- nv50_vm_flush(dev, 4);
+ dev_priv->engine.instmem.flush(dev);
+ dev_priv->engine.fifo.tlb_flush(dev);
+ dev_priv->engine.graph.tlb_flush(dev);
nv50_vm_flush(dev, 6);
return 0;
}
@@ -203,14 +203,15 @@ nv50_mem_vm_unbind(struct drm_device *dev, uint64_t virt, uint32_t size)
pages -= (end - pte);
virt += (end - pte) << 15;
- while (pte < end)
- nv_wo32(dev, pgt, pte++, 0);
+ while (pte < end) {
+ nv_wo32(pgt, (pte * 4), 0);
+ pte++;
+ }
}
- dev_priv->engine.instmem.flush(dev);
- nv50_vm_flush(dev, 5);
- nv50_vm_flush(dev, 0);
- nv50_vm_flush(dev, 4);
+ dev_priv->engine.instmem.flush(dev);
+ dev_priv->engine.fifo.tlb_flush(dev);
+ dev_priv->engine.graph.tlb_flush(dev);
nv50_vm_flush(dev, 6);
}
@@ -218,7 +219,7 @@ nv50_mem_vm_unbind(struct drm_device *dev, uint64_t virt, uint32_t size)
* Cleanup everything
*/
void
-nouveau_mem_close(struct drm_device *dev)
+nouveau_mem_vram_fini(struct drm_device *dev)
{
struct drm_nouveau_private *dev_priv = dev->dev_private;
@@ -229,6 +230,19 @@ nouveau_mem_close(struct drm_device *dev)
nouveau_ttm_global_release(dev_priv);
+ if (dev_priv->fb_mtrr >= 0) {
+ drm_mtrr_del(dev_priv->fb_mtrr,
+ pci_resource_start(dev->pdev, 1),
+ pci_resource_len(dev->pdev, 1), DRM_MTRR_WC);
+ dev_priv->fb_mtrr = -1;
+ }
+}
+
+void
+nouveau_mem_gart_fini(struct drm_device *dev)
+{
+ nouveau_sgdma_takedown(dev);
+
if (drm_core_has_AGP(dev) && dev->agp) {
struct drm_agp_mem *entry, *tempe;
@@ -248,13 +262,6 @@ nouveau_mem_close(struct drm_device *dev)
dev->agp->acquired = 0;
dev->agp->enabled = 0;
}
-
- if (dev_priv->fb_mtrr) {
- drm_mtrr_del(dev_priv->fb_mtrr,
- pci_resource_start(dev->pdev, 1),
- pci_resource_len(dev->pdev, 1), DRM_MTRR_WC);
- dev_priv->fb_mtrr = -1;
- }
}
static uint32_t
@@ -305,8 +312,62 @@ nouveau_mem_detect_nforce(struct drm_device *dev)
return 0;
}
-/* returns the amount of FB ram in bytes */
-int
+static void
+nv50_vram_preinit(struct drm_device *dev)
+{
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ int i, parts, colbits, rowbitsa, rowbitsb, banks;
+ u64 rowsize, predicted;
+ u32 r0, r4, rt, ru;
+
+ r0 = nv_rd32(dev, 0x100200);
+ r4 = nv_rd32(dev, 0x100204);
+ rt = nv_rd32(dev, 0x100250);
+ ru = nv_rd32(dev, 0x001540);
+ NV_DEBUG(dev, "memcfg 0x%08x 0x%08x 0x%08x 0x%08x\n", r0, r4, rt, ru);
+
+ for (i = 0, parts = 0; i < 8; i++) {
+ if (ru & (0x00010000 << i))
+ parts++;
+ }
+
+ colbits = (r4 & 0x0000f000) >> 12;
+ rowbitsa = ((r4 & 0x000f0000) >> 16) + 8;
+ rowbitsb = ((r4 & 0x00f00000) >> 20) + 8;
+ banks = ((r4 & 0x01000000) ? 8 : 4);
+
+ rowsize = parts * banks * (1 << colbits) * 8;
+ predicted = rowsize << rowbitsa;
+ if (r0 & 0x00000004)
+ predicted += rowsize << rowbitsb;
+
+ if (predicted != dev_priv->vram_size) {
+ NV_WARN(dev, "memory controller reports %dMiB VRAM\n",
+ (u32)(dev_priv->vram_size >> 20));
+ NV_WARN(dev, "we calculated %dMiB VRAM\n",
+ (u32)(predicted >> 20));
+ }
+
+ dev_priv->vram_rblock_size = rowsize >> 12;
+ if (rt & 1)
+ dev_priv->vram_rblock_size *= 3;
+
+ NV_DEBUG(dev, "rblock %lld bytes\n",
+ (u64)dev_priv->vram_rblock_size << 12);
+}
+
+static void
+nvaa_vram_preinit(struct drm_device *dev)
+{
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+
+ /* To our knowledge, there's no large scale reordering of pages
+ * that occurs on IGP chipsets.
+ */
+ dev_priv->vram_rblock_size = 1;
+}
+
+static int
nouveau_mem_detect(struct drm_device *dev)
{
struct drm_nouveau_private *dev_priv = dev->dev_private;
@@ -325,9 +386,18 @@ nouveau_mem_detect(struct drm_device *dev)
dev_priv->vram_size = nv_rd32(dev, NV04_PFB_FIFO_DATA);
dev_priv->vram_size |= (dev_priv->vram_size & 0xff) << 32;
dev_priv->vram_size &= 0xffffffff00ll;
- if (dev_priv->chipset == 0xaa || dev_priv->chipset == 0xac) {
+
+ switch (dev_priv->chipset) {
+ case 0xaa:
+ case 0xac:
+ case 0xaf:
dev_priv->vram_sys_base = nv_rd32(dev, 0x100e10);
dev_priv->vram_sys_base <<= 12;
+ nvaa_vram_preinit(dev);
+ break;
+ default:
+ nv50_vram_preinit(dev);
+ break;
}
} else {
dev_priv->vram_size = nv_rd32(dev, 0x10f20c) << 20;
@@ -345,6 +415,33 @@ nouveau_mem_detect(struct drm_device *dev)
return -ENOMEM;
}
+#if __OS_HAS_AGP
+static unsigned long
+get_agp_mode(struct drm_device *dev, unsigned long mode)
+{
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+
+ /*
+ * FW seems to be broken on nv18, it makes the card lock up
+ * randomly.
+ */
+ if (dev_priv->chipset == 0x18)
+ mode &= ~PCI_AGP_COMMAND_FW;
+
+ /*
+ * AGP mode set in the command line.
+ */
+ if (nouveau_agpmode > 0) {
+ bool agpv3 = mode & 0x8;
+ int rate = agpv3 ? nouveau_agpmode / 4 : nouveau_agpmode;
+
+ mode = (mode & ~0x7) | (rate & 0x7);
+ }
+
+ return mode;
+}
+#endif
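To illustrate the override in get_agp_mode(): with an AGP 3.0 bridge (status bit 3 set) and the nouveau_agpmode module option set to 8, the requested rate becomes 8 / 4 = 2, which is the AGP 3.0 encoding of 8x, so the low bits of the command are rewritten:

    mode = (mode & ~0x7) | (2 & 0x7);  /* illustrative: select 8x on an AGP3 bridge */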
+
int
nouveau_mem_reset_agp(struct drm_device *dev)
{
@@ -355,7 +452,8 @@ nouveau_mem_reset_agp(struct drm_device *dev)
/* First of all, disable fast writes, otherwise if it's
* already enabled in the AGP bridge and we disable the card's
* AGP controller we might be locking ourselves out of it. */
- if (nv_rd32(dev, NV04_PBUS_PCI_NV_19) & PCI_AGP_COMMAND_FW) {
+ if ((nv_rd32(dev, NV04_PBUS_PCI_NV_19) |
+ dev->agp->mode) & PCI_AGP_COMMAND_FW) {
struct drm_agp_info info;
struct drm_agp_mode mode;
@@ -363,7 +461,7 @@ nouveau_mem_reset_agp(struct drm_device *dev)
if (ret)
return ret;
- mode.mode = info.mode & ~PCI_AGP_COMMAND_FW;
+ mode.mode = get_agp_mode(dev, info.mode) & ~PCI_AGP_COMMAND_FW;
ret = drm_agp_enable(dev, mode);
if (ret)
return ret;
@@ -418,7 +516,7 @@ nouveau_mem_init_agp(struct drm_device *dev)
}
/* see agp.h for the AGPSTAT_* modes available */
- mode.mode = info.mode;
+ mode.mode = get_agp_mode(dev, info.mode);
ret = drm_agp_enable(dev, mode);
if (ret) {
NV_ERROR(dev, "Unable to enable AGP: %d\n", ret);
@@ -433,24 +531,27 @@ nouveau_mem_init_agp(struct drm_device *dev)
}
int
-nouveau_mem_init(struct drm_device *dev)
+nouveau_mem_vram_init(struct drm_device *dev)
{
struct drm_nouveau_private *dev_priv = dev->dev_private;
struct ttm_bo_device *bdev = &dev_priv->ttm.bdev;
- int ret, dma_bits = 32;
-
- dev_priv->fb_phys = pci_resource_start(dev->pdev, 1);
- dev_priv->gart_info.type = NOUVEAU_GART_NONE;
+ int ret, dma_bits;
if (dev_priv->card_type >= NV_50 &&
pci_dma_supported(dev->pdev, DMA_BIT_MASK(40)))
dma_bits = 40;
+ else
+ dma_bits = 32;
ret = pci_set_dma_mask(dev->pdev, DMA_BIT_MASK(dma_bits));
- if (ret) {
- NV_ERROR(dev, "Error setting DMA mask: %d\n", ret);
+ if (ret)
return ret;
- }
+
+ ret = nouveau_mem_detect(dev);
+ if (ret)
+ return ret;
+
+ dev_priv->fb_phys = pci_resource_start(dev->pdev, 1);
ret = nouveau_ttm_global_init(dev_priv);
if (ret)
@@ -465,8 +566,6 @@ nouveau_mem_init(struct drm_device *dev)
return ret;
}
- spin_lock_init(&dev_priv->tile.lock);
-
dev_priv->fb_available_size = dev_priv->vram_size;
dev_priv->fb_mappable_pages = dev_priv->fb_available_size;
if (dev_priv->fb_mappable_pages > pci_resource_len(dev->pdev, 1))
@@ -474,7 +573,16 @@ nouveau_mem_init(struct drm_device *dev)
pci_resource_len(dev->pdev, 1);
dev_priv->fb_mappable_pages >>= PAGE_SHIFT;
- /* remove reserved space at end of vram from available amount */
+ /* reserve space at end of VRAM for PRAMIN */
+ if (dev_priv->chipset == 0x40 || dev_priv->chipset == 0x47 ||
+ dev_priv->chipset == 0x49 || dev_priv->chipset == 0x4b)
+ dev_priv->ramin_rsvd_vram = (2 * 1024 * 1024);
+ else
+ if (dev_priv->card_type >= NV_40)
+ dev_priv->ramin_rsvd_vram = (1 * 1024 * 1024);
+ else
+ dev_priv->ramin_rsvd_vram = (512 * 1024);
+
dev_priv->fb_available_size -= dev_priv->ramin_rsvd_vram;
dev_priv->fb_aper_free = dev_priv->fb_available_size;
@@ -495,9 +603,23 @@ nouveau_mem_init(struct drm_device *dev)
nouveau_bo_ref(NULL, &dev_priv->vga_ram);
}
- /* GART */
+ dev_priv->fb_mtrr = drm_mtrr_add(pci_resource_start(dev->pdev, 1),
+ pci_resource_len(dev->pdev, 1),
+ DRM_MTRR_WC);
+ return 0;
+}
+
+int
+nouveau_mem_gart_init(struct drm_device *dev)
+{
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ struct ttm_bo_device *bdev = &dev_priv->ttm.bdev;
+ int ret;
+
+ dev_priv->gart_info.type = NOUVEAU_GART_NONE;
+
#if !defined(__powerpc__) && !defined(__ia64__)
- if (drm_device_is_agp(dev) && dev->agp && !nouveau_noagp) {
+ if (drm_device_is_agp(dev) && dev->agp && nouveau_agpmode) {
ret = nouveau_mem_init_agp(dev);
if (ret)
NV_ERROR(dev, "Error initialising AGP: %d\n", ret);
@@ -523,11 +645,157 @@ nouveau_mem_init(struct drm_device *dev)
return ret;
}
- dev_priv->fb_mtrr = drm_mtrr_add(pci_resource_start(dev->pdev, 1),
- pci_resource_len(dev->pdev, 1),
- DRM_MTRR_WC);
-
return 0;
}
+void
+nouveau_mem_timing_init(struct drm_device *dev)
+{
+ /* cards < NVC0 only */
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ struct nouveau_pm_engine *pm = &dev_priv->engine.pm;
+ struct nouveau_pm_memtimings *memtimings = &pm->memtimings;
+ struct nvbios *bios = &dev_priv->vbios;
+ struct bit_entry P;
+ u8 tUNK_0, tUNK_1, tUNK_2;
+ u8 tRP; /* Byte 3 */
+ u8 tRAS; /* Byte 5 */
+ u8 tRFC; /* Byte 7 */
+ u8 tRC; /* Byte 9 */
+ u8 tUNK_10, tUNK_11, tUNK_12, tUNK_13, tUNK_14;
+ u8 tUNK_18, tUNK_19, tUNK_20, tUNK_21;
+ u8 *mem = NULL, *entry;
+ int i, recordlen, entries;
+
+ if (bios->type == NVBIOS_BIT) {
+ if (bit_table(dev, 'P', &P))
+ return;
+
+ if (P.version == 1)
+ mem = ROMPTR(bios, P.data[4]);
+ else
+ if (P.version == 2)
+ mem = ROMPTR(bios, P.data[8]);
+ else {
+ NV_WARN(dev, "unknown mem for BIT P %d\n", P.version);
+ }
+ } else {
+ NV_DEBUG(dev, "BMP version too old for memory\n");
+ return;
+ }
+
+ if (!mem) {
+ NV_DEBUG(dev, "memory timing table pointer invalid\n");
+ return;
+ }
+
+ if (mem[0] != 0x10) {
+ NV_WARN(dev, "memory timing table 0x%02x unknown\n", mem[0]);
+ return;
+ }
+
+ /* validate record length */
+ entries = mem[2];
+ recordlen = mem[3];
+ if (recordlen < 15) {
+ NV_ERROR(dev, "mem timing table length unknown: %d\n", mem[3]);
+ return;
+ }
+
+ /* parse vbios entries into common format */
+ memtimings->timing =
+ kcalloc(entries, sizeof(*memtimings->timing), GFP_KERNEL);
+ if (!memtimings->timing)
+ return;
+
+ entry = mem + mem[1];
+ for (i = 0; i < entries; i++, entry += recordlen) {
+ struct nouveau_pm_memtiming *timing = &pm->memtimings.timing[i];
+ if (entry[0] == 0)
+ continue;
+
+ tUNK_18 = 1;
+ tUNK_19 = 1;
+ tUNK_20 = 0;
+ tUNK_21 = 0;
+ switch (min(recordlen, 22)) {
+ case 22:
+ tUNK_21 = entry[21];
+ case 21:
+ tUNK_20 = entry[20];
+ case 20:
+ tUNK_19 = entry[19];
+ case 19:
+ tUNK_18 = entry[18];
+ default:
+ tUNK_0 = entry[0];
+ tUNK_1 = entry[1];
+ tUNK_2 = entry[2];
+ tRP = entry[3];
+ tRAS = entry[5];
+ tRFC = entry[7];
+ tRC = entry[9];
+ tUNK_10 = entry[10];
+ tUNK_11 = entry[11];
+ tUNK_12 = entry[12];
+ tUNK_13 = entry[13];
+ tUNK_14 = entry[14];
+ break;
+ }
+
+ timing->reg_100220 = (tRC << 24 | tRFC << 16 | tRAS << 8 | tRP);
+
+ /* XXX: I don't trust the -1's and +1's... they must come
+ * from somewhere! */
+ timing->reg_100224 = ((tUNK_0 + tUNK_19 + 1) << 24 |
+ tUNK_18 << 16 |
+ (tUNK_1 + tUNK_19 + 1) << 8 |
+ (tUNK_2 - 1));
+
+ timing->reg_100228 = (tUNK_12 << 16 | tUNK_11 << 8 | tUNK_10);
+ if (recordlen > 19) {
+ timing->reg_100228 += (tUNK_19 - 1) << 24;
+ }/* I cannot back up this else-statement right now
+ else {
+ timing->reg_100228 += tUNK_12 << 24;
+ }*/
+
+ /* XXX: reg_10022c */
+ timing->reg_10022c = tUNK_2 - 1;
+
+ timing->reg_100230 = (tUNK_20 << 24 | tUNK_21 << 16 |
+ tUNK_13 << 8 | tUNK_13);
+
+ /* XXX: +6? */
+ timing->reg_100234 = (tRAS << 24 | (tUNK_19 + 6) << 8 | tRC);
+ timing->reg_100234 += max(tUNK_10, tUNK_11) << 16;
+
+ /* XXX: reg_100238, reg_10023c
+ * reg: 0x00??????
+ * reg_10023c:
+ * 0 for pre-NV50 cards
+ * 0x????0202 for NV50+ cards (empirical evidence) */
+ if (dev_priv->card_type >= NV_50) {
+ timing->reg_10023c = 0x202;
+ }
+
+ NV_DEBUG(dev, "Entry %d: 220: %08x %08x %08x %08x\n", i,
+ timing->reg_100220, timing->reg_100224,
+ timing->reg_100228, timing->reg_10022c);
+ NV_DEBUG(dev, " 230: %08x %08x %08x %08x\n",
+ timing->reg_100230, timing->reg_100234,
+ timing->reg_100238, timing->reg_10023c);
+ }
+
+ memtimings->nr_timing = entries;
+ memtimings->supported = true;
+}
+
+void
+nouveau_mem_timing_fini(struct drm_device *dev)
+{
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ struct nouveau_pm_memtimings *mem = &dev_priv->engine.pm.memtimings;
+ kfree(mem->timing);
+}
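The register packing in nouveau_mem_timing_init() above is plain byte packing. With illustrative values tRP = 0x0d, tRAS = 0x20, tRFC = 0x16 and tRC = 0x2c:

    reg_100220 = (0x2c << 24) | (0x16 << 16) | (0x20 << 8) | 0x0d;  /* = 0x2c16200d */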
diff --git a/drivers/gpu/drm/nouveau/nouveau_notifier.c b/drivers/gpu/drm/nouveau/nouveau_notifier.c
index 3ec181ff50ce..2cc59f8c658b 100644
--- a/drivers/gpu/drm/nouveau/nouveau_notifier.c
+++ b/drivers/gpu/drm/nouveau/nouveau_notifier.c
@@ -28,6 +28,7 @@
#include "drmP.h"
#include "drm.h"
#include "nouveau_drv.h"
+#include "nouveau_ramht.h"
int
nouveau_notifier_init_channel(struct nouveau_channel *chan)
@@ -112,7 +113,7 @@ nouveau_notifier_alloc(struct nouveau_channel *chan, uint32_t handle,
return -ENOMEM;
}
- offset = chan->notifier_bo->bo.mem.mm_node->start << PAGE_SHIFT;
+ offset = chan->notifier_bo->bo.mem.start << PAGE_SHIFT;
if (chan->notifier_bo->bo.mem.mem_type == TTM_PL_VRAM) {
target = NV_DMA_TARGET_VIDMEM;
} else
@@ -146,11 +147,11 @@ nouveau_notifier_alloc(struct nouveau_channel *chan, uint32_t handle,
nobj->dtor = nouveau_notifier_gpuobj_dtor;
nobj->priv = mem;
- ret = nouveau_gpuobj_ref_add(dev, chan, handle, nobj, NULL);
+ ret = nouveau_ramht_insert(chan, handle, nobj);
+ nouveau_gpuobj_ref(NULL, &nobj);
if (ret) {
- nouveau_gpuobj_del(dev, &nobj);
drm_mm_put_block(mem);
- NV_ERROR(dev, "Error referencing notifier ctxdma: %d\n", ret);
+ NV_ERROR(dev, "Error adding notifier to ramht: %d\n", ret);
return ret;
}
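This conversion follows the ownership rule used for all the RAMHT call sites in this patch: nouveau_ramht_insert() is expected to take its own reference on the object it stores, so the caller unconditionally drops its local reference straight afterwards, whether or not the insert succeeded. A sketch of the pattern:

    ret = nouveau_ramht_insert(chan, handle, obj);
    nouveau_gpuobj_ref(NULL, &obj);  /* drop the local ref; RAMHT keeps its own on success */
    if (ret)
        return ret;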
diff --git a/drivers/gpu/drm/nouveau/nouveau_object.c b/drivers/gpu/drm/nouveau/nouveau_object.c
index b6bcb254f4ab..dd572adca02a 100644
--- a/drivers/gpu/drm/nouveau/nouveau_object.c
+++ b/drivers/gpu/drm/nouveau/nouveau_object.c
@@ -34,6 +34,7 @@
#include "drm.h"
#include "nouveau_drv.h"
#include "nouveau_drm.h"
+#include "nouveau_ramht.h"
/* NVidia uses context objects to drive drawing operations.
@@ -65,137 +66,6 @@
The key into the hash table depends on the object handle and channel id and
is given as:
*/
-static uint32_t
-nouveau_ramht_hash_handle(struct drm_device *dev, int channel, uint32_t handle)
-{
- struct drm_nouveau_private *dev_priv = dev->dev_private;
- uint32_t hash = 0;
- int i;
-
- NV_DEBUG(dev, "ch%d handle=0x%08x\n", channel, handle);
-
- for (i = 32; i > 0; i -= dev_priv->ramht_bits) {
- hash ^= (handle & ((1 << dev_priv->ramht_bits) - 1));
- handle >>= dev_priv->ramht_bits;
- }
-
- if (dev_priv->card_type < NV_50)
- hash ^= channel << (dev_priv->ramht_bits - 4);
- hash <<= 3;
-
- NV_DEBUG(dev, "hash=0x%08x\n", hash);
- return hash;
-}
-
-static int
-nouveau_ramht_entry_valid(struct drm_device *dev, struct nouveau_gpuobj *ramht,
- uint32_t offset)
-{
- struct drm_nouveau_private *dev_priv = dev->dev_private;
- uint32_t ctx = nv_ro32(dev, ramht, (offset + 4)/4);
-
- if (dev_priv->card_type < NV_40)
- return ((ctx & NV_RAMHT_CONTEXT_VALID) != 0);
- return (ctx != 0);
-}
-
-static int
-nouveau_ramht_insert(struct drm_device *dev, struct nouveau_gpuobj_ref *ref)
-{
- struct drm_nouveau_private *dev_priv = dev->dev_private;
- struct nouveau_instmem_engine *instmem = &dev_priv->engine.instmem;
- struct nouveau_channel *chan = ref->channel;
- struct nouveau_gpuobj *ramht = chan->ramht ? chan->ramht->gpuobj : NULL;
- uint32_t ctx, co, ho;
-
- if (!ramht) {
- NV_ERROR(dev, "No hash table!\n");
- return -EINVAL;
- }
-
- if (dev_priv->card_type < NV_40) {
- ctx = NV_RAMHT_CONTEXT_VALID | (ref->instance >> 4) |
- (chan->id << NV_RAMHT_CONTEXT_CHANNEL_SHIFT) |
- (ref->gpuobj->engine << NV_RAMHT_CONTEXT_ENGINE_SHIFT);
- } else
- if (dev_priv->card_type < NV_50) {
- ctx = (ref->instance >> 4) |
- (chan->id << NV40_RAMHT_CONTEXT_CHANNEL_SHIFT) |
- (ref->gpuobj->engine << NV40_RAMHT_CONTEXT_ENGINE_SHIFT);
- } else {
- if (ref->gpuobj->engine == NVOBJ_ENGINE_DISPLAY) {
- ctx = (ref->instance << 10) | 2;
- } else {
- ctx = (ref->instance >> 4) |
- ((ref->gpuobj->engine <<
- NV40_RAMHT_CONTEXT_ENGINE_SHIFT));
- }
- }
-
- co = ho = nouveau_ramht_hash_handle(dev, chan->id, ref->handle);
- do {
- if (!nouveau_ramht_entry_valid(dev, ramht, co)) {
- NV_DEBUG(dev,
- "insert ch%d 0x%08x: h=0x%08x, c=0x%08x\n",
- chan->id, co, ref->handle, ctx);
- nv_wo32(dev, ramht, (co + 0)/4, ref->handle);
- nv_wo32(dev, ramht, (co + 4)/4, ctx);
-
- list_add_tail(&ref->list, &chan->ramht_refs);
- instmem->flush(dev);
- return 0;
- }
- NV_DEBUG(dev, "collision ch%d 0x%08x: h=0x%08x\n",
- chan->id, co, nv_ro32(dev, ramht, co/4));
-
- co += 8;
- if (co >= dev_priv->ramht_size)
- co = 0;
- } while (co != ho);
-
- NV_ERROR(dev, "RAMHT space exhausted. ch=%d\n", chan->id);
- return -ENOMEM;
-}
-
-static void
-nouveau_ramht_remove(struct drm_device *dev, struct nouveau_gpuobj_ref *ref)
-{
- struct drm_nouveau_private *dev_priv = dev->dev_private;
- struct nouveau_instmem_engine *instmem = &dev_priv->engine.instmem;
- struct nouveau_channel *chan = ref->channel;
- struct nouveau_gpuobj *ramht = chan->ramht ? chan->ramht->gpuobj : NULL;
- uint32_t co, ho;
-
- if (!ramht) {
- NV_ERROR(dev, "No hash table!\n");
- return;
- }
-
- co = ho = nouveau_ramht_hash_handle(dev, chan->id, ref->handle);
- do {
- if (nouveau_ramht_entry_valid(dev, ramht, co) &&
- (ref->handle == nv_ro32(dev, ramht, (co/4)))) {
- NV_DEBUG(dev,
- "remove ch%d 0x%08x: h=0x%08x, c=0x%08x\n",
- chan->id, co, ref->handle,
- nv_ro32(dev, ramht, (co + 4)));
- nv_wo32(dev, ramht, (co + 0)/4, 0x00000000);
- nv_wo32(dev, ramht, (co + 4)/4, 0x00000000);
-
- list_del(&ref->list);
- instmem->flush(dev);
- return;
- }
-
- co += 8;
- if (co >= dev_priv->ramht_size)
- co = 0;
- } while (co != ho);
- list_del(&ref->list);
-
- NV_ERROR(dev, "RAMHT entry not found. ch=%d, handle=0x%08x\n",
- chan->id, ref->handle);
-}
int
nouveau_gpuobj_new(struct drm_device *dev, struct nouveau_channel *chan,
@@ -205,7 +75,7 @@ nouveau_gpuobj_new(struct drm_device *dev, struct nouveau_channel *chan,
struct drm_nouveau_private *dev_priv = dev->dev_private;
struct nouveau_engine *engine = &dev_priv->engine;
struct nouveau_gpuobj *gpuobj;
- struct drm_mm *pramin = NULL;
+ struct drm_mm_node *ramin = NULL;
int ret;
NV_DEBUG(dev, "ch%d size=%u align=%d flags=0x%08x\n",
@@ -218,69 +88,102 @@ nouveau_gpuobj_new(struct drm_device *dev, struct nouveau_channel *chan,
if (!gpuobj)
return -ENOMEM;
NV_DEBUG(dev, "gpuobj %p\n", gpuobj);
+ gpuobj->dev = dev;
gpuobj->flags = flags;
- gpuobj->im_channel = chan;
+ kref_init(&gpuobj->refcount);
+ gpuobj->size = size;
+ spin_lock(&dev_priv->ramin_lock);
list_add_tail(&gpuobj->list, &dev_priv->gpuobj_list);
+ spin_unlock(&dev_priv->ramin_lock);
- /* Choose between global instmem heap, and per-channel private
- * instmem heap. On <NV50 allow requests for private instmem
- * to be satisfied from global heap if no per-channel area
- * available.
- */
if (chan) {
NV_DEBUG(dev, "channel heap\n");
- pramin = &chan->ramin_heap;
+
+ ramin = drm_mm_search_free(&chan->ramin_heap, size, align, 0);
+ if (ramin)
+ ramin = drm_mm_get_block(ramin, size, align);
+
+ if (!ramin) {
+ nouveau_gpuobj_ref(NULL, &gpuobj);
+ return -ENOMEM;
+ }
} else {
NV_DEBUG(dev, "global heap\n");
- pramin = &dev_priv->ramin_heap;
+ /* allocate backing pages, sets vinst */
ret = engine->instmem.populate(dev, gpuobj, &size);
if (ret) {
- nouveau_gpuobj_del(dev, &gpuobj);
+ nouveau_gpuobj_ref(NULL, &gpuobj);
return ret;
}
- }
- /* Allocate a chunk of the PRAMIN aperture */
- gpuobj->im_pramin = drm_mm_search_free(pramin, size, align, 0);
- if (gpuobj->im_pramin)
- gpuobj->im_pramin = drm_mm_get_block(gpuobj->im_pramin, size, align);
+ /* try and get aperture space */
+ do {
+ if (drm_mm_pre_get(&dev_priv->ramin_heap))
+ return -ENOMEM;
+
+ spin_lock(&dev_priv->ramin_lock);
+ ramin = drm_mm_search_free(&dev_priv->ramin_heap, size,
+ align, 0);
+ if (ramin == NULL) {
+ spin_unlock(&dev_priv->ramin_lock);
+ nouveau_gpuobj_ref(NULL, &gpuobj);
+ return -ENOMEM;
+ }
- if (!gpuobj->im_pramin) {
- nouveau_gpuobj_del(dev, &gpuobj);
- return -ENOMEM;
+ ramin = drm_mm_get_block_atomic(ramin, size, align);
+ spin_unlock(&dev_priv->ramin_lock);
+ } while (ramin == NULL);
+
+ /* on nv50 it's ok to fail, we have a fallback path */
+ if (!ramin && dev_priv->card_type < NV_50) {
+ nouveau_gpuobj_ref(NULL, &gpuobj);
+ return -ENOMEM;
+ }
}
- if (!chan) {
+ /* if we got a chunk of the aperture, map pages into it */
+ gpuobj->im_pramin = ramin;
+ if (!chan && gpuobj->im_pramin && dev_priv->ramin_available) {
ret = engine->instmem.bind(dev, gpuobj);
if (ret) {
- nouveau_gpuobj_del(dev, &gpuobj);
+ nouveau_gpuobj_ref(NULL, &gpuobj);
return ret;
}
}
+ /* calculate the various different addresses for the object */
+ if (chan) {
+ gpuobj->pinst = chan->ramin->pinst;
+ if (gpuobj->pinst != ~0)
+ gpuobj->pinst += gpuobj->im_pramin->start;
+
+ if (dev_priv->card_type < NV_50) {
+ gpuobj->cinst = gpuobj->pinst;
+ } else {
+ gpuobj->cinst = gpuobj->im_pramin->start;
+ gpuobj->vinst = gpuobj->im_pramin->start +
+ chan->ramin->vinst;
+ }
+ } else {
+ if (gpuobj->im_pramin)
+ gpuobj->pinst = gpuobj->im_pramin->start;
+ else
+ gpuobj->pinst = ~0;
+ gpuobj->cinst = 0xdeadbeef;
+ }
+
if (gpuobj->flags & NVOBJ_FLAG_ZERO_ALLOC) {
int i;
- for (i = 0; i < gpuobj->im_pramin->size; i += 4)
- nv_wo32(dev, gpuobj, i/4, 0);
+ for (i = 0; i < gpuobj->size; i += 4)
+ nv_wo32(gpuobj, i, 0);
engine->instmem.flush(dev);
}
- *gpuobj_ret = gpuobj;
- return 0;
-}
-
-int
-nouveau_gpuobj_early_init(struct drm_device *dev)
-{
- struct drm_nouveau_private *dev_priv = dev->dev_private;
-
- NV_DEBUG(dev, "\n");
-
- INIT_LIST_HEAD(&dev_priv->gpuobj_list);
+ *gpuobj_ret = gpuobj;
return 0;
}
@@ -288,18 +191,12 @@ int
nouveau_gpuobj_init(struct drm_device *dev)
{
struct drm_nouveau_private *dev_priv = dev->dev_private;
- int ret;
NV_DEBUG(dev, "\n");
- if (dev_priv->card_type < NV_50) {
- ret = nouveau_gpuobj_new_fake(dev,
- dev_priv->ramht_offset, ~0, dev_priv->ramht_size,
- NVOBJ_FLAG_ZERO_ALLOC | NVOBJ_FLAG_ALLOW_NO_REFS,
- &dev_priv->ramht, NULL);
- if (ret)
- return ret;
- }
+ INIT_LIST_HEAD(&dev_priv->gpuobj_list);
+ spin_lock_init(&dev_priv->ramin_lock);
+ dev_priv->ramin_base = ~0;
return 0;
}
@@ -311,297 +208,89 @@ nouveau_gpuobj_takedown(struct drm_device *dev)
NV_DEBUG(dev, "\n");
- nouveau_gpuobj_del(dev, &dev_priv->ramht);
+ BUG_ON(!list_empty(&dev_priv->gpuobj_list));
}
-void
-nouveau_gpuobj_late_takedown(struct drm_device *dev)
-{
- struct drm_nouveau_private *dev_priv = dev->dev_private;
- struct nouveau_gpuobj *gpuobj = NULL;
- struct list_head *entry, *tmp;
-
- NV_DEBUG(dev, "\n");
-
- list_for_each_safe(entry, tmp, &dev_priv->gpuobj_list) {
- gpuobj = list_entry(entry, struct nouveau_gpuobj, list);
-
- NV_ERROR(dev, "gpuobj %p still exists at takedown, refs=%d\n",
- gpuobj, gpuobj->refcount);
- gpuobj->refcount = 0;
- nouveau_gpuobj_del(dev, &gpuobj);
- }
-}
-int
-nouveau_gpuobj_del(struct drm_device *dev, struct nouveau_gpuobj **pgpuobj)
+static void
+nouveau_gpuobj_del(struct kref *ref)
{
+ struct nouveau_gpuobj *gpuobj =
+ container_of(ref, struct nouveau_gpuobj, refcount);
+ struct drm_device *dev = gpuobj->dev;
struct drm_nouveau_private *dev_priv = dev->dev_private;
struct nouveau_engine *engine = &dev_priv->engine;
- struct nouveau_gpuobj *gpuobj;
int i;
- NV_DEBUG(dev, "gpuobj %p\n", pgpuobj ? *pgpuobj : NULL);
-
- if (!dev_priv || !pgpuobj || !(*pgpuobj))
- return -EINVAL;
- gpuobj = *pgpuobj;
-
- if (gpuobj->refcount != 0) {
- NV_ERROR(dev, "gpuobj refcount is %d\n", gpuobj->refcount);
- return -EINVAL;
- }
+ NV_DEBUG(dev, "gpuobj %p\n", gpuobj);
if (gpuobj->im_pramin && (gpuobj->flags & NVOBJ_FLAG_ZERO_FREE)) {
- for (i = 0; i < gpuobj->im_pramin->size; i += 4)
- nv_wo32(dev, gpuobj, i/4, 0);
+ for (i = 0; i < gpuobj->size; i += 4)
+ nv_wo32(gpuobj, i, 0);
engine->instmem.flush(dev);
}
if (gpuobj->dtor)
gpuobj->dtor(dev, gpuobj);
- if (gpuobj->im_backing && !(gpuobj->flags & NVOBJ_FLAG_FAKE))
+ if (gpuobj->im_backing)
engine->instmem.clear(dev, gpuobj);
- if (gpuobj->im_pramin) {
- if (gpuobj->flags & NVOBJ_FLAG_FAKE)
- kfree(gpuobj->im_pramin);
- else
- drm_mm_put_block(gpuobj->im_pramin);
- }
-
+ spin_lock(&dev_priv->ramin_lock);
+ if (gpuobj->im_pramin)
+ drm_mm_put_block(gpuobj->im_pramin);
list_del(&gpuobj->list);
+ spin_unlock(&dev_priv->ramin_lock);
- *pgpuobj = NULL;
kfree(gpuobj);
- return 0;
-}
-
-static int
-nouveau_gpuobj_instance_get(struct drm_device *dev,
- struct nouveau_channel *chan,
- struct nouveau_gpuobj *gpuobj, uint32_t *inst)
-{
- struct drm_nouveau_private *dev_priv = dev->dev_private;
- struct nouveau_gpuobj *cpramin;
-
- /* <NV50 use PRAMIN address everywhere */
- if (dev_priv->card_type < NV_50) {
- *inst = gpuobj->im_pramin->start;
- return 0;
- }
-
- if (chan && gpuobj->im_channel != chan) {
- NV_ERROR(dev, "Channel mismatch: obj %d, ref %d\n",
- gpuobj->im_channel->id, chan->id);
- return -EINVAL;
- }
-
- /* NV50 channel-local instance */
- if (chan) {
- cpramin = chan->ramin->gpuobj;
- *inst = gpuobj->im_pramin->start - cpramin->im_pramin->start;
- return 0;
- }
-
- /* NV50 global (VRAM) instance */
- if (!gpuobj->im_channel) {
- /* ...from global heap */
- if (!gpuobj->im_backing) {
- NV_ERROR(dev, "AII, no VRAM backing gpuobj\n");
- return -EINVAL;
- }
- *inst = gpuobj->im_backing_start;
- return 0;
- } else {
- /* ...from local heap */
- cpramin = gpuobj->im_channel->ramin->gpuobj;
- *inst = cpramin->im_backing_start +
- (gpuobj->im_pramin->start - cpramin->im_pramin->start);
- return 0;
- }
-
- return -EINVAL;
-}
-
-int
-nouveau_gpuobj_ref_add(struct drm_device *dev, struct nouveau_channel *chan,
- uint32_t handle, struct nouveau_gpuobj *gpuobj,
- struct nouveau_gpuobj_ref **ref_ret)
-{
- struct drm_nouveau_private *dev_priv = dev->dev_private;
- struct nouveau_gpuobj_ref *ref;
- uint32_t instance;
- int ret;
-
- NV_DEBUG(dev, "ch%d h=0x%08x gpuobj=%p\n",
- chan ? chan->id : -1, handle, gpuobj);
-
- if (!dev_priv || !gpuobj || (ref_ret && *ref_ret != NULL))
- return -EINVAL;
-
- if (!chan && !ref_ret)
- return -EINVAL;
-
- if (gpuobj->engine == NVOBJ_ENGINE_SW && !gpuobj->im_pramin) {
- /* sw object */
- instance = 0x40;
- } else {
- ret = nouveau_gpuobj_instance_get(dev, chan, gpuobj, &instance);
- if (ret)
- return ret;
- }
-
- ref = kzalloc(sizeof(*ref), GFP_KERNEL);
- if (!ref)
- return -ENOMEM;
- INIT_LIST_HEAD(&ref->list);
- ref->gpuobj = gpuobj;
- ref->channel = chan;
- ref->instance = instance;
-
- if (!ref_ret) {
- ref->handle = handle;
-
- ret = nouveau_ramht_insert(dev, ref);
- if (ret) {
- kfree(ref);
- return ret;
- }
- } else {
- ref->handle = ~0;
- *ref_ret = ref;
- }
-
- ref->gpuobj->refcount++;
- return 0;
-}
-
-int nouveau_gpuobj_ref_del(struct drm_device *dev, struct nouveau_gpuobj_ref **pref)
-{
- struct nouveau_gpuobj_ref *ref;
-
- NV_DEBUG(dev, "ref %p\n", pref ? *pref : NULL);
-
- if (!dev || !pref || *pref == NULL)
- return -EINVAL;
- ref = *pref;
-
- if (ref->handle != ~0)
- nouveau_ramht_remove(dev, ref);
-
- if (ref->gpuobj) {
- ref->gpuobj->refcount--;
-
- if (ref->gpuobj->refcount == 0) {
- if (!(ref->gpuobj->flags & NVOBJ_FLAG_ALLOW_NO_REFS))
- nouveau_gpuobj_del(dev, &ref->gpuobj);
- }
- }
-
- *pref = NULL;
- kfree(ref);
- return 0;
-}
-
-int
-nouveau_gpuobj_new_ref(struct drm_device *dev,
- struct nouveau_channel *oc, struct nouveau_channel *rc,
- uint32_t handle, uint32_t size, int align,
- uint32_t flags, struct nouveau_gpuobj_ref **ref)
-{
- struct nouveau_gpuobj *gpuobj = NULL;
- int ret;
-
- ret = nouveau_gpuobj_new(dev, oc, size, align, flags, &gpuobj);
- if (ret)
- return ret;
-
- ret = nouveau_gpuobj_ref_add(dev, rc, handle, gpuobj, ref);
- if (ret) {
- nouveau_gpuobj_del(dev, &gpuobj);
- return ret;
- }
-
- return 0;
}
-int
-nouveau_gpuobj_ref_find(struct nouveau_channel *chan, uint32_t handle,
- struct nouveau_gpuobj_ref **ref_ret)
+void
+nouveau_gpuobj_ref(struct nouveau_gpuobj *ref, struct nouveau_gpuobj **ptr)
{
- struct nouveau_gpuobj_ref *ref;
- struct list_head *entry, *tmp;
-
- list_for_each_safe(entry, tmp, &chan->ramht_refs) {
- ref = list_entry(entry, struct nouveau_gpuobj_ref, list);
+ if (ref)
+ kref_get(&ref->refcount);
- if (ref->handle == handle) {
- if (ref_ret)
- *ref_ret = ref;
- return 0;
- }
- }
+ if (*ptr)
+ kref_put(&(*ptr)->refcount, nouveau_gpuobj_del);
- return -EINVAL;
+ *ptr = ref;
}
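nouveau_gpuobj_ref() acts as a combined get/put assignment: it takes a reference on the new object (if any), drops the reference previously held through *ptr (possibly freeing that object via nouveau_gpuobj_del()), and stores the new pointer. Typical usage:

    struct nouveau_gpuobj *tmp = NULL;
    nouveau_gpuobj_ref(obj, &tmp);   /* tmp now holds a reference on obj */
    /* ... use tmp ... */
    nouveau_gpuobj_ref(NULL, &tmp);  /* release it; obj may be freed here */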
int
-nouveau_gpuobj_new_fake(struct drm_device *dev, uint32_t p_offset,
- uint32_t b_offset, uint32_t size,
- uint32_t flags, struct nouveau_gpuobj **pgpuobj,
- struct nouveau_gpuobj_ref **pref)
+nouveau_gpuobj_new_fake(struct drm_device *dev, u32 pinst, u64 vinst,
+ u32 size, u32 flags, struct nouveau_gpuobj **pgpuobj)
{
struct drm_nouveau_private *dev_priv = dev->dev_private;
struct nouveau_gpuobj *gpuobj = NULL;
int i;
NV_DEBUG(dev,
- "p_offset=0x%08x b_offset=0x%08x size=0x%08x flags=0x%08x\n",
- p_offset, b_offset, size, flags);
+ "pinst=0x%08x vinst=0x%010llx size=0x%08x flags=0x%08x\n",
+ pinst, vinst, size, flags);
gpuobj = kzalloc(sizeof(*gpuobj), GFP_KERNEL);
if (!gpuobj)
return -ENOMEM;
NV_DEBUG(dev, "gpuobj %p\n", gpuobj);
- gpuobj->im_channel = NULL;
- gpuobj->flags = flags | NVOBJ_FLAG_FAKE;
-
- list_add_tail(&gpuobj->list, &dev_priv->gpuobj_list);
-
- if (p_offset != ~0) {
- gpuobj->im_pramin = kzalloc(sizeof(struct drm_mm_node),
- GFP_KERNEL);
- if (!gpuobj->im_pramin) {
- nouveau_gpuobj_del(dev, &gpuobj);
- return -ENOMEM;
- }
- gpuobj->im_pramin->start = p_offset;
- gpuobj->im_pramin->size = size;
- }
-
- if (b_offset != ~0) {
- gpuobj->im_backing = (struct nouveau_bo *)-1;
- gpuobj->im_backing_start = b_offset;
- }
+ gpuobj->dev = dev;
+ gpuobj->flags = flags;
+ kref_init(&gpuobj->refcount);
+ gpuobj->size = size;
+ gpuobj->pinst = pinst;
+ gpuobj->cinst = 0xdeadbeef;
+ gpuobj->vinst = vinst;
if (gpuobj->flags & NVOBJ_FLAG_ZERO_ALLOC) {
- for (i = 0; i < gpuobj->im_pramin->size; i += 4)
- nv_wo32(dev, gpuobj, i/4, 0);
+ for (i = 0; i < gpuobj->size; i += 4)
+ nv_wo32(gpuobj, i, 0);
dev_priv->engine.instmem.flush(dev);
}
- if (pref) {
- i = nouveau_gpuobj_ref_add(dev, NULL, 0, gpuobj, pref);
- if (i) {
- nouveau_gpuobj_del(dev, &gpuobj);
- return i;
- }
- }
-
- if (pgpuobj)
- *pgpuobj = gpuobj;
+ spin_lock(&dev_priv->ramin_lock);
+ list_add_tail(&gpuobj->list, &dev_priv->gpuobj_list);
+ spin_unlock(&dev_priv->ramin_lock);
+ *pgpuobj = gpuobj;
return 0;
}
@@ -685,14 +374,12 @@ nouveau_gpuobj_dma_new(struct nouveau_channel *chan, int class,
adjust = offset & 0x00000fff;
frame = offset & ~0x00000fff;
- nv_wo32(dev, *gpuobj, 0, ((1<<12) | (1<<13) |
- (adjust << 20) |
- (access << 14) |
- (target << 16) |
- class));
- nv_wo32(dev, *gpuobj, 1, size - 1);
- nv_wo32(dev, *gpuobj, 2, frame | pte_flags);
- nv_wo32(dev, *gpuobj, 3, frame | pte_flags);
+ nv_wo32(*gpuobj, 0, ((1<<12) | (1<<13) | (adjust << 20) |
+ (access << 14) | (target << 16) |
+ class));
+ nv_wo32(*gpuobj, 4, size - 1);
+ nv_wo32(*gpuobj, 8, frame | pte_flags);
+ nv_wo32(*gpuobj, 12, frame | pte_flags);
} else {
uint64_t limit = offset + size - 1;
uint32_t flags0, flags5;
@@ -705,12 +392,12 @@ nouveau_gpuobj_dma_new(struct nouveau_channel *chan, int class,
flags5 = 0x00080000;
}
- nv_wo32(dev, *gpuobj, 0, flags0 | class);
- nv_wo32(dev, *gpuobj, 1, lower_32_bits(limit));
- nv_wo32(dev, *gpuobj, 2, lower_32_bits(offset));
- nv_wo32(dev, *gpuobj, 3, ((upper_32_bits(limit) & 0xff) << 24) |
- (upper_32_bits(offset) & 0xff));
- nv_wo32(dev, *gpuobj, 5, flags5);
+ nv_wo32(*gpuobj, 0, flags0 | class);
+ nv_wo32(*gpuobj, 4, lower_32_bits(limit));
+ nv_wo32(*gpuobj, 8, lower_32_bits(offset));
+ nv_wo32(*gpuobj, 12, ((upper_32_bits(limit) & 0xff) << 24) |
+ (upper_32_bits(offset) & 0xff));
+ nv_wo32(*gpuobj, 20, flags5);
}
instmem->flush(dev);
@@ -741,7 +428,7 @@ nouveau_gpuobj_gart_dma_new(struct nouveau_channel *chan,
*o_ret = 0;
} else
if (dev_priv->gart_info.type == NOUVEAU_GART_SGDMA) {
- *gpuobj = dev_priv->gart_info.sg_ctxdma;
+ nouveau_gpuobj_ref(dev_priv->gart_info.sg_ctxdma, gpuobj);
if (offset & ~0xffffffffULL) {
NV_ERROR(dev, "obj offset exceeds 32-bits\n");
return -EINVAL;
@@ -829,25 +516,25 @@ nouveau_gpuobj_gr_new(struct nouveau_channel *chan, int class,
}
if (dev_priv->card_type >= NV_50) {
- nv_wo32(dev, *gpuobj, 0, class);
- nv_wo32(dev, *gpuobj, 5, 0x00010000);
+ nv_wo32(*gpuobj, 0, class);
+ nv_wo32(*gpuobj, 20, 0x00010000);
} else {
switch (class) {
case NV_CLASS_NULL:
- nv_wo32(dev, *gpuobj, 0, 0x00001030);
- nv_wo32(dev, *gpuobj, 1, 0xFFFFFFFF);
+ nv_wo32(*gpuobj, 0, 0x00001030);
+ nv_wo32(*gpuobj, 4, 0xFFFFFFFF);
break;
default:
if (dev_priv->card_type >= NV_40) {
- nv_wo32(dev, *gpuobj, 0, class);
+ nv_wo32(*gpuobj, 0, class);
#ifdef __BIG_ENDIAN
- nv_wo32(dev, *gpuobj, 2, 0x01000000);
+ nv_wo32(*gpuobj, 8, 0x01000000);
#endif
} else {
#ifdef __BIG_ENDIAN
- nv_wo32(dev, *gpuobj, 0, class | 0x00080000);
+ nv_wo32(*gpuobj, 0, class | 0x00080000);
#else
- nv_wo32(dev, *gpuobj, 0, class);
+ nv_wo32(*gpuobj, 0, class);
#endif
}
}
@@ -873,10 +560,15 @@ nouveau_gpuobj_sw_new(struct nouveau_channel *chan, int class,
gpuobj = kzalloc(sizeof(*gpuobj), GFP_KERNEL);
if (!gpuobj)
return -ENOMEM;
+ gpuobj->dev = chan->dev;
gpuobj->engine = NVOBJ_ENGINE_SW;
gpuobj->class = class;
+ kref_init(&gpuobj->refcount);
+ gpuobj->cinst = 0x40;
+ spin_lock(&dev_priv->ramin_lock);
list_add_tail(&gpuobj->list, &dev_priv->gpuobj_list);
+ spin_unlock(&dev_priv->ramin_lock);
*gpuobj_ret = gpuobj;
return 0;
}
@@ -886,7 +578,6 @@ nouveau_gpuobj_channel_init_pramin(struct nouveau_channel *chan)
{
struct drm_device *dev = chan->dev;
struct drm_nouveau_private *dev_priv = dev->dev_private;
- struct nouveau_gpuobj *pramin = NULL;
uint32_t size;
uint32_t base;
int ret;
@@ -911,18 +602,16 @@ nouveau_gpuobj_channel_init_pramin(struct nouveau_channel *chan)
size += 0x1000;
}
- ret = nouveau_gpuobj_new_ref(dev, NULL, NULL, 0, size, 0x1000, 0,
- &chan->ramin);
+ ret = nouveau_gpuobj_new(dev, NULL, size, 0x1000, 0, &chan->ramin);
if (ret) {
NV_ERROR(dev, "Error allocating channel PRAMIN: %d\n", ret);
return ret;
}
- pramin = chan->ramin->gpuobj;
- ret = drm_mm_init(&chan->ramin_heap, pramin->im_pramin->start + base, size);
+ ret = drm_mm_init(&chan->ramin_heap, base, size);
if (ret) {
NV_ERROR(dev, "Error creating PRAMIN heap: %d\n", ret);
- nouveau_gpuobj_ref_del(dev, &chan->ramin);
+ nouveau_gpuobj_ref(NULL, &chan->ramin);
return ret;
}
@@ -939,8 +628,6 @@ nouveau_gpuobj_channel_init(struct nouveau_channel *chan,
struct nouveau_gpuobj *vram = NULL, *tt = NULL;
int ret, i;
- INIT_LIST_HEAD(&chan->ramht_refs);
-
NV_DEBUG(dev, "ch%d vram=0x%08x tt=0x%08x\n", chan->id, vram_h, tt_h);
/* Allocate a chunk of memory for per-channel object storage */
@@ -956,41 +643,38 @@ nouveau_gpuobj_channel_init(struct nouveau_channel *chan,
* locations determined during init.
*/
if (dev_priv->card_type >= NV_50) {
- uint32_t vm_offset, pde;
+ u32 pgd_offs = (dev_priv->chipset == 0x50) ? 0x1400 : 0x0200;
+ u64 vm_vinst = chan->ramin->vinst + pgd_offs;
+ u32 vm_pinst = chan->ramin->pinst;
+ u32 pde;
- vm_offset = (dev_priv->chipset & 0xf0) == 0x50 ? 0x1400 : 0x200;
- vm_offset += chan->ramin->gpuobj->im_pramin->start;
+ if (vm_pinst != ~0)
+ vm_pinst += pgd_offs;
- ret = nouveau_gpuobj_new_fake(dev, vm_offset, ~0, 0x4000,
- 0, &chan->vm_pd, NULL);
+ ret = nouveau_gpuobj_new_fake(dev, vm_pinst, vm_vinst, 0x4000,
+ 0, &chan->vm_pd);
if (ret)
return ret;
for (i = 0; i < 0x4000; i += 8) {
- nv_wo32(dev, chan->vm_pd, (i+0)/4, 0x00000000);
- nv_wo32(dev, chan->vm_pd, (i+4)/4, 0xdeadcafe);
+ nv_wo32(chan->vm_pd, i + 0, 0x00000000);
+ nv_wo32(chan->vm_pd, i + 4, 0xdeadcafe);
}
- pde = (dev_priv->vm_gart_base / (512*1024*1024)) * 2;
- ret = nouveau_gpuobj_ref_add(dev, NULL, 0,
- dev_priv->gart_info.sg_ctxdma,
- &chan->vm_gart_pt);
- if (ret)
- return ret;
- nv_wo32(dev, chan->vm_pd, pde++,
- chan->vm_gart_pt->instance | 0x03);
- nv_wo32(dev, chan->vm_pd, pde++, 0x00000000);
+ nouveau_gpuobj_ref(dev_priv->gart_info.sg_ctxdma,
+ &chan->vm_gart_pt);
+ pde = (dev_priv->vm_gart_base / (512*1024*1024)) * 8;
+ nv_wo32(chan->vm_pd, pde + 0, chan->vm_gart_pt->vinst | 3);
+ nv_wo32(chan->vm_pd, pde + 4, 0x00000000);
- pde = (dev_priv->vm_vram_base / (512*1024*1024)) * 2;
+ pde = (dev_priv->vm_vram_base / (512*1024*1024)) * 8;
for (i = 0; i < dev_priv->vm_vram_pt_nr; i++) {
- ret = nouveau_gpuobj_ref_add(dev, NULL, 0,
- dev_priv->vm_vram_pt[i],
- &chan->vm_vram_pt[i]);
- if (ret)
- return ret;
-
- nv_wo32(dev, chan->vm_pd, pde++,
- chan->vm_vram_pt[i]->instance | 0x61);
- nv_wo32(dev, chan->vm_pd, pde++, 0x00000000);
+ nouveau_gpuobj_ref(dev_priv->vm_vram_pt[i],
+ &chan->vm_vram_pt[i]);
+
+ nv_wo32(chan->vm_pd, pde + 0,
+ chan->vm_vram_pt[i]->vinst | 0x61);
+ nv_wo32(chan->vm_pd, pde + 4, 0x00000000);
+ pde += 8;
}
instmem->flush(dev);
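The page-directory indexing above treats each 8-byte PDE as covering a 512MiB slice of the channel's virtual address space. With illustrative bases:

    pde(gart) = (0x20000000 / (512 << 20)) * 8 = 8    /* second entry of vm_pd */
    pde(vram) = (0x40000000 / (512 << 20)) * 8 = 16   /* then += 8 per VRAM page table */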
@@ -998,15 +682,17 @@ nouveau_gpuobj_channel_init(struct nouveau_channel *chan,
/* RAMHT */
if (dev_priv->card_type < NV_50) {
- ret = nouveau_gpuobj_ref_add(dev, NULL, 0, dev_priv->ramht,
- &chan->ramht);
+ nouveau_ramht_ref(dev_priv->ramht, &chan->ramht, NULL);
+ } else {
+ struct nouveau_gpuobj *ramht = NULL;
+
+ ret = nouveau_gpuobj_new(dev, chan, 0x8000, 16,
+ NVOBJ_FLAG_ZERO_ALLOC, &ramht);
if (ret)
return ret;
- } else {
- ret = nouveau_gpuobj_new_ref(dev, chan, chan, 0,
- 0x8000, 16,
- NVOBJ_FLAG_ZERO_ALLOC,
- &chan->ramht);
+
+ ret = nouveau_ramht_new(dev, ramht, &chan->ramht);
+ nouveau_gpuobj_ref(NULL, &ramht);
if (ret)
return ret;
}
@@ -1023,24 +709,32 @@ nouveau_gpuobj_channel_init(struct nouveau_channel *chan,
}
} else {
ret = nouveau_gpuobj_dma_new(chan, NV_CLASS_DMA_IN_MEMORY,
- 0, dev_priv->fb_available_size,
- NV_DMA_ACCESS_RW,
- NV_DMA_TARGET_VIDMEM, &vram);
+ 0, dev_priv->fb_available_size,
+ NV_DMA_ACCESS_RW,
+ NV_DMA_TARGET_VIDMEM, &vram);
if (ret) {
NV_ERROR(dev, "Error creating VRAM ctxdma: %d\n", ret);
return ret;
}
}
- ret = nouveau_gpuobj_ref_add(dev, chan, vram_h, vram, NULL);
+ ret = nouveau_ramht_insert(chan, vram_h, vram);
+ nouveau_gpuobj_ref(NULL, &vram);
if (ret) {
- NV_ERROR(dev, "Error referencing VRAM ctxdma: %d\n", ret);
+ NV_ERROR(dev, "Error adding VRAM ctxdma to RAMHT: %d\n", ret);
return ret;
}
/* TT memory ctxdma */
if (dev_priv->card_type >= NV_50) {
- tt = vram;
+ ret = nouveau_gpuobj_dma_new(chan, NV_CLASS_DMA_IN_MEMORY,
+ 0, dev_priv->vm_end,
+ NV_DMA_ACCESS_RW,
+ NV_DMA_TARGET_AGP, &tt);
+ if (ret) {
+ NV_ERROR(dev, "Error creating VRAM ctxdma: %d\n", ret);
+ return ret;
+ }
} else
if (dev_priv->gart_info.type != NOUVEAU_GART_NONE) {
ret = nouveau_gpuobj_gart_dma_new(chan, 0,
@@ -1056,9 +750,10 @@ nouveau_gpuobj_channel_init(struct nouveau_channel *chan,
return ret;
}
- ret = nouveau_gpuobj_ref_add(dev, chan, tt_h, tt, NULL);
+ ret = nouveau_ramht_insert(chan, tt_h, tt);
+ nouveau_gpuobj_ref(NULL, &tt);
if (ret) {
- NV_ERROR(dev, "Error referencing TT ctxdma: %d\n", ret);
+ NV_ERROR(dev, "Error adding TT ctxdma to RAMHT: %d\n", ret);
return ret;
}
@@ -1070,33 +765,23 @@ nouveau_gpuobj_channel_takedown(struct nouveau_channel *chan)
{
struct drm_nouveau_private *dev_priv = chan->dev->dev_private;
struct drm_device *dev = chan->dev;
- struct list_head *entry, *tmp;
- struct nouveau_gpuobj_ref *ref;
int i;
NV_DEBUG(dev, "ch%d\n", chan->id);
- if (!chan->ramht_refs.next)
+ if (!chan->ramht)
return;
- list_for_each_safe(entry, tmp, &chan->ramht_refs) {
- ref = list_entry(entry, struct nouveau_gpuobj_ref, list);
-
- nouveau_gpuobj_ref_del(dev, &ref);
- }
-
- nouveau_gpuobj_ref_del(dev, &chan->ramht);
+ nouveau_ramht_ref(NULL, &chan->ramht, chan);
- nouveau_gpuobj_del(dev, &chan->vm_pd);
- nouveau_gpuobj_ref_del(dev, &chan->vm_gart_pt);
+ nouveau_gpuobj_ref(NULL, &chan->vm_pd);
+ nouveau_gpuobj_ref(NULL, &chan->vm_gart_pt);
for (i = 0; i < dev_priv->vm_vram_pt_nr; i++)
- nouveau_gpuobj_ref_del(dev, &chan->vm_vram_pt[i]);
+ nouveau_gpuobj_ref(NULL, &chan->vm_vram_pt[i]);
if (chan->ramin_heap.free_stack.next)
drm_mm_takedown(&chan->ramin_heap);
- if (chan->ramin)
- nouveau_gpuobj_ref_del(dev, &chan->ramin);
-
+ nouveau_gpuobj_ref(NULL, &chan->ramin);
}
int
@@ -1117,17 +802,17 @@ nouveau_gpuobj_suspend(struct drm_device *dev)
}
list_for_each_entry(gpuobj, &dev_priv->gpuobj_list, list) {
- if (!gpuobj->im_backing || (gpuobj->flags & NVOBJ_FLAG_FAKE))
+ if (!gpuobj->im_backing)
continue;
- gpuobj->im_backing_suspend = vmalloc(gpuobj->im_pramin->size);
+ gpuobj->im_backing_suspend = vmalloc(gpuobj->size);
if (!gpuobj->im_backing_suspend) {
nouveau_gpuobj_resume(dev);
return -ENOMEM;
}
- for (i = 0; i < gpuobj->im_pramin->size / 4; i++)
- gpuobj->im_backing_suspend[i] = nv_ro32(dev, gpuobj, i);
+ for (i = 0; i < gpuobj->size; i += 4)
+ gpuobj->im_backing_suspend[i/4] = nv_ro32(gpuobj, i);
}
return 0;
@@ -1172,8 +857,8 @@ nouveau_gpuobj_resume(struct drm_device *dev)
if (!gpuobj->im_backing_suspend)
continue;
- for (i = 0; i < gpuobj->im_pramin->size / 4; i++)
- nv_wo32(dev, gpuobj, i, gpuobj->im_backing_suspend[i]);
+ for (i = 0; i < gpuobj->size; i += 4)
+ nv_wo32(gpuobj, i, gpuobj->im_backing_suspend[i/4]);
dev_priv->engine.instmem.flush(dev);
}
@@ -1208,25 +893,24 @@ int nouveau_ioctl_grobj_alloc(struct drm_device *dev, void *data,
return -EPERM;
}
- if (nouveau_gpuobj_ref_find(chan, init->handle, NULL) == 0)
+ if (nouveau_ramht_find(chan, init->handle))
return -EEXIST;
if (!grc->software)
ret = nouveau_gpuobj_gr_new(chan, grc->id, &gr);
else
ret = nouveau_gpuobj_sw_new(chan, grc->id, &gr);
-
if (ret) {
NV_ERROR(dev, "Error creating object: %d (%d/0x%08x)\n",
ret, init->channel, init->handle);
return ret;
}
- ret = nouveau_gpuobj_ref_add(dev, chan, init->handle, gr, NULL);
+ ret = nouveau_ramht_insert(chan, init->handle, gr);
+ nouveau_gpuobj_ref(NULL, &gr);
if (ret) {
NV_ERROR(dev, "Error referencing object: %d (%d/0x%08x)\n",
ret, init->channel, init->handle);
- nouveau_gpuobj_del(dev, &gr);
return ret;
}
@@ -1237,16 +921,62 @@ int nouveau_ioctl_gpuobj_free(struct drm_device *dev, void *data,
struct drm_file *file_priv)
{
struct drm_nouveau_gpuobj_free *objfree = data;
- struct nouveau_gpuobj_ref *ref;
+ struct nouveau_gpuobj *gpuobj;
struct nouveau_channel *chan;
- int ret;
NOUVEAU_GET_USER_CHANNEL_WITH_RETURN(objfree->channel, file_priv, chan);
- ret = nouveau_gpuobj_ref_find(chan, objfree->handle, &ref);
- if (ret)
- return ret;
- nouveau_gpuobj_ref_del(dev, &ref);
+ gpuobj = nouveau_ramht_find(chan, objfree->handle);
+ if (!gpuobj)
+ return -ENOENT;
+ nouveau_ramht_remove(chan, objfree->handle);
return 0;
}
+
+u32
+nv_ro32(struct nouveau_gpuobj *gpuobj, u32 offset)
+{
+ struct drm_nouveau_private *dev_priv = gpuobj->dev->dev_private;
+ struct drm_device *dev = gpuobj->dev;
+
+ if (gpuobj->pinst == ~0 || !dev_priv->ramin_available) {
+ u64 ptr = gpuobj->vinst + offset;
+ u32 base = ptr >> 16;
+ u32 val;
+
+ spin_lock(&dev_priv->ramin_lock);
+ if (dev_priv->ramin_base != base) {
+ dev_priv->ramin_base = base;
+ nv_wr32(dev, 0x001700, dev_priv->ramin_base);
+ }
+ val = nv_rd32(dev, 0x700000 + (ptr & 0xffff));
+ spin_unlock(&dev_priv->ramin_lock);
+ return val;
+ }
+
+ return nv_ri32(dev, gpuobj->pinst + offset);
+}
+
+void
+nv_wo32(struct nouveau_gpuobj *gpuobj, u32 offset, u32 val)
+{
+ struct drm_nouveau_private *dev_priv = gpuobj->dev->dev_private;
+ struct drm_device *dev = gpuobj->dev;
+
+ if (gpuobj->pinst == ~0 || !dev_priv->ramin_available) {
+ u64 ptr = gpuobj->vinst + offset;
+ u32 base = ptr >> 16;
+
+ spin_lock(&dev_priv->ramin_lock);
+ if (dev_priv->ramin_base != base) {
+ dev_priv->ramin_base = base;
+ nv_wr32(dev, 0x001700, dev_priv->ramin_base);
+ }
+ nv_wr32(dev, 0x700000 + (ptr & 0xffff), val);
+ spin_unlock(&dev_priv->ramin_lock);
+ return;
+ }
+
+ nv_wi32(dev, gpuobj->pinst + offset, val);
+}
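When an object has no PRAMIN placement (pinst == ~0) or the aperture is not yet available, these accessors go through the 64KiB window at 0x700000, with register 0x001700 selecting which 64KiB-aligned chunk of VRAM the window exposes. A worked example with an illustrative vinst:

    ptr  = 0x0020f000 + 0x1024;                       /* vinst + offset = 0x00210024 */
    base = ptr >> 16;                                 /* 0x0021, written to 0x001700 */
    val  = nv_rd32(dev, 0x700000 + (ptr & 0xffff));   /* reads at 0x700024 */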
diff --git a/drivers/gpu/drm/nouveau/nouveau_perf.c b/drivers/gpu/drm/nouveau/nouveau_perf.c
new file mode 100644
index 000000000000..ac62a1b8c4fc
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nouveau_perf.c
@@ -0,0 +1,205 @@
+/*
+ * Copyright 2010 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs
+ */
+
+#include "drmP.h"
+
+#include "nouveau_drv.h"
+#include "nouveau_pm.h"
+
+static void
+legacy_perf_init(struct drm_device *dev)
+{
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ struct nvbios *bios = &dev_priv->vbios;
+ struct nouveau_pm_engine *pm = &dev_priv->engine.pm;
+ char *perf, *entry, *bmp = &bios->data[bios->offset];
+ int headerlen, use_straps;
+
+ if (bmp[5] < 0x5 || bmp[6] < 0x14) {
+ NV_DEBUG(dev, "BMP version too old for perf\n");
+ return;
+ }
+
+ perf = ROMPTR(bios, bmp[0x73]);
+ if (!perf) {
+ NV_DEBUG(dev, "No memclock table pointer found.\n");
+ return;
+ }
+
+ switch (perf[0]) {
+ case 0x12:
+ case 0x14:
+ case 0x18:
+ use_straps = 0;
+ headerlen = 1;
+ break;
+ case 0x01:
+ use_straps = perf[1] & 1;
+ headerlen = (use_straps ? 8 : 2);
+ break;
+ default:
+ NV_WARN(dev, "Unknown memclock table version %x.\n", perf[0]);
+ return;
+ }
+
+ entry = perf + headerlen;
+ if (use_straps)
+ entry += (nv_rd32(dev, NV_PEXTDEV_BOOT_0) & 0x3c) >> 1;
+
+ sprintf(pm->perflvl[0].name, "performance_level_0");
+ pm->perflvl[0].memory = ROM16(entry[0]) * 20;
+ pm->nr_perflvl = 1;
+}
+
+void
+nouveau_perf_init(struct drm_device *dev)
+{
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ struct nouveau_pm_engine *pm = &dev_priv->engine.pm;
+ struct nvbios *bios = &dev_priv->vbios;
+ struct bit_entry P;
+ u8 version, headerlen, recordlen, entries;
+ u8 *perf, *entry;
+ int vid, i;
+
+ if (bios->type == NVBIOS_BIT) {
+ if (bit_table(dev, 'P', &P))
+ return;
+
+ if (P.version != 1 && P.version != 2) {
+ NV_WARN(dev, "unknown perf for BIT P %d\n", P.version);
+ return;
+ }
+
+ perf = ROMPTR(bios, P.data[0]);
+ version = perf[0];
+ headerlen = perf[1];
+ if (version < 0x40) {
+ recordlen = perf[3] + (perf[4] * perf[5]);
+ entries = perf[2];
+ } else {
+ recordlen = perf[2] + (perf[3] * perf[4]);
+ entries = perf[5];
+ }
+ } else {
+ if (bios->data[bios->offset + 6] < 0x25) {
+ legacy_perf_init(dev);
+ return;
+ }
+
+ perf = ROMPTR(bios, bios->data[bios->offset + 0x94]);
+ if (!perf) {
+ NV_DEBUG(dev, "perf table pointer invalid\n");
+ return;
+ }
+
+ version = perf[1];
+ headerlen = perf[0];
+ recordlen = perf[3];
+ entries = perf[2];
+ }
+
+ entry = perf + headerlen;
+ for (i = 0; i < entries; i++) {
+ struct nouveau_pm_level *perflvl = &pm->perflvl[pm->nr_perflvl];
+
+ if (entry[0] == 0xff) {
+ entry += recordlen;
+ continue;
+ }
+
+ switch (version) {
+ case 0x12:
+ case 0x13:
+ case 0x15:
+ perflvl->fanspeed = entry[55];
+ perflvl->voltage = entry[56];
+ perflvl->core = ROM32(entry[1]) * 10;
+ perflvl->memory = ROM32(entry[5]) * 20;
+ break;
+ case 0x21:
+ case 0x23:
+ case 0x24:
+ perflvl->fanspeed = entry[4];
+ perflvl->voltage = entry[5];
+ perflvl->core = ROM16(entry[6]) * 1000;
+
+ if (dev_priv->chipset == 0x49 ||
+ dev_priv->chipset == 0x4b)
+ perflvl->memory = ROM16(entry[11]) * 1000;
+ else
+ perflvl->memory = ROM16(entry[11]) * 2000;
+
+ break;
+ case 0x25:
+ perflvl->fanspeed = entry[4];
+ perflvl->voltage = entry[5];
+ perflvl->core = ROM16(entry[6]) * 1000;
+ perflvl->shader = ROM16(entry[10]) * 1000;
+ perflvl->memory = ROM16(entry[12]) * 1000;
+ break;
+ case 0x30:
+ perflvl->memscript = ROM16(entry[2]);
+ case 0x35:
+ perflvl->fanspeed = entry[6];
+ perflvl->voltage = entry[7];
+ perflvl->core = ROM16(entry[8]) * 1000;
+ perflvl->shader = ROM16(entry[10]) * 1000;
+ perflvl->memory = ROM16(entry[12]) * 1000;
+ /*XXX: confirm on 0x35 */
+ perflvl->unk05 = ROM16(entry[16]) * 1000;
+ break;
+ case 0x40:
+#define subent(n) entry[perf[2] + ((n) * perf[3])]
+ perflvl->fanspeed = 0; /*XXX*/
+ perflvl->voltage = entry[2];
+ perflvl->core = (ROM16(subent(0)) & 0xfff) * 1000;
+ perflvl->shader = (ROM16(subent(1)) & 0xfff) * 1000;
+ perflvl->memory = (ROM16(subent(2)) & 0xfff) * 1000;
+ break;
+ }
+
+ /* make sure vid is valid */
+ if (pm->voltage.supported && perflvl->voltage) {
+ vid = nouveau_volt_vid_lookup(dev, perflvl->voltage);
+ if (vid < 0) {
+ NV_DEBUG(dev, "drop perflvl %d, bad vid\n", i);
+ entry += recordlen;
+ continue;
+ }
+ }
+
+ snprintf(perflvl->name, sizeof(perflvl->name),
+ "performance_level_%d", i);
+ perflvl->id = i;
+ pm->nr_perflvl++;
+
+ entry += recordlen;
+ }
+}
+
+void
+nouveau_perf_fini(struct drm_device *dev)
+{
+}
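For the version 0x40 tables parsed above, each performance entry is followed by perf[4] sub-entries of perf[3] bytes starting perf[2] bytes into the entry, which is what the subent(n) macro indexes. With illustrative header values perf[2] = 0x10 and perf[3] = 0x04, subent(2) reads at entry[0x18]; the low 12 bits of each 16-bit field appear to be a clock in MHz, kept internally in kHz:

    perflvl->memory = (ROM16(subent(2)) & 0xfff) * 1000;  /* e.g. 0x320 -> 800 MHz -> 800000 kHz */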
diff --git a/drivers/gpu/drm/nouveau/nouveau_pm.c b/drivers/gpu/drm/nouveau/nouveau_pm.c
new file mode 100644
index 000000000000..9f7b158f5825
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nouveau_pm.c
@@ -0,0 +1,523 @@
+/*
+ * Copyright 2010 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs
+ */
+
+#include "drmP.h"
+
+#include "nouveau_drv.h"
+#include "nouveau_pm.h"
+
+#include <linux/hwmon.h>
+#include <linux/hwmon-sysfs.h>
+
+static int
+nouveau_pm_clock_set(struct drm_device *dev, struct nouveau_pm_level *perflvl,
+ u8 id, u32 khz)
+{
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ struct nouveau_pm_engine *pm = &dev_priv->engine.pm;
+ void *pre_state;
+
+ if (khz == 0)
+ return 0;
+
+ pre_state = pm->clock_pre(dev, perflvl, id, khz);
+ if (IS_ERR(pre_state))
+ return PTR_ERR(pre_state);
+
+ if (pre_state)
+ pm->clock_set(dev, pre_state);
+ return 0;
+}
+
+static int
+nouveau_pm_perflvl_set(struct drm_device *dev, struct nouveau_pm_level *perflvl)
+{
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ struct nouveau_pm_engine *pm = &dev_priv->engine.pm;
+ int ret;
+
+ if (perflvl == pm->cur)
+ return 0;
+
+ if (pm->voltage.supported && pm->voltage_set && perflvl->voltage) {
+ ret = pm->voltage_set(dev, perflvl->voltage);
+ if (ret) {
+ NV_ERROR(dev, "voltage_set %d failed: %d\n",
+ perflvl->voltage, ret);
+ }
+ }
+
+ nouveau_pm_clock_set(dev, perflvl, PLL_CORE, perflvl->core);
+ nouveau_pm_clock_set(dev, perflvl, PLL_SHADER, perflvl->shader);
+ nouveau_pm_clock_set(dev, perflvl, PLL_MEMORY, perflvl->memory);
+ nouveau_pm_clock_set(dev, perflvl, PLL_UNK05, perflvl->unk05);
+
+ pm->cur = perflvl;
+ return 0;
+}
+
+static int
+nouveau_pm_profile_set(struct drm_device *dev, const char *profile)
+{
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ struct nouveau_pm_engine *pm = &dev_priv->engine.pm;
+ struct nouveau_pm_level *perflvl = NULL;
+
+ /* safety precaution, for now */
+ if (nouveau_perflvl_wr != 7777)
+ return -EPERM;
+
+ if (!pm->clock_set)
+ return -EINVAL;
+
+ if (!strncmp(profile, "boot", 4))
+ perflvl = &pm->boot;
+ else {
+ int pl = simple_strtol(profile, NULL, 10);
+ int i;
+
+ for (i = 0; i < pm->nr_perflvl; i++) {
+ if (pm->perflvl[i].id == pl) {
+ perflvl = &pm->perflvl[i];
+ break;
+ }
+ }
+
+ if (!perflvl)
+ return -EINVAL;
+ }
+
+ NV_INFO(dev, "setting performance level: %s\n", profile);
+ return nouveau_pm_perflvl_set(dev, perflvl);
+}
+
+static int
+nouveau_pm_perflvl_get(struct drm_device *dev, struct nouveau_pm_level *perflvl)
+{
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ struct nouveau_pm_engine *pm = &dev_priv->engine.pm;
+ int ret;
+
+ if (!pm->clock_get)
+ return -EINVAL;
+
+ memset(perflvl, 0, sizeof(*perflvl));
+
+ ret = pm->clock_get(dev, PLL_CORE);
+ if (ret > 0)
+ perflvl->core = ret;
+
+ ret = pm->clock_get(dev, PLL_MEMORY);
+ if (ret > 0)
+ perflvl->memory = ret;
+
+ ret = pm->clock_get(dev, PLL_SHADER);
+ if (ret > 0)
+ perflvl->shader = ret;
+
+ ret = pm->clock_get(dev, PLL_UNK05);
+ if (ret > 0)
+ perflvl->unk05 = ret;
+
+ if (pm->voltage.supported && pm->voltage_get) {
+ ret = pm->voltage_get(dev);
+ if (ret > 0)
+ perflvl->voltage = ret;
+ }
+
+ return 0;
+}
+
+static void
+nouveau_pm_perflvl_info(struct nouveau_pm_level *perflvl, char *ptr, int len)
+{
+ char c[16], s[16], v[16], f[16];
+
+ c[0] = '\0';
+ if (perflvl->core)
+ snprintf(c, sizeof(c), " core %dMHz", perflvl->core / 1000);
+
+ s[0] = '\0';
+ if (perflvl->shader)
+ snprintf(s, sizeof(s), " shader %dMHz", perflvl->shader / 1000);
+
+ v[0] = '\0';
+ if (perflvl->voltage)
+ snprintf(v, sizeof(v), " voltage %dmV", perflvl->voltage * 10);
+
+ f[0] = '\0';
+ if (perflvl->fanspeed)
+ snprintf(f, sizeof(f), " fanspeed %d%%", perflvl->fanspeed);
+
+ snprintf(ptr, len, "memory %dMHz%s%s%s%s\n", perflvl->memory / 1000,
+ c, s, v, f);
+}
+
+static ssize_t
+nouveau_pm_get_perflvl_info(struct device *d,
+ struct device_attribute *a, char *buf)
+{
+ struct nouveau_pm_level *perflvl = (struct nouveau_pm_level *)a;
+ char *ptr = buf;
+ int len = PAGE_SIZE;
+
+ snprintf(ptr, len, "%d: ", perflvl->id);
+ ptr += strlen(buf);
+ len -= strlen(buf);
+
+ nouveau_pm_perflvl_info(perflvl, ptr, len);
+ return strlen(buf);
+}
+
+static ssize_t
+nouveau_pm_get_perflvl(struct device *d, struct device_attribute *a, char *buf)
+{
+ struct drm_device *dev = pci_get_drvdata(to_pci_dev(d));
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ struct nouveau_pm_engine *pm = &dev_priv->engine.pm;
+ struct nouveau_pm_level cur;
+ int len = PAGE_SIZE, ret;
+ char *ptr = buf;
+
+ if (!pm->cur)
+ snprintf(ptr, len, "setting: boot\n");
+ else if (pm->cur == &pm->boot)
+ snprintf(ptr, len, "setting: boot\nc: ");
+ else
+ snprintf(ptr, len, "setting: static %d\nc: ", pm->cur->id);
+ ptr += strlen(buf);
+ len -= strlen(buf);
+
+ ret = nouveau_pm_perflvl_get(dev, &cur);
+ if (ret == 0)
+ nouveau_pm_perflvl_info(&cur, ptr, len);
+ return strlen(buf);
+}
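Reading the resulting performance_level attribute combines the selected level with the clocks measured at read time; the output might look like this (illustrative values only):

    setting: static 3
    c: memory 800MHz core 500MHz shader 1200MHz voltage 1100mV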
+
+static ssize_t
+nouveau_pm_set_perflvl(struct device *d, struct device_attribute *a,
+ const char *buf, size_t count)
+{
+ struct drm_device *dev = pci_get_drvdata(to_pci_dev(d));
+ int ret;
+
+ ret = nouveau_pm_profile_set(dev, buf);
+ if (ret)
+ return ret;
+ return strlen(buf);
+}
+
+static DEVICE_ATTR(performance_level, S_IRUGO | S_IWUSR,
+ nouveau_pm_get_perflvl, nouveau_pm_set_perflvl);
+
+static int
+nouveau_sysfs_init(struct drm_device *dev)
+{
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ struct nouveau_pm_engine *pm = &dev_priv->engine.pm;
+ struct device *d = &dev->pdev->dev;
+ int ret, i;
+
+ ret = device_create_file(d, &dev_attr_performance_level);
+ if (ret)
+ return ret;
+
+ for (i = 0; i < pm->nr_perflvl; i++) {
+ struct nouveau_pm_level *perflvl = &pm->perflvl[i];
+
+ perflvl->dev_attr.attr.name = perflvl->name;
+ perflvl->dev_attr.attr.mode = S_IRUGO;
+ perflvl->dev_attr.show = nouveau_pm_get_perflvl_info;
+ perflvl->dev_attr.store = NULL;
+ sysfs_attr_init(&perflvl->dev_attr.attr);
+
+ ret = device_create_file(d, &perflvl->dev_attr);
+ if (ret) {
+ NV_ERROR(dev, "failed pervlvl %d sysfs: %d\n",
+ perflvl->id, i);
+ perflvl->dev_attr.attr.name = NULL;
+ nouveau_pm_fini(dev);
+ return ret;
+ }
+ }
+
+ return 0;
+}
+
+static void
+nouveau_sysfs_fini(struct drm_device *dev)
+{
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ struct nouveau_pm_engine *pm = &dev_priv->engine.pm;
+ struct device *d = &dev->pdev->dev;
+ int i;
+
+ device_remove_file(d, &dev_attr_performance_level);
+ for (i = 0; i < pm->nr_perflvl; i++) {
+ struct nouveau_pm_level *pl = &pm->perflvl[i];
+
+ if (!pl->dev_attr.attr.name)
+ break;
+
+ device_remove_file(d, &pl->dev_attr);
+ }
+}
+
+#ifdef CONFIG_HWMON
+static ssize_t
+nouveau_hwmon_show_temp(struct device *d, struct device_attribute *a, char *buf)
+{
+ struct drm_device *dev = dev_get_drvdata(d);
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ struct nouveau_pm_engine *pm = &dev_priv->engine.pm;
+
+ return snprintf(buf, PAGE_SIZE, "%d\n", pm->temp_get(dev)*1000);
+}
+static SENSOR_DEVICE_ATTR(temp1_input, S_IRUGO, nouveau_hwmon_show_temp,
+ NULL, 0);
+
+static ssize_t
+nouveau_hwmon_max_temp(struct device *d, struct device_attribute *a, char *buf)
+{
+ struct drm_device *dev = dev_get_drvdata(d);
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ struct nouveau_pm_engine *pm = &dev_priv->engine.pm;
+ struct nouveau_pm_threshold_temp *temp = &pm->threshold_temp;
+
+ return snprintf(buf, PAGE_SIZE, "%d\n", temp->down_clock*1000);
+}
+static ssize_t
+nouveau_hwmon_set_max_temp(struct device *d, struct device_attribute *a,
+ const char *buf, size_t count)
+{
+ struct drm_device *dev = dev_get_drvdata(d);
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ struct nouveau_pm_engine *pm = &dev_priv->engine.pm;
+ struct nouveau_pm_threshold_temp *temp = &pm->threshold_temp;
+ long value;
+
+ if (strict_strtol(buf, 10, &value) == -EINVAL)
+ return count;
+
+ temp->down_clock = value/1000;
+
+ nouveau_temp_safety_checks(dev);
+
+ return count;
+}
+static SENSOR_DEVICE_ATTR(temp1_max, S_IRUGO | S_IWUSR, nouveau_hwmon_max_temp,
+ nouveau_hwmon_set_max_temp,
+ 0);
+
+static ssize_t
+nouveau_hwmon_critical_temp(struct device *d, struct device_attribute *a,
+ char *buf)
+{
+ struct drm_device *dev = dev_get_drvdata(d);
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ struct nouveau_pm_engine *pm = &dev_priv->engine.pm;
+ struct nouveau_pm_threshold_temp *temp = &pm->threshold_temp;
+
+ return snprintf(buf, PAGE_SIZE, "%d\n", temp->critical*1000);
+}
+static ssize_t
+nouveau_hwmon_set_critical_temp(struct device *d, struct device_attribute *a,
+ const char *buf,
+ size_t count)
+{
+ struct drm_device *dev = dev_get_drvdata(d);
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ struct nouveau_pm_engine *pm = &dev_priv->engine.pm;
+ struct nouveau_pm_threshold_temp *temp = &pm->threshold_temp;
+ long value;
+
+ if (strict_strtol(buf, 10, &value) == -EINVAL)
+ return count;
+
+ temp->critical = value/1000;
+
+ nouveau_temp_safety_checks(dev);
+
+ return count;
+}
+static SENSOR_DEVICE_ATTR(temp1_crit, S_IRUGO | S_IWUSR,
+ nouveau_hwmon_critical_temp,
+ nouveau_hwmon_set_critical_temp,
+ 0);
+
+static ssize_t nouveau_hwmon_show_name(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ return sprintf(buf, "nouveau\n");
+}
+static SENSOR_DEVICE_ATTR(name, S_IRUGO, nouveau_hwmon_show_name, NULL, 0);
+
+static ssize_t nouveau_hwmon_show_update_rate(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ return sprintf(buf, "1000\n");
+}
+static SENSOR_DEVICE_ATTR(update_rate, S_IRUGO,
+ nouveau_hwmon_show_update_rate,
+ NULL, 0);
+
+static struct attribute *hwmon_attributes[] = {
+ &sensor_dev_attr_temp1_input.dev_attr.attr,
+ &sensor_dev_attr_temp1_max.dev_attr.attr,
+ &sensor_dev_attr_temp1_crit.dev_attr.attr,
+ &sensor_dev_attr_name.dev_attr.attr,
+ &sensor_dev_attr_update_rate.dev_attr.attr,
+ NULL
+};
+
+static const struct attribute_group hwmon_attrgroup = {
+ .attrs = hwmon_attributes,
+};
+#endif
+
+static int
+nouveau_hwmon_init(struct drm_device *dev)
+{
+#ifdef CONFIG_HWMON
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ struct nouveau_pm_engine *pm = &dev_priv->engine.pm;
+ struct device *hwmon_dev;
+ int ret;
+
+ if (!pm->temp_get)
+ return -ENODEV;
+
+ hwmon_dev = hwmon_device_register(&dev->pdev->dev);
+ if (IS_ERR(hwmon_dev)) {
+ ret = PTR_ERR(hwmon_dev);
+ NV_ERROR(dev,
+ "Unable to register hwmon device: %d\n", ret);
+ return ret;
+ }
+ dev_set_drvdata(hwmon_dev, dev);
+ ret = sysfs_create_group(&hwmon_dev->kobj,
+ &hwmon_attrgroup);
+ if (ret) {
+ NV_ERROR(dev,
+ "Unable to create hwmon sysfs file: %d\n", ret);
+ hwmon_device_unregister(hwmon_dev);
+ return ret;
+ }
+
+ pm->hwmon = hwmon_dev;
+#endif
+ return 0;
+}
+
+static void
+nouveau_hwmon_fini(struct drm_device *dev)
+{
+#ifdef CONFIG_HWMON
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ struct nouveau_pm_engine *pm = &dev_priv->engine.pm;
+
+ if (pm->hwmon) {
+ sysfs_remove_group(&pm->hwmon->kobj, &hwmon_attrgroup);
+ hwmon_device_unregister(pm->hwmon);
+ }
+#endif
+}
+
+int
+nouveau_pm_init(struct drm_device *dev)
+{
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ struct nouveau_pm_engine *pm = &dev_priv->engine.pm;
+ char info[256];
+ int ret, i;
+
+ nouveau_volt_init(dev);
+ nouveau_perf_init(dev);
+ nouveau_temp_init(dev);
+ nouveau_mem_timing_init(dev);
+
+ NV_INFO(dev, "%d available performance level(s)\n", pm->nr_perflvl);
+ for (i = 0; i < pm->nr_perflvl; i++) {
+ nouveau_pm_perflvl_info(&pm->perflvl[i], info, sizeof(info));
+ NV_INFO(dev, "%d: %s", pm->perflvl[i].id, info);
+ }
+
+ /* determine current ("boot") performance level */
+ ret = nouveau_pm_perflvl_get(dev, &pm->boot);
+ if (ret == 0) {
+ pm->cur = &pm->boot;
+
+ nouveau_pm_perflvl_info(&pm->boot, info, sizeof(info));
+ NV_INFO(dev, "c: %s", info);
+ }
+
+ /* switch performance levels now if requested */
+ if (nouveau_perflvl != NULL) {
+ ret = nouveau_pm_profile_set(dev, nouveau_perflvl);
+ if (ret) {
+ NV_ERROR(dev, "error setting perflvl \"%s\": %d\n",
+ nouveau_perflvl, ret);
+ }
+ }
+
+ nouveau_sysfs_init(dev);
+ nouveau_hwmon_init(dev);
+
+ return 0;
+}
+
+void
+nouveau_pm_fini(struct drm_device *dev)
+{
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ struct nouveau_pm_engine *pm = &dev_priv->engine.pm;
+
+ if (pm->cur != &pm->boot)
+ nouveau_pm_perflvl_set(dev, &pm->boot);
+
+ nouveau_mem_timing_fini(dev);
+ nouveau_temp_fini(dev);
+ nouveau_perf_fini(dev);
+ nouveau_volt_fini(dev);
+
+ nouveau_hwmon_fini(dev);
+ nouveau_sysfs_fini(dev);
+}
+
+void
+nouveau_pm_resume(struct drm_device *dev)
+{
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ struct nouveau_pm_engine *pm = &dev_priv->engine.pm;
+ struct nouveau_pm_level *perflvl;
+
+ if (pm->cur == &pm->boot)
+ return;
+
+ perflvl = pm->cur;
+ pm->cur = &pm->boot;
+ nouveau_pm_perflvl_set(dev, perflvl);
+}
diff --git a/drivers/gpu/drm/nouveau/nouveau_pm.h b/drivers/gpu/drm/nouveau/nouveau_pm.h
new file mode 100644
index 000000000000..4a9838ddacec
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nouveau_pm.h
@@ -0,0 +1,74 @@
+/*
+ * Copyright 2010 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs
+ */
+
+#ifndef __NOUVEAU_PM_H__
+#define __NOUVEAU_PM_H__
+
+/* nouveau_pm.c */
+int nouveau_pm_init(struct drm_device *dev);
+void nouveau_pm_fini(struct drm_device *dev);
+void nouveau_pm_resume(struct drm_device *dev);
+
+/* nouveau_volt.c */
+void nouveau_volt_init(struct drm_device *);
+void nouveau_volt_fini(struct drm_device *);
+int nouveau_volt_vid_lookup(struct drm_device *, int voltage);
+int nouveau_volt_lvl_lookup(struct drm_device *, int vid);
+int nouveau_voltage_gpio_get(struct drm_device *);
+int nouveau_voltage_gpio_set(struct drm_device *, int voltage);
+
+/* nouveau_perf.c */
+void nouveau_perf_init(struct drm_device *);
+void nouveau_perf_fini(struct drm_device *);
+
+/* nouveau_mem.c */
+void nouveau_mem_timing_init(struct drm_device *);
+void nouveau_mem_timing_fini(struct drm_device *);
+
+/* nv04_pm.c */
+int nv04_pm_clock_get(struct drm_device *, u32 id);
+void *nv04_pm_clock_pre(struct drm_device *, struct nouveau_pm_level *,
+ u32 id, int khz);
+void nv04_pm_clock_set(struct drm_device *, void *);
+
+/* nv50_pm.c */
+int nv50_pm_clock_get(struct drm_device *, u32 id);
+void *nv50_pm_clock_pre(struct drm_device *, struct nouveau_pm_level *,
+ u32 id, int khz);
+void nv50_pm_clock_set(struct drm_device *, void *);
+
+/* nva3_pm.c */
+int nva3_pm_clock_get(struct drm_device *, u32 id);
+void *nva3_pm_clock_pre(struct drm_device *, struct nouveau_pm_level *,
+ u32 id, int khz);
+void nva3_pm_clock_set(struct drm_device *, void *);
+
+/* nouveau_temp.c */
+void nouveau_temp_init(struct drm_device *dev);
+void nouveau_temp_fini(struct drm_device *dev);
+void nouveau_temp_safety_checks(struct drm_device *dev);
+int nv40_temp_get(struct drm_device *dev);
+int nv84_temp_get(struct drm_device *dev);
+
+#endif
diff --git a/drivers/gpu/drm/nouveau/nouveau_ramht.c b/drivers/gpu/drm/nouveau/nouveau_ramht.c
new file mode 100644
index 000000000000..2d8580927ca4
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nouveau_ramht.c
@@ -0,0 +1,306 @@
+/*
+ * Copyright 2010 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs
+ */
+
+#include "drmP.h"
+
+#include "nouveau_drv.h"
+#include "nouveau_ramht.h"
+
+static u32
+nouveau_ramht_hash_handle(struct nouveau_channel *chan, u32 handle)
+{
+ struct drm_device *dev = chan->dev;
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ struct nouveau_ramht *ramht = chan->ramht;
+ u32 hash = 0;
+ int i;
+
+ NV_DEBUG(dev, "ch%d handle=0x%08x\n", chan->id, handle);
+
+ for (i = 32; i > 0; i -= ramht->bits) {
+ hash ^= (handle & ((1 << ramht->bits) - 1));
+ handle >>= ramht->bits;
+ }
+
+ if (dev_priv->card_type < NV_50)
+ hash ^= chan->id << (ramht->bits - 4);
+ hash <<= 3;
+
+ NV_DEBUG(dev, "hash=0x%08x\n", hash);
+ return hash;
+}
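A standalone re-implementation of the fold above (illustrative only, not part of the patch): the handle is XORed together in bits-sized chunks, pre-NV50 chips also fold in the channel id, and the final << 3 turns an entry index into a byte offset, since each RAMHT entry is 8 bytes; bits itself is log2 of the entry count, as set up in nouveau_ramht_new() further down. The handle, channel id and table size in main() are arbitrary examples:

#include <stdio.h>
#include <stdint.h>

static uint32_t ramht_hash(uint32_t handle, int bits, int chan_id, int pre_nv50)
{
	uint32_t hash = 0;
	int i;

	for (i = 32; i > 0; i -= bits) {
		hash ^= handle & ((1 << bits) - 1);
		handle >>= bits;
	}
	if (pre_nv50)
		hash ^= chan_id << (bits - 4);
	return hash << 3;		/* entry index -> byte offset */
}

int main(void)
{
	/* 4 KiB RAMHT -> 512 entries of 8 bytes -> bits = 9 */
	printf("slot offset = 0x%08x\n", ramht_hash(0xbeef0201, 9, 1, 1));
	return 0;
}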
+
+static int
+nouveau_ramht_entry_valid(struct drm_device *dev, struct nouveau_gpuobj *ramht,
+ u32 offset)
+{
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ u32 ctx = nv_ro32(ramht, offset + 4);
+
+ if (dev_priv->card_type < NV_40)
+ return ((ctx & NV_RAMHT_CONTEXT_VALID) != 0);
+ return (ctx != 0);
+}
+
+static int
+nouveau_ramht_entry_same_channel(struct nouveau_channel *chan,
+ struct nouveau_gpuobj *ramht, u32 offset)
+{
+ struct drm_nouveau_private *dev_priv = chan->dev->dev_private;
+ u32 ctx = nv_ro32(ramht, offset + 4);
+
+ if (dev_priv->card_type >= NV_50)
+ return true;
+ else if (dev_priv->card_type >= NV_40)
+ return chan->id ==
+ ((ctx >> NV40_RAMHT_CONTEXT_CHANNEL_SHIFT) & 0x1f);
+ else
+ return chan->id ==
+ ((ctx >> NV_RAMHT_CONTEXT_CHANNEL_SHIFT) & 0x1f);
+}
+
+int
+nouveau_ramht_insert(struct nouveau_channel *chan, u32 handle,
+ struct nouveau_gpuobj *gpuobj)
+{
+ struct drm_device *dev = chan->dev;
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ struct nouveau_instmem_engine *instmem = &dev_priv->engine.instmem;
+ struct nouveau_ramht_entry *entry;
+ struct nouveau_gpuobj *ramht = chan->ramht->gpuobj;
+ unsigned long flags;
+ u32 ctx, co, ho;
+
+ if (nouveau_ramht_find(chan, handle))
+ return -EEXIST;
+
+ entry = kmalloc(sizeof(*entry), GFP_KERNEL);
+ if (!entry)
+ return -ENOMEM;
+ entry->channel = chan;
+ entry->gpuobj = NULL;
+ entry->handle = handle;
+ nouveau_gpuobj_ref(gpuobj, &entry->gpuobj);
+
+ if (dev_priv->card_type < NV_40) {
+ ctx = NV_RAMHT_CONTEXT_VALID | (gpuobj->cinst >> 4) |
+ (chan->id << NV_RAMHT_CONTEXT_CHANNEL_SHIFT) |
+ (gpuobj->engine << NV_RAMHT_CONTEXT_ENGINE_SHIFT);
+ } else
+ if (dev_priv->card_type < NV_50) {
+ ctx = (gpuobj->cinst >> 4) |
+ (chan->id << NV40_RAMHT_CONTEXT_CHANNEL_SHIFT) |
+ (gpuobj->engine << NV40_RAMHT_CONTEXT_ENGINE_SHIFT);
+ } else {
+ if (gpuobj->engine == NVOBJ_ENGINE_DISPLAY) {
+ ctx = (gpuobj->cinst << 10) | 2;
+ } else {
+ ctx = (gpuobj->cinst >> 4) |
+ ((gpuobj->engine <<
+ NV40_RAMHT_CONTEXT_ENGINE_SHIFT));
+ }
+ }
+
+ spin_lock_irqsave(&chan->ramht->lock, flags);
+ list_add(&entry->head, &chan->ramht->entries);
+
+ co = ho = nouveau_ramht_hash_handle(chan, handle);
+ do {
+ if (!nouveau_ramht_entry_valid(dev, ramht, co)) {
+ NV_DEBUG(dev,
+ "insert ch%d 0x%08x: h=0x%08x, c=0x%08x\n",
+ chan->id, co, handle, ctx);
+ nv_wo32(ramht, co + 0, handle);
+ nv_wo32(ramht, co + 4, ctx);
+
+ spin_unlock_irqrestore(&chan->ramht->lock, flags);
+ instmem->flush(dev);
+ return 0;
+ }
+ NV_DEBUG(dev, "collision ch%d 0x%08x: h=0x%08x\n",
+ chan->id, co, nv_ro32(ramht, co));
+
+ co += 8;
+ if (co >= ramht->size)
+ co = 0;
+ } while (co != ho);
+
+ NV_ERROR(dev, "RAMHT space exhausted. ch=%d\n", chan->id);
+ list_del(&entry->head);
+ spin_unlock_irqrestore(&chan->ramht->lock, flags);
+ kfree(entry);
+ return -ENOMEM;
+}
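The collision handling above is plain open addressing: on a collision the code steps to the next 8-byte slot, wraps around at the end of the table, and only returns -ENOMEM once it arrives back at the starting slot. A toy, self-contained illustration of the same probe loop over an in-memory array (treating a zero handle as "empty" is a simplification made for this sketch; the real table stores handle/context pairs in instance memory and uses nouveau_ramht_entry_valid()):

#include <stdio.h>
#include <stdint.h>

#define SLOTS 8		/* toy table: 8 entries, one handle per entry */

static uint32_t table[SLOTS];

static int toy_insert(uint32_t handle, uint32_t start)
{
	uint32_t co = start;

	do {
		if (table[co] == 0) {		/* empty slot found */
			table[co] = handle;
			return 0;
		}
		co = (co + 1) % SLOTS;		/* next slot, wrap around */
	} while (co != start);

	return -1;				/* table full */
}

int main(void)
{
	int i;

	/* force a collision: both handles "hash" to slot 2 in this toy setup */
	toy_insert(0x1111, 2);
	toy_insert(0x2222, 2);
	for (i = 0; i < SLOTS; i++)
		printf("slot %d: 0x%04x\n", i, table[i]);
	return 0;
}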
+
+static struct nouveau_ramht_entry *
+nouveau_ramht_remove_entry(struct nouveau_channel *chan, u32 handle)
+{
+ struct nouveau_ramht *ramht = chan ? chan->ramht : NULL;
+ struct nouveau_ramht_entry *entry;
+ unsigned long flags;
+
+ if (!ramht)
+ return NULL;
+
+ spin_lock_irqsave(&ramht->lock, flags);
+ list_for_each_entry(entry, &ramht->entries, head) {
+ if (entry->channel == chan &&
+ (!handle || entry->handle == handle)) {
+ list_del(&entry->head);
+ spin_unlock_irqrestore(&ramht->lock, flags);
+
+ return entry;
+ }
+ }
+ spin_unlock_irqrestore(&ramht->lock, flags);
+
+ return NULL;
+}
+
+static void
+nouveau_ramht_remove_hash(struct nouveau_channel *chan, u32 handle)
+{
+ struct drm_device *dev = chan->dev;
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ struct nouveau_instmem_engine *instmem = &dev_priv->engine.instmem;
+ struct nouveau_gpuobj *ramht = chan->ramht->gpuobj;
+ unsigned long flags;
+ u32 co, ho;
+
+ spin_lock_irqsave(&chan->ramht->lock, flags);
+ co = ho = nouveau_ramht_hash_handle(chan, handle);
+ do {
+ if (nouveau_ramht_entry_valid(dev, ramht, co) &&
+ nouveau_ramht_entry_same_channel(chan, ramht, co) &&
+ (handle == nv_ro32(ramht, co))) {
+ NV_DEBUG(dev,
+ "remove ch%d 0x%08x: h=0x%08x, c=0x%08x\n",
+ chan->id, co, handle, nv_ro32(ramht, co + 4));
+ nv_wo32(ramht, co + 0, 0x00000000);
+ nv_wo32(ramht, co + 4, 0x00000000);
+ instmem->flush(dev);
+ goto out;
+ }
+
+ co += 8;
+ if (co >= ramht->size)
+ co = 0;
+ } while (co != ho);
+
+ NV_ERROR(dev, "RAMHT entry not found. ch=%d, handle=0x%08x\n",
+ chan->id, handle);
+out:
+ spin_unlock_irqrestore(&chan->ramht->lock, flags);
+}
+
+void
+nouveau_ramht_remove(struct nouveau_channel *chan, u32 handle)
+{
+ struct nouveau_ramht_entry *entry;
+
+ entry = nouveau_ramht_remove_entry(chan, handle);
+ if (!entry)
+ return;
+
+ nouveau_ramht_remove_hash(chan, entry->handle);
+ nouveau_gpuobj_ref(NULL, &entry->gpuobj);
+ kfree(entry);
+}
+
+struct nouveau_gpuobj *
+nouveau_ramht_find(struct nouveau_channel *chan, u32 handle)
+{
+ struct nouveau_ramht *ramht = chan->ramht;
+ struct nouveau_ramht_entry *entry;
+ struct nouveau_gpuobj *gpuobj = NULL;
+ unsigned long flags;
+
+ if (unlikely(!chan->ramht))
+ return NULL;
+
+ spin_lock_irqsave(&ramht->lock, flags);
+ list_for_each_entry(entry, &chan->ramht->entries, head) {
+ if (entry->channel == chan && entry->handle == handle) {
+ gpuobj = entry->gpuobj;
+ break;
+ }
+ }
+ spin_unlock_irqrestore(&ramht->lock, flags);
+
+ return gpuobj;
+}
+
+int
+nouveau_ramht_new(struct drm_device *dev, struct nouveau_gpuobj *gpuobj,
+ struct nouveau_ramht **pramht)
+{
+ struct nouveau_ramht *ramht;
+
+ ramht = kzalloc(sizeof(*ramht), GFP_KERNEL);
+ if (!ramht)
+ return -ENOMEM;
+
+ ramht->dev = dev;
+ kref_init(&ramht->refcount);
+ ramht->bits = drm_order(gpuobj->size / 8);
+ INIT_LIST_HEAD(&ramht->entries);
+ spin_lock_init(&ramht->lock);
+ nouveau_gpuobj_ref(gpuobj, &ramht->gpuobj);
+
+ *pramht = ramht;
+ return 0;
+}
+
+static void
+nouveau_ramht_del(struct kref *ref)
+{
+ struct nouveau_ramht *ramht =
+ container_of(ref, struct nouveau_ramht, refcount);
+
+ nouveau_gpuobj_ref(NULL, &ramht->gpuobj);
+ kfree(ramht);
+}
+
+void
+nouveau_ramht_ref(struct nouveau_ramht *ref, struct nouveau_ramht **ptr,
+ struct nouveau_channel *chan)
+{
+ struct nouveau_ramht_entry *entry;
+ struct nouveau_ramht *ramht;
+
+ if (ref)
+ kref_get(&ref->refcount);
+
+ ramht = *ptr;
+ if (ramht) {
+ while ((entry = nouveau_ramht_remove_entry(chan, 0))) {
+ nouveau_ramht_remove_hash(chan, entry->handle);
+ nouveau_gpuobj_ref(NULL, &entry->gpuobj);
+ kfree(entry);
+ }
+
+ kref_put(&ramht->refcount, nouveau_ramht_del);
+ }
+ *ptr = ref;
+}
diff --git a/drivers/gpu/drm/nouveau/nouveau_ramht.h b/drivers/gpu/drm/nouveau/nouveau_ramht.h
new file mode 100644
index 000000000000..b79cb5e1a8f1
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nouveau_ramht.h
@@ -0,0 +1,55 @@
+/*
+ * Copyright 2010 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs
+ */
+
+#ifndef __NOUVEAU_RAMHT_H__
+#define __NOUVEAU_RAMHT_H__
+
+struct nouveau_ramht_entry {
+ struct list_head head;
+ struct nouveau_channel *channel;
+ struct nouveau_gpuobj *gpuobj;
+ u32 handle;
+};
+
+struct nouveau_ramht {
+ struct drm_device *dev;
+ struct kref refcount;
+ spinlock_t lock;
+ struct nouveau_gpuobj *gpuobj;
+ struct list_head entries;
+ int bits;
+};
+
+extern int nouveau_ramht_new(struct drm_device *, struct nouveau_gpuobj *,
+ struct nouveau_ramht **);
+extern void nouveau_ramht_ref(struct nouveau_ramht *, struct nouveau_ramht **,
+ struct nouveau_channel *unref_channel);
+
+extern int nouveau_ramht_insert(struct nouveau_channel *, u32 handle,
+ struct nouveau_gpuobj *);
+extern void nouveau_ramht_remove(struct nouveau_channel *, u32 handle);
+extern struct nouveau_gpuobj *
+nouveau_ramht_find(struct nouveau_channel *chan, u32 handle);
+
+#endif
diff --git a/drivers/gpu/drm/nouveau/nouveau_reg.h b/drivers/gpu/drm/nouveau/nouveau_reg.h
index 21a6e453b975..1b42541ca9e5 100644
--- a/drivers/gpu/drm/nouveau/nouveau_reg.h
+++ b/drivers/gpu/drm/nouveau/nouveau_reg.h
@@ -551,6 +551,8 @@
#define NV10_PFIFO_CACHE1_DMA_SUBROUTINE 0x0000324C
#define NV03_PFIFO_CACHE1_PULL0 0x00003240
#define NV04_PFIFO_CACHE1_PULL0 0x00003250
+# define NV04_PFIFO_CACHE1_PULL0_HASH_FAILED 0x00000010
+# define NV04_PFIFO_CACHE1_PULL0_HASH_BUSY 0x00001000
#define NV03_PFIFO_CACHE1_PULL1 0x00003250
#define NV04_PFIFO_CACHE1_PULL1 0x00003254
#define NV04_PFIFO_CACHE1_HASH 0x00003258
@@ -785,15 +787,12 @@
#define NV50_PDISPLAY_DAC_MODE_CTRL_C(i) (0x00610b5c + (i) * 0x8)
#define NV50_PDISPLAY_SOR_MODE_CTRL_P(i) (0x00610b70 + (i) * 0x8)
#define NV50_PDISPLAY_SOR_MODE_CTRL_C(i) (0x00610b74 + (i) * 0x8)
+#define NV50_PDISPLAY_EXT_MODE_CTRL_P(i) (0x00610b80 + (i) * 0x8)
+#define NV50_PDISPLAY_EXT_MODE_CTRL_C(i) (0x00610b84 + (i) * 0x8)
#define NV50_PDISPLAY_DAC_MODE_CTRL2_P(i) (0x00610bdc + (i) * 0x8)
#define NV50_PDISPLAY_DAC_MODE_CTRL2_C(i) (0x00610be0 + (i) * 0x8)
-
#define NV90_PDISPLAY_SOR_MODE_CTRL_P(i) (0x00610794 + (i) * 0x8)
#define NV90_PDISPLAY_SOR_MODE_CTRL_C(i) (0x00610798 + (i) * 0x8)
-#define NV90_PDISPLAY_DAC_MODE_CTRL_P(i) (0x00610b58 + (i) * 0x8)
-#define NV90_PDISPLAY_DAC_MODE_CTRL_C(i) (0x00610b5c + (i) * 0x8)
-#define NV90_PDISPLAY_DAC_MODE_CTRL2_P(i) (0x00610b80 + (i) * 0x8)
-#define NV90_PDISPLAY_DAC_MODE_CTRL2_C(i) (0x00610b84 + (i) * 0x8)
#define NV50_PDISPLAY_CRTC_CLK 0x00614000
#define NV50_PDISPLAY_CRTC_CLK_CTRL1(i) ((i) * 0x800 + 0x614100)
diff --git a/drivers/gpu/drm/nouveau/nouveau_sgdma.c b/drivers/gpu/drm/nouveau/nouveau_sgdma.c
index 6b9187d7f67d..d4ac97007038 100644
--- a/drivers/gpu/drm/nouveau/nouveau_sgdma.c
+++ b/drivers/gpu/drm/nouveau/nouveau_sgdma.c
@@ -95,9 +95,9 @@ nouveau_sgdma_bind(struct ttm_backend *be, struct ttm_mem_reg *mem)
struct nouveau_gpuobj *gpuobj = dev_priv->gart_info.sg_ctxdma;
unsigned i, j, pte;
- NV_DEBUG(dev, "pg=0x%lx\n", mem->mm_node->start);
+ NV_DEBUG(dev, "pg=0x%lx\n", mem->start);
- pte = nouveau_sgdma_pte(nvbe->dev, mem->mm_node->start << PAGE_SHIFT);
+ pte = nouveau_sgdma_pte(nvbe->dev, mem->start << PAGE_SHIFT);
nvbe->pte_start = pte;
for (i = 0; i < nvbe->nr_pages; i++) {
dma_addr_t dma_offset = nvbe->pages[i];
@@ -105,11 +105,13 @@ nouveau_sgdma_bind(struct ttm_backend *be, struct ttm_mem_reg *mem)
uint32_t offset_h = upper_32_bits(dma_offset);
for (j = 0; j < PAGE_SIZE / NV_CTXDMA_PAGE_SIZE; j++) {
- if (dev_priv->card_type < NV_50)
- nv_wo32(dev, gpuobj, pte++, offset_l | 3);
- else {
- nv_wo32(dev, gpuobj, pte++, offset_l | 0x21);
- nv_wo32(dev, gpuobj, pte++, offset_h & 0xff);
+ if (dev_priv->card_type < NV_50) {
+ nv_wo32(gpuobj, (pte * 4) + 0, offset_l | 3);
+ pte += 1;
+ } else {
+ nv_wo32(gpuobj, (pte * 4) + 0, offset_l | 0x21);
+ nv_wo32(gpuobj, (pte * 4) + 4, offset_h & 0xff);
+ pte += 2;
}
dma_offset += NV_CTXDMA_PAGE_SIZE;
@@ -118,8 +120,8 @@ nouveau_sgdma_bind(struct ttm_backend *be, struct ttm_mem_reg *mem)
dev_priv->engine.instmem.flush(nvbe->dev);
if (dev_priv->card_type == NV_50) {
- nv50_vm_flush(dev, 5); /* PGRAPH */
- nv50_vm_flush(dev, 0); /* PFIFO */
+ dev_priv->engine.fifo.tlb_flush(dev);
+ dev_priv->engine.graph.tlb_flush(dev);
}
nvbe->bound = true;
@@ -145,11 +147,13 @@ nouveau_sgdma_unbind(struct ttm_backend *be)
dma_addr_t dma_offset = dev_priv->gart_info.sg_dummy_bus;
for (j = 0; j < PAGE_SIZE / NV_CTXDMA_PAGE_SIZE; j++) {
- if (dev_priv->card_type < NV_50)
- nv_wo32(dev, gpuobj, pte++, dma_offset | 3);
- else {
- nv_wo32(dev, gpuobj, pte++, dma_offset | 0x21);
- nv_wo32(dev, gpuobj, pte++, 0x00000000);
+ if (dev_priv->card_type < NV_50) {
+ nv_wo32(gpuobj, (pte * 4) + 0, dma_offset | 3);
+ pte += 1;
+ } else {
+ nv_wo32(gpuobj, (pte * 4) + 0, 0x00000000);
+ nv_wo32(gpuobj, (pte * 4) + 4, 0x00000000);
+ pte += 2;
}
dma_offset += NV_CTXDMA_PAGE_SIZE;
@@ -158,8 +162,8 @@ nouveau_sgdma_unbind(struct ttm_backend *be)
dev_priv->engine.instmem.flush(nvbe->dev);
if (dev_priv->card_type == NV_50) {
- nv50_vm_flush(dev, 5);
- nv50_vm_flush(dev, 0);
+ dev_priv->engine.fifo.tlb_flush(dev);
+ dev_priv->engine.graph.tlb_flush(dev);
}
nvbe->bound = false;
@@ -220,7 +224,11 @@ nouveau_sgdma_init(struct drm_device *dev)
int i, ret;
if (dev_priv->card_type < NV_50) {
- aper_size = (64 * 1024 * 1024);
+ if (dev_priv->ramin_rsvd_vram < 2 * 1024 * 1024)
+ aper_size = 64 * 1024 * 1024;
+ else
+ aper_size = 512 * 1024 * 1024;
+
obj_size = (aper_size >> NV_CTXDMA_PAGE_SHIFT) * 4;
obj_size += 8; /* ctxdma header */
} else {
@@ -230,7 +238,6 @@ nouveau_sgdma_init(struct drm_device *dev)
}
ret = nouveau_gpuobj_new(dev, NULL, obj_size, 16,
- NVOBJ_FLAG_ALLOW_NO_REFS |
NVOBJ_FLAG_ZERO_ALLOC |
NVOBJ_FLAG_ZERO_FREE, &gpuobj);
if (ret) {
@@ -239,9 +246,9 @@ nouveau_sgdma_init(struct drm_device *dev)
}
dev_priv->gart_info.sg_dummy_page =
- alloc_page(GFP_KERNEL|__GFP_DMA32);
+ alloc_page(GFP_KERNEL|__GFP_DMA32|__GFP_ZERO);
if (!dev_priv->gart_info.sg_dummy_page) {
- nouveau_gpuobj_del(dev, &gpuobj);
+ nouveau_gpuobj_ref(NULL, &gpuobj);
return -ENOMEM;
}
@@ -250,29 +257,34 @@ nouveau_sgdma_init(struct drm_device *dev)
pci_map_page(pdev, dev_priv->gart_info.sg_dummy_page, 0,
PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
if (pci_dma_mapping_error(pdev, dev_priv->gart_info.sg_dummy_bus)) {
- nouveau_gpuobj_del(dev, &gpuobj);
+ nouveau_gpuobj_ref(NULL, &gpuobj);
return -EFAULT;
}
if (dev_priv->card_type < NV_50) {
+ /* special case, allocated from global instmem heap so
+ * cinst is invalid, we use it on all channels though so
+ * cinst needs to be valid, set it the same as pinst
+ */
+ gpuobj->cinst = gpuobj->pinst;
+
/* Maybe use NV_DMA_TARGET_AGP for PCIE? NVIDIA do this, and
* confirmed to work on c51. Perhaps means NV_DMA_TARGET_PCIE
* on those cards? */
- nv_wo32(dev, gpuobj, 0, NV_CLASS_DMA_IN_MEMORY |
- (1 << 12) /* PT present */ |
- (0 << 13) /* PT *not* linear */ |
- (NV_DMA_ACCESS_RW << 14) |
- (NV_DMA_TARGET_PCI << 16));
- nv_wo32(dev, gpuobj, 1, aper_size - 1);
+ nv_wo32(gpuobj, 0, NV_CLASS_DMA_IN_MEMORY |
+ (1 << 12) /* PT present */ |
+ (0 << 13) /* PT *not* linear */ |
+ (NV_DMA_ACCESS_RW << 14) |
+ (NV_DMA_TARGET_PCI << 16));
+ nv_wo32(gpuobj, 4, aper_size - 1);
for (i = 2; i < 2 + (aper_size >> 12); i++) {
- nv_wo32(dev, gpuobj, i,
- dev_priv->gart_info.sg_dummy_bus | 3);
+ nv_wo32(gpuobj, i * 4,
+ dev_priv->gart_info.sg_dummy_bus | 3);
}
} else {
for (i = 0; i < obj_size; i += 8) {
- nv_wo32(dev, gpuobj, (i+0)/4,
- dev_priv->gart_info.sg_dummy_bus | 0x21);
- nv_wo32(dev, gpuobj, (i+4)/4, 0);
+ nv_wo32(gpuobj, i + 0, 0x00000000);
+ nv_wo32(gpuobj, i + 4, 0x00000000);
}
}
dev_priv->engine.instmem.flush(dev);
@@ -298,7 +310,7 @@ nouveau_sgdma_takedown(struct drm_device *dev)
dev_priv->gart_info.sg_dummy_bus = 0;
}
- nouveau_gpuobj_del(dev, &dev_priv->gart_info.sg_ctxdma);
+ nouveau_gpuobj_ref(NULL, &dev_priv->gart_info.sg_ctxdma);
}
int
@@ -308,9 +320,9 @@ nouveau_sgdma_get_page(struct drm_device *dev, uint32_t offset, uint32_t *page)
struct nouveau_gpuobj *gpuobj = dev_priv->gart_info.sg_ctxdma;
int pte;
- pte = (offset >> NV_CTXDMA_PAGE_SHIFT);
+ pte = (offset >> NV_CTXDMA_PAGE_SHIFT) << 2;
if (dev_priv->card_type < NV_50) {
- *page = nv_ro32(dev, gpuobj, (pte + 2)) & ~NV_CTXDMA_PAGE_MASK;
+ *page = nv_ro32(gpuobj, (pte + 8)) & ~NV_CTXDMA_PAGE_MASK;
return 0;
}
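The sgdma changes above follow an interface change: nv_wo32()/nv_ro32() now take a byte offset into the object rather than a 32-bit word index, so the former pte++ accesses become explicit pte * 4 byte offsets (or pte += 2 for the two-word NV50 entries), and nouveau_sgdma_get_page() skips the two-word ctxdma header with "+ 8" bytes instead of "+ 2" words. A small sketch of the equivalence, illustrative only:

#include <stdio.h>

int main(void)
{
	unsigned int pte = 5;	/* arbitrary page table entry index */

	/* old interface: nv_wo32(dev, gpuobj, pte, val)   -- word index
	 * new interface: nv_wo32(gpuobj, pte * 4, val)    -- byte offset */
	printf("word index %u -> byte offset 0x%x\n", pte, pte * 4);

	/* the two-word ctxdma header: "+ 2" words became "+ 8" bytes */
	printf("header skip: %u words == %u bytes\n", 2u, 2u * 4);
	return 0;
}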
diff --git a/drivers/gpu/drm/nouveau/nouveau_state.c b/drivers/gpu/drm/nouveau/nouveau_state.c
index 989322be3728..049f755567e5 100644
--- a/drivers/gpu/drm/nouveau/nouveau_state.c
+++ b/drivers/gpu/drm/nouveau/nouveau_state.c
@@ -35,6 +35,8 @@
#include "nouveau_drv.h"
#include "nouveau_drm.h"
#include "nouveau_fbcon.h"
+#include "nouveau_ramht.h"
+#include "nouveau_pm.h"
#include "nv50_display.h"
static void nouveau_stub_takedown(struct drm_device *dev) {}
@@ -78,7 +80,6 @@ static int nouveau_init_engine_ptrs(struct drm_device *dev)
engine->fifo.disable = nv04_fifo_disable;
engine->fifo.enable = nv04_fifo_enable;
engine->fifo.reassign = nv04_fifo_reassign;
- engine->fifo.cache_flush = nv04_fifo_cache_flush;
engine->fifo.cache_pull = nv04_fifo_cache_pull;
engine->fifo.channel_id = nv04_fifo_channel_id;
engine->fifo.create_context = nv04_fifo_create_context;
@@ -95,6 +96,9 @@ static int nouveau_init_engine_ptrs(struct drm_device *dev)
engine->gpio.get = NULL;
engine->gpio.set = NULL;
engine->gpio.irq_enable = NULL;
+ engine->pm.clock_get = nv04_pm_clock_get;
+ engine->pm.clock_pre = nv04_pm_clock_pre;
+ engine->pm.clock_set = nv04_pm_clock_set;
break;
case 0x10:
engine->instmem.init = nv04_instmem_init;
@@ -130,7 +134,6 @@ static int nouveau_init_engine_ptrs(struct drm_device *dev)
engine->fifo.disable = nv04_fifo_disable;
engine->fifo.enable = nv04_fifo_enable;
engine->fifo.reassign = nv04_fifo_reassign;
- engine->fifo.cache_flush = nv04_fifo_cache_flush;
engine->fifo.cache_pull = nv04_fifo_cache_pull;
engine->fifo.channel_id = nv10_fifo_channel_id;
engine->fifo.create_context = nv10_fifo_create_context;
@@ -147,6 +150,9 @@ static int nouveau_init_engine_ptrs(struct drm_device *dev)
engine->gpio.get = nv10_gpio_get;
engine->gpio.set = nv10_gpio_set;
engine->gpio.irq_enable = NULL;
+ engine->pm.clock_get = nv04_pm_clock_get;
+ engine->pm.clock_pre = nv04_pm_clock_pre;
+ engine->pm.clock_set = nv04_pm_clock_set;
break;
case 0x20:
engine->instmem.init = nv04_instmem_init;
@@ -182,7 +188,6 @@ static int nouveau_init_engine_ptrs(struct drm_device *dev)
engine->fifo.disable = nv04_fifo_disable;
engine->fifo.enable = nv04_fifo_enable;
engine->fifo.reassign = nv04_fifo_reassign;
- engine->fifo.cache_flush = nv04_fifo_cache_flush;
engine->fifo.cache_pull = nv04_fifo_cache_pull;
engine->fifo.channel_id = nv10_fifo_channel_id;
engine->fifo.create_context = nv10_fifo_create_context;
@@ -199,6 +204,9 @@ static int nouveau_init_engine_ptrs(struct drm_device *dev)
engine->gpio.get = nv10_gpio_get;
engine->gpio.set = nv10_gpio_set;
engine->gpio.irq_enable = NULL;
+ engine->pm.clock_get = nv04_pm_clock_get;
+ engine->pm.clock_pre = nv04_pm_clock_pre;
+ engine->pm.clock_set = nv04_pm_clock_set;
break;
case 0x30:
engine->instmem.init = nv04_instmem_init;
@@ -234,7 +242,6 @@ static int nouveau_init_engine_ptrs(struct drm_device *dev)
engine->fifo.disable = nv04_fifo_disable;
engine->fifo.enable = nv04_fifo_enable;
engine->fifo.reassign = nv04_fifo_reassign;
- engine->fifo.cache_flush = nv04_fifo_cache_flush;
engine->fifo.cache_pull = nv04_fifo_cache_pull;
engine->fifo.channel_id = nv10_fifo_channel_id;
engine->fifo.create_context = nv10_fifo_create_context;
@@ -251,6 +258,11 @@ static int nouveau_init_engine_ptrs(struct drm_device *dev)
engine->gpio.get = nv10_gpio_get;
engine->gpio.set = nv10_gpio_set;
engine->gpio.irq_enable = NULL;
+ engine->pm.clock_get = nv04_pm_clock_get;
+ engine->pm.clock_pre = nv04_pm_clock_pre;
+ engine->pm.clock_set = nv04_pm_clock_set;
+ engine->pm.voltage_get = nouveau_voltage_gpio_get;
+ engine->pm.voltage_set = nouveau_voltage_gpio_set;
break;
case 0x40:
case 0x60:
@@ -287,7 +299,6 @@ static int nouveau_init_engine_ptrs(struct drm_device *dev)
engine->fifo.disable = nv04_fifo_disable;
engine->fifo.enable = nv04_fifo_enable;
engine->fifo.reassign = nv04_fifo_reassign;
- engine->fifo.cache_flush = nv04_fifo_cache_flush;
engine->fifo.cache_pull = nv04_fifo_cache_pull;
engine->fifo.channel_id = nv10_fifo_channel_id;
engine->fifo.create_context = nv40_fifo_create_context;
@@ -304,6 +315,12 @@ static int nouveau_init_engine_ptrs(struct drm_device *dev)
engine->gpio.get = nv10_gpio_get;
engine->gpio.set = nv10_gpio_set;
engine->gpio.irq_enable = NULL;
+ engine->pm.clock_get = nv04_pm_clock_get;
+ engine->pm.clock_pre = nv04_pm_clock_pre;
+ engine->pm.clock_set = nv04_pm_clock_set;
+ engine->pm.voltage_get = nouveau_voltage_gpio_get;
+ engine->pm.voltage_set = nouveau_voltage_gpio_set;
+ engine->pm.temp_get = nv40_temp_get;
break;
case 0x50:
case 0x80: /* gotta love NVIDIA's consistency.. */
@@ -337,6 +354,15 @@ static int nouveau_init_engine_ptrs(struct drm_device *dev)
engine->graph.destroy_context = nv50_graph_destroy_context;
engine->graph.load_context = nv50_graph_load_context;
engine->graph.unload_context = nv50_graph_unload_context;
+ if (dev_priv->chipset != 0x86)
+ engine->graph.tlb_flush = nv50_graph_tlb_flush;
+ else {
+ /* From what I can see, NVIDIA does this on every
+ * pre-NVA3 board except NVAC, but we've only
+ * ever seen problems on NV86.
+ */
+ engine->graph.tlb_flush = nv86_graph_tlb_flush;
+ }
engine->fifo.channels = 128;
engine->fifo.init = nv50_fifo_init;
engine->fifo.takedown = nv50_fifo_takedown;
@@ -348,6 +374,7 @@ static int nouveau_init_engine_ptrs(struct drm_device *dev)
engine->fifo.destroy_context = nv50_fifo_destroy_context;
engine->fifo.load_context = nv50_fifo_load_context;
engine->fifo.unload_context = nv50_fifo_unload_context;
+ engine->fifo.tlb_flush = nv50_fifo_tlb_flush;
engine->display.early_init = nv50_display_early_init;
engine->display.late_takedown = nv50_display_late_takedown;
engine->display.create = nv50_display_create;
@@ -358,6 +385,27 @@ static int nouveau_init_engine_ptrs(struct drm_device *dev)
engine->gpio.get = nv50_gpio_get;
engine->gpio.set = nv50_gpio_set;
engine->gpio.irq_enable = nv50_gpio_irq_enable;
+ switch (dev_priv->chipset) {
+ case 0xa3:
+ case 0xa5:
+ case 0xa8:
+ case 0xaf:
+ engine->pm.clock_get = nva3_pm_clock_get;
+ engine->pm.clock_pre = nva3_pm_clock_pre;
+ engine->pm.clock_set = nva3_pm_clock_set;
+ break;
+ default:
+ engine->pm.clock_get = nv50_pm_clock_get;
+ engine->pm.clock_pre = nv50_pm_clock_pre;
+ engine->pm.clock_set = nv50_pm_clock_set;
+ break;
+ }
+ engine->pm.voltage_get = nouveau_voltage_gpio_get;
+ engine->pm.voltage_set = nouveau_voltage_gpio_set;
+ if (dev_priv->chipset >= 0x84)
+ engine->pm.temp_get = nv84_temp_get;
+ else
+ engine->pm.temp_get = nv40_temp_get;
break;
case 0xC0:
engine->instmem.init = nvc0_instmem_init;
@@ -437,16 +485,14 @@ static int
nouveau_card_init_channel(struct drm_device *dev)
{
struct drm_nouveau_private *dev_priv = dev->dev_private;
- struct nouveau_gpuobj *gpuobj;
+ struct nouveau_gpuobj *gpuobj = NULL;
int ret;
ret = nouveau_channel_alloc(dev, &dev_priv->channel,
- (struct drm_file *)-2,
- NvDmaFB, NvDmaTT);
+ (struct drm_file *)-2, NvDmaFB, NvDmaTT);
if (ret)
return ret;
- gpuobj = NULL;
ret = nouveau_gpuobj_dma_new(dev_priv->channel, NV_CLASS_DMA_IN_MEMORY,
0, dev_priv->vram_size,
NV_DMA_ACCESS_RW, NV_DMA_TARGET_VIDMEM,
@@ -454,26 +500,25 @@ nouveau_card_init_channel(struct drm_device *dev)
if (ret)
goto out_err;
- ret = nouveau_gpuobj_ref_add(dev, dev_priv->channel, NvDmaVRAM,
- gpuobj, NULL);
+ ret = nouveau_ramht_insert(dev_priv->channel, NvDmaVRAM, gpuobj);
+ nouveau_gpuobj_ref(NULL, &gpuobj);
if (ret)
goto out_err;
- gpuobj = NULL;
ret = nouveau_gpuobj_gart_dma_new(dev_priv->channel, 0,
dev_priv->gart_info.aper_size,
NV_DMA_ACCESS_RW, &gpuobj, NULL);
if (ret)
goto out_err;
- ret = nouveau_gpuobj_ref_add(dev, dev_priv->channel, NvDmaGART,
- gpuobj, NULL);
+ ret = nouveau_ramht_insert(dev_priv->channel, NvDmaGART, gpuobj);
+ nouveau_gpuobj_ref(NULL, &gpuobj);
if (ret)
goto out_err;
return 0;
+
out_err:
- nouveau_gpuobj_del(dev, &gpuobj);
nouveau_channel_free(dev_priv->channel);
dev_priv->channel = NULL;
return ret;
@@ -534,35 +579,28 @@ nouveau_card_init(struct drm_device *dev)
if (ret)
goto out_display_early;
- ret = nouveau_mem_detect(dev);
+ nouveau_pm_init(dev);
+
+ ret = nouveau_mem_vram_init(dev);
if (ret)
goto out_bios;
- ret = nouveau_gpuobj_early_init(dev);
+ ret = nouveau_gpuobj_init(dev);
if (ret)
- goto out_bios;
+ goto out_vram;
- /* Initialise instance memory, must happen before mem_init so we
- * know exactly how much VRAM we're able to use for "normal"
- * purposes.
- */
ret = engine->instmem.init(dev);
if (ret)
- goto out_gpuobj_early;
+ goto out_gpuobj;
- /* Setup the memory manager */
- ret = nouveau_mem_init(dev);
+ ret = nouveau_mem_gart_init(dev);
if (ret)
goto out_instmem;
- ret = nouveau_gpuobj_init(dev);
- if (ret)
- goto out_mem;
-
/* PMC */
ret = engine->mc.init(dev);
if (ret)
- goto out_gpuobj;
+ goto out_gart;
/* PGPIO */
ret = engine->gpio.init(dev);
@@ -611,9 +649,13 @@ nouveau_card_init(struct drm_device *dev)
/* what about PVIDEO/PCRTC/PRAMDAC etc? */
if (!engine->graph.accel_blocked) {
- ret = nouveau_card_init_channel(dev);
+ ret = nouveau_fence_init(dev);
if (ret)
goto out_irq;
+
+ ret = nouveau_card_init_channel(dev);
+ if (ret)
+ goto out_fence;
}
ret = nouveau_backlight_init(dev);
@@ -624,6 +666,8 @@ nouveau_card_init(struct drm_device *dev)
drm_kms_helper_poll_init(dev);
return 0;
+out_fence:
+ nouveau_fence_fini(dev);
out_irq:
drm_irq_uninstall(dev);
out_display:
@@ -642,16 +686,16 @@ out_gpio:
engine->gpio.takedown(dev);
out_mc:
engine->mc.takedown(dev);
-out_gpuobj:
- nouveau_gpuobj_takedown(dev);
-out_mem:
- nouveau_sgdma_takedown(dev);
- nouveau_mem_close(dev);
+out_gart:
+ nouveau_mem_gart_fini(dev);
out_instmem:
engine->instmem.takedown(dev);
-out_gpuobj_early:
- nouveau_gpuobj_late_takedown(dev);
+out_gpuobj:
+ nouveau_gpuobj_takedown(dev);
+out_vram:
+ nouveau_mem_vram_fini(dev);
out_bios:
+ nouveau_pm_fini(dev);
nouveau_bios_takedown(dev);
out_display_early:
engine->display.late_takedown(dev);
@@ -667,7 +711,8 @@ static void nouveau_card_takedown(struct drm_device *dev)
nouveau_backlight_exit(dev);
- if (dev_priv->channel) {
+ if (!engine->graph.accel_blocked) {
+ nouveau_fence_fini(dev);
nouveau_channel_free(dev_priv->channel);
dev_priv->channel = NULL;
}
@@ -686,15 +731,15 @@ static void nouveau_card_takedown(struct drm_device *dev)
ttm_bo_clean_mm(&dev_priv->ttm.bdev, TTM_PL_VRAM);
ttm_bo_clean_mm(&dev_priv->ttm.bdev, TTM_PL_TT);
mutex_unlock(&dev->struct_mutex);
- nouveau_sgdma_takedown(dev);
+ nouveau_mem_gart_fini(dev);
- nouveau_gpuobj_takedown(dev);
- nouveau_mem_close(dev);
engine->instmem.takedown(dev);
+ nouveau_gpuobj_takedown(dev);
+ nouveau_mem_vram_fini(dev);
drm_irq_uninstall(dev);
- nouveau_gpuobj_late_takedown(dev);
+ nouveau_pm_fini(dev);
nouveau_bios_takedown(dev);
vga_client_register(dev->pdev, NULL, NULL, NULL);
@@ -1006,6 +1051,9 @@ int nouveau_ioctl_getparam(struct drm_device *dev, void *data,
case NOUVEAU_GETPARAM_PTIMER_TIME:
getparam->value = dev_priv->engine.timer.read(dev);
break;
+ case NOUVEAU_GETPARAM_HAS_BO_USAGE:
+ getparam->value = 1;
+ break;
case NOUVEAU_GETPARAM_GRAPH_UNITS:
/* NV40 and NV50 versions are quite different, but register
* address is the same. User is supposed to know the card
@@ -1016,7 +1064,7 @@ int nouveau_ioctl_getparam(struct drm_device *dev, void *data,
}
/* FALLTHRU */
default:
- NV_ERROR(dev, "unknown parameter %lld\n", getparam->param);
+ NV_DEBUG(dev, "unknown parameter %lld\n", getparam->param);
return -EINVAL;
}
@@ -1031,7 +1079,7 @@ nouveau_ioctl_setparam(struct drm_device *dev, void *data,
switch (setparam->param) {
default:
- NV_ERROR(dev, "unknown parameter %lld\n", setparam->param);
+ NV_DEBUG(dev, "unknown parameter %lld\n", setparam->param);
return -EINVAL;
}
@@ -1057,7 +1105,7 @@ bool nouveau_wait_until(struct drm_device *dev, uint64_t timeout,
/* Waits for PGRAPH to go completely idle */
bool nouveau_wait_for_idle(struct drm_device *dev)
{
- if (!nv_wait(NV04_PGRAPH_STATUS, 0xffffffff, 0x00000000)) {
+ if (!nv_wait(dev, NV04_PGRAPH_STATUS, 0xffffffff, 0x00000000)) {
NV_ERROR(dev, "PGRAPH idle timed out with status 0x%08x\n",
nv_rd32(dev, NV04_PGRAPH_STATUS));
return false;
diff --git a/drivers/gpu/drm/nouveau/nouveau_temp.c b/drivers/gpu/drm/nouveau/nouveau_temp.c
new file mode 100644
index 000000000000..7ecc4adc1e45
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nouveau_temp.c
@@ -0,0 +1,309 @@
+/*
+ * Copyright 2010 PathScale inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Martin Peres
+ */
+
+#include "drmP.h"
+
+#include "nouveau_drv.h"
+#include "nouveau_pm.h"
+
+static void
+nouveau_temp_vbios_parse(struct drm_device *dev, u8 *temp)
+{
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ struct nouveau_pm_engine *pm = &dev_priv->engine.pm;
+ struct nouveau_pm_temp_sensor_constants *sensor = &pm->sensor_constants;
+ struct nouveau_pm_threshold_temp *temps = &pm->threshold_temp;
+ int i, headerlen, recordlen, entries;
+
+ if (!temp) {
+ NV_DEBUG(dev, "temperature table pointer invalid\n");
+ return;
+ }
+
+ /* Set the default sensor's constants */
+ sensor->offset_constant = 0;
+ sensor->offset_mult = 1;
+ sensor->offset_div = 1;
+ sensor->slope_mult = 1;
+ sensor->slope_div = 1;
+
+ /* Set the default temperature thresholds */
+ temps->critical = 110;
+ temps->down_clock = 100;
+ temps->fan_boost = 90;
+
+ /* Set the known default values to setup the temperature sensor */
+ if (dev_priv->card_type >= NV_40) {
+ switch (dev_priv->chipset) {
+ case 0x43:
+ sensor->offset_mult = 32060;
+ sensor->offset_div = 1000;
+ sensor->slope_mult = 792;
+ sensor->slope_div = 1000;
+ break;
+
+ case 0x44:
+ case 0x47:
+ case 0x4a:
+ sensor->offset_mult = 27839;
+ sensor->offset_div = 1000;
+ sensor->slope_mult = 780;
+ sensor->slope_div = 1000;
+ break;
+
+ case 0x46:
+ sensor->offset_mult = -24775;
+ sensor->offset_div = 100;
+ sensor->slope_mult = 467;
+ sensor->slope_div = 10000;
+ break;
+
+ case 0x49:
+ sensor->offset_mult = -25051;
+ sensor->offset_div = 100;
+ sensor->slope_mult = 458;
+ sensor->slope_div = 10000;
+ break;
+
+ case 0x4b:
+ sensor->offset_mult = -24088;
+ sensor->offset_div = 100;
+ sensor->slope_mult = 442;
+ sensor->slope_div = 10000;
+ break;
+
+ case 0x50:
+ sensor->offset_mult = -22749;
+ sensor->offset_div = 100;
+ sensor->slope_mult = 431;
+ sensor->slope_div = 10000;
+ break;
+ }
+ }
+
+ headerlen = temp[1];
+ recordlen = temp[2];
+ entries = temp[3];
+ temp = temp + headerlen;
+
+ /* Read the entries from the table */
+ for (i = 0; i < entries; i++) {
+ u16 value = ROM16(temp[1]);
+
+ switch (temp[0]) {
+ case 0x01:
+ if ((value & 0x8f) == 0)
+ sensor->offset_constant = (value >> 9) & 0x7f;
+ break;
+
+ case 0x04:
+ if ((value & 0xf00f) == 0xa000) /* core */
+ temps->critical = (value&0x0ff0) >> 4;
+ break;
+
+ case 0x07:
+ if ((value & 0xf00f) == 0xa000) /* core */
+ temps->down_clock = (value&0x0ff0) >> 4;
+ break;
+
+ case 0x08:
+ if ((value & 0xf00f) == 0xa000) /* core */
+ temps->fan_boost = (value&0x0ff0) >> 4;
+ break;
+
+ case 0x10:
+ sensor->offset_mult = value;
+ break;
+
+ case 0x11:
+ sensor->offset_div = value;
+ break;
+
+ case 0x12:
+ sensor->slope_mult = value;
+ break;
+
+ case 0x13:
+ sensor->slope_div = value;
+ break;
+ }
+ temp += recordlen;
+ }
+
+ nouveau_temp_safety_checks(dev);
+}
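Each table record is recordlen bytes, of which the parser uses an id byte and a 16-bit little-endian value. For the threshold ids (0x04 critical, 0x07 down_clock, 0x08 fan_boost) only records matching the core-sensor pattern (value & 0xf00f) == 0xa000 are taken, and the temperature sits in bits 4..11. A standalone decode of one such record; the sample value is made up for illustration:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint16_t value = 0xa6e0;	/* example value for id 0x04 (critical) */

	if ((value & 0xf00f) == 0xa000)	/* core sensor */
		printf("critical threshold: %d C\n", (value & 0x0ff0) >> 4);
	else
		printf("not a core-sensor record\n");
	return 0;
}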
+
+static int
+nv40_sensor_setup(struct drm_device *dev)
+{
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ struct nouveau_pm_engine *pm = &dev_priv->engine.pm;
+ struct nouveau_pm_temp_sensor_constants *sensor = &pm->sensor_constants;
+ u32 offset = sensor->offset_mult / sensor->offset_div;
+ u32 sensor_calibration;
+
+ /* set up the sensors */
+ sensor_calibration = 120 - offset - sensor->offset_constant;
+ sensor_calibration = sensor_calibration * sensor->slope_div /
+ sensor->slope_mult;
+
+ if (dev_priv->chipset >= 0x46)
+ sensor_calibration |= 0x80000000;
+ else
+ sensor_calibration |= 0x10000000;
+
+ nv_wr32(dev, 0x0015b0, sensor_calibration);
+
+ /* Wait for the sensor to update */
+ msleep(5);
+
+ /* read */
+ return nv_rd32(dev, 0x0015b4) & 0x1fff;
+}
+
+int
+nv40_temp_get(struct drm_device *dev)
+{
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ struct nouveau_pm_engine *pm = &dev_priv->engine.pm;
+ struct nouveau_pm_temp_sensor_constants *sensor = &pm->sensor_constants;
+ int offset = sensor->offset_mult / sensor->offset_div;
+ int core_temp;
+
+ if (dev_priv->card_type >= NV_50) {
+ core_temp = nv_rd32(dev, 0x20008);
+ } else {
+ core_temp = nv_rd32(dev, 0x0015b4) & 0x1fff;
+ /* Setup the sensor if the temperature is 0 */
+ if (core_temp == 0)
+ core_temp = nv40_sensor_setup(dev);
+ }
+
+ core_temp = core_temp * sensor->slope_mult / sensor->slope_div;
+ core_temp = core_temp + offset + sensor->offset_constant;
+
+ return core_temp;
+}
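With the per-chipset constants parsed above, the raw sensor reading is converted as raw * slope_mult / slope_div + offset_mult / offset_div + offset_constant, entirely in integer arithmetic. A standalone sketch using the NV43 defaults from the table above; the raw value is an arbitrary example:

#include <stdio.h>

int main(void)
{
	/* NV43 defaults from nouveau_temp_vbios_parse() */
	int offset_mult = 32060, offset_div = 1000;
	int slope_mult = 792, slope_div = 1000;
	int offset_constant = 0;
	int raw = 50;		/* arbitrary raw reading from register 0x15b4 */
	int temp;

	temp = raw * slope_mult / slope_div;		/* 50 * 792 / 1000 = 39 */
	temp = temp + offset_mult / offset_div + offset_constant;
	printf("core temperature: %d C\n", temp);	/* 39 + 32 = 71 here */
	return 0;
}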
+
+int
+nv84_temp_get(struct drm_device *dev)
+{
+ return nv_rd32(dev, 0x20400);
+}
+
+void
+nouveau_temp_safety_checks(struct drm_device *dev)
+{
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ struct nouveau_pm_engine *pm = &dev_priv->engine.pm;
+ struct nouveau_pm_threshold_temp *temps = &pm->threshold_temp;
+
+ if (temps->critical > 120)
+ temps->critical = 120;
+ else if (temps->critical < 80)
+ temps->critical = 80;
+
+ if (temps->down_clock > 110)
+ temps->down_clock = 110;
+ else if (temps->down_clock < 60)
+ temps->down_clock = 60;
+
+ if (temps->fan_boost > 100)
+ temps->fan_boost = 100;
+ else if (temps->fan_boost < 40)
+ temps->fan_boost = 40;
+}
+
+static bool
+probe_monitoring_device(struct nouveau_i2c_chan *i2c,
+ struct i2c_board_info *info)
+{
+ char modalias[16] = "i2c:";
+ struct i2c_client *client;
+
+ strlcat(modalias, info->type, sizeof(modalias));
+ request_module(modalias);
+
+ client = i2c_new_device(&i2c->adapter, info);
+ if (!client)
+ return false;
+
+ if (!client->driver || client->driver->detect(client, info)) {
+ i2c_unregister_device(client);
+ return false;
+ }
+
+ return true;
+}
+
+static void
+nouveau_temp_probe_i2c(struct drm_device *dev)
+{
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ struct dcb_table *dcb = &dev_priv->vbios.dcb;
+ struct i2c_board_info info[] = {
+ { I2C_BOARD_INFO("w83l785ts", 0x2d) },
+ { I2C_BOARD_INFO("w83781d", 0x2d) },
+ { I2C_BOARD_INFO("f75375", 0x2e) },
+ { I2C_BOARD_INFO("adt7473", 0x2e) },
+ { I2C_BOARD_INFO("lm99", 0x4c) },
+ { }
+ };
+ int idx = (dcb->version >= 0x40 ?
+ dcb->i2c_default_indices & 0xf : 2);
+
+ nouveau_i2c_identify(dev, "monitoring device", info,
+ probe_monitoring_device, idx);
+}
+
+void
+nouveau_temp_init(struct drm_device *dev)
+{
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ struct nvbios *bios = &dev_priv->vbios;
+ struct bit_entry P;
+ u8 *temp = NULL;
+
+ if (bios->type == NVBIOS_BIT) {
+ if (bit_table(dev, 'P', &P))
+ return;
+
+ if (P.version == 1)
+ temp = ROMPTR(bios, P.data[12]);
+ else if (P.version == 2)
+ temp = ROMPTR(bios, P.data[16]);
+ else
+ NV_WARN(dev, "unknown temp for BIT P %d\n", P.version);
+
+ nouveau_temp_vbios_parse(dev, temp);
+ }
+
+ nouveau_temp_probe_i2c(dev);
+}
+
+void
+nouveau_temp_fini(struct drm_device *dev)
+{
+
+}
diff --git a/drivers/gpu/drm/nouveau/nouveau_volt.c b/drivers/gpu/drm/nouveau/nouveau_volt.c
new file mode 100644
index 000000000000..04fdc00a67d5
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nouveau_volt.c
@@ -0,0 +1,212 @@
+/*
+ * Copyright 2010 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs
+ */
+
+#include "drmP.h"
+
+#include "nouveau_drv.h"
+#include "nouveau_pm.h"
+
+static const enum dcb_gpio_tag vidtag[] = { 0x04, 0x05, 0x06, 0x1a };
+static int nr_vidtag = sizeof(vidtag) / sizeof(vidtag[0]);
+
+int
+nouveau_voltage_gpio_get(struct drm_device *dev)
+{
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ struct nouveau_gpio_engine *gpio = &dev_priv->engine.gpio;
+ struct nouveau_pm_voltage *volt = &dev_priv->engine.pm.voltage;
+ u8 vid = 0;
+ int i;
+
+ for (i = 0; i < nr_vidtag; i++) {
+ if (!(volt->vid_mask & (1 << i)))
+ continue;
+
+ vid |= gpio->get(dev, vidtag[i]) << i;
+ }
+
+ return nouveau_volt_lvl_lookup(dev, vid);
+}
+
+int
+nouveau_voltage_gpio_set(struct drm_device *dev, int voltage)
+{
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ struct nouveau_gpio_engine *gpio = &dev_priv->engine.gpio;
+ struct nouveau_pm_voltage *volt = &dev_priv->engine.pm.voltage;
+ int vid, i;
+
+ vid = nouveau_volt_vid_lookup(dev, voltage);
+ if (vid < 0)
+ return vid;
+
+ for (i = 0; i < nr_vidtag; i++) {
+ if (!(volt->vid_mask & (1 << i)))
+ continue;
+
+ gpio->set(dev, vidtag[i], !!(vid & (1 << i)));
+ }
+
+ return 0;
+}
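The VID read back above is assembled bit by bit from up to four GPIO lines (tags 0x04, 0x05, 0x06 and 0x1a), with vid_mask selecting which bits the board actually wires up; the resulting VID is then matched against the voltage table by the lookup helpers below. A standalone sketch of the bit assembly; the GPIO readings and mask are made-up examples:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	int gpio_state[4] = { 1, 0, 1, 0 };	/* pretend readings of the 4 VID lines */
	uint8_t vid_mask = 0x7;			/* board wires up VID bits 0..2 only */
	uint8_t vid = 0;
	int i;

	for (i = 0; i < 4; i++) {
		if (!(vid_mask & (1 << i)))
			continue;		/* bit not used on this board */
		vid |= gpio_state[i] << i;
	}
	printf("vid = 0x%x\n", vid);		/* 0x5 with the values above */
	return 0;
}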
+
+int
+nouveau_volt_vid_lookup(struct drm_device *dev, int voltage)
+{
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ struct nouveau_pm_voltage *volt = &dev_priv->engine.pm.voltage;
+ int i;
+
+ for (i = 0; i < volt->nr_level; i++) {
+ if (volt->level[i].voltage == voltage)
+ return volt->level[i].vid;
+ }
+
+ return -ENOENT;
+}
+
+int
+nouveau_volt_lvl_lookup(struct drm_device *dev, int vid)
+{
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ struct nouveau_pm_voltage *volt = &dev_priv->engine.pm.voltage;
+ int i;
+
+ for (i = 0; i < volt->nr_level; i++) {
+ if (volt->level[i].vid == vid)
+ return volt->level[i].voltage;
+ }
+
+ return -ENOENT;
+}
+
+void
+nouveau_volt_init(struct drm_device *dev)
+{
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ struct nouveau_pm_engine *pm = &dev_priv->engine.pm;
+ struct nouveau_pm_voltage *voltage = &pm->voltage;
+ struct nvbios *bios = &dev_priv->vbios;
+ struct bit_entry P;
+ u8 *volt = NULL, *entry;
+ int i, headerlen, recordlen, entries, vidmask, vidshift;
+
+ if (bios->type == NVBIOS_BIT) {
+ if (bit_table(dev, 'P', &P))
+ return;
+
+ if (P.version == 1)
+ volt = ROMPTR(bios, P.data[16]);
+ else
+ if (P.version == 2)
+ volt = ROMPTR(bios, P.data[12]);
+ else {
+ NV_WARN(dev, "unknown volt for BIT P %d\n", P.version);
+ }
+ } else {
+ if (bios->data[bios->offset + 6] < 0x27) {
+ NV_DEBUG(dev, "BMP version too old for voltage\n");
+ return;
+ }
+
+ volt = ROMPTR(bios, bios->data[bios->offset + 0x98]);
+ }
+
+ if (!volt) {
+ NV_DEBUG(dev, "voltage table pointer invalid\n");
+ return;
+ }
+
+ switch (volt[0]) {
+ case 0x10:
+ case 0x11:
+ case 0x12:
+ headerlen = 5;
+ recordlen = volt[1];
+ entries = volt[2];
+ vidshift = 0;
+ vidmask = volt[4];
+ break;
+ case 0x20:
+ headerlen = volt[1];
+ recordlen = volt[3];
+ entries = volt[2];
+ vidshift = 0; /* could be vidshift like 0x30? */
+ vidmask = volt[5];
+ break;
+ case 0x30:
+ headerlen = volt[1];
+ recordlen = volt[2];
+ entries = volt[3];
+ vidshift = hweight8(volt[5]);
+ vidmask = volt[4];
+ break;
+ default:
+ NV_WARN(dev, "voltage table 0x%02x unknown\n", volt[0]);
+ return;
+ }
+
+ /* validate vid mask */
+ voltage->vid_mask = vidmask;
+ if (!voltage->vid_mask)
+ return;
+
+ i = 0;
+ while (vidmask) {
+ if (i > nr_vidtag) {
+ NV_DEBUG(dev, "vid bit %d unknown\n", i);
+ return;
+ }
+
+ if (!nouveau_bios_gpio_entry(dev, vidtag[i])) {
+ NV_DEBUG(dev, "vid bit %d has no gpio tag\n", i);
+ return;
+ }
+
+ vidmask >>= 1;
+ i++;
+ }
+
+ /* parse vbios entries into common format */
+ voltage->level = kcalloc(entries, sizeof(*voltage->level), GFP_KERNEL);
+ if (!voltage->level)
+ return;
+
+ entry = volt + headerlen;
+ for (i = 0; i < entries; i++, entry += recordlen) {
+ voltage->level[i].voltage = entry[0];
+ voltage->level[i].vid = entry[1] >> vidshift;
+ }
+ voltage->nr_level = entries;
+ voltage->supported = true;
+}
+
+void
+nouveau_volt_fini(struct drm_device *dev)
+{
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ struct nouveau_pm_voltage *volt = &dev_priv->engine.pm.voltage;
+
+ kfree(volt->level);
+}
diff --git a/drivers/gpu/drm/nouveau/nv04_crtc.c b/drivers/gpu/drm/nouveau/nv04_crtc.c
index 497df8765f28..40e180741629 100644
--- a/drivers/gpu/drm/nouveau/nv04_crtc.c
+++ b/drivers/gpu/drm/nouveau/nv04_crtc.c
@@ -33,6 +33,7 @@
#include "nouveau_fb.h"
#include "nouveau_hw.h"
#include "nvreg.h"
+#include "nouveau_fbcon.h"
static int
nv04_crtc_mode_set_base(struct drm_crtc *crtc, int x, int y,
@@ -109,7 +110,7 @@ static void nv_crtc_calc_state_ext(struct drm_crtc *crtc, struct drm_display_mod
struct nouveau_pll_vals *pv = &regp->pllvals;
struct pll_lims pll_lim;
- if (get_pll_limits(dev, nv_crtc->index ? VPLL2 : VPLL1, &pll_lim))
+ if (get_pll_limits(dev, nv_crtc->index ? PLL_VPLL1 : PLL_VPLL0, &pll_lim))
return;
/* NM2 == 0 is used to determine single stage mode on two stage plls */
@@ -157,7 +158,6 @@ nv_crtc_dpms(struct drm_crtc *crtc, int mode)
{
struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc);
struct drm_device *dev = crtc->dev;
- struct drm_connector *connector;
unsigned char seq1 = 0, crtc17 = 0;
unsigned char crtc1A;
@@ -212,10 +212,6 @@ nv_crtc_dpms(struct drm_crtc *crtc, int mode)
NVVgaSeqReset(dev, nv_crtc->index, false);
NVWriteVgaCrtc(dev, nv_crtc->index, NV_CIO_CRE_RPC1_INDEX, crtc1A);
-
- /* Update connector polling modes */
- list_for_each_entry(connector, &dev->mode_config.connector_list, head)
- nouveau_connector_set_polling(connector);
}
static bool
@@ -718,6 +714,7 @@ static void nv_crtc_destroy(struct drm_crtc *crtc)
drm_crtc_cleanup(crtc);
+ nouveau_bo_unmap(nv_crtc->cursor.nvbo);
nouveau_bo_ref(NULL, &nv_crtc->cursor.nvbo);
kfree(nv_crtc);
}
@@ -768,8 +765,9 @@ nv_crtc_gamma_set(struct drm_crtc *crtc, u16 *r, u16 *g, u16 *b, uint32_t start,
}
static int
-nv04_crtc_mode_set_base(struct drm_crtc *crtc, int x, int y,
- struct drm_framebuffer *old_fb)
+nv04_crtc_do_mode_set_base(struct drm_crtc *crtc,
+ struct drm_framebuffer *passed_fb,
+ int x, int y, bool atomic)
{
struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc);
struct drm_device *dev = crtc->dev;
@@ -780,13 +778,26 @@ nv04_crtc_mode_set_base(struct drm_crtc *crtc, int x, int y,
int arb_burst, arb_lwm;
int ret;
- ret = nouveau_bo_pin(fb->nvbo, TTM_PL_FLAG_VRAM);
- if (ret)
- return ret;
+ /* If atomic, we want to switch to the fb we were passed, so
+ * now we update pointers to do that. (We don't pin; just
+ * assume we're already pinned and update the base address.)
+ */
+ if (atomic) {
+ drm_fb = passed_fb;
+ fb = nouveau_framebuffer(passed_fb);
+ } else {
+ /* If not atomic, we can go ahead and pin, and unpin the
+ * old fb we were passed.
+ */
+ ret = nouveau_bo_pin(fb->nvbo, TTM_PL_FLAG_VRAM);
+ if (ret)
+ return ret;
- if (old_fb) {
- struct nouveau_framebuffer *ofb = nouveau_framebuffer(old_fb);
- nouveau_bo_unpin(ofb->nvbo);
+ if (passed_fb) {
+ struct nouveau_framebuffer *ofb = nouveau_framebuffer(passed_fb);
+ nouveau_bo_unpin(ofb->nvbo);
+ }
}
nv_crtc->fb.offset = fb->nvbo->bo.offset;
@@ -815,7 +826,7 @@ nv04_crtc_mode_set_base(struct drm_crtc *crtc, int x, int y,
/* Update the framebuffer location. */
regp->fb_start = nv_crtc->fb.offset & ~3;
regp->fb_start += (y * drm_fb->pitch) + (x * drm_fb->bits_per_pixel / 8);
- NVWriteCRTC(dev, nv_crtc->index, NV_PCRTC_START, regp->fb_start);
+ nv_set_crtc_base(dev, nv_crtc->index, regp->fb_start);
/* Update the arbitration parameters. */
nouveau_calc_arb(dev, crtc->mode.clock, drm_fb->bits_per_pixel,
@@ -826,7 +837,7 @@ nv04_crtc_mode_set_base(struct drm_crtc *crtc, int x, int y,
crtc_wr_cio_state(crtc, regp, NV_CIO_CRE_FF_INDEX);
crtc_wr_cio_state(crtc, regp, NV_CIO_CRE_FFLWM__INDEX);
- if (dev_priv->card_type >= NV_30) {
+ if (dev_priv->card_type >= NV_20) {
regp->CRTC[NV_CIO_CRE_47] = arb_lwm >> 8;
crtc_wr_cio_state(crtc, regp, NV_CIO_CRE_47);
}
@@ -834,6 +845,29 @@ nv04_crtc_mode_set_base(struct drm_crtc *crtc, int x, int y,
return 0;
}
+static int
+nv04_crtc_mode_set_base(struct drm_crtc *crtc, int x, int y,
+ struct drm_framebuffer *old_fb)
+{
+ return nv04_crtc_do_mode_set_base(crtc, old_fb, x, y, false);
+}
+
+static int
+nv04_crtc_mode_set_base_atomic(struct drm_crtc *crtc,
+ struct drm_framebuffer *fb,
+ int x, int y, enum mode_set_atomic state)
+{
+ struct drm_nouveau_private *dev_priv = crtc->dev->dev_private;
+ struct drm_device *dev = dev_priv->dev;
+
+ if (state == ENTER_ATOMIC_MODE_SET)
+ nouveau_fbcon_save_disable_accel(dev);
+ else
+ nouveau_fbcon_restore_accel(dev);
+
+ return nv04_crtc_do_mode_set_base(crtc, fb, x, y, true);
+}
+
static void nv04_cursor_upload(struct drm_device *dev, struct nouveau_bo *src,
struct nouveau_bo *dst)
{
@@ -962,6 +996,7 @@ static const struct drm_crtc_helper_funcs nv04_crtc_helper_funcs = {
.mode_fixup = nv_crtc_mode_fixup,
.mode_set = nv_crtc_mode_set,
.mode_set_base = nv04_crtc_mode_set_base,
+ .mode_set_base_atomic = nv04_crtc_mode_set_base_atomic,
.load_lut = nv_crtc_gamma_load,
};
diff --git a/drivers/gpu/drm/nouveau/nv04_dac.c b/drivers/gpu/drm/nouveau/nv04_dac.c
index ea3627041ecf..ba6423f2ffcc 100644
--- a/drivers/gpu/drm/nouveau/nv04_dac.c
+++ b/drivers/gpu/drm/nouveau/nv04_dac.c
@@ -291,6 +291,8 @@ uint32_t nv17_dac_sample_load(struct drm_encoder *encoder)
msleep(5);
sample = NVReadRAMDAC(dev, 0, NV_PRAMDAC_TEST_CONTROL + regoffset);
+ /* do it again just in case it's a residual current */
+ sample &= NVReadRAMDAC(dev, 0, NV_PRAMDAC_TEST_CONTROL + regoffset);
temp = NVReadRAMDAC(dev, head, NV_PRAMDAC_TEST_CONTROL);
NVWriteRAMDAC(dev, head, NV_PRAMDAC_TEST_CONTROL,
@@ -343,22 +345,13 @@ static void nv04_dac_prepare(struct drm_encoder *encoder)
{
struct drm_encoder_helper_funcs *helper = encoder->helper_private;
struct drm_device *dev = encoder->dev;
- struct drm_nouveau_private *dev_priv = dev->dev_private;
int head = nouveau_crtc(encoder->crtc)->index;
- struct nv04_crtc_reg *crtcstate = dev_priv->mode_reg.crtc_reg;
helper->dpms(encoder, DRM_MODE_DPMS_OFF);
nv04_dfp_disable(dev, head);
-
- /* Some NV4x have unknown values (0x3f, 0x50, 0x54, 0x6b, 0x79, 0x7f)
- * at LCD__INDEX which we don't alter
- */
- if (!(crtcstate[head].CRTC[NV_CIO_CRE_LCD__INDEX] & 0x44))
- crtcstate[head].CRTC[NV_CIO_CRE_LCD__INDEX] = 0;
}
-
static void nv04_dac_mode_set(struct drm_encoder *encoder,
struct drm_display_mode *mode,
struct drm_display_mode *adjusted_mode)
diff --git a/drivers/gpu/drm/nouveau/nv04_dfp.c b/drivers/gpu/drm/nouveau/nv04_dfp.c
index 0d3206a7046c..ef23550407b5 100644
--- a/drivers/gpu/drm/nouveau/nv04_dfp.c
+++ b/drivers/gpu/drm/nouveau/nv04_dfp.c
@@ -104,6 +104,8 @@ void nv04_dfp_disable(struct drm_device *dev, int head)
}
/* don't inadvertently turn it on when state written later */
crtcstate[head].fp_control = FP_TG_CONTROL_OFF;
+ crtcstate[head].CRTC[NV_CIO_CRE_LCD__INDEX] &=
+ ~NV_CIO_CRE_LCD_ROUTE_MASK;
}
void nv04_dfp_update_fp_control(struct drm_encoder *encoder, int mode)
@@ -183,14 +185,15 @@ static bool nv04_dfp_mode_fixup(struct drm_encoder *encoder,
struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
struct nouveau_connector *nv_connector = nouveau_encoder_connector_get(nv_encoder);
- /* For internal panels and gpu scaling on DVI we need the native mode */
- if (nv_connector->scaling_mode != DRM_MODE_SCALE_NONE) {
- if (!nv_connector->native_mode)
- return false;
+ if (!nv_connector->native_mode ||
+ nv_connector->scaling_mode == DRM_MODE_SCALE_NONE ||
+ mode->hdisplay > nv_connector->native_mode->hdisplay ||
+ mode->vdisplay > nv_connector->native_mode->vdisplay) {
+ nv_encoder->mode = *adjusted_mode;
+
+ } else {
nv_encoder->mode = *nv_connector->native_mode;
adjusted_mode->clock = nv_connector->native_mode->clock;
- } else {
- nv_encoder->mode = *adjusted_mode;
}
return true;
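
The reworked test above falls back to the caller's mode whenever there is no native panel mode, scaling is disabled, or the requested mode is larger than the panel in either dimension; only otherwise does it substitute the native mode and its pixel clock so the GPU scaler can do the fitting. A small sketch of the same decision on plain structs (hypothetical types, not the DRM ones):

#include <stdio.h>

struct simple_mode { int hdisplay, vdisplay, clock; };

enum scale { SCALE_NONE, SCALE_FULLSCREEN };

/* Pick the mode the encoder should really be programmed with. */
static struct simple_mode pick_encoder_mode(const struct simple_mode *req,
					    const struct simple_mode *native,
					    enum scale scaling)
{
	if (!native || scaling == SCALE_NONE ||
	    req->hdisplay > native->hdisplay ||
	    req->vdisplay > native->vdisplay)
		return *req;		/* drive the panel with the mode as-is */

	return *native;			/* let the scaler fit req into native */
}

int main(void)
{
	struct simple_mode native = { 1400, 1050, 108000 };
	struct simple_mode req = { 1024, 768, 65000 };
	struct simple_mode out = pick_encoder_mode(&req, &native, SCALE_FULLSCREEN);

	printf("%dx%d @ %dkHz\n", out.hdisplay, out.vdisplay, out.clock);
	return 0;
}
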
@@ -253,26 +256,21 @@ static void nv04_dfp_prepare(struct drm_encoder *encoder)
nv04_dfp_prepare_sel_clk(dev, nv_encoder, head);
- /* Some NV4x have unknown values (0x3f, 0x50, 0x54, 0x6b, 0x79, 0x7f)
- * at LCD__INDEX which we don't alter
- */
- if (!(*cr_lcd & 0x44)) {
- *cr_lcd = 0x3;
-
- if (nv_two_heads(dev)) {
- if (nv_encoder->dcb->location == DCB_LOC_ON_CHIP)
- *cr_lcd |= head ? 0x0 : 0x8;
- else {
- *cr_lcd |= (nv_encoder->dcb->or << 4) & 0x30;
- if (nv_encoder->dcb->type == OUTPUT_LVDS)
- *cr_lcd |= 0x30;
- if ((*cr_lcd & 0x30) == (*cr_lcd_oth & 0x30)) {
- /* avoid being connected to both crtcs */
- *cr_lcd_oth &= ~0x30;
- NVWriteVgaCrtc(dev, head ^ 1,
- NV_CIO_CRE_LCD__INDEX,
- *cr_lcd_oth);
- }
+ *cr_lcd = (*cr_lcd & ~NV_CIO_CRE_LCD_ROUTE_MASK) | 0x3;
+
+ if (nv_two_heads(dev)) {
+ if (nv_encoder->dcb->location == DCB_LOC_ON_CHIP)
+ *cr_lcd |= head ? 0x0 : 0x8;
+ else {
+ *cr_lcd |= (nv_encoder->dcb->or << 4) & 0x30;
+ if (nv_encoder->dcb->type == OUTPUT_LVDS)
+ *cr_lcd |= 0x30;
+ if ((*cr_lcd & 0x30) == (*cr_lcd_oth & 0x30)) {
+ /* avoid being connected to both crtcs */
+ *cr_lcd_oth &= ~0x30;
+ NVWriteVgaCrtc(dev, head ^ 1,
+ NV_CIO_CRE_LCD__INDEX,
+ *cr_lcd_oth);
}
}
}
@@ -640,7 +638,7 @@ static void nv04_tmds_slave_init(struct drm_encoder *encoder)
get_tmds_slave(encoder))
return;
- type = nouveau_i2c_identify(dev, "TMDS transmitter", info, 2);
+ type = nouveau_i2c_identify(dev, "TMDS transmitter", info, NULL, 2);
if (type < 0)
return;
diff --git a/drivers/gpu/drm/nouveau/nv04_fbcon.c b/drivers/gpu/drm/nouveau/nv04_fbcon.c
index 1eeac4fae73d..33e4c9388bc1 100644
--- a/drivers/gpu/drm/nouveau/nv04_fbcon.c
+++ b/drivers/gpu/drm/nouveau/nv04_fbcon.c
@@ -25,6 +25,7 @@
#include "drmP.h"
#include "nouveau_drv.h"
#include "nouveau_dma.h"
+#include "nouveau_ramht.h"
#include "nouveau_fbcon.h"
void
@@ -169,11 +170,9 @@ nv04_fbcon_grobj_new(struct drm_device *dev, int class, uint32_t handle)
if (ret)
return ret;
- ret = nouveau_gpuobj_ref_add(dev, dev_priv->channel, handle, obj, NULL);
- if (ret)
- return ret;
-
- return 0;
+ ret = nouveau_ramht_insert(dev_priv->channel, handle, obj);
+ nouveau_gpuobj_ref(NULL, &obj);
+ return ret;
}
int
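
With the RAMHT helpers used above, the pattern is: create the object, insert it into the channel's handle table, then drop the local reference so the table holds the remaining one; the insert's return code is passed straight back. A rough standalone sketch of that reference handoff with a toy refcounted object (all names hypothetical, not the nouveau API):

#include <stdlib.h>
#include <stdio.h>

struct obj { int refs; unsigned handle; };

static struct obj *obj_new(void)
{
	struct obj *o = calloc(1, sizeof(*o));
	if (o)
		o->refs = 1;
	return o;
}

/* obj_ref(NULL, &p): drop *p's reference and clear the pointer. */
static void obj_ref(struct obj *src, struct obj **dst)
{
	if (src)
		src->refs++;
	if (*dst && --(*dst)->refs == 0)
		free(*dst);
	*dst = src;
}

static struct obj *table[8];	/* stand-in for the channel's handle table */

static int table_insert(unsigned handle, struct obj *o)
{
	o->handle = handle;
	obj_ref(o, &table[handle % 8]);	/* the table takes its own reference */
	return 0;
}

int main(void)
{
	struct obj *o = obj_new();
	if (!o)
		return 1;

	int ret = table_insert(0x1234, o);
	obj_ref(NULL, &o);	/* drop the creation reference, table keeps one */

	printf("insert %d, table refs %d\n", ret, table[0x1234 % 8]->refs);
	return 0;
}
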
diff --git a/drivers/gpu/drm/nouveau/nv04_fifo.c b/drivers/gpu/drm/nouveau/nv04_fifo.c
index 06cedd99c26a..708293b7ddcd 100644
--- a/drivers/gpu/drm/nouveau/nv04_fifo.c
+++ b/drivers/gpu/drm/nouveau/nv04_fifo.c
@@ -27,8 +27,9 @@
#include "drmP.h"
#include "drm.h"
#include "nouveau_drv.h"
+#include "nouveau_ramht.h"
-#define NV04_RAMFC(c) (dev_priv->ramfc_offset + ((c) * NV04_RAMFC__SIZE))
+#define NV04_RAMFC(c) (dev_priv->ramfc->pinst + ((c) * NV04_RAMFC__SIZE))
#define NV04_RAMFC__SIZE 32
#define NV04_RAMFC_DMA_PUT 0x00
#define NV04_RAMFC_DMA_GET 0x04
@@ -38,10 +39,8 @@
#define NV04_RAMFC_ENGINE 0x14
#define NV04_RAMFC_PULL1_ENGINE 0x18
-#define RAMFC_WR(offset, val) nv_wo32(dev, chan->ramfc->gpuobj, \
- NV04_RAMFC_##offset/4, (val))
-#define RAMFC_RD(offset) nv_ro32(dev, chan->ramfc->gpuobj, \
- NV04_RAMFC_##offset/4)
+#define RAMFC_WR(offset, val) nv_wo32(chan->ramfc, NV04_RAMFC_##offset, (val))
+#define RAMFC_RD(offset) nv_ro32(chan->ramfc, NV04_RAMFC_##offset)
void
nv04_fifo_disable(struct drm_device *dev)
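
The macro change above follows the accessor rework in this series: nv_wo32()/nv_ro32() now take the gpuobj plus an offset in bytes, so the old NV04_RAMFC_##offset/4 dword index becomes the raw byte offset. A tiny sketch of byte-addressed 32-bit accessors over a local buffer (hypothetical helpers, not the kernel ones):

#include <stdint.h>
#include <stdio.h>
#include <string.h>

struct gpuobj { uint8_t mem[64]; };

/* Byte-addressed 32-bit accessors, like the reworked nv_wo32()/nv_ro32(). */
static void wo32(struct gpuobj *o, uint32_t off, uint32_t val)
{
	memcpy(o->mem + off, &val, sizeof(val));
}

static uint32_t ro32(struct gpuobj *o, uint32_t off)
{
	uint32_t val;
	memcpy(&val, o->mem + off, sizeof(val));
	return val;
}

#define RAMFC_DMA_PUT	0x00
#define RAMFC_DMA_GET	0x04
#define RAMFC_WR(o, reg, v)	wo32((o), RAMFC_##reg, (v))
#define RAMFC_RD(o, reg)	ro32((o), RAMFC_##reg)

int main(void)
{
	struct gpuobj fc = { { 0 } };

	RAMFC_WR(&fc, DMA_PUT, 0x1000);
	printf("DMA_PUT = 0x%x\n", RAMFC_RD(&fc, DMA_PUT));
	return 0;
}
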
@@ -72,37 +71,32 @@ nv04_fifo_reassign(struct drm_device *dev, bool enable)
}
bool
-nv04_fifo_cache_flush(struct drm_device *dev)
-{
- struct drm_nouveau_private *dev_priv = dev->dev_private;
- struct nouveau_timer_engine *ptimer = &dev_priv->engine.timer;
- uint64_t start = ptimer->read(dev);
-
- do {
- if (nv_rd32(dev, NV03_PFIFO_CACHE1_GET) ==
- nv_rd32(dev, NV03_PFIFO_CACHE1_PUT))
- return true;
-
- } while (ptimer->read(dev) - start < 100000000);
-
- NV_ERROR(dev, "Timeout flushing the PFIFO cache.\n");
-
- return false;
-}
-
-bool
nv04_fifo_cache_pull(struct drm_device *dev, bool enable)
{
- uint32_t pull = nv_rd32(dev, NV04_PFIFO_CACHE1_PULL0);
+ int pull = nv_mask(dev, NV04_PFIFO_CACHE1_PULL0, 1, enable);
+
+ if (!enable) {
+ /* In some cases the PFIFO puller may be left in an
+ * inconsistent state if you try to stop it when it's
+ * busy translating handles. Sometimes you get a
+ * PFIFO_CACHE_ERROR, sometimes it just fails silently
+ * sending incorrect instance offsets to PGRAPH after
+ * it's started up again. To avoid the latter we
+ * invalidate the most recently calculated instance.
+ */
+ if (!nv_wait(dev, NV04_PFIFO_CACHE1_PULL0,
+ NV04_PFIFO_CACHE1_PULL0_HASH_BUSY, 0))
+ NV_ERROR(dev, "Timeout idling the PFIFO puller.\n");
+
+ if (nv_rd32(dev, NV04_PFIFO_CACHE1_PULL0) &
+ NV04_PFIFO_CACHE1_PULL0_HASH_FAILED)
+ nv_wr32(dev, NV03_PFIFO_INTR_0,
+ NV_PFIFO_INTR_CACHE_ERROR);
- if (enable) {
- nv_wr32(dev, NV04_PFIFO_CACHE1_PULL0, pull | 1);
- } else {
- nv_wr32(dev, NV04_PFIFO_CACHE1_PULL0, pull & ~1);
nv_wr32(dev, NV04_PFIFO_CACHE1_HASH, 0);
}
- return !!(pull & 1);
+ return pull & 1;
}
int
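
nv_mask(dev, reg, mask, val), as used above, is a read-modify-write helper: it reads the register, clears the mask bits, ORs in the new value, writes the result back and returns the old contents, which is how one call can both flip the enable bit and report the previous pull state. A minimal userspace model of that helper against a simulated register file (hypothetical names):

#include <stdint.h>
#include <stdio.h>

static uint32_t regs[16];	/* toy register file */

static uint32_t rd32(unsigned reg)              { return regs[reg]; }
static void     wr32(unsigned reg, uint32_t v)  { regs[reg] = v; }

/* Read-modify-write: returns what the register held before the write. */
static uint32_t mask32(unsigned reg, uint32_t mask, uint32_t val)
{
	uint32_t tmp = rd32(reg);
	wr32(reg, (tmp & ~mask) | val);
	return tmp;
}

#define PULL0 3

int main(void)
{
	regs[PULL0] = 0x1;			/* puller currently enabled */

	uint32_t pull = mask32(PULL0, 1, 0);	/* disable, get old state back */
	printf("was %s, now 0x%x\n", (pull & 1) ? "enabled" : "disabled",
	       rd32(PULL0));
	return 0;
}
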
@@ -130,7 +124,7 @@ nv04_fifo_create_context(struct nouveau_channel *chan)
NV04_RAMFC__SIZE,
NVOBJ_FLAG_ZERO_ALLOC |
NVOBJ_FLAG_ZERO_FREE,
- NULL, &chan->ramfc);
+ &chan->ramfc);
if (ret)
return ret;
@@ -139,7 +133,7 @@ nv04_fifo_create_context(struct nouveau_channel *chan)
/* Setup initial state */
RAMFC_WR(DMA_PUT, chan->pushbuf_base);
RAMFC_WR(DMA_GET, chan->pushbuf_base);
- RAMFC_WR(DMA_INSTANCE, chan->pushbuf->instance >> 4);
+ RAMFC_WR(DMA_INSTANCE, chan->pushbuf->pinst >> 4);
RAMFC_WR(DMA_FETCH, (NV_PFIFO_CACHE1_DMA_FETCH_TRIG_128_BYTES |
NV_PFIFO_CACHE1_DMA_FETCH_SIZE_128_BYTES |
NV_PFIFO_CACHE1_DMA_FETCH_MAX_REQS_8 |
@@ -161,7 +155,7 @@ nv04_fifo_destroy_context(struct nouveau_channel *chan)
nv_wr32(dev, NV04_PFIFO_MODE,
nv_rd32(dev, NV04_PFIFO_MODE) & ~(1 << chan->id));
- nouveau_gpuobj_ref_del(dev, &chan->ramfc);
+ nouveau_gpuobj_ref(NULL, &chan->ramfc);
}
static void
@@ -264,10 +258,10 @@ nv04_fifo_init_ramxx(struct drm_device *dev)
struct drm_nouveau_private *dev_priv = dev->dev_private;
nv_wr32(dev, NV03_PFIFO_RAMHT, (0x03 << 24) /* search 128 */ |
- ((dev_priv->ramht_bits - 9) << 16) |
- (dev_priv->ramht_offset >> 8));
- nv_wr32(dev, NV03_PFIFO_RAMRO, dev_priv->ramro_offset>>8);
- nv_wr32(dev, NV03_PFIFO_RAMFC, dev_priv->ramfc_offset >> 8);
+ ((dev_priv->ramht->bits - 9) << 16) |
+ (dev_priv->ramht->gpuobj->pinst >> 8));
+ nv_wr32(dev, NV03_PFIFO_RAMRO, dev_priv->ramro->pinst >> 8);
+ nv_wr32(dev, NV03_PFIFO_RAMFC, dev_priv->ramfc->pinst >> 8);
}
static void
diff --git a/drivers/gpu/drm/nouveau/nv04_instmem.c b/drivers/gpu/drm/nouveau/nv04_instmem.c
index 4408232d33f1..0b5ae297abde 100644
--- a/drivers/gpu/drm/nouveau/nv04_instmem.c
+++ b/drivers/gpu/drm/nouveau/nv04_instmem.c
@@ -1,6 +1,7 @@
#include "drmP.h"
#include "drm.h"
#include "nouveau_drv.h"
+#include "nouveau_ramht.h"
/* returns the size of fifo context */
static int
@@ -17,102 +18,51 @@ nouveau_fifo_ctx_size(struct drm_device *dev)
return 32;
}
-static void
-nv04_instmem_determine_amount(struct drm_device *dev)
+int nv04_instmem_init(struct drm_device *dev)
{
struct drm_nouveau_private *dev_priv = dev->dev_private;
- int i;
+ struct nouveau_gpuobj *ramht = NULL;
+ u32 offset, length;
+ int ret;
- /* Figure out how much instance memory we need */
- if (dev_priv->card_type >= NV_40) {
- /* We'll want more instance memory than this on some NV4x cards.
- * There's a 16MB aperture to play with that maps onto the end
- * of vram. For now, only reserve a small piece until we know
- * more about what each chipset requires.
- */
- switch (dev_priv->chipset) {
- case 0x40:
- case 0x47:
- case 0x49:
- case 0x4b:
- dev_priv->ramin_rsvd_vram = (2 * 1024 * 1024);
- break;
- default:
- dev_priv->ramin_rsvd_vram = (1 * 1024 * 1024);
- break;
- }
- } else {
- /*XXX: what *are* the limits on <NV40 cards?
- */
- dev_priv->ramin_rsvd_vram = (512 * 1024);
- }
- NV_DEBUG(dev, "RAMIN size: %dKiB\n", dev_priv->ramin_rsvd_vram >> 10);
+ /* RAMIN always available */
+ dev_priv->ramin_available = true;
- /* Clear all of it, except the BIOS image that's in the first 64KiB */
- for (i = 64 * 1024; i < dev_priv->ramin_rsvd_vram; i += 4)
- nv_wi32(dev, i, 0x00000000);
-}
+ /* Setup shared RAMHT */
+ ret = nouveau_gpuobj_new_fake(dev, 0x10000, ~0, 4096,
+ NVOBJ_FLAG_ZERO_ALLOC, &ramht);
+ if (ret)
+ return ret;
-static void
-nv04_instmem_configure_fixed_tables(struct drm_device *dev)
-{
- struct drm_nouveau_private *dev_priv = dev->dev_private;
- struct nouveau_engine *engine = &dev_priv->engine;
+ ret = nouveau_ramht_new(dev, ramht, &dev_priv->ramht);
+ nouveau_gpuobj_ref(NULL, &ramht);
+ if (ret)
+ return ret;
- /* FIFO hash table (RAMHT)
- * use 4k hash table at RAMIN+0x10000
- * TODO: extend the hash table
- */
- dev_priv->ramht_offset = 0x10000;
- dev_priv->ramht_bits = 9;
- dev_priv->ramht_size = (1 << dev_priv->ramht_bits); /* nr entries */
- dev_priv->ramht_size *= 8; /* 2 32-bit values per entry in RAMHT */
- NV_DEBUG(dev, "RAMHT offset=0x%x, size=%d\n", dev_priv->ramht_offset,
- dev_priv->ramht_size);
-
- /* FIFO runout table (RAMRO) - 512k at 0x11200 */
- dev_priv->ramro_offset = 0x11200;
- dev_priv->ramro_size = 512;
- NV_DEBUG(dev, "RAMRO offset=0x%x, size=%d\n", dev_priv->ramro_offset,
- dev_priv->ramro_size);
-
- /* FIFO context table (RAMFC)
- * NV40 : Not sure exactly how to position RAMFC on some cards,
- * 0x30002 seems to position it at RAMIN+0x20000 on these
- * cards. RAMFC is 4kb (32 fifos, 128byte entries).
- * Others: Position RAMFC at RAMIN+0x11400
- */
- dev_priv->ramfc_size = engine->fifo.channels *
- nouveau_fifo_ctx_size(dev);
+ /* And RAMRO */
+ ret = nouveau_gpuobj_new_fake(dev, 0x11200, ~0, 512,
+ NVOBJ_FLAG_ZERO_ALLOC, &dev_priv->ramro);
+ if (ret)
+ return ret;
+
+ /* And RAMFC */
+ length = dev_priv->engine.fifo.channels * nouveau_fifo_ctx_size(dev);
switch (dev_priv->card_type) {
case NV_40:
- dev_priv->ramfc_offset = 0x20000;
+ offset = 0x20000;
break;
- case NV_30:
- case NV_20:
- case NV_10:
- case NV_04:
default:
- dev_priv->ramfc_offset = 0x11400;
+ offset = 0x11400;
break;
}
- NV_DEBUG(dev, "RAMFC offset=0x%x, size=%d\n", dev_priv->ramfc_offset,
- dev_priv->ramfc_size);
-}
-int nv04_instmem_init(struct drm_device *dev)
-{
- struct drm_nouveau_private *dev_priv = dev->dev_private;
- uint32_t offset;
- int ret;
-
- nv04_instmem_determine_amount(dev);
- nv04_instmem_configure_fixed_tables(dev);
+ ret = nouveau_gpuobj_new_fake(dev, offset, ~0, length,
+ NVOBJ_FLAG_ZERO_ALLOC, &dev_priv->ramfc);
+ if (ret)
+ return ret;
- /* Create a heap to manage RAMIN allocations, we don't allocate
- * the space that was reserved for RAMHT/FC/RO.
- */
- offset = dev_priv->ramfc_offset + dev_priv->ramfc_size;
+ /* Only allow space after RAMFC to be used for object allocation */
+ offset += length;
/* It appears RAMRO (or something?) is controlled by 0x2220/0x2230
* on certain NV4x chipsets as well as RAMFC. When 0x2230 == 0
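
The rewritten init above replaces the old offset bookkeeping with three objects wrapped around fixed instance-memory ranges: a 4KiB RAMHT at 0x10000, 512 bytes of RAMRO at 0x11200, and a RAMFC sized for channels times the per-channel context size at 0x20000 on NV40 or 0x11400 on earlier chips, with the allocation heap starting right after RAMFC. A rough sketch of that layout arithmetic (standalone, with assumed sizes where the chipset dependence is elided):

#include <stdio.h>

int main(void)
{
	unsigned ramht_offset = 0x10000, ramht_size = 4096;	/* shared hash table */
	unsigned ramro_offset = 0x11200, ramro_size = 512;	/* runout area */

	int is_nv40 = 1;			/* assumption: NV40-class layout */
	unsigned channels = 32;
	unsigned ctx_size = is_nv40 ? 128 : 32;	/* per-channel RAMFC entry size */

	unsigned ramfc_offset = is_nv40 ? 0x20000 : 0x11400;
	unsigned ramfc_len = channels * ctx_size;

	/* object allocations may only use the space after RAMFC */
	unsigned heap_start = ramfc_offset + ramfc_len;

	printf("RAMHT 0x%05x+%u\n", ramht_offset, ramht_size);
	printf("RAMRO 0x%05x+%u\n", ramro_offset, ramro_size);
	printf("RAMFC 0x%05x+%u\n", ramfc_offset, ramfc_len);
	printf("heap starts at 0x%05x\n", heap_start);
	return 0;
}
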
@@ -140,46 +90,34 @@ int nv04_instmem_init(struct drm_device *dev)
void
nv04_instmem_takedown(struct drm_device *dev)
{
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+
+ nouveau_ramht_ref(NULL, &dev_priv->ramht, NULL);
+ nouveau_gpuobj_ref(NULL, &dev_priv->ramro);
+ nouveau_gpuobj_ref(NULL, &dev_priv->ramfc);
}
int
-nv04_instmem_populate(struct drm_device *dev, struct nouveau_gpuobj *gpuobj, uint32_t *sz)
+nv04_instmem_populate(struct drm_device *dev, struct nouveau_gpuobj *gpuobj,
+ uint32_t *sz)
{
- if (gpuobj->im_backing)
- return -EINVAL;
-
return 0;
}
void
nv04_instmem_clear(struct drm_device *dev, struct nouveau_gpuobj *gpuobj)
{
- struct drm_nouveau_private *dev_priv = dev->dev_private;
-
- if (gpuobj && gpuobj->im_backing) {
- if (gpuobj->im_bound)
- dev_priv->engine.instmem.unbind(dev, gpuobj);
- gpuobj->im_backing = NULL;
- }
}
int
nv04_instmem_bind(struct drm_device *dev, struct nouveau_gpuobj *gpuobj)
{
- if (!gpuobj->im_pramin || gpuobj->im_bound)
- return -EINVAL;
-
- gpuobj->im_bound = 1;
return 0;
}
int
nv04_instmem_unbind(struct drm_device *dev, struct nouveau_gpuobj *gpuobj)
{
- if (gpuobj->im_bound == 0)
- return -EINVAL;
-
- gpuobj->im_bound = 0;
return 0;
}
diff --git a/drivers/gpu/drm/nouveau/nv04_pm.c b/drivers/gpu/drm/nouveau/nv04_pm.c
new file mode 100644
index 000000000000..eb1c70dd82ed
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nv04_pm.c
@@ -0,0 +1,90 @@
+/*
+ * Copyright 2010 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs
+ */
+
+#include "drmP.h"
+#include "nouveau_drv.h"
+#include "nouveau_hw.h"
+#include "nouveau_pm.h"
+
+struct nv04_pm_state {
+ struct pll_lims pll;
+ struct nouveau_pll_vals calc;
+};
+
+int
+nv04_pm_clock_get(struct drm_device *dev, u32 id)
+{
+ return nouveau_hw_get_clock(dev, id);
+}
+
+void *
+nv04_pm_clock_pre(struct drm_device *dev, struct nouveau_pm_level *perflvl,
+ u32 id, int khz)
+{
+ struct nv04_pm_state *state;
+ int ret;
+
+ state = kzalloc(sizeof(*state), GFP_KERNEL);
+ if (!state)
+ return ERR_PTR(-ENOMEM);
+
+ ret = get_pll_limits(dev, id, &state->pll);
+ if (ret) {
+ kfree(state);
+ return (ret == -ENOENT) ? NULL : ERR_PTR(ret);
+ }
+
+ ret = nouveau_calc_pll_mnp(dev, &state->pll, khz, &state->calc);
+ if (!ret) {
+ kfree(state);
+ return ERR_PTR(-EINVAL);
+ }
+
+ return state;
+}
+
+void
+nv04_pm_clock_set(struct drm_device *dev, void *pre_state)
+{
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ struct nv04_pm_state *state = pre_state;
+ u32 reg = state->pll.reg;
+
+ /* thank the insane nouveau_hw_setpll() interface for this */
+ if (dev_priv->card_type >= NV_40)
+ reg += 4;
+
+ nouveau_hw_setpll(dev, reg, &state->calc);
+
+ if (dev_priv->card_type < NV_30 && reg == NV_PRAMDAC_MPLL_COEFF) {
+ if (dev_priv->card_type == NV_20)
+ nv_mask(dev, 0x1002c4, 0, 1 << 20);
+
+ /* Reset the DLLs */
+ nv_mask(dev, 0x1002c0, 0, 1 << 8);
+ }
+
+ kfree(state);
+}
+
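
The new file above splits a reclock into stages: clock_get reads the current frequency, clock_pre does everything that can fail up front (allocate a state object, look up the PLL limits, compute coefficients for the target frequency, returning NULL or an error pointer as appropriate), and clock_set programs the PLL from that state and frees it. A standalone sketch of the pre/set split with toy PLL math (hypothetical names, and plain NULL where the real code uses ERR_PTR):

#include <stdio.h>
#include <stdlib.h>

/* Stand-in for pll_lims + nouveau_pll_vals: just the computed dividers. */
struct pm_state { int n, m, p; };

/* "pre" step: do all the work that can fail before touching hardware. */
static struct pm_state *clock_pre(int khz)
{
	struct pm_state *state = calloc(1, sizeof(*state));
	if (!state)
		return NULL;

	/* toy coefficient calculation against a 27MHz reference */
	state->p = 1;
	state->m = 1;
	state->n = khz / 27000;
	if (!state->n) {
		free(state);
		return NULL;	/* the real code returns ERR_PTR(-EINVAL) here */
	}
	return state;
}

/* "set" step: program the hardware from the precomputed state, then free it. */
static void clock_set(struct pm_state *state)
{
	printf("programming PLL: N=%d M=%d P=%d\n", state->n, state->m, state->p);
	free(state);
}

int main(void)
{
	struct pm_state *state = clock_pre(400000);
	if (!state)
		return 1;
	clock_set(state);
	return 0;
}
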
diff --git a/drivers/gpu/drm/nouveau/nv04_tv.c b/drivers/gpu/drm/nouveau/nv04_tv.c
index 0b5d012d7c28..3eb605ddfd03 100644
--- a/drivers/gpu/drm/nouveau/nv04_tv.c
+++ b/drivers/gpu/drm/nouveau/nv04_tv.c
@@ -49,8 +49,8 @@ static struct i2c_board_info nv04_tv_encoder_info[] = {
int nv04_tv_identify(struct drm_device *dev, int i2c_index)
{
- return nouveau_i2c_identify(dev, "TV encoder",
- nv04_tv_encoder_info, i2c_index);
+ return nouveau_i2c_identify(dev, "TV encoder", nv04_tv_encoder_info,
+ NULL, i2c_index);
}
@@ -99,12 +99,10 @@ static void nv04_tv_bind(struct drm_device *dev, int head, bool bind)
state->tv_setup = 0;
- if (bind) {
- state->CRTC[NV_CIO_CRE_LCD__INDEX] = 0;
+ if (bind)
state->CRTC[NV_CIO_CRE_49] |= 0x10;
- } else {
+ else
state->CRTC[NV_CIO_CRE_49] &= ~0x10;
- }
NVWriteVgaCrtc(dev, head, NV_CIO_CRE_LCD__INDEX,
state->CRTC[NV_CIO_CRE_LCD__INDEX]);
diff --git a/drivers/gpu/drm/nouveau/nv10_fifo.c b/drivers/gpu/drm/nouveau/nv10_fifo.c
index 7a4069cf5d0b..f1b03ad58fd5 100644
--- a/drivers/gpu/drm/nouveau/nv10_fifo.c
+++ b/drivers/gpu/drm/nouveau/nv10_fifo.c
@@ -27,8 +27,9 @@
#include "drmP.h"
#include "drm.h"
#include "nouveau_drv.h"
+#include "nouveau_ramht.h"
-#define NV10_RAMFC(c) (dev_priv->ramfc_offset + ((c) * NV10_RAMFC__SIZE))
+#define NV10_RAMFC(c) (dev_priv->ramfc->pinst + ((c) * NV10_RAMFC__SIZE))
#define NV10_RAMFC__SIZE ((dev_priv->chipset) >= 0x17 ? 64 : 32)
int
@@ -48,7 +49,7 @@ nv10_fifo_create_context(struct nouveau_channel *chan)
ret = nouveau_gpuobj_new_fake(dev, NV10_RAMFC(chan->id), ~0,
NV10_RAMFC__SIZE, NVOBJ_FLAG_ZERO_ALLOC |
- NVOBJ_FLAG_ZERO_FREE, NULL, &chan->ramfc);
+ NVOBJ_FLAG_ZERO_FREE, &chan->ramfc);
if (ret)
return ret;
@@ -57,7 +58,7 @@ nv10_fifo_create_context(struct nouveau_channel *chan)
*/
nv_wi32(dev, fc + 0, chan->pushbuf_base);
nv_wi32(dev, fc + 4, chan->pushbuf_base);
- nv_wi32(dev, fc + 12, chan->pushbuf->instance >> 4);
+ nv_wi32(dev, fc + 12, chan->pushbuf->pinst >> 4);
nv_wi32(dev, fc + 20, NV_PFIFO_CACHE1_DMA_FETCH_TRIG_128_BYTES |
NV_PFIFO_CACHE1_DMA_FETCH_SIZE_128_BYTES |
NV_PFIFO_CACHE1_DMA_FETCH_MAX_REQS_8 |
@@ -80,7 +81,7 @@ nv10_fifo_destroy_context(struct nouveau_channel *chan)
nv_wr32(dev, NV04_PFIFO_MODE,
nv_rd32(dev, NV04_PFIFO_MODE) & ~(1 << chan->id));
- nouveau_gpuobj_ref_del(dev, &chan->ramfc);
+ nouveau_gpuobj_ref(NULL, &chan->ramfc);
}
static void
@@ -202,14 +203,14 @@ nv10_fifo_init_ramxx(struct drm_device *dev)
struct drm_nouveau_private *dev_priv = dev->dev_private;
nv_wr32(dev, NV03_PFIFO_RAMHT, (0x03 << 24) /* search 128 */ |
- ((dev_priv->ramht_bits - 9) << 16) |
- (dev_priv->ramht_offset >> 8));
- nv_wr32(dev, NV03_PFIFO_RAMRO, dev_priv->ramro_offset>>8);
+ ((dev_priv->ramht->bits - 9) << 16) |
+ (dev_priv->ramht->gpuobj->pinst >> 8));
+ nv_wr32(dev, NV03_PFIFO_RAMRO, dev_priv->ramro->pinst >> 8);
if (dev_priv->chipset < 0x17) {
- nv_wr32(dev, NV03_PFIFO_RAMFC, dev_priv->ramfc_offset >> 8);
+ nv_wr32(dev, NV03_PFIFO_RAMFC, dev_priv->ramfc->pinst >> 8);
} else {
- nv_wr32(dev, NV03_PFIFO_RAMFC, (dev_priv->ramfc_offset >> 8) |
+ nv_wr32(dev, NV03_PFIFO_RAMFC, (dev_priv->ramfc->pinst >> 8) |
(1 << 16) /* 64 Bytes entry*/);
/* XXX nvidia blob set bit 18, 21,23 for nv20 & nv30 */
}
diff --git a/drivers/gpu/drm/nouveau/nv10_graph.c b/drivers/gpu/drm/nouveau/nv10_graph.c
index b2f6a57c0cc5..8e68c9731159 100644
--- a/drivers/gpu/drm/nouveau/nv10_graph.c
+++ b/drivers/gpu/drm/nouveau/nv10_graph.c
@@ -803,7 +803,7 @@ nv10_graph_context_switch(struct drm_device *dev)
/* Load context for next channel */
chid = (nv_rd32(dev, NV04_PGRAPH_TRAPPED_ADDR) >> 20) & 0x1f;
chan = dev_priv->fifos[chid];
- if (chan)
+ if (chan && chan->pgraph_ctx)
nv10_graph_load_context(chan);
pgraph->fifo_access(dev, true);
diff --git a/drivers/gpu/drm/nouveau/nv17_tv.c b/drivers/gpu/drm/nouveau/nv17_tv.c
index 13cdc05b7c2d..28119fd19d03 100644
--- a/drivers/gpu/drm/nouveau/nv17_tv.c
+++ b/drivers/gpu/drm/nouveau/nv17_tv.c
@@ -193,55 +193,56 @@ nv17_tv_detect(struct drm_encoder *encoder, struct drm_connector *connector)
}
}
-static const struct {
- int hdisplay;
- int vdisplay;
-} modes[] = {
- { 640, 400 },
- { 640, 480 },
- { 720, 480 },
- { 720, 576 },
- { 800, 600 },
- { 1024, 768 },
- { 1280, 720 },
- { 1280, 1024 },
- { 1920, 1080 }
-};
-
-static int nv17_tv_get_modes(struct drm_encoder *encoder,
- struct drm_connector *connector)
+static int nv17_tv_get_ld_modes(struct drm_encoder *encoder,
+ struct drm_connector *connector)
{
struct nv17_tv_norm_params *tv_norm = get_tv_norm(encoder);
- struct drm_display_mode *mode;
- struct drm_display_mode *output_mode;
+ struct drm_display_mode *mode, *tv_mode;
int n = 0;
- int i;
-
- if (tv_norm->kind != CTV_ENC_MODE) {
- struct drm_display_mode *tv_mode;
- for (tv_mode = nv17_tv_modes; tv_mode->hdisplay; tv_mode++) {
- mode = drm_mode_duplicate(encoder->dev, tv_mode);
+ for (tv_mode = nv17_tv_modes; tv_mode->hdisplay; tv_mode++) {
+ mode = drm_mode_duplicate(encoder->dev, tv_mode);
- mode->clock = tv_norm->tv_enc_mode.vrefresh *
- mode->htotal / 1000 *
- mode->vtotal / 1000;
+ mode->clock = tv_norm->tv_enc_mode.vrefresh *
+ mode->htotal / 1000 *
+ mode->vtotal / 1000;
- if (mode->flags & DRM_MODE_FLAG_DBLSCAN)
- mode->clock *= 2;
+ if (mode->flags & DRM_MODE_FLAG_DBLSCAN)
+ mode->clock *= 2;
- if (mode->hdisplay == tv_norm->tv_enc_mode.hdisplay &&
- mode->vdisplay == tv_norm->tv_enc_mode.vdisplay)
- mode->type |= DRM_MODE_TYPE_PREFERRED;
+ if (mode->hdisplay == tv_norm->tv_enc_mode.hdisplay &&
+ mode->vdisplay == tv_norm->tv_enc_mode.vdisplay)
+ mode->type |= DRM_MODE_TYPE_PREFERRED;
- drm_mode_probed_add(connector, mode);
- n++;
- }
- return n;
+ drm_mode_probed_add(connector, mode);
+ n++;
}
- /* tv_norm->kind == CTV_ENC_MODE */
- output_mode = &tv_norm->ctv_enc_mode.mode;
+ return n;
+}
+
+static int nv17_tv_get_hd_modes(struct drm_encoder *encoder,
+ struct drm_connector *connector)
+{
+ struct nv17_tv_norm_params *tv_norm = get_tv_norm(encoder);
+ struct drm_display_mode *output_mode = &tv_norm->ctv_enc_mode.mode;
+ struct drm_display_mode *mode;
+ const struct {
+ int hdisplay;
+ int vdisplay;
+ } modes[] = {
+ { 640, 400 },
+ { 640, 480 },
+ { 720, 480 },
+ { 720, 576 },
+ { 800, 600 },
+ { 1024, 768 },
+ { 1280, 720 },
+ { 1280, 1024 },
+ { 1920, 1080 }
+ };
+ int i, n = 0;
+
for (i = 0; i < ARRAY_SIZE(modes); i++) {
if (modes[i].hdisplay > output_mode->hdisplay ||
modes[i].vdisplay > output_mode->vdisplay)
@@ -251,11 +252,12 @@ static int nv17_tv_get_modes(struct drm_encoder *encoder,
modes[i].vdisplay == output_mode->vdisplay) {
mode = drm_mode_duplicate(encoder->dev, output_mode);
mode->type |= DRM_MODE_TYPE_PREFERRED;
+
} else {
mode = drm_cvt_mode(encoder->dev, modes[i].hdisplay,
- modes[i].vdisplay, 60, false,
- output_mode->flags & DRM_MODE_FLAG_INTERLACE,
- false);
+ modes[i].vdisplay, 60, false,
+ (output_mode->flags &
+ DRM_MODE_FLAG_INTERLACE), false);
}
/* CVT modes are sometimes unsuitable... */
@@ -266,6 +268,7 @@ static int nv17_tv_get_modes(struct drm_encoder *encoder,
- mode->hdisplay) * 9 / 10) & ~7;
mode->hsync_end = mode->hsync_start + 8;
}
+
if (output_mode->vdisplay >= 1024) {
mode->vtotal = output_mode->vtotal;
mode->vsync_start = output_mode->vsync_start;
@@ -276,9 +279,21 @@ static int nv17_tv_get_modes(struct drm_encoder *encoder,
drm_mode_probed_add(connector, mode);
n++;
}
+
return n;
}
+static int nv17_tv_get_modes(struct drm_encoder *encoder,
+ struct drm_connector *connector)
+{
+ struct nv17_tv_norm_params *tv_norm = get_tv_norm(encoder);
+
+ if (tv_norm->kind == CTV_ENC_MODE)
+ return nv17_tv_get_hd_modes(encoder, connector);
+ else
+ return nv17_tv_get_ld_modes(encoder, connector);
+}
+
static int nv17_tv_mode_valid(struct drm_encoder *encoder,
struct drm_display_mode *mode)
{
@@ -408,15 +423,8 @@ static void nv17_tv_prepare(struct drm_encoder *encoder)
}
- /* Some NV4x have unknown values (0x3f, 0x50, 0x54, 0x6b, 0x79, 0x7f)
- * at LCD__INDEX which we don't alter
- */
- if (!(*cr_lcd & 0x44)) {
- if (tv_norm->kind == CTV_ENC_MODE)
- *cr_lcd = 0x1 | (head ? 0x0 : 0x8);
- else
- *cr_lcd = 0;
- }
+ if (tv_norm->kind == CTV_ENC_MODE)
+ *cr_lcd |= 0x1 | (head ? 0x0 : 0x8);
/* Set the DACCLK register */
dacclk = (NVReadRAMDAC(dev, 0, dacclk_off) & ~0x30) | 0x1;
diff --git a/drivers/gpu/drm/nouveau/nv17_tv.h b/drivers/gpu/drm/nouveau/nv17_tv.h
index c00977cedabd..6bf03840f9eb 100644
--- a/drivers/gpu/drm/nouveau/nv17_tv.h
+++ b/drivers/gpu/drm/nouveau/nv17_tv.h
@@ -127,7 +127,8 @@ void nv17_ctv_update_rescaler(struct drm_encoder *encoder);
/* TV hardware access functions */
-static inline void nv_write_ptv(struct drm_device *dev, uint32_t reg, uint32_t val)
+static inline void nv_write_ptv(struct drm_device *dev, uint32_t reg,
+ uint32_t val)
{
nv_wr32(dev, reg, val);
}
@@ -137,7 +138,8 @@ static inline uint32_t nv_read_ptv(struct drm_device *dev, uint32_t reg)
return nv_rd32(dev, reg);
}
-static inline void nv_write_tv_enc(struct drm_device *dev, uint8_t reg, uint8_t val)
+static inline void nv_write_tv_enc(struct drm_device *dev, uint8_t reg,
+ uint8_t val)
{
nv_write_ptv(dev, NV_PTV_TV_INDEX, reg);
nv_write_ptv(dev, NV_PTV_TV_DATA, val);
@@ -149,8 +151,11 @@ static inline uint8_t nv_read_tv_enc(struct drm_device *dev, uint8_t reg)
return nv_read_ptv(dev, NV_PTV_TV_DATA);
}
-#define nv_load_ptv(dev, state, reg) nv_write_ptv(dev, NV_PTV_OFFSET + 0x##reg, state->ptv_##reg)
-#define nv_save_ptv(dev, state, reg) state->ptv_##reg = nv_read_ptv(dev, NV_PTV_OFFSET + 0x##reg)
-#define nv_load_tv_enc(dev, state, reg) nv_write_tv_enc(dev, 0x##reg, state->tv_enc[0x##reg])
+#define nv_load_ptv(dev, state, reg) \
+ nv_write_ptv(dev, NV_PTV_OFFSET + 0x##reg, state->ptv_##reg)
+#define nv_save_ptv(dev, state, reg) \
+ state->ptv_##reg = nv_read_ptv(dev, NV_PTV_OFFSET + 0x##reg)
+#define nv_load_tv_enc(dev, state, reg) \
+ nv_write_tv_enc(dev, 0x##reg, state->tv_enc[0x##reg])
#endif
diff --git a/drivers/gpu/drm/nouveau/nv17_tv_modes.c b/drivers/gpu/drm/nouveau/nv17_tv_modes.c
index d64683d97e0d..9d3893c50a41 100644
--- a/drivers/gpu/drm/nouveau/nv17_tv_modes.c
+++ b/drivers/gpu/drm/nouveau/nv17_tv_modes.c
@@ -336,12 +336,17 @@ static void tv_setup_filter(struct drm_encoder *encoder)
struct filter_params *p = &fparams[k][j];
for (i = 0; i < 7; i++) {
- int64_t c = (p->k1 + p->ki*i + p->ki2*i*i + p->ki3*i*i*i)
- + (p->kr + p->kir*i + p->ki2r*i*i + p->ki3r*i*i*i)*rs[k]
- + (p->kf + p->kif*i + p->ki2f*i*i + p->ki3f*i*i*i)*flicker
- + (p->krf + p->kirf*i + p->ki2rf*i*i + p->ki3rf*i*i*i)*flicker*rs[k];
-
- (*filters[k])[j][i] = (c + id5/2) >> 39 & (0x1 << 31 | 0x7f << 9);
+ int64_t c = (p->k1 + p->ki*i + p->ki2*i*i +
+ p->ki3*i*i*i)
+ + (p->kr + p->kir*i + p->ki2r*i*i +
+ p->ki3r*i*i*i) * rs[k]
+ + (p->kf + p->kif*i + p->ki2f*i*i +
+ p->ki3f*i*i*i) * flicker
+ + (p->krf + p->kirf*i + p->ki2rf*i*i +
+ p->ki3rf*i*i*i) * flicker * rs[k];
+
+ (*filters[k])[j][i] = (c + id5/2) >> 39
+ & (0x1 << 31 | 0x7f << 9);
}
}
}
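
The reflowed expression above is a cubic in the tap index i, evaluated once per coefficient group (constant, rs, flicker, flicker*rs) and summed. Such polynomials are compact to evaluate with Horner's rule; a small generic illustration in 64-bit integer math, not the driver's actual filter tables:

#include <stdint.h>
#include <stdio.h>

/* Evaluate k1 + ki*i + ki2*i^2 + ki3*i^3 via Horner's rule. */
static int64_t cubic(int64_t k1, int64_t ki, int64_t ki2, int64_t ki3, int64_t i)
{
	return ((ki3 * i + ki2) * i + ki) * i + k1;
}

int main(void)
{
	/* toy coefficients; the driver sums four such cubics per filter tap */
	for (int i = 0; i < 7; i++)
		printf("tap %d: %lld\n", i, (long long)cubic(100, -7, 3, 1, i));
	return 0;
}
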
@@ -349,7 +354,8 @@ static void tv_setup_filter(struct drm_encoder *encoder)
/* Hardware state saving/restoring */
-static void tv_save_filter(struct drm_device *dev, uint32_t base, uint32_t regs[4][7])
+static void tv_save_filter(struct drm_device *dev, uint32_t base,
+ uint32_t regs[4][7])
{
int i, j;
uint32_t offsets[] = { base, base + 0x1c, base + 0x40, base + 0x5c };
@@ -360,7 +366,8 @@ static void tv_save_filter(struct drm_device *dev, uint32_t base, uint32_t regs[
}
}
-static void tv_load_filter(struct drm_device *dev, uint32_t base, uint32_t regs[4][7])
+static void tv_load_filter(struct drm_device *dev, uint32_t base,
+ uint32_t regs[4][7])
{
int i, j;
uint32_t offsets[] = { base, base + 0x1c, base + 0x40, base + 0x5c };
@@ -504,10 +511,10 @@ void nv17_tv_update_properties(struct drm_encoder *encoder)
break;
}
- regs->tv_enc[0x20] = interpolate(0, tv_norm->tv_enc_mode.tv_enc[0x20], 255,
- tv_enc->saturation);
- regs->tv_enc[0x22] = interpolate(0, tv_norm->tv_enc_mode.tv_enc[0x22], 255,
- tv_enc->saturation);
+ regs->tv_enc[0x20] = interpolate(0, tv_norm->tv_enc_mode.tv_enc[0x20],
+ 255, tv_enc->saturation);
+ regs->tv_enc[0x22] = interpolate(0, tv_norm->tv_enc_mode.tv_enc[0x22],
+ 255, tv_enc->saturation);
regs->tv_enc[0x25] = tv_enc->hue * 255 / 100;
nv_load_ptv(dev, regs, 204);
@@ -541,7 +548,8 @@ void nv17_ctv_update_rescaler(struct drm_encoder *encoder)
int head = nouveau_crtc(encoder->crtc)->index;
struct nv04_crtc_reg *regs = &dev_priv->mode_reg.crtc_reg[head];
struct drm_display_mode *crtc_mode = &encoder->crtc->mode;
- struct drm_display_mode *output_mode = &get_tv_norm(encoder)->ctv_enc_mode.mode;
+ struct drm_display_mode *output_mode =
+ &get_tv_norm(encoder)->ctv_enc_mode.mode;
int overscan, hmargin, vmargin, hratio, vratio;
/* The rescaler doesn't do the right thing for interlaced modes. */
@@ -553,13 +561,15 @@ void nv17_ctv_update_rescaler(struct drm_encoder *encoder)
hmargin = (output_mode->hdisplay - crtc_mode->hdisplay) / 2;
vmargin = (output_mode->vdisplay - crtc_mode->vdisplay) / 2;
- hmargin = interpolate(0, min(hmargin, output_mode->hdisplay/20), hmargin,
- overscan);
- vmargin = interpolate(0, min(vmargin, output_mode->vdisplay/20), vmargin,
- overscan);
+ hmargin = interpolate(0, min(hmargin, output_mode->hdisplay/20),
+ hmargin, overscan);
+ vmargin = interpolate(0, min(vmargin, output_mode->vdisplay/20),
+ vmargin, overscan);
- hratio = crtc_mode->hdisplay * 0x800 / (output_mode->hdisplay - 2*hmargin);
- vratio = crtc_mode->vdisplay * 0x800 / (output_mode->vdisplay - 2*vmargin) & ~3;
+ hratio = crtc_mode->hdisplay * 0x800 /
+ (output_mode->hdisplay - 2*hmargin);
+ vratio = crtc_mode->vdisplay * 0x800 /
+ (output_mode->vdisplay - 2*vmargin) & ~3;
regs->fp_horiz_regs[FP_VALID_START] = hmargin;
regs->fp_horiz_regs[FP_VALID_END] = output_mode->hdisplay - hmargin - 1;
diff --git a/drivers/gpu/drm/nouveau/nv20_graph.c b/drivers/gpu/drm/nouveau/nv20_graph.c
index 17f309b36c91..12ab9cd56eca 100644
--- a/drivers/gpu/drm/nouveau/nv20_graph.c
+++ b/drivers/gpu/drm/nouveau/nv20_graph.c
@@ -37,49 +37,49 @@ nv20_graph_context_init(struct drm_device *dev, struct nouveau_gpuobj *ctx)
{
int i;
- nv_wo32(dev, ctx, 0x033c/4, 0xffff0000);
- nv_wo32(dev, ctx, 0x03a0/4, 0x0fff0000);
- nv_wo32(dev, ctx, 0x03a4/4, 0x0fff0000);
- nv_wo32(dev, ctx, 0x047c/4, 0x00000101);
- nv_wo32(dev, ctx, 0x0490/4, 0x00000111);
- nv_wo32(dev, ctx, 0x04a8/4, 0x44400000);
+ nv_wo32(ctx, 0x033c, 0xffff0000);
+ nv_wo32(ctx, 0x03a0, 0x0fff0000);
+ nv_wo32(ctx, 0x03a4, 0x0fff0000);
+ nv_wo32(ctx, 0x047c, 0x00000101);
+ nv_wo32(ctx, 0x0490, 0x00000111);
+ nv_wo32(ctx, 0x04a8, 0x44400000);
for (i = 0x04d4; i <= 0x04e0; i += 4)
- nv_wo32(dev, ctx, i/4, 0x00030303);
+ nv_wo32(ctx, i, 0x00030303);
for (i = 0x04f4; i <= 0x0500; i += 4)
- nv_wo32(dev, ctx, i/4, 0x00080000);
+ nv_wo32(ctx, i, 0x00080000);
for (i = 0x050c; i <= 0x0518; i += 4)
- nv_wo32(dev, ctx, i/4, 0x01012000);
+ nv_wo32(ctx, i, 0x01012000);
for (i = 0x051c; i <= 0x0528; i += 4)
- nv_wo32(dev, ctx, i/4, 0x000105b8);
+ nv_wo32(ctx, i, 0x000105b8);
for (i = 0x052c; i <= 0x0538; i += 4)
- nv_wo32(dev, ctx, i/4, 0x00080008);
+ nv_wo32(ctx, i, 0x00080008);
for (i = 0x055c; i <= 0x0598; i += 4)
- nv_wo32(dev, ctx, i/4, 0x07ff0000);
- nv_wo32(dev, ctx, 0x05a4/4, 0x4b7fffff);
- nv_wo32(dev, ctx, 0x05fc/4, 0x00000001);
- nv_wo32(dev, ctx, 0x0604/4, 0x00004000);
- nv_wo32(dev, ctx, 0x0610/4, 0x00000001);
- nv_wo32(dev, ctx, 0x0618/4, 0x00040000);
- nv_wo32(dev, ctx, 0x061c/4, 0x00010000);
+ nv_wo32(ctx, i, 0x07ff0000);
+ nv_wo32(ctx, 0x05a4, 0x4b7fffff);
+ nv_wo32(ctx, 0x05fc, 0x00000001);
+ nv_wo32(ctx, 0x0604, 0x00004000);
+ nv_wo32(ctx, 0x0610, 0x00000001);
+ nv_wo32(ctx, 0x0618, 0x00040000);
+ nv_wo32(ctx, 0x061c, 0x00010000);
for (i = 0x1c1c; i <= 0x248c; i += 16) {
- nv_wo32(dev, ctx, (i + 0)/4, 0x10700ff9);
- nv_wo32(dev, ctx, (i + 4)/4, 0x0436086c);
- nv_wo32(dev, ctx, (i + 8)/4, 0x000c001b);
+ nv_wo32(ctx, (i + 0), 0x10700ff9);
+ nv_wo32(ctx, (i + 4), 0x0436086c);
+ nv_wo32(ctx, (i + 8), 0x000c001b);
}
- nv_wo32(dev, ctx, 0x281c/4, 0x3f800000);
- nv_wo32(dev, ctx, 0x2830/4, 0x3f800000);
- nv_wo32(dev, ctx, 0x285c/4, 0x40000000);
- nv_wo32(dev, ctx, 0x2860/4, 0x3f800000);
- nv_wo32(dev, ctx, 0x2864/4, 0x3f000000);
- nv_wo32(dev, ctx, 0x286c/4, 0x40000000);
- nv_wo32(dev, ctx, 0x2870/4, 0x3f800000);
- nv_wo32(dev, ctx, 0x2878/4, 0xbf800000);
- nv_wo32(dev, ctx, 0x2880/4, 0xbf800000);
- nv_wo32(dev, ctx, 0x34a4/4, 0x000fe000);
- nv_wo32(dev, ctx, 0x3530/4, 0x000003f8);
- nv_wo32(dev, ctx, 0x3540/4, 0x002fe000);
+ nv_wo32(ctx, 0x281c, 0x3f800000);
+ nv_wo32(ctx, 0x2830, 0x3f800000);
+ nv_wo32(ctx, 0x285c, 0x40000000);
+ nv_wo32(ctx, 0x2860, 0x3f800000);
+ nv_wo32(ctx, 0x2864, 0x3f000000);
+ nv_wo32(ctx, 0x286c, 0x40000000);
+ nv_wo32(ctx, 0x2870, 0x3f800000);
+ nv_wo32(ctx, 0x2878, 0xbf800000);
+ nv_wo32(ctx, 0x2880, 0xbf800000);
+ nv_wo32(ctx, 0x34a4, 0x000fe000);
+ nv_wo32(ctx, 0x3530, 0x000003f8);
+ nv_wo32(ctx, 0x3540, 0x002fe000);
for (i = 0x355c; i <= 0x3578; i += 4)
- nv_wo32(dev, ctx, i/4, 0x001c527c);
+ nv_wo32(ctx, i, 0x001c527c);
}
static void
@@ -87,58 +87,58 @@ nv25_graph_context_init(struct drm_device *dev, struct nouveau_gpuobj *ctx)
{
int i;
- nv_wo32(dev, ctx, 0x035c/4, 0xffff0000);
- nv_wo32(dev, ctx, 0x03c0/4, 0x0fff0000);
- nv_wo32(dev, ctx, 0x03c4/4, 0x0fff0000);
- nv_wo32(dev, ctx, 0x049c/4, 0x00000101);
- nv_wo32(dev, ctx, 0x04b0/4, 0x00000111);
- nv_wo32(dev, ctx, 0x04c8/4, 0x00000080);
- nv_wo32(dev, ctx, 0x04cc/4, 0xffff0000);
- nv_wo32(dev, ctx, 0x04d0/4, 0x00000001);
- nv_wo32(dev, ctx, 0x04e4/4, 0x44400000);
- nv_wo32(dev, ctx, 0x04fc/4, 0x4b800000);
+ nv_wo32(ctx, 0x035c, 0xffff0000);
+ nv_wo32(ctx, 0x03c0, 0x0fff0000);
+ nv_wo32(ctx, 0x03c4, 0x0fff0000);
+ nv_wo32(ctx, 0x049c, 0x00000101);
+ nv_wo32(ctx, 0x04b0, 0x00000111);
+ nv_wo32(ctx, 0x04c8, 0x00000080);
+ nv_wo32(ctx, 0x04cc, 0xffff0000);
+ nv_wo32(ctx, 0x04d0, 0x00000001);
+ nv_wo32(ctx, 0x04e4, 0x44400000);
+ nv_wo32(ctx, 0x04fc, 0x4b800000);
for (i = 0x0510; i <= 0x051c; i += 4)
- nv_wo32(dev, ctx, i/4, 0x00030303);
+ nv_wo32(ctx, i, 0x00030303);
for (i = 0x0530; i <= 0x053c; i += 4)
- nv_wo32(dev, ctx, i/4, 0x00080000);
+ nv_wo32(ctx, i, 0x00080000);
for (i = 0x0548; i <= 0x0554; i += 4)
- nv_wo32(dev, ctx, i/4, 0x01012000);
+ nv_wo32(ctx, i, 0x01012000);
for (i = 0x0558; i <= 0x0564; i += 4)
- nv_wo32(dev, ctx, i/4, 0x000105b8);
+ nv_wo32(ctx, i, 0x000105b8);
for (i = 0x0568; i <= 0x0574; i += 4)
- nv_wo32(dev, ctx, i/4, 0x00080008);
+ nv_wo32(ctx, i, 0x00080008);
for (i = 0x0598; i <= 0x05d4; i += 4)
- nv_wo32(dev, ctx, i/4, 0x07ff0000);
- nv_wo32(dev, ctx, 0x05e0/4, 0x4b7fffff);
- nv_wo32(dev, ctx, 0x0620/4, 0x00000080);
- nv_wo32(dev, ctx, 0x0624/4, 0x30201000);
- nv_wo32(dev, ctx, 0x0628/4, 0x70605040);
- nv_wo32(dev, ctx, 0x062c/4, 0xb0a09080);
- nv_wo32(dev, ctx, 0x0630/4, 0xf0e0d0c0);
- nv_wo32(dev, ctx, 0x0664/4, 0x00000001);
- nv_wo32(dev, ctx, 0x066c/4, 0x00004000);
- nv_wo32(dev, ctx, 0x0678/4, 0x00000001);
- nv_wo32(dev, ctx, 0x0680/4, 0x00040000);
- nv_wo32(dev, ctx, 0x0684/4, 0x00010000);
+ nv_wo32(ctx, i, 0x07ff0000);
+ nv_wo32(ctx, 0x05e0, 0x4b7fffff);
+ nv_wo32(ctx, 0x0620, 0x00000080);
+ nv_wo32(ctx, 0x0624, 0x30201000);
+ nv_wo32(ctx, 0x0628, 0x70605040);
+ nv_wo32(ctx, 0x062c, 0xb0a09080);
+ nv_wo32(ctx, 0x0630, 0xf0e0d0c0);
+ nv_wo32(ctx, 0x0664, 0x00000001);
+ nv_wo32(ctx, 0x066c, 0x00004000);
+ nv_wo32(ctx, 0x0678, 0x00000001);
+ nv_wo32(ctx, 0x0680, 0x00040000);
+ nv_wo32(ctx, 0x0684, 0x00010000);
for (i = 0x1b04; i <= 0x2374; i += 16) {
- nv_wo32(dev, ctx, (i + 0)/4, 0x10700ff9);
- nv_wo32(dev, ctx, (i + 4)/4, 0x0436086c);
- nv_wo32(dev, ctx, (i + 8)/4, 0x000c001b);
+ nv_wo32(ctx, (i + 0), 0x10700ff9);
+ nv_wo32(ctx, (i + 4), 0x0436086c);
+ nv_wo32(ctx, (i + 8), 0x000c001b);
}
- nv_wo32(dev, ctx, 0x2704/4, 0x3f800000);
- nv_wo32(dev, ctx, 0x2718/4, 0x3f800000);
- nv_wo32(dev, ctx, 0x2744/4, 0x40000000);
- nv_wo32(dev, ctx, 0x2748/4, 0x3f800000);
- nv_wo32(dev, ctx, 0x274c/4, 0x3f000000);
- nv_wo32(dev, ctx, 0x2754/4, 0x40000000);
- nv_wo32(dev, ctx, 0x2758/4, 0x3f800000);
- nv_wo32(dev, ctx, 0x2760/4, 0xbf800000);
- nv_wo32(dev, ctx, 0x2768/4, 0xbf800000);
- nv_wo32(dev, ctx, 0x308c/4, 0x000fe000);
- nv_wo32(dev, ctx, 0x3108/4, 0x000003f8);
- nv_wo32(dev, ctx, 0x3468/4, 0x002fe000);
+ nv_wo32(ctx, 0x2704, 0x3f800000);
+ nv_wo32(ctx, 0x2718, 0x3f800000);
+ nv_wo32(ctx, 0x2744, 0x40000000);
+ nv_wo32(ctx, 0x2748, 0x3f800000);
+ nv_wo32(ctx, 0x274c, 0x3f000000);
+ nv_wo32(ctx, 0x2754, 0x40000000);
+ nv_wo32(ctx, 0x2758, 0x3f800000);
+ nv_wo32(ctx, 0x2760, 0xbf800000);
+ nv_wo32(ctx, 0x2768, 0xbf800000);
+ nv_wo32(ctx, 0x308c, 0x000fe000);
+ nv_wo32(ctx, 0x3108, 0x000003f8);
+ nv_wo32(ctx, 0x3468, 0x002fe000);
for (i = 0x3484; i <= 0x34a0; i += 4)
- nv_wo32(dev, ctx, i/4, 0x001c527c);
+ nv_wo32(ctx, i, 0x001c527c);
}
static void
@@ -146,49 +146,49 @@ nv2a_graph_context_init(struct drm_device *dev, struct nouveau_gpuobj *ctx)
{
int i;
- nv_wo32(dev, ctx, 0x033c/4, 0xffff0000);
- nv_wo32(dev, ctx, 0x03a0/4, 0x0fff0000);
- nv_wo32(dev, ctx, 0x03a4/4, 0x0fff0000);
- nv_wo32(dev, ctx, 0x047c/4, 0x00000101);
- nv_wo32(dev, ctx, 0x0490/4, 0x00000111);
- nv_wo32(dev, ctx, 0x04a8/4, 0x44400000);
+ nv_wo32(ctx, 0x033c, 0xffff0000);
+ nv_wo32(ctx, 0x03a0, 0x0fff0000);
+ nv_wo32(ctx, 0x03a4, 0x0fff0000);
+ nv_wo32(ctx, 0x047c, 0x00000101);
+ nv_wo32(ctx, 0x0490, 0x00000111);
+ nv_wo32(ctx, 0x04a8, 0x44400000);
for (i = 0x04d4; i <= 0x04e0; i += 4)
- nv_wo32(dev, ctx, i/4, 0x00030303);
+ nv_wo32(ctx, i, 0x00030303);
for (i = 0x04f4; i <= 0x0500; i += 4)
- nv_wo32(dev, ctx, i/4, 0x00080000);
+ nv_wo32(ctx, i, 0x00080000);
for (i = 0x050c; i <= 0x0518; i += 4)
- nv_wo32(dev, ctx, i/4, 0x01012000);
+ nv_wo32(ctx, i, 0x01012000);
for (i = 0x051c; i <= 0x0528; i += 4)
- nv_wo32(dev, ctx, i/4, 0x000105b8);
+ nv_wo32(ctx, i, 0x000105b8);
for (i = 0x052c; i <= 0x0538; i += 4)
- nv_wo32(dev, ctx, i/4, 0x00080008);
+ nv_wo32(ctx, i, 0x00080008);
for (i = 0x055c; i <= 0x0598; i += 4)
- nv_wo32(dev, ctx, i/4, 0x07ff0000);
- nv_wo32(dev, ctx, 0x05a4/4, 0x4b7fffff);
- nv_wo32(dev, ctx, 0x05fc/4, 0x00000001);
- nv_wo32(dev, ctx, 0x0604/4, 0x00004000);
- nv_wo32(dev, ctx, 0x0610/4, 0x00000001);
- nv_wo32(dev, ctx, 0x0618/4, 0x00040000);
- nv_wo32(dev, ctx, 0x061c/4, 0x00010000);
+ nv_wo32(ctx, i, 0x07ff0000);
+ nv_wo32(ctx, 0x05a4, 0x4b7fffff);
+ nv_wo32(ctx, 0x05fc, 0x00000001);
+ nv_wo32(ctx, 0x0604, 0x00004000);
+ nv_wo32(ctx, 0x0610, 0x00000001);
+ nv_wo32(ctx, 0x0618, 0x00040000);
+ nv_wo32(ctx, 0x061c, 0x00010000);
for (i = 0x1a9c; i <= 0x22fc; i += 16) { /*XXX: check!! */
- nv_wo32(dev, ctx, (i + 0)/4, 0x10700ff9);
- nv_wo32(dev, ctx, (i + 4)/4, 0x0436086c);
- nv_wo32(dev, ctx, (i + 8)/4, 0x000c001b);
+ nv_wo32(ctx, (i + 0), 0x10700ff9);
+ nv_wo32(ctx, (i + 4), 0x0436086c);
+ nv_wo32(ctx, (i + 8), 0x000c001b);
}
- nv_wo32(dev, ctx, 0x269c/4, 0x3f800000);
- nv_wo32(dev, ctx, 0x26b0/4, 0x3f800000);
- nv_wo32(dev, ctx, 0x26dc/4, 0x40000000);
- nv_wo32(dev, ctx, 0x26e0/4, 0x3f800000);
- nv_wo32(dev, ctx, 0x26e4/4, 0x3f000000);
- nv_wo32(dev, ctx, 0x26ec/4, 0x40000000);
- nv_wo32(dev, ctx, 0x26f0/4, 0x3f800000);
- nv_wo32(dev, ctx, 0x26f8/4, 0xbf800000);
- nv_wo32(dev, ctx, 0x2700/4, 0xbf800000);
- nv_wo32(dev, ctx, 0x3024/4, 0x000fe000);
- nv_wo32(dev, ctx, 0x30a0/4, 0x000003f8);
- nv_wo32(dev, ctx, 0x33fc/4, 0x002fe000);
+ nv_wo32(ctx, 0x269c, 0x3f800000);
+ nv_wo32(ctx, 0x26b0, 0x3f800000);
+ nv_wo32(ctx, 0x26dc, 0x40000000);
+ nv_wo32(ctx, 0x26e0, 0x3f800000);
+ nv_wo32(ctx, 0x26e4, 0x3f000000);
+ nv_wo32(ctx, 0x26ec, 0x40000000);
+ nv_wo32(ctx, 0x26f0, 0x3f800000);
+ nv_wo32(ctx, 0x26f8, 0xbf800000);
+ nv_wo32(ctx, 0x2700, 0xbf800000);
+ nv_wo32(ctx, 0x3024, 0x000fe000);
+ nv_wo32(ctx, 0x30a0, 0x000003f8);
+ nv_wo32(ctx, 0x33fc, 0x002fe000);
for (i = 0x341c; i <= 0x3438; i += 4)
- nv_wo32(dev, ctx, i/4, 0x001c527c);
+ nv_wo32(ctx, i, 0x001c527c);
}
static void
@@ -196,57 +196,57 @@ nv30_31_graph_context_init(struct drm_device *dev, struct nouveau_gpuobj *ctx)
{
int i;
- nv_wo32(dev, ctx, 0x0410/4, 0x00000101);
- nv_wo32(dev, ctx, 0x0424/4, 0x00000111);
- nv_wo32(dev, ctx, 0x0428/4, 0x00000060);
- nv_wo32(dev, ctx, 0x0444/4, 0x00000080);
- nv_wo32(dev, ctx, 0x0448/4, 0xffff0000);
- nv_wo32(dev, ctx, 0x044c/4, 0x00000001);
- nv_wo32(dev, ctx, 0x0460/4, 0x44400000);
- nv_wo32(dev, ctx, 0x048c/4, 0xffff0000);
+ nv_wo32(ctx, 0x0410, 0x00000101);
+ nv_wo32(ctx, 0x0424, 0x00000111);
+ nv_wo32(ctx, 0x0428, 0x00000060);
+ nv_wo32(ctx, 0x0444, 0x00000080);
+ nv_wo32(ctx, 0x0448, 0xffff0000);
+ nv_wo32(ctx, 0x044c, 0x00000001);
+ nv_wo32(ctx, 0x0460, 0x44400000);
+ nv_wo32(ctx, 0x048c, 0xffff0000);
for (i = 0x04e0; i < 0x04e8; i += 4)
- nv_wo32(dev, ctx, i/4, 0x0fff0000);
- nv_wo32(dev, ctx, 0x04ec/4, 0x00011100);
+ nv_wo32(ctx, i, 0x0fff0000);
+ nv_wo32(ctx, 0x04ec, 0x00011100);
for (i = 0x0508; i < 0x0548; i += 4)
- nv_wo32(dev, ctx, i/4, 0x07ff0000);
- nv_wo32(dev, ctx, 0x0550/4, 0x4b7fffff);
- nv_wo32(dev, ctx, 0x058c/4, 0x00000080);
- nv_wo32(dev, ctx, 0x0590/4, 0x30201000);
- nv_wo32(dev, ctx, 0x0594/4, 0x70605040);
- nv_wo32(dev, ctx, 0x0598/4, 0xb8a89888);
- nv_wo32(dev, ctx, 0x059c/4, 0xf8e8d8c8);
- nv_wo32(dev, ctx, 0x05b0/4, 0xb0000000);
+ nv_wo32(ctx, i, 0x07ff0000);
+ nv_wo32(ctx, 0x0550, 0x4b7fffff);
+ nv_wo32(ctx, 0x058c, 0x00000080);
+ nv_wo32(ctx, 0x0590, 0x30201000);
+ nv_wo32(ctx, 0x0594, 0x70605040);
+ nv_wo32(ctx, 0x0598, 0xb8a89888);
+ nv_wo32(ctx, 0x059c, 0xf8e8d8c8);
+ nv_wo32(ctx, 0x05b0, 0xb0000000);
for (i = 0x0600; i < 0x0640; i += 4)
- nv_wo32(dev, ctx, i/4, 0x00010588);
+ nv_wo32(ctx, i, 0x00010588);
for (i = 0x0640; i < 0x0680; i += 4)
- nv_wo32(dev, ctx, i/4, 0x00030303);
+ nv_wo32(ctx, i, 0x00030303);
for (i = 0x06c0; i < 0x0700; i += 4)
- nv_wo32(dev, ctx, i/4, 0x0008aae4);
+ nv_wo32(ctx, i, 0x0008aae4);
for (i = 0x0700; i < 0x0740; i += 4)
- nv_wo32(dev, ctx, i/4, 0x01012000);
+ nv_wo32(ctx, i, 0x01012000);
for (i = 0x0740; i < 0x0780; i += 4)
- nv_wo32(dev, ctx, i/4, 0x00080008);
- nv_wo32(dev, ctx, 0x085c/4, 0x00040000);
- nv_wo32(dev, ctx, 0x0860/4, 0x00010000);
+ nv_wo32(ctx, i, 0x00080008);
+ nv_wo32(ctx, 0x085c, 0x00040000);
+ nv_wo32(ctx, 0x0860, 0x00010000);
for (i = 0x0864; i < 0x0874; i += 4)
- nv_wo32(dev, ctx, i/4, 0x00040004);
+ nv_wo32(ctx, i, 0x00040004);
for (i = 0x1f18; i <= 0x3088 ; i += 16) {
- nv_wo32(dev, ctx, i/4 + 0, 0x10700ff9);
- nv_wo32(dev, ctx, i/4 + 1, 0x0436086c);
- nv_wo32(dev, ctx, i/4 + 2, 0x000c001b);
+ nv_wo32(ctx, i + 0, 0x10700ff9);
+ nv_wo32(ctx, i + 4, 0x0436086c);
+ nv_wo32(ctx, i + 8, 0x000c001b);
}
for (i = 0x30b8; i < 0x30c8; i += 4)
- nv_wo32(dev, ctx, i/4, 0x0000ffff);
- nv_wo32(dev, ctx, 0x344c/4, 0x3f800000);
- nv_wo32(dev, ctx, 0x3808/4, 0x3f800000);
- nv_wo32(dev, ctx, 0x381c/4, 0x3f800000);
- nv_wo32(dev, ctx, 0x3848/4, 0x40000000);
- nv_wo32(dev, ctx, 0x384c/4, 0x3f800000);
- nv_wo32(dev, ctx, 0x3850/4, 0x3f000000);
- nv_wo32(dev, ctx, 0x3858/4, 0x40000000);
- nv_wo32(dev, ctx, 0x385c/4, 0x3f800000);
- nv_wo32(dev, ctx, 0x3864/4, 0xbf800000);
- nv_wo32(dev, ctx, 0x386c/4, 0xbf800000);
+ nv_wo32(ctx, i, 0x0000ffff);
+ nv_wo32(ctx, 0x344c, 0x3f800000);
+ nv_wo32(ctx, 0x3808, 0x3f800000);
+ nv_wo32(ctx, 0x381c, 0x3f800000);
+ nv_wo32(ctx, 0x3848, 0x40000000);
+ nv_wo32(ctx, 0x384c, 0x3f800000);
+ nv_wo32(ctx, 0x3850, 0x3f000000);
+ nv_wo32(ctx, 0x3858, 0x40000000);
+ nv_wo32(ctx, 0x385c, 0x3f800000);
+ nv_wo32(ctx, 0x3864, 0xbf800000);
+ nv_wo32(ctx, 0x386c, 0xbf800000);
}
static void
@@ -254,57 +254,57 @@ nv34_graph_context_init(struct drm_device *dev, struct nouveau_gpuobj *ctx)
{
int i;
- nv_wo32(dev, ctx, 0x040c/4, 0x01000101);
- nv_wo32(dev, ctx, 0x0420/4, 0x00000111);
- nv_wo32(dev, ctx, 0x0424/4, 0x00000060);
- nv_wo32(dev, ctx, 0x0440/4, 0x00000080);
- nv_wo32(dev, ctx, 0x0444/4, 0xffff0000);
- nv_wo32(dev, ctx, 0x0448/4, 0x00000001);
- nv_wo32(dev, ctx, 0x045c/4, 0x44400000);
- nv_wo32(dev, ctx, 0x0480/4, 0xffff0000);
+ nv_wo32(ctx, 0x040c, 0x01000101);
+ nv_wo32(ctx, 0x0420, 0x00000111);
+ nv_wo32(ctx, 0x0424, 0x00000060);
+ nv_wo32(ctx, 0x0440, 0x00000080);
+ nv_wo32(ctx, 0x0444, 0xffff0000);
+ nv_wo32(ctx, 0x0448, 0x00000001);
+ nv_wo32(ctx, 0x045c, 0x44400000);
+ nv_wo32(ctx, 0x0480, 0xffff0000);
for (i = 0x04d4; i < 0x04dc; i += 4)
- nv_wo32(dev, ctx, i/4, 0x0fff0000);
- nv_wo32(dev, ctx, 0x04e0/4, 0x00011100);
+ nv_wo32(ctx, i, 0x0fff0000);
+ nv_wo32(ctx, 0x04e0, 0x00011100);
for (i = 0x04fc; i < 0x053c; i += 4)
- nv_wo32(dev, ctx, i/4, 0x07ff0000);
- nv_wo32(dev, ctx, 0x0544/4, 0x4b7fffff);
- nv_wo32(dev, ctx, 0x057c/4, 0x00000080);
- nv_wo32(dev, ctx, 0x0580/4, 0x30201000);
- nv_wo32(dev, ctx, 0x0584/4, 0x70605040);
- nv_wo32(dev, ctx, 0x0588/4, 0xb8a89888);
- nv_wo32(dev, ctx, 0x058c/4, 0xf8e8d8c8);
- nv_wo32(dev, ctx, 0x05a0/4, 0xb0000000);
+ nv_wo32(ctx, i, 0x07ff0000);
+ nv_wo32(ctx, 0x0544, 0x4b7fffff);
+ nv_wo32(ctx, 0x057c, 0x00000080);
+ nv_wo32(ctx, 0x0580, 0x30201000);
+ nv_wo32(ctx, 0x0584, 0x70605040);
+ nv_wo32(ctx, 0x0588, 0xb8a89888);
+ nv_wo32(ctx, 0x058c, 0xf8e8d8c8);
+ nv_wo32(ctx, 0x05a0, 0xb0000000);
for (i = 0x05f0; i < 0x0630; i += 4)
- nv_wo32(dev, ctx, i/4, 0x00010588);
+ nv_wo32(ctx, i, 0x00010588);
for (i = 0x0630; i < 0x0670; i += 4)
- nv_wo32(dev, ctx, i/4, 0x00030303);
+ nv_wo32(ctx, i, 0x00030303);
for (i = 0x06b0; i < 0x06f0; i += 4)
- nv_wo32(dev, ctx, i/4, 0x0008aae4);
+ nv_wo32(ctx, i, 0x0008aae4);
for (i = 0x06f0; i < 0x0730; i += 4)
- nv_wo32(dev, ctx, i/4, 0x01012000);
+ nv_wo32(ctx, i, 0x01012000);
for (i = 0x0730; i < 0x0770; i += 4)
- nv_wo32(dev, ctx, i/4, 0x00080008);
- nv_wo32(dev, ctx, 0x0850/4, 0x00040000);
- nv_wo32(dev, ctx, 0x0854/4, 0x00010000);
+ nv_wo32(ctx, i, 0x00080008);
+ nv_wo32(ctx, 0x0850, 0x00040000);
+ nv_wo32(ctx, 0x0854, 0x00010000);
for (i = 0x0858; i < 0x0868; i += 4)
- nv_wo32(dev, ctx, i/4, 0x00040004);
+ nv_wo32(ctx, i, 0x00040004);
for (i = 0x15ac; i <= 0x271c ; i += 16) {
- nv_wo32(dev, ctx, i/4 + 0, 0x10700ff9);
- nv_wo32(dev, ctx, i/4 + 1, 0x0436086c);
- nv_wo32(dev, ctx, i/4 + 2, 0x000c001b);
+ nv_wo32(ctx, i + 0, 0x10700ff9);
+ nv_wo32(ctx, i + 4, 0x0436086c);
+ nv_wo32(ctx, i + 8, 0x000c001b);
}
for (i = 0x274c; i < 0x275c; i += 4)
- nv_wo32(dev, ctx, i/4, 0x0000ffff);
- nv_wo32(dev, ctx, 0x2ae0/4, 0x3f800000);
- nv_wo32(dev, ctx, 0x2e9c/4, 0x3f800000);
- nv_wo32(dev, ctx, 0x2eb0/4, 0x3f800000);
- nv_wo32(dev, ctx, 0x2edc/4, 0x40000000);
- nv_wo32(dev, ctx, 0x2ee0/4, 0x3f800000);
- nv_wo32(dev, ctx, 0x2ee4/4, 0x3f000000);
- nv_wo32(dev, ctx, 0x2eec/4, 0x40000000);
- nv_wo32(dev, ctx, 0x2ef0/4, 0x3f800000);
- nv_wo32(dev, ctx, 0x2ef8/4, 0xbf800000);
- nv_wo32(dev, ctx, 0x2f00/4, 0xbf800000);
+ nv_wo32(ctx, i, 0x0000ffff);
+ nv_wo32(ctx, 0x2ae0, 0x3f800000);
+ nv_wo32(ctx, 0x2e9c, 0x3f800000);
+ nv_wo32(ctx, 0x2eb0, 0x3f800000);
+ nv_wo32(ctx, 0x2edc, 0x40000000);
+ nv_wo32(ctx, 0x2ee0, 0x3f800000);
+ nv_wo32(ctx, 0x2ee4, 0x3f000000);
+ nv_wo32(ctx, 0x2eec, 0x40000000);
+ nv_wo32(ctx, 0x2ef0, 0x3f800000);
+ nv_wo32(ctx, 0x2ef8, 0xbf800000);
+ nv_wo32(ctx, 0x2f00, 0xbf800000);
}
static void
@@ -312,57 +312,57 @@ nv35_36_graph_context_init(struct drm_device *dev, struct nouveau_gpuobj *ctx)
{
int i;
- nv_wo32(dev, ctx, 0x040c/4, 0x00000101);
- nv_wo32(dev, ctx, 0x0420/4, 0x00000111);
- nv_wo32(dev, ctx, 0x0424/4, 0x00000060);
- nv_wo32(dev, ctx, 0x0440/4, 0x00000080);
- nv_wo32(dev, ctx, 0x0444/4, 0xffff0000);
- nv_wo32(dev, ctx, 0x0448/4, 0x00000001);
- nv_wo32(dev, ctx, 0x045c/4, 0x44400000);
- nv_wo32(dev, ctx, 0x0488/4, 0xffff0000);
+ nv_wo32(ctx, 0x040c, 0x00000101);
+ nv_wo32(ctx, 0x0420, 0x00000111);
+ nv_wo32(ctx, 0x0424, 0x00000060);
+ nv_wo32(ctx, 0x0440, 0x00000080);
+ nv_wo32(ctx, 0x0444, 0xffff0000);
+ nv_wo32(ctx, 0x0448, 0x00000001);
+ nv_wo32(ctx, 0x045c, 0x44400000);
+ nv_wo32(ctx, 0x0488, 0xffff0000);
for (i = 0x04dc; i < 0x04e4; i += 4)
- nv_wo32(dev, ctx, i/4, 0x0fff0000);
- nv_wo32(dev, ctx, 0x04e8/4, 0x00011100);
+ nv_wo32(ctx, i, 0x0fff0000);
+ nv_wo32(ctx, 0x04e8, 0x00011100);
for (i = 0x0504; i < 0x0544; i += 4)
- nv_wo32(dev, ctx, i/4, 0x07ff0000);
- nv_wo32(dev, ctx, 0x054c/4, 0x4b7fffff);
- nv_wo32(dev, ctx, 0x0588/4, 0x00000080);
- nv_wo32(dev, ctx, 0x058c/4, 0x30201000);
- nv_wo32(dev, ctx, 0x0590/4, 0x70605040);
- nv_wo32(dev, ctx, 0x0594/4, 0xb8a89888);
- nv_wo32(dev, ctx, 0x0598/4, 0xf8e8d8c8);
- nv_wo32(dev, ctx, 0x05ac/4, 0xb0000000);
+ nv_wo32(ctx, i, 0x07ff0000);
+ nv_wo32(ctx, 0x054c, 0x4b7fffff);
+ nv_wo32(ctx, 0x0588, 0x00000080);
+ nv_wo32(ctx, 0x058c, 0x30201000);
+ nv_wo32(ctx, 0x0590, 0x70605040);
+ nv_wo32(ctx, 0x0594, 0xb8a89888);
+ nv_wo32(ctx, 0x0598, 0xf8e8d8c8);
+ nv_wo32(ctx, 0x05ac, 0xb0000000);
for (i = 0x0604; i < 0x0644; i += 4)
- nv_wo32(dev, ctx, i/4, 0x00010588);
+ nv_wo32(ctx, i, 0x00010588);
for (i = 0x0644; i < 0x0684; i += 4)
- nv_wo32(dev, ctx, i/4, 0x00030303);
+ nv_wo32(ctx, i, 0x00030303);
for (i = 0x06c4; i < 0x0704; i += 4)
- nv_wo32(dev, ctx, i/4, 0x0008aae4);
+ nv_wo32(ctx, i, 0x0008aae4);
for (i = 0x0704; i < 0x0744; i += 4)
- nv_wo32(dev, ctx, i/4, 0x01012000);
+ nv_wo32(ctx, i, 0x01012000);
for (i = 0x0744; i < 0x0784; i += 4)
- nv_wo32(dev, ctx, i/4, 0x00080008);
- nv_wo32(dev, ctx, 0x0860/4, 0x00040000);
- nv_wo32(dev, ctx, 0x0864/4, 0x00010000);
+ nv_wo32(ctx, i, 0x00080008);
+ nv_wo32(ctx, 0x0860, 0x00040000);
+ nv_wo32(ctx, 0x0864, 0x00010000);
for (i = 0x0868; i < 0x0878; i += 4)
- nv_wo32(dev, ctx, i/4, 0x00040004);
+ nv_wo32(ctx, i, 0x00040004);
for (i = 0x1f1c; i <= 0x308c ; i += 16) {
- nv_wo32(dev, ctx, i/4 + 0, 0x10700ff9);
- nv_wo32(dev, ctx, i/4 + 1, 0x0436086c);
- nv_wo32(dev, ctx, i/4 + 2, 0x000c001b);
+ nv_wo32(ctx, i + 0, 0x10700ff9);
+ nv_wo32(ctx, i + 4, 0x0436086c);
+ nv_wo32(ctx, i + 8, 0x000c001b);
}
for (i = 0x30bc; i < 0x30cc; i += 4)
- nv_wo32(dev, ctx, i/4, 0x0000ffff);
- nv_wo32(dev, ctx, 0x3450/4, 0x3f800000);
- nv_wo32(dev, ctx, 0x380c/4, 0x3f800000);
- nv_wo32(dev, ctx, 0x3820/4, 0x3f800000);
- nv_wo32(dev, ctx, 0x384c/4, 0x40000000);
- nv_wo32(dev, ctx, 0x3850/4, 0x3f800000);
- nv_wo32(dev, ctx, 0x3854/4, 0x3f000000);
- nv_wo32(dev, ctx, 0x385c/4, 0x40000000);
- nv_wo32(dev, ctx, 0x3860/4, 0x3f800000);
- nv_wo32(dev, ctx, 0x3868/4, 0xbf800000);
- nv_wo32(dev, ctx, 0x3870/4, 0xbf800000);
+ nv_wo32(ctx, i, 0x0000ffff);
+ nv_wo32(ctx, 0x3450, 0x3f800000);
+ nv_wo32(ctx, 0x380c, 0x3f800000);
+ nv_wo32(ctx, 0x3820, 0x3f800000);
+ nv_wo32(ctx, 0x384c, 0x40000000);
+ nv_wo32(ctx, 0x3850, 0x3f800000);
+ nv_wo32(ctx, 0x3854, 0x3f000000);
+ nv_wo32(ctx, 0x385c, 0x40000000);
+ nv_wo32(ctx, 0x3860, 0x3f800000);
+ nv_wo32(ctx, 0x3868, 0xbf800000);
+ nv_wo32(ctx, 0x3870, 0xbf800000);
}
int
@@ -372,7 +372,7 @@ nv20_graph_create_context(struct nouveau_channel *chan)
struct drm_nouveau_private *dev_priv = dev->dev_private;
struct nouveau_pgraph_engine *pgraph = &dev_priv->engine.graph;
void (*ctx_init)(struct drm_device *, struct nouveau_gpuobj *);
- unsigned int idoffs = 0x28/4;
+ unsigned int idoffs = 0x28;
int ret;
switch (dev_priv->chipset) {
@@ -403,21 +403,19 @@ nv20_graph_create_context(struct nouveau_channel *chan)
BUG_ON(1);
}
- ret = nouveau_gpuobj_new_ref(dev, chan, NULL, 0, pgraph->grctx_size,
- 16, NVOBJ_FLAG_ZERO_ALLOC,
- &chan->ramin_grctx);
+ ret = nouveau_gpuobj_new(dev, chan, pgraph->grctx_size, 16,
+ NVOBJ_FLAG_ZERO_ALLOC, &chan->ramin_grctx);
if (ret)
return ret;
/* Initialise default context values */
- ctx_init(dev, chan->ramin_grctx->gpuobj);
+ ctx_init(dev, chan->ramin_grctx);
/* nv20: nv_wo32(dev, chan->ramin_grctx->gpuobj, 10, chan->id<<24); */
- nv_wo32(dev, chan->ramin_grctx->gpuobj, idoffs,
- (chan->id << 24) | 0x1); /* CTX_USER */
+ nv_wo32(chan->ramin_grctx, idoffs,
+ (chan->id << 24) | 0x1); /* CTX_USER */
- nv_wo32(dev, pgraph->ctx_table->gpuobj, chan->id,
- chan->ramin_grctx->instance >> 4);
+ nv_wo32(pgraph->ctx_table, chan->id * 4, chan->ramin_grctx->pinst >> 4);
return 0;
}
@@ -428,10 +426,8 @@ nv20_graph_destroy_context(struct nouveau_channel *chan)
struct drm_nouveau_private *dev_priv = dev->dev_private;
struct nouveau_pgraph_engine *pgraph = &dev_priv->engine.graph;
- if (chan->ramin_grctx)
- nouveau_gpuobj_ref_del(dev, &chan->ramin_grctx);
-
- nv_wo32(dev, pgraph->ctx_table->gpuobj, chan->id, 0);
+ nouveau_gpuobj_ref(NULL, &chan->ramin_grctx);
+ nv_wo32(pgraph->ctx_table, chan->id * 4, 0);
}
int
@@ -442,7 +438,7 @@ nv20_graph_load_context(struct nouveau_channel *chan)
if (!chan->ramin_grctx)
return -EINVAL;
- inst = chan->ramin_grctx->instance >> 4;
+ inst = chan->ramin_grctx->pinst >> 4;
nv_wr32(dev, NV20_PGRAPH_CHANNEL_CTX_POINTER, inst);
nv_wr32(dev, NV20_PGRAPH_CHANNEL_CTX_XFER,
@@ -465,7 +461,7 @@ nv20_graph_unload_context(struct drm_device *dev)
chan = pgraph->channel(dev);
if (!chan)
return 0;
- inst = chan->ramin_grctx->instance >> 4;
+ inst = chan->ramin_grctx->pinst >> 4;
nv_wr32(dev, NV20_PGRAPH_CHANNEL_CTX_POINTER, inst);
nv_wr32(dev, NV20_PGRAPH_CHANNEL_CTX_XFER,
@@ -552,15 +548,15 @@ nv20_graph_init(struct drm_device *dev)
if (!pgraph->ctx_table) {
/* Create Context Pointer Table */
- ret = nouveau_gpuobj_new_ref(dev, NULL, NULL, 0, 32 * 4, 16,
- NVOBJ_FLAG_ZERO_ALLOC,
- &pgraph->ctx_table);
+ ret = nouveau_gpuobj_new(dev, NULL, 32 * 4, 16,
+ NVOBJ_FLAG_ZERO_ALLOC,
+ &pgraph->ctx_table);
if (ret)
return ret;
}
nv_wr32(dev, NV20_PGRAPH_CHANNEL_CTX_TABLE,
- pgraph->ctx_table->instance >> 4);
+ pgraph->ctx_table->pinst >> 4);
nv20_graph_rdi(dev);
@@ -646,7 +642,7 @@ nv20_graph_takedown(struct drm_device *dev)
struct drm_nouveau_private *dev_priv = dev->dev_private;
struct nouveau_pgraph_engine *pgraph = &dev_priv->engine.graph;
- nouveau_gpuobj_ref_del(dev, &pgraph->ctx_table);
+ nouveau_gpuobj_ref(NULL, &pgraph->ctx_table);
}
int
@@ -681,15 +677,15 @@ nv30_graph_init(struct drm_device *dev)
if (!pgraph->ctx_table) {
/* Create Context Pointer Table */
- ret = nouveau_gpuobj_new_ref(dev, NULL, NULL, 0, 32 * 4, 16,
- NVOBJ_FLAG_ZERO_ALLOC,
- &pgraph->ctx_table);
+ ret = nouveau_gpuobj_new(dev, NULL, 32 * 4, 16,
+ NVOBJ_FLAG_ZERO_ALLOC,
+ &pgraph->ctx_table);
if (ret)
return ret;
}
nv_wr32(dev, NV20_PGRAPH_CHANNEL_CTX_TABLE,
- pgraph->ctx_table->instance >> 4);
+ pgraph->ctx_table->pinst >> 4);
nv_wr32(dev, NV03_PGRAPH_INTR , 0xFFFFFFFF);
nv_wr32(dev, NV03_PGRAPH_INTR_EN, 0xFFFFFFFF);
diff --git a/drivers/gpu/drm/nouveau/nv40_fifo.c b/drivers/gpu/drm/nouveau/nv40_fifo.c
index 2b67f1835c39..d337b8b28cdd 100644
--- a/drivers/gpu/drm/nouveau/nv40_fifo.c
+++ b/drivers/gpu/drm/nouveau/nv40_fifo.c
@@ -27,8 +27,9 @@
#include "drmP.h"
#include "nouveau_drv.h"
#include "nouveau_drm.h"
+#include "nouveau_ramht.h"
-#define NV40_RAMFC(c) (dev_priv->ramfc_offset + ((c) * NV40_RAMFC__SIZE))
+#define NV40_RAMFC(c) (dev_priv->ramfc->pinst + ((c) * NV40_RAMFC__SIZE))
#define NV40_RAMFC__SIZE 128
int
@@ -42,7 +43,7 @@ nv40_fifo_create_context(struct nouveau_channel *chan)
ret = nouveau_gpuobj_new_fake(dev, NV40_RAMFC(chan->id), ~0,
NV40_RAMFC__SIZE, NVOBJ_FLAG_ZERO_ALLOC |
- NVOBJ_FLAG_ZERO_FREE, NULL, &chan->ramfc);
+ NVOBJ_FLAG_ZERO_FREE, &chan->ramfc);
if (ret)
return ret;
@@ -50,7 +51,7 @@ nv40_fifo_create_context(struct nouveau_channel *chan)
nv_wi32(dev, fc + 0, chan->pushbuf_base);
nv_wi32(dev, fc + 4, chan->pushbuf_base);
- nv_wi32(dev, fc + 12, chan->pushbuf->instance >> 4);
+ nv_wi32(dev, fc + 12, chan->pushbuf->pinst >> 4);
nv_wi32(dev, fc + 24, NV_PFIFO_CACHE1_DMA_FETCH_TRIG_128_BYTES |
NV_PFIFO_CACHE1_DMA_FETCH_SIZE_128_BYTES |
NV_PFIFO_CACHE1_DMA_FETCH_MAX_REQS_8 |
@@ -58,7 +59,7 @@ nv40_fifo_create_context(struct nouveau_channel *chan)
NV_PFIFO_CACHE1_BIG_ENDIAN |
#endif
0x30000000 /* no idea.. */);
- nv_wi32(dev, fc + 56, chan->ramin_grctx->instance >> 4);
+ nv_wi32(dev, fc + 56, chan->ramin_grctx->pinst >> 4);
nv_wi32(dev, fc + 60, 0x0001FFFF);
/* enable the fifo dma operation */
@@ -77,8 +78,7 @@ nv40_fifo_destroy_context(struct nouveau_channel *chan)
nv_wr32(dev, NV04_PFIFO_MODE,
nv_rd32(dev, NV04_PFIFO_MODE) & ~(1 << chan->id));
- if (chan->ramfc)
- nouveau_gpuobj_ref_del(dev, &chan->ramfc);
+ nouveau_gpuobj_ref(NULL, &chan->ramfc);
}
static void
@@ -241,9 +241,9 @@ nv40_fifo_init_ramxx(struct drm_device *dev)
struct drm_nouveau_private *dev_priv = dev->dev_private;
nv_wr32(dev, NV03_PFIFO_RAMHT, (0x03 << 24) /* search 128 */ |
- ((dev_priv->ramht_bits - 9) << 16) |
- (dev_priv->ramht_offset >> 8));
- nv_wr32(dev, NV03_PFIFO_RAMRO, dev_priv->ramro_offset>>8);
+ ((dev_priv->ramht->bits - 9) << 16) |
+ (dev_priv->ramht->gpuobj->pinst >> 8));
+ nv_wr32(dev, NV03_PFIFO_RAMRO, dev_priv->ramro->pinst >> 8);
switch (dev_priv->chipset) {
case 0x47:
@@ -271,7 +271,7 @@ nv40_fifo_init_ramxx(struct drm_device *dev)
nv_wr32(dev, 0x2230, 0);
nv_wr32(dev, NV40_PFIFO_RAMFC,
((dev_priv->vram_size - 512 * 1024 +
- dev_priv->ramfc_offset) >> 16) | (3 << 16));
+ dev_priv->ramfc->pinst) >> 16) | (3 << 16));
break;
}
}
diff --git a/drivers/gpu/drm/nouveau/nv40_graph.c b/drivers/gpu/drm/nouveau/nv40_graph.c
index fd7d2b501316..7ee1b91569b8 100644
--- a/drivers/gpu/drm/nouveau/nv40_graph.c
+++ b/drivers/gpu/drm/nouveau/nv40_graph.c
@@ -45,7 +45,7 @@ nv40_graph_channel(struct drm_device *dev)
struct nouveau_channel *chan = dev_priv->fifos[i];
if (chan && chan->ramin_grctx &&
- chan->ramin_grctx->instance == inst)
+ chan->ramin_grctx->pinst == inst)
return chan;
}
@@ -61,27 +61,25 @@ nv40_graph_create_context(struct nouveau_channel *chan)
struct nouveau_grctx ctx = {};
int ret;
- ret = nouveau_gpuobj_new_ref(dev, chan, NULL, 0, pgraph->grctx_size,
- 16, NVOBJ_FLAG_ZERO_ALLOC,
- &chan->ramin_grctx);
+ ret = nouveau_gpuobj_new(dev, chan, pgraph->grctx_size, 16,
+ NVOBJ_FLAG_ZERO_ALLOC, &chan->ramin_grctx);
if (ret)
return ret;
/* Initialise default context values */
ctx.dev = chan->dev;
ctx.mode = NOUVEAU_GRCTX_VALS;
- ctx.data = chan->ramin_grctx->gpuobj;
+ ctx.data = chan->ramin_grctx;
nv40_grctx_init(&ctx);
- nv_wo32(dev, chan->ramin_grctx->gpuobj, 0,
- chan->ramin_grctx->gpuobj->im_pramin->start);
+ nv_wo32(chan->ramin_grctx, 0, chan->ramin_grctx->pinst);
return 0;
}
void
nv40_graph_destroy_context(struct nouveau_channel *chan)
{
- nouveau_gpuobj_ref_del(chan->dev, &chan->ramin_grctx);
+ nouveau_gpuobj_ref(NULL, &chan->ramin_grctx);
}
static int
@@ -135,7 +133,7 @@ nv40_graph_load_context(struct nouveau_channel *chan)
if (!chan->ramin_grctx)
return -EINVAL;
- inst = chan->ramin_grctx->instance >> 4;
+ inst = chan->ramin_grctx->pinst >> 4;
ret = nv40_graph_transfer_context(dev, inst, 0);
if (ret)
diff --git a/drivers/gpu/drm/nouveau/nv40_grctx.c b/drivers/gpu/drm/nouveau/nv40_grctx.c
index 9b5c97469588..ce585093264e 100644
--- a/drivers/gpu/drm/nouveau/nv40_grctx.c
+++ b/drivers/gpu/drm/nouveau/nv40_grctx.c
@@ -596,13 +596,13 @@ nv40_graph_construct_shader(struct nouveau_grctx *ctx)
offset += 0x0280/4;
for (i = 0; i < 16; i++, offset += 2)
- nv_wo32(dev, obj, offset, 0x3f800000);
+ nv_wo32(obj, offset * 4, 0x3f800000);
for (vs = 0; vs < vs_nr; vs++, offset += vs_len) {
for (i = 0; i < vs_nr_b0 * 6; i += 6)
- nv_wo32(dev, obj, offset + b0_offset + i, 0x00000001);
+ nv_wo32(obj, (offset + b0_offset + i) * 4, 0x00000001);
for (i = 0; i < vs_nr_b1 * 4; i += 4)
- nv_wo32(dev, obj, offset + b1_offset + i, 0x3f800000);
+ nv_wo32(obj, (offset + b1_offset + i) * 4, 0x3f800000);
}
}
diff --git a/drivers/gpu/drm/nouveau/nv50_calc.c b/drivers/gpu/drm/nouveau/nv50_calc.c
index 2cdc2bfe7179..de81151648f8 100644
--- a/drivers/gpu/drm/nouveau/nv50_calc.c
+++ b/drivers/gpu/drm/nouveau/nv50_calc.c
@@ -51,24 +51,28 @@ nv50_calc_pll2(struct drm_device *dev, struct pll_lims *pll, int clk,
int *N, int *fN, int *M, int *P)
{
fixed20_12 fb_div, a, b;
+ u32 refclk = pll->refclk / 10;
+ u32 max_vco_freq = pll->vco1.maxfreq / 10;
+ u32 max_vco_inputfreq = pll->vco1.max_inputfreq / 10;
+ clk /= 10;
- *P = pll->vco1.maxfreq / clk;
+ *P = max_vco_freq / clk;
if (*P > pll->max_p)
*P = pll->max_p;
if (*P < pll->min_p)
*P = pll->min_p;
- /* *M = ceil(refclk / pll->vco.max_inputfreq); */
- a.full = dfixed_const(pll->refclk);
- b.full = dfixed_const(pll->vco1.max_inputfreq);
+ /* *M = floor((refclk + max_vco_inputfreq) / max_vco_inputfreq); */
+ a.full = dfixed_const(refclk + max_vco_inputfreq);
+ b.full = dfixed_const(max_vco_inputfreq);
a.full = dfixed_div(a, b);
- a.full = dfixed_ceil(a);
+ a.full = dfixed_floor(a);
*M = dfixed_trunc(a);
/* fb_div = (vco * *M) / refclk; */
fb_div.full = dfixed_const(clk * *P);
fb_div.full = dfixed_mul(fb_div, a);
- a.full = dfixed_const(pll->refclk);
+ a.full = dfixed_const(refclk);
fb_div.full = dfixed_div(fb_div, a);
/* *N = floor(fb_div); */
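Aside: the hunk above pre-scales every frequency by 10 before the fixed20_12 math and replaces the ceiling with a floor over (refclk + max_vco_inputfreq). A plain-integer illustration of just that *M term, with made-up sample values (the driver itself uses the dfixed_* helpers on struct pll_lims fields):

	#include <stdio.h>

	int main(void)
	{
		/* hypothetical numbers, only to show the rounding of the new *M term */
		unsigned int refclk = 27000 / 10;            /* kHz, pre-scaled by 10 */
		unsigned int max_vco_inputfreq = 400 / 10;   /* kHz, pre-scaled by 10 */

		/* *M = floor((refclk + max_vco_inputfreq) / max_vco_inputfreq) */
		unsigned int M = (refclk + max_vco_inputfreq) / max_vco_inputfreq;

		printf("M = %u\n", M);	/* prints 68 for these sample numbers */
		return 0;
	}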
diff --git a/drivers/gpu/drm/nouveau/nv50_crtc.c b/drivers/gpu/drm/nouveau/nv50_crtc.c
index bfd4ca2fe7ef..56476d0c6de8 100644
--- a/drivers/gpu/drm/nouveau/nv50_crtc.c
+++ b/drivers/gpu/drm/nouveau/nv50_crtc.c
@@ -104,8 +104,7 @@ nv50_crtc_blank(struct nouveau_crtc *nv_crtc, bool blanked)
OUT_RING(evo, nv_crtc->lut.depth == 8 ?
NV50_EVO_CRTC_CLUT_MODE_OFF :
NV50_EVO_CRTC_CLUT_MODE_ON);
- OUT_RING(evo, (nv_crtc->lut.nvbo->bo.mem.mm_node->start <<
- PAGE_SHIFT) >> 8);
+ OUT_RING(evo, (nv_crtc->lut.nvbo->bo.mem.start << PAGE_SHIFT) >> 8);
if (dev_priv->chipset != 0x50) {
BEGIN_RING(evo, 0, NV84_EVO_CRTC(index, CLUT_DMA), 1);
OUT_RING(evo, NvEvoVRAM);
@@ -266,15 +265,10 @@ nv50_crtc_set_clock(struct drm_device *dev, int head, int pclk)
{
struct drm_nouveau_private *dev_priv = dev->dev_private;
struct pll_lims pll;
- uint32_t reg, reg1, reg2;
+ uint32_t reg1, reg2;
int ret, N1, M1, N2, M2, P;
- if (dev_priv->chipset < NV_C0)
- reg = NV50_PDISPLAY_CRTC_CLK_CTRL1(head);
- else
- reg = 0x614140 + (head * 0x800);
-
- ret = get_pll_limits(dev, reg, &pll);
+ ret = get_pll_limits(dev, PLL_VPLL0 + head, &pll);
if (ret)
return ret;
@@ -286,11 +280,11 @@ nv50_crtc_set_clock(struct drm_device *dev, int head, int pclk)
NV_DEBUG(dev, "pclk %d out %d NM1 %d %d NM2 %d %d P %d\n",
pclk, ret, N1, M1, N2, M2, P);
- reg1 = nv_rd32(dev, reg + 4) & 0xff00ff00;
- reg2 = nv_rd32(dev, reg + 8) & 0x8000ff00;
- nv_wr32(dev, reg, 0x10000611);
- nv_wr32(dev, reg + 4, reg1 | (M1 << 16) | N1);
- nv_wr32(dev, reg + 8, reg2 | (P << 28) | (M2 << 16) | N2);
+ reg1 = nv_rd32(dev, pll.reg + 4) & 0xff00ff00;
+ reg2 = nv_rd32(dev, pll.reg + 8) & 0x8000ff00;
+ nv_wr32(dev, pll.reg + 0, 0x10000611);
+ nv_wr32(dev, pll.reg + 4, reg1 | (M1 << 16) | N1);
+ nv_wr32(dev, pll.reg + 8, reg2 | (P << 28) | (M2 << 16) | N2);
} else
if (dev_priv->chipset < NV_C0) {
ret = nv50_calc_pll2(dev, &pll, pclk, &N1, &N2, &M1, &P);
@@ -300,10 +294,10 @@ nv50_crtc_set_clock(struct drm_device *dev, int head, int pclk)
NV_DEBUG(dev, "pclk %d out %d N %d fN 0x%04x M %d P %d\n",
pclk, ret, N1, N2, M1, P);
- reg1 = nv_rd32(dev, reg + 4) & 0xffc00000;
- nv_wr32(dev, reg, 0x50000610);
- nv_wr32(dev, reg + 4, reg1 | (P << 16) | (M1 << 8) | N1);
- nv_wr32(dev, reg + 8, N2);
+ reg1 = nv_rd32(dev, pll.reg + 4) & 0xffc00000;
+ nv_wr32(dev, pll.reg + 0, 0x50000610);
+ nv_wr32(dev, pll.reg + 4, reg1 | (P << 16) | (M1 << 8) | N1);
+ nv_wr32(dev, pll.reg + 8, N2);
} else {
ret = nv50_calc_pll2(dev, &pll, pclk, &N1, &N2, &M1, &P);
if (ret <= 0)
@@ -312,9 +306,9 @@ nv50_crtc_set_clock(struct drm_device *dev, int head, int pclk)
NV_DEBUG(dev, "pclk %d out %d N %d fN 0x%04x M %d P %d\n",
pclk, ret, N1, N2, M1, P);
- nv_mask(dev, reg + 0x0c, 0x00000000, 0x00000100);
- nv_wr32(dev, reg + 0x04, (P << 16) | (N1 << 8) | M1);
- nv_wr32(dev, reg + 0x10, N2 << 16);
+ nv_mask(dev, pll.reg + 0x0c, 0x00000000, 0x00000100);
+ nv_wr32(dev, pll.reg + 0x04, (P << 16) | (N1 << 8) | M1);
+ nv_wr32(dev, pll.reg + 0x10, N2 << 16);
}
return 0;
@@ -338,7 +332,9 @@ nv50_crtc_destroy(struct drm_crtc *crtc)
nv50_cursor_fini(nv_crtc);
+ nouveau_bo_unmap(nv_crtc->lut.nvbo);
nouveau_bo_ref(NULL, &nv_crtc->lut.nvbo);
+ nouveau_bo_unmap(nv_crtc->cursor.nvbo);
nouveau_bo_ref(NULL, &nv_crtc->cursor.nvbo);
kfree(nv_crtc->mode);
kfree(nv_crtc);
@@ -491,8 +487,9 @@ nv50_crtc_mode_fixup(struct drm_crtc *crtc, struct drm_display_mode *mode,
}
static int
-nv50_crtc_do_mode_set_base(struct drm_crtc *crtc, int x, int y,
- struct drm_framebuffer *old_fb, bool update)
+nv50_crtc_do_mode_set_base(struct drm_crtc *crtc,
+ struct drm_framebuffer *passed_fb,
+ int x, int y, bool update, bool atomic)
{
struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc);
struct drm_device *dev = nv_crtc->base.dev;
@@ -504,6 +501,28 @@ nv50_crtc_do_mode_set_base(struct drm_crtc *crtc, int x, int y,
NV_DEBUG_KMS(dev, "index %d\n", nv_crtc->index);
+ /* If atomic, we want to switch to the fb we were passed, so
+ * now we update pointers to do that. (We don't pin; just
+ * assume we're already pinned and update the base address.)
+ */
+ if (atomic) {
+ drm_fb = passed_fb;
+ fb = nouveau_framebuffer(passed_fb);
+ }
+ else {
+ /* If not atomic, we can go ahead and pin, and unpin the
+ * old fb we were passed.
+ */
+ ret = nouveau_bo_pin(fb->nvbo, TTM_PL_FLAG_VRAM);
+ if (ret)
+ return ret;
+
+ if (passed_fb) {
+ struct nouveau_framebuffer *ofb = nouveau_framebuffer(passed_fb);
+ nouveau_bo_unpin(ofb->nvbo);
+ }
+ }
+
switch (drm_fb->depth) {
case 8:
format = NV50_EVO_CRTC_FB_DEPTH_8;
@@ -526,17 +545,8 @@ nv50_crtc_do_mode_set_base(struct drm_crtc *crtc, int x, int y,
return -EINVAL;
}
- ret = nouveau_bo_pin(fb->nvbo, TTM_PL_FLAG_VRAM);
- if (ret)
- return ret;
-
- if (old_fb) {
- struct nouveau_framebuffer *ofb = nouveau_framebuffer(old_fb);
- nouveau_bo_unpin(ofb->nvbo);
- }
-
nv_crtc->fb.offset = fb->nvbo->bo.offset - dev_priv->vm_vram_base;
- nv_crtc->fb.tile_flags = fb->nvbo->tile_flags;
+ nv_crtc->fb.tile_flags = nouveau_bo_tile_layout(fb->nvbo);
nv_crtc->fb.cpp = drm_fb->bits_per_pixel / 8;
if (!nv_crtc->fb.blanked && dev_priv->chipset != 0x50) {
ret = RING_SPACE(evo, 2);
@@ -568,7 +578,7 @@ nv50_crtc_do_mode_set_base(struct drm_crtc *crtc, int x, int y,
fb->nvbo->tile_mode);
}
if (dev_priv->chipset == 0x50)
- OUT_RING(evo, (fb->nvbo->tile_flags << 8) | format);
+ OUT_RING(evo, (nv_crtc->fb.tile_flags << 8) | format);
else
OUT_RING(evo, format);
@@ -685,14 +695,22 @@ nv50_crtc_mode_set(struct drm_crtc *crtc, struct drm_display_mode *mode,
nv_crtc->set_dither(nv_crtc, nv_connector->use_dithering, false);
nv_crtc->set_scale(nv_crtc, nv_connector->scaling_mode, false);
- return nv50_crtc_do_mode_set_base(crtc, x, y, old_fb, false);
+ return nv50_crtc_do_mode_set_base(crtc, old_fb, x, y, false, false);
}
static int
nv50_crtc_mode_set_base(struct drm_crtc *crtc, int x, int y,
struct drm_framebuffer *old_fb)
{
- return nv50_crtc_do_mode_set_base(crtc, x, y, old_fb, true);
+ return nv50_crtc_do_mode_set_base(crtc, old_fb, x, y, true, false);
+}
+
+static int
+nv50_crtc_mode_set_base_atomic(struct drm_crtc *crtc,
+ struct drm_framebuffer *fb,
+ int x, int y, enum mode_set_atomic state)
+{
+ return nv50_crtc_do_mode_set_base(crtc, fb, x, y, true, true);
}
static const struct drm_crtc_helper_funcs nv50_crtc_helper_funcs = {
@@ -702,6 +720,7 @@ static const struct drm_crtc_helper_funcs nv50_crtc_helper_funcs = {
.mode_fixup = nv50_crtc_mode_fixup,
.mode_set = nv50_crtc_mode_set,
.mode_set_base = nv50_crtc_mode_set_base,
+ .mode_set_base_atomic = nv50_crtc_mode_set_base_atomic,
.load_lut = nv50_crtc_lut_load,
};
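For orientation, the split introduced above comes down to one extra flag: the atomic entry point skips pinning entirely (the in-code comment spells out the "assume we're already pinned" part), while the normal path keeps the pin-new/unpin-old behaviour. A minimal sketch of that policy with hypothetical names, not the driver's actual types:

	#include <stdbool.h>

	struct fb_ref { int pin_count; };

	/* hypothetical illustration of the pin policy in nv50_crtc_do_mode_set_base() */
	static int switch_scanout(struct fb_ref *new_fb, struct fb_ref *old_fb, bool atomic)
	{
		if (!atomic) {
			new_fb->pin_count++;		/* pin the incoming buffer  */
			if (old_fb)
				old_fb->pin_count--;	/* release the one replaced */
		}
		/* atomic callers assume new_fb is already pinned */
		return 0;
	}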
diff --git a/drivers/gpu/drm/nouveau/nv50_cursor.c b/drivers/gpu/drm/nouveau/nv50_cursor.c
index 03ad7ab14f09..1b9ce3021aa3 100644
--- a/drivers/gpu/drm/nouveau/nv50_cursor.c
+++ b/drivers/gpu/drm/nouveau/nv50_cursor.c
@@ -147,7 +147,7 @@ nv50_cursor_fini(struct nouveau_crtc *nv_crtc)
NV_DEBUG_KMS(dev, "\n");
nv_wr32(dev, NV50_PDISPLAY_CURSOR_CURSOR_CTRL2(idx), 0);
- if (!nv_wait(NV50_PDISPLAY_CURSOR_CURSOR_CTRL2(idx),
+ if (!nv_wait(dev, NV50_PDISPLAY_CURSOR_CURSOR_CTRL2(idx),
NV50_PDISPLAY_CURSOR_CURSOR_CTRL2_STATUS, 0)) {
NV_ERROR(dev, "timeout: CURSOR_CTRL2_STATUS == 0\n");
NV_ERROR(dev, "CURSOR_CTRL2 = 0x%08x\n",
diff --git a/drivers/gpu/drm/nouveau/nv50_dac.c b/drivers/gpu/drm/nouveau/nv50_dac.c
index 1bc085962945..875414b09ade 100644
--- a/drivers/gpu/drm/nouveau/nv50_dac.c
+++ b/drivers/gpu/drm/nouveau/nv50_dac.c
@@ -79,7 +79,7 @@ nv50_dac_detect(struct drm_encoder *encoder, struct drm_connector *connector)
nv_wr32(dev, NV50_PDISPLAY_DAC_DPMS_CTRL(or),
0x00150000 | NV50_PDISPLAY_DAC_DPMS_CTRL_PENDING);
- if (!nv_wait(NV50_PDISPLAY_DAC_DPMS_CTRL(or),
+ if (!nv_wait(dev, NV50_PDISPLAY_DAC_DPMS_CTRL(or),
NV50_PDISPLAY_DAC_DPMS_CTRL_PENDING, 0)) {
NV_ERROR(dev, "timeout: DAC_DPMS_CTRL_PENDING(%d) == 0\n", or);
NV_ERROR(dev, "DAC_DPMS_CTRL(%d) = 0x%08x\n", or,
@@ -130,7 +130,7 @@ nv50_dac_dpms(struct drm_encoder *encoder, int mode)
NV_DEBUG_KMS(dev, "or %d mode %d\n", or, mode);
/* wait for it to be done */
- if (!nv_wait(NV50_PDISPLAY_DAC_DPMS_CTRL(or),
+ if (!nv_wait(dev, NV50_PDISPLAY_DAC_DPMS_CTRL(or),
NV50_PDISPLAY_DAC_DPMS_CTRL_PENDING, 0)) {
NV_ERROR(dev, "timeout: DAC_DPMS_CTRL_PENDING(%d) == 0\n", or);
NV_ERROR(dev, "DAC_DPMS_CTRL(%d) = 0x%08x\n", or,
diff --git a/drivers/gpu/drm/nouveau/nv50_display.c b/drivers/gpu/drm/nouveau/nv50_display.c
index 612fa6d6a0cb..f624c611ddea 100644
--- a/drivers/gpu/drm/nouveau/nv50_display.c
+++ b/drivers/gpu/drm/nouveau/nv50_display.c
@@ -30,8 +30,22 @@
#include "nouveau_connector.h"
#include "nouveau_fb.h"
#include "nouveau_fbcon.h"
+#include "nouveau_ramht.h"
#include "drm_crtc_helper.h"
+static inline int
+nv50_sor_nr(struct drm_device *dev)
+{
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+
+ if (dev_priv->chipset < 0x90 ||
+ dev_priv->chipset == 0x92 ||
+ dev_priv->chipset == 0xa0)
+ return 2;
+
+ return 4;
+}
+
static void
nv50_evo_channel_del(struct nouveau_channel **pchan)
{
@@ -42,6 +56,7 @@ nv50_evo_channel_del(struct nouveau_channel **pchan)
*pchan = NULL;
nouveau_gpuobj_channel_takedown(chan);
+ nouveau_bo_unmap(chan->pushbuf_bo);
nouveau_bo_ref(NULL, &chan->pushbuf_bo);
if (chan->user)
@@ -65,23 +80,23 @@ nv50_evo_dmaobj_new(struct nouveau_channel *evo, uint32_t class, uint32_t name,
return ret;
obj->engine = NVOBJ_ENGINE_DISPLAY;
- ret = nouveau_gpuobj_ref_add(dev, evo, name, obj, NULL);
- if (ret) {
- nouveau_gpuobj_del(dev, &obj);
- return ret;
- }
-
- nv_wo32(dev, obj, 0, (tile_flags << 22) | (magic_flags << 16) | class);
- nv_wo32(dev, obj, 1, limit);
- nv_wo32(dev, obj, 2, offset);
- nv_wo32(dev, obj, 3, 0x00000000);
- nv_wo32(dev, obj, 4, 0x00000000);
+ nv_wo32(obj, 0, (tile_flags << 22) | (magic_flags << 16) | class);
+ nv_wo32(obj, 4, limit);
+ nv_wo32(obj, 8, offset);
+ nv_wo32(obj, 12, 0x00000000);
+ nv_wo32(obj, 16, 0x00000000);
if (dev_priv->card_type < NV_C0)
- nv_wo32(dev, obj, 5, 0x00010000);
+ nv_wo32(obj, 20, 0x00010000);
else
- nv_wo32(dev, obj, 5, 0x00020000);
+ nv_wo32(obj, 20, 0x00020000);
dev_priv->engine.instmem.flush(dev);
+ ret = nouveau_ramht_insert(evo, name, obj);
+ nouveau_gpuobj_ref(NULL, &obj);
+ if (ret) {
+ return ret;
+ }
+
return 0;
}
@@ -89,6 +104,7 @@ static int
nv50_evo_channel_new(struct drm_device *dev, struct nouveau_channel **pchan)
{
struct drm_nouveau_private *dev_priv = dev->dev_private;
+ struct nouveau_gpuobj *ramht = NULL;
struct nouveau_channel *chan;
int ret;
@@ -102,32 +118,35 @@ nv50_evo_channel_new(struct drm_device *dev, struct nouveau_channel **pchan)
chan->user_get = 4;
chan->user_put = 0;
- INIT_LIST_HEAD(&chan->ramht_refs);
-
- ret = nouveau_gpuobj_new_ref(dev, NULL, NULL, 0, 32768, 0x1000,
- NVOBJ_FLAG_ZERO_ALLOC, &chan->ramin);
+ ret = nouveau_gpuobj_new(dev, NULL, 32768, 0x1000,
+ NVOBJ_FLAG_ZERO_ALLOC, &chan->ramin);
if (ret) {
NV_ERROR(dev, "Error allocating EVO channel memory: %d\n", ret);
nv50_evo_channel_del(pchan);
return ret;
}
- ret = drm_mm_init(&chan->ramin_heap,
- chan->ramin->gpuobj->im_pramin->start, 32768);
+ ret = drm_mm_init(&chan->ramin_heap, 0, 32768);
if (ret) {
NV_ERROR(dev, "Error initialising EVO PRAMIN heap: %d\n", ret);
nv50_evo_channel_del(pchan);
return ret;
}
- ret = nouveau_gpuobj_new_ref(dev, chan, chan, 0, 4096, 16,
- 0, &chan->ramht);
+ ret = nouveau_gpuobj_new(dev, chan, 4096, 16, 0, &ramht);
if (ret) {
NV_ERROR(dev, "Unable to allocate EVO RAMHT: %d\n", ret);
nv50_evo_channel_del(pchan);
return ret;
}
+ ret = nouveau_ramht_new(dev, ramht, &chan->ramht);
+ nouveau_gpuobj_ref(NULL, &ramht);
+ if (ret) {
+ nv50_evo_channel_del(pchan);
+ return ret;
+ }
+
if (dev_priv->chipset != 0x50) {
ret = nv50_evo_dmaobj_new(chan, 0x3d, NvEvoFB16, 0x70, 0x19,
0, 0xffffffff);
@@ -227,11 +246,11 @@ nv50_display_init(struct drm_device *dev)
nv_wr32(dev, 0x006101d0 + (i * 0x04), val);
}
/* SOR */
- for (i = 0; i < 4; i++) {
+ for (i = 0; i < nv50_sor_nr(dev); i++) {
val = nv_rd32(dev, 0x0061c000 + (i * 0x800));
nv_wr32(dev, 0x006101e0 + (i * 0x04), val);
}
- /* Something not yet in use, tv-out maybe. */
+ /* EXT */
for (i = 0; i < 3; i++) {
val = nv_rd32(dev, 0x0061e000 + (i * 0x800));
nv_wr32(dev, 0x006101f0 + (i * 0x04), val);
@@ -260,7 +279,7 @@ nv50_display_init(struct drm_device *dev)
if (nv_rd32(dev, NV50_PDISPLAY_INTR_1) & 0x100) {
nv_wr32(dev, NV50_PDISPLAY_INTR_1, 0x100);
nv_wr32(dev, 0x006194e8, nv_rd32(dev, 0x006194e8) & ~1);
- if (!nv_wait(0x006194e8, 2, 0)) {
+ if (!nv_wait(dev, 0x006194e8, 2, 0)) {
NV_ERROR(dev, "timeout: (0x6194e8 & 2) != 0\n");
NV_ERROR(dev, "0x6194e8 = 0x%08x\n",
nv_rd32(dev, 0x6194e8));
@@ -291,7 +310,8 @@ nv50_display_init(struct drm_device *dev)
nv_wr32(dev, NV50_PDISPLAY_CTRL_STATE, NV50_PDISPLAY_CTRL_STATE_ENABLE);
nv_wr32(dev, NV50_PDISPLAY_CHANNEL_STAT(0), 0x1000b03);
- if (!nv_wait(NV50_PDISPLAY_CHANNEL_STAT(0), 0x40000000, 0x40000000)) {
+ if (!nv_wait(dev, NV50_PDISPLAY_CHANNEL_STAT(0),
+ 0x40000000, 0x40000000)) {
NV_ERROR(dev, "timeout: (0x610200 & 0x40000000) == 0x40000000\n");
NV_ERROR(dev, "0x610200 = 0x%08x\n",
nv_rd32(dev, NV50_PDISPLAY_CHANNEL_STAT(0)));
@@ -300,7 +320,7 @@ nv50_display_init(struct drm_device *dev)
for (i = 0; i < 2; i++) {
nv_wr32(dev, NV50_PDISPLAY_CURSOR_CURSOR_CTRL2(i), 0x2000);
- if (!nv_wait(NV50_PDISPLAY_CURSOR_CURSOR_CTRL2(i),
+ if (!nv_wait(dev, NV50_PDISPLAY_CURSOR_CURSOR_CTRL2(i),
NV50_PDISPLAY_CURSOR_CURSOR_CTRL2_STATUS, 0)) {
NV_ERROR(dev, "timeout: CURSOR_CTRL2_STATUS == 0\n");
NV_ERROR(dev, "CURSOR_CTRL2 = 0x%08x\n",
@@ -310,7 +330,7 @@ nv50_display_init(struct drm_device *dev)
nv_wr32(dev, NV50_PDISPLAY_CURSOR_CURSOR_CTRL2(i),
NV50_PDISPLAY_CURSOR_CURSOR_CTRL2_ON);
- if (!nv_wait(NV50_PDISPLAY_CURSOR_CURSOR_CTRL2(i),
+ if (!nv_wait(dev, NV50_PDISPLAY_CURSOR_CURSOR_CTRL2(i),
NV50_PDISPLAY_CURSOR_CURSOR_CTRL2_STATUS,
NV50_PDISPLAY_CURSOR_CURSOR_CTRL2_STATUS_ACTIVE)) {
NV_ERROR(dev, "timeout: "
@@ -321,16 +341,16 @@ nv50_display_init(struct drm_device *dev)
}
}
- nv_wr32(dev, NV50_PDISPLAY_OBJECTS, (evo->ramin->instance >> 8) | 9);
+ nv_wr32(dev, NV50_PDISPLAY_OBJECTS, (evo->ramin->vinst >> 8) | 9);
/* initialise fifo */
nv_wr32(dev, NV50_PDISPLAY_CHANNEL_DMA_CB(0),
- ((evo->pushbuf_bo->bo.mem.mm_node->start << PAGE_SHIFT) >> 8) |
+ ((evo->pushbuf_bo->bo.mem.start << PAGE_SHIFT) >> 8) |
NV50_PDISPLAY_CHANNEL_DMA_CB_LOCATION_VRAM |
NV50_PDISPLAY_CHANNEL_DMA_CB_VALID);
nv_wr32(dev, NV50_PDISPLAY_CHANNEL_UNK2(0), 0x00010000);
nv_wr32(dev, NV50_PDISPLAY_CHANNEL_UNK3(0), 0x00000002);
- if (!nv_wait(0x610200, 0x80000000, 0x00000000)) {
+ if (!nv_wait(dev, 0x610200, 0x80000000, 0x00000000)) {
NV_ERROR(dev, "timeout: (0x610200 & 0x80000000) == 0\n");
NV_ERROR(dev, "0x610200 = 0x%08x\n", nv_rd32(dev, 0x610200));
return -EBUSY;
@@ -370,7 +390,7 @@ nv50_display_init(struct drm_device *dev)
BEGIN_RING(evo, 0, NV50_EVO_CRTC(0, UNK082C), 1);
OUT_RING(evo, 0);
FIRE_RING(evo);
- if (!nv_wait(0x640004, 0xffffffff, evo->dma.put << 2))
+ if (!nv_wait(dev, 0x640004, 0xffffffff, evo->dma.put << 2))
NV_ERROR(dev, "evo pushbuf stalled\n");
/* enable clock change interrupts. */
@@ -424,7 +444,7 @@ static int nv50_display_disable(struct drm_device *dev)
continue;
nv_wr32(dev, NV50_PDISPLAY_INTR_1, mask);
- if (!nv_wait(NV50_PDISPLAY_INTR_1, mask, mask)) {
+ if (!nv_wait(dev, NV50_PDISPLAY_INTR_1, mask, mask)) {
NV_ERROR(dev, "timeout: (0x610024 & 0x%08x) == "
"0x%08x\n", mask, mask);
NV_ERROR(dev, "0x610024 = 0x%08x\n",
@@ -434,14 +454,14 @@ static int nv50_display_disable(struct drm_device *dev)
nv_wr32(dev, NV50_PDISPLAY_CHANNEL_STAT(0), 0);
nv_wr32(dev, NV50_PDISPLAY_CTRL_STATE, 0);
- if (!nv_wait(NV50_PDISPLAY_CHANNEL_STAT(0), 0x1e0000, 0)) {
+ if (!nv_wait(dev, NV50_PDISPLAY_CHANNEL_STAT(0), 0x1e0000, 0)) {
NV_ERROR(dev, "timeout: (0x610200 & 0x1e0000) == 0\n");
NV_ERROR(dev, "0x610200 = 0x%08x\n",
nv_rd32(dev, NV50_PDISPLAY_CHANNEL_STAT(0)));
}
for (i = 0; i < 3; i++) {
- if (!nv_wait(NV50_PDISPLAY_SOR_DPMS_STATE(i),
+ if (!nv_wait(dev, NV50_PDISPLAY_SOR_DPMS_STATE(i),
NV50_PDISPLAY_SOR_DPMS_STATE_WAIT, 0)) {
NV_ERROR(dev, "timeout: SOR_DPMS_STATE_WAIT(%d) == 0\n", i);
NV_ERROR(dev, "SOR_DPMS_STATE(%d) = 0x%08x\n", i,
@@ -710,7 +730,7 @@ nv50_display_unk10_handler(struct drm_device *dev)
or = i;
}
- for (i = 0; type == OUTPUT_ANY && i < 4; i++) {
+ for (i = 0; type == OUTPUT_ANY && i < nv50_sor_nr(dev); i++) {
if (dev_priv->chipset < 0x90 ||
dev_priv->chipset == 0x92 ||
dev_priv->chipset == 0xa0)
@@ -841,7 +861,7 @@ nv50_display_unk20_handler(struct drm_device *dev)
or = i;
}
- for (i = 0; type == OUTPUT_ANY && i < 4; i++) {
+ for (i = 0; type == OUTPUT_ANY && i < nv50_sor_nr(dev); i++) {
if (dev_priv->chipset < 0x90 ||
dev_priv->chipset == 0x92 ||
dev_priv->chipset == 0xa0)
@@ -1012,11 +1032,18 @@ nv50_display_irq_hotplug_bh(struct work_struct *work)
struct drm_connector *connector;
const uint32_t gpio_reg[4] = { 0xe104, 0xe108, 0xe280, 0xe284 };
uint32_t unplug_mask, plug_mask, change_mask;
- uint32_t hpd0, hpd1 = 0;
+ uint32_t hpd0, hpd1;
+
+ spin_lock_irq(&dev_priv->hpd_state.lock);
+ hpd0 = dev_priv->hpd_state.hpd0_bits;
+ dev_priv->hpd_state.hpd0_bits = 0;
+ hpd1 = dev_priv->hpd_state.hpd1_bits;
+ dev_priv->hpd_state.hpd1_bits = 0;
+ spin_unlock_irq(&dev_priv->hpd_state.lock);
- hpd0 = nv_rd32(dev, 0xe054) & nv_rd32(dev, 0xe050);
+ hpd0 &= nv_rd32(dev, 0xe050);
if (dev_priv->chipset >= 0x90)
- hpd1 = nv_rd32(dev, 0xe074) & nv_rd32(dev, 0xe070);
+ hpd1 &= nv_rd32(dev, 0xe070);
plug_mask = (hpd0 & 0x0000ffff) | (hpd1 << 16);
unplug_mask = (hpd0 >> 16) | (hpd1 & 0xffff0000);
@@ -1058,10 +1085,6 @@ nv50_display_irq_hotplug_bh(struct work_struct *work)
helper->dpms(connector->encoder, DRM_MODE_DPMS_OFF);
}
- nv_wr32(dev, 0xe054, nv_rd32(dev, 0xe054));
- if (dev_priv->chipset >= 0x90)
- nv_wr32(dev, 0xe074, nv_rd32(dev, 0xe074));
-
drm_helper_hpd_irq_event(dev);
}
@@ -1072,8 +1095,22 @@ nv50_display_irq_handler(struct drm_device *dev)
uint32_t delayed = 0;
if (nv_rd32(dev, NV50_PMC_INTR_0) & NV50_PMC_INTR_0_HOTPLUG) {
- if (!work_pending(&dev_priv->hpd_work))
- queue_work(dev_priv->wq, &dev_priv->hpd_work);
+ uint32_t hpd0_bits, hpd1_bits = 0;
+
+ hpd0_bits = nv_rd32(dev, 0xe054);
+ nv_wr32(dev, 0xe054, hpd0_bits);
+
+ if (dev_priv->chipset >= 0x90) {
+ hpd1_bits = nv_rd32(dev, 0xe074);
+ nv_wr32(dev, 0xe074, hpd1_bits);
+ }
+
+ spin_lock(&dev_priv->hpd_state.lock);
+ dev_priv->hpd_state.hpd0_bits |= hpd0_bits;
+ dev_priv->hpd_state.hpd1_bits |= hpd1_bits;
+ spin_unlock(&dev_priv->hpd_state.lock);
+
+ queue_work(dev_priv->wq, &dev_priv->hpd_work);
}
while (nv_rd32(dev, NV50_PMC_INTR_0) & NV50_PMC_INTR_0_DISPLAY) {
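The two hotplug hunks above replace "read the status registers from the work item" with a latch: the IRQ handler acks the hardware bits and accumulates them under dev_priv->hpd_state.lock, and the bottom half consumes and clears the accumulated copy. A minimal, self-contained sketch of that producer/consumer pattern (everything except the spinlock calls is a hypothetical name):

	#include <linux/spinlock.h>
	#include <linux/types.h>

	struct hpd_latch {
		spinlock_t lock;
		u32 pending;
	};

	/* hard-IRQ context: hardware already acked, accumulate the bits for later */
	static void hpd_latch_irq(struct hpd_latch *l, u32 acked_bits)
	{
		spin_lock(&l->lock);
		l->pending |= acked_bits;
		spin_unlock(&l->lock);
	}

	/* process context (workqueue): take and clear the accumulated bits */
	static u32 hpd_latch_take(struct hpd_latch *l)
	{
		u32 bits;

		spin_lock_irq(&l->lock);
		bits = l->pending;
		l->pending = 0;
		spin_unlock_irq(&l->lock);
		return bits;
	}

This mirrors the locking choice in the patch: plain spin_lock() in the handler, spin_lock_irq() in the work item so the handler cannot race the read-and-clear.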
diff --git a/drivers/gpu/drm/nouveau/nv50_fb.c b/drivers/gpu/drm/nouveau/nv50_fb.c
index 32611bd30e6d..cd1988b15d2c 100644
--- a/drivers/gpu/drm/nouveau/nv50_fb.c
+++ b/drivers/gpu/drm/nouveau/nv50_fb.c
@@ -20,6 +20,7 @@ nv50_fb_init(struct drm_device *dev)
case 0x50:
nv_wr32(dev, 0x100c90, 0x0707ff);
break;
+ case 0xa3:
case 0xa5:
case 0xa8:
nv_wr32(dev, 0x100c90, 0x0d0fff);
@@ -36,3 +37,42 @@ void
nv50_fb_takedown(struct drm_device *dev)
{
}
+
+void
+nv50_fb_vm_trap(struct drm_device *dev, int display, const char *name)
+{
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ u32 trap[6], idx, chinst;
+ int i, ch;
+
+ idx = nv_rd32(dev, 0x100c90);
+ if (!(idx & 0x80000000))
+ return;
+ idx &= 0x00ffffff;
+
+ for (i = 0; i < 6; i++) {
+ nv_wr32(dev, 0x100c90, idx | i << 24);
+ trap[i] = nv_rd32(dev, 0x100c94);
+ }
+ nv_wr32(dev, 0x100c90, idx | 0x80000000);
+
+ if (!display)
+ return;
+
+ chinst = (trap[2] << 16) | trap[1];
+ for (ch = 0; ch < dev_priv->engine.fifo.channels; ch++) {
+ struct nouveau_channel *chan = dev_priv->fifos[ch];
+
+ if (!chan || !chan->ramin)
+ continue;
+
+ if (chinst == chan->ramin->vinst >> 12)
+ break;
+ }
+
+ NV_INFO(dev, "%s - VM: Trapped %s at %02x%04x%04x status %08x "
+ "channel %d (0x%08x)\n",
+ name, (trap[5] & 0x100 ? "read" : "write"),
+ trap[5] & 0xff, trap[4] & 0xffff, trap[3] & 0xffff,
+ trap[0], ch, chinst);
+}
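The channel lookup in the new nv50_fb_vm_trap() compares the instance address reported by the trap (two 16-bit halves, in 4 KiB units) against each channel's ramin->vinst. The comparison, restated as a hypothetical standalone helper for clarity:

	#include <stdbool.h>
	#include <stdint.h>

	/* trap1/trap2 are the low/high halves read back through 0x100c94 */
	static bool chan_matches_trap(uint64_t ramin_vinst, uint32_t trap1, uint32_t trap2)
	{
		uint32_t chinst = (trap2 << 16) | trap1;

		return chinst == (uint32_t)(ramin_vinst >> 12);
	}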
diff --git a/drivers/gpu/drm/nouveau/nv50_fbcon.c b/drivers/gpu/drm/nouveau/nv50_fbcon.c
index 6bf025c6fc6f..6dcf048eddbc 100644
--- a/drivers/gpu/drm/nouveau/nv50_fbcon.c
+++ b/drivers/gpu/drm/nouveau/nv50_fbcon.c
@@ -1,6 +1,7 @@
#include "drmP.h"
#include "nouveau_drv.h"
#include "nouveau_dma.h"
+#include "nouveau_ramht.h"
#include "nouveau_fbcon.h"
void
@@ -193,7 +194,8 @@ nv50_fbcon_accel_init(struct fb_info *info)
if (ret)
return ret;
- ret = nouveau_gpuobj_ref_add(dev, dev_priv->channel, Nv2D, eng2d, NULL);
+ ret = nouveau_ramht_insert(dev_priv->channel, Nv2D, eng2d);
+ nouveau_gpuobj_ref(NULL, &eng2d);
if (ret)
return ret;
diff --git a/drivers/gpu/drm/nouveau/nv50_fifo.c b/drivers/gpu/drm/nouveau/nv50_fifo.c
index fb0281ae8f90..1da65bd60c10 100644
--- a/drivers/gpu/drm/nouveau/nv50_fifo.c
+++ b/drivers/gpu/drm/nouveau/nv50_fifo.c
@@ -27,13 +27,14 @@
#include "drmP.h"
#include "drm.h"
#include "nouveau_drv.h"
+#include "nouveau_ramht.h"
static void
nv50_fifo_playlist_update(struct drm_device *dev)
{
struct drm_nouveau_private *dev_priv = dev->dev_private;
struct nouveau_fifo_engine *pfifo = &dev_priv->engine.fifo;
- struct nouveau_gpuobj_ref *cur;
+ struct nouveau_gpuobj *cur;
int i, nr;
NV_DEBUG(dev, "\n");
@@ -43,12 +44,14 @@ nv50_fifo_playlist_update(struct drm_device *dev)
/* We never schedule channel 0 or 127 */
for (i = 1, nr = 0; i < 127; i++) {
- if (dev_priv->fifos[i] && dev_priv->fifos[i]->ramfc)
- nv_wo32(dev, cur->gpuobj, nr++, i);
+ if (dev_priv->fifos[i] && dev_priv->fifos[i]->ramfc) {
+ nv_wo32(cur, (nr * 4), i);
+ nr++;
+ }
}
dev_priv->engine.instmem.flush(dev);
- nv_wr32(dev, 0x32f4, cur->instance >> 12);
+ nv_wr32(dev, 0x32f4, cur->vinst >> 12);
nv_wr32(dev, 0x32ec, nr);
nv_wr32(dev, 0x2500, 0x101);
}
@@ -63,9 +66,9 @@ nv50_fifo_channel_enable(struct drm_device *dev, int channel)
NV_DEBUG(dev, "ch%d\n", channel);
if (dev_priv->chipset == 0x50)
- inst = chan->ramfc->instance >> 12;
+ inst = chan->ramfc->vinst >> 12;
else
- inst = chan->ramfc->instance >> 8;
+ inst = chan->ramfc->vinst >> 8;
nv_wr32(dev, NV50_PFIFO_CTX_TABLE(channel), inst |
NV50_PFIFO_CTX_TABLE_CHANNEL_ENABLED);
@@ -163,19 +166,19 @@ nv50_fifo_init(struct drm_device *dev)
goto just_reset;
}
- ret = nouveau_gpuobj_new_ref(dev, NULL, NULL, 0, 128*4, 0x1000,
- NVOBJ_FLAG_ZERO_ALLOC,
- &pfifo->playlist[0]);
+ ret = nouveau_gpuobj_new(dev, NULL, 128*4, 0x1000,
+ NVOBJ_FLAG_ZERO_ALLOC,
+ &pfifo->playlist[0]);
if (ret) {
NV_ERROR(dev, "error creating playlist 0: %d\n", ret);
return ret;
}
- ret = nouveau_gpuobj_new_ref(dev, NULL, NULL, 0, 128*4, 0x1000,
- NVOBJ_FLAG_ZERO_ALLOC,
- &pfifo->playlist[1]);
+ ret = nouveau_gpuobj_new(dev, NULL, 128*4, 0x1000,
+ NVOBJ_FLAG_ZERO_ALLOC,
+ &pfifo->playlist[1]);
if (ret) {
- nouveau_gpuobj_ref_del(dev, &pfifo->playlist[0]);
+ nouveau_gpuobj_ref(NULL, &pfifo->playlist[0]);
NV_ERROR(dev, "error creating playlist 1: %d\n", ret);
return ret;
}
@@ -203,8 +206,8 @@ nv50_fifo_takedown(struct drm_device *dev)
if (!pfifo->playlist[0])
return;
- nouveau_gpuobj_ref_del(dev, &pfifo->playlist[0]);
- nouveau_gpuobj_ref_del(dev, &pfifo->playlist[1]);
+ nouveau_gpuobj_ref(NULL, &pfifo->playlist[0]);
+ nouveau_gpuobj_ref(NULL, &pfifo->playlist[1]);
}
int
@@ -226,59 +229,54 @@ nv50_fifo_create_context(struct nouveau_channel *chan)
NV_DEBUG(dev, "ch%d\n", chan->id);
if (dev_priv->chipset == 0x50) {
- uint32_t ramin_poffset = chan->ramin->gpuobj->im_pramin->start;
- uint32_t ramin_voffset = chan->ramin->gpuobj->im_backing_start;
-
- ret = nouveau_gpuobj_new_fake(dev, ramin_poffset, ramin_voffset,
- 0x100, NVOBJ_FLAG_ZERO_ALLOC |
- NVOBJ_FLAG_ZERO_FREE, &ramfc,
+ ret = nouveau_gpuobj_new_fake(dev, chan->ramin->pinst,
+ chan->ramin->vinst, 0x100,
+ NVOBJ_FLAG_ZERO_ALLOC |
+ NVOBJ_FLAG_ZERO_FREE,
&chan->ramfc);
if (ret)
return ret;
- ret = nouveau_gpuobj_new_fake(dev, ramin_poffset + 0x0400,
- ramin_voffset + 0x0400, 4096,
- 0, NULL, &chan->cache);
+ ret = nouveau_gpuobj_new_fake(dev, chan->ramin->pinst + 0x0400,
+ chan->ramin->vinst + 0x0400,
+ 4096, 0, &chan->cache);
if (ret)
return ret;
} else {
- ret = nouveau_gpuobj_new_ref(dev, chan, NULL, 0, 0x100, 256,
- NVOBJ_FLAG_ZERO_ALLOC |
- NVOBJ_FLAG_ZERO_FREE,
- &chan->ramfc);
+ ret = nouveau_gpuobj_new(dev, chan, 0x100, 256,
+ NVOBJ_FLAG_ZERO_ALLOC |
+ NVOBJ_FLAG_ZERO_FREE, &chan->ramfc);
if (ret)
return ret;
- ramfc = chan->ramfc->gpuobj;
- ret = nouveau_gpuobj_new_ref(dev, chan, NULL, 0, 4096, 1024,
- 0, &chan->cache);
+ ret = nouveau_gpuobj_new(dev, chan, 4096, 1024,
+ 0, &chan->cache);
if (ret)
return ret;
}
+ ramfc = chan->ramfc;
spin_lock_irqsave(&dev_priv->context_switch_lock, flags);
- nv_wo32(dev, ramfc, 0x48/4, chan->pushbuf->instance >> 4);
- nv_wo32(dev, ramfc, 0x80/4, (0 << 27) /* 4KiB */ |
- (4 << 24) /* SEARCH_FULL */ |
- (chan->ramht->instance >> 4));
- nv_wo32(dev, ramfc, 0x44/4, 0x2101ffff);
- nv_wo32(dev, ramfc, 0x60/4, 0x7fffffff);
- nv_wo32(dev, ramfc, 0x40/4, 0x00000000);
- nv_wo32(dev, ramfc, 0x7c/4, 0x30000001);
- nv_wo32(dev, ramfc, 0x78/4, 0x00000000);
- nv_wo32(dev, ramfc, 0x3c/4, 0x403f6078);
- nv_wo32(dev, ramfc, 0x50/4, chan->pushbuf_base +
- chan->dma.ib_base * 4);
- nv_wo32(dev, ramfc, 0x54/4, drm_order(chan->dma.ib_max + 1) << 16);
+ nv_wo32(ramfc, 0x48, chan->pushbuf->cinst >> 4);
+ nv_wo32(ramfc, 0x80, ((chan->ramht->bits - 9) << 27) |
+ (4 << 24) /* SEARCH_FULL */ |
+ (chan->ramht->gpuobj->cinst >> 4));
+ nv_wo32(ramfc, 0x44, 0x2101ffff);
+ nv_wo32(ramfc, 0x60, 0x7fffffff);
+ nv_wo32(ramfc, 0x40, 0x00000000);
+ nv_wo32(ramfc, 0x7c, 0x30000001);
+ nv_wo32(ramfc, 0x78, 0x00000000);
+ nv_wo32(ramfc, 0x3c, 0x403f6078);
+ nv_wo32(ramfc, 0x50, chan->pushbuf_base + chan->dma.ib_base * 4);
+ nv_wo32(ramfc, 0x54, drm_order(chan->dma.ib_max + 1) << 16);
if (dev_priv->chipset != 0x50) {
- nv_wo32(dev, chan->ramin->gpuobj, 0, chan->id);
- nv_wo32(dev, chan->ramin->gpuobj, 1,
- chan->ramfc->instance >> 8);
+ nv_wo32(chan->ramin, 0, chan->id);
+ nv_wo32(chan->ramin, 4, chan->ramfc->vinst >> 8);
- nv_wo32(dev, ramfc, 0x88/4, chan->cache->instance >> 10);
- nv_wo32(dev, ramfc, 0x98/4, chan->ramin->instance >> 12);
+ nv_wo32(ramfc, 0x88, chan->cache->vinst >> 10);
+ nv_wo32(ramfc, 0x98, chan->ramin->vinst >> 12);
}
dev_priv->engine.instmem.flush(dev);
@@ -293,12 +291,13 @@ void
nv50_fifo_destroy_context(struct nouveau_channel *chan)
{
struct drm_device *dev = chan->dev;
- struct nouveau_gpuobj_ref *ramfc = chan->ramfc;
+ struct nouveau_gpuobj *ramfc = NULL;
NV_DEBUG(dev, "ch%d\n", chan->id);
/* This will ensure the channel is seen as disabled. */
- chan->ramfc = NULL;
+ nouveau_gpuobj_ref(chan->ramfc, &ramfc);
+ nouveau_gpuobj_ref(NULL, &chan->ramfc);
nv50_fifo_channel_disable(dev, chan->id);
/* Dummy channel, also used on ch 127 */
@@ -306,8 +305,8 @@ nv50_fifo_destroy_context(struct nouveau_channel *chan)
nv50_fifo_channel_disable(dev, 127);
nv50_fifo_playlist_update(dev);
- nouveau_gpuobj_ref_del(dev, &ramfc);
- nouveau_gpuobj_ref_del(dev, &chan->cache);
+ nouveau_gpuobj_ref(NULL, &ramfc);
+ nouveau_gpuobj_ref(NULL, &chan->cache);
}
int
@@ -315,63 +314,63 @@ nv50_fifo_load_context(struct nouveau_channel *chan)
{
struct drm_device *dev = chan->dev;
struct drm_nouveau_private *dev_priv = dev->dev_private;
- struct nouveau_gpuobj *ramfc = chan->ramfc->gpuobj;
- struct nouveau_gpuobj *cache = chan->cache->gpuobj;
+ struct nouveau_gpuobj *ramfc = chan->ramfc;
+ struct nouveau_gpuobj *cache = chan->cache;
int ptr, cnt;
NV_DEBUG(dev, "ch%d\n", chan->id);
- nv_wr32(dev, 0x3330, nv_ro32(dev, ramfc, 0x00/4));
- nv_wr32(dev, 0x3334, nv_ro32(dev, ramfc, 0x04/4));
- nv_wr32(dev, 0x3240, nv_ro32(dev, ramfc, 0x08/4));
- nv_wr32(dev, 0x3320, nv_ro32(dev, ramfc, 0x0c/4));
- nv_wr32(dev, 0x3244, nv_ro32(dev, ramfc, 0x10/4));
- nv_wr32(dev, 0x3328, nv_ro32(dev, ramfc, 0x14/4));
- nv_wr32(dev, 0x3368, nv_ro32(dev, ramfc, 0x18/4));
- nv_wr32(dev, 0x336c, nv_ro32(dev, ramfc, 0x1c/4));
- nv_wr32(dev, 0x3370, nv_ro32(dev, ramfc, 0x20/4));
- nv_wr32(dev, 0x3374, nv_ro32(dev, ramfc, 0x24/4));
- nv_wr32(dev, 0x3378, nv_ro32(dev, ramfc, 0x28/4));
- nv_wr32(dev, 0x337c, nv_ro32(dev, ramfc, 0x2c/4));
- nv_wr32(dev, 0x3228, nv_ro32(dev, ramfc, 0x30/4));
- nv_wr32(dev, 0x3364, nv_ro32(dev, ramfc, 0x34/4));
- nv_wr32(dev, 0x32a0, nv_ro32(dev, ramfc, 0x38/4));
- nv_wr32(dev, 0x3224, nv_ro32(dev, ramfc, 0x3c/4));
- nv_wr32(dev, 0x324c, nv_ro32(dev, ramfc, 0x40/4));
- nv_wr32(dev, 0x2044, nv_ro32(dev, ramfc, 0x44/4));
- nv_wr32(dev, 0x322c, nv_ro32(dev, ramfc, 0x48/4));
- nv_wr32(dev, 0x3234, nv_ro32(dev, ramfc, 0x4c/4));
- nv_wr32(dev, 0x3340, nv_ro32(dev, ramfc, 0x50/4));
- nv_wr32(dev, 0x3344, nv_ro32(dev, ramfc, 0x54/4));
- nv_wr32(dev, 0x3280, nv_ro32(dev, ramfc, 0x58/4));
- nv_wr32(dev, 0x3254, nv_ro32(dev, ramfc, 0x5c/4));
- nv_wr32(dev, 0x3260, nv_ro32(dev, ramfc, 0x60/4));
- nv_wr32(dev, 0x3264, nv_ro32(dev, ramfc, 0x64/4));
- nv_wr32(dev, 0x3268, nv_ro32(dev, ramfc, 0x68/4));
- nv_wr32(dev, 0x326c, nv_ro32(dev, ramfc, 0x6c/4));
- nv_wr32(dev, 0x32e4, nv_ro32(dev, ramfc, 0x70/4));
- nv_wr32(dev, 0x3248, nv_ro32(dev, ramfc, 0x74/4));
- nv_wr32(dev, 0x2088, nv_ro32(dev, ramfc, 0x78/4));
- nv_wr32(dev, 0x2058, nv_ro32(dev, ramfc, 0x7c/4));
- nv_wr32(dev, 0x2210, nv_ro32(dev, ramfc, 0x80/4));
-
- cnt = nv_ro32(dev, ramfc, 0x84/4);
+ nv_wr32(dev, 0x3330, nv_ro32(ramfc, 0x00));
+ nv_wr32(dev, 0x3334, nv_ro32(ramfc, 0x04));
+ nv_wr32(dev, 0x3240, nv_ro32(ramfc, 0x08));
+ nv_wr32(dev, 0x3320, nv_ro32(ramfc, 0x0c));
+ nv_wr32(dev, 0x3244, nv_ro32(ramfc, 0x10));
+ nv_wr32(dev, 0x3328, nv_ro32(ramfc, 0x14));
+ nv_wr32(dev, 0x3368, nv_ro32(ramfc, 0x18));
+ nv_wr32(dev, 0x336c, nv_ro32(ramfc, 0x1c));
+ nv_wr32(dev, 0x3370, nv_ro32(ramfc, 0x20));
+ nv_wr32(dev, 0x3374, nv_ro32(ramfc, 0x24));
+ nv_wr32(dev, 0x3378, nv_ro32(ramfc, 0x28));
+ nv_wr32(dev, 0x337c, nv_ro32(ramfc, 0x2c));
+ nv_wr32(dev, 0x3228, nv_ro32(ramfc, 0x30));
+ nv_wr32(dev, 0x3364, nv_ro32(ramfc, 0x34));
+ nv_wr32(dev, 0x32a0, nv_ro32(ramfc, 0x38));
+ nv_wr32(dev, 0x3224, nv_ro32(ramfc, 0x3c));
+ nv_wr32(dev, 0x324c, nv_ro32(ramfc, 0x40));
+ nv_wr32(dev, 0x2044, nv_ro32(ramfc, 0x44));
+ nv_wr32(dev, 0x322c, nv_ro32(ramfc, 0x48));
+ nv_wr32(dev, 0x3234, nv_ro32(ramfc, 0x4c));
+ nv_wr32(dev, 0x3340, nv_ro32(ramfc, 0x50));
+ nv_wr32(dev, 0x3344, nv_ro32(ramfc, 0x54));
+ nv_wr32(dev, 0x3280, nv_ro32(ramfc, 0x58));
+ nv_wr32(dev, 0x3254, nv_ro32(ramfc, 0x5c));
+ nv_wr32(dev, 0x3260, nv_ro32(ramfc, 0x60));
+ nv_wr32(dev, 0x3264, nv_ro32(ramfc, 0x64));
+ nv_wr32(dev, 0x3268, nv_ro32(ramfc, 0x68));
+ nv_wr32(dev, 0x326c, nv_ro32(ramfc, 0x6c));
+ nv_wr32(dev, 0x32e4, nv_ro32(ramfc, 0x70));
+ nv_wr32(dev, 0x3248, nv_ro32(ramfc, 0x74));
+ nv_wr32(dev, 0x2088, nv_ro32(ramfc, 0x78));
+ nv_wr32(dev, 0x2058, nv_ro32(ramfc, 0x7c));
+ nv_wr32(dev, 0x2210, nv_ro32(ramfc, 0x80));
+
+ cnt = nv_ro32(ramfc, 0x84);
for (ptr = 0; ptr < cnt; ptr++) {
nv_wr32(dev, NV40_PFIFO_CACHE1_METHOD(ptr),
- nv_ro32(dev, cache, (ptr * 2) + 0));
+ nv_ro32(cache, (ptr * 8) + 0));
nv_wr32(dev, NV40_PFIFO_CACHE1_DATA(ptr),
- nv_ro32(dev, cache, (ptr * 2) + 1));
+ nv_ro32(cache, (ptr * 8) + 4));
}
nv_wr32(dev, NV03_PFIFO_CACHE1_PUT, cnt << 2);
nv_wr32(dev, NV03_PFIFO_CACHE1_GET, 0);
/* guessing that all the 0x34xx regs aren't on NV50 */
if (dev_priv->chipset != 0x50) {
- nv_wr32(dev, 0x340c, nv_ro32(dev, ramfc, 0x88/4));
- nv_wr32(dev, 0x3400, nv_ro32(dev, ramfc, 0x8c/4));
- nv_wr32(dev, 0x3404, nv_ro32(dev, ramfc, 0x90/4));
- nv_wr32(dev, 0x3408, nv_ro32(dev, ramfc, 0x94/4));
- nv_wr32(dev, 0x3410, nv_ro32(dev, ramfc, 0x98/4));
+ nv_wr32(dev, 0x340c, nv_ro32(ramfc, 0x88));
+ nv_wr32(dev, 0x3400, nv_ro32(ramfc, 0x8c));
+ nv_wr32(dev, 0x3404, nv_ro32(ramfc, 0x90));
+ nv_wr32(dev, 0x3408, nv_ro32(ramfc, 0x94));
+ nv_wr32(dev, 0x3410, nv_ro32(ramfc, 0x98));
}
nv_wr32(dev, NV03_PFIFO_CACHE1_PUSH1, chan->id | (1<<16));
@@ -399,62 +398,63 @@ nv50_fifo_unload_context(struct drm_device *dev)
return -EINVAL;
}
NV_DEBUG(dev, "ch%d\n", chan->id);
- ramfc = chan->ramfc->gpuobj;
- cache = chan->cache->gpuobj;
-
- nv_wo32(dev, ramfc, 0x00/4, nv_rd32(dev, 0x3330));
- nv_wo32(dev, ramfc, 0x04/4, nv_rd32(dev, 0x3334));
- nv_wo32(dev, ramfc, 0x08/4, nv_rd32(dev, 0x3240));
- nv_wo32(dev, ramfc, 0x0c/4, nv_rd32(dev, 0x3320));
- nv_wo32(dev, ramfc, 0x10/4, nv_rd32(dev, 0x3244));
- nv_wo32(dev, ramfc, 0x14/4, nv_rd32(dev, 0x3328));
- nv_wo32(dev, ramfc, 0x18/4, nv_rd32(dev, 0x3368));
- nv_wo32(dev, ramfc, 0x1c/4, nv_rd32(dev, 0x336c));
- nv_wo32(dev, ramfc, 0x20/4, nv_rd32(dev, 0x3370));
- nv_wo32(dev, ramfc, 0x24/4, nv_rd32(dev, 0x3374));
- nv_wo32(dev, ramfc, 0x28/4, nv_rd32(dev, 0x3378));
- nv_wo32(dev, ramfc, 0x2c/4, nv_rd32(dev, 0x337c));
- nv_wo32(dev, ramfc, 0x30/4, nv_rd32(dev, 0x3228));
- nv_wo32(dev, ramfc, 0x34/4, nv_rd32(dev, 0x3364));
- nv_wo32(dev, ramfc, 0x38/4, nv_rd32(dev, 0x32a0));
- nv_wo32(dev, ramfc, 0x3c/4, nv_rd32(dev, 0x3224));
- nv_wo32(dev, ramfc, 0x40/4, nv_rd32(dev, 0x324c));
- nv_wo32(dev, ramfc, 0x44/4, nv_rd32(dev, 0x2044));
- nv_wo32(dev, ramfc, 0x48/4, nv_rd32(dev, 0x322c));
- nv_wo32(dev, ramfc, 0x4c/4, nv_rd32(dev, 0x3234));
- nv_wo32(dev, ramfc, 0x50/4, nv_rd32(dev, 0x3340));
- nv_wo32(dev, ramfc, 0x54/4, nv_rd32(dev, 0x3344));
- nv_wo32(dev, ramfc, 0x58/4, nv_rd32(dev, 0x3280));
- nv_wo32(dev, ramfc, 0x5c/4, nv_rd32(dev, 0x3254));
- nv_wo32(dev, ramfc, 0x60/4, nv_rd32(dev, 0x3260));
- nv_wo32(dev, ramfc, 0x64/4, nv_rd32(dev, 0x3264));
- nv_wo32(dev, ramfc, 0x68/4, nv_rd32(dev, 0x3268));
- nv_wo32(dev, ramfc, 0x6c/4, nv_rd32(dev, 0x326c));
- nv_wo32(dev, ramfc, 0x70/4, nv_rd32(dev, 0x32e4));
- nv_wo32(dev, ramfc, 0x74/4, nv_rd32(dev, 0x3248));
- nv_wo32(dev, ramfc, 0x78/4, nv_rd32(dev, 0x2088));
- nv_wo32(dev, ramfc, 0x7c/4, nv_rd32(dev, 0x2058));
- nv_wo32(dev, ramfc, 0x80/4, nv_rd32(dev, 0x2210));
+ ramfc = chan->ramfc;
+ cache = chan->cache;
+
+ nv_wo32(ramfc, 0x00, nv_rd32(dev, 0x3330));
+ nv_wo32(ramfc, 0x04, nv_rd32(dev, 0x3334));
+ nv_wo32(ramfc, 0x08, nv_rd32(dev, 0x3240));
+ nv_wo32(ramfc, 0x0c, nv_rd32(dev, 0x3320));
+ nv_wo32(ramfc, 0x10, nv_rd32(dev, 0x3244));
+ nv_wo32(ramfc, 0x14, nv_rd32(dev, 0x3328));
+ nv_wo32(ramfc, 0x18, nv_rd32(dev, 0x3368));
+ nv_wo32(ramfc, 0x1c, nv_rd32(dev, 0x336c));
+ nv_wo32(ramfc, 0x20, nv_rd32(dev, 0x3370));
+ nv_wo32(ramfc, 0x24, nv_rd32(dev, 0x3374));
+ nv_wo32(ramfc, 0x28, nv_rd32(dev, 0x3378));
+ nv_wo32(ramfc, 0x2c, nv_rd32(dev, 0x337c));
+ nv_wo32(ramfc, 0x30, nv_rd32(dev, 0x3228));
+ nv_wo32(ramfc, 0x34, nv_rd32(dev, 0x3364));
+ nv_wo32(ramfc, 0x38, nv_rd32(dev, 0x32a0));
+ nv_wo32(ramfc, 0x3c, nv_rd32(dev, 0x3224));
+ nv_wo32(ramfc, 0x40, nv_rd32(dev, 0x324c));
+ nv_wo32(ramfc, 0x44, nv_rd32(dev, 0x2044));
+ nv_wo32(ramfc, 0x48, nv_rd32(dev, 0x322c));
+ nv_wo32(ramfc, 0x4c, nv_rd32(dev, 0x3234));
+ nv_wo32(ramfc, 0x50, nv_rd32(dev, 0x3340));
+ nv_wo32(ramfc, 0x54, nv_rd32(dev, 0x3344));
+ nv_wo32(ramfc, 0x58, nv_rd32(dev, 0x3280));
+ nv_wo32(ramfc, 0x5c, nv_rd32(dev, 0x3254));
+ nv_wo32(ramfc, 0x60, nv_rd32(dev, 0x3260));
+ nv_wo32(ramfc, 0x64, nv_rd32(dev, 0x3264));
+ nv_wo32(ramfc, 0x68, nv_rd32(dev, 0x3268));
+ nv_wo32(ramfc, 0x6c, nv_rd32(dev, 0x326c));
+ nv_wo32(ramfc, 0x70, nv_rd32(dev, 0x32e4));
+ nv_wo32(ramfc, 0x74, nv_rd32(dev, 0x3248));
+ nv_wo32(ramfc, 0x78, nv_rd32(dev, 0x2088));
+ nv_wo32(ramfc, 0x7c, nv_rd32(dev, 0x2058));
+ nv_wo32(ramfc, 0x80, nv_rd32(dev, 0x2210));
put = (nv_rd32(dev, NV03_PFIFO_CACHE1_PUT) & 0x7ff) >> 2;
get = (nv_rd32(dev, NV03_PFIFO_CACHE1_GET) & 0x7ff) >> 2;
ptr = 0;
while (put != get) {
- nv_wo32(dev, cache, ptr++,
- nv_rd32(dev, NV40_PFIFO_CACHE1_METHOD(get)));
- nv_wo32(dev, cache, ptr++,
- nv_rd32(dev, NV40_PFIFO_CACHE1_DATA(get)));
+ nv_wo32(cache, ptr + 0,
+ nv_rd32(dev, NV40_PFIFO_CACHE1_METHOD(get)));
+ nv_wo32(cache, ptr + 4,
+ nv_rd32(dev, NV40_PFIFO_CACHE1_DATA(get)));
get = (get + 1) & 0x1ff;
+ ptr += 8;
}
/* guessing that all the 0x34xx regs aren't on NV50 */
if (dev_priv->chipset != 0x50) {
- nv_wo32(dev, ramfc, 0x84/4, ptr >> 1);
- nv_wo32(dev, ramfc, 0x88/4, nv_rd32(dev, 0x340c));
- nv_wo32(dev, ramfc, 0x8c/4, nv_rd32(dev, 0x3400));
- nv_wo32(dev, ramfc, 0x90/4, nv_rd32(dev, 0x3404));
- nv_wo32(dev, ramfc, 0x94/4, nv_rd32(dev, 0x3408));
- nv_wo32(dev, ramfc, 0x98/4, nv_rd32(dev, 0x3410));
+ nv_wo32(ramfc, 0x84, ptr >> 3);
+ nv_wo32(ramfc, 0x88, nv_rd32(dev, 0x340c));
+ nv_wo32(ramfc, 0x8c, nv_rd32(dev, 0x3400));
+ nv_wo32(ramfc, 0x90, nv_rd32(dev, 0x3404));
+ nv_wo32(ramfc, 0x94, nv_rd32(dev, 0x3408));
+ nv_wo32(ramfc, 0x98, nv_rd32(dev, 0x3410));
}
dev_priv->engine.instmem.flush(dev);
@@ -464,3 +464,8 @@ nv50_fifo_unload_context(struct drm_device *dev)
return 0;
}
+void
+nv50_fifo_tlb_flush(struct drm_device *dev)
+{
+ nv50_vm_flush(dev, 5);
+}
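Most of the churn in this file is mechanical: nv_ro32()/nv_wo32() lose their dev argument and take a byte offset instead of a 32-bit word index, which is why every "0xNN/4" becomes plain "0xNN" and the CACHE1 save/restore stride changes from 2 words to 8 bytes per entry. A hypothetical compatibility shim, written only to make that mapping explicit (assuming the new prototypes visible in the hunks above; it is not part of the patch):

	static inline u32
	nv_ro32_wordidx(struct nouveau_gpuobj *obj, u32 word)
	{
		return nv_ro32(obj, word * 4);		/* new API: byte offset */
	}

	static inline void
	nv_wo32_wordidx(struct nouveau_gpuobj *obj, u32 word, u32 val)
	{
		nv_wo32(obj, word * 4, val);		/* new API: byte offset, no dev */
	}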
diff --git a/drivers/gpu/drm/nouveau/nv50_graph.c b/drivers/gpu/drm/nouveau/nv50_graph.c
index 1413028e1580..8b669d0af610 100644
--- a/drivers/gpu/drm/nouveau/nv50_graph.c
+++ b/drivers/gpu/drm/nouveau/nv50_graph.c
@@ -27,7 +27,7 @@
#include "drmP.h"
#include "drm.h"
#include "nouveau_drv.h"
-
+#include "nouveau_ramht.h"
#include "nouveau_grctx.h"
static void
@@ -181,7 +181,7 @@ nv50_graph_channel(struct drm_device *dev)
/* Be sure we're not in the middle of a context switch or bad things
* will happen, such as unloading the wrong pgraph context.
*/
- if (!nv_wait(0x400300, 0x00000001, 0x00000000))
+ if (!nv_wait(dev, 0x400300, 0x00000001, 0x00000000))
NV_ERROR(dev, "Ctxprog is still running\n");
inst = nv_rd32(dev, NV50_PGRAPH_CTXCTL_CUR);
@@ -192,7 +192,7 @@ nv50_graph_channel(struct drm_device *dev)
for (i = 0; i < dev_priv->engine.fifo.channels; i++) {
struct nouveau_channel *chan = dev_priv->fifos[i];
- if (chan && chan->ramin && chan->ramin->instance == inst)
+ if (chan && chan->ramin && chan->ramin->vinst == inst)
return chan;
}
@@ -204,36 +204,34 @@ nv50_graph_create_context(struct nouveau_channel *chan)
{
struct drm_device *dev = chan->dev;
struct drm_nouveau_private *dev_priv = dev->dev_private;
- struct nouveau_gpuobj *ramin = chan->ramin->gpuobj;
- struct nouveau_gpuobj *obj;
+ struct nouveau_gpuobj *ramin = chan->ramin;
struct nouveau_pgraph_engine *pgraph = &dev_priv->engine.graph;
struct nouveau_grctx ctx = {};
int hdr, ret;
NV_DEBUG(dev, "ch%d\n", chan->id);
- ret = nouveau_gpuobj_new_ref(dev, chan, NULL, 0, pgraph->grctx_size,
- 0x1000, NVOBJ_FLAG_ZERO_ALLOC |
- NVOBJ_FLAG_ZERO_FREE, &chan->ramin_grctx);
+ ret = nouveau_gpuobj_new(dev, chan, pgraph->grctx_size, 0x1000,
+ NVOBJ_FLAG_ZERO_ALLOC |
+ NVOBJ_FLAG_ZERO_FREE, &chan->ramin_grctx);
if (ret)
return ret;
- obj = chan->ramin_grctx->gpuobj;
hdr = (dev_priv->chipset == 0x50) ? 0x200 : 0x20;
- nv_wo32(dev, ramin, (hdr + 0x00)/4, 0x00190002);
- nv_wo32(dev, ramin, (hdr + 0x04)/4, chan->ramin_grctx->instance +
- pgraph->grctx_size - 1);
- nv_wo32(dev, ramin, (hdr + 0x08)/4, chan->ramin_grctx->instance);
- nv_wo32(dev, ramin, (hdr + 0x0c)/4, 0);
- nv_wo32(dev, ramin, (hdr + 0x10)/4, 0);
- nv_wo32(dev, ramin, (hdr + 0x14)/4, 0x00010000);
+ nv_wo32(ramin, hdr + 0x00, 0x00190002);
+ nv_wo32(ramin, hdr + 0x04, chan->ramin_grctx->vinst +
+ pgraph->grctx_size - 1);
+ nv_wo32(ramin, hdr + 0x08, chan->ramin_grctx->vinst);
+ nv_wo32(ramin, hdr + 0x0c, 0);
+ nv_wo32(ramin, hdr + 0x10, 0);
+ nv_wo32(ramin, hdr + 0x14, 0x00010000);
ctx.dev = chan->dev;
ctx.mode = NOUVEAU_GRCTX_VALS;
- ctx.data = obj;
+ ctx.data = chan->ramin_grctx;
nv50_grctx_init(&ctx);
- nv_wo32(dev, obj, 0x00000/4, chan->ramin->instance >> 12);
+ nv_wo32(chan->ramin_grctx, 0x00000, chan->ramin->vinst >> 12);
dev_priv->engine.instmem.flush(dev);
return 0;
@@ -248,14 +246,14 @@ nv50_graph_destroy_context(struct nouveau_channel *chan)
NV_DEBUG(dev, "ch%d\n", chan->id);
- if (!chan->ramin || !chan->ramin->gpuobj)
+ if (!chan->ramin)
return;
for (i = hdr; i < hdr + 24; i += 4)
- nv_wo32(dev, chan->ramin->gpuobj, i/4, 0);
+ nv_wo32(chan->ramin, i, 0);
dev_priv->engine.instmem.flush(dev);
- nouveau_gpuobj_ref_del(dev, &chan->ramin_grctx);
+ nouveau_gpuobj_ref(NULL, &chan->ramin_grctx);
}
static int
@@ -282,7 +280,7 @@ nv50_graph_do_load_context(struct drm_device *dev, uint32_t inst)
int
nv50_graph_load_context(struct nouveau_channel *chan)
{
- uint32_t inst = chan->ramin->instance >> 12;
+ uint32_t inst = chan->ramin->vinst >> 12;
NV_DEBUG(chan->dev, "ch%d\n", chan->id);
return nv50_graph_do_load_context(chan->dev, inst);
@@ -327,15 +325,16 @@ static int
nv50_graph_nvsw_dma_vblsem(struct nouveau_channel *chan, int grclass,
int mthd, uint32_t data)
{
- struct nouveau_gpuobj_ref *ref = NULL;
+ struct nouveau_gpuobj *gpuobj;
- if (nouveau_gpuobj_ref_find(chan, data, &ref))
+ gpuobj = nouveau_ramht_find(chan, data);
+ if (!gpuobj)
return -ENOENT;
- if (nouveau_notifier_offset(ref->gpuobj, NULL))
+ if (nouveau_notifier_offset(gpuobj, NULL))
return -EINVAL;
- chan->nvsw.vblsem = ref->gpuobj;
+ chan->nvsw.vblsem = gpuobj;
chan->nvsw.vblsem_offset = ~0;
return 0;
}
@@ -403,3 +402,55 @@ struct nouveau_pgraph_object_class nv50_graph_grclass[] = {
{ 0x8597, false, NULL }, /* tesla (nva3, nva5, nva8) */
{}
};
+
+void
+nv50_graph_tlb_flush(struct drm_device *dev)
+{
+ nv50_vm_flush(dev, 0);
+}
+
+void
+nv86_graph_tlb_flush(struct drm_device *dev)
+{
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ struct nouveau_timer_engine *ptimer = &dev_priv->engine.timer;
+ bool idle, timeout = false;
+ unsigned long flags;
+ u64 start;
+ u32 tmp;
+
+ spin_lock_irqsave(&dev_priv->context_switch_lock, flags);
+ nv_mask(dev, 0x400500, 0x00000001, 0x00000000);
+
+ start = ptimer->read(dev);
+ do {
+ idle = true;
+
+ for (tmp = nv_rd32(dev, 0x400380); tmp && idle; tmp >>= 3) {
+ if ((tmp & 7) == 1)
+ idle = false;
+ }
+
+ for (tmp = nv_rd32(dev, 0x400384); tmp && idle; tmp >>= 3) {
+ if ((tmp & 7) == 1)
+ idle = false;
+ }
+
+ for (tmp = nv_rd32(dev, 0x400388); tmp && idle; tmp >>= 3) {
+ if ((tmp & 7) == 1)
+ idle = false;
+ }
+ } while (!idle && !(timeout = ptimer->read(dev) - start > 2000000000));
+
+ if (timeout) {
+ NV_ERROR(dev, "PGRAPH TLB flush idle timeout fail: "
+ "0x%08x 0x%08x 0x%08x 0x%08x\n",
+ nv_rd32(dev, 0x400700), nv_rd32(dev, 0x400380),
+ nv_rd32(dev, 0x400384), nv_rd32(dev, 0x400388));
+ }
+
+ nv50_vm_flush(dev, 0);
+
+ nv_mask(dev, 0x400500, 0x00000001, 0x00000001);
+ spin_unlock_irqrestore(&dev_priv->context_switch_lock, flags);
+}
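The busy test in the new nv86_graph_tlb_flush() treats each of 0x400380/0x400384/0x400388 as packed 3-bit fields, where a field value of 1 means that unit still has outstanding work. Pulled out as a standalone helper (hypothetical, for illustration only):

	#include <stdbool.h>
	#include <stdint.h>

	static bool units_idle(uint32_t status)
	{
		for (; status; status >>= 3) {
			if ((status & 7) == 1)	/* 1 == still busy, per the loop above */
				return false;
		}
		return true;
	}

The patch runs this check inside a ptimer-bounded loop of roughly two seconds (2000000000 ns) with PGRAPH fetches disabled via 0x400500, logs a timeout if idle is never reached, and only then issues the VM flush.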
diff --git a/drivers/gpu/drm/nouveau/nv50_grctx.c b/drivers/gpu/drm/nouveau/nv50_grctx.c
index 42a8fb20c1e6..336aab2a24a6 100644
--- a/drivers/gpu/drm/nouveau/nv50_grctx.c
+++ b/drivers/gpu/drm/nouveau/nv50_grctx.c
@@ -103,6 +103,9 @@
#include "nouveau_drv.h"
#include "nouveau_grctx.h"
+#define IS_NVA3F(x) (((x) > 0xa0 && (x) < 0xaa) || (x) == 0xaf)
+#define IS_NVAAF(x) ((x) >= 0xaa && (x) <= 0xac)
+
/*
* This code deals with PGRAPH contexts on NV50 family cards. Like NV40, it's
* the GPU itself that does context-switching, but it needs a special
@@ -182,6 +185,7 @@ nv50_grctx_init(struct nouveau_grctx *ctx)
case 0xa8:
case 0xaa:
case 0xac:
+ case 0xaf:
break;
default:
NV_ERROR(ctx->dev, "I don't know how to make a ctxprog for "
@@ -268,6 +272,9 @@ nv50_grctx_init(struct nouveau_grctx *ctx)
*/
static void
+nv50_graph_construct_mmio_ddata(struct nouveau_grctx *ctx);
+
+static void
nv50_graph_construct_mmio(struct nouveau_grctx *ctx)
{
struct drm_nouveau_private *dev_priv = ctx->dev->dev_private;
@@ -286,7 +293,7 @@ nv50_graph_construct_mmio(struct nouveau_grctx *ctx)
gr_def(ctx, 0x400840, 0xffe806a8);
}
gr_def(ctx, 0x400844, 0x00000002);
- if (dev_priv->chipset > 0xa0 && dev_priv->chipset < 0xaa)
+ if (IS_NVA3F(dev_priv->chipset))
gr_def(ctx, 0x400894, 0x00001000);
gr_def(ctx, 0x4008e8, 0x00000003);
gr_def(ctx, 0x4008ec, 0x00001000);
@@ -299,13 +306,15 @@ nv50_graph_construct_mmio(struct nouveau_grctx *ctx)
if (dev_priv->chipset >= 0xa0)
cp_ctx(ctx, 0x400b00, 0x1);
- if (dev_priv->chipset > 0xa0 && dev_priv->chipset < 0xaa) {
+ if (IS_NVA3F(dev_priv->chipset)) {
cp_ctx(ctx, 0x400b10, 0x1);
gr_def(ctx, 0x400b10, 0x0001629d);
cp_ctx(ctx, 0x400b20, 0x1);
gr_def(ctx, 0x400b20, 0x0001629d);
}
+ nv50_graph_construct_mmio_ddata(ctx);
+
/* 0C00: VFETCH */
cp_ctx(ctx, 0x400c08, 0x2);
gr_def(ctx, 0x400c08, 0x0000fe0c);
@@ -314,7 +323,7 @@ nv50_graph_construct_mmio(struct nouveau_grctx *ctx)
if (dev_priv->chipset < 0xa0) {
cp_ctx(ctx, 0x401008, 0x4);
gr_def(ctx, 0x401014, 0x00001000);
- } else if (dev_priv->chipset == 0xa0 || dev_priv->chipset >= 0xaa) {
+ } else if (!IS_NVA3F(dev_priv->chipset)) {
cp_ctx(ctx, 0x401008, 0x5);
gr_def(ctx, 0x401018, 0x00001000);
} else {
@@ -368,10 +377,13 @@ nv50_graph_construct_mmio(struct nouveau_grctx *ctx)
case 0xa3:
case 0xa5:
case 0xa8:
+ case 0xaf:
gr_def(ctx, 0x401c00, 0x142500df);
break;
}
+ /* 2000 */
+
/* 2400 */
cp_ctx(ctx, 0x402400, 0x1);
if (dev_priv->chipset == 0x50)
@@ -380,12 +392,12 @@ nv50_graph_construct_mmio(struct nouveau_grctx *ctx)
cp_ctx(ctx, 0x402408, 0x2);
gr_def(ctx, 0x402408, 0x00000600);
- /* 2800 */
+ /* 2800: CSCHED */
cp_ctx(ctx, 0x402800, 0x1);
if (dev_priv->chipset == 0x50)
gr_def(ctx, 0x402800, 0x00000006);
- /* 2C00 */
+ /* 2C00: ZCULL */
cp_ctx(ctx, 0x402c08, 0x6);
if (dev_priv->chipset != 0x50)
gr_def(ctx, 0x402c14, 0x01000000);
@@ -396,23 +408,23 @@ nv50_graph_construct_mmio(struct nouveau_grctx *ctx)
cp_ctx(ctx, 0x402ca0, 0x2);
if (dev_priv->chipset < 0xa0)
gr_def(ctx, 0x402ca0, 0x00000400);
- else if (dev_priv->chipset == 0xa0 || dev_priv->chipset >= 0xaa)
+ else if (!IS_NVA3F(dev_priv->chipset))
gr_def(ctx, 0x402ca0, 0x00000800);
else
gr_def(ctx, 0x402ca0, 0x00000400);
cp_ctx(ctx, 0x402cac, 0x4);
- /* 3000 */
+ /* 3000: ENG2D */
cp_ctx(ctx, 0x403004, 0x1);
gr_def(ctx, 0x403004, 0x00000001);
- /* 3404 */
+ /* 3400 */
if (dev_priv->chipset >= 0xa0) {
cp_ctx(ctx, 0x403404, 0x1);
gr_def(ctx, 0x403404, 0x00000001);
}
- /* 5000 */
+ /* 5000: CCACHE */
cp_ctx(ctx, 0x405000, 0x1);
switch (dev_priv->chipset) {
case 0x50:
@@ -425,6 +437,7 @@ nv50_graph_construct_mmio(struct nouveau_grctx *ctx)
case 0xa8:
case 0xaa:
case 0xac:
+ case 0xaf:
gr_def(ctx, 0x405000, 0x000e0080);
break;
case 0x86:
@@ -441,210 +454,6 @@ nv50_graph_construct_mmio(struct nouveau_grctx *ctx)
cp_ctx(ctx, 0x405024, 0x1);
cp_ctx(ctx, 0x40502c, 0x1);
- /* 5400 or maybe 4800 */
- if (dev_priv->chipset == 0x50) {
- offset = 0x405400;
- cp_ctx(ctx, 0x405400, 0xea);
- } else if (dev_priv->chipset < 0x94) {
- offset = 0x405400;
- cp_ctx(ctx, 0x405400, 0xcb);
- } else if (dev_priv->chipset < 0xa0) {
- offset = 0x405400;
- cp_ctx(ctx, 0x405400, 0xcc);
- } else if (dev_priv->chipset > 0xa0 && dev_priv->chipset < 0xaa) {
- offset = 0x404800;
- cp_ctx(ctx, 0x404800, 0xda);
- } else {
- offset = 0x405400;
- cp_ctx(ctx, 0x405400, 0xd4);
- }
- gr_def(ctx, offset + 0x0c, 0x00000002);
- gr_def(ctx, offset + 0x10, 0x00000001);
- if (dev_priv->chipset >= 0x94)
- offset += 4;
- gr_def(ctx, offset + 0x1c, 0x00000001);
- gr_def(ctx, offset + 0x20, 0x00000100);
- gr_def(ctx, offset + 0x38, 0x00000002);
- gr_def(ctx, offset + 0x3c, 0x00000001);
- gr_def(ctx, offset + 0x40, 0x00000001);
- gr_def(ctx, offset + 0x50, 0x00000001);
- gr_def(ctx, offset + 0x54, 0x003fffff);
- gr_def(ctx, offset + 0x58, 0x00001fff);
- gr_def(ctx, offset + 0x60, 0x00000001);
- gr_def(ctx, offset + 0x64, 0x00000001);
- gr_def(ctx, offset + 0x6c, 0x00000001);
- gr_def(ctx, offset + 0x70, 0x00000001);
- gr_def(ctx, offset + 0x74, 0x00000001);
- gr_def(ctx, offset + 0x78, 0x00000004);
- gr_def(ctx, offset + 0x7c, 0x00000001);
- if (dev_priv->chipset > 0xa0 && dev_priv->chipset < 0xaa)
- offset += 4;
- gr_def(ctx, offset + 0x80, 0x00000001);
- gr_def(ctx, offset + 0x84, 0x00000001);
- gr_def(ctx, offset + 0x88, 0x00000007);
- gr_def(ctx, offset + 0x8c, 0x00000001);
- gr_def(ctx, offset + 0x90, 0x00000007);
- gr_def(ctx, offset + 0x94, 0x00000001);
- gr_def(ctx, offset + 0x98, 0x00000001);
- gr_def(ctx, offset + 0x9c, 0x00000001);
- if (dev_priv->chipset == 0x50) {
- gr_def(ctx, offset + 0xb0, 0x00000001);
- gr_def(ctx, offset + 0xb4, 0x00000001);
- gr_def(ctx, offset + 0xbc, 0x00000001);
- gr_def(ctx, offset + 0xc0, 0x0000000a);
- gr_def(ctx, offset + 0xd0, 0x00000040);
- gr_def(ctx, offset + 0xd8, 0x00000002);
- gr_def(ctx, offset + 0xdc, 0x00000100);
- gr_def(ctx, offset + 0xe0, 0x00000001);
- gr_def(ctx, offset + 0xe4, 0x00000100);
- gr_def(ctx, offset + 0x100, 0x00000001);
- gr_def(ctx, offset + 0x124, 0x00000004);
- gr_def(ctx, offset + 0x13c, 0x00000001);
- gr_def(ctx, offset + 0x140, 0x00000100);
- gr_def(ctx, offset + 0x148, 0x00000001);
- gr_def(ctx, offset + 0x154, 0x00000100);
- gr_def(ctx, offset + 0x158, 0x00000001);
- gr_def(ctx, offset + 0x15c, 0x00000100);
- gr_def(ctx, offset + 0x164, 0x00000001);
- gr_def(ctx, offset + 0x170, 0x00000100);
- gr_def(ctx, offset + 0x174, 0x00000001);
- gr_def(ctx, offset + 0x17c, 0x00000001);
- gr_def(ctx, offset + 0x188, 0x00000002);
- gr_def(ctx, offset + 0x190, 0x00000001);
- gr_def(ctx, offset + 0x198, 0x00000001);
- gr_def(ctx, offset + 0x1ac, 0x00000003);
- offset += 0xd0;
- } else {
- gr_def(ctx, offset + 0xb0, 0x00000001);
- gr_def(ctx, offset + 0xb4, 0x00000100);
- gr_def(ctx, offset + 0xbc, 0x00000001);
- gr_def(ctx, offset + 0xc8, 0x00000100);
- gr_def(ctx, offset + 0xcc, 0x00000001);
- gr_def(ctx, offset + 0xd0, 0x00000100);
- gr_def(ctx, offset + 0xd8, 0x00000001);
- gr_def(ctx, offset + 0xe4, 0x00000100);
- }
- gr_def(ctx, offset + 0xf8, 0x00000004);
- gr_def(ctx, offset + 0xfc, 0x00000070);
- gr_def(ctx, offset + 0x100, 0x00000080);
- if (dev_priv->chipset > 0xa0 && dev_priv->chipset < 0xaa)
- offset += 4;
- gr_def(ctx, offset + 0x114, 0x0000000c);
- if (dev_priv->chipset == 0x50)
- offset -= 4;
- gr_def(ctx, offset + 0x11c, 0x00000008);
- gr_def(ctx, offset + 0x120, 0x00000014);
- if (dev_priv->chipset == 0x50) {
- gr_def(ctx, offset + 0x124, 0x00000026);
- offset -= 0x18;
- } else {
- gr_def(ctx, offset + 0x128, 0x00000029);
- gr_def(ctx, offset + 0x12c, 0x00000027);
- gr_def(ctx, offset + 0x130, 0x00000026);
- gr_def(ctx, offset + 0x134, 0x00000008);
- gr_def(ctx, offset + 0x138, 0x00000004);
- gr_def(ctx, offset + 0x13c, 0x00000027);
- }
- gr_def(ctx, offset + 0x148, 0x00000001);
- gr_def(ctx, offset + 0x14c, 0x00000002);
- gr_def(ctx, offset + 0x150, 0x00000003);
- gr_def(ctx, offset + 0x154, 0x00000004);
- gr_def(ctx, offset + 0x158, 0x00000005);
- gr_def(ctx, offset + 0x15c, 0x00000006);
- gr_def(ctx, offset + 0x160, 0x00000007);
- gr_def(ctx, offset + 0x164, 0x00000001);
- gr_def(ctx, offset + 0x1a8, 0x000000cf);
- if (dev_priv->chipset == 0x50)
- offset -= 4;
- gr_def(ctx, offset + 0x1d8, 0x00000080);
- gr_def(ctx, offset + 0x1dc, 0x00000004);
- gr_def(ctx, offset + 0x1e0, 0x00000004);
- if (dev_priv->chipset == 0x50)
- offset -= 4;
- else
- gr_def(ctx, offset + 0x1e4, 0x00000003);
- if (dev_priv->chipset > 0xa0 && dev_priv->chipset < 0xaa) {
- gr_def(ctx, offset + 0x1ec, 0x00000003);
- offset += 8;
- }
- gr_def(ctx, offset + 0x1e8, 0x00000001);
- if (dev_priv->chipset == 0x50)
- offset -= 4;
- gr_def(ctx, offset + 0x1f4, 0x00000012);
- gr_def(ctx, offset + 0x1f8, 0x00000010);
- gr_def(ctx, offset + 0x1fc, 0x0000000c);
- gr_def(ctx, offset + 0x200, 0x00000001);
- gr_def(ctx, offset + 0x210, 0x00000004);
- gr_def(ctx, offset + 0x214, 0x00000002);
- gr_def(ctx, offset + 0x218, 0x00000004);
- if (dev_priv->chipset >= 0xa0)
- offset += 4;
- gr_def(ctx, offset + 0x224, 0x003fffff);
- gr_def(ctx, offset + 0x228, 0x00001fff);
- if (dev_priv->chipset == 0x50)
- offset -= 0x20;
- else if (dev_priv->chipset >= 0xa0) {
- gr_def(ctx, offset + 0x250, 0x00000001);
- gr_def(ctx, offset + 0x254, 0x00000001);
- gr_def(ctx, offset + 0x258, 0x00000002);
- offset += 0x10;
- }
- gr_def(ctx, offset + 0x250, 0x00000004);
- gr_def(ctx, offset + 0x254, 0x00000014);
- gr_def(ctx, offset + 0x258, 0x00000001);
- if (dev_priv->chipset > 0xa0 && dev_priv->chipset < 0xaa)
- offset += 4;
- gr_def(ctx, offset + 0x264, 0x00000002);
- if (dev_priv->chipset >= 0xa0)
- offset += 8;
- gr_def(ctx, offset + 0x270, 0x00000001);
- gr_def(ctx, offset + 0x278, 0x00000002);
- gr_def(ctx, offset + 0x27c, 0x00001000);
- if (dev_priv->chipset == 0x50)
- offset -= 0xc;
- else {
- gr_def(ctx, offset + 0x280, 0x00000e00);
- gr_def(ctx, offset + 0x284, 0x00001000);
- gr_def(ctx, offset + 0x288, 0x00001e00);
- }
- gr_def(ctx, offset + 0x290, 0x00000001);
- gr_def(ctx, offset + 0x294, 0x00000001);
- gr_def(ctx, offset + 0x298, 0x00000001);
- gr_def(ctx, offset + 0x29c, 0x00000001);
- gr_def(ctx, offset + 0x2a0, 0x00000001);
- gr_def(ctx, offset + 0x2b0, 0x00000200);
- if (dev_priv->chipset > 0xa0 && dev_priv->chipset < 0xaa) {
- gr_def(ctx, offset + 0x2b4, 0x00000200);
- offset += 4;
- }
- if (dev_priv->chipset < 0xa0) {
- gr_def(ctx, offset + 0x2b8, 0x00000001);
- gr_def(ctx, offset + 0x2bc, 0x00000070);
- gr_def(ctx, offset + 0x2c0, 0x00000080);
- gr_def(ctx, offset + 0x2cc, 0x00000001);
- gr_def(ctx, offset + 0x2d0, 0x00000070);
- gr_def(ctx, offset + 0x2d4, 0x00000080);
- } else {
- gr_def(ctx, offset + 0x2b8, 0x00000001);
- gr_def(ctx, offset + 0x2bc, 0x000000f0);
- gr_def(ctx, offset + 0x2c0, 0x000000ff);
- gr_def(ctx, offset + 0x2cc, 0x00000001);
- gr_def(ctx, offset + 0x2d0, 0x000000f0);
- gr_def(ctx, offset + 0x2d4, 0x000000ff);
- gr_def(ctx, offset + 0x2dc, 0x00000009);
- offset += 4;
- }
- gr_def(ctx, offset + 0x2e4, 0x00000001);
- gr_def(ctx, offset + 0x2e8, 0x000000cf);
- gr_def(ctx, offset + 0x2f0, 0x00000001);
- gr_def(ctx, offset + 0x300, 0x000000cf);
- gr_def(ctx, offset + 0x308, 0x00000002);
- gr_def(ctx, offset + 0x310, 0x00000001);
- gr_def(ctx, offset + 0x318, 0x00000001);
- gr_def(ctx, offset + 0x320, 0x000000cf);
- gr_def(ctx, offset + 0x324, 0x000000cf);
- gr_def(ctx, offset + 0x328, 0x00000001);
-
/* 6000? */
if (dev_priv->chipset == 0x50)
cp_ctx(ctx, 0x4063e0, 0x1);
@@ -661,7 +470,7 @@ nv50_graph_construct_mmio(struct nouveau_grctx *ctx)
gr_def(ctx, 0x406818, 0x00000f80);
else
gr_def(ctx, 0x406818, 0x00001f80);
- if (dev_priv->chipset > 0xa0 && dev_priv->chipset < 0xaa)
+ if (IS_NVA3F(dev_priv->chipset))
gr_def(ctx, 0x40681c, 0x00000030);
cp_ctx(ctx, 0x406830, 0x3);
}
@@ -706,7 +515,7 @@ nv50_graph_construct_mmio(struct nouveau_grctx *ctx)
if (dev_priv->chipset < 0xa0)
cp_ctx(ctx, 0x407094 + (i<<8), 1);
- else if (dev_priv->chipset <= 0xa0 || dev_priv->chipset >= 0xaa)
+ else if (!IS_NVA3F(dev_priv->chipset))
cp_ctx(ctx, 0x407094 + (i<<8), 3);
else {
cp_ctx(ctx, 0x407094 + (i<<8), 4);
@@ -799,6 +608,7 @@ nv50_graph_construct_mmio(struct nouveau_grctx *ctx)
case 0xa8:
case 0xaa:
case 0xac:
+ case 0xaf:
gr_def(ctx, offset + 0x1c, 0x300c0000);
break;
}
@@ -825,7 +635,7 @@ nv50_graph_construct_mmio(struct nouveau_grctx *ctx)
gr_def(ctx, base + 0x304, 0x00007070);
else if (dev_priv->chipset < 0xa0)
gr_def(ctx, base + 0x304, 0x00027070);
- else if (dev_priv->chipset <= 0xa0 || dev_priv->chipset >= 0xaa)
+ else if (!IS_NVA3F(dev_priv->chipset))
gr_def(ctx, base + 0x304, 0x01127070);
else
gr_def(ctx, base + 0x304, 0x05127070);
@@ -849,7 +659,7 @@ nv50_graph_construct_mmio(struct nouveau_grctx *ctx)
if (dev_priv->chipset < 0xa0) {
cp_ctx(ctx, base + 0x340, 9);
offset = base + 0x340;
- } else if (dev_priv->chipset <= 0xa0 || dev_priv->chipset >= 0xaa) {
+ } else if (!IS_NVA3F(dev_priv->chipset)) {
cp_ctx(ctx, base + 0x33c, 0xb);
offset = base + 0x344;
} else {
@@ -880,7 +690,7 @@ nv50_graph_construct_mmio(struct nouveau_grctx *ctx)
gr_def(ctx, offset + 0x0, 0x000001f0);
gr_def(ctx, offset + 0x4, 0x00000001);
gr_def(ctx, offset + 0x8, 0x00000003);
- if (dev_priv->chipset == 0x50 || dev_priv->chipset >= 0xaa)
+ if (dev_priv->chipset == 0x50 || IS_NVAAF(dev_priv->chipset))
gr_def(ctx, offset + 0xc, 0x00008000);
gr_def(ctx, offset + 0x14, 0x00039e00);
cp_ctx(ctx, offset + 0x1c, 2);
@@ -892,7 +702,7 @@ nv50_graph_construct_mmio(struct nouveau_grctx *ctx)
if (dev_priv->chipset >= 0xa0) {
cp_ctx(ctx, base + 0x54c, 2);
- if (dev_priv->chipset <= 0xa0 || dev_priv->chipset >= 0xaa)
+ if (!IS_NVA3F(dev_priv->chipset))
gr_def(ctx, base + 0x54c, 0x003fe006);
else
gr_def(ctx, base + 0x54c, 0x003fe007);
@@ -948,6 +758,336 @@ nv50_graph_construct_mmio(struct nouveau_grctx *ctx)
}
}
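The hunks above fold the open-coded chipset range checks into IS_NVA3F()/IS_NVAAF() helpers and add the new 0xaf chipset to the affected switch statements. The macro definitions live in a header that is not part of this excerpt; a plausible reconstruction of IS_NVA3F(), inferred purely from the ranges it replaces and the added "case 0xaf:" labels, would be:

	/* Reconstruction for illustration -- not part of this patch.  It covers
	 * the old "chipset > 0xa0 && chipset < 0xaa" range plus, judging by the
	 * new case labels, the 0xaf chipset. */
	#define IS_NVA3F(x) (((x) > 0xa0 && (x) < 0xaa) || (x) == 0xaf)

IS_NVAAF() similarly stands in for the old "chipset >= 0xaa" tests on the IGP-class parts; its exact membership is not visible in these hunks.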
+static void
+dd_emit(struct nouveau_grctx *ctx, int num, uint32_t val) {
+ int i;
+ if (val && ctx->mode == NOUVEAU_GRCTX_VALS)
+ for (i = 0; i < num; i++)
+ nv_wo32(ctx->data, 4 * (ctx->ctxvals_pos + i), val);
+ ctx->ctxvals_pos += num;
+}
+
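dd_emit() reserves num consecutive 32-bit slots in the ctxvals area: the position counter always advances, but writes only happen for non-zero values in NOUVEAU_GRCTX_VALS mode, so zero entries simply skip slots that are assumed to already be zeroed. A small illustration (values arbitrary):

	/* Illustration only: three calls reserving five consecutive words. */
	dd_emit(ctx, 1, 0);      /* word n+0: skipped, stays 0           */
	dd_emit(ctx, 2, 0x100);  /* words n+1, n+2: written with 0x100   */
	dd_emit(ctx, 2, 0);      /* words n+3, n+4: skipped              */
	/* ctx->ctxvals_pos advances by 5 regardless of mode, keeping the
	 * layout identical between passes. */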
+static void
+nv50_graph_construct_mmio_ddata(struct nouveau_grctx *ctx)
+{
+ struct drm_nouveau_private *dev_priv = ctx->dev->dev_private;
+ int base, num;
+ base = ctx->ctxvals_pos;
+
+ /* tesla state */
+ dd_emit(ctx, 1, 0); /* 00000001 UNK0F90 */
+ dd_emit(ctx, 1, 0); /* 00000001 UNK135C */
+
+ /* SRC_TIC state */
+ dd_emit(ctx, 1, 0); /* 00000007 SRC_TILE_MODE_Z */
+ dd_emit(ctx, 1, 2); /* 00000007 SRC_TILE_MODE_Y */
+ dd_emit(ctx, 1, 1); /* 00000001 SRC_LINEAR #1 */
+ dd_emit(ctx, 1, 0); /* 000000ff SRC_ADDRESS_HIGH */
+ dd_emit(ctx, 1, 0); /* 00000001 SRC_SRGB */
+ if (dev_priv->chipset >= 0x94)
+ dd_emit(ctx, 1, 0); /* 00000003 eng2d UNK0258 */
+ dd_emit(ctx, 1, 1); /* 00000fff SRC_DEPTH */
+ dd_emit(ctx, 1, 0x100); /* 0000ffff SRC_HEIGHT */
+
+ /* turing state */
+ dd_emit(ctx, 1, 0); /* 0000000f TEXTURES_LOG2 */
+ dd_emit(ctx, 1, 0); /* 0000000f SAMPLERS_LOG2 */
+ dd_emit(ctx, 1, 0); /* 000000ff CB_DEF_ADDRESS_HIGH */
+ dd_emit(ctx, 1, 0); /* ffffffff CB_DEF_ADDRESS_LOW */
+ dd_emit(ctx, 1, 0); /* ffffffff SHARED_SIZE */
+ dd_emit(ctx, 1, 2); /* ffffffff REG_MODE */
+ dd_emit(ctx, 1, 1); /* 0000ffff BLOCK_ALLOC_THREADS */
+ dd_emit(ctx, 1, 1); /* 00000001 LANES32 */
+ dd_emit(ctx, 1, 0); /* 000000ff UNK370 */
+ dd_emit(ctx, 1, 0); /* 000000ff USER_PARAM_UNK */
+ dd_emit(ctx, 1, 0); /* 000000ff USER_PARAM_COUNT */
+ dd_emit(ctx, 1, 1); /* 000000ff UNK384 bits 8-15 */
+ dd_emit(ctx, 1, 0x3fffff); /* 003fffff TIC_LIMIT */
+ dd_emit(ctx, 1, 0x1fff); /* 000fffff TSC_LIMIT */
+ dd_emit(ctx, 1, 0); /* 0000ffff CB_ADDR_INDEX */
+ dd_emit(ctx, 1, 1); /* 000007ff BLOCKDIM_X */
+ dd_emit(ctx, 1, 1); /* 000007ff BLOCKDIM_XMY */
+ dd_emit(ctx, 1, 0); /* 00000001 BLOCKDIM_XMY_OVERFLOW */
+ dd_emit(ctx, 1, 1); /* 0003ffff BLOCKDIM_XMYMZ */
+ dd_emit(ctx, 1, 1); /* 000007ff BLOCKDIM_Y */
+ dd_emit(ctx, 1, 1); /* 0000007f BLOCKDIM_Z */
+ dd_emit(ctx, 1, 4); /* 000000ff CP_REG_ALLOC_TEMP */
+ dd_emit(ctx, 1, 1); /* 00000001 BLOCKDIM_DIRTY */
+ if (IS_NVA3F(dev_priv->chipset))
+ dd_emit(ctx, 1, 0); /* 00000003 UNK03E8 */
+ dd_emit(ctx, 1, 1); /* 0000007f BLOCK_ALLOC_HALFWARPS */
+ dd_emit(ctx, 1, 1); /* 00000007 LOCAL_WARPS_NO_CLAMP */
+ dd_emit(ctx, 1, 7); /* 00000007 LOCAL_WARPS_LOG_ALLOC */
+ dd_emit(ctx, 1, 1); /* 00000007 STACK_WARPS_NO_CLAMP */
+ dd_emit(ctx, 1, 7); /* 00000007 STACK_WARPS_LOG_ALLOC */
+ dd_emit(ctx, 1, 1); /* 00001fff BLOCK_ALLOC_REGSLOTS_PACKED */
+ dd_emit(ctx, 1, 1); /* 00001fff BLOCK_ALLOC_REGSLOTS_STRIDED */
+ dd_emit(ctx, 1, 1); /* 000007ff BLOCK_ALLOC_THREADS */
+
+ /* compat 2d state */
+ if (dev_priv->chipset == 0x50) {
+ dd_emit(ctx, 4, 0); /* 0000ffff clip X, Y, W, H */
+
+ dd_emit(ctx, 1, 1); /* ffffffff chroma COLOR_FORMAT */
+
+ dd_emit(ctx, 1, 1); /* ffffffff pattern COLOR_FORMAT */
+ dd_emit(ctx, 1, 0); /* ffffffff pattern SHAPE */
+ dd_emit(ctx, 1, 1); /* ffffffff pattern PATTERN_SELECT */
+
+ dd_emit(ctx, 1, 0xa); /* ffffffff surf2d SRC_FORMAT */
+ dd_emit(ctx, 1, 0); /* ffffffff surf2d DMA_SRC */
+ dd_emit(ctx, 1, 0); /* 000000ff surf2d SRC_ADDRESS_HIGH */
+ dd_emit(ctx, 1, 0); /* ffffffff surf2d SRC_ADDRESS_LOW */
+ dd_emit(ctx, 1, 0x40); /* 0000ffff surf2d SRC_PITCH */
+ dd_emit(ctx, 1, 0); /* 0000000f surf2d SRC_TILE_MODE_Z */
+ dd_emit(ctx, 1, 2); /* 0000000f surf2d SRC_TILE_MODE_Y */
+ dd_emit(ctx, 1, 0x100); /* ffffffff surf2d SRC_HEIGHT */
+ dd_emit(ctx, 1, 1); /* 00000001 surf2d SRC_LINEAR */
+ dd_emit(ctx, 1, 0x100); /* ffffffff surf2d SRC_WIDTH */
+
+ dd_emit(ctx, 1, 0); /* 0000ffff gdirect CLIP_B_X */
+ dd_emit(ctx, 1, 0); /* 0000ffff gdirect CLIP_B_Y */
+ dd_emit(ctx, 1, 0); /* 0000ffff gdirect CLIP_C_X */
+ dd_emit(ctx, 1, 0); /* 0000ffff gdirect CLIP_C_Y */
+ dd_emit(ctx, 1, 0); /* 0000ffff gdirect CLIP_D_X */
+ dd_emit(ctx, 1, 0); /* 0000ffff gdirect CLIP_D_Y */
+ dd_emit(ctx, 1, 1); /* ffffffff gdirect COLOR_FORMAT */
+ dd_emit(ctx, 1, 0); /* ffffffff gdirect OPERATION */
+ dd_emit(ctx, 1, 0); /* 0000ffff gdirect POINT_X */
+ dd_emit(ctx, 1, 0); /* 0000ffff gdirect POINT_Y */
+
+ dd_emit(ctx, 1, 0); /* 0000ffff blit SRC_Y */
+ dd_emit(ctx, 1, 0); /* ffffffff blit OPERATION */
+
+ dd_emit(ctx, 1, 0); /* ffffffff ifc OPERATION */
+
+ dd_emit(ctx, 1, 0); /* ffffffff iifc INDEX_FORMAT */
+ dd_emit(ctx, 1, 0); /* ffffffff iifc LUT_OFFSET */
+ dd_emit(ctx, 1, 4); /* ffffffff iifc COLOR_FORMAT */
+ dd_emit(ctx, 1, 0); /* ffffffff iifc OPERATION */
+ }
+
+ /* m2mf state */
+ dd_emit(ctx, 1, 0); /* ffffffff m2mf LINE_COUNT */
+ dd_emit(ctx, 1, 0); /* ffffffff m2mf LINE_LENGTH_IN */
+ dd_emit(ctx, 2, 0); /* ffffffff m2mf OFFSET_IN, OFFSET_OUT */
+ dd_emit(ctx, 1, 1); /* ffffffff m2mf TILING_DEPTH_OUT */
+ dd_emit(ctx, 1, 0x100); /* ffffffff m2mf TILING_HEIGHT_OUT */
+ dd_emit(ctx, 1, 0); /* ffffffff m2mf TILING_POSITION_OUT_Z */
+ dd_emit(ctx, 1, 1); /* 00000001 m2mf LINEAR_OUT */
+ dd_emit(ctx, 2, 0); /* 0000ffff m2mf TILING_POSITION_OUT_X, Y */
+ dd_emit(ctx, 1, 0x100); /* ffffffff m2mf TILING_PITCH_OUT */
+ dd_emit(ctx, 1, 1); /* ffffffff m2mf TILING_DEPTH_IN */
+ dd_emit(ctx, 1, 0x100); /* ffffffff m2mf TILING_HEIGHT_IN */
+ dd_emit(ctx, 1, 0); /* ffffffff m2mf TILING_POSITION_IN_Z */
+ dd_emit(ctx, 1, 1); /* 00000001 m2mf LINEAR_IN */
+ dd_emit(ctx, 2, 0); /* 0000ffff m2mf TILING_POSITION_IN_X, Y */
+ dd_emit(ctx, 1, 0x100); /* ffffffff m2mf TILING_PITCH_IN */
+
+ /* more compat 2d state */
+ if (dev_priv->chipset == 0x50) {
+ dd_emit(ctx, 1, 1); /* ffffffff line COLOR_FORMAT */
+ dd_emit(ctx, 1, 0); /* ffffffff line OPERATION */
+
+ dd_emit(ctx, 1, 1); /* ffffffff triangle COLOR_FORMAT */
+ dd_emit(ctx, 1, 0); /* ffffffff triangle OPERATION */
+
+ dd_emit(ctx, 1, 0); /* 0000000f sifm TILE_MODE_Z */
+ dd_emit(ctx, 1, 2); /* 0000000f sifm TILE_MODE_Y */
+ dd_emit(ctx, 1, 0); /* 000000ff sifm FORMAT_FILTER */
+ dd_emit(ctx, 1, 1); /* 000000ff sifm FORMAT_ORIGIN */
+ dd_emit(ctx, 1, 0); /* 0000ffff sifm SRC_PITCH */
+ dd_emit(ctx, 1, 1); /* 00000001 sifm SRC_LINEAR */
+ dd_emit(ctx, 1, 0); /* 000000ff sifm SRC_OFFSET_HIGH */
+ dd_emit(ctx, 1, 0); /* ffffffff sifm SRC_OFFSET */
+ dd_emit(ctx, 1, 0); /* 0000ffff sifm SRC_HEIGHT */
+ dd_emit(ctx, 1, 0); /* 0000ffff sifm SRC_WIDTH */
+ dd_emit(ctx, 1, 3); /* ffffffff sifm COLOR_FORMAT */
+ dd_emit(ctx, 1, 0); /* ffffffff sifm OPERATION */
+
+ dd_emit(ctx, 1, 0); /* ffffffff sifc OPERATION */
+ }
+
+ /* tesla state */
+ dd_emit(ctx, 1, 0); /* 0000000f GP_TEXTURES_LOG2 */
+ dd_emit(ctx, 1, 0); /* 0000000f GP_SAMPLERS_LOG2 */
+ dd_emit(ctx, 1, 0); /* 000000ff */
+ dd_emit(ctx, 1, 0); /* ffffffff */
+ dd_emit(ctx, 1, 4); /* 000000ff UNK12B0_0 */
+ dd_emit(ctx, 1, 0x70); /* 000000ff UNK12B0_1 */
+ dd_emit(ctx, 1, 0x80); /* 000000ff UNK12B0_3 */
+ dd_emit(ctx, 1, 0); /* 000000ff UNK12B0_2 */
+ dd_emit(ctx, 1, 0); /* 0000000f FP_TEXTURES_LOG2 */
+ dd_emit(ctx, 1, 0); /* 0000000f FP_SAMPLERS_LOG2 */
+ if (IS_NVA3F(dev_priv->chipset)) {
+ dd_emit(ctx, 1, 0); /* ffffffff */
+ dd_emit(ctx, 1, 0); /* 0000007f MULTISAMPLE_SAMPLES_LOG2 */
+ } else {
+ dd_emit(ctx, 1, 0); /* 0000000f MULTISAMPLE_SAMPLES_LOG2 */
+ }
+ dd_emit(ctx, 1, 0xc); /* 000000ff SEMANTIC_COLOR.BFC0_ID */
+ if (dev_priv->chipset != 0x50)
+ dd_emit(ctx, 1, 0); /* 00000001 SEMANTIC_COLOR.CLMP_EN */
+ dd_emit(ctx, 1, 8); /* 000000ff SEMANTIC_COLOR.COLR_NR */
+ dd_emit(ctx, 1, 0x14); /* 000000ff SEMANTIC_COLOR.FFC0_ID */
+ if (dev_priv->chipset == 0x50) {
+ dd_emit(ctx, 1, 0); /* 000000ff SEMANTIC_LAYER */
+ dd_emit(ctx, 1, 0); /* 00000001 */
+ } else {
+ dd_emit(ctx, 1, 0); /* 00000001 SEMANTIC_PTSZ.ENABLE */
+ dd_emit(ctx, 1, 0x29); /* 000000ff SEMANTIC_PTSZ.PTSZ_ID */
+ dd_emit(ctx, 1, 0x27); /* 000000ff SEMANTIC_PRIM */
+ dd_emit(ctx, 1, 0x26); /* 000000ff SEMANTIC_LAYER */
+ dd_emit(ctx, 1, 8); /* 0000000f SEMANTIC_CLIP.CLIP_HIGH */
+ dd_emit(ctx, 1, 4); /* 000000ff SEMANTIC_CLIP.CLIP_LO */
+ dd_emit(ctx, 1, 0x27); /* 000000ff UNK0FD4 */
+ dd_emit(ctx, 1, 0); /* 00000001 UNK1900 */
+ }
+ dd_emit(ctx, 1, 0); /* 00000007 RT_CONTROL_MAP0 */
+ dd_emit(ctx, 1, 1); /* 00000007 RT_CONTROL_MAP1 */
+ dd_emit(ctx, 1, 2); /* 00000007 RT_CONTROL_MAP2 */
+ dd_emit(ctx, 1, 3); /* 00000007 RT_CONTROL_MAP3 */
+ dd_emit(ctx, 1, 4); /* 00000007 RT_CONTROL_MAP4 */
+ dd_emit(ctx, 1, 5); /* 00000007 RT_CONTROL_MAP5 */
+ dd_emit(ctx, 1, 6); /* 00000007 RT_CONTROL_MAP6 */
+ dd_emit(ctx, 1, 7); /* 00000007 RT_CONTROL_MAP7 */
+ dd_emit(ctx, 1, 1); /* 0000000f RT_CONTROL_COUNT */
+ dd_emit(ctx, 8, 0); /* 00000001 RT_HORIZ_UNK */
+ dd_emit(ctx, 8, 0); /* ffffffff RT_ADDRESS_LOW */
+ dd_emit(ctx, 1, 0xcf); /* 000000ff RT_FORMAT */
+ dd_emit(ctx, 7, 0); /* 000000ff RT_FORMAT */
+ if (dev_priv->chipset != 0x50)
+ dd_emit(ctx, 3, 0); /* 1, 1, 1 */
+ else
+ dd_emit(ctx, 2, 0); /* 1, 1 */
+ dd_emit(ctx, 1, 0); /* ffffffff GP_ENABLE */
+ dd_emit(ctx, 1, 0x80); /* 0000ffff GP_VERTEX_OUTPUT_COUNT*/
+ dd_emit(ctx, 1, 4); /* 000000ff GP_REG_ALLOC_RESULT */
+ dd_emit(ctx, 1, 4); /* 000000ff GP_RESULT_MAP_SIZE */
+ if (IS_NVA3F(dev_priv->chipset)) {
+ dd_emit(ctx, 1, 3); /* 00000003 */
+ dd_emit(ctx, 1, 0); /* 00000001 UNK1418. Alone. */
+ }
+ if (dev_priv->chipset != 0x50)
+ dd_emit(ctx, 1, 3); /* 00000003 UNK15AC */
+ dd_emit(ctx, 1, 1); /* ffffffff RASTERIZE_ENABLE */
+ dd_emit(ctx, 1, 0); /* 00000001 FP_CONTROL.EXPORTS_Z */
+ if (dev_priv->chipset != 0x50)
+ dd_emit(ctx, 1, 0); /* 00000001 FP_CONTROL.MULTIPLE_RESULTS */
+ dd_emit(ctx, 1, 0x12); /* 000000ff FP_INTERPOLANT_CTRL.COUNT */
+ dd_emit(ctx, 1, 0x10); /* 000000ff FP_INTERPOLANT_CTRL.COUNT_NONFLAT */
+ dd_emit(ctx, 1, 0xc); /* 000000ff FP_INTERPOLANT_CTRL.OFFSET */
+ dd_emit(ctx, 1, 1); /* 00000001 FP_INTERPOLANT_CTRL.UMASK.W */
+ dd_emit(ctx, 1, 0); /* 00000001 FP_INTERPOLANT_CTRL.UMASK.X */
+ dd_emit(ctx, 1, 0); /* 00000001 FP_INTERPOLANT_CTRL.UMASK.Y */
+ dd_emit(ctx, 1, 0); /* 00000001 FP_INTERPOLANT_CTRL.UMASK.Z */
+ dd_emit(ctx, 1, 4); /* 000000ff FP_RESULT_COUNT */
+ dd_emit(ctx, 1, 2); /* ffffffff REG_MODE */
+ dd_emit(ctx, 1, 4); /* 000000ff FP_REG_ALLOC_TEMP */
+ if (dev_priv->chipset >= 0xa0)
+ dd_emit(ctx, 1, 0); /* ffffffff */
+ dd_emit(ctx, 1, 0); /* 00000001 GP_BUILTIN_RESULT_EN.LAYER_IDX */
+ dd_emit(ctx, 1, 0); /* ffffffff STRMOUT_ENABLE */
+ dd_emit(ctx, 1, 0x3fffff); /* 003fffff TIC_LIMIT */
+ dd_emit(ctx, 1, 0x1fff); /* 000fffff TSC_LIMIT */
+ dd_emit(ctx, 1, 0); /* 00000001 VERTEX_TWO_SIDE_ENABLE*/
+ if (dev_priv->chipset != 0x50)
+ dd_emit(ctx, 8, 0); /* 00000001 */
+ if (dev_priv->chipset >= 0xa0) {
+ dd_emit(ctx, 1, 1); /* 00000007 VTX_ATTR_DEFINE.COMP */
+ dd_emit(ctx, 1, 1); /* 00000007 VTX_ATTR_DEFINE.SIZE */
+ dd_emit(ctx, 1, 2); /* 00000007 VTX_ATTR_DEFINE.TYPE */
+ dd_emit(ctx, 1, 0); /* 000000ff VTX_ATTR_DEFINE.ATTR */
+ }
+ dd_emit(ctx, 1, 4); /* 0000007f VP_RESULT_MAP_SIZE */
+ dd_emit(ctx, 1, 0x14); /* 0000001f ZETA_FORMAT */
+ dd_emit(ctx, 1, 1); /* 00000001 ZETA_ENABLE */
+ dd_emit(ctx, 1, 0); /* 0000000f VP_TEXTURES_LOG2 */
+ dd_emit(ctx, 1, 0); /* 0000000f VP_SAMPLERS_LOG2 */
+ if (IS_NVA3F(dev_priv->chipset))
+ dd_emit(ctx, 1, 0); /* 00000001 */
+ dd_emit(ctx, 1, 2); /* 00000003 POLYGON_MODE_BACK */
+ if (dev_priv->chipset >= 0xa0)
+ dd_emit(ctx, 1, 0); /* 00000003 VTX_ATTR_DEFINE.SIZE - 1 */
+ dd_emit(ctx, 1, 0); /* 0000ffff CB_ADDR_INDEX */
+ if (dev_priv->chipset >= 0xa0)
+ dd_emit(ctx, 1, 0); /* 00000003 */
+ dd_emit(ctx, 1, 0); /* 00000001 CULL_FACE_ENABLE */
+ dd_emit(ctx, 1, 1); /* 00000003 CULL_FACE */
+ dd_emit(ctx, 1, 0); /* 00000001 FRONT_FACE */
+ dd_emit(ctx, 1, 2); /* 00000003 POLYGON_MODE_FRONT */
+ dd_emit(ctx, 1, 0x1000); /* 00007fff UNK141C */
+ if (dev_priv->chipset != 0x50) {
+ dd_emit(ctx, 1, 0xe00); /* 7fff */
+ dd_emit(ctx, 1, 0x1000); /* 7fff */
+ dd_emit(ctx, 1, 0x1e00); /* 7fff */
+ }
+ dd_emit(ctx, 1, 0); /* 00000001 BEGIN_END_ACTIVE */
+ dd_emit(ctx, 1, 1); /* 00000001 POLYGON_MODE_??? */
+ dd_emit(ctx, 1, 1); /* 000000ff GP_REG_ALLOC_TEMP / 4 rounded up */
+ dd_emit(ctx, 1, 1); /* 000000ff FP_REG_ALLOC_TEMP... without /4? */
+ dd_emit(ctx, 1, 1); /* 000000ff VP_REG_ALLOC_TEMP / 4 rounded up */
+ dd_emit(ctx, 1, 1); /* 00000001 */
+ dd_emit(ctx, 1, 0); /* 00000001 */
+ dd_emit(ctx, 1, 0); /* 00000001 VTX_ATTR_MASK_UNK0 nonempty */
+ dd_emit(ctx, 1, 0); /* 00000001 VTX_ATTR_MASK_UNK1 nonempty */
+ dd_emit(ctx, 1, 0x200); /* 0003ffff GP_VERTEX_OUTPUT_COUNT*GP_REG_ALLOC_RESULT */
+ if (IS_NVA3F(dev_priv->chipset))
+ dd_emit(ctx, 1, 0x200);
+ dd_emit(ctx, 1, 0); /* 00000001 */
+ if (dev_priv->chipset < 0xa0) {
+ dd_emit(ctx, 1, 1); /* 00000001 */
+ dd_emit(ctx, 1, 0x70); /* 000000ff */
+ dd_emit(ctx, 1, 0x80); /* 000000ff */
+ dd_emit(ctx, 1, 0); /* 000000ff */
+ dd_emit(ctx, 1, 0); /* 00000001 */
+ dd_emit(ctx, 1, 1); /* 00000001 */
+ dd_emit(ctx, 1, 0x70); /* 000000ff */
+ dd_emit(ctx, 1, 0x80); /* 000000ff */
+ dd_emit(ctx, 1, 0); /* 000000ff */
+ } else {
+ dd_emit(ctx, 1, 1); /* 00000001 */
+ dd_emit(ctx, 1, 0xf0); /* 000000ff */
+ dd_emit(ctx, 1, 0xff); /* 000000ff */
+ dd_emit(ctx, 1, 0); /* 000000ff */
+ dd_emit(ctx, 1, 0); /* 00000001 */
+ dd_emit(ctx, 1, 1); /* 00000001 */
+ dd_emit(ctx, 1, 0xf0); /* 000000ff */
+ dd_emit(ctx, 1, 0xff); /* 000000ff */
+ dd_emit(ctx, 1, 0); /* 000000ff */
+ dd_emit(ctx, 1, 9); /* 0000003f UNK114C.COMP,SIZE */
+ }
+
+ /* eng2d state */
+ dd_emit(ctx, 1, 0); /* 00000001 eng2d COLOR_KEY_ENABLE */
+ dd_emit(ctx, 1, 0); /* 00000007 eng2d COLOR_KEY_FORMAT */
+ dd_emit(ctx, 1, 1); /* ffffffff eng2d DST_DEPTH */
+ dd_emit(ctx, 1, 0xcf); /* 000000ff eng2d DST_FORMAT */
+ dd_emit(ctx, 1, 0); /* ffffffff eng2d DST_LAYER */
+ dd_emit(ctx, 1, 1); /* 00000001 eng2d DST_LINEAR */
+ dd_emit(ctx, 1, 0); /* 00000007 eng2d PATTERN_COLOR_FORMAT */
+ dd_emit(ctx, 1, 0); /* 00000007 eng2d OPERATION */
+ dd_emit(ctx, 1, 0); /* 00000003 eng2d PATTERN_SELECT */
+ dd_emit(ctx, 1, 0xcf); /* 000000ff eng2d SIFC_FORMAT */
+ dd_emit(ctx, 1, 0); /* 00000001 eng2d SIFC_BITMAP_ENABLE */
+ dd_emit(ctx, 1, 2); /* 00000003 eng2d SIFC_BITMAP_UNK808 */
+ dd_emit(ctx, 1, 0); /* ffffffff eng2d BLIT_DU_DX_FRACT */
+ dd_emit(ctx, 1, 1); /* ffffffff eng2d BLIT_DU_DX_INT */
+ dd_emit(ctx, 1, 0); /* ffffffff eng2d BLIT_DV_DY_FRACT */
+ dd_emit(ctx, 1, 1); /* ffffffff eng2d BLIT_DV_DY_INT */
+ dd_emit(ctx, 1, 0); /* 00000001 eng2d BLIT_CONTROL_FILTER */
+ dd_emit(ctx, 1, 0xcf); /* 000000ff eng2d DRAW_COLOR_FORMAT */
+ dd_emit(ctx, 1, 0xcf); /* 000000ff eng2d SRC_FORMAT */
+ dd_emit(ctx, 1, 1); /* 00000001 eng2d SRC_LINEAR #2 */
+
+ num = ctx->ctxvals_pos - base;
+ ctx->ctxvals_pos = base;
+ if (IS_NVA3F(dev_priv->chipset))
+ cp_ctx(ctx, 0x404800, num);
+ else
+ cp_ctx(ctx, 0x405400, num);
+}
+
/*
* xfer areas. These are a pain.
*
@@ -990,28 +1130,33 @@ nv50_graph_construct_mmio(struct nouveau_grctx *ctx)
* without the help of ctxprog.
*/
-static inline void
+static void
xf_emit(struct nouveau_grctx *ctx, int num, uint32_t val) {
int i;
if (val && ctx->mode == NOUVEAU_GRCTX_VALS)
for (i = 0; i < num; i++)
- nv_wo32(ctx->dev, ctx->data, ctx->ctxvals_pos + (i << 3), val);
+ nv_wo32(ctx->data, 4 * (ctx->ctxvals_pos + (i << 3)), val);
ctx->ctxvals_pos += num << 3;
}
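Unlike dd_emit(), xf_emit() writes with a stride of eight 32-bit words (pos + (i << 3)) and advances ctxvals_pos by num << 3, matching the interleaving of the eight xfer strands set up below with ctx->ctxvals_pos = offset + strand. Note also that the reworked nv_wo32() call apparently takes a byte offset into the context object, hence the 4 * factor. A sketch of what a single call touches (value arbitrary):

	/* Illustration only: emitting three values into strand 2. */
	ctx->ctxvals_pos = offset + 2;  /* strand 2 starts two words in       */
	xf_emit(ctx, 3, 0xdeadbeef);    /* writes words offset+2, +10 and +18 */
	/* ctxvals_pos advances by 3 << 3 = 24, i.e. three rows of 8 words.   */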
/* Gene declarations... */
+static void nv50_graph_construct_gene_dispatch(struct nouveau_grctx *ctx);
static void nv50_graph_construct_gene_m2mf(struct nouveau_grctx *ctx);
-static void nv50_graph_construct_gene_unk1(struct nouveau_grctx *ctx);
-static void nv50_graph_construct_gene_unk2(struct nouveau_grctx *ctx);
-static void nv50_graph_construct_gene_unk3(struct nouveau_grctx *ctx);
-static void nv50_graph_construct_gene_unk4(struct nouveau_grctx *ctx);
-static void nv50_graph_construct_gene_unk5(struct nouveau_grctx *ctx);
-static void nv50_graph_construct_gene_unk6(struct nouveau_grctx *ctx);
-static void nv50_graph_construct_gene_unk7(struct nouveau_grctx *ctx);
-static void nv50_graph_construct_gene_unk8(struct nouveau_grctx *ctx);
-static void nv50_graph_construct_gene_unk9(struct nouveau_grctx *ctx);
-static void nv50_graph_construct_gene_unk10(struct nouveau_grctx *ctx);
+static void nv50_graph_construct_gene_ccache(struct nouveau_grctx *ctx);
+static void nv50_graph_construct_gene_unk10xx(struct nouveau_grctx *ctx);
+static void nv50_graph_construct_gene_unk14xx(struct nouveau_grctx *ctx);
+static void nv50_graph_construct_gene_zcull(struct nouveau_grctx *ctx);
+static void nv50_graph_construct_gene_clipid(struct nouveau_grctx *ctx);
+static void nv50_graph_construct_gene_unk24xx(struct nouveau_grctx *ctx);
+static void nv50_graph_construct_gene_vfetch(struct nouveau_grctx *ctx);
+static void nv50_graph_construct_gene_eng2d(struct nouveau_grctx *ctx);
+static void nv50_graph_construct_gene_csched(struct nouveau_grctx *ctx);
+static void nv50_graph_construct_gene_unk1cxx(struct nouveau_grctx *ctx);
+static void nv50_graph_construct_gene_strmout(struct nouveau_grctx *ctx);
+static void nv50_graph_construct_gene_unk34xx(struct nouveau_grctx *ctx);
+static void nv50_graph_construct_gene_ropm1(struct nouveau_grctx *ctx);
+static void nv50_graph_construct_gene_ropm2(struct nouveau_grctx *ctx);
static void nv50_graph_construct_gene_ropc(struct nouveau_grctx *ctx);
static void nv50_graph_construct_xfer_tp(struct nouveau_grctx *ctx);
@@ -1030,102 +1175,32 @@ nv50_graph_construct_xfer1(struct nouveau_grctx *ctx)
if (dev_priv->chipset < 0xa0) {
/* Strand 0 */
ctx->ctxvals_pos = offset;
- switch (dev_priv->chipset) {
- case 0x50:
- xf_emit(ctx, 0x99, 0);
- break;
- case 0x84:
- case 0x86:
- xf_emit(ctx, 0x384, 0);
- break;
- case 0x92:
- case 0x94:
- case 0x96:
- case 0x98:
- xf_emit(ctx, 0x380, 0);
- break;
- }
- nv50_graph_construct_gene_m2mf (ctx);
- switch (dev_priv->chipset) {
- case 0x50:
- case 0x84:
- case 0x86:
- case 0x98:
- xf_emit(ctx, 0x4c4, 0);
- break;
- case 0x92:
- case 0x94:
- case 0x96:
- xf_emit(ctx, 0x984, 0);
- break;
- }
- nv50_graph_construct_gene_unk5(ctx);
- if (dev_priv->chipset == 0x50)
- xf_emit(ctx, 0xa, 0);
- else
- xf_emit(ctx, 0xb, 0);
- nv50_graph_construct_gene_unk4(ctx);
- nv50_graph_construct_gene_unk3(ctx);
+ nv50_graph_construct_gene_dispatch(ctx);
+ nv50_graph_construct_gene_m2mf(ctx);
+ nv50_graph_construct_gene_unk24xx(ctx);
+ nv50_graph_construct_gene_clipid(ctx);
+ nv50_graph_construct_gene_zcull(ctx);
if ((ctx->ctxvals_pos-offset)/8 > size)
size = (ctx->ctxvals_pos-offset)/8;
/* Strand 1 */
ctx->ctxvals_pos = offset + 0x1;
- nv50_graph_construct_gene_unk6(ctx);
- nv50_graph_construct_gene_unk7(ctx);
- nv50_graph_construct_gene_unk8(ctx);
- switch (dev_priv->chipset) {
- case 0x50:
- case 0x92:
- xf_emit(ctx, 0xfb, 0);
- break;
- case 0x84:
- xf_emit(ctx, 0xd3, 0);
- break;
- case 0x94:
- case 0x96:
- xf_emit(ctx, 0xab, 0);
- break;
- case 0x86:
- case 0x98:
- xf_emit(ctx, 0x6b, 0);
- break;
- }
- xf_emit(ctx, 2, 0x4e3bfdf);
- xf_emit(ctx, 4, 0);
- xf_emit(ctx, 1, 0x0fac6881);
- xf_emit(ctx, 0xb, 0);
- xf_emit(ctx, 2, 0x4e3bfdf);
+ nv50_graph_construct_gene_vfetch(ctx);
+ nv50_graph_construct_gene_eng2d(ctx);
+ nv50_graph_construct_gene_csched(ctx);
+ nv50_graph_construct_gene_ropm1(ctx);
+ nv50_graph_construct_gene_ropm2(ctx);
if ((ctx->ctxvals_pos-offset)/8 > size)
size = (ctx->ctxvals_pos-offset)/8;
/* Strand 2 */
ctx->ctxvals_pos = offset + 0x2;
- switch (dev_priv->chipset) {
- case 0x50:
- case 0x92:
- xf_emit(ctx, 0xa80, 0);
- break;
- case 0x84:
- xf_emit(ctx, 0xa7e, 0);
- break;
- case 0x94:
- case 0x96:
- xf_emit(ctx, 0xa7c, 0);
- break;
- case 0x86:
- case 0x98:
- xf_emit(ctx, 0xa7a, 0);
- break;
- }
- xf_emit(ctx, 1, 0x3fffff);
- xf_emit(ctx, 2, 0);
- xf_emit(ctx, 1, 0x1fff);
- xf_emit(ctx, 0xe, 0);
- nv50_graph_construct_gene_unk9(ctx);
- nv50_graph_construct_gene_unk2(ctx);
- nv50_graph_construct_gene_unk1(ctx);
- nv50_graph_construct_gene_unk10(ctx);
+ nv50_graph_construct_gene_ccache(ctx);
+ nv50_graph_construct_gene_unk1cxx(ctx);
+ nv50_graph_construct_gene_strmout(ctx);
+ nv50_graph_construct_gene_unk14xx(ctx);
+ nv50_graph_construct_gene_unk10xx(ctx);
+ nv50_graph_construct_gene_unk34xx(ctx);
if ((ctx->ctxvals_pos-offset)/8 > size)
size = (ctx->ctxvals_pos-offset)/8;
@@ -1150,86 +1225,46 @@ nv50_graph_construct_xfer1(struct nouveau_grctx *ctx)
} else {
/* Strand 0 */
ctx->ctxvals_pos = offset;
- if (dev_priv->chipset > 0xa0 && dev_priv->chipset < 0xaa)
- xf_emit(ctx, 0x385, 0);
- else
- xf_emit(ctx, 0x384, 0);
+ nv50_graph_construct_gene_dispatch(ctx);
nv50_graph_construct_gene_m2mf(ctx);
- xf_emit(ctx, 0x950, 0);
- nv50_graph_construct_gene_unk10(ctx);
- xf_emit(ctx, 1, 0x0fac6881);
- if (dev_priv->chipset > 0xa0 && dev_priv->chipset < 0xaa) {
- xf_emit(ctx, 1, 1);
- xf_emit(ctx, 3, 0);
- }
- nv50_graph_construct_gene_unk8(ctx);
- if (dev_priv->chipset == 0xa0)
- xf_emit(ctx, 0x189, 0);
- else if (dev_priv->chipset == 0xa3)
- xf_emit(ctx, 0xd5, 0);
- else if (dev_priv->chipset == 0xa5)
- xf_emit(ctx, 0x99, 0);
- else if (dev_priv->chipset == 0xaa)
- xf_emit(ctx, 0x65, 0);
- else
- xf_emit(ctx, 0x6d, 0);
- nv50_graph_construct_gene_unk9(ctx);
+ nv50_graph_construct_gene_unk34xx(ctx);
+ nv50_graph_construct_gene_csched(ctx);
+ nv50_graph_construct_gene_unk1cxx(ctx);
+ nv50_graph_construct_gene_strmout(ctx);
if ((ctx->ctxvals_pos-offset)/8 > size)
size = (ctx->ctxvals_pos-offset)/8;
/* Strand 1 */
ctx->ctxvals_pos = offset + 1;
- nv50_graph_construct_gene_unk1(ctx);
+ nv50_graph_construct_gene_unk10xx(ctx);
if ((ctx->ctxvals_pos-offset)/8 > size)
size = (ctx->ctxvals_pos-offset)/8;
/* Strand 2 */
ctx->ctxvals_pos = offset + 2;
- if (dev_priv->chipset == 0xa0) {
- nv50_graph_construct_gene_unk2(ctx);
- }
- xf_emit(ctx, 0x36, 0);
- nv50_graph_construct_gene_unk5(ctx);
+ if (dev_priv->chipset == 0xa0)
+ nv50_graph_construct_gene_unk14xx(ctx);
+ nv50_graph_construct_gene_unk24xx(ctx);
if ((ctx->ctxvals_pos-offset)/8 > size)
size = (ctx->ctxvals_pos-offset)/8;
/* Strand 3 */
ctx->ctxvals_pos = offset + 3;
- xf_emit(ctx, 1, 0);
- xf_emit(ctx, 1, 1);
- nv50_graph_construct_gene_unk6(ctx);
+ nv50_graph_construct_gene_vfetch(ctx);
if ((ctx->ctxvals_pos-offset)/8 > size)
size = (ctx->ctxvals_pos-offset)/8;
/* Strand 4 */
ctx->ctxvals_pos = offset + 4;
- if (dev_priv->chipset == 0xa0)
- xf_emit(ctx, 0xa80, 0);
- else if (dev_priv->chipset == 0xa3)
- xf_emit(ctx, 0xa7c, 0);
- else
- xf_emit(ctx, 0xa7a, 0);
- xf_emit(ctx, 1, 0x3fffff);
- xf_emit(ctx, 2, 0);
- xf_emit(ctx, 1, 0x1fff);
+ nv50_graph_construct_gene_ccache(ctx);
if ((ctx->ctxvals_pos-offset)/8 > size)
size = (ctx->ctxvals_pos-offset)/8;
/* Strand 5 */
ctx->ctxvals_pos = offset + 5;
- xf_emit(ctx, 1, 0);
- xf_emit(ctx, 1, 0x0fac6881);
- xf_emit(ctx, 0xb, 0);
- xf_emit(ctx, 2, 0x4e3bfdf);
- xf_emit(ctx, 3, 0);
- if (dev_priv->chipset > 0xa0 && dev_priv->chipset < 0xaa)
- xf_emit(ctx, 1, 0x11);
- xf_emit(ctx, 1, 0);
- xf_emit(ctx, 2, 0x4e3bfdf);
- xf_emit(ctx, 2, 0);
- if (dev_priv->chipset > 0xa0 && dev_priv->chipset < 0xaa)
- xf_emit(ctx, 1, 0x11);
- xf_emit(ctx, 1, 0);
+ nv50_graph_construct_gene_ropm2(ctx);
+ nv50_graph_construct_gene_ropm1(ctx);
+ /* per-ROP context */
for (i = 0; i < 8; i++)
if (units & (1<<(i+16)))
nv50_graph_construct_gene_ropc(ctx);
@@ -1238,10 +1273,9 @@ nv50_graph_construct_xfer1(struct nouveau_grctx *ctx)
/* Strand 6 */
ctx->ctxvals_pos = offset + 6;
- nv50_graph_construct_gene_unk3(ctx);
- xf_emit(ctx, 0xb, 0);
- nv50_graph_construct_gene_unk4(ctx);
- nv50_graph_construct_gene_unk7(ctx);
+ nv50_graph_construct_gene_zcull(ctx);
+ nv50_graph_construct_gene_clipid(ctx);
+ nv50_graph_construct_gene_eng2d(ctx);
if (units & (1 << 0))
nv50_graph_construct_xfer_tp(ctx);
if (units & (1 << 1))
@@ -1269,7 +1303,7 @@ nv50_graph_construct_xfer1(struct nouveau_grctx *ctx)
if (units & (1 << 9))
nv50_graph_construct_xfer_tp(ctx);
} else {
- nv50_graph_construct_gene_unk2(ctx);
+ nv50_graph_construct_gene_unk14xx(ctx);
}
if ((ctx->ctxvals_pos-offset)/8 > size)
size = (ctx->ctxvals_pos-offset)/8;
@@ -1290,9 +1324,70 @@ nv50_graph_construct_xfer1(struct nouveau_grctx *ctx)
*/
static void
+nv50_graph_construct_gene_dispatch(struct nouveau_grctx *ctx)
+{
+ /* start of strand 0 */
+ struct drm_nouveau_private *dev_priv = ctx->dev->dev_private;
+ /* SEEK */
+ if (dev_priv->chipset == 0x50)
+ xf_emit(ctx, 5, 0);
+ else if (!IS_NVA3F(dev_priv->chipset))
+ xf_emit(ctx, 6, 0);
+ else
+ xf_emit(ctx, 4, 0);
+ /* SEEK */
+ /* the PGRAPH's internal FIFO */
+ if (dev_priv->chipset == 0x50)
+ xf_emit(ctx, 8*3, 0);
+ else
+ xf_emit(ctx, 0x100*3, 0);
+ /* and another bonus slot?!? */
+ xf_emit(ctx, 3, 0);
+ /* and YET ANOTHER bonus slot? */
+ if (IS_NVA3F(dev_priv->chipset))
+ xf_emit(ctx, 3, 0);
+ /* SEEK */
+ /* CTX_SWITCH: caches of gr objects bound to subchannels. 8 values, last used index */
+ xf_emit(ctx, 9, 0);
+ /* SEEK */
+ xf_emit(ctx, 9, 0);
+ /* SEEK */
+ xf_emit(ctx, 9, 0);
+ /* SEEK */
+ xf_emit(ctx, 9, 0);
+ /* SEEK */
+ if (dev_priv->chipset < 0x90)
+ xf_emit(ctx, 4, 0);
+ /* SEEK */
+ xf_emit(ctx, 2, 0);
+ /* SEEK */
+ xf_emit(ctx, 6*2, 0);
+ xf_emit(ctx, 2, 0);
+ /* SEEK */
+ xf_emit(ctx, 2, 0);
+ /* SEEK */
+ xf_emit(ctx, 6*2, 0);
+ xf_emit(ctx, 2, 0);
+ /* SEEK */
+ if (dev_priv->chipset == 0x50)
+ xf_emit(ctx, 0x1c, 0);
+ else if (dev_priv->chipset < 0xa0)
+ xf_emit(ctx, 0x1e, 0);
+ else
+ xf_emit(ctx, 0x22, 0);
+ /* SEEK */
+ xf_emit(ctx, 0x15, 0);
+}
+
+static void
nv50_graph_construct_gene_m2mf(struct nouveau_grctx *ctx)
{
- /* m2mf state */
+ /* Strand 0, right after dispatch */
+ struct drm_nouveau_private *dev_priv = ctx->dev->dev_private;
+ int smallm2mf = 0;
+ if (dev_priv->chipset < 0x92 || dev_priv->chipset == 0x98)
+ smallm2mf = 1;
+ /* SEEK */
xf_emit (ctx, 1, 0); /* DMA_NOTIFY instance >> 4 */
xf_emit (ctx, 1, 0); /* DMA_BUFFER_IN instance >> 4 */
xf_emit (ctx, 1, 0); /* DMA_BUFFER_OUT instance >> 4 */
@@ -1319,427 +1414,975 @@ nv50_graph_construct_gene_m2mf(struct nouveau_grctx *ctx)
xf_emit (ctx, 1, 0); /* TILING_POSITION_OUT */
xf_emit (ctx, 1, 0); /* OFFSET_IN_HIGH */
xf_emit (ctx, 1, 0); /* OFFSET_OUT_HIGH */
+ /* SEEK */
+ if (smallm2mf)
+ xf_emit(ctx, 0x40, 0); /* 20 * ffffffff, 3ffff */
+ else
+ xf_emit(ctx, 0x100, 0); /* 80 * ffffffff, 3ffff */
+ xf_emit(ctx, 4, 0); /* 1f/7f, 0, 1f/7f, 0 [1f for smallm2mf, 7f otherwise] */
+ /* SEEK */
+ if (smallm2mf)
+ xf_emit(ctx, 0x400, 0); /* ffffffff */
+ else
+ xf_emit(ctx, 0x800, 0); /* ffffffff */
+ xf_emit(ctx, 4, 0); /* ff/1ff, 0, 0, 0 [ff for smallm2mf, 1ff otherwise] */
+ /* SEEK */
+ xf_emit(ctx, 0x40, 0); /* 20 * bits ffffffff, 3ffff */
+ xf_emit(ctx, 0x6, 0); /* 1f, 0, 1f, 0, 1f, 0 */
}
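The smallm2mf flag selects between two layouts for the large zero-filled areas emitted after the m2mf method state: chipsets below 0x92 plus 0x98 get the smaller variant (0x40 and 0x400 words), everything else the larger one (0x100 and 0x800 words). The predicate is simple enough to restate as a helper; the name below is illustrative only, not something the driver defines:

	/* Hypothetical helper, equivalent to the smallm2mf test above. */
	static inline int nv50_m2mf_is_small(int chipset)
	{
		return chipset < 0x92 || chipset == 0x98;
	}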
static void
-nv50_graph_construct_gene_unk1(struct nouveau_grctx *ctx)
+nv50_graph_construct_gene_ccache(struct nouveau_grctx *ctx)
{
struct drm_nouveau_private *dev_priv = ctx->dev->dev_private;
- /* end of area 2 on pre-NVA0, area 1 on NVAx */
- xf_emit(ctx, 2, 4);
- xf_emit(ctx, 1, 0);
- xf_emit(ctx, 1, 0x80);
- xf_emit(ctx, 1, 4);
- xf_emit(ctx, 1, 0x80c14);
- xf_emit(ctx, 1, 0);
- if (dev_priv->chipset == 0x50)
- xf_emit(ctx, 1, 0x3ff);
- else
- xf_emit(ctx, 1, 0x7ff);
+ xf_emit(ctx, 2, 0); /* RO */
+ xf_emit(ctx, 0x800, 0); /* ffffffff */
switch (dev_priv->chipset) {
case 0x50:
- case 0x86:
- case 0x98:
- case 0xaa:
- case 0xac:
- xf_emit(ctx, 0x542, 0);
+ case 0x92:
+ case 0xa0:
+ xf_emit(ctx, 0x2b, 0);
break;
case 0x84:
- case 0x92:
+ xf_emit(ctx, 0x29, 0);
+ break;
case 0x94:
case 0x96:
- xf_emit(ctx, 0x942, 0);
- break;
- case 0xa0:
case 0xa3:
- xf_emit(ctx, 0x2042, 0);
+ xf_emit(ctx, 0x27, 0);
break;
+ case 0x86:
+ case 0x98:
case 0xa5:
case 0xa8:
- xf_emit(ctx, 0x842, 0);
+ case 0xaa:
+ case 0xac:
+ case 0xaf:
+ xf_emit(ctx, 0x25, 0);
break;
}
- xf_emit(ctx, 2, 4);
- xf_emit(ctx, 1, 0);
- xf_emit(ctx, 1, 0x80);
- xf_emit(ctx, 1, 4);
- xf_emit(ctx, 1, 1);
- xf_emit(ctx, 1, 0);
- xf_emit(ctx, 1, 0x27);
- xf_emit(ctx, 1, 0);
- xf_emit(ctx, 1, 0x26);
- xf_emit(ctx, 3, 0);
+ /* CB bindings, 0x80 of them. first word is address >> 8, second is
+ * size >> 4 | valid << 24 */
+ xf_emit(ctx, 0x100, 0); /* ffffffff CB_DEF */
+ xf_emit(ctx, 1, 0); /* 0000007f CB_ADDR_BUFFER */
+ xf_emit(ctx, 1, 0); /* 0 */
+ xf_emit(ctx, 0x30, 0); /* ff SET_PROGRAM_CB */
+ xf_emit(ctx, 1, 0); /* 3f last SET_PROGRAM_CB */
+ xf_emit(ctx, 4, 0); /* RO */
+ xf_emit(ctx, 0x100, 0); /* ffffffff */
+ xf_emit(ctx, 8, 0); /* 1f, 0, 0, ... */
+ xf_emit(ctx, 8, 0); /* ffffffff */
+ xf_emit(ctx, 4, 0); /* ffffffff */
+ xf_emit(ctx, 1, 0); /* 3 */
+ xf_emit(ctx, 1, 0); /* ffffffff */
+ xf_emit(ctx, 1, 0); /* 0000ffff DMA_CODE_CB */
+ xf_emit(ctx, 1, 0); /* 0000ffff DMA_TIC */
+ xf_emit(ctx, 1, 0); /* 0000ffff DMA_TSC */
+ xf_emit(ctx, 1, 0); /* 00000001 LINKED_TSC */
+ xf_emit(ctx, 1, 0); /* 000000ff TIC_ADDRESS_HIGH */
+ xf_emit(ctx, 1, 0); /* ffffffff TIC_ADDRESS_LOW */
+ xf_emit(ctx, 1, 0x3fffff); /* 003fffff TIC_LIMIT */
+ xf_emit(ctx, 1, 0); /* 000000ff TSC_ADDRESS_HIGH */
+ xf_emit(ctx, 1, 0); /* ffffffff TSC_ADDRESS_LOW */
+ xf_emit(ctx, 1, 0x1fff); /* 000fffff TSC_LIMIT */
+ xf_emit(ctx, 1, 0); /* 000000ff VP_ADDRESS_HIGH */
+ xf_emit(ctx, 1, 0); /* ffffffff VP_ADDRESS_LOW */
+ xf_emit(ctx, 1, 0); /* 00ffffff VP_START_ID */
+ xf_emit(ctx, 1, 0); /* 000000ff CB_DEF_ADDRESS_HIGH */
+ xf_emit(ctx, 1, 0); /* ffffffff CB_DEF_ADDRESS_LOW */
+ xf_emit(ctx, 1, 0); /* 00000001 GP_ENABLE */
+ xf_emit(ctx, 1, 0); /* 000000ff GP_ADDRESS_HIGH */
+ xf_emit(ctx, 1, 0); /* ffffffff GP_ADDRESS_LOW */
+ xf_emit(ctx, 1, 0); /* 00ffffff GP_START_ID */
+ xf_emit(ctx, 1, 0); /* 000000ff FP_ADDRESS_HIGH */
+ xf_emit(ctx, 1, 0); /* ffffffff FP_ADDRESS_LOW */
+ xf_emit(ctx, 1, 0); /* 00ffffff FP_START_ID */
}
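The comment near the top of nv50_graph_construct_gene_ccache() describes the constant-buffer binding table: 0x80 entries of two words each (hence the 0x100-word CB_DEF emit), the first word being the buffer address shifted right by 8 and the second being the size shifted right by 4 with a valid flag at bit 24. A worked example of packing one entry under that description (variable names are illustrative, not from the driver):

	/* Illustration only: pack one CB_DEF entry as the comment describes. */
	uint32_t cb_addr = 0x12345600;                 /* 256-byte aligned    */
	uint32_t cb_size = 0x200;                      /* bytes, 16-aligned   */
	uint32_t word0   = cb_addr >> 8;               /* = 0x00123456        */
	uint32_t word1   = (cb_size >> 4) | (1 << 24); /* = 0x01000020        */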
static void
-nv50_graph_construct_gene_unk10(struct nouveau_grctx *ctx)
+nv50_graph_construct_gene_unk10xx(struct nouveau_grctx *ctx)
{
+ struct drm_nouveau_private *dev_priv = ctx->dev->dev_private;
+ int i;
/* end of area 2 on pre-NVA0, area 1 on NVAx */
- xf_emit(ctx, 0x10, 0x04000000);
- xf_emit(ctx, 0x24, 0);
- xf_emit(ctx, 2, 0x04e3bfdf);
- xf_emit(ctx, 2, 0);
- xf_emit(ctx, 1, 0x1fe21);
+ xf_emit(ctx, 1, 4); /* 000000ff GP_RESULT_MAP_SIZE */
+ xf_emit(ctx, 1, 4); /* 0000007f VP_RESULT_MAP_SIZE */
+ xf_emit(ctx, 1, 0); /* 00000001 GP_ENABLE */
+ xf_emit(ctx, 1, 0x80); /* 0000ffff GP_VERTEX_OUTPUT_COUNT */
+ xf_emit(ctx, 1, 4); /* 000000ff GP_REG_ALLOC_RESULT */
+ xf_emit(ctx, 1, 0x80c14); /* 01ffffff SEMANTIC_COLOR */
+ xf_emit(ctx, 1, 0); /* 00000001 VERTEX_TWO_SIDE_ENABLE */
+ if (dev_priv->chipset == 0x50)
+ xf_emit(ctx, 1, 0x3ff);
+ else
+ xf_emit(ctx, 1, 0x7ff); /* 000007ff */
+ xf_emit(ctx, 1, 0); /* 111/113 */
+ xf_emit(ctx, 1, 0); /* ffffffff tesla UNK1A30 */
+ for (i = 0; i < 8; i++) {
+ switch (dev_priv->chipset) {
+ case 0x50:
+ case 0x86:
+ case 0x98:
+ case 0xaa:
+ case 0xac:
+ xf_emit(ctx, 0xa0, 0); /* ffffffff */
+ break;
+ case 0x84:
+ case 0x92:
+ case 0x94:
+ case 0x96:
+ xf_emit(ctx, 0x120, 0);
+ break;
+ case 0xa5:
+ case 0xa8:
+ xf_emit(ctx, 0x100, 0); /* ffffffff */
+ break;
+ case 0xa0:
+ case 0xa3:
+ case 0xaf:
+ xf_emit(ctx, 0x400, 0); /* ffffffff */
+ break;
+ }
+ xf_emit(ctx, 4, 0); /* 3f, 0, 0, 0 */
+ xf_emit(ctx, 4, 0); /* ffffffff */
+ }
+ xf_emit(ctx, 1, 4); /* 000000ff GP_RESULT_MAP_SIZE */
+ xf_emit(ctx, 1, 4); /* 0000007f VP_RESULT_MAP_SIZE */
+ xf_emit(ctx, 1, 0); /* 00000001 GP_ENABLE */
+ xf_emit(ctx, 1, 0x80); /* 0000ffff GP_VERTEX_OUTPUT_COUNT */
+ xf_emit(ctx, 1, 4); /* 000000ff GP_REG_ALLOC_TEMP */
+ xf_emit(ctx, 1, 1); /* 00000001 RASTERIZE_ENABLE */
+ xf_emit(ctx, 1, 0); /* 00000001 tesla UNK1900 */
+ xf_emit(ctx, 1, 0x27); /* 000000ff UNK0FD4 */
+ xf_emit(ctx, 1, 0); /* 0001ffff GP_BUILTIN_RESULT_EN */
+ xf_emit(ctx, 1, 0x26); /* 000000ff SEMANTIC_LAYER */
+ xf_emit(ctx, 1, 0); /* ffffffff tesla UNK1A30 */
+}
+
+static void
+nv50_graph_construct_gene_unk34xx(struct nouveau_grctx *ctx)
+{
+ struct drm_nouveau_private *dev_priv = ctx->dev->dev_private;
+ /* end of area 2 on pre-NVA0, area 1 on NVAx */
+ xf_emit(ctx, 1, 0); /* 00000001 VIEWPORT_CLIP_RECTS_EN */
+ xf_emit(ctx, 1, 0); /* 00000003 VIEWPORT_CLIP_MODE */
+ xf_emit(ctx, 0x10, 0x04000000); /* 07ffffff VIEWPORT_CLIP_HORIZ*8, VIEWPORT_CLIP_VERT*8 */
+ xf_emit(ctx, 1, 0); /* 00000001 POLYGON_STIPPLE_ENABLE */
+ xf_emit(ctx, 0x20, 0); /* ffffffff POLYGON_STIPPLE */
+ xf_emit(ctx, 2, 0); /* 00007fff WINDOW_OFFSET_XY */
+ xf_emit(ctx, 1, 0); /* ffff0ff3 */
+ xf_emit(ctx, 1, 0x04e3bfdf); /* ffffffff UNK0D64 */
+ xf_emit(ctx, 1, 0x04e3bfdf); /* ffffffff UNK0DF4 */
+ xf_emit(ctx, 1, 0); /* 00000003 WINDOW_ORIGIN */
+ xf_emit(ctx, 1, 0); /* 00000007 */
+ xf_emit(ctx, 1, 0x1fe21); /* 0001ffff tesla UNK0FAC */
+ if (dev_priv->chipset >= 0xa0)
+ xf_emit(ctx, 1, 0x0fac6881);
+ if (IS_NVA3F(dev_priv->chipset)) {
+ xf_emit(ctx, 1, 1);
+ xf_emit(ctx, 3, 0);
+ }
}
static void
-nv50_graph_construct_gene_unk2(struct nouveau_grctx *ctx)
+nv50_graph_construct_gene_unk14xx(struct nouveau_grctx *ctx)
{
struct drm_nouveau_private *dev_priv = ctx->dev->dev_private;
/* middle of area 2 on pre-NVA0, beginning of area 2 on NVA0, area 7 on >NVA0 */
if (dev_priv->chipset != 0x50) {
- xf_emit(ctx, 5, 0);
- xf_emit(ctx, 1, 0x80c14);
- xf_emit(ctx, 2, 0);
- xf_emit(ctx, 1, 0x804);
- xf_emit(ctx, 1, 0);
- xf_emit(ctx, 2, 4);
- xf_emit(ctx, 1, 0x8100c12);
+ xf_emit(ctx, 5, 0); /* ffffffff */
+ xf_emit(ctx, 1, 0x80c14); /* 01ffffff SEMANTIC_COLOR */
+ xf_emit(ctx, 1, 0); /* 00000001 */
+ xf_emit(ctx, 1, 0); /* 000003ff */
+ xf_emit(ctx, 1, 0x804); /* 00000fff SEMANTIC_CLIP */
+ xf_emit(ctx, 1, 0); /* 00000001 */
+ xf_emit(ctx, 2, 4); /* 7f, ff */
+ xf_emit(ctx, 1, 0x8100c12); /* 1fffffff FP_INTERPOLANT_CTRL */
}
- xf_emit(ctx, 1, 0);
- xf_emit(ctx, 2, 4);
- xf_emit(ctx, 1, 0);
- xf_emit(ctx, 1, 0x10);
- if (dev_priv->chipset == 0x50)
- xf_emit(ctx, 3, 0);
- else
- xf_emit(ctx, 4, 0);
- xf_emit(ctx, 1, 0x804);
- xf_emit(ctx, 1, 1);
- xf_emit(ctx, 1, 0x1a);
+ xf_emit(ctx, 1, 0); /* ffffffff tesla UNK1A30 */
+ xf_emit(ctx, 1, 4); /* 0000007f VP_RESULT_MAP_SIZE */
+ xf_emit(ctx, 1, 4); /* 000000ff GP_RESULT_MAP_SIZE */
+ xf_emit(ctx, 1, 0); /* 00000001 GP_ENABLE */
+ xf_emit(ctx, 1, 0x10); /* 7f/ff VIEW_VOLUME_CLIP_CTRL */
+ xf_emit(ctx, 1, 0); /* 000000ff VP_CLIP_DISTANCE_ENABLE */
if (dev_priv->chipset != 0x50)
- xf_emit(ctx, 1, 0x7f);
- xf_emit(ctx, 1, 0);
- xf_emit(ctx, 1, 1);
- xf_emit(ctx, 1, 0x80c14);
- xf_emit(ctx, 1, 0);
- xf_emit(ctx, 1, 0x8100c12);
- xf_emit(ctx, 2, 4);
- xf_emit(ctx, 1, 0);
- xf_emit(ctx, 1, 0x10);
- xf_emit(ctx, 3, 0);
- xf_emit(ctx, 1, 1);
- xf_emit(ctx, 1, 0x8100c12);
- xf_emit(ctx, 6, 0);
- if (dev_priv->chipset == 0x50)
- xf_emit(ctx, 1, 0x3ff);
- else
- xf_emit(ctx, 1, 0x7ff);
- xf_emit(ctx, 1, 0x80c14);
- xf_emit(ctx, 0x38, 0);
- xf_emit(ctx, 1, 1);
- xf_emit(ctx, 2, 0);
- xf_emit(ctx, 1, 0x10);
- xf_emit(ctx, 0x38, 0);
- xf_emit(ctx, 2, 0x88);
- xf_emit(ctx, 2, 0);
- xf_emit(ctx, 1, 4);
- xf_emit(ctx, 0x16, 0);
- xf_emit(ctx, 1, 0x26);
- xf_emit(ctx, 2, 0);
- xf_emit(ctx, 1, 0x3f800000);
- if (dev_priv->chipset > 0xa0 && dev_priv->chipset < 0xaa)
- xf_emit(ctx, 4, 0);
- else
- xf_emit(ctx, 3, 0);
- xf_emit(ctx, 1, 0x1a);
- xf_emit(ctx, 1, 0x10);
+ xf_emit(ctx, 1, 0); /* 3ff */
+ xf_emit(ctx, 1, 0); /* 000000ff tesla UNK1940 */
+ xf_emit(ctx, 1, 0); /* 00000001 tesla UNK0D7C */
+ xf_emit(ctx, 1, 0x804); /* 00000fff SEMANTIC_CLIP */
+ xf_emit(ctx, 1, 1); /* 00000001 VIEWPORT_TRANSFORM_EN */
+ xf_emit(ctx, 1, 0x1a); /* 0000001f POLYGON_MODE */
if (dev_priv->chipset != 0x50)
- xf_emit(ctx, 0x28, 0);
+ xf_emit(ctx, 1, 0x7f); /* 000000ff tesla UNK0FFC */
+ xf_emit(ctx, 1, 0); /* ffffffff tesla UNK1A30 */
+ xf_emit(ctx, 1, 1); /* 00000001 SHADE_MODEL */
+ xf_emit(ctx, 1, 0x80c14); /* 01ffffff SEMANTIC_COLOR */
+ xf_emit(ctx, 1, 0); /* 00000001 tesla UNK1900 */
+ xf_emit(ctx, 1, 0x8100c12); /* 1fffffff FP_INTERPOLANT_CTRL */
+ xf_emit(ctx, 1, 4); /* 0000007f VP_RESULT_MAP_SIZE */
+ xf_emit(ctx, 1, 4); /* 000000ff GP_RESULT_MAP_SIZE */
+ xf_emit(ctx, 1, 0); /* 00000001 GP_ENABLE */
+ xf_emit(ctx, 1, 0x10); /* 7f/ff VIEW_VOLUME_CLIP_CTRL */
+ xf_emit(ctx, 1, 0); /* 00000001 tesla UNK0D7C */
+ xf_emit(ctx, 1, 0); /* 00000001 tesla UNK0F8C */
+ xf_emit(ctx, 1, 0); /* ffffffff tesla UNK1A30 */
+ xf_emit(ctx, 1, 1); /* 00000001 VIEWPORT_TRANSFORM_EN */
+ xf_emit(ctx, 1, 0x8100c12); /* 1fffffff FP_INTERPOLANT_CTRL */
+ xf_emit(ctx, 4, 0); /* ffffffff NOPERSPECTIVE_BITMAP */
+ xf_emit(ctx, 1, 0); /* 00000001 tesla UNK1900 */
+ xf_emit(ctx, 1, 0); /* 0000000f */
+ if (dev_priv->chipset == 0x50)
+ xf_emit(ctx, 1, 0x3ff); /* 000003ff tesla UNK0D68 */
else
- xf_emit(ctx, 0x25, 0);
- xf_emit(ctx, 1, 0x52);
- xf_emit(ctx, 1, 0);
- xf_emit(ctx, 1, 0x26);
- xf_emit(ctx, 1, 0);
- xf_emit(ctx, 2, 4);
- xf_emit(ctx, 1, 0);
- xf_emit(ctx, 1, 0x1a);
- xf_emit(ctx, 2, 0);
- xf_emit(ctx, 1, 0x00ffff00);
- xf_emit(ctx, 1, 0);
+ xf_emit(ctx, 1, 0x7ff); /* 000007ff tesla UNK0D68 */
+ xf_emit(ctx, 1, 0x80c14); /* 01ffffff SEMANTIC_COLOR */
+ xf_emit(ctx, 1, 0); /* 00000001 VERTEX_TWO_SIDE_ENABLE */
+ xf_emit(ctx, 0x30, 0); /* ffffffff VIEWPORT_SCALE: X0, Y0, Z0, X1, Y1, ... */
+ xf_emit(ctx, 3, 0); /* f, 0, 0 */
+ xf_emit(ctx, 3, 0); /* ffffffff last VIEWPORT_SCALE? */
+ xf_emit(ctx, 1, 0); /* ffffffff tesla UNK1A30 */
+ xf_emit(ctx, 1, 1); /* 00000001 VIEWPORT_TRANSFORM_EN */
+ xf_emit(ctx, 1, 0); /* 00000001 tesla UNK1900 */
+ xf_emit(ctx, 1, 0); /* 00000001 tesla UNK1924 */
+ xf_emit(ctx, 1, 0x10); /* 000000ff VIEW_VOLUME_CLIP_CTRL */
+ xf_emit(ctx, 1, 0); /* 00000001 */
+ xf_emit(ctx, 0x30, 0); /* ffffffff VIEWPORT_TRANSLATE */
+ xf_emit(ctx, 3, 0); /* f, 0, 0 */
+ xf_emit(ctx, 3, 0); /* ffffffff */
+ xf_emit(ctx, 1, 0); /* ffffffff tesla UNK1A30 */
+ xf_emit(ctx, 2, 0x88); /* 000001ff tesla UNK19D8 */
+ xf_emit(ctx, 1, 0); /* 00000001 tesla UNK1924 */
+ xf_emit(ctx, 1, 0); /* ffffffff tesla UNK1A30 */
+ xf_emit(ctx, 1, 4); /* 0000000f CULL_MODE */
+ xf_emit(ctx, 2, 0); /* 07ffffff SCREEN_SCISSOR */
+ xf_emit(ctx, 2, 0); /* 00007fff WINDOW_OFFSET_XY */
+ xf_emit(ctx, 1, 0); /* 00000003 WINDOW_ORIGIN */
+ xf_emit(ctx, 0x10, 0); /* 00000001 SCISSOR_ENABLE */
+ xf_emit(ctx, 1, 0); /* 0001ffff GP_BUILTIN_RESULT_EN */
+ xf_emit(ctx, 1, 0x26); /* 000000ff SEMANTIC_LAYER */
+ xf_emit(ctx, 1, 0); /* 00000001 tesla UNK1900 */
+ xf_emit(ctx, 1, 0); /* 0000000f */
+ xf_emit(ctx, 1, 0x3f800000); /* ffffffff LINE_WIDTH */
+ xf_emit(ctx, 1, 0); /* 00000001 LINE_STIPPLE_ENABLE */
+ xf_emit(ctx, 1, 0); /* 00000001 LINE_SMOOTH_ENABLE */
+ xf_emit(ctx, 1, 0); /* 00000007 MULTISAMPLE_SAMPLES_LOG2 */
+ if (IS_NVA3F(dev_priv->chipset))
+ xf_emit(ctx, 1, 0); /* 00000001 */
+ xf_emit(ctx, 1, 0x1a); /* 0000001f POLYGON_MODE */
+ xf_emit(ctx, 1, 0x10); /* 000000ff VIEW_VOLUME_CLIP_CTRL */
+ if (dev_priv->chipset != 0x50) {
+ xf_emit(ctx, 1, 0); /* ffffffff */
+ xf_emit(ctx, 1, 0); /* 00000001 */
+ xf_emit(ctx, 1, 0); /* 000003ff */
+ }
+ xf_emit(ctx, 0x20, 0); /* 10xbits ffffffff, 3fffff. SCISSOR_* */
+ xf_emit(ctx, 1, 0); /* f */
+ xf_emit(ctx, 1, 0); /* 0? */
+ xf_emit(ctx, 1, 0); /* ffffffff */
+ xf_emit(ctx, 1, 0); /* 003fffff */
+ xf_emit(ctx, 1, 0); /* ffffffff tesla UNK1A30 */
+ xf_emit(ctx, 1, 0x52); /* 000001ff SEMANTIC_PTSZ */
+ xf_emit(ctx, 1, 0); /* 0001ffff GP_BUILTIN_RESULT_EN */
+ xf_emit(ctx, 1, 0x26); /* 000000ff SEMANTIC_LAYER */
+ xf_emit(ctx, 1, 0); /* 00000001 tesla UNK1900 */
+ xf_emit(ctx, 1, 4); /* 0000007f VP_RESULT_MAP_SIZE */
+ xf_emit(ctx, 1, 4); /* 000000ff GP_RESULT_MAP_SIZE */
+ xf_emit(ctx, 1, 0); /* 00000001 GP_ENABLE */
+ xf_emit(ctx, 1, 0x1a); /* 0000001f POLYGON_MODE */
+ xf_emit(ctx, 1, 0); /* 00000001 LINE_SMOOTH_ENABLE */
+ xf_emit(ctx, 1, 0); /* 00000001 LINE_STIPPLE_ENABLE */
+ xf_emit(ctx, 1, 0x00ffff00); /* 00ffffff LINE_STIPPLE_PATTERN */
+ xf_emit(ctx, 1, 0); /* 0000000f */
}
static void
-nv50_graph_construct_gene_unk3(struct nouveau_grctx *ctx)
+nv50_graph_construct_gene_zcull(struct nouveau_grctx *ctx)
{
struct drm_nouveau_private *dev_priv = ctx->dev->dev_private;
- /* end of area 0 on pre-NVA0, beginning of area 6 on NVAx */
- xf_emit(ctx, 1, 0x3f);
- xf_emit(ctx, 0xa, 0);
- xf_emit(ctx, 1, 2);
- xf_emit(ctx, 2, 0x04000000);
- xf_emit(ctx, 8, 0);
- xf_emit(ctx, 1, 4);
- xf_emit(ctx, 3, 0);
- xf_emit(ctx, 1, 4);
- if (dev_priv->chipset == 0x50)
- xf_emit(ctx, 0x10, 0);
- else
- xf_emit(ctx, 0x11, 0);
- xf_emit(ctx, 1, 1);
- xf_emit(ctx, 1, 0x1001);
- xf_emit(ctx, 4, 0xffff);
- xf_emit(ctx, 0x20, 0);
- xf_emit(ctx, 0x10, 0x3f800000);
- xf_emit(ctx, 1, 0x10);
- if (dev_priv->chipset == 0x50)
- xf_emit(ctx, 1, 0);
- else
- xf_emit(ctx, 2, 0);
- xf_emit(ctx, 1, 3);
- xf_emit(ctx, 2, 0);
+ /* end of strand 0 on pre-NVA0, beginning of strand 6 on NVAx */
+ /* SEEK */
+ xf_emit(ctx, 1, 0x3f); /* 0000003f UNK1590 */
+ xf_emit(ctx, 1, 0); /* 00000001 ALPHA_TEST_ENABLE */
+ xf_emit(ctx, 1, 0); /* 00000007 MULTISAMPLE_SAMPLES_LOG2 */
+ xf_emit(ctx, 1, 0); /* 00000001 tesla UNK1534 */
+ xf_emit(ctx, 1, 0); /* 00000007 STENCIL_BACK_FUNC_FUNC */
+ xf_emit(ctx, 1, 0); /* 000000ff STENCIL_BACK_FUNC_MASK */
+ xf_emit(ctx, 1, 0); /* 000000ff STENCIL_BACK_FUNC_REF */
+ xf_emit(ctx, 1, 0); /* 000000ff STENCIL_BACK_MASK */
+ xf_emit(ctx, 3, 0); /* 00000007 STENCIL_BACK_OP_FAIL, ZFAIL, ZPASS */
+ xf_emit(ctx, 1, 2); /* 00000003 tesla UNK143C */
+ xf_emit(ctx, 2, 0x04000000); /* 07ffffff tesla UNK0D6C */
+ xf_emit(ctx, 1, 0); /* ffff0ff3 */
+ xf_emit(ctx, 1, 0); /* 00000001 CLIPID_ENABLE */
+ xf_emit(ctx, 2, 0); /* ffffffff DEPTH_BOUNDS */
+ xf_emit(ctx, 1, 0); /* 00000001 */
+ xf_emit(ctx, 1, 0); /* 00000007 DEPTH_TEST_FUNC */
+ xf_emit(ctx, 1, 0); /* 00000001 DEPTH_TEST_ENABLE */
+ xf_emit(ctx, 1, 0); /* 00000001 DEPTH_WRITE_ENABLE */
+ xf_emit(ctx, 1, 4); /* 0000000f CULL_MODE */
+ xf_emit(ctx, 1, 0); /* 0000ffff */
+ xf_emit(ctx, 1, 0); /* 00000001 UNK0FB0 */
+ xf_emit(ctx, 1, 0); /* 00000001 POLYGON_STIPPLE_ENABLE */
+ xf_emit(ctx, 1, 4); /* 00000007 FP_CONTROL */
+ xf_emit(ctx, 1, 0); /* ffffffff */
+ xf_emit(ctx, 1, 0); /* 0001ffff GP_BUILTIN_RESULT_EN */
+ xf_emit(ctx, 1, 0); /* 000000ff CLEAR_STENCIL */
+ xf_emit(ctx, 1, 0); /* 00000007 STENCIL_FRONT_FUNC_FUNC */
+ xf_emit(ctx, 1, 0); /* 000000ff STENCIL_FRONT_FUNC_MASK */
+ xf_emit(ctx, 1, 0); /* 000000ff STENCIL_FRONT_FUNC_REF */
+ xf_emit(ctx, 1, 0); /* 000000ff STENCIL_FRONT_MASK */
+ xf_emit(ctx, 3, 0); /* 00000007 STENCIL_FRONT_OP_FAIL, ZFAIL, ZPASS */
+ xf_emit(ctx, 1, 0); /* 00000001 STENCIL_FRONT_ENABLE */
+ xf_emit(ctx, 1, 0); /* 00000001 STENCIL_BACK_ENABLE */
+ xf_emit(ctx, 1, 0); /* ffffffff CLEAR_DEPTH */
+ xf_emit(ctx, 1, 0); /* 00000007 */
+ if (dev_priv->chipset != 0x50)
+ xf_emit(ctx, 1, 0); /* 00000003 tesla UNK1108 */
+ xf_emit(ctx, 1, 0); /* 00000001 SAMPLECNT_ENABLE */
+ xf_emit(ctx, 1, 0); /* 0000000f ZETA_FORMAT */
+ xf_emit(ctx, 1, 1); /* 00000001 ZETA_ENABLE */
+ xf_emit(ctx, 1, 0x1001); /* 00001fff ZETA_ARRAY_MODE */
+ /* SEEK */
+ xf_emit(ctx, 4, 0xffff); /* 0000ffff MSAA_MASK */
+ xf_emit(ctx, 0x10, 0); /* 00000001 SCISSOR_ENABLE */
+ xf_emit(ctx, 0x10, 0); /* ffffffff DEPTH_RANGE_NEAR */
+ xf_emit(ctx, 0x10, 0x3f800000); /* ffffffff DEPTH_RANGE_FAR */
+ xf_emit(ctx, 1, 0x10); /* 7f/ff/3ff VIEW_VOLUME_CLIP_CTRL */
+ xf_emit(ctx, 1, 0); /* 00000001 VIEWPORT_CLIP_RECTS_EN */
+ xf_emit(ctx, 1, 3); /* 00000003 FP_CTRL_UNK196C */
+ xf_emit(ctx, 1, 0); /* 00000003 tesla UNK1968 */
+ if (dev_priv->chipset != 0x50)
+ xf_emit(ctx, 1, 0); /* 0fffffff tesla UNK1104 */
+ xf_emit(ctx, 1, 0); /* 00000001 tesla UNK151C */
}
static void
-nv50_graph_construct_gene_unk4(struct nouveau_grctx *ctx)
+nv50_graph_construct_gene_clipid(struct nouveau_grctx *ctx)
{
- /* middle of area 0 on pre-NVA0, middle of area 6 on NVAx */
- xf_emit(ctx, 2, 0x04000000);
- xf_emit(ctx, 1, 0);
- xf_emit(ctx, 1, 0x80);
- xf_emit(ctx, 3, 0);
- xf_emit(ctx, 1, 0x80);
- xf_emit(ctx, 1, 0);
+ /* middle of strand 0 on pre-NVA0 [after 24xx], middle of area 6 on NVAx */
+ /* SEEK */
+ xf_emit(ctx, 1, 0); /* 00000007 UNK0FB4 */
+ /* SEEK */
+ xf_emit(ctx, 4, 0); /* 07ffffff CLIPID_REGION_HORIZ */
+ xf_emit(ctx, 4, 0); /* 07ffffff CLIPID_REGION_VERT */
+ xf_emit(ctx, 2, 0); /* 07ffffff SCREEN_SCISSOR */
+ xf_emit(ctx, 2, 0x04000000); /* 07ffffff UNK1508 */
+ xf_emit(ctx, 1, 0); /* 00000001 CLIPID_ENABLE */
+ xf_emit(ctx, 1, 0x80); /* 00003fff CLIPID_WIDTH */
+ xf_emit(ctx, 1, 0); /* 000000ff CLIPID_ID */
+ xf_emit(ctx, 1, 0); /* 000000ff CLIPID_ADDRESS_HIGH */
+ xf_emit(ctx, 1, 0); /* ffffffff CLIPID_ADDRESS_LOW */
+ xf_emit(ctx, 1, 0x80); /* 00003fff CLIPID_HEIGHT */
+ xf_emit(ctx, 1, 0); /* 0000ffff DMA_CLIPID */
}
static void
-nv50_graph_construct_gene_unk5(struct nouveau_grctx *ctx)
+nv50_graph_construct_gene_unk24xx(struct nouveau_grctx *ctx)
{
struct drm_nouveau_private *dev_priv = ctx->dev->dev_private;
- /* middle of area 0 on pre-NVA0 [after m2mf], end of area 2 on NVAx */
- xf_emit(ctx, 2, 4);
- if (dev_priv->chipset > 0xa0 && dev_priv->chipset < 0xaa)
- xf_emit(ctx, 0x1c4d, 0);
+ int i;
+ /* middle of strand 0 on pre-NVA0 [after m2mf], end of strand 2 on NVAx */
+ /* SEEK */
+ xf_emit(ctx, 0x33, 0);
+ /* SEEK */
+ xf_emit(ctx, 2, 0);
+ /* SEEK */
+ xf_emit(ctx, 1, 0); /* 00000001 GP_ENABLE */
+ xf_emit(ctx, 1, 4); /* 0000007f VP_RESULT_MAP_SIZE */
+ xf_emit(ctx, 1, 4); /* 000000ff GP_RESULT_MAP_SIZE */
+ /* SEEK */
+ if (IS_NVA3F(dev_priv->chipset)) {
+ xf_emit(ctx, 4, 0); /* RO */
+ xf_emit(ctx, 0xe10, 0); /* 190 * 9: 8*ffffffff, 7ff */
+ xf_emit(ctx, 1, 0); /* 1ff */
+ xf_emit(ctx, 8, 0); /* 0? */
+ xf_emit(ctx, 9, 0); /* ffffffff, 7ff */
+
+ xf_emit(ctx, 4, 0); /* RO */
+ xf_emit(ctx, 0xe10, 0); /* 190 * 9: 8*ffffffff, 7ff */
+ xf_emit(ctx, 1, 0); /* 1ff */
+ xf_emit(ctx, 8, 0); /* 0? */
+ xf_emit(ctx, 9, 0); /* ffffffff, 7ff */
+ }
else
- xf_emit(ctx, 0x1c4b, 0);
- xf_emit(ctx, 2, 4);
- xf_emit(ctx, 1, 0x8100c12);
+ {
+ xf_emit(ctx, 0xc, 0); /* RO */
+ /* SEEK */
+ xf_emit(ctx, 0xe10, 0); /* 190 * 9: 8*ffffffff, 7ff */
+ xf_emit(ctx, 1, 0); /* 1ff */
+ xf_emit(ctx, 8, 0); /* 0? */
+
+ /* SEEK */
+ xf_emit(ctx, 0xc, 0); /* RO */
+ /* SEEK */
+ xf_emit(ctx, 0xe10, 0); /* 190 * 9: 8*ffffffff, 7ff */
+ xf_emit(ctx, 1, 0); /* 1ff */
+ xf_emit(ctx, 8, 0); /* 0? */
+ }
+ /* SEEK */
+ xf_emit(ctx, 1, 0); /* 00000001 GP_ENABLE */
+ xf_emit(ctx, 1, 4); /* 000000ff GP_RESULT_MAP_SIZE */
+ xf_emit(ctx, 1, 4); /* 0000007f VP_RESULT_MAP_SIZE */
+ xf_emit(ctx, 1, 0x8100c12); /* 1fffffff FP_INTERPOLANT_CTRL */
if (dev_priv->chipset != 0x50)
- xf_emit(ctx, 1, 3);
- xf_emit(ctx, 1, 0);
- xf_emit(ctx, 1, 0x8100c12);
- xf_emit(ctx, 1, 0);
- xf_emit(ctx, 1, 0x80c14);
- xf_emit(ctx, 1, 1);
+ xf_emit(ctx, 1, 3); /* 00000003 tesla UNK1100 */
+ /* SEEK */
+ xf_emit(ctx, 1, 0); /* 00000001 GP_ENABLE */
+ xf_emit(ctx, 1, 0x8100c12); /* 1fffffff FP_INTERPOLANT_CTRL */
+ xf_emit(ctx, 1, 0); /* 0000000f VP_GP_BUILTIN_ATTR_EN */
+ xf_emit(ctx, 1, 0x80c14); /* 01ffffff SEMANTIC_COLOR */
+ xf_emit(ctx, 1, 1); /* 00000001 */
+ /* SEEK */
if (dev_priv->chipset >= 0xa0)
- xf_emit(ctx, 2, 4);
- xf_emit(ctx, 1, 0x80c14);
- xf_emit(ctx, 2, 0);
- xf_emit(ctx, 1, 0x8100c12);
- xf_emit(ctx, 1, 0x27);
- xf_emit(ctx, 2, 0);
- xf_emit(ctx, 1, 1);
- xf_emit(ctx, 0x3c1, 0);
- xf_emit(ctx, 1, 1);
- xf_emit(ctx, 0x16, 0);
- xf_emit(ctx, 1, 0x8100c12);
- xf_emit(ctx, 1, 0);
+ xf_emit(ctx, 2, 4); /* 000000ff */
+ xf_emit(ctx, 1, 0x80c14); /* 01ffffff SEMANTIC_COLOR */
+ xf_emit(ctx, 1, 0); /* 00000001 VERTEX_TWO_SIDE_ENABLE */
+ xf_emit(ctx, 1, 0); /* 00000001 POINT_SPRITE_ENABLE */
+ xf_emit(ctx, 1, 0x8100c12); /* 1fffffff FP_INTERPOLANT_CTRL */
+ xf_emit(ctx, 1, 0x27); /* 000000ff SEMANTIC_PRIM_ID */
+ xf_emit(ctx, 1, 0); /* 00000001 GP_ENABLE */
+ xf_emit(ctx, 1, 0); /* 0000000f */
+ xf_emit(ctx, 1, 1); /* 00000001 */
+ for (i = 0; i < 10; i++) {
+ /* SEEK */
+ xf_emit(ctx, 0x40, 0); /* ffffffff */
+ xf_emit(ctx, 0x10, 0); /* 3, 0, 0.... */
+ xf_emit(ctx, 0x10, 0); /* ffffffff */
+ }
+ /* SEEK */
+ xf_emit(ctx, 1, 0); /* 00000001 POINT_SPRITE_CTRL */
+ xf_emit(ctx, 1, 1); /* 00000001 */
+ xf_emit(ctx, 1, 0); /* ffffffff */
+ xf_emit(ctx, 4, 0); /* ffffffff NOPERSPECTIVE_BITMAP */
+ xf_emit(ctx, 0x10, 0); /* 00ffffff POINT_COORD_REPLACE_MAP */
+ xf_emit(ctx, 1, 0); /* 00000003 WINDOW_ORIGIN */
+ xf_emit(ctx, 1, 0x8100c12); /* 1fffffff FP_INTERPOLANT_CTRL */
+ if (dev_priv->chipset != 0x50)
+ xf_emit(ctx, 1, 0); /* 000003ff */
}
static void
-nv50_graph_construct_gene_unk6(struct nouveau_grctx *ctx)
+nv50_graph_construct_gene_vfetch(struct nouveau_grctx *ctx)
{
struct drm_nouveau_private *dev_priv = ctx->dev->dev_private;
- /* beginning of area 1 on pre-NVA0 [after m2mf], area 3 on NVAx */
- xf_emit(ctx, 4, 0);
- xf_emit(ctx, 1, 0xf);
- if (dev_priv->chipset > 0xa0 && dev_priv->chipset < 0xaa)
- xf_emit(ctx, 8, 0);
- else
- xf_emit(ctx, 4, 0);
- xf_emit(ctx, 1, 0x20);
- if (dev_priv->chipset > 0xa0 && dev_priv->chipset < 0xaa)
- xf_emit(ctx, 0x11, 0);
+ int acnt = 0x10, rep, i;
+ /* beginning of strand 1 on pre-NVA0, strand 3 on NVAx */
+ if (IS_NVA3F(dev_priv->chipset))
+ acnt = 0x20;
+ /* SEEK */
+ if (dev_priv->chipset >= 0xa0) {
+ xf_emit(ctx, 1, 0); /* ffffffff tesla UNK13A4 */
+ xf_emit(ctx, 1, 1); /* 00000fff tesla UNK1318 */
+ }
+ xf_emit(ctx, 1, 0); /* ffffffff VERTEX_BUFFER_FIRST */
+ xf_emit(ctx, 1, 0); /* 00000001 PRIMITIVE_RESTART_ENABLE */
+ xf_emit(ctx, 1, 0); /* 00000001 UNK0DE8 */
+ xf_emit(ctx, 1, 0); /* ffffffff PRIMITIVE_RESTART_INDEX */
+ xf_emit(ctx, 1, 0xf); /* ffffffff VP_ATTR_EN */
+ xf_emit(ctx, (acnt/8)-1, 0); /* ffffffff VP_ATTR_EN */
+ xf_emit(ctx, acnt/8, 0); /* ffffffff VTX_ATTR_MASK_UNK0DD0 */
+ xf_emit(ctx, 1, 0); /* 0000000f VP_GP_BUILTIN_ATTR_EN */
+ xf_emit(ctx, 1, 0x20); /* 0000ffff tesla UNK129C */
+ xf_emit(ctx, 1, 0); /* 000000ff turing UNK370??? */
+ xf_emit(ctx, 1, 0); /* 0000ffff turing USER_PARAM_COUNT */
+ xf_emit(ctx, 1, 0); /* ffffffff tesla UNK1A30 */
+ /* SEEK */
+ if (IS_NVA3F(dev_priv->chipset))
+ xf_emit(ctx, 0xb, 0); /* RO */
else if (dev_priv->chipset >= 0xa0)
- xf_emit(ctx, 0xf, 0);
+ xf_emit(ctx, 0x9, 0); /* RO */
else
- xf_emit(ctx, 0xe, 0);
- xf_emit(ctx, 1, 0x1a);
- xf_emit(ctx, 0xd, 0);
- xf_emit(ctx, 2, 4);
- xf_emit(ctx, 1, 0);
- xf_emit(ctx, 1, 4);
- xf_emit(ctx, 1, 8);
- xf_emit(ctx, 1, 0);
+ xf_emit(ctx, 0x8, 0); /* RO */
+ /* SEEK */
+ xf_emit(ctx, 1, 0); /* 00000001 EDGE_FLAG */
+ xf_emit(ctx, 1, 0); /* 00000001 PROVOKING_VERTEX_LAST */
+ xf_emit(ctx, 1, 0); /* 00000001 GP_ENABLE */
+ xf_emit(ctx, 1, 0x1a); /* 0000001f POLYGON_MODE */
+ /* SEEK */
+ xf_emit(ctx, 0xc, 0); /* RO */
+ /* SEEK */
+ xf_emit(ctx, 1, 0); /* 7f/ff */
+ xf_emit(ctx, 1, 4); /* 7f/ff VP_REG_ALLOC_RESULT */
+ xf_emit(ctx, 1, 4); /* 7f/ff VP_RESULT_MAP_SIZE */
+ xf_emit(ctx, 1, 0); /* 0000000f VP_GP_BUILTIN_ATTR_EN */
+ xf_emit(ctx, 1, 4); /* 000001ff UNK1A28 */
+ xf_emit(ctx, 1, 8); /* 000001ff UNK0DF0 */
+ xf_emit(ctx, 1, 0); /* 00000001 GP_ENABLE */
if (dev_priv->chipset == 0x50)
- xf_emit(ctx, 1, 0x3ff);
+ xf_emit(ctx, 1, 0x3ff); /* 3ff tesla UNK0D68 */
else
- xf_emit(ctx, 1, 0x7ff);
+ xf_emit(ctx, 1, 0x7ff); /* 7ff tesla UNK0D68 */
if (dev_priv->chipset == 0xa8)
- xf_emit(ctx, 1, 0x1e00);
- xf_emit(ctx, 0xc, 0);
- xf_emit(ctx, 1, 0xf);
- if (dev_priv->chipset == 0x50)
- xf_emit(ctx, 0x125, 0);
- else if (dev_priv->chipset < 0xa0)
- xf_emit(ctx, 0x126, 0);
- else if (dev_priv->chipset == 0xa0 || dev_priv->chipset >= 0xaa)
- xf_emit(ctx, 0x124, 0);
+ xf_emit(ctx, 1, 0x1e00); /* 7fff */
+ /* SEEK */
+ xf_emit(ctx, 0xc, 0); /* RO or close */
+ /* SEEK */
+ xf_emit(ctx, 1, 0xf); /* ffffffff VP_ATTR_EN */
+ xf_emit(ctx, (acnt/8)-1, 0); /* ffffffff VP_ATTR_EN */
+ xf_emit(ctx, 1, 0); /* 0000000f VP_GP_BUILTIN_ATTR_EN */
+ if (dev_priv->chipset > 0x50 && dev_priv->chipset < 0xa0)
+ xf_emit(ctx, 2, 0); /* ffffffff */
else
- xf_emit(ctx, 0x1f7, 0);
- xf_emit(ctx, 1, 0xf);
- if (dev_priv->chipset > 0xa0 && dev_priv->chipset < 0xaa)
- xf_emit(ctx, 3, 0);
+ xf_emit(ctx, 1, 0); /* ffffffff */
+ xf_emit(ctx, 1, 0); /* 00000003 tesla UNK0FD8 */
+ /* SEEK */
+ if (IS_NVA3F(dev_priv->chipset)) {
+ xf_emit(ctx, 0x10, 0); /* 0? */
+ xf_emit(ctx, 2, 0); /* weird... */
+ xf_emit(ctx, 2, 0); /* RO */
+ } else {
+ xf_emit(ctx, 8, 0); /* 0? */
+ xf_emit(ctx, 1, 0); /* weird... */
+ xf_emit(ctx, 2, 0); /* RO */
+ }
+ /* SEEK */
+ xf_emit(ctx, 1, 0); /* ffffffff VB_ELEMENT_BASE */
+ xf_emit(ctx, 1, 0); /* ffffffff UNK1438 */
+ xf_emit(ctx, acnt, 0); /* 1 tesla UNK1000 */
+ if (dev_priv->chipset >= 0xa0)
+ xf_emit(ctx, 1, 0); /* ffffffff tesla UNK1118? */
+ /* SEEK */
+ xf_emit(ctx, acnt, 0); /* ffffffff VERTEX_ARRAY_UNK90C */
+ xf_emit(ctx, 1, 0); /* f/1f */
+ /* SEEK */
+ xf_emit(ctx, acnt, 0); /* ffffffff VERTEX_ARRAY_UNK90C */
+ xf_emit(ctx, 1, 0); /* f/1f */
+ /* SEEK */
+ xf_emit(ctx, acnt, 0); /* RO */
+ xf_emit(ctx, 2, 0); /* RO */
+ /* SEEK */
+ xf_emit(ctx, 1, 0); /* ffffffff tesla UNK111C? */
+ xf_emit(ctx, 1, 0); /* RO */
+ /* SEEK */
+ xf_emit(ctx, 1, 0); /* 000000ff UNK15F4_ADDRESS_HIGH */
+ xf_emit(ctx, 1, 0); /* ffffffff UNK15F4_ADDRESS_LOW */
+ xf_emit(ctx, 1, 0); /* 000000ff UNK0F84_ADDRESS_HIGH */
+ xf_emit(ctx, 1, 0); /* ffffffff UNK0F84_ADDRESS_LOW */
+ /* SEEK */
+ xf_emit(ctx, acnt, 0); /* 00003fff VERTEX_ARRAY_ATTRIB_OFFSET */
+ xf_emit(ctx, 3, 0); /* f/1f */
+ /* SEEK */
+ xf_emit(ctx, acnt, 0); /* 00000fff VERTEX_ARRAY_STRIDE */
+ xf_emit(ctx, 3, 0); /* f/1f */
+ /* SEEK */
+ xf_emit(ctx, acnt, 0); /* ffffffff VERTEX_ARRAY_LOW */
+ xf_emit(ctx, 3, 0); /* f/1f */
+ /* SEEK */
+ xf_emit(ctx, acnt, 0); /* 000000ff VERTEX_ARRAY_HIGH */
+ xf_emit(ctx, 3, 0); /* f/1f */
+ /* SEEK */
+ xf_emit(ctx, acnt, 0); /* ffffffff VERTEX_LIMIT_LOW */
+ xf_emit(ctx, 3, 0); /* f/1f */
+ /* SEEK */
+ xf_emit(ctx, acnt, 0); /* 000000ff VERTEX_LIMIT_HIGH */
+ xf_emit(ctx, 3, 0); /* f/1f */
+ /* SEEK */
+ if (IS_NVA3F(dev_priv->chipset)) {
+ xf_emit(ctx, acnt, 0); /* f */
+ xf_emit(ctx, 3, 0); /* f/1f */
+ }
+ /* SEEK */
+ if (IS_NVA3F(dev_priv->chipset))
+ xf_emit(ctx, 2, 0); /* RO */
+ else
+ xf_emit(ctx, 5, 0); /* RO */
+ /* SEEK */
+ xf_emit(ctx, 1, 0); /* ffff DMA_VTXBUF */
+ /* SEEK */
+ if (dev_priv->chipset < 0xa0) {
+ xf_emit(ctx, 0x41, 0); /* RO */
+ /* SEEK */
+ xf_emit(ctx, 0x11, 0); /* RO */
+ } else if (!IS_NVA3F(dev_priv->chipset))
+ xf_emit(ctx, 0x50, 0); /* RO */
else
- xf_emit(ctx, 1, 0);
- xf_emit(ctx, 1, 1);
- if (dev_priv->chipset > 0xa0 && dev_priv->chipset < 0xaa)
- xf_emit(ctx, 0xa1, 0);
+ xf_emit(ctx, 0x58, 0); /* RO */
+ /* SEEK */
+ xf_emit(ctx, 1, 0xf); /* ffffffff VP_ATTR_EN */
+ xf_emit(ctx, (acnt/8)-1, 0); /* ffffffff VP_ATTR_EN */
+ xf_emit(ctx, 1, 1); /* 1 UNK0DEC */
+ /* SEEK */
+ xf_emit(ctx, acnt*4, 0); /* ffffffff VTX_ATTR */
+ xf_emit(ctx, 4, 0); /* f/1f, 0, 0, 0 */
+ /* SEEK */
+ if (IS_NVA3F(dev_priv->chipset))
+ xf_emit(ctx, 0x1d, 0); /* RO */
else
- xf_emit(ctx, 0x5a, 0);
- xf_emit(ctx, 1, 0xf);
+ xf_emit(ctx, 0x16, 0); /* RO */
+ /* SEEK */
+ xf_emit(ctx, 1, 0xf); /* ffffffff VP_ATTR_EN */
+ xf_emit(ctx, (acnt/8)-1, 0); /* ffffffff VP_ATTR_EN */
+ /* SEEK */
if (dev_priv->chipset < 0xa0)
- xf_emit(ctx, 0x834, 0);
- else if (dev_priv->chipset == 0xa0)
- xf_emit(ctx, 0x1873, 0);
- else if (dev_priv->chipset > 0xa0 && dev_priv->chipset < 0xaa)
- xf_emit(ctx, 0x8ba, 0);
+ xf_emit(ctx, 8, 0); /* RO */
+ else if (IS_NVA3F(dev_priv->chipset))
+ xf_emit(ctx, 0xc, 0); /* RO */
+ else
+ xf_emit(ctx, 7, 0); /* RO */
+ /* SEEK */
+ xf_emit(ctx, 0xa, 0); /* RO */
+ if (dev_priv->chipset == 0xa0)
+ rep = 0xc;
+ else
+ rep = 4;
+ for (i = 0; i < rep; i++) {
+ /* SEEK */
+ if (IS_NVA3F(dev_priv->chipset))
+ xf_emit(ctx, 0x20, 0); /* ffffffff */
+ xf_emit(ctx, 0x200, 0); /* ffffffff */
+ xf_emit(ctx, 4, 0); /* 7f/ff, 0, 0, 0 */
+ xf_emit(ctx, 4, 0); /* ffffffff */
+ }
+ /* SEEK */
+ xf_emit(ctx, 1, 0); /* 113/111 */
+ xf_emit(ctx, 1, 0xf); /* ffffffff VP_ATTR_EN */
+ xf_emit(ctx, (acnt/8)-1, 0); /* ffffffff VP_ATTR_EN */
+ xf_emit(ctx, acnt/8, 0); /* ffffffff VTX_ATTR_MASK_UNK0DD0 */
+ xf_emit(ctx, 1, 0); /* 0000000f VP_GP_BUILTIN_ATTR_EN */
+ xf_emit(ctx, 1, 0); /* ffffffff tesla UNK1A30 */
+ /* SEEK */
+ if (IS_NVA3F(dev_priv->chipset))
+ xf_emit(ctx, 7, 0); /* weird... */
else
- xf_emit(ctx, 0x833, 0);
- xf_emit(ctx, 1, 0xf);
- xf_emit(ctx, 0xf, 0);
+ xf_emit(ctx, 5, 0); /* weird... */
}
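/* Editor's note -- a minimal conceptual sketch, not the driver's real
 * helper (the actual xf_emit() lives in nouveau_grctx.h and is not part
 * of this hunk): every construct_* routine in this file walks the
 * per-channel grctx image with xf_emit(), which either just advances a
 * cursor (sizing pass) or also stores the given value <num> times
 * (fill pass).  All names below are hypothetical; u32 is assumed to
 * come from <linux/types.h>.
 */
struct grctx_sketch {
	u32 *vals;		/* value image; NULL during the sizing pass */
	unsigned int pos;	/* current slot index */
};

static inline void
xf_emit_sketch(struct grctx_sketch *c, int num, u32 val)
{
	int i;

	/* fill pass: write <num> copies of <val> at the cursor */
	if (c->vals && val)
		for (i = 0; i < num; i++)
			c->vals[c->pos + i] = val;
	/* both passes: advance the cursor, so the same walk both sizes
	 * and initializes the context image */
	c->pos += num;
}
/* e.g. xf_emit(ctx, acnt, 0) above reserves one slot per vertex
 * attribute whether or not values are actually being written. */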
static void
-nv50_graph_construct_gene_unk7(struct nouveau_grctx *ctx)
+nv50_graph_construct_gene_eng2d(struct nouveau_grctx *ctx)
{
struct drm_nouveau_private *dev_priv = ctx->dev->dev_private;
- /* middle of area 1 on pre-NVA0 [after m2mf], middle of area 6 on NVAx */
- xf_emit(ctx, 2, 0);
- if (dev_priv->chipset == 0x50)
- xf_emit(ctx, 2, 1);
- else
- xf_emit(ctx, 2, 0);
- xf_emit(ctx, 1, 0);
- xf_emit(ctx, 1, 1);
- xf_emit(ctx, 2, 0x100);
- xf_emit(ctx, 1, 0x11);
- xf_emit(ctx, 1, 0);
- xf_emit(ctx, 1, 8);
- xf_emit(ctx, 5, 0);
- xf_emit(ctx, 1, 1);
- xf_emit(ctx, 1, 0);
- xf_emit(ctx, 3, 1);
- xf_emit(ctx, 1, 0xcf);
- xf_emit(ctx, 1, 2);
- xf_emit(ctx, 6, 0);
- xf_emit(ctx, 1, 1);
- xf_emit(ctx, 1, 0);
- xf_emit(ctx, 3, 1);
- xf_emit(ctx, 4, 0);
- xf_emit(ctx, 1, 4);
- xf_emit(ctx, 1, 0);
- xf_emit(ctx, 1, 1);
- xf_emit(ctx, 1, 0x15);
- xf_emit(ctx, 3, 0);
- xf_emit(ctx, 1, 0x4444480);
- xf_emit(ctx, 0x37, 0);
+ /* middle of strand 1 on pre-NVA0 [after vfetch], middle of strand 6 on NVAx */
+ /* SEEK */
+ xf_emit(ctx, 2, 0); /* 0001ffff CLIP_X, CLIP_Y */
+ xf_emit(ctx, 2, 0); /* 0000ffff CLIP_W, CLIP_H */
+ xf_emit(ctx, 1, 0); /* 00000001 CLIP_ENABLE */
+ if (dev_priv->chipset < 0xa0) {
+ /* this is useless on everything but the original NV50,
+ * guess they forgot to nuke it. Or just didn't bother. */
+ xf_emit(ctx, 2, 0); /* 0000ffff IFC_CLIP_X, Y */
+ xf_emit(ctx, 2, 1); /* 0000ffff IFC_CLIP_W, H */
+ xf_emit(ctx, 1, 0); /* 00000001 IFC_CLIP_ENABLE */
+ }
+ xf_emit(ctx, 1, 1); /* 00000001 DST_LINEAR */
+ xf_emit(ctx, 1, 0x100); /* 0001ffff DST_WIDTH */
+ xf_emit(ctx, 1, 0x100); /* 0001ffff DST_HEIGHT */
+ xf_emit(ctx, 1, 0x11); /* 3f[NV50]/7f[NV84+] DST_FORMAT */
+ xf_emit(ctx, 1, 0); /* 0001ffff DRAW_POINT_X */
+ xf_emit(ctx, 1, 8); /* 0000000f DRAW_UNK58C */
+ xf_emit(ctx, 1, 0); /* 000fffff SIFC_DST_X_FRACT */
+ xf_emit(ctx, 1, 0); /* 0001ffff SIFC_DST_X_INT */
+ xf_emit(ctx, 1, 0); /* 000fffff SIFC_DST_Y_FRACT */
+ xf_emit(ctx, 1, 0); /* 0001ffff SIFC_DST_Y_INT */
+ xf_emit(ctx, 1, 0); /* 000fffff SIFC_DX_DU_FRACT */
+ xf_emit(ctx, 1, 1); /* 0001ffff SIFC_DX_DU_INT */
+ xf_emit(ctx, 1, 0); /* 000fffff SIFC_DY_DV_FRACT */
+ xf_emit(ctx, 1, 1); /* 0001ffff SIFC_DY_DV_INT */
+ xf_emit(ctx, 1, 1); /* 0000ffff SIFC_WIDTH */
+ xf_emit(ctx, 1, 1); /* 0000ffff SIFC_HEIGHT */
+ xf_emit(ctx, 1, 0xcf); /* 000000ff SIFC_FORMAT */
+ xf_emit(ctx, 1, 2); /* 00000003 SIFC_BITMAP_UNK808 */
+ xf_emit(ctx, 1, 0); /* 00000003 SIFC_BITMAP_LINE_PACK_MODE */
+ xf_emit(ctx, 1, 0); /* 00000001 SIFC_BITMAP_LSB_FIRST */
+ xf_emit(ctx, 1, 0); /* 00000001 SIFC_BITMAP_ENABLE */
+ xf_emit(ctx, 1, 0); /* 0000ffff BLIT_DST_X */
+ xf_emit(ctx, 1, 0); /* 0000ffff BLIT_DST_Y */
+ xf_emit(ctx, 1, 0); /* 000fffff BLIT_DU_DX_FRACT */
+ xf_emit(ctx, 1, 1); /* 0001ffff BLIT_DU_DX_INT */
+ xf_emit(ctx, 1, 0); /* 000fffff BLIT_DV_DY_FRACT */
+ xf_emit(ctx, 1, 1); /* 0001ffff BLIT_DV_DY_INT */
+ xf_emit(ctx, 1, 1); /* 0000ffff BLIT_DST_W */
+ xf_emit(ctx, 1, 1); /* 0000ffff BLIT_DST_H */
+ xf_emit(ctx, 1, 0); /* 000fffff BLIT_SRC_X_FRACT */
+ xf_emit(ctx, 1, 0); /* 0001ffff BLIT_SRC_X_INT */
+ xf_emit(ctx, 1, 0); /* 000fffff BLIT_SRC_Y_FRACT */
+ xf_emit(ctx, 1, 0); /* 00000001 UNK888 */
+ xf_emit(ctx, 1, 4); /* 0000003f UNK884 */
+ xf_emit(ctx, 1, 0); /* 00000007 UNK880 */
+ xf_emit(ctx, 1, 1); /* 0000001f tesla UNK0FB8 */
+ xf_emit(ctx, 1, 0x15); /* 000000ff tesla UNK128C */
+ xf_emit(ctx, 2, 0); /* 00000007, ffff0ff3 */
+ xf_emit(ctx, 1, 0); /* 00000001 UNK260 */
+ xf_emit(ctx, 1, 0x4444480); /* 1fffffff UNK870 */
+ /* SEEK */
+ xf_emit(ctx, 0x10, 0);
+ /* SEEK */
+ xf_emit(ctx, 0x27, 0);
}
static void
-nv50_graph_construct_gene_unk8(struct nouveau_grctx *ctx)
+nv50_graph_construct_gene_csched(struct nouveau_grctx *ctx)
{
- /* middle of area 1 on pre-NVA0 [after m2mf], middle of area 0 on NVAx */
- xf_emit(ctx, 4, 0);
- xf_emit(ctx, 1, 0x8100c12);
- xf_emit(ctx, 4, 0);
- xf_emit(ctx, 1, 0x100);
- xf_emit(ctx, 2, 0);
- xf_emit(ctx, 1, 0x10001);
- xf_emit(ctx, 1, 0);
- xf_emit(ctx, 1, 0x10001);
- xf_emit(ctx, 1, 1);
- xf_emit(ctx, 1, 0x10001);
- xf_emit(ctx, 1, 1);
- xf_emit(ctx, 1, 4);
- xf_emit(ctx, 1, 2);
+ struct drm_nouveau_private *dev_priv = ctx->dev->dev_private;
+ /* middle of strand 1 on pre-NVA0 [after eng2d], middle of strand 0 on NVAx */
+ /* SEEK */
+ xf_emit(ctx, 2, 0); /* 00007fff WINDOW_OFFSET_XY... what is it doing here??? */
+ xf_emit(ctx, 1, 0); /* 00000001 tesla UNK1924 */
+ xf_emit(ctx, 1, 0); /* 00000003 WINDOW_ORIGIN */
+ xf_emit(ctx, 1, 0x8100c12); /* 1fffffff FP_INTERPOLANT_CTRL */
+ xf_emit(ctx, 1, 0); /* 000003ff */
+ /* SEEK */
+ xf_emit(ctx, 1, 0); /* ffffffff turing UNK364 */
+ xf_emit(ctx, 1, 0); /* 0000000f turing UNK36C */
+ xf_emit(ctx, 1, 0); /* 0000ffff USER_PARAM_COUNT */
+ xf_emit(ctx, 1, 0x100); /* 00ffffff turing UNK384 */
+ xf_emit(ctx, 1, 0); /* 0000000f turing UNK2A0 */
+ xf_emit(ctx, 1, 0); /* 0000ffff GRIDID */
+ xf_emit(ctx, 1, 0x10001); /* ffffffff GRIDDIM_XY */
+ xf_emit(ctx, 1, 0); /* ffffffff */
+ xf_emit(ctx, 1, 0x10001); /* ffffffff BLOCKDIM_XY */
+ xf_emit(ctx, 1, 1); /* 0000ffff BLOCKDIM_Z */
+ xf_emit(ctx, 1, 0x10001); /* 00ffffff BLOCK_ALLOC */
+ xf_emit(ctx, 1, 1); /* 00000001 LANES32 */
+ xf_emit(ctx, 1, 4); /* 000000ff FP_REG_ALLOC_TEMP */
+ xf_emit(ctx, 1, 2); /* 00000003 REG_MODE */
+ /* SEEK */
+ xf_emit(ctx, 0x40, 0); /* ffffffff USER_PARAM */
+ switch (dev_priv->chipset) {
+ case 0x50:
+ case 0x92:
+ xf_emit(ctx, 8, 0); /* 7, 0, 0, 0, ... */
+ xf_emit(ctx, 0x80, 0); /* fff */
+ xf_emit(ctx, 2, 0); /* ff, fff */
+ xf_emit(ctx, 0x10*2, 0); /* ffffffff, 1f */
+ break;
+ case 0x84:
+ xf_emit(ctx, 8, 0); /* 7, 0, 0, 0, ... */
+ xf_emit(ctx, 0x60, 0); /* fff */
+ xf_emit(ctx, 2, 0); /* ff, fff */
+ xf_emit(ctx, 0xc*2, 0); /* ffffffff, 1f */
+ break;
+ case 0x94:
+ case 0x96:
+ xf_emit(ctx, 8, 0); /* 7, 0, 0, 0, ... */
+ xf_emit(ctx, 0x40, 0); /* fff */
+ xf_emit(ctx, 2, 0); /* ff, fff */
+ xf_emit(ctx, 8*2, 0); /* ffffffff, 1f */
+ break;
+ case 0x86:
+ case 0x98:
+ xf_emit(ctx, 4, 0); /* f, 0, 0, 0 */
+ xf_emit(ctx, 0x10, 0); /* fff */
+ xf_emit(ctx, 2, 0); /* ff, fff */
+ xf_emit(ctx, 2*2, 0); /* ffffffff, 1f */
+ break;
+ case 0xa0:
+ xf_emit(ctx, 8, 0); /* 7, 0, 0, 0, ... */
+ xf_emit(ctx, 0xf0, 0); /* fff */
+ xf_emit(ctx, 2, 0); /* ff, fff */
+ xf_emit(ctx, 0x1e*2, 0); /* ffffffff, 1f */
+ break;
+ case 0xa3:
+ xf_emit(ctx, 8, 0); /* 7, 0, 0, 0, ... */
+ xf_emit(ctx, 0x60, 0); /* fff */
+ xf_emit(ctx, 2, 0); /* ff, fff */
+ xf_emit(ctx, 0xc*2, 0); /* ffffffff, 1f */
+ break;
+ case 0xa5:
+ case 0xaf:
+ xf_emit(ctx, 8, 0); /* 7, 0, 0, 0, ... */
+ xf_emit(ctx, 0x30, 0); /* fff */
+ xf_emit(ctx, 2, 0); /* ff, fff */
+ xf_emit(ctx, 6*2, 0); /* ffffffff, 1f */
+ break;
+ case 0xaa:
+ xf_emit(ctx, 0x12, 0);
+ break;
+ case 0xa8:
+ case 0xac:
+ xf_emit(ctx, 4, 0); /* f, 0, 0, 0 */
+ xf_emit(ctx, 0x10, 0); /* fff */
+ xf_emit(ctx, 2, 0); /* ff, fff */
+ xf_emit(ctx, 2*2, 0); /* ffffffff, 1f */
+ break;
+ }
+ xf_emit(ctx, 1, 0); /* 0000000f */
+ xf_emit(ctx, 1, 0); /* 00000000 */
+ xf_emit(ctx, 1, 0); /* ffffffff */
+ xf_emit(ctx, 1, 0); /* 0000001f */
+ xf_emit(ctx, 4, 0); /* ffffffff */
+ xf_emit(ctx, 1, 0); /* 00000003 turing UNK35C */
+ xf_emit(ctx, 1, 0); /* ffffffff */
+ xf_emit(ctx, 4, 0); /* ffffffff */
+ xf_emit(ctx, 1, 0); /* 00000003 turing UNK35C */
+ xf_emit(ctx, 1, 0); /* ffffffff */
+ xf_emit(ctx, 1, 0); /* 000000ff */
}
static void
-nv50_graph_construct_gene_unk9(struct nouveau_grctx *ctx)
+nv50_graph_construct_gene_unk1cxx(struct nouveau_grctx *ctx)
{
struct drm_nouveau_private *dev_priv = ctx->dev->dev_private;
- /* middle of area 2 on pre-NVA0 [after m2mf], end of area 0 on NVAx */
- xf_emit(ctx, 1, 0x3f800000);
- xf_emit(ctx, 6, 0);
- xf_emit(ctx, 1, 4);
- xf_emit(ctx, 1, 0x1a);
- xf_emit(ctx, 2, 0);
- xf_emit(ctx, 1, 1);
- xf_emit(ctx, 0x12, 0);
- xf_emit(ctx, 1, 0x00ffff00);
- xf_emit(ctx, 6, 0);
- xf_emit(ctx, 1, 0xf);
- xf_emit(ctx, 7, 0);
- xf_emit(ctx, 1, 0x0fac6881);
- xf_emit(ctx, 1, 0x11);
- xf_emit(ctx, 0xf, 0);
- xf_emit(ctx, 1, 4);
- xf_emit(ctx, 2, 0);
- if (dev_priv->chipset > 0xa0 && dev_priv->chipset < 0xaa)
- xf_emit(ctx, 1, 3);
+ xf_emit(ctx, 2, 0); /* 00007fff WINDOW_OFFSET_XY */
+ xf_emit(ctx, 1, 0x3f800000); /* ffffffff LINE_WIDTH */
+ xf_emit(ctx, 1, 0); /* 00000001 LINE_SMOOTH_ENABLE */
+ xf_emit(ctx, 1, 0); /* 00000001 tesla UNK1658 */
+ xf_emit(ctx, 1, 0); /* 00000001 POLYGON_SMOOTH_ENABLE */
+ xf_emit(ctx, 3, 0); /* 00000001 POLYGON_OFFSET_*_ENABLE */
+ xf_emit(ctx, 1, 4); /* 0000000f CULL_MODE */
+ xf_emit(ctx, 1, 0x1a); /* 0000001f POLYGON_MODE */
+ xf_emit(ctx, 1, 0); /* 0000000f ZETA_FORMAT */
+ xf_emit(ctx, 1, 0); /* 00000001 POINT_SPRITE_ENABLE */
+ xf_emit(ctx, 1, 1); /* 00000001 tesla UNK165C */
+ xf_emit(ctx, 0x10, 0); /* 00000001 SCISSOR_ENABLE */
+ xf_emit(ctx, 1, 0); /* 00000001 tesla UNK1534 */
+ xf_emit(ctx, 1, 0); /* 00000001 LINE_STIPPLE_ENABLE */
+ xf_emit(ctx, 1, 0x00ffff00); /* 00ffffff LINE_STIPPLE_PATTERN */
+ xf_emit(ctx, 1, 0); /* ffffffff POLYGON_OFFSET_UNITS */
+ xf_emit(ctx, 1, 0); /* ffffffff POLYGON_OFFSET_FACTOR */
+ xf_emit(ctx, 1, 0); /* 00000003 tesla UNK1668 */
+ xf_emit(ctx, 2, 0); /* 07ffffff SCREEN_SCISSOR */
+ xf_emit(ctx, 1, 0); /* 00000001 tesla UNK1900 */
+ xf_emit(ctx, 1, 0xf); /* 0000000f COLOR_MASK */
+ xf_emit(ctx, 7, 0); /* 0000000f COLOR_MASK */
+ xf_emit(ctx, 1, 0x0fac6881); /* 0fffffff RT_CONTROL */
+ xf_emit(ctx, 1, 0x11); /* 0000007f RT_FORMAT */
+ xf_emit(ctx, 7, 0); /* 0000007f RT_FORMAT */
+ xf_emit(ctx, 8, 0); /* 00000001 RT_HORIZ_LINEAR */
+ xf_emit(ctx, 1, 4); /* 00000007 FP_CONTROL */
+ xf_emit(ctx, 1, 0); /* 00000001 ALPHA_TEST_ENABLE */
+ xf_emit(ctx, 1, 0); /* 00000007 ALPHA_TEST_FUNC */
+ if (IS_NVA3F(dev_priv->chipset))
+ xf_emit(ctx, 1, 3); /* 00000003 UNK16B4 */
else if (dev_priv->chipset >= 0xa0)
- xf_emit(ctx, 1, 1);
- xf_emit(ctx, 2, 0);
- xf_emit(ctx, 1, 2);
- xf_emit(ctx, 2, 0x04000000);
- xf_emit(ctx, 3, 0);
- xf_emit(ctx, 1, 5);
- xf_emit(ctx, 1, 0x52);
- if (dev_priv->chipset == 0x50) {
- xf_emit(ctx, 0x13, 0);
- } else {
- xf_emit(ctx, 4, 0);
- xf_emit(ctx, 1, 1);
- if (dev_priv->chipset > 0xa0 && dev_priv->chipset < 0xaa)
- xf_emit(ctx, 0x11, 0);
- else
- xf_emit(ctx, 0x10, 0);
+ xf_emit(ctx, 1, 1); /* 00000001 UNK16B4 */
+ xf_emit(ctx, 1, 0); /* 00000003 MULTISAMPLE_CTRL */
+ xf_emit(ctx, 1, 0); /* 00000003 tesla UNK0F90 */
+ xf_emit(ctx, 1, 2); /* 00000003 tesla UNK143C */
+ xf_emit(ctx, 2, 0x04000000); /* 07ffffff tesla UNK0D6C */
+ xf_emit(ctx, 1, 0); /* 000000ff STENCIL_FRONT_MASK */
+ xf_emit(ctx, 1, 0); /* 00000001 DEPTH_WRITE_ENABLE */
+ xf_emit(ctx, 1, 0); /* 00000001 SAMPLECNT_ENABLE */
+ xf_emit(ctx, 1, 5); /* 0000000f UNK1408 */
+ xf_emit(ctx, 1, 0x52); /* 000001ff SEMANTIC_PTSZ */
+ xf_emit(ctx, 1, 0); /* ffffffff POINT_SIZE */
+ xf_emit(ctx, 1, 0); /* 00000001 */
+ xf_emit(ctx, 1, 0); /* 00000007 tesla UNK0FB4 */
+ if (dev_priv->chipset != 0x50) {
+ xf_emit(ctx, 1, 0); /* 3ff */
+ xf_emit(ctx, 1, 1); /* 00000001 tesla UNK1110 */
}
- xf_emit(ctx, 0x10, 0x3f800000);
- xf_emit(ctx, 1, 0x10);
- xf_emit(ctx, 0x26, 0);
- xf_emit(ctx, 1, 0x8100c12);
- xf_emit(ctx, 1, 5);
- xf_emit(ctx, 2, 0);
- xf_emit(ctx, 1, 1);
- xf_emit(ctx, 1, 0);
- xf_emit(ctx, 4, 0xffff);
+ if (IS_NVA3F(dev_priv->chipset))
+ xf_emit(ctx, 1, 0); /* 00000003 tesla UNK1928 */
+ xf_emit(ctx, 0x10, 0); /* ffffffff DEPTH_RANGE_NEAR */
+ xf_emit(ctx, 0x10, 0x3f800000); /* ffffffff DEPTH_RANGE_FAR */
+ xf_emit(ctx, 1, 0x10); /* 000000ff VIEW_VOLUME_CLIP_CTRL */
+ xf_emit(ctx, 0x20, 0); /* 07ffffff VIEWPORT_HORIZ, then VIEWPORT_VERT. (W&0x3fff)<<13 | (X&0x1fff). */
+ xf_emit(ctx, 1, 0); /* ffffffff tesla UNK187C */
+ xf_emit(ctx, 1, 0); /* 00000003 WINDOW_ORIGIN */
+ xf_emit(ctx, 1, 0); /* 00000001 STENCIL_FRONT_ENABLE */
+ xf_emit(ctx, 1, 0); /* 00000001 DEPTH_TEST_ENABLE */
+ xf_emit(ctx, 1, 0); /* 00000001 STENCIL_BACK_ENABLE */
+ xf_emit(ctx, 1, 0); /* 000000ff STENCIL_BACK_MASK */
+ xf_emit(ctx, 1, 0x8100c12); /* 1fffffff FP_INTERPOLANT_CTRL */
+ xf_emit(ctx, 1, 5); /* 0000000f tesla UNK1220 */
+ xf_emit(ctx, 1, 0); /* 00000007 MULTISAMPLE_SAMPLES_LOG2 */
+ xf_emit(ctx, 1, 0); /* 000000ff tesla UNK1A20 */
+ xf_emit(ctx, 1, 1); /* 00000001 ZETA_ENABLE */
+ xf_emit(ctx, 1, 0); /* 00000001 VERTEX_TWO_SIDE_ENABLE */
+ xf_emit(ctx, 4, 0xffff); /* 0000ffff MSAA_MASK */
if (dev_priv->chipset != 0x50)
- xf_emit(ctx, 1, 3);
+ xf_emit(ctx, 1, 3); /* 00000003 tesla UNK1100 */
if (dev_priv->chipset < 0xa0)
- xf_emit(ctx, 0x1f, 0);
- else if (dev_priv->chipset > 0xa0 && dev_priv->chipset < 0xaa)
- xf_emit(ctx, 0xc, 0);
- else
- xf_emit(ctx, 3, 0);
- xf_emit(ctx, 1, 0x00ffff00);
- xf_emit(ctx, 1, 0x1a);
+ xf_emit(ctx, 0x1c, 0); /* RO */
+ else if (IS_NVA3F(dev_priv->chipset))
+ xf_emit(ctx, 0x9, 0);
+ xf_emit(ctx, 1, 0); /* 00000001 UNK1534 */
+ xf_emit(ctx, 1, 0); /* 00000001 LINE_SMOOTH_ENABLE */
+ xf_emit(ctx, 1, 0); /* 00000001 LINE_STIPPLE_ENABLE */
+ xf_emit(ctx, 1, 0x00ffff00); /* 00ffffff LINE_STIPPLE_PATTERN */
+ xf_emit(ctx, 1, 0x1a); /* 0000001f POLYGON_MODE */
+ xf_emit(ctx, 1, 0); /* 00000003 WINDOW_ORIGIN */
if (dev_priv->chipset != 0x50) {
- xf_emit(ctx, 1, 0);
- xf_emit(ctx, 1, 3);
+ xf_emit(ctx, 1, 3); /* 00000003 tesla UNK1100 */
+ xf_emit(ctx, 1, 0); /* 3ff */
}
+ /* XXX: the following block could belong either to unk1cxx, or
+ * to STRMOUT. Rather hard to tell. */
if (dev_priv->chipset < 0xa0)
- xf_emit(ctx, 0x26, 0);
+ xf_emit(ctx, 0x25, 0);
else
- xf_emit(ctx, 0x3c, 0);
- xf_emit(ctx, 1, 0x102);
- xf_emit(ctx, 1, 0);
- xf_emit(ctx, 4, 4);
- if (dev_priv->chipset >= 0xa0)
- xf_emit(ctx, 8, 0);
- xf_emit(ctx, 2, 4);
- xf_emit(ctx, 1, 0);
+ xf_emit(ctx, 0x3b, 0);
+}
+
+static void
+nv50_graph_construct_gene_strmout(struct nouveau_grctx *ctx)
+{
+ struct drm_nouveau_private *dev_priv = ctx->dev->dev_private;
+ xf_emit(ctx, 1, 0x102); /* 0000ffff STRMOUT_BUFFER_CTRL */
+ xf_emit(ctx, 1, 0); /* ffffffff STRMOUT_PRIMITIVE_COUNT */
+ xf_emit(ctx, 4, 4); /* 000000ff STRMOUT_NUM_ATTRIBS */
+ if (dev_priv->chipset >= 0xa0) {
+ xf_emit(ctx, 4, 0); /* ffffffff UNK1A8C */
+ xf_emit(ctx, 4, 0); /* ffffffff UNK1780 */
+ }
+ xf_emit(ctx, 1, 4); /* 000000ff GP_RESULT_MAP_SIZE */
+ xf_emit(ctx, 1, 4); /* 0000007f VP_RESULT_MAP_SIZE */
+ xf_emit(ctx, 1, 0); /* 00000001 GP_ENABLE */
if (dev_priv->chipset == 0x50)
- xf_emit(ctx, 1, 0x3ff);
+ xf_emit(ctx, 1, 0x3ff); /* 000003ff tesla UNK0D68 */
else
- xf_emit(ctx, 1, 0x7ff);
- xf_emit(ctx, 1, 0);
- xf_emit(ctx, 1, 0x102);
- xf_emit(ctx, 9, 0);
- xf_emit(ctx, 4, 4);
- xf_emit(ctx, 0x2c, 0);
+ xf_emit(ctx, 1, 0x7ff); /* 000007ff tesla UNK0D68 */
+ xf_emit(ctx, 1, 0); /* ffffffff tesla UNK1A30 */
+ /* SEEK */
+ xf_emit(ctx, 1, 0x102); /* 0000ffff STRMOUT_BUFFER_CTRL */
+ xf_emit(ctx, 1, 0); /* ffffffff STRMOUT_PRIMITIVE_COUNT */
+ xf_emit(ctx, 4, 0); /* 000000ff STRMOUT_ADDRESS_HIGH */
+ xf_emit(ctx, 4, 0); /* ffffffff STRMOUT_ADDRESS_LOW */
+ xf_emit(ctx, 4, 4); /* 000000ff STRMOUT_NUM_ATTRIBS */
+ if (dev_priv->chipset >= 0xa0) {
+ xf_emit(ctx, 4, 0); /* ffffffff UNK1A8C */
+ xf_emit(ctx, 4, 0); /* ffffffff UNK1780 */
+ }
+ xf_emit(ctx, 1, 0); /* 0000ffff DMA_STRMOUT */
+ xf_emit(ctx, 1, 0); /* 0000ffff DMA_QUERY */
+ xf_emit(ctx, 1, 0); /* 000000ff QUERY_ADDRESS_HIGH */
+ xf_emit(ctx, 2, 0); /* ffffffff QUERY_ADDRESS_LOW QUERY_COUNTER */
+ xf_emit(ctx, 2, 0); /* ffffffff */
+ xf_emit(ctx, 1, 0); /* ffffffff tesla UNK1A30 */
+ /* SEEK */
+ xf_emit(ctx, 0x20, 0); /* ffffffff STRMOUT_MAP */
+ xf_emit(ctx, 1, 0); /* 0000000f */
+ xf_emit(ctx, 1, 0); /* 00000000? */
+ xf_emit(ctx, 2, 0); /* ffffffff */
+}
+
+static void
+nv50_graph_construct_gene_ropm1(struct nouveau_grctx *ctx)
+{
+ struct drm_nouveau_private *dev_priv = ctx->dev->dev_private;
+ xf_emit(ctx, 1, 0x4e3bfdf); /* ffffffff UNK0D64 */
+ xf_emit(ctx, 1, 0x4e3bfdf); /* ffffffff UNK0DF4 */
+ xf_emit(ctx, 1, 0); /* 00000007 */
+ xf_emit(ctx, 1, 0); /* 000003ff */
+ if (IS_NVA3F(dev_priv->chipset))
+ xf_emit(ctx, 1, 0x11); /* 000000ff tesla UNK1968 */
+ xf_emit(ctx, 1, 0); /* ffffffff tesla UNK1A3C */
+}
+
+static void
+nv50_graph_construct_gene_ropm2(struct nouveau_grctx *ctx)
+{
+ struct drm_nouveau_private *dev_priv = ctx->dev->dev_private;
+ /* SEEK */
+ xf_emit(ctx, 1, 0); /* 0000ffff DMA_QUERY */
+ xf_emit(ctx, 1, 0x0fac6881); /* 0fffffff RT_CONTROL */
+ xf_emit(ctx, 2, 0); /* ffffffff */
+ xf_emit(ctx, 1, 0); /* 000000ff QUERY_ADDRESS_HIGH */
+ xf_emit(ctx, 2, 0); /* ffffffff QUERY_ADDRESS_LOW, COUNTER */
+ xf_emit(ctx, 1, 0); /* 00000001 SAMPLECNT_ENABLE */
+ xf_emit(ctx, 1, 0); /* 7 */
+ /* SEEK */
+ xf_emit(ctx, 1, 0); /* 0000ffff DMA_QUERY */
+ xf_emit(ctx, 1, 0); /* 000000ff QUERY_ADDRESS_HIGH */
+ xf_emit(ctx, 2, 0); /* ffffffff QUERY_ADDRESS_LOW, COUNTER */
+ xf_emit(ctx, 1, 0x4e3bfdf); /* ffffffff UNK0D64 */
+ xf_emit(ctx, 1, 0x4e3bfdf); /* ffffffff UNK0DF4 */
+ xf_emit(ctx, 1, 0); /* 00000001 eng2d UNK260 */
+ xf_emit(ctx, 1, 0); /* ff/3ff */
+ xf_emit(ctx, 1, 0); /* 00000007 */
+ if (IS_NVA3F(dev_priv->chipset))
+ xf_emit(ctx, 1, 0x11); /* 000000ff tesla UNK1968 */
+ xf_emit(ctx, 1, 0); /* ffffffff tesla UNK1A3C */
}
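/* Editor's note -- the IS_NVA3F()/IS_NVAAF() helpers used throughout
 * this patch are defined elsewhere in nv50_grctx.c and are not part of
 * this excerpt.  Judging from the open-coded ranges they replace below
 * ("> 0xa0 && < 0xaa" becoming IS_NVA3F(), ">= 0xa0 && < 0xaa" becoming
 * ">= 0xa0 && !IS_NVAAF()", and 0xaf being grouped with the NVA3+
 * cases), they expand roughly as follows; treat this as an inference
 * for readability, not a quote of the real macros.
 */
#define IS_NVA3F(x) (((x) > 0xa0 && (x) < 0xaa) || (x) == 0xaf) /* NVA3/NVA5/NVA8, plus NVAF */
#define IS_NVAAF(x) ((x) >= 0xaa && (x) <= 0xac)                /* NVAA/NVAC IGPs */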
static void
@@ -1749,443 +2392,709 @@ nv50_graph_construct_gene_ropc(struct nouveau_grctx *ctx)
int magic2;
if (dev_priv->chipset == 0x50) {
magic2 = 0x00003e60;
- } else if (dev_priv->chipset <= 0xa0 || dev_priv->chipset >= 0xaa) {
+ } else if (!IS_NVA3F(dev_priv->chipset)) {
magic2 = 0x001ffe67;
} else {
magic2 = 0x00087e67;
}
- xf_emit(ctx, 8, 0);
- xf_emit(ctx, 1, 2);
- xf_emit(ctx, 1, 0);
- xf_emit(ctx, 1, magic2);
- xf_emit(ctx, 4, 0);
- if (dev_priv->chipset > 0xa0 && dev_priv->chipset < 0xaa)
- xf_emit(ctx, 1, 1);
- xf_emit(ctx, 7, 0);
- if (dev_priv->chipset >= 0xa0 && dev_priv->chipset < 0xaa)
- xf_emit(ctx, 1, 0x15);
- xf_emit(ctx, 1, 0);
- xf_emit(ctx, 1, 1);
- xf_emit(ctx, 1, 0x10);
- xf_emit(ctx, 2, 0);
- xf_emit(ctx, 1, 1);
- xf_emit(ctx, 4, 0);
+ xf_emit(ctx, 1, 0); /* f/7 MULTISAMPLE_SAMPLES_LOG2 */
+ xf_emit(ctx, 1, 0); /* 00000001 tesla UNK1534 */
+ xf_emit(ctx, 1, 0); /* 00000007 STENCIL_BACK_FUNC_FUNC */
+ xf_emit(ctx, 1, 0); /* 000000ff STENCIL_BACK_FUNC_MASK */
+ xf_emit(ctx, 1, 0); /* 000000ff STENCIL_BACK_MASK */
+ xf_emit(ctx, 3, 0); /* 00000007 STENCIL_BACK_OP_FAIL, ZFAIL, ZPASS */
+ xf_emit(ctx, 1, 2); /* 00000003 tesla UNK143C */
+ xf_emit(ctx, 1, 0); /* ffff0ff3 */
+ xf_emit(ctx, 1, magic2); /* 001fffff tesla UNK0F78 */
+ xf_emit(ctx, 1, 0); /* 00000001 DEPTH_BOUNDS_EN */
+ xf_emit(ctx, 1, 0); /* 00000007 DEPTH_TEST_FUNC */
+ xf_emit(ctx, 1, 0); /* 00000001 DEPTH_TEST_ENABLE */
+ xf_emit(ctx, 1, 0); /* 00000001 DEPTH_WRITE_ENABLE */
+ if (IS_NVA3F(dev_priv->chipset))
+ xf_emit(ctx, 1, 1); /* 0000001f tesla UNK169C */
+ xf_emit(ctx, 1, 0); /* 00000007 STENCIL_FRONT_FUNC_FUNC */
+ xf_emit(ctx, 1, 0); /* 000000ff STENCIL_FRONT_FUNC_MASK */
+ xf_emit(ctx, 1, 0); /* 000000ff STENCIL_FRONT_MASK */
+ xf_emit(ctx, 3, 0); /* 00000007 STENCIL_FRONT_OP_FAIL, ZFAIL, ZPASS */
+ xf_emit(ctx, 1, 0); /* 00000001 STENCIL_FRONT_ENABLE */
+ if (dev_priv->chipset >= 0xa0 && !IS_NVAAF(dev_priv->chipset))
+ xf_emit(ctx, 1, 0x15); /* 000000ff */
+ xf_emit(ctx, 1, 0); /* 00000001 STENCIL_BACK_ENABLE */
+ xf_emit(ctx, 1, 1); /* 00000001 tesla UNK15B4 */
+ xf_emit(ctx, 1, 0x10); /* 3ff/ff VIEW_VOLUME_CLIP_CTRL */
+ xf_emit(ctx, 1, 0); /* ffffffff CLEAR_DEPTH */
+ xf_emit(ctx, 1, 0); /* 0000000f ZETA_FORMAT */
+ xf_emit(ctx, 1, 1); /* 00000001 ZETA_ENABLE */
+ xf_emit(ctx, 1, 0); /* ffffffff tesla UNK1A3C */
if (dev_priv->chipset == 0x86 || dev_priv->chipset == 0x92 || dev_priv->chipset == 0x98 || dev_priv->chipset >= 0xa0) {
- xf_emit(ctx, 1, 4);
- xf_emit(ctx, 1, 0x400);
- xf_emit(ctx, 1, 0x300);
- xf_emit(ctx, 1, 0x1001);
+ xf_emit(ctx, 3, 0); /* ff, ffffffff, ffffffff */
+ xf_emit(ctx, 1, 4); /* 7 */
+ xf_emit(ctx, 1, 0x400); /* fffffff */
+ xf_emit(ctx, 1, 0x300); /* ffff */
+ xf_emit(ctx, 1, 0x1001); /* 1fff */
if (dev_priv->chipset != 0xa0) {
- if (dev_priv->chipset > 0xa0 && dev_priv->chipset < 0xaa)
- xf_emit(ctx, 1, 0);
+ if (IS_NVA3F(dev_priv->chipset))
+ xf_emit(ctx, 1, 0); /* 0000000f UNK15C8 */
else
- xf_emit(ctx, 1, 0x15);
+ xf_emit(ctx, 1, 0x15); /* ff */
}
- xf_emit(ctx, 3, 0);
}
- xf_emit(ctx, 2, 0);
- xf_emit(ctx, 1, 2);
- xf_emit(ctx, 8, 0);
- xf_emit(ctx, 1, 1);
- xf_emit(ctx, 1, 0x10);
- xf_emit(ctx, 1, 0);
- xf_emit(ctx, 1, 1);
- xf_emit(ctx, 0x13, 0);
- xf_emit(ctx, 1, 0x10);
- xf_emit(ctx, 0x10, 0);
- xf_emit(ctx, 0x10, 0x3f800000);
- xf_emit(ctx, 0x19, 0);
- xf_emit(ctx, 1, 0x10);
- xf_emit(ctx, 1, 0);
- xf_emit(ctx, 1, 0x3f);
- xf_emit(ctx, 6, 0);
- xf_emit(ctx, 1, 1);
- xf_emit(ctx, 1, 0);
- xf_emit(ctx, 1, 1);
- xf_emit(ctx, 1, 0);
- xf_emit(ctx, 1, 1);
+ xf_emit(ctx, 1, 0); /* 00000007 MULTISAMPLE_SAMPLES_LOG2 */
+ xf_emit(ctx, 1, 0); /* 00000001 tesla UNK1534 */
+ xf_emit(ctx, 1, 0); /* 00000007 STENCIL_BACK_FUNC_FUNC */
+ xf_emit(ctx, 1, 0); /* 000000ff STENCIL_BACK_FUNC_MASK */
+ xf_emit(ctx, 1, 0); /* ffff0ff3 */
+ xf_emit(ctx, 1, 2); /* 00000003 tesla UNK143C */
+ xf_emit(ctx, 1, 0); /* 00000001 DEPTH_BOUNDS_EN */
+ xf_emit(ctx, 1, 0); /* 00000007 DEPTH_TEST_FUNC */
+ xf_emit(ctx, 1, 0); /* 00000001 DEPTH_TEST_ENABLE */
+ xf_emit(ctx, 1, 0); /* 00000001 DEPTH_WRITE_ENABLE */
+ xf_emit(ctx, 1, 0); /* 00000007 STENCIL_FRONT_FUNC_FUNC */
+ xf_emit(ctx, 1, 0); /* 000000ff STENCIL_FRONT_FUNC_MASK */
+ xf_emit(ctx, 1, 0); /* 00000001 STENCIL_FRONT_ENABLE */
+ xf_emit(ctx, 1, 0); /* 00000001 STENCIL_BACK_ENABLE */
+ xf_emit(ctx, 1, 1); /* 00000001 tesla UNK15B4 */
+ xf_emit(ctx, 1, 0x10); /* 7f/ff VIEW_VOLUME_CLIP_CTRL */
+ xf_emit(ctx, 1, 0); /* 0000000f ZETA_FORMAT */
+ xf_emit(ctx, 1, 1); /* 00000001 ZETA_ENABLE */
+ xf_emit(ctx, 1, 0); /* ffffffff tesla UNK1A3C */
+ xf_emit(ctx, 1, 0); /* 00000001 tesla UNK1534 */
+ xf_emit(ctx, 1, 0); /* 00000001 tesla UNK1900 */
+ xf_emit(ctx, 1, 0); /* 00000007 STENCIL_BACK_FUNC_FUNC */
+ xf_emit(ctx, 1, 0); /* 000000ff STENCIL_BACK_FUNC_MASK */
+ xf_emit(ctx, 1, 0); /* 000000ff STENCIL_BACK_FUNC_REF */
+ xf_emit(ctx, 2, 0); /* ffffffff DEPTH_BOUNDS */
+ xf_emit(ctx, 1, 0); /* 00000001 DEPTH_BOUNDS_EN */
+ xf_emit(ctx, 1, 0); /* 00000007 DEPTH_TEST_FUNC */
+ xf_emit(ctx, 1, 0); /* 00000001 DEPTH_TEST_ENABLE */
+ xf_emit(ctx, 1, 0); /* 00000001 DEPTH_WRITE_ENABLE */
+ xf_emit(ctx, 1, 0); /* 0000000f */
+ xf_emit(ctx, 1, 0); /* 00000001 tesla UNK0FB0 */
+ xf_emit(ctx, 1, 0); /* 00000007 STENCIL_FRONT_FUNC_FUNC */
+ xf_emit(ctx, 1, 0); /* 000000ff STENCIL_FRONT_FUNC_MASK */
+ xf_emit(ctx, 1, 0); /* 000000ff STENCIL_FRONT_FUNC_REF */
+ xf_emit(ctx, 1, 0); /* 00000001 STENCIL_FRONT_ENABLE */
+ xf_emit(ctx, 1, 0); /* 00000001 STENCIL_BACK_ENABLE */
+ xf_emit(ctx, 1, 0x10); /* 7f/ff VIEW_VOLUME_CLIP_CTRL */
+ xf_emit(ctx, 0x10, 0); /* ffffffff DEPTH_RANGE_NEAR */
+ xf_emit(ctx, 0x10, 0x3f800000); /* ffffffff DEPTH_RANGE_FAR */
+ xf_emit(ctx, 1, 0); /* 0000000f ZETA_FORMAT */
+ xf_emit(ctx, 1, 0); /* 00000007 MULTISAMPLE_SAMPLES_LOG2 */
+ xf_emit(ctx, 1, 0); /* 00000007 STENCIL_BACK_FUNC_FUNC */
+ xf_emit(ctx, 1, 0); /* 000000ff STENCIL_BACK_FUNC_MASK */
+ xf_emit(ctx, 1, 0); /* 000000ff STENCIL_BACK_FUNC_REF */
+ xf_emit(ctx, 1, 0); /* 000000ff STENCIL_BACK_MASK */
+ xf_emit(ctx, 3, 0); /* 00000007 STENCIL_BACK_OP_FAIL, ZFAIL, ZPASS */
+ xf_emit(ctx, 2, 0); /* ffffffff DEPTH_BOUNDS */
+ xf_emit(ctx, 1, 0); /* 00000001 DEPTH_BOUNDS_EN */
+ xf_emit(ctx, 1, 0); /* 00000007 DEPTH_TEST_FUNC */
+ xf_emit(ctx, 1, 0); /* 00000001 DEPTH_TEST_ENABLE */
+ xf_emit(ctx, 1, 0); /* 00000001 DEPTH_WRITE_ENABLE */
+ xf_emit(ctx, 1, 0); /* 000000ff CLEAR_STENCIL */
+ xf_emit(ctx, 1, 0); /* 00000007 STENCIL_FRONT_FUNC_FUNC */
+ xf_emit(ctx, 1, 0); /* 000000ff STENCIL_FRONT_FUNC_MASK */
+ xf_emit(ctx, 1, 0); /* 000000ff STENCIL_FRONT_FUNC_REF */
+ xf_emit(ctx, 1, 0); /* 000000ff STENCIL_FRONT_MASK */
+ xf_emit(ctx, 3, 0); /* 00000007 STENCIL_FRONT_OP_FAIL, ZFAIL, ZPASS */
+ xf_emit(ctx, 1, 0); /* 00000001 STENCIL_FRONT_ENABLE */
+ xf_emit(ctx, 1, 0); /* 00000001 STENCIL_BACK_ENABLE */
+ xf_emit(ctx, 1, 0x10); /* 7f/ff VIEW_VOLUME_CLIP_CTRL */
+ xf_emit(ctx, 1, 0); /* 0000000f ZETA_FORMAT */
+ xf_emit(ctx, 1, 0x3f); /* 0000003f UNK1590 */
+ xf_emit(ctx, 1, 0); /* 00000007 MULTISAMPLE_SAMPLES_LOG2 */
+ xf_emit(ctx, 1, 0); /* 00000001 tesla UNK1534 */
+ xf_emit(ctx, 2, 0); /* ffff0ff3, ffff */
+ xf_emit(ctx, 1, 0); /* 00000001 tesla UNK0FB0 */
+ xf_emit(ctx, 1, 0); /* 0001ffff GP_BUILTIN_RESULT_EN */
+ xf_emit(ctx, 1, 1); /* 00000001 tesla UNK15B4 */
+ xf_emit(ctx, 1, 0); /* 0000000f ZETA_FORMAT */
+ xf_emit(ctx, 1, 1); /* 00000001 ZETA_ENABLE */
+ xf_emit(ctx, 1, 0); /* ffffffff CLEAR_DEPTH */
+ xf_emit(ctx, 1, 1); /* 00000001 tesla UNK19CC */
if (dev_priv->chipset >= 0xa0) {
xf_emit(ctx, 2, 0);
xf_emit(ctx, 1, 0x1001);
xf_emit(ctx, 0xb, 0);
} else {
- xf_emit(ctx, 0xc, 0);
+ xf_emit(ctx, 1, 0); /* 00000007 */
+ xf_emit(ctx, 1, 0); /* 00000001 tesla UNK1534 */
+ xf_emit(ctx, 1, 0); /* 00000007 MULTISAMPLE_SAMPLES_LOG2 */
+ xf_emit(ctx, 8, 0); /* 00000001 BLEND_ENABLE */
+ xf_emit(ctx, 1, 0); /* ffff0ff3 */
}
- xf_emit(ctx, 1, 0x11);
- xf_emit(ctx, 7, 0);
- xf_emit(ctx, 1, 0xf);
- xf_emit(ctx, 7, 0);
- xf_emit(ctx, 1, 0x11);
- if (dev_priv->chipset == 0x50)
- xf_emit(ctx, 4, 0);
- else
- xf_emit(ctx, 6, 0);
- xf_emit(ctx, 3, 1);
- xf_emit(ctx, 1, 2);
- xf_emit(ctx, 1, 1);
- xf_emit(ctx, 1, 2);
- xf_emit(ctx, 1, 1);
- xf_emit(ctx, 1, 0);
- xf_emit(ctx, 1, magic2);
- xf_emit(ctx, 1, 0);
- xf_emit(ctx, 1, 0x0fac6881);
- if (dev_priv->chipset > 0xa0 && dev_priv->chipset < 0xaa) {
- xf_emit(ctx, 1, 0);
- xf_emit(ctx, 0x18, 1);
- xf_emit(ctx, 8, 2);
- xf_emit(ctx, 8, 1);
- xf_emit(ctx, 8, 2);
- xf_emit(ctx, 8, 1);
- xf_emit(ctx, 3, 0);
- xf_emit(ctx, 1, 1);
- xf_emit(ctx, 5, 0);
- xf_emit(ctx, 1, 1);
- xf_emit(ctx, 0x16, 0);
+ xf_emit(ctx, 1, 0x11); /* 3f/7f RT_FORMAT */
+ xf_emit(ctx, 7, 0); /* 3f/7f RT_FORMAT */
+ xf_emit(ctx, 1, 0xf); /* 0000000f COLOR_MASK */
+ xf_emit(ctx, 7, 0); /* 0000000f COLOR_MASK */
+ xf_emit(ctx, 1, 0x11); /* 3f/7f */
+ xf_emit(ctx, 1, 0); /* 00000001 LOGIC_OP_ENABLE */
+ if (dev_priv->chipset != 0x50) {
+ xf_emit(ctx, 1, 0); /* 0000000f LOGIC_OP */
+ xf_emit(ctx, 1, 0); /* 000000ff */
+ }
+ xf_emit(ctx, 1, 0); /* 00000007 OPERATION */
+ xf_emit(ctx, 1, 0); /* ff/3ff */
+ xf_emit(ctx, 1, 0); /* 00000003 UNK0F90 */
+ xf_emit(ctx, 2, 1); /* 00000007 BLEND_EQUATION_RGB, ALPHA */
+ xf_emit(ctx, 1, 1); /* 00000001 UNK133C */
+ xf_emit(ctx, 1, 2); /* 0000001f BLEND_FUNC_SRC_RGB */
+ xf_emit(ctx, 1, 1); /* 0000001f BLEND_FUNC_DST_RGB */
+ xf_emit(ctx, 1, 2); /* 0000001f BLEND_FUNC_SRC_ALPHA */
+ xf_emit(ctx, 1, 1); /* 0000001f BLEND_FUNC_DST_ALPHA */
+ xf_emit(ctx, 1, 0); /* 00000001 */
+ xf_emit(ctx, 1, magic2); /* 001fffff tesla UNK0F78 */
+ xf_emit(ctx, 1, 0); /* ffffffff tesla UNK1A3C */
+ xf_emit(ctx, 1, 0x0fac6881); /* 0fffffff RT_CONTROL */
+ if (IS_NVA3F(dev_priv->chipset)) {
+ xf_emit(ctx, 1, 0); /* 00000001 tesla UNK12E4 */
+ xf_emit(ctx, 8, 1); /* 00000007 IBLEND_EQUATION_RGB */
+ xf_emit(ctx, 8, 1); /* 00000007 IBLEND_EQUATION_ALPHA */
+ xf_emit(ctx, 8, 1); /* 00000001 IBLEND_UNK00 */
+ xf_emit(ctx, 8, 2); /* 0000001f IBLEND_FUNC_SRC_RGB */
+ xf_emit(ctx, 8, 1); /* 0000001f IBLEND_FUNC_DST_RGB */
+ xf_emit(ctx, 8, 2); /* 0000001f IBLEND_FUNC_SRC_ALPHA */
+ xf_emit(ctx, 8, 1); /* 0000001f IBLEND_FUNC_DST_ALPHA */
+ xf_emit(ctx, 1, 0); /* 00000001 tesla UNK1140 */
+ xf_emit(ctx, 2, 0); /* 00000001 */
+ xf_emit(ctx, 1, 1); /* 0000001f tesla UNK169C */
+ xf_emit(ctx, 1, 0); /* 0000000f */
+ xf_emit(ctx, 1, 0); /* 00000003 */
+ xf_emit(ctx, 1, 0); /* ffffffff */
+ xf_emit(ctx, 2, 0); /* 00000001 */
+ xf_emit(ctx, 1, 1); /* 0000001f tesla UNK169C */
+ xf_emit(ctx, 1, 0); /* 00000001 */
+ xf_emit(ctx, 1, 0); /* 000003ff */
+ } else if (dev_priv->chipset >= 0xa0) {
+ xf_emit(ctx, 2, 0); /* 00000001 */
+ xf_emit(ctx, 1, 0); /* 00000007 */
+ xf_emit(ctx, 1, 0); /* 00000003 */
+ xf_emit(ctx, 1, 0); /* ffffffff */
+ xf_emit(ctx, 2, 0); /* 00000001 */
} else {
- if (dev_priv->chipset >= 0xa0)
- xf_emit(ctx, 0x1b, 0);
- else
- xf_emit(ctx, 0x15, 0);
+ xf_emit(ctx, 1, 0); /* 00000007 MULTISAMPLE_SAMPLES_LOG2 */
+ xf_emit(ctx, 1, 0); /* 00000003 tesla UNK1430 */
+ xf_emit(ctx, 1, 0); /* ffffffff tesla UNK1A3C */
}
- xf_emit(ctx, 1, 1);
- xf_emit(ctx, 1, 2);
- xf_emit(ctx, 2, 1);
- xf_emit(ctx, 1, 2);
- xf_emit(ctx, 2, 1);
+ xf_emit(ctx, 4, 0); /* ffffffff CLEAR_COLOR */
+ xf_emit(ctx, 4, 0); /* ffffffff BLEND_COLOR A R G B */
+ xf_emit(ctx, 1, 0); /* 00000fff eng2d UNK2B0 */
if (dev_priv->chipset >= 0xa0)
- xf_emit(ctx, 4, 0);
- else
- xf_emit(ctx, 3, 0);
- if (dev_priv->chipset > 0xa0 && dev_priv->chipset < 0xaa) {
- xf_emit(ctx, 0x10, 1);
- xf_emit(ctx, 8, 2);
- xf_emit(ctx, 0x10, 1);
- xf_emit(ctx, 8, 2);
- xf_emit(ctx, 8, 1);
- xf_emit(ctx, 3, 0);
+ xf_emit(ctx, 2, 0); /* 00000001 */
+ xf_emit(ctx, 1, 0); /* 000003ff */
+ xf_emit(ctx, 8, 0); /* 00000001 BLEND_ENABLE */
+ xf_emit(ctx, 1, 1); /* 00000001 UNK133C */
+ xf_emit(ctx, 1, 2); /* 0000001f BLEND_FUNC_SRC_RGB */
+ xf_emit(ctx, 1, 1); /* 0000001f BLEND_FUNC_DST_RGB */
+ xf_emit(ctx, 1, 1); /* 00000007 BLEND_EQUATION_RGB */
+ xf_emit(ctx, 1, 2); /* 0000001f BLEND_FUNC_SRC_ALPHA */
+ xf_emit(ctx, 1, 1); /* 0000001f BLEND_FUNC_DST_ALPHA */
+ xf_emit(ctx, 1, 1); /* 00000007 BLEND_EQUATION_ALPHA */
+ xf_emit(ctx, 1, 0); /* 00000001 UNK19C0 */
+ xf_emit(ctx, 1, 0); /* 00000001 LOGIC_OP_ENABLE */
+ xf_emit(ctx, 1, 0); /* 0000000f LOGIC_OP */
+ if (dev_priv->chipset >= 0xa0)
+ xf_emit(ctx, 1, 0); /* 00000001 UNK12E4? NVA3+ only? */
+ if (IS_NVA3F(dev_priv->chipset)) {
+ xf_emit(ctx, 8, 1); /* 00000001 IBLEND_UNK00 */
+ xf_emit(ctx, 8, 1); /* 00000007 IBLEND_EQUATION_RGB */
+ xf_emit(ctx, 8, 2); /* 0000001f IBLEND_FUNC_SRC_RGB */
+ xf_emit(ctx, 8, 1); /* 0000001f IBLEND_FUNC_DST_RGB */
+ xf_emit(ctx, 8, 1); /* 00000007 IBLEND_EQUATION_ALPHA */
+ xf_emit(ctx, 8, 2); /* 0000001f IBLEND_FUNC_SRC_ALPHA */
+ xf_emit(ctx, 8, 1); /* 0000001f IBLEND_FUNC_DST_ALPHA */
+ xf_emit(ctx, 1, 0); /* 00000001 tesla UNK15C4 */
+ xf_emit(ctx, 1, 0); /* 00000001 */
+ xf_emit(ctx, 1, 0); /* 00000001 tesla UNK1140 */
}
- xf_emit(ctx, 1, 0x11);
- xf_emit(ctx, 1, 1);
- xf_emit(ctx, 0x5b, 0);
+ xf_emit(ctx, 1, 0x11); /* 3f/7f DST_FORMAT */
+ xf_emit(ctx, 1, 1); /* 00000001 DST_LINEAR */
+ xf_emit(ctx, 1, 0); /* 00000007 PATTERN_COLOR_FORMAT */
+ xf_emit(ctx, 2, 0); /* ffffffff PATTERN_MONO_COLOR */
+ xf_emit(ctx, 1, 0); /* 00000001 PATTERN_MONO_FORMAT */
+ xf_emit(ctx, 2, 0); /* ffffffff PATTERN_MONO_BITMAP */
+ xf_emit(ctx, 1, 0); /* 00000003 PATTERN_SELECT */
+ xf_emit(ctx, 1, 0); /* 000000ff ROP */
+ xf_emit(ctx, 1, 0); /* ffffffff BETA1 */
+ xf_emit(ctx, 1, 0); /* ffffffff BETA4 */
+ xf_emit(ctx, 1, 0); /* 00000007 OPERATION */
+ xf_emit(ctx, 0x50, 0); /* 10x ffffff, ffffff, ffffff, ffffff, 3 PATTERN */
}
static void
-nv50_graph_construct_xfer_tp_x1(struct nouveau_grctx *ctx)
+nv50_graph_construct_xfer_unk84xx(struct nouveau_grctx *ctx)
{
struct drm_nouveau_private *dev_priv = ctx->dev->dev_private;
int magic3;
- if (dev_priv->chipset == 0x50)
+ switch (dev_priv->chipset) {
+ case 0x50:
magic3 = 0x1000;
- else if (dev_priv->chipset == 0x86 || dev_priv->chipset == 0x98 || dev_priv->chipset >= 0xa8)
+ break;
+ case 0x86:
+ case 0x98:
+ case 0xa8:
+ case 0xaa:
+ case 0xac:
+ case 0xaf:
magic3 = 0x1e00;
- else
+ break;
+ default:
magic3 = 0;
- xf_emit(ctx, 1, 0);
- xf_emit(ctx, 1, 4);
- if (dev_priv->chipset > 0xa0 && dev_priv->chipset < 0xaa)
- xf_emit(ctx, 0x24, 0);
+ }
+ xf_emit(ctx, 1, 0); /* 00000001 GP_ENABLE */
+ xf_emit(ctx, 1, 4); /* 7f/ff[NVA0+] VP_REG_ALLOC_RESULT */
+ xf_emit(ctx, 1, 0); /* 00000001 GP_ENABLE */
+ xf_emit(ctx, 1, 0); /* ffffffff tesla UNK1A30 */
+ xf_emit(ctx, 1, 0); /* 111/113[NVA0+] */
+ if (IS_NVA3F(dev_priv->chipset))
+ xf_emit(ctx, 0x1f, 0); /* ffffffff */
else if (dev_priv->chipset >= 0xa0)
- xf_emit(ctx, 0x14, 0);
+ xf_emit(ctx, 0x0f, 0); /* ffffffff */
else
- xf_emit(ctx, 0x15, 0);
- xf_emit(ctx, 2, 4);
+ xf_emit(ctx, 0x10, 0); /* fffffff VP_RESULT_MAP_1 up */
+ xf_emit(ctx, 2, 0); /* f/1f[NVA3], fffffff/ffffffff[NVA0+] */
+ xf_emit(ctx, 1, 4); /* 7f/ff VP_REG_ALLOC_RESULT */
+ xf_emit(ctx, 1, 4); /* 7f/ff VP_RESULT_MAP_SIZE */
if (dev_priv->chipset >= 0xa0)
- xf_emit(ctx, 1, 0x03020100);
+ xf_emit(ctx, 1, 0x03020100); /* ffffffff */
else
- xf_emit(ctx, 1, 0x00608080);
- xf_emit(ctx, 4, 0);
- xf_emit(ctx, 1, 4);
- xf_emit(ctx, 2, 0);
- xf_emit(ctx, 2, 4);
- xf_emit(ctx, 1, 0x80);
+ xf_emit(ctx, 1, 0x00608080); /* fffffff VP_RESULT_MAP_0 */
+ xf_emit(ctx, 1, 0); /* 00000001 GP_ENABLE */
+ xf_emit(ctx, 1, 0); /* ffffffff tesla UNK1A30 */
+ xf_emit(ctx, 2, 0); /* 111/113, 7f/ff */
+ xf_emit(ctx, 1, 4); /* 7f/ff VP_RESULT_MAP_SIZE */
+ xf_emit(ctx, 1, 0); /* ffffffff tesla UNK1A30 */
+ xf_emit(ctx, 1, 0); /* 00000001 GP_ENABLE */
+ xf_emit(ctx, 1, 4); /* 000000ff GP_REG_ALLOC_RESULT */
+ xf_emit(ctx, 1, 4); /* 000000ff GP_RESULT_MAP_SIZE */
+ xf_emit(ctx, 1, 0x80); /* 0000ffff GP_VERTEX_OUTPUT_COUNT */
if (magic3)
- xf_emit(ctx, 1, magic3);
- xf_emit(ctx, 1, 4);
- xf_emit(ctx, 0x24, 0);
- xf_emit(ctx, 1, 4);
- xf_emit(ctx, 1, 0x80);
- xf_emit(ctx, 1, 4);
- xf_emit(ctx, 1, 0x03020100);
- xf_emit(ctx, 1, 3);
+ xf_emit(ctx, 1, magic3); /* 00007fff tesla UNK141C */
+ xf_emit(ctx, 1, 4); /* 7f/ff VP_RESULT_MAP_SIZE */
+ xf_emit(ctx, 1, 0); /* ffffffff tesla UNK1A30 */
+ xf_emit(ctx, 1, 0); /* 111/113 */
+ xf_emit(ctx, 0x1f, 0); /* ffffffff GP_RESULT_MAP_1 up */
+ xf_emit(ctx, 1, 0); /* 0000001f */
+ xf_emit(ctx, 1, 0); /* ffffffff */
+ xf_emit(ctx, 1, 0); /* 00000001 GP_ENABLE */
+ xf_emit(ctx, 1, 4); /* 000000ff GP_REG_ALLOC_RESULT */
+ xf_emit(ctx, 1, 0x80); /* 0000ffff GP_VERTEX_OUTPUT_COUNT */
+ xf_emit(ctx, 1, 4); /* 000000ff GP_RESULT_MAP_SIZE */
+ xf_emit(ctx, 1, 0x03020100); /* ffffffff GP_RESULT_MAP_0 */
+ xf_emit(ctx, 1, 3); /* 00000003 GP_OUTPUT_PRIMITIVE_TYPE */
if (magic3)
- xf_emit(ctx, 1, magic3);
- xf_emit(ctx, 1, 4);
- xf_emit(ctx, 4, 0);
- xf_emit(ctx, 1, 4);
- xf_emit(ctx, 1, 3);
- xf_emit(ctx, 3, 0);
- xf_emit(ctx, 1, 4);
+ xf_emit(ctx, 1, magic3); /* 7fff tesla UNK141C */
+ xf_emit(ctx, 1, 4); /* 7f/ff VP_RESULT_MAP_SIZE */
+ xf_emit(ctx, 1, 0); /* 00000001 PROVOKING_VERTEX_LAST */
+ xf_emit(ctx, 1, 0); /* ffffffff tesla UNK1A30 */
+ xf_emit(ctx, 1, 0); /* 111/113 */
+ xf_emit(ctx, 1, 0); /* 00000001 GP_ENABLE */
+ xf_emit(ctx, 1, 4); /* 000000ff GP_RESULT_MAP_SIZE */
+ xf_emit(ctx, 1, 3); /* 00000003 GP_OUTPUT_PRIMITIVE_TYPE */
+ xf_emit(ctx, 1, 0); /* 00000001 PROVOKING_VERTEX_LAST */
+ xf_emit(ctx, 1, 0); /* ffffffff tesla UNK1A30 */
+ xf_emit(ctx, 1, 0); /* 00000003 tesla UNK13A0 */
+ xf_emit(ctx, 1, 4); /* 7f/ff VP_REG_ALLOC_RESULT */
+ xf_emit(ctx, 1, 0); /* 00000001 GP_ENABLE */
+ xf_emit(ctx, 1, 0); /* ffffffff tesla UNK1A30 */
+ xf_emit(ctx, 1, 0); /* 111/113 */
if (dev_priv->chipset == 0x94 || dev_priv->chipset == 0x96)
- xf_emit(ctx, 0x1024, 0);
+ xf_emit(ctx, 0x1020, 0); /* 4 x (0x400 x 0xffffffff, ff, 0, 0, 0, 4 x ffffffff) */
else if (dev_priv->chipset < 0xa0)
- xf_emit(ctx, 0xa24, 0);
- else if (dev_priv->chipset == 0xa0 || dev_priv->chipset >= 0xaa)
- xf_emit(ctx, 0x214, 0);
+ xf_emit(ctx, 0xa20, 0); /* 4 x (0x280 x 0xffffffff, ff, 0, 0, 0, 4 x ffffffff) */
+ else if (!IS_NVA3F(dev_priv->chipset))
+ xf_emit(ctx, 0x210, 0); /* ffffffff */
else
- xf_emit(ctx, 0x414, 0);
- xf_emit(ctx, 1, 4);
- xf_emit(ctx, 1, 3);
- xf_emit(ctx, 2, 0);
+ xf_emit(ctx, 0x410, 0); /* ffffffff */
+ xf_emit(ctx, 1, 0); /* 00000001 GP_ENABLE */
+ xf_emit(ctx, 1, 4); /* 000000ff GP_RESULT_MAP_SIZE */
+ xf_emit(ctx, 1, 3); /* 00000003 GP_OUTPUT_PRIMITIVE_TYPE */
+ xf_emit(ctx, 1, 0); /* 00000001 PROVOKING_VERTEX_LAST */
+ xf_emit(ctx, 1, 0); /* ffffffff tesla UNK1A30 */
}
static void
-nv50_graph_construct_xfer_tp_x2(struct nouveau_grctx *ctx)
+nv50_graph_construct_xfer_tprop(struct nouveau_grctx *ctx)
{
struct drm_nouveau_private *dev_priv = ctx->dev->dev_private;
int magic1, magic2;
if (dev_priv->chipset == 0x50) {
magic1 = 0x3ff;
magic2 = 0x00003e60;
- } else if (dev_priv->chipset <= 0xa0 || dev_priv->chipset >= 0xaa) {
+ } else if (!IS_NVA3F(dev_priv->chipset)) {
magic1 = 0x7ff;
magic2 = 0x001ffe67;
} else {
magic1 = 0x7ff;
magic2 = 0x00087e67;
}
- xf_emit(ctx, 3, 0);
- if (dev_priv->chipset > 0xa0 && dev_priv->chipset < 0xaa)
- xf_emit(ctx, 1, 1);
- xf_emit(ctx, 0xc, 0);
- xf_emit(ctx, 1, 0xf);
- xf_emit(ctx, 0xb, 0);
- xf_emit(ctx, 1, 4);
- xf_emit(ctx, 4, 0xffff);
- xf_emit(ctx, 8, 0);
- xf_emit(ctx, 1, 1);
- xf_emit(ctx, 3, 0);
- xf_emit(ctx, 1, 1);
- xf_emit(ctx, 5, 0);
- xf_emit(ctx, 1, 1);
- xf_emit(ctx, 2, 0);
- if (dev_priv->chipset > 0xa0 && dev_priv->chipset < 0xaa) {
- xf_emit(ctx, 1, 3);
- xf_emit(ctx, 1, 0);
- } else if (dev_priv->chipset >= 0xa0)
- xf_emit(ctx, 1, 1);
- xf_emit(ctx, 0xa, 0);
- xf_emit(ctx, 2, 1);
- xf_emit(ctx, 1, 2);
- xf_emit(ctx, 2, 1);
- xf_emit(ctx, 1, 2);
- if (dev_priv->chipset > 0xa0 && dev_priv->chipset < 0xaa) {
- xf_emit(ctx, 1, 0);
- xf_emit(ctx, 0x18, 1);
- xf_emit(ctx, 8, 2);
- xf_emit(ctx, 8, 1);
- xf_emit(ctx, 8, 2);
- xf_emit(ctx, 8, 1);
- xf_emit(ctx, 1, 0);
+ xf_emit(ctx, 1, 0); /* 00000007 ALPHA_TEST_FUNC */
+ xf_emit(ctx, 1, 0); /* ffffffff ALPHA_TEST_REF */
+ xf_emit(ctx, 1, 0); /* 00000001 ALPHA_TEST_ENABLE */
+ if (IS_NVA3F(dev_priv->chipset))
+ xf_emit(ctx, 1, 1); /* 0000000f UNK16A0 */
+ xf_emit(ctx, 1, 0); /* 7/f MULTISAMPLE_SAMPLES_LOG2 */
+ xf_emit(ctx, 1, 0); /* 00000001 tesla UNK1534 */
+ xf_emit(ctx, 1, 0); /* 000000ff STENCIL_BACK_MASK */
+ xf_emit(ctx, 3, 0); /* 00000007 STENCIL_BACK_OP_FAIL, ZFAIL, ZPASS */
+ xf_emit(ctx, 4, 0); /* ffffffff BLEND_COLOR */
+ xf_emit(ctx, 1, 0); /* 00000001 UNK19C0 */
+ xf_emit(ctx, 1, 0); /* 00000001 UNK0FDC */
+ xf_emit(ctx, 1, 0xf); /* 0000000f COLOR_MASK */
+ xf_emit(ctx, 7, 0); /* 0000000f COLOR_MASK */
+ xf_emit(ctx, 1, 0); /* 00000001 DEPTH_TEST_ENABLE */
+ xf_emit(ctx, 1, 0); /* 00000001 DEPTH_WRITE_ENABLE */
+ xf_emit(ctx, 1, 0); /* 00000001 LOGIC_OP_ENABLE */
+ xf_emit(ctx, 1, 0); /* ff[NV50]/3ff[NV84+] */
+ xf_emit(ctx, 1, 4); /* 00000007 FP_CONTROL */
+ xf_emit(ctx, 4, 0xffff); /* 0000ffff MSAA_MASK */
+ xf_emit(ctx, 1, 0); /* 000000ff STENCIL_FRONT_MASK */
+ xf_emit(ctx, 3, 0); /* 00000007 STENCIL_FRONT_OP_FAIL, ZFAIL, ZPASS */
+ xf_emit(ctx, 1, 0); /* 00000001 STENCIL_FRONT_ENABLE */
+ xf_emit(ctx, 1, 0); /* 00000001 STENCIL_BACK_ENABLE */
+ xf_emit(ctx, 2, 0); /* 00007fff WINDOW_OFFSET_XY */
+ xf_emit(ctx, 1, 1); /* 00000001 tesla UNK19CC */
+ xf_emit(ctx, 1, 0); /* 7 */
+ xf_emit(ctx, 1, 0); /* 00000001 SAMPLECNT_ENABLE */
+ xf_emit(ctx, 1, 0); /* 0000000f ZETA_FORMAT */
+ xf_emit(ctx, 1, 1); /* 00000001 ZETA_ENABLE */
+ xf_emit(ctx, 1, 0); /* ffffffff COLOR_KEY */
+ xf_emit(ctx, 1, 0); /* 00000001 COLOR_KEY_ENABLE */
+ xf_emit(ctx, 1, 0); /* 00000007 COLOR_KEY_FORMAT */
+ xf_emit(ctx, 2, 0); /* ffffffff SIFC_BITMAP_COLOR */
+ xf_emit(ctx, 1, 1); /* 00000001 SIFC_BITMAP_WRITE_BIT0_ENABLE */
+ xf_emit(ctx, 1, 0); /* 00000007 ALPHA_TEST_FUNC */
+ xf_emit(ctx, 1, 0); /* 00000001 ALPHA_TEST_ENABLE */
+ if (IS_NVA3F(dev_priv->chipset)) {
+ xf_emit(ctx, 1, 3); /* 00000003 tesla UNK16B4 */
+ xf_emit(ctx, 1, 0); /* 00000003 */
+ xf_emit(ctx, 1, 0); /* 00000003 tesla UNK1298 */
+ } else if (dev_priv->chipset >= 0xa0) {
+ xf_emit(ctx, 1, 1); /* 00000001 tesla UNK16B4 */
+ xf_emit(ctx, 1, 0); /* 00000003 */
+ } else {
+ xf_emit(ctx, 1, 0); /* 00000003 MULTISAMPLE_CTRL */
}
- xf_emit(ctx, 1, 1);
- xf_emit(ctx, 1, 0);
- xf_emit(ctx, 1, 0x11);
- xf_emit(ctx, 7, 0);
- xf_emit(ctx, 1, 0x0fac6881);
- xf_emit(ctx, 2, 0);
- xf_emit(ctx, 1, 4);
- xf_emit(ctx, 3, 0);
- xf_emit(ctx, 1, 0x11);
- xf_emit(ctx, 1, 1);
- xf_emit(ctx, 1, 0);
- xf_emit(ctx, 3, 0xcf);
- if (dev_priv->chipset > 0xa0 && dev_priv->chipset < 0xaa)
- xf_emit(ctx, 1, 1);
- xf_emit(ctx, 0xa, 0);
- xf_emit(ctx, 2, 1);
- xf_emit(ctx, 1, 2);
- xf_emit(ctx, 2, 1);
- xf_emit(ctx, 1, 2);
- xf_emit(ctx, 1, 1);
- xf_emit(ctx, 1, 0);
- xf_emit(ctx, 8, 1);
- xf_emit(ctx, 1, 0x11);
- xf_emit(ctx, 7, 0);
- xf_emit(ctx, 1, 0x0fac6881);
- xf_emit(ctx, 1, 0xf);
- xf_emit(ctx, 7, 0);
- xf_emit(ctx, 1, magic2);
- xf_emit(ctx, 2, 0);
- xf_emit(ctx, 1, 0x11);
- if (dev_priv->chipset > 0xa0 && dev_priv->chipset < 0xaa)
- xf_emit(ctx, 2, 1);
- else
- xf_emit(ctx, 1, 1);
+ xf_emit(ctx, 1, 0); /* 00000001 tesla UNK1534 */
+ xf_emit(ctx, 8, 0); /* 00000001 BLEND_ENABLE */
+ xf_emit(ctx, 1, 1); /* 0000001f BLEND_FUNC_DST_ALPHA */
+ xf_emit(ctx, 1, 1); /* 00000007 BLEND_EQUATION_ALPHA */
+ xf_emit(ctx, 1, 2); /* 0000001f BLEND_FUNC_SRC_ALPHA */
+ xf_emit(ctx, 1, 1); /* 0000001f BLEND_FUNC_DST_RGB */
+ xf_emit(ctx, 1, 1); /* 00000007 BLEND_EQUATION_RGB */
+ xf_emit(ctx, 1, 2); /* 0000001f BLEND_FUNC_SRC_RGB */
+ if (IS_NVA3F(dev_priv->chipset)) {
+ xf_emit(ctx, 1, 0); /* 00000001 UNK12E4 */
+ xf_emit(ctx, 8, 1); /* 00000007 IBLEND_EQUATION_RGB */
+ xf_emit(ctx, 8, 1); /* 00000007 IBLEND_EQUATION_ALPHA */
+ xf_emit(ctx, 8, 1); /* 00000001 IBLEND_UNK00 */
+ xf_emit(ctx, 8, 2); /* 0000001f IBLEND_SRC_RGB */
+ xf_emit(ctx, 8, 1); /* 0000001f IBLEND_DST_RGB */
+ xf_emit(ctx, 8, 2); /* 0000001f IBLEND_SRC_ALPHA */
+ xf_emit(ctx, 8, 1); /* 0000001f IBLEND_DST_ALPHA */
+ xf_emit(ctx, 1, 0); /* 00000001 UNK1140 */
+ }
+ xf_emit(ctx, 1, 1); /* 00000001 UNK133C */
+ xf_emit(ctx, 1, 0); /* ffff0ff3 */
+ xf_emit(ctx, 1, 0x11); /* 3f/7f RT_FORMAT */
+ xf_emit(ctx, 7, 0); /* 3f/7f RT_FORMAT */
+ xf_emit(ctx, 1, 0x0fac6881); /* 0fffffff RT_CONTROL */
+ xf_emit(ctx, 1, 0); /* 00000001 LOGIC_OP_ENABLE */
+ xf_emit(ctx, 1, 0); /* ff/3ff */
+ xf_emit(ctx, 1, 4); /* 00000007 FP_CONTROL */
+ xf_emit(ctx, 1, 0); /* 00000003 UNK0F90 */
+ xf_emit(ctx, 1, 0); /* 00000001 FRAMEBUFFER_SRGB */
+ xf_emit(ctx, 1, 0); /* 7 */
+ xf_emit(ctx, 1, 0x11); /* 3f/7f DST_FORMAT */
+ xf_emit(ctx, 1, 1); /* 00000001 DST_LINEAR */
+ xf_emit(ctx, 1, 0); /* 00000007 OPERATION */
+ xf_emit(ctx, 1, 0xcf); /* 000000ff SIFC_FORMAT */
+ xf_emit(ctx, 1, 0xcf); /* 000000ff DRAW_COLOR_FORMAT */
+ xf_emit(ctx, 1, 0xcf); /* 000000ff SRC_FORMAT */
+ if (IS_NVA3F(dev_priv->chipset))
+ xf_emit(ctx, 1, 1); /* 0000001f tesla UNK169C */
+ xf_emit(ctx, 1, 0); /* ffffffff tesla UNK1A3C */
+ xf_emit(ctx, 1, 0); /* 7/f[NVA3] MULTISAMPLE_SAMPLES_LOG2 */
+ xf_emit(ctx, 8, 0); /* 00000001 BLEND_ENABLE */
+ xf_emit(ctx, 1, 1); /* 0000001f BLEND_FUNC_DST_ALPHA */
+ xf_emit(ctx, 1, 1); /* 00000007 BLEND_EQUATION_ALPHA */
+ xf_emit(ctx, 1, 2); /* 0000001f BLEND_FUNC_SRC_ALPHA */
+ xf_emit(ctx, 1, 1); /* 0000001f BLEND_FUNC_DST_RGB */
+ xf_emit(ctx, 1, 1); /* 00000007 BLEND_EQUATION_RGB */
+ xf_emit(ctx, 1, 2); /* 0000001f BLEND_FUNC_SRC_RGB */
+ xf_emit(ctx, 1, 1); /* 00000001 UNK133C */
+ xf_emit(ctx, 1, 0); /* ffff0ff3 */
+ xf_emit(ctx, 8, 1); /* 00000001 UNK19E0 */
+ xf_emit(ctx, 1, 0x11); /* 3f/7f RT_FORMAT */
+ xf_emit(ctx, 7, 0); /* 3f/7f RT_FORMAT */
+ xf_emit(ctx, 1, 0x0fac6881); /* 0fffffff RT_CONTROL */
+ xf_emit(ctx, 1, 0xf); /* 0000000f COLOR_MASK */
+ xf_emit(ctx, 7, 0); /* 0000000f COLOR_MASK */
+ xf_emit(ctx, 1, magic2); /* 001fffff tesla UNK0F78 */
+ xf_emit(ctx, 1, 0); /* 00000001 DEPTH_BOUNDS_EN */
+ xf_emit(ctx, 1, 0); /* 00000001 DEPTH_TEST_ENABLE */
+ xf_emit(ctx, 1, 0x11); /* 3f/7f DST_FORMAT */
+ xf_emit(ctx, 1, 1); /* 00000001 DST_LINEAR */
+ if (IS_NVA3F(dev_priv->chipset))
+ xf_emit(ctx, 1, 1); /* 0000001f tesla UNK169C */
if (dev_priv->chipset == 0x50)
- xf_emit(ctx, 1, 0);
+ xf_emit(ctx, 1, 0); /* ff */
else
- xf_emit(ctx, 3, 0);
- xf_emit(ctx, 1, 4);
- xf_emit(ctx, 5, 0);
- xf_emit(ctx, 1, 1);
- xf_emit(ctx, 4, 0);
- xf_emit(ctx, 1, 0x11);
- xf_emit(ctx, 7, 0);
- xf_emit(ctx, 1, 0x0fac6881);
- xf_emit(ctx, 3, 0);
- xf_emit(ctx, 1, 0x11);
- xf_emit(ctx, 1, 1);
- xf_emit(ctx, 1, 0);
- xf_emit(ctx, 1, 1);
- xf_emit(ctx, 1, 0);
- xf_emit(ctx, 1, 1);
- xf_emit(ctx, 1, 0);
- xf_emit(ctx, 1, magic1);
- xf_emit(ctx, 1, 0);
- xf_emit(ctx, 1, 1);
- xf_emit(ctx, 1, 0);
- xf_emit(ctx, 1, 1);
- xf_emit(ctx, 2, 0);
- if (dev_priv->chipset > 0xa0 && dev_priv->chipset < 0xaa)
- xf_emit(ctx, 1, 1);
- xf_emit(ctx, 0x28, 0);
- xf_emit(ctx, 8, 8);
- xf_emit(ctx, 1, 0x11);
- xf_emit(ctx, 7, 0);
- xf_emit(ctx, 1, 0x0fac6881);
- xf_emit(ctx, 8, 0x400);
- xf_emit(ctx, 8, 0x300);
- xf_emit(ctx, 1, 1);
- xf_emit(ctx, 1, 0xf);
- xf_emit(ctx, 7, 0);
- xf_emit(ctx, 1, 0x20);
- xf_emit(ctx, 1, 0x11);
- xf_emit(ctx, 1, 0x100);
- xf_emit(ctx, 1, 0);
- xf_emit(ctx, 1, 1);
- xf_emit(ctx, 2, 0);
- xf_emit(ctx, 1, 0x40);
- xf_emit(ctx, 1, 0x100);
- xf_emit(ctx, 1, 0);
- xf_emit(ctx, 1, 3);
- xf_emit(ctx, 4, 0);
- if (dev_priv->chipset > 0xa0 && dev_priv->chipset < 0xaa)
- xf_emit(ctx, 1, 1);
- xf_emit(ctx, 1, magic2);
- xf_emit(ctx, 3, 0);
- xf_emit(ctx, 1, 2);
- xf_emit(ctx, 1, 0x0fac6881);
- xf_emit(ctx, 9, 0);
- xf_emit(ctx, 1, 1);
- xf_emit(ctx, 4, 0);
- xf_emit(ctx, 1, 4);
- xf_emit(ctx, 1, 0);
- xf_emit(ctx, 1, 1);
- xf_emit(ctx, 1, 0x400);
- xf_emit(ctx, 1, 0x300);
- xf_emit(ctx, 1, 0x1001);
- if (dev_priv->chipset > 0xa0 && dev_priv->chipset < 0xaa)
- xf_emit(ctx, 4, 0);
- else
- xf_emit(ctx, 3, 0);
- xf_emit(ctx, 1, 0x11);
- xf_emit(ctx, 7, 0);
- xf_emit(ctx, 1, 0x0fac6881);
- xf_emit(ctx, 1, 0xf);
- if (dev_priv->chipset > 0xa0 && dev_priv->chipset < 0xaa) {
- xf_emit(ctx, 0x15, 0);
- xf_emit(ctx, 1, 1);
- xf_emit(ctx, 3, 0);
- } else
- xf_emit(ctx, 0x17, 0);
+ xf_emit(ctx, 3, 0); /* 1, 7, 3ff */
+ xf_emit(ctx, 1, 4); /* 00000007 FP_CONTROL */
+ xf_emit(ctx, 1, 0); /* 00000003 UNK0F90 */
+ xf_emit(ctx, 1, 0); /* 00000001 STENCIL_FRONT_ENABLE */
+ xf_emit(ctx, 1, 0); /* 00000007 */
+ xf_emit(ctx, 1, 0); /* 00000001 SAMPLECNT_ENABLE */
+ xf_emit(ctx, 1, 0); /* 0000000f ZETA_FORMAT */
+ xf_emit(ctx, 1, 1); /* 00000001 ZETA_ENABLE */
+ xf_emit(ctx, 1, 0); /* ffffffff tesla UNK1A3C */
+ xf_emit(ctx, 1, 0); /* 7/f MULTISAMPLE_SAMPLES_LOG2 */
+ xf_emit(ctx, 1, 0); /* 00000001 tesla UNK1534 */
+ xf_emit(ctx, 1, 0); /* ffff0ff3 */
+ xf_emit(ctx, 1, 0x11); /* 3f/7f RT_FORMAT */
+ xf_emit(ctx, 7, 0); /* 3f/7f RT_FORMAT */
+ xf_emit(ctx, 1, 0x0fac6881); /* 0fffffff RT_CONTROL */
+ xf_emit(ctx, 1, 0); /* 00000001 DEPTH_BOUNDS_EN */
+ xf_emit(ctx, 1, 0); /* 00000001 DEPTH_TEST_ENABLE */
+ xf_emit(ctx, 1, 0); /* 00000001 DEPTH_WRITE_ENABLE */
+ xf_emit(ctx, 1, 0x11); /* 3f/7f DST_FORMAT */
+ xf_emit(ctx, 1, 1); /* 00000001 DST_LINEAR */
+ xf_emit(ctx, 1, 0); /* 000fffff BLIT_DU_DX_FRACT */
+ xf_emit(ctx, 1, 1); /* 0001ffff BLIT_DU_DX_INT */
+ xf_emit(ctx, 1, 0); /* 000fffff BLIT_DV_DY_FRACT */
+ xf_emit(ctx, 1, 1); /* 0001ffff BLIT_DV_DY_INT */
+ xf_emit(ctx, 1, 0); /* ff/3ff */
+ xf_emit(ctx, 1, magic1); /* 3ff/7ff tesla UNK0D68 */
+ xf_emit(ctx, 1, 0); /* 00000001 STENCIL_FRONT_ENABLE */
+ xf_emit(ctx, 1, 1); /* 00000001 tesla UNK15B4 */
+ xf_emit(ctx, 1, 0); /* 0000000f ZETA_FORMAT */
+ xf_emit(ctx, 1, 1); /* 00000001 ZETA_ENABLE */
+ xf_emit(ctx, 1, 0); /* 00000007 */
+ xf_emit(ctx, 1, 0); /* ffffffff tesla UNK1A3C */
+ if (IS_NVA3F(dev_priv->chipset))
+ xf_emit(ctx, 1, 1); /* 0000001f tesla UNK169C */
+ xf_emit(ctx, 8, 0); /* 0000ffff DMA_COLOR */
+ xf_emit(ctx, 1, 0); /* 0000ffff DMA_GLOBAL */
+ xf_emit(ctx, 1, 0); /* 0000ffff DMA_LOCAL */
+ xf_emit(ctx, 1, 0); /* 0000ffff DMA_STACK */
+ xf_emit(ctx, 1, 0); /* ff/3ff */
+ xf_emit(ctx, 1, 0); /* 0000ffff DMA_DST */
+ xf_emit(ctx, 1, 0); /* 7 */
+ xf_emit(ctx, 1, 0); /* 7/f MULTISAMPLE_SAMPLES_LOG2 */
+ xf_emit(ctx, 1, 0); /* ffff0ff3 */
+ xf_emit(ctx, 8, 0); /* 000000ff RT_ADDRESS_HIGH */
+ xf_emit(ctx, 8, 0); /* ffffffff RT_LAYER_STRIDE */
+ xf_emit(ctx, 8, 0); /* ffffffff RT_ADDRESS_LOW */
+ xf_emit(ctx, 8, 8); /* 0000007f RT_TILE_MODE */
+ xf_emit(ctx, 1, 0x11); /* 3f/7f RT_FORMAT */
+ xf_emit(ctx, 7, 0); /* 3f/7f RT_FORMAT */
+ xf_emit(ctx, 1, 0x0fac6881); /* 0fffffff RT_CONTROL */
+ xf_emit(ctx, 8, 0x400); /* 0fffffff RT_HORIZ */
+ xf_emit(ctx, 8, 0x300); /* 0000ffff RT_VERT */
+ xf_emit(ctx, 1, 1); /* 00001fff RT_ARRAY_MODE */
+ xf_emit(ctx, 1, 0xf); /* 0000000f COLOR_MASK */
+ xf_emit(ctx, 7, 0); /* 0000000f COLOR_MASK */
+ xf_emit(ctx, 1, 0x20); /* 00000fff DST_TILE_MODE */
+ xf_emit(ctx, 1, 0x11); /* 3f/7f DST_FORMAT */
+ xf_emit(ctx, 1, 0x100); /* 0001ffff DST_HEIGHT */
+ xf_emit(ctx, 1, 0); /* 000007ff DST_LAYER */
+ xf_emit(ctx, 1, 1); /* 00000001 DST_LINEAR */
+ xf_emit(ctx, 1, 0); /* ffffffff DST_ADDRESS_LOW */
+ xf_emit(ctx, 1, 0); /* 000000ff DST_ADDRESS_HIGH */
+ xf_emit(ctx, 1, 0x40); /* 0007ffff DST_PITCH */
+ xf_emit(ctx, 1, 0x100); /* 0001ffff DST_WIDTH */
+ xf_emit(ctx, 1, 0); /* 0000ffff */
+ xf_emit(ctx, 1, 3); /* 00000003 tesla UNK15AC */
+ xf_emit(ctx, 1, 0); /* ff/3ff */
+ xf_emit(ctx, 1, 0); /* 0001ffff GP_BUILTIN_RESULT_EN */
+ xf_emit(ctx, 1, 0); /* 00000003 UNK0F90 */
+ xf_emit(ctx, 1, 0); /* 00000007 */
+ if (IS_NVA3F(dev_priv->chipset))
+ xf_emit(ctx, 1, 1); /* 0000001f tesla UNK169C */
+ xf_emit(ctx, 1, magic2); /* 001fffff tesla UNK0F78 */
+ xf_emit(ctx, 1, 0); /* 7/f MULTISAMPLE_SAMPLES_LOG2 */
+ xf_emit(ctx, 1, 0); /* 00000001 tesla UNK1534 */
+ xf_emit(ctx, 1, 0); /* ffff0ff3 */
+ xf_emit(ctx, 1, 2); /* 00000003 tesla UNK143C */
+ xf_emit(ctx, 1, 0x0fac6881); /* 0fffffff RT_CONTROL */
+ xf_emit(ctx, 1, 0); /* 0000ffff DMA_ZETA */
+ xf_emit(ctx, 1, 0); /* 00000001 DEPTH_BOUNDS_EN */
+ xf_emit(ctx, 1, 0); /* 00000001 DEPTH_TEST_ENABLE */
+ xf_emit(ctx, 1, 0); /* 00000001 DEPTH_WRITE_ENABLE */
+ xf_emit(ctx, 2, 0); /* ffff, ff/3ff */
+ xf_emit(ctx, 1, 0); /* 0001ffff GP_BUILTIN_RESULT_EN */
+ xf_emit(ctx, 1, 0); /* 00000001 STENCIL_FRONT_ENABLE */
+ xf_emit(ctx, 1, 0); /* 000000ff STENCIL_FRONT_MASK */
+ xf_emit(ctx, 1, 1); /* 00000001 tesla UNK15B4 */
+ xf_emit(ctx, 1, 0); /* 00000007 */
+ xf_emit(ctx, 1, 0); /* ffffffff ZETA_LAYER_STRIDE */
+ xf_emit(ctx, 1, 0); /* 000000ff ZETA_ADDRESS_HIGH */
+ xf_emit(ctx, 1, 0); /* ffffffff ZETA_ADDRESS_LOW */
+ xf_emit(ctx, 1, 4); /* 00000007 ZETA_TILE_MODE */
+ xf_emit(ctx, 1, 0); /* 0000000f ZETA_FORMAT */
+ xf_emit(ctx, 1, 1); /* 00000001 ZETA_ENABLE */
+ xf_emit(ctx, 1, 0x400); /* 0fffffff ZETA_HORIZ */
+ xf_emit(ctx, 1, 0x300); /* 0000ffff ZETA_VERT */
+ xf_emit(ctx, 1, 0x1001); /* 00001fff ZETA_ARRAY_MODE */
+ xf_emit(ctx, 1, 0); /* ffffffff tesla UNK1A3C */
+ xf_emit(ctx, 1, 0); /* 7/f MULTISAMPLE_SAMPLES_LOG2 */
+ if (IS_NVA3F(dev_priv->chipset))
+ xf_emit(ctx, 1, 0); /* 00000001 */
+ xf_emit(ctx, 1, 0); /* ffff0ff3 */
+ xf_emit(ctx, 1, 0x11); /* 3f/7f RT_FORMAT */
+ xf_emit(ctx, 7, 0); /* 3f/7f RT_FORMAT */
+ xf_emit(ctx, 1, 0x0fac6881); /* 0fffffff RT_CONTROL */
+ xf_emit(ctx, 1, 0xf); /* 0000000f COLOR_MASK */
+ xf_emit(ctx, 7, 0); /* 0000000f COLOR_MASK */
+ xf_emit(ctx, 1, 0); /* ff/3ff */
+ xf_emit(ctx, 8, 0); /* 00000001 BLEND_ENABLE */
+ xf_emit(ctx, 1, 0); /* 00000003 UNK0F90 */
+ xf_emit(ctx, 1, 0); /* 00000001 FRAMEBUFFER_SRGB */
+ xf_emit(ctx, 1, 0); /* 7 */
+ xf_emit(ctx, 1, 0); /* 00000001 LOGIC_OP_ENABLE */
+ if (IS_NVA3F(dev_priv->chipset)) {
+ xf_emit(ctx, 1, 0); /* 00000001 UNK1140 */
+ xf_emit(ctx, 1, 1); /* 0000001f tesla UNK169C */
+ }
+ xf_emit(ctx, 1, 0); /* 7/f MULTISAMPLE_SAMPLES_LOG2 */
+ xf_emit(ctx, 1, 0); /* 00000001 UNK1534 */
+ xf_emit(ctx, 1, 0); /* ffff0ff3 */
if (dev_priv->chipset >= 0xa0)
- xf_emit(ctx, 1, 0x0fac6881);
- xf_emit(ctx, 1, magic2);
- xf_emit(ctx, 3, 0);
- xf_emit(ctx, 1, 0x11);
- xf_emit(ctx, 2, 0);
- xf_emit(ctx, 1, 4);
- xf_emit(ctx, 1, 0);
- xf_emit(ctx, 2, 1);
- xf_emit(ctx, 3, 0);
- if (dev_priv->chipset > 0xa0 && dev_priv->chipset < 0xaa)
- xf_emit(ctx, 2, 1);
- else
- xf_emit(ctx, 1, 1);
- if (dev_priv->chipset > 0xa0 && dev_priv->chipset < 0xaa)
- xf_emit(ctx, 2, 0);
- else if (dev_priv->chipset != 0x50)
- xf_emit(ctx, 1, 0);
+ xf_emit(ctx, 1, 0x0fac6881); /* fffffff */
+ xf_emit(ctx, 1, magic2); /* 001fffff tesla UNK0F78 */
+ xf_emit(ctx, 1, 0); /* 00000001 DEPTH_BOUNDS_EN */
+ xf_emit(ctx, 1, 0); /* 00000001 DEPTH_TEST_ENABLE */
+ xf_emit(ctx, 1, 0); /* 00000001 DEPTH_WRITE_ENABLE */
+ xf_emit(ctx, 1, 0x11); /* 3f/7f DST_FORMAT */
+ xf_emit(ctx, 1, 0); /* 00000001 tesla UNK0FB0 */
+ xf_emit(ctx, 1, 0); /* ff/3ff */
+ xf_emit(ctx, 1, 4); /* 00000007 FP_CONTROL */
+ xf_emit(ctx, 1, 0); /* 00000001 STENCIL_FRONT_ENABLE */
+ xf_emit(ctx, 1, 1); /* 00000001 tesla UNK15B4 */
+ xf_emit(ctx, 1, 1); /* 00000001 tesla UNK19CC */
+ xf_emit(ctx, 1, 0); /* 00000007 */
+ xf_emit(ctx, 1, 0); /* 00000001 SAMPLECNT_ENABLE */
+ xf_emit(ctx, 1, 0); /* 0000000f ZETA_FORMAT */
+ xf_emit(ctx, 1, 1); /* 00000001 ZETA_ENABLE */
+ if (IS_NVA3F(dev_priv->chipset)) {
+ xf_emit(ctx, 1, 1); /* 0000001f tesla UNK169C */
+ xf_emit(ctx, 1, 0); /* 0000000f tesla UNK15C8 */
+ }
+ xf_emit(ctx, 1, 0); /* ffffffff tesla UNK1A3C */
+ if (dev_priv->chipset >= 0xa0) {
+ xf_emit(ctx, 3, 0); /* 7/f, 1, ffff0ff3 */
+ xf_emit(ctx, 1, 0xfac6881); /* fffffff */
+ xf_emit(ctx, 4, 0); /* 1, 1, 1, 3ff */
+ xf_emit(ctx, 1, 4); /* 7 */
+ xf_emit(ctx, 1, 0); /* 1 */
+ xf_emit(ctx, 2, 1); /* 1 */
+ xf_emit(ctx, 2, 0); /* 7, f */
+ xf_emit(ctx, 1, 1); /* 1 */
+ xf_emit(ctx, 1, 0); /* 7/f */
+ if (IS_NVA3F(dev_priv->chipset))
+ xf_emit(ctx, 0x9, 0); /* 1 */
+ else
+ xf_emit(ctx, 0x8, 0); /* 1 */
+ xf_emit(ctx, 1, 0); /* ffff0ff3 */
+ xf_emit(ctx, 8, 1); /* 1 */
+ xf_emit(ctx, 1, 0x11); /* 7f */
+ xf_emit(ctx, 7, 0); /* 7f */
+ xf_emit(ctx, 1, 0xfac6881); /* fffffff */
+ xf_emit(ctx, 1, 0xf); /* f */
+ xf_emit(ctx, 7, 0); /* f */
+ xf_emit(ctx, 1, 0x11); /* 7f */
+ xf_emit(ctx, 1, 1); /* 1 */
+ xf_emit(ctx, 5, 0); /* 1, 7, 3ff, 3, 7 */
+ if (IS_NVA3F(dev_priv->chipset)) {
+ xf_emit(ctx, 1, 0); /* 00000001 UNK1140 */
+ xf_emit(ctx, 1, 1); /* 0000001f tesla UNK169C */
+ }
+ }
}
static void
-nv50_graph_construct_xfer_tp_x3(struct nouveau_grctx *ctx)
+nv50_graph_construct_xfer_tex(struct nouveau_grctx *ctx)
{
struct drm_nouveau_private *dev_priv = ctx->dev->dev_private;
- xf_emit(ctx, 3, 0);
- xf_emit(ctx, 1, 1);
- xf_emit(ctx, 1, 0);
- xf_emit(ctx, 1, 1);
+ xf_emit(ctx, 2, 0); /* 1 LINKED_TSC. yes, 2. */
+ if (dev_priv->chipset != 0x50)
+ xf_emit(ctx, 1, 0); /* 3 */
+ xf_emit(ctx, 1, 1); /* 1ffff BLIT_DU_DX_INT */
+ xf_emit(ctx, 1, 0); /* fffff BLIT_DU_DX_FRACT */
+ xf_emit(ctx, 1, 1); /* 1ffff BLIT_DV_DY_INT */
+ xf_emit(ctx, 1, 0); /* fffff BLIT_DV_DY_FRACT */
if (dev_priv->chipset == 0x50)
- xf_emit(ctx, 2, 0);
+ xf_emit(ctx, 1, 0); /* 3 BLIT_CONTROL */
else
- xf_emit(ctx, 3, 0);
- xf_emit(ctx, 1, 0x2a712488);
- xf_emit(ctx, 1, 0);
- xf_emit(ctx, 1, 0x4085c000);
- xf_emit(ctx, 1, 0x40);
- xf_emit(ctx, 1, 0x100);
- xf_emit(ctx, 1, 0x10100);
- xf_emit(ctx, 1, 0x02800000);
+ xf_emit(ctx, 2, 0); /* 3ff, 1 */
+ xf_emit(ctx, 1, 0x2a712488); /* ffffffff SRC_TIC_0 */
+ xf_emit(ctx, 1, 0); /* ffffffff SRC_TIC_1 */
+ xf_emit(ctx, 1, 0x4085c000); /* ffffffff SRC_TIC_2 */
+ xf_emit(ctx, 1, 0x40); /* ffffffff SRC_TIC_3 */
+ xf_emit(ctx, 1, 0x100); /* ffffffff SRC_TIC_4 */
+ xf_emit(ctx, 1, 0x10100); /* ffffffff SRC_TIC_5 */
+ xf_emit(ctx, 1, 0x02800000); /* ffffffff SRC_TIC_6 */
+ xf_emit(ctx, 1, 0); /* ffffffff SRC_TIC_7 */
+ if (dev_priv->chipset == 0x50) {
+ xf_emit(ctx, 1, 0); /* 00000001 turing UNK358 */
+ xf_emit(ctx, 1, 0); /* ffffffff tesla UNK1A34? */
+ xf_emit(ctx, 1, 0); /* 00000003 turing UNK37C tesla UNK1690 */
+ xf_emit(ctx, 1, 0); /* 00000003 BLIT_CONTROL */
+ xf_emit(ctx, 1, 0); /* 00000001 turing UNK32C tesla UNK0F94 */
+ } else if (!IS_NVAAF(dev_priv->chipset)) {
+ xf_emit(ctx, 1, 0); /* ffffffff tesla UNK1A34? */
+ xf_emit(ctx, 1, 0); /* 00000003 */
+ xf_emit(ctx, 1, 0); /* 000003ff */
+ xf_emit(ctx, 1, 0); /* 00000003 */
+ xf_emit(ctx, 1, 0); /* 000003ff */
+ xf_emit(ctx, 1, 0); /* 00000003 tesla UNK1664 / turing UNK03E8 */
+ xf_emit(ctx, 1, 0); /* 00000003 */
+ xf_emit(ctx, 1, 0); /* 000003ff */
+ } else {
+ xf_emit(ctx, 0x6, 0);
+ }
+ xf_emit(ctx, 1, 0); /* ffffffff tesla UNK1A34 */
+ xf_emit(ctx, 1, 0); /* 0000ffff DMA_TEXTURE */
+ xf_emit(ctx, 1, 0); /* 0000ffff DMA_SRC */
}
static void
-nv50_graph_construct_xfer_tp_x4(struct nouveau_grctx *ctx)
+nv50_graph_construct_xfer_unk8cxx(struct nouveau_grctx *ctx)
{
struct drm_nouveau_private *dev_priv = ctx->dev->dev_private;
- xf_emit(ctx, 2, 0x04e3bfdf);
- xf_emit(ctx, 1, 1);
- xf_emit(ctx, 1, 0);
- xf_emit(ctx, 1, 0x00ffff00);
- if (dev_priv->chipset > 0xa0 && dev_priv->chipset < 0xaa)
- xf_emit(ctx, 2, 1);
- else
- xf_emit(ctx, 1, 1);
- xf_emit(ctx, 2, 0);
- xf_emit(ctx, 1, 0x00ffff00);
- xf_emit(ctx, 8, 0);
- xf_emit(ctx, 1, 1);
- xf_emit(ctx, 1, 0);
- xf_emit(ctx, 1, 1);
- xf_emit(ctx, 1, 0x30201000);
- xf_emit(ctx, 1, 0x70605040);
- xf_emit(ctx, 1, 0xb8a89888);
- xf_emit(ctx, 1, 0xf8e8d8c8);
- xf_emit(ctx, 1, 0);
- xf_emit(ctx, 1, 0x1a);
-}
-
-static void
-nv50_graph_construct_xfer_tp_x5(struct nouveau_grctx *ctx)
-{
- struct drm_nouveau_private *dev_priv = ctx->dev->dev_private;
- xf_emit(ctx, 3, 0);
- xf_emit(ctx, 1, 0xfac6881);
- xf_emit(ctx, 4, 0);
- xf_emit(ctx, 1, 4);
- xf_emit(ctx, 1, 0);
- xf_emit(ctx, 2, 1);
- xf_emit(ctx, 2, 0);
- xf_emit(ctx, 1, 1);
- if (dev_priv->chipset > 0xa0 && dev_priv->chipset < 0xaa)
- xf_emit(ctx, 0xb, 0);
- else
- xf_emit(ctx, 0xa, 0);
- xf_emit(ctx, 8, 1);
- xf_emit(ctx, 1, 0x11);
- xf_emit(ctx, 7, 0);
- xf_emit(ctx, 1, 0xfac6881);
- xf_emit(ctx, 1, 0xf);
- xf_emit(ctx, 7, 0);
- xf_emit(ctx, 1, 0x11);
- xf_emit(ctx, 1, 1);
- if (dev_priv->chipset > 0xa0 && dev_priv->chipset < 0xaa) {
- xf_emit(ctx, 6, 0);
- xf_emit(ctx, 1, 1);
- xf_emit(ctx, 6, 0);
- } else {
- xf_emit(ctx, 0xb, 0);
- }
+ xf_emit(ctx, 1, 0); /* 00000001 UNK1534 */
+ xf_emit(ctx, 1, 0); /* 7/f MULTISAMPLE_SAMPLES_LOG2 */
+ xf_emit(ctx, 2, 0); /* 7, ffff0ff3 */
+ xf_emit(ctx, 1, 0); /* 00000001 DEPTH_TEST_ENABLE */
+ xf_emit(ctx, 1, 0); /* 00000001 DEPTH_WRITE */
+ xf_emit(ctx, 1, 0x04e3bfdf); /* ffffffff UNK0D64 */
+ xf_emit(ctx, 1, 0x04e3bfdf); /* ffffffff UNK0DF4 */
+ xf_emit(ctx, 1, 1); /* 00000001 UNK15B4 */
+ xf_emit(ctx, 1, 0); /* 00000001 LINE_STIPPLE_ENABLE */
+ xf_emit(ctx, 1, 0x00ffff00); /* 00ffffff LINE_STIPPLE_PATTERN */
+ xf_emit(ctx, 1, 1); /* 00000001 tesla UNK0F98 */
+ if (IS_NVA3F(dev_priv->chipset))
+ xf_emit(ctx, 1, 1); /* 0000001f tesla UNK169C */
+ xf_emit(ctx, 1, 0); /* 00000003 tesla UNK1668 */
+ xf_emit(ctx, 1, 0); /* 00000001 LINE_STIPPLE_ENABLE */
+ xf_emit(ctx, 1, 0x00ffff00); /* 00ffffff LINE_STIPPLE_PATTERN */
+ xf_emit(ctx, 1, 0); /* 00000001 POLYGON_SMOOTH_ENABLE */
+ xf_emit(ctx, 1, 0); /* 00000001 UNK1534 */
+ xf_emit(ctx, 1, 0); /* 7/f MULTISAMPLE_SAMPLES_LOG2 */
+ xf_emit(ctx, 1, 0); /* 00000001 tesla UNK1658 */
+ xf_emit(ctx, 1, 0); /* 00000001 LINE_SMOOTH_ENABLE */
+ xf_emit(ctx, 1, 0); /* ffff0ff3 */
+ xf_emit(ctx, 1, 0); /* 00000001 DEPTH_TEST_ENABLE */
+ xf_emit(ctx, 1, 0); /* 00000001 DEPTH_WRITE */
+ xf_emit(ctx, 1, 1); /* 00000001 UNK15B4 */
+ xf_emit(ctx, 1, 0); /* 00000001 POINT_SPRITE_ENABLE */
+ xf_emit(ctx, 1, 1); /* 00000001 tesla UNK165C */
+ xf_emit(ctx, 1, 0x30201000); /* ffffffff tesla UNK1670 */
+ xf_emit(ctx, 1, 0x70605040); /* ffffffff tesla UNK1670 */
+ xf_emit(ctx, 1, 0xb8a89888); /* ffffffff tesla UNK1670 */
+ xf_emit(ctx, 1, 0xf8e8d8c8); /* ffffffff tesla UNK1670 */
+ xf_emit(ctx, 1, 0); /* 00000001 VERTEX_TWO_SIDE_ENABLE */
+ xf_emit(ctx, 1, 0x1a); /* 0000001f POLYGON_MODE */
}
static void
@@ -2193,108 +3102,136 @@ nv50_graph_construct_xfer_tp(struct nouveau_grctx *ctx)
{
struct drm_nouveau_private *dev_priv = ctx->dev->dev_private;
if (dev_priv->chipset < 0xa0) {
- nv50_graph_construct_xfer_tp_x1(ctx);
- nv50_graph_construct_xfer_tp_x2(ctx);
- nv50_graph_construct_xfer_tp_x3(ctx);
- if (dev_priv->chipset == 0x50)
- xf_emit(ctx, 0xf, 0);
- else
- xf_emit(ctx, 0x12, 0);
- nv50_graph_construct_xfer_tp_x4(ctx);
+ nv50_graph_construct_xfer_unk84xx(ctx);
+ nv50_graph_construct_xfer_tprop(ctx);
+ nv50_graph_construct_xfer_tex(ctx);
+ nv50_graph_construct_xfer_unk8cxx(ctx);
} else {
- nv50_graph_construct_xfer_tp_x3(ctx);
- if (dev_priv->chipset < 0xaa)
- xf_emit(ctx, 0xc, 0);
- else
- xf_emit(ctx, 0xa, 0);
- nv50_graph_construct_xfer_tp_x2(ctx);
- nv50_graph_construct_xfer_tp_x5(ctx);
- nv50_graph_construct_xfer_tp_x4(ctx);
- nv50_graph_construct_xfer_tp_x1(ctx);
+ nv50_graph_construct_xfer_tex(ctx);
+ nv50_graph_construct_xfer_tprop(ctx);
+ nv50_graph_construct_xfer_unk8cxx(ctx);
+ nv50_graph_construct_xfer_unk84xx(ctx);
}
}
static void
-nv50_graph_construct_xfer_tp2(struct nouveau_grctx *ctx)
+nv50_graph_construct_xfer_mpc(struct nouveau_grctx *ctx)
{
struct drm_nouveau_private *dev_priv = ctx->dev->dev_private;
- int i, mpcnt;
- if (dev_priv->chipset == 0x98 || dev_priv->chipset == 0xaa)
- mpcnt = 1;
- else if (dev_priv->chipset < 0xa0 || dev_priv->chipset >= 0xa8)
- mpcnt = 2;
- else
- mpcnt = 3;
+ int i, mpcnt = 2;
+ switch (dev_priv->chipset) {
+ case 0x98:
+ case 0xaa:
+ mpcnt = 1;
+ break;
+ case 0x50:
+ case 0x84:
+ case 0x86:
+ case 0x92:
+ case 0x94:
+ case 0x96:
+ case 0xa8:
+ case 0xac:
+ mpcnt = 2;
+ break;
+ case 0xa0:
+ case 0xa3:
+ case 0xa5:
+ case 0xaf:
+ mpcnt = 3;
+ break;
+ }
for (i = 0; i < mpcnt; i++) {
- xf_emit(ctx, 1, 0);
- xf_emit(ctx, 1, 0x80);
- xf_emit(ctx, 1, 0x80007004);
- xf_emit(ctx, 1, 0x04000400);
+ xf_emit(ctx, 1, 0); /* ff */
+ xf_emit(ctx, 1, 0x80); /* ffffffff tesla UNK1404 */
+ xf_emit(ctx, 1, 0x80007004); /* ffffffff tesla UNK12B0 */
+ xf_emit(ctx, 1, 0x04000400); /* ffffffff */
if (dev_priv->chipset >= 0xa0)
- xf_emit(ctx, 1, 0xc0);
- xf_emit(ctx, 1, 0x1000);
- xf_emit(ctx, 2, 0);
- if (dev_priv->chipset == 0x86 || dev_priv->chipset == 0x98 || dev_priv->chipset >= 0xa8) {
- xf_emit(ctx, 1, 0xe00);
- xf_emit(ctx, 1, 0x1e00);
+ xf_emit(ctx, 1, 0xc0); /* 00007fff tesla UNK152C */
+ xf_emit(ctx, 1, 0x1000); /* 0000ffff tesla UNK0D60 */
+ xf_emit(ctx, 1, 0); /* ff/3ff */
+ xf_emit(ctx, 1, 0); /* ffffffff tesla UNK1A30 */
+ if (dev_priv->chipset == 0x86 || dev_priv->chipset == 0x98 || dev_priv->chipset == 0xa8 || IS_NVAAF(dev_priv->chipset)) {
+ xf_emit(ctx, 1, 0xe00); /* 7fff */
+ xf_emit(ctx, 1, 0x1e00); /* 7fff */
}
- xf_emit(ctx, 1, 1);
- xf_emit(ctx, 2, 0);
+ xf_emit(ctx, 1, 1); /* 000000ff VP_REG_ALLOC_TEMP */
+ xf_emit(ctx, 1, 0); /* 00000001 LINKED_TSC */
+ xf_emit(ctx, 1, 0); /* 00000001 GP_ENABLE */
if (dev_priv->chipset == 0x50)
- xf_emit(ctx, 2, 0x1000);
- xf_emit(ctx, 1, 1);
- xf_emit(ctx, 1, 0);
- xf_emit(ctx, 1, 4);
- xf_emit(ctx, 1, 2);
- if (dev_priv->chipset >= 0xaa)
- xf_emit(ctx, 0xb, 0);
+ xf_emit(ctx, 2, 0x1000); /* 7fff tesla UNK141C */
+ xf_emit(ctx, 1, 1); /* 000000ff GP_REG_ALLOC_TEMP */
+ xf_emit(ctx, 1, 0); /* 00000001 GP_ENABLE */
+ xf_emit(ctx, 1, 4); /* 000000ff FP_REG_ALLOC_TEMP */
+ xf_emit(ctx, 1, 2); /* 00000003 REG_MODE */
+ if (IS_NVAAF(dev_priv->chipset))
+ xf_emit(ctx, 0xb, 0); /* RO */
else if (dev_priv->chipset >= 0xa0)
- xf_emit(ctx, 0xc, 0);
+ xf_emit(ctx, 0xc, 0); /* RO */
else
- xf_emit(ctx, 0xa, 0);
+ xf_emit(ctx, 0xa, 0); /* RO */
}
- xf_emit(ctx, 1, 0x08100c12);
- xf_emit(ctx, 1, 0);
+ xf_emit(ctx, 1, 0x08100c12); /* 1fffffff FP_INTERPOLANT_CTRL */
+ xf_emit(ctx, 1, 0); /* ff/3ff */
if (dev_priv->chipset >= 0xa0) {
- xf_emit(ctx, 1, 0x1fe21);
+ xf_emit(ctx, 1, 0x1fe21); /* 0003ffff tesla UNK0FAC */
}
- xf_emit(ctx, 5, 0);
- xf_emit(ctx, 4, 0xffff);
- xf_emit(ctx, 1, 1);
- xf_emit(ctx, 2, 0x10001);
- xf_emit(ctx, 1, 1);
- xf_emit(ctx, 1, 0);
- xf_emit(ctx, 1, 0x1fe21);
- xf_emit(ctx, 1, 0);
- if (dev_priv->chipset > 0xa0 && dev_priv->chipset < 0xaa)
- xf_emit(ctx, 1, 1);
- xf_emit(ctx, 4, 0);
- xf_emit(ctx, 1, 0x08100c12);
- xf_emit(ctx, 1, 4);
- xf_emit(ctx, 1, 0);
- xf_emit(ctx, 1, 2);
- xf_emit(ctx, 1, 0x11);
- xf_emit(ctx, 8, 0);
- xf_emit(ctx, 1, 0xfac6881);
- xf_emit(ctx, 1, 0);
- if (dev_priv->chipset > 0xa0 && dev_priv->chipset < 0xaa)
- xf_emit(ctx, 1, 3);
- xf_emit(ctx, 3, 0);
- xf_emit(ctx, 1, 4);
- xf_emit(ctx, 9, 0);
- xf_emit(ctx, 1, 2);
- xf_emit(ctx, 2, 1);
- xf_emit(ctx, 1, 2);
- xf_emit(ctx, 3, 1);
- xf_emit(ctx, 1, 0);
- if (dev_priv->chipset > 0xa0 && dev_priv->chipset < 0xaa) {
- xf_emit(ctx, 8, 2);
- xf_emit(ctx, 0x10, 1);
- xf_emit(ctx, 8, 2);
- xf_emit(ctx, 0x18, 1);
- xf_emit(ctx, 3, 0);
+ xf_emit(ctx, 3, 0); /* 7fff, 0, 0 */
+ xf_emit(ctx, 1, 0); /* 00000001 tesla UNK1534 */
+ xf_emit(ctx, 1, 0); /* 7/f MULTISAMPLE_SAMPLES_LOG2 */
+ xf_emit(ctx, 4, 0xffff); /* 0000ffff MSAA_MASK */
+ xf_emit(ctx, 1, 1); /* 00000001 LANES32 */
+ xf_emit(ctx, 1, 0x10001); /* 00ffffff BLOCK_ALLOC */
+ xf_emit(ctx, 1, 0x10001); /* ffffffff BLOCKDIM_XY */
+ xf_emit(ctx, 1, 1); /* 0000ffff BLOCKDIM_Z */
+ xf_emit(ctx, 1, 0); /* ffffffff SHARED_SIZE */
+ xf_emit(ctx, 1, 0x1fe21); /* 1ffff/3ffff[NVA0+] tesla UNK0FAC */
+ xf_emit(ctx, 1, 0); /* ffffffff tesla UNK1A34 */
+ if (IS_NVA3F(dev_priv->chipset))
+ xf_emit(ctx, 1, 1); /* 0000001f tesla UNK169C */
+ xf_emit(ctx, 1, 0); /* ff/3ff */
+ xf_emit(ctx, 1, 0); /* 1 LINKED_TSC */
+ xf_emit(ctx, 1, 0); /* ff FP_ADDRESS_HIGH */
+ xf_emit(ctx, 1, 0); /* ffffffff FP_ADDRESS_LOW */
+ xf_emit(ctx, 1, 0x08100c12); /* 1fffffff FP_INTERPOLANT_CTRL */
+ xf_emit(ctx, 1, 4); /* 00000007 FP_CONTROL */
+ xf_emit(ctx, 1, 0); /* 000000ff FRAG_COLOR_CLAMP_EN */
+ xf_emit(ctx, 1, 2); /* 00000003 REG_MODE */
+ xf_emit(ctx, 1, 0x11); /* 0000007f RT_FORMAT */
+ xf_emit(ctx, 7, 0); /* 0000007f RT_FORMAT */
+ xf_emit(ctx, 1, 0); /* 00000007 */
+ xf_emit(ctx, 1, 0xfac6881); /* 0fffffff RT_CONTROL */
+ xf_emit(ctx, 1, 0); /* 00000003 MULTISAMPLE_CTRL */
+ if (IS_NVA3F(dev_priv->chipset))
+ xf_emit(ctx, 1, 3); /* 00000003 tesla UNK16B4 */
+ xf_emit(ctx, 1, 0); /* 00000001 ALPHA_TEST_ENABLE */
+ xf_emit(ctx, 1, 0); /* 00000007 ALPHA_TEST_FUNC */
+ xf_emit(ctx, 1, 0); /* 00000001 FRAMEBUFFER_SRGB */
+ xf_emit(ctx, 1, 4); /* ffffffff tesla UNK1400 */
+ xf_emit(ctx, 8, 0); /* 00000001 BLEND_ENABLE */
+ xf_emit(ctx, 1, 0); /* 00000001 LOGIC_OP_ENABLE */
+ xf_emit(ctx, 1, 2); /* 0000001f BLEND_FUNC_SRC_RGB */
+ xf_emit(ctx, 1, 1); /* 0000001f BLEND_FUNC_DST_RGB */
+ xf_emit(ctx, 1, 1); /* 00000007 BLEND_EQUATION_RGB */
+ xf_emit(ctx, 1, 2); /* 0000001f BLEND_FUNC_SRC_ALPHA */
+ xf_emit(ctx, 1, 1); /* 0000001f BLEND_FUNC_DST_ALPHA */
+ xf_emit(ctx, 1, 1); /* 00000007 BLEND_EQUATION_ALPHA */
+ xf_emit(ctx, 1, 1); /* 00000001 UNK133C */
+ if (IS_NVA3F(dev_priv->chipset)) {
+ xf_emit(ctx, 1, 0); /* 00000001 UNK12E4 */
+ xf_emit(ctx, 8, 2); /* 0000001f IBLEND_FUNC_SRC_RGB */
+ xf_emit(ctx, 8, 1); /* 0000001f IBLEND_FUNC_DST_RGB */
+ xf_emit(ctx, 8, 1); /* 00000007 IBLEND_EQUATION_RGB */
+ xf_emit(ctx, 8, 2); /* 0000001f IBLEND_FUNC_SRC_ALPHA */
+ xf_emit(ctx, 8, 1); /* 0000001f IBLEND_FUNC_DST_ALPHA */
+ xf_emit(ctx, 8, 1); /* 00000007 IBLEND_EQUATION_ALPHA */
+ xf_emit(ctx, 8, 1); /* 00000001 IBLEND_UNK00 */
+ xf_emit(ctx, 1, 0); /* 00000003 tesla UNK1928 */
+ xf_emit(ctx, 1, 0); /* 00000001 UNK1140 */
}
- xf_emit(ctx, 1, 4);
+ xf_emit(ctx, 1, 0); /* 00000003 tesla UNK0F90 */
+ xf_emit(ctx, 1, 4); /* 000000ff FP_RESULT_COUNT */
+ /* XXX: demagic this part some day */
if (dev_priv->chipset == 0x50)
xf_emit(ctx, 0x3a0, 0);
else if (dev_priv->chipset < 0x94)
@@ -2303,9 +3240,9 @@ nv50_graph_construct_xfer_tp2(struct nouveau_grctx *ctx)
xf_emit(ctx, 0x39f, 0);
else
xf_emit(ctx, 0x3a3, 0);
- xf_emit(ctx, 1, 0x11);
- xf_emit(ctx, 1, 0);
- xf_emit(ctx, 1, 1);
+ xf_emit(ctx, 1, 0x11); /* 3f/7f DST_FORMAT */
+ xf_emit(ctx, 1, 0); /* 7 OPERATION */
+ xf_emit(ctx, 1, 1); /* 1 DST_LINEAR */
xf_emit(ctx, 0x2d, 0);
}
@@ -2323,52 +3260,56 @@ nv50_graph_construct_xfer2(struct nouveau_grctx *ctx)
if (dev_priv->chipset < 0xa0) {
for (i = 0; i < 8; i++) {
ctx->ctxvals_pos = offset + i;
+ /* that little bugger belongs to csched. No idea
+ * what it's doing here. */
if (i == 0)
- xf_emit(ctx, 1, 0x08100c12);
+ xf_emit(ctx, 1, 0x08100c12); /* FP_INTERPOLANT_CTRL */
if (units & (1 << i))
- nv50_graph_construct_xfer_tp2(ctx);
+ nv50_graph_construct_xfer_mpc(ctx);
if ((ctx->ctxvals_pos-offset)/8 > size)
size = (ctx->ctxvals_pos-offset)/8;
}
} else {
/* Strand 0: TPs 0, 1 */
ctx->ctxvals_pos = offset;
- xf_emit(ctx, 1, 0x08100c12);
+ /* that little bugger belongs to csched. No idea
+ * what it's doing here. */
+ xf_emit(ctx, 1, 0x08100c12); /* FP_INTERPOLANT_CTRL */
if (units & (1 << 0))
- nv50_graph_construct_xfer_tp2(ctx);
+ nv50_graph_construct_xfer_mpc(ctx);
if (units & (1 << 1))
- nv50_graph_construct_xfer_tp2(ctx);
+ nv50_graph_construct_xfer_mpc(ctx);
if ((ctx->ctxvals_pos-offset)/8 > size)
size = (ctx->ctxvals_pos-offset)/8;
- /* Strand 0: TPs 2, 3 */
+ /* Strand 1: TPs 2, 3 */
ctx->ctxvals_pos = offset + 1;
if (units & (1 << 2))
- nv50_graph_construct_xfer_tp2(ctx);
+ nv50_graph_construct_xfer_mpc(ctx);
if (units & (1 << 3))
- nv50_graph_construct_xfer_tp2(ctx);
+ nv50_graph_construct_xfer_mpc(ctx);
if ((ctx->ctxvals_pos-offset)/8 > size)
size = (ctx->ctxvals_pos-offset)/8;
- /* Strand 0: TPs 4, 5, 6 */
+ /* Strand 2: TPs 4, 5, 6 */
ctx->ctxvals_pos = offset + 2;
if (units & (1 << 4))
- nv50_graph_construct_xfer_tp2(ctx);
+ nv50_graph_construct_xfer_mpc(ctx);
if (units & (1 << 5))
- nv50_graph_construct_xfer_tp2(ctx);
+ nv50_graph_construct_xfer_mpc(ctx);
if (units & (1 << 6))
- nv50_graph_construct_xfer_tp2(ctx);
+ nv50_graph_construct_xfer_mpc(ctx);
if ((ctx->ctxvals_pos-offset)/8 > size)
size = (ctx->ctxvals_pos-offset)/8;
- /* Strand 0: TPs 7, 8, 9 */
+ /* Strand 3: TPs 7, 8, 9 */
ctx->ctxvals_pos = offset + 3;
if (units & (1 << 7))
- nv50_graph_construct_xfer_tp2(ctx);
+ nv50_graph_construct_xfer_mpc(ctx);
if (units & (1 << 8))
- nv50_graph_construct_xfer_tp2(ctx);
+ nv50_graph_construct_xfer_mpc(ctx);
if (units & (1 << 9))
- nv50_graph_construct_xfer_tp2(ctx);
+ nv50_graph_construct_xfer_mpc(ctx);
if ((ctx->ctxvals_pos-offset)/8 > size)
size = (ctx->ctxvals_pos-offset)/8;
}
diff --git a/drivers/gpu/drm/nouveau/nv50_instmem.c b/drivers/gpu/drm/nouveau/nv50_instmem.c
index 91ef93cf1f35..b773229b7647 100644
--- a/drivers/gpu/drm/nouveau/nv50_instmem.c
+++ b/drivers/gpu/drm/nouveau/nv50_instmem.c
@@ -32,39 +32,87 @@
struct nv50_instmem_priv {
uint32_t save1700[5]; /* 0x1700->0x1710 */
- struct nouveau_gpuobj_ref *pramin_pt;
- struct nouveau_gpuobj_ref *pramin_bar;
- struct nouveau_gpuobj_ref *fb_bar;
+ struct nouveau_gpuobj *pramin_pt;
+ struct nouveau_gpuobj *pramin_bar;
+ struct nouveau_gpuobj *fb_bar;
};
-#define NV50_INSTMEM_PAGE_SHIFT 12
-#define NV50_INSTMEM_PAGE_SIZE (1 << NV50_INSTMEM_PAGE_SHIFT)
-#define NV50_INSTMEM_PT_SIZE(a) (((a) >> 12) << 3)
+static void
+nv50_channel_del(struct nouveau_channel **pchan)
+{
+ struct nouveau_channel *chan;
-/*NOTE: - Assumes 0x1700 already covers the correct MiB of PRAMIN
- */
-#define BAR0_WI32(g, o, v) do { \
- uint32_t offset; \
- if ((g)->im_backing) { \
- offset = (g)->im_backing_start; \
- } else { \
- offset = chan->ramin->gpuobj->im_backing_start; \
- offset += (g)->im_pramin->start; \
- } \
- offset += (o); \
- nv_wr32(dev, NV_RAMIN + (offset & 0xfffff), (v)); \
-} while (0)
+ chan = *pchan;
+ *pchan = NULL;
+ if (!chan)
+ return;
+
+ nouveau_gpuobj_ref(NULL, &chan->ramfc);
+ nouveau_gpuobj_ref(NULL, &chan->vm_pd);
+ if (chan->ramin_heap.free_stack.next)
+ drm_mm_takedown(&chan->ramin_heap);
+ nouveau_gpuobj_ref(NULL, &chan->ramin);
+ kfree(chan);
+}
+
+static int
+nv50_channel_new(struct drm_device *dev, u32 size,
+ struct nouveau_channel **pchan)
+{
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ u32 pgd = (dev_priv->chipset == 0x50) ? 0x1400 : 0x0200;
+ u32 fc = (dev_priv->chipset == 0x50) ? 0x0000 : 0x4200;
+ struct nouveau_channel *chan;
+ int ret;
+
+ chan = kzalloc(sizeof(*chan), GFP_KERNEL);
+ if (!chan)
+ return -ENOMEM;
+ chan->dev = dev;
+
+ ret = nouveau_gpuobj_new(dev, NULL, size, 0x1000, 0, &chan->ramin);
+ if (ret) {
+ nv50_channel_del(&chan);
+ return ret;
+ }
+
+ ret = drm_mm_init(&chan->ramin_heap, 0x6000, chan->ramin->size);
+ if (ret) {
+ nv50_channel_del(&chan);
+ return ret;
+ }
+
+ ret = nouveau_gpuobj_new_fake(dev, chan->ramin->pinst == ~0 ? ~0 :
+ chan->ramin->pinst + pgd,
+ chan->ramin->vinst + pgd,
+ 0x4000, NVOBJ_FLAG_ZERO_ALLOC,
+ &chan->vm_pd);
+ if (ret) {
+ nv50_channel_del(&chan);
+ return ret;
+ }
+
+ ret = nouveau_gpuobj_new_fake(dev, chan->ramin->pinst == ~0 ? ~0 :
+ chan->ramin->pinst + fc,
+ chan->ramin->vinst + fc, 0x100,
+ NVOBJ_FLAG_ZERO_ALLOC, &chan->ramfc);
+ if (ret) {
+ nv50_channel_del(&chan);
+ return ret;
+ }
+
+ *pchan = chan;
+ return 0;
+}
int
nv50_instmem_init(struct drm_device *dev)
{
struct drm_nouveau_private *dev_priv = dev->dev_private;
- struct nouveau_channel *chan;
- uint32_t c_offset, c_size, c_ramfc, c_vmpd, c_base, pt_size;
- uint32_t save_nv001700;
- uint64_t v;
struct nv50_instmem_priv *priv;
+ struct nouveau_channel *chan;
int ret, i;
+ u32 tmp;
priv = kzalloc(sizeof(*priv), GFP_KERNEL);
if (!priv)
@@ -75,212 +123,115 @@ nv50_instmem_init(struct drm_device *dev)
for (i = 0x1700; i <= 0x1710; i += 4)
priv->save1700[(i-0x1700)/4] = nv_rd32(dev, i);
- /* Reserve the last MiB of VRAM, we should probably try to avoid
- * setting up the below tables over the top of the VBIOS image at
- * some point.
- */
- dev_priv->ramin_rsvd_vram = 1 << 20;
- c_offset = dev_priv->vram_size - dev_priv->ramin_rsvd_vram;
- c_size = 128 << 10;
- c_vmpd = ((dev_priv->chipset & 0xf0) == 0x50) ? 0x1400 : 0x200;
- c_ramfc = ((dev_priv->chipset & 0xf0) == 0x50) ? 0x0 : 0x20;
- c_base = c_vmpd + 0x4000;
- pt_size = NV50_INSTMEM_PT_SIZE(dev_priv->ramin_size);
-
- NV_DEBUG(dev, " Rsvd VRAM base: 0x%08x\n", c_offset);
- NV_DEBUG(dev, " VBIOS image: 0x%08x\n",
- (nv_rd32(dev, 0x619f04) & ~0xff) << 8);
- NV_DEBUG(dev, " Aperture size: %d MiB\n", dev_priv->ramin_size >> 20);
- NV_DEBUG(dev, " PT size: %d KiB\n", pt_size >> 10);
-
- /* Determine VM layout, we need to do this first to make sure
- * we allocate enough memory for all the page tables.
- */
- dev_priv->vm_gart_base = roundup(NV50_VM_BLOCK, NV50_VM_BLOCK);
- dev_priv->vm_gart_size = NV50_VM_BLOCK;
-
- dev_priv->vm_vram_base = dev_priv->vm_gart_base + dev_priv->vm_gart_size;
- dev_priv->vm_vram_size = dev_priv->vram_size;
- if (dev_priv->vm_vram_size > NV50_VM_MAX_VRAM)
- dev_priv->vm_vram_size = NV50_VM_MAX_VRAM;
- dev_priv->vm_vram_size = roundup(dev_priv->vm_vram_size, NV50_VM_BLOCK);
- dev_priv->vm_vram_pt_nr = dev_priv->vm_vram_size / NV50_VM_BLOCK;
-
- dev_priv->vm_end = dev_priv->vm_vram_base + dev_priv->vm_vram_size;
-
- NV_DEBUG(dev, "NV50VM: GART 0x%016llx-0x%016llx\n",
- dev_priv->vm_gart_base,
- dev_priv->vm_gart_base + dev_priv->vm_gart_size - 1);
- NV_DEBUG(dev, "NV50VM: VRAM 0x%016llx-0x%016llx\n",
- dev_priv->vm_vram_base,
- dev_priv->vm_vram_base + dev_priv->vm_vram_size - 1);
-
- c_size += dev_priv->vm_vram_pt_nr * (NV50_VM_BLOCK / 65536 * 8);
-
- /* Map BAR0 PRAMIN aperture over the memory we want to use */
- save_nv001700 = nv_rd32(dev, NV50_PUNK_BAR0_PRAMIN);
- nv_wr32(dev, NV50_PUNK_BAR0_PRAMIN, (c_offset >> 16));
-
- /* Create a fake channel, and use it as our "dummy" channels 0/127.
- * The main reason for creating a channel is so we can use the gpuobj
- * code. However, it's probably worth noting that NVIDIA also setup
- * their channels 0/127 with the same values they configure here.
- * So, there may be some other reason for doing this.
- *
- * Have to create the entire channel manually, as the real channel
- * creation code assumes we have PRAMIN access, and we don't until
- * we're done here.
- */
- chan = kzalloc(sizeof(*chan), GFP_KERNEL);
- if (!chan)
+ /* Global PRAMIN heap */
+ ret = drm_mm_init(&dev_priv->ramin_heap, 0, dev_priv->ramin_size);
+ if (ret) {
+ NV_ERROR(dev, "Failed to init RAMIN heap\n");
return -ENOMEM;
- chan->id = 0;
- chan->dev = dev;
- chan->file_priv = (struct drm_file *)-2;
- dev_priv->fifos[0] = dev_priv->fifos[127] = chan;
-
- INIT_LIST_HEAD(&chan->ramht_refs);
+ }
- /* Channel's PRAMIN object + heap */
- ret = nouveau_gpuobj_new_fake(dev, 0, c_offset, c_size, 0,
- NULL, &chan->ramin);
+ /* we need a channel to plug into the hw to control the BARs */
+ ret = nv50_channel_new(dev, 128*1024, &dev_priv->fifos[0]);
if (ret)
return ret;
+ chan = dev_priv->fifos[127] = dev_priv->fifos[0];
- if (drm_mm_init(&chan->ramin_heap, c_base, c_size - c_base))
- return -ENOMEM;
-
- /* RAMFC + zero channel's PRAMIN up to start of VM pagedir */
- ret = nouveau_gpuobj_new_fake(dev, c_ramfc, c_offset + c_ramfc,
- 0x4000, 0, NULL, &chan->ramfc);
+ /* allocate page table for PRAMIN BAR */
+ ret = nouveau_gpuobj_new(dev, chan, (dev_priv->ramin_size >> 12) * 8,
+ 0x1000, NVOBJ_FLAG_ZERO_ALLOC,
+ &priv->pramin_pt);
if (ret)
return ret;
- for (i = 0; i < c_vmpd; i += 4)
- BAR0_WI32(chan->ramin->gpuobj, i, 0);
+ nv_wo32(chan->vm_pd, 0x0000, priv->pramin_pt->vinst | 0x63);
+ nv_wo32(chan->vm_pd, 0x0004, 0);
- /* VM page directory */
- ret = nouveau_gpuobj_new_fake(dev, c_vmpd, c_offset + c_vmpd,
- 0x4000, 0, &chan->vm_pd, NULL);
+ /* DMA object for PRAMIN BAR */
+ ret = nouveau_gpuobj_new(dev, chan, 6*4, 16, 0, &priv->pramin_bar);
if (ret)
return ret;
- for (i = 0; i < 0x4000; i += 8) {
- BAR0_WI32(chan->vm_pd, i + 0x00, 0x00000000);
- BAR0_WI32(chan->vm_pd, i + 0x04, 0x00000000);
- }
-
- /* PRAMIN page table, cheat and map into VM at 0x0000000000.
- * We map the entire fake channel into the start of the PRAMIN BAR
- */
- ret = nouveau_gpuobj_new_ref(dev, chan, NULL, 0, pt_size, 0x1000,
- 0, &priv->pramin_pt);
+ nv_wo32(priv->pramin_bar, 0x00, 0x7fc00000);
+ nv_wo32(priv->pramin_bar, 0x04, dev_priv->ramin_size - 1);
+ nv_wo32(priv->pramin_bar, 0x08, 0x00000000);
+ nv_wo32(priv->pramin_bar, 0x0c, 0x00000000);
+ nv_wo32(priv->pramin_bar, 0x10, 0x00000000);
+ nv_wo32(priv->pramin_bar, 0x14, 0x00000000);
+
+ /* map channel into PRAMIN, gpuobj didn't do it for us */
+ ret = nv50_instmem_bind(dev, chan->ramin);
if (ret)
return ret;
- v = c_offset | 1;
- if (dev_priv->vram_sys_base) {
- v += dev_priv->vram_sys_base;
- v |= 0x30;
- }
+ /* poke regs... */
+ nv_wr32(dev, 0x001704, 0x00000000 | (chan->ramin->vinst >> 12));
+ nv_wr32(dev, 0x001704, 0x40000000 | (chan->ramin->vinst >> 12));
+ nv_wr32(dev, 0x00170c, 0x80000000 | (priv->pramin_bar->cinst >> 4));
- i = 0;
- while (v < dev_priv->vram_sys_base + c_offset + c_size) {
- BAR0_WI32(priv->pramin_pt->gpuobj, i + 0, lower_32_bits(v));
- BAR0_WI32(priv->pramin_pt->gpuobj, i + 4, upper_32_bits(v));
- v += 0x1000;
- i += 8;
+ tmp = nv_ri32(dev, 0);
+ nv_wi32(dev, 0, ~tmp);
+ if (nv_ri32(dev, 0) != ~tmp) {
+ NV_ERROR(dev, "PRAMIN readback failed\n");
+ return -EIO;
}
+ nv_wi32(dev, 0, tmp);
- while (i < pt_size) {
- BAR0_WI32(priv->pramin_pt->gpuobj, i + 0, 0x00000000);
- BAR0_WI32(priv->pramin_pt->gpuobj, i + 4, 0x00000000);
- i += 8;
- }
+ dev_priv->ramin_available = true;
- BAR0_WI32(chan->vm_pd, 0x00, priv->pramin_pt->instance | 0x63);
- BAR0_WI32(chan->vm_pd, 0x04, 0x00000000);
+ /* Determine VM layout */
+ dev_priv->vm_gart_base = roundup(NV50_VM_BLOCK, NV50_VM_BLOCK);
+ dev_priv->vm_gart_size = NV50_VM_BLOCK;
+
+ dev_priv->vm_vram_base = dev_priv->vm_gart_base + dev_priv->vm_gart_size;
+ dev_priv->vm_vram_size = dev_priv->vram_size;
+ if (dev_priv->vm_vram_size > NV50_VM_MAX_VRAM)
+ dev_priv->vm_vram_size = NV50_VM_MAX_VRAM;
+ dev_priv->vm_vram_size = roundup(dev_priv->vm_vram_size, NV50_VM_BLOCK);
+ dev_priv->vm_vram_pt_nr = dev_priv->vm_vram_size / NV50_VM_BLOCK;
+
+ dev_priv->vm_end = dev_priv->vm_vram_base + dev_priv->vm_vram_size;
+
+ NV_DEBUG(dev, "NV50VM: GART 0x%016llx-0x%016llx\n",
+ dev_priv->vm_gart_base,
+ dev_priv->vm_gart_base + dev_priv->vm_gart_size - 1);
+ NV_DEBUG(dev, "NV50VM: VRAM 0x%016llx-0x%016llx\n",
+ dev_priv->vm_vram_base,
+ dev_priv->vm_vram_base + dev_priv->vm_vram_size - 1);
/* VRAM page table(s), mapped into VM at +1GiB */
for (i = 0; i < dev_priv->vm_vram_pt_nr; i++) {
- ret = nouveau_gpuobj_new_ref(dev, chan, NULL, 0,
- NV50_VM_BLOCK/65536*8, 0, 0,
- &chan->vm_vram_pt[i]);
+ ret = nouveau_gpuobj_new(dev, NULL, NV50_VM_BLOCK / 0x10000 * 8,
+ 0, NVOBJ_FLAG_ZERO_ALLOC,
+ &chan->vm_vram_pt[i]);
if (ret) {
- NV_ERROR(dev, "Error creating VRAM page tables: %d\n",
- ret);
+ NV_ERROR(dev, "Error creating VRAM PGT: %d\n", ret);
dev_priv->vm_vram_pt_nr = i;
return ret;
}
- dev_priv->vm_vram_pt[i] = chan->vm_vram_pt[i]->gpuobj;
+ dev_priv->vm_vram_pt[i] = chan->vm_vram_pt[i];
- for (v = 0; v < dev_priv->vm_vram_pt[i]->im_pramin->size;
- v += 4)
- BAR0_WI32(dev_priv->vm_vram_pt[i], v, 0);
-
- BAR0_WI32(chan->vm_pd, 0x10 + (i*8),
- chan->vm_vram_pt[i]->instance | 0x61);
- BAR0_WI32(chan->vm_pd, 0x14 + (i*8), 0);
+ nv_wo32(chan->vm_pd, 0x10 + (i*8),
+ chan->vm_vram_pt[i]->vinst | 0x61);
+ nv_wo32(chan->vm_pd, 0x14 + (i*8), 0);
}
- /* DMA object for PRAMIN BAR */
- ret = nouveau_gpuobj_new_ref(dev, chan, chan, 0, 6*4, 16, 0,
- &priv->pramin_bar);
- if (ret)
- return ret;
- BAR0_WI32(priv->pramin_bar->gpuobj, 0x00, 0x7fc00000);
- BAR0_WI32(priv->pramin_bar->gpuobj, 0x04, dev_priv->ramin_size - 1);
- BAR0_WI32(priv->pramin_bar->gpuobj, 0x08, 0x00000000);
- BAR0_WI32(priv->pramin_bar->gpuobj, 0x0c, 0x00000000);
- BAR0_WI32(priv->pramin_bar->gpuobj, 0x10, 0x00000000);
- BAR0_WI32(priv->pramin_bar->gpuobj, 0x14, 0x00000000);
-
/* DMA object for FB BAR */
- ret = nouveau_gpuobj_new_ref(dev, chan, chan, 0, 6*4, 16, 0,
- &priv->fb_bar);
+ ret = nouveau_gpuobj_new(dev, chan, 6*4, 16, 0, &priv->fb_bar);
if (ret)
return ret;
- BAR0_WI32(priv->fb_bar->gpuobj, 0x00, 0x7fc00000);
- BAR0_WI32(priv->fb_bar->gpuobj, 0x04, 0x40000000 +
- pci_resource_len(dev->pdev, 1) - 1);
- BAR0_WI32(priv->fb_bar->gpuobj, 0x08, 0x40000000);
- BAR0_WI32(priv->fb_bar->gpuobj, 0x0c, 0x00000000);
- BAR0_WI32(priv->fb_bar->gpuobj, 0x10, 0x00000000);
- BAR0_WI32(priv->fb_bar->gpuobj, 0x14, 0x00000000);
+ nv_wo32(priv->fb_bar, 0x00, 0x7fc00000);
+ nv_wo32(priv->fb_bar, 0x04, 0x40000000 +
+ pci_resource_len(dev->pdev, 1) - 1);
+ nv_wo32(priv->fb_bar, 0x08, 0x40000000);
+ nv_wo32(priv->fb_bar, 0x0c, 0x00000000);
+ nv_wo32(priv->fb_bar, 0x10, 0x00000000);
+ nv_wo32(priv->fb_bar, 0x14, 0x00000000);
- /* Poke the relevant regs, and pray it works :) */
- nv_wr32(dev, NV50_PUNK_BAR_CFG_BASE, (chan->ramin->instance >> 12));
- nv_wr32(dev, NV50_PUNK_UNK1710, 0);
- nv_wr32(dev, NV50_PUNK_BAR_CFG_BASE, (chan->ramin->instance >> 12) |
- NV50_PUNK_BAR_CFG_BASE_VALID);
- nv_wr32(dev, NV50_PUNK_BAR1_CTXDMA, (priv->fb_bar->instance >> 4) |
- NV50_PUNK_BAR1_CTXDMA_VALID);
- nv_wr32(dev, NV50_PUNK_BAR3_CTXDMA, (priv->pramin_bar->instance >> 4) |
- NV50_PUNK_BAR3_CTXDMA_VALID);
+ dev_priv->engine.instmem.flush(dev);
+ nv_wr32(dev, 0x001708, 0x80000000 | (priv->fb_bar->cinst >> 4));
for (i = 0; i < 8; i++)
nv_wr32(dev, 0x1900 + (i*4), 0);
- /* Assume that praying isn't enough, check that we can re-read the
- * entire fake channel back from the PRAMIN BAR */
- for (i = 0; i < c_size; i += 4) {
- if (nv_rd32(dev, NV_RAMIN + i) != nv_ri32(dev, i)) {
- NV_ERROR(dev, "Error reading back PRAMIN at 0x%08x\n",
- i);
- return -EINVAL;
- }
- }
-
- nv_wr32(dev, NV50_PUNK_BAR0_PRAMIN, save_nv001700);
-
- /* Global PRAMIN heap */
- if (drm_mm_init(&dev_priv->ramin_heap, c_size, dev_priv->ramin_size - c_size)) {
- NV_ERROR(dev, "Failed to init RAMIN heap\n");
- }
-
- /*XXX: incorrect, but needed to make hash func "work" */
- dev_priv->ramht_offset = 0x10000;
- dev_priv->ramht_bits = 9;
- dev_priv->ramht_size = (1 << dev_priv->ramht_bits) * 8;
return 0;
}
@@ -297,29 +248,24 @@ nv50_instmem_takedown(struct drm_device *dev)
if (!priv)
return;
+ dev_priv->ramin_available = false;
+
/* Restore state from before init */
for (i = 0x1700; i <= 0x1710; i += 4)
nv_wr32(dev, i, priv->save1700[(i - 0x1700) / 4]);
- nouveau_gpuobj_ref_del(dev, &priv->fb_bar);
- nouveau_gpuobj_ref_del(dev, &priv->pramin_bar);
- nouveau_gpuobj_ref_del(dev, &priv->pramin_pt);
+ nouveau_gpuobj_ref(NULL, &priv->fb_bar);
+ nouveau_gpuobj_ref(NULL, &priv->pramin_bar);
+ nouveau_gpuobj_ref(NULL, &priv->pramin_pt);
/* Destroy dummy channel */
if (chan) {
- for (i = 0; i < dev_priv->vm_vram_pt_nr; i++) {
- nouveau_gpuobj_ref_del(dev, &chan->vm_vram_pt[i]);
- dev_priv->vm_vram_pt[i] = NULL;
- }
+ for (i = 0; i < dev_priv->vm_vram_pt_nr; i++)
+ nouveau_gpuobj_ref(NULL, &chan->vm_vram_pt[i]);
dev_priv->vm_vram_pt_nr = 0;
- nouveau_gpuobj_del(dev, &chan->vm_pd);
- nouveau_gpuobj_ref_del(dev, &chan->ramfc);
- nouveau_gpuobj_ref_del(dev, &chan->ramin);
- drm_mm_takedown(&chan->ramin_heap);
-
- dev_priv->fifos[0] = dev_priv->fifos[127] = NULL;
- kfree(chan);
+ nv50_channel_del(&dev_priv->fifos[0]);
+ dev_priv->fifos[127] = NULL;
}
dev_priv->engine.instmem.priv = NULL;
@@ -331,14 +277,14 @@ nv50_instmem_suspend(struct drm_device *dev)
{
struct drm_nouveau_private *dev_priv = dev->dev_private;
struct nouveau_channel *chan = dev_priv->fifos[0];
- struct nouveau_gpuobj *ramin = chan->ramin->gpuobj;
+ struct nouveau_gpuobj *ramin = chan->ramin;
int i;
- ramin->im_backing_suspend = vmalloc(ramin->im_pramin->size);
+ ramin->im_backing_suspend = vmalloc(ramin->size);
if (!ramin->im_backing_suspend)
return -ENOMEM;
- for (i = 0; i < ramin->im_pramin->size; i += 4)
+ for (i = 0; i < ramin->size; i += 4)
ramin->im_backing_suspend[i/4] = nv_ri32(dev, i);
return 0;
}
@@ -349,23 +295,25 @@ nv50_instmem_resume(struct drm_device *dev)
struct drm_nouveau_private *dev_priv = dev->dev_private;
struct nv50_instmem_priv *priv = dev_priv->engine.instmem.priv;
struct nouveau_channel *chan = dev_priv->fifos[0];
- struct nouveau_gpuobj *ramin = chan->ramin->gpuobj;
+ struct nouveau_gpuobj *ramin = chan->ramin;
int i;
- nv_wr32(dev, NV50_PUNK_BAR0_PRAMIN, (ramin->im_backing_start >> 16));
- for (i = 0; i < ramin->im_pramin->size; i += 4)
- BAR0_WI32(ramin, i, ramin->im_backing_suspend[i/4]);
+ dev_priv->ramin_available = false;
+ dev_priv->ramin_base = ~0;
+ for (i = 0; i < ramin->size; i += 4)
+ nv_wo32(ramin, i, ramin->im_backing_suspend[i/4]);
+ dev_priv->ramin_available = true;
vfree(ramin->im_backing_suspend);
ramin->im_backing_suspend = NULL;
/* Poke the relevant regs, and pray it works :) */
- nv_wr32(dev, NV50_PUNK_BAR_CFG_BASE, (chan->ramin->instance >> 12));
+ nv_wr32(dev, NV50_PUNK_BAR_CFG_BASE, (chan->ramin->vinst >> 12));
nv_wr32(dev, NV50_PUNK_UNK1710, 0);
- nv_wr32(dev, NV50_PUNK_BAR_CFG_BASE, (chan->ramin->instance >> 12) |
+ nv_wr32(dev, NV50_PUNK_BAR_CFG_BASE, (chan->ramin->vinst >> 12) |
NV50_PUNK_BAR_CFG_BASE_VALID);
- nv_wr32(dev, NV50_PUNK_BAR1_CTXDMA, (priv->fb_bar->instance >> 4) |
+ nv_wr32(dev, NV50_PUNK_BAR1_CTXDMA, (priv->fb_bar->cinst >> 4) |
NV50_PUNK_BAR1_CTXDMA_VALID);
- nv_wr32(dev, NV50_PUNK_BAR3_CTXDMA, (priv->pramin_bar->instance >> 4) |
+ nv_wr32(dev, NV50_PUNK_BAR3_CTXDMA, (priv->pramin_bar->cinst >> 4) |
NV50_PUNK_BAR3_CTXDMA_VALID);
for (i = 0; i < 8; i++)
@@ -381,7 +329,7 @@ nv50_instmem_populate(struct drm_device *dev, struct nouveau_gpuobj *gpuobj,
if (gpuobj->im_backing)
return -EINVAL;
- *sz = ALIGN(*sz, NV50_INSTMEM_PAGE_SIZE);
+ *sz = ALIGN(*sz, 4096);
if (*sz == 0)
return -EINVAL;
@@ -399,9 +347,7 @@ nv50_instmem_populate(struct drm_device *dev, struct nouveau_gpuobj *gpuobj,
return ret;
}
- gpuobj->im_backing_start = gpuobj->im_backing->bo.mem.mm_node->start;
- gpuobj->im_backing_start <<= PAGE_SHIFT;
-
+ gpuobj->vinst = gpuobj->im_backing->bo.mem.start << PAGE_SHIFT;
return 0;
}
@@ -424,7 +370,7 @@ nv50_instmem_bind(struct drm_device *dev, struct nouveau_gpuobj *gpuobj)
{
struct drm_nouveau_private *dev_priv = dev->dev_private;
struct nv50_instmem_priv *priv = dev_priv->engine.instmem.priv;
- struct nouveau_gpuobj *pramin_pt = priv->pramin_pt->gpuobj;
+ struct nouveau_gpuobj *pramin_pt = priv->pramin_pt;
uint32_t pte, pte_end;
uint64_t vram;
@@ -436,11 +382,11 @@ nv50_instmem_bind(struct drm_device *dev, struct nouveau_gpuobj *gpuobj)
pte = (gpuobj->im_pramin->start >> 12) << 1;
pte_end = ((gpuobj->im_pramin->size >> 12) << 1) + pte;
- vram = gpuobj->im_backing_start;
+ vram = gpuobj->vinst;
NV_DEBUG(dev, "pramin=0x%lx, pte=%d, pte_end=%d\n",
gpuobj->im_pramin->start, pte, pte_end);
- NV_DEBUG(dev, "first vram page: 0x%08x\n", gpuobj->im_backing_start);
+ NV_DEBUG(dev, "first vram page: 0x%010llx\n", gpuobj->vinst);
vram |= 1;
if (dev_priv->vram_sys_base) {
@@ -449,13 +395,13 @@ nv50_instmem_bind(struct drm_device *dev, struct nouveau_gpuobj *gpuobj)
}
while (pte < pte_end) {
- nv_wo32(dev, pramin_pt, pte++, lower_32_bits(vram));
- nv_wo32(dev, pramin_pt, pte++, upper_32_bits(vram));
- vram += NV50_INSTMEM_PAGE_SIZE;
+ nv_wo32(pramin_pt, (pte * 4) + 0, lower_32_bits(vram));
+ nv_wo32(pramin_pt, (pte * 4) + 4, upper_32_bits(vram));
+ vram += 0x1000;
+ pte += 2;
}
dev_priv->engine.instmem.flush(dev);
- nv50_vm_flush(dev, 4);
nv50_vm_flush(dev, 6);
gpuobj->im_bound = 1;
@@ -472,12 +418,17 @@ nv50_instmem_unbind(struct drm_device *dev, struct nouveau_gpuobj *gpuobj)
if (gpuobj->im_bound == 0)
return -EINVAL;
+ /* can happen during late takedown */
+ if (unlikely(!dev_priv->ramin_available))
+ return 0;
+
pte = (gpuobj->im_pramin->start >> 12) << 1;
pte_end = ((gpuobj->im_pramin->size >> 12) << 1) + pte;
while (pte < pte_end) {
- nv_wo32(dev, priv->pramin_pt->gpuobj, pte++, 0x00000000);
- nv_wo32(dev, priv->pramin_pt->gpuobj, pte++, 0x00000000);
+ nv_wo32(priv->pramin_pt, (pte * 4) + 0, 0x00000000);
+ nv_wo32(priv->pramin_pt, (pte * 4) + 4, 0x00000000);
+ pte += 2;
}
dev_priv->engine.instmem.flush(dev);
@@ -489,7 +440,7 @@ void
nv50_instmem_flush(struct drm_device *dev)
{
nv_wr32(dev, 0x00330c, 0x00000001);
- if (!nv_wait(0x00330c, 0x00000002, 0x00000000))
+ if (!nv_wait(dev, 0x00330c, 0x00000002, 0x00000000))
NV_ERROR(dev, "PRAMIN flush timeout\n");
}
@@ -497,7 +448,7 @@ void
nv84_instmem_flush(struct drm_device *dev)
{
nv_wr32(dev, 0x070000, 0x00000001);
- if (!nv_wait(0x070000, 0x00000002, 0x00000000))
+ if (!nv_wait(dev, 0x070000, 0x00000002, 0x00000000))
NV_ERROR(dev, "PRAMIN flush timeout\n");
}
@@ -505,7 +456,7 @@ void
nv50_vm_flush(struct drm_device *dev, int engine)
{
nv_wr32(dev, 0x100c80, (engine << 16) | 1);
- if (!nv_wait(0x100c80, 0x00000001, 0x00000000))
+ if (!nv_wait(dev, 0x100c80, 0x00000001, 0x00000000))
NV_ERROR(dev, "vm flush timeout: engine %d\n", engine);
}
diff --git a/drivers/gpu/drm/nouveau/nv50_pm.c b/drivers/gpu/drm/nouveau/nv50_pm.c
new file mode 100644
index 000000000000..7dbb305d7e63
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nv50_pm.c
@@ -0,0 +1,131 @@
+/*
+ * Copyright 2010 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs
+ */
+
+#include "drmP.h"
+#include "nouveau_drv.h"
+#include "nouveau_bios.h"
+#include "nouveau_pm.h"
+
+struct nv50_pm_state {
+ struct nouveau_pm_level *perflvl;
+ struct pll_lims pll;
+ enum pll_types type;
+ int N, M, P;
+};
+
+int
+nv50_pm_clock_get(struct drm_device *dev, u32 id)
+{
+ struct pll_lims pll;
+ int P, N, M, ret;
+ u32 reg0, reg1;
+
+ ret = get_pll_limits(dev, id, &pll);
+ if (ret)
+ return ret;
+
+ reg0 = nv_rd32(dev, pll.reg + 0);
+ reg1 = nv_rd32(dev, pll.reg + 4);
+ P = (reg0 & 0x00070000) >> 16;
+ N = (reg1 & 0x0000ff00) >> 8;
+ M = (reg1 & 0x000000ff);
+
+ return ((pll.refclk * N / M) >> P);
+}
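For reference, nv50_pm_clock_get above recovers the PLL coefficients from two registers and returns refclk * N / M shifted right by the post-divider P. A minimal standalone sketch of that arithmetic, using the same bit fields as the patch but purely made-up sample values:

#include <stdio.h>
#include <stdint.h>

/* Decode N/M/P the same way nv50_pm_clock_get does and compute the clock. */
static int nv50_clock_from_regs(uint32_t reg0, uint32_t reg1, int refclk_khz)
{
	int P = (reg0 & 0x00070000) >> 16;	/* post-divider (a power-of-two shift) */
	int N = (reg1 & 0x0000ff00) >> 8;	/* feedback divider */
	int M = (reg1 & 0x000000ff);		/* reference divider */

	return (refclk_khz * N / M) >> P;
}

int main(void)
{
	/* hypothetical values: refclk 27000 kHz, N = 100, M = 2, P = 1 -> 675000 kHz */
	printf("%d kHz\n", nv50_clock_from_regs(1 << 16, (100 << 8) | 2, 27000));
	return 0;
}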
+
+void *
+nv50_pm_clock_pre(struct drm_device *dev, struct nouveau_pm_level *perflvl,
+ u32 id, int khz)
+{
+ struct nv50_pm_state *state;
+ int dummy, ret;
+
+ state = kzalloc(sizeof(*state), GFP_KERNEL);
+ if (!state)
+ return ERR_PTR(-ENOMEM);
+ state->type = id;
+ state->perflvl = perflvl;
+
+ ret = get_pll_limits(dev, id, &state->pll);
+ if (ret < 0) {
+ kfree(state);
+ return (ret == -ENOENT) ? NULL : ERR_PTR(ret);
+ }
+
+ ret = nv50_calc_pll(dev, &state->pll, khz, &state->N, &state->M,
+ &dummy, &dummy, &state->P);
+ if (ret < 0) {
+ kfree(state);
+ return ERR_PTR(ret);
+ }
+
+ return state;
+}
+
+void
+nv50_pm_clock_set(struct drm_device *dev, void *pre_state)
+{
+ struct nv50_pm_state *state = pre_state;
+ struct nouveau_pm_level *perflvl = state->perflvl;
+ u32 reg = state->pll.reg, tmp;
+ struct bit_entry BIT_M;
+ u16 script;
+ int N = state->N;
+ int M = state->M;
+ int P = state->P;
+
+ if (state->type == PLL_MEMORY && perflvl->memscript &&
+ bit_table(dev, 'M', &BIT_M) == 0 &&
+ BIT_M.version == 1 && BIT_M.length >= 0x0b) {
+ script = ROM16(BIT_M.data[0x05]);
+ if (script)
+ nouveau_bios_run_init_table(dev, script, NULL);
+ script = ROM16(BIT_M.data[0x07]);
+ if (script)
+ nouveau_bios_run_init_table(dev, script, NULL);
+ script = ROM16(BIT_M.data[0x09]);
+ if (script)
+ nouveau_bios_run_init_table(dev, script, NULL);
+
+ nouveau_bios_run_init_table(dev, perflvl->memscript, NULL);
+ }
+
+ if (state->type == PLL_MEMORY) {
+ nv_wr32(dev, 0x100210, 0);
+ nv_wr32(dev, 0x1002dc, 1);
+ }
+
+ tmp = nv_rd32(dev, reg + 0) & 0xfff8ffff;
+ tmp |= 0x80000000 | (P << 16);
+ nv_wr32(dev, reg + 0, tmp);
+ nv_wr32(dev, reg + 4, (N << 8) | M);
+
+ if (state->type == PLL_MEMORY) {
+ nv_wr32(dev, 0x1002dc, 0);
+ nv_wr32(dev, 0x100210, 0x80000000);
+ }
+
+ kfree(state);
+}
+
diff --git a/drivers/gpu/drm/nouveau/nv50_sor.c b/drivers/gpu/drm/nouveau/nv50_sor.c
index bcd4cf84a7e6..b4a5ecb199f9 100644
--- a/drivers/gpu/drm/nouveau/nv50_sor.c
+++ b/drivers/gpu/drm/nouveau/nv50_sor.c
@@ -92,7 +92,7 @@ nv50_sor_dpms(struct drm_encoder *encoder, int mode)
}
/* wait for it to be done */
- if (!nv_wait(NV50_PDISPLAY_SOR_DPMS_CTRL(or),
+ if (!nv_wait(dev, NV50_PDISPLAY_SOR_DPMS_CTRL(or),
NV50_PDISPLAY_SOR_DPMS_CTRL_PENDING, 0)) {
NV_ERROR(dev, "timeout: SOR_DPMS_CTRL_PENDING(%d) == 0\n", or);
NV_ERROR(dev, "SOR_DPMS_CTRL(%d) = 0x%08x\n", or,
@@ -108,7 +108,7 @@ nv50_sor_dpms(struct drm_encoder *encoder, int mode)
nv_wr32(dev, NV50_PDISPLAY_SOR_DPMS_CTRL(or), val |
NV50_PDISPLAY_SOR_DPMS_CTRL_PENDING);
- if (!nv_wait(NV50_PDISPLAY_SOR_DPMS_STATE(or),
+ if (!nv_wait(dev, NV50_PDISPLAY_SOR_DPMS_STATE(or),
NV50_PDISPLAY_SOR_DPMS_STATE_WAIT, 0)) {
NV_ERROR(dev, "timeout: SOR_DPMS_STATE_WAIT(%d) == 0\n", or);
NV_ERROR(dev, "SOR_DPMS_STATE(%d) = 0x%08x\n", or,
diff --git a/drivers/gpu/drm/nouveau/nva3_pm.c b/drivers/gpu/drm/nouveau/nva3_pm.c
new file mode 100644
index 000000000000..dbbafed36406
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nva3_pm.c
@@ -0,0 +1,95 @@
+/*
+ * Copyright 2010 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs
+ */
+
+#include "drmP.h"
+#include "nouveau_drv.h"
+#include "nouveau_bios.h"
+#include "nouveau_pm.h"
+
+/*XXX: boards using limits 0x40 need fixing, the register layout
+ * is correct here, but, there's some other funny magic
+ * that modifies things, so it's not likely we'll set/read
+ * the correct timings yet.. working on it...
+ */
+
+struct nva3_pm_state {
+ struct pll_lims pll;
+ int N, M, P;
+};
+
+int
+nva3_pm_clock_get(struct drm_device *dev, u32 id)
+{
+ struct pll_lims pll;
+ int P, N, M, ret;
+ u32 reg;
+
+ ret = get_pll_limits(dev, id, &pll);
+ if (ret)
+ return ret;
+
+ reg = nv_rd32(dev, pll.reg + 4);
+ P = (reg & 0x003f0000) >> 16;
+ N = (reg & 0x0000ff00) >> 8;
+ M = (reg & 0x000000ff);
+ return pll.refclk * N / M / P;
+}
+
+void *
+nva3_pm_clock_pre(struct drm_device *dev, struct nouveau_pm_level *perflvl,
+ u32 id, int khz)
+{
+ struct nva3_pm_state *state;
+ int dummy, ret;
+
+ state = kzalloc(sizeof(*state), GFP_KERNEL);
+ if (!state)
+ return ERR_PTR(-ENOMEM);
+
+ ret = get_pll_limits(dev, id, &state->pll);
+ if (ret < 0) {
+ kfree(state);
+ return (ret == -ENOENT) ? NULL : ERR_PTR(ret);
+ }
+
+ ret = nv50_calc_pll2(dev, &state->pll, khz, &state->N, &dummy,
+ &state->M, &state->P);
+ if (ret < 0) {
+ kfree(state);
+ return ERR_PTR(ret);
+ }
+
+ return state;
+}
+
+void
+nva3_pm_clock_set(struct drm_device *dev, void *pre_state)
+{
+ struct nva3_pm_state *state = pre_state;
+ u32 reg = state->pll.reg;
+
+ nv_wr32(dev, reg + 4, (state->P << 16) | (state->N << 8) | state->M);
+ kfree(state);
+}
+
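The nva3 path packs all three coefficients into one register write, and nva3_pm_clock_get divides by P directly rather than treating it as a shift (compare the nv50 variant earlier in this patch). A small sketch of that round trip, again with illustrative values only:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	int N = 125, M = 3, P = 2, refclk_khz = 27000;

	/* pack the coefficients the way nva3_pm_clock_set writes them */
	uint32_t reg = (P << 16) | (N << 8) | M;

	/* decode them the way nva3_pm_clock_get reads them back */
	int p = (reg & 0x003f0000) >> 16;
	int n = (reg & 0x0000ff00) >> 8;
	int m = (reg & 0x000000ff);

	printf("%d kHz\n", refclk_khz * n / m / p);	/* 27000 * 125 / 3 / 2 = 562500 */
	return 0;
}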
diff --git a/drivers/gpu/drm/nouveau/nvc0_fifo.c b/drivers/gpu/drm/nouveau/nvc0_fifo.c
index d64375871979..890c2b95fbc1 100644
--- a/drivers/gpu/drm/nouveau/nvc0_fifo.c
+++ b/drivers/gpu/drm/nouveau/nvc0_fifo.c
@@ -43,12 +43,6 @@ nvc0_fifo_reassign(struct drm_device *dev, bool enable)
}
bool
-nvc0_fifo_cache_flush(struct drm_device *dev)
-{
- return true;
-}
-
-bool
nvc0_fifo_cache_pull(struct drm_device *dev, bool enable)
{
return false;
diff --git a/drivers/gpu/drm/nouveau/nvc0_instmem.c b/drivers/gpu/drm/nouveau/nvc0_instmem.c
index 6b451f864783..13a0f78a9088 100644
--- a/drivers/gpu/drm/nouveau/nvc0_instmem.c
+++ b/drivers/gpu/drm/nouveau/nvc0_instmem.c
@@ -50,8 +50,7 @@ nvc0_instmem_populate(struct drm_device *dev, struct nouveau_gpuobj *gpuobj,
return ret;
}
- gpuobj->im_backing_start = gpuobj->im_backing->bo.mem.mm_node->start;
- gpuobj->im_backing_start <<= PAGE_SHIFT;
+ gpuobj->vinst = gpuobj->im_backing->bo.mem.start << PAGE_SHIFT;
return 0;
}
@@ -84,11 +83,11 @@ nvc0_instmem_bind(struct drm_device *dev, struct nouveau_gpuobj *gpuobj)
pte = gpuobj->im_pramin->start >> 12;
pte_end = (gpuobj->im_pramin->size >> 12) + pte;
- vram = gpuobj->im_backing_start;
+ vram = gpuobj->vinst;
NV_DEBUG(dev, "pramin=0x%lx, pte=%d, pte_end=%d\n",
gpuobj->im_pramin->start, pte, pte_end);
- NV_DEBUG(dev, "first vram page: 0x%08x\n", gpuobj->im_backing_start);
+ NV_DEBUG(dev, "first vram page: 0x%010llx\n", gpuobj->vinst);
while (pte < pte_end) {
nv_wr32(dev, 0x702000 + (pte * 8), (vram >> 8) | 1);
@@ -134,7 +133,7 @@ void
nvc0_instmem_flush(struct drm_device *dev)
{
nv_wr32(dev, 0x070000, 1);
- if (!nv_wait(0x070000, 0x00000002, 0x00000000))
+ if (!nv_wait(dev, 0x070000, 0x00000002, 0x00000000))
NV_ERROR(dev, "PRAMIN flush timeout\n");
}
@@ -221,10 +220,6 @@ nvc0_instmem_init(struct drm_device *dev)
return -ENOMEM;
}
- /*XXX: incorrect, but needed to make hash func "work" */
- dev_priv->ramht_offset = 0x10000;
- dev_priv->ramht_bits = 9;
- dev_priv->ramht_size = (1 << dev_priv->ramht_bits) * 8;
return 0;
}
diff --git a/drivers/gpu/drm/nouveau/nvreg.h b/drivers/gpu/drm/nouveau/nvreg.h
index ad64673ace1f..881f8a585613 100644
--- a/drivers/gpu/drm/nouveau/nvreg.h
+++ b/drivers/gpu/drm/nouveau/nvreg.h
@@ -263,6 +263,7 @@
# define NV_CIO_CRE_HCUR_ADDR1_ADR 7:2
# define NV_CIO_CRE_LCD__INDEX 0x33
# define NV_CIO_CRE_LCD_LCD_SELECT 0:0
+# define NV_CIO_CRE_LCD_ROUTE_MASK 0x3b
# define NV_CIO_CRE_DDC0_STATUS__INDEX 0x36
# define NV_CIO_CRE_DDC0_WR__INDEX 0x37
# define NV_CIO_CRE_ILACE__INDEX 0x39 /* interlace */
diff --git a/drivers/gpu/drm/r128/r128_drv.c b/drivers/gpu/drm/r128/r128_drv.c
index d42c76c23714..18c3c71e41b1 100644
--- a/drivers/gpu/drm/r128/r128_drv.c
+++ b/drivers/gpu/drm/r128/r128_drv.c
@@ -56,8 +56,6 @@ static struct drm_driver driver = {
.irq_uninstall = r128_driver_irq_uninstall,
.irq_handler = r128_driver_irq_handler,
.reclaim_buffers = drm_core_reclaim_buffers,
- .get_map_ofs = drm_core_get_map_ofs,
- .get_reg_ofs = drm_core_get_reg_ofs,
.ioctls = r128_ioctls,
.dma_ioctl = r128_cce_buffers,
.fops = {
diff --git a/drivers/gpu/drm/radeon/Makefile b/drivers/gpu/drm/radeon/Makefile
index aebe00875041..6cae4f2028d2 100644
--- a/drivers/gpu/drm/radeon/Makefile
+++ b/drivers/gpu/drm/radeon/Makefile
@@ -65,7 +65,7 @@ radeon-y += radeon_device.o radeon_asic.o radeon_kms.o \
rs400.o rs600.o rs690.o rv515.o r520.o r600.o rv770.o radeon_test.o \
r200.o radeon_legacy_tv.o r600_cs.o r600_blit.o r600_blit_shaders.o \
r600_blit_kms.o radeon_pm.o atombios_dp.o r600_audio.o r600_hdmi.o \
- evergreen.o evergreen_cs.o
+ evergreen.o evergreen_cs.o evergreen_blit_shaders.o evergreen_blit_kms.o
radeon-$(CONFIG_COMPAT) += radeon_ioc32.o
radeon-$(CONFIG_VGA_SWITCHEROO) += radeon_atpx_handler.o
diff --git a/drivers/gpu/drm/radeon/atom.c b/drivers/gpu/drm/radeon/atom.c
index 8e421f644a54..05efb5b9f13e 100644
--- a/drivers/gpu/drm/radeon/atom.c
+++ b/drivers/gpu/drm/radeon/atom.c
@@ -112,6 +112,7 @@ static uint32_t atom_iio_execute(struct atom_context *ctx, int base,
base += 3;
break;
case ATOM_IIO_WRITE:
+ (void)ctx->card->ioreg_read(ctx->card, CU16(base + 1));
ctx->card->ioreg_write(ctx->card, CU16(base + 1), temp);
base += 3;
break;
diff --git a/drivers/gpu/drm/radeon/atombios_crtc.c b/drivers/gpu/drm/radeon/atombios_crtc.c
index cd0290f946cf..df2b6f2b35f8 100644
--- a/drivers/gpu/drm/radeon/atombios_crtc.c
+++ b/drivers/gpu/drm/radeon/atombios_crtc.c
@@ -398,65 +398,76 @@ static void atombios_disable_ss(struct drm_crtc *crtc)
union atom_enable_ss {
- ENABLE_LVDS_SS_PARAMETERS legacy;
+ ENABLE_LVDS_SS_PARAMETERS lvds_ss;
+ ENABLE_LVDS_SS_PARAMETERS_V2 lvds_ss_2;
ENABLE_SPREAD_SPECTRUM_ON_PPLL_PS_ALLOCATION v1;
+ ENABLE_SPREAD_SPECTRUM_ON_PPLL_V2 v2;
};
-static void atombios_enable_ss(struct drm_crtc *crtc)
+static void atombios_crtc_program_ss(struct drm_crtc *crtc,
+ int enable,
+ int pll_id,
+ struct radeon_atom_ss *ss)
{
- struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc);
struct drm_device *dev = crtc->dev;
struct radeon_device *rdev = dev->dev_private;
- struct drm_encoder *encoder = NULL;
- struct radeon_encoder *radeon_encoder = NULL;
- struct radeon_encoder_atom_dig *dig = NULL;
int index = GetIndexIntoMasterTable(COMMAND, EnableSpreadSpectrumOnPPLL);
union atom_enable_ss args;
- uint16_t percentage = 0;
- uint8_t type = 0, step = 0, delay = 0, range = 0;
- /* XXX add ss support for DCE4 */
- if (ASIC_IS_DCE4(rdev))
- return;
+ memset(&args, 0, sizeof(args));
- list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
- if (encoder->crtc == crtc) {
- radeon_encoder = to_radeon_encoder(encoder);
- /* only enable spread spectrum on LVDS */
- if (radeon_encoder->devices & (ATOM_DEVICE_LCD_SUPPORT)) {
- dig = radeon_encoder->enc_priv;
- if (dig && dig->ss) {
- percentage = dig->ss->percentage;
- type = dig->ss->type;
- step = dig->ss->step;
- delay = dig->ss->delay;
- range = dig->ss->range;
- } else
- return;
- } else
- return;
+ if (ASIC_IS_DCE4(rdev)) {
+ args.v2.usSpreadSpectrumPercentage = cpu_to_le16(ss->percentage);
+ args.v2.ucSpreadSpectrumType = ss->type;
+ switch (pll_id) {
+ case ATOM_PPLL1:
+ args.v2.ucSpreadSpectrumType |= ATOM_PPLL_SS_TYPE_V2_P1PLL;
+ args.v2.usSpreadSpectrumAmount = ss->amount;
+ args.v2.usSpreadSpectrumStep = ss->step;
+ break;
+ case ATOM_PPLL2:
+ args.v2.ucSpreadSpectrumType |= ATOM_PPLL_SS_TYPE_V2_P2PLL;
+ args.v2.usSpreadSpectrumAmount = ss->amount;
+ args.v2.usSpreadSpectrumStep = ss->step;
break;
+ case ATOM_DCPLL:
+ args.v2.ucSpreadSpectrumType |= ATOM_PPLL_SS_TYPE_V2_DCPLL;
+ args.v2.usSpreadSpectrumAmount = 0;
+ args.v2.usSpreadSpectrumStep = 0;
+ break;
+ case ATOM_PPLL_INVALID:
+ return;
}
- }
-
- if (!radeon_encoder)
- return;
-
- memset(&args, 0, sizeof(args));
- if (ASIC_IS_AVIVO(rdev)) {
- args.v1.usSpreadSpectrumPercentage = cpu_to_le16(percentage);
- args.v1.ucSpreadSpectrumType = type;
- args.v1.ucSpreadSpectrumStep = step;
- args.v1.ucSpreadSpectrumDelay = delay;
- args.v1.ucSpreadSpectrumRange = range;
- args.v1.ucPpll = radeon_crtc->crtc_id ? ATOM_PPLL2 : ATOM_PPLL1;
- args.v1.ucEnable = ATOM_ENABLE;
+ args.v2.ucEnable = enable;
+ } else if (ASIC_IS_DCE3(rdev)) {
+ args.v1.usSpreadSpectrumPercentage = cpu_to_le16(ss->percentage);
+ args.v1.ucSpreadSpectrumType = ss->type;
+ args.v1.ucSpreadSpectrumStep = ss->step;
+ args.v1.ucSpreadSpectrumDelay = ss->delay;
+ args.v1.ucSpreadSpectrumRange = ss->range;
+ args.v1.ucPpll = pll_id;
+ args.v1.ucEnable = enable;
+ } else if (ASIC_IS_AVIVO(rdev)) {
+ if (enable == ATOM_DISABLE) {
+ atombios_disable_ss(crtc);
+ return;
+ }
+ args.lvds_ss_2.usSpreadSpectrumPercentage = cpu_to_le16(ss->percentage);
+ args.lvds_ss_2.ucSpreadSpectrumType = ss->type;
+ args.lvds_ss_2.ucSpreadSpectrumStep = ss->step;
+ args.lvds_ss_2.ucSpreadSpectrumDelay = ss->delay;
+ args.lvds_ss_2.ucSpreadSpectrumRange = ss->range;
+ args.lvds_ss_2.ucEnable = enable;
} else {
- args.legacy.usSpreadSpectrumPercentage = cpu_to_le16(percentage);
- args.legacy.ucSpreadSpectrumType = type;
- args.legacy.ucSpreadSpectrumStepSize_Delay = (step & 3) << 2;
- args.legacy.ucSpreadSpectrumStepSize_Delay |= (delay & 7) << 4;
- args.legacy.ucEnable = ATOM_ENABLE;
+ if (enable == ATOM_DISABLE) {
+ atombios_disable_ss(crtc);
+ return;
+ }
+ args.lvds_ss.usSpreadSpectrumPercentage = cpu_to_le16(ss->percentage);
+ args.lvds_ss.ucSpreadSpectrumType = ss->type;
+ args.lvds_ss.ucSpreadSpectrumStepSize_Delay = (ss->step & 3) << 2;
+ args.lvds_ss.ucSpreadSpectrumStepSize_Delay |= (ss->delay & 7) << 4;
+ args.lvds_ss.ucEnable = enable;
}
atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args);
}
@@ -468,7 +479,9 @@ union adjust_pixel_clock {
static u32 atombios_adjust_pll(struct drm_crtc *crtc,
struct drm_display_mode *mode,
- struct radeon_pll *pll)
+ struct radeon_pll *pll,
+ bool ss_enabled,
+ struct radeon_atom_ss *ss)
{
struct drm_device *dev = crtc->dev;
struct radeon_device *rdev = dev->dev_private;
@@ -482,19 +495,6 @@ static u32 atombios_adjust_pll(struct drm_crtc *crtc,
/* reset the pll flags */
pll->flags = 0;
- /* select the PLL algo */
- if (ASIC_IS_AVIVO(rdev)) {
- if (radeon_new_pll == 0)
- pll->algo = PLL_ALGO_LEGACY;
- else
- pll->algo = PLL_ALGO_NEW;
- } else {
- if (radeon_new_pll == 1)
- pll->algo = PLL_ALGO_NEW;
- else
- pll->algo = PLL_ALGO_LEGACY;
- }
-
if (ASIC_IS_AVIVO(rdev)) {
if ((rdev->family == CHIP_RS600) ||
(rdev->family == CHIP_RS690) ||
@@ -531,29 +531,22 @@ static u32 atombios_adjust_pll(struct drm_crtc *crtc,
}
}
+ /* use recommended ref_div for ss */
+ if (radeon_encoder->devices & (ATOM_DEVICE_LCD_SUPPORT)) {
+ if (ss_enabled) {
+ if (ss->refdiv) {
+ pll->flags |= RADEON_PLL_USE_REF_DIV;
+ pll->reference_div = ss->refdiv;
+ }
+ }
+ }
+
if (ASIC_IS_AVIVO(rdev)) {
/* DVO wants 2x pixel clock if the DVO chip is in 12 bit mode */
if (radeon_encoder->encoder_id == ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DVO1)
adjusted_clock = mode->clock * 2;
- if (radeon_encoder->active_device & (ATOM_DEVICE_TV_SUPPORT)) {
- pll->algo = PLL_ALGO_LEGACY;
+ if (radeon_encoder->active_device & (ATOM_DEVICE_TV_SUPPORT))
pll->flags |= RADEON_PLL_PREFER_CLOSEST_LOWER;
- }
- /* There is some evidence (often anecdotal) that RV515/RV620 LVDS
- * (on some boards at least) prefers the legacy algo. I'm not
- * sure whether this should handled generically or on a
- * case-by-case quirk basis. Both algos should work fine in the
- * majority of cases.
- */
- if ((radeon_encoder->active_device & (ATOM_DEVICE_LCD_SUPPORT)) &&
- ((rdev->family == CHIP_RV515) ||
- (rdev->family == CHIP_RV620))) {
- /* allow the user to overrride just in case */
- if (radeon_new_pll == 1)
- pll->algo = PLL_ALGO_NEW;
- else
- pll->algo = PLL_ALGO_LEGACY;
- }
} else {
if (encoder->encoder_type != DRM_MODE_ENCODER_DAC)
pll->flags |= RADEON_PLL_NO_ODD_POST_DIV;
@@ -589,9 +582,9 @@ static u32 atombios_adjust_pll(struct drm_crtc *crtc,
args.v1.ucTransmitterID = radeon_encoder->encoder_id;
args.v1.ucEncodeMode = encoder_mode;
if (encoder_mode == ATOM_ENCODER_MODE_DP) {
- /* may want to enable SS on DP eventually */
- /* args.v1.ucConfig |=
- ADJUST_DISPLAY_CONFIG_SS_ENABLE;*/
+ if (ss_enabled)
+ args.v1.ucConfig |=
+ ADJUST_DISPLAY_CONFIG_SS_ENABLE;
} else if (encoder_mode == ATOM_ENCODER_MODE_LVDS) {
args.v1.ucConfig |=
ADJUST_DISPLAY_CONFIG_SS_ENABLE;
@@ -608,11 +601,10 @@ static u32 atombios_adjust_pll(struct drm_crtc *crtc,
args.v3.sInput.ucDispPllConfig = 0;
if (radeon_encoder->devices & (ATOM_DEVICE_DFP_SUPPORT)) {
struct radeon_encoder_atom_dig *dig = radeon_encoder->enc_priv;
-
if (encoder_mode == ATOM_ENCODER_MODE_DP) {
- /* may want to enable SS on DP/eDP eventually */
- /*args.v3.sInput.ucDispPllConfig |=
- DISPPLL_CONFIG_SS_ENABLE;*/
+ if (ss_enabled)
+ args.v3.sInput.ucDispPllConfig |=
+ DISPPLL_CONFIG_SS_ENABLE;
args.v3.sInput.ucDispPllConfig |=
DISPPLL_CONFIG_COHERENT_MODE;
/* 16200 or 27000 */
@@ -632,17 +624,17 @@ static u32 atombios_adjust_pll(struct drm_crtc *crtc,
}
} else if (radeon_encoder->devices & (ATOM_DEVICE_LCD_SUPPORT)) {
if (encoder_mode == ATOM_ENCODER_MODE_DP) {
- /* may want to enable SS on DP/eDP eventually */
- /*args.v3.sInput.ucDispPllConfig |=
- DISPPLL_CONFIG_SS_ENABLE;*/
+ if (ss_enabled)
+ args.v3.sInput.ucDispPllConfig |=
+ DISPPLL_CONFIG_SS_ENABLE;
args.v3.sInput.ucDispPllConfig |=
DISPPLL_CONFIG_COHERENT_MODE;
/* 16200 or 27000 */
args.v3.sInput.usPixelClock = cpu_to_le16(dp_clock / 10);
} else if (encoder_mode == ATOM_ENCODER_MODE_LVDS) {
- /* want to enable SS on LVDS eventually */
- /*args.v3.sInput.ucDispPllConfig |=
- DISPPLL_CONFIG_SS_ENABLE;*/
+ if (ss_enabled)
+ args.v3.sInput.ucDispPllConfig |=
+ DISPPLL_CONFIG_SS_ENABLE;
} else {
if (mode->clock > 165000)
args.v3.sInput.ucDispPllConfig |=
@@ -816,6 +808,8 @@ static void atombios_crtc_set_pll(struct drm_crtc *crtc, struct drm_display_mode
struct radeon_pll *pll;
u32 adjusted_clock;
int encoder_mode = 0;
+ struct radeon_atom_ss ss;
+ bool ss_enabled = false;
list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
if (encoder->crtc == crtc) {
@@ -842,25 +836,123 @@ static void atombios_crtc_set_pll(struct drm_crtc *crtc, struct drm_display_mode
break;
}
+ if (radeon_encoder->active_device &
+ (ATOM_DEVICE_LCD_SUPPORT | ATOM_DEVICE_DFP_SUPPORT)) {
+ struct radeon_encoder_atom_dig *dig = radeon_encoder->enc_priv;
+ struct drm_connector *connector =
+ radeon_get_connector_for_encoder(encoder);
+ struct radeon_connector *radeon_connector =
+ to_radeon_connector(connector);
+ struct radeon_connector_atom_dig *dig_connector =
+ radeon_connector->con_priv;
+ int dp_clock;
+
+ switch (encoder_mode) {
+ case ATOM_ENCODER_MODE_DP:
+ /* DP/eDP */
+ dp_clock = dig_connector->dp_clock / 10;
+ if (radeon_encoder->active_device & (ATOM_DEVICE_LCD_SUPPORT)) {
+ if (ASIC_IS_DCE4(rdev))
+ ss_enabled =
+ radeon_atombios_get_asic_ss_info(rdev, &ss,
+ dig->lcd_ss_id,
+ dp_clock);
+ else
+ ss_enabled =
+ radeon_atombios_get_ppll_ss_info(rdev, &ss,
+ dig->lcd_ss_id);
+ } else {
+ if (ASIC_IS_DCE4(rdev))
+ ss_enabled =
+ radeon_atombios_get_asic_ss_info(rdev, &ss,
+ ASIC_INTERNAL_SS_ON_DP,
+ dp_clock);
+ else {
+ if (dp_clock == 16200) {
+ ss_enabled =
+ radeon_atombios_get_ppll_ss_info(rdev, &ss,
+ ATOM_DP_SS_ID2);
+ if (!ss_enabled)
+ ss_enabled =
+ radeon_atombios_get_ppll_ss_info(rdev, &ss,
+ ATOM_DP_SS_ID1);
+ } else
+ ss_enabled =
+ radeon_atombios_get_ppll_ss_info(rdev, &ss,
+ ATOM_DP_SS_ID1);
+ }
+ }
+ break;
+ case ATOM_ENCODER_MODE_LVDS:
+ if (ASIC_IS_DCE4(rdev))
+ ss_enabled = radeon_atombios_get_asic_ss_info(rdev, &ss,
+ dig->lcd_ss_id,
+ mode->clock / 10);
+ else
+ ss_enabled = radeon_atombios_get_ppll_ss_info(rdev, &ss,
+ dig->lcd_ss_id);
+ break;
+ case ATOM_ENCODER_MODE_DVI:
+ if (ASIC_IS_DCE4(rdev))
+ ss_enabled =
+ radeon_atombios_get_asic_ss_info(rdev, &ss,
+ ASIC_INTERNAL_SS_ON_TMDS,
+ mode->clock / 10);
+ break;
+ case ATOM_ENCODER_MODE_HDMI:
+ if (ASIC_IS_DCE4(rdev))
+ ss_enabled =
+ radeon_atombios_get_asic_ss_info(rdev, &ss,
+ ASIC_INTERNAL_SS_ON_HDMI,
+ mode->clock / 10);
+ break;
+ default:
+ break;
+ }
+ }
+
/* adjust pixel clock as needed */
- adjusted_clock = atombios_adjust_pll(crtc, mode, pll);
+ adjusted_clock = atombios_adjust_pll(crtc, mode, pll, ss_enabled, &ss);
radeon_compute_pll(pll, adjusted_clock, &pll_clock, &fb_div, &frac_fb_div,
&ref_div, &post_div);
+ atombios_crtc_program_ss(crtc, ATOM_DISABLE, radeon_crtc->pll_id, &ss);
+
atombios_crtc_program_pll(crtc, radeon_crtc->crtc_id, radeon_crtc->pll_id,
encoder_mode, radeon_encoder->encoder_id, mode->clock,
ref_div, fb_div, frac_fb_div, post_div);
+ if (ss_enabled) {
+ /* calculate ss amount and step size */
+ if (ASIC_IS_DCE4(rdev)) {
+ u32 step_size;
+ u32 amount = (((fb_div * 10) + frac_fb_div) * ss.percentage) / 10000;
+ ss.amount = (amount / 10) & ATOM_PPLL_SS_AMOUNT_V2_FBDIV_MASK;
+ ss.amount |= ((amount - (ss.amount * 10)) << ATOM_PPLL_SS_AMOUNT_V2_NFRAC_SHIFT) &
+ ATOM_PPLL_SS_AMOUNT_V2_NFRAC_MASK;
+ if (ss.type & ATOM_PPLL_SS_TYPE_V2_CENTRE_SPREAD)
+ step_size = (4 * amount * ref_div * (ss.rate * 2048)) /
+ (125 * 25 * pll->reference_freq / 100);
+ else
+ step_size = (2 * amount * ref_div * (ss.rate * 2048)) /
+ (125 * 25 * pll->reference_freq / 100);
+ ss.step = step_size;
+ }
+
+ atombios_crtc_program_ss(crtc, ATOM_ENABLE, radeon_crtc->pll_id, &ss);
+ }
}
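The DCE4 branch just above expresses the spread amount in tenths of a feedback-divider step: amount is derived from (fb_div * 10 + frac_fb_div) and ss.percentage, then split into a whole-step part (amount / 10) and the remaining tenths. A rough worked example of that split, assuming ss.percentage is stored in hundredths of a percent (which the division by 10000 suggests) and ignoring the register bit masks:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint32_t fb_div = 160, frac_fb_div = 5;
	uint32_t percentage = 275;	/* assumed to mean 2.75% */

	uint32_t amount = ((fb_div * 10) + frac_fb_div) * percentage / 10000;
	uint32_t whole  = amount / 10;			/* whole feedback-divider steps */
	uint32_t nfrac  = amount - (whole * 10);	/* leftover tenths of a step    */

	printf("amount=%u -> fbdiv=%u, nfrac=%u\n", amount, whole, nfrac);	/* 44 -> 4, 4 */
	return 0;
}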
-static int evergreen_crtc_set_base(struct drm_crtc *crtc, int x, int y,
- struct drm_framebuffer *old_fb)
+static int evergreen_crtc_do_set_base(struct drm_crtc *crtc,
+ struct drm_framebuffer *fb,
+ int x, int y, int atomic)
{
struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc);
struct drm_device *dev = crtc->dev;
struct radeon_device *rdev = dev->dev_private;
struct radeon_framebuffer *radeon_fb;
+ struct drm_framebuffer *target_fb;
struct drm_gem_object *obj;
struct radeon_bo *rbo;
uint64_t fb_location;
@@ -868,28 +960,43 @@ static int evergreen_crtc_set_base(struct drm_crtc *crtc, int x, int y,
int r;
/* no fb bound */
- if (!crtc->fb) {
+ if (!atomic && !crtc->fb) {
DRM_DEBUG_KMS("No FB bound\n");
return 0;
}
- radeon_fb = to_radeon_framebuffer(crtc->fb);
+ if (atomic) {
+ radeon_fb = to_radeon_framebuffer(fb);
+ target_fb = fb;
+ } else {
+ radeon_fb = to_radeon_framebuffer(crtc->fb);
+ target_fb = crtc->fb;
+ }
- /* Pin framebuffer & get tilling informations */
+ /* If atomic, assume fb object is pinned & idle & fenced and
+ * just update base pointers
+ */
obj = radeon_fb->obj;
rbo = obj->driver_private;
r = radeon_bo_reserve(rbo, false);
if (unlikely(r != 0))
return r;
- r = radeon_bo_pin(rbo, RADEON_GEM_DOMAIN_VRAM, &fb_location);
- if (unlikely(r != 0)) {
- radeon_bo_unreserve(rbo);
- return -EINVAL;
+
+ if (atomic)
+ fb_location = radeon_bo_gpu_offset(rbo);
+ else {
+ r = radeon_bo_pin(rbo, RADEON_GEM_DOMAIN_VRAM, &fb_location);
+ if (unlikely(r != 0)) {
+ radeon_bo_unreserve(rbo);
+ return -EINVAL;
+ }
}
+
radeon_bo_get_tiling_flags(rbo, &tiling_flags, NULL);
radeon_bo_unreserve(rbo);
- switch (crtc->fb->bits_per_pixel) {
+ switch (target_fb->bits_per_pixel) {
case 8:
fb_format = (EVERGREEN_GRPH_DEPTH(EVERGREEN_GRPH_DEPTH_8BPP) |
EVERGREEN_GRPH_FORMAT(EVERGREEN_GRPH_FORMAT_INDEXED));
@@ -909,7 +1016,7 @@ static int evergreen_crtc_set_base(struct drm_crtc *crtc, int x, int y,
break;
default:
DRM_ERROR("Unsupported screen depth %d\n",
- crtc->fb->bits_per_pixel);
+ target_fb->bits_per_pixel);
return -EINVAL;
}
@@ -955,10 +1062,10 @@ static int evergreen_crtc_set_base(struct drm_crtc *crtc, int x, int y,
WREG32(EVERGREEN_GRPH_SURFACE_OFFSET_Y + radeon_crtc->crtc_offset, 0);
WREG32(EVERGREEN_GRPH_X_START + radeon_crtc->crtc_offset, 0);
WREG32(EVERGREEN_GRPH_Y_START + radeon_crtc->crtc_offset, 0);
- WREG32(EVERGREEN_GRPH_X_END + radeon_crtc->crtc_offset, crtc->fb->width);
- WREG32(EVERGREEN_GRPH_Y_END + radeon_crtc->crtc_offset, crtc->fb->height);
+ WREG32(EVERGREEN_GRPH_X_END + radeon_crtc->crtc_offset, target_fb->width);
+ WREG32(EVERGREEN_GRPH_Y_END + radeon_crtc->crtc_offset, target_fb->height);
- fb_pitch_pixels = crtc->fb->pitch / (crtc->fb->bits_per_pixel / 8);
+ fb_pitch_pixels = target_fb->pitch / (target_fb->bits_per_pixel / 8);
WREG32(EVERGREEN_GRPH_PITCH + radeon_crtc->crtc_offset, fb_pitch_pixels);
WREG32(EVERGREEN_GRPH_ENABLE + radeon_crtc->crtc_offset, 1);
@@ -977,8 +1084,8 @@ static int evergreen_crtc_set_base(struct drm_crtc *crtc, int x, int y,
else
WREG32(EVERGREEN_DATA_FORMAT + radeon_crtc->crtc_offset, 0);
- if (old_fb && old_fb != crtc->fb) {
- radeon_fb = to_radeon_framebuffer(old_fb);
+ if (!atomic && fb && fb != crtc->fb) {
+ radeon_fb = to_radeon_framebuffer(fb);
rbo = radeon_fb->obj->driver_private;
r = radeon_bo_reserve(rbo, false);
if (unlikely(r != 0))
@@ -993,8 +1100,9 @@ static int evergreen_crtc_set_base(struct drm_crtc *crtc, int x, int y,
return 0;
}
-static int avivo_crtc_set_base(struct drm_crtc *crtc, int x, int y,
- struct drm_framebuffer *old_fb)
+static int avivo_crtc_do_set_base(struct drm_crtc *crtc,
+ struct drm_framebuffer *fb,
+ int x, int y, int atomic)
{
struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc);
struct drm_device *dev = crtc->dev;
@@ -1002,33 +1110,48 @@ static int avivo_crtc_set_base(struct drm_crtc *crtc, int x, int y,
struct radeon_framebuffer *radeon_fb;
struct drm_gem_object *obj;
struct radeon_bo *rbo;
+ struct drm_framebuffer *target_fb;
uint64_t fb_location;
uint32_t fb_format, fb_pitch_pixels, tiling_flags;
int r;
/* no fb bound */
- if (!crtc->fb) {
+ if (!atomic && !crtc->fb) {
DRM_DEBUG_KMS("No FB bound\n");
return 0;
}
- radeon_fb = to_radeon_framebuffer(crtc->fb);
+ if (atomic) {
+ radeon_fb = to_radeon_framebuffer(fb);
+ target_fb = fb;
+ } else {
+ radeon_fb = to_radeon_framebuffer(crtc->fb);
+ target_fb = crtc->fb;
+ }
- /* Pin framebuffer & get tilling informations */
obj = radeon_fb->obj;
rbo = obj->driver_private;
r = radeon_bo_reserve(rbo, false);
if (unlikely(r != 0))
return r;
- r = radeon_bo_pin(rbo, RADEON_GEM_DOMAIN_VRAM, &fb_location);
- if (unlikely(r != 0)) {
- radeon_bo_unreserve(rbo);
- return -EINVAL;
+
+ /* If atomic, assume fb object is pinned & idle & fenced and
+ * just update base pointers
+ */
+ if (atomic)
+ fb_location = radeon_bo_gpu_offset(rbo);
+ else {
+ r = radeon_bo_pin(rbo, RADEON_GEM_DOMAIN_VRAM, &fb_location);
+ if (unlikely(r != 0)) {
+ radeon_bo_unreserve(rbo);
+ return -EINVAL;
+ }
}
radeon_bo_get_tiling_flags(rbo, &tiling_flags, NULL);
radeon_bo_unreserve(rbo);
- switch (crtc->fb->bits_per_pixel) {
+ switch (target_fb->bits_per_pixel) {
case 8:
fb_format =
AVIVO_D1GRPH_CONTROL_DEPTH_8BPP |
@@ -1052,7 +1175,7 @@ static int avivo_crtc_set_base(struct drm_crtc *crtc, int x, int y,
break;
default:
DRM_ERROR("Unsupported screen depth %d\n",
- crtc->fb->bits_per_pixel);
+ target_fb->bits_per_pixel);
return -EINVAL;
}
@@ -1093,10 +1216,10 @@ static int avivo_crtc_set_base(struct drm_crtc *crtc, int x, int y,
WREG32(AVIVO_D1GRPH_SURFACE_OFFSET_Y + radeon_crtc->crtc_offset, 0);
WREG32(AVIVO_D1GRPH_X_START + radeon_crtc->crtc_offset, 0);
WREG32(AVIVO_D1GRPH_Y_START + radeon_crtc->crtc_offset, 0);
- WREG32(AVIVO_D1GRPH_X_END + radeon_crtc->crtc_offset, crtc->fb->width);
- WREG32(AVIVO_D1GRPH_Y_END + radeon_crtc->crtc_offset, crtc->fb->height);
+ WREG32(AVIVO_D1GRPH_X_END + radeon_crtc->crtc_offset, target_fb->width);
+ WREG32(AVIVO_D1GRPH_Y_END + radeon_crtc->crtc_offset, target_fb->height);
- fb_pitch_pixels = crtc->fb->pitch / (crtc->fb->bits_per_pixel / 8);
+ fb_pitch_pixels = target_fb->pitch / (target_fb->bits_per_pixel / 8);
WREG32(AVIVO_D1GRPH_PITCH + radeon_crtc->crtc_offset, fb_pitch_pixels);
WREG32(AVIVO_D1GRPH_ENABLE + radeon_crtc->crtc_offset, 1);
@@ -1115,8 +1238,8 @@ static int avivo_crtc_set_base(struct drm_crtc *crtc, int x, int y,
else
WREG32(AVIVO_D1MODE_DATA_FORMAT + radeon_crtc->crtc_offset, 0);
- if (old_fb && old_fb != crtc->fb) {
- radeon_fb = to_radeon_framebuffer(old_fb);
+ if (!atomic && fb && fb != crtc->fb) {
+ radeon_fb = to_radeon_framebuffer(fb);
rbo = radeon_fb->obj->driver_private;
r = radeon_bo_reserve(rbo, false);
if (unlikely(r != 0))
@@ -1138,11 +1261,26 @@ int atombios_crtc_set_base(struct drm_crtc *crtc, int x, int y,
struct radeon_device *rdev = dev->dev_private;
if (ASIC_IS_DCE4(rdev))
- return evergreen_crtc_set_base(crtc, x, y, old_fb);
+ return evergreen_crtc_do_set_base(crtc, old_fb, x, y, 0);
else if (ASIC_IS_AVIVO(rdev))
- return avivo_crtc_set_base(crtc, x, y, old_fb);
+ return avivo_crtc_do_set_base(crtc, old_fb, x, y, 0);
else
- return radeon_crtc_set_base(crtc, x, y, old_fb);
+ return radeon_crtc_do_set_base(crtc, old_fb, x, y, 0);
+}
+
+int atombios_crtc_set_base_atomic(struct drm_crtc *crtc,
+ struct drm_framebuffer *fb,
+ int x, int y, enum mode_set_atomic state)
+{
+ struct drm_device *dev = crtc->dev;
+ struct radeon_device *rdev = dev->dev_private;
+
+ if (ASIC_IS_DCE4(rdev))
+ return evergreen_crtc_do_set_base(crtc, fb, x, y, 1);
+ else if (ASIC_IS_AVIVO(rdev))
+ return avivo_crtc_do_set_base(crtc, fb, x, y, 1);
+ else
+ return radeon_crtc_do_set_base(crtc, fb, x, y, 1);
}
/* properly set additional regs when using atombios */
@@ -1230,12 +1368,19 @@ int atombios_crtc_mode_set(struct drm_crtc *crtc,
}
}
- atombios_disable_ss(crtc);
/* always set DCPLL */
- if (ASIC_IS_DCE4(rdev))
+ if (ASIC_IS_DCE4(rdev)) {
+ struct radeon_atom_ss ss;
+ bool ss_enabled = radeon_atombios_get_asic_ss_info(rdev, &ss,
+ ASIC_INTERNAL_SS_ON_DCPLL,
+ rdev->clock.default_dispclk);
+ if (ss_enabled)
+ atombios_crtc_program_ss(crtc, ATOM_DISABLE, ATOM_DCPLL, &ss);
atombios_crtc_set_dcpll(crtc);
+ if (ss_enabled)
+ atombios_crtc_program_ss(crtc, ATOM_ENABLE, ATOM_DCPLL, &ss);
+ }
atombios_crtc_set_pll(crtc, adjusted_mode);
- atombios_enable_ss(crtc);
if (ASIC_IS_DCE4(rdev))
atombios_set_crtc_dtd_timing(crtc, adjusted_mode);
@@ -1311,6 +1456,7 @@ static const struct drm_crtc_helper_funcs atombios_helper_funcs = {
.mode_fixup = atombios_crtc_mode_fixup,
.mode_set = atombios_crtc_mode_set,
.mode_set_base = atombios_crtc_set_base,
+ .mode_set_base_atomic = atombios_crtc_set_base_atomic,
.prepare = atombios_crtc_prepare,
.commit = atombios_crtc_commit,
.load_lut = radeon_crtc_load_lut,
diff --git a/drivers/gpu/drm/radeon/evergreen.c b/drivers/gpu/drm/radeon/evergreen.c
index 2f93d46ae69a..4dc5b4714c5a 100644
--- a/drivers/gpu/drm/radeon/evergreen.c
+++ b/drivers/gpu/drm/radeon/evergreen.c
@@ -32,6 +32,7 @@
#include "atom.h"
#include "avivod.h"
#include "evergreen_reg.h"
+#include "evergreen_blit_shaders.h"
#define EVERGREEN_PFP_UCODE_SIZE 1120
#define EVERGREEN_PM4_UCODE_SIZE 1376
@@ -284,9 +285,444 @@ void evergreen_hpd_fini(struct radeon_device *rdev)
}
}
+/* watermark setup */
+
+static u32 evergreen_line_buffer_adjust(struct radeon_device *rdev,
+ struct radeon_crtc *radeon_crtc,
+ struct drm_display_mode *mode,
+ struct drm_display_mode *other_mode)
+{
+ u32 tmp = 0;
+ /*
+ * Line Buffer Setup
+ * There are 3 line buffers, each one shared by 2 display controllers.
+ * DC_LB_MEMORY_SPLIT controls how that line buffer is shared between
+ * the display controllers. The partitioning is done via one of four
+ * preset allocations specified in bits 2:0:
+ * first display controller
+ * 0 - first half of lb (3840 * 2)
+ * 1 - first 3/4 of lb (5760 * 2)
+ * 2 - whole lb (7680 * 2)
+ * 3 - first 1/4 of lb (1920 * 2)
+ * second display controller
+ * 4 - second half of lb (3840 * 2)
+ * 5 - second 3/4 of lb (5760 * 2)
+ * 6 - whole lb (7680 * 2)
+ * 7 - last 1/4 of lb (1920 * 2)
+ */
+ if (mode && other_mode) {
+ if (mode->hdisplay > other_mode->hdisplay) {
+ if (mode->hdisplay > 2560)
+ tmp = 1; /* 3/4 */
+ else
+ tmp = 0; /* 1/2 */
+ } else if (other_mode->hdisplay > mode->hdisplay) {
+ if (other_mode->hdisplay > 2560)
+ tmp = 3; /* 1/4 */
+ else
+ tmp = 0; /* 1/2 */
+ } else
+ tmp = 0; /* 1/2 */
+ } else if (mode)
+ tmp = 2; /* whole */
+ else if (other_mode)
+ tmp = 3; /* 1/4 */
+
+ /* second controller of the pair uses second half of the lb */
+ if (radeon_crtc->crtc_id % 2)
+ tmp += 4;
+ WREG32(DC_LB_MEMORY_SPLIT + radeon_crtc->crtc_offset, tmp);
+
+ switch (tmp) {
+ case 0:
+ case 4:
+ default:
+ return 3840 * 2;
+ case 1:
+ case 5:
+ return 5760 * 2;
+ case 2:
+ case 6:
+ return 7680 * 2;
+ case 3:
+ case 7:
+ return 1920 * 2;
+ }
+}
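+
+/* Worked example (illustrative, not from the original patch): a 3840-wide
+ * mode sharing a line buffer with a 1920-wide mode gets tmp = 1 (3/4 of
+ * the lb, 5760 * 2 entries) while the 1920-wide head gets tmp = 3 (1/4,
+ * 1920 * 2 entries); if the narrow head is the second crtc of the pair,
+ * tmp becomes 7 and it uses the last quarter of the shared buffer.
+ */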
+
+static u32 evergreen_get_number_of_dram_channels(struct radeon_device *rdev)
+{
+ u32 tmp = RREG32(MC_SHARED_CHMAP);
+
+ switch ((tmp & NOOFCHAN_MASK) >> NOOFCHAN_SHIFT) {
+ case 0:
+ default:
+ return 1;
+ case 1:
+ return 2;
+ case 2:
+ return 4;
+ case 3:
+ return 8;
+ }
+}
+
+struct evergreen_wm_params {
+ u32 dram_channels; /* number of dram channels */
+ u32 yclk; /* bandwidth per dram data pin in kHz */
+ u32 sclk; /* engine clock in kHz */
+ u32 disp_clk; /* display clock in kHz */
+ u32 src_width; /* viewport width */
+ u32 active_time; /* active display time in ns */
+ u32 blank_time; /* blank time in ns */
+ bool interlaced; /* mode is interlaced */
+ fixed20_12 vsc; /* vertical scale ratio */
+ u32 num_heads; /* number of active crtcs */
+ u32 bytes_per_pixel; /* bytes per pixel display + overlay */
+ u32 lb_size; /* line buffer allocated to pipe */
+ u32 vtaps; /* vertical scaler taps */
+};
+
+static u32 evergreen_dram_bandwidth(struct evergreen_wm_params *wm)
+{
+ /* Calculate DRAM Bandwidth and the part allocated to display. */
+ fixed20_12 dram_efficiency; /* 0.7 */
+ fixed20_12 yclk, dram_channels, bandwidth;
+ fixed20_12 a;
+
+ a.full = dfixed_const(1000);
+ yclk.full = dfixed_const(wm->yclk);
+ yclk.full = dfixed_div(yclk, a);
+ dram_channels.full = dfixed_const(wm->dram_channels * 4);
+ a.full = dfixed_const(10);
+ dram_efficiency.full = dfixed_const(7);
+ dram_efficiency.full = dfixed_div(dram_efficiency, a);
+ bandwidth.full = dfixed_mul(dram_channels, yclk);
+ bandwidth.full = dfixed_mul(bandwidth, dram_efficiency);
+
+ return dfixed_trunc(bandwidth);
+}
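+
+/* Rough sanity check (illustrative numbers, not from the patch): with
+ * wm->yclk = 1000000 (1 GHz effective per pin) and 4 DRAM channels this
+ * returns about 16 * 1000 * 0.7 = 11200, i.e. 70% of the raw DRAM
+ * bandwidth in the units the other watermark helpers expect.
+ */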
+
+static u32 evergreen_dram_bandwidth_for_display(struct evergreen_wm_params *wm)
+{
+ /* Calculate DRAM Bandwidth and the part allocated to display. */
+ fixed20_12 disp_dram_allocation; /* 0.3 to 0.7 */
+ fixed20_12 yclk, dram_channels, bandwidth;
+ fixed20_12 a;
+
+ a.full = dfixed_const(1000);
+ yclk.full = dfixed_const(wm->yclk);
+ yclk.full = dfixed_div(yclk, a);
+ dram_channels.full = dfixed_const(wm->dram_channels * 4);
+ a.full = dfixed_const(10);
+ disp_dram_allocation.full = dfixed_const(3); /* XXX worst case value 0.3 */
+ disp_dram_allocation.full = dfixed_div(disp_dram_allocation, a);
+ bandwidth.full = dfixed_mul(dram_channels, yclk);
+ bandwidth.full = dfixed_mul(bandwidth, disp_dram_allocation);
+
+ return dfixed_trunc(bandwidth);
+}
+
+static u32 evergreen_data_return_bandwidth(struct evergreen_wm_params *wm)
+{
+ /* Calculate the display Data return Bandwidth */
+ fixed20_12 return_efficiency; /* 0.8 */
+ fixed20_12 sclk, bandwidth;
+ fixed20_12 a;
+
+ a.full = dfixed_const(1000);
+ sclk.full = dfixed_const(wm->sclk);
+ sclk.full = dfixed_div(sclk, a);
+ a.full = dfixed_const(10);
+ return_efficiency.full = dfixed_const(8);
+ return_efficiency.full = dfixed_div(return_efficiency, a);
+ a.full = dfixed_const(32);
+ bandwidth.full = dfixed_mul(a, sclk);
+ bandwidth.full = dfixed_mul(bandwidth, return_efficiency);
+
+ return dfixed_trunc(bandwidth);
+}
+
+static u32 evergreen_dmif_request_bandwidth(struct evergreen_wm_params *wm)
+{
+ /* Calculate the DMIF Request Bandwidth */
+ fixed20_12 disp_clk_request_efficiency; /* 0.8 */
+ fixed20_12 disp_clk, bandwidth;
+ fixed20_12 a;
+
+ a.full = dfixed_const(1000);
+ disp_clk.full = dfixed_const(wm->disp_clk);
+ disp_clk.full = dfixed_div(disp_clk, a);
+ a.full = dfixed_const(10);
+ disp_clk_request_efficiency.full = dfixed_const(8);
+ disp_clk_request_efficiency.full = dfixed_div(disp_clk_request_efficiency, a);
+ a.full = dfixed_const(32);
+ bandwidth.full = dfixed_mul(a, disp_clk);
+ bandwidth.full = dfixed_mul(bandwidth, disp_clk_request_efficiency);
+
+ return dfixed_trunc(bandwidth);
+}
+
+static u32 evergreen_available_bandwidth(struct evergreen_wm_params *wm)
+{
+ /* Calculate the Available bandwidth. Display can use this temporarily but not on average. */
+ u32 dram_bandwidth = evergreen_dram_bandwidth(wm);
+ u32 data_return_bandwidth = evergreen_data_return_bandwidth(wm);
+ u32 dmif_req_bandwidth = evergreen_dmif_request_bandwidth(wm);
+
+ return min(dram_bandwidth, min(data_return_bandwidth, dmif_req_bandwidth));
+}
+
+static u32 evergreen_average_bandwidth(struct evergreen_wm_params *wm)
+{
+ /* Calculate the display mode Average Bandwidth
+ * DisplayMode should contain the source and destination dimensions,
+ * timing, etc.
+ */
+ fixed20_12 bpp;
+ fixed20_12 line_time;
+ fixed20_12 src_width;
+ fixed20_12 bandwidth;
+ fixed20_12 a;
+
+ a.full = dfixed_const(1000);
+ line_time.full = dfixed_const(wm->active_time + wm->blank_time);
+ line_time.full = dfixed_div(line_time, a);
+ bpp.full = dfixed_const(wm->bytes_per_pixel);
+ src_width.full = dfixed_const(wm->src_width);
+ bandwidth.full = dfixed_mul(src_width, bpp);
+ bandwidth.full = dfixed_mul(bandwidth, wm->vsc);
+ bandwidth.full = dfixed_div(bandwidth, line_time);
+
+ return dfixed_trunc(bandwidth);
+}
+
+static u32 evergreen_latency_watermark(struct evergreen_wm_params *wm)
+{
+ /* First calculate the latency in ns */
+ u32 mc_latency = 2000; /* 2000 ns. */
+ u32 available_bandwidth = evergreen_available_bandwidth(wm);
+ u32 worst_chunk_return_time = (512 * 8 * 1000) / available_bandwidth;
+ u32 cursor_line_pair_return_time = (128 * 4 * 1000) / available_bandwidth;
+ u32 dc_latency = 40000000 / wm->disp_clk; /* dc pipe latency */
+ u32 other_heads_data_return_time = ((wm->num_heads + 1) * worst_chunk_return_time) +
+ (wm->num_heads * cursor_line_pair_return_time);
+ u32 latency = mc_latency + other_heads_data_return_time + dc_latency;
+ u32 max_src_lines_per_dst_line, lb_fill_bw, line_fill_time;
+ fixed20_12 a, b, c;
+
+ if (wm->num_heads == 0)
+ return 0;
+
+ a.full = dfixed_const(2);
+ b.full = dfixed_const(1);
+ if ((wm->vsc.full > a.full) ||
+ ((wm->vsc.full > b.full) && (wm->vtaps >= 3)) ||
+ (wm->vtaps >= 5) ||
+ ((wm->vsc.full >= a.full) && wm->interlaced))
+ max_src_lines_per_dst_line = 4;
+ else
+ max_src_lines_per_dst_line = 2;
+
+ a.full = dfixed_const(available_bandwidth);
+ b.full = dfixed_const(wm->num_heads);
+ a.full = dfixed_div(a, b);
+
+ b.full = dfixed_const(1000);
+ c.full = dfixed_const(wm->disp_clk);
+ b.full = dfixed_div(c, b);
+ c.full = dfixed_const(wm->bytes_per_pixel);
+ b.full = dfixed_mul(b, c);
+
+ lb_fill_bw = min(dfixed_trunc(a), dfixed_trunc(b));
+
+ a.full = dfixed_const(max_src_lines_per_dst_line * wm->src_width * wm->bytes_per_pixel);
+ b.full = dfixed_const(1000);
+ c.full = dfixed_const(lb_fill_bw);
+ b.full = dfixed_div(c, b);
+ a.full = dfixed_div(a, b);
+ line_fill_time = dfixed_trunc(a);
+
+ if (line_fill_time < wm->active_time)
+ return latency;
+ else
+ return latency + (line_fill_time - wm->active_time);
+
+}
+
+static bool evergreen_average_bandwidth_vs_dram_bandwidth_for_display(struct evergreen_wm_params *wm)
+{
+ if (evergreen_average_bandwidth(wm) <=
+ (evergreen_dram_bandwidth_for_display(wm) / wm->num_heads))
+ return true;
+ else
+ return false;
+}
+
+static bool evergreen_average_bandwidth_vs_available_bandwidth(struct evergreen_wm_params *wm)
+{
+ if (evergreen_average_bandwidth(wm) <=
+ (evergreen_available_bandwidth(wm) / wm->num_heads))
+ return true;
+ else
+ return false;
+}
+
+static bool evergreen_check_latency_hiding(struct evergreen_wm_params *wm)
+{
+ u32 lb_partitions = wm->lb_size / wm->src_width;
+ u32 line_time = wm->active_time + wm->blank_time;
+ u32 latency_tolerant_lines;
+ u32 latency_hiding;
+ fixed20_12 a;
+
+ a.full = dfixed_const(1);
+ if (wm->vsc.full > a.full)
+ latency_tolerant_lines = 1;
+ else {
+ if (lb_partitions <= (wm->vtaps + 1))
+ latency_tolerant_lines = 1;
+ else
+ latency_tolerant_lines = 2;
+ }
+
+ latency_hiding = (latency_tolerant_lines * line_time + wm->blank_time);
+
+ if (evergreen_latency_watermark(wm) <= latency_hiding)
+ return true;
+ else
+ return false;
+}
+
+static void evergreen_program_watermarks(struct radeon_device *rdev,
+ struct radeon_crtc *radeon_crtc,
+ u32 lb_size, u32 num_heads)
+{
+ struct drm_display_mode *mode = &radeon_crtc->base.mode;
+ struct evergreen_wm_params wm;
+ u32 pixel_period;
+ u32 line_time = 0;
+ u32 latency_watermark_a = 0, latency_watermark_b = 0;
+ u32 priority_a_mark = 0, priority_b_mark = 0;
+ u32 priority_a_cnt = PRIORITY_OFF;
+ u32 priority_b_cnt = PRIORITY_OFF;
+ u32 pipe_offset = radeon_crtc->crtc_id * 16;
+ u32 tmp, arb_control3;
+ fixed20_12 a, b, c;
+
+ if (radeon_crtc->base.enabled && num_heads && mode) {
+ pixel_period = 1000000 / (u32)mode->clock;
+ line_time = min((u32)mode->crtc_htotal * pixel_period, (u32)65535);
+ priority_a_cnt = 0;
+ priority_b_cnt = 0;
+
+ wm.yclk = rdev->pm.current_mclk * 10;
+ wm.sclk = rdev->pm.current_sclk * 10;
+ wm.disp_clk = mode->clock;
+ wm.src_width = mode->crtc_hdisplay;
+ wm.active_time = mode->crtc_hdisplay * pixel_period;
+ wm.blank_time = line_time - wm.active_time;
+ wm.interlaced = false;
+ if (mode->flags & DRM_MODE_FLAG_INTERLACE)
+ wm.interlaced = true;
+ wm.vsc = radeon_crtc->vsc;
+ wm.vtaps = 1;
+ if (radeon_crtc->rmx_type != RMX_OFF)
+ wm.vtaps = 2;
+ wm.bytes_per_pixel = 4; /* XXX: get this from fb config */
+ wm.lb_size = lb_size;
+ wm.dram_channels = evergreen_get_number_of_dram_channels(rdev);
+ wm.num_heads = num_heads;
+
+ /* set for high clocks */
+ latency_watermark_a = min(evergreen_latency_watermark(&wm), (u32)65535);
+ /* set for low clocks */
+ /* wm.yclk = low clk; wm.sclk = low clk */
+ latency_watermark_b = min(evergreen_latency_watermark(&wm), (u32)65535);
+
+ /* possibly force display priority to high */
+ /* should really do this at mode validation time... */
+ if (!evergreen_average_bandwidth_vs_dram_bandwidth_for_display(&wm) ||
+ !evergreen_average_bandwidth_vs_available_bandwidth(&wm) ||
+ !evergreen_check_latency_hiding(&wm) ||
+ (rdev->disp_priority == 2)) {
+ DRM_INFO("force priority to high\n");
+ priority_a_cnt |= PRIORITY_ALWAYS_ON;
+ priority_b_cnt |= PRIORITY_ALWAYS_ON;
+ }
+
+ a.full = dfixed_const(1000);
+ b.full = dfixed_const(mode->clock);
+ b.full = dfixed_div(b, a);
+ c.full = dfixed_const(latency_watermark_a);
+ c.full = dfixed_mul(c, b);
+ c.full = dfixed_mul(c, radeon_crtc->hsc);
+ c.full = dfixed_div(c, a);
+ a.full = dfixed_const(16);
+ c.full = dfixed_div(c, a);
+ priority_a_mark = dfixed_trunc(c);
+ priority_a_cnt |= priority_a_mark & PRIORITY_MARK_MASK;
+
+ a.full = dfixed_const(1000);
+ b.full = dfixed_const(mode->clock);
+ b.full = dfixed_div(b, a);
+ c.full = dfixed_const(latency_watermark_b);
+ c.full = dfixed_mul(c, b);
+ c.full = dfixed_mul(c, radeon_crtc->hsc);
+ c.full = dfixed_div(c, a);
+ a.full = dfixed_const(16);
+ c.full = dfixed_div(c, a);
+ priority_b_mark = dfixed_trunc(c);
+ priority_b_cnt |= priority_b_mark & PRIORITY_MARK_MASK;
+ }
+
+ /* select wm A */
+ arb_control3 = RREG32(PIPE0_ARBITRATION_CONTROL3 + pipe_offset);
+ tmp = arb_control3;
+ tmp &= ~LATENCY_WATERMARK_MASK(3);
+ tmp |= LATENCY_WATERMARK_MASK(1);
+ WREG32(PIPE0_ARBITRATION_CONTROL3 + pipe_offset, tmp);
+ WREG32(PIPE0_LATENCY_CONTROL + pipe_offset,
+ (LATENCY_LOW_WATERMARK(latency_watermark_a) |
+ LATENCY_HIGH_WATERMARK(line_time)));
+ /* select wm B */
+ tmp = RREG32(PIPE0_ARBITRATION_CONTROL3 + pipe_offset);
+ tmp &= ~LATENCY_WATERMARK_MASK(3);
+ tmp |= LATENCY_WATERMARK_MASK(2);
+ WREG32(PIPE0_ARBITRATION_CONTROL3 + pipe_offset, tmp);
+ WREG32(PIPE0_LATENCY_CONTROL + pipe_offset,
+ (LATENCY_LOW_WATERMARK(latency_watermark_b) |
+ LATENCY_HIGH_WATERMARK(line_time)));
+ /* restore original selection */
+ WREG32(PIPE0_ARBITRATION_CONTROL3 + pipe_offset, arb_control3);
+
+ /* write the priority marks */
+ WREG32(PRIORITY_A_CNT + radeon_crtc->crtc_offset, priority_a_cnt);
+ WREG32(PRIORITY_B_CNT + radeon_crtc->crtc_offset, priority_b_cnt);
+
+}
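+
+/* Worked example (illustrative, not part of the patch): with a latency
+ * watermark of 4000 ns, a 148500 kHz pixel clock and hsc = 1 (no
+ * horizontal scaling), the priority mark works out to
+ * 4000 * 148.5 / 1000 / 16 = ~37, i.e. the latency expressed in units
+ * of 16 pixels.
+ */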
+
void evergreen_bandwidth_update(struct radeon_device *rdev)
{
- /* XXX */
+ struct drm_display_mode *mode0 = NULL;
+ struct drm_display_mode *mode1 = NULL;
+ u32 num_heads = 0, lb_size;
+ int i;
+
+ radeon_update_display_priority(rdev);
+
+ for (i = 0; i < rdev->num_crtc; i++) {
+ if (rdev->mode_info.crtcs[i]->base.enabled)
+ num_heads++;
+ }
+ for (i = 0; i < rdev->num_crtc; i += 2) {
+ mode0 = &rdev->mode_info.crtcs[i]->base.mode;
+ mode1 = &rdev->mode_info.crtcs[i+1]->base.mode;
+ lb_size = evergreen_line_buffer_adjust(rdev, rdev->mode_info.crtcs[i], mode0, mode1);
+ evergreen_program_watermarks(rdev, rdev->mode_info.crtcs[i], lb_size, num_heads);
+ lb_size = evergreen_line_buffer_adjust(rdev, rdev->mode_info.crtcs[i+1], mode1, mode0);
+ evergreen_program_watermarks(rdev, rdev->mode_info.crtcs[i+1], lb_size, num_heads);
+ }
}
static int evergreen_mc_wait_for_idle(struct radeon_device *rdev)
@@ -677,7 +1113,7 @@ static int evergreen_cp_load_microcode(struct radeon_device *rdev)
static int evergreen_cp_start(struct radeon_device *rdev)
{
- int r;
+ int r, i;
uint32_t cp_me;
r = radeon_ring_lock(rdev, 7);
@@ -697,16 +1133,39 @@ static int evergreen_cp_start(struct radeon_device *rdev)
cp_me = 0xff;
WREG32(CP_ME_CNTL, cp_me);
- r = radeon_ring_lock(rdev, 4);
+ r = radeon_ring_lock(rdev, evergreen_default_size + 15);
if (r) {
DRM_ERROR("radeon: cp failed to lock ring (%d).\n", r);
return r;
}
- /* init some VGT regs */
- radeon_ring_write(rdev, PACKET3(PACKET3_SET_CONTEXT_REG, 2));
- radeon_ring_write(rdev, (VGT_VERTEX_REUSE_BLOCK_CNTL - PACKET3_SET_CONTEXT_REG_START) >> 2);
- radeon_ring_write(rdev, 0xe);
- radeon_ring_write(rdev, 0x10);
+
+ /* setup clear context state */
+ radeon_ring_write(rdev, PACKET3(PACKET3_PREAMBLE_CNTL, 0));
+ radeon_ring_write(rdev, PACKET3_PREAMBLE_BEGIN_CLEAR_STATE);
+
+ for (i = 0; i < evergreen_default_size; i++)
+ radeon_ring_write(rdev, evergreen_default_state[i]);
+
+ radeon_ring_write(rdev, PACKET3(PACKET3_PREAMBLE_CNTL, 0));
+ radeon_ring_write(rdev, PACKET3_PREAMBLE_END_CLEAR_STATE);
+
+ /* set clear context state */
+ radeon_ring_write(rdev, PACKET3(PACKET3_CLEAR_STATE, 0));
+ radeon_ring_write(rdev, 0);
+
+ /* SQ_VTX_BASE_VTX_LOC */
+ radeon_ring_write(rdev, 0xc0026f00);
+ radeon_ring_write(rdev, 0x00000000);
+ radeon_ring_write(rdev, 0x00000000);
+ radeon_ring_write(rdev, 0x00000000);
+
+ /* Clear consts */
+ radeon_ring_write(rdev, 0xc0036f00);
+ radeon_ring_write(rdev, 0x00000bc4);
+ radeon_ring_write(rdev, 0xffffffff);
+ radeon_ring_write(rdev, 0xffffffff);
+ radeon_ring_write(rdev, 0xffffffff);
+
radeon_ring_unlock_commit(rdev);
return 0;
@@ -731,7 +1190,7 @@ int evergreen_cp_resume(struct radeon_device *rdev)
/* Set ring buffer size */
rb_bufsz = drm_order(rdev->cp.ring_size / 8);
- tmp = RB_NO_UPDATE | (drm_order(RADEON_GPU_PAGE_SIZE/8) << 8) | rb_bufsz;
+ tmp = (drm_order(RADEON_GPU_PAGE_SIZE/8) << 8) | rb_bufsz;
#ifdef __BIG_ENDIAN
tmp |= BUF_SWAP_32BIT;
#endif
@@ -745,8 +1204,19 @@ int evergreen_cp_resume(struct radeon_device *rdev)
WREG32(CP_RB_CNTL, tmp | RB_RPTR_WR_ENA);
WREG32(CP_RB_RPTR_WR, 0);
WREG32(CP_RB_WPTR, 0);
- WREG32(CP_RB_RPTR_ADDR, rdev->cp.gpu_addr & 0xFFFFFFFF);
- WREG32(CP_RB_RPTR_ADDR_HI, upper_32_bits(rdev->cp.gpu_addr));
+
+ /* set the wb address whether it's enabled or not */
+ WREG32(CP_RB_RPTR_ADDR, (rdev->wb.gpu_addr + RADEON_WB_CP_RPTR_OFFSET) & 0xFFFFFFFC);
+ WREG32(CP_RB_RPTR_ADDR_HI, upper_32_bits(rdev->wb.gpu_addr + RADEON_WB_CP_RPTR_OFFSET) & 0xFF);
+ WREG32(SCRATCH_ADDR, ((rdev->wb.gpu_addr + RADEON_WB_SCRATCH_OFFSET) >> 8) & 0xFFFFFFFF);
+
+ if (rdev->wb.enabled)
+ WREG32(SCRATCH_UMSK, 0xff);
+ else {
+ tmp |= RB_NO_UPDATE;
+ WREG32(SCRATCH_UMSK, 0);
+ }
+
mdelay(1);
WREG32(CP_RB_CNTL, tmp);
@@ -1180,7 +1650,36 @@ static void evergreen_gpu_init(struct radeon_device *rdev)
}
}
- rdev->config.evergreen.tile_config = gb_addr_config;
+ /* setup tiling info dword. gb_addr_config is not adequate since it does
+ * not have bank info, so create a custom tiling dword.
+ * bits 3:0 num_pipes
+ * bits 7:4 num_banks
+ * bits 11:8 group_size
+ * bits 15:12 row_size
+ */
+ rdev->config.evergreen.tile_config = 0;
+ switch (rdev->config.evergreen.max_tile_pipes) {
+ case 1:
+ default:
+ rdev->config.evergreen.tile_config |= (0 << 0);
+ break;
+ case 2:
+ rdev->config.evergreen.tile_config |= (1 << 0);
+ break;
+ case 4:
+ rdev->config.evergreen.tile_config |= (2 << 0);
+ break;
+ case 8:
+ rdev->config.evergreen.tile_config |= (3 << 0);
+ break;
+ }
+ rdev->config.evergreen.tile_config |=
+ ((mc_arb_ramcfg & NOOFBANK_MASK) >> NOOFBANK_SHIFT) << 4;
+ rdev->config.evergreen.tile_config |=
+ ((mc_arb_ramcfg & BURSTLENGTH_MASK) >> BURSTLENGTH_SHIFT) << 8;
+ rdev->config.evergreen.tile_config |=
+ ((gb_addr_config & 0x30000000) >> 28) << 12;
+
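+ /* Example (illustrative): max_tile_pipes = 8 puts 3 in bits 3:0; the
+ * bank, group-size and row-size fields are copied straight from the
+ * NOOFBANK, BURSTLENGTH and GB_ADDR_CONFIG hardware fields, so a part
+ * with a NOOFBANK value of 2 ends up with 0x23 in the low byte.
+ */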
WREG32(GB_BACKEND_MAP, gb_backend_map);
WREG32(GB_ADDR_CONFIG, gb_addr_config);
WREG32(DMIF_ADDR_CONFIG, gb_addr_config);
@@ -1563,7 +2062,7 @@ int evergreen_irq_set(struct radeon_device *rdev)
u32 grbm_int_cntl = 0;
if (!rdev->irq.installed) {
- WARN(1, "Can't enable IRQ/MSI because no handler is installed.\n");
+ WARN(1, "Can't enable IRQ/MSI because no handler is installed\n");
return -EINVAL;
}
/* don't enable anything if the ih is disabled */
@@ -1584,6 +2083,7 @@ int evergreen_irq_set(struct radeon_device *rdev)
if (rdev->irq.sw_int) {
DRM_DEBUG("evergreen_irq_set: sw int\n");
cp_int_cntl |= RB_INT_ENABLE;
+ cp_int_cntl |= TIME_STAMP_INT_ENABLE;
}
if (rdev->irq.crtc_vblank_int[0]) {
DRM_DEBUG("evergreen_irq_set: vblank 0\n");
@@ -1760,8 +2260,10 @@ static inline u32 evergreen_get_ih_wptr(struct radeon_device *rdev)
{
u32 wptr, tmp;
- /* XXX use writeback */
- wptr = RREG32(IH_RB_WPTR);
+ if (rdev->wb.enabled)
+ wptr = rdev->wb.wb[R600_WB_IH_WPTR_OFFSET/4];
+ else
+ wptr = RREG32(IH_RB_WPTR);
if (wptr & RB_OVERFLOW) {
/* When a ring buffer overflow happen start parsing interrupt
@@ -1822,6 +2324,7 @@ restart_ih:
case 0: /* D1 vblank */
if (disp_int & LB_D1_VBLANK_INTERRUPT) {
drm_handle_vblank(rdev->ddev, 0);
+ rdev->pm.vblank_sync = true;
wake_up(&rdev->irq.vblank_queue);
disp_int &= ~LB_D1_VBLANK_INTERRUPT;
DRM_DEBUG("IH: D1 vblank\n");
@@ -1843,6 +2346,7 @@ restart_ih:
case 0: /* D2 vblank */
if (disp_int_cont & LB_D2_VBLANK_INTERRUPT) {
drm_handle_vblank(rdev->ddev, 1);
+ rdev->pm.vblank_sync = true;
wake_up(&rdev->irq.vblank_queue);
disp_int_cont &= ~LB_D2_VBLANK_INTERRUPT;
DRM_DEBUG("IH: D2 vblank\n");
@@ -1864,6 +2368,7 @@ restart_ih:
case 0: /* D3 vblank */
if (disp_int_cont2 & LB_D3_VBLANK_INTERRUPT) {
drm_handle_vblank(rdev->ddev, 2);
+ rdev->pm.vblank_sync = true;
wake_up(&rdev->irq.vblank_queue);
disp_int_cont2 &= ~LB_D3_VBLANK_INTERRUPT;
DRM_DEBUG("IH: D3 vblank\n");
@@ -1885,6 +2390,7 @@ restart_ih:
case 0: /* D4 vblank */
if (disp_int_cont3 & LB_D4_VBLANK_INTERRUPT) {
drm_handle_vblank(rdev->ddev, 3);
+ rdev->pm.vblank_sync = true;
wake_up(&rdev->irq.vblank_queue);
disp_int_cont3 &= ~LB_D4_VBLANK_INTERRUPT;
DRM_DEBUG("IH: D4 vblank\n");
@@ -1906,6 +2412,7 @@ restart_ih:
case 0: /* D5 vblank */
if (disp_int_cont4 & LB_D5_VBLANK_INTERRUPT) {
drm_handle_vblank(rdev->ddev, 4);
+ rdev->pm.vblank_sync = true;
wake_up(&rdev->irq.vblank_queue);
disp_int_cont4 &= ~LB_D5_VBLANK_INTERRUPT;
DRM_DEBUG("IH: D5 vblank\n");
@@ -1927,6 +2434,7 @@ restart_ih:
case 0: /* D6 vblank */
if (disp_int_cont5 & LB_D6_VBLANK_INTERRUPT) {
drm_handle_vblank(rdev->ddev, 5);
+ rdev->pm.vblank_sync = true;
wake_up(&rdev->irq.vblank_queue);
disp_int_cont5 &= ~LB_D6_VBLANK_INTERRUPT;
DRM_DEBUG("IH: D6 vblank\n");
@@ -2000,6 +2508,7 @@ restart_ih:
break;
case 181: /* CP EOP event */
DRM_DEBUG("IH: CP EOP\n");
+ radeon_fence_process(rdev);
break;
case 233: /* GUI IDLE */
DRM_DEBUG("IH: CP EOP\n");
@@ -2048,26 +2557,18 @@ static int evergreen_startup(struct radeon_device *rdev)
return r;
}
evergreen_gpu_init(rdev);
-#if 0
- if (!rdev->r600_blit.shader_obj) {
- r = r600_blit_init(rdev);
- if (r) {
- DRM_ERROR("radeon: failed blitter (%d).\n", r);
- return r;
- }
- }
- r = radeon_bo_reserve(rdev->r600_blit.shader_obj, false);
- if (unlikely(r != 0))
- return r;
- r = radeon_bo_pin(rdev->r600_blit.shader_obj, RADEON_GEM_DOMAIN_VRAM,
- &rdev->r600_blit.shader_gpu_addr);
- radeon_bo_unreserve(rdev->r600_blit.shader_obj);
+ r = evergreen_blit_init(rdev);
if (r) {
- DRM_ERROR("failed to pin blit object %d\n", r);
- return r;
+ evergreen_blit_fini(rdev);
+ rdev->asic->copy = NULL;
+ dev_warn(rdev->dev, "failed blitter (%d) falling back to memcpy\n", r);
}
-#endif
+
+ /* allocate wb buffer */
+ r = radeon_wb_init(rdev);
+ if (r)
+ return r;
/* Enable IRQ */
r = r600_irq_init(rdev);
@@ -2087,8 +2588,6 @@ static int evergreen_startup(struct radeon_device *rdev)
r = evergreen_cp_resume(rdev);
if (r)
return r;
- /* write back buffer are not vital so don't worry about failure */
- r600_wb_enable(rdev);
return 0;
}
@@ -2122,23 +2621,43 @@ int evergreen_resume(struct radeon_device *rdev)
int evergreen_suspend(struct radeon_device *rdev)
{
-#if 0
int r;
-#endif
+
/* FIXME: we should wait for ring to be empty */
r700_cp_stop(rdev);
rdev->cp.ready = false;
evergreen_irq_suspend(rdev);
- r600_wb_disable(rdev);
+ radeon_wb_disable(rdev);
evergreen_pcie_gart_disable(rdev);
-#if 0
+
/* unpin shaders bo */
r = radeon_bo_reserve(rdev->r600_blit.shader_obj, false);
if (likely(r == 0)) {
radeon_bo_unpin(rdev->r600_blit.shader_obj);
radeon_bo_unreserve(rdev->r600_blit.shader_obj);
}
-#endif
+
+ return 0;
+}
+
+int evergreen_copy_blit(struct radeon_device *rdev,
+ uint64_t src_offset, uint64_t dst_offset,
+ unsigned num_pages, struct radeon_fence *fence)
+{
+ int r;
+
+ mutex_lock(&rdev->r600_blit.mutex);
+ rdev->r600_blit.vb_ib = NULL;
+ r = evergreen_blit_prepare_copy(rdev, num_pages * RADEON_GPU_PAGE_SIZE);
+ if (r) {
+ if (rdev->r600_blit.vb_ib)
+ radeon_ib_free(rdev, &rdev->r600_blit.vb_ib);
+ mutex_unlock(&rdev->r600_blit.mutex);
+ return r;
+ }
+ evergreen_kms_blit_copy(rdev, src_offset, dst_offset, num_pages * RADEON_GPU_PAGE_SIZE);
+ evergreen_blit_done_copy(rdev, fence);
+ mutex_unlock(&rdev->r600_blit.mutex);
return 0;
}
@@ -2246,8 +2765,8 @@ int evergreen_init(struct radeon_device *rdev)
if (r) {
dev_err(rdev->dev, "disabling GPU acceleration\n");
r700_cp_fini(rdev);
- r600_wb_fini(rdev);
r600_irq_fini(rdev);
+ radeon_wb_fini(rdev);
radeon_irq_kms_fini(rdev);
evergreen_pcie_gart_fini(rdev);
rdev->accel_working = false;
@@ -2269,10 +2788,10 @@ int evergreen_init(struct radeon_device *rdev)
void evergreen_fini(struct radeon_device *rdev)
{
- /*r600_blit_fini(rdev);*/
+ evergreen_blit_fini(rdev);
r700_cp_fini(rdev);
- r600_wb_fini(rdev);
r600_irq_fini(rdev);
+ radeon_wb_fini(rdev);
radeon_irq_kms_fini(rdev);
evergreen_pcie_gart_fini(rdev);
radeon_gem_fini(rdev);
diff --git a/drivers/gpu/drm/radeon/evergreen_blit_kms.c b/drivers/gpu/drm/radeon/evergreen_blit_kms.c
new file mode 100644
index 000000000000..e0e590110dd4
--- /dev/null
+++ b/drivers/gpu/drm/radeon/evergreen_blit_kms.c
@@ -0,0 +1,774 @@
+/*
+ * Copyright 2010 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ *
+ * Authors:
+ * Alex Deucher <alexander.deucher@amd.com>
+ */
+
+#include "drmP.h"
+#include "drm.h"
+#include "radeon_drm.h"
+#include "radeon.h"
+
+#include "evergreend.h"
+#include "evergreen_blit_shaders.h"
+
+#define DI_PT_RECTLIST 0x11
+#define DI_INDEX_SIZE_16_BIT 0x0
+#define DI_SRC_SEL_AUTO_INDEX 0x2
+
+#define FMT_8 0x1
+#define FMT_5_6_5 0x8
+#define FMT_8_8_8_8 0x1a
+#define COLOR_8 0x1
+#define COLOR_5_6_5 0x8
+#define COLOR_8_8_8_8 0x1a
+
+/* emits 17 */
+static void
+set_render_target(struct radeon_device *rdev, int format,
+ int w, int h, u64 gpu_addr)
+{
+ u32 cb_color_info;
+ int pitch, slice;
+
+ h = ALIGN(h, 8);
+ if (h < 8)
+ h = 8;
+
+ cb_color_info = ((format << 2) | (1 << 24));
+ pitch = (w / 8) - 1;
+ slice = ((w * h) / 64) - 1;
+
+ radeon_ring_write(rdev, PACKET3(PACKET3_SET_CONTEXT_REG, 15));
+ radeon_ring_write(rdev, (CB_COLOR0_BASE - PACKET3_SET_CONTEXT_REG_START) >> 2);
+ radeon_ring_write(rdev, gpu_addr >> 8);
+ radeon_ring_write(rdev, pitch);
+ radeon_ring_write(rdev, slice);
+ radeon_ring_write(rdev, 0);
+ radeon_ring_write(rdev, cb_color_info);
+ radeon_ring_write(rdev, (1 << 4));
+ radeon_ring_write(rdev, (w - 1) | ((h - 1) << 16));
+ radeon_ring_write(rdev, 0);
+ radeon_ring_write(rdev, 0);
+ radeon_ring_write(rdev, 0);
+ radeon_ring_write(rdev, 0);
+ radeon_ring_write(rdev, 0);
+ radeon_ring_write(rdev, 0);
+ radeon_ring_write(rdev, 0);
+ radeon_ring_write(rdev, 0);
+}
+
+/* emits 5dw */
+static void
+cp_set_surface_sync(struct radeon_device *rdev,
+ u32 sync_type, u32 size,
+ u64 mc_addr)
+{
+ u32 cp_coher_size;
+
+ if (size == 0xffffffff)
+ cp_coher_size = 0xffffffff;
+ else
+ cp_coher_size = ((size + 255) >> 8);
+
+ radeon_ring_write(rdev, PACKET3(PACKET3_SURFACE_SYNC, 3));
+ radeon_ring_write(rdev, sync_type);
+ radeon_ring_write(rdev, cp_coher_size);
+ radeon_ring_write(rdev, mc_addr >> 8);
+ radeon_ring_write(rdev, 10); /* poll interval */
+}
+
+/* emits 11dw + 1 surface sync = 16dw */
+static void
+set_shaders(struct radeon_device *rdev)
+{
+ u64 gpu_addr;
+
+ /* VS */
+ gpu_addr = rdev->r600_blit.shader_gpu_addr + rdev->r600_blit.vs_offset;
+ radeon_ring_write(rdev, PACKET3(PACKET3_SET_CONTEXT_REG, 3));
+ radeon_ring_write(rdev, (SQ_PGM_START_VS - PACKET3_SET_CONTEXT_REG_START) >> 2);
+ radeon_ring_write(rdev, gpu_addr >> 8);
+ radeon_ring_write(rdev, 2);
+ radeon_ring_write(rdev, 0);
+
+ /* PS */
+ gpu_addr = rdev->r600_blit.shader_gpu_addr + rdev->r600_blit.ps_offset;
+ radeon_ring_write(rdev, PACKET3(PACKET3_SET_CONTEXT_REG, 4));
+ radeon_ring_write(rdev, (SQ_PGM_START_PS - PACKET3_SET_CONTEXT_REG_START) >> 2);
+ radeon_ring_write(rdev, gpu_addr >> 8);
+ radeon_ring_write(rdev, 1);
+ radeon_ring_write(rdev, 0);
+ radeon_ring_write(rdev, 2);
+
+ gpu_addr = rdev->r600_blit.shader_gpu_addr + rdev->r600_blit.vs_offset;
+ cp_set_surface_sync(rdev, PACKET3_SH_ACTION_ENA, 512, gpu_addr);
+}
+
+/* emits 10 + 1 sync (5) = 15 */
+static void
+set_vtx_resource(struct radeon_device *rdev, u64 gpu_addr)
+{
+ u32 sq_vtx_constant_word2, sq_vtx_constant_word3;
+
+ /* high addr, stride */
+ sq_vtx_constant_word2 = ((upper_32_bits(gpu_addr) & 0xff) | (16 << 8));
+ /* xyzw swizzles */
+ sq_vtx_constant_word3 = (0 << 3) | (1 << 6) | (2 << 9) | (3 << 12);
+
+ radeon_ring_write(rdev, PACKET3(PACKET3_SET_RESOURCE, 8));
+ radeon_ring_write(rdev, 0x580);
+ radeon_ring_write(rdev, gpu_addr & 0xffffffff);
+ radeon_ring_write(rdev, 48 - 1); /* size */
+ radeon_ring_write(rdev, sq_vtx_constant_word2);
+ radeon_ring_write(rdev, sq_vtx_constant_word3);
+ radeon_ring_write(rdev, 0);
+ radeon_ring_write(rdev, 0);
+ radeon_ring_write(rdev, 0);
+ radeon_ring_write(rdev, SQ_TEX_VTX_VALID_BUFFER << 30);
+
+ if (rdev->family == CHIP_CEDAR)
+ cp_set_surface_sync(rdev,
+ PACKET3_TC_ACTION_ENA, 48, gpu_addr);
+ else
+ cp_set_surface_sync(rdev,
+ PACKET3_VC_ACTION_ENA, 48, gpu_addr);
+
+}
+
+/* emits 10 */
+static void
+set_tex_resource(struct radeon_device *rdev,
+ int format, int w, int h, int pitch,
+ u64 gpu_addr)
+{
+ u32 sq_tex_resource_word0, sq_tex_resource_word1;
+ u32 sq_tex_resource_word4, sq_tex_resource_word7;
+
+ if (h < 1)
+ h = 1;
+
+ sq_tex_resource_word0 = (1 << 0); /* 2D */
+ sq_tex_resource_word0 |= ((((pitch >> 3) - 1) << 6) |
+ ((w - 1) << 18));
+ sq_tex_resource_word1 = ((h - 1) << 0);
+ /* xyzw swizzles */
+ sq_tex_resource_word4 = (0 << 16) | (1 << 19) | (2 << 22) | (3 << 25);
+
+ sq_tex_resource_word7 = format | (SQ_TEX_VTX_VALID_TEXTURE << 30);
+
+ radeon_ring_write(rdev, PACKET3(PACKET3_SET_RESOURCE, 8));
+ radeon_ring_write(rdev, 0);
+ radeon_ring_write(rdev, sq_tex_resource_word0);
+ radeon_ring_write(rdev, sq_tex_resource_word1);
+ radeon_ring_write(rdev, gpu_addr >> 8);
+ radeon_ring_write(rdev, gpu_addr >> 8);
+ radeon_ring_write(rdev, sq_tex_resource_word4);
+ radeon_ring_write(rdev, 0);
+ radeon_ring_write(rdev, 0);
+ radeon_ring_write(rdev, sq_tex_resource_word7);
+}
+
+/* emits 12 */
+static void
+set_scissors(struct radeon_device *rdev, int x1, int y1,
+ int x2, int y2)
+{
+ radeon_ring_write(rdev, PACKET3(PACKET3_SET_CONTEXT_REG, 2));
+ radeon_ring_write(rdev, (PA_SC_SCREEN_SCISSOR_TL - PACKET3_SET_CONTEXT_REG_START) >> 2);
+ radeon_ring_write(rdev, (x1 << 0) | (y1 << 16));
+ radeon_ring_write(rdev, (x2 << 0) | (y2 << 16));
+
+ radeon_ring_write(rdev, PACKET3(PACKET3_SET_CONTEXT_REG, 2));
+ radeon_ring_write(rdev, (PA_SC_GENERIC_SCISSOR_TL - PACKET3_SET_CONTEXT_REG_START) >> 2);
+ radeon_ring_write(rdev, (x1 << 0) | (y1 << 16) | (1 << 31));
+ radeon_ring_write(rdev, (x2 << 0) | (y2 << 16));
+
+ radeon_ring_write(rdev, PACKET3(PACKET3_SET_CONTEXT_REG, 2));
+ radeon_ring_write(rdev, (PA_SC_WINDOW_SCISSOR_TL - PACKET3_SET_CONTEXT_REG_START) >> 2);
+ radeon_ring_write(rdev, (x1 << 0) | (y1 << 16) | (1 << 31));
+ radeon_ring_write(rdev, (x2 << 0) | (y2 << 16));
+}
+
+/* emits 10 */
+static void
+draw_auto(struct radeon_device *rdev)
+{
+ radeon_ring_write(rdev, PACKET3(PACKET3_SET_CONFIG_REG, 1));
+ radeon_ring_write(rdev, (VGT_PRIMITIVE_TYPE - PACKET3_SET_CONFIG_REG_START) >> 2);
+ radeon_ring_write(rdev, DI_PT_RECTLIST);
+
+ radeon_ring_write(rdev, PACKET3(PACKET3_INDEX_TYPE, 0));
+ radeon_ring_write(rdev, DI_INDEX_SIZE_16_BIT);
+
+ radeon_ring_write(rdev, PACKET3(PACKET3_NUM_INSTANCES, 0));
+ radeon_ring_write(rdev, 1);
+
+ radeon_ring_write(rdev, PACKET3(PACKET3_DRAW_INDEX_AUTO, 1));
+ radeon_ring_write(rdev, 3);
+ radeon_ring_write(rdev, DI_SRC_SEL_AUTO_INDEX);
+
+}
+
+/* emits 30 */
+static void
+set_default_state(struct radeon_device *rdev)
+{
+ u32 sq_config, sq_gpr_resource_mgmt_1, sq_gpr_resource_mgmt_2, sq_gpr_resource_mgmt_3;
+ u32 sq_thread_resource_mgmt, sq_thread_resource_mgmt_2;
+ u32 sq_stack_resource_mgmt_1, sq_stack_resource_mgmt_2, sq_stack_resource_mgmt_3;
+ int num_ps_gprs, num_vs_gprs, num_temp_gprs;
+ int num_gs_gprs, num_es_gprs, num_hs_gprs, num_ls_gprs;
+ int num_ps_threads, num_vs_threads, num_gs_threads, num_es_threads;
+ int num_hs_threads, num_ls_threads;
+ int num_ps_stack_entries, num_vs_stack_entries, num_gs_stack_entries, num_es_stack_entries;
+ int num_hs_stack_entries, num_ls_stack_entries;
+
+ switch (rdev->family) {
+ case CHIP_CEDAR:
+ default:
+ num_ps_gprs = 93;
+ num_vs_gprs = 46;
+ num_temp_gprs = 4;
+ num_gs_gprs = 31;
+ num_es_gprs = 31;
+ num_hs_gprs = 23;
+ num_ls_gprs = 23;
+ num_ps_threads = 96;
+ num_vs_threads = 16;
+ num_gs_threads = 16;
+ num_es_threads = 16;
+ num_hs_threads = 16;
+ num_ls_threads = 16;
+ num_ps_stack_entries = 42;
+ num_vs_stack_entries = 42;
+ num_gs_stack_entries = 42;
+ num_es_stack_entries = 42;
+ num_hs_stack_entries = 42;
+ num_ls_stack_entries = 42;
+ break;
+ case CHIP_REDWOOD:
+ num_ps_gprs = 93;
+ num_vs_gprs = 46;
+ num_temp_gprs = 4;
+ num_gs_gprs = 31;
+ num_es_gprs = 31;
+ num_hs_gprs = 23;
+ num_ls_gprs = 23;
+ num_ps_threads = 128;
+ num_vs_threads = 20;
+ num_gs_threads = 20;
+ num_es_threads = 20;
+ num_hs_threads = 20;
+ num_ls_threads = 20;
+ num_ps_stack_entries = 42;
+ num_vs_stack_entries = 42;
+ num_gs_stack_entries = 42;
+ num_es_stack_entries = 42;
+ num_hs_stack_entries = 42;
+ num_ls_stack_entries = 42;
+ break;
+ case CHIP_JUNIPER:
+ num_ps_gprs = 93;
+ num_vs_gprs = 46;
+ num_temp_gprs = 4;
+ num_gs_gprs = 31;
+ num_es_gprs = 31;
+ num_hs_gprs = 23;
+ num_ls_gprs = 23;
+ num_ps_threads = 128;
+ num_vs_threads = 20;
+ num_gs_threads = 20;
+ num_es_threads = 20;
+ num_hs_threads = 20;
+ num_ls_threads = 20;
+ num_ps_stack_entries = 85;
+ num_vs_stack_entries = 85;
+ num_gs_stack_entries = 85;
+ num_es_stack_entries = 85;
+ num_hs_stack_entries = 85;
+ num_ls_stack_entries = 85;
+ break;
+ case CHIP_CYPRESS:
+ case CHIP_HEMLOCK:
+ num_ps_gprs = 93;
+ num_vs_gprs = 46;
+ num_temp_gprs = 4;
+ num_gs_gprs = 31;
+ num_es_gprs = 31;
+ num_hs_gprs = 23;
+ num_ls_gprs = 23;
+ num_ps_threads = 128;
+ num_vs_threads = 20;
+ num_gs_threads = 20;
+ num_es_threads = 20;
+ num_hs_threads = 20;
+ num_ls_threads = 20;
+ num_ps_stack_entries = 85;
+ num_vs_stack_entries = 85;
+ num_gs_stack_entries = 85;
+ num_es_stack_entries = 85;
+ num_hs_stack_entries = 85;
+ num_ls_stack_entries = 85;
+ break;
+ }
+
+ if (rdev->family == CHIP_CEDAR)
+ sq_config = 0;
+ else
+ sq_config = VC_ENABLE;
+
+ sq_config |= (EXPORT_SRC_C |
+ CS_PRIO(0) |
+ LS_PRIO(0) |
+ HS_PRIO(0) |
+ PS_PRIO(0) |
+ VS_PRIO(1) |
+ GS_PRIO(2) |
+ ES_PRIO(3));
+
+ sq_gpr_resource_mgmt_1 = (NUM_PS_GPRS(num_ps_gprs) |
+ NUM_VS_GPRS(num_vs_gprs) |
+ NUM_CLAUSE_TEMP_GPRS(num_temp_gprs));
+ sq_gpr_resource_mgmt_2 = (NUM_GS_GPRS(num_gs_gprs) |
+ NUM_ES_GPRS(num_es_gprs));
+ sq_gpr_resource_mgmt_3 = (NUM_HS_GPRS(num_hs_gprs) |
+ NUM_LS_GPRS(num_ls_gprs));
+ sq_thread_resource_mgmt = (NUM_PS_THREADS(num_ps_threads) |
+ NUM_VS_THREADS(num_vs_threads) |
+ NUM_GS_THREADS(num_gs_threads) |
+ NUM_ES_THREADS(num_es_threads));
+ sq_thread_resource_mgmt_2 = (NUM_HS_THREADS(num_hs_threads) |
+ NUM_LS_THREADS(num_ls_threads));
+ sq_stack_resource_mgmt_1 = (NUM_PS_STACK_ENTRIES(num_ps_stack_entries) |
+ NUM_VS_STACK_ENTRIES(num_vs_stack_entries));
+ sq_stack_resource_mgmt_2 = (NUM_GS_STACK_ENTRIES(num_gs_stack_entries) |
+ NUM_ES_STACK_ENTRIES(num_es_stack_entries));
+ sq_stack_resource_mgmt_3 = (NUM_HS_STACK_ENTRIES(num_hs_stack_entries) |
+ NUM_LS_STACK_ENTRIES(num_ls_stack_entries));
+
+ /* set clear context state */
+ radeon_ring_write(rdev, PACKET3(PACKET3_CLEAR_STATE, 0));
+ radeon_ring_write(rdev, 0);
+
+ /* disable dyn gprs */
+ radeon_ring_write(rdev, PACKET3(PACKET3_SET_CONFIG_REG, 1));
+ radeon_ring_write(rdev, (SQ_DYN_GPR_CNTL_PS_FLUSH_REQ - PACKET3_SET_CONFIG_REG_START) >> 2);
+ radeon_ring_write(rdev, 0);
+
+ /* SQ config */
+ radeon_ring_write(rdev, PACKET3(PACKET3_SET_CONFIG_REG, 11));
+ radeon_ring_write(rdev, (SQ_CONFIG - PACKET3_SET_CONFIG_REG_START) >> 2);
+ radeon_ring_write(rdev, sq_config);
+ radeon_ring_write(rdev, sq_gpr_resource_mgmt_1);
+ radeon_ring_write(rdev, sq_gpr_resource_mgmt_2);
+ radeon_ring_write(rdev, sq_gpr_resource_mgmt_3);
+ radeon_ring_write(rdev, 0);
+ radeon_ring_write(rdev, 0);
+ radeon_ring_write(rdev, sq_thread_resource_mgmt);
+ radeon_ring_write(rdev, sq_thread_resource_mgmt_2);
+ radeon_ring_write(rdev, sq_stack_resource_mgmt_1);
+ radeon_ring_write(rdev, sq_stack_resource_mgmt_2);
+ radeon_ring_write(rdev, sq_stack_resource_mgmt_3);
+
+ /* CONTEXT_CONTROL */
+ radeon_ring_write(rdev, 0xc0012800);
+ radeon_ring_write(rdev, 0x80000000);
+ radeon_ring_write(rdev, 0x80000000);
+
+ /* SQ_VTX_BASE_VTX_LOC */
+ radeon_ring_write(rdev, 0xc0026f00);
+ radeon_ring_write(rdev, 0x00000000);
+ radeon_ring_write(rdev, 0x00000000);
+ radeon_ring_write(rdev, 0x00000000);
+
+ /* SET_SAMPLER */
+ radeon_ring_write(rdev, 0xc0036e00);
+ radeon_ring_write(rdev, 0x00000000);
+ radeon_ring_write(rdev, 0x00000012);
+ radeon_ring_write(rdev, 0x00000000);
+ radeon_ring_write(rdev, 0x00000000);
+
+}
+
+static inline uint32_t i2f(uint32_t input)
+{
+ u32 result, i, exponent, fraction;
+
+ if ((input & 0x3fff) == 0)
+ result = 0; /* 0 is a special case */
+ else {
+ exponent = 140; /* exponent biased by 127; */
+ fraction = (input & 0x3fff) << 10; /* cheat and only
+ handle numbers below 2^15 */
+ for (i = 0; i < 14; i++) {
+ if (fraction & 0x800000)
+ break;
+ else {
+ fraction = fraction << 1; /* keep
+ shifting left until top bit = 1 */
+ exponent = exponent - 1;
+ }
+ }
+ result = exponent << 23 | (fraction & 0x7fffff); /* mask
+ off top bit; assumed 1 */
+ }
+ return result;
+}
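+
+/* Worked example (illustrative): i2f(5) shifts the fraction 5 << 10 left
+ * eleven more times until bit 23 is set, ending with exponent 129 and
+ * mantissa 0x200000, i.e. 0x40A00000 == 5.0f. Only the low 14 bits of
+ * the input are used, so callers must stay below 16384.
+ */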
+
+int evergreen_blit_init(struct radeon_device *rdev)
+{
+ u32 obj_size;
+ int r;
+ void *ptr;
+
+ /* pin copy shader into vram if already initialized */
+ if (rdev->r600_blit.shader_obj)
+ goto done;
+
+ mutex_init(&rdev->r600_blit.mutex);
+ rdev->r600_blit.state_offset = 0;
+ rdev->r600_blit.state_len = 0;
+ obj_size = 0;
+
+ rdev->r600_blit.vs_offset = obj_size;
+ obj_size += evergreen_vs_size * 4;
+ obj_size = ALIGN(obj_size, 256);
+
+ rdev->r600_blit.ps_offset = obj_size;
+ obj_size += evergreen_ps_size * 4;
+ obj_size = ALIGN(obj_size, 256);
+
+ r = radeon_bo_create(rdev, NULL, obj_size, PAGE_SIZE, true, RADEON_GEM_DOMAIN_VRAM,
+ &rdev->r600_blit.shader_obj);
+ if (r) {
+ DRM_ERROR("evergreen failed to allocate shader\n");
+ return r;
+ }
+
+ DRM_DEBUG("evergreen blit allocated bo %08x vs %08x ps %08x\n",
+ obj_size,
+ rdev->r600_blit.vs_offset, rdev->r600_blit.ps_offset);
+
+ r = radeon_bo_reserve(rdev->r600_blit.shader_obj, false);
+ if (unlikely(r != 0))
+ return r;
+ r = radeon_bo_kmap(rdev->r600_blit.shader_obj, &ptr);
+ if (r) {
+ DRM_ERROR("failed to map blit object %d\n", r);
+ return r;
+ }
+
+ memcpy(ptr + rdev->r600_blit.vs_offset, evergreen_vs, evergreen_vs_size * 4);
+ memcpy(ptr + rdev->r600_blit.ps_offset, evergreen_ps, evergreen_ps_size * 4);
+ radeon_bo_kunmap(rdev->r600_blit.shader_obj);
+ radeon_bo_unreserve(rdev->r600_blit.shader_obj);
+
+done:
+ r = radeon_bo_reserve(rdev->r600_blit.shader_obj, false);
+ if (unlikely(r != 0))
+ return r;
+ r = radeon_bo_pin(rdev->r600_blit.shader_obj, RADEON_GEM_DOMAIN_VRAM,
+ &rdev->r600_blit.shader_gpu_addr);
+ radeon_bo_unreserve(rdev->r600_blit.shader_obj);
+ if (r) {
+ dev_err(rdev->dev, "(%d) pin blit object failed\n", r);
+ return r;
+ }
+ rdev->mc.active_vram_size = rdev->mc.real_vram_size;
+ return 0;
+}
+
+void evergreen_blit_fini(struct radeon_device *rdev)
+{
+ int r;
+
+ rdev->mc.active_vram_size = rdev->mc.visible_vram_size;
+ if (rdev->r600_blit.shader_obj == NULL)
+ return;
+ /* If we can't reserve the bo, unref should be enough to destroy
+ * it when it becomes idle.
+ */
+ r = radeon_bo_reserve(rdev->r600_blit.shader_obj, false);
+ if (!r) {
+ radeon_bo_unpin(rdev->r600_blit.shader_obj);
+ radeon_bo_unreserve(rdev->r600_blit.shader_obj);
+ }
+ radeon_bo_unref(&rdev->r600_blit.shader_obj);
+}
+
+static int evergreen_vb_ib_get(struct radeon_device *rdev)
+{
+ int r;
+ r = radeon_ib_get(rdev, &rdev->r600_blit.vb_ib);
+ if (r) {
+ DRM_ERROR("failed to get IB for vertex buffer\n");
+ return r;
+ }
+
+ rdev->r600_blit.vb_total = 64*1024;
+ rdev->r600_blit.vb_used = 0;
+ return 0;
+}
+
+static void evergreen_vb_ib_put(struct radeon_device *rdev)
+{
+ radeon_fence_emit(rdev, rdev->r600_blit.vb_ib->fence);
+ radeon_ib_free(rdev, &rdev->r600_blit.vb_ib);
+}
+
+int evergreen_blit_prepare_copy(struct radeon_device *rdev, int size_bytes)
+{
+ int r;
+ int ring_size, line_size;
+ int max_size;
+ /* loops of emits + fence emit possible */
+ int dwords_per_loop = 74, num_loops;
+
+ r = evergreen_vb_ib_get(rdev);
+ if (r)
+ return r;
+
+ /* 8 bpp vs 32 bpp for xfer unit */
+ if (size_bytes & 3)
+ line_size = 8192;
+ else
+ line_size = 8192 * 4;
+
+ max_size = 8192 * line_size;
+
+ /* major loops cover the max size transfer */
+ num_loops = ((size_bytes + max_size) / max_size);
+ /* minor loops cover the extra non aligned bits */
+ num_loops += ((size_bytes % line_size) ? 1 : 0);
+ /* calculate number of loops correctly */
+ ring_size = num_loops * dwords_per_loop;
+ /* set default + shaders */
+ ring_size += 46; /* shaders + def state */
+ ring_size += 10; /* fence emit for VB IB */
+ ring_size += 5; /* done copy */
+ ring_size += 10; /* fence emit for done copy */
+ r = radeon_ring_lock(rdev, ring_size);
+ if (r)
+ return r;
+
+ set_default_state(rdev); /* 30 */
+ set_shaders(rdev); /* 16 */
+ return 0;
+}
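+
+/* Worked example (illustrative, not from the patch): a dword-aligned
+ * 4096-byte copy uses line_size = 32768, so num_loops ends up as 2 (one
+ * major loop plus one for the partial-line remainder) and ring_size is
+ * 2 * 74 + 46 + 10 + 5 + 10 = 219 dwords.
+ */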
+
+void evergreen_blit_done_copy(struct radeon_device *rdev, struct radeon_fence *fence)
+{
+ int r;
+
+ if (rdev->r600_blit.vb_ib)
+ evergreen_vb_ib_put(rdev);
+
+ if (fence)
+ r = radeon_fence_emit(rdev, fence);
+
+ radeon_ring_unlock_commit(rdev);
+}
+
+void evergreen_kms_blit_copy(struct radeon_device *rdev,
+ u64 src_gpu_addr, u64 dst_gpu_addr,
+ int size_bytes)
+{
+ int max_bytes;
+ u64 vb_gpu_addr;
+ u32 *vb;
+
+ DRM_DEBUG("emitting copy %16llx %16llx %d %d\n", src_gpu_addr, dst_gpu_addr,
+ size_bytes, rdev->r600_blit.vb_used);
+ vb = (u32 *)(rdev->r600_blit.vb_ib->ptr + rdev->r600_blit.vb_used);
+ if ((size_bytes & 3) || (src_gpu_addr & 3) || (dst_gpu_addr & 3)) {
+ max_bytes = 8192;
+
+ while (size_bytes) {
+ int cur_size = size_bytes;
+ int src_x = src_gpu_addr & 255;
+ int dst_x = dst_gpu_addr & 255;
+ int h = 1;
+ src_gpu_addr = src_gpu_addr & ~255ULL;
+ dst_gpu_addr = dst_gpu_addr & ~255ULL;
+
+ if (!src_x && !dst_x) {
+ h = (cur_size / max_bytes);
+ if (h > 8192)
+ h = 8192;
+ if (h == 0)
+ h = 1;
+ else
+ cur_size = max_bytes;
+ } else {
+ if (cur_size > max_bytes)
+ cur_size = max_bytes;
+ if (cur_size > (max_bytes - dst_x))
+ cur_size = (max_bytes - dst_x);
+ if (cur_size > (max_bytes - src_x))
+ cur_size = (max_bytes - src_x);
+ }
+
+ if ((rdev->r600_blit.vb_used + 48) > rdev->r600_blit.vb_total) {
+ WARN_ON(1);
+ }
+
+ vb[0] = i2f(dst_x);
+ vb[1] = 0;
+ vb[2] = i2f(src_x);
+ vb[3] = 0;
+
+ vb[4] = i2f(dst_x);
+ vb[5] = i2f(h);
+ vb[6] = i2f(src_x);
+ vb[7] = i2f(h);
+
+ vb[8] = i2f(dst_x + cur_size);
+ vb[9] = i2f(h);
+ vb[10] = i2f(src_x + cur_size);
+ vb[11] = i2f(h);
+
+ /* src 10 */
+ set_tex_resource(rdev, FMT_8,
+ src_x + cur_size, h, src_x + cur_size,
+ src_gpu_addr);
+
+ /* 5 */
+ cp_set_surface_sync(rdev,
+ PACKET3_TC_ACTION_ENA, (src_x + cur_size * h), src_gpu_addr);
+
+
+ /* dst 17 */
+ set_render_target(rdev, COLOR_8,
+ dst_x + cur_size, h,
+ dst_gpu_addr);
+
+ /* scissors 12 */
+ set_scissors(rdev, dst_x, 0, dst_x + cur_size, h);
+
+ /* 15 */
+ vb_gpu_addr = rdev->r600_blit.vb_ib->gpu_addr + rdev->r600_blit.vb_used;
+ set_vtx_resource(rdev, vb_gpu_addr);
+
+ /* draw 10 */
+ draw_auto(rdev);
+
+ /* 5 */
+ cp_set_surface_sync(rdev,
+ PACKET3_CB_ACTION_ENA | PACKET3_CB0_DEST_BASE_ENA,
+ cur_size * h, dst_gpu_addr);
+
+ vb += 12;
+ rdev->r600_blit.vb_used += 12 * 4;
+
+ src_gpu_addr += cur_size * h;
+ dst_gpu_addr += cur_size * h;
+ size_bytes -= cur_size * h;
+ }
+ } else {
+ max_bytes = 8192 * 4;
+
+ while (size_bytes) {
+ int cur_size = size_bytes;
+ int src_x = (src_gpu_addr & 255);
+ int dst_x = (dst_gpu_addr & 255);
+ int h = 1;
+ src_gpu_addr = src_gpu_addr & ~255ULL;
+ dst_gpu_addr = dst_gpu_addr & ~255ULL;
+
+ if (!src_x && !dst_x) {
+ h = (cur_size / max_bytes);
+ if (h > 8192)
+ h = 8192;
+ if (h == 0)
+ h = 1;
+ else
+ cur_size = max_bytes;
+ } else {
+ if (cur_size > max_bytes)
+ cur_size = max_bytes;
+ if (cur_size > (max_bytes - dst_x))
+ cur_size = (max_bytes - dst_x);
+ if (cur_size > (max_bytes - src_x))
+ cur_size = (max_bytes - src_x);
+ }
+
+ if ((rdev->r600_blit.vb_used + 48) > rdev->r600_blit.vb_total) {
+ WARN_ON(1);
+ }
+
+ vb[0] = i2f(dst_x / 4);
+ vb[1] = 0;
+ vb[2] = i2f(src_x / 4);
+ vb[3] = 0;
+
+ vb[4] = i2f(dst_x / 4);
+ vb[5] = i2f(h);
+ vb[6] = i2f(src_x / 4);
+ vb[7] = i2f(h);
+
+ vb[8] = i2f((dst_x + cur_size) / 4);
+ vb[9] = i2f(h);
+ vb[10] = i2f((src_x + cur_size) / 4);
+ vb[11] = i2f(h);
+
+ /* src 10 */
+ set_tex_resource(rdev, FMT_8_8_8_8,
+ (src_x + cur_size) / 4,
+ h, (src_x + cur_size) / 4,
+ src_gpu_addr);
+ /* 5 */
+ cp_set_surface_sync(rdev,
+ PACKET3_TC_ACTION_ENA, (src_x + cur_size * h), src_gpu_addr);
+
+ /* dst 17 */
+ set_render_target(rdev, COLOR_8_8_8_8,
+ (dst_x + cur_size) / 4, h,
+ dst_gpu_addr);
+
+ /* scissors 12 */
+ set_scissors(rdev, (dst_x / 4), 0, ((dst_x + cur_size) / 4), h);
+
+ /* Vertex buffer setup 15 */
+ vb_gpu_addr = rdev->r600_blit.vb_ib->gpu_addr + rdev->r600_blit.vb_used;
+ set_vtx_resource(rdev, vb_gpu_addr);
+
+ /* draw 10 */
+ draw_auto(rdev);
+
+ /* 5 */
+ cp_set_surface_sync(rdev,
+ PACKET3_CB_ACTION_ENA | PACKET3_CB0_DEST_BASE_ENA,
+ cur_size * h, dst_gpu_addr);
+
+ /* 74 ring dwords per loop */
+ vb += 12;
+ rdev->r600_blit.vb_used += 12 * 4;
+
+ src_gpu_addr += cur_size * h;
+ dst_gpu_addr += cur_size * h;
+ size_bytes -= cur_size * h;
+ }
+ }
+}
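+
+/* Worked example (illustrative): a dword-aligned 1 MiB copy with both
+ * addresses 256-byte aligned takes the 32bpp path, so a single pass with
+ * cur_size = 32768 and h = 32 covers the whole transfer in one 74-dword
+ * loop.
+ */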
+
diff --git a/drivers/gpu/drm/radeon/evergreen_blit_shaders.c b/drivers/gpu/drm/radeon/evergreen_blit_shaders.c
new file mode 100644
index 000000000000..ef1d28c07fbf
--- /dev/null
+++ b/drivers/gpu/drm/radeon/evergreen_blit_shaders.c
@@ -0,0 +1,348 @@
+/*
+ * Copyright 2010 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ *
+ * Authors:
+ * Alex Deucher <alexander.deucher@amd.com>
+ */
+
+#include <linux/types.h>
+#include <linux/kernel.h>
+
+/*
+ * evergreen cards need to use the 3D engine to blit data which requires
+ * quite a bit of hw state setup. Rather than pull the whole 3D driver
+ * (which normally generates the 3D state) into the DRM, we opt to use
+ * statically generated state tables. The register state and shaders
+ * were hand generated to support blitting functionality. See the 3D
+ * driver or documentation for descriptions of the registers and
+ * shader instructions.
+ */
+
+const u32 evergreen_default_state[] =
+{
+ 0xc0016900,
+ 0x0000023b,
+ 0x00000000, /* SQ_LDS_ALLOC_PS */
+
+ 0xc0066900,
+ 0x00000240,
+ 0x00000000, /* SQ_ESGS_RING_ITEMSIZE */
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+
+ 0xc0046900,
+ 0x00000247,
+ 0x00000000, /* SQ_GS_VERT_ITEMSIZE */
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+
+ 0xc0026900,
+ 0x00000010,
+ 0x00000000, /* DB_Z_INFO */
+ 0x00000000, /* DB_STENCIL_INFO */
+
+ 0xc0016900,
+ 0x00000200,
+ 0x00000000, /* DB_DEPTH_CONTROL */
+
+ 0xc0066900,
+ 0x00000000,
+ 0x00000060, /* DB_RENDER_CONTROL */
+ 0x00000000, /* DB_COUNT_CONTROL */
+ 0x00000000, /* DB_DEPTH_VIEW */
+ 0x0000002a, /* DB_RENDER_OVERRIDE */
+ 0x00000000, /* DB_RENDER_OVERRIDE2 */
+ 0x00000000, /* DB_HTILE_DATA_BASE */
+
+ 0xc0026900,
+ 0x0000000a,
+ 0x00000000, /* DB_STENCIL_CLEAR */
+ 0x00000000, /* DB_DEPTH_CLEAR */
+
+ 0xc0016900,
+ 0x000002dc,
+ 0x0000aa00, /* DB_ALPHA_TO_MASK */
+
+ 0xc0016900,
+ 0x00000080,
+ 0x00000000, /* PA_SC_WINDOW_OFFSET */
+
+ 0xc00d6900,
+ 0x00000083,
+ 0x0000ffff, /* PA_SC_CLIPRECT_RULE */
+ 0x00000000, /* PA_SC_CLIPRECT_0_TL */
+ 0x20002000, /* PA_SC_CLIPRECT_0_BR */
+ 0x00000000,
+ 0x20002000,
+ 0x00000000,
+ 0x20002000,
+ 0x00000000,
+ 0x20002000,
+ 0xaaaaaaaa, /* PA_SC_EDGERULE */
+ 0x00000000, /* PA_SU_HARDWARE_SCREEN_OFFSET */
+ 0x0000000f, /* CB_TARGET_MASK */
+ 0x0000000f, /* CB_SHADER_MASK */
+
+ 0xc0226900,
+ 0x00000094,
+ 0x80000000, /* PA_SC_VPORT_SCISSOR_0_TL */
+ 0x20002000, /* PA_SC_VPORT_SCISSOR_0_BR */
+ 0x80000000,
+ 0x20002000,
+ 0x80000000,
+ 0x20002000,
+ 0x80000000,
+ 0x20002000,
+ 0x80000000,
+ 0x20002000,
+ 0x80000000,
+ 0x20002000,
+ 0x80000000,
+ 0x20002000,
+ 0x80000000,
+ 0x20002000,
+ 0x80000000,
+ 0x20002000,
+ 0x80000000,
+ 0x20002000,
+ 0x80000000,
+ 0x20002000,
+ 0x80000000,
+ 0x20002000,
+ 0x80000000,
+ 0x20002000,
+ 0x80000000,
+ 0x20002000,
+ 0x80000000,
+ 0x20002000,
+ 0x80000000,
+ 0x20002000,
+ 0x00000000, /* PA_SC_VPORT_ZMIN_0 */
+ 0x3f800000, /* PA_SC_VPORT_ZMAX_0 */
+
+ 0xc0016900,
+ 0x000000d4,
+ 0x00000000, /* SX_MISC */
+
+ 0xc0026900,
+ 0x00000292,
+ 0x00000000, /* PA_SC_MODE_CNTL_0 */
+ 0x00000000, /* PA_SC_MODE_CNTL_1 */
+
+ 0xc0106900,
+ 0x00000300,
+ 0x00000000, /* PA_SC_LINE_CNTL */
+ 0x00000000, /* PA_SC_AA_CONFIG */
+ 0x00000005, /* PA_SU_VTX_CNTL */
+ 0x3f800000, /* PA_CL_GB_VERT_CLIP_ADJ */
+ 0x3f800000, /* PA_CL_GB_VERT_DISC_ADJ */
+ 0x3f800000, /* PA_CL_GB_HORZ_CLIP_ADJ */
+ 0x3f800000, /* PA_CL_GB_HORZ_DISC_ADJ */
+ 0x00000000, /* PA_SC_AA_SAMPLE_LOCS_0 */
+ 0x00000000, /* */
+ 0x00000000, /* */
+ 0x00000000, /* */
+ 0x00000000, /* */
+ 0x00000000, /* */
+ 0x00000000, /* */
+ 0x00000000, /* PA_SC_AA_SAMPLE_LOCS_7 */
+ 0xffffffff, /* PA_SC_AA_MASK */
+
+ 0xc00d6900,
+ 0x00000202,
+ 0x00cc0010, /* CB_COLOR_CONTROL */
+ 0x00000210, /* DB_SHADER_CONTROL */
+ 0x00010000, /* PA_CL_CLIP_CNTL */
+ 0x00000004, /* PA_SU_SC_MODE_CNTL */
+ 0x00000100, /* PA_CL_VTE_CNTL */
+ 0x00000000, /* PA_CL_VS_OUT_CNTL */
+ 0x00000000, /* PA_CL_NANINF_CNTL */
+ 0x00000000, /* PA_SU_LINE_STIPPLE_CNTL */
+ 0x00000000, /* PA_SU_LINE_STIPPLE_SCALE */
+ 0x00000000, /* PA_SU_PRIM_FILTER_CNTL */
+ 0x00000000, /* */
+ 0x00000000, /* */
+ 0x00000000, /* SQ_DYN_GPR_RESOURCE_LIMIT_1 */
+
+ 0xc0066900,
+ 0x000002de,
+ 0x00000000, /* PA_SU_POLY_OFFSET_DB_FMT_CNTL */
+ 0x00000000, /* */
+ 0x00000000, /* */
+ 0x00000000, /* */
+ 0x00000000, /* */
+ 0x00000000, /* */
+
+ 0xc0016900,
+ 0x00000229,
+ 0x00000000, /* SQ_PGM_START_FS */
+
+ 0xc0016900,
+ 0x0000022a,
+ 0x00000000, /* SQ_PGM_RESOURCES_FS */
+
+ 0xc0096900,
+ 0x00000100,
+ 0x00ffffff, /* VGT_MAX_VTX_INDX */
+ 0x00000000, /* */
+ 0x00000000, /* */
+ 0x00000000, /* */
+ 0x00000000, /* SX_ALPHA_TEST_CONTROL */
+ 0x00000000, /* CB_BLEND_RED */
+ 0x00000000, /* CB_BLEND_GREEN */
+ 0x00000000, /* CB_BLEND_BLUE */
+ 0x00000000, /* CB_BLEND_ALPHA */
+
+ 0xc0026900,
+ 0x000002a8,
+ 0x00000000, /* VGT_INSTANCE_STEP_RATE_0 */
+ 0x00000000, /* */
+
+ 0xc0026900,
+ 0x000002ad,
+ 0x00000000, /* VGT_REUSE_OFF */
+ 0x00000000, /* */
+
+ 0xc0116900,
+ 0x00000280,
+ 0x00000000, /* PA_SU_POINT_SIZE */
+ 0x00000000, /* PA_SU_POINT_MINMAX */
+ 0x00000008, /* PA_SU_LINE_CNTL */
+ 0x00000000, /* PA_SC_LINE_STIPPLE */
+ 0x00000000, /* VGT_OUTPUT_PATH_CNTL */
+ 0x00000000, /* VGT_HOS_CNTL */
+ 0x00000000, /* */
+ 0x00000000, /* */
+ 0x00000000, /* */
+ 0x00000000, /* */
+ 0x00000000, /* */
+ 0x00000000, /* */
+ 0x00000000, /* */
+ 0x00000000, /* */
+ 0x00000000, /* */
+ 0x00000000, /* */
+ 0x00000000, /* VGT_GS_MODE */
+
+ 0xc0016900,
+ 0x000002a1,
+ 0x00000000, /* VGT_PRIMITIVEID_EN */
+
+ 0xc0016900,
+ 0x000002a5,
+ 0x00000000, /* VGT_MULTI_PRIM_IB_RESET_EN */
+
+ 0xc0016900,
+ 0x000002d5,
+ 0x00000000, /* VGT_SHADER_STAGES_EN */
+
+ 0xc0026900,
+ 0x000002e5,
+ 0x00000000, /* VGT_STRMOUT_CONFIG */
+ 0x00000000, /* */
+
+ 0xc0016900,
+ 0x000001e0,
+ 0x00000000, /* CB_BLEND0_CONTROL */
+
+ 0xc0016900,
+ 0x000001b1,
+ 0x00000000, /* SPI_VS_OUT_CONFIG */
+
+ 0xc0016900,
+ 0x00000187,
+ 0x00000000, /* SPI_VS_OUT_ID_0 */
+
+ 0xc0016900,
+ 0x00000191,
+ 0x00000100, /* SPI_PS_INPUT_CNTL_0 */
+
+ 0xc00b6900,
+ 0x000001b3,
+ 0x20000001, /* SPI_PS_IN_CONTROL_0 */
+ 0x00000000, /* SPI_PS_IN_CONTROL_1 */
+ 0x00000000, /* SPI_INTERP_CONTROL_0 */
+ 0x00000000, /* SPI_INPUT_Z */
+ 0x00000000, /* SPI_FOG_CNTL */
+ 0x00100000, /* SPI_BARYC_CNTL */
+ 0x00000000, /* SPI_PS_IN_CONTROL_2 */
+ 0x00000000, /* */
+ 0x00000000, /* */
+ 0x00000000, /* */
+ 0x00000000, /* */
+
+ 0xc0026900,
+ 0x00000316,
+ 0x0000000e, /* VGT_VERTEX_REUSE_BLOCK_CNTL */
+ 0x00000010, /* */
+};
+
+const u32 evergreen_vs[] =
+{
+ 0x00000004,
+ 0x80800400,
+ 0x0000a03c,
+ 0x95000688,
+ 0x00004000,
+ 0x15200688,
+ 0x00000000,
+ 0x00000000,
+ 0x3c000000,
+ 0x67961001,
+ 0x00080000,
+ 0x00000000,
+ 0x1c000000,
+ 0x67961000,
+ 0x00000008,
+ 0x00000000,
+};
+
+const u32 evergreen_ps[] =
+{
+ 0x00000003,
+ 0xa00c0000,
+ 0x00000008,
+ 0x80400000,
+ 0x00000000,
+ 0x95200688,
+ 0x00380400,
+ 0x00146b10,
+ 0x00380000,
+ 0x20146b10,
+ 0x00380400,
+ 0x40146b00,
+ 0x80380000,
+ 0x60146b00,
+ 0x00000000,
+ 0x00000000,
+ 0x00000010,
+ 0x000d1000,
+ 0xb0800000,
+ 0x00000000,
+};
+
+const u32 evergreen_ps_size = ARRAY_SIZE(evergreen_ps);
+const u32 evergreen_vs_size = ARRAY_SIZE(evergreen_vs);
+const u32 evergreen_default_size = ARRAY_SIZE(evergreen_default_state);
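Editor's note: the comment at the top of this new file explains why the state is kept as a static table; the table itself is a stream of PM4 type-3 packets. Each 0xc0xx6900 dword is a SET_CONTEXT_REG header: bits 31:30 select a type-3 packet, bits 29:16 hold one less than the number of dwords that follow, and bits 15:8 carry the opcode (0x69). The dword after the header is a register offset in dwords relative to the 0x28000 context-register space, so the first entry targets 0x28000 + 0x23b*4 = 0x288ec (SQ_LDS_ALLOC_PS). A hedged sketch of that encoding (the macro and function names here are illustrative, not the driver's own):

#include <stdint.h>
#include <stdio.h>

#define PKT3_SET_CONTEXT_REG	0x69
#define CONTEXT_REG_BASE	0x28000

static uint32_t pkt3_header(uint32_t opcode, uint32_t count)
{
	/* count = number of dwords following the header, minus one */
	return (3u << 30) | (count << 16) | (opcode << 8);
}

static uint32_t context_reg_index(uint32_t mmio_addr)
{
	return (mmio_addr - CONTEXT_REG_BASE) >> 2;
}

int main(void)
{
	/* matches the first table entry: 0xc0016900 followed by 0x0000023b */
	printf("%#x %#x\n", pkt3_header(PKT3_SET_CONTEXT_REG, 1),
	       context_reg_index(0x288ec));
	return 0;
}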
diff --git a/drivers/gpu/drm/radeon/evergreen_blit_shaders.h b/drivers/gpu/drm/radeon/evergreen_blit_shaders.h
new file mode 100644
index 000000000000..bb8d6c751595
--- /dev/null
+++ b/drivers/gpu/drm/radeon/evergreen_blit_shaders.h
@@ -0,0 +1,35 @@
+/*
+ * Copyright 2009 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+#ifndef EVERGREEN_BLIT_SHADERS_H
+#define EVERGREEN_BLIT_SHADERS_H
+
+extern const u32 evergreen_ps[];
+extern const u32 evergreen_vs[];
+extern const u32 evergreen_default_state[];
+
+extern const u32 evergreen_ps_size, evergreen_vs_size;
+extern const u32 evergreen_default_size;
+
+#endif
diff --git a/drivers/gpu/drm/radeon/evergreend.h b/drivers/gpu/drm/radeon/evergreend.h
index 9b7532dd30f7..113c70cc8b39 100644
--- a/drivers/gpu/drm/radeon/evergreend.h
+++ b/drivers/gpu/drm/radeon/evergreend.h
@@ -412,6 +412,19 @@
#define SOFT_RESET_REGBB (1 << 22)
#define SOFT_RESET_ORB (1 << 23)
+/* display watermarks */
+#define DC_LB_MEMORY_SPLIT 0x6b0c
+#define PRIORITY_A_CNT 0x6b18
+#define PRIORITY_MARK_MASK 0x7fff
+#define PRIORITY_OFF (1 << 16)
+#define PRIORITY_ALWAYS_ON (1 << 20)
+#define PRIORITY_B_CNT 0x6b1c
+#define PIPE0_ARBITRATION_CONTROL3 0x0bf0
+# define LATENCY_WATERMARK_MASK(x) ((x) << 16)
+#define PIPE0_LATENCY_CONTROL 0x0bf4
+# define LATENCY_LOW_WATERMARK(x) ((x) << 0)
+# define LATENCY_HIGH_WATERMARK(x) ((x) << 16)
+
#define IH_RB_CNTL 0x3e00
# define IH_RB_ENABLE (1 << 0)
# define IH_IB_SIZE(x) ((x) << 1) /* log2 */
@@ -645,6 +658,8 @@
#define PACKET3_EVENT_WRITE_EOP 0x47
#define PACKET3_EVENT_WRITE_EOS 0x48
#define PACKET3_PREAMBLE_CNTL 0x4A
+# define PACKET3_PREAMBLE_BEGIN_CLEAR_STATE (2 << 28)
+# define PACKET3_PREAMBLE_END_CLEAR_STATE (3 << 28)
#define PACKET3_RB_OFFSET 0x4B
#define PACKET3_ALU_PS_CONST_BUFFER_COPY 0x4C
#define PACKET3_ALU_VS_CONST_BUFFER_COPY 0x4D
@@ -802,6 +817,11 @@
#define SQ_ALU_CONST_CACHE_LS_14 0x28f78
#define SQ_ALU_CONST_CACHE_LS_15 0x28f7c
+#define PA_SC_SCREEN_SCISSOR_TL 0x28030
+#define PA_SC_GENERIC_SCISSOR_TL 0x28240
+#define PA_SC_WINDOW_SCISSOR_TL 0x28204
+#define VGT_PRIMITIVE_TYPE 0x8958
+
#define DB_DEPTH_CONTROL 0x28800
#define DB_DEPTH_VIEW 0x28008
#define DB_HTILE_DATA_BASE 0x28014
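Editor's note: the display watermark defines added earlier in this hunk are field builders rather than complete register values: LATENCY_LOW_WATERMARK()/LATENCY_HIGH_WATERMARK() pack two 16-bit marks into PIPE0_LATENCY_CONTROL, and LATENCY_WATERMARK_MASK() addresses the selector field in PIPE0_ARBITRATION_CONTROL3. A standalone sketch of how such values could be composed (the numbers are hypothetical; real watermarks are derived from the mode's line time and available memory bandwidth):

#include <stdint.h>
#include <stdio.h>

#define LATENCY_WATERMARK_MASK(x)	((x) << 16)
#define LATENCY_LOW_WATERMARK(x)	((x) << 0)
#define LATENCY_HIGH_WATERMARK(x)	((x) << 16)

int main(void)
{
	uint32_t arb = 0;	/* stand-in for a PIPE0_ARBITRATION_CONTROL3 readback */

	arb &= ~LATENCY_WATERMARK_MASK(3);
	arb |= LATENCY_WATERMARK_MASK(1);	/* select one latency watermark set */

	uint32_t latency = LATENCY_LOW_WATERMARK(100) | LATENCY_HIGH_WATERMARK(1000);

	printf("arbitration=%#x latency=%#x\n", arb, latency);
	return 0;
}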
diff --git a/drivers/gpu/drm/radeon/r100.c b/drivers/gpu/drm/radeon/r100.c
index e59422320bb6..8e10aa9f74b0 100644
--- a/drivers/gpu/drm/radeon/r100.c
+++ b/drivers/gpu/drm/radeon/r100.c
@@ -442,7 +442,7 @@ int r100_pci_gart_init(struct radeon_device *rdev)
int r;
if (rdev->gart.table.ram.ptr) {
- WARN(1, "R100 PCI GART already initialized.\n");
+ WARN(1, "R100 PCI GART already initialized\n");
return 0;
}
/* Initialize common gart structure */
@@ -516,7 +516,7 @@ int r100_irq_set(struct radeon_device *rdev)
uint32_t tmp = 0;
if (!rdev->irq.installed) {
- WARN(1, "Can't enable IRQ/MSI because no handler is installed.\n");
+ WARN(1, "Can't enable IRQ/MSI because no handler is installed\n");
WREG32(R_000040_GEN_INT_CNTL, 0);
return -EINVAL;
}
@@ -675,67 +675,6 @@ void r100_fence_ring_emit(struct radeon_device *rdev,
radeon_ring_write(rdev, RADEON_SW_INT_FIRE);
}
-int r100_wb_init(struct radeon_device *rdev)
-{
- int r;
-
- if (rdev->wb.wb_obj == NULL) {
- r = radeon_bo_create(rdev, NULL, RADEON_GPU_PAGE_SIZE, true,
- RADEON_GEM_DOMAIN_GTT,
- &rdev->wb.wb_obj);
- if (r) {
- dev_err(rdev->dev, "(%d) create WB buffer failed\n", r);
- return r;
- }
- r = radeon_bo_reserve(rdev->wb.wb_obj, false);
- if (unlikely(r != 0))
- return r;
- r = radeon_bo_pin(rdev->wb.wb_obj, RADEON_GEM_DOMAIN_GTT,
- &rdev->wb.gpu_addr);
- if (r) {
- dev_err(rdev->dev, "(%d) pin WB buffer failed\n", r);
- radeon_bo_unreserve(rdev->wb.wb_obj);
- return r;
- }
- r = radeon_bo_kmap(rdev->wb.wb_obj, (void **)&rdev->wb.wb);
- radeon_bo_unreserve(rdev->wb.wb_obj);
- if (r) {
- dev_err(rdev->dev, "(%d) map WB buffer failed\n", r);
- return r;
- }
- }
- WREG32(R_000774_SCRATCH_ADDR, rdev->wb.gpu_addr);
- WREG32(R_00070C_CP_RB_RPTR_ADDR,
- S_00070C_RB_RPTR_ADDR((rdev->wb.gpu_addr + 1024) >> 2));
- WREG32(R_000770_SCRATCH_UMSK, 0xff);
- return 0;
-}
-
-void r100_wb_disable(struct radeon_device *rdev)
-{
- WREG32(R_000770_SCRATCH_UMSK, 0);
-}
-
-void r100_wb_fini(struct radeon_device *rdev)
-{
- int r;
-
- r100_wb_disable(rdev);
- if (rdev->wb.wb_obj) {
- r = radeon_bo_reserve(rdev->wb.wb_obj, false);
- if (unlikely(r != 0)) {
- dev_err(rdev->dev, "(%d) can't finish WB\n", r);
- return;
- }
- radeon_bo_kunmap(rdev->wb.wb_obj);
- radeon_bo_unpin(rdev->wb.wb_obj);
- radeon_bo_unreserve(rdev->wb.wb_obj);
- radeon_bo_unref(&rdev->wb.wb_obj);
- rdev->wb.wb = NULL;
- rdev->wb.wb_obj = NULL;
- }
-}
-
int r100_copy_blit(struct radeon_device *rdev,
uint64_t src_offset,
uint64_t dst_offset,
@@ -996,20 +935,32 @@ int r100_cp_init(struct radeon_device *rdev, unsigned ring_size)
WREG32(0x718, pre_write_timer | (pre_write_limit << 28));
tmp = (REG_SET(RADEON_RB_BUFSZ, rb_bufsz) |
REG_SET(RADEON_RB_BLKSZ, rb_blksz) |
- REG_SET(RADEON_MAX_FETCH, max_fetch) |
- RADEON_RB_NO_UPDATE);
+ REG_SET(RADEON_MAX_FETCH, max_fetch));
#ifdef __BIG_ENDIAN
tmp |= RADEON_BUF_SWAP_32BIT;
#endif
- WREG32(RADEON_CP_RB_CNTL, tmp);
+ WREG32(RADEON_CP_RB_CNTL, tmp | RADEON_RB_NO_UPDATE);
/* Set ring address */
DRM_INFO("radeon: ring at 0x%016lX\n", (unsigned long)rdev->cp.gpu_addr);
WREG32(RADEON_CP_RB_BASE, rdev->cp.gpu_addr);
/* Force read & write ptr to 0 */
- WREG32(RADEON_CP_RB_CNTL, tmp | RADEON_RB_RPTR_WR_ENA);
+ WREG32(RADEON_CP_RB_CNTL, tmp | RADEON_RB_RPTR_WR_ENA | RADEON_RB_NO_UPDATE);
WREG32(RADEON_CP_RB_RPTR_WR, 0);
WREG32(RADEON_CP_RB_WPTR, 0);
+
+ /* set the wb address whether it's enabled or not */
+ WREG32(R_00070C_CP_RB_RPTR_ADDR,
+ S_00070C_RB_RPTR_ADDR((rdev->wb.gpu_addr + RADEON_WB_CP_RPTR_OFFSET) >> 2));
+ WREG32(R_000774_SCRATCH_ADDR, rdev->wb.gpu_addr + RADEON_WB_SCRATCH_OFFSET);
+
+ if (rdev->wb.enabled)
+ WREG32(R_000770_SCRATCH_UMSK, 0xff);
+ else {
+ tmp |= RADEON_RB_NO_UPDATE;
+ WREG32(R_000770_SCRATCH_UMSK, 0);
+ }
+
WREG32(RADEON_CP_RB_CNTL, tmp);
udelay(10);
rdev->cp.rptr = RREG32(RADEON_CP_RB_RPTR);
@@ -1052,6 +1003,7 @@ void r100_cp_disable(struct radeon_device *rdev)
rdev->cp.ready = false;
WREG32(RADEON_CP_CSQ_MODE, 0);
WREG32(RADEON_CP_CSQ_CNTL, 0);
+ WREG32(R_000770_SCRATCH_UMSK, 0);
if (r100_gui_wait_for_idle(rdev)) {
printk(KERN_WARNING "Failed to wait GUI idle while "
"programming pipes. Bad things might happen.\n");
@@ -2318,6 +2270,9 @@ void r100_vram_init_sizes(struct radeon_device *rdev)
/* Fix for RN50, M6, M7 with 8/16/32(??) MBs of VRAM -
* Novell bug 204882 + along with lots of ubuntu ones
*/
+ if (rdev->mc.aper_size > config_aper_size)
+ config_aper_size = rdev->mc.aper_size;
+
if (config_aper_size > rdev->mc.real_vram_size)
rdev->mc.mc_vram_size = config_aper_size;
else
@@ -3225,6 +3180,8 @@ static int r100_cs_track_texture_check(struct radeon_device *rdev,
for (u = 0; u < track->num_texture; u++) {
if (!track->textures[u].enabled)
continue;
+ if (track->textures[u].lookup_disable)
+ continue;
robj = track->textures[u].robj;
if (robj == NULL) {
DRM_ERROR("No texture bound to unit %u\n", u);
@@ -3459,6 +3416,7 @@ void r100_cs_track_clear(struct radeon_device *rdev, struct r100_cs_track *track
track->textures[i].robj = NULL;
/* CS IB emission code makes sure texture unit are disabled */
track->textures[i].enabled = false;
+ track->textures[i].lookup_disable = false;
track->textures[i].roundup_w = true;
track->textures[i].roundup_h = true;
if (track->separate_cube)
@@ -3737,6 +3695,12 @@ static int r100_startup(struct radeon_device *rdev)
if (r)
return r;
}
+
+ /* allocate wb buffer */
+ r = radeon_wb_init(rdev);
+ if (r)
+ return r;
+
/* Enable IRQ */
r100_irq_set(rdev);
rdev->config.r100.hdp_cntl = RREG32(RADEON_HOST_PATH_CNTL);
@@ -3746,9 +3710,6 @@ static int r100_startup(struct radeon_device *rdev)
dev_err(rdev->dev, "failled initializing CP (%d).\n", r);
return r;
}
- r = r100_wb_init(rdev);
- if (r)
- dev_err(rdev->dev, "failled initializing WB (%d).\n", r);
r = r100_ib_init(rdev);
if (r) {
dev_err(rdev->dev, "failled initializing IB (%d).\n", r);
@@ -3782,7 +3743,7 @@ int r100_resume(struct radeon_device *rdev)
int r100_suspend(struct radeon_device *rdev)
{
r100_cp_disable(rdev);
- r100_wb_disable(rdev);
+ radeon_wb_disable(rdev);
r100_irq_disable(rdev);
if (rdev->flags & RADEON_IS_PCI)
r100_pci_gart_disable(rdev);
@@ -3792,7 +3753,7 @@ int r100_suspend(struct radeon_device *rdev)
void r100_fini(struct radeon_device *rdev)
{
r100_cp_fini(rdev);
- r100_wb_fini(rdev);
+ radeon_wb_fini(rdev);
r100_ib_fini(rdev);
radeon_gem_fini(rdev);
if (rdev->flags & RADEON_IS_PCI)
@@ -3905,7 +3866,7 @@ int r100_init(struct radeon_device *rdev)
/* Somethings want wront with the accel init stop accel */
dev_err(rdev->dev, "Disabling GPU acceleration\n");
r100_cp_fini(rdev);
- r100_wb_fini(rdev);
+ radeon_wb_fini(rdev);
r100_ib_fini(rdev);
radeon_irq_kms_fini(rdev);
if (rdev->flags & RADEON_IS_PCI)
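Editor's note: the r100-specific write-back buffer code removed above is superseded by the shared radeon_wb_init()/radeon_wb_disable()/radeon_wb_fini() helpers declared near the end of this diff. They manage one page-sized buffer carved into fixed byte offsets (scratch registers, CP read pointer, IH write pointer, EOP event data), so consumers simply index the mapped page instead of issuing an MMIO read. A standalone sketch of that indexing, using the offsets added to radeon.h below (the value written here is fake):

#include <stdint.h>
#include <stdio.h>

/* slot offsets in bytes, as added to radeon.h later in this series */
#define RADEON_WB_SCRATCH_OFFSET	0
#define RADEON_WB_CP_RPTR_OFFSET	1024
#define R600_WB_IH_WPTR_OFFSET		2048
#define R600_WB_EVENT_OFFSET		3072

int main(void)
{
	uint32_t wb_page[4096 / 4] = { 0 };

	/* the GPU DMA-writes these slots; fake a CP read-pointer update */
	wb_page[RADEON_WB_CP_RPTR_OFFSET / 4] = 42;

	/* the driver polls the mapped page instead of reading the register */
	printf("CP rptr via writeback: %u\n", wb_page[RADEON_WB_CP_RPTR_OFFSET / 4]);
	return 0;
}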
diff --git a/drivers/gpu/drm/radeon/r100_track.h b/drivers/gpu/drm/radeon/r100_track.h
index f47cdca1c004..af65600e6564 100644
--- a/drivers/gpu/drm/radeon/r100_track.h
+++ b/drivers/gpu/drm/radeon/r100_track.h
@@ -46,6 +46,7 @@ struct r100_cs_track_texture {
unsigned height_11;
bool use_pitch;
bool enabled;
+ bool lookup_disable;
bool roundup_w;
bool roundup_h;
unsigned compress_format;
diff --git a/drivers/gpu/drm/radeon/r200.c b/drivers/gpu/drm/radeon/r200.c
index 0266d72e0a4c..d2408c395619 100644
--- a/drivers/gpu/drm/radeon/r200.c
+++ b/drivers/gpu/drm/radeon/r200.c
@@ -447,6 +447,8 @@ int r200_packet0_check(struct radeon_cs_parser *p,
track->textures[i].width = 1 << ((idx_value >> RADEON_TXFORMAT_WIDTH_SHIFT) & RADEON_TXFORMAT_WIDTH_MASK);
track->textures[i].height = 1 << ((idx_value >> RADEON_TXFORMAT_HEIGHT_SHIFT) & RADEON_TXFORMAT_HEIGHT_MASK);
}
+ if (idx_value & R200_TXFORMAT_LOOKUP_DISABLE)
+ track->textures[i].lookup_disable = true;
switch ((idx_value & RADEON_TXFORMAT_FORMAT_MASK)) {
case R200_TXFORMAT_I8:
case R200_TXFORMAT_RGB332:
diff --git a/drivers/gpu/drm/radeon/r300.c b/drivers/gpu/drm/radeon/r300.c
index c827738ad7dd..cde1d3480d93 100644
--- a/drivers/gpu/drm/radeon/r300.c
+++ b/drivers/gpu/drm/radeon/r300.c
@@ -91,7 +91,7 @@ int rv370_pcie_gart_init(struct radeon_device *rdev)
int r;
if (rdev->gart.table.vram.robj) {
- WARN(1, "RV370 PCIE GART already initialized.\n");
+ WARN(1, "RV370 PCIE GART already initialized\n");
return 0;
}
/* Initialize common gart structure */
@@ -1332,6 +1332,12 @@ static int r300_startup(struct radeon_device *rdev)
if (r)
return r;
}
+
+ /* allocate wb buffer */
+ r = radeon_wb_init(rdev);
+ if (r)
+ return r;
+
/* Enable IRQ */
r100_irq_set(rdev);
rdev->config.r300.hdp_cntl = RREG32(RADEON_HOST_PATH_CNTL);
@@ -1341,9 +1347,6 @@ static int r300_startup(struct radeon_device *rdev)
dev_err(rdev->dev, "failled initializing CP (%d).\n", r);
return r;
}
- r = r100_wb_init(rdev);
- if (r)
- dev_err(rdev->dev, "failled initializing WB (%d).\n", r);
r = r100_ib_init(rdev);
if (r) {
dev_err(rdev->dev, "failled initializing IB (%d).\n", r);
@@ -1379,7 +1382,7 @@ int r300_resume(struct radeon_device *rdev)
int r300_suspend(struct radeon_device *rdev)
{
r100_cp_disable(rdev);
- r100_wb_disable(rdev);
+ radeon_wb_disable(rdev);
r100_irq_disable(rdev);
if (rdev->flags & RADEON_IS_PCIE)
rv370_pcie_gart_disable(rdev);
@@ -1391,7 +1394,7 @@ int r300_suspend(struct radeon_device *rdev)
void r300_fini(struct radeon_device *rdev)
{
r100_cp_fini(rdev);
- r100_wb_fini(rdev);
+ radeon_wb_fini(rdev);
r100_ib_fini(rdev);
radeon_gem_fini(rdev);
if (rdev->flags & RADEON_IS_PCIE)
@@ -1484,7 +1487,7 @@ int r300_init(struct radeon_device *rdev)
/* Somethings want wront with the accel init stop accel */
dev_err(rdev->dev, "Disabling GPU acceleration\n");
r100_cp_fini(rdev);
- r100_wb_fini(rdev);
+ radeon_wb_fini(rdev);
r100_ib_fini(rdev);
radeon_irq_kms_fini(rdev);
if (rdev->flags & RADEON_IS_PCIE)
diff --git a/drivers/gpu/drm/radeon/r420.c b/drivers/gpu/drm/radeon/r420.c
index 59f7bccc5be0..c387346f93a9 100644
--- a/drivers/gpu/drm/radeon/r420.c
+++ b/drivers/gpu/drm/radeon/r420.c
@@ -248,6 +248,12 @@ static int r420_startup(struct radeon_device *rdev)
return r;
}
r420_pipes_init(rdev);
+
+ /* allocate wb buffer */
+ r = radeon_wb_init(rdev);
+ if (r)
+ return r;
+
/* Enable IRQ */
r100_irq_set(rdev);
rdev->config.r300.hdp_cntl = RREG32(RADEON_HOST_PATH_CNTL);
@@ -258,10 +264,6 @@ static int r420_startup(struct radeon_device *rdev)
return r;
}
r420_cp_errata_init(rdev);
- r = r100_wb_init(rdev);
- if (r) {
- dev_err(rdev->dev, "failled initializing WB (%d).\n", r);
- }
r = r100_ib_init(rdev);
if (r) {
dev_err(rdev->dev, "failled initializing IB (%d).\n", r);
@@ -302,7 +304,7 @@ int r420_suspend(struct radeon_device *rdev)
{
r420_cp_errata_fini(rdev);
r100_cp_disable(rdev);
- r100_wb_disable(rdev);
+ radeon_wb_disable(rdev);
r100_irq_disable(rdev);
if (rdev->flags & RADEON_IS_PCIE)
rv370_pcie_gart_disable(rdev);
@@ -314,7 +316,7 @@ int r420_suspend(struct radeon_device *rdev)
void r420_fini(struct radeon_device *rdev)
{
r100_cp_fini(rdev);
- r100_wb_fini(rdev);
+ radeon_wb_fini(rdev);
r100_ib_fini(rdev);
radeon_gem_fini(rdev);
if (rdev->flags & RADEON_IS_PCIE)
@@ -418,7 +420,7 @@ int r420_init(struct radeon_device *rdev)
/* Somethings want wront with the accel init stop accel */
dev_err(rdev->dev, "Disabling GPU acceleration\n");
r100_cp_fini(rdev);
- r100_wb_fini(rdev);
+ radeon_wb_fini(rdev);
r100_ib_fini(rdev);
radeon_irq_kms_fini(rdev);
if (rdev->flags & RADEON_IS_PCIE)
diff --git a/drivers/gpu/drm/radeon/r520.c b/drivers/gpu/drm/radeon/r520.c
index 1458dee902dd..3c8677f9e385 100644
--- a/drivers/gpu/drm/radeon/r520.c
+++ b/drivers/gpu/drm/radeon/r520.c
@@ -181,6 +181,12 @@ static int r520_startup(struct radeon_device *rdev)
if (r)
return r;
}
+
+ /* allocate wb buffer */
+ r = radeon_wb_init(rdev);
+ if (r)
+ return r;
+
/* Enable IRQ */
rs600_irq_set(rdev);
rdev->config.r300.hdp_cntl = RREG32(RADEON_HOST_PATH_CNTL);
@@ -190,9 +196,6 @@ static int r520_startup(struct radeon_device *rdev)
dev_err(rdev->dev, "failled initializing CP (%d).\n", r);
return r;
}
- r = r100_wb_init(rdev);
- if (r)
- dev_err(rdev->dev, "failled initializing WB (%d).\n", r);
r = r100_ib_init(rdev);
if (r) {
dev_err(rdev->dev, "failled initializing IB (%d).\n", r);
@@ -295,7 +298,7 @@ int r520_init(struct radeon_device *rdev)
/* Somethings want wront with the accel init stop accel */
dev_err(rdev->dev, "Disabling GPU acceleration\n");
r100_cp_fini(rdev);
- r100_wb_fini(rdev);
+ radeon_wb_fini(rdev);
r100_ib_fini(rdev);
radeon_irq_kms_fini(rdev);
rv370_pcie_gart_fini(rdev);
diff --git a/drivers/gpu/drm/radeon/r600.c b/drivers/gpu/drm/radeon/r600.c
index 7b65e4efe8af..a322d4f647bd 100644
--- a/drivers/gpu/drm/radeon/r600.c
+++ b/drivers/gpu/drm/radeon/r600.c
@@ -97,14 +97,8 @@ u32 rv6xx_get_temp(struct radeon_device *rdev)
{
u32 temp = (RREG32(CG_THERMAL_STATUS) & ASIC_T_MASK) >>
ASIC_T_SHIFT;
- u32 actual_temp = 0;
- if ((temp >> 7) & 1)
- actual_temp = 0;
- else
- actual_temp = (temp >> 1) & 0xff;
-
- return actual_temp * 1000;
+ return temp * 1000;
}
void r600_pm_get_dynpm_state(struct radeon_device *rdev)
@@ -919,7 +913,7 @@ int r600_pcie_gart_init(struct radeon_device *rdev)
int r;
if (rdev->gart.table.vram.robj) {
- WARN(1, "R600 PCIE GART already initialized.\n");
+ WARN(1, "R600 PCIE GART already initialized\n");
return 0;
}
/* Initialize common gart structure */
@@ -1201,8 +1195,10 @@ void r600_vram_gtt_location(struct radeon_device *rdev, struct radeon_mc *mc)
mc->vram_end, mc->real_vram_size >> 20);
} else {
u64 base = 0;
- if (rdev->flags & RADEON_IS_IGP)
- base = (RREG32(MC_VM_FB_LOCATION) & 0xFFFF) << 24;
+ if (rdev->flags & RADEON_IS_IGP) {
+ base = RREG32(MC_VM_FB_LOCATION) & 0xFFFF;
+ base <<= 24;
+ }
radeon_vram_location(rdev, &rdev->mc, base);
rdev->mc.gtt_base_align = 0;
radeon_gtt_location(rdev, mc);
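Editor's note: the two-step assignment added just above is not cosmetic. RREG32() returns a 32-bit value, so the old single-expression form performed the << 24 shift in 32-bit arithmetic and silently truncated any frame-buffer base at or above 4 GB before it ever reached the 64-bit variable. A small standalone illustration (the register value is made up):

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint32_t fb_location = 0x0300;	/* hypothetical MC_VM_FB_LOCATION readback */

	uint64_t truncated = (fb_location & 0xFFFF) << 24;	/* 32-bit shift, high bits lost */
	uint64_t correct = fb_location & 0xFFFF;

	correct <<= 24;	/* 64-bit shift, as in the fixed code */
	printf("truncated=%#llx correct=%#llx\n",
	       (unsigned long long)truncated, (unsigned long long)correct);
	return 0;
}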
@@ -1608,8 +1604,11 @@ void r600_gpu_init(struct radeon_device *rdev)
rdev->config.r600.tiling_npipes = rdev->config.r600.max_tile_pipes;
rdev->config.r600.tiling_nbanks = 4 << ((ramcfg & NOOFBANK_MASK) >> NOOFBANK_SHIFT);
tiling_config |= BANK_TILING((ramcfg & NOOFBANK_MASK) >> NOOFBANK_SHIFT);
- tiling_config |= GROUP_SIZE(0);
- rdev->config.r600.tiling_group_size = 256;
+ tiling_config |= GROUP_SIZE((ramcfg & BURSTLENGTH_MASK) >> BURSTLENGTH_SHIFT);
+ if ((ramcfg & BURSTLENGTH_MASK) >> BURSTLENGTH_SHIFT)
+ rdev->config.r600.tiling_group_size = 512;
+ else
+ rdev->config.r600.tiling_group_size = 256;
tmp = (ramcfg & NOOFROWS_MASK) >> NOOFROWS_SHIFT;
if (tmp > 3) {
tiling_config |= ROW_TILING(3);
@@ -1920,6 +1919,7 @@ void r600_cp_stop(struct radeon_device *rdev)
{
rdev->mc.active_vram_size = rdev->mc.visible_vram_size;
WREG32(R_0086D8_CP_ME_CNTL, S_0086D8_CP_ME_HALT(1));
+ WREG32(SCRATCH_UMSK, 0);
}
int r600_init_microcode(struct radeon_device *rdev)
@@ -2152,7 +2152,7 @@ int r600_cp_resume(struct radeon_device *rdev)
/* Set ring buffer size */
rb_bufsz = drm_order(rdev->cp.ring_size / 8);
- tmp = RB_NO_UPDATE | (drm_order(RADEON_GPU_PAGE_SIZE/8) << 8) | rb_bufsz;
+ tmp = (drm_order(RADEON_GPU_PAGE_SIZE/8) << 8) | rb_bufsz;
#ifdef __BIG_ENDIAN
tmp |= BUF_SWAP_32BIT;
#endif
@@ -2166,8 +2166,19 @@ int r600_cp_resume(struct radeon_device *rdev)
WREG32(CP_RB_CNTL, tmp | RB_RPTR_WR_ENA);
WREG32(CP_RB_RPTR_WR, 0);
WREG32(CP_RB_WPTR, 0);
- WREG32(CP_RB_RPTR_ADDR, rdev->cp.gpu_addr & 0xFFFFFFFF);
- WREG32(CP_RB_RPTR_ADDR_HI, upper_32_bits(rdev->cp.gpu_addr));
+
+ /* set the wb address whether it's enabled or not */
+ WREG32(CP_RB_RPTR_ADDR, (rdev->wb.gpu_addr + RADEON_WB_CP_RPTR_OFFSET) & 0xFFFFFFFC);
+ WREG32(CP_RB_RPTR_ADDR_HI, upper_32_bits(rdev->wb.gpu_addr + RADEON_WB_CP_RPTR_OFFSET) & 0xFF);
+ WREG32(SCRATCH_ADDR, ((rdev->wb.gpu_addr + RADEON_WB_SCRATCH_OFFSET) >> 8) & 0xFFFFFFFF);
+
+ if (rdev->wb.enabled)
+ WREG32(SCRATCH_UMSK, 0xff);
+ else {
+ tmp |= RB_NO_UPDATE;
+ WREG32(SCRATCH_UMSK, 0);
+ }
+
mdelay(1);
WREG32(CP_RB_CNTL, tmp);
@@ -2219,9 +2230,10 @@ void r600_scratch_init(struct radeon_device *rdev)
int i;
rdev->scratch.num_reg = 7;
+ rdev->scratch.reg_base = SCRATCH_REG0;
for (i = 0; i < rdev->scratch.num_reg; i++) {
rdev->scratch.free[i] = true;
- rdev->scratch.reg[i] = SCRATCH_REG0 + (i * 4);
+ rdev->scratch.reg[i] = rdev->scratch.reg_base + (i * 4);
}
}
@@ -2265,88 +2277,34 @@ int r600_ring_test(struct radeon_device *rdev)
return r;
}
-void r600_wb_disable(struct radeon_device *rdev)
-{
- int r;
-
- WREG32(SCRATCH_UMSK, 0);
- if (rdev->wb.wb_obj) {
- r = radeon_bo_reserve(rdev->wb.wb_obj, false);
- if (unlikely(r != 0))
- return;
- radeon_bo_kunmap(rdev->wb.wb_obj);
- radeon_bo_unpin(rdev->wb.wb_obj);
- radeon_bo_unreserve(rdev->wb.wb_obj);
- }
-}
-
-void r600_wb_fini(struct radeon_device *rdev)
-{
- r600_wb_disable(rdev);
- if (rdev->wb.wb_obj) {
- radeon_bo_unref(&rdev->wb.wb_obj);
- rdev->wb.wb = NULL;
- rdev->wb.wb_obj = NULL;
- }
-}
-
-int r600_wb_enable(struct radeon_device *rdev)
-{
- int r;
-
- if (rdev->wb.wb_obj == NULL) {
- r = radeon_bo_create(rdev, NULL, RADEON_GPU_PAGE_SIZE, true,
- RADEON_GEM_DOMAIN_GTT, &rdev->wb.wb_obj);
- if (r) {
- dev_warn(rdev->dev, "(%d) create WB bo failed\n", r);
- return r;
- }
- r = radeon_bo_reserve(rdev->wb.wb_obj, false);
- if (unlikely(r != 0)) {
- r600_wb_fini(rdev);
- return r;
- }
- r = radeon_bo_pin(rdev->wb.wb_obj, RADEON_GEM_DOMAIN_GTT,
- &rdev->wb.gpu_addr);
- if (r) {
- radeon_bo_unreserve(rdev->wb.wb_obj);
- dev_warn(rdev->dev, "(%d) pin WB bo failed\n", r);
- r600_wb_fini(rdev);
- return r;
- }
- r = radeon_bo_kmap(rdev->wb.wb_obj, (void **)&rdev->wb.wb);
- radeon_bo_unreserve(rdev->wb.wb_obj);
- if (r) {
- dev_warn(rdev->dev, "(%d) map WB bo failed\n", r);
- r600_wb_fini(rdev);
- return r;
- }
- }
- WREG32(SCRATCH_ADDR, (rdev->wb.gpu_addr >> 8) & 0xFFFFFFFF);
- WREG32(CP_RB_RPTR_ADDR, (rdev->wb.gpu_addr + 1024) & 0xFFFFFFFC);
- WREG32(CP_RB_RPTR_ADDR_HI, upper_32_bits(rdev->wb.gpu_addr + 1024) & 0xFF);
- WREG32(SCRATCH_UMSK, 0xff);
- return 0;
-}
-
void r600_fence_ring_emit(struct radeon_device *rdev,
struct radeon_fence *fence)
{
- /* Also consider EVENT_WRITE_EOP. it handles the interrupts + timestamps + events */
-
- radeon_ring_write(rdev, PACKET3(PACKET3_EVENT_WRITE, 0));
- radeon_ring_write(rdev, CACHE_FLUSH_AND_INV_EVENT);
- /* wait for 3D idle clean */
- radeon_ring_write(rdev, PACKET3(PACKET3_SET_CONFIG_REG, 1));
- radeon_ring_write(rdev, (WAIT_UNTIL - PACKET3_SET_CONFIG_REG_OFFSET) >> 2);
- radeon_ring_write(rdev, WAIT_3D_IDLE_bit | WAIT_3D_IDLECLEAN_bit);
- /* Emit fence sequence & fire IRQ */
- radeon_ring_write(rdev, PACKET3(PACKET3_SET_CONFIG_REG, 1));
- radeon_ring_write(rdev, ((rdev->fence_drv.scratch_reg - PACKET3_SET_CONFIG_REG_OFFSET) >> 2));
- radeon_ring_write(rdev, fence->seq);
- /* CP_INTERRUPT packet 3 no longer exists, use packet 0 */
- radeon_ring_write(rdev, PACKET0(CP_INT_STATUS, 0));
- radeon_ring_write(rdev, RB_INT_STAT);
+ if (rdev->wb.use_event) {
+ u64 addr = rdev->wb.gpu_addr + R600_WB_EVENT_OFFSET +
+ (u64)(rdev->fence_drv.scratch_reg - rdev->scratch.reg_base);
+ /* EVENT_WRITE_EOP - flush caches, send int */
+ radeon_ring_write(rdev, PACKET3(PACKET3_EVENT_WRITE_EOP, 4));
+ radeon_ring_write(rdev, EVENT_TYPE(CACHE_FLUSH_AND_INV_EVENT_TS) | EVENT_INDEX(5));
+ radeon_ring_write(rdev, addr & 0xffffffff);
+ radeon_ring_write(rdev, (upper_32_bits(addr) & 0xff) | DATA_SEL(1) | INT_SEL(2));
+ radeon_ring_write(rdev, fence->seq);
+ radeon_ring_write(rdev, 0);
+ } else {
+ radeon_ring_write(rdev, PACKET3(PACKET3_EVENT_WRITE, 0));
+ radeon_ring_write(rdev, EVENT_TYPE(CACHE_FLUSH_AND_INV_EVENT) | EVENT_INDEX(0));
+ /* wait for 3D idle clean */
+ radeon_ring_write(rdev, PACKET3(PACKET3_SET_CONFIG_REG, 1));
+ radeon_ring_write(rdev, (WAIT_UNTIL - PACKET3_SET_CONFIG_REG_OFFSET) >> 2);
+ radeon_ring_write(rdev, WAIT_3D_IDLE_bit | WAIT_3D_IDLECLEAN_bit);
+ /* Emit fence sequence & fire IRQ */
+ radeon_ring_write(rdev, PACKET3(PACKET3_SET_CONFIG_REG, 1));
+ radeon_ring_write(rdev, ((rdev->fence_drv.scratch_reg - PACKET3_SET_CONFIG_REG_OFFSET) >> 2));
+ radeon_ring_write(rdev, fence->seq);
+ /* CP_INTERRUPT packet 3 no longer exists, use packet 0 */
+ radeon_ring_write(rdev, PACKET0(CP_INT_STATUS, 0));
+ radeon_ring_write(rdev, RB_INT_STAT);
+ }
}
int r600_copy_blit(struct radeon_device *rdev,
@@ -2428,19 +2386,12 @@ int r600_startup(struct radeon_device *rdev)
rdev->asic->copy = NULL;
dev_warn(rdev->dev, "failed blitter (%d) falling back to memcpy\n", r);
}
- /* pin copy shader into vram */
- if (rdev->r600_blit.shader_obj) {
- r = radeon_bo_reserve(rdev->r600_blit.shader_obj, false);
- if (unlikely(r != 0))
- return r;
- r = radeon_bo_pin(rdev->r600_blit.shader_obj, RADEON_GEM_DOMAIN_VRAM,
- &rdev->r600_blit.shader_gpu_addr);
- radeon_bo_unreserve(rdev->r600_blit.shader_obj);
- if (r) {
- dev_err(rdev->dev, "(%d) pin blit object failed\n", r);
- return r;
- }
- }
+
+ /* allocate wb buffer */
+ r = radeon_wb_init(rdev);
+ if (r)
+ return r;
+
/* Enable IRQ */
r = r600_irq_init(rdev);
if (r) {
@@ -2459,8 +2410,7 @@ int r600_startup(struct radeon_device *rdev)
r = r600_cp_resume(rdev);
if (r)
return r;
- /* write back buffer are not vital so don't worry about failure */
- r600_wb_enable(rdev);
+
return 0;
}
@@ -2519,7 +2469,7 @@ int r600_suspend(struct radeon_device *rdev)
r600_cp_stop(rdev);
rdev->cp.ready = false;
r600_irq_suspend(rdev);
- r600_wb_disable(rdev);
+ radeon_wb_disable(rdev);
r600_pcie_gart_disable(rdev);
/* unpin shaders bo */
if (rdev->r600_blit.shader_obj) {
@@ -2616,8 +2566,8 @@ int r600_init(struct radeon_device *rdev)
if (r) {
dev_err(rdev->dev, "disabling GPU acceleration\n");
r600_cp_fini(rdev);
- r600_wb_fini(rdev);
r600_irq_fini(rdev);
+ radeon_wb_fini(rdev);
radeon_irq_kms_fini(rdev);
r600_pcie_gart_fini(rdev);
rdev->accel_working = false;
@@ -2647,8 +2597,8 @@ void r600_fini(struct radeon_device *rdev)
r600_audio_fini(rdev);
r600_blit_fini(rdev);
r600_cp_fini(rdev);
- r600_wb_fini(rdev);
r600_irq_fini(rdev);
+ radeon_wb_fini(rdev);
radeon_irq_kms_fini(rdev);
r600_pcie_gart_fini(rdev);
radeon_agp_fini(rdev);
@@ -2770,7 +2720,7 @@ static int r600_ih_ring_alloc(struct radeon_device *rdev)
/* Allocate ring buffer */
if (rdev->ih.ring_obj == NULL) {
r = radeon_bo_create(rdev, NULL, rdev->ih.ring_size,
- true,
+ PAGE_SIZE, true,
RADEON_GEM_DOMAIN_GTT,
&rdev->ih.ring_obj);
if (r) {
@@ -2983,10 +2933,13 @@ int r600_irq_init(struct radeon_device *rdev)
ih_rb_cntl = (IH_WPTR_OVERFLOW_ENABLE |
IH_WPTR_OVERFLOW_CLEAR |
(rb_bufsz << 1));
- /* WPTR writeback, not yet */
- /*ih_rb_cntl |= IH_WPTR_WRITEBACK_ENABLE;*/
- WREG32(IH_RB_WPTR_ADDR_LO, 0);
- WREG32(IH_RB_WPTR_ADDR_HI, 0);
+
+ if (rdev->wb.enabled)
+ ih_rb_cntl |= IH_WPTR_WRITEBACK_ENABLE;
+
+ /* set the writeback address whether it's enabled or not */
+ WREG32(IH_RB_WPTR_ADDR_LO, (rdev->wb.gpu_addr + R600_WB_IH_WPTR_OFFSET) & 0xFFFFFFFC);
+ WREG32(IH_RB_WPTR_ADDR_HI, upper_32_bits(rdev->wb.gpu_addr + R600_WB_IH_WPTR_OFFSET) & 0xFF);
WREG32(IH_RB_CNTL, ih_rb_cntl);
@@ -3038,7 +2991,7 @@ int r600_irq_set(struct radeon_device *rdev)
u32 hdmi1, hdmi2;
if (!rdev->irq.installed) {
- WARN(1, "Can't enable IRQ/MSI because no handler is installed.\n");
+ WARN(1, "Can't enable IRQ/MSI because no handler is installed\n");
return -EINVAL;
}
/* don't enable anything if the ih is disabled */
@@ -3070,6 +3023,7 @@ int r600_irq_set(struct radeon_device *rdev)
if (rdev->irq.sw_int) {
DRM_DEBUG("r600_irq_set: sw int\n");
cp_int_cntl |= RB_INT_ENABLE;
+ cp_int_cntl |= TIME_STAMP_INT_ENABLE;
}
if (rdev->irq.crtc_vblank_int[0]) {
DRM_DEBUG("r600_irq_set: vblank 0\n");
@@ -3244,8 +3198,10 @@ static inline u32 r600_get_ih_wptr(struct radeon_device *rdev)
{
u32 wptr, tmp;
- /* XXX use writeback */
- wptr = RREG32(IH_RB_WPTR);
+ if (rdev->wb.enabled)
+ wptr = rdev->wb.wb[R600_WB_IH_WPTR_OFFSET/4];
+ else
+ wptr = RREG32(IH_RB_WPTR);
if (wptr & RB_OVERFLOW) {
/* When a ring buffer overflow happen start parsing interrupt
@@ -3433,6 +3389,7 @@ restart_ih:
break;
case 181: /* CP EOP event */
DRM_DEBUG("IH: CP EOP\n");
+ radeon_fence_process(rdev);
break;
case 233: /* GUI IDLE */
DRM_DEBUG("IH: CP EOP\n");
diff --git a/drivers/gpu/drm/radeon/r600_blit_kms.c b/drivers/gpu/drm/radeon/r600_blit_kms.c
index 3473c00781ff..86e5aa07f0db 100644
--- a/drivers/gpu/drm/radeon/r600_blit_kms.c
+++ b/drivers/gpu/drm/radeon/r600_blit_kms.c
@@ -472,9 +472,10 @@ int r600_blit_init(struct radeon_device *rdev)
u32 packet2s[16];
int num_packet2s = 0;
- /* don't reinitialize blit */
+ /* pin copy shader into vram if already initialized */
if (rdev->r600_blit.shader_obj)
- return 0;
+ goto done;
+
mutex_init(&rdev->r600_blit.mutex);
rdev->r600_blit.state_offset = 0;
@@ -500,7 +501,7 @@ int r600_blit_init(struct radeon_device *rdev)
obj_size += r6xx_ps_size * 4;
obj_size = ALIGN(obj_size, 256);
- r = radeon_bo_create(rdev, NULL, obj_size, true, RADEON_GEM_DOMAIN_VRAM,
+ r = radeon_bo_create(rdev, NULL, obj_size, PAGE_SIZE, true, RADEON_GEM_DOMAIN_VRAM,
&rdev->r600_blit.shader_obj);
if (r) {
DRM_ERROR("r600 failed to allocate shader\n");
@@ -532,6 +533,18 @@ int r600_blit_init(struct radeon_device *rdev)
memcpy(ptr + rdev->r600_blit.ps_offset, r6xx_ps, r6xx_ps_size * 4);
radeon_bo_kunmap(rdev->r600_blit.shader_obj);
radeon_bo_unreserve(rdev->r600_blit.shader_obj);
+
+done:
+ r = radeon_bo_reserve(rdev->r600_blit.shader_obj, false);
+ if (unlikely(r != 0))
+ return r;
+ r = radeon_bo_pin(rdev->r600_blit.shader_obj, RADEON_GEM_DOMAIN_VRAM,
+ &rdev->r600_blit.shader_gpu_addr);
+ radeon_bo_unreserve(rdev->r600_blit.shader_obj);
+ if (r) {
+ dev_err(rdev->dev, "(%d) pin blit object failed\n", r);
+ return r;
+ }
rdev->mc.active_vram_size = rdev->mc.real_vram_size;
return 0;
}
@@ -554,7 +567,7 @@ void r600_blit_fini(struct radeon_device *rdev)
radeon_bo_unref(&rdev->r600_blit.shader_obj);
}
-int r600_vb_ib_get(struct radeon_device *rdev)
+static int r600_vb_ib_get(struct radeon_device *rdev)
{
int r;
r = radeon_ib_get(rdev, &rdev->r600_blit.vb_ib);
@@ -568,7 +581,7 @@ int r600_vb_ib_get(struct radeon_device *rdev)
return 0;
}
-void r600_vb_ib_put(struct radeon_device *rdev)
+static void r600_vb_ib_put(struct radeon_device *rdev)
{
radeon_fence_emit(rdev, rdev->r600_blit.vb_ib->fence);
radeon_ib_free(rdev, &rdev->r600_blit.vb_ib);
@@ -650,8 +663,8 @@ void r600_kms_blit_copy(struct radeon_device *rdev,
int src_x = src_gpu_addr & 255;
int dst_x = dst_gpu_addr & 255;
int h = 1;
- src_gpu_addr = src_gpu_addr & ~255;
- dst_gpu_addr = dst_gpu_addr & ~255;
+ src_gpu_addr = src_gpu_addr & ~255ULL;
+ dst_gpu_addr = dst_gpu_addr & ~255ULL;
if (!src_x && !dst_x) {
h = (cur_size / max_bytes);
@@ -672,17 +685,6 @@ void r600_kms_blit_copy(struct radeon_device *rdev,
if ((rdev->r600_blit.vb_used + 48) > rdev->r600_blit.vb_total) {
WARN_ON(1);
-
-#if 0
- r600_vb_ib_put(rdev);
-
- r600_nomm_put_vb(dev);
- r600_nomm_get_vb(dev);
- if (!dev_priv->blit_vb)
- return;
- set_shaders(dev);
- vb = r600_nomm_get_vb_ptr(dev);
-#endif
}
vb[0] = i2f(dst_x);
@@ -744,8 +746,8 @@ void r600_kms_blit_copy(struct radeon_device *rdev,
int src_x = (src_gpu_addr & 255);
int dst_x = (dst_gpu_addr & 255);
int h = 1;
- src_gpu_addr = src_gpu_addr & ~255;
- dst_gpu_addr = dst_gpu_addr & ~255;
+ src_gpu_addr = src_gpu_addr & ~255ULL;
+ dst_gpu_addr = dst_gpu_addr & ~255ULL;
if (!src_x && !dst_x) {
h = (cur_size / max_bytes);
@@ -767,17 +769,6 @@ void r600_kms_blit_copy(struct radeon_device *rdev,
if ((rdev->r600_blit.vb_used + 48) > rdev->r600_blit.vb_total) {
WARN_ON(1);
}
-#if 0
- if ((rdev->blit_vb->used + 48) > rdev->blit_vb->total) {
- r600_nomm_put_vb(dev);
- r600_nomm_get_vb(dev);
- if (!rdev->blit_vb)
- return;
-
- set_shaders(dev);
- vb = r600_nomm_get_vb_ptr(dev);
- }
-#endif
vb[0] = i2f(dst_x / 4);
vb[1] = 0;
diff --git a/drivers/gpu/drm/radeon/r600_cs.c b/drivers/gpu/drm/radeon/r600_cs.c
index 250a3a918193..0f90fc3482ce 100644
--- a/drivers/gpu/drm/radeon/r600_cs.c
+++ b/drivers/gpu/drm/radeon/r600_cs.c
@@ -50,6 +50,7 @@ struct r600_cs_track {
u32 nsamples;
u32 cb_color_base_last[8];
struct radeon_bo *cb_color_bo[8];
+ u64 cb_color_bo_mc[8];
u32 cb_color_bo_offset[8];
struct radeon_bo *cb_color_frag_bo[8];
struct radeon_bo *cb_color_tile_bo[8];
@@ -67,6 +68,7 @@ struct r600_cs_track {
u32 db_depth_size;
u32 db_offset;
struct radeon_bo *db_bo;
+ u64 db_bo_mc;
};
static inline int r600_bpe_from_format(u32 *bpe, u32 format)
@@ -140,6 +142,68 @@ static inline int r600_bpe_from_format(u32 *bpe, u32 format)
return 0;
}
+struct array_mode_checker {
+ int array_mode;
+ u32 group_size;
+ u32 nbanks;
+ u32 npipes;
+ u32 nsamples;
+ u32 bpe;
+};
+
+/* returns alignment in pixels for pitch/height/depth and bytes for base */
+static inline int r600_get_array_mode_alignment(struct array_mode_checker *values,
+ u32 *pitch_align,
+ u32 *height_align,
+ u32 *depth_align,
+ u64 *base_align)
+{
+ u32 tile_width = 8;
+ u32 tile_height = 8;
+ u32 macro_tile_width = values->nbanks;
+ u32 macro_tile_height = values->npipes;
+ u32 tile_bytes = tile_width * tile_height * values->bpe * values->nsamples;
+ u32 macro_tile_bytes = macro_tile_width * macro_tile_height * tile_bytes;
+
+ switch (values->array_mode) {
+ case ARRAY_LINEAR_GENERAL:
+ /* technically tile_width/_height for pitch/height */
+ *pitch_align = 1; /* tile_width */
+ *height_align = 1; /* tile_height */
+ *depth_align = 1;
+ *base_align = 1;
+ break;
+ case ARRAY_LINEAR_ALIGNED:
+ *pitch_align = max((u32)64, (u32)(values->group_size / values->bpe));
+ *height_align = tile_height;
+ *depth_align = 1;
+ *base_align = values->group_size;
+ break;
+ case ARRAY_1D_TILED_THIN1:
+ *pitch_align = max((u32)tile_width,
+ (u32)(values->group_size /
+ (tile_height * values->bpe * values->nsamples)));
+ *height_align = tile_height;
+ *depth_align = 1;
+ *base_align = values->group_size;
+ break;
+ case ARRAY_2D_TILED_THIN1:
+ *pitch_align = max((u32)macro_tile_width,
+ (u32)(((values->group_size / tile_height) /
+ (values->bpe * values->nsamples)) *
+ values->nbanks)) * tile_width;
+ *height_align = macro_tile_height * tile_height;
+ *depth_align = 1;
+ *base_align = max(macro_tile_bytes,
+ (*pitch_align) * values->bpe * (*height_align) * values->nsamples);
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
static void r600_cs_track_init(struct r600_cs_track *track)
{
int i;
@@ -153,10 +217,12 @@ static void r600_cs_track_init(struct r600_cs_track *track)
track->cb_color_info[i] = 0;
track->cb_color_bo[i] = NULL;
track->cb_color_bo_offset[i] = 0xFFFFFFFF;
+ track->cb_color_bo_mc[i] = 0xFFFFFFFF;
}
track->cb_target_mask = 0xFFFFFFFF;
track->cb_shader_mask = 0xFFFFFFFF;
track->db_bo = NULL;
+ track->db_bo_mc = 0xFFFFFFFF;
/* assume the biggest format and that htile is enabled */
track->db_depth_info = 7 | (1 << 25);
track->db_depth_view = 0xFFFFC000;
@@ -168,8 +234,12 @@ static void r600_cs_track_init(struct r600_cs_track *track)
static inline int r600_cs_track_validate_cb(struct radeon_cs_parser *p, int i)
{
struct r600_cs_track *track = p->track;
- u32 bpe = 0, pitch, slice_tile_max, size, tmp, height, pitch_align;
+ u32 bpe = 0, slice_tile_max, size, tmp;
+ u32 height, height_align, pitch, pitch_align, depth_align;
+ u64 base_offset, base_align;
+ struct array_mode_checker array_check;
volatile u32 *ib = p->ib->ptr;
+ unsigned array_mode;
if (G_0280A0_TILE_MODE(track->cb_color_info[i])) {
dev_warn(p->dev, "FMASK or CMASK buffer are not supported by this kernel\n");
@@ -182,57 +252,40 @@ static inline int r600_cs_track_validate_cb(struct radeon_cs_parser *p, int i)
i, track->cb_color_info[i]);
return -EINVAL;
}
- /* pitch is the number of 8x8 tiles per row */
- pitch = G_028060_PITCH_TILE_MAX(track->cb_color_size[i]) + 1;
+ /* pitch in pixels */
+ pitch = (G_028060_PITCH_TILE_MAX(track->cb_color_size[i]) + 1) * 8;
slice_tile_max = G_028060_SLICE_TILE_MAX(track->cb_color_size[i]) + 1;
- height = size / (pitch * 8 * bpe);
+ slice_tile_max *= 64;
+ height = slice_tile_max / pitch;
if (height > 8192)
height = 8192;
- if (height > 7)
- height &= ~0x7;
- switch (G_0280A0_ARRAY_MODE(track->cb_color_info[i])) {
+ array_mode = G_0280A0_ARRAY_MODE(track->cb_color_info[i]);
+
+ base_offset = track->cb_color_bo_mc[i] + track->cb_color_bo_offset[i];
+ array_check.array_mode = array_mode;
+ array_check.group_size = track->group_size;
+ array_check.nbanks = track->nbanks;
+ array_check.npipes = track->npipes;
+ array_check.nsamples = track->nsamples;
+ array_check.bpe = bpe;
+ if (r600_get_array_mode_alignment(&array_check,
+ &pitch_align, &height_align, &depth_align, &base_align)) {
+ dev_warn(p->dev, "%s invalid tiling %d for %d (0x%08X)\n", __func__,
+ G_0280A0_ARRAY_MODE(track->cb_color_info[i]), i,
+ track->cb_color_info[i]);
+ return -EINVAL;
+ }
+ switch (array_mode) {
case V_0280A0_ARRAY_LINEAR_GENERAL:
- /* technically height & 0x7 */
break;
case V_0280A0_ARRAY_LINEAR_ALIGNED:
- pitch_align = max((u32)64, (u32)(track->group_size / bpe)) / 8;
- if (!IS_ALIGNED(pitch, pitch_align)) {
- dev_warn(p->dev, "%s:%d cb pitch (%d) invalid\n",
- __func__, __LINE__, pitch);
- return -EINVAL;
- }
- if (!IS_ALIGNED(height, 8)) {
- dev_warn(p->dev, "%s:%d cb height (%d) invalid\n",
- __func__, __LINE__, height);
- return -EINVAL;
- }
break;
case V_0280A0_ARRAY_1D_TILED_THIN1:
- pitch_align = max((u32)8, (u32)(track->group_size / (8 * bpe * track->nsamples))) / 8;
- if (!IS_ALIGNED(pitch, pitch_align)) {
- dev_warn(p->dev, "%s:%d cb pitch (%d) invalid\n",
- __func__, __LINE__, pitch);
- return -EINVAL;
- }
- if (!IS_ALIGNED(height, 8)) {
- dev_warn(p->dev, "%s:%d cb height (%d) invalid\n",
- __func__, __LINE__, height);
- return -EINVAL;
- }
+ /* avoid breaking userspace */
+ if (height > 7)
+ height &= ~0x7;
break;
case V_0280A0_ARRAY_2D_TILED_THIN1:
- pitch_align = max((u32)track->nbanks,
- (u32)(((track->group_size / 8) / (bpe * track->nsamples)) * track->nbanks));
- if (!IS_ALIGNED(pitch, pitch_align)) {
- dev_warn(p->dev, "%s:%d cb pitch (%d) invalid\n",
- __func__, __LINE__, pitch);
- return -EINVAL;
- }
- if (!IS_ALIGNED((height / 8), track->nbanks)) {
- dev_warn(p->dev, "%s:%d cb height (%d) invalid\n",
- __func__, __LINE__, height);
- return -EINVAL;
- }
break;
default:
dev_warn(p->dev, "%s invalid tiling %d for %d (0x%08X)\n", __func__,
@@ -240,21 +293,43 @@ static inline int r600_cs_track_validate_cb(struct radeon_cs_parser *p, int i)
track->cb_color_info[i]);
return -EINVAL;
}
- /* check offset */
- tmp = height * pitch * 8 * bpe;
- if ((tmp + track->cb_color_bo_offset[i]) > radeon_bo_size(track->cb_color_bo[i])) {
- dev_warn(p->dev, "%s offset[%d] %d too big\n", __func__, i, track->cb_color_bo_offset[i]);
+
+ if (!IS_ALIGNED(pitch, pitch_align)) {
+ dev_warn(p->dev, "%s:%d cb pitch (%d) invalid\n",
+ __func__, __LINE__, pitch);
+ return -EINVAL;
+ }
+ if (!IS_ALIGNED(height, height_align)) {
+ dev_warn(p->dev, "%s:%d cb height (%d) invalid\n",
+ __func__, __LINE__, height);
return -EINVAL;
}
- if (!IS_ALIGNED(track->cb_color_bo_offset[i], track->group_size)) {
- dev_warn(p->dev, "%s offset[%d] %d not aligned\n", __func__, i, track->cb_color_bo_offset[i]);
+ if (!IS_ALIGNED(base_offset, base_align)) {
+ dev_warn(p->dev, "%s offset[%d] 0x%llx not aligned\n", __func__, i, base_offset);
return -EINVAL;
}
+
+ /* check offset */
+ tmp = height * pitch * bpe;
+ if ((tmp + track->cb_color_bo_offset[i]) > radeon_bo_size(track->cb_color_bo[i])) {
+ if (array_mode == V_0280A0_ARRAY_LINEAR_GENERAL) {
+ /* the initial DDX does bad things with the CB size occasionally */
+ /* it rounds up height too far for slice tile max but the BO is smaller */
+ tmp = (height - 7) * 8 * bpe;
+ if ((tmp + track->cb_color_bo_offset[i]) > radeon_bo_size(track->cb_color_bo[i])) {
+ dev_warn(p->dev, "%s offset[%d] %d %d %lu too big\n", __func__, i, track->cb_color_bo_offset[i], tmp, radeon_bo_size(track->cb_color_bo[i]));
+ return -EINVAL;
+ }
+ } else {
+ dev_warn(p->dev, "%s offset[%d] %d %d %lu too big\n", __func__, i, track->cb_color_bo_offset[i], tmp, radeon_bo_size(track->cb_color_bo[i]));
+ return -EINVAL;
+ }
+ }
/* limit max tile */
- tmp = (height * pitch * 8) >> 6;
+ tmp = (height * pitch) >> 6;
if (tmp < slice_tile_max)
slice_tile_max = tmp;
- tmp = S_028060_PITCH_TILE_MAX(pitch - 1) |
+ tmp = S_028060_PITCH_TILE_MAX((pitch / 8) - 1) |
S_028060_SLICE_TILE_MAX(slice_tile_max - 1);
ib[track->cb_color_size_idx[i]] = tmp;
return 0;
@@ -296,7 +371,12 @@ static int r600_cs_track_check(struct radeon_cs_parser *p)
/* Check depth buffer */
if (G_028800_STENCIL_ENABLE(track->db_depth_control) ||
G_028800_Z_ENABLE(track->db_depth_control)) {
- u32 nviews, bpe, ntiles, pitch, pitch_align, height, size;
+ u32 nviews, bpe, ntiles, size, slice_tile_max;
+ u32 height, height_align, pitch, pitch_align, depth_align;
+ u64 base_offset, base_align;
+ struct array_mode_checker array_check;
+ int array_mode;
+
if (track->db_bo == NULL) {
dev_warn(p->dev, "z/stencil with no depth buffer\n");
return -EINVAL;
@@ -339,39 +419,34 @@ static int r600_cs_track_check(struct radeon_cs_parser *p)
ib[track->db_depth_size_idx] = S_028000_SLICE_TILE_MAX(tmp - 1) | (track->db_depth_size & 0x3FF);
} else {
size = radeon_bo_size(track->db_bo);
- pitch = G_028000_PITCH_TILE_MAX(track->db_depth_size) + 1;
- height = size / (pitch * 8 * bpe);
- height &= ~0x7;
- if (!height)
- height = 8;
-
- switch (G_028010_ARRAY_MODE(track->db_depth_info)) {
+ /* pitch in pixels */
+ pitch = (G_028000_PITCH_TILE_MAX(track->db_depth_size) + 1) * 8;
+ slice_tile_max = G_028000_SLICE_TILE_MAX(track->db_depth_size) + 1;
+ slice_tile_max *= 64;
+ height = slice_tile_max / pitch;
+ if (height > 8192)
+ height = 8192;
+ base_offset = track->db_bo_mc + track->db_offset;
+ array_mode = G_028010_ARRAY_MODE(track->db_depth_info);
+ array_check.array_mode = array_mode;
+ array_check.group_size = track->group_size;
+ array_check.nbanks = track->nbanks;
+ array_check.npipes = track->npipes;
+ array_check.nsamples = track->nsamples;
+ array_check.bpe = bpe;
+ if (r600_get_array_mode_alignment(&array_check,
+ &pitch_align, &height_align, &depth_align, &base_align)) {
+ dev_warn(p->dev, "%s invalid tiling %d (0x%08X)\n", __func__,
+ G_028010_ARRAY_MODE(track->db_depth_info),
+ track->db_depth_info);
+ return -EINVAL;
+ }
+ switch (array_mode) {
case V_028010_ARRAY_1D_TILED_THIN1:
- pitch_align = (max((u32)8, (u32)(track->group_size / (8 * bpe))) / 8);
- if (!IS_ALIGNED(pitch, pitch_align)) {
- dev_warn(p->dev, "%s:%d db pitch (%d) invalid\n",
- __func__, __LINE__, pitch);
- return -EINVAL;
- }
- if (!IS_ALIGNED(height, 8)) {
- dev_warn(p->dev, "%s:%d db height (%d) invalid\n",
- __func__, __LINE__, height);
- return -EINVAL;
- }
+ /* don't break userspace */
+ height &= ~0x7;
break;
case V_028010_ARRAY_2D_TILED_THIN1:
- pitch_align = max((u32)track->nbanks,
- (u32)(((track->group_size / 8) / bpe) * track->nbanks));
- if (!IS_ALIGNED(pitch, pitch_align)) {
- dev_warn(p->dev, "%s:%d db pitch (%d) invalid\n",
- __func__, __LINE__, pitch);
- return -EINVAL;
- }
- if ((height / 8) & (track->nbanks - 1)) {
- dev_warn(p->dev, "%s:%d db height (%d) invalid\n",
- __func__, __LINE__, height);
- return -EINVAL;
- }
break;
default:
dev_warn(p->dev, "%s invalid tiling %d (0x%08X)\n", __func__,
@@ -379,15 +454,27 @@ static int r600_cs_track_check(struct radeon_cs_parser *p)
track->db_depth_info);
return -EINVAL;
}
- if (!IS_ALIGNED(track->db_offset, track->group_size)) {
- dev_warn(p->dev, "%s offset[%d] %d not aligned\n", __func__, i, track->db_offset);
+
+ if (!IS_ALIGNED(pitch, pitch_align)) {
+ dev_warn(p->dev, "%s:%d db pitch (%d) invalid\n",
+ __func__, __LINE__, pitch);
+ return -EINVAL;
+ }
+ if (!IS_ALIGNED(height, height_align)) {
+ dev_warn(p->dev, "%s:%d db height (%d) invalid\n",
+ __func__, __LINE__, height);
+ return -EINVAL;
+ }
+ if (!IS_ALIGNED(base_offset, base_align)) {
+ dev_warn(p->dev, "%s offset[%d] 0x%llx not aligned\n", __func__, i, base_offset);
return -EINVAL;
}
+
ntiles = G_028000_SLICE_TILE_MAX(track->db_depth_size) + 1;
nviews = G_028004_SLICE_MAX(track->db_depth_view) + 1;
tmp = ntiles * bpe * 64 * nviews;
if ((tmp + track->db_offset) > radeon_bo_size(track->db_bo)) {
- dev_warn(p->dev, "z/stencil buffer too small (0x%08X %d %d %d -> %d have %ld)\n",
+ dev_warn(p->dev, "z/stencil buffer too small (0x%08X %d %d %d -> %u have %lu)\n",
track->db_depth_size, ntiles, nviews, bpe, tmp + track->db_offset,
radeon_bo_size(track->db_bo));
return -EINVAL;
@@ -938,6 +1025,7 @@ static inline int r600_cs_check_reg(struct radeon_cs_parser *p, u32 reg, u32 idx
ib[idx] += (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff);
track->cb_color_base_last[tmp] = ib[idx];
track->cb_color_bo[tmp] = reloc->robj;
+ track->cb_color_bo_mc[tmp] = reloc->lobj.gpu_offset;
break;
case DB_DEPTH_BASE:
r = r600_cs_packet_next_reloc(p, &reloc);
@@ -949,6 +1037,7 @@ static inline int r600_cs_check_reg(struct radeon_cs_parser *p, u32 reg, u32 idx
track->db_offset = radeon_get_ib_value(p, idx) << 8;
ib[idx] += (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff);
track->db_bo = reloc->robj;
+ track->db_bo_mc = reloc->lobj.gpu_offset;
break;
case DB_HTILE_DATA_BASE:
case SQ_PGM_START_FS:
@@ -1070,16 +1159,25 @@ static void r600_texture_size(unsigned nfaces, unsigned blevel, unsigned nlevels
static inline int r600_check_texture_resource(struct radeon_cs_parser *p, u32 idx,
struct radeon_bo *texture,
struct radeon_bo *mipmap,
+ u64 base_offset,
+ u64 mip_offset,
u32 tiling_flags)
{
struct r600_cs_track *track = p->track;
u32 nfaces, nlevels, blevel, w0, h0, d0, bpe = 0;
- u32 word0, word1, l0_size, mipmap_size, pitch, pitch_align;
+ u32 word0, word1, l0_size, mipmap_size;
+ u32 height_align, pitch, pitch_align, depth_align;
+ u64 base_align;
+ struct array_mode_checker array_check;
/* on legacy kernel we don't perform advanced check */
if (p->rdev == NULL)
return 0;
+ /* convert to bytes */
+ base_offset <<= 8;
+ mip_offset <<= 8;
+
word0 = radeon_get_ib_value(p, idx + 0);
if (tiling_flags & RADEON_TILING_MACRO)
word0 |= S_038000_TILE_MODE(V_038000_ARRAY_2D_TILED_THIN1);
@@ -1112,46 +1210,38 @@ static inline int r600_check_texture_resource(struct radeon_cs_parser *p, u32 i
return -EINVAL;
}
- pitch = G_038000_PITCH(word0) + 1;
- switch (G_038000_TILE_MODE(word0)) {
- case V_038000_ARRAY_LINEAR_GENERAL:
- pitch_align = 1;
- /* XXX check height align */
- break;
- case V_038000_ARRAY_LINEAR_ALIGNED:
- pitch_align = max((u32)64, (u32)(track->group_size / bpe)) / 8;
- if (!IS_ALIGNED(pitch, pitch_align)) {
- dev_warn(p->dev, "%s:%d tex pitch (%d) invalid\n",
- __func__, __LINE__, pitch);
- return -EINVAL;
- }
- /* XXX check height align */
- break;
- case V_038000_ARRAY_1D_TILED_THIN1:
- pitch_align = max((u32)8, (u32)(track->group_size / (8 * bpe))) / 8;
- if (!IS_ALIGNED(pitch, pitch_align)) {
- dev_warn(p->dev, "%s:%d tex pitch (%d) invalid\n",
- __func__, __LINE__, pitch);
- return -EINVAL;
- }
- /* XXX check height align */
- break;
- case V_038000_ARRAY_2D_TILED_THIN1:
- pitch_align = max((u32)track->nbanks,
- (u32)(((track->group_size / 8) / bpe) * track->nbanks));
- if (!IS_ALIGNED(pitch, pitch_align)) {
- dev_warn(p->dev, "%s:%d tex pitch (%d) invalid\n",
- __func__, __LINE__, pitch);
- return -EINVAL;
- }
- /* XXX check height align */
- break;
- default:
- dev_warn(p->dev, "%s invalid tiling %d (0x%08X)\n", __func__,
- G_038000_TILE_MODE(word0), word0);
+ /* pitch in texels */
+ pitch = (G_038000_PITCH(word0) + 1) * 8;
+ array_check.array_mode = G_038000_TILE_MODE(word0);
+ array_check.group_size = track->group_size;
+ array_check.nbanks = track->nbanks;
+ array_check.npipes = track->npipes;
+ array_check.nsamples = 1;
+ array_check.bpe = bpe;
+ if (r600_get_array_mode_alignment(&array_check,
+ &pitch_align, &height_align, &depth_align, &base_align)) {
+ dev_warn(p->dev, "%s:%d tex array mode (%d) invalid\n",
+ __func__, __LINE__, G_038000_TILE_MODE(word0));
+ return -EINVAL;
+ }
+
+ /* XXX check height as well... */
+
+ if (!IS_ALIGNED(pitch, pitch_align)) {
+ dev_warn(p->dev, "%s:%d tex pitch (%d) invalid\n",
+ __func__, __LINE__, pitch);
+ return -EINVAL;
+ }
+ if (!IS_ALIGNED(base_offset, base_align)) {
+ dev_warn(p->dev, "%s:%d tex base offset (0x%llx) invalid\n",
+ __func__, __LINE__, base_offset);
+ return -EINVAL;
+ }
+ if (!IS_ALIGNED(mip_offset, base_align)) {
+ dev_warn(p->dev, "%s:%d tex mip offset (0x%llx) invalid\n",
+ __func__, __LINE__, mip_offset);
return -EINVAL;
}
- /* XXX check offset align */
word0 = radeon_get_ib_value(p, idx + 4);
word1 = radeon_get_ib_value(p, idx + 5);
@@ -1386,7 +1476,10 @@ static int r600_packet3_check(struct radeon_cs_parser *p,
mip_offset = (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff);
mipmap = reloc->robj;
r = r600_check_texture_resource(p, idx+(i*7)+1,
- texture, mipmap, reloc->lobj.tiling_flags);
+ texture, mipmap,
+ base_offset + radeon_get_ib_value(p, idx+1+(i*7)+2),
+ mip_offset + radeon_get_ib_value(p, idx+1+(i*7)+3),
+ reloc->lobj.tiling_flags);
if (r)
return r;
ib[idx+1+(i*7)+2] += base_offset;
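Editor's note: the r600_get_array_mode_alignment() helper introduced above centralizes the pitch/height/base alignment rules that were previously duplicated across the CB, DB and texture checks. As a worked example under assumed parameters (group_size 256, 4 banks, 2 pipes, 4 bytes per element, 1 sample; values chosen only for illustration), the ARRAY_2D_TILED_THIN1 case yields a 256-pixel pitch alignment, a 16-row height alignment and a 16384-byte base alignment:

#include <assert.h>
#include <stdint.h>

int main(void)
{
	uint32_t group_size = 256, nbanks = 4, npipes = 2, nsamples = 1, bpe = 4;
	uint32_t tile_width = 8, tile_height = 8;
	uint32_t tile_bytes = tile_width * tile_height * bpe * nsamples;	/* 256 */
	uint32_t macro_tile_bytes = nbanks * npipes * tile_bytes;		/* 2048 */

	/* same arithmetic as the ARRAY_2D_TILED_THIN1 case above */
	uint32_t t = ((group_size / tile_height) / (bpe * nsamples)) * nbanks;	/* 32 */
	uint32_t pitch_align = (t > nbanks ? t : nbanks) * tile_width;		/* 256 pixels */
	uint32_t height_align = npipes * tile_height;				/* 16 rows */
	uint64_t pitch_bytes = (uint64_t)pitch_align * bpe * height_align * nsamples;
	uint64_t base_align = macro_tile_bytes > pitch_bytes ? macro_tile_bytes : pitch_bytes;

	assert(pitch_align == 256 && height_align == 16 && base_align == 16384);
	return 0;
}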
diff --git a/drivers/gpu/drm/radeon/r600_reg.h b/drivers/gpu/drm/radeon/r600_reg.h
index d84612ae47e0..33cda016b083 100644
--- a/drivers/gpu/drm/radeon/r600_reg.h
+++ b/drivers/gpu/drm/radeon/r600_reg.h
@@ -86,6 +86,7 @@
#define R600_HDP_NONSURFACE_BASE 0x2c04
#define R600_BUS_CNTL 0x5420
+# define R600_BIOS_ROM_DIS (1 << 1)
#define R600_CONFIG_CNTL 0x5424
#define R600_CONFIG_MEMSIZE 0x5428
#define R600_CONFIG_F0_BASE 0x542C
diff --git a/drivers/gpu/drm/radeon/r600d.h b/drivers/gpu/drm/radeon/r600d.h
index 858a1920c0d7..bff4dc4f410f 100644
--- a/drivers/gpu/drm/radeon/r600d.h
+++ b/drivers/gpu/drm/radeon/r600d.h
@@ -51,6 +51,12 @@
#define PTE_READABLE (1 << 5)
#define PTE_WRITEABLE (1 << 6)
+/* tiling bits */
+#define ARRAY_LINEAR_GENERAL 0x00000000
+#define ARRAY_LINEAR_ALIGNED 0x00000001
+#define ARRAY_1D_TILED_THIN1 0x00000002
+#define ARRAY_2D_TILED_THIN1 0x00000004
+
/* Registers */
#define ARB_POP 0x2418
#define ENABLE_TC128 (1 << 30)
@@ -474,6 +480,7 @@
#define VGT_VERTEX_REUSE_BLOCK_CNTL 0x28C58
#define VTX_REUSE_DEPTH_MASK 0x000000FF
#define VGT_EVENT_INITIATOR 0x28a90
+# define CACHE_FLUSH_AND_INV_EVENT_TS (0x14 << 0)
# define CACHE_FLUSH_AND_INV_EVENT (0x16 << 0)
#define VM_CONTEXT0_CNTL 0x1410
@@ -775,7 +782,27 @@
#define PACKET3_ME_INITIALIZE_DEVICE_ID(x) ((x) << 16)
#define PACKET3_COND_WRITE 0x45
#define PACKET3_EVENT_WRITE 0x46
+#define EVENT_TYPE(x) ((x) << 0)
+#define EVENT_INDEX(x) ((x) << 8)
+ /* 0 - any non-TS event
+ * 1 - ZPASS_DONE
+ * 2 - SAMPLE_PIPELINESTAT
+ * 3 - SAMPLE_STREAMOUTSTAT*
+ * 4 - *S_PARTIAL_FLUSH
+ * 5 - TS events
+ */
#define PACKET3_EVENT_WRITE_EOP 0x47
+#define DATA_SEL(x) ((x) << 29)
+ /* 0 - discard
+ * 1 - send low 32bit data
+ * 2 - send 64bit data
+ * 3 - send 64bit counter value
+ */
+#define INT_SEL(x) ((x) << 24)
+ /* 0 - none
+ * 1 - interrupt only (DATA_SEL = 0)
+ * 2 - interrupt when data write is confirmed
+ */
#define PACKET3_ONE_REG_WRITE 0x57
#define PACKET3_SET_CONFIG_REG 0x68
#define PACKET3_SET_CONFIG_REG_OFFSET 0x00008000
diff --git a/drivers/gpu/drm/radeon/radeon.h b/drivers/gpu/drm/radeon/radeon.h
index 9ff38c99a6ea..3a7095743d44 100644
--- a/drivers/gpu/drm/radeon/radeon.h
+++ b/drivers/gpu/drm/radeon/radeon.h
@@ -88,7 +88,6 @@ extern int radeon_benchmarking;
extern int radeon_testing;
extern int radeon_connector_table;
extern int radeon_tv;
-extern int radeon_new_pll;
extern int radeon_audio;
extern int radeon_disp_priority;
extern int radeon_hw_i2c;
@@ -366,6 +365,7 @@ bool radeon_atombios_sideport_present(struct radeon_device *rdev);
*/
struct radeon_scratch {
unsigned num_reg;
+ uint32_t reg_base;
bool free[32];
uint32_t reg[32];
};
@@ -594,8 +594,15 @@ struct radeon_wb {
struct radeon_bo *wb_obj;
volatile uint32_t *wb;
uint64_t gpu_addr;
+ bool enabled;
+ bool use_event;
};
+#define RADEON_WB_SCRATCH_OFFSET 0
+#define RADEON_WB_CP_RPTR_OFFSET 1024
+#define R600_WB_IH_WPTR_OFFSET 2048
+#define R600_WB_EVENT_OFFSET 3072
+
/**
* struct radeon_pm - power management datas
* @max_bandwidth: maximum bandwidth the gpu has (MByte/s)
@@ -1124,6 +1131,12 @@ void r600_blit_done_copy(struct radeon_device *rdev, struct radeon_fence *fence)
void r600_kms_blit_copy(struct radeon_device *rdev,
u64 src_gpu_addr, u64 dst_gpu_addr,
int size_bytes);
+/* evergreen blit */
+int evergreen_blit_prepare_copy(struct radeon_device *rdev, int size_bytes);
+void evergreen_blit_done_copy(struct radeon_device *rdev, struct radeon_fence *fence);
+void evergreen_kms_blit_copy(struct radeon_device *rdev,
+ u64 src_gpu_addr, u64 dst_gpu_addr,
+ int size_bytes);
static inline uint32_t r100_mm_rreg(struct radeon_device *rdev, uint32_t reg)
{
@@ -1249,6 +1262,10 @@ void r100_pll_errata_after_index(struct radeon_device *rdev);
(rdev->family == CHIP_RS400) || \
(rdev->family == CHIP_RS480))
#define ASIC_IS_AVIVO(rdev) ((rdev->family >= CHIP_RS600))
+#define ASIC_IS_DCE2(rdev) ((rdev->family == CHIP_RS600) || \
+ (rdev->family == CHIP_RS690) || \
+ (rdev->family == CHIP_RS740) || \
+ (rdev->family >= CHIP_R600))
#define ASIC_IS_DCE3(rdev) ((rdev->family >= CHIP_RV620))
#define ASIC_IS_DCE32(rdev) ((rdev->family >= CHIP_RV730))
#define ASIC_IS_DCE4(rdev) ((rdev->family >= CHIP_CEDAR))
@@ -1341,6 +1358,9 @@ extern void radeon_update_bandwidth_info(struct radeon_device *rdev);
extern void radeon_update_display_priority(struct radeon_device *rdev);
extern bool radeon_boot_test_post_card(struct radeon_device *rdev);
extern void radeon_scratch_init(struct radeon_device *rdev);
+extern void radeon_wb_fini(struct radeon_device *rdev);
+extern int radeon_wb_init(struct radeon_device *rdev);
+extern void radeon_wb_disable(struct radeon_device *rdev);
extern void radeon_surface_init(struct radeon_device *rdev);
extern int radeon_cs_parser_init(struct radeon_cs_parser *p, void *data);
extern void radeon_legacy_set_clock_gating(struct radeon_device *rdev, int enable);
@@ -1425,9 +1445,6 @@ extern int r600_pcie_gart_init(struct radeon_device *rdev);
extern void r600_pcie_gart_tlb_flush(struct radeon_device *rdev);
extern int r600_ib_test(struct radeon_device *rdev);
extern int r600_ring_test(struct radeon_device *rdev);
-extern void r600_wb_fini(struct radeon_device *rdev);
-extern int r600_wb_enable(struct radeon_device *rdev);
-extern void r600_wb_disable(struct radeon_device *rdev);
extern void r600_scratch_init(struct radeon_device *rdev);
extern int r600_blit_init(struct radeon_device *rdev);
extern void r600_blit_fini(struct radeon_device *rdev);
@@ -1465,6 +1482,8 @@ extern void r700_cp_stop(struct radeon_device *rdev);
extern void r700_cp_fini(struct radeon_device *rdev);
extern void evergreen_disable_interrupt_state(struct radeon_device *rdev);
extern int evergreen_irq_set(struct radeon_device *rdev);
+extern int evergreen_blit_init(struct radeon_device *rdev);
+extern void evergreen_blit_fini(struct radeon_device *rdev);
/* radeon_acpi.c */
#if defined(CONFIG_ACPI)
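
The new RADEON_WB_*/R600_WB_* byte offsets carve one GPU page of GTT into fixed slots that both the CPU mapping and the GPU address refer to. A small sketch of that addressing, with the struct trimmed to the two relevant fields:

#include <stdint.h>

#define RADEON_WB_CP_RPTR_OFFSET	1024

struct wb_sketch {
	volatile uint32_t *wb;	/* kernel mapping of the writeback page */
	uint64_t gpu_addr;	/* GPU address of the same page */
};

static uint32_t wb_cp_rptr(const struct wb_sketch *w)
{
	/* offsets are in bytes; the mapping is indexed as a u32 array */
	return w->wb[RADEON_WB_CP_RPTR_OFFSET / 4];
}

static uint64_t wb_cp_rptr_gpu_addr(const struct wb_sketch *w)
{
	return w->gpu_addr + RADEON_WB_CP_RPTR_OFFSET;
}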
diff --git a/drivers/gpu/drm/radeon/radeon_asic.c b/drivers/gpu/drm/radeon/radeon_asic.c
index 25e1dd197791..64fb89ecbf74 100644
--- a/drivers/gpu/drm/radeon/radeon_asic.c
+++ b/drivers/gpu/drm/radeon/radeon_asic.c
@@ -726,9 +726,9 @@ static struct radeon_asic evergreen_asic = {
.get_vblank_counter = &evergreen_get_vblank_counter,
.fence_ring_emit = &r600_fence_ring_emit,
.cs_parse = &evergreen_cs_parse,
- .copy_blit = NULL,
- .copy_dma = NULL,
- .copy = NULL,
+ .copy_blit = &evergreen_copy_blit,
+ .copy_dma = &evergreen_copy_blit,
+ .copy = &evergreen_copy_blit,
.get_engine_clock = &radeon_atom_get_engine_clock,
.set_engine_clock = &radeon_atom_set_engine_clock,
.get_memory_clock = &radeon_atom_get_memory_clock,
diff --git a/drivers/gpu/drm/radeon/radeon_asic.h b/drivers/gpu/drm/radeon/radeon_asic.h
index a5aff755f0d2..740988244143 100644
--- a/drivers/gpu/drm/radeon/radeon_asic.h
+++ b/drivers/gpu/drm/radeon/radeon_asic.h
@@ -108,9 +108,6 @@ void r100_irq_disable(struct radeon_device *rdev);
void r100_mc_stop(struct radeon_device *rdev, struct r100_mc_save *save);
void r100_mc_resume(struct radeon_device *rdev, struct r100_mc_save *save);
void r100_vram_init_sizes(struct radeon_device *rdev);
-void r100_wb_disable(struct radeon_device *rdev);
-void r100_wb_fini(struct radeon_device *rdev);
-int r100_wb_init(struct radeon_device *rdev);
int r100_cp_reset(struct radeon_device *rdev);
void r100_vga_render_disable(struct radeon_device *rdev);
void r100_restore_sanity(struct radeon_device *rdev);
@@ -257,11 +254,6 @@ void r600_pciep_wreg(struct radeon_device *rdev, uint32_t reg, uint32_t v);
int r600_cs_parse(struct radeon_cs_parser *p);
void r600_fence_ring_emit(struct radeon_device *rdev,
struct radeon_fence *fence);
-int r600_copy_dma(struct radeon_device *rdev,
- uint64_t src_offset,
- uint64_t dst_offset,
- unsigned num_pages,
- struct radeon_fence *fence);
int r600_irq_process(struct radeon_device *rdev);
int r600_irq_set(struct radeon_device *rdev);
bool r600_gpu_is_lockup(struct radeon_device *rdev);
@@ -307,6 +299,9 @@ int evergreen_resume(struct radeon_device *rdev);
bool evergreen_gpu_is_lockup(struct radeon_device *rdev);
int evergreen_asic_reset(struct radeon_device *rdev);
void evergreen_bandwidth_update(struct radeon_device *rdev);
+int evergreen_copy_blit(struct radeon_device *rdev,
+ uint64_t src_offset, uint64_t dst_offset,
+ unsigned num_pages, struct radeon_fence *fence);
void evergreen_hpd_init(struct radeon_device *rdev);
void evergreen_hpd_fini(struct radeon_device *rdev);
bool evergreen_hpd_sense(struct radeon_device *rdev, enum radeon_hpd_id hpd);
diff --git a/drivers/gpu/drm/radeon/radeon_atombios.c b/drivers/gpu/drm/radeon/radeon_atombios.c
index 8e43ddae70cc..bc5a2c3382d9 100644
--- a/drivers/gpu/drm/radeon/radeon_atombios.c
+++ b/drivers/gpu/drm/radeon/radeon_atombios.c
@@ -98,6 +98,14 @@ static inline struct radeon_i2c_bus_rec radeon_lookup_i2c_gpio(struct radeon_dev
}
}
+ /* some DCE3 boards have bad data for this entry */
+ if (ASIC_IS_DCE3(rdev)) {
+ if ((i == 4) &&
+ (gpio->usClkMaskRegisterIndex == 0x1fda) &&
+ (gpio->sucI2cId.ucAccess == 0x94))
+ gpio->sucI2cId.ucAccess = 0x14;
+ }
+
if (gpio->sucI2cId.ucAccess == id) {
i2c.mask_clk_reg = le16_to_cpu(gpio->usClkMaskRegisterIndex) * 4;
i2c.mask_data_reg = le16_to_cpu(gpio->usDataMaskRegisterIndex) * 4;
@@ -174,6 +182,14 @@ void radeon_atombios_i2c_init(struct radeon_device *rdev)
}
}
+ /* some DCE3 boards have bad data for this entry */
+ if (ASIC_IS_DCE3(rdev)) {
+ if ((i == 4) &&
+ (gpio->usClkMaskRegisterIndex == 0x1fda) &&
+ (gpio->sucI2cId.ucAccess == 0x94))
+ gpio->sucI2cId.ucAccess = 0x14;
+ }
+
i2c.mask_clk_reg = le16_to_cpu(gpio->usClkMaskRegisterIndex) * 4;
i2c.mask_data_reg = le16_to_cpu(gpio->usDataMaskRegisterIndex) * 4;
i2c.en_clk_reg = le16_to_cpu(gpio->usClkEnRegisterIndex) * 4;
@@ -526,8 +542,6 @@ bool radeon_get_atom_connector_info_from_object_table(struct drm_device *dev)
if (crev < 2)
return false;
- router.valid = false;
-
obj_header = (ATOM_OBJECT_HEADER *) (ctx->bios + data_offset);
path_obj = (ATOM_DISPLAY_OBJECT_PATH_TABLE *)
(ctx->bios + data_offset +
@@ -624,6 +638,8 @@ bool radeon_get_atom_connector_info_from_object_table(struct drm_device *dev)
if (connector_type == DRM_MODE_CONNECTOR_Unknown)
continue;
+ router.ddc_valid = false;
+ router.cd_valid = false;
for (j = 0; j < ((le16_to_cpu(path->usSize) - 8) / 2); j++) {
uint8_t grph_obj_id, grph_obj_num, grph_obj_type;
@@ -647,9 +663,8 @@ bool radeon_get_atom_connector_info_from_object_table(struct drm_device *dev)
usDeviceTag));
} else if (grph_obj_type == GRAPH_OBJECT_TYPE_ROUTER) {
- router.valid = false;
for (k = 0; k < router_obj->ucNumberOfObjects; k++) {
- u16 router_obj_id = le16_to_cpu(router_obj->asObjects[j].usObjectID);
+ u16 router_obj_id = le16_to_cpu(router_obj->asObjects[k].usObjectID);
if (le16_to_cpu(path->usGraphicObjIds[j]) == router_obj_id) {
ATOM_COMMON_RECORD_HEADER *record = (ATOM_COMMON_RECORD_HEADER *)
(ctx->bios + data_offset +
@@ -657,6 +672,7 @@ bool radeon_get_atom_connector_info_from_object_table(struct drm_device *dev)
ATOM_I2C_RECORD *i2c_record;
ATOM_I2C_ID_CONFIG_ACCESS *i2c_config;
ATOM_ROUTER_DDC_PATH_SELECT_RECORD *ddc_path;
+ ATOM_ROUTER_DATA_CLOCK_PATH_SELECT_RECORD *cd_path;
ATOM_SRC_DST_TABLE_FOR_ONE_OBJECT *router_src_dst_table =
(ATOM_SRC_DST_TABLE_FOR_ONE_OBJECT *)
(ctx->bios + data_offset +
@@ -690,10 +706,18 @@ bool radeon_get_atom_connector_info_from_object_table(struct drm_device *dev)
case ATOM_ROUTER_DDC_PATH_SELECT_RECORD_TYPE:
ddc_path = (ATOM_ROUTER_DDC_PATH_SELECT_RECORD *)
record;
- router.valid = true;
- router.mux_type = ddc_path->ucMuxType;
- router.mux_control_pin = ddc_path->ucMuxControlPin;
- router.mux_state = ddc_path->ucMuxState[enum_id];
+ router.ddc_valid = true;
+ router.ddc_mux_type = ddc_path->ucMuxType;
+ router.ddc_mux_control_pin = ddc_path->ucMuxControlPin;
+ router.ddc_mux_state = ddc_path->ucMuxState[enum_id];
+ break;
+ case ATOM_ROUTER_DATA_CLOCK_PATH_SELECT_RECORD_TYPE:
+ cd_path = (ATOM_ROUTER_DATA_CLOCK_PATH_SELECT_RECORD *)
+ record;
+ router.cd_valid = true;
+ router.cd_mux_type = cd_path->ucMuxType;
+ router.cd_mux_control_pin = cd_path->ucMuxControlPin;
+ router.cd_mux_state = cd_path->ucMuxState[enum_id];
break;
}
record = (ATOM_COMMON_RECORD_HEADER *)
@@ -860,7 +884,8 @@ bool radeon_get_atom_connector_info_from_supported_devices_table(struct
size_t bc_size = sizeof(*bios_connectors) * ATOM_MAX_SUPPORTED_DEVICE;
struct radeon_router router;
- router.valid = false;
+ router.ddc_valid = false;
+ router.cd_valid = false;
bios_connectors = kzalloc(bc_size, GFP_KERNEL);
if (!bios_connectors)
@@ -1112,8 +1137,7 @@ bool radeon_atom_get_clock_info(struct drm_device *dev)
* pre-DCE 3.0 r6xx hardware. This might need to be adjusted per
* family.
*/
- if (!radeon_new_pll)
- p1pll->pll_out_min = 64800;
+ p1pll->pll_out_min = 64800;
}
p1pll->pll_in_min =
@@ -1277,36 +1301,27 @@ bool radeon_atombios_get_tmds_info(struct radeon_encoder *encoder,
return false;
}
-static struct radeon_atom_ss *radeon_atombios_get_ss_info(struct
- radeon_encoder
- *encoder,
- int id)
+bool radeon_atombios_get_ppll_ss_info(struct radeon_device *rdev,
+ struct radeon_atom_ss *ss,
+ int id)
{
- struct drm_device *dev = encoder->base.dev;
- struct radeon_device *rdev = dev->dev_private;
struct radeon_mode_info *mode_info = &rdev->mode_info;
int index = GetIndexIntoMasterTable(DATA, PPLL_SS_Info);
- uint16_t data_offset;
+ uint16_t data_offset, size;
struct _ATOM_SPREAD_SPECTRUM_INFO *ss_info;
uint8_t frev, crev;
- struct radeon_atom_ss *ss = NULL;
- int i;
-
- if (id > ATOM_MAX_SS_ENTRY)
- return NULL;
+ int i, num_indices;
- if (atom_parse_data_header(mode_info->atom_context, index, NULL,
+ memset(ss, 0, sizeof(struct radeon_atom_ss));
+ if (atom_parse_data_header(mode_info->atom_context, index, &size,
&frev, &crev, &data_offset)) {
ss_info =
(struct _ATOM_SPREAD_SPECTRUM_INFO *)(mode_info->atom_context->bios + data_offset);
- ss =
- kzalloc(sizeof(struct radeon_atom_ss), GFP_KERNEL);
-
- if (!ss)
- return NULL;
+ num_indices = (size - sizeof(ATOM_COMMON_TABLE_HEADER)) /
+ sizeof(ATOM_SPREAD_SPECTRUM_ASSIGNMENT);
- for (i = 0; i < ATOM_MAX_SS_ENTRY; i++) {
+ for (i = 0; i < num_indices; i++) {
if (ss_info->asSS_Info[i].ucSS_Id == id) {
ss->percentage =
le16_to_cpu(ss_info->asSS_Info[i].usSpreadSpectrumPercentage);
@@ -1315,11 +1330,88 @@ static struct radeon_atom_ss *radeon_atombios_get_ss_info(struct
ss->delay = ss_info->asSS_Info[i].ucSS_Delay;
ss->range = ss_info->asSS_Info[i].ucSS_Range;
ss->refdiv = ss_info->asSS_Info[i].ucRecommendedRef_Div;
- break;
+ return true;
}
}
}
- return ss;
+ return false;
+}
+
+union asic_ss_info {
+ struct _ATOM_ASIC_INTERNAL_SS_INFO info;
+ struct _ATOM_ASIC_INTERNAL_SS_INFO_V2 info_2;
+ struct _ATOM_ASIC_INTERNAL_SS_INFO_V3 info_3;
+};
+
+bool radeon_atombios_get_asic_ss_info(struct radeon_device *rdev,
+ struct radeon_atom_ss *ss,
+ int id, u32 clock)
+{
+ struct radeon_mode_info *mode_info = &rdev->mode_info;
+ int index = GetIndexIntoMasterTable(DATA, ASIC_InternalSS_Info);
+ uint16_t data_offset, size;
+ union asic_ss_info *ss_info;
+ uint8_t frev, crev;
+ int i, num_indices;
+
+ memset(ss, 0, sizeof(struct radeon_atom_ss));
+ if (atom_parse_data_header(mode_info->atom_context, index, &size,
+ &frev, &crev, &data_offset)) {
+
+ ss_info =
+ (union asic_ss_info *)(mode_info->atom_context->bios + data_offset);
+
+ switch (frev) {
+ case 1:
+ num_indices = (size - sizeof(ATOM_COMMON_TABLE_HEADER)) /
+ sizeof(ATOM_ASIC_SS_ASSIGNMENT);
+
+ for (i = 0; i < num_indices; i++) {
+ if ((ss_info->info.asSpreadSpectrum[i].ucClockIndication == id) &&
+ (clock <= ss_info->info.asSpreadSpectrum[i].ulTargetClockRange)) {
+ ss->percentage =
+ le16_to_cpu(ss_info->info.asSpreadSpectrum[i].usSpreadSpectrumPercentage);
+ ss->type = ss_info->info.asSpreadSpectrum[i].ucSpreadSpectrumMode;
+ ss->rate = le16_to_cpu(ss_info->info.asSpreadSpectrum[i].usSpreadRateInKhz);
+ return true;
+ }
+ }
+ break;
+ case 2:
+ num_indices = (size - sizeof(ATOM_COMMON_TABLE_HEADER)) /
+ sizeof(ATOM_ASIC_SS_ASSIGNMENT_V2);
+ for (i = 0; i < num_indices; i++) {
+ if ((ss_info->info_2.asSpreadSpectrum[i].ucClockIndication == id) &&
+ (clock <= ss_info->info_2.asSpreadSpectrum[i].ulTargetClockRange)) {
+ ss->percentage =
+ le16_to_cpu(ss_info->info_2.asSpreadSpectrum[i].usSpreadSpectrumPercentage);
+ ss->type = ss_info->info_2.asSpreadSpectrum[i].ucSpreadSpectrumMode;
+ ss->rate = le16_to_cpu(ss_info->info_2.asSpreadSpectrum[i].usSpreadRateIn10Hz);
+ return true;
+ }
+ }
+ break;
+ case 3:
+ num_indices = (size - sizeof(ATOM_COMMON_TABLE_HEADER)) /
+ sizeof(ATOM_ASIC_SS_ASSIGNMENT_V3);
+ for (i = 0; i < num_indices; i++) {
+ if ((ss_info->info_3.asSpreadSpectrum[i].ucClockIndication == id) &&
+ (clock <= ss_info->info_3.asSpreadSpectrum[i].ulTargetClockRange)) {
+ ss->percentage =
+ le16_to_cpu(ss_info->info_3.asSpreadSpectrum[i].usSpreadSpectrumPercentage);
+ ss->type = ss_info->info_3.asSpreadSpectrum[i].ucSpreadSpectrumMode;
+ ss->rate = le16_to_cpu(ss_info->info_3.asSpreadSpectrum[i].usSpreadRateIn10Hz);
+ return true;
+ }
+ }
+ break;
+ default:
+ DRM_ERROR("Unsupported ASIC_InternalSS_Info table: %d %d\n", frev, crev);
+ break;
+ }
+
+ }
+ return false;
}
union lvds_info {
@@ -1371,7 +1463,7 @@ struct radeon_encoder_atom_dig *radeon_atombios_get_lvds_info(struct
le16_to_cpu(lvds_info->info.sLCDTiming.usVSyncWidth);
lvds->panel_pwr_delay =
le16_to_cpu(lvds_info->info.usOffDelayInMs);
- lvds->lvds_misc = lvds_info->info.ucLVDS_Misc;
+ lvds->lcd_misc = lvds_info->info.ucLVDS_Misc;
misc = le16_to_cpu(lvds_info->info.sLCDTiming.susModeMiscInfo.usAccess);
if (misc & ATOM_VSYNC_POLARITY)
@@ -1388,19 +1480,7 @@ struct radeon_encoder_atom_dig *radeon_atombios_get_lvds_info(struct
/* set crtc values */
drm_mode_set_crtcinfo(&lvds->native_mode, CRTC_INTERLACE_HALVE_V);
- lvds->ss = radeon_atombios_get_ss_info(encoder, lvds_info->info.ucSS_Id);
-
- if (ASIC_IS_AVIVO(rdev)) {
- if (radeon_new_pll == 0)
- lvds->pll_algo = PLL_ALGO_LEGACY;
- else
- lvds->pll_algo = PLL_ALGO_NEW;
- } else {
- if (radeon_new_pll == 1)
- lvds->pll_algo = PLL_ALGO_NEW;
- else
- lvds->pll_algo = PLL_ALGO_LEGACY;
- }
+ lvds->lcd_ss_id = lvds_info->info.ucSS_Id;
encoder->native_mode = lvds->native_mode;
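
The reworked radeon_atombios_get_ppll_ss_info() and the new ASIC SS helper size their loops from the table header instead of a fixed ATOM_MAX_SS_ENTRY. A sketch of that entry-count computation with placeholder struct sizes (the real layouts come from the AtomBIOS headers):

#include <stdint.h>

struct common_table_header { uint16_t size; uint8_t frev; uint8_t crev; };
struct ss_assignment { uint8_t raw[12]; };	/* placeholder entry layout */

static int ss_num_indices(uint16_t table_size)
{
	return (table_size - sizeof(struct common_table_header)) /
		sizeof(struct ss_assignment);
}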
diff --git a/drivers/gpu/drm/radeon/radeon_benchmark.c b/drivers/gpu/drm/radeon/radeon_benchmark.c
index 7932dc4d6b90..c558685cc637 100644
--- a/drivers/gpu/drm/radeon/radeon_benchmark.c
+++ b/drivers/gpu/drm/radeon/radeon_benchmark.c
@@ -41,7 +41,7 @@ void radeon_benchmark_move(struct radeon_device *rdev, unsigned bsize,
size = bsize;
n = 1024;
- r = radeon_bo_create(rdev, NULL, size, true, sdomain, &sobj);
+ r = radeon_bo_create(rdev, NULL, size, PAGE_SIZE, true, sdomain, &sobj);
if (r) {
goto out_cleanup;
}
@@ -53,7 +53,7 @@ void radeon_benchmark_move(struct radeon_device *rdev, unsigned bsize,
if (r) {
goto out_cleanup;
}
- r = radeon_bo_create(rdev, NULL, size, true, ddomain, &dobj);
+ r = radeon_bo_create(rdev, NULL, size, PAGE_SIZE, true, ddomain, &dobj);
if (r) {
goto out_cleanup;
}
diff --git a/drivers/gpu/drm/radeon/radeon_bios.c b/drivers/gpu/drm/radeon/radeon_bios.c
index 654787ec43f4..8f2c7b50dcf5 100644
--- a/drivers/gpu/drm/radeon/radeon_bios.c
+++ b/drivers/gpu/drm/radeon/radeon_bios.c
@@ -130,6 +130,7 @@ static bool radeon_atrm_get_bios(struct radeon_device *rdev)
}
return true;
}
+
static bool r700_read_disabled_bios(struct radeon_device *rdev)
{
uint32_t viph_control;
@@ -143,7 +144,7 @@ static bool r700_read_disabled_bios(struct radeon_device *rdev)
bool r;
viph_control = RREG32(RADEON_VIPH_CONTROL);
- bus_cntl = RREG32(RADEON_BUS_CNTL);
+ bus_cntl = RREG32(R600_BUS_CNTL);
d1vga_control = RREG32(AVIVO_D1VGA_CONTROL);
d2vga_control = RREG32(AVIVO_D2VGA_CONTROL);
vga_render_control = RREG32(AVIVO_VGA_RENDER_CONTROL);
@@ -152,7 +153,7 @@ static bool r700_read_disabled_bios(struct radeon_device *rdev)
/* disable VIP */
WREG32(RADEON_VIPH_CONTROL, (viph_control & ~RADEON_VIPH_EN));
/* enable the rom */
- WREG32(RADEON_BUS_CNTL, (bus_cntl & ~RADEON_BUS_BIOS_DIS_ROM));
+ WREG32(R600_BUS_CNTL, (bus_cntl & ~R600_BIOS_ROM_DIS));
/* Disable VGA mode */
WREG32(AVIVO_D1VGA_CONTROL,
(d1vga_control & ~(AVIVO_DVGA_CONTROL_MODE_ENABLE |
@@ -191,7 +192,7 @@ static bool r700_read_disabled_bios(struct radeon_device *rdev)
cg_spll_status = RREG32(R600_CG_SPLL_STATUS);
}
WREG32(RADEON_VIPH_CONTROL, viph_control);
- WREG32(RADEON_BUS_CNTL, bus_cntl);
+ WREG32(R600_BUS_CNTL, bus_cntl);
WREG32(AVIVO_D1VGA_CONTROL, d1vga_control);
WREG32(AVIVO_D2VGA_CONTROL, d2vga_control);
WREG32(AVIVO_VGA_RENDER_CONTROL, vga_render_control);
@@ -216,7 +217,7 @@ static bool r600_read_disabled_bios(struct radeon_device *rdev)
bool r;
viph_control = RREG32(RADEON_VIPH_CONTROL);
- bus_cntl = RREG32(RADEON_BUS_CNTL);
+ bus_cntl = RREG32(R600_BUS_CNTL);
d1vga_control = RREG32(AVIVO_D1VGA_CONTROL);
d2vga_control = RREG32(AVIVO_D2VGA_CONTROL);
vga_render_control = RREG32(AVIVO_VGA_RENDER_CONTROL);
@@ -231,7 +232,7 @@ static bool r600_read_disabled_bios(struct radeon_device *rdev)
/* disable VIP */
WREG32(RADEON_VIPH_CONTROL, (viph_control & ~RADEON_VIPH_EN));
/* enable the rom */
- WREG32(RADEON_BUS_CNTL, (bus_cntl & ~RADEON_BUS_BIOS_DIS_ROM));
+ WREG32(R600_BUS_CNTL, (bus_cntl & ~R600_BIOS_ROM_DIS));
/* Disable VGA mode */
WREG32(AVIVO_D1VGA_CONTROL,
(d1vga_control & ~(AVIVO_DVGA_CONTROL_MODE_ENABLE |
@@ -262,7 +263,7 @@ static bool r600_read_disabled_bios(struct radeon_device *rdev)
/* restore regs */
WREG32(RADEON_VIPH_CONTROL, viph_control);
- WREG32(RADEON_BUS_CNTL, bus_cntl);
+ WREG32(R600_BUS_CNTL, bus_cntl);
WREG32(AVIVO_D1VGA_CONTROL, d1vga_control);
WREG32(AVIVO_D2VGA_CONTROL, d2vga_control);
WREG32(AVIVO_VGA_RENDER_CONTROL, vga_render_control);
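
The r6xx/r7xx BIOS read paths now toggle the ROM through R600_BUS_CNTL, whose ROM-disable bit differs from the legacy RADEON_BUS_BIOS_DIS_ROM. A tiny sketch of the enable arithmetic, with the register access itself omitted:

#include <stdint.h>

#define R600_BIOS_ROM_DIS	(1 << 1)

static uint32_t rom_enabled_bus_cntl(uint32_t bus_cntl)
{
	return bus_cntl & ~R600_BIOS_ROM_DIS;	/* clear the disable bit */
}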
diff --git a/drivers/gpu/drm/radeon/radeon_combios.c b/drivers/gpu/drm/radeon/radeon_combios.c
index 7b7ea269549c..137b8075f6e7 100644
--- a/drivers/gpu/drm/radeon/radeon_combios.c
+++ b/drivers/gpu/drm/radeon/radeon_combios.c
@@ -571,6 +571,7 @@ static struct radeon_i2c_bus_rec combios_setup_i2c_bus(struct radeon_device *rde
}
if (clk_mask && data_mask) {
+ /* system specific masks */
i2c.mask_clk_mask = clk_mask;
i2c.mask_data_mask = data_mask;
i2c.a_clk_mask = clk_mask;
@@ -579,7 +580,19 @@ static struct radeon_i2c_bus_rec combios_setup_i2c_bus(struct radeon_device *rde
i2c.en_data_mask = data_mask;
i2c.y_clk_mask = clk_mask;
i2c.y_data_mask = data_mask;
+ } else if ((ddc_line == RADEON_GPIOPAD_MASK) ||
+ (ddc_line == RADEON_MDGPIO_MASK)) {
+ /* default gpiopad masks */
+ i2c.mask_clk_mask = (0x20 << 8);
+ i2c.mask_data_mask = 0x80;
+ i2c.a_clk_mask = (0x20 << 8);
+ i2c.a_data_mask = 0x80;
+ i2c.en_clk_mask = (0x20 << 8);
+ i2c.en_data_mask = 0x80;
+ i2c.y_clk_mask = (0x20 << 8);
+ i2c.y_data_mask = 0x80;
} else {
+ /* default masks for ddc pads */
i2c.mask_clk_mask = RADEON_GPIO_EN_1;
i2c.mask_data_mask = RADEON_GPIO_EN_0;
i2c.a_clk_mask = RADEON_GPIO_A_1;
@@ -716,7 +729,7 @@ void radeon_combios_i2c_init(struct radeon_device *rdev)
clk = RBIOS8(offset + 3 + (i * 5) + 3);
data = RBIOS8(offset + 3 + (i * 5) + 4);
i2c = combios_setup_i2c_bus(rdev, DDC_MONID,
- clk, data);
+ (1 << clk), (1 << data));
rdev->i2c_bus[4] = radeon_i2c_create(dev, &i2c, "GPIOPAD_MASK");
break;
}
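
The GPIOPAD table stores bit indices, so the call above now converts them to masks before handing them to combios_setup_i2c_bus(). A sketch of that conversion with made-up index values:

#include <stdint.h>

static void gpiopad_to_masks(uint8_t clk_idx, uint8_t data_idx,
			     uint32_t *clk_mask, uint32_t *data_mask)
{
	*clk_mask = 1u << clk_idx;	/* e.g. index 13 -> 0x2000 */
	*data_mask = 1u << data_idx;
}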
diff --git a/drivers/gpu/drm/radeon/radeon_connectors.c b/drivers/gpu/drm/radeon/radeon_connectors.c
index ecc1a8fafbfd..8afaf7a7459e 100644
--- a/drivers/gpu/drm/radeon/radeon_connectors.c
+++ b/drivers/gpu/drm/radeon/radeon_connectors.c
@@ -183,13 +183,13 @@ radeon_connector_analog_encoder_conflict_solve(struct drm_connector *connector,
continue;
if (priority == true) {
- DRM_INFO("1: conflicting encoders switching off %s\n", drm_get_connector_name(conflict));
- DRM_INFO("in favor of %s\n", drm_get_connector_name(connector));
+ DRM_DEBUG_KMS("1: conflicting encoders switching off %s\n", drm_get_connector_name(conflict));
+ DRM_DEBUG_KMS("in favor of %s\n", drm_get_connector_name(connector));
conflict->status = connector_status_disconnected;
radeon_connector_update_scratch_regs(conflict, connector_status_disconnected);
} else {
- DRM_INFO("2: conflicting encoders switching off %s\n", drm_get_connector_name(connector));
- DRM_INFO("in favor of %s\n", drm_get_connector_name(conflict));
+ DRM_DEBUG_KMS("2: conflicting encoders switching off %s\n", drm_get_connector_name(connector));
+ DRM_DEBUG_KMS("in favor of %s\n", drm_get_connector_name(conflict));
current_status = connector_status_disconnected;
}
break;
@@ -326,6 +326,34 @@ int radeon_connector_set_property(struct drm_connector *connector, struct drm_pr
}
}
+ if (property == rdev->mode_info.underscan_hborder_property) {
+ /* need to find digital encoder on connector */
+ encoder = radeon_find_encoder(connector, DRM_MODE_ENCODER_TMDS);
+ if (!encoder)
+ return 0;
+
+ radeon_encoder = to_radeon_encoder(encoder);
+
+ if (radeon_encoder->underscan_hborder != val) {
+ radeon_encoder->underscan_hborder = val;
+ radeon_property_change_mode(&radeon_encoder->base);
+ }
+ }
+
+ if (property == rdev->mode_info.underscan_vborder_property) {
+ /* need to find digital encoder on connector */
+ encoder = radeon_find_encoder(connector, DRM_MODE_ENCODER_TMDS);
+ if (!encoder)
+ return 0;
+
+ radeon_encoder = to_radeon_encoder(encoder);
+
+ if (radeon_encoder->underscan_vborder != val) {
+ radeon_encoder->underscan_vborder = val;
+ radeon_property_change_mode(&radeon_encoder->base);
+ }
+ }
+
if (property == rdev->mode_info.tv_std_property) {
encoder = radeon_find_encoder(connector, DRM_MODE_ENCODER_TVDAC);
if (!encoder) {
@@ -404,13 +432,13 @@ static void radeon_fixup_lvds_native_mode(struct drm_encoder *encoder,
mode->vdisplay == native_mode->vdisplay) {
*native_mode = *mode;
drm_mode_set_crtcinfo(native_mode, CRTC_INTERLACE_HALVE_V);
- DRM_INFO("Determined LVDS native mode details from EDID\n");
+ DRM_DEBUG_KMS("Determined LVDS native mode details from EDID\n");
break;
}
}
}
if (!native_mode->clock) {
- DRM_INFO("No LVDS native mode details, disabling RMX\n");
+ DRM_DEBUG_KMS("No LVDS native mode details, disabling RMX\n");
radeon_encoder->rmx_type = RMX_OFF;
}
}
@@ -635,6 +663,11 @@ radeon_vga_detect(struct drm_connector *connector, bool force)
ret = connector_status_connected;
}
} else {
+
+ /* if we aren't forcing don't do destructive polling */
+ if (!force)
+ return connector->status;
+
if (radeon_connector->dac_load_detect && encoder) {
encoder_funcs = encoder->helper_private;
ret = encoder_funcs->detect(encoder, connector);
@@ -822,6 +855,11 @@ radeon_dvi_detect(struct drm_connector *connector, bool force)
if ((ret == connector_status_connected) && (radeon_connector->use_digital == true))
goto out;
+ if (!force) {
+ ret = connector->status;
+ goto out;
+ }
+
/* find analog encoder */
if (radeon_connector->dac_load_detect) {
for (i = 0; i < DRM_CONNECTOR_MAX_ENCODER; i++) {
@@ -970,9 +1008,21 @@ static void radeon_dp_connector_destroy(struct drm_connector *connector)
static int radeon_dp_get_modes(struct drm_connector *connector)
{
struct radeon_connector *radeon_connector = to_radeon_connector(connector);
+ struct radeon_connector_atom_dig *radeon_dig_connector = radeon_connector->con_priv;
int ret;
+ if (connector->connector_type == DRM_MODE_CONNECTOR_eDP) {
+ if (!radeon_dig_connector->edp_on)
+ atombios_set_edp_panel_power(connector,
+ ATOM_TRANSMITTER_ACTION_POWER_ON);
+ }
ret = radeon_ddc_get_modes(radeon_connector);
+ if (connector->connector_type == DRM_MODE_CONNECTOR_eDP) {
+ if (!radeon_dig_connector->edp_on)
+ atombios_set_edp_panel_power(connector,
+ ATOM_TRANSMITTER_ACTION_POWER_OFF);
+ }
+
return ret;
}
@@ -991,8 +1041,14 @@ radeon_dp_detect(struct drm_connector *connector, bool force)
if (connector->connector_type == DRM_MODE_CONNECTOR_eDP) {
/* eDP is always DP */
radeon_dig_connector->dp_sink_type = CONNECTOR_OBJECT_ID_DISPLAYPORT;
+ if (!radeon_dig_connector->edp_on)
+ atombios_set_edp_panel_power(connector,
+ ATOM_TRANSMITTER_ACTION_POWER_ON);
if (radeon_dp_getdpcd(radeon_connector))
ret = connector_status_connected;
+ if (!radeon_dig_connector->edp_on)
+ atombios_set_edp_panel_power(connector,
+ ATOM_TRANSMITTER_ACTION_POWER_OFF);
} else {
radeon_dig_connector->dp_sink_type = radeon_dp_getsinktype(radeon_connector);
if (radeon_dig_connector->dp_sink_type == CONNECTOR_OBJECT_ID_DISPLAYPORT) {
@@ -1078,7 +1134,7 @@ radeon_add_atom_connector(struct drm_device *dev,
radeon_connector->shared_ddc = true;
shared_ddc = true;
}
- if (radeon_connector->router_bus && router->valid &&
+ if (radeon_connector->router_bus && router->ddc_valid &&
(radeon_connector->router.router_id == router->router_id)) {
radeon_connector->shared_ddc = false;
shared_ddc = false;
@@ -1098,7 +1154,7 @@ radeon_add_atom_connector(struct drm_device *dev,
radeon_connector->connector_object_id = connector_object_id;
radeon_connector->hpd = *hpd;
radeon_connector->router = *router;
- if (router->valid) {
+ if (router->ddc_valid || router->cd_valid) {
radeon_connector->router_bus = radeon_i2c_lookup(rdev, &router->i2c_info);
if (!radeon_connector->router_bus)
goto failed;
@@ -1119,6 +1175,8 @@ radeon_add_atom_connector(struct drm_device *dev,
/* no HPD on analog connectors */
radeon_connector->hpd.hpd = RADEON_HPD_NONE;
connector->polled = DRM_CONNECTOR_POLL_CONNECT;
+ connector->interlace_allowed = true;
+ connector->doublescan_allowed = true;
break;
case DRM_MODE_CONNECTOR_DVIA:
drm_connector_init(dev, &radeon_connector->base, &radeon_vga_connector_funcs, connector_type);
@@ -1134,6 +1192,8 @@ radeon_add_atom_connector(struct drm_device *dev,
1);
/* no HPD on analog connectors */
radeon_connector->hpd.hpd = RADEON_HPD_NONE;
+ connector->interlace_allowed = true;
+ connector->doublescan_allowed = true;
break;
case DRM_MODE_CONNECTOR_DVII:
case DRM_MODE_CONNECTOR_DVID:
@@ -1153,16 +1213,28 @@ radeon_add_atom_connector(struct drm_device *dev,
drm_connector_attach_property(&radeon_connector->base,
rdev->mode_info.coherent_mode_property,
1);
- if (ASIC_IS_AVIVO(rdev))
+ if (ASIC_IS_AVIVO(rdev)) {
drm_connector_attach_property(&radeon_connector->base,
rdev->mode_info.underscan_property,
UNDERSCAN_AUTO);
+ drm_connector_attach_property(&radeon_connector->base,
+ rdev->mode_info.underscan_hborder_property,
+ 0);
+ drm_connector_attach_property(&radeon_connector->base,
+ rdev->mode_info.underscan_vborder_property,
+ 0);
+ }
if (connector_type == DRM_MODE_CONNECTOR_DVII) {
radeon_connector->dac_load_detect = true;
drm_connector_attach_property(&radeon_connector->base,
rdev->mode_info.load_detect_property,
1);
}
+ connector->interlace_allowed = true;
+ if (connector_type == DRM_MODE_CONNECTOR_DVII)
+ connector->doublescan_allowed = true;
+ else
+ connector->doublescan_allowed = false;
break;
case DRM_MODE_CONNECTOR_HDMIA:
case DRM_MODE_CONNECTOR_HDMIB:
@@ -1181,11 +1253,23 @@ radeon_add_atom_connector(struct drm_device *dev,
drm_connector_attach_property(&radeon_connector->base,
rdev->mode_info.coherent_mode_property,
1);
- if (ASIC_IS_AVIVO(rdev))
+ if (ASIC_IS_AVIVO(rdev)) {
drm_connector_attach_property(&radeon_connector->base,
rdev->mode_info.underscan_property,
UNDERSCAN_AUTO);
+ drm_connector_attach_property(&radeon_connector->base,
+ rdev->mode_info.underscan_hborder_property,
+ 0);
+ drm_connector_attach_property(&radeon_connector->base,
+ rdev->mode_info.underscan_vborder_property,
+ 0);
+ }
subpixel_order = SubPixelHorizontalRGB;
+ connector->interlace_allowed = true;
+ if (connector_type == DRM_MODE_CONNECTOR_HDMIB)
+ connector->doublescan_allowed = true;
+ else
+ connector->doublescan_allowed = false;
break;
case DRM_MODE_CONNECTOR_DisplayPort:
case DRM_MODE_CONNECTOR_eDP:
@@ -1212,10 +1296,20 @@ radeon_add_atom_connector(struct drm_device *dev,
drm_connector_attach_property(&radeon_connector->base,
rdev->mode_info.coherent_mode_property,
1);
- if (ASIC_IS_AVIVO(rdev))
+ if (ASIC_IS_AVIVO(rdev)) {
drm_connector_attach_property(&radeon_connector->base,
rdev->mode_info.underscan_property,
UNDERSCAN_AUTO);
+ drm_connector_attach_property(&radeon_connector->base,
+ rdev->mode_info.underscan_hborder_property,
+ 0);
+ drm_connector_attach_property(&radeon_connector->base,
+ rdev->mode_info.underscan_vborder_property,
+ 0);
+ }
+ connector->interlace_allowed = true;
+ /* in theory with a DP to VGA converter... */
+ connector->doublescan_allowed = false;
break;
case DRM_MODE_CONNECTOR_SVIDEO:
case DRM_MODE_CONNECTOR_Composite:
@@ -1231,6 +1325,8 @@ radeon_add_atom_connector(struct drm_device *dev,
radeon_atombios_get_tv_info(rdev));
/* no HPD on analog connectors */
radeon_connector->hpd.hpd = RADEON_HPD_NONE;
+ connector->interlace_allowed = false;
+ connector->doublescan_allowed = false;
break;
case DRM_MODE_CONNECTOR_LVDS:
radeon_dig_connector = kzalloc(sizeof(struct radeon_connector_atom_dig), GFP_KERNEL);
@@ -1249,6 +1345,8 @@ radeon_add_atom_connector(struct drm_device *dev,
dev->mode_config.scaling_mode_property,
DRM_MODE_SCALE_FULLSCREEN);
subpixel_order = SubPixelHorizontalRGB;
+ connector->interlace_allowed = false;
+ connector->doublescan_allowed = false;
break;
}
@@ -1326,6 +1424,8 @@ radeon_add_legacy_connector(struct drm_device *dev,
/* no HPD on analog connectors */
radeon_connector->hpd.hpd = RADEON_HPD_NONE;
connector->polled = DRM_CONNECTOR_POLL_CONNECT;
+ connector->interlace_allowed = true;
+ connector->doublescan_allowed = true;
break;
case DRM_MODE_CONNECTOR_DVIA:
drm_connector_init(dev, &radeon_connector->base, &radeon_vga_connector_funcs, connector_type);
@@ -1341,6 +1441,8 @@ radeon_add_legacy_connector(struct drm_device *dev,
1);
/* no HPD on analog connectors */
radeon_connector->hpd.hpd = RADEON_HPD_NONE;
+ connector->interlace_allowed = true;
+ connector->doublescan_allowed = true;
break;
case DRM_MODE_CONNECTOR_DVII:
case DRM_MODE_CONNECTOR_DVID:
@@ -1358,6 +1460,11 @@ radeon_add_legacy_connector(struct drm_device *dev,
1);
}
subpixel_order = SubPixelHorizontalRGB;
+ connector->interlace_allowed = true;
+ if (connector_type == DRM_MODE_CONNECTOR_DVII)
+ connector->doublescan_allowed = true;
+ else
+ connector->doublescan_allowed = false;
break;
case DRM_MODE_CONNECTOR_SVIDEO:
case DRM_MODE_CONNECTOR_Composite:
@@ -1380,6 +1487,8 @@ radeon_add_legacy_connector(struct drm_device *dev,
radeon_combios_get_tv_info(rdev));
/* no HPD on analog connectors */
radeon_connector->hpd.hpd = RADEON_HPD_NONE;
+ connector->interlace_allowed = false;
+ connector->doublescan_allowed = false;
break;
case DRM_MODE_CONNECTOR_LVDS:
drm_connector_init(dev, &radeon_connector->base, &radeon_lvds_connector_funcs, connector_type);
@@ -1393,6 +1502,8 @@ radeon_add_legacy_connector(struct drm_device *dev,
dev->mode_config.scaling_mode_property,
DRM_MODE_SCALE_FULLSCREEN);
subpixel_order = SubPixelHorizontalRGB;
+ connector->interlace_allowed = false;
+ connector->doublescan_allowed = false;
break;
}
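
The new underscan hborder/vborder properties default to 0, in which case the mode-fixup code later in this series keeps computing an automatic border from the display size. A one-function sketch of that fallback:

static int underscan_border(int display_pixels, int user_border)
{
	/* 0 means "auto": roughly 1/32 of the visible size plus 16 */
	return user_border ? user_border : (display_pixels >> 5) + 16;
}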
diff --git a/drivers/gpu/drm/radeon/radeon_cursor.c b/drivers/gpu/drm/radeon/radeon_cursor.c
index 3eef567b0421..017ac54920fb 100644
--- a/drivers/gpu/drm/radeon/radeon_cursor.c
+++ b/drivers/gpu/drm/radeon/radeon_cursor.c
@@ -118,22 +118,25 @@ static void radeon_show_cursor(struct drm_crtc *crtc)
}
static void radeon_set_cursor(struct drm_crtc *crtc, struct drm_gem_object *obj,
- uint32_t gpu_addr)
+ uint64_t gpu_addr)
{
struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc);
struct radeon_device *rdev = crtc->dev->dev_private;
if (ASIC_IS_DCE4(rdev)) {
- WREG32(EVERGREEN_CUR_SURFACE_ADDRESS_HIGH + radeon_crtc->crtc_offset, 0);
- WREG32(EVERGREEN_CUR_SURFACE_ADDRESS + radeon_crtc->crtc_offset, gpu_addr);
+ WREG32(EVERGREEN_CUR_SURFACE_ADDRESS_HIGH + radeon_crtc->crtc_offset,
+ upper_32_bits(gpu_addr));
+ WREG32(EVERGREEN_CUR_SURFACE_ADDRESS + radeon_crtc->crtc_offset,
+ gpu_addr & 0xffffffff);
} else if (ASIC_IS_AVIVO(rdev)) {
if (rdev->family >= CHIP_RV770) {
if (radeon_crtc->crtc_id)
- WREG32(R700_D2CUR_SURFACE_ADDRESS_HIGH, 0);
+ WREG32(R700_D2CUR_SURFACE_ADDRESS_HIGH, upper_32_bits(gpu_addr));
else
- WREG32(R700_D1CUR_SURFACE_ADDRESS_HIGH, 0);
+ WREG32(R700_D1CUR_SURFACE_ADDRESS_HIGH, upper_32_bits(gpu_addr));
}
- WREG32(AVIVO_D1CUR_SURFACE_ADDRESS + radeon_crtc->crtc_offset, gpu_addr);
+ WREG32(AVIVO_D1CUR_SURFACE_ADDRESS + radeon_crtc->crtc_offset,
+ gpu_addr & 0xffffffff);
} else {
radeon_crtc->legacy_cursor_offset = gpu_addr - radeon_crtc->legacy_display_base_addr;
/* offset is from DISP(2)_BASE_ADDRESS */
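
radeon_set_cursor() now takes a 64-bit GPU address and programs the HIGH/LOW surface-address registers from it. A sketch of just the split, with the register writes omitted:

#include <stdint.h>

static void split_cursor_addr(uint64_t gpu_addr, uint32_t *hi, uint32_t *lo)
{
	*hi = (uint32_t)(gpu_addr >> 32);	/* upper_32_bits() */
	*lo = (uint32_t)(gpu_addr & 0xffffffff);
}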
diff --git a/drivers/gpu/drm/radeon/radeon_device.c b/drivers/gpu/drm/radeon/radeon_device.c
index 256d204a6d24..e12e79326cb1 100644
--- a/drivers/gpu/drm/radeon/radeon_device.c
+++ b/drivers/gpu/drm/radeon/radeon_device.c
@@ -117,9 +117,10 @@ void radeon_scratch_init(struct radeon_device *rdev)
} else {
rdev->scratch.num_reg = 7;
}
+ rdev->scratch.reg_base = RADEON_SCRATCH_REG0;
for (i = 0; i < rdev->scratch.num_reg; i++) {
rdev->scratch.free[i] = true;
- rdev->scratch.reg[i] = RADEON_SCRATCH_REG0 + (i * 4);
+ rdev->scratch.reg[i] = rdev->scratch.reg_base + (i * 4);
}
}
@@ -149,6 +150,86 @@ void radeon_scratch_free(struct radeon_device *rdev, uint32_t reg)
}
}
+void radeon_wb_disable(struct radeon_device *rdev)
+{
+ int r;
+
+ if (rdev->wb.wb_obj) {
+ r = radeon_bo_reserve(rdev->wb.wb_obj, false);
+ if (unlikely(r != 0))
+ return;
+ radeon_bo_kunmap(rdev->wb.wb_obj);
+ radeon_bo_unpin(rdev->wb.wb_obj);
+ radeon_bo_unreserve(rdev->wb.wb_obj);
+ }
+ rdev->wb.enabled = false;
+}
+
+void radeon_wb_fini(struct radeon_device *rdev)
+{
+ radeon_wb_disable(rdev);
+ if (rdev->wb.wb_obj) {
+ radeon_bo_unref(&rdev->wb.wb_obj);
+ rdev->wb.wb = NULL;
+ rdev->wb.wb_obj = NULL;
+ }
+}
+
+int radeon_wb_init(struct radeon_device *rdev)
+{
+ int r;
+
+ if (rdev->wb.wb_obj == NULL) {
+ r = radeon_bo_create(rdev, NULL, RADEON_GPU_PAGE_SIZE, PAGE_SIZE, true,
+ RADEON_GEM_DOMAIN_GTT, &rdev->wb.wb_obj);
+ if (r) {
+ dev_warn(rdev->dev, "(%d) create WB bo failed\n", r);
+ return r;
+ }
+ }
+ r = radeon_bo_reserve(rdev->wb.wb_obj, false);
+ if (unlikely(r != 0)) {
+ radeon_wb_fini(rdev);
+ return r;
+ }
+ r = radeon_bo_pin(rdev->wb.wb_obj, RADEON_GEM_DOMAIN_GTT,
+ &rdev->wb.gpu_addr);
+ if (r) {
+ radeon_bo_unreserve(rdev->wb.wb_obj);
+ dev_warn(rdev->dev, "(%d) pin WB bo failed\n", r);
+ radeon_wb_fini(rdev);
+ return r;
+ }
+ r = radeon_bo_kmap(rdev->wb.wb_obj, (void **)&rdev->wb.wb);
+ radeon_bo_unreserve(rdev->wb.wb_obj);
+ if (r) {
+ dev_warn(rdev->dev, "(%d) map WB bo failed\n", r);
+ radeon_wb_fini(rdev);
+ return r;
+ }
+
+ /* disable event_write fences */
+ rdev->wb.use_event = false;
+ /* disabled via module param */
+ if (radeon_no_wb == 1)
+ rdev->wb.enabled = false;
+ else {
+ /* often unreliable on AGP */
+ if (rdev->flags & RADEON_IS_AGP) {
+ rdev->wb.enabled = false;
+ } else {
+ rdev->wb.enabled = true;
+ /* event_write fences are only available on r600+ */
+ if (rdev->family >= CHIP_R600)
+ rdev->wb.use_event = true;
+ }
+ }
+
+ dev_info(rdev->dev, "WB %sabled\n", rdev->wb.enabled ? "en" : "dis");
+
+ return 0;
+}
+
/**
* radeon_vram_location - try to find VRAM location
* @rdev: radeon device structure holding all necessary informations
@@ -205,7 +286,7 @@ void radeon_vram_location(struct radeon_device *rdev, struct radeon_mc *mc, u64
mc->mc_vram_size = mc->aper_size;
}
mc->vram_end = mc->vram_start + mc->mc_vram_size - 1;
- dev_info(rdev->dev, "VRAM: %lluM 0x%08llX - 0x%08llX (%lluM used)\n",
+ dev_info(rdev->dev, "VRAM: %lluM 0x%016llX - 0x%016llX (%lluM used)\n",
mc->mc_vram_size >> 20, mc->vram_start,
mc->vram_end, mc->real_vram_size >> 20);
}
@@ -242,7 +323,7 @@ void radeon_gtt_location(struct radeon_device *rdev, struct radeon_mc *mc)
mc->gtt_start = (mc->vram_end + 1 + mc->gtt_base_align) & ~mc->gtt_base_align;
}
mc->gtt_end = mc->gtt_start + mc->gtt_size - 1;
- dev_info(rdev->dev, "GTT: %lluM 0x%08llX - 0x%08llX\n",
+ dev_info(rdev->dev, "GTT: %lluM 0x%016llX - 0x%016llX\n",
mc->gtt_size >> 20, mc->gtt_start, mc->gtt_end);
}
diff --git a/drivers/gpu/drm/radeon/radeon_display.c b/drivers/gpu/drm/radeon/radeon_display.c
index b92d2f2fcbed..1df4dc6c063c 100644
--- a/drivers/gpu/drm/radeon/radeon_display.c
+++ b/drivers/gpu/drm/radeon/radeon_display.c
@@ -315,10 +315,14 @@ static void radeon_print_display_setup(struct drm_device *dev)
radeon_connector->ddc_bus->rec.en_data_reg,
radeon_connector->ddc_bus->rec.y_clk_reg,
radeon_connector->ddc_bus->rec.y_data_reg);
- if (radeon_connector->router_bus)
+ if (radeon_connector->router.ddc_valid)
DRM_INFO(" DDC Router 0x%x/0x%x\n",
- radeon_connector->router.mux_control_pin,
- radeon_connector->router.mux_state);
+ radeon_connector->router.ddc_mux_control_pin,
+ radeon_connector->router.ddc_mux_state);
+ if (radeon_connector->router.cd_valid)
+ DRM_INFO(" Clock/Data Router 0x%x/0x%x\n",
+ radeon_connector->router.cd_mux_control_pin,
+ radeon_connector->router.cd_mux_state);
} else {
if (connector->connector_type == DRM_MODE_CONNECTOR_VGA ||
connector->connector_type == DRM_MODE_CONNECTOR_DVII ||
@@ -398,8 +402,8 @@ int radeon_ddc_get_modes(struct radeon_connector *radeon_connector)
int ret = 0;
/* on hw with routers, select right port */
- if (radeon_connector->router.valid)
- radeon_router_select_port(radeon_connector);
+ if (radeon_connector->router.ddc_valid)
+ radeon_router_select_ddc_port(radeon_connector);
if ((radeon_connector->base.connector_type == DRM_MODE_CONNECTOR_DisplayPort) ||
(radeon_connector->base.connector_type == DRM_MODE_CONNECTOR_eDP)) {
@@ -432,8 +436,8 @@ static int radeon_ddc_dump(struct drm_connector *connector)
int ret = 0;
/* on hw with routers, select right port */
- if (radeon_connector->router.valid)
- radeon_router_select_port(radeon_connector);
+ if (radeon_connector->router.ddc_valid)
+ radeon_router_select_ddc_port(radeon_connector);
if (!radeon_connector->ddc_bus)
return -1;
@@ -454,13 +458,13 @@ static inline uint32_t radeon_div(uint64_t n, uint32_t d)
return n;
}
-static void radeon_compute_pll_legacy(struct radeon_pll *pll,
- uint64_t freq,
- uint32_t *dot_clock_p,
- uint32_t *fb_div_p,
- uint32_t *frac_fb_div_p,
- uint32_t *ref_div_p,
- uint32_t *post_div_p)
+void radeon_compute_pll(struct radeon_pll *pll,
+ uint64_t freq,
+ uint32_t *dot_clock_p,
+ uint32_t *fb_div_p,
+ uint32_t *frac_fb_div_p,
+ uint32_t *ref_div_p,
+ uint32_t *post_div_p)
{
uint32_t min_ref_div = pll->min_ref_div;
uint32_t max_ref_div = pll->max_ref_div;
@@ -513,7 +517,7 @@ static void radeon_compute_pll_legacy(struct radeon_pll *pll,
max_fractional_feed_div = pll->max_frac_feedback_div;
}
- for (post_div = min_post_div; post_div <= max_post_div; ++post_div) {
+ for (post_div = max_post_div; post_div >= min_post_div; --post_div) {
uint32_t ref_div;
if ((pll->flags & RADEON_PLL_NO_ODD_POST_DIV) && (post_div & 1))
@@ -631,214 +635,6 @@ static void radeon_compute_pll_legacy(struct radeon_pll *pll,
*post_div_p = best_post_div;
}
-static bool
-calc_fb_div(struct radeon_pll *pll,
- uint32_t freq,
- uint32_t post_div,
- uint32_t ref_div,
- uint32_t *fb_div,
- uint32_t *fb_div_frac)
-{
- fixed20_12 feedback_divider, a, b;
- u32 vco_freq;
-
- vco_freq = freq * post_div;
- /* feedback_divider = vco_freq * ref_div / pll->reference_freq; */
- a.full = dfixed_const(pll->reference_freq);
- feedback_divider.full = dfixed_const(vco_freq);
- feedback_divider.full = dfixed_div(feedback_divider, a);
- a.full = dfixed_const(ref_div);
- feedback_divider.full = dfixed_mul(feedback_divider, a);
-
- if (pll->flags & RADEON_PLL_USE_FRAC_FB_DIV) {
- /* feedback_divider = floor((feedback_divider * 10.0) + 0.5) * 0.1; */
- a.full = dfixed_const(10);
- feedback_divider.full = dfixed_mul(feedback_divider, a);
- feedback_divider.full += dfixed_const_half(0);
- feedback_divider.full = dfixed_floor(feedback_divider);
- feedback_divider.full = dfixed_div(feedback_divider, a);
-
- /* *fb_div = floor(feedback_divider); */
- a.full = dfixed_floor(feedback_divider);
- *fb_div = dfixed_trunc(a);
- /* *fb_div_frac = fmod(feedback_divider, 1.0) * 10.0; */
- a.full = dfixed_const(10);
- b.full = dfixed_mul(feedback_divider, a);
-
- feedback_divider.full = dfixed_floor(feedback_divider);
- feedback_divider.full = dfixed_mul(feedback_divider, a);
- feedback_divider.full = b.full - feedback_divider.full;
- *fb_div_frac = dfixed_trunc(feedback_divider);
- } else {
- /* *fb_div = floor(feedback_divider + 0.5); */
- feedback_divider.full += dfixed_const_half(0);
- feedback_divider.full = dfixed_floor(feedback_divider);
-
- *fb_div = dfixed_trunc(feedback_divider);
- *fb_div_frac = 0;
- }
-
- if (((*fb_div) < pll->min_feedback_div) || ((*fb_div) > pll->max_feedback_div))
- return false;
- else
- return true;
-}
-
-static bool
-calc_fb_ref_div(struct radeon_pll *pll,
- uint32_t freq,
- uint32_t post_div,
- uint32_t *fb_div,
- uint32_t *fb_div_frac,
- uint32_t *ref_div)
-{
- fixed20_12 ffreq, max_error, error, pll_out, a;
- u32 vco;
- u32 pll_out_min, pll_out_max;
-
- if (pll->flags & RADEON_PLL_IS_LCD) {
- pll_out_min = pll->lcd_pll_out_min;
- pll_out_max = pll->lcd_pll_out_max;
- } else {
- pll_out_min = pll->pll_out_min;
- pll_out_max = pll->pll_out_max;
- }
-
- ffreq.full = dfixed_const(freq);
- /* max_error = ffreq * 0.0025; */
- a.full = dfixed_const(400);
- max_error.full = dfixed_div(ffreq, a);
-
- for ((*ref_div) = pll->min_ref_div; (*ref_div) < pll->max_ref_div; ++(*ref_div)) {
- if (calc_fb_div(pll, freq, post_div, (*ref_div), fb_div, fb_div_frac)) {
- vco = pll->reference_freq * (((*fb_div) * 10) + (*fb_div_frac));
- vco = vco / ((*ref_div) * 10);
-
- if ((vco < pll_out_min) || (vco > pll_out_max))
- continue;
-
- /* pll_out = vco / post_div; */
- a.full = dfixed_const(post_div);
- pll_out.full = dfixed_const(vco);
- pll_out.full = dfixed_div(pll_out, a);
-
- if (pll_out.full >= ffreq.full) {
- error.full = pll_out.full - ffreq.full;
- if (error.full <= max_error.full)
- return true;
- }
- }
- }
- return false;
-}
-
-static void radeon_compute_pll_new(struct radeon_pll *pll,
- uint64_t freq,
- uint32_t *dot_clock_p,
- uint32_t *fb_div_p,
- uint32_t *frac_fb_div_p,
- uint32_t *ref_div_p,
- uint32_t *post_div_p)
-{
- u32 fb_div = 0, fb_div_frac = 0, post_div = 0, ref_div = 0;
- u32 best_freq = 0, vco_frequency;
- u32 pll_out_min, pll_out_max;
-
- if (pll->flags & RADEON_PLL_IS_LCD) {
- pll_out_min = pll->lcd_pll_out_min;
- pll_out_max = pll->lcd_pll_out_max;
- } else {
- pll_out_min = pll->pll_out_min;
- pll_out_max = pll->pll_out_max;
- }
-
- /* freq = freq / 10; */
- do_div(freq, 10);
-
- if (pll->flags & RADEON_PLL_USE_POST_DIV) {
- post_div = pll->post_div;
- if ((post_div < pll->min_post_div) || (post_div > pll->max_post_div))
- goto done;
-
- vco_frequency = freq * post_div;
- if ((vco_frequency < pll_out_min) || (vco_frequency > pll_out_max))
- goto done;
-
- if (pll->flags & RADEON_PLL_USE_REF_DIV) {
- ref_div = pll->reference_div;
- if ((ref_div < pll->min_ref_div) || (ref_div > pll->max_ref_div))
- goto done;
- if (!calc_fb_div(pll, freq, post_div, ref_div, &fb_div, &fb_div_frac))
- goto done;
- }
- } else {
- for (post_div = pll->max_post_div; post_div >= pll->min_post_div; --post_div) {
- if (pll->flags & RADEON_PLL_LEGACY) {
- if ((post_div == 5) ||
- (post_div == 7) ||
- (post_div == 9) ||
- (post_div == 10) ||
- (post_div == 11))
- continue;
- }
-
- if ((pll->flags & RADEON_PLL_NO_ODD_POST_DIV) && (post_div & 1))
- continue;
-
- vco_frequency = freq * post_div;
- if ((vco_frequency < pll_out_min) || (vco_frequency > pll_out_max))
- continue;
- if (pll->flags & RADEON_PLL_USE_REF_DIV) {
- ref_div = pll->reference_div;
- if ((ref_div < pll->min_ref_div) || (ref_div > pll->max_ref_div))
- goto done;
- if (calc_fb_div(pll, freq, post_div, ref_div, &fb_div, &fb_div_frac))
- break;
- } else {
- if (calc_fb_ref_div(pll, freq, post_div, &fb_div, &fb_div_frac, &ref_div))
- break;
- }
- }
- }
-
- best_freq = pll->reference_freq * 10 * fb_div;
- best_freq += pll->reference_freq * fb_div_frac;
- best_freq = best_freq / (ref_div * post_div);
-
-done:
- if (best_freq == 0)
- DRM_ERROR("Couldn't find valid PLL dividers\n");
-
- *dot_clock_p = best_freq / 10;
- *fb_div_p = fb_div;
- *frac_fb_div_p = fb_div_frac;
- *ref_div_p = ref_div;
- *post_div_p = post_div;
-
- DRM_DEBUG_KMS("%u %d.%d, %d, %d\n", *dot_clock_p, *fb_div_p, *frac_fb_div_p, *ref_div_p, *post_div_p);
-}
-
-void radeon_compute_pll(struct radeon_pll *pll,
- uint64_t freq,
- uint32_t *dot_clock_p,
- uint32_t *fb_div_p,
- uint32_t *frac_fb_div_p,
- uint32_t *ref_div_p,
- uint32_t *post_div_p)
-{
- switch (pll->algo) {
- case PLL_ALGO_NEW:
- radeon_compute_pll_new(pll, freq, dot_clock_p, fb_div_p,
- frac_fb_div_p, ref_div_p, post_div_p);
- break;
- case PLL_ALGO_LEGACY:
- default:
- radeon_compute_pll_legacy(pll, freq, dot_clock_p, fb_div_p,
- frac_fb_div_p, ref_div_p, post_div_p);
- break;
- }
-}
-
static void radeon_user_framebuffer_destroy(struct drm_framebuffer *fb)
{
struct radeon_framebuffer *radeon_fb = to_radeon_framebuffer(fb);
@@ -1002,6 +798,24 @@ static int radeon_modeset_create_props(struct radeon_device *rdev)
radeon_underscan_enum_list[i].name);
}
+ rdev->mode_info.underscan_hborder_property =
+ drm_property_create(rdev->ddev,
+ DRM_MODE_PROP_RANGE,
+ "underscan hborder", 2);
+ if (!rdev->mode_info.underscan_hborder_property)
+ return -ENOMEM;
+ rdev->mode_info.underscan_hborder_property->values[0] = 0;
+ rdev->mode_info.underscan_hborder_property->values[1] = 128;
+
+ rdev->mode_info.underscan_vborder_property =
+ drm_property_create(rdev->ddev,
+ DRM_MODE_PROP_RANGE,
+ "underscan vborder", 2);
+ if (!rdev->mode_info.underscan_vborder_property)
+ return -ENOMEM;
+ rdev->mode_info.underscan_vborder_property->values[0] = 0;
+ rdev->mode_info.underscan_vborder_property->values[1] = 128;
+
return 0;
}
@@ -1159,8 +973,14 @@ bool radeon_crtc_scaling_mode_fixup(struct drm_crtc *crtc,
((radeon_encoder->underscan_type == UNDERSCAN_AUTO) &&
drm_detect_hdmi_monitor(radeon_connector->edid) &&
is_hdtv_mode(mode)))) {
- radeon_crtc->h_border = (mode->hdisplay >> 5) + 16;
- radeon_crtc->v_border = (mode->vdisplay >> 5) + 16;
+ if (radeon_encoder->underscan_hborder != 0)
+ radeon_crtc->h_border = radeon_encoder->underscan_hborder;
+ else
+ radeon_crtc->h_border = (mode->hdisplay >> 5) + 16;
+ if (radeon_encoder->underscan_vborder != 0)
+ radeon_crtc->v_border = radeon_encoder->underscan_vborder;
+ else
+ radeon_crtc->v_border = (mode->vdisplay >> 5) + 16;
radeon_crtc->rmx_type = RMX_FULL;
src_v = crtc->mode.vdisplay;
dst_v = crtc->mode.vdisplay - (radeon_crtc->v_border * 2);
@@ -1195,3 +1015,156 @@ bool radeon_crtc_scaling_mode_fixup(struct drm_crtc *crtc,
}
return true;
}
+
+/*
+ * Retrieve current video scanout position of crtc on a given gpu.
+ *
+ * \param rdev Device to query.
+ * \param crtc Crtc to query.
+ * \param *vpos Location where vertical scanout position should be stored.
+ * \param *hpos Location where horizontal scanout position should go.
+ *
+ * Returns vpos as a positive number while in active scanout area.
+ * Returns vpos as a negative number inside vblank, counting the number
+ * of scanlines to go until end of vblank, e.g., -1 means "one scanline
+ * until start of active scanout / end of vblank."
+ *
+ * \return Flags, or'ed together as follows:
+ *
+ * RADEON_SCANOUTPOS_VALID = Query successful.
+ * RADEON_SCANOUTPOS_INVBL = Inside vblank.
+ * RADEON_SCANOUTPOS_ACCURATE = Returned position is accurate. A lack of
+ * this flag means that returned position may be offset by a constant but
+ * unknown small number of scanlines wrt. real scanout position.
+ *
+ */
+int radeon_get_crtc_scanoutpos(struct radeon_device *rdev, int crtc, int *vpos, int *hpos)
+{
+ u32 stat_crtc = 0, vbl = 0, position = 0;
+ int vbl_start, vbl_end, vtotal, ret = 0;
+ bool in_vbl = true;
+
+ if (ASIC_IS_DCE4(rdev)) {
+ if (crtc == 0) {
+ vbl = RREG32(EVERGREEN_CRTC_V_BLANK_START_END +
+ EVERGREEN_CRTC0_REGISTER_OFFSET);
+ position = RREG32(EVERGREEN_CRTC_STATUS_POSITION +
+ EVERGREEN_CRTC0_REGISTER_OFFSET);
+ ret |= RADEON_SCANOUTPOS_VALID;
+ }
+ if (crtc == 1) {
+ vbl = RREG32(EVERGREEN_CRTC_V_BLANK_START_END +
+ EVERGREEN_CRTC1_REGISTER_OFFSET);
+ position = RREG32(EVERGREEN_CRTC_STATUS_POSITION +
+ EVERGREEN_CRTC1_REGISTER_OFFSET);
+ ret |= RADEON_SCANOUTPOS_VALID;
+ }
+ if (crtc == 2) {
+ vbl = RREG32(EVERGREEN_CRTC_V_BLANK_START_END +
+ EVERGREEN_CRTC2_REGISTER_OFFSET);
+ position = RREG32(EVERGREEN_CRTC_STATUS_POSITION +
+ EVERGREEN_CRTC2_REGISTER_OFFSET);
+ ret |= RADEON_SCANOUTPOS_VALID;
+ }
+ if (crtc == 3) {
+ vbl = RREG32(EVERGREEN_CRTC_V_BLANK_START_END +
+ EVERGREEN_CRTC3_REGISTER_OFFSET);
+ position = RREG32(EVERGREEN_CRTC_STATUS_POSITION +
+ EVERGREEN_CRTC3_REGISTER_OFFSET);
+ ret |= RADEON_SCANOUTPOS_VALID;
+ }
+ if (crtc == 4) {
+ vbl = RREG32(EVERGREEN_CRTC_V_BLANK_START_END +
+ EVERGREEN_CRTC4_REGISTER_OFFSET);
+ position = RREG32(EVERGREEN_CRTC_STATUS_POSITION +
+ EVERGREEN_CRTC4_REGISTER_OFFSET);
+ ret |= RADEON_SCANOUTPOS_VALID;
+ }
+ if (crtc == 5) {
+ vbl = RREG32(EVERGREEN_CRTC_V_BLANK_START_END +
+ EVERGREEN_CRTC5_REGISTER_OFFSET);
+ position = RREG32(EVERGREEN_CRTC_STATUS_POSITION +
+ EVERGREEN_CRTC5_REGISTER_OFFSET);
+ ret |= RADEON_SCANOUTPOS_VALID;
+ }
+ } else if (ASIC_IS_AVIVO(rdev)) {
+ if (crtc == 0) {
+ vbl = RREG32(AVIVO_D1CRTC_V_BLANK_START_END);
+ position = RREG32(AVIVO_D1CRTC_STATUS_POSITION);
+ ret |= RADEON_SCANOUTPOS_VALID;
+ }
+ if (crtc == 1) {
+ vbl = RREG32(AVIVO_D2CRTC_V_BLANK_START_END);
+ position = RREG32(AVIVO_D2CRTC_STATUS_POSITION);
+ ret |= RADEON_SCANOUTPOS_VALID;
+ }
+ } else {
+ /* Pre-AVIVO: Different encoding of scanout pos and vblank interval. */
+ if (crtc == 0) {
+ /* Assume vbl_end == 0, get vbl_start from
+ * upper 16 bits.
+ */
+ vbl = (RREG32(RADEON_CRTC_V_TOTAL_DISP) &
+ RADEON_CRTC_V_DISP) >> RADEON_CRTC_V_DISP_SHIFT;
+ /* Only retrieve vpos from upper 16 bits, set hpos == 0. */
+ position = (RREG32(RADEON_CRTC_VLINE_CRNT_VLINE) >> 16) & RADEON_CRTC_V_TOTAL;
+ stat_crtc = RREG32(RADEON_CRTC_STATUS);
+ if (!(stat_crtc & 1))
+ in_vbl = false;
+
+ ret |= RADEON_SCANOUTPOS_VALID;
+ }
+ if (crtc == 1) {
+ vbl = (RREG32(RADEON_CRTC2_V_TOTAL_DISP) &
+ RADEON_CRTC_V_DISP) >> RADEON_CRTC_V_DISP_SHIFT;
+ position = (RREG32(RADEON_CRTC2_VLINE_CRNT_VLINE) >> 16) & RADEON_CRTC_V_TOTAL;
+ stat_crtc = RREG32(RADEON_CRTC2_STATUS);
+ if (!(stat_crtc & 1))
+ in_vbl = false;
+
+ ret |= RADEON_SCANOUTPOS_VALID;
+ }
+ }
+
+ /* Decode into vertical and horizontal scanout position. */
+ *vpos = position & 0x1fff;
+ *hpos = (position >> 16) & 0x1fff;
+
+ /* Valid vblank area boundaries from gpu retrieved? */
+ if (vbl > 0) {
+ /* Yes: Decode. */
+ ret |= RADEON_SCANOUTPOS_ACCURATE;
+ vbl_start = vbl & 0x1fff;
+ vbl_end = (vbl >> 16) & 0x1fff;
+ }
+ else {
+ /* No: Fake something reasonable which gives at least ok results. */
+ vbl_start = rdev->mode_info.crtcs[crtc]->base.mode.crtc_vdisplay;
+ vbl_end = 0;
+ }
+
+ /* Test scanout position against vblank region. */
+ if ((*vpos < vbl_start) && (*vpos >= vbl_end))
+ in_vbl = false;
+
+ /* Check if inside vblank area and apply corrective offsets:
+ * vpos will then be >=0 in video scanout area, but negative
+ * within vblank area, counting down the number of lines until
+ * start of scanout.
+ */
+
+ /* Inside "upper part" of vblank area? Apply corrective offset if so: */
+ if (in_vbl && (*vpos >= vbl_start)) {
+ vtotal = rdev->mode_info.crtcs[crtc]->base.mode.crtc_vtotal;
+ *vpos = *vpos - vtotal;
+ }
+
+ /* Correct for shifted end of vbl at vbl_end. */
+ *vpos = *vpos - vbl_end;
+
+ /* In vblank? */
+ if (in_vbl)
+ ret |= RADEON_SCANOUTPOS_INVBL;
+
+ return ret;
+}
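
The new radeon_get_crtc_scanoutpos() reads one register with hpos/vpos packed together and another with the vblank start/end lines. A stand-alone sketch of just the decode step, using made-up register values:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint32_t position = (640u << 16) | 560u;	/* hpos (high) | vpos (low), hypothetical */
	uint32_t vbl = (0u << 16) | 1080u;		/* vbl_end (high) | vbl_start (low) */

	int vpos = position & 0x1fff;
	int hpos = (position >> 16) & 0x1fff;
	int vbl_start = vbl & 0x1fff;
	int vbl_end = (vbl >> 16) & 0x1fff;

	/* active scanout only when vbl_end <= vpos < vbl_start */
	int in_vbl = !((vpos < vbl_start) && (vpos >= vbl_end));

	printf("vpos=%d hpos=%d in_vbl=%d\n", vpos, hpos, in_vbl);
	return 0;
}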
diff --git a/drivers/gpu/drm/radeon/radeon_drv.c b/drivers/gpu/drm/radeon/radeon_drv.c
index 29c1237c2e7b..88e4ea925900 100644
--- a/drivers/gpu/drm/radeon/radeon_drv.c
+++ b/drivers/gpu/drm/radeon/radeon_drv.c
@@ -47,9 +47,10 @@
* - 2.4.0 - add crtc id query
* - 2.5.0 - add get accel 2 to work around ddx breakage for evergreen
* - 2.6.0 - add tiling config query (r6xx+), add initial HiZ support (r300->r500)
+ * - 2.7.0 - fixups for r600 2D tiling support (no external ABI change), add eg dyn gpr regs
*/
#define KMS_DRIVER_MAJOR 2
-#define KMS_DRIVER_MINOR 6
+#define KMS_DRIVER_MINOR 7
#define KMS_DRIVER_PATCHLEVEL 0
int radeon_driver_load_kms(struct drm_device *dev, unsigned long flags);
int radeon_driver_unload_kms(struct drm_device *dev);
@@ -93,7 +94,6 @@ int radeon_benchmarking = 0;
int radeon_testing = 0;
int radeon_connector_table = 0;
int radeon_tv = 1;
-int radeon_new_pll = -1;
int radeon_audio = 1;
int radeon_disp_priority = 0;
int radeon_hw_i2c = 0;
@@ -131,9 +131,6 @@ module_param_named(connector_table, radeon_connector_table, int, 0444);
MODULE_PARM_DESC(tv, "TV enable (0 = disable)");
module_param_named(tv, radeon_tv, int, 0444);
-MODULE_PARM_DESC(new_pll, "Select new PLL code");
-module_param_named(new_pll, radeon_new_pll, int, 0444);
-
MODULE_PARM_DESC(audio, "Audio enable (0 = disable)");
module_param_named(audio, radeon_audio, int, 0444);
@@ -203,8 +200,6 @@ static struct drm_driver driver_old = {
.irq_uninstall = radeon_driver_irq_uninstall,
.irq_handler = radeon_driver_irq_handler,
.reclaim_buffers = drm_core_reclaim_buffers,
- .get_map_ofs = drm_core_get_map_ofs,
- .get_reg_ofs = drm_core_get_reg_ofs,
.ioctls = radeon_ioctls,
.dma_ioctl = radeon_cp_buffers,
.fops = {
@@ -291,8 +286,6 @@ static struct drm_driver kms_driver = {
.irq_uninstall = radeon_driver_irq_uninstall_kms,
.irq_handler = radeon_driver_irq_handler_kms,
.reclaim_buffers = drm_core_reclaim_buffers,
- .get_map_ofs = drm_core_get_map_ofs,
- .get_reg_ofs = drm_core_get_reg_ofs,
.ioctls = radeon_ioctls_kms,
.gem_init_object = radeon_gem_object_init,
.gem_free_object = radeon_gem_object_free,
diff --git a/drivers/gpu/drm/radeon/radeon_encoders.c b/drivers/gpu/drm/radeon/radeon_encoders.c
index 2c293e8304d6..041943df966b 100644
--- a/drivers/gpu/drm/radeon/radeon_encoders.c
+++ b/drivers/gpu/drm/radeon/radeon_encoders.c
@@ -176,6 +176,7 @@ static inline bool radeon_encoder_is_digital(struct drm_encoder *encoder)
return false;
}
}
+
void
radeon_link_encoder_connector(struct drm_device *dev)
{
@@ -228,6 +229,27 @@ radeon_get_connector_for_encoder(struct drm_encoder *encoder)
return NULL;
}
+struct drm_encoder *radeon_atom_get_external_encoder(struct drm_encoder *encoder)
+{
+ struct drm_device *dev = encoder->dev;
+ struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
+ struct drm_encoder *other_encoder;
+ struct radeon_encoder *other_radeon_encoder;
+
+ if (radeon_encoder->is_ext_encoder)
+ return NULL;
+
+ list_for_each_entry(other_encoder, &dev->mode_config.encoder_list, head) {
+ if (other_encoder == encoder)
+ continue;
+ other_radeon_encoder = to_radeon_encoder(other_encoder);
+ if (other_radeon_encoder->is_ext_encoder &&
+ (radeon_encoder->devices & other_radeon_encoder->devices))
+ return other_encoder;
+ }
+ return NULL;
+}
+
void radeon_panel_mode_fixup(struct drm_encoder *encoder,
struct drm_display_mode *adjusted_mode)
{
@@ -426,52 +448,49 @@ atombios_tv_setup(struct drm_encoder *encoder, int action)
}
-void
-atombios_external_tmds_setup(struct drm_encoder *encoder, int action)
-{
- struct drm_device *dev = encoder->dev;
- struct radeon_device *rdev = dev->dev_private;
- struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
- ENABLE_EXTERNAL_TMDS_ENCODER_PS_ALLOCATION args;
- int index = 0;
-
- memset(&args, 0, sizeof(args));
-
- index = GetIndexIntoMasterTable(COMMAND, DVOEncoderControl);
-
- args.sXTmdsEncoder.ucEnable = action;
-
- if (radeon_encoder->pixel_clock > 165000)
- args.sXTmdsEncoder.ucMisc = PANEL_ENCODER_MISC_DUAL;
-
- /*if (pScrn->rgbBits == 8)*/
- args.sXTmdsEncoder.ucMisc |= (1 << 1);
-
- atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args);
-
-}
+union dvo_encoder_control {
+ ENABLE_EXTERNAL_TMDS_ENCODER_PS_ALLOCATION ext_tmds;
+ DVO_ENCODER_CONTROL_PS_ALLOCATION dvo;
+ DVO_ENCODER_CONTROL_PS_ALLOCATION_V3 dvo_v3;
+};
-static void
-atombios_ddia_setup(struct drm_encoder *encoder, int action)
+void
+atombios_dvo_setup(struct drm_encoder *encoder, int action)
{
struct drm_device *dev = encoder->dev;
struct radeon_device *rdev = dev->dev_private;
struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
- DVO_ENCODER_CONTROL_PS_ALLOCATION args;
- int index = 0;
+ union dvo_encoder_control args;
+ int index = GetIndexIntoMasterTable(COMMAND, DVOEncoderControl);
memset(&args, 0, sizeof(args));
- index = GetIndexIntoMasterTable(COMMAND, DVOEncoderControl);
+ if (ASIC_IS_DCE3(rdev)) {
+ /* DCE3+ */
+ args.dvo_v3.ucAction = action;
+ args.dvo_v3.usPixelClock = cpu_to_le16(radeon_encoder->pixel_clock / 10);
+ args.dvo_v3.ucDVOConfig = 0; /* XXX */
+ } else if (ASIC_IS_DCE2(rdev)) {
+ /* DCE2 (pre-DCE3 R6xx, RS600/690/740) */
+ args.dvo.sDVOEncoder.ucAction = action;
+ args.dvo.sDVOEncoder.usPixelClock = cpu_to_le16(radeon_encoder->pixel_clock / 10);
+ /* DFP1, CRT1, TV1 depending on the type of port */
+ args.dvo.sDVOEncoder.ucDeviceType = ATOM_DEVICE_DFP1_INDEX;
+
+ if (radeon_encoder->pixel_clock > 165000)
+ args.dvo.sDVOEncoder.usDevAttr.sDigAttrib.ucAttribute |= PANEL_ENCODER_MISC_DUAL;
+ } else {
+ /* R4xx, R5xx */
+ args.ext_tmds.sXTmdsEncoder.ucEnable = action;
- args.sDVOEncoder.ucAction = action;
- args.sDVOEncoder.usPixelClock = cpu_to_le16(radeon_encoder->pixel_clock / 10);
+ if (radeon_encoder->pixel_clock > 165000)
+ args.ext_tmds.sXTmdsEncoder.ucMisc |= PANEL_ENCODER_MISC_DUAL;
- if (radeon_encoder->pixel_clock > 165000)
- args.sDVOEncoder.usDevAttr.sDigAttrib.ucAttribute = PANEL_ENCODER_MISC_DUAL;
+ /*if (pScrn->rgbBits == 8)*/
+ args.ext_tmds.sXTmdsEncoder.ucMisc |= ATOM_PANEL_MISC_888RGB;
+ }
atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args);
-
}
union lvds_encoder_control {
@@ -529,17 +548,17 @@ atombios_digital_setup(struct drm_encoder *encoder, int action)
args.v1.ucMisc |= PANEL_ENCODER_MISC_HDMI_TYPE;
args.v1.usPixelClock = cpu_to_le16(radeon_encoder->pixel_clock / 10);
if (radeon_encoder->devices & (ATOM_DEVICE_LCD_SUPPORT)) {
- if (dig->lvds_misc & ATOM_PANEL_MISC_DUAL)
+ if (dig->lcd_misc & ATOM_PANEL_MISC_DUAL)
args.v1.ucMisc |= PANEL_ENCODER_MISC_DUAL;
- if (dig->lvds_misc & ATOM_PANEL_MISC_888RGB)
- args.v1.ucMisc |= (1 << 1);
+ if (dig->lcd_misc & ATOM_PANEL_MISC_888RGB)
+ args.v1.ucMisc |= ATOM_PANEL_MISC_888RGB;
} else {
if (dig->linkb)
args.v1.ucMisc |= PANEL_ENCODER_MISC_TMDS_LINKB;
if (radeon_encoder->pixel_clock > 165000)
args.v1.ucMisc |= PANEL_ENCODER_MISC_DUAL;
/*if (pScrn->rgbBits == 8) */
- args.v1.ucMisc |= (1 << 1);
+ args.v1.ucMisc |= ATOM_PANEL_MISC_888RGB;
}
break;
case 2:
@@ -558,18 +577,18 @@ atombios_digital_setup(struct drm_encoder *encoder, int action)
args.v2.ucTemporal = 0;
args.v2.ucFRC = 0;
if (radeon_encoder->devices & (ATOM_DEVICE_LCD_SUPPORT)) {
- if (dig->lvds_misc & ATOM_PANEL_MISC_DUAL)
+ if (dig->lcd_misc & ATOM_PANEL_MISC_DUAL)
args.v2.ucMisc |= PANEL_ENCODER_MISC_DUAL;
- if (dig->lvds_misc & ATOM_PANEL_MISC_SPATIAL) {
+ if (dig->lcd_misc & ATOM_PANEL_MISC_SPATIAL) {
args.v2.ucSpatial = PANEL_ENCODER_SPATIAL_DITHER_EN;
- if (dig->lvds_misc & ATOM_PANEL_MISC_888RGB)
+ if (dig->lcd_misc & ATOM_PANEL_MISC_888RGB)
args.v2.ucSpatial |= PANEL_ENCODER_SPATIAL_DITHER_DEPTH;
}
- if (dig->lvds_misc & ATOM_PANEL_MISC_TEMPORAL) {
+ if (dig->lcd_misc & ATOM_PANEL_MISC_TEMPORAL) {
args.v2.ucTemporal = PANEL_ENCODER_TEMPORAL_DITHER_EN;
- if (dig->lvds_misc & ATOM_PANEL_MISC_888RGB)
+ if (dig->lcd_misc & ATOM_PANEL_MISC_888RGB)
args.v2.ucTemporal |= PANEL_ENCODER_TEMPORAL_DITHER_DEPTH;
- if (((dig->lvds_misc >> ATOM_PANEL_MISC_GREY_LEVEL_SHIFT) & 0x3) == 2)
+ if (((dig->lcd_misc >> ATOM_PANEL_MISC_GREY_LEVEL_SHIFT) & 0x3) == 2)
args.v2.ucTemporal |= PANEL_ENCODER_TEMPORAL_LEVEL_4;
}
} else {
@@ -595,6 +614,7 @@ atombios_digital_setup(struct drm_encoder *encoder, int action)
int
atombios_get_encoder_mode(struct drm_encoder *encoder)
{
+ struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
struct drm_device *dev = encoder->dev;
struct radeon_device *rdev = dev->dev_private;
struct drm_connector *connector;
@@ -602,9 +622,20 @@ atombios_get_encoder_mode(struct drm_encoder *encoder)
struct radeon_connector_atom_dig *dig_connector;
connector = radeon_get_connector_for_encoder(encoder);
- if (!connector)
- return 0;
-
+ if (!connector) {
+ switch (radeon_encoder->encoder_id) {
+ case ENCODER_OBJECT_ID_INTERNAL_UNIPHY:
+ case ENCODER_OBJECT_ID_INTERNAL_UNIPHY1:
+ case ENCODER_OBJECT_ID_INTERNAL_UNIPHY2:
+ case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_LVTMA:
+ case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DVO1:
+ return ATOM_ENCODER_MODE_DVI;
+ case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DAC1:
+ case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DAC2:
+ default:
+ return ATOM_ENCODER_MODE_CRT;
+ }
+ }
radeon_connector = to_radeon_connector(connector);
switch (connector->connector_type) {
@@ -834,6 +865,9 @@ atombios_dig_transmitter_setup(struct drm_encoder *encoder, int action, uint8_t
memset(&args, 0, sizeof(args));
switch (radeon_encoder->encoder_id) {
+ case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DVO1:
+ index = GetIndexIntoMasterTable(COMMAND, DVOOutputControl);
+ break;
case ENCODER_OBJECT_ID_INTERNAL_UNIPHY:
case ENCODER_OBJECT_ID_INTERNAL_UNIPHY1:
case ENCODER_OBJECT_ID_INTERNAL_UNIPHY2:
@@ -978,6 +1012,105 @@ atombios_dig_transmitter_setup(struct drm_encoder *encoder, int action, uint8_t
atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args);
}
+void
+atombios_set_edp_panel_power(struct drm_connector *connector, int action)
+{
+ struct radeon_connector *radeon_connector = to_radeon_connector(connector);
+ struct drm_device *dev = radeon_connector->base.dev;
+ struct radeon_device *rdev = dev->dev_private;
+ union dig_transmitter_control args;
+ int index = GetIndexIntoMasterTable(COMMAND, UNIPHYTransmitterControl);
+ uint8_t frev, crev;
+
+ if (connector->connector_type != DRM_MODE_CONNECTOR_eDP)
+ return;
+
+ if (!ASIC_IS_DCE4(rdev))
+ return;
+
+ if ((action != ATOM_TRANSMITTER_ACTION_POWER_ON) &&
+ (action != ATOM_TRANSMITTER_ACTION_POWER_OFF))
+ return;
+
+ if (!atom_parse_cmd_header(rdev->mode_info.atom_context, index, &frev, &crev))
+ return;
+
+ memset(&args, 0, sizeof(args));
+
+ args.v1.ucAction = action;
+
+ atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args);
+}
+
+union external_encoder_control {
+ EXTERNAL_ENCODER_CONTROL_PS_ALLOCATION v1;
+};
+
+static void
+atombios_external_encoder_setup(struct drm_encoder *encoder,
+ struct drm_encoder *ext_encoder,
+ int action)
+{
+ struct drm_device *dev = encoder->dev;
+ struct radeon_device *rdev = dev->dev_private;
+ struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
+ union external_encoder_control args;
+ struct drm_connector *connector = radeon_get_connector_for_encoder(encoder);
+ int index = GetIndexIntoMasterTable(COMMAND, ExternalEncoderControl);
+ u8 frev, crev;
+ int dp_clock = 0;
+ int dp_lane_count = 0;
+ int connector_object_id = 0;
+
+ if (connector) {
+ struct radeon_connector *radeon_connector = to_radeon_connector(connector);
+ struct radeon_connector_atom_dig *dig_connector =
+ radeon_connector->con_priv;
+
+ dp_clock = dig_connector->dp_clock;
+ dp_lane_count = dig_connector->dp_lane_count;
+ connector_object_id =
+ (radeon_connector->connector_object_id & OBJECT_ID_MASK) >> OBJECT_ID_SHIFT;
+ }
+
+ memset(&args, 0, sizeof(args));
+
+ if (!atom_parse_cmd_header(rdev->mode_info.atom_context, index, &frev, &crev))
+ return;
+
+ switch (frev) {
+ case 1:
+ /* no params on frev 1 */
+ break;
+ case 2:
+ switch (crev) {
+ case 1:
+ case 2:
+ args.v1.sDigEncoder.ucAction = action;
+ args.v1.sDigEncoder.usPixelClock = cpu_to_le16(radeon_encoder->pixel_clock / 10);
+ args.v1.sDigEncoder.ucEncoderMode = atombios_get_encoder_mode(encoder);
+
+ if (args.v1.sDigEncoder.ucEncoderMode == ATOM_ENCODER_MODE_DP) {
+ if (dp_clock == 270000)
+ args.v1.sDigEncoder.ucConfig |= ATOM_ENCODER_CONFIG_DPLINKRATE_2_70GHZ;
+ args.v1.sDigEncoder.ucLaneNum = dp_lane_count;
+ } else if (radeon_encoder->pixel_clock > 165000)
+ args.v1.sDigEncoder.ucLaneNum = 8;
+ else
+ args.v1.sDigEncoder.ucLaneNum = 4;
+ break;
+ default:
+ DRM_ERROR("Unknown table version: %d, %d\n", frev, crev);
+ return;
+ }
+ break;
+ default:
+ DRM_ERROR("Unknown table version: %d, %d\n", frev, crev);
+ return;
+ }
+ atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args);
+}
+
static void
atombios_yuv_setup(struct drm_encoder *encoder, bool enable)
{
@@ -1021,6 +1154,7 @@ radeon_atom_encoder_dpms(struct drm_encoder *encoder, int mode)
struct drm_device *dev = encoder->dev;
struct radeon_device *rdev = dev->dev_private;
struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
+ struct drm_encoder *ext_encoder = radeon_atom_get_external_encoder(encoder);
DISPLAY_DEVICE_OUTPUT_CONTROL_PS_ALLOCATION args;
int index = 0;
bool is_dig = false;
@@ -1043,9 +1177,14 @@ radeon_atom_encoder_dpms(struct drm_encoder *encoder, int mode)
break;
case ENCODER_OBJECT_ID_INTERNAL_DVO1:
case ENCODER_OBJECT_ID_INTERNAL_DDI:
- case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DVO1:
index = GetIndexIntoMasterTable(COMMAND, DVOOutputControl);
break;
+ case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DVO1:
+ if (ASIC_IS_DCE3(rdev))
+ is_dig = true;
+ else
+ index = GetIndexIntoMasterTable(COMMAND, DVOOutputControl);
+ break;
case ENCODER_OBJECT_ID_INTERNAL_LVDS:
index = GetIndexIntoMasterTable(COMMAND, LCD1OutputControl);
break;
@@ -1082,34 +1221,85 @@ radeon_atom_encoder_dpms(struct drm_encoder *encoder, int mode)
if (atombios_get_encoder_mode(encoder) == ATOM_ENCODER_MODE_DP) {
struct drm_connector *connector = radeon_get_connector_for_encoder(encoder);
+ if (connector &&
+ (connector->connector_type == DRM_MODE_CONNECTOR_eDP)) {
+ struct radeon_connector *radeon_connector = to_radeon_connector(connector);
+ struct radeon_connector_atom_dig *radeon_dig_connector =
+ radeon_connector->con_priv;
+ atombios_set_edp_panel_power(connector,
+ ATOM_TRANSMITTER_ACTION_POWER_ON);
+ radeon_dig_connector->edp_on = true;
+ }
dp_link_train(encoder, connector);
if (ASIC_IS_DCE4(rdev))
atombios_dig_encoder_setup(encoder, ATOM_ENCODER_CMD_DP_VIDEO_ON);
}
+ if (radeon_encoder->devices & (ATOM_DEVICE_LCD_SUPPORT))
+ atombios_dig_transmitter_setup(encoder, ATOM_TRANSMITTER_ACTION_LCD_BLON, 0, 0);
break;
case DRM_MODE_DPMS_STANDBY:
case DRM_MODE_DPMS_SUSPEND:
case DRM_MODE_DPMS_OFF:
atombios_dig_transmitter_setup(encoder, ATOM_TRANSMITTER_ACTION_DISABLE_OUTPUT, 0, 0);
if (atombios_get_encoder_mode(encoder) == ATOM_ENCODER_MODE_DP) {
+ struct drm_connector *connector = radeon_get_connector_for_encoder(encoder);
+
if (ASIC_IS_DCE4(rdev))
atombios_dig_encoder_setup(encoder, ATOM_ENCODER_CMD_DP_VIDEO_OFF);
+ if (connector &&
+ (connector->connector_type == DRM_MODE_CONNECTOR_eDP)) {
+ struct radeon_connector *radeon_connector = to_radeon_connector(connector);
+ struct radeon_connector_atom_dig *radeon_dig_connector =
+ radeon_connector->con_priv;
+ atombios_set_edp_panel_power(connector,
+ ATOM_TRANSMITTER_ACTION_POWER_OFF);
+ radeon_dig_connector->edp_on = false;
+ }
}
+ if (radeon_encoder->devices & (ATOM_DEVICE_LCD_SUPPORT))
+ atombios_dig_transmitter_setup(encoder, ATOM_TRANSMITTER_ACTION_LCD_BLOFF, 0, 0);
break;
}
} else {
switch (mode) {
case DRM_MODE_DPMS_ON:
args.ucAction = ATOM_ENABLE;
+ atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args);
+ if (radeon_encoder->devices & (ATOM_DEVICE_LCD_SUPPORT)) {
+ args.ucAction = ATOM_LCD_BLON;
+ atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args);
+ }
break;
case DRM_MODE_DPMS_STANDBY:
case DRM_MODE_DPMS_SUSPEND:
case DRM_MODE_DPMS_OFF:
args.ucAction = ATOM_DISABLE;
+ atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args);
+ if (radeon_encoder->devices & (ATOM_DEVICE_LCD_SUPPORT)) {
+ args.ucAction = ATOM_LCD_BLOFF;
+ atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args);
+ }
break;
}
- atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args);
}
+
+ if (ext_encoder) {
+ int action;
+
+ switch (mode) {
+ case DRM_MODE_DPMS_ON:
+ default:
+ action = ATOM_ENABLE;
+ break;
+ case DRM_MODE_DPMS_STANDBY:
+ case DRM_MODE_DPMS_SUSPEND:
+ case DRM_MODE_DPMS_OFF:
+ action = ATOM_DISABLE;
+ break;
+ }
+ atombios_external_encoder_setup(encoder, ext_encoder, action);
+ }
+
radeon_atombios_encoder_dpms_scratch_regs(encoder, (mode == DRM_MODE_DPMS_ON) ? true : false);
}
@@ -1242,7 +1432,7 @@ atombios_set_encoder_crtc_source(struct drm_encoder *encoder)
break;
default:
DRM_ERROR("Unknown table version: %d, %d\n", frev, crev);
- break;
+ return;
}
atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args);
@@ -1357,6 +1547,7 @@ radeon_atom_encoder_mode_set(struct drm_encoder *encoder,
struct drm_device *dev = encoder->dev;
struct radeon_device *rdev = dev->dev_private;
struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
+ struct drm_encoder *ext_encoder = radeon_atom_get_external_encoder(encoder);
radeon_encoder->pixel_clock = adjusted_mode->clock;
@@ -1400,11 +1591,9 @@ radeon_atom_encoder_mode_set(struct drm_encoder *encoder,
}
break;
case ENCODER_OBJECT_ID_INTERNAL_DDI:
- atombios_ddia_setup(encoder, ATOM_ENABLE);
- break;
case ENCODER_OBJECT_ID_INTERNAL_DVO1:
case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DVO1:
- atombios_external_tmds_setup(encoder, ATOM_ENABLE);
+ atombios_dvo_setup(encoder, ATOM_ENABLE);
break;
case ENCODER_OBJECT_ID_INTERNAL_DAC1:
case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DAC1:
@@ -1419,6 +1608,11 @@ radeon_atom_encoder_mode_set(struct drm_encoder *encoder,
}
break;
}
+
+ if (ext_encoder) {
+ atombios_external_encoder_setup(encoder, ext_encoder, ATOM_ENABLE);
+ }
+
atombios_apply_encoder_quirks(encoder, adjusted_mode);
if (atombios_get_encoder_mode(encoder) == ATOM_ENCODER_MODE_HDMI) {
@@ -1520,6 +1714,7 @@ radeon_atom_dac_detect(struct drm_encoder *encoder, struct drm_connector *connec
static void radeon_atom_encoder_prepare(struct drm_encoder *encoder)
{
struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
+ struct drm_connector *connector = radeon_get_connector_for_encoder(encoder);
if (radeon_encoder->active_device &
(ATOM_DEVICE_DFP_SUPPORT | ATOM_DEVICE_LCD_SUPPORT)) {
@@ -1531,6 +1726,13 @@ static void radeon_atom_encoder_prepare(struct drm_encoder *encoder)
radeon_atom_output_lock(encoder, true);
radeon_atom_encoder_dpms(encoder, DRM_MODE_DPMS_OFF);
+ /* select the clock/data port if it uses a router */
+ if (connector) {
+ struct radeon_connector *radeon_connector = to_radeon_connector(connector);
+ if (radeon_connector->router.cd_valid)
+ radeon_router_select_cd_port(radeon_connector);
+ }
+
/* this is needed for the pll/ss setup to work correctly in some cases */
atombios_set_encoder_crtc_source(encoder);
}
@@ -1547,6 +1749,23 @@ static void radeon_atom_encoder_disable(struct drm_encoder *encoder)
struct radeon_device *rdev = dev->dev_private;
struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
struct radeon_encoder_atom_dig *dig;
+
+ /* check for pre-DCE3 cards with shared encoders;
+ * can't really use the links individually, so don't disable
+ * the encoder if it's in use by another connector
+ */
+ if (!ASIC_IS_DCE3(rdev)) {
+ struct drm_encoder *other_encoder;
+ struct radeon_encoder *other_radeon_encoder;
+
+ list_for_each_entry(other_encoder, &dev->mode_config.encoder_list, head) {
+ other_radeon_encoder = to_radeon_encoder(other_encoder);
+ if ((radeon_encoder->encoder_id == other_radeon_encoder->encoder_id) &&
+ drm_helper_encoder_in_use(other_encoder))
+ goto disable_done;
+ }
+ }
+
radeon_atom_encoder_dpms(encoder, DRM_MODE_DPMS_OFF);
switch (radeon_encoder->encoder_id) {
@@ -1570,11 +1789,9 @@ static void radeon_atom_encoder_disable(struct drm_encoder *encoder)
}
break;
case ENCODER_OBJECT_ID_INTERNAL_DDI:
- atombios_ddia_setup(encoder, ATOM_DISABLE);
- break;
case ENCODER_OBJECT_ID_INTERNAL_DVO1:
case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DVO1:
- atombios_external_tmds_setup(encoder, ATOM_DISABLE);
+ atombios_dvo_setup(encoder, ATOM_DISABLE);
break;
case ENCODER_OBJECT_ID_INTERNAL_DAC1:
case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DAC1:
@@ -1586,6 +1803,7 @@ static void radeon_atom_encoder_disable(struct drm_encoder *encoder)
break;
}
+disable_done:
if (radeon_encoder_is_digital(encoder)) {
if (atombios_get_encoder_mode(encoder) == ATOM_ENCODER_MODE_HDMI)
r600_hdmi_disable(encoder);
@@ -1595,6 +1813,53 @@ static void radeon_atom_encoder_disable(struct drm_encoder *encoder)
radeon_encoder->active_device = 0;
}
+/* these are handled by the primary encoders */
+static void radeon_atom_ext_prepare(struct drm_encoder *encoder)
+{
+
+}
+
+static void radeon_atom_ext_commit(struct drm_encoder *encoder)
+{
+
+}
+
+static void
+radeon_atom_ext_mode_set(struct drm_encoder *encoder,
+ struct drm_display_mode *mode,
+ struct drm_display_mode *adjusted_mode)
+{
+
+}
+
+static void radeon_atom_ext_disable(struct drm_encoder *encoder)
+{
+
+}
+
+static void
+radeon_atom_ext_dpms(struct drm_encoder *encoder, int mode)
+{
+
+}
+
+static bool radeon_atom_ext_mode_fixup(struct drm_encoder *encoder,
+ struct drm_display_mode *mode,
+ struct drm_display_mode *adjusted_mode)
+{
+ return true;
+}
+
+static const struct drm_encoder_helper_funcs radeon_atom_ext_helper_funcs = {
+ .dpms = radeon_atom_ext_dpms,
+ .mode_fixup = radeon_atom_ext_mode_fixup,
+ .prepare = radeon_atom_ext_prepare,
+ .mode_set = radeon_atom_ext_mode_set,
+ .commit = radeon_atom_ext_commit,
+ .disable = radeon_atom_ext_disable,
+ /* no detect for TMDS/LVDS yet */
+};
+
static const struct drm_encoder_helper_funcs radeon_atom_dig_helper_funcs = {
.dpms = radeon_atom_encoder_dpms,
.mode_fixup = radeon_atom_mode_fixup,
@@ -1704,6 +1969,7 @@ radeon_add_atom_encoder(struct drm_device *dev, uint32_t encoder_enum, uint32_t
radeon_encoder->devices = supported_device;
radeon_encoder->rmx_type = RMX_OFF;
radeon_encoder->underscan_type = UNDERSCAN_OFF;
+ radeon_encoder->is_ext_encoder = false;
switch (radeon_encoder->encoder_id) {
case ENCODER_OBJECT_ID_INTERNAL_LVDS:
@@ -1745,6 +2011,9 @@ radeon_add_atom_encoder(struct drm_device *dev, uint32_t encoder_enum, uint32_t
radeon_encoder->rmx_type = RMX_FULL;
drm_encoder_init(dev, encoder, &radeon_atom_enc_funcs, DRM_MODE_ENCODER_LVDS);
radeon_encoder->enc_priv = radeon_atombios_get_lvds_info(radeon_encoder);
+ } else if (radeon_encoder->devices & (ATOM_DEVICE_CRT_SUPPORT)) {
+ drm_encoder_init(dev, encoder, &radeon_atom_enc_funcs, DRM_MODE_ENCODER_DAC);
+ radeon_encoder->enc_priv = radeon_atombios_set_dig_info(radeon_encoder);
} else {
drm_encoder_init(dev, encoder, &radeon_atom_enc_funcs, DRM_MODE_ENCODER_TMDS);
radeon_encoder->enc_priv = radeon_atombios_set_dig_info(radeon_encoder);
@@ -1753,5 +2022,22 @@ radeon_add_atom_encoder(struct drm_device *dev, uint32_t encoder_enum, uint32_t
}
drm_encoder_helper_add(encoder, &radeon_atom_dig_helper_funcs);
break;
+ case ENCODER_OBJECT_ID_SI170B:
+ case ENCODER_OBJECT_ID_CH7303:
+ case ENCODER_OBJECT_ID_EXTERNAL_SDVOA:
+ case ENCODER_OBJECT_ID_EXTERNAL_SDVOB:
+ case ENCODER_OBJECT_ID_TITFP513:
+ case ENCODER_OBJECT_ID_VT1623:
+ case ENCODER_OBJECT_ID_HDMI_SI1930:
+ /* these are handled by the primary encoders */
+ radeon_encoder->is_ext_encoder = true;
+ if (radeon_encoder->devices & (ATOM_DEVICE_LCD_SUPPORT))
+ drm_encoder_init(dev, encoder, &radeon_atom_enc_funcs, DRM_MODE_ENCODER_LVDS);
+ else if (radeon_encoder->devices & (ATOM_DEVICE_CRT_SUPPORT))
+ drm_encoder_init(dev, encoder, &radeon_atom_enc_funcs, DRM_MODE_ENCODER_DAC);
+ else
+ drm_encoder_init(dev, encoder, &radeon_atom_enc_funcs, DRM_MODE_ENCODER_TMDS);
+ drm_encoder_helper_add(encoder, &radeon_atom_ext_helper_funcs);
+ break;
}
}
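
The new radeon_atom_get_external_encoder() above pairs an internal encoder with an external bridge chip (SI170B, CH7303, etc.) by walking the encoder list and intersecting device bitmasks. A minimal user-space sketch of that matching rule follows; struct enc, the device bit names and the plain array are invented stand-ins for the DRM structures, not kernel code.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Invented stand-in for the kernel encoder objects; only the fields the
 * matching rule needs are modelled. */
struct enc {
	const char *name;
	uint32_t devices;      /* bitmask of devices this encoder can drive */
	bool is_ext_encoder;   /* true for external bridge chips */
};

/* Return the first external encoder whose device mask overlaps the
 * internal encoder's mask, mirroring the list walk in the patch. */
static const struct enc *find_external(const struct enc *encs, int n,
				       const struct enc *internal)
{
	if (internal->is_ext_encoder)
		return NULL;   /* external encoders have no partner of their own */

	for (int i = 0; i < n; i++) {
		const struct enc *other = &encs[i];

		if (other == internal)
			continue;
		if (other->is_ext_encoder && (internal->devices & other->devices))
			return other;
	}
	return NULL;
}

int main(void)
{
	enum { DFP1 = 1 << 0, CRT1 = 1 << 1, LCD1 = 1 << 2 };  /* made-up bits */
	struct enc encs[] = {
		{ "UNIPHY", DFP1 | LCD1, false },
		{ "DAC1",   CRT1,        false },
		{ "SI170B", DFP1,        true  },
	};
	const struct enc *ext = find_external(encs, 3, &encs[0]);

	printf("UNIPHY pairs with %s\n", ext ? ext->name : "nothing");
	return 0;
}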
diff --git a/drivers/gpu/drm/radeon/radeon_fb.c b/drivers/gpu/drm/radeon/radeon_fb.c
index 40b0c087b592..efa211898fe6 100644
--- a/drivers/gpu/drm/radeon/radeon_fb.c
+++ b/drivers/gpu/drm/radeon/radeon_fb.c
@@ -59,6 +59,8 @@ static struct fb_ops radeonfb_ops = {
.fb_pan_display = drm_fb_helper_pan_display,
.fb_blank = drm_fb_helper_blank,
.fb_setcmap = drm_fb_helper_setcmap,
+ .fb_debug_enter = drm_fb_helper_debug_enter,
+ .fb_debug_leave = drm_fb_helper_debug_leave,
};
diff --git a/drivers/gpu/drm/radeon/radeon_fence.c b/drivers/gpu/drm/radeon/radeon_fence.c
index b1f9a81b5d1d..daacb281dfaf 100644
--- a/drivers/gpu/drm/radeon/radeon_fence.c
+++ b/drivers/gpu/drm/radeon/radeon_fence.c
@@ -72,7 +72,15 @@ static bool radeon_fence_poll_locked(struct radeon_device *rdev)
bool wake = false;
unsigned long cjiffies;
- seq = RREG32(rdev->fence_drv.scratch_reg);
+ if (rdev->wb.enabled) {
+ u32 scratch_index;
+ if (rdev->wb.use_event)
+ scratch_index = R600_WB_EVENT_OFFSET + rdev->fence_drv.scratch_reg - rdev->scratch.reg_base;
+ else
+ scratch_index = RADEON_WB_SCRATCH_OFFSET + rdev->fence_drv.scratch_reg - rdev->scratch.reg_base;
+ seq = rdev->wb.wb[scratch_index/4];
+ } else
+ seq = RREG32(rdev->fence_drv.scratch_reg);
if (seq != rdev->fence_drv.last_seq) {
rdev->fence_drv.last_seq = seq;
rdev->fence_drv.last_jiffies = jiffies;
@@ -232,7 +240,8 @@ retry:
*/
if (seq == rdev->fence_drv.last_seq && radeon_gpu_is_lockup(rdev)) {
/* good news we believe it's a lockup */
- WARN(1, "GPU lockup (waiting for 0x%08X last fence id 0x%08X)\n", fence->seq, seq);
+ WARN(1, "GPU lockup (waiting for 0x%08X last fence id 0x%08X)\n",
+ fence->seq, seq);
/* FIXME: what should we do ? marking everyone
* as signaled for now
*/
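
With writeback enabled, radeon_fence_poll_locked() above now reads the fence sequence from a CPU-visible buffer slot instead of an MMIO scratch register, deriving the slot from the scratch register's offset. A small sketch of that index calculation; the offsets, register values and buffer size below are made up for illustration only.

#include <stdint.h>
#include <stdio.h>

/* Hypothetical byte offsets standing in for the kernel constants. */
#define WB_EVENT_OFFSET   3072
#define WB_SCRATCH_OFFSET 0

/* wb[] models the CPU-visible writeback buffer of 32-bit words. */
static uint32_t read_seq(const uint32_t *wb, int use_event,
			 uint32_t scratch_reg, uint32_t reg_base)
{
	uint32_t byte_index;

	if (use_event)
		byte_index = WB_EVENT_OFFSET + scratch_reg - reg_base;
	else
		byte_index = WB_SCRATCH_OFFSET + scratch_reg - reg_base;

	return wb[byte_index / 4];   /* byte offset -> 32-bit slot */
}

int main(void)
{
	uint32_t wb[1024] = { 0 };
	uint32_t reg_base = 0x15e8;            /* invented scratch base */
	uint32_t scratch_reg = reg_base + 8;   /* second scratch register */

	wb[(WB_EVENT_OFFSET + 8) / 4] = 42;    /* pretend the GPU wrote seq 42 */
	printf("seq = %u\n", (unsigned)read_seq(wb, 1, scratch_reg, reg_base));
	return 0;
}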
diff --git a/drivers/gpu/drm/radeon/radeon_gart.c b/drivers/gpu/drm/radeon/radeon_gart.c
index e65b90317fab..65016117d95f 100644
--- a/drivers/gpu/drm/radeon/radeon_gart.c
+++ b/drivers/gpu/drm/radeon/radeon_gart.c
@@ -79,8 +79,8 @@ int radeon_gart_table_vram_alloc(struct radeon_device *rdev)
if (rdev->gart.table.vram.robj == NULL) {
r = radeon_bo_create(rdev, NULL, rdev->gart.table_size,
- true, RADEON_GEM_DOMAIN_VRAM,
- &rdev->gart.table.vram.robj);
+ PAGE_SIZE, true, RADEON_GEM_DOMAIN_VRAM,
+ &rdev->gart.table.vram.robj);
if (r) {
return r;
}
diff --git a/drivers/gpu/drm/radeon/radeon_gem.c b/drivers/gpu/drm/radeon/radeon_gem.c
index d1e595d91723..df95eb83dac6 100644
--- a/drivers/gpu/drm/radeon/radeon_gem.c
+++ b/drivers/gpu/drm/radeon/radeon_gem.c
@@ -67,7 +67,7 @@ int radeon_gem_object_create(struct radeon_device *rdev, int size,
if (alignment < PAGE_SIZE) {
alignment = PAGE_SIZE;
}
- r = radeon_bo_create(rdev, gobj, size, kernel, initial_domain, &robj);
+ r = radeon_bo_create(rdev, gobj, size, alignment, kernel, initial_domain, &robj);
if (r) {
if (r != -ERESTARTSYS)
DRM_ERROR("Failed to allocate GEM object (%d, %d, %u, %d)\n",
diff --git a/drivers/gpu/drm/radeon/radeon_i2c.c b/drivers/gpu/drm/radeon/radeon_i2c.c
index 6a13ee38a5b9..ded2a45bc95c 100644
--- a/drivers/gpu/drm/radeon/radeon_i2c.c
+++ b/drivers/gpu/drm/radeon/radeon_i2c.c
@@ -53,8 +53,8 @@ bool radeon_ddc_probe(struct radeon_connector *radeon_connector)
};
/* on hw with routers, select right port */
- if (radeon_connector->router.valid)
- radeon_router_select_port(radeon_connector);
+ if (radeon_connector->router.ddc_valid)
+ radeon_router_select_ddc_port(radeon_connector);
ret = i2c_transfer(&radeon_connector->ddc_bus->adapter, msgs, 2);
if (ret == 2)
@@ -896,7 +896,8 @@ struct radeon_i2c_chan *radeon_i2c_create(struct drm_device *dev,
((rdev->family <= CHIP_RS480) ||
((rdev->family >= CHIP_RV515) && (rdev->family <= CHIP_R580))))) {
/* set the radeon hw i2c adapter */
- sprintf(i2c->adapter.name, "Radeon i2c hw bus %s", name);
+ snprintf(i2c->adapter.name, sizeof(i2c->adapter.name),
+ "Radeon i2c hw bus %s", name);
i2c->adapter.algo = &radeon_i2c_algo;
ret = i2c_add_adapter(&i2c->adapter);
if (ret) {
@@ -905,7 +906,8 @@ struct radeon_i2c_chan *radeon_i2c_create(struct drm_device *dev,
}
} else {
/* set the radeon bit adapter */
- sprintf(i2c->adapter.name, "Radeon i2c bit bus %s", name);
+ snprintf(i2c->adapter.name, sizeof(i2c->adapter.name),
+ "Radeon i2c bit bus %s", name);
i2c->adapter.algo_data = &i2c->algo.bit;
i2c->algo.bit.pre_xfer = pre_xfer;
i2c->algo.bit.post_xfer = post_xfer;
@@ -946,6 +948,8 @@ struct radeon_i2c_chan *radeon_i2c_create_dp(struct drm_device *dev,
i2c->rec = *rec;
i2c->adapter.owner = THIS_MODULE;
i2c->dev = dev;
+ snprintf(i2c->adapter.name, sizeof(i2c->adapter.name),
+ "Radeon aux bus %s", name);
i2c_set_adapdata(&i2c->adapter, i2c);
i2c->adapter.algo_data = &i2c->algo.dp;
i2c->algo.dp.aux_ch = radeon_dp_i2c_aux_ch;
@@ -1084,26 +1088,51 @@ void radeon_i2c_put_byte(struct radeon_i2c_chan *i2c_bus,
addr, val);
}
-/* router switching */
-void radeon_router_select_port(struct radeon_connector *radeon_connector)
+/* ddc router switching */
+void radeon_router_select_ddc_port(struct radeon_connector *radeon_connector)
{
u8 val;
- if (!radeon_connector->router.valid)
+ if (!radeon_connector->router.ddc_valid)
return;
radeon_i2c_get_byte(radeon_connector->router_bus,
radeon_connector->router.i2c_addr,
0x3, &val);
- val &= radeon_connector->router.mux_control_pin;
+ val &= ~radeon_connector->router.ddc_mux_control_pin;
radeon_i2c_put_byte(radeon_connector->router_bus,
radeon_connector->router.i2c_addr,
0x3, val);
radeon_i2c_get_byte(radeon_connector->router_bus,
radeon_connector->router.i2c_addr,
0x1, &val);
- val &= radeon_connector->router.mux_control_pin;
- val |= radeon_connector->router.mux_state;
+ val &= ~radeon_connector->router.ddc_mux_control_pin;
+ val |= radeon_connector->router.ddc_mux_state;
+ radeon_i2c_put_byte(radeon_connector->router_bus,
+ radeon_connector->router.i2c_addr,
+ 0x1, val);
+}
+
+/* clock/data router switching */
+void radeon_router_select_cd_port(struct radeon_connector *radeon_connector)
+{
+ u8 val;
+
+ if (!radeon_connector->router.cd_valid)
+ return;
+
+ radeon_i2c_get_byte(radeon_connector->router_bus,
+ radeon_connector->router.i2c_addr,
+ 0x3, &val);
+ val &= ~radeon_connector->router.cd_mux_control_pin;
+ radeon_i2c_put_byte(radeon_connector->router_bus,
+ radeon_connector->router.i2c_addr,
+ 0x3, val);
+ radeon_i2c_get_byte(radeon_connector->router_bus,
+ radeon_connector->router.i2c_addr,
+ 0x1, &val);
+ val &= ~radeon_connector->router.cd_mux_control_pin;
+ val |= radeon_connector->router.cd_mux_state;
radeon_i2c_put_byte(radeon_connector->router_bus,
radeon_connector->router.i2c_addr,
0x1, val);
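
Both router helpers follow the same read-modify-write pattern on the mux chip: clear the control-pin bits in registers 0x3 and 0x1, then OR the requested mux state into register 0x1 (the old code masked with the pin bits instead of their complement, which this rework also corrects). A sketch of that pattern against an in-memory register file; the array stands in for the real radeon_i2c_get_byte()/put_byte() traffic.

#include <stdint.h>
#include <stdio.h>

/* regs[] is a stand-in for the mux chip's register file. */
static void select_port(uint8_t regs[4], uint8_t control_pin, uint8_t state)
{
	/* Register 0x3: clear the routed control pins. */
	regs[3] &= (uint8_t)~control_pin;

	/* Register 0x1: clear the control pins, then set the wanted state. */
	regs[1] &= (uint8_t)~control_pin;
	regs[1] |= state;
}

int main(void)
{
	uint8_t regs[4] = { 0x00, 0xff, 0x00, 0xff };

	select_port(regs, /*control_pin=*/0x03, /*state=*/0x01);
	printf("reg1=0x%02x reg3=0x%02x\n", regs[1], regs[3]);  /* 0xfd 0xfc */
	return 0;
}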
diff --git a/drivers/gpu/drm/radeon/radeon_irq.c b/drivers/gpu/drm/radeon/radeon_irq.c
index 2f349a300195..465746bd51b7 100644
--- a/drivers/gpu/drm/radeon/radeon_irq.c
+++ b/drivers/gpu/drm/radeon/radeon_irq.c
@@ -76,7 +76,7 @@ int radeon_enable_vblank(struct drm_device *dev, int crtc)
default:
DRM_ERROR("tried to enable vblank on non-existent crtc %d\n",
crtc);
- return EINVAL;
+ return -EINVAL;
}
} else {
switch (crtc) {
@@ -89,7 +89,7 @@ int radeon_enable_vblank(struct drm_device *dev, int crtc)
default:
DRM_ERROR("tried to enable vblank on non-existent crtc %d\n",
crtc);
- return EINVAL;
+ return -EINVAL;
}
}
diff --git a/drivers/gpu/drm/radeon/radeon_legacy_crtc.c b/drivers/gpu/drm/radeon/radeon_legacy_crtc.c
index 305049afde15..ace2e6384d40 100644
--- a/drivers/gpu/drm/radeon/radeon_legacy_crtc.c
+++ b/drivers/gpu/drm/radeon/radeon_legacy_crtc.c
@@ -348,10 +348,25 @@ void radeon_crtc_dpms(struct drm_crtc *crtc, int mode)
int radeon_crtc_set_base(struct drm_crtc *crtc, int x, int y,
struct drm_framebuffer *old_fb)
{
+ return radeon_crtc_do_set_base(crtc, old_fb, x, y, 0);
+}
+
+int radeon_crtc_set_base_atomic(struct drm_crtc *crtc,
+ struct drm_framebuffer *fb,
+ int x, int y, enum mode_set_atomic state)
+{
+ return radeon_crtc_do_set_base(crtc, fb, x, y, 1);
+}
+
+int radeon_crtc_do_set_base(struct drm_crtc *crtc,
+ struct drm_framebuffer *fb,
+ int x, int y, int atomic)
+{
struct drm_device *dev = crtc->dev;
struct radeon_device *rdev = dev->dev_private;
struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc);
struct radeon_framebuffer *radeon_fb;
+ struct drm_framebuffer *target_fb;
struct drm_gem_object *obj;
struct radeon_bo *rbo;
uint64_t base;
@@ -364,14 +379,21 @@ int radeon_crtc_set_base(struct drm_crtc *crtc, int x, int y,
DRM_DEBUG_KMS("\n");
/* no fb bound */
- if (!crtc->fb) {
+ if (!atomic && !crtc->fb) {
DRM_DEBUG_KMS("No FB bound\n");
return 0;
}
- radeon_fb = to_radeon_framebuffer(crtc->fb);
+ if (atomic) {
+ radeon_fb = to_radeon_framebuffer(fb);
+ target_fb = fb;
+ } else {
+ radeon_fb = to_radeon_framebuffer(crtc->fb);
+ target_fb = crtc->fb;
+ }
- switch (crtc->fb->bits_per_pixel) {
+ switch (target_fb->bits_per_pixel) {
case 8:
format = 2;
break;
@@ -415,10 +437,10 @@ int radeon_crtc_set_base(struct drm_crtc *crtc, int x, int y,
crtc_offset_cntl = 0;
- pitch_pixels = crtc->fb->pitch / (crtc->fb->bits_per_pixel / 8);
- crtc_pitch = (((pitch_pixels * crtc->fb->bits_per_pixel) +
- ((crtc->fb->bits_per_pixel * 8) - 1)) /
- (crtc->fb->bits_per_pixel * 8));
+ pitch_pixels = target_fb->pitch / (target_fb->bits_per_pixel / 8);
+ crtc_pitch = (((pitch_pixels * target_fb->bits_per_pixel) +
+ ((target_fb->bits_per_pixel * 8) - 1)) /
+ (target_fb->bits_per_pixel * 8));
crtc_pitch |= crtc_pitch << 16;
@@ -443,14 +465,14 @@ int radeon_crtc_set_base(struct drm_crtc *crtc, int x, int y,
crtc_tile_x0_y0 = x | (y << 16);
base &= ~0x7ff;
} else {
- int byteshift = crtc->fb->bits_per_pixel >> 4;
+ int byteshift = target_fb->bits_per_pixel >> 4;
int tile_addr = (((y >> 3) * pitch_pixels + x) >> (8 - byteshift)) << 11;
base += tile_addr + ((x << byteshift) % 256) + ((y % 8) << 8);
crtc_offset_cntl |= (y % 16);
}
} else {
int offset = y * pitch_pixels + x;
- switch (crtc->fb->bits_per_pixel) {
+ switch (target_fb->bits_per_pixel) {
case 8:
offset *= 1;
break;
@@ -496,8 +518,8 @@ int radeon_crtc_set_base(struct drm_crtc *crtc, int x, int y,
WREG32(RADEON_CRTC_OFFSET + radeon_crtc->crtc_offset, crtc_offset);
WREG32(RADEON_CRTC_PITCH + radeon_crtc->crtc_offset, crtc_pitch);
- if (old_fb && old_fb != crtc->fb) {
- radeon_fb = to_radeon_framebuffer(old_fb);
+ if (!atomic && fb && fb != crtc->fb) {
+ radeon_fb = to_radeon_framebuffer(fb);
rbo = radeon_fb->obj->driver_private;
r = radeon_bo_reserve(rbo, false);
if (unlikely(r != 0))
@@ -717,10 +739,6 @@ static void radeon_set_pll(struct drm_crtc *crtc, struct drm_display_mode *mode)
pll = &rdev->clock.p1pll;
pll->flags = RADEON_PLL_LEGACY;
- if (radeon_new_pll == 1)
- pll->algo = PLL_ALGO_NEW;
- else
- pll->algo = PLL_ALGO_LEGACY;
if (mode->clock > 200000) /* range limits??? */
pll->flags |= RADEON_PLL_PREFER_HIGH_FB_DIV;
@@ -1040,6 +1058,7 @@ static const struct drm_crtc_helper_funcs legacy_helper_funcs = {
.mode_fixup = radeon_crtc_mode_fixup,
.mode_set = radeon_crtc_mode_set,
.mode_set_base = radeon_crtc_set_base,
+ .mode_set_base_atomic = radeon_crtc_set_base_atomic,
.prepare = radeon_crtc_prepare,
.commit = radeon_crtc_commit,
.load_lut = radeon_crtc_load_lut,
diff --git a/drivers/gpu/drm/radeon/radeon_legacy_encoders.c b/drivers/gpu/drm/radeon/radeon_legacy_encoders.c
index 0b8397000f4c..59f834ba283d 100644
--- a/drivers/gpu/drm/radeon/radeon_legacy_encoders.c
+++ b/drivers/gpu/drm/radeon/radeon_legacy_encoders.c
@@ -670,7 +670,7 @@ static void radeon_legacy_tmds_ext_mode_set(struct drm_encoder *encoder,
if (rdev->is_atom_bios) {
radeon_encoder->pixel_clock = adjusted_mode->clock;
- atombios_external_tmds_setup(encoder, ATOM_ENABLE);
+ atombios_dvo_setup(encoder, ATOM_ENABLE);
fp2_gen_cntl = RREG32(RADEON_FP2_GEN_CNTL);
} else {
fp2_gen_cntl = RREG32(RADEON_FP2_GEN_CNTL);
diff --git a/drivers/gpu/drm/radeon/radeon_mode.h b/drivers/gpu/drm/radeon/radeon_mode.h
index 454c1dc7ea45..e301c6f9e059 100644
--- a/drivers/gpu/drm/radeon/radeon_mode.h
+++ b/drivers/gpu/drm/radeon/radeon_mode.h
@@ -35,6 +35,7 @@
#include <drm_edid.h>
#include <drm_dp_helper.h>
#include <drm_fixed.h>
+#include <drm_crtc_helper.h>
#include <linux/i2c.h>
#include <linux/i2c-algo-bit.h>
@@ -149,12 +150,6 @@ struct radeon_tmds_pll {
#define RADEON_PLL_USE_POST_DIV (1 << 12)
#define RADEON_PLL_IS_LCD (1 << 13)
-/* pll algo */
-enum radeon_pll_algo {
- PLL_ALGO_LEGACY,
- PLL_ALGO_NEW
-};
-
struct radeon_pll {
/* reference frequency */
uint32_t reference_freq;
@@ -187,8 +182,6 @@ struct radeon_pll {
/* pll id */
uint32_t id;
- /* pll algo */
- enum radeon_pll_algo algo;
};
struct radeon_i2c_chan {
@@ -240,6 +233,8 @@ struct radeon_mode_info {
struct drm_property *tmds_pll_property;
/* underscan */
struct drm_property *underscan_property;
+ struct drm_property *underscan_hborder_property;
+ struct drm_property *underscan_vborder_property;
/* hardcoded DFP edid from BIOS */
struct edid *bios_hardcoded_edid;
@@ -335,22 +330,24 @@ struct radeon_encoder_ext_tmds {
struct radeon_atom_ss {
uint16_t percentage;
uint8_t type;
- uint8_t step;
+ uint16_t step;
uint8_t delay;
uint8_t range;
uint8_t refdiv;
+ /* asic_ss */
+ uint16_t rate;
+ uint16_t amount;
};
struct radeon_encoder_atom_dig {
bool linkb;
/* atom dig */
bool coherent_mode;
- int dig_encoder; /* -1 disabled, 0 DIGA, 1 DIGB */
- /* atom lvds */
- uint32_t lvds_misc;
+ int dig_encoder; /* -1 disabled, 0 DIGA, 1 DIGB, etc. */
+ /* atom lvds/edp */
+ uint32_t lcd_misc;
uint16_t panel_pwr_delay;
- enum radeon_pll_algo pll_algo;
- struct radeon_atom_ss *ss;
+ uint32_t lcd_ss_id;
/* panel mode */
struct drm_display_mode native_mode;
};
@@ -369,6 +366,8 @@ struct radeon_encoder {
uint32_t pixel_clock;
enum radeon_rmx_type rmx_type;
enum radeon_underscan_type underscan_type;
+ uint32_t underscan_hborder;
+ uint32_t underscan_vborder;
struct drm_display_mode native_mode;
void *enc_priv;
int audio_polling_active;
@@ -376,6 +375,7 @@ struct radeon_encoder {
int hdmi_config_offset;
int hdmi_audio_workaround;
int hdmi_buffer_status;
+ bool is_ext_encoder;
};
struct radeon_connector_atom_dig {
@@ -386,6 +386,7 @@ struct radeon_connector_atom_dig {
u8 dp_sink_type;
int dp_clock;
int dp_lane_count;
+ bool edp_on;
};
struct radeon_gpio_rec {
@@ -402,13 +403,19 @@ struct radeon_hpd {
};
struct radeon_router {
- bool valid;
u32 router_id;
struct radeon_i2c_bus_rec i2c_info;
u8 i2c_addr;
- u8 mux_type;
- u8 mux_control_pin;
- u8 mux_state;
+ /* i2c mux */
+ bool ddc_valid;
+ u8 ddc_mux_type;
+ u8 ddc_mux_control_pin;
+ u8 ddc_mux_state;
+ /* clock/data mux */
+ bool cd_valid;
+ u8 cd_mux_type;
+ u8 cd_mux_control_pin;
+ u8 cd_mux_state;
};
struct radeon_connector {
@@ -435,6 +442,11 @@ struct radeon_framebuffer {
struct drm_gem_object *obj;
};
+/* radeon_get_crtc_scanoutpos() return flags */
+#define RADEON_SCANOUTPOS_VALID (1 << 0)
+#define RADEON_SCANOUTPOS_INVBL (1 << 1)
+#define RADEON_SCANOUTPOS_ACCURATE (1 << 2)
+
extern enum radeon_tv_std
radeon_combios_get_tv_info(struct radeon_device *rdev);
extern enum radeon_tv_std
@@ -484,12 +496,20 @@ extern void radeon_i2c_put_byte(struct radeon_i2c_chan *i2c,
u8 slave_addr,
u8 addr,
u8 val);
-extern void radeon_router_select_port(struct radeon_connector *radeon_connector);
+extern void radeon_router_select_ddc_port(struct radeon_connector *radeon_connector);
+extern void radeon_router_select_cd_port(struct radeon_connector *radeon_connector);
extern bool radeon_ddc_probe(struct radeon_connector *radeon_connector);
extern int radeon_ddc_get_modes(struct radeon_connector *radeon_connector);
extern struct drm_encoder *radeon_best_encoder(struct drm_connector *connector);
+extern bool radeon_atombios_get_ppll_ss_info(struct radeon_device *rdev,
+ struct radeon_atom_ss *ss,
+ int id);
+extern bool radeon_atombios_get_asic_ss_info(struct radeon_device *rdev,
+ struct radeon_atom_ss *ss,
+ int id, u32 clock);
+
extern void radeon_compute_pll(struct radeon_pll *pll,
uint64_t freq,
uint32_t *dot_clock_p,
@@ -505,14 +525,19 @@ struct drm_encoder *radeon_encoder_legacy_primary_dac_add(struct drm_device *dev
struct drm_encoder *radeon_encoder_legacy_tv_dac_add(struct drm_device *dev, int bios_index, int with_tv);
struct drm_encoder *radeon_encoder_legacy_tmds_int_add(struct drm_device *dev, int bios_index);
struct drm_encoder *radeon_encoder_legacy_tmds_ext_add(struct drm_device *dev, int bios_index);
-extern void atombios_external_tmds_setup(struct drm_encoder *encoder, int action);
+extern void atombios_dvo_setup(struct drm_encoder *encoder, int action);
extern void atombios_digital_setup(struct drm_encoder *encoder, int action);
extern int atombios_get_encoder_mode(struct drm_encoder *encoder);
+extern void atombios_set_edp_panel_power(struct drm_connector *connector, int action);
extern void radeon_encoder_set_active_device(struct drm_encoder *encoder);
extern void radeon_crtc_load_lut(struct drm_crtc *crtc);
extern int atombios_crtc_set_base(struct drm_crtc *crtc, int x, int y,
struct drm_framebuffer *old_fb);
+extern int atombios_crtc_set_base_atomic(struct drm_crtc *crtc,
+ struct drm_framebuffer *fb,
+ int x, int y,
+ enum mode_set_atomic state);
extern int atombios_crtc_mode_set(struct drm_crtc *crtc,
struct drm_display_mode *mode,
struct drm_display_mode *adjusted_mode,
@@ -522,7 +547,13 @@ extern void atombios_crtc_dpms(struct drm_crtc *crtc, int mode);
extern int radeon_crtc_set_base(struct drm_crtc *crtc, int x, int y,
struct drm_framebuffer *old_fb);
-
+extern int radeon_crtc_set_base_atomic(struct drm_crtc *crtc,
+ struct drm_framebuffer *fb,
+ int x, int y,
+ enum mode_set_atomic state);
+extern int radeon_crtc_do_set_base(struct drm_crtc *crtc,
+ struct drm_framebuffer *fb,
+ int x, int y, int atomic);
extern int radeon_crtc_cursor_set(struct drm_crtc *crtc,
struct drm_file *file_priv,
uint32_t handle,
@@ -531,6 +562,8 @@ extern int radeon_crtc_cursor_set(struct drm_crtc *crtc,
extern int radeon_crtc_cursor_move(struct drm_crtc *crtc,
int x, int y);
+extern int radeon_get_crtc_scanoutpos(struct radeon_device *rdev, int crtc, int *vpos, int *hpos);
+
extern bool radeon_combios_check_hardcoded_edid(struct radeon_device *rdev);
extern struct edid *
radeon_combios_get_hardcoded_edid(struct radeon_device *rdev);
diff --git a/drivers/gpu/drm/radeon/radeon_object.c b/drivers/gpu/drm/radeon/radeon_object.c
index b3b5306bb578..a598d0049aa5 100644
--- a/drivers/gpu/drm/radeon/radeon_object.c
+++ b/drivers/gpu/drm/radeon/radeon_object.c
@@ -69,7 +69,7 @@ void radeon_ttm_placement_from_domain(struct radeon_bo *rbo, u32 domain)
u32 c = 0;
rbo->placement.fpfn = 0;
- rbo->placement.lpfn = rbo->rdev->mc.active_vram_size >> PAGE_SHIFT;
+ rbo->placement.lpfn = 0;
rbo->placement.placement = rbo->placements;
rbo->placement.busy_placement = rbo->placements;
if (domain & RADEON_GEM_DOMAIN_VRAM)
@@ -86,11 +86,13 @@ void radeon_ttm_placement_from_domain(struct radeon_bo *rbo, u32 domain)
}
int radeon_bo_create(struct radeon_device *rdev, struct drm_gem_object *gobj,
- unsigned long size, bool kernel, u32 domain,
- struct radeon_bo **bo_ptr)
+ unsigned long size, int byte_align, bool kernel, u32 domain,
+ struct radeon_bo **bo_ptr)
{
struct radeon_bo *bo;
enum ttm_bo_type type;
+ unsigned long page_align = roundup(byte_align, PAGE_SIZE) >> PAGE_SHIFT;
+ unsigned long max_size = 0;
int r;
if (unlikely(rdev->mman.bdev.dev_mapping == NULL)) {
@@ -102,6 +104,16 @@ int radeon_bo_create(struct radeon_device *rdev, struct drm_gem_object *gobj,
type = ttm_bo_type_device;
}
*bo_ptr = NULL;
+
+ /* maximum bo size is the minimum of visible vram and gtt size */
+ max_size = min(rdev->mc.visible_vram_size, rdev->mc.gtt_size);
+ if ((page_align << PAGE_SHIFT) >= max_size) {
+ printk(KERN_WARNING "%s:%d alloc size %ldMB bigger than %ldMB limit\n",
+ __func__, __LINE__, page_align >> (20 - PAGE_SHIFT), max_size >> 20);
+ return -ENOMEM;
+ }
+
+retry:
bo = kzalloc(sizeof(struct radeon_bo), GFP_KERNEL);
if (bo == NULL)
return -ENOMEM;
@@ -109,13 +121,11 @@ int radeon_bo_create(struct radeon_device *rdev, struct drm_gem_object *gobj,
bo->gobj = gobj;
bo->surface_reg = -1;
INIT_LIST_HEAD(&bo->list);
-
-retry:
radeon_ttm_placement_from_domain(bo, domain);
/* Kernel allocation are uninterruptible */
mutex_lock(&rdev->vram_mutex);
r = ttm_bo_init(&rdev->mman.bdev, &bo->tbo, size, type,
- &bo->placement, 0, 0, !kernel, NULL, size,
+ &bo->placement, page_align, 0, !kernel, NULL, size,
&radeon_ttm_bo_destroy);
mutex_unlock(&rdev->vram_mutex);
if (unlikely(r != 0)) {
@@ -435,7 +445,7 @@ int radeon_bo_get_surface_reg(struct radeon_bo *bo)
out:
radeon_set_surface_reg(rdev, i, bo->tiling_flags, bo->pitch,
- bo->tbo.mem.mm_node->start << PAGE_SHIFT,
+ bo->tbo.mem.start << PAGE_SHIFT,
bo->tbo.num_pages << PAGE_SHIFT);
return 0;
}
@@ -532,7 +542,7 @@ int radeon_bo_fault_reserve_notify(struct ttm_buffer_object *bo)
rdev = rbo->rdev;
if (bo->mem.mem_type == TTM_PL_VRAM) {
size = bo->mem.num_pages << PAGE_SHIFT;
- offset = bo->mem.mm_node->start << PAGE_SHIFT;
+ offset = bo->mem.start << PAGE_SHIFT;
if ((offset + size) > rdev->mc.visible_vram_size) {
/* hurrah the memory is not visible ! */
radeon_ttm_placement_from_domain(rbo, RADEON_GEM_DOMAIN_VRAM);
@@ -540,7 +550,7 @@ int radeon_bo_fault_reserve_notify(struct ttm_buffer_object *bo)
r = ttm_bo_validate(bo, &rbo->placement, false, true, false);
if (unlikely(r != 0))
return r;
- offset = bo->mem.mm_node->start << PAGE_SHIFT;
+ offset = bo->mem.start << PAGE_SHIFT;
/* this should not happen */
if ((offset + size) > rdev->mc.visible_vram_size)
return -EINVAL;
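
radeon_bo_create() now takes a byte alignment, rounds it up to whole pages for ttm_bo_init(), and refuses requests that would not fit in the smaller of visible VRAM and the GTT. A user-space sketch of those two calculations, assuming a 4 KiB page and invented pool sizes; it is not the kernel helper itself.

#include <stdio.h>

#define PAGE_SHIFT 12
#define PAGE_SIZE  (1UL << PAGE_SHIFT)

/* Convert a byte alignment into whole pages, as the patch does with
 * roundup(byte_align, PAGE_SIZE) >> PAGE_SHIFT. */
static unsigned long pages_for_align(unsigned long byte_align)
{
	return (byte_align + PAGE_SIZE - 1) / PAGE_SIZE;
}

/* The allocation must fit in min(visible VRAM, GTT). */
static int fits(unsigned long bytes, unsigned long visible_vram,
		unsigned long gtt_size)
{
	unsigned long max_size = visible_vram < gtt_size ? visible_vram : gtt_size;

	return bytes < max_size;
}

int main(void)
{
	unsigned long visible_vram = 256UL << 20;   /* 256 MiB, made up */
	unsigned long gtt = 512UL << 20;            /* 512 MiB, made up */

	printf("8 KiB alignment -> %lu page(s)\n", pages_for_align(8192));
	printf("1 GiB BO allowed? %s\n",
	       fits(1UL << 30, visible_vram, gtt) ? "yes" : "no");
	return 0;
}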
diff --git a/drivers/gpu/drm/radeon/radeon_object.h b/drivers/gpu/drm/radeon/radeon_object.h
index 3481bc7f6f58..d143702b244a 100644
--- a/drivers/gpu/drm/radeon/radeon_object.h
+++ b/drivers/gpu/drm/radeon/radeon_object.h
@@ -137,9 +137,10 @@ static inline int radeon_bo_wait(struct radeon_bo *bo, u32 *mem_type,
}
extern int radeon_bo_create(struct radeon_device *rdev,
- struct drm_gem_object *gobj, unsigned long size,
- bool kernel, u32 domain,
- struct radeon_bo **bo_ptr);
+ struct drm_gem_object *gobj, unsigned long size,
+ int byte_align,
+ bool kernel, u32 domain,
+ struct radeon_bo **bo_ptr);
extern int radeon_bo_kmap(struct radeon_bo *bo, void **ptr);
extern void radeon_bo_kunmap(struct radeon_bo *bo);
extern void radeon_bo_unref(struct radeon_bo **bo);
diff --git a/drivers/gpu/drm/radeon/radeon_pm.c b/drivers/gpu/drm/radeon/radeon_pm.c
index f87efec76236..8c9b2ef32c68 100644
--- a/drivers/gpu/drm/radeon/radeon_pm.c
+++ b/drivers/gpu/drm/radeon/radeon_pm.c
@@ -712,73 +712,21 @@ void radeon_pm_compute_clocks(struct radeon_device *rdev)
static bool radeon_pm_in_vbl(struct radeon_device *rdev)
{
- u32 stat_crtc = 0, vbl = 0, position = 0;
+ int crtc, vpos, hpos, vbl_status;
bool in_vbl = true;
- if (ASIC_IS_DCE4(rdev)) {
- if (rdev->pm.active_crtcs & (1 << 0)) {
- vbl = RREG32(EVERGREEN_CRTC_V_BLANK_START_END +
- EVERGREEN_CRTC0_REGISTER_OFFSET) & 0xfff;
- position = RREG32(EVERGREEN_CRTC_STATUS_POSITION +
- EVERGREEN_CRTC0_REGISTER_OFFSET) & 0xfff;
- }
- if (rdev->pm.active_crtcs & (1 << 1)) {
- vbl = RREG32(EVERGREEN_CRTC_V_BLANK_START_END +
- EVERGREEN_CRTC1_REGISTER_OFFSET) & 0xfff;
- position = RREG32(EVERGREEN_CRTC_STATUS_POSITION +
- EVERGREEN_CRTC1_REGISTER_OFFSET) & 0xfff;
- }
- if (rdev->pm.active_crtcs & (1 << 2)) {
- vbl = RREG32(EVERGREEN_CRTC_V_BLANK_START_END +
- EVERGREEN_CRTC2_REGISTER_OFFSET) & 0xfff;
- position = RREG32(EVERGREEN_CRTC_STATUS_POSITION +
- EVERGREEN_CRTC2_REGISTER_OFFSET) & 0xfff;
- }
- if (rdev->pm.active_crtcs & (1 << 3)) {
- vbl = RREG32(EVERGREEN_CRTC_V_BLANK_START_END +
- EVERGREEN_CRTC3_REGISTER_OFFSET) & 0xfff;
- position = RREG32(EVERGREEN_CRTC_STATUS_POSITION +
- EVERGREEN_CRTC3_REGISTER_OFFSET) & 0xfff;
- }
- if (rdev->pm.active_crtcs & (1 << 4)) {
- vbl = RREG32(EVERGREEN_CRTC_V_BLANK_START_END +
- EVERGREEN_CRTC4_REGISTER_OFFSET) & 0xfff;
- position = RREG32(EVERGREEN_CRTC_STATUS_POSITION +
- EVERGREEN_CRTC4_REGISTER_OFFSET) & 0xfff;
- }
- if (rdev->pm.active_crtcs & (1 << 5)) {
- vbl = RREG32(EVERGREEN_CRTC_V_BLANK_START_END +
- EVERGREEN_CRTC5_REGISTER_OFFSET) & 0xfff;
- position = RREG32(EVERGREEN_CRTC_STATUS_POSITION +
- EVERGREEN_CRTC5_REGISTER_OFFSET) & 0xfff;
- }
- } else if (ASIC_IS_AVIVO(rdev)) {
- if (rdev->pm.active_crtcs & (1 << 0)) {
- vbl = RREG32(AVIVO_D1CRTC_V_BLANK_START_END) & 0xfff;
- position = RREG32(AVIVO_D1CRTC_STATUS_POSITION) & 0xfff;
- }
- if (rdev->pm.active_crtcs & (1 << 1)) {
- vbl = RREG32(AVIVO_D2CRTC_V_BLANK_START_END) & 0xfff;
- position = RREG32(AVIVO_D2CRTC_STATUS_POSITION) & 0xfff;
- }
- if (position < vbl && position > 1)
- in_vbl = false;
- } else {
- if (rdev->pm.active_crtcs & (1 << 0)) {
- stat_crtc = RREG32(RADEON_CRTC_STATUS);
- if (!(stat_crtc & 1))
- in_vbl = false;
- }
- if (rdev->pm.active_crtcs & (1 << 1)) {
- stat_crtc = RREG32(RADEON_CRTC2_STATUS);
- if (!(stat_crtc & 1))
+ /* Iterate over all active crtc's. All crtc's must be in vblank,
+ * otherwise return in_vbl == false.
+ */
+ for (crtc = 0; (crtc < rdev->num_crtc) && in_vbl; crtc++) {
+ if (rdev->pm.active_crtcs & (1 << crtc)) {
+ vbl_status = radeon_get_crtc_scanoutpos(rdev, crtc, &vpos, &hpos);
+ if ((vbl_status & RADEON_SCANOUTPOS_VALID) &&
+ !(vbl_status & RADEON_SCANOUTPOS_INVBL))
in_vbl = false;
}
}
- if (position < vbl && position > 1)
- in_vbl = false;
-
return in_vbl;
}
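
The reworked radeon_pm_in_vbl() above replaces the per-ASIC register reads with one loop: each active CRTC is queried through radeon_get_crtc_scanoutpos(), and the function only reports being in vblank if no active CRTC is caught outside it. A compact sketch of that accumulation over an active-CRTC bitmask, with the scanout query stubbed to fixed answers.

#include <stdbool.h>
#include <stdio.h>

#define SCANOUTPOS_VALID (1 << 0)
#define SCANOUTPOS_INVBL (1 << 1)

/* Stub for the scanout-position query: pretend CRTC 1 is mid-scanout
 * and every other CRTC is inside vertical blank. */
static int query_scanout(int crtc)
{
	return (crtc == 1) ? SCANOUTPOS_VALID
			   : (SCANOUTPOS_VALID | SCANOUTPOS_INVBL);
}

/* True only if every active CRTC reports a valid position inside vblank. */
static bool all_in_vblank(unsigned int active_crtcs, int num_crtc)
{
	bool in_vbl = true;

	for (int crtc = 0; crtc < num_crtc && in_vbl; crtc++) {
		int status;

		if (!(active_crtcs & (1u << crtc)))
			continue;
		status = query_scanout(crtc);
		if ((status & SCANOUTPOS_VALID) && !(status & SCANOUTPOS_INVBL))
			in_vbl = false;
	}
	return in_vbl;
}

int main(void)
{
	printf("crtc0 only: %d\n", all_in_vblank(0x1, 6));  /* 1: in vblank */
	printf("crtc0+1:    %d\n", all_in_vblank(0x3, 6));  /* 0: crtc1 is scanning out */
	return 0;
}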
diff --git a/drivers/gpu/drm/radeon/radeon_reg.h b/drivers/gpu/drm/radeon/radeon_reg.h
index c332f46340d5..64928814de53 100644
--- a/drivers/gpu/drm/radeon/radeon_reg.h
+++ b/drivers/gpu/drm/radeon/radeon_reg.h
@@ -2836,6 +2836,7 @@
# define R200_TXFORMAT_ST_ROUTE_STQ5 (5 << 24)
# define R200_TXFORMAT_ST_ROUTE_MASK (7 << 24)
# define R200_TXFORMAT_ST_ROUTE_SHIFT 24
+# define R200_TXFORMAT_LOOKUP_DISABLE (1 << 27)
# define R200_TXFORMAT_ALPHA_MASK_ENABLE (1 << 28)
# define R200_TXFORMAT_CHROMA_KEY_ENABLE (1 << 29)
# define R200_TXFORMAT_CUBIC_MAP_ENABLE (1 << 30)
diff --git a/drivers/gpu/drm/radeon/radeon_ring.c b/drivers/gpu/drm/radeon/radeon_ring.c
index 261e98a276db..06e79822a2bf 100644
--- a/drivers/gpu/drm/radeon/radeon_ring.c
+++ b/drivers/gpu/drm/radeon/radeon_ring.c
@@ -176,8 +176,8 @@ int radeon_ib_pool_init(struct radeon_device *rdev)
INIT_LIST_HEAD(&rdev->ib_pool.bogus_ib);
/* Allocate 1M object buffer */
r = radeon_bo_create(rdev, NULL, RADEON_IB_POOL_SIZE*64*1024,
- true, RADEON_GEM_DOMAIN_GTT,
- &rdev->ib_pool.robj);
+ PAGE_SIZE, true, RADEON_GEM_DOMAIN_GTT,
+ &rdev->ib_pool.robj);
if (r) {
DRM_ERROR("radeon: failed to ib pool (%d).\n", r);
return r;
@@ -247,10 +247,14 @@ void radeon_ib_pool_fini(struct radeon_device *rdev)
*/
void radeon_ring_free_size(struct radeon_device *rdev)
{
- if (rdev->family >= CHIP_R600)
- rdev->cp.rptr = RREG32(R600_CP_RB_RPTR);
- else
- rdev->cp.rptr = RREG32(RADEON_CP_RB_RPTR);
+ if (rdev->wb.enabled)
+ rdev->cp.rptr = rdev->wb.wb[RADEON_WB_CP_RPTR_OFFSET/4];
+ else {
+ if (rdev->family >= CHIP_R600)
+ rdev->cp.rptr = RREG32(R600_CP_RB_RPTR);
+ else
+ rdev->cp.rptr = RREG32(RADEON_CP_RB_RPTR);
+ }
/* This works because ring_size is a power of 2 */
rdev->cp.ring_free_dw = (rdev->cp.rptr + (rdev->cp.ring_size / 4));
rdev->cp.ring_free_dw -= rdev->cp.wptr;
@@ -328,7 +332,7 @@ int radeon_ring_init(struct radeon_device *rdev, unsigned ring_size)
rdev->cp.ring_size = ring_size;
/* Allocate ring buffer */
if (rdev->cp.ring_obj == NULL) {
- r = radeon_bo_create(rdev, NULL, rdev->cp.ring_size, true,
+ r = radeon_bo_create(rdev, NULL, rdev->cp.ring_size, PAGE_SIZE, true,
RADEON_GEM_DOMAIN_GTT,
&rdev->cp.ring_obj);
if (r) {
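
radeon_ring_free_size() now prefers the writeback copy of the read pointer and then computes free space with the usual power-of-two ring arithmetic. A sketch of that free-space formula under hypothetical pointer values; the wrap-around works because the dword count is a power of two, and rptr == wptr is treated as an empty ring.

#include <stdint.h>
#include <stdio.h>

/* Free dwords in a power-of-two ring: ring_size is in bytes, rptr/wptr
 * are dword indices. */
static uint32_t ring_free_dw(uint32_t rptr, uint32_t wptr, uint32_t ring_size)
{
	uint32_t ring_dw = ring_size / 4;
	uint32_t free_dw = (rptr + ring_dw - wptr) & (ring_dw - 1);

	/* rptr == wptr means the ring is empty, not full. */
	return free_dw ? free_dw : ring_dw;
}

int main(void)
{
	/* 64 KiB ring = 16384 dwords; writer is 100 dwords ahead of reader. */
	printf("free: %u dwords\n", (unsigned)ring_free_dw(0, 100, 64 * 1024));
	/* Reader has caught up with the writer: whole ring is free. */
	printf("free: %u dwords\n", (unsigned)ring_free_dw(100, 100, 64 * 1024));
	return 0;
}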
diff --git a/drivers/gpu/drm/radeon/radeon_test.c b/drivers/gpu/drm/radeon/radeon_test.c
index 313c96bc09da..5b44f652145c 100644
--- a/drivers/gpu/drm/radeon/radeon_test.c
+++ b/drivers/gpu/drm/radeon/radeon_test.c
@@ -52,7 +52,7 @@ void radeon_test_moves(struct radeon_device *rdev)
goto out_cleanup;
}
- r = radeon_bo_create(rdev, NULL, size, true, RADEON_GEM_DOMAIN_VRAM,
+ r = radeon_bo_create(rdev, NULL, size, PAGE_SIZE, true, RADEON_GEM_DOMAIN_VRAM,
&vram_obj);
if (r) {
DRM_ERROR("Failed to create VRAM object\n");
@@ -71,7 +71,7 @@ void radeon_test_moves(struct radeon_device *rdev)
void **gtt_start, **gtt_end;
void **vram_start, **vram_end;
- r = radeon_bo_create(rdev, NULL, size, true,
+ r = radeon_bo_create(rdev, NULL, size, PAGE_SIZE, true,
RADEON_GEM_DOMAIN_GTT, gtt_obj + i);
if (r) {
DRM_ERROR("Failed to create GTT object %d\n", i);
diff --git a/drivers/gpu/drm/radeon/radeon_ttm.c b/drivers/gpu/drm/radeon/radeon_ttm.c
index a823d8fe54c2..1272e4b6a1d4 100644
--- a/drivers/gpu/drm/radeon/radeon_ttm.c
+++ b/drivers/gpu/drm/radeon/radeon_ttm.c
@@ -152,6 +152,7 @@ static int radeon_init_mem_type(struct ttm_bo_device *bdev, uint32_t type,
man->default_caching = TTM_PL_FLAG_CACHED;
break;
case TTM_PL_TT:
+ man->func = &ttm_bo_manager_func;
man->gpu_offset = rdev->mc.gtt_start;
man->available_caching = TTM_PL_MASK_CACHING;
man->default_caching = TTM_PL_FLAG_CACHED;
@@ -173,6 +174,7 @@ static int radeon_init_mem_type(struct ttm_bo_device *bdev, uint32_t type,
break;
case TTM_PL_VRAM:
/* "On-card" video ram */
+ man->func = &ttm_bo_manager_func;
man->gpu_offset = rdev->mc.vram_start;
man->flags = TTM_MEMTYPE_FLAG_FIXED |
TTM_MEMTYPE_FLAG_MAPPABLE;
@@ -246,8 +248,8 @@ static int radeon_move_blit(struct ttm_buffer_object *bo,
if (unlikely(r)) {
return r;
}
- old_start = old_mem->mm_node->start << PAGE_SHIFT;
- new_start = new_mem->mm_node->start << PAGE_SHIFT;
+ old_start = old_mem->start << PAGE_SHIFT;
+ new_start = new_mem->start << PAGE_SHIFT;
switch (old_mem->mem_type) {
case TTM_PL_VRAM:
@@ -326,14 +328,7 @@ static int radeon_move_vram_ram(struct ttm_buffer_object *bo,
}
r = ttm_bo_move_ttm(bo, true, no_wait_reserve, no_wait_gpu, new_mem);
out_cleanup:
- if (tmp_mem.mm_node) {
- struct ttm_bo_global *glob = rdev->mman.bdev.glob;
-
- spin_lock(&glob->lru_lock);
- drm_mm_put_block(tmp_mem.mm_node);
- spin_unlock(&glob->lru_lock);
- return r;
- }
+ ttm_bo_mem_put(bo, &tmp_mem);
return r;
}
@@ -372,14 +367,7 @@ static int radeon_move_ram_vram(struct ttm_buffer_object *bo,
goto out_cleanup;
}
out_cleanup:
- if (tmp_mem.mm_node) {
- struct ttm_bo_global *glob = rdev->mman.bdev.glob;
-
- spin_lock(&glob->lru_lock);
- drm_mm_put_block(tmp_mem.mm_node);
- spin_unlock(&glob->lru_lock);
- return r;
- }
+ ttm_bo_mem_put(bo, &tmp_mem);
return r;
}
@@ -449,14 +437,14 @@ static int radeon_ttm_io_mem_reserve(struct ttm_bo_device *bdev, struct ttm_mem_
#if __OS_HAS_AGP
if (rdev->flags & RADEON_IS_AGP) {
/* RADEON_IS_AGP is set only if AGP is active */
- mem->bus.offset = mem->mm_node->start << PAGE_SHIFT;
+ mem->bus.offset = mem->start << PAGE_SHIFT;
mem->bus.base = rdev->mc.agp_base;
mem->bus.is_iomem = !rdev->ddev->agp->cant_use_aperture;
}
#endif
break;
case TTM_PL_VRAM:
- mem->bus.offset = mem->mm_node->start << PAGE_SHIFT;
+ mem->bus.offset = mem->start << PAGE_SHIFT;
/* check if it's visible */
if ((mem->bus.offset + mem->bus.size) > rdev->mc.visible_vram_size)
return -EINVAL;
@@ -541,7 +529,7 @@ int radeon_ttm_init(struct radeon_device *rdev)
DRM_ERROR("Failed initializing VRAM heap.\n");
return r;
}
- r = radeon_bo_create(rdev, NULL, 256 * 1024, true,
+ r = radeon_bo_create(rdev, NULL, 256 * 1024, PAGE_SIZE, true,
RADEON_GEM_DOMAIN_VRAM,
&rdev->stollen_vga_memory);
if (r) {
@@ -699,9 +687,10 @@ static int radeon_ttm_backend_bind(struct ttm_backend *backend,
int r;
gtt = container_of(backend, struct radeon_ttm_backend, backend);
- gtt->offset = bo_mem->mm_node->start << PAGE_SHIFT;
+ gtt->offset = bo_mem->start << PAGE_SHIFT;
if (!gtt->num_pages) {
- WARN(1, "nothing to bind %lu pages for mreg %p back %p!\n", gtt->num_pages, bo_mem, backend);
+ WARN(1, "nothing to bind %lu pages for mreg %p back %p!\n",
+ gtt->num_pages, bo_mem, backend);
}
r = radeon_gart_bind(gtt->rdev, gtt->offset,
gtt->num_pages, gtt->pages);
@@ -798,9 +787,9 @@ static int radeon_ttm_debugfs_init(struct radeon_device *rdev)
radeon_mem_types_list[i].show = &radeon_mm_dump_table;
radeon_mem_types_list[i].driver_features = 0;
if (i == 0)
- radeon_mem_types_list[i].data = &rdev->mman.bdev.man[TTM_PL_VRAM].manager;
+ radeon_mem_types_list[i].data = &rdev->mman.bdev.man[TTM_PL_VRAM].priv;
else
- radeon_mem_types_list[i].data = &rdev->mman.bdev.man[TTM_PL_TT].manager;
+ radeon_mem_types_list[i].data = &rdev->mman.bdev.man[TTM_PL_TT].priv;
}
/* Add ttm page pool to debugfs */
diff --git a/drivers/gpu/drm/radeon/reg_srcs/evergreen b/drivers/gpu/drm/radeon/reg_srcs/evergreen
index f78fd592544d..ac40fd39d787 100644
--- a/drivers/gpu/drm/radeon/reg_srcs/evergreen
+++ b/drivers/gpu/drm/radeon/reg_srcs/evergreen
@@ -22,6 +22,10 @@ evergreen 0x9400
0x00008B10 PA_SC_LINE_STIPPLE_STATE
0x00008BF0 PA_SC_ENHANCE
0x00008D8C SQ_DYN_GPR_CNTL_PS_FLUSH_REQ
+0x00008D90 SQ_DYN_GPR_OPTIMIZATION
+0x00008D94 SQ_DYN_GPR_SIMD_LOCK_EN
+0x00008D98 SQ_DYN_GPR_THREAD_LIMIT
+0x00008D9C SQ_DYN_GPR_LDS_LIMIT
0x00008C00 SQ_CONFIG
0x00008C04 SQ_GPR_RESOURCE_MGMT_1
0x00008C08 SQ_GPR_RESOURCE_MGMT_2
@@ -34,6 +38,10 @@ evergreen 0x9400
0x00008C24 SQ_STACK_RESOURCE_MGMT_2
0x00008C28 SQ_STACK_RESOURCE_MGMT_3
0x00008DF8 SQ_CONST_MEM_BASE
+0x00008E20 SQ_STATIC_THREAD_MGMT_1
+0x00008E24 SQ_STATIC_THREAD_MGMT_2
+0x00008E28 SQ_STATIC_THREAD_MGMT_3
+0x00008E2C SQ_LDS_RESOURCE_MGMT
0x00008E48 SQ_EX_ALLOC_TABLE_SLOTS
0x00009100 SPI_CONFIG_CNTL
0x0000913C SPI_CONFIG_CNTL_1
diff --git a/drivers/gpu/drm/radeon/rs400.c b/drivers/gpu/drm/radeon/rs400.c
index ae2b76b9a388..5512e4e5e636 100644
--- a/drivers/gpu/drm/radeon/rs400.c
+++ b/drivers/gpu/drm/radeon/rs400.c
@@ -78,7 +78,7 @@ int rs400_gart_init(struct radeon_device *rdev)
int r;
if (rdev->gart.table.ram.ptr) {
- WARN(1, "RS400 GART already initialized.\n");
+ WARN(1, "RS400 GART already initialized\n");
return 0;
}
/* Check gart size */
@@ -397,6 +397,12 @@ static int rs400_startup(struct radeon_device *rdev)
r = rs400_gart_enable(rdev);
if (r)
return r;
+
+ /* allocate wb buffer */
+ r = radeon_wb_init(rdev);
+ if (r)
+ return r;
+
/* Enable IRQ */
r100_irq_set(rdev);
rdev->config.r300.hdp_cntl = RREG32(RADEON_HOST_PATH_CNTL);
@@ -406,9 +412,6 @@ static int rs400_startup(struct radeon_device *rdev)
dev_err(rdev->dev, "failled initializing CP (%d).\n", r);
return r;
}
- r = r100_wb_init(rdev);
- if (r)
- dev_err(rdev->dev, "failled initializing WB (%d).\n", r);
r = r100_ib_init(rdev);
if (r) {
dev_err(rdev->dev, "failled initializing IB (%d).\n", r);
@@ -443,7 +446,7 @@ int rs400_resume(struct radeon_device *rdev)
int rs400_suspend(struct radeon_device *rdev)
{
r100_cp_disable(rdev);
- r100_wb_disable(rdev);
+ radeon_wb_disable(rdev);
r100_irq_disable(rdev);
rs400_gart_disable(rdev);
return 0;
@@ -452,7 +455,7 @@ int rs400_suspend(struct radeon_device *rdev)
void rs400_fini(struct radeon_device *rdev)
{
r100_cp_fini(rdev);
- r100_wb_fini(rdev);
+ radeon_wb_fini(rdev);
r100_ib_fini(rdev);
radeon_gem_fini(rdev);
rs400_gart_fini(rdev);
@@ -526,7 +529,7 @@ int rs400_init(struct radeon_device *rdev)
/* Somethings want wront with the accel init stop accel */
dev_err(rdev->dev, "Disabling GPU acceleration\n");
r100_cp_fini(rdev);
- r100_wb_fini(rdev);
+ radeon_wb_fini(rdev);
r100_ib_fini(rdev);
rs400_gart_fini(rdev);
radeon_irq_kms_fini(rdev);
diff --git a/drivers/gpu/drm/radeon/rs600.c b/drivers/gpu/drm/radeon/rs600.c
index 51d5f7b5ab21..f1c6e02c2e6b 100644
--- a/drivers/gpu/drm/radeon/rs600.c
+++ b/drivers/gpu/drm/radeon/rs600.c
@@ -375,7 +375,7 @@ int rs600_gart_init(struct radeon_device *rdev)
int r;
if (rdev->gart.table.vram.robj) {
- WARN(1, "RS600 GART already initialized.\n");
+ WARN(1, "RS600 GART already initialized\n");
return 0;
}
/* Initialize common gart structure */
@@ -505,7 +505,7 @@ int rs600_irq_set(struct radeon_device *rdev)
~S_007D18_DC_HOT_PLUG_DETECT2_INT_EN(1);
if (!rdev->irq.installed) {
- WARN(1, "Can't enable IRQ/MSI because no handler is installed.\n");
+ WARN(1, "Can't enable IRQ/MSI because no handler is installed\n");
WREG32(R_000040_GEN_INT_CNTL, 0);
return -EINVAL;
}
@@ -796,6 +796,12 @@ static int rs600_startup(struct radeon_device *rdev)
r = rs600_gart_enable(rdev);
if (r)
return r;
+
+ /* allocate wb buffer */
+ r = radeon_wb_init(rdev);
+ if (r)
+ return r;
+
/* Enable IRQ */
rs600_irq_set(rdev);
rdev->config.r300.hdp_cntl = RREG32(RADEON_HOST_PATH_CNTL);
@@ -805,9 +811,6 @@ static int rs600_startup(struct radeon_device *rdev)
dev_err(rdev->dev, "failled initializing CP (%d).\n", r);
return r;
}
- r = r100_wb_init(rdev);
- if (r)
- dev_err(rdev->dev, "failled initializing WB (%d).\n", r);
r = r100_ib_init(rdev);
if (r) {
dev_err(rdev->dev, "failled initializing IB (%d).\n", r);
@@ -848,7 +851,7 @@ int rs600_suspend(struct radeon_device *rdev)
{
r600_audio_fini(rdev);
r100_cp_disable(rdev);
- r100_wb_disable(rdev);
+ radeon_wb_disable(rdev);
rs600_irq_disable(rdev);
rs600_gart_disable(rdev);
return 0;
@@ -858,7 +861,7 @@ void rs600_fini(struct radeon_device *rdev)
{
r600_audio_fini(rdev);
r100_cp_fini(rdev);
- r100_wb_fini(rdev);
+ radeon_wb_fini(rdev);
r100_ib_fini(rdev);
radeon_gem_fini(rdev);
rs600_gart_fini(rdev);
@@ -932,7 +935,7 @@ int rs600_init(struct radeon_device *rdev)
/* Somethings want wront with the accel init stop accel */
dev_err(rdev->dev, "Disabling GPU acceleration\n");
r100_cp_fini(rdev);
- r100_wb_fini(rdev);
+ radeon_wb_fini(rdev);
r100_ib_fini(rdev);
rs600_gart_fini(rdev);
radeon_irq_kms_fini(rdev);
diff --git a/drivers/gpu/drm/radeon/rs690.c b/drivers/gpu/drm/radeon/rs690.c
index 4dc2a87ea680..0137d3e3728d 100644
--- a/drivers/gpu/drm/radeon/rs690.c
+++ b/drivers/gpu/drm/radeon/rs690.c
@@ -616,6 +616,12 @@ static int rs690_startup(struct radeon_device *rdev)
r = rs400_gart_enable(rdev);
if (r)
return r;
+
+ /* allocate wb buffer */
+ r = radeon_wb_init(rdev);
+ if (r)
+ return r;
+
/* Enable IRQ */
rs600_irq_set(rdev);
rdev->config.r300.hdp_cntl = RREG32(RADEON_HOST_PATH_CNTL);
@@ -625,9 +631,6 @@ static int rs690_startup(struct radeon_device *rdev)
dev_err(rdev->dev, "failled initializing CP (%d).\n", r);
return r;
}
- r = r100_wb_init(rdev);
- if (r)
- dev_err(rdev->dev, "failled initializing WB (%d).\n", r);
r = r100_ib_init(rdev);
if (r) {
dev_err(rdev->dev, "failled initializing IB (%d).\n", r);
@@ -668,7 +671,7 @@ int rs690_suspend(struct radeon_device *rdev)
{
r600_audio_fini(rdev);
r100_cp_disable(rdev);
- r100_wb_disable(rdev);
+ radeon_wb_disable(rdev);
rs600_irq_disable(rdev);
rs400_gart_disable(rdev);
return 0;
@@ -678,7 +681,7 @@ void rs690_fini(struct radeon_device *rdev)
{
r600_audio_fini(rdev);
r100_cp_fini(rdev);
- r100_wb_fini(rdev);
+ radeon_wb_fini(rdev);
r100_ib_fini(rdev);
radeon_gem_fini(rdev);
rs400_gart_fini(rdev);
@@ -753,7 +756,7 @@ int rs690_init(struct radeon_device *rdev)
/* Somethings want wront with the accel init stop accel */
dev_err(rdev->dev, "Disabling GPU acceleration\n");
r100_cp_fini(rdev);
- r100_wb_fini(rdev);
+ radeon_wb_fini(rdev);
r100_ib_fini(rdev);
rs400_gart_fini(rdev);
radeon_irq_kms_fini(rdev);
diff --git a/drivers/gpu/drm/radeon/rv515.c b/drivers/gpu/drm/radeon/rv515.c
index 4d6e86041a9f..5d569f41f4ae 100644
--- a/drivers/gpu/drm/radeon/rv515.c
+++ b/drivers/gpu/drm/radeon/rv515.c
@@ -386,6 +386,12 @@ static int rv515_startup(struct radeon_device *rdev)
if (r)
return r;
}
+
+ /* allocate wb buffer */
+ r = radeon_wb_init(rdev);
+ if (r)
+ return r;
+
/* Enable IRQ */
rs600_irq_set(rdev);
rdev->config.r300.hdp_cntl = RREG32(RADEON_HOST_PATH_CNTL);
@@ -395,9 +401,6 @@ static int rv515_startup(struct radeon_device *rdev)
dev_err(rdev->dev, "failled initializing CP (%d).\n", r);
return r;
}
- r = r100_wb_init(rdev);
- if (r)
- dev_err(rdev->dev, "failled initializing WB (%d).\n", r);
r = r100_ib_init(rdev);
if (r) {
dev_err(rdev->dev, "failled initializing IB (%d).\n", r);
@@ -431,7 +434,7 @@ int rv515_resume(struct radeon_device *rdev)
int rv515_suspend(struct radeon_device *rdev)
{
r100_cp_disable(rdev);
- r100_wb_disable(rdev);
+ radeon_wb_disable(rdev);
rs600_irq_disable(rdev);
if (rdev->flags & RADEON_IS_PCIE)
rv370_pcie_gart_disable(rdev);
@@ -447,7 +450,7 @@ void rv515_set_safe_registers(struct radeon_device *rdev)
void rv515_fini(struct radeon_device *rdev)
{
r100_cp_fini(rdev);
- r100_wb_fini(rdev);
+ radeon_wb_fini(rdev);
r100_ib_fini(rdev);
radeon_gem_fini(rdev);
rv370_pcie_gart_fini(rdev);
@@ -527,7 +530,7 @@ int rv515_init(struct radeon_device *rdev)
/* Somethings want wront with the accel init stop accel */
dev_err(rdev->dev, "Disabling GPU acceleration\n");
r100_cp_fini(rdev);
- r100_wb_fini(rdev);
+ radeon_wb_fini(rdev);
r100_ib_fini(rdev);
radeon_irq_kms_fini(rdev);
rv370_pcie_gart_fini(rdev);
diff --git a/drivers/gpu/drm/radeon/rv770.c b/drivers/gpu/drm/radeon/rv770.c
index 9490da700749..4dfead8cee33 100644
--- a/drivers/gpu/drm/radeon/rv770.c
+++ b/drivers/gpu/drm/radeon/rv770.c
@@ -269,6 +269,7 @@ void r700_cp_stop(struct radeon_device *rdev)
{
rdev->mc.active_vram_size = rdev->mc.visible_vram_size;
WREG32(CP_ME_CNTL, (CP_ME_HALT | CP_PFP_HALT));
+ WREG32(SCRATCH_UMSK, 0);
}
static int rv770_cp_load_microcode(struct radeon_device *rdev)
@@ -643,10 +644,11 @@ static void rv770_gpu_init(struct radeon_device *rdev)
else
gb_tiling_config |= BANK_TILING((mc_arb_ramcfg & NOOFBANK_MASK) >> NOOFBANK_SHIFT);
rdev->config.rv770.tiling_nbanks = 4 << ((gb_tiling_config >> 4) & 0x3);
-
- gb_tiling_config |= GROUP_SIZE(0);
- rdev->config.rv770.tiling_group_size = 256;
-
+ gb_tiling_config |= GROUP_SIZE((mc_arb_ramcfg & BURSTLENGTH_MASK) >> BURSTLENGTH_SHIFT);
+ if ((mc_arb_ramcfg & BURSTLENGTH_MASK) >> BURSTLENGTH_SHIFT)
+ rdev->config.rv770.tiling_group_size = 512;
+ else
+ rdev->config.rv770.tiling_group_size = 256;
if (((mc_arb_ramcfg & NOOFROWS_MASK) >> NOOFROWS_SHIFT) > 3) {
gb_tiling_config |= ROW_TILING(3);
gb_tiling_config |= SAMPLE_SPLIT(3);
@@ -913,8 +915,8 @@ static int rv770_vram_scratch_init(struct radeon_device *rdev)
if (rdev->vram_scratch.robj == NULL) {
r = radeon_bo_create(rdev, NULL, RADEON_GPU_PAGE_SIZE,
- true, RADEON_GEM_DOMAIN_VRAM,
- &rdev->vram_scratch.robj);
+ PAGE_SIZE, true, RADEON_GEM_DOMAIN_VRAM,
+ &rdev->vram_scratch.robj);
if (r) {
return r;
}
@@ -1030,19 +1032,12 @@ static int rv770_startup(struct radeon_device *rdev)
rdev->asic->copy = NULL;
dev_warn(rdev->dev, "failed blitter (%d) falling back to memcpy\n", r);
}
- /* pin copy shader into vram */
- if (rdev->r600_blit.shader_obj) {
- r = radeon_bo_reserve(rdev->r600_blit.shader_obj, false);
- if (unlikely(r != 0))
- return r;
- r = radeon_bo_pin(rdev->r600_blit.shader_obj, RADEON_GEM_DOMAIN_VRAM,
- &rdev->r600_blit.shader_gpu_addr);
- radeon_bo_unreserve(rdev->r600_blit.shader_obj);
- if (r) {
- DRM_ERROR("failed to pin blit object %d\n", r);
- return r;
- }
- }
+
+ /* allocate wb buffer */
+ r = radeon_wb_init(rdev);
+ if (r)
+ return r;
+
/* Enable IRQ */
r = r600_irq_init(rdev);
if (r) {
@@ -1061,8 +1056,7 @@ static int rv770_startup(struct radeon_device *rdev)
r = r600_cp_resume(rdev);
if (r)
return r;
- /* write back buffer are not vital so don't worry about failure */
- r600_wb_enable(rdev);
+
return 0;
}
@@ -1108,7 +1102,7 @@ int rv770_suspend(struct radeon_device *rdev)
r700_cp_stop(rdev);
rdev->cp.ready = false;
r600_irq_suspend(rdev);
- r600_wb_disable(rdev);
+ radeon_wb_disable(rdev);
rv770_pcie_gart_disable(rdev);
/* unpin shaders bo */
if (rdev->r600_blit.shader_obj) {
@@ -1203,8 +1197,8 @@ int rv770_init(struct radeon_device *rdev)
if (r) {
dev_err(rdev->dev, "disabling GPU acceleration\n");
r700_cp_fini(rdev);
- r600_wb_fini(rdev);
r600_irq_fini(rdev);
+ radeon_wb_fini(rdev);
radeon_irq_kms_fini(rdev);
rv770_pcie_gart_fini(rdev);
rdev->accel_working = false;
@@ -1236,8 +1230,8 @@ void rv770_fini(struct radeon_device *rdev)
{
r600_blit_fini(rdev);
r700_cp_fini(rdev);
- r600_wb_fini(rdev);
r600_irq_fini(rdev);
+ radeon_wb_fini(rdev);
radeon_irq_kms_fini(rdev);
rv770_pcie_gart_fini(rdev);
rv770_vram_scratch_fini(rdev);
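
The radeon hunks above all apply the same refactor: the per-family writeback helpers (r100_wb_init/r100_wb_disable/r100_wb_fini and r600_wb_enable/r600_wb_disable/r600_wb_fini) give way to the shared radeon_wb_init/radeon_wb_disable/radeon_wb_fini, and the writeback buffer is now set up earlier in each *_startup(), before the IRQ and CP bring-up, instead of after the CP as before. A condensed sketch of the resulting startup shape, using the rs400 names from the hunks above (a reading aid only, not code from the patch; the CP and IB bring-up continue exactly as shown in the hunk):

	static int startup_shape(struct radeon_device *rdev)
	{
		int r;

		r = rs400_gart_enable(rdev);
		if (r)
			return r;

		/* allocate wb buffer (common code, replaces r100_wb_init) */
		r = radeon_wb_init(rdev);
		if (r)
			return r;

		/* Enable IRQ, then bring up CP and IB as in the hunk above */
		r100_irq_set(rdev);
		return 0;
	}

The teardown side is symmetric: *_suspend() calls radeon_wb_disable() where r100_wb_disable()/r600_wb_disable() used to be, and *_fini() plus the accel-init error paths call radeon_wb_fini() in place of r100_wb_fini()/r600_wb_fini().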
diff --git a/drivers/gpu/drm/savage/savage_drv.c b/drivers/gpu/drm/savage/savage_drv.c
index 2a2830f5a840..fa64d25d4248 100644
--- a/drivers/gpu/drm/savage/savage_drv.c
+++ b/drivers/gpu/drm/savage/savage_drv.c
@@ -42,8 +42,6 @@ static struct drm_driver driver = {
.lastclose = savage_driver_lastclose,
.unload = savage_driver_unload,
.reclaim_buffers = savage_reclaim_buffers,
- .get_map_ofs = drm_core_get_map_ofs,
- .get_reg_ofs = drm_core_get_reg_ofs,
.ioctls = savage_ioctls,
.dma_ioctl = savage_bci_buffers,
.fops = {
diff --git a/drivers/gpu/drm/sis/sis_drv.c b/drivers/gpu/drm/sis/sis_drv.c
index 4bb10ef6676a..4caf5d01cfd3 100644
--- a/drivers/gpu/drm/sis/sis_drv.c
+++ b/drivers/gpu/drm/sis/sis_drv.c
@@ -67,13 +67,10 @@ static struct drm_driver driver = {
.driver_features = DRIVER_USE_AGP | DRIVER_USE_MTRR,
.load = sis_driver_load,
.unload = sis_driver_unload,
- .context_dtor = NULL,
.dma_quiescent = sis_idle,
.reclaim_buffers = NULL,
.reclaim_buffers_idlelocked = sis_reclaim_buffers_locked,
.lastclose = sis_lastclose,
- .get_map_ofs = drm_core_get_map_ofs,
- .get_reg_ofs = drm_core_get_reg_ofs,
.ioctls = sis_ioctls,
.fops = {
.owner = THIS_MODULE,
diff --git a/drivers/gpu/drm/tdfx/tdfx_drv.c b/drivers/gpu/drm/tdfx/tdfx_drv.c
index 640567ef713d..b70fa91d761a 100644
--- a/drivers/gpu/drm/tdfx/tdfx_drv.c
+++ b/drivers/gpu/drm/tdfx/tdfx_drv.c
@@ -42,8 +42,6 @@ static struct pci_device_id pciidlist[] = {
static struct drm_driver driver = {
.driver_features = DRIVER_USE_MTRR,
.reclaim_buffers = drm_core_reclaim_buffers,
- .get_map_ofs = drm_core_get_map_ofs,
- .get_reg_ofs = drm_core_get_reg_ofs,
.fops = {
.owner = THIS_MODULE,
.open = drm_open,
diff --git a/drivers/gpu/drm/ttm/Makefile b/drivers/gpu/drm/ttm/Makefile
index b256d4adfafe..f3cf6f02c997 100644
--- a/drivers/gpu/drm/ttm/Makefile
+++ b/drivers/gpu/drm/ttm/Makefile
@@ -4,6 +4,7 @@
ccflags-y := -Iinclude/drm
ttm-y := ttm_agp_backend.o ttm_memory.o ttm_tt.o ttm_bo.o \
ttm_bo_util.o ttm_bo_vm.o ttm_module.o \
- ttm_object.o ttm_lock.o ttm_execbuf_util.o ttm_page_alloc.o
+ ttm_object.o ttm_lock.o ttm_execbuf_util.o ttm_page_alloc.o \
+ ttm_bo_manager.o
obj-$(CONFIG_DRM_TTM) += ttm.o
diff --git a/drivers/gpu/drm/ttm/ttm_agp_backend.c b/drivers/gpu/drm/ttm/ttm_agp_backend.c
index 4bf69c404491..f999e36f30b4 100644
--- a/drivers/gpu/drm/ttm/ttm_agp_backend.c
+++ b/drivers/gpu/drm/ttm/ttm_agp_backend.c
@@ -74,6 +74,7 @@ static int ttm_agp_bind(struct ttm_backend *backend, struct ttm_mem_reg *bo_mem)
{
struct ttm_agp_backend *agp_be =
container_of(backend, struct ttm_agp_backend, backend);
+ struct drm_mm_node *node = bo_mem->mm_node;
struct agp_memory *mem = agp_be->mem;
int cached = (bo_mem->placement & TTM_PL_FLAG_CACHED);
int ret;
@@ -81,7 +82,7 @@ static int ttm_agp_bind(struct ttm_backend *backend, struct ttm_mem_reg *bo_mem)
mem->is_flushed = 1;
mem->type = (cached) ? AGP_USER_CACHED_MEMORY : AGP_USER_MEMORY;
- ret = agp_bind_memory(mem, bo_mem->mm_node->start);
+ ret = agp_bind_memory(mem, node->start);
if (ret)
printk(KERN_ERR TTM_PFX "AGP Bind memory failed.\n");
diff --git a/drivers/gpu/drm/ttm/ttm_bo.c b/drivers/gpu/drm/ttm/ttm_bo.c
index db809e034cc4..148a322d8f5d 100644
--- a/drivers/gpu/drm/ttm/ttm_bo.c
+++ b/drivers/gpu/drm/ttm/ttm_bo.c
@@ -27,14 +27,6 @@
/*
* Authors: Thomas Hellstrom <thellstrom-at-vmware-dot-com>
*/
-/* Notes:
- *
- * We store bo pointer in drm_mm_node struct so we know which bo own a
- * specific node. There is no protection on the pointer, thus to make
- * sure things don't go berserk you have to access this pointer while
- * holding the global lru lock and make sure anytime you free a node you
- * reset the pointer to NULL.
- */
#include "ttm/ttm_module.h"
#include "ttm/ttm_bo_driver.h"
@@ -45,6 +37,7 @@
#include <linux/mm.h>
#include <linux/file.h>
#include <linux/module.h>
+#include <asm/atomic.h>
#define TTM_ASSERT_LOCKED(param)
#define TTM_DEBUG(fmt, arg...)
@@ -84,11 +77,8 @@ static void ttm_mem_type_debug(struct ttm_bo_device *bdev, int mem_type)
man->available_caching);
printk(KERN_ERR TTM_PFX " default_caching: 0x%08X\n",
man->default_caching);
- if (mem_type != TTM_PL_SYSTEM) {
- spin_lock(&bdev->glob->lru_lock);
- drm_mm_debug_table(&man->manager, TTM_PFX);
- spin_unlock(&bdev->glob->lru_lock);
- }
+ if (mem_type != TTM_PL_SYSTEM)
+ (*man->func->debug)(man, TTM_PFX);
}
static void ttm_bo_mem_space_debug(struct ttm_buffer_object *bo,
@@ -169,18 +159,13 @@ static void ttm_bo_release_list(struct kref *list_kref)
int ttm_bo_wait_unreserved(struct ttm_buffer_object *bo, bool interruptible)
{
-
if (interruptible) {
- int ret = 0;
-
- ret = wait_event_interruptible(bo->event_queue,
+ return wait_event_interruptible(bo->event_queue,
atomic_read(&bo->reserved) == 0);
- if (unlikely(ret != 0))
- return ret;
} else {
wait_event(bo->event_queue, atomic_read(&bo->reserved) == 0);
+ return 0;
}
- return 0;
}
EXPORT_SYMBOL(ttm_bo_wait_unreserved);
@@ -239,6 +224,9 @@ int ttm_bo_reserve_locked(struct ttm_buffer_object *bo,
int ret;
while (unlikely(atomic_cmpxchg(&bo->reserved, 0, 1) != 0)) {
+ /**
+ * Deadlock avoidance for multi-bo reserving.
+ */
if (use_sequence && bo->seq_valid &&
(sequence - bo->val_seq < (1 << 31))) {
return -EAGAIN;
@@ -256,6 +244,14 @@ int ttm_bo_reserve_locked(struct ttm_buffer_object *bo,
}
if (use_sequence) {
+ /**
+ * Wake up waiters that may need to recheck for deadlock,
+ * if we decreased the sequence number.
+ */
+ if (unlikely((bo->val_seq - sequence < (1 << 31))
+ || !bo->seq_valid))
+ wake_up_all(&bo->event_queue);
+
bo->val_seq = sequence;
bo->seq_valid = true;
} else {
@@ -421,7 +417,7 @@ moved:
if (bo->mem.mm_node) {
spin_lock(&bo->lock);
- bo->offset = (bo->mem.mm_node->start << PAGE_SHIFT) +
+ bo->offset = (bo->mem.start << PAGE_SHIFT) +
bdev->man[bo->mem.mem_type].gpu_offset;
bo->cur_placement = bo->mem.placement;
spin_unlock(&bo->lock);
@@ -442,135 +438,152 @@ out_err:
}
/**
- * Call bo::reserved and with the lru lock held.
+ * Call bo::reserved.
* Will release GPU memory type usage on destruction.
- * This is the place to put in driver specific hooks.
- * Will release the bo::reserved lock and the
- * lru lock on exit.
+ * This is the place to put in driver specific hooks to release
+ * driver private resources.
+ * Will release the bo::reserved lock.
*/
static void ttm_bo_cleanup_memtype_use(struct ttm_buffer_object *bo)
{
- struct ttm_bo_global *glob = bo->glob;
-
if (bo->ttm) {
-
- /**
- * Release the lru_lock, since we don't want to have
- * an atomic requirement on ttm_tt[unbind|destroy].
- */
-
- spin_unlock(&glob->lru_lock);
ttm_tt_unbind(bo->ttm);
ttm_tt_destroy(bo->ttm);
bo->ttm = NULL;
- spin_lock(&glob->lru_lock);
}
- if (bo->mem.mm_node) {
- drm_mm_put_block(bo->mem.mm_node);
- bo->mem.mm_node = NULL;
- }
+ ttm_bo_mem_put(bo, &bo->mem);
atomic_set(&bo->reserved, 0);
+
+ /*
+ * Make processes trying to reserve really pick it up.
+ */
+ smp_mb__after_atomic_dec();
wake_up_all(&bo->event_queue);
- spin_unlock(&glob->lru_lock);
}
-
-/**
- * If bo idle, remove from delayed- and lru lists, and unref.
- * If not idle, and already on delayed list, do nothing.
- * If not idle, and not on delayed list, put on delayed list,
- * up the list_kref and schedule a delayed list check.
- */
-
-static int ttm_bo_cleanup_refs(struct ttm_buffer_object *bo, bool remove_all)
+static void ttm_bo_cleanup_refs_or_queue(struct ttm_buffer_object *bo)
{
struct ttm_bo_device *bdev = bo->bdev;
struct ttm_bo_global *glob = bo->glob;
- struct ttm_bo_driver *driver = bdev->driver;
+ struct ttm_bo_driver *driver;
+ void *sync_obj = NULL;
+ void *sync_obj_arg;
+ int put_count;
int ret;
spin_lock(&bo->lock);
-retry:
- (void) ttm_bo_wait(bo, false, false, !remove_all);
-
+ (void) ttm_bo_wait(bo, false, false, true);
if (!bo->sync_obj) {
- int put_count;
-
- spin_unlock(&bo->lock);
spin_lock(&glob->lru_lock);
- ret = ttm_bo_reserve_locked(bo, false, !remove_all, false, 0);
/**
- * Someone else has the object reserved. Bail and retry.
+ * Lock inversion between bo::reserve and bo::lock here,
+ * but that's OK, since we're only trylocking.
*/
- if (unlikely(ret == -EBUSY)) {
- spin_unlock(&glob->lru_lock);
- spin_lock(&bo->lock);
- goto requeue;
- }
-
- /**
- * We can re-check for sync object without taking
- * the bo::lock since setting the sync object requires
- * also bo::reserved. A busy object at this point may
- * be caused by another thread starting an accelerated
- * eviction.
- */
+ ret = ttm_bo_reserve_locked(bo, false, true, false, 0);
- if (unlikely(bo->sync_obj)) {
- atomic_set(&bo->reserved, 0);
- wake_up_all(&bo->event_queue);
- spin_unlock(&glob->lru_lock);
- spin_lock(&bo->lock);
- if (remove_all)
- goto retry;
- else
- goto requeue;
- }
+ if (unlikely(ret == -EBUSY))
+ goto queue;
+ spin_unlock(&bo->lock);
put_count = ttm_bo_del_from_lru(bo);
- if (!list_empty(&bo->ddestroy)) {
- list_del_init(&bo->ddestroy);
- ++put_count;
- }
-
+ spin_unlock(&glob->lru_lock);
ttm_bo_cleanup_memtype_use(bo);
while (put_count--)
kref_put(&bo->list_kref, ttm_bo_ref_bug);
- return 0;
+ return;
+ } else {
+ spin_lock(&glob->lru_lock);
+ }
+queue:
+ driver = bdev->driver;
+ if (bo->sync_obj)
+ sync_obj = driver->sync_obj_ref(bo->sync_obj);
+ sync_obj_arg = bo->sync_obj_arg;
+
+ kref_get(&bo->list_kref);
+ list_add_tail(&bo->ddestroy, &bdev->ddestroy);
+ spin_unlock(&glob->lru_lock);
+ spin_unlock(&bo->lock);
+
+ if (sync_obj) {
+ driver->sync_obj_flush(sync_obj, sync_obj_arg);
+ driver->sync_obj_unref(&sync_obj);
}
-requeue:
+ schedule_delayed_work(&bdev->wq,
+ ((HZ / 100) < 1) ? 1 : HZ / 100);
+}
+
+/**
+ * function ttm_bo_cleanup_refs
+ * If bo idle, remove from delayed- and lru lists, and unref.
+ * If not idle, do nothing.
+ *
+ * @interruptible Any sleeps should occur interruptibly.
+ * @no_wait_reserve Never wait for reserve. Return -EBUSY instead.
+ * @no_wait_gpu Never wait for gpu. Return -EBUSY instead.
+ */
+
+static int ttm_bo_cleanup_refs(struct ttm_buffer_object *bo,
+ bool interruptible,
+ bool no_wait_reserve,
+ bool no_wait_gpu)
+{
+ struct ttm_bo_global *glob = bo->glob;
+ int put_count;
+ int ret = 0;
+
+retry:
+ spin_lock(&bo->lock);
+ ret = ttm_bo_wait(bo, false, interruptible, no_wait_gpu);
+ spin_unlock(&bo->lock);
+
+ if (unlikely(ret != 0))
+ return ret;
+
spin_lock(&glob->lru_lock);
- if (list_empty(&bo->ddestroy)) {
- void *sync_obj = bo->sync_obj;
- void *sync_obj_arg = bo->sync_obj_arg;
+ ret = ttm_bo_reserve_locked(bo, interruptible,
+ no_wait_reserve, false, 0);
- kref_get(&bo->list_kref);
- list_add_tail(&bo->ddestroy, &bdev->ddestroy);
+ if (unlikely(ret != 0) || list_empty(&bo->ddestroy)) {
spin_unlock(&glob->lru_lock);
- spin_unlock(&bo->lock);
+ return ret;
+ }
- if (sync_obj)
- driver->sync_obj_flush(sync_obj, sync_obj_arg);
- schedule_delayed_work(&bdev->wq,
- ((HZ / 100) < 1) ? 1 : HZ / 100);
- ret = 0;
+ /**
+ * We can re-check for sync object without taking
+ * the bo::lock since setting the sync object requires
+ * also bo::reserved. A busy object at this point may
+ * be caused by another thread recently starting an accelerated
+ * eviction.
+ */
- } else {
+ if (unlikely(bo->sync_obj)) {
+ atomic_set(&bo->reserved, 0);
+ wake_up_all(&bo->event_queue);
spin_unlock(&glob->lru_lock);
- spin_unlock(&bo->lock);
- ret = -EBUSY;
+ goto retry;
}
- return ret;
+ put_count = ttm_bo_del_from_lru(bo);
+ list_del_init(&bo->ddestroy);
+ ++put_count;
+
+ spin_unlock(&glob->lru_lock);
+ ttm_bo_cleanup_memtype_use(bo);
+
+ while (put_count--)
+ kref_put(&bo->list_kref, ttm_bo_ref_bug);
+
+ return 0;
}
/**
@@ -602,7 +615,8 @@ static int ttm_bo_delayed_delete(struct ttm_bo_device *bdev, bool remove_all)
}
spin_unlock(&glob->lru_lock);
- ret = ttm_bo_cleanup_refs(entry, remove_all);
+ ret = ttm_bo_cleanup_refs(entry, false, !remove_all,
+ !remove_all);
kref_put(&entry->list_kref, ttm_bo_release_list);
entry = nentry;
@@ -645,7 +659,7 @@ static void ttm_bo_release(struct kref *kref)
bo->vm_node = NULL;
}
write_unlock(&bdev->vm_lock);
- ttm_bo_cleanup_refs(bo, false);
+ ttm_bo_cleanup_refs_or_queue(bo);
kref_put(&bo->list_kref, ttm_bo_release_list);
write_lock(&bdev->vm_lock);
}
@@ -680,7 +694,6 @@ static int ttm_bo_evict(struct ttm_buffer_object *bo, bool interruptible,
bool no_wait_reserve, bool no_wait_gpu)
{
struct ttm_bo_device *bdev = bo->bdev;
- struct ttm_bo_global *glob = bo->glob;
struct ttm_mem_reg evict_mem;
struct ttm_placement placement;
int ret = 0;
@@ -726,12 +739,7 @@ static int ttm_bo_evict(struct ttm_buffer_object *bo, bool interruptible,
if (ret) {
if (ret != -ERESTARTSYS)
printk(KERN_ERR TTM_PFX "Buffer eviction failed\n");
- spin_lock(&glob->lru_lock);
- if (evict_mem.mm_node) {
- drm_mm_put_block(evict_mem.mm_node);
- evict_mem.mm_node = NULL;
- }
- spin_unlock(&glob->lru_lock);
+ ttm_bo_mem_put(bo, &evict_mem);
goto out;
}
bo->evicted = true;
@@ -759,6 +767,18 @@ retry:
bo = list_first_entry(&man->lru, struct ttm_buffer_object, lru);
kref_get(&bo->list_kref);
+ if (!list_empty(&bo->ddestroy)) {
+ spin_unlock(&glob->lru_lock);
+ ret = ttm_bo_cleanup_refs(bo, interruptible,
+ no_wait_reserve, no_wait_gpu);
+ kref_put(&bo->list_kref, ttm_bo_release_list);
+
+ if (likely(ret == 0 || ret == -ERESTARTSYS))
+ return ret;
+
+ goto retry;
+ }
+
ret = ttm_bo_reserve_locked(bo, false, no_wait_reserve, false, 0);
if (unlikely(ret == -EBUSY)) {
@@ -792,41 +812,14 @@ retry:
return ret;
}
-static int ttm_bo_man_get_node(struct ttm_buffer_object *bo,
- struct ttm_mem_type_manager *man,
- struct ttm_placement *placement,
- struct ttm_mem_reg *mem,
- struct drm_mm_node **node)
+void ttm_bo_mem_put(struct ttm_buffer_object *bo, struct ttm_mem_reg *mem)
{
- struct ttm_bo_global *glob = bo->glob;
- unsigned long lpfn;
- int ret;
-
- lpfn = placement->lpfn;
- if (!lpfn)
- lpfn = man->size;
- *node = NULL;
- do {
- ret = drm_mm_pre_get(&man->manager);
- if (unlikely(ret))
- return ret;
+ struct ttm_mem_type_manager *man = &bo->bdev->man[mem->mem_type];
- spin_lock(&glob->lru_lock);
- *node = drm_mm_search_free_in_range(&man->manager,
- mem->num_pages, mem->page_alignment,
- placement->fpfn, lpfn, 1);
- if (unlikely(*node == NULL)) {
- spin_unlock(&glob->lru_lock);
- return 0;
- }
- *node = drm_mm_get_block_atomic_range(*node, mem->num_pages,
- mem->page_alignment,
- placement->fpfn,
- lpfn);
- spin_unlock(&glob->lru_lock);
- } while (*node == NULL);
- return 0;
+ if (mem->mm_node)
+ (*man->func->put_node)(man, mem);
}
+EXPORT_SYMBOL(ttm_bo_mem_put);
/**
* Repeatedly evict memory from the LRU for @mem_type until we create enough
@@ -841,31 +834,22 @@ static int ttm_bo_mem_force_space(struct ttm_buffer_object *bo,
bool no_wait_gpu)
{
struct ttm_bo_device *bdev = bo->bdev;
- struct ttm_bo_global *glob = bdev->glob;
struct ttm_mem_type_manager *man = &bdev->man[mem_type];
- struct drm_mm_node *node;
int ret;
do {
- ret = ttm_bo_man_get_node(bo, man, placement, mem, &node);
+ ret = (*man->func->get_node)(man, bo, placement, mem);
if (unlikely(ret != 0))
return ret;
- if (node)
- break;
- spin_lock(&glob->lru_lock);
- if (list_empty(&man->lru)) {
- spin_unlock(&glob->lru_lock);
+ if (mem->mm_node)
break;
- }
- spin_unlock(&glob->lru_lock);
ret = ttm_mem_evict_first(bdev, mem_type, interruptible,
no_wait_reserve, no_wait_gpu);
if (unlikely(ret != 0))
return ret;
} while (1);
- if (node == NULL)
+ if (mem->mm_node == NULL)
return -ENOMEM;
- mem->mm_node = node;
mem->mem_type = mem_type;
return 0;
}
@@ -939,7 +923,6 @@ int ttm_bo_mem_space(struct ttm_buffer_object *bo,
bool type_found = false;
bool type_ok = false;
bool has_erestartsys = false;
- struct drm_mm_node *node = NULL;
int i, ret;
mem->mm_node = NULL;
@@ -973,17 +956,15 @@ int ttm_bo_mem_space(struct ttm_buffer_object *bo,
if (man->has_type && man->use_type) {
type_found = true;
- ret = ttm_bo_man_get_node(bo, man, placement, mem,
- &node);
+ ret = (*man->func->get_node)(man, bo, placement, mem);
if (unlikely(ret))
return ret;
}
- if (node)
+ if (mem->mm_node)
break;
}
- if ((type_ok && (mem_type == TTM_PL_SYSTEM)) || node) {
- mem->mm_node = node;
+ if ((type_ok && (mem_type == TTM_PL_SYSTEM)) || mem->mm_node) {
mem->mem_type = mem_type;
mem->placement = cur_flags;
return 0;
@@ -1053,7 +1034,6 @@ int ttm_bo_move_buffer(struct ttm_buffer_object *bo,
bool interruptible, bool no_wait_reserve,
bool no_wait_gpu)
{
- struct ttm_bo_global *glob = bo->glob;
int ret = 0;
struct ttm_mem_reg mem;
@@ -1081,11 +1061,8 @@ int ttm_bo_move_buffer(struct ttm_buffer_object *bo,
goto out_unlock;
ret = ttm_bo_handle_move_mem(bo, &mem, false, interruptible, no_wait_reserve, no_wait_gpu);
out_unlock:
- if (ret && mem.mm_node) {
- spin_lock(&glob->lru_lock);
- drm_mm_put_block(mem.mm_node);
- spin_unlock(&glob->lru_lock);
- }
+ if (ret && mem.mm_node)
+ ttm_bo_mem_put(bo, &mem);
return ret;
}
@@ -1093,11 +1070,10 @@ static int ttm_bo_mem_compat(struct ttm_placement *placement,
struct ttm_mem_reg *mem)
{
int i;
- struct drm_mm_node *node = mem->mm_node;
- if (node && placement->lpfn != 0 &&
- (node->start < placement->fpfn ||
- node->start + node->size > placement->lpfn))
+ if (mem->mm_node && placement->lpfn != 0 &&
+ (mem->start < placement->fpfn ||
+ mem->start + mem->num_pages > placement->lpfn))
return -1;
for (i = 0; i < placement->num_placement; i++) {
@@ -1154,35 +1130,9 @@ EXPORT_SYMBOL(ttm_bo_validate);
int ttm_bo_check_placement(struct ttm_buffer_object *bo,
struct ttm_placement *placement)
{
- int i;
+ BUG_ON((placement->fpfn || placement->lpfn) &&
+ (bo->mem.num_pages > (placement->lpfn - placement->fpfn)));
- if (placement->fpfn || placement->lpfn) {
- if (bo->mem.num_pages > (placement->lpfn - placement->fpfn)) {
- printk(KERN_ERR TTM_PFX "Page number range to small "
- "Need %lu pages, range is [%u, %u]\n",
- bo->mem.num_pages, placement->fpfn,
- placement->lpfn);
- return -EINVAL;
- }
- }
- for (i = 0; i < placement->num_placement; i++) {
- if (!capable(CAP_SYS_ADMIN)) {
- if (placement->placement[i] & TTM_PL_FLAG_NO_EVICT) {
- printk(KERN_ERR TTM_PFX "Need to be root to "
- "modify NO_EVICT status.\n");
- return -EINVAL;
- }
- }
- }
- for (i = 0; i < placement->num_busy_placement; i++) {
- if (!capable(CAP_SYS_ADMIN)) {
- if (placement->busy_placement[i] & TTM_PL_FLAG_NO_EVICT) {
- printk(KERN_ERR TTM_PFX "Need to be root to "
- "modify NO_EVICT status.\n");
- return -EINVAL;
- }
- }
- }
return 0;
}
@@ -1205,6 +1155,10 @@ int ttm_bo_init(struct ttm_bo_device *bdev,
num_pages = (size + PAGE_SIZE - 1) >> PAGE_SHIFT;
if (num_pages == 0) {
printk(KERN_ERR TTM_PFX "Illegal buffer object size.\n");
+ if (destroy)
+ (*destroy)(bo);
+ else
+ kfree(bo);
return -EINVAL;
}
bo->destroy = destroy;
@@ -1341,7 +1295,6 @@ static int ttm_bo_force_list_clean(struct ttm_bo_device *bdev,
int ttm_bo_clean_mm(struct ttm_bo_device *bdev, unsigned mem_type)
{
- struct ttm_bo_global *glob = bdev->glob;
struct ttm_mem_type_manager *man;
int ret = -EINVAL;
@@ -1364,13 +1317,7 @@ int ttm_bo_clean_mm(struct ttm_bo_device *bdev, unsigned mem_type)
if (mem_type > 0) {
ttm_bo_force_list_clean(bdev, mem_type, false);
- spin_lock(&glob->lru_lock);
- if (drm_mm_clean(&man->manager))
- drm_mm_takedown(&man->manager);
- else
- ret = -EBUSY;
-
- spin_unlock(&glob->lru_lock);
+ ret = (*man->func->takedown)(man);
}
return ret;
@@ -1405,32 +1352,18 @@ int ttm_bo_init_mm(struct ttm_bo_device *bdev, unsigned type,
int ret = -EINVAL;
struct ttm_mem_type_manager *man;
- if (type >= TTM_NUM_MEM_TYPES) {
- printk(KERN_ERR TTM_PFX "Illegal memory type %d\n", type);
- return ret;
- }
-
+ BUG_ON(type >= TTM_NUM_MEM_TYPES);
man = &bdev->man[type];
- if (man->has_type) {
- printk(KERN_ERR TTM_PFX
- "Memory manager already initialized for type %d\n",
- type);
- return ret;
- }
+ BUG_ON(man->has_type);
ret = bdev->driver->init_mem_type(bdev, type, man);
if (ret)
return ret;
+ man->bdev = bdev;
ret = 0;
if (type != TTM_PL_SYSTEM) {
- if (!p_size) {
- printk(KERN_ERR TTM_PFX
- "Zero size memory manager type %d\n",
- type);
- return ret;
- }
- ret = drm_mm_init(&man->manager, 0, p_size);
+ ret = (*man->func->init)(man, p_size);
if (ret)
return ret;
}
@@ -1824,6 +1757,13 @@ static int ttm_bo_swapout(struct ttm_mem_shrink *shrink)
struct ttm_buffer_object, swap);
kref_get(&bo->list_kref);
+ if (!list_empty(&bo->ddestroy)) {
+ spin_unlock(&glob->lru_lock);
+ (void) ttm_bo_cleanup_refs(bo, false, false, false);
+ kref_put(&bo->list_kref, ttm_bo_release_list);
+ continue;
+ }
+
/**
* Reserve buffer. Since we unlock while sleeping, we need
* to re-check that nobody removed us from the swap-list while
diff --git a/drivers/gpu/drm/ttm/ttm_bo_manager.c b/drivers/gpu/drm/ttm/ttm_bo_manager.c
new file mode 100644
index 000000000000..038e947d00f9
--- /dev/null
+++ b/drivers/gpu/drm/ttm/ttm_bo_manager.c
@@ -0,0 +1,157 @@
+/**************************************************************************
+ *
+ * Copyright (c) 2007-2010 VMware, Inc., Palo Alto, CA., USA
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
+ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
+ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
+ * USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ **************************************************************************/
+/*
+ * Authors: Thomas Hellstrom <thellstrom-at-vmware-dot-com>
+ */
+
+#include "ttm/ttm_module.h"
+#include "ttm/ttm_bo_driver.h"
+#include "ttm/ttm_placement.h"
+#include "drm_mm.h"
+#include <linux/slab.h>
+#include <linux/spinlock.h>
+#include <linux/module.h>
+
+/**
+ * Currently we use a spinlock for the lock, but a mutex *may* be
+ * more appropriate to reduce scheduling latency if the range manager
+ * ends up with very fragmented allocation patterns.
+ */
+
+struct ttm_range_manager {
+ struct drm_mm mm;
+ spinlock_t lock;
+};
+
+static int ttm_bo_man_get_node(struct ttm_mem_type_manager *man,
+ struct ttm_buffer_object *bo,
+ struct ttm_placement *placement,
+ struct ttm_mem_reg *mem)
+{
+ struct ttm_range_manager *rman = (struct ttm_range_manager *) man->priv;
+ struct drm_mm *mm = &rman->mm;
+ struct drm_mm_node *node = NULL;
+ unsigned long lpfn;
+ int ret;
+
+ lpfn = placement->lpfn;
+ if (!lpfn)
+ lpfn = man->size;
+ do {
+ ret = drm_mm_pre_get(mm);
+ if (unlikely(ret))
+ return ret;
+
+ spin_lock(&rman->lock);
+ node = drm_mm_search_free_in_range(mm,
+ mem->num_pages, mem->page_alignment,
+ placement->fpfn, lpfn, 1);
+ if (unlikely(node == NULL)) {
+ spin_unlock(&rman->lock);
+ return 0;
+ }
+ node = drm_mm_get_block_atomic_range(node, mem->num_pages,
+ mem->page_alignment,
+ placement->fpfn,
+ lpfn);
+ spin_unlock(&rman->lock);
+ } while (node == NULL);
+
+ mem->mm_node = node;
+ mem->start = node->start;
+ return 0;
+}
+
+static void ttm_bo_man_put_node(struct ttm_mem_type_manager *man,
+ struct ttm_mem_reg *mem)
+{
+ struct ttm_range_manager *rman = (struct ttm_range_manager *) man->priv;
+
+ if (mem->mm_node) {
+ spin_lock(&rman->lock);
+ drm_mm_put_block(mem->mm_node);
+ spin_unlock(&rman->lock);
+ mem->mm_node = NULL;
+ }
+}
+
+static int ttm_bo_man_init(struct ttm_mem_type_manager *man,
+ unsigned long p_size)
+{
+ struct ttm_range_manager *rman;
+ int ret;
+
+ rman = kzalloc(sizeof(*rman), GFP_KERNEL);
+ if (!rman)
+ return -ENOMEM;
+
+ ret = drm_mm_init(&rman->mm, 0, p_size);
+ if (ret) {
+ kfree(rman);
+ return ret;
+ }
+
+ spin_lock_init(&rman->lock);
+ man->priv = rman;
+ return 0;
+}
+
+static int ttm_bo_man_takedown(struct ttm_mem_type_manager *man)
+{
+ struct ttm_range_manager *rman = (struct ttm_range_manager *) man->priv;
+ struct drm_mm *mm = &rman->mm;
+
+ spin_lock(&rman->lock);
+ if (drm_mm_clean(mm)) {
+ drm_mm_takedown(mm);
+ spin_unlock(&rman->lock);
+ kfree(rman);
+ man->priv = NULL;
+ return 0;
+ }
+ spin_unlock(&rman->lock);
+ return -EBUSY;
+}
+
+static void ttm_bo_man_debug(struct ttm_mem_type_manager *man,
+ const char *prefix)
+{
+ struct ttm_range_manager *rman = (struct ttm_range_manager *) man->priv;
+
+ spin_lock(&rman->lock);
+ drm_mm_debug_table(&rman->mm, prefix);
+ spin_unlock(&rman->lock);
+}
+
+const struct ttm_mem_type_manager_func ttm_bo_manager_func = {
+ ttm_bo_man_init,
+ ttm_bo_man_takedown,
+ ttm_bo_man_get_node,
+ ttm_bo_man_put_node,
+ ttm_bo_man_debug
+};
+EXPORT_SYMBOL(ttm_bo_manager_func);
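
The new file above is the drm_mm-backed implementation of the ttm_mem_type_manager_func interface that ttm_bo.c now calls through (init, takedown, get_node, put_node, debug). A driver selects a backend per memory type from its init_mem_type() hook; a minimal sketch of that wiring, using only names that appear in this patch (the example function itself is illustrative, not from the patch):

	static int example_init_mem_type(struct ttm_bo_device *bdev, uint32_t type,
					 struct ttm_mem_type_manager *man)
	{
		switch (type) {
		case TTM_PL_VRAM:
			/* range-allocated, drm_mm-backed memory type */
			man->func = &ttm_bo_manager_func;
			man->gpu_offset = 0;
			man->flags = TTM_MEMTYPE_FLAG_FIXED | TTM_MEMTYPE_FLAG_MAPPABLE;
			man->available_caching = TTM_PL_FLAG_CACHED;
			man->default_caching = TTM_PL_FLAG_CACHED;
			break;
		default:
			return -EINVAL;
		}
		return 0;
	}

ttm_bo_init_mm(bdev, TTM_PL_VRAM, size_in_pages) then calls (*man->func->init)(man, p_size), which allocates the struct ttm_range_manager and its drm_mm; get_node()/put_node() replace the old open-coded drm_mm_search_free_in_range()/drm_mm_put_block() calls and record the allocation's offset in mem->start.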
diff --git a/drivers/gpu/drm/ttm/ttm_bo_util.c b/drivers/gpu/drm/ttm/ttm_bo_util.c
index 3451a82adba7..3106d5bcce32 100644
--- a/drivers/gpu/drm/ttm/ttm_bo_util.c
+++ b/drivers/gpu/drm/ttm/ttm_bo_util.c
@@ -39,14 +39,7 @@
void ttm_bo_free_old_node(struct ttm_buffer_object *bo)
{
- struct ttm_mem_reg *old_mem = &bo->mem;
-
- if (old_mem->mm_node) {
- spin_lock(&bo->glob->lru_lock);
- drm_mm_put_block(old_mem->mm_node);
- spin_unlock(&bo->glob->lru_lock);
- }
- old_mem->mm_node = NULL;
+ ttm_bo_mem_put(bo, &bo->mem);
}
int ttm_bo_move_ttm(struct ttm_buffer_object *bo,
@@ -170,7 +163,7 @@ static int ttm_copy_io_ttm_page(struct ttm_tt *ttm, void *src,
src = (void *)((unsigned long)src + (page << PAGE_SHIFT));
#ifdef CONFIG_X86
- dst = kmap_atomic_prot(d, KM_USER0, prot);
+ dst = kmap_atomic_prot(d, prot);
#else
if (pgprot_val(prot) != pgprot_val(PAGE_KERNEL))
dst = vmap(&d, 1, 0, prot);
@@ -183,7 +176,7 @@ static int ttm_copy_io_ttm_page(struct ttm_tt *ttm, void *src,
memcpy_fromio(dst, src, PAGE_SIZE);
#ifdef CONFIG_X86
- kunmap_atomic(dst, KM_USER0);
+ kunmap_atomic(dst);
#else
if (pgprot_val(prot) != pgprot_val(PAGE_KERNEL))
vunmap(dst);
@@ -206,7 +199,7 @@ static int ttm_copy_ttm_io_page(struct ttm_tt *ttm, void *dst,
dst = (void *)((unsigned long)dst + (page << PAGE_SHIFT));
#ifdef CONFIG_X86
- src = kmap_atomic_prot(s, KM_USER0, prot);
+ src = kmap_atomic_prot(s, prot);
#else
if (pgprot_val(prot) != pgprot_val(PAGE_KERNEL))
src = vmap(&s, 1, 0, prot);
@@ -219,7 +212,7 @@ static int ttm_copy_ttm_io_page(struct ttm_tt *ttm, void *dst,
memcpy_toio(dst, src, PAGE_SIZE);
#ifdef CONFIG_X86
- kunmap_atomic(src, KM_USER0);
+ kunmap_atomic(src);
#else
if (pgprot_val(prot) != pgprot_val(PAGE_KERNEL))
vunmap(src);
@@ -263,8 +256,7 @@ int ttm_bo_move_memcpy(struct ttm_buffer_object *bo,
dir = 1;
if ((old_mem->mem_type == new_mem->mem_type) &&
- (new_mem->mm_node->start <
- old_mem->mm_node->start + old_mem->mm_node->size)) {
+ (new_mem->start < old_mem->start + old_mem->size)) {
dir = -1;
add = new_mem->num_pages - 1;
}
diff --git a/drivers/gpu/drm/ttm/ttm_tt.c b/drivers/gpu/drm/ttm/ttm_tt.c
index a7bab87a548b..af789dc869b9 100644
--- a/drivers/gpu/drm/ttm/ttm_tt.c
+++ b/drivers/gpu/drm/ttm/ttm_tt.c
@@ -440,10 +440,8 @@ int ttm_tt_bind(struct ttm_tt *ttm, struct ttm_mem_reg *bo_mem)
return ret;
ret = be->func->bind(be, bo_mem);
- if (ret) {
- printk(KERN_ERR TTM_PFX "Couldn't bind backend.\n");
+ if (unlikely(ret != 0))
return ret;
- }
ttm->state = tt_bound;
diff --git a/drivers/gpu/drm/via/via_dmablit.c b/drivers/gpu/drm/via/via_dmablit.c
index 9b5b4d9dd62c..3e038a394c51 100644
--- a/drivers/gpu/drm/via/via_dmablit.c
+++ b/drivers/gpu/drm/via/via_dmablit.c
@@ -235,9 +235,9 @@ via_lock_all_dma_pages(drm_via_sg_info_t *vsg, drm_via_dmablit_t *xfer)
vsg->num_pages = VIA_PFN(xfer->mem_addr + (xfer->num_lines * xfer->mem_stride - 1)) -
first_pfn + 1;
- if (NULL == (vsg->pages = vmalloc(sizeof(struct page *) * vsg->num_pages)))
+ vsg->pages = vzalloc(sizeof(struct page *) * vsg->num_pages);
+ if (NULL == vsg->pages)
return -ENOMEM;
- memset(vsg->pages, 0, sizeof(struct page *) * vsg->num_pages);
down_read(&current->mm->mmap_sem);
ret = get_user_pages(current, current->mm,
(unsigned long)xfer->mem_addr,
diff --git a/drivers/gpu/drm/via/via_drv.c b/drivers/gpu/drm/via/via_drv.c
index b8984a5ae521..e1ff4e7a6eb0 100644
--- a/drivers/gpu/drm/via/via_drv.c
+++ b/drivers/gpu/drm/via/via_drv.c
@@ -51,8 +51,6 @@ static struct drm_driver driver = {
.reclaim_buffers_locked = NULL,
.reclaim_buffers_idlelocked = via_reclaim_buffers_locked,
.lastclose = via_lastclose,
- .get_map_ofs = drm_core_get_map_ofs,
- .get_reg_ofs = drm_core_get_reg_ofs,
.ioctls = via_ioctls,
.fops = {
.owner = THIS_MODULE,
diff --git a/drivers/gpu/drm/vmwgfx/Makefile b/drivers/gpu/drm/vmwgfx/Makefile
index 4505e17df3f5..c9281a1b1d3b 100644
--- a/drivers/gpu/drm/vmwgfx/Makefile
+++ b/drivers/gpu/drm/vmwgfx/Makefile
@@ -4,6 +4,6 @@ ccflags-y := -Iinclude/drm
vmwgfx-y := vmwgfx_execbuf.o vmwgfx_gmr.o vmwgfx_kms.o vmwgfx_drv.o \
vmwgfx_fb.o vmwgfx_ioctl.o vmwgfx_resource.o vmwgfx_buffer.o \
vmwgfx_fifo.o vmwgfx_irq.o vmwgfx_ldu.o vmwgfx_ttm_glue.o \
- vmwgfx_overlay.o vmwgfx_fence.o
+ vmwgfx_overlay.o vmwgfx_fence.o vmwgfx_gmrid_manager.o
obj-$(CONFIG_DRM_VMWGFX) := vmwgfx.o
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_buffer.c b/drivers/gpu/drm/vmwgfx/vmwgfx_buffer.c
index c4f5114aee7c..80bc37b274e7 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_buffer.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_buffer.c
@@ -39,6 +39,9 @@ static uint32_t vram_ne_placement_flags = TTM_PL_FLAG_VRAM |
static uint32_t sys_placement_flags = TTM_PL_FLAG_SYSTEM |
TTM_PL_FLAG_CACHED;
+static uint32_t gmr_placement_flags = VMW_PL_FLAG_GMR |
+ TTM_PL_FLAG_CACHED;
+
struct ttm_placement vmw_vram_placement = {
.fpfn = 0,
.lpfn = 0,
@@ -48,6 +51,20 @@ struct ttm_placement vmw_vram_placement = {
.busy_placement = &vram_placement_flags
};
+static uint32_t vram_gmr_placement_flags[] = {
+ TTM_PL_FLAG_VRAM | TTM_PL_FLAG_CACHED,
+ VMW_PL_FLAG_GMR | TTM_PL_FLAG_CACHED
+};
+
+struct ttm_placement vmw_vram_gmr_placement = {
+ .fpfn = 0,
+ .lpfn = 0,
+ .num_placement = 2,
+ .placement = vram_gmr_placement_flags,
+ .num_busy_placement = 1,
+ .busy_placement = &gmr_placement_flags
+};
+
struct ttm_placement vmw_vram_sys_placement = {
.fpfn = 0,
.lpfn = 0,
@@ -77,27 +94,52 @@ struct ttm_placement vmw_sys_placement = {
struct vmw_ttm_backend {
struct ttm_backend backend;
+ struct page **pages;
+ unsigned long num_pages;
+ struct vmw_private *dev_priv;
+ int gmr_id;
};
static int vmw_ttm_populate(struct ttm_backend *backend,
unsigned long num_pages, struct page **pages,
struct page *dummy_read_page)
{
+ struct vmw_ttm_backend *vmw_be =
+ container_of(backend, struct vmw_ttm_backend, backend);
+
+ vmw_be->pages = pages;
+ vmw_be->num_pages = num_pages;
+
return 0;
}
static int vmw_ttm_bind(struct ttm_backend *backend, struct ttm_mem_reg *bo_mem)
{
- return 0;
+ struct vmw_ttm_backend *vmw_be =
+ container_of(backend, struct vmw_ttm_backend, backend);
+
+ vmw_be->gmr_id = bo_mem->start;
+
+ return vmw_gmr_bind(vmw_be->dev_priv, vmw_be->pages,
+ vmw_be->num_pages, vmw_be->gmr_id);
}
static int vmw_ttm_unbind(struct ttm_backend *backend)
{
+ struct vmw_ttm_backend *vmw_be =
+ container_of(backend, struct vmw_ttm_backend, backend);
+
+ vmw_gmr_unbind(vmw_be->dev_priv, vmw_be->gmr_id);
return 0;
}
static void vmw_ttm_clear(struct ttm_backend *backend)
{
+ struct vmw_ttm_backend *vmw_be =
+ container_of(backend, struct vmw_ttm_backend, backend);
+
+ vmw_be->pages = NULL;
+ vmw_be->num_pages = 0;
}
static void vmw_ttm_destroy(struct ttm_backend *backend)
@@ -125,6 +167,7 @@ struct ttm_backend *vmw_ttm_backend_init(struct ttm_bo_device *bdev)
return NULL;
vmw_be->backend.func = &vmw_ttm_func;
+ vmw_be->dev_priv = container_of(bdev, struct vmw_private, bdev);
return &vmw_be->backend;
}
@@ -142,15 +185,28 @@ int vmw_init_mem_type(struct ttm_bo_device *bdev, uint32_t type,
/* System memory */
man->flags = TTM_MEMTYPE_FLAG_MAPPABLE;
- man->available_caching = TTM_PL_MASK_CACHING;
+ man->available_caching = TTM_PL_FLAG_CACHED;
man->default_caching = TTM_PL_FLAG_CACHED;
break;
case TTM_PL_VRAM:
/* "On-card" video ram */
+ man->func = &ttm_bo_manager_func;
man->gpu_offset = 0;
man->flags = TTM_MEMTYPE_FLAG_FIXED | TTM_MEMTYPE_FLAG_MAPPABLE;
- man->available_caching = TTM_PL_MASK_CACHING;
- man->default_caching = TTM_PL_FLAG_WC;
+ man->available_caching = TTM_PL_FLAG_CACHED;
+ man->default_caching = TTM_PL_FLAG_CACHED;
+ break;
+ case VMW_PL_GMR:
+ /*
+ * "Guest Memory Regions" is an aperture like feature with
+ * one slot per bo. There is an upper limit of the number of
+ * slots as well as the bo size.
+ */
+ man->func = &vmw_gmrid_manager_func;
+ man->gpu_offset = 0;
+ man->flags = TTM_MEMTYPE_FLAG_CMA | TTM_MEMTYPE_FLAG_MAPPABLE;
+ man->available_caching = TTM_PL_FLAG_CACHED;
+ man->default_caching = TTM_PL_FLAG_CACHED;
break;
default:
DRM_ERROR("Unsupported memory type %u\n", (unsigned)type);
@@ -174,18 +230,6 @@ static int vmw_verify_access(struct ttm_buffer_object *bo, struct file *filp)
return 0;
}
-static void vmw_move_notify(struct ttm_buffer_object *bo,
- struct ttm_mem_reg *new_mem)
-{
- if (new_mem->mem_type != TTM_PL_SYSTEM)
- vmw_dmabuf_gmr_unbind(bo);
-}
-
-static void vmw_swap_notify(struct ttm_buffer_object *bo)
-{
- vmw_dmabuf_gmr_unbind(bo);
-}
-
static int vmw_ttm_io_mem_reserve(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem)
{
struct ttm_mem_type_manager *man = &bdev->man[mem->mem_type];
@@ -200,10 +244,10 @@ static int vmw_ttm_io_mem_reserve(struct ttm_bo_device *bdev, struct ttm_mem_reg
return -EINVAL;
switch (mem->mem_type) {
case TTM_PL_SYSTEM:
- /* System memory */
+ case VMW_PL_GMR:
return 0;
case TTM_PL_VRAM:
- mem->bus.offset = mem->mm_node->start << PAGE_SHIFT;
+ mem->bus.offset = mem->start << PAGE_SHIFT;
mem->bus.base = dev_priv->vram_start;
mem->bus.is_iomem = true;
break;
@@ -276,8 +320,8 @@ struct ttm_bo_driver vmw_bo_driver = {
.sync_obj_flush = vmw_sync_obj_flush,
.sync_obj_unref = vmw_sync_obj_unref,
.sync_obj_ref = vmw_sync_obj_ref,
- .move_notify = vmw_move_notify,
- .swap_notify = vmw_swap_notify,
+ .move_notify = NULL,
+ .swap_notify = NULL,
.fault_reserve_notify = &vmw_ttm_fault_reserve_notify,
.io_mem_reserve = &vmw_ttm_io_mem_reserve,
.io_mem_free = &vmw_ttm_io_mem_free,
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
index 2ef93df9e8ae..10ca97ee0206 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
@@ -260,13 +260,11 @@ static int vmw_driver_load(struct drm_device *dev, unsigned long chipset)
idr_init(&dev_priv->context_idr);
idr_init(&dev_priv->surface_idr);
idr_init(&dev_priv->stream_idr);
- ida_init(&dev_priv->gmr_ida);
mutex_init(&dev_priv->init_mutex);
init_waitqueue_head(&dev_priv->fence_queue);
init_waitqueue_head(&dev_priv->fifo_queue);
atomic_set(&dev_priv->fence_queue_waiters, 0);
atomic_set(&dev_priv->fifo_queue_waiters, 0);
- INIT_LIST_HEAD(&dev_priv->gmr_lru);
dev_priv->io_start = pci_resource_start(dev->pdev, 0);
dev_priv->vram_start = pci_resource_start(dev->pdev, 1);
@@ -341,6 +339,14 @@ static int vmw_driver_load(struct drm_device *dev, unsigned long chipset)
goto out_err2;
}
+ dev_priv->has_gmr = true;
+ if (ttm_bo_init_mm(&dev_priv->bdev, VMW_PL_GMR,
+ dev_priv->max_gmr_ids) != 0) {
+ DRM_INFO("No GMR memory available. "
+ "Graphics memory resources are very limited.\n");
+ dev_priv->has_gmr = false;
+ }
+
dev_priv->mmio_mtrr = drm_mtrr_add(dev_priv->mmio_start,
dev_priv->mmio_size, DRM_MTRR_WC);
@@ -440,13 +446,14 @@ out_err4:
out_err3:
drm_mtrr_del(dev_priv->mmio_mtrr, dev_priv->mmio_start,
dev_priv->mmio_size, DRM_MTRR_WC);
+ if (dev_priv->has_gmr)
+ (void) ttm_bo_clean_mm(&dev_priv->bdev, VMW_PL_GMR);
(void)ttm_bo_clean_mm(&dev_priv->bdev, TTM_PL_VRAM);
out_err2:
(void)ttm_bo_device_release(&dev_priv->bdev);
out_err1:
vmw_ttm_global_release(dev_priv);
out_err0:
- ida_destroy(&dev_priv->gmr_ida);
idr_destroy(&dev_priv->surface_idr);
idr_destroy(&dev_priv->context_idr);
idr_destroy(&dev_priv->stream_idr);
@@ -478,10 +485,11 @@ static int vmw_driver_unload(struct drm_device *dev)
iounmap(dev_priv->mmio_virt);
drm_mtrr_del(dev_priv->mmio_mtrr, dev_priv->mmio_start,
dev_priv->mmio_size, DRM_MTRR_WC);
+ if (dev_priv->has_gmr)
+ (void)ttm_bo_clean_mm(&dev_priv->bdev, VMW_PL_GMR);
(void)ttm_bo_clean_mm(&dev_priv->bdev, TTM_PL_VRAM);
(void)ttm_bo_device_release(&dev_priv->bdev);
vmw_ttm_global_release(dev_priv);
- ida_destroy(&dev_priv->gmr_ida);
idr_destroy(&dev_priv->surface_idr);
idr_destroy(&dev_priv->context_idr);
idr_destroy(&dev_priv->stream_idr);
@@ -597,6 +605,8 @@ static void vmw_lastclose(struct drm_device *dev)
static void vmw_master_init(struct vmw_master *vmaster)
{
ttm_lock_init(&vmaster->lock);
+ INIT_LIST_HEAD(&vmaster->fb_surf);
+ mutex_init(&vmaster->fb_surf_mutex);
}
static int vmw_master_create(struct drm_device *dev,
@@ -608,7 +618,7 @@ static int vmw_master_create(struct drm_device *dev,
if (unlikely(vmaster == NULL))
return -ENOMEM;
- ttm_lock_init(&vmaster->lock);
+ vmw_master_init(vmaster);
ttm_lock_set_kill(&vmaster->lock, true, SIGTERM);
master->driver_priv = vmaster;
@@ -699,6 +709,7 @@ static void vmw_master_drop(struct drm_device *dev,
vmw_fp->locked_master = drm_master_get(file_priv->master);
ret = ttm_vt_lock(&vmaster->lock, false, vmw_fp->tfile);
+ vmw_kms_idle_workqueues(vmaster);
if (unlikely((ret != 0))) {
DRM_ERROR("Unable to lock TTM at VT switch.\n");
@@ -751,15 +762,16 @@ static int vmwgfx_pm_notifier(struct notifier_block *nb, unsigned long val,
* Buffer contents is moved to swappable memory.
*/
ttm_bo_swapout_all(&dev_priv->bdev);
+
break;
case PM_POST_HIBERNATION:
case PM_POST_SUSPEND:
+ case PM_POST_RESTORE:
ttm_suspend_unlock(&vmaster->lock);
+
break;
case PM_RESTORE_PREPARE:
break;
- case PM_POST_RESTORE:
- break;
default:
break;
}
@@ -770,21 +782,98 @@ static int vmwgfx_pm_notifier(struct notifier_block *nb, unsigned long val,
* These might not be needed with the virtual SVGA device.
*/
-int vmw_pci_suspend(struct pci_dev *pdev, pm_message_t state)
+static int vmw_pci_suspend(struct pci_dev *pdev, pm_message_t state)
{
+ struct drm_device *dev = pci_get_drvdata(pdev);
+ struct vmw_private *dev_priv = vmw_priv(dev);
+
+ if (dev_priv->num_3d_resources != 0) {
+ DRM_INFO("Can't suspend or hibernate "
+ "while 3D resources are active.\n");
+ return -EBUSY;
+ }
+
pci_save_state(pdev);
pci_disable_device(pdev);
pci_set_power_state(pdev, PCI_D3hot);
return 0;
}
-int vmw_pci_resume(struct pci_dev *pdev)
+static int vmw_pci_resume(struct pci_dev *pdev)
{
pci_set_power_state(pdev, PCI_D0);
pci_restore_state(pdev);
return pci_enable_device(pdev);
}
+static int vmw_pm_suspend(struct device *kdev)
+{
+ struct pci_dev *pdev = to_pci_dev(kdev);
+ struct pm_message dummy;
+
+ dummy.event = 0;
+
+ return vmw_pci_suspend(pdev, dummy);
+}
+
+static int vmw_pm_resume(struct device *kdev)
+{
+ struct pci_dev *pdev = to_pci_dev(kdev);
+
+ return vmw_pci_resume(pdev);
+}
+
+static int vmw_pm_prepare(struct device *kdev)
+{
+ struct pci_dev *pdev = to_pci_dev(kdev);
+ struct drm_device *dev = pci_get_drvdata(pdev);
+ struct vmw_private *dev_priv = vmw_priv(dev);
+
+ /**
+ * Release 3d reference held by fbdev and potentially
+ * stop fifo.
+ */
+ dev_priv->suspended = true;
+ if (dev_priv->enable_fb)
+ vmw_3d_resource_dec(dev_priv);
+
+ if (dev_priv->num_3d_resources != 0) {
+
+ DRM_INFO("Can't suspend or hibernate "
+ "while 3D resources are active.\n");
+
+ if (dev_priv->enable_fb)
+ vmw_3d_resource_inc(dev_priv);
+ dev_priv->suspended = false;
+ return -EBUSY;
+ }
+
+ return 0;
+}
+
+static void vmw_pm_complete(struct device *kdev)
+{
+ struct pci_dev *pdev = to_pci_dev(kdev);
+ struct drm_device *dev = pci_get_drvdata(pdev);
+ struct vmw_private *dev_priv = vmw_priv(dev);
+
+ /**
+ * Reclaim 3d reference held by fbdev and potentially
+ * start fifo.
+ */
+ if (dev_priv->enable_fb)
+ vmw_3d_resource_inc(dev_priv);
+
+ dev_priv->suspended = false;
+}
+
+static const struct dev_pm_ops vmw_pm_ops = {
+ .prepare = vmw_pm_prepare,
+ .complete = vmw_pm_complete,
+ .suspend = vmw_pm_suspend,
+ .resume = vmw_pm_resume,
+};
+
static struct drm_driver driver = {
.driver_features = DRIVER_HAVE_IRQ | DRIVER_IRQ_SHARED |
DRIVER_MODESET,
@@ -798,8 +887,6 @@ static struct drm_driver driver = {
.irq_handler = vmw_irq_handler,
.get_vblank_counter = vmw_get_vblank_counter,
.reclaim_buffers_locked = NULL,
- .get_map_ofs = drm_core_get_map_ofs,
- .get_reg_ofs = drm_core_get_reg_ofs,
.ioctls = vmw_ioctls,
.num_ioctls = DRM_ARRAY_SIZE(vmw_ioctls),
.dma_quiescent = NULL, /*vmw_dma_quiescent, */
@@ -821,15 +908,16 @@ static struct drm_driver driver = {
.compat_ioctl = drm_compat_ioctl,
#endif
.llseek = noop_llseek,
- },
+ },
.pci_driver = {
- .name = VMWGFX_DRIVER_NAME,
- .id_table = vmw_pci_id_list,
- .probe = vmw_probe,
- .remove = vmw_remove,
- .suspend = vmw_pci_suspend,
- .resume = vmw_pci_resume
- },
+ .name = VMWGFX_DRIVER_NAME,
+ .id_table = vmw_pci_id_list,
+ .probe = vmw_probe,
+ .remove = vmw_remove,
+ .driver = {
+ .pm = &vmw_pm_ops
+ }
+ },
.name = VMWGFX_DRIVER_NAME,
.desc = VMWGFX_DRIVER_DESC,
.date = VMWGFX_DRIVER_DATE,
@@ -863,3 +951,7 @@ module_exit(vmwgfx_exit);
MODULE_AUTHOR("VMware Inc. and others");
MODULE_DESCRIPTION("Standalone drm driver for the VMware SVGA device");
MODULE_LICENSE("GPL and additional rights");
+MODULE_VERSION(__stringify(VMWGFX_DRIVER_MAJOR) "."
+ __stringify(VMWGFX_DRIVER_MINOR) "."
+ __stringify(VMWGFX_DRIVER_PATCHLEVEL) "."
+ "0");
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
index 58de6393f611..e7a58d055041 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
@@ -39,9 +39,9 @@
#include "ttm/ttm_execbuf_util.h"
#include "ttm/ttm_module.h"
-#define VMWGFX_DRIVER_DATE "20100209"
+#define VMWGFX_DRIVER_DATE "20100927"
#define VMWGFX_DRIVER_MAJOR 1
-#define VMWGFX_DRIVER_MINOR 2
+#define VMWGFX_DRIVER_MINOR 4
#define VMWGFX_DRIVER_PATCHLEVEL 0
#define VMWGFX_FILE_PAGE_OFFSET 0x00100000
#define VMWGFX_FIFO_STATIC_SIZE (1024*1024)
@@ -49,6 +49,9 @@
#define VMWGFX_MAX_GMRS 2048
#define VMWGFX_MAX_DISPLAYS 16
+#define VMW_PL_GMR TTM_PL_PRIV0
+#define VMW_PL_FLAG_GMR TTM_PL_FLAG_PRIV0
+
struct vmw_fpriv {
struct drm_master *locked_master;
struct ttm_object_file *tfile;
@@ -57,8 +60,6 @@ struct vmw_fpriv {
struct vmw_dma_buffer {
struct ttm_buffer_object base;
struct list_head validate_list;
- struct list_head gmr_lru;
- uint32_t gmr_id;
bool gmr_bound;
uint32_t cur_validate_node;
bool on_validate_list;
@@ -151,6 +152,8 @@ struct vmw_overlay;
struct vmw_master {
struct ttm_lock lock;
+ struct mutex fb_surf_mutex;
+ struct list_head fb_surf;
};
struct vmw_vga_topology_state {
@@ -182,6 +185,7 @@ struct vmw_private {
uint32_t capabilities;
uint32_t max_gmr_descriptors;
uint32_t max_gmr_ids;
+ bool has_gmr;
struct mutex hw_mutex;
/*
@@ -264,14 +268,6 @@ struct vmw_private {
struct mutex cmdbuf_mutex;
/**
- * GMR management. Protected by the lru spinlock.
- */
-
- struct ida gmr_ida;
- struct list_head gmr_lru;
-
-
- /**
* Operating mode.
*/
@@ -286,6 +282,7 @@ struct vmw_private {
struct vmw_master *active_master;
struct vmw_master fbdev_master;
struct notifier_block pm_nb;
+ bool suspended;
struct mutex release_mutex;
uint32_t num_3d_resources;
@@ -331,7 +328,9 @@ void vmw_3d_resource_dec(struct vmw_private *dev_priv);
*/
extern int vmw_gmr_bind(struct vmw_private *dev_priv,
- struct ttm_buffer_object *bo);
+ struct page *pages[],
+ unsigned long num_pages,
+ int gmr_id);
extern void vmw_gmr_unbind(struct vmw_private *dev_priv, int gmr_id);
/**
@@ -380,14 +379,10 @@ extern uint32_t vmw_dmabuf_validate_node(struct ttm_buffer_object *bo,
extern void vmw_dmabuf_validate_clear(struct ttm_buffer_object *bo);
extern int vmw_user_dmabuf_lookup(struct ttm_object_file *tfile,
uint32_t id, struct vmw_dma_buffer **out);
-extern uint32_t vmw_dmabuf_gmr(struct ttm_buffer_object *bo);
-extern void vmw_dmabuf_set_gmr(struct ttm_buffer_object *bo, uint32_t id);
-extern int vmw_gmr_id_alloc(struct vmw_private *dev_priv, uint32_t *p_id);
extern int vmw_dmabuf_to_start_of_vram(struct vmw_private *vmw_priv,
struct vmw_dma_buffer *bo);
extern int vmw_dmabuf_from_vram(struct vmw_private *vmw_priv,
struct vmw_dma_buffer *bo);
-extern void vmw_dmabuf_gmr_unbind(struct ttm_buffer_object *bo);
extern int vmw_stream_claim_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_priv);
extern int vmw_stream_unref_ioctl(struct drm_device *dev, void *data,
@@ -439,6 +434,7 @@ extern int vmw_mmap(struct file *filp, struct vm_area_struct *vma);
extern struct ttm_placement vmw_vram_placement;
extern struct ttm_placement vmw_vram_ne_placement;
extern struct ttm_placement vmw_vram_sys_placement;
+extern struct ttm_placement vmw_vram_gmr_placement;
extern struct ttm_placement vmw_sys_placement;
extern struct ttm_bo_driver vmw_bo_driver;
extern int vmw_dma_quiescent(struct drm_device *dev);
@@ -518,6 +514,10 @@ void vmw_kms_write_svga(struct vmw_private *vmw_priv,
unsigned bbp, unsigned depth);
int vmw_kms_update_layout_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_priv);
+void vmw_kms_idle_workqueues(struct vmw_master *vmaster);
+bool vmw_kms_validate_mode_vram(struct vmw_private *dev_priv,
+ uint32_t pitch,
+ uint32_t height);
u32 vmw_get_vblank_counter(struct drm_device *dev, int crtc);
/**
@@ -537,6 +537,12 @@ int vmw_overlay_num_overlays(struct vmw_private *dev_priv);
int vmw_overlay_num_free_overlays(struct vmw_private *dev_priv);
/**
+ * GMR Id manager
+ */
+
+extern const struct ttm_mem_type_manager_func vmw_gmrid_manager_func;
+
+/**
* Inline helper functions
*/
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c b/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c
index 8e396850513c..76954e3528c1 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c
@@ -538,8 +538,11 @@ static void vmw_apply_relocations(struct vmw_sw_context *sw_context)
reloc = &sw_context->relocs[i];
validate = &sw_context->val_bufs[reloc->index];
bo = validate->bo;
- reloc->location->offset += bo->offset;
- reloc->location->gmrId = vmw_dmabuf_gmr(bo);
+ if (bo->mem.mem_type == TTM_PL_VRAM) {
+ reloc->location->offset += bo->offset;
+ reloc->location->gmrId = SVGA_GMR_FRAMEBUFFER;
+ } else
+ reloc->location->gmrId = bo->mem.start;
}
vmw_free_relocations(sw_context);
}
@@ -563,25 +566,14 @@ static int vmw_validate_single_buffer(struct vmw_private *dev_priv,
{
int ret;
- if (vmw_dmabuf_gmr(bo) != SVGA_GMR_NULL)
- return 0;
-
/**
- * Put BO in VRAM, only if there is space.
+ * Put BO in VRAM if there is space, otherwise as a GMR.
+ * If there is no space in VRAM and GMR ids are all used up,
+ * start evicting GMRs to make room. If the DMA buffer can't be
+ * used as a GMR, this will return -ENOMEM.
*/
- ret = ttm_bo_validate(bo, &vmw_vram_sys_placement, true, false, false);
- if (unlikely(ret == -ERESTARTSYS))
- return ret;
-
- /**
- * Otherwise, set it up as GMR.
- */
-
- if (vmw_dmabuf_gmr(bo) != SVGA_GMR_NULL)
- return 0;
-
- ret = vmw_gmr_bind(dev_priv, bo);
+ ret = ttm_bo_validate(bo, &vmw_vram_gmr_placement, true, false, false);
if (likely(ret == 0 || ret == -ERESTARTSYS))
return ret;
@@ -590,6 +582,7 @@ static int vmw_validate_single_buffer(struct vmw_private *dev_priv,
* previous contents.
*/
+ DRM_INFO("Falling through to VRAM.\n");
ret = ttm_bo_validate(bo, &vmw_vram_placement, true, false, false);
return ret;
}
@@ -698,6 +691,7 @@ int vmw_execbuf_ioctl(struct drm_device *dev, void *data,
fence_rep.error = ret;
fence_rep.fence_seq = (uint64_t) sequence;
+ fence_rep.pad64 = 0;
user_fence_rep = (struct drm_vmw_fence_rep __user *)
(unsigned long)arg->fence_rep;
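
Taken together, the vmwgfx hunks above turn GMR binding from a per-bo side channel (the removed gmr_ida/gmr_lru bookkeeping) into an ordinary TTM memory type: VMW_PL_GMR is TTM_PL_PRIV0, the GMR id a buffer is bound to travels in bo->mem.start (presumably filled in by the gmrid manager added further below), vmw_ttm_bind() hands that id and the populated page list to vmw_gmr_bind(), and command-stream relocations read the id straight from bo->mem.start. A compressed view of the validation path, assembled from the execbuf hunk above (the wrapper function is illustrative; the calls and placements are the patch's own):

	static int validate_for_gmr_or_vram(struct ttm_buffer_object *bo)
	{
		int ret;

		/* Prefer VRAM, else take a GMR id; evicts GMRs if both are full. */
		ret = ttm_bo_validate(bo, &vmw_vram_gmr_placement, true, false, false);
		if (likely(ret == 0 || ret == -ERESTARTSYS))
			return ret;

		/* Fall back to plain VRAM, as in the hunk above. */
		return ttm_bo_validate(bo, &vmw_vram_placement, true, false, false);
	}

After validation, relocation patching only needs bo->mem: VRAM buffers get SVGA_GMR_FRAMEBUFFER plus bo->offset, anything else uses bo->mem.start as the gmrId.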
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_fb.c b/drivers/gpu/drm/vmwgfx/vmwgfx_fb.c
index 409e172f4abf..41d9a5b73c03 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_fb.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_fb.c
@@ -144,6 +144,13 @@ static int vmw_fb_check_var(struct fb_var_screeninfo *var,
return -EINVAL;
}
+ if (!vmw_kms_validate_mode_vram(vmw_priv,
+ info->fix.line_length,
+ var->yoffset + var->yres)) {
+ DRM_ERROR("Requested geom can not fit in framebuffer\n");
+ return -EINVAL;
+ }
+
return 0;
}
@@ -205,6 +212,9 @@ static void vmw_fb_dirty_flush(struct vmw_fb_par *par)
SVGAFifoCmdUpdate body;
} *cmd;
+ if (vmw_priv->suspended)
+ return;
+
spin_lock_irqsave(&par->dirty.lock, flags);
if (!par->dirty.active) {
spin_unlock_irqrestore(&par->dirty.lock, flags);
@@ -616,7 +626,8 @@ int vmw_dmabuf_to_start_of_vram(struct vmw_private *vmw_priv,
goto err_unlock;
if (bo->mem.mem_type == TTM_PL_VRAM &&
- bo->mem.mm_node->start < bo->num_pages)
+ bo->mem.start < bo->num_pages &&
+ bo->mem.start > 0)
(void) ttm_bo_validate(bo, &vmw_sys_placement, false,
false, false);
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_gmr.c b/drivers/gpu/drm/vmwgfx/vmwgfx_gmr.c
index 5f8908a5d7fd..de0c5948521d 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_gmr.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_gmr.c
@@ -146,7 +146,7 @@ static void vmw_gmr_fire_descriptors(struct vmw_private *dev_priv,
*/
static unsigned long vmw_gmr_count_descriptors(struct page *pages[],
- unsigned long num_pages)
+ unsigned long num_pages)
{
unsigned long prev_pfn = ~(0UL);
unsigned long pfn;
@@ -163,45 +163,33 @@ static unsigned long vmw_gmr_count_descriptors(struct page *pages[],
}
int vmw_gmr_bind(struct vmw_private *dev_priv,
- struct ttm_buffer_object *bo)
+ struct page *pages[],
+ unsigned long num_pages,
+ int gmr_id)
{
- struct ttm_tt *ttm = bo->ttm;
- unsigned long descriptors;
- int ret;
- uint32_t id;
struct list_head desc_pages;
+ int ret;
- if (!(dev_priv->capabilities & SVGA_CAP_GMR))
+ if (unlikely(!(dev_priv->capabilities & SVGA_CAP_GMR)))
return -EINVAL;
- ret = ttm_tt_populate(ttm);
- if (unlikely(ret != 0))
- return ret;
-
- descriptors = vmw_gmr_count_descriptors(ttm->pages, ttm->num_pages);
- if (unlikely(descriptors > dev_priv->max_gmr_descriptors))
+ if (vmw_gmr_count_descriptors(pages, num_pages) >
+ dev_priv->max_gmr_descriptors)
return -EINVAL;
INIT_LIST_HEAD(&desc_pages);
- ret = vmw_gmr_build_descriptors(&desc_pages, ttm->pages,
- ttm->num_pages);
- if (unlikely(ret != 0))
- return ret;
- ret = vmw_gmr_id_alloc(dev_priv, &id);
+ ret = vmw_gmr_build_descriptors(&desc_pages, pages, num_pages);
if (unlikely(ret != 0))
- goto out_no_id;
+ return ret;
- vmw_gmr_fire_descriptors(dev_priv, id, &desc_pages);
+ vmw_gmr_fire_descriptors(dev_priv, gmr_id, &desc_pages);
vmw_gmr_free_descriptors(&desc_pages);
- vmw_dmabuf_set_gmr(bo, id);
- return 0;
-out_no_id:
- vmw_gmr_free_descriptors(&desc_pages);
- return ret;
+ return 0;
}
+
void vmw_gmr_unbind(struct vmw_private *dev_priv, int gmr_id)
{
mutex_lock(&dev_priv->hw_mutex);
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_gmrid_manager.c b/drivers/gpu/drm/vmwgfx/vmwgfx_gmrid_manager.c
new file mode 100644
index 000000000000..ac6e0d1bd629
--- /dev/null
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_gmrid_manager.c
@@ -0,0 +1,137 @@
+/**************************************************************************
+ *
+ * Copyright (c) 2007-2010 VMware, Inc., Palo Alto, CA., USA
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
+ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
+ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
+ * USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ **************************************************************************/
+/*
+ * Authors: Thomas Hellstrom <thellstrom-at-vmware-dot-com>
+ */
+
+#include "vmwgfx_drv.h"
+#include "ttm/ttm_module.h"
+#include "ttm/ttm_bo_driver.h"
+#include "ttm/ttm_placement.h"
+#include <linux/idr.h>
+#include <linux/spinlock.h>
+#include <linux/kernel.h>
+
+struct vmwgfx_gmrid_man {
+ spinlock_t lock;
+ struct ida gmr_ida;
+ uint32_t max_gmr_ids;
+};
+
+static int vmw_gmrid_man_get_node(struct ttm_mem_type_manager *man,
+ struct ttm_buffer_object *bo,
+ struct ttm_placement *placement,
+ struct ttm_mem_reg *mem)
+{
+ struct vmwgfx_gmrid_man *gman =
+ (struct vmwgfx_gmrid_man *)man->priv;
+ int ret;
+ int id;
+
+ mem->mm_node = NULL;
+
+ do {
+ if (unlikely(ida_pre_get(&gman->gmr_ida, GFP_KERNEL) == 0))
+ return -ENOMEM;
+
+ spin_lock(&gman->lock);
+ ret = ida_get_new(&gman->gmr_ida, &id);
+
+ if (unlikely(ret == 0 && id >= gman->max_gmr_ids)) {
+ ida_remove(&gman->gmr_ida, id);
+ spin_unlock(&gman->lock);
+ return 0;
+ }
+
+ spin_unlock(&gman->lock);
+
+ } while (ret == -EAGAIN);
+
+ if (likely(ret == 0)) {
+ mem->mm_node = gman;
+ mem->start = id;
+ }
+
+ return ret;
+}
+
+static void vmw_gmrid_man_put_node(struct ttm_mem_type_manager *man,
+ struct ttm_mem_reg *mem)
+{
+ struct vmwgfx_gmrid_man *gman =
+ (struct vmwgfx_gmrid_man *)man->priv;
+
+ if (mem->mm_node) {
+ spin_lock(&gman->lock);
+ ida_remove(&gman->gmr_ida, mem->start);
+ spin_unlock(&gman->lock);
+ mem->mm_node = NULL;
+ }
+}
+
+static int vmw_gmrid_man_init(struct ttm_mem_type_manager *man,
+ unsigned long p_size)
+{
+ struct vmwgfx_gmrid_man *gman =
+ kzalloc(sizeof(*gman), GFP_KERNEL);
+
+ if (unlikely(gman == NULL))
+ return -ENOMEM;
+
+ spin_lock_init(&gman->lock);
+ ida_init(&gman->gmr_ida);
+ gman->max_gmr_ids = p_size;
+ man->priv = (void *) gman;
+ return 0;
+}
+
+static int vmw_gmrid_man_takedown(struct ttm_mem_type_manager *man)
+{
+ struct vmwgfx_gmrid_man *gman =
+ (struct vmwgfx_gmrid_man *)man->priv;
+
+ if (gman) {
+ ida_destroy(&gman->gmr_ida);
+ kfree(gman);
+ }
+ return 0;
+}
+
+static void vmw_gmrid_man_debug(struct ttm_mem_type_manager *man,
+ const char *prefix)
+{
+ printk(KERN_INFO "%s: No debug info available for the GMR "
+ "id manager.\n", prefix);
+}
+
+const struct ttm_mem_type_manager_func vmw_gmrid_manager_func = {
+ vmw_gmrid_man_init,
+ vmw_gmrid_man_takedown,
+ vmw_gmrid_man_get_node,
+ vmw_gmrid_man_put_node,
+ vmw_gmrid_man_debug
+};
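For readers unfamiliar with the older ida_pre_get()/ida_get_new() interface used by the new manager, here is a minimal, self-contained sketch of the same allocate-with-retry pattern. The names (example_ida, example_gmr_id_alloc) and the -ENOSPC return are illustrative only; the manager above instead returns 0 with mem->mm_node left NULL when the id space is exhausted, and protects the ida with its own spinlock.

#include <linux/idr.h>
#include <linux/gfp.h>
#include <linux/errno.h>

static struct ida example_ida;	/* ida_init(&example_ida) must run once first */

/* Return the lowest free id below max_ids, or a negative errno. */
static int example_gmr_id_alloc(unsigned int max_ids)
{
	int id, ret;

	do {
		if (ida_pre_get(&example_ida, GFP_KERNEL) == 0)
			return -ENOMEM;		/* could not preload memory */
		ret = ida_get_new(&example_ida, &id);
	} while (ret == -EAGAIN);		/* preload was consumed, retry */

	if (ret != 0)
		return ret;

	if (id >= max_ids) {			/* past the device's id limit */
		ida_remove(&example_ida, id);
		return -ENOSPC;
	}
	return id;
}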
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_ioctl.c b/drivers/gpu/drm/vmwgfx/vmwgfx_ioctl.c
index 1c7a316454d8..570d57775a58 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_ioctl.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_ioctl.c
@@ -54,6 +54,9 @@ int vmw_getparam_ioctl(struct drm_device *dev, void *data,
case DRM_VMW_PARAM_FIFO_CAPS:
param->value = dev_priv->fifo.capabilities;
break;
+ case DRM_VMW_PARAM_MAX_FB_SIZE:
+ param->value = dev_priv->vram_size;
+ break;
default:
DRM_ERROR("Illegal vmwgfx get param request: %d\n",
param->param);
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c
index e882ba099f0c..cceeb42789b6 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c
@@ -332,18 +332,55 @@ struct vmw_framebuffer_surface {
struct delayed_work d_work;
struct mutex work_lock;
bool present_fs;
+ struct list_head head;
+ struct drm_master *master;
};
+/**
+ * vmw_kms_idle_workqueues - Flush workqueues on this master
+ *
+ * @vmaster: Pointer identifying the master whose surfaces' dirty work
+ * queues are to be idled.
+ *
+ * This function should be called with the ttm lock held in exclusive mode
+ * to idle all dirty work queues before the fifo is taken down.
+ *
+ * The work task may actually requeue itself, but after the flush returns we're
+ * sure that there's nothing to present, since the ttm lock is held in
+ * exclusive mode, so the fifo will never get used.
+ */
+
+void vmw_kms_idle_workqueues(struct vmw_master *vmaster)
+{
+ struct vmw_framebuffer_surface *entry;
+
+ mutex_lock(&vmaster->fb_surf_mutex);
+ list_for_each_entry(entry, &vmaster->fb_surf, head) {
+ if (cancel_delayed_work_sync(&entry->d_work))
+ (void) entry->d_work.work.func(&entry->d_work.work);
+
+ (void) cancel_delayed_work_sync(&entry->d_work);
+ }
+ mutex_unlock(&vmaster->fb_surf_mutex);
+}
+
void vmw_framebuffer_surface_destroy(struct drm_framebuffer *framebuffer)
{
- struct vmw_framebuffer_surface *vfb =
+ struct vmw_framebuffer_surface *vfbs =
vmw_framebuffer_to_vfbs(framebuffer);
+ struct vmw_master *vmaster = vmw_master(vfbs->master);
+
+
+ mutex_lock(&vmaster->fb_surf_mutex);
+ list_del(&vfbs->head);
+ mutex_unlock(&vmaster->fb_surf_mutex);
- cancel_delayed_work_sync(&vfb->d_work);
+ cancel_delayed_work_sync(&vfbs->d_work);
+ drm_master_put(&vfbs->master);
drm_framebuffer_cleanup(framebuffer);
- vmw_surface_unreference(&vfb->surface);
+ vmw_surface_unreference(&vfbs->surface);
- kfree(framebuffer);
+ kfree(vfbs);
}
static void vmw_framebuffer_present_fs_callback(struct work_struct *work)
@@ -362,6 +399,12 @@ static void vmw_framebuffer_present_fs_callback(struct work_struct *work)
SVGA3dCopyRect cr;
} *cmd;
+ /**
+ * Strictly we should take the ttm_lock in read mode before accessing
+ * the fifo, to make sure the fifo is present and up. However,
+ * instead we flush all workqueues under the ttm lock in exclusive mode
+ * before taking down the fifo.
+ */
mutex_lock(&vfbs->work_lock);
if (!vfbs->present_fs)
goto out_unlock;
@@ -392,17 +435,20 @@ out_unlock:
int vmw_framebuffer_surface_dirty(struct drm_framebuffer *framebuffer,
+ struct drm_file *file_priv,
unsigned flags, unsigned color,
struct drm_clip_rect *clips,
unsigned num_clips)
{
struct vmw_private *dev_priv = vmw_priv(framebuffer->dev);
+ struct vmw_master *vmaster = vmw_master(file_priv->master);
struct vmw_framebuffer_surface *vfbs =
vmw_framebuffer_to_vfbs(framebuffer);
struct vmw_surface *surf = vfbs->surface;
struct drm_clip_rect norect;
SVGA3dCopyRect *cr;
int i, inc = 1;
+ int ret;
struct {
SVGA3dCmdHeader header;
@@ -410,6 +456,13 @@ int vmw_framebuffer_surface_dirty(struct drm_framebuffer *framebuffer,
SVGA3dCopyRect cr;
} *cmd;
+ if (unlikely(vfbs->master != file_priv->master))
+ return -EINVAL;
+
+ ret = ttm_read_lock(&vmaster->lock, true);
+ if (unlikely(ret != 0))
+ return ret;
+
if (!num_clips ||
!(dev_priv->fifo.capabilities &
SVGA_FIFO_CAP_SCREEN_OBJECT)) {
@@ -425,6 +478,7 @@ int vmw_framebuffer_surface_dirty(struct drm_framebuffer *framebuffer,
*/
vmw_framebuffer_present_fs_callback(&vfbs->d_work.work);
}
+ ttm_read_unlock(&vmaster->lock);
return 0;
}
@@ -442,6 +496,7 @@ int vmw_framebuffer_surface_dirty(struct drm_framebuffer *framebuffer,
cmd = vmw_fifo_reserve(dev_priv, sizeof(*cmd) + (num_clips - 1) * sizeof(cmd->cr));
if (unlikely(cmd == NULL)) {
DRM_ERROR("Fifo reserve failed.\n");
+ ttm_read_unlock(&vmaster->lock);
return -ENOMEM;
}
@@ -461,7 +516,7 @@ int vmw_framebuffer_surface_dirty(struct drm_framebuffer *framebuffer,
}
vmw_fifo_commit(dev_priv, sizeof(*cmd) + (num_clips - 1) * sizeof(cmd->cr));
-
+ ttm_read_unlock(&vmaster->lock);
return 0;
}
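Taken together, the additions to vmw_framebuffer_surface_dirty (and to the dmabuf variant further down) follow one pattern: hold the per-master ttm lock in read mode for the duration of any fifo access, and drop it on every exit path. A condensed sketch, with a hypothetical function name and an arbitrary command size, assuming only the call signatures that appear in the hunks:

static int dirty_callback_sketch(struct vmw_private *dev_priv,
				 struct vmw_master *vmaster)
{
	void *cmd;
	int ret;

	ret = ttm_read_lock(&vmaster->lock, true);	/* interruptible */
	if (unlikely(ret != 0))
		return ret;

	cmd = vmw_fifo_reserve(dev_priv, 64);		/* size is arbitrary here */
	if (unlikely(cmd == NULL)) {
		ttm_read_unlock(&vmaster->lock);	/* never leak the read lock */
		return -ENOMEM;
	}

	/* ... build the update command in cmd ... */

	vmw_fifo_commit(dev_priv, 64);
	ttm_read_unlock(&vmaster->lock);
	return 0;
}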
@@ -471,16 +526,57 @@ static struct drm_framebuffer_funcs vmw_framebuffer_surface_funcs = {
.create_handle = vmw_framebuffer_create_handle,
};
-int vmw_kms_new_framebuffer_surface(struct vmw_private *dev_priv,
- struct vmw_surface *surface,
- struct vmw_framebuffer **out,
- unsigned width, unsigned height)
+static int vmw_kms_new_framebuffer_surface(struct vmw_private *dev_priv,
+ struct drm_file *file_priv,
+ struct vmw_surface *surface,
+ struct vmw_framebuffer **out,
+ const struct drm_mode_fb_cmd
+ *mode_cmd)
{
struct drm_device *dev = dev_priv->dev;
struct vmw_framebuffer_surface *vfbs;
+ enum SVGA3dSurfaceFormat format;
+ struct vmw_master *vmaster = vmw_master(file_priv->master);
int ret;
+ /*
+ * Sanity checks.
+ */
+
+ if (unlikely(surface->mip_levels[0] != 1 ||
+ surface->num_sizes != 1 ||
+ surface->sizes[0].width < mode_cmd->width ||
+ surface->sizes[0].height < mode_cmd->height ||
+ surface->sizes[0].depth != 1)) {
+ DRM_ERROR("Incompatible surface dimensions "
+ "for requested mode.\n");
+ return -EINVAL;
+ }
+
+ switch (mode_cmd->depth) {
+ case 32:
+ format = SVGA3D_A8R8G8B8;
+ break;
+ case 24:
+ format = SVGA3D_X8R8G8B8;
+ break;
+ case 16:
+ format = SVGA3D_R5G6B5;
+ break;
+ case 15:
+ format = SVGA3D_A1R5G5B5;
+ break;
+ default:
+ DRM_ERROR("Invalid color depth: %d\n", mode_cmd->depth);
+ return -EINVAL;
+ }
+
+ if (unlikely(format != surface->format)) {
+ DRM_ERROR("Invalid surface format for requested mode.\n");
+ return -EINVAL;
+ }
+
vfbs = kzalloc(sizeof(*vfbs), GFP_KERNEL);
if (!vfbs) {
ret = -ENOMEM;
@@ -498,16 +594,22 @@ int vmw_kms_new_framebuffer_surface(struct vmw_private *dev_priv,
}
/* XXX get the first 3 from the surface info */
- vfbs->base.base.bits_per_pixel = 32;
- vfbs->base.base.pitch = width * 32 / 4;
- vfbs->base.base.depth = 24;
- vfbs->base.base.width = width;
- vfbs->base.base.height = height;
+ vfbs->base.base.bits_per_pixel = mode_cmd->bpp;
+ vfbs->base.base.pitch = mode_cmd->pitch;
+ vfbs->base.base.depth = mode_cmd->depth;
+ vfbs->base.base.width = mode_cmd->width;
+ vfbs->base.base.height = mode_cmd->height;
vfbs->base.pin = &vmw_surface_dmabuf_pin;
vfbs->base.unpin = &vmw_surface_dmabuf_unpin;
vfbs->surface = surface;
+ vfbs->master = drm_master_get(file_priv->master);
mutex_init(&vfbs->work_lock);
+
+ mutex_lock(&vmaster->fb_surf_mutex);
INIT_DELAYED_WORK(&vfbs->d_work, &vmw_framebuffer_present_fs_callback);
+ list_add_tail(&vfbs->head, &vmaster->fb_surf);
+ mutex_unlock(&vmaster->fb_surf_mutex);
+
*out = &vfbs->base;
return 0;
@@ -544,18 +646,25 @@ void vmw_framebuffer_dmabuf_destroy(struct drm_framebuffer *framebuffer)
}
int vmw_framebuffer_dmabuf_dirty(struct drm_framebuffer *framebuffer,
+ struct drm_file *file_priv,
unsigned flags, unsigned color,
struct drm_clip_rect *clips,
unsigned num_clips)
{
struct vmw_private *dev_priv = vmw_priv(framebuffer->dev);
+ struct vmw_master *vmaster = vmw_master(file_priv->master);
struct drm_clip_rect norect;
+ int ret;
struct {
uint32_t header;
SVGAFifoCmdUpdate body;
} *cmd;
int i, increment = 1;
+ ret = ttm_read_lock(&vmaster->lock, true);
+ if (unlikely(ret != 0))
+ return ret;
+
if (!num_clips) {
num_clips = 1;
clips = &norect;
@@ -570,6 +679,7 @@ int vmw_framebuffer_dmabuf_dirty(struct drm_framebuffer *framebuffer,
cmd = vmw_fifo_reserve(dev_priv, sizeof(*cmd) * num_clips);
if (unlikely(cmd == NULL)) {
DRM_ERROR("Fifo reserve failed.\n");
+ ttm_read_unlock(&vmaster->lock);
return -ENOMEM;
}
@@ -582,6 +692,7 @@ int vmw_framebuffer_dmabuf_dirty(struct drm_framebuffer *framebuffer,
}
vmw_fifo_commit(dev_priv, sizeof(*cmd) * num_clips);
+ ttm_read_unlock(&vmaster->lock);
return 0;
}
@@ -609,6 +720,8 @@ static int vmw_surface_dmabuf_pin(struct vmw_framebuffer *vfb)
&vmw_vram_ne_placement,
false, &vmw_dmabuf_bo_free);
vmw_overlay_resume_all(dev_priv);
+ if (unlikely(ret != 0))
+ vfbs->buffer = NULL;
return ret;
}
@@ -619,6 +732,9 @@ static int vmw_surface_dmabuf_unpin(struct vmw_framebuffer *vfb)
struct vmw_framebuffer_surface *vfbs =
vmw_framebuffer_to_vfbs(&vfb->base);
+ if (unlikely(vfbs->buffer == NULL))
+ return 0;
+
bo = &vfbs->buffer->base;
ttm_bo_unref(&bo);
vfbs->buffer = NULL;
@@ -659,16 +775,25 @@ static int vmw_framebuffer_dmabuf_unpin(struct vmw_framebuffer *vfb)
return vmw_dmabuf_from_vram(dev_priv, vfbd->buffer);
}
-int vmw_kms_new_framebuffer_dmabuf(struct vmw_private *dev_priv,
- struct vmw_dma_buffer *dmabuf,
- struct vmw_framebuffer **out,
- unsigned width, unsigned height)
+static int vmw_kms_new_framebuffer_dmabuf(struct vmw_private *dev_priv,
+ struct vmw_dma_buffer *dmabuf,
+ struct vmw_framebuffer **out,
+ const struct drm_mode_fb_cmd
+ *mode_cmd)
{
struct drm_device *dev = dev_priv->dev;
struct vmw_framebuffer_dmabuf *vfbd;
+ unsigned int requested_size;
int ret;
+ requested_size = mode_cmd->height * mode_cmd->pitch;
+ if (unlikely(requested_size > dmabuf->base.num_pages * PAGE_SIZE)) {
+ DRM_ERROR("Screen buffer object size is too small "
+ "for requested mode.\n");
+ return -EINVAL;
+ }
+
vfbd = kzalloc(sizeof(*vfbd), GFP_KERNEL);
if (!vfbd) {
ret = -ENOMEM;
@@ -685,12 +810,11 @@ int vmw_kms_new_framebuffer_dmabuf(struct vmw_private *dev_priv,
goto out_err3;
}
- /* XXX get the first 3 from the surface info */
- vfbd->base.base.bits_per_pixel = 32;
- vfbd->base.base.pitch = width * vfbd->base.base.bits_per_pixel / 8;
- vfbd->base.base.depth = 24;
- vfbd->base.base.width = width;
- vfbd->base.base.height = height;
+ vfbd->base.base.bits_per_pixel = mode_cmd->bpp;
+ vfbd->base.base.pitch = mode_cmd->pitch;
+ vfbd->base.base.depth = mode_cmd->depth;
+ vfbd->base.base.width = mode_cmd->width;
+ vfbd->base.base.height = mode_cmd->height;
vfbd->base.pin = vmw_framebuffer_dmabuf_pin;
vfbd->base.unpin = vmw_framebuffer_dmabuf_unpin;
vfbd->buffer = dmabuf;
@@ -719,8 +843,25 @@ static struct drm_framebuffer *vmw_kms_fb_create(struct drm_device *dev,
struct vmw_framebuffer *vfb = NULL;
struct vmw_surface *surface = NULL;
struct vmw_dma_buffer *bo = NULL;
+ u64 required_size;
int ret;
+ /**
+ * This code should be conditioned on Screen Objects not being used.
+ * If screen objects are used, we can allocate a GMR to hold the
+ * requested framebuffer.
+ */
+
+ required_size = mode_cmd->pitch * mode_cmd->height;
+ if (unlikely(required_size > (u64) dev_priv->vram_size)) {
+ DRM_ERROR("VRAM size is too small for requested mode.\n");
+ return NULL;
+ }
+
+ /**
+ * End conditioned code.
+ */
+
ret = vmw_user_surface_lookup_handle(dev_priv, tfile,
mode_cmd->handle, &surface);
if (ret)
@@ -729,8 +870,8 @@ static struct drm_framebuffer *vmw_kms_fb_create(struct drm_device *dev,
if (!surface->scanout)
goto err_not_scanout;
- ret = vmw_kms_new_framebuffer_surface(dev_priv, surface, &vfb,
- mode_cmd->width, mode_cmd->height);
+ ret = vmw_kms_new_framebuffer_surface(dev_priv, file_priv, surface,
+ &vfb, mode_cmd);
/* vmw_user_surface_lookup takes one ref so does new_fb */
vmw_surface_unreference(&surface);
@@ -751,7 +892,7 @@ try_dmabuf:
}
ret = vmw_kms_new_framebuffer_dmabuf(dev_priv, bo, &vfb,
- mode_cmd->width, mode_cmd->height);
+ mode_cmd);
/* vmw_user_dmabuf_lookup takes one ref so does new_fb */
vmw_dmabuf_unreference(&bo);
@@ -889,6 +1030,9 @@ int vmw_kms_save_vga(struct vmw_private *vmw_priv)
vmw_priv->num_displays = vmw_read(vmw_priv,
SVGA_REG_NUM_GUEST_DISPLAYS);
+ if (vmw_priv->num_displays == 0)
+ vmw_priv->num_displays = 1;
+
for (i = 0; i < vmw_priv->num_displays; ++i) {
save = &vmw_priv->vga_save[i];
vmw_write(vmw_priv, SVGA_REG_DISPLAY_ID, i);
@@ -997,6 +1141,13 @@ out_unlock:
return ret;
}
+bool vmw_kms_validate_mode_vram(struct vmw_private *dev_priv,
+ uint32_t pitch,
+ uint32_t height)
+{
+ return ((u64) pitch * (u64) height) < (u64) dev_priv->vram_size;
+}
+
u32 vmw_get_vblank_counter(struct drm_device *dev, int crtc)
{
return 0;
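As a worked example of the VRAM check added above (the mode numbers are hypothetical and the helper name is made up): a 1280x1024 mode at 32 bpp has a pitch of 5120 bytes, so it needs 5120 * 1024 = 5,242,880 bytes and validates only if the device reports strictly more than 5 MiB of VRAM.

/* Mirrors the test in vmw_kms_validate_mode_vram() for that mode. */
static bool example_mode_fits(u64 vram_size)
{
	uint32_t pitch  = 1280 * 4;	/* bytes per scanline */
	uint32_t height = 1024;

	return (u64) pitch * (u64) height < vram_size;
}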
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_ldu.c b/drivers/gpu/drm/vmwgfx/vmwgfx_ldu.c
index 11cb39e3accb..29113c9b26a8 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_ldu.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_ldu.c
@@ -427,7 +427,9 @@ static int vmw_ldu_connector_fill_modes(struct drm_connector *connector,
{
struct vmw_legacy_display_unit *ldu = vmw_connector_to_ldu(connector);
struct drm_device *dev = connector->dev;
+ struct vmw_private *dev_priv = vmw_priv(dev);
struct drm_display_mode *mode = NULL;
+ struct drm_display_mode *bmode;
struct drm_display_mode prefmode = { DRM_MODE("preferred",
DRM_MODE_TYPE_DRIVER | DRM_MODE_TYPE_PREFERRED,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
@@ -443,22 +445,30 @@ static int vmw_ldu_connector_fill_modes(struct drm_connector *connector,
mode->hdisplay = ldu->pref_width;
mode->vdisplay = ldu->pref_height;
mode->vrefresh = drm_mode_vrefresh(mode);
- drm_mode_probed_add(connector, mode);
+ if (vmw_kms_validate_mode_vram(dev_priv, mode->hdisplay * 2,
+ mode->vdisplay)) {
+ drm_mode_probed_add(connector, mode);
- if (ldu->pref_mode) {
- list_del_init(&ldu->pref_mode->head);
- drm_mode_destroy(dev, ldu->pref_mode);
- }
+ if (ldu->pref_mode) {
+ list_del_init(&ldu->pref_mode->head);
+ drm_mode_destroy(dev, ldu->pref_mode);
+ }
- ldu->pref_mode = mode;
+ ldu->pref_mode = mode;
+ }
}
for (i = 0; vmw_ldu_connector_builtin[i].type != 0; i++) {
- if (vmw_ldu_connector_builtin[i].hdisplay > max_width ||
- vmw_ldu_connector_builtin[i].vdisplay > max_height)
+ bmode = &vmw_ldu_connector_builtin[i];
+ if (bmode->hdisplay > max_width ||
+ bmode->vdisplay > max_height)
+ continue;
+
+ if (!vmw_kms_validate_mode_vram(dev_priv, bmode->hdisplay * 2,
+ bmode->vdisplay))
continue;
- mode = drm_mode_duplicate(dev, &vmw_ldu_connector_builtin[i]);
+ mode = drm_mode_duplicate(dev, bmode);
if (!mode)
return 0;
mode->vrefresh = drm_mode_vrefresh(mode);
@@ -547,7 +557,7 @@ int vmw_kms_init_legacy_display_system(struct vmw_private *dev_priv)
return -EINVAL;
}
- dev_priv->ldu_priv = kmalloc(GFP_KERNEL, sizeof(*dev_priv->ldu_priv));
+ dev_priv->ldu_priv = kmalloc(sizeof(*dev_priv->ldu_priv), GFP_KERNEL);
if (!dev_priv->ldu_priv)
return -ENOMEM;
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_overlay.c b/drivers/gpu/drm/vmwgfx/vmwgfx_overlay.c
index df2036ed18d5..f1a52f9e7298 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_overlay.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_overlay.c
@@ -585,7 +585,7 @@ int vmw_overlay_init(struct vmw_private *dev_priv)
return -ENOSYS;
}
- overlay = kmalloc(GFP_KERNEL, sizeof(*overlay));
+ overlay = kmalloc(sizeof(*overlay), GFP_KERNEL);
if (!overlay)
return -ENOMEM;
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c b/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c
index c8c40e9979db..5408b1b7996f 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c
@@ -765,28 +765,11 @@ static size_t vmw_dmabuf_acc_size(struct ttm_bo_global *glob,
return bo_user_size + page_array_size;
}
-void vmw_dmabuf_gmr_unbind(struct ttm_buffer_object *bo)
-{
- struct vmw_dma_buffer *vmw_bo = vmw_dma_buffer(bo);
- struct ttm_bo_global *glob = bo->glob;
- struct vmw_private *dev_priv =
- container_of(bo->bdev, struct vmw_private, bdev);
-
- if (vmw_bo->gmr_bound) {
- vmw_gmr_unbind(dev_priv, vmw_bo->gmr_id);
- spin_lock(&glob->lru_lock);
- ida_remove(&dev_priv->gmr_ida, vmw_bo->gmr_id);
- spin_unlock(&glob->lru_lock);
- vmw_bo->gmr_bound = false;
- }
-}
-
void vmw_dmabuf_bo_free(struct ttm_buffer_object *bo)
{
struct vmw_dma_buffer *vmw_bo = vmw_dma_buffer(bo);
struct ttm_bo_global *glob = bo->glob;
- vmw_dmabuf_gmr_unbind(bo);
ttm_mem_global_free(glob->mem_glob, bo->acc_size);
kfree(vmw_bo);
}
@@ -818,10 +801,7 @@ int vmw_dmabuf_init(struct vmw_private *dev_priv,
memset(vmw_bo, 0, sizeof(*vmw_bo));
- INIT_LIST_HEAD(&vmw_bo->gmr_lru);
INIT_LIST_HEAD(&vmw_bo->validate_list);
- vmw_bo->gmr_id = 0;
- vmw_bo->gmr_bound = false;
ret = ttm_bo_init(bdev, &vmw_bo->base, size,
ttm_bo_type_device, placement,
@@ -835,7 +815,6 @@ static void vmw_user_dmabuf_destroy(struct ttm_buffer_object *bo)
struct vmw_user_dma_buffer *vmw_user_bo = vmw_user_dma_buffer(bo);
struct ttm_bo_global *glob = bo->glob;
- vmw_dmabuf_gmr_unbind(bo);
ttm_mem_global_free(glob->mem_glob, bo->acc_size);
kfree(vmw_user_bo);
}
@@ -883,7 +862,7 @@ int vmw_dmabuf_alloc_ioctl(struct drm_device *dev, void *data,
&vmw_vram_sys_placement, true,
&vmw_user_dmabuf_destroy);
if (unlikely(ret != 0))
- return ret;
+ goto out_no_dmabuf;
tmp = ttm_bo_reference(&vmw_user_bo->dma.base);
ret = ttm_base_object_init(vmw_fpriv(file_priv)->tfile,
@@ -891,19 +870,21 @@ int vmw_dmabuf_alloc_ioctl(struct drm_device *dev, void *data,
false,
ttm_buffer_type,
&vmw_user_dmabuf_release, NULL);
- if (unlikely(ret != 0)) {
- ttm_bo_unref(&tmp);
- } else {
+ if (unlikely(ret != 0))
+ goto out_no_base_object;
+ else {
rep->handle = vmw_user_bo->base.hash.key;
rep->map_handle = vmw_user_bo->dma.base.addr_space_offset;
rep->cur_gmr_id = vmw_user_bo->base.hash.key;
rep->cur_gmr_offset = 0;
}
- ttm_bo_unref(&tmp);
+out_no_base_object:
+ ttm_bo_unref(&tmp);
+out_no_dmabuf:
ttm_read_unlock(&vmaster->lock);
- return 0;
+ return ret;
}
int vmw_dmabuf_unref_ioctl(struct drm_device *dev, void *data,
@@ -938,25 +919,6 @@ void vmw_dmabuf_validate_clear(struct ttm_buffer_object *bo)
vmw_bo->on_validate_list = false;
}
-uint32_t vmw_dmabuf_gmr(struct ttm_buffer_object *bo)
-{
- struct vmw_dma_buffer *vmw_bo;
-
- if (bo->mem.mem_type == TTM_PL_VRAM)
- return SVGA_GMR_FRAMEBUFFER;
-
- vmw_bo = vmw_dma_buffer(bo);
-
- return (vmw_bo->gmr_bound) ? vmw_bo->gmr_id : SVGA_GMR_NULL;
-}
-
-void vmw_dmabuf_set_gmr(struct ttm_buffer_object *bo, uint32_t id)
-{
- struct vmw_dma_buffer *vmw_bo = vmw_dma_buffer(bo);
- vmw_bo->gmr_bound = true;
- vmw_bo->gmr_id = id;
-}
-
int vmw_user_dmabuf_lookup(struct ttm_object_file *tfile,
uint32_t handle, struct vmw_dma_buffer **out)
{
@@ -985,41 +947,6 @@ int vmw_user_dmabuf_lookup(struct ttm_object_file *tfile,
return 0;
}
-/**
- * TODO: Implement a gmr id eviction mechanism. Currently we just fail
- * when we're out of ids, causing GMR space to be allocated
- * out of VRAM.
- */
-
-int vmw_gmr_id_alloc(struct vmw_private *dev_priv, uint32_t *p_id)
-{
- struct ttm_bo_global *glob = dev_priv->bdev.glob;
- int id;
- int ret;
-
- do {
- if (unlikely(ida_pre_get(&dev_priv->gmr_ida, GFP_KERNEL) == 0))
- return -ENOMEM;
-
- spin_lock(&glob->lru_lock);
- ret = ida_get_new(&dev_priv->gmr_ida, &id);
- spin_unlock(&glob->lru_lock);
- } while (ret == -EAGAIN);
-
- if (unlikely(ret != 0))
- return ret;
-
- if (unlikely(id >= dev_priv->max_gmr_ids)) {
- spin_lock(&glob->lru_lock);
- ida_remove(&dev_priv->gmr_ida, id);
- spin_unlock(&glob->lru_lock);
- return -EBUSY;
- }
-
- *p_id = (uint32_t) id;
- return 0;
-}
-
/*
* Stream management
*/
diff --git a/drivers/gpu/stub/Kconfig b/drivers/gpu/stub/Kconfig
new file mode 100644
index 000000000000..0e1edd7311ff
--- /dev/null
+++ b/drivers/gpu/stub/Kconfig
@@ -0,0 +1,16 @@
+config STUB_POULSBO
+ tristate "Intel GMA500 Stub Driver"
+ depends on PCI
+ # Poulsbo stub depends on ACPI_VIDEO when ACPI is enabled
+ # but for select to work, need to select ACPI_VIDEO's dependencies, ick
+ select VIDEO_OUTPUT_CONTROL if ACPI
+ select BACKLIGHT_CLASS_DEVICE if ACPI
+ select INPUT if ACPI
+ select ACPI_VIDEO if ACPI
+ help
+ Choose this option if you have a system that has Intel GMA500
+ (Poulsbo) integrated graphics. If M is selected, the module will
+ be called poulsbo. This driver is a stub for Poulsbo that enables
+ the ACPI backlight control sysfs entry, because there is no native
+ Poulsbo driver that supports the Intel opregion.
diff --git a/drivers/gpu/stub/Makefile b/drivers/gpu/stub/Makefile
new file mode 100644
index 000000000000..cd940cc9d36d
--- /dev/null
+++ b/drivers/gpu/stub/Makefile
@@ -0,0 +1 @@
+obj-$(CONFIG_STUB_POULSBO) += poulsbo.o
diff --git a/drivers/gpu/stub/poulsbo.c b/drivers/gpu/stub/poulsbo.c
new file mode 100644
index 000000000000..7edfd27b8dee
--- /dev/null
+++ b/drivers/gpu/stub/poulsbo.c
@@ -0,0 +1,64 @@
+/*
+ * Intel Poulsbo Stub driver
+ *
+ * Copyright (C) 2010 Novell <jlee@novell.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published by
+ * the Free Software Foundation.
+ *
+ */
+
+#include <linux/module.h>
+#include <linux/pci.h>
+#include <linux/acpi.h>
+#include <acpi/video.h>
+
+#define DRIVER_NAME "poulsbo"
+
+enum {
+ CHIP_PSB_8108 = 0,
+ CHIP_PSB_8109 = 1,
+};
+
+static DEFINE_PCI_DEVICE_TABLE(pciidlist) = {
+ {0x8086, 0x8108, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_PSB_8108}, \
+ {0x8086, 0x8109, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_PSB_8109}, \
+ {0, 0, 0}
+};
+
+static int poulsbo_probe(struct pci_dev *pdev, const struct pci_device_id *id)
+{
+ return acpi_video_register();
+}
+
+static void poulsbo_remove(struct pci_dev *pdev)
+{
+ acpi_video_unregister();
+}
+
+static struct pci_driver poulsbo_driver = {
+ .name = DRIVER_NAME,
+ .id_table = pciidlist,
+ .probe = poulsbo_probe,
+ .remove = poulsbo_remove,
+};
+
+static int __init poulsbo_init(void)
+{
+ return pci_register_driver(&poulsbo_driver);
+}
+
+static void __exit poulsbo_exit(void)
+{
+ pci_unregister_driver(&poulsbo_driver);
+}
+
+module_init(poulsbo_init);
+module_exit(poulsbo_exit);
+
+MODULE_AUTHOR("Lee, Chun-Yi <jlee@novell.com>");
+MODULE_DESCRIPTION("Poulsbo Stub Driver");
+MODULE_LICENSE("GPL");
+
+MODULE_DEVICE_TABLE(pci, pciidlist);
diff --git a/drivers/hid/hid-core.c b/drivers/hid/hid-core.c
index d4c1906313d2..c4d47e635f95 100644
--- a/drivers/hid/hid-core.c
+++ b/drivers/hid/hid-core.c
@@ -1414,6 +1414,7 @@ static const struct hid_device_id hid_have_special_driver[] = {
{ HID_USB_DEVICE(USB_VENDOR_ID_THRUSTMASTER, 0xb651) },
{ HID_USB_DEVICE(USB_VENDOR_ID_THRUSTMASTER, 0xb653) },
{ HID_USB_DEVICE(USB_VENDOR_ID_THRUSTMASTER, 0xb654) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_THRUSTMASTER, 0xb65a) },
{ HID_USB_DEVICE(USB_VENDOR_ID_TOPSEED, USB_DEVICE_ID_TOPSEED_CYBERLINK) },
{ HID_USB_DEVICE(USB_VENDOR_ID_TOPSEED2, USB_DEVICE_ID_TOPSEED2_RF_COMBO) },
{ HID_USB_DEVICE(USB_VENDOR_ID_TWINHAN, USB_DEVICE_ID_TWINHAN_IR_REMOTE) },
@@ -1814,6 +1815,11 @@ static bool hid_ignore(struct hid_device *hdev)
hdev->product <= USB_DEVICE_ID_SOUNDGRAPH_IMON_LAST)
return true;
break;
+ case USB_VENDOR_ID_HANWANG:
+ if (hdev->product >= USB_DEVICE_ID_HANWANG_TABLET_FIRST &&
+ hdev->product <= USB_DEVICE_ID_HANWANG_TABLET_LAST)
+ return true;
+ break;
}
if (hdev->type == HID_TYPE_USBMOUSE &&
diff --git a/drivers/hid/hid-egalax.c b/drivers/hid/hid-egalax.c
index bbab6cff3518..72b1dd8d16ce 100644
--- a/drivers/hid/hid-egalax.c
+++ b/drivers/hid/hid-egalax.c
@@ -221,7 +221,7 @@ static int egalax_probe(struct hid_device *hdev, const struct hid_device_id *id)
struct egalax_data *td;
struct hid_report *report;
- td = kmalloc(sizeof(struct egalax_data), GFP_KERNEL);
+ td = kzalloc(sizeof(struct egalax_data), GFP_KERNEL);
if (!td) {
hid_err(hdev, "cannot allocate eGalax data\n");
return -ENOMEM;
diff --git a/drivers/hid/hid-ids.h b/drivers/hid/hid-ids.h
index d91bb12f9c54..5cd1a6a356a3 100644
--- a/drivers/hid/hid-ids.h
+++ b/drivers/hid/hid-ids.h
@@ -309,6 +309,10 @@
#define USB_DEVICE_ID_GYRATION_REMOTE_2 0x0003
#define USB_DEVICE_ID_GYRATION_REMOTE_3 0x0008
+#define USB_VENDOR_ID_HANWANG 0x0b57
+#define USB_DEVICE_ID_HANWANG_TABLET_FIRST 0x5000
+#define USB_DEVICE_ID_HANWANG_TABLET_LAST 0x8fff
+
#define USB_VENDOR_ID_HAPP 0x078b
#define USB_DEVICE_ID_UGCI_DRIVING 0x0010
#define USB_DEVICE_ID_UGCI_FLYING 0x0020
diff --git a/drivers/hid/hid-input.c b/drivers/hid/hid-input.c
index a1a2206714fc..c0757821b1fc 100644
--- a/drivers/hid/hid-input.c
+++ b/drivers/hid/hid-input.c
@@ -68,39 +68,52 @@ static const struct {
#define map_key_clear(c) hid_map_usage_clear(hidinput, usage, &bit, \
&max, EV_KEY, (c))
-static inline int match_scancode(unsigned int code, unsigned int scancode)
+static bool match_scancode(struct hid_usage *usage,
+ unsigned int cur_idx, unsigned int scancode)
{
- if (scancode == 0)
- return 1;
-
- return (code & (HID_USAGE_PAGE | HID_USAGE)) == scancode;
+ return (usage->hid & (HID_USAGE_PAGE | HID_USAGE)) == scancode;
}
-static inline int match_keycode(unsigned int code, unsigned int keycode)
+static bool match_keycode(struct hid_usage *usage,
+ unsigned int cur_idx, unsigned int keycode)
{
- if (keycode == 0)
- return 1;
+ /*
+ * We should exclude unmapped usages when doing lookup by keycode.
+ */
+ return (usage->type == EV_KEY && usage->code == keycode);
+}
- return code == keycode;
+static bool match_index(struct hid_usage *usage,
+ unsigned int cur_idx, unsigned int idx)
+{
+ return cur_idx == idx;
}
+typedef bool (*hid_usage_cmp_t)(struct hid_usage *usage,
+ unsigned int cur_idx, unsigned int val);
+
static struct hid_usage *hidinput_find_key(struct hid_device *hid,
- unsigned int scancode,
- unsigned int keycode)
+ hid_usage_cmp_t match,
+ unsigned int value,
+ unsigned int *usage_idx)
{
- int i, j, k;
+ unsigned int i, j, k, cur_idx = 0;
struct hid_report *report;
struct hid_usage *usage;
for (k = HID_INPUT_REPORT; k <= HID_OUTPUT_REPORT; k++) {
list_for_each_entry(report, &hid->report_enum[k].report_list, list) {
for (i = 0; i < report->maxfield; i++) {
- for ( j = 0; j < report->field[i]->maxusage; j++) {
+ for (j = 0; j < report->field[i]->maxusage; j++) {
usage = report->field[i]->usage + j;
- if (usage->type == EV_KEY &&
- match_scancode(usage->hid, scancode) &&
- match_keycode(usage->code, keycode))
- return usage;
+ if (usage->type == EV_KEY || usage->type == 0) {
+ if (match(usage, cur_idx, value)) {
+ if (usage_idx)
+ *usage_idx = cur_idx;
+ return usage;
+ }
+ cur_idx++;
+ }
}
}
}
@@ -108,39 +121,68 @@ static struct hid_usage *hidinput_find_key(struct hid_device *hid,
return NULL;
}
+static struct hid_usage *hidinput_locate_usage(struct hid_device *hid,
+ const struct input_keymap_entry *ke,
+ unsigned int *index)
+{
+ struct hid_usage *usage;
+ unsigned int scancode;
+
+ if (ke->flags & INPUT_KEYMAP_BY_INDEX)
+ usage = hidinput_find_key(hid, match_index, ke->index, index);
+ else if (input_scancode_to_scalar(ke, &scancode) == 0)
+ usage = hidinput_find_key(hid, match_scancode, scancode, index);
+ else
+ usage = NULL;
+
+ return usage;
+}
+
static int hidinput_getkeycode(struct input_dev *dev,
- unsigned int scancode, unsigned int *keycode)
+ struct input_keymap_entry *ke)
{
struct hid_device *hid = input_get_drvdata(dev);
struct hid_usage *usage;
+ unsigned int scancode, index;
- usage = hidinput_find_key(hid, scancode, 0);
+ usage = hidinput_locate_usage(hid, ke, &index);
if (usage) {
- *keycode = usage->code;
+ ke->keycode = usage->type == EV_KEY ?
+ usage->code : KEY_RESERVED;
+ ke->index = index;
+ scancode = usage->hid & (HID_USAGE_PAGE | HID_USAGE);
+ ke->len = sizeof(scancode);
+ memcpy(ke->scancode, &scancode, sizeof(scancode));
return 0;
}
+
return -EINVAL;
}
static int hidinput_setkeycode(struct input_dev *dev,
- unsigned int scancode, unsigned int keycode)
+ const struct input_keymap_entry *ke,
+ unsigned int *old_keycode)
{
struct hid_device *hid = input_get_drvdata(dev);
struct hid_usage *usage;
- int old_keycode;
- usage = hidinput_find_key(hid, scancode, 0);
+ usage = hidinput_locate_usage(hid, ke, NULL);
if (usage) {
- old_keycode = usage->code;
- usage->code = keycode;
+ *old_keycode = usage->type == EV_KEY ?
+ usage->code : KEY_RESERVED;
+ usage->code = ke->keycode;
- clear_bit(old_keycode, dev->keybit);
+ clear_bit(*old_keycode, dev->keybit);
set_bit(usage->code, dev->keybit);
- dbg_hid(KERN_DEBUG "Assigned keycode %d to HID usage code %x\n", keycode, scancode);
- /* Set the keybit for the old keycode if the old keycode is used
- * by another key */
- if (hidinput_find_key (hid, 0, old_keycode))
- set_bit(old_keycode, dev->keybit);
+ dbg_hid("Assigned keycode %d to HID usage code %x\n",
+ usage->code, usage->hid);
+
+ /*
+ * Set the keybit for the old keycode if the old keycode is used
+ * by another key
+ */
+ if (hidinput_find_key(hid, match_keycode, *old_keycode, NULL))
+ set_bit(*old_keycode, dev->keybit);
return 0;
}
@@ -161,8 +203,8 @@ static int hidinput_setkeycode(struct input_dev *dev,
*
* as seen in the HID specification v1.11 6.2.2.7 Global Items.
*
- * Only exponent 1 length units are processed. Centimeters are converted to
- * inches. Degrees are converted to radians.
+ * Only exponent 1 length units are processed. Centimeters and inches are
+ * converted to millimeters. Degrees are converted to radians.
*/
static __s32 hidinput_calc_abs_res(const struct hid_field *field, __u16 code)
{
@@ -183,13 +225,16 @@ static __s32 hidinput_calc_abs_res(const struct hid_field *field, __u16 code)
*/
if (code == ABS_X || code == ABS_Y || code == ABS_Z) {
if (field->unit == 0x11) { /* If centimeters */
- /* Convert to inches */
- prev = logical_extents;
- logical_extents *= 254;
- if (logical_extents < prev)
+ /* Convert to millimeters */
+ unit_exponent += 1;
+ } else if (field->unit == 0x13) { /* If inches */
+ /* Convert to millimeters */
+ prev = physical_extents;
+ physical_extents *= 254;
+ if (physical_extents < prev)
return 0;
- unit_exponent += 2;
- } else if (field->unit != 0x13) { /* If not inches */
+ unit_exponent -= 1;
+ } else {
return 0;
}
} else if (code == ABS_RX || code == ABS_RY || code == ABS_RZ) {
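A worked example of the inch-to-millimeter rescaling above, using hypothetical report-descriptor values; the helper name is made up for illustration.

/* Hypothetical digitizer X axis: logical range 0..10000, physical range
 * 0..10, unit 0x13 (inches), unit exponent 0. */
static __s32 example_abs_res(void)
{
	__s32 logical_extents  = 10000 - 0;
	__s32 physical_extents = 10 - 0;
	__s32 unit_exponent    = 0;

	/* The inch branch rescales the physical side to millimeters: */
	physical_extents *= 254;	/* 10 -> 2540 */
	unit_exponent -= 1;		/* 2540 * 10^-1 = 254 mm (10 in = 254 mm) */

	/* Applying the exponent and dividing gives the resolution:
	 * 10000 counts / 254 mm ~= 39 counts per millimeter. */
	return logical_extents / (physical_extents / 10);
}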
@@ -835,8 +880,8 @@ int hidinput_connect(struct hid_device *hid, unsigned int force)
hid->ll_driver->hidinput_input_event;
input_dev->open = hidinput_open;
input_dev->close = hidinput_close;
- input_dev->setkeycode = hidinput_setkeycode;
- input_dev->getkeycode = hidinput_getkeycode;
+ input_dev->setkeycode_new = hidinput_setkeycode;
+ input_dev->getkeycode_new = hidinput_getkeycode;
input_dev->name = hid->name;
input_dev->phys = hid->phys;
diff --git a/drivers/hid/hid-tmff.c b/drivers/hid/hid-tmff.c
index 356a98fcb365..575862b0688e 100644
--- a/drivers/hid/hid-tmff.c
+++ b/drivers/hid/hid-tmff.c
@@ -249,6 +249,8 @@ static const struct hid_device_id tm_devices[] = {
.driver_data = (unsigned long)ff_joystick },
{ HID_USB_DEVICE(USB_VENDOR_ID_THRUSTMASTER, 0xb654), /* FGT Force Feedback Wheel */
.driver_data = (unsigned long)ff_joystick },
+ { HID_USB_DEVICE(USB_VENDOR_ID_THRUSTMASTER, 0xb65a), /* F430 Force Feedback Wheel */
+ .driver_data = (unsigned long)ff_joystick },
{ }
};
MODULE_DEVICE_TABLE(hid, tm_devices);
diff --git a/drivers/hid/hidraw.c b/drivers/hid/hidraw.c
index eb16cd143e2a..68d087f63c02 100644
--- a/drivers/hid/hidraw.c
+++ b/drivers/hid/hidraw.c
@@ -34,7 +34,6 @@
#include <linux/hid.h>
#include <linux/mutex.h>
#include <linux/sched.h>
-#include <linux/smp_lock.h>
#include <linux/hidraw.h>
diff --git a/drivers/hid/usbhid/hiddev.c b/drivers/hid/usbhid/hiddev.c
index fb78f75d49a9..af0a7c1002af 100644
--- a/drivers/hid/usbhid/hiddev.c
+++ b/drivers/hid/usbhid/hiddev.c
@@ -29,7 +29,6 @@
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/init.h>
-#include <linux/smp_lock.h>
#include <linux/input.h>
#include <linux/usb.h>
#include <linux/hid.h>
diff --git a/drivers/hwmon/Kconfig b/drivers/hwmon/Kconfig
index 97499d00615a..a56f6adf3b76 100644
--- a/drivers/hwmon/Kconfig
+++ b/drivers/hwmon/Kconfig
@@ -129,7 +129,7 @@ config SENSORS_ADM1025
config SENSORS_ADM1026
tristate "Analog Devices ADM1026 and compatibles"
- depends on I2C && EXPERIMENTAL
+ depends on I2C
select HWMON_VID
help
If you say yes here you get support for Analog Devices ADM1026
@@ -140,7 +140,7 @@ config SENSORS_ADM1026
config SENSORS_ADM1029
tristate "Analog Devices ADM1029"
- depends on I2C && EXPERIMENTAL
+ depends on I2C
help
If you say yes here you get support for Analog Devices ADM1029
sensor chip.
@@ -151,7 +151,7 @@ config SENSORS_ADM1029
config SENSORS_ADM1031
tristate "Analog Devices ADM1031 and compatibles"
- depends on I2C && EXPERIMENTAL
+ depends on I2C
help
If you say yes here you get support for Analog Devices ADM1031
and ADM1030 sensor chips.
@@ -202,7 +202,7 @@ config SENSORS_ADT7470
config SENSORS_ADT7475
tristate "Analog Devices ADT7473, ADT7475, ADT7476 and ADT7490"
- depends on I2C && EXPERIMENTAL
+ depends on I2C
select HWMON_VID
help
If you say yes here you get support for the Analog Devices
@@ -249,32 +249,6 @@ config SENSORS_K10TEMP
This driver can also be built as a module. If so, the module
will be called k10temp.
-config SENSORS_AMS
- tristate "Apple Motion Sensor driver"
- depends on PPC_PMAC && !PPC64 && INPUT && ((ADB_PMU && I2C = y) || (ADB_PMU && !I2C) || I2C) && EXPERIMENTAL
- select INPUT_POLLDEV
- help
- Support for the motion sensor included in PowerBooks. Includes
- implementations for PMU and I2C.
-
- This driver can also be built as a module. If so, the module
- will be called ams.
-
-config SENSORS_AMS_PMU
- bool "PMU variant"
- depends on SENSORS_AMS && ADB_PMU
- default y
- help
- PMU variant of motion sensor, found in late 2005 PowerBooks.
-
-config SENSORS_AMS_I2C
- bool "I2C variant"
- depends on SENSORS_AMS && I2C
- default y
- help
- I2C variant of motion sensor, found in early 2005 PowerBooks and
- iBooks.
-
config SENSORS_ASB100
tristate "Asus ASB100 Bach"
depends on X86 && I2C && EXPERIMENTAL
@@ -322,7 +296,6 @@ config SENSORS_I5K_AMB
config SENSORS_F71805F
tristate "Fintek F71805F/FG, F71806F/FG and F71872F/FG"
- depends on EXPERIMENTAL
help
If you say yes here you get support for hardware monitoring
features of the Fintek F71805F/FG, F71806F/FG and F71872F/FG
@@ -333,7 +306,6 @@ config SENSORS_F71805F
config SENSORS_F71882FG
tristate "Fintek F71858FG, F71862FG, F71882FG, F71889FG and F8000"
- depends on EXPERIMENTAL
help
If you say yes here you get support for hardware monitoring
features of the Fintek F71858FG, F71862FG/71863FG, F71882FG/F71883FG,
@@ -343,8 +315,8 @@ config SENSORS_F71882FG
will be called f71882fg.
config SENSORS_F75375S
- tristate "Fintek F75375S/SP and F75373";
- depends on I2C && EXPERIMENTAL
+ tristate "Fintek F75375S/SP and F75373"
+ depends on I2C
help
If you say yes here you get support for hardware monitoring
features of the Fintek F75375S/SP and F75373
@@ -399,6 +371,15 @@ config SENSORS_GL520SM
This driver can also be built as a module. If so, the module
will be called gl520sm.
+config SENSORS_GPIO_FAN
+ tristate "GPIO fan"
+ depends on GENERIC_GPIO
+ help
+ If you say yes here you get support for fans connected to GPIO lines.
+
+ This driver can also be built as a module. If so, the module
+ will be called gpio-fan.
+
config SENSORS_CORETEMP
tristate "Intel Core/Core2/Atom temperature sensor"
depends on X86 && PCI && EXPERIMENTAL
@@ -447,8 +428,8 @@ config SENSORS_IT87
select HWMON_VID
help
If you say yes here you get support for ITE IT8705F, IT8712F,
- IT8716F, IT8718F, IT8720F and IT8726F sensor chips, and the
- SiS960 clone.
+ IT8716F, IT8718F, IT8720F, IT8721F, IT8726F and IT8758E sensor
+ chips, and the SiS960 clone.
This driver can also be built as a module. If so, the module
will be called it87.
@@ -490,7 +471,7 @@ config SENSORS_LM63
config SENSORS_LM70
tristate "National Semiconductor LM70 / Texas Instruments TMP121"
- depends on SPI_MASTER && EXPERIMENTAL
+ depends on SPI_MASTER
help
If you say yes here you get support for the National Semiconductor
LM70 and Texas Instruments TMP121/TMP123 digital temperature
@@ -558,7 +539,7 @@ config SENSORS_LM78
config SENSORS_LM80
tristate "National Semiconductor LM80"
- depends on I2C && EXPERIMENTAL
+ depends on I2C
help
If you say yes here you get support for National Semiconductor
LM80 sensor chips.
@@ -578,11 +559,12 @@ config SENSORS_LM83
config SENSORS_LM85
tristate "National Semiconductor LM85 and compatibles"
- depends on I2C && EXPERIMENTAL
+ depends on I2C
select HWMON_VID
help
If you say yes here you get support for National Semiconductor LM85
- sensor chips and clones: ADT7463, EMC6D100, EMC6D102 and ADM1027.
+ sensor chips and clones: ADM1027, ADT7463, ADT7468, EMC6D100,
+ EMC6D101 and EMC6D102.
This driver can also be built as a module. If so, the module
will be called lm85.
@@ -605,8 +587,8 @@ config SENSORS_LM90
If you say yes here you get support for National Semiconductor LM90,
LM86, LM89 and LM99, Analog Devices ADM1032 and ADT7461, Maxim
MAX6646, MAX6647, MAX6648, MAX6649, MAX6657, MAX6658, MAX6659,
- MAX6680, MAX6681 and MAX6692, and Winbond/Nuvoton W83L771AWG/ASG
- sensor chips.
+ MAX6680, MAX6681, MAX6692, MAX6695, MAX6696, and Winbond/Nuvoton
+ W83L771W/G/AWG/ASG sensor chips.
This driver can also be built as a module. If so, the module
will be called lm90.
@@ -654,6 +636,17 @@ config SENSORS_LTC4245
This driver can also be built as a module. If so, the module will
be called ltc4245.
+config SENSORS_LTC4261
+ tristate "Linear Technology LTC4261"
+ depends on I2C && EXPERIMENTAL
+ default n
+ help
+ If you say yes here you get support for Linear Technology LTC4261
+ Negative Voltage Hot Swap Controller I2C interface.
+
+ This driver can also be built as a module. If so, the module will
+ be called ltc4261.
+
config SENSORS_LM95241
tristate "National Semiconductor LM95241 sensor chip"
depends on I2C
@@ -706,7 +699,6 @@ config SENSORS_PC87360
config SENSORS_PC87427
tristate "National Semiconductor PC87427"
- depends on EXPERIMENTAL
help
If you say yes here you get access to the hardware monitoring
functions of the National Semiconductor PC87427 Super-I/O chip.
@@ -743,14 +735,14 @@ config SENSORS_SHT15
will be called sht15.
config SENSORS_S3C
- tristate "S3C24XX/S3C64XX Inbuilt ADC"
- depends on ARCH_S3C2410
+ tristate "Samsung built-in ADC"
+ depends on S3C_ADC
help
If you say yes here you get support for the on-board ADCs of
- the Samsung S3C24XX or S3C64XX series of SoC
+ the Samsung S3C24XX, S3C64XX and other Samsung SoC series
This driver can also be built as a module. If so, the module
- will be called s3c-hwmo.
+ will be called s3c-hwmon.
config SENSORS_S3C_RAW
bool "Include raw channel attributes in sysfs"
@@ -834,7 +826,7 @@ config SENSORS_SMSC47M1
config SENSORS_SMSC47M192
tristate "SMSC LPC47M192 and compatibles"
- depends on I2C && EXPERIMENTAL
+ depends on I2C
select HWMON_VID
help
If you say yes here you get support for the temperature and
@@ -890,7 +882,7 @@ config SENSORS_AMC6821
config SENSORS_THMC50
tristate "Texas Instruments THMC50 / Analog Devices ADM1022"
- depends on I2C && EXPERIMENTAL
+ depends on I2C
help
If you say yes here you get support for Texas Instruments THMC50
sensor chips and clones: the Analog Devices ADM1022.
@@ -948,7 +940,6 @@ config SENSORS_VIA686A
config SENSORS_VT1211
tristate "VIA VT1211"
- depends on EXPERIMENTAL
select HWMON_VID
help
If you say yes here then you get support for hardware monitoring
@@ -992,7 +983,7 @@ config SENSORS_W83791D
config SENSORS_W83792D
tristate "Winbond W83792D"
- depends on I2C && EXPERIMENTAL
+ depends on I2C
help
If you say yes here you get support for the Winbond W83792D chip.
@@ -1011,6 +1002,33 @@ config SENSORS_W83793
This driver can also be built as a module. If so, the module
will be called w83793.
+config SENSORS_W83795
+ tristate "Winbond/Nuvoton W83795G/ADG"
+ depends on I2C && EXPERIMENTAL
+ help
+ If you say yes here you get support for the Winbond W83795G and
+ W83795ADG hardware monitoring chip.
+
+ This driver can also be built as a module. If so, the module
+ will be called w83795.
+
+config SENSORS_W83795_FANCTRL
+ boolean "Include fan control support (DANGEROUS)"
+ depends on SENSORS_W83795 && EXPERIMENTAL
+ default n
+ help
+ If you say yes here, support for both the manual and automatic
+ fan control features will be included in the driver.
+
+ This part of the code hasn't been carefully reviewed and tested yet,
+ so enabling this option is strongly discouraged on production
+ servers. Only developers and testers should enable it for the
+ time being.
+
+ Please also note that this option will create sysfs attribute
+ files which may change in the future, so you shouldn't rely
+ on them being stable.
+
config SENSORS_W83L785TS
tristate "Winbond W83L785TS-S"
depends on I2C && EXPERIMENTAL
@@ -1088,26 +1106,6 @@ config SENSORS_ULTRA45
This driver provides support for the Ultra45 workstation environmental
sensors.
-config SENSORS_HDAPS
- tristate "IBM Hard Drive Active Protection System (hdaps)"
- depends on INPUT && X86
- select INPUT_POLLDEV
- default n
- help
- This driver provides support for the IBM Hard Drive Active Protection
- System (hdaps), which provides an accelerometer and other misc. data.
- ThinkPads starting with the R50, T41, and X40 are supported. The
- accelerometer data is readable via sysfs.
-
- This driver also provides an absolute input class device, allowing
- the laptop to act as a pinball machine-esque joystick.
-
- If your ThinkPad is not recognized by the driver, please update to latest
- BIOS. This is especially the case for some R52 ThinkPads.
-
- Say Y here if you have an applicable laptop and want to experience
- the awesome power of hdaps.
-
config SENSORS_LIS3_SPI
tristate "STMicroeletronics LIS3LV02Dx three-axis digital accelerometer (SPI)"
depends on !ACPI && SPI_MASTER && INPUT
diff --git a/drivers/hwmon/Makefile b/drivers/hwmon/Makefile
index e3c2484f6c5f..2479b3da272c 100644
--- a/drivers/hwmon/Makefile
+++ b/drivers/hwmon/Makefile
@@ -14,6 +14,7 @@ obj-$(CONFIG_SENSORS_ASB100) += asb100.o
obj-$(CONFIG_SENSORS_W83627HF) += w83627hf.o
obj-$(CONFIG_SENSORS_W83792D) += w83792d.o
obj-$(CONFIG_SENSORS_W83793) += w83793.o
+obj-$(CONFIG_SENSORS_W83795) += w83795.o
obj-$(CONFIG_SENSORS_W83781D) += w83781d.o
obj-$(CONFIG_SENSORS_W83791D) += w83791d.o
@@ -35,7 +36,6 @@ obj-$(CONFIG_SENSORS_ADT7462) += adt7462.o
obj-$(CONFIG_SENSORS_ADT7470) += adt7470.o
obj-$(CONFIG_SENSORS_ADT7475) += adt7475.o
obj-$(CONFIG_SENSORS_APPLESMC) += applesmc.o
-obj-$(CONFIG_SENSORS_AMS) += ams/
obj-$(CONFIG_SENSORS_ASC7621) += asc7621.o
obj-$(CONFIG_SENSORS_ATXP1) += atxp1.o
obj-$(CONFIG_SENSORS_CORETEMP) += coretemp.o
@@ -51,8 +51,8 @@ obj-$(CONFIG_SENSORS_FSCHMD) += fschmd.o
obj-$(CONFIG_SENSORS_G760A) += g760a.o
obj-$(CONFIG_SENSORS_GL518SM) += gl518sm.o
obj-$(CONFIG_SENSORS_GL520SM) += gl520sm.o
+obj-$(CONFIG_SENSORS_GPIO_FAN) += gpio-fan.o
obj-$(CONFIG_SENSORS_ULTRA45) += ultra45_env.o
-obj-$(CONFIG_SENSORS_HDAPS) += hdaps.o
obj-$(CONFIG_SENSORS_I5K_AMB) += i5k_amb.o
obj-$(CONFIG_SENSORS_IBMAEM) += ibmaem.o
obj-$(CONFIG_SENSORS_IBMPEX) += ibmpex.o
@@ -80,6 +80,7 @@ obj-$(CONFIG_SENSORS_LM93) += lm93.o
obj-$(CONFIG_SENSORS_LM95241) += lm95241.o
obj-$(CONFIG_SENSORS_LTC4215) += ltc4215.o
obj-$(CONFIG_SENSORS_LTC4245) += ltc4245.o
+obj-$(CONFIG_SENSORS_LTC4261) += ltc4261.o
obj-$(CONFIG_SENSORS_MAX1111) += max1111.o
obj-$(CONFIG_SENSORS_MAX1619) += max1619.o
obj-$(CONFIG_SENSORS_MAX6650) += max6650.o
diff --git a/drivers/hwmon/ad7414.c b/drivers/hwmon/ad7414.c
index 1e4c21fc1a89..86d822aa9bbf 100644
--- a/drivers/hwmon/ad7414.c
+++ b/drivers/hwmon/ad7414.c
@@ -178,11 +178,13 @@ static int ad7414_probe(struct i2c_client *client,
{
struct ad7414_data *data;
int conf;
- int err = 0;
+ int err;
if (!i2c_check_functionality(client->adapter, I2C_FUNC_SMBUS_BYTE_DATA |
- I2C_FUNC_SMBUS_READ_WORD_DATA))
+ I2C_FUNC_SMBUS_READ_WORD_DATA)) {
+ err = -EOPNOTSUPP;
goto exit;
+ }
data = kzalloc(sizeof(struct ad7414_data), GFP_KERNEL);
if (!data) {
diff --git a/drivers/hwmon/adt7470.c b/drivers/hwmon/adt7470.c
index 9e775717abb7..87d92a56a939 100644
--- a/drivers/hwmon/adt7470.c
+++ b/drivers/hwmon/adt7470.c
@@ -1286,8 +1286,10 @@ static int adt7470_probe(struct i2c_client *client,
init_completion(&data->auto_update_stop);
data->auto_update = kthread_run(adt7470_update_thread, client,
dev_name(data->hwmon_dev));
- if (IS_ERR(data->auto_update))
+ if (IS_ERR(data->auto_update)) {
+ err = PTR_ERR(data->auto_update);
goto exit_unregister;
+ }
return 0;
diff --git a/drivers/hwmon/adt7475.c b/drivers/hwmon/adt7475.c
index a0c385145686..b5fcd87931cb 100644
--- a/drivers/hwmon/adt7475.c
+++ b/drivers/hwmon/adt7475.c
@@ -146,7 +146,7 @@
#define TEMP_OFFSET_REG(idx) (REG_TEMP_OFFSET_BASE + (idx))
#define TEMP_TRANGE_REG(idx) (REG_TEMP_TRANGE_BASE + (idx))
-static unsigned short normal_i2c[] = { 0x2c, 0x2d, 0x2e, I2C_CLIENT_END };
+static const unsigned short normal_i2c[] = { 0x2c, 0x2d, 0x2e, I2C_CLIENT_END };
enum chips { adt7473, adt7475, adt7476, adt7490 };
diff --git a/drivers/hwmon/amc6821.c b/drivers/hwmon/amc6821.c
index fa9708c2d723..4033974d1bb3 100644
--- a/drivers/hwmon/amc6821.c
+++ b/drivers/hwmon/amc6821.c
@@ -4,7 +4,7 @@
Copyright (C) 2009 T. Mertelj <tomaz.mertelj@guest.arnes.si>
Based on max6650.c:
- Copyright (C) 2007 Hans J. Koch <hjk@linutronix.de>
+ Copyright (C) 2007 Hans J. Koch <hjk@hansjkoch.de>
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
diff --git a/drivers/hwmon/asc7621.c b/drivers/hwmon/asc7621.c
index 89b4f3babe87..d2596cec18b5 100644
--- a/drivers/hwmon/asc7621.c
+++ b/drivers/hwmon/asc7621.c
@@ -28,7 +28,7 @@
#include <linux/mutex.h>
/* Addresses to scan */
-static unsigned short normal_i2c[] = {
+static const unsigned short normal_i2c[] = {
0x2c, 0x2d, 0x2e, I2C_CLIENT_END
};
@@ -52,7 +52,7 @@ struct asc7621_chip {
u8 company_id;
u8 verstep_reg;
u8 verstep_id;
- unsigned short *addresses;
+ const unsigned short *addresses;
};
static struct asc7621_chip asc7621_chips[] = {
diff --git a/drivers/hwmon/coretemp.c b/drivers/hwmon/coretemp.c
index a23b17a78ace..42de98d73ff5 100644
--- a/drivers/hwmon/coretemp.c
+++ b/drivers/hwmon/coretemp.c
@@ -21,7 +21,6 @@
*/
#include <linux/module.h>
-#include <linux/delay.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/jiffies.h>
@@ -280,11 +279,9 @@ static int __devinit get_tjmax(struct cpuinfo_x86 *c, u32 id,
case 0x1a:
dev_warn(dev, "TjMax is assumed as 100 C!\n");
return 100000;
- break;
case 0x17:
case 0x1c: /* Atom CPUs */
return adjust_tjmax(c, id, dev);
- break;
default:
dev_warn(dev, "CPU (model=0x%x) is not supported yet,"
" using default TjMax of 100C.\n", c->x86_model);
@@ -292,6 +289,15 @@ static int __devinit get_tjmax(struct cpuinfo_x86 *c, u32 id,
}
}
+static void __devinit get_ucode_rev_on_cpu(void *edx)
+{
+ u32 eax;
+
+ wrmsr(MSR_IA32_UCODE_REV, 0, 0);
+ sync_core();
+ rdmsr(MSR_IA32_UCODE_REV, eax, *(u32 *)edx);
+}
+
static int __devinit coretemp_probe(struct platform_device *pdev)
{
struct coretemp_data *data;
@@ -327,8 +333,15 @@ static int __devinit coretemp_probe(struct platform_device *pdev)
if ((c->x86_model == 0xe) && (c->x86_mask < 0xc)) {
/* check for microcode update */
- rdmsr_on_cpu(data->id, MSR_IA32_UCODE_REV, &eax, &edx);
- if (edx < 0x39) {
+ err = smp_call_function_single(data->id, get_ucode_rev_on_cpu,
+ &edx, 1);
+ if (err) {
+ dev_err(&pdev->dev,
+ "Cannot determine microcode revision of "
+ "CPU#%u (%d)!\n", data->id, err);
+ err = -ENODEV;
+ goto exit_free;
+ } else if (edx < 0x39) {
err = -ENODEV;
dev_err(&pdev->dev,
"Errata AE18 not fixed, update BIOS or "
@@ -490,7 +503,7 @@ exit:
return err;
}
-static void coretemp_device_remove(unsigned int cpu)
+static void __cpuinit coretemp_device_remove(unsigned int cpu)
{
struct pdev_entry *p;
unsigned int i;
@@ -569,9 +582,8 @@ exit:
static void __exit coretemp_exit(void)
{
struct pdev_entry *p, *n;
-#ifdef CONFIG_HOTPLUG_CPU
+
unregister_hotcpu_notifier(&coretemp_cpu_notifier);
-#endif
mutex_lock(&pdev_list_mutex);
list_for_each_entry_safe(p, n, &pdev_list, list) {
platform_device_unregister(p->pdev);
diff --git a/drivers/hwmon/gpio-fan.c b/drivers/hwmon/gpio-fan.c
new file mode 100644
index 000000000000..f141a1de519c
--- /dev/null
+++ b/drivers/hwmon/gpio-fan.c
@@ -0,0 +1,558 @@
+/*
+ * gpio-fan.c - Hwmon driver for fans connected to GPIO lines.
+ *
+ * Copyright (C) 2010 LaCie
+ *
+ * Author: Simon Guinot <sguinot@lacie.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ */
+
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/slab.h>
+#include <linux/interrupt.h>
+#include <linux/irq.h>
+#include <linux/platform_device.h>
+#include <linux/err.h>
+#include <linux/mutex.h>
+#include <linux/hwmon.h>
+#include <linux/gpio.h>
+#include <linux/gpio-fan.h>
+
+struct gpio_fan_data {
+ struct platform_device *pdev;
+ struct device *hwmon_dev;
+ struct mutex lock; /* lock GPIOs operations. */
+ int num_ctrl;
+ unsigned *ctrl;
+ int num_speed;
+ struct gpio_fan_speed *speed;
+ int speed_index;
+#ifdef CONFIG_PM
+ int resume_speed;
+#endif
+ bool pwm_enable;
+ struct gpio_fan_alarm *alarm;
+ struct work_struct alarm_work;
+};
+
+/*
+ * Alarm GPIO.
+ */
+
+static void fan_alarm_notify(struct work_struct *ws)
+{
+ struct gpio_fan_data *fan_data =
+ container_of(ws, struct gpio_fan_data, alarm_work);
+
+ sysfs_notify(&fan_data->pdev->dev.kobj, NULL, "fan1_alarm");
+ kobject_uevent(&fan_data->pdev->dev.kobj, KOBJ_CHANGE);
+}
+
+static irqreturn_t fan_alarm_irq_handler(int irq, void *dev_id)
+{
+ struct gpio_fan_data *fan_data = dev_id;
+
+ schedule_work(&fan_data->alarm_work);
+
+ return IRQ_NONE;
+}
+
+static ssize_t show_fan_alarm(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct gpio_fan_data *fan_data = dev_get_drvdata(dev);
+ struct gpio_fan_alarm *alarm = fan_data->alarm;
+ int value = gpio_get_value(alarm->gpio);
+
+ if (alarm->active_low)
+ value = !value;
+
+ return sprintf(buf, "%d\n", value);
+}
+
+static DEVICE_ATTR(fan1_alarm, S_IRUGO, show_fan_alarm, NULL);
+
+static int fan_alarm_init(struct gpio_fan_data *fan_data,
+ struct gpio_fan_alarm *alarm)
+{
+ int err;
+ int alarm_irq;
+ struct platform_device *pdev = fan_data->pdev;
+
+ fan_data->alarm = alarm;
+
+ err = gpio_request(alarm->gpio, "GPIO fan alarm");
+ if (err)
+ return err;
+
+ err = gpio_direction_input(alarm->gpio);
+ if (err)
+ goto err_free_gpio;
+
+ err = device_create_file(&pdev->dev, &dev_attr_fan1_alarm);
+ if (err)
+ goto err_free_gpio;
+
+ /*
+ * If the alarm GPIO doesn't support interrupts, just leave
+ * without initializing the failure notification support.
+ */
+ alarm_irq = gpio_to_irq(alarm->gpio);
+ if (alarm_irq < 0)
+ return 0;
+
+ INIT_WORK(&fan_data->alarm_work, fan_alarm_notify);
+ set_irq_type(alarm_irq, IRQ_TYPE_EDGE_BOTH);
+ err = request_irq(alarm_irq, fan_alarm_irq_handler, IRQF_SHARED,
+ "GPIO fan alarm", fan_data);
+ if (err)
+ goto err_free_sysfs;
+
+ return 0;
+
+err_free_sysfs:
+ device_remove_file(&pdev->dev, &dev_attr_fan1_alarm);
+err_free_gpio:
+ gpio_free(alarm->gpio);
+
+ return err;
+}
+
+static void fan_alarm_free(struct gpio_fan_data *fan_data)
+{
+ struct platform_device *pdev = fan_data->pdev;
+ int alarm_irq = gpio_to_irq(fan_data->alarm->gpio);
+
+ if (alarm_irq >= 0)
+ free_irq(alarm_irq, fan_data);
+ device_remove_file(&pdev->dev, &dev_attr_fan1_alarm);
+ gpio_free(fan_data->alarm->gpio);
+}
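
Since fan_alarm_notify() above calls sysfs_notify() on the fan1_alarm attribute, a small user-space sketch of consuming that notification may help. This program is not part of the patch, and the sysfs path is an assumption (the hwmonX index varies per system):

#include <fcntl.h>
#include <poll.h>
#include <stdio.h>
#include <sys/types.h>
#include <unistd.h>

int main(void)
{
	char buf[8];
	ssize_t n;
	/* Hypothetical path; the hwmonX/device layout varies per system. */
	int fd = open("/sys/class/hwmon/hwmon0/device/fan1_alarm", O_RDONLY);
	struct pollfd pfd = { .fd = fd, .events = POLLPRI | POLLERR };

	if (fd < 0)
		return 1;
	read(fd, buf, sizeof(buf));		/* dummy read arms the notification */
	poll(&pfd, 1, -1);			/* blocks until sysfs_notify() fires */
	lseek(fd, 0, SEEK_SET);
	n = read(fd, buf, sizeof(buf) - 1);	/* re-read the current alarm value */
	if (n > 0) {
		buf[n] = '\0';
		printf("fan1_alarm: %s", buf);
	}
	close(fd);
	return 0;
}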
+
+/*
+ * Control GPIOs.
+ */
+
+/* Must be called with fan_data->lock held, except during initialization. */
+static void __set_fan_ctrl(struct gpio_fan_data *fan_data, int ctrl_val)
+{
+ int i;
+
+ for (i = 0; i < fan_data->num_ctrl; i++)
+ gpio_set_value(fan_data->ctrl[i], (ctrl_val >> i) & 1);
+}
+
+static int __get_fan_ctrl(struct gpio_fan_data *fan_data)
+{
+ int i;
+ int ctrl_val = 0;
+
+ for (i = 0; i < fan_data->num_ctrl; i++) {
+ int value;
+
+ value = gpio_get_value(fan_data->ctrl[i]);
+ ctrl_val |= (value << i);
+ }
+ return ctrl_val;
+}
+
+/* Must be called with fan_data->lock held, except during initialization. */
+static void set_fan_speed(struct gpio_fan_data *fan_data, int speed_index)
+{
+ if (fan_data->speed_index == speed_index)
+ return;
+
+ __set_fan_ctrl(fan_data, fan_data->speed[speed_index].ctrl_val);
+ fan_data->speed_index = speed_index;
+}
+
+static int get_fan_speed_index(struct gpio_fan_data *fan_data)
+{
+ int ctrl_val = __get_fan_ctrl(fan_data);
+ int i;
+
+ for (i = 0; i < fan_data->num_speed; i++)
+ if (fan_data->speed[i].ctrl_val == ctrl_val)
+ return i;
+
+ dev_warn(&fan_data->pdev->dev,
+ "missing speed array entry for GPIO value 0x%x\n", ctrl_val);
+
+ return -EINVAL;
+}
+
+static int rpm_to_speed_index(struct gpio_fan_data *fan_data, int rpm)
+{
+ struct gpio_fan_speed *speed = fan_data->speed;
+ int i;
+
+ for (i = 0; i < fan_data->num_speed; i++)
+ if (speed[i].rpm >= rpm)
+ return i;
+
+ return fan_data->num_speed - 1;
+}
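
As a worked illustration of the lookup above, assuming a hypothetical three-entry speed table (not part of the patch):

/*
 *	static const struct gpio_fan_speed table[] = {
 *		{ .rpm = 1500, .ctrl_val = 1 },
 *		{ .rpm = 3000, .ctrl_val = 2 },
 *		{ .rpm = 6000, .ctrl_val = 3 },
 *	};
 *
 *	rpm_to_speed_index(fan_data, 2500) -> 1   (first entry with rpm >= 2500)
 *	rpm_to_speed_index(fan_data, 9000) -> 2   (clamped to the fastest speed)
 */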
+
+static ssize_t show_pwm(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct gpio_fan_data *fan_data = dev_get_drvdata(dev);
+ u8 pwm = fan_data->speed_index * 255 / (fan_data->num_speed - 1);
+
+ return sprintf(buf, "%d\n", pwm);
+}
+
+static ssize_t set_pwm(struct device *dev, struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct gpio_fan_data *fan_data = dev_get_drvdata(dev);
+ unsigned long pwm;
+ int speed_index;
+ int ret = count;
+
+ if (strict_strtoul(buf, 10, &pwm) || pwm > 255)
+ return -EINVAL;
+
+ mutex_lock(&fan_data->lock);
+
+ if (!fan_data->pwm_enable) {
+ ret = -EPERM;
+ goto exit_unlock;
+ }
+
+ speed_index = DIV_ROUND_UP(pwm * (fan_data->num_speed - 1), 255);
+ set_fan_speed(fan_data, speed_index);
+
+exit_unlock:
+ mutex_unlock(&fan_data->lock);
+
+ return ret;
+}
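
A quick sketch of the pwm <-> speed_index round trip implemented by show_pwm() and set_pwm(), assuming num_speed = 4 (illustration only):

/*
 *	show_pwm: speed_index 2 -> 2 * 255 / (4 - 1)               = 170
 *	set_pwm:  pwm 170      -> DIV_ROUND_UP(170 * (4 - 1), 255) = 2
 *
 * so reading pwm1 and writing the same value back selects the same speed.
 */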
+
+static ssize_t show_pwm_enable(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct gpio_fan_data *fan_data = dev_get_drvdata(dev);
+
+ return sprintf(buf, "%d\n", fan_data->pwm_enable);
+}
+
+static ssize_t set_pwm_enable(struct device *dev, struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct gpio_fan_data *fan_data = dev_get_drvdata(dev);
+ unsigned long val;
+
+ if (strict_strtoul(buf, 10, &val) || val > 1)
+ return -EINVAL;
+
+ if (fan_data->pwm_enable == val)
+ return count;
+
+ mutex_lock(&fan_data->lock);
+
+ fan_data->pwm_enable = val;
+
+ /* Disable manual control mode: set fan to full speed. */
+ if (val == 0)
+ set_fan_speed(fan_data, fan_data->num_speed - 1);
+
+ mutex_unlock(&fan_data->lock);
+
+ return count;
+}
+
+static ssize_t show_pwm_mode(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ return sprintf(buf, "0\n");
+}
+
+static ssize_t show_rpm_min(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct gpio_fan_data *fan_data = dev_get_drvdata(dev);
+
+ return sprintf(buf, "%d\n", fan_data->speed[0].rpm);
+}
+
+static ssize_t show_rpm_max(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct gpio_fan_data *fan_data = dev_get_drvdata(dev);
+
+ return sprintf(buf, "%d\n",
+ fan_data->speed[fan_data->num_speed - 1].rpm);
+}
+
+static ssize_t show_rpm(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct gpio_fan_data *fan_data = dev_get_drvdata(dev);
+
+ return sprintf(buf, "%d\n", fan_data->speed[fan_data->speed_index].rpm);
+}
+
+static ssize_t set_rpm(struct device *dev, struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct gpio_fan_data *fan_data = dev_get_drvdata(dev);
+ unsigned long rpm;
+ int ret = count;
+
+ if (strict_strtoul(buf, 10, &rpm))
+ return -EINVAL;
+
+ mutex_lock(&fan_data->lock);
+
+ if (!fan_data->pwm_enable) {
+ ret = -EPERM;
+ goto exit_unlock;
+ }
+
+ set_fan_speed(fan_data, rpm_to_speed_index(fan_data, rpm));
+
+exit_unlock:
+ mutex_unlock(&fan_data->lock);
+
+ return ret;
+}
+
+static DEVICE_ATTR(pwm1, S_IRUGO | S_IWUSR, show_pwm, set_pwm);
+static DEVICE_ATTR(pwm1_enable, S_IRUGO | S_IWUSR,
+ show_pwm_enable, set_pwm_enable);
+static DEVICE_ATTR(pwm1_mode, S_IRUGO, show_pwm_mode, NULL);
+static DEVICE_ATTR(fan1_min, S_IRUGO, show_rpm_min, NULL);
+static DEVICE_ATTR(fan1_max, S_IRUGO, show_rpm_max, NULL);
+static DEVICE_ATTR(fan1_input, S_IRUGO, show_rpm, NULL);
+static DEVICE_ATTR(fan1_target, S_IRUGO | S_IWUSR, show_rpm, set_rpm);
+
+static struct attribute *gpio_fan_ctrl_attributes[] = {
+ &dev_attr_pwm1.attr,
+ &dev_attr_pwm1_enable.attr,
+ &dev_attr_pwm1_mode.attr,
+ &dev_attr_fan1_input.attr,
+ &dev_attr_fan1_target.attr,
+ &dev_attr_fan1_min.attr,
+ &dev_attr_fan1_max.attr,
+ NULL
+};
+
+static const struct attribute_group gpio_fan_ctrl_group = {
+ .attrs = gpio_fan_ctrl_attributes,
+};
+
+static int fan_ctrl_init(struct gpio_fan_data *fan_data,
+ struct gpio_fan_platform_data *pdata)
+{
+ struct platform_device *pdev = fan_data->pdev;
+ int num_ctrl = pdata->num_ctrl;
+ unsigned *ctrl = pdata->ctrl;
+ int i, err;
+
+ for (i = 0; i < num_ctrl; i++) {
+ err = gpio_request(ctrl[i], "GPIO fan control");
+ if (err)
+ goto err_free_gpio;
+
+ err = gpio_direction_output(ctrl[i], gpio_get_value(ctrl[i]));
+ if (err) {
+ gpio_free(ctrl[i]);
+ goto err_free_gpio;
+ }
+ }
+
+ fan_data->num_ctrl = num_ctrl;
+ fan_data->ctrl = ctrl;
+ fan_data->num_speed = pdata->num_speed;
+ fan_data->speed = pdata->speed;
+ fan_data->pwm_enable = true; /* Enable manual fan speed control. */
+ fan_data->speed_index = get_fan_speed_index(fan_data);
+ if (fan_data->speed_index < 0) {
+ err = -ENODEV;
+ goto err_free_gpio;
+ }
+
+ err = sysfs_create_group(&pdev->dev.kobj, &gpio_fan_ctrl_group);
+ if (err)
+ goto err_free_gpio;
+
+ return 0;
+
+err_free_gpio:
+ for (i = i - 1; i >= 0; i--)
+ gpio_free(ctrl[i]);
+
+ return err;
+}
+
+static void fan_ctrl_free(struct gpio_fan_data *fan_data)
+{
+ struct platform_device *pdev = fan_data->pdev;
+ int i;
+
+ sysfs_remove_group(&pdev->dev.kobj, &gpio_fan_ctrl_group);
+ for (i = 0; i < fan_data->num_ctrl; i++)
+ gpio_free(fan_data->ctrl[i]);
+}
+
+/*
+ * Platform driver.
+ */
+
+static ssize_t show_name(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ return sprintf(buf, "gpio-fan\n");
+}
+
+static DEVICE_ATTR(name, S_IRUGO, show_name, NULL);
+
+static int __devinit gpio_fan_probe(struct platform_device *pdev)
+{
+ int err;
+ struct gpio_fan_data *fan_data;
+ struct gpio_fan_platform_data *pdata = pdev->dev.platform_data;
+
+ if (!pdata)
+ return -EINVAL;
+
+ fan_data = kzalloc(sizeof(struct gpio_fan_data), GFP_KERNEL);
+ if (!fan_data)
+ return -ENOMEM;
+
+ fan_data->pdev = pdev;
+ platform_set_drvdata(pdev, fan_data);
+ mutex_init(&fan_data->lock);
+
+ /* Configure alarm GPIO if available. */
+ if (pdata->alarm) {
+ err = fan_alarm_init(fan_data, pdata->alarm);
+ if (err)
+ goto err_free_data;
+ }
+
+ /* Configure control GPIOs if available. */
+ if (pdata->ctrl && pdata->num_ctrl > 0) {
+ if (!pdata->speed || pdata->num_speed <= 1) {
+ err = -EINVAL;
+ goto err_free_alarm;
+ }
+ err = fan_ctrl_init(fan_data, pdata);
+ if (err)
+ goto err_free_alarm;
+ }
+
+ err = device_create_file(&pdev->dev, &dev_attr_name);
+ if (err)
+ goto err_free_ctrl;
+
+ /* Make this driver part of hwmon class. */
+ fan_data->hwmon_dev = hwmon_device_register(&pdev->dev);
+ if (IS_ERR(fan_data->hwmon_dev)) {
+ err = PTR_ERR(fan_data->hwmon_dev);
+ goto err_remove_name;
+ }
+
+ dev_info(&pdev->dev, "GPIO fan initialized\n");
+
+ return 0;
+
+err_remove_name:
+ device_remove_file(&pdev->dev, &dev_attr_name);
+err_free_ctrl:
+ if (fan_data->ctrl)
+ fan_ctrl_free(fan_data);
+err_free_alarm:
+ if (fan_data->alarm)
+ fan_alarm_free(fan_data);
+err_free_data:
+ platform_set_drvdata(pdev, NULL);
+ kfree(fan_data);
+
+ return err;
+}
+
+static int __devexit gpio_fan_remove(struct platform_device *pdev)
+{
+ struct gpio_fan_data *fan_data = platform_get_drvdata(pdev);
+
+ hwmon_device_unregister(fan_data->hwmon_dev);
+ device_remove_file(&pdev->dev, &dev_attr_name);
+ if (fan_data->alarm)
+ fan_alarm_free(fan_data);
+ if (fan_data->ctrl)
+ fan_ctrl_free(fan_data);
+ kfree(fan_data);
+
+ return 0;
+}
+
+#ifdef CONFIG_PM
+static int gpio_fan_suspend(struct platform_device *pdev, pm_message_t state)
+{
+ struct gpio_fan_data *fan_data = platform_get_drvdata(pdev);
+
+ if (fan_data->ctrl) {
+ fan_data->resume_speed = fan_data->speed_index;
+ set_fan_speed(fan_data, 0);
+ }
+
+ return 0;
+}
+
+static int gpio_fan_resume(struct platform_device *pdev)
+{
+ struct gpio_fan_data *fan_data = platform_get_drvdata(pdev);
+
+ if (fan_data->ctrl)
+ set_fan_speed(fan_data, fan_data->resume_speed);
+
+ return 0;
+}
+#else
+#define gpio_fan_suspend NULL
+#define gpio_fan_resume NULL
+#endif
+
+static struct platform_driver gpio_fan_driver = {
+ .probe = gpio_fan_probe,
+ .remove = __devexit_p(gpio_fan_remove),
+ .suspend = gpio_fan_suspend,
+ .resume = gpio_fan_resume,
+ .driver = {
+ .name = "gpio-fan",
+ },
+};
+
+static int __init gpio_fan_init(void)
+{
+ return platform_driver_register(&gpio_fan_driver);
+}
+
+static void __exit gpio_fan_exit(void)
+{
+ platform_driver_unregister(&gpio_fan_driver);
+}
+
+module_init(gpio_fan_init);
+module_exit(gpio_fan_exit);
+
+MODULE_AUTHOR("Simon Guinot <sguinot@lacie.com>");
+MODULE_DESCRIPTION("GPIO FAN driver");
+MODULE_LICENSE("GPL");
+MODULE_ALIAS("platform:gpio-fan");
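
For context, a board-file sketch of how platform code might describe a two-line fan to this driver. The GPIO numbers and RPM table are made up, and only fields referenced by the driver above are used; this is an illustration, not part of the patch:

#include <linux/gpio-fan.h>
#include <linux/platform_device.h>

static struct gpio_fan_alarm board_fan_alarm = {
	.gpio		= 31,	/* hypothetical alarm line */
	.active_low	= 1,
};

static unsigned board_fan_ctrl[] = { 17, 18 };	/* hypothetical control lines */

static struct gpio_fan_speed board_fan_speed[] = {
	{ .rpm = 0,    .ctrl_val = 0 },
	{ .rpm = 1500, .ctrl_val = 1 },
	{ .rpm = 3000, .ctrl_val = 2 },
	{ .rpm = 6000, .ctrl_val = 3 },
};

static struct gpio_fan_platform_data board_fan_data = {
	.num_ctrl	= ARRAY_SIZE(board_fan_ctrl),
	.ctrl		= board_fan_ctrl,
	.num_speed	= ARRAY_SIZE(board_fan_speed),
	.speed		= board_fan_speed,
	.alarm		= &board_fan_alarm,
};

static struct platform_device board_fan_device = {
	.name	= "gpio-fan",
	.id	= -1,
	.dev	= { .platform_data = &board_fan_data },
};

/* registered from board init code with platform_device_register(&board_fan_device) */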
diff --git a/drivers/hwmon/hp_accel.c b/drivers/hwmon/hp_accel.c
index 36e957532230..a56a78412fcb 100644
--- a/drivers/hwmon/hp_accel.c
+++ b/drivers/hwmon/hp_accel.c
@@ -146,7 +146,7 @@ int lis3lv02d_acpi_write(struct lis3lv02d *lis3, int reg, u8 val)
static int lis3lv02d_dmi_matched(const struct dmi_system_id *dmi)
{
- lis3_dev.ac = *((struct axis_conversion *)dmi->driver_data);
+ lis3_dev.ac = *((union axis_conversion *)dmi->driver_data);
printk(KERN_INFO DRIVER_NAME ": hardware type %s found.\n", dmi->ident);
return 1;
@@ -154,16 +154,19 @@ static int lis3lv02d_dmi_matched(const struct dmi_system_id *dmi)
/* Represents, for each axis seen by userspace, the corresponding hw axis (+1).
* If the value is negative, the opposite of the hw value is used. */
-static struct axis_conversion lis3lv02d_axis_normal = {1, 2, 3};
-static struct axis_conversion lis3lv02d_axis_y_inverted = {1, -2, 3};
-static struct axis_conversion lis3lv02d_axis_x_inverted = {-1, 2, 3};
-static struct axis_conversion lis3lv02d_axis_z_inverted = {1, 2, -3};
-static struct axis_conversion lis3lv02d_axis_xy_swap = {2, 1, 3};
-static struct axis_conversion lis3lv02d_axis_xy_rotated_left = {-2, 1, 3};
-static struct axis_conversion lis3lv02d_axis_xy_rotated_left_usd = {-2, 1, -3};
-static struct axis_conversion lis3lv02d_axis_xy_swap_inverted = {-2, -1, 3};
-static struct axis_conversion lis3lv02d_axis_xy_rotated_right = {2, -1, 3};
-static struct axis_conversion lis3lv02d_axis_xy_swap_yz_inverted = {2, -1, -3};
+#define DEFINE_CONV(name, x, y, z) \
+ static union axis_conversion lis3lv02d_axis_##name = \
+ { .as_array = { x, y, z } }
+DEFINE_CONV(normal, 1, 2, 3);
+DEFINE_CONV(y_inverted, 1, -2, 3);
+DEFINE_CONV(x_inverted, -1, 2, 3);
+DEFINE_CONV(z_inverted, 1, 2, -3);
+DEFINE_CONV(xy_swap, 2, 1, 3);
+DEFINE_CONV(xy_rotated_left, -2, 1, 3);
+DEFINE_CONV(xy_rotated_left_usd, -2, 1, -3);
+DEFINE_CONV(xy_swap_inverted, -2, -1, 3);
+DEFINE_CONV(xy_rotated_right, 2, -1, 3);
+DEFINE_CONV(xy_swap_yz_inverted, 2, -1, -3);
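
Expansion sketch (not part of the patch): DEFINE_CONV(xy_swap, 2, 1, 3) expands to

	static union axis_conversion lis3lv02d_axis_xy_swap =
		{ .as_array = { 2, 1, 3 } };

i.e. userspace x is taken from hw axis 2, y from hw axis 1 and z from hw axis 3, matching the comment above the list.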
#define AXIS_DMI_MATCH(_ident, _name, _axis) { \
.ident = _ident, \
@@ -222,7 +225,7 @@ static struct dmi_system_id lis3lv02d_dmi_ids[] = {
AXIS_DMI_MATCH("HPB452x", "HP ProBook 452", y_inverted),
AXIS_DMI_MATCH("HPB522x", "HP ProBook 522", xy_swap),
AXIS_DMI_MATCH("HPB532x", "HP ProBook 532", y_inverted),
- AXIS_DMI_MATCH("Mini5102", "HP Mini 5102", xy_rotated_left_usd),
+ AXIS_DMI_MATCH("Mini510x", "HP Mini 510", xy_rotated_left_usd),
{ NULL, }
/* Laptop models without axis info (yet):
* "NC6910" "HP Compaq 6910"
@@ -299,7 +302,10 @@ static int lis3lv02d_add(struct acpi_device *device)
lis3lv02d_enum_resources(device);
/* If possible use a "standard" axes order */
- if (dmi_check_system(lis3lv02d_dmi_ids) == 0) {
+ if (lis3_dev.ac.x && lis3_dev.ac.y && lis3_dev.ac.z) {
+ printk(KERN_INFO DRIVER_NAME ": Using custom axes %d,%d,%d\n",
+ lis3_dev.ac.x, lis3_dev.ac.y, lis3_dev.ac.z);
+ } else if (dmi_check_system(lis3lv02d_dmi_ids) == 0) {
printk(KERN_INFO DRIVER_NAME ": laptop model unknown, "
"using default axes configuration\n");
lis3_dev.ac = lis3lv02d_axis_normal;
diff --git a/drivers/hwmon/i5k_amb.c b/drivers/hwmon/i5k_amb.c
index 937983407e2a..c4c40be0edbf 100644
--- a/drivers/hwmon/i5k_amb.c
+++ b/drivers/hwmon/i5k_amb.c
@@ -497,12 +497,14 @@ static unsigned long chipset_ids[] = {
0
};
+#ifdef MODULE
static struct pci_device_id i5k_amb_ids[] __devinitdata = {
{ PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_5000_ERR) },
{ PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_5400_ERR) },
{ 0, }
};
MODULE_DEVICE_TABLE(pci, i5k_amb_ids);
+#endif
static int __devinit i5k_amb_probe(struct platform_device *pdev)
{
diff --git a/drivers/hwmon/it87.c b/drivers/hwmon/it87.c
index f7701295937d..14a5d981be7d 100644
--- a/drivers/hwmon/it87.c
+++ b/drivers/hwmon/it87.c
@@ -15,7 +15,9 @@
* IT8716F Super I/O chip w/LPC interface
* IT8718F Super I/O chip w/LPC interface
* IT8720F Super I/O chip w/LPC interface
+ * IT8721F Super I/O chip w/LPC interface
* IT8726F Super I/O chip w/LPC interface
+ * IT8758E Super I/O chip w/LPC interface
* Sis950 A clone of the IT8705F
*
* Copyright (C) 2001 Chris Gauthron
@@ -54,7 +56,7 @@
#define DRVNAME "it87"
-enum chips { it87, it8712, it8716, it8718, it8720 };
+enum chips { it87, it8712, it8716, it8718, it8720, it8721 };
static unsigned short force_id;
module_param(force_id, ushort, 0);
@@ -126,6 +128,7 @@ superio_exit(void)
#define IT8716F_DEVID 0x8716
#define IT8718F_DEVID 0x8718
#define IT8720F_DEVID 0x8720
+#define IT8721F_DEVID 0x8721
#define IT8726F_DEVID 0x8726
#define IT87_ACT_REG 0x30
#define IT87_BASE_REG 0x60
@@ -202,56 +205,6 @@ static const u8 IT87_REG_FANX_MIN[] = { 0x1b, 0x1c, 0x1d, 0x85, 0x87 };
#define IT87_REG_AUTO_TEMP(nr, i) (0x60 + (nr) * 8 + (i))
#define IT87_REG_AUTO_PWM(nr, i) (0x65 + (nr) * 8 + (i))
-#define IN_TO_REG(val) (SENSORS_LIMIT((((val) + 8)/16),0,255))
-#define IN_FROM_REG(val) ((val) * 16)
-
-static inline u8 FAN_TO_REG(long rpm, int div)
-{
- if (rpm == 0)
- return 255;
- rpm = SENSORS_LIMIT(rpm, 1, 1000000);
- return SENSORS_LIMIT((1350000 + rpm * div / 2) / (rpm * div), 1,
- 254);
-}
-
-static inline u16 FAN16_TO_REG(long rpm)
-{
- if (rpm == 0)
- return 0xffff;
- return SENSORS_LIMIT((1350000 + rpm) / (rpm * 2), 1, 0xfffe);
-}
-
-#define FAN_FROM_REG(val,div) ((val)==0?-1:(val)==255?0:1350000/((val)*(div)))
-/* The divider is fixed to 2 in 16-bit mode */
-#define FAN16_FROM_REG(val) ((val)==0?-1:(val)==0xffff?0:1350000/((val)*2))
-
-#define TEMP_TO_REG(val) (SENSORS_LIMIT(((val)<0?(((val)-500)/1000):\
- ((val)+500)/1000),-128,127))
-#define TEMP_FROM_REG(val) ((val) * 1000)
-
-#define PWM_TO_REG(val) ((val) >> 1)
-#define PWM_FROM_REG(val) (((val)&0x7f) << 1)
-
-static int DIV_TO_REG(int val)
-{
- int answer = 0;
- while (answer < 7 && (val >>= 1))
- answer++;
- return answer;
-}
-#define DIV_FROM_REG(val) (1 << (val))
-
-static const unsigned int pwm_freq[8] = {
- 48000000 / 128,
- 24000000 / 128,
- 12000000 / 128,
- 8000000 / 128,
- 6000000 / 128,
- 3000000 / 128,
- 1500000 / 128,
- 750000 / 128,
-};
-
struct it87_sio_data {
enum chips type;
@@ -279,6 +232,7 @@ struct it87_data {
char valid; /* !=0 if following fields are valid */
unsigned long last_updated; /* In jiffies */
+ u16 in_scaled; /* Internal voltage sensors are scaled */
u8 in[9]; /* Register value */
u8 in_max[8]; /* Register value */
u8 in_min[8]; /* Register value */
@@ -310,6 +264,96 @@ struct it87_data {
s8 auto_temp[3][5]; /* [nr][0] is point1_temp_hyst */
};
+static u8 in_to_reg(const struct it87_data *data, int nr, long val)
+{
+ long lsb;
+
+ if (data->type == it8721) {
+ if (data->in_scaled & (1 << nr))
+ lsb = 24;
+ else
+ lsb = 12;
+ } else
+ lsb = 16;
+
+ val = DIV_ROUND_CLOSEST(val, lsb);
+ return SENSORS_LIMIT(val, 0, 255);
+}
+
+static int in_from_reg(const struct it87_data *data, int nr, int val)
+{
+ if (data->type == it8721) {
+ if (data->in_scaled & (1 << nr))
+ return val * 24;
+ else
+ return val * 12;
+ } else
+ return val * 16;
+}
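
Worked example of the per-chip ADC scaling above (illustration only): a raw register value of 100 corresponds to

	older chips (it87..it8720):		100 * 16 = 1600 mV
	it8721, regular input:			100 * 12 = 1200 mV
	it8721, scaled internal input:		100 * 24 = 2400 mV

and in_to_reg() performs the inverse with DIV_ROUND_CLOSEST().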
+
+static inline u8 FAN_TO_REG(long rpm, int div)
+{
+ if (rpm == 0)
+ return 255;
+ rpm = SENSORS_LIMIT(rpm, 1, 1000000);
+ return SENSORS_LIMIT((1350000 + rpm * div / 2) / (rpm * div), 1,
+ 254);
+}
+
+static inline u16 FAN16_TO_REG(long rpm)
+{
+ if (rpm == 0)
+ return 0xffff;
+ return SENSORS_LIMIT((1350000 + rpm) / (rpm * 2), 1, 0xfffe);
+}
+
+#define FAN_FROM_REG(val, div) ((val) == 0 ? -1 : (val) == 255 ? 0 : \
+ 1350000 / ((val) * (div)))
+/* The divider is fixed to 2 in 16-bit mode */
+#define FAN16_FROM_REG(val) ((val) == 0 ? -1 : (val) == 0xffff ? 0 : \
+ 1350000 / ((val) * 2))
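
Worked example of the 16-bit tachometer conversion (illustration only): with the divider fixed to 2, a 3000 RPM fan gives

	FAN16_TO_REG(3000)  = (1350000 + 3000) / (3000 * 2) = 225
	FAN16_FROM_REG(225) = 1350000 / (225 * 2)           = 3000 RPM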
+
+#define TEMP_TO_REG(val) (SENSORS_LIMIT(((val) < 0 ? (((val) - 500) / 1000) : \
+ ((val) + 500) / 1000), -128, 127))
+#define TEMP_FROM_REG(val) ((val) * 1000)
+
+static u8 pwm_to_reg(const struct it87_data *data, long val)
+{
+ if (data->type == it8721)
+ return val;
+ else
+ return val >> 1;
+}
+
+static int pwm_from_reg(const struct it87_data *data, u8 reg)
+{
+ if (data->type == it8721)
+ return reg;
+ else
+ return (reg & 0x7f) << 1;
+}
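
Sketch of the PWM register width difference handled above (illustration only): a sysfs pwm value of 128 is stored as

	it8721:       pwm_to_reg() -> 128            (full 8-bit duty cycle)
	older chips:  pwm_to_reg() -> 128 >> 1 = 64  (7-bit register)

and pwm_from_reg() maps the stored value back onto the 0-255 sysfs range.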
+
+
+static int DIV_TO_REG(int val)
+{
+ int answer = 0;
+ while (answer < 7 && (val >>= 1))
+ answer++;
+ return answer;
+}
+#define DIV_FROM_REG(val) (1 << (val))
+
+static const unsigned int pwm_freq[8] = {
+ 48000000 / 128,
+ 24000000 / 128,
+ 12000000 / 128,
+ 8000000 / 128,
+ 6000000 / 128,
+ 3000000 / 128,
+ 1500000 / 128,
+ 750000 / 128,
+};
+
static inline int has_16bit_fans(const struct it87_data *data)
{
/* IT8705F Datasheet 0.4.1, 3h == Version G.
@@ -319,7 +363,8 @@ static inline int has_16bit_fans(const struct it87_data *data)
|| (data->type == it8712 && data->revision >= 0x08)
|| data->type == it8716
|| data->type == it8718
- || data->type == it8720;
+ || data->type == it8720
+ || data->type == it8721;
}
static inline int has_old_autopwm(const struct it87_data *data)
@@ -357,7 +402,7 @@ static ssize_t show_in(struct device *dev, struct device_attribute *attr,
int nr = sensor_attr->index;
struct it87_data *data = it87_update_device(dev);
- return sprintf(buf, "%d\n", IN_FROM_REG(data->in[nr]));
+ return sprintf(buf, "%d\n", in_from_reg(data, nr, data->in[nr]));
}
static ssize_t show_in_min(struct device *dev, struct device_attribute *attr,
@@ -367,7 +412,7 @@ static ssize_t show_in_min(struct device *dev, struct device_attribute *attr,
int nr = sensor_attr->index;
struct it87_data *data = it87_update_device(dev);
- return sprintf(buf, "%d\n", IN_FROM_REG(data->in_min[nr]));
+ return sprintf(buf, "%d\n", in_from_reg(data, nr, data->in_min[nr]));
}
static ssize_t show_in_max(struct device *dev, struct device_attribute *attr,
@@ -377,7 +422,7 @@ static ssize_t show_in_max(struct device *dev, struct device_attribute *attr,
int nr = sensor_attr->index;
struct it87_data *data = it87_update_device(dev);
- return sprintf(buf, "%d\n", IN_FROM_REG(data->in_max[nr]));
+ return sprintf(buf, "%d\n", in_from_reg(data, nr, data->in_max[nr]));
}
static ssize_t set_in_min(struct device *dev, struct device_attribute *attr,
@@ -393,7 +438,7 @@ static ssize_t set_in_min(struct device *dev, struct device_attribute *attr,
return -EINVAL;
mutex_lock(&data->update_lock);
- data->in_min[nr] = IN_TO_REG(val);
+ data->in_min[nr] = in_to_reg(data, nr, val);
it87_write_value(data, IT87_REG_VIN_MIN(nr),
data->in_min[nr]);
mutex_unlock(&data->update_lock);
@@ -412,7 +457,7 @@ static ssize_t set_in_max(struct device *dev, struct device_attribute *attr,
return -EINVAL;
mutex_lock(&data->update_lock);
- data->in_max[nr] = IN_TO_REG(val);
+ data->in_max[nr] = in_to_reg(data, nr, val);
it87_write_value(data, IT87_REG_VIN_MAX(nr),
data->in_max[nr]);
mutex_unlock(&data->update_lock);
@@ -642,7 +687,8 @@ static ssize_t show_pwm(struct device *dev, struct device_attribute *attr,
int nr = sensor_attr->index;
struct it87_data *data = it87_update_device(dev);
- return sprintf(buf, "%d\n", PWM_FROM_REG(data->pwm_duty[nr]));
+ return sprintf(buf, "%d\n",
+ pwm_from_reg(data, data->pwm_duty[nr]));
}
static ssize_t show_pwm_freq(struct device *dev, struct device_attribute *attr,
char *buf)
@@ -812,7 +858,7 @@ static ssize_t set_pwm(struct device *dev, struct device_attribute *attr,
return -EINVAL;
mutex_lock(&data->update_lock);
- data->pwm_duty[nr] = PWM_TO_REG(val);
+ data->pwm_duty[nr] = pwm_to_reg(data, val);
/* If we are in manual mode, write the duty cycle immediately;
* otherwise, just store it for later use. */
if (!(data->pwm_ctrl[nr] & 0x80)) {
@@ -916,7 +962,8 @@ static ssize_t show_auto_pwm(struct device *dev,
int nr = sensor_attr->nr;
int point = sensor_attr->index;
- return sprintf(buf, "%d\n", PWM_FROM_REG(data->auto_pwm[nr][point]));
+ return sprintf(buf, "%d\n",
+ pwm_from_reg(data, data->auto_pwm[nr][point]));
}
static ssize_t set_auto_pwm(struct device *dev,
@@ -933,7 +980,7 @@ static ssize_t set_auto_pwm(struct device *dev,
return -EINVAL;
mutex_lock(&data->update_lock);
- data->auto_pwm[nr][point] = PWM_TO_REG(val);
+ data->auto_pwm[nr][point] = pwm_to_reg(data, val);
it87_write_value(data, IT87_REG_AUTO_PWM(nr, point),
data->auto_pwm[nr][point]);
mutex_unlock(&data->update_lock);
@@ -1203,9 +1250,16 @@ static ssize_t show_label(struct device *dev, struct device_attribute *attr,
"5VSB",
"Vbat",
};
+ static const char *labels_it8721[] = {
+ "+3.3V",
+ "3VSB",
+ "Vbat",
+ };
+ struct it87_data *data = dev_get_drvdata(dev);
int nr = to_sensor_dev_attr(attr)->index;
- return sprintf(buf, "%s\n", labels[nr]);
+ return sprintf(buf, "%s\n", data->type == it8721 ? labels_it8721[nr]
+ : labels[nr]);
}
static SENSOR_DEVICE_ATTR(in3_label, S_IRUGO, show_label, NULL, 0);
static SENSOR_DEVICE_ATTR(in7_label, S_IRUGO, show_label, NULL, 1);
@@ -1490,6 +1544,9 @@ static int __init it87_find(unsigned short *address,
case IT8720F_DEVID:
sio_data->type = it8720;
break;
+ case IT8721F_DEVID:
+ sio_data->type = it8721;
+ break;
case 0xffff: /* No device at all */
goto exit;
default:
@@ -1530,11 +1587,17 @@ static int __init it87_find(unsigned short *address,
int reg;
superio_select(GPIO);
- /* We need at least 4 VID pins */
+
reg = superio_inb(IT87_SIO_GPIO3_REG);
- if (reg & 0x0f) {
- pr_info("it87: VID is disabled (pins used for GPIO)\n");
+ if (sio_data->type == it8721) {
+ /* The IT8721F/IT8758E doesn't have VID pins at all */
sio_data->skip_vid = 1;
+ } else {
+ /* We need at least 4 VID pins */
+ if (reg & 0x0f) {
+ pr_info("it87: VID is disabled (pins used for GPIO)\n");
+ sio_data->skip_vid = 1;
+ }
}
/* Check if fan3 is there or not */
@@ -1572,7 +1635,7 @@ static int __init it87_find(unsigned short *address,
}
if (reg & (1 << 0))
sio_data->internal |= (1 << 0);
- if (reg & (1 << 1))
+ if ((reg & (1 << 1)) || sio_data->type == it8721)
sio_data->internal |= (1 << 1);
sio_data->beep_pin = superio_inb(IT87_SIO_BEEP_PIN_REG) & 0x3f;
@@ -1650,6 +1713,7 @@ static int __devinit it87_probe(struct platform_device *pdev)
"it8716",
"it8718",
"it8720",
+ "it8721",
};
res = platform_get_resource(pdev, IORESOURCE_IO, 0);
@@ -1686,6 +1750,16 @@ static int __devinit it87_probe(struct platform_device *pdev)
/* Check PWM configuration */
enable_pwm_interface = it87_check_pwm(dev);
+ /* Starting with IT8721F, we handle scaling of internal voltages */
+ if (data->type == it8721) {
+ if (sio_data->internal & (1 << 0))
+ data->in_scaled |= (1 << 3); /* in3 is AVCC */
+ if (sio_data->internal & (1 << 1))
+ data->in_scaled |= (1 << 7); /* in7 is VSB */
+ if (sio_data->internal & (1 << 2))
+ data->in_scaled |= (1 << 8); /* in8 is Vbat */
+ }
+
/* Initialize the IT87 chip */
it87_init_device(pdev);
@@ -2051,7 +2125,7 @@ static struct it87_data *it87_update_device(struct device *dev)
data->sensor = it87_read_value(data, IT87_REG_TEMP_ENABLE);
/* The 8705 does not have VID capability.
- The 8718 and the 8720 don't use IT87_REG_VID for the
+ The 8718 and later don't use IT87_REG_VID for the
same purpose. */
if (data->type == it8712 || data->type == it8716) {
data->vid = it87_read_value(data, IT87_REG_VID);
@@ -2151,7 +2225,7 @@ static void __exit sm_it87_exit(void)
MODULE_AUTHOR("Chris Gauthron, "
"Jean Delvare <khali@linux-fr.org>");
-MODULE_DESCRIPTION("IT8705F/8712F/8716F/8718F/8720F/8726F, SiS950 driver");
+MODULE_DESCRIPTION("IT8705F/IT871xF/IT872xF hardware monitoring driver");
module_param(update_vbat, bool, 0);
MODULE_PARM_DESC(update_vbat, "Update vbat if set else return powerup value");
module_param(fix_pwm_polarity, bool, 0);
diff --git a/drivers/hwmon/k8temp.c b/drivers/hwmon/k8temp.c
index 39ead2a4d3c5..418496f13020 100644
--- a/drivers/hwmon/k8temp.c
+++ b/drivers/hwmon/k8temp.c
@@ -191,38 +191,31 @@ static int __devinit k8temp_probe(struct pci_dev *pdev,
model = boot_cpu_data.x86_model;
stepping = boot_cpu_data.x86_mask;
- switch (boot_cpu_data.x86) {
- case 0xf:
- /* feature available since SH-C0, exclude older revisions */
- if (((model == 4) && (stepping == 0)) ||
- ((model == 5) && (stepping <= 1))) {
- err = -ENODEV;
- goto exit_free;
- }
-
- /*
- * AMD NPT family 0fh, i.e. RevF and RevG:
- * meaning of SEL_CORE bit is inverted
- */
- if (model >= 0x40) {
- data->swap_core_select = 1;
- dev_warn(&pdev->dev, "Temperature readouts might be "
- "wrong - check erratum #141\n");
- }
-
- if (is_rev_g_desktop(model)) {
- /*
- * RevG desktop CPUs (i.e. no socket S1G1 or
- * ASB1 parts) need additional offset,
- * otherwise reported temperature is below
- * ambient temperature
- */
- data->temp_offset = 21000;
- }
+ /* feature available since SH-C0, exclude older revisions */
+ if (((model == 4) && (stepping == 0)) ||
+ ((model == 5) && (stepping <= 1))) {
+ err = -ENODEV;
+ goto exit_free;
+ }
- break;
+ /*
+ * AMD NPT family 0fh, i.e. RevF and RevG:
+ * meaning of SEL_CORE bit is inverted
+ */
+ if (model >= 0x40) {
+ data->swap_core_select = 1;
+ dev_warn(&pdev->dev, "Temperature readouts might be wrong - "
+ "check erratum #141\n");
}
+ /*
+ * RevG desktop CPUs (i.e. no socket S1G1 or ASB1 parts) need
+ * additional offset, otherwise reported temperature is below
+ * ambient temperature
+ */
+ if (is_rev_g_desktop(model))
+ data->temp_offset = 21000;
+
pci_read_config_byte(pdev, REG_TEMP, &scfg);
scfg &= ~(SEL_PLACE | SEL_CORE); /* Select sensor 0, core0 */
pci_write_config_byte(pdev, REG_TEMP, scfg);
diff --git a/drivers/hwmon/lis3lv02d.c b/drivers/hwmon/lis3lv02d.c
index fc591ae53107..0cee73a6124e 100644
--- a/drivers/hwmon/lis3lv02d.c
+++ b/drivers/hwmon/lis3lv02d.c
@@ -31,9 +31,11 @@
#include <linux/delay.h>
#include <linux/wait.h>
#include <linux/poll.h>
+#include <linux/slab.h>
#include <linux/freezer.h>
#include <linux/uaccess.h>
#include <linux/miscdevice.h>
+#include <linux/pm_runtime.h>
#include <asm/atomic.h>
#include "lis3lv02d.h"
@@ -43,6 +45,16 @@
#define MDPS_POLL_INTERVAL 50
#define MDPS_POLL_MIN 0
#define MDPS_POLL_MAX 2000
+
+#define LIS3_SYSFS_POWERDOWN_DELAY 5000 /* In milliseconds */
+
+#define SELFTEST_OK 0
+#define SELFTEST_FAIL -1
+#define SELFTEST_IRQ -2
+
+#define IRQ_LINE0 0
+#define IRQ_LINE1 1
+
/*
* The sensor can also generate interrupts (DRDY) but it's pretty pointless
* because they are generated even if the data do not change. So it's better
@@ -66,8 +78,10 @@
#define LIS3_SENSITIVITY_12B ((LIS3_ACCURACY * 1000) / 1024)
#define LIS3_SENSITIVITY_8B (18 * LIS3_ACCURACY)
-#define LIS3_DEFAULT_FUZZ 3
-#define LIS3_DEFAULT_FLAT 3
+#define LIS3_DEFAULT_FUZZ_12B 3
+#define LIS3_DEFAULT_FLAT_12B 3
+#define LIS3_DEFAULT_FUZZ_8B 1
+#define LIS3_DEFAULT_FLAT_8B 1
struct lis3lv02d lis3_dev = {
.misc_wait = __WAIT_QUEUE_HEAD_INITIALIZER(lis3_dev.misc_wait),
@@ -75,6 +89,30 @@ struct lis3lv02d lis3_dev = {
EXPORT_SYMBOL_GPL(lis3_dev);
+/* Just like param_set_int() but with a sanity check so that the value
+ * cannot index outside the axis array.
+ */
+static int param_set_axis(const char *val, const struct kernel_param *kp)
+{
+ int ret = param_set_int(val, kp);
+ if (!ret) {
+ int val = *(int *)kp->arg;
+ if (val < 0)
+ val = -val;
+ if (!val || val > 3)
+ return -EINVAL;
+ }
+ return ret;
+}
+
+static struct kernel_param_ops param_ops_axis = {
+ .set = param_set_axis,
+ .get = param_get_int,
+};
+
+module_param_array_named(axes, lis3_dev.ac.as_array, axis, NULL, 0644);
+MODULE_PARM_DESC(axes, "Axis-mapping for x,y,z directions");
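
A usage sketch for the new parameter, assuming this code is built as the lis3lv02d module (the parameter prefix follows the module name):

	modprobe lis3lv02d axes=-1,2,3
	# or, when built in, on the kernel command line:
	lis3lv02d.axes=-1,2,3

Each of the three entries must be 1, 2 or 3, optionally negated; other values are rejected by param_set_axis() with -EINVAL.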
+
static s16 lis3lv02d_read_8(struct lis3lv02d *lis3, int reg)
{
s8 lo;
@@ -123,9 +161,24 @@ static void lis3lv02d_get_xyz(struct lis3lv02d *lis3, int *x, int *y, int *z)
int position[3];
int i;
- position[0] = lis3->read_data(lis3, OUTX);
- position[1] = lis3->read_data(lis3, OUTY);
- position[2] = lis3->read_data(lis3, OUTZ);
+ if (lis3->blkread) {
+ if (lis3_dev.whoami == WAI_12B) {
+ u16 data[3];
+ lis3->blkread(lis3, OUTX_L, 6, (u8 *)data);
+ for (i = 0; i < 3; i++)
+ position[i] = (s16)le16_to_cpu(data[i]);
+ } else {
+ u8 data[5];
+ /* Data: x, dummy, y, dummy, z */
+ lis3->blkread(lis3, OUTX, 5, data);
+ for (i = 0; i < 3; i++)
+ position[i] = (s8)data[i * 2];
+ }
+ } else {
+ position[0] = lis3->read_data(lis3, OUTX);
+ position[1] = lis3->read_data(lis3, OUTY);
+ position[2] = lis3->read_data(lis3, OUTZ);
+ }
for (i = 0; i < 3; i++)
position[i] = (position[i] * lis3->scale) / LIS3_ACCURACY;
@@ -138,6 +191,7 @@ static void lis3lv02d_get_xyz(struct lis3lv02d *lis3, int *x, int *y, int *z)
/* conversion btw sampling rate and the register values */
static int lis3_12_rates[4] = {40, 160, 640, 2560};
static int lis3_8_rates[2] = {100, 400};
+static int lis3_3dc_rates[16] = {0, 1, 10, 25, 50, 100, 200, 400, 1600, 5000};
/* ODR is Output Data Rate */
static int lis3lv02d_get_odr(void)
@@ -156,6 +210,9 @@ static int lis3lv02d_set_odr(int rate)
u8 ctrl;
int i, len, shift;
+ if (!rate)
+ return -EINVAL;
+
lis3_dev.read(&lis3_dev, CTRL_REG1, &ctrl);
ctrl &= ~lis3_dev.odr_mask;
len = 1 << hweight_long(lis3_dev.odr_mask); /* # of possible values */
@@ -172,19 +229,42 @@ static int lis3lv02d_set_odr(int rate)
static int lis3lv02d_selftest(struct lis3lv02d *lis3, s16 results[3])
{
- u8 reg;
+ u8 ctlreg, reg;
s16 x, y, z;
u8 selftest;
int ret;
+ u8 ctrl_reg_data;
+ unsigned char irq_cfg;
mutex_lock(&lis3->mutex);
- if (lis3_dev.whoami == WAI_12B)
- selftest = CTRL1_ST;
- else
- selftest = CTRL1_STP;
- lis3->read(lis3, CTRL_REG1, &reg);
- lis3->write(lis3, CTRL_REG1, (reg | selftest));
+ irq_cfg = lis3->irq_cfg;
+ if (lis3_dev.whoami == WAI_8B) {
+ lis3->data_ready_count[IRQ_LINE0] = 0;
+ lis3->data_ready_count[IRQ_LINE1] = 0;
+
+ /* Change interrupt cfg to data ready for selftest */
+ atomic_inc(&lis3_dev.wake_thread);
+ lis3->irq_cfg = LIS3_IRQ1_DATA_READY | LIS3_IRQ2_DATA_READY;
+ lis3->read(lis3, CTRL_REG3, &ctrl_reg_data);
+ lis3->write(lis3, CTRL_REG3, (ctrl_reg_data &
+ ~(LIS3_IRQ1_MASK | LIS3_IRQ2_MASK)) |
+ (LIS3_IRQ1_DATA_READY | LIS3_IRQ2_DATA_READY));
+ }
+
+ if (lis3_dev.whoami == WAI_3DC) {
+ ctlreg = CTRL_REG4;
+ selftest = CTRL4_ST0;
+ } else {
+ ctlreg = CTRL_REG1;
+ if (lis3_dev.whoami == WAI_12B)
+ selftest = CTRL1_ST;
+ else
+ selftest = CTRL1_STP;
+ }
+
+ lis3->read(lis3, ctlreg, &reg);
+ lis3->write(lis3, ctlreg, (reg | selftest));
msleep(lis3->pwron_delay / lis3lv02d_get_odr());
/* Read directly to avoid axis remap */
@@ -193,7 +273,7 @@ static int lis3lv02d_selftest(struct lis3lv02d *lis3, s16 results[3])
z = lis3->read_data(lis3, OUTZ);
/* back to normal settings */
- lis3->write(lis3, CTRL_REG1, reg);
+ lis3->write(lis3, ctlreg, reg);
msleep(lis3->pwron_delay / lis3lv02d_get_odr());
results[0] = x - lis3->read_data(lis3, OUTX);
@@ -201,13 +281,33 @@ static int lis3lv02d_selftest(struct lis3lv02d *lis3, s16 results[3])
results[2] = z - lis3->read_data(lis3, OUTZ);
ret = 0;
+
+ if (lis3_dev.whoami == WAI_8B) {
+ /* Restore original interrupt configuration */
+ atomic_dec(&lis3_dev.wake_thread);
+ lis3->write(lis3, CTRL_REG3, ctrl_reg_data);
+ lis3->irq_cfg = irq_cfg;
+
+ if ((irq_cfg & LIS3_IRQ1_MASK) &&
+ lis3->data_ready_count[IRQ_LINE0] < 2) {
+ ret = SELFTEST_IRQ;
+ goto fail;
+ }
+
+ if ((irq_cfg & LIS3_IRQ2_MASK) &&
+ lis3->data_ready_count[IRQ_LINE1] < 2) {
+ ret = SELFTEST_IRQ;
+ goto fail;
+ }
+ }
+
if (lis3->pdata) {
int i;
for (i = 0; i < 3; i++) {
/* Check against selftest acceptance limits */
if ((results[i] < lis3->pdata->st_min_limits[i]) ||
(results[i] > lis3->pdata->st_max_limits[i])) {
- ret = -EIO;
+ ret = SELFTEST_FAIL;
goto fail;
}
}
@@ -219,10 +319,46 @@ fail:
return ret;
}
+/*
+ * The order of registers in this list determines the order of the restore
+ * process. It is probably best to set the interrupt enable register last,
+ * after all other configuration.
+ */
+static u8 lis3_wai8_regs[] = { FF_WU_CFG_1, FF_WU_THS_1, FF_WU_DURATION_1,
+ FF_WU_CFG_2, FF_WU_THS_2, FF_WU_DURATION_2,
+ CLICK_CFG, CLICK_SRC, CLICK_THSY_X, CLICK_THSZ,
+ CLICK_TIMELIMIT, CLICK_LATENCY, CLICK_WINDOW,
+ CTRL_REG1, CTRL_REG2, CTRL_REG3};
+
+static u8 lis3_wai12_regs[] = {FF_WU_CFG, FF_WU_THS_L, FF_WU_THS_H,
+ FF_WU_DURATION, DD_CFG, DD_THSI_L, DD_THSI_H,
+ DD_THSE_L, DD_THSE_H,
+ CTRL_REG1, CTRL_REG3, CTRL_REG2};
+
+static inline void lis3_context_save(struct lis3lv02d *lis3)
+{
+ int i;
+ for (i = 0; i < lis3->regs_size; i++)
+ lis3->read(lis3, lis3->regs[i], &lis3->reg_cache[i]);
+ lis3->regs_stored = true;
+}
+
+static inline void lis3_context_restore(struct lis3lv02d *lis3)
+{
+ int i;
+ if (lis3->regs_stored)
+ for (i = 0; i < lis3->regs_size; i++)
+ lis3->write(lis3, lis3->regs[i], lis3->reg_cache[i]);
+}
+
void lis3lv02d_poweroff(struct lis3lv02d *lis3)
{
+ if (lis3->reg_ctrl)
+ lis3_context_save(lis3);
/* disable X,Y,Z axis and power down */
lis3->write(lis3, CTRL_REG1, 0x00);
+ if (lis3->reg_ctrl)
+ lis3->reg_ctrl(lis3, LIS3_REG_OFF);
}
EXPORT_SYMBOL_GPL(lis3lv02d_poweroff);
@@ -232,19 +368,24 @@ void lis3lv02d_poweron(struct lis3lv02d *lis3)
lis3->init(lis3);
- /* LIS3 power on delay is quite long */
- msleep(lis3->pwron_delay / lis3lv02d_get_odr());
-
/*
* Common configuration
* BDU: (12 bits sensors only) LSB and MSB values are not updated until
* both have been read. So the value read will always be correct.
+ * Set BOOT bit to refresh factory tuning values.
*/
- if (lis3->whoami == WAI_12B) {
- lis3->read(lis3, CTRL_REG2, &reg);
- reg |= CTRL2_BDU;
- lis3->write(lis3, CTRL_REG2, reg);
- }
+ lis3->read(lis3, CTRL_REG2, &reg);
+ if (lis3->whoami == WAI_12B)
+ reg |= CTRL2_BDU | CTRL2_BOOT;
+ else
+ reg |= CTRL2_BOOT_8B;
+ lis3->write(lis3, CTRL_REG2, reg);
+
+ /* LIS3 power on delay is quite long */
+ msleep(lis3->pwron_delay / lis3lv02d_get_odr());
+
+ if (lis3->reg_ctrl)
+ lis3_context_restore(lis3);
}
EXPORT_SYMBOL_GPL(lis3lv02d_poweron);
@@ -262,6 +403,27 @@ static void lis3lv02d_joystick_poll(struct input_polled_dev *pidev)
mutex_unlock(&lis3_dev.mutex);
}
+static void lis3lv02d_joystick_open(struct input_polled_dev *pidev)
+{
+ if (lis3_dev.pm_dev)
+ pm_runtime_get_sync(lis3_dev.pm_dev);
+
+ if (lis3_dev.pdata && lis3_dev.whoami == WAI_8B && lis3_dev.idev)
+ atomic_set(&lis3_dev.wake_thread, 1);
+ /*
+ * Update coordinates for the case where poll interval is 0 and
+ * the chip is running purely under interrupt control.
+ */
+ lis3lv02d_joystick_poll(pidev);
+}
+
+static void lis3lv02d_joystick_close(struct input_polled_dev *pidev)
+{
+ atomic_set(&lis3_dev.wake_thread, 0);
+ if (lis3_dev.pm_dev)
+ pm_runtime_put(lis3_dev.pm_dev);
+}
+
static irqreturn_t lis302dl_interrupt(int irq, void *dummy)
{
if (!test_bit(0, &lis3_dev.misc_opened))
@@ -277,8 +439,7 @@ static irqreturn_t lis302dl_interrupt(int irq, void *dummy)
wake_up_interruptible(&lis3_dev.misc_wait);
kill_fasync(&lis3_dev.async_queue, SIGIO, POLL_IN);
out:
- if (lis3_dev.pdata && lis3_dev.whoami == WAI_8B && lis3_dev.idev &&
- lis3_dev.idev->input->users)
+ if (atomic_read(&lis3_dev.wake_thread))
return IRQ_WAKE_THREAD;
return IRQ_HANDLED;
}
@@ -309,44 +470,41 @@ static void lis302dl_interrupt_handle_click(struct lis3lv02d *lis3)
mutex_unlock(&lis3->mutex);
}
-static void lis302dl_interrupt_handle_ff_wu(struct lis3lv02d *lis3)
+static inline void lis302dl_data_ready(struct lis3lv02d *lis3, int index)
{
- u8 wu1_src;
- u8 wu2_src;
-
- lis3->read(lis3, FF_WU_SRC_1, &wu1_src);
- lis3->read(lis3, FF_WU_SRC_2, &wu2_src);
+ int dummy;
- wu1_src = wu1_src & FF_WU_SRC_IA ? wu1_src : 0;
- wu2_src = wu2_src & FF_WU_SRC_IA ? wu2_src : 0;
-
- /* joystick poll is internally protected by the lis3->mutex. */
- if (wu1_src || wu2_src)
- lis3lv02d_joystick_poll(lis3_dev.idev);
+ /* Dummy read to ack interrupt */
+ lis3lv02d_get_xyz(lis3, &dummy, &dummy, &dummy);
+ lis3->data_ready_count[index]++;
}
static irqreturn_t lis302dl_interrupt_thread1_8b(int irq, void *data)
{
-
struct lis3lv02d *lis3 = data;
+ u8 irq_cfg = lis3->irq_cfg & LIS3_IRQ1_MASK;
- if ((lis3->pdata->irq_cfg & LIS3_IRQ1_MASK) == LIS3_IRQ1_CLICK)
+ if (irq_cfg == LIS3_IRQ1_CLICK)
lis302dl_interrupt_handle_click(lis3);
+ else if (unlikely(irq_cfg == LIS3_IRQ1_DATA_READY))
+ lis302dl_data_ready(lis3, IRQ_LINE0);
else
- lis302dl_interrupt_handle_ff_wu(lis3);
+ lis3lv02d_joystick_poll(lis3->idev);
return IRQ_HANDLED;
}
static irqreturn_t lis302dl_interrupt_thread2_8b(int irq, void *data)
{
-
struct lis3lv02d *lis3 = data;
+ u8 irq_cfg = lis3->irq_cfg & LIS3_IRQ2_MASK;
- if ((lis3->pdata->irq_cfg & LIS3_IRQ2_MASK) == LIS3_IRQ2_CLICK)
+ if (irq_cfg == LIS3_IRQ2_CLICK)
lis302dl_interrupt_handle_click(lis3);
+ else if (unlikely(irq_cfg == LIS3_IRQ2_DATA_READY))
+ lis302dl_data_ready(lis3, IRQ_LINE1);
else
- lis302dl_interrupt_handle_ff_wu(lis3);
+ lis3lv02d_joystick_poll(lis3->idev);
return IRQ_HANDLED;
}
@@ -356,6 +514,9 @@ static int lis3lv02d_misc_open(struct inode *inode, struct file *file)
if (test_and_set_bit(0, &lis3_dev.misc_opened))
return -EBUSY; /* already open */
+ if (lis3_dev.pm_dev)
+ pm_runtime_get_sync(lis3_dev.pm_dev);
+
atomic_set(&lis3_dev.count, 0);
return 0;
}
@@ -364,6 +525,8 @@ static int lis3lv02d_misc_release(struct inode *inode, struct file *file)
{
fasync_helper(-1, file, 0, &lis3_dev.async_queue);
clear_bit(0, &lis3_dev.misc_opened); /* release the device */
+ if (lis3_dev.pm_dev)
+ pm_runtime_put(lis3_dev.pm_dev);
return 0;
}
@@ -460,6 +623,8 @@ int lis3lv02d_joystick_enable(void)
return -ENOMEM;
lis3_dev.idev->poll = lis3lv02d_joystick_poll;
+ lis3_dev.idev->open = lis3lv02d_joystick_open;
+ lis3_dev.idev->close = lis3lv02d_joystick_close;
lis3_dev.idev->poll_interval = MDPS_POLL_INTERVAL;
lis3_dev.idev->poll_interval_min = MDPS_POLL_MIN;
lis3_dev.idev->poll_interval_max = MDPS_POLL_MAX;
@@ -473,8 +638,16 @@ int lis3lv02d_joystick_enable(void)
set_bit(EV_ABS, input_dev->evbit);
max_val = (lis3_dev.mdps_max_val * lis3_dev.scale) / LIS3_ACCURACY;
- fuzz = (LIS3_DEFAULT_FUZZ * lis3_dev.scale) / LIS3_ACCURACY;
- flat = (LIS3_DEFAULT_FLAT * lis3_dev.scale) / LIS3_ACCURACY;
+ if (lis3_dev.whoami == WAI_12B) {
+ fuzz = LIS3_DEFAULT_FUZZ_12B;
+ flat = LIS3_DEFAULT_FLAT_12B;
+ } else {
+ fuzz = LIS3_DEFAULT_FUZZ_8B;
+ flat = LIS3_DEFAULT_FLAT_8B;
+ }
+ fuzz = (fuzz * lis3_dev.scale) / LIS3_ACCURACY;
+ flat = (flat * lis3_dev.scale) / LIS3_ACCURACY;
+
input_set_abs_params(input_dev, ABS_X, -max_val, max_val, fuzz, flat);
input_set_abs_params(input_dev, ABS_Y, -max_val, max_val, fuzz, flat);
input_set_abs_params(input_dev, ABS_Z, -max_val, max_val, fuzz, flat);
@@ -512,14 +685,47 @@ void lis3lv02d_joystick_disable(void)
EXPORT_SYMBOL_GPL(lis3lv02d_joystick_disable);
/* Sysfs stuff */
+static void lis3lv02d_sysfs_poweron(struct lis3lv02d *lis3)
+{
+ /*
+ * Sysfs functions are fast visitors, so issue the put call
+ * immediately after the get call. However, keep the chip
+ * running for a while and schedule a delayed suspend. This
+ * way periodic sysfs calls don't suffer from the relatively
+ * long power-up time.
+ */
+
+ if (lis3->pm_dev) {
+ pm_runtime_get_sync(lis3->pm_dev);
+ pm_runtime_put_noidle(lis3->pm_dev);
+ pm_schedule_suspend(lis3->pm_dev, LIS3_SYSFS_POWERDOWN_DELAY);
+ }
+}
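
A usage sketch (the sysfs paths and attribute names below are assumptions): repeated reads such as

	cat /sys/devices/platform/lis3lv02d/position
	cat /sys/devices/platform/lis3lv02d/rate

now power the chip up once via pm_runtime_get_sync(), drop the reference with pm_runtime_put_noidle(), and let pm_schedule_suspend() turn it off LIS3_SYSFS_POWERDOWN_DELAY ms after the last access.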
+
static ssize_t lis3lv02d_selftest_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
- int result;
s16 values[3];
- result = lis3lv02d_selftest(&lis3_dev, values);
- return sprintf(buf, "%s %d %d %d\n", result == 0 ? "OK" : "FAIL",
+ static const char ok[] = "OK";
+ static const char fail[] = "FAIL";
+ static const char irq[] = "FAIL_IRQ";
+ const char *res;
+
+ lis3lv02d_sysfs_poweron(&lis3_dev);
+ switch (lis3lv02d_selftest(&lis3_dev, values)) {
+ case SELFTEST_FAIL:
+ res = fail;
+ break;
+ case SELFTEST_IRQ:
+ res = irq;
+ break;
+ case SELFTEST_OK:
+ default:
+ res = ok;
+ break;
+ }
+ return sprintf(buf, "%s %d %d %d\n", res,
values[0], values[1], values[2]);
}
@@ -528,6 +734,7 @@ static ssize_t lis3lv02d_position_show(struct device *dev,
{
int x, y, z;
+ lis3lv02d_sysfs_poweron(&lis3_dev);
mutex_lock(&lis3_dev.mutex);
lis3lv02d_get_xyz(&lis3_dev, &x, &y, &z);
mutex_unlock(&lis3_dev.mutex);
@@ -537,6 +744,7 @@ static ssize_t lis3lv02d_position_show(struct device *dev,
static ssize_t lis3lv02d_rate_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
+ lis3lv02d_sysfs_poweron(&lis3_dev);
return sprintf(buf, "%d\n", lis3lv02d_get_odr());
}
@@ -549,6 +757,7 @@ static ssize_t lis3lv02d_rate_set(struct device *dev,
if (strict_strtoul(buf, 0, &rate))
return -EINVAL;
+ lis3lv02d_sysfs_poweron(&lis3_dev);
if (lis3lv02d_set_odr(rate))
return -EINVAL;
@@ -585,6 +794,18 @@ int lis3lv02d_remove_fs(struct lis3lv02d *lis3)
{
sysfs_remove_group(&lis3->pdev->dev.kobj, &lis3lv02d_attribute_group);
platform_device_unregister(lis3->pdev);
+ if (lis3->pm_dev) {
+ /* Barrier after the sysfs remove */
+ pm_runtime_barrier(lis3->pm_dev);
+
+ /* SYSFS may have left chip running. Turn off if necessary */
+ if (!pm_runtime_suspended(lis3->pm_dev))
+ lis3lv02d_poweroff(&lis3_dev);
+
+ pm_runtime_disable(lis3->pm_dev);
+ pm_runtime_set_suspended(lis3->pm_dev);
+ }
+ kfree(lis3->reg_cache);
return 0;
}
EXPORT_SYMBOL_GPL(lis3lv02d_remove_fs);
@@ -616,16 +837,16 @@ static void lis3lv02d_8b_configure(struct lis3lv02d *dev,
if (p->wakeup_flags) {
dev->write(dev, FF_WU_CFG_1, p->wakeup_flags);
dev->write(dev, FF_WU_THS_1, p->wakeup_thresh & 0x7f);
- /* default to 2.5ms for now */
- dev->write(dev, FF_WU_DURATION_1, 1);
+ /* pdata value + 1 to keep this backward compatible */
+ dev->write(dev, FF_WU_DURATION_1, p->duration1 + 1);
ctrl2 ^= HP_FF_WU1; /* Xor to keep compatible with old pdata*/
}
if (p->wakeup_flags2) {
dev->write(dev, FF_WU_CFG_2, p->wakeup_flags2);
dev->write(dev, FF_WU_THS_2, p->wakeup_thresh2 & 0x7f);
- /* default to 2.5ms for now */
- dev->write(dev, FF_WU_DURATION_2, 1);
+ /* pdata value + 1 to keep this backward compatible */
+ dev->write(dev, FF_WU_DURATION_2, p->duration2 + 1);
ctrl2 ^= HP_FF_WU2; /* Xor to keep compatible with old pdata*/
}
/* Configure hipass filters */
@@ -635,8 +856,8 @@ static void lis3lv02d_8b_configure(struct lis3lv02d *dev,
err = request_threaded_irq(p->irq2,
NULL,
lis302dl_interrupt_thread2_8b,
- IRQF_TRIGGER_RISING |
- IRQF_ONESHOT,
+ IRQF_TRIGGER_RISING | IRQF_ONESHOT |
+ (p->irq_flags2 & IRQF_TRIGGER_MASK),
DRIVER_NAME, &lis3_dev);
if (err < 0)
printk(KERN_ERR DRIVER_NAME
@@ -652,6 +873,7 @@ int lis3lv02d_init_device(struct lis3lv02d *dev)
{
int err;
irq_handler_t thread_fn;
+ int irq_flags = 0;
dev->whoami = lis3lv02d_read_8(dev, WHO_AM_I);
@@ -664,6 +886,8 @@ int lis3lv02d_init_device(struct lis3lv02d *dev)
dev->odrs = lis3_12_rates;
dev->odr_mask = CTRL1_DF0 | CTRL1_DF1;
dev->scale = LIS3_SENSITIVITY_12B;
+ dev->regs = lis3_wai12_regs;
+ dev->regs_size = ARRAY_SIZE(lis3_wai12_regs);
break;
case WAI_8B:
printk(KERN_INFO DRIVER_NAME ": 8 bits sensor found\n");
@@ -673,6 +897,17 @@ int lis3lv02d_init_device(struct lis3lv02d *dev)
dev->odrs = lis3_8_rates;
dev->odr_mask = CTRL1_DR;
dev->scale = LIS3_SENSITIVITY_8B;
+ dev->regs = lis3_wai8_regs;
+ dev->regs_size = ARRAY_SIZE(lis3_wai8_regs);
+ break;
+ case WAI_3DC:
+ printk(KERN_INFO DRIVER_NAME ": 8 bits 3DC sensor found\n");
+ dev->read_data = lis3lv02d_read_8;
+ dev->mdps_max_val = 128;
+ dev->pwron_delay = LIS3_PWRON_DELAY_WAI_8B;
+ dev->odrs = lis3_3dc_rates;
+ dev->odr_mask = CTRL1_ODR0|CTRL1_ODR1|CTRL1_ODR2|CTRL1_ODR3;
+ dev->scale = LIS3_SENSITIVITY_8B;
break;
default:
printk(KERN_ERR DRIVER_NAME
@@ -680,11 +915,25 @@ int lis3lv02d_init_device(struct lis3lv02d *dev)
return -EINVAL;
}
+ dev->reg_cache = kzalloc(max(sizeof(lis3_wai8_regs),
+ sizeof(lis3_wai12_regs)), GFP_KERNEL);
+
+ if (dev->reg_cache == NULL) {
+ printk(KERN_ERR DRIVER_NAME "out of memory\n");
+ return -ENOMEM;
+ }
+
mutex_init(&dev->mutex);
+ atomic_set(&dev->wake_thread, 0);
lis3lv02d_add_fs(dev);
lis3lv02d_poweron(dev);
+ if (dev->pm_dev) {
+ pm_runtime_set_active(dev->pm_dev);
+ pm_runtime_enable(dev->pm_dev);
+ }
+
if (lis3lv02d_joystick_enable())
printk(KERN_ERR DRIVER_NAME ": joystick initialization failed\n");
@@ -696,8 +945,14 @@ int lis3lv02d_init_device(struct lis3lv02d *dev)
if (dev->whoami == WAI_8B)
lis3lv02d_8b_configure(dev, p);
+ irq_flags = p->irq_flags1 & IRQF_TRIGGER_MASK;
+
+ dev->irq_cfg = p->irq_cfg;
if (p->irq_cfg)
dev->write(dev, CTRL_REG3, p->irq_cfg);
+
+ if (p->default_rate)
+ lis3lv02d_set_odr(p->default_rate);
}
/* bail if we did not get an IRQ from the bus layer */
@@ -725,7 +980,8 @@ int lis3lv02d_init_device(struct lis3lv02d *dev)
err = request_threaded_irq(dev->irq, lis302dl_interrupt,
thread_fn,
- IRQF_TRIGGER_RISING | IRQF_ONESHOT,
+ IRQF_TRIGGER_RISING | IRQF_ONESHOT |
+ irq_flags,
DRIVER_NAME, &lis3_dev);
if (err < 0) {
diff --git a/drivers/hwmon/lis3lv02d.h b/drivers/hwmon/lis3lv02d.h
index 854091380e33..a1939589eb2c 100644
--- a/drivers/hwmon/lis3lv02d.h
+++ b/drivers/hwmon/lis3lv02d.h
@@ -20,6 +20,7 @@
*/
#include <linux/platform_device.h>
#include <linux/input-polldev.h>
+#include <linux/regulator/consumer.h>
/*
* This driver tries to support the "digital" accelerometer chips from
@@ -45,6 +46,7 @@ enum lis3_reg {
CTRL_REG1 = 0x20,
CTRL_REG2 = 0x21,
CTRL_REG3 = 0x22,
+ CTRL_REG4 = 0x23,
HP_FILTER_RESET = 0x23,
STATUS_REG = 0x27,
OUTX_L = 0x28,
@@ -93,6 +95,7 @@ enum lis3lv02d_reg {
};
enum lis3_who_am_i {
+ WAI_3DC = 0x33, /* 8 bits: LIS3DC, HP3DC */
WAI_12B = 0x3A, /* 12 bits: LIS3LV02D[LQ]... */
WAI_8B = 0x3B, /* 8 bits: LIS[23]02D[LQ]... */
WAI_6B = 0x52, /* 6 bits: LIS331DLF - not supported */
@@ -118,6 +121,13 @@ enum lis3lv02d_ctrl1_8b {
CTRL1_DR = 0x80,
};
+enum lis3lv02d_ctrl1_3dc {
+ CTRL1_ODR0 = 0x10,
+ CTRL1_ODR1 = 0x20,
+ CTRL1_ODR2 = 0x40,
+ CTRL1_ODR3 = 0x80,
+};
+
enum lis3lv02d_ctrl2 {
CTRL2_DAS = 0x01,
CTRL2_SIM = 0x02,
@@ -129,9 +139,18 @@ enum lis3lv02d_ctrl2 {
CTRL2_FS = 0x80, /* Full Scale selection */
};
+enum lis3lv02d_ctrl4_3dc {
+ CTRL4_SIM = 0x01,
+ CTRL4_ST0 = 0x02,
+ CTRL4_ST1 = 0x04,
+ CTRL4_FS0 = 0x10,
+ CTRL4_FS1 = 0x20,
+};
+
enum lis302d_ctrl2 {
HP_FF_WU2 = 0x08,
HP_FF_WU1 = 0x04,
+ CTRL2_BOOT_8B = 0x40,
};
enum lis3lv02d_ctrl3 {
@@ -206,19 +225,33 @@ enum lis3lv02d_click_src_8b {
CLICK_IA = 0x40,
};
-struct axis_conversion {
- s8 x;
- s8 y;
- s8 z;
+enum lis3lv02d_reg_state {
+ LIS3_REG_OFF = 0x00,
+ LIS3_REG_ON = 0x01,
+};
+
+union axis_conversion {
+ struct {
+ int x, y, z;
+ };
+ int as_array[3];
+
};
struct lis3lv02d {
void *bus_priv; /* used by the bus layer only */
+ struct device *pm_dev; /* for pm_runtime purposes */
int (*init) (struct lis3lv02d *lis3);
int (*write) (struct lis3lv02d *lis3, int reg, u8 val);
int (*read) (struct lis3lv02d *lis3, int reg, u8 *ret);
+ int (*blkread) (struct lis3lv02d *lis3, int reg, int len, u8 *ret);
+ int (*reg_ctrl) (struct lis3lv02d *lis3, bool state);
int *odrs; /* Supported output data rates */
+ u8 *regs; /* Regs to store / restore */
+ int regs_size;
+ u8 *reg_cache;
+ bool regs_stored;
u8 odr_mask; /* ODR bit mask */
u8 whoami; /* indicates measurement precision */
s16 (*read_data) (struct lis3lv02d *lis3, int reg);
@@ -231,14 +264,18 @@ struct lis3lv02d {
struct input_polled_dev *idev; /* input device */
struct platform_device *pdev; /* platform device */
+ struct regulator_bulk_data regulators[2];
atomic_t count; /* interrupt count after last read */
- struct axis_conversion ac; /* hw -> logical axis */
+ union axis_conversion ac; /* hw -> logical axis */
int mapped_btns[3];
u32 irq; /* IRQ number */
struct fasync_struct *async_queue; /* queue for the misc device */
wait_queue_head_t misc_wait; /* Wait queue for the misc device */
unsigned long misc_opened; /* bit0: whether the device is open */
+ int data_ready_count[2];
+ atomic_t wake_thread;
+ unsigned char irq_cfg;
struct lis3lv02d_platform_data *pdata; /* for passing board config */
struct mutex mutex; /* Serialize poll and selftest */
diff --git a/drivers/hwmon/lis3lv02d_i2c.c b/drivers/hwmon/lis3lv02d_i2c.c
index 8e5933b72d19..8853afce85ce 100644
--- a/drivers/hwmon/lis3lv02d_i2c.c
+++ b/drivers/hwmon/lis3lv02d_i2c.c
@@ -29,10 +29,30 @@
#include <linux/init.h>
#include <linux/err.h>
#include <linux/i2c.h>
+#include <linux/pm_runtime.h>
+#include <linux/delay.h>
#include "lis3lv02d.h"
#define DRV_NAME "lis3lv02d_i2c"
+static const char reg_vdd[] = "Vdd";
+static const char reg_vdd_io[] = "Vdd_IO";
+
+static int lis3_reg_ctrl(struct lis3lv02d *lis3, bool state)
+{
+ int ret;
+ if (state == LIS3_REG_OFF) {
+ ret = regulator_bulk_disable(ARRAY_SIZE(lis3->regulators),
+ lis3->regulators);
+ } else {
+ ret = regulator_bulk_enable(ARRAY_SIZE(lis3->regulators),
+ lis3->regulators);
+ /* Chip needs time to wake up. Not mentioned in the datasheet. */
+ usleep_range(10000, 20000);
+ }
+ return ret;
+}
+
static inline s32 lis3_i2c_write(struct lis3lv02d *lis3, int reg, u8 value)
{
struct i2c_client *c = lis3->bus_priv;
@@ -46,24 +66,38 @@ static inline s32 lis3_i2c_read(struct lis3lv02d *lis3, int reg, u8 *v)
return 0;
}
+static inline s32 lis3_i2c_blockread(struct lis3lv02d *lis3, int reg, int len,
+ u8 *v)
+{
+ struct i2c_client *c = lis3->bus_priv;
+ reg |= (1 << 7); /* Bit 7 enables register address auto-increment */
+ return i2c_smbus_read_i2c_block_data(c, reg, len, v);
+}
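
For context, a usage sketch of the block-read path added here (not part of the patch): with the auto-increment bit set, a single call such as blkread(lis3, OUTX_L, 6, buf) returns the six output registers starting at OUTX_L in buf[0..5], which is exactly how the 12-bit branch of lis3lv02d_get_xyz() consumes them as three little-endian 16-bit values.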
+
static int lis3_i2c_init(struct lis3lv02d *lis3)
{
u8 reg;
int ret;
+ if (lis3->reg_ctrl)
+ lis3_reg_ctrl(lis3, LIS3_REG_ON);
+
+ lis3->read(lis3, WHO_AM_I, &reg);
+ if (reg != lis3->whoami)
+ printk(KERN_ERR "lis3: power on failure\n");
+
/* power up the device */
ret = lis3->read(lis3, CTRL_REG1, &reg);
if (ret < 0)
return ret;
- reg |= CTRL1_PD0;
+ reg |= CTRL1_PD0 | CTRL1_Xen | CTRL1_Yen | CTRL1_Zen;
return lis3->write(lis3, CTRL_REG1, reg);
}
/* Default axis mapping but it can be overwritten by platform data */
-static struct axis_conversion lis3lv02d_axis_map = { LIS3_DEV_X,
- LIS3_DEV_Y,
- LIS3_DEV_Z };
+static union axis_conversion lis3lv02d_axis_map =
+ { .as_array = { LIS3_DEV_X, LIS3_DEV_Y, LIS3_DEV_Z } };
static int __devinit lis3lv02d_i2c_probe(struct i2c_client *client,
const struct i2c_device_id *id)
@@ -72,6 +106,15 @@ static int __devinit lis3lv02d_i2c_probe(struct i2c_client *client,
struct lis3lv02d_platform_data *pdata = client->dev.platform_data;
if (pdata) {
+ /* Regulator control is optional */
+ if (pdata->driver_features & LIS3_USE_REGULATOR_CTRL)
+ lis3_dev.reg_ctrl = lis3_reg_ctrl;
+
+ if ((pdata->driver_features & LIS3_USE_BLOCK_READ) &&
+ (i2c_check_functionality(client->adapter,
+ I2C_FUNC_SMBUS_I2C_BLOCK)))
+ lis3_dev.blkread = lis3_i2c_blockread;
+
if (pdata->axis_x)
lis3lv02d_axis_map.x = pdata->axis_x;
@@ -88,6 +131,16 @@ static int __devinit lis3lv02d_i2c_probe(struct i2c_client *client,
goto fail;
}
+ if (lis3_dev.reg_ctrl) {
+ lis3_dev.regulators[0].supply = reg_vdd;
+ lis3_dev.regulators[1].supply = reg_vdd_io;
+ ret = regulator_bulk_get(&client->dev,
+ ARRAY_SIZE(lis3_dev.regulators),
+ lis3_dev.regulators);
+ if (ret < 0)
+ goto fail;
+ }
+
lis3_dev.pdata = pdata;
lis3_dev.bus_priv = client;
lis3_dev.init = lis3_i2c_init;
@@ -95,10 +148,24 @@ static int __devinit lis3lv02d_i2c_probe(struct i2c_client *client,
lis3_dev.write = lis3_i2c_write;
lis3_dev.irq = client->irq;
lis3_dev.ac = lis3lv02d_axis_map;
+ lis3_dev.pm_dev = &client->dev;
i2c_set_clientdata(client, &lis3_dev);
+
+ /* Keep the chip powered for the duration of the init call */
+ if (lis3_dev.reg_ctrl)
+ lis3_reg_ctrl(&lis3_dev, LIS3_REG_ON);
+
ret = lis3lv02d_init_device(&lis3_dev);
+
+ if (lis3_dev.reg_ctrl)
+ lis3_reg_ctrl(&lis3_dev, LIS3_REG_OFF);
+
+ if (ret == 0)
+ return 0;
fail:
+ if (pdata && pdata->release_resources)
+ pdata->release_resources();
return ret;
}
@@ -111,14 +178,18 @@ static int __devexit lis3lv02d_i2c_remove(struct i2c_client *client)
pdata->release_resources();
lis3lv02d_joystick_disable();
- lis3lv02d_poweroff(lis3);
+ lis3lv02d_remove_fs(&lis3_dev);
- return lis3lv02d_remove_fs(&lis3_dev);
+ if (lis3_dev.reg_ctrl)
+ regulator_bulk_free(ARRAY_SIZE(lis3->regulators),
+ lis3_dev.regulators);
+ return 0;
}
-#ifdef CONFIG_PM
-static int lis3lv02d_i2c_suspend(struct i2c_client *client, pm_message_t mesg)
+#ifdef CONFIG_PM_SLEEP
+static int lis3lv02d_i2c_suspend(struct device *dev)
{
+ struct i2c_client *client = container_of(dev, struct i2c_client, dev);
struct lis3lv02d *lis3 = i2c_get_clientdata(client);
if (!lis3->pdata || !lis3->pdata->wakeup_flags)
@@ -126,24 +197,43 @@ static int lis3lv02d_i2c_suspend(struct i2c_client *client, pm_message_t mesg)
return 0;
}
-static int lis3lv02d_i2c_resume(struct i2c_client *client)
+static int lis3lv02d_i2c_resume(struct device *dev)
{
+ struct i2c_client *client = container_of(dev, struct i2c_client, dev);
struct lis3lv02d *lis3 = i2c_get_clientdata(client);
- if (!lis3->pdata || !lis3->pdata->wakeup_flags)
+ /*
+ * The runtime PM documentation says that devices should always
+ * be powered on at resume. Runtime PM turns them off again after
+ * the system-wide resume is complete.
+ */
+ if (!lis3->pdata || !lis3->pdata->wakeup_flags ||
+ pm_runtime_suspended(dev))
lis3lv02d_poweron(lis3);
+
+ return 0;
+}
+#endif /* CONFIG_PM_SLEEP */
+
+#ifdef CONFIG_PM_RUNTIME
+static int lis3_i2c_runtime_suspend(struct device *dev)
+{
+ struct i2c_client *client = container_of(dev, struct i2c_client, dev);
+ struct lis3lv02d *lis3 = i2c_get_clientdata(client);
+
+ lis3lv02d_poweroff(lis3);
return 0;
}
-static void lis3lv02d_i2c_shutdown(struct i2c_client *client)
+static int lis3_i2c_runtime_resume(struct device *dev)
{
- lis3lv02d_i2c_suspend(client, PMSG_SUSPEND);
+ struct i2c_client *client = container_of(dev, struct i2c_client, dev);
+ struct lis3lv02d *lis3 = i2c_get_clientdata(client);
+
+ lis3lv02d_poweron(lis3);
+ return 0;
}
-#else
-#define lis3lv02d_i2c_suspend NULL
-#define lis3lv02d_i2c_resume NULL
-#define lis3lv02d_i2c_shutdown NULL
-#endif
+#endif /* CONFIG_PM_RUNTIME */
static const struct i2c_device_id lis3lv02d_id[] = {
{"lis3lv02d", 0 },
@@ -152,14 +242,20 @@ static const struct i2c_device_id lis3lv02d_id[] = {
MODULE_DEVICE_TABLE(i2c, lis3lv02d_id);
+static const struct dev_pm_ops lis3_pm_ops = {
+ SET_SYSTEM_SLEEP_PM_OPS(lis3lv02d_i2c_suspend,
+ lis3lv02d_i2c_resume)
+ SET_RUNTIME_PM_OPS(lis3_i2c_runtime_suspend,
+ lis3_i2c_runtime_resume,
+ NULL)
+};
+
static struct i2c_driver lis3lv02d_i2c_driver = {
.driver = {
.name = DRV_NAME,
.owner = THIS_MODULE,
+ .pm = &lis3_pm_ops,
},
- .suspend = lis3lv02d_i2c_suspend,
- .shutdown = lis3lv02d_i2c_shutdown,
- .resume = lis3lv02d_i2c_resume,
.probe = lis3lv02d_i2c_probe,
.remove = __devexit_p(lis3lv02d_i2c_remove),
.id_table = lis3lv02d_id,
diff --git a/drivers/hwmon/lis3lv02d_spi.c b/drivers/hwmon/lis3lv02d_spi.c
index b9be5e3a22b3..2549de1de4e2 100644
--- a/drivers/hwmon/lis3lv02d_spi.c
+++ b/drivers/hwmon/lis3lv02d_spi.c
@@ -50,11 +50,12 @@ static int lis3_spi_init(struct lis3lv02d *lis3)
if (ret < 0)
return ret;
- reg |= CTRL1_PD0;
+ reg |= CTRL1_PD0 | CTRL1_Xen | CTRL1_Yen | CTRL1_Zen;
return lis3->write(lis3, CTRL_REG1, reg);
}
-static struct axis_conversion lis3lv02d_axis_normal = { 1, 2, 3 };
+static union axis_conversion lis3lv02d_axis_normal =
+ { .as_array = { 1, 2, 3 } };
static int __devinit lis302dl_spi_probe(struct spi_device *spi)
{
diff --git a/drivers/hwmon/lm75.c b/drivers/hwmon/lm75.c
index ab5b87a81677..f36eb80d227f 100644
--- a/drivers/hwmon/lm75.c
+++ b/drivers/hwmon/lm75.c
@@ -1,22 +1,22 @@
/*
- lm75.c - Part of lm_sensors, Linux kernel modules for hardware
- monitoring
- Copyright (c) 1998, 1999 Frodo Looijaard <frodol@dds.nl>
-
- This program is free software; you can redistribute it and/or modify
- it under the terms of the GNU General Public License as published by
- the Free Software Foundation; either version 2 of the License, or
- (at your option) any later version.
-
- This program is distributed in the hope that it will be useful,
- but WITHOUT ANY WARRANTY; without even the implied warranty of
- MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- GNU General Public License for more details.
-
- You should have received a copy of the GNU General Public License
- along with this program; if not, write to the Free Software
- Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
-*/
+ * lm75.c - Part of lm_sensors, Linux kernel modules for hardware
+ * monitoring
+ * Copyright (c) 1998, 1999 Frodo Looijaard <frodol@dds.nl>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ */
#include <linux/module.h>
#include <linux/init.h>
@@ -103,7 +103,12 @@ static ssize_t set_temp(struct device *dev, struct device_attribute *da,
struct i2c_client *client = to_i2c_client(dev);
struct lm75_data *data = i2c_get_clientdata(client);
int nr = attr->index;
- long temp = simple_strtol(buf, NULL, 10);
+ long temp;
+ int error;
+
+ error = strict_strtol(buf, 10, &temp);
+ if (error)
+ return error;
mutex_lock(&data->update_lock);
data->temp[nr] = LM75_TEMP_TO_REG(temp);
@@ -335,9 +340,11 @@ static struct i2c_driver lm75_driver = {
/* register access */
-/* All registers are word-sized, except for the configuration register.
- LM75 uses a high-byte first convention, which is exactly opposite to
- the SMBus standard. */
+/*
+ * All registers are word-sized, except for the configuration register.
+ * LM75 uses a high-byte first convention, which is exactly opposite to
+ * the SMBus standard.
+ */
static int lm75_read_value(struct i2c_client *client, u8 reg)
{
int value;
diff --git a/drivers/hwmon/lm85.c b/drivers/hwmon/lm85.c
index b3841a615595..1e229847f37a 100644
--- a/drivers/hwmon/lm85.c
+++ b/drivers/hwmon/lm85.c
@@ -64,9 +64,12 @@ enum chips {
#define LM85_REG_VERSTEP 0x3f
#define ADT7468_REG_CFG5 0x7c
-#define ADT7468_OFF64 0x01
+#define ADT7468_OFF64 (1 << 0)
+#define ADT7468_HFPWM (1 << 1)
#define IS_ADT7468_OFF64(data) \
((data)->type == adt7468 && !((data)->cfg5 & ADT7468_OFF64))
+#define IS_ADT7468_HFPWM(data) \
+ ((data)->type == adt7468 && !((data)->cfg5 & ADT7468_HFPWM))
/* These are the recognized values for the above regs */
#define LM85_COMPANY_NATIONAL 0x01
@@ -567,8 +570,14 @@ static ssize_t show_pwm_freq(struct device *dev,
{
int nr = to_sensor_dev_attr(attr)->index;
struct lm85_data *data = lm85_update_device(dev);
- return sprintf(buf, "%d\n", FREQ_FROM_REG(data->freq_map,
- data->pwm_freq[nr]));
+ int freq;
+
+ if (IS_ADT7468_HFPWM(data))
+ freq = 22500;
+ else
+ freq = FREQ_FROM_REG(data->freq_map, data->pwm_freq[nr]);
+
+ return sprintf(buf, "%d\n", freq);
}
static ssize_t set_pwm_freq(struct device *dev,
@@ -580,10 +589,22 @@ static ssize_t set_pwm_freq(struct device *dev,
long val = simple_strtol(buf, NULL, 10);
mutex_lock(&data->update_lock);
- data->pwm_freq[nr] = FREQ_TO_REG(data->freq_map, val);
- lm85_write_value(client, LM85_REG_AFAN_RANGE(nr),
- (data->zone[nr].range << 4)
- | data->pwm_freq[nr]);
+ /* The ADT7468 has a special high-frequency PWM output mode,
+ * where all PWM outputs are driven by a 22.5 kHz clock.
+ * This might confuse the user, but there's not much we can do. */
+ if (data->type == adt7468 && val >= 11300) { /* High freq. mode */
+ data->cfg5 &= ~ADT7468_HFPWM;
+ lm85_write_value(client, ADT7468_REG_CFG5, data->cfg5);
+ } else { /* Low freq. mode */
+ data->pwm_freq[nr] = FREQ_TO_REG(data->freq_map, val);
+ lm85_write_value(client, LM85_REG_AFAN_RANGE(nr),
+ (data->zone[nr].range << 4)
+ | data->pwm_freq[nr]);
+ if (data->type == adt7468) {
+ data->cfg5 |= ADT7468_HFPWM;
+ lm85_write_value(client, ADT7468_REG_CFG5, data->cfg5);
+ }
+ }
mutex_unlock(&data->update_lock);
return count;
}
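The low-frequency branch above relies on FREQ_TO_REG() to pick the table entry closest to the requested frequency. A standalone sketch of that nearest-match lookup, using a placeholder table rather than the driver's real adm1027/lm85 frequency map:

#include <linux/kernel.h>

/* Illustrative values only, not the real frequency map */
static const int example_freq_map[8] = { 10, 15, 23, 30, 38, 47, 61, 94 };

static int example_freq_to_reg(long freq)
{
	int i, best = 0;
	long best_diff = LONG_MAX;

	for (i = 0; i < ARRAY_SIZE(example_freq_map); i++) {
		long diff = abs(example_freq_map[i] - freq);

		if (diff < best_diff) {
			best_diff = diff;
			best = i;
		}
	}
	return best;	/* the register field stores the index into the map */
}

Requests of 11300 Hz and above never reach this lookup on the ADT7468; as the store path above shows, they select the fixed 22.5 kHz high-frequency mode instead.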
@@ -1259,6 +1280,7 @@ static int lm85_probe(struct i2c_client *client,
switch (data->type) {
case adm1027:
case adt7463:
+ case adt7468:
case emc6d100:
case emc6d102:
data->freq_map = adm1027_freq_map;
diff --git a/drivers/hwmon/lm90.c b/drivers/hwmon/lm90.c
index 760ef72eea56..812781c655a7 100644
--- a/drivers/hwmon/lm90.c
+++ b/drivers/hwmon/lm90.c
@@ -28,9 +28,11 @@
* This driver also supports the MAX6657, MAX6658 and MAX6659 sensor
* chips made by Maxim. These chips are similar to the LM86.
* Note that there is no easy way to differentiate between the three
- * variants. The extra address and features of the MAX6659 are not
- * supported by this driver. These chips lack the remote temperature
- * offset feature.
+ * variants. We use the device address to detect the MAX6659, so it will be
+ * detected as a max6657 if it sits at address 0x4c. The extra address
+ * and features of the MAX6659 are only supported if the chip is configured
+ * explicitly as max6659, or if its address is not 0x4c.
+ * These chips lack the remote temperature offset feature.
*
* This driver also supports the MAX6646, MAX6647, MAX6648, MAX6649 and
* MAX6692 chips made by Maxim. These are again similar to the LM86,
@@ -42,6 +44,11 @@
* chips. The MAX6680 and MAX6681 only differ in the pinout so they can
* be treated identically.
*
+ * This driver also supports the MAX6695 and MAX6696, two other sensor
+ * chips made by Maxim. These are also quite similar to other Maxim
+ * chips, but support three temperature sensors instead of two. MAX6695
+ * and MAX6696 only differ in the pinout so they can be treated identically.
+ *
* This driver also supports the ADT7461 chip from Analog Devices.
* It's supported in both compatibility and extended mode. It is mostly
* compatible with LM90 except for a data format difference for the
@@ -81,11 +88,11 @@
* Addresses to scan
* Address is fully defined internally and cannot be changed except for
* MAX6659, MAX6680 and MAX6681.
- * LM86, LM89, LM90, LM99, ADM1032, ADM1032-1, ADT7461, MAX6649, MAX6657
- * and MAX6658 have address 0x4c.
+ * LM86, LM89, LM90, LM99, ADM1032, ADM1032-1, ADT7461, MAX6649, MAX6657,
+ * MAX6658 and W83L771 have address 0x4c.
* ADM1032-2, ADT7461-2, LM89-1, LM99-1 and MAX6646 have address 0x4d.
* MAX6647 has address 0x4e.
- * MAX6659 can have address 0x4c, 0x4d or 0x4e (unsupported).
+ * MAX6659 can have address 0x4c, 0x4d or 0x4e.
* MAX6680 and MAX6681 can have address 0x18, 0x19, 0x1a, 0x29, 0x2a, 0x2b,
* 0x4c, 0x4d or 0x4e.
*/
@@ -93,8 +100,8 @@
static const unsigned short normal_i2c[] = {
0x18, 0x19, 0x1a, 0x29, 0x2a, 0x2b, 0x4c, 0x4d, 0x4e, I2C_CLIENT_END };
-enum chips { lm90, adm1032, lm99, lm86, max6657, adt7461, max6680, max6646,
- w83l771 };
+enum chips { lm90, adm1032, lm99, lm86, max6657, max6659, adt7461, max6680,
+ max6646, w83l771, max6696 };
/*
* The LM90 registers
@@ -135,26 +142,30 @@ enum chips { lm90, adm1032, lm99, lm86, max6657, adt7461, max6680, max6646,
#define LM90_REG_R_TCRIT_HYST 0x21
#define LM90_REG_W_TCRIT_HYST 0x21
-/* MAX6646/6647/6649/6657/6658/6659 registers */
+/* MAX6646/6647/6649/6657/6658/6659/6695/6696 registers */
#define MAX6657_REG_R_LOCAL_TEMPL 0x11
+#define MAX6696_REG_R_STATUS2 0x12
+#define MAX6659_REG_R_REMOTE_EMERG 0x16
+#define MAX6659_REG_W_REMOTE_EMERG 0x16
+#define MAX6659_REG_R_LOCAL_EMERG 0x17
+#define MAX6659_REG_W_LOCAL_EMERG 0x17
-/*
- * Device flags
- */
-#define LM90_FLAG_ADT7461_EXT 0x01 /* ADT7461 extended mode */
+#define LM90_DEF_CONVRATE_RVAL 6 /* Def conversion rate register value */
+#define LM90_MAX_CONVRATE_MS 16000 /* Maximum conversion rate in ms */
/*
- * Functions declaration
+ * Device flags
*/
-
-static int lm90_detect(struct i2c_client *client, struct i2c_board_info *info);
-static int lm90_probe(struct i2c_client *client,
- const struct i2c_device_id *id);
-static void lm90_init_client(struct i2c_client *client);
-static void lm90_alert(struct i2c_client *client, unsigned int flag);
-static int lm90_remove(struct i2c_client *client);
-static struct lm90_data *lm90_update_device(struct device *dev);
+#define LM90_FLAG_ADT7461_EXT (1 << 0) /* ADT7461 extended mode */
+/* Device features */
+#define LM90_HAVE_OFFSET (1 << 1) /* temperature offset register */
+#define LM90_HAVE_LOCAL_EXT (1 << 2) /* extended local temperature */
+#define LM90_HAVE_REM_LIMIT_EXT (1 << 3) /* extended remote limit */
+#define LM90_HAVE_EMERGENCY (1 << 4) /* 3rd upper (emergency) limit */
+#define LM90_HAVE_EMERGENCY_ALARM (1 << 5)/* emergency alarm */
+#define LM90_HAVE_TEMP3 (1 << 6) /* 3rd temperature sensor */
+#define LM90_HAVE_BROKEN_ALERT (1 << 7) /* Broken alert */
/*
* Driver data (common to all clients)
@@ -172,25 +183,85 @@ static const struct i2c_device_id lm90_id[] = {
{ "max6649", max6646 },
{ "max6657", max6657 },
{ "max6658", max6657 },
- { "max6659", max6657 },
+ { "max6659", max6659 },
{ "max6680", max6680 },
{ "max6681", max6680 },
+ { "max6695", max6696 },
+ { "max6696", max6696 },
{ "w83l771", w83l771 },
{ }
};
MODULE_DEVICE_TABLE(i2c, lm90_id);
-static struct i2c_driver lm90_driver = {
- .class = I2C_CLASS_HWMON,
- .driver = {
- .name = "lm90",
+/*
+ * chip type specific parameters
+ */
+struct lm90_params {
+ u32 flags; /* Capabilities */
+ u16 alert_alarms; /* Which alarm bits trigger ALERT# */
+ /* Upper 8 bits for max6695/96 */
+ u8 max_convrate; /* Maximum conversion rate register value */
+};
+
+static const struct lm90_params lm90_params[] = {
+ [adm1032] = {
+ .flags = LM90_HAVE_OFFSET | LM90_HAVE_REM_LIMIT_EXT
+ | LM90_HAVE_BROKEN_ALERT,
+ .alert_alarms = 0x7c,
+ .max_convrate = 10,
+ },
+ [adt7461] = {
+ .flags = LM90_HAVE_OFFSET | LM90_HAVE_REM_LIMIT_EXT
+ | LM90_HAVE_BROKEN_ALERT,
+ .alert_alarms = 0x7c,
+ .max_convrate = 10,
+ },
+ [lm86] = {
+ .flags = LM90_HAVE_OFFSET | LM90_HAVE_REM_LIMIT_EXT,
+ .alert_alarms = 0x7b,
+ .max_convrate = 9,
+ },
+ [lm90] = {
+ .flags = LM90_HAVE_OFFSET | LM90_HAVE_REM_LIMIT_EXT,
+ .alert_alarms = 0x7b,
+ .max_convrate = 9,
+ },
+ [lm99] = {
+ .flags = LM90_HAVE_OFFSET | LM90_HAVE_REM_LIMIT_EXT,
+ .alert_alarms = 0x7b,
+ .max_convrate = 9,
+ },
+ [max6646] = {
+ .flags = LM90_HAVE_LOCAL_EXT,
+ .alert_alarms = 0x7c,
+ .max_convrate = 6,
+ },
+ [max6657] = {
+ .flags = LM90_HAVE_LOCAL_EXT,
+ .alert_alarms = 0x7c,
+ .max_convrate = 8,
+ },
+ [max6659] = {
+ .flags = LM90_HAVE_LOCAL_EXT | LM90_HAVE_EMERGENCY,
+ .alert_alarms = 0x7c,
+ .max_convrate = 8,
+ },
+ [max6680] = {
+ .flags = LM90_HAVE_OFFSET,
+ .alert_alarms = 0x7c,
+ .max_convrate = 7,
+ },
+ [max6696] = {
+ .flags = LM90_HAVE_LOCAL_EXT | LM90_HAVE_EMERGENCY
+ | LM90_HAVE_EMERGENCY_ALARM | LM90_HAVE_TEMP3,
+ .alert_alarms = 0x187c,
+ .max_convrate = 6,
+ },
+ [w83l771] = {
+ .flags = LM90_HAVE_OFFSET | LM90_HAVE_REM_LIMIT_EXT,
+ .alert_alarms = 0x7c,
+ .max_convrate = 8,
},
- .probe = lm90_probe,
- .remove = lm90_remove,
- .alert = lm90_alert,
- .id_table = lm90_id,
- .detect = lm90_detect,
- .address_list = normal_i2c,
};
/*
@@ -203,26 +274,268 @@ struct lm90_data {
char valid; /* zero until following fields are valid */
unsigned long last_updated; /* in jiffies */
int kind;
- int flags;
+ u32 flags;
+
+ int update_interval; /* in milliseconds */
u8 config_orig; /* Original configuration register value */
- u8 alert_alarms; /* Which alarm bits trigger ALERT# */
+ u8 convrate_orig; /* Original conversion rate register value */
+ u16 alert_alarms; /* Which alarm bits trigger ALERT# */
+ /* Upper 8 bits for max6695/96 */
+ u8 max_convrate; /* Maximum conversion rate */
/* registers values */
- s8 temp8[4]; /* 0: local low limit
+ s8 temp8[8]; /* 0: local low limit
1: local high limit
2: local critical limit
- 3: remote critical limit */
- s16 temp11[5]; /* 0: remote input
+ 3: remote critical limit
+ 4: local emergency limit (max6659 and max6695/96)
+ 5: remote emergency limit (max6659 and max6695/96)
+ 6: remote 2 critical limit (max6695/96 only)
+ 7: remote 2 emergency limit (max6695/96 only) */
+ s16 temp11[8]; /* 0: remote input
1: remote low limit
2: remote high limit
- 3: remote offset (except max6646 and max6657)
- 4: local input */
+ 3: remote offset (except max6646, max6657/58/59,
+ and max6695/96)
+ 4: local input
+ 5: remote 2 input (max6695/96 only)
+ 6: remote 2 low limit (max6695/96 only)
+ 7: remote 2 high limit (max6695/96 only) */
u8 temp_hyst;
- u8 alarms; /* bitvector */
+ u16 alarms; /* bitvector (upper 8 bits for max6695/96) */
};
/*
+ * Support functions
+ */
+
+/*
+ * The ADM1032 supports PEC but not on write byte transactions, so we need
+ * to explicitly ask for a transaction without PEC.
+ */
+static inline s32 adm1032_write_byte(struct i2c_client *client, u8 value)
+{
+ return i2c_smbus_xfer(client->adapter, client->addr,
+ client->flags & ~I2C_CLIENT_PEC,
+ I2C_SMBUS_WRITE, value, I2C_SMBUS_BYTE, NULL);
+}
+
+/*
+ * It is assumed that client->update_lock is held (unless we are in
+ * detection or initialization steps). This matters when PEC is enabled,
+ * because we don't want the address pointer to change between the write
+ * byte and the read byte transactions.
+ */
+static int lm90_read_reg(struct i2c_client *client, u8 reg, u8 *value)
+{
+ int err;
+
+ if (client->flags & I2C_CLIENT_PEC) {
+ err = adm1032_write_byte(client, reg);
+ if (err >= 0)
+ err = i2c_smbus_read_byte(client);
+ } else
+ err = i2c_smbus_read_byte_data(client, reg);
+
+ if (err < 0) {
+ dev_warn(&client->dev, "Register %#02x read failed (%d)\n",
+ reg, err);
+ return err;
+ }
+ *value = err;
+
+ return 0;
+}
+
+static int lm90_read16(struct i2c_client *client, u8 regh, u8 regl, u16 *value)
+{
+ int err;
+ u8 oldh, newh, l;
+
+ /*
+ * There is a trick here. We have to read two registers to have the
+ * sensor temperature, but we have to beware a conversion could occur
+ * in between the readings. The datasheet says we should either use
+ * the one-shot conversion register, which we don't want to do
+ * (disables hardware monitoring) or monitor the busy bit, which is
+ * impossible (we can't read the values and monitor that bit at the
+ * exact same time). So the solution used here is to read the high
+ * byte once, then the low byte, then the high byte again. If the new
+ * high byte matches the old one, then we have a valid reading. Else
+ * we have to read the low byte again, and now we believe we have a
+ * correct reading.
+ */
+ if ((err = lm90_read_reg(client, regh, &oldh))
+ || (err = lm90_read_reg(client, regl, &l))
+ || (err = lm90_read_reg(client, regh, &newh)))
+ return err;
+ if (oldh != newh) {
+ err = lm90_read_reg(client, regl, &l);
+ if (err)
+ return err;
+ }
+ *value = (newh << 8) | l;
+
+ return 0;
+}
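The comment in lm90_read16() is easier to follow with concrete numbers. Suppose the remote temperature register pair changes from 25.875 degrees C (0x19E0) to 26.0 degrees C (0x1A00) between two SMBus reads:

	naive high-then-low read:  high = 0x19 (old sample), a conversion
	completes, low = 0x00 (new sample)  ->  0x1900 = 25.0 degrees C,
	a value that matches neither sample.

	high/low/high sequence:  the second high read returns 0x1A, which
	differs from the first 0x19, so the low byte is read again and the
	result is 0x1A00 = 26.0 degrees C, a consistent sample.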
+
+/*
+ * client->update_lock must be held when calling this function (unless we are
+ * in detection or initialization steps), and while a remote channel other
+ * than channel 0 is selected. Also, calling code must make sure to re-select
+ * external channel 0 before releasing the lock. This is necessary because
+ * various registers have different meanings as a result of selecting a
+ * non-default remote channel.
+ */
+static inline void lm90_select_remote_channel(struct i2c_client *client,
+ struct lm90_data *data,
+ int channel)
+{
+ u8 config;
+
+ if (data->kind == max6696) {
+ lm90_read_reg(client, LM90_REG_R_CONFIG1, &config);
+ config &= ~0x08;
+ if (channel)
+ config |= 0x08;
+ i2c_smbus_write_byte_data(client, LM90_REG_W_CONFIG1,
+ config);
+ }
+}
+
+/*
+ * Set conversion rate.
+ * client->update_lock must be held when calling this function (unless we are
+ * in detection or initialization steps).
+ */
+static void lm90_set_convrate(struct i2c_client *client, struct lm90_data *data,
+ unsigned int interval)
+{
+ int i;
+ unsigned int update_interval;
+
+ /* Shift calculations to avoid rounding errors */
+ interval <<= 6;
+
+ /* find the nearest update rate */
+ for (i = 0, update_interval = LM90_MAX_CONVRATE_MS << 6;
+ i < data->max_convrate; i++, update_interval >>= 1)
+ if (interval >= update_interval * 3 / 4)
+ break;
+
+ i2c_smbus_write_byte_data(client, LM90_REG_W_CONVRATE, i);
+ data->update_interval = DIV_ROUND_CLOSEST(update_interval, 64);
+}
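A worked example of the search above, assuming LM90_MAX_CONVRATE_MS = 16000 and max_convrate = 8 (max6657/58/59), for a requested interval of 500 ms (register value i selects one conversion every 16000 >> i ms):

	interval << 6 = 32000
	i = 0: update_interval = 1024000, 32000 <  768000 (3/4)  -> continue
	i = 1..4: 512000, 256000, 128000, 64000           -> still too large
	i = 5: update_interval =   32000, 32000 >= 24000 (3/4)   -> break

	LM90_REG_W_CONVRATE is written with 5 (2 Hz) and data->update_interval
	becomes 32000 / 64 = 500 ms, which matches the value the old
	lm90_init_client() used to hard-code.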
+
+static struct lm90_data *lm90_update_device(struct device *dev)
+{
+ struct i2c_client *client = to_i2c_client(dev);
+ struct lm90_data *data = i2c_get_clientdata(client);
+ unsigned long next_update;
+
+ mutex_lock(&data->update_lock);
+
+ next_update = data->last_updated
+ + msecs_to_jiffies(data->update_interval) + 1;
+ if (time_after(jiffies, next_update) || !data->valid) {
+ u8 h, l;
+ u8 alarms;
+
+ dev_dbg(&client->dev, "Updating lm90 data.\n");
+ lm90_read_reg(client, LM90_REG_R_LOCAL_LOW, &data->temp8[0]);
+ lm90_read_reg(client, LM90_REG_R_LOCAL_HIGH, &data->temp8[1]);
+ lm90_read_reg(client, LM90_REG_R_LOCAL_CRIT, &data->temp8[2]);
+ lm90_read_reg(client, LM90_REG_R_REMOTE_CRIT, &data->temp8[3]);
+ lm90_read_reg(client, LM90_REG_R_TCRIT_HYST, &data->temp_hyst);
+
+ if (data->flags & LM90_HAVE_LOCAL_EXT) {
+ lm90_read16(client, LM90_REG_R_LOCAL_TEMP,
+ MAX6657_REG_R_LOCAL_TEMPL,
+ &data->temp11[4]);
+ } else {
+ if (lm90_read_reg(client, LM90_REG_R_LOCAL_TEMP,
+ &h) == 0)
+ data->temp11[4] = h << 8;
+ }
+ lm90_read16(client, LM90_REG_R_REMOTE_TEMPH,
+ LM90_REG_R_REMOTE_TEMPL, &data->temp11[0]);
+
+ if (lm90_read_reg(client, LM90_REG_R_REMOTE_LOWH, &h) == 0) {
+ data->temp11[1] = h << 8;
+ if ((data->flags & LM90_HAVE_REM_LIMIT_EXT)
+ && lm90_read_reg(client, LM90_REG_R_REMOTE_LOWL,
+ &l) == 0)
+ data->temp11[1] |= l;
+ }
+ if (lm90_read_reg(client, LM90_REG_R_REMOTE_HIGHH, &h) == 0) {
+ data->temp11[2] = h << 8;
+ if ((data->flags & LM90_HAVE_REM_LIMIT_EXT)
+ && lm90_read_reg(client, LM90_REG_R_REMOTE_HIGHL,
+ &l) == 0)
+ data->temp11[2] |= l;
+ }
+
+ if (data->flags & LM90_HAVE_OFFSET) {
+ if (lm90_read_reg(client, LM90_REG_R_REMOTE_OFFSH,
+ &h) == 0
+ && lm90_read_reg(client, LM90_REG_R_REMOTE_OFFSL,
+ &l) == 0)
+ data->temp11[3] = (h << 8) | l;
+ }
+ if (data->flags & LM90_HAVE_EMERGENCY) {
+ lm90_read_reg(client, MAX6659_REG_R_LOCAL_EMERG,
+ &data->temp8[4]);
+ lm90_read_reg(client, MAX6659_REG_R_REMOTE_EMERG,
+ &data->temp8[5]);
+ }
+ lm90_read_reg(client, LM90_REG_R_STATUS, &alarms);
+ data->alarms = alarms; /* save as 16 bit value */
+
+ if (data->kind == max6696) {
+ lm90_select_remote_channel(client, data, 1);
+ lm90_read_reg(client, LM90_REG_R_REMOTE_CRIT,
+ &data->temp8[6]);
+ lm90_read_reg(client, MAX6659_REG_R_REMOTE_EMERG,
+ &data->temp8[7]);
+ lm90_read16(client, LM90_REG_R_REMOTE_TEMPH,
+ LM90_REG_R_REMOTE_TEMPL, &data->temp11[5]);
+ if (!lm90_read_reg(client, LM90_REG_R_REMOTE_LOWH, &h))
+ data->temp11[6] = h << 8;
+ if (!lm90_read_reg(client, LM90_REG_R_REMOTE_HIGHH, &h))
+ data->temp11[7] = h << 8;
+ lm90_select_remote_channel(client, data, 0);
+
+ if (!lm90_read_reg(client, MAX6696_REG_R_STATUS2,
+ &alarms))
+ data->alarms |= alarms << 8;
+ }
+
+ /* Re-enable ALERT# output if it was originally enabled and
+ * relevant alarms are all clear */
+ if ((data->config_orig & 0x80) == 0
+ && (data->alarms & data->alert_alarms) == 0) {
+ u8 config;
+
+ lm90_read_reg(client, LM90_REG_R_CONFIG1, &config);
+ if (config & 0x80) {
+ dev_dbg(&client->dev, "Re-enabling ALERT#\n");
+ i2c_smbus_write_byte_data(client,
+ LM90_REG_W_CONFIG1,
+ config & ~0x80);
+ }
+ }
+
+ data->last_updated = jiffies;
+ data->valid = 1;
+ }
+
+ mutex_unlock(&data->update_lock);
+
+ return data;
+}
+
+/*
* Conversions
* For local temperatures and limits, critical limits and the hysteresis
* value, the LM90 uses signed 8-bit values with LSB = 1 degree Celsius.
@@ -377,18 +690,27 @@ static ssize_t show_temp8(struct device *dev, struct device_attribute *devattr,
static ssize_t set_temp8(struct device *dev, struct device_attribute *devattr,
const char *buf, size_t count)
{
- static const u8 reg[4] = {
+ static const u8 reg[8] = {
LM90_REG_W_LOCAL_LOW,
LM90_REG_W_LOCAL_HIGH,
LM90_REG_W_LOCAL_CRIT,
LM90_REG_W_REMOTE_CRIT,
+ MAX6659_REG_W_LOCAL_EMERG,
+ MAX6659_REG_W_REMOTE_EMERG,
+ LM90_REG_W_REMOTE_CRIT,
+ MAX6659_REG_W_REMOTE_EMERG,
};
struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr);
struct i2c_client *client = to_i2c_client(dev);
struct lm90_data *data = i2c_get_clientdata(client);
- long val = simple_strtol(buf, NULL, 10);
int nr = attr->index;
+ long val;
+ int err;
+
+ err = strict_strtol(buf, 10, &val);
+ if (err < 0)
+ return err;
/* +16 degrees offset for temp2 for the LM99 */
if (data->kind == lm99 && attr->index == 3)
@@ -401,7 +723,11 @@ static ssize_t set_temp8(struct device *dev, struct device_attribute *devattr,
data->temp8[nr] = temp_to_u8(val);
else
data->temp8[nr] = temp_to_s8(val);
+
+ lm90_select_remote_channel(client, data, nr >= 6);
i2c_smbus_write_byte_data(client, reg[nr], data->temp8[nr]);
+ lm90_select_remote_channel(client, data, 0);
+
mutex_unlock(&data->update_lock);
return count;
}
@@ -409,7 +735,7 @@ static ssize_t set_temp8(struct device *dev, struct device_attribute *devattr,
static ssize_t show_temp11(struct device *dev, struct device_attribute *devattr,
char *buf)
{
- struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr);
+ struct sensor_device_attribute_2 *attr = to_sensor_dev_attr_2(devattr);
struct lm90_data *data = lm90_update_device(dev);
int temp;
@@ -430,46 +756,58 @@ static ssize_t show_temp11(struct device *dev, struct device_attribute *devattr,
static ssize_t set_temp11(struct device *dev, struct device_attribute *devattr,
const char *buf, size_t count)
{
- static const u8 reg[6] = {
- LM90_REG_W_REMOTE_LOWH,
- LM90_REG_W_REMOTE_LOWL,
- LM90_REG_W_REMOTE_HIGHH,
- LM90_REG_W_REMOTE_HIGHL,
- LM90_REG_W_REMOTE_OFFSH,
- LM90_REG_W_REMOTE_OFFSL,
+ struct {
+ u8 high;
+ u8 low;
+ int channel;
+ } reg[5] = {
+ { LM90_REG_W_REMOTE_LOWH, LM90_REG_W_REMOTE_LOWL, 0 },
+ { LM90_REG_W_REMOTE_HIGHH, LM90_REG_W_REMOTE_HIGHL, 0 },
+ { LM90_REG_W_REMOTE_OFFSH, LM90_REG_W_REMOTE_OFFSL, 0 },
+ { LM90_REG_W_REMOTE_LOWH, LM90_REG_W_REMOTE_LOWL, 1 },
+ { LM90_REG_W_REMOTE_HIGHH, LM90_REG_W_REMOTE_HIGHL, 1 }
};
- struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr);
+ struct sensor_device_attribute_2 *attr = to_sensor_dev_attr_2(devattr);
struct i2c_client *client = to_i2c_client(dev);
struct lm90_data *data = i2c_get_clientdata(client);
- long val = simple_strtol(buf, NULL, 10);
- int nr = attr->index;
+ int nr = attr->nr;
+ int index = attr->index;
+ long val;
+ int err;
+
+ err = strict_strtol(buf, 10, &val);
+ if (err < 0)
+ return err;
/* +16 degrees offset for temp2 for the LM99 */
- if (data->kind == lm99 && attr->index <= 2)
+ if (data->kind == lm99 && index <= 2)
val -= 16000;
mutex_lock(&data->update_lock);
if (data->kind == adt7461)
- data->temp11[nr] = temp_to_u16_adt7461(data, val);
- else if (data->kind == max6657 || data->kind == max6680)
- data->temp11[nr] = temp_to_s8(val) << 8;
+ data->temp11[index] = temp_to_u16_adt7461(data, val);
else if (data->kind == max6646)
- data->temp11[nr] = temp_to_u8(val) << 8;
+ data->temp11[index] = temp_to_u8(val) << 8;
+ else if (data->flags & LM90_HAVE_REM_LIMIT_EXT)
+ data->temp11[index] = temp_to_s16(val);
else
- data->temp11[nr] = temp_to_s16(val);
-
- i2c_smbus_write_byte_data(client, reg[(nr - 1) * 2],
- data->temp11[nr] >> 8);
- if (data->kind != max6657 && data->kind != max6680
- && data->kind != max6646)
- i2c_smbus_write_byte_data(client, reg[(nr - 1) * 2 + 1],
- data->temp11[nr] & 0xff);
+ data->temp11[index] = temp_to_s8(val) << 8;
+
+ lm90_select_remote_channel(client, data, reg[nr].channel);
+ i2c_smbus_write_byte_data(client, reg[nr].high,
+ data->temp11[index] >> 8);
+ if (data->flags & LM90_HAVE_REM_LIMIT_EXT)
+ i2c_smbus_write_byte_data(client, reg[nr].low,
+ data->temp11[index] & 0xff);
+ lm90_select_remote_channel(client, data, 0);
+
mutex_unlock(&data->update_lock);
return count;
}
-static ssize_t show_temphyst(struct device *dev, struct device_attribute *devattr,
+static ssize_t show_temphyst(struct device *dev,
+ struct device_attribute *devattr,
char *buf)
{
struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr);
@@ -495,9 +833,14 @@ static ssize_t set_temphyst(struct device *dev, struct device_attribute *dummy,
{
struct i2c_client *client = to_i2c_client(dev);
struct lm90_data *data = i2c_get_clientdata(client);
- long val = simple_strtol(buf, NULL, 10);
+ long val;
+ int err;
int temp;
+ err = strict_strtol(buf, 10, &val);
+ if (err < 0)
+ return err;
+
mutex_lock(&data->update_lock);
if (data->kind == adt7461)
temp = temp_from_u8_adt7461(data, data->temp8[2]);
@@ -530,16 +873,44 @@ static ssize_t show_alarm(struct device *dev, struct device_attribute
return sprintf(buf, "%d\n", (data->alarms >> bitnr) & 1);
}
-static SENSOR_DEVICE_ATTR(temp1_input, S_IRUGO, show_temp11, NULL, 4);
-static SENSOR_DEVICE_ATTR(temp2_input, S_IRUGO, show_temp11, NULL, 0);
+static ssize_t show_update_interval(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct lm90_data *data = dev_get_drvdata(dev);
+
+ return sprintf(buf, "%u\n", data->update_interval);
+}
+
+static ssize_t set_update_interval(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct i2c_client *client = to_i2c_client(dev);
+ struct lm90_data *data = i2c_get_clientdata(client);
+ unsigned long val;
+ int err;
+
+ err = strict_strtoul(buf, 10, &val);
+ if (err)
+ return err;
+
+ mutex_lock(&data->update_lock);
+ lm90_set_convrate(client, data, val);
+ mutex_unlock(&data->update_lock);
+
+ return count;
+}
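A hypothetical userspace use of the new update_interval attribute; the attribute is created on the I2C client device, so the sysfs path below is only an example and varies per system:

#include <stdio.h>

int main(void)
{
	/* example path; hwmon number and device location differ per system */
	const char *path = "/sys/class/hwmon/hwmon0/device/update_interval";
	FILE *f = fopen(path, "w");

	if (!f) {
		perror(path);
		return 1;
	}
	fprintf(f, "1000\n");	/* request roughly one conversion per second */
	fclose(f);
	return 0;
}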
+
+static SENSOR_DEVICE_ATTR_2(temp1_input, S_IRUGO, show_temp11, NULL, 0, 4);
+static SENSOR_DEVICE_ATTR_2(temp2_input, S_IRUGO, show_temp11, NULL, 0, 0);
static SENSOR_DEVICE_ATTR(temp1_min, S_IWUSR | S_IRUGO, show_temp8,
set_temp8, 0);
-static SENSOR_DEVICE_ATTR(temp2_min, S_IWUSR | S_IRUGO, show_temp11,
- set_temp11, 1);
+static SENSOR_DEVICE_ATTR_2(temp2_min, S_IWUSR | S_IRUGO, show_temp11,
+ set_temp11, 0, 1);
static SENSOR_DEVICE_ATTR(temp1_max, S_IWUSR | S_IRUGO, show_temp8,
set_temp8, 1);
-static SENSOR_DEVICE_ATTR(temp2_max, S_IWUSR | S_IRUGO, show_temp11,
- set_temp11, 2);
+static SENSOR_DEVICE_ATTR_2(temp2_max, S_IWUSR | S_IRUGO, show_temp11,
+ set_temp11, 1, 2);
static SENSOR_DEVICE_ATTR(temp1_crit, S_IWUSR | S_IRUGO, show_temp8,
set_temp8, 2);
static SENSOR_DEVICE_ATTR(temp2_crit, S_IWUSR | S_IRUGO, show_temp8,
@@ -547,8 +918,8 @@ static SENSOR_DEVICE_ATTR(temp2_crit, S_IWUSR | S_IRUGO, show_temp8,
static SENSOR_DEVICE_ATTR(temp1_crit_hyst, S_IWUSR | S_IRUGO, show_temphyst,
set_temphyst, 2);
static SENSOR_DEVICE_ATTR(temp2_crit_hyst, S_IRUGO, show_temphyst, NULL, 3);
-static SENSOR_DEVICE_ATTR(temp2_offset, S_IWUSR | S_IRUGO, show_temp11,
- set_temp11, 3);
+static SENSOR_DEVICE_ATTR_2(temp2_offset, S_IWUSR | S_IRUGO, show_temp11,
+ set_temp11, 2, 3);
/* Individual alarm files */
static SENSOR_DEVICE_ATTR(temp1_crit_alarm, S_IRUGO, show_alarm, NULL, 0);
@@ -561,6 +932,9 @@ static SENSOR_DEVICE_ATTR(temp1_max_alarm, S_IRUGO, show_alarm, NULL, 6);
/* Raw alarm file for compatibility */
static DEVICE_ATTR(alarms, S_IRUGO, show_alarms, NULL);
+static DEVICE_ATTR(update_interval, S_IRUGO | S_IWUSR, show_update_interval,
+ set_update_interval);
+
static struct attribute *lm90_attributes[] = {
&sensor_dev_attr_temp1_input.dev_attr.attr,
&sensor_dev_attr_temp2_input.dev_attr.attr,
@@ -581,6 +955,7 @@ static struct attribute *lm90_attributes[] = {
&sensor_dev_attr_temp1_min_alarm.dev_attr.attr,
&sensor_dev_attr_temp1_max_alarm.dev_attr.attr,
&dev_attr_alarms.attr,
+ &dev_attr_update_interval.attr,
NULL
};
@@ -588,6 +963,86 @@ static const struct attribute_group lm90_group = {
.attrs = lm90_attributes,
};
+/*
+ * Additional attributes for devices with emergency sensors
+ */
+static SENSOR_DEVICE_ATTR(temp1_emergency, S_IWUSR | S_IRUGO, show_temp8,
+ set_temp8, 4);
+static SENSOR_DEVICE_ATTR(temp2_emergency, S_IWUSR | S_IRUGO, show_temp8,
+ set_temp8, 5);
+static SENSOR_DEVICE_ATTR(temp1_emergency_hyst, S_IRUGO, show_temphyst,
+ NULL, 4);
+static SENSOR_DEVICE_ATTR(temp2_emergency_hyst, S_IRUGO, show_temphyst,
+ NULL, 5);
+
+static struct attribute *lm90_emergency_attributes[] = {
+ &sensor_dev_attr_temp1_emergency.dev_attr.attr,
+ &sensor_dev_attr_temp2_emergency.dev_attr.attr,
+ &sensor_dev_attr_temp1_emergency_hyst.dev_attr.attr,
+ &sensor_dev_attr_temp2_emergency_hyst.dev_attr.attr,
+ NULL
+};
+
+static const struct attribute_group lm90_emergency_group = {
+ .attrs = lm90_emergency_attributes,
+};
+
+static SENSOR_DEVICE_ATTR(temp1_emergency_alarm, S_IRUGO, show_alarm, NULL, 15);
+static SENSOR_DEVICE_ATTR(temp2_emergency_alarm, S_IRUGO, show_alarm, NULL, 13);
+
+static struct attribute *lm90_emergency_alarm_attributes[] = {
+ &sensor_dev_attr_temp1_emergency_alarm.dev_attr.attr,
+ &sensor_dev_attr_temp2_emergency_alarm.dev_attr.attr,
+ NULL
+};
+
+static const struct attribute_group lm90_emergency_alarm_group = {
+ .attrs = lm90_emergency_alarm_attributes,
+};
+
+/*
+ * Additional attributes for devices with 3 temperature sensors
+ */
+static SENSOR_DEVICE_ATTR_2(temp3_input, S_IRUGO, show_temp11, NULL, 0, 5);
+static SENSOR_DEVICE_ATTR_2(temp3_min, S_IWUSR | S_IRUGO, show_temp11,
+ set_temp11, 3, 6);
+static SENSOR_DEVICE_ATTR_2(temp3_max, S_IWUSR | S_IRUGO, show_temp11,
+ set_temp11, 4, 7);
+static SENSOR_DEVICE_ATTR(temp3_crit, S_IWUSR | S_IRUGO, show_temp8,
+ set_temp8, 6);
+static SENSOR_DEVICE_ATTR(temp3_crit_hyst, S_IRUGO, show_temphyst, NULL, 6);
+static SENSOR_DEVICE_ATTR(temp3_emergency, S_IWUSR | S_IRUGO, show_temp8,
+ set_temp8, 7);
+static SENSOR_DEVICE_ATTR(temp3_emergency_hyst, S_IRUGO, show_temphyst,
+ NULL, 7);
+
+static SENSOR_DEVICE_ATTR(temp3_crit_alarm, S_IRUGO, show_alarm, NULL, 9);
+static SENSOR_DEVICE_ATTR(temp3_fault, S_IRUGO, show_alarm, NULL, 10);
+static SENSOR_DEVICE_ATTR(temp3_min_alarm, S_IRUGO, show_alarm, NULL, 11);
+static SENSOR_DEVICE_ATTR(temp3_max_alarm, S_IRUGO, show_alarm, NULL, 12);
+static SENSOR_DEVICE_ATTR(temp3_emergency_alarm, S_IRUGO, show_alarm, NULL, 14);
+
+static struct attribute *lm90_temp3_attributes[] = {
+ &sensor_dev_attr_temp3_input.dev_attr.attr,
+ &sensor_dev_attr_temp3_min.dev_attr.attr,
+ &sensor_dev_attr_temp3_max.dev_attr.attr,
+ &sensor_dev_attr_temp3_crit.dev_attr.attr,
+ &sensor_dev_attr_temp3_crit_hyst.dev_attr.attr,
+ &sensor_dev_attr_temp3_emergency.dev_attr.attr,
+ &sensor_dev_attr_temp3_emergency_hyst.dev_attr.attr,
+
+ &sensor_dev_attr_temp3_fault.dev_attr.attr,
+ &sensor_dev_attr_temp3_min_alarm.dev_attr.attr,
+ &sensor_dev_attr_temp3_max_alarm.dev_attr.attr,
+ &sensor_dev_attr_temp3_crit_alarm.dev_attr.attr,
+ &sensor_dev_attr_temp3_emergency_alarm.dev_attr.attr,
+ NULL
+};
+
+static const struct attribute_group lm90_temp3_group = {
+ .attrs = lm90_temp3_attributes,
+};
+
/* pec used for ADM1032 only */
static ssize_t show_pec(struct device *dev, struct device_attribute *dummy,
char *buf)
@@ -600,7 +1055,12 @@ static ssize_t set_pec(struct device *dev, struct device_attribute *dummy,
const char *buf, size_t count)
{
struct i2c_client *client = to_i2c_client(dev);
- long val = simple_strtol(buf, NULL, 10);
+ long val;
+ int err;
+
+ err = strict_strtol(buf, 10, &val);
+ if (err < 0)
+ return err;
switch (val) {
case 0:
@@ -622,40 +1082,6 @@ static DEVICE_ATTR(pec, S_IWUSR | S_IRUGO, show_pec, set_pec);
* Real code
*/
-/* The ADM1032 supports PEC but not on write byte transactions, so we need
- to explicitly ask for a transaction without PEC. */
-static inline s32 adm1032_write_byte(struct i2c_client *client, u8 value)
-{
- return i2c_smbus_xfer(client->adapter, client->addr,
- client->flags & ~I2C_CLIENT_PEC,
- I2C_SMBUS_WRITE, value, I2C_SMBUS_BYTE, NULL);
-}
-
-/* It is assumed that client->update_lock is held (unless we are in
- detection or initialization steps). This matters when PEC is enabled,
- because we don't want the address pointer to change between the write
- byte and the read byte transactions. */
-static int lm90_read_reg(struct i2c_client* client, u8 reg, u8 *value)
-{
- int err;
-
- if (client->flags & I2C_CLIENT_PEC) {
- err = adm1032_write_byte(client, reg);
- if (err >= 0)
- err = i2c_smbus_read_byte(client);
- } else
- err = i2c_smbus_read_byte_data(client, reg);
-
- if (err < 0) {
- dev_warn(&client->dev, "Register %#02x read failed (%d)\n",
- reg, err);
- return err;
- }
- *value = err;
-
- return 0;
-}
-
/* Return 0 if detection is successful, -ENODEV otherwise */
static int lm90_detect(struct i2c_client *new_client,
struct i2c_board_info *info)
@@ -730,6 +1156,23 @@ static int lm90_detect(struct i2c_client *new_client,
}
} else
if (man_id == 0x4D) { /* Maxim */
+ int reg_emerg, reg_emerg2, reg_status2;
+
+ /*
+ * We read MAX6659_REG_R_REMOTE_EMERG twice, and re-read
+ * LM90_REG_R_MAN_ID in between. If MAX6659_REG_R_REMOTE_EMERG
+ * exists, both readings will reflect the same value. Otherwise,
+ * the readings will be different.
+ */
+ if ((reg_emerg = i2c_smbus_read_byte_data(new_client,
+ MAX6659_REG_R_REMOTE_EMERG)) < 0
+ || i2c_smbus_read_byte_data(new_client, LM90_REG_R_MAN_ID) < 0
+ || (reg_emerg2 = i2c_smbus_read_byte_data(new_client,
+ MAX6659_REG_R_REMOTE_EMERG)) < 0
+ || (reg_status2 = i2c_smbus_read_byte_data(new_client,
+ MAX6696_REG_R_STATUS2)) < 0)
+ return -ENODEV;
+
/*
* The MAX6657, MAX6658 and MAX6659 do NOT have a chip_id
* register. Reading from that address will return the last
@@ -737,12 +1180,38 @@ static int lm90_detect(struct i2c_client *new_client,
* register. Likewise, the config1 register seems to lack a
* low nibble, so the value will be those of the previous
* read, so in our case those of the man_id register.
+ * MAX6659 has a third set of upper temperature limit registers.
+ * Those registers also return values on MAX6657 and MAX6658,
+ * thus the only way to detect MAX6659 is by its address.
+ * For this reason it will be mis-detected as MAX6657 if its
+ * address is 0x4C.
*/
if (chip_id == man_id
- && (address == 0x4C || address == 0x4D)
+ && (address == 0x4C || address == 0x4D || address == 0x4E)
&& (reg_config1 & 0x1F) == (man_id & 0x0F)
&& reg_convrate <= 0x09) {
- name = "max6657";
+ if (address == 0x4C)
+ name = "max6657";
+ else
+ name = "max6659";
+ } else
+ /*
+ * Even though MAX6695 and MAX6696 do not have a chip ID
+ * register, reading it returns 0x01. Bit 4 of the config1
+ * register is unused and should return zero when read. Bit 0 of
+ * the status2 register is unused and should return zero when
+ * read.
+ *
+ * MAX6695 and MAX6696 have an additional set of temperature
+ * limit registers. We can detect those chips by checking if
+ * one of those registers exists.
+ */
+ if (chip_id == 0x01
+ && (reg_config1 & 0x10) == 0x00
+ && (reg_status2 & 0x01) == 0x00
+ && reg_emerg == reg_emerg2
+ && reg_convrate <= 0x07) {
+ name = "max6696";
} else
/*
* The chip_id register of the MAX6680 and MAX6681 holds the
@@ -768,10 +1237,23 @@ static int lm90_detect(struct i2c_client *new_client,
} else
if (address == 0x4C
&& man_id == 0x5C) { /* Winbond/Nuvoton */
- if ((chip_id & 0xFE) == 0x10 /* W83L771AWG/ASG */
- && (reg_config1 & 0x2A) == 0x00
- && reg_convrate <= 0x08) {
- name = "w83l771";
+ int reg_config2;
+
+ reg_config2 = i2c_smbus_read_byte_data(new_client,
+ LM90_REG_R_CONFIG2);
+ if (reg_config2 < 0)
+ return -ENODEV;
+
+ if ((reg_config1 & 0x2A) == 0x00
+ && (reg_config2 & 0xF8) == 0x00) {
+ if (chip_id == 0x01 /* W83L771W/G */
+ && reg_convrate <= 0x09) {
+ name = "w83l771";
+ } else
+ if ((chip_id & 0xFE) == 0x10 /* W83L771AWG/ASG */
+ && reg_convrate <= 0x08) {
+ name = "w83l771";
+ }
}
}
@@ -787,6 +1269,69 @@ static int lm90_detect(struct i2c_client *new_client,
return 0;
}
+static void lm90_remove_files(struct i2c_client *client, struct lm90_data *data)
+{
+ if (data->flags & LM90_HAVE_TEMP3)
+ sysfs_remove_group(&client->dev.kobj, &lm90_temp3_group);
+ if (data->flags & LM90_HAVE_EMERGENCY_ALARM)
+ sysfs_remove_group(&client->dev.kobj,
+ &lm90_emergency_alarm_group);
+ if (data->flags & LM90_HAVE_EMERGENCY)
+ sysfs_remove_group(&client->dev.kobj,
+ &lm90_emergency_group);
+ if (data->flags & LM90_HAVE_OFFSET)
+ device_remove_file(&client->dev,
+ &sensor_dev_attr_temp2_offset.dev_attr);
+ device_remove_file(&client->dev, &dev_attr_pec);
+ sysfs_remove_group(&client->dev.kobj, &lm90_group);
+}
+
+static void lm90_init_client(struct i2c_client *client)
+{
+ u8 config, convrate;
+ struct lm90_data *data = i2c_get_clientdata(client);
+
+ if (lm90_read_reg(client, LM90_REG_R_CONVRATE, &convrate) < 0) {
+ dev_warn(&client->dev, "Failed to read convrate register!\n");
+ convrate = LM90_DEF_CONVRATE_RVAL;
+ }
+ data->convrate_orig = convrate;
+
+ /*
+ * Start the conversions.
+ */
+ lm90_set_convrate(client, data, 500); /* 500ms; 2Hz conversion rate */
+ if (lm90_read_reg(client, LM90_REG_R_CONFIG1, &config) < 0) {
+ dev_warn(&client->dev, "Initialization failed!\n");
+ return;
+ }
+ data->config_orig = config;
+
+ /* Check Temperature Range Select */
+ if (data->kind == adt7461) {
+ if (config & 0x04)
+ data->flags |= LM90_FLAG_ADT7461_EXT;
+ }
+
+ /*
+ * Put MAX6680/MAX6681 into extended resolution (bit 0x10,
+ * 0.125 degree resolution) and range (0x08, extend range
+ * to -64 degree) mode for the remote temperature sensor.
+ */
+ if (data->kind == max6680)
+ config |= 0x18;
+
+ /*
+ * Select external channel 0 for max6695/96
+ */
+ if (data->kind == max6696)
+ config &= ~0x08;
+
+ config &= 0xBF; /* run */
+ if (config != data->config_orig) /* Only write if changed */
+ i2c_smbus_write_byte_data(client, LM90_REG_W_CONFIG1, config);
+}
+
static int lm90_probe(struct i2c_client *new_client,
const struct i2c_device_id *id)
{
@@ -811,31 +1356,48 @@ static int lm90_probe(struct i2c_client *new_client,
/* Different devices have different alarm bits triggering the
* ALERT# output */
- switch (data->kind) {
- case lm90:
- case lm99:
- case lm86:
- data->alert_alarms = 0x7b;
- break;
- default:
- data->alert_alarms = 0x7c;
- break;
- }
+ data->alert_alarms = lm90_params[data->kind].alert_alarms;
+
+ /* Set chip capabilities */
+ data->flags = lm90_params[data->kind].flags;
+
+ /* Set maximum conversion rate */
+ data->max_convrate = lm90_params[data->kind].max_convrate;
/* Initialize the LM90 chip */
lm90_init_client(new_client);
/* Register sysfs hooks */
- if ((err = sysfs_create_group(&new_client->dev.kobj, &lm90_group)))
+ err = sysfs_create_group(&new_client->dev.kobj, &lm90_group);
+ if (err)
goto exit_free;
if (new_client->flags & I2C_CLIENT_PEC) {
- if ((err = device_create_file(&new_client->dev,
- &dev_attr_pec)))
+ err = device_create_file(&new_client->dev, &dev_attr_pec);
+ if (err)
+ goto exit_remove_files;
+ }
+ if (data->flags & LM90_HAVE_OFFSET) {
+ err = device_create_file(&new_client->dev,
+ &sensor_dev_attr_temp2_offset.dev_attr);
+ if (err)
goto exit_remove_files;
}
- if (data->kind != max6657 && data->kind != max6646) {
- if ((err = device_create_file(&new_client->dev,
- &sensor_dev_attr_temp2_offset.dev_attr)))
+ if (data->flags & LM90_HAVE_EMERGENCY) {
+ err = sysfs_create_group(&new_client->dev.kobj,
+ &lm90_emergency_group);
+ if (err)
+ goto exit_remove_files;
+ }
+ if (data->flags & LM90_HAVE_EMERGENCY_ALARM) {
+ err = sysfs_create_group(&new_client->dev.kobj,
+ &lm90_emergency_alarm_group);
+ if (err)
+ goto exit_remove_files;
+ }
+ if (data->flags & LM90_HAVE_TEMP3) {
+ err = sysfs_create_group(&new_client->dev.kobj,
+ &lm90_temp3_group);
+ if (err)
goto exit_remove_files;
}
@@ -848,62 +1410,23 @@ static int lm90_probe(struct i2c_client *new_client,
return 0;
exit_remove_files:
- sysfs_remove_group(&new_client->dev.kobj, &lm90_group);
- device_remove_file(&new_client->dev, &dev_attr_pec);
+ lm90_remove_files(new_client, data);
exit_free:
kfree(data);
exit:
return err;
}
-static void lm90_init_client(struct i2c_client *client)
-{
- u8 config;
- struct lm90_data *data = i2c_get_clientdata(client);
-
- /*
- * Start the conversions.
- */
- i2c_smbus_write_byte_data(client, LM90_REG_W_CONVRATE,
- 5); /* 2 Hz */
- if (lm90_read_reg(client, LM90_REG_R_CONFIG1, &config) < 0) {
- dev_warn(&client->dev, "Initialization failed!\n");
- return;
- }
- data->config_orig = config;
-
- /* Check Temperature Range Select */
- if (data->kind == adt7461) {
- if (config & 0x04)
- data->flags |= LM90_FLAG_ADT7461_EXT;
- }
-
- /*
- * Put MAX6680/MAX8881 into extended resolution (bit 0x10,
- * 0.125 degree resolution) and range (0x08, extend range
- * to -64 degree) mode for the remote temperature sensor.
- */
- if (data->kind == max6680) {
- config |= 0x18;
- }
-
- config &= 0xBF; /* run */
- if (config != data->config_orig) /* Only write if changed */
- i2c_smbus_write_byte_data(client, LM90_REG_W_CONFIG1, config);
-}
-
static int lm90_remove(struct i2c_client *client)
{
struct lm90_data *data = i2c_get_clientdata(client);
hwmon_device_unregister(data->hwmon_dev);
- sysfs_remove_group(&client->dev.kobj, &lm90_group);
- device_remove_file(&client->dev, &dev_attr_pec);
- if (data->kind != max6657 && data->kind != max6646)
- device_remove_file(&client->dev,
- &sensor_dev_attr_temp2_offset.dev_attr);
+ lm90_remove_files(client, data);
/* Restore initial configuration */
+ i2c_smbus_write_byte_data(client, LM90_REG_W_CONVRATE,
+ data->convrate_orig);
i2c_smbus_write_byte_data(client, LM90_REG_W_CONFIG1,
data->config_orig);
@@ -914,10 +1437,14 @@ static int lm90_remove(struct i2c_client *client)
static void lm90_alert(struct i2c_client *client, unsigned int flag)
{
struct lm90_data *data = i2c_get_clientdata(client);
- u8 config, alarms;
+ u8 config, alarms, alarms2 = 0;
lm90_read_reg(client, LM90_REG_R_STATUS, &alarms);
- if ((alarms & 0x7f) == 0) {
+
+ if (data->kind == max6696)
+ lm90_read_reg(client, MAX6696_REG_R_STATUS2, &alarms2);
+
+ if ((alarms & 0x7f) == 0 && (alarms2 & 0xfe) == 0) {
dev_info(&client->dev, "Everything OK\n");
} else {
if (alarms & 0x61)
@@ -930,10 +1457,14 @@ static void lm90_alert(struct i2c_client *client, unsigned int flag)
dev_warn(&client->dev,
"temp%d diode open, please check!\n", 2);
+ if (alarms2 & 0x18)
+ dev_warn(&client->dev,
+ "temp%d out of range, please check!\n", 3);
+
/* Disable ALERT# output, because these chips don't implement
SMBus alert correctly; they should only hold the alert line
low briefly. */
- if ((data->kind == adm1032 || data->kind == adt7461)
+ if ((data->flags & LM90_HAVE_BROKEN_ALERT)
&& (alarms & data->alert_alarms)) {
dev_dbg(&client->dev, "Disabling ALERT#\n");
lm90_read_reg(client, LM90_REG_R_CONFIG1, &config);
@@ -943,117 +1474,18 @@ static void lm90_alert(struct i2c_client *client, unsigned int flag)
}
}
-static int lm90_read16(struct i2c_client *client, u8 regh, u8 regl, u16 *value)
-{
- int err;
- u8 oldh, newh, l;
-
- /*
- * There is a trick here. We have to read two registers to have the
- * sensor temperature, but we have to beware a conversion could occur
- * inbetween the readings. The datasheet says we should either use
- * the one-shot conversion register, which we don't want to do
- * (disables hardware monitoring) or monitor the busy bit, which is
- * impossible (we can't read the values and monitor that bit at the
- * exact same time). So the solution used here is to read the high
- * byte once, then the low byte, then the high byte again. If the new
- * high byte matches the old one, then we have a valid reading. Else
- * we have to read the low byte again, and now we believe we have a
- * correct reading.
- */
- if ((err = lm90_read_reg(client, regh, &oldh))
- || (err = lm90_read_reg(client, regl, &l))
- || (err = lm90_read_reg(client, regh, &newh)))
- return err;
- if (oldh != newh) {
- err = lm90_read_reg(client, regl, &l);
- if (err)
- return err;
- }
- *value = (newh << 8) | l;
-
- return 0;
-}
-
-static struct lm90_data *lm90_update_device(struct device *dev)
-{
- struct i2c_client *client = to_i2c_client(dev);
- struct lm90_data *data = i2c_get_clientdata(client);
-
- mutex_lock(&data->update_lock);
-
- if (time_after(jiffies, data->last_updated + HZ / 2 + HZ / 10)
- || !data->valid) {
- u8 h, l;
-
- dev_dbg(&client->dev, "Updating lm90 data.\n");
- lm90_read_reg(client, LM90_REG_R_LOCAL_LOW, &data->temp8[0]);
- lm90_read_reg(client, LM90_REG_R_LOCAL_HIGH, &data->temp8[1]);
- lm90_read_reg(client, LM90_REG_R_LOCAL_CRIT, &data->temp8[2]);
- lm90_read_reg(client, LM90_REG_R_REMOTE_CRIT, &data->temp8[3]);
- lm90_read_reg(client, LM90_REG_R_TCRIT_HYST, &data->temp_hyst);
-
- if (data->kind == max6657 || data->kind == max6646) {
- lm90_read16(client, LM90_REG_R_LOCAL_TEMP,
- MAX6657_REG_R_LOCAL_TEMPL,
- &data->temp11[4]);
- } else {
- if (lm90_read_reg(client, LM90_REG_R_LOCAL_TEMP,
- &h) == 0)
- data->temp11[4] = h << 8;
- }
- lm90_read16(client, LM90_REG_R_REMOTE_TEMPH,
- LM90_REG_R_REMOTE_TEMPL, &data->temp11[0]);
-
- if (lm90_read_reg(client, LM90_REG_R_REMOTE_LOWH, &h) == 0) {
- data->temp11[1] = h << 8;
- if (data->kind != max6657 && data->kind != max6680
- && data->kind != max6646
- && lm90_read_reg(client, LM90_REG_R_REMOTE_LOWL,
- &l) == 0)
- data->temp11[1] |= l;
- }
- if (lm90_read_reg(client, LM90_REG_R_REMOTE_HIGHH, &h) == 0) {
- data->temp11[2] = h << 8;
- if (data->kind != max6657 && data->kind != max6680
- && data->kind != max6646
- && lm90_read_reg(client, LM90_REG_R_REMOTE_HIGHL,
- &l) == 0)
- data->temp11[2] |= l;
- }
-
- if (data->kind != max6657 && data->kind != max6646) {
- if (lm90_read_reg(client, LM90_REG_R_REMOTE_OFFSH,
- &h) == 0
- && lm90_read_reg(client, LM90_REG_R_REMOTE_OFFSL,
- &l) == 0)
- data->temp11[3] = (h << 8) | l;
- }
- lm90_read_reg(client, LM90_REG_R_STATUS, &data->alarms);
-
- /* Re-enable ALERT# output if it was originally enabled and
- * relevant alarms are all clear */
- if ((data->config_orig & 0x80) == 0
- && (data->alarms & data->alert_alarms) == 0) {
- u8 config;
-
- lm90_read_reg(client, LM90_REG_R_CONFIG1, &config);
- if (config & 0x80) {
- dev_dbg(&client->dev, "Re-enabling ALERT#\n");
- i2c_smbus_write_byte_data(client,
- LM90_REG_W_CONFIG1,
- config & ~0x80);
- }
- }
-
- data->last_updated = jiffies;
- data->valid = 1;
- }
-
- mutex_unlock(&data->update_lock);
-
- return data;
-}
+static struct i2c_driver lm90_driver = {
+ .class = I2C_CLASS_HWMON,
+ .driver = {
+ .name = "lm90",
+ },
+ .probe = lm90_probe,
+ .remove = lm90_remove,
+ .alert = lm90_alert,
+ .id_table = lm90_id,
+ .detect = lm90_detect,
+ .address_list = normal_i2c,
+};
static int __init sensors_lm90_init(void)
{
diff --git a/drivers/hwmon/lm93.c b/drivers/hwmon/lm93.c
index 6669255aadcf..c9ed14eba5a6 100644
--- a/drivers/hwmon/lm93.c
+++ b/drivers/hwmon/lm93.c
@@ -20,7 +20,7 @@
Adapted to 2.6.20 by Carsten Emde <cbe@osadl.org>
Copyright (c) 2006 Carsten Emde, Open Source Automation Development Lab
- Modified for mainline integration by Hans J. Koch <hjk@linutronix.de>
+ Modified for mainline integration by Hans J. Koch <hjk@hansjkoch.de>
Copyright (c) 2007 Hans J. Koch, Linutronix GmbH
This program is free software; you can redistribute it and/or modify
@@ -2629,7 +2629,7 @@ static void __exit lm93_exit(void)
}
MODULE_AUTHOR("Mark M. Hoffman <mhoffman@lightlink.com>, "
- "Hans J. Koch <hjk@linutronix.de");
+ "Hans J. Koch <hjk@hansjkoch.de>");
MODULE_DESCRIPTION("LM93 driver");
MODULE_LICENSE("GPL");
diff --git a/drivers/hwmon/lm95241.c b/drivers/hwmon/lm95241.c
index 464340f25496..4546d82f024a 100644
--- a/drivers/hwmon/lm95241.c
+++ b/drivers/hwmon/lm95241.c
@@ -128,9 +128,12 @@ static ssize_t set_interval(struct device *dev, struct device_attribute *attr,
{
struct i2c_client *client = to_i2c_client(dev);
struct lm95241_data *data = i2c_get_clientdata(client);
+ unsigned long val;
- strict_strtol(buf, 10, &data->interval);
- data->interval = data->interval * HZ / 1000;
+ if (strict_strtoul(buf, 10, &val) < 0)
+ return -EINVAL;
+
+ data->interval = val * HZ / 1000;
return count;
}
@@ -188,7 +191,9 @@ static ssize_t set_type##flag(struct device *dev, \
struct lm95241_data *data = i2c_get_clientdata(client); \
\
long val; \
- strict_strtol(buf, 10, &val); \
+\
+ if (strict_strtol(buf, 10, &val) < 0) \
+ return -EINVAL; \
\
if ((val == 1) || (val == 2)) { \
\
@@ -227,7 +232,9 @@ static ssize_t set_min##flag(struct device *dev, \
struct lm95241_data *data = i2c_get_clientdata(client); \
\
long val; \
- strict_strtol(buf, 10, &val); \
+\
+ if (strict_strtol(buf, 10, &val) < 0) \
+ return -EINVAL;\
\
mutex_lock(&data->update_lock); \
\
@@ -256,7 +263,9 @@ static ssize_t set_max##flag(struct device *dev, \
struct lm95241_data *data = i2c_get_clientdata(client); \
\
long val; \
- strict_strtol(buf, 10, &val); \
+\
+ if (strict_strtol(buf, 10, &val) < 0) \
+ return -EINVAL; \
\
mutex_lock(&data->update_lock); \
\
diff --git a/drivers/hwmon/ltc4261.c b/drivers/hwmon/ltc4261.c
new file mode 100644
index 000000000000..4b50601027d3
--- /dev/null
+++ b/drivers/hwmon/ltc4261.c
@@ -0,0 +1,314 @@
+/*
+ * Driver for Linear Technology LTC4261 I2C Negative Voltage Hot Swap Controller
+ *
+ * Copyright (C) 2010 Ericsson AB.
+ *
+ * Derived from:
+ *
+ * Driver for Linear Technology LTC4245 I2C Multiple Supply Hot Swap Controller
+ * Copyright (C) 2008 Ira W. Snyder <iws@ovro.caltech.edu>
+ *
+ * Datasheet: http://cds.linear.com/docs/Datasheet/42612fb.pdf
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/err.h>
+#include <linux/slab.h>
+#include <linux/i2c.h>
+#include <linux/hwmon.h>
+#include <linux/hwmon-sysfs.h>
+
+/* chip registers */
+#define LTC4261_STATUS 0x00 /* readonly */
+#define LTC4261_FAULT 0x01
+#define LTC4261_ALERT 0x02
+#define LTC4261_CONTROL 0x03
+#define LTC4261_SENSE_H 0x04
+#define LTC4261_SENSE_L 0x05
+#define LTC4261_ADIN2_H 0x06
+#define LTC4261_ADIN2_L 0x07
+#define LTC4261_ADIN_H 0x08
+#define LTC4261_ADIN_L 0x09
+
+/*
+ * Fault register bits
+ */
+#define FAULT_OV (1<<0)
+#define FAULT_UV (1<<1)
+#define FAULT_OC (1<<2)
+
+struct ltc4261_data {
+ struct device *hwmon_dev;
+
+ struct mutex update_lock;
+ bool valid;
+ unsigned long last_updated; /* in jiffies */
+
+ /* Registers */
+ u8 regs[10];
+};
+
+static struct ltc4261_data *ltc4261_update_device(struct device *dev)
+{
+ struct i2c_client *client = to_i2c_client(dev);
+ struct ltc4261_data *data = i2c_get_clientdata(client);
+ struct ltc4261_data *ret = data;
+
+ mutex_lock(&data->update_lock);
+
+ if (time_after(jiffies, data->last_updated + HZ / 4) || !data->valid) {
+ int i;
+
+ /* Read registers -- 0x00 to 0x09 */
+ for (i = 0; i < ARRAY_SIZE(data->regs); i++) {
+ int val;
+
+ val = i2c_smbus_read_byte_data(client, i);
+ if (unlikely(val < 0)) {
+ dev_dbg(dev,
+ "Failed to read ADC value: error %d\n",
+ val);
+ ret = ERR_PTR(val);
+ goto abort;
+ }
+ data->regs[i] = val;
+ }
+ data->last_updated = jiffies;
+ data->valid = 1;
+ }
+abort:
+ mutex_unlock(&data->update_lock);
+ return ret;
+}
+
+/* Return the voltage (in mV) or current (in mA) from the given register */
+static int ltc4261_get_value(struct ltc4261_data *data, u8 reg)
+{
+ u32 val;
+
+ val = (data->regs[reg] << 2) + (data->regs[reg + 1] >> 6);
+
+ switch (reg) {
+ case LTC4261_ADIN_H:
+ case LTC4261_ADIN2_H:
+ /* 2.5mV resolution. Convert to mV. */
+ val = val * 25 / 10;
+ break;
+ case LTC4261_SENSE_H:
+ /*
+ * 62.5uV resolution. Convert to current as measured with
+ * a 1 mOhm sense resistor, in mA. If a different sense
+ * resistor is installed, calculate the actual current by
+ * dividing the reported current by the sense resistor value
+ * in mOhm.
+ */
+ val = val * 625 / 10;
+ break;
+ default:
+ /* If we get here, the developer messed up */
+ WARN_ON_ONCE(1);
+ val = 0;
+ break;
+ }
+
+ return val;
+}
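With numbers, the conversion above works as follows: each measurement is a 10-bit value, the upper 8 bits in the _H register and the remaining 2 bits in the top of the following _L register. For example, assuming ADIN_H = 0x80 and ADIN_L = 0x40:

	raw = (0x80 << 2) + (0x40 >> 6) = 512 + 1 = 513
	voltage = 513 * 25 / 10 = 1282 mV     (2.5 mV per step)

	and on the sense input, raw = 160 reports 160 * 625 / 10 = 10000,
	i.e. 10 A through a 1 mOhm sense resistor (62.5 uV per step).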
+
+static ssize_t ltc4261_show_value(struct device *dev,
+ struct device_attribute *da, char *buf)
+{
+ struct sensor_device_attribute *attr = to_sensor_dev_attr(da);
+ struct ltc4261_data *data = ltc4261_update_device(dev);
+ int value;
+
+ if (IS_ERR(data))
+ return PTR_ERR(data);
+
+ value = ltc4261_get_value(data, attr->index);
+ return snprintf(buf, PAGE_SIZE, "%d\n", value);
+}
+
+static ssize_t ltc4261_show_bool(struct device *dev,
+ struct device_attribute *da, char *buf)
+{
+ struct sensor_device_attribute *attr = to_sensor_dev_attr(da);
+ struct i2c_client *client = to_i2c_client(dev);
+ struct ltc4261_data *data = ltc4261_update_device(dev);
+ u8 fault;
+
+ if (IS_ERR(data))
+ return PTR_ERR(data);
+
+ fault = data->regs[LTC4261_FAULT] & attr->index;
+ if (fault) /* Clear reported faults in chip register */
+ i2c_smbus_write_byte_data(client, LTC4261_FAULT, ~fault);
+
+ return snprintf(buf, PAGE_SIZE, "%d\n", fault ? 1 : 0);
+}
+
+/*
+ * These macros are used below in constructing device attribute objects
+ * for use with sysfs_create_group() to make a sysfs device file
+ * for each register.
+ */
+
+#define LTC4261_VALUE(name, ltc4261_cmd_idx) \
+ static SENSOR_DEVICE_ATTR(name, S_IRUGO, \
+ ltc4261_show_value, NULL, ltc4261_cmd_idx)
+
+#define LTC4261_BOOL(name, mask) \
+ static SENSOR_DEVICE_ATTR(name, S_IRUGO, \
+ ltc4261_show_bool, NULL, (mask))
+
+/*
+ * Input voltages.
+ */
+LTC4261_VALUE(in1_input, LTC4261_ADIN_H);
+LTC4261_VALUE(in2_input, LTC4261_ADIN2_H);
+
+/*
+ * Voltage alarms. The chip has only one set of voltage alarm status bits,
+ * triggered by input voltage alarms. In many designs, those alarms are
+ * associated with the ADIN2 sensor, due to the proximity of the ADIN2 pin
+ * to the OV pin. ADIN2 is, however, not available on all chip variants.
+ * To ensure that the alarm condition is reported to the user, report it
+ * with both voltage sensors.
+ */
+LTC4261_BOOL(in1_min_alarm, FAULT_UV);
+LTC4261_BOOL(in1_max_alarm, FAULT_OV);
+LTC4261_BOOL(in2_min_alarm, FAULT_UV);
+LTC4261_BOOL(in2_max_alarm, FAULT_OV);
+
+/* Currents (via sense resistor) */
+LTC4261_VALUE(curr1_input, LTC4261_SENSE_H);
+
+/* Overcurrent alarm */
+LTC4261_BOOL(curr1_max_alarm, FAULT_OC);
+
+static struct attribute *ltc4261_attributes[] = {
+ &sensor_dev_attr_in1_input.dev_attr.attr,
+ &sensor_dev_attr_in1_min_alarm.dev_attr.attr,
+ &sensor_dev_attr_in1_max_alarm.dev_attr.attr,
+ &sensor_dev_attr_in2_input.dev_attr.attr,
+ &sensor_dev_attr_in2_min_alarm.dev_attr.attr,
+ &sensor_dev_attr_in2_max_alarm.dev_attr.attr,
+
+ &sensor_dev_attr_curr1_input.dev_attr.attr,
+ &sensor_dev_attr_curr1_max_alarm.dev_attr.attr,
+
+ NULL,
+};
+
+static const struct attribute_group ltc4261_group = {
+ .attrs = ltc4261_attributes,
+};
+
+static int ltc4261_probe(struct i2c_client *client,
+ const struct i2c_device_id *id)
+{
+ struct i2c_adapter *adapter = client->adapter;
+ struct ltc4261_data *data;
+ int ret;
+
+ if (!i2c_check_functionality(adapter, I2C_FUNC_SMBUS_BYTE_DATA))
+ return -ENODEV;
+
+ if (i2c_smbus_read_byte_data(client, LTC4261_STATUS) < 0) {
+ dev_err(&client->dev, "Failed to read status register\n");
+ return -ENODEV;
+ }
+
+ data = kzalloc(sizeof(*data), GFP_KERNEL);
+ if (!data) {
+ ret = -ENOMEM;
+ goto out_kzalloc;
+ }
+
+ i2c_set_clientdata(client, data);
+ mutex_init(&data->update_lock);
+
+ /* Clear faults */
+ i2c_smbus_write_byte_data(client, LTC4261_FAULT, 0x00);
+
+ /* Register sysfs hooks */
+ ret = sysfs_create_group(&client->dev.kobj, &ltc4261_group);
+ if (ret)
+ goto out_sysfs_create_group;
+
+ data->hwmon_dev = hwmon_device_register(&client->dev);
+ if (IS_ERR(data->hwmon_dev)) {
+ ret = PTR_ERR(data->hwmon_dev);
+ goto out_hwmon_device_register;
+ }
+
+ return 0;
+
+out_hwmon_device_register:
+ sysfs_remove_group(&client->dev.kobj, &ltc4261_group);
+out_sysfs_create_group:
+ kfree(data);
+out_kzalloc:
+ return ret;
+}
+
+static int ltc4261_remove(struct i2c_client *client)
+{
+ struct ltc4261_data *data = i2c_get_clientdata(client);
+
+ hwmon_device_unregister(data->hwmon_dev);
+ sysfs_remove_group(&client->dev.kobj, &ltc4261_group);
+
+ kfree(data);
+
+ return 0;
+}
+
+static const struct i2c_device_id ltc4261_id[] = {
+ {"ltc4261", 0},
+ {}
+};
+
+MODULE_DEVICE_TABLE(i2c, ltc4261_id);
+
+/* This is the driver that will be inserted */
+static struct i2c_driver ltc4261_driver = {
+ .driver = {
+ .name = "ltc4261",
+ },
+ .probe = ltc4261_probe,
+ .remove = ltc4261_remove,
+ .id_table = ltc4261_id,
+};
+
+static int __init ltc4261_init(void)
+{
+ return i2c_add_driver(&ltc4261_driver);
+}
+
+static void __exit ltc4261_exit(void)
+{
+ i2c_del_driver(&ltc4261_driver);
+}
+
+MODULE_AUTHOR("Guenter Roeck <guenter.roeck@ericsson.com>");
+MODULE_DESCRIPTION("LTC4261 driver");
+MODULE_LICENSE("GPL");
+
+module_init(ltc4261_init);
+module_exit(ltc4261_exit);
diff --git a/drivers/hwmon/max6650.c b/drivers/hwmon/max6650.c
index a0160ee5caef..9a11532ecae8 100644
--- a/drivers/hwmon/max6650.c
+++ b/drivers/hwmon/max6650.c
@@ -2,7 +2,7 @@
* max6650.c - Part of lm_sensors, Linux kernel modules for hardware
* monitoring.
*
- * (C) 2007 by Hans J. Koch <hjk@linutronix.de>
+ * (C) 2007 by Hans J. Koch <hjk@hansjkoch.de>
*
* based on code written by John Morris <john.morris@spirentcom.com>
* Copyright (c) 2003 Spirent Communications
diff --git a/drivers/hwmon/pcf8591.c b/drivers/hwmon/pcf8591.c
index d44787949851..dc7259d69812 100644
--- a/drivers/hwmon/pcf8591.c
+++ b/drivers/hwmon/pcf8591.c
@@ -23,10 +23,8 @@
#include <linux/slab.h>
#include <linux/i2c.h>
#include <linux/mutex.h>
-
-/* Addresses to scan */
-static const unsigned short normal_i2c[] = { 0x48, 0x49, 0x4a, 0x4b, 0x4c,
- 0x4d, 0x4e, 0x4f, I2C_CLIENT_END };
+#include <linux/err.h>
+#include <linux/hwmon.h>
/* Insmod parameters */
@@ -71,6 +69,7 @@ MODULE_PARM_DESC(input_mode,
#define REG_TO_SIGNED(reg) (((reg) & 0x80)?((reg) - 256):(reg))
struct pcf8591_data {
+ struct device *hwmon_dev;
struct mutex update_lock;
u8 control;
@@ -167,24 +166,6 @@ static const struct attribute_group pcf8591_attr_group_opt = {
* Real code
*/
-/* Return 0 if detection is successful, -ENODEV otherwise */
-static int pcf8591_detect(struct i2c_client *client,
- struct i2c_board_info *info)
-{
- struct i2c_adapter *adapter = client->adapter;
-
- if (!i2c_check_functionality(adapter, I2C_FUNC_SMBUS_BYTE
- | I2C_FUNC_SMBUS_WRITE_BYTE_DATA))
- return -ENODEV;
-
- /* Now, we would do the remaining detection. But the PCF8591 is plainly
- impossible to detect! Stupid chip. */
-
- strlcpy(info->type, "pcf8591", I2C_NAME_SIZE);
-
- return 0;
-}
-
static int pcf8591_probe(struct i2c_client *client,
const struct i2c_device_id *id)
{
@@ -221,6 +202,12 @@ static int pcf8591_probe(struct i2c_client *client,
goto exit_sysfs_remove;
}
+ data->hwmon_dev = hwmon_device_register(&client->dev);
+ if (IS_ERR(data->hwmon_dev)) {
+ err = PTR_ERR(data->hwmon_dev);
+ goto exit_sysfs_remove;
+ }
+
return 0;
exit_sysfs_remove:
@@ -234,6 +221,9 @@ exit:
static int pcf8591_remove(struct i2c_client *client)
{
+ struct pcf8591_data *data = i2c_get_clientdata(client);
+
+ hwmon_device_unregister(data->hwmon_dev);
sysfs_remove_group(&client->dev.kobj, &pcf8591_attr_group_opt);
sysfs_remove_group(&client->dev.kobj, &pcf8591_attr_group);
kfree(i2c_get_clientdata(client));
@@ -295,10 +285,6 @@ static struct i2c_driver pcf8591_driver = {
.probe = pcf8591_probe,
.remove = pcf8591_remove,
.id_table = pcf8591_id,
-
- .class = I2C_CLASS_HWMON, /* Nearest choice */
- .detect = pcf8591_detect,
- .address_list = normal_i2c,
};
static int __init pcf8591_init(void)
diff --git a/drivers/hwmon/pkgtemp.c b/drivers/hwmon/pkgtemp.c
index f11903936c8b..0798210590bc 100644
--- a/drivers/hwmon/pkgtemp.c
+++ b/drivers/hwmon/pkgtemp.c
@@ -21,7 +21,6 @@
*/
#include <linux/module.h>
-#include <linux/delay.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/jiffies.h>
@@ -35,6 +34,7 @@
#include <linux/cpu.h>
#include <asm/msr.h>
#include <asm/processor.h>
+#include <asm/smp.h>
#define DRVNAME "pkgtemp"
@@ -339,8 +339,7 @@ exit:
return err;
}
-#ifdef CONFIG_HOTPLUG_CPU
-static void pkgtemp_device_remove(unsigned int cpu)
+static void __cpuinit pkgtemp_device_remove(unsigned int cpu)
{
struct pdev_entry *p;
unsigned int i;
@@ -387,12 +386,10 @@ static int __cpuinit pkgtemp_cpu_callback(struct notifier_block *nfb,
static struct notifier_block pkgtemp_cpu_notifier __refdata = {
.notifier_call = pkgtemp_cpu_callback,
};
-#endif /* !CONFIG_HOTPLUG_CPU */
static int __init pkgtemp_init(void)
{
int i, err = -ENODEV;
- struct pdev_entry *p, *n;
/* quick check if we run Intel */
if (cpu_data(0).x86_vendor != X86_VENDOR_INTEL)
@@ -402,31 +399,23 @@ static int __init pkgtemp_init(void)
if (err)
goto exit;
- for_each_online_cpu(i) {
- err = pkgtemp_device_add(i);
- if (err)
- goto exit_devices_unreg;
- }
+ for_each_online_cpu(i)
+ pkgtemp_device_add(i);
+
+#ifndef CONFIG_HOTPLUG_CPU
if (list_empty(&pdev_list)) {
err = -ENODEV;
goto exit_driver_unreg;
}
+#endif
-#ifdef CONFIG_HOTPLUG_CPU
register_hotcpu_notifier(&pkgtemp_cpu_notifier);
-#endif
return 0;
-exit_devices_unreg:
- mutex_lock(&pdev_list_mutex);
- list_for_each_entry_safe(p, n, &pdev_list, list) {
- platform_device_unregister(p->pdev);
- list_del(&p->list);
- kfree(p);
- }
- mutex_unlock(&pdev_list_mutex);
+#ifndef CONFIG_HOTPLUG_CPU
exit_driver_unreg:
platform_driver_unregister(&pkgtemp_driver);
+#endif
exit:
return err;
}
@@ -434,9 +423,8 @@ exit:
static void __exit pkgtemp_exit(void)
{
struct pdev_entry *p, *n;
-#ifdef CONFIG_HOTPLUG_CPU
+
unregister_hotcpu_notifier(&pkgtemp_cpu_notifier);
-#endif
mutex_lock(&pdev_list_mutex);
list_for_each_entry_safe(p, n, &pdev_list, list) {
platform_device_unregister(p->pdev);
diff --git a/drivers/hwmon/s3c-hwmon.c b/drivers/hwmon/s3c-hwmon.c
index 3f3f9a47acfd..05248f2d7581 100644
--- a/drivers/hwmon/s3c-hwmon.c
+++ b/drivers/hwmon/s3c-hwmon.c
@@ -51,7 +51,7 @@ struct s3c_hwmon_attr {
* @attr: The holders for the channel attributes.
*/
struct s3c_hwmon {
- struct semaphore lock;
+ struct mutex lock;
struct s3c_adc_client *client;
struct device *hwmon_dev;
@@ -73,14 +73,14 @@ static int s3c_hwmon_read_ch(struct device *dev,
{
int ret;
- ret = down_interruptible(&hwmon->lock);
+ ret = mutex_lock_interruptible(&hwmon->lock);
if (ret < 0)
return ret;
dev_dbg(dev, "reading channel %d\n", channel);
ret = s3c_adc_read(hwmon->client, channel);
- up(&hwmon->lock);
+ mutex_unlock(&hwmon->lock);
return ret;
}
@@ -296,7 +296,7 @@ static int __devinit s3c_hwmon_probe(struct platform_device *dev)
platform_set_drvdata(dev, hwmon);
- init_MUTEX(&hwmon->lock);
+ mutex_init(&hwmon->lock);
/* Register with the core ADC driver. */
diff --git a/drivers/hwmon/tmp421.c b/drivers/hwmon/tmp421.c
index 6b4165c12092..0517a8f09d35 100644
--- a/drivers/hwmon/tmp421.c
+++ b/drivers/hwmon/tmp421.c
@@ -36,8 +36,8 @@
#include <linux/sysfs.h>
/* Addresses to scan */
-static unsigned short normal_i2c[] = { 0x2a, 0x4c, 0x4d, 0x4e, 0x4f,
- I2C_CLIENT_END };
+static const unsigned short normal_i2c[] = { 0x2a, 0x4c, 0x4d, 0x4e, 0x4f,
+ I2C_CLIENT_END };
enum chips { tmp421, tmp422, tmp423 };
diff --git a/drivers/hwmon/via-cputemp.c b/drivers/hwmon/via-cputemp.c
index ffb793af680b..ec7fad747adc 100644
--- a/drivers/hwmon/via-cputemp.c
+++ b/drivers/hwmon/via-cputemp.c
@@ -22,10 +22,8 @@
*/
#include <linux/module.h>
-#include <linux/delay.h>
#include <linux/init.h>
#include <linux/slab.h>
-#include <linux/jiffies.h>
#include <linux/hwmon.h>
#include <linux/sysfs.h>
#include <linux/hwmon-sysfs.h>
@@ -237,8 +235,7 @@ exit:
return err;
}
-#ifdef CONFIG_HOTPLUG_CPU
-static void via_cputemp_device_remove(unsigned int cpu)
+static void __cpuinit via_cputemp_device_remove(unsigned int cpu)
{
struct pdev_entry *p, *n;
mutex_lock(&pdev_list_mutex);
@@ -272,7 +269,6 @@ static int __cpuinit via_cputemp_cpu_callback(struct notifier_block *nfb,
static struct notifier_block via_cputemp_cpu_notifier __refdata = {
.notifier_call = via_cputemp_cpu_callback,
};
-#endif /* !CONFIG_HOTPLUG_CPU */
static int __init via_cputemp_init(void)
{
@@ -313,9 +309,7 @@ static int __init via_cputemp_init(void)
goto exit_driver_unreg;
}
-#ifdef CONFIG_HOTPLUG_CPU
register_hotcpu_notifier(&via_cputemp_cpu_notifier);
-#endif
return 0;
exit_devices_unreg:
@@ -335,9 +329,8 @@ exit:
static void __exit via_cputemp_exit(void)
{
struct pdev_entry *p, *n;
-#ifdef CONFIG_HOTPLUG_CPU
+
unregister_hotcpu_notifier(&via_cputemp_cpu_notifier);
-#endif
mutex_lock(&pdev_list_mutex);
list_for_each_entry_safe(p, n, &pdev_list, list) {
platform_device_unregister(p->pdev);
diff --git a/drivers/hwmon/w83795.c b/drivers/hwmon/w83795.c
new file mode 100644
index 000000000000..cdbc7448491e
--- /dev/null
+++ b/drivers/hwmon/w83795.c
@@ -0,0 +1,2262 @@
+/*
+ * w83795.c - Linux kernel driver for hardware monitoring
+ * Copyright (C) 2008 Nuvoton Technology Corp.
+ * Wei Song
+ * Copyright (C) 2010 Jean Delvare <khali@linux-fr.org>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation - version 2.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
+ * 02110-1301 USA.
+ *
+ * Supports following chips:
+ *
+ * Chip #vin #fanin #pwm #temp #dts wchipid vendid i2c ISA
+ * w83795g 21 14 8 6 8 0x79 0x5ca3 yes no
+ * w83795adg 18 14 2 6 8 0x79 0x5ca3 yes no
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/slab.h>
+#include <linux/i2c.h>
+#include <linux/hwmon.h>
+#include <linux/hwmon-sysfs.h>
+#include <linux/err.h>
+#include <linux/mutex.h>
+#include <linux/delay.h>
+
+/* Addresses to scan */
+static const unsigned short normal_i2c[] = {
+ 0x2c, 0x2d, 0x2e, 0x2f, I2C_CLIENT_END
+};
+
+
+static int reset;
+module_param(reset, bool, 0);
+MODULE_PARM_DESC(reset, "Set to 1 to reset chip, not recommended");
+
+
+#define W83795_REG_BANKSEL 0x00
+#define W83795_REG_VENDORID 0xfd
+#define W83795_REG_CHIPID 0xfe
+#define W83795_REG_DEVICEID 0xfb
+#define W83795_REG_DEVICEID_A 0xff
+
+#define W83795_REG_I2C_ADDR 0xfc
+#define W83795_REG_CONFIG 0x01
+#define W83795_REG_CONFIG_CONFIG48 0x04
+#define W83795_REG_CONFIG_START 0x01
+
+/* Multi-Function Pin Ctrl Registers */
+#define W83795_REG_VOLT_CTRL1 0x02
+#define W83795_REG_VOLT_CTRL2 0x03
+#define W83795_REG_TEMP_CTRL1 0x04
+#define W83795_REG_TEMP_CTRL2 0x05
+#define W83795_REG_FANIN_CTRL1 0x06
+#define W83795_REG_FANIN_CTRL2 0x07
+#define W83795_REG_VMIGB_CTRL 0x08
+
+#define TEMP_READ 0
+#define TEMP_CRIT 1
+#define TEMP_CRIT_HYST 2
+#define TEMP_WARN 3
+#define TEMP_WARN_HYST 4
+/* only crit and crit_hyst affect real-time alarm status
+ * current crit crit_hyst warn warn_hyst */
+static const u16 W83795_REG_TEMP[][5] = {
+ {0x21, 0x96, 0x97, 0x98, 0x99}, /* TD1/TR1 */
+ {0x22, 0x9a, 0x9b, 0x9c, 0x9d}, /* TD2/TR2 */
+ {0x23, 0x9e, 0x9f, 0xa0, 0xa1}, /* TD3/TR3 */
+ {0x24, 0xa2, 0xa3, 0xa4, 0xa5}, /* TD4/TR4 */
+ {0x1f, 0xa6, 0xa7, 0xa8, 0xa9}, /* TR5 */
+ {0x20, 0xaa, 0xab, 0xac, 0xad}, /* TR6 */
+};
+
+#define IN_READ 0
+#define IN_MAX 1
+#define IN_LOW 2
+static const u16 W83795_REG_IN[][3] = {
+ /* Current, HL, LL */
+ {0x10, 0x70, 0x71}, /* VSEN1 */
+ {0x11, 0x72, 0x73}, /* VSEN2 */
+ {0x12, 0x74, 0x75}, /* VSEN3 */
+ {0x13, 0x76, 0x77}, /* VSEN4 */
+ {0x14, 0x78, 0x79}, /* VSEN5 */
+ {0x15, 0x7a, 0x7b}, /* VSEN6 */
+ {0x16, 0x7c, 0x7d}, /* VSEN7 */
+ {0x17, 0x7e, 0x7f}, /* VSEN8 */
+ {0x18, 0x80, 0x81}, /* VSEN9 */
+ {0x19, 0x82, 0x83}, /* VSEN10 */
+ {0x1A, 0x84, 0x85}, /* VSEN11 */
+ {0x1B, 0x86, 0x87}, /* VTT */
+ {0x1C, 0x88, 0x89}, /* 3VDD */
+ {0x1D, 0x8a, 0x8b}, /* 3VSB */
+ {0x1E, 0x8c, 0x8d}, /* VBAT */
+ {0x1F, 0xa6, 0xa7}, /* VSEN12 */
+ {0x20, 0xaa, 0xab}, /* VSEN13 */
+ {0x21, 0x96, 0x97}, /* VSEN14 */
+ {0x22, 0x9a, 0x9b}, /* VSEN15 */
+ {0x23, 0x9e, 0x9f}, /* VSEN16 */
+ {0x24, 0xa2, 0xa3}, /* VSEN17 */
+};
+#define W83795_REG_VRLSB 0x3C
+
+static const u8 W83795_REG_IN_HL_LSB[] = {
+ 0x8e, /* VSEN1-4 */
+ 0x90, /* VSEN5-8 */
+ 0x92, /* VSEN9-11 */
+ 0x94, /* VTT, 3VDD, 3VSB, 3VBAT */
+ 0xa8, /* VSEN12 */
+ 0xac, /* VSEN13 */
+ 0x98, /* VSEN14 */
+ 0x9c, /* VSEN15 */
+ 0xa0, /* VSEN16 */
+ 0xa4, /* VSEN17 */
+};
+
+#define IN_LSB_REG(index, type) \
+ (((type) == 1) ? W83795_REG_IN_HL_LSB[(index)] \
+ : (W83795_REG_IN_HL_LSB[(index)] + 1))
+
+#define IN_LSB_SHIFT 0
+#define IN_LSB_IDX 1
+static const u8 IN_LSB_SHIFT_IDX[][2] = {
+ /* High/Low LSB shift, LSB No. */
+ {0x00, 0x00}, /* VSEN1 */
+ {0x02, 0x00}, /* VSEN2 */
+ {0x04, 0x00}, /* VSEN3 */
+ {0x06, 0x00}, /* VSEN4 */
+ {0x00, 0x01}, /* VSEN5 */
+ {0x02, 0x01}, /* VSEN6 */
+ {0x04, 0x01}, /* VSEN7 */
+ {0x06, 0x01}, /* VSEN8 */
+ {0x00, 0x02}, /* VSEN9 */
+ {0x02, 0x02}, /* VSEN10 */
+ {0x04, 0x02}, /* VSEN11 */
+ {0x00, 0x03}, /* VTT */
+ {0x02, 0x03}, /* 3VDD */
+ {0x04, 0x03}, /* 3VSB */
+ {0x06, 0x03}, /* VBAT */
+ {0x06, 0x04}, /* VSEN12 */
+ {0x06, 0x05}, /* VSEN13 */
+ {0x06, 0x06}, /* VSEN14 */
+ {0x06, 0x07}, /* VSEN15 */
+ {0x06, 0x08}, /* VSEN16 */
+ {0x06, 0x09}, /* VSEN17 */
+};
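+
+/*
+ * Voltage limits are 10-bit values: 8 MSBs in W83795_REG_IN[i] and 2 LSBs
+ * packed into a shared LSB register. For in5_max (VSEN5, index 4), for
+ * instance, the MSBs come from register 0x78 and the 2 LSBs from bits 1-0
+ * of register 0x90 (lsb_idx 1, shift 0).
+ */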
+
+
+#define W83795_REG_FAN(index) (0x2E + (index))
+#define W83795_REG_FAN_MIN_HL(index) (0xB6 + (index))
+#define W83795_REG_FAN_MIN_LSB(index) (0xC4 + (index) / 2)
+#define W83795_REG_FAN_MIN_LSB_SHIFT(index) \
+ (((index) & 1) ? 4 : 0)
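+
+/*
+ * Fan minimum limits are 12-bit: for fan4 (index 3) the 8 MSBs live in
+ * W83795_REG_FAN_MIN_HL(3) = 0xB9 and the 4 LSBs in the high nibble
+ * (shift 4) of W83795_REG_FAN_MIN_LSB(3) = 0xC5.
+ */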
+
+#define W83795_REG_VID_CTRL 0x6A
+
+#define W83795_REG_ALARM_CTRL 0x40
+#define ALARM_CTRL_RTSACS (1 << 7)
+#define W83795_REG_ALARM(index) (0x41 + (index))
+#define W83795_REG_CLR_CHASSIS 0x4D
+#define W83795_REG_BEEP(index) (0x50 + (index))
+
+#define W83795_REG_OVT_CFG 0x58
+#define OVT_CFG_SEL (1 << 7)
+
+
+#define W83795_REG_FCMS1 0x201
+#define W83795_REG_FCMS2 0x208
+#define W83795_REG_TFMR(index) (0x202 + (index))
+#define W83795_REG_FOMC 0x20F
+
+#define W83795_REG_TSS(index) (0x209 + (index))
+
+#define TSS_MAP_RESERVED 0xff
+static const u8 tss_map[4][6] = {
+ { 0, 1, 2, 3, 4, 5},
+ { 6, 7, 8, 9, 0, 1},
+ {10, 11, 12, 13, 2, 3},
+ { 4, 5, 4, 5, TSS_MAP_RESERVED, TSS_MAP_RESERVED},
+};
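+
+/*
+ * The TSS register nibble (0-3) indexes a row of tss_map; the resulting
+ * value selects the monitored channel: 0-5 are the analog temp1-temp6
+ * inputs, 6-13 the DTS inputs temp7-temp14. For example, nibble value 1
+ * for source 0 selects tss_map[1][0] = 6, i.e. temp7.
+ */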
+
+#define PWM_OUTPUT 0
+#define PWM_FREQ 1
+#define PWM_START 2
+#define PWM_NONSTOP 3
+#define PWM_STOP_TIME 4
+#define W83795_REG_PWM(index, nr) (0x210 + (nr) * 8 + (index))
+
+#define W83795_REG_FTSH(index) (0x240 + (index) * 2)
+#define W83795_REG_FTSL(index) (0x241 + (index) * 2)
+#define W83795_REG_TFTS 0x250
+
+#define TEMP_PWM_TTTI 0
+#define TEMP_PWM_CTFS 1
+#define TEMP_PWM_HCT 2
+#define TEMP_PWM_HOT 3
+#define W83795_REG_TTTI(index) (0x260 + (index))
+#define W83795_REG_CTFS(index) (0x268 + (index))
+#define W83795_REG_HT(index) (0x270 + (index))
+
+#define SF4_TEMP 0
+#define SF4_PWM 1
+#define W83795_REG_SF4_TEMP(temp_num, index) \
+ (0x280 + 0x10 * (temp_num) + (index))
+#define W83795_REG_SF4_PWM(temp_num, index) \
+ (0x288 + 0x10 * (temp_num) + (index))
+
+#define W83795_REG_DTSC 0x301
+#define W83795_REG_DTSE 0x302
+#define W83795_REG_DTS(index) (0x26 + (index))
+#define W83795_REG_PECI_TBASE(index) (0x320 + (index))
+
+#define DTS_CRIT 0
+#define DTS_CRIT_HYST 1
+#define DTS_WARN 2
+#define DTS_WARN_HYST 3
+#define W83795_REG_DTS_EXT(index) (0xB2 + (index))
+
+#define SETUP_PWM_DEFAULT 0
+#define SETUP_PWM_UPTIME 1
+#define SETUP_PWM_DOWNTIME 2
+#define W83795_REG_SETUP_PWM(index) (0x20C + (index))
+
+static inline u16 in_from_reg(u8 index, u16 val)
+{
+ /* 3VDD, 3VSB and VBAT: 6 mV/bit; other inputs: 2 mV/bit */
+ if (index >= 12 && index <= 14)
+ return val * 6;
+ else
+ return val * 2;
+}
+
+static inline u16 in_to_reg(u8 index, u16 val)
+{
+ if (index >= 12 && index <= 14)
+ return val / 6;
+ else
+ return val / 2;
+}
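+
+/*
+ * Example: a raw reading of 500 (0x1F4) reports 1000 mV on a 2 mV/bit
+ * input, or 3000 mV on the 6 mV/bit inputs 3VDD, 3VSB and VBAT
+ * (indices 12-14).
+ */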
+
+static inline unsigned long fan_from_reg(u16 val)
+{
+ if ((val == 0xfff) || (val == 0))
+ return 0;
+ return 1350000UL / val;
+}
+
+static inline u16 fan_to_reg(long rpm)
+{
+ if (rpm <= 0)
+ return 0x0fff;
+ return SENSORS_LIMIT((1350000 + (rpm >> 1)) / rpm, 1, 0xffe);
+}
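+
+/*
+ * Example: fan_from_reg(1200) = 1350000 / 1200 = 1125 RPM, and
+ * fan_to_reg(1125) = (1350000 + 562) / 1125 = 1200 again (clamped to
+ * the 1..0xffe range).
+ */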
+
+static inline unsigned long time_from_reg(u8 reg)
+{
+ return reg * 100;
+}
+
+static inline u8 time_to_reg(unsigned long val)
+{
+ return SENSORS_LIMIT((val + 50) / 100, 0, 0xff);
+}
+
+static inline long temp_from_reg(s8 reg)
+{
+ return reg * 1000;
+}
+
+static inline s8 temp_to_reg(long val, s8 min, s8 max)
+{
+ return SENSORS_LIMIT(val / 1000, min, max);
+}
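+
+/*
+ * Example: time_to_reg(1250) rounds to 13, read back as 1300 ms;
+ * temp_from_reg(-5) is -5000, i.e. -5 degrees C in millidegrees.
+ */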
+
+static const u16 pwm_freq_cksel0[16] = {
+ 1024, 512, 341, 256, 205, 171, 146, 128,
+ 85, 64, 32, 16, 8, 4, 2, 1
+};
+
+static unsigned int pwm_freq_from_reg(u8 reg, u16 clkin)
+{
+ unsigned long base_clock;
+
+ if (reg & 0x80) {
+ base_clock = clkin * 1000 / ((clkin == 48000) ? 384 : 256);
+ return base_clock / ((reg & 0x7f) + 1);
+ } else
+ return pwm_freq_cksel0[reg & 0x0f];
+}
+
+static u8 pwm_freq_to_reg(unsigned long val, u16 clkin)
+{
+ unsigned long base_clock;
+ u8 reg0, reg1;
+ unsigned long best0, best1;
+
+ /* Best fit for cksel = 0 */
+ for (reg0 = 0; reg0 < ARRAY_SIZE(pwm_freq_cksel0) - 1; reg0++) {
+ if (val > (pwm_freq_cksel0[reg0] +
+ pwm_freq_cksel0[reg0 + 1]) / 2)
+ break;
+ }
+ if (val < 375) /* cksel = 1 can't beat this */
+ return reg0;
+ best0 = pwm_freq_cksel0[reg0];
+
+ /* Best fit for cksel = 1 */
+ base_clock = clkin * 1000 / ((clkin == 48000) ? 384 : 256);
+ reg1 = SENSORS_LIMIT(DIV_ROUND_CLOSEST(base_clock, val), 1, 128);
+ best1 = base_clock / reg1;
+ reg1 = 0x80 | (reg1 - 1);
+
+ /* Choose the closest one */
+ if (abs(val - best0) > abs(val - best1))
+ return reg1;
+ else
+ return reg0;
+}
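+
+/*
+ * Example with clkin = 48000 kHz: a requested 25000 Hz maps to register
+ * value 0x84 (cksel = 1, base clock 48000000 / 384 = 125000 Hz, divider
+ * 5), which pwm_freq_from_reg() converts back to exactly 25000 Hz. The
+ * cksel = 0 encoding can only produce the fixed frequencies listed in
+ * pwm_freq_cksel0[].
+ */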
+
+enum chip_types {w83795g, w83795adg};
+
+struct w83795_data {
+ struct device *hwmon_dev;
+ struct mutex update_lock;
+ unsigned long last_updated; /* In jiffies */
+ enum chip_types chip_type;
+
+ u8 bank;
+
+ u32 has_in; /* Enable monitor VIN or not */
+ u8 has_dyn_in; /* Only in2-0 can have this */
+ u16 in[21][3]; /* Register value, read/high/low */
+ u8 in_lsb[10][3]; /* LSB Register value, high/low */
+ u8 has_gain; /* has gain: in17-20 * 8 */
+
+ u16 has_fan; /* Enable fan14-1 or not */
+ u16 fan[14]; /* Register value combine */
+ u16 fan_min[14]; /* Register value combine */
+
+ u8 has_temp; /* Enable monitor temp6-1 or not */
+ s8 temp[6][5]; /* current, crit, crit_hyst, warn, warn_hyst */
+ u8 temp_read_vrlsb[6];
+ u8 temp_mode; /* Bit vector, 0 = TR, 1 = TD */
+ u8 temp_src[3]; /* Register value */
+
+ u8 enable_dts; /* Enable PECI and SB-TSI,
+ * bit 0: =1 enable, =0 disable,
+ * bit 1: =1 AMD SB-TSI, =0 Intel PECI */
+ u8 has_dts; /* Enable monitor DTS temp */
+ s8 dts[8]; /* Register value */
+ u8 dts_read_vrlsb[8]; /* Register value */
+ s8 dts_ext[4]; /* Register value */
+
+ u8 has_pwm; /* 795g supports 8 pwm, 795adg only supports 2,
+ * no config register, only affected by chip
+ * type */
+ u8 pwm[8][5]; /* Register value, output, freq, start,
+ * non stop, stop time */
+ u16 clkin; /* CLKIN frequency in kHz */
+ u8 pwm_fcms[2]; /* Register value */
+ u8 pwm_tfmr[6]; /* Register value */
+ u8 pwm_fomc; /* Register value */
+
+ u16 target_speed[8]; /* Register value, target speed for speed
+ * cruise */
+ u8 tol_speed; /* tolerance of target speed */
+ u8 pwm_temp[6][4]; /* TTTI, CTFS, HCT, HOT */
+ u8 sf4_reg[6][2][7]; /* 6 temp, temp/dcpwm, 7 registers */
+
+ u8 setup_pwm[3]; /* Register value */
+
+ u8 alarms[6]; /* Register value */
+ u8 enable_beep;
+ u8 beeps[6]; /* Register value */
+
+ char valid;
+ char valid_limits;
+ char valid_pwm_config;
+};
+
+/*
+ * Hardware access
+ * We assume that nobody can change the bank outside the driver.
+ */
+
+/* Must be called with data->update_lock held, except during initialization */
+static int w83795_set_bank(struct i2c_client *client, u8 bank)
+{
+ struct w83795_data *data = i2c_get_clientdata(client);
+ int err;
+
+ /* If the same bank is already set, nothing to do */
+ if ((data->bank & 0x07) == bank)
+ return 0;
+
+ /* Change to new bank, preserve all other bits */
+ bank |= data->bank & ~0x07;
+ err = i2c_smbus_write_byte_data(client, W83795_REG_BANKSEL, bank);
+ if (err < 0) {
+ dev_err(&client->dev,
+ "Failed to set bank to %d, err %d\n",
+ (int)bank, err);
+ return err;
+ }
+ data->bank = bank;
+
+ return 0;
+}
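+
+/*
+ * Register addresses used below are 16-bit: the high byte is the bank,
+ * the low byte the offset within it. W83795_REG_FCMS1 = 0x201, for
+ * instance, is offset 0x01 in bank 2, so w83795_read()/w83795_write()
+ * switch BANKSEL to 2 before the SMBus access.
+ */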
+
+/* Must be called with data->update_lock held, except during initialization */
+static u8 w83795_read(struct i2c_client *client, u16 reg)
+{
+ int err;
+
+ err = w83795_set_bank(client, reg >> 8);
+ if (err < 0)
+ return 0x00; /* Arbitrary */
+
+ err = i2c_smbus_read_byte_data(client, reg & 0xff);
+ if (err < 0) {
+ dev_err(&client->dev,
+ "Failed to read from register 0x%03x, err %d\n",
+ (int)reg, err);
+ return 0x00; /* Arbitrary */
+ }
+ return err;
+}
+
+/* Must be called with data->update_lock held, except during initialization */
+static int w83795_write(struct i2c_client *client, u16 reg, u8 value)
+{
+ int err;
+
+ err = w83795_set_bank(client, reg >> 8);
+ if (err < 0)
+ return err;
+
+ err = i2c_smbus_write_byte_data(client, reg & 0xff, value);
+ if (err < 0)
+ dev_err(&client->dev,
+ "Failed to write to register 0x%03x, err %d\n",
+ (int)reg, err);
+ return err;
+}
+
+static void w83795_update_limits(struct i2c_client *client)
+{
+ struct w83795_data *data = i2c_get_clientdata(client);
+ int i, limit;
+
+ /* Read the voltage limits */
+ for (i = 0; i < ARRAY_SIZE(data->in); i++) {
+ if (!(data->has_in & (1 << i)))
+ continue;
+ data->in[i][IN_MAX] =
+ w83795_read(client, W83795_REG_IN[i][IN_MAX]);
+ data->in[i][IN_LOW] =
+ w83795_read(client, W83795_REG_IN[i][IN_LOW]);
+ }
+ for (i = 0; i < ARRAY_SIZE(data->in_lsb); i++) {
+ if ((i == 2 && data->chip_type == w83795adg) ||
+ (i >= 4 && !(data->has_in & (1 << (i + 11)))))
+ continue;
+ data->in_lsb[i][IN_MAX] =
+ w83795_read(client, IN_LSB_REG(i, IN_MAX));
+ data->in_lsb[i][IN_LOW] =
+ w83795_read(client, IN_LSB_REG(i, IN_LOW));
+ }
+
+ /* Read the fan limits */
+ for (i = 0; i < ARRAY_SIZE(data->fan); i++) {
+ u8 lsb;
+
+ /* Each register contains LSB for 2 fans, but we want to
+ * read it only once to save time */
+ if ((i & 1) == 0 && (data->has_fan & (3 << i)))
+ lsb = w83795_read(client, W83795_REG_FAN_MIN_LSB(i));
+
+ if (!(data->has_fan & (1 << i)))
+ continue;
+ data->fan_min[i] =
+ w83795_read(client, W83795_REG_FAN_MIN_HL(i)) << 4;
+ data->fan_min[i] |=
+ (lsb >> W83795_REG_FAN_MIN_LSB_SHIFT(i)) & 0x0F;
+ }
+
+ /* Read the temperature limits */
+ for (i = 0; i < ARRAY_SIZE(data->temp); i++) {
+ if (!(data->has_temp & (1 << i)))
+ continue;
+ for (limit = TEMP_CRIT; limit <= TEMP_WARN_HYST; limit++)
+ data->temp[i][limit] =
+ w83795_read(client, W83795_REG_TEMP[i][limit]);
+ }
+
+ /* Read the DTS limits */
+ if (data->enable_dts) {
+ for (limit = DTS_CRIT; limit <= DTS_WARN_HYST; limit++)
+ data->dts_ext[limit] =
+ w83795_read(client, W83795_REG_DTS_EXT(limit));
+ }
+
+ /* Read beep settings */
+ if (data->enable_beep) {
+ for (i = 0; i < ARRAY_SIZE(data->beeps); i++)
+ data->beeps[i] =
+ w83795_read(client, W83795_REG_BEEP(i));
+ }
+
+ data->valid_limits = 1;
+}
+
+static struct w83795_data *w83795_update_pwm_config(struct device *dev)
+{
+ struct i2c_client *client = to_i2c_client(dev);
+ struct w83795_data *data = i2c_get_clientdata(client);
+ int i, tmp;
+
+ mutex_lock(&data->update_lock);
+
+ if (data->valid_pwm_config)
+ goto END;
+
+ /* Read temperature source selection */
+ for (i = 0; i < ARRAY_SIZE(data->temp_src); i++)
+ data->temp_src[i] = w83795_read(client, W83795_REG_TSS(i));
+
+ /* Read automatic fan speed control settings */
+ data->pwm_fcms[0] = w83795_read(client, W83795_REG_FCMS1);
+ data->pwm_fcms[1] = w83795_read(client, W83795_REG_FCMS2);
+ for (i = 0; i < ARRAY_SIZE(data->pwm_tfmr); i++)
+ data->pwm_tfmr[i] = w83795_read(client, W83795_REG_TFMR(i));
+ data->pwm_fomc = w83795_read(client, W83795_REG_FOMC);
+ for (i = 0; i < data->has_pwm; i++) {
+ for (tmp = PWM_FREQ; tmp <= PWM_STOP_TIME; tmp++)
+ data->pwm[i][tmp] =
+ w83795_read(client, W83795_REG_PWM(i, tmp));
+ }
+ for (i = 0; i < ARRAY_SIZE(data->target_speed); i++) {
+ data->target_speed[i] =
+ w83795_read(client, W83795_REG_FTSH(i)) << 4;
+ data->target_speed[i] |=
+ w83795_read(client, W83795_REG_FTSL(i)) >> 4;
+ }
+ data->tol_speed = w83795_read(client, W83795_REG_TFTS) & 0x3f;
+
+ for (i = 0; i < ARRAY_SIZE(data->pwm_temp); i++) {
+ data->pwm_temp[i][TEMP_PWM_TTTI] =
+ w83795_read(client, W83795_REG_TTTI(i)) & 0x7f;
+ data->pwm_temp[i][TEMP_PWM_CTFS] =
+ w83795_read(client, W83795_REG_CTFS(i));
+ tmp = w83795_read(client, W83795_REG_HT(i));
+ data->pwm_temp[i][TEMP_PWM_HCT] = tmp >> 4;
+ data->pwm_temp[i][TEMP_PWM_HOT] = tmp & 0x0f;
+ }
+
+ /* Read SmartFanIV trip points */
+ for (i = 0; i < ARRAY_SIZE(data->sf4_reg); i++) {
+ for (tmp = 0; tmp < 7; tmp++) {
+ data->sf4_reg[i][SF4_TEMP][tmp] =
+ w83795_read(client,
+ W83795_REG_SF4_TEMP(i, tmp));
+ data->sf4_reg[i][SF4_PWM][tmp] =
+ w83795_read(client, W83795_REG_SF4_PWM(i, tmp));
+ }
+ }
+
+ /* Read setup PWM */
+ for (i = 0; i < ARRAY_SIZE(data->setup_pwm); i++)
+ data->setup_pwm[i] =
+ w83795_read(client, W83795_REG_SETUP_PWM(i));
+
+ data->valid_pwm_config = 1;
+
+END:
+ mutex_unlock(&data->update_lock);
+ return data;
+}
+
+static struct w83795_data *w83795_update_device(struct device *dev)
+{
+ struct i2c_client *client = to_i2c_client(dev);
+ struct w83795_data *data = i2c_get_clientdata(client);
+ u16 tmp;
+ u8 intrusion;
+ int i;
+
+ mutex_lock(&data->update_lock);
+
+ if (!data->valid_limits)
+ w83795_update_limits(client);
+
+ if (!(time_after(jiffies, data->last_updated + HZ * 2)
+ || !data->valid))
+ goto END;
+
+ /* Update the voltages value */
+ for (i = 0; i < ARRAY_SIZE(data->in); i++) {
+ if (!(data->has_in & (1 << i)))
+ continue;
+ tmp = w83795_read(client, W83795_REG_IN[i][IN_READ]) << 2;
+ tmp |= w83795_read(client, W83795_REG_VRLSB) >> 6;
+ data->in[i][IN_READ] = tmp;
+ }
+
+ /* in0-2 can have dynamic limits (W83795G only) */
+ if (data->has_dyn_in) {
+ u8 lsb_max = w83795_read(client, IN_LSB_REG(0, IN_MAX));
+ u8 lsb_low = w83795_read(client, IN_LSB_REG(0, IN_LOW));
+
+ for (i = 0; i < 3; i++) {
+ if (!(data->has_dyn_in & (1 << i)))
+ continue;
+ data->in[i][IN_MAX] =
+ w83795_read(client, W83795_REG_IN[i][IN_MAX]);
+ data->in[i][IN_LOW] =
+ w83795_read(client, W83795_REG_IN[i][IN_LOW]);
+ data->in_lsb[i][IN_MAX] = (lsb_max >> (2 * i)) & 0x03;
+ data->in_lsb[i][IN_LOW] = (lsb_low >> (2 * i)) & 0x03;
+ }
+ }
+
+ /* Update fan */
+ for (i = 0; i < ARRAY_SIZE(data->fan); i++) {
+ if (!(data->has_fan & (1 << i)))
+ continue;
+ data->fan[i] = w83795_read(client, W83795_REG_FAN(i)) << 4;
+ data->fan[i] |= w83795_read(client, W83795_REG_VRLSB) >> 4;
+ }
+
+ /* Update temperature */
+ for (i = 0; i < ARRAY_SIZE(data->temp); i++) {
+ data->temp[i][TEMP_READ] =
+ w83795_read(client, W83795_REG_TEMP[i][TEMP_READ]);
+ data->temp_read_vrlsb[i] =
+ w83795_read(client, W83795_REG_VRLSB);
+ }
+
+ /* Update dts temperature */
+ if (data->enable_dts) {
+ for (i = 0; i < ARRAY_SIZE(data->dts); i++) {
+ if (!(data->has_dts & (1 << i)))
+ continue;
+ data->dts[i] =
+ w83795_read(client, W83795_REG_DTS(i));
+ data->dts_read_vrlsb[i] =
+ w83795_read(client, W83795_REG_VRLSB);
+ }
+ }
+
+ /* Update pwm output */
+ for (i = 0; i < data->has_pwm; i++) {
+ data->pwm[i][PWM_OUTPUT] =
+ w83795_read(client, W83795_REG_PWM(i, PWM_OUTPUT));
+ }
+
+ /* Update intrusion and alarms
+ * It is important to read intrusion first, because reading from
+ * register SMI STS6 clears the interrupt status temporarily. */
+ tmp = w83795_read(client, W83795_REG_ALARM_CTRL);
+ /* Switch to interrupt status for intrusion if needed */
+ if (tmp & ALARM_CTRL_RTSACS)
+ w83795_write(client, W83795_REG_ALARM_CTRL,
+ tmp & ~ALARM_CTRL_RTSACS);
+ intrusion = w83795_read(client, W83795_REG_ALARM(5)) & (1 << 6);
+ /* Switch to real-time alarms */
+ w83795_write(client, W83795_REG_ALARM_CTRL, tmp | ALARM_CTRL_RTSACS);
+ for (i = 0; i < ARRAY_SIZE(data->alarms); i++)
+ data->alarms[i] = w83795_read(client, W83795_REG_ALARM(i));
+ data->alarms[5] |= intrusion;
+ /* Restore original configuration if needed */
+ if (!(tmp & ALARM_CTRL_RTSACS))
+ w83795_write(client, W83795_REG_ALARM_CTRL,
+ tmp & ~ALARM_CTRL_RTSACS);
+
+ data->last_updated = jiffies;
+ data->valid = 1;
+
+END:
+ mutex_unlock(&data->update_lock);
+ return data;
+}
+
+/*
+ * Sysfs attributes
+ */
+
+#define ALARM_STATUS 0
+#define BEEP_ENABLE 1
+static ssize_t
+show_alarm_beep(struct device *dev, struct device_attribute *attr, char *buf)
+{
+ struct w83795_data *data = w83795_update_device(dev);
+ struct sensor_device_attribute_2 *sensor_attr =
+ to_sensor_dev_attr_2(attr);
+ int nr = sensor_attr->nr;
+ int index = sensor_attr->index >> 3;
+ int bit = sensor_attr->index & 0x07;
+ u8 val;
+
+ if (nr == ALARM_STATUS)
+ val = (data->alarms[index] >> bit) & 1;
+ else /* BEEP_ENABLE */
+ val = (data->beeps[index] >> bit) & 1;
+
+ return sprintf(buf, "%u\n", val);
+}
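+
+/*
+ * The attribute index packs the register and bit number: index >> 3
+ * selects one of the six ALARM/BEEP registers and index & 7 the bit.
+ * fan1_alarm, for example, uses index 1 + 31 = 32, i.e. register 4,
+ * bit 0.
+ */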
+
+static ssize_t
+store_beep(struct device *dev, struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct i2c_client *client = to_i2c_client(dev);
+ struct w83795_data *data = i2c_get_clientdata(client);
+ struct sensor_device_attribute_2 *sensor_attr =
+ to_sensor_dev_attr_2(attr);
+ int index = sensor_attr->index >> 3;
+ int shift = sensor_attr->index & 0x07;
+ u8 beep_bit = 1 << shift;
+ unsigned long val;
+
+ if (strict_strtoul(buf, 10, &val) < 0)
+ return -EINVAL;
+ if (val != 0 && val != 1)
+ return -EINVAL;
+
+ mutex_lock(&data->update_lock);
+ data->beeps[index] = w83795_read(client, W83795_REG_BEEP(index));
+ data->beeps[index] &= ~beep_bit;
+ data->beeps[index] |= val << shift;
+ w83795_write(client, W83795_REG_BEEP(index), data->beeps[index]);
+ mutex_unlock(&data->update_lock);
+
+ return count;
+}
+
+/* Write 0 to clear chassis alarm */
+static ssize_t
+store_chassis_clear(struct device *dev,
+ struct device_attribute *attr, const char *buf,
+ size_t count)
+{
+ struct i2c_client *client = to_i2c_client(dev);
+ struct w83795_data *data = i2c_get_clientdata(client);
+ unsigned long val;
+
+ if (strict_strtoul(buf, 10, &val) < 0 || val != 0)
+ return -EINVAL;
+
+ mutex_lock(&data->update_lock);
+ val = w83795_read(client, W83795_REG_CLR_CHASSIS);
+ val |= 0x80;
+ w83795_write(client, W83795_REG_CLR_CHASSIS, val);
+
+ /* Clear status and force cache refresh */
+ w83795_read(client, W83795_REG_ALARM(5));
+ data->valid = 0;
+ mutex_unlock(&data->update_lock);
+ return count;
+}
+
+#define FAN_INPUT 0
+#define FAN_MIN 1
+static ssize_t
+show_fan(struct device *dev, struct device_attribute *attr, char *buf)
+{
+ struct sensor_device_attribute_2 *sensor_attr =
+ to_sensor_dev_attr_2(attr);
+ int nr = sensor_attr->nr;
+ int index = sensor_attr->index;
+ struct w83795_data *data = w83795_update_device(dev);
+ u16 val;
+
+ if (nr == FAN_INPUT)
+ val = data->fan[index] & 0x0fff;
+ else
+ val = data->fan_min[index] & 0x0fff;
+
+ return sprintf(buf, "%lu\n", fan_from_reg(val));
+}
+
+static ssize_t
+store_fan_min(struct device *dev, struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct sensor_device_attribute_2 *sensor_attr =
+ to_sensor_dev_attr_2(attr);
+ int index = sensor_attr->index;
+ struct i2c_client *client = to_i2c_client(dev);
+ struct w83795_data *data = i2c_get_clientdata(client);
+ unsigned long val;
+
+ if (strict_strtoul(buf, 10, &val))
+ return -EINVAL;
+ val = fan_to_reg(val);
+
+ mutex_lock(&data->update_lock);
+ data->fan_min[index] = val;
+ w83795_write(client, W83795_REG_FAN_MIN_HL(index), (val >> 4) & 0xff);
+ val &= 0x0f;
+ if (index & 1) {
+ val <<= 4;
+ val |= w83795_read(client, W83795_REG_FAN_MIN_LSB(index))
+ & 0x0f;
+ } else {
+ val |= w83795_read(client, W83795_REG_FAN_MIN_LSB(index))
+ & 0xf0;
+ }
+ w83795_write(client, W83795_REG_FAN_MIN_LSB(index), val & 0xff);
+ mutex_unlock(&data->update_lock);
+
+ return count;
+}
+
+static ssize_t
+show_pwm(struct device *dev, struct device_attribute *attr, char *buf)
+{
+ struct w83795_data *data;
+ struct sensor_device_attribute_2 *sensor_attr =
+ to_sensor_dev_attr_2(attr);
+ int nr = sensor_attr->nr;
+ int index = sensor_attr->index;
+ unsigned int val;
+
+ data = nr == PWM_OUTPUT ? w83795_update_device(dev)
+ : w83795_update_pwm_config(dev);
+
+ switch (nr) {
+ case PWM_STOP_TIME:
+ val = time_from_reg(data->pwm[index][nr]);
+ break;
+ case PWM_FREQ:
+ val = pwm_freq_from_reg(data->pwm[index][nr], data->clkin);
+ break;
+ default:
+ val = data->pwm[index][nr];
+ break;
+ }
+
+ return sprintf(buf, "%u\n", val);
+}
+
+static ssize_t
+store_pwm(struct device *dev, struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct i2c_client *client = to_i2c_client(dev);
+ struct w83795_data *data = i2c_get_clientdata(client);
+ struct sensor_device_attribute_2 *sensor_attr =
+ to_sensor_dev_attr_2(attr);
+ int nr = sensor_attr->nr;
+ int index = sensor_attr->index;
+ unsigned long val;
+
+ if (strict_strtoul(buf, 10, &val) < 0)
+ return -EINVAL;
+
+ mutex_lock(&data->update_lock);
+ switch (nr) {
+ case PWM_STOP_TIME:
+ val = time_to_reg(val);
+ break;
+ case PWM_FREQ:
+ val = pwm_freq_to_reg(val, data->clkin);
+ break;
+ default:
+ val = SENSORS_LIMIT(val, 0, 0xff);
+ break;
+ }
+ w83795_write(client, W83795_REG_PWM(index, nr), val);
+ data->pwm[index][nr] = val;
+ mutex_unlock(&data->update_lock);
+ return count;
+}
+
+static ssize_t
+show_pwm_enable(struct device *dev, struct device_attribute *attr, char *buf)
+{
+ struct sensor_device_attribute_2 *sensor_attr =
+ to_sensor_dev_attr_2(attr);
+ struct w83795_data *data = w83795_update_pwm_config(dev);
+ int index = sensor_attr->index;
+ u8 tmp;
+
+ /* Speed cruise mode */
+ if (data->pwm_fcms[0] & (1 << index)) {
+ tmp = 2;
+ goto out;
+ }
+ /* Thermal cruise or SmartFan IV mode */
+ for (tmp = 0; tmp < 6; tmp++) {
+ if (data->pwm_tfmr[tmp] & (1 << index)) {
+ tmp = 3;
+ goto out;
+ }
+ }
+ /* Manual mode */
+ tmp = 1;
+
+out:
+ return sprintf(buf, "%u\n", tmp);
+}
+
+static ssize_t
+store_pwm_enable(struct device *dev, struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct i2c_client *client = to_i2c_client(dev);
+ struct w83795_data *data = w83795_update_pwm_config(dev);
+ struct sensor_device_attribute_2 *sensor_attr =
+ to_sensor_dev_attr_2(attr);
+ int index = sensor_attr->index;
+ unsigned long val;
+ int i;
+
+ if (strict_strtoul(buf, 10, &val) < 0)
+ return -EINVAL;
+ if (val < 1 || val > 2)
+ return -EINVAL;
+
+ mutex_lock(&data->update_lock);
+ switch (val) {
+ case 1:
+ /* Clear speed cruise mode bits */
+ data->pwm_fcms[0] &= ~(1 << index);
+ w83795_write(client, W83795_REG_FCMS1, data->pwm_fcms[0]);
+ /* Clear thermal cruise mode bits */
+ for (i = 0; i < 6; i++) {
+ data->pwm_tfmr[i] &= ~(1 << index);
+ w83795_write(client, W83795_REG_TFMR(i),
+ data->pwm_tfmr[i]);
+ }
+ break;
+ case 2:
+ data->pwm_fcms[0] |= (1 << index);
+ w83795_write(client, W83795_REG_FCMS1, data->pwm_fcms[0]);
+ break;
+ }
+ mutex_unlock(&data->update_lock);
+ return count;
+}
+
+static ssize_t
+show_pwm_mode(struct device *dev, struct device_attribute *attr, char *buf)
+{
+ struct w83795_data *data = w83795_update_pwm_config(dev);
+ int index = to_sensor_dev_attr_2(attr)->index;
+ unsigned int mode;
+
+ if (data->pwm_fomc & (1 << index))
+ mode = 0; /* DC */
+ else
+ mode = 1; /* PWM */
+
+ return sprintf(buf, "%u\n", mode);
+}
+
+/*
+ * Check whether a given temperature source can ever be useful.
+ * Returns the number of selectable temperature channels which are
+ * enabled.
+ */
+static int w83795_tss_useful(const struct w83795_data *data, int tsrc)
+{
+ int useful = 0, i;
+
+ for (i = 0; i < 4; i++) {
+ if (tss_map[i][tsrc] == TSS_MAP_RESERVED)
+ continue;
+ if (tss_map[i][tsrc] < 6) /* Analog */
+ useful += (data->has_temp >> tss_map[i][tsrc]) & 1;
+ else /* Digital */
+ useful += (data->has_dts >> (tss_map[i][tsrc] - 6)) & 1;
+ }
+
+ return useful;
+}
+
+static ssize_t
+show_temp_src(struct device *dev, struct device_attribute *attr, char *buf)
+{
+ struct sensor_device_attribute_2 *sensor_attr =
+ to_sensor_dev_attr_2(attr);
+ struct w83795_data *data = w83795_update_pwm_config(dev);
+ int index = sensor_attr->index;
+ u8 tmp = data->temp_src[index / 2];
+
+ if (index & 1)
+ tmp >>= 4; /* Pick high nibble */
+ else
+ tmp &= 0x0f; /* Pick low nibble */
+
+ /* Look-up the actual temperature channel number */
+ if (tmp >= 4 || tss_map[tmp][index] == TSS_MAP_RESERVED)
+ return -EINVAL; /* Shouldn't happen */
+
+ return sprintf(buf, "%u\n", (unsigned int)tss_map[tmp][index] + 1);
+}
+
+static ssize_t
+store_temp_src(struct device *dev, struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct i2c_client *client = to_i2c_client(dev);
+ struct w83795_data *data = w83795_update_pwm_config(dev);
+ struct sensor_device_attribute_2 *sensor_attr =
+ to_sensor_dev_attr_2(attr);
+ int index = sensor_attr->index;
+ int tmp;
+ unsigned long channel;
+ u8 val = index / 2;
+
+ if (strict_strtoul(buf, 10, &channel) < 0 ||
+ channel < 1 || channel > 14)
+ return -EINVAL;
+
+ /* Check if request can be fulfilled */
+ for (tmp = 0; tmp < 4; tmp++) {
+ if (tss_map[tmp][index] == channel - 1)
+ break;
+ }
+ if (tmp == 4) /* No match */
+ return -EINVAL;
+
+ mutex_lock(&data->update_lock);
+ if (index & 1) {
+ tmp <<= 4;
+ data->temp_src[val] &= 0x0f;
+ } else {
+ data->temp_src[val] &= 0xf0;
+ }
+ data->temp_src[val] |= tmp;
+ w83795_write(client, W83795_REG_TSS(val), data->temp_src[val]);
+ mutex_unlock(&data->update_lock);
+
+ return count;
+}
+
+#define TEMP_PWM_ENABLE 0
+#define TEMP_PWM_FAN_MAP 1
+static ssize_t
+show_temp_pwm_enable(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ struct w83795_data *data = w83795_update_pwm_config(dev);
+ struct sensor_device_attribute_2 *sensor_attr =
+ to_sensor_dev_attr_2(attr);
+ int nr = sensor_attr->nr;
+ int index = sensor_attr->index;
+ u8 tmp = 0xff;
+
+ switch (nr) {
+ case TEMP_PWM_ENABLE:
+ tmp = (data->pwm_fcms[1] >> index) & 1;
+ if (tmp)
+ tmp = 4;
+ else
+ tmp = 3;
+ break;
+ case TEMP_PWM_FAN_MAP:
+ tmp = data->pwm_tfmr[index];
+ break;
+ }
+
+ return sprintf(buf, "%u\n", tmp);
+}
+
+static ssize_t
+store_temp_pwm_enable(struct device *dev, struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct i2c_client *client = to_i2c_client(dev);
+ struct w83795_data *data = w83795_update_pwm_config(dev);
+ struct sensor_device_attribute_2 *sensor_attr =
+ to_sensor_dev_attr_2(attr);
+ int nr = sensor_attr->nr;
+ int index = sensor_attr->index;
+ unsigned long tmp;
+
+ if (strict_strtoul(buf, 10, &tmp) < 0)
+ return -EINVAL;
+
+ switch (nr) {
+ case TEMP_PWM_ENABLE:
+ if (tmp != 3 && tmp != 4)
+ return -EINVAL;
+ tmp -= 3;
+ mutex_lock(&data->update_lock);
+ data->pwm_fcms[1] &= ~(1 << index);
+ data->pwm_fcms[1] |= tmp << index;
+ w83795_write(client, W83795_REG_FCMS2, data->pwm_fcms[1]);
+ mutex_unlock(&data->update_lock);
+ break;
+ case TEMP_PWM_FAN_MAP:
+ mutex_lock(&data->update_lock);
+ tmp = SENSORS_LIMIT(tmp, 0, 0xff);
+ w83795_write(client, W83795_REG_TFMR(index), tmp);
+ data->pwm_tfmr[index] = tmp;
+ mutex_unlock(&data->update_lock);
+ break;
+ }
+ return count;
+}
+
+#define FANIN_TARGET 0
+#define FANIN_TOL 1
+static ssize_t
+show_fanin(struct device *dev, struct device_attribute *attr, char *buf)
+{
+ struct w83795_data *data = w83795_update_pwm_config(dev);
+ struct sensor_device_attribute_2 *sensor_attr =
+ to_sensor_dev_attr_2(attr);
+ int nr = sensor_attr->nr;
+ int index = sensor_attr->index;
+ u16 tmp = 0;
+
+ switch (nr) {
+ case FANIN_TARGET:
+ tmp = fan_from_reg(data->target_speed[index]);
+ break;
+ case FANIN_TOL:
+ tmp = data->tol_speed;
+ break;
+ }
+
+ return sprintf(buf, "%u\n", tmp);
+}
+
+static ssize_t
+store_fanin(struct device *dev, struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct i2c_client *client = to_i2c_client(dev);
+ struct w83795_data *data = i2c_get_clientdata(client);
+ struct sensor_device_attribute_2 *sensor_attr =
+ to_sensor_dev_attr_2(attr);
+ int nr = sensor_attr->nr;
+ int index = sensor_attr->index;
+ unsigned long val;
+
+ if (strict_strtoul(buf, 10, &val) < 0)
+ return -EINVAL;
+
+ mutex_lock(&data->update_lock);
+ switch (nr) {
+ case FANIN_TARGET:
+ val = fan_to_reg(SENSORS_LIMIT(val, 0, 0xfff));
+ w83795_write(client, W83795_REG_FTSH(index), val >> 4);
+ w83795_write(client, W83795_REG_FTSL(index), (val << 4) & 0xf0);
+ data->target_speed[index] = val;
+ break;
+ case FANIN_TOL:
+ val = SENSORS_LIMIT(val, 0, 0x3f);
+ w83795_write(client, W83795_REG_TFTS, val);
+ data->tol_speed = val;
+ break;
+ }
+ mutex_unlock(&data->update_lock);
+
+ return count;
+}
+
+
+static ssize_t
+show_temp_pwm(struct device *dev, struct device_attribute *attr, char *buf)
+{
+ struct w83795_data *data = w83795_update_pwm_config(dev);
+ struct sensor_device_attribute_2 *sensor_attr =
+ to_sensor_dev_attr_2(attr);
+ int nr = sensor_attr->nr;
+ int index = sensor_attr->index;
+ long tmp = temp_from_reg(data->pwm_temp[index][nr]);
+
+ return sprintf(buf, "%ld\n", tmp);
+}
+
+static ssize_t
+store_temp_pwm(struct device *dev, struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct i2c_client *client = to_i2c_client(dev);
+ struct w83795_data *data = i2c_get_clientdata(client);
+ struct sensor_device_attribute_2 *sensor_attr =
+ to_sensor_dev_attr_2(attr);
+ int nr = sensor_attr->nr;
+ int index = sensor_attr->index;
+ unsigned long val;
+ u8 tmp;
+
+ if (strict_strtoul(buf, 10, &val) < 0)
+ return -EINVAL;
+ val /= 1000;
+
+ mutex_lock(&data->update_lock);
+ switch (nr) {
+ case TEMP_PWM_TTTI:
+ val = SENSORS_LIMIT(val, 0, 0x7f);
+ w83795_write(client, W83795_REG_TTTI(index), val);
+ break;
+ case TEMP_PWM_CTFS:
+ val = SENSORS_LIMIT(val, 0, 0x7f);
+ w83795_write(client, W83795_REG_CTFS(index), val);
+ break;
+ case TEMP_PWM_HCT:
+ val = SENSORS_LIMIT(val, 0, 0x0f);
+ tmp = w83795_read(client, W83795_REG_HT(index));
+ tmp &= 0x0f;
+ tmp |= (val << 4) & 0xf0;
+ w83795_write(client, W83795_REG_HT(index), tmp);
+ break;
+ case TEMP_PWM_HOT:
+ val = SENSORS_LIMIT(val, 0, 0x0f);
+ tmp = w83795_read(client, W83795_REG_HT(index));
+ tmp &= 0xf0;
+ tmp |= val & 0x0f;
+ w83795_write(client, W83795_REG_HT(index), tmp);
+ break;
+ }
+ data->pwm_temp[index][nr] = val;
+ mutex_unlock(&data->update_lock);
+
+ return count;
+}
+
+static ssize_t
+show_sf4_pwm(struct device *dev, struct device_attribute *attr, char *buf)
+{
+ struct w83795_data *data = w83795_update_pwm_config(dev);
+ struct sensor_device_attribute_2 *sensor_attr =
+ to_sensor_dev_attr_2(attr);
+ int nr = sensor_attr->nr;
+ int index = sensor_attr->index;
+
+ return sprintf(buf, "%u\n", data->sf4_reg[index][SF4_PWM][nr]);
+}
+
+static ssize_t
+store_sf4_pwm(struct device *dev, struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct i2c_client *client = to_i2c_client(dev);
+ struct w83795_data *data = i2c_get_clientdata(client);
+ struct sensor_device_attribute_2 *sensor_attr =
+ to_sensor_dev_attr_2(attr);
+ int nr = sensor_attr->nr;
+ int index = sensor_attr->index;
+ unsigned long val;
+
+ if (strict_strtoul(buf, 10, &val) < 0)
+ return -EINVAL;
+
+ mutex_lock(&data->update_lock);
+ w83795_write(client, W83795_REG_SF4_PWM(index, nr), val);
+ data->sf4_reg[index][SF4_PWM][nr] = val;
+ mutex_unlock(&data->update_lock);
+
+ return count;
+}
+
+static ssize_t
+show_sf4_temp(struct device *dev, struct device_attribute *attr, char *buf)
+{
+ struct w83795_data *data = w83795_update_pwm_config(dev);
+ struct sensor_device_attribute_2 *sensor_attr =
+ to_sensor_dev_attr_2(attr);
+ int nr = sensor_attr->nr;
+ int index = sensor_attr->index;
+
+ return sprintf(buf, "%u\n",
+ (data->sf4_reg[index][SF4_TEMP][nr]) * 1000);
+}
+
+static ssize_t
+store_sf4_temp(struct device *dev, struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct i2c_client *client = to_i2c_client(dev);
+ struct w83795_data *data = i2c_get_clientdata(client);
+ struct sensor_device_attribute_2 *sensor_attr =
+ to_sensor_dev_attr_2(attr);
+ int nr = sensor_attr->nr;
+ int index = sensor_attr->index;
+ unsigned long val;
+
+ if (strict_strtoul(buf, 10, &val) < 0)
+ return -EINVAL;
+ val /= 1000;
+
+ mutex_lock(&data->update_lock);
+ w83795_write(client, W83795_REG_SF4_TEMP(index, nr), val);
+ data->sf4_reg[index][SF4_TEMP][nr] = val;
+ mutex_unlock(&data->update_lock);
+
+ return count;
+}
+
+
+static ssize_t
+show_temp(struct device *dev, struct device_attribute *attr, char *buf)
+{
+ struct sensor_device_attribute_2 *sensor_attr =
+ to_sensor_dev_attr_2(attr);
+ int nr = sensor_attr->nr;
+ int index = sensor_attr->index;
+ struct w83795_data *data = w83795_update_device(dev);
+ long temp = temp_from_reg(data->temp[index][nr]);
+
+ if (nr == TEMP_READ)
+ temp += (data->temp_read_vrlsb[index] >> 6) * 250;
+ return sprintf(buf, "%ld\n", temp);
+}
+
+static ssize_t
+store_temp(struct device *dev, struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct sensor_device_attribute_2 *sensor_attr =
+ to_sensor_dev_attr_2(attr);
+ int nr = sensor_attr->nr;
+ int index = sensor_attr->index;
+ struct i2c_client *client = to_i2c_client(dev);
+ struct w83795_data *data = i2c_get_clientdata(client);
+ long tmp;
+
+ if (strict_strtol(buf, 10, &tmp) < 0)
+ return -EINVAL;
+
+ mutex_lock(&data->update_lock);
+ data->temp[index][nr] = temp_to_reg(tmp, -128, 127);
+ w83795_write(client, W83795_REG_TEMP[index][nr], data->temp[index][nr]);
+ mutex_unlock(&data->update_lock);
+ return count;
+}
+
+
+static ssize_t
+show_dts_mode(struct device *dev, struct device_attribute *attr, char *buf)
+{
+ struct w83795_data *data = dev_get_drvdata(dev);
+ int tmp;
+
+ if (data->enable_dts & 2)
+ tmp = 5;
+ else
+ tmp = 6;
+
+ return sprintf(buf, "%d\n", tmp);
+}
+
+static ssize_t
+show_dts(struct device *dev, struct device_attribute *attr, char *buf)
+{
+ struct sensor_device_attribute_2 *sensor_attr =
+ to_sensor_dev_attr_2(attr);
+ int index = sensor_attr->index;
+ struct w83795_data *data = w83795_update_device(dev);
+ long temp = temp_from_reg(data->dts[index]);
+
+ temp += (data->dts_read_vrlsb[index] >> 6) * 250;
+ return sprintf(buf, "%ld\n", temp);
+}
+
+static ssize_t
+show_dts_ext(struct device *dev, struct device_attribute *attr, char *buf)
+{
+ struct sensor_device_attribute_2 *sensor_attr =
+ to_sensor_dev_attr_2(attr);
+ int nr = sensor_attr->nr;
+ struct w83795_data *data = dev_get_drvdata(dev);
+ long temp = temp_from_reg(data->dts_ext[nr]);
+
+ return sprintf(buf, "%ld\n", temp);
+}
+
+static ssize_t
+store_dts_ext(struct device *dev, struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct sensor_device_attribute_2 *sensor_attr =
+ to_sensor_dev_attr_2(attr);
+ int nr = sensor_attr->nr;
+ struct i2c_client *client = to_i2c_client(dev);
+ struct w83795_data *data = i2c_get_clientdata(client);
+ long tmp;
+
+ if (strict_strtol(buf, 10, &tmp) < 0)
+ return -EINVAL;
+
+ mutex_lock(&data->update_lock);
+ data->dts_ext[nr] = temp_to_reg(tmp, -128, 127);
+ w83795_write(client, W83795_REG_DTS_EXT(nr), data->dts_ext[nr]);
+ mutex_unlock(&data->update_lock);
+ return count;
+}
+
+
+static ssize_t
+show_temp_mode(struct device *dev, struct device_attribute *attr, char *buf)
+{
+ struct w83795_data *data = dev_get_drvdata(dev);
+ struct sensor_device_attribute_2 *sensor_attr =
+ to_sensor_dev_attr_2(attr);
+ int index = sensor_attr->index;
+ int tmp;
+
+ if (data->temp_mode & (1 << index))
+ tmp = 3; /* Thermal diode */
+ else
+ tmp = 4; /* Thermistor */
+
+ return sprintf(buf, "%d\n", tmp);
+}
+
+/* Only for temp1-4 (temp5-6 can only be thermistor) */
+static ssize_t
+store_temp_mode(struct device *dev, struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct i2c_client *client = to_i2c_client(dev);
+ struct w83795_data *data = i2c_get_clientdata(client);
+ struct sensor_device_attribute_2 *sensor_attr =
+ to_sensor_dev_attr_2(attr);
+ int index = sensor_attr->index;
+ int reg_shift;
+ unsigned long val;
+ u8 tmp;
+
+ if (strict_strtoul(buf, 10, &val) < 0)
+ return -EINVAL;
+ if ((val != 4) && (val != 3))
+ return -EINVAL;
+
+ mutex_lock(&data->update_lock);
+ if (val == 3) {
+ /* Thermal diode */
+ val = 0x01;
+ data->temp_mode |= 1 << index;
+ } else if (val == 4) {
+ /* Thermistor */
+ val = 0x03;
+ data->temp_mode &= ~(1 << index);
+ }
+
+ reg_shift = 2 * index;
+ tmp = w83795_read(client, W83795_REG_TEMP_CTRL2);
+ tmp &= ~(0x03 << reg_shift);
+ tmp |= val << reg_shift;
+ w83795_write(client, W83795_REG_TEMP_CTRL2, tmp);
+
+ mutex_unlock(&data->update_lock);
+ return count;
+}
+
+
+/* show/store VIN */
+static ssize_t
+show_in(struct device *dev, struct device_attribute *attr, char *buf)
+{
+ struct sensor_device_attribute_2 *sensor_attr =
+ to_sensor_dev_attr_2(attr);
+ int nr = sensor_attr->nr;
+ int index = sensor_attr->index;
+ struct w83795_data *data = w83795_update_device(dev);
+ u16 val = data->in[index][nr];
+ u8 lsb_idx;
+
+ switch (nr) {
+ case IN_READ:
+ /* Inputs in17-in20 without on-chip gain need an extra x8 scale
+ * factor, which would otherwise have to be applied via sensors3.conf */
+ if ((index >= 17) &&
+ !((data->has_gain >> (index - 17)) & 1))
+ val *= 8;
+ break;
+ case IN_MAX:
+ case IN_LOW:
+ lsb_idx = IN_LSB_SHIFT_IDX[index][IN_LSB_IDX];
+ val <<= 2;
+ val |= (data->in_lsb[lsb_idx][nr] >>
+ IN_LSB_SHIFT_IDX[index][IN_LSB_SHIFT]) & 0x03;
+ if ((index >= 17) &&
+ !((data->has_gain >> (index - 17)) & 1))
+ val *= 8;
+ break;
+ }
+ val = in_from_reg(index, val);
+
+ return sprintf(buf, "%d\n", val);
+}
+
+static ssize_t
+store_in(struct device *dev, struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct sensor_device_attribute_2 *sensor_attr =
+ to_sensor_dev_attr_2(attr);
+ int nr = sensor_attr->nr;
+ int index = sensor_attr->index;
+ struct i2c_client *client = to_i2c_client(dev);
+ struct w83795_data *data = i2c_get_clientdata(client);
+ unsigned long val;
+ u8 tmp;
+ u8 lsb_idx;
+
+ if (strict_strtoul(buf, 10, &val) < 0)
+ return -EINVAL;
+ val = in_to_reg(index, val);
+
+ if ((index >= 17) &&
+ !((data->has_gain >> (index - 17)) & 1))
+ val /= 8;
+ val = SENSORS_LIMIT(val, 0, 0x3FF);
+ mutex_lock(&data->update_lock);
+
+ lsb_idx = IN_LSB_SHIFT_IDX[index][IN_LSB_IDX];
+ tmp = w83795_read(client, IN_LSB_REG(lsb_idx, nr));
+ tmp &= ~(0x03 << IN_LSB_SHIFT_IDX[index][IN_LSB_SHIFT]);
+ tmp |= (val & 0x03) << IN_LSB_SHIFT_IDX[index][IN_LSB_SHIFT];
+ w83795_write(client, IN_LSB_REG(lsb_idx, nr), tmp);
+ data->in_lsb[lsb_idx][nr] = tmp;
+
+ tmp = (val >> 2) & 0xff;
+ w83795_write(client, W83795_REG_IN[index][nr], tmp);
+ data->in[index][nr] = tmp;
+
+ mutex_unlock(&data->update_lock);
+ return count;
+}
+
+
+#ifdef CONFIG_SENSORS_W83795_FANCTRL
+static ssize_t
+show_sf_setup(struct device *dev, struct device_attribute *attr, char *buf)
+{
+ struct sensor_device_attribute_2 *sensor_attr =
+ to_sensor_dev_attr_2(attr);
+ int nr = sensor_attr->nr;
+ struct w83795_data *data = w83795_update_pwm_config(dev);
+ u16 val = data->setup_pwm[nr];
+
+ switch (nr) {
+ case SETUP_PWM_UPTIME:
+ case SETUP_PWM_DOWNTIME:
+ val = time_from_reg(val);
+ break;
+ }
+
+ return sprintf(buf, "%d\n", val);
+}
+
+static ssize_t
+store_sf_setup(struct device *dev, struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct sensor_device_attribute_2 *sensor_attr =
+ to_sensor_dev_attr_2(attr);
+ int nr = sensor_attr->nr;
+ struct i2c_client *client = to_i2c_client(dev);
+ struct w83795_data *data = i2c_get_clientdata(client);
+ unsigned long val;
+
+ if (strict_strtoul(buf, 10, &val) < 0)
+ return -EINVAL;
+
+ switch (nr) {
+ case SETUP_PWM_DEFAULT:
+ val = SENSORS_LIMIT(val, 0, 0xff);
+ break;
+ case SETUP_PWM_UPTIME:
+ case SETUP_PWM_DOWNTIME:
+ val = time_to_reg(val);
+ if (val == 0)
+ return -EINVAL;
+ break;
+ }
+
+ mutex_lock(&data->update_lock);
+ data->setup_pwm[nr] = val;
+ w83795_write(client, W83795_REG_SETUP_PWM(nr), val);
+ mutex_unlock(&data->update_lock);
+ return count;
+}
+#endif
+
+
+#define NOT_USED -1
+
+/* Don't change the attribute order, _max, _min and _beep are accessed by index
+ * somewhere else in the code */
+#define SENSOR_ATTR_IN(index) { \
+ SENSOR_ATTR_2(in##index##_input, S_IRUGO, show_in, NULL, \
+ IN_READ, index), \
+ SENSOR_ATTR_2(in##index##_max, S_IRUGO | S_IWUSR, show_in, \
+ store_in, IN_MAX, index), \
+ SENSOR_ATTR_2(in##index##_min, S_IRUGO | S_IWUSR, show_in, \
+ store_in, IN_LOW, index), \
+ SENSOR_ATTR_2(in##index##_alarm, S_IRUGO, show_alarm_beep, \
+ NULL, ALARM_STATUS, index + ((index > 14) ? 1 : 0)), \
+ SENSOR_ATTR_2(in##index##_beep, S_IWUSR | S_IRUGO, \
+ show_alarm_beep, store_beep, BEEP_ENABLE, \
+ index + ((index > 14) ? 1 : 0)) }
+
+/* Don't change the attribute order, _beep is accessed by index
+ * somewhere else in the code */
+#define SENSOR_ATTR_FAN(index) { \
+ SENSOR_ATTR_2(fan##index##_input, S_IRUGO, show_fan, \
+ NULL, FAN_INPUT, index - 1), \
+ SENSOR_ATTR_2(fan##index##_min, S_IWUSR | S_IRUGO, \
+ show_fan, store_fan_min, FAN_MIN, index - 1), \
+ SENSOR_ATTR_2(fan##index##_alarm, S_IRUGO, show_alarm_beep, \
+ NULL, ALARM_STATUS, index + 31), \
+ SENSOR_ATTR_2(fan##index##_beep, S_IWUSR | S_IRUGO, \
+ show_alarm_beep, store_beep, BEEP_ENABLE, index + 31) }
+
+#define SENSOR_ATTR_PWM(index) { \
+ SENSOR_ATTR_2(pwm##index, S_IWUSR | S_IRUGO, show_pwm, \
+ store_pwm, PWM_OUTPUT, index - 1), \
+ SENSOR_ATTR_2(pwm##index##_nonstop, S_IWUSR | S_IRUGO, \
+ show_pwm, store_pwm, PWM_NONSTOP, index - 1), \
+ SENSOR_ATTR_2(pwm##index##_start, S_IWUSR | S_IRUGO, \
+ show_pwm, store_pwm, PWM_START, index - 1), \
+ SENSOR_ATTR_2(pwm##index##_stop_time, S_IWUSR | S_IRUGO, \
+ show_pwm, store_pwm, PWM_STOP_TIME, index - 1), \
+ SENSOR_ATTR_2(pwm##index##_freq, S_IWUSR | S_IRUGO, \
+ show_pwm, store_pwm, PWM_FREQ, index - 1), \
+ SENSOR_ATTR_2(pwm##index##_enable, S_IWUSR | S_IRUGO, \
+ show_pwm_enable, store_pwm_enable, NOT_USED, index - 1), \
+ SENSOR_ATTR_2(pwm##index##_mode, S_IRUGO, \
+ show_pwm_mode, NULL, NOT_USED, index - 1), \
+ SENSOR_ATTR_2(fan##index##_target, S_IWUSR | S_IRUGO, \
+ show_fanin, store_fanin, FANIN_TARGET, index - 1) }
+
+/* Don't change the attribute order, _beep is accessed by index
+ * somewhere else in the code */
+#define SENSOR_ATTR_DTS(index) { \
+ SENSOR_ATTR_2(temp##index##_type, S_IRUGO , \
+ show_dts_mode, NULL, NOT_USED, index - 7), \
+ SENSOR_ATTR_2(temp##index##_input, S_IRUGO, show_dts, \
+ NULL, NOT_USED, index - 7), \
+ SENSOR_ATTR_2(temp##index##_crit, S_IRUGO | S_IWUSR, show_dts_ext, \
+ store_dts_ext, DTS_CRIT, NOT_USED), \
+ SENSOR_ATTR_2(temp##index##_crit_hyst, S_IRUGO | S_IWUSR, \
+ show_dts_ext, store_dts_ext, DTS_CRIT_HYST, NOT_USED), \
+ SENSOR_ATTR_2(temp##index##_max, S_IRUGO | S_IWUSR, show_dts_ext, \
+ store_dts_ext, DTS_WARN, NOT_USED), \
+ SENSOR_ATTR_2(temp##index##_max_hyst, S_IRUGO | S_IWUSR, \
+ show_dts_ext, store_dts_ext, DTS_WARN_HYST, NOT_USED), \
+ SENSOR_ATTR_2(temp##index##_alarm, S_IRUGO, \
+ show_alarm_beep, NULL, ALARM_STATUS, index + 17), \
+ SENSOR_ATTR_2(temp##index##_beep, S_IWUSR | S_IRUGO, \
+ show_alarm_beep, store_beep, BEEP_ENABLE, index + 17) }
+
+/* Don't change the attribute order, _beep is accessed by index
+ * somewhere else in the code */
+#define SENSOR_ATTR_TEMP(index) { \
+ SENSOR_ATTR_2(temp##index##_type, S_IRUGO | (index < 4 ? S_IWUSR : 0), \
+ show_temp_mode, store_temp_mode, NOT_USED, index - 1), \
+ SENSOR_ATTR_2(temp##index##_input, S_IRUGO, show_temp, \
+ NULL, TEMP_READ, index - 1), \
+ SENSOR_ATTR_2(temp##index##_crit, S_IRUGO | S_IWUSR, show_temp, \
+ store_temp, TEMP_CRIT, index - 1), \
+ SENSOR_ATTR_2(temp##index##_crit_hyst, S_IRUGO | S_IWUSR, \
+ show_temp, store_temp, TEMP_CRIT_HYST, index - 1), \
+ SENSOR_ATTR_2(temp##index##_max, S_IRUGO | S_IWUSR, show_temp, \
+ store_temp, TEMP_WARN, index - 1), \
+ SENSOR_ATTR_2(temp##index##_max_hyst, S_IRUGO | S_IWUSR, \
+ show_temp, store_temp, TEMP_WARN_HYST, index - 1), \
+ SENSOR_ATTR_2(temp##index##_alarm, S_IRUGO, \
+ show_alarm_beep, NULL, ALARM_STATUS, \
+ index + (index > 4 ? 11 : 17)), \
+ SENSOR_ATTR_2(temp##index##_beep, S_IWUSR | S_IRUGO, \
+ show_alarm_beep, store_beep, BEEP_ENABLE, \
+ index + (index > 4 ? 11 : 17)), \
+ SENSOR_ATTR_2(temp##index##_pwm_enable, S_IWUSR | S_IRUGO, \
+ show_temp_pwm_enable, store_temp_pwm_enable, \
+ TEMP_PWM_ENABLE, index - 1), \
+ SENSOR_ATTR_2(temp##index##_auto_channels_pwm, S_IWUSR | S_IRUGO, \
+ show_temp_pwm_enable, store_temp_pwm_enable, \
+ TEMP_PWM_FAN_MAP, index - 1), \
+ SENSOR_ATTR_2(thermal_cruise##index, S_IWUSR | S_IRUGO, \
+ show_temp_pwm, store_temp_pwm, TEMP_PWM_TTTI, index - 1), \
+ SENSOR_ATTR_2(temp##index##_warn, S_IWUSR | S_IRUGO, \
+ show_temp_pwm, store_temp_pwm, TEMP_PWM_CTFS, index - 1), \
+ SENSOR_ATTR_2(temp##index##_warn_hyst, S_IWUSR | S_IRUGO, \
+ show_temp_pwm, store_temp_pwm, TEMP_PWM_HCT, index - 1), \
+ SENSOR_ATTR_2(temp##index##_operation_hyst, S_IWUSR | S_IRUGO, \
+ show_temp_pwm, store_temp_pwm, TEMP_PWM_HOT, index - 1), \
+ SENSOR_ATTR_2(temp##index##_auto_point1_pwm, S_IRUGO | S_IWUSR, \
+ show_sf4_pwm, store_sf4_pwm, 0, index - 1), \
+ SENSOR_ATTR_2(temp##index##_auto_point2_pwm, S_IRUGO | S_IWUSR, \
+ show_sf4_pwm, store_sf4_pwm, 1, index - 1), \
+ SENSOR_ATTR_2(temp##index##_auto_point3_pwm, S_IRUGO | S_IWUSR, \
+ show_sf4_pwm, store_sf4_pwm, 2, index - 1), \
+ SENSOR_ATTR_2(temp##index##_auto_point4_pwm, S_IRUGO | S_IWUSR, \
+ show_sf4_pwm, store_sf4_pwm, 3, index - 1), \
+ SENSOR_ATTR_2(temp##index##_auto_point5_pwm, S_IRUGO | S_IWUSR, \
+ show_sf4_pwm, store_sf4_pwm, 4, index - 1), \
+ SENSOR_ATTR_2(temp##index##_auto_point6_pwm, S_IRUGO | S_IWUSR, \
+ show_sf4_pwm, store_sf4_pwm, 5, index - 1), \
+ SENSOR_ATTR_2(temp##index##_auto_point7_pwm, S_IRUGO | S_IWUSR, \
+ show_sf4_pwm, store_sf4_pwm, 6, index - 1), \
+ SENSOR_ATTR_2(temp##index##_auto_point1_temp, S_IRUGO | S_IWUSR,\
+ show_sf4_temp, store_sf4_temp, 0, index - 1), \
+ SENSOR_ATTR_2(temp##index##_auto_point2_temp, S_IRUGO | S_IWUSR,\
+ show_sf4_temp, store_sf4_temp, 1, index - 1), \
+ SENSOR_ATTR_2(temp##index##_auto_point3_temp, S_IRUGO | S_IWUSR,\
+ show_sf4_temp, store_sf4_temp, 2, index - 1), \
+ SENSOR_ATTR_2(temp##index##_auto_point4_temp, S_IRUGO | S_IWUSR,\
+ show_sf4_temp, store_sf4_temp, 3, index - 1), \
+ SENSOR_ATTR_2(temp##index##_auto_point5_temp, S_IRUGO | S_IWUSR,\
+ show_sf4_temp, store_sf4_temp, 4, index - 1), \
+ SENSOR_ATTR_2(temp##index##_auto_point6_temp, S_IRUGO | S_IWUSR,\
+ show_sf4_temp, store_sf4_temp, 5, index - 1), \
+ SENSOR_ATTR_2(temp##index##_auto_point7_temp, S_IRUGO | S_IWUSR,\
+ show_sf4_temp, store_sf4_temp, 6, index - 1) }
+
+
+static struct sensor_device_attribute_2 w83795_in[][5] = {
+ SENSOR_ATTR_IN(0),
+ SENSOR_ATTR_IN(1),
+ SENSOR_ATTR_IN(2),
+ SENSOR_ATTR_IN(3),
+ SENSOR_ATTR_IN(4),
+ SENSOR_ATTR_IN(5),
+ SENSOR_ATTR_IN(6),
+ SENSOR_ATTR_IN(7),
+ SENSOR_ATTR_IN(8),
+ SENSOR_ATTR_IN(9),
+ SENSOR_ATTR_IN(10),
+ SENSOR_ATTR_IN(11),
+ SENSOR_ATTR_IN(12),
+ SENSOR_ATTR_IN(13),
+ SENSOR_ATTR_IN(14),
+ SENSOR_ATTR_IN(15),
+ SENSOR_ATTR_IN(16),
+ SENSOR_ATTR_IN(17),
+ SENSOR_ATTR_IN(18),
+ SENSOR_ATTR_IN(19),
+ SENSOR_ATTR_IN(20),
+};
+
+static const struct sensor_device_attribute_2 w83795_fan[][4] = {
+ SENSOR_ATTR_FAN(1),
+ SENSOR_ATTR_FAN(2),
+ SENSOR_ATTR_FAN(3),
+ SENSOR_ATTR_FAN(4),
+ SENSOR_ATTR_FAN(5),
+ SENSOR_ATTR_FAN(6),
+ SENSOR_ATTR_FAN(7),
+ SENSOR_ATTR_FAN(8),
+ SENSOR_ATTR_FAN(9),
+ SENSOR_ATTR_FAN(10),
+ SENSOR_ATTR_FAN(11),
+ SENSOR_ATTR_FAN(12),
+ SENSOR_ATTR_FAN(13),
+ SENSOR_ATTR_FAN(14),
+};
+
+static const struct sensor_device_attribute_2 w83795_temp[][28] = {
+ SENSOR_ATTR_TEMP(1),
+ SENSOR_ATTR_TEMP(2),
+ SENSOR_ATTR_TEMP(3),
+ SENSOR_ATTR_TEMP(4),
+ SENSOR_ATTR_TEMP(5),
+ SENSOR_ATTR_TEMP(6),
+};
+
+static const struct sensor_device_attribute_2 w83795_dts[][8] = {
+ SENSOR_ATTR_DTS(7),
+ SENSOR_ATTR_DTS(8),
+ SENSOR_ATTR_DTS(9),
+ SENSOR_ATTR_DTS(10),
+ SENSOR_ATTR_DTS(11),
+ SENSOR_ATTR_DTS(12),
+ SENSOR_ATTR_DTS(13),
+ SENSOR_ATTR_DTS(14),
+};
+
+static const struct sensor_device_attribute_2 w83795_pwm[][8] = {
+ SENSOR_ATTR_PWM(1),
+ SENSOR_ATTR_PWM(2),
+ SENSOR_ATTR_PWM(3),
+ SENSOR_ATTR_PWM(4),
+ SENSOR_ATTR_PWM(5),
+ SENSOR_ATTR_PWM(6),
+ SENSOR_ATTR_PWM(7),
+ SENSOR_ATTR_PWM(8),
+};
+
+static const struct sensor_device_attribute_2 w83795_tss[6] = {
+ SENSOR_ATTR_2(temp1_source_sel, S_IWUSR | S_IRUGO,
+ show_temp_src, store_temp_src, NOT_USED, 0),
+ SENSOR_ATTR_2(temp2_source_sel, S_IWUSR | S_IRUGO,
+ show_temp_src, store_temp_src, NOT_USED, 1),
+ SENSOR_ATTR_2(temp3_source_sel, S_IWUSR | S_IRUGO,
+ show_temp_src, store_temp_src, NOT_USED, 2),
+ SENSOR_ATTR_2(temp4_source_sel, S_IWUSR | S_IRUGO,
+ show_temp_src, store_temp_src, NOT_USED, 3),
+ SENSOR_ATTR_2(temp5_source_sel, S_IWUSR | S_IRUGO,
+ show_temp_src, store_temp_src, NOT_USED, 4),
+ SENSOR_ATTR_2(temp6_source_sel, S_IWUSR | S_IRUGO,
+ show_temp_src, store_temp_src, NOT_USED, 5),
+};
+
+static const struct sensor_device_attribute_2 sda_single_files[] = {
+ SENSOR_ATTR_2(intrusion0_alarm, S_IWUSR | S_IRUGO, show_alarm_beep,
+ store_chassis_clear, ALARM_STATUS, 46),
+#ifdef CONFIG_SENSORS_W83795_FANCTRL
+ SENSOR_ATTR_2(speed_cruise_tolerance, S_IWUSR | S_IRUGO, show_fanin,
+ store_fanin, FANIN_TOL, NOT_USED),
+ SENSOR_ATTR_2(pwm_default, S_IWUSR | S_IRUGO, show_sf_setup,
+ store_sf_setup, SETUP_PWM_DEFAULT, NOT_USED),
+ SENSOR_ATTR_2(pwm_uptime, S_IWUSR | S_IRUGO, show_sf_setup,
+ store_sf_setup, SETUP_PWM_UPTIME, NOT_USED),
+ SENSOR_ATTR_2(pwm_downtime, S_IWUSR | S_IRUGO, show_sf_setup,
+ store_sf_setup, SETUP_PWM_DOWNTIME, NOT_USED),
+#endif
+};
+
+static const struct sensor_device_attribute_2 sda_beep_files[] = {
+ SENSOR_ATTR_2(intrusion0_beep, S_IWUSR | S_IRUGO, show_alarm_beep,
+ store_beep, BEEP_ENABLE, 46),
+ SENSOR_ATTR_2(beep_enable, S_IWUSR | S_IRUGO, show_alarm_beep,
+ store_beep, BEEP_ENABLE, 47),
+};
+
+/*
+ * Driver interface
+ */
+
+static void w83795_init_client(struct i2c_client *client)
+{
+ struct w83795_data *data = i2c_get_clientdata(client);
+ static const u16 clkin[4] = { /* in kHz */
+ 14318, 24000, 33333, 48000
+ };
+ u8 config;
+
+ if (reset)
+ w83795_write(client, W83795_REG_CONFIG, 0x80);
+
+ /* Start monitoring if needed */
+ config = w83795_read(client, W83795_REG_CONFIG);
+ if (!(config & W83795_REG_CONFIG_START)) {
+ dev_info(&client->dev, "Enabling monitoring operations\n");
+ w83795_write(client, W83795_REG_CONFIG,
+ config | W83795_REG_CONFIG_START);
+ }
+
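+ /* Bits 4:3 of the configuration register select the CLKIN frequency */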
+ data->clkin = clkin[(config >> 3) & 0x3];
+ dev_dbg(&client->dev, "clkin = %u kHz\n", data->clkin);
+}
+
+static int w83795_get_device_id(struct i2c_client *client)
+{
+ int device_id;
+
+ device_id = i2c_smbus_read_byte_data(client, W83795_REG_DEVICEID);
+
+ /* Special case for rev. A chips; can't be checked first because later
+ revisions emulate this for compatibility */
+ if (device_id < 0 || (device_id & 0xf0) != 0x50) {
+ int alt_id;
+
+ alt_id = i2c_smbus_read_byte_data(client,
+ W83795_REG_DEVICEID_A);
+ if (alt_id == 0x50)
+ device_id = alt_id;
+ }
+
+ return device_id;
+}
+
+/* Return 0 if detection is successful, -ENODEV otherwise */
+static int w83795_detect(struct i2c_client *client,
+ struct i2c_board_info *info)
+{
+ int bank, vendor_id, device_id, expected, i2c_addr, config;
+ struct i2c_adapter *adapter = client->adapter;
+ unsigned short address = client->addr;
+ const char *chip_name;
+
+ if (!i2c_check_functionality(adapter, I2C_FUNC_SMBUS_BYTE_DATA))
+ return -ENODEV;
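+ /* Bits 6:2 of the bank register must read 0 on a genuine W83795 */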
+ bank = i2c_smbus_read_byte_data(client, W83795_REG_BANKSEL);
+ if (bank < 0 || (bank & 0x7c)) {
+ dev_dbg(&adapter->dev,
+ "w83795: Detection failed at addr 0x%02hx, check %s\n",
+ address, "bank");
+ return -ENODEV;
+ }
+
+ /* Check Nuvoton vendor ID */
+ vendor_id = i2c_smbus_read_byte_data(client, W83795_REG_VENDORID);
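+ /* Bit 7 of the bank register selects which byte of the vendor ID is read */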
+ expected = bank & 0x80 ? 0x5c : 0xa3;
+ if (vendor_id != expected) {
+ dev_dbg(&adapter->dev,
+ "w83795: Detection failed at addr 0x%02hx, check %s\n",
+ address, "vendor id");
+ return -ENODEV;
+ }
+
+ /* Check device ID */
+ device_id = w83795_get_device_id(client) |
+ (i2c_smbus_read_byte_data(client, W83795_REG_CHIPID) << 8);
+ if ((device_id >> 4) != 0x795) {
+ dev_dbg(&adapter->dev,
+ "w83795: Detection failed at addr 0x%02hx, check %s\n",
+ address, "device id\n");
+ return -ENODEV;
+ }
+
+ /* If Nuvoton chip, address of chip and W83795_REG_I2C_ADDR
+ should match */
+ if ((bank & 0x07) == 0) {
+ i2c_addr = i2c_smbus_read_byte_data(client,
+ W83795_REG_I2C_ADDR);
+ if ((i2c_addr & 0x7f) != address) {
+ dev_dbg(&adapter->dev,
+ "w83795: Detection failed at addr 0x%02hx, "
+ "check %s\n", address, "i2c addr");
+ return -ENODEV;
+ }
+ }
+
+ /* Check 795 chip type: 795G or 795ADG
+ Usually we don't write to chips during detection, but here we don't
+ quite have the choice; hopefully it's OK, we are about to return
+ success anyway */
+ if ((bank & 0x07) != 0)
+ i2c_smbus_write_byte_data(client, W83795_REG_BANKSEL,
+ bank & ~0x07);
+ config = i2c_smbus_read_byte_data(client, W83795_REG_CONFIG);
+ if (config & W83795_REG_CONFIG_CONFIG48)
+ chip_name = "w83795adg";
+ else
+ chip_name = "w83795g";
+
+ strlcpy(info->type, chip_name, I2C_NAME_SIZE);
+ dev_info(&adapter->dev, "Found %s rev. %c at 0x%02hx\n", chip_name,
+ 'A' + (device_id & 0xf), address);
+
+ return 0;
+}
+
+static int w83795_handle_files(struct device *dev, int (*fn)(struct device *,
+ const struct device_attribute *))
+{
+ struct w83795_data *data = dev_get_drvdata(dev);
+ int err, i, j;
+
+ for (i = 0; i < ARRAY_SIZE(w83795_in); i++) {
+ if (!(data->has_in & (1 << i)))
+ continue;
+ for (j = 0; j < ARRAY_SIZE(w83795_in[0]); j++) {
+ if (j == 4 && !data->enable_beep)
+ continue;
+ err = fn(dev, &w83795_in[i][j].dev_attr);
+ if (err)
+ return err;
+ }
+ }
+
+ for (i = 0; i < ARRAY_SIZE(w83795_fan); i++) {
+ if (!(data->has_fan & (1 << i)))
+ continue;
+ for (j = 0; j < ARRAY_SIZE(w83795_fan[0]); j++) {
+ if (j == 3 && !data->enable_beep)
+ continue;
+ err = fn(dev, &w83795_fan[i][j].dev_attr);
+ if (err)
+ return err;
+ }
+ }
+
+ for (i = 0; i < ARRAY_SIZE(w83795_tss); i++) {
+ j = w83795_tss_useful(data, i);
+ if (!j)
+ continue;
+ err = fn(dev, &w83795_tss[i].dev_attr);
+ if (err)
+ return err;
+ }
+
+ for (i = 0; i < ARRAY_SIZE(sda_single_files); i++) {
+ err = fn(dev, &sda_single_files[i].dev_attr);
+ if (err)
+ return err;
+ }
+
+ if (data->enable_beep) {
+ for (i = 0; i < ARRAY_SIZE(sda_beep_files); i++) {
+ err = fn(dev, &sda_beep_files[i].dev_attr);
+ if (err)
+ return err;
+ }
+ }
+
+#ifdef CONFIG_SENSORS_W83795_FANCTRL
+ for (i = 0; i < data->has_pwm; i++) {
+ for (j = 0; j < ARRAY_SIZE(w83795_pwm[0]); j++) {
+ err = fn(dev, &w83795_pwm[i][j].dev_attr);
+ if (err)
+ return err;
+ }
+ }
+#endif
+
+ for (i = 0; i < ARRAY_SIZE(w83795_temp); i++) {
+ if (!(data->has_temp & (1 << i)))
+ continue;
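+ /* Without CONFIG_SENSORS_W83795_FANCTRL, only the first 8
+ * attributes (up to _beep) are created */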
+#ifdef CONFIG_SENSORS_W83795_FANCTRL
+ for (j = 0; j < ARRAY_SIZE(w83795_temp[0]); j++) {
+#else
+ for (j = 0; j < 8; j++) {
+#endif
+ if (j == 7 && !data->enable_beep)
+ continue;
+ err = fn(dev, &w83795_temp[i][j].dev_attr);
+ if (err)
+ return err;
+ }
+ }
+
+ if (data->enable_dts) {
+ for (i = 0; i < ARRAY_SIZE(w83795_dts); i++) {
+ if (!(data->has_dts & (1 << i)))
+ continue;
+ for (j = 0; j < ARRAY_SIZE(w83795_dts[0]); j++) {
+ if (j == 7 && !data->enable_beep)
+ continue;
+ err = fn(dev, &w83795_dts[i][j].dev_attr);
+ if (err)
+ return err;
+ }
+ }
+ }
+
+ return 0;
+}
+
+/* We need a wrapper that fits in w83795_handle_files */
+static int device_remove_file_wrapper(struct device *dev,
+ const struct device_attribute *attr)
+{
+ device_remove_file(dev, attr);
+ return 0;
+}
+
+static void w83795_check_dynamic_in_limits(struct i2c_client *client)
+{
+ struct w83795_data *data = i2c_get_clientdata(client);
+ u8 vid_ctl;
+ int i, err_max, err_min;
+
+ vid_ctl = w83795_read(client, W83795_REG_VID_CTRL);
+
+ /* Return immediately if VRM isn't configured */
+ if ((vid_ctl & 0x07) == 0x00 || (vid_ctl & 0x07) == 0x07)
+ return;
+
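+ /* Bits 5:3 of VID_CTRL flag inputs whose limits are set dynamically from VID */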
+ data->has_dyn_in = (vid_ctl >> 3) & 0x07;
+ for (i = 0; i < 2; i++) {
+ if (!(data->has_dyn_in & (1 << i)))
+ continue;
+
+ /* Voltage limits in dynamic mode, switch to read-only */
+ err_max = sysfs_chmod_file(&client->dev.kobj,
+ &w83795_in[i][2].dev_attr.attr,
+ S_IRUGO);
+ err_min = sysfs_chmod_file(&client->dev.kobj,
+ &w83795_in[i][3].dev_attr.attr,
+ S_IRUGO);
+ if (err_max || err_min)
+ dev_warn(&client->dev, "Failed to set in%d limits "
+ "read-only (%d, %d)\n", i, err_max, err_min);
+ else
+ dev_info(&client->dev, "in%d limits set dynamically "
+ "from VID\n", i);
+ }
+}
+
+/* Check pins that can be used for either temperature or voltage monitoring */
+static void w83795_apply_temp_config(struct w83795_data *data, u8 config,
+ int temp_chan, int in_chan)
+{
+ /* config is a 2-bit value */
+ switch (config) {
+ case 0x2: /* Voltage monitoring */
+ data->has_in |= 1 << in_chan;
+ break;
+ case 0x1: /* Thermal diode */
+ if (temp_chan >= 4)
+ break;
+ data->temp_mode |= 1 << temp_chan;
+ /* fall through */
+ case 0x3: /* Thermistor */
+ data->has_temp |= 1 << temp_chan;
+ break;
+ }
+}
+
+static int w83795_probe(struct i2c_client *client,
+ const struct i2c_device_id *id)
+{
+ int i;
+ u8 tmp;
+ struct device *dev = &client->dev;
+ struct w83795_data *data;
+ int err;
+
+ data = kzalloc(sizeof(struct w83795_data), GFP_KERNEL);
+ if (!data) {
+ err = -ENOMEM;
+ goto exit;
+ }
+
+ i2c_set_clientdata(client, data);
+ data->chip_type = id->driver_data;
+ data->bank = i2c_smbus_read_byte_data(client, W83795_REG_BANKSEL);
+ mutex_init(&data->update_lock);
+
+ /* Initialize the chip */
+ w83795_init_client(client);
+
+ /* Check which voltages and fans are present */
+ data->has_in = w83795_read(client, W83795_REG_VOLT_CTRL1)
+ | (w83795_read(client, W83795_REG_VOLT_CTRL2) << 8);
+ data->has_fan = w83795_read(client, W83795_REG_FANIN_CTRL1)
+ | (w83795_read(client, W83795_REG_FANIN_CTRL2) << 8);
+
+ /* Check which analog temperatures and extra voltages are present */
+ tmp = w83795_read(client, W83795_REG_TEMP_CTRL1);
+ if (tmp & 0x20)
+ data->enable_dts = 1;
+ w83795_apply_temp_config(data, (tmp >> 2) & 0x3, 5, 16);
+ w83795_apply_temp_config(data, tmp & 0x3, 4, 15);
+ tmp = w83795_read(client, W83795_REG_TEMP_CTRL2);
+ w83795_apply_temp_config(data, tmp >> 6, 3, 20);
+ w83795_apply_temp_config(data, (tmp >> 4) & 0x3, 2, 19);
+ w83795_apply_temp_config(data, (tmp >> 2) & 0x3, 1, 18);
+ w83795_apply_temp_config(data, tmp & 0x3, 0, 17);
+
+ /* Check DTS enable status */
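+ /* enable_dts: bit 0 = DTS enabled, bit 1 = value of the DTSC select bit */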
+ if (data->enable_dts) {
+ if (1 & w83795_read(client, W83795_REG_DTSC))
+ data->enable_dts |= 2;
+ data->has_dts = w83795_read(client, W83795_REG_DTSE);
+ }
+
+ /* Report PECI Tbase values */
+ if (data->enable_dts == 1) {
+ for (i = 0; i < 8; i++) {
+ if (!(data->has_dts & (1 << i)))
+ continue;
+ tmp = w83795_read(client, W83795_REG_PECI_TBASE(i));
+ dev_info(&client->dev,
+ "PECI agent %d Tbase temperature: %u\n",
+ i + 1, (unsigned int)tmp & 0x7f);
+ }
+ }
+
+ data->has_gain = w83795_read(client, W83795_REG_VMIGB_CTRL) & 0x0f;
+
+ /* pwm and smart fan */
+ if (data->chip_type == w83795g)
+ data->has_pwm = 8;
+ else
+ data->has_pwm = 2;
+
+ /* Check if BEEP pin is available */
+ if (data->chip_type == w83795g) {
+ /* The W83795G has a dedicated BEEP pin */
+ data->enable_beep = 1;
+ } else {
+ /* The W83795ADG has a shared pin for OVT# and BEEP, so you
+ * can't have both */
+ tmp = w83795_read(client, W83795_REG_OVT_CFG);
+ if ((tmp & OVT_CFG_SEL) == 0)
+ data->enable_beep = 1;
+ }
+
+ err = w83795_handle_files(dev, device_create_file);
+ if (err)
+ goto exit_remove;
+
+ if (data->chip_type == w83795g)
+ w83795_check_dynamic_in_limits(client);
+
+ data->hwmon_dev = hwmon_device_register(dev);
+ if (IS_ERR(data->hwmon_dev)) {
+ err = PTR_ERR(data->hwmon_dev);
+ goto exit_remove;
+ }
+
+ return 0;
+
+exit_remove:
+ w83795_handle_files(dev, device_remove_file_wrapper);
+ kfree(data);
+exit:
+ return err;
+}
+
+static int w83795_remove(struct i2c_client *client)
+{
+ struct w83795_data *data = i2c_get_clientdata(client);
+
+ hwmon_device_unregister(data->hwmon_dev);
+ w83795_handle_files(&client->dev, device_remove_file_wrapper);
+ kfree(data);
+
+ return 0;
+}
+
+
+static const struct i2c_device_id w83795_id[] = {
+ { "w83795g", w83795g },
+ { "w83795adg", w83795adg },
+ { }
+};
+MODULE_DEVICE_TABLE(i2c, w83795_id);
+
+static struct i2c_driver w83795_driver = {
+ .driver = {
+ .name = "w83795",
+ },
+ .probe = w83795_probe,
+ .remove = w83795_remove,
+ .id_table = w83795_id,
+
+ .class = I2C_CLASS_HWMON,
+ .detect = w83795_detect,
+ .address_list = normal_i2c,
+};
+
+static int __init sensors_w83795_init(void)
+{
+ return i2c_add_driver(&w83795_driver);
+}
+
+static void __exit sensors_w83795_exit(void)
+{
+ i2c_del_driver(&w83795_driver);
+}
+
+MODULE_AUTHOR("Wei Song, Jean Delvare <khali@linux-fr.org>");
+MODULE_DESCRIPTION("W83795G/ADG hardware monitoring driver");
+MODULE_LICENSE("GPL");
+
+module_init(sensors_w83795_init);
+module_exit(sensors_w83795_exit);
diff --git a/drivers/i2c/Kconfig b/drivers/i2c/Kconfig
index b923074b2cbe..30f06e956bfb 100644
--- a/drivers/i2c/Kconfig
+++ b/drivers/i2c/Kconfig
@@ -75,8 +75,7 @@ config I2C_HELPER_AUTO
In doubt, say Y.
config I2C_SMBUS
- tristate
- prompt "SMBus-specific protocols" if !I2C_HELPER_AUTO
+ tristate "SMBus-specific protocols" if !I2C_HELPER_AUTO
help
Say Y here if you want support for SMBus extensions to the I2C
specification. At the moment, the only supported extension is
diff --git a/drivers/i2c/algos/Kconfig b/drivers/i2c/algos/Kconfig
index 3998dd620a03..f1cfe7e5508b 100644
--- a/drivers/i2c/algos/Kconfig
+++ b/drivers/i2c/algos/Kconfig
@@ -3,7 +3,7 @@
#
menu "I2C Algorithms"
- depends on !I2C_HELPER_AUTO
+ visible if !I2C_HELPER_AUTO
config I2C_ALGOBIT
tristate "I2C bit-banging interfaces"
@@ -15,15 +15,3 @@ config I2C_ALGOPCA
tristate "I2C PCA 9564 interfaces"
endmenu
-
-# In automatic configuration mode, we still have to define the
-# symbols to avoid unmet dependencies.
-
-if I2C_HELPER_AUTO
-config I2C_ALGOBIT
- tristate
-config I2C_ALGOPCF
- tristate
-config I2C_ALGOPCA
- tristate
-endif
diff --git a/drivers/i2c/busses/Kconfig b/drivers/i2c/busses/Kconfig
index 6539ac2907e9..3a6321cb8030 100644
--- a/drivers/i2c/busses/Kconfig
+++ b/drivers/i2c/busses/Kconfig
@@ -95,10 +95,11 @@ config I2C_I801
ESB2
ICH8
ICH9
- Tolapai
+ EP80579 (Tolapai)
ICH10
- 3400/5 Series (PCH)
+ 5/3400 Series (PCH)
Cougar Point (PCH)
+ Patsburg (PCH)
This driver can also be built as a module. If so, the module
will be called i2c-i801.
@@ -396,6 +397,16 @@ config I2C_IMX
This driver can also be built as a module. If so, the module
will be called i2c-imx.
+config I2C_INTEL_MID
+ tristate "Intel Moorestown/Medfield Platform I2C controller"
+ depends on PCI
+ help
+ Say Y here if you have an Intel Moorestown/Medfield platform I2C
+ controller.
+
+ This support is also available as a module. If so, the module
+ will be called i2c-intel-mid.
+
config I2C_IOP3XX
tristate "Intel IOPx3xx and IXP4xx on-chip I2C interface"
depends on ARCH_IOP32X || ARCH_IOP33X || ARCH_IXP4XX || ARCH_IOP13XX
diff --git a/drivers/i2c/busses/Makefile b/drivers/i2c/busses/Makefile
index 033ad413f328..84cb16ae6f9e 100644
--- a/drivers/i2c/busses/Makefile
+++ b/drivers/i2c/busses/Makefile
@@ -38,6 +38,7 @@ obj-$(CONFIG_I2C_GPIO) += i2c-gpio.o
obj-$(CONFIG_I2C_HIGHLANDER) += i2c-highlander.o
obj-$(CONFIG_I2C_IBM_IIC) += i2c-ibm_iic.o
obj-$(CONFIG_I2C_IMX) += i2c-imx.o
+obj-$(CONFIG_I2C_INTEL_MID) += i2c-intel-mid.o
obj-$(CONFIG_I2C_IOP3XX) += i2c-iop3xx.o
obj-$(CONFIG_I2C_IXP2000) += i2c-ixp2000.o
obj-$(CONFIG_I2C_MPC) += i2c-mpc.o
diff --git a/drivers/i2c/busses/i2c-i801.c b/drivers/i2c/busses/i2c-i801.c
index c60081169cc3..02835ce7ff4b 100644
--- a/drivers/i2c/busses/i2c-i801.c
+++ b/drivers/i2c/busses/i2c-i801.c
@@ -3,6 +3,8 @@
Philip Edelbrock <phil@netroedge.com>, and Mark D. Studebaker
<mdsxyz123@yahoo.com>
Copyright (C) 2007, 2008 Jean Delvare <khali@linux-fr.org>
+ Copyright (C) 2010 Intel Corporation,
+ David Woodhouse <dwmw2@infradead.org>
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
@@ -38,11 +40,15 @@
82801G (ICH7) 0x27da 32 hard yes yes yes
82801H (ICH8) 0x283e 32 hard yes yes yes
82801I (ICH9) 0x2930 32 hard yes yes yes
- Tolapai 0x5032 32 hard yes yes yes
+ EP80579 (Tolapai) 0x5032 32 hard yes yes yes
ICH10 0x3a30 32 hard yes yes yes
ICH10 0x3a60 32 hard yes yes yes
- 3400/5 Series (PCH) 0x3b30 32 hard yes yes yes
+ 5/3400 Series (PCH) 0x3b30 32 hard yes yes yes
Cougar Point (PCH) 0x1c22 32 hard yes yes yes
+ Patsburg (PCH) 0x1d22 32 hard yes yes yes
+ Patsburg (PCH) IDF 0x1d70 32 hard yes yes yes
+ Patsburg (PCH) IDF 0x1d71 32 hard yes yes yes
+ Patsburg (PCH) IDF 0x1d72 32 hard yes yes yes
Features supported by this driver:
Software PEC no
@@ -50,12 +56,11 @@
Block buffer yes
Block process call transaction no
I2C block read transaction yes (doesn't use the block buffer)
+ Slave mode no
See the file Documentation/i2c/busses/i2c-i801 for details.
*/
-/* Note: we assume there can only be one I801, with one SMBus interface */
-
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/kernel.h>
@@ -69,16 +74,16 @@
#include <linux/dmi.h>
/* I801 SMBus address offsets */
-#define SMBHSTSTS (0 + i801_smba)
-#define SMBHSTCNT (2 + i801_smba)
-#define SMBHSTCMD (3 + i801_smba)
-#define SMBHSTADD (4 + i801_smba)
-#define SMBHSTDAT0 (5 + i801_smba)
-#define SMBHSTDAT1 (6 + i801_smba)
-#define SMBBLKDAT (7 + i801_smba)
-#define SMBPEC (8 + i801_smba) /* ICH3 and later */
-#define SMBAUXSTS (12 + i801_smba) /* ICH4 and later */
-#define SMBAUXCTL (13 + i801_smba) /* ICH4 and later */
+#define SMBHSTSTS(p) (0 + (p)->smba)
+#define SMBHSTCNT(p) (2 + (p)->smba)
+#define SMBHSTCMD(p) (3 + (p)->smba)
+#define SMBHSTADD(p) (4 + (p)->smba)
+#define SMBHSTDAT0(p) (5 + (p)->smba)
+#define SMBHSTDAT1(p) (6 + (p)->smba)
+#define SMBBLKDAT(p) (7 + (p)->smba)
+#define SMBPEC(p) (8 + (p)->smba) /* ICH3 and later */
+#define SMBAUXSTS(p) (12 + (p)->smba) /* ICH4 and later */
+#define SMBAUXCTL(p) (13 + (p)->smba) /* ICH4 and later */
/* PCI Address Constants */
#define SMBBAR 4
@@ -127,16 +132,25 @@
SMBHSTSTS_BUS_ERR | SMBHSTSTS_DEV_ERR | \
SMBHSTSTS_INTR)
-static unsigned long i801_smba;
-static unsigned char i801_original_hstcfg;
+/* Patsburg also has three 'Integrated Device Function' SMBus controllers */
+#define PCI_DEVICE_ID_INTEL_PATSBURG_SMBUS_IDF0 0x1d70
+#define PCI_DEVICE_ID_INTEL_PATSBURG_SMBUS_IDF1 0x1d71
+#define PCI_DEVICE_ID_INTEL_PATSBURG_SMBUS_IDF2 0x1d72
+
+struct i801_priv {
+ struct i2c_adapter adapter;
+ unsigned long smba;
+ unsigned char original_hstcfg;
+ struct pci_dev *pci_dev;
+ unsigned int features;
+};
+
static struct pci_driver i801_driver;
-static struct pci_dev *I801_dev;
#define FEATURE_SMBUS_PEC (1 << 0)
#define FEATURE_BLOCK_BUFFER (1 << 1)
#define FEATURE_BLOCK_PROC (1 << 2)
#define FEATURE_I2C_BLOCK_READ (1 << 3)
-static unsigned int i801_features;
static const char *i801_feature_names[] = {
"SMBus PEC",
@@ -151,24 +165,24 @@ MODULE_PARM_DESC(disable_features, "Disable selected driver features");
/* Make sure the SMBus host is ready to start transmitting.
Return 0 if it is, -EBUSY if it is not. */
-static int i801_check_pre(void)
+static int i801_check_pre(struct i801_priv *priv)
{
int status;
- status = inb_p(SMBHSTSTS);
+ status = inb_p(SMBHSTSTS(priv));
if (status & SMBHSTSTS_HOST_BUSY) {
- dev_err(&I801_dev->dev, "SMBus is busy, can't use it!\n");
+ dev_err(&priv->pci_dev->dev, "SMBus is busy, can't use it!\n");
return -EBUSY;
}
status &= STATUS_FLAGS;
if (status) {
- dev_dbg(&I801_dev->dev, "Clearing status flags (%02x)\n",
+ dev_dbg(&priv->pci_dev->dev, "Clearing status flags (%02x)\n",
status);
- outb_p(status, SMBHSTSTS);
- status = inb_p(SMBHSTSTS) & STATUS_FLAGS;
+ outb_p(status, SMBHSTSTS(priv));
+ status = inb_p(SMBHSTSTS(priv)) & STATUS_FLAGS;
if (status) {
- dev_err(&I801_dev->dev,
+ dev_err(&priv->pci_dev->dev,
"Failed clearing status flags (%02x)\n",
status);
return -EBUSY;
@@ -179,48 +193,50 @@ static int i801_check_pre(void)
}
/* Convert the status register to an error code, and clear it. */
-static int i801_check_post(int status, int timeout)
+static int i801_check_post(struct i801_priv *priv, int status, int timeout)
{
int result = 0;
/* If the SMBus is still busy, we give up */
if (timeout) {
- dev_err(&I801_dev->dev, "Transaction timeout\n");
+ dev_err(&priv->pci_dev->dev, "Transaction timeout\n");
/* try to stop the current command */
- dev_dbg(&I801_dev->dev, "Terminating the current operation\n");
- outb_p(inb_p(SMBHSTCNT) | SMBHSTCNT_KILL, SMBHSTCNT);
+ dev_dbg(&priv->pci_dev->dev, "Terminating the current operation\n");
+ outb_p(inb_p(SMBHSTCNT(priv)) | SMBHSTCNT_KILL,
+ SMBHSTCNT(priv));
msleep(1);
- outb_p(inb_p(SMBHSTCNT) & (~SMBHSTCNT_KILL), SMBHSTCNT);
+ outb_p(inb_p(SMBHSTCNT(priv)) & (~SMBHSTCNT_KILL),
+ SMBHSTCNT(priv));
/* Check if it worked */
- status = inb_p(SMBHSTSTS);
+ status = inb_p(SMBHSTSTS(priv));
if ((status & SMBHSTSTS_HOST_BUSY) ||
!(status & SMBHSTSTS_FAILED))
- dev_err(&I801_dev->dev,
+ dev_err(&priv->pci_dev->dev,
"Failed terminating the transaction\n");
- outb_p(STATUS_FLAGS, SMBHSTSTS);
+ outb_p(STATUS_FLAGS, SMBHSTSTS(priv));
return -ETIMEDOUT;
}
if (status & SMBHSTSTS_FAILED) {
result = -EIO;
- dev_err(&I801_dev->dev, "Transaction failed\n");
+ dev_err(&priv->pci_dev->dev, "Transaction failed\n");
}
if (status & SMBHSTSTS_DEV_ERR) {
result = -ENXIO;
- dev_dbg(&I801_dev->dev, "No response\n");
+ dev_dbg(&priv->pci_dev->dev, "No response\n");
}
if (status & SMBHSTSTS_BUS_ERR) {
result = -EAGAIN;
- dev_dbg(&I801_dev->dev, "Lost arbitration\n");
+ dev_dbg(&priv->pci_dev->dev, "Lost arbitration\n");
}
if (result) {
/* Clear error flags */
- outb_p(status & STATUS_FLAGS, SMBHSTSTS);
- status = inb_p(SMBHSTSTS) & STATUS_FLAGS;
+ outb_p(status & STATUS_FLAGS, SMBHSTSTS(priv));
+ status = inb_p(SMBHSTSTS(priv)) & STATUS_FLAGS;
if (status) {
- dev_warn(&I801_dev->dev, "Failed clearing status "
+ dev_warn(&priv->pci_dev->dev, "Failed clearing status "
"flags at end of transaction (%02x)\n",
status);
}
@@ -229,86 +245,88 @@ static int i801_check_post(int status, int timeout)
return result;
}
-static int i801_transaction(int xact)
+static int i801_transaction(struct i801_priv *priv, int xact)
{
int status;
int result;
int timeout = 0;
- result = i801_check_pre();
+ result = i801_check_pre(priv);
if (result < 0)
return result;
/* the current contents of SMBHSTCNT can be overwritten, since PEC,
* INTREN, SMBSCMD are passed in xact */
- outb_p(xact | I801_START, SMBHSTCNT);
+ outb_p(xact | I801_START, SMBHSTCNT(priv));
/* We will always wait for a fraction of a second! */
do {
msleep(1);
- status = inb_p(SMBHSTSTS);
+ status = inb_p(SMBHSTSTS(priv));
} while ((status & SMBHSTSTS_HOST_BUSY) && (timeout++ < MAX_TIMEOUT));
- result = i801_check_post(status, timeout > MAX_TIMEOUT);
+ result = i801_check_post(priv, status, timeout > MAX_TIMEOUT);
if (result < 0)
return result;
- outb_p(SMBHSTSTS_INTR, SMBHSTSTS);
+ outb_p(SMBHSTSTS_INTR, SMBHSTSTS(priv));
return 0;
}
/* wait for INTR bit as advised by Intel */
-static void i801_wait_hwpec(void)
+static void i801_wait_hwpec(struct i801_priv *priv)
{
int timeout = 0;
int status;
do {
msleep(1);
- status = inb_p(SMBHSTSTS);
+ status = inb_p(SMBHSTSTS(priv));
} while ((!(status & SMBHSTSTS_INTR))
&& (timeout++ < MAX_TIMEOUT));
if (timeout > MAX_TIMEOUT)
- dev_dbg(&I801_dev->dev, "PEC Timeout!\n");
+ dev_dbg(&priv->pci_dev->dev, "PEC Timeout!\n");
- outb_p(status, SMBHSTSTS);
+ outb_p(status, SMBHSTSTS(priv));
}
-static int i801_block_transaction_by_block(union i2c_smbus_data *data,
+static int i801_block_transaction_by_block(struct i801_priv *priv,
+ union i2c_smbus_data *data,
char read_write, int hwpec)
{
int i, len;
int status;
- inb_p(SMBHSTCNT); /* reset the data buffer index */
+ inb_p(SMBHSTCNT(priv)); /* reset the data buffer index */
/* Use 32-byte buffer to process this transaction */
if (read_write == I2C_SMBUS_WRITE) {
len = data->block[0];
- outb_p(len, SMBHSTDAT0);
+ outb_p(len, SMBHSTDAT0(priv));
for (i = 0; i < len; i++)
- outb_p(data->block[i+1], SMBBLKDAT);
+ outb_p(data->block[i+1], SMBBLKDAT(priv));
}
- status = i801_transaction(I801_BLOCK_DATA | ENABLE_INT9 |
+ status = i801_transaction(priv, I801_BLOCK_DATA | ENABLE_INT9 |
I801_PEC_EN * hwpec);
if (status)
return status;
if (read_write == I2C_SMBUS_READ) {
- len = inb_p(SMBHSTDAT0);
+ len = inb_p(SMBHSTDAT0(priv));
if (len < 1 || len > I2C_SMBUS_BLOCK_MAX)
return -EPROTO;
data->block[0] = len;
for (i = 0; i < len; i++)
- data->block[i + 1] = inb_p(SMBBLKDAT);
+ data->block[i + 1] = inb_p(SMBBLKDAT(priv));
}
return 0;
}
-static int i801_block_transaction_byte_by_byte(union i2c_smbus_data *data,
+static int i801_block_transaction_byte_by_byte(struct i801_priv *priv,
+ union i2c_smbus_data *data,
char read_write, int command,
int hwpec)
{
@@ -318,15 +336,15 @@ static int i801_block_transaction_byte_by_byte(union i2c_smbus_data *data,
int result;
int timeout;
- result = i801_check_pre();
+ result = i801_check_pre(priv);
if (result < 0)
return result;
len = data->block[0];
if (read_write == I2C_SMBUS_WRITE) {
- outb_p(len, SMBHSTDAT0);
- outb_p(data->block[1], SMBBLKDAT);
+ outb_p(len, SMBHSTDAT0(priv));
+ outb_p(data->block[1], SMBBLKDAT(priv));
}
for (i = 1; i <= len; i++) {
@@ -342,34 +360,37 @@ static int i801_block_transaction_byte_by_byte(union i2c_smbus_data *data,
else
smbcmd = I801_BLOCK_DATA;
}
- outb_p(smbcmd | ENABLE_INT9, SMBHSTCNT);
+ outb_p(smbcmd | ENABLE_INT9, SMBHSTCNT(priv));
if (i == 1)
- outb_p(inb(SMBHSTCNT) | I801_START, SMBHSTCNT);
+ outb_p(inb(SMBHSTCNT(priv)) | I801_START,
+ SMBHSTCNT(priv));
/* We will always wait for a fraction of a second! */
timeout = 0;
do {
msleep(1);
- status = inb_p(SMBHSTSTS);
+ status = inb_p(SMBHSTSTS(priv));
} while ((!(status & SMBHSTSTS_BYTE_DONE))
&& (timeout++ < MAX_TIMEOUT));
- result = i801_check_post(status, timeout > MAX_TIMEOUT);
+ result = i801_check_post(priv, status, timeout > MAX_TIMEOUT);
if (result < 0)
return result;
if (i == 1 && read_write == I2C_SMBUS_READ
&& command != I2C_SMBUS_I2C_BLOCK_DATA) {
- len = inb_p(SMBHSTDAT0);
+ len = inb_p(SMBHSTDAT0(priv));
if (len < 1 || len > I2C_SMBUS_BLOCK_MAX) {
- dev_err(&I801_dev->dev,
+ dev_err(&priv->pci_dev->dev,
"Illegal SMBus block read size %d\n",
len);
/* Recover */
- while (inb_p(SMBHSTSTS) & SMBHSTSTS_HOST_BUSY)
- outb_p(SMBHSTSTS_BYTE_DONE, SMBHSTSTS);
- outb_p(SMBHSTSTS_INTR, SMBHSTSTS);
+ while (inb_p(SMBHSTSTS(priv)) &
+ SMBHSTSTS_HOST_BUSY)
+ outb_p(SMBHSTSTS_BYTE_DONE,
+ SMBHSTSTS(priv));
+ outb_p(SMBHSTSTS_INTR, SMBHSTSTS(priv));
return -EPROTO;
}
data->block[0] = len;
@@ -377,27 +398,28 @@ static int i801_block_transaction_byte_by_byte(union i2c_smbus_data *data,
/* Retrieve/store value in SMBBLKDAT */
if (read_write == I2C_SMBUS_READ)
- data->block[i] = inb_p(SMBBLKDAT);
+ data->block[i] = inb_p(SMBBLKDAT(priv));
if (read_write == I2C_SMBUS_WRITE && i+1 <= len)
- outb_p(data->block[i+1], SMBBLKDAT);
+ outb_p(data->block[i+1], SMBBLKDAT(priv));
/* signals SMBBLKDAT ready */
- outb_p(SMBHSTSTS_BYTE_DONE | SMBHSTSTS_INTR, SMBHSTSTS);
+ outb_p(SMBHSTSTS_BYTE_DONE | SMBHSTSTS_INTR, SMBHSTSTS(priv));
}
return 0;
}
-static int i801_set_block_buffer_mode(void)
+static int i801_set_block_buffer_mode(struct i801_priv *priv)
{
- outb_p(inb_p(SMBAUXCTL) | SMBAUXCTL_E32B, SMBAUXCTL);
- if ((inb_p(SMBAUXCTL) & SMBAUXCTL_E32B) == 0)
+ outb_p(inb_p(SMBAUXCTL(priv)) | SMBAUXCTL_E32B, SMBAUXCTL(priv));
+ if ((inb_p(SMBAUXCTL(priv)) & SMBAUXCTL_E32B) == 0)
return -EIO;
return 0;
}
/* Block transaction function */
-static int i801_block_transaction(union i2c_smbus_data *data, char read_write,
+static int i801_block_transaction(struct i801_priv *priv,
+ union i2c_smbus_data *data, char read_write,
int command, int hwpec)
{
int result = 0;
@@ -406,11 +428,11 @@ static int i801_block_transaction(union i2c_smbus_data *data, char read_write,
if (command == I2C_SMBUS_I2C_BLOCK_DATA) {
if (read_write == I2C_SMBUS_WRITE) {
/* set I2C_EN bit in configuration register */
- pci_read_config_byte(I801_dev, SMBHSTCFG, &hostc);
- pci_write_config_byte(I801_dev, SMBHSTCFG,
+ pci_read_config_byte(priv->pci_dev, SMBHSTCFG, &hostc);
+ pci_write_config_byte(priv->pci_dev, SMBHSTCFG,
hostc | SMBHSTCFG_I2C_EN);
- } else if (!(i801_features & FEATURE_I2C_BLOCK_READ)) {
- dev_err(&I801_dev->dev,
+ } else if (!(priv->features & FEATURE_I2C_BLOCK_READ)) {
+ dev_err(&priv->pci_dev->dev,
"I2C block read is unsupported!\n");
return -EOPNOTSUPP;
}
@@ -429,22 +451,23 @@ static int i801_block_transaction(union i2c_smbus_data *data, char read_write,
/* Experience has shown that the block buffer can only be used for
SMBus (not I2C) block transactions, even though the datasheet
doesn't mention this limitation. */
- if ((i801_features & FEATURE_BLOCK_BUFFER)
+ if ((priv->features & FEATURE_BLOCK_BUFFER)
&& command != I2C_SMBUS_I2C_BLOCK_DATA
- && i801_set_block_buffer_mode() == 0)
- result = i801_block_transaction_by_block(data, read_write,
- hwpec);
+ && i801_set_block_buffer_mode(priv) == 0)
+ result = i801_block_transaction_by_block(priv, data,
+ read_write, hwpec);
else
- result = i801_block_transaction_byte_by_byte(data, read_write,
+ result = i801_block_transaction_byte_by_byte(priv, data,
+ read_write,
command, hwpec);
if (result == 0 && hwpec)
- i801_wait_hwpec();
+ i801_wait_hwpec(priv);
if (command == I2C_SMBUS_I2C_BLOCK_DATA
&& read_write == I2C_SMBUS_WRITE) {
/* restore saved configuration register value */
- pci_write_config_byte(I801_dev, SMBHSTCFG, hostc);
+ pci_write_config_byte(priv->pci_dev, SMBHSTCFG, hostc);
}
return result;
}
@@ -457,81 +480,85 @@ static s32 i801_access(struct i2c_adapter *adap, u16 addr,
int hwpec;
int block = 0;
int ret, xact = 0;
+ struct i801_priv *priv = i2c_get_adapdata(adap);
- hwpec = (i801_features & FEATURE_SMBUS_PEC) && (flags & I2C_CLIENT_PEC)
+ hwpec = (priv->features & FEATURE_SMBUS_PEC) && (flags & I2C_CLIENT_PEC)
&& size != I2C_SMBUS_QUICK
&& size != I2C_SMBUS_I2C_BLOCK_DATA;
switch (size) {
case I2C_SMBUS_QUICK:
outb_p(((addr & 0x7f) << 1) | (read_write & 0x01),
- SMBHSTADD);
+ SMBHSTADD(priv));
xact = I801_QUICK;
break;
case I2C_SMBUS_BYTE:
outb_p(((addr & 0x7f) << 1) | (read_write & 0x01),
- SMBHSTADD);
+ SMBHSTADD(priv));
if (read_write == I2C_SMBUS_WRITE)
- outb_p(command, SMBHSTCMD);
+ outb_p(command, SMBHSTCMD(priv));
xact = I801_BYTE;
break;
case I2C_SMBUS_BYTE_DATA:
outb_p(((addr & 0x7f) << 1) | (read_write & 0x01),
- SMBHSTADD);
- outb_p(command, SMBHSTCMD);
+ SMBHSTADD(priv));
+ outb_p(command, SMBHSTCMD(priv));
if (read_write == I2C_SMBUS_WRITE)
- outb_p(data->byte, SMBHSTDAT0);
+ outb_p(data->byte, SMBHSTDAT0(priv));
xact = I801_BYTE_DATA;
break;
case I2C_SMBUS_WORD_DATA:
outb_p(((addr & 0x7f) << 1) | (read_write & 0x01),
- SMBHSTADD);
- outb_p(command, SMBHSTCMD);
+ SMBHSTADD(priv));
+ outb_p(command, SMBHSTCMD(priv));
if (read_write == I2C_SMBUS_WRITE) {
- outb_p(data->word & 0xff, SMBHSTDAT0);
- outb_p((data->word & 0xff00) >> 8, SMBHSTDAT1);
+ outb_p(data->word & 0xff, SMBHSTDAT0(priv));
+ outb_p((data->word & 0xff00) >> 8, SMBHSTDAT1(priv));
}
xact = I801_WORD_DATA;
break;
case I2C_SMBUS_BLOCK_DATA:
outb_p(((addr & 0x7f) << 1) | (read_write & 0x01),
- SMBHSTADD);
- outb_p(command, SMBHSTCMD);
+ SMBHSTADD(priv));
+ outb_p(command, SMBHSTCMD(priv));
block = 1;
break;
case I2C_SMBUS_I2C_BLOCK_DATA:
/* NB: page 240 of ICH5 datasheet shows that the R/#W
* bit should be cleared here, even when reading */
- outb_p((addr & 0x7f) << 1, SMBHSTADD);
+ outb_p((addr & 0x7f) << 1, SMBHSTADD(priv));
if (read_write == I2C_SMBUS_READ) {
/* NB: page 240 of ICH5 datasheet also shows
* that DATA1 is the cmd field when reading */
- outb_p(command, SMBHSTDAT1);
+ outb_p(command, SMBHSTDAT1(priv));
} else
- outb_p(command, SMBHSTCMD);
+ outb_p(command, SMBHSTCMD(priv));
block = 1;
break;
default:
- dev_err(&I801_dev->dev, "Unsupported transaction %d\n", size);
+ dev_err(&priv->pci_dev->dev, "Unsupported transaction %d\n",
+ size);
return -EOPNOTSUPP;
}
if (hwpec) /* enable/disable hardware PEC */
- outb_p(inb_p(SMBAUXCTL) | SMBAUXCTL_CRC, SMBAUXCTL);
+ outb_p(inb_p(SMBAUXCTL(priv)) | SMBAUXCTL_CRC, SMBAUXCTL(priv));
else
- outb_p(inb_p(SMBAUXCTL) & (~SMBAUXCTL_CRC), SMBAUXCTL);
+ outb_p(inb_p(SMBAUXCTL(priv)) & (~SMBAUXCTL_CRC),
+ SMBAUXCTL(priv));
if (block)
- ret = i801_block_transaction(data, read_write, size, hwpec);
+ ret = i801_block_transaction(priv, data, read_write, size,
+ hwpec);
else
- ret = i801_transaction(xact | ENABLE_INT9);
+ ret = i801_transaction(priv, xact | ENABLE_INT9);
/* Some BIOSes don't like it when PEC is enabled at reboot or resume
time, so we forcibly disable it after every transaction. Turn off
E32B for the same reason. */
if (hwpec || block)
- outb_p(inb_p(SMBAUXCTL) & ~(SMBAUXCTL_CRC | SMBAUXCTL_E32B),
- SMBAUXCTL);
+ outb_p(inb_p(SMBAUXCTL(priv)) &
+ ~(SMBAUXCTL_CRC | SMBAUXCTL_E32B), SMBAUXCTL(priv));
if (block)
return ret;
@@ -543,10 +570,11 @@ static s32 i801_access(struct i2c_adapter *adap, u16 addr,
switch (xact & 0x7f) {
case I801_BYTE: /* Result put in SMBHSTDAT0 */
case I801_BYTE_DATA:
- data->byte = inb_p(SMBHSTDAT0);
+ data->byte = inb_p(SMBHSTDAT0(priv));
break;
case I801_WORD_DATA:
- data->word = inb_p(SMBHSTDAT0) + (inb_p(SMBHSTDAT1) << 8);
+ data->word = inb_p(SMBHSTDAT0(priv)) +
+ (inb_p(SMBHSTDAT1(priv)) << 8);
break;
}
return 0;
@@ -555,11 +583,13 @@ static s32 i801_access(struct i2c_adapter *adap, u16 addr,
static u32 i801_func(struct i2c_adapter *adapter)
{
+ struct i801_priv *priv = i2c_get_adapdata(adapter);
+
return I2C_FUNC_SMBUS_QUICK | I2C_FUNC_SMBUS_BYTE |
I2C_FUNC_SMBUS_BYTE_DATA | I2C_FUNC_SMBUS_WORD_DATA |
I2C_FUNC_SMBUS_BLOCK_DATA | I2C_FUNC_SMBUS_WRITE_I2C_BLOCK |
- ((i801_features & FEATURE_SMBUS_PEC) ? I2C_FUNC_SMBUS_PEC : 0) |
- ((i801_features & FEATURE_I2C_BLOCK_READ) ?
+ ((priv->features & FEATURE_SMBUS_PEC) ? I2C_FUNC_SMBUS_PEC : 0) |
+ ((priv->features & FEATURE_I2C_BLOCK_READ) ?
I2C_FUNC_SMBUS_READ_I2C_BLOCK : 0);
}
@@ -568,12 +598,6 @@ static const struct i2c_algorithm smbus_algorithm = {
.functionality = i801_func,
};
-static struct i2c_adapter i801_adapter = {
- .owner = THIS_MODULE,
- .class = I2C_CLASS_HWMON | I2C_CLASS_SPD,
- .algo = &smbus_algorithm,
-};
-
static const struct pci_device_id i801_ids[] = {
{ PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801AA_3) },
{ PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801AB_3) },
@@ -587,11 +611,15 @@ static const struct pci_device_id i801_ids[] = {
{ PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ESB2_17) },
{ PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ICH8_5) },
{ PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ICH9_6) },
- { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_TOLAPAI_1) },
+ { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_EP80579_1) },
{ PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ICH10_4) },
{ PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ICH10_5) },
- { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_PCH_SMBUS) },
- { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_CPT_SMBUS) },
+ { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_5_3400_SERIES_SMBUS) },
+ { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_COUGARPOINT_SMBUS) },
+ { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_PATSBURG_SMBUS) },
+ { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_PATSBURG_SMBUS_IDF0) },
+ { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_PATSBURG_SMBUS_IDF1) },
+ { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_PATSBURG_SMBUS_IDF2) },
{ 0, }
};
@@ -704,16 +732,25 @@ static int __devinit i801_probe(struct pci_dev *dev,
{
unsigned char temp;
int err, i;
+ struct i801_priv *priv;
+
+ priv = kzalloc(sizeof(*priv), GFP_KERNEL);
+ if (!priv)
+ return -ENOMEM;
+
+ i2c_set_adapdata(&priv->adapter, priv);
+ priv->adapter.owner = THIS_MODULE;
+ priv->adapter.class = I2C_CLASS_HWMON | I2C_CLASS_SPD;
+ priv->adapter.algo = &smbus_algorithm;
- I801_dev = dev;
- i801_features = 0;
+ priv->pci_dev = dev;
switch (dev->device) {
default:
- i801_features |= FEATURE_I2C_BLOCK_READ;
+ priv->features |= FEATURE_I2C_BLOCK_READ;
/* fall through */
case PCI_DEVICE_ID_INTEL_82801DB_3:
- i801_features |= FEATURE_SMBUS_PEC;
- i801_features |= FEATURE_BLOCK_BUFFER;
+ priv->features |= FEATURE_SMBUS_PEC;
+ priv->features |= FEATURE_BLOCK_BUFFER;
/* fall through */
case PCI_DEVICE_ID_INTEL_82801CA_3:
case PCI_DEVICE_ID_INTEL_82801BA_2:
@@ -724,11 +761,11 @@ static int __devinit i801_probe(struct pci_dev *dev,
/* Disable features on user request */
for (i = 0; i < ARRAY_SIZE(i801_feature_names); i++) {
- if (i801_features & disable_features & (1 << i))
+ if (priv->features & disable_features & (1 << i))
dev_notice(&dev->dev, "%s disabled by user\n",
i801_feature_names[i]);
}
- i801_features &= ~disable_features;
+ priv->features &= ~disable_features;
err = pci_enable_device(dev);
if (err) {
@@ -738,8 +775,8 @@ static int __devinit i801_probe(struct pci_dev *dev,
}
/* Determine the address of the SMBus area */
- i801_smba = pci_resource_start(dev, SMBBAR);
- if (!i801_smba) {
+ priv->smba = pci_resource_start(dev, SMBBAR);
+ if (!priv->smba) {
dev_err(&dev->dev, "SMBus base address uninitialized, "
"upgrade BIOS\n");
err = -ENODEV;
@@ -755,19 +792,19 @@ static int __devinit i801_probe(struct pci_dev *dev,
err = pci_request_region(dev, SMBBAR, i801_driver.name);
if (err) {
dev_err(&dev->dev, "Failed to request SMBus region "
- "0x%lx-0x%Lx\n", i801_smba,
+ "0x%lx-0x%Lx\n", priv->smba,
(unsigned long long)pci_resource_end(dev, SMBBAR));
goto exit;
}
- pci_read_config_byte(I801_dev, SMBHSTCFG, &temp);
- i801_original_hstcfg = temp;
+ pci_read_config_byte(priv->pci_dev, SMBHSTCFG, &temp);
+ priv->original_hstcfg = temp;
temp &= ~SMBHSTCFG_I2C_EN; /* SMBus timing */
if (!(temp & SMBHSTCFG_HST_EN)) {
dev_info(&dev->dev, "Enabling SMBus device\n");
temp |= SMBHSTCFG_HST_EN;
}
- pci_write_config_byte(I801_dev, SMBHSTCFG, temp);
+ pci_write_config_byte(priv->pci_dev, SMBHSTCFG, temp);
if (temp & SMBHSTCFG_SMB_SMI_EN)
dev_dbg(&dev->dev, "SMBus using interrupt SMI#\n");
@@ -775,19 +812,19 @@ static int __devinit i801_probe(struct pci_dev *dev,
dev_dbg(&dev->dev, "SMBus using PCI Interrupt\n");
/* Clear special mode bits */
- if (i801_features & (FEATURE_SMBUS_PEC | FEATURE_BLOCK_BUFFER))
- outb_p(inb_p(SMBAUXCTL) & ~(SMBAUXCTL_CRC | SMBAUXCTL_E32B),
- SMBAUXCTL);
+ if (priv->features & (FEATURE_SMBUS_PEC | FEATURE_BLOCK_BUFFER))
+ outb_p(inb_p(SMBAUXCTL(priv)) &
+ ~(SMBAUXCTL_CRC | SMBAUXCTL_E32B), SMBAUXCTL(priv));
/* set up the sysfs linkage to our parent device */
- i801_adapter.dev.parent = &dev->dev;
+ priv->adapter.dev.parent = &dev->dev;
/* Retry up to 3 times on lost arbitration */
- i801_adapter.retries = 3;
+ priv->adapter.retries = 3;
- snprintf(i801_adapter.name, sizeof(i801_adapter.name),
- "SMBus I801 adapter at %04lx", i801_smba);
- err = i2c_add_adapter(&i801_adapter);
+ snprintf(priv->adapter.name, sizeof(priv->adapter.name),
+ "SMBus I801 adapter at %04lx", priv->smba);
+ err = i2c_add_adapter(&priv->adapter);
if (err) {
dev_err(&dev->dev, "Failed to add SMBus adapter\n");
goto exit_release;
@@ -801,27 +838,33 @@ static int __devinit i801_probe(struct pci_dev *dev,
memset(&info, 0, sizeof(struct i2c_board_info));
info.addr = apanel_addr;
strlcpy(info.type, "fujitsu_apanel", I2C_NAME_SIZE);
- i2c_new_device(&i801_adapter, &info);
+ i2c_new_device(&priv->adapter, &info);
}
#endif
#if defined CONFIG_SENSORS_FSCHMD || defined CONFIG_SENSORS_FSCHMD_MODULE
if (dmi_name_in_vendors("FUJITSU"))
- dmi_walk(dmi_check_onboard_devices, &i801_adapter);
+ dmi_walk(dmi_check_onboard_devices, &priv->adapter);
#endif
+ pci_set_drvdata(dev, priv);
return 0;
exit_release:
pci_release_region(dev, SMBBAR);
exit:
+ kfree(priv);
return err;
}
static void __devexit i801_remove(struct pci_dev *dev)
{
- i2c_del_adapter(&i801_adapter);
- pci_write_config_byte(I801_dev, SMBHSTCFG, i801_original_hstcfg);
+ struct i801_priv *priv = pci_get_drvdata(dev);
+
+ i2c_del_adapter(&priv->adapter);
+ pci_write_config_byte(dev, SMBHSTCFG, priv->original_hstcfg);
pci_release_region(dev, SMBBAR);
+ pci_set_drvdata(dev, NULL);
+ kfree(priv);
/*
* do not call pci_disable_device(dev) since it can cause hard hangs on
* some systems during power-off (eg. Fujitsu-Siemens Lifebook E8010)
@@ -831,8 +874,10 @@ static void __devexit i801_remove(struct pci_dev *dev)
#ifdef CONFIG_PM
static int i801_suspend(struct pci_dev *dev, pm_message_t mesg)
{
+ struct i801_priv *priv = pci_get_drvdata(dev);
+
pci_save_state(dev);
- pci_write_config_byte(dev, SMBHSTCFG, i801_original_hstcfg);
+ pci_write_config_byte(dev, SMBHSTCFG, priv->original_hstcfg);
pci_set_power_state(dev, pci_choose_state(dev, mesg));
return 0;
}
diff --git a/drivers/i2c/busses/i2c-intel-mid.c b/drivers/i2c/busses/i2c-intel-mid.c
new file mode 100644
index 000000000000..80f70d3a744d
--- /dev/null
+++ b/drivers/i2c/busses/i2c-intel-mid.c
@@ -0,0 +1,1135 @@
+/*
+ * Support for Moorestown/Medfield I2C chip
+ *
+ * Copyright (c) 2009 Intel Corporation.
+ * Copyright (c) 2009 Synopsys. Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License, version
+ * 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT ANY
+ * WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
+ * FOR A PARTICULAR PURPOSE. See the GNU General Public License for more
+ * details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write to the Free Software Foundation, Inc., 51
+ * Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ */
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/err.h>
+#include <linux/slab.h>
+#include <linux/stat.h>
+#include <linux/delay.h>
+#include <linux/i2c.h>
+#include <linux/init.h>
+#include <linux/pci.h>
+#include <linux/interrupt.h>
+#include <linux/pm_runtime.h>
+#include <linux/io.h>
+
+#define DRIVER_NAME "i2c-intel-mid"
+#define VERSION "Version 0.5ac2"
+#define PLATFORM "Moorestown/Medfield"
+
+/* Tables use: 0 Moorestown, 1 Medfield */
+#define NUM_PLATFORMS 2
+enum platform_enum {
+ MOORESTOWN = 0,
+ MEDFIELD = 1,
+};
+
+enum mid_i2c_status {
+ STATUS_IDLE = 0,
+ STATUS_READ_START,
+ STATUS_READ_IN_PROGRESS,
+ STATUS_READ_SUCCESS,
+ STATUS_WRITE_START,
+ STATUS_WRITE_SUCCESS,
+ STATUS_XFER_ABORT,
+ STATUS_STANDBY
+};
+
+/**
+ * struct intel_mid_i2c_private - per device I²C context
+ * @adap: core i2c layer adapter information
+ * @dev: device reference for power management
+ * @base: register base
+ * @speed: speed mode for this port
+ * @complete: completion object for transaction wait
+ * @abort: reason for last abort
+ * @rx_buf: pointer into working receive buffer
+ * @rx_buf_len: receive buffer length
+ * @status: adapter state machine
+ * @msg: the message we are currently processing
+ * @platform: the MID device type we are part of
+ * @lock: transaction serialization
+ *
+ * We allocate one of these per device we discover; it holds the core
+ * i2c layer objects and the data we need to track privately.
+ */
+struct intel_mid_i2c_private {
+ struct i2c_adapter adap;
+ struct device *dev;
+ void __iomem *base;
+ int speed;
+ struct completion complete;
+ int abort;
+ u8 *rx_buf;
+ int rx_buf_len;
+ enum mid_i2c_status status;
+ struct i2c_msg *msg;
+ enum platform_enum platform;
+ struct mutex lock;
+};
+
+#define NUM_SPEEDS 3
+
+#define ACTIVE 0
+#define STANDBY 1
+
+
+/* Control register */
+#define IC_CON 0x00
+#define SLV_DIS (1 << 6) /* Disable slave mode */
+#define RESTART (1 << 5) /* Send a Restart condition */
+#define ADDR_10BIT (1 << 4) /* 10-bit addressing */
+#define STANDARD_MODE (1 << 1) /* standard mode */
+#define FAST_MODE (2 << 1) /* fast mode */
+#define HIGH_MODE (3 << 1) /* high speed mode */
+#define MASTER_EN (1 << 0) /* Master mode */
+
+/* Target address register */
+#define IC_TAR 0x04
+#define IC_TAR_10BIT_ADDR (1 << 12) /* 10-bit addressing */
+#define IC_TAR_SPECIAL (1 << 11) /* Perform special I2C cmd */
+#define IC_TAR_GC_OR_START (1 << 10) /* 0: General Call Address */
+ /* 1: START BYTE */
+/* Slave Address Register */
+#define IC_SAR 0x08 /* Not used in Master mode */
+
+/* High Speed Master Mode Code Address Register */
+#define IC_HS_MADDR 0x0c
+
+/* Rx/Tx Data Buffer and Command Register */
+#define IC_DATA_CMD 0x10
+#define IC_RD (1 << 8) /* 1: Read 0: Write */
+
+/* Standard Speed Clock SCL High Count Register */
+#define IC_SS_SCL_HCNT 0x14
+
+/* Standard Speed Clock SCL Low Count Register */
+#define IC_SS_SCL_LCNT 0x18
+
+/* Fast Speed Clock SCL High Count Register */
+#define IC_FS_SCL_HCNT 0x1c
+
+/* Fast Speed Clock SCL Low Count Register */
+#define IC_FS_SCL_LCNT 0x20
+
+/* High Speed Clock SCL High Count Register */
+#define IC_HS_SCL_HCNT 0x24
+
+/* High Speed Clock SCL Low Count Register */
+#define IC_HS_SCL_LCNT 0x28
+
+/* Interrupt Status Register */
+#define IC_INTR_STAT 0x2c /* Read only */
+#define R_GEN_CALL (1 << 11)
+#define R_START_DET (1 << 10)
+#define R_STOP_DET (1 << 9)
+#define R_ACTIVITY (1 << 8)
+#define R_RX_DONE (1 << 7)
+#define R_TX_ABRT (1 << 6)
+#define R_RD_REQ (1 << 5)
+#define R_TX_EMPTY (1 << 4)
+#define R_TX_OVER (1 << 3)
+#define R_RX_FULL (1 << 2)
+#define R_RX_OVER (1 << 1)
+#define R_RX_UNDER (1 << 0)
+
+/* Interrupt Mask Register */
+#define IC_INTR_MASK 0x30 /* Read and Write */
+#define M_GEN_CALL (1 << 11)
+#define M_START_DET (1 << 10)
+#define M_STOP_DET (1 << 9)
+#define M_ACTIVITY (1 << 8)
+#define M_RX_DONE (1 << 7)
+#define M_TX_ABRT (1 << 6)
+#define M_RD_REQ (1 << 5)
+#define M_TX_EMPTY (1 << 4)
+#define M_TX_OVER (1 << 3)
+#define M_RX_FULL (1 << 2)
+#define M_RX_OVER (1 << 1)
+#define M_RX_UNDER (1 << 0)
+
+/* Raw Interrupt Status Register */
+#define IC_RAW_INTR_STAT 0x34 /* Read Only */
+#define GEN_CALL (1 << 11) /* General call */
+#define START_DET (1 << 10) /* (RE)START occurred */
+#define STOP_DET (1 << 9) /* STOP occurred */
+#define ACTIVITY (1 << 8) /* Bus busy */
+#define RX_DONE (1 << 7) /* Not used in Master mode */
+#define TX_ABRT (1 << 6) /* Transmit Abort */
+#define RD_REQ (1 << 5) /* Not used in Master mode */
+#define TX_EMPTY (1 << 4) /* TX FIFO <= threshold */
+#define TX_OVER (1 << 3) /* TX FIFO overflow */
+#define RX_FULL (1 << 2) /* RX FIFO >= threshold */
+#define RX_OVER (1 << 1) /* RX FIFO overflow */
+#define RX_UNDER (1 << 0) /* RX FIFO empty */
+
+/* Receive FIFO Threshold Register */
+#define IC_RX_TL 0x38
+
+/* Transmit FIFO Threshold Register */
+#define IC_TX_TL 0x3c
+
+/* Clear Combined and Individual Interrupt Register */
+#define IC_CLR_INTR 0x40
+#define CLR_INTR (1 << 0)
+
+/* Clear RX_UNDER Interrupt Register */
+#define IC_CLR_RX_UNDER 0x44
+#define CLR_RX_UNDER (1 << 0)
+
+/* Clear RX_OVER Interrupt Register */
+#define IC_CLR_RX_OVER 0x48
+#define CLR_RX_OVER (1 << 0)
+
+/* Clear TX_OVER Interrupt Register */
+#define IC_CLR_TX_OVER 0x4c
+#define CLR_TX_OVER (1 << 0)
+
+#define IC_CLR_RD_REQ 0x50
+
+/* Clear TX_ABRT Interrupt Register */
+#define IC_CLR_TX_ABRT 0x54
+#define CLR_TX_ABRT (1 << 0)
+#define IC_CLR_RX_DONE 0x58
+
+/* Clear ACTIVITY Interrupt Register */
+#define IC_CLR_ACTIVITY 0x5c
+#define CLR_ACTIVITY (1 << 0)
+
+/* Clear STOP_DET Interrupt Register */
+#define IC_CLR_STOP_DET 0x60
+#define CLR_STOP_DET (1 << 0)
+
+/* Clear START_DET Interrupt Register */
+#define IC_CLR_START_DET 0x64
+#define CLR_START_DET (1 << 0)
+
+/* Clear GEN_CALL Interrupt Register */
+#define IC_CLR_GEN_CALL 0x68
+#define CLR_GEN_CALL (1 << 0)
+
+/* Enable Register */
+#define IC_ENABLE 0x6c
+#define ENABLE (1 << 0)
+
+/* Status Register */
+#define IC_STATUS 0x70 /* Read Only */
+#define STAT_SLV_ACTIVITY (1 << 6) /* Slave not in idle */
+#define STAT_MST_ACTIVITY (1 << 5) /* Master not in idle */
+#define STAT_RFF (1 << 4) /* RX FIFO Full */
+#define STAT_RFNE (1 << 3) /* RX FIFO Not Empty */
+#define STAT_TFE (1 << 2) /* TX FIFO Empty */
+#define STAT_TFNF (1 << 1) /* TX FIFO Not Full */
+#define STAT_ACTIVITY (1 << 0) /* Activity Status */
+
+/* Transmit FIFO Level Register */
+#define IC_TXFLR 0x74 /* Read Only */
+#define TXFLR (1 << 0) /* TX FIFO level */
+
+/* Receive FIFO Level Register */
+#define IC_RXFLR 0x78 /* Read Only */
+#define RXFLR (1 << 0) /* RX FIFO level */
+
+/* Transmit Abort Source Register */
+#define IC_TX_ABRT_SOURCE 0x80
+#define ABRT_SLVRD_INTX (1 << 15)
+#define ABRT_SLV_ARBLOST (1 << 14)
+#define ABRT_SLVFLUSH_TXFIFO (1 << 13)
+#define ARB_LOST (1 << 12)
+#define ABRT_MASTER_DIS (1 << 11)
+#define ABRT_10B_RD_NORSTRT (1 << 10)
+#define ABRT_SBYTE_NORSTRT (1 << 9)
+#define ABRT_HS_NORSTRT (1 << 8)
+#define ABRT_SBYTE_ACKDET (1 << 7)
+#define ABRT_HS_ACKDET (1 << 6)
+#define ABRT_GCALL_READ (1 << 5)
+#define ABRT_GCALL_NOACK (1 << 4)
+#define ABRT_TXDATA_NOACK (1 << 3)
+#define ABRT_10ADDR2_NOACK (1 << 2)
+#define ABRT_10ADDR1_NOACK (1 << 1)
+#define ABRT_7B_ADDR_NOACK (1 << 0)
+
+/* Enable Status Register */
+#define IC_ENABLE_STATUS 0x9c
+#define IC_EN (1 << 0) /* I2C in an enabled state */
+
+/* Component Parameter Register 1*/
+#define IC_COMP_PARAM_1 0xf4
+#define APB_DATA_WIDTH (0x3 << 0)
+
+/* Minimum SCL high/low times, in ns, for each speed mode */
+#define SS_MIN_SCL_HIGH 4000
+#define SS_MIN_SCL_LOW 4700
+#define FS_MIN_SCL_HIGH 600
+#define FS_MIN_SCL_LOW 1300
+#define HS_MIN_SCL_HIGH_100PF 60
+#define HS_MIN_SCL_LOW_100PF 120
+
+#define STANDARD 0
+#define FAST 1
+#define HIGH 2
+
+#define NUM_SPEEDS 3
+
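+/* Per-port default speed mode: all ports fast except port index 3 (standard) */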
+static int speed_mode[6] = {
+ FAST,
+ FAST,
+ FAST,
+ STANDARD,
+ FAST,
+ FAST
+};
+
+static int ctl_num = 6;
+module_param_array(speed_mode, int, &ctl_num, S_IRUGO);
+MODULE_PARM_DESC(speed_mode, "Set the speed of the i2c interface (0-2)");
+
+/**
+ * intel_mid_i2c_disable - Disable I2C controller
+ * @adap: struct pointer to i2c_adapter
+ *
+ * Return Value:
+ * 0 success
+ * -EBUSY if device is busy
+ * -ETIMEDOUT if i2c cannot be disabled within the given time
+ *
+ * The I2C bus state should be checked prior to disabling the hardware. If
+ * the bus is not idle, an errno is returned. Writing "0" to IC_ENABLE
+ * disables the I2C controller.
+ */
+static int intel_mid_i2c_disable(struct i2c_adapter *adap)
+{
+ struct intel_mid_i2c_private *i2c = i2c_get_adapdata(adap);
+ int err = 0;
+ int count = 0;
+ int ret1, ret2;
+ static const u16 delay[NUM_SPEEDS] = {100, 25, 3};
+
+ /* Set IC_ENABLE to 0 */
+ writel(0, i2c->base + IC_ENABLE);
+
+ /* Check if device is busy */
+ dev_dbg(&adap->dev, "mrst i2c disable\n");
+ while ((ret1 = readl(i2c->base + IC_ENABLE_STATUS) & 0x1)
+ || (ret2 = readl(i2c->base + IC_STATUS) & 0x1)) {
+ udelay(delay[i2c->speed]);
+ writel(0, i2c->base + IC_ENABLE);
+ dev_dbg(&adap->dev, "i2c is busy, count is %d speed %d\n",
+ count, i2c->speed);
+ if (count++ > 10) {
+ err = -ETIMEDOUT;
+ break;
+ }
+ }
+
+ /* Clear all interrupts */
+ readl(i2c->base + IC_CLR_INTR);
+ readl(i2c->base + IC_CLR_STOP_DET);
+ readl(i2c->base + IC_CLR_START_DET);
+ readl(i2c->base + IC_CLR_ACTIVITY);
+ readl(i2c->base + IC_CLR_TX_ABRT);
+ readl(i2c->base + IC_CLR_RX_OVER);
+ readl(i2c->base + IC_CLR_RX_UNDER);
+ readl(i2c->base + IC_CLR_TX_OVER);
+ readl(i2c->base + IC_CLR_RX_DONE);
+ readl(i2c->base + IC_CLR_GEN_CALL);
+
+ /* Disable all interrupts */
+ writel(0x0000, i2c->base + IC_INTR_MASK);
+
+ return err;
+}
+
+/**
+ * intel_mid_i2c_hwinit - Initialize the I2C hardware registers
+ * @dev: pci device struct pointer
+ *
+ * This function will be called in intel_mid_i2c_probe() before device
+ * registration.
+ *
+ * Return Values:
+ * 0 success
+ * -EBUSY i2c cannot be disabled
+ * -ETIMEDOUT i2c cannot be disabled
+ * -EFAULT If APB data width is not 32-bit wide
+ *
+ * I2C should be disabled prior to any other register operation. If that
+ * fails, an errno is returned. Mask and clear all interrupts first, then
+ * set the common registers that are not modified during normal transfers:
+ * the control register, the FIFO thresholds and the clock frequency.
+ * Check the APB data width last.
+ */
+static int intel_mid_i2c_hwinit(struct intel_mid_i2c_private *i2c)
+{
+ int err;
+
+ static const u16 hcnt[NUM_PLATFORMS][NUM_SPEEDS] = {
+ { 0x75, 0x15, 0x07 },
+ { 0x04c, 0x10, 0x06 }
+ };
+ static const u16 lcnt[NUM_PLATFORMS][NUM_SPEEDS] = {
+ { 0x7C, 0x21, 0x0E },
+ { 0x053, 0x19, 0x0F }
+ };
+
+ /* Disable i2c first */
+ err = intel_mid_i2c_disable(&i2c->adap);
+ if (err)
+ return err;
+
+ /*
+ * Setup clock frequency and speed mode
+ * Enable restart condition,
+ * enable master FSM, disable slave FSM,
+ * use target address when initiating transfer
+ */
+
+ writel((i2c->speed + 1) << 1 | SLV_DIS | RESTART | MASTER_EN,
+ i2c->base + IC_CON);
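+ /* The SS/FS/HS HCNT and LCNT registers are 8 bytes apart, so index by speed */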
+ writel(hcnt[i2c->platform][i2c->speed],
+ i2c->base + (IC_SS_SCL_HCNT + (i2c->speed << 3)));
+ writel(lcnt[i2c->platform][i2c->speed],
+ i2c->base + (IC_SS_SCL_LCNT + (i2c->speed << 3)));
+
+ /* Set transmit & receive FIFO thresholds to zero */
+ writel(0x0, i2c->base + IC_RX_TL);
+ writel(0x0, i2c->base + IC_TX_TL);
+
+ return 0;
+}
+
+/**
+ * intel_mid_i2c_func - Return the supported I2C functionality flags.
+ * @adapter: i2c_adapter struct pointer
+ */
+static u32 intel_mid_i2c_func(struct i2c_adapter *adapter)
+{
+ return I2C_FUNC_I2C | I2C_FUNC_10BIT_ADDR | I2C_FUNC_SMBUS_EMUL;
+}
+
+/**
+ * intel_mid_i2c_address_neq - Check whether two i2c messages address
+ * different targets.
+ * @p1: first i2c_msg
+ * @p2: second i2c_msg
+ *
+ * Return Values:
+ * 0 if addresses are equal
+ * 1 if not equal
+ *
+ * Within a single transfer, the I2C client may need to send its address more
+ * than once, so the addresses of all messages must be checked for a match.
+ */
+static inline bool intel_mid_i2c_address_neq(const struct i2c_msg *p1,
+ const struct i2c_msg *p2)
+{
+ if (p1->addr != p2->addr)
+ return 1;
+ if ((p1->flags ^ p2->flags) & I2C_M_TEN)
+ return 1;
+ return 0;
+}
+
+/**
+ * intel_mid_i2c_abort - Handle transfer aborts and print error messages.
+ * @adap: i2c_adapter struct pointer
+ *
+ * The various transfer errors are distinguished by reading the
+ * IC_TX_ABRT_SOURCE register. So far no case has been observed in which
+ * multiple errors occur simultaneously, so the register value is used
+ * directly.
+ *
+ * Finally the error bits are cleared (note that clearing the
+ * ABRT_SBYTE_NORSTRT bit needs a few extra steps).
+ */
+static void intel_mid_i2c_abort(struct intel_mid_i2c_private *i2c)
+{
+ /* Abort source, cached from IC_TX_ABRT_SOURCE by the interrupt handler */
+ int abort = i2c->abort;
+ struct i2c_adapter *adap = &i2c->adap;
+
+ /* Single transfer error check:
+ * According to the databook, the TX/RX FIFOs are flushed when
+ * the abort interrupt occurs.
+ */
+ if (abort & ABRT_MASTER_DIS)
+ dev_err(&adap->dev,
+ "initiate master operation with master mode disabled.\n");
+ if (abort & ABRT_10B_RD_NORSTRT)
+ dev_err(&adap->dev,
+ "RESTART disabled and master sent READ cmd in 10-bit addressing.\n");
+
+ if (abort & ABRT_SBYTE_NORSTRT) {
+ dev_err(&adap->dev,
+ "RESTART disabled and user is trying to send START byte.\n");
+ writel(~ABRT_SBYTE_NORSTRT, i2c->base + IC_TX_ABRT_SOURCE);
+ writel(RESTART, i2c->base + IC_CON);
+ writel(~IC_TAR_SPECIAL, i2c->base + IC_TAR);
+ }
+
+ if (abort & ABRT_SBYTE_ACKDET)
+ dev_err(&adap->dev,
+ "START byte was not acknowledged.\n");
+ if (abort & ABRT_TXDATA_NOACK)
+ dev_dbg(&adap->dev,
+ "No acknowledgement received from slave.\n");
+ if (abort & ABRT_10ADDR2_NOACK)
+ dev_dbg(&adap->dev,
+ "The 2nd address byte of the 10-bit address was not acknowledged.\n");
+ if (abort & ABRT_10ADDR1_NOACK)
+ dev_dbg(&adap->dev,
+ "The 1st address byte of 10-bit address was not acknowledged.\n");
+ if (abort & ABRT_7B_ADDR_NOACK)
+ dev_dbg(&adap->dev,
+ "I2C slave device not acknowledged.\n");
+
+ /* Clear TX_ABRT bit */
+ readl(i2c->base + IC_CLR_TX_ABRT);
+ i2c->status = STATUS_XFER_ABORT;
+}
+
+/**
+ * xfer_read - Internal function to implement master read transfer.
+ * @adap: i2c_adapter struct pointer
+ * @buf: buffer in i2c_msg
+ * @length: number of bytes to be read
+ *
+ * Return Values:
+ * 0 if the read transfer succeeds
+ * -EMSGSIZE if the requested length exceeds the FIFO limit
+ * -ETIMEDOUT if no completion is signalled before the timeout expires
+ * -EIO if the transfer was aborted
+ *
+ * For every byte, a "READ" command is loaded into IC_DATA_CMD prior to the
+ * data transfer. The actual read is performed when an RX_FULL interrupt
+ * occurs.
+ *
+ * Note that more than one interrupt may be captured; IC_RAW_INTR_STAT should
+ * be read to separate errors from actual data.
+ */
+static int xfer_read(struct i2c_adapter *adap, unsigned char *buf, int length)
+{
+ struct intel_mid_i2c_private *i2c = i2c_get_adapdata(adap);
+ int i = length;
+ int err;
+
+ if (length >= 256) {
+ dev_err(&adap->dev,
+ "I2C FIFO cannot support more than 255 bytes\n");
+ return -EMSGSIZE;
+ }
+
+ INIT_COMPLETION(i2c->complete);
+
+ readl(i2c->base + IC_CLR_INTR);
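+ /* Unmask the RX_FULL and TX_ABRT interrupts */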
+ writel(0x0044, i2c->base + IC_INTR_MASK);
+
+ i2c->status = STATUS_READ_START;
+
+ while (i--)
+ writel(IC_RD, i2c->base + IC_DATA_CMD);
+
+ i2c->status = STATUS_READ_START;
+ err = wait_for_completion_interruptible_timeout(&i2c->complete, HZ);
+ if (!err) {
+ dev_err(&adap->dev, "Timeout for ACK from I2C slave device\n");
+ intel_mid_i2c_hwinit(i2c);
+ return -ETIMEDOUT;
+ }
+ if (i2c->status == STATUS_READ_SUCCESS)
+ return 0;
+ else
+ return -EIO;
+}
+
+/**
+ * xfer_write - Internal function to implement master write transfer.
+ * @adap: i2c_adapter struct pointer
+ * @buf: buffer in i2c_msg
+ * @length: number of bytes to be written
+ *
+ * Return Values:
+ * 0 if the write transfer succeeds
+ * -EMSGSIZE if the requested length exceeds the FIFO limit
+ * -ETIMEDOUT if no completion is signalled before the timeout expires
+ * -EIO if the transfer was aborted
+ *
+ * For every byte, the data is loaded into IC_DATA_CMD as a "WRITE" command.
+ * The transfer is considered successful when the TX_EMPTY interrupt reports
+ * that the transmit FIFO has drained.
+ *
+ * Note that more than one interrupt may be captured; IC_RAW_INTR_STAT should
+ * be read to separate errors from actual data.
+ */
+static int xfer_write(struct i2c_adapter *adap,
+ unsigned char *buf, int length)
+{
+ struct intel_mid_i2c_private *i2c = i2c_get_adapdata(adap);
+ int i, err;
+
+ if (length >= 256) {
+ dev_err(&adap->dev,
+ "I2C FIFO cannot support more than 255 bytes\n");
+ return -EMSGSIZE;
+ }
+
+ INIT_COMPLETION(i2c->complete);
+
+ readl(i2c->base + IC_CLR_INTR);
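+ /* Unmask the TX_EMPTY and TX_ABRT interrupts */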
+ writel(0x0050, i2c->base + IC_INTR_MASK);
+
+ i2c->status = STATUS_WRITE_START;
+ for (i = 0; i < length; i++)
+ writel((u16)(*(buf + i)), i2c->base + IC_DATA_CMD);
+
+ i2c->status = STATUS_WRITE_START;
+ err = wait_for_completion_interruptible_timeout(&i2c->complete, HZ);
+ if (!err) {
+ dev_err(&adap->dev, "Timeout for ACK from I2C slave device\n");
+ intel_mid_i2c_hwinit(i2c);
+ return -ETIMEDOUT;
+ } else {
+ if (i2c->status == STATUS_WRITE_SUCCESS)
+ return 0;
+ else
+ return -EIO;
+ }
+}
+
+static int intel_mid_i2c_setup(struct i2c_adapter *adap, struct i2c_msg *pmsg)
+{
+ struct intel_mid_i2c_private *i2c = i2c_get_adapdata(adap);
+ int err;
+ u32 reg;
+ u32 bit_mask;
+ u32 mode;
+
+ /* Disable device first */
+ err = intel_mid_i2c_disable(adap);
+ if (err) {
+ dev_err(&adap->dev,
+ "Cannot disable i2c controller, timeout\n");
+ return err;
+ }
+
+ mode = (1 + i2c->speed) << 1;
+ /* set the speed mode */
+ reg = readl(i2c->base + IC_CON);
+ if ((reg & 0x06) != mode) {
+ dev_dbg(&adap->dev, "set mode %d\n", i2c->speed);
+ writel((reg & ~0x6) | mode, i2c->base + IC_CON);
+ }
+
+ reg = readl(i2c->base + IC_CON);
+ /* select 7-bit or 10-bit addressing */
+ if (pmsg->flags & I2C_M_TEN) {
+ if ((reg & ADDR_10BIT) != ADDR_10BIT) {
+ dev_dbg(&adap->dev, "set i2c 10 bit address mode\n");
+ writel(reg | ADDR_10BIT, i2c->base + IC_CON);
+ }
+ } else {
+ if ((reg & ADDR_10BIT) != 0x0) {
+ dev_dbg(&adap->dev, "set i2c 7 bit address mode\n");
+ writel(reg & ~ADDR_10BIT, i2c->base + IC_CON);
+ }
+ }
+ /* enable restart conditions */
+ reg = readl(i2c->base + IC_CON);
+ if ((reg & RESTART) != RESTART) {
+ dev_dbg(&adap->dev, "enable restart conditions\n");
+ writel(reg | RESTART, i2c->base + IC_CON);
+ }
+
+ /* enable master FSM */
+ reg = readl(i2c->base + IC_CON);
+ dev_dbg(&adap->dev, "ic_con reg is 0x%x\n", reg);
+ writel(reg | MASTER_EN, i2c->base + IC_CON);
+ if ((reg & SLV_DIS) != SLV_DIS) {
+ dev_dbg(&adap->dev, "enable master FSM\n");
+ writel(reg | SLV_DIS, i2c->base + IC_CON);
+ dev_dbg(&adap->dev, "ic_con reg is 0x%x\n", reg);
+ }
+
+ /* use target address when initiating transfer */
+ reg = readl(i2c->base + IC_TAR);
+ bit_mask = IC_TAR_SPECIAL | IC_TAR_GC_OR_START;
+
+ if ((reg & bit_mask) != 0x0) {
+ dev_dbg(&adap->dev,
+ "WR: use target address when initiating transfer, i2c_tx_target\n");
+ writel(reg & ~bit_mask, i2c->base + IC_TAR);
+ }
+
+ /* set target address to the I2C slave address */
+ dev_dbg(&adap->dev,
+ "set target address to the I2C slave address, addr is %x\n",
+ pmsg->addr);
+ writel(pmsg->addr | (pmsg->flags & I2C_M_TEN ? IC_TAR_10BIT_ADDR : 0),
+ i2c->base + IC_TAR);
+
+ /* Enable I2C controller */
+ writel(ENABLE, i2c->base + IC_ENABLE);
+
+ return 0;
+}
+
+/**
+ * intel_mid_i2c_xfer - Main master transfer routine.
+ * @adap: i2c_adapter struct pointer
+ * @pmsg: i2c_msg struct pointer
+ * @num: number of i2c_msg
+ *
+ * Return Values:
+ * + number of messages transferred
+ * -ETIMEDOUT if the I2C controller cannot be disabled or IC_STATUS read
+ * -EINVAL if the address in an i2c_msg is invalid
+ *
+ * This function will be registered in i2c-core and exposed to external
+ * I2C clients.
+ * 1. Disable I2C controller
+ * 2. Unmask three interrupts: RX_FULL, TX_EMPTY, TX_ABRT
+ * 3. Check if address in i2c_msg is valid
+ * 4. Enable I2C controller
+ * 5. Perform real transfer (call xfer_read or xfer_write)
+ * 6. Wait until the current transfer is finished (check bus state)
+ * 7. Mask and clear all interrupts
+ */
+static int intel_mid_i2c_xfer(struct i2c_adapter *adap,
+ struct i2c_msg *pmsg,
+ int num)
+{
+ struct intel_mid_i2c_private *i2c = i2c_get_adapdata(adap);
+ int i, err = 0;
+
+ /* nothing to do if there are no messages */
+ if (num == 0)
+ return 0;
+
+ pm_runtime_get(i2c->dev);
+
+ mutex_lock(&i2c->lock);
+ dev_dbg(&adap->dev, "intel_mid_i2c_xfer, process %d msg(s)\n", num);
+ dev_dbg(&adap->dev, "slave address is %x\n", pmsg->addr);
+
+
+ if (i2c->status != STATUS_IDLE) {
+ dev_err(&adap->dev, "Adapter %d in transfer/standby\n",
+ adap->nr);
+ mutex_unlock(&i2c->lock);
+ pm_runtime_put(i2c->dev);
+ return -1;
+ }
+
+
+ for (i = 1; i < num; i++) {
+ /* Message address equal? */
+ if (unlikely(intel_mid_i2c_address_neq(&pmsg[0], &pmsg[i]))) {
+ dev_err(&adap->dev, "Invalid address in msg[%d]\n", i);
+ mutex_unlock(&i2c->lock);
+ pm_runtime_put(i2c->dev);
+ return -EINVAL;
+ }
+ }
+
+ if (intel_mid_i2c_setup(adap, pmsg)) {
+ mutex_unlock(&i2c->lock);
+ pm_runtime_put(i2c->dev);
+ return -EINVAL;
+ }
+
+ for (i = 0; i < num; i++) {
+ i2c->msg = pmsg;
+ i2c->status = STATUS_IDLE;
+ /* Read or Write */
+ if (pmsg->flags & I2C_M_RD) {
+ dev_dbg(&adap->dev, "I2C_M_RD\n");
+ err = xfer_read(adap, pmsg->buf, pmsg->len);
+ } else {
+ dev_dbg(&adap->dev, "I2C_M_WR\n");
+ err = xfer_write(adap, pmsg->buf, pmsg->len);
+ }
+ if (err < 0)
+ break;
+ dev_dbg(&adap->dev, "msg[%d] transfer complete\n", i);
+ pmsg++; /* next message */
+ }
+
+ /* Mask interrupts */
+ writel(0x0000, i2c->base + IC_INTR_MASK);
+ /* Clear all interrupts */
+ readl(i2c->base + IC_CLR_INTR);
+
+ i2c->status = STATUS_IDLE;
+ mutex_unlock(&i2c->lock);
+ pm_runtime_put(i2c->dev);
+
+ return err;
+}
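+
+/*
+ * Illustrative client-side sketch (not part of this driver): a typical
+ * register read that would be serviced by intel_mid_i2c_xfer() above. The
+ * client pointer and register offset are hypothetical.
+ *
+ * u8 reg = 0x10, val;
+ * struct i2c_msg msgs[] = {
+ * { .addr = client->addr, .flags = 0, .len = 1, .buf = &reg },
+ * { .addr = client->addr, .flags = I2C_M_RD, .len = 1, .buf = &val },
+ * };
+ * if (i2c_transfer(client->adapter, msgs, ARRAY_SIZE(msgs)) < 0)
+ * dev_err(&client->dev, "register read failed\n");
+ */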
+
+static int intel_mid_i2c_runtime_suspend(struct device *dev)
+{
+ struct pci_dev *pdev = to_pci_dev(dev);
+ struct intel_mid_i2c_private *i2c = pci_get_drvdata(pdev);
+ struct i2c_adapter *adap = to_i2c_adapter(dev);
+ int err;
+
+ if (i2c->status != STATUS_IDLE)
+ return -1;
+
+ intel_mid_i2c_disable(adap);
+
+ err = pci_save_state(pdev);
+ if (err) {
+ dev_err(dev, "pci_save_state failed\n");
+ return err;
+ }
+
+ err = pci_set_power_state(pdev, PCI_D3hot);
+ if (err) {
+ dev_err(dev, "pci_set_power_state failed\n");
+ return err;
+ }
+ i2c->status = STATUS_STANDBY;
+
+ return 0;
+}
+
+static int intel_mid_i2c_runtime_resume(struct device *dev)
+{
+ struct pci_dev *pdev = to_pci_dev(dev);
+ struct intel_mid_i2c_private *i2c = pci_get_drvdata(pdev);
+ int err;
+
+ if (i2c->status != STATUS_STANDBY)
+ return 0;
+
+ pci_set_power_state(pdev, PCI_D0);
+ pci_restore_state(pdev);
+ err = pci_enable_device(pdev);
+ if (err) {
+ dev_err(dev, "pci_enable_device failed\n");
+ return err;
+ }
+
+ i2c->status = STATUS_IDLE;
+
+ intel_mid_i2c_hwinit(i2c);
+ return err;
+}
+
+static void i2c_isr_read(struct intel_mid_i2c_private *i2c)
+{
+ struct i2c_msg *msg = i2c->msg;
+ int rx_num;
+ u32 len;
+ u8 *buf;
+
+ if (!(msg->flags & I2C_M_RD))
+ return;
+
+ if (i2c->status != STATUS_READ_IN_PROGRESS) {
+ len = msg->len;
+ buf = msg->buf;
+ } else {
+ len = i2c->rx_buf_len;
+ buf = i2c->rx_buf;
+ }
+
+ rx_num = readl(i2c->base + IC_RXFLR);
+
+ for (; len > 0 && rx_num > 0; len--, rx_num--)
+ *buf++ = readl(i2c->base + IC_DATA_CMD);
+
+ if (len > 0) {
+ i2c->status = STATUS_READ_IN_PROGRESS;
+ i2c->rx_buf_len = len;
+ i2c->rx_buf = buf;
+ } else
+ i2c->status = STATUS_READ_SUCCESS;
+
+ return;
+}
+
+static irqreturn_t intel_mid_i2c_isr(int this_irq, void *dev)
+{
+ struct intel_mid_i2c_private *i2c = dev;
+ u32 stat = readl(i2c->base + IC_INTR_STAT);
+
+ if (!stat)
+ return IRQ_NONE;
+
+ dev_dbg(&i2c->adap.dev, "%s, stat = 0x%x\n", __func__, stat);
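+ /* Keep only the RX_FULL, TX_EMPTY and TX_ABRT bits */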
+ stat &= 0x54;
+
+ if (i2c->status != STATUS_WRITE_START &&
+ i2c->status != STATUS_READ_START &&
+ i2c->status != STATUS_READ_IN_PROGRESS)
+ goto err;
+
+ if (stat & TX_ABRT)
+ i2c->abort = readl(i2c->base + IC_TX_ABRT_SOURCE);
+
+ readl(i2c->base + IC_CLR_INTR);
+
+ if (stat & TX_ABRT) {
+ intel_mid_i2c_abort(i2c);
+ goto exit;
+ }
+
+ if (stat & RX_FULL) {
+ i2c_isr_read(i2c);
+ goto exit;
+ }
+
+ if (stat & TX_EMPTY) {
+ if (readl(i2c->base + IC_STATUS) & 0x4)
+ i2c->status = STATUS_WRITE_SUCCESS;
+ }
+
+exit:
+ if (i2c->status == STATUS_READ_SUCCESS ||
+ i2c->status == STATUS_WRITE_SUCCESS ||
+ i2c->status == STATUS_XFER_ABORT) {
+ /* Clear all interrupts */
+ readl(i2c->base + IC_CLR_INTR);
+ /* Mask interrupts */
+ writel(0, i2c->base + IC_INTR_MASK);
+ complete(&i2c->complete);
+ }
+err:
+ return IRQ_HANDLED;
+}
+
+static struct i2c_algorithm intel_mid_i2c_algorithm = {
+ .master_xfer = intel_mid_i2c_xfer,
+ .functionality = intel_mid_i2c_func,
+};
+
+
+static const struct dev_pm_ops intel_mid_i2c_pm_ops = {
+ .runtime_suspend = intel_mid_i2c_runtime_suspend,
+ .runtime_resume = intel_mid_i2c_runtime_resume,
+};
+
+/**
+ * intel_mid_i2c_probe - I2C controller initialization routine
+ * @dev: pci device
+ * @id: device id
+ *
+ * Return Values:
+ * 0 success
+ * -ENODEV if the PCI resource is not available
+ * -ENOMEM if remapping the register base or allocating the
+ * per-device structure failed
+ *
+ * Initialization steps:
+ * 1. Request the PCI resource
+ * 2. Remap the start address of the PCI resource to the register base
+ * 3. Request the device memory region
+ * 4. Fill in the struct members of intel_mid_i2c_private
+ * 5. Call intel_mid_i2c_hwinit() for hardware initialization
+ * 6. Register I2C adapter in i2c-core
+ */
+static int __devinit intel_mid_i2c_probe(struct pci_dev *dev,
+ const struct pci_device_id *id)
+{
+ struct intel_mid_i2c_private *mrst;
+ unsigned long start, len;
+ int err, busnum;
+ void __iomem *base = NULL;
+
+ dev_dbg(&dev->dev, "Get into probe function for I2C\n");
+ err = pci_enable_device(dev);
+ if (err) {
+ dev_err(&dev->dev, "Failed to enable I2C PCI device (%d)\n",
+ err);
+ goto exit;
+ }
+
+ /* Determine the address of the I2C area */
+ start = pci_resource_start(dev, 0);
+ len = pci_resource_len(dev, 0);
+ if (!start || len == 0) {
+ dev_err(&dev->dev, "base address not set\n");
+ err = -ENODEV;
+ goto exit;
+ }
+ dev_dbg(&dev->dev, "%s i2c resource start 0x%lx, len=%ld\n",
+ PLATFORM, start, len);
+
+ err = pci_request_region(dev, 0, DRIVER_NAME);
+ if (err) {
+ dev_err(&dev->dev, "failed to request I2C region "
+ "0x%lx-0x%lx\n", start,
+ (unsigned long)pci_resource_end(dev, 0));
+ goto exit;
+ }
+
+ base = ioremap_nocache(start, len);
+ if (!base) {
+ dev_err(&dev->dev, "I/O memory remapping failed\n");
+ err = -ENOMEM;
+ goto fail0;
+ }
+
+ /* Allocate the per-device data structure, intel_mid_i2c_private */
+ mrst = kzalloc(sizeof(struct intel_mid_i2c_private), GFP_KERNEL);
+ if (mrst == NULL) {
+ dev_err(&dev->dev, "can't allocate interface\n");
+ err = -ENOMEM;
+ goto fail1;
+ }
+
+ /* Initialize struct members */
+ snprintf(mrst->adap.name, sizeof(mrst->adap.name),
+ "MRST/Medfield I2C at %lx", start);
+ mrst->adap.owner = THIS_MODULE;
+ mrst->adap.algo = &intel_mid_i2c_algorithm;
+ mrst->adap.dev.parent = &dev->dev;
+ mrst->dev = &dev->dev;
+ mrst->base = base;
+ mrst->speed = STANDARD;
+ mrst->abort = 0;
+ mrst->rx_buf_len = 0;
+ mrst->status = STATUS_IDLE;
+
+ pci_set_drvdata(dev, mrst);
+ i2c_set_adapdata(&mrst->adap, mrst);
+
+ mrst->adap.nr = busnum = id->driver_data;
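+ /* PCI device IDs 0x0802..0x0804 are Moorestown, newer IDs are Medfield */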
+ if (dev->device <= 0x0804)
+ mrst->platform = MOORESTOWN;
+ else
+ mrst->platform = MEDFIELD;
+
+ dev_dbg(&dev->dev, "I2C%d\n", busnum);
+
+ if (ctl_num > busnum) {
+ if (speed_mode[busnum] < 0 || speed_mode[busnum] >= NUM_SPEEDS)
+ dev_warn(&dev->dev, "invalid speed %d ignored.\n",
+ speed_mode[busnum]);
+ else
+ mrst->speed = speed_mode[busnum];
+ }
+
+ /* Initialize i2c controller */
+ err = intel_mid_i2c_hwinit(mrst);
+ if (err < 0) {
+ dev_err(&dev->dev, "I2C interface initialization failed\n");
+ goto fail2;
+ }
+
+ mutex_init(&mrst->lock);
+ init_completion(&mrst->complete);
+
+ /* Clear all interrupts */
+ readl(mrst->base + IC_CLR_INTR);
+ writel(0x0000, mrst->base + IC_INTR_MASK);
+
+ err = request_irq(dev->irq, intel_mid_i2c_isr, IRQF_SHARED,
+ mrst->adap.name, mrst);
+ if (err) {
+ dev_err(&dev->dev, "Failed to request IRQ for I2C controller: "
+ "%s", mrst->adap.name);
+ goto fail2;
+ }
+
+ /* Adapter registration */
+ err = i2c_add_numbered_adapter(&mrst->adap);
+ if (err) {
+ dev_err(&dev->dev, "Adapter %s registration failed\n",
+ mrst->adap.name);
+ goto fail3;
+ }
+
+ dev_dbg(&dev->dev, "%s I2C bus %d driver bind success.\n",
+ (mrst->platform == MOORESTOWN) ? "Moorestown" : "Medfield",
+ busnum);
+
+ pm_runtime_enable(&dev->dev);
+ return 0;
+
+fail3:
+ free_irq(dev->irq, mrst);
+fail2:
+ pci_set_drvdata(dev, NULL);
+ kfree(mrst);
+fail1:
+ iounmap(base);
+fail0:
+ pci_release_region(dev, 0);
+exit:
+ return err;
+}
+
+static void __devexit intel_mid_i2c_remove(struct pci_dev *dev)
+{
+ struct intel_mid_i2c_private *mrst = pci_get_drvdata(dev);
+ intel_mid_i2c_disable(&mrst->adap);
+ if (i2c_del_adapter(&mrst->adap))
+ dev_err(&dev->dev, "Failed to delete i2c adapter");
+
+ free_irq(dev->irq, mrst);
+ pci_set_drvdata(dev, NULL);
+ iounmap(mrst->base);
+ kfree(mrst);
+ pci_release_region(dev, 0);
+}
+
+static struct pci_device_id intel_mid_i2c_ids[] = {
+ /* Moorestown */
+ { PCI_VDEVICE(INTEL, 0x0802), 0 },
+ { PCI_VDEVICE(INTEL, 0x0803), 1 },
+ { PCI_VDEVICE(INTEL, 0x0804), 2 },
+ /* Medfield */
+ { PCI_VDEVICE(INTEL, 0x0817), 3 },
+ { PCI_VDEVICE(INTEL, 0x0818), 4 },
+ { PCI_VDEVICE(INTEL, 0x0819), 5 },
+ { PCI_VDEVICE(INTEL, 0x082C), 0 },
+ { PCI_VDEVICE(INTEL, 0x082D), 1 },
+ { PCI_VDEVICE(INTEL, 0x082E), 2 },
+ { 0,}
+};
+MODULE_DEVICE_TABLE(pci, intel_mid_i2c_ids);
+
+static struct pci_driver intel_mid_i2c_driver = {
+ .name = DRIVER_NAME,
+ .id_table = intel_mid_i2c_ids,
+ .probe = intel_mid_i2c_probe,
+ .remove = __devexit_p(intel_mid_i2c_remove),
+};
+
+static int __init intel_mid_i2c_init(void)
+{
+ return pci_register_driver(&intel_mid_i2c_driver);
+}
+
+static void __exit intel_mid_i2c_exit(void)
+{
+ pci_unregister_driver(&intel_mid_i2c_driver);
+}
+
+module_init(intel_mid_i2c_init);
+module_exit(intel_mid_i2c_exit);
+
+MODULE_AUTHOR("Ba Zheng <zheng.ba@intel.com>");
+MODULE_DESCRIPTION("I2C driver for Moorestown Platform");
+MODULE_LICENSE("GPL");
+MODULE_VERSION(VERSION);
diff --git a/drivers/i2c/busses/i2c-nomadik.c b/drivers/i2c/busses/i2c-nomadik.c
index 73de8ade10b1..c9fffd0389fe 100644
--- a/drivers/i2c/busses/i2c-nomadik.c
+++ b/drivers/i2c/busses/i2c-nomadik.c
@@ -1,5 +1,5 @@
/*
- * Copyright (C) 2009 ST-Ericsson
+ * Copyright (C) 2009 ST-Ericsson SA
* Copyright (C) 2009 STMicroelectronics
*
* I2C master mode controller driver, used in Nomadik 8815
@@ -103,6 +103,9 @@
/* maximum threshold value */
#define MAX_I2C_FIFO_THRESHOLD 15
+/* per-transfer delay, required for the hardware to stabilize */
+#define I2C_DELAY 150
+
enum i2c_status {
I2C_NOP,
I2C_ON_GOING,
@@ -118,7 +121,7 @@ enum i2c_operation {
};
/* controller response timeout in ms */
-#define I2C_TIMEOUT_MS 500
+#define I2C_TIMEOUT_MS 2000
/**
* struct i2c_nmk_client - client specific data
@@ -250,6 +253,8 @@ static int init_hw(struct nmk_i2c_dev *dev)
{
int stat;
+ clk_enable(dev->clk);
+
stat = flush_i2c_fifo(dev);
if (stat)
return stat;
@@ -263,6 +268,9 @@ static int init_hw(struct nmk_i2c_dev *dev)
dev->cli.operation = I2C_NO_OPERATION;
+ clk_disable(dev->clk);
+
+ udelay(I2C_DELAY);
return 0;
}
@@ -431,7 +439,6 @@ static int read_i2c(struct nmk_i2c_dev *dev)
(void) init_hw(dev);
status = -ETIMEDOUT;
}
-
return status;
}
@@ -502,9 +509,9 @@ static int write_i2c(struct nmk_i2c_dev *dev)
/**
* nmk_i2c_xfer() - I2C transfer function used by kernel framework
- * @i2c_adap - Adapter pointer to the controller
- * @msgs[] - Pointer to data to be written.
- * @num_msgs - Number of messages to be executed
+ * @i2c_adap: Adapter pointer to the controller
+ * @msgs: Pointer to data to be written.
+ * @num_msgs: Number of messages to be executed
*
* This is the function called by the generic kernel i2c_transfer()
* or i2c_smbus...() API calls. Note that this code is protected by the
@@ -559,6 +566,8 @@ static int nmk_i2c_xfer(struct i2c_adapter *i2c_adap,
if (status)
return status;
+ clk_enable(dev->clk);
+
/* setup the i2c controller */
setup_i2c_controller(dev);
@@ -591,10 +600,13 @@ static int nmk_i2c_xfer(struct i2c_adapter *i2c_adap,
dev_err(&dev->pdev->dev, "%s\n",
cause >= ARRAY_SIZE(abort_causes)
? "unknown reason" : abort_causes[cause]);
+ clk_disable(dev->clk);
return status;
}
- mdelay(1);
+ udelay(I2C_DELAY);
}
+ clk_disable(dev->clk);
+
/* return the no. messages processed */
if (status)
return status;
@@ -605,6 +617,7 @@ static int nmk_i2c_xfer(struct i2c_adapter *i2c_adap,
/**
* disable_interrupts() - disable the interrupts
* @dev: private data of controller
+ * @irq: interrupt number
*/
static int disable_interrupts(struct nmk_i2c_dev *dev, u32 irq)
{
@@ -794,10 +807,7 @@ static irqreturn_t i2c_irq_handler(int irq, void *arg)
static unsigned int nmk_i2c_functionality(struct i2c_adapter *adap)
{
- return I2C_FUNC_I2C
- | I2C_FUNC_SMBUS_BYTE_DATA
- | I2C_FUNC_SMBUS_WORD_DATA
- | I2C_FUNC_SMBUS_I2C_BLOCK;
+ return I2C_FUNC_I2C | I2C_FUNC_SMBUS_EMUL;
}
static const struct i2c_algorithm nmk_i2c_algo = {
@@ -857,8 +867,6 @@ static int __devinit nmk_i2c_probe(struct platform_device *pdev)
goto err_no_clk;
}
- clk_enable(dev->clk);
-
adap = &dev->adap;
adap->dev.parent = &pdev->dev;
adap->owner = THIS_MODULE;
@@ -895,7 +903,6 @@ static int __devinit nmk_i2c_probe(struct platform_device *pdev)
return 0;
err_init_hw:
- clk_disable(dev->clk);
err_add_adap:
clk_put(dev->clk);
err_no_clk:
@@ -928,7 +935,6 @@ static int __devexit nmk_i2c_remove(struct platform_device *pdev)
iounmap(dev->virtbase);
if (res)
release_mem_region(res->start, resource_size(res));
- clk_disable(dev->clk);
clk_put(dev->clk);
platform_set_drvdata(pdev, NULL);
kfree(dev);
diff --git a/drivers/i2c/busses/i2c-s3c2410.c b/drivers/i2c/busses/i2c-s3c2410.c
index 6a292ea5e35c..6c00c107ebf3 100644
--- a/drivers/i2c/busses/i2c-s3c2410.c
+++ b/drivers/i2c/busses/i2c-s3c2410.c
@@ -554,18 +554,23 @@ static int s3c24xx_i2c_xfer(struct i2c_adapter *adap,
int retry;
int ret;
+ clk_enable(i2c->clk);
+
for (retry = 0; retry < adap->retries; retry++) {
ret = s3c24xx_i2c_doxfer(i2c, msgs, num);
- if (ret != -EAGAIN)
+ if (ret != -EAGAIN) {
+ clk_disable(i2c->clk);
return ret;
+ }
dev_dbg(i2c->dev, "Retrying transmission (%d)\n", retry);
udelay(100);
}
+ clk_disable(i2c->clk);
return -EREMOTEIO;
}
@@ -910,6 +915,7 @@ static int s3c24xx_i2c_probe(struct platform_device *pdev)
platform_set_drvdata(pdev, i2c);
dev_info(&pdev->dev, "%s: S3C I2C adapter\n", dev_name(&i2c->adap.dev));
+ clk_disable(i2c->clk);
return 0;
err_cpufreq:
@@ -977,7 +983,9 @@ static int s3c24xx_i2c_resume(struct device *dev)
struct s3c24xx_i2c *i2c = platform_get_drvdata(pdev);
i2c->suspended = 0;
+ clk_enable(i2c->clk);
s3c24xx_i2c_init(i2c);
+ clk_disable(i2c->clk);
return 0;
}
diff --git a/drivers/i2c/busses/i2c-sh7760.c b/drivers/i2c/busses/i2c-sh7760.c
index 4f93da31d3ad..3cad8fecc3d3 100644
--- a/drivers/i2c/busses/i2c-sh7760.c
+++ b/drivers/i2c/busses/i2c-sh7760.c
@@ -101,12 +101,12 @@ struct cami2c {
static inline void OUT32(struct cami2c *cam, int reg, unsigned long val)
{
- ctrl_outl(val, (unsigned long)cam->iobase + reg);
+ __raw_writel(val, (unsigned long)cam->iobase + reg);
}
static inline unsigned long IN32(struct cami2c *cam, int reg)
{
- return ctrl_inl((unsigned long)cam->iobase + reg);
+ return __raw_readl((unsigned long)cam->iobase + reg);
}
static irqreturn_t sh7760_i2c_irq(int irq, void *ptr)
diff --git a/drivers/i2c/busses/i2c-sh_mobile.c b/drivers/i2c/busses/i2c-sh_mobile.c
index 598c49acaeb5..2707f5e17158 100644
--- a/drivers/i2c/busses/i2c-sh_mobile.c
+++ b/drivers/i2c/busses/i2c-sh_mobile.c
@@ -538,15 +538,17 @@ static int sh_mobile_i2c_hook_irqs(struct platform_device *dev, int hook)
{
struct resource *res;
int ret = -ENXIO;
- int q, m;
- int k = 0;
- int n = 0;
+ int n, k = 0;
while ((res = platform_get_resource(dev, IORESOURCE_IRQ, k))) {
for (n = res->start; hook && n <= res->end; n++) {
if (request_irq(n, sh_mobile_i2c_isr, IRQF_DISABLED,
- dev_name(&dev->dev), dev))
+ dev_name(&dev->dev), dev)) {
+ for (n--; n >= res->start; n--)
+ free_irq(n, dev);
+
goto rollback;
+ }
}
k++;
}
@@ -554,16 +556,17 @@ static int sh_mobile_i2c_hook_irqs(struct platform_device *dev, int hook)
if (hook)
return k > 0 ? 0 : -ENOENT;
- k--;
ret = 0;
rollback:
- for (q = k; k >= 0; k--) {
- for (m = n; m >= res->start; m--)
- free_irq(m, dev);
+ k--;
+
+ while (k >= 0) {
+ res = platform_get_resource(dev, IORESOURCE_IRQ, k);
+ for (n = res->start; n <= res->end; n++)
+ free_irq(n, dev);
- res = platform_get_resource(dev, IORESOURCE_IRQ, k - 1);
- m = res->end;
+ k--;
}
return ret;
diff --git a/drivers/i2c/busses/scx200_acb.c b/drivers/i2c/busses/scx200_acb.c
index 4cb4bb009950..53fab518b3da 100644
--- a/drivers/i2c/busses/scx200_acb.c
+++ b/drivers/i2c/busses/scx200_acb.c
@@ -560,7 +560,8 @@ static const struct pci_device_id scx200_pci[] __initconst = {
{ PCI_DEVICE(PCI_VENDOR_ID_NS, PCI_DEVICE_ID_NS_CS5535_ISA),
.driver_data = 1 },
{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_CS5536_ISA),
- .driver_data = 2 }
+ .driver_data = 2 },
+ { 0, }
};
static struct {
diff --git a/drivers/i2c/i2c-core.c b/drivers/i2c/i2c-core.c
index d231f683f576..6b4cc567645b 100644
--- a/drivers/i2c/i2c-core.c
+++ b/drivers/i2c/i2c-core.c
@@ -848,6 +848,18 @@ static int i2c_register_adapter(struct i2c_adapter *adap)
goto out_list;
}
+ /* Sanity checks */
+ if (unlikely(adap->name[0] == '\0')) {
+ pr_err("i2c-core: Attempt to register an adapter with "
+ "no name!\n");
+ return -EINVAL;
+ }
+ if (unlikely(!adap->algo)) {
+ pr_err("i2c-core: Attempt to register adapter '%s' with "
+ "no algo!\n", adap->name);
+ return -EINVAL;
+ }
+
rt_mutex_init(&adap->bus_lock);
mutex_init(&adap->userspace_clients_lock);
INIT_LIST_HEAD(&adap->userspace_clients);
diff --git a/drivers/i2c/i2c-mux.c b/drivers/i2c/i2c-mux.c
index d32a4843fc3a..d7a4833be416 100644
--- a/drivers/i2c/i2c-mux.c
+++ b/drivers/i2c/i2c-mux.c
@@ -120,7 +120,6 @@ struct i2c_adapter *i2c_add_mux_adapter(struct i2c_adapter *parent,
snprintf(priv->adap.name, sizeof(priv->adap.name),
"i2c-%d-mux (chan_id %d)", i2c_adapter_id(parent), chan_id);
priv->adap.owner = THIS_MODULE;
- priv->adap.id = parent->id;
priv->adap.algo = &priv->algo;
priv->adap.algo_data = priv;
priv->adap.dev.parent = &parent->dev;
diff --git a/drivers/ide/hpt366.c b/drivers/ide/hpt366.c
index 97d98fbf5849..58c51cddc100 100644
--- a/drivers/ide/hpt366.c
+++ b/drivers/ide/hpt366.c
@@ -838,7 +838,7 @@ static void hpt3xxn_set_clock(ide_hwif_t *hwif, u8 mode)
static void hpt3xxn_rw_disk(ide_drive_t *drive, struct request *rq)
{
- hpt3xxn_set_clock(drive->hwif, rq_data_dir(rq) ? 0x23 : 0x21);
+ hpt3xxn_set_clock(drive->hwif, rq_data_dir(rq) ? 0x21 : 0x23);
}
/**
@@ -1173,8 +1173,9 @@ static u8 hpt3xx_cable_detect(ide_hwif_t *hwif)
u16 mcr;
pci_read_config_word(dev, mcr_addr, &mcr);
- pci_write_config_word(dev, mcr_addr, (mcr | 0x8000));
- /* now read cable id register */
+ pci_write_config_word(dev, mcr_addr, mcr | 0x8000);
+ /* Debounce, then read cable ID register */
+ udelay(10);
pci_read_config_byte(dev, 0x5a, &scr1);
pci_write_config_word(dev, mcr_addr, mcr);
} else if (chip_type >= HPT370) {
@@ -1185,10 +1186,11 @@ static u8 hpt3xx_cable_detect(ide_hwif_t *hwif)
u8 scr2 = 0;
pci_read_config_byte(dev, 0x5b, &scr2);
- pci_write_config_byte(dev, 0x5b, (scr2 & ~1));
- /* now read cable id register */
+ pci_write_config_byte(dev, 0x5b, scr2 & ~1);
+ /* Debounce, then read cable ID register */
+ udelay(10);
pci_read_config_byte(dev, 0x5a, &scr1);
- pci_write_config_byte(dev, 0x5b, scr2);
+ pci_write_config_byte(dev, 0x5b, scr2);
} else
pci_read_config_byte(dev, 0x5a, &scr1);
diff --git a/drivers/ide/ide-dma.c b/drivers/ide/ide-dma.c
index 06b14bc9a1d4..d4136908f916 100644
--- a/drivers/ide/ide-dma.c
+++ b/drivers/ide/ide-dma.c
@@ -449,7 +449,6 @@ ide_startstop_t ide_dma_timeout_retry(ide_drive_t *drive, int error)
ide_hwif_t *hwif = drive->hwif;
const struct ide_dma_ops *dma_ops = hwif->dma_ops;
struct ide_cmd *cmd = &hwif->cmd;
- struct request *rq;
ide_startstop_t ret = ide_stopped;
/*
@@ -487,14 +486,10 @@ ide_startstop_t ide_dma_timeout_retry(ide_drive_t *drive, int error)
ide_dma_off_quietly(drive);
/*
- * un-busy drive etc and make sure request is sane
+ * make sure request is sane
*/
- rq = hwif->rq;
- if (rq) {
- hwif->rq = NULL;
- rq->errors = 0;
- ide_requeue_and_plug(drive, rq);
- }
+ if (hwif->rq)
+ hwif->rq->errors = 0;
return ret;
}
diff --git a/drivers/idle/intel_idle.c b/drivers/idle/intel_idle.c
index cb3ccf3ed221..41665d2f9f93 100644
--- a/drivers/idle/intel_idle.c
+++ b/drivers/idle/intel_idle.c
@@ -74,7 +74,7 @@ static int max_cstate = MWAIT_MAX_NUM_CSTATES - 1;
static unsigned int mwait_substates;
/* Reliable LAPIC Timer States, bit 1 for C1 etc. */
-static unsigned int lapic_timer_reliable_states;
+static unsigned int lapic_timer_reliable_states = (1 << 1); /* Default to only C1 */
static struct cpuidle_device __percpu *intel_idle_cpuidle_devices;
static int intel_idle(struct cpuidle_device *dev, struct cpuidle_state *state);
@@ -94,7 +94,6 @@ static struct cpuidle_state nehalem_cstates[MWAIT_MAX_NUM_CSTATES] = {
.driver_data = (void *) 0x00,
.flags = CPUIDLE_FLAG_TIME_VALID,
.exit_latency = 3,
- .power_usage = 1000,
.target_residency = 6,
.enter = &intel_idle },
{ /* MWAIT C2 */
@@ -103,7 +102,6 @@ static struct cpuidle_state nehalem_cstates[MWAIT_MAX_NUM_CSTATES] = {
.driver_data = (void *) 0x10,
.flags = CPUIDLE_FLAG_TIME_VALID | CPUIDLE_FLAG_TLB_FLUSHED,
.exit_latency = 20,
- .power_usage = 500,
.target_residency = 80,
.enter = &intel_idle },
{ /* MWAIT C3 */
@@ -112,11 +110,46 @@ static struct cpuidle_state nehalem_cstates[MWAIT_MAX_NUM_CSTATES] = {
.driver_data = (void *) 0x20,
.flags = CPUIDLE_FLAG_TIME_VALID | CPUIDLE_FLAG_TLB_FLUSHED,
.exit_latency = 200,
- .power_usage = 350,
.target_residency = 800,
.enter = &intel_idle },
};
+static struct cpuidle_state snb_cstates[MWAIT_MAX_NUM_CSTATES] = {
+ { /* MWAIT C0 */ },
+ { /* MWAIT C1 */
+ .name = "SNB-C1",
+ .desc = "MWAIT 0x00",
+ .driver_data = (void *) 0x00,
+ .flags = CPUIDLE_FLAG_TIME_VALID,
+ .exit_latency = 1,
+ .target_residency = 4,
+ .enter = &intel_idle },
+ { /* MWAIT C2 */
+ .name = "SNB-C3",
+ .desc = "MWAIT 0x10",
+ .driver_data = (void *) 0x10,
+ .flags = CPUIDLE_FLAG_TIME_VALID | CPUIDLE_FLAG_TLB_FLUSHED,
+ .exit_latency = 80,
+ .target_residency = 160,
+ .enter = &intel_idle },
+ { /* MWAIT C3 */
+ .name = "SNB-C6",
+ .desc = "MWAIT 0x20",
+ .driver_data = (void *) 0x20,
+ .flags = CPUIDLE_FLAG_TIME_VALID | CPUIDLE_FLAG_TLB_FLUSHED,
+ .exit_latency = 104,
+ .target_residency = 208,
+ .enter = &intel_idle },
+ { /* MWAIT C4 */
+ .name = "SNB-C7",
+ .desc = "MWAIT 0x30",
+ .driver_data = (void *) 0x30,
+ .flags = CPUIDLE_FLAG_TIME_VALID | CPUIDLE_FLAG_TLB_FLUSHED,
+ .exit_latency = 109,
+ .target_residency = 300,
+ .enter = &intel_idle },
+};
+
static struct cpuidle_state atom_cstates[MWAIT_MAX_NUM_CSTATES] = {
{ /* MWAIT C0 */ },
{ /* MWAIT C1 */
@@ -125,7 +158,6 @@ static struct cpuidle_state atom_cstates[MWAIT_MAX_NUM_CSTATES] = {
.driver_data = (void *) 0x00,
.flags = CPUIDLE_FLAG_TIME_VALID,
.exit_latency = 1,
- .power_usage = 1000,
.target_residency = 4,
.enter = &intel_idle },
{ /* MWAIT C2 */
@@ -134,7 +166,6 @@ static struct cpuidle_state atom_cstates[MWAIT_MAX_NUM_CSTATES] = {
.driver_data = (void *) 0x10,
.flags = CPUIDLE_FLAG_TIME_VALID,
.exit_latency = 20,
- .power_usage = 500,
.target_residency = 80,
.enter = &intel_idle },
{ /* MWAIT C3 */ },
@@ -144,7 +175,6 @@ static struct cpuidle_state atom_cstates[MWAIT_MAX_NUM_CSTATES] = {
.driver_data = (void *) 0x30,
.flags = CPUIDLE_FLAG_TIME_VALID | CPUIDLE_FLAG_TLB_FLUSHED,
.exit_latency = 100,
- .power_usage = 250,
.target_residency = 400,
.enter = &intel_idle },
{ /* MWAIT C5 */ },
@@ -154,7 +184,6 @@ static struct cpuidle_state atom_cstates[MWAIT_MAX_NUM_CSTATES] = {
.driver_data = (void *) 0x52,
.flags = CPUIDLE_FLAG_TIME_VALID | CPUIDLE_FLAG_TLB_FLUSHED,
.exit_latency = 140,
- .power_usage = 150,
.target_residency = 560,
.enter = &intel_idle },
};
@@ -179,13 +208,10 @@ static int intel_idle(struct cpuidle_device *dev, struct cpuidle_state *state)
local_irq_disable();
/*
- * If the state flag indicates that the TLB will be flushed or if this
- * is the deepest c-state supported, do a voluntary leave mm to avoid
- * costly and mostly unnecessary wakeups for flushing the user TLB's
- * associated with the active mm.
+ * leave_mm() to avoid costly and often unnecessary wakeups
+ * for flushing the user TLB's associated with the active mm.
*/
- if (state->flags & CPUIDLE_FLAG_TLB_FLUSHED ||
- (&dev->states[dev->state_count - 1] == state))
+ if (state->flags & CPUIDLE_FLAG_TLB_FLUSHED)
leave_mm(cpu);
if (!(lapic_timer_reliable_states & (1 << (cstate))))
@@ -269,9 +295,14 @@ static int intel_idle_probe(void)
case 0x1C: /* 28 - Atom Processor */
case 0x26: /* 38 - Lincroft Atom Processor */
- lapic_timer_reliable_states = (1 << 2) | (1 << 1); /* C2, C1 */
+ lapic_timer_reliable_states = (1 << 1); /* C1 */
cpuidle_state_table = atom_cstates;
break;
+
+ case 0x2A: /* SNB */
+ case 0x2D: /* SNB Xeon */
+ cpuidle_state_table = snb_cstates;
+ break;
#ifdef FUTURE_USE
case 0x17: /* 23 - Core 2 Duo */
lapic_timer_reliable_states = (1 << 2) | (1 << 1); /* C2, C1 */
diff --git a/drivers/ieee1394/Kconfig b/drivers/ieee1394/Kconfig
deleted file mode 100644
index e02096cf7d95..000000000000
--- a/drivers/ieee1394/Kconfig
+++ /dev/null
@@ -1,182 +0,0 @@
-config IEEE1394
- tristate "Legacy alternative FireWire driver stack"
- depends on PCI || BROKEN
- help
- IEEE 1394 describes a high performance serial bus, which is also
- known as FireWire(tm) or i.Link(tm) and is used for connecting all
- sorts of devices (most notably digital video cameras) to your
- computer.
-
- If you have FireWire hardware and want to use it, say Y here. This
- is the core support only, you will also need to select a driver for
- your IEEE 1394 adapter.
-
- To compile this driver as a module, say M here: the module will be
- called ieee1394.
-
- NOTE:
- ieee1394 is superseded by the newer firewire-core driver. See
- http://ieee1394.wiki.kernel.org/index.php/Juju_Migration for
- further information on how to switch to the new FireWire drivers.
-
-config IEEE1394_OHCI1394
- tristate "OHCI-1394 controllers"
- depends on PCI && IEEE1394
- help
- Enable this driver if you have an IEEE 1394 controller based on the
- OHCI-1394 specification. The current driver is only tested with OHCI
- chipsets made by Texas Instruments and NEC. Most third-party vendors
- use one of these chipsets. It should work with any OHCI-1394
- compliant card, however.
-
- To compile this driver as a module, say M here: the module will be
- called ohci1394.
-
- NOTE:
- ohci1394 is superseded by the newer firewire-ohci driver. See
- http://ieee1394.wiki.kernel.org/index.php/Juju_Migration for
- further information on how to switch to the new FireWire drivers.
-
- If you want to install firewire-ohci and ohci1394 together, you
- should configure them only as modules and blacklist the driver(s)
- which you don't want to have auto-loaded. Add either
-
- blacklist ohci1394
- blacklist video1394
- blacklist dv1394
- or
- blacklist firewire-ohci
-
- to /etc/modprobe.conf or /etc/modprobe.d/* and update modprobe.conf
- depending on your distribution.
-
-comment "PCILynx controller requires I2C"
- depends on IEEE1394 && I2C=n
-
-config IEEE1394_PCILYNX
- tristate "PCILynx controller"
- depends on PCI && IEEE1394 && I2C
- select I2C_ALGOBIT
- help
- Say Y here if you have an IEEE-1394 controller with the Texas
- Instruments PCILynx chip. Note: this driver is written for revision
- 2 of this chip and may not work with revision 0.
-
- To compile this driver as a module, say M here: the module will be
- called pcilynx.
-
- Only some old and now very rare PCI and CardBus cards and
- PowerMacs G3 B&W contain the PCILynx controller. Therefore
- almost everybody can say N here.
-
-comment "SBP-2 support (for storage devices) requires SCSI"
- depends on IEEE1394 && SCSI=n
-
-config IEEE1394_SBP2
- tristate "Storage devices (SBP-2 protocol)"
- depends on IEEE1394 && SCSI
- help
- This option enables you to use SBP-2 devices connected to an IEEE
- 1394 bus. SBP-2 devices include storage devices like harddisks and
- DVD drives, also some other FireWire devices like scanners.
-
- You should also enable support for disks, CD-ROMs, etc. in the SCSI
- configuration section.
-
- To compile this driver as a module, say M here: the module will be
- called sbp2.
-
- NOTE:
- sbp2 is superseded by the newer firewire-sbp2 driver. See
- http://ieee1394.wiki.kernel.org/index.php/Juju_Migration for
- further information on how to switch to the new FireWire drivers.
-
-config IEEE1394_SBP2_PHYS_DMA
- bool "Enable replacement for physical DMA in SBP2"
- depends on IEEE1394_SBP2 && VIRT_TO_BUS && EXPERIMENTAL
- help
- This builds sbp2 for use with non-OHCI host adapters which do not
- support physical DMA or for when ohci1394 is run with phys_dma=0.
- Physical DMA is data movement without assistance of the drivers'
- interrupt handlers. This option includes the interrupt handlers
- that are required in absence of this hardware feature.
-
- This option is buggy and currently broken on some architectures.
- If unsure, say N.
-
-config IEEE1394_ETH1394_ROM_ENTRY
- depends on IEEE1394
- bool
- default n
-
-config IEEE1394_ETH1394
- tristate "IP networking over 1394 (experimental)"
- depends on IEEE1394 && EXPERIMENTAL && INET
- select IEEE1394_ETH1394_ROM_ENTRY
- help
- This driver implements a functional majority of RFC 2734: IPv4 over
- 1394. It will provide IP connectivity with implementations of RFC
- 2734 found on other operating systems. It will not communicate with
- older versions of this driver found in stock kernels prior to 2.6.3.
- This driver is still considered experimental. It does not yet support
- MCAP, therefore multicast support is significantly limited.
-
- The module is called eth1394 although it does not emulate Ethernet.
-
- NOTE:
- eth1394 is superseded by the newer firewire-net driver. See
- http://ieee1394.wiki.kernel.org/index.php/Juju_Migration for
- further information on how to switch to the new FireWire drivers.
-
-config IEEE1394_RAWIO
- tristate "raw1394 userspace interface"
- depends on IEEE1394
- help
- This option adds support for the raw1394 device file which enables
- direct communication of user programs with IEEE 1394 devices
- (isochronous and asynchronous). Almost all application programs
- which access FireWire require this option.
-
- To compile this driver as a module, say M here: the module will be
- called raw1394.
-
- NOTE:
- raw1394 is superseded by the newer firewire-core driver. See
- http://ieee1394.wiki.kernel.org/index.php/Juju_Migration for
- further information on how to switch to the new FireWire drivers.
-
-config IEEE1394_VIDEO1394
- tristate "video1394 userspace interface"
- depends on IEEE1394 && IEEE1394_OHCI1394
- help
- This option adds support for the video1394 device files which enable
- isochronous communication of user programs with IEEE 1394 devices,
- especially video capture or export. This interface is used by all
- libdc1394 based programs and by several other programs, in addition to
- the raw1394 interface. It is generally not required for DV capture.
-
- To compile this driver as a module, say M here: the module will be
- called video1394.
-
- NOTE:
- video1394 is superseded by the newer firewire-core driver. See
- http://ieee1394.wiki.kernel.org/index.php/Juju_Migration for
- further information on how to switch to the new FireWire drivers.
-
-config IEEE1394_DV1394
- tristate "dv1394 userspace interface (deprecated)"
- depends on IEEE1394 && IEEE1394_OHCI1394
- help
- The dv1394 driver is unsupported and may be removed from Linux in a
- future release. Its functionality is now provided by either
- raw1394 or firewire-core together with libraries such as libiec61883.
-
-config IEEE1394_VERBOSEDEBUG
- bool "Excessive debugging output"
- depends on IEEE1394
- help
- If you say Y here, you will get very verbose debugging logs from the
- ieee1394 drivers, including sent and received packet headers. This
- will quickly result in large amounts of data sent to the system log.
-
- Say Y if you really need the debugging output. Everyone else says N.
diff --git a/drivers/ieee1394/Makefile b/drivers/ieee1394/Makefile
deleted file mode 100644
index 1f8153b57503..000000000000
--- a/drivers/ieee1394/Makefile
+++ /dev/null
@@ -1,18 +0,0 @@
-#
-# Makefile for the Linux IEEE 1394 implementation
-#
-
-ieee1394-objs := ieee1394_core.o ieee1394_transactions.o hosts.o \
- highlevel.o csr.o nodemgr.o dma.o iso.o \
- csr1212.o config_roms.o
-
-obj-$(CONFIG_IEEE1394) += ieee1394.o
-obj-$(CONFIG_IEEE1394_PCILYNX) += pcilynx.o
-obj-$(CONFIG_IEEE1394_OHCI1394) += ohci1394.o
-obj-$(CONFIG_IEEE1394_VIDEO1394) += video1394.o
-obj-$(CONFIG_IEEE1394_RAWIO) += raw1394.o
-obj-$(CONFIG_IEEE1394_SBP2) += sbp2.o
-obj-$(CONFIG_IEEE1394_DV1394) += dv1394.o
-obj-$(CONFIG_IEEE1394_ETH1394) += eth1394.o
-
-obj-$(CONFIG_PROVIDE_OHCI1394_DMA_INIT) += init_ohci1394_dma.o
diff --git a/drivers/ieee1394/config_roms.c b/drivers/ieee1394/config_roms.c
deleted file mode 100644
index 1b981207fa76..000000000000
--- a/drivers/ieee1394/config_roms.c
+++ /dev/null
@@ -1,194 +0,0 @@
-/*
- * IEEE 1394 for Linux
- *
- * ConfigROM entries
- *
- * Copyright (C) 2004 Ben Collins
- *
- * This code is licensed under the GPL. See the file COPYING in the root
- * directory of the kernel sources for details.
- */
-
-#include <linux/types.h>
-
-#include "csr1212.h"
-#include "ieee1394.h"
-#include "ieee1394_types.h"
-#include "hosts.h"
-#include "ieee1394_core.h"
-#include "highlevel.h"
-#include "csr.h"
-#include "config_roms.h"
-
-struct hpsb_config_rom_entry {
- const char *name;
-
- /* Base initialization, called at module load */
- int (*init)(void);
-
- /* Cleanup called at module exit */
- void (*cleanup)(void);
-
- /* The flag added to host->config_roms */
- unsigned int flag;
-};
-
-/* The default host entry. This must succeed. */
-int hpsb_default_host_entry(struct hpsb_host *host)
-{
- struct csr1212_keyval *root;
- struct csr1212_keyval *vend_id = NULL;
- struct csr1212_keyval *text = NULL;
- char csr_name[128];
- int ret;
-
- sprintf(csr_name, "Linux - %s", host->driver->name);
- root = host->csr.rom->root_kv;
-
- vend_id = csr1212_new_immediate(CSR1212_KV_ID_VENDOR, host->csr.guid_hi >> 8);
- text = csr1212_new_string_descriptor_leaf(csr_name);
-
- if (!vend_id || !text) {
- if (vend_id)
- csr1212_release_keyval(vend_id);
- if (text)
- csr1212_release_keyval(text);
- csr1212_destroy_csr(host->csr.rom);
- return -ENOMEM;
- }
-
- csr1212_associate_keyval(vend_id, text);
- csr1212_release_keyval(text);
- ret = csr1212_attach_keyval_to_directory(root, vend_id);
- csr1212_release_keyval(vend_id);
- if (ret != CSR1212_SUCCESS) {
- csr1212_destroy_csr(host->csr.rom);
- return -ENOMEM;
- }
-
- host->update_config_rom = 1;
-
- return 0;
-}
-
-
-#ifdef CONFIG_IEEE1394_ETH1394_ROM_ENTRY
-#include "eth1394.h"
-
-static struct csr1212_keyval *ip1394_ud;
-
-static int config_rom_ip1394_init(void)
-{
- struct csr1212_keyval *spec_id = NULL;
- struct csr1212_keyval *spec_desc = NULL;
- struct csr1212_keyval *ver = NULL;
- struct csr1212_keyval *ver_desc = NULL;
- int ret = -ENOMEM;
-
- ip1394_ud = csr1212_new_directory(CSR1212_KV_ID_UNIT);
-
- spec_id = csr1212_new_immediate(CSR1212_KV_ID_SPECIFIER_ID,
- ETHER1394_GASP_SPECIFIER_ID);
- spec_desc = csr1212_new_string_descriptor_leaf("IANA");
- ver = csr1212_new_immediate(CSR1212_KV_ID_VERSION,
- ETHER1394_GASP_VERSION);
- ver_desc = csr1212_new_string_descriptor_leaf("IPv4");
-
- if (!ip1394_ud || !spec_id || !spec_desc || !ver || !ver_desc)
- goto ip1394_fail;
-
- csr1212_associate_keyval(spec_id, spec_desc);
- csr1212_associate_keyval(ver, ver_desc);
- if (csr1212_attach_keyval_to_directory(ip1394_ud, spec_id)
- == CSR1212_SUCCESS &&
- csr1212_attach_keyval_to_directory(ip1394_ud, ver)
- == CSR1212_SUCCESS)
- ret = 0;
-
-ip1394_fail:
- if (ret && ip1394_ud) {
- csr1212_release_keyval(ip1394_ud);
- ip1394_ud = NULL;
- }
-
- if (spec_id)
- csr1212_release_keyval(spec_id);
- if (spec_desc)
- csr1212_release_keyval(spec_desc);
- if (ver)
- csr1212_release_keyval(ver);
- if (ver_desc)
- csr1212_release_keyval(ver_desc);
-
- return ret;
-}
-
-static void config_rom_ip1394_cleanup(void)
-{
- if (ip1394_ud) {
- csr1212_release_keyval(ip1394_ud);
- ip1394_ud = NULL;
- }
-}
-
-int hpsb_config_rom_ip1394_add(struct hpsb_host *host)
-{
- if (!ip1394_ud)
- return -ENODEV;
-
- if (csr1212_attach_keyval_to_directory(host->csr.rom->root_kv,
- ip1394_ud) != CSR1212_SUCCESS)
- return -ENOMEM;
-
- host->config_roms |= HPSB_CONFIG_ROM_ENTRY_IP1394;
- host->update_config_rom = 1;
- return 0;
-}
-EXPORT_SYMBOL_GPL(hpsb_config_rom_ip1394_add);
-
-void hpsb_config_rom_ip1394_remove(struct hpsb_host *host)
-{
- csr1212_detach_keyval_from_directory(host->csr.rom->root_kv, ip1394_ud);
- host->config_roms &= ~HPSB_CONFIG_ROM_ENTRY_IP1394;
- host->update_config_rom = 1;
-}
-EXPORT_SYMBOL_GPL(hpsb_config_rom_ip1394_remove);
-
-static struct hpsb_config_rom_entry ip1394_entry = {
- .name = "ip1394",
- .init = config_rom_ip1394_init,
- .cleanup = config_rom_ip1394_cleanup,
- .flag = HPSB_CONFIG_ROM_ENTRY_IP1394,
-};
-
-#endif /* CONFIG_IEEE1394_ETH1394_ROM_ENTRY */
-
-static struct hpsb_config_rom_entry *const config_rom_entries[] = {
-#ifdef CONFIG_IEEE1394_ETH1394_ROM_ENTRY
- &ip1394_entry,
-#endif
-};
-
-/* Initialize all config roms */
-int hpsb_init_config_roms(void)
-{
- int i, error = 0;
-
- for (i = 0; i < ARRAY_SIZE(config_rom_entries); i++)
- if (config_rom_entries[i]->init()) {
- HPSB_ERR("Failed to initialize config rom entry `%s'",
- config_rom_entries[i]->name);
- error = -1;
- }
-
- return error;
-}
-
-/* Cleanup all config roms */
-void hpsb_cleanup_config_roms(void)
-{
- int i;
-
- for (i = 0; i < ARRAY_SIZE(config_rom_entries); i++)
- config_rom_entries[i]->cleanup();
-}
diff --git a/drivers/ieee1394/config_roms.h b/drivers/ieee1394/config_roms.h
deleted file mode 100644
index 1f5cd1f16c44..000000000000
--- a/drivers/ieee1394/config_roms.h
+++ /dev/null
@@ -1,19 +0,0 @@
-#ifndef _IEEE1394_CONFIG_ROMS_H
-#define _IEEE1394_CONFIG_ROMS_H
-
-struct hpsb_host;
-
-int hpsb_default_host_entry(struct hpsb_host *host);
-int hpsb_init_config_roms(void);
-void hpsb_cleanup_config_roms(void);
-
-/* List of flags to check if a host contains a certain extra config rom
- * entry. Available in the host->config_roms member. */
-#define HPSB_CONFIG_ROM_ENTRY_IP1394 0x00000001
-
-#ifdef CONFIG_IEEE1394_ETH1394_ROM_ENTRY
-int hpsb_config_rom_ip1394_add(struct hpsb_host *host);
-void hpsb_config_rom_ip1394_remove(struct hpsb_host *host);
-#endif
-
-#endif /* _IEEE1394_CONFIG_ROMS_H */
diff --git a/drivers/ieee1394/csr.c b/drivers/ieee1394/csr.c
deleted file mode 100644
index d696f69ebce5..000000000000
--- a/drivers/ieee1394/csr.c
+++ /dev/null
@@ -1,843 +0,0 @@
-/*
- * IEEE 1394 for Linux
- *
- * CSR implementation, iso/bus manager implementation.
- *
- * Copyright (C) 1999 Andreas E. Bombe
- * 2002 Manfred Weihs <weihs@ict.tuwien.ac.at>
- *
- * This code is licensed under the GPL. See the file COPYING in the root
- * directory of the kernel sources for details.
- *
- *
- * Contributions:
- *
- * Manfred Weihs <weihs@ict.tuwien.ac.at>
- * configuration ROM manipulation
- *
- */
-
-#include <linux/jiffies.h>
-#include <linux/kernel.h>
-#include <linux/module.h>
-#include <linux/moduleparam.h>
-#include <linux/param.h>
-#include <linux/spinlock.h>
-#include <linux/string.h>
-
-#include "csr1212.h"
-#include "ieee1394_types.h"
-#include "hosts.h"
-#include "ieee1394.h"
-#include "highlevel.h"
-#include "ieee1394_core.h"
-
-/* Module Parameters */
-/* this module parameter can be used to disable mapping of the FCP registers */
-
-static int fcp = 1;
-module_param(fcp, int, 0444);
-MODULE_PARM_DESC(fcp, "Map FCP registers (default = 1, disable = 0).");
-
-static struct csr1212_keyval *node_cap = NULL;
-
-static void add_host(struct hpsb_host *host);
-static void remove_host(struct hpsb_host *host);
-static void host_reset(struct hpsb_host *host);
-static int read_maps(struct hpsb_host *host, int nodeid, quadlet_t *buffer,
- u64 addr, size_t length, u16 fl);
-static int write_fcp(struct hpsb_host *host, int nodeid, int dest,
- quadlet_t *data, u64 addr, size_t length, u16 flags);
-static int read_regs(struct hpsb_host *host, int nodeid, quadlet_t *buf,
- u64 addr, size_t length, u16 flags);
-static int write_regs(struct hpsb_host *host, int nodeid, int destid,
- quadlet_t *data, u64 addr, size_t length, u16 flags);
-static int lock_regs(struct hpsb_host *host, int nodeid, quadlet_t *store,
- u64 addr, quadlet_t data, quadlet_t arg, int extcode, u16 fl);
-static int lock64_regs(struct hpsb_host *host, int nodeid, octlet_t * store,
- u64 addr, octlet_t data, octlet_t arg, int extcode, u16 fl);
-static int read_config_rom(struct hpsb_host *host, int nodeid, quadlet_t *buffer,
- u64 addr, size_t length, u16 fl);
-static u64 allocate_addr_range(u64 size, u32 alignment, void *__host);
-static void release_addr_range(u64 addr, void *__host);
-
-static struct hpsb_highlevel csr_highlevel = {
- .name = "standard registers",
- .add_host = add_host,
- .remove_host = remove_host,
- .host_reset = host_reset,
-};
-
-static const struct hpsb_address_ops map_ops = {
- .read = read_maps,
-};
-
-static const struct hpsb_address_ops fcp_ops = {
- .write = write_fcp,
-};
-
-static const struct hpsb_address_ops reg_ops = {
- .read = read_regs,
- .write = write_regs,
- .lock = lock_regs,
- .lock64 = lock64_regs,
-};
-
-static const struct hpsb_address_ops config_rom_ops = {
- .read = read_config_rom,
-};
-
-struct csr1212_bus_ops csr_bus_ops = {
- .allocate_addr_range = allocate_addr_range,
- .release_addr = release_addr_range,
-};
-
-
-static u16 csr_crc16(unsigned *data, int length)
-{
- int check=0, i;
- int shift, sum, next=0;
-
- for (i = length; i; i--) {
- for (next = check, shift = 28; shift >= 0; shift -= 4 ) {
- sum = ((next >> 12) ^ (be32_to_cpu(*data) >> shift)) & 0xf;
- next = (next << 4) ^ (sum << 12) ^ (sum << 5) ^ (sum);
- }
- check = next & 0xffff;
- data++;
- }
-
- return check;
-}
-
-static void host_reset(struct hpsb_host *host)
-{
- host->csr.state &= 0x300;
-
- host->csr.bus_manager_id = 0x3f;
- host->csr.bandwidth_available = 4915;
- host->csr.channels_available_hi = 0xfffffffe; /* pre-alloc ch 31 per 1394a-2000 */
- host->csr.channels_available_lo = ~0;
- host->csr.broadcast_channel = 0x80000000 | 31;
-
- if (host->is_irm) {
- if (host->driver->hw_csr_reg) {
- host->driver->hw_csr_reg(host, 2, 0xfffffffe, ~0);
- }
- }
-
- host->csr.node_ids = host->node_id << 16;
-
- if (!host->is_root) {
- /* clear cmstr bit */
- host->csr.state &= ~0x100;
- }
-
- be32_add_cpu(&host->csr.topology_map[1], 1);
- host->csr.topology_map[2] = cpu_to_be32(host->node_count << 16
- | host->selfid_count);
- host->csr.topology_map[0] =
- cpu_to_be32((host->selfid_count + 2) << 16
- | csr_crc16(host->csr.topology_map + 1,
- host->selfid_count + 2));
-
- be32_add_cpu(&host->csr.speed_map[1], 1);
- host->csr.speed_map[0] = cpu_to_be32(0x3f1 << 16
- | csr_crc16(host->csr.speed_map+1,
- 0x3f1));
-}
-
-/*
- * HI == seconds (bits 0:2)
- * LO == fractions of a second in units of 125usec (bits 19:31)
- *
- * Convert SPLIT_TIMEOUT to jiffies.
- * The default and minimum as per 1394a-2000 clause 8.3.2.2.6 is 100ms.
- */
-static inline void calculate_expire(struct csr_control *csr)
-{
- unsigned int usecs = (csr->split_timeout_hi & 7) * 1000000 +
- (csr->split_timeout_lo >> 19) * 125;
-
- csr->expire = usecs_to_jiffies(usecs > 100000 ? usecs : 100000);
- HPSB_VERBOSE("CSR: setting expire to %lu, HZ=%u", csr->expire, HZ);
-}
-
-
-static void add_host(struct hpsb_host *host)
-{
- struct csr1212_keyval *root;
- quadlet_t bus_info[CSR_BUS_INFO_SIZE];
-
- hpsb_register_addrspace(&csr_highlevel, host, &reg_ops,
- CSR_REGISTER_BASE,
- CSR_REGISTER_BASE + CSR_CONFIG_ROM);
- hpsb_register_addrspace(&csr_highlevel, host, &config_rom_ops,
- CSR_REGISTER_BASE + CSR_CONFIG_ROM,
- CSR_REGISTER_BASE + CSR_CONFIG_ROM_END);
- if (fcp) {
- hpsb_register_addrspace(&csr_highlevel, host, &fcp_ops,
- CSR_REGISTER_BASE + CSR_FCP_COMMAND,
- CSR_REGISTER_BASE + CSR_FCP_END);
- }
- hpsb_register_addrspace(&csr_highlevel, host, &map_ops,
- CSR_REGISTER_BASE + CSR_TOPOLOGY_MAP,
- CSR_REGISTER_BASE + CSR_TOPOLOGY_MAP_END);
- hpsb_register_addrspace(&csr_highlevel, host, &map_ops,
- CSR_REGISTER_BASE + CSR_SPEED_MAP,
- CSR_REGISTER_BASE + CSR_SPEED_MAP_END);
-
- spin_lock_init(&host->csr.lock);
-
- host->csr.state = 0;
- host->csr.node_ids = 0;
- host->csr.split_timeout_hi = 0;
- host->csr.split_timeout_lo = 800 << 19;
- calculate_expire(&host->csr);
- host->csr.cycle_time = 0;
- host->csr.bus_time = 0;
- host->csr.bus_manager_id = 0x3f;
- host->csr.bandwidth_available = 4915;
- host->csr.channels_available_hi = 0xfffffffe; /* pre-alloc ch 31 per 1394a-2000 */
- host->csr.channels_available_lo = ~0;
- host->csr.broadcast_channel = 0x80000000 | 31;
-
- if (host->is_irm) {
- if (host->driver->hw_csr_reg) {
- host->driver->hw_csr_reg(host, 2, 0xfffffffe, ~0);
- }
- }
-
- if (host->csr.max_rec >= 9)
- host->csr.max_rom = 2;
- else if (host->csr.max_rec >= 5)
- host->csr.max_rom = 1;
- else
- host->csr.max_rom = 0;
-
- host->csr.generation = 2;
-
- bus_info[1] = IEEE1394_BUSID_MAGIC;
- bus_info[2] = cpu_to_be32((hpsb_disable_irm ? 0 : 1 << CSR_IRMC_SHIFT) |
- (1 << CSR_CMC_SHIFT) |
- (1 << CSR_ISC_SHIFT) |
- (0 << CSR_BMC_SHIFT) |
- (0 << CSR_PMC_SHIFT) |
- (host->csr.cyc_clk_acc << CSR_CYC_CLK_ACC_SHIFT) |
- (host->csr.max_rec << CSR_MAX_REC_SHIFT) |
- (host->csr.max_rom << CSR_MAX_ROM_SHIFT) |
- (host->csr.generation << CSR_GENERATION_SHIFT) |
- host->csr.lnk_spd);
-
- bus_info[3] = cpu_to_be32(host->csr.guid_hi);
- bus_info[4] = cpu_to_be32(host->csr.guid_lo);
-
- /* The hardware copy of the bus info block will be set later when a
- * bus reset is issued. */
-
- csr1212_init_local_csr(host->csr.rom, bus_info, host->csr.max_rom);
-
- root = host->csr.rom->root_kv;
-
- if(csr1212_attach_keyval_to_directory(root, node_cap) != CSR1212_SUCCESS) {
- HPSB_ERR("Failed to attach Node Capabilities to root directory");
- }
-
- host->update_config_rom = 1;
-}
-
-static void remove_host(struct hpsb_host *host)
-{
- quadlet_t bus_info[CSR_BUS_INFO_SIZE];
-
- bus_info[1] = IEEE1394_BUSID_MAGIC;
- bus_info[2] = cpu_to_be32((0 << CSR_IRMC_SHIFT) |
- (0 << CSR_CMC_SHIFT) |
- (0 << CSR_ISC_SHIFT) |
- (0 << CSR_BMC_SHIFT) |
- (0 << CSR_PMC_SHIFT) |
- (host->csr.cyc_clk_acc << CSR_CYC_CLK_ACC_SHIFT) |
- (host->csr.max_rec << CSR_MAX_REC_SHIFT) |
- (0 << CSR_MAX_ROM_SHIFT) |
- (0 << CSR_GENERATION_SHIFT) |
- host->csr.lnk_spd);
-
- bus_info[3] = cpu_to_be32(host->csr.guid_hi);
- bus_info[4] = cpu_to_be32(host->csr.guid_lo);
-
- csr1212_detach_keyval_from_directory(host->csr.rom->root_kv, node_cap);
-
- csr1212_init_local_csr(host->csr.rom, bus_info, 0);
- host->update_config_rom = 1;
-}
-
-
-int hpsb_update_config_rom(struct hpsb_host *host, const quadlet_t *new_rom,
- size_t buffersize, unsigned char rom_version)
-{
- unsigned long flags;
- int ret;
-
- HPSB_NOTICE("hpsb_update_config_rom() is deprecated");
-
- spin_lock_irqsave(&host->csr.lock, flags);
- if (rom_version != host->csr.generation)
- ret = -1;
- else if (buffersize > host->csr.rom->cache_head->size)
- ret = -2;
- else {
- /* Just overwrite the generated ConfigROM image with new data,
- * it can be regenerated later. */
- memcpy(host->csr.rom->cache_head->data, new_rom, buffersize);
- host->csr.rom->cache_head->len = buffersize;
-
- if (host->driver->set_hw_config_rom)
- host->driver->set_hw_config_rom(host, host->csr.rom->bus_info_data);
- /* Increment the generation number to keep some sort of sync
- * with the newer ConfigROM manipulation method. */
- host->csr.generation++;
- if (host->csr.generation > 0xf || host->csr.generation < 2)
- host->csr.generation = 2;
- ret=0;
- }
- spin_unlock_irqrestore(&host->csr.lock, flags);
- return ret;
-}
-
-
-/* Read topology / speed maps and configuration ROM */
-static int read_maps(struct hpsb_host *host, int nodeid, quadlet_t *buffer,
- u64 addr, size_t length, u16 fl)
-{
- unsigned long flags;
- int csraddr = addr - CSR_REGISTER_BASE;
- const char *src;
-
- spin_lock_irqsave(&host->csr.lock, flags);
-
- if (csraddr < CSR_SPEED_MAP) {
- src = ((char *)host->csr.topology_map) + csraddr
- - CSR_TOPOLOGY_MAP;
- } else {
- src = ((char *)host->csr.speed_map) + csraddr - CSR_SPEED_MAP;
- }
-
- memcpy(buffer, src, length);
- spin_unlock_irqrestore(&host->csr.lock, flags);
- return RCODE_COMPLETE;
-}
-
-
-#define out if (--length == 0) break
-
-static int read_regs(struct hpsb_host *host, int nodeid, quadlet_t *buf,
- u64 addr, size_t length, u16 flags)
-{
- int csraddr = addr - CSR_REGISTER_BASE;
- int oldcycle;
- quadlet_t ret;
-
- if ((csraddr | length) & 0x3)
- return RCODE_TYPE_ERROR;
-
- length /= 4;
-
- switch (csraddr) {
- case CSR_STATE_CLEAR:
- *(buf++) = cpu_to_be32(host->csr.state);
- out;
- case CSR_STATE_SET:
- *(buf++) = cpu_to_be32(host->csr.state);
- out;
- case CSR_NODE_IDS:
- *(buf++) = cpu_to_be32(host->csr.node_ids);
- out;
-
- case CSR_RESET_START:
- return RCODE_TYPE_ERROR;
-
- /* address gap - handled by default below */
-
- case CSR_SPLIT_TIMEOUT_HI:
- *(buf++) = cpu_to_be32(host->csr.split_timeout_hi);
- out;
- case CSR_SPLIT_TIMEOUT_LO:
- *(buf++) = cpu_to_be32(host->csr.split_timeout_lo);
- out;
-
- /* address gap */
- return RCODE_ADDRESS_ERROR;
-
- case CSR_CYCLE_TIME:
- oldcycle = host->csr.cycle_time;
- host->csr.cycle_time =
- host->driver->devctl(host, GET_CYCLE_COUNTER, 0);
-
- if (oldcycle > host->csr.cycle_time) {
- /* cycle time wrapped around */
- host->csr.bus_time += 1 << 7;
- }
- *(buf++) = cpu_to_be32(host->csr.cycle_time);
- out;
- case CSR_BUS_TIME:
- oldcycle = host->csr.cycle_time;
- host->csr.cycle_time =
- host->driver->devctl(host, GET_CYCLE_COUNTER, 0);
-
- if (oldcycle > host->csr.cycle_time) {
- /* cycle time wrapped around */
- host->csr.bus_time += (1 << 7);
- }
- *(buf++) = cpu_to_be32(host->csr.bus_time
- | (host->csr.cycle_time >> 25));
- out;
-
- /* address gap */
- return RCODE_ADDRESS_ERROR;
-
- case CSR_BUSY_TIMEOUT:
- /* not yet implemented */
- return RCODE_ADDRESS_ERROR;
-
- case CSR_BUS_MANAGER_ID:
- if (host->driver->hw_csr_reg)
- ret = host->driver->hw_csr_reg(host, 0, 0, 0);
- else
- ret = host->csr.bus_manager_id;
-
- *(buf++) = cpu_to_be32(ret);
- out;
- case CSR_BANDWIDTH_AVAILABLE:
- if (host->driver->hw_csr_reg)
- ret = host->driver->hw_csr_reg(host, 1, 0, 0);
- else
- ret = host->csr.bandwidth_available;
-
- *(buf++) = cpu_to_be32(ret);
- out;
- case CSR_CHANNELS_AVAILABLE_HI:
- if (host->driver->hw_csr_reg)
- ret = host->driver->hw_csr_reg(host, 2, 0, 0);
- else
- ret = host->csr.channels_available_hi;
-
- *(buf++) = cpu_to_be32(ret);
- out;
- case CSR_CHANNELS_AVAILABLE_LO:
- if (host->driver->hw_csr_reg)
- ret = host->driver->hw_csr_reg(host, 3, 0, 0);
- else
- ret = host->csr.channels_available_lo;
-
- *(buf++) = cpu_to_be32(ret);
- out;
-
- case CSR_BROADCAST_CHANNEL:
- *(buf++) = cpu_to_be32(host->csr.broadcast_channel);
- out;
-
- /* address gap to end - fall through to default */
- default:
- return RCODE_ADDRESS_ERROR;
- }
-
- return RCODE_COMPLETE;
-}
-
-static int write_regs(struct hpsb_host *host, int nodeid, int destid,
- quadlet_t *data, u64 addr, size_t length, u16 flags)
-{
- int csraddr = addr - CSR_REGISTER_BASE;
-
- if ((csraddr | length) & 0x3)
- return RCODE_TYPE_ERROR;
-
- length /= 4;
-
- switch (csraddr) {
- case CSR_STATE_CLEAR:
- /* FIXME FIXME FIXME */
- printk("doh, someone wants to mess with state clear\n");
- out;
- case CSR_STATE_SET:
- printk("doh, someone wants to mess with state set\n");
- out;
-
- case CSR_NODE_IDS:
- host->csr.node_ids &= NODE_MASK << 16;
- host->csr.node_ids |= be32_to_cpu(*(data++)) & (BUS_MASK << 16);
- host->node_id = host->csr.node_ids >> 16;
- host->driver->devctl(host, SET_BUS_ID, host->node_id >> 6);
- out;
-
- case CSR_RESET_START:
- /* FIXME - perform command reset */
- out;
-
- /* address gap */
- return RCODE_ADDRESS_ERROR;
-
- case CSR_SPLIT_TIMEOUT_HI:
- host->csr.split_timeout_hi =
- be32_to_cpu(*(data++)) & 0x00000007;
- calculate_expire(&host->csr);
- out;
- case CSR_SPLIT_TIMEOUT_LO:
- host->csr.split_timeout_lo =
- be32_to_cpu(*(data++)) & 0xfff80000;
- calculate_expire(&host->csr);
- out;
-
- /* address gap */
- return RCODE_ADDRESS_ERROR;
-
- case CSR_CYCLE_TIME:
- /* should only be set by cycle start packet, automatically */
- host->csr.cycle_time = be32_to_cpu(*data);
- host->driver->devctl(host, SET_CYCLE_COUNTER,
- be32_to_cpu(*(data++)));
- out;
- case CSR_BUS_TIME:
- host->csr.bus_time = be32_to_cpu(*(data++)) & 0xffffff80;
- out;
-
- /* address gap */
- return RCODE_ADDRESS_ERROR;
-
- case CSR_BUSY_TIMEOUT:
- /* not yet implemented */
- return RCODE_ADDRESS_ERROR;
-
- case CSR_BUS_MANAGER_ID:
- case CSR_BANDWIDTH_AVAILABLE:
- case CSR_CHANNELS_AVAILABLE_HI:
- case CSR_CHANNELS_AVAILABLE_LO:
- /* these are not writable, only lockable */
- return RCODE_TYPE_ERROR;
-
- case CSR_BROADCAST_CHANNEL:
- /* only the valid bit can be written */
- host->csr.broadcast_channel = (host->csr.broadcast_channel & ~0x40000000)
- | (be32_to_cpu(*data) & 0x40000000);
- out;
-
- /* address gap to end - fall through */
- default:
- return RCODE_ADDRESS_ERROR;
- }
-
- return RCODE_COMPLETE;
-}
-
-#undef out
-
-
-static int lock_regs(struct hpsb_host *host, int nodeid, quadlet_t *store,
- u64 addr, quadlet_t data, quadlet_t arg, int extcode, u16 fl)
-{
- int csraddr = addr - CSR_REGISTER_BASE;
- unsigned long flags;
- quadlet_t *regptr = NULL;
-
- if (csraddr & 0x3)
- return RCODE_TYPE_ERROR;
-
- if (csraddr < CSR_BUS_MANAGER_ID || csraddr > CSR_CHANNELS_AVAILABLE_LO
- || extcode != EXTCODE_COMPARE_SWAP)
- goto unsupported_lockreq;
-
- data = be32_to_cpu(data);
- arg = be32_to_cpu(arg);
-
- /* Is somebody releasing the broadcast_channel on us? */
- if (csraddr == CSR_CHANNELS_AVAILABLE_HI && (data & 0x1)) {
- /* Note: this may not be the right way to handle
- * the problem, so we should look into the proper way
- * eventually. */
- HPSB_WARN("Node [" NODE_BUS_FMT "] wants to release "
- "broadcast channel 31. Ignoring.",
- NODE_BUS_ARGS(host, nodeid));
-
- data &= ~0x1; /* keep broadcast channel allocated */
- }
-
- if (host->driver->hw_csr_reg) {
- quadlet_t old;
-
- old = host->driver->
- hw_csr_reg(host, (csraddr - CSR_BUS_MANAGER_ID) >> 2,
- data, arg);
-
- *store = cpu_to_be32(old);
- return RCODE_COMPLETE;
- }
-
- spin_lock_irqsave(&host->csr.lock, flags);
-
- switch (csraddr) {
- case CSR_BUS_MANAGER_ID:
- regptr = &host->csr.bus_manager_id;
- *store = cpu_to_be32(*regptr);
- if (*regptr == arg)
- *regptr = data;
- break;
-
- case CSR_BANDWIDTH_AVAILABLE:
- {
- quadlet_t bandwidth;
- quadlet_t old;
- quadlet_t new;
-
- regptr = &host->csr.bandwidth_available;
- old = *regptr;
-
- /* bandwidth available algorithm adapted from IEEE 1394a-2000 spec */
- if (arg > 0x1fff) {
- *store = cpu_to_be32(old); /* change nothing */
- break;
- }
- data &= 0x1fff;
- if (arg >= data) {
- /* allocate bandwidth */
- bandwidth = arg - data;
- if (old >= bandwidth) {
- new = old - bandwidth;
- *store = cpu_to_be32(arg);
- *regptr = new;
- } else {
- *store = cpu_to_be32(old);
- }
- } else {
- /* deallocate bandwidth */
- bandwidth = data - arg;
- if (old + bandwidth < 0x2000) {
- new = old + bandwidth;
- *store = cpu_to_be32(arg);
- *regptr = new;
- } else {
- *store = cpu_to_be32(old);
- }
- }
- break;
- }
-
- case CSR_CHANNELS_AVAILABLE_HI:
- {
- /* Lock algorithm for CHANNELS_AVAILABLE as recommended by 1394a-2000 */
- quadlet_t affected_channels = arg ^ data;
-
- regptr = &host->csr.channels_available_hi;
-
- if ((arg & affected_channels) == (*regptr & affected_channels)) {
- *regptr ^= affected_channels;
- *store = cpu_to_be32(arg);
- } else {
- *store = cpu_to_be32(*regptr);
- }
-
- break;
- }
-
- case CSR_CHANNELS_AVAILABLE_LO:
- {
- /* Lock algorithm for CHANNELS_AVAILABLE as recommended by 1394a-2000 */
- quadlet_t affected_channels = arg ^ data;
-
- regptr = &host->csr.channels_available_lo;
-
- if ((arg & affected_channels) == (*regptr & affected_channels)) {
- *regptr ^= affected_channels;
- *store = cpu_to_be32(arg);
- } else {
- *store = cpu_to_be32(*regptr);
- }
- break;
- }
- }
-
- spin_unlock_irqrestore(&host->csr.lock, flags);
-
- return RCODE_COMPLETE;
-
- unsupported_lockreq:
- switch (csraddr) {
- case CSR_STATE_CLEAR:
- case CSR_STATE_SET:
- case CSR_RESET_START:
- case CSR_NODE_IDS:
- case CSR_SPLIT_TIMEOUT_HI:
- case CSR_SPLIT_TIMEOUT_LO:
- case CSR_CYCLE_TIME:
- case CSR_BUS_TIME:
- case CSR_BROADCAST_CHANNEL:
- return RCODE_TYPE_ERROR;
-
- case CSR_BUSY_TIMEOUT:
- /* not yet implemented - fall through */
- default:
- return RCODE_ADDRESS_ERROR;
- }
-}
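
lock_regs() above implements the responder side of the 1394a compare-swap registers; a requester allocates bandwidth by retrying such a lock until its view of BANDWIDTH_AVAILABLE is current. A minimal sketch of that loop, where csr_compare_swap() is a hypothetical stand-in for sending a compare_swap lock request to the IRM and returning the quadlet stored back by the responder:

static int example_allocate_bandwidth(struct hpsb_host *host, int irm_id,
				      quadlet_t units)
{
	quadlet_t expected = host->csr.bandwidth_available; /* starting guess */
	quadlet_t old;
	int tries = 5;

	while (tries--) {
		if (expected < units)
			return -ENOSPC;		/* not enough bandwidth left */

		/* hypothetical helper: compare-swap lock request to the IRM */
		old = csr_compare_swap(host, irm_id,
				       CSR_REGISTER_BASE + CSR_BANDWIDTH_AVAILABLE,
				       expected /* arg */, expected - units /* data */);
		if (old == expected)
			return 0;		/* units allocated */
		expected = old;			/* lost a race; retry with fresh value */
	}
	return -EAGAIN;
}

Note that, per the handler above, the responder applies the difference arg - data to whatever the register currently holds, so a retry with a refreshed expected value still claims exactly the requested amount.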
-
-static int lock64_regs(struct hpsb_host *host, int nodeid, octlet_t * store,
- u64 addr, octlet_t data, octlet_t arg, int extcode, u16 fl)
-{
- int csraddr = addr - CSR_REGISTER_BASE;
- unsigned long flags;
-
- data = be64_to_cpu(data);
- arg = be64_to_cpu(arg);
-
- if (csraddr & 0x3)
- return RCODE_TYPE_ERROR;
-
- if (csraddr != CSR_CHANNELS_AVAILABLE
- || extcode != EXTCODE_COMPARE_SWAP)
- goto unsupported_lock64req;
-
- /* Is somebody releasing the broadcast_channel on us? */
- if (csraddr == CSR_CHANNELS_AVAILABLE_HI && (data & 0x100000000ULL)) {
- /* Note: this may not be the right way to handle
- * the problem, so we should look into the proper way
- * eventually. */
- HPSB_WARN("Node [" NODE_BUS_FMT "] wants to release "
- "broadcast channel 31. Ignoring.",
- NODE_BUS_ARGS(host, nodeid));
-
- data &= ~0x100000000ULL; /* keep broadcast channel allocated */
- }
-
- if (host->driver->hw_csr_reg) {
- quadlet_t data_hi, data_lo;
- quadlet_t arg_hi, arg_lo;
- quadlet_t old_hi, old_lo;
-
- data_hi = data >> 32;
- data_lo = data & 0xFFFFFFFF;
- arg_hi = arg >> 32;
- arg_lo = arg & 0xFFFFFFFF;
-
- old_hi = host->driver->hw_csr_reg(host, (csraddr - CSR_BUS_MANAGER_ID) >> 2,
- data_hi, arg_hi);
-
- old_lo = host->driver->hw_csr_reg(host, ((csraddr + 4) - CSR_BUS_MANAGER_ID) >> 2,
- data_lo, arg_lo);
-
- *store = cpu_to_be64(((octlet_t)old_hi << 32) | old_lo);
- } else {
- octlet_t old;
- octlet_t affected_channels = arg ^ data;
-
- spin_lock_irqsave(&host->csr.lock, flags);
-
- old = ((octlet_t)host->csr.channels_available_hi << 32) | host->csr.channels_available_lo;
-
- if ((arg & affected_channels) == (old & affected_channels)) {
- host->csr.channels_available_hi ^= (affected_channels >> 32);
- host->csr.channels_available_lo ^= (affected_channels & 0xffffffff);
- *store = cpu_to_be64(arg);
- } else {
- *store = cpu_to_be64(old);
- }
-
- spin_unlock_irqrestore(&host->csr.lock, flags);
- }
-
- /* Is somebody erroneously releasing the broadcast_channel on us? */
- if (host->csr.channels_available_hi & 0x1)
- host->csr.channels_available_hi &= ~0x1;
-
- return RCODE_COMPLETE;
-
- unsupported_lock64req:
- switch (csraddr) {
- case CSR_STATE_CLEAR:
- case CSR_STATE_SET:
- case CSR_RESET_START:
- case CSR_NODE_IDS:
- case CSR_SPLIT_TIMEOUT_HI:
- case CSR_SPLIT_TIMEOUT_LO:
- case CSR_CYCLE_TIME:
- case CSR_BUS_TIME:
- case CSR_BUS_MANAGER_ID:
- case CSR_BROADCAST_CHANNEL:
- case CSR_BUSY_TIMEOUT:
- case CSR_BANDWIDTH_AVAILABLE:
- return RCODE_TYPE_ERROR;
-
- default:
- return RCODE_ADDRESS_ERROR;
- }
-}
-
-static int write_fcp(struct hpsb_host *host, int nodeid, int dest,
- quadlet_t *data, u64 addr, size_t length, u16 flags)
-{
- int csraddr = addr - CSR_REGISTER_BASE;
-
- if (length > 512)
- return RCODE_TYPE_ERROR;
-
- switch (csraddr) {
- case CSR_FCP_COMMAND:
- highlevel_fcp_request(host, nodeid, 0, (u8 *)data, length);
- break;
- case CSR_FCP_RESPONSE:
- highlevel_fcp_request(host, nodeid, 1, (u8 *)data, length);
- break;
- default:
- return RCODE_TYPE_ERROR;
- }
-
- return RCODE_COMPLETE;
-}
-
-static int read_config_rom(struct hpsb_host *host, int nodeid, quadlet_t *buffer,
- u64 addr, size_t length, u16 fl)
-{
- u32 offset = addr - CSR1212_REGISTER_SPACE_BASE;
-
- if (csr1212_read(host->csr.rom, offset, buffer, length) == CSR1212_SUCCESS)
- return RCODE_COMPLETE;
- else
- return RCODE_ADDRESS_ERROR;
-}
-
-static u64 allocate_addr_range(u64 size, u32 alignment, void *__host)
-{
- struct hpsb_host *host = (struct hpsb_host*)__host;
-
- return hpsb_allocate_and_register_addrspace(&csr_highlevel,
- host,
- &config_rom_ops,
- size, alignment,
- CSR1212_UNITS_SPACE_BASE,
- CSR1212_UNITS_SPACE_END);
-}
-
-static void release_addr_range(u64 addr, void *__host)
-{
- struct hpsb_host *host = (struct hpsb_host*)__host;
- hpsb_unregister_addrspace(&csr_highlevel, host, addr);
-}
-
-
-int init_csr(void)
-{
- node_cap = csr1212_new_immediate(CSR1212_KV_ID_NODE_CAPABILITIES, 0x0083c0);
- if (!node_cap) {
- HPSB_ERR("Failed to allocate memory for Node Capabilties ConfigROM entry!");
- return -ENOMEM;
- }
-
- hpsb_register_highlevel(&csr_highlevel);
-
- return 0;
-}
-
-void cleanup_csr(void)
-{
- if (node_cap)
- csr1212_release_keyval(node_cap);
- hpsb_unregister_highlevel(&csr_highlevel);
-}
diff --git a/drivers/ieee1394/csr.h b/drivers/ieee1394/csr.h
deleted file mode 100644
index 90fb3f2192c3..000000000000
--- a/drivers/ieee1394/csr.h
+++ /dev/null
@@ -1,99 +0,0 @@
-#ifndef _IEEE1394_CSR_H
-#define _IEEE1394_CSR_H
-
-#include <linux/spinlock_types.h>
-
-#include "csr1212.h"
-#include "ieee1394_types.h"
-
-#define CSR_REGISTER_BASE 0xfffff0000000ULL
-
-/* register offsets relative to CSR_REGISTER_BASE */
-#define CSR_STATE_CLEAR 0x0
-#define CSR_STATE_SET 0x4
-#define CSR_NODE_IDS 0x8
-#define CSR_RESET_START 0xc
-#define CSR_SPLIT_TIMEOUT_HI 0x18
-#define CSR_SPLIT_TIMEOUT_LO 0x1c
-#define CSR_CYCLE_TIME 0x200
-#define CSR_BUS_TIME 0x204
-#define CSR_BUSY_TIMEOUT 0x210
-#define CSR_BUS_MANAGER_ID 0x21c
-#define CSR_BANDWIDTH_AVAILABLE 0x220
-#define CSR_CHANNELS_AVAILABLE 0x224
-#define CSR_CHANNELS_AVAILABLE_HI 0x224
-#define CSR_CHANNELS_AVAILABLE_LO 0x228
-#define CSR_BROADCAST_CHANNEL 0x234
-#define CSR_CONFIG_ROM 0x400
-#define CSR_CONFIG_ROM_END 0x800
-#define CSR_FCP_COMMAND 0xB00
-#define CSR_FCP_RESPONSE 0xD00
-#define CSR_FCP_END 0xF00
-#define CSR_TOPOLOGY_MAP 0x1000
-#define CSR_TOPOLOGY_MAP_END 0x1400
-#define CSR_SPEED_MAP 0x2000
-#define CSR_SPEED_MAP_END 0x3000
-
-/* IEEE 1394 bus specific Configuration ROM Key IDs */
-#define IEEE1394_KV_ID_POWER_REQUIREMENTS (0x30)
-
-/* IEEE 1394 Bus Information Block specifics */
-#define CSR_BUS_INFO_SIZE (5 * sizeof(quadlet_t))
-
-#define CSR_IRMC_SHIFT 31
-#define CSR_CMC_SHIFT 30
-#define CSR_ISC_SHIFT 29
-#define CSR_BMC_SHIFT 28
-#define CSR_PMC_SHIFT 27
-#define CSR_CYC_CLK_ACC_SHIFT 16
-#define CSR_MAX_REC_SHIFT 12
-#define CSR_MAX_ROM_SHIFT 8
-#define CSR_GENERATION_SHIFT 4
-
-static inline void csr_set_bus_info_generation(struct csr1212_csr *csr, u8 gen)
-{
- csr->bus_info_data[2] &= ~cpu_to_be32(0xf << CSR_GENERATION_SHIFT);
- csr->bus_info_data[2] |= cpu_to_be32((u32)gen << CSR_GENERATION_SHIFT);
-}
-
-struct csr_control {
- spinlock_t lock;
-
- quadlet_t state;
- quadlet_t node_ids;
- quadlet_t split_timeout_hi, split_timeout_lo;
- unsigned long expire; /* Calculated from split_timeout */
- quadlet_t cycle_time;
- quadlet_t bus_time;
- quadlet_t bus_manager_id;
- quadlet_t bandwidth_available;
- quadlet_t channels_available_hi, channels_available_lo;
- quadlet_t broadcast_channel;
-
- /* Bus Info */
- quadlet_t guid_hi, guid_lo;
- u8 cyc_clk_acc;
- u8 max_rec;
- u8 max_rom;
- u8 generation; /* Only use values between 0x2 and 0xf */
- u8 lnk_spd;
-
- unsigned long gen_timestamp[16];
-
- struct csr1212_csr *rom;
-
- quadlet_t topology_map[256];
- quadlet_t speed_map[1024];
-};
-
-extern struct csr1212_bus_ops csr_bus_ops;
-
-int init_csr(void);
-void cleanup_csr(void);
-
-/* hpsb_update_config_rom() is deprecated */
-struct hpsb_host;
-int hpsb_update_config_rom(struct hpsb_host *host, const quadlet_t *new_rom,
- size_t size, unsigned char rom_version);
-
-#endif /* _IEEE1394_CSR_H */
diff --git a/drivers/ieee1394/csr1212.c b/drivers/ieee1394/csr1212.c
deleted file mode 100644
index e76cac64c533..000000000000
--- a/drivers/ieee1394/csr1212.c
+++ /dev/null
@@ -1,1467 +0,0 @@
-/*
- * csr1212.c -- IEEE 1212 Control and Status Register support for Linux
- *
- * Copyright (C) 2003 Francois Retief <fgretief@sun.ac.za>
- * Steve Kinneberg <kinnebergsteve@acmsystems.com>
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- *
- * 1. Redistributions of source code must retain the above copyright notice,
- * this list of conditions and the following disclaimer.
- * 2. Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution.
- * 3. The name of the author may not be used to endorse or promote products
- * derived from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED
- * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
- * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO
- * EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
- * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
- * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
- * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
- * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
- * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- */
-
-
-/* TODO List:
- * - Verify interface consistency: i.e., public functions that take a size
- * parameter expect size to be in bytes.
- */
-
-#include <linux/errno.h>
-#include <linux/kernel.h>
-#include <linux/kmemcheck.h>
-#include <linux/string.h>
-#include <asm/bug.h>
-#include <asm/byteorder.h>
-
-#include "csr1212.h"
-
-
-/* Permitted key type for each key id */
-#define __I (1 << CSR1212_KV_TYPE_IMMEDIATE)
-#define __C (1 << CSR1212_KV_TYPE_CSR_OFFSET)
-#define __D (1 << CSR1212_KV_TYPE_DIRECTORY)
-#define __L (1 << CSR1212_KV_TYPE_LEAF)
-static const u8 csr1212_key_id_type_map[0x30] = {
- __C, /* used by Apple iSight */
- __D | __L, /* Descriptor */
- __I | __D | __L, /* Bus_Dependent_Info */
- __I | __D | __L, /* Vendor */
- __I, /* Hardware_Version */
- 0, 0, /* Reserved */
- __D | __L | __I, /* Module */
- __I, 0, 0, 0, /* used by Apple iSight, Reserved */
- __I, /* Node_Capabilities */
- __L, /* EUI_64 */
- 0, 0, 0, /* Reserved */
- __D, /* Unit */
- __I, /* Specifier_ID */
- __I, /* Version */
- __I | __C | __D | __L, /* Dependent_Info */
- __L, /* Unit_Location */
- 0, /* Reserved */
- __I, /* Model */
- __D, /* Instance */
- __L, /* Keyword */
- __D, /* Feature */
- __L, /* Extended_ROM */
- __I, /* Extended_Key_Specifier_ID */
- __I, /* Extended_Key */
- __I | __C | __D | __L, /* Extended_Data */
- __L, /* Modifiable_Descriptor */
- __I, /* Directory_ID */
- __I, /* Revision */
-};
-#undef __I
-#undef __C
-#undef __D
-#undef __L
-
-
-#define quads_to_bytes(_q) ((_q) * sizeof(u32))
-#define bytes_to_quads(_b) DIV_ROUND_UP(_b, sizeof(u32))
-
-static void free_keyval(struct csr1212_keyval *kv)
-{
- if ((kv->key.type == CSR1212_KV_TYPE_LEAF) &&
- (kv->key.id != CSR1212_KV_ID_EXTENDED_ROM))
- CSR1212_FREE(kv->value.leaf.data);
-
- CSR1212_FREE(kv);
-}
-
-static u16 csr1212_crc16(const u32 *buffer, size_t length)
-{
- int shift;
- u32 data;
- u16 sum, crc = 0;
-
- for (; length; length--) {
- data = be32_to_cpu(*buffer);
- buffer++;
- for (shift = 28; shift >= 0; shift -= 4 ) {
- sum = ((crc >> 12) ^ (data >> shift)) & 0xf;
- crc = (crc << 4) ^ (sum << 12) ^ (sum << 5) ^ (sum);
- }
- crc &= 0xffff;
- }
-
- return cpu_to_be16(crc);
-}
-
-/* Microsoft computes the CRC with the bytes in reverse order. */
-static u16 csr1212_msft_crc16(const u32 *buffer, size_t length)
-{
- int shift;
- u32 data;
- u16 sum, crc = 0;
-
- for (; length; length--) {
- data = le32_to_cpu(*buffer);
- buffer++;
- for (shift = 28; shift >= 0; shift -= 4 ) {
- sum = ((crc >> 12) ^ (data >> shift)) & 0xf;
- crc = (crc << 4) ^ (sum << 12) ^ (sum << 5) ^ (sum);
- }
- crc &= 0xffff;
- }
-
- return cpu_to_be16(crc);
-}
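
Both routines above compute the IEEE 1212 CRC-16 (generator x^16 + x^12 + x^5 + 1) four bits at a time; the Microsoft variant differs only in loading each quadlet little-endian. For reference, a bit-at-a-time restatement of the same computation as a sketch (unlike the helpers above, it returns the host-order value rather than one pre-converted with cpu_to_be16() for storage in the ROM image):

static u16 crc16_1212_bitwise(const u32 *buffer, size_t length)
{
	u16 crc = 0;

	for (; length; length--, buffer++) {
		u32 data = be32_to_cpu(*buffer);
		int bit;

		for (bit = 31; bit >= 0; bit--) {
			int feedback = ((crc >> 15) ^ (data >> bit)) & 1;

			crc <<= 1;
			if (feedback)
				crc ^= 0x1021;	/* x^12 + x^5 + 1; x^16 implicit */
		}
	}
	return crc;
}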
-
-static struct csr1212_dentry *
-csr1212_find_keyval(struct csr1212_keyval *dir, struct csr1212_keyval *kv)
-{
- struct csr1212_dentry *pos;
-
- for (pos = dir->value.directory.dentries_head;
- pos != NULL; pos = pos->next)
- if (pos->kv == kv)
- return pos;
- return NULL;
-}
-
-static struct csr1212_keyval *
-csr1212_find_keyval_offset(struct csr1212_keyval *kv_list, u32 offset)
-{
- struct csr1212_keyval *kv;
-
- for (kv = kv_list->next; kv && (kv != kv_list); kv = kv->next)
- if (kv->offset == offset)
- return kv;
- return NULL;
-}
-
-
-/* Creation Routines */
-
-struct csr1212_csr *csr1212_create_csr(struct csr1212_bus_ops *ops,
- size_t bus_info_size, void *private)
-{
- struct csr1212_csr *csr;
-
- csr = CSR1212_MALLOC(sizeof(*csr));
- if (!csr)
- return NULL;
-
- csr->cache_head =
- csr1212_rom_cache_malloc(CSR1212_CONFIG_ROM_SPACE_OFFSET,
- CSR1212_CONFIG_ROM_SPACE_SIZE);
- if (!csr->cache_head) {
- CSR1212_FREE(csr);
- return NULL;
- }
-
- /* The keyval key id is not used for the root node, but a valid key id
- * that can be used for a directory needs to be passed to
- * csr1212_new_directory(). */
- csr->root_kv = csr1212_new_directory(CSR1212_KV_ID_VENDOR);
- if (!csr->root_kv) {
- CSR1212_FREE(csr->cache_head);
- CSR1212_FREE(csr);
- return NULL;
- }
-
- csr->bus_info_data = csr->cache_head->data;
- csr->bus_info_len = bus_info_size;
- csr->crc_len = bus_info_size;
- csr->ops = ops;
- csr->private = private;
- csr->cache_tail = csr->cache_head;
-
- return csr;
-}
-
-void csr1212_init_local_csr(struct csr1212_csr *csr,
- const u32 *bus_info_data, int max_rom)
-{
- static const int mr_map[] = { 4, 64, 1024, 0 };
-
- BUG_ON(max_rom & ~0x3);
- csr->max_rom = mr_map[max_rom];
- memcpy(csr->bus_info_data, bus_info_data, csr->bus_info_len);
-}
-
-static struct csr1212_keyval *csr1212_new_keyval(u8 type, u8 key)
-{
- struct csr1212_keyval *kv;
-
- if (key < 0x30 && ((csr1212_key_id_type_map[key] & (1 << type)) == 0))
- return NULL;
-
- kv = CSR1212_MALLOC(sizeof(*kv));
- if (!kv)
- return NULL;
-
- atomic_set(&kv->refcnt, 1);
- kv->key.type = type;
- kv->key.id = key;
- kv->associate = NULL;
- kv->next = NULL;
- kv->prev = NULL;
- kv->offset = 0;
- kv->valid = 0;
- return kv;
-}
-
-struct csr1212_keyval *csr1212_new_immediate(u8 key, u32 value)
-{
- struct csr1212_keyval *kv;
-
- kv = csr1212_new_keyval(CSR1212_KV_TYPE_IMMEDIATE, key);
- if (!kv)
- return NULL;
-
- kv->value.immediate = value;
- kv->valid = 1;
- return kv;
-}
-
-static struct csr1212_keyval *
-csr1212_new_leaf(u8 key, const void *data, size_t data_len)
-{
- struct csr1212_keyval *kv;
-
- kv = csr1212_new_keyval(CSR1212_KV_TYPE_LEAF, key);
- if (!kv)
- return NULL;
-
- if (data_len > 0) {
- kv->value.leaf.data = CSR1212_MALLOC(data_len);
- if (!kv->value.leaf.data) {
- CSR1212_FREE(kv);
- return NULL;
- }
-
- if (data)
- memcpy(kv->value.leaf.data, data, data_len);
- } else {
- kv->value.leaf.data = NULL;
- }
-
- kv->value.leaf.len = bytes_to_quads(data_len);
- kv->offset = 0;
- kv->valid = 1;
-
- return kv;
-}
-
-static struct csr1212_keyval *
-csr1212_new_csr_offset(u8 key, u32 csr_offset)
-{
- struct csr1212_keyval *kv;
-
- kv = csr1212_new_keyval(CSR1212_KV_TYPE_CSR_OFFSET, key);
- if (!kv)
- return NULL;
-
- kv->value.csr_offset = csr_offset;
-
- kv->offset = 0;
- kv->valid = 1;
- return kv;
-}
-
-struct csr1212_keyval *csr1212_new_directory(u8 key)
-{
- struct csr1212_keyval *kv;
-
- kv = csr1212_new_keyval(CSR1212_KV_TYPE_DIRECTORY, key);
- if (!kv)
- return NULL;
-
- kv->value.directory.len = 0;
- kv->offset = 0;
- kv->value.directory.dentries_head = NULL;
- kv->value.directory.dentries_tail = NULL;
- kv->valid = 1;
- return kv;
-}
-
-void csr1212_associate_keyval(struct csr1212_keyval *kv,
- struct csr1212_keyval *associate)
-{
- BUG_ON(!kv || !associate || kv->key.id == CSR1212_KV_ID_DESCRIPTOR ||
- (associate->key.id != CSR1212_KV_ID_DESCRIPTOR &&
- associate->key.id != CSR1212_KV_ID_DEPENDENT_INFO &&
- associate->key.id != CSR1212_KV_ID_EXTENDED_KEY &&
- associate->key.id != CSR1212_KV_ID_EXTENDED_DATA &&
- associate->key.id < 0x30) ||
- (kv->key.id == CSR1212_KV_ID_EXTENDED_KEY_SPECIFIER_ID &&
- associate->key.id != CSR1212_KV_ID_EXTENDED_KEY) ||
- (kv->key.id == CSR1212_KV_ID_EXTENDED_KEY &&
- associate->key.id != CSR1212_KV_ID_EXTENDED_DATA) ||
- (associate->key.id == CSR1212_KV_ID_EXTENDED_KEY &&
- kv->key.id != CSR1212_KV_ID_EXTENDED_KEY_SPECIFIER_ID) ||
- (associate->key.id == CSR1212_KV_ID_EXTENDED_DATA &&
- kv->key.id != CSR1212_KV_ID_EXTENDED_KEY));
-
- if (kv->associate)
- csr1212_release_keyval(kv->associate);
-
- csr1212_keep_keyval(associate);
- kv->associate = associate;
-}
-
-static int __csr1212_attach_keyval_to_directory(struct csr1212_keyval *dir,
- struct csr1212_keyval *kv,
- bool keep_keyval)
-{
- struct csr1212_dentry *dentry;
-
- BUG_ON(!kv || !dir || dir->key.type != CSR1212_KV_TYPE_DIRECTORY);
-
- dentry = CSR1212_MALLOC(sizeof(*dentry));
- if (!dentry)
- return -ENOMEM;
-
- if (keep_keyval)
- csr1212_keep_keyval(kv);
- dentry->kv = kv;
-
- dentry->next = NULL;
- dentry->prev = dir->value.directory.dentries_tail;
-
- if (!dir->value.directory.dentries_head)
- dir->value.directory.dentries_head = dentry;
-
- if (dir->value.directory.dentries_tail)
- dir->value.directory.dentries_tail->next = dentry;
- dir->value.directory.dentries_tail = dentry;
-
- return CSR1212_SUCCESS;
-}
-
-int csr1212_attach_keyval_to_directory(struct csr1212_keyval *dir,
- struct csr1212_keyval *kv)
-{
- return __csr1212_attach_keyval_to_directory(dir, kv, true);
-}
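
csr1212_attach_keyval_to_directory() takes a reference of its own on the attached keyval (keep_keyval is true), so a caller that only builds and publishes entries can drop its creation references afterwards. A minimal usage sketch; the function name and the specifier/version values are placeholders, not taken from the original sources:

static int example_publish_unit_dir(struct csr1212_csr *csr)
{
	struct csr1212_keyval *unit, *spec, *ver;

	unit = csr1212_new_directory(CSR1212_KV_ID_UNIT);
	spec = csr1212_new_immediate(CSR1212_KV_ID_SPECIFIER_ID, 0x00005e);
	ver = csr1212_new_immediate(CSR1212_KV_ID_VERSION, 0x000001);
	if (!unit || !spec || !ver)
		return -ENOMEM;	/* partial allocations are leaked in this sketch */

	csr1212_attach_keyval_to_directory(unit, spec);
	csr1212_attach_keyval_to_directory(unit, ver);
	csr1212_attach_keyval_to_directory(csr->root_kv, unit);

	/* each attach holds its own reference; drop the creation references */
	csr1212_release_keyval(spec);
	csr1212_release_keyval(ver);
	csr1212_release_keyval(unit);
	return CSR1212_SUCCESS;
}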
-
-#define CSR1212_DESCRIPTOR_LEAF_DATA(kv) \
- (&((kv)->value.leaf.data[1]))
-
-#define CSR1212_DESCRIPTOR_LEAF_SET_TYPE(kv, type) \
- ((kv)->value.leaf.data[0] = \
- cpu_to_be32(CSR1212_DESCRIPTOR_LEAF_SPECIFIER_ID(kv) | \
- ((type) << CSR1212_DESCRIPTOR_LEAF_TYPE_SHIFT)))
-#define CSR1212_DESCRIPTOR_LEAF_SET_SPECIFIER_ID(kv, spec_id) \
- ((kv)->value.leaf.data[0] = \
- cpu_to_be32((CSR1212_DESCRIPTOR_LEAF_TYPE(kv) << \
- CSR1212_DESCRIPTOR_LEAF_TYPE_SHIFT) | \
- ((spec_id) & CSR1212_DESCRIPTOR_LEAF_SPECIFIER_ID_MASK)))
-
-static struct csr1212_keyval *
-csr1212_new_descriptor_leaf(u8 dtype, u32 specifier_id,
- const void *data, size_t data_len)
-{
- struct csr1212_keyval *kv;
-
- kv = csr1212_new_leaf(CSR1212_KV_ID_DESCRIPTOR, NULL,
- data_len + CSR1212_DESCRIPTOR_LEAF_OVERHEAD);
- if (!kv)
- return NULL;
-
- kmemcheck_annotate_variable(kv->value.leaf.data[0]);
- CSR1212_DESCRIPTOR_LEAF_SET_TYPE(kv, dtype);
- CSR1212_DESCRIPTOR_LEAF_SET_SPECIFIER_ID(kv, specifier_id);
-
- if (data)
- memcpy(CSR1212_DESCRIPTOR_LEAF_DATA(kv), data, data_len);
-
- return kv;
-}
-
-/* Check if string conforms to minimal ASCII as per IEEE 1212 clause 7.4 */
-static int csr1212_check_minimal_ascii(const char *s)
-{
- static const char minimal_ascii_table[] = {
- /* 1 2 4 8 16 32 64 128 */
- 128, /* --, --, --, --, --, --, --, 07, */
- 4 + 16 + 32, /* --, --, 0a, --, 0C, 0D, --, --, */
- 0, /* --, --, --, --, --, --, --, --, */
- 0, /* --, --, --, --, --, --, --, --, */
- 255 - 8 - 16, /* 20, 21, 22, --, --, 25, 26, 27, */
- 255, /* 28, 29, 2a, 2b, 2c, 2d, 2e, 2f, */
- 255, /* 30, 31, 32, 33, 34, 35, 36, 37, */
- 255, /* 38, 39, 3a, 3b, 3c, 3d, 3e, 3f, */
- 255, /* 40, 41, 42, 43, 44, 45, 46, 47, */
- 255, /* 48, 49, 4a, 4b, 4c, 4d, 4e, 4f, */
- 255, /* 50, 51, 52, 53, 54, 55, 56, 57, */
- 1 + 2 + 4 + 128, /* 58, 59, 5a, --, --, --, --, 5f, */
- 255 - 1, /* --, 61, 62, 63, 64, 65, 66, 67, */
- 255, /* 68, 69, 6a, 6b, 6c, 6d, 6e, 6f, */
- 255, /* 70, 71, 72, 73, 74, 75, 76, 77, */
- 1 + 2 + 4, /* 78, 79, 7a, --, --, --, --, --, */
- };
- int i, j;
-
- for (; *s; s++) {
- i = *s >> 3; /* i = *s / 8; */
- j = 1 << (*s & 3); /* j = 1 << (*s % 8); */
-
- if (i >= ARRAY_SIZE(minimal_ascii_table) ||
- !(minimal_ascii_table[i] & j))
- return -EINVAL;
- }
- return 0;
-}
-
-/* IEEE 1212 clause 7.5.4.1 textual descriptors (English, minimal ASCII) */
-struct csr1212_keyval *csr1212_new_string_descriptor_leaf(const char *s)
-{
- struct csr1212_keyval *kv;
- u32 *text;
- size_t str_len, quads;
-
- if (!s || !*s || csr1212_check_minimal_ascii(s))
- return NULL;
-
- str_len = strlen(s);
- quads = bytes_to_quads(str_len);
- kv = csr1212_new_descriptor_leaf(0, 0, NULL, quads_to_bytes(quads) +
- CSR1212_TEXTUAL_DESCRIPTOR_LEAF_OVERHEAD);
- if (!kv)
- return NULL;
-
- kv->value.leaf.data[1] = 0; /* width, character_set, language */
- text = CSR1212_TEXTUAL_DESCRIPTOR_LEAF_DATA(kv);
- text[quads - 1] = 0; /* padding */
- memcpy(text, s, str_len);
-
- return kv;
-}
-
-
-/* Destruction Routines */
-
-void csr1212_detach_keyval_from_directory(struct csr1212_keyval *dir,
- struct csr1212_keyval *kv)
-{
- struct csr1212_dentry *dentry;
-
- if (!kv || !dir || dir->key.type != CSR1212_KV_TYPE_DIRECTORY)
- return;
-
- dentry = csr1212_find_keyval(dir, kv);
-
- if (!dentry)
- return;
-
- if (dentry->prev)
- dentry->prev->next = dentry->next;
- if (dentry->next)
- dentry->next->prev = dentry->prev;
- if (dir->value.directory.dentries_head == dentry)
- dir->value.directory.dentries_head = dentry->next;
- if (dir->value.directory.dentries_tail == dentry)
- dir->value.directory.dentries_tail = dentry->prev;
-
- CSR1212_FREE(dentry);
-
- csr1212_release_keyval(kv);
-}
-
-/* This function is used to free the memory taken by a keyval. If the given
- * keyval is a directory type, then any keyvals contained in that directory
- * will be destroyed as well if no one holds a reference on them. By means of
- * list manipulation, this routine will descend a directory structure in a
- * non-recursive manner. */
-void csr1212_release_keyval(struct csr1212_keyval *kv)
-{
- struct csr1212_keyval *k, *a;
- struct csr1212_dentry dentry;
- struct csr1212_dentry *head, *tail;
-
- if (!atomic_dec_and_test(&kv->refcnt))
- return;
-
- dentry.kv = kv;
- dentry.next = NULL;
- dentry.prev = NULL;
-
- head = &dentry;
- tail = head;
-
- while (head) {
- k = head->kv;
-
- while (k) {
- /* must not dec_and_test kv->refcnt again */
- if (k != kv && !atomic_dec_and_test(&k->refcnt))
- break;
-
- a = k->associate;
-
- if (k->key.type == CSR1212_KV_TYPE_DIRECTORY) {
- /* If the current entry is a directory, move all
- * the entries to the destruction list. */
- if (k->value.directory.dentries_head) {
- tail->next =
- k->value.directory.dentries_head;
- k->value.directory.dentries_head->prev =
- tail;
- tail = k->value.directory.dentries_tail;
- }
- }
- free_keyval(k);
- k = a;
- }
-
- head = head->next;
- if (head) {
- if (head->prev && head->prev != &dentry)
- CSR1212_FREE(head->prev);
- head->prev = NULL;
- } else if (tail != &dentry) {
- CSR1212_FREE(tail);
- }
- }
-}
-
-void csr1212_destroy_csr(struct csr1212_csr *csr)
-{
- struct csr1212_csr_rom_cache *c, *oc;
- struct csr1212_cache_region *cr, *ocr;
-
- csr1212_release_keyval(csr->root_kv);
-
- c = csr->cache_head;
- while (c) {
- oc = c;
- cr = c->filled_head;
- while (cr) {
- ocr = cr;
- cr = cr->next;
- CSR1212_FREE(ocr);
- }
- c = c->next;
- CSR1212_FREE(oc);
- }
-
- CSR1212_FREE(csr);
-}
-
-
-/* CSR Image Creation */
-
-static int csr1212_append_new_cache(struct csr1212_csr *csr, size_t romsize)
-{
- struct csr1212_csr_rom_cache *cache;
- u64 csr_addr;
-
- BUG_ON(!csr || !csr->ops || !csr->ops->allocate_addr_range ||
- !csr->ops->release_addr || csr->max_rom < 1);
-
- /* ROM size must be a multiple of csr->max_rom */
- romsize = (romsize + (csr->max_rom - 1)) & ~(csr->max_rom - 1);
-
- csr_addr = csr->ops->allocate_addr_range(romsize, csr->max_rom,
- csr->private);
- if (csr_addr == CSR1212_INVALID_ADDR_SPACE)
- return -ENOMEM;
-
- if (csr_addr < CSR1212_REGISTER_SPACE_BASE) {
- /* Invalid address returned from allocate_addr_range(). */
- csr->ops->release_addr(csr_addr, csr->private);
- return -ENOMEM;
- }
-
- cache = csr1212_rom_cache_malloc(csr_addr - CSR1212_REGISTER_SPACE_BASE,
- romsize);
- if (!cache) {
- csr->ops->release_addr(csr_addr, csr->private);
- return -ENOMEM;
- }
-
- cache->ext_rom = csr1212_new_keyval(CSR1212_KV_TYPE_LEAF,
- CSR1212_KV_ID_EXTENDED_ROM);
- if (!cache->ext_rom) {
- csr->ops->release_addr(csr_addr, csr->private);
- CSR1212_FREE(cache);
- return -ENOMEM;
- }
-
- if (csr1212_attach_keyval_to_directory(csr->root_kv, cache->ext_rom) !=
- CSR1212_SUCCESS) {
- csr1212_release_keyval(cache->ext_rom);
- csr->ops->release_addr(csr_addr, csr->private);
- CSR1212_FREE(cache);
- return -ENOMEM;
- }
- cache->ext_rom->offset = csr_addr - CSR1212_REGISTER_SPACE_BASE;
- cache->ext_rom->value.leaf.len = -1;
- cache->ext_rom->value.leaf.data = cache->data;
-
- /* Add cache to tail of cache list */
- cache->prev = csr->cache_tail;
- csr->cache_tail->next = cache;
- csr->cache_tail = cache;
- return CSR1212_SUCCESS;
-}
-
-static void csr1212_remove_cache(struct csr1212_csr *csr,
- struct csr1212_csr_rom_cache *cache)
-{
- if (csr->cache_head == cache)
- csr->cache_head = cache->next;
- if (csr->cache_tail == cache)
- csr->cache_tail = cache->prev;
-
- if (cache->prev)
- cache->prev->next = cache->next;
- if (cache->next)
- cache->next->prev = cache->prev;
-
- if (cache->ext_rom) {
- csr1212_detach_keyval_from_directory(csr->root_kv,
- cache->ext_rom);
- csr1212_release_keyval(cache->ext_rom);
- }
-
- CSR1212_FREE(cache);
-}
-
-static int csr1212_generate_layout_subdir(struct csr1212_keyval *dir,
- struct csr1212_keyval **layout_tail)
-{
- struct csr1212_dentry *dentry;
- struct csr1212_keyval *dkv;
- struct csr1212_keyval *last_extkey_spec = NULL;
- struct csr1212_keyval *last_extkey = NULL;
- int num_entries = 0;
-
- for (dentry = dir->value.directory.dentries_head; dentry;
- dentry = dentry->next) {
- for (dkv = dentry->kv; dkv; dkv = dkv->associate) {
- /* Special Case: Extended Key Specifier_ID */
- if (dkv->key.id ==
- CSR1212_KV_ID_EXTENDED_KEY_SPECIFIER_ID) {
- if (last_extkey_spec == NULL)
- last_extkey_spec = dkv;
- else if (dkv->value.immediate !=
- last_extkey_spec->value.immediate)
- last_extkey_spec = dkv;
- else
- continue;
- /* Special Case: Extended Key */
- } else if (dkv->key.id == CSR1212_KV_ID_EXTENDED_KEY) {
- if (last_extkey == NULL)
- last_extkey = dkv;
- else if (dkv->value.immediate !=
- last_extkey->value.immediate)
- last_extkey = dkv;
- else
- continue;
- }
-
- num_entries += 1;
-
- switch (dkv->key.type) {
- default:
- case CSR1212_KV_TYPE_IMMEDIATE:
- case CSR1212_KV_TYPE_CSR_OFFSET:
- break;
- case CSR1212_KV_TYPE_LEAF:
- case CSR1212_KV_TYPE_DIRECTORY:
- /* Remove from list */
- if (dkv->prev && (dkv->prev->next == dkv))
- dkv->prev->next = dkv->next;
- if (dkv->next && (dkv->next->prev == dkv))
- dkv->next->prev = dkv->prev;
- //if (dkv == *layout_tail)
- // *layout_tail = dkv->prev;
-
- /* Special case: Extended ROM leaves */
- if (dkv->key.id == CSR1212_KV_ID_EXTENDED_ROM) {
- dkv->value.leaf.len = -1;
- /* Don't add Extended ROM leaves to the
- * layout list; they are handled
- * differently. */
- break;
- }
-
- /* Add to tail of list */
- dkv->next = NULL;
- dkv->prev = *layout_tail;
- (*layout_tail)->next = dkv;
- *layout_tail = dkv;
- break;
- }
- }
- }
- return num_entries;
-}
-
-static size_t csr1212_generate_layout_order(struct csr1212_keyval *kv)
-{
- struct csr1212_keyval *ltail = kv;
- size_t agg_size = 0;
-
- while (kv) {
- switch (kv->key.type) {
- case CSR1212_KV_TYPE_LEAF:
- /* Add 1 quadlet for crc/len field */
- agg_size += kv->value.leaf.len + 1;
- break;
-
- case CSR1212_KV_TYPE_DIRECTORY:
- kv->value.directory.len =
- csr1212_generate_layout_subdir(kv, &ltail);
- /* Add 1 quadlet for crc/len field */
- agg_size += kv->value.directory.len + 1;
- break;
- }
- kv = kv->next;
- }
- return quads_to_bytes(agg_size);
-}
-
-static struct csr1212_keyval *
-csr1212_generate_positions(struct csr1212_csr_rom_cache *cache,
- struct csr1212_keyval *start_kv, int start_pos)
-{
- struct csr1212_keyval *kv = start_kv;
- struct csr1212_keyval *okv = start_kv;
- int pos = start_pos;
- int kv_len = 0, okv_len = 0;
-
- cache->layout_head = kv;
-
- while (kv && pos < cache->size) {
- /* Special case: Extended ROM leaves */
- if (kv->key.id != CSR1212_KV_ID_EXTENDED_ROM)
- kv->offset = cache->offset + pos;
-
- switch (kv->key.type) {
- case CSR1212_KV_TYPE_LEAF:
- kv_len = kv->value.leaf.len;
- break;
-
- case CSR1212_KV_TYPE_DIRECTORY:
- kv_len = kv->value.directory.len;
- break;
-
- default:
- /* Should never get here */
- WARN_ON(1);
- break;
- }
-
- pos += quads_to_bytes(kv_len + 1);
-
- if (pos <= cache->size) {
- okv = kv;
- okv_len = kv_len;
- kv = kv->next;
- }
- }
-
- cache->layout_tail = okv;
- cache->len = okv->offset - cache->offset + quads_to_bytes(okv_len + 1);
-
- return kv;
-}
-
-#define CSR1212_KV_KEY_SHIFT 24
-#define CSR1212_KV_KEY_TYPE_SHIFT 6
-#define CSR1212_KV_KEY_ID_MASK 0x3f
-#define CSR1212_KV_KEY_TYPE_MASK 0x3 /* after shift */
-
-static void
-csr1212_generate_tree_subdir(struct csr1212_keyval *dir, u32 *data_buffer)
-{
- struct csr1212_dentry *dentry;
- struct csr1212_keyval *last_extkey_spec = NULL;
- struct csr1212_keyval *last_extkey = NULL;
- int index = 0;
-
- for (dentry = dir->value.directory.dentries_head;
- dentry;
- dentry = dentry->next) {
- struct csr1212_keyval *a;
-
- for (a = dentry->kv; a; a = a->associate) {
- u32 value = 0;
-
- /* Special Case: Extended Key Specifier_ID */
- if (a->key.id ==
- CSR1212_KV_ID_EXTENDED_KEY_SPECIFIER_ID) {
- if (last_extkey_spec == NULL)
- last_extkey_spec = a;
- else if (a->value.immediate !=
- last_extkey_spec->value.immediate)
- last_extkey_spec = a;
- else
- continue;
-
- /* Special Case: Extended Key */
- } else if (a->key.id == CSR1212_KV_ID_EXTENDED_KEY) {
- if (last_extkey == NULL)
- last_extkey = a;
- else if (a->value.immediate !=
- last_extkey->value.immediate)
- last_extkey = a;
- else
- continue;
- }
-
- switch (a->key.type) {
- case CSR1212_KV_TYPE_IMMEDIATE:
- value = a->value.immediate;
- break;
- case CSR1212_KV_TYPE_CSR_OFFSET:
- value = a->value.csr_offset;
- break;
- case CSR1212_KV_TYPE_LEAF:
- value = a->offset;
- value -= dir->offset + quads_to_bytes(1+index);
- value = bytes_to_quads(value);
- break;
- case CSR1212_KV_TYPE_DIRECTORY:
- value = a->offset;
- value -= dir->offset + quads_to_bytes(1+index);
- value = bytes_to_quads(value);
- break;
- default:
- /* Should never get here */
- WARN_ON(1);
- break;
- }
-
- value |= (a->key.id & CSR1212_KV_KEY_ID_MASK) <<
- CSR1212_KV_KEY_SHIFT;
- value |= (a->key.type & CSR1212_KV_KEY_TYPE_MASK) <<
- (CSR1212_KV_KEY_SHIFT +
- CSR1212_KV_KEY_TYPE_SHIFT);
- data_buffer[index] = cpu_to_be32(value);
- index++;
- }
- }
-}
-
-struct csr1212_keyval_img {
- u16 length;
- u16 crc;
-
- /* Must be last */
- u32 data[0]; /* older gcc can't handle [] which is standard */
-};
-
-static void csr1212_fill_cache(struct csr1212_csr_rom_cache *cache)
-{
- struct csr1212_keyval *kv, *nkv;
- struct csr1212_keyval_img *kvi;
-
- for (kv = cache->layout_head;
- kv != cache->layout_tail->next;
- kv = nkv) {
- kvi = (struct csr1212_keyval_img *)(cache->data +
- bytes_to_quads(kv->offset - cache->offset));
- switch (kv->key.type) {
- default:
- case CSR1212_KV_TYPE_IMMEDIATE:
- case CSR1212_KV_TYPE_CSR_OFFSET:
- /* Should never get here */
- WARN_ON(1);
- break;
-
- case CSR1212_KV_TYPE_LEAF:
- /* Don't copy over Extended ROM areas; they are
- * already filled out! */
- if (kv->key.id != CSR1212_KV_ID_EXTENDED_ROM)
- memcpy(kvi->data, kv->value.leaf.data,
- quads_to_bytes(kv->value.leaf.len));
-
- kvi->length = cpu_to_be16(kv->value.leaf.len);
- kvi->crc = csr1212_crc16(kvi->data, kv->value.leaf.len);
- break;
-
- case CSR1212_KV_TYPE_DIRECTORY:
- csr1212_generate_tree_subdir(kv, kvi->data);
-
- kvi->length = cpu_to_be16(kv->value.directory.len);
- kvi->crc = csr1212_crc16(kvi->data,
- kv->value.directory.len);
- break;
- }
-
- nkv = kv->next;
- if (kv->prev)
- kv->prev->next = NULL;
- if (kv->next)
- kv->next->prev = NULL;
- kv->prev = NULL;
- kv->next = NULL;
- }
-}
-
-/* This size is arbitrarily chosen.
- * The struct overhead is subtracted for more economic allocations. */
-#define CSR1212_EXTENDED_ROM_SIZE (2048 - sizeof(struct csr1212_csr_rom_cache))
-
-int csr1212_generate_csr_image(struct csr1212_csr *csr)
-{
- struct csr1212_bus_info_block_img *bi;
- struct csr1212_csr_rom_cache *cache;
- struct csr1212_keyval *kv;
- size_t agg_size;
- int ret;
- int init_offset;
-
- BUG_ON(!csr);
-
- cache = csr->cache_head;
-
- bi = (struct csr1212_bus_info_block_img*)cache->data;
-
- bi->length = bytes_to_quads(csr->bus_info_len) - 1;
- bi->crc_length = bi->length;
- bi->crc = csr1212_crc16(bi->data, bi->crc_length);
-
- csr->root_kv->next = NULL;
- csr->root_kv->prev = NULL;
-
- agg_size = csr1212_generate_layout_order(csr->root_kv);
-
- init_offset = csr->bus_info_len;
-
- for (kv = csr->root_kv, cache = csr->cache_head;
- kv;
- cache = cache->next) {
- if (!cache) {
- /* Estimate approximate number of additional cache
- * regions needed (it assumes that the cache holding
- * the first 1K Config ROM space always exists). */
- int est_c = agg_size / (CSR1212_EXTENDED_ROM_SIZE -
- (2 * sizeof(u32))) + 1;
-
- /* Add additional cache regions, extras will be
- * removed later */
- for (; est_c; est_c--) {
- ret = csr1212_append_new_cache(csr,
- CSR1212_EXTENDED_ROM_SIZE);
- if (ret != CSR1212_SUCCESS)
- return ret;
- }
- /* Need to re-layout for additional cache regions */
- agg_size = csr1212_generate_layout_order(csr->root_kv);
- kv = csr->root_kv;
- cache = csr->cache_head;
- init_offset = csr->bus_info_len;
- }
- kv = csr1212_generate_positions(cache, kv, init_offset);
- agg_size -= cache->len;
- init_offset = sizeof(u32);
- }
-
- /* Remove unused, excess cache regions */
- while (cache) {
- struct csr1212_csr_rom_cache *oc = cache;
-
- cache = cache->next;
- csr1212_remove_cache(csr, oc);
- }
-
- /* Go through the list backward so that when done, the correct CRC
- * will be calculated for the Extended ROM areas. */
- for (cache = csr->cache_tail; cache; cache = cache->prev) {
- /* Only Extended ROM caches should have this set. */
- if (cache->ext_rom) {
- int leaf_size;
-
- /* Make sure the Extended ROM leaf is a multiple of
- * max_rom in size. */
- BUG_ON(csr->max_rom < 1);
- leaf_size = (cache->len + (csr->max_rom - 1)) &
- ~(csr->max_rom - 1);
-
- /* Zero out the unused ROM region */
- memset(cache->data + bytes_to_quads(cache->len), 0x00,
- leaf_size - cache->len);
-
- /* Subtract leaf header */
- leaf_size -= sizeof(u32);
-
- /* Update the Extended ROM leaf length */
- cache->ext_rom->value.leaf.len =
- bytes_to_quads(leaf_size);
- } else {
- /* Zero out the unused ROM region */
- memset(cache->data + bytes_to_quads(cache->len), 0x00,
- cache->size - cache->len);
- }
-
- /* Copy the data into the cache buffer */
- csr1212_fill_cache(cache);
-
- if (cache != csr->cache_head) {
- /* Set the length and CRC of the extended ROM. */
- struct csr1212_keyval_img *kvi =
- (struct csr1212_keyval_img*)cache->data;
- u16 len = bytes_to_quads(cache->len) - 1;
-
- kvi->length = cpu_to_be16(len);
- kvi->crc = csr1212_crc16(kvi->data, len);
- }
- }
-
- return CSR1212_SUCCESS;
-}
-
-int csr1212_read(struct csr1212_csr *csr, u32 offset, void *buffer, u32 len)
-{
- struct csr1212_csr_rom_cache *cache;
-
- for (cache = csr->cache_head; cache; cache = cache->next)
- if (offset >= cache->offset &&
- (offset + len) <= (cache->offset + cache->size)) {
- memcpy(buffer, &cache->data[
- bytes_to_quads(offset - cache->offset)],
- len);
- return CSR1212_SUCCESS;
- }
-
- return -ENOENT;
-}
-
-/*
- * Apparently there are many different wrong implementations of the CRC
- * algorithm. We don't fail, we just warn... approximately once per GUID.
- */
-static void
-csr1212_check_crc(const u32 *buffer, size_t length, u16 crc, __be32 *guid)
-{
- static u64 last_bad_eui64;
- u64 eui64 = ((u64)be32_to_cpu(guid[0]) << 32) | be32_to_cpu(guid[1]);
-
- if (csr1212_crc16(buffer, length) == crc ||
- csr1212_msft_crc16(buffer, length) == crc ||
- eui64 == last_bad_eui64)
- return;
-
- printk(KERN_DEBUG "ieee1394: config ROM CRC error\n");
- last_bad_eui64 = eui64;
-}
-
-/* Parse a chunk of data as a Config ROM */
-
-static int csr1212_parse_bus_info_block(struct csr1212_csr *csr)
-{
- struct csr1212_bus_info_block_img *bi;
- struct csr1212_cache_region *cr;
- int i;
- int ret;
-
- for (i = 0; i < csr->bus_info_len; i += sizeof(u32)) {
- ret = csr->ops->bus_read(csr, CSR1212_CONFIG_ROM_SPACE_BASE + i,
- &csr->cache_head->data[bytes_to_quads(i)],
- csr->private);
- if (ret != CSR1212_SUCCESS)
- return ret;
-
- /* check ROM header's info_length */
- if (i == 0 &&
- be32_to_cpu(csr->cache_head->data[0]) >> 24 !=
- bytes_to_quads(csr->bus_info_len) - 1)
- return -EINVAL;
- }
-
- bi = (struct csr1212_bus_info_block_img*)csr->cache_head->data;
- csr->crc_len = quads_to_bytes(bi->crc_length);
-
- /* IEEE 1212 recommends that crc_len be equal to bus_info_len, but that
- * is not always the case, so read the rest of the crc area 1 quadlet at
- * a time. */
- for (i = csr->bus_info_len; i <= csr->crc_len; i += sizeof(u32)) {
- ret = csr->ops->bus_read(csr, CSR1212_CONFIG_ROM_SPACE_BASE + i,
- &csr->cache_head->data[bytes_to_quads(i)],
- csr->private);
- if (ret != CSR1212_SUCCESS)
- return ret;
- }
-
- csr1212_check_crc(bi->data, bi->crc_length, bi->crc,
- &csr->bus_info_data[3]);
-
- cr = CSR1212_MALLOC(sizeof(*cr));
- if (!cr)
- return -ENOMEM;
-
- cr->next = NULL;
- cr->prev = NULL;
- cr->offset_start = 0;
- cr->offset_end = csr->crc_len + 4;
-
- csr->cache_head->filled_head = cr;
- csr->cache_head->filled_tail = cr;
-
- return CSR1212_SUCCESS;
-}
-
-#define CSR1212_KV_KEY(q) (be32_to_cpu(q) >> CSR1212_KV_KEY_SHIFT)
-#define CSR1212_KV_KEY_TYPE(q) (CSR1212_KV_KEY(q) >> CSR1212_KV_KEY_TYPE_SHIFT)
-#define CSR1212_KV_KEY_ID(q) (CSR1212_KV_KEY(q) & CSR1212_KV_KEY_ID_MASK)
-#define CSR1212_KV_VAL_MASK 0xffffff
-#define CSR1212_KV_VAL(q) (be32_to_cpu(q) & CSR1212_KV_VAL_MASK)
-
-static int
-csr1212_parse_dir_entry(struct csr1212_keyval *dir, u32 ki, u32 kv_pos)
-{
- int ret = CSR1212_SUCCESS;
- struct csr1212_keyval *k = NULL;
- u32 offset;
- bool keep_keyval = true;
-
- switch (CSR1212_KV_KEY_TYPE(ki)) {
- case CSR1212_KV_TYPE_IMMEDIATE:
- k = csr1212_new_immediate(CSR1212_KV_KEY_ID(ki),
- CSR1212_KV_VAL(ki));
- if (!k) {
- ret = -ENOMEM;
- goto out;
- }
- /* Don't keep local reference when parsing. */
- keep_keyval = false;
- break;
-
- case CSR1212_KV_TYPE_CSR_OFFSET:
- k = csr1212_new_csr_offset(CSR1212_KV_KEY_ID(ki),
- CSR1212_KV_VAL(ki));
- if (!k) {
- ret = -ENOMEM;
- goto out;
- }
- /* Don't keep local reference when parsing. */
- keep_keyval = false;
- break;
-
- default:
- /* Compute the offset from 0xffff f000 0000. */
- offset = quads_to_bytes(CSR1212_KV_VAL(ki)) + kv_pos;
- if (offset == kv_pos) {
- /* Uh-oh. Can't have a relative offset of 0 for Leaves
- * or Directories. The Config ROM image is most likely
- * messed up, so we'll just abort here. */
- ret = -EIO;
- goto out;
- }
-
- k = csr1212_find_keyval_offset(dir, offset);
-
- if (k)
- break; /* Found it. */
-
- if (CSR1212_KV_KEY_TYPE(ki) == CSR1212_KV_TYPE_DIRECTORY)
- k = csr1212_new_directory(CSR1212_KV_KEY_ID(ki));
- else
- k = csr1212_new_leaf(CSR1212_KV_KEY_ID(ki), NULL, 0);
-
- if (!k) {
- ret = -ENOMEM;
- goto out;
- }
- /* Don't keep local reference when parsing. */
- keep_keyval = false;
- /* Contents not read yet so it's not valid. */
- k->valid = 0;
- k->offset = offset;
-
- k->prev = dir;
- k->next = dir->next;
- dir->next->prev = k;
- dir->next = k;
- }
- ret = __csr1212_attach_keyval_to_directory(dir, k, keep_keyval);
-out:
- if (ret != CSR1212_SUCCESS && k != NULL)
- free_keyval(k);
- return ret;
-}
-
-int csr1212_parse_keyval(struct csr1212_keyval *kv,
- struct csr1212_csr_rom_cache *cache)
-{
- struct csr1212_keyval_img *kvi;
- int i;
- int ret = CSR1212_SUCCESS;
- int kvi_len;
-
- kvi = (struct csr1212_keyval_img*)
- &cache->data[bytes_to_quads(kv->offset - cache->offset)];
- kvi_len = be16_to_cpu(kvi->length);
-
- /* GUID is wrong in here in case of extended ROM. We don't care. */
- csr1212_check_crc(kvi->data, kvi_len, kvi->crc, &cache->data[3]);
-
- switch (kv->key.type) {
- case CSR1212_KV_TYPE_DIRECTORY:
- for (i = 0; i < kvi_len; i++) {
- u32 ki = kvi->data[i];
-
- /* Some devices put null entries in their unit
- * directories. If we come across such an entry,
- * then skip it. */
- if (ki == 0x0)
- continue;
- ret = csr1212_parse_dir_entry(kv, ki,
- kv->offset + quads_to_bytes(i + 1));
- }
- kv->value.directory.len = kvi_len;
- break;
-
- case CSR1212_KV_TYPE_LEAF:
- if (kv->key.id != CSR1212_KV_ID_EXTENDED_ROM) {
- size_t size = quads_to_bytes(kvi_len);
-
- kv->value.leaf.data = CSR1212_MALLOC(size);
- if (!kv->value.leaf.data) {
- ret = -ENOMEM;
- goto out;
- }
-
- kv->value.leaf.len = kvi_len;
- memcpy(kv->value.leaf.data, kvi->data, size);
- }
- break;
- }
-
- kv->valid = 1;
-out:
- return ret;
-}
-
-static int
-csr1212_read_keyval(struct csr1212_csr *csr, struct csr1212_keyval *kv)
-{
- struct csr1212_cache_region *cr, *ncr, *newcr = NULL;
- struct csr1212_keyval_img *kvi = NULL;
- struct csr1212_csr_rom_cache *cache;
- int cache_index;
- u64 addr;
- u32 *cache_ptr;
- u16 kv_len = 0;
-
- BUG_ON(!csr || !kv || csr->max_rom < 1);
-
- /* First find which cache the data should be in (or go in if not read
- * yet). */
- for (cache = csr->cache_head; cache; cache = cache->next)
- if (kv->offset >= cache->offset &&
- kv->offset < (cache->offset + cache->size))
- break;
-
- if (!cache) {
- u32 q, cache_size;
-
- /* Only create a new cache for Extended ROM leaves. */
- if (kv->key.id != CSR1212_KV_ID_EXTENDED_ROM)
- return -EINVAL;
-
- if (csr->ops->bus_read(csr,
- CSR1212_REGISTER_SPACE_BASE + kv->offset,
- &q, csr->private))
- return -EIO;
-
- kv->value.leaf.len = be32_to_cpu(q) >> 16;
-
- cache_size = (quads_to_bytes(kv->value.leaf.len + 1) +
- (csr->max_rom - 1)) & ~(csr->max_rom - 1);
-
- cache = csr1212_rom_cache_malloc(kv->offset, cache_size);
- if (!cache)
- return -ENOMEM;
-
- kv->value.leaf.data = &cache->data[1];
- csr->cache_tail->next = cache;
- cache->prev = csr->cache_tail;
- cache->next = NULL;
- csr->cache_tail = cache;
- cache->filled_head =
- CSR1212_MALLOC(sizeof(*cache->filled_head));
- if (!cache->filled_head)
- return -ENOMEM;
-
- cache->filled_head->offset_start = 0;
- cache->filled_head->offset_end = sizeof(u32);
- cache->filled_tail = cache->filled_head;
- cache->filled_head->next = NULL;
- cache->filled_head->prev = NULL;
- cache->data[0] = q;
-
- /* Don't read the entire extended ROM now. Pieces of it will
- * be read when entries inside it are read. */
- return csr1212_parse_keyval(kv, cache);
- }
-
- cache_index = kv->offset - cache->offset;
-
- /* Now search the already-read portions of the cache to see if it is there. */
- for (cr = cache->filled_head; cr; cr = cr->next) {
- if (cache_index < cr->offset_start) {
- newcr = CSR1212_MALLOC(sizeof(*newcr));
- if (!newcr)
- return -ENOMEM;
-
- newcr->offset_start = cache_index & ~(csr->max_rom - 1);
- newcr->offset_end = newcr->offset_start;
- newcr->next = cr;
- newcr->prev = cr->prev;
- cr->prev = newcr;
- cr = newcr;
- break;
- } else if ((cache_index >= cr->offset_start) &&
- (cache_index < cr->offset_end)) {
- kvi = (struct csr1212_keyval_img*)
- (&cache->data[bytes_to_quads(cache_index)]);
- kv_len = quads_to_bytes(be16_to_cpu(kvi->length) + 1);
- break;
- } else if (cache_index == cr->offset_end) {
- break;
- }
- }
-
- if (!cr) {
- cr = cache->filled_tail;
- newcr = CSR1212_MALLOC(sizeof(*newcr));
- if (!newcr)
- return -ENOMEM;
-
- newcr->offset_start = cache_index & ~(csr->max_rom - 1);
- newcr->offset_end = newcr->offset_start;
- newcr->prev = cr;
- newcr->next = cr->next;
- cr->next = newcr;
- cr = newcr;
- cache->filled_tail = newcr;
- }
-
- while (!kvi || cr->offset_end < cache_index + kv_len) {
- cache_ptr = &cache->data[bytes_to_quads(cr->offset_end &
- ~(csr->max_rom - 1))];
-
- addr = (CSR1212_CSR_ARCH_REG_SPACE_BASE + cache->offset +
- cr->offset_end) & ~(csr->max_rom - 1);
-
- if (csr->ops->bus_read(csr, addr, cache_ptr, csr->private))
- return -EIO;
-
- cr->offset_end += csr->max_rom - (cr->offset_end &
- (csr->max_rom - 1));
-
- if (!kvi && (cr->offset_end > cache_index)) {
- kvi = (struct csr1212_keyval_img*)
- (&cache->data[bytes_to_quads(cache_index)]);
- kv_len = quads_to_bytes(be16_to_cpu(kvi->length) + 1);
- }
-
- if ((kv_len + (kv->offset - cache->offset)) > cache->size) {
- /* The Leaf or Directory claims its length extends
- * beyond the ConfigROM image region and thus beyond the
- * end of our cache region. Therefore, we abort now
- * rather than seg faulting later. */
- return -EIO;
- }
-
- ncr = cr->next;
-
- if (ncr && (cr->offset_end >= ncr->offset_start)) {
- /* consolidate region entries */
- ncr->offset_start = cr->offset_start;
-
- if (cr->prev)
- cr->prev->next = cr->next;
- ncr->prev = cr->prev;
- if (cache->filled_head == cr)
- cache->filled_head = ncr;
- CSR1212_FREE(cr);
- cr = ncr;
- }
- }
-
- return csr1212_parse_keyval(kv, cache);
-}
-
-struct csr1212_keyval *
-csr1212_get_keyval(struct csr1212_csr *csr, struct csr1212_keyval *kv)
-{
- if (!kv)
- return NULL;
- if (!kv->valid)
- if (csr1212_read_keyval(csr, kv) != CSR1212_SUCCESS)
- return NULL;
- return kv;
-}
-
-int csr1212_parse_csr(struct csr1212_csr *csr)
-{
- struct csr1212_dentry *dentry;
- int ret;
-
- BUG_ON(!csr || !csr->ops || !csr->ops->bus_read);
-
- ret = csr1212_parse_bus_info_block(csr);
- if (ret != CSR1212_SUCCESS)
- return ret;
-
- /*
- * Buggy firmware with bus_info_block.max_rom > 0 has been spotted that
- * actually supports only quadlet read requests to the config ROM.
- * Therefore read everything quadlet by quadlet regardless
- * of what the bus info block says.
- */
- csr->max_rom = 4;
-
- csr->cache_head->layout_head = csr->root_kv;
- csr->cache_head->layout_tail = csr->root_kv;
-
- csr->root_kv->offset = (CSR1212_CONFIG_ROM_SPACE_BASE & 0xffff) +
- csr->bus_info_len;
-
- csr->root_kv->valid = 0;
- csr->root_kv->next = csr->root_kv;
- csr->root_kv->prev = csr->root_kv;
- ret = csr1212_read_keyval(csr, csr->root_kv);
- if (ret != CSR1212_SUCCESS)
- return ret;
-
- /* Scan through the Root directory finding all extended ROM regions
- * and make cache regions for them */
- for (dentry = csr->root_kv->value.directory.dentries_head;
- dentry; dentry = dentry->next) {
- if (dentry->kv->key.id == CSR1212_KV_ID_EXTENDED_ROM &&
- !dentry->kv->valid) {
- ret = csr1212_read_keyval(csr, dentry->kv);
- if (ret != CSR1212_SUCCESS)
- return ret;
- }
- }
-
- return CSR1212_SUCCESS;
-}
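
csr1212_parse_csr() above reads only the bus info block, the root directory and the headers of any Extended ROM leaves it references; other leaves and subdirectories are fetched lazily through csr1212_get_keyval(). A minimal sketch of walking a parsed remote ROM (the function name and the debug output are illustrative only):

static void example_dump_root_immediates(struct csr1212_csr *csr)
{
	struct csr1212_dentry *dentry;

	for (dentry = csr->root_kv->value.directory.dentries_head;
	     dentry; dentry = dentry->next) {
		/* reads and parses the entry on demand if it is not valid yet */
		struct csr1212_keyval *kv = csr1212_get_keyval(csr, dentry->kv);

		if (!kv)
			continue;	/* bus read or parse failure */
		if (kv->key.type == CSR1212_KV_TYPE_IMMEDIATE)
			printk(KERN_DEBUG "root entry key 0x%02x = 0x%06x\n",
			       kv->key.id, kv->value.immediate);
	}
}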
diff --git a/drivers/ieee1394/csr1212.h b/drivers/ieee1394/csr1212.h
deleted file mode 100644
index a892d922dbc9..000000000000
--- a/drivers/ieee1394/csr1212.h
+++ /dev/null
@@ -1,383 +0,0 @@
-/*
- * csr1212.h -- IEEE 1212 Control and Status Register support for Linux
- *
- * Copyright (C) 2003 Francois Retief <fgretief@sun.ac.za>
- * Steve Kinneberg <kinnebergsteve@acmsystems.com>
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- *
- * 1. Redistributions of source code must retain the above copyright notice,
- * this list of conditions and the following disclaimer.
- * 2. Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution.
- * 3. The name of the author may not be used to endorse or promote products
- * derived from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED
- * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
- * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO
- * EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
- * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
- * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
- * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
- * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
- * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- */
-
-#ifndef __CSR1212_H__
-#define __CSR1212_H__
-
-#include <linux/types.h>
-#include <linux/slab.h>
-#include <asm/atomic.h>
-
-#define CSR1212_MALLOC(size) kmalloc((size), GFP_KERNEL)
-#define CSR1212_FREE(ptr) kfree(ptr)
-
-#define CSR1212_SUCCESS (0)
-
-
-/* CSR 1212 key types */
-#define CSR1212_KV_TYPE_IMMEDIATE 0
-#define CSR1212_KV_TYPE_CSR_OFFSET 1
-#define CSR1212_KV_TYPE_LEAF 2
-#define CSR1212_KV_TYPE_DIRECTORY 3
-
-
-/* CSR 1212 key ids */
-#define CSR1212_KV_ID_DESCRIPTOR 0x01
-#define CSR1212_KV_ID_BUS_DEPENDENT_INFO 0x02
-#define CSR1212_KV_ID_VENDOR 0x03
-#define CSR1212_KV_ID_HARDWARE_VERSION 0x04
-#define CSR1212_KV_ID_MODULE 0x07
-#define CSR1212_KV_ID_NODE_CAPABILITIES 0x0C
-#define CSR1212_KV_ID_EUI_64 0x0D
-#define CSR1212_KV_ID_UNIT 0x11
-#define CSR1212_KV_ID_SPECIFIER_ID 0x12
-#define CSR1212_KV_ID_VERSION 0x13
-#define CSR1212_KV_ID_DEPENDENT_INFO 0x14
-#define CSR1212_KV_ID_UNIT_LOCATION 0x15
-#define CSR1212_KV_ID_MODEL 0x17
-#define CSR1212_KV_ID_INSTANCE 0x18
-#define CSR1212_KV_ID_KEYWORD 0x19
-#define CSR1212_KV_ID_FEATURE 0x1A
-#define CSR1212_KV_ID_EXTENDED_ROM 0x1B
-#define CSR1212_KV_ID_EXTENDED_KEY_SPECIFIER_ID 0x1C
-#define CSR1212_KV_ID_EXTENDED_KEY 0x1D
-#define CSR1212_KV_ID_EXTENDED_DATA 0x1E
-#define CSR1212_KV_ID_MODIFIABLE_DESCRIPTOR 0x1F
-#define CSR1212_KV_ID_DIRECTORY_ID 0x20
-#define CSR1212_KV_ID_REVISION 0x21
-
-
-/* IEEE 1212 Address space map */
-#define CSR1212_ALL_SPACE_BASE (0x000000000000ULL)
-#define CSR1212_ALL_SPACE_SIZE (1ULL << 48)
-#define CSR1212_ALL_SPACE_END (CSR1212_ALL_SPACE_BASE + CSR1212_ALL_SPACE_SIZE)
-
-#define CSR1212_MEMORY_SPACE_BASE (0x000000000000ULL)
-#define CSR1212_MEMORY_SPACE_SIZE ((256ULL * (1ULL << 40)) - (512ULL * (1ULL << 20)))
-#define CSR1212_MEMORY_SPACE_END (CSR1212_MEMORY_SPACE_BASE + CSR1212_MEMORY_SPACE_SIZE)
-
-#define CSR1212_PRIVATE_SPACE_BASE (0xffffe0000000ULL)
-#define CSR1212_PRIVATE_SPACE_SIZE (256ULL * (1ULL << 20))
-#define CSR1212_PRIVATE_SPACE_END (CSR1212_PRIVATE_SPACE_BASE + CSR1212_PRIVATE_SPACE_SIZE)
-
-#define CSR1212_REGISTER_SPACE_BASE (0xfffff0000000ULL)
-#define CSR1212_REGISTER_SPACE_SIZE (256ULL * (1ULL << 20))
-#define CSR1212_REGISTER_SPACE_END (CSR1212_REGISTER_SPACE_BASE + CSR1212_REGISTER_SPACE_SIZE)
-
-#define CSR1212_CSR_ARCH_REG_SPACE_BASE (0xfffff0000000ULL)
-#define CSR1212_CSR_ARCH_REG_SPACE_SIZE (512)
-#define CSR1212_CSR_ARCH_REG_SPACE_END (CSR1212_CSR_ARCH_REG_SPACE_BASE + CSR1212_CSR_ARCH_REG_SPACE_SIZE)
-#define CSR1212_CSR_ARCH_REG_SPACE_OFFSET (CSR1212_CSR_ARCH_REG_SPACE_BASE - CSR1212_REGISTER_SPACE_BASE)
-
-#define CSR1212_CSR_BUS_DEP_REG_SPACE_BASE (0xfffff0000200ULL)
-#define CSR1212_CSR_BUS_DEP_REG_SPACE_SIZE (512)
-#define CSR1212_CSR_BUS_DEP_REG_SPACE_END (CSR1212_CSR_BUS_DEP_REG_SPACE_BASE + CSR1212_CSR_BUS_DEP_REG_SPACE_SIZE)
-#define CSR1212_CSR_BUS_DEP_REG_SPACE_OFFSET (CSR1212_CSR_BUS_DEP_REG_SPACE_BASE - CSR1212_REGISTER_SPACE_BASE)
-
-#define CSR1212_CONFIG_ROM_SPACE_BASE (0xfffff0000400ULL)
-#define CSR1212_CONFIG_ROM_SPACE_SIZE (1024)
-#define CSR1212_CONFIG_ROM_SPACE_END (CSR1212_CONFIG_ROM_SPACE_BASE + CSR1212_CONFIG_ROM_SPACE_SIZE)
-#define CSR1212_CONFIG_ROM_SPACE_OFFSET (CSR1212_CONFIG_ROM_SPACE_BASE - CSR1212_REGISTER_SPACE_BASE)
-
-#define CSR1212_UNITS_SPACE_BASE (0xfffff0000800ULL)
-#define CSR1212_UNITS_SPACE_SIZE ((256ULL * (1ULL << 20)) - 2048)
-#define CSR1212_UNITS_SPACE_END (CSR1212_UNITS_SPACE_BASE + CSR1212_UNITS_SPACE_SIZE)
-#define CSR1212_UNITS_SPACE_OFFSET (CSR1212_UNITS_SPACE_BASE - CSR1212_REGISTER_SPACE_BASE)
-
-#define CSR1212_INVALID_ADDR_SPACE -1
-
-
-/* Config ROM image structures */
-struct csr1212_bus_info_block_img {
- u8 length;
- u8 crc_length;
- u16 crc;
-
- /* Must be last */
- u32 data[0]; /* older gcc can't handle [] which is standard */
-};
-
-struct csr1212_leaf {
- int len;
- u32 *data;
-};
-
-struct csr1212_dentry {
- struct csr1212_dentry *next, *prev;
- struct csr1212_keyval *kv;
-};
-
-struct csr1212_directory {
- int len;
- struct csr1212_dentry *dentries_head, *dentries_tail;
-};
-
-struct csr1212_keyval {
- struct {
- u8 type;
- u8 id;
- } key;
- union {
- u32 immediate;
- u32 csr_offset;
- struct csr1212_leaf leaf;
- struct csr1212_directory directory;
- } value;
- struct csr1212_keyval *associate;
- atomic_t refcnt;
-
- /* used in generating and/or parsing CSR image */
- struct csr1212_keyval *next, *prev; /* flat list of CSR elements */
- u32 offset; /* position in CSR from 0xffff f000 0000 */
- u8 valid; /* flag indicating keyval has valid data*/
-};
-
-
-struct csr1212_cache_region {
- struct csr1212_cache_region *next, *prev;
- u32 offset_start; /* inclusive */
- u32 offset_end; /* exclusive */
-};
-
-struct csr1212_csr_rom_cache {
- struct csr1212_csr_rom_cache *next, *prev;
- struct csr1212_cache_region *filled_head, *filled_tail;
- struct csr1212_keyval *layout_head, *layout_tail;
- size_t size;
- u32 offset;
- struct csr1212_keyval *ext_rom;
- size_t len;
-
- /* Must be last */
- u32 data[0]; /* older gcc can't handle [] which is standard */
-};
-
-struct csr1212_csr {
- size_t bus_info_len; /* bus info block length in bytes */
- size_t crc_len; /* crc length in bytes */
- __be32 *bus_info_data; /* bus info data incl bus name and EUI */
-
- void *private; /* private, bus specific data */
- struct csr1212_bus_ops *ops;
-
- struct csr1212_keyval *root_kv;
-
- int max_rom; /* max bytes readable in Config ROM region */
-
- /* Items below used for image parsing and generation */
- struct csr1212_csr_rom_cache *cache_head, *cache_tail;
-};
-
-struct csr1212_bus_ops {
- /* This function is used by csr1212 to read additional information
- * from remote nodes when parsing a Config ROM (i.e., to read Config ROM
- * entries located in the Units Space). Must return 0 on success;
- * anything else indicates an error. */
- int (*bus_read) (struct csr1212_csr *csr, u64 addr,
- void *buffer, void *private);
-
- /* This function is used by csr1212 to allocate a region in units space
- * in the event that Config ROM entries don't all fit in the predefined
- * 1K region. The void *private parameter is the private member of struct
- * csr1212_csr. */
- u64 (*allocate_addr_range) (u64 size, u32 alignment, void *private);
-
- /* This function is used by csr1212 to release a region in units space
- * that is no longer needed. */
- void (*release_addr) (u64 addr, void *private);
-};
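
For context on the bus_read contract above, here is a minimal, hedged sketch of an implementation that serves quadlets from a locally held ROM image instead of issuing real read transactions on the bus; the rom_image buffer, its size, and the function names are illustrative assumptions, not part of this header. Since the parser forces csr->max_rom to 4, each call only has to fill a single quadlet.

/* Hypothetical bus_read: copies one quadlet of Config ROM per call.
 * 'addr' is an absolute 1394 address inside the Config ROM space.
 * Assumes <linux/string.h> for memcpy(). */
static u32 rom_image[256];                       /* illustrative local ROM copy */

static int example_bus_read(struct csr1212_csr *csr, u64 addr,
			    void *buffer, void *private)
{
	u64 offset = addr - CSR1212_CONFIG_ROM_SPACE_BASE;

	if (offset + sizeof(u32) > sizeof(rom_image))
		return -1;                       /* non-zero means error */
	memcpy(buffer, (u8 *)rom_image + offset, sizeof(u32));
	return CSR1212_SUCCESS;
}

static struct csr1212_bus_ops example_ops = {
	.bus_read = example_bus_read,
};

A csr created with csr1212_create_csr(&example_ops, <bus info size>, NULL) would then pull the whole ROM through this callback, one quadlet at a time, when csr1212_parse_csr() runs.
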
-
-
-/* Descriptor Leaf manipulation macros */
-#define CSR1212_DESCRIPTOR_LEAF_TYPE_SHIFT 24
-#define CSR1212_DESCRIPTOR_LEAF_SPECIFIER_ID_MASK 0xffffff
-#define CSR1212_DESCRIPTOR_LEAF_OVERHEAD (1 * sizeof(u32))
-
-#define CSR1212_DESCRIPTOR_LEAF_TYPE(kv) \
- (be32_to_cpu((kv)->value.leaf.data[0]) >> \
- CSR1212_DESCRIPTOR_LEAF_TYPE_SHIFT)
-#define CSR1212_DESCRIPTOR_LEAF_SPECIFIER_ID(kv) \
- (be32_to_cpu((kv)->value.leaf.data[0]) & \
- CSR1212_DESCRIPTOR_LEAF_SPECIFIER_ID_MASK)
-
-
-/* Text Descriptor Leaf manipulation macros */
-#define CSR1212_TEXTUAL_DESCRIPTOR_LEAF_WIDTH_SHIFT 28
-#define CSR1212_TEXTUAL_DESCRIPTOR_LEAF_WIDTH_MASK 0xf /* after shift */
-#define CSR1212_TEXTUAL_DESCRIPTOR_LEAF_CHAR_SET_SHIFT 16
-#define CSR1212_TEXTUAL_DESCRIPTOR_LEAF_CHAR_SET_MASK 0xfff /* after shift */
-#define CSR1212_TEXTUAL_DESCRIPTOR_LEAF_LANGUAGE_MASK 0xffff
-#define CSR1212_TEXTUAL_DESCRIPTOR_LEAF_OVERHEAD (1 * sizeof(u32))
-
-#define CSR1212_TEXTUAL_DESCRIPTOR_LEAF_WIDTH(kv) \
- (be32_to_cpu((kv)->value.leaf.data[1]) >> \
- CSR1212_TEXTUAL_DESCRIPTOR_LEAF_WIDTH_SHIFT)
-#define CSR1212_TEXTUAL_DESCRIPTOR_LEAF_CHAR_SET(kv) \
- ((be32_to_cpu((kv)->value.leaf.data[1]) >> \
- CSR1212_TEXTUAL_DESCRIPTOR_LEAF_CHAR_SET_SHIFT) & \
- CSR1212_TEXTUAL_DESCRIPTOR_LEAF_CHAR_SET_MASK)
-#define CSR1212_TEXTUAL_DESCRIPTOR_LEAF_LANGUAGE(kv) \
- (be32_to_cpu((kv)->value.leaf.data[1]) & \
- CSR1212_TEXTUAL_DESCRIPTOR_LEAF_LANGUAGE_MASK)
-#define CSR1212_TEXTUAL_DESCRIPTOR_LEAF_DATA(kv) \
- (&((kv)->value.leaf.data[2]))
-
-
- /* The following two functions are for creating new Configuration ROM trees. The
- * first function is used for both creating local trees and parsing remote
- * trees. The second function adds pertinent information to local Configuration
- * ROM trees - namely data for the bus information block. */
-extern struct csr1212_csr *csr1212_create_csr(struct csr1212_bus_ops *ops,
- size_t bus_info_size,
- void *private);
-extern void csr1212_init_local_csr(struct csr1212_csr *csr,
- const u32 *bus_info_data, int max_rom);
-
-
-/* Destroy a Configuration ROM tree and release all memory taken by the tree. */
-extern void csr1212_destroy_csr(struct csr1212_csr *csr);
-
-
- /* The following set of functions is for creating new keyvals for placement in
- * a Configuration ROM tree. Code that creates new keyvals with these functions
- * must release those keyvals with csr1212_release_keyval() when they are no
- * longer needed. */
-extern struct csr1212_keyval *csr1212_new_immediate(u8 key, u32 value);
-extern struct csr1212_keyval *csr1212_new_directory(u8 key);
-extern struct csr1212_keyval *csr1212_new_string_descriptor_leaf(const char *s);
-
-
-/* The following function manages association between keyvals. Typically,
- * Descriptor Leaves and Directories will be associated with another keyval and
- * it is desirable for the Descriptor keyval to be placed immediately after the
- * keyval that it is associated with.
- * Take care with subsequent ROM modifications: There is no function to remove
- * previously specified associations.
- */
-extern void csr1212_associate_keyval(struct csr1212_keyval *kv,
- struct csr1212_keyval *associate);
-
-
-/* The following functions manage the association of a keyval and directories.
- * A keyval may be attached to more than one directory. */
-extern int csr1212_attach_keyval_to_directory(struct csr1212_keyval *dir,
- struct csr1212_keyval *kv);
-extern void csr1212_detach_keyval_from_directory(struct csr1212_keyval *dir,
- struct csr1212_keyval *kv);
-
-
-/* Creates a complete Configuration ROM image in the list of caches available
- * via csr->cache_head. */
-extern int csr1212_generate_csr_image(struct csr1212_csr *csr);
-
-
- /* This is a convenience function for reading a block of data out of one of the
- * caches in the csr->cache_head list. */
-extern int csr1212_read(struct csr1212_csr *csr, u32 offset, void *buffer,
- u32 len);
-
-
-/* The following functions are in place for parsing Configuration ROM images.
- * csr1212_parse_keyval() is used should there be a need to parse a
- * Configuration ROM directly. */
-extern int csr1212_parse_keyval(struct csr1212_keyval *kv,
- struct csr1212_csr_rom_cache *cache);
-extern int csr1212_parse_csr(struct csr1212_csr *csr);
-
-
-/* This function allocates a new cache which may be used for either parsing or
- * generating sub-sets of Configuration ROM images. */
-static inline struct csr1212_csr_rom_cache *
-csr1212_rom_cache_malloc(u32 offset, size_t size)
-{
- struct csr1212_csr_rom_cache *cache;
-
- cache = CSR1212_MALLOC(sizeof(*cache) + size);
- if (!cache)
- return NULL;
-
- cache->next = NULL;
- cache->prev = NULL;
- cache->filled_head = NULL;
- cache->filled_tail = NULL;
- cache->layout_head = NULL;
- cache->layout_tail = NULL;
- cache->offset = offset;
- cache->size = size;
- cache->ext_rom = NULL;
-
- return cache;
-}
-
-
-/* This function ensures that a keyval contains data when referencing a keyval
- * created by parsing a Configuration ROM. */
-extern struct csr1212_keyval *
-csr1212_get_keyval(struct csr1212_csr *csr, struct csr1212_keyval *kv);
-
-
-/* This function increments the reference count for a keyval should there be a
- * need for code to retain a keyval that has been parsed. */
-static inline void csr1212_keep_keyval(struct csr1212_keyval *kv)
-{
- atomic_inc(&kv->refcnt);
- smp_mb__after_atomic_inc();
-}
-
-
-/* This function decrements a keyval's reference count and will destroy the
- * keyval when there are no more users of the keyval. This should be called by
- * any code that calls csr1212_keep_keyval() or any of the keyval creation
- * routines csr1212_new_*(). */
-extern void csr1212_release_keyval(struct csr1212_keyval *kv);
-
-
-/*
- * This macro allows for looping over the keyval entries in a directory and it
- * ensures that keyvals from remote ConfigROMs are parsed properly.
- *
- * struct csr1212_csr *_csr points to the CSR associated with dir.
- * struct csr1212_keyval *_kv points to the current keyval (loop index).
- * struct csr1212_keyval *_dir points to the directory to be looped.
- * struct csr1212_dentry *_pos is used internally for indexing.
- *
- * kv will be NULL upon exit of the loop.
- */
-#define csr1212_for_each_dir_entry(_csr, _kv, _dir, _pos) \
- for (csr1212_get_keyval((_csr), (_dir)), \
- _pos = (_dir)->value.directory.dentries_head, \
- _kv = (_pos) ? csr1212_get_keyval((_csr), _pos->kv) : NULL;\
- (_kv) && (_pos); \
- (_kv->associate == NULL) ? \
- ((_pos = _pos->next), (_kv = (_pos) ? \
- csr1212_get_keyval((_csr), _pos->kv) : \
- NULL)) : \
- (_kv = csr1212_get_keyval((_csr), _kv->associate)))
-
-#endif /* __CSR1212_H__ */
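
To make the iteration and reference-counting rules above concrete, a hedged usage sketch follows; find_unit_dir() is an invented helper, and only the csr1212_* identifiers come from this header. The caller is expected to csr1212_release_keyval() the result when finished.

/* Sketch: locate the first Unit directory in a parsed CSR and keep it. */
static struct csr1212_keyval *find_unit_dir(struct csr1212_csr *csr)
{
	struct csr1212_dentry *pos;
	struct csr1212_keyval *kv, *unit = NULL;

	csr1212_for_each_dir_entry(csr, kv, csr->root_kv, pos) {
		if (kv->key.type == CSR1212_KV_TYPE_DIRECTORY &&
		    kv->key.id == CSR1212_KV_ID_UNIT) {
			unit = kv;
			csr1212_keep_keyval(unit);   /* hold a reference */
			break;
		}
	}
	return unit;    /* NULL if no unit directory was found */
}
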
diff --git a/drivers/ieee1394/dma.c b/drivers/ieee1394/dma.c
deleted file mode 100644
index d178699b194a..000000000000
--- a/drivers/ieee1394/dma.c
+++ /dev/null
@@ -1,289 +0,0 @@
-/*
- * DMA region bookkeeping routines
- *
- * Copyright (C) 2002 Maas Digital LLC
- *
- * This code is licensed under the GPL. See the file COPYING in the root
- * directory of the kernel sources for details.
- */
-
-#include <linux/mm.h>
-#include <linux/module.h>
-#include <linux/pci.h>
-#include <linux/vmalloc.h>
-#include <linux/scatterlist.h>
-
-#include "dma.h"
-
-/* dma_prog_region */
-
-void dma_prog_region_init(struct dma_prog_region *prog)
-{
- prog->kvirt = NULL;
- prog->dev = NULL;
- prog->n_pages = 0;
- prog->bus_addr = 0;
-}
-
-int dma_prog_region_alloc(struct dma_prog_region *prog, unsigned long n_bytes,
- struct pci_dev *dev)
-{
- /* round up to page size */
- n_bytes = PAGE_ALIGN(n_bytes);
-
- prog->n_pages = n_bytes >> PAGE_SHIFT;
-
- prog->kvirt = pci_alloc_consistent(dev, n_bytes, &prog->bus_addr);
- if (!prog->kvirt) {
- printk(KERN_ERR
- "dma_prog_region_alloc: pci_alloc_consistent() failed\n");
- dma_prog_region_free(prog);
- return -ENOMEM;
- }
-
- prog->dev = dev;
-
- return 0;
-}
-
-void dma_prog_region_free(struct dma_prog_region *prog)
-{
- if (prog->kvirt) {
- pci_free_consistent(prog->dev, prog->n_pages << PAGE_SHIFT,
- prog->kvirt, prog->bus_addr);
- }
-
- prog->kvirt = NULL;
- prog->dev = NULL;
- prog->n_pages = 0;
- prog->bus_addr = 0;
-}
-
-/* dma_region */
-
-/**
- * dma_region_init - clear out all fields but do not allocate anything
- */
-void dma_region_init(struct dma_region *dma)
-{
- dma->kvirt = NULL;
- dma->dev = NULL;
- dma->n_pages = 0;
- dma->n_dma_pages = 0;
- dma->sglist = NULL;
-}
-
-/**
- * dma_region_alloc - allocate the buffer and map it to the IOMMU
- */
-int dma_region_alloc(struct dma_region *dma, unsigned long n_bytes,
- struct pci_dev *dev, int direction)
-{
- unsigned int i;
-
- /* round up to page size */
- n_bytes = PAGE_ALIGN(n_bytes);
-
- dma->n_pages = n_bytes >> PAGE_SHIFT;
-
- dma->kvirt = vmalloc_32(n_bytes);
- if (!dma->kvirt) {
- printk(KERN_ERR "dma_region_alloc: vmalloc_32() failed\n");
- goto err;
- }
-
- /* Clear the ram out, no junk to the user */
- memset(dma->kvirt, 0, n_bytes);
-
- /* allocate scatter/gather list */
- dma->sglist = vmalloc(dma->n_pages * sizeof(*dma->sglist));
- if (!dma->sglist) {
- printk(KERN_ERR "dma_region_alloc: vmalloc(sglist) failed\n");
- goto err;
- }
-
- sg_init_table(dma->sglist, dma->n_pages);
-
- /* fill scatter/gather list with pages */
- for (i = 0; i < dma->n_pages; i++) {
- unsigned long va =
- (unsigned long)dma->kvirt + (i << PAGE_SHIFT);
-
- sg_set_page(&dma->sglist[i], vmalloc_to_page((void *)va),
- PAGE_SIZE, 0);
- }
-
- /* map sglist to the IOMMU */
- dma->n_dma_pages =
- pci_map_sg(dev, dma->sglist, dma->n_pages, direction);
-
- if (dma->n_dma_pages == 0) {
- printk(KERN_ERR "dma_region_alloc: pci_map_sg() failed\n");
- goto err;
- }
-
- dma->dev = dev;
- dma->direction = direction;
-
- return 0;
-
- err:
- dma_region_free(dma);
- return -ENOMEM;
-}
-
-/**
- * dma_region_free - unmap and free the buffer
- */
-void dma_region_free(struct dma_region *dma)
-{
- if (dma->n_dma_pages) {
- pci_unmap_sg(dma->dev, dma->sglist, dma->n_pages,
- dma->direction);
- dma->n_dma_pages = 0;
- dma->dev = NULL;
- }
-
- vfree(dma->sglist);
- dma->sglist = NULL;
-
- vfree(dma->kvirt);
- dma->kvirt = NULL;
- dma->n_pages = 0;
-}
-
-/* find the scatterlist index and remaining offset corresponding to a
- given offset from the beginning of the buffer */
-static inline int dma_region_find(struct dma_region *dma, unsigned long offset,
- unsigned int start, unsigned long *rem)
-{
- int i;
- unsigned long off = offset;
-
- for (i = start; i < dma->n_dma_pages; i++) {
- if (off < sg_dma_len(&dma->sglist[i])) {
- *rem = off;
- break;
- }
-
- off -= sg_dma_len(&dma->sglist[i]);
- }
-
- BUG_ON(i >= dma->n_dma_pages);
-
- return i;
-}
-
-/**
- * dma_region_offset_to_bus - get bus address of an offset within a DMA region
- *
- * Returns the DMA bus address of the byte with the given @offset relative to
- * the beginning of the @dma.
- */
-dma_addr_t dma_region_offset_to_bus(struct dma_region * dma,
- unsigned long offset)
-{
- unsigned long rem = 0;
-
- struct scatterlist *sg =
- &dma->sglist[dma_region_find(dma, offset, 0, &rem)];
- return sg_dma_address(sg) + rem;
-}
-
-/**
- * dma_region_sync_for_cpu - sync the CPU's view of the buffer
- */
-void dma_region_sync_for_cpu(struct dma_region *dma, unsigned long offset,
- unsigned long len)
-{
- int first, last;
- unsigned long rem = 0;
-
- if (!len)
- len = 1;
-
- first = dma_region_find(dma, offset, 0, &rem);
- last = dma_region_find(dma, rem + len - 1, first, &rem);
-
- pci_dma_sync_sg_for_cpu(dma->dev, &dma->sglist[first], last - first + 1,
- dma->direction);
-}
-
-/**
- * dma_region_sync_for_device - sync the IO bus' view of the buffer
- */
-void dma_region_sync_for_device(struct dma_region *dma, unsigned long offset,
- unsigned long len)
-{
- int first, last;
- unsigned long rem = 0;
-
- if (!len)
- len = 1;
-
- first = dma_region_find(dma, offset, 0, &rem);
- last = dma_region_find(dma, rem + len - 1, first, &rem);
-
- pci_dma_sync_sg_for_device(dma->dev, &dma->sglist[first],
- last - first + 1, dma->direction);
-}
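
The two sync helpers above are meant to bracket CPU access to a region that the device also writes; a minimal sketch of the receive-side pattern is shown below, where handle_packet() is a hypothetical consumer and the offset/len pair describes the packet the device just delivered.

/* Sketch: consume data the device has just DMA'd into the region. */
static void example_consume(struct dma_region *dma,
			    unsigned long offset, unsigned long len)
{
	dma_region_sync_for_cpu(dma, offset, len);    /* make it CPU-visible */
	handle_packet(dma->kvirt + offset, len);      /* hypothetical consumer */
	dma_region_sync_for_device(dma, offset, len); /* hand it back to the device */
}
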
-
-#ifdef CONFIG_MMU
-
-static int dma_region_pagefault(struct vm_area_struct *vma,
- struct vm_fault *vmf)
-{
- struct dma_region *dma = (struct dma_region *)vma->vm_private_data;
-
- if (!dma->kvirt)
- return VM_FAULT_SIGBUS;
-
- if (vmf->pgoff >= dma->n_pages)
- return VM_FAULT_SIGBUS;
-
- vmf->page = vmalloc_to_page(dma->kvirt + (vmf->pgoff << PAGE_SHIFT));
- get_page(vmf->page);
- return 0;
-}
-
-static const struct vm_operations_struct dma_region_vm_ops = {
- .fault = dma_region_pagefault,
-};
-
-/**
- * dma_region_mmap - map the buffer into a user space process
- */
-int dma_region_mmap(struct dma_region *dma, struct file *file,
- struct vm_area_struct *vma)
-{
- unsigned long size;
-
- if (!dma->kvirt)
- return -EINVAL;
-
- /* must be page-aligned (XXX: comment is wrong, we could allow pgoff) */
- if (vma->vm_pgoff != 0)
- return -EINVAL;
-
- /* check the length */
- size = vma->vm_end - vma->vm_start;
- if (size > (dma->n_pages << PAGE_SHIFT))
- return -EINVAL;
-
- vma->vm_ops = &dma_region_vm_ops;
- vma->vm_private_data = dma;
- vma->vm_file = file;
- vma->vm_flags |= VM_RESERVED | VM_ALWAYSDUMP;
-
- return 0;
-}
-
-#else /* CONFIG_MMU */
-
-int dma_region_mmap(struct dma_region *dma, struct file *file,
- struct vm_area_struct *vma)
-{
- return -EINVAL;
-}
-
-#endif /* CONFIG_MMU */
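
Putting this file's pieces together, a driver using dma_region for a user-mappable ring buffer would typically follow the lifecycle sketched below (as dv1394 does for its DV ringbuffer); the 256 KiB size and PCI_DMA_BIDIRECTIONAL direction are illustrative choices only.

/* Sketch of a dma_region lifecycle: init, alloc/map, mmap, free. */
static int example_setup(struct dma_region *dma, struct pci_dev *pdev)
{
	int err;

	dma_region_init(dma);                 /* clear the bookkeeping fields */
	err = dma_region_alloc(dma, 256 * 1024, pdev, PCI_DMA_BIDIRECTIONAL);
	if (err)
		return err;                   /* -ENOMEM on any failure */

	/* later, from the driver's .mmap file operation:
	 *     return dma_region_mmap(dma, file, vma);
	 */
	return 0;
}

static void example_teardown(struct dma_region *dma)
{
	dma_region_free(dma);                 /* unmap from the IOMMU and vfree */
}
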
diff --git a/drivers/ieee1394/dma.h b/drivers/ieee1394/dma.h
deleted file mode 100644
index 467373cab8e5..000000000000
--- a/drivers/ieee1394/dma.h
+++ /dev/null
@@ -1,89 +0,0 @@
-/*
- * DMA region bookkeeping routines
- *
- * Copyright (C) 2002 Maas Digital LLC
- *
- * This code is licensed under the GPL. See the file COPYING in the root
- * directory of the kernel sources for details.
- */
-
-#ifndef IEEE1394_DMA_H
-#define IEEE1394_DMA_H
-
-#include <asm/types.h>
-
-struct file;
-struct pci_dev;
-struct scatterlist;
-struct vm_area_struct;
-
-/**
- * struct dma_prog_region - small contiguous DMA buffer
- * @kvirt: kernel virtual address
- * @dev: PCI device
- * @n_pages: number of kernel pages
- * @bus_addr: base bus address
- *
- * a small, physically contiguous DMA buffer with random-access, synchronous
- * usage characteristics
- */
-struct dma_prog_region {
- unsigned char *kvirt;
- struct pci_dev *dev;
- unsigned int n_pages;
- dma_addr_t bus_addr;
-};
-
-/* clear out all fields but do not allocate any memory */
-void dma_prog_region_init(struct dma_prog_region *prog);
-int dma_prog_region_alloc(struct dma_prog_region *prog, unsigned long n_bytes,
- struct pci_dev *dev);
-void dma_prog_region_free(struct dma_prog_region *prog);
-
-static inline dma_addr_t dma_prog_region_offset_to_bus(
- struct dma_prog_region *prog, unsigned long offset)
-{
- return prog->bus_addr + offset;
-}
-
-/**
- * struct dma_region - large non-contiguous DMA buffer
- * @kvirt: kernel virtual address
- * @dev: PCI device
- * @n_pages: number of kernel pages
- * @n_dma_pages: number of IOMMU pages
- * @sglist: IOMMU mapping
- * @direction: PCI_DMA_TODEVICE, etc.
- *
- * a large, non-physically-contiguous DMA buffer with streaming, asynchronous
- * usage characteristics
- */
-struct dma_region {
- unsigned char *kvirt;
- struct pci_dev *dev;
- unsigned int n_pages;
- unsigned int n_dma_pages;
- struct scatterlist *sglist;
- int direction;
-};
-
-void dma_region_init(struct dma_region *dma);
-int dma_region_alloc(struct dma_region *dma, unsigned long n_bytes,
- struct pci_dev *dev, int direction);
-void dma_region_free(struct dma_region *dma);
-void dma_region_sync_for_cpu(struct dma_region *dma, unsigned long offset,
- unsigned long len);
-void dma_region_sync_for_device(struct dma_region *dma, unsigned long offset,
- unsigned long len);
-int dma_region_mmap(struct dma_region *dma, struct file *file,
- struct vm_area_struct *vma);
-dma_addr_t dma_region_offset_to_bus(struct dma_region *dma,
- unsigned long offset);
-
-/**
- * dma_region_i - macro to index into a DMA region (or dma_prog_region)
- */
-#define dma_region_i(_dma, _type, _index) \
- ( ((_type*) ((_dma)->kvirt)) + (_index) )
-
-#endif /* IEEE1394_DMA_H */
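
As a quick illustration of the addressing helpers declared above: dma_region_i() yields a CPU-side pointer to the i-th element, while dma_region_offset_to_bus() walks the scatterlist to turn the same byte offset into a bus address for DMA programs. A hedged sketch, using u32 as a stand-in element type:

/* Sketch: CPU pointer vs. bus address for element i of a mapped region. */
static void example_addressing(struct dma_region *dma, unsigned int i)
{
	u32 *elem = dma_region_i(dma, u32, i);              /* for CPU access */
	dma_addr_t bus = dma_region_offset_to_bus(dma, i * sizeof(u32));

	/* 'elem' is dereferenced by the driver; 'bus' is what gets written
	   into a DMA descriptor. */
	(void)elem;
	(void)bus;
}
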
diff --git a/drivers/ieee1394/dv1394-private.h b/drivers/ieee1394/dv1394-private.h
deleted file mode 100644
index 18b92cbf4a9f..000000000000
--- a/drivers/ieee1394/dv1394-private.h
+++ /dev/null
@@ -1,587 +0,0 @@
-/*
- * dv1394-private.h - DV input/output over IEEE 1394 on OHCI chips
- * Copyright (C)2001 Daniel Maas <dmaas@dcine.com>
- * receive by Dan Dennedy <dan@dennedy.org>
- *
- * based on:
- * video1394.h - driver for OHCI 1394 boards
- * Copyright (C)1999,2000 Sebastien Rougeaux <sebastien.rougeaux@anu.edu.au>
- * Peter Schlaile <udbz@rz.uni-karlsruhe.de>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software Foundation,
- * Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
- */
-
-#ifndef _DV_1394_PRIVATE_H
-#define _DV_1394_PRIVATE_H
-
-#include "ieee1394.h"
-#include "ohci1394.h"
-#include "dma.h"
-
-/* data structures private to the dv1394 driver */
-/* none of this is exposed to user-space */
-
-
-/*
- the 8-byte CIP (Common Isochronous Packet) header that precedes
- each packet of DV data.
-
- See the IEC 61883 standard.
-*/
-
-struct CIP_header { unsigned char b[8]; };
-
-static inline void fill_cip_header(struct CIP_header *cip,
- unsigned char source_node_id,
- unsigned long counter,
- enum pal_or_ntsc format,
- unsigned long timestamp)
-{
- cip->b[0] = source_node_id;
- cip->b[1] = 0x78; /* packet size in quadlets (480/4) - even for empty packets! */
- cip->b[2] = 0x00;
- cip->b[3] = counter;
-
- cip->b[4] = 0x80; /* const */
-
- switch(format) {
- case DV1394_PAL:
- cip->b[5] = 0x80;
- break;
- case DV1394_NTSC:
- cip->b[5] = 0x00;
- break;
- }
-
- cip->b[6] = timestamp >> 8;
- cip->b[7] = timestamp & 0xFF;
-}
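
As a worked example of the layout this helper produces: fill_cip_header(&cip, 0x02, 5, DV1394_PAL, 0x1234) leaves cip.b[] holding 02 78 00 05 80 80 12 34 — the source node ID, the fixed quadlet count 0x78, a zero byte, the low byte of the continuity counter, the constant 0x80, the PAL format byte 0x80, and the 16-bit SYT timestamp 0x1234 split across the last two bytes.
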
-
-
-
-/*
- DMA commands used to program the OHCI's DMA engine
-
- See the Texas Instruments OHCI 1394 chipset documentation.
-*/
-
-struct output_more_immediate { __le32 q[8]; };
-struct output_more { __le32 q[4]; };
-struct output_last { __le32 q[4]; };
-struct input_more { __le32 q[4]; };
-struct input_last { __le32 q[4]; };
-
-/* outputs */
-
-static inline void fill_output_more_immediate(struct output_more_immediate *omi,
- unsigned char tag,
- unsigned char channel,
- unsigned char sync_tag,
- unsigned int payload_size)
-{
- omi->q[0] = cpu_to_le32(0x02000000 | 8); /* OUTPUT_MORE_IMMEDIATE; 8 is the size of the IT header */
- omi->q[1] = cpu_to_le32(0);
- omi->q[2] = cpu_to_le32(0);
- omi->q[3] = cpu_to_le32(0);
-
- /* IT packet header */
- omi->q[4] = cpu_to_le32( (0x0 << 16) /* IEEE1394_SPEED_100 */
- | (tag << 14)
- | (channel << 8)
- | (TCODE_ISO_DATA << 4)
- | (sync_tag) );
-
- /* reserved field; mimic behavior of my Sony DSR-40 */
- omi->q[5] = cpu_to_le32((payload_size << 16) | (0x7F << 8) | 0xA0);
-
- omi->q[6] = cpu_to_le32(0);
- omi->q[7] = cpu_to_le32(0);
-}
-
-static inline void fill_output_more(struct output_more *om,
- unsigned int data_size,
- unsigned long data_phys_addr)
-{
- om->q[0] = cpu_to_le32(data_size);
- om->q[1] = cpu_to_le32(data_phys_addr);
- om->q[2] = cpu_to_le32(0);
- om->q[3] = cpu_to_le32(0);
-}
-
-static inline void fill_output_last(struct output_last *ol,
- int want_timestamp,
- int want_interrupt,
- unsigned int data_size,
- unsigned long data_phys_addr)
-{
- u32 temp = 0;
- temp |= 1 << 28; /* OUTPUT_LAST */
-
- if (want_timestamp) /* controller will update timestamp at DMA time */
- temp |= 1 << 27;
-
- if (want_interrupt)
- temp |= 3 << 20;
-
- temp |= 3 << 18; /* must take branch */
- temp |= data_size;
-
- ol->q[0] = cpu_to_le32(temp);
- ol->q[1] = cpu_to_le32(data_phys_addr);
- ol->q[2] = cpu_to_le32(0);
- ol->q[3] = cpu_to_le32(0);
-}
-
-/* inputs */
-
-static inline void fill_input_more(struct input_more *im,
- int want_interrupt,
- unsigned int data_size,
- unsigned long data_phys_addr)
-{
- u32 temp = 2 << 28; /* INPUT_MORE */
- temp |= 8 << 24; /* s = 1, update xferStatus and resCount */
- if (want_interrupt)
- temp |= 0 << 20; /* interrupts, i=0 in packet-per-buffer mode */
- temp |= 0x0 << 16; /* disable branch to address for packet-per-buffer mode */
- /* disable wait on sync field, not used in DV :-( */
- temp |= data_size;
-
- im->q[0] = cpu_to_le32(temp);
- im->q[1] = cpu_to_le32(data_phys_addr);
- im->q[2] = cpu_to_le32(0); /* branchAddress and Z not used in packet-per-buffer mode */
- im->q[3] = cpu_to_le32(0); /* xferStatus & resCount; resCount must be initialized to data_size */
-}
-
-static inline void fill_input_last(struct input_last *il,
- int want_interrupt,
- unsigned int data_size,
- unsigned long data_phys_addr)
-{
- u32 temp = 3 << 28; /* INPUT_LAST */
- temp |= 8 << 24; /* s = 1, update xferStatus and resCount */
- if (want_interrupt)
- temp |= 3 << 20; /* enable interrupts */
- temp |= 0xC << 16; /* enable branch to address */
- /* disable wait on sync field, not used in DV :-( */
- temp |= data_size;
-
- il->q[0] = cpu_to_le32(temp);
- il->q[1] = cpu_to_le32(data_phys_addr);
- il->q[2] = cpu_to_le32(1); /* branchAddress (filled in later) and Z = 1 descriptor in next block */
- il->q[3] = cpu_to_le32(data_size); /* xferStatus & resCount; resCount must be initialized to data_size */
-}
-
-
-
-/*
- A "DMA descriptor block" consists of several contiguous DMA commands.
- struct DMA_descriptor_block encapsulates all of the commands necessary
- to send one packet of DV data.
-
- There are three different types of these blocks:
-
- 1) command to send an empty packet (CIP header only, no DV data):
-
- OUTPUT_MORE-Immediate <-- contains the iso header in-line
- OUTPUT_LAST <-- points to the CIP header
-
- 2) command to send a full packet when the DV data payload does NOT
- cross a page boundary:
-
- OUTPUT_MORE-Immediate <-- contains the iso header in-line
- OUTPUT_MORE <-- points to the CIP header
- OUTPUT_LAST <-- points to entire DV data payload
-
- 3) command to send a full packet when the DV payload DOES cross
- a page boundary:
-
- OUTPUT_MORE-Immediate <-- contains the iso header in-line
- OUTPUT_MORE <-- points to the CIP header
- OUTPUT_MORE <-- points to first part of DV data payload
- OUTPUT_LAST <-- points to second part of DV data payload
-
- This struct describes all three block types using unions.
-
- !!! It is vital that an even number of these descriptor blocks fit on one
- page of memory, since a block cannot cross a page boundary !!!
-
- */
-
-struct DMA_descriptor_block {
-
- union {
- struct {
- /* iso header, common to all output block types */
- struct output_more_immediate omi;
-
- union {
- /* empty packet */
- struct {
- struct output_last ol; /* CIP header */
- } empty;
-
- /* full packet */
- struct {
- struct output_more om; /* CIP header */
-
- union {
- /* payload does not cross page boundary */
- struct {
- struct output_last ol; /* data payload */
- } nocross;
-
- /* payload crosses page boundary */
- struct {
- struct output_more om; /* data payload */
- struct output_last ol; /* data payload */
- } cross;
- } u;
-
- } full;
- } u;
- } out;
-
- struct {
- struct input_last il;
- } in;
-
- } u;
-
- /* ensure that PAGE_SIZE % sizeof(struct DMA_descriptor_block) == 0
- by padding out to 128 bytes */
- u32 __pad__[12];
-};
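
The padding note above is a hard invariant rather than an optimization; a hedged sketch of how it could be verified at compile time follows (BUILD_BUG_ON from <linux/kernel.h>, wrapped in an otherwise unused inline function as an illustrative placement):

/* Sketch: assert the descriptor-block layout assumptions at build time. */
static inline void dma_descriptor_block_layout_checks(void)
{
	/* padded out to 128 bytes, as the comment above promises */
	BUILD_BUG_ON(sizeof(struct DMA_descriptor_block) != 128);
	/* a whole number of blocks must fit in a page, so no block can
	   straddle a page boundary */
	BUILD_BUG_ON(PAGE_SIZE % sizeof(struct DMA_descriptor_block) != 0);
}
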
-
-
- /* struct frame contains all data associated with one frame in the
- ringbuffer. These are allocated when the DMA context is initialized in
- do_dv1394_init(). They are re-used after the card finishes
- transmitting the frame. */
-
-struct video_card; /* forward declaration */
-
-struct frame {
-
- /* points to the struct video_card that owns this frame */
- struct video_card *video;
-
- /* index of this frame in video_card->frames[] */
- unsigned int frame_num;
-
- /* FRAME_CLEAR - DMA program not set up, waiting for data
- FRAME_READY - DMA program written, ready to transmit
-
- Changes to these should be locked against the interrupt
- */
- enum {
- FRAME_CLEAR = 0,
- FRAME_READY
- } state;
-
- /* whether this frame has been DMA'ed already; used only from
- the IRQ handler to determine whether the frame can be reset */
- int done;
-
-
- /* kernel virtual pointer to the start of this frame's data in
- the user ringbuffer. Use only for CPU access; to get the DMA
- bus address you must go through the video->user_dma mapping */
- unsigned long data;
-
- /* Max # of packets per frame */
-#define MAX_PACKETS 500
-
-
- /* a PAGE_SIZE memory pool for allocating CIP headers
- !header_pool must be aligned to PAGE_SIZE! */
- struct CIP_header *header_pool;
- dma_addr_t header_pool_dma;
-
-
- /* a physically contiguous memory pool for allocating DMA
- descriptor blocks; usually around 64KB in size
- !descriptor_pool must be aligned to PAGE_SIZE! */
- struct DMA_descriptor_block *descriptor_pool;
- dma_addr_t descriptor_pool_dma;
- unsigned long descriptor_pool_size;
-
-
- /* # of packets allocated for this frame */
- unsigned int n_packets;
-
-
- /* below are several pointers (kernel virtual addresses, not
- DMA bus addresses) to parts of the DMA program. These are
- set each time the DMA program is written in
- frame_prepare(). They are used later on, e.g. from the
- interrupt handler, to check the status of the frame */
-
- /* points to status/timestamp field of first DMA packet */
- /* (we'll check it later to monitor timestamp accuracy) */
- __le32 *frame_begin_timestamp;
-
- /* the timestamp we assigned to the first packet in the frame */
- u32 assigned_timestamp;
-
- /* pointer to the first packet's CIP header (where the timestamp goes) */
- struct CIP_header *cip_syt1;
-
- /* pointer to the second packet's CIP header
- (only set if the first packet was empty) */
- struct CIP_header *cip_syt2;
-
- /* in order to figure out what caused an interrupt,
- store pointers to the status fields of the two packets
- that can cause interrupts. We'll check these from the
- interrupt handler.
- */
- __le32 *mid_frame_timestamp;
- __le32 *frame_end_timestamp;
-
- /* branch address field of final packet. This is effectively
- the "tail" in the chain of DMA descriptor blocks.
- We will fill it with the address of the first DMA descriptor
- block in the subsequent frame, once it is ready.
- */
- __le32 *frame_end_branch;
-
- /* the number of descriptors in the first descriptor block
- of the frame. Needed to start DMA */
- int first_n_descriptors;
-};
-
-
-struct packet {
- __le16 timestamp;
- u16 invalid;
- u16 iso_header;
- __le16 data_length;
- u32 cip_h1;
- u32 cip_h2;
- unsigned char data[480];
- unsigned char padding[16]; /* force struct size =512 for page alignment */
-};
-
-
-/* allocate/free a frame */
-static struct frame* frame_new(unsigned int frame_num, struct video_card *video);
-static void frame_delete(struct frame *f);
-
-/* reset f so that it can be used again */
-static void frame_reset(struct frame *f);
-
-/* struct video_card contains all data associated with one instance
- of the dv1394 driver
-*/
-enum modes {
- MODE_RECEIVE,
- MODE_TRANSMIT
-};
-
-struct video_card {
-
- /* ohci card to which this instance corresponds */
- struct ti_ohci *ohci;
-
- /* OHCI card id; the link between the VFS inode and a specific video_card
- (essentially the device minor number) */
- int id;
-
- /* entry in dv1394_cards */
- struct list_head list;
-
- /* OHCI card IT DMA context number, -1 if not in use */
- int ohci_it_ctx;
- struct ohci1394_iso_tasklet it_tasklet;
-
- /* register offsets for current IT DMA context, 0 if not in use */
- u32 ohci_IsoXmitContextControlSet;
- u32 ohci_IsoXmitContextControlClear;
- u32 ohci_IsoXmitCommandPtr;
-
- /* OHCI card IR DMA context number, -1 if not in use */
- struct ohci1394_iso_tasklet ir_tasklet;
- int ohci_ir_ctx;
-
- /* register offsets for current IR DMA context, 0 if not in use */
- u32 ohci_IsoRcvContextControlSet;
- u32 ohci_IsoRcvContextControlClear;
- u32 ohci_IsoRcvCommandPtr;
- u32 ohci_IsoRcvContextMatch;
-
-
- /* CONCURRENCY CONTROL */
-
- /* there are THREE levels of locking associated with video_card. */
-
- /*
- 1) the 'open' flag - this prevents more than one process from
- opening the device. (the driver currently assumes only one opener).
- This is a regular int, but use test_and_set_bit() (on bit zero)
- for atomicity.
- */
- unsigned long open;
-
- /*
- 2) the spinlock - this provides mutual exclusion between the interrupt
- handler and process-context operations. Generally you must take the
- spinlock under the following conditions:
- 1) DMA (and hence the interrupt handler) may be running
- AND
- 2) you need to operate on the video_card, especially active_frame
-
- It is OK to play with video_card without taking the spinlock if
- you are certain that DMA is not running. Even if DMA is running,
- it is OK to *read* active_frame with the lock, then drop it
- immediately. This is safe because the interrupt handler will never
- advance active_frame onto a frame that is not READY (and the spinlock
- must be held while marking a frame READY).
-
- spinlock is also used to protect ohci_it_ctx and ohci_ir_ctx,
- which can be accessed from both process and interrupt context
- */
- spinlock_t spinlock;
-
- /* flag to prevent spurious interrupts (which OHCI seems to
- generate a lot :) from accessing the struct */
- int dma_running;
-
- /*
- 3) the sleeping mutex 'mtx' - this is used from process context only,
- to serialize various operations on the video_card. Even though only one
- open() is allowed, we still need to prevent multiple threads of execution
- from entering calls like read, write, ioctl, etc.
-
- I honestly can't think of a good reason to use dv1394 from several threads
- at once, but we need to serialize anyway to prevent oopses =).
-
- NOTE: if you need both spinlock and mtx, take mtx first to avoid deadlock!
- */
- struct mutex mtx;
-
- /* people waiting for buffer space, please form a line here... */
- wait_queue_head_t waitq;
-
- /* support asynchronous I/O signals (SIGIO) */
- struct fasync_struct *fasync;
-
- /* the large, non-contiguous (rvmalloc()) ringbuffer for DV
- data, exposed to user-space via mmap() */
- unsigned long dv_buf_size;
- struct dma_region dv_buf;
-
- /* next byte in the ringbuffer that a write() call will fill */
- size_t write_off;
-
- struct frame *frames[DV1394_MAX_FRAMES];
-
- /* n_frames also serves as an indicator that this struct video_card is
- initialized and ready to run DMA buffers */
-
- int n_frames;
-
- /* this is the frame that is currently "owned" by the OHCI DMA controller
- (set to -1 iff DMA is not running)
-
- ! must lock against the interrupt handler when accessing it !
-
- RULES:
-
- Only the interrupt handler may change active_frame if DMA
- is running; if not, process may change it
-
- If the next frame is READY, the interrupt handler will advance
- active_frame when the current frame is finished.
-
- If the next frame is CLEAR, the interrupt handler will re-transmit
- the current frame, and the dropped_frames counter will be incremented.
-
- The interrupt handler will NEVER advance active_frame to a
- frame that is not READY.
- */
- int active_frame;
- int first_run;
-
- /* the same locking rules apply to these three fields also: */
-
- /* altered ONLY from process context. Must check first_clear_frame->state;
- if it's READY, that means the ringbuffer is full with READY frames;
- if it's CLEAR, that means one or more ringbuffer frames are CLEAR */
- unsigned int first_clear_frame;
-
- /* altered both by process and interrupt */
- unsigned int n_clear_frames;
-
- /* only altered by the interrupt */
- unsigned int dropped_frames;
-
-
-
- /* the CIP accumulator and continuity counter are properties
- of the DMA stream as a whole (not a single frame), so they
- are stored here in the video_card */
-
- unsigned long cip_accum;
- unsigned long cip_n, cip_d;
- unsigned int syt_offset;
- unsigned int continuity_counter;
-
- enum pal_or_ntsc pal_or_ntsc;
-
- /* redundant, but simplifies the code somewhat */
- unsigned int frame_size; /* in bytes */
-
- /* the isochronous channel to use, -1 if video card is inactive */
- int channel;
-
-
- /* physically contiguous packet ringbuffer for receive */
- struct dma_region packet_buf;
- unsigned long packet_buf_size;
-
- unsigned int current_packet;
- int first_frame; /* received first start frame marker? */
- enum modes mode;
-};
-
-/*
- if the video_card is not initialized, then the ONLY fields that are valid are:
- ohci
- open
- n_frames
-*/
-
-static inline int video_card_initialized(struct video_card *v)
-{
- return v->n_frames > 0;
-}
-
-static int do_dv1394_init(struct video_card *video, struct dv1394_init *init);
-static int do_dv1394_init_default(struct video_card *video);
-static void do_dv1394_shutdown(struct video_card *video, int free_user_buf);
-
-
-/* NTSC empty packet rate accurate to within 0.01%,
- calibrated against a Sony DSR-40 DVCAM deck */
-
-#define CIP_N_NTSC 68000000
-#define CIP_D_NTSC 1068000000
-
-#define CIP_N_PAL 1
-#define CIP_D_PAL 16
-
-#endif /* _DV_1394_PRIVATE_H */
-
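
The CIP_N/CIP_D pairs above set the rate at which empty synchronization packets are interleaved with data packets during transmit: roughly one empty packet every d/n packets, i.e. about every 15.7 packets for NTSC and exactly one in every 16 for PAL. A hedged sketch of the accumulator logic that frame_prepare() in dv1394.c applies per packet:

/* Sketch of the rational-rate accumulator that decides when to send an
 * empty CIP packet; mirrors the per-packet test in frame_prepare(). */
static int next_packet_is_empty(unsigned long *accum,
				unsigned long n, unsigned long d)
{
	if (*accum > (d - n)) {         /* owe the bus an empty packet */
		*accum -= (d - n);
		return 1;
	}
	*accum += n;                    /* full data packet */
	return 0;
}

With n = CIP_N_NTSC and d = CIP_D_NTSC this yields one empty packet per roughly 15.7 packets (d/n); with the PAL pair it yields exactly one empty packet per 16.
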
diff --git a/drivers/ieee1394/dv1394.c b/drivers/ieee1394/dv1394.c
deleted file mode 100644
index c5a031b79d03..000000000000
--- a/drivers/ieee1394/dv1394.c
+++ /dev/null
@@ -1,2584 +0,0 @@
-/*
- * dv1394.c - DV input/output over IEEE 1394 on OHCI chips
- * Copyright (C)2001 Daniel Maas <dmaas@dcine.com>
- * receive by Dan Dennedy <dan@dennedy.org>
- *
- * based on:
- * video1394.c - video driver for OHCI 1394 boards
- * Copyright (C)1999,2000 Sebastien Rougeaux <sebastien.rougeaux@anu.edu.au>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software Foundation,
- * Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
- */
-
-/*
- OVERVIEW
-
- I designed dv1394 as a "pipe" that you can use to shoot DV onto a
- FireWire bus. In transmission mode, dv1394 does the following:
-
- 1. accepts contiguous frames of DV data from user-space, via write()
- or mmap() (see dv1394.h for the complete API)
- 2. wraps IEC 61883 packets around the DV data, inserting
- empty synchronization packets as necessary
- 3. assigns accurate SYT timestamps to the outgoing packets
- 4. shoots them out using the OHCI card's IT DMA engine
-
- Thanks to Dan Dennedy, we now have a receive mode that does the following:
-
- 1. accepts raw IEC 61883 packets from the OHCI card
- 2. re-assembles the DV data payloads into contiguous frames,
- discarding empty packets
- 3. sends the DV data to user-space via read() or mmap()
-*/
-
-/*
- TODO:
-
- - tunable frame-drop behavior: either loop last frame, or halt transmission
-
- - use a scatter/gather buffer for DMA programs (f->descriptor_pool)
- so that we don't rely on allocating 64KB of contiguous kernel memory
- via pci_alloc_consistent()
-
- DONE:
- - during reception, better handling of dropped frames and continuity errors
- - during reception, prevent DMA from bypassing the irq tasklets
- - reduce irq rate during reception (1/250 packets).
- - add many more internal buffers during reception with scatter/gather dma.
- - add dbc (continuity) checking on receive, increment status.dropped_frames
- if not continuous.
- - restart IT DMA after a bus reset
- - safely obtain and release ISO Tx channels in cooperation with OHCI driver
- - map received DIF blocks to their proper location in DV frame (ensure
- recovery if dropped packet)
- - handle bus resets gracefully (OHCI card seems to take care of this itself(!))
- - do not allow resizing the user_buf once allocated; eliminate nuke_buffer_mappings
- - eliminated #ifdef DV1394_DEBUG_LEVEL by inventing macros debug_printk and irq_printk
- - added wmb() and mb() to places where PCI read/write ordering needs to be enforced
- - set video->id correctly
- - store video_cards in an array indexed by OHCI card ID, rather than a list
- - implement DMA context allocation to cooperate with other users of the OHCI
- - fix all XXX showstoppers
- - disable IR/IT DMA interrupts on shutdown
- - flush pci writes to the card by issuing a read
- - character device dispatching
- - switch over to the new kernel DMA API (pci_map_*()) (* needs testing on platforms with IOMMU!)
- - keep all video_cards in a list (for open() via chardev), set file->private_data = video
- - dv1394_poll should indicate POLLIN when receiving buffers are available
- - add proc fs interface to set cip_n, cip_d, syt_offset, and video signal
- - expose xmit and recv as separate devices (not exclusive)
- - expose NTSC and PAL as separate devices (can be overridden)
-
-*/
-
-#include <linux/kernel.h>
-#include <linux/list.h>
-#include <linux/slab.h>
-#include <linux/interrupt.h>
-#include <linux/wait.h>
-#include <linux/errno.h>
-#include <linux/module.h>
-#include <linux/init.h>
-#include <linux/pci.h>
-#include <linux/fs.h>
-#include <linux/poll.h>
-#include <linux/mutex.h>
-#include <linux/bitops.h>
-#include <asm/byteorder.h>
-#include <asm/atomic.h>
-#include <asm/io.h>
-#include <asm/uaccess.h>
-#include <linux/delay.h>
-#include <asm/pgtable.h>
-#include <asm/page.h>
-#include <linux/sched.h>
-#include <linux/types.h>
-#include <linux/vmalloc.h>
-#include <linux/string.h>
-#include <linux/compat.h>
-#include <linux/cdev.h>
-
-#include "dv1394.h"
-#include "dv1394-private.h"
-#include "highlevel.h"
-#include "hosts.h"
-#include "ieee1394.h"
-#include "ieee1394_core.h"
-#include "ieee1394_hotplug.h"
-#include "ieee1394_types.h"
-#include "nodemgr.h"
-#include "ohci1394.h"
-
-/* DEBUG LEVELS:
- 0 - no debugging messages
- 1 - some debugging messages, but none during DMA frame transmission
- 2 - lots of messages, including during DMA frame transmission
- (will cause underflows if your machine is too slow!)
-*/
-
-#define DV1394_DEBUG_LEVEL 0
-
-/* for debugging use ONLY: allow more than one open() of the device */
-/* #define DV1394_ALLOW_MORE_THAN_ONE_OPEN 1 */
-
-#if DV1394_DEBUG_LEVEL >= 2
-#define irq_printk( args... ) printk( args )
-#else
-#define irq_printk( args... ) do {} while (0)
-#endif
-
-#if DV1394_DEBUG_LEVEL >= 1
-#define debug_printk( args... ) printk( args)
-#else
-#define debug_printk( args... ) do {} while (0)
-#endif
-
-/* issue a dummy PCI read to force the preceding write
- to be posted to the PCI bus immediately */
-
-static inline void flush_pci_write(struct ti_ohci *ohci)
-{
- mb();
- reg_read(ohci, OHCI1394_IsochronousCycleTimer);
-}
-
-static void it_tasklet_func(unsigned long data);
-static void ir_tasklet_func(unsigned long data);
-
-#ifdef CONFIG_COMPAT
-static long dv1394_compat_ioctl(struct file *file, unsigned int cmd,
- unsigned long arg);
-#endif
-
-/* GLOBAL DATA */
-
-/* list of all video_cards */
-static LIST_HEAD(dv1394_cards);
-static DEFINE_SPINLOCK(dv1394_cards_lock);
-
-/* translate from a struct file* to the corresponding struct video_card* */
-
-static inline struct video_card* file_to_video_card(struct file *file)
-{
- return file->private_data;
-}
-
-/*** FRAME METHODS *********************************************************/
-
-static void frame_reset(struct frame *f)
-{
- f->state = FRAME_CLEAR;
- f->done = 0;
- f->n_packets = 0;
- f->frame_begin_timestamp = NULL;
- f->assigned_timestamp = 0;
- f->cip_syt1 = NULL;
- f->cip_syt2 = NULL;
- f->mid_frame_timestamp = NULL;
- f->frame_end_timestamp = NULL;
- f->frame_end_branch = NULL;
-}
-
-static struct frame* frame_new(unsigned int frame_num, struct video_card *video)
-{
- struct frame *f = kmalloc(sizeof(*f), GFP_KERNEL);
- if (!f)
- return NULL;
-
- f->video = video;
- f->frame_num = frame_num;
-
- f->header_pool = pci_alloc_consistent(f->video->ohci->dev, PAGE_SIZE, &f->header_pool_dma);
- if (!f->header_pool) {
- printk(KERN_ERR "dv1394: failed to allocate CIP header pool\n");
- kfree(f);
- return NULL;
- }
-
- debug_printk("dv1394: frame_new: allocated CIP header pool at virt 0x%08lx (contig) dma 0x%08lx size %ld\n",
- (unsigned long) f->header_pool, (unsigned long) f->header_pool_dma, PAGE_SIZE);
-
- f->descriptor_pool_size = MAX_PACKETS * sizeof(struct DMA_descriptor_block);
- /* make it an even # of pages */
- f->descriptor_pool_size += PAGE_SIZE - (f->descriptor_pool_size%PAGE_SIZE);
-
- f->descriptor_pool = pci_alloc_consistent(f->video->ohci->dev,
- f->descriptor_pool_size,
- &f->descriptor_pool_dma);
- if (!f->descriptor_pool) {
- pci_free_consistent(f->video->ohci->dev, PAGE_SIZE, f->header_pool, f->header_pool_dma);
- kfree(f);
- return NULL;
- }
-
- debug_printk("dv1394: frame_new: allocated DMA program memory at virt 0x%08lx (contig) dma 0x%08lx size %ld\n",
- (unsigned long) f->descriptor_pool, (unsigned long) f->descriptor_pool_dma, f->descriptor_pool_size);
-
- f->data = 0;
- frame_reset(f);
-
- return f;
-}
-
-static void frame_delete(struct frame *f)
-{
- pci_free_consistent(f->video->ohci->dev, PAGE_SIZE, f->header_pool, f->header_pool_dma);
- pci_free_consistent(f->video->ohci->dev, f->descriptor_pool_size, f->descriptor_pool, f->descriptor_pool_dma);
- kfree(f);
-}
-
-
-
-
-/*
- frame_prepare() - build the DMA program for transmitting
-
- Frame_prepare() must be called OUTSIDE the video->spinlock.
- However, frame_prepare() must still be serialized, so
- it should be called WITH the video->mtx taken.
- */
-
-static void frame_prepare(struct video_card *video, unsigned int this_frame)
-{
- struct frame *f = video->frames[this_frame];
- int last_frame;
-
- struct DMA_descriptor_block *block;
- dma_addr_t block_dma;
- struct CIP_header *cip;
- dma_addr_t cip_dma;
-
- unsigned int n_descriptors, full_packets, packets_per_frame, payload_size;
-
- /* these flags denote packets that need special attention */
- int empty_packet, first_packet, last_packet, mid_packet;
-
- __le32 *branch_address, *last_branch_address = NULL;
- unsigned long data_p;
- int first_packet_empty = 0;
- u32 cycleTimer, ct_sec, ct_cyc, ct_off;
- unsigned long irq_flags;
-
- irq_printk("frame_prepare( %d ) ---------------------\n", this_frame);
-
- full_packets = 0;
-
-
-
- if (video->pal_or_ntsc == DV1394_PAL)
- packets_per_frame = DV1394_PAL_PACKETS_PER_FRAME;
- else
- packets_per_frame = DV1394_NTSC_PACKETS_PER_FRAME;
-
- while ( full_packets < packets_per_frame ) {
- empty_packet = first_packet = last_packet = mid_packet = 0;
-
- data_p = f->data + full_packets * 480;
-
- /************************************************/
- /* allocate a descriptor block and a CIP header */
- /************************************************/
-
- /* note: these should NOT cross a page boundary (DMA restriction) */
-
- if (f->n_packets >= MAX_PACKETS) {
- printk(KERN_ERR "dv1394: FATAL ERROR: max packet count exceeded\n");
- return;
- }
-
- /* the block surely won't cross a page boundary,
- since an even number of descriptor_blocks fit on a page */
- block = &(f->descriptor_pool[f->n_packets]);
-
- /* DMA address of the block = offset of block relative
- to the kernel base address of the descriptor pool
- + DMA base address of the descriptor pool */
- block_dma = ((unsigned long) block - (unsigned long) f->descriptor_pool) + f->descriptor_pool_dma;
-
-
- /* the whole CIP pool fits on one page, so no worries about boundaries */
- if ( ((unsigned long) &(f->header_pool[f->n_packets]) - (unsigned long) f->header_pool)
- > PAGE_SIZE) {
- printk(KERN_ERR "dv1394: FATAL ERROR: no room to allocate CIP header\n");
- return;
- }
-
- cip = &(f->header_pool[f->n_packets]);
-
- /* DMA address of the CIP header = offset of cip
- relative to kernel base address of the header pool
- + DMA base address of the header pool */
- cip_dma = (unsigned long) cip % PAGE_SIZE + f->header_pool_dma;
-
- /* is this an empty packet? */
-
- if (video->cip_accum > (video->cip_d - video->cip_n)) {
- empty_packet = 1;
- payload_size = 8;
- video->cip_accum -= (video->cip_d - video->cip_n);
- } else {
- payload_size = 488;
- video->cip_accum += video->cip_n;
- }
-
- /* there are three important packets each frame:
-
- the first packet in the frame - we ask the card to record the timestamp when
- this packet is actually sent, so we can monitor
- how accurate our timestamps are. Also, the first
- packet serves as a semaphore to let us know that
- it's OK to free the *previous* frame's DMA buffer
-
- the last packet in the frame - this packet is used to detect buffer underflows.
- if this is the last ready frame, the last DMA block
- will have a branch back to the beginning of the frame
- (so that the card will re-send the frame on underflow).
- if this branch gets taken, we know that at least one
- frame has been dropped. When the next frame is ready,
- the branch is pointed to its first packet, and the
- semaphore is disabled.
-
- a "mid" packet slightly before the end of the frame - this packet should trigger
- an interrupt so we can go and assign a timestamp to the first packet
- in the next frame. We don't use the very last packet in the frame
- for this purpose, because that would leave very little time to set
- the timestamp before DMA starts on the next frame.
- */
-
- if (f->n_packets == 0) {
- first_packet = 1;
- } else if ( full_packets == (packets_per_frame-1) ) {
- last_packet = 1;
- } else if (f->n_packets == packets_per_frame) {
- mid_packet = 1;
- }
-
-
- /********************/
- /* setup CIP header */
- /********************/
-
- /* the timestamp will be written later from the
- mid-frame interrupt handler. For now we just
- store the address of the CIP header(s) that
- need a timestamp. */
-
- /* first packet in the frame needs a timestamp */
- if (first_packet) {
- f->cip_syt1 = cip;
- if (empty_packet)
- first_packet_empty = 1;
-
- } else if (first_packet_empty && (f->n_packets == 1) ) {
- /* if the first packet was empty, the second
- packet's CIP header also needs a timestamp */
- f->cip_syt2 = cip;
- }
-
- fill_cip_header(cip,
- /* the node ID number of the OHCI card */
- reg_read(video->ohci, OHCI1394_NodeID) & 0x3F,
- video->continuity_counter,
- video->pal_or_ntsc,
- 0xFFFF /* the timestamp is filled in later */);
-
- /* advance counter, only for full packets */
- if ( ! empty_packet )
- video->continuity_counter++;
-
- /******************************/
- /* setup DMA descriptor block */
- /******************************/
-
- /* first descriptor - OUTPUT_MORE_IMMEDIATE, for the controller's IT header */
- fill_output_more_immediate( &(block->u.out.omi), 1, video->channel, 0, payload_size);
-
- if (empty_packet) {
- /* second descriptor - OUTPUT_LAST for CIP header */
- fill_output_last( &(block->u.out.u.empty.ol),
-
- /* want completion status on all interesting packets */
- (first_packet || mid_packet || last_packet) ? 1 : 0,
-
- /* want interrupts on all interesting packets */
- (first_packet || mid_packet || last_packet) ? 1 : 0,
-
- sizeof(struct CIP_header), /* data size */
- cip_dma);
-
- if (first_packet)
- f->frame_begin_timestamp = &(block->u.out.u.empty.ol.q[3]);
- else if (mid_packet)
- f->mid_frame_timestamp = &(block->u.out.u.empty.ol.q[3]);
- else if (last_packet) {
- f->frame_end_timestamp = &(block->u.out.u.empty.ol.q[3]);
- f->frame_end_branch = &(block->u.out.u.empty.ol.q[2]);
- }
-
- branch_address = &(block->u.out.u.empty.ol.q[2]);
- n_descriptors = 3;
- if (first_packet)
- f->first_n_descriptors = n_descriptors;
-
- } else { /* full packet */
-
- /* second descriptor - OUTPUT_MORE for CIP header */
- fill_output_more( &(block->u.out.u.full.om),
- sizeof(struct CIP_header), /* data size */
- cip_dma);
-
-
- /* third (and possibly fourth) descriptor - for DV data */
- /* the 480-byte payload can cross a page boundary; if so,
- we need to split it into two DMA descriptors */
-
- /* does the 480-byte data payload cross a page boundary? */
- if ( (PAGE_SIZE- ((unsigned long)data_p % PAGE_SIZE) ) < 480 ) {
-
- /* page boundary crossed */
-
- fill_output_more( &(block->u.out.u.full.u.cross.om),
- /* data size - how much of data_p fits on the first page */
- PAGE_SIZE - (data_p % PAGE_SIZE),
-
- /* DMA address of data_p */
- dma_region_offset_to_bus(&video->dv_buf,
- data_p - (unsigned long) video->dv_buf.kvirt));
-
- fill_output_last( &(block->u.out.u.full.u.cross.ol),
-
- /* want completion status on all interesting packets */
- (first_packet || mid_packet || last_packet) ? 1 : 0,
-
- /* want interrupt on all interesting packets */
- (first_packet || mid_packet || last_packet) ? 1 : 0,
-
- /* data size - remaining portion of data_p */
- 480 - (PAGE_SIZE - (data_p % PAGE_SIZE)),
-
- /* DMA address of data_p + PAGE_SIZE - (data_p % PAGE_SIZE) */
- dma_region_offset_to_bus(&video->dv_buf,
- data_p + PAGE_SIZE - (data_p % PAGE_SIZE) - (unsigned long) video->dv_buf.kvirt));
-
- if (first_packet)
- f->frame_begin_timestamp = &(block->u.out.u.full.u.cross.ol.q[3]);
- else if (mid_packet)
- f->mid_frame_timestamp = &(block->u.out.u.full.u.cross.ol.q[3]);
- else if (last_packet) {
- f->frame_end_timestamp = &(block->u.out.u.full.u.cross.ol.q[3]);
- f->frame_end_branch = &(block->u.out.u.full.u.cross.ol.q[2]);
- }
-
- branch_address = &(block->u.out.u.full.u.cross.ol.q[2]);
-
- n_descriptors = 5;
- if (first_packet)
- f->first_n_descriptors = n_descriptors;
-
- full_packets++;
-
- } else {
- /* fits on one page */
-
- fill_output_last( &(block->u.out.u.full.u.nocross.ol),
-
- /* want completion status on all interesting packets */
- (first_packet || mid_packet || last_packet) ? 1 : 0,
-
- /* want interrupt on all interesting packets */
- (first_packet || mid_packet || last_packet) ? 1 : 0,
-
- 480, /* data size (480 bytes of DV data) */
-
-
- /* DMA address of data_p */
- dma_region_offset_to_bus(&video->dv_buf,
- data_p - (unsigned long) video->dv_buf.kvirt));
-
- if (first_packet)
- f->frame_begin_timestamp = &(block->u.out.u.full.u.nocross.ol.q[3]);
- else if (mid_packet)
- f->mid_frame_timestamp = &(block->u.out.u.full.u.nocross.ol.q[3]);
- else if (last_packet) {
- f->frame_end_timestamp = &(block->u.out.u.full.u.nocross.ol.q[3]);
- f->frame_end_branch = &(block->u.out.u.full.u.nocross.ol.q[2]);
- }
-
- branch_address = &(block->u.out.u.full.u.nocross.ol.q[2]);
-
- n_descriptors = 4;
- if (first_packet)
- f->first_n_descriptors = n_descriptors;
-
- full_packets++;
- }
- }
-
- /* link this descriptor block into the DMA program by filling in
- the branch address of the previous block */
-
- /* note: we are not linked into the active DMA chain yet */
-
- if (last_branch_address) {
- *(last_branch_address) = cpu_to_le32(block_dma | n_descriptors);
- }
-
- last_branch_address = branch_address;
-
-
- f->n_packets++;
-
- }
-
- /* when we first assemble a new frame, set the final branch
- to loop back up to the top */
- *(f->frame_end_branch) = cpu_to_le32(f->descriptor_pool_dma | f->first_n_descriptors);
-
- /* make the latest version of this frame visible to the PCI card */
- dma_region_sync_for_device(&video->dv_buf, f->data - (unsigned long) video->dv_buf.kvirt, video->frame_size);
-
- /* lock against DMA interrupt */
- spin_lock_irqsave(&video->spinlock, irq_flags);
-
- f->state = FRAME_READY;
-
- video->n_clear_frames--;
-
- last_frame = video->first_clear_frame - 1;
- if (last_frame == -1)
- last_frame = video->n_frames-1;
-
- video->first_clear_frame = (video->first_clear_frame + 1) % video->n_frames;
-
- irq_printk(" frame %d prepared, active_frame = %d, n_clear_frames = %d, first_clear_frame = %d\n last=%d\n",
- this_frame, video->active_frame, video->n_clear_frames, video->first_clear_frame, last_frame);
-
- irq_printk(" begin_ts %08lx mid_ts %08lx end_ts %08lx end_br %08lx\n",
- (unsigned long) f->frame_begin_timestamp,
- (unsigned long) f->mid_frame_timestamp,
- (unsigned long) f->frame_end_timestamp,
- (unsigned long) f->frame_end_branch);
-
- if (video->active_frame != -1) {
-
- /* if DMA is already active, we are almost done */
- /* just link us onto the active DMA chain */
- if (video->frames[last_frame]->frame_end_branch) {
- u32 temp;
-
- /* point the previous frame's tail to this frame's head */
- *(video->frames[last_frame]->frame_end_branch) = cpu_to_le32(f->descriptor_pool_dma | f->first_n_descriptors);
-
- /* this write MUST precede the next one, or we could silently drop frames */
- wmb();
-
- /* disable the want_status semaphore on the last packet */
- temp = le32_to_cpu(*(video->frames[last_frame]->frame_end_branch - 2));
- temp &= 0xF7CFFFFF;
- *(video->frames[last_frame]->frame_end_branch - 2) = cpu_to_le32(temp);
-
- /* flush these writes to memory ASAP */
- flush_pci_write(video->ohci);
-
- /* NOTE:
- ideally the writes should be "atomic": if
- the OHCI card reads the want_status flag in
- between them, we'll falsely report a
- dropped frame. Hopefully this window is too
- small to really matter, and the consequence
- is rather harmless. */
-
-
- irq_printk(" new frame %d linked onto DMA chain\n", this_frame);
-
- } else {
- printk(KERN_ERR "dv1394: last frame not ready???\n");
- }
-
- } else {
-
- u32 transmit_sec, transmit_cyc;
- u32 ts_cyc;
-
- /* DMA is stopped, so this is the very first frame */
- video->active_frame = this_frame;
-
- /* set CommandPtr to address and size of first descriptor block */
- reg_write(video->ohci, video->ohci_IsoXmitCommandPtr,
- video->frames[video->active_frame]->descriptor_pool_dma |
- f->first_n_descriptors);
-
- /* assign a timestamp based on the current cycle time...
- We'll tell the card to begin DMA 100 cycles from now,
- and assign a timestamp 103 cycles from now */
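-
- /* worked example of the arithmetic below (illustrative): if the cycle
- timer currently reads cycle 7950, transmit_cyc wraps to 50 in the
- following second, ts_cyc becomes 53, and the assigned SYT timestamp
- is (53 & 0xF) << 12 = 0x5000 */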
-
- cycleTimer = reg_read(video->ohci, OHCI1394_IsochronousCycleTimer);
-
- ct_sec = cycleTimer >> 25;
- ct_cyc = (cycleTimer >> 12) & 0x1FFF;
- ct_off = cycleTimer & 0xFFF;
-
- transmit_sec = ct_sec;
- transmit_cyc = ct_cyc + 100;
-
- transmit_sec += transmit_cyc/8000;
- transmit_cyc %= 8000;
-
- ts_cyc = transmit_cyc + 3;
- ts_cyc %= 8000;
-
- f->assigned_timestamp = (ts_cyc&0xF) << 12;
-
- /* now actually write the timestamp into the appropriate CIP headers */
- if (f->cip_syt1) {
- f->cip_syt1->b[6] = f->assigned_timestamp >> 8;
- f->cip_syt1->b[7] = f->assigned_timestamp & 0xFF;
- }
- if (f->cip_syt2) {
- f->cip_syt2->b[6] = f->assigned_timestamp >> 8;
- f->cip_syt2->b[7] = f->assigned_timestamp & 0xFF;
- }
-
- /* --- start DMA --- */
-
- /* clear all bits in ContextControl register */
-
- reg_write(video->ohci, video->ohci_IsoXmitContextControlClear, 0xFFFFFFFF);
- wmb();
-
- /* the OHCI card has the ability to start ISO transmission on a
- particular cycle (start-on-cycle). This way we can ensure that
- the first DV frame will have an accurate timestamp.
-
- However, start-on-cycle only appears to work if the OHCI card
- is cycle master! Since the consequences of messing up the first
- timestamp are minimal*, just disable start-on-cycle for now.
-
- * my DV deck drops the first few frames before it "locks in;"
- so the first frame having an incorrect timestamp is inconsequential.
- */
-
-#if 0
- reg_write(video->ohci, video->ohci_IsoXmitContextControlSet,
- (1 << 31) /* enable start-on-cycle */
- | ( (transmit_sec & 0x3) << 29)
- | (transmit_cyc << 16));
- wmb();
-#endif
-
- video->dma_running = 1;
-
- /* set the 'run' bit */
- reg_write(video->ohci, video->ohci_IsoXmitContextControlSet, 0x8000);
- flush_pci_write(video->ohci);
-
- /* --- DMA should be running now --- */
-
- debug_printk(" Cycle = %4u ContextControl = %08x CmdPtr = %08x\n",
- (reg_read(video->ohci, OHCI1394_IsochronousCycleTimer) >> 12) & 0x1FFF,
- reg_read(video->ohci, video->ohci_IsoXmitContextControlSet),
- reg_read(video->ohci, video->ohci_IsoXmitCommandPtr));
-
- debug_printk(" DMA start - current cycle %4u, transmit cycle %4u (%2u), assigning ts cycle %2u\n",
- ct_cyc, transmit_cyc, transmit_cyc & 0xF, ts_cyc & 0xF);
-
-#if DV1394_DEBUG_LEVEL >= 2
- {
- /* check if DMA is really running */
- int i = 0;
- while (i < 20) {
- mb();
- mdelay(1);
- if (reg_read(video->ohci, video->ohci_IsoXmitContextControlSet) & (1 << 10)) {
- printk("DMA ACTIVE after %d msec\n", i);
- break;
- }
- i++;
- }
-
- printk("set = %08x, cmdPtr = %08x\n",
- reg_read(video->ohci, video->ohci_IsoXmitContextControlSet),
- reg_read(video->ohci, video->ohci_IsoXmitCommandPtr)
- );
-
- if ( ! (reg_read(video->ohci, video->ohci_IsoXmitContextControlSet) & (1 << 10)) ) {
- printk("DMA did NOT go active after 20ms, event = %x\n",
- reg_read(video->ohci, video->ohci_IsoXmitContextControlSet) & 0x1F);
- } else
- printk("DMA is RUNNING!\n");
- }
-#endif
-
- }
-
-
- spin_unlock_irqrestore(&video->spinlock, irq_flags);
-}
-
-
-
-/*** RECEIVE FUNCTIONS *****************************************************/
-
-/*
- frame method put_packet
-
- map and copy the packet data to its location in the frame
- based upon DIF section and sequence
-*/
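-
-/* worked example of the offset arithmetic below (illustrative): a video
- packet (section type 4) with dif_sequence = 3 and dif_block = 30 is
- copied to 3*150*80 + (7 + 30/15 + 30)*80 = 36000 + 3120 = 39120 bytes
- into the frame, i.e. past the header, subcode, VAUX and interleaved
- audio blocks that precede it within that DIF sequence */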
-
- static inline void
- frame_put_packet(struct frame *f, struct packet *p)
-{
- int section_type = p->data[0] >> 5; /* section type is in bits 5 - 7 */
- int dif_sequence = p->data[1] >> 4; /* dif sequence number is in bits 4 - 7 */
- int dif_block = p->data[2];
-
- /* sanity check */
- if (dif_sequence > 11 || dif_block > 149) return;
-
- switch (section_type) {
- case 0: /* 1 Header block */
- memcpy( (void *) f->data + dif_sequence * 150 * 80, p->data, 480);
- break;
-
- case 1: /* 2 Subcode blocks */
- memcpy( (void *) f->data + dif_sequence * 150 * 80 + (1 + dif_block) * 80, p->data, 480);
- break;
-
- case 2: /* 3 VAUX blocks */
- memcpy( (void *) f->data + dif_sequence * 150 * 80 + (3 + dif_block) * 80, p->data, 480);
- break;
-
- case 3: /* 9 Audio blocks interleaved with video */
- memcpy( (void *) f->data + dif_sequence * 150 * 80 + (6 + dif_block * 16) * 80, p->data, 480);
- break;
-
- case 4: /* 135 Video blocks interleaved with audio */
- memcpy( (void *) f->data + dif_sequence * 150 * 80 + (7 + (dif_block / 15) + dif_block) * 80, p->data, 480);
- break;
-
- default: /* we can not handle any other data */
- break;
- }
-}
-
-
-static void start_dma_receive(struct video_card *video)
-{
- if (video->first_run == 1) {
- video->first_run = 0;
-
- /* start DMA once all of the frames are READY */
- video->n_clear_frames = 0;
- video->first_clear_frame = -1;
- video->current_packet = 0;
- video->active_frame = 0;
-
- /* reset iso recv control register */
- reg_write(video->ohci, video->ohci_IsoRcvContextControlClear, 0xFFFFFFFF);
- wmb();
-
- /* clear bufferFill, set isochHeader and speed (0=100) */
- reg_write(video->ohci, video->ohci_IsoRcvContextControlSet, 0x40000000);
-
- /* match on all tags, listen on channel */
- reg_write(video->ohci, video->ohci_IsoRcvContextMatch, 0xf0000000 | video->channel);
-
- /* address and first descriptor block + Z=1 */
- reg_write(video->ohci, video->ohci_IsoRcvCommandPtr,
- video->frames[0]->descriptor_pool_dma | 1); /* Z=1 */
- wmb();
-
- video->dma_running = 1;
-
- /* run */
- reg_write(video->ohci, video->ohci_IsoRcvContextControlSet, 0x8000);
- flush_pci_write(video->ohci);
-
- debug_printk("dv1394: DMA started\n");
-
-#if DV1394_DEBUG_LEVEL >= 2
- {
- int i;
-
- for (i = 0; i < 1000; ++i) {
- mdelay(1);
- if (reg_read(video->ohci, video->ohci_IsoRcvContextControlSet) & (1 << 10)) {
- printk("DMA ACTIVE after %d msec\n", i);
- break;
- }
- }
- if ( reg_read(video->ohci, video->ohci_IsoRcvContextControlSet) & (1 << 11) ) {
- printk("DEAD, event = %x\n",
- reg_read(video->ohci, video->ohci_IsoRcvContextControlSet) & 0x1F);
- } else
- printk("RUNNING!\n");
- }
-#endif
- } else if ( reg_read(video->ohci, video->ohci_IsoRcvContextControlSet) & (1 << 11) ) {
- debug_printk("DEAD, event = %x\n",
- reg_read(video->ohci, video->ohci_IsoRcvContextControlSet) & 0x1F);
-
- /* wake */
- reg_write(video->ohci, video->ohci_IsoRcvContextControlSet, (1 << 12));
- }
-}
-
-
-/*
- receive_packets() - build the DMA program for receiving
-*/
-
-static void receive_packets(struct video_card *video)
-{
- struct DMA_descriptor_block *block = NULL;
- dma_addr_t block_dma = 0;
- struct packet *data = NULL;
- dma_addr_t data_dma = 0;
- __le32 *last_branch_address = NULL;
- unsigned long irq_flags;
- int want_interrupt = 0;
- struct frame *f = NULL;
- int i, j;
-
- spin_lock_irqsave(&video->spinlock, irq_flags);
-
- for (j = 0; j < video->n_frames; j++) {
-
- /* connect frames */
- if (j > 0 && f != NULL && f->frame_end_branch != NULL)
- *(f->frame_end_branch) = cpu_to_le32(video->frames[j]->descriptor_pool_dma | 1); /* set Z=1 */
-
- f = video->frames[j];
-
- for (i = 0; i < MAX_PACKETS; i++) {
- /* locate a descriptor block and packet from the buffer */
- block = &(f->descriptor_pool[i]);
- block_dma = ((unsigned long) block - (unsigned long) f->descriptor_pool) + f->descriptor_pool_dma;
-
- data = ((struct packet*)video->packet_buf.kvirt) + f->frame_num * MAX_PACKETS + i;
- data_dma = dma_region_offset_to_bus( &video->packet_buf,
- ((unsigned long) data - (unsigned long) video->packet_buf.kvirt) );
-
- /* setup DMA descriptor block */
- want_interrupt = ((i % (MAX_PACKETS/2)) == 0 || i == (MAX_PACKETS-1));
- fill_input_last( &(block->u.in.il), want_interrupt, 512, data_dma);
-
- /* link descriptors */
- last_branch_address = f->frame_end_branch;
-
- if (last_branch_address != NULL)
- *(last_branch_address) = cpu_to_le32(block_dma | 1); /* set Z=1 */
-
- f->frame_end_branch = &(block->u.in.il.q[2]);
- }
-
- } /* next j */
-
- spin_unlock_irqrestore(&video->spinlock, irq_flags);
-
-}
-
-
-
-/*** MANAGEMENT FUNCTIONS **************************************************/
-
-static int do_dv1394_init(struct video_card *video, struct dv1394_init *init)
-{
- unsigned long flags, new_buf_size;
- int i;
- u64 chan_mask;
- int retval = -EINVAL;
-
- debug_printk("dv1394: initialising %d\n", video->id);
- if (init->api_version != DV1394_API_VERSION)
- return -EINVAL;
-
- /* first sanitize all the parameters */
- if ( (init->n_frames < 2) || (init->n_frames > DV1394_MAX_FRAMES) )
- return -EINVAL;
-
- if ( (init->format != DV1394_NTSC) && (init->format != DV1394_PAL) )
- return -EINVAL;
-
- if ( (init->syt_offset == 0) || (init->syt_offset > 50) )
- /* default SYT offset is 3 cycles */
- init->syt_offset = 3;
-
- if (init->channel > 63)
- init->channel = 63;
-
- chan_mask = (u64)1 << init->channel;
-
- /* calculate what size DMA buffer is needed */
- if (init->format == DV1394_NTSC)
- new_buf_size = DV1394_NTSC_FRAME_SIZE * init->n_frames;
- else
- new_buf_size = DV1394_PAL_FRAME_SIZE * init->n_frames;
-
- /* round up to PAGE_SIZE */
- if (new_buf_size % PAGE_SIZE) new_buf_size += PAGE_SIZE - (new_buf_size % PAGE_SIZE);
-
- /* the DMA buffer may only be allocated once; re-sizing it afterwards is not allowed */
- if (video->dv_buf.kvirt && video->dv_buf_size != new_buf_size) {
- printk(KERN_ERR "dv1394: re-sizing the DMA buffer is not allowed\n");
- return -EINVAL;
- }
-
- /* shutdown the card if it's currently active */
- /* (the card should not be reset if the parameters are screwy) */
-
- do_dv1394_shutdown(video, 0);
-
- /* try to claim the ISO channel */
- spin_lock_irqsave(&video->ohci->IR_channel_lock, flags);
- if (video->ohci->ISO_channel_usage & chan_mask) {
- spin_unlock_irqrestore(&video->ohci->IR_channel_lock, flags);
- retval = -EBUSY;
- goto err;
- }
- video->ohci->ISO_channel_usage |= chan_mask;
- spin_unlock_irqrestore(&video->ohci->IR_channel_lock, flags);
-
- video->channel = init->channel;
-
- /* initialize misc. fields of video */
- video->n_frames = init->n_frames;
- video->pal_or_ntsc = init->format;
-
- video->cip_accum = 0;
- video->continuity_counter = 0;
-
- video->active_frame = -1;
- video->first_clear_frame = 0;
- video->n_clear_frames = video->n_frames;
- video->dropped_frames = 0;
-
- video->write_off = 0;
-
- video->first_run = 1;
- video->current_packet = -1;
- video->first_frame = 0;
-
- if (video->pal_or_ntsc == DV1394_NTSC) {
- video->cip_n = init->cip_n != 0 ? init->cip_n : CIP_N_NTSC;
- video->cip_d = init->cip_d != 0 ? init->cip_d : CIP_D_NTSC;
- video->frame_size = DV1394_NTSC_FRAME_SIZE;
- } else {
- video->cip_n = init->cip_n != 0 ? init->cip_n : CIP_N_PAL;
- video->cip_d = init->cip_d != 0 ? init->cip_d : CIP_D_PAL;
- video->frame_size = DV1394_PAL_FRAME_SIZE;
- }
-
- video->syt_offset = init->syt_offset;
-
- /* find and claim DMA contexts on the OHCI card */
-
- if (video->ohci_it_ctx == -1) {
- ohci1394_init_iso_tasklet(&video->it_tasklet, OHCI_ISO_TRANSMIT,
- it_tasklet_func, (unsigned long) video);
-
- if (ohci1394_register_iso_tasklet(video->ohci, &video->it_tasklet) < 0) {
- printk(KERN_ERR "dv1394: could not find an available IT DMA context\n");
- retval = -EBUSY;
- goto err;
- }
-
- video->ohci_it_ctx = video->it_tasklet.context;
- debug_printk("dv1394: claimed IT DMA context %d\n", video->ohci_it_ctx);
- }
-
- if (video->ohci_ir_ctx == -1) {
- ohci1394_init_iso_tasklet(&video->ir_tasklet, OHCI_ISO_RECEIVE,
- ir_tasklet_func, (unsigned long) video);
-
- if (ohci1394_register_iso_tasklet(video->ohci, &video->ir_tasklet) < 0) {
- printk(KERN_ERR "dv1394: could not find an available IR DMA context\n");
- retval = -EBUSY;
- goto err;
- }
- video->ohci_ir_ctx = video->ir_tasklet.context;
- debug_printk("dv1394: claimed IR DMA context %d\n", video->ohci_ir_ctx);
- }
-
- /* allocate struct frames */
- for (i = 0; i < init->n_frames; i++) {
- video->frames[i] = frame_new(i, video);
-
- if (!video->frames[i]) {
- printk(KERN_ERR "dv1394: Cannot allocate frame structs\n");
- retval = -ENOMEM;
- goto err;
- }
- }
-
- if (!video->dv_buf.kvirt) {
- /* allocate the ringbuffer */
- retval = dma_region_alloc(&video->dv_buf, new_buf_size, video->ohci->dev, PCI_DMA_TODEVICE);
- if (retval)
- goto err;
-
- video->dv_buf_size = new_buf_size;
-
- debug_printk("dv1394: Allocated %d frame buffers, total %u pages (%u DMA pages), %lu bytes\n",
- video->n_frames, video->dv_buf.n_pages,
- video->dv_buf.n_dma_pages, video->dv_buf_size);
- }
-
- /* set up the frame->data pointers */
- for (i = 0; i < video->n_frames; i++)
- video->frames[i]->data = (unsigned long) video->dv_buf.kvirt + i * video->frame_size;
-
- if (!video->packet_buf.kvirt) {
- /* allocate packet buffer */
- video->packet_buf_size = sizeof(struct packet) * video->n_frames * MAX_PACKETS;
- if (video->packet_buf_size % PAGE_SIZE)
- video->packet_buf_size += PAGE_SIZE - (video->packet_buf_size % PAGE_SIZE);
-
- retval = dma_region_alloc(&video->packet_buf, video->packet_buf_size,
- video->ohci->dev, PCI_DMA_FROMDEVICE);
- if (retval)
- goto err;
-
- debug_printk("dv1394: Allocated %d packets in buffer, total %u pages (%u DMA pages), %lu bytes\n",
- video->n_frames*MAX_PACKETS, video->packet_buf.n_pages,
- video->packet_buf.n_dma_pages, video->packet_buf_size);
- }
-
- /* set up register offsets for IT context */
- /* IT DMA context registers are spaced 16 bytes apart */
- video->ohci_IsoXmitContextControlSet = OHCI1394_IsoXmitContextControlSet+16*video->ohci_it_ctx;
- video->ohci_IsoXmitContextControlClear = OHCI1394_IsoXmitContextControlClear+16*video->ohci_it_ctx;
- video->ohci_IsoXmitCommandPtr = OHCI1394_IsoXmitCommandPtr+16*video->ohci_it_ctx;
-
- /* enable interrupts for IT context */
- reg_write(video->ohci, OHCI1394_IsoXmitIntMaskSet, (1 << video->ohci_it_ctx));
- debug_printk("dv1394: interrupts enabled for IT context %d\n", video->ohci_it_ctx);
-
- /* set up register offsets for IR context */
- /* IR DMA context registers are spaced 32 bytes apart */
- video->ohci_IsoRcvContextControlSet = OHCI1394_IsoRcvContextControlSet+32*video->ohci_ir_ctx;
- video->ohci_IsoRcvContextControlClear = OHCI1394_IsoRcvContextControlClear+32*video->ohci_ir_ctx;
- video->ohci_IsoRcvCommandPtr = OHCI1394_IsoRcvCommandPtr+32*video->ohci_ir_ctx;
- video->ohci_IsoRcvContextMatch = OHCI1394_IsoRcvContextMatch+32*video->ohci_ir_ctx;
-
- /* enable interrupts for IR context */
- reg_write(video->ohci, OHCI1394_IsoRecvIntMaskSet, (1 << video->ohci_ir_ctx) );
- debug_printk("dv1394: interrupts enabled for IR context %d\n", video->ohci_ir_ctx);
-
- return 0;
-
-err:
- do_dv1394_shutdown(video, 1);
- return retval;
-}
-
-/* if the user doesn't bother to call ioctl(INIT) before starting
- mmap() or read()/write(), just give him some default values */
-
-static int do_dv1394_init_default(struct video_card *video)
-{
- struct dv1394_init init;
-
- init.api_version = DV1394_API_VERSION;
- init.n_frames = DV1394_MAX_FRAMES / 4;
- init.channel = video->channel;
- init.format = video->pal_or_ntsc;
- init.cip_n = video->cip_n;
- init.cip_d = video->cip_d;
- init.syt_offset = video->syt_offset;
-
- return do_dv1394_init(video, &init);
-}
-
-/* do NOT call from interrupt context */
-static void stop_dma(struct video_card *video)
-{
- unsigned long flags;
- int i;
-
- /* no interrupts */
- spin_lock_irqsave(&video->spinlock, flags);
-
- video->dma_running = 0;
-
- if ( (video->ohci_it_ctx == -1) && (video->ohci_ir_ctx == -1) )
- goto out;
-
- /* stop DMA if in progress */
- if ( (video->active_frame != -1) ||
- (reg_read(video->ohci, video->ohci_IsoXmitContextControlClear) & (1 << 10)) ||
- (reg_read(video->ohci, video->ohci_IsoRcvContextControlClear) & (1 << 10)) ) {
-
- /* clear the .run bits */
- reg_write(video->ohci, video->ohci_IsoXmitContextControlClear, (1 << 15));
- reg_write(video->ohci, video->ohci_IsoRcvContextControlClear, (1 << 15));
- flush_pci_write(video->ohci);
-
- video->active_frame = -1;
- video->first_run = 1;
-
- /* wait until DMA really stops */
- i = 0;
- while (i < 1000) {
-
- /* wait 0.1 millisecond */
- udelay(100);
-
- if ( (reg_read(video->ohci, video->ohci_IsoXmitContextControlClear) & (1 << 10)) ||
- (reg_read(video->ohci, video->ohci_IsoRcvContextControlClear) & (1 << 10)) ) {
- /* still active */
- debug_printk("dv1394: stop_dma: DMA not stopped yet\n" );
- mb();
- } else {
- debug_printk("dv1394: stop_dma: DMA stopped safely after %d ms\n", i/10);
- break;
- }
-
- i++;
- }
-
- if (i == 1000) {
- printk(KERN_ERR "dv1394: stop_dma: DMA still going after %d ms!\n", i/10);
- }
- }
- else
- debug_printk("dv1394: stop_dma: already stopped.\n");
-
-out:
- spin_unlock_irqrestore(&video->spinlock, flags);
-}
-
-
-
-static void do_dv1394_shutdown(struct video_card *video, int free_dv_buf)
-{
- int i;
-
- debug_printk("dv1394: shutdown...\n");
-
- /* stop DMA if in progress */
- stop_dma(video);
-
- /* release the DMA contexts */
- if (video->ohci_it_ctx != -1) {
- video->ohci_IsoXmitContextControlSet = 0;
- video->ohci_IsoXmitContextControlClear = 0;
- video->ohci_IsoXmitCommandPtr = 0;
-
- /* disable interrupts for IT context */
- reg_write(video->ohci, OHCI1394_IsoXmitIntMaskClear, (1 << video->ohci_it_ctx));
-
- /* remove tasklet */
- ohci1394_unregister_iso_tasklet(video->ohci, &video->it_tasklet);
- debug_printk("dv1394: IT context %d released\n", video->ohci_it_ctx);
- video->ohci_it_ctx = -1;
- }
-
- if (video->ohci_ir_ctx != -1) {
- video->ohci_IsoRcvContextControlSet = 0;
- video->ohci_IsoRcvContextControlClear = 0;
- video->ohci_IsoRcvCommandPtr = 0;
- video->ohci_IsoRcvContextMatch = 0;
-
- /* disable interrupts for IR context */
- reg_write(video->ohci, OHCI1394_IsoRecvIntMaskClear, (1 << video->ohci_ir_ctx));
-
- /* remove tasklet */
- ohci1394_unregister_iso_tasklet(video->ohci, &video->ir_tasklet);
- debug_printk("dv1394: IR context %d released\n", video->ohci_ir_ctx);
- video->ohci_ir_ctx = -1;
- }
-
- /* release the ISO channel */
- if (video->channel != -1) {
- u64 chan_mask;
- unsigned long flags;
-
- chan_mask = (u64)1 << video->channel;
-
- spin_lock_irqsave(&video->ohci->IR_channel_lock, flags);
- video->ohci->ISO_channel_usage &= ~(chan_mask);
- spin_unlock_irqrestore(&video->ohci->IR_channel_lock, flags);
-
- video->channel = -1;
- }
-
- /* free the frame structs */
- for (i = 0; i < DV1394_MAX_FRAMES; i++) {
- if (video->frames[i])
- frame_delete(video->frames[i]);
- video->frames[i] = NULL;
- }
-
- video->n_frames = 0;
-
- /* we can't free the DMA buffer unless it is guaranteed that
- no more user-space mappings exist */
-
- if (free_dv_buf) {
- dma_region_free(&video->dv_buf);
- video->dv_buf_size = 0;
- }
-
- /* free packet buffer */
- dma_region_free(&video->packet_buf);
- video->packet_buf_size = 0;
-
- debug_printk("dv1394: shutdown OK\n");
-}
-
-/*
- **********************************
- *** MMAP() THEORY OF OPERATION ***
- **********************************
-
- The ringbuffer cannot be re-allocated or freed while
- a user program maintains a mapping of it. (note that a mapping
- can persist even after the device fd is closed!)
-
- So, only let the user process allocate the DMA buffer once.
- To resize or deallocate it, you must close the device file
- and open it again.
-
- Previously Dan M. hacked out a scheme that allowed the DMA
- buffer to change by forcefully unmapping it from the user's
- address space. It was prone to error because it's very hard to
- track all the places the buffer could have been mapped (we
- would have had to walk the vma list of every process in the
- system to be sure we found all the mappings!). Instead, we
- force the user to choose one buffer size and stick with
- it. This small sacrifice is worth the huge reduction in
- error-prone code in dv1394.
-*/
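-
-/*
- Rough user-space sketch of this contract (illustrative only; error
- handling omitted and the four-frame count is an arbitrary choice):
-
- struct dv1394_init init = { DV1394_API_VERSION, 63, 4, DV1394_NTSC, 0, 0, 0 };
- int fd = open("/dev/dv1394", O_RDWR);
- ioctl(fd, DV1394_IOC_INIT, &init);
- void *ring = mmap(NULL, 4 * DV1394_NTSC_FRAME_SIZE,
- PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
-
- To change the buffer size, munmap() and close() first, then open and
- initialize again; a second init requesting a different size on the
- same device is rejected with -EINVAL.
-*/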
-
-static int dv1394_mmap(struct file *file, struct vm_area_struct *vma)
-{
- struct video_card *video = file_to_video_card(file);
- int retval = -EINVAL;
-
- /*
- * We cannot use the blocking variant mutex_lock here because .mmap
- * is called with mmap_sem held, while .ioctl, .read, .write acquire
- * video->mtx and subsequently call copy_to/from_user which will
- * grab mmap_sem in case of a page fault.
- */
- if (!mutex_trylock(&video->mtx))
- return -EAGAIN;
-
- if ( ! video_card_initialized(video) ) {
- retval = do_dv1394_init_default(video);
- if (retval)
- goto out;
- }
-
- retval = dma_region_mmap(&video->dv_buf, file, vma);
-out:
- mutex_unlock(&video->mtx);
- return retval;
-}
-
-/*** DEVICE FILE INTERFACE *************************************************/
-
-/* no need to serialize, multiple threads OK */
-static unsigned int dv1394_poll(struct file *file, struct poll_table_struct *wait)
-{
- struct video_card *video = file_to_video_card(file);
- unsigned int mask = 0;
- unsigned long flags;
-
- poll_wait(file, &video->waitq, wait);
-
- spin_lock_irqsave(&video->spinlock, flags);
- if ( video->n_frames == 0 ) {
-
- } else if ( video->active_frame == -1 ) {
- /* nothing going on */
- mask |= POLLOUT;
- } else {
- /* any clear/ready buffers? */
- if (video->n_clear_frames >0)
- mask |= POLLOUT | POLLIN;
- }
- spin_unlock_irqrestore(&video->spinlock, flags);
-
- return mask;
-}
-
-static int dv1394_fasync(int fd, struct file *file, int on)
-{
- /* I just copied this code verbatim from Alan Cox's mouse driver example
- (Documentation/DocBook/) */
-
- struct video_card *video = file_to_video_card(file);
-
- return fasync_helper(fd, file, on, &video->fasync);
-}
-
-static ssize_t dv1394_write(struct file *file, const char __user *buffer, size_t count, loff_t *ppos)
-{
- struct video_card *video = file_to_video_card(file);
- DECLARE_WAITQUEUE(wait, current);
- ssize_t ret;
- size_t cnt;
- unsigned long flags;
- int target_frame;
-
- /* serialize this to prevent multi-threaded mayhem */
- if (file->f_flags & O_NONBLOCK) {
- if (!mutex_trylock(&video->mtx))
- return -EAGAIN;
- } else {
- if (mutex_lock_interruptible(&video->mtx))
- return -ERESTARTSYS;
- }
-
- if ( !video_card_initialized(video) ) {
- ret = do_dv1394_init_default(video);
- if (ret) {
- mutex_unlock(&video->mtx);
- return ret;
- }
- }
-
- ret = 0;
- add_wait_queue(&video->waitq, &wait);
-
- while (count > 0) {
-
- /* must set TASK_INTERRUPTIBLE *before* checking for free
- buffers; otherwise we could miss a wakeup if the interrupt
- fires between the check and the schedule() */
-
- set_current_state(TASK_INTERRUPTIBLE);
-
- spin_lock_irqsave(&video->spinlock, flags);
-
- target_frame = video->first_clear_frame;
-
- spin_unlock_irqrestore(&video->spinlock, flags);
-
- if (video->frames[target_frame]->state == FRAME_CLEAR) {
-
- /* how much room is left in the target frame buffer */
- cnt = video->frame_size - (video->write_off - target_frame * video->frame_size);
-
- } else {
- /* buffer is already used */
- cnt = 0;
- }
-
- if (cnt > count)
- cnt = count;
-
- if (cnt <= 0) {
- /* no room left, gotta wait */
- if (file->f_flags & O_NONBLOCK) {
- if (!ret)
- ret = -EAGAIN;
- break;
- }
- if (signal_pending(current)) {
- if (!ret)
- ret = -ERESTARTSYS;
- break;
- }
-
- schedule();
-
- continue; /* start over from 'while(count > 0)...' */
- }
-
- if (copy_from_user(video->dv_buf.kvirt + video->write_off, buffer, cnt)) {
- if (!ret)
- ret = -EFAULT;
- break;
- }
-
- video->write_off = (video->write_off + cnt) % (video->n_frames * video->frame_size);
-
- count -= cnt;
- buffer += cnt;
- ret += cnt;
-
- if (video->write_off == video->frame_size * ((target_frame + 1) % video->n_frames))
- frame_prepare(video, target_frame);
- }
-
- remove_wait_queue(&video->waitq, &wait);
- set_current_state(TASK_RUNNING);
- mutex_unlock(&video->mtx);
- return ret;
-}
-
-
-static ssize_t dv1394_read(struct file *file, char __user *buffer, size_t count, loff_t *ppos)
-{
- struct video_card *video = file_to_video_card(file);
- DECLARE_WAITQUEUE(wait, current);
- ssize_t ret;
- size_t cnt;
- unsigned long flags;
- int target_frame;
-
- /* serialize this to prevent multi-threaded mayhem */
- if (file->f_flags & O_NONBLOCK) {
- if (!mutex_trylock(&video->mtx))
- return -EAGAIN;
- } else {
- if (mutex_lock_interruptible(&video->mtx))
- return -ERESTARTSYS;
- }
-
- if ( !video_card_initialized(video) ) {
- ret = do_dv1394_init_default(video);
- if (ret) {
- mutex_unlock(&video->mtx);
- return ret;
- }
- video->continuity_counter = -1;
-
- receive_packets(video);
-
- start_dma_receive(video);
- }
-
- ret = 0;
- add_wait_queue(&video->waitq, &wait);
-
- while (count > 0) {
-
- /* must set TASK_INTERRUPTIBLE *before* checking for free
- buffers; otherwise we could miss a wakeup if the interrupt
- fires between the check and the schedule() */
-
- set_current_state(TASK_INTERRUPTIBLE);
-
- spin_lock_irqsave(&video->spinlock, flags);
-
- target_frame = video->first_clear_frame;
-
- spin_unlock_irqrestore(&video->spinlock, flags);
-
- if (target_frame >= 0 &&
- video->n_clear_frames > 0 &&
- video->frames[target_frame]->state == FRAME_CLEAR) {
-
- /* how much room is left in the target frame buffer */
- cnt = video->frame_size - (video->write_off - target_frame * video->frame_size);
-
- } else {
- /* buffer is already used */
- cnt = 0;
- }
-
- if (cnt > count)
- cnt = count;
-
- if (cnt <= 0) {
- /* no room left, gotta wait */
- if (file->f_flags & O_NONBLOCK) {
- if (!ret)
- ret = -EAGAIN;
- break;
- }
- if (signal_pending(current)) {
- if (!ret)
- ret = -ERESTARTSYS;
- break;
- }
-
- schedule();
-
- continue; /* start over from 'while(count > 0)...' */
- }
-
- if (copy_to_user(buffer, video->dv_buf.kvirt + video->write_off, cnt)) {
- if (!ret)
- ret = -EFAULT;
- break;
- }
-
- video->write_off = (video->write_off + cnt) % (video->n_frames * video->frame_size);
-
- count -= cnt;
- buffer += cnt;
- ret += cnt;
-
- if (video->write_off == video->frame_size * ((target_frame + 1) % video->n_frames)) {
- spin_lock_irqsave(&video->spinlock, flags);
- video->n_clear_frames--;
- video->first_clear_frame = (video->first_clear_frame + 1) % video->n_frames;
- spin_unlock_irqrestore(&video->spinlock, flags);
- }
- }
-
- remove_wait_queue(&video->waitq, &wait);
- set_current_state(TASK_RUNNING);
- mutex_unlock(&video->mtx);
- return ret;
-}
-
-
-/*** DEVICE IOCTL INTERFACE ************************************************/
-
-static long dv1394_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
-{
- struct video_card *video = file_to_video_card(file);
- unsigned long flags;
- int ret = -EINVAL;
- void __user *argp = (void __user *)arg;
-
- DECLARE_WAITQUEUE(wait, current);
-
- /* serialize this to prevent multi-threaded mayhem */
- if (file->f_flags & O_NONBLOCK) {
- if (!mutex_trylock(&video->mtx))
- return -EAGAIN;
- } else {
- if (mutex_lock_interruptible(&video->mtx))
- return -ERESTARTSYS;
- }
-
- switch(cmd)
- {
- case DV1394_IOC_SUBMIT_FRAMES: {
- unsigned int n_submit;
-
- if ( !video_card_initialized(video) ) {
- ret = do_dv1394_init_default(video);
- if (ret)
- goto out;
- }
-
- n_submit = (unsigned int) arg;
-
- if (n_submit > video->n_frames) {
- ret = -EINVAL;
- goto out;
- }
-
- while (n_submit > 0) {
-
- add_wait_queue(&video->waitq, &wait);
- set_current_state(TASK_INTERRUPTIBLE);
-
- spin_lock_irqsave(&video->spinlock, flags);
-
- /* wait until video->first_clear_frame is really CLEAR */
- while (video->frames[video->first_clear_frame]->state != FRAME_CLEAR) {
-
- spin_unlock_irqrestore(&video->spinlock, flags);
-
- if (signal_pending(current)) {
- remove_wait_queue(&video->waitq, &wait);
- set_current_state(TASK_RUNNING);
- ret = -EINTR;
- goto out;
- }
-
- schedule();
- set_current_state(TASK_INTERRUPTIBLE);
-
- spin_lock_irqsave(&video->spinlock, flags);
- }
- spin_unlock_irqrestore(&video->spinlock, flags);
-
- remove_wait_queue(&video->waitq, &wait);
- set_current_state(TASK_RUNNING);
-
- frame_prepare(video, video->first_clear_frame);
-
- n_submit--;
- }
-
- ret = 0;
- break;
- }
-
- case DV1394_IOC_WAIT_FRAMES: {
- unsigned int n_wait;
-
- if ( !video_card_initialized(video) ) {
- ret = -EINVAL;
- goto out;
- }
-
- n_wait = (unsigned int) arg;
-
- /* since we re-run the last frame on underflow, we will
- never actually have n_frames clear frames; at most only
- n_frames - 1 */
-
- if (n_wait > (video->n_frames-1) ) {
- ret = -EINVAL;
- goto out;
- }
-
- add_wait_queue(&video->waitq, &wait);
- set_current_state(TASK_INTERRUPTIBLE);
-
- spin_lock_irqsave(&video->spinlock, flags);
-
- while (video->n_clear_frames < n_wait) {
-
- spin_unlock_irqrestore(&video->spinlock, flags);
-
- if (signal_pending(current)) {
- remove_wait_queue(&video->waitq, &wait);
- set_current_state(TASK_RUNNING);
- ret = -EINTR;
- goto out;
- }
-
- schedule();
- set_current_state(TASK_INTERRUPTIBLE);
-
- spin_lock_irqsave(&video->spinlock, flags);
- }
-
- spin_unlock_irqrestore(&video->spinlock, flags);
-
- remove_wait_queue(&video->waitq, &wait);
- set_current_state(TASK_RUNNING);
- ret = 0;
- break;
- }
-
- case DV1394_IOC_RECEIVE_FRAMES: {
- unsigned int n_recv;
-
- if ( !video_card_initialized(video) ) {
- ret = -EINVAL;
- goto out;
- }
-
- n_recv = (unsigned int) arg;
-
- /* at least one frame must be active */
- if (n_recv > (video->n_frames-1) ) {
- ret = -EINVAL;
- goto out;
- }
-
- spin_lock_irqsave(&video->spinlock, flags);
-
- /* release the clear frames */
- video->n_clear_frames -= n_recv;
-
- /* advance the clear frame cursor */
- video->first_clear_frame = (video->first_clear_frame + n_recv) % video->n_frames;
-
- /* reset dropped_frames */
- video->dropped_frames = 0;
-
- spin_unlock_irqrestore(&video->spinlock, flags);
-
- ret = 0;
- break;
- }
-
- case DV1394_IOC_START_RECEIVE: {
- if ( !video_card_initialized(video) ) {
- ret = do_dv1394_init_default(video);
- if (ret)
- goto out;
- }
-
- video->continuity_counter = -1;
-
- receive_packets(video);
-
- start_dma_receive(video);
-
- ret = 0;
- break;
- }
-
- case DV1394_IOC_INIT: {
- struct dv1394_init init;
- if (!argp) {
- ret = do_dv1394_init_default(video);
- } else {
- if (copy_from_user(&init, argp, sizeof(init))) {
- ret = -EFAULT;
- goto out;
- }
- ret = do_dv1394_init(video, &init);
- }
- break;
- }
-
- case DV1394_IOC_SHUTDOWN:
- do_dv1394_shutdown(video, 0);
- ret = 0;
- break;
-
-
- case DV1394_IOC_GET_STATUS: {
- struct dv1394_status status;
-
- if ( !video_card_initialized(video) ) {
- ret = -EINVAL;
- goto out;
- }
-
- status.init.api_version = DV1394_API_VERSION;
- status.init.channel = video->channel;
- status.init.n_frames = video->n_frames;
- status.init.format = video->pal_or_ntsc;
- status.init.cip_n = video->cip_n;
- status.init.cip_d = video->cip_d;
- status.init.syt_offset = video->syt_offset;
-
- status.first_clear_frame = video->first_clear_frame;
-
- /* the rest of the fields need to be locked against the interrupt */
- spin_lock_irqsave(&video->spinlock, flags);
-
- status.active_frame = video->active_frame;
- status.n_clear_frames = video->n_clear_frames;
-
- status.dropped_frames = video->dropped_frames;
-
- /* reset dropped_frames */
- video->dropped_frames = 0;
-
- spin_unlock_irqrestore(&video->spinlock, flags);
-
- if (copy_to_user(argp, &status, sizeof(status))) {
- ret = -EFAULT;
- goto out;
- }
-
- ret = 0;
- break;
- }
-
- default:
- break;
- }
-
- out:
- mutex_unlock(&video->mtx);
- return ret;
-}
-
-/*** DEVICE FILE INTERFACE CONTINUED ***************************************/
-
-static int dv1394_open(struct inode *inode, struct file *file)
-{
- struct video_card *video = NULL;
-
- if (file->private_data) {
- video = file->private_data;
-
- } else {
- /* look up the card by ID */
- unsigned long flags;
- int idx = ieee1394_file_to_instance(file);
-
- spin_lock_irqsave(&dv1394_cards_lock, flags);
- if (!list_empty(&dv1394_cards)) {
- struct video_card *p;
- list_for_each_entry(p, &dv1394_cards, list) {
- if ((p->id) == idx) {
- video = p;
- break;
- }
- }
- }
- spin_unlock_irqrestore(&dv1394_cards_lock, flags);
-
- if (!video) {
- debug_printk("dv1394: OHCI card %d not found", idx);
- return -ENODEV;
- }
-
- file->private_data = (void*) video;
- }
-
-#ifndef DV1394_ALLOW_MORE_THAN_ONE_OPEN
-
- if ( test_and_set_bit(0, &video->open) ) {
- /* video is already open by someone else */
- return -EBUSY;
- }
-
-#endif
-
- printk(KERN_INFO "%s: NOTE, the dv1394 interface is unsupported "
- "and will not be available in the new firewire driver stack. "
- "Try libraw1394 based programs instead.\n", current->comm);
-
- return nonseekable_open(inode, file);
-}
-
-
-static int dv1394_release(struct inode *inode, struct file *file)
-{
- struct video_card *video = file_to_video_card(file);
-
- /* OK to free the DMA buffer, no more mappings can exist */
- do_dv1394_shutdown(video, 1);
-
- /* give someone else a turn */
- clear_bit(0, &video->open);
-
- return 0;
-}
-
-
-/*** DEVICE DRIVER HANDLERS ************************************************/
-
-static void it_tasklet_func(unsigned long data)
-{
- int wake = 0;
- struct video_card *video = (struct video_card*) data;
-
- spin_lock(&video->spinlock);
-
- if (!video->dma_running)
- goto out;
-
- irq_printk("ContextControl = %08x, CommandPtr = %08x\n",
- reg_read(video->ohci, video->ohci_IsoXmitContextControlSet),
- reg_read(video->ohci, video->ohci_IsoXmitCommandPtr)
- );
-
-
- if ( (video->ohci_it_ctx != -1) &&
- (reg_read(video->ohci, video->ohci_IsoXmitContextControlSet) & (1 << 10)) ) {
-
- struct frame *f;
- unsigned int frame, i;
-
-
- if (video->active_frame == -1)
- frame = 0;
- else
- frame = video->active_frame;
-
- /* check all the DMA-able frames */
- for (i = 0; i < video->n_frames; i++, frame = (frame+1) % video->n_frames) {
-
- irq_printk("IRQ checking frame %d...", frame);
- f = video->frames[frame];
- if (f->state != FRAME_READY) {
- irq_printk("clear, skipping\n");
- /* we don't own this frame */
- continue;
- }
-
- irq_printk("DMA\n");
-
- /* check the frame begin semaphore to see if we can free the previous frame */
- if ( *(f->frame_begin_timestamp) ) {
- int prev_frame;
- struct frame *prev_f;
-
-
-
- /* don't reset, need this later *(f->frame_begin_timestamp) = 0; */
- irq_printk(" BEGIN\n");
-
- prev_frame = frame - 1;
- if (prev_frame == -1)
- prev_frame += video->n_frames;
- prev_f = video->frames[prev_frame];
-
- /* make sure we can actually garbage collect
- this frame */
- if ( (prev_f->state == FRAME_READY) &&
- prev_f->done && (!f->done) )
- {
- frame_reset(prev_f);
- video->n_clear_frames++;
- wake = 1;
- video->active_frame = frame;
-
- irq_printk(" BEGIN - freeing previous frame %d, new active frame is %d\n", prev_frame, frame);
- } else {
- irq_printk(" BEGIN - can't free yet\n");
- }
-
- f->done = 1;
- }
-
-
- /* see if we need to set the timestamp for the next frame */
- if ( *(f->mid_frame_timestamp) ) {
- struct frame *next_frame;
- u32 begin_ts, ts_cyc, ts_off;
-
- *(f->mid_frame_timestamp) = 0;
-
- begin_ts = le32_to_cpu(*(f->frame_begin_timestamp));
-
- irq_printk(" MIDDLE - first packet was sent at cycle %4u (%2u), assigned timestamp was (%2u) %4u\n",
- begin_ts & 0x1FFF, begin_ts & 0xF,
- f->assigned_timestamp >> 12, f->assigned_timestamp & 0xFFF);
-
- /* prepare next frame and assign timestamp */
- next_frame = video->frames[ (frame+1) % video->n_frames ];
-
- if (next_frame->state == FRAME_READY) {
- irq_printk(" MIDDLE - next frame is ready, good\n");
- } else {
- debug_printk("dv1394: Underflow! At least one frame has been dropped.\n");
- next_frame = f;
- }
-
- /* set the timestamp to the timestamp of the last frame sent,
- plus the length of the last frame sent, plus the syt latency */
- ts_cyc = begin_ts & 0xF;
- /* advance one frame, plus syt latency (typically 2-3) */
- ts_cyc += f->n_packets + video->syt_offset;
-
- ts_off = 0;
-
- ts_cyc += ts_off/3072;
- ts_off %= 3072;
-
- next_frame->assigned_timestamp = ((ts_cyc&0xF) << 12) + ts_off;
- if (next_frame->cip_syt1) {
- next_frame->cip_syt1->b[6] = next_frame->assigned_timestamp >> 8;
- next_frame->cip_syt1->b[7] = next_frame->assigned_timestamp & 0xFF;
- }
- if (next_frame->cip_syt2) {
- next_frame->cip_syt2->b[6] = next_frame->assigned_timestamp >> 8;
- next_frame->cip_syt2->b[7] = next_frame->assigned_timestamp & 0xFF;
- }
-
- }
-
- /* see if the frame looped */
- if ( *(f->frame_end_timestamp) ) {
-
- *(f->frame_end_timestamp) = 0;
-
- debug_printk(" END - the frame looped at least once\n");
-
- video->dropped_frames++;
- }
-
- } /* for (each frame) */
- }
-
- if (wake) {
- kill_fasync(&video->fasync, SIGIO, POLL_OUT);
-
- /* wake readers/writers/ioctl'ers */
- wake_up_interruptible(&video->waitq);
- }
-
-out:
- spin_unlock(&video->spinlock);
-}
-
-static void ir_tasklet_func(unsigned long data)
-{
- int wake = 0;
- struct video_card *video = (struct video_card*) data;
-
- spin_lock(&video->spinlock);
-
- if (!video->dma_running)
- goto out;
-
- if ( (video->ohci_ir_ctx != -1) &&
- (reg_read(video->ohci, video->ohci_IsoRcvContextControlSet) & (1 << 10)) ) {
-
- int sof=0; /* start-of-frame flag */
- struct frame *f;
- u16 packet_length;
- int i, dbc=0;
- struct DMA_descriptor_block *block = NULL;
- u16 xferstatus;
-
- int next_i, prev_i;
- struct DMA_descriptor_block *next = NULL;
- dma_addr_t next_dma = 0;
- struct DMA_descriptor_block *prev = NULL;
-
- /* loop over all descriptors in all frames */
- for (i = 0; i < video->n_frames*MAX_PACKETS; i++) {
- struct packet *p = dma_region_i(&video->packet_buf, struct packet, video->current_packet);
-
- /* make sure we are seeing the latest changes to p */
- dma_region_sync_for_cpu(&video->packet_buf,
- (unsigned long) p - (unsigned long) video->packet_buf.kvirt,
- sizeof(struct packet));
-
- packet_length = le16_to_cpu(p->data_length);
-
- /* get the descriptor based on packet_buffer cursor */
- f = video->frames[video->current_packet / MAX_PACKETS];
- block = &(f->descriptor_pool[video->current_packet % MAX_PACKETS]);
- xferstatus = le32_to_cpu(block->u.in.il.q[3]) >> 16;
- xferstatus &= 0x1F;
- irq_printk("ir_tasklet_func: xferStatus/resCount [%d] = 0x%08x\n", i, le32_to_cpu(block->u.in.il.q[3]) );
-
- /* get the current frame */
- f = video->frames[video->active_frame];
-
- /* exclude empty packet */
- if (packet_length > 8 && xferstatus == 0x11) {
- /* check for start of frame */
- /* DRD> Changed to check section type ([0]>>5==0)
- and dif sequence ([1]>>4==0) */
- sof = ( (p->data[0] >> 5) == 0 && (p->data[1] >> 4) == 0);
-
- dbc = (int) (p->cip_h1 >> 24);
- if ( video->continuity_counter != -1 && dbc > ((video->continuity_counter + 1) % 256) )
- {
- printk(KERN_WARNING "dv1394: discontinuity detected, dropping all frames\n" );
- video->dropped_frames += video->n_clear_frames + 1;
- video->first_frame = 0;
- video->n_clear_frames = 0;
- video->first_clear_frame = -1;
- }
- video->continuity_counter = dbc;
-
- if (!video->first_frame) {
- if (sof) {
- video->first_frame = 1;
- }
-
- } else if (sof) {
- /* close current frame */
- frame_reset(f); /* f->state = STATE_CLEAR */
- video->n_clear_frames++;
- if (video->n_clear_frames > video->n_frames) {
- video->dropped_frames++;
- printk(KERN_WARNING "dv1394: dropped a frame during reception\n" );
- video->n_clear_frames = video->n_frames-1;
- video->first_clear_frame = (video->first_clear_frame + 1) % video->n_frames;
- }
- if (video->first_clear_frame == -1)
- video->first_clear_frame = video->active_frame;
-
- /* get the next frame */
- video->active_frame = (video->active_frame + 1) % video->n_frames;
- f = video->frames[video->active_frame];
- irq_printk(" frame received, active_frame = %d, n_clear_frames = %d, first_clear_frame = %d\n",
- video->active_frame, video->n_clear_frames, video->first_clear_frame);
- }
- if (video->first_frame) {
- if (sof) {
- /* open next frame */
- f->state = FRAME_READY;
- }
-
- /* copy to buffer */
- if (f->n_packets > (video->frame_size / 480)) {
- printk(KERN_ERR "frame buffer overflow during receive\n");
- }
-
- frame_put_packet(f, p);
-
- } /* first_frame */
- }
-
- /* stop, end of ready packets */
- else if (xferstatus == 0) {
- break;
- }
-
- /* reset xferStatus & resCount */
- block->u.in.il.q[3] = cpu_to_le32(512);
-
- /* terminate dma chain at this (next) packet */
- next_i = video->current_packet;
- f = video->frames[next_i / MAX_PACKETS];
- next = &(f->descriptor_pool[next_i % MAX_PACKETS]);
- next_dma = ((unsigned long) block - (unsigned long) f->descriptor_pool) + f->descriptor_pool_dma;
- next->u.in.il.q[0] |= cpu_to_le32(3 << 20); /* enable interrupt */
- next->u.in.il.q[2] = cpu_to_le32(0); /* disable branch */
-
- /* link previous to next */
- prev_i = (next_i == 0) ? (MAX_PACKETS * video->n_frames - 1) : (next_i - 1);
- f = video->frames[prev_i / MAX_PACKETS];
- prev = &(f->descriptor_pool[prev_i % MAX_PACKETS]);
- if (prev_i % (MAX_PACKETS/2)) {
- prev->u.in.il.q[0] &= ~cpu_to_le32(3 << 20); /* no interrupt */
- } else {
- prev->u.in.il.q[0] |= cpu_to_le32(3 << 20); /* enable interrupt */
- }
- prev->u.in.il.q[2] = cpu_to_le32(next_dma | 1); /* set Z=1 */
- wmb();
-
- /* wake up DMA in case it fell asleep */
- reg_write(video->ohci, video->ohci_IsoRcvContextControlSet, (1 << 12));
-
- /* advance packet_buffer cursor */
- video->current_packet = (video->current_packet + 1) % (MAX_PACKETS * video->n_frames);
-
- } /* for all packets */
-
- wake = 1; /* why the hell not? */
-
- } /* receive interrupt */
-
- if (wake) {
- kill_fasync(&video->fasync, SIGIO, POLL_IN);
-
- /* wake readers/writers/ioctl'ers */
- wake_up_interruptible(&video->waitq);
- }
-
-out:
- spin_unlock(&video->spinlock);
-}
-
-static struct cdev dv1394_cdev;
-static const struct file_operations dv1394_fops=
-{
- .owner = THIS_MODULE,
- .poll = dv1394_poll,
- .unlocked_ioctl = dv1394_ioctl,
-#ifdef CONFIG_COMPAT
- .compat_ioctl = dv1394_compat_ioctl,
-#endif
- .mmap = dv1394_mmap,
- .open = dv1394_open,
- .write = dv1394_write,
- .read = dv1394_read,
- .release = dv1394_release,
- .fasync = dv1394_fasync,
- .llseek = no_llseek,
-};
-
-
-/*** HOTPLUG STUFF **********************************************************/
-/*
- * Export information about protocols/devices supported by this driver.
- */
-#ifdef MODULE
-static const struct ieee1394_device_id dv1394_id_table[] = {
- {
- .match_flags = IEEE1394_MATCH_SPECIFIER_ID | IEEE1394_MATCH_VERSION,
- .specifier_id = AVC_UNIT_SPEC_ID_ENTRY & 0xffffff,
- .version = AVC_SW_VERSION_ENTRY & 0xffffff
- },
- { }
-};
-
-MODULE_DEVICE_TABLE(ieee1394, dv1394_id_table);
-#endif /* MODULE */
-
-static struct hpsb_protocol_driver dv1394_driver = {
- .name = "dv1394",
-};
-
-
-/*** IEEE1394 HPSB CALLBACKS ***********************************************/
-
-static int dv1394_init(struct ti_ohci *ohci, enum pal_or_ntsc format, enum modes mode)
-{
- struct video_card *video;
- unsigned long flags;
- int i;
-
- video = kzalloc(sizeof(*video), GFP_KERNEL);
- if (!video) {
- printk(KERN_ERR "dv1394: cannot allocate video_card\n");
- return -1;
- }
-
- video->ohci = ohci;
- /* lower 2 bits of id indicate which of four "plugs"
- per host */
- video->id = ohci->host->id << 2;
- if (format == DV1394_NTSC)
- video->id |= mode;
- else
- video->id |= 2 + mode;
-
- video->ohci_it_ctx = -1;
- video->ohci_ir_ctx = -1;
-
- video->ohci_IsoXmitContextControlSet = 0;
- video->ohci_IsoXmitContextControlClear = 0;
- video->ohci_IsoXmitCommandPtr = 0;
-
- video->ohci_IsoRcvContextControlSet = 0;
- video->ohci_IsoRcvContextControlClear = 0;
- video->ohci_IsoRcvCommandPtr = 0;
- video->ohci_IsoRcvContextMatch = 0;
-
- video->n_frames = 0; /* flag that video is not initialized */
- video->channel = 63; /* default to broadcast channel */
- video->active_frame = -1;
-
- /* initialize the following */
- video->pal_or_ntsc = format;
- video->cip_n = 0; /* 0 = use builtin default */
- video->cip_d = 0;
- video->syt_offset = 0;
- video->mode = mode;
-
- for (i = 0; i < DV1394_MAX_FRAMES; i++)
- video->frames[i] = NULL;
-
- dma_region_init(&video->dv_buf);
- video->dv_buf_size = 0;
- dma_region_init(&video->packet_buf);
- video->packet_buf_size = 0;
-
- clear_bit(0, &video->open);
- spin_lock_init(&video->spinlock);
- video->dma_running = 0;
- mutex_init(&video->mtx);
- init_waitqueue_head(&video->waitq);
- video->fasync = NULL;
-
- spin_lock_irqsave(&dv1394_cards_lock, flags);
- INIT_LIST_HEAD(&video->list);
- list_add_tail(&video->list, &dv1394_cards);
- spin_unlock_irqrestore(&dv1394_cards_lock, flags);
-
- debug_printk("dv1394: dv1394_init() OK on ID %d\n", video->id);
- return 0;
-}
-
-static void dv1394_remove_host(struct hpsb_host *host)
-{
- struct video_card *video, *tmp_video;
- unsigned long flags;
- int found_ohci_card = 0;
-
- do {
- video = NULL;
- spin_lock_irqsave(&dv1394_cards_lock, flags);
- list_for_each_entry(tmp_video, &dv1394_cards, list) {
- if ((tmp_video->id >> 2) == host->id) {
- list_del(&tmp_video->list);
- video = tmp_video;
- found_ohci_card = 1;
- break;
- }
- }
- spin_unlock_irqrestore(&dv1394_cards_lock, flags);
-
- if (video) {
- do_dv1394_shutdown(video, 1);
- kfree(video);
- }
- } while (video);
-
- if (found_ohci_card)
- device_destroy(hpsb_protocol_class, MKDEV(IEEE1394_MAJOR,
- IEEE1394_MINOR_BLOCK_DV1394 * 16 + (host->id << 2)));
-}
-
-static void dv1394_add_host(struct hpsb_host *host)
-{
- struct ti_ohci *ohci;
- int id = host->id;
-
- /* We only work with the OHCI-1394 driver */
- if (strcmp(host->driver->name, OHCI1394_DRIVER_NAME))
- return;
-
- ohci = (struct ti_ohci *)host->hostdata;
-
- device_create(hpsb_protocol_class, NULL,
- MKDEV(IEEE1394_MAJOR,
- IEEE1394_MINOR_BLOCK_DV1394 * 16 + (id<<2)),
- NULL, "dv1394-%d", id);
-
- dv1394_init(ohci, DV1394_NTSC, MODE_RECEIVE);
- dv1394_init(ohci, DV1394_NTSC, MODE_TRANSMIT);
- dv1394_init(ohci, DV1394_PAL, MODE_RECEIVE);
- dv1394_init(ohci, DV1394_PAL, MODE_TRANSMIT);
-}
-
-
-/* Bus reset handler. In the event of a bus reset, we may need to
- re-start the DMA contexts - otherwise the user program would
- end up waiting forever.
-*/
-
-static void dv1394_host_reset(struct hpsb_host *host)
-{
- struct video_card *video = NULL, *tmp_vid;
- unsigned long flags;
-
- /* We only work with the OHCI-1394 driver */
- if (strcmp(host->driver->name, OHCI1394_DRIVER_NAME))
- return;
-
- /* find the corresponding video_cards */
- spin_lock_irqsave(&dv1394_cards_lock, flags);
- list_for_each_entry(tmp_vid, &dv1394_cards, list) {
- if ((tmp_vid->id >> 2) == host->id) {
- video = tmp_vid;
- break;
- }
- }
- spin_unlock_irqrestore(&dv1394_cards_lock, flags);
-
- if (!video)
- return;
-
-
- spin_lock_irqsave(&video->spinlock, flags);
-
- if (!video->dma_running)
- goto out;
-
- /* check IT context */
- if (video->ohci_it_ctx != -1) {
- u32 ctx;
-
- ctx = reg_read(video->ohci, video->ohci_IsoXmitContextControlSet);
-
- /* if (RUN but not ACTIVE) */
- if ( (ctx & (1<<15)) &&
- !(ctx & (1<<10)) ) {
-
- debug_printk("dv1394: IT context stopped due to bus reset; waking it up\n");
-
- /* to be safe, assume a frame has been dropped. User-space programs
- should handle this condition like an underflow. */
- video->dropped_frames++;
-
- /* for some reason you must clear, then re-set the RUN bit to restart DMA */
-
- /* clear RUN */
- reg_write(video->ohci, video->ohci_IsoXmitContextControlClear, (1 << 15));
- flush_pci_write(video->ohci);
-
- /* set RUN */
- reg_write(video->ohci, video->ohci_IsoXmitContextControlSet, (1 << 15));
- flush_pci_write(video->ohci);
-
- /* set the WAKE bit (just in case; this isn't strictly necessary) */
- reg_write(video->ohci, video->ohci_IsoXmitContextControlSet, (1 << 12));
- flush_pci_write(video->ohci);
-
- irq_printk("dv1394: AFTER IT restart ctx 0x%08x ptr 0x%08x\n",
- reg_read(video->ohci, video->ohci_IsoXmitContextControlSet),
- reg_read(video->ohci, video->ohci_IsoXmitCommandPtr));
- }
- }
-
- /* check IR context */
- if (video->ohci_ir_ctx != -1) {
- u32 ctx;
-
- ctx = reg_read(video->ohci, video->ohci_IsoRcvContextControlSet);
-
- /* if (RUN but not ACTIVE) */
- if ( (ctx & (1<<15)) &&
- !(ctx & (1<<10)) ) {
-
- debug_printk("dv1394: IR context stopped due to bus reset; waking it up\n");
-
- /* to be safe, assume a frame has been dropped. User-space programs
- should handle this condition like an overflow. */
- video->dropped_frames++;
-
- /* for some reason you must clear, then re-set the RUN bit to restart DMA */
- /* XXX this doesn't work for me, I can't get IR DMA to restart :[ */
-
- /* clear RUN */
- reg_write(video->ohci, video->ohci_IsoRcvContextControlClear, (1 << 15));
- flush_pci_write(video->ohci);
-
- /* set RUN */
- reg_write(video->ohci, video->ohci_IsoRcvContextControlSet, (1 << 15));
- flush_pci_write(video->ohci);
-
- /* set the WAKE bit (just in case; this isn't strictly necessary) */
- reg_write(video->ohci, video->ohci_IsoRcvContextControlSet, (1 << 12));
- flush_pci_write(video->ohci);
-
- irq_printk("dv1394: AFTER IR restart ctx 0x%08x ptr 0x%08x\n",
- reg_read(video->ohci, video->ohci_IsoRcvContextControlSet),
- reg_read(video->ohci, video->ohci_IsoRcvCommandPtr));
- }
- }
-
-out:
- spin_unlock_irqrestore(&video->spinlock, flags);
-
- /* wake readers/writers/ioctl'ers */
- wake_up_interruptible(&video->waitq);
-}
-
-static struct hpsb_highlevel dv1394_highlevel = {
- .name = "dv1394",
- .add_host = dv1394_add_host,
- .remove_host = dv1394_remove_host,
- .host_reset = dv1394_host_reset,
-};
-
-#ifdef CONFIG_COMPAT
-
-#define DV1394_IOC32_INIT _IOW('#', 0x06, struct dv1394_init32)
-#define DV1394_IOC32_GET_STATUS _IOR('#', 0x0c, struct dv1394_status32)
-
-struct dv1394_init32 {
- u32 api_version;
- u32 channel;
- u32 n_frames;
- u32 format;
- u32 cip_n;
- u32 cip_d;
- u32 syt_offset;
-};
-
-struct dv1394_status32 {
- struct dv1394_init32 init;
- s32 active_frame;
- u32 first_clear_frame;
- u32 n_clear_frames;
- u32 dropped_frames;
-};
-
- /* RED-PEN: this should use compat_alloc_user_space() instead */
-
-static int handle_dv1394_init(struct file *file, unsigned int cmd, unsigned long arg)
-{
- struct dv1394_init32 dv32;
- struct dv1394_init dv;
- mm_segment_t old_fs;
- int ret;
-
- if (file->f_op->unlocked_ioctl != dv1394_ioctl)
- return -EFAULT;
-
- if (copy_from_user(&dv32, (void __user *)arg, sizeof(dv32)))
- return -EFAULT;
-
- dv.api_version = dv32.api_version;
- dv.channel = dv32.channel;
- dv.n_frames = dv32.n_frames;
- dv.format = dv32.format;
- dv.cip_n = (unsigned long)dv32.cip_n;
- dv.cip_d = (unsigned long)dv32.cip_d;
- dv.syt_offset = dv32.syt_offset;
-
- old_fs = get_fs();
- set_fs(KERNEL_DS);
- ret = dv1394_ioctl(file, DV1394_IOC_INIT, (unsigned long)&dv);
- set_fs(old_fs);
-
- return ret;
-}
-
-static int handle_dv1394_get_status(struct file *file, unsigned int cmd, unsigned long arg)
-{
- struct dv1394_status32 dv32;
- struct dv1394_status dv;
- mm_segment_t old_fs;
- int ret;
-
- if (file->f_op->unlocked_ioctl != dv1394_ioctl)
- return -EFAULT;
-
- old_fs = get_fs();
- set_fs(KERNEL_DS);
- ret = dv1394_ioctl(file, DV1394_IOC_GET_STATUS, (unsigned long)&dv);
- set_fs(old_fs);
-
- if (!ret) {
- dv32.init.api_version = dv.init.api_version;
- dv32.init.channel = dv.init.channel;
- dv32.init.n_frames = dv.init.n_frames;
- dv32.init.format = dv.init.format;
- dv32.init.cip_n = (u32)dv.init.cip_n;
- dv32.init.cip_d = (u32)dv.init.cip_d;
- dv32.init.syt_offset = dv.init.syt_offset;
- dv32.active_frame = dv.active_frame;
- dv32.first_clear_frame = dv.first_clear_frame;
- dv32.n_clear_frames = dv.n_clear_frames;
- dv32.dropped_frames = dv.dropped_frames;
-
- if (copy_to_user((struct dv1394_status32 __user *)arg, &dv32, sizeof(dv32)))
- ret = -EFAULT;
- }
-
- return ret;
-}
-
-
-
-static long dv1394_compat_ioctl(struct file *file, unsigned int cmd,
- unsigned long arg)
-{
- switch (cmd) {
- case DV1394_IOC_SHUTDOWN:
- case DV1394_IOC_SUBMIT_FRAMES:
- case DV1394_IOC_WAIT_FRAMES:
- case DV1394_IOC_RECEIVE_FRAMES:
- case DV1394_IOC_START_RECEIVE:
- return dv1394_ioctl(file, cmd, arg);
-
- case DV1394_IOC32_INIT:
- return handle_dv1394_init(file, cmd, arg);
- case DV1394_IOC32_GET_STATUS:
- return handle_dv1394_get_status(file, cmd, arg);
- default:
- return -ENOIOCTLCMD;
- }
-}
-
-#endif /* CONFIG_COMPAT */
-
-
-/*** KERNEL MODULE HANDLERS ************************************************/
-
-MODULE_AUTHOR("Dan Maas <dmaas@dcine.com>, Dan Dennedy <dan@dennedy.org>");
-MODULE_DESCRIPTION("driver for DV input/output on OHCI board");
-MODULE_SUPPORTED_DEVICE("dv1394");
-MODULE_LICENSE("GPL");
-
-static void __exit dv1394_exit_module(void)
-{
- hpsb_unregister_protocol(&dv1394_driver);
- hpsb_unregister_highlevel(&dv1394_highlevel);
- cdev_del(&dv1394_cdev);
-}
-
-static int __init dv1394_init_module(void)
-{
- int ret;
-
- cdev_init(&dv1394_cdev, &dv1394_fops);
- dv1394_cdev.owner = THIS_MODULE;
- ret = cdev_add(&dv1394_cdev, IEEE1394_DV1394_DEV, 16);
- if (ret) {
- printk(KERN_ERR "dv1394: unable to register character device\n");
- return ret;
- }
-
- hpsb_register_highlevel(&dv1394_highlevel);
-
- ret = hpsb_register_protocol(&dv1394_driver);
- if (ret) {
- printk(KERN_ERR "dv1394: failed to register protocol\n");
- hpsb_unregister_highlevel(&dv1394_highlevel);
- cdev_del(&dv1394_cdev);
- return ret;
- }
-
- return 0;
-}
-
-module_init(dv1394_init_module);
-module_exit(dv1394_exit_module);
diff --git a/drivers/ieee1394/dv1394.h b/drivers/ieee1394/dv1394.h
deleted file mode 100644
index 5807f5289810..000000000000
--- a/drivers/ieee1394/dv1394.h
+++ /dev/null
@@ -1,305 +0,0 @@
-/*
- * dv1394.h - DV input/output over IEEE 1394 on OHCI chips
- * Copyright (C)2001 Daniel Maas <dmaas@dcine.com>
- * receive by Dan Dennedy <dan@dennedy.org>
- *
- * based on:
- * video1394.h - driver for OHCI 1394 boards
- * Copyright (C)1999,2000 Sebastien Rougeaux <sebastien.rougeaux@anu.edu.au>
- * Peter Schlaile <udbz@rz.uni-karlsruhe.de>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software Foundation,
- * Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
- */
-
-#ifndef _DV_1394_H
-#define _DV_1394_H
-
-/* This is the public user-space interface. Try not to break it. */
-
-#define DV1394_API_VERSION 0x20011127
-
-/* ********************
- ** **
- ** DV1394 API **
- ** **
- ********************
-
- There are two methods of operating the DV1394 DV output device.
-
- 1)
-
- The simplest is an interface based on write(): simply write
- full DV frames of data to the device, and they will be transmitted
- as quickly as possible. The FD may be set for non-blocking I/O,
- in which case you can use select() or poll() to wait for output
- buffer space.
-
- To set the DV output parameters (e.g. whether you want NTSC or PAL
- video), use the DV1394_INIT ioctl, passing in the parameters you
- want in a struct dv1394_init.
-
- Example 1:
- To play a raw .DV file: cat foo.DV > /dev/dv1394
- (cat will use write() internally)
-
- Example 2:
- static struct dv1394_init init = {
- 0x63, (broadcast channel)
- 4, (four-frame ringbuffer)
- DV1394_NTSC, (send NTSC video)
- 0, 0 (default empty packet rate)
- }
-
- ioctl(fd, DV1394_INIT, &init);
-
- while (1) {
- read( <a raw DV file>, buf, DV1394_NTSC_FRAME_SIZE );
- write( <the dv1394 FD>, buf, DV1394_NTSC_FRAME_SIZE );
- }
-
- 2)
-
- For more control over buffering, and to avoid unnecessary copies
- of the DV data, you can use the more sophisticated mmap() interface.
- First, call the DV1394_INIT ioctl to specify your parameters,
- including the number of frames in the ringbuffer. Then, calling mmap()
- on the dv1394 device will give you direct access to the ringbuffer
- from which the DV card reads your frame data.
-
- The ringbuffer is simply one large, contiguous region of memory
- containing two or more frames of packed DV data. Each frame of DV data
- is 120000 bytes (NTSC) or 144000 bytes (PAL).
-
- Fill one or more frames in the ringbuffer, then use the DV1394_SUBMIT_FRAMES
- ioctl to begin I/O. You can use either the DV1394_WAIT_FRAMES ioctl
- or select()/poll() to wait until the frames are transmitted. Next, you'll
- need to call the DV1394_GET_STATUS ioctl to determine which ringbuffer
- frames are clear (ready to be filled with new DV data). Finally, use
- DV1394_SUBMIT_FRAMES again to send the new data to the DV output.
-
-
- Example: here is what a four-frame ringbuffer might look like
- during DV transmission:
-
-
- frame 0 frame 1 frame 2 frame 3
-
- *--------------------------------------*
- | CLEAR | DV data | DV data | CLEAR |
- *--------------------------------------*
- <ACTIVE>
-
- transmission goes in this direction --->>>
-
-
- The DV hardware is currently transmitting the data in frame 1.
- Once frame 1 is finished, it will automatically transmit frame 2.
- (if frame 2 finishes before frame 3 is submitted, the device
- will continue to transmit frame 2, and will increase the dropped_frames
- counter each time it repeats the transmission).
-
-
- If you called DV1394_GET_STATUS at this instant, you would
- receive the following values:
-
- n_frames = 4
- active_frame = 1
- first_clear_frame = 3
- n_clear_frames = 2
-
- At this point, you should write new DV data into frame 3 and optionally
- frame 0. Then call DV1394_SUBMIT_FRAMES to inform the device that
- it may transmit the new frames.
-
- ERROR HANDLING
-
- An error (buffer underflow/overflow or a break in the DV stream due
- to a 1394 bus reset) can be detected by checking the dropped_frames
- field of struct dv1394_status (obtained through the
- DV1394_GET_STATUS ioctl).
-
- The best way to recover from such an error is to re-initialize
- dv1394, either by using the DV1394_INIT ioctl call, or closing the
- file descriptor and opening it again. (note that you must unmap all
- ringbuffer mappings when closing the file descriptor, or else
- dv1394 will still be considered 'in use').
-
- MAIN LOOP
-
- For maximum efficiency and robustness against bus errors, you are
- advised to model the main loop of your application after the
- following pseudo-code example:
-
- (checks of system call return values omitted for brevity; always
- check return values in your code!)
-
- while ( frames left ) {
-
- struct pollfd *pfd = ...;
-
- pfd->fd = dv1394_fd;
- pfd->revents = 0;
- pfd->events = POLLOUT | POLLIN; (OUT for transmit, IN for receive)
-
- (add other sources of I/O here)
-
- poll(pfd, 1, -1); (or select(); add a timeout if you want)
-
- if (pfd->revents) {
- struct dv1394_status status;
-
- ioctl(dv1394_fd, DV1394_GET_STATUS, &status);
-
- if (status.dropped_frames > 0) {
- reset_dv1394();
- } else {
- for (int i = 0; i < status.n_clear_frames; i++) {
- copy_DV_frame();
- }
- }
- }
- }
-
- where copy_DV_frame() reads or writes on the dv1394 file descriptor
- (read/write mode) or copies data to/from the mmap ringbuffer and
- then calls ioctl(DV1394_SUBMIT_FRAMES) to notify dv1394 that new
- frames are available (mmap mode).
-
- reset_dv1394() is called in the event of a buffer
- underflow/overflow or a halt in the DV stream (e.g. due to a 1394
- bus reset). To guarantee recovery from the error, this function
- should close the dv1394 file descriptor (and munmap() all
- ringbuffer mappings, if you are using them), then re-open the
- dv1394 device (and re-map the ringbuffer).
-
-*/
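
A minimal, self-contained sketch of the write()-based transmit path described in the comment block above. This is illustrative only: it assumes the character device is reachable at /dev/dv1394 (the actual node name depends on the local setup), picks channel 63 purely as an example, and uses the DV1394_IOC_* macro spellings used elsewhere in this file.

#include <fcntl.h>
#include <string.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include "dv1394.h"

static int play_dv_file(const char *path)
{
	/* one full NTSC frame; static to keep it off the stack */
	static unsigned char frame[DV1394_NTSC_FRAME_SIZE];
	struct dv1394_init init;
	int dv_fd, in_fd, err = -1;

	dv_fd = open("/dev/dv1394", O_RDWR);
	if (dv_fd < 0)
		return -1;
	in_fd = open(path, O_RDONLY);
	if (in_fd < 0)
		goto out_dv;

	memset(&init, 0, sizeof(init));
	init.api_version = DV1394_API_VERSION;
	init.channel = 63;          /* example isochronous channel */
	init.n_frames = 4;          /* four-frame ringbuffer */
	init.format = DV1394_NTSC;  /* cip_n/cip_d/syt_offset stay 0 = defaults */

	if (ioctl(dv_fd, DV1394_IOC_INIT, &init) < 0)
		goto out_in;

	/* write() one frame at a time; the driver blocks until ringbuffer
	 * space is free (or use O_NONBLOCK plus poll(), as described above) */
	while (read(in_fd, frame, sizeof(frame)) == (ssize_t)sizeof(frame))
		if (write(dv_fd, frame, sizeof(frame)) != (ssize_t)sizeof(frame))
			goto out_in;
	err = 0;
out_in:
	close(in_fd);
out_dv:
	close(dv_fd);
	return err;
}
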
-
-
-/* maximum number of frames in the ringbuffer */
-#define DV1394_MAX_FRAMES 32
-
-/* number of *full* isochronous packets per DV frame */
-#define DV1394_NTSC_PACKETS_PER_FRAME 250
-#define DV1394_PAL_PACKETS_PER_FRAME 300
-
-/* size of one frame's worth of DV data, in bytes */
-#define DV1394_NTSC_FRAME_SIZE (480 * DV1394_NTSC_PACKETS_PER_FRAME)
-#define DV1394_PAL_FRAME_SIZE (480 * DV1394_PAL_PACKETS_PER_FRAME)
-
-
-/* ioctl() commands */
-#include "ieee1394-ioctl.h"
-
-
-enum pal_or_ntsc {
- DV1394_NTSC = 0,
- DV1394_PAL
-};
-
-
-
-
-/* this is the argument to DV1394_INIT */
-struct dv1394_init {
- /* DV1394_API_VERSION */
- unsigned int api_version;
-
- /* isochronous transmission channel to use */
- unsigned int channel;
-
- /* number of frames in the ringbuffer. Must be at least 2
- and at most DV1394_MAX_FRAMES. */
- unsigned int n_frames;
-
- /* send/receive PAL or NTSC video format */
- enum pal_or_ntsc format;
-
- /* the following are used only for transmission */
-
- /* set these to zero unless you want a
- non-default empty packet rate (see below) */
- unsigned long cip_n;
- unsigned long cip_d;
-
- /* set this to zero unless you want a
- non-default SYT cycle offset (default = 3 cycles) */
- unsigned int syt_offset;
-};
-
-/* NOTE: you may only allocate the DV frame ringbuffer once each time
- you open the dv1394 device. DV1394_INIT will fail if you call it a
- second time with different 'n_frames' or 'format' arguments (which
- would imply a different size for the ringbuffer). If you need a
- different buffer size, simply close and re-open the device, then
- initialize it with your new settings. */
-
-/* Q: What are cip_n and cip_d? */
-
-/*
- A: DV video streams do not utilize 100% of the potential bandwidth offered
- by IEEE 1394 (FireWire). To achieve the correct rate of data transmission,
- DV devices must periodically insert empty packets into the 1394 data stream.
- Typically there is one empty packet per 14-16 data-carrying packets.
-
- Some DV devices will accept a wide range of empty packet rates, while others
- require a precise rate. If the dv1394 driver produces empty packets at
- a rate that your device does not accept, you may see ugly patterns on the
- DV output, or even no output at all.
-
- The default empty packet insertion rate seems to work for many people; if
- your DV output is stable, you can simply ignore this discussion. However,
- we have exposed the empty packet rate as a parameter to support devices that
- do not work with the default rate.
-
- The decision to insert an empty packet is made with a numerator/denominator
- algorithm. Empty packets are produced at an average rate of CIP_N / CIP_D.
- You can alter the empty packet rate by passing non-zero values for cip_n
- and cip_d to the INIT ioctl.
-
- */
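
If a device needs a non-default empty-packet rate, the ratio is passed through the same INIT call. A hedged sketch follows; the 1/15 ratio is only an illustrative value within the "one empty packet per 14-16 data packets" range mentioned above, not a recommendation, and the other parameters are example values (per the note above, a repeated INIT must keep the same n_frames and format).

#include <string.h>
#include <sys/ioctl.h>
#include "dv1394.h"

/* Initialize an open dv1394 fd with an explicit empty-packet ratio of
 * cip_n / cip_d, e.g. 1 and 15 for roughly one empty packet per 15
 * data-carrying packets. */
static int dv1394_set_packet_rate(int fd, unsigned long cip_n,
				  unsigned long cip_d)
{
	struct dv1394_init init;

	memset(&init, 0, sizeof(init));
	init.api_version = DV1394_API_VERSION;
	init.channel = 63;          /* example isochronous channel */
	init.n_frames = 4;
	init.format = DV1394_PAL;
	init.cip_n = cip_n;
	init.cip_d = cip_d;

	return ioctl(fd, DV1394_IOC_INIT, &init);
}
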
-
-
-
-struct dv1394_status {
- /* this embedded init struct returns the current dv1394
- parameters in use */
- struct dv1394_init init;
-
- /* the ringbuffer frame that is currently being
- displayed. (-1 if the device is not transmitting anything) */
- int active_frame;
-
- /* index of the first buffer (ahead of active_frame) that
- is ready to be filled with data */
- unsigned int first_clear_frame;
-
- /* how many buffers, including first_clear_buffer, are
- ready to be filled with data */
- unsigned int n_clear_frames;
-
- /* how many times the DV stream has underflowed, overflowed,
- or otherwise encountered an error, since the previous call
- to DV1394_GET_STATUS */
- unsigned int dropped_frames;
-
- /* N.B. The dropped_frames counter is only a lower bound on the actual
- number of dropped frames, with the special case that if dropped_frames
- is zero, then it is guaranteed that NO frames have been dropped
- since the last call to DV1394_GET_STATUS.
- */
-};
-
-
-#endif /* _DV_1394_H */
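
To make the main-loop guidance in the header comment concrete, here is a hedged sketch of a single wait-and-check step for the transmit direction. Error recovery (closing and reopening the device, or re-running DV1394_IOC_INIT) is left to the caller, as the header recommends.

#include <poll.h>
#include <sys/ioctl.h>
#include "dv1394.h"

/* One iteration of the recommended main loop (transmit direction).
 * Returns the number of ringbuffer frames the caller may now refill and
 * submit with DV1394_IOC_SUBMIT_FRAMES, or -1 if the stream broke and
 * the caller should re-initialize the device. */
static int dv1394_wait_for_clear_frames(int fd)
{
	struct pollfd pfd = { .fd = fd, .events = POLLOUT };
	struct dv1394_status status;

	if (poll(&pfd, 1, -1) < 0)
		return -1;

	if (ioctl(fd, DV1394_IOC_GET_STATUS, &status) < 0)
		return -1;

	/* dropped_frames == 0 guarantees nothing was dropped since the
	 * previous DV1394_IOC_GET_STATUS call; anything else means an
	 * underflow/overflow or bus reset, so re-initialize. */
	if (status.dropped_frames > 0)
		return -1;

	/* Frames first_clear_frame .. first_clear_frame + n_clear_frames - 1
	 * (modulo the ringbuffer size) are ready to be filled. */
	return (int)status.n_clear_frames;
}
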
diff --git a/drivers/ieee1394/eth1394.c b/drivers/ieee1394/eth1394.c
deleted file mode 100644
index 63403822330e..000000000000
--- a/drivers/ieee1394/eth1394.c
+++ /dev/null
@@ -1,1720 +0,0 @@
-/*
- * eth1394.c -- IPv4 driver for Linux IEEE-1394 Subsystem
- *
- * Copyright (C) 2001-2003 Ben Collins <bcollins@debian.org>
- * 2000 Bonin Franck <boninf@free.fr>
- * 2003 Steve Kinneberg <kinnebergsteve@acmsystems.com>
- *
- * Mainly based on work by Emanuel Pirker and Andreas E. Bombe
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software Foundation,
- * Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
- */
-
-/*
- * This driver intends to support RFC 2734, which describes a method for
- * transporting IPv4 datagrams over IEEE-1394 serial busses.
- *
- * TODO:
- * RFC 2734 related:
- * - Add MCAP. Limited multicast support exists only for 224.0.0.1 and 224.0.0.2.
- *
- * Non-RFC 2734 related:
- * - Handle fragmented skb's coming from the networking layer.
- * - Move generic GASP reception to core 1394 code
- * - Convert kmalloc/kfree for link fragments to use kmem_cache_* instead
- * - Stability improvements
- * - Performance enhancements
- * - Consider garbage collecting old partial datagrams after X amount of time
- */
-
-#include <linux/module.h>
-
-#include <linux/kernel.h>
-#include <linux/slab.h>
-#include <linux/errno.h>
-#include <linux/types.h>
-#include <linux/delay.h>
-#include <linux/init.h>
-#include <linux/workqueue.h>
-
-#include <linux/netdevice.h>
-#include <linux/inetdevice.h>
-#include <linux/if_arp.h>
-#include <linux/if_ether.h>
-#include <linux/ip.h>
-#include <linux/in.h>
-#include <linux/tcp.h>
-#include <linux/skbuff.h>
-#include <linux/bitops.h>
-#include <asm/uaccess.h>
-#include <asm/delay.h>
-#include <asm/unaligned.h>
-#include <net/arp.h>
-
-#include "config_roms.h"
-#include "csr1212.h"
-#include "eth1394.h"
-#include "highlevel.h"
-#include "ieee1394.h"
-#include "ieee1394_core.h"
-#include "ieee1394_hotplug.h"
-#include "ieee1394_transactions.h"
-#include "ieee1394_types.h"
-#include "iso.h"
-#include "nodemgr.h"
-
-#define ETH1394_PRINT_G(level, fmt, args...) \
- printk(level "%s: " fmt, driver_name, ## args)
-
-#define ETH1394_PRINT(level, dev_name, fmt, args...) \
- printk(level "%s: %s: " fmt, driver_name, dev_name, ## args)
-
-struct fragment_info {
- struct list_head list;
- int offset;
- int len;
-};
-
-struct partial_datagram {
- struct list_head list;
- u16 dgl;
- u16 dg_size;
- __be16 ether_type;
- struct sk_buff *skb;
- char *pbuf;
- struct list_head frag_info;
-};
-
-struct pdg_list {
- struct list_head list; /* partial datagram list per node */
- unsigned int sz; /* partial datagram list size per node */
- spinlock_t lock; /* partial datagram lock */
-};
-
-struct eth1394_host_info {
- struct hpsb_host *host;
- struct net_device *dev;
-};
-
-struct eth1394_node_ref {
- struct unit_directory *ud;
- struct list_head list;
-};
-
-struct eth1394_node_info {
- u16 maxpayload; /* max payload */
- u8 sspd; /* max speed */
- u64 fifo; /* FIFO address */
- struct pdg_list pdg; /* partial RX datagram lists */
- int dgl; /* outgoing datagram label */
-};
-
-static const char driver_name[] = "eth1394";
-
-static struct kmem_cache *packet_task_cache;
-
-static struct hpsb_highlevel eth1394_highlevel;
-
-/* Use common.lf to determine header len */
-static const int hdr_type_len[] = {
- sizeof(struct eth1394_uf_hdr),
- sizeof(struct eth1394_ff_hdr),
- sizeof(struct eth1394_sf_hdr),
- sizeof(struct eth1394_sf_hdr)
-};
-
-static const u16 eth1394_speedto_maxpayload[] = {
-/* S100, S200, S400, S800, S1600, S3200 */
- 512, 1024, 2048, 4096, 4096, 4096
-};
-
-MODULE_AUTHOR("Ben Collins (bcollins@debian.org)");
-MODULE_DESCRIPTION("IEEE 1394 IPv4 Driver (IPv4-over-1394 as per RFC 2734)");
-MODULE_LICENSE("GPL");
-
-/*
- * The max_partial_datagrams parameter is the maximum number of fragmented
- * datagrams per node that eth1394 will keep in memory. Providing an upper
- * bound allows us to limit the amount of memory that partial datagrams
- * consume in the event that some partial datagrams are never completed.
- */
-static int max_partial_datagrams = 25;
-module_param(max_partial_datagrams, int, S_IRUGO | S_IWUSR);
-MODULE_PARM_DESC(max_partial_datagrams,
- "Maximum number of partially received fragmented datagrams "
- "(default = 25).");
-
-
-static int ether1394_header(struct sk_buff *skb, struct net_device *dev,
- unsigned short type, const void *daddr,
- const void *saddr, unsigned len);
-static int ether1394_rebuild_header(struct sk_buff *skb);
-static int ether1394_header_parse(const struct sk_buff *skb,
- unsigned char *haddr);
-static int ether1394_header_cache(const struct neighbour *neigh,
- struct hh_cache *hh);
-static void ether1394_header_cache_update(struct hh_cache *hh,
- const struct net_device *dev,
- const unsigned char *haddr);
-static netdev_tx_t ether1394_tx(struct sk_buff *skb,
- struct net_device *dev);
-static void ether1394_iso(struct hpsb_iso *iso);
-
-static int ether1394_write(struct hpsb_host *host, int srcid, int destid,
- quadlet_t *data, u64 addr, size_t len, u16 flags);
-static void ether1394_add_host(struct hpsb_host *host);
-static void ether1394_remove_host(struct hpsb_host *host);
-static void ether1394_host_reset(struct hpsb_host *host);
-
-/* Function for incoming 1394 packets */
-static const struct hpsb_address_ops addr_ops = {
- .write = ether1394_write,
-};
-
-/* Ieee1394 highlevel driver functions */
-static struct hpsb_highlevel eth1394_highlevel = {
- .name = driver_name,
- .add_host = ether1394_add_host,
- .remove_host = ether1394_remove_host,
- .host_reset = ether1394_host_reset,
-};
-
-static int ether1394_recv_init(struct eth1394_priv *priv)
-{
- unsigned int iso_buf_size;
-
- /* FIXME: rawiso limits us to PAGE_SIZE */
- iso_buf_size = min((unsigned int)PAGE_SIZE,
- 2 * (1U << (priv->host->csr.max_rec + 1)));
-
- priv->iso = hpsb_iso_recv_init(priv->host,
- ETHER1394_GASP_BUFFERS * iso_buf_size,
- ETHER1394_GASP_BUFFERS,
- priv->broadcast_channel,
- HPSB_ISO_DMA_PACKET_PER_BUFFER,
- 1, ether1394_iso);
- if (priv->iso == NULL) {
- ETH1394_PRINT_G(KERN_ERR, "Failed to allocate IR context\n");
- priv->bc_state = ETHER1394_BC_ERROR;
- return -EAGAIN;
- }
-
- if (hpsb_iso_recv_start(priv->iso, -1, (1 << 3), -1) < 0)
- priv->bc_state = ETHER1394_BC_STOPPED;
- else
- priv->bc_state = ETHER1394_BC_RUNNING;
- return 0;
-}
-
-/* This is called after an "ifup" */
-static int ether1394_open(struct net_device *dev)
-{
- struct eth1394_priv *priv = netdev_priv(dev);
- int ret;
-
- if (priv->bc_state == ETHER1394_BC_ERROR) {
- ret = ether1394_recv_init(priv);
- if (ret)
- return ret;
- }
- netif_start_queue(dev);
- return 0;
-}
-
-/* This is called after an "ifdown" */
-static int ether1394_stop(struct net_device *dev)
-{
- /* flush priv->wake */
- flush_scheduled_work();
-
- netif_stop_queue(dev);
- return 0;
-}
-
-/* FIXME: What to do if we timeout? I think a host reset is probably in order,
- * so that's what we do. Should we increment the stat counters too? */
-static void ether1394_tx_timeout(struct net_device *dev)
-{
- struct hpsb_host *host =
- ((struct eth1394_priv *)netdev_priv(dev))->host;
-
- ETH1394_PRINT(KERN_ERR, dev->name, "Timeout, resetting host\n");
- ether1394_host_reset(host);
-}
-
-static inline int ether1394_max_mtu(struct hpsb_host* host)
-{
- return (1 << (host->csr.max_rec + 1))
- - sizeof(union eth1394_hdr) - ETHER1394_GASP_OVERHEAD;
-}
-
-static int ether1394_change_mtu(struct net_device *dev, int new_mtu)
-{
- int max_mtu;
-
- if (new_mtu < 68)
- return -EINVAL;
-
- max_mtu = ether1394_max_mtu(
- ((struct eth1394_priv *)netdev_priv(dev))->host);
- if (new_mtu > max_mtu) {
- ETH1394_PRINT(KERN_INFO, dev->name,
- "Local node constrains MTU to %d\n", max_mtu);
- return -ERANGE;
- }
-
- dev->mtu = new_mtu;
- return 0;
-}
-
-static void purge_partial_datagram(struct list_head *old)
-{
- struct partial_datagram *pd;
- struct list_head *lh, *n;
- struct fragment_info *fi;
-
- pd = list_entry(old, struct partial_datagram, list);
-
- list_for_each_safe(lh, n, &pd->frag_info) {
- fi = list_entry(lh, struct fragment_info, list);
- list_del(lh);
- kfree(fi);
- }
- list_del(old);
- kfree_skb(pd->skb);
- kfree(pd);
-}
-
-/******************************************
- * 1394 bus activity functions
- ******************************************/
-
-static struct eth1394_node_ref *eth1394_find_node(struct list_head *inl,
- struct unit_directory *ud)
-{
- struct eth1394_node_ref *node;
-
- list_for_each_entry(node, inl, list)
- if (node->ud == ud)
- return node;
-
- return NULL;
-}
-
-static struct eth1394_node_ref *eth1394_find_node_guid(struct list_head *inl,
- u64 guid)
-{
- struct eth1394_node_ref *node;
-
- list_for_each_entry(node, inl, list)
- if (node->ud->ne->guid == guid)
- return node;
-
- return NULL;
-}
-
-static struct eth1394_node_ref *eth1394_find_node_nodeid(struct list_head *inl,
- nodeid_t nodeid)
-{
- struct eth1394_node_ref *node;
-
- list_for_each_entry(node, inl, list)
- if (node->ud->ne->nodeid == nodeid)
- return node;
-
- return NULL;
-}
-
-static int eth1394_new_node(struct eth1394_host_info *hi,
- struct unit_directory *ud)
-{
- struct eth1394_priv *priv;
- struct eth1394_node_ref *new_node;
- struct eth1394_node_info *node_info;
-
- new_node = kmalloc(sizeof(*new_node), GFP_KERNEL);
- if (!new_node)
- return -ENOMEM;
-
- node_info = kmalloc(sizeof(*node_info), GFP_KERNEL);
- if (!node_info) {
- kfree(new_node);
- return -ENOMEM;
- }
-
- spin_lock_init(&node_info->pdg.lock);
- INIT_LIST_HEAD(&node_info->pdg.list);
- node_info->pdg.sz = 0;
- node_info->fifo = CSR1212_INVALID_ADDR_SPACE;
-
- dev_set_drvdata(&ud->device, node_info);
- new_node->ud = ud;
-
- priv = netdev_priv(hi->dev);
- list_add_tail(&new_node->list, &priv->ip_node_list);
- return 0;
-}
-
-static int eth1394_probe(struct device *dev)
-{
- struct unit_directory *ud;
- struct eth1394_host_info *hi;
-
- ud = container_of(dev, struct unit_directory, device);
- hi = hpsb_get_hostinfo(&eth1394_highlevel, ud->ne->host);
- if (!hi)
- return -ENOENT;
-
- return eth1394_new_node(hi, ud);
-}
-
-static int eth1394_remove(struct device *dev)
-{
- struct unit_directory *ud;
- struct eth1394_host_info *hi;
- struct eth1394_priv *priv;
- struct eth1394_node_ref *old_node;
- struct eth1394_node_info *node_info;
- struct list_head *lh, *n;
- unsigned long flags;
-
- ud = container_of(dev, struct unit_directory, device);
- hi = hpsb_get_hostinfo(&eth1394_highlevel, ud->ne->host);
- if (!hi)
- return -ENOENT;
-
- priv = netdev_priv(hi->dev);
-
- old_node = eth1394_find_node(&priv->ip_node_list, ud);
- if (!old_node)
- return 0;
-
- list_del(&old_node->list);
- kfree(old_node);
-
- node_info = dev_get_drvdata(&ud->device);
-
- spin_lock_irqsave(&node_info->pdg.lock, flags);
- /* The partial datagram list should be empty, but we'll just
- * make sure anyway... */
- list_for_each_safe(lh, n, &node_info->pdg.list)
- purge_partial_datagram(lh);
- spin_unlock_irqrestore(&node_info->pdg.lock, flags);
-
- kfree(node_info);
- dev_set_drvdata(&ud->device, NULL);
- return 0;
-}
-
-static int eth1394_update(struct unit_directory *ud)
-{
- struct eth1394_host_info *hi;
- struct eth1394_priv *priv;
- struct eth1394_node_ref *node;
-
- hi = hpsb_get_hostinfo(&eth1394_highlevel, ud->ne->host);
- if (!hi)
- return -ENOENT;
-
- priv = netdev_priv(hi->dev);
- node = eth1394_find_node(&priv->ip_node_list, ud);
- if (node)
- return 0;
-
- return eth1394_new_node(hi, ud);
-}
-
-static const struct ieee1394_device_id eth1394_id_table[] = {
- {
- .match_flags = (IEEE1394_MATCH_SPECIFIER_ID |
- IEEE1394_MATCH_VERSION),
- .specifier_id = ETHER1394_GASP_SPECIFIER_ID,
- .version = ETHER1394_GASP_VERSION,
- },
- {}
-};
-
-MODULE_DEVICE_TABLE(ieee1394, eth1394_id_table);
-
-static struct hpsb_protocol_driver eth1394_proto_driver = {
- .name = driver_name,
- .id_table = eth1394_id_table,
- .update = eth1394_update,
- .driver = {
- .probe = eth1394_probe,
- .remove = eth1394_remove,
- },
-};
-
-static void ether1394_reset_priv(struct net_device *dev, int set_mtu)
-{
- unsigned long flags;
- int i;
- struct eth1394_priv *priv = netdev_priv(dev);
- struct hpsb_host *host = priv->host;
- u64 guid = get_unaligned((u64 *)&(host->csr.rom->bus_info_data[3]));
- int max_speed = IEEE1394_SPEED_MAX;
-
- spin_lock_irqsave(&priv->lock, flags);
-
- memset(priv->ud_list, 0, sizeof(priv->ud_list));
- priv->bc_maxpayload = 512;
-
- /* Determine speed limit */
- /* FIXME: This is broken for nodes with link speed < PHY speed,
- * and it is suboptimal for S200B...S800B hardware.
- * The result of nodemgr's speed probe should be used somehow. */
- for (i = 0; i < host->node_count; i++) {
- /* take care of S100B...S400B PHY ports */
- if (host->speed[i] == SELFID_SPEED_UNKNOWN) {
- max_speed = IEEE1394_SPEED_100;
- break;
- }
- if (max_speed > host->speed[i])
- max_speed = host->speed[i];
- }
- priv->bc_sspd = max_speed;
-
- if (set_mtu) {
- /* Use the RFC 2734 default 1500 octets or the maximum payload
- * as initial MTU */
- dev->mtu = min(1500, ether1394_max_mtu(host));
-
- /* Set our hardware address while we're at it */
- memcpy(dev->dev_addr, &guid, sizeof(u64));
- memset(dev->broadcast, 0xff, sizeof(u64));
- }
-
- spin_unlock_irqrestore(&priv->lock, flags);
-}
-
-static const struct header_ops ether1394_header_ops = {
- .create = ether1394_header,
- .rebuild = ether1394_rebuild_header,
- .cache = ether1394_header_cache,
- .cache_update = ether1394_header_cache_update,
- .parse = ether1394_header_parse,
-};
-
-static const struct net_device_ops ether1394_netdev_ops = {
- .ndo_open = ether1394_open,
- .ndo_stop = ether1394_stop,
- .ndo_start_xmit = ether1394_tx,
- .ndo_tx_timeout = ether1394_tx_timeout,
- .ndo_change_mtu = ether1394_change_mtu,
-};
-
-static void ether1394_init_dev(struct net_device *dev)
-{
-
- dev->header_ops = &ether1394_header_ops;
- dev->netdev_ops = &ether1394_netdev_ops;
-
- dev->watchdog_timeo = ETHER1394_TIMEOUT;
- dev->flags = IFF_BROADCAST | IFF_MULTICAST;
- dev->features = NETIF_F_HIGHDMA;
- dev->addr_len = ETH1394_ALEN;
- dev->hard_header_len = ETH1394_HLEN;
- dev->type = ARPHRD_IEEE1394;
-
- /* FIXME: This value was copied from ether_setup(). Is it too much? */
- dev->tx_queue_len = 1000;
-}
-
-/*
- * Wake the queue up after commonly encountered transmit failure conditions are
- * hopefully over. Currently only tlabel exhaustion is accounted for.
- */
-static void ether1394_wake_queue(struct work_struct *work)
-{
- struct eth1394_priv *priv;
- struct hpsb_packet *packet;
-
- priv = container_of(work, struct eth1394_priv, wake);
- packet = hpsb_alloc_packet(0);
-
- /* This is really bad, but unjam the queue anyway. */
- if (!packet)
- goto out;
-
- packet->host = priv->host;
- packet->node_id = priv->wake_node;
- /*
- * A transaction label is all we really want. If we get one, it almost
- * always means we can get a lot more because the ieee1394 core recycled
- * a whole batch of tlabels, at last.
- */
- if (hpsb_get_tlabel(packet) == 0)
- hpsb_free_tlabel(packet);
-
- hpsb_free_packet(packet);
-out:
- netif_wake_queue(priv->wake_dev);
-}
-
-/*
- * This function is called every time a card is found. It is generally called
- * when the module is installed. This is where we add all of our ethernet
- * devices, one for each host.
- */
-static void ether1394_add_host(struct hpsb_host *host)
-{
- struct eth1394_host_info *hi = NULL;
- struct net_device *dev = NULL;
- struct eth1394_priv *priv;
- u64 fifo_addr;
-
- if (hpsb_config_rom_ip1394_add(host) != 0) {
- ETH1394_PRINT_G(KERN_ERR, "Can't add IP-over-1394 ROM entry\n");
- return;
- }
-
- fifo_addr = hpsb_allocate_and_register_addrspace(
- &eth1394_highlevel, host, &addr_ops,
- ETHER1394_REGION_ADDR_LEN, ETHER1394_REGION_ADDR_LEN,
- CSR1212_INVALID_ADDR_SPACE, CSR1212_INVALID_ADDR_SPACE);
- if (fifo_addr == CSR1212_INVALID_ADDR_SPACE) {
- ETH1394_PRINT_G(KERN_ERR, "Cannot register CSR space\n");
- hpsb_config_rom_ip1394_remove(host);
- return;
- }
-
- dev = alloc_netdev(sizeof(*priv), "eth%d", ether1394_init_dev);
- if (dev == NULL) {
- ETH1394_PRINT_G(KERN_ERR, "Out of memory\n");
- goto out;
- }
-
- SET_NETDEV_DEV(dev, &host->device);
-
- priv = netdev_priv(dev);
- INIT_LIST_HEAD(&priv->ip_node_list);
- spin_lock_init(&priv->lock);
- priv->host = host;
- priv->local_fifo = fifo_addr;
- INIT_WORK(&priv->wake, ether1394_wake_queue);
- priv->wake_dev = dev;
-
- hi = hpsb_create_hostinfo(&eth1394_highlevel, host, sizeof(*hi));
- if (hi == NULL) {
- ETH1394_PRINT_G(KERN_ERR, "Out of memory\n");
- goto out;
- }
-
- ether1394_reset_priv(dev, 1);
-
- if (register_netdev(dev)) {
- ETH1394_PRINT_G(KERN_ERR, "Cannot register the driver\n");
- goto out;
- }
-
- ETH1394_PRINT(KERN_INFO, dev->name, "IPv4 over IEEE 1394 (fw-host%d)\n",
- host->id);
-
- hi->host = host;
- hi->dev = dev;
-
- /* Ignore validity in hopes that it will be set in the future. It'll
- * be checked when the eth device is opened. */
- priv->broadcast_channel = host->csr.broadcast_channel & 0x3f;
-
- ether1394_recv_init(priv);
- return;
-out:
- if (dev)
- free_netdev(dev);
- if (hi)
- hpsb_destroy_hostinfo(&eth1394_highlevel, host);
- hpsb_unregister_addrspace(&eth1394_highlevel, host, fifo_addr);
- hpsb_config_rom_ip1394_remove(host);
-}
-
-/* Remove a card from our list */
-static void ether1394_remove_host(struct hpsb_host *host)
-{
- struct eth1394_host_info *hi;
- struct eth1394_priv *priv;
-
- hi = hpsb_get_hostinfo(&eth1394_highlevel, host);
- if (!hi)
- return;
- priv = netdev_priv(hi->dev);
- hpsb_unregister_addrspace(&eth1394_highlevel, host, priv->local_fifo);
- hpsb_config_rom_ip1394_remove(host);
- if (priv->iso)
- hpsb_iso_shutdown(priv->iso);
- unregister_netdev(hi->dev);
- free_netdev(hi->dev);
-}
-
-/* A bus reset happened */
-static void ether1394_host_reset(struct hpsb_host *host)
-{
- struct eth1394_host_info *hi;
- struct eth1394_priv *priv;
- struct net_device *dev;
- struct list_head *lh, *n;
- struct eth1394_node_ref *node;
- struct eth1394_node_info *node_info;
- unsigned long flags;
-
- hi = hpsb_get_hostinfo(&eth1394_highlevel, host);
-
- /* This can happen for hosts that we don't use */
- if (!hi)
- return;
-
- dev = hi->dev;
- priv = netdev_priv(dev);
-
- /* Reset our private host data, but not our MTU */
- netif_stop_queue(dev);
- ether1394_reset_priv(dev, 0);
-
- list_for_each_entry(node, &priv->ip_node_list, list) {
- node_info = dev_get_drvdata(&node->ud->device);
-
- spin_lock_irqsave(&node_info->pdg.lock, flags);
-
- list_for_each_safe(lh, n, &node_info->pdg.list)
- purge_partial_datagram(lh);
-
- INIT_LIST_HEAD(&(node_info->pdg.list));
- node_info->pdg.sz = 0;
-
- spin_unlock_irqrestore(&node_info->pdg.lock, flags);
- }
-
- netif_wake_queue(dev);
-}
-
-/******************************************
- * HW Header net device functions
- ******************************************/
-/* These functions have been adapted from net/ethernet/eth.c */
-
-/* Create a fake MAC header for an arbitrary protocol layer.
- * saddr=NULL means use device source address
- * daddr=NULL means leave destination address (eg unresolved arp). */
-static int ether1394_header(struct sk_buff *skb, struct net_device *dev,
- unsigned short type, const void *daddr,
- const void *saddr, unsigned len)
-{
- struct eth1394hdr *eth =
- (struct eth1394hdr *)skb_push(skb, ETH1394_HLEN);
-
- eth->h_proto = htons(type);
-
- if (dev->flags & (IFF_LOOPBACK | IFF_NOARP)) {
- memset(eth->h_dest, 0, dev->addr_len);
- return dev->hard_header_len;
- }
-
- if (daddr) {
- memcpy(eth->h_dest, daddr, dev->addr_len);
- return dev->hard_header_len;
- }
-
- return -dev->hard_header_len;
-}
-
-/* Rebuild the faked MAC header. This is called after an ARP
- * (or in future other address resolution) has completed on this
- * sk_buff. We now let ARP fill in the other fields.
- *
- * This routine CANNOT use cached dst->neigh!
- * Really, it is used only when dst->neigh is wrong.
- */
-static int ether1394_rebuild_header(struct sk_buff *skb)
-{
- struct eth1394hdr *eth = (struct eth1394hdr *)skb->data;
-
- if (eth->h_proto == htons(ETH_P_IP))
- return arp_find((unsigned char *)&eth->h_dest, skb);
-
- ETH1394_PRINT(KERN_DEBUG, skb->dev->name,
- "unable to resolve type %04x addresses\n",
- ntohs(eth->h_proto));
- return 0;
-}
-
-static int ether1394_header_parse(const struct sk_buff *skb,
- unsigned char *haddr)
-{
- memcpy(haddr, skb->dev->dev_addr, ETH1394_ALEN);
- return ETH1394_ALEN;
-}
-
-static int ether1394_header_cache(const struct neighbour *neigh,
- struct hh_cache *hh)
-{
- __be16 type = hh->hh_type;
- struct net_device *dev = neigh->dev;
- struct eth1394hdr *eth =
- (struct eth1394hdr *)((u8 *)hh->hh_data + 16 - ETH1394_HLEN);
-
- if (type == htons(ETH_P_802_3))
- return -1;
-
- eth->h_proto = type;
- memcpy(eth->h_dest, neigh->ha, dev->addr_len);
-
- hh->hh_len = ETH1394_HLEN;
- return 0;
-}
-
-/* Called by Address Resolution module to notify changes in address. */
-static void ether1394_header_cache_update(struct hh_cache *hh,
- const struct net_device *dev,
- const unsigned char * haddr)
-{
- memcpy((u8 *)hh->hh_data + 16 - ETH1394_HLEN, haddr, dev->addr_len);
-}
-
-/******************************************
- * Datagram reception code
- ******************************************/
-
-/* Copied from net/ethernet/eth.c */
-static __be16 ether1394_type_trans(struct sk_buff *skb, struct net_device *dev)
-{
- struct eth1394hdr *eth;
- unsigned char *rawp;
-
- skb_reset_mac_header(skb);
- skb_pull(skb, ETH1394_HLEN);
- eth = eth1394_hdr(skb);
-
- if (*eth->h_dest & 1) {
- if (memcmp(eth->h_dest, dev->broadcast, dev->addr_len) == 0)
- skb->pkt_type = PACKET_BROADCAST;
-#if 0
- else
- skb->pkt_type = PACKET_MULTICAST;
-#endif
- } else {
- if (memcmp(eth->h_dest, dev->dev_addr, dev->addr_len))
- skb->pkt_type = PACKET_OTHERHOST;
- }
-
- if (ntohs(eth->h_proto) >= 1536)
- return eth->h_proto;
-
- rawp = skb->data;
-
- if (*(unsigned short *)rawp == 0xFFFF)
- return htons(ETH_P_802_3);
-
- return htons(ETH_P_802_2);
-}
-
-/* Parse an encapsulated IP1394 header into an ethernet frame packet.
- * We also perform ARP translation here, if need be. */
-static __be16 ether1394_parse_encap(struct sk_buff *skb, struct net_device *dev,
- nodeid_t srcid, nodeid_t destid,
- __be16 ether_type)
-{
- struct eth1394_priv *priv = netdev_priv(dev);
- __be64 dest_hw;
- __be16 ret = 0;
-
- /* Setup our hw addresses. We use these to build the ethernet header. */
- if (destid == (LOCAL_BUS | ALL_NODES))
- dest_hw = ~cpu_to_be64(0); /* broadcast */
- else
- dest_hw = cpu_to_be64((u64)priv->host->csr.guid_hi << 32 |
- priv->host->csr.guid_lo);
-
- /* If this is an ARP packet, convert it. First, we want to make
- * use of some of the fields, since they tell us a little bit
- * about the sending machine. */
- if (ether_type == htons(ETH_P_ARP)) {
- struct eth1394_arp *arp1394 = (struct eth1394_arp *)skb->data;
- struct arphdr *arp = (struct arphdr *)skb->data;
- unsigned char *arp_ptr = (unsigned char *)(arp + 1);
- u64 fifo_addr = (u64)ntohs(arp1394->fifo_hi) << 32 |
- ntohl(arp1394->fifo_lo);
- u8 max_rec = min(priv->host->csr.max_rec,
- (u8)(arp1394->max_rec));
- int sspd = arp1394->sspd;
- u16 maxpayload;
- struct eth1394_node_ref *node;
- struct eth1394_node_info *node_info;
- __be64 guid;
-
- /* Sanity check. MacOSX seems to be sending us 131 in this
- * field (at least on my Panther G5). Not sure why. */
- if (sspd > 5 || sspd < 0)
- sspd = 0;
-
- maxpayload = min(eth1394_speedto_maxpayload[sspd],
- (u16)(1 << (max_rec + 1)));
-
- guid = get_unaligned(&arp1394->s_uniq_id);
- node = eth1394_find_node_guid(&priv->ip_node_list,
- be64_to_cpu(guid));
- if (!node)
- return cpu_to_be16(0);
-
- node_info = dev_get_drvdata(&node->ud->device);
-
- /* Update our speed/payload/fifo_offset table */
- node_info->maxpayload = maxpayload;
- node_info->sspd = sspd;
- node_info->fifo = fifo_addr;
-
- /* Now that we're done with the 1394 specific stuff, we'll
- * need to alter some of the data. Believe it or not, all
- * that needs to be done is: the sender IP address is moved,
- * the destination hardware address is stuffed in, and the
- * hardware address length is set to 8.
- *
- * IMPORTANT: The code below overwrites 1394 specific data
- * needed above so keep the munging of the data for the
- * higher level IP stack last. */
-
- arp->ar_hln = 8;
- arp_ptr += arp->ar_hln; /* skip over sender unique id */
- *(u32 *)arp_ptr = arp1394->sip; /* move sender IP addr */
- arp_ptr += arp->ar_pln; /* skip over sender IP addr */
-
- if (arp->ar_op == htons(ARPOP_REQUEST))
- memset(arp_ptr, 0, sizeof(u64));
- else
- memcpy(arp_ptr, dev->dev_addr, sizeof(u64));
- }
-
- /* Now add the ethernet header. */
- if (dev_hard_header(skb, dev, ntohs(ether_type), &dest_hw, NULL,
- skb->len) >= 0)
- ret = ether1394_type_trans(skb, dev);
-
- return ret;
-}
-
-static int fragment_overlap(struct list_head *frag_list, int offset, int len)
-{
- struct fragment_info *fi;
- int end = offset + len;
-
- list_for_each_entry(fi, frag_list, list)
- if (offset < fi->offset + fi->len && end > fi->offset)
- return 1;
-
- return 0;
-}
-
-static struct list_head *find_partial_datagram(struct list_head *pdgl, int dgl)
-{
- struct partial_datagram *pd;
-
- list_for_each_entry(pd, pdgl, list)
- if (pd->dgl == dgl)
- return &pd->list;
-
- return NULL;
-}
-
-/* Assumes that new fragment does not overlap any existing fragments */
-static int new_fragment(struct list_head *frag_info, int offset, int len)
-{
- struct list_head *lh;
- struct fragment_info *fi, *fi2, *new;
-
- list_for_each(lh, frag_info) {
- fi = list_entry(lh, struct fragment_info, list);
- if (fi->offset + fi->len == offset) {
- /* The new fragment can be tacked on to the end */
- fi->len += len;
- /* Did the new fragment plug a hole? */
- fi2 = list_entry(lh->next, struct fragment_info, list);
- if (fi->offset + fi->len == fi2->offset) {
- /* glue fragments together */
- fi->len += fi2->len;
- list_del(lh->next);
- kfree(fi2);
- }
- return 0;
- } else if (offset + len == fi->offset) {
- /* The new fragment can be tacked on to the beginning */
- fi->offset = offset;
- fi->len += len;
- /* Did the new fragment plug a hole? */
- fi2 = list_entry(lh->prev, struct fragment_info, list);
- if (fi2->offset + fi2->len == fi->offset) {
- /* glue fragments together */
- fi2->len += fi->len;
- list_del(lh);
- kfree(fi);
- }
- return 0;
- } else if (offset > fi->offset + fi->len) {
- break;
- } else if (offset + len < fi->offset) {
- lh = lh->prev;
- break;
- }
- }
-
- new = kmalloc(sizeof(*new), GFP_ATOMIC);
- if (!new)
- return -ENOMEM;
-
- new->offset = offset;
- new->len = len;
-
- list_add(&new->list, lh);
- return 0;
-}
-
-static int new_partial_datagram(struct net_device *dev, struct list_head *pdgl,
- int dgl, int dg_size, char *frag_buf,
- int frag_off, int frag_len)
-{
- struct partial_datagram *new;
-
- new = kmalloc(sizeof(*new), GFP_ATOMIC);
- if (!new)
- return -ENOMEM;
-
- INIT_LIST_HEAD(&new->frag_info);
-
- if (new_fragment(&new->frag_info, frag_off, frag_len) < 0) {
- kfree(new);
- return -ENOMEM;
- }
-
- new->dgl = dgl;
- new->dg_size = dg_size;
-
- new->skb = dev_alloc_skb(dg_size + dev->hard_header_len + 15);
- if (!new->skb) {
- struct fragment_info *fi = list_entry(new->frag_info.next,
- struct fragment_info,
- list);
- kfree(fi);
- kfree(new);
- return -ENOMEM;
- }
-
- skb_reserve(new->skb, (dev->hard_header_len + 15) & ~15);
- new->pbuf = skb_put(new->skb, dg_size);
- memcpy(new->pbuf + frag_off, frag_buf, frag_len);
-
- list_add(&new->list, pdgl);
- return 0;
-}
-
-static int update_partial_datagram(struct list_head *pdgl, struct list_head *lh,
- char *frag_buf, int frag_off, int frag_len)
-{
- struct partial_datagram *pd =
- list_entry(lh, struct partial_datagram, list);
-
- if (new_fragment(&pd->frag_info, frag_off, frag_len) < 0)
- return -ENOMEM;
-
- memcpy(pd->pbuf + frag_off, frag_buf, frag_len);
-
- /* Move list entry to beginning of list so that oldest partial
- * datagrams percolate to the end of the list */
- list_move(lh, pdgl);
- return 0;
-}
-
-static int is_datagram_complete(struct list_head *lh, int dg_size)
-{
- struct partial_datagram *pd;
- struct fragment_info *fi;
-
- pd = list_entry(lh, struct partial_datagram, list);
- fi = list_entry(pd->frag_info.next, struct fragment_info, list);
-
- return (fi->len == dg_size);
-}
-
-/* Packet reception. We convert the IP1394 encapsulation header to an
- * ethernet header, and fill it with some of our other fields. This is
- * an incoming packet from the 1394 bus. */
-static int ether1394_data_handler(struct net_device *dev, int srcid, int destid,
- char *buf, int len)
-{
- struct sk_buff *skb;
- unsigned long flags;
- struct eth1394_priv *priv = netdev_priv(dev);
- union eth1394_hdr *hdr = (union eth1394_hdr *)buf;
- __be16 ether_type = cpu_to_be16(0); /* initialized to clear warning */
- int hdr_len;
- struct unit_directory *ud = priv->ud_list[NODEID_TO_NODE(srcid)];
- struct eth1394_node_info *node_info;
-
- if (!ud) {
- struct eth1394_node_ref *node;
- node = eth1394_find_node_nodeid(&priv->ip_node_list, srcid);
- if (unlikely(!node)) {
- HPSB_PRINT(KERN_ERR, "ether1394 rx: sender nodeid "
- "lookup failure: " NODE_BUS_FMT,
- NODE_BUS_ARGS(priv->host, srcid));
- dev->stats.rx_dropped++;
- return -1;
- }
- ud = node->ud;
-
- priv->ud_list[NODEID_TO_NODE(srcid)] = ud;
- }
-
- node_info = dev_get_drvdata(&ud->device);
-
- /* First, did we receive a fragmented or unfragmented datagram? */
- hdr->words.word1 = ntohs(hdr->words.word1);
-
- hdr_len = hdr_type_len[hdr->common.lf];
-
- if (hdr->common.lf == ETH1394_HDR_LF_UF) {
- /* An unfragmented datagram has been received by the ieee1394
- * bus. Build an skbuff around it so we can pass it to the
- * high level network layer. */
-
- skb = dev_alloc_skb(len + dev->hard_header_len + 15);
- if (unlikely(!skb)) {
- ETH1394_PRINT_G(KERN_ERR, "Out of memory\n");
- dev->stats.rx_dropped++;
- return -1;
- }
- skb_reserve(skb, (dev->hard_header_len + 15) & ~15);
- memcpy(skb_put(skb, len - hdr_len), buf + hdr_len,
- len - hdr_len);
- ether_type = hdr->uf.ether_type;
- } else {
- /* A datagram fragment has been received, now the fun begins. */
-
- struct list_head *pdgl, *lh;
- struct partial_datagram *pd;
- int fg_off;
- int fg_len = len - hdr_len;
- int dg_size;
- int dgl;
- int retval;
- struct pdg_list *pdg = &(node_info->pdg);
-
- hdr->words.word3 = ntohs(hdr->words.word3);
- /* The 4th header word is reserved so no need to do ntohs() */
-
- if (hdr->common.lf == ETH1394_HDR_LF_FF) {
- ether_type = hdr->ff.ether_type;
- dgl = hdr->ff.dgl;
- dg_size = hdr->ff.dg_size + 1;
- fg_off = 0;
- } else {
- hdr->words.word2 = ntohs(hdr->words.word2);
- dgl = hdr->sf.dgl;
- dg_size = hdr->sf.dg_size + 1;
- fg_off = hdr->sf.fg_off;
- }
- spin_lock_irqsave(&pdg->lock, flags);
-
- pdgl = &(pdg->list);
- lh = find_partial_datagram(pdgl, dgl);
-
- if (lh == NULL) {
- while (pdg->sz >= max_partial_datagrams) {
- /* remove the oldest */
- purge_partial_datagram(pdgl->prev);
- pdg->sz--;
- }
-
- retval = new_partial_datagram(dev, pdgl, dgl, dg_size,
- buf + hdr_len, fg_off,
- fg_len);
- if (retval < 0) {
- spin_unlock_irqrestore(&pdg->lock, flags);
- goto bad_proto;
- }
- pdg->sz++;
- lh = find_partial_datagram(pdgl, dgl);
- } else {
- pd = list_entry(lh, struct partial_datagram, list);
-
- if (fragment_overlap(&pd->frag_info, fg_off, fg_len)) {
- /* Overlapping fragments, obliterate old
- * datagram and start new one. */
- purge_partial_datagram(lh);
- retval = new_partial_datagram(dev, pdgl, dgl,
- dg_size,
- buf + hdr_len,
- fg_off, fg_len);
- if (retval < 0) {
- pdg->sz--;
- spin_unlock_irqrestore(&pdg->lock, flags);
- goto bad_proto;
- }
- } else {
- retval = update_partial_datagram(pdgl, lh,
- buf + hdr_len,
- fg_off, fg_len);
- if (retval < 0) {
- /* Couldn't save off fragment anyway
- * so might as well obliterate the
- * datagram now. */
- purge_partial_datagram(lh);
- pdg->sz--;
- spin_unlock_irqrestore(&pdg->lock, flags);
- goto bad_proto;
- }
- } /* fragment overlap */
- } /* new datagram or add to existing one */
-
- pd = list_entry(lh, struct partial_datagram, list);
-
- if (hdr->common.lf == ETH1394_HDR_LF_FF)
- pd->ether_type = ether_type;
-
- if (is_datagram_complete(lh, dg_size)) {
- ether_type = pd->ether_type;
- pdg->sz--;
- skb = skb_get(pd->skb);
- purge_partial_datagram(lh);
- spin_unlock_irqrestore(&pdg->lock, flags);
- } else {
- /* Datagram is not complete, we're done for the
- * moment. */
- spin_unlock_irqrestore(&pdg->lock, flags);
- return 0;
- }
- } /* unfragmented datagram or fragmented one */
-
- /* Write metadata, and then pass to the receive level */
- skb->dev = dev;
- skb->ip_summed = CHECKSUM_UNNECESSARY; /* don't check it */
-
- /* Parse the encapsulation header. This actually does the job of
- * converting to an ethernet frame header, as well as ARP
- * conversion if needed. ARP conversion is easier in this
- * direction, since we are using ethernet as our backend. */
- skb->protocol = ether1394_parse_encap(skb, dev, srcid, destid,
- ether_type);
-
- spin_lock_irqsave(&priv->lock, flags);
-
- if (!skb->protocol) {
- dev->stats.rx_errors++;
- dev->stats.rx_dropped++;
- dev_kfree_skb_any(skb);
- } else if (netif_rx(skb) == NET_RX_DROP) {
- dev->stats.rx_errors++;
- dev->stats.rx_dropped++;
- } else {
- dev->stats.rx_packets++;
- dev->stats.rx_bytes += skb->len;
- }
-
- spin_unlock_irqrestore(&priv->lock, flags);
-
-bad_proto:
- if (netif_queue_stopped(dev))
- netif_wake_queue(dev);
-
- return 0;
-}
-
-static int ether1394_write(struct hpsb_host *host, int srcid, int destid,
- quadlet_t *data, u64 addr, size_t len, u16 flags)
-{
- struct eth1394_host_info *hi;
-
- hi = hpsb_get_hostinfo(&eth1394_highlevel, host);
- if (unlikely(!hi)) {
- ETH1394_PRINT_G(KERN_ERR, "No net device at fw-host%d\n",
- host->id);
- return RCODE_ADDRESS_ERROR;
- }
-
- if (ether1394_data_handler(hi->dev, srcid, destid, (char*)data, len))
- return RCODE_ADDRESS_ERROR;
- else
- return RCODE_COMPLETE;
-}
-
-static void ether1394_iso(struct hpsb_iso *iso)
-{
- __be32 *data;
- char *buf;
- struct eth1394_host_info *hi;
- struct net_device *dev;
- unsigned int len;
- u32 specifier_id;
- u16 source_id;
- int i;
- int nready;
-
- hi = hpsb_get_hostinfo(&eth1394_highlevel, iso->host);
- if (unlikely(!hi)) {
- ETH1394_PRINT_G(KERN_ERR, "No net device at fw-host%d\n",
- iso->host->id);
- return;
- }
-
- dev = hi->dev;
-
- nready = hpsb_iso_n_ready(iso);
- for (i = 0; i < nready; i++) {
- struct hpsb_iso_packet_info *info =
- &iso->infos[(iso->first_packet + i) % iso->buf_packets];
- data = (__be32 *)(iso->data_buf.kvirt + info->offset);
-
- /* skip over GASP header */
- buf = (char *)data + 8;
- len = info->len - 8;
-
- specifier_id = (be32_to_cpu(data[0]) & 0xffff) << 8 |
- (be32_to_cpu(data[1]) & 0xff000000) >> 24;
- source_id = be32_to_cpu(data[0]) >> 16;
-
- if (info->channel != (iso->host->csr.broadcast_channel & 0x3f)
- || specifier_id != ETHER1394_GASP_SPECIFIER_ID) {
- /* This packet is not for us */
- continue;
- }
- ether1394_data_handler(dev, source_id, LOCAL_BUS | ALL_NODES,
- buf, len);
- }
-
- hpsb_iso_recv_release_packets(iso, i);
-
-}
-
-/******************************************
- * Datagram transmission code
- ******************************************/
-
-/* Convert a standard ARP packet to 1394 ARP. The first 8 bytes (the entire
- * arphdr) is the same format as the ip1394 header, so they overlap. The rest
- * needs to be munged a bit. The remainder of the arphdr is formatted based
- * on hwaddr len and ipaddr len. We know what they'll be, so it's easy to
- * judge.
- *
- * Now that the EUI is used for the hardware address all we need to do to make
- * this work for 1394 is to insert 2 quadlets that contain max_rec size,
- * speed, and unicast FIFO address information between the sender_unique_id
- * and the IP addresses.
- */
-static void ether1394_arp_to_1394arp(struct sk_buff *skb,
- struct net_device *dev)
-{
- struct eth1394_priv *priv = netdev_priv(dev);
- struct arphdr *arp = (struct arphdr *)skb->data;
- unsigned char *arp_ptr = (unsigned char *)(arp + 1);
- struct eth1394_arp *arp1394 = (struct eth1394_arp *)skb->data;
-
- arp1394->hw_addr_len = 16;
- arp1394->sip = *(u32*)(arp_ptr + ETH1394_ALEN);
- arp1394->max_rec = priv->host->csr.max_rec;
- arp1394->sspd = priv->host->csr.lnk_spd;
- arp1394->fifo_hi = htons(priv->local_fifo >> 32);
- arp1394->fifo_lo = htonl(priv->local_fifo & ~0x0);
-}
-
-/* We need to encapsulate the standard header with our own. We use the
- * ethernet header's proto for our own. */
-static unsigned int ether1394_encapsulate_prep(unsigned int max_payload,
- __be16 proto,
- union eth1394_hdr *hdr,
- u16 dg_size, u16 dgl)
-{
- unsigned int adj_max_payload =
- max_payload - hdr_type_len[ETH1394_HDR_LF_UF];
-
- /* Does it all fit in one packet? */
- if (dg_size <= adj_max_payload) {
- hdr->uf.lf = ETH1394_HDR_LF_UF;
- hdr->uf.ether_type = proto;
- } else {
- hdr->ff.lf = ETH1394_HDR_LF_FF;
- hdr->ff.ether_type = proto;
- hdr->ff.dg_size = dg_size - 1;
- hdr->ff.dgl = dgl;
- adj_max_payload = max_payload - hdr_type_len[ETH1394_HDR_LF_FF];
- }
- return DIV_ROUND_UP(dg_size, adj_max_payload);
-}
-
-static unsigned int ether1394_encapsulate(struct sk_buff *skb,
- unsigned int max_payload,
- union eth1394_hdr *hdr)
-{
- union eth1394_hdr *bufhdr;
- int ftype = hdr->common.lf;
- int hdrsz = hdr_type_len[ftype];
- unsigned int adj_max_payload = max_payload - hdrsz;
-
- switch (ftype) {
- case ETH1394_HDR_LF_UF:
- bufhdr = (union eth1394_hdr *)skb_push(skb, hdrsz);
- bufhdr->words.word1 = htons(hdr->words.word1);
- bufhdr->words.word2 = hdr->words.word2;
- break;
-
- case ETH1394_HDR_LF_FF:
- bufhdr = (union eth1394_hdr *)skb_push(skb, hdrsz);
- bufhdr->words.word1 = htons(hdr->words.word1);
- bufhdr->words.word2 = hdr->words.word2;
- bufhdr->words.word3 = htons(hdr->words.word3);
- bufhdr->words.word4 = 0;
-
- /* Set frag type here for future interior fragments */
- hdr->common.lf = ETH1394_HDR_LF_IF;
- hdr->sf.fg_off = 0;
- break;
-
- default:
- hdr->sf.fg_off += adj_max_payload;
- bufhdr = (union eth1394_hdr *)skb_pull(skb, adj_max_payload);
- if (max_payload >= skb->len)
- hdr->common.lf = ETH1394_HDR_LF_LF;
- bufhdr->words.word1 = htons(hdr->words.word1);
- bufhdr->words.word2 = htons(hdr->words.word2);
- bufhdr->words.word3 = htons(hdr->words.word3);
- bufhdr->words.word4 = 0;
- }
- return min(max_payload, skb->len);
-}
-
-static struct hpsb_packet *ether1394_alloc_common_packet(struct hpsb_host *host)
-{
- struct hpsb_packet *p;
-
- p = hpsb_alloc_packet(0);
- if (p) {
- p->host = host;
- p->generation = get_hpsb_generation(host);
- p->type = hpsb_async;
- }
- return p;
-}
-
-static int ether1394_prep_write_packet(struct hpsb_packet *p,
- struct hpsb_host *host, nodeid_t node,
- u64 addr, void *data, int tx_len)
-{
- p->node_id = node;
-
- if (hpsb_get_tlabel(p))
- return -EAGAIN;
-
- p->tcode = TCODE_WRITEB;
- p->header_size = 16;
- p->expect_response = 1;
- p->header[0] =
- p->node_id << 16 | p->tlabel << 10 | 1 << 8 | TCODE_WRITEB << 4;
- p->header[1] = host->node_id << 16 | addr >> 32;
- p->header[2] = addr & 0xffffffff;
- p->header[3] = tx_len << 16;
- p->data_size = (tx_len + 3) & ~3;
- p->data = data;
-
- return 0;
-}
-
-static void ether1394_prep_gasp_packet(struct hpsb_packet *p,
- struct eth1394_priv *priv,
- struct sk_buff *skb, int length)
-{
- p->header_size = 4;
- p->tcode = TCODE_STREAM_DATA;
-
- p->header[0] = length << 16 | 3 << 14 | priv->broadcast_channel << 8 |
- TCODE_STREAM_DATA << 4;
- p->data_size = length;
- p->data = (quadlet_t *)skb->data - 2;
- p->data[0] = cpu_to_be32(priv->host->node_id << 16 |
- ETHER1394_GASP_SPECIFIER_ID_HI);
- p->data[1] = cpu_to_be32(ETHER1394_GASP_SPECIFIER_ID_LO << 24 |
- ETHER1394_GASP_VERSION);
-
- p->speed_code = priv->bc_sspd;
-
- /* prevent hpsb_send_packet() from overriding our speed code */
- p->node_id = LOCAL_BUS | ALL_NODES;
-}
-
-static void ether1394_free_packet(struct hpsb_packet *packet)
-{
- if (packet->tcode != TCODE_STREAM_DATA)
- hpsb_free_tlabel(packet);
- hpsb_free_packet(packet);
-}
-
-static void ether1394_complete_cb(void *__ptask);
-
-static int ether1394_send_packet(struct packet_task *ptask, unsigned int tx_len)
-{
- struct eth1394_priv *priv = ptask->priv;
- struct hpsb_packet *packet = NULL;
-
- packet = ether1394_alloc_common_packet(priv->host);
- if (!packet)
- return -ENOMEM;
-
- if (ptask->tx_type == ETH1394_GASP) {
- int length = tx_len + 2 * sizeof(quadlet_t);
-
- ether1394_prep_gasp_packet(packet, priv, ptask->skb, length);
- } else if (ether1394_prep_write_packet(packet, priv->host,
- ptask->dest_node,
- ptask->addr, ptask->skb->data,
- tx_len)) {
- hpsb_free_packet(packet);
- return -EAGAIN;
- }
-
- ptask->packet = packet;
- hpsb_set_packet_complete_task(ptask->packet, ether1394_complete_cb,
- ptask);
-
- if (hpsb_send_packet(packet) < 0) {
- ether1394_free_packet(packet);
- return -EIO;
- }
-
- return 0;
-}
-
-/* Task function to be run when a datagram transmission is completed */
-static void ether1394_dg_complete(struct packet_task *ptask, int fail)
-{
- struct sk_buff *skb = ptask->skb;
- struct net_device *dev = skb->dev;
- struct eth1394_priv *priv = netdev_priv(dev);
- unsigned long flags;
-
- /* Statistics */
- spin_lock_irqsave(&priv->lock, flags);
- if (fail) {
- dev->stats.tx_dropped++;
- dev->stats.tx_errors++;
- } else {
- dev->stats.tx_bytes += skb->len;
- dev->stats.tx_packets++;
- }
- spin_unlock_irqrestore(&priv->lock, flags);
-
- dev_kfree_skb_any(skb);
- kmem_cache_free(packet_task_cache, ptask);
-}
-
-/* Callback for when a packet has been sent and the status of that packet is
- * known */
-static void ether1394_complete_cb(void *__ptask)
-{
- struct packet_task *ptask = (struct packet_task *)__ptask;
- struct hpsb_packet *packet = ptask->packet;
- int fail = 0;
-
- if (packet->tcode != TCODE_STREAM_DATA)
- fail = hpsb_packet_success(packet);
-
- ether1394_free_packet(packet);
-
- ptask->outstanding_pkts--;
- if (ptask->outstanding_pkts > 0 && !fail) {
- int tx_len, err;
-
- /* Add the encapsulation header to the fragment */
- tx_len = ether1394_encapsulate(ptask->skb, ptask->max_payload,
- &ptask->hdr);
- err = ether1394_send_packet(ptask, tx_len);
- if (err) {
- if (err == -EAGAIN)
- ETH1394_PRINT_G(KERN_ERR, "Out of tlabels\n");
-
- ether1394_dg_complete(ptask, 1);
- }
- } else {
- ether1394_dg_complete(ptask, fail);
- }
-}
-
-/* Transmit a packet (called by kernel) */
-static netdev_tx_t ether1394_tx(struct sk_buff *skb,
- struct net_device *dev)
-{
- struct eth1394hdr hdr_buf;
- struct eth1394_priv *priv = netdev_priv(dev);
- __be16 proto;
- unsigned long flags;
- nodeid_t dest_node;
- eth1394_tx_type tx_type;
- unsigned int tx_len;
- unsigned int max_payload;
- u16 dg_size;
- u16 dgl;
- struct packet_task *ptask;
- struct eth1394_node_ref *node;
- struct eth1394_node_info *node_info = NULL;
-
- ptask = kmem_cache_alloc(packet_task_cache, GFP_ATOMIC);
- if (ptask == NULL)
- goto fail;
-
- /* XXX Ignore this for now. Noticed that when MacOSX is the IRM,
- * it does not set our validity bit. We need to compensate for
- * that somewhere else, but not in eth1394. */
-#if 0
- if ((priv->host->csr.broadcast_channel & 0xc0000000) != 0xc0000000)
- goto fail;
-#endif
-
- skb = skb_share_check(skb, GFP_ATOMIC);
- if (!skb)
- goto fail;
-
- /* Get rid of the fake eth1394 header, but first make a copy.
- * We might need to rebuild the header on tx failure. */
- memcpy(&hdr_buf, skb->data, sizeof(hdr_buf));
- skb_pull(skb, ETH1394_HLEN);
-
- proto = hdr_buf.h_proto;
- dg_size = skb->len;
-
- /* Set the transmission type for the packet. ARP packets and IP
- * broadcast packets are sent via GASP. */
- if (memcmp(hdr_buf.h_dest, dev->broadcast, ETH1394_ALEN) == 0 ||
- proto == htons(ETH_P_ARP) ||
- (proto == htons(ETH_P_IP) &&
- IN_MULTICAST(ntohl(ip_hdr(skb)->daddr)))) {
- tx_type = ETH1394_GASP;
- dest_node = LOCAL_BUS | ALL_NODES;
- max_payload = priv->bc_maxpayload - ETHER1394_GASP_OVERHEAD;
- BUG_ON(max_payload < 512 - ETHER1394_GASP_OVERHEAD);
- dgl = priv->bc_dgl;
- if (max_payload < dg_size + hdr_type_len[ETH1394_HDR_LF_UF])
- priv->bc_dgl++;
- } else {
- __be64 guid = get_unaligned((__be64 *)hdr_buf.h_dest);
-
- node = eth1394_find_node_guid(&priv->ip_node_list,
- be64_to_cpu(guid));
- if (!node)
- goto fail;
-
- node_info = dev_get_drvdata(&node->ud->device);
- if (node_info->fifo == CSR1212_INVALID_ADDR_SPACE)
- goto fail;
-
- dest_node = node->ud->ne->nodeid;
- max_payload = node_info->maxpayload;
- BUG_ON(max_payload < 512 - ETHER1394_GASP_OVERHEAD);
-
- dgl = node_info->dgl;
- if (max_payload < dg_size + hdr_type_len[ETH1394_HDR_LF_UF])
- node_info->dgl++;
- tx_type = ETH1394_WRREQ;
- }
-
- /* If this is an ARP packet, convert it */
- if (proto == htons(ETH_P_ARP))
- ether1394_arp_to_1394arp(skb, dev);
-
- ptask->hdr.words.word1 = 0;
- ptask->hdr.words.word2 = 0;
- ptask->hdr.words.word3 = 0;
- ptask->hdr.words.word4 = 0;
- ptask->skb = skb;
- ptask->priv = priv;
- ptask->tx_type = tx_type;
-
- if (tx_type != ETH1394_GASP) {
- u64 addr;
-
- spin_lock_irqsave(&priv->lock, flags);
- addr = node_info->fifo;
- spin_unlock_irqrestore(&priv->lock, flags);
-
- ptask->addr = addr;
- ptask->dest_node = dest_node;
- }
-
- ptask->tx_type = tx_type;
- ptask->max_payload = max_payload;
- ptask->outstanding_pkts = ether1394_encapsulate_prep(max_payload,
- proto, &ptask->hdr, dg_size, dgl);
-
- /* Add the encapsulation header to the fragment */
- tx_len = ether1394_encapsulate(skb, max_payload, &ptask->hdr);
- dev->trans_start = jiffies;
- if (ether1394_send_packet(ptask, tx_len)) {
- if (dest_node == (LOCAL_BUS | ALL_NODES))
- goto fail;
-
- /* At this point we want to restore the packet. When we return
- * here with NETDEV_TX_BUSY, this routine will be entered again
- * with the same skb, and we need it to look the same.
- * So we pull 4 more bytes, then build the header again. */
- skb_pull(skb, 4);
- ether1394_header(skb, dev, ntohs(hdr_buf.h_proto),
- hdr_buf.h_dest, NULL, 0);
-
- /* Most failures of ether1394_send_packet are recoverable. */
- netif_stop_queue(dev);
- priv->wake_node = dest_node;
- schedule_work(&priv->wake);
- kmem_cache_free(packet_task_cache, ptask);
- return NETDEV_TX_BUSY;
- }
-
- return NETDEV_TX_OK;
-fail:
- if (ptask)
- kmem_cache_free(packet_task_cache, ptask);
-
- if (skb != NULL)
- dev_kfree_skb(skb);
-
- spin_lock_irqsave(&priv->lock, flags);
- dev->stats.tx_dropped++;
- dev->stats.tx_errors++;
- spin_unlock_irqrestore(&priv->lock, flags);
-
- return NETDEV_TX_OK;
-}
-
-static int __init ether1394_init_module(void)
-{
- int err;
-
- packet_task_cache = kmem_cache_create("packet_task",
- sizeof(struct packet_task),
- 0, 0, NULL);
- if (!packet_task_cache)
- return -ENOMEM;
-
- hpsb_register_highlevel(&eth1394_highlevel);
- err = hpsb_register_protocol(&eth1394_proto_driver);
- if (err) {
- hpsb_unregister_highlevel(&eth1394_highlevel);
- kmem_cache_destroy(packet_task_cache);
- }
- return err;
-}
-
-static void __exit ether1394_exit_module(void)
-{
- hpsb_unregister_protocol(&eth1394_proto_driver);
- hpsb_unregister_highlevel(&eth1394_highlevel);
- kmem_cache_destroy(packet_task_cache);
-}
-
-module_init(ether1394_init_module);
-module_exit(ether1394_exit_module);
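[Editor's note] The failure path in ether1394_tx() above (pull four more bytes, rebuild the fake header, return NETDEV_TX_BUSY) exists so that the networking core can hand the driver the exact same skb once the queue is woken. Below is a hedged, minimal sketch of that restore-and-retry idiom in a generic start_xmit handler; struct example_hdr, example_try_send() and the other example_* names are illustrative stand-ins, not part of eth1394.

        #include <linux/netdevice.h>
        #include <linux/skbuff.h>
        #include <linux/string.h>

        struct example_hdr {
                unsigned char   dest[8];
                __be16          proto;
        } __attribute__((packed));

        /* Hypothetical helper: hand the payload to the link layer, 0 on success. */
        int example_try_send(struct net_device *dev, struct sk_buff *skb);

        static netdev_tx_t example_start_xmit(struct sk_buff *skb,
                                              struct net_device *dev)
        {
                struct example_hdr hdr;

                /* Copy the fake link-layer header before stripping it, just as
                 * ether1394_tx() copies hdr_buf above. */
                memcpy(&hdr, skb->data, sizeof(hdr));
                skb_pull(skb, sizeof(hdr));

                if (example_try_send(dev, skb) == 0)
                        return NETDEV_TX_OK;

                /* Failure: rebuild the header so the core can resubmit the very
                 * same skb after the queue is woken. */
                skb_push(skb, sizeof(hdr));
                memcpy(skb->data, &hdr, sizeof(hdr));
                netif_stop_queue(dev);
                return NETDEV_TX_BUSY;
        }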
diff --git a/drivers/ieee1394/eth1394.h b/drivers/ieee1394/eth1394.h
deleted file mode 100644
index d53bac47b86f..000000000000
--- a/drivers/ieee1394/eth1394.h
+++ /dev/null
@@ -1,234 +0,0 @@
-/*
- * eth1394.h -- Ethernet driver for Linux IEEE-1394 Subsystem
- *
- * Copyright (C) 2000 Bonin Franck <boninf@free.fr>
- * (C) 2001 Ben Collins <bcollins@debian.org>
- *
- * Mainly based on work by Emanuel Pirker and Andreas E. Bombe
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software Foundation,
- * Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
- */
-
-#ifndef __ETH1394_H
-#define __ETH1394_H
-
-#include <linux/netdevice.h>
-#include <linux/skbuff.h>
-#include <asm/byteorder.h>
-
-#include "ieee1394.h"
-#include "ieee1394_types.h"
-
-/* Register for incoming packets. This is 4096 bytes, which supports up to
- * S3200 (per Table 16-3 of IEEE 1394b-2002). */
-#define ETHER1394_REGION_ADDR_LEN 4096
-
-/* GASP identifier numbers for IPv4 over IEEE 1394 */
-#define ETHER1394_GASP_SPECIFIER_ID 0x00005E
-#define ETHER1394_GASP_SPECIFIER_ID_HI ((0x00005E >> 8) & 0xffff)
-#define ETHER1394_GASP_SPECIFIER_ID_LO (0x00005E & 0xff)
-#define ETHER1394_GASP_VERSION 1
-
-#define ETHER1394_GASP_OVERHEAD (2 * sizeof(quadlet_t)) /* for GASP header */
-
-#define ETHER1394_GASP_BUFFERS 16
-
-#define NODE_SET (ALL_NODES + 1) /* Node set == 64 */
-
-enum eth1394_bc_states { ETHER1394_BC_ERROR,
- ETHER1394_BC_RUNNING,
- ETHER1394_BC_STOPPED };
-
-
-/* Private structure for our ethernet driver */
-struct eth1394_priv {
- struct hpsb_host *host; /* The card for this dev */
- u16 bc_maxpayload; /* Max broadcast payload */
- u8 bc_sspd; /* Max broadcast speed */
- u64 local_fifo; /* Local FIFO Address */
- spinlock_t lock; /* Private lock */
- int broadcast_channel; /* Async stream Broadcast Channel */
- enum eth1394_bc_states bc_state; /* broadcast channel state */
- struct hpsb_iso *iso; /* Async stream recv handle */
- int bc_dgl; /* Outgoing broadcast datagram label */
- struct list_head ip_node_list; /* List of IP capable nodes */
- struct unit_directory *ud_list[ALL_NODES]; /* Cached unit dir list */
-
- struct work_struct wake; /* Wake up after xmit failure */
- struct net_device *wake_dev; /* Stupid backlink for .wake */
- nodeid_t wake_node; /* Destination of failed xmit */
-};
-
-
-/* Define a fake hardware header format for the networking core. Note that
- * header size cannot exceed 16 bytes as that is the size of the header cache.
- * Also, we do not need the source address in the header so we omit it and
- * keep the header to under 16 bytes */
-#define ETH1394_ALEN (8)
-#define ETH1394_HLEN (10)
-
-struct eth1394hdr {
- unsigned char h_dest[ETH1394_ALEN]; /* destination eth1394 addr */
- __be16 h_proto; /* packet type ID field */
-} __attribute__((packed));
-
-static inline struct eth1394hdr *eth1394_hdr(const struct sk_buff *skb)
-{
- return (struct eth1394hdr *)skb_mac_header(skb);
-}
-
-typedef enum {ETH1394_GASP, ETH1394_WRREQ} eth1394_tx_type;
-
-/* IP1394 headers */
-
-/* Unfragmented */
-#if defined __BIG_ENDIAN_BITFIELD
-struct eth1394_uf_hdr {
- u16 lf:2;
- u16 res:14;
- __be16 ether_type; /* Ethernet packet type */
-} __attribute__((packed));
-#elif defined __LITTLE_ENDIAN_BITFIELD
-struct eth1394_uf_hdr {
- u16 res:14;
- u16 lf:2;
- __be16 ether_type;
-} __attribute__((packed));
-#else
-#error Unknown bit field type
-#endif
-
-/* First fragment */
-#if defined __BIG_ENDIAN_BITFIELD
-struct eth1394_ff_hdr {
- u16 lf:2;
- u16 res1:2;
- u16 dg_size:12; /* Datagram size */
- __be16 ether_type; /* Ethernet packet type */
- u16 dgl; /* Datagram label */
- u16 res2;
-} __attribute__((packed));
-#elif defined __LITTLE_ENDIAN_BITFIELD
-struct eth1394_ff_hdr {
- u16 dg_size:12;
- u16 res1:2;
- u16 lf:2;
- __be16 ether_type;
- u16 dgl;
- u16 res2;
-} __attribute__((packed));
-#else
-#error Unknown bit field type
-#endif
-
-/* XXX: Subsequent fragments, including last */
-#if defined __BIG_ENDIAN_BITFIELD
-struct eth1394_sf_hdr {
- u16 lf:2;
- u16 res1:2;
- u16 dg_size:12; /* Datagram size */
- u16 res2:4;
- u16 fg_off:12; /* Fragment offset */
- u16 dgl; /* Datagram label */
- u16 res3;
-} __attribute__((packed));
-#elif defined __LITTLE_ENDIAN_BITFIELD
-struct eth1394_sf_hdr {
- u16 dg_size:12;
- u16 res1:2;
- u16 lf:2;
- u16 fg_off:12;
- u16 res2:4;
- u16 dgl;
- u16 res3;
-} __attribute__((packed));
-#else
-#error Unknown bit field type
-#endif
-
-#if defined __BIG_ENDIAN_BITFIELD
-struct eth1394_common_hdr {
- u16 lf:2;
- u16 pad1:14;
-} __attribute__((packed));
-#elif defined __LITTLE_ENDIAN_BITFIELD
-struct eth1394_common_hdr {
- u16 pad1:14;
- u16 lf:2;
-} __attribute__((packed));
-#else
-#error Unknown bit field type
-#endif
-
-struct eth1394_hdr_words {
- u16 word1;
- u16 word2;
- u16 word3;
- u16 word4;
-};
-
-union eth1394_hdr {
- struct eth1394_common_hdr common;
- struct eth1394_uf_hdr uf;
- struct eth1394_ff_hdr ff;
- struct eth1394_sf_hdr sf;
- struct eth1394_hdr_words words;
-};
-
-/* End of IP1394 headers */
-
-/* Fragment types */
-#define ETH1394_HDR_LF_UF 0 /* unfragmented */
-#define ETH1394_HDR_LF_FF 1 /* first fragment */
-#define ETH1394_HDR_LF_LF 2 /* last fragment */
-#define ETH1394_HDR_LF_IF 3 /* interior fragment */
-
-#define IP1394_HW_ADDR_LEN 16 /* As per RFC */
-
-/* Our arp packet (ARPHRD_IEEE1394) */
-struct eth1394_arp {
- u16 hw_type; /* 0x0018 */
- u16 proto_type; /* 0x0806 */
- u8 hw_addr_len; /* 16 */
- u8 ip_addr_len; /* 4 */
- u16 opcode; /* ARP Opcode */
- /* Above is exactly the same format as struct arphdr */
-
- __be64 s_uniq_id; /* Sender's 64bit EUI */
- u8 max_rec; /* Sender's max packet size */
- u8 sspd; /* Sender's max speed */
- __be16 fifo_hi; /* hi 16bits of sender's FIFO addr */
- __be32 fifo_lo; /* lo 32bits of sender's FIFO addr */
- u32 sip; /* Sender's IP Address */
- u32 tip; /* IP Address of requested hw addr */
-};
-
-/* Network timeout */
-#define ETHER1394_TIMEOUT 100000
-
-/* This is our task struct. It's used for the packet complete callback. */
-struct packet_task {
- struct sk_buff *skb;
- int outstanding_pkts;
- eth1394_tx_type tx_type;
- int max_payload;
- struct hpsb_packet *packet;
- struct eth1394_priv *priv;
- union eth1394_hdr hdr;
- u64 addr;
- u16 dest_node;
-};
-
-#endif /* __ETH1394_H */
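[Editor's note] To make the fragment-header layouts above concrete, here is a hedged sketch of how the encapsulation header could be filled for the two simplest cases: an unfragmented datagram and the first fragment of a larger one. It is not the driver's actual ether1394_encapsulate_prep(); it only assumes the RFC 2734 convention that dg_size is stored as the datagram size minus one.

        #include <linux/types.h>
        #include "eth1394.h"

        /* Sketch only: the caller is expected to have zeroed *hdr (the res
         * fields must be 0) and to pick max_payload from the destination. */
        static void example_fill_hdr(union eth1394_hdr *hdr, __be16 ether_type,
                                     unsigned int dg_size,
                                     unsigned int max_payload, u16 dgl)
        {
                if (dg_size <= max_payload) {
                        hdr->uf.lf = ETH1394_HDR_LF_UF;    /* fits in one packet */
                        hdr->uf.ether_type = ether_type;
                } else {
                        hdr->ff.lf = ETH1394_HDR_LF_FF;    /* first fragment */
                        hdr->ff.dg_size = dg_size - 1;     /* size minus one (RFC 2734) */
                        hdr->ff.ether_type = ether_type;
                        hdr->ff.dgl = dgl;                 /* datagram label */
                }
                /* Interior/last fragments would use hdr->sf with
                 * ETH1394_HDR_LF_IF or ETH1394_HDR_LF_LF and a running
                 * fg_off fragment offset. */
        }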
diff --git a/drivers/ieee1394/highlevel.c b/drivers/ieee1394/highlevel.c
deleted file mode 100644
index 4bc443546e04..000000000000
--- a/drivers/ieee1394/highlevel.c
+++ /dev/null
@@ -1,691 +0,0 @@
-/*
- * IEEE 1394 for Linux
- *
- * Copyright (C) 1999 Andreas E. Bombe
- *
- * This code is licensed under the GPL. See the file COPYING in the root
- * directory of the kernel sources for details.
- *
- *
- * Contributions:
- *
- * Christian Toegel <christian.toegel@gmx.at>
- * unregister address space
- *
- * Manfred Weihs <weihs@ict.tuwien.ac.at>
- * unregister address space
- *
- */
-
-#include <linux/slab.h>
-#include <linux/list.h>
-#include <linux/bitops.h>
-
-#include "ieee1394.h"
-#include "ieee1394_types.h"
-#include "hosts.h"
-#include "ieee1394_core.h"
-#include "highlevel.h"
-#include "nodemgr.h"
-
-
-struct hl_host_info {
- struct list_head list;
- struct hpsb_host *host;
- size_t size;
- unsigned long key;
- void *data;
-};
-
-
-static LIST_HEAD(hl_drivers);
-static DECLARE_RWSEM(hl_drivers_sem);
-
-static LIST_HEAD(hl_irqs);
-static DEFINE_RWLOCK(hl_irqs_lock);
-
-static DEFINE_RWLOCK(addr_space_lock);
-
-
-static struct hl_host_info *hl_get_hostinfo(struct hpsb_highlevel *hl,
- struct hpsb_host *host)
-{
- struct hl_host_info *hi = NULL;
-
- if (!hl || !host)
- return NULL;
-
- read_lock(&hl->host_info_lock);
- list_for_each_entry(hi, &hl->host_info_list, list) {
- if (hi->host == host) {
- read_unlock(&hl->host_info_lock);
- return hi;
- }
- }
- read_unlock(&hl->host_info_lock);
- return NULL;
-}
-
-/**
- * hpsb_get_hostinfo - retrieve a hostinfo pointer bound to this driver/host
- *
- * Returns a per @host and @hl driver data structure that was previously stored
- * by hpsb_create_hostinfo.
- */
-void *hpsb_get_hostinfo(struct hpsb_highlevel *hl, struct hpsb_host *host)
-{
- struct hl_host_info *hi = hl_get_hostinfo(hl, host);
-
- return hi ? hi->data : NULL;
-}
-
-/**
- * hpsb_create_hostinfo - allocate a hostinfo pointer bound to this driver/host
- *
- * Allocate a hostinfo pointer backed by memory with @data_size and bind it to
- * this @hl driver and @host. If @data_size is zero, then the return here is
- * only valid for error checking.
- */
-void *hpsb_create_hostinfo(struct hpsb_highlevel *hl, struct hpsb_host *host,
- size_t data_size)
-{
- struct hl_host_info *hi;
- void *data;
- unsigned long flags;
-
- hi = hl_get_hostinfo(hl, host);
- if (hi) {
- HPSB_ERR("%s called hpsb_create_hostinfo when hostinfo already"
- " exists", hl->name);
- return NULL;
- }
-
- hi = kzalloc(sizeof(*hi) + data_size, GFP_ATOMIC);
- if (!hi)
- return NULL;
-
- if (data_size) {
- data = hi->data = hi + 1;
- hi->size = data_size;
- } else
- data = hi;
-
- hi->host = host;
-
- write_lock_irqsave(&hl->host_info_lock, flags);
- list_add_tail(&hi->list, &hl->host_info_list);
- write_unlock_irqrestore(&hl->host_info_lock, flags);
-
- return data;
-}
-
-/**
- * hpsb_set_hostinfo - set the hostinfo pointer to something useful
- *
- * Usually follows a call to hpsb_create_hostinfo, where the size is 0.
- */
-int hpsb_set_hostinfo(struct hpsb_highlevel *hl, struct hpsb_host *host,
- void *data)
-{
- struct hl_host_info *hi;
-
- hi = hl_get_hostinfo(hl, host);
- if (hi) {
- if (!hi->size && !hi->data) {
- hi->data = data;
- return 0;
- } else
- HPSB_ERR("%s called hpsb_set_hostinfo when hostinfo "
- "already has data", hl->name);
- } else
- HPSB_ERR("%s called hpsb_set_hostinfo when no hostinfo exists",
- hl->name);
- return -EINVAL;
-}
-
-/**
- * hpsb_destroy_hostinfo - free and remove a hostinfo pointer
- *
- * Free and remove the hostinfo pointer bound to this @hl driver and @host.
- */
-void hpsb_destroy_hostinfo(struct hpsb_highlevel *hl, struct hpsb_host *host)
-{
- struct hl_host_info *hi;
-
- hi = hl_get_hostinfo(hl, host);
- if (hi) {
- unsigned long flags;
- write_lock_irqsave(&hl->host_info_lock, flags);
- list_del(&hi->list);
- write_unlock_irqrestore(&hl->host_info_lock, flags);
- kfree(hi);
- }
- return;
-}
-
-/**
- * hpsb_set_hostinfo_key - set an alternate lookup key for an hostinfo
- *
- * Sets an alternate lookup key for the hostinfo bound to this @hl driver and
- * @host.
- */
-void hpsb_set_hostinfo_key(struct hpsb_highlevel *hl, struct hpsb_host *host,
- unsigned long key)
-{
- struct hl_host_info *hi;
-
- hi = hl_get_hostinfo(hl, host);
- if (hi)
- hi->key = key;
- return;
-}
-
-/**
- * hpsb_get_hostinfo_bykey - retrieve a hostinfo pointer by its alternate key
- */
-void *hpsb_get_hostinfo_bykey(struct hpsb_highlevel *hl, unsigned long key)
-{
- struct hl_host_info *hi;
- void *data = NULL;
-
- if (!hl)
- return NULL;
-
- read_lock(&hl->host_info_lock);
- list_for_each_entry(hi, &hl->host_info_list, list) {
- if (hi->key == key) {
- data = hi->data;
- break;
- }
- }
- read_unlock(&hl->host_info_lock);
- return data;
-}
-
-static int highlevel_for_each_host_reg(struct hpsb_host *host, void *__data)
-{
- struct hpsb_highlevel *hl = __data;
-
- hl->add_host(host);
-
- if (host->update_config_rom && hpsb_update_config_rom_image(host) < 0)
- HPSB_ERR("Failed to generate Configuration ROM image for host "
- "%s-%d", hl->name, host->id);
- return 0;
-}
-
-/**
- * hpsb_register_highlevel - register highlevel driver
- *
- * The name pointer in @hl has to stay valid at all times because the string is
- * not copied.
- */
-void hpsb_register_highlevel(struct hpsb_highlevel *hl)
-{
- unsigned long flags;
-
- hpsb_init_highlevel(hl);
- INIT_LIST_HEAD(&hl->addr_list);
-
- down_write(&hl_drivers_sem);
- list_add_tail(&hl->hl_list, &hl_drivers);
- up_write(&hl_drivers_sem);
-
- write_lock_irqsave(&hl_irqs_lock, flags);
- list_add_tail(&hl->irq_list, &hl_irqs);
- write_unlock_irqrestore(&hl_irqs_lock, flags);
-
- if (hl->add_host)
- nodemgr_for_each_host(hl, highlevel_for_each_host_reg);
- return;
-}
-
-static void __delete_addr(struct hpsb_address_serve *as)
-{
- list_del(&as->host_list);
- list_del(&as->hl_list);
- kfree(as);
-}
-
-static void __unregister_host(struct hpsb_highlevel *hl, struct hpsb_host *host,
- int update_cr)
-{
- unsigned long flags;
- struct list_head *lh, *next;
- struct hpsb_address_serve *as;
-
- /* First, let the highlevel driver unreg */
- if (hl->remove_host)
- hl->remove_host(host);
-
- /* Remove any addresses that are matched for this highlevel driver
- * and this particular host. */
- write_lock_irqsave(&addr_space_lock, flags);
- list_for_each_safe (lh, next, &hl->addr_list) {
- as = list_entry(lh, struct hpsb_address_serve, hl_list);
- if (as->host == host)
- __delete_addr(as);
- }
- write_unlock_irqrestore(&addr_space_lock, flags);
-
- /* Now update the config-rom to reflect anything removed by the
- * highlevel driver. */
- if (update_cr && host->update_config_rom &&
- hpsb_update_config_rom_image(host) < 0)
- HPSB_ERR("Failed to generate Configuration ROM image for host "
- "%s-%d", hl->name, host->id);
-
- /* Finally remove all the host info associated between these two. */
- hpsb_destroy_hostinfo(hl, host);
-}
-
-static int highlevel_for_each_host_unreg(struct hpsb_host *host, void *__data)
-{
- struct hpsb_highlevel *hl = __data;
-
- __unregister_host(hl, host, 1);
- return 0;
-}
-
-/**
- * hpsb_unregister_highlevel - unregister highlevel driver
- */
-void hpsb_unregister_highlevel(struct hpsb_highlevel *hl)
-{
- unsigned long flags;
-
- write_lock_irqsave(&hl_irqs_lock, flags);
- list_del(&hl->irq_list);
- write_unlock_irqrestore(&hl_irqs_lock, flags);
-
- down_write(&hl_drivers_sem);
- list_del(&hl->hl_list);
- up_write(&hl_drivers_sem);
-
- nodemgr_for_each_host(hl, highlevel_for_each_host_unreg);
-}
-
-/**
- * hpsb_allocate_and_register_addrspace - alloc' and reg' a host address space
- *
- * @start and @end are 48 bit pointers and have to be quadlet aligned.
- * @end points to the first address behind the handled addresses. This
- * function can be called multiple times for a single hpsb_highlevel @hl to
- * implement sparse register sets. The requested region must not overlap any
- * previously allocated region, otherwise registering will fail.
- *
- * It returns the start address of the allocated region on success, or
- * CSR1212_INVALID_ADDR_SPACE on failure. Address spaces can be
- * unregistered with hpsb_unregister_addrspace. All remaining address spaces
- * are automatically deallocated together with the hpsb_highlevel @hl.
- */
-u64 hpsb_allocate_and_register_addrspace(struct hpsb_highlevel *hl,
- struct hpsb_host *host,
- const struct hpsb_address_ops *ops,
- u64 size, u64 alignment,
- u64 start, u64 end)
-{
- struct hpsb_address_serve *as, *a1, *a2;
- struct list_head *entry;
- u64 retval = CSR1212_INVALID_ADDR_SPACE;
- unsigned long flags;
- u64 align_mask = ~(alignment - 1);
-
- if ((alignment & 3) || (alignment > 0x800000000000ULL) ||
- (hweight64(alignment) != 1)) {
- HPSB_ERR("%s called with invalid alignment: 0x%048llx",
- __func__, (unsigned long long)alignment);
- return retval;
- }
-
- /* default range,
- * avoids controller's posted write area (see OHCI 1.1 clause 1.5) */
- if (start == CSR1212_INVALID_ADDR_SPACE &&
- end == CSR1212_INVALID_ADDR_SPACE) {
- start = host->middle_addr_space;
- end = CSR1212_ALL_SPACE_END;
- }
-
- if (((start|end) & ~align_mask) || (start >= end) ||
- (end > CSR1212_ALL_SPACE_END)) {
- HPSB_ERR("%s called with invalid addresses "
- "(start = %012Lx end = %012Lx)", __func__,
- (unsigned long long)start,(unsigned long long)end);
- return retval;
- }
-
- as = kmalloc(sizeof(*as), GFP_KERNEL);
- if (!as)
- return retval;
-
- INIT_LIST_HEAD(&as->host_list);
- INIT_LIST_HEAD(&as->hl_list);
- as->op = ops;
- as->host = host;
-
- write_lock_irqsave(&addr_space_lock, flags);
- list_for_each(entry, &host->addr_space) {
- u64 a1sa, a1ea;
- u64 a2sa, a2ea;
-
- a1 = list_entry(entry, struct hpsb_address_serve, host_list);
- a2 = list_entry(entry->next, struct hpsb_address_serve,
- host_list);
-
- a1sa = a1->start & align_mask;
- a1ea = (a1->end + alignment -1) & align_mask;
- a2sa = a2->start & align_mask;
- a2ea = (a2->end + alignment -1) & align_mask;
-
- if ((a2sa - a1ea >= size) && (a2sa - start >= size) &&
- (a2sa > start)) {
- as->start = max(start, a1ea);
- as->end = as->start + size;
- list_add(&as->host_list, entry);
- list_add_tail(&as->hl_list, &hl->addr_list);
- retval = as->start;
- break;
- }
- }
- write_unlock_irqrestore(&addr_space_lock, flags);
-
- if (retval == CSR1212_INVALID_ADDR_SPACE)
- kfree(as);
- return retval;
-}
-
-/**
- * hpsb_register_addrspace - register a host address space
- *
- * @start and @end are 48 bit pointers and have to be quadlet aligned.
- * @end points to the first address behind the handled addresses. This
- * function can be called multiple times for a single hpsb_highlevel @hl to
- * implement sparse register sets. The requested region must not overlap any
- * previously allocated region, otherwise registering will fail.
- *
- * It returns true for successful allocation. Address spaces can be
- * unregistered with hpsb_unregister_addrspace. All remaining address spaces
- * are automatically deallocated together with the hpsb_highlevel @hl.
- */
-int hpsb_register_addrspace(struct hpsb_highlevel *hl, struct hpsb_host *host,
- const struct hpsb_address_ops *ops,
- u64 start, u64 end)
-{
- struct hpsb_address_serve *as;
- struct list_head *lh;
- int retval = 0;
- unsigned long flags;
-
- if (((start|end) & 3) || (start >= end) ||
- (end > CSR1212_ALL_SPACE_END)) {
- HPSB_ERR("%s called with invalid addresses", __func__);
- return 0;
- }
-
- as = kmalloc(sizeof(*as), GFP_KERNEL);
- if (!as)
- return 0;
-
- INIT_LIST_HEAD(&as->host_list);
- INIT_LIST_HEAD(&as->hl_list);
- as->op = ops;
- as->start = start;
- as->end = end;
- as->host = host;
-
- write_lock_irqsave(&addr_space_lock, flags);
- list_for_each(lh, &host->addr_space) {
- struct hpsb_address_serve *as_this =
- list_entry(lh, struct hpsb_address_serve, host_list);
- struct hpsb_address_serve *as_next =
- list_entry(lh->next, struct hpsb_address_serve,
- host_list);
-
- if (as_this->end > as->start)
- break;
-
- if (as_next->start >= as->end) {
- list_add(&as->host_list, lh);
- list_add_tail(&as->hl_list, &hl->addr_list);
- retval = 1;
- break;
- }
- }
- write_unlock_irqrestore(&addr_space_lock, flags);
-
- if (retval == 0)
- kfree(as);
- return retval;
-}
-
-int hpsb_unregister_addrspace(struct hpsb_highlevel *hl, struct hpsb_host *host,
- u64 start)
-{
- int retval = 0;
- struct hpsb_address_serve *as;
- struct list_head *lh, *next;
- unsigned long flags;
-
- write_lock_irqsave(&addr_space_lock, flags);
- list_for_each_safe (lh, next, &hl->addr_list) {
- as = list_entry(lh, struct hpsb_address_serve, hl_list);
- if (as->start == start && as->host == host) {
- __delete_addr(as);
- retval = 1;
- break;
- }
- }
- write_unlock_irqrestore(&addr_space_lock, flags);
- return retval;
-}
-
-static const struct hpsb_address_ops dummy_ops;
-
-/* dummy address spaces as lower and upper bounds of the host's a.s. list */
-static void init_hpsb_highlevel(struct hpsb_host *host)
-{
- INIT_LIST_HEAD(&host->dummy_zero_addr.host_list);
- INIT_LIST_HEAD(&host->dummy_zero_addr.hl_list);
- INIT_LIST_HEAD(&host->dummy_max_addr.host_list);
- INIT_LIST_HEAD(&host->dummy_max_addr.hl_list);
-
- host->dummy_zero_addr.op = host->dummy_max_addr.op = &dummy_ops;
-
- host->dummy_zero_addr.start = host->dummy_zero_addr.end = 0;
- host->dummy_max_addr.start = host->dummy_max_addr.end = ((u64) 1) << 48;
-
- list_add_tail(&host->dummy_zero_addr.host_list, &host->addr_space);
- list_add_tail(&host->dummy_max_addr.host_list, &host->addr_space);
-}
-
-void highlevel_add_host(struct hpsb_host *host)
-{
- struct hpsb_highlevel *hl;
-
- init_hpsb_highlevel(host);
-
- down_read(&hl_drivers_sem);
- list_for_each_entry(hl, &hl_drivers, hl_list) {
- if (hl->add_host)
- hl->add_host(host);
- }
- up_read(&hl_drivers_sem);
- if (host->update_config_rom && hpsb_update_config_rom_image(host) < 0)
- HPSB_ERR("Failed to generate Configuration ROM image for host "
- "%s-%d", hl->name, host->id);
-}
-
-void highlevel_remove_host(struct hpsb_host *host)
-{
- struct hpsb_highlevel *hl;
-
- down_read(&hl_drivers_sem);
- list_for_each_entry(hl, &hl_drivers, hl_list)
- __unregister_host(hl, host, 0);
- up_read(&hl_drivers_sem);
-}
-
-void highlevel_host_reset(struct hpsb_host *host)
-{
- unsigned long flags;
- struct hpsb_highlevel *hl;
-
- read_lock_irqsave(&hl_irqs_lock, flags);
- list_for_each_entry(hl, &hl_irqs, irq_list) {
- if (hl->host_reset)
- hl->host_reset(host);
- }
- read_unlock_irqrestore(&hl_irqs_lock, flags);
-}
-
-void highlevel_fcp_request(struct hpsb_host *host, int nodeid, int direction,
- void *data, size_t length)
-{
- unsigned long flags;
- struct hpsb_highlevel *hl;
- int cts = ((quadlet_t *)data)[0] >> 4;
-
- read_lock_irqsave(&hl_irqs_lock, flags);
- list_for_each_entry(hl, &hl_irqs, irq_list) {
- if (hl->fcp_request)
- hl->fcp_request(host, nodeid, direction, cts, data,
- length);
- }
- read_unlock_irqrestore(&hl_irqs_lock, flags);
-}
-
-/*
- * highlevel_read, highlevel_write, highlevel_lock, highlevel_lock64:
- *
- * These functions are called to handle transactions. They are called when a
- * packet arrives. The flags argument contains the second word of the first
- * header quadlet of the incoming packet (containing transaction label, retry
- * code, transaction code and priority). These functions either return a
- * response code or a negative number. In the first case a response will be
- * generated. In the latter case, no response will be sent and the driver which
- * handled the request will send the response itself.
- */
-int highlevel_read(struct hpsb_host *host, int nodeid, void *data, u64 addr,
- unsigned int length, u16 flags)
-{
- struct hpsb_address_serve *as;
- unsigned int partlength;
- int rcode = RCODE_ADDRESS_ERROR;
-
- read_lock(&addr_space_lock);
- list_for_each_entry(as, &host->addr_space, host_list) {
- if (as->start > addr)
- break;
-
- if (as->end > addr) {
- partlength = min(as->end - addr, (u64) length);
-
- if (as->op->read)
- rcode = as->op->read(host, nodeid, data,
- addr, partlength, flags);
- else
- rcode = RCODE_TYPE_ERROR;
-
- data += partlength;
- length -= partlength;
- addr += partlength;
-
- if ((rcode != RCODE_COMPLETE) || !length)
- break;
- }
- }
- read_unlock(&addr_space_lock);
-
- if (length && (rcode == RCODE_COMPLETE))
- rcode = RCODE_ADDRESS_ERROR;
- return rcode;
-}
-
-int highlevel_write(struct hpsb_host *host, int nodeid, int destid, void *data,
- u64 addr, unsigned int length, u16 flags)
-{
- struct hpsb_address_serve *as;
- unsigned int partlength;
- int rcode = RCODE_ADDRESS_ERROR;
-
- read_lock(&addr_space_lock);
- list_for_each_entry(as, &host->addr_space, host_list) {
- if (as->start > addr)
- break;
-
- if (as->end > addr) {
- partlength = min(as->end - addr, (u64) length);
-
- if (as->op->write)
- rcode = as->op->write(host, nodeid, destid,
- data, addr, partlength,
- flags);
- else
- rcode = RCODE_TYPE_ERROR;
-
- data += partlength;
- length -= partlength;
- addr += partlength;
-
- if ((rcode != RCODE_COMPLETE) || !length)
- break;
- }
- }
- read_unlock(&addr_space_lock);
-
- if (length && (rcode == RCODE_COMPLETE))
- rcode = RCODE_ADDRESS_ERROR;
- return rcode;
-}
-
-int highlevel_lock(struct hpsb_host *host, int nodeid, quadlet_t *store,
- u64 addr, quadlet_t data, quadlet_t arg, int ext_tcode,
- u16 flags)
-{
- struct hpsb_address_serve *as;
- int rcode = RCODE_ADDRESS_ERROR;
-
- read_lock(&addr_space_lock);
- list_for_each_entry(as, &host->addr_space, host_list) {
- if (as->start > addr)
- break;
-
- if (as->end > addr) {
- if (as->op->lock)
- rcode = as->op->lock(host, nodeid, store, addr,
- data, arg, ext_tcode,
- flags);
- else
- rcode = RCODE_TYPE_ERROR;
- break;
- }
- }
- read_unlock(&addr_space_lock);
- return rcode;
-}
-
-int highlevel_lock64(struct hpsb_host *host, int nodeid, octlet_t *store,
- u64 addr, octlet_t data, octlet_t arg, int ext_tcode,
- u16 flags)
-{
- struct hpsb_address_serve *as;
- int rcode = RCODE_ADDRESS_ERROR;
-
- read_lock(&addr_space_lock);
-
- list_for_each_entry(as, &host->addr_space, host_list) {
- if (as->start > addr)
- break;
-
- if (as->end > addr) {
- if (as->op->lock64)
- rcode = as->op->lock64(host, nodeid, store,
- addr, data, arg,
- ext_tcode, flags);
- else
- rcode = RCODE_TYPE_ERROR;
- break;
- }
- }
- read_unlock(&addr_space_lock);
- return rcode;
-}
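[Editor's note] As a usage note for the hostinfo helpers above: a typical highlevel driver allocates its per-host state in add_host() and releases it in remove_host(). The sketch below is illustrative only; the name "example1394" and struct example_host are not part of the subsystem.

        #include <linux/kernel.h>
        #include "highlevel.h"
        #include "hosts.h"

        struct example_host {
                int packets_seen;       /* arbitrary per-host state */
        };

        static struct hpsb_highlevel example_hl;        /* defined below */

        static void example_add_host(struct hpsb_host *host)
        {
                /* One zero-initialized struct example_host, bound to (hl, host). */
                struct example_host *eh =
                        hpsb_create_hostinfo(&example_hl, host, sizeof(*eh));

                if (!eh)
                        pr_err("example1394: no memory for fw-host%d\n", host->id);
        }

        static void example_remove_host(struct hpsb_host *host)
        {
                /* __unregister_host() would also free it, but being explicit
                 * keeps the lifetime obvious. */
                hpsb_destroy_hostinfo(&example_hl, host);
        }

        static struct hpsb_highlevel example_hl = {
                .name           = "example1394",
                .add_host       = example_add_host,
                .remove_host    = example_remove_host,
        };

The instance would be registered from module init with hpsb_register_highlevel(&example_hl) and torn down with hpsb_unregister_highlevel(&example_hl).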
diff --git a/drivers/ieee1394/highlevel.h b/drivers/ieee1394/highlevel.h
deleted file mode 100644
index 9dba89fc60ad..000000000000
--- a/drivers/ieee1394/highlevel.h
+++ /dev/null
@@ -1,141 +0,0 @@
-#ifndef IEEE1394_HIGHLEVEL_H
-#define IEEE1394_HIGHLEVEL_H
-
-#include <linux/list.h>
-#include <linux/spinlock.h>
-#include <linux/types.h>
-
-struct module;
-
-#include "ieee1394_types.h"
-
-struct hpsb_host;
-
-/* internal to ieee1394 core */
-struct hpsb_address_serve {
- struct list_head host_list; /* per host list */
- struct list_head hl_list; /* hpsb_highlevel list */
- const struct hpsb_address_ops *op;
- struct hpsb_host *host;
- u64 start; /* first address handled, quadlet aligned */
- u64 end; /* first address behind, quadlet aligned */
-};
-
-/* Only the following structures are of interest to actual highlevel drivers. */
-
-struct hpsb_highlevel {
- const char *name;
-
- /* Any of the following pointers can legally be NULL. */
-
- /* New host initialized. Will also be called during
- * hpsb_register_highlevel for all hosts already installed. */
- void (*add_host)(struct hpsb_host *host);
-
- /* Host about to be removed. Will also be called during
- * hpsb_unregister_highlevel once for each host. */
- void (*remove_host)(struct hpsb_host *host);
-
- /* Host experienced bus reset with possible configuration changes.
- * Note that this one may occur during interrupt/bottom half handling.
- * You can not expect to be able to do stock hpsb_reads. */
- void (*host_reset)(struct hpsb_host *host);
-
- /* A write request was received on either the FCP_COMMAND (direction =
- * 0) or the FCP_RESPONSE (direction = 1) register. The cts arg
- * contains the cts field (first byte of data). */
- void (*fcp_request)(struct hpsb_host *host, int nodeid, int direction,
- int cts, u8 *data, size_t length);
-
- /* These are initialized by the subsystem when the
- * hpsb_highlevel is registered. */
- struct list_head hl_list;
- struct list_head irq_list;
- struct list_head addr_list;
-
- struct list_head host_info_list;
- rwlock_t host_info_lock;
-};
-
-struct hpsb_address_ops {
- /*
- * Null function pointers will make the respective operation complete
- * with RCODE_TYPE_ERROR. This makes read-only registers easy to
- * implement (just leave everything but read NULL).
- *
- * All functions shall return appropriate IEEE 1394 rcodes.
- */
-
- /* These functions have to implement block reads for themselves.
- *
- * These functions either return a response code or a negative number.
- * In the first case a response will be generated. In the latter case,
- * no response will be sent and the driver which handled the request
- * will send the response itself. */
- int (*read)(struct hpsb_host *host, int nodeid, quadlet_t *buffer,
- u64 addr, size_t length, u16 flags);
- int (*write)(struct hpsb_host *host, int nodeid, int destid,
- quadlet_t *data, u64 addr, size_t length, u16 flags);
-
- /* Lock transactions: write results of ext_tcode operation into
- * *store. */
- int (*lock)(struct hpsb_host *host, int nodeid, quadlet_t *store,
- u64 addr, quadlet_t data, quadlet_t arg, int ext_tcode,
- u16 flags);
- int (*lock64)(struct hpsb_host *host, int nodeid, octlet_t *store,
- u64 addr, octlet_t data, octlet_t arg, int ext_tcode,
- u16 flags);
-};
-
-void highlevel_add_host(struct hpsb_host *host);
-void highlevel_remove_host(struct hpsb_host *host);
-void highlevel_host_reset(struct hpsb_host *host);
-int highlevel_read(struct hpsb_host *host, int nodeid, void *data, u64 addr,
- unsigned int length, u16 flags);
-int highlevel_write(struct hpsb_host *host, int nodeid, int destid, void *data,
- u64 addr, unsigned int length, u16 flags);
-int highlevel_lock(struct hpsb_host *host, int nodeid, quadlet_t *store,
- u64 addr, quadlet_t data, quadlet_t arg, int ext_tcode,
- u16 flags);
-int highlevel_lock64(struct hpsb_host *host, int nodeid, octlet_t *store,
- u64 addr, octlet_t data, octlet_t arg, int ext_tcode,
- u16 flags);
-void highlevel_fcp_request(struct hpsb_host *host, int nodeid, int direction,
- void *data, size_t length);
-
-/**
- * hpsb_init_highlevel - initialize a struct hpsb_highlevel
- *
- * This is only necessary if hpsb_get_hostinfo_bykey can be called
- * before hpsb_register_highlevel.
- */
-static inline void hpsb_init_highlevel(struct hpsb_highlevel *hl)
-{
- rwlock_init(&hl->host_info_lock);
- INIT_LIST_HEAD(&hl->host_info_list);
-}
-void hpsb_register_highlevel(struct hpsb_highlevel *hl);
-void hpsb_unregister_highlevel(struct hpsb_highlevel *hl);
-
-u64 hpsb_allocate_and_register_addrspace(struct hpsb_highlevel *hl,
- struct hpsb_host *host,
- const struct hpsb_address_ops *ops,
- u64 size, u64 alignment,
- u64 start, u64 end);
-int hpsb_register_addrspace(struct hpsb_highlevel *hl, struct hpsb_host *host,
- const struct hpsb_address_ops *ops,
- u64 start, u64 end);
-int hpsb_unregister_addrspace(struct hpsb_highlevel *hl, struct hpsb_host *host,
- u64 start);
-
-void *hpsb_get_hostinfo(struct hpsb_highlevel *hl, struct hpsb_host *host);
-void *hpsb_create_hostinfo(struct hpsb_highlevel *hl, struct hpsb_host *host,
- size_t data_size);
-void hpsb_destroy_hostinfo(struct hpsb_highlevel *hl, struct hpsb_host *host);
-void hpsb_set_hostinfo_key(struct hpsb_highlevel *hl, struct hpsb_host *host,
- unsigned long key);
-void *hpsb_get_hostinfo_bykey(struct hpsb_highlevel *hl, unsigned long key);
-int hpsb_set_hostinfo(struct hpsb_highlevel *hl, struct hpsb_host *host,
- void *data);
-
-#endif /* IEEE1394_HIGHLEVEL_H */
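[Editor's note] To illustrate the hpsb_address_ops contract declared above (NULL handlers complete with RCODE_TYPE_ERROR), here is a hedged sketch of a read-only quadlet register. The address, the value, and the example_* names are purely illustrative.

        #include <asm/byteorder.h>
        #include "highlevel.h"
        #include "hosts.h"
        #include "ieee1394.h"

        #define EXAMPLE_REG_ADDR        0xfffff0010000ULL      /* illustrative only */

        static int example_read(struct hpsb_host *host, int nodeid,
                                quadlet_t *buffer, u64 addr, size_t length,
                                u16 flags)
        {
                if (length != sizeof(quadlet_t))
                        return RCODE_TYPE_ERROR;

                *buffer = cpu_to_be32(0x12345678);      /* the register's content */
                return RCODE_COMPLETE;
        }

        /* write/lock/lock64 stay NULL and therefore answer RCODE_TYPE_ERROR. */
        static const struct hpsb_address_ops example_ops = {
                .read = example_read,
        };

        /* Typically called from the highlevel driver's add_host() hook;
         * hpsb_register_addrspace() returns 1 on success, 0 on failure. */
        static int example_serve_register(struct hpsb_highlevel *hl,
                                          struct hpsb_host *host)
        {
                return hpsb_register_addrspace(hl, host, &example_ops,
                                               EXAMPLE_REG_ADDR,
                                               EXAMPLE_REG_ADDR + sizeof(quadlet_t));
        }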
diff --git a/drivers/ieee1394/hosts.c b/drivers/ieee1394/hosts.c
deleted file mode 100644
index e947d8ffac85..000000000000
--- a/drivers/ieee1394/hosts.c
+++ /dev/null
@@ -1,249 +0,0 @@
-/*
- * IEEE 1394 for Linux
- *
- * Low level (host adapter) management.
- *
- * Copyright (C) 1999 Andreas E. Bombe
- * Copyright (C) 1999 Emanuel Pirker
- *
- * This code is licensed under the GPL. See the file COPYING in the root
- * directory of the kernel sources for details.
- */
-
-#include <linux/module.h>
-#include <linux/types.h>
-#include <linux/list.h>
-#include <linux/init.h>
-#include <linux/slab.h>
-#include <linux/timer.h>
-#include <linux/jiffies.h>
-#include <linux/mutex.h>
-
-#include "csr1212.h"
-#include "ieee1394.h"
-#include "ieee1394_types.h"
-#include "hosts.h"
-#include "ieee1394_core.h"
-#include "highlevel.h"
-#include "nodemgr.h"
-#include "csr.h"
-#include "config_roms.h"
-
-
-static void delayed_reset_bus(struct work_struct *work)
-{
- struct hpsb_host *host =
- container_of(work, struct hpsb_host, delayed_reset.work);
- u8 generation = host->csr.generation + 1;
-
- /* The generation field rolls over to 2 rather than 0 per IEEE
- * 1394a-2000. */
- if (generation > 0xf || generation < 2)
- generation = 2;
-
- csr_set_bus_info_generation(host->csr.rom, generation);
- if (csr1212_generate_csr_image(host->csr.rom) != CSR1212_SUCCESS) {
- /* CSR image creation failed.
- * Reset generation field and do not issue a bus reset. */
- csr_set_bus_info_generation(host->csr.rom,
- host->csr.generation);
- return;
- }
-
- host->csr.generation = generation;
-
- host->update_config_rom = 0;
- if (host->driver->set_hw_config_rom)
- host->driver->set_hw_config_rom(host,
- host->csr.rom->bus_info_data);
-
- host->csr.gen_timestamp[host->csr.generation] = jiffies;
- hpsb_reset_bus(host, SHORT_RESET);
-}
-
-static int dummy_transmit_packet(struct hpsb_host *h, struct hpsb_packet *p)
-{
- return 0;
-}
-
-static int dummy_devctl(struct hpsb_host *h, enum devctl_cmd c, int arg)
-{
- return -1;
-}
-
-static int dummy_isoctl(struct hpsb_iso *iso, enum isoctl_cmd command,
- unsigned long arg)
-{
- return -1;
-}
-
-static struct hpsb_host_driver dummy_driver = {
- .transmit_packet = dummy_transmit_packet,
- .devctl = dummy_devctl,
- .isoctl = dummy_isoctl
-};
-
-static int alloc_hostnum_cb(struct hpsb_host *host, void *__data)
-{
- int *hostnum = __data;
-
- if (host->id == *hostnum)
- return 1;
-
- return 0;
-}
-
-static DEFINE_MUTEX(host_num_alloc);
-
-/**
- * hpsb_alloc_host - allocate a new host controller.
- * @drv: the driver that will manage the host controller
- * @extra: number of extra bytes to allocate for the driver
- *
- * Allocate a &hpsb_host and initialize the general subsystem specific
- * fields. If the driver needs to store per host data, as drivers
- * usually do, the amount of memory required can be specified by the
- * @extra parameter. Once allocated, the driver should initialize the
- * driver specific parts, enable the controller and make it available
- * to the general subsystem using hpsb_add_host().
- *
- * Return Value: a pointer to the &hpsb_host if successful, %NULL if
- * no memory was available.
- */
-struct hpsb_host *hpsb_alloc_host(struct hpsb_host_driver *drv, size_t extra,
- struct device *dev)
-{
- struct hpsb_host *h;
- int i;
- int hostnum = 0;
-
- h = kzalloc(sizeof(*h) + extra, GFP_KERNEL);
- if (!h)
- return NULL;
-
- h->csr.rom = csr1212_create_csr(&csr_bus_ops, CSR_BUS_INFO_SIZE, h);
- if (!h->csr.rom)
- goto fail;
-
- h->hostdata = h + 1;
- h->driver = drv;
-
- INIT_LIST_HEAD(&h->pending_packets);
- INIT_LIST_HEAD(&h->addr_space);
-
- for (i = 2; i < 16; i++)
- h->csr.gen_timestamp[i] = jiffies - 60 * HZ;
-
- atomic_set(&h->generation, 0);
-
- INIT_DELAYED_WORK(&h->delayed_reset, delayed_reset_bus);
-
- init_timer(&h->timeout);
- h->timeout.data = (unsigned long) h;
- h->timeout.function = abort_timedouts;
- h->timeout_interval = HZ / 20; /* 50ms, half of minimum SPLIT_TIMEOUT */
-
- h->topology_map = h->csr.topology_map + 3;
- h->speed_map = (u8 *)(h->csr.speed_map + 2);
-
- mutex_lock(&host_num_alloc);
- while (nodemgr_for_each_host(&hostnum, alloc_hostnum_cb))
- hostnum++;
- mutex_unlock(&host_num_alloc);
- h->id = hostnum;
-
- memcpy(&h->device, &nodemgr_dev_template_host, sizeof(h->device));
- h->device.parent = dev;
- set_dev_node(&h->device, dev_to_node(dev));
- dev_set_name(&h->device, "fw-host%d", h->id);
-
- h->host_dev.parent = &h->device;
- h->host_dev.class = &hpsb_host_class;
- dev_set_name(&h->host_dev, "fw-host%d", h->id);
-
- if (device_register(&h->device))
- goto fail;
- if (device_register(&h->host_dev)) {
- device_unregister(&h->device);
- goto fail;
- }
- get_device(&h->device);
-
- return h;
-
-fail:
- kfree(h);
- return NULL;
-}
-
-int hpsb_add_host(struct hpsb_host *host)
-{
- if (hpsb_default_host_entry(host))
- return -ENOMEM;
-
- highlevel_add_host(host);
- return 0;
-}
-
-void hpsb_resume_host(struct hpsb_host *host)
-{
- if (host->driver->set_hw_config_rom)
- host->driver->set_hw_config_rom(host,
- host->csr.rom->bus_info_data);
- host->driver->devctl(host, RESET_BUS, SHORT_RESET);
-}
-
-void hpsb_remove_host(struct hpsb_host *host)
-{
- host->is_shutdown = 1;
-
- cancel_delayed_work(&host->delayed_reset);
- flush_scheduled_work();
-
- host->driver = &dummy_driver;
- highlevel_remove_host(host);
-
- device_unregister(&host->host_dev);
- device_unregister(&host->device);
-}
-
-/**
- * hpsb_update_config_rom_image - updates configuration ROM image of a host
- *
- * Updates the configuration ROM image of a host. If this host does not
- * support config-rom-update, it will return -%EINVAL.
- * Return value 0 indicates success.
- */
-int hpsb_update_config_rom_image(struct hpsb_host *host)
-{
- unsigned long reset_delay;
- int next_gen = host->csr.generation + 1;
-
- if (!host->update_config_rom)
- return -EINVAL;
-
- if (next_gen > 0xf)
- next_gen = 2;
-
- /* Stop the delayed interrupt, we're about to change the config rom and
- * it would be a waste to do a bus reset twice. */
- cancel_delayed_work(&host->delayed_reset);
-
- /* IEEE 1394a-2000 prohibits using the same generation number
- * twice in a 60 second period. */
- if (time_before(jiffies, host->csr.gen_timestamp[next_gen] + 60 * HZ))
- /* Wait 60 seconds from the last time this generation number was
- * used. */
- reset_delay =
- (60 * HZ) + host->csr.gen_timestamp[next_gen] - jiffies;
- else
- /* Wait 1 second in case some other code wants to change the
- * Config ROM in the near future. */
- reset_delay = HZ;
-
- PREPARE_DELAYED_WORK(&host->delayed_reset, delayed_reset_bus);
- schedule_delayed_work(&host->delayed_reset, reset_delay);
-
- return 0;
-}
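[Editor's note] A short, hedged sketch of how a low-level adapter driver consumes hpsb_alloc_host()/hpsb_add_host() above; struct example_card and example_driver are illustrative stand-ins for a real controller driver such as ohci1394.

        #include <linux/device.h>
        #include <linux/errno.h>
        #include "hosts.h"

        struct example_card {
                void __iomem *registers;        /* controller MMIO window */
        };

        /* Assumed to have .name, .transmit_packet, .devctl, .isoctl filled in. */
        static struct hpsb_host_driver example_driver;

        static int example_probe(struct device *dev)
        {
                struct hpsb_host *host;
                struct example_card *card;

                /* sizeof(*card) extra bytes become host->hostdata. */
                host = hpsb_alloc_host(&example_driver, sizeof(*card), dev);
                if (!host)
                        return -ENOMEM;

                card = host->hostdata;
                /* ... map registers into card->registers, request the IRQ,
                 * enable the controller ... */

                /* Hand the initialized host to the core (config ROM, highlevel
                 * drivers); returns 0 on success, -ENOMEM on failure. */
                return hpsb_add_host(host);
        }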
diff --git a/drivers/ieee1394/hosts.h b/drivers/ieee1394/hosts.h
deleted file mode 100644
index 49c359022c54..000000000000
--- a/drivers/ieee1394/hosts.h
+++ /dev/null
@@ -1,201 +0,0 @@
-#ifndef _IEEE1394_HOSTS_H
-#define _IEEE1394_HOSTS_H
-
-#include <linux/device.h>
-#include <linux/list.h>
-#include <linux/timer.h>
-#include <linux/types.h>
-#include <linux/workqueue.h>
-#include <asm/atomic.h>
-
-struct pci_dev;
-struct module;
-
-#include "ieee1394_types.h"
-#include "csr.h"
-#include "highlevel.h"
-
-struct hpsb_packet;
-struct hpsb_iso;
-
-struct hpsb_host {
- struct list_head host_list;
-
- void *hostdata;
-
- atomic_t generation;
-
- struct list_head pending_packets;
- struct timer_list timeout;
- unsigned long timeout_interval;
-
- int node_count; /* number of identified nodes on this bus */
- int selfid_count; /* total number of SelfIDs received */
- int nodes_active; /* number of nodes with active link layer */
-
- nodeid_t node_id; /* node ID of this host */
- nodeid_t irm_id; /* ID of this bus' isochronous resource manager */
- nodeid_t busmgr_id; /* ID of this bus' bus manager */
-
- /* this node's state */
- unsigned in_bus_reset:1;
- unsigned is_shutdown:1;
- unsigned resume_packet_sent:1;
-
- /* this node's duties on the bus */
- unsigned is_root:1;
- unsigned is_cycmst:1;
- unsigned is_irm:1;
- unsigned is_busmgr:1;
-
- int reset_retries;
- quadlet_t *topology_map;
- u8 *speed_map;
-
- int id;
- struct hpsb_host_driver *driver;
- struct pci_dev *pdev;
- struct device device;
- struct device host_dev;
-
- struct delayed_work delayed_reset;
- unsigned config_roms:31;
- unsigned update_config_rom:1;
-
- struct list_head addr_space;
- u64 low_addr_space; /* upper bound of physical DMA area */
- u64 middle_addr_space; /* upper bound of posted write area */
-
- u8 speed[ALL_NODES]; /* speed between each node and local node */
-
- /* per node tlabel allocation */
- u8 next_tl[ALL_NODES];
- struct { DECLARE_BITMAP(map, 64); } tl_pool[ALL_NODES];
-
- struct csr_control csr;
-
- struct hpsb_address_serve dummy_zero_addr;
- struct hpsb_address_serve dummy_max_addr;
-};
-
-enum devctl_cmd {
- /* Host is requested to reset its bus and cancel all outstanding async
- * requests. If arg == 1, it shall also attempt to become root on the
- * bus. Return void. */
- RESET_BUS,
-
- /* Arg is void, return value is the hardware cycle counter value. */
- GET_CYCLE_COUNTER,
-
- /* Set the hardware cycle counter to the value in arg, return void.
- * FIXME - setting is probably not required. */
- SET_CYCLE_COUNTER,
-
- /* Configure hardware for new bus ID in arg, return void. */
- SET_BUS_ID,
-
- /* If arg true, start sending cycle start packets, stop if arg == 0.
- * Return void. */
- ACT_CYCLE_MASTER,
-
- /* Cancel all outstanding async requests without resetting the bus.
- * Return void. */
- CANCEL_REQUESTS,
-};
-
-enum isoctl_cmd {
- /* rawiso API - see iso.h for the meanings of these commands
- * (they correspond exactly to the hpsb_iso_* API functions)
- * INIT = allocate resources
- * START = begin transmission/reception
- * STOP = halt transmission/reception
- * QUEUE/RELEASE = produce/consume packets
- * SHUTDOWN = deallocate resources
- */
-
- XMIT_INIT,
- XMIT_START,
- XMIT_STOP,
- XMIT_QUEUE,
- XMIT_SHUTDOWN,
-
- RECV_INIT,
- RECV_LISTEN_CHANNEL, /* multi-channel only */
- RECV_UNLISTEN_CHANNEL, /* multi-channel only */
- RECV_SET_CHANNEL_MASK, /* multi-channel only; arg is a *u64 */
- RECV_START,
- RECV_STOP,
- RECV_RELEASE,
- RECV_SHUTDOWN,
- RECV_FLUSH
-};
-
-enum reset_types {
- /* 166 microsecond reset -- only type of reset available on
- non-1394a capable controllers */
- LONG_RESET,
-
- /* Short (arbitrated) reset -- only available on 1394a capable
- controllers */
- SHORT_RESET,
-
- /* Variants that set force_root before issuing the bus reset */
- LONG_RESET_FORCE_ROOT, SHORT_RESET_FORCE_ROOT,
-
- /* Variants that clear force_root before issuing the bus reset */
- LONG_RESET_NO_FORCE_ROOT, SHORT_RESET_NO_FORCE_ROOT
-};
-
-struct hpsb_host_driver {
- struct module *owner;
- const char *name;
-
- /* The hardware driver may optionally support a function that is used
- * to set the hardware ConfigROM if the hardware supports handling
- * reads to the ConfigROM on its own. */
- void (*set_hw_config_rom)(struct hpsb_host *host,
- __be32 *config_rom);
-
- /* This function shall implement packet transmission based on
- * packet->type. It shall CRC both parts of the packet (unless
- * packet->type == raw) and do byte-swapping as necessary or instruct
- * the hardware to do so. It can return immediately after the packet
- * was queued for sending. After sending, hpsb_sent_packet() has to be
- * called. Return 0 on success, negative errno on failure.
- * NOTE: The function must be callable in interrupt context.
- */
- int (*transmit_packet)(struct hpsb_host *host,
- struct hpsb_packet *packet);
-
- /* This function requests miscellaneous services from the driver, see
- * above for command codes and expected actions. Return -1 for unknown
- * command, though that should never happen.
- */
- int (*devctl)(struct hpsb_host *host, enum devctl_cmd command, int arg);
-
- /* ISO transmission/reception functions. Return 0 on success, -1
- * (or -EXXX errno code) on failure. If the low-level driver does not
- * support the new ISO API, set isoctl to NULL.
- */
- int (*isoctl)(struct hpsb_iso *iso, enum isoctl_cmd command,
- unsigned long arg);
-
- /* This function mainly redirects local CSR reads/locks aimed at the iso
- * management registers (bus manager id, bandwidth available, channels
- * available) to the hardware registers in OHCI. reg is 0,1,2,3 for bus
- * mgr, bwdth avail, ch avail hi, ch avail lo respectively (the same ids
- * as OHCI uses). data and compare are the new data and expected data
- * respectively, return value is the old value.
- */
- quadlet_t (*hw_csr_reg) (struct hpsb_host *host, int reg,
- quadlet_t data, quadlet_t compare);
-};
-
-struct hpsb_host *hpsb_alloc_host(struct hpsb_host_driver *drv, size_t extra,
- struct device *dev);
-int hpsb_add_host(struct hpsb_host *host);
-void hpsb_resume_host(struct hpsb_host *host);
-void hpsb_remove_host(struct hpsb_host *host);
-int hpsb_update_config_rom_image(struct hpsb_host *host);
-
-#endif /* _IEEE1394_HOSTS_H */
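[Editor's note] A hedged sketch of the devctl dispatch a host driver provides for the command codes above; the actual PHY/register access is elided and example_devctl is illustrative, not a real driver's implementation.

        #include "hosts.h"

        static int example_devctl(struct hpsb_host *host, enum devctl_cmd cmd,
                                  int arg)
        {
                switch (cmd) {
                case RESET_BUS:
                        /* arg carries one of enum reset_types (SHORT_RESET, ...);
                         * ask the PHY to initiate the corresponding bus reset. */
                        return 0;
                case GET_CYCLE_COUNTER:
                        /* Real drivers read and return the hardware cycle counter. */
                        return 0;       /* stand-in value */
                case CANCEL_REQUESTS:
                        /* Abort outstanding async requests without a bus reset. */
                        return 0;
                default:
                        return -1;      /* unknown command, as the comment above asks */
                }
        }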
diff --git a/drivers/ieee1394/ieee1394-ioctl.h b/drivers/ieee1394/ieee1394-ioctl.h
deleted file mode 100644
index 46878fef136c..000000000000
--- a/drivers/ieee1394/ieee1394-ioctl.h
+++ /dev/null
@@ -1,106 +0,0 @@
-/*
- * Base file for all ieee1394 ioctl's.
- * Linux-1394 has allocated base '#' with a range of 0x00-0x3f.
- */
-
-#ifndef __IEEE1394_IOCTL_H
-#define __IEEE1394_IOCTL_H
-
-#include <linux/ioctl.h>
-#include <linux/types.h>
-
-/* DV1394 Gets 10 */
-
-/* Get the driver ready to transmit video. pass a struct dv1394_init* as
- * the parameter (see below), or NULL to get default parameters */
-#define DV1394_IOC_INIT _IOW('#', 0x06, struct dv1394_init)
-
-/* Stop transmitting video and free the ringbuffer */
-#define DV1394_IOC_SHUTDOWN _IO ('#', 0x07)
-
-/* Submit N new frames to be transmitted, where the index of the first new
- * frame is first_clear_buffer, and the index of the last new frame is
- * (first_clear_buffer + N) % n_frames */
-#define DV1394_IOC_SUBMIT_FRAMES _IO ('#', 0x08)
-
-/* Block until N buffers are clear (pass N as the parameter) Because we
- * re-transmit the last frame on underrun, there will at most be n_frames
- * - 1 clear frames at any time */
-#define DV1394_IOC_WAIT_FRAMES _IO ('#', 0x09)
-
-/* Capture new frames that have been received, where the index of the
- * first new frame is first_clear_buffer, and the index of the last new
- * frame is (first_clear_buffer + N) % n_frames */
-#define DV1394_IOC_RECEIVE_FRAMES _IO ('#', 0x0a)
-
-/* Tell card to start receiving DMA */
-#define DV1394_IOC_START_RECEIVE _IO ('#', 0x0b)
-
-/* Pass a struct dv1394_status* as the parameter */
-#define DV1394_IOC_GET_STATUS _IOR('#', 0x0c, struct dv1394_status)
-
-
-/* Video1394 Gets 10 */
-
-#define VIDEO1394_IOC_LISTEN_CHANNEL \
- _IOWR('#', 0x10, struct video1394_mmap)
-#define VIDEO1394_IOC_UNLISTEN_CHANNEL \
- _IOW ('#', 0x11, int)
-#define VIDEO1394_IOC_LISTEN_QUEUE_BUFFER \
- _IOW ('#', 0x12, struct video1394_wait)
-#define VIDEO1394_IOC_LISTEN_WAIT_BUFFER \
- _IOWR('#', 0x13, struct video1394_wait)
-#define VIDEO1394_IOC_TALK_CHANNEL \
- _IOWR('#', 0x14, struct video1394_mmap)
-#define VIDEO1394_IOC_UNTALK_CHANNEL \
- _IOW ('#', 0x15, int)
-/*
- * This one is broken: it really wanted
- * "sizeof (struct video1394_wait) + sizeof (struct video1394_queue_variable)"
- * but got just a "size_t"
- */
-#define VIDEO1394_IOC_TALK_QUEUE_BUFFER \
- _IOW ('#', 0x16, size_t)
-#define VIDEO1394_IOC_TALK_WAIT_BUFFER \
- _IOW ('#', 0x17, struct video1394_wait)
-#define VIDEO1394_IOC_LISTEN_POLL_BUFFER \
- _IOWR('#', 0x18, struct video1394_wait)
-
-
-/* Raw1394's ISO interface */
-#define RAW1394_IOC_ISO_XMIT_INIT \
- _IOW ('#', 0x1a, struct raw1394_iso_status)
-#define RAW1394_IOC_ISO_RECV_INIT \
- _IOWR('#', 0x1b, struct raw1394_iso_status)
-#define RAW1394_IOC_ISO_RECV_START \
- _IOC (_IOC_WRITE, '#', 0x1c, sizeof(int) * 3)
-#define RAW1394_IOC_ISO_XMIT_START \
- _IOC (_IOC_WRITE, '#', 0x1d, sizeof(int) * 2)
-#define RAW1394_IOC_ISO_XMIT_RECV_STOP \
- _IO ('#', 0x1e)
-#define RAW1394_IOC_ISO_GET_STATUS \
- _IOR ('#', 0x1f, struct raw1394_iso_status)
-#define RAW1394_IOC_ISO_SHUTDOWN \
- _IO ('#', 0x20)
-#define RAW1394_IOC_ISO_QUEUE_ACTIVITY \
- _IO ('#', 0x21)
-#define RAW1394_IOC_ISO_RECV_LISTEN_CHANNEL \
- _IOW ('#', 0x22, unsigned char)
-#define RAW1394_IOC_ISO_RECV_UNLISTEN_CHANNEL \
- _IOW ('#', 0x23, unsigned char)
-#define RAW1394_IOC_ISO_RECV_SET_CHANNEL_MASK \
- _IOW ('#', 0x24, __u64)
-#define RAW1394_IOC_ISO_RECV_PACKETS \
- _IOW ('#', 0x25, struct raw1394_iso_packets)
-#define RAW1394_IOC_ISO_RECV_RELEASE_PACKETS \
- _IOW ('#', 0x26, unsigned int)
-#define RAW1394_IOC_ISO_XMIT_PACKETS \
- _IOW ('#', 0x27, struct raw1394_iso_packets)
-#define RAW1394_IOC_ISO_XMIT_SYNC \
- _IO ('#', 0x28)
-#define RAW1394_IOC_ISO_RECV_FLUSH \
- _IO ('#', 0x29)
-#define RAW1394_IOC_GET_CYCLE_TIMER \
- _IOR ('#', 0x30, struct raw1394_cycle_timer)
-
-#endif /* __IEEE1394_IOCTL_H */
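[Editor's note] Since all of the ioctls above share the '#' base with command numbers 0x00-0x3f, a demultiplexing layer can recognize them generically. A small hedged sketch, using only the standard _IOC_* decoding macros:

        #include <linux/ioctl.h>

        /* Returns nonzero if cmd was encoded against the linux1394 '#' base. */
        static inline int example_is_1394_ioctl(unsigned int cmd)
        {
                return _IOC_TYPE(cmd) == '#' && _IOC_NR(cmd) <= 0x3f;
        }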
diff --git a/drivers/ieee1394/ieee1394.h b/drivers/ieee1394/ieee1394.h
deleted file mode 100644
index af320e2c5079..000000000000
--- a/drivers/ieee1394/ieee1394.h
+++ /dev/null
@@ -1,220 +0,0 @@
-/*
- * Generic IEEE 1394 definitions
- */
-
-#ifndef _IEEE1394_IEEE1394_H
-#define _IEEE1394_IEEE1394_H
-
-#define TCODE_WRITEQ 0x0
-#define TCODE_WRITEB 0x1
-#define TCODE_WRITE_RESPONSE 0x2
-#define TCODE_READQ 0x4
-#define TCODE_READB 0x5
-#define TCODE_READQ_RESPONSE 0x6
-#define TCODE_READB_RESPONSE 0x7
-#define TCODE_CYCLE_START 0x8
-#define TCODE_LOCK_REQUEST 0x9
-#define TCODE_ISO_DATA 0xa
-#define TCODE_STREAM_DATA 0xa
-#define TCODE_LOCK_RESPONSE 0xb
-
-#define RCODE_COMPLETE 0x0
-#define RCODE_CONFLICT_ERROR 0x4
-#define RCODE_DATA_ERROR 0x5
-#define RCODE_TYPE_ERROR 0x6
-#define RCODE_ADDRESS_ERROR 0x7
-
-#define EXTCODE_MASK_SWAP 0x1
-#define EXTCODE_COMPARE_SWAP 0x2
-#define EXTCODE_FETCH_ADD 0x3
-#define EXTCODE_LITTLE_ADD 0x4
-#define EXTCODE_BOUNDED_ADD 0x5
-#define EXTCODE_WRAP_ADD 0x6
-
-#define ACK_COMPLETE 0x1
-#define ACK_PENDING 0x2
-#define ACK_BUSY_X 0x4
-#define ACK_BUSY_A 0x5
-#define ACK_BUSY_B 0x6
-#define ACK_TARDY 0xb
-#define ACK_CONFLICT_ERROR 0xc
-#define ACK_DATA_ERROR 0xd
-#define ACK_TYPE_ERROR 0xe
-#define ACK_ADDRESS_ERROR 0xf
-
-/* Non-standard "ACK codes" for internal use */
-#define ACKX_NONE (-1)
-#define ACKX_SEND_ERROR (-2)
-#define ACKX_ABORTED (-3)
-#define ACKX_TIMEOUT (-4)
-
-#define IEEE1394_SPEED_100 0x00
-#define IEEE1394_SPEED_200 0x01
-#define IEEE1394_SPEED_400 0x02
-#define IEEE1394_SPEED_800 0x03
-#define IEEE1394_SPEED_1600 0x04
-#define IEEE1394_SPEED_3200 0x05
-#define IEEE1394_SPEED_MAX IEEE1394_SPEED_3200
-
-/* Maps speed values above to a string representation */
-extern const char *hpsb_speedto_str[];
-
-/* 1394a cable PHY packets */
-#define SELFID_PWRCL_NO_POWER 0x0
-#define SELFID_PWRCL_PROVIDE_15W 0x1
-#define SELFID_PWRCL_PROVIDE_30W 0x2
-#define SELFID_PWRCL_PROVIDE_45W 0x3
-#define SELFID_PWRCL_USE_1W 0x4
-#define SELFID_PWRCL_USE_3W 0x5
-#define SELFID_PWRCL_USE_6W 0x6
-#define SELFID_PWRCL_USE_10W 0x7
-
-#define SELFID_PORT_CHILD 0x3
-#define SELFID_PORT_PARENT 0x2
-#define SELFID_PORT_NCONN 0x1
-#define SELFID_PORT_NONE 0x0
-
-#define SELFID_SPEED_UNKNOWN 0x3 /* 1394b PHY */
-
-#define PHYPACKET_LINKON 0x40000000
-#define PHYPACKET_PHYCONFIG_R 0x00800000
-#define PHYPACKET_PHYCONFIG_T 0x00400000
-#define EXTPHYPACKET_TYPE_PING 0x00000000
-#define EXTPHYPACKET_TYPE_REMOTEACCESS_BASE 0x00040000
-#define EXTPHYPACKET_TYPE_REMOTEACCESS_PAGED 0x00140000
-#define EXTPHYPACKET_TYPE_REMOTEREPLY_BASE 0x000C0000
-#define EXTPHYPACKET_TYPE_REMOTEREPLY_PAGED 0x001C0000
-#define EXTPHYPACKET_TYPE_REMOTECOMMAND 0x00200000
-#define EXTPHYPACKET_TYPE_REMOTECONFIRMATION 0x00280000
-#define EXTPHYPACKET_TYPE_RESUME 0x003C0000
-
-#define EXTPHYPACKET_TYPEMASK 0xC0FC0000
-
-#define PHYPACKET_PORT_SHIFT 24
-#define PHYPACKET_GAPCOUNT_SHIFT 16
-
-/* 1394a PHY register map bitmasks */
-#define PHY_00_PHYSICAL_ID 0xFC
-#define PHY_00_R 0x02 /* Root */
-#define PHY_00_PS 0x01 /* Power Status*/
-#define PHY_01_RHB 0x80 /* Root Hold-Off */
-#define PHY_01_IBR 0x80 /* Initiate Bus Reset */
-#define PHY_01_GAP_COUNT 0x3F
-#define PHY_02_EXTENDED 0xE0 /* 0x7 for 1394a-compliant PHY */
-#define PHY_02_TOTAL_PORTS 0x1F
-#define PHY_03_MAX_SPEED 0xE0
-#define PHY_03_DELAY 0x0F
-#define PHY_04_LCTRL 0x80 /* Link Active Report Control */
-#define PHY_04_CONTENDER 0x40
-#define PHY_04_JITTER 0x38
-#define PHY_04_PWR_CLASS 0x07 /* Power Class */
-#define PHY_05_WATCHDOG 0x80
-#define PHY_05_ISBR 0x40 /* Initiate Short Bus Reset */
-#define PHY_05_LOOP 0x20 /* Loop Detect */
-#define PHY_05_PWR_FAIL 0x10 /* Cable Power Failure Detect */
-#define PHY_05_TIMEOUT 0x08 /* Arbitration State Machine Timeout */
-#define PHY_05_PORT_EVENT 0x04 /* Port Event Detect */
-#define PHY_05_ENAB_ACCEL 0x02 /* Enable Arbitration Acceleration */
-#define PHY_05_ENAB_MULTI 0x01 /* Ena. Multispeed Packet Concatenation */
-
-#include <asm/byteorder.h>
-
-/* '1' '3' '9' '4' in ASCII */
-#define IEEE1394_BUSID_MAGIC cpu_to_be32(0x31333934)
-
-#ifdef __BIG_ENDIAN_BITFIELD
-
-struct selfid {
- u32 packet_identifier:2; /* always binary 10 */
- u32 phy_id:6;
- /* byte */
- u32 extended:1; /* if true is struct ext_selfid */
- u32 link_active:1;
- u32 gap_count:6;
- /* byte */
- u32 speed:2;
- u32 phy_delay:2;
- u32 contender:1;
- u32 power_class:3;
- /* byte */
- u32 port0:2;
- u32 port1:2;
- u32 port2:2;
- u32 initiated_reset:1;
- u32 more_packets:1;
-} __attribute__((packed));
-
-struct ext_selfid {
- u32 packet_identifier:2; /* always binary 10 */
- u32 phy_id:6;
- /* byte */
- u32 extended:1; /* if false is struct selfid */
- u32 seq_nr:3;
- u32 reserved:2;
- u32 porta:2;
- /* byte */
- u32 portb:2;
- u32 portc:2;
- u32 portd:2;
- u32 porte:2;
- /* byte */
- u32 portf:2;
- u32 portg:2;
- u32 porth:2;
- u32 reserved2:1;
- u32 more_packets:1;
-} __attribute__((packed));
-
-#elif defined __LITTLE_ENDIAN_BITFIELD /* __BIG_ENDIAN_BITFIELD */
-
-/*
- * Note: these are meant to be bit fields of a big endian SelfID as seen on a little
- * endian machine. Without swapping.
- */
-
-struct selfid {
- u32 phy_id:6;
- u32 packet_identifier:2; /* always binary 10 */
- /* byte */
- u32 gap_count:6;
- u32 link_active:1;
- u32 extended:1; /* if true is struct ext_selfid */
- /* byte */
- u32 power_class:3;
- u32 contender:1;
- u32 phy_delay:2;
- u32 speed:2;
- /* byte */
- u32 more_packets:1;
- u32 initiated_reset:1;
- u32 port2:2;
- u32 port1:2;
- u32 port0:2;
-} __attribute__((packed));
-
-struct ext_selfid {
- u32 phy_id:6;
- u32 packet_identifier:2; /* always binary 10 */
- /* byte */
- u32 porta:2;
- u32 reserved:2;
- u32 seq_nr:3;
- u32 extended:1; /* if false is struct selfid */
- /* byte */
- u32 porte:2;
- u32 portd:2;
- u32 portc:2;
- u32 portb:2;
- /* byte */
- u32 more_packets:1;
- u32 reserved2:1;
- u32 porth:2;
- u32 portg:2;
- u32 portf:2;
-} __attribute__((packed));
-
-#else
-#error What? PDP endian?
-#endif /* __BIG_ENDIAN_BITFIELD */
-
-#endif /* _IEEE1394_IEEE1394_H */
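[Editor's note] To show how the SelfID bit-field views above are used: a hedged sketch that decodes one SelfID quadlet. Per the note above the little-endian variant, the quadlet is assumed to still be in wire (big-endian) byte order and is deliberately not byte-swapped before the cast; example_print_selfid is illustrative only.

        #include <linux/kernel.h>
        #include <linux/types.h>
        #include "ieee1394.h"

        static void example_print_selfid(u32 wire_order_quadlet)
        {
                struct selfid *sid = (struct selfid *)&wire_order_quadlet;

                if (sid->packet_identifier != 2)        /* SelfIDs are always binary 10 */
                        return;

                printk(KERN_DEBUG "phy %u: link %s, gap count %u, power class %u\n",
                       sid->phy_id, sid->link_active ? "active" : "inactive",
                       sid->gap_count, sid->power_class);
        }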
diff --git a/drivers/ieee1394/ieee1394_core.c b/drivers/ieee1394/ieee1394_core.c
deleted file mode 100644
index 872338003721..000000000000
--- a/drivers/ieee1394/ieee1394_core.c
+++ /dev/null
@@ -1,1380 +0,0 @@
-/*
- * IEEE 1394 for Linux
- *
- * Core support: hpsb_packet management, packet handling and forwarding to
- * highlevel or lowlevel code
- *
- * Copyright (C) 1999, 2000 Andreas E. Bombe
- * 2002 Manfred Weihs <weihs@ict.tuwien.ac.at>
- *
- * This code is licensed under the GPL. See the file COPYING in the root
- * directory of the kernel sources for details.
- *
- *
- * Contributions:
- *
- * Manfred Weihs <weihs@ict.tuwien.ac.at>
- * loopback functionality in hpsb_send_packet
- * allow highlevel drivers to disable automatic response generation
- * and to generate responses themselves (deferred)
- *
- */
-
-#include <linux/kernel.h>
-#include <linux/list.h>
-#include <linux/string.h>
-#include <linux/init.h>
-#include <linux/slab.h>
-#include <linux/interrupt.h>
-#include <linux/module.h>
-#include <linux/moduleparam.h>
-#include <linux/bitops.h>
-#include <linux/kdev_t.h>
-#include <linux/freezer.h>
-#include <linux/suspend.h>
-#include <linux/kthread.h>
-#include <linux/preempt.h>
-#include <linux/time.h>
-
-#include <asm/system.h>
-#include <asm/byteorder.h>
-
-#include "ieee1394_types.h"
-#include "ieee1394.h"
-#include "hosts.h"
-#include "ieee1394_core.h"
-#include "highlevel.h"
-#include "ieee1394_transactions.h"
-#include "csr.h"
-#include "nodemgr.h"
-#include "dma.h"
-#include "iso.h"
-#include "config_roms.h"
-
-/*
- * Disable the nodemgr detection and config rom reading functionality.
- */
-static int disable_nodemgr;
-module_param(disable_nodemgr, int, 0444);
-MODULE_PARM_DESC(disable_nodemgr, "Disable nodemgr functionality.");
-
-/* Disable Isochronous Resource Manager functionality */
-int hpsb_disable_irm = 0;
-module_param_named(disable_irm, hpsb_disable_irm, bool, 0444);
-MODULE_PARM_DESC(disable_irm,
- "Disable Isochronous Resource Manager functionality.");
-
-/* We are GPL, so treat us special */
-MODULE_LICENSE("GPL");
-
-/* Some globals used */
-const char *hpsb_speedto_str[] = { "S100", "S200", "S400", "S800", "S1600", "S3200" };
-struct class *hpsb_protocol_class;
-
-#ifdef CONFIG_IEEE1394_VERBOSEDEBUG
-static void dump_packet(const char *text, quadlet_t *data, int size, int speed)
-{
- int i;
-
- size /= 4;
- size = (size > 4 ? 4 : size);
-
- printk(KERN_DEBUG "ieee1394: %s", text);
- if (speed > -1 && speed < 6)
- printk(" at %s", hpsb_speedto_str[speed]);
- printk(":");
- for (i = 0; i < size; i++)
- printk(" %08x", data[i]);
- printk("\n");
-}
-#else
-#define dump_packet(a,b,c,d) do {} while (0)
-#endif
-
-static void abort_requests(struct hpsb_host *host);
-static void queue_packet_complete(struct hpsb_packet *packet);
-
-
-/**
- * hpsb_set_packet_complete_task - set task that runs when a packet completes
- * @packet: the packet whose completion we want the task added to
- * @routine: function to call
- * @data: data (if any) to pass to the above function
- *
- * Set the task that runs when a packet completes. You cannot call this more
- * than once on a single packet before it is sent.
- *
- * Typically, the complete @routine is responsible for calling hpsb_free_packet().
- */
-void hpsb_set_packet_complete_task(struct hpsb_packet *packet,
- void (*routine)(void *), void *data)
-{
- WARN_ON(packet->complete_routine != NULL);
- packet->complete_routine = routine;
- packet->complete_data = data;
- return;
-}
-
-/**
- * hpsb_alloc_packet - allocate new packet structure
- * @data_size: size of the data block to be allocated, in bytes
- *
- * This function allocates, initializes and returns a new &struct hpsb_packet.
- * It can be used in interrupt context. A header block is always included and
- * initialized with zeros. Its size is big enough to contain all possible 1394
- * headers. The data block is only allocated if @data_size is not zero.
- *
- * For packets for which responses will be received, the @data_size has to be big
- * enough to contain the response's data block since no further allocation
- * occurs at response matching time.
- *
- * The packet's generation value will be set to the current generation number
- * for ease of use. Remember to overwrite it with your own recorded generation
- * number if you can not be sure that your code will not race with a bus reset.
- *
- * Return value: A pointer to a &struct hpsb_packet or NULL on allocation
- * failure.
- */
-struct hpsb_packet *hpsb_alloc_packet(size_t data_size)
-{
- struct hpsb_packet *packet;
-
- data_size = ((data_size + 3) & ~3);
-
- packet = kzalloc(sizeof(*packet) + data_size, GFP_ATOMIC);
- if (!packet)
- return NULL;
-
- packet->state = hpsb_unused;
- packet->generation = -1;
- INIT_LIST_HEAD(&packet->driver_list);
- INIT_LIST_HEAD(&packet->queue);
- atomic_set(&packet->refcnt, 1);
-
- if (data_size) {
- packet->data = packet->embedded_data;
- packet->allocated_data_size = data_size;
- }
- return packet;
-}
-
-/**
- * hpsb_free_packet - free packet and data associated with it
- * @packet: packet to free (is NULL safe)
- *
- * Frees @packet->data only if it was allocated through hpsb_alloc_packet().
- */
-void hpsb_free_packet(struct hpsb_packet *packet)
-{
- if (packet && atomic_dec_and_test(&packet->refcnt)) {
- BUG_ON(!list_empty(&packet->driver_list) ||
- !list_empty(&packet->queue));
- kfree(packet);
- }
-}
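
Taken together, the two functions above bracket a packet's lifetime. A minimal
illustrative sketch, assuming the API declared in the removed ieee1394_core.h
(the function name is hypothetical and the 8-byte size is arbitrary):

/* Illustrative sketch only, not part of the removed driver. */
static int example_packet_lifetime(void)
{
	/* room for an 8-byte data block; usable from interrupt context */
	struct hpsb_packet *packet = hpsb_alloc_packet(8);

	if (!packet)
		return -ENOMEM;

	/* packet->data points at the embedded block, allocated_data_size == 8 */
	packet->data[0] = 0;
	packet->data[1] = 0;

	/* drops the initial reference; packet and data are kfree()d together */
	hpsb_free_packet(packet);
	return 0;
}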
-
-/**
- * hpsb_reset_bus - initiate bus reset on the given host
- * @host: host controller whose bus to reset
- * @type: one of enum reset_types
- *
- * Returns 1 if bus reset already in progress, 0 otherwise.
- */
-int hpsb_reset_bus(struct hpsb_host *host, int type)
-{
- if (!host->in_bus_reset) {
- host->driver->devctl(host, RESET_BUS, type);
- return 0;
- } else {
- return 1;
- }
-}
-
-/**
- * hpsb_read_cycle_timer - read cycle timer register and system time
- * @host: host whose isochronous cycle timer register is read
- * @cycle_timer: address of bitfield to return the register contents
- * @local_time: address to return the system time
- *
- * The format of *@cycle_timer is described in OHCI 1.1 clause 5.13. This
- * format is also read from non-OHCI controllers. *@local_time contains the
- * system time in microseconds since the Epoch, read at the moment when the
- * cycle timer was read.
- *
- * Return value: 0 for success or error number otherwise.
- */
-int hpsb_read_cycle_timer(struct hpsb_host *host, u32 *cycle_timer,
- u64 *local_time)
-{
- int ctr;
- struct timeval tv;
- unsigned long flags;
-
- if (!host || !cycle_timer || !local_time)
- return -EINVAL;
-
- preempt_disable();
- local_irq_save(flags);
-
- ctr = host->driver->devctl(host, GET_CYCLE_COUNTER, 0);
- if (ctr)
- do_gettimeofday(&tv);
-
- local_irq_restore(flags);
- preempt_enable();
-
- if (!ctr)
- return -EIO;
- *cycle_timer = ctr;
- *local_time = tv.tv_sec * 1000000ULL + tv.tv_usec;
- return 0;
-}
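
The value returned in @cycle_timer follows the OHCI 1.1 register layout
referenced above, so it can be split into seconds, cycle count and cycle
offset. A small illustrative decoder (name hypothetical; field positions
assumed per OHCI 1.1 clause 5.13):

/* Illustrative only: cycleSeconds in bits 31..25, cycleCount (8 kHz) in
 * bits 24..12, cycleOffset (24.576 MHz) in bits 11..0. */
static void example_decode_cycle_timer(struct hpsb_host *host)
{
	u32 ct;
	u64 local_time_us;

	if (hpsb_read_cycle_timer(host, &ct, &local_time_us))
		return;

	pr_debug("ieee1394: %u s, cycle %u, offset %u (system time %llu us)\n",
		 ct >> 25, (ct >> 12) & 0x1fff, ct & 0xfff,
		 (unsigned long long)local_time_us);
}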
-
-/**
- * hpsb_bus_reset - notify a bus reset to the core
- *
- * For host driver module usage. Safe to use in interrupt context, although
- * quite complex; so you may want to run it in the bottom rather than top half.
- *
- * Returns 1 if bus reset already in progress, 0 otherwise.
- */
-int hpsb_bus_reset(struct hpsb_host *host)
-{
- if (host->in_bus_reset) {
- HPSB_NOTICE("%s called while bus reset already in progress",
- __func__);
- return 1;
- }
-
- abort_requests(host);
- host->in_bus_reset = 1;
- host->irm_id = -1;
- host->is_irm = 0;
- host->busmgr_id = -1;
- host->is_busmgr = 0;
- host->is_cycmst = 0;
- host->node_count = 0;
- host->selfid_count = 0;
-
- return 0;
-}
-
-
-/*
- * Verify num_of_selfids SelfIDs and return number of nodes. Return zero in
- * case verification failed.
- */
-static int check_selfids(struct hpsb_host *host)
-{
- int nodeid = -1;
- int rest_of_selfids = host->selfid_count;
- struct selfid *sid = (struct selfid *)host->topology_map;
- struct ext_selfid *esid;
- int esid_seq = 23;
-
- host->nodes_active = 0;
-
- while (rest_of_selfids--) {
- if (!sid->extended) {
- nodeid++;
- esid_seq = 0;
-
- if (sid->phy_id != nodeid) {
- HPSB_INFO("SelfIDs failed monotony check with "
- "%d", sid->phy_id);
- return 0;
- }
-
- if (sid->link_active) {
- host->nodes_active++;
- if (sid->contender)
- host->irm_id = LOCAL_BUS | sid->phy_id;
- }
- } else {
- esid = (struct ext_selfid *)sid;
-
- if ((esid->phy_id != nodeid)
- || (esid->seq_nr != esid_seq)) {
- HPSB_INFO("SelfIDs failed monotony check with "
- "%d/%d", esid->phy_id, esid->seq_nr);
- return 0;
- }
- esid_seq++;
- }
- sid++;
- }
-
- esid = (struct ext_selfid *)(sid - 1);
- while (esid->extended) {
- if ((esid->porta == SELFID_PORT_PARENT) ||
- (esid->portb == SELFID_PORT_PARENT) ||
- (esid->portc == SELFID_PORT_PARENT) ||
- (esid->portd == SELFID_PORT_PARENT) ||
- (esid->porte == SELFID_PORT_PARENT) ||
- (esid->portf == SELFID_PORT_PARENT) ||
- (esid->portg == SELFID_PORT_PARENT) ||
- (esid->porth == SELFID_PORT_PARENT)) {
- HPSB_INFO("SelfIDs failed root check on "
- "extended SelfID");
- return 0;
- }
- esid--;
- }
-
- sid = (struct selfid *)esid;
- if ((sid->port0 == SELFID_PORT_PARENT) ||
- (sid->port1 == SELFID_PORT_PARENT) ||
- (sid->port2 == SELFID_PORT_PARENT)) {
- HPSB_INFO("SelfIDs failed root check");
- return 0;
- }
-
- host->node_count = nodeid + 1;
- return 1;
-}
-
-static void build_speed_map(struct hpsb_host *host, int nodecount)
-{
- u8 cldcnt[nodecount];
- u8 *map = host->speed_map;
- u8 *speedcap = host->speed;
- u8 local_link_speed = host->csr.lnk_spd;
- struct selfid *sid;
- struct ext_selfid *esid;
- int i, j, n;
-
- for (i = 0; i < (nodecount * 64); i += 64) {
- for (j = 0; j < nodecount; j++) {
- map[i+j] = IEEE1394_SPEED_MAX;
- }
- }
-
- for (i = 0; i < nodecount; i++) {
- cldcnt[i] = 0;
- }
-
- /* find direct children count and speed */
- for (sid = (struct selfid *)&host->topology_map[host->selfid_count-1],
- n = nodecount - 1;
- (void *)sid >= (void *)host->topology_map; sid--) {
- if (sid->extended) {
- esid = (struct ext_selfid *)sid;
-
- if (esid->porta == SELFID_PORT_CHILD) cldcnt[n]++;
- if (esid->portb == SELFID_PORT_CHILD) cldcnt[n]++;
- if (esid->portc == SELFID_PORT_CHILD) cldcnt[n]++;
- if (esid->portd == SELFID_PORT_CHILD) cldcnt[n]++;
- if (esid->porte == SELFID_PORT_CHILD) cldcnt[n]++;
- if (esid->portf == SELFID_PORT_CHILD) cldcnt[n]++;
- if (esid->portg == SELFID_PORT_CHILD) cldcnt[n]++;
- if (esid->porth == SELFID_PORT_CHILD) cldcnt[n]++;
- } else {
- if (sid->port0 == SELFID_PORT_CHILD) cldcnt[n]++;
- if (sid->port1 == SELFID_PORT_CHILD) cldcnt[n]++;
- if (sid->port2 == SELFID_PORT_CHILD) cldcnt[n]++;
-
- speedcap[n] = sid->speed;
- if (speedcap[n] > local_link_speed)
- speedcap[n] = local_link_speed;
- n--;
- }
- }
-
- /* set self mapping */
- for (i = 0; i < nodecount; i++) {
- map[64*i + i] = speedcap[i];
- }
-
- /* fix up direct children count to total children count;
- * also fix up speedcaps for sibling and parent communication */
- for (i = 1; i < nodecount; i++) {
- for (j = cldcnt[i], n = i - 1; j > 0; j--) {
- cldcnt[i] += cldcnt[n];
- speedcap[n] = min(speedcap[n], speedcap[i]);
- n -= cldcnt[n] + 1;
- }
- }
-
- for (n = 0; n < nodecount; n++) {
- for (i = n - cldcnt[n]; i <= n; i++) {
- for (j = 0; j < (n - cldcnt[n]); j++) {
- map[j*64 + i] = map[i*64 + j] =
- min(map[i*64 + j], speedcap[n]);
- }
- for (j = n + 1; j < nodecount; j++) {
- map[j*64 + i] = map[i*64 + j] =
- min(map[i*64 + j], speedcap[n]);
- }
- }
- }
-
- /* assume a maximum speed for 1394b PHYs, nodemgr will correct it */
- if (local_link_speed > SELFID_SPEED_UNKNOWN)
- for (i = 0; i < nodecount; i++)
- if (speedcap[i] == SELFID_SPEED_UNKNOWN)
- speedcap[i] = local_link_speed;
-}
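
The finished table is symmetric and indexed per node pair, so the usable
speed between any two nodes is a single array lookup. Illustrative only
(helper name hypothetical):

/* Illustrative lookup into the table built above: the maximum usable speed
 * between nodes a and b is the entry at row a, column b. */
static u8 example_path_speed(struct hpsb_host *host, int a, int b)
{
	return host->speed_map[64 * a + b];
}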
-
-
-/**
- * hpsb_selfid_received - hand over received selfid packet to the core
- *
- * For host driver module usage. Safe to use in interrupt context.
- *
- * The host driver should have done a successful complement check (second
- * quadlet is complement of first) beforehand.
- */
-void hpsb_selfid_received(struct hpsb_host *host, quadlet_t sid)
-{
- if (host->in_bus_reset) {
- HPSB_VERBOSE("Including SelfID 0x%x", sid);
- host->topology_map[host->selfid_count++] = sid;
- } else {
- HPSB_NOTICE("Spurious SelfID packet (0x%08x) received from bus %d",
- sid, NODEID_TO_BUS(host->node_id));
- }
-}
-
-/**
- * hpsb_selfid_complete - notify completion of SelfID stage to the core
- *
- * For host driver module usage. Safe to use in interrupt context, although
- * quite complex; so you may want to run it in the bottom rather than top half.
- *
- * Notify completion of SelfID stage to the core and report new physical ID
- * and whether host is root now.
- */
-void hpsb_selfid_complete(struct hpsb_host *host, int phyid, int isroot)
-{
- if (!host->in_bus_reset)
- HPSB_NOTICE("SelfID completion called outside of bus reset!");
-
- host->node_id = LOCAL_BUS | phyid;
- host->is_root = isroot;
-
- if (!check_selfids(host)) {
- if (host->reset_retries++ < 20) {
- /* selfid stage did not complete without error */
- HPSB_NOTICE("Error in SelfID stage, resetting");
- host->in_bus_reset = 0;
- /* this should work from ohci1394 now... */
- hpsb_reset_bus(host, LONG_RESET);
- return;
- } else {
- HPSB_NOTICE("Stopping out-of-control reset loop");
- HPSB_NOTICE("Warning - topology map and speed map will not be valid");
- host->reset_retries = 0;
- }
- } else {
- host->reset_retries = 0;
- build_speed_map(host, host->node_count);
- }
-
- HPSB_VERBOSE("selfid_complete called with successful SelfID stage "
- "... irm_id: 0x%X node_id: 0x%X",host->irm_id,host->node_id);
-
- /* irm_id is kept up to date by check_selfids() */
- if (host->irm_id == host->node_id) {
- host->is_irm = 1;
- } else {
- host->is_busmgr = 0;
- host->is_irm = 0;
- }
-
- if (isroot) {
- host->driver->devctl(host, ACT_CYCLE_MASTER, 1);
- host->is_cycmst = 1;
- }
- atomic_inc(&host->generation);
- host->in_bus_reset = 0;
- highlevel_host_reset(host);
-}
-
-static DEFINE_SPINLOCK(pending_packets_lock);
-
-/**
- * hpsb_packet_sent - notify core of sending a packet
- *
- * For host driver module usage. Safe to call from within a transmit packet
- * routine.
- *
- * Notify core of sending a packet. Ackcode is the ack code returned for async
- * transmits or ACKX_SEND_ERROR if the transmission failed completely; ACKX_NONE
- * for other cases (internal errors that don't justify a panic).
- */
-void hpsb_packet_sent(struct hpsb_host *host, struct hpsb_packet *packet,
- int ackcode)
-{
- unsigned long flags;
-
- spin_lock_irqsave(&pending_packets_lock, flags);
-
- packet->ack_code = ackcode;
-
- if (packet->no_waiter || packet->state == hpsb_complete) {
- /* if packet->no_waiter, must not have a tlabel allocated */
- spin_unlock_irqrestore(&pending_packets_lock, flags);
- hpsb_free_packet(packet);
- return;
- }
-
- atomic_dec(&packet->refcnt); /* drop HC's reference */
- /* here the packet must be on the host->pending_packets queue */
-
- if (ackcode != ACK_PENDING || !packet->expect_response) {
- packet->state = hpsb_complete;
- list_del_init(&packet->queue);
- spin_unlock_irqrestore(&pending_packets_lock, flags);
- queue_packet_complete(packet);
- return;
- }
-
- packet->state = hpsb_pending;
- packet->sendtime = jiffies;
-
- spin_unlock_irqrestore(&pending_packets_lock, flags);
-
- mod_timer(&host->timeout, jiffies + host->timeout_interval);
-}
-
-/**
- * hpsb_send_phy_config - transmit a PHY configuration packet on the bus
- * @host: host that PHY config packet gets sent through
- * @rootid: root whose force_root bit should get set (-1 = don't set force_root)
- * @gapcnt: gap count value to set (-1 = don't set gap count)
- *
- * This function sends a PHY config packet on the bus through the specified
- * host.
- *
- * Return value: 0 for success or negative error number otherwise.
- */
-int hpsb_send_phy_config(struct hpsb_host *host, int rootid, int gapcnt)
-{
- struct hpsb_packet *packet;
- quadlet_t d = 0;
- int retval = 0;
-
- if (rootid >= ALL_NODES || rootid < -1 || gapcnt > 0x3f || gapcnt < -1 ||
- (rootid == -1 && gapcnt == -1)) {
- HPSB_DEBUG("Invalid Parameter: rootid = %d gapcnt = %d",
- rootid, gapcnt);
- return -EINVAL;
- }
-
- if (rootid != -1)
- d |= PHYPACKET_PHYCONFIG_R | rootid << PHYPACKET_PORT_SHIFT;
- if (gapcnt != -1)
- d |= PHYPACKET_PHYCONFIG_T | gapcnt << PHYPACKET_GAPCOUNT_SHIFT;
-
- packet = hpsb_make_phypacket(host, d);
- if (!packet)
- return -ENOMEM;
-
- packet->generation = get_hpsb_generation(host);
- retval = hpsb_send_packet_and_wait(packet);
- hpsb_free_packet(packet);
-
- return retval;
-}
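
For illustration, a call that only sets the gap count and leaves the
force_root bit untouched (values arbitrary, wrapper name hypothetical):

/* Illustrative only: ask all PHYs to use gap count 63; rootid == -1 leaves
 * the force_root bit alone.  Blocks until the PHY packet has been sent. */
static int example_set_gap_count(struct hpsb_host *host)
{
	return hpsb_send_phy_config(host, -1, 63);
}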
-
-/**
- * hpsb_send_packet - transmit a packet on the bus
- * @packet: packet to send
- *
- * The packet is sent through the host specified in the packet->host field.
- * Before sending, the packet's transmit speed is automatically determined
- * using the local speed map when it is an async, non-broadcast packet.
- *
- * Possibilities for failure are that host is either not initialized, in bus
- * reset, the packet's generation number doesn't match the current generation
- * number or the host reports a transmit error.
- *
- * Return value: 0 on success, negative errno on failure.
- */
-int hpsb_send_packet(struct hpsb_packet *packet)
-{
- struct hpsb_host *host = packet->host;
-
- if (host->is_shutdown)
- return -EINVAL;
- if (host->in_bus_reset ||
- (packet->generation != get_hpsb_generation(host)))
- return -EAGAIN;
-
- packet->state = hpsb_queued;
-
- /* This just seems silly to me */
- WARN_ON(packet->no_waiter && packet->expect_response);
-
- if (!packet->no_waiter || packet->expect_response) {
- unsigned long flags;
-
- atomic_inc(&packet->refcnt);
- /* Set the initial "sendtime" to 10 seconds from now, to
- prevent premature expiry. If a packet takes more than
- 10 seconds to hit the wire, we have bigger problems :) */
- packet->sendtime = jiffies + 10 * HZ;
- spin_lock_irqsave(&pending_packets_lock, flags);
- list_add_tail(&packet->queue, &host->pending_packets);
- spin_unlock_irqrestore(&pending_packets_lock, flags);
- }
-
- if (packet->node_id == host->node_id) {
- /* it is a local request, so handle it locally */
-
- quadlet_t *data;
- size_t size = packet->data_size + packet->header_size;
-
- data = kmalloc(size, GFP_ATOMIC);
- if (!data) {
- HPSB_ERR("unable to allocate memory for concatenating header and data");
- return -ENOMEM;
- }
-
- memcpy(data, packet->header, packet->header_size);
-
- if (packet->data_size)
- memcpy(((u8*)data) + packet->header_size, packet->data, packet->data_size);
-
- dump_packet("send packet local", packet->header, packet->header_size, -1);
-
- hpsb_packet_sent(host, packet, packet->expect_response ? ACK_PENDING : ACK_COMPLETE);
- hpsb_packet_received(host, data, size, 0);
-
- kfree(data);
-
- return 0;
- }
-
- if (packet->type == hpsb_async &&
- NODEID_TO_NODE(packet->node_id) != ALL_NODES)
- packet->speed_code =
- host->speed[NODEID_TO_NODE(packet->node_id)];
-
- dump_packet("send packet", packet->header, packet->header_size, packet->speed_code);
-
- return host->driver->transmit_packet(host, packet);
-}
-
-/* We could just use complete() directly as the packet complete
- * callback, but this is more typesafe, in the sense that we get a
- * compiler error if the prototype for complete() changes. */
-
-static void complete_packet(void *data)
-{
- complete((struct completion *) data);
-}
-
-/**
- * hpsb_send_packet_and_wait - enqueue packet, block until transaction completes
- * @packet: packet to send
- *
- * Return value: 0 on success, negative errno on failure.
- */
-int hpsb_send_packet_and_wait(struct hpsb_packet *packet)
-{
- struct completion done;
- int retval;
-
- init_completion(&done);
- hpsb_set_packet_complete_task(packet, complete_packet, &done);
- retval = hpsb_send_packet(packet);
- if (retval == 0)
- wait_for_completion(&done);
-
- return retval;
-}
-
-static void send_packet_nocare(struct hpsb_packet *packet)
-{
- if (hpsb_send_packet(packet) < 0) {
- hpsb_free_packet(packet);
- }
-}
-
-static size_t packet_size_to_data_size(size_t packet_size, size_t header_size,
- size_t buffer_size, int tcode)
-{
- size_t ret = packet_size <= header_size ? 0 : packet_size - header_size;
-
- if (unlikely(ret > buffer_size))
- ret = buffer_size;
-
- if (unlikely(ret + header_size != packet_size))
- HPSB_ERR("unexpected packet size %zd (tcode %d), bug?",
- packet_size, tcode);
- return ret;
-}
-
-static void handle_packet_response(struct hpsb_host *host, int tcode,
- quadlet_t *data, size_t size)
-{
- struct hpsb_packet *packet;
- int tlabel = (data[0] >> 10) & 0x3f;
- size_t header_size;
- unsigned long flags;
-
- spin_lock_irqsave(&pending_packets_lock, flags);
-
- list_for_each_entry(packet, &host->pending_packets, queue)
- if (packet->tlabel == tlabel &&
- packet->node_id == (data[1] >> 16))
- goto found;
-
- spin_unlock_irqrestore(&pending_packets_lock, flags);
- HPSB_DEBUG("unsolicited response packet received - %s",
- "no tlabel match");
- dump_packet("contents", data, 16, -1);
- return;
-
-found:
- switch (packet->tcode) {
- case TCODE_WRITEQ:
- case TCODE_WRITEB:
- if (unlikely(tcode != TCODE_WRITE_RESPONSE))
- break;
- header_size = 12;
- size = 0;
- goto dequeue;
-
- case TCODE_READQ:
- if (unlikely(tcode != TCODE_READQ_RESPONSE))
- break;
- header_size = 16;
- size = 0;
- goto dequeue;
-
- case TCODE_READB:
- if (unlikely(tcode != TCODE_READB_RESPONSE))
- break;
- header_size = 16;
- size = packet_size_to_data_size(size, header_size,
- packet->allocated_data_size,
- tcode);
- goto dequeue;
-
- case TCODE_LOCK_REQUEST:
- if (unlikely(tcode != TCODE_LOCK_RESPONSE))
- break;
- header_size = 16;
- size = packet_size_to_data_size(min(size, (size_t)(16 + 8)),
- header_size,
- packet->allocated_data_size,
- tcode);
- goto dequeue;
- }
-
- spin_unlock_irqrestore(&pending_packets_lock, flags);
- HPSB_DEBUG("unsolicited response packet received - %s",
- "tcode mismatch");
- dump_packet("contents", data, 16, -1);
- return;
-
-dequeue:
- list_del_init(&packet->queue);
- spin_unlock_irqrestore(&pending_packets_lock, flags);
-
- if (packet->state == hpsb_queued) {
- packet->sendtime = jiffies;
- packet->ack_code = ACK_PENDING;
- }
- packet->state = hpsb_complete;
-
- memcpy(packet->header, data, header_size);
- if (size)
- memcpy(packet->data, data + 4, size);
-
- queue_packet_complete(packet);
-}
-
-
-static struct hpsb_packet *create_reply_packet(struct hpsb_host *host,
- quadlet_t *data, size_t dsize)
-{
- struct hpsb_packet *p;
-
- p = hpsb_alloc_packet(dsize);
- if (unlikely(p == NULL)) {
- /* FIXME - send data_error response */
- HPSB_ERR("out of memory, cannot send response packet");
- return NULL;
- }
-
- p->type = hpsb_async;
- p->state = hpsb_unused;
- p->host = host;
- p->node_id = data[1] >> 16;
- p->tlabel = (data[0] >> 10) & 0x3f;
- p->no_waiter = 1;
-
- p->generation = get_hpsb_generation(host);
-
- if (dsize % 4)
- p->data[dsize / 4] = 0;
-
- return p;
-}
-
-#define PREP_ASYNC_HEAD_RCODE(tc) \
- packet->tcode = tc; \
- packet->header[0] = (packet->node_id << 16) | (packet->tlabel << 10) \
- | (1 << 8) | (tc << 4); \
- packet->header[1] = (packet->host->node_id << 16) | (rcode << 12); \
- packet->header[2] = 0
-
-static void fill_async_readquad_resp(struct hpsb_packet *packet, int rcode,
- quadlet_t data)
-{
- PREP_ASYNC_HEAD_RCODE(TCODE_READQ_RESPONSE);
- packet->header[3] = data;
- packet->header_size = 16;
- packet->data_size = 0;
-}
-
-static void fill_async_readblock_resp(struct hpsb_packet *packet, int rcode,
- int length)
-{
- if (rcode != RCODE_COMPLETE)
- length = 0;
-
- PREP_ASYNC_HEAD_RCODE(TCODE_READB_RESPONSE);
- packet->header[3] = length << 16;
- packet->header_size = 16;
- packet->data_size = length + (length % 4 ? 4 - (length % 4) : 0);
-}
-
-static void fill_async_write_resp(struct hpsb_packet *packet, int rcode)
-{
- PREP_ASYNC_HEAD_RCODE(TCODE_WRITE_RESPONSE);
- packet->header_size = 12;
- packet->data_size = 0;
-}
-
-static void fill_async_lock_resp(struct hpsb_packet *packet, int rcode, int extcode,
- int length)
-{
- if (rcode != RCODE_COMPLETE)
- length = 0;
-
- PREP_ASYNC_HEAD_RCODE(TCODE_LOCK_RESPONSE);
- packet->header[3] = (length << 16) | extcode;
- packet->header_size = 16;
- packet->data_size = length;
-}
-
-static void handle_incoming_packet(struct hpsb_host *host, int tcode,
- quadlet_t *data, size_t size,
- int write_acked)
-{
- struct hpsb_packet *packet;
- int length, rcode, extcode;
- quadlet_t buffer;
- nodeid_t source = data[1] >> 16;
- nodeid_t dest = data[0] >> 16;
- u16 flags = (u16) data[0];
- u64 addr;
-
- /* FIXME?
- * Out-of-bounds lengths are left for highlevel_read|write to cap. */
-
- switch (tcode) {
- case TCODE_WRITEQ:
- addr = (((u64)(data[1] & 0xffff)) << 32) | data[2];
- rcode = highlevel_write(host, source, dest, data + 3,
- addr, 4, flags);
- goto handle_write_request;
-
- case TCODE_WRITEB:
- addr = (((u64)(data[1] & 0xffff)) << 32) | data[2];
- rcode = highlevel_write(host, source, dest, data + 4,
- addr, data[3] >> 16, flags);
-handle_write_request:
- if (rcode < 0 || write_acked ||
- NODEID_TO_NODE(data[0] >> 16) == NODE_MASK)
- return;
- /* not a broadcast write, reply */
- packet = create_reply_packet(host, data, 0);
- if (packet) {
- fill_async_write_resp(packet, rcode);
- send_packet_nocare(packet);
- }
- return;
-
- case TCODE_READQ:
- addr = (((u64)(data[1] & 0xffff)) << 32) | data[2];
- rcode = highlevel_read(host, source, &buffer, addr, 4, flags);
- if (rcode < 0)
- return;
-
- packet = create_reply_packet(host, data, 0);
- if (packet) {
- fill_async_readquad_resp(packet, rcode, buffer);
- send_packet_nocare(packet);
- }
- return;
-
- case TCODE_READB:
- length = data[3] >> 16;
- packet = create_reply_packet(host, data, length);
- if (!packet)
- return;
-
- addr = (((u64)(data[1] & 0xffff)) << 32) | data[2];
- rcode = highlevel_read(host, source, packet->data, addr,
- length, flags);
- if (rcode < 0) {
- hpsb_free_packet(packet);
- return;
- }
- fill_async_readblock_resp(packet, rcode, length);
- send_packet_nocare(packet);
- return;
-
- case TCODE_LOCK_REQUEST:
- length = data[3] >> 16;
- extcode = data[3] & 0xffff;
- addr = (((u64)(data[1] & 0xffff)) << 32) | data[2];
-
- packet = create_reply_packet(host, data, 8);
- if (!packet)
- return;
-
- if (extcode == 0 || extcode >= 7) {
- /* let switch default handle error */
- length = 0;
- }
-
- switch (length) {
- case 4:
- rcode = highlevel_lock(host, source, packet->data, addr,
- data[4], 0, extcode, flags);
- fill_async_lock_resp(packet, rcode, extcode, 4);
- break;
- case 8:
- if (extcode != EXTCODE_FETCH_ADD &&
- extcode != EXTCODE_LITTLE_ADD) {
- rcode = highlevel_lock(host, source,
- packet->data, addr,
- data[5], data[4],
- extcode, flags);
- fill_async_lock_resp(packet, rcode, extcode, 4);
- } else {
- rcode = highlevel_lock64(host, source,
- (octlet_t *)packet->data, addr,
- *(octlet_t *)(data + 4), 0ULL,
- extcode, flags);
- fill_async_lock_resp(packet, rcode, extcode, 8);
- }
- break;
- case 16:
- rcode = highlevel_lock64(host, source,
- (octlet_t *)packet->data, addr,
- *(octlet_t *)(data + 6),
- *(octlet_t *)(data + 4),
- extcode, flags);
- fill_async_lock_resp(packet, rcode, extcode, 8);
- break;
- default:
- rcode = RCODE_TYPE_ERROR;
- fill_async_lock_resp(packet, rcode, extcode, 0);
- }
-
- if (rcode < 0)
- hpsb_free_packet(packet);
- else
- send_packet_nocare(packet);
- return;
- }
-}
-
-/**
- * hpsb_packet_received - hand over received packet to the core
- *
- * For host driver module usage.
- *
- * The contents of data are expected to be the full packet but with the CRCs
- * left out (data block follows header immediately), with the header (i.e. the
- * first four quadlets) in machine byte order and the data block in big endian.
- * *@data can be safely overwritten after this call.
- *
- * If the packet is a write request, @write_acked is to be set to true if it was
- * ack_complete'd already, false otherwise. This argument is ignored for any
- * other packet type.
- */
-void hpsb_packet_received(struct hpsb_host *host, quadlet_t *data, size_t size,
- int write_acked)
-{
- int tcode;
-
- if (unlikely(host->in_bus_reset)) {
- HPSB_DEBUG("received packet during reset; ignoring");
- return;
- }
-
- dump_packet("received packet", data, size, -1);
-
- tcode = (data[0] >> 4) & 0xf;
-
- switch (tcode) {
- case TCODE_WRITE_RESPONSE:
- case TCODE_READQ_RESPONSE:
- case TCODE_READB_RESPONSE:
- case TCODE_LOCK_RESPONSE:
- handle_packet_response(host, tcode, data, size);
- break;
-
- case TCODE_WRITEQ:
- case TCODE_WRITEB:
- case TCODE_READQ:
- case TCODE_READB:
- case TCODE_LOCK_REQUEST:
- handle_incoming_packet(host, tcode, data, size, write_acked);
- break;
-
- case TCODE_CYCLE_START:
- /* simply ignore this packet if it is passed on */
- break;
-
- default:
- HPSB_DEBUG("received packet with bogus transaction code %d",
- tcode);
- break;
- }
-}
-
-static void abort_requests(struct hpsb_host *host)
-{
- struct hpsb_packet *packet, *p;
- struct list_head tmp;
- unsigned long flags;
-
- host->driver->devctl(host, CANCEL_REQUESTS, 0);
-
- INIT_LIST_HEAD(&tmp);
- spin_lock_irqsave(&pending_packets_lock, flags);
- list_splice_init(&host->pending_packets, &tmp);
- spin_unlock_irqrestore(&pending_packets_lock, flags);
-
- list_for_each_entry_safe(packet, p, &tmp, queue) {
- list_del_init(&packet->queue);
- packet->state = hpsb_complete;
- packet->ack_code = ACKX_ABORTED;
- queue_packet_complete(packet);
- }
-}
-
-void abort_timedouts(unsigned long __opaque)
-{
- struct hpsb_host *host = (struct hpsb_host *)__opaque;
- struct hpsb_packet *packet, *p;
- struct list_head tmp;
- unsigned long flags, expire, j;
-
- spin_lock_irqsave(&host->csr.lock, flags);
- expire = host->csr.expire;
- spin_unlock_irqrestore(&host->csr.lock, flags);
-
- j = jiffies;
- INIT_LIST_HEAD(&tmp);
- spin_lock_irqsave(&pending_packets_lock, flags);
-
- list_for_each_entry_safe(packet, p, &host->pending_packets, queue) {
- if (time_before(packet->sendtime + expire, j))
- list_move_tail(&packet->queue, &tmp);
- else
- /* Since packets are added to the tail, the oldest
- * ones are first, always. When we get to one that
- * isn't timed out, the rest aren't either. */
- break;
- }
- if (!list_empty(&host->pending_packets))
- mod_timer(&host->timeout, j + host->timeout_interval);
-
- spin_unlock_irqrestore(&pending_packets_lock, flags);
-
- list_for_each_entry_safe(packet, p, &tmp, queue) {
- list_del_init(&packet->queue);
- packet->state = hpsb_complete;
- packet->ack_code = ACKX_TIMEOUT;
- queue_packet_complete(packet);
- }
-}
-
-static struct task_struct *khpsbpkt_thread;
-static LIST_HEAD(hpsbpkt_queue);
-
-static void queue_packet_complete(struct hpsb_packet *packet)
-{
- unsigned long flags;
-
- if (packet->no_waiter) {
- hpsb_free_packet(packet);
- return;
- }
- if (packet->complete_routine != NULL) {
- spin_lock_irqsave(&pending_packets_lock, flags);
- list_add_tail(&packet->queue, &hpsbpkt_queue);
- spin_unlock_irqrestore(&pending_packets_lock, flags);
- wake_up_process(khpsbpkt_thread);
- }
- return;
-}
-
-/*
- * Kernel thread which handles packets that are completed. This way the
- * packet's "complete" function is asynchronously run in process context.
- * Only packets which have a "complete" function may be sent here.
- */
-static int hpsbpkt_thread(void *__hi)
-{
- struct hpsb_packet *packet, *p;
- struct list_head tmp;
- int may_schedule;
-
- while (!kthread_should_stop()) {
-
- INIT_LIST_HEAD(&tmp);
- spin_lock_irq(&pending_packets_lock);
- list_splice_init(&hpsbpkt_queue, &tmp);
- spin_unlock_irq(&pending_packets_lock);
-
- list_for_each_entry_safe(packet, p, &tmp, queue) {
- list_del_init(&packet->queue);
- packet->complete_routine(packet->complete_data);
- }
-
- set_current_state(TASK_INTERRUPTIBLE);
- spin_lock_irq(&pending_packets_lock);
- may_schedule = list_empty(&hpsbpkt_queue);
- spin_unlock_irq(&pending_packets_lock);
- if (may_schedule)
- schedule();
- __set_current_state(TASK_RUNNING);
- }
- return 0;
-}
-
-static int __init ieee1394_init(void)
-{
- int i, ret;
-
- /* non-fatal error */
- if (hpsb_init_config_roms()) {
- HPSB_ERR("Failed to initialize some config rom entries.\n");
- HPSB_ERR("Some features may not be available\n");
- }
-
- khpsbpkt_thread = kthread_run(hpsbpkt_thread, NULL, "khpsbpkt");
- if (IS_ERR(khpsbpkt_thread)) {
- HPSB_ERR("Failed to start hpsbpkt thread!\n");
- ret = PTR_ERR(khpsbpkt_thread);
- goto exit_cleanup_config_roms;
- }
-
- if (register_chrdev_region(IEEE1394_CORE_DEV, 256, "ieee1394")) {
- HPSB_ERR("unable to register character device major %d!\n", IEEE1394_MAJOR);
- ret = -ENODEV;
- goto exit_release_kernel_thread;
- }
-
- ret = bus_register(&ieee1394_bus_type);
- if (ret < 0) {
- HPSB_INFO("bus register failed");
- goto release_chrdev;
- }
-
- for (i = 0; fw_bus_attrs[i]; i++) {
- ret = bus_create_file(&ieee1394_bus_type, fw_bus_attrs[i]);
- if (ret < 0) {
- while (i >= 0) {
- bus_remove_file(&ieee1394_bus_type,
- fw_bus_attrs[i--]);
- }
- bus_unregister(&ieee1394_bus_type);
- goto release_chrdev;
- }
- }
-
- ret = class_register(&hpsb_host_class);
- if (ret < 0)
- goto release_all_bus;
-
- hpsb_protocol_class = class_create(THIS_MODULE, "ieee1394_protocol");
- if (IS_ERR(hpsb_protocol_class)) {
- ret = PTR_ERR(hpsb_protocol_class);
- goto release_class_host;
- }
-
- ret = init_csr();
- if (ret) {
- HPSB_INFO("init csr failed");
- ret = -ENOMEM;
- goto release_class_protocol;
- }
-
- if (disable_nodemgr) {
- HPSB_INFO("nodemgr and IRM functionality disabled");
- /* We shouldn't contend for IRM with nodemgr disabled, since
- nodemgr implements functionality required of ieee1394a-2000
- IRMs */
- hpsb_disable_irm = 1;
-
- return 0;
- }
-
- if (hpsb_disable_irm) {
- HPSB_INFO("IRM functionality disabled");
- }
-
- ret = init_ieee1394_nodemgr();
- if (ret < 0) {
- HPSB_INFO("init nodemgr failed");
- goto cleanup_csr;
- }
-
- return 0;
-
-cleanup_csr:
- cleanup_csr();
-release_class_protocol:
- class_destroy(hpsb_protocol_class);
-release_class_host:
- class_unregister(&hpsb_host_class);
-release_all_bus:
- for (i = 0; fw_bus_attrs[i]; i++)
- bus_remove_file(&ieee1394_bus_type, fw_bus_attrs[i]);
- bus_unregister(&ieee1394_bus_type);
-release_chrdev:
- unregister_chrdev_region(IEEE1394_CORE_DEV, 256);
-exit_release_kernel_thread:
- kthread_stop(khpsbpkt_thread);
-exit_cleanup_config_roms:
- hpsb_cleanup_config_roms();
- return ret;
-}
-
-static void __exit ieee1394_cleanup(void)
-{
- int i;
-
- if (!disable_nodemgr)
- cleanup_ieee1394_nodemgr();
-
- cleanup_csr();
-
- class_destroy(hpsb_protocol_class);
- class_unregister(&hpsb_host_class);
- for (i = 0; fw_bus_attrs[i]; i++)
- bus_remove_file(&ieee1394_bus_type, fw_bus_attrs[i]);
- bus_unregister(&ieee1394_bus_type);
-
- kthread_stop(khpsbpkt_thread);
-
- hpsb_cleanup_config_roms();
-
- unregister_chrdev_region(IEEE1394_CORE_DEV, 256);
-}
-
-fs_initcall(ieee1394_init);
-module_exit(ieee1394_cleanup);
-
-/* Exported symbols */
-
-/** hosts.c **/
-EXPORT_SYMBOL(hpsb_alloc_host);
-EXPORT_SYMBOL(hpsb_add_host);
-EXPORT_SYMBOL(hpsb_resume_host);
-EXPORT_SYMBOL(hpsb_remove_host);
-EXPORT_SYMBOL(hpsb_update_config_rom_image);
-
-/** ieee1394_core.c **/
-EXPORT_SYMBOL(hpsb_speedto_str);
-EXPORT_SYMBOL(hpsb_protocol_class);
-EXPORT_SYMBOL(hpsb_set_packet_complete_task);
-EXPORT_SYMBOL(hpsb_alloc_packet);
-EXPORT_SYMBOL(hpsb_free_packet);
-EXPORT_SYMBOL(hpsb_send_packet);
-EXPORT_SYMBOL(hpsb_reset_bus);
-EXPORT_SYMBOL(hpsb_read_cycle_timer);
-EXPORT_SYMBOL(hpsb_bus_reset);
-EXPORT_SYMBOL(hpsb_selfid_received);
-EXPORT_SYMBOL(hpsb_selfid_complete);
-EXPORT_SYMBOL(hpsb_packet_sent);
-EXPORT_SYMBOL(hpsb_packet_received);
-EXPORT_SYMBOL_GPL(hpsb_disable_irm);
-
-/** ieee1394_transactions.c **/
-EXPORT_SYMBOL(hpsb_get_tlabel);
-EXPORT_SYMBOL(hpsb_free_tlabel);
-EXPORT_SYMBOL(hpsb_make_readpacket);
-EXPORT_SYMBOL(hpsb_make_writepacket);
-EXPORT_SYMBOL(hpsb_make_streampacket);
-EXPORT_SYMBOL(hpsb_make_lockpacket);
-EXPORT_SYMBOL(hpsb_make_lock64packet);
-EXPORT_SYMBOL(hpsb_make_phypacket);
-EXPORT_SYMBOL(hpsb_read);
-EXPORT_SYMBOL(hpsb_write);
-EXPORT_SYMBOL(hpsb_lock);
-EXPORT_SYMBOL(hpsb_packet_success);
-
-/** highlevel.c **/
-EXPORT_SYMBOL(hpsb_register_highlevel);
-EXPORT_SYMBOL(hpsb_unregister_highlevel);
-EXPORT_SYMBOL(hpsb_register_addrspace);
-EXPORT_SYMBOL(hpsb_unregister_addrspace);
-EXPORT_SYMBOL(hpsb_allocate_and_register_addrspace);
-EXPORT_SYMBOL(hpsb_get_hostinfo);
-EXPORT_SYMBOL(hpsb_create_hostinfo);
-EXPORT_SYMBOL(hpsb_destroy_hostinfo);
-EXPORT_SYMBOL(hpsb_set_hostinfo_key);
-EXPORT_SYMBOL(hpsb_get_hostinfo_bykey);
-EXPORT_SYMBOL(hpsb_set_hostinfo);
-
-/** nodemgr.c **/
-EXPORT_SYMBOL(hpsb_node_fill_packet);
-EXPORT_SYMBOL(hpsb_node_write);
-EXPORT_SYMBOL(__hpsb_register_protocol);
-EXPORT_SYMBOL(hpsb_unregister_protocol);
-
-/** csr.c **/
-EXPORT_SYMBOL(hpsb_update_config_rom);
-
-/** dma.c **/
-EXPORT_SYMBOL(dma_prog_region_init);
-EXPORT_SYMBOL(dma_prog_region_alloc);
-EXPORT_SYMBOL(dma_prog_region_free);
-EXPORT_SYMBOL(dma_region_init);
-EXPORT_SYMBOL(dma_region_alloc);
-EXPORT_SYMBOL(dma_region_free);
-EXPORT_SYMBOL(dma_region_sync_for_cpu);
-EXPORT_SYMBOL(dma_region_sync_for_device);
-EXPORT_SYMBOL(dma_region_mmap);
-EXPORT_SYMBOL(dma_region_offset_to_bus);
-
-/** iso.c **/
-EXPORT_SYMBOL(hpsb_iso_xmit_init);
-EXPORT_SYMBOL(hpsb_iso_recv_init);
-EXPORT_SYMBOL(hpsb_iso_xmit_start);
-EXPORT_SYMBOL(hpsb_iso_recv_start);
-EXPORT_SYMBOL(hpsb_iso_recv_listen_channel);
-EXPORT_SYMBOL(hpsb_iso_recv_unlisten_channel);
-EXPORT_SYMBOL(hpsb_iso_recv_set_channel_mask);
-EXPORT_SYMBOL(hpsb_iso_stop);
-EXPORT_SYMBOL(hpsb_iso_shutdown);
-EXPORT_SYMBOL(hpsb_iso_xmit_queue_packet);
-EXPORT_SYMBOL(hpsb_iso_xmit_sync);
-EXPORT_SYMBOL(hpsb_iso_recv_release_packets);
-EXPORT_SYMBOL(hpsb_iso_n_ready);
-EXPORT_SYMBOL(hpsb_iso_packet_sent);
-EXPORT_SYMBOL(hpsb_iso_packet_received);
-EXPORT_SYMBOL(hpsb_iso_wake);
-EXPORT_SYMBOL(hpsb_iso_recv_flush);
-
-/** csr1212.c **/
-EXPORT_SYMBOL(csr1212_attach_keyval_to_directory);
-EXPORT_SYMBOL(csr1212_detach_keyval_from_directory);
-EXPORT_SYMBOL(csr1212_get_keyval);
-EXPORT_SYMBOL(csr1212_new_directory);
-EXPORT_SYMBOL(csr1212_parse_keyval);
-EXPORT_SYMBOL(csr1212_read);
-EXPORT_SYMBOL(csr1212_release_keyval);
diff --git a/drivers/ieee1394/ieee1394_core.h b/drivers/ieee1394/ieee1394_core.h
deleted file mode 100644
index 28b9f58bafd2..000000000000
--- a/drivers/ieee1394/ieee1394_core.h
+++ /dev/null
@@ -1,172 +0,0 @@
-#ifndef _IEEE1394_CORE_H
-#define _IEEE1394_CORE_H
-
-#include <linux/device.h>
-#include <linux/fs.h>
-#include <linux/list.h>
-#include <linux/types.h>
-#include <linux/cdev.h>
-#include <asm/atomic.h>
-
-#include "hosts.h"
-#include "ieee1394_types.h"
-
-struct hpsb_packet {
- /* This struct is basically read-only for hosts with the exception of
- * the data buffer contents and driver_list. */
-
- /* This can be used for host driver internal linking.
- *
- * NOTE: This must be left in init state when the driver is done
- * with it (e.g. by using list_del_init()), since the core does
- * some sanity checks to make sure the packet is not on a
- * driver_list when free'ing it. */
- struct list_head driver_list;
-
- nodeid_t node_id;
-
- /* hpsb_raw = send as-is, do not CRC (but still byte-swap it) */
- enum { hpsb_async, hpsb_raw } __attribute__((packed)) type;
-
- /* Okay, this is core internal and a no care for hosts.
- * queued = queued for sending
- * pending = sent, waiting for response
- * complete = processing completed, successful or not
- */
- enum {
- hpsb_unused, hpsb_queued, hpsb_pending, hpsb_complete
- } __attribute__((packed)) state;
-
- /* These are core-internal. */
- signed char tlabel;
- signed char ack_code;
- unsigned char tcode;
-
- unsigned expect_response:1;
- unsigned no_waiter:1;
-
- /* Speed to transmit with: 0 = 100Mbps, 1 = 200Mbps, 2 = 400Mbps */
- unsigned speed_code:2;
-
- struct hpsb_host *host;
- unsigned int generation;
-
- atomic_t refcnt;
- struct list_head queue;
-
- /* Function (and possible data to pass to it) to call when this
- * packet is completed. */
- void (*complete_routine)(void *);
- void *complete_data;
-
- /* Store jiffies for implementing bus timeouts. */
- unsigned long sendtime;
-
- /* Core-internal. */
- size_t allocated_data_size; /* as allocated */
-
- /* Sizes are in bytes. To be set by caller of hpsb_alloc_packet. */
- size_t data_size; /* as filled in */
- size_t header_size; /* as filled in, not counting the CRC */
-
- /* Buffers */
- quadlet_t *data; /* can be DMA-mapped */
- quadlet_t header[5];
- quadlet_t embedded_data[0]; /* keep as last member */
-};
-
-void hpsb_set_packet_complete_task(struct hpsb_packet *packet,
- void (*routine)(void *), void *data);
-static inline struct hpsb_packet *driver_packet(struct list_head *l)
-{
- return list_entry(l, struct hpsb_packet, driver_list);
-}
-void abort_timedouts(unsigned long __opaque);
-struct hpsb_packet *hpsb_alloc_packet(size_t data_size);
-void hpsb_free_packet(struct hpsb_packet *packet);
-
-/**
- * get_hpsb_generation - generation counter for the complete 1394 subsystem
- *
- * Generation gets incremented on every change in the subsystem (notably on bus
- * resets). Use the functions, not the variable.
- */
-static inline unsigned int get_hpsb_generation(struct hpsb_host *host)
-{
- return atomic_read(&host->generation);
-}
-
-int hpsb_send_phy_config(struct hpsb_host *host, int rootid, int gapcnt);
-int hpsb_send_packet(struct hpsb_packet *packet);
-int hpsb_send_packet_and_wait(struct hpsb_packet *packet);
-int hpsb_reset_bus(struct hpsb_host *host, int type);
-int hpsb_read_cycle_timer(struct hpsb_host *host, u32 *cycle_timer,
- u64 *local_time);
-
-int hpsb_bus_reset(struct hpsb_host *host);
-void hpsb_selfid_received(struct hpsb_host *host, quadlet_t sid);
-void hpsb_selfid_complete(struct hpsb_host *host, int phyid, int isroot);
-void hpsb_packet_sent(struct hpsb_host *host, struct hpsb_packet *packet,
- int ackcode);
-void hpsb_packet_received(struct hpsb_host *host, quadlet_t *data, size_t size,
- int write_acked);
-
-/*
- * CHARACTER DEVICE DISPATCHING
- *
- * All ieee1394 character device drivers share the same major number
- * (major 171). The 256 minor numbers are allocated to the various
- * task-specific interfaces (raw1394, video1394, dv1394, etc) in
- * blocks of 16.
- *
- * The core ieee1394.o module allocates the device number region
- * 171:0-255, the various drivers must then cdev_add() their cdev
- * objects to handle their respective sub-regions.
- *
- * Minor device number block allocations:
- *
- * Block 0 ( 0- 15) raw1394
- * Block 1 ( 16- 31) video1394
- * Block 2 ( 32- 47) dv1394
- *
- * Blocks 3-14 free for future allocation
- *
- * Block 15 (240-255) reserved for drivers under development, etc.
- */
-
-#define IEEE1394_MAJOR 171
-
-#define IEEE1394_MINOR_BLOCK_RAW1394 0
-#define IEEE1394_MINOR_BLOCK_VIDEO1394 1
-#define IEEE1394_MINOR_BLOCK_DV1394 2
-#define IEEE1394_MINOR_BLOCK_EXPERIMENTAL 15
-
-#define IEEE1394_CORE_DEV MKDEV(IEEE1394_MAJOR, 0)
-#define IEEE1394_RAW1394_DEV MKDEV(IEEE1394_MAJOR, \
- IEEE1394_MINOR_BLOCK_RAW1394 * 16)
-#define IEEE1394_VIDEO1394_DEV MKDEV(IEEE1394_MAJOR, \
- IEEE1394_MINOR_BLOCK_VIDEO1394 * 16)
-#define IEEE1394_DV1394_DEV MKDEV(IEEE1394_MAJOR, \
- IEEE1394_MINOR_BLOCK_DV1394 * 16)
-#define IEEE1394_EXPERIMENTAL_DEV MKDEV(IEEE1394_MAJOR, \
- IEEE1394_MINOR_BLOCK_EXPERIMENTAL * 16)
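
A worked example of the minor-number arithmetic above: a sub-driver claims
its 16-minor block with a cdev of its own. The sketch is illustrative only;
the names are hypothetical and it assumes the usual <linux/cdev.h> API:

/* Illustrative sketch: claim the video1394 block, minors 16..31, i.e.
 * MKDEV(171, 1 * 16) for 16 devices, inside the 171:0-255 region that
 * ieee1394.o registers. */
static struct cdev example_cdev;

static int example_claim_minor_block(const struct file_operations *fops)
{
	cdev_init(&example_cdev, fops);
	example_cdev.owner = THIS_MODULE;

	return cdev_add(&example_cdev, IEEE1394_VIDEO1394_DEV, 16);
}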
-
-/**
- * ieee1394_file_to_instance - get the index within a minor number block
- */
-static inline unsigned char ieee1394_file_to_instance(struct file *file)
-{
- int idx = cdev_index(file->f_path.dentry->d_inode);
- if (idx < 0)
- idx = 0;
- return idx;
-}
-
-extern int hpsb_disable_irm;
-
-/* Our sysfs bus entry */
-extern struct bus_type ieee1394_bus_type;
-extern struct class hpsb_host_class;
-extern struct class *hpsb_protocol_class;
-
-#endif /* _IEEE1394_CORE_H */
diff --git a/drivers/ieee1394/ieee1394_hotplug.h b/drivers/ieee1394/ieee1394_hotplug.h
deleted file mode 100644
index dd5500ed8322..000000000000
--- a/drivers/ieee1394/ieee1394_hotplug.h
+++ /dev/null
@@ -1,19 +0,0 @@
-#ifndef _IEEE1394_HOTPLUG_H
-#define _IEEE1394_HOTPLUG_H
-
-/* Unit spec id and sw version entry for some protocols */
-#define AVC_UNIT_SPEC_ID_ENTRY 0x0000A02D
-#define AVC_SW_VERSION_ENTRY 0x00010001
-#define CAMERA_UNIT_SPEC_ID_ENTRY 0x0000A02D
-#define CAMERA_SW_VERSION_ENTRY 0x00000100
-
-/* /include/linux/mod_devicetable.h defines:
- * IEEE1394_MATCH_VENDOR_ID
- * IEEE1394_MATCH_MODEL_ID
- * IEEE1394_MATCH_SPECIFIER_ID
- * IEEE1394_MATCH_VERSION
- * struct ieee1394_device_id
- */
-#include <linux/mod_devicetable.h>
-
-#endif /* _IEEE1394_HOTPLUG_H */
diff --git a/drivers/ieee1394/ieee1394_transactions.c b/drivers/ieee1394/ieee1394_transactions.c
deleted file mode 100644
index 675b3135d5f1..000000000000
--- a/drivers/ieee1394/ieee1394_transactions.c
+++ /dev/null
@@ -1,595 +0,0 @@
-/*
- * IEEE 1394 for Linux
- *
- * Transaction support.
- *
- * Copyright (C) 1999 Andreas E. Bombe
- *
- * This code is licensed under the GPL. See the file COPYING in the root
- * directory of the kernel sources for details.
- */
-
-#include <linux/bitops.h>
-#include <linux/compiler.h>
-#include <linux/hardirq.h>
-#include <linux/spinlock.h>
-#include <linux/string.h>
-#include <linux/sched.h> /* because linux/wait.h is broken if CONFIG_SMP=n */
-#include <linux/wait.h>
-
-#include <asm/bug.h>
-#include <asm/errno.h>
-#include <asm/system.h>
-
-#include "ieee1394.h"
-#include "ieee1394_types.h"
-#include "hosts.h"
-#include "ieee1394_core.h"
-#include "ieee1394_transactions.h"
-
-#define PREP_ASYNC_HEAD_ADDRESS(tc) \
- packet->tcode = tc; \
- packet->header[0] = (packet->node_id << 16) | (packet->tlabel << 10) \
- | (1 << 8) | (tc << 4); \
- packet->header[1] = (packet->host->node_id << 16) | (addr >> 32); \
- packet->header[2] = addr & 0xffffffff
-
-#ifndef HPSB_DEBUG_TLABELS
-static
-#endif
-DEFINE_SPINLOCK(hpsb_tlabel_lock);
-
-static DECLARE_WAIT_QUEUE_HEAD(tlabel_wq);
-
-static void fill_async_readquad(struct hpsb_packet *packet, u64 addr)
-{
- PREP_ASYNC_HEAD_ADDRESS(TCODE_READQ);
- packet->header_size = 12;
- packet->data_size = 0;
- packet->expect_response = 1;
-}
-
-static void fill_async_readblock(struct hpsb_packet *packet, u64 addr,
- int length)
-{
- PREP_ASYNC_HEAD_ADDRESS(TCODE_READB);
- packet->header[3] = length << 16;
- packet->header_size = 16;
- packet->data_size = 0;
- packet->expect_response = 1;
-}
-
-static void fill_async_writequad(struct hpsb_packet *packet, u64 addr,
- quadlet_t data)
-{
- PREP_ASYNC_HEAD_ADDRESS(TCODE_WRITEQ);
- packet->header[3] = data;
- packet->header_size = 16;
- packet->data_size = 0;
- packet->expect_response = 1;
-}
-
-static void fill_async_writeblock(struct hpsb_packet *packet, u64 addr,
- int length)
-{
- PREP_ASYNC_HEAD_ADDRESS(TCODE_WRITEB);
- packet->header[3] = length << 16;
- packet->header_size = 16;
- packet->expect_response = 1;
- packet->data_size = length + (length % 4 ? 4 - (length % 4) : 0);
-}
-
-static void fill_async_lock(struct hpsb_packet *packet, u64 addr, int extcode,
- int length)
-{
- PREP_ASYNC_HEAD_ADDRESS(TCODE_LOCK_REQUEST);
- packet->header[3] = (length << 16) | extcode;
- packet->header_size = 16;
- packet->data_size = length;
- packet->expect_response = 1;
-}
-
-static void fill_phy_packet(struct hpsb_packet *packet, quadlet_t data)
-{
- packet->header[0] = data;
- packet->header[1] = ~data;
- packet->header_size = 8;
- packet->data_size = 0;
- packet->expect_response = 0;
- packet->type = hpsb_raw; /* No CRC added */
- packet->speed_code = IEEE1394_SPEED_100; /* Force speed to be 100Mbps */
-}
-
-static void fill_async_stream_packet(struct hpsb_packet *packet, int length,
- int channel, int tag, int sync)
-{
- packet->header[0] = (length << 16) | (tag << 14) | (channel << 8)
- | (TCODE_STREAM_DATA << 4) | sync;
-
- packet->header_size = 4;
- packet->data_size = length;
- packet->type = hpsb_async;
- packet->tcode = TCODE_ISO_DATA;
-}
-
-/* same as hpsb_get_tlabel, except that it returns immediately */
-static int hpsb_get_tlabel_atomic(struct hpsb_packet *packet)
-{
- unsigned long flags, *tp;
- u8 *next;
- int tlabel, n = NODEID_TO_NODE(packet->node_id);
-
- /* Broadcast transactions are complete once the request has been sent.
- * Use the same transaction label for all broadcast transactions. */
- if (unlikely(n == ALL_NODES)) {
- packet->tlabel = 0;
- return 0;
- }
- tp = packet->host->tl_pool[n].map;
- next = &packet->host->next_tl[n];
-
- spin_lock_irqsave(&hpsb_tlabel_lock, flags);
- tlabel = find_next_zero_bit(tp, 64, *next);
- if (tlabel > 63)
- tlabel = find_first_zero_bit(tp, 64);
- if (tlabel > 63) {
- spin_unlock_irqrestore(&hpsb_tlabel_lock, flags);
- return -EAGAIN;
- }
- __set_bit(tlabel, tp);
- *next = (tlabel + 1) & 63;
- spin_unlock_irqrestore(&hpsb_tlabel_lock, flags);
-
- packet->tlabel = tlabel;
- return 0;
-}
-
-/**
- * hpsb_get_tlabel - allocate a transaction label
- * @packet: the packet whose tlabel and tl_pool we set
- *
- * Every asynchronous transaction on the 1394 bus needs a transaction
- * label to match the response to the request. This label has to be
- * different from any other transaction label in an outstanding request to
- * the same node to make matching possible without ambiguity.
- *
- * There are 64 different tlabels, so an allocated tlabel has to be freed
- * with hpsb_free_tlabel() after the transaction is complete (unless it's
- * reused again for the same target node).
- *
- * Return value: Zero on success, otherwise non-zero. A non-zero return
- * generally means there are no available tlabels. If this is called out
- * of interrupt or atomic context, then it will sleep until it can return a
- * tlabel or a signal is received.
- */
-int hpsb_get_tlabel(struct hpsb_packet *packet)
-{
- if (irqs_disabled() || in_atomic())
- return hpsb_get_tlabel_atomic(packet);
-
- /* NB: The macro wait_event_interruptible() is called with a condition
- * argument with side effect. This is only possible because the side
- * effect does not occur until the condition became true, and
- * wait_event_interruptible() won't evaluate the condition again after
- * that. */
- return wait_event_interruptible(tlabel_wq,
- !hpsb_get_tlabel_atomic(packet));
-}
-
-/**
- * hpsb_free_tlabel - free an allocated transaction label
- * @packet: packet whose tlabel and tl_pool needs to be cleared
- *
- * Frees the transaction label allocated with hpsb_get_tlabel(). The
- * tlabel has to be freed after the transaction is complete (i.e. response
- * was received for a split transaction or packet was sent for a unified
- * transaction).
- *
- * A tlabel must not be freed twice.
- */
-void hpsb_free_tlabel(struct hpsb_packet *packet)
-{
- unsigned long flags, *tp;
- int tlabel, n = NODEID_TO_NODE(packet->node_id);
-
- if (unlikely(n == ALL_NODES))
- return;
- tp = packet->host->tl_pool[n].map;
- tlabel = packet->tlabel;
- BUG_ON(tlabel > 63 || tlabel < 0);
-
- spin_lock_irqsave(&hpsb_tlabel_lock, flags);
- BUG_ON(!__test_and_clear_bit(tlabel, tp));
- spin_unlock_irqrestore(&hpsb_tlabel_lock, flags);
-
- wake_up_interruptible(&tlabel_wq);
-}
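
For callers that build packets by hand rather than through the
hpsb_make_*packet() helpers further down, the usual pairing of the two
functions above looks roughly like this (illustrative, names hypothetical):

/* Illustrative only: prepare a packet with a transaction label allocated;
 * the caller must hpsb_free_tlabel() before hpsb_free_packet() once the
 * transaction has completed. */
static struct hpsb_packet *example_prepare_packet(struct hpsb_host *host,
						  nodeid_t node)
{
	struct hpsb_packet *packet = hpsb_alloc_packet(0);

	if (!packet)
		return NULL;

	packet->host = host;
	packet->node_id = node;

	/* may sleep in process context, returns immediately in atomic context */
	if (hpsb_get_tlabel(packet)) {
		hpsb_free_packet(packet);
		return NULL;
	}
	return packet;
}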
-
-/**
- * hpsb_packet_success - Make sense of the ack and reply codes
- *
- * Make sense of the ack and reply codes and return more convenient error codes:
- * 0 = success. -%EBUSY = node is busy, try again. -%EAGAIN = error which can
- * probably be resolved by a retry. -%EREMOTEIO = node suffers from an internal
- * error. -%EACCES = this transaction is not allowed on requested address.
- * -%EINVAL = invalid address at node.
- */
-int hpsb_packet_success(struct hpsb_packet *packet)
-{
- switch (packet->ack_code) {
- case ACK_PENDING:
- switch ((packet->header[1] >> 12) & 0xf) {
- case RCODE_COMPLETE:
- return 0;
- case RCODE_CONFLICT_ERROR:
- return -EAGAIN;
- case RCODE_DATA_ERROR:
- return -EREMOTEIO;
- case RCODE_TYPE_ERROR:
- return -EACCES;
- case RCODE_ADDRESS_ERROR:
- return -EINVAL;
- default:
- HPSB_ERR("received reserved rcode %d from node %d",
- (packet->header[1] >> 12) & 0xf,
- packet->node_id);
- return -EAGAIN;
- }
-
- case ACK_BUSY_X:
- case ACK_BUSY_A:
- case ACK_BUSY_B:
- return -EBUSY;
-
- case ACK_TYPE_ERROR:
- return -EACCES;
-
- case ACK_COMPLETE:
- if (packet->tcode == TCODE_WRITEQ
- || packet->tcode == TCODE_WRITEB) {
- return 0;
- } else {
- HPSB_ERR("impossible ack_complete from node %d "
- "(tcode %d)", packet->node_id, packet->tcode);
- return -EAGAIN;
- }
-
- case ACK_DATA_ERROR:
- if (packet->tcode == TCODE_WRITEB
- || packet->tcode == TCODE_LOCK_REQUEST) {
- return -EAGAIN;
- } else {
- HPSB_ERR("impossible ack_data_error from node %d "
- "(tcode %d)", packet->node_id, packet->tcode);
- return -EAGAIN;
- }
-
- case ACK_ADDRESS_ERROR:
- return -EINVAL;
-
- case ACK_TARDY:
- case ACK_CONFLICT_ERROR:
- case ACKX_NONE:
- case ACKX_SEND_ERROR:
- case ACKX_ABORTED:
- case ACKX_TIMEOUT:
- /* error while sending */
- return -EAGAIN;
-
- default:
- HPSB_ERR("got invalid ack %d from node %d (tcode %d)",
- packet->ack_code, packet->node_id, packet->tcode);
- return -EAGAIN;
- }
-}
-
-struct hpsb_packet *hpsb_make_readpacket(struct hpsb_host *host, nodeid_t node,
- u64 addr, size_t length)
-{
- struct hpsb_packet *packet;
-
- if (length == 0)
- return NULL;
-
- packet = hpsb_alloc_packet(length);
- if (!packet)
- return NULL;
-
- packet->host = host;
- packet->node_id = node;
-
- if (hpsb_get_tlabel(packet)) {
- hpsb_free_packet(packet);
- return NULL;
- }
-
- if (length == 4)
- fill_async_readquad(packet, addr);
- else
- fill_async_readblock(packet, addr, length);
-
- return packet;
-}
-
-struct hpsb_packet *hpsb_make_writepacket(struct hpsb_host *host, nodeid_t node,
- u64 addr, quadlet_t * buffer,
- size_t length)
-{
- struct hpsb_packet *packet;
-
- if (length == 0)
- return NULL;
-
- packet = hpsb_alloc_packet(length);
- if (!packet)
- return NULL;
-
- if (length % 4) { /* zero padding bytes */
- packet->data[length >> 2] = 0;
- }
- packet->host = host;
- packet->node_id = node;
-
- if (hpsb_get_tlabel(packet)) {
- hpsb_free_packet(packet);
- return NULL;
- }
-
- if (length == 4) {
- fill_async_writequad(packet, addr, buffer ? *buffer : 0);
- } else {
- fill_async_writeblock(packet, addr, length);
- if (buffer)
- memcpy(packet->data, buffer, length);
- }
-
- return packet;
-}
-
-struct hpsb_packet *hpsb_make_streampacket(struct hpsb_host *host, u8 * buffer,
- int length, int channel, int tag,
- int sync)
-{
- struct hpsb_packet *packet;
-
- if (length == 0)
- return NULL;
-
- packet = hpsb_alloc_packet(length);
- if (!packet)
- return NULL;
-
- if (length % 4) { /* zero padding bytes */
- packet->data[length >> 2] = 0;
- }
- packet->host = host;
-
- /* Because it is too difficult to determine all PHY speeds and link
- * speeds here, we use S100... */
- packet->speed_code = IEEE1394_SPEED_100;
-
- /* ...and prevent hpsb_send_packet() from overriding it. */
- packet->node_id = LOCAL_BUS | ALL_NODES;
-
- if (hpsb_get_tlabel(packet)) {
- hpsb_free_packet(packet);
- return NULL;
- }
-
- fill_async_stream_packet(packet, length, channel, tag, sync);
- if (buffer)
- memcpy(packet->data, buffer, length);
-
- return packet;
-}
-
-struct hpsb_packet *hpsb_make_lockpacket(struct hpsb_host *host, nodeid_t node,
- u64 addr, int extcode,
- quadlet_t * data, quadlet_t arg)
-{
- struct hpsb_packet *p;
- u32 length;
-
- p = hpsb_alloc_packet(8);
- if (!p)
- return NULL;
-
- p->host = host;
- p->node_id = node;
- if (hpsb_get_tlabel(p)) {
- hpsb_free_packet(p);
- return NULL;
- }
-
- switch (extcode) {
- case EXTCODE_FETCH_ADD:
- case EXTCODE_LITTLE_ADD:
- length = 4;
- if (data)
- p->data[0] = *data;
- break;
- default:
- length = 8;
- if (data) {
- p->data[0] = arg;
- p->data[1] = *data;
- }
- break;
- }
- fill_async_lock(p, addr, extcode, length);
-
- return p;
-}
-
-struct hpsb_packet *hpsb_make_lock64packet(struct hpsb_host *host,
- nodeid_t node, u64 addr, int extcode,
- octlet_t * data, octlet_t arg)
-{
- struct hpsb_packet *p;
- u32 length;
-
- p = hpsb_alloc_packet(16);
- if (!p)
- return NULL;
-
- p->host = host;
- p->node_id = node;
- if (hpsb_get_tlabel(p)) {
- hpsb_free_packet(p);
- return NULL;
- }
-
- switch (extcode) {
- case EXTCODE_FETCH_ADD:
- case EXTCODE_LITTLE_ADD:
- length = 8;
- if (data) {
- p->data[0] = *data >> 32;
- p->data[1] = *data & 0xffffffff;
- }
- break;
- default:
- length = 16;
- if (data) {
- p->data[0] = arg >> 32;
- p->data[1] = arg & 0xffffffff;
- p->data[2] = *data >> 32;
- p->data[3] = *data & 0xffffffff;
- }
- break;
- }
- fill_async_lock(p, addr, extcode, length);
-
- return p;
-}
-
-struct hpsb_packet *hpsb_make_phypacket(struct hpsb_host *host, quadlet_t data)
-{
- struct hpsb_packet *p;
-
- p = hpsb_alloc_packet(0);
- if (!p)
- return NULL;
-
- p->host = host;
- fill_phy_packet(p, data);
-
- return p;
-}
-
-/*
- * FIXME - these functions should probably read from / write to user space to
- * avoid in kernel buffers for user space callers
- */
-
-/**
- * hpsb_read - generic read function
- *
- * Recognizes the local node ID and acts accordingly. Automatically uses a
- * quadlet read request if @length == 4 and a block read request otherwise.
- * It does not yet support lengths that are not a multiple of 4.
- *
- * You must explicitly specify the @generation for which the node ID is valid,
- * to avoid sending packets to the wrong nodes when we race with a bus reset.
- */
-int hpsb_read(struct hpsb_host *host, nodeid_t node, unsigned int generation,
- u64 addr, quadlet_t * buffer, size_t length)
-{
- struct hpsb_packet *packet;
- int retval = 0;
-
- if (length == 0)
- return -EINVAL;
-
- packet = hpsb_make_readpacket(host, node, addr, length);
-
- if (!packet) {
- return -ENOMEM;
- }
-
- packet->generation = generation;
- retval = hpsb_send_packet_and_wait(packet);
- if (retval < 0)
- goto hpsb_read_fail;
-
- retval = hpsb_packet_success(packet);
-
- if (retval == 0) {
- if (length == 4) {
- *buffer = packet->header[3];
- } else {
- memcpy(buffer, packet->data, length);
- }
- }
-
- hpsb_read_fail:
- hpsb_free_tlabel(packet);
- hpsb_free_packet(packet);
-
- return retval;
-}
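
A typical caller pairs this with get_hpsb_generation(); a single-quadlet read
might look like the following sketch (illustrative, name hypothetical):

/* Illustrative only: read one quadlet from a target node, guarding against
 * bus resets with the current generation. */
static int example_read_quadlet(struct hpsb_host *host, nodeid_t node,
				u64 addr, quadlet_t *value)
{
	return hpsb_read(host, node, get_hpsb_generation(host),
			 addr, value, 4);
}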
-
-/**
- * hpsb_write - generic write function
- *
- * Recognizes the local node ID and acts accordingly. Automatically uses a
- * quadlet write request if @length == 4 and a block write request
- * otherwise. It does not yet support lengths that are not a multiple of 4.
- *
- * You must explicitly specify the @generation for which the node ID is valid,
- * to avoid sending packets to the wrong nodes when we race with a bus reset.
- */
-int hpsb_write(struct hpsb_host *host, nodeid_t node, unsigned int generation,
- u64 addr, quadlet_t * buffer, size_t length)
-{
- struct hpsb_packet *packet;
- int retval;
-
- if (length == 0)
- return -EINVAL;
-
- packet = hpsb_make_writepacket(host, node, addr, buffer, length);
-
- if (!packet)
- return -ENOMEM;
-
- packet->generation = generation;
- retval = hpsb_send_packet_and_wait(packet);
- if (retval < 0)
- goto hpsb_write_fail;
-
- retval = hpsb_packet_success(packet);
-
- hpsb_write_fail:
- hpsb_free_tlabel(packet);
- hpsb_free_packet(packet);
-
- return retval;
-}
-
-int hpsb_lock(struct hpsb_host *host, nodeid_t node, unsigned int generation,
- u64 addr, int extcode, quadlet_t *data, quadlet_t arg)
-{
- struct hpsb_packet *packet;
- int retval = 0;
-
- packet = hpsb_make_lockpacket(host, node, addr, extcode, data, arg);
- if (!packet)
- return -ENOMEM;
-
- packet->generation = generation;
- retval = hpsb_send_packet_and_wait(packet);
- if (retval < 0)
- goto hpsb_lock_fail;
-
- retval = hpsb_packet_success(packet);
-
- if (retval == 0)
- *data = packet->data[0];
-
-hpsb_lock_fail:
- hpsb_free_tlabel(packet);
- hpsb_free_packet(packet);
-
- return retval;
-}
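
Similarly, a hedged sketch of a compare-swap via hpsb_lock(), as a protocol driver might issue against the IRM's BANDWIDTH_AVAILABLE register. The CSR_REGISTER_BASE, CSR_BANDWIDTH_AVAILABLE and EXTCODE_COMPARE_SWAP constants are assumed to come from csr.h and ieee1394.h, and byte-order handling of the register contents is deliberately glossed over.

#include "ieee1394.h"               /* assumed: EXTCODE_COMPARE_SWAP */
#include "csr.h"                    /* assumed: CSR_REGISTER_BASE, CSR_BANDWIDTH_AVAILABLE */
#include "ieee1394_transactions.h"

/* Attempt to allocate 'units' of isochronous bandwidth at the IRM.
 * With EXTCODE_COMPARE_SWAP, 'arg' is the expected current value and
 * '*data' is the new value; on completion *data holds the value that
 * was actually found in the register. */
static int example_alloc_bandwidth(struct hpsb_host *host, nodeid_t irm,
                                   unsigned int generation,
                                   quadlet_t old_val, quadlet_t units)
{
	quadlet_t data = old_val - units;
	quadlet_t arg = old_val;
	int ret;

	ret = hpsb_lock(host, irm, generation,
	                CSR_REGISTER_BASE + CSR_BANDWIDTH_AVAILABLE,
	                EXTCODE_COMPARE_SWAP, &data, arg);
	if (ret)
		return ret;

	/* the swap took effect only if the old value matched our compare */
	return data == old_val ? 0 : -EAGAIN;
}
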
diff --git a/drivers/ieee1394/ieee1394_transactions.h b/drivers/ieee1394/ieee1394_transactions.h
deleted file mode 100644
index 20b693be14b2..000000000000
--- a/drivers/ieee1394/ieee1394_transactions.h
+++ /dev/null
@@ -1,40 +0,0 @@
-#ifndef _IEEE1394_TRANSACTIONS_H
-#define _IEEE1394_TRANSACTIONS_H
-
-#include <linux/types.h>
-
-#include "ieee1394_types.h"
-
-struct hpsb_packet;
-struct hpsb_host;
-
-int hpsb_get_tlabel(struct hpsb_packet *packet);
-void hpsb_free_tlabel(struct hpsb_packet *packet);
-struct hpsb_packet *hpsb_make_readpacket(struct hpsb_host *host, nodeid_t node,
- u64 addr, size_t length);
-struct hpsb_packet *hpsb_make_lockpacket(struct hpsb_host *host, nodeid_t node,
- u64 addr, int extcode, quadlet_t *data,
- quadlet_t arg);
-struct hpsb_packet *hpsb_make_lock64packet(struct hpsb_host *host,
- nodeid_t node, u64 addr, int extcode,
- octlet_t *data, octlet_t arg);
-struct hpsb_packet *hpsb_make_phypacket(struct hpsb_host *host, quadlet_t data);
-struct hpsb_packet *hpsb_make_writepacket(struct hpsb_host *host,
- nodeid_t node, u64 addr,
- quadlet_t *buffer, size_t length);
-struct hpsb_packet *hpsb_make_streampacket(struct hpsb_host *host, u8 *buffer,
- int length, int channel, int tag,
- int sync);
-int hpsb_packet_success(struct hpsb_packet *packet);
-int hpsb_read(struct hpsb_host *host, nodeid_t node, unsigned int generation,
- u64 addr, quadlet_t *buffer, size_t length);
-int hpsb_write(struct hpsb_host *host, nodeid_t node, unsigned int generation,
- u64 addr, quadlet_t *buffer, size_t length);
-int hpsb_lock(struct hpsb_host *host, nodeid_t node, unsigned int generation,
- u64 addr, int extcode, quadlet_t *data, quadlet_t arg);
-
-#ifdef HPSB_DEBUG_TLABELS
-extern spinlock_t hpsb_tlabel_lock;
-#endif
-
-#endif /* _IEEE1394_TRANSACTIONS_H */
diff --git a/drivers/ieee1394/ieee1394_types.h b/drivers/ieee1394/ieee1394_types.h
deleted file mode 100644
index 9803aaa15be0..000000000000
--- a/drivers/ieee1394/ieee1394_types.h
+++ /dev/null
@@ -1,69 +0,0 @@
-#ifndef _IEEE1394_TYPES_H
-#define _IEEE1394_TYPES_H
-
-#include <linux/kernel.h>
-#include <linux/string.h>
-#include <linux/types.h>
-#include <asm/byteorder.h>
-
-typedef u32 quadlet_t;
-typedef u64 octlet_t;
-typedef u16 nodeid_t;
-
-typedef u8 byte_t;
-typedef u64 nodeaddr_t;
-typedef u16 arm_length_t;
-
-#define BUS_MASK 0xffc0
-#define BUS_SHIFT 6
-#define NODE_MASK 0x003f
-#define LOCAL_BUS 0xffc0
-#define ALL_NODES 0x003f
-
-#define NODEID_TO_BUS(nodeid) ((nodeid & BUS_MASK) >> BUS_SHIFT)
-#define NODEID_TO_NODE(nodeid) (nodeid & NODE_MASK)
-
-/* Can be used to consistently print a node/bus ID. */
-#define NODE_BUS_FMT "%d-%02d:%04d"
-#define NODE_BUS_ARGS(__host, __nodeid) \
- __host->id, NODEID_TO_NODE(__nodeid), NODEID_TO_BUS(__nodeid)
-
-#define HPSB_PRINT(level, fmt, args...) \
- printk(level "ieee1394: " fmt "\n" , ## args)
-
-#define HPSB_DEBUG(fmt, args...) HPSB_PRINT(KERN_DEBUG, fmt , ## args)
-#define HPSB_INFO(fmt, args...) HPSB_PRINT(KERN_INFO, fmt , ## args)
-#define HPSB_NOTICE(fmt, args...) HPSB_PRINT(KERN_NOTICE, fmt , ## args)
-#define HPSB_WARN(fmt, args...) HPSB_PRINT(KERN_WARNING, fmt , ## args)
-#define HPSB_ERR(fmt, args...) HPSB_PRINT(KERN_ERR, fmt , ## args)
-
-#ifdef CONFIG_IEEE1394_VERBOSEDEBUG
-#define HPSB_VERBOSE(fmt, args...) HPSB_PRINT(KERN_DEBUG, fmt , ## args)
-#define HPSB_DEBUG_TLABELS
-#else
-#define HPSB_VERBOSE(fmt, args...) do {} while (0)
-#endif
-
-#ifdef __BIG_ENDIAN
-
-static inline void *memcpy_le32(u32 *dest, const u32 *__src, size_t count)
-{
- void *tmp = dest;
- u32 *src = (u32 *)__src;
-
- count /= 4;
- while (count--)
- *dest++ = swab32p(src++);
- return tmp;
-}
-
-#else
-
-static __inline__ void *memcpy_le32(u32 *dest, const u32 *src, size_t count)
-{
- return memcpy(dest, src, count);
-}
-
-#endif /* __BIG_ENDIAN */
-
-#endif /* _IEEE1394_TYPES_H */
diff --git a/drivers/ieee1394/iso.c b/drivers/ieee1394/iso.c
deleted file mode 100644
index 1cf6487b65ba..000000000000
--- a/drivers/ieee1394/iso.c
+++ /dev/null
@@ -1,568 +0,0 @@
-/*
- * IEEE 1394 for Linux
- *
- * kernel ISO transmission/reception
- *
- * Copyright (C) 2002 Maas Digital LLC
- *
- * This code is licensed under the GPL. See the file COPYING in the root
- * directory of the kernel sources for details.
- */
-
-#include <linux/pci.h>
-#include <linux/sched.h>
-#include <linux/mm.h>
-#include <linux/slab.h>
-
-#include "hosts.h"
-#include "iso.h"
-
-/**
- * hpsb_iso_stop - stop DMA
- */
-void hpsb_iso_stop(struct hpsb_iso *iso)
-{
- if (!(iso->flags & HPSB_ISO_DRIVER_STARTED))
- return;
-
- iso->host->driver->isoctl(iso, iso->type == HPSB_ISO_XMIT ?
- XMIT_STOP : RECV_STOP, 0);
- iso->flags &= ~HPSB_ISO_DRIVER_STARTED;
-}
-
-/**
- * hpsb_iso_shutdown - deallocate buffer and DMA context
- */
-void hpsb_iso_shutdown(struct hpsb_iso *iso)
-{
- if (iso->flags & HPSB_ISO_DRIVER_INIT) {
- hpsb_iso_stop(iso);
- iso->host->driver->isoctl(iso, iso->type == HPSB_ISO_XMIT ?
- XMIT_SHUTDOWN : RECV_SHUTDOWN, 0);
- iso->flags &= ~HPSB_ISO_DRIVER_INIT;
- }
-
- dma_region_free(&iso->data_buf);
- kfree(iso);
-}
-
-static struct hpsb_iso *hpsb_iso_common_init(struct hpsb_host *host,
- enum hpsb_iso_type type,
- unsigned int data_buf_size,
- unsigned int buf_packets,
- int channel, int dma_mode,
- int irq_interval,
- void (*callback) (struct hpsb_iso
- *))
-{
- struct hpsb_iso *iso;
- int dma_direction;
-
- /* make sure driver supports the ISO API */
- if (!host->driver->isoctl) {
- printk(KERN_INFO
- "ieee1394: host driver '%s' does not support the rawiso API\n",
- host->driver->name);
- return NULL;
- }
-
- /* sanitize parameters */
-
- if (buf_packets < 2)
- buf_packets = 2;
-
- if ((dma_mode < HPSB_ISO_DMA_DEFAULT)
- || (dma_mode > HPSB_ISO_DMA_PACKET_PER_BUFFER))
- dma_mode = HPSB_ISO_DMA_DEFAULT;
-
- if ((irq_interval < 0) || (irq_interval > buf_packets / 4))
- irq_interval = buf_packets / 4;
- if (irq_interval == 0) /* really interrupt for each packet */
- irq_interval = 1;
-
- if (channel < -1 || channel >= 64)
- return NULL;
-
- /* channel = -1 is OK for multi-channel recv but not for xmit */
- if (type == HPSB_ISO_XMIT && channel < 0)
- return NULL;
-
- /* allocate and write the struct hpsb_iso */
-
- iso =
- kmalloc(sizeof(*iso) +
- buf_packets * sizeof(struct hpsb_iso_packet_info),
- GFP_KERNEL);
- if (!iso)
- return NULL;
-
- iso->infos = (struct hpsb_iso_packet_info *)(iso + 1);
-
- iso->type = type;
- iso->host = host;
- iso->hostdata = NULL;
- iso->callback = callback;
- init_waitqueue_head(&iso->waitq);
- iso->channel = channel;
- iso->irq_interval = irq_interval;
- iso->dma_mode = dma_mode;
- dma_region_init(&iso->data_buf);
- iso->buf_size = PAGE_ALIGN(data_buf_size);
- iso->buf_packets = buf_packets;
- iso->pkt_dma = 0;
- iso->first_packet = 0;
- spin_lock_init(&iso->lock);
-
- if (iso->type == HPSB_ISO_XMIT) {
- iso->n_ready_packets = iso->buf_packets;
- dma_direction = PCI_DMA_TODEVICE;
- } else {
- iso->n_ready_packets = 0;
- dma_direction = PCI_DMA_FROMDEVICE;
- }
-
- atomic_set(&iso->overflows, 0);
- iso->bytes_discarded = 0;
- iso->flags = 0;
- iso->prebuffer = 0;
-
- /* allocate the packet buffer */
- if (dma_region_alloc
- (&iso->data_buf, iso->buf_size, host->pdev, dma_direction))
- goto err;
-
- return iso;
-
- err:
- hpsb_iso_shutdown(iso);
- return NULL;
-}
-
-/**
- * hpsb_iso_n_ready - returns number of packets ready to send or receive
- */
-int hpsb_iso_n_ready(struct hpsb_iso *iso)
-{
- unsigned long flags;
- int val;
-
- spin_lock_irqsave(&iso->lock, flags);
- val = iso->n_ready_packets;
- spin_unlock_irqrestore(&iso->lock, flags);
-
- return val;
-}
-
-/**
- * hpsb_iso_xmit_init - allocate the buffer and DMA context
- */
-struct hpsb_iso *hpsb_iso_xmit_init(struct hpsb_host *host,
- unsigned int data_buf_size,
- unsigned int buf_packets,
- int channel,
- int speed,
- int irq_interval,
- void (*callback) (struct hpsb_iso *))
-{
- struct hpsb_iso *iso = hpsb_iso_common_init(host, HPSB_ISO_XMIT,
- data_buf_size, buf_packets,
- channel,
- HPSB_ISO_DMA_DEFAULT,
- irq_interval, callback);
- if (!iso)
- return NULL;
-
- iso->speed = speed;
-
- /* tell the driver to start working */
- if (host->driver->isoctl(iso, XMIT_INIT, 0))
- goto err;
-
- iso->flags |= HPSB_ISO_DRIVER_INIT;
- return iso;
-
- err:
- hpsb_iso_shutdown(iso);
- return NULL;
-}
-
-/**
- * hpsb_iso_recv_init - allocate the buffer and DMA context
- *
- * Note, if channel = -1, multi-channel receive is enabled.
- */
-struct hpsb_iso *hpsb_iso_recv_init(struct hpsb_host *host,
- unsigned int data_buf_size,
- unsigned int buf_packets,
- int channel,
- int dma_mode,
- int irq_interval,
- void (*callback) (struct hpsb_iso *))
-{
- struct hpsb_iso *iso = hpsb_iso_common_init(host, HPSB_ISO_RECV,
- data_buf_size, buf_packets,
- channel, dma_mode,
- irq_interval, callback);
- if (!iso)
- return NULL;
-
- /* tell the driver to start working */
- if (host->driver->isoctl(iso, RECV_INIT, 0))
- goto err;
-
- iso->flags |= HPSB_ISO_DRIVER_INIT;
- return iso;
-
- err:
- hpsb_iso_shutdown(iso);
- return NULL;
-}
-
-/**
- * hpsb_iso_recv_listen_channel
- *
- * multi-channel only
- */
-int hpsb_iso_recv_listen_channel(struct hpsb_iso *iso, unsigned char channel)
-{
- if (iso->type != HPSB_ISO_RECV || iso->channel != -1 || channel >= 64)
- return -EINVAL;
- return iso->host->driver->isoctl(iso, RECV_LISTEN_CHANNEL, channel);
-}
-
-/**
- * hpsb_iso_recv_unlisten_channel
- *
- * multi-channel only
- */
-int hpsb_iso_recv_unlisten_channel(struct hpsb_iso *iso, unsigned char channel)
-{
- if (iso->type != HPSB_ISO_RECV || iso->channel != -1 || channel >= 64)
- return -EINVAL;
- return iso->host->driver->isoctl(iso, RECV_UNLISTEN_CHANNEL, channel);
-}
-
-/**
- * hpsb_iso_recv_set_channel_mask
- *
- * multi-channel only
- */
-int hpsb_iso_recv_set_channel_mask(struct hpsb_iso *iso, u64 mask)
-{
- if (iso->type != HPSB_ISO_RECV || iso->channel != -1)
- return -EINVAL;
- return iso->host->driver->isoctl(iso, RECV_SET_CHANNEL_MASK,
- (unsigned long)&mask);
-}
-
-/**
- * hpsb_iso_recv_flush - check for arrival of new packets
- *
- * check for arrival of new packets immediately (even if irq_interval
- * has not yet been reached)
- */
-int hpsb_iso_recv_flush(struct hpsb_iso *iso)
-{
- if (iso->type != HPSB_ISO_RECV)
- return -EINVAL;
- return iso->host->driver->isoctl(iso, RECV_FLUSH, 0);
-}
-
-static int do_iso_xmit_start(struct hpsb_iso *iso, int cycle)
-{
- int retval = iso->host->driver->isoctl(iso, XMIT_START, cycle);
- if (retval)
- return retval;
-
- iso->flags |= HPSB_ISO_DRIVER_STARTED;
- return retval;
-}
-
-/**
- * hpsb_iso_xmit_start - start DMA
- */
-int hpsb_iso_xmit_start(struct hpsb_iso *iso, int cycle, int prebuffer)
-{
- if (iso->type != HPSB_ISO_XMIT)
- return -1;
-
- if (iso->flags & HPSB_ISO_DRIVER_STARTED)
- return 0;
-
- if (cycle < -1)
- cycle = -1;
- else if (cycle >= 8000)
- cycle %= 8000;
-
- iso->xmit_cycle = cycle;
-
- if (prebuffer < 0)
- prebuffer = iso->buf_packets - 1;
- else if (prebuffer == 0)
- prebuffer = 1;
-
- if (prebuffer >= iso->buf_packets)
- prebuffer = iso->buf_packets - 1;
-
- iso->prebuffer = prebuffer;
-
- /* remember the starting cycle; DMA will commence from
- hpsb_iso_xmit_queue_packet() once enough packets have been buffered */
- iso->start_cycle = cycle;
-
- return 0;
-}
-
-/**
- * hpsb_iso_recv_start - start DMA
- */
-int hpsb_iso_recv_start(struct hpsb_iso *iso, int cycle, int tag_mask, int sync)
-{
- int retval = 0;
- int isoctl_args[3];
-
- if (iso->type != HPSB_ISO_RECV)
- return -1;
-
- if (iso->flags & HPSB_ISO_DRIVER_STARTED)
- return 0;
-
- if (cycle < -1)
- cycle = -1;
- else if (cycle >= 8000)
- cycle %= 8000;
-
- isoctl_args[0] = cycle;
-
- if (tag_mask < 0)
- /* match all tags */
- tag_mask = 0xF;
- isoctl_args[1] = tag_mask;
-
- isoctl_args[2] = sync;
-
- retval =
- iso->host->driver->isoctl(iso, RECV_START,
- (unsigned long)&isoctl_args[0]);
- if (retval)
- return retval;
-
- iso->flags |= HPSB_ISO_DRIVER_STARTED;
- return retval;
-}
-
-/* check to make sure the user has not supplied bogus values of offset/len
- * that would cause the kernel to access memory outside the buffer */
-static int hpsb_iso_check_offset_len(struct hpsb_iso *iso,
- unsigned int offset, unsigned short len,
- unsigned int *out_offset,
- unsigned short *out_len)
-{
- if (offset >= iso->buf_size)
- return -EFAULT;
-
- /* make sure the packet does not go beyond the end of the buffer */
- if (offset + len > iso->buf_size)
- return -EFAULT;
-
- /* check for wrap-around */
- if (offset + len < offset)
- return -EFAULT;
-
- /* now we can trust 'offset' and 'length' */
- *out_offset = offset;
- *out_len = len;
-
- return 0;
-}
-
-/**
- * hpsb_iso_xmit_queue_packet - queue a packet for transmission.
- *
- * @offset is relative to the beginning of the DMA buffer, where the packet's
- * data payload should already have been placed.
- */
-int hpsb_iso_xmit_queue_packet(struct hpsb_iso *iso, u32 offset, u16 len,
- u8 tag, u8 sy)
-{
- struct hpsb_iso_packet_info *info;
- unsigned long flags;
- int rv;
-
- if (iso->type != HPSB_ISO_XMIT)
- return -EINVAL;
-
- /* is there space in the buffer? */
- if (iso->n_ready_packets <= 0) {
- return -EBUSY;
- }
-
- info = &iso->infos[iso->first_packet];
-
- /* check for bogus offset/length */
- if (hpsb_iso_check_offset_len
- (iso, offset, len, &info->offset, &info->len))
- return -EFAULT;
-
- info->tag = tag;
- info->sy = sy;
-
- spin_lock_irqsave(&iso->lock, flags);
-
- rv = iso->host->driver->isoctl(iso, XMIT_QUEUE, (unsigned long)info);
- if (rv)
- goto out;
-
- /* increment cursors */
- iso->first_packet = (iso->first_packet + 1) % iso->buf_packets;
- iso->xmit_cycle = (iso->xmit_cycle + 1) % 8000;
- iso->n_ready_packets--;
-
- if (iso->prebuffer != 0) {
- iso->prebuffer--;
- if (iso->prebuffer <= 0) {
- iso->prebuffer = 0;
- rv = do_iso_xmit_start(iso, iso->start_cycle);
- }
- }
-
- out:
- spin_unlock_irqrestore(&iso->lock, flags);
- return rv;
-}
-
-/**
- * hpsb_iso_xmit_sync - wait until all queued packets have been transmitted
- */
-int hpsb_iso_xmit_sync(struct hpsb_iso *iso)
-{
- if (iso->type != HPSB_ISO_XMIT)
- return -EINVAL;
-
- return wait_event_interruptible(iso->waitq,
- hpsb_iso_n_ready(iso) ==
- iso->buf_packets);
-}
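
Putting the transmit-side calls above together, a plausible usage pattern looks like the hedged sketch below; the channel, speed, packet count and sizes are arbitrary, and the step that writes the payload into iso->data_buf is only indicated by a comment because the buffer accessor is not shown in this file.

#include "hosts.h"
#include "iso.h"
#include "ieee1394.h"        /* assumed: IEEE1394_SPEED_400 */

/* Transmit flow: allocate a context, arm the start cycle, queue packets
 * (DMA actually begins once the prebuffer threshold is reached inside
 * hpsb_iso_xmit_queue_packet()), then wait for the queue to drain. */
static void example_xmit(struct hpsb_host *host)
{
	struct hpsb_iso *iso;
	int i;

	iso = hpsb_iso_xmit_init(host, 64 * 1024 /* buffer size */,
	                         64 /* packets */, 1 /* channel */,
	                         IEEE1394_SPEED_400, 16 /* irq_interval */,
	                         NULL /* no completion callback */);
	if (!iso)
		return;

	if (hpsb_iso_xmit_start(iso, -1 /* any cycle */, 8 /* prebuffer */))
		goto out;

	for (i = 0; i < 64; i++) {
		/* the payload for packet i would be written into
		 * iso->data_buf at offset i * 1024 at this point */
		if (hpsb_iso_xmit_queue_packet(iso, i * 1024, 1024,
		                               0 /* tag */, 0 /* sy */))
			break;
	}

	hpsb_iso_xmit_sync(iso);	/* wait until everything was sent */
out:
	hpsb_iso_stop(iso);
	hpsb_iso_shutdown(iso);
}
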
-
-/**
- * hpsb_iso_packet_sent
- *
- * Available to low-level drivers.
- *
- * Call after a packet has been transmitted to the bus (interrupt context is
- * OK). @cycle is the _exact_ cycle the packet was sent on. @error should be
- * non-zero if some sort of error occurred when sending the packet.
- */
-void hpsb_iso_packet_sent(struct hpsb_iso *iso, int cycle, int error)
-{
- unsigned long flags;
- spin_lock_irqsave(&iso->lock, flags);
-
- /* predict the cycle of the next packet to be queued */
-
- /* jump ahead by the number of packets that are already buffered */
- cycle += iso->buf_packets - iso->n_ready_packets;
- cycle %= 8000;
-
- iso->xmit_cycle = cycle;
- iso->n_ready_packets++;
- iso->pkt_dma = (iso->pkt_dma + 1) % iso->buf_packets;
-
- if (iso->n_ready_packets == iso->buf_packets || error != 0) {
- /* the buffer has run empty! */
- atomic_inc(&iso->overflows);
- }
-
- spin_unlock_irqrestore(&iso->lock, flags);
-}
-
-/**
- * hpsb_iso_packet_received
- *
- * Available to low-level drivers.
- *
- * Call after a packet has been received (interrupt context is OK).
- */
-void hpsb_iso_packet_received(struct hpsb_iso *iso, u32 offset, u16 len,
- u16 total_len, u16 cycle, u8 channel, u8 tag,
- u8 sy)
-{
- unsigned long flags;
- spin_lock_irqsave(&iso->lock, flags);
-
- if (iso->n_ready_packets == iso->buf_packets) {
- /* overflow! */
- atomic_inc(&iso->overflows);
- /* Record size of this discarded packet */
- iso->bytes_discarded += total_len;
- } else {
- struct hpsb_iso_packet_info *info = &iso->infos[iso->pkt_dma];
- info->offset = offset;
- info->len = len;
- info->total_len = total_len;
- info->cycle = cycle;
- info->channel = channel;
- info->tag = tag;
- info->sy = sy;
-
- iso->pkt_dma = (iso->pkt_dma + 1) % iso->buf_packets;
- iso->n_ready_packets++;
- }
-
- spin_unlock_irqrestore(&iso->lock, flags);
-}
-
-/**
- * hpsb_iso_recv_release_packets - release packets, reuse buffer
- *
- * @n_packets have been read out of the buffer, re-use the buffer space
- */
-int hpsb_iso_recv_release_packets(struct hpsb_iso *iso, unsigned int n_packets)
-{
- unsigned long flags;
- unsigned int i;
- int rv = 0;
-
- if (iso->type != HPSB_ISO_RECV)
- return -1;
-
- spin_lock_irqsave(&iso->lock, flags);
- for (i = 0; i < n_packets; i++) {
- rv = iso->host->driver->isoctl(iso, RECV_RELEASE,
- (unsigned long)&iso->infos[iso->
- first_packet]);
- if (rv)
- break;
-
- iso->first_packet = (iso->first_packet + 1) % iso->buf_packets;
- iso->n_ready_packets--;
-
- /* release memory from packets discarded when queue was full */
- if (iso->n_ready_packets == 0) { /* Release only after all prior packets handled */
- if (iso->bytes_discarded != 0) {
- struct hpsb_iso_packet_info inf;
- inf.total_len = iso->bytes_discarded;
- iso->host->driver->isoctl(iso, RECV_RELEASE,
- (unsigned long)&inf);
- iso->bytes_discarded = 0;
- }
- }
- }
- spin_unlock_irqrestore(&iso->lock, flags);
- return rv;
-}
-
-/**
- * hpsb_iso_wake
- *
- * Available to low-level drivers.
- *
- * Call to wake waiting processes after buffer space has opened up.
- */
-void hpsb_iso_wake(struct hpsb_iso *iso)
-{
- wake_up_interruptible(&iso->waitq);
-
- if (iso->callback)
- iso->callback(iso);
-}
diff --git a/drivers/ieee1394/iso.h b/drivers/ieee1394/iso.h
deleted file mode 100644
index c2089c093aa7..000000000000
--- a/drivers/ieee1394/iso.h
+++ /dev/null
@@ -1,195 +0,0 @@
-/*
- * IEEE 1394 for Linux
- *
- * kernel ISO transmission/reception
- *
- * Copyright (C) 2002 Maas Digital LLC
- *
- * This code is licensed under the GPL. See the file COPYING in the root
- * directory of the kernel sources for details.
- */
-
-#ifndef IEEE1394_ISO_H
-#define IEEE1394_ISO_H
-
-#include <linux/spinlock_types.h>
-#include <linux/wait.h>
-#include <asm/atomic.h>
-#include <asm/types.h>
-
-#include "dma.h"
-
-struct hpsb_host;
-
-/* high-level ISO interface */
-
-/*
- * This API sends and receives isochronous packets on a large,
- * virtually-contiguous kernel memory buffer. The buffer may be mapped
- * into a user-space process for zero-copy transmission and reception.
- *
- * There are no explicit boundaries between packets in the buffer. A
- * packet may be transmitted or received at any location. However,
- * low-level drivers may impose certain restrictions on alignment or
- * size of packets. (e.g. in OHCI no packet may cross a page boundary,
- * and packets should be quadlet-aligned)
- */
-
-/* Packet descriptor - the API maintains a ring buffer of these packet
- * descriptors in kernel memory (hpsb_iso.infos[]). */
-struct hpsb_iso_packet_info {
- /* offset of data payload relative to the first byte of the buffer */
- __u32 offset;
-
- /* length of the data payload, in bytes (not including the isochronous
- * header) */
- __u16 len;
-
- /* (recv only) the cycle number (mod 8000) on which the packet was
- * received */
- __u16 cycle;
-
- /* (recv only) channel on which the packet was received */
- __u8 channel;
-
- /* 2-bit 'tag' and 4-bit 'sy' fields of the isochronous header */
- __u8 tag;
- __u8 sy;
-
- /* length in bytes of the packet including header/trailer.
- * MUST be at structure end, since the first part of this structure is
- * also defined in raw1394.h (i.e. struct raw1394_iso_packet_info), is
- * copied to userspace and is accessed there through libraw1394. */
- __u16 total_len;
-};
-
-enum hpsb_iso_type { HPSB_ISO_RECV = 0, HPSB_ISO_XMIT = 1 };
-
-/* The mode of the dma when receiving iso data. Must be supported by chip */
-enum raw1394_iso_dma_recv_mode {
- HPSB_ISO_DMA_DEFAULT = -1,
- HPSB_ISO_DMA_OLD_ABI = 0,
- HPSB_ISO_DMA_BUFFERFILL = 1,
- HPSB_ISO_DMA_PACKET_PER_BUFFER = 2
-};
-
-struct hpsb_iso {
- enum hpsb_iso_type type;
-
- /* pointer to low-level driver and its private data */
- struct hpsb_host *host;
- void *hostdata;
-
- /* a function to be called (from interrupt context) after
- * outgoing packets have been sent, or incoming packets have
- * arrived */
- void (*callback)(struct hpsb_iso*);
-
- /* wait for buffer space */
- wait_queue_head_t waitq;
-
- int speed; /* IEEE1394_SPEED_100, 200, or 400 */
- int channel; /* -1 if multichannel */
- int dma_mode; /* dma receive mode */
-
-
- /* greatest # of packets between interrupts - controls
- * the maximum latency of the buffer */
- int irq_interval;
-
- /* the buffer for packet data payloads */
- struct dma_region data_buf;
-
- /* size of data_buf, in bytes (always a multiple of PAGE_SIZE) */
- unsigned int buf_size;
-
- /* # of packets in the ringbuffer */
- unsigned int buf_packets;
-
- /* protects packet cursors */
- spinlock_t lock;
-
- /* the index of the next packet that will be produced
- or consumed by the user */
- int first_packet;
-
- /* the index of the next packet that will be transmitted
- or received by the 1394 hardware */
- int pkt_dma;
-
- /* how many packets, starting at first_packet:
- * (transmit) are ready to be filled with data
- * (receive) contain received data */
- int n_ready_packets;
-
- /* how many times the buffer has overflowed or underflowed */
- atomic_t overflows;
- /* how many cycles were skipped for a given context */
- atomic_t skips;
-
- /* Current number of bytes lost in discarded packets */
- int bytes_discarded;
-
- /* private flags to track initialization progress */
-#define HPSB_ISO_DRIVER_INIT (1<<0)
-#define HPSB_ISO_DRIVER_STARTED (1<<1)
- unsigned int flags;
-
- /* # of packets left to prebuffer (xmit only) */
- int prebuffer;
-
- /* starting cycle for DMA (xmit only) */
- int start_cycle;
-
- /* cycle at which next packet will be transmitted,
- * -1 if not known */
- int xmit_cycle;
-
- /* ringbuffer of packet descriptors in regular kernel memory
- * XXX Keep this last, since we use over-allocated memory from
- * this entry to fill this field. */
- struct hpsb_iso_packet_info *infos;
-};
-
-/* functions available to high-level drivers (e.g. raw1394) */
-
-struct hpsb_iso* hpsb_iso_xmit_init(struct hpsb_host *host,
- unsigned int data_buf_size,
- unsigned int buf_packets,
- int channel,
- int speed,
- int irq_interval,
- void (*callback)(struct hpsb_iso*));
-struct hpsb_iso* hpsb_iso_recv_init(struct hpsb_host *host,
- unsigned int data_buf_size,
- unsigned int buf_packets,
- int channel,
- int dma_mode,
- int irq_interval,
- void (*callback)(struct hpsb_iso*));
-int hpsb_iso_recv_listen_channel(struct hpsb_iso *iso, unsigned char channel);
-int hpsb_iso_recv_unlisten_channel(struct hpsb_iso *iso, unsigned char channel);
-int hpsb_iso_recv_set_channel_mask(struct hpsb_iso *iso, u64 mask);
-int hpsb_iso_xmit_start(struct hpsb_iso *iso, int start_on_cycle,
- int prebuffer);
-int hpsb_iso_recv_start(struct hpsb_iso *iso, int start_on_cycle,
- int tag_mask, int sync);
-void hpsb_iso_stop(struct hpsb_iso *iso);
-void hpsb_iso_shutdown(struct hpsb_iso *iso);
-int hpsb_iso_xmit_queue_packet(struct hpsb_iso *iso, u32 offset, u16 len,
- u8 tag, u8 sy);
-int hpsb_iso_xmit_sync(struct hpsb_iso *iso);
-int hpsb_iso_recv_release_packets(struct hpsb_iso *recv,
- unsigned int n_packets);
-int hpsb_iso_recv_flush(struct hpsb_iso *iso);
-int hpsb_iso_n_ready(struct hpsb_iso *iso);
-
-/* the following are callbacks available to low-level drivers */
-
-void hpsb_iso_packet_sent(struct hpsb_iso *iso, int cycle, int error);
-void hpsb_iso_packet_received(struct hpsb_iso *iso, u32 offset, u16 len,
- u16 total_len, u16 cycle, u8 channel, u8 tag,
- u8 sy);
-void hpsb_iso_wake(struct hpsb_iso *iso);
-
-#endif /* IEEE1394_ISO_H */
diff --git a/drivers/ieee1394/nodemgr.c b/drivers/ieee1394/nodemgr.c
deleted file mode 100644
index 18350213479e..000000000000
--- a/drivers/ieee1394/nodemgr.c
+++ /dev/null
@@ -1,1901 +0,0 @@
-/*
- * Node information (ConfigROM) collection and management.
- *
- * Copyright (C) 2000 Andreas E. Bombe
- * 2001-2003 Ben Collins <bcollins@debian.net>
- *
- * This code is licensed under the GPL. See the file COPYING in the root
- * directory of the kernel sources for details.
- */
-
-#include <linux/bitmap.h>
-#include <linux/kernel.h>
-#include <linux/kmemcheck.h>
-#include <linux/list.h>
-#include <linux/slab.h>
-#include <linux/delay.h>
-#include <linux/kthread.h>
-#include <linux/module.h>
-#include <linux/moduleparam.h>
-#include <linux/mutex.h>
-#include <linux/freezer.h>
-#include <asm/atomic.h>
-
-#include "csr.h"
-#include "highlevel.h"
-#include "hosts.h"
-#include "ieee1394.h"
-#include "ieee1394_core.h"
-#include "ieee1394_hotplug.h"
-#include "ieee1394_types.h"
-#include "ieee1394_transactions.h"
-#include "nodemgr.h"
-
-static int ignore_drivers;
-module_param(ignore_drivers, int, S_IRUGO | S_IWUSR);
-MODULE_PARM_DESC(ignore_drivers, "Disable automatic probing for drivers.");
-
-struct nodemgr_csr_info {
- struct hpsb_host *host;
- nodeid_t nodeid;
- unsigned int generation;
-
- kmemcheck_bitfield_begin(flags);
- unsigned int speed_unverified:1;
- kmemcheck_bitfield_end(flags);
-};
-
-
-/*
- * Correct the speed map entry. This is necessary
- * - for nodes with link speed < phy speed,
- * - for 1394b nodes with negotiated phy port speed < IEEE1394_SPEED_MAX.
- * A possible speed is determined by trial and error, using quadlet reads.
- */
-static int nodemgr_check_speed(struct nodemgr_csr_info *ci, u64 addr,
- quadlet_t *buffer)
-{
- quadlet_t q;
- u8 i, *speed, old_speed, good_speed;
- int error;
-
- speed = &(ci->host->speed[NODEID_TO_NODE(ci->nodeid)]);
- old_speed = *speed;
- good_speed = IEEE1394_SPEED_MAX + 1;
-
- /* Try every speed from S100 up to old_speed.
- * If we probed in the other direction, a speed lower than necessary could
- * be settled on if a retry succeeded for some other reason, e.g. because
- * the link just finished its initialization. */
- for (i = IEEE1394_SPEED_100; i <= old_speed; i++) {
- *speed = i;
- error = hpsb_read(ci->host, ci->nodeid, ci->generation, addr,
- &q, 4);
- if (error)
- break;
- *buffer = q;
- good_speed = i;
- }
- if (good_speed <= IEEE1394_SPEED_MAX) {
- HPSB_DEBUG("Speed probe of node " NODE_BUS_FMT " yields %s",
- NODE_BUS_ARGS(ci->host, ci->nodeid),
- hpsb_speedto_str[good_speed]);
- *speed = good_speed;
- ci->speed_unverified = 0;
- return 0;
- }
- *speed = old_speed;
- return error;
-}
-
-static int nodemgr_bus_read(struct csr1212_csr *csr, u64 addr,
- void *buffer, void *__ci)
-{
- struct nodemgr_csr_info *ci = (struct nodemgr_csr_info*)__ci;
- int i, error;
-
- for (i = 1; ; i++) {
- error = hpsb_read(ci->host, ci->nodeid, ci->generation, addr,
- buffer, 4);
- if (!error) {
- ci->speed_unverified = 0;
- break;
- }
- /* Give up after 3rd failure. */
- if (i == 3)
- break;
-
- /* The ieee1394_core guessed the node's speed capability from
- * the self ID. Check whether a lower speed works. */
- if (ci->speed_unverified) {
- error = nodemgr_check_speed(ci, addr, buffer);
- if (!error)
- break;
- }
- if (msleep_interruptible(334))
- return -EINTR;
- }
- return error;
-}
-
-static struct csr1212_bus_ops nodemgr_csr_ops = {
- .bus_read = nodemgr_bus_read,
-};
-
-
-/*
- * Basically what we do here is start off retrieving the bus_info block.
- * From there we fill in some info about the node, verify it is of IEEE
- * 1394 type, and check that the CRC is valid. After that we start off with
- * the root directory, and subdirectories. To do this, we retrieve the
- * quadlet header for a directory, find out the length, and retrieve the
- * complete directory entry (be it a leaf or a directory). We then process
- * it and add the info to our structure for that particular node.
- *
- * We verify CRCs along the way for each directory/block/leaf. The entire
- * node structure is generic, and simply stores the information in a way
- * that's easy to parse by the protocol interface.
- */
-
-/*
- * The nodemgr relies heavily on the Driver Model for device callbacks and
- * driver/device mappings. The old nodemgr used to handle all this itself,
- * but now we are much simpler because of the LDM.
- */
-
-struct host_info {
- struct hpsb_host *host;
- struct list_head list;
- struct task_struct *thread;
-};
-
-static int nodemgr_bus_match(struct device * dev, struct device_driver * drv);
-static int nodemgr_uevent(struct device *dev, struct kobj_uevent_env *env);
-
-struct bus_type ieee1394_bus_type = {
- .name = "ieee1394",
- .match = nodemgr_bus_match,
-};
-
-static void host_cls_release(struct device *dev)
-{
- put_device(&container_of((dev), struct hpsb_host, host_dev)->device);
-}
-
-struct class hpsb_host_class = {
- .name = "ieee1394_host",
- .dev_release = host_cls_release,
-};
-
-static void ne_cls_release(struct device *dev)
-{
- put_device(&container_of((dev), struct node_entry, node_dev)->device);
-}
-
-static struct class nodemgr_ne_class = {
- .name = "ieee1394_node",
- .dev_release = ne_cls_release,
-};
-
-static void ud_cls_release(struct device *dev)
-{
- put_device(&container_of((dev), struct unit_directory, unit_dev)->device);
-}
-
-/* The name here is only so that unit directory hotplug works with old
- * style hotplug, which only ever did unit directories anyway.
- */
-static struct class nodemgr_ud_class = {
- .name = "ieee1394",
- .dev_release = ud_cls_release,
- .dev_uevent = nodemgr_uevent,
-};
-
-static struct hpsb_highlevel nodemgr_highlevel;
-
-
-static void nodemgr_release_ud(struct device *dev)
-{
- struct unit_directory *ud = container_of(dev, struct unit_directory, device);
-
- if (ud->vendor_name_kv)
- csr1212_release_keyval(ud->vendor_name_kv);
- if (ud->model_name_kv)
- csr1212_release_keyval(ud->model_name_kv);
-
- kfree(ud);
-}
-
-static void nodemgr_release_ne(struct device *dev)
-{
- struct node_entry *ne = container_of(dev, struct node_entry, device);
-
- if (ne->vendor_name_kv)
- csr1212_release_keyval(ne->vendor_name_kv);
-
- kfree(ne);
-}
-
-
-static void nodemgr_release_host(struct device *dev)
-{
- struct hpsb_host *host = container_of(dev, struct hpsb_host, device);
-
- csr1212_destroy_csr(host->csr.rom);
-
- kfree(host);
-}
-
-static int nodemgr_ud_platform_data;
-
-static struct device nodemgr_dev_template_ud = {
- .bus = &ieee1394_bus_type,
- .release = nodemgr_release_ud,
- .platform_data = &nodemgr_ud_platform_data,
-};
-
-static struct device nodemgr_dev_template_ne = {
- .bus = &ieee1394_bus_type,
- .release = nodemgr_release_ne,
-};
-
-/* This dummy driver prevents the host devices from being scanned. We have no
- * useful drivers for them yet, and a deadlock would be possible if the
- * driver core scans the host device while the host's low-level driver (i.e.
- * the host's parent device) is being removed. */
-static struct device_driver nodemgr_mid_layer_driver = {
- .bus = &ieee1394_bus_type,
- .name = "nodemgr",
- .owner = THIS_MODULE,
-};
-
-struct device nodemgr_dev_template_host = {
- .bus = &ieee1394_bus_type,
- .release = nodemgr_release_host,
-};
-
-
-#define fw_attr(class, class_type, field, type, format_string) \
-static ssize_t fw_show_##class##_##field (struct device *dev, struct device_attribute *attr, char *buf)\
-{ \
- class_type *class; \
- class = container_of(dev, class_type, device); \
- return sprintf(buf, format_string, (type)class->field); \
-} \
-static struct device_attribute dev_attr_##class##_##field = { \
- .attr = {.name = __stringify(field), .mode = S_IRUGO }, \
- .show = fw_show_##class##_##field, \
-};
-
-#define fw_attr_td(class, class_type, td_kv) \
-static ssize_t fw_show_##class##_##td_kv (struct device *dev, struct device_attribute *attr, char *buf)\
-{ \
- int len; \
- class_type *class = container_of(dev, class_type, device); \
- len = (class->td_kv->value.leaf.len - 2) * sizeof(quadlet_t); \
- memcpy(buf, \
- CSR1212_TEXTUAL_DESCRIPTOR_LEAF_DATA(class->td_kv), \
- len); \
- while (buf[len - 1] == '\0') \
- len--; \
- buf[len++] = '\n'; \
- buf[len] = '\0'; \
- return len; \
-} \
-static struct device_attribute dev_attr_##class##_##td_kv = { \
- .attr = {.name = __stringify(td_kv), .mode = S_IRUGO }, \
- .show = fw_show_##class##_##td_kv, \
-};
-
-
-#define fw_drv_attr(field, type, format_string) \
-static ssize_t fw_drv_show_##field (struct device_driver *drv, char *buf) \
-{ \
- struct hpsb_protocol_driver *driver; \
- driver = container_of(drv, struct hpsb_protocol_driver, driver); \
- return sprintf(buf, format_string, (type)driver->field);\
-} \
-static struct driver_attribute driver_attr_drv_##field = { \
- .attr = {.name = __stringify(field), .mode = S_IRUGO }, \
- .show = fw_drv_show_##field, \
-};
-
-
-static ssize_t fw_show_ne_bus_options(struct device *dev, struct device_attribute *attr, char *buf)
-{
- struct node_entry *ne = container_of(dev, struct node_entry, device);
-
- return sprintf(buf, "IRMC(%d) CMC(%d) ISC(%d) BMC(%d) PMC(%d) GEN(%d) "
- "LSPD(%d) MAX_REC(%d) MAX_ROM(%d) CYC_CLK_ACC(%d)\n",
- ne->busopt.irmc,
- ne->busopt.cmc, ne->busopt.isc, ne->busopt.bmc,
- ne->busopt.pmc, ne->busopt.generation, ne->busopt.lnkspd,
- ne->busopt.max_rec,
- ne->busopt.max_rom,
- ne->busopt.cyc_clk_acc);
-}
-static DEVICE_ATTR(bus_options,S_IRUGO,fw_show_ne_bus_options,NULL);
-
-
-#ifdef HPSB_DEBUG_TLABELS
-static ssize_t fw_show_ne_tlabels_free(struct device *dev,
- struct device_attribute *attr, char *buf)
-{
- struct node_entry *ne = container_of(dev, struct node_entry, device);
- unsigned long flags;
- unsigned long *tp = ne->host->tl_pool[NODEID_TO_NODE(ne->nodeid)].map;
- int tf;
-
- spin_lock_irqsave(&hpsb_tlabel_lock, flags);
- tf = 64 - bitmap_weight(tp, 64);
- spin_unlock_irqrestore(&hpsb_tlabel_lock, flags);
-
- return sprintf(buf, "%d\n", tf);
-}
-static DEVICE_ATTR(tlabels_free,S_IRUGO,fw_show_ne_tlabels_free,NULL);
-
-
-static ssize_t fw_show_ne_tlabels_mask(struct device *dev,
- struct device_attribute *attr, char *buf)
-{
- struct node_entry *ne = container_of(dev, struct node_entry, device);
- unsigned long flags;
- unsigned long *tp = ne->host->tl_pool[NODEID_TO_NODE(ne->nodeid)].map;
- u64 tm;
-
- spin_lock_irqsave(&hpsb_tlabel_lock, flags);
-#if (BITS_PER_LONG <= 32)
- tm = ((u64)tp[0] << 32) + tp[1];
-#else
- tm = tp[0];
-#endif
- spin_unlock_irqrestore(&hpsb_tlabel_lock, flags);
-
- return sprintf(buf, "0x%016llx\n", (unsigned long long)tm);
-}
-static DEVICE_ATTR(tlabels_mask, S_IRUGO, fw_show_ne_tlabels_mask, NULL);
-#endif /* HPSB_DEBUG_TLABELS */
-
-
-static ssize_t fw_set_ignore_driver(struct device *dev, struct device_attribute *attr, const char *buf, size_t count)
-{
- struct unit_directory *ud = container_of(dev, struct unit_directory, device);
- int state = simple_strtoul(buf, NULL, 10);
-
- if (state == 1) {
- ud->ignore_driver = 1;
- device_release_driver(dev);
- } else if (state == 0)
- ud->ignore_driver = 0;
-
- return count;
-}
-static ssize_t fw_get_ignore_driver(struct device *dev, struct device_attribute *attr, char *buf)
-{
- struct unit_directory *ud = container_of(dev, struct unit_directory, device);
-
- return sprintf(buf, "%d\n", ud->ignore_driver);
-}
-static DEVICE_ATTR(ignore_driver, S_IWUSR | S_IRUGO, fw_get_ignore_driver, fw_set_ignore_driver);
-
-
-static ssize_t fw_set_rescan(struct bus_type *bus, const char *buf,
- size_t count)
-{
- int error = 0;
-
- if (simple_strtoul(buf, NULL, 10) == 1)
- error = bus_rescan_devices(&ieee1394_bus_type);
- return error ? error : count;
-}
-static ssize_t fw_get_rescan(struct bus_type *bus, char *buf)
-{
- return sprintf(buf, "You can force a rescan of the bus for "
- "drivers by writing a 1 to this file\n");
-}
-static BUS_ATTR(rescan, S_IWUSR | S_IRUGO, fw_get_rescan, fw_set_rescan);
-
-
-static ssize_t fw_set_ignore_drivers(struct bus_type *bus, const char *buf, size_t count)
-{
- int state = simple_strtoul(buf, NULL, 10);
-
- if (state == 1)
- ignore_drivers = 1;
- else if (state == 0)
- ignore_drivers = 0;
-
- return count;
-}
-static ssize_t fw_get_ignore_drivers(struct bus_type *bus, char *buf)
-{
- return sprintf(buf, "%d\n", ignore_drivers);
-}
-static BUS_ATTR(ignore_drivers, S_IWUSR | S_IRUGO, fw_get_ignore_drivers, fw_set_ignore_drivers);
-
-
-struct bus_attribute *const fw_bus_attrs[] = {
- &bus_attr_rescan,
- &bus_attr_ignore_drivers,
- NULL
-};
-
-
-fw_attr(ne, struct node_entry, capabilities, unsigned int, "0x%06x\n")
-fw_attr(ne, struct node_entry, nodeid, unsigned int, "0x%04x\n")
-
-fw_attr(ne, struct node_entry, vendor_id, unsigned int, "0x%06x\n")
-fw_attr_td(ne, struct node_entry, vendor_name_kv)
-
-fw_attr(ne, struct node_entry, guid, unsigned long long, "0x%016Lx\n")
-fw_attr(ne, struct node_entry, guid_vendor_id, unsigned int, "0x%06x\n")
-fw_attr(ne, struct node_entry, in_limbo, int, "%d\n");
-
-static struct device_attribute *const fw_ne_attrs[] = {
- &dev_attr_ne_guid,
- &dev_attr_ne_guid_vendor_id,
- &dev_attr_ne_capabilities,
- &dev_attr_ne_vendor_id,
- &dev_attr_ne_nodeid,
- &dev_attr_bus_options,
-#ifdef HPSB_DEBUG_TLABELS
- &dev_attr_tlabels_free,
- &dev_attr_tlabels_mask,
-#endif
-};
-
-
-
-fw_attr(ud, struct unit_directory, address, unsigned long long, "0x%016Lx\n")
-fw_attr(ud, struct unit_directory, length, int, "%d\n")
-/* These are all dependent on the value being provided */
-fw_attr(ud, struct unit_directory, vendor_id, unsigned int, "0x%06x\n")
-fw_attr(ud, struct unit_directory, model_id, unsigned int, "0x%06x\n")
-fw_attr(ud, struct unit_directory, specifier_id, unsigned int, "0x%06x\n")
-fw_attr(ud, struct unit_directory, version, unsigned int, "0x%06x\n")
-fw_attr_td(ud, struct unit_directory, vendor_name_kv)
-fw_attr_td(ud, struct unit_directory, model_name_kv)
-
-static struct device_attribute *const fw_ud_attrs[] = {
- &dev_attr_ud_address,
- &dev_attr_ud_length,
- &dev_attr_ignore_driver,
-};
-
-
-fw_attr(host, struct hpsb_host, node_count, int, "%d\n")
-fw_attr(host, struct hpsb_host, selfid_count, int, "%d\n")
-fw_attr(host, struct hpsb_host, nodes_active, int, "%d\n")
-fw_attr(host, struct hpsb_host, in_bus_reset, int, "%d\n")
-fw_attr(host, struct hpsb_host, is_root, int, "%d\n")
-fw_attr(host, struct hpsb_host, is_cycmst, int, "%d\n")
-fw_attr(host, struct hpsb_host, is_irm, int, "%d\n")
-fw_attr(host, struct hpsb_host, is_busmgr, int, "%d\n")
-
-static struct device_attribute *const fw_host_attrs[] = {
- &dev_attr_host_node_count,
- &dev_attr_host_selfid_count,
- &dev_attr_host_nodes_active,
- &dev_attr_host_in_bus_reset,
- &dev_attr_host_is_root,
- &dev_attr_host_is_cycmst,
- &dev_attr_host_is_irm,
- &dev_attr_host_is_busmgr,
-};
-
-
-static ssize_t fw_show_drv_device_ids(struct device_driver *drv, char *buf)
-{
- struct hpsb_protocol_driver *driver;
- const struct ieee1394_device_id *id;
- int length = 0;
- char *scratch = buf;
-
- driver = container_of(drv, struct hpsb_protocol_driver, driver);
- id = driver->id_table;
- if (!id)
- return 0;
-
- for (; id->match_flags != 0; id++) {
- int need_coma = 0;
-
- if (id->match_flags & IEEE1394_MATCH_VENDOR_ID) {
- length += sprintf(scratch, "vendor_id=0x%06x", id->vendor_id);
- scratch = buf + length;
- need_coma++;
- }
-
- if (id->match_flags & IEEE1394_MATCH_MODEL_ID) {
- length += sprintf(scratch, "%smodel_id=0x%06x",
- need_coma++ ? "," : "",
- id->model_id);
- scratch = buf + length;
- }
-
- if (id->match_flags & IEEE1394_MATCH_SPECIFIER_ID) {
- length += sprintf(scratch, "%sspecifier_id=0x%06x",
- need_coma++ ? "," : "",
- id->specifier_id);
- scratch = buf + length;
- }
-
- if (id->match_flags & IEEE1394_MATCH_VERSION) {
- length += sprintf(scratch, "%sversion=0x%06x",
- need_coma++ ? "," : "",
- id->version);
- scratch = buf + length;
- }
-
- if (need_coma) {
- *scratch++ = '\n';
- length++;
- }
- }
-
- return length;
-}
-static DRIVER_ATTR(device_ids,S_IRUGO,fw_show_drv_device_ids,NULL);
-
-
-fw_drv_attr(name, const char *, "%s\n")
-
-static struct driver_attribute *const fw_drv_attrs[] = {
- &driver_attr_drv_name,
- &driver_attr_device_ids,
-};
-
-
-static void nodemgr_create_drv_files(struct hpsb_protocol_driver *driver)
-{
- struct device_driver *drv = &driver->driver;
- int i;
-
- for (i = 0; i < ARRAY_SIZE(fw_drv_attrs); i++)
- if (driver_create_file(drv, fw_drv_attrs[i]))
- goto fail;
- return;
-fail:
- HPSB_ERR("Failed to add sysfs attribute");
-}
-
-
-static void nodemgr_remove_drv_files(struct hpsb_protocol_driver *driver)
-{
- struct device_driver *drv = &driver->driver;
- int i;
-
- for (i = 0; i < ARRAY_SIZE(fw_drv_attrs); i++)
- driver_remove_file(drv, fw_drv_attrs[i]);
-}
-
-
-static void nodemgr_create_ne_dev_files(struct node_entry *ne)
-{
- struct device *dev = &ne->device;
- int i;
-
- for (i = 0; i < ARRAY_SIZE(fw_ne_attrs); i++)
- if (device_create_file(dev, fw_ne_attrs[i]))
- goto fail;
- return;
-fail:
- HPSB_ERR("Failed to add sysfs attribute");
-}
-
-
-static void nodemgr_create_host_dev_files(struct hpsb_host *host)
-{
- struct device *dev = &host->device;
- int i;
-
- for (i = 0; i < ARRAY_SIZE(fw_host_attrs); i++)
- if (device_create_file(dev, fw_host_attrs[i]))
- goto fail;
- return;
-fail:
- HPSB_ERR("Failed to add sysfs attribute");
-}
-
-
-static struct node_entry *find_entry_by_nodeid(struct hpsb_host *host,
- nodeid_t nodeid);
-
-static void nodemgr_update_host_dev_links(struct hpsb_host *host)
-{
- struct device *dev = &host->device;
- struct node_entry *ne;
-
- sysfs_remove_link(&dev->kobj, "irm_id");
- sysfs_remove_link(&dev->kobj, "busmgr_id");
- sysfs_remove_link(&dev->kobj, "host_id");
-
- if ((ne = find_entry_by_nodeid(host, host->irm_id)) &&
- sysfs_create_link(&dev->kobj, &ne->device.kobj, "irm_id"))
- goto fail;
- if ((ne = find_entry_by_nodeid(host, host->busmgr_id)) &&
- sysfs_create_link(&dev->kobj, &ne->device.kobj, "busmgr_id"))
- goto fail;
- if ((ne = find_entry_by_nodeid(host, host->node_id)) &&
- sysfs_create_link(&dev->kobj, &ne->device.kobj, "host_id"))
- goto fail;
- return;
-fail:
- HPSB_ERR("Failed to update sysfs attributes for host %d", host->id);
-}
-
-static void nodemgr_create_ud_dev_files(struct unit_directory *ud)
-{
- struct device *dev = &ud->device;
- int i;
-
- for (i = 0; i < ARRAY_SIZE(fw_ud_attrs); i++)
- if (device_create_file(dev, fw_ud_attrs[i]))
- goto fail;
- if (ud->flags & UNIT_DIRECTORY_SPECIFIER_ID)
- if (device_create_file(dev, &dev_attr_ud_specifier_id))
- goto fail;
- if (ud->flags & UNIT_DIRECTORY_VERSION)
- if (device_create_file(dev, &dev_attr_ud_version))
- goto fail;
- if (ud->flags & UNIT_DIRECTORY_VENDOR_ID) {
- if (device_create_file(dev, &dev_attr_ud_vendor_id))
- goto fail;
- if (ud->vendor_name_kv &&
- device_create_file(dev, &dev_attr_ud_vendor_name_kv))
- goto fail;
- }
- if (ud->flags & UNIT_DIRECTORY_MODEL_ID) {
- if (device_create_file(dev, &dev_attr_ud_model_id))
- goto fail;
- if (ud->model_name_kv &&
- device_create_file(dev, &dev_attr_ud_model_name_kv))
- goto fail;
- }
- return;
-fail:
- HPSB_ERR("Failed to add sysfs attribute");
-}
-
-
-static int nodemgr_bus_match(struct device * dev, struct device_driver * drv)
-{
- struct hpsb_protocol_driver *driver;
- struct unit_directory *ud;
- const struct ieee1394_device_id *id;
-
- /* We only match unit directories */
- if (dev->platform_data != &nodemgr_ud_platform_data)
- return 0;
-
- ud = container_of(dev, struct unit_directory, device);
- if (ud->ne->in_limbo || ud->ignore_driver)
- return 0;
-
- /* We only match drivers of type hpsb_protocol_driver */
- if (drv == &nodemgr_mid_layer_driver)
- return 0;
-
- driver = container_of(drv, struct hpsb_protocol_driver, driver);
- id = driver->id_table;
- if (!id)
- return 0;
-
- for (; id->match_flags != 0; id++) {
- if ((id->match_flags & IEEE1394_MATCH_VENDOR_ID) &&
- id->vendor_id != ud->vendor_id)
- continue;
-
- if ((id->match_flags & IEEE1394_MATCH_MODEL_ID) &&
- id->model_id != ud->model_id)
- continue;
-
- if ((id->match_flags & IEEE1394_MATCH_SPECIFIER_ID) &&
- id->specifier_id != ud->specifier_id)
- continue;
-
- if ((id->match_flags & IEEE1394_MATCH_VERSION) &&
- id->version != ud->version)
- continue;
-
- return 1;
- }
-
- return 0;
-}
-
-
-static DEFINE_MUTEX(nodemgr_serialize_remove_uds);
-
-static int match_ne(struct device *dev, void *data)
-{
- struct unit_directory *ud;
- struct node_entry *ne = data;
-
- ud = container_of(dev, struct unit_directory, unit_dev);
- return ud->ne == ne;
-}
-
-static void nodemgr_remove_uds(struct node_entry *ne)
-{
- struct device *dev;
- struct unit_directory *ud;
-
- /* Use class_find_device() to iterate the devices. Since this code
- * may be called from other contexts besides the knodemgrd threads,
- * protect it by nodemgr_serialize_remove_uds.
- */
- mutex_lock(&nodemgr_serialize_remove_uds);
- for (;;) {
- dev = class_find_device(&nodemgr_ud_class, NULL, ne, match_ne);
- if (!dev)
- break;
- ud = container_of(dev, struct unit_directory, unit_dev);
- put_device(dev);
- device_unregister(&ud->unit_dev);
- device_unregister(&ud->device);
- }
- mutex_unlock(&nodemgr_serialize_remove_uds);
-}
-
-
-static void nodemgr_remove_ne(struct node_entry *ne)
-{
- struct device *dev;
-
- dev = get_device(&ne->device);
- if (!dev)
- return;
-
- HPSB_DEBUG("Node removed: ID:BUS[" NODE_BUS_FMT "] GUID[%016Lx]",
- NODE_BUS_ARGS(ne->host, ne->nodeid), (unsigned long long)ne->guid);
- nodemgr_remove_uds(ne);
-
- device_unregister(&ne->node_dev);
- device_unregister(dev);
-
- put_device(dev);
-}
-
-static int remove_host_dev(struct device *dev, void *data)
-{
- if (dev->bus == &ieee1394_bus_type)
- nodemgr_remove_ne(container_of(dev, struct node_entry,
- device));
- return 0;
-}
-
-static void nodemgr_remove_host_dev(struct device *dev)
-{
- device_for_each_child(dev, NULL, remove_host_dev);
- sysfs_remove_link(&dev->kobj, "irm_id");
- sysfs_remove_link(&dev->kobj, "busmgr_id");
- sysfs_remove_link(&dev->kobj, "host_id");
-}
-
-
-static void nodemgr_update_bus_options(struct node_entry *ne)
-{
-#ifdef CONFIG_IEEE1394_VERBOSEDEBUG
- static const u16 mr[] = { 4, 64, 1024, 0};
-#endif
- quadlet_t busoptions = be32_to_cpu(ne->csr->bus_info_data[2]);
-
- ne->busopt.irmc = (busoptions >> 31) & 1;
- ne->busopt.cmc = (busoptions >> 30) & 1;
- ne->busopt.isc = (busoptions >> 29) & 1;
- ne->busopt.bmc = (busoptions >> 28) & 1;
- ne->busopt.pmc = (busoptions >> 27) & 1;
- ne->busopt.cyc_clk_acc = (busoptions >> 16) & 0xff;
- ne->busopt.max_rec = 1 << (((busoptions >> 12) & 0xf) + 1);
- ne->busopt.max_rom = (busoptions >> 8) & 0x3;
- ne->busopt.generation = (busoptions >> 4) & 0xf;
- ne->busopt.lnkspd = busoptions & 0x7;
-
- HPSB_VERBOSE("NodeMgr: raw=0x%08x irmc=%d cmc=%d isc=%d bmc=%d pmc=%d "
- "cyc_clk_acc=%d max_rec=%d max_rom=%d gen=%d lspd=%d",
- busoptions, ne->busopt.irmc, ne->busopt.cmc,
- ne->busopt.isc, ne->busopt.bmc, ne->busopt.pmc,
- ne->busopt.cyc_clk_acc, ne->busopt.max_rec,
- mr[ne->busopt.max_rom],
- ne->busopt.generation, ne->busopt.lnkspd);
-}
-
-
-static struct node_entry *nodemgr_create_node(octlet_t guid,
- struct csr1212_csr *csr, struct hpsb_host *host,
- nodeid_t nodeid, unsigned int generation)
-{
- struct node_entry *ne;
-
- ne = kzalloc(sizeof(*ne), GFP_KERNEL);
- if (!ne)
- goto fail_alloc;
-
- ne->host = host;
- ne->nodeid = nodeid;
- ne->generation = generation;
- ne->needs_probe = true;
-
- ne->guid = guid;
- ne->guid_vendor_id = (guid >> 40) & 0xffffff;
- ne->csr = csr;
-
- memcpy(&ne->device, &nodemgr_dev_template_ne,
- sizeof(ne->device));
- ne->device.parent = &host->device;
- dev_set_name(&ne->device, "%016Lx", (unsigned long long)(ne->guid));
-
- ne->node_dev.parent = &ne->device;
- ne->node_dev.class = &nodemgr_ne_class;
- dev_set_name(&ne->node_dev, "%016Lx", (unsigned long long)(ne->guid));
-
- if (device_register(&ne->device))
- goto fail_devreg;
- if (device_register(&ne->node_dev))
- goto fail_classdevreg;
- get_device(&ne->device);
-
- nodemgr_create_ne_dev_files(ne);
-
- nodemgr_update_bus_options(ne);
-
- HPSB_DEBUG("%s added: ID:BUS[" NODE_BUS_FMT "] GUID[%016Lx]",
- (host->node_id == nodeid) ? "Host" : "Node",
- NODE_BUS_ARGS(host, nodeid), (unsigned long long)guid);
-
- return ne;
-
-fail_classdevreg:
- device_unregister(&ne->device);
-fail_devreg:
- kfree(ne);
-fail_alloc:
- HPSB_ERR("Failed to create node ID:BUS[" NODE_BUS_FMT "] GUID[%016Lx]",
- NODE_BUS_ARGS(host, nodeid), (unsigned long long)guid);
-
- return NULL;
-}
-
-static int match_ne_guid(struct device *dev, void *data)
-{
- struct node_entry *ne;
- u64 *guid = data;
-
- ne = container_of(dev, struct node_entry, node_dev);
- return ne->guid == *guid;
-}
-
-static struct node_entry *find_entry_by_guid(u64 guid)
-{
- struct device *dev;
- struct node_entry *ne;
-
- dev = class_find_device(&nodemgr_ne_class, NULL, &guid, match_ne_guid);
- if (!dev)
- return NULL;
- ne = container_of(dev, struct node_entry, node_dev);
- put_device(dev);
-
- return ne;
-}
-
-struct match_nodeid_parameter {
- struct hpsb_host *host;
- nodeid_t nodeid;
-};
-
-static int match_ne_nodeid(struct device *dev, void *data)
-{
- int found = 0;
- struct node_entry *ne;
- struct match_nodeid_parameter *p = data;
-
- if (!dev)
- goto ret;
- ne = container_of(dev, struct node_entry, node_dev);
- if (ne->host == p->host && ne->nodeid == p->nodeid)
- found = 1;
-ret:
- return found;
-}
-
-static struct node_entry *find_entry_by_nodeid(struct hpsb_host *host,
- nodeid_t nodeid)
-{
- struct device *dev;
- struct node_entry *ne;
- struct match_nodeid_parameter p;
-
- p.host = host;
- p.nodeid = nodeid;
-
- dev = class_find_device(&nodemgr_ne_class, NULL, &p, match_ne_nodeid);
- if (!dev)
- return NULL;
- ne = container_of(dev, struct node_entry, node_dev);
- put_device(dev);
-
- return ne;
-}
-
-
-static void nodemgr_register_device(struct node_entry *ne,
- struct unit_directory *ud, struct device *parent)
-{
- memcpy(&ud->device, &nodemgr_dev_template_ud,
- sizeof(ud->device));
-
- ud->device.parent = parent;
-
- dev_set_name(&ud->device, "%s-%u", dev_name(&ne->device), ud->id);
-
- ud->unit_dev.parent = &ud->device;
- ud->unit_dev.class = &nodemgr_ud_class;
- dev_set_name(&ud->unit_dev, "%s-%u", dev_name(&ne->device), ud->id);
-
- if (device_register(&ud->device))
- goto fail_devreg;
- if (device_register(&ud->unit_dev))
- goto fail_classdevreg;
- get_device(&ud->device);
-
- nodemgr_create_ud_dev_files(ud);
-
- return;
-
-fail_classdevreg:
- device_unregister(&ud->device);
-fail_devreg:
- HPSB_ERR("Failed to create unit %s", dev_name(&ud->device));
-}
-
-
-/* This implementation currently only scans the config rom and its
- * immediate unit directories looking for software_id and
- * software_version entries, in order to get driver autoloading working. */
-static struct unit_directory *nodemgr_process_unit_directory
- (struct node_entry *ne, struct csr1212_keyval *ud_kv,
- unsigned int *id, struct unit_directory *parent)
-{
- struct unit_directory *ud;
- struct unit_directory *ud_child = NULL;
- struct csr1212_dentry *dentry;
- struct csr1212_keyval *kv;
- u8 last_key_id = 0;
-
- ud = kzalloc(sizeof(*ud), GFP_KERNEL);
- if (!ud)
- goto unit_directory_error;
-
- ud->ne = ne;
- ud->ignore_driver = ignore_drivers;
- ud->address = ud_kv->offset + CSR1212_REGISTER_SPACE_BASE;
- ud->directory_id = ud->address & 0xffffff;
- ud->ud_kv = ud_kv;
- ud->id = (*id)++;
-
- /* inherit vendor_id from root directory if none exists in unit dir */
- ud->vendor_id = ne->vendor_id;
-
- csr1212_for_each_dir_entry(ne->csr, kv, ud_kv, dentry) {
- switch (kv->key.id) {
- case CSR1212_KV_ID_VENDOR:
- if (kv->key.type == CSR1212_KV_TYPE_IMMEDIATE) {
- ud->vendor_id = kv->value.immediate;
- ud->flags |= UNIT_DIRECTORY_VENDOR_ID;
- }
- break;
-
- case CSR1212_KV_ID_MODEL:
- ud->model_id = kv->value.immediate;
- ud->flags |= UNIT_DIRECTORY_MODEL_ID;
- break;
-
- case CSR1212_KV_ID_SPECIFIER_ID:
- ud->specifier_id = kv->value.immediate;
- ud->flags |= UNIT_DIRECTORY_SPECIFIER_ID;
- break;
-
- case CSR1212_KV_ID_VERSION:
- ud->version = kv->value.immediate;
- ud->flags |= UNIT_DIRECTORY_VERSION;
- break;
-
- case CSR1212_KV_ID_DESCRIPTOR:
- if (kv->key.type == CSR1212_KV_TYPE_LEAF &&
- CSR1212_DESCRIPTOR_LEAF_TYPE(kv) == 0 &&
- CSR1212_DESCRIPTOR_LEAF_SPECIFIER_ID(kv) == 0 &&
- CSR1212_TEXTUAL_DESCRIPTOR_LEAF_WIDTH(kv) == 0 &&
- CSR1212_TEXTUAL_DESCRIPTOR_LEAF_CHAR_SET(kv) == 0 &&
- CSR1212_TEXTUAL_DESCRIPTOR_LEAF_LANGUAGE(kv) == 0) {
- switch (last_key_id) {
- case CSR1212_KV_ID_VENDOR:
- csr1212_keep_keyval(kv);
- ud->vendor_name_kv = kv;
- break;
-
- case CSR1212_KV_ID_MODEL:
- csr1212_keep_keyval(kv);
- ud->model_name_kv = kv;
- break;
-
- }
- } /* else if (kv->key.type == CSR1212_KV_TYPE_DIRECTORY) ... */
- break;
-
- case CSR1212_KV_ID_DEPENDENT_INFO:
- /* Logical Unit Number */
- if (kv->key.type == CSR1212_KV_TYPE_IMMEDIATE) {
- if (ud->flags & UNIT_DIRECTORY_HAS_LUN) {
- ud_child = kmemdup(ud, sizeof(*ud_child), GFP_KERNEL);
- if (!ud_child)
- goto unit_directory_error;
- nodemgr_register_device(ne, ud_child, &ne->device);
- ud_child = NULL;
-
- ud->id = (*id)++;
- }
- ud->lun = kv->value.immediate;
- ud->flags |= UNIT_DIRECTORY_HAS_LUN;
-
- /* Logical Unit Directory */
- } else if (kv->key.type == CSR1212_KV_TYPE_DIRECTORY) {
- /* This should really be done in SBP2 as this is
- * doing SBP2 specific parsing.
- */
-
- /* first register the parent unit */
- ud->flags |= UNIT_DIRECTORY_HAS_LUN_DIRECTORY;
- if (ud->device.bus != &ieee1394_bus_type)
- nodemgr_register_device(ne, ud, &ne->device);
-
- /* process the child unit */
- ud_child = nodemgr_process_unit_directory(ne, kv, id, ud);
-
- if (ud_child == NULL)
- break;
-
- /* inherit unspecified values, the driver core picks it up */
- if ((ud->flags & UNIT_DIRECTORY_MODEL_ID) &&
- !(ud_child->flags & UNIT_DIRECTORY_MODEL_ID))
- {
- ud_child->flags |= UNIT_DIRECTORY_MODEL_ID;
- ud_child->model_id = ud->model_id;
- }
- if ((ud->flags & UNIT_DIRECTORY_SPECIFIER_ID) &&
- !(ud_child->flags & UNIT_DIRECTORY_SPECIFIER_ID))
- {
- ud_child->flags |= UNIT_DIRECTORY_SPECIFIER_ID;
- ud_child->specifier_id = ud->specifier_id;
- }
- if ((ud->flags & UNIT_DIRECTORY_VERSION) &&
- !(ud_child->flags & UNIT_DIRECTORY_VERSION))
- {
- ud_child->flags |= UNIT_DIRECTORY_VERSION;
- ud_child->version = ud->version;
- }
-
- /* register the child unit */
- ud_child->flags |= UNIT_DIRECTORY_LUN_DIRECTORY;
- nodemgr_register_device(ne, ud_child, &ud->device);
- }
-
- break;
-
- case CSR1212_KV_ID_DIRECTORY_ID:
- ud->directory_id = kv->value.immediate;
- break;
-
- default:
- break;
- }
- last_key_id = kv->key.id;
- }
-
- /* register only the top-level unit directory here (child units were
- * registered above), and only if it was not already registered */
- if (!parent && ud->device.bus != &ieee1394_bus_type)
- nodemgr_register_device(ne, ud, &ne->device);
-
- return ud;
-
-unit_directory_error:
- kfree(ud);
- return NULL;
-}
-
-
-static void nodemgr_process_root_directory(struct node_entry *ne)
-{
- unsigned int ud_id = 0;
- struct csr1212_dentry *dentry;
- struct csr1212_keyval *kv, *vendor_name_kv = NULL;
- u8 last_key_id = 0;
-
- ne->needs_probe = false;
-
- csr1212_for_each_dir_entry(ne->csr, kv, ne->csr->root_kv, dentry) {
- switch (kv->key.id) {
- case CSR1212_KV_ID_VENDOR:
- ne->vendor_id = kv->value.immediate;
- break;
-
- case CSR1212_KV_ID_NODE_CAPABILITIES:
- ne->capabilities = kv->value.immediate;
- break;
-
- case CSR1212_KV_ID_UNIT:
- nodemgr_process_unit_directory(ne, kv, &ud_id, NULL);
- break;
-
- case CSR1212_KV_ID_DESCRIPTOR:
- if (last_key_id == CSR1212_KV_ID_VENDOR) {
- if (kv->key.type == CSR1212_KV_TYPE_LEAF &&
- CSR1212_DESCRIPTOR_LEAF_TYPE(kv) == 0 &&
- CSR1212_DESCRIPTOR_LEAF_SPECIFIER_ID(kv) == 0 &&
- CSR1212_TEXTUAL_DESCRIPTOR_LEAF_WIDTH(kv) == 0 &&
- CSR1212_TEXTUAL_DESCRIPTOR_LEAF_CHAR_SET(kv) == 0 &&
- CSR1212_TEXTUAL_DESCRIPTOR_LEAF_LANGUAGE(kv) == 0) {
- csr1212_keep_keyval(kv);
- vendor_name_kv = kv;
- }
- }
- break;
- }
- last_key_id = kv->key.id;
- }
-
- if (ne->vendor_name_kv) {
- kv = ne->vendor_name_kv;
- ne->vendor_name_kv = vendor_name_kv;
- csr1212_release_keyval(kv);
- } else if (vendor_name_kv) {
- ne->vendor_name_kv = vendor_name_kv;
- if (device_create_file(&ne->device,
- &dev_attr_ne_vendor_name_kv) != 0)
- HPSB_ERR("Failed to add sysfs attribute");
- }
-}
-
-#ifdef CONFIG_HOTPLUG
-
-static int nodemgr_uevent(struct device *dev, struct kobj_uevent_env *env)
-{
- struct unit_directory *ud;
- int retval = 0;
- /* ieee1394:venNmoNspNverN */
- char buf[8 + 1 + 3 + 8 + 2 + 8 + 2 + 8 + 3 + 8 + 1];
-
- if (!dev)
- return -ENODEV;
-
- ud = container_of(dev, struct unit_directory, unit_dev);
-
- if (ud->ne->in_limbo || ud->ignore_driver)
- return -ENODEV;
-
-#define PUT_ENVP(fmt,val) \
-do { \
- retval = add_uevent_var(env, fmt, val); \
- if (retval) \
- return retval; \
-} while (0)
-
- PUT_ENVP("VENDOR_ID=%06x", ud->vendor_id);
- PUT_ENVP("MODEL_ID=%06x", ud->model_id);
- PUT_ENVP("GUID=%016Lx", (unsigned long long)ud->ne->guid);
- PUT_ENVP("SPECIFIER_ID=%06x", ud->specifier_id);
- PUT_ENVP("VERSION=%06x", ud->version);
- snprintf(buf, sizeof(buf), "ieee1394:ven%08Xmo%08Xsp%08Xver%08X",
- ud->vendor_id,
- ud->model_id,
- ud->specifier_id,
- ud->version);
- PUT_ENVP("MODALIAS=%s", buf);
-
-#undef PUT_ENVP
-
- return 0;
-}
-
-#else
-
-static int nodemgr_uevent(struct device *dev, struct kobj_uevent_env *env)
-{
- return -ENODEV;
-}
-
-#endif /* CONFIG_HOTPLUG */
-
-
-int __hpsb_register_protocol(struct hpsb_protocol_driver *drv,
- struct module *owner)
-{
- int error;
-
- drv->driver.bus = &ieee1394_bus_type;
- drv->driver.owner = owner;
- drv->driver.name = drv->name;
-
- /* This will cause a probe for devices */
- error = driver_register(&drv->driver);
- if (!error)
- nodemgr_create_drv_files(drv);
- return error;
-}
-
-void hpsb_unregister_protocol(struct hpsb_protocol_driver *driver)
-{
- nodemgr_remove_drv_files(driver);
- /* This will subsequently disconnect all devices that our driver
- * is attached to. */
- driver_unregister(&driver->driver);
-}
-
-
-/*
- * This function updates nodes that were present on the bus before the
- * reset and still are after the reset. The nodeid and the config rom
- * may have changed, and the drivers managing this device must be
- * informed that this device just went through a bus reset, to allow
- * them to take whatever actions are required.
- */
-static void nodemgr_update_node(struct node_entry *ne, struct csr1212_csr *csr,
- nodeid_t nodeid, unsigned int generation)
-{
- if (ne->nodeid != nodeid) {
- HPSB_DEBUG("Node changed: " NODE_BUS_FMT " -> " NODE_BUS_FMT,
- NODE_BUS_ARGS(ne->host, ne->nodeid),
- NODE_BUS_ARGS(ne->host, nodeid));
- ne->nodeid = nodeid;
- }
-
- if (ne->busopt.generation != ((be32_to_cpu(csr->bus_info_data[2]) >> 4) & 0xf)) {
- kfree(ne->csr->private);
- csr1212_destroy_csr(ne->csr);
- ne->csr = csr;
-
- /* If the node's configrom generation has changed, we
- * unregister all the unit directories. */
- nodemgr_remove_uds(ne);
-
- nodemgr_update_bus_options(ne);
-
- /* Mark the node as new, so it gets re-probed */
- ne->needs_probe = true;
- } else {
- /* old cache is valid, so update its generation */
- struct nodemgr_csr_info *ci = ne->csr->private;
- ci->generation = generation;
- /* free the partially filled now unneeded new cache */
- kfree(csr->private);
- csr1212_destroy_csr(csr);
- }
-
- /* Finally, mark the node current */
- smp_wmb();
- ne->generation = generation;
-
- if (ne->in_limbo) {
- device_remove_file(&ne->device, &dev_attr_ne_in_limbo);
- ne->in_limbo = false;
-
- HPSB_DEBUG("Node reactivated: "
- "ID:BUS[" NODE_BUS_FMT "] GUID[%016Lx]",
- NODE_BUS_ARGS(ne->host, ne->nodeid),
- (unsigned long long)ne->guid);
- }
-}
-
-static void nodemgr_node_scan_one(struct hpsb_host *host,
- nodeid_t nodeid, int generation)
-{
- struct node_entry *ne;
- octlet_t guid;
- struct csr1212_csr *csr;
- struct nodemgr_csr_info *ci;
- u8 *speed;
-
- ci = kmalloc(sizeof(*ci), GFP_KERNEL);
- kmemcheck_annotate_bitfield(ci, flags);
- if (!ci)
- return;
-
- ci->host = host;
- ci->nodeid = nodeid;
- ci->generation = generation;
-
- /* Prepare for speed probe which occurs when reading the ROM */
- speed = &(host->speed[NODEID_TO_NODE(nodeid)]);
- if (*speed > host->csr.lnk_spd)
- *speed = host->csr.lnk_spd;
- ci->speed_unverified = *speed > IEEE1394_SPEED_100;
-
- /* We need to detect when the ConfigROM's generation has changed,
- * so that we only update the node's info when necessary. */
-
- csr = csr1212_create_csr(&nodemgr_csr_ops, 5 * sizeof(quadlet_t), ci);
- if (!csr || csr1212_parse_csr(csr) != CSR1212_SUCCESS) {
- HPSB_ERR("Error parsing configrom for node " NODE_BUS_FMT,
- NODE_BUS_ARGS(host, nodeid));
- if (csr)
- csr1212_destroy_csr(csr);
- kfree(ci);
- return;
- }
-
- if (csr->bus_info_data[1] != IEEE1394_BUSID_MAGIC) {
- /* This isn't a 1394 device, but we let it slide. There
- * was a report of a device with broken firmware which
- * reported '2394' instead of '1394', which is obviously a
- * mistake. One would hope that a non-1394 device never
- * gets connected to a FireWire bus. If someone does, we
- * shouldn't be held responsible, so we'll allow it with a
- * warning. */
- HPSB_WARN("Node " NODE_BUS_FMT " has invalid busID magic [0x%08x]",
- NODE_BUS_ARGS(host, nodeid), csr->bus_info_data[1]);
- }
-
- guid = ((u64)be32_to_cpu(csr->bus_info_data[3]) << 32) | be32_to_cpu(csr->bus_info_data[4]);
- ne = find_entry_by_guid(guid);
-
- if (ne && ne->host != host && ne->in_limbo) {
- /* Must have moved this device from one host to another */
- nodemgr_remove_ne(ne);
- ne = NULL;
- }
-
- if (!ne)
- nodemgr_create_node(guid, csr, host, nodeid, generation);
- else
- nodemgr_update_node(ne, csr, nodeid, generation);
-}
-
-
-static void nodemgr_node_scan(struct hpsb_host *host, int generation)
-{
- int count;
- struct selfid *sid = (struct selfid *)host->topology_map;
- nodeid_t nodeid = LOCAL_BUS;
-
- /* Scan each node on the bus */
- for (count = host->selfid_count; count; count--, sid++) {
- if (sid->extended)
- continue;
-
- if (!sid->link_active) {
- nodeid++;
- continue;
- }
- nodemgr_node_scan_one(host, nodeid++, generation);
- }
-}
-
-static void nodemgr_pause_ne(struct node_entry *ne)
-{
- HPSB_DEBUG("Node paused: ID:BUS[" NODE_BUS_FMT "] GUID[%016Lx]",
- NODE_BUS_ARGS(ne->host, ne->nodeid),
- (unsigned long long)ne->guid);
-
- ne->in_limbo = true;
- WARN_ON(device_create_file(&ne->device, &dev_attr_ne_in_limbo));
-}
-
-static int update_pdrv(struct device *dev, void *data)
-{
- struct unit_directory *ud;
- struct device_driver *drv;
- struct hpsb_protocol_driver *pdrv;
- struct node_entry *ne = data;
- int error;
-
- ud = container_of(dev, struct unit_directory, unit_dev);
- if (ud->ne == ne) {
- drv = get_driver(ud->device.driver);
- if (drv) {
- error = 0;
- pdrv = container_of(drv, struct hpsb_protocol_driver,
- driver);
- if (pdrv->update) {
- device_lock(&ud->device);
- error = pdrv->update(ud);
- device_unlock(&ud->device);
- }
- if (error)
- device_release_driver(&ud->device);
- put_driver(drv);
- }
- }
-
- return 0;
-}
-
-static void nodemgr_update_pdrv(struct node_entry *ne)
-{
- class_for_each_device(&nodemgr_ud_class, NULL, ne, update_pdrv);
-}
-
-/* Write the BROADCAST_CHANNEL as per IEEE1394a 8.3.2.3.11 and 8.4.2.3. This
- * seems like an optional service but in the end it is practically mandatory
- * as a consequence of these clauses.
- *
- * Note that we cannot do a broadcast write to all nodes at once because some
- * pre-1394a devices would hang. */
-static void nodemgr_irm_write_bc(struct node_entry *ne, int generation)
-{
- const u64 bc_addr = (CSR_REGISTER_BASE | CSR_BROADCAST_CHANNEL);
- quadlet_t bc_remote, bc_local;
- int error;
-
- if (!ne->host->is_irm || ne->generation != generation ||
- ne->nodeid == ne->host->node_id)
- return;
-
- bc_local = cpu_to_be32(ne->host->csr.broadcast_channel);
-
- /* Check if the register is implemented and 1394a compliant. */
- error = hpsb_read(ne->host, ne->nodeid, generation, bc_addr, &bc_remote,
- sizeof(bc_remote));
- if (!error && bc_remote & cpu_to_be32(0x80000000) &&
- bc_remote != bc_local)
- hpsb_node_write(ne, bc_addr, &bc_local, sizeof(bc_local));
-}
-
-
-static void nodemgr_probe_ne(struct hpsb_host *host, struct node_entry *ne,
- int generation)
-{
- struct device *dev;
-
- if (ne->host != host || ne->in_limbo)
- return;
-
- dev = get_device(&ne->device);
- if (!dev)
- return;
-
- nodemgr_irm_write_bc(ne, generation);
-
- /* If "needs_probe", then this is either a new or changed node we
- * rescan totally. If the generation matches for an existing node
- * (one that existed prior to the bus reset) we send update calls
- * down to the drivers. Otherwise, this is a dead node and we
- * suspend it. */
- if (ne->needs_probe)
- nodemgr_process_root_directory(ne);
- else if (ne->generation == generation)
- nodemgr_update_pdrv(ne);
- else
- nodemgr_pause_ne(ne);
-
- put_device(dev);
-}
-
-struct node_probe_parameter {
- struct hpsb_host *host;
- int generation;
- bool probe_now;
-};
-
-static int node_probe(struct device *dev, void *data)
-{
- struct node_probe_parameter *p = data;
- struct node_entry *ne;
-
- if (p->generation != get_hpsb_generation(p->host))
- return -EAGAIN;
-
- ne = container_of(dev, struct node_entry, node_dev);
- if (ne->needs_probe == p->probe_now)
- nodemgr_probe_ne(p->host, ne, p->generation);
- return 0;
-}
-
-static int nodemgr_node_probe(struct hpsb_host *host, int generation)
-{
- struct node_probe_parameter p;
-
- p.host = host;
- p.generation = generation;
- /*
- * Do some processing of the nodes we've probed. This pulls them
- * into the sysfs layer if needed, and can result in processing of
- * unit-directories, or just updating the node and its
- * unit-directories.
- *
- * Run updates before probes. Usually, updates are time-critical
- * while probes are time-consuming.
- *
- * Meanwhile, another bus reset may have happened. In this case we
- * skip everything here and let the next bus scan handle it.
- * Otherwise we may prematurely remove nodes which are still there.
- */
- p.probe_now = false;
- if (class_for_each_device(&nodemgr_ne_class, NULL, &p, node_probe) != 0)
- return 0;
-
- p.probe_now = true;
- if (class_for_each_device(&nodemgr_ne_class, NULL, &p, node_probe) != 0)
- return 0;
- /*
- * Now let's tell the bus to rescan our devices. This may seem
- * like overhead, but the driver-model core will only scan a
- * device for a driver when either the device is added, or when a
- * new driver is added. A bus reset is a good reason to rescan
- * devices that were there before. For example, an sbp2 device
- * may become available for login, if the host that held it was
- * just removed.
- */
- if (bus_rescan_devices(&ieee1394_bus_type) != 0)
- HPSB_DEBUG("bus_rescan_devices had an error");
-
- return 1;
-}
-
-static int remove_nodes_in_limbo(struct device *dev, void *data)
-{
- struct node_entry *ne;
-
- if (dev->bus != &ieee1394_bus_type)
- return 0;
-
- ne = container_of(dev, struct node_entry, device);
- if (ne->in_limbo)
- nodemgr_remove_ne(ne);
-
- return 0;
-}
-
-static void nodemgr_remove_nodes_in_limbo(struct hpsb_host *host)
-{
- device_for_each_child(&host->device, NULL, remove_nodes_in_limbo);
-}
-
-static int nodemgr_send_resume_packet(struct hpsb_host *host)
-{
- struct hpsb_packet *packet;
- int error = -ENOMEM;
-
- packet = hpsb_make_phypacket(host,
- EXTPHYPACKET_TYPE_RESUME |
- NODEID_TO_NODE(host->node_id) << PHYPACKET_PORT_SHIFT);
- if (packet) {
- packet->no_waiter = 1;
- packet->generation = get_hpsb_generation(host);
- error = hpsb_send_packet(packet);
- }
- if (error)
- HPSB_WARN("fw-host%d: Failed to broadcast resume packet",
- host->id);
- return error;
-}
-
-/* Perform a few high-level IRM responsibilities. */
-static int nodemgr_do_irm_duties(struct hpsb_host *host, int cycles)
-{
- quadlet_t bc;
-
- /* if irm_id == -1 then there is no IRM on this bus */
- if (!host->is_irm || host->irm_id == (nodeid_t)-1)
- return 1;
-
- /* We are a 1394a-2000 compliant IRM. Set the validity bit. */
- host->csr.broadcast_channel |= 0x40000000;
-
- /* If there is no bus manager then we should set the root node's
- * force_root bit to promote bus stability per the 1394
- * spec. (8.4.2.6) */
- if (host->busmgr_id == 0xffff && host->node_count > 1)
- {
- u16 root_node = host->node_count - 1;
-
- /* get cycle master capability flag from root node */
- if (host->is_cycmst ||
- (!hpsb_read(host, LOCAL_BUS | root_node, get_hpsb_generation(host),
- (CSR_REGISTER_BASE + CSR_CONFIG_ROM + 2 * sizeof(quadlet_t)),
- &bc, sizeof(quadlet_t)) &&
- be32_to_cpu(bc) & 1 << CSR_CMC_SHIFT))
- hpsb_send_phy_config(host, root_node, -1);
- else {
- HPSB_DEBUG("The root node is not cycle master capable; "
- "selecting a new root node and resetting...");
-
- if (cycles >= 5) {
- /* Oh screw it! Just leave the bus as it is */
- HPSB_DEBUG("Stopping reset loop for IRM sanity");
- return 1;
- }
-
- hpsb_send_phy_config(host, NODEID_TO_NODE(host->node_id), -1);
- hpsb_reset_bus(host, LONG_RESET_FORCE_ROOT);
-
- return 0;
- }
- }
-
- /* Some devices suspend their ports while being connected to an inactive
- * host adapter, i.e. if connected before the low-level driver is
- * loaded. They become visible either when physically unplugged and
- * replugged, or when receiving a resume packet. Send one once. */
- if (!host->resume_packet_sent && !nodemgr_send_resume_packet(host))
- host->resume_packet_sent = 1;
-
- return 1;
-}
-
-/* If we are not the IRM, we need to ensure that the IRM node is capable of
- * everything we can do; otherwise we issue a bus reset and try to become the IRM
- * ourselves. */
-static int nodemgr_check_irm_capability(struct hpsb_host *host, int cycles)
-{
- quadlet_t bc;
- int status;
-
- if (hpsb_disable_irm || host->is_irm)
- return 1;
-
- status = hpsb_read(host, LOCAL_BUS | (host->irm_id),
- get_hpsb_generation(host),
- (CSR_REGISTER_BASE | CSR_BROADCAST_CHANNEL),
- &bc, sizeof(quadlet_t));
-
- if (status < 0 || !(be32_to_cpu(bc) & 0x80000000)) {
- /* The current irm node does not have a valid BROADCAST_CHANNEL
- * register and we do, so reset the bus with force_root set */
- HPSB_DEBUG("Current remote IRM is not 1394a-2000 compliant, resetting...");
-
- if (cycles >= 5) {
- /* Oh screw it! Just leave the bus as it is */
- HPSB_DEBUG("Stopping reset loop for IRM sanity");
- return 1;
- }
-
- hpsb_send_phy_config(host, NODEID_TO_NODE(host->node_id), -1);
- hpsb_reset_bus(host, LONG_RESET_FORCE_ROOT);
-
- return 0;
- }
-
- return 1;
-}
-
-static int nodemgr_host_thread(void *data)
-{
- struct hpsb_host *host = data;
- unsigned int g, generation = 0;
- int i, reset_cycles = 0;
-
- set_freezable();
- /* Setup our device-model entries */
- nodemgr_create_host_dev_files(host);
-
- for (;;) {
- /* Sleep until next bus reset */
- set_current_state(TASK_INTERRUPTIBLE);
- if (get_hpsb_generation(host) == generation &&
- !kthread_should_stop())
- schedule();
- __set_current_state(TASK_RUNNING);
-
- /* Thread may have been woken up to freeze or to exit */
- if (try_to_freeze())
- continue;
- if (kthread_should_stop())
- goto exit;
-
- /* Pause for 1/4 second in 1/16 second intervals,
- * to make sure things settle down. */
- g = get_hpsb_generation(host);
- for (i = 0; i < 4 ; i++) {
- msleep_interruptible(63);
- try_to_freeze();
- if (kthread_should_stop())
- goto exit;
-
- /* Now get the generation in which the node IDs we collect
- * are valid. During the bus scan we will use this generation
- * for the read transactions, so that if another reset occurs
- * during the scan the transactions will fail instead of
- * returning bogus data. */
- generation = get_hpsb_generation(host);
-
- /* If we get a reset before we are done waiting, then
- * start the waiting over again */
- if (generation != g)
- g = generation, i = 0;
- }
-
- if (!nodemgr_check_irm_capability(host, reset_cycles) ||
- !nodemgr_do_irm_duties(host, reset_cycles)) {
- reset_cycles++;
- continue;
- }
- reset_cycles = 0;
-
- /* Scan our nodes to get the bus options and create node
- * entries. This does not do the sysfs stuff, since that
- * would trigger uevents and such, which is a bad idea at
- * this point. */
- nodemgr_node_scan(host, generation);
-
- /* This actually does the full probe, with sysfs
- * registration. */
- if (!nodemgr_node_probe(host, generation))
- continue;
-
- /* Update some of our sysfs symlinks */
- nodemgr_update_host_dev_links(host);
-
- /* Sleep 3 seconds */
- for (i = 3000/200; i; i--) {
- msleep_interruptible(200);
- try_to_freeze();
- if (kthread_should_stop())
- goto exit;
-
- if (generation != get_hpsb_generation(host))
- break;
- }
- /* Remove nodes which are gone, unless a bus reset happened */
- if (!i)
- nodemgr_remove_nodes_in_limbo(host);
- }
-exit:
- HPSB_VERBOSE("NodeMgr: Exiting thread");
- return 0;
-}
-
-struct per_host_parameter {
- void *data;
- int (*cb)(struct hpsb_host *, void *);
-};
-
-static int per_host(struct device *dev, void *data)
-{
- struct hpsb_host *host;
- struct per_host_parameter *p = data;
-
- host = container_of(dev, struct hpsb_host, host_dev);
- return p->cb(host, p->data);
-}
-
-/**
- * nodemgr_for_each_host - call a function for each IEEE 1394 host
- * @data: an address to supply to the callback
- * @cb: function to call for each host
- *
- * Iterate the hosts, calling a given function with supplied data for each host.
- * If the callback fails on a host, i.e. if it returns a non-zero value, the
- * iteration is stopped.
- *
- * Return value: 0 on success, non-zero on failure (same as returned by last run
- * of the callback).
- */
-int nodemgr_for_each_host(void *data, int (*cb)(struct hpsb_host *, void *))
-{
- struct per_host_parameter p;
-
- p.cb = cb;
- p.data = data;
- return class_for_each_device(&hpsb_host_class, NULL, &p, per_host);
-}
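(A minimal usage sketch of the iterator documented above; the callback and the counter it fills are hypothetical and not part of this driver.)

/* Hypothetical callback: count hosts.  Returning 0 keeps the iteration going. */
static int count_host(struct hpsb_host *host, void *data)
{
	int *count = data;

	(*count)++;
	return 0;
}

static int count_ieee1394_hosts(void)
{
	int count = 0;

	nodemgr_for_each_host(&count, count_host);
	return count;
}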
-
-/* The following two convenience functions use a struct node_entry
- * for addressing a node on the bus. They are intended for use by any
- * process context, not just the nodemgr thread, so we need to be a
- * little careful when reading out the node ID and generation. The
- * thing that can go wrong is that we get the node ID, then a bus
- * reset occurs, and then we read the generation. The node ID is
- * possibly invalid, but the generation is current, and we end up
- * sending a packet to the wrong node.
- *
- * The solution is to make sure we read the generation first, so that
- * if a reset occurs in the process, we end up with a stale generation
- * and the transactions will fail instead of silently using wrong node
- * ID's.
- */
-
-/**
- * hpsb_node_fill_packet - fill some destination information into a packet
- * @ne: destination node
- * @packet: packet to fill in
- *
- * This will fill in the given, pre-initialised hpsb_packet with the current
- * information from the node entry (host, node ID, bus generation number).
- */
-void hpsb_node_fill_packet(struct node_entry *ne, struct hpsb_packet *packet)
-{
- packet->host = ne->host;
- packet->generation = ne->generation;
- smp_rmb();
- packet->node_id = ne->nodeid;
-}
-
-int hpsb_node_write(struct node_entry *ne, u64 addr,
- quadlet_t *buffer, size_t length)
-{
- unsigned int generation = ne->generation;
-
- smp_rmb();
- return hpsb_write(ne->host, ne->nodeid, generation,
- addr, buffer, length);
-}
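(A hedged caller sketch illustrating the ordering rule described above: sample the generation before the node ID so that a stale generation makes the transaction fail rather than reach the wrong node. The function name and error handling are made up for illustration.)

/* Hypothetical caller: write one quadlet to a node, but only while the node
 * entry is still current for the present bus generation. */
static int example_write_quadlet(struct node_entry *ne, u64 addr, quadlet_t value)
{
	quadlet_t buf = cpu_to_be32(value);

	if (!hpsb_node_entry_valid(ne))
		return -EAGAIN;	/* a bus reset invalidated this entry */

	/* hpsb_node_write() reads ne->generation before ne->nodeid,
	 * separated by smp_rmb(), as shown above. */
	return hpsb_node_write(ne, addr, &buf, sizeof(buf));
}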
-
-static void nodemgr_add_host(struct hpsb_host *host)
-{
- struct host_info *hi;
-
- hi = hpsb_create_hostinfo(&nodemgr_highlevel, host, sizeof(*hi));
- if (!hi) {
- HPSB_ERR("NodeMgr: out of memory in add host");
- return;
- }
- hi->host = host;
- hi->thread = kthread_run(nodemgr_host_thread, host, "knodemgrd_%d",
- host->id);
- if (IS_ERR(hi->thread)) {
- HPSB_ERR("NodeMgr: cannot start thread for host %d", host->id);
- hpsb_destroy_hostinfo(&nodemgr_highlevel, host);
- }
-}
-
-static void nodemgr_host_reset(struct hpsb_host *host)
-{
- struct host_info *hi = hpsb_get_hostinfo(&nodemgr_highlevel, host);
-
- if (hi) {
- HPSB_VERBOSE("NodeMgr: Processing reset for host %d", host->id);
- wake_up_process(hi->thread);
- }
-}
-
-static void nodemgr_remove_host(struct hpsb_host *host)
-{
- struct host_info *hi = hpsb_get_hostinfo(&nodemgr_highlevel, host);
-
- if (hi) {
- kthread_stop(hi->thread);
- nodemgr_remove_host_dev(&host->device);
- }
-}
-
-static struct hpsb_highlevel nodemgr_highlevel = {
- .name = "Node manager",
- .add_host = nodemgr_add_host,
- .host_reset = nodemgr_host_reset,
- .remove_host = nodemgr_remove_host,
-};
-
-int init_ieee1394_nodemgr(void)
-{
- int error;
-
- error = class_register(&nodemgr_ne_class);
- if (error)
- goto fail_ne;
- error = class_register(&nodemgr_ud_class);
- if (error)
- goto fail_ud;
- error = driver_register(&nodemgr_mid_layer_driver);
- if (error)
- goto fail_ml;
- /* This driver is not used if nodemgr is off (disable_nodemgr=1). */
- nodemgr_dev_template_host.driver = &nodemgr_mid_layer_driver;
-
- hpsb_register_highlevel(&nodemgr_highlevel);
- return 0;
-
-fail_ml:
- class_unregister(&nodemgr_ud_class);
-fail_ud:
- class_unregister(&nodemgr_ne_class);
-fail_ne:
- return error;
-}
-
-void cleanup_ieee1394_nodemgr(void)
-{
- hpsb_unregister_highlevel(&nodemgr_highlevel);
- driver_unregister(&nodemgr_mid_layer_driver);
- class_unregister(&nodemgr_ud_class);
- class_unregister(&nodemgr_ne_class);
-}
diff --git a/drivers/ieee1394/nodemgr.h b/drivers/ieee1394/nodemgr.h
deleted file mode 100644
index 749b271d3107..000000000000
--- a/drivers/ieee1394/nodemgr.h
+++ /dev/null
@@ -1,186 +0,0 @@
-/*
- * Copyright (C) 2000 Andreas E. Bombe
- * 2001 Ben Collins <bcollins@debian.org>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software Foundation,
- * Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
- */
-
-#ifndef _IEEE1394_NODEMGR_H
-#define _IEEE1394_NODEMGR_H
-
-#include <linux/device.h>
-#include <asm/system.h>
-#include <asm/types.h>
-
-#include "ieee1394_core.h"
-#include "ieee1394_transactions.h"
-#include "ieee1394_types.h"
-
-struct csr1212_csr;
-struct csr1212_keyval;
-struct hpsb_host;
-struct ieee1394_device_id;
-
-/* This is the start of a Node entry structure. It should be a stable API
- * with which to gather info from the Node Manager about devices attached
- * to the bus. */
-struct bus_options {
- u8 irmc; /* Iso Resource Manager Capable */
- u8 cmc; /* Cycle Master Capable */
- u8 isc; /* Iso Capable */
- u8 bmc; /* Bus Master Capable */
- u8 pmc; /* Power Manager Capable (PNP spec) */
- u8 cyc_clk_acc; /* Cycle clock accuracy */
- u8 max_rom; /* Maximum block read supported in the CSR */
- u8 generation; /* Incremented when configrom changes */
- u8 lnkspd; /* Link speed */
- u16 max_rec; /* Maximum packet size node can receive */
-};
-
-#define UNIT_DIRECTORY_VENDOR_ID 0x01
-#define UNIT_DIRECTORY_MODEL_ID 0x02
-#define UNIT_DIRECTORY_SPECIFIER_ID 0x04
-#define UNIT_DIRECTORY_VERSION 0x08
-#define UNIT_DIRECTORY_HAS_LUN_DIRECTORY 0x10
-#define UNIT_DIRECTORY_LUN_DIRECTORY 0x20
-#define UNIT_DIRECTORY_HAS_LUN 0x40
-
-/*
- * A unit directory corresponds to a protocol supported by the
- * node. If a node supports e.g. IP/1394 and AV/C, its config rom has a
- * unit directory for each of these protocols.
- */
-struct unit_directory {
- struct node_entry *ne; /* The node which this directory belongs to */
- octlet_t address; /* Address of the unit directory on the node */
- u8 flags; /* Indicates which entries were read */
-
- quadlet_t vendor_id;
- struct csr1212_keyval *vendor_name_kv;
-
- quadlet_t model_id;
- struct csr1212_keyval *model_name_kv;
- quadlet_t specifier_id;
- quadlet_t version;
- quadlet_t directory_id;
-
- unsigned int id;
-
- int ignore_driver;
-
- int length; /* Number of quadlets */
-
- struct device device;
- struct device unit_dev;
-
- struct csr1212_keyval *ud_kv;
- u32 lun; /* logical unit number immediate value */
-};
-
-struct node_entry {
- u64 guid; /* GUID of this node */
- u32 guid_vendor_id; /* Top 24bits of guid */
-
- struct hpsb_host *host; /* Host this node is attached to */
- nodeid_t nodeid; /* NodeID */
- struct bus_options busopt; /* Bus Options */
- bool needs_probe;
- unsigned int generation; /* Synced with hpsb generation */
-
- /* The following is read from the config rom */
- u32 vendor_id;
- struct csr1212_keyval *vendor_name_kv;
-
- u32 capabilities;
-
- struct device device;
- struct device node_dev;
-
- /* Means this node is not attached anymore */
- bool in_limbo;
-
- struct csr1212_csr *csr;
-};
-
-struct hpsb_protocol_driver {
- /* The name of the driver, e.g. SBP2 or IP1394 */
- const char *name;
-
- /*
- * The device id table describing the protocols and/or devices
- * supported by this driver. This is used by the nodemgr to
- * decide if a driver could support a given node, but the
- * probe function below can implement further protocol
- * dependent or vendor dependent checking.
- */
- const struct ieee1394_device_id *id_table;
-
- /*
- * The update function is called when the node has just
- * survived a bus reset, i.e. it is still present on the bus.
- * However, it may be necessary to reestablish the connection
- * or log into the node again, depending on the protocol. If the
- * update fails (returns non-zero), we unbind the driver from this
- * device.
- */
- int (*update)(struct unit_directory *ud);
-
- /* Our LDM structure */
- struct device_driver driver;
-};
-
-int __hpsb_register_protocol(struct hpsb_protocol_driver *, struct module *);
-static inline int hpsb_register_protocol(struct hpsb_protocol_driver *driver)
-{
- return __hpsb_register_protocol(driver, THIS_MODULE);
-}
-
-void hpsb_unregister_protocol(struct hpsb_protocol_driver *driver);
-
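(A rough sketch of how a protocol driver would use this registration API; the driver name and update callback are placeholders, and the ID table is left NULL for brevity, whereas a real driver supplies one so the nodemgr can match unit directories.)

/* Hypothetical protocol driver skeleton. */
static int example_update(struct unit_directory *ud)
{
	/* Re-establish per-unit state after a bus reset; 0 keeps the binding. */
	return 0;
}

static struct hpsb_protocol_driver example_driver = {
	.name		= "example-proto",
	.id_table	= NULL,		/* a real driver lists ieee1394_device_id entries */
	.update		= example_update,
};

/* In module init/exit:
 *	hpsb_register_protocol(&example_driver);
 *	hpsb_unregister_protocol(&example_driver);
 */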
-static inline int hpsb_node_entry_valid(struct node_entry *ne)
-{
- return ne->generation == get_hpsb_generation(ne->host);
-}
-void hpsb_node_fill_packet(struct node_entry *ne, struct hpsb_packet *packet);
-int hpsb_node_write(struct node_entry *ne, u64 addr,
- quadlet_t *buffer, size_t length);
-static inline int hpsb_node_read(struct node_entry *ne, u64 addr,
- quadlet_t *buffer, size_t length)
-{
- unsigned int g = ne->generation;
-
- smp_rmb();
- return hpsb_read(ne->host, ne->nodeid, g, addr, buffer, length);
-}
-static inline int hpsb_node_lock(struct node_entry *ne, u64 addr, int extcode,
- quadlet_t *buffer, quadlet_t arg)
-{
- unsigned int g = ne->generation;
-
- smp_rmb();
- return hpsb_lock(ne->host, ne->nodeid, g, addr, extcode, buffer, arg);
-}
-int nodemgr_for_each_host(void *data, int (*cb)(struct hpsb_host *, void *));
-
-int init_ieee1394_nodemgr(void);
-void cleanup_ieee1394_nodemgr(void);
-
-/* The template for a host device */
-extern struct device nodemgr_dev_template_host;
-
-/* Bus attributes we export */
-extern struct bus_attribute *const fw_bus_attrs[];
-
-#endif /* _IEEE1394_NODEMGR_H */
diff --git a/drivers/ieee1394/ohci1394.c b/drivers/ieee1394/ohci1394.c
deleted file mode 100644
index 50815022cff1..000000000000
--- a/drivers/ieee1394/ohci1394.c
+++ /dev/null
@@ -1,3590 +0,0 @@
-/*
- * ohci1394.c - driver for OHCI 1394 boards
- * Copyright (C)1999,2000 Sebastien Rougeaux <sebastien.rougeaux@anu.edu.au>
- * Gord Peters <GordPeters@smarttech.com>
- * 2001 Ben Collins <bcollins@debian.org>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software Foundation,
- * Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
- */
-
-/*
- * Things known to be working:
- * . Async Request Transmit
- * . Async Response Receive
- * . Async Request Receive
- * . Async Response Transmit
- * . Iso Receive
- * . DMA mmap for iso receive
- * . Config ROM generation
- *
- * Things implemented, but still in test phase:
- * . Iso Transmit
- * . Async Stream Packets Transmit (Receive done via Iso interface)
- *
- * Things not implemented:
- * . DMA error recovery
- *
- * Known bugs:
- * . devctl BUS_RESET arg confusion (reset type or root holdoff?)
- * added LONG_RESET_ROOT and SHORT_RESET_ROOT for root holdoff --kk
- */
-
-/*
- * Acknowledgments:
- *
- * Adam J Richter <adam@yggdrasil.com>
- * . Use of pci_class to find device
- *
- * Emilie Chung <emilie.chung@axis.com>
- * . Tip on Async Request Filter
- *
- * Pascal Drolet <pascal.drolet@informission.ca>
- * . Various tips for optimization and functionalities
- *
- * Robert Ficklin <rficklin@westengineering.com>
- * . Loop in irq_handler
- *
- * James Goodwin <jamesg@Filanet.com>
- * . Various tips on initialization, self-id reception, etc.
- *
- * Albrecht Dress <ad@mpifr-bonn.mpg.de>
- * . Apple PowerBook detection
- *
- * Daniel Kobras <daniel.kobras@student.uni-tuebingen.de>
- * . Reset the board properly before leaving + misc cleanups
- *
- * Leon van Stuivenberg <leonvs@iae.nl>
- * . Bug fixes
- *
- * Ben Collins <bcollins@debian.org>
- * . Working big-endian support
- * . Updated to 2.4.x module scheme (PCI as well)
- * . Config ROM generation
- *
- * Manfred Weihs <weihs@ict.tuwien.ac.at>
- * . Reworked code for initiating bus resets
- * (long, short, with or without hold-off)
- *
- * Nandu Santhi <contactnandu@users.sourceforge.net>
- * . Added support for nVidia nForce2 onboard Firewire chipset
- *
- */
-
-#include <linux/bitops.h>
-#include <linux/kernel.h>
-#include <linux/list.h>
-#include <linux/slab.h>
-#include <linux/interrupt.h>
-#include <linux/wait.h>
-#include <linux/errno.h>
-#include <linux/module.h>
-#include <linux/moduleparam.h>
-#include <linux/pci.h>
-#include <linux/fs.h>
-#include <linux/poll.h>
-#include <asm/byteorder.h>
-#include <asm/atomic.h>
-#include <asm/uaccess.h>
-#include <linux/delay.h>
-#include <linux/spinlock.h>
-
-#include <asm/pgtable.h>
-#include <asm/page.h>
-#include <asm/irq.h>
-#include <linux/types.h>
-#include <linux/vmalloc.h>
-#include <linux/init.h>
-
-#ifdef CONFIG_PPC_PMAC
-#include <asm/machdep.h>
-#include <asm/pmac_feature.h>
-#include <asm/prom.h>
-#include <asm/pci-bridge.h>
-#endif
-
-#include "csr1212.h"
-#include "ieee1394.h"
-#include "ieee1394_types.h"
-#include "hosts.h"
-#include "dma.h"
-#include "iso.h"
-#include "ieee1394_core.h"
-#include "highlevel.h"
-#include "ohci1394.h"
-
-#ifdef CONFIG_IEEE1394_VERBOSEDEBUG
-#define OHCI1394_DEBUG
-#endif
-
-#ifdef DBGMSG
-#undef DBGMSG
-#endif
-
-#ifdef OHCI1394_DEBUG
-#define DBGMSG(fmt, args...) \
-printk(KERN_INFO "%s: fw-host%d: " fmt "\n" , OHCI1394_DRIVER_NAME, ohci->host->id , ## args)
-#else
-#define DBGMSG(fmt, args...) do {} while (0)
-#endif
-
-/* print general (card independent) information */
-#define PRINT_G(level, fmt, args...) \
-printk(level "%s: " fmt "\n" , OHCI1394_DRIVER_NAME , ## args)
-
-/* print card specific information */
-#define PRINT(level, fmt, args...) \
-printk(level "%s: fw-host%d: " fmt "\n" , OHCI1394_DRIVER_NAME, ohci->host->id , ## args)
-
-/* Module Parameters */
-static int phys_dma = 1;
-module_param(phys_dma, int, 0444);
-MODULE_PARM_DESC(phys_dma, "Enable physical DMA (default = 1).");
-
-static void dma_trm_tasklet(unsigned long data);
-static void dma_trm_reset(struct dma_trm_ctx *d);
-
-static int alloc_dma_rcv_ctx(struct ti_ohci *ohci, struct dma_rcv_ctx *d,
- enum context_type type, int ctx, int num_desc,
- int buf_size, int split_buf_size, int context_base);
-static void free_dma_rcv_ctx(struct dma_rcv_ctx *d);
-
-static int alloc_dma_trm_ctx(struct ti_ohci *ohci, struct dma_trm_ctx *d,
- enum context_type type, int ctx, int num_desc,
- int context_base);
-
-static void ohci1394_pci_remove(struct pci_dev *pdev);
-
-#ifndef __LITTLE_ENDIAN
-static const size_t hdr_sizes[] = {
- 3, /* TCODE_WRITEQ */
- 4, /* TCODE_WRITEB */
- 3, /* TCODE_WRITE_RESPONSE */
- 0, /* reserved */
- 3, /* TCODE_READQ */
- 4, /* TCODE_READB */
- 3, /* TCODE_READQ_RESPONSE */
- 4, /* TCODE_READB_RESPONSE */
- 1, /* TCODE_CYCLE_START */
- 4, /* TCODE_LOCK_REQUEST */
- 2, /* TCODE_ISO_DATA */
- 4, /* TCODE_LOCK_RESPONSE */
- /* rest is reserved or link-internal */
-};
-
-static inline void header_le32_to_cpu(quadlet_t *data, unsigned char tcode)
-{
- size_t size;
-
- if (unlikely(tcode >= ARRAY_SIZE(hdr_sizes)))
- return;
-
- size = hdr_sizes[tcode];
- while (size--)
- data[size] = le32_to_cpu(data[size]);
-}
-#else
-#define header_le32_to_cpu(w,x) do {} while (0)
-#endif /* !LITTLE_ENDIAN */
-
-/***********************************
- * IEEE-1394 functionality section *
- ***********************************/
-
-static u8 get_phy_reg(struct ti_ohci *ohci, u8 addr)
-{
- int i;
- unsigned long flags;
- quadlet_t r;
-
- spin_lock_irqsave (&ohci->phy_reg_lock, flags);
-
- reg_write(ohci, OHCI1394_PhyControl, (addr << 8) | 0x00008000);
-
- for (i = 0; i < OHCI_LOOP_COUNT; i++) {
- if (reg_read(ohci, OHCI1394_PhyControl) & 0x80000000)
- break;
-
- mdelay(1);
- }
-
- r = reg_read(ohci, OHCI1394_PhyControl);
-
- if (i >= OHCI_LOOP_COUNT)
- PRINT (KERN_ERR, "Get PHY Reg timeout [0x%08x/0x%08x/%d]",
- r, r & 0x80000000, i);
-
- spin_unlock_irqrestore (&ohci->phy_reg_lock, flags);
-
- return (r & 0x00ff0000) >> 16;
-}
-
-static void set_phy_reg(struct ti_ohci *ohci, u8 addr, u8 data)
-{
- int i;
- unsigned long flags;
- u32 r = 0;
-
- spin_lock_irqsave (&ohci->phy_reg_lock, flags);
-
- reg_write(ohci, OHCI1394_PhyControl, (addr << 8) | data | 0x00004000);
-
- for (i = 0; i < OHCI_LOOP_COUNT; i++) {
- r = reg_read(ohci, OHCI1394_PhyControl);
- if (!(r & 0x00004000))
- break;
-
- mdelay(1);
- }
-
- if (i == OHCI_LOOP_COUNT)
- PRINT (KERN_ERR, "Set PHY Reg timeout [0x%08x/0x%08x/%d]",
- r, r & 0x00004000, i);
-
- spin_unlock_irqrestore (&ohci->phy_reg_lock, flags);
-
- return;
-}
-
-/* ORs our value into the current value */
-static void set_phy_reg_mask(struct ti_ohci *ohci, u8 addr, u8 data)
-{
- u8 old;
-
- old = get_phy_reg (ohci, addr);
- old |= data;
- set_phy_reg (ohci, addr, old);
-
- return;
-}
-
-static void handle_selfid(struct ti_ohci *ohci, struct hpsb_host *host,
- int phyid, int isroot)
-{
- quadlet_t *q = ohci->selfid_buf_cpu;
- quadlet_t self_id_count=reg_read(ohci, OHCI1394_SelfIDCount);
- size_t size;
- quadlet_t q0, q1;
-
- /* Check status of self-id reception */
-
- if (ohci->selfid_swap)
- q0 = le32_to_cpu(q[0]);
- else
- q0 = q[0];
-
- if ((self_id_count & 0x80000000) ||
- ((self_id_count & 0x00FF0000) != (q0 & 0x00FF0000))) {
- PRINT(KERN_ERR,
- "Error in reception of SelfID packets [0x%08x/0x%08x] (count: %d)",
- self_id_count, q0, ohci->self_id_errors);
-
- /* Tip by James Goodwin <jamesg@Filanet.com>:
- * We had an error, generate another bus reset in response. */
- if (ohci->self_id_errors<OHCI1394_MAX_SELF_ID_ERRORS) {
- set_phy_reg_mask (ohci, 1, 0x40);
- ohci->self_id_errors++;
- } else {
- PRINT(KERN_ERR,
- "Too many errors on SelfID error reception, giving up!");
- }
- return;
- }
-
- /* SelfID Ok, reset error counter. */
- ohci->self_id_errors = 0;
-
- size = ((self_id_count & 0x00001FFC) >> 2) - 1;
- q++;
-
- while (size > 0) {
- if (ohci->selfid_swap) {
- q0 = le32_to_cpu(q[0]);
- q1 = le32_to_cpu(q[1]);
- } else {
- q0 = q[0];
- q1 = q[1];
- }
-
- if (q0 == ~q1) {
- DBGMSG ("SelfID packet 0x%x received", q0);
- hpsb_selfid_received(host, cpu_to_be32(q0));
- if (((q0 & 0x3f000000) >> 24) == phyid)
- DBGMSG ("SelfID for this node is 0x%08x", q0);
- } else {
- PRINT(KERN_ERR,
- "SelfID is inconsistent [0x%08x/0x%08x]", q0, q1);
- }
- q += 2;
- size -= 2;
- }
-
- DBGMSG("SelfID complete");
-
- return;
-}
-
-static void ohci_soft_reset(struct ti_ohci *ohci) {
- int i;
-
- reg_write(ohci, OHCI1394_HCControlSet, OHCI1394_HCControl_softReset);
-
- for (i = 0; i < OHCI_LOOP_COUNT; i++) {
- if (!(reg_read(ohci, OHCI1394_HCControlSet) & OHCI1394_HCControl_softReset))
- break;
- mdelay(1);
- }
- DBGMSG ("Soft reset finished");
-}
-
-
-/* Generate the dma receive prgs and start the context */
-static void initialize_dma_rcv_ctx(struct dma_rcv_ctx *d, int generate_irq)
-{
- struct ti_ohci *ohci = (struct ti_ohci*)(d->ohci);
- int i;
-
- ohci1394_stop_context(ohci, d->ctrlClear, NULL);
-
- for (i=0; i<d->num_desc; i++) {
- u32 c;
-
- c = DMA_CTL_INPUT_MORE | DMA_CTL_UPDATE | DMA_CTL_BRANCH;
- if (generate_irq)
- c |= DMA_CTL_IRQ;
-
- d->prg_cpu[i]->control = cpu_to_le32(c | d->buf_size);
-
- /* End of descriptor list? */
- if (i + 1 < d->num_desc) {
- d->prg_cpu[i]->branchAddress =
- cpu_to_le32((d->prg_bus[i+1] & 0xfffffff0) | 0x1);
- } else {
- d->prg_cpu[i]->branchAddress =
- cpu_to_le32((d->prg_bus[0] & 0xfffffff0));
- }
-
- d->prg_cpu[i]->address = cpu_to_le32(d->buf_bus[i]);
- d->prg_cpu[i]->status = cpu_to_le32(d->buf_size);
- }
-
- d->buf_ind = 0;
- d->buf_offset = 0;
-
- if (d->type == DMA_CTX_ISO) {
- /* Clear contextControl */
- reg_write(ohci, d->ctrlClear, 0xffffffff);
-
- /* Set bufferFill, isochHeader, multichannel for IR context */
- reg_write(ohci, d->ctrlSet, 0xd0000000);
-
- /* Set the context match register to match on all tags */
- reg_write(ohci, d->ctxtMatch, 0xf0000000);
-
- /* Clear the multi channel mask high and low registers */
- reg_write(ohci, OHCI1394_IRMultiChanMaskHiClear, 0xffffffff);
- reg_write(ohci, OHCI1394_IRMultiChanMaskLoClear, 0xffffffff);
-
- /* Set up isoRecvIntMask to generate interrupts */
- reg_write(ohci, OHCI1394_IsoRecvIntMaskSet, 1 << d->ctx);
- }
-
- /* Tell the controller where the first AR program is */
- reg_write(ohci, d->cmdPtr, d->prg_bus[0] | 0x1);
-
- /* Run context */
- reg_write(ohci, d->ctrlSet, 0x00008000);
-
- DBGMSG("Receive DMA ctx=%d initialized", d->ctx);
-}
-
-/* Initialize the dma transmit context */
-static void initialize_dma_trm_ctx(struct dma_trm_ctx *d)
-{
- struct ti_ohci *ohci = (struct ti_ohci*)(d->ohci);
-
- /* Stop the context */
- ohci1394_stop_context(ohci, d->ctrlClear, NULL);
-
- d->prg_ind = 0;
- d->sent_ind = 0;
- d->free_prgs = d->num_desc;
- d->branchAddrPtr = NULL;
- INIT_LIST_HEAD(&d->fifo_list);
- INIT_LIST_HEAD(&d->pending_list);
-
- if (d->type == DMA_CTX_ISO) {
- /* enable interrupts */
- reg_write(ohci, OHCI1394_IsoXmitIntMaskSet, 1 << d->ctx);
- }
-
- DBGMSG("Transmit DMA ctx=%d initialized", d->ctx);
-}
-
-/* Count the number of available iso contexts */
-static int get_nb_iso_ctx(struct ti_ohci *ohci, int reg)
-{
- u32 tmp;
-
- reg_write(ohci, reg, 0xffffffff);
- tmp = reg_read(ohci, reg);
-
- DBGMSG("Iso contexts reg: %08x implemented: %08x", reg, tmp);
-
- /* Count the number of contexts */
- return hweight32(tmp);
-}
-
-/* Global initialization */
-static void ohci_initialize(struct ti_ohci *ohci)
-{
- quadlet_t buf;
- int num_ports, i;
-
- spin_lock_init(&ohci->phy_reg_lock);
-
- /* Put some defaults into these undefined bus options */
- buf = reg_read(ohci, OHCI1394_BusOptions);
- buf |= 0x60000000; /* Enable CMC and ISC */
- if (hpsb_disable_irm)
- buf &= ~0x80000000;
- else
- buf |= 0x80000000; /* Enable IRMC */
- buf &= ~0x00ff0000; /* XXX: Set cyc_clk_acc to zero for now */
- buf &= ~0x18000000; /* Disable PMC and BMC */
- reg_write(ohci, OHCI1394_BusOptions, buf);
-
- /* Set the bus number */
- reg_write(ohci, OHCI1394_NodeID, 0x0000ffc0);
-
- /* Enable posted writes */
- reg_write(ohci, OHCI1394_HCControlSet, OHCI1394_HCControl_postedWriteEnable);
-
- /* Clear link control register */
- reg_write(ohci, OHCI1394_LinkControlClear, 0xffffffff);
-
- /* Enable cycle timer and cycle master and set the IRM
- * contender bit in our self ID packets if appropriate. */
- reg_write(ohci, OHCI1394_LinkControlSet,
- OHCI1394_LinkControl_CycleTimerEnable |
- OHCI1394_LinkControl_CycleMaster);
- i = get_phy_reg(ohci, 4) | PHY_04_LCTRL;
- if (hpsb_disable_irm)
- i &= ~PHY_04_CONTENDER;
- else
- i |= PHY_04_CONTENDER;
- set_phy_reg(ohci, 4, i);
-
- /* Set up self-id dma buffer */
- reg_write(ohci, OHCI1394_SelfIDBuffer, ohci->selfid_buf_bus);
-
- /* enable self-id */
- reg_write(ohci, OHCI1394_LinkControlSet, OHCI1394_LinkControl_RcvSelfID);
-
- /* Set the Config ROM mapping register */
- reg_write(ohci, OHCI1394_ConfigROMmap, ohci->csr_config_rom_bus);
-
- /* Now get our max packet size */
- ohci->max_packet_size =
- 1<<(((reg_read(ohci, OHCI1394_BusOptions)>>12)&0xf)+1);
-
- /* Clear the interrupt mask */
- reg_write(ohci, OHCI1394_IsoRecvIntMaskClear, 0xffffffff);
- reg_write(ohci, OHCI1394_IsoRecvIntEventClear, 0xffffffff);
-
- /* Clear the interrupt mask */
- reg_write(ohci, OHCI1394_IsoXmitIntMaskClear, 0xffffffff);
- reg_write(ohci, OHCI1394_IsoXmitIntEventClear, 0xffffffff);
-
- /* Initialize AR dma */
- initialize_dma_rcv_ctx(&ohci->ar_req_context, 0);
- initialize_dma_rcv_ctx(&ohci->ar_resp_context, 0);
-
- /* Initialize AT dma */
- initialize_dma_trm_ctx(&ohci->at_req_context);
- initialize_dma_trm_ctx(&ohci->at_resp_context);
-
- /* Accept AR requests from all nodes */
- reg_write(ohci, OHCI1394_AsReqFilterHiSet, 0x80000000);
-
- /* Set the address range of the physical response unit.
- * Most controllers do not implement it as a writable register though.
- * They will keep a hardwired offset of 0x00010000 and show 0x0 as
- * register content.
- * To actually enable physical responses is the job of our interrupt
- * handler which programs the physical request filter. */
- reg_write(ohci, OHCI1394_PhyUpperBound,
- OHCI1394_PHYS_UPPER_BOUND_PROGRAMMED >> 16);
-
- DBGMSG("physUpperBoundOffset=%08x",
- reg_read(ohci, OHCI1394_PhyUpperBound));
-
- /* Specify AT retries */
- reg_write(ohci, OHCI1394_ATRetries,
- OHCI1394_MAX_AT_REQ_RETRIES |
- (OHCI1394_MAX_AT_RESP_RETRIES<<4) |
- (OHCI1394_MAX_PHYS_RESP_RETRIES<<8));
-
- /* We don't want hardware swapping */
- reg_write(ohci, OHCI1394_HCControlClear, OHCI1394_HCControl_noByteSwap);
-
- /* Enable interrupts */
- reg_write(ohci, OHCI1394_IntMaskSet,
- OHCI1394_unrecoverableError |
- OHCI1394_masterIntEnable |
- OHCI1394_busReset |
- OHCI1394_selfIDComplete |
- OHCI1394_RSPkt |
- OHCI1394_RQPkt |
- OHCI1394_respTxComplete |
- OHCI1394_reqTxComplete |
- OHCI1394_isochRx |
- OHCI1394_isochTx |
- OHCI1394_postedWriteErr |
- OHCI1394_cycleTooLong |
- OHCI1394_cycleInconsistent);
-
- /* Enable link */
- reg_write(ohci, OHCI1394_HCControlSet, OHCI1394_HCControl_linkEnable);
-
- buf = reg_read(ohci, OHCI1394_Version);
- PRINT(KERN_INFO, "OHCI-1394 %d.%d (PCI): IRQ=[%d] "
- "MMIO=[%llx-%llx] Max Packet=[%d] IR/IT contexts=[%d/%d]",
- ((((buf) >> 16) & 0xf) + (((buf) >> 20) & 0xf) * 10),
- ((((buf) >> 4) & 0xf) + ((buf) & 0xf) * 10), ohci->dev->irq,
- (unsigned long long)pci_resource_start(ohci->dev, 0),
- (unsigned long long)pci_resource_start(ohci->dev, 0) + OHCI1394_REGISTER_SIZE - 1,
- ohci->max_packet_size,
- ohci->nb_iso_rcv_ctx, ohci->nb_iso_xmit_ctx);
-
- /* Check all of our ports to make sure that if anything is
- * connected, we enable that port. */
- num_ports = get_phy_reg(ohci, 2) & 0xf;
- for (i = 0; i < num_ports; i++) {
- unsigned int status;
-
- set_phy_reg(ohci, 7, i);
- status = get_phy_reg(ohci, 8);
-
- if (status & 0x20)
- set_phy_reg(ohci, 8, status & ~1);
- }
-
- /* Serial EEPROM Sanity check. */
- if ((ohci->max_packet_size < 512) ||
- (ohci->max_packet_size > 4096)) {
- /* Serial EEPROM contents are suspect, set a sane max packet
- * size and print the raw contents for bug reports if verbose
- * debug is enabled. */
-#ifdef CONFIG_IEEE1394_VERBOSEDEBUG
- int i;
-#endif
-
- PRINT(KERN_DEBUG, "Serial EEPROM has suspicious values, "
- "attempting to set max_packet_size to 512 bytes");
- reg_write(ohci, OHCI1394_BusOptions,
- (reg_read(ohci, OHCI1394_BusOptions) & 0xf007) | 0x8002);
- ohci->max_packet_size = 512;
-#ifdef CONFIG_IEEE1394_VERBOSEDEBUG
- PRINT(KERN_DEBUG, " EEPROM Present: %d",
- (reg_read(ohci, OHCI1394_Version) >> 24) & 0x1);
- reg_write(ohci, OHCI1394_GUID_ROM, 0x80000000);
-
- for (i = 0;
- ((i < 1000) &&
- (reg_read(ohci, OHCI1394_GUID_ROM) & 0x80000000)); i++)
- udelay(10);
-
- for (i = 0; i < 0x20; i++) {
- reg_write(ohci, OHCI1394_GUID_ROM, 0x02000000);
- PRINT(KERN_DEBUG, " EEPROM %02x: %02x", i,
- (reg_read(ohci, OHCI1394_GUID_ROM) >> 16) & 0xff);
- }
-#endif
- }
-}
-
-/*
- * Insert a packet in the DMA fifo and generate the DMA prg
- * FIXME: rewrite the program in order to accept packets crossing
- * page boundaries.
- * Also check that a single DMA descriptor doesn't cross a
- * page boundary.
- */
-static void insert_packet(struct ti_ohci *ohci,
- struct dma_trm_ctx *d, struct hpsb_packet *packet)
-{
- u32 cycleTimer;
- int idx = d->prg_ind;
-
- DBGMSG("Inserting packet for node " NODE_BUS_FMT
- ", tlabel=%d, tcode=0x%x, speed=%d",
- NODE_BUS_ARGS(ohci->host, packet->node_id), packet->tlabel,
- packet->tcode, packet->speed_code);
-
- d->prg_cpu[idx]->begin.address = 0;
- d->prg_cpu[idx]->begin.branchAddress = 0;
-
- if (d->type == DMA_CTX_ASYNC_RESP) {
- /*
- * For response packets, we need to put a timeout value in
- * the 16 lower bits of the status... let's try 1 sec timeout
- */
- cycleTimer = reg_read(ohci, OHCI1394_IsochronousCycleTimer);
- d->prg_cpu[idx]->begin.status = cpu_to_le32(
- (((((cycleTimer>>25)&0x7)+1)&0x7)<<13) |
- ((cycleTimer&0x01fff000)>>12));
-
- DBGMSG("cycleTimer: %08x timeStamp: %08x",
- cycleTimer, d->prg_cpu[idx]->begin.status);
- } else
- d->prg_cpu[idx]->begin.status = 0;
-
- if ( (packet->type == hpsb_async) || (packet->type == hpsb_raw) ) {
-
- if (packet->type == hpsb_raw) {
- d->prg_cpu[idx]->data[0] = cpu_to_le32(OHCI1394_TCODE_PHY<<4);
- d->prg_cpu[idx]->data[1] = cpu_to_le32(packet->header[0]);
- d->prg_cpu[idx]->data[2] = cpu_to_le32(packet->header[1]);
- } else {
- d->prg_cpu[idx]->data[0] = packet->speed_code<<16 |
- (packet->header[0] & 0xFFFF);
-
- if (packet->tcode == TCODE_ISO_DATA) {
- /* Sending an async stream packet */
- d->prg_cpu[idx]->data[1] = packet->header[0] & 0xFFFF0000;
- } else {
- /* Sending a normal async request or response */
- d->prg_cpu[idx]->data[1] =
- (packet->header[1] & 0xFFFF) |
- (packet->header[0] & 0xFFFF0000);
- d->prg_cpu[idx]->data[2] = packet->header[2];
- d->prg_cpu[idx]->data[3] = packet->header[3];
- }
- header_le32_to_cpu(d->prg_cpu[idx]->data, packet->tcode);
- }
-
- if (packet->data_size) { /* block transmit */
- if (packet->tcode == TCODE_STREAM_DATA){
- d->prg_cpu[idx]->begin.control =
- cpu_to_le32(DMA_CTL_OUTPUT_MORE |
- DMA_CTL_IMMEDIATE | 0x8);
- } else {
- d->prg_cpu[idx]->begin.control =
- cpu_to_le32(DMA_CTL_OUTPUT_MORE |
- DMA_CTL_IMMEDIATE | 0x10);
- }
- d->prg_cpu[idx]->end.control =
- cpu_to_le32(DMA_CTL_OUTPUT_LAST |
- DMA_CTL_IRQ |
- DMA_CTL_BRANCH |
- packet->data_size);
- /*
- * Check that the packet data buffer
- * does not cross a page boundary.
- *
- * XXX Fix this some day. eth1394 seems to trigger
- * it, but ignoring it doesn't seem to cause a
- * problem.
- */
-#if 0
- if (cross_bound((unsigned long)packet->data,
- packet->data_size)>0) {
- /* FIXME: do something about it */
- PRINT(KERN_ERR,
- "%s: packet data addr: %p size %Zd bytes "
- "cross page boundary", __func__,
- packet->data, packet->data_size);
- }
-#endif
- d->prg_cpu[idx]->end.address = cpu_to_le32(
- pci_map_single(ohci->dev, packet->data,
- packet->data_size,
- PCI_DMA_TODEVICE));
-
- d->prg_cpu[idx]->end.branchAddress = 0;
- d->prg_cpu[idx]->end.status = 0;
- if (d->branchAddrPtr)
- *(d->branchAddrPtr) =
- cpu_to_le32(d->prg_bus[idx] | 0x3);
- d->branchAddrPtr =
- &(d->prg_cpu[idx]->end.branchAddress);
- } else { /* quadlet transmit */
- if (packet->type == hpsb_raw)
- d->prg_cpu[idx]->begin.control =
- cpu_to_le32(DMA_CTL_OUTPUT_LAST |
- DMA_CTL_IMMEDIATE |
- DMA_CTL_IRQ |
- DMA_CTL_BRANCH |
- (packet->header_size + 4));
- else
- d->prg_cpu[idx]->begin.control =
- cpu_to_le32(DMA_CTL_OUTPUT_LAST |
- DMA_CTL_IMMEDIATE |
- DMA_CTL_IRQ |
- DMA_CTL_BRANCH |
- packet->header_size);
-
- if (d->branchAddrPtr)
- *(d->branchAddrPtr) =
- cpu_to_le32(d->prg_bus[idx] | 0x2);
- d->branchAddrPtr =
- &(d->prg_cpu[idx]->begin.branchAddress);
- }
-
- } else { /* iso packet */
- d->prg_cpu[idx]->data[0] = packet->speed_code<<16 |
- (packet->header[0] & 0xFFFF);
- d->prg_cpu[idx]->data[1] = packet->header[0] & 0xFFFF0000;
- header_le32_to_cpu(d->prg_cpu[idx]->data, packet->tcode);
-
- d->prg_cpu[idx]->begin.control =
- cpu_to_le32(DMA_CTL_OUTPUT_MORE |
- DMA_CTL_IMMEDIATE | 0x8);
- d->prg_cpu[idx]->end.control =
- cpu_to_le32(DMA_CTL_OUTPUT_LAST |
- DMA_CTL_UPDATE |
- DMA_CTL_IRQ |
- DMA_CTL_BRANCH |
- packet->data_size);
- d->prg_cpu[idx]->end.address = cpu_to_le32(
- pci_map_single(ohci->dev, packet->data,
- packet->data_size, PCI_DMA_TODEVICE));
-
- d->prg_cpu[idx]->end.branchAddress = 0;
- d->prg_cpu[idx]->end.status = 0;
- DBGMSG("Iso xmit context info: header[%08x %08x]\n"
- " begin=%08x %08x %08x %08x\n"
- " %08x %08x %08x %08x\n"
- " end =%08x %08x %08x %08x",
- d->prg_cpu[idx]->data[0], d->prg_cpu[idx]->data[1],
- d->prg_cpu[idx]->begin.control,
- d->prg_cpu[idx]->begin.address,
- d->prg_cpu[idx]->begin.branchAddress,
- d->prg_cpu[idx]->begin.status,
- d->prg_cpu[idx]->data[0],
- d->prg_cpu[idx]->data[1],
- d->prg_cpu[idx]->data[2],
- d->prg_cpu[idx]->data[3],
- d->prg_cpu[idx]->end.control,
- d->prg_cpu[idx]->end.address,
- d->prg_cpu[idx]->end.branchAddress,
- d->prg_cpu[idx]->end.status);
- if (d->branchAddrPtr)
- *(d->branchAddrPtr) = cpu_to_le32(d->prg_bus[idx] | 0x3);
- d->branchAddrPtr = &(d->prg_cpu[idx]->end.branchAddress);
- }
- d->free_prgs--;
-
- /* queue the packet in the appropriate context queue */
- list_add_tail(&packet->driver_list, &d->fifo_list);
- d->prg_ind = (d->prg_ind + 1) % d->num_desc;
-}
-
-/*
- * This function fills the FIFO with any pending packets
- * and runs or wakes up the DMA prg if necessary.
- *
- * The function MUST be called with the d->lock held.
- */
-static void dma_trm_flush(struct ti_ohci *ohci, struct dma_trm_ctx *d)
-{
- struct hpsb_packet *packet, *ptmp;
- int idx = d->prg_ind;
- int z = 0;
-
- /* insert the packets into the dma fifo */
- list_for_each_entry_safe(packet, ptmp, &d->pending_list, driver_list) {
- if (!d->free_prgs)
- break;
-
- /* For the first packet only */
- if (!z)
- z = (packet->data_size) ? 3 : 2;
-
- /* Insert the packet */
- list_del_init(&packet->driver_list);
- insert_packet(ohci, d, packet);
- }
-
- /* Nothing was inserted: either no free prgs or no pending packets */
- if (z == 0)
- return;
-
- /* Is the context running? (It should be, unless this is
- the first packet to be sent in this context.) */
- if (!(reg_read(ohci, d->ctrlSet) & 0x8000)) {
- u32 nodeId = reg_read(ohci, OHCI1394_NodeID);
-
- DBGMSG("Starting transmit DMA ctx=%d",d->ctx);
- reg_write(ohci, d->cmdPtr, d->prg_bus[idx] | z);
-
- /* Check that the node id is valid, and not 63 */
- if (!(nodeId & 0x80000000) || (nodeId & 0x3f) == 63)
- PRINT(KERN_ERR, "Running dma failed because Node ID is not valid");
- else
- reg_write(ohci, d->ctrlSet, 0x8000);
- } else {
- /* Wake up the dma context if necessary */
- if (!(reg_read(ohci, d->ctrlSet) & 0x400))
- DBGMSG("Waking transmit DMA ctx=%d",d->ctx);
-
- /* do this always, to avoid a race condition */
- reg_write(ohci, d->ctrlSet, 0x1000);
- }
-
- return;
-}
-
-/* Transmission of an async or iso packet */
-static int ohci_transmit(struct hpsb_host *host, struct hpsb_packet *packet)
-{
- struct ti_ohci *ohci = host->hostdata;
- struct dma_trm_ctx *d;
- unsigned long flags;
-
- if (packet->data_size > ohci->max_packet_size) {
- PRINT(KERN_ERR,
- "Transmit packet size %Zd is too big",
- packet->data_size);
- return -EOVERFLOW;
- }
-
- if (packet->type == hpsb_raw)
- d = &ohci->at_req_context;
- else if ((packet->tcode & 0x02) && (packet->tcode != TCODE_ISO_DATA))
- d = &ohci->at_resp_context;
- else
- d = &ohci->at_req_context;
-
- spin_lock_irqsave(&d->lock,flags);
-
- list_add_tail(&packet->driver_list, &d->pending_list);
-
- dma_trm_flush(ohci, d);
-
- spin_unlock_irqrestore(&d->lock,flags);
-
- return 0;
-}
-
-static int ohci_devctl(struct hpsb_host *host, enum devctl_cmd cmd, int arg)
-{
- struct ti_ohci *ohci = host->hostdata;
- int retval = 0, phy_reg;
-
- switch (cmd) {
- case RESET_BUS:
- switch (arg) {
- case SHORT_RESET:
- phy_reg = get_phy_reg(ohci, 5);
- phy_reg |= 0x40;
- set_phy_reg(ohci, 5, phy_reg); /* set ISBR */
- break;
- case LONG_RESET:
- phy_reg = get_phy_reg(ohci, 1);
- phy_reg |= 0x40;
- set_phy_reg(ohci, 1, phy_reg); /* set IBR */
- break;
- case SHORT_RESET_NO_FORCE_ROOT:
- phy_reg = get_phy_reg(ohci, 1);
- if (phy_reg & 0x80) {
- phy_reg &= ~0x80;
- set_phy_reg(ohci, 1, phy_reg); /* clear RHB */
- }
-
- phy_reg = get_phy_reg(ohci, 5);
- phy_reg |= 0x40;
- set_phy_reg(ohci, 5, phy_reg); /* set ISBR */
- break;
- case LONG_RESET_NO_FORCE_ROOT:
- phy_reg = get_phy_reg(ohci, 1);
- phy_reg &= ~0x80;
- phy_reg |= 0x40;
- set_phy_reg(ohci, 1, phy_reg); /* clear RHB, set IBR */
- break;
- case SHORT_RESET_FORCE_ROOT:
- phy_reg = get_phy_reg(ohci, 1);
- if (!(phy_reg & 0x80)) {
- phy_reg |= 0x80;
- set_phy_reg(ohci, 1, phy_reg); /* set RHB */
- }
-
- phy_reg = get_phy_reg(ohci, 5);
- phy_reg |= 0x40;
- set_phy_reg(ohci, 5, phy_reg); /* set ISBR */
- break;
- case LONG_RESET_FORCE_ROOT:
- phy_reg = get_phy_reg(ohci, 1);
- phy_reg |= 0xc0;
- set_phy_reg(ohci, 1, phy_reg); /* set RHB and IBR */
- break;
- default:
- retval = -1;
- }
- break;
-
- case GET_CYCLE_COUNTER:
- retval = reg_read(ohci, OHCI1394_IsochronousCycleTimer);
- break;
-
- case SET_CYCLE_COUNTER:
- reg_write(ohci, OHCI1394_IsochronousCycleTimer, arg);
- break;
-
- case SET_BUS_ID:
- PRINT(KERN_ERR, "devctl command SET_BUS_ID err");
- break;
-
- case ACT_CYCLE_MASTER:
- if (arg) {
- /* check if we are root and other nodes are present */
- u32 nodeId = reg_read(ohci, OHCI1394_NodeID);
- if ((nodeId & (1<<30)) && (nodeId & 0x3f)) {
- /*
- * enable cycleTimer, cycleMaster
- */
- DBGMSG("Cycle master enabled");
- reg_write(ohci, OHCI1394_LinkControlSet,
- OHCI1394_LinkControl_CycleTimerEnable |
- OHCI1394_LinkControl_CycleMaster);
- }
- } else {
- /* disable cycleTimer, cycleMaster, cycleSource */
- reg_write(ohci, OHCI1394_LinkControlClear,
- OHCI1394_LinkControl_CycleTimerEnable |
- OHCI1394_LinkControl_CycleMaster |
- OHCI1394_LinkControl_CycleSource);
- }
- break;
-
- case CANCEL_REQUESTS:
- DBGMSG("Cancel request received");
- dma_trm_reset(&ohci->at_req_context);
- dma_trm_reset(&ohci->at_resp_context);
- break;
-
- default:
- PRINT_G(KERN_ERR, "ohci_devctl cmd %d not implemented yet",
- cmd);
- break;
- }
- return retval;
-}
-
-/***********************************
- * rawiso ISO reception *
- ***********************************/
-
-/*
- We use either buffer-fill or packet-per-buffer DMA mode. The DMA
- buffer is split into "blocks" (regions described by one DMA
- descriptor). Each block must be one page or less in size, and
- must not cross a page boundary.
-
- There is one little wrinkle with buffer-fill mode: a packet that
- starts in the final block may wrap around into the first block. But
- the user API expects all packets to be contiguous. Our solution is
- to keep the very last page of the DMA buffer in reserve - if a
- packet spans the gap, we copy its tail into this page.
-*/
-
-struct ohci_iso_recv {
- struct ti_ohci *ohci;
-
- struct ohci1394_iso_tasklet task;
- int task_active;
-
- enum { BUFFER_FILL_MODE = 0,
- PACKET_PER_BUFFER_MODE = 1 } dma_mode;
-
- /* memory and PCI mapping for the DMA descriptors */
- struct dma_prog_region prog;
- struct dma_cmd *block; /* = (struct dma_cmd*) prog.virt */
-
- /* how many DMA blocks fit in the buffer */
- unsigned int nblocks;
-
- /* stride of DMA blocks */
- unsigned int buf_stride;
-
- /* number of blocks to batch between interrupts */
- int block_irq_interval;
-
- /* block that DMA will finish next */
- int block_dma;
-
- /* (buffer-fill only) block that the reader will release next */
- int block_reader;
-
- /* (buffer-fill only) bytes of buffer the reader has released,
- less than one block */
- int released_bytes;
-
- /* (buffer-fill only) buffer offset at which the next packet will appear */
- int dma_offset;
-
- /* OHCI DMA context control registers */
- u32 ContextControlSet;
- u32 ContextControlClear;
- u32 CommandPtr;
- u32 ContextMatch;
-};
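(The wrap-around handling mentioned in the block comment above, i.e. copying the tail of a packet that spills past the end of the data area into the reserved guard page, is implemented further down in this file. The following is a rough illustration only, with made-up names, assuming iso->data_buf.kvirt maps the whole DMA buffer contiguously.)

/* Sketch: if a packet of 'len' bytes starting at 'offset' runs past the end
 * of the data area (buf_stride * nblocks bytes), copy the part that wrapped
 * to the start of the buffer into the guard page that follows the data area,
 * so the packet appears contiguous to the reader. */
static void sketch_fixup_wraparound(struct hpsb_iso *iso,
				    struct ohci_iso_recv *recv,
				    unsigned int offset, unsigned int len)
{
	unsigned int data_size = recv->buf_stride * recv->nblocks;

	if (offset + len > data_size)
		memcpy(iso->data_buf.kvirt + data_size,		/* guard page */
		       iso->data_buf.kvirt,			/* wrapped tail */
		       offset + len - data_size);
}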
-
-static void ohci_iso_recv_task(unsigned long data);
-static void ohci_iso_recv_stop(struct hpsb_iso *iso);
-static void ohci_iso_recv_shutdown(struct hpsb_iso *iso);
-static int ohci_iso_recv_start(struct hpsb_iso *iso, int cycle, int tag_mask, int sync);
-static void ohci_iso_recv_program(struct hpsb_iso *iso);
-
-static int ohci_iso_recv_init(struct hpsb_iso *iso)
-{
- struct ti_ohci *ohci = iso->host->hostdata;
- struct ohci_iso_recv *recv;
- int ctx;
- int ret = -ENOMEM;
-
- recv = kmalloc(sizeof(*recv), GFP_KERNEL);
- if (!recv)
- return -ENOMEM;
-
- iso->hostdata = recv;
- recv->ohci = ohci;
- recv->task_active = 0;
- dma_prog_region_init(&recv->prog);
- recv->block = NULL;
-
- /* use buffer-fill mode, unless irq_interval is 1
- (note: multichannel requires buffer-fill) */
-
- if (((iso->irq_interval == 1 && iso->dma_mode == HPSB_ISO_DMA_OLD_ABI) ||
- iso->dma_mode == HPSB_ISO_DMA_PACKET_PER_BUFFER) && iso->channel != -1) {
- recv->dma_mode = PACKET_PER_BUFFER_MODE;
- } else {
- recv->dma_mode = BUFFER_FILL_MODE;
- }
-
- /* set nblocks, buf_stride, block_irq_interval */
-
- if (recv->dma_mode == BUFFER_FILL_MODE) {
- recv->buf_stride = PAGE_SIZE;
-
- /* one block per page of data in the DMA buffer, minus the final guard page */
- recv->nblocks = iso->buf_size/PAGE_SIZE - 1;
- if (recv->nblocks < 3) {
- DBGMSG("ohci_iso_recv_init: DMA buffer too small");
- goto err;
- }
-
- /* iso->irq_interval is in packets - translate that to blocks */
- if (iso->irq_interval == 1)
- recv->block_irq_interval = 1;
- else
- recv->block_irq_interval = iso->irq_interval *
- ((recv->nblocks+1)/iso->buf_packets);
- if (recv->block_irq_interval*4 > recv->nblocks)
- recv->block_irq_interval = recv->nblocks/4;
- if (recv->block_irq_interval < 1)
- recv->block_irq_interval = 1;
-
- } else {
- int max_packet_size;
-
- recv->nblocks = iso->buf_packets;
- recv->block_irq_interval = iso->irq_interval;
- if (recv->block_irq_interval * 4 > iso->buf_packets)
- recv->block_irq_interval = iso->buf_packets / 4;
- if (recv->block_irq_interval < 1)
- recv->block_irq_interval = 1;
-
- /* choose a buffer stride: must be a power of 2, and <= PAGE_SIZE */
-
- max_packet_size = iso->buf_size / iso->buf_packets;
-
- for (recv->buf_stride = 8; recv->buf_stride < max_packet_size;
- recv->buf_stride *= 2);
-
- if (recv->buf_stride*iso->buf_packets > iso->buf_size ||
- recv->buf_stride > PAGE_SIZE) {
- /* this shouldn't happen, but anyway... */
- DBGMSG("ohci_iso_recv_init: problem choosing a buffer stride");
- goto err;
- }
- }
-
- recv->block_reader = 0;
- recv->released_bytes = 0;
- recv->block_dma = 0;
- recv->dma_offset = 0;
-
- /* size of DMA program = one descriptor per block */
- if (dma_prog_region_alloc(&recv->prog,
- sizeof(struct dma_cmd) * recv->nblocks,
- recv->ohci->dev))
- goto err;
-
- recv->block = (struct dma_cmd*) recv->prog.kvirt;
-
- ohci1394_init_iso_tasklet(&recv->task,
- iso->channel == -1 ? OHCI_ISO_MULTICHANNEL_RECEIVE :
- OHCI_ISO_RECEIVE,
- ohci_iso_recv_task, (unsigned long) iso);
-
- if (ohci1394_register_iso_tasklet(recv->ohci, &recv->task) < 0) {
- ret = -EBUSY;
- goto err;
- }
-
- recv->task_active = 1;
-
- /* recv context registers are spaced 32 bytes apart */
- ctx = recv->task.context;
- recv->ContextControlSet = OHCI1394_IsoRcvContextControlSet + 32 * ctx;
- recv->ContextControlClear = OHCI1394_IsoRcvContextControlClear + 32 * ctx;
- recv->CommandPtr = OHCI1394_IsoRcvCommandPtr + 32 * ctx;
- recv->ContextMatch = OHCI1394_IsoRcvContextMatch + 32 * ctx;
-
- if (iso->channel == -1) {
- /* clear multi-channel selection mask */
- reg_write(recv->ohci, OHCI1394_IRMultiChanMaskHiClear, 0xFFFFFFFF);
- reg_write(recv->ohci, OHCI1394_IRMultiChanMaskLoClear, 0xFFFFFFFF);
- }
-
- /* write the DMA program */
- ohci_iso_recv_program(iso);
-
- DBGMSG("ohci_iso_recv_init: %s mode, DMA buffer is %lu pages"
- " (%u bytes), using %u blocks, buf_stride %u, block_irq_interval %d",
- recv->dma_mode == BUFFER_FILL_MODE ?
- "buffer-fill" : "packet-per-buffer",
- iso->buf_size/PAGE_SIZE, iso->buf_size,
- recv->nblocks, recv->buf_stride, recv->block_irq_interval);
-
- return 0;
-
-err:
- ohci_iso_recv_shutdown(iso);
- return ret;
-}
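
The stride chosen for packet-per-buffer mode above is simply the smallest power of two, starting from a floor of 8 bytes, that holds the largest packet. A minimal standalone sketch of that selection (illustrative only, not part of the driver):

#include <stdio.h>

static unsigned int choose_stride(unsigned int max_packet_size)
{
	unsigned int stride;

	/* smallest power of two >= max_packet_size, with a floor of 8;
	 * the driver additionally rejects strides larger than PAGE_SIZE */
	for (stride = 8; stride < max_packet_size; stride *= 2)
		;
	return stride;
}

int main(void)
{
	printf("%u\n", choose_stride(1000));  /* prints 1024 */
	return 0;
}
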
-
-static void ohci_iso_recv_stop(struct hpsb_iso *iso)
-{
- struct ohci_iso_recv *recv = iso->hostdata;
-
- /* disable interrupts */
- reg_write(recv->ohci, OHCI1394_IsoRecvIntMaskClear, 1 << recv->task.context);
-
- /* halt DMA */
- ohci1394_stop_context(recv->ohci, recv->ContextControlClear, NULL);
-}
-
-static void ohci_iso_recv_shutdown(struct hpsb_iso *iso)
-{
- struct ohci_iso_recv *recv = iso->hostdata;
-
- if (recv->task_active) {
- ohci_iso_recv_stop(iso);
- ohci1394_unregister_iso_tasklet(recv->ohci, &recv->task);
- recv->task_active = 0;
- }
-
- dma_prog_region_free(&recv->prog);
- kfree(recv);
- iso->hostdata = NULL;
-}
-
-/* set up a "gapped" ring buffer DMA program */
-static void ohci_iso_recv_program(struct hpsb_iso *iso)
-{
- struct ohci_iso_recv *recv = iso->hostdata;
- int blk;
-
- /* address of 'branch' field in previous DMA descriptor */
- u32 *prev_branch = NULL;
-
- for (blk = 0; blk < recv->nblocks; blk++) {
- u32 control;
-
- /* the DMA descriptor */
- struct dma_cmd *cmd = &recv->block[blk];
-
- /* offset of the DMA descriptor relative to the DMA prog buffer */
- unsigned long prog_offset = blk * sizeof(struct dma_cmd);
-
- /* offset of this block's data within the DMA buffer */
- unsigned long buf_offset = blk * recv->buf_stride;
-
- if (recv->dma_mode == BUFFER_FILL_MODE) {
- control = 2 << 28; /* INPUT_MORE */
- } else {
- control = 3 << 28; /* INPUT_LAST */
- }
-
- control |= 8 << 24; /* s = 1, update xferStatus and resCount */
-
- /* interrupt on last block, and at intervals */
- if (blk == recv->nblocks-1 || (blk % recv->block_irq_interval) == 0) {
- control |= 3 << 20; /* want interrupt */
- }
-
- control |= 3 << 18; /* enable branch to address */
- control |= recv->buf_stride;
-
- cmd->control = cpu_to_le32(control);
- cmd->address = cpu_to_le32(dma_region_offset_to_bus(&iso->data_buf, buf_offset));
- cmd->branchAddress = 0; /* filled in on next loop */
- cmd->status = cpu_to_le32(recv->buf_stride);
-
- /* link the previous descriptor to this one */
- if (prev_branch) {
- *prev_branch = cpu_to_le32(dma_prog_region_offset_to_bus(&recv->prog, prog_offset) | 1);
- }
-
- prev_branch = &cmd->branchAddress;
- }
-
- /* the final descriptor's branch address and Z should be left at 0 */
-}
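
To make the bit arithmetic in the loop above easier to follow, here is a small standalone sketch (illustrative only, with informal names) of how the receive descriptor control word is assembled from the pieces the loop sets:

#include <stdint.h>

static uint32_t recv_descriptor_control(int buffer_fill, int want_irq,
                                        unsigned int req_count)
{
	uint32_t control;

	control  = (buffer_fill ? 2u : 3u) << 28; /* INPUT_MORE / INPUT_LAST */
	control |= 8u << 24;                      /* s=1: update xferStatus and resCount */
	if (want_irq)
		control |= 3u << 20;              /* interrupt when the block completes */
	control |= 3u << 18;                      /* branch to branchAddress */
	control |= req_count & 0xFFFF;            /* bytes this descriptor may receive */

	return control;
}
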
-
-/* listen or unlisten to a specific channel (multi-channel mode only) */
-static void ohci_iso_recv_change_channel(struct hpsb_iso *iso, unsigned char channel, int listen)
-{
- struct ohci_iso_recv *recv = iso->hostdata;
- int reg, i;
-
- if (channel < 32) {
- reg = listen ? OHCI1394_IRMultiChanMaskLoSet : OHCI1394_IRMultiChanMaskLoClear;
- i = channel;
- } else {
- reg = listen ? OHCI1394_IRMultiChanMaskHiSet : OHCI1394_IRMultiChanMaskHiClear;
- i = channel - 32;
- }
-
- reg_write(recv->ohci, reg, (1 << i));
-
- /* issue a dummy read to force all PCI writes to be posted immediately */
- mb();
- reg_read(recv->ohci, OHCI1394_IsochronousCycleTimer);
-}
-
-static void ohci_iso_recv_set_channel_mask(struct hpsb_iso *iso, u64 mask)
-{
- struct ohci_iso_recv *recv = iso->hostdata;
- int i;
-
- for (i = 0; i < 64; i++) {
- if (mask & (1ULL << i)) {
- if (i < 32)
- reg_write(recv->ohci, OHCI1394_IRMultiChanMaskLoSet, (1 << i));
- else
- reg_write(recv->ohci, OHCI1394_IRMultiChanMaskHiSet, (1 << (i-32)));
- } else {
- if (i < 32)
- reg_write(recv->ohci, OHCI1394_IRMultiChanMaskLoClear, (1 << i));
- else
- reg_write(recv->ohci, OHCI1394_IRMultiChanMaskHiClear, (1 << (i-32)));
- }
- }
-
- /* issue a dummy read to force all PCI writes to be posted immediately */
- mb();
- reg_read(recv->ohci, OHCI1394_IsochronousCycleTimer);
-}
-
-static int ohci_iso_recv_start(struct hpsb_iso *iso, int cycle, int tag_mask, int sync)
-{
- struct ohci_iso_recv *recv = iso->hostdata;
- struct ti_ohci *ohci = recv->ohci;
- u32 command, contextMatch;
-
- reg_write(recv->ohci, recv->ContextControlClear, 0xFFFFFFFF);
- wmb();
-
- /* always keep ISO headers */
- command = (1 << 30);
-
- if (recv->dma_mode == BUFFER_FILL_MODE)
- command |= (1 << 31);
-
- reg_write(recv->ohci, recv->ContextControlSet, command);
-
- /* match on specified tags */
- contextMatch = tag_mask << 28;
-
- if (iso->channel == -1) {
- /* enable multichannel reception */
- reg_write(recv->ohci, recv->ContextControlSet, (1 << 28));
- } else {
- /* listen on channel */
- contextMatch |= iso->channel;
- }
-
- if (cycle != -1) {
- u32 seconds;
-
- /* enable cycleMatch */
- reg_write(recv->ohci, recv->ContextControlSet, (1 << 29));
-
- /* set starting cycle */
- cycle &= 0x1FFF;
-
- /* 'cycle' is only mod 8000, but we also need two 'seconds' bits -
- just snarf them from the current time */
- seconds = reg_read(recv->ohci, OHCI1394_IsochronousCycleTimer) >> 25;
-
- /* advance one second to give some extra time for DMA to start */
- seconds += 1;
-
- cycle |= (seconds & 3) << 13;
-
- contextMatch |= cycle << 12;
- }
-
- if (sync != -1) {
- /* set sync flag on first DMA descriptor */
- struct dma_cmd *cmd = &recv->block[recv->block_dma];
- cmd->control |= cpu_to_le32(DMA_CTL_WAIT);
-
- /* match sync field */
- contextMatch |= (sync&0xf)<<8;
- }
-
- reg_write(recv->ohci, recv->ContextMatch, contextMatch);
-
- /* address of first descriptor block */
- command = dma_prog_region_offset_to_bus(&recv->prog,
- recv->block_dma * sizeof(struct dma_cmd));
- command |= 1; /* Z=1 */
-
- reg_write(recv->ohci, recv->CommandPtr, command);
-
- /* enable interrupts */
- reg_write(recv->ohci, OHCI1394_IsoRecvIntMaskSet, 1 << recv->task.context);
-
- wmb();
-
- /* run */
- reg_write(recv->ohci, recv->ContextControlSet, 0x8000);
-
- /* issue a dummy read of the cycle timer register to force
- all PCI writes to be posted immediately */
- mb();
- reg_read(recv->ohci, OHCI1394_IsochronousCycleTimer);
-
- /* check RUN */
- if (!(reg_read(recv->ohci, recv->ContextControlSet) & 0x8000)) {
- PRINT(KERN_ERR,
- "Error starting IR DMA (ContextControl 0x%08x)\n",
- reg_read(recv->ohci, recv->ContextControlSet));
- return -1;
- }
-
- return 0;
-}
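
The cycle-match value written above combines the 13-bit cycle number with two "seconds" bits snarfed from the cycle timer. A standalone sketch of that composition (not part of the driver):

#include <stdint.h>

static uint32_t cycle_match_bits(unsigned int cycle, uint32_t cycle_timer)
{
	uint32_t seconds = (cycle_timer >> 25) + 1; /* start one second in the future */
	uint32_t match = cycle & 0x1FFF;            /* cycle number, mod 8000 */

	match |= (seconds & 3) << 13;               /* two low-order seconds bits */
	return match << 12;                         /* cycleMatch field of ContextMatch */
}
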
-
-static void ohci_iso_recv_release_block(struct ohci_iso_recv *recv, int block)
-{
- /* re-use the DMA descriptor for the block by linking the previous descriptor to it */
-
- int next_i = block;
- int prev_i = (next_i == 0) ? (recv->nblocks - 1) : (next_i - 1);
-
- struct dma_cmd *next = &recv->block[next_i];
- struct dma_cmd *prev = &recv->block[prev_i];
-
- /* ignore out-of-range requests */
- if ((block < 0) || (block >= recv->nblocks))
- return;
-
- /* 'next' becomes the new end of the DMA chain,
- so disable branch and enable interrupt */
- next->branchAddress = 0;
- next->control |= cpu_to_le32(3 << 20);
- next->status = cpu_to_le32(recv->buf_stride);
-
- /* link prev to next */
- prev->branchAddress = cpu_to_le32(dma_prog_region_offset_to_bus(&recv->prog,
- sizeof(struct dma_cmd) * next_i)
- | 1); /* Z=1 */
-
- /* keep the interrupt enabled on the previous DMA descriptor only at interval boundaries */
- if ((prev_i % recv->block_irq_interval) == 0) {
- prev->control |= cpu_to_le32(3 << 20); /* enable interrupt */
- } else {
- prev->control &= cpu_to_le32(~(3<<20)); /* disable interrupt */
- }
- wmb();
-
- /* wake up DMA in case it fell asleep */
- reg_write(recv->ohci, recv->ContextControlSet, (1 << 12));
-}
-
-static void ohci_iso_recv_bufferfill_release(struct ohci_iso_recv *recv,
- struct hpsb_iso_packet_info *info)
-{
- /* release the memory where the packet was */
- recv->released_bytes += info->total_len;
-
- /* have we released enough memory for one block? */
- while (recv->released_bytes > recv->buf_stride) {
- ohci_iso_recv_release_block(recv, recv->block_reader);
- recv->block_reader = (recv->block_reader + 1) % recv->nblocks;
- recv->released_bytes -= recv->buf_stride;
- }
-}
-
-static inline void ohci_iso_recv_release(struct hpsb_iso *iso, struct hpsb_iso_packet_info *info)
-{
- struct ohci_iso_recv *recv = iso->hostdata;
- if (recv->dma_mode == BUFFER_FILL_MODE) {
- ohci_iso_recv_bufferfill_release(recv, info);
- } else {
- ohci_iso_recv_release_block(recv, info - iso->infos);
- }
-}
-
-/* parse all packets from blocks that have been fully received */
-static void ohci_iso_recv_bufferfill_parse(struct hpsb_iso *iso, struct ohci_iso_recv *recv)
-{
- int wake = 0;
- int runaway = 0;
- struct ti_ohci *ohci = recv->ohci;
-
- while (1) {
- /* we expect the next parsable packet to begin at recv->dma_offset */
- /* note: packet layout is as shown in section 10.6.1.1 of the OHCI spec */
-
- unsigned int offset;
- unsigned short len, cycle, total_len;
- unsigned char channel, tag, sy;
-
- unsigned char *p = iso->data_buf.kvirt;
-
- unsigned int this_block = recv->dma_offset/recv->buf_stride;
-
- /* don't loop indefinitely */
- if (runaway++ > 100000) {
- atomic_inc(&iso->overflows);
- PRINT(KERN_ERR,
- "IR DMA error - Runaway during buffer parsing!\n");
- break;
- }
-
- /* stop parsing once we arrive at block_dma (i.e. don't get ahead of DMA) */
- if (this_block == recv->block_dma)
- break;
-
- wake = 1;
-
- /* parse data length, tag, channel, and sy */
-
- /* note: we keep our own local copies of 'len' and 'offset'
- so the user can't mess with them by poking in the mmap area */
-
- len = p[recv->dma_offset+2] | (p[recv->dma_offset+3] << 8);
-
- if (len > 4096) {
- PRINT(KERN_ERR,
- "IR DMA error - bogus 'len' value %u\n", len);
- }
-
- channel = p[recv->dma_offset+1] & 0x3F;
- tag = p[recv->dma_offset+1] >> 6;
- sy = p[recv->dma_offset+0] & 0xF;
-
- /* advance to data payload */
- recv->dma_offset += 4;
-
- /* check for wrap-around */
- if (recv->dma_offset >= recv->buf_stride*recv->nblocks) {
- recv->dma_offset -= recv->buf_stride*recv->nblocks;
- }
-
- /* dma_offset now points to the first byte of the data payload */
- offset = recv->dma_offset;
-
- /* advance to xferStatus/timeStamp */
- recv->dma_offset += len;
-
- total_len = len + 8; /* 8 bytes header+trailer in OHCI packet */
- /* payload is padded to 4 bytes */
- if (len % 4) {
- recv->dma_offset += 4 - (len%4);
- total_len += 4 - (len%4);
- }
-
- /* check for wrap-around */
- if (recv->dma_offset >= recv->buf_stride*recv->nblocks) {
- /* uh oh, the packet data wraps from the last
- to the first DMA block - make the packet
- contiguous by copying its "tail" into the
- guard page */
-
- int guard_off = recv->buf_stride*recv->nblocks;
- int tail_len = len - (guard_off - offset);
-
- if (tail_len > 0 && tail_len < recv->buf_stride) {
- memcpy(iso->data_buf.kvirt + guard_off,
- iso->data_buf.kvirt,
- tail_len);
- }
-
- recv->dma_offset -= recv->buf_stride*recv->nblocks;
- }
-
- /* parse timestamp */
- cycle = p[recv->dma_offset+0] | (p[recv->dma_offset+1]<<8);
- cycle &= 0x1FFF;
-
- /* advance to next packet */
- recv->dma_offset += 4;
-
- /* check for wrap-around */
- if (recv->dma_offset >= recv->buf_stride*recv->nblocks) {
- recv->dma_offset -= recv->buf_stride*recv->nblocks;
- }
-
- hpsb_iso_packet_received(iso, offset, len, total_len, cycle, channel, tag, sy);
- }
-
- if (wake)
- hpsb_iso_wake(iso);
-}
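
A tiny worked example of the size accounting used in the parser above (8 bytes of header plus trailer, payload padded to a quadlet boundary); this is a standalone illustration, not driver code:

#include <stdio.h>

int main(void)
{
	unsigned int len = 13;              /* payload length from the packet header */
	unsigned int total_len = len + 8;   /* plus 4-byte header and 4-byte trailer */

	if (len % 4)
		total_len += 4 - (len % 4); /* payload is padded to 4 bytes */

	printf("%u\n", total_len);          /* prints 24 */
	return 0;
}
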
-
-static void ohci_iso_recv_bufferfill_task(struct hpsb_iso *iso, struct ohci_iso_recv *recv)
-{
- int loop;
- struct ti_ohci *ohci = recv->ohci;
-
- /* loop over all blocks */
- for (loop = 0; loop < recv->nblocks; loop++) {
-
- /* check block_dma to see if it's done */
- struct dma_cmd *im = &recv->block[recv->block_dma];
-
- /* check the DMA descriptor for new writes to xferStatus */
- u16 xferstatus = le32_to_cpu(im->status) >> 16;
-
- /* rescount is the number of bytes *remaining to be written* in the block */
- u16 rescount = le32_to_cpu(im->status) & 0xFFFF;
-
- unsigned char event = xferstatus & 0x1F;
-
- if (!event) {
- /* nothing has happened to this block yet */
- break;
- }
-
- if (event != 0x11) {
- atomic_inc(&iso->overflows);
- PRINT(KERN_ERR,
- "IR DMA error - OHCI error code 0x%02x\n", event);
- }
-
- if (rescount != 0) {
- /* the card is still writing to this block;
- we can't touch it until it's done */
- break;
- }
-
- /* OK, the block is finished... */
-
- /* sync our view of the block */
- dma_region_sync_for_cpu(&iso->data_buf, recv->block_dma*recv->buf_stride, recv->buf_stride);
-
- /* reset the DMA descriptor */
- im->status = recv->buf_stride;
-
- /* advance block_dma */
- recv->block_dma = (recv->block_dma + 1) % recv->nblocks;
-
- if ((recv->block_dma+1) % recv->nblocks == recv->block_reader) {
- atomic_inc(&iso->overflows);
- DBGMSG("ISO reception overflow - "
- "ran out of DMA blocks");
- }
- }
-
- /* parse any packets that have arrived */
- ohci_iso_recv_bufferfill_parse(iso, recv);
-}
-
-static void ohci_iso_recv_packetperbuf_task(struct hpsb_iso *iso, struct ohci_iso_recv *recv)
-{
- int count;
- int wake = 0;
- struct ti_ohci *ohci = recv->ohci;
-
- /* loop over the entire buffer */
- for (count = 0; count < recv->nblocks; count++) {
- u32 packet_len = 0;
-
- /* pointer to the DMA descriptor */
- struct dma_cmd *il = ((struct dma_cmd*) recv->prog.kvirt) + iso->pkt_dma;
-
- /* check the DMA descriptor for new writes to xferStatus */
- u16 xferstatus = le32_to_cpu(il->status) >> 16;
- u16 rescount = le32_to_cpu(il->status) & 0xFFFF;
-
- unsigned char event = xferstatus & 0x1F;
-
- if (!event) {
- /* this packet hasn't come in yet; we are done for now */
- goto out;
- }
-
- if (event == 0x11) {
- /* packet received successfully! */
-
- /* rescount is the number of bytes *remaining* in the packet buffer,
- after the packet was written */
- packet_len = recv->buf_stride - rescount;
-
- } else if (event == 0x02) {
- PRINT(KERN_ERR, "IR DMA error - packet too long for buffer\n");
- } else if (event) {
- PRINT(KERN_ERR, "IR DMA error - OHCI error code 0x%02x\n", event);
- }
-
- /* sync our view of the buffer */
- dma_region_sync_for_cpu(&iso->data_buf, iso->pkt_dma * recv->buf_stride, recv->buf_stride);
-
- /* record the per-packet info */
- {
- /* the data payload starts 8 bytes after the iso header */
- unsigned char *hdr;
-
- unsigned int offset;
- unsigned short cycle;
- unsigned char channel, tag, sy;
-
- offset = iso->pkt_dma * recv->buf_stride;
- hdr = iso->data_buf.kvirt + offset;
-
- /* skip iso header */
- offset += 8;
- packet_len -= 8;
-
- cycle = (hdr[0] | (hdr[1] << 8)) & 0x1FFF;
- channel = hdr[5] & 0x3F;
- tag = hdr[5] >> 6;
- sy = hdr[4] & 0xF;
-
- hpsb_iso_packet_received(iso, offset, packet_len,
- recv->buf_stride, cycle, channel, tag, sy);
- }
-
- /* reset the DMA descriptor */
- il->status = recv->buf_stride;
-
- wake = 1;
- recv->block_dma = iso->pkt_dma;
- }
-
-out:
- if (wake)
- hpsb_iso_wake(iso);
-}
-
-static void ohci_iso_recv_task(unsigned long data)
-{
- struct hpsb_iso *iso = (struct hpsb_iso*) data;
- struct ohci_iso_recv *recv = iso->hostdata;
-
- if (recv->dma_mode == BUFFER_FILL_MODE)
- ohci_iso_recv_bufferfill_task(iso, recv);
- else
- ohci_iso_recv_packetperbuf_task(iso, recv);
-}
-
-/***********************************
- * rawiso ISO transmission *
- ***********************************/
-
-struct ohci_iso_xmit {
- struct ti_ohci *ohci;
- struct dma_prog_region prog;
- struct ohci1394_iso_tasklet task;
- int task_active;
- int last_cycle;
- atomic_t skips;
-
- u32 ContextControlSet;
- u32 ContextControlClear;
- u32 CommandPtr;
-};
-
-/* transmission DMA program:
- one OUTPUT_MORE_IMMEDIATE for the IT header
- one OUTPUT_LAST for the buffer data */
-
-struct iso_xmit_cmd {
- struct dma_cmd output_more_immediate;
- u8 iso_hdr[8];
- u32 unused[2];
- struct dma_cmd output_last;
-};
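
Assuming struct dma_cmd is four 32-bit words (16 bytes), an iso_xmit_cmd spans exactly three 16-byte descriptor slots, which is why the CommandPtr and branchAddress values written later in this file carry Z=3. A standalone sketch of that size relationship (types renamed so they do not clash with the driver's):

#include <stdint.h>

struct dma_cmd_sketch {
	uint32_t control;
	uint32_t address;
	uint32_t branchAddress;
	uint32_t status;
};

struct iso_xmit_cmd_sketch {
	struct dma_cmd_sketch output_more_immediate;
	uint8_t iso_hdr[8];
	uint32_t unused[2];
	struct dma_cmd_sketch output_last;
};

/* 16 + 8 + 8 + 16 = 48 bytes, i.e. three 16-byte descriptor slots (Z = 3) */
_Static_assert(sizeof(struct iso_xmit_cmd_sketch) == 3 * 16,
               "iso_xmit_cmd should span three descriptor slots");
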
-
-static int ohci_iso_xmit_init(struct hpsb_iso *iso);
-static int ohci_iso_xmit_start(struct hpsb_iso *iso, int cycle);
-static void ohci_iso_xmit_shutdown(struct hpsb_iso *iso);
-static void ohci_iso_xmit_task(unsigned long data);
-
-static int ohci_iso_xmit_init(struct hpsb_iso *iso)
-{
- struct ohci_iso_xmit *xmit;
- unsigned int prog_size;
- int ctx;
- int ret = -ENOMEM;
-
- xmit = kmalloc(sizeof(*xmit), GFP_KERNEL);
- if (!xmit)
- return -ENOMEM;
-
- iso->hostdata = xmit;
- xmit->ohci = iso->host->hostdata;
- xmit->task_active = 0;
- xmit->last_cycle = -1;
- atomic_set(&iso->skips, 0);
-
- dma_prog_region_init(&xmit->prog);
-
- prog_size = sizeof(struct iso_xmit_cmd) * iso->buf_packets;
-
- if (dma_prog_region_alloc(&xmit->prog, prog_size, xmit->ohci->dev))
- goto err;
-
- ohci1394_init_iso_tasklet(&xmit->task, OHCI_ISO_TRANSMIT,
- ohci_iso_xmit_task, (unsigned long) iso);
-
- if (ohci1394_register_iso_tasklet(xmit->ohci, &xmit->task) < 0) {
- ret = -EBUSY;
- goto err;
- }
-
- xmit->task_active = 1;
-
- /* xmit context registers are spaced 16 bytes apart */
- ctx = xmit->task.context;
- xmit->ContextControlSet = OHCI1394_IsoXmitContextControlSet + 16 * ctx;
- xmit->ContextControlClear = OHCI1394_IsoXmitContextControlClear + 16 * ctx;
- xmit->CommandPtr = OHCI1394_IsoXmitCommandPtr + 16 * ctx;
-
- return 0;
-
-err:
- ohci_iso_xmit_shutdown(iso);
- return ret;
-}
-
-static void ohci_iso_xmit_stop(struct hpsb_iso *iso)
-{
- struct ohci_iso_xmit *xmit = iso->hostdata;
- struct ti_ohci *ohci = xmit->ohci;
-
- /* disable interrupts */
- reg_write(xmit->ohci, OHCI1394_IsoXmitIntMaskClear, 1 << xmit->task.context);
-
- /* halt DMA */
- if (ohci1394_stop_context(xmit->ohci, xmit->ContextControlClear, NULL)) {
- /* XXX the DMA context will lock up if you try to send too much data! */
- PRINT(KERN_ERR,
- "you probably exceeded the OHCI card's bandwidth limit - "
- "reload the module and reduce xmit bandwidth");
- }
-}
-
-static void ohci_iso_xmit_shutdown(struct hpsb_iso *iso)
-{
- struct ohci_iso_xmit *xmit = iso->hostdata;
-
- if (xmit->task_active) {
- ohci_iso_xmit_stop(iso);
- ohci1394_unregister_iso_tasklet(xmit->ohci, &xmit->task);
- xmit->task_active = 0;
- }
-
- dma_prog_region_free(&xmit->prog);
- kfree(xmit);
- iso->hostdata = NULL;
-}
-
-static void ohci_iso_xmit_task(unsigned long data)
-{
- struct hpsb_iso *iso = (struct hpsb_iso*) data;
- struct ohci_iso_xmit *xmit = iso->hostdata;
- struct ti_ohci *ohci = xmit->ohci;
- int wake = 0;
- int count;
-
- /* check the whole buffer if necessary, starting at pkt_dma */
- for (count = 0; count < iso->buf_packets; count++) {
- int cycle;
-
- /* DMA descriptor */
- struct iso_xmit_cmd *cmd = dma_region_i(&xmit->prog, struct iso_xmit_cmd, iso->pkt_dma);
-
- /* check for new writes to xferStatus */
- u16 xferstatus = le32_to_cpu(cmd->output_last.status) >> 16;
- u8 event = xferstatus & 0x1F;
-
- if (!event) {
- /* packet hasn't been sent yet; we are done for now */
- break;
- }
-
- if (event != 0x11)
- PRINT(KERN_ERR,
- "IT DMA error - OHCI error code 0x%02x\n", event);
-
- /* at least one packet went out, so wake up the writer */
- wake = 1;
-
- /* parse cycle */
- cycle = le32_to_cpu(cmd->output_last.status) & 0x1FFF;
-
- if (xmit->last_cycle > -1) {
- int cycle_diff = cycle - xmit->last_cycle;
- int skip;
-
- /* unwrap */
- if (cycle_diff < 0) {
- cycle_diff += 8000;
- if (cycle_diff < 0)
- PRINT(KERN_ERR, "bogus cycle diff %d\n",
- cycle_diff);
- }
-
- skip = cycle_diff - 1;
- if (skip > 0) {
- DBGMSG("skipped %d cycles without packet loss", skip);
- atomic_add(skip, &iso->skips);
- }
- }
- xmit->last_cycle = cycle;
-
- /* tell the subsystem the packet has gone out */
- hpsb_iso_packet_sent(iso, cycle, event != 0x11);
-
- /* reset the DMA descriptor for next time */
- cmd->output_last.status = 0;
- }
-
- if (wake)
- hpsb_iso_wake(iso);
-}
-
-static int ohci_iso_xmit_queue(struct hpsb_iso *iso, struct hpsb_iso_packet_info *info)
-{
- struct ohci_iso_xmit *xmit = iso->hostdata;
- struct ti_ohci *ohci = xmit->ohci;
-
- int next_i, prev_i;
- struct iso_xmit_cmd *next, *prev;
-
- unsigned int offset;
- unsigned short len;
- unsigned char tag, sy;
-
- /* check that the packet doesn't cross a page boundary
- (we could allow this if we added OUTPUT_MORE descriptor support) */
- if (cross_bound(info->offset, info->len)) {
- PRINT(KERN_ERR,
- "rawiso xmit: packet %u crosses a page boundary",
- iso->first_packet);
- return -EINVAL;
- }
-
- offset = info->offset;
- len = info->len;
- tag = info->tag;
- sy = info->sy;
-
- /* sync up the card's view of the buffer */
- dma_region_sync_for_device(&iso->data_buf, offset, len);
-
- /* append first_packet to the DMA chain by linking the previous
- descriptor to it (next will become the new end of the DMA chain) */
-
- next_i = iso->first_packet;
- prev_i = (next_i == 0) ? (iso->buf_packets - 1) : (next_i - 1);
-
- next = dma_region_i(&xmit->prog, struct iso_xmit_cmd, next_i);
- prev = dma_region_i(&xmit->prog, struct iso_xmit_cmd, prev_i);
-
- /* set up the OUTPUT_MORE_IMMEDIATE descriptor */
- memset(next, 0, sizeof(struct iso_xmit_cmd));
- next->output_more_immediate.control = cpu_to_le32(0x02000008);
-
- /* ISO packet header is embedded in the OUTPUT_MORE_IMMEDIATE */
-
- /* tcode = 0xA, and sy */
- next->iso_hdr[0] = 0xA0 | (sy & 0xF);
-
- /* tag and channel number */
- next->iso_hdr[1] = (tag << 6) | (iso->channel & 0x3F);
-
- /* transmission speed */
- next->iso_hdr[2] = iso->speed & 0x7;
-
- /* payload size */
- next->iso_hdr[6] = len & 0xFF;
- next->iso_hdr[7] = len >> 8;
-
- /* set up the OUTPUT_LAST */
- next->output_last.control = cpu_to_le32(1 << 28);
- next->output_last.control |= cpu_to_le32(1 << 27); /* update timeStamp */
- next->output_last.control |= cpu_to_le32(3 << 20); /* want interrupt */
- next->output_last.control |= cpu_to_le32(3 << 18); /* enable branch */
- next->output_last.control |= cpu_to_le32(len);
-
- /* payload bus address */
- next->output_last.address = cpu_to_le32(dma_region_offset_to_bus(&iso->data_buf, offset));
-
- /* leave branchAddress at zero for now */
-
- /* re-write the previous DMA descriptor to chain to this one */
-
- /* set prev branch address to point to next (Z=3) */
- prev->output_last.branchAddress = cpu_to_le32(
- dma_prog_region_offset_to_bus(&xmit->prog, sizeof(struct iso_xmit_cmd) * next_i) | 3);
-
- /*
- * Link the skip address to this descriptor itself. This causes a
- * context to skip a cycle whenever lost cycles or FIFO overruns occur,
- * without dropping the data at that point.  The application should then
- * decide whether this is an error condition or not. Some protocols
- * can deal with this by dropping some rate-matching padding packets.
- */
- next->output_more_immediate.branchAddress =
- prev->output_last.branchAddress;
-
- /* disable interrupt, unless required by the IRQ interval */
- if (prev_i % iso->irq_interval) {
- prev->output_last.control &= cpu_to_le32(~(3 << 20)); /* no interrupt */
- } else {
- prev->output_last.control |= cpu_to_le32(3 << 20); /* enable interrupt */
- }
-
- wmb();
-
- /* wake DMA in case it is sleeping */
- reg_write(xmit->ohci, xmit->ContextControlSet, 1 << 12);
-
- /* issue a dummy read of the cycle timer to force all PCI
- writes to be posted immediately */
- mb();
- reg_read(xmit->ohci, OHCI1394_IsochronousCycleTimer);
-
- return 0;
-}
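
For reference, the 8-byte isochronous header embedded in the OUTPUT_MORE_IMMEDIATE above is built byte by byte; the following standalone helper (illustrative only, not driver code) mirrors that layout:

#include <stdint.h>
#include <string.h>

static void fill_iso_hdr(uint8_t hdr[8], unsigned int channel, unsigned int tag,
                         unsigned int sy, unsigned int speed, unsigned int len)
{
	memset(hdr, 0, 8);
	hdr[0] = 0xA0 | (sy & 0xF);             /* tcode = 0xA, sy */
	hdr[1] = (tag << 6) | (channel & 0x3F); /* tag and channel number */
	hdr[2] = speed & 0x7;                   /* transmission speed */
	hdr[6] = len & 0xFF;                    /* payload size, low byte */
	hdr[7] = len >> 8;                      /* payload size, high byte */
}
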
-
-static int ohci_iso_xmit_start(struct hpsb_iso *iso, int cycle)
-{
- struct ohci_iso_xmit *xmit = iso->hostdata;
- struct ti_ohci *ohci = xmit->ohci;
-
- /* clear out the control register */
- reg_write(xmit->ohci, xmit->ContextControlClear, 0xFFFFFFFF);
- wmb();
-
- /* address and length of first descriptor block (Z=3) */
- reg_write(xmit->ohci, xmit->CommandPtr,
- dma_prog_region_offset_to_bus(&xmit->prog, iso->pkt_dma * sizeof(struct iso_xmit_cmd)) | 3);
-
- /* cycle match */
- if (cycle != -1) {
- u32 start = cycle & 0x1FFF;
-
- /* 'cycle' is only mod 8000, but we also need two 'seconds' bits -
- just snarf them from the current time */
- u32 seconds = reg_read(xmit->ohci, OHCI1394_IsochronousCycleTimer) >> 25;
-
- /* advance one second to give some extra time for DMA to start */
- seconds += 1;
-
- start |= (seconds & 3) << 13;
-
- reg_write(xmit->ohci, xmit->ContextControlSet, 0x80000000 | (start << 16));
- }
-
- /* enable interrupts */
- reg_write(xmit->ohci, OHCI1394_IsoXmitIntMaskSet, 1 << xmit->task.context);
-
- /* run */
- reg_write(xmit->ohci, xmit->ContextControlSet, 0x8000);
- mb();
-
- /* wait 100 usec to give the card time to go active */
- udelay(100);
-
- /* check the RUN bit */
- if (!(reg_read(xmit->ohci, xmit->ContextControlSet) & 0x8000)) {
- PRINT(KERN_ERR, "Error starting IT DMA (ContextControl 0x%08x)\n",
- reg_read(xmit->ohci, xmit->ContextControlSet));
- return -1;
- }
-
- return 0;
-}
-
-static int ohci_isoctl(struct hpsb_iso *iso, enum isoctl_cmd cmd, unsigned long arg)
-{
-
- switch(cmd) {
- case XMIT_INIT:
- return ohci_iso_xmit_init(iso);
- case XMIT_START:
- return ohci_iso_xmit_start(iso, arg);
- case XMIT_STOP:
- ohci_iso_xmit_stop(iso);
- return 0;
- case XMIT_QUEUE:
- return ohci_iso_xmit_queue(iso, (struct hpsb_iso_packet_info*) arg);
- case XMIT_SHUTDOWN:
- ohci_iso_xmit_shutdown(iso);
- return 0;
-
- case RECV_INIT:
- return ohci_iso_recv_init(iso);
- case RECV_START: {
- int *args = (int*) arg;
- return ohci_iso_recv_start(iso, args[0], args[1], args[2]);
- }
- case RECV_STOP:
- ohci_iso_recv_stop(iso);
- return 0;
- case RECV_RELEASE:
- ohci_iso_recv_release(iso, (struct hpsb_iso_packet_info*) arg);
- return 0;
- case RECV_FLUSH:
- ohci_iso_recv_task((unsigned long) iso);
- return 0;
- case RECV_SHUTDOWN:
- ohci_iso_recv_shutdown(iso);
- return 0;
- case RECV_LISTEN_CHANNEL:
- ohci_iso_recv_change_channel(iso, arg, 1);
- return 0;
- case RECV_UNLISTEN_CHANNEL:
- ohci_iso_recv_change_channel(iso, arg, 0);
- return 0;
- case RECV_SET_CHANNEL_MASK:
- ohci_iso_recv_set_channel_mask(iso, *((u64*) arg));
- return 0;
-
- default:
- PRINT_G(KERN_ERR, "ohci_isoctl cmd %d not implemented yet",
- cmd);
- break;
- }
- return -EINVAL;
-}
-
-/***************************************
- * IEEE-1394 functionality section END *
- ***************************************/
-
-
-/********************************************************
- * Global stuff (interrupt handler, init/shutdown code) *
- ********************************************************/
-
-static void dma_trm_reset(struct dma_trm_ctx *d)
-{
- unsigned long flags;
- LIST_HEAD(packet_list);
- struct ti_ohci *ohci = d->ohci;
- struct hpsb_packet *packet, *ptmp;
-
- ohci1394_stop_context(ohci, d->ctrlClear, NULL);
-
- /* Lock the context, reset it and release it. Move the packets
- * that were pending in the context to packet_list and free
- * them after releasing the lock. */
-
- spin_lock_irqsave(&d->lock, flags);
-
- list_splice_init(&d->fifo_list, &packet_list);
- list_splice_init(&d->pending_list, &packet_list);
-
- d->branchAddrPtr = NULL;
- d->sent_ind = d->prg_ind;
- d->free_prgs = d->num_desc;
-
- spin_unlock_irqrestore(&d->lock, flags);
-
- if (list_empty(&packet_list))
- return;
-
- PRINT(KERN_INFO, "AT dma reset ctx=%d, aborting transmission", d->ctx);
-
- /* Now process subsystem callbacks for the packets from this
- * context. */
- list_for_each_entry_safe(packet, ptmp, &packet_list, driver_list) {
- list_del_init(&packet->driver_list);
- hpsb_packet_sent(ohci->host, packet, ACKX_ABORTED);
- }
-}
-
-static void ohci_schedule_iso_tasklets(struct ti_ohci *ohci,
- quadlet_t rx_event,
- quadlet_t tx_event)
-{
- struct ohci1394_iso_tasklet *t;
- unsigned long mask;
- unsigned long flags;
-
- spin_lock_irqsave(&ohci->iso_tasklet_list_lock, flags);
-
- list_for_each_entry(t, &ohci->iso_tasklet_list, link) {
- mask = 1 << t->context;
-
- if (t->type == OHCI_ISO_TRANSMIT) {
- if (tx_event & mask)
- tasklet_schedule(&t->tasklet);
- } else {
- /* OHCI_ISO_RECEIVE or OHCI_ISO_MULTICHANNEL_RECEIVE */
- if (rx_event & mask)
- tasklet_schedule(&t->tasklet);
- }
- }
-
- spin_unlock_irqrestore(&ohci->iso_tasklet_list_lock, flags);
-}
-
-static irqreturn_t ohci_irq_handler(int irq, void *dev_id)
-{
- quadlet_t event, node_id;
- struct ti_ohci *ohci = (struct ti_ohci *)dev_id;
- struct hpsb_host *host = ohci->host;
- int phyid = -1, isroot = 0;
- unsigned long flags;
-
- /* Read and clear the interrupt event register. Don't clear
- * the busReset event, though. This is done when we get the
- * selfIDComplete interrupt. */
- spin_lock_irqsave(&ohci->event_lock, flags);
- event = reg_read(ohci, OHCI1394_IntEventClear);
- reg_write(ohci, OHCI1394_IntEventClear, event & ~OHCI1394_busReset);
- spin_unlock_irqrestore(&ohci->event_lock, flags);
-
- if (!event)
- return IRQ_NONE;
-
- /* If event is ~(u32)0, the CardBus card was ejected. In this case
- * we just return, and clean up in the ohci1394_pci_remove
- * function. */
- if (event == ~(u32) 0) {
- DBGMSG("Device removed.");
- return IRQ_NONE;
- }
-
- DBGMSG("IntEvent: %08x", event);
-
- if (event & OHCI1394_unrecoverableError) {
- int ctx;
- PRINT(KERN_ERR, "Unrecoverable error!");
-
- if (reg_read(ohci, OHCI1394_AsReqTrContextControlSet) & 0x800)
- PRINT(KERN_ERR, "Async Req Tx Context died: "
- "ctrl[%08x] cmdptr[%08x]",
- reg_read(ohci, OHCI1394_AsReqTrContextControlSet),
- reg_read(ohci, OHCI1394_AsReqTrCommandPtr));
-
- if (reg_read(ohci, OHCI1394_AsRspTrContextControlSet) & 0x800)
- PRINT(KERN_ERR, "Async Rsp Tx Context died: "
- "ctrl[%08x] cmdptr[%08x]",
- reg_read(ohci, OHCI1394_AsRspTrContextControlSet),
- reg_read(ohci, OHCI1394_AsRspTrCommandPtr));
-
- if (reg_read(ohci, OHCI1394_AsReqRcvContextControlSet) & 0x800)
- PRINT(KERN_ERR, "Async Req Rcv Context died: "
- "ctrl[%08x] cmdptr[%08x]",
- reg_read(ohci, OHCI1394_AsReqRcvContextControlSet),
- reg_read(ohci, OHCI1394_AsReqRcvCommandPtr));
-
- if (reg_read(ohci, OHCI1394_AsRspRcvContextControlSet) & 0x800)
- PRINT(KERN_ERR, "Async Rsp Rcv Context died: "
- "ctrl[%08x] cmdptr[%08x]",
- reg_read(ohci, OHCI1394_AsRspRcvContextControlSet),
- reg_read(ohci, OHCI1394_AsRspRcvCommandPtr));
-
- for (ctx = 0; ctx < ohci->nb_iso_xmit_ctx; ctx++) {
- if (reg_read(ohci, OHCI1394_IsoXmitContextControlSet + (16 * ctx)) & 0x800)
- PRINT(KERN_ERR, "Iso Xmit %d Context died: "
- "ctrl[%08x] cmdptr[%08x]", ctx,
- reg_read(ohci, OHCI1394_IsoXmitContextControlSet + (16 * ctx)),
- reg_read(ohci, OHCI1394_IsoXmitCommandPtr + (16 * ctx)));
- }
-
- for (ctx = 0; ctx < ohci->nb_iso_rcv_ctx; ctx++) {
- if (reg_read(ohci, OHCI1394_IsoRcvContextControlSet + (32 * ctx)) & 0x800)
- PRINT(KERN_ERR, "Iso Recv %d Context died: "
- "ctrl[%08x] cmdptr[%08x] match[%08x]", ctx,
- reg_read(ohci, OHCI1394_IsoRcvContextControlSet + (32 * ctx)),
- reg_read(ohci, OHCI1394_IsoRcvCommandPtr + (32 * ctx)),
- reg_read(ohci, OHCI1394_IsoRcvContextMatch + (32 * ctx)));
- }
-
- event &= ~OHCI1394_unrecoverableError;
- }
- if (event & OHCI1394_postedWriteErr) {
- PRINT(KERN_ERR, "physical posted write error");
- /* no recovery strategy yet; it would have to involve protocol drivers */
- event &= ~OHCI1394_postedWriteErr;
- }
- if (event & OHCI1394_cycleTooLong) {
- if(printk_ratelimit())
- PRINT(KERN_WARNING, "isochronous cycle too long");
- else
- DBGMSG("OHCI1394_cycleTooLong");
- reg_write(ohci, OHCI1394_LinkControlSet,
- OHCI1394_LinkControl_CycleMaster);
- event &= ~OHCI1394_cycleTooLong;
- }
- if (event & OHCI1394_cycleInconsistent) {
- /* We subscribe to the cycleInconsistent event only to
- * clear the corresponding event bit... otherwise,
- * isochronous cycleMatch DMA won't work. */
- DBGMSG("OHCI1394_cycleInconsistent");
- event &= ~OHCI1394_cycleInconsistent;
- }
- if (event & OHCI1394_busReset) {
- /* The busReset event bit can't be cleared during the
- * selfID phase, so we disable busReset interrupts, to
- * avoid burying the cpu in interrupt requests. */
- spin_lock_irqsave(&ohci->event_lock, flags);
- reg_write(ohci, OHCI1394_IntMaskClear, OHCI1394_busReset);
-
- if (ohci->check_busreset) {
- int loop_count = 0;
-
- udelay(10);
-
- while (reg_read(ohci, OHCI1394_IntEventSet) & OHCI1394_busReset) {
- reg_write(ohci, OHCI1394_IntEventClear, OHCI1394_busReset);
-
- spin_unlock_irqrestore(&ohci->event_lock, flags);
- udelay(10);
- spin_lock_irqsave(&ohci->event_lock, flags);
-
- /* The loop counter check is to prevent the driver
- * from remaining in this state forever. For the
- * initial bus reset, the loop continues forever
- * and the system hangs, until some device is plugged
- * into or unplugged from a port manually! The forced
- * reset seems to solve this problem. This mainly affects nForce2. */
- if (loop_count > 10000) {
- ohci_devctl(host, RESET_BUS, LONG_RESET);
- DBGMSG("Detected bus-reset loop. Forced a bus reset!");
- loop_count = 0;
- }
-
- loop_count++;
- }
- }
- spin_unlock_irqrestore(&ohci->event_lock, flags);
- if (!host->in_bus_reset) {
- DBGMSG("irq_handler: Bus reset requested");
-
- /* Subsystem call */
- hpsb_bus_reset(ohci->host);
- }
- event &= ~OHCI1394_busReset;
- }
- if (event & OHCI1394_reqTxComplete) {
- struct dma_trm_ctx *d = &ohci->at_req_context;
- DBGMSG("Got reqTxComplete interrupt "
- "status=0x%08X", reg_read(ohci, d->ctrlSet));
- if (reg_read(ohci, d->ctrlSet) & 0x800)
- ohci1394_stop_context(ohci, d->ctrlClear,
- "reqTxComplete");
- else
- dma_trm_tasklet((unsigned long)d);
- //tasklet_schedule(&d->task);
- event &= ~OHCI1394_reqTxComplete;
- }
- if (event & OHCI1394_respTxComplete) {
- struct dma_trm_ctx *d = &ohci->at_resp_context;
- DBGMSG("Got respTxComplete interrupt "
- "status=0x%08X", reg_read(ohci, d->ctrlSet));
- if (reg_read(ohci, d->ctrlSet) & 0x800)
- ohci1394_stop_context(ohci, d->ctrlClear,
- "respTxComplete");
- else
- tasklet_schedule(&d->task);
- event &= ~OHCI1394_respTxComplete;
- }
- if (event & OHCI1394_RQPkt) {
- struct dma_rcv_ctx *d = &ohci->ar_req_context;
- DBGMSG("Got RQPkt interrupt status=0x%08X",
- reg_read(ohci, d->ctrlSet));
- if (reg_read(ohci, d->ctrlSet) & 0x800)
- ohci1394_stop_context(ohci, d->ctrlClear, "RQPkt");
- else
- tasklet_schedule(&d->task);
- event &= ~OHCI1394_RQPkt;
- }
- if (event & OHCI1394_RSPkt) {
- struct dma_rcv_ctx *d = &ohci->ar_resp_context;
- DBGMSG("Got RSPkt interrupt status=0x%08X",
- reg_read(ohci, d->ctrlSet));
- if (reg_read(ohci, d->ctrlSet) & 0x800)
- ohci1394_stop_context(ohci, d->ctrlClear, "RSPkt");
- else
- tasklet_schedule(&d->task);
- event &= ~OHCI1394_RSPkt;
- }
- if (event & OHCI1394_isochRx) {
- quadlet_t rx_event;
-
- rx_event = reg_read(ohci, OHCI1394_IsoRecvIntEventSet);
- reg_write(ohci, OHCI1394_IsoRecvIntEventClear, rx_event);
- ohci_schedule_iso_tasklets(ohci, rx_event, 0);
- event &= ~OHCI1394_isochRx;
- }
- if (event & OHCI1394_isochTx) {
- quadlet_t tx_event;
-
- tx_event = reg_read(ohci, OHCI1394_IsoXmitIntEventSet);
- reg_write(ohci, OHCI1394_IsoXmitIntEventClear, tx_event);
- ohci_schedule_iso_tasklets(ohci, 0, tx_event);
- event &= ~OHCI1394_isochTx;
- }
- if (event & OHCI1394_selfIDComplete) {
- if (host->in_bus_reset) {
- node_id = reg_read(ohci, OHCI1394_NodeID);
-
- if (!(node_id & 0x80000000)) {
- PRINT(KERN_ERR,
- "SelfID received, but NodeID invalid "
- "(probably new bus reset occurred): %08X",
- node_id);
- goto selfid_not_valid;
- }
-
- phyid = node_id & 0x0000003f;
- isroot = (node_id & 0x40000000) != 0;
-
- DBGMSG("SelfID interrupt received "
- "(phyid %d, %s)", phyid,
- (isroot ? "root" : "not root"));
-
- handle_selfid(ohci, host, phyid, isroot);
-
- /* Clear the bus reset event and re-enable the
- * busReset interrupt. */
- spin_lock_irqsave(&ohci->event_lock, flags);
- reg_write(ohci, OHCI1394_IntEventClear, OHCI1394_busReset);
- reg_write(ohci, OHCI1394_IntMaskSet, OHCI1394_busReset);
- spin_unlock_irqrestore(&ohci->event_lock, flags);
-
- /* Turn on phys dma reception.
- *
- * TODO: Enable some sort of filtering management.
- */
- if (phys_dma) {
- reg_write(ohci, OHCI1394_PhyReqFilterHiSet,
- 0xffffffff);
- reg_write(ohci, OHCI1394_PhyReqFilterLoSet,
- 0xffffffff);
- }
-
- DBGMSG("PhyReqFilter=%08x%08x",
- reg_read(ohci, OHCI1394_PhyReqFilterHiSet),
- reg_read(ohci, OHCI1394_PhyReqFilterLoSet));
-
- hpsb_selfid_complete(host, phyid, isroot);
- } else
- PRINT(KERN_ERR,
- "SelfID received outside of bus reset sequence");
-
-selfid_not_valid:
- event &= ~OHCI1394_selfIDComplete;
- }
-
- /* Make sure we handle everything, just in case we accidentally
- * enabled an interrupt that we didn't write a handler for. */
- if (event)
- PRINT(KERN_ERR, "Unhandled interrupt(s) 0x%08x",
- event);
-
- return IRQ_HANDLED;
-}
-
-/* Put the buffer back into the dma context */
-static void insert_dma_buffer(struct dma_rcv_ctx *d, int idx)
-{
- struct ti_ohci *ohci = (struct ti_ohci*)(d->ohci);
- DBGMSG("Inserting dma buf ctx=%d idx=%d", d->ctx, idx);
-
- d->prg_cpu[idx]->status = cpu_to_le32(d->buf_size);
- d->prg_cpu[idx]->branchAddress &= le32_to_cpu(0xfffffff0);
- idx = (idx + d->num_desc - 1 ) % d->num_desc;
- d->prg_cpu[idx]->branchAddress |= le32_to_cpu(0x00000001);
-
- /* To avoid a race, ensure 1394 interface hardware sees the inserted
- * context program descriptors before it sees the wakeup bit set. */
- wmb();
-
- /* wake up the dma context if necessary */
- if (!(reg_read(ohci, d->ctrlSet) & 0x400)) {
- PRINT(KERN_INFO,
- "Waking dma ctx=%d ... processing is probably too slow",
- d->ctx);
- }
-
- /* do this always, to avoid race condition */
- reg_write(ohci, d->ctrlSet, 0x1000);
-}
-
-#define cond_le32_to_cpu(data, noswap) \
- (noswap ? data : le32_to_cpu(data))
-
-static const int TCODE_SIZE[16] = {20, 0, 16, -1, 16, 20, 20, 0,
- -1, 0, -1, 0, -1, -1, 16, -1};
-
-/*
- * Determine the length of a packet in the buffer
- * Optimization suggested by Pascal Drolet <pascal.drolet@informission.ca>
- */
-static inline int packet_length(struct dma_rcv_ctx *d, int idx,
- quadlet_t *buf_ptr, int offset,
- unsigned char tcode, int noswap)
-{
- int length = -1;
-
- if (d->type == DMA_CTX_ASYNC_REQ || d->type == DMA_CTX_ASYNC_RESP) {
- length = TCODE_SIZE[tcode];
- if (length == 0) {
- if (offset + 12 >= d->buf_size) {
- length = (cond_le32_to_cpu(d->buf_cpu[(idx + 1) % d->num_desc]
- [3 - ((d->buf_size - offset) >> 2)], noswap) >> 16);
- } else {
- length = (cond_le32_to_cpu(buf_ptr[3], noswap) >> 16);
- }
- length += 20;
- }
- } else if (d->type == DMA_CTX_ISO) {
- /* Assumption: buffer fill mode with header/trailer */
- length = (cond_le32_to_cpu(buf_ptr[0], noswap) >> 16) + 8;
- }
-
- if (length > 0 && length % 4)
- length += 4 - (length % 4);
-
- return length;
-}
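
As a worked example of the function above: for an async block packet whose TCODE_SIZE entry is 0, the length is the data_length taken from header quadlet 3 plus 20 bytes (which appears to cover the 16-byte async header and the 4-byte status trailer appended by the AR context), padded to a quadlet. Standalone illustration, not driver code:

#include <stdio.h>

int main(void)
{
	unsigned int data_length = 13;      /* upper 16 bits of header quadlet 3 */
	int length = data_length + 20;      /* header + status trailer + payload */

	if (length % 4)
		length += 4 - (length % 4); /* pad to a quadlet boundary */

	printf("%d\n", length);             /* prints 36 */
	return 0;
}
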
-
-/* Tasklet that processes dma receive buffers */
-static void dma_rcv_tasklet (unsigned long data)
-{
- struct dma_rcv_ctx *d = (struct dma_rcv_ctx*)data;
- struct ti_ohci *ohci = (struct ti_ohci*)(d->ohci);
- unsigned int split_left, idx, offset, rescount;
- unsigned char tcode;
- int length, bytes_left, ack;
- unsigned long flags;
- quadlet_t *buf_ptr;
- char *split_ptr;
- char msg[256];
-
- spin_lock_irqsave(&d->lock, flags);
-
- idx = d->buf_ind;
- offset = d->buf_offset;
- buf_ptr = d->buf_cpu[idx] + offset/4;
-
- rescount = le32_to_cpu(d->prg_cpu[idx]->status) & 0xffff;
- bytes_left = d->buf_size - rescount - offset;
-
- while (bytes_left > 0) {
- tcode = (cond_le32_to_cpu(buf_ptr[0], ohci->no_swap_incoming) >> 4) & 0xf;
-
- /* packet_length() will return < 4 for an error */
- length = packet_length(d, idx, buf_ptr, offset, tcode, ohci->no_swap_incoming);
-
- if (length < 4) { /* something is wrong */
- sprintf(msg,"Unexpected tcode 0x%x(0x%08x) in AR ctx=%d, length=%d",
- tcode, cond_le32_to_cpu(buf_ptr[0], ohci->no_swap_incoming),
- d->ctx, length);
- ohci1394_stop_context(ohci, d->ctrlClear, msg);
- spin_unlock_irqrestore(&d->lock, flags);
- return;
- }
-
- /* The first case is where we have a packet that crosses
- * over more than one descriptor. The next case is where
- * it's all in the first descriptor. */
- if ((offset + length) > d->buf_size) {
- DBGMSG("Split packet rcv'd");
- if (length > d->split_buf_size) {
- ohci1394_stop_context(ohci, d->ctrlClear,
- "Split packet size exceeded");
- d->buf_ind = idx;
- d->buf_offset = offset;
- spin_unlock_irqrestore(&d->lock, flags);
- return;
- }
-
- if (le32_to_cpu(d->prg_cpu[(idx+1)%d->num_desc]->status)
- == d->buf_size) {
- /* Other part of packet not written yet.
- * This should never happen; in any case,
- * we'll get it on the next call. */
- PRINT(KERN_INFO,
- "Got only half a packet!");
- d->buf_ind = idx;
- d->buf_offset = offset;
- spin_unlock_irqrestore(&d->lock, flags);
- return;
- }
-
- split_left = length;
- split_ptr = (char *)d->spb;
- memcpy(split_ptr,buf_ptr,d->buf_size-offset);
- split_left -= d->buf_size-offset;
- split_ptr += d->buf_size-offset;
- insert_dma_buffer(d, idx);
- idx = (idx+1) % d->num_desc;
- buf_ptr = d->buf_cpu[idx];
- offset=0;
-
- while (split_left >= d->buf_size) {
- memcpy(split_ptr,buf_ptr,d->buf_size);
- split_ptr += d->buf_size;
- split_left -= d->buf_size;
- insert_dma_buffer(d, idx);
- idx = (idx+1) % d->num_desc;
- buf_ptr = d->buf_cpu[idx];
- }
-
- if (split_left > 0) {
- memcpy(split_ptr, buf_ptr, split_left);
- offset = split_left;
- buf_ptr += offset/4;
- }
- } else {
- DBGMSG("Single packet rcv'd");
- memcpy(d->spb, buf_ptr, length);
- offset += length;
- buf_ptr += length/4;
- if (offset==d->buf_size) {
- insert_dma_buffer(d, idx);
- idx = (idx+1) % d->num_desc;
- buf_ptr = d->buf_cpu[idx];
- offset=0;
- }
- }
-
- /* We get one phy packet to the async descriptor for each
- * bus reset. We always ignore it. */
- if (tcode != OHCI1394_TCODE_PHY) {
- if (!ohci->no_swap_incoming)
- header_le32_to_cpu(d->spb, tcode);
- DBGMSG("Packet received from node"
- " %d ack=0x%02X spd=%d tcode=0x%X"
- " length=%d ctx=%d tlabel=%d",
- (d->spb[1]>>16)&0x3f,
- (cond_le32_to_cpu(d->spb[length/4-1], ohci->no_swap_incoming)>>16)&0x1f,
- (cond_le32_to_cpu(d->spb[length/4-1], ohci->no_swap_incoming)>>21)&0x3,
- tcode, length, d->ctx,
- (d->spb[0]>>10)&0x3f);
-
- ack = (((cond_le32_to_cpu(d->spb[length/4-1], ohci->no_swap_incoming)>>16)&0x1f)
- == 0x11) ? 1 : 0;
-
- hpsb_packet_received(ohci->host, d->spb,
- length-4, ack);
- }
-#ifdef OHCI1394_DEBUG
- else
- PRINT (KERN_DEBUG, "Got phy packet ctx=%d ... discarded",
- d->ctx);
-#endif
-
- rescount = le32_to_cpu(d->prg_cpu[idx]->status) & 0xffff;
-
- bytes_left = d->buf_size - rescount - offset;
-
- }
-
- d->buf_ind = idx;
- d->buf_offset = offset;
-
- spin_unlock_irqrestore(&d->lock, flags);
-}
-
-/* Bottom half that processes sent packets */
-static void dma_trm_tasklet (unsigned long data)
-{
- struct dma_trm_ctx *d = (struct dma_trm_ctx*)data;
- struct ti_ohci *ohci = (struct ti_ohci*)(d->ohci);
- struct hpsb_packet *packet, *ptmp;
- unsigned long flags;
- u32 status, ack;
- size_t datasize;
-
- spin_lock_irqsave(&d->lock, flags);
-
- list_for_each_entry_safe(packet, ptmp, &d->fifo_list, driver_list) {
- datasize = packet->data_size;
- if (datasize && packet->type != hpsb_raw)
- status = le32_to_cpu(
- d->prg_cpu[d->sent_ind]->end.status) >> 16;
- else
- status = le32_to_cpu(
- d->prg_cpu[d->sent_ind]->begin.status) >> 16;
-
- if (status == 0)
- /* this packet hasn't been sent yet */
- break;
-
-#ifdef OHCI1394_DEBUG
- if (datasize)
- if (((le32_to_cpu(d->prg_cpu[d->sent_ind]->data[0])>>4)&0xf) == 0xa)
- DBGMSG("Stream packet sent to channel %d tcode=0x%X "
- "ack=0x%X spd=%d dataLength=%d ctx=%d",
- (le32_to_cpu(d->prg_cpu[d->sent_ind]->data[0])>>8)&0x3f,
- (le32_to_cpu(d->prg_cpu[d->sent_ind]->data[0])>>4)&0xf,
- status&0x1f, (status>>5)&0x3,
- le32_to_cpu(d->prg_cpu[d->sent_ind]->data[1])>>16,
- d->ctx);
- else
- DBGMSG("Packet sent to node %d tcode=0x%X tLabel="
- "%d ack=0x%X spd=%d dataLength=%d ctx=%d",
- (le32_to_cpu(d->prg_cpu[d->sent_ind]->data[1])>>16)&0x3f,
- (le32_to_cpu(d->prg_cpu[d->sent_ind]->data[0])>>4)&0xf,
- (le32_to_cpu(d->prg_cpu[d->sent_ind]->data[0])>>10)&0x3f,
- status&0x1f, (status>>5)&0x3,
- le32_to_cpu(d->prg_cpu[d->sent_ind]->data[3])>>16,
- d->ctx);
- else
- DBGMSG("Packet sent to node %d tcode=0x%X tLabel="
- "%d ack=0x%X spd=%d data=0x%08X ctx=%d",
- (le32_to_cpu(d->prg_cpu[d->sent_ind]->data[1])
- >>16)&0x3f,
- (le32_to_cpu(d->prg_cpu[d->sent_ind]->data[0])
- >>4)&0xf,
- (le32_to_cpu(d->prg_cpu[d->sent_ind]->data[0])
- >>10)&0x3f,
- status&0x1f, (status>>5)&0x3,
- le32_to_cpu(d->prg_cpu[d->sent_ind]->data[3]),
- d->ctx);
-#endif
-
- if (status & 0x10) {
- ack = status & 0xf;
- } else {
- switch (status & 0x1f) {
- case EVT_NO_STATUS: /* that should never happen */
- case EVT_RESERVED_A: /* that should never happen */
- case EVT_LONG_PACKET: /* that should never happen */
- PRINT(KERN_WARNING, "Received OHCI evt_* error 0x%x", status & 0x1f);
- ack = ACKX_SEND_ERROR;
- break;
- case EVT_MISSING_ACK:
- ack = ACKX_TIMEOUT;
- break;
- case EVT_UNDERRUN:
- ack = ACKX_SEND_ERROR;
- break;
- case EVT_OVERRUN: /* that should never happen */
- PRINT(KERN_WARNING, "Received OHCI evt_* error 0x%x", status & 0x1f);
- ack = ACKX_SEND_ERROR;
- break;
- case EVT_DESCRIPTOR_READ:
- case EVT_DATA_READ:
- case EVT_DATA_WRITE:
- ack = ACKX_SEND_ERROR;
- break;
- case EVT_BUS_RESET: /* that should never happen */
- PRINT(KERN_WARNING, "Received OHCI evt_* error 0x%x", status & 0x1f);
- ack = ACKX_SEND_ERROR;
- break;
- case EVT_TIMEOUT:
- ack = ACKX_TIMEOUT;
- break;
- case EVT_TCODE_ERR:
- ack = ACKX_SEND_ERROR;
- break;
- case EVT_RESERVED_B: /* that should never happen */
- case EVT_RESERVED_C: /* that should never happen */
- PRINT(KERN_WARNING, "Received OHCI evt_* error 0x%x", status & 0x1f);
- ack = ACKX_SEND_ERROR;
- break;
- case EVT_UNKNOWN:
- case EVT_FLUSHED:
- ack = ACKX_SEND_ERROR;
- break;
- default:
- PRINT(KERN_ERR, "Unhandled OHCI evt_* error 0x%x", status & 0x1f);
- ack = ACKX_SEND_ERROR;
- BUG();
- }
- }
-
- list_del_init(&packet->driver_list);
- hpsb_packet_sent(ohci->host, packet, ack);
-
- if (datasize)
- pci_unmap_single(ohci->dev,
- cpu_to_le32(d->prg_cpu[d->sent_ind]->end.address),
- datasize, PCI_DMA_TODEVICE);
-
- d->sent_ind = (d->sent_ind+1)%d->num_desc;
- d->free_prgs++;
- }
-
- dma_trm_flush(ohci, d);
-
- spin_unlock_irqrestore(&d->lock, flags);
-}
-
-static void free_dma_rcv_ctx(struct dma_rcv_ctx *d)
-{
- int i;
- struct ti_ohci *ohci = d->ohci;
-
- if (ohci == NULL)
- return;
-
- DBGMSG("Freeing dma_rcv_ctx %d", d->ctx);
-
- if (d->buf_cpu) {
- for (i=0; i<d->num_desc; i++)
- if (d->buf_cpu[i] && d->buf_bus[i])
- pci_free_consistent(
- ohci->dev, d->buf_size,
- d->buf_cpu[i], d->buf_bus[i]);
- kfree(d->buf_cpu);
- kfree(d->buf_bus);
- }
- if (d->prg_cpu) {
- for (i=0; i<d->num_desc; i++)
- if (d->prg_cpu[i] && d->prg_bus[i])
- pci_pool_free(d->prg_pool, d->prg_cpu[i],
- d->prg_bus[i]);
- pci_pool_destroy(d->prg_pool);
- kfree(d->prg_cpu);
- kfree(d->prg_bus);
- }
- kfree(d->spb);
-
- /* Mark this context as freed. */
- d->ohci = NULL;
-}
-
-static int
-alloc_dma_rcv_ctx(struct ti_ohci *ohci, struct dma_rcv_ctx *d,
- enum context_type type, int ctx, int num_desc,
- int buf_size, int split_buf_size, int context_base)
-{
- int i, len;
- static int num_allocs;
- static char pool_name[20];
-
- d->ohci = ohci;
- d->type = type;
- d->ctx = ctx;
-
- d->num_desc = num_desc;
- d->buf_size = buf_size;
- d->split_buf_size = split_buf_size;
-
- d->ctrlSet = 0;
- d->ctrlClear = 0;
- d->cmdPtr = 0;
-
- d->buf_cpu = kzalloc(d->num_desc * sizeof(*d->buf_cpu), GFP_ATOMIC);
- d->buf_bus = kzalloc(d->num_desc * sizeof(*d->buf_bus), GFP_ATOMIC);
-
- if (d->buf_cpu == NULL || d->buf_bus == NULL) {
- PRINT(KERN_ERR, "Failed to allocate %s", "DMA buffer");
- free_dma_rcv_ctx(d);
- return -ENOMEM;
- }
-
- d->prg_cpu = kzalloc(d->num_desc * sizeof(*d->prg_cpu), GFP_ATOMIC);
- d->prg_bus = kzalloc(d->num_desc * sizeof(*d->prg_bus), GFP_ATOMIC);
-
- if (d->prg_cpu == NULL || d->prg_bus == NULL) {
- PRINT(KERN_ERR, "Failed to allocate %s", "DMA prg");
- free_dma_rcv_ctx(d);
- return -ENOMEM;
- }
-
- d->spb = kmalloc(d->split_buf_size, GFP_ATOMIC);
-
- if (d->spb == NULL) {
- PRINT(KERN_ERR, "Failed to allocate %s", "split buffer");
- free_dma_rcv_ctx(d);
- return -ENOMEM;
- }
-
- len = sprintf(pool_name, "ohci1394_rcv_prg");
- sprintf(pool_name+len, "%d", num_allocs);
- d->prg_pool = pci_pool_create(pool_name, ohci->dev,
- sizeof(struct dma_cmd), 4, 0);
- if (d->prg_pool == NULL) {
- PRINT(KERN_ERR, "pci_pool_create failed for %s", pool_name);
- free_dma_rcv_ctx(d);
- return -ENOMEM;
- }
- num_allocs++;
-
- for (i=0; i<d->num_desc; i++) {
- d->buf_cpu[i] = pci_alloc_consistent(ohci->dev,
- d->buf_size,
- d->buf_bus+i);
-
- if (d->buf_cpu[i] != NULL) {
- memset(d->buf_cpu[i], 0, d->buf_size);
- } else {
- PRINT(KERN_ERR,
- "Failed to allocate %s", "DMA buffer");
- free_dma_rcv_ctx(d);
- return -ENOMEM;
- }
-
- d->prg_cpu[i] = pci_pool_alloc(d->prg_pool, GFP_KERNEL, d->prg_bus+i);
-
- if (d->prg_cpu[i] != NULL) {
- memset(d->prg_cpu[i], 0, sizeof(struct dma_cmd));
- } else {
- PRINT(KERN_ERR,
- "Failed to allocate %s", "DMA prg");
- free_dma_rcv_ctx(d);
- return -ENOMEM;
- }
- }
-
- spin_lock_init(&d->lock);
-
- d->ctrlSet = context_base + OHCI1394_ContextControlSet;
- d->ctrlClear = context_base + OHCI1394_ContextControlClear;
- d->cmdPtr = context_base + OHCI1394_ContextCommandPtr;
-
- tasklet_init(&d->task, dma_rcv_tasklet, (unsigned long) d);
- return 0;
-}
-
-static void free_dma_trm_ctx(struct dma_trm_ctx *d)
-{
- int i;
- struct ti_ohci *ohci = d->ohci;
-
- if (ohci == NULL)
- return;
-
- DBGMSG("Freeing dma_trm_ctx %d", d->ctx);
-
- if (d->prg_cpu) {
- for (i=0; i<d->num_desc; i++)
- if (d->prg_cpu[i] && d->prg_bus[i])
- pci_pool_free(d->prg_pool, d->prg_cpu[i],
- d->prg_bus[i]);
- pci_pool_destroy(d->prg_pool);
- kfree(d->prg_cpu);
- kfree(d->prg_bus);
- }
-
- /* Mark this context as freed. */
- d->ohci = NULL;
-}
-
-static int
-alloc_dma_trm_ctx(struct ti_ohci *ohci, struct dma_trm_ctx *d,
- enum context_type type, int ctx, int num_desc,
- int context_base)
-{
- int i, len;
- static char pool_name[20];
- static int num_allocs=0;
-
- d->ohci = ohci;
- d->type = type;
- d->ctx = ctx;
- d->num_desc = num_desc;
- d->ctrlSet = 0;
- d->ctrlClear = 0;
- d->cmdPtr = 0;
-
- d->prg_cpu = kzalloc(d->num_desc * sizeof(*d->prg_cpu), GFP_KERNEL);
- d->prg_bus = kzalloc(d->num_desc * sizeof(*d->prg_bus), GFP_KERNEL);
-
- if (d->prg_cpu == NULL || d->prg_bus == NULL) {
- PRINT(KERN_ERR, "Failed to allocate %s", "AT DMA prg");
- free_dma_trm_ctx(d);
- return -ENOMEM;
- }
-
- len = sprintf(pool_name, "ohci1394_trm_prg");
- sprintf(pool_name+len, "%d", num_allocs);
- d->prg_pool = pci_pool_create(pool_name, ohci->dev,
- sizeof(struct at_dma_prg), 4, 0);
- if (d->prg_pool == NULL) {
- PRINT(KERN_ERR, "pci_pool_create failed for %s", pool_name);
- free_dma_trm_ctx(d);
- return -ENOMEM;
- }
- num_allocs++;
-
- for (i = 0; i < d->num_desc; i++) {
- d->prg_cpu[i] = pci_pool_alloc(d->prg_pool, GFP_KERNEL, d->prg_bus+i);
-
- if (d->prg_cpu[i] != NULL) {
- memset(d->prg_cpu[i], 0, sizeof(struct at_dma_prg));
- } else {
- PRINT(KERN_ERR,
- "Failed to allocate %s", "AT DMA prg");
- free_dma_trm_ctx(d);
- return -ENOMEM;
- }
- }
-
- spin_lock_init(&d->lock);
-
- /* initialize tasklet */
- d->ctrlSet = context_base + OHCI1394_ContextControlSet;
- d->ctrlClear = context_base + OHCI1394_ContextControlClear;
- d->cmdPtr = context_base + OHCI1394_ContextCommandPtr;
- tasklet_init(&d->task, dma_trm_tasklet, (unsigned long)d);
- return 0;
-}
-
-static void ohci_set_hw_config_rom(struct hpsb_host *host, __be32 *config_rom)
-{
- struct ti_ohci *ohci = host->hostdata;
-
- reg_write(ohci, OHCI1394_ConfigROMhdr, be32_to_cpu(config_rom[0]));
- reg_write(ohci, OHCI1394_BusOptions, be32_to_cpu(config_rom[2]));
-
- memcpy(ohci->csr_config_rom_cpu, config_rom, OHCI_CONFIG_ROM_LEN);
-}
-
-
-static quadlet_t ohci_hw_csr_reg(struct hpsb_host *host, int reg,
- quadlet_t data, quadlet_t compare)
-{
- struct ti_ohci *ohci = host->hostdata;
- int i;
-
- reg_write(ohci, OHCI1394_CSRData, data);
- reg_write(ohci, OHCI1394_CSRCompareData, compare);
- reg_write(ohci, OHCI1394_CSRControl, reg & 0x3);
-
- for (i = 0; i < OHCI_LOOP_COUNT; i++) {
- if (reg_read(ohci, OHCI1394_CSRControl) & 0x80000000)
- break;
-
- mdelay(1);
- }
-
- return reg_read(ohci, OHCI1394_CSRData);
-}
-
-static struct hpsb_host_driver ohci1394_driver = {
- .owner = THIS_MODULE,
- .name = OHCI1394_DRIVER_NAME,
- .set_hw_config_rom = ohci_set_hw_config_rom,
- .transmit_packet = ohci_transmit,
- .devctl = ohci_devctl,
- .isoctl = ohci_isoctl,
- .hw_csr_reg = ohci_hw_csr_reg,
-};
-
-/***********************************
- * PCI Driver Interface functions *
- ***********************************/
-
-#ifdef CONFIG_PPC_PMAC
-static void ohci1394_pmac_on(struct pci_dev *dev)
-{
- if (machine_is(powermac)) {
- struct device_node *ofn = pci_device_to_OF_node(dev);
-
- if (ofn) {
- pmac_call_feature(PMAC_FTR_1394_CABLE_POWER, ofn, 0, 1);
- pmac_call_feature(PMAC_FTR_1394_ENABLE, ofn, 0, 1);
- }
- }
-}
-
-static void ohci1394_pmac_off(struct pci_dev *dev)
-{
- if (machine_is(powermac)) {
- struct device_node *ofn = pci_device_to_OF_node(dev);
-
- if (ofn) {
- pmac_call_feature(PMAC_FTR_1394_ENABLE, ofn, 0, 0);
- pmac_call_feature(PMAC_FTR_1394_CABLE_POWER, ofn, 0, 0);
- }
- }
-}
-#else
-#define ohci1394_pmac_on(dev)
-#define ohci1394_pmac_off(dev)
-#endif /* CONFIG_PPC_PMAC */
-
-static int __devinit ohci1394_pci_probe(struct pci_dev *dev,
- const struct pci_device_id *ent)
-{
- struct hpsb_host *host;
- struct ti_ohci *ohci; /* shortcut to currently handled device */
- resource_size_t ohci_base;
- int err = -ENOMEM;
-
- ohci1394_pmac_on(dev);
- if (pci_enable_device(dev)) {
- PRINT_G(KERN_ERR, "Failed to enable OHCI hardware");
- err = -ENXIO;
- goto err;
- }
- pci_set_master(dev);
-
- host = hpsb_alloc_host(&ohci1394_driver, sizeof(struct ti_ohci), &dev->dev);
- if (!host) {
- PRINT_G(KERN_ERR, "Failed to allocate %s", "host structure");
- goto err;
- }
- ohci = host->hostdata;
- ohci->dev = dev;
- ohci->host = host;
- ohci->init_state = OHCI_INIT_ALLOC_HOST;
- host->pdev = dev;
- pci_set_drvdata(dev, ohci);
-
- /* We don't want hardware swapping */
- pci_write_config_dword(dev, OHCI1394_PCI_HCI_Control, 0);
-
- /* Some oddball Apple controllers do not order the selfid
- * properly, so we make up for it here. */
-#ifndef __LITTLE_ENDIAN
- /* XXX: Need a better way to check this. I'm wondering if we can
- * read the values of the OHCI1394_PCI_HCI_Control and the
- * noByteSwapData registers to see if they were not cleared to
- * zero. Should this work? Obviously it's not defined what these
- * registers will read when they aren't supported. Bleh! */
- if (dev->vendor == PCI_VENDOR_ID_APPLE &&
- dev->device == PCI_DEVICE_ID_APPLE_UNI_N_FW) {
- ohci->no_swap_incoming = 1;
- ohci->selfid_swap = 0;
- } else
- ohci->selfid_swap = 1;
-#endif
-
-
-#ifndef PCI_DEVICE_ID_NVIDIA_NFORCE2_FW
-#define PCI_DEVICE_ID_NVIDIA_NFORCE2_FW 0x006e
-#endif
-
- /* These chipsets require a bit of extra care when checking after
- * a busreset. */
- if ((dev->vendor == PCI_VENDOR_ID_APPLE &&
- dev->device == PCI_DEVICE_ID_APPLE_UNI_N_FW) ||
- (dev->vendor == PCI_VENDOR_ID_NVIDIA &&
- dev->device == PCI_DEVICE_ID_NVIDIA_NFORCE2_FW))
- ohci->check_busreset = 1;
-
- /* We hardwire the MMIO length, since some CardBus adaptors
- * fail to report the right length. Anyway, the OHCI spec
- * clearly says it's 2 KB, so this shouldn't be a problem. */
- ohci_base = pci_resource_start(dev, 0);
- if (pci_resource_len(dev, 0) < OHCI1394_REGISTER_SIZE)
- PRINT(KERN_WARNING, "PCI resource length of 0x%llx too small!",
- (unsigned long long)pci_resource_len(dev, 0));
-
- if (!request_mem_region(ohci_base, OHCI1394_REGISTER_SIZE,
- OHCI1394_DRIVER_NAME)) {
- PRINT_G(KERN_ERR, "MMIO resource (0x%llx - 0x%llx) unavailable",
- (unsigned long long)ohci_base,
- (unsigned long long)ohci_base + OHCI1394_REGISTER_SIZE);
- goto err;
- }
- ohci->init_state = OHCI_INIT_HAVE_MEM_REGION;
-
- ohci->registers = ioremap(ohci_base, OHCI1394_REGISTER_SIZE);
- if (ohci->registers == NULL) {
- PRINT_G(KERN_ERR, "Failed to remap registers");
- err = -ENXIO;
- goto err;
- }
- ohci->init_state = OHCI_INIT_HAVE_IOMAPPING;
- DBGMSG("Remapped memory spaces reg 0x%p", ohci->registers);
-
- /* csr_config rom allocation */
- ohci->csr_config_rom_cpu =
- pci_alloc_consistent(ohci->dev, OHCI_CONFIG_ROM_LEN,
- &ohci->csr_config_rom_bus);
- if (ohci->csr_config_rom_cpu == NULL) {
- PRINT_G(KERN_ERR, "Failed to allocate %s", "buffer config rom");
- goto err;
- }
- ohci->init_state = OHCI_INIT_HAVE_CONFIG_ROM_BUFFER;
-
- /* self-id dma buffer allocation */
- ohci->selfid_buf_cpu =
- pci_alloc_consistent(ohci->dev, OHCI1394_SI_DMA_BUF_SIZE,
- &ohci->selfid_buf_bus);
- if (ohci->selfid_buf_cpu == NULL) {
- PRINT_G(KERN_ERR, "Failed to allocate %s", "self-ID buffer");
- goto err;
- }
- ohci->init_state = OHCI_INIT_HAVE_SELFID_BUFFER;
-
- if ((unsigned long)ohci->selfid_buf_cpu & 0x1fff)
- PRINT(KERN_INFO, "SelfID buffer %p is not aligned on "
- "8Kb boundary... may cause problems on some CXD3222 chip",
- ohci->selfid_buf_cpu);
-
- /* No self-id errors at startup */
- ohci->self_id_errors = 0;
-
- ohci->init_state = OHCI_INIT_HAVE_TXRX_BUFFERS__MAYBE;
- /* AR DMA request context allocation */
- if (alloc_dma_rcv_ctx(ohci, &ohci->ar_req_context,
- DMA_CTX_ASYNC_REQ, 0, AR_REQ_NUM_DESC,
- AR_REQ_BUF_SIZE, AR_REQ_SPLIT_BUF_SIZE,
- OHCI1394_AsReqRcvContextBase) < 0) {
- PRINT_G(KERN_ERR, "Failed to allocate %s", "AR Req context");
- goto err;
- }
- /* AR DMA response context allocation */
- if (alloc_dma_rcv_ctx(ohci, &ohci->ar_resp_context,
- DMA_CTX_ASYNC_RESP, 0, AR_RESP_NUM_DESC,
- AR_RESP_BUF_SIZE, AR_RESP_SPLIT_BUF_SIZE,
- OHCI1394_AsRspRcvContextBase) < 0) {
- PRINT_G(KERN_ERR, "Failed to allocate %s", "AR Resp context");
- goto err;
- }
- /* AT DMA request context */
- if (alloc_dma_trm_ctx(ohci, &ohci->at_req_context,
- DMA_CTX_ASYNC_REQ, 0, AT_REQ_NUM_DESC,
- OHCI1394_AsReqTrContextBase) < 0) {
- PRINT_G(KERN_ERR, "Failed to allocate %s", "AT Req context");
- goto err;
- }
- /* AT DMA response context */
- if (alloc_dma_trm_ctx(ohci, &ohci->at_resp_context,
- DMA_CTX_ASYNC_RESP, 1, AT_RESP_NUM_DESC,
- OHCI1394_AsRspTrContextBase) < 0) {
- PRINT_G(KERN_ERR, "Failed to allocate %s", "AT Resp context");
- goto err;
- }
- /* Start off with a soft reset, to clear everything to a sane
- * state. */
- ohci_soft_reset(ohci);
-
- /* Now enable LPS, which we need in order to start accessing
- * most of the registers. In fact, on some cards (ALI M5251),
- * accessing registers in the SClk domain without LPS enabled
- * will lock up the machine. */
- reg_write(ohci, OHCI1394_HCControlSet, OHCI1394_HCControl_LPS);
-
- /* Disable and clear interrupts */
- reg_write(ohci, OHCI1394_IntEventClear, 0xffffffff);
- reg_write(ohci, OHCI1394_IntMaskClear, 0xffffffff);
-
- /* Flush MMIO writes and wait to make sure we have full link enabled. */
- reg_read(ohci, OHCI1394_Version);
- msleep(50);
-
- /* Determine the number of available IR and IT contexts. */
- ohci->nb_iso_rcv_ctx =
- get_nb_iso_ctx(ohci, OHCI1394_IsoRecvIntMaskSet);
- ohci->nb_iso_xmit_ctx =
- get_nb_iso_ctx(ohci, OHCI1394_IsoXmitIntMaskSet);
-
- /* Set the usage bits for non-existent contexts so they can't
- * be allocated */
- ohci->ir_ctx_usage = ~0 << ohci->nb_iso_rcv_ctx;
- ohci->it_ctx_usage = ~0 << ohci->nb_iso_xmit_ctx;
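- /* Worked example (added note): with e.g. 4 receive contexts,
- * ~0 << 4 leaves only bits 0-3 clear, so test_and_set_bit() in
- * ohci1394_register_iso_tasklet() can only ever claim a context
- * that really exists on this controller. */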
-
- INIT_LIST_HEAD(&ohci->iso_tasklet_list);
- spin_lock_init(&ohci->iso_tasklet_list_lock);
- ohci->ISO_channel_usage = 0;
- spin_lock_init(&ohci->IR_channel_lock);
-
- spin_lock_init(&ohci->event_lock);
-
- /*
- * interrupts are disabled, all right, but... due to IRQF_SHARED we
- * might get called anyway. We'll see no event, of course, but
- * we need to get to that "no event", so enough should be initialized
- * by that point.
- */
- err = request_irq(dev->irq, ohci_irq_handler, IRQF_SHARED,
- OHCI1394_DRIVER_NAME, ohci);
- if (err) {
- PRINT_G(KERN_ERR, "Failed to allocate interrupt %d", dev->irq);
- goto err;
- }
- ohci->init_state = OHCI_INIT_HAVE_IRQ;
- ohci_initialize(ohci);
-
- /* Set certain csr values */
- host->csr.guid_hi = reg_read(ohci, OHCI1394_GUIDHi);
- host->csr.guid_lo = reg_read(ohci, OHCI1394_GUIDLo);
- host->csr.cyc_clk_acc = 100; /* how do we determine clk accuracy? */
- host->csr.max_rec = (reg_read(ohci, OHCI1394_BusOptions) >> 12) & 0xf;
- host->csr.lnk_spd = reg_read(ohci, OHCI1394_BusOptions) & 0x7;
-
- if (phys_dma) {
- host->low_addr_space =
- (u64) reg_read(ohci, OHCI1394_PhyUpperBound) << 16;
- if (!host->low_addr_space)
- host->low_addr_space = OHCI1394_PHYS_UPPER_BOUND_FIXED;
- }
- host->middle_addr_space = OHCI1394_MIDDLE_ADDRESS_SPACE;
-
- /* Tell the highlevel this host is ready */
- if (hpsb_add_host(host)) {
- PRINT_G(KERN_ERR, "Failed to register host with highlevel");
- goto err;
- }
- ohci->init_state = OHCI_INIT_DONE;
-
- return 0;
-err:
- ohci1394_pci_remove(dev);
- return err;
-}
-
-static void ohci1394_pci_remove(struct pci_dev *dev)
-{
- struct ti_ohci *ohci;
- struct device *device;
-
- ohci = pci_get_drvdata(dev);
- if (!ohci)
- goto out;
-
- device = get_device(&ohci->host->device);
-
- switch (ohci->init_state) {
- case OHCI_INIT_DONE:
- hpsb_remove_host(ohci->host);
-
- /* Clear out BUS Options */
- reg_write(ohci, OHCI1394_ConfigROMhdr, 0);
- reg_write(ohci, OHCI1394_BusOptions,
- (reg_read(ohci, OHCI1394_BusOptions) & 0x0000f007) |
- 0x00ff0000);
- memset(ohci->csr_config_rom_cpu, 0, OHCI_CONFIG_ROM_LEN);
-
- case OHCI_INIT_HAVE_IRQ:
- /* Clear interrupt registers */
- reg_write(ohci, OHCI1394_IntMaskClear, 0xffffffff);
- reg_write(ohci, OHCI1394_IntEventClear, 0xffffffff);
- reg_write(ohci, OHCI1394_IsoXmitIntMaskClear, 0xffffffff);
- reg_write(ohci, OHCI1394_IsoXmitIntEventClear, 0xffffffff);
- reg_write(ohci, OHCI1394_IsoRecvIntMaskClear, 0xffffffff);
- reg_write(ohci, OHCI1394_IsoRecvIntEventClear, 0xffffffff);
-
- /* Disable IRM Contender */
- set_phy_reg(ohci, 4, ~0xc0 & get_phy_reg(ohci, 4));
-
- /* Clear link control register */
- reg_write(ohci, OHCI1394_LinkControlClear, 0xffffffff);
-
- /* Let all other nodes know to ignore us */
- ohci_devctl(ohci->host, RESET_BUS, LONG_RESET_NO_FORCE_ROOT);
-
- /* Soft reset before we start - this disables
- * interrupts and clears linkEnable and LPS. */
- ohci_soft_reset(ohci);
- free_irq(dev->irq, ohci);
-
- case OHCI_INIT_HAVE_TXRX_BUFFERS__MAYBE:
- /* The ohci_soft_reset() stops all DMA contexts, so we
- * don't need to do this. */
- free_dma_rcv_ctx(&ohci->ar_req_context);
- free_dma_rcv_ctx(&ohci->ar_resp_context);
- free_dma_trm_ctx(&ohci->at_req_context);
- free_dma_trm_ctx(&ohci->at_resp_context);
-
- case OHCI_INIT_HAVE_SELFID_BUFFER:
- pci_free_consistent(dev, OHCI1394_SI_DMA_BUF_SIZE,
- ohci->selfid_buf_cpu,
- ohci->selfid_buf_bus);
-
- case OHCI_INIT_HAVE_CONFIG_ROM_BUFFER:
- pci_free_consistent(dev, OHCI_CONFIG_ROM_LEN,
- ohci->csr_config_rom_cpu,
- ohci->csr_config_rom_bus);
-
- case OHCI_INIT_HAVE_IOMAPPING:
- iounmap(ohci->registers);
-
- case OHCI_INIT_HAVE_MEM_REGION:
- release_mem_region(pci_resource_start(dev, 0),
- OHCI1394_REGISTER_SIZE);
-
- case OHCI_INIT_ALLOC_HOST:
- pci_set_drvdata(dev, NULL);
- }
-
- if (device)
- put_device(device);
-out:
- ohci1394_pmac_off(dev);
-}
-
-#ifdef CONFIG_PM
-static int ohci1394_pci_suspend(struct pci_dev *dev, pm_message_t state)
-{
- int err;
- struct ti_ohci *ohci = pci_get_drvdata(dev);
-
- if (!ohci) {
- printk(KERN_ERR "%s: tried to suspend nonexisting host\n",
- OHCI1394_DRIVER_NAME);
- return -ENXIO;
- }
- DBGMSG("suspend called");
-
- /* Clear the async DMA contexts and stop using the controller */
- hpsb_bus_reset(ohci->host);
-
- /* See ohci1394_pci_remove() for comments on this sequence */
- reg_write(ohci, OHCI1394_ConfigROMhdr, 0);
- reg_write(ohci, OHCI1394_BusOptions,
- (reg_read(ohci, OHCI1394_BusOptions) & 0x0000f007) |
- 0x00ff0000);
- reg_write(ohci, OHCI1394_IntMaskClear, 0xffffffff);
- reg_write(ohci, OHCI1394_IntEventClear, 0xffffffff);
- reg_write(ohci, OHCI1394_IsoXmitIntMaskClear, 0xffffffff);
- reg_write(ohci, OHCI1394_IsoXmitIntEventClear, 0xffffffff);
- reg_write(ohci, OHCI1394_IsoRecvIntMaskClear, 0xffffffff);
- reg_write(ohci, OHCI1394_IsoRecvIntEventClear, 0xffffffff);
- set_phy_reg(ohci, 4, ~0xc0 & get_phy_reg(ohci, 4));
- reg_write(ohci, OHCI1394_LinkControlClear, 0xffffffff);
- ohci_devctl(ohci->host, RESET_BUS, LONG_RESET_NO_FORCE_ROOT);
- ohci_soft_reset(ohci);
-
- free_irq(dev->irq, ohci);
- err = pci_save_state(dev);
- if (err) {
- PRINT(KERN_ERR, "pci_save_state failed with %d", err);
- return err;
- }
- err = pci_set_power_state(dev, pci_choose_state(dev, state));
- if (err)
- DBGMSG("pci_set_power_state failed with %d", err);
- ohci1394_pmac_off(dev);
-
- return 0;
-}
-
-static int ohci1394_pci_resume(struct pci_dev *dev)
-{
- int err;
- struct ti_ohci *ohci = pci_get_drvdata(dev);
-
- if (!ohci) {
- printk(KERN_ERR "%s: tried to resume nonexisting host\n",
- OHCI1394_DRIVER_NAME);
- return -ENXIO;
- }
- DBGMSG("resume called");
-
- ohci1394_pmac_on(dev);
- pci_set_power_state(dev, PCI_D0);
- pci_restore_state(dev);
- err = pci_enable_device(dev);
- if (err) {
- PRINT(KERN_ERR, "pci_enable_device failed with %d", err);
- return err;
- }
-
- /* See ohci1394_pci_probe() for comments on this sequence */
- ohci_soft_reset(ohci);
- reg_write(ohci, OHCI1394_HCControlSet, OHCI1394_HCControl_LPS);
- reg_write(ohci, OHCI1394_IntEventClear, 0xffffffff);
- reg_write(ohci, OHCI1394_IntMaskClear, 0xffffffff);
- reg_read(ohci, OHCI1394_Version);
- msleep(50);
-
- err = request_irq(dev->irq, ohci_irq_handler, IRQF_SHARED,
- OHCI1394_DRIVER_NAME, ohci);
- if (err) {
- PRINT_G(KERN_ERR, "Failed to allocate interrupt %d", dev->irq);
- return err;
- }
-
- ohci_initialize(ohci);
-
- hpsb_resume_host(ohci->host);
- return 0;
-}
-#endif /* CONFIG_PM */
-
-static struct pci_device_id ohci1394_pci_tbl[] = {
- {
- .class = PCI_CLASS_SERIAL_FIREWIRE_OHCI,
- .class_mask = PCI_ANY_ID,
- .vendor = PCI_ANY_ID,
- .device = PCI_ANY_ID,
- .subvendor = PCI_ANY_ID,
- .subdevice = PCI_ANY_ID,
- },
- { 0, },
-};
-
-MODULE_DEVICE_TABLE(pci, ohci1394_pci_tbl);
-
-static struct pci_driver ohci1394_pci_driver = {
- .name = OHCI1394_DRIVER_NAME,
- .id_table = ohci1394_pci_tbl,
- .probe = ohci1394_pci_probe,
- .remove = ohci1394_pci_remove,
-#ifdef CONFIG_PM
- .resume = ohci1394_pci_resume,
- .suspend = ohci1394_pci_suspend,
-#endif
-};
-
-/***********************************
- * OHCI1394 Video Interface *
- ***********************************/
-
-/* essentially the only purpose of this code is to allow another
- module to hook into ohci's interrupt handler */
-
-/* returns zero if successful, one if DMA context is locked up */
-int ohci1394_stop_context(struct ti_ohci *ohci, int reg, char *msg)
-{
- int i=0;
-
- /* stop the channel program if it's still running */
- reg_write(ohci, reg, 0x8000);
-
- /* Wait until it effectively stops */
- while (reg_read(ohci, reg) & 0x400) {
- i++;
- if (i>5000) {
- PRINT(KERN_ERR,
- "Runaway loop while stopping context: %s...", msg ? msg : "");
- return 1;
- }
-
- mb();
- udelay(10);
- }
- if (msg) PRINT(KERN_ERR, "%s: dma prg stopped", msg);
- return 0;
-}
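-
-/* Note added for clarity: the reg argument above is expected to be a
- * context's ContextControlClear offset.  Writing 0x8000 there clears
- * the context's "run" bit, and the loop then polls until the "active"
- * bit (0x400) drops or the retry budget is exhausted. */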
-
-void ohci1394_init_iso_tasklet(struct ohci1394_iso_tasklet *tasklet, int type,
- void (*func)(unsigned long), unsigned long data)
-{
- tasklet_init(&tasklet->tasklet, func, data);
- tasklet->type = type;
- /* We init the tasklet->link field, so we can list_del() it
- * without worrying whether it was added to the list or not. */
- INIT_LIST_HEAD(&tasklet->link);
-}
-
-int ohci1394_register_iso_tasklet(struct ti_ohci *ohci,
- struct ohci1394_iso_tasklet *tasklet)
-{
- unsigned long flags, *usage;
- int n, i, r = -EBUSY;
-
- if (tasklet->type == OHCI_ISO_TRANSMIT) {
- n = ohci->nb_iso_xmit_ctx;
- usage = &ohci->it_ctx_usage;
- }
- else {
- n = ohci->nb_iso_rcv_ctx;
- usage = &ohci->ir_ctx_usage;
-
- /* only one receive context can be multichannel (OHCI sec 10.4.1) */
- if (tasklet->type == OHCI_ISO_MULTICHANNEL_RECEIVE) {
- if (test_and_set_bit(0, &ohci->ir_multichannel_used)) {
- return r;
- }
- }
- }
-
- spin_lock_irqsave(&ohci->iso_tasklet_list_lock, flags);
-
- for (i = 0; i < n; i++)
- if (!test_and_set_bit(i, usage)) {
- tasklet->context = i;
- list_add_tail(&tasklet->link, &ohci->iso_tasklet_list);
- r = 0;
- break;
- }
-
- spin_unlock_irqrestore(&ohci->iso_tasklet_list_lock, flags);
-
- return r;
-}
-
-void ohci1394_unregister_iso_tasklet(struct ti_ohci *ohci,
- struct ohci1394_iso_tasklet *tasklet)
-{
- unsigned long flags;
-
- tasklet_kill(&tasklet->tasklet);
-
- spin_lock_irqsave(&ohci->iso_tasklet_list_lock, flags);
-
- if (tasklet->type == OHCI_ISO_TRANSMIT)
- clear_bit(tasklet->context, &ohci->it_ctx_usage);
- else {
- clear_bit(tasklet->context, &ohci->ir_ctx_usage);
-
- if (tasklet->type == OHCI_ISO_MULTICHANNEL_RECEIVE) {
- clear_bit(0, &ohci->ir_multichannel_used);
- }
- }
-
- list_del(&tasklet->link);
-
- spin_unlock_irqrestore(&ohci->iso_tasklet_list_lock, flags);
-}
-
-EXPORT_SYMBOL(ohci1394_stop_context);
-EXPORT_SYMBOL(ohci1394_init_iso_tasklet);
-EXPORT_SYMBOL(ohci1394_register_iso_tasklet);
-EXPORT_SYMBOL(ohci1394_unregister_iso_tasklet);
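-
-/* Illustrative sketch (not part of this driver): a client module such
- * as video1394 would typically use the exported tasklet hooks roughly
- * as follows, assuming its own handler my_ir_tasklet() and private
- * data priv, with ohci obtained e.g. from ohci1394_get_struct():
- *
- *   struct ohci1394_iso_tasklet t;
- *
- *   ohci1394_init_iso_tasklet(&t, OHCI_ISO_RECEIVE, my_ir_tasklet,
- *                             (unsigned long)priv);
- *   if (ohci1394_register_iso_tasklet(ohci, &t) < 0)
- *           return -EBUSY;
- *   ...
- *   ohci1394_unregister_iso_tasklet(ohci, &t);
- */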
-
-/***********************************
- * General module initialization *
- ***********************************/
-
-MODULE_AUTHOR("Sebastien Rougeaux <sebastien.rougeaux@anu.edu.au>");
-MODULE_DESCRIPTION("Driver for PCI OHCI IEEE-1394 controllers");
-MODULE_LICENSE("GPL");
-
-static void __exit ohci1394_cleanup (void)
-{
- pci_unregister_driver(&ohci1394_pci_driver);
-}
-
-static int __init ohci1394_init(void)
-{
- return pci_register_driver(&ohci1394_pci_driver);
-}
-
-module_init(ohci1394_init);
-module_exit(ohci1394_cleanup);
diff --git a/drivers/ieee1394/ohci1394.h b/drivers/ieee1394/ohci1394.h
deleted file mode 100644
index 7fb8ab9780ae..000000000000
--- a/drivers/ieee1394/ohci1394.h
+++ /dev/null
@@ -1,453 +0,0 @@
-/*
- * ohci1394.h - driver for OHCI 1394 boards
- * Copyright (C)1999,2000 Sebastien Rougeaux <sebastien.rougeaux@anu.edu.au>
- * Gord Peters <GordPeters@smarttech.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software Foundation,
- * Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
- */
-
-#ifndef _OHCI1394_H
-#define _OHCI1394_H
-
-#include "ieee1394_types.h"
-#include <asm/io.h>
-
-#define OHCI1394_DRIVER_NAME "ohci1394"
-
-#define OHCI1394_MAX_AT_REQ_RETRIES 0xf
-#define OHCI1394_MAX_AT_RESP_RETRIES 0x2
-#define OHCI1394_MAX_PHYS_RESP_RETRIES 0x8
-#define OHCI1394_MAX_SELF_ID_ERRORS 16
-
-#define AR_REQ_NUM_DESC 4 /* number of AR req descriptors */
-#define AR_REQ_BUF_SIZE PAGE_SIZE /* size of AR req buffers */
-#define AR_REQ_SPLIT_BUF_SIZE PAGE_SIZE /* split packet buffer */
-
-#define AR_RESP_NUM_DESC 4 /* number of AR resp descriptors */
-#define AR_RESP_BUF_SIZE PAGE_SIZE /* size of AR resp buffers */
-#define AR_RESP_SPLIT_BUF_SIZE PAGE_SIZE /* split packet buffer */
-
-#define IR_NUM_DESC 16 /* number of IR descriptors */
-#define IR_BUF_SIZE PAGE_SIZE /* 4096 bytes/buffer */
-#define IR_SPLIT_BUF_SIZE PAGE_SIZE /* split packet buffer */
-
-#define IT_NUM_DESC 16 /* number of IT descriptors */
-
-#define AT_REQ_NUM_DESC 32 /* number of AT req descriptors */
-#define AT_RESP_NUM_DESC 32 /* number of AT resp descriptors */
-
-#define OHCI_LOOP_COUNT 100 /* Number of loops for reg read waits */
-
-#define OHCI_CONFIG_ROM_LEN 1024 /* Length of the mapped configrom space */
-
-#define OHCI1394_SI_DMA_BUF_SIZE 8192 /* length of the selfid buffer */
-
-/* PCI configuration space addresses */
-#define OHCI1394_PCI_HCI_Control 0x40
-
-struct dma_cmd {
- u32 control;
- u32 address;
- u32 branchAddress;
- u32 status;
-};
-
-/*
- * FIXME:
- * It is important that a single at_dma_prg does not cross a page boundary.
- * The proper way to do it would be to do the check dynamically as the
- * programs are inserted into the AT fifo.
- */
-struct at_dma_prg {
- struct dma_cmd begin;
- quadlet_t data[4];
- struct dma_cmd end;
- quadlet_t pad[4]; /* FIXME: quick hack for memory alignment */
-};
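-
-/* Added note: the dynamic check that the FIXME above asks for could be
- * expressed with the cross_bound() helper defined later in this header,
- * e.g. (hypothetical bus address prg_addr of the program):
- *
- *   if (cross_bound(prg_addr, sizeof(struct at_dma_prg)))
- *           ... the program straddles a page and must be re-placed ...
- */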
-
-/* identify whether a DMA context is asynchronous or isochronous */
-enum context_type { DMA_CTX_ASYNC_REQ, DMA_CTX_ASYNC_RESP, DMA_CTX_ISO };
-
-/* DMA receive context */
-struct dma_rcv_ctx {
- struct ti_ohci *ohci;
- enum context_type type;
- int ctx;
- unsigned int num_desc;
-
- unsigned int buf_size;
- unsigned int split_buf_size;
-
- /* dma block descriptors */
- struct dma_cmd **prg_cpu;
- dma_addr_t *prg_bus;
- struct pci_pool *prg_pool;
-
- /* dma buffers */
- quadlet_t **buf_cpu;
- dma_addr_t *buf_bus;
-
- unsigned int buf_ind;
- unsigned int buf_offset;
- quadlet_t *spb;
- spinlock_t lock;
- struct tasklet_struct task;
- int ctrlClear;
- int ctrlSet;
- int cmdPtr;
- int ctxtMatch;
-};
-
-/* DMA transmit context */
-struct dma_trm_ctx {
- struct ti_ohci *ohci;
- enum context_type type;
- int ctx;
- unsigned int num_desc;
-
- /* dma block descriptors */
- struct at_dma_prg **prg_cpu;
- dma_addr_t *prg_bus;
- struct pci_pool *prg_pool;
-
- unsigned int prg_ind;
- unsigned int sent_ind;
- int free_prgs;
- quadlet_t *branchAddrPtr;
-
- /* list of packets inserted in the AT FIFO */
- struct list_head fifo_list;
-
- /* list of pending packets to be inserted in the AT FIFO */
- struct list_head pending_list;
-
- spinlock_t lock;
- struct tasklet_struct task;
- int ctrlClear;
- int ctrlSet;
- int cmdPtr;
-};
-
-struct ohci1394_iso_tasklet {
- struct tasklet_struct tasklet;
- struct list_head link;
- int context;
- enum { OHCI_ISO_TRANSMIT, OHCI_ISO_RECEIVE,
- OHCI_ISO_MULTICHANNEL_RECEIVE } type;
-};
-
-struct ti_ohci {
- struct pci_dev *dev;
-
- enum {
- OHCI_INIT_ALLOC_HOST,
- OHCI_INIT_HAVE_MEM_REGION,
- OHCI_INIT_HAVE_IOMAPPING,
- OHCI_INIT_HAVE_CONFIG_ROM_BUFFER,
- OHCI_INIT_HAVE_SELFID_BUFFER,
- OHCI_INIT_HAVE_TXRX_BUFFERS__MAYBE,
- OHCI_INIT_HAVE_IRQ,
- OHCI_INIT_DONE,
- } init_state;
-
- /* remapped memory spaces */
- void __iomem *registers;
-
- /* dma buffer for self-id packets */
- quadlet_t *selfid_buf_cpu;
- dma_addr_t selfid_buf_bus;
-
- /* buffer for csr config rom */
- quadlet_t *csr_config_rom_cpu;
- dma_addr_t csr_config_rom_bus;
- int csr_config_rom_length;
-
- unsigned int max_packet_size;
-
- /* async receive */
- struct dma_rcv_ctx ar_resp_context;
- struct dma_rcv_ctx ar_req_context;
-
- /* async transmit */
- struct dma_trm_ctx at_resp_context;
- struct dma_trm_ctx at_req_context;
-
- /* iso receive */
- int nb_iso_rcv_ctx;
- unsigned long ir_ctx_usage; /* use test_and_set_bit() for atomicity */
- unsigned long ir_multichannel_used; /* ditto */
- spinlock_t IR_channel_lock;
-
- /* iso transmit */
- int nb_iso_xmit_ctx;
- unsigned long it_ctx_usage; /* use test_and_set_bit() for atomicity */
-
- u64 ISO_channel_usage;
-
- /* IEEE-1394 part follows */
- struct hpsb_host *host;
-
- int phyid, isroot;
-
- spinlock_t phy_reg_lock;
- spinlock_t event_lock;
-
- int self_id_errors;
-
- /* Tasklets for iso receive and transmit, used by video1394
- * and dv1394 */
- struct list_head iso_tasklet_list;
- spinlock_t iso_tasklet_list_lock;
-
- /* Swap the selfid buffer? */
- unsigned int selfid_swap:1;
- /* Some Apple chipsets seem to swap incoming headers for us */
- unsigned int no_swap_incoming:1;
-
- /* Force extra paranoia checking on bus-reset handling */
- unsigned int check_busreset:1;
-};
-
-static inline int cross_bound(unsigned long addr, unsigned int size)
-{
- if (size == 0)
- return 0;
-
- if (size > PAGE_SIZE)
- return 1;
-
- if (addr >> PAGE_SHIFT != (addr + size - 1) >> PAGE_SHIFT)
- return 1;
-
- return 0;
-}
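-
-/* Example (illustration only): with 4 KiB pages, cross_bound(0x0ff8, 16)
- * returns 1 because bytes 0x0ff8..0x1007 span two pages, while
- * cross_bound(0x0ff8, 8) stays within one page and returns 0. */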
-
-/*
- * Register read and write helper functions.
- */
-static inline void reg_write(const struct ti_ohci *ohci, int offset, u32 data)
-{
- writel(data, ohci->registers + offset);
-}
-
-static inline u32 reg_read(const struct ti_ohci *ohci, int offset)
-{
- return readl(ohci->registers + offset);
-}
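-
-/* Added note: many of the control registers below come in Set/Clear
- * pairs, so individual bits can be flipped without a read-modify-write.
- * For example, the driver's probe path enables LPS with
- *   reg_write(ohci, OHCI1394_HCControlSet, OHCI1394_HCControl_LPS);
- * and masks all interrupts with
- *   reg_write(ohci, OHCI1394_IntMaskClear, 0xffffffff);
- */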
-
-
-/* 2 KB of register space */
-#define OHCI1394_REGISTER_SIZE 0x800
-
-/* Offsets relative to context bases defined below */
-
-#define OHCI1394_ContextControlSet 0x000
-#define OHCI1394_ContextControlClear 0x004
-#define OHCI1394_ContextCommandPtr 0x00C
-
-/* register map */
-#define OHCI1394_Version 0x000
-#define OHCI1394_GUID_ROM 0x004
-#define OHCI1394_ATRetries 0x008
-#define OHCI1394_CSRData 0x00C
-#define OHCI1394_CSRCompareData 0x010
-#define OHCI1394_CSRControl 0x014
-#define OHCI1394_ConfigROMhdr 0x018
-#define OHCI1394_BusID 0x01C
-#define OHCI1394_BusOptions 0x020
-#define OHCI1394_GUIDHi 0x024
-#define OHCI1394_GUIDLo 0x028
-#define OHCI1394_ConfigROMmap 0x034
-#define OHCI1394_PostedWriteAddressLo 0x038
-#define OHCI1394_PostedWriteAddressHi 0x03C
-#define OHCI1394_VendorID 0x040
-#define OHCI1394_HCControlSet 0x050
-#define OHCI1394_HCControlClear 0x054
-#define OHCI1394_HCControl_noByteSwap 0x40000000
-#define OHCI1394_HCControl_programPhyEnable 0x00800000
-#define OHCI1394_HCControl_aPhyEnhanceEnable 0x00400000
-#define OHCI1394_HCControl_LPS 0x00080000
-#define OHCI1394_HCControl_postedWriteEnable 0x00040000
-#define OHCI1394_HCControl_linkEnable 0x00020000
-#define OHCI1394_HCControl_softReset 0x00010000
-#define OHCI1394_SelfIDBuffer 0x064
-#define OHCI1394_SelfIDCount 0x068
-#define OHCI1394_IRMultiChanMaskHiSet 0x070
-#define OHCI1394_IRMultiChanMaskHiClear 0x074
-#define OHCI1394_IRMultiChanMaskLoSet 0x078
-#define OHCI1394_IRMultiChanMaskLoClear 0x07C
-#define OHCI1394_IntEventSet 0x080
-#define OHCI1394_IntEventClear 0x084
-#define OHCI1394_IntMaskSet 0x088
-#define OHCI1394_IntMaskClear 0x08C
-#define OHCI1394_IsoXmitIntEventSet 0x090
-#define OHCI1394_IsoXmitIntEventClear 0x094
-#define OHCI1394_IsoXmitIntMaskSet 0x098
-#define OHCI1394_IsoXmitIntMaskClear 0x09C
-#define OHCI1394_IsoRecvIntEventSet 0x0A0
-#define OHCI1394_IsoRecvIntEventClear 0x0A4
-#define OHCI1394_IsoRecvIntMaskSet 0x0A8
-#define OHCI1394_IsoRecvIntMaskClear 0x0AC
-#define OHCI1394_InitialBandwidthAvailable 0x0B0
-#define OHCI1394_InitialChannelsAvailableHi 0x0B4
-#define OHCI1394_InitialChannelsAvailableLo 0x0B8
-#define OHCI1394_FairnessControl 0x0DC
-#define OHCI1394_LinkControlSet 0x0E0
-#define OHCI1394_LinkControlClear 0x0E4
-#define OHCI1394_LinkControl_RcvSelfID 0x00000200
-#define OHCI1394_LinkControl_RcvPhyPkt 0x00000400
-#define OHCI1394_LinkControl_CycleTimerEnable 0x00100000
-#define OHCI1394_LinkControl_CycleMaster 0x00200000
-#define OHCI1394_LinkControl_CycleSource 0x00400000
-#define OHCI1394_NodeID 0x0E8
-#define OHCI1394_PhyControl 0x0EC
-#define OHCI1394_IsochronousCycleTimer 0x0F0
-#define OHCI1394_AsReqFilterHiSet 0x100
-#define OHCI1394_AsReqFilterHiClear 0x104
-#define OHCI1394_AsReqFilterLoSet 0x108
-#define OHCI1394_AsReqFilterLoClear 0x10C
-#define OHCI1394_PhyReqFilterHiSet 0x110
-#define OHCI1394_PhyReqFilterHiClear 0x114
-#define OHCI1394_PhyReqFilterLoSet 0x118
-#define OHCI1394_PhyReqFilterLoClear 0x11C
-#define OHCI1394_PhyUpperBound 0x120
-
-#define OHCI1394_AsReqTrContextBase 0x180
-#define OHCI1394_AsReqTrContextControlSet 0x180
-#define OHCI1394_AsReqTrContextControlClear 0x184
-#define OHCI1394_AsReqTrCommandPtr 0x18C
-
-#define OHCI1394_AsRspTrContextBase 0x1A0
-#define OHCI1394_AsRspTrContextControlSet 0x1A0
-#define OHCI1394_AsRspTrContextControlClear 0x1A4
-#define OHCI1394_AsRspTrCommandPtr 0x1AC
-
-#define OHCI1394_AsReqRcvContextBase 0x1C0
-#define OHCI1394_AsReqRcvContextControlSet 0x1C0
-#define OHCI1394_AsReqRcvContextControlClear 0x1C4
-#define OHCI1394_AsReqRcvCommandPtr 0x1CC
-
-#define OHCI1394_AsRspRcvContextBase 0x1E0
-#define OHCI1394_AsRspRcvContextControlSet 0x1E0
-#define OHCI1394_AsRspRcvContextControlClear 0x1E4
-#define OHCI1394_AsRspRcvCommandPtr 0x1EC
-
-/* Isochronous transmit registers */
-/* Add (16 * n) for context n */
-#define OHCI1394_IsoXmitContextBase 0x200
-#define OHCI1394_IsoXmitContextControlSet 0x200
-#define OHCI1394_IsoXmitContextControlClear 0x204
-#define OHCI1394_IsoXmitCommandPtr 0x20C
-
-/* Isochronous receive registers */
-/* Add (32 * n) for context n */
-#define OHCI1394_IsoRcvContextBase 0x400
-#define OHCI1394_IsoRcvContextControlSet 0x400
-#define OHCI1394_IsoRcvContextControlClear 0x404
-#define OHCI1394_IsoRcvCommandPtr 0x40C
-#define OHCI1394_IsoRcvContextMatch 0x410
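-
-/* Example (illustration only): the ControlSet register of isochronous
- * receive context n lives at OHCI1394_IsoRcvContextBase + 32 * n, so a
- * write to context 2 would use
- *   reg_write(ohci, OHCI1394_IsoRcvContextControlSet + 32 * 2, ...);
- * Isochronous transmit contexts are spaced 16 bytes apart instead. */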
-
-/* Interrupts Mask/Events */
-
-#define OHCI1394_reqTxComplete 0x00000001
-#define OHCI1394_respTxComplete 0x00000002
-#define OHCI1394_ARRQ 0x00000004
-#define OHCI1394_ARRS 0x00000008
-#define OHCI1394_RQPkt 0x00000010
-#define OHCI1394_RSPkt 0x00000020
-#define OHCI1394_isochTx 0x00000040
-#define OHCI1394_isochRx 0x00000080
-#define OHCI1394_postedWriteErr 0x00000100
-#define OHCI1394_lockRespErr 0x00000200
-#define OHCI1394_selfIDComplete 0x00010000
-#define OHCI1394_busReset 0x00020000
-#define OHCI1394_phy 0x00080000
-#define OHCI1394_cycleSynch 0x00100000
-#define OHCI1394_cycle64Seconds 0x00200000
-#define OHCI1394_cycleLost 0x00400000
-#define OHCI1394_cycleInconsistent 0x00800000
-#define OHCI1394_unrecoverableError 0x01000000
-#define OHCI1394_cycleTooLong 0x02000000
-#define OHCI1394_phyRegRcvd 0x04000000
-#define OHCI1394_masterIntEnable 0x80000000
-
-/* DMA Control flags */
-#define DMA_CTL_OUTPUT_MORE 0x00000000
-#define DMA_CTL_OUTPUT_LAST 0x10000000
-#define DMA_CTL_INPUT_MORE 0x20000000
-#define DMA_CTL_INPUT_LAST 0x30000000
-#define DMA_CTL_UPDATE 0x08000000
-#define DMA_CTL_IMMEDIATE 0x02000000
-#define DMA_CTL_IRQ 0x00300000
-#define DMA_CTL_BRANCH 0x000c0000
-#define DMA_CTL_WAIT 0x00030000
-
-/* OHCI evt_* error types, table 3-2 of the OHCI 1.1 spec. */
-#define EVT_NO_STATUS 0x0 /* No event status */
-#define EVT_RESERVED_A 0x1 /* Reserved, not used !!! */
-#define EVT_LONG_PACKET 0x2 /* The recv data was longer than the buf */
-#define EVT_MISSING_ACK 0x3 /* A subaction gap was detected before an ack
- arrived, or recv'd ack had a parity error */
-#define EVT_UNDERRUN 0x4 /* Underrun on corresponding FIFO, packet
- truncated */
-#define EVT_OVERRUN 0x5 /* A recv FIFO overflowed on reception of ISO
- packet */
-#define EVT_DESCRIPTOR_READ 0x6 /* An unrecoverable error occurred while host was
- reading a descriptor block */
-#define EVT_DATA_READ 0x7 /* An error occurred while host controller was
- attempting to read from host memory in the data
- stage of descriptor processing */
-#define EVT_DATA_WRITE 0x8 /* An error occurred while host controller was
- attempting to write either during the data stage
- of descriptor processing, or when processing a single
- 16-bit host memory write */
-#define EVT_BUS_RESET 0x9 /* Identifies a PHY packet in the recv buffer as
- being a synthesized bus reset packet */
-#define EVT_TIMEOUT 0xa /* Indicates that the asynchronous transmit response
- packet expired and was not transmitted, or that an
- IT DMA context experienced a skip processing overflow */
-#define EVT_TCODE_ERR 0xb /* A bad tCode is associated with this packet.
- The packet was flushed */
-#define EVT_RESERVED_B 0xc /* Reserved, not used !!! */
-#define EVT_RESERVED_C 0xd /* Reserved, not used !!! */
-#define EVT_UNKNOWN 0xe /* An error condition has occurred that cannot be
- represented by any other event codes defined herein. */
-#define EVT_FLUSHED 0xf /* Sent by the link side of the output FIFO when asynchronous
- packets are being flushed due to a bus reset. */
-
-#define OHCI1394_TCODE_PHY 0xE
-
-/* Node offset map (phys DMA area, posted write area).
- * The value of OHCI1394_PHYS_UPPER_BOUND_PROGRAMMED may be modified but must
- * be lower than OHCI1394_MIDDLE_ADDRESS_SPACE.
- * OHCI1394_PHYS_UPPER_BOUND_FIXED and OHCI1394_MIDDLE_ADDRESS_SPACE are
- * constants given by the OHCI spec.
- */
-#define OHCI1394_PHYS_UPPER_BOUND_FIXED 0x000100000000ULL /* 4 GB */
-#define OHCI1394_PHYS_UPPER_BOUND_PROGRAMMED 0x010000000000ULL /* 1 TB */
-#define OHCI1394_MIDDLE_ADDRESS_SPACE 0xffff00000000ULL
-
-void ohci1394_init_iso_tasklet(struct ohci1394_iso_tasklet *tasklet,
- int type,
- void (*func)(unsigned long),
- unsigned long data);
-int ohci1394_register_iso_tasklet(struct ti_ohci *ohci,
- struct ohci1394_iso_tasklet *tasklet);
-void ohci1394_unregister_iso_tasklet(struct ti_ohci *ohci,
- struct ohci1394_iso_tasklet *tasklet);
-int ohci1394_stop_context(struct ti_ohci *ohci, int reg, char *msg);
-struct ti_ohci *ohci1394_get_struct(int card_num);
-
-#endif
diff --git a/drivers/ieee1394/pcilynx.c b/drivers/ieee1394/pcilynx.c
deleted file mode 100644
index bf47fee79808..000000000000
--- a/drivers/ieee1394/pcilynx.c
+++ /dev/null
@@ -1,1554 +0,0 @@
-/*
- * pcilynx.c - Texas Instruments PCILynx driver
- * Copyright (C) 1999,2000 Andreas Bombe <andreas.bombe@munich.netsurf.de>,
- * Stephan Linz <linz@mazet.de>
- * Manfred Weihs <weihs@ict.tuwien.ac.at>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software Foundation,
- * Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
- */
-
-/*
- * Contributions:
- *
- * Manfred Weihs <weihs@ict.tuwien.ac.at>
- * reading bus info block (containing GUID) from serial
- * eeprom via i2c and storing it in config ROM
- * Reworked code for initiating bus resets
- * (long, short, with or without hold-off)
- * Enhancements in async and iso send code
- */
-
-#include <linux/kernel.h>
-#include <linux/slab.h>
-#include <linux/interrupt.h>
-#include <linux/wait.h>
-#include <linux/errno.h>
-#include <linux/module.h>
-#include <linux/moduleparam.h>
-#include <linux/init.h>
-#include <linux/pci.h>
-#include <linux/fs.h>
-#include <linux/poll.h>
-#include <linux/kdev_t.h>
-#include <linux/dma-mapping.h>
-#include <asm/byteorder.h>
-#include <asm/atomic.h>
-#include <asm/io.h>
-#include <asm/uaccess.h>
-#include <asm/irq.h>
-
-#include "csr1212.h"
-#include "ieee1394.h"
-#include "ieee1394_types.h"
-#include "hosts.h"
-#include "ieee1394_core.h"
-#include "highlevel.h"
-#include "pcilynx.h"
-
-#include <linux/i2c.h>
-#include <linux/i2c-algo-bit.h>
-
-/* print general (card independent) information */
-#define PRINT_G(level, fmt, args...) printk(level "pcilynx: " fmt "\n" , ## args)
-/* print card specific information */
-#define PRINT(level, card, fmt, args...) printk(level "pcilynx%d: " fmt "\n" , card , ## args)
-
-#ifdef CONFIG_IEEE1394_VERBOSEDEBUG
-#define PRINT_GD(level, fmt, args...) printk(level "pcilynx: " fmt "\n" , ## args)
-#define PRINTD(level, card, fmt, args...) printk(level "pcilynx%d: " fmt "\n" , card , ## args)
-#else
-#define PRINT_GD(level, fmt, args...) do {} while (0)
-#define PRINTD(level, card, fmt, args...) do {} while (0)
-#endif
-
-
-/* Module Parameters */
-static int skip_eeprom;
-module_param(skip_eeprom, int, 0444);
-MODULE_PARM_DESC(skip_eeprom, "Use generic bus info block instead of serial eeprom (default = 0).");
-
-
-static struct hpsb_host_driver lynx_driver;
-static unsigned int card_id;
-
-
-
-/*
- * I2C stuff
- */
-
-/* the i2c stuff was inspired by i2c-philips-par.c */
-
-static void bit_setscl(void *data, int state)
-{
- if (state) {
- ((struct ti_lynx *) data)->i2c_driven_state |= 0x00000040;
- } else {
- ((struct ti_lynx *) data)->i2c_driven_state &= ~0x00000040;
- }
- reg_write((struct ti_lynx *) data, SERIAL_EEPROM_CONTROL, ((struct ti_lynx *) data)->i2c_driven_state);
-}
-
-static void bit_setsda(void *data, int state)
-{
- if (state) {
- ((struct ti_lynx *) data)->i2c_driven_state |= 0x00000010;
- } else {
- ((struct ti_lynx *) data)->i2c_driven_state &= ~0x00000010;
- }
- reg_write((struct ti_lynx *) data, SERIAL_EEPROM_CONTROL, ((struct ti_lynx *) data)->i2c_driven_state);
-}
-
-static int bit_getscl(void *data)
-{
- return reg_read((struct ti_lynx *) data, SERIAL_EEPROM_CONTROL) & 0x00000040;
-}
-
-static int bit_getsda(void *data)
-{
- return reg_read((struct ti_lynx *) data, SERIAL_EEPROM_CONTROL) & 0x00000010;
-}
-
-static struct i2c_algo_bit_data bit_data = {
- .setsda = bit_setsda,
- .setscl = bit_setscl,
- .getsda = bit_getsda,
- .getscl = bit_getscl,
- .udelay = 5,
- .timeout = 100,
-};
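-
-/* Added note: bit_data is a template for the i2c-algo-bit layer; the
- * void *data passed to the callbacks above is a struct ti_lynx pointer,
- * presumably supplied through the .data field of a per-card copy when
- * an i2c adapter is registered to read the serial EEPROM (that setup is
- * not part of this hunk). */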
-
-
-/*
- * PCL handling functions.
- */
-
-static pcl_t alloc_pcl(struct ti_lynx *lynx)
-{
- u8 m;
- int i, j;
-
- spin_lock(&lynx->lock);
- /* FIXME - use ffz() to make this readable */
- for (i = 0; i < (LOCALRAM_SIZE / 1024); i++) {
- m = lynx->pcl_bmap[i];
- for (j = 0; j < 8; j++) {
- if (m & 1<<j) {
- continue;
- }
- m |= 1<<j;
- lynx->pcl_bmap[i] = m;
- spin_unlock(&lynx->lock);
- return 8 * i + j;
- }
- }
- spin_unlock(&lynx->lock);
-
- return -1;
-}
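-
-/* Sketch only (not compiled here): the FIXME above could use ffz() to
- * find the first free PCL in a bitmap byte, roughly:
- *
- *   j = ffz(lynx->pcl_bmap[i]);
- *   if (j < 8) {
- *           lynx->pcl_bmap[i] |= 1 << j;
- *           ... return 8 * i + j under the same lock ...
- *   }
- */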
-
-
-#if 0
-static void free_pcl(struct ti_lynx *lynx, pcl_t pclid)
-{
- int off, bit;
-
- off = pclid / 8;
- bit = pclid % 8;
-
- if (pclid < 0) {
- return;
- }
-
- spin_lock(&lynx->lock);
- if (lynx->pcl_bmap[off] & 1<<bit) {
- lynx->pcl_bmap[off] &= ~(1<<bit);
- } else {
- PRINT(KERN_ERR, lynx->id,
- "attempted to free unallocated PCL %d", pclid);
- }
- spin_unlock(&lynx->lock);
-}
-
-/* functions useful for debugging */
-static void pretty_print_pcl(const struct ti_pcl *pcl)
-{
- int i;
-
- printk("PCL next %08x, userdata %08x, status %08x, remtrans %08x, nextbuf %08x\n",
- pcl->next, pcl->user_data, pcl->pcl_status,
- pcl->remaining_transfer_count, pcl->next_data_buffer);
-
- printk("PCL");
- for (i=0; i<13; i++) {
- printk(" c%x:%08x d%x:%08x",
- i, pcl->buffer[i].control, i, pcl->buffer[i].pointer);
- if (!(i & 0x3) && (i != 12)) printk("\nPCL");
- }
- printk("\n");
-}
-
-static void print_pcl(const struct ti_lynx *lynx, pcl_t pclid)
-{
- struct ti_pcl pcl;
-
- get_pcl(lynx, pclid, &pcl);
- pretty_print_pcl(&pcl);
-}
-#endif
-
-
-
-/***********************************
- * IEEE-1394 functionality section *
- ***********************************/
-
-
-static int get_phy_reg(struct ti_lynx *lynx, int addr)
-{
- int retval;
- int i = 0;
-
- unsigned long flags;
-
- if (addr > 15) {
- PRINT(KERN_ERR, lynx->id,
- "%s: PHY register address %d out of range",
- __func__, addr);
- return -1;
- }
-
- spin_lock_irqsave(&lynx->phy_reg_lock, flags);
-
- reg_write(lynx, LINK_PHY, LINK_PHY_READ | LINK_PHY_ADDR(addr));
- do {
- retval = reg_read(lynx, LINK_PHY);
-
- if (i > 10000) {
- PRINT(KERN_ERR, lynx->id, "%s: runaway loop, aborting",
- __func__);
- retval = -1;
- break;
- }
- i++;
- } while ((retval & 0xf00) != LINK_PHY_RADDR(addr));
-
- reg_write(lynx, LINK_INT_STATUS, LINK_INT_PHY_REG_RCVD);
- spin_unlock_irqrestore(&lynx->phy_reg_lock, flags);
-
- if (retval != -1) {
- return retval & 0xff;
- } else {
- return -1;
- }
-}
-
-static int set_phy_reg(struct ti_lynx *lynx, int addr, int val)
-{
- unsigned long flags;
-
- if (addr > 15) {
- PRINT(KERN_ERR, lynx->id,
- "%s: PHY register address %d out of range", __func__, addr);
- return -1;
- }
-
- if (val > 0xff) {
- PRINT(KERN_ERR, lynx->id,
- "%s: PHY register value %d out of range", __func__, val);
- return -1;
- }
-
- spin_lock_irqsave(&lynx->phy_reg_lock, flags);
-
- reg_write(lynx, LINK_PHY, LINK_PHY_WRITE | LINK_PHY_ADDR(addr)
- | LINK_PHY_WDATA(val));
-
- spin_unlock_irqrestore(&lynx->phy_reg_lock, flags);
-
- return 0;
-}
-
-static int sel_phy_reg_page(struct ti_lynx *lynx, int page)
-{
- int reg;
-
- if (page > 7) {
- PRINT(KERN_ERR, lynx->id,
- "%s: PHY page %d out of range", __func__, page);
- return -1;
- }
-
- reg = get_phy_reg(lynx, 7);
- if (reg != -1) {
- reg &= 0x1f;
- reg |= (page << 5);
- set_phy_reg(lynx, 7, reg);
- return 0;
- } else {
- return -1;
- }
-}
-
-#if 0 /* not needed at this time */
-static int sel_phy_reg_port(struct ti_lynx *lynx, int port)
-{
- int reg;
-
- if (port > 15) {
- PRINT(KERN_ERR, lynx->id,
- "%s: PHY port %d out of range", __func__, port);
- return -1;
- }
-
- reg = get_phy_reg(lynx, 7);
- if (reg != -1) {
- reg &= 0xf0;
- reg |= port;
- set_phy_reg(lynx, 7, reg);
- return 0;
- } else {
- return -1;
- }
-}
-#endif
-
-static u32 get_phy_vendorid(struct ti_lynx *lynx)
-{
- u32 pvid = 0;
- sel_phy_reg_page(lynx, 1);
- pvid |= (get_phy_reg(lynx, 10) << 16);
- pvid |= (get_phy_reg(lynx, 11) << 8);
- pvid |= get_phy_reg(lynx, 12);
- PRINT(KERN_INFO, lynx->id, "PHY vendor id 0x%06x", pvid);
- return pvid;
-}
-
-static u32 get_phy_productid(struct ti_lynx *lynx)
-{
- u32 id = 0;
- sel_phy_reg_page(lynx, 1);
- id |= (get_phy_reg(lynx, 13) << 16);
- id |= (get_phy_reg(lynx, 14) << 8);
- id |= get_phy_reg(lynx, 15);
- PRINT(KERN_INFO, lynx->id, "PHY product id 0x%06x", id);
- return id;
-}
-
-static quadlet_t generate_own_selfid(struct ti_lynx *lynx,
- struct hpsb_host *host)
-{
- quadlet_t lsid;
- char phyreg[7];
- int i;
-
- phyreg[0] = lynx->phy_reg0;
- for (i = 1; i < 7; i++) {
- phyreg[i] = get_phy_reg(lynx, i);
- }
-
- /* FIXME? We assume a TSB21LV03A phy here. This code doesn't support
- more than 3 ports on the PHY anyway. */
-
- lsid = 0x80400000 | ((phyreg[0] & 0xfc) << 22);
- lsid |= (phyreg[1] & 0x3f) << 16; /* gap count */
- lsid |= (phyreg[2] & 0xc0) << 8; /* max speed */
- if (!hpsb_disable_irm)
- lsid |= (phyreg[6] & 0x01) << 11; /* contender (phy dependent) */
- /* lsid |= 1 << 11; *//* set contender (hack) */
- lsid |= (phyreg[6] & 0x10) >> 3; /* initiated reset */
-
- for (i = 0; i < (phyreg[2] & 0xf); i++) { /* ports */
- if (phyreg[3 + i] & 0x4) {
- lsid |= (((phyreg[3 + i] & 0x8) | 0x10) >> 3)
- << (6 - i*2);
- } else {
- lsid |= 1 << (6 - i*2);
- }
- }
-
- cpu_to_be32s(&lsid);
- PRINT(KERN_DEBUG, lynx->id, "generated own selfid 0x%x", lsid);
- return lsid;
-}
-
-static void handle_selfid(struct ti_lynx *lynx, struct hpsb_host *host)
-{
- quadlet_t *q = lynx->rcv_page;
- int phyid, isroot, size;
- quadlet_t lsid = 0;
- int i;
-
- if (lynx->phy_reg0 == -1 || lynx->selfid_size == -1) return;
-
- size = lynx->selfid_size;
- phyid = lynx->phy_reg0;
-
- i = (size > 16 ? 16 : size) / 4 - 1;
- while (i >= 0) {
- cpu_to_be32s(&q[i]);
- i--;
- }
-
- if (!lynx->phyic.reg_1394a) {
- lsid = generate_own_selfid(lynx, host);
- }
-
- isroot = (phyid & 2) != 0;
- phyid >>= 2;
- PRINT(KERN_INFO, lynx->id, "SelfID process finished (phyid %d, %s)",
- phyid, (isroot ? "root" : "not root"));
- reg_write(lynx, LINK_ID, (0xffc0 | phyid) << 16);
-
- if (!lynx->phyic.reg_1394a && !size) {
- hpsb_selfid_received(host, lsid);
- }
-
- while (size > 0) {
- struct selfid *sid = (struct selfid *)q;
-
- if (!lynx->phyic.reg_1394a && !sid->extended
- && (sid->phy_id == (phyid + 1))) {
- hpsb_selfid_received(host, lsid);
- }
-
- if (q[0] == ~q[1]) {
- PRINT(KERN_DEBUG, lynx->id, "SelfID packet 0x%x rcvd",
- q[0]);
- hpsb_selfid_received(host, q[0]);
- } else {
- PRINT(KERN_INFO, lynx->id,
- "inconsistent selfid 0x%x/0x%x", q[0], q[1]);
- }
- q += 2;
- size -= 8;
- }
-
- if (!lynx->phyic.reg_1394a && isroot && phyid != 0) {
- hpsb_selfid_received(host, lsid);
- }
-
- hpsb_selfid_complete(host, phyid, isroot);
-
- if (host->in_bus_reset) return; /* in bus reset again */
-
- if (isroot) reg_set_bits(lynx, LINK_CONTROL, LINK_CONTROL_CYCMASTER); //FIXME: I do not think we need this here
- reg_set_bits(lynx, LINK_CONTROL,
- LINK_CONTROL_RCV_CMP_VALID | LINK_CONTROL_TX_ASYNC_EN
- | LINK_CONTROL_RX_ASYNC_EN | LINK_CONTROL_CYCTIMEREN);
-}
-
-
-
-/* This must be called with the respective queue_lock held. */
-static void send_next(struct ti_lynx *lynx, int what)
-{
- struct ti_pcl pcl;
- struct lynx_send_data *d;
- struct hpsb_packet *packet;
-
-#if 0 /* has been removed from ieee1394 core */
- d = (what == hpsb_iso ? &lynx->iso_send : &lynx->async);
-#else
- d = &lynx->async;
-#endif
- if (!list_empty(&d->pcl_queue)) {
- PRINT(KERN_ERR, lynx->id, "trying to queue a new packet in nonempty fifo");
- BUG();
- }
-
- packet = driver_packet(d->queue.next);
- list_move_tail(&packet->driver_list, &d->pcl_queue);
-
- d->header_dma = pci_map_single(lynx->dev, packet->header,
- packet->header_size, PCI_DMA_TODEVICE);
- if (packet->data_size) {
- d->data_dma = pci_map_single(lynx->dev, packet->data,
- packet->data_size,
- PCI_DMA_TODEVICE);
- } else {
- d->data_dma = 0;
- }
-
- pcl.next = PCL_NEXT_INVALID;
- pcl.async_error_next = PCL_NEXT_INVALID;
- pcl.pcl_status = 0;
- pcl.buffer[0].control = packet->speed_code << 14 | packet->header_size;
-#ifndef __BIG_ENDIAN
- pcl.buffer[0].control |= PCL_BIGENDIAN;
-#endif
- pcl.buffer[0].pointer = d->header_dma;
- pcl.buffer[1].control = PCL_LAST_BUFF | packet->data_size;
- pcl.buffer[1].pointer = d->data_dma;
-
- switch (packet->type) {
- case hpsb_async:
- pcl.buffer[0].control |= PCL_CMD_XMT;
- break;
-#if 0 /* has been removed from ieee1394 core */
- case hpsb_iso:
- pcl.buffer[0].control |= PCL_CMD_XMT | PCL_ISOMODE;
- break;
-#endif
- case hpsb_raw:
- pcl.buffer[0].control |= PCL_CMD_UNFXMT;
- break;
- }
-
- put_pcl(lynx, d->pcl, &pcl);
- run_pcl(lynx, d->pcl_start, d->channel);
-}
-
-
-/* called from subsystem core */
-static int lynx_transmit(struct hpsb_host *host, struct hpsb_packet *packet)
-{
- struct ti_lynx *lynx = host->hostdata;
- struct lynx_send_data *d;
- unsigned long flags;
-
- if (packet->data_size >= 4096) {
- PRINT(KERN_ERR, lynx->id, "transmit packet data too big (%Zd)",
- packet->data_size);
- return -EOVERFLOW;
- }
-
- switch (packet->type) {
- case hpsb_async:
- case hpsb_raw:
- d = &lynx->async;
- break;
-#if 0 /* has been removed from ieee1394 core */
- case hpsb_iso:
- d = &lynx->iso_send;
- break;
-#endif
- default:
- PRINT(KERN_ERR, lynx->id, "invalid packet type %d",
- packet->type);
- return -EINVAL;
- }
-
- if (packet->tcode == TCODE_WRITEQ
- || packet->tcode == TCODE_READQ_RESPONSE) {
- cpu_to_be32s(&packet->header[3]);
- }
-
- spin_lock_irqsave(&d->queue_lock, flags);
-
- list_add_tail(&packet->driver_list, &d->queue);
- if (list_empty(&d->pcl_queue))
- send_next(lynx, packet->type);
-
- spin_unlock_irqrestore(&d->queue_lock, flags);
-
- return 0;
-}
-
-
-/* called from subsystem core */
-static int lynx_devctl(struct hpsb_host *host, enum devctl_cmd cmd, int arg)
-{
- struct ti_lynx *lynx = host->hostdata;
- int retval = 0;
- struct hpsb_packet *packet;
- LIST_HEAD(packet_list);
- unsigned long flags;
- int phy_reg;
-
- switch (cmd) {
- case RESET_BUS:
- if (reg_read(lynx, LINK_INT_STATUS) & LINK_INT_PHY_BUSRESET) {
- retval = 0;
- break;
- }
-
- switch (arg) {
- case SHORT_RESET:
- if (lynx->phyic.reg_1394a) {
- phy_reg = get_phy_reg(lynx, 5);
- if (phy_reg == -1) {
- PRINT(KERN_ERR, lynx->id, "cannot reset bus, because read phy reg failed");
- retval = -1;
- break;
- }
- phy_reg |= 0x40;
-
- PRINT(KERN_INFO, lynx->id, "resetting bus (short bus reset) on request");
-
- lynx->selfid_size = -1;
- lynx->phy_reg0 = -1;
- set_phy_reg(lynx, 5, phy_reg); /* set ISBR */
- break;
- } else {
- PRINT(KERN_INFO, lynx->id, "cannot do short bus reset, because of old phy");
- /* fall through to long bus reset */
- }
- case LONG_RESET:
- phy_reg = get_phy_reg(lynx, 1);
- if (phy_reg == -1) {
- PRINT(KERN_ERR, lynx->id, "cannot reset bus, because read phy reg failed");
- retval = -1;
- break;
- }
- phy_reg |= 0x40;
-
- PRINT(KERN_INFO, lynx->id, "resetting bus (long bus reset) on request");
-
- lynx->selfid_size = -1;
- lynx->phy_reg0 = -1;
- set_phy_reg(lynx, 1, phy_reg); /* clear RHB, set IBR */
- break;
- case SHORT_RESET_NO_FORCE_ROOT:
- if (lynx->phyic.reg_1394a) {
- phy_reg = get_phy_reg(lynx, 1);
- if (phy_reg == -1) {
- PRINT(KERN_ERR, lynx->id, "cannot reset bus, because read phy reg failed");
- retval = -1;
- break;
- }
- if (phy_reg & 0x80) {
- phy_reg &= ~0x80;
- set_phy_reg(lynx, 1, phy_reg); /* clear RHB */
- }
-
- phy_reg = get_phy_reg(lynx, 5);
- if (phy_reg == -1) {
- PRINT(KERN_ERR, lynx->id, "cannot reset bus, because read phy reg failed");
- retval = -1;
- break;
- }
- phy_reg |= 0x40;
-
- PRINT(KERN_INFO, lynx->id, "resetting bus (short bus reset, no force_root) on request");
-
- lynx->selfid_size = -1;
- lynx->phy_reg0 = -1;
- set_phy_reg(lynx, 5, phy_reg); /* set ISBR */
- break;
- } else {
- PRINT(KERN_INFO, lynx->id, "cannot do short bus reset, because of old phy");
- /* fall through to long bus reset */
- }
- case LONG_RESET_NO_FORCE_ROOT:
- phy_reg = get_phy_reg(lynx, 1);
- if (phy_reg == -1) {
- PRINT(KERN_ERR, lynx->id, "cannot reset bus, because read phy reg failed");
- retval = -1;
- break;
- }
- phy_reg &= ~0x80;
- phy_reg |= 0x40;
-
- PRINT(KERN_INFO, lynx->id, "resetting bus (long bus reset, no force_root) on request");
-
- lynx->selfid_size = -1;
- lynx->phy_reg0 = -1;
- set_phy_reg(lynx, 1, phy_reg); /* clear RHB, set IBR */
- break;
- case SHORT_RESET_FORCE_ROOT:
- if (lynx->phyic.reg_1394a) {
- phy_reg = get_phy_reg(lynx, 1);
- if (phy_reg == -1) {
- PRINT(KERN_ERR, lynx->id, "cannot reset bus, because read phy reg failed");
- retval = -1;
- break;
- }
- if (!(phy_reg & 0x80)) {
- phy_reg |= 0x80;
- set_phy_reg(lynx, 1, phy_reg); /* set RHB */
- }
-
- phy_reg = get_phy_reg(lynx, 5);
- if (phy_reg == -1) {
- PRINT(KERN_ERR, lynx->id, "cannot reset bus, because read phy reg failed");
- retval = -1;
- break;
- }
- phy_reg |= 0x40;
-
- PRINT(KERN_INFO, lynx->id, "resetting bus (short bus reset, force_root set) on request");
-
- lynx->selfid_size = -1;
- lynx->phy_reg0 = -1;
- set_phy_reg(lynx, 5, phy_reg); /* set ISBR */
- break;
- } else {
- PRINT(KERN_INFO, lynx->id, "cannot do short bus reset, because of old phy");
- /* fall through to long bus reset */
- }
- case LONG_RESET_FORCE_ROOT:
- phy_reg = get_phy_reg(lynx, 1);
- if (phy_reg == -1) {
- PRINT(KERN_ERR, lynx->id, "cannot reset bus, because read phy reg failed");
- retval = -1;
- break;
- }
- phy_reg |= 0xc0;
-
- PRINT(KERN_INFO, lynx->id, "resetting bus (long bus reset, force_root set) on request");
-
- lynx->selfid_size = -1;
- lynx->phy_reg0 = -1;
- set_phy_reg(lynx, 1, phy_reg); /* set IBR and RHB */
- break;
- default:
- PRINT(KERN_ERR, lynx->id, "unknown argument for reset_bus command %d", arg);
- retval = -1;
- }
-
- break;
-
- case GET_CYCLE_COUNTER:
- retval = reg_read(lynx, CYCLE_TIMER);
- break;
-
- case SET_CYCLE_COUNTER:
- reg_write(lynx, CYCLE_TIMER, arg);
- break;
-
- case SET_BUS_ID:
- reg_write(lynx, LINK_ID,
- (arg << 22) | (reg_read(lynx, LINK_ID) & 0x003f0000));
- break;
-
- case ACT_CYCLE_MASTER:
- if (arg) {
- reg_set_bits(lynx, LINK_CONTROL,
- LINK_CONTROL_CYCMASTER);
- } else {
- reg_clear_bits(lynx, LINK_CONTROL,
- LINK_CONTROL_CYCMASTER);
- }
- break;
-
- case CANCEL_REQUESTS:
- spin_lock_irqsave(&lynx->async.queue_lock, flags);
-
- reg_write(lynx, DMA_CHAN_CTRL(CHANNEL_ASYNC_SEND), 0);
- list_splice_init(&lynx->async.queue, &packet_list);
-
- if (list_empty(&lynx->async.pcl_queue)) {
- spin_unlock_irqrestore(&lynx->async.queue_lock, flags);
- PRINTD(KERN_DEBUG, lynx->id, "no async packet in PCL to cancel");
- } else {
- struct ti_pcl pcl;
- u32 ack;
-
- PRINT(KERN_INFO, lynx->id, "cancelling async packet, that was already in PCL");
-
- get_pcl(lynx, lynx->async.pcl, &pcl);
-
- packet = driver_packet(lynx->async.pcl_queue.next);
- list_del_init(&packet->driver_list);
-
- pci_unmap_single(lynx->dev, lynx->async.header_dma,
- packet->header_size, PCI_DMA_TODEVICE);
- if (packet->data_size) {
- pci_unmap_single(lynx->dev, lynx->async.data_dma,
- packet->data_size, PCI_DMA_TODEVICE);
- }
-
- spin_unlock_irqrestore(&lynx->async.queue_lock, flags);
-
- if (pcl.pcl_status & DMA_CHAN_STAT_PKTCMPL) {
- if (pcl.pcl_status & DMA_CHAN_STAT_SPECIALACK) {
- ack = (pcl.pcl_status >> 15) & 0xf;
- PRINTD(KERN_INFO, lynx->id, "special ack %d", ack);
- ack = (ack == 1 ? ACKX_TIMEOUT : ACKX_SEND_ERROR);
- } else {
- ack = (pcl.pcl_status >> 15) & 0xf;
- }
- } else {
- PRINT(KERN_INFO, lynx->id, "async packet was not completed");
- ack = ACKX_ABORTED;
- }
- hpsb_packet_sent(host, packet, ack);
- }
-
- while (!list_empty(&packet_list)) {
- packet = driver_packet(packet_list.next);
- list_del_init(&packet->driver_list);
- hpsb_packet_sent(host, packet, ACKX_ABORTED);
- }
-
- break;
-#if 0 /* has been removed from ieee1394 core */
- case ISO_LISTEN_CHANNEL:
- spin_lock_irqsave(&lynx->iso_rcv.lock, flags);
-
- if (lynx->iso_rcv.chan_count++ == 0) {
- reg_write(lynx, DMA_WORD1_CMP_ENABLE(CHANNEL_ISO_RCV),
- DMA_WORD1_CMP_ENABLE_MASTER);
- }
-
- spin_unlock_irqrestore(&lynx->iso_rcv.lock, flags);
- break;
-
- case ISO_UNLISTEN_CHANNEL:
- spin_lock_irqsave(&lynx->iso_rcv.lock, flags);
-
- if (--lynx->iso_rcv.chan_count == 0) {
- reg_write(lynx, DMA_WORD1_CMP_ENABLE(CHANNEL_ISO_RCV),
- 0);
- }
-
- spin_unlock_irqrestore(&lynx->iso_rcv.lock, flags);
- break;
-#endif
- default:
- PRINT(KERN_ERR, lynx->id, "unknown devctl command %d", cmd);
- retval = -1;
- }
-
- return retval;
-}
-
-
-/***************************************
- * IEEE-1394 functionality section END *
- ***************************************/
-
-
-/********************************************************
- * Global stuff (interrupt handler, init/shutdown code) *
- ********************************************************/
-
-
-static irqreturn_t lynx_irq_handler(int irq, void *dev_id)
-{
- struct ti_lynx *lynx = (struct ti_lynx *)dev_id;
- struct hpsb_host *host = lynx->host;
- u32 intmask;
- u32 linkint;
-
- linkint = reg_read(lynx, LINK_INT_STATUS);
- intmask = reg_read(lynx, PCI_INT_STATUS);
-
- if (!(intmask & PCI_INT_INT_PEND))
- return IRQ_NONE;
-
- PRINTD(KERN_DEBUG, lynx->id, "interrupt: 0x%08x / 0x%08x", intmask,
- linkint);
-
- reg_write(lynx, LINK_INT_STATUS, linkint);
- reg_write(lynx, PCI_INT_STATUS, intmask);
-
- if (intmask & PCI_INT_1394) {
- if (linkint & LINK_INT_PHY_TIMEOUT) {
- PRINT(KERN_INFO, lynx->id, "PHY timeout occurred");
- }
- if (linkint & LINK_INT_PHY_BUSRESET) {
- PRINT(KERN_INFO, lynx->id, "bus reset interrupt");
- lynx->selfid_size = -1;
- lynx->phy_reg0 = -1;
- if (!host->in_bus_reset)
- hpsb_bus_reset(host);
- }
- if (linkint & LINK_INT_PHY_REG_RCVD) {
- u32 reg;
-
- spin_lock(&lynx->phy_reg_lock);
- reg = reg_read(lynx, LINK_PHY);
- spin_unlock(&lynx->phy_reg_lock);
-
- if (!host->in_bus_reset) {
- PRINT(KERN_INFO, lynx->id,
- "phy reg received without reset");
- } else if (reg & 0xf00) {
- PRINT(KERN_INFO, lynx->id,
- "unsolicited phy reg %d received",
- (reg >> 8) & 0xf);
- } else {
- lynx->phy_reg0 = reg & 0xff;
- handle_selfid(lynx, host);
- }
- }
- if (linkint & LINK_INT_ISO_STUCK) {
- PRINT(KERN_INFO, lynx->id, "isochronous transmitter stuck");
- }
- if (linkint & LINK_INT_ASYNC_STUCK) {
- PRINT(KERN_INFO, lynx->id, "asynchronous transmitter stuck");
- }
- if (linkint & LINK_INT_SENT_REJECT) {
- PRINT(KERN_INFO, lynx->id, "sent reject");
- }
- if (linkint & LINK_INT_TX_INVALID_TC) {
- PRINT(KERN_INFO, lynx->id, "invalid transaction code");
- }
- if (linkint & LINK_INT_GRF_OVERFLOW) {
- /* flush FIFO if overflow happens during reset */
- if (host->in_bus_reset)
- reg_write(lynx, FIFO_CONTROL,
- FIFO_CONTROL_GRF_FLUSH);
- PRINT(KERN_INFO, lynx->id, "GRF overflow");
- }
- if (linkint & LINK_INT_ITF_UNDERFLOW) {
- PRINT(KERN_INFO, lynx->id, "ITF underflow");
- }
- if (linkint & LINK_INT_ATF_UNDERFLOW) {
- PRINT(KERN_INFO, lynx->id, "ATF underflow");
- }
- }
-
- if (intmask & PCI_INT_DMA_HLT(CHANNEL_ISO_RCV)) {
- PRINTD(KERN_DEBUG, lynx->id, "iso receive");
-
- spin_lock(&lynx->iso_rcv.lock);
-
- lynx->iso_rcv.stat[lynx->iso_rcv.next] =
- reg_read(lynx, DMA_CHAN_STAT(CHANNEL_ISO_RCV));
-
- lynx->iso_rcv.used++;
- lynx->iso_rcv.next = (lynx->iso_rcv.next + 1) % NUM_ISORCV_PCL;
-
- if ((lynx->iso_rcv.next == lynx->iso_rcv.last)
- || !lynx->iso_rcv.chan_count) {
- PRINTD(KERN_DEBUG, lynx->id, "stopped");
- reg_write(lynx, DMA_WORD1_CMP_ENABLE(CHANNEL_ISO_RCV), 0);
- }
-
- run_sub_pcl(lynx, lynx->iso_rcv.pcl_start, lynx->iso_rcv.next,
- CHANNEL_ISO_RCV);
-
- spin_unlock(&lynx->iso_rcv.lock);
-
- tasklet_schedule(&lynx->iso_rcv.tq);
- }
-
- if (intmask & PCI_INT_DMA_HLT(CHANNEL_ASYNC_SEND)) {
- PRINTD(KERN_DEBUG, lynx->id, "async sent");
- spin_lock(&lynx->async.queue_lock);
-
- if (list_empty(&lynx->async.pcl_queue)) {
- spin_unlock(&lynx->async.queue_lock);
- PRINT(KERN_WARNING, lynx->id, "async dma halted, but no queued packet (maybe it was cancelled)");
- } else {
- struct ti_pcl pcl;
- u32 ack;
- struct hpsb_packet *packet;
-
- get_pcl(lynx, lynx->async.pcl, &pcl);
-
- packet = driver_packet(lynx->async.pcl_queue.next);
- list_del_init(&packet->driver_list);
-
- pci_unmap_single(lynx->dev, lynx->async.header_dma,
- packet->header_size, PCI_DMA_TODEVICE);
- if (packet->data_size) {
- pci_unmap_single(lynx->dev, lynx->async.data_dma,
- packet->data_size, PCI_DMA_TODEVICE);
- }
-
- if (!list_empty(&lynx->async.queue)) {
- send_next(lynx, hpsb_async);
- }
-
- spin_unlock(&lynx->async.queue_lock);
-
- if (pcl.pcl_status & DMA_CHAN_STAT_PKTCMPL) {
- if (pcl.pcl_status & DMA_CHAN_STAT_SPECIALACK) {
- ack = (pcl.pcl_status >> 15) & 0xf;
- PRINTD(KERN_INFO, lynx->id, "special ack %d", ack);
- ack = (ack == 1 ? ACKX_TIMEOUT : ACKX_SEND_ERROR);
- } else {
- ack = (pcl.pcl_status >> 15) & 0xf;
- }
- } else {
- PRINT(KERN_INFO, lynx->id, "async packet was not completed");
- ack = ACKX_SEND_ERROR;
- }
- hpsb_packet_sent(host, packet, ack);
- }
- }
-
- if (intmask & PCI_INT_DMA_HLT(CHANNEL_ISO_SEND)) {
- PRINTD(KERN_DEBUG, lynx->id, "iso sent");
- spin_lock(&lynx->iso_send.queue_lock);
-
- if (list_empty(&lynx->iso_send.pcl_queue)) {
- spin_unlock(&lynx->iso_send.queue_lock);
- PRINT(KERN_ERR, lynx->id, "iso send dma halted, but no queued packet");
- } else {
- struct ti_pcl pcl;
- u32 ack;
- struct hpsb_packet *packet;
-
- get_pcl(lynx, lynx->iso_send.pcl, &pcl);
-
- packet = driver_packet(lynx->iso_send.pcl_queue.next);
- list_del_init(&packet->driver_list);
-
- pci_unmap_single(lynx->dev, lynx->iso_send.header_dma,
- packet->header_size, PCI_DMA_TODEVICE);
- if (packet->data_size) {
- pci_unmap_single(lynx->dev, lynx->iso_send.data_dma,
- packet->data_size, PCI_DMA_TODEVICE);
- }
-#if 0 /* has been removed from ieee1394 core */
- if (!list_empty(&lynx->iso_send.queue)) {
- send_next(lynx, hpsb_iso);
- }
-#endif
- spin_unlock(&lynx->iso_send.queue_lock);
-
- if (pcl.pcl_status & DMA_CHAN_STAT_PKTCMPL) {
- if (pcl.pcl_status & DMA_CHAN_STAT_SPECIALACK) {
- ack = (pcl.pcl_status >> 15) & 0xf;
- PRINTD(KERN_INFO, lynx->id, "special ack %d", ack);
- ack = (ack == 1 ? ACKX_TIMEOUT : ACKX_SEND_ERROR);
- } else {
- ack = (pcl.pcl_status >> 15) & 0xf;
- }
- } else {
- PRINT(KERN_INFO, lynx->id, "iso send packet was not completed");
- ack = ACKX_SEND_ERROR;
- }
-
- hpsb_packet_sent(host, packet, ack); //FIXME: maybe we should just use ACK_COMPLETE and ACKX_SEND_ERROR
- }
- }
-
- if (intmask & PCI_INT_DMA_HLT(CHANNEL_ASYNC_RCV)) {
- /* general receive DMA completed */
- int stat = reg_read(lynx, DMA_CHAN_STAT(CHANNEL_ASYNC_RCV));
-
- PRINTD(KERN_DEBUG, lynx->id, "received packet size %d",
- stat & 0x1fff);
-
- if (stat & DMA_CHAN_STAT_SELFID) {
- lynx->selfid_size = stat & 0x1fff;
- handle_selfid(lynx, host);
- } else {
- quadlet_t *q_data = lynx->rcv_page;
- if ((*q_data >> 4 & 0xf) == TCODE_READQ_RESPONSE
- || (*q_data >> 4 & 0xf) == TCODE_WRITEQ) {
- cpu_to_be32s(q_data + 3);
- }
- hpsb_packet_received(host, q_data, stat & 0x1fff, 0);
- }
-
- run_pcl(lynx, lynx->rcv_pcl_start, CHANNEL_ASYNC_RCV);
- }
-
- return IRQ_HANDLED;
-}
-
-
-static void iso_rcv_bh(struct ti_lynx *lynx)
-{
- unsigned int idx;
- quadlet_t *data;
- unsigned long flags;
-
- spin_lock_irqsave(&lynx->iso_rcv.lock, flags);
-
- while (lynx->iso_rcv.used) {
- idx = lynx->iso_rcv.last;
- spin_unlock_irqrestore(&lynx->iso_rcv.lock, flags);
-
- data = lynx->iso_rcv.page[idx / ISORCV_PER_PAGE]
- + (idx % ISORCV_PER_PAGE) * MAX_ISORCV_SIZE;
-
- if ((*data >> 16) + 4 != (lynx->iso_rcv.stat[idx] & 0x1fff)) {
- PRINT(KERN_ERR, lynx->id,
- "iso length mismatch 0x%08x/0x%08x", *data,
- lynx->iso_rcv.stat[idx]);
- }
-
- if (lynx->iso_rcv.stat[idx]
- & (DMA_CHAN_STAT_PCIERR | DMA_CHAN_STAT_PKTERR)) {
- PRINT(KERN_INFO, lynx->id,
- "iso receive error on %d to 0x%p", idx, data);
- } else {
- hpsb_packet_received(lynx->host, data,
- lynx->iso_rcv.stat[idx] & 0x1fff,
- 0);
- }
-
- spin_lock_irqsave(&lynx->iso_rcv.lock, flags);
- lynx->iso_rcv.last = (idx + 1) % NUM_ISORCV_PCL;
- lynx->iso_rcv.used--;
- }
-
- if (lynx->iso_rcv.chan_count) {
- reg_write(lynx, DMA_WORD1_CMP_ENABLE(CHANNEL_ISO_RCV),
- DMA_WORD1_CMP_ENABLE_MASTER);
- }
- spin_unlock_irqrestore(&lynx->iso_rcv.lock, flags);
-}
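
The iso receive path above keeps a small ring of NUM_ISORCV_PCL slots: the interrupt handler advances iso_rcv.next and bumps `used` as packets land and pauses the word-compare unit when the ring is about to overrun, while iso_rcv_bh() drains from iso_rcv.last under the same lock before re-enabling reception. A minimal user-space sketch of that next/last/used bookkeeping, without the locking or the hardware registers (the helper names and the full-ring check are ours, not the driver's):

#include <stdio.h>

#define NUM_ISORCV_PCL 4

struct iso_ring {
        int next;       /* producer index, advanced in the interrupt handler */
        int last;       /* consumer index, advanced in the bottom half */
        int used;       /* packets received but not yet handed upward */
};

/* returns 0 when the ring is full, i.e. when reception would be paused */
static int ring_produce(struct iso_ring *r)
{
        if (r->used == NUM_ISORCV_PCL)
                return 0;
        r->next = (r->next + 1) % NUM_ISORCV_PCL;
        r->used++;
        return 1;
}

/* returns the slot index to process, or -1 if nothing is pending */
static int ring_consume(struct iso_ring *r)
{
        int idx;

        if (!r->used)
                return -1;
        idx = r->last;
        r->last = (idx + 1) % NUM_ISORCV_PCL;
        r->used--;
        return idx;
}

int main(void)
{
        struct iso_ring r = { 0, 0, 0 };
        int i;

        for (i = 0; i < 6; i++)
                printf("produce %d: %s\n", i, ring_produce(&r) ? "ok" : "ring full");
        while (r.used)
                printf("consume slot %d\n", ring_consume(&r));
        return 0;
}
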
-
-
-static void remove_card(struct pci_dev *dev)
-{
- struct ti_lynx *lynx;
- struct device *lynx_dev;
- int i;
-
- lynx = pci_get_drvdata(dev);
- if (!lynx) return;
- pci_set_drvdata(dev, NULL);
-
- lynx_dev = get_device(&lynx->host->device);
-
- switch (lynx->state) {
- case is_host:
- reg_write(lynx, PCI_INT_ENABLE, 0);
- hpsb_remove_host(lynx->host);
- case have_intr:
- reg_write(lynx, PCI_INT_ENABLE, 0);
- free_irq(lynx->dev->irq, lynx);
-
- /* Disable IRM Contender and LCtrl */
- if (lynx->phyic.reg_1394a)
- set_phy_reg(lynx, 4, ~0xc0 & get_phy_reg(lynx, 4));
-
- /* Let all other nodes know to ignore us */
- lynx_devctl(lynx->host, RESET_BUS, LONG_RESET_NO_FORCE_ROOT);
-
- case have_iomappings:
- reg_set_bits(lynx, MISC_CONTROL, MISC_CONTROL_SWRESET);
- /* Fix buggy cards with autoboot pin not tied low: */
- reg_write(lynx, DMA0_CHAN_CTRL, 0);
- iounmap(lynx->registers);
- iounmap(lynx->local_rom);
- iounmap(lynx->local_ram);
- iounmap(lynx->aux_port);
- case have_1394_buffers:
- for (i = 0; i < ISORCV_PAGES; i++) {
- if (lynx->iso_rcv.page[i]) {
- pci_free_consistent(lynx->dev, PAGE_SIZE,
- lynx->iso_rcv.page[i],
- lynx->iso_rcv.page_dma[i]);
- }
- }
- pci_free_consistent(lynx->dev, PAGE_SIZE, lynx->rcv_page,
- lynx->rcv_page_dma);
- case have_aux_buf:
- case have_pcl_mem:
- pci_free_consistent(lynx->dev, LOCALRAM_SIZE, lynx->pcl_mem,
- lynx->pcl_mem_dma);
- case clear:
- /* do nothing - already freed */
- ;
- }
-
- tasklet_kill(&lynx->iso_rcv.tq);
-
- if (lynx_dev)
- put_device(lynx_dev);
-}
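
remove_card() above walks lynx->state through a switch whose cases intentionally fall through: whatever stage add_card() reached, every teardown step below the matching label still runs, so a partially initialized card is unwound exactly as far as it got. A small self-contained illustration of the same idiom, with made-up states and cleanup steps rather than the driver's:

#include <stdio.h>

enum setup_state { CLEAR, HAVE_BUF, HAVE_IRQ, IS_READY };

static void teardown(enum setup_state state)
{
        switch (state) {
        case IS_READY:
                puts("unregister from upper layer");
                /* fall through */
        case HAVE_IRQ:
                puts("free interrupt");
                /* fall through */
        case HAVE_BUF:
                puts("free DMA buffers");
                /* fall through */
        case CLEAR:
                /* nothing was allocated yet */
                break;
        }
}

int main(void)
{
        /* tearing down from HAVE_IRQ also frees the buffers below it */
        teardown(HAVE_IRQ);
        return 0;
}
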
-
-
-static int __devinit add_card(struct pci_dev *dev,
- const struct pci_device_id *devid_is_unused)
-{
-#define FAIL(fmt, args...) do { \
- PRINT_G(KERN_ERR, fmt , ## args); \
- remove_card(dev); \
- return error; \
- } while (0)
-
- char irq_buf[16];
- struct hpsb_host *host;
- struct ti_lynx *lynx; /* shortcut to currently handled device */
- struct ti_pcl pcl;
- u32 *pcli;
- int i;
- int error;
-
- error = -ENXIO;
-
- if (pci_set_dma_mask(dev, DMA_BIT_MASK(32)))
- FAIL("DMA address limits not supported for PCILynx hardware");
- if (pci_enable_device(dev))
- FAIL("failed to enable PCILynx hardware");
- pci_set_master(dev);
-
- error = -ENOMEM;
-
- host = hpsb_alloc_host(&lynx_driver, sizeof(struct ti_lynx), &dev->dev);
- if (!host) FAIL("failed to allocate control structure memory");
-
- lynx = host->hostdata;
- lynx->id = card_id++;
- lynx->dev = dev;
- lynx->state = clear;
- lynx->host = host;
- host->pdev = dev;
- pci_set_drvdata(dev, lynx);
-
- spin_lock_init(&lynx->lock);
- spin_lock_init(&lynx->phy_reg_lock);
-
- lynx->pcl_mem = pci_alloc_consistent(dev, LOCALRAM_SIZE,
- &lynx->pcl_mem_dma);
-
- if (lynx->pcl_mem != NULL) {
- lynx->state = have_pcl_mem;
- PRINT(KERN_INFO, lynx->id,
- "allocated PCL memory %d Bytes @ 0x%p", LOCALRAM_SIZE,
- lynx->pcl_mem);
- } else {
- FAIL("failed to allocate PCL memory area");
- }
-
- lynx->rcv_page = pci_alloc_consistent(dev, PAGE_SIZE,
- &lynx->rcv_page_dma);
- if (lynx->rcv_page == NULL) {
- FAIL("failed to allocate receive buffer");
- }
- lynx->state = have_1394_buffers;
-
- for (i = 0; i < ISORCV_PAGES; i++) {
- lynx->iso_rcv.page[i] =
- pci_alloc_consistent(dev, PAGE_SIZE,
- &lynx->iso_rcv.page_dma[i]);
- if (lynx->iso_rcv.page[i] == NULL) {
- FAIL("failed to allocate iso receive buffers");
- }
- }
-
- lynx->registers = ioremap_nocache(pci_resource_start(dev,0),
- PCILYNX_MAX_REGISTER);
- lynx->local_ram = ioremap(pci_resource_start(dev,1), PCILYNX_MAX_MEMORY);
- lynx->aux_port = ioremap(pci_resource_start(dev,2), PCILYNX_MAX_MEMORY);
- lynx->local_rom = ioremap(pci_resource_start(dev,PCI_ROM_RESOURCE),
- PCILYNX_MAX_MEMORY);
- lynx->state = have_iomappings;
-
- if (lynx->registers == NULL) {
- FAIL("failed to remap registers - card not accessible");
- }
-
- reg_set_bits(lynx, MISC_CONTROL, MISC_CONTROL_SWRESET);
- /* Fix buggy cards with autoboot pin not tied low: */
- reg_write(lynx, DMA0_CHAN_CTRL, 0);
-
- sprintf (irq_buf, "%d", dev->irq);
-
- if (!request_irq(dev->irq, lynx_irq_handler, IRQF_SHARED,
- PCILYNX_DRIVER_NAME, lynx)) {
- PRINT(KERN_INFO, lynx->id, "allocated interrupt %s", irq_buf);
- lynx->state = have_intr;
- } else {
- FAIL("failed to allocate shared interrupt %s", irq_buf);
- }
-
- /* alloc_pcl return values are not checked, it is expected that the
- * provided PCL space is sufficient for the initial allocations */
- lynx->rcv_pcl = alloc_pcl(lynx);
- lynx->rcv_pcl_start = alloc_pcl(lynx);
- lynx->async.pcl = alloc_pcl(lynx);
- lynx->async.pcl_start = alloc_pcl(lynx);
- lynx->iso_send.pcl = alloc_pcl(lynx);
- lynx->iso_send.pcl_start = alloc_pcl(lynx);
-
- for (i = 0; i < NUM_ISORCV_PCL; i++) {
- lynx->iso_rcv.pcl[i] = alloc_pcl(lynx);
- }
- lynx->iso_rcv.pcl_start = alloc_pcl(lynx);
-
- /* all allocations successful - simple init stuff follows */
-
- reg_write(lynx, PCI_INT_ENABLE, PCI_INT_DMA_ALL);
-
- tasklet_init(&lynx->iso_rcv.tq, (void (*)(unsigned long))iso_rcv_bh,
- (unsigned long)lynx);
-
- spin_lock_init(&lynx->iso_rcv.lock);
-
- spin_lock_init(&lynx->async.queue_lock);
- lynx->async.channel = CHANNEL_ASYNC_SEND;
- spin_lock_init(&lynx->iso_send.queue_lock);
- lynx->iso_send.channel = CHANNEL_ISO_SEND;
-
- PRINT(KERN_INFO, lynx->id, "remapped memory spaces reg 0x%p, rom 0x%p, "
- "ram 0x%p, aux 0x%p", lynx->registers, lynx->local_rom,
- lynx->local_ram, lynx->aux_port);
-
- /* now, looking for PHY register set */
- if ((get_phy_reg(lynx, 2) & 0xe0) == 0xe0) {
- lynx->phyic.reg_1394a = 1;
- PRINT(KERN_INFO, lynx->id,
- "found 1394a conform PHY (using extended register set)");
- lynx->phyic.vendor = get_phy_vendorid(lynx);
- lynx->phyic.product = get_phy_productid(lynx);
- } else {
- lynx->phyic.reg_1394a = 0;
- PRINT(KERN_INFO, lynx->id, "found old 1394 PHY");
- }
-
- lynx->selfid_size = -1;
- lynx->phy_reg0 = -1;
-
- INIT_LIST_HEAD(&lynx->async.queue);
- INIT_LIST_HEAD(&lynx->async.pcl_queue);
- INIT_LIST_HEAD(&lynx->iso_send.queue);
- INIT_LIST_HEAD(&lynx->iso_send.pcl_queue);
-
- pcl.next = pcl_bus(lynx, lynx->rcv_pcl);
- put_pcl(lynx, lynx->rcv_pcl_start, &pcl);
-
- pcl.next = PCL_NEXT_INVALID;
- pcl.async_error_next = PCL_NEXT_INVALID;
-
- pcl.buffer[0].control = PCL_CMD_RCV | 16;
-#ifndef __BIG_ENDIAN
- pcl.buffer[0].control |= PCL_BIGENDIAN;
-#endif
- pcl.buffer[1].control = PCL_LAST_BUFF | 4080;
-
- pcl.buffer[0].pointer = lynx->rcv_page_dma;
- pcl.buffer[1].pointer = lynx->rcv_page_dma + 16;
- put_pcl(lynx, lynx->rcv_pcl, &pcl);
-
- pcl.next = pcl_bus(lynx, lynx->async.pcl);
- pcl.async_error_next = pcl_bus(lynx, lynx->async.pcl);
- put_pcl(lynx, lynx->async.pcl_start, &pcl);
-
- pcl.next = pcl_bus(lynx, lynx->iso_send.pcl);
- pcl.async_error_next = PCL_NEXT_INVALID;
- put_pcl(lynx, lynx->iso_send.pcl_start, &pcl);
-
- pcl.next = PCL_NEXT_INVALID;
- pcl.async_error_next = PCL_NEXT_INVALID;
- pcl.buffer[0].control = PCL_CMD_RCV | 4;
-#ifndef __BIG_ENDIAN
- pcl.buffer[0].control |= PCL_BIGENDIAN;
-#endif
- pcl.buffer[1].control = PCL_LAST_BUFF | 2044;
-
- for (i = 0; i < NUM_ISORCV_PCL; i++) {
- int page = i / ISORCV_PER_PAGE;
- int sec = i % ISORCV_PER_PAGE;
-
- pcl.buffer[0].pointer = lynx->iso_rcv.page_dma[page]
- + sec * MAX_ISORCV_SIZE;
- pcl.buffer[1].pointer = pcl.buffer[0].pointer + 4;
- put_pcl(lynx, lynx->iso_rcv.pcl[i], &pcl);
- }
-
- pcli = (u32 *)&pcl;
- for (i = 0; i < NUM_ISORCV_PCL; i++) {
- pcli[i] = pcl_bus(lynx, lynx->iso_rcv.pcl[i]);
- }
- put_pcl(lynx, lynx->iso_rcv.pcl_start, &pcl);
-
- /* FIFO sizes from left to right: ITF=48 ATF=48 GRF=160 */
- reg_write(lynx, FIFO_SIZES, 0x003030a0);
- /* 20 byte threshold before triggering PCI transfer */
- reg_write(lynx, DMA_GLOBAL_REGISTER, 0x2<<24);
- /* threshold on both send FIFOs before transmitting:
- FIFO size - cache line size - 1 */
- i = reg_read(lynx, PCI_LATENCY_CACHELINE) & 0xff;
- i = 0x30 - i - 1;
- reg_write(lynx, FIFO_XMIT_THRESHOLD, (i << 8) | i);
-
- reg_set_bits(lynx, PCI_INT_ENABLE, PCI_INT_1394);
-
- reg_write(lynx, LINK_INT_ENABLE, LINK_INT_PHY_TIMEOUT
- | LINK_INT_PHY_REG_RCVD | LINK_INT_PHY_BUSRESET
- | LINK_INT_ISO_STUCK | LINK_INT_ASYNC_STUCK
- | LINK_INT_SENT_REJECT | LINK_INT_TX_INVALID_TC
- | LINK_INT_GRF_OVERFLOW | LINK_INT_ITF_UNDERFLOW
- | LINK_INT_ATF_UNDERFLOW);
-
- reg_write(lynx, DMA_WORD0_CMP_VALUE(CHANNEL_ASYNC_RCV), 0);
- reg_write(lynx, DMA_WORD0_CMP_ENABLE(CHANNEL_ASYNC_RCV), 0xa<<4);
- reg_write(lynx, DMA_WORD1_CMP_VALUE(CHANNEL_ASYNC_RCV), 0);
- reg_write(lynx, DMA_WORD1_CMP_ENABLE(CHANNEL_ASYNC_RCV),
- DMA_WORD1_CMP_MATCH_LOCAL_NODE | DMA_WORD1_CMP_MATCH_BROADCAST
- | DMA_WORD1_CMP_MATCH_EXACT | DMA_WORD1_CMP_MATCH_BUS_BCAST
- | DMA_WORD1_CMP_ENABLE_SELF_ID | DMA_WORD1_CMP_ENABLE_MASTER);
-
- run_pcl(lynx, lynx->rcv_pcl_start, CHANNEL_ASYNC_RCV);
-
- reg_write(lynx, DMA_WORD0_CMP_VALUE(CHANNEL_ISO_RCV), 0);
- reg_write(lynx, DMA_WORD0_CMP_ENABLE(CHANNEL_ISO_RCV), 0x9<<4);
- reg_write(lynx, DMA_WORD1_CMP_VALUE(CHANNEL_ISO_RCV), 0);
- reg_write(lynx, DMA_WORD1_CMP_ENABLE(CHANNEL_ISO_RCV), 0);
-
- run_sub_pcl(lynx, lynx->iso_rcv.pcl_start, 0, CHANNEL_ISO_RCV);
-
- reg_write(lynx, LINK_CONTROL, LINK_CONTROL_RCV_CMP_VALID
- | LINK_CONTROL_TX_ISO_EN | LINK_CONTROL_RX_ISO_EN
- | LINK_CONTROL_TX_ASYNC_EN | LINK_CONTROL_RX_ASYNC_EN
- | LINK_CONTROL_RESET_TX | LINK_CONTROL_RESET_RX);
-
- if (!lynx->phyic.reg_1394a) {
- if (!hpsb_disable_irm) {
- /* attempt to enable contender bit -FIXME- would this
- * work elsewhere? */
- reg_set_bits(lynx, GPIO_CTRL_A, 0x1);
- reg_write(lynx, GPIO_DATA_BASE + 0x3c, 0x1);
- }
- } else {
- /* set the contender (if appropriate) and LCtrl bit in the
- * extended PHY register set. (Should check that PHY_02_EXTENDED
- * is set in register 2?)
- */
- i = get_phy_reg(lynx, 4);
- i |= PHY_04_LCTRL;
- if (hpsb_disable_irm)
- i &= ~PHY_04_CONTENDER;
- else
- i |= PHY_04_CONTENDER;
- if (i != -1) set_phy_reg(lynx, 4, i);
- }
-
- if (!skip_eeprom)
- {
- /* needed for i2c communication with serial eeprom */
- struct i2c_adapter *i2c_ad;
- struct i2c_algo_bit_data i2c_adapter_data;
-
- error = -ENOMEM;
- i2c_ad = kzalloc(sizeof(*i2c_ad), GFP_KERNEL);
- if (!i2c_ad) FAIL("failed to allocate I2C adapter memory");
-
- strlcpy(i2c_ad->name, "PCILynx I2C", sizeof(i2c_ad->name));
- i2c_adapter_data = bit_data;
- i2c_ad->algo_data = &i2c_adapter_data;
- i2c_adapter_data.data = lynx;
- i2c_ad->dev.parent = &dev->dev;
-
- PRINTD(KERN_DEBUG, lynx->id,"original eeprom control: %d",
- reg_read(lynx, SERIAL_EEPROM_CONTROL));
-
- /* reset hardware to sane state */
- lynx->i2c_driven_state = 0x00000070;
- reg_write(lynx, SERIAL_EEPROM_CONTROL, lynx->i2c_driven_state);
-
- if (i2c_bit_add_bus(i2c_ad) < 0)
- {
- kfree(i2c_ad);
- error = -ENXIO;
- FAIL("unable to register i2c");
- }
- else
- {
- /* do i2c stuff */
- unsigned char i2c_cmd = 0x10;
- struct i2c_msg msg[2] = { { 0x50, 0, 1, &i2c_cmd },
- { 0x50, I2C_M_RD, 20, (unsigned char*) lynx->bus_info_block }
- };
-
- /* we use i2c_transfer because we have no i2c_client
- at hand */
- if (i2c_transfer(i2c_ad, msg, 2) < 0) {
- PRINT(KERN_ERR, lynx->id, "unable to read bus info block from i2c");
- } else {
- PRINT(KERN_INFO, lynx->id, "got bus info block from serial eeprom");
- /* FIXME: probably we should rewrite the max_rec, max_ROM(1394a),
- * generation(1394a) and link_spd(1394a) field and recalculate
- * the CRC */
-
- for (i = 0; i < 5 ; i++)
- PRINTD(KERN_DEBUG, lynx->id, "Businfo block quadlet %i: %08x",
- i, be32_to_cpu(lynx->bus_info_block[i]));
-
- /* info_length, crc_length and 1394 magic number to check, if it is really a bus info block */
- if (((be32_to_cpu(lynx->bus_info_block[0]) & 0xffff0000) == 0x04040000) &&
- (lynx->bus_info_block[1] == IEEE1394_BUSID_MAGIC))
- {
- PRINT(KERN_DEBUG, lynx->id, "read a valid bus info block from");
- } else {
- kfree(i2c_ad);
- error = -ENXIO;
- FAIL("read something from serial eeprom, but it does not seem to be a valid bus info block");
- }
-
- }
-
- i2c_del_adapter(i2c_ad);
- kfree(i2c_ad);
- }
- }
-
- host->csr.guid_hi = be32_to_cpu(lynx->bus_info_block[3]);
- host->csr.guid_lo = be32_to_cpu(lynx->bus_info_block[4]);
- host->csr.cyc_clk_acc = (be32_to_cpu(lynx->bus_info_block[2]) >> 16) & 0xff;
- host->csr.max_rec = (be32_to_cpu(lynx->bus_info_block[2]) >> 12) & 0xf;
- if (!lynx->phyic.reg_1394a)
- host->csr.lnk_spd = (get_phy_reg(lynx, 2) & 0xc0) >> 6;
- else
- host->csr.lnk_spd = be32_to_cpu(lynx->bus_info_block[2]) & 0x7;
-
- if (hpsb_add_host(host)) {
- error = -ENOMEM;
- FAIL("Failed to register host with highlevel");
- }
-
- lynx->state = is_host;
-
- return 0;
-#undef FAIL
-}
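
The transmit-threshold setup near the end of add_card() above follows the formula in its comment: threshold = transmit FIFO size (0x30 quadlets, as programmed into FIFO_SIZES) minus the PCI cache line size minus one, with the same value written for both send FIFOs. A stand-alone rendering of that arithmetic; the cache-line value of 8 dwords is only an assumed example:

#include <stdio.h>

int main(void)
{
        unsigned int fifo_size = 0x30;  /* ITF/ATF depth set via FIFO_SIZES */
        unsigned int cacheline = 8;     /* PCI cache line size register, in dwords (assumed) */
        unsigned int thresh = fifo_size - cacheline - 1;

        /* same threshold for both send FIFOs, packed into one register value */
        printf("FIFO_XMIT_THRESHOLD = 0x%04x\n", (thresh << 8) | thresh);
        return 0;
}
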
-
-
-static struct pci_device_id pci_table[] = {
- {
- .vendor = PCI_VENDOR_ID_TI,
- .device = PCI_DEVICE_ID_TI_PCILYNX,
- .subvendor = PCI_ANY_ID,
- .subdevice = PCI_ANY_ID,
- },
- { } /* Terminating entry */
-};
-
-static struct pci_driver lynx_pci_driver = {
- .name = PCILYNX_DRIVER_NAME,
- .id_table = pci_table,
- .probe = add_card,
- .remove = remove_card,
-};
-
-static struct hpsb_host_driver lynx_driver = {
- .owner = THIS_MODULE,
- .name = PCILYNX_DRIVER_NAME,
- .set_hw_config_rom = NULL,
- .transmit_packet = lynx_transmit,
- .devctl = lynx_devctl,
- .isoctl = NULL,
-};
-
-MODULE_AUTHOR("Andreas E. Bombe <andreas.bombe@munich.netsurf.de>");
-MODULE_DESCRIPTION("driver for Texas Instruments PCI Lynx IEEE-1394 controller");
-MODULE_LICENSE("GPL");
-MODULE_SUPPORTED_DEVICE("pcilynx");
-MODULE_DEVICE_TABLE(pci, pci_table);
-
-static int __init pcilynx_init(void)
-{
- int ret;
-
- ret = pci_register_driver(&lynx_pci_driver);
- if (ret < 0) {
- PRINT_G(KERN_ERR, "PCI module init failed");
- return ret;
- }
-
- return 0;
-}
-
-static void __exit pcilynx_cleanup(void)
-{
- pci_unregister_driver(&lynx_pci_driver);
-}
-
-
-module_init(pcilynx_init);
-module_exit(pcilynx_cleanup);
diff --git a/drivers/ieee1394/pcilynx.h b/drivers/ieee1394/pcilynx.h
deleted file mode 100644
index 693a169acea3..000000000000
--- a/drivers/ieee1394/pcilynx.h
+++ /dev/null
@@ -1,468 +0,0 @@
-#ifndef __PCILYNX_H__
-#define __PCILYNX_H__
-
-
-#define PCILYNX_DRIVER_NAME "pcilynx"
-#define PCILYNX_MAJOR 177
-
-#define PCILYNX_MINOR_AUX_START 0
-#define PCILYNX_MINOR_ROM_START 16
-#define PCILYNX_MINOR_RAM_START 32
-
-#define PCILYNX_MAX_REGISTER 0xfff
-#define PCILYNX_MAX_MEMORY 0xffff
-
-#define PCI_DEVICE_ID_TI_PCILYNX 0x8000
-#define MAX_PCILYNX_CARDS 4
-#define LOCALRAM_SIZE 4096
-
-#define NUM_ISORCV_PCL 4
-#define MAX_ISORCV_SIZE 2048
-#define ISORCV_PER_PAGE (PAGE_SIZE / MAX_ISORCV_SIZE)
-#define ISORCV_PAGES (NUM_ISORCV_PCL / ISORCV_PER_PAGE)
-
-#define CHANNEL_LOCALBUS 0
-#define CHANNEL_ASYNC_RCV 1
-#define CHANNEL_ISO_RCV 2
-#define CHANNEL_ASYNC_SEND 3
-#define CHANNEL_ISO_SEND 4
-
-#define PCILYNX_CONFIG_ROM_LENGTH 1024
-
-typedef int pcl_t;
-
-struct ti_lynx {
- int id; /* sequential card number */
-
- spinlock_t lock;
-
- struct pci_dev *dev;
-
- struct {
- unsigned reg_1394a:1;
- u32 vendor;
- u32 product;
- } phyic;
-
- enum { clear, have_intr, have_aux_buf, have_pcl_mem,
- have_1394_buffers, have_iomappings, is_host } state;
-
- /* remapped memory spaces */
- void __iomem *registers;
- void __iomem *local_rom;
- void __iomem *local_ram;
- void __iomem *aux_port;
- __be32 bus_info_block[5];
-
- /*
- * use local RAM of LOCALRAM_SIZE bytes for PCLs, which allows for
- * LOCALRAM_SIZE / 128 PCLs (each sized 128 bytes);
- * the following is an allocation bitmap
- */
- u8 pcl_bmap[LOCALRAM_SIZE / 1024];
-
- /* point to PCLs memory area if needed */
- void *pcl_mem;
- dma_addr_t pcl_mem_dma;
-
- /* PCLs for local mem / aux transfers */
- pcl_t dmem_pcl;
-
- /* IEEE-1394 part follows */
- struct hpsb_host *host;
-
- int phyid, isroot;
- int selfid_size;
- int phy_reg0;
-
- spinlock_t phy_reg_lock;
-
- pcl_t rcv_pcl_start, rcv_pcl;
- void *rcv_page;
- dma_addr_t rcv_page_dma;
- int rcv_active;
-
- struct lynx_send_data {
- pcl_t pcl_start, pcl;
- struct list_head queue;
- struct list_head pcl_queue; /* this queue contains at most one packet */
- spinlock_t queue_lock;
- dma_addr_t header_dma, data_dma;
- int channel;
- } async, iso_send;
-
- struct {
- pcl_t pcl[NUM_ISORCV_PCL];
- u32 stat[NUM_ISORCV_PCL];
- void *page[ISORCV_PAGES];
- dma_addr_t page_dma[ISORCV_PAGES];
- pcl_t pcl_start;
- int chan_count;
- int next, last, used, running;
- struct tasklet_struct tq;
- spinlock_t lock;
- } iso_rcv;
-
- u32 i2c_driven_state; /* the state we currently drive the Serial EEPROM Control register */
-};
-
-/* the per-file data structure for mem space access */
-struct memdata {
- struct ti_lynx *lynx;
- int cid;
- atomic_t aux_intr_last_seen;
- /* enum values are the same as LBUS_ADDR_SEL_* values below */
- enum { rom = 0x10000, aux = 0x20000, ram = 0 } type;
-};
-
-
-
-/*
- * Register read and write helper functions.
- */
-static inline void reg_write(const struct ti_lynx *lynx, int offset, u32 data)
-{
- writel(data, lynx->registers + offset);
-}
-
-static inline u32 reg_read(const struct ti_lynx *lynx, int offset)
-{
- return readl(lynx->registers + offset);
-}
-
-static inline void reg_set_bits(const struct ti_lynx *lynx, int offset,
- u32 mask)
-{
- reg_write(lynx, offset, (reg_read(lynx, offset) | mask));
-}
-
-static inline void reg_clear_bits(const struct ti_lynx *lynx, int offset,
- u32 mask)
-{
- reg_write(lynx, offset, (reg_read(lynx, offset) & ~mask));
-}
-
-
-
-/* chip register definitions follow */
-
-#define PCI_LATENCY_CACHELINE 0x0c
-
-#define MISC_CONTROL 0x40
-#define MISC_CONTROL_SWRESET (1<<0)
-
-#define SERIAL_EEPROM_CONTROL 0x44
-
-#define PCI_INT_STATUS 0x48
-#define PCI_INT_ENABLE 0x4c
-/* status and enable have identical bit numbers */
-#define PCI_INT_INT_PEND (1<<31)
-#define PCI_INT_FORCED_INT (1<<30)
-#define PCI_INT_SLV_ADR_PERR (1<<28)
-#define PCI_INT_SLV_DAT_PERR (1<<27)
-#define PCI_INT_MST_DAT_PERR (1<<26)
-#define PCI_INT_MST_DEV_TIMEOUT (1<<25)
-#define PCI_INT_INTERNAL_SLV_TIMEOUT (1<<23)
-#define PCI_INT_AUX_TIMEOUT (1<<18)
-#define PCI_INT_AUX_INT (1<<17)
-#define PCI_INT_1394 (1<<16)
-#define PCI_INT_DMA4_PCL (1<<9)
-#define PCI_INT_DMA4_HLT (1<<8)
-#define PCI_INT_DMA3_PCL (1<<7)
-#define PCI_INT_DMA3_HLT (1<<6)
-#define PCI_INT_DMA2_PCL (1<<5)
-#define PCI_INT_DMA2_HLT (1<<4)
-#define PCI_INT_DMA1_PCL (1<<3)
-#define PCI_INT_DMA1_HLT (1<<2)
-#define PCI_INT_DMA0_PCL (1<<1)
-#define PCI_INT_DMA0_HLT (1<<0)
-/* all DMA interrupts combined: */
-#define PCI_INT_DMA_ALL 0x3ff
-
-#define PCI_INT_DMA_HLT(chan) (1 << (chan * 2))
-#define PCI_INT_DMA_PCL(chan) (1 << (chan * 2 + 1))
-
-#define LBUS_ADDR 0xb4
-#define LBUS_ADDR_SEL_RAM (0x0<<16)
-#define LBUS_ADDR_SEL_ROM (0x1<<16)
-#define LBUS_ADDR_SEL_AUX (0x2<<16)
-#define LBUS_ADDR_SEL_ZV (0x3<<16)
-
-#define GPIO_CTRL_A 0xb8
-#define GPIO_CTRL_B 0xbc
-#define GPIO_DATA_BASE 0xc0
-
-#define DMA_BREG(base, chan) (base + chan * 0x20)
-#define DMA_SREG(base, chan) (base + chan * 0x10)
-
-#define DMA0_PREV_PCL 0x100
-#define DMA1_PREV_PCL 0x120
-#define DMA2_PREV_PCL 0x140
-#define DMA3_PREV_PCL 0x160
-#define DMA4_PREV_PCL 0x180
-#define DMA_PREV_PCL(chan) (DMA_BREG(DMA0_PREV_PCL, chan))
-
-#define DMA0_CURRENT_PCL 0x104
-#define DMA1_CURRENT_PCL 0x124
-#define DMA2_CURRENT_PCL 0x144
-#define DMA3_CURRENT_PCL 0x164
-#define DMA4_CURRENT_PCL 0x184
-#define DMA_CURRENT_PCL(chan) (DMA_BREG(DMA0_CURRENT_PCL, chan))
-
-#define DMA0_CHAN_STAT 0x10c
-#define DMA1_CHAN_STAT 0x12c
-#define DMA2_CHAN_STAT 0x14c
-#define DMA3_CHAN_STAT 0x16c
-#define DMA4_CHAN_STAT 0x18c
-#define DMA_CHAN_STAT(chan) (DMA_BREG(DMA0_CHAN_STAT, chan))
-/* CHAN_STATUS registers share bits */
-#define DMA_CHAN_STAT_SELFID (1<<31)
-#define DMA_CHAN_STAT_ISOPKT (1<<30)
-#define DMA_CHAN_STAT_PCIERR (1<<29)
-#define DMA_CHAN_STAT_PKTERR (1<<28)
-#define DMA_CHAN_STAT_PKTCMPL (1<<27)
-#define DMA_CHAN_STAT_SPECIALACK (1<<14)
-
-
-#define DMA0_CHAN_CTRL 0x110
-#define DMA1_CHAN_CTRL 0x130
-#define DMA2_CHAN_CTRL 0x150
-#define DMA3_CHAN_CTRL 0x170
-#define DMA4_CHAN_CTRL 0x190
-#define DMA_CHAN_CTRL(chan) (DMA_BREG(DMA0_CHAN_CTRL, chan))
-/* CHAN_CTRL registers share bits */
-#define DMA_CHAN_CTRL_ENABLE (1<<31)
-#define DMA_CHAN_CTRL_BUSY (1<<30)
-#define DMA_CHAN_CTRL_LINK (1<<29)
-
-#define DMA0_READY 0x114
-#define DMA1_READY 0x134
-#define DMA2_READY 0x154
-#define DMA3_READY 0x174
-#define DMA4_READY 0x194
-#define DMA_READY(chan) (DMA_BREG(DMA0_READY, chan))
-
-#define DMA_GLOBAL_REGISTER 0x908
-
-#define FIFO_SIZES 0xa00
-
-#define FIFO_CONTROL 0xa10
-#define FIFO_CONTROL_GRF_FLUSH (1<<4)
-#define FIFO_CONTROL_ITF_FLUSH (1<<3)
-#define FIFO_CONTROL_ATF_FLUSH (1<<2)
-
-#define FIFO_XMIT_THRESHOLD 0xa14
-
-#define DMA0_WORD0_CMP_VALUE 0xb00
-#define DMA1_WORD0_CMP_VALUE 0xb10
-#define DMA2_WORD0_CMP_VALUE 0xb20
-#define DMA3_WORD0_CMP_VALUE 0xb30
-#define DMA4_WORD0_CMP_VALUE 0xb40
-#define DMA_WORD0_CMP_VALUE(chan) (DMA_SREG(DMA0_WORD0_CMP_VALUE, chan))
-
-#define DMA0_WORD0_CMP_ENABLE 0xb04
-#define DMA1_WORD0_CMP_ENABLE 0xb14
-#define DMA2_WORD0_CMP_ENABLE 0xb24
-#define DMA3_WORD0_CMP_ENABLE 0xb34
-#define DMA4_WORD0_CMP_ENABLE 0xb44
-#define DMA_WORD0_CMP_ENABLE(chan) (DMA_SREG(DMA0_WORD0_CMP_ENABLE,chan))
-
-#define DMA0_WORD1_CMP_VALUE 0xb08
-#define DMA1_WORD1_CMP_VALUE 0xb18
-#define DMA2_WORD1_CMP_VALUE 0xb28
-#define DMA3_WORD1_CMP_VALUE 0xb38
-#define DMA4_WORD1_CMP_VALUE 0xb48
-#define DMA_WORD1_CMP_VALUE(chan) (DMA_SREG(DMA0_WORD1_CMP_VALUE, chan))
-
-#define DMA0_WORD1_CMP_ENABLE 0xb0c
-#define DMA1_WORD1_CMP_ENABLE 0xb1c
-#define DMA2_WORD1_CMP_ENABLE 0xb2c
-#define DMA3_WORD1_CMP_ENABLE 0xb3c
-#define DMA4_WORD1_CMP_ENABLE 0xb4c
-#define DMA_WORD1_CMP_ENABLE(chan) (DMA_SREG(DMA0_WORD1_CMP_ENABLE,chan))
-/* word 1 compare enable flags */
-#define DMA_WORD1_CMP_MATCH_OTHERBUS (1<<15)
-#define DMA_WORD1_CMP_MATCH_BROADCAST (1<<14)
-#define DMA_WORD1_CMP_MATCH_BUS_BCAST (1<<13)
-#define DMA_WORD1_CMP_MATCH_LOCAL_NODE (1<<12)
-#define DMA_WORD1_CMP_MATCH_EXACT (1<<11)
-#define DMA_WORD1_CMP_ENABLE_SELF_ID (1<<10)
-#define DMA_WORD1_CMP_ENABLE_MASTER (1<<8)
-
-#define LINK_ID 0xf00
-#define LINK_ID_BUS(id) (id<<22)
-#define LINK_ID_NODE(id) (id<<16)
-
-#define LINK_CONTROL 0xf04
-#define LINK_CONTROL_BUSY (1<<29)
-#define LINK_CONTROL_TX_ISO_EN (1<<26)
-#define LINK_CONTROL_RX_ISO_EN (1<<25)
-#define LINK_CONTROL_TX_ASYNC_EN (1<<24)
-#define LINK_CONTROL_RX_ASYNC_EN (1<<23)
-#define LINK_CONTROL_RESET_TX (1<<21)
-#define LINK_CONTROL_RESET_RX (1<<20)
-#define LINK_CONTROL_CYCMASTER (1<<11)
-#define LINK_CONTROL_CYCSOURCE (1<<10)
-#define LINK_CONTROL_CYCTIMEREN (1<<9)
-#define LINK_CONTROL_RCV_CMP_VALID (1<<7)
-#define LINK_CONTROL_SNOOP_ENABLE (1<<6)
-
-#define CYCLE_TIMER 0xf08
-
-#define LINK_PHY 0xf0c
-#define LINK_PHY_READ (1<<31)
-#define LINK_PHY_WRITE (1<<30)
-#define LINK_PHY_ADDR(addr) (addr<<24)
-#define LINK_PHY_WDATA(data) (data<<16)
-#define LINK_PHY_RADDR(addr) (addr<<8)
-
-
-#define LINK_INT_STATUS 0xf14
-#define LINK_INT_ENABLE 0xf18
-/* status and enable have identical bit numbers */
-#define LINK_INT_LINK_INT (1<<31)
-#define LINK_INT_PHY_TIMEOUT (1<<30)
-#define LINK_INT_PHY_REG_RCVD (1<<29)
-#define LINK_INT_PHY_BUSRESET (1<<28)
-#define LINK_INT_TX_RDY (1<<26)
-#define LINK_INT_RX_DATA_RDY (1<<25)
-#define LINK_INT_ISO_STUCK (1<<20)
-#define LINK_INT_ASYNC_STUCK (1<<19)
-#define LINK_INT_SENT_REJECT (1<<17)
-#define LINK_INT_HDR_ERR (1<<16)
-#define LINK_INT_TX_INVALID_TC (1<<15)
-#define LINK_INT_CYC_SECOND (1<<11)
-#define LINK_INT_CYC_START (1<<10)
-#define LINK_INT_CYC_DONE (1<<9)
-#define LINK_INT_CYC_PENDING (1<<8)
-#define LINK_INT_CYC_LOST (1<<7)
-#define LINK_INT_CYC_ARB_FAILED (1<<6)
-#define LINK_INT_GRF_OVERFLOW (1<<5)
-#define LINK_INT_ITF_UNDERFLOW (1<<4)
-#define LINK_INT_ATF_UNDERFLOW (1<<3)
-#define LINK_INT_ISOARB_FAILED (1<<0)
-
-/* PHY specifics */
-#define PHY_VENDORID_TI 0x800028
-#define PHY_PRODUCTID_TSB41LV03 0x000000
-
-
-/* this is the physical layout of a PCL, its size is 128 bytes */
-struct ti_pcl {
- u32 next;
- u32 async_error_next;
- u32 user_data;
- u32 pcl_status;
- u32 remaining_transfer_count;
- u32 next_data_buffer;
- struct {
- u32 control;
- u32 pointer;
- } buffer[13] __attribute__ ((packed));
-} __attribute__ ((packed));
-
-#include <linux/stddef.h>
-#define pcloffs(MEMBER) (offsetof(struct ti_pcl, MEMBER))
-
-
-static inline void put_pcl(const struct ti_lynx *lynx, pcl_t pclid,
- const struct ti_pcl *pcl)
-{
- memcpy_le32((u32 *)(lynx->pcl_mem + pclid * sizeof(struct ti_pcl)),
- (u32 *)pcl, sizeof(struct ti_pcl));
-}
-
-static inline void get_pcl(const struct ti_lynx *lynx, pcl_t pclid,
- struct ti_pcl *pcl)
-{
- memcpy_le32((u32 *)pcl,
- (u32 *)(lynx->pcl_mem + pclid * sizeof(struct ti_pcl)),
- sizeof(struct ti_pcl));
-}
-
-static inline u32 pcl_bus(const struct ti_lynx *lynx, pcl_t pclid)
-{
- return lynx->pcl_mem_dma + pclid * sizeof(struct ti_pcl);
-}
-
-
-#if defined (__BIG_ENDIAN)
-typedef struct ti_pcl pcltmp_t;
-
-static inline struct ti_pcl *edit_pcl(const struct ti_lynx *lynx, pcl_t pclid,
- pcltmp_t *tmp)
-{
- get_pcl(lynx, pclid, tmp);
- return tmp;
-}
-
-static inline void commit_pcl(const struct ti_lynx *lynx, pcl_t pclid,
- pcltmp_t *tmp)
-{
- put_pcl(lynx, pclid, tmp);
-}
-
-#else
-typedef int pcltmp_t; /* just a dummy */
-
-static inline struct ti_pcl *edit_pcl(const struct ti_lynx *lynx, pcl_t pclid,
- pcltmp_t *tmp)
-{
- return lynx->pcl_mem + pclid * sizeof(struct ti_pcl);
-}
-
-static inline void commit_pcl(const struct ti_lynx *lynx, pcl_t pclid,
- pcltmp_t *tmp)
-{
-}
-#endif
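
edit_pcl()/commit_pcl() above exist because the PCLs live in little-endian PCI space: a big-endian host must bounce each PCL through a byte-swapped temporary (via get_pcl()/put_pcl()), while a little-endian host can hand out a pointer straight into pcl_mem and make commit a no-op. A rough user-space analogue of that dual-path idiom; the <endian.h> swap helpers and the buffer names are our choices, not the driver's:

#define _DEFAULT_SOURCE
#include <stdio.h>
#include <stdint.h>
#include <endian.h>

static uint32_t shared_mem[4];          /* stands in for the little-endian PCL area */

#if __BYTE_ORDER == __BIG_ENDIAN
static uint32_t bounce[4];

static uint32_t *edit(void)             /* copy out, converting to host order */
{
        int i;

        for (i = 0; i < 4; i++)
                bounce[i] = le32toh(shared_mem[i]);
        return bounce;
}

static void commit(uint32_t *p)         /* convert back and copy in */
{
        int i;

        for (i = 0; i < 4; i++)
                shared_mem[i] = htole32(p[i]);
}
#else
static uint32_t *edit(void)             /* little-endian host: edit in place */
{
        return shared_mem;
}

static void commit(uint32_t *p)         /* nothing to write back */
{
        (void)p;
}
#endif

int main(void)
{
        uint32_t *p = edit();

        p[0] = 0x12345678;
        commit(p);
        printf("stored 0x%08x\n", (unsigned int)le32toh(shared_mem[0]));
        return 0;
}
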
-
-
-static inline void run_sub_pcl(const struct ti_lynx *lynx, pcl_t pclid, int idx,
- int dmachan)
-{
- reg_write(lynx, DMA0_CURRENT_PCL + dmachan * 0x20,
- pcl_bus(lynx, pclid) + idx * 4);
- reg_write(lynx, DMA0_CHAN_CTRL + dmachan * 0x20,
- DMA_CHAN_CTRL_ENABLE | DMA_CHAN_CTRL_LINK);
-}
-
-static inline void run_pcl(const struct ti_lynx *lynx, pcl_t pclid, int dmachan)
-{
- run_sub_pcl(lynx, pclid, 0, dmachan);
-}
-
-#define PCL_NEXT_INVALID (1<<0)
-
-/* transfer commands */
-#define PCL_CMD_RCV (0x1<<24)
-#define PCL_CMD_RCV_AND_UPDATE (0xa<<24)
-#define PCL_CMD_XMT (0x2<<24)
-#define PCL_CMD_UNFXMT (0xc<<24)
-#define PCL_CMD_PCI_TO_LBUS (0x8<<24)
-#define PCL_CMD_LBUS_TO_PCI (0x9<<24)
-
-/* aux commands */
-#define PCL_CMD_NOP (0x0<<24)
-#define PCL_CMD_LOAD (0x3<<24)
-#define PCL_CMD_STOREQ (0x4<<24)
-#define PCL_CMD_STORED (0xb<<24)
-#define PCL_CMD_STORE0 (0x5<<24)
-#define PCL_CMD_STORE1 (0x6<<24)
-#define PCL_CMD_COMPARE (0xe<<24)
-#define PCL_CMD_SWAP_COMPARE (0xf<<24)
-#define PCL_CMD_ADD (0xd<<24)
-#define PCL_CMD_BRANCH (0x7<<24)
-
-/* BRANCH condition codes */
-#define PCL_COND_DMARDY_SET (0x1<<20)
-#define PCL_COND_DMARDY_CLEAR (0x2<<20)
-
-#define PCL_GEN_INTR (1<<19)
-#define PCL_LAST_BUFF (1<<18)
-#define PCL_LAST_CMD (PCL_LAST_BUFF)
-#define PCL_WAITSTAT (1<<17)
-#define PCL_BIGENDIAN (1<<16)
-#define PCL_ISOMODE (1<<12)
-
-#endif
diff --git a/drivers/ieee1394/raw1394-private.h b/drivers/ieee1394/raw1394-private.h
deleted file mode 100644
index 7a225a405987..000000000000
--- a/drivers/ieee1394/raw1394-private.h
+++ /dev/null
@@ -1,81 +0,0 @@
-#ifndef IEEE1394_RAW1394_PRIVATE_H
-#define IEEE1394_RAW1394_PRIVATE_H
-
-/* header for definitions that are private to the raw1394 driver
- and not visible to user-space */
-
-#define RAW1394_DEVICE_MAJOR 171
-#define RAW1394_DEVICE_NAME "raw1394"
-
-#define RAW1394_MAX_USER_CSR_DIRS 16
-
-struct iso_block_store {
- atomic_t refcount;
- size_t data_size;
- quadlet_t data[0];
-};
-
-enum raw1394_iso_state { RAW1394_ISO_INACTIVE = 0,
- RAW1394_ISO_RECV = 1,
- RAW1394_ISO_XMIT = 2 };
-
-struct file_info {
- struct list_head list;
-
- struct mutex state_mutex;
- enum { opened, initialized, connected } state;
- unsigned int protocol_version;
-
- struct hpsb_host *host;
-
- struct list_head req_pending; /* protected by reqlists_lock */
- struct list_head req_complete; /* protected by reqlists_lock */
- spinlock_t reqlists_lock;
- wait_queue_head_t wait_complete;
-
- struct list_head addr_list; /* protected by host_info_lock */
-
- u8 __user *fcp_buffer;
-
- u8 notification; /* (busreset-notification) RAW1394_NOTIFY_OFF/ON */
-
- /* new rawiso API */
- enum raw1394_iso_state iso_state;
- struct hpsb_iso *iso_handle;
-
- /* User space's CSR1212 dynamic ConfigROM directories */
- struct csr1212_keyval *csr1212_dirs[RAW1394_MAX_USER_CSR_DIRS];
-
- /* Legacy ConfigROM update flag */
- u8 cfgrom_upd;
-};
-
-struct arm_addr {
- struct list_head addr_list; /* file_info list */
- u64 start, end;
- u64 arm_tag;
- u8 access_rights;
- u8 notification_options;
- u8 client_transactions;
- u64 recvb;
- u16 rec_length;
- u8 *addr_space_buffer; /* accessed by read/write/lock requests */
-};
-
-struct pending_request {
- struct list_head list;
- struct file_info *file_info;
- struct hpsb_packet *packet;
- struct iso_block_store *ibs;
- quadlet_t *data;
- int free_data;
- struct raw1394_request req;
-};
-
-struct host_info {
- struct list_head list;
- struct hpsb_host *host;
- struct list_head file_info_list; /* protected by host_info_lock */
-};
-
-#endif /* IEEE1394_RAW1394_PRIVATE_H */
diff --git a/drivers/ieee1394/raw1394.c b/drivers/ieee1394/raw1394.c
deleted file mode 100644
index f3401427404c..000000000000
--- a/drivers/ieee1394/raw1394.c
+++ /dev/null
@@ -1,3096 +0,0 @@
-/*
- * IEEE 1394 for Linux
- *
- * Raw interface to the bus
- *
- * Copyright (C) 1999, 2000 Andreas E. Bombe
- * 2001, 2002 Manfred Weihs <weihs@ict.tuwien.ac.at>
- * 2002 Christian Toegel <christian.toegel@gmx.at>
- *
- * This code is licensed under the GPL. See the file COPYING in the root
- * directory of the kernel sources for details.
- *
- *
- * Contributions:
- *
- * Manfred Weihs <weihs@ict.tuwien.ac.at>
- * configuration ROM manipulation
- * address range mapping
- * adaptation for new (transparent) loopback mechanism
- * sending of arbitrary async packets
- * Christian Toegel <christian.toegel@gmx.at>
- * address range mapping
- * lock64 request
- * transmit physical packet
- * busreset notification control (switch on/off)
- * busreset with selection of type (short/long)
- * request_reply
- */
-
-#include <linux/kernel.h>
-#include <linux/list.h>
-#include <linux/sched.h>
-#include <linux/string.h>
-#include <linux/slab.h>
-#include <linux/fs.h>
-#include <linux/poll.h>
-#include <linux/module.h>
-#include <linux/mutex.h>
-#include <linux/init.h>
-#include <linux/interrupt.h>
-#include <linux/vmalloc.h>
-#include <linux/cdev.h>
-#include <asm/uaccess.h>
-#include <asm/atomic.h>
-#include <linux/compat.h>
-
-#include "csr1212.h"
-#include "highlevel.h"
-#include "hosts.h"
-#include "ieee1394.h"
-#include "ieee1394_core.h"
-#include "ieee1394_hotplug.h"
-#include "ieee1394_transactions.h"
-#include "ieee1394_types.h"
-#include "iso.h"
-#include "nodemgr.h"
-#include "raw1394.h"
-#include "raw1394-private.h"
-
-#define int2ptr(x) ((void __user *)(unsigned long)x)
-#define ptr2int(x) ((u64)(unsigned long)(void __user *)x)
-
-#ifdef CONFIG_IEEE1394_VERBOSEDEBUG
-#define RAW1394_DEBUG
-#endif
-
-#ifdef RAW1394_DEBUG
-#define DBGMSG(fmt, args...) \
-printk(KERN_INFO "raw1394:" fmt "\n" , ## args)
-#else
-#define DBGMSG(fmt, args...) do {} while (0)
-#endif
-
-static LIST_HEAD(host_info_list);
-static int host_count;
-static DEFINE_SPINLOCK(host_info_lock);
-static atomic_t internal_generation = ATOMIC_INIT(0);
-
-static atomic_t iso_buffer_size;
-static const int iso_buffer_max = 4 * 1024 * 1024; /* 4 MB */
-
-static struct hpsb_highlevel raw1394_highlevel;
-
-static int arm_read(struct hpsb_host *host, int nodeid, quadlet_t * buffer,
- u64 addr, size_t length, u16 flags);
-static int arm_write(struct hpsb_host *host, int nodeid, int destid,
- quadlet_t * data, u64 addr, size_t length, u16 flags);
-static int arm_lock(struct hpsb_host *host, int nodeid, quadlet_t * store,
- u64 addr, quadlet_t data, quadlet_t arg, int ext_tcode,
- u16 flags);
-static int arm_lock64(struct hpsb_host *host, int nodeid, octlet_t * store,
- u64 addr, octlet_t data, octlet_t arg, int ext_tcode,
- u16 flags);
-static const struct hpsb_address_ops arm_ops = {
- .read = arm_read,
- .write = arm_write,
- .lock = arm_lock,
- .lock64 = arm_lock64,
-};
-
-static void queue_complete_cb(struct pending_request *req);
-
-static struct pending_request *__alloc_pending_request(gfp_t flags)
-{
- struct pending_request *req;
-
- req = kzalloc(sizeof(*req), flags);
- if (req)
- INIT_LIST_HEAD(&req->list);
-
- return req;
-}
-
-static inline struct pending_request *alloc_pending_request(void)
-{
- return __alloc_pending_request(GFP_KERNEL);
-}
-
-static void free_pending_request(struct pending_request *req)
-{
- if (req->ibs) {
- if (atomic_dec_and_test(&req->ibs->refcount)) {
- atomic_sub(req->ibs->data_size, &iso_buffer_size);
- kfree(req->ibs);
- }
- } else if (req->free_data) {
- kfree(req->data);
- }
- hpsb_free_packet(req->packet);
- kfree(req);
-}
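
free_pending_request() above drops a reference on the shared iso_block_store and only releases the block, together with the global iso_buffer_size accounting, once the last pending request pointing at it is gone; fcp_request() further down is what hands one such block to every listening client. A compact user-space sketch of that shared-payload pattern using a plain counter instead of kernel atomics (all names here are ours):

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

struct block_store {
        int refcount;
        size_t data_size;
        unsigned char data[];           /* payload follows the header */
};

static struct block_store *block_get(struct block_store *b)
{
        b->refcount++;
        return b;
}

static void block_put(struct block_store *b)
{
        if (--b->refcount == 0) {
                printf("last user gone, freeing %zu bytes\n", b->data_size);
                free(b);
        }
}

int main(void)
{
        const char payload[] = "fcp frame";
        struct block_store *b = malloc(sizeof(*b) + sizeof(payload));

        if (!b)
                return 1;
        b->refcount = 0;
        b->data_size = sizeof(payload);
        memcpy(b->data, payload, sizeof(payload));

        /* two "pending requests" share the same payload block */
        struct block_store *r1 = block_get(b);
        struct block_store *r2 = block_get(b);

        block_put(r1);
        block_put(r2);                  /* block is freed here */
        return 0;
}
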
-
-/* fi->reqlists_lock must be taken */
-static void __queue_complete_req(struct pending_request *req)
-{
- struct file_info *fi = req->file_info;
-
- list_move_tail(&req->list, &fi->req_complete);
- wake_up(&fi->wait_complete);
-}
-
-static void queue_complete_req(struct pending_request *req)
-{
- unsigned long flags;
- struct file_info *fi = req->file_info;
-
- spin_lock_irqsave(&fi->reqlists_lock, flags);
- __queue_complete_req(req);
- spin_unlock_irqrestore(&fi->reqlists_lock, flags);
-}
-
-static void queue_complete_cb(struct pending_request *req)
-{
- struct hpsb_packet *packet = req->packet;
- int rcode = (packet->header[1] >> 12) & 0xf;
-
- switch (packet->ack_code) {
- case ACKX_NONE:
- case ACKX_SEND_ERROR:
- req->req.error = RAW1394_ERROR_SEND_ERROR;
- break;
- case ACKX_ABORTED:
- req->req.error = RAW1394_ERROR_ABORTED;
- break;
- case ACKX_TIMEOUT:
- req->req.error = RAW1394_ERROR_TIMEOUT;
- break;
- default:
- req->req.error = (packet->ack_code << 16) | rcode;
- break;
- }
-
- if (!((packet->ack_code == ACK_PENDING) && (rcode == RCODE_COMPLETE))) {
- req->req.length = 0;
- }
-
- if ((req->req.type == RAW1394_REQ_ASYNC_READ) ||
- (req->req.type == RAW1394_REQ_ASYNC_WRITE) ||
- (req->req.type == RAW1394_REQ_ASYNC_STREAM) ||
- (req->req.type == RAW1394_REQ_LOCK) ||
- (req->req.type == RAW1394_REQ_LOCK64))
- hpsb_free_tlabel(packet);
-
- queue_complete_req(req);
-}
-
-static void add_host(struct hpsb_host *host)
-{
- struct host_info *hi;
- unsigned long flags;
-
- hi = kmalloc(sizeof(*hi), GFP_KERNEL);
-
- if (hi) {
- INIT_LIST_HEAD(&hi->list);
- hi->host = host;
- INIT_LIST_HEAD(&hi->file_info_list);
-
- spin_lock_irqsave(&host_info_lock, flags);
- list_add_tail(&hi->list, &host_info_list);
- host_count++;
- spin_unlock_irqrestore(&host_info_lock, flags);
- }
-
- atomic_inc(&internal_generation);
-}
-
-static struct host_info *find_host_info(struct hpsb_host *host)
-{
- struct host_info *hi;
-
- list_for_each_entry(hi, &host_info_list, list)
- if (hi->host == host)
- return hi;
-
- return NULL;
-}
-
-static void remove_host(struct hpsb_host *host)
-{
- struct host_info *hi;
- unsigned long flags;
-
- spin_lock_irqsave(&host_info_lock, flags);
- hi = find_host_info(host);
-
- if (hi != NULL) {
- list_del(&hi->list);
- host_count--;
- /*
- FIXME: address ranges should be removed
- and fileinfo states should be initialized
- (including setting generation to
- internal-generation ...)
- */
- }
- spin_unlock_irqrestore(&host_info_lock, flags);
-
- if (hi == NULL) {
- printk(KERN_ERR "raw1394: attempt to remove unknown host "
- "0x%p\n", host);
- return;
- }
-
- kfree(hi);
-
- atomic_inc(&internal_generation);
-}
-
-static void host_reset(struct hpsb_host *host)
-{
- unsigned long flags;
- struct host_info *hi;
- struct file_info *fi;
- struct pending_request *req;
-
- spin_lock_irqsave(&host_info_lock, flags);
- hi = find_host_info(host);
-
- if (hi != NULL) {
- list_for_each_entry(fi, &hi->file_info_list, list) {
- if (fi->notification == RAW1394_NOTIFY_ON) {
- req = __alloc_pending_request(GFP_ATOMIC);
-
- if (req != NULL) {
- req->file_info = fi;
- req->req.type = RAW1394_REQ_BUS_RESET;
- req->req.generation =
- get_hpsb_generation(host);
- req->req.misc = (host->node_id << 16)
- | host->node_count;
- if (fi->protocol_version > 3) {
- req->req.misc |=
- (NODEID_TO_NODE
- (host->irm_id)
- << 8);
- }
-
- queue_complete_req(req);
- }
- }
- }
- }
- spin_unlock_irqrestore(&host_info_lock, flags);
-}
-
-static void fcp_request(struct hpsb_host *host, int nodeid, int direction,
- int cts, u8 * data, size_t length)
-{
- unsigned long flags;
- struct host_info *hi;
- struct file_info *fi;
- struct pending_request *req, *req_next;
- struct iso_block_store *ibs = NULL;
- LIST_HEAD(reqs);
-
- if ((atomic_read(&iso_buffer_size) + length) > iso_buffer_max) {
- HPSB_INFO("dropped fcp request");
- return;
- }
-
- spin_lock_irqsave(&host_info_lock, flags);
- hi = find_host_info(host);
-
- if (hi != NULL) {
- list_for_each_entry(fi, &hi->file_info_list, list) {
- if (!fi->fcp_buffer)
- continue;
-
- req = __alloc_pending_request(GFP_ATOMIC);
- if (!req)
- break;
-
- if (!ibs) {
- ibs = kmalloc(sizeof(*ibs) + length,
- GFP_ATOMIC);
- if (!ibs) {
- kfree(req);
- break;
- }
-
- atomic_add(length, &iso_buffer_size);
- atomic_set(&ibs->refcount, 0);
- ibs->data_size = length;
- memcpy(ibs->data, data, length);
- }
-
- atomic_inc(&ibs->refcount);
-
- req->file_info = fi;
- req->ibs = ibs;
- req->data = ibs->data;
- req->req.type = RAW1394_REQ_FCP_REQUEST;
- req->req.generation = get_hpsb_generation(host);
- req->req.misc = nodeid | (direction << 16);
- req->req.recvb = ptr2int(fi->fcp_buffer);
- req->req.length = length;
-
- list_add_tail(&req->list, &reqs);
- }
- }
- spin_unlock_irqrestore(&host_info_lock, flags);
-
- list_for_each_entry_safe(req, req_next, &reqs, list)
- queue_complete_req(req);
-}
-
-#ifdef CONFIG_COMPAT
-struct compat_raw1394_req {
- __u32 type;
- __s32 error;
- __u32 misc;
-
- __u32 generation;
- __u32 length;
-
- __u64 address;
-
- __u64 tag;
-
- __u64 sendb;
- __u64 recvb;
-}
-#if defined(CONFIG_X86_64) || defined(CONFIG_IA64)
-__attribute__((packed))
-#endif
-;
-
-static const char __user *raw1394_compat_write(const char __user *buf)
-{
- struct compat_raw1394_req __user *cr = (typeof(cr)) buf;
- struct raw1394_request __user *r;
-
- r = compat_alloc_user_space(sizeof(struct raw1394_request));
-
-#define C(x) __copy_in_user(&r->x, &cr->x, sizeof(r->x))
-
- if (copy_in_user(r, cr, sizeof(struct compat_raw1394_req)) ||
- C(address) ||
- C(tag) ||
- C(sendb) ||
- C(recvb))
- return (__force const char __user *)ERR_PTR(-EFAULT);
-
- return (const char __user *)r;
-}
-#undef C
-
-#define P(x) __put_user(r->x, &cr->x)
-
-static int
-raw1394_compat_read(const char __user *buf, struct raw1394_request *r)
-{
- struct compat_raw1394_req __user *cr = (typeof(cr)) buf;
-
- if (!access_ok(VERIFY_WRITE, cr, sizeof(struct compat_raw1394_req)) ||
- P(type) ||
- P(error) ||
- P(misc) ||
- P(generation) ||
- P(length) ||
- P(address) ||
- P(tag) ||
- P(sendb) ||
- P(recvb))
- return -EFAULT;
-
- return sizeof(struct compat_raw1394_req);
-}
-#undef P
-
-#endif
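
compat_raw1394_req above needs __attribute__((packed)) only on 64-bit architectures because i386 aligns __u64 members to 4 bytes while x86_64 aligns them to 8, so the identical field list would otherwise gain padding and a different size than what a 32-bit caller hands in. A small demonstration of that alignment effect; the struct names are illustrative, not the driver's:

#include <stdio.h>
#include <stdint.h>
#include <stddef.h>

/* five 32-bit words followed by a 64-bit word, like the start of the request */
struct natural {
        uint32_t w[5];
        uint64_t addr;
};

struct packed32 {
        uint32_t w[5];
        uint64_t addr;
} __attribute__((packed));

int main(void)
{
        /* on x86_64 the natural layout inserts 4 bytes of padding before addr;
         * packing reproduces the 4-byte-aligned layout a 32-bit caller uses */
        printf("natural: offset %zu, size %zu\n",
               offsetof(struct natural, addr), sizeof(struct natural));
        printf("packed:  offset %zu, size %zu\n",
               offsetof(struct packed32, addr), sizeof(struct packed32));
        return 0;
}
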
-
-/* get next completed request (caller must hold fi->reqlists_lock) */
-static inline struct pending_request *__next_complete_req(struct file_info *fi)
-{
- struct list_head *lh;
- struct pending_request *req = NULL;
-
- if (!list_empty(&fi->req_complete)) {
- lh = fi->req_complete.next;
- list_del(lh);
- req = list_entry(lh, struct pending_request, list);
- }
- return req;
-}
-
-/* atomically get next completed request */
-static struct pending_request *next_complete_req(struct file_info *fi)
-{
- unsigned long flags;
- struct pending_request *req;
-
- spin_lock_irqsave(&fi->reqlists_lock, flags);
- req = __next_complete_req(fi);
- spin_unlock_irqrestore(&fi->reqlists_lock, flags);
- return req;
-}
-
-static ssize_t raw1394_read(struct file *file, char __user * buffer,
- size_t count, loff_t * offset_is_ignored)
-{
- struct file_info *fi = file->private_data;
- struct pending_request *req;
- ssize_t ret;
-
-#ifdef CONFIG_COMPAT
- if (count == sizeof(struct compat_raw1394_req)) {
- /* ok */
- } else
-#endif
- if (count != sizeof(struct raw1394_request)) {
- return -EINVAL;
- }
-
- if (!access_ok(VERIFY_WRITE, buffer, count)) {
- return -EFAULT;
- }
-
- if (file->f_flags & O_NONBLOCK) {
- if (!(req = next_complete_req(fi)))
- return -EAGAIN;
- } else {
- /*
- * NB: We call the macro wait_event_interruptible() with a
- * condition argument with side effect. This is only possible
- * because the side effect does not occur until the condition
- * became true, and wait_event_interruptible() won't evaluate
- * the condition again after that.
- */
- if (wait_event_interruptible(fi->wait_complete,
- (req = next_complete_req(fi))))
- return -ERESTARTSYS;
- }
-
- if (req->req.length) {
- if (copy_to_user(int2ptr(req->req.recvb), req->data,
- req->req.length)) {
- req->req.error = RAW1394_ERROR_MEMFAULT;
- }
- }
-
-#ifdef CONFIG_COMPAT
- if (count == sizeof(struct compat_raw1394_req) &&
- sizeof(struct compat_raw1394_req) !=
- sizeof(struct raw1394_request)) {
- ret = raw1394_compat_read(buffer, &req->req);
- } else
-#endif
- {
- if (copy_to_user(buffer, &req->req, sizeof(req->req))) {
- ret = -EFAULT;
- goto out;
- }
- ret = (ssize_t) sizeof(struct raw1394_request);
- }
- out:
- free_pending_request(req);
- return ret;
-}
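
The comment inside raw1394_read() above points out that the wait_event_interruptible() condition, (req = next_complete_req(fi)), has a side effect, and that this is safe only because the side effect occurs exactly when the condition becomes true and the condition is not evaluated again afterwards. A toy user-space model of that pattern; the wait_for() macro merely re-checks the condition where the kernel would sleep:

#include <stdio.h>

/* crude stand-in for wait_event_interruptible(): re-check where the kernel would sleep */
#define wait_for(cond) do { while (!(cond)) /* sleep */; } while (0)

static int pending = 3;                 /* pretend three requests complete eventually */

/* pops one "request" if available; 0 means nothing is ready yet */
static int next_complete(void)
{
        return pending ? pending-- : 0;
}

int main(void)
{
        int req;

        /*
         * the condition consumes a request as its side effect; that is safe
         * because the side effect happens exactly when the condition becomes
         * true, and the condition is never evaluated again after that
         */
        wait_for((req = next_complete()));
        printf("got request %d\n", req);
        return 0;
}
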
-
-static int state_opened(struct file_info *fi, struct pending_request *req)
-{
- if (req->req.type == RAW1394_REQ_INITIALIZE) {
- switch (req->req.misc) {
- case RAW1394_KERNELAPI_VERSION:
- case 3:
- fi->state = initialized;
- fi->protocol_version = req->req.misc;
- req->req.error = RAW1394_ERROR_NONE;
- req->req.generation = atomic_read(&internal_generation);
- break;
-
- default:
- req->req.error = RAW1394_ERROR_COMPAT;
- req->req.misc = RAW1394_KERNELAPI_VERSION;
- }
- } else {
- req->req.error = RAW1394_ERROR_STATE_ORDER;
- }
-
- req->req.length = 0;
- queue_complete_req(req);
- return 0;
-}
-
-static int state_initialized(struct file_info *fi, struct pending_request *req)
-{
- unsigned long flags;
- struct host_info *hi;
- struct raw1394_khost_list *khl;
-
- if (req->req.generation != atomic_read(&internal_generation)) {
- req->req.error = RAW1394_ERROR_GENERATION;
- req->req.generation = atomic_read(&internal_generation);
- req->req.length = 0;
- queue_complete_req(req);
- return 0;
- }
-
- switch (req->req.type) {
- case RAW1394_REQ_LIST_CARDS:
- spin_lock_irqsave(&host_info_lock, flags);
- khl = kmalloc(sizeof(*khl) * host_count, GFP_ATOMIC);
-
- if (khl) {
- req->req.misc = host_count;
- req->data = (quadlet_t *) khl;
-
- list_for_each_entry(hi, &host_info_list, list) {
- khl->nodes = hi->host->node_count;
- strcpy(khl->name, hi->host->driver->name);
- khl++;
- }
- }
- spin_unlock_irqrestore(&host_info_lock, flags);
-
- if (khl) {
- req->req.error = RAW1394_ERROR_NONE;
- req->req.length = min(req->req.length,
- (u32) (sizeof
- (struct raw1394_khost_list)
- * req->req.misc));
- req->free_data = 1;
- } else {
- return -ENOMEM;
- }
- break;
-
- case RAW1394_REQ_SET_CARD:
- spin_lock_irqsave(&host_info_lock, flags);
- if (req->req.misc >= host_count) {
- req->req.error = RAW1394_ERROR_INVALID_ARG;
- goto out_set_card;
- }
- list_for_each_entry(hi, &host_info_list, list)
- if (!req->req.misc--)
- break;
- get_device(&hi->host->device); /* FIXME handle failure case */
- list_add_tail(&fi->list, &hi->file_info_list);
-
- /* prevent unloading of the host's low-level driver */
- if (!try_module_get(hi->host->driver->owner)) {
- req->req.error = RAW1394_ERROR_ABORTED;
- goto out_set_card;
- }
- WARN_ON(fi->host);
- fi->host = hi->host;
- fi->state = connected;
-
- req->req.error = RAW1394_ERROR_NONE;
- req->req.generation = get_hpsb_generation(fi->host);
- req->req.misc = (fi->host->node_id << 16)
- | fi->host->node_count;
- if (fi->protocol_version > 3)
- req->req.misc |= NODEID_TO_NODE(fi->host->irm_id) << 8;
-out_set_card:
- spin_unlock_irqrestore(&host_info_lock, flags);
-
- req->req.length = 0;
- break;
-
- default:
- req->req.error = RAW1394_ERROR_STATE_ORDER;
- req->req.length = 0;
- break;
- }
-
- queue_complete_req(req);
- return 0;
-}
-
-static void handle_fcp_listen(struct file_info *fi, struct pending_request *req)
-{
- if (req->req.misc) {
- if (fi->fcp_buffer) {
- req->req.error = RAW1394_ERROR_ALREADY;
- } else {
- fi->fcp_buffer = int2ptr(req->req.recvb);
- }
- } else {
- if (!fi->fcp_buffer) {
- req->req.error = RAW1394_ERROR_ALREADY;
- } else {
- fi->fcp_buffer = NULL;
- }
- }
-
- req->req.length = 0;
- queue_complete_req(req);
-}
-
-static int handle_async_request(struct file_info *fi,
- struct pending_request *req, int node)
-{
- unsigned long flags;
- struct hpsb_packet *packet = NULL;
- u64 addr = req->req.address & 0xffffffffffffULL;
-
- switch (req->req.type) {
- case RAW1394_REQ_ASYNC_READ:
- DBGMSG("read_request called");
- packet =
- hpsb_make_readpacket(fi->host, node, addr, req->req.length);
-
- if (!packet)
- return -ENOMEM;
-
- if (req->req.length == 4)
- req->data = &packet->header[3];
- else
- req->data = packet->data;
-
- break;
-
- case RAW1394_REQ_ASYNC_WRITE:
- DBGMSG("write_request called");
-
- packet = hpsb_make_writepacket(fi->host, node, addr, NULL,
- req->req.length);
- if (!packet)
- return -ENOMEM;
-
- if (req->req.length == 4) {
- if (copy_from_user
- (&packet->header[3], int2ptr(req->req.sendb),
- req->req.length))
- req->req.error = RAW1394_ERROR_MEMFAULT;
- } else {
- if (copy_from_user
- (packet->data, int2ptr(req->req.sendb),
- req->req.length))
- req->req.error = RAW1394_ERROR_MEMFAULT;
- }
-
- req->req.length = 0;
- break;
-
- case RAW1394_REQ_ASYNC_STREAM:
- DBGMSG("stream_request called");
-
- packet =
- hpsb_make_streampacket(fi->host, NULL, req->req.length,
- node & 0x3f /*channel */ ,
- (req->req.misc >> 16) & 0x3,
- req->req.misc & 0xf);
- if (!packet)
- return -ENOMEM;
-
- if (copy_from_user(packet->data, int2ptr(req->req.sendb),
- req->req.length))
- req->req.error = RAW1394_ERROR_MEMFAULT;
-
- req->req.length = 0;
- break;
-
- case RAW1394_REQ_LOCK:
- DBGMSG("lock_request called");
- if ((req->req.misc == EXTCODE_FETCH_ADD)
- || (req->req.misc == EXTCODE_LITTLE_ADD)) {
- if (req->req.length != 4) {
- req->req.error = RAW1394_ERROR_INVALID_ARG;
- break;
- }
- } else {
- if (req->req.length != 8) {
- req->req.error = RAW1394_ERROR_INVALID_ARG;
- break;
- }
- }
-
- packet = hpsb_make_lockpacket(fi->host, node, addr,
- req->req.misc, NULL, 0);
- if (!packet)
- return -ENOMEM;
-
- if (copy_from_user(packet->data, int2ptr(req->req.sendb),
- req->req.length)) {
- req->req.error = RAW1394_ERROR_MEMFAULT;
- break;
- }
-
- req->data = packet->data;
- req->req.length = 4;
- break;
-
- case RAW1394_REQ_LOCK64:
- DBGMSG("lock64_request called");
- if ((req->req.misc == EXTCODE_FETCH_ADD)
- || (req->req.misc == EXTCODE_LITTLE_ADD)) {
- if (req->req.length != 8) {
- req->req.error = RAW1394_ERROR_INVALID_ARG;
- break;
- }
- } else {
- if (req->req.length != 16) {
- req->req.error = RAW1394_ERROR_INVALID_ARG;
- break;
- }
- }
- packet = hpsb_make_lock64packet(fi->host, node, addr,
- req->req.misc, NULL, 0);
- if (!packet)
- return -ENOMEM;
-
- if (copy_from_user(packet->data, int2ptr(req->req.sendb),
- req->req.length)) {
- req->req.error = RAW1394_ERROR_MEMFAULT;
- break;
- }
-
- req->data = packet->data;
- req->req.length = 8;
- break;
-
- default:
- req->req.error = RAW1394_ERROR_STATE_ORDER;
- }
-
- req->packet = packet;
-
- if (req->req.error) {
- req->req.length = 0;
- queue_complete_req(req);
- return 0;
- }
-
- hpsb_set_packet_complete_task(packet,
- (void (*)(void *))queue_complete_cb, req);
-
- spin_lock_irqsave(&fi->reqlists_lock, flags);
- list_add_tail(&req->list, &fi->req_pending);
- spin_unlock_irqrestore(&fi->reqlists_lock, flags);
-
- packet->generation = req->req.generation;
-
- if (hpsb_send_packet(packet) < 0) {
- req->req.error = RAW1394_ERROR_SEND_ERROR;
- req->req.length = 0;
- hpsb_free_tlabel(packet);
- queue_complete_req(req);
- }
- return 0;
-}
-
-static int handle_async_send(struct file_info *fi, struct pending_request *req)
-{
- unsigned long flags;
- struct hpsb_packet *packet;
- int header_length = req->req.misc & 0xffff;
- int expect_response = req->req.misc >> 16;
- size_t data_size;
-
- if (header_length > req->req.length || header_length < 12 ||
- header_length > FIELD_SIZEOF(struct hpsb_packet, header)) {
- req->req.error = RAW1394_ERROR_INVALID_ARG;
- req->req.length = 0;
- queue_complete_req(req);
- return 0;
- }
-
- data_size = req->req.length - header_length;
- packet = hpsb_alloc_packet(data_size);
- req->packet = packet;
- if (!packet)
- return -ENOMEM;
-
- if (copy_from_user(packet->header, int2ptr(req->req.sendb),
- header_length)) {
- req->req.error = RAW1394_ERROR_MEMFAULT;
- req->req.length = 0;
- queue_complete_req(req);
- return 0;
- }
-
- if (copy_from_user
- (packet->data, int2ptr(req->req.sendb) + header_length,
- data_size)) {
- req->req.error = RAW1394_ERROR_MEMFAULT;
- req->req.length = 0;
- queue_complete_req(req);
- return 0;
- }
-
- packet->type = hpsb_async;
- packet->node_id = packet->header[0] >> 16;
- packet->tcode = (packet->header[0] >> 4) & 0xf;
- packet->tlabel = (packet->header[0] >> 10) & 0x3f;
- packet->host = fi->host;
- packet->expect_response = expect_response;
- packet->header_size = header_length;
- packet->data_size = data_size;
-
- req->req.length = 0;
- hpsb_set_packet_complete_task(packet,
- (void (*)(void *))queue_complete_cb, req);
-
- spin_lock_irqsave(&fi->reqlists_lock, flags);
- list_add_tail(&req->list, &fi->req_pending);
- spin_unlock_irqrestore(&fi->reqlists_lock, flags);
-
- /* Update the generation of the packet just before sending. */
- packet->generation = req->req.generation;
-
- if (hpsb_send_packet(packet) < 0) {
- req->req.error = RAW1394_ERROR_SEND_ERROR;
- queue_complete_req(req);
- }
-
- return 0;
-}
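
handle_async_send() above takes a raw header from user space and recovers the destination ID, transaction label and transaction code from the first quadlet with the shifts and masks shown. A quick stand-alone decode using the same masks; the sample quadlet is made up for illustration:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
        /* made-up first quadlet: destination 0xffc0, tlabel 1, tcode 0 (quadlet write) */
        uint32_t q = 0xffc00400;

        unsigned int node_id = q >> 16;
        unsigned int tlabel = (q >> 10) & 0x3f;
        unsigned int tcode = (q >> 4) & 0xf;

        printf("node_id 0x%04x tlabel %u tcode %u\n", node_id, tlabel, tcode);
        return 0;
}
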
-
-static int arm_read(struct hpsb_host *host, int nodeid, quadlet_t * buffer,
- u64 addr, size_t length, u16 flags)
-{
- unsigned long irqflags;
- struct pending_request *req;
- struct host_info *hi;
- struct file_info *fi = NULL;
- struct list_head *entry;
- struct arm_addr *arm_addr = NULL;
- struct arm_request *arm_req = NULL;
- struct arm_response *arm_resp = NULL;
- int found = 0, size = 0, rcode = -1;
- struct arm_request_response *arm_req_resp = NULL;
-
- DBGMSG("arm_read called by node: %X "
- "addr: %4.4x %8.8x length: %Zu", nodeid,
- (u16) ((addr >> 32) & 0xFFFF), (u32) (addr & 0xFFFFFFFF),
- length);
- spin_lock_irqsave(&host_info_lock, irqflags);
- hi = find_host_info(host); /* search address-entry */
- if (hi != NULL) {
- list_for_each_entry(fi, &hi->file_info_list, list) {
- entry = fi->addr_list.next;
- while (entry != &(fi->addr_list)) {
- arm_addr =
- list_entry(entry, struct arm_addr,
- addr_list);
- if (((arm_addr->start) <= (addr))
- && ((arm_addr->end) >= (addr + length))) {
- found = 1;
- break;
- }
- entry = entry->next;
- }
- if (found) {
- break;
- }
- }
- }
- rcode = -1;
- if (!found) {
- printk(KERN_ERR "raw1394: arm_read FAILED addr_entry not found"
- " -> rcode_address_error\n");
- spin_unlock_irqrestore(&host_info_lock, irqflags);
- return (RCODE_ADDRESS_ERROR);
- } else {
- DBGMSG("arm_read addr_entry FOUND");
- }
- if (arm_addr->rec_length < length) {
- DBGMSG("arm_read blocklength too big -> rcode_data_error");
- rcode = RCODE_DATA_ERROR; /* hardware error, data is unavailable */
- }
- if (rcode == -1) {
- if (arm_addr->access_rights & ARM_READ) {
- if (!(arm_addr->client_transactions & ARM_READ)) {
- memcpy(buffer,
- (arm_addr->addr_space_buffer) + (addr -
- (arm_addr->
- start)),
- length);
- DBGMSG("arm_read -> (rcode_complete)");
- rcode = RCODE_COMPLETE;
- }
- } else {
- rcode = RCODE_TYPE_ERROR; /* function not allowed */
- DBGMSG("arm_read -> rcode_type_error (access denied)");
- }
- }
- if (arm_addr->notification_options & ARM_READ) {
- DBGMSG("arm_read -> entering notification-section");
- req = __alloc_pending_request(GFP_ATOMIC);
- if (!req) {
- DBGMSG("arm_read -> rcode_conflict_error");
- spin_unlock_irqrestore(&host_info_lock, irqflags);
- return (RCODE_CONFLICT_ERROR); /* A resource conflict was detected.
- The request may be retried */
- }
- if (rcode == RCODE_COMPLETE) {
- size =
- sizeof(struct arm_request) +
- sizeof(struct arm_response) +
- length * sizeof(byte_t) +
- sizeof(struct arm_request_response);
- } else {
- size =
- sizeof(struct arm_request) +
- sizeof(struct arm_response) +
- sizeof(struct arm_request_response);
- }
- req->data = kmalloc(size, GFP_ATOMIC);
- if (!(req->data)) {
- free_pending_request(req);
- DBGMSG("arm_read -> rcode_conflict_error");
- spin_unlock_irqrestore(&host_info_lock, irqflags);
- return (RCODE_CONFLICT_ERROR); /* A resource conflict was detected.
- The request may be retried */
- }
- req->free_data = 1;
- req->file_info = fi;
- req->req.type = RAW1394_REQ_ARM;
- req->req.generation = get_hpsb_generation(host);
- req->req.misc =
- (((length << 16) & (0xFFFF0000)) | (ARM_READ & 0xFF));
- req->req.tag = arm_addr->arm_tag;
- req->req.recvb = arm_addr->recvb;
- req->req.length = size;
- arm_req_resp = (struct arm_request_response *)(req->data);
- arm_req = (struct arm_request *)((byte_t *) (req->data) +
- (sizeof
- (struct
- arm_request_response)));
- arm_resp =
- (struct arm_response *)((byte_t *) (arm_req) +
- (sizeof(struct arm_request)));
- arm_req->buffer = NULL;
- arm_resp->buffer = NULL;
- if (rcode == RCODE_COMPLETE) {
- byte_t *buf =
- (byte_t *) arm_resp + sizeof(struct arm_response);
- memcpy(buf,
- (arm_addr->addr_space_buffer) + (addr -
- (arm_addr->
- start)),
- length);
- arm_resp->buffer =
- int2ptr((arm_addr->recvb) +
- sizeof(struct arm_request_response) +
- sizeof(struct arm_request) +
- sizeof(struct arm_response));
- }
- arm_resp->buffer_length =
- (rcode == RCODE_COMPLETE) ? length : 0;
- arm_resp->response_code = rcode;
- arm_req->buffer_length = 0;
- arm_req->generation = req->req.generation;
- arm_req->extended_transaction_code = 0;
- arm_req->destination_offset = addr;
- arm_req->source_nodeid = nodeid;
- arm_req->destination_nodeid = host->node_id;
- arm_req->tlabel = (flags >> 10) & 0x3f;
- arm_req->tcode = (flags >> 4) & 0x0f;
- arm_req_resp->request = int2ptr((arm_addr->recvb) +
- sizeof(struct
- arm_request_response));
- arm_req_resp->response =
- int2ptr((arm_addr->recvb) +
- sizeof(struct arm_request_response) +
- sizeof(struct arm_request));
- queue_complete_req(req);
- }
- spin_unlock_irqrestore(&host_info_lock, irqflags);
- return (rcode);
-}
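
arm_read() above settles on a response code in stages: no matching address range gives RCODE_ADDRESS_ERROR, an over-long request gives RCODE_DATA_ERROR, a range without ARM_READ rights gives RCODE_TYPE_ERROR, and the kernel only copies the data and answers RCODE_COMPLETE when the client does not handle the transaction itself (arm_write() below mirrors this for writes). A condensed user-space restatement of that decision ladder; the enum values are stand-ins, not the ieee1394 core's:

#include <stdio.h>

enum rcode { COMPLETE, ADDRESS_ERROR, DATA_ERROR, TYPE_ERROR, PENDING };

struct range {
        unsigned long start, end;
        unsigned int rec_length;
        int allow_read;                 /* ARM_READ set in access_rights */
        int client_handles_read;        /* ARM_READ set in client_transactions */
};

static enum rcode decide(const struct range *r, unsigned long addr, unsigned int len)
{
        if (!r || addr < r->start || addr + len > r->end)
                return ADDRESS_ERROR;
        if (r->rec_length < len)
                return DATA_ERROR;
        if (!r->allow_read)
                return TYPE_ERROR;
        if (r->client_handles_read)
                return PENDING;         /* user space will send the response */
        return COMPLETE;                /* kernel answers with the buffered data */
}

int main(void)
{
        struct range r = { 0x1000, 0x2000, 512, 1, 0 };

        printf("in-range read -> %d\n", decide(&r, 0x1100, 16));
        printf("out-of-range  -> %d\n", decide(&r, 0x3000, 16));
        return 0;
}
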
-
-static int arm_write(struct hpsb_host *host, int nodeid, int destid,
- quadlet_t * data, u64 addr, size_t length, u16 flags)
-{
- unsigned long irqflags;
- struct pending_request *req;
- struct host_info *hi;
- struct file_info *fi = NULL;
- struct list_head *entry;
- struct arm_addr *arm_addr = NULL;
- struct arm_request *arm_req = NULL;
- struct arm_response *arm_resp = NULL;
- int found = 0, size = 0, rcode = -1;
- struct arm_request_response *arm_req_resp = NULL;
-
- DBGMSG("arm_write called by node: %X "
- "addr: %4.4x %8.8x length: %Zu", nodeid,
- (u16) ((addr >> 32) & 0xFFFF), (u32) (addr & 0xFFFFFFFF),
- length);
- spin_lock_irqsave(&host_info_lock, irqflags);
- hi = find_host_info(host); /* search address-entry */
- if (hi != NULL) {
- list_for_each_entry(fi, &hi->file_info_list, list) {
- entry = fi->addr_list.next;
- while (entry != &(fi->addr_list)) {
- arm_addr =
- list_entry(entry, struct arm_addr,
- addr_list);
- if (((arm_addr->start) <= (addr))
- && ((arm_addr->end) >= (addr + length))) {
- found = 1;
- break;
- }
- entry = entry->next;
- }
- if (found) {
- break;
- }
- }
- }
- rcode = -1;
- if (!found) {
- printk(KERN_ERR "raw1394: arm_write FAILED addr_entry not found"
- " -> rcode_address_error\n");
- spin_unlock_irqrestore(&host_info_lock, irqflags);
- return (RCODE_ADDRESS_ERROR);
- } else {
- DBGMSG("arm_write addr_entry FOUND");
- }
- if (arm_addr->rec_length < length) {
- DBGMSG("arm_write blocklength too big -> rcode_data_error");
- rcode = RCODE_DATA_ERROR; /* hardware error, data is unavailable */
- }
- if (rcode == -1) {
- if (arm_addr->access_rights & ARM_WRITE) {
- if (!(arm_addr->client_transactions & ARM_WRITE)) {
- memcpy((arm_addr->addr_space_buffer) +
- (addr - (arm_addr->start)), data,
- length);
- DBGMSG("arm_write -> (rcode_complete)");
- rcode = RCODE_COMPLETE;
- }
- } else {
- rcode = RCODE_TYPE_ERROR; /* function not allowed */
- DBGMSG("arm_write -> rcode_type_error (access denied)");
- }
- }
- if (arm_addr->notification_options & ARM_WRITE) {
- DBGMSG("arm_write -> entering notification-section");
- req = __alloc_pending_request(GFP_ATOMIC);
- if (!req) {
- DBGMSG("arm_write -> rcode_conflict_error");
- spin_unlock_irqrestore(&host_info_lock, irqflags);
- return (RCODE_CONFLICT_ERROR); /* A resource conflict was detected.
- The request may be retried */
- }
- size =
- sizeof(struct arm_request) + sizeof(struct arm_response) +
- (length) * sizeof(byte_t) +
- sizeof(struct arm_request_response);
- req->data = kmalloc(size, GFP_ATOMIC);
- if (!(req->data)) {
- free_pending_request(req);
- DBGMSG("arm_write -> rcode_conflict_error");
- spin_unlock_irqrestore(&host_info_lock, irqflags);
- return (RCODE_CONFLICT_ERROR); /* A resource conflict was detected.
- The request may be retried */
- }
- req->free_data = 1;
- req->file_info = fi;
- req->req.type = RAW1394_REQ_ARM;
- req->req.generation = get_hpsb_generation(host);
- req->req.misc =
- (((length << 16) & (0xFFFF0000)) | (ARM_WRITE & 0xFF));
- req->req.tag = arm_addr->arm_tag;
- req->req.recvb = arm_addr->recvb;
- req->req.length = size;
- arm_req_resp = (struct arm_request_response *)(req->data);
- arm_req = (struct arm_request *)((byte_t *) (req->data) +
- (sizeof
- (struct
- arm_request_response)));
- arm_resp =
- (struct arm_response *)((byte_t *) (arm_req) +
- (sizeof(struct arm_request)));
- arm_resp->buffer = NULL;
- memcpy((byte_t *) arm_resp + sizeof(struct arm_response),
- data, length);
- arm_req->buffer = int2ptr((arm_addr->recvb) +
- sizeof(struct arm_request_response) +
- sizeof(struct arm_request) +
- sizeof(struct arm_response));
- arm_req->buffer_length = length;
- arm_req->generation = req->req.generation;
- arm_req->extended_transaction_code = 0;
- arm_req->destination_offset = addr;
- arm_req->source_nodeid = nodeid;
- arm_req->destination_nodeid = destid;
- arm_req->tlabel = (flags >> 10) & 0x3f;
- arm_req->tcode = (flags >> 4) & 0x0f;
- arm_resp->buffer_length = 0;
- arm_resp->response_code = rcode;
- arm_req_resp->request = int2ptr((arm_addr->recvb) +
- sizeof(struct
- arm_request_response));
- arm_req_resp->response =
- int2ptr((arm_addr->recvb) +
- sizeof(struct arm_request_response) +
- sizeof(struct arm_request));
- queue_complete_req(req);
- }
- spin_unlock_irqrestore(&host_info_lock, irqflags);
- return (rcode);
-}
-
-static int arm_lock(struct hpsb_host *host, int nodeid, quadlet_t * store,
- u64 addr, quadlet_t data, quadlet_t arg, int ext_tcode,
- u16 flags)
-{
- unsigned long irqflags;
- struct pending_request *req;
- struct host_info *hi;
- struct file_info *fi = NULL;
- struct list_head *entry;
- struct arm_addr *arm_addr = NULL;
- struct arm_request *arm_req = NULL;
- struct arm_response *arm_resp = NULL;
- int found = 0, size = 0, rcode = -1;
- quadlet_t old, new;
- struct arm_request_response *arm_req_resp = NULL;
-
- if (((ext_tcode & 0xFF) == EXTCODE_FETCH_ADD) ||
- ((ext_tcode & 0xFF) == EXTCODE_LITTLE_ADD)) {
- DBGMSG("arm_lock called by node: %X "
- "addr: %4.4x %8.8x extcode: %2.2X data: %8.8X",
- nodeid, (u16) ((addr >> 32) & 0xFFFF),
- (u32) (addr & 0xFFFFFFFF), ext_tcode & 0xFF,
- be32_to_cpu(data));
- } else {
- DBGMSG("arm_lock called by node: %X "
- "addr: %4.4x %8.8x extcode: %2.2X data: %8.8X arg: %8.8X",
- nodeid, (u16) ((addr >> 32) & 0xFFFF),
- (u32) (addr & 0xFFFFFFFF), ext_tcode & 0xFF,
- be32_to_cpu(data), be32_to_cpu(arg));
- }
- spin_lock_irqsave(&host_info_lock, irqflags);
- hi = find_host_info(host); /* search address-entry */
- if (hi != NULL) {
- list_for_each_entry(fi, &hi->file_info_list, list) {
- entry = fi->addr_list.next;
- while (entry != &(fi->addr_list)) {
- arm_addr =
- list_entry(entry, struct arm_addr,
- addr_list);
- if (((arm_addr->start) <= (addr))
- && ((arm_addr->end) >=
- (addr + sizeof(*store)))) {
- found = 1;
- break;
- }
- entry = entry->next;
- }
- if (found) {
- break;
- }
- }
- }
- rcode = -1;
- if (!found) {
- printk(KERN_ERR "raw1394: arm_lock FAILED addr_entry not found"
- " -> rcode_address_error\n");
- spin_unlock_irqrestore(&host_info_lock, irqflags);
- return (RCODE_ADDRESS_ERROR);
- } else {
- DBGMSG("arm_lock addr_entry FOUND");
- }
- if (rcode == -1) {
- if (arm_addr->access_rights & ARM_LOCK) {
- if (!(arm_addr->client_transactions & ARM_LOCK)) {
- memcpy(&old,
- (arm_addr->addr_space_buffer) + (addr -
- (arm_addr->
- start)),
- sizeof(old));
- switch (ext_tcode) {
- case (EXTCODE_MASK_SWAP):
- new = data | (old & ~arg);
- break;
- case (EXTCODE_COMPARE_SWAP):
- if (old == arg) {
- new = data;
- } else {
- new = old;
- }
- break;
- case (EXTCODE_FETCH_ADD):
- new =
- cpu_to_be32(be32_to_cpu(data) +
- be32_to_cpu(old));
- break;
- case (EXTCODE_LITTLE_ADD):
- new =
- cpu_to_le32(le32_to_cpu(data) +
- le32_to_cpu(old));
- break;
- case (EXTCODE_BOUNDED_ADD):
- if (old != arg) {
- new =
- cpu_to_be32(be32_to_cpu
- (data) +
- be32_to_cpu
- (old));
- } else {
- new = old;
- }
- break;
- case (EXTCODE_WRAP_ADD):
- if (old != arg) {
- new =
- cpu_to_be32(be32_to_cpu
- (data) +
- be32_to_cpu
- (old));
- } else {
- new = data;
- }
- break;
- default:
- rcode = RCODE_TYPE_ERROR; /* function not allowed */
- printk(KERN_ERR
- "raw1394: arm_lock FAILED "
- "ext_tcode not allowed -> rcode_type_error\n");
- break;
- } /*switch */
- if (rcode == -1) {
- DBGMSG("arm_lock -> (rcode_complete)");
- rcode = RCODE_COMPLETE;
- memcpy(store, &old, sizeof(*store));
- memcpy((arm_addr->addr_space_buffer) +
- (addr - (arm_addr->start)),
- &new, sizeof(*store));
- }
- }
- } else {
- rcode = RCODE_TYPE_ERROR; /* function not allowed */
- DBGMSG("arm_lock -> rcode_type_error (access denied)");
- }
- }
- if (arm_addr->notification_options & ARM_LOCK) {
- byte_t *buf1, *buf2;
- DBGMSG("arm_lock -> entering notification-section");
- req = __alloc_pending_request(GFP_ATOMIC);
- if (!req) {
- DBGMSG("arm_lock -> rcode_conflict_error");
- spin_unlock_irqrestore(&host_info_lock, irqflags);
- return (RCODE_CONFLICT_ERROR); /* A resource conflict was detected.
- The request may be retried */
- }
- size = sizeof(struct arm_request) + sizeof(struct arm_response) + 3 * sizeof(*store) + sizeof(struct arm_request_response); /* maximum */
- req->data = kmalloc(size, GFP_ATOMIC);
- if (!(req->data)) {
- free_pending_request(req);
- DBGMSG("arm_lock -> rcode_conflict_error");
- spin_unlock_irqrestore(&host_info_lock, irqflags);
- return (RCODE_CONFLICT_ERROR); /* A resource conflict was detected.
- The request may be retried */
- }
- req->free_data = 1;
- arm_req_resp = (struct arm_request_response *)(req->data);
- arm_req = (struct arm_request *)((byte_t *) (req->data) +
- (sizeof
- (struct
- arm_request_response)));
- arm_resp =
- (struct arm_response *)((byte_t *) (arm_req) +
- (sizeof(struct arm_request)));
- buf1 = (byte_t *) arm_resp + sizeof(struct arm_response);
- buf2 = buf1 + 2 * sizeof(*store);
- if ((ext_tcode == EXTCODE_FETCH_ADD) ||
- (ext_tcode == EXTCODE_LITTLE_ADD)) {
- arm_req->buffer_length = sizeof(*store);
- memcpy(buf1, &data, sizeof(*store));
-
- } else {
- arm_req->buffer_length = 2 * sizeof(*store);
- memcpy(buf1, &arg, sizeof(*store));
- memcpy(buf1 + sizeof(*store), &data, sizeof(*store));
- }
- if (rcode == RCODE_COMPLETE) {
- arm_resp->buffer_length = sizeof(*store);
- memcpy(buf2, &old, sizeof(*store));
- } else {
- arm_resp->buffer_length = 0;
- }
- req->file_info = fi;
- req->req.type = RAW1394_REQ_ARM;
- req->req.generation = get_hpsb_generation(host);
- req->req.misc = ((((sizeof(*store)) << 16) & (0xFFFF0000)) |
- (ARM_LOCK & 0xFF));
- req->req.tag = arm_addr->arm_tag;
- req->req.recvb = arm_addr->recvb;
- req->req.length = size;
- arm_req->generation = req->req.generation;
- arm_req->extended_transaction_code = ext_tcode;
- arm_req->destination_offset = addr;
- arm_req->source_nodeid = nodeid;
- arm_req->destination_nodeid = host->node_id;
- arm_req->tlabel = (flags >> 10) & 0x3f;
- arm_req->tcode = (flags >> 4) & 0x0f;
- arm_resp->response_code = rcode;
- arm_req_resp->request = int2ptr((arm_addr->recvb) +
- sizeof(struct
- arm_request_response));
- arm_req_resp->response =
- int2ptr((arm_addr->recvb) +
- sizeof(struct arm_request_response) +
- sizeof(struct arm_request));
- arm_req->buffer =
- int2ptr((arm_addr->recvb) +
- sizeof(struct arm_request_response) +
- sizeof(struct arm_request) +
- sizeof(struct arm_response));
- arm_resp->buffer =
- int2ptr((arm_addr->recvb) +
- sizeof(struct arm_request_response) +
- sizeof(struct arm_request) +
- sizeof(struct arm_response) + 2 * sizeof(*store));
- queue_complete_req(req);
- }
- spin_unlock_irqrestore(&host_info_lock, irqflags);
- return (rcode);
-}
-
-static int arm_lock64(struct hpsb_host *host, int nodeid, octlet_t * store,
- u64 addr, octlet_t data, octlet_t arg, int ext_tcode,
- u16 flags)
-{
- unsigned long irqflags;
- struct pending_request *req;
- struct host_info *hi;
- struct file_info *fi = NULL;
- struct list_head *entry;
- struct arm_addr *arm_addr = NULL;
- struct arm_request *arm_req = NULL;
- struct arm_response *arm_resp = NULL;
- int found = 0, size = 0, rcode = -1;
- octlet_t old, new;
- struct arm_request_response *arm_req_resp = NULL;
-
- if (((ext_tcode & 0xFF) == EXTCODE_FETCH_ADD) ||
- ((ext_tcode & 0xFF) == EXTCODE_LITTLE_ADD)) {
- DBGMSG("arm_lock64 called by node: %X "
- "addr: %4.4x %8.8x extcode: %2.2X data: %8.8X %8.8X ",
- nodeid, (u16) ((addr >> 32) & 0xFFFF),
- (u32) (addr & 0xFFFFFFFF),
- ext_tcode & 0xFF,
- (u32) ((be64_to_cpu(data) >> 32) & 0xFFFFFFFF),
- (u32) (be64_to_cpu(data) & 0xFFFFFFFF));
- } else {
- DBGMSG("arm_lock64 called by node: %X "
- "addr: %4.4x %8.8x extcode: %2.2X data: %8.8X %8.8X arg: "
- "%8.8X %8.8X ",
- nodeid, (u16) ((addr >> 32) & 0xFFFF),
- (u32) (addr & 0xFFFFFFFF),
- ext_tcode & 0xFF,
- (u32) ((be64_to_cpu(data) >> 32) & 0xFFFFFFFF),
- (u32) (be64_to_cpu(data) & 0xFFFFFFFF),
- (u32) ((be64_to_cpu(arg) >> 32) & 0xFFFFFFFF),
- (u32) (be64_to_cpu(arg) & 0xFFFFFFFF));
- }
- spin_lock_irqsave(&host_info_lock, irqflags);
- hi = find_host_info(host); /* search address-entry in file_infos for host */
- if (hi != NULL) {
- list_for_each_entry(fi, &hi->file_info_list, list) {
- entry = fi->addr_list.next;
- while (entry != &(fi->addr_list)) {
- arm_addr =
- list_entry(entry, struct arm_addr,
- addr_list);
- if (((arm_addr->start) <= (addr))
- && ((arm_addr->end) >=
- (addr + sizeof(*store)))) {
- found = 1;
- break;
- }
- entry = entry->next;
- }
- if (found) {
- break;
- }
- }
- }
- rcode = -1;
- if (!found) {
- printk(KERN_ERR
- "raw1394: arm_lock64 FAILED addr_entry not found"
- " -> rcode_address_error\n");
- spin_unlock_irqrestore(&host_info_lock, irqflags);
- return (RCODE_ADDRESS_ERROR);
- } else {
- DBGMSG("arm_lock64 addr_entry FOUND");
- }
- if (rcode == -1) {
- if (arm_addr->access_rights & ARM_LOCK) {
- if (!(arm_addr->client_transactions & ARM_LOCK)) {
- memcpy(&old,
- (arm_addr->addr_space_buffer) + (addr -
- (arm_addr->
- start)),
- sizeof(old));
- switch (ext_tcode) {
- case (EXTCODE_MASK_SWAP):
- new = data | (old & ~arg);
- break;
- case (EXTCODE_COMPARE_SWAP):
- if (old == arg) {
- new = data;
- } else {
- new = old;
- }
- break;
- case (EXTCODE_FETCH_ADD):
- new =
- cpu_to_be64(be64_to_cpu(data) +
- be64_to_cpu(old));
- break;
- case (EXTCODE_LITTLE_ADD):
- new =
- cpu_to_le64(le64_to_cpu(data) +
- le64_to_cpu(old));
- break;
- case (EXTCODE_BOUNDED_ADD):
- if (old != arg) {
- new =
- cpu_to_be64(be64_to_cpu
- (data) +
- be64_to_cpu
- (old));
- } else {
- new = old;
- }
- break;
- case (EXTCODE_WRAP_ADD):
- if (old != arg) {
- new =
- cpu_to_be64(be64_to_cpu
- (data) +
- be64_to_cpu
- (old));
- } else {
- new = data;
- }
- break;
- default:
- printk(KERN_ERR
- "raw1394: arm_lock64 FAILED "
- "ext_tcode not allowed -> rcode_type_error\n");
- rcode = RCODE_TYPE_ERROR; /* function not allowed */
- break;
- } /*switch */
- if (rcode == -1) {
- DBGMSG
- ("arm_lock64 -> (rcode_complete)");
- rcode = RCODE_COMPLETE;
- memcpy(store, &old, sizeof(*store));
- memcpy((arm_addr->addr_space_buffer) +
- (addr - (arm_addr->start)),
- &new, sizeof(*store));
- }
- }
- } else {
- rcode = RCODE_TYPE_ERROR; /* function not allowed */
- DBGMSG
- ("arm_lock64 -> rcode_type_error (access denied)");
- }
- }
- if (arm_addr->notification_options & ARM_LOCK) {
- byte_t *buf1, *buf2;
- DBGMSG("arm_lock64 -> entering notification-section");
- req = __alloc_pending_request(GFP_ATOMIC);
- if (!req) {
- spin_unlock_irqrestore(&host_info_lock, irqflags);
- DBGMSG("arm_lock64 -> rcode_conflict_error");
- return (RCODE_CONFLICT_ERROR); /* A resource conflict was detected.
- The request may be retried */
- }
- size = sizeof(struct arm_request) + sizeof(struct arm_response) + 3 * sizeof(*store) + sizeof(struct arm_request_response); /* maximum */
- req->data = kmalloc(size, GFP_ATOMIC);
- if (!(req->data)) {
- free_pending_request(req);
- spin_unlock_irqrestore(&host_info_lock, irqflags);
- DBGMSG("arm_lock64 -> rcode_conflict_error");
- return (RCODE_CONFLICT_ERROR); /* A resource conflict was detected.
- The request may be retried */
- }
- req->free_data = 1;
- arm_req_resp = (struct arm_request_response *)(req->data);
- arm_req = (struct arm_request *)((byte_t *) (req->data) +
- (sizeof
- (struct
- arm_request_response)));
- arm_resp =
- (struct arm_response *)((byte_t *) (arm_req) +
- (sizeof(struct arm_request)));
- buf1 = (byte_t *) arm_resp + sizeof(struct arm_response);
- buf2 = buf1 + 2 * sizeof(*store);
- if ((ext_tcode == EXTCODE_FETCH_ADD) ||
- (ext_tcode == EXTCODE_LITTLE_ADD)) {
- arm_req->buffer_length = sizeof(*store);
- memcpy(buf1, &data, sizeof(*store));
-
- } else {
- arm_req->buffer_length = 2 * sizeof(*store);
- memcpy(buf1, &arg, sizeof(*store));
- memcpy(buf1 + sizeof(*store), &data, sizeof(*store));
- }
- if (rcode == RCODE_COMPLETE) {
- arm_resp->buffer_length = sizeof(*store);
- memcpy(buf2, &old, sizeof(*store));
- } else {
- arm_resp->buffer_length = 0;
- }
- req->file_info = fi;
- req->req.type = RAW1394_REQ_ARM;
- req->req.generation = get_hpsb_generation(host);
- req->req.misc = ((((sizeof(*store)) << 16) & (0xFFFF0000)) |
- (ARM_LOCK & 0xFF));
- req->req.tag = arm_addr->arm_tag;
- req->req.recvb = arm_addr->recvb;
- req->req.length = size;
- arm_req->generation = req->req.generation;
- arm_req->extended_transaction_code = ext_tcode;
- arm_req->destination_offset = addr;
- arm_req->source_nodeid = nodeid;
- arm_req->destination_nodeid = host->node_id;
- arm_req->tlabel = (flags >> 10) & 0x3f;
- arm_req->tcode = (flags >> 4) & 0x0f;
- arm_resp->response_code = rcode;
- arm_req_resp->request = int2ptr((arm_addr->recvb) +
- sizeof(struct
- arm_request_response));
- arm_req_resp->response =
- int2ptr((arm_addr->recvb) +
- sizeof(struct arm_request_response) +
- sizeof(struct arm_request));
- arm_req->buffer =
- int2ptr((arm_addr->recvb) +
- sizeof(struct arm_request_response) +
- sizeof(struct arm_request) +
- sizeof(struct arm_response));
- arm_resp->buffer =
- int2ptr((arm_addr->recvb) +
- sizeof(struct arm_request_response) +
- sizeof(struct arm_request) +
- sizeof(struct arm_response) + 2 * sizeof(*store));
- queue_complete_req(req);
- }
- spin_unlock_irqrestore(&host_info_lock, irqflags);
- return (rcode);
-}
-
-static int arm_register(struct file_info *fi, struct pending_request *req)
-{
- int retval;
- struct arm_addr *addr;
- struct host_info *hi;
- struct file_info *fi_hlp = NULL;
- struct list_head *entry;
- struct arm_addr *arm_addr = NULL;
- int same_host, another_host;
- unsigned long flags;
-
- DBGMSG("arm_register called "
- "addr(Offset): %8.8x %8.8x length: %u "
- "rights: %2.2X notify: %2.2X "
- "max_blk_len: %4.4X",
- (u32) ((req->req.address >> 32) & 0xFFFF),
- (u32) (req->req.address & 0xFFFFFFFF),
- req->req.length, ((req->req.misc >> 8) & 0xFF),
- (req->req.misc & 0xFF), ((req->req.misc >> 16) & 0xFFFF));
- /* check addressrange */
- if ((((req->req.address) & ~(0xFFFFFFFFFFFFULL)) != 0) ||
- (((req->req.address + req->req.length) & ~(0xFFFFFFFFFFFFULL)) !=
- 0)) {
- req->req.length = 0;
- return (-EINVAL);
- }
- /* addr-list-entry for fileinfo */
- addr = kmalloc(sizeof(*addr), GFP_KERNEL);
- if (!addr) {
- req->req.length = 0;
- return (-ENOMEM);
- }
- /* allocation of addr_space_buffer */
- addr->addr_space_buffer = vmalloc(req->req.length);
- if (!(addr->addr_space_buffer)) {
- kfree(addr);
- req->req.length = 0;
- return (-ENOMEM);
- }
- /* initialization of addr_space_buffer */
- if ((req->req.sendb) == (unsigned long)NULL) {
- /* init: set 0 */
- memset(addr->addr_space_buffer, 0, req->req.length);
- } else {
- /* init: user -> kernel */
- if (copy_from_user
- (addr->addr_space_buffer, int2ptr(req->req.sendb),
- req->req.length)) {
- vfree(addr->addr_space_buffer);
- kfree(addr);
- return (-EFAULT);
- }
- }
- INIT_LIST_HEAD(&addr->addr_list);
- addr->arm_tag = req->req.tag;
- addr->start = req->req.address;
- addr->end = req->req.address + req->req.length;
- addr->access_rights = (u8) (req->req.misc & 0x0F);
- addr->notification_options = (u8) ((req->req.misc >> 4) & 0x0F);
- addr->client_transactions = (u8) ((req->req.misc >> 8) & 0x0F);
- addr->access_rights |= addr->client_transactions;
- addr->notification_options |= addr->client_transactions;
- addr->recvb = req->req.recvb;
- addr->rec_length = (u16) ((req->req.misc >> 16) & 0xFFFF);
-
- spin_lock_irqsave(&host_info_lock, flags);
- hi = find_host_info(fi->host);
- same_host = 0;
- another_host = 0;
- /* same host with address-entry containing same addressrange ? */
- list_for_each_entry(fi_hlp, &hi->file_info_list, list) {
- entry = fi_hlp->addr_list.next;
- while (entry != &(fi_hlp->addr_list)) {
- arm_addr =
- list_entry(entry, struct arm_addr, addr_list);
- if ((arm_addr->start == addr->start)
- && (arm_addr->end == addr->end)) {
- DBGMSG("same host ownes same "
- "addressrange -> EALREADY");
- same_host = 1;
- break;
- }
- entry = entry->next;
- }
- if (same_host) {
- break;
- }
- }
- if (same_host) {
- /* addressrange occupied by same host */
- spin_unlock_irqrestore(&host_info_lock, flags);
- vfree(addr->addr_space_buffer);
- kfree(addr);
- return (-EALREADY);
- }
- /* another host with valid address-entry containing same addressrange */
- list_for_each_entry(hi, &host_info_list, list) {
- if (hi->host != fi->host) {
- list_for_each_entry(fi_hlp, &hi->file_info_list, list) {
- entry = fi_hlp->addr_list.next;
- while (entry != &(fi_hlp->addr_list)) {
- arm_addr =
- list_entry(entry, struct arm_addr,
- addr_list);
- if ((arm_addr->start == addr->start)
- && (arm_addr->end == addr->end)) {
- DBGMSG
- ("another host ownes same "
- "addressrange");
- another_host = 1;
- break;
- }
- entry = entry->next;
- }
- if (another_host) {
- break;
- }
- }
- }
- }
- spin_unlock_irqrestore(&host_info_lock, flags);
-
- if (another_host) {
- DBGMSG("another hosts entry is valid -> SUCCESS");
- if (copy_to_user(int2ptr(req->req.recvb),
- &addr->start, sizeof(u64))) {
- printk(KERN_ERR "raw1394: arm_register failed "
- " address-range-entry is invalid -> EFAULT !!!\n");
- vfree(addr->addr_space_buffer);
- kfree(addr);
- return (-EFAULT);
- }
- free_pending_request(req); /* immediate success or fail */
- /* INSERT ENTRY */
- spin_lock_irqsave(&host_info_lock, flags);
- list_add_tail(&addr->addr_list, &fi->addr_list);
- spin_unlock_irqrestore(&host_info_lock, flags);
- return 0;
- }
- retval =
- hpsb_register_addrspace(&raw1394_highlevel, fi->host, &arm_ops,
- req->req.address,
- req->req.address + req->req.length);
- if (retval) {
- /* INSERT ENTRY */
- spin_lock_irqsave(&host_info_lock, flags);
- list_add_tail(&addr->addr_list, &fi->addr_list);
- spin_unlock_irqrestore(&host_info_lock, flags);
- } else {
- DBGMSG("arm_register failed errno: %d \n", retval);
- vfree(addr->addr_space_buffer);
- kfree(addr);
- return (-EALREADY);
- }
- free_pending_request(req); /* immediate success or fail */
- return 0;
-}
-
-static int arm_unregister(struct file_info *fi, struct pending_request *req)
-{
- int found = 0;
- int retval = 0;
- struct list_head *entry;
- struct arm_addr *addr = NULL;
- struct host_info *hi;
- struct file_info *fi_hlp = NULL;
- struct arm_addr *arm_addr = NULL;
- int another_host;
- unsigned long flags;
-
- DBGMSG("arm_Unregister called addr(Offset): "
- "%8.8x %8.8x",
- (u32) ((req->req.address >> 32) & 0xFFFF),
- (u32) (req->req.address & 0xFFFFFFFF));
- spin_lock_irqsave(&host_info_lock, flags);
- /* get addr */
- entry = fi->addr_list.next;
- while (entry != &(fi->addr_list)) {
- addr = list_entry(entry, struct arm_addr, addr_list);
- if (addr->start == req->req.address) {
- found = 1;
- break;
- }
- entry = entry->next;
- }
- if (!found) {
- DBGMSG("arm_Unregister addr not found");
- spin_unlock_irqrestore(&host_info_lock, flags);
- return (-EINVAL);
- }
- DBGMSG("arm_Unregister addr found");
- another_host = 0;
- /* another host with valid address-entry containing
- same addressrange */
- list_for_each_entry(hi, &host_info_list, list) {
- if (hi->host != fi->host) {
- list_for_each_entry(fi_hlp, &hi->file_info_list, list) {
- entry = fi_hlp->addr_list.next;
- while (entry != &(fi_hlp->addr_list)) {
- arm_addr = list_entry(entry,
- struct arm_addr,
- addr_list);
- if (arm_addr->start == addr->start) {
- DBGMSG("another host ownes "
- "same addressrange");
- another_host = 1;
- break;
- }
- entry = entry->next;
- }
- if (another_host) {
- break;
- }
- }
- }
- }
- if (another_host) {
- DBGMSG("delete entry from list -> success");
- list_del(&addr->addr_list);
- spin_unlock_irqrestore(&host_info_lock, flags);
- vfree(addr->addr_space_buffer);
- kfree(addr);
- free_pending_request(req); /* immediate success or fail */
- return 0;
- }
- retval =
- hpsb_unregister_addrspace(&raw1394_highlevel, fi->host,
- addr->start);
- if (!retval) {
- printk(KERN_ERR "raw1394: arm_Unregister failed -> EINVAL\n");
- spin_unlock_irqrestore(&host_info_lock, flags);
- return (-EINVAL);
- }
- DBGMSG("delete entry from list -> success");
- list_del(&addr->addr_list);
- spin_unlock_irqrestore(&host_info_lock, flags);
- vfree(addr->addr_space_buffer);
- kfree(addr);
- free_pending_request(req); /* immediate success or fail */
- return 0;
-}
-
-/* Copy data from ARM buffer(s) to user buffer. */
-static int arm_get_buf(struct file_info *fi, struct pending_request *req)
-{
- struct arm_addr *arm_addr = NULL;
- unsigned long flags;
- unsigned long offset;
-
- struct list_head *entry;
-
- DBGMSG("arm_get_buf "
- "addr(Offset): %04X %08X length: %u",
- (u32) ((req->req.address >> 32) & 0xFFFF),
- (u32) (req->req.address & 0xFFFFFFFF), (u32) req->req.length);
-
- spin_lock_irqsave(&host_info_lock, flags);
- entry = fi->addr_list.next;
- while (entry != &(fi->addr_list)) {
- arm_addr = list_entry(entry, struct arm_addr, addr_list);
- if ((arm_addr->start <= req->req.address) &&
- (arm_addr->end > req->req.address)) {
- if (req->req.address + req->req.length <= arm_addr->end) {
- offset = req->req.address - arm_addr->start;
- spin_unlock_irqrestore(&host_info_lock, flags);
-
- DBGMSG
- ("arm_get_buf copy_to_user( %08X, %p, %u )",
- (u32) req->req.recvb,
- arm_addr->addr_space_buffer + offset,
- (u32) req->req.length);
- if (copy_to_user
- (int2ptr(req->req.recvb),
- arm_addr->addr_space_buffer + offset,
- req->req.length))
- return (-EFAULT);
-
- /* We have to free the request, because we
- * queue no response, and therefore nobody
- * will free it. */
- free_pending_request(req);
- return 0;
- } else {
- DBGMSG("arm_get_buf request exceeded mapping");
- spin_unlock_irqrestore(&host_info_lock, flags);
- return (-EINVAL);
- }
- }
- entry = entry->next;
- }
- spin_unlock_irqrestore(&host_info_lock, flags);
- return (-EINVAL);
-}
-
-/* Copy data from user buffer to ARM buffer(s). */
-static int arm_set_buf(struct file_info *fi, struct pending_request *req)
-{
- struct arm_addr *arm_addr = NULL;
- unsigned long flags;
- unsigned long offset;
-
- struct list_head *entry;
-
- DBGMSG("arm_set_buf "
- "addr(Offset): %04X %08X length: %u",
- (u32) ((req->req.address >> 32) & 0xFFFF),
- (u32) (req->req.address & 0xFFFFFFFF), (u32) req->req.length);
-
- spin_lock_irqsave(&host_info_lock, flags);
- entry = fi->addr_list.next;
- while (entry != &(fi->addr_list)) {
- arm_addr = list_entry(entry, struct arm_addr, addr_list);
- if ((arm_addr->start <= req->req.address) &&
- (arm_addr->end > req->req.address)) {
- if (req->req.address + req->req.length <= arm_addr->end) {
- offset = req->req.address - arm_addr->start;
- spin_unlock_irqrestore(&host_info_lock, flags);
-
- DBGMSG
- ("arm_set_buf copy_from_user( %p, %08X, %u )",
- arm_addr->addr_space_buffer + offset,
- (u32) req->req.sendb,
- (u32) req->req.length);
- if (copy_from_user
- (arm_addr->addr_space_buffer + offset,
- int2ptr(req->req.sendb),
- req->req.length))
- return (-EFAULT);
-
- /* We have to free the request, because we
- * queue no response, and therefore nobody
- * will free it. */
- free_pending_request(req);
- return 0;
- } else {
- DBGMSG("arm_set_buf request exceeded mapping");
- spin_unlock_irqrestore(&host_info_lock, flags);
- return (-EINVAL);
- }
- }
- entry = entry->next;
- }
- spin_unlock_irqrestore(&host_info_lock, flags);
- return (-EINVAL);
-}
-
-static int reset_notification(struct file_info *fi, struct pending_request *req)
-{
- DBGMSG("reset_notification called - switch %s ",
- (req->req.misc == RAW1394_NOTIFY_OFF) ? "OFF" : "ON");
- if ((req->req.misc == RAW1394_NOTIFY_OFF) ||
- (req->req.misc == RAW1394_NOTIFY_ON)) {
- fi->notification = (u8) req->req.misc;
- free_pending_request(req); /* we have to free the request, because we queue no response, and therefore nobody will free it */
- return 0;
- }
- /* error EINVAL (22) invalid argument */
- return (-EINVAL);
-}
-
-static int write_phypacket(struct file_info *fi, struct pending_request *req)
-{
- struct hpsb_packet *packet = NULL;
- int retval = 0;
- quadlet_t data;
- unsigned long flags;
-
- data = be32_to_cpu((u32) req->req.sendb);
- DBGMSG("write_phypacket called - quadlet 0x%8.8x ", data);
- packet = hpsb_make_phypacket(fi->host, data);
- if (!packet)
- return -ENOMEM;
- req->req.length = 0;
- req->packet = packet;
- hpsb_set_packet_complete_task(packet,
- (void (*)(void *))queue_complete_cb, req);
- spin_lock_irqsave(&fi->reqlists_lock, flags);
- list_add_tail(&req->list, &fi->req_pending);
- spin_unlock_irqrestore(&fi->reqlists_lock, flags);
- packet->generation = req->req.generation;
- retval = hpsb_send_packet(packet);
- DBGMSG("write_phypacket send_packet called => retval: %d ", retval);
- if (retval < 0) {
- req->req.error = RAW1394_ERROR_SEND_ERROR;
- req->req.length = 0;
- queue_complete_req(req);
- }
- return 0;
-}
-
-static int get_config_rom(struct file_info *fi, struct pending_request *req)
-{
- int ret = 0;
- quadlet_t *data = kmalloc(req->req.length, GFP_KERNEL);
- int status;
-
- if (!data)
- return -ENOMEM;
-
- status =
- csr1212_read(fi->host->csr.rom, CSR1212_CONFIG_ROM_SPACE_OFFSET,
- data, req->req.length);
- if (copy_to_user(int2ptr(req->req.recvb), data, req->req.length))
- ret = -EFAULT;
- if (copy_to_user
- (int2ptr(req->req.tag), &fi->host->csr.rom->cache_head->len,
- sizeof(fi->host->csr.rom->cache_head->len)))
- ret = -EFAULT;
- if (copy_to_user(int2ptr(req->req.address), &fi->host->csr.generation,
- sizeof(fi->host->csr.generation)))
- ret = -EFAULT;
- if (copy_to_user(int2ptr(req->req.sendb), &status, sizeof(status)))
- ret = -EFAULT;
- kfree(data);
- if (ret >= 0) {
- free_pending_request(req); /* we have to free the request, because we queue no response, and therefore nobody will free it */
- }
- return ret;
-}
-
-static int update_config_rom(struct file_info *fi, struct pending_request *req)
-{
- int ret = 0;
- quadlet_t *data = kmalloc(req->req.length, GFP_KERNEL);
- if (!data)
- return -ENOMEM;
- if (copy_from_user(data, int2ptr(req->req.sendb), req->req.length)) {
- ret = -EFAULT;
- } else {
- int status = hpsb_update_config_rom(fi->host,
- data, req->req.length,
- (unsigned char)req->req.
- misc);
- if (copy_to_user
- (int2ptr(req->req.recvb), &status, sizeof(status)))
- ret = -ENOMEM;
- }
- kfree(data);
- if (ret >= 0) {
- free_pending_request(req); /* we have to free the request, because we queue no response, and therefore nobody will free it */
- fi->cfgrom_upd = 1;
- }
- return ret;
-}
-
-static int modify_config_rom(struct file_info *fi, struct pending_request *req)
-{
- struct csr1212_keyval *kv;
- struct csr1212_csr_rom_cache *cache;
- struct csr1212_dentry *dentry;
- u32 dr;
- int ret = 0;
-
- if (req->req.misc == ~0) {
- if (req->req.length == 0)
- return -EINVAL;
-
- /* Find an unused slot */
- for (dr = 0;
- dr < RAW1394_MAX_USER_CSR_DIRS && fi->csr1212_dirs[dr];
- dr++) ;
-
- if (dr == RAW1394_MAX_USER_CSR_DIRS)
- return -ENOMEM;
-
- fi->csr1212_dirs[dr] =
- csr1212_new_directory(CSR1212_KV_ID_VENDOR);
- if (!fi->csr1212_dirs[dr])
- return -ENOMEM;
- } else {
- dr = req->req.misc;
- if (!fi->csr1212_dirs[dr])
- return -EINVAL;
-
- /* Delete old stuff */
- for (dentry =
- fi->csr1212_dirs[dr]->value.directory.dentries_head;
- dentry; dentry = dentry->next) {
- csr1212_detach_keyval_from_directory(fi->host->csr.rom->
- root_kv,
- dentry->kv);
- }
-
- if (req->req.length == 0) {
- csr1212_release_keyval(fi->csr1212_dirs[dr]);
- fi->csr1212_dirs[dr] = NULL;
-
- hpsb_update_config_rom_image(fi->host);
- free_pending_request(req);
- return 0;
- }
- }
-
- cache = csr1212_rom_cache_malloc(0, req->req.length);
- if (!cache) {
- csr1212_release_keyval(fi->csr1212_dirs[dr]);
- fi->csr1212_dirs[dr] = NULL;
- return -ENOMEM;
- }
-
- cache->filled_head = kmalloc(sizeof(*cache->filled_head), GFP_KERNEL);
- if (!cache->filled_head) {
- csr1212_release_keyval(fi->csr1212_dirs[dr]);
- fi->csr1212_dirs[dr] = NULL;
- CSR1212_FREE(cache);
- return -ENOMEM;
- }
- cache->filled_tail = cache->filled_head;
-
- if (copy_from_user(cache->data, int2ptr(req->req.sendb),
- req->req.length)) {
- csr1212_release_keyval(fi->csr1212_dirs[dr]);
- fi->csr1212_dirs[dr] = NULL;
- ret = -EFAULT;
- } else {
- cache->len = req->req.length;
- cache->filled_head->offset_start = 0;
- cache->filled_head->offset_end = cache->size - 1;
-
- cache->layout_head = cache->layout_tail = fi->csr1212_dirs[dr];
-
- ret = CSR1212_SUCCESS;
- /* parse all the items */
- for (kv = cache->layout_head; ret == CSR1212_SUCCESS && kv;
- kv = kv->next) {
- ret = csr1212_parse_keyval(kv, cache);
- }
-
- /* attach top level items to the root directory */
- for (dentry =
- fi->csr1212_dirs[dr]->value.directory.dentries_head;
- ret == CSR1212_SUCCESS && dentry; dentry = dentry->next) {
- ret =
- csr1212_attach_keyval_to_directory(fi->host->csr.
- rom->root_kv,
- dentry->kv);
- }
-
- if (ret == CSR1212_SUCCESS) {
- ret = hpsb_update_config_rom_image(fi->host);
-
- if (ret >= 0 && copy_to_user(int2ptr(req->req.recvb),
- &dr, sizeof(dr))) {
- ret = -ENOMEM;
- }
- }
- }
- kfree(cache->filled_head);
- CSR1212_FREE(cache);
-
- if (ret >= 0) {
- /* we have to free the request, because we queue no response,
- * and therefore nobody will free it */
- free_pending_request(req);
- return 0;
- } else {
- for (dentry =
- fi->csr1212_dirs[dr]->value.directory.dentries_head;
- dentry; dentry = dentry->next) {
- csr1212_detach_keyval_from_directory(fi->host->csr.rom->
- root_kv,
- dentry->kv);
- }
- csr1212_release_keyval(fi->csr1212_dirs[dr]);
- fi->csr1212_dirs[dr] = NULL;
- return ret;
- }
-}
-
-static int state_connected(struct file_info *fi, struct pending_request *req)
-{
- int node = req->req.address >> 48;
-
- req->req.error = RAW1394_ERROR_NONE;
-
- switch (req->req.type) {
-
- case RAW1394_REQ_ECHO:
- queue_complete_req(req);
- return 0;
-
- case RAW1394_REQ_ARM_REGISTER:
- return arm_register(fi, req);
-
- case RAW1394_REQ_ARM_UNREGISTER:
- return arm_unregister(fi, req);
-
- case RAW1394_REQ_ARM_SET_BUF:
- return arm_set_buf(fi, req);
-
- case RAW1394_REQ_ARM_GET_BUF:
- return arm_get_buf(fi, req);
-
- case RAW1394_REQ_RESET_NOTIFY:
- return reset_notification(fi, req);
-
- case RAW1394_REQ_ISO_SEND:
- case RAW1394_REQ_ISO_LISTEN:
- printk(KERN_DEBUG "raw1394: old iso ABI has been removed\n");
- req->req.error = RAW1394_ERROR_COMPAT;
- req->req.misc = RAW1394_KERNELAPI_VERSION;
- queue_complete_req(req);
- return 0;
-
- case RAW1394_REQ_FCP_LISTEN:
- handle_fcp_listen(fi, req);
- return 0;
-
- case RAW1394_REQ_RESET_BUS:
- if (req->req.misc == RAW1394_LONG_RESET) {
- DBGMSG("busreset called (type: LONG)");
- hpsb_reset_bus(fi->host, LONG_RESET);
- free_pending_request(req); /* we have to free the request, because we queue no response, and therefore nobody will free it */
- return 0;
- }
- if (req->req.misc == RAW1394_SHORT_RESET) {
- DBGMSG("busreset called (type: SHORT)");
- hpsb_reset_bus(fi->host, SHORT_RESET);
- free_pending_request(req); /* we have to free the request, because we queue no response, and therefore nobody will free it */
- return 0;
- }
- /* error EINVAL (22) invalid argument */
- return (-EINVAL);
- case RAW1394_REQ_GET_ROM:
- return get_config_rom(fi, req);
-
- case RAW1394_REQ_UPDATE_ROM:
- return update_config_rom(fi, req);
-
- case RAW1394_REQ_MODIFY_ROM:
- return modify_config_rom(fi, req);
- }
-
- if (req->req.generation != get_hpsb_generation(fi->host)) {
- req->req.error = RAW1394_ERROR_GENERATION;
- req->req.generation = get_hpsb_generation(fi->host);
- req->req.length = 0;
- queue_complete_req(req);
- return 0;
- }
-
- switch (req->req.type) {
- case RAW1394_REQ_PHYPACKET:
- return write_phypacket(fi, req);
- case RAW1394_REQ_ASYNC_SEND:
- return handle_async_send(fi, req);
- }
-
- if (req->req.length == 0) {
- req->req.error = RAW1394_ERROR_INVALID_ARG;
- queue_complete_req(req);
- return 0;
- }
-
- return handle_async_request(fi, req, node);
-}
-
-static ssize_t raw1394_write(struct file *file, const char __user * buffer,
- size_t count, loff_t * offset_is_ignored)
-{
- struct file_info *fi = file->private_data;
- struct pending_request *req;
- ssize_t retval = -EBADFD;
-
-#ifdef CONFIG_COMPAT
- if (count == sizeof(struct compat_raw1394_req) &&
- sizeof(struct compat_raw1394_req) !=
- sizeof(struct raw1394_request)) {
- buffer = raw1394_compat_write(buffer);
- if (IS_ERR((__force void *)buffer))
- return PTR_ERR((__force void *)buffer);
- } else
-#endif
- if (count != sizeof(struct raw1394_request)) {
- return -EINVAL;
- }
-
- req = alloc_pending_request();
- if (req == NULL) {
- return -ENOMEM;
- }
- req->file_info = fi;
-
- if (copy_from_user(&req->req, buffer, sizeof(struct raw1394_request))) {
- free_pending_request(req);
- return -EFAULT;
- }
-
- if (!mutex_trylock(&fi->state_mutex)) {
- free_pending_request(req);
- return -EAGAIN;
- }
-
- switch (fi->state) {
- case opened:
- retval = state_opened(fi, req);
- break;
-
- case initialized:
- retval = state_initialized(fi, req);
- break;
-
- case connected:
- retval = state_connected(fi, req);
- break;
- }
-
- mutex_unlock(&fi->state_mutex);
-
- if (retval < 0) {
- free_pending_request(req);
- } else {
- BUG_ON(retval);
- retval = count;
- }
-
- return retval;
-}
-
-/* rawiso operations */
-
-/* check if any RAW1394_REQ_RAWISO_ACTIVITY event is already in the
- * completion queue (reqlists_lock must be taken) */
-static inline int __rawiso_event_in_queue(struct file_info *fi)
-{
- struct pending_request *req;
-
- list_for_each_entry(req, &fi->req_complete, list)
- if (req->req.type == RAW1394_REQ_RAWISO_ACTIVITY)
- return 1;
-
- return 0;
-}
-
-/* put a RAWISO_ACTIVITY event in the queue, if one isn't there already */
-static void queue_rawiso_event(struct file_info *fi)
-{
- unsigned long flags;
-
- spin_lock_irqsave(&fi->reqlists_lock, flags);
-
- /* only one ISO activity event may be in the queue */
- if (!__rawiso_event_in_queue(fi)) {
- struct pending_request *req =
- __alloc_pending_request(GFP_ATOMIC);
-
- if (req) {
- req->file_info = fi;
- req->req.type = RAW1394_REQ_RAWISO_ACTIVITY;
- req->req.generation = get_hpsb_generation(fi->host);
- __queue_complete_req(req);
- } else {
- /* on allocation failure, signal an overflow */
- if (fi->iso_handle) {
- atomic_inc(&fi->iso_handle->overflows);
- }
- }
- }
- spin_unlock_irqrestore(&fi->reqlists_lock, flags);
-}
-
-static void rawiso_activity_cb(struct hpsb_iso *iso)
-{
- unsigned long flags;
- struct host_info *hi;
- struct file_info *fi;
-
- spin_lock_irqsave(&host_info_lock, flags);
- hi = find_host_info(iso->host);
-
- if (hi != NULL) {
- list_for_each_entry(fi, &hi->file_info_list, list) {
- if (fi->iso_handle == iso)
- queue_rawiso_event(fi);
- }
- }
-
- spin_unlock_irqrestore(&host_info_lock, flags);
-}
-
-/* helper function - gather all the kernel iso status bits for returning to user-space */
-static void raw1394_iso_fill_status(struct hpsb_iso *iso,
- struct raw1394_iso_status *stat)
-{
- int overflows = atomic_read(&iso->overflows);
- int skips = atomic_read(&iso->skips);
-
- stat->config.data_buf_size = iso->buf_size;
- stat->config.buf_packets = iso->buf_packets;
- stat->config.channel = iso->channel;
- stat->config.speed = iso->speed;
- stat->config.irq_interval = iso->irq_interval;
- stat->n_packets = hpsb_iso_n_ready(iso);
- stat->overflows = ((skips & 0xFFFF) << 16) | ((overflows & 0xFFFF));
- stat->xmit_cycle = iso->xmit_cycle;
-}
-
-static int raw1394_iso_xmit_init(struct file_info *fi, void __user * uaddr)
-{
- struct raw1394_iso_status stat;
-
- if (!fi->host)
- return -EINVAL;
-
- if (copy_from_user(&stat, uaddr, sizeof(stat)))
- return -EFAULT;
-
- fi->iso_handle = hpsb_iso_xmit_init(fi->host,
- stat.config.data_buf_size,
- stat.config.buf_packets,
- stat.config.channel,
- stat.config.speed,
- stat.config.irq_interval,
- rawiso_activity_cb);
- if (!fi->iso_handle)
- return -ENOMEM;
-
- fi->iso_state = RAW1394_ISO_XMIT;
-
- raw1394_iso_fill_status(fi->iso_handle, &stat);
- if (copy_to_user(uaddr, &stat, sizeof(stat)))
- return -EFAULT;
-
- /* queue an event to get things started */
- rawiso_activity_cb(fi->iso_handle);
-
- return 0;
-}
-
-static int raw1394_iso_recv_init(struct file_info *fi, void __user * uaddr)
-{
- struct raw1394_iso_status stat;
-
- if (!fi->host)
- return -EINVAL;
-
- if (copy_from_user(&stat, uaddr, sizeof(stat)))
- return -EFAULT;
-
- fi->iso_handle = hpsb_iso_recv_init(fi->host,
- stat.config.data_buf_size,
- stat.config.buf_packets,
- stat.config.channel,
- stat.config.dma_mode,
- stat.config.irq_interval,
- rawiso_activity_cb);
- if (!fi->iso_handle)
- return -ENOMEM;
-
- fi->iso_state = RAW1394_ISO_RECV;
-
- raw1394_iso_fill_status(fi->iso_handle, &stat);
- if (copy_to_user(uaddr, &stat, sizeof(stat)))
- return -EFAULT;
- return 0;
-}
-
-static int raw1394_iso_get_status(struct file_info *fi, void __user * uaddr)
-{
- struct raw1394_iso_status stat;
- struct hpsb_iso *iso = fi->iso_handle;
-
- raw1394_iso_fill_status(fi->iso_handle, &stat);
- if (copy_to_user(uaddr, &stat, sizeof(stat)))
- return -EFAULT;
-
- /* reset overflow counter */
- atomic_set(&iso->overflows, 0);
- /* reset skip counter */
- atomic_set(&iso->skips, 0);
-
- return 0;
-}
-
-/* copy N packet_infos out of the ringbuffer into user-supplied array */
-static int raw1394_iso_recv_packets(struct file_info *fi, void __user * uaddr)
-{
- struct raw1394_iso_packets upackets;
- unsigned int packet = fi->iso_handle->first_packet;
- int i;
-
- if (copy_from_user(&upackets, uaddr, sizeof(upackets)))
- return -EFAULT;
-
- if (upackets.n_packets > hpsb_iso_n_ready(fi->iso_handle))
- return -EINVAL;
-
- /* ensure user-supplied buffer is accessible and big enough */
- if (!access_ok(VERIFY_WRITE, upackets.infos,
- upackets.n_packets *
- sizeof(struct raw1394_iso_packet_info)))
- return -EFAULT;
-
- /* copy the packet_infos out */
- for (i = 0; i < upackets.n_packets; i++) {
- if (__copy_to_user(&upackets.infos[i],
- &fi->iso_handle->infos[packet],
- sizeof(struct raw1394_iso_packet_info)))
- return -EFAULT;
-
- packet = (packet + 1) % fi->iso_handle->buf_packets;
- }
-
- return 0;
-}
-
-/* copy N packet_infos from user to ringbuffer, and queue them for transmission */
-static int raw1394_iso_send_packets(struct file_info *fi, void __user * uaddr)
-{
- struct raw1394_iso_packets upackets;
- int i, rv;
-
- if (copy_from_user(&upackets, uaddr, sizeof(upackets)))
- return -EFAULT;
-
- if (upackets.n_packets >= fi->iso_handle->buf_packets)
- return -EINVAL;
-
- if (upackets.n_packets >= hpsb_iso_n_ready(fi->iso_handle))
- return -EAGAIN;
-
- /* ensure user-supplied buffer is accessible and big enough */
- if (!access_ok(VERIFY_READ, upackets.infos,
- upackets.n_packets *
- sizeof(struct raw1394_iso_packet_info)))
- return -EFAULT;
-
- /* copy the infos structs in and queue the packets */
- for (i = 0; i < upackets.n_packets; i++) {
- struct raw1394_iso_packet_info info;
-
- if (__copy_from_user(&info, &upackets.infos[i],
- sizeof(struct raw1394_iso_packet_info)))
- return -EFAULT;
-
- rv = hpsb_iso_xmit_queue_packet(fi->iso_handle, info.offset,
- info.len, info.tag, info.sy);
- if (rv)
- return rv;
- }
-
- return 0;
-}
-
-static void raw1394_iso_shutdown(struct file_info *fi)
-{
- if (fi->iso_handle)
- hpsb_iso_shutdown(fi->iso_handle);
-
- fi->iso_handle = NULL;
- fi->iso_state = RAW1394_ISO_INACTIVE;
-}
-
-static int raw1394_read_cycle_timer(struct file_info *fi, void __user * uaddr)
-{
- struct raw1394_cycle_timer ct;
- int err;
-
- err = hpsb_read_cycle_timer(fi->host, &ct.cycle_timer, &ct.local_time);
- if (!err)
- if (copy_to_user(uaddr, &ct, sizeof(ct)))
- err = -EFAULT;
- return err;
-}
-
-/* mmap the rawiso xmit/recv buffer */
-static int raw1394_mmap(struct file *file, struct vm_area_struct *vma)
-{
- struct file_info *fi = file->private_data;
- int ret;
-
- if (!mutex_trylock(&fi->state_mutex))
- return -EAGAIN;
-
- if (fi->iso_state == RAW1394_ISO_INACTIVE)
- ret = -EINVAL;
- else
- ret = dma_region_mmap(&fi->iso_handle->data_buf, file, vma);
-
- mutex_unlock(&fi->state_mutex);
-
- return ret;
-}
-
-static long raw1394_ioctl_inactive(struct file_info *fi, unsigned int cmd,
- void __user *argp)
-{
- switch (cmd) {
- case RAW1394_IOC_ISO_XMIT_INIT:
- return raw1394_iso_xmit_init(fi, argp);
- case RAW1394_IOC_ISO_RECV_INIT:
- return raw1394_iso_recv_init(fi, argp);
- default:
- return -EINVAL;
- }
-}
-
-static long raw1394_ioctl_recv(struct file_info *fi, unsigned int cmd,
- unsigned long arg)
-{
- void __user *argp = (void __user *)arg;
-
- switch (cmd) {
- case RAW1394_IOC_ISO_RECV_START:{
- int args[3];
-
- if (copy_from_user(&args[0], argp, sizeof(args)))
- return -EFAULT;
- return hpsb_iso_recv_start(fi->iso_handle,
- args[0], args[1], args[2]);
- }
- case RAW1394_IOC_ISO_XMIT_RECV_STOP:
- hpsb_iso_stop(fi->iso_handle);
- return 0;
- case RAW1394_IOC_ISO_RECV_LISTEN_CHANNEL:
- return hpsb_iso_recv_listen_channel(fi->iso_handle, arg);
- case RAW1394_IOC_ISO_RECV_UNLISTEN_CHANNEL:
- return hpsb_iso_recv_unlisten_channel(fi->iso_handle, arg);
- case RAW1394_IOC_ISO_RECV_SET_CHANNEL_MASK:{
- u64 mask;
-
- if (copy_from_user(&mask, argp, sizeof(mask)))
- return -EFAULT;
- return hpsb_iso_recv_set_channel_mask(fi->iso_handle,
- mask);
- }
- case RAW1394_IOC_ISO_GET_STATUS:
- return raw1394_iso_get_status(fi, argp);
- case RAW1394_IOC_ISO_RECV_PACKETS:
- return raw1394_iso_recv_packets(fi, argp);
- case RAW1394_IOC_ISO_RECV_RELEASE_PACKETS:
- return hpsb_iso_recv_release_packets(fi->iso_handle, arg);
- case RAW1394_IOC_ISO_RECV_FLUSH:
- return hpsb_iso_recv_flush(fi->iso_handle);
- case RAW1394_IOC_ISO_SHUTDOWN:
- raw1394_iso_shutdown(fi);
- return 0;
- case RAW1394_IOC_ISO_QUEUE_ACTIVITY:
- queue_rawiso_event(fi);
- return 0;
- default:
- return -EINVAL;
- }
-}
-
-static long raw1394_ioctl_xmit(struct file_info *fi, unsigned int cmd,
- void __user *argp)
-{
- switch (cmd) {
- case RAW1394_IOC_ISO_XMIT_START:{
- int args[2];
-
- if (copy_from_user(&args[0], argp, sizeof(args)))
- return -EFAULT;
- return hpsb_iso_xmit_start(fi->iso_handle,
- args[0], args[1]);
- }
- case RAW1394_IOC_ISO_XMIT_SYNC:
- return hpsb_iso_xmit_sync(fi->iso_handle);
- case RAW1394_IOC_ISO_XMIT_RECV_STOP:
- hpsb_iso_stop(fi->iso_handle);
- return 0;
- case RAW1394_IOC_ISO_GET_STATUS:
- return raw1394_iso_get_status(fi, argp);
- case RAW1394_IOC_ISO_XMIT_PACKETS:
- return raw1394_iso_send_packets(fi, argp);
- case RAW1394_IOC_ISO_SHUTDOWN:
- raw1394_iso_shutdown(fi);
- return 0;
- case RAW1394_IOC_ISO_QUEUE_ACTIVITY:
- queue_rawiso_event(fi);
- return 0;
- default:
- return -EINVAL;
- }
-}
-
-/* ioctl is only used for rawiso operations */
-static long raw1394_ioctl(struct file *file, unsigned int cmd,
- unsigned long arg)
-{
- struct file_info *fi = file->private_data;
- void __user *argp = (void __user *)arg;
- long ret;
-
- /* state-independent commands */
- switch(cmd) {
- case RAW1394_IOC_GET_CYCLE_TIMER:
- return raw1394_read_cycle_timer(fi, argp);
- default:
- break;
- }
-
- if (!mutex_trylock(&fi->state_mutex))
- return -EAGAIN;
-
- switch (fi->iso_state) {
- case RAW1394_ISO_INACTIVE:
- ret = raw1394_ioctl_inactive(fi, cmd, argp);
- break;
- case RAW1394_ISO_RECV:
- ret = raw1394_ioctl_recv(fi, cmd, arg);
- break;
- case RAW1394_ISO_XMIT:
- ret = raw1394_ioctl_xmit(fi, cmd, argp);
- break;
- default:
- ret = -EINVAL;
- break;
- }
-
- mutex_unlock(&fi->state_mutex);
-
- return ret;
-}
-
-#ifdef CONFIG_COMPAT
-struct raw1394_iso_packets32 {
- __u32 n_packets;
- compat_uptr_t infos;
-} __attribute__((packed));
-
-struct raw1394_cycle_timer32 {
- __u32 cycle_timer;
- __u64 local_time;
-}
-#if defined(CONFIG_X86_64) || defined(CONFIG_IA64)
-__attribute__((packed))
-#endif
-;
-
-#define RAW1394_IOC_ISO_RECV_PACKETS32 \
- _IOW ('#', 0x25, struct raw1394_iso_packets32)
-#define RAW1394_IOC_ISO_XMIT_PACKETS32 \
- _IOW ('#', 0x27, struct raw1394_iso_packets32)
-#define RAW1394_IOC_GET_CYCLE_TIMER32 \
- _IOR ('#', 0x30, struct raw1394_cycle_timer32)
-
-static long raw1394_iso_xmit_recv_packets32(struct file *file, unsigned int cmd,
- struct raw1394_iso_packets32 __user *arg)
-{
- compat_uptr_t infos32;
- void __user *infos;
- long err = -EFAULT;
- struct raw1394_iso_packets __user *dst = compat_alloc_user_space(sizeof(struct raw1394_iso_packets));
-
- if (!copy_in_user(&dst->n_packets, &arg->n_packets, sizeof arg->n_packets) &&
- !copy_from_user(&infos32, &arg->infos, sizeof infos32)) {
- infos = compat_ptr(infos32);
- if (!copy_to_user(&dst->infos, &infos, sizeof infos))
- err = raw1394_ioctl(file, cmd, (unsigned long)dst);
- }
- return err;
-}
-
-static long raw1394_read_cycle_timer32(struct file_info *fi, void __user * uaddr)
-{
- struct raw1394_cycle_timer32 ct;
- int err;
-
- err = hpsb_read_cycle_timer(fi->host, &ct.cycle_timer, &ct.local_time);
- if (!err)
- if (copy_to_user(uaddr, &ct, sizeof(ct)))
- err = -EFAULT;
- return err;
-}
-
-static long raw1394_compat_ioctl(struct file *file,
- unsigned int cmd, unsigned long arg)
-{
- struct file_info *fi = file->private_data;
- void __user *argp = (void __user *)arg;
- long err;
-
- switch (cmd) {
- /* These requests have the same format as long as 'int' has the same size. */
- case RAW1394_IOC_ISO_RECV_INIT:
- case RAW1394_IOC_ISO_RECV_START:
- case RAW1394_IOC_ISO_RECV_LISTEN_CHANNEL:
- case RAW1394_IOC_ISO_RECV_UNLISTEN_CHANNEL:
- case RAW1394_IOC_ISO_RECV_SET_CHANNEL_MASK:
- case RAW1394_IOC_ISO_RECV_RELEASE_PACKETS:
- case RAW1394_IOC_ISO_RECV_FLUSH:
- case RAW1394_IOC_ISO_XMIT_RECV_STOP:
- case RAW1394_IOC_ISO_XMIT_INIT:
- case RAW1394_IOC_ISO_XMIT_START:
- case RAW1394_IOC_ISO_XMIT_SYNC:
- case RAW1394_IOC_ISO_GET_STATUS:
- case RAW1394_IOC_ISO_SHUTDOWN:
- case RAW1394_IOC_ISO_QUEUE_ACTIVITY:
- err = raw1394_ioctl(file, cmd, arg);
- break;
- /* These requests have a different format. */
- case RAW1394_IOC_ISO_RECV_PACKETS32:
- err = raw1394_iso_xmit_recv_packets32(file, RAW1394_IOC_ISO_RECV_PACKETS, argp);
- break;
- case RAW1394_IOC_ISO_XMIT_PACKETS32:
- err = raw1394_iso_xmit_recv_packets32(file, RAW1394_IOC_ISO_XMIT_PACKETS, argp);
- break;
- case RAW1394_IOC_GET_CYCLE_TIMER32:
- err = raw1394_read_cycle_timer32(fi, argp);
- break;
- default:
- err = -EINVAL;
- break;
- }
-
- return err;
-}
-#endif
-
-static unsigned int raw1394_poll(struct file *file, poll_table * pt)
-{
- struct file_info *fi = file->private_data;
- unsigned int mask = POLLOUT | POLLWRNORM;
- unsigned long flags;
-
- poll_wait(file, &fi->wait_complete, pt);
-
- spin_lock_irqsave(&fi->reqlists_lock, flags);
- if (!list_empty(&fi->req_complete)) {
- mask |= POLLIN | POLLRDNORM;
- }
- spin_unlock_irqrestore(&fi->reqlists_lock, flags);
-
- return mask;
-}
-
-static int raw1394_open(struct inode *inode, struct file *file)
-{
- struct file_info *fi;
-
- fi = kzalloc(sizeof(*fi), GFP_KERNEL);
- if (!fi)
- return -ENOMEM;
-
- fi->notification = (u8) RAW1394_NOTIFY_ON; /* busreset notification */
-
- INIT_LIST_HEAD(&fi->list);
- mutex_init(&fi->state_mutex);
- fi->state = opened;
- INIT_LIST_HEAD(&fi->req_pending);
- INIT_LIST_HEAD(&fi->req_complete);
- spin_lock_init(&fi->reqlists_lock);
- init_waitqueue_head(&fi->wait_complete);
- INIT_LIST_HEAD(&fi->addr_list);
-
- file->private_data = fi;
-
- return nonseekable_open(inode, file);
-}
-
-static int raw1394_release(struct inode *inode, struct file *file)
-{
- struct file_info *fi = file->private_data;
- struct list_head *lh;
- struct pending_request *req;
- int i, fail;
- int retval = 0;
- struct list_head *entry;
- struct arm_addr *addr = NULL;
- struct host_info *hi;
- struct file_info *fi_hlp = NULL;
- struct arm_addr *arm_addr = NULL;
- int another_host;
- int csr_mod = 0;
- unsigned long flags;
-
- if (fi->iso_state != RAW1394_ISO_INACTIVE)
- raw1394_iso_shutdown(fi);
-
- spin_lock_irqsave(&host_info_lock, flags);
-
- fail = 0;
- /* set address-entries invalid */
-
- while (!list_empty(&fi->addr_list)) {
- another_host = 0;
- lh = fi->addr_list.next;
- addr = list_entry(lh, struct arm_addr, addr_list);
- /* another host with valid address-entry containing
- same addressrange? */
- list_for_each_entry(hi, &host_info_list, list) {
- if (hi->host != fi->host) {
- list_for_each_entry(fi_hlp, &hi->file_info_list,
- list) {
- entry = fi_hlp->addr_list.next;
- while (entry != &(fi_hlp->addr_list)) {
- arm_addr = list_entry(entry, struct
- arm_addr,
- addr_list);
- if (arm_addr->start ==
- addr->start) {
- DBGMSG
- ("raw1394_release: "
- "another host ownes "
- "same addressrange");
- another_host = 1;
- break;
- }
- entry = entry->next;
- }
- if (another_host) {
- break;
- }
- }
- }
- }
- if (!another_host) {
- DBGMSG("raw1394_release: call hpsb_arm_unregister");
- retval =
- hpsb_unregister_addrspace(&raw1394_highlevel,
- fi->host, addr->start);
- if (!retval) {
- ++fail;
- printk(KERN_ERR
- "raw1394_release arm_Unregister failed\n");
- }
- }
- DBGMSG("raw1394_release: delete addr_entry from list");
- list_del(&addr->addr_list);
- vfree(addr->addr_space_buffer);
- kfree(addr);
- } /* while */
- spin_unlock_irqrestore(&host_info_lock, flags);
- if (fail > 0) {
- printk(KERN_ERR "raw1394: during addr_list-release "
- "error(s) occurred \n");
- }
-
- for (;;) {
- /* This locked section guarantees that neither
- * complete nor pending requests exist once i!=0 */
- spin_lock_irqsave(&fi->reqlists_lock, flags);
- while ((req = __next_complete_req(fi)))
- free_pending_request(req);
-
- i = list_empty(&fi->req_pending);
- spin_unlock_irqrestore(&fi->reqlists_lock, flags);
-
- if (i)
- break;
- /*
- * Sleep until more requests can be freed.
- *
- * NB: We call the macro wait_event() with a condition argument
- * with side effect. This is only possible because the side
- * effect does not occur until the condition became true, and
- * wait_event() won't evaluate the condition again after that.
- */
- wait_event(fi->wait_complete, (req = next_complete_req(fi)));
- free_pending_request(req);
- }
-
- /* Remove any sub-trees left by user space programs */
- for (i = 0; i < RAW1394_MAX_USER_CSR_DIRS; i++) {
- struct csr1212_dentry *dentry;
- if (!fi->csr1212_dirs[i])
- continue;
- for (dentry =
- fi->csr1212_dirs[i]->value.directory.dentries_head; dentry;
- dentry = dentry->next) {
- csr1212_detach_keyval_from_directory(fi->host->csr.rom->
- root_kv,
- dentry->kv);
- }
- csr1212_release_keyval(fi->csr1212_dirs[i]);
- fi->csr1212_dirs[i] = NULL;
- csr_mod = 1;
- }
-
- if ((csr_mod || fi->cfgrom_upd)
- && hpsb_update_config_rom_image(fi->host) < 0)
- HPSB_ERR
- ("Failed to generate Configuration ROM image for host %d",
- fi->host->id);
-
- if (fi->state == connected) {
- spin_lock_irqsave(&host_info_lock, flags);
- list_del(&fi->list);
- spin_unlock_irqrestore(&host_info_lock, flags);
-
- put_device(&fi->host->device);
- }
-
- spin_lock_irqsave(&host_info_lock, flags);
- if (fi->host)
- module_put(fi->host->driver->owner);
- spin_unlock_irqrestore(&host_info_lock, flags);
-
- kfree(fi);
-
- return 0;
-}
-
-/*** HOTPLUG STUFF **********************************************************/
-/*
- * Export information about protocols/devices supported by this driver.
- */
-#ifdef MODULE
-static const struct ieee1394_device_id raw1394_id_table[] = {
- {
- .match_flags = IEEE1394_MATCH_SPECIFIER_ID | IEEE1394_MATCH_VERSION,
- .specifier_id = AVC_UNIT_SPEC_ID_ENTRY & 0xffffff,
- .version = AVC_SW_VERSION_ENTRY & 0xffffff},
- {
- .match_flags = IEEE1394_MATCH_SPECIFIER_ID | IEEE1394_MATCH_VERSION,
- .specifier_id = CAMERA_UNIT_SPEC_ID_ENTRY & 0xffffff,
- .version = CAMERA_SW_VERSION_ENTRY & 0xffffff},
- {
- .match_flags = IEEE1394_MATCH_SPECIFIER_ID | IEEE1394_MATCH_VERSION,
- .specifier_id = CAMERA_UNIT_SPEC_ID_ENTRY & 0xffffff,
- .version = (CAMERA_SW_VERSION_ENTRY + 1) & 0xffffff},
- {
- .match_flags = IEEE1394_MATCH_SPECIFIER_ID | IEEE1394_MATCH_VERSION,
- .specifier_id = CAMERA_UNIT_SPEC_ID_ENTRY & 0xffffff,
- .version = (CAMERA_SW_VERSION_ENTRY + 2) & 0xffffff},
- {}
-};
-
-MODULE_DEVICE_TABLE(ieee1394, raw1394_id_table);
-#endif /* MODULE */
-
-static struct hpsb_protocol_driver raw1394_driver = {
- .name = "raw1394",
-};
-
-/******************************************************************************/
-
-static struct hpsb_highlevel raw1394_highlevel = {
- .name = RAW1394_DEVICE_NAME,
- .add_host = add_host,
- .remove_host = remove_host,
- .host_reset = host_reset,
- .fcp_request = fcp_request,
-};
-
-static struct cdev raw1394_cdev;
-static const struct file_operations raw1394_fops = {
- .owner = THIS_MODULE,
- .read = raw1394_read,
- .write = raw1394_write,
- .mmap = raw1394_mmap,
- .unlocked_ioctl = raw1394_ioctl,
-#ifdef CONFIG_COMPAT
- .compat_ioctl = raw1394_compat_ioctl,
-#endif
- .poll = raw1394_poll,
- .open = raw1394_open,
- .release = raw1394_release,
- .llseek = no_llseek,
-};
-
-static int __init init_raw1394(void)
-{
- int ret = 0;
-
- hpsb_register_highlevel(&raw1394_highlevel);
-
- if (IS_ERR
- (device_create(hpsb_protocol_class, NULL,
- MKDEV(IEEE1394_MAJOR,
- IEEE1394_MINOR_BLOCK_RAW1394 * 16),
- NULL, RAW1394_DEVICE_NAME))) {
- ret = -EFAULT;
- goto out_unreg;
- }
-
- cdev_init(&raw1394_cdev, &raw1394_fops);
- raw1394_cdev.owner = THIS_MODULE;
- ret = cdev_add(&raw1394_cdev, IEEE1394_RAW1394_DEV, 1);
- if (ret) {
- HPSB_ERR("raw1394 failed to register minor device block");
- goto out_dev;
- }
-
- HPSB_INFO("raw1394: /dev/%s device initialized", RAW1394_DEVICE_NAME);
-
- ret = hpsb_register_protocol(&raw1394_driver);
- if (ret) {
- HPSB_ERR("raw1394: failed to register protocol");
- cdev_del(&raw1394_cdev);
- goto out_dev;
- }
-
- goto out;
-
- out_dev:
- device_destroy(hpsb_protocol_class,
- MKDEV(IEEE1394_MAJOR,
- IEEE1394_MINOR_BLOCK_RAW1394 * 16));
- out_unreg:
- hpsb_unregister_highlevel(&raw1394_highlevel);
- out:
- return ret;
-}
-
-static void __exit cleanup_raw1394(void)
-{
- device_destroy(hpsb_protocol_class,
- MKDEV(IEEE1394_MAJOR,
- IEEE1394_MINOR_BLOCK_RAW1394 * 16));
- cdev_del(&raw1394_cdev);
- hpsb_unregister_highlevel(&raw1394_highlevel);
- hpsb_unregister_protocol(&raw1394_driver);
-}
-
-module_init(init_raw1394);
-module_exit(cleanup_raw1394);
-MODULE_LICENSE("GPL");
diff --git a/drivers/ieee1394/raw1394.h b/drivers/ieee1394/raw1394.h
deleted file mode 100644
index 963ac20373d2..000000000000
--- a/drivers/ieee1394/raw1394.h
+++ /dev/null
@@ -1,191 +0,0 @@
-#ifndef IEEE1394_RAW1394_H
-#define IEEE1394_RAW1394_H
-
-/* header for the raw1394 API that is exported to user-space */
-
-#define RAW1394_KERNELAPI_VERSION 4
-
-/* state: opened */
-#define RAW1394_REQ_INITIALIZE 1
-
-/* state: initialized */
-#define RAW1394_REQ_LIST_CARDS 2
-#define RAW1394_REQ_SET_CARD 3
-
-/* state: connected */
-#define RAW1394_REQ_ASYNC_READ 100
-#define RAW1394_REQ_ASYNC_WRITE 101
-#define RAW1394_REQ_LOCK 102
-#define RAW1394_REQ_LOCK64 103
-#define RAW1394_REQ_ISO_SEND 104 /* removed ABI, now a no-op */
-#define RAW1394_REQ_ASYNC_SEND 105
-#define RAW1394_REQ_ASYNC_STREAM 106
-
-#define RAW1394_REQ_ISO_LISTEN 200 /* removed ABI, now a no-op */
-#define RAW1394_REQ_FCP_LISTEN 201
-#define RAW1394_REQ_RESET_BUS 202
-#define RAW1394_REQ_GET_ROM 203
-#define RAW1394_REQ_UPDATE_ROM 204
-#define RAW1394_REQ_ECHO 205
-#define RAW1394_REQ_MODIFY_ROM 206
-
-#define RAW1394_REQ_ARM_REGISTER 300
-#define RAW1394_REQ_ARM_UNREGISTER 301
-#define RAW1394_REQ_ARM_SET_BUF 302
-#define RAW1394_REQ_ARM_GET_BUF 303
-
-#define RAW1394_REQ_RESET_NOTIFY 400
-
-#define RAW1394_REQ_PHYPACKET 500
-
-/* kernel to user */
-#define RAW1394_REQ_BUS_RESET 10000
-#define RAW1394_REQ_ISO_RECEIVE 10001
-#define RAW1394_REQ_FCP_REQUEST 10002
-#define RAW1394_REQ_ARM 10003
-#define RAW1394_REQ_RAWISO_ACTIVITY 10004
-
-/* error codes */
-#define RAW1394_ERROR_NONE 0
-#define RAW1394_ERROR_COMPAT (-1001)
-#define RAW1394_ERROR_STATE_ORDER (-1002)
-#define RAW1394_ERROR_GENERATION (-1003)
-#define RAW1394_ERROR_INVALID_ARG (-1004)
-#define RAW1394_ERROR_MEMFAULT (-1005)
-#define RAW1394_ERROR_ALREADY (-1006)
-
-#define RAW1394_ERROR_EXCESSIVE (-1020)
-#define RAW1394_ERROR_UNTIDY_LEN (-1021)
-
-#define RAW1394_ERROR_SEND_ERROR (-1100)
-#define RAW1394_ERROR_ABORTED (-1101)
-#define RAW1394_ERROR_TIMEOUT (-1102)
-
-/* arm_codes */
-#define ARM_READ 1
-#define ARM_WRITE 2
-#define ARM_LOCK 4
-
-#define RAW1394_LONG_RESET 0
-#define RAW1394_SHORT_RESET 1
-
-/* busresetnotify ... */
-#define RAW1394_NOTIFY_OFF 0
-#define RAW1394_NOTIFY_ON 1
-
-#include <asm/types.h>
-
-struct raw1394_request {
- __u32 type;
- __s32 error;
- __u32 misc;
-
- __u32 generation;
- __u32 length;
-
- __u64 address;
-
- __u64 tag;
-
- __u64 sendb;
- __u64 recvb;
-};
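
The raw1394_request structure above is the basic unit of this user-space API. As a rough sketch only — the struct is re-declared here with <stdint.h> types so it stands alone, and the generation, address, and tag values are placeholders rather than values read from a real bus — an asynchronous block read request could be assembled like this:

	/* Hypothetical sketch: fill a request for RAW1394_REQ_ASYNC_READ.
	 * In real code the struct comes from raw1394.h and the device
	 * protocol is handled by libraw1394. */
	#include <stdint.h>
	#include <stdio.h>
	#include <string.h>

	#define RAW1394_REQ_ASYNC_READ 100   /* from the header above */

	struct raw1394_request_example {
		uint32_t type, error, misc;
		uint32_t generation, length;
		uint64_t address, tag, sendb, recvb;
	};

	int main(void)
	{
		static uint8_t buf[512];              /* landing area for read data */
		struct raw1394_request_example req;

		memset(&req, 0, sizeof(req));
		req.type       = RAW1394_REQ_ASYNC_READ;
		req.generation = 42;                  /* placeholder bus generation */
		req.address    = 0xfffff0000400ULL;   /* placeholder 1394 address */
		req.length     = sizeof(buf);
		req.recvb      = (uint64_t)(uintptr_t)buf; /* receive buffer pointer */
		req.tag        = 1;                   /* cookie to match the reply */

		printf("request type %u, %u bytes at 0x%012llx\n",
		       (unsigned)req.type, (unsigned)req.length,
		       (unsigned long long)req.address);
		return 0;
	}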
-
-struct raw1394_khost_list {
- __u32 nodes;
- __u8 name[32];
-};
-
-typedef struct arm_request {
- __u16 destination_nodeid;
- __u16 source_nodeid;
- __u64 destination_offset;
- __u8 tlabel;
- __u8 tcode;
- __u8 extended_transaction_code;
- __u32 generation;
- __u16 buffer_length;
- __u8 __user *buffer;
-} *arm_request_t;
-
-typedef struct arm_response {
- __s32 response_code;
- __u16 buffer_length;
- __u8 __user *buffer;
-} *arm_response_t;
-
-typedef struct arm_request_response {
- struct arm_request __user *request;
- struct arm_response __user *response;
-} *arm_request_response_t;
-
-/* rawiso API */
-#include "ieee1394-ioctl.h"
-
-/* per-packet metadata embedded in the ringbuffer */
-/* must be identical to hpsb_iso_packet_info in iso.h! */
-struct raw1394_iso_packet_info {
- __u32 offset;
- __u16 len;
- __u16 cycle; /* recv only */
- __u8 channel; /* recv only */
- __u8 tag;
- __u8 sy;
-};
-
-/* argument for RAW1394_ISO_RECV/XMIT_PACKETS ioctls */
-struct raw1394_iso_packets {
- __u32 n_packets;
- struct raw1394_iso_packet_info __user *infos;
-};
-
-struct raw1394_iso_config {
- /* size of packet data buffer, in bytes (will be rounded up to PAGE_SIZE) */
- __u32 data_buf_size;
-
- /* # of packets to buffer */
- __u32 buf_packets;
-
- /* iso channel (set to -1 for multi-channel recv) */
- __s32 channel;
-
- /* xmit only - iso transmission speed */
- __u8 speed;
-
- /* The mode of the dma when receiving iso data. Must be supported by chip */
- __u8 dma_mode;
-
- /* max. latency of buffer, in packets (-1 if you don't care) */
- __s32 irq_interval;
-};
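
As a hedged sketch of how these configuration fields fit together for isochronous reception — all values are placeholders and the struct is re-declared locally so the snippet stands alone — a receive setup might look like:

	/* Illustrative only: a receive-side iso configuration.  dma_mode
	 * constants live elsewhere in the ieee1394 headers and are not
	 * shown here; 0 below is just a placeholder. */
	#include <stdint.h>
	#include <stdio.h>

	struct iso_config_example {
		uint32_t data_buf_size;   /* rounded up to PAGE_SIZE by the kernel */
		uint32_t buf_packets;
		int32_t  channel;         /* -1 selects multi-channel reception */
		uint8_t  speed;           /* xmit only */
		uint8_t  dma_mode;
		int32_t  irq_interval;    /* -1: no preference */
	};

	int main(void)
	{
		struct iso_config_example cfg = {
			.data_buf_size = 256 * 1024,  /* placeholder buffer size */
			.buf_packets   = 256,
			.channel       = 63,          /* placeholder iso channel */
			.speed         = 0,           /* ignored for reception */
			.dma_mode      = 0,           /* placeholder */
			.irq_interval  = -1,
		};

		printf("%u packets in a %u byte buffer on channel %d\n",
		       (unsigned)cfg.buf_packets, (unsigned)cfg.data_buf_size,
		       (int)cfg.channel);
		return 0;
	}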
-
-/* argument to RAW1394_ISO_XMIT/RECV_INIT and RAW1394_ISO_GET_STATUS */
-struct raw1394_iso_status {
- /* current settings */
- struct raw1394_iso_config config;
-
- /* number of packets waiting to be filled with data (ISO transmission)
- or containing data received (ISO reception) */
- __u32 n_packets;
-
- /* approximate number of packets dropped due to overflow or
- underflow of the packet buffer (a value of zero guarantees
- that no packets have been dropped) */
- __u32 overflows;
-
- /* cycle number at which next packet will be transmitted;
- -1 if not known */
- __s16 xmit_cycle;
-};
-
-/* argument to RAW1394_IOC_GET_CYCLE_TIMER ioctl */
-struct raw1394_cycle_timer {
- /* contents of Isochronous Cycle Timer register,
- as in OHCI 1.1 clause 5.13 (also with non-OHCI hosts) */
- __u32 cycle_timer;
-
- /* local time in microseconds since Epoch,
- simultaneously read with cycle timer */
- __u64 local_time;
-};
-#endif /* IEEE1394_RAW1394_H */
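
The cycle_timer field of raw1394_cycle_timer above packs three sub-fields into one 32-bit register. A minimal decode sketch, assuming the OHCI 1.1 clause 5.13 layout (7-bit seconds, 13-bit cycle count, 12-bit cycle offset) and a placeholder register value:

	#include <stdint.h>
	#include <stdio.h>

	int main(void)
	{
		uint32_t ct = 0x12345678;             /* placeholder register value */
		unsigned seconds = (ct >> 25) & 0x7f;   /* 0..127, wraps around */
		unsigned cycles  = (ct >> 12) & 0x1fff; /* 0..7999, 125 us each */
		unsigned offset  = ct & 0xfff;          /* 0..3071 ticks of 24.576 MHz */

		printf("%u s + %u cycles + %u ticks\n", seconds, cycles, offset);
		return 0;
	}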
diff --git a/drivers/ieee1394/sbp2.c b/drivers/ieee1394/sbp2.c
deleted file mode 100644
index d6e251a300ce..000000000000
--- a/drivers/ieee1394/sbp2.c
+++ /dev/null
@@ -1,2138 +0,0 @@
-/*
- * sbp2.c - SBP-2 protocol driver for IEEE-1394
- *
- * Copyright (C) 2000 James Goodwin, Filanet Corporation (www.filanet.com)
- * jamesg@filanet.com (JSG)
- *
- * Copyright (C) 2003 Ben Collins <bcollins@debian.org>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software Foundation,
- * Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
- */
-
-/*
- * Brief Description:
- *
- * This driver implements the Serial Bus Protocol 2 (SBP-2) over IEEE-1394
- * under Linux. The SBP-2 driver is implemented as an IEEE-1394 high-level
- * driver. It also registers as a SCSI lower-level driver in order to accept
- * SCSI commands for transport using SBP-2.
- *
- * You may access any attached SBP-2 (usually storage devices) as regular
- * SCSI devices. E.g. mount /dev/sda1, fdisk, mkfs, etc.
- *
- * See http://www.t10.org/drafts.htm#sbp2 for the final draft of the SBP-2
- * specification and for where to purchase the official standard.
- *
- * TODO:
- * - look into possible improvements of the SCSI error handlers
- * - handle Unit_Characteristics.mgt_ORB_timeout and .ORB_size
- * - handle Logical_Unit_Number.ordered
- * - handle src == 1 in status blocks
- * - reimplement the DMA mapping in absence of physical DMA so that
- * bus_to_virt is no longer required
- * - debug the handling of absent physical DMA
- * - replace CONFIG_IEEE1394_SBP2_PHYS_DMA by automatic detection
- * (this is easy but depends on the previous two TODO items)
- * - make the parameter serialize_io configurable per device
- * - move all requests to fetch agent registers into non-atomic context,
- * replace all usages of sbp2util_node_write_no_wait by true transactions
- * Grep for inline FIXME comments below.
- */
-
-#include <linux/blkdev.h>
-#include <linux/compiler.h>
-#include <linux/delay.h>
-#include <linux/device.h>
-#include <linux/dma-mapping.h>
-#include <linux/init.h>
-#include <linux/kernel.h>
-#include <linux/list.h>
-#include <linux/mm.h>
-#include <linux/module.h>
-#include <linux/moduleparam.h>
-#include <linux/sched.h>
-#include <linux/slab.h>
-#include <linux/spinlock.h>
-#include <linux/stat.h>
-#include <linux/string.h>
-#include <linux/stringify.h>
-#include <linux/types.h>
-#include <linux/wait.h>
-#include <linux/workqueue.h>
-#include <linux/scatterlist.h>
-
-#include <asm/byteorder.h>
-#include <asm/errno.h>
-#include <asm/param.h>
-#include <asm/system.h>
-#include <asm/types.h>
-
-#ifdef CONFIG_IEEE1394_SBP2_PHYS_DMA
-#include <asm/io.h> /* for bus_to_virt */
-#endif
-
-#include <scsi/scsi.h>
-#include <scsi/scsi_cmnd.h>
-#include <scsi/scsi_dbg.h>
-#include <scsi/scsi_device.h>
-#include <scsi/scsi_host.h>
-
-#include "csr1212.h"
-#include "highlevel.h"
-#include "hosts.h"
-#include "ieee1394.h"
-#include "ieee1394_core.h"
-#include "ieee1394_hotplug.h"
-#include "ieee1394_transactions.h"
-#include "ieee1394_types.h"
-#include "nodemgr.h"
-#include "sbp2.h"
-
-/*
- * Module load parameter definitions
- */
-
-/*
- * Change max_speed on module load if you have a bad IEEE-1394
- * controller that has trouble running 2KB packets at 400 Mb/s.
- *
- * NOTE: On certain OHCI parts I have seen short packets on async transmit
- * (probably due to PCI latency/throughput issues with the part). You can
- * bump down the speed if you are running into problems.
- */
-static int sbp2_max_speed = IEEE1394_SPEED_MAX;
-module_param_named(max_speed, sbp2_max_speed, int, 0644);
-MODULE_PARM_DESC(max_speed, "Limit data transfer speed (5 <= 3200, "
- "4 <= 1600, 3 <= 800, 2 <= 400, 1 <= 200, 0 = 100 Mb/s)");
-
-/*
- * Set serialize_io to 0 or N to use dynamically appended lists of command ORBs.
- * This is and always has been buggy in multiple subtle ways. See above TODOs.
- */
-static int sbp2_serialize_io = 1;
-module_param_named(serialize_io, sbp2_serialize_io, bool, 0444);
-MODULE_PARM_DESC(serialize_io, "Serialize requests coming from SCSI drivers "
- "(default = Y, faster but buggy = N)");
-
-/*
- * Adjust max_sectors if you'd like to influence how many sectors each SCSI
- * command can transfer at most. Please note that some older SBP-2 bridge
- * chips are broken for transfers greater than or equal to 128KB, therefore
- * max_sectors used to be a safe 255 sectors for many years. We now have a
- * default of 0 here which means that we let the SCSI stack choose a limit.
- *
- * The SBP2_WORKAROUND_128K_MAX_TRANS flag, if set either in the workarounds
- * module parameter or in the sbp2_workarounds_table[], will override the
- * value of max_sectors. We should use sbp2_workarounds_table[] to cover any
- * bridge chip which becomes known to need the 255 sectors limit.
- */
-static int sbp2_max_sectors;
-module_param_named(max_sectors, sbp2_max_sectors, int, 0444);
-MODULE_PARM_DESC(max_sectors, "Change max sectors per I/O supported "
- "(default = 0 = use SCSI stack's default)");
-
-/*
- * Exclusive login to sbp2 device? In most cases, the sbp2 driver should
- * do an exclusive login, as it's generally unsafe to have two hosts
- * talking to a single sbp2 device at the same time (filesystem coherency,
- * etc.). If you're running an sbp2 device that supports multiple logins,
- * and you're either running read-only filesystems or some sort of special
- * filesystem supporting multiple hosts, e.g. OpenGFS, Oracle Cluster
- * File System, or Lustre, then set exclusive_login to zero.
- *
- * So far only bridges from Oxford Semiconductor are known to support
- * concurrent logins. Depending on firmware, four or two concurrent logins
- * are possible on OXFW911 and newer Oxsemi bridges.
- */
-static int sbp2_exclusive_login = 1;
-module_param_named(exclusive_login, sbp2_exclusive_login, bool, 0644);
-MODULE_PARM_DESC(exclusive_login, "Exclusive login to sbp2 device "
- "(default = Y, use N for concurrent initiators)");
-
-/*
- * If any of the following workarounds is required for your device to work,
- * please submit the kernel messages logged by sbp2 to the linux1394-devel
- * mailing list.
- *
- * - 128kB max transfer
- * Limit transfer size. Necessary for some old bridges.
- *
- * - 36 byte inquiry
- * When scsi_mod probes the device, let the inquiry command look like that
- * from MS Windows.
- *
- * - skip mode page 8
- * Suppress sending of mode_sense for mode page 8 if the device pretends to
- * support the SCSI Primary Block commands instead of Reduced Block Commands.
- *
- * - fix capacity
- * Tell sd_mod to correct the last sector number reported by read_capacity.
- * Avoids access beyond actual disk limits on devices with an off-by-one bug.
- * Don't use this with devices which don't have this bug.
- *
- * - delay inquiry
- * Wait extra SBP2_INQUIRY_DELAY seconds after login before SCSI inquiry.
- *
- * - power condition
- * Set the power condition field in the START STOP UNIT commands sent by
- * sd_mod on suspend, resume, and shutdown (if manage_start_stop is on).
- * Some disks need this to spin down or to resume properly.
- *
- * - override internal blacklist
- * Instead of adding to the built-in blacklist, use only the workarounds
- * specified in the module load parameter.
- * Useful if a blacklist entry interfered with a non-broken device.
- */
-static int sbp2_default_workarounds;
-module_param_named(workarounds, sbp2_default_workarounds, int, 0644);
-MODULE_PARM_DESC(workarounds, "Work around device bugs (default = 0"
- ", 128kB max transfer = " __stringify(SBP2_WORKAROUND_128K_MAX_TRANS)
- ", 36 byte inquiry = " __stringify(SBP2_WORKAROUND_INQUIRY_36)
- ", skip mode page 8 = " __stringify(SBP2_WORKAROUND_MODE_SENSE_8)
- ", fix capacity = " __stringify(SBP2_WORKAROUND_FIX_CAPACITY)
- ", delay inquiry = " __stringify(SBP2_WORKAROUND_DELAY_INQUIRY)
- ", set power condition in start stop unit = "
- __stringify(SBP2_WORKAROUND_POWER_CONDITION)
- ", override internal blacklist = " __stringify(SBP2_WORKAROUND_OVERRIDE)
- ", or a combination)");
-
-/*
- * This influences the format of the sysfs attribute
- * /sys/bus/scsi/devices/.../ieee1394_id.
- *
- * The default format is like in older kernels: %016Lx:%d:%d
- * It contains the target's EUI-64, a number given to the logical unit by
- * the ieee1394 driver's nodemgr (starting at 0), and the LUN.
- *
- * The long format is: %016Lx:%06x:%04x
- * It contains the target's EUI-64, the unit directory's directory_ID as per
- * IEEE 1212 clause 7.7.19, and the LUN. This format comes closest to the
- * format of SBP(-3) target port and logical unit identifier as per SAM (SCSI
- * Architecture Model) rev. 2 to 4 annex A. Because it is also
- * independent of the implementation of the ieee1394 nodemgr, the longer
- * format is recommended for future use.
- */
-static int sbp2_long_sysfs_ieee1394_id;
-module_param_named(long_ieee1394_id, sbp2_long_sysfs_ieee1394_id, bool, 0644);
-MODULE_PARM_DESC(long_ieee1394_id, "8+3+2 bytes format of ieee1394_id in sysfs "
- "(default = backwards-compatible = N, SAM-conforming = Y)");
-
-
-#define SBP2_INFO(fmt, args...) HPSB_INFO("sbp2: "fmt, ## args)
-#define SBP2_ERR(fmt, args...) HPSB_ERR("sbp2: "fmt, ## args)
-
-/*
- * Globals
- */
-static void sbp2scsi_complete_all_commands(struct sbp2_lu *, u32);
-static void sbp2scsi_complete_command(struct sbp2_lu *, u32, struct scsi_cmnd *,
- void (*)(struct scsi_cmnd *));
-static struct sbp2_lu *sbp2_alloc_device(struct unit_directory *);
-static int sbp2_start_device(struct sbp2_lu *);
-static void sbp2_remove_device(struct sbp2_lu *);
-static int sbp2_login_device(struct sbp2_lu *);
-static int sbp2_reconnect_device(struct sbp2_lu *);
-static int sbp2_logout_device(struct sbp2_lu *);
-static void sbp2_host_reset(struct hpsb_host *);
-static int sbp2_handle_status_write(struct hpsb_host *, int, int, quadlet_t *,
- u64, size_t, u16);
-static int sbp2_agent_reset(struct sbp2_lu *, int);
-static void sbp2_parse_unit_directory(struct sbp2_lu *,
- struct unit_directory *);
-static int sbp2_set_busy_timeout(struct sbp2_lu *);
-static int sbp2_max_speed_and_size(struct sbp2_lu *);
-
-
-static const u8 sbp2_speedto_max_payload[] = { 0x7, 0x8, 0x9, 0xa, 0xa, 0xa };
-
-static DEFINE_RWLOCK(sbp2_hi_logical_units_lock);
-
-static struct hpsb_highlevel sbp2_highlevel = {
- .name = SBP2_DEVICE_NAME,
- .host_reset = sbp2_host_reset,
-};
-
-static const struct hpsb_address_ops sbp2_ops = {
- .write = sbp2_handle_status_write
-};
-
-#ifdef CONFIG_IEEE1394_SBP2_PHYS_DMA
-static int sbp2_handle_physdma_write(struct hpsb_host *, int, int, quadlet_t *,
- u64, size_t, u16);
-static int sbp2_handle_physdma_read(struct hpsb_host *, int, quadlet_t *, u64,
- size_t, u16);
-
-static const struct hpsb_address_ops sbp2_physdma_ops = {
- .read = sbp2_handle_physdma_read,
- .write = sbp2_handle_physdma_write,
-};
-#endif
-
-
-/*
- * Interface to driver core and IEEE 1394 core
- */
-static const struct ieee1394_device_id sbp2_id_table[] = {
- {
- .match_flags = IEEE1394_MATCH_SPECIFIER_ID | IEEE1394_MATCH_VERSION,
- .specifier_id = SBP2_UNIT_SPEC_ID_ENTRY & 0xffffff,
- .version = SBP2_SW_VERSION_ENTRY & 0xffffff},
- {}
-};
-MODULE_DEVICE_TABLE(ieee1394, sbp2_id_table);
-
-static int sbp2_probe(struct device *);
-static int sbp2_remove(struct device *);
-static int sbp2_update(struct unit_directory *);
-
-static struct hpsb_protocol_driver sbp2_driver = {
- .name = SBP2_DEVICE_NAME,
- .id_table = sbp2_id_table,
- .update = sbp2_update,
- .driver = {
- .probe = sbp2_probe,
- .remove = sbp2_remove,
- },
-};
-
-
-/*
- * Interface to SCSI core
- */
-static int sbp2scsi_queuecommand(struct scsi_cmnd *,
- void (*)(struct scsi_cmnd *));
-static int sbp2scsi_abort(struct scsi_cmnd *);
-static int sbp2scsi_reset(struct scsi_cmnd *);
-static int sbp2scsi_slave_alloc(struct scsi_device *);
-static int sbp2scsi_slave_configure(struct scsi_device *);
-static void sbp2scsi_slave_destroy(struct scsi_device *);
-static ssize_t sbp2_sysfs_ieee1394_id_show(struct device *,
- struct device_attribute *, char *);
-
-static DEVICE_ATTR(ieee1394_id, S_IRUGO, sbp2_sysfs_ieee1394_id_show, NULL);
-
-static struct device_attribute *sbp2_sysfs_sdev_attrs[] = {
- &dev_attr_ieee1394_id,
- NULL
-};
-
-static struct scsi_host_template sbp2_shost_template = {
- .module = THIS_MODULE,
- .name = "SBP-2 IEEE-1394",
- .proc_name = SBP2_DEVICE_NAME,
- .queuecommand = sbp2scsi_queuecommand,
- .eh_abort_handler = sbp2scsi_abort,
- .eh_device_reset_handler = sbp2scsi_reset,
- .slave_alloc = sbp2scsi_slave_alloc,
- .slave_configure = sbp2scsi_slave_configure,
- .slave_destroy = sbp2scsi_slave_destroy,
- .this_id = -1,
- .sg_tablesize = SG_ALL,
- .use_clustering = ENABLE_CLUSTERING,
- .cmd_per_lun = SBP2_MAX_CMDS,
- .can_queue = SBP2_MAX_CMDS,
- .sdev_attrs = sbp2_sysfs_sdev_attrs,
-};
-
-#define SBP2_ROM_VALUE_WILDCARD ~0 /* match all */
-#define SBP2_ROM_VALUE_MISSING 0xff000000 /* not present in the unit dir. */
-
-/*
- * List of devices with known bugs.
- *
- * The firmware_revision field, masked with 0xffff00, is the best indicator
- * for the type of bridge chip of a device. It yields a few false positives
- * but this did not break correctly behaving devices so far.
- */
-static const struct {
- u32 firmware_revision;
- u32 model;
- unsigned workarounds;
-} sbp2_workarounds_table[] = {
- /* DViCO Momobay CX-1 with TSB42AA9 bridge */ {
- .firmware_revision = 0x002800,
- .model = 0x001010,
- .workarounds = SBP2_WORKAROUND_INQUIRY_36 |
- SBP2_WORKAROUND_MODE_SENSE_8 |
- SBP2_WORKAROUND_POWER_CONDITION,
- },
- /* DViCO Momobay FX-3A with TSB42AA9A bridge */ {
- .firmware_revision = 0x002800,
- .model = 0x000000,
- .workarounds = SBP2_WORKAROUND_POWER_CONDITION,
- },
- /* Initio bridges, actually only needed for some older ones */ {
- .firmware_revision = 0x000200,
- .model = SBP2_ROM_VALUE_WILDCARD,
- .workarounds = SBP2_WORKAROUND_INQUIRY_36,
- },
- /* PL-3507 bridge with Prolific firmware */ {
- .firmware_revision = 0x012800,
- .model = SBP2_ROM_VALUE_WILDCARD,
- .workarounds = SBP2_WORKAROUND_POWER_CONDITION,
- },
- /* Symbios bridge */ {
- .firmware_revision = 0xa0b800,
- .model = SBP2_ROM_VALUE_WILDCARD,
- .workarounds = SBP2_WORKAROUND_128K_MAX_TRANS,
- },
- /* Datafab MD2-FW2 with Symbios/LSILogic SYM13FW500 bridge */ {
- .firmware_revision = 0x002600,
- .model = SBP2_ROM_VALUE_WILDCARD,
- .workarounds = SBP2_WORKAROUND_128K_MAX_TRANS,
- },
- /*
- * iPod 2nd generation: needs 128k max transfer size workaround
- * iPod 3rd generation: needs fix capacity workaround
- */
- {
- .firmware_revision = 0x0a2700,
- .model = 0x000000,
- .workarounds = SBP2_WORKAROUND_128K_MAX_TRANS |
- SBP2_WORKAROUND_FIX_CAPACITY,
- },
- /* iPod 4th generation */ {
- .firmware_revision = 0x0a2700,
- .model = 0x000021,
- .workarounds = SBP2_WORKAROUND_FIX_CAPACITY,
- },
- /* iPod mini */ {
- .firmware_revision = 0x0a2700,
- .model = 0x000022,
- .workarounds = SBP2_WORKAROUND_FIX_CAPACITY,
- },
- /* iPod mini */ {
- .firmware_revision = 0x0a2700,
- .model = 0x000023,
- .workarounds = SBP2_WORKAROUND_FIX_CAPACITY,
- },
- /* iPod Photo */ {
- .firmware_revision = 0x0a2700,
- .model = 0x00007e,
- .workarounds = SBP2_WORKAROUND_FIX_CAPACITY,
- }
-};
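
The firmware_revision masking and wildcard handling described above are applied by sbp2_parse_unit_directory() further down. As a standalone sketch of the same lookup — the entry contents and the probed values below are placeholders, not real devices:

	#include <stdio.h>

	#define WILDCARD (~0u)   /* stands in for SBP2_ROM_VALUE_WILDCARD */

	struct wa_entry {
		unsigned firmware_revision;
		unsigned model;
		unsigned workarounds;
	};

	static unsigned lookup(const struct wa_entry *t, int n,
			       unsigned firmware_revision, unsigned model)
	{
		int i;

		for (i = 0; i < n; i++) {
			if (t[i].firmware_revision != WILDCARD &&
			    t[i].firmware_revision != (firmware_revision & 0xffff00))
				continue;
			if (t[i].model != WILDCARD && t[i].model != model)
				continue;
			return t[i].workarounds;   /* first match wins */
		}
		return 0;
	}

	int main(void)
	{
		static const struct wa_entry table[] = {
			{ 0x002800, 0x001010, 0x1 },   /* placeholder entry */
			{ 0x000200, WILDCARD, 0x2 },   /* placeholder entry */
		};

		/* 0x000203 & 0xffff00 == 0x000200, so the second entry matches */
		printf("workarounds: 0x%x\n", lookup(table, 2, 0x000203, 0x000042));
		return 0;
	}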
-
-/**************************************
- * General utility functions
- **************************************/
-
-#ifndef __BIG_ENDIAN
-/*
- * Converts a buffer from be32 to cpu byte ordering. Length is in bytes.
- */
-static inline void sbp2util_be32_to_cpu_buffer(void *buffer, int length)
-{
- u32 *temp = buffer;
-
- for (length = (length >> 2); length--; )
- temp[length] = be32_to_cpu(temp[length]);
-}
-
-/*
- * Converts a buffer from cpu to be32 byte ordering. Length is in bytes.
- */
-static inline void sbp2util_cpu_to_be32_buffer(void *buffer, int length)
-{
- u32 *temp = buffer;
-
- for (length = (length >> 2); length--; )
- temp[length] = cpu_to_be32(temp[length]);
-}
-#else /* BIG_ENDIAN */
-/* Why waste the cpu cycles? */
-#define sbp2util_be32_to_cpu_buffer(x,y) do {} while (0)
-#define sbp2util_cpu_to_be32_buffer(x,y) do {} while (0)
-#endif
-
-static DECLARE_WAIT_QUEUE_HEAD(sbp2_access_wq);
-
-/*
- * Waits for completion of an SBP-2 access request.
- * Returns nonzero if timed out or prematurely interrupted.
- */
-static int sbp2util_access_timeout(struct sbp2_lu *lu, int timeout)
-{
- long leftover;
-
- leftover = wait_event_interruptible_timeout(
- sbp2_access_wq, lu->access_complete, timeout);
- lu->access_complete = 0;
- return leftover <= 0;
-}
-
-static void sbp2_free_packet(void *packet)
-{
- hpsb_free_tlabel(packet);
- hpsb_free_packet(packet);
-}
-
-/*
- * This is much like hpsb_node_write(), except it ignores the response
- * subaction and returns immediately. Can be used from atomic context.
- */
-static int sbp2util_node_write_no_wait(struct node_entry *ne, u64 addr,
- quadlet_t *buf, size_t len)
-{
- struct hpsb_packet *packet;
-
- packet = hpsb_make_writepacket(ne->host, ne->nodeid, addr, buf, len);
- if (!packet)
- return -ENOMEM;
-
- hpsb_set_packet_complete_task(packet, sbp2_free_packet, packet);
- hpsb_node_fill_packet(ne, packet);
- if (hpsb_send_packet(packet) < 0) {
- sbp2_free_packet(packet);
- return -EIO;
- }
- return 0;
-}
-
-static void sbp2util_notify_fetch_agent(struct sbp2_lu *lu, u64 offset,
- quadlet_t *data, size_t len)
-{
- /* There is a small window after a bus reset within which the node
- * entry's generation is current but the reconnect wasn't completed. */
- if (unlikely(atomic_read(&lu->state) == SBP2LU_STATE_IN_RESET))
- return;
-
- if (hpsb_node_write(lu->ne, lu->command_block_agent_addr + offset,
- data, len))
- SBP2_ERR("sbp2util_notify_fetch_agent failed.");
-
- /* Now accept new SCSI commands, unless a bus reset happened during
- * hpsb_node_write. */
- if (likely(atomic_read(&lu->state) != SBP2LU_STATE_IN_RESET))
- scsi_unblock_requests(lu->shost);
-}
-
-static void sbp2util_write_orb_pointer(struct work_struct *work)
-{
- struct sbp2_lu *lu = container_of(work, struct sbp2_lu, protocol_work);
- quadlet_t data[2];
-
- data[0] = ORB_SET_NODE_ID(lu->hi->host->node_id);
- data[1] = lu->last_orb_dma;
- sbp2util_cpu_to_be32_buffer(data, 8);
- sbp2util_notify_fetch_agent(lu, SBP2_ORB_POINTER_OFFSET, data, 8);
-}
-
-static void sbp2util_write_doorbell(struct work_struct *work)
-{
- struct sbp2_lu *lu = container_of(work, struct sbp2_lu, protocol_work);
-
- sbp2util_notify_fetch_agent(lu, SBP2_DOORBELL_OFFSET, NULL, 4);
-}
-
-static int sbp2util_create_command_orb_pool(struct sbp2_lu *lu)
-{
- struct sbp2_command_info *cmd;
- struct device *dmadev = lu->hi->host->device.parent;
- int i, orbs = sbp2_serialize_io ? 2 : SBP2_MAX_CMDS;
-
- for (i = 0; i < orbs; i++) {
- cmd = kzalloc(sizeof(*cmd), GFP_KERNEL);
- if (!cmd)
- goto failed_alloc;
-
- cmd->command_orb_dma =
- dma_map_single(dmadev, &cmd->command_orb,
- sizeof(struct sbp2_command_orb),
- DMA_TO_DEVICE);
- if (dma_mapping_error(dmadev, cmd->command_orb_dma))
- goto failed_orb;
-
- cmd->sge_dma =
- dma_map_single(dmadev, &cmd->scatter_gather_element,
- sizeof(cmd->scatter_gather_element),
- DMA_TO_DEVICE);
- if (dma_mapping_error(dmadev, cmd->sge_dma))
- goto failed_sge;
-
- INIT_LIST_HEAD(&cmd->list);
- list_add_tail(&cmd->list, &lu->cmd_orb_completed);
- }
- return 0;
-
-failed_sge:
- dma_unmap_single(dmadev, cmd->command_orb_dma,
- sizeof(struct sbp2_command_orb), DMA_TO_DEVICE);
-failed_orb:
- kfree(cmd);
-failed_alloc:
- return -ENOMEM;
-}
-
-static void sbp2util_remove_command_orb_pool(struct sbp2_lu *lu,
- struct hpsb_host *host)
-{
- struct list_head *lh, *next;
- struct sbp2_command_info *cmd;
- unsigned long flags;
-
- spin_lock_irqsave(&lu->cmd_orb_lock, flags);
- if (!list_empty(&lu->cmd_orb_completed))
- list_for_each_safe(lh, next, &lu->cmd_orb_completed) {
- cmd = list_entry(lh, struct sbp2_command_info, list);
- dma_unmap_single(host->device.parent,
- cmd->command_orb_dma,
- sizeof(struct sbp2_command_orb),
- DMA_TO_DEVICE);
- dma_unmap_single(host->device.parent, cmd->sge_dma,
- sizeof(cmd->scatter_gather_element),
- DMA_TO_DEVICE);
- kfree(cmd);
- }
- spin_unlock_irqrestore(&lu->cmd_orb_lock, flags);
- return;
-}
-
-/*
- * Finds the sbp2_command for a given outstanding command ORB.
- * Only looks at the in-use list.
- */
-static struct sbp2_command_info *sbp2util_find_command_for_orb(
- struct sbp2_lu *lu, dma_addr_t orb)
-{
- struct sbp2_command_info *cmd;
- unsigned long flags;
-
- spin_lock_irqsave(&lu->cmd_orb_lock, flags);
- if (!list_empty(&lu->cmd_orb_inuse))
- list_for_each_entry(cmd, &lu->cmd_orb_inuse, list)
- if (cmd->command_orb_dma == orb) {
- spin_unlock_irqrestore(
- &lu->cmd_orb_lock, flags);
- return cmd;
- }
- spin_unlock_irqrestore(&lu->cmd_orb_lock, flags);
- return NULL;
-}
-
-/*
- * Finds the sbp2_command for a given outstanding SCpnt.
- * Only looks at the in-use list.
- * Must be called with lu->cmd_orb_lock held.
- */
-static struct sbp2_command_info *sbp2util_find_command_for_SCpnt(
- struct sbp2_lu *lu, void *SCpnt)
-{
- struct sbp2_command_info *cmd;
-
- if (!list_empty(&lu->cmd_orb_inuse))
- list_for_each_entry(cmd, &lu->cmd_orb_inuse, list)
- if (cmd->Current_SCpnt == SCpnt)
- return cmd;
- return NULL;
-}
-
-static struct sbp2_command_info *sbp2util_allocate_command_orb(
- struct sbp2_lu *lu,
- struct scsi_cmnd *Current_SCpnt,
- void (*Current_done)(struct scsi_cmnd *))
-{
- struct list_head *lh;
- struct sbp2_command_info *cmd = NULL;
- unsigned long flags;
-
- spin_lock_irqsave(&lu->cmd_orb_lock, flags);
- if (!list_empty(&lu->cmd_orb_completed)) {
- lh = lu->cmd_orb_completed.next;
- list_del(lh);
- cmd = list_entry(lh, struct sbp2_command_info, list);
- cmd->Current_done = Current_done;
- cmd->Current_SCpnt = Current_SCpnt;
- list_add_tail(&cmd->list, &lu->cmd_orb_inuse);
- } else
- SBP2_ERR("%s: no orbs available", __func__);
- spin_unlock_irqrestore(&lu->cmd_orb_lock, flags);
- return cmd;
-}
-
-/*
- * Unmaps the DMAs of a command and moves the command to the completed ORB list.
- * Must be called with lu->cmd_orb_lock held.
- */
-static void sbp2util_mark_command_completed(struct sbp2_lu *lu,
- struct sbp2_command_info *cmd)
-{
- if (scsi_sg_count(cmd->Current_SCpnt))
- dma_unmap_sg(lu->ud->ne->host->device.parent,
- scsi_sglist(cmd->Current_SCpnt),
- scsi_sg_count(cmd->Current_SCpnt),
- cmd->Current_SCpnt->sc_data_direction);
- list_move_tail(&cmd->list, &lu->cmd_orb_completed);
-}
-
-/*
- * Is lu valid? Is the 1394 node still present?
- */
-static inline int sbp2util_node_is_available(struct sbp2_lu *lu)
-{
- return lu && lu->ne && !lu->ne->in_limbo;
-}
-
-/*********************************************
- * IEEE-1394 core driver stack related section
- *********************************************/
-
-static int sbp2_probe(struct device *dev)
-{
- struct unit_directory *ud;
- struct sbp2_lu *lu;
-
- ud = container_of(dev, struct unit_directory, device);
-
- /* Don't probe UDs that have the LUN flag. We'll probe the LUN(s)
- * instead. */
- if (ud->flags & UNIT_DIRECTORY_HAS_LUN_DIRECTORY)
- return -ENODEV;
-
- lu = sbp2_alloc_device(ud);
- if (!lu)
- return -ENOMEM;
-
- sbp2_parse_unit_directory(lu, ud);
- return sbp2_start_device(lu);
-}
-
-static int sbp2_remove(struct device *dev)
-{
- struct unit_directory *ud;
- struct sbp2_lu *lu;
- struct scsi_device *sdev;
-
- ud = container_of(dev, struct unit_directory, device);
- lu = dev_get_drvdata(&ud->device);
- if (!lu)
- return 0;
-
- if (lu->shost) {
- /* Get rid of enqueued commands if there is no chance to
- * send them. */
- if (!sbp2util_node_is_available(lu))
- sbp2scsi_complete_all_commands(lu, DID_NO_CONNECT);
- /* scsi_remove_device() may trigger shutdown functions of SCSI
- * highlevel drivers which would deadlock if blocked. */
- atomic_set(&lu->state, SBP2LU_STATE_IN_SHUTDOWN);
- scsi_unblock_requests(lu->shost);
- }
- sdev = lu->sdev;
- if (sdev) {
- lu->sdev = NULL;
- scsi_remove_device(sdev);
- }
-
- sbp2_logout_device(lu);
- sbp2_remove_device(lu);
-
- return 0;
-}
-
-static int sbp2_update(struct unit_directory *ud)
-{
- struct sbp2_lu *lu = dev_get_drvdata(&ud->device);
-
- if (sbp2_reconnect_device(lu) != 0) {
- /*
- * Reconnect failed. If another bus reset happened,
- * let nodemgr proceed and call sbp2_update again later
- * (or sbp2_remove if this node went away).
- */
- if (!hpsb_node_entry_valid(lu->ne))
- return 0;
- /*
- * Or the target rejected the reconnect because we weren't
- * fast enough. Try a regular login, but first log out
- * just in case of any weirdness.
- */
- sbp2_logout_device(lu);
-
- if (sbp2_login_device(lu) != 0) {
- if (!hpsb_node_entry_valid(lu->ne))
- return 0;
-
- /* Maybe another initiator won the login. */
- SBP2_ERR("Failed to reconnect to sbp2 device!");
- return -EBUSY;
- }
- }
-
- sbp2_set_busy_timeout(lu);
- sbp2_agent_reset(lu, 1);
- sbp2_max_speed_and_size(lu);
-
- /* Complete any pending commands with busy (so they get retried)
- * and remove them from our queue. */
- sbp2scsi_complete_all_commands(lu, DID_BUS_BUSY);
-
- /* Accept new commands unless there was another bus reset in the
- * meantime. */
- if (hpsb_node_entry_valid(lu->ne)) {
- atomic_set(&lu->state, SBP2LU_STATE_RUNNING);
- scsi_unblock_requests(lu->shost);
- }
- return 0;
-}
-
-static struct sbp2_lu *sbp2_alloc_device(struct unit_directory *ud)
-{
- struct sbp2_fwhost_info *hi;
- struct Scsi_Host *shost = NULL;
- struct sbp2_lu *lu = NULL;
- unsigned long flags;
-
- lu = kzalloc(sizeof(*lu), GFP_KERNEL);
- if (!lu) {
- SBP2_ERR("failed to create lu");
- goto failed_alloc;
- }
-
- lu->ne = ud->ne;
- lu->ud = ud;
- lu->speed_code = IEEE1394_SPEED_100;
- lu->max_payload_size = sbp2_speedto_max_payload[IEEE1394_SPEED_100];
- lu->status_fifo_addr = CSR1212_INVALID_ADDR_SPACE;
- INIT_LIST_HEAD(&lu->cmd_orb_inuse);
- INIT_LIST_HEAD(&lu->cmd_orb_completed);
- INIT_LIST_HEAD(&lu->lu_list);
- spin_lock_init(&lu->cmd_orb_lock);
- atomic_set(&lu->state, SBP2LU_STATE_RUNNING);
- INIT_WORK(&lu->protocol_work, NULL);
-
- dev_set_drvdata(&ud->device, lu);
-
- hi = hpsb_get_hostinfo(&sbp2_highlevel, ud->ne->host);
- if (!hi) {
- hi = hpsb_create_hostinfo(&sbp2_highlevel, ud->ne->host,
- sizeof(*hi));
- if (!hi) {
- SBP2_ERR("failed to allocate hostinfo");
- goto failed_alloc;
- }
- hi->host = ud->ne->host;
- INIT_LIST_HEAD(&hi->logical_units);
-
-#ifdef CONFIG_IEEE1394_SBP2_PHYS_DMA
- /* Handle data movement if physical dma is not
- * enabled or not supported on host controller */
- if (!hpsb_register_addrspace(&sbp2_highlevel, ud->ne->host,
- &sbp2_physdma_ops,
- 0x0ULL, 0xfffffffcULL)) {
- SBP2_ERR("failed to register lower 4GB address range");
- goto failed_alloc;
- }
-#endif
- }
-
- if (dma_get_max_seg_size(hi->host->device.parent) > SBP2_MAX_SEG_SIZE)
- BUG_ON(dma_set_max_seg_size(hi->host->device.parent,
- SBP2_MAX_SEG_SIZE));
-
- /* Prevent unloading of the 1394 host */
- if (!try_module_get(hi->host->driver->owner)) {
- SBP2_ERR("failed to get a reference on 1394 host driver");
- goto failed_alloc;
- }
-
- lu->hi = hi;
-
- write_lock_irqsave(&sbp2_hi_logical_units_lock, flags);
- list_add_tail(&lu->lu_list, &hi->logical_units);
- write_unlock_irqrestore(&sbp2_hi_logical_units_lock, flags);
-
- /* Register the status FIFO address range. We could use the same FIFO
- * for targets at different nodes. However we need different FIFOs per
- * target in order to support multi-unit devices.
- * The FIFO is located outside the local host controller's physical range
- * but, if possible, within the posted write area. Status writes will
- * then be performed as unified transactions. This slightly reduces
- * bandwidth usage, and some Prolific based devices seem to require it.
- */
- lu->status_fifo_addr = hpsb_allocate_and_register_addrspace(
- &sbp2_highlevel, ud->ne->host, &sbp2_ops,
- sizeof(struct sbp2_status_block), sizeof(quadlet_t),
- ud->ne->host->low_addr_space, CSR1212_ALL_SPACE_END);
- if (lu->status_fifo_addr == CSR1212_INVALID_ADDR_SPACE) {
- SBP2_ERR("failed to allocate status FIFO address range");
- goto failed_alloc;
- }
-
- shost = scsi_host_alloc(&sbp2_shost_template, sizeof(unsigned long));
- if (!shost) {
- SBP2_ERR("failed to register scsi host");
- goto failed_alloc;
- }
-
- shost->hostdata[0] = (unsigned long)lu;
- shost->max_cmd_len = SBP2_MAX_CDB_SIZE;
-
- if (!scsi_add_host(shost, &ud->device)) {
- lu->shost = shost;
- return lu;
- }
-
- SBP2_ERR("failed to add scsi host");
- scsi_host_put(shost);
-
-failed_alloc:
- sbp2_remove_device(lu);
- return NULL;
-}
-
-static void sbp2_host_reset(struct hpsb_host *host)
-{
- struct sbp2_fwhost_info *hi;
- struct sbp2_lu *lu;
- unsigned long flags;
-
- hi = hpsb_get_hostinfo(&sbp2_highlevel, host);
- if (!hi)
- return;
-
- read_lock_irqsave(&sbp2_hi_logical_units_lock, flags);
-
- list_for_each_entry(lu, &hi->logical_units, lu_list)
- if (atomic_cmpxchg(&lu->state,
- SBP2LU_STATE_RUNNING, SBP2LU_STATE_IN_RESET)
- == SBP2LU_STATE_RUNNING)
- scsi_block_requests(lu->shost);
-
- read_unlock_irqrestore(&sbp2_hi_logical_units_lock, flags);
-}
-
-static int sbp2_start_device(struct sbp2_lu *lu)
-{
- struct sbp2_fwhost_info *hi = lu->hi;
- int error;
-
- lu->login_response = dma_alloc_coherent(hi->host->device.parent,
- sizeof(struct sbp2_login_response),
- &lu->login_response_dma, GFP_KERNEL);
- if (!lu->login_response)
- goto alloc_fail;
-
- lu->query_logins_orb = dma_alloc_coherent(hi->host->device.parent,
- sizeof(struct sbp2_query_logins_orb),
- &lu->query_logins_orb_dma, GFP_KERNEL);
- if (!lu->query_logins_orb)
- goto alloc_fail;
-
- lu->query_logins_response = dma_alloc_coherent(hi->host->device.parent,
- sizeof(struct sbp2_query_logins_response),
- &lu->query_logins_response_dma, GFP_KERNEL);
- if (!lu->query_logins_response)
- goto alloc_fail;
-
- lu->reconnect_orb = dma_alloc_coherent(hi->host->device.parent,
- sizeof(struct sbp2_reconnect_orb),
- &lu->reconnect_orb_dma, GFP_KERNEL);
- if (!lu->reconnect_orb)
- goto alloc_fail;
-
- lu->logout_orb = dma_alloc_coherent(hi->host->device.parent,
- sizeof(struct sbp2_logout_orb),
- &lu->logout_orb_dma, GFP_KERNEL);
- if (!lu->logout_orb)
- goto alloc_fail;
-
- lu->login_orb = dma_alloc_coherent(hi->host->device.parent,
- sizeof(struct sbp2_login_orb),
- &lu->login_orb_dma, GFP_KERNEL);
- if (!lu->login_orb)
- goto alloc_fail;
-
- if (sbp2util_create_command_orb_pool(lu))
- goto alloc_fail;
-
- /* Wait a second before trying to log in. Previously logged in
- * initiators need a chance to reconnect. */
- if (msleep_interruptible(1000)) {
- sbp2_remove_device(lu);
- return -EINTR;
- }
-
- if (sbp2_login_device(lu)) {
- sbp2_remove_device(lu);
- return -EBUSY;
- }
-
- sbp2_set_busy_timeout(lu);
- sbp2_agent_reset(lu, 1);
- sbp2_max_speed_and_size(lu);
-
- if (lu->workarounds & SBP2_WORKAROUND_DELAY_INQUIRY)
- ssleep(SBP2_INQUIRY_DELAY);
-
- error = scsi_add_device(lu->shost, 0, lu->ud->id, 0);
- if (error) {
- SBP2_ERR("scsi_add_device failed");
- sbp2_logout_device(lu);
- sbp2_remove_device(lu);
- return error;
- }
-
- return 0;
-
-alloc_fail:
- SBP2_ERR("Could not allocate memory for lu");
- sbp2_remove_device(lu);
- return -ENOMEM;
-}
-
-static void sbp2_remove_device(struct sbp2_lu *lu)
-{
- struct sbp2_fwhost_info *hi;
- unsigned long flags;
-
- if (!lu)
- return;
- hi = lu->hi;
- if (!hi)
- goto no_hi;
-
- if (lu->shost) {
- scsi_remove_host(lu->shost);
- scsi_host_put(lu->shost);
- }
- flush_scheduled_work();
- sbp2util_remove_command_orb_pool(lu, hi->host);
-
- write_lock_irqsave(&sbp2_hi_logical_units_lock, flags);
- list_del(&lu->lu_list);
- write_unlock_irqrestore(&sbp2_hi_logical_units_lock, flags);
-
- if (lu->login_response)
- dma_free_coherent(hi->host->device.parent,
- sizeof(struct sbp2_login_response),
- lu->login_response,
- lu->login_response_dma);
- if (lu->login_orb)
- dma_free_coherent(hi->host->device.parent,
- sizeof(struct sbp2_login_orb),
- lu->login_orb,
- lu->login_orb_dma);
- if (lu->reconnect_orb)
- dma_free_coherent(hi->host->device.parent,
- sizeof(struct sbp2_reconnect_orb),
- lu->reconnect_orb,
- lu->reconnect_orb_dma);
- if (lu->logout_orb)
- dma_free_coherent(hi->host->device.parent,
- sizeof(struct sbp2_logout_orb),
- lu->logout_orb,
- lu->logout_orb_dma);
- if (lu->query_logins_orb)
- dma_free_coherent(hi->host->device.parent,
- sizeof(struct sbp2_query_logins_orb),
- lu->query_logins_orb,
- lu->query_logins_orb_dma);
- if (lu->query_logins_response)
- dma_free_coherent(hi->host->device.parent,
- sizeof(struct sbp2_query_logins_response),
- lu->query_logins_response,
- lu->query_logins_response_dma);
-
- if (lu->status_fifo_addr != CSR1212_INVALID_ADDR_SPACE)
- hpsb_unregister_addrspace(&sbp2_highlevel, hi->host,
- lu->status_fifo_addr);
-
- dev_set_drvdata(&lu->ud->device, NULL);
-
- module_put(hi->host->driver->owner);
-no_hi:
- kfree(lu);
-}
-
-#ifdef CONFIG_IEEE1394_SBP2_PHYS_DMA
-/*
- * Deal with write requests on adapters which do not support physical DMA or
- * have it switched off.
- */
-static int sbp2_handle_physdma_write(struct hpsb_host *host, int nodeid,
- int destid, quadlet_t *data, u64 addr,
- size_t length, u16 flags)
-{
- memcpy(bus_to_virt((u32) addr), data, length);
- return RCODE_COMPLETE;
-}
-
-/*
- * Deal with read requests on adapters which do not support physical DMA or
- * have it switched off.
- */
-static int sbp2_handle_physdma_read(struct hpsb_host *host, int nodeid,
- quadlet_t *data, u64 addr, size_t length,
- u16 flags)
-{
- memcpy(data, bus_to_virt((u32) addr), length);
- return RCODE_COMPLETE;
-}
-#endif
-
-/**************************************
- * SBP-2 protocol related section
- **************************************/
-
-static int sbp2_query_logins(struct sbp2_lu *lu)
-{
- struct sbp2_fwhost_info *hi = lu->hi;
- quadlet_t data[2];
- int max_logins;
- int active_logins;
-
- lu->query_logins_orb->reserved1 = 0x0;
- lu->query_logins_orb->reserved2 = 0x0;
-
- lu->query_logins_orb->query_response_lo = lu->query_logins_response_dma;
- lu->query_logins_orb->query_response_hi =
- ORB_SET_NODE_ID(hi->host->node_id);
- lu->query_logins_orb->lun_misc =
- ORB_SET_FUNCTION(SBP2_QUERY_LOGINS_REQUEST);
- lu->query_logins_orb->lun_misc |= ORB_SET_NOTIFY(1);
- lu->query_logins_orb->lun_misc |= ORB_SET_LUN(lu->lun);
-
- lu->query_logins_orb->reserved_resp_length =
- ORB_SET_QUERY_LOGINS_RESP_LENGTH(
- sizeof(struct sbp2_query_logins_response));
-
- lu->query_logins_orb->status_fifo_hi =
- ORB_SET_STATUS_FIFO_HI(lu->status_fifo_addr, hi->host->node_id);
- lu->query_logins_orb->status_fifo_lo =
- ORB_SET_STATUS_FIFO_LO(lu->status_fifo_addr);
-
- sbp2util_cpu_to_be32_buffer(lu->query_logins_orb,
- sizeof(struct sbp2_query_logins_orb));
-
- memset(lu->query_logins_response, 0,
- sizeof(struct sbp2_query_logins_response));
-
- data[0] = ORB_SET_NODE_ID(hi->host->node_id);
- data[1] = lu->query_logins_orb_dma;
- sbp2util_cpu_to_be32_buffer(data, 8);
-
- hpsb_node_write(lu->ne, lu->management_agent_addr, data, 8);
-
- if (sbp2util_access_timeout(lu, 2*HZ)) {
- SBP2_INFO("Error querying logins to SBP-2 device - timed out");
- return -EIO;
- }
-
- if (lu->status_block.ORB_offset_lo != lu->query_logins_orb_dma) {
- SBP2_INFO("Error querying logins to SBP-2 device - timed out");
- return -EIO;
- }
-
- if (STATUS_TEST_RDS(lu->status_block.ORB_offset_hi_misc)) {
- SBP2_INFO("Error querying logins to SBP-2 device - failed");
- return -EIO;
- }
-
- sbp2util_cpu_to_be32_buffer(lu->query_logins_response,
- sizeof(struct sbp2_query_logins_response));
-
- max_logins = RESPONSE_GET_MAX_LOGINS(
- lu->query_logins_response->length_max_logins);
- SBP2_INFO("Maximum concurrent logins supported: %d", max_logins);
-
- active_logins = RESPONSE_GET_ACTIVE_LOGINS(
- lu->query_logins_response->length_max_logins);
- SBP2_INFO("Number of active logins: %d", active_logins);
-
- if (active_logins >= max_logins) {
- return -EIO;
- }
-
- return 0;
-}
-
-static int sbp2_login_device(struct sbp2_lu *lu)
-{
- struct sbp2_fwhost_info *hi = lu->hi;
- quadlet_t data[2];
-
- if (!lu->login_orb)
- return -EIO;
-
- if (!sbp2_exclusive_login && sbp2_query_logins(lu)) {
- SBP2_INFO("Device does not support any more concurrent logins");
- return -EIO;
- }
-
- /* assume no password */
- lu->login_orb->password_hi = 0;
- lu->login_orb->password_lo = 0;
-
- lu->login_orb->login_response_lo = lu->login_response_dma;
- lu->login_orb->login_response_hi = ORB_SET_NODE_ID(hi->host->node_id);
- lu->login_orb->lun_misc = ORB_SET_FUNCTION(SBP2_LOGIN_REQUEST);
-
- /* one second reconnect time */
- lu->login_orb->lun_misc |= ORB_SET_RECONNECT(0);
- lu->login_orb->lun_misc |= ORB_SET_EXCLUSIVE(sbp2_exclusive_login);
- lu->login_orb->lun_misc |= ORB_SET_NOTIFY(1);
- lu->login_orb->lun_misc |= ORB_SET_LUN(lu->lun);
-
- lu->login_orb->passwd_resp_lengths =
- ORB_SET_LOGIN_RESP_LENGTH(sizeof(struct sbp2_login_response));
-
- lu->login_orb->status_fifo_hi =
- ORB_SET_STATUS_FIFO_HI(lu->status_fifo_addr, hi->host->node_id);
- lu->login_orb->status_fifo_lo =
- ORB_SET_STATUS_FIFO_LO(lu->status_fifo_addr);
-
- sbp2util_cpu_to_be32_buffer(lu->login_orb,
- sizeof(struct sbp2_login_orb));
-
- memset(lu->login_response, 0, sizeof(struct sbp2_login_response));
-
- data[0] = ORB_SET_NODE_ID(hi->host->node_id);
- data[1] = lu->login_orb_dma;
- sbp2util_cpu_to_be32_buffer(data, 8);
-
- hpsb_node_write(lu->ne, lu->management_agent_addr, data, 8);
-
- /* wait up to 20 seconds for login status */
- if (sbp2util_access_timeout(lu, 20*HZ)) {
- SBP2_ERR("Error logging into SBP-2 device - timed out");
- return -EIO;
- }
-
- /* make sure that the returned status matches the login ORB */
- if (lu->status_block.ORB_offset_lo != lu->login_orb_dma) {
- SBP2_ERR("Error logging into SBP-2 device - timed out");
- return -EIO;
- }
-
- if (STATUS_TEST_RDS(lu->status_block.ORB_offset_hi_misc)) {
- SBP2_ERR("Error logging into SBP-2 device - failed");
- return -EIO;
- }
-
- sbp2util_cpu_to_be32_buffer(lu->login_response,
- sizeof(struct sbp2_login_response));
- lu->command_block_agent_addr =
- ((u64)lu->login_response->command_block_agent_hi) << 32;
- lu->command_block_agent_addr |=
- ((u64)lu->login_response->command_block_agent_lo);
- lu->command_block_agent_addr &= 0x0000ffffffffffffULL;
-
- SBP2_INFO("Logged into SBP-2 device");
- return 0;
-}
-
-static int sbp2_logout_device(struct sbp2_lu *lu)
-{
- struct sbp2_fwhost_info *hi = lu->hi;
- quadlet_t data[2];
- int error;
-
- lu->logout_orb->reserved1 = 0x0;
- lu->logout_orb->reserved2 = 0x0;
- lu->logout_orb->reserved3 = 0x0;
- lu->logout_orb->reserved4 = 0x0;
-
- lu->logout_orb->login_ID_misc = ORB_SET_FUNCTION(SBP2_LOGOUT_REQUEST);
- lu->logout_orb->login_ID_misc |=
- ORB_SET_LOGIN_ID(lu->login_response->length_login_ID);
- lu->logout_orb->login_ID_misc |= ORB_SET_NOTIFY(1);
-
- lu->logout_orb->reserved5 = 0x0;
- lu->logout_orb->status_fifo_hi =
- ORB_SET_STATUS_FIFO_HI(lu->status_fifo_addr, hi->host->node_id);
- lu->logout_orb->status_fifo_lo =
- ORB_SET_STATUS_FIFO_LO(lu->status_fifo_addr);
-
- sbp2util_cpu_to_be32_buffer(lu->logout_orb,
- sizeof(struct sbp2_logout_orb));
-
- data[0] = ORB_SET_NODE_ID(hi->host->node_id);
- data[1] = lu->logout_orb_dma;
- sbp2util_cpu_to_be32_buffer(data, 8);
-
- error = hpsb_node_write(lu->ne, lu->management_agent_addr, data, 8);
- if (error)
- return error;
-
- /* wait up to 1 second for the device to complete logout */
- if (sbp2util_access_timeout(lu, HZ))
- return -EIO;
-
- SBP2_INFO("Logged out of SBP-2 device");
- return 0;
-}
-
-static int sbp2_reconnect_device(struct sbp2_lu *lu)
-{
- struct sbp2_fwhost_info *hi = lu->hi;
- quadlet_t data[2];
- int error;
-
- lu->reconnect_orb->reserved1 = 0x0;
- lu->reconnect_orb->reserved2 = 0x0;
- lu->reconnect_orb->reserved3 = 0x0;
- lu->reconnect_orb->reserved4 = 0x0;
-
- lu->reconnect_orb->login_ID_misc =
- ORB_SET_FUNCTION(SBP2_RECONNECT_REQUEST);
- lu->reconnect_orb->login_ID_misc |=
- ORB_SET_LOGIN_ID(lu->login_response->length_login_ID);
- lu->reconnect_orb->login_ID_misc |= ORB_SET_NOTIFY(1);
-
- lu->reconnect_orb->reserved5 = 0x0;
- lu->reconnect_orb->status_fifo_hi =
- ORB_SET_STATUS_FIFO_HI(lu->status_fifo_addr, hi->host->node_id);
- lu->reconnect_orb->status_fifo_lo =
- ORB_SET_STATUS_FIFO_LO(lu->status_fifo_addr);
-
- sbp2util_cpu_to_be32_buffer(lu->reconnect_orb,
- sizeof(struct sbp2_reconnect_orb));
-
- data[0] = ORB_SET_NODE_ID(hi->host->node_id);
- data[1] = lu->reconnect_orb_dma;
- sbp2util_cpu_to_be32_buffer(data, 8);
-
- error = hpsb_node_write(lu->ne, lu->management_agent_addr, data, 8);
- if (error)
- return error;
-
- /* wait up to 1 second for reconnect status */
- if (sbp2util_access_timeout(lu, HZ)) {
- SBP2_ERR("Error reconnecting to SBP-2 device - timed out");
- return -EIO;
- }
-
- /* make sure that the returned status matches the reconnect ORB */
- if (lu->status_block.ORB_offset_lo != lu->reconnect_orb_dma) {
- SBP2_ERR("Error reconnecting to SBP-2 device - timed out");
- return -EIO;
- }
-
- if (STATUS_TEST_RDS(lu->status_block.ORB_offset_hi_misc)) {
- SBP2_ERR("Error reconnecting to SBP-2 device - failed");
- return -EIO;
- }
-
- SBP2_INFO("Reconnected to SBP-2 device");
- return 0;
-}
-
-/*
- * Set the target node's Single Phase Retry limit. Affects the target's retry
- * behaviour if our node is too busy to accept requests.
- */
-static int sbp2_set_busy_timeout(struct sbp2_lu *lu)
-{
- quadlet_t data;
-
- data = cpu_to_be32(SBP2_BUSY_TIMEOUT_VALUE);
- if (hpsb_node_write(lu->ne, SBP2_BUSY_TIMEOUT_ADDRESS, &data, 4))
- SBP2_ERR("%s error", __func__);
- return 0;
-}
-
-static void sbp2_parse_unit_directory(struct sbp2_lu *lu,
- struct unit_directory *ud)
-{
- struct csr1212_keyval *kv;
- struct csr1212_dentry *dentry;
- u64 management_agent_addr;
- u32 firmware_revision, model;
- unsigned workarounds;
- int i;
-
- management_agent_addr = 0;
- firmware_revision = SBP2_ROM_VALUE_MISSING;
- model = ud->flags & UNIT_DIRECTORY_MODEL_ID ?
- ud->model_id : SBP2_ROM_VALUE_MISSING;
-
- csr1212_for_each_dir_entry(ud->ne->csr, kv, ud->ud_kv, dentry) {
- switch (kv->key.id) {
- case CSR1212_KV_ID_DEPENDENT_INFO:
- if (kv->key.type == CSR1212_KV_TYPE_CSR_OFFSET)
- management_agent_addr =
- CSR1212_REGISTER_SPACE_BASE +
- (kv->value.csr_offset << 2);
-
- else if (kv->key.type == CSR1212_KV_TYPE_IMMEDIATE)
- lu->lun = ORB_SET_LUN(kv->value.immediate);
- break;
-
-
- case SBP2_FIRMWARE_REVISION_KEY:
- firmware_revision = kv->value.immediate;
- break;
-
- default:
- /* FIXME: Check for SBP2_UNIT_CHARACTERISTICS_KEY
- * mgt_ORB_timeout and ORB_size, SBP-2 clause 7.4.8. */
-
- /* FIXME: Check for SBP2_DEVICE_TYPE_AND_LUN_KEY.
- * Its "ordered" bit has consequences for command ORB
- * list handling. See SBP-2 clauses 4.6, 7.4.11, 10.2 */
- break;
- }
- }
-
- workarounds = sbp2_default_workarounds;
-
- if (!(workarounds & SBP2_WORKAROUND_OVERRIDE))
- for (i = 0; i < ARRAY_SIZE(sbp2_workarounds_table); i++) {
- if (sbp2_workarounds_table[i].firmware_revision !=
- SBP2_ROM_VALUE_WILDCARD &&
- sbp2_workarounds_table[i].firmware_revision !=
- (firmware_revision & 0xffff00))
- continue;
- if (sbp2_workarounds_table[i].model !=
- SBP2_ROM_VALUE_WILDCARD &&
- sbp2_workarounds_table[i].model != model)
- continue;
- workarounds |= sbp2_workarounds_table[i].workarounds;
- break;
- }
-
- if (workarounds)
- SBP2_INFO("Workarounds for node " NODE_BUS_FMT ": 0x%x "
- "(firmware_revision 0x%06x, vendor_id 0x%06x,"
- " model_id 0x%06x)",
- NODE_BUS_ARGS(ud->ne->host, ud->ne->nodeid),
- workarounds, firmware_revision, ud->vendor_id,
- model);
-
- /* We would need one SCSI host template for each target to adjust
- * max_sectors on the fly, therefore warn only. */
- if (workarounds & SBP2_WORKAROUND_128K_MAX_TRANS &&
- (sbp2_max_sectors * 512) > (128 * 1024))
- SBP2_INFO("Node " NODE_BUS_FMT ": Bridge only supports 128KB "
- "max transfer size. WARNING: Current max_sectors "
- "setting is larger than 128KB (%d sectors)",
- NODE_BUS_ARGS(ud->ne->host, ud->ne->nodeid),
- sbp2_max_sectors);
-
- /* If this is a logical unit directory entry, process the parent
- * to get the values. */
- if (ud->flags & UNIT_DIRECTORY_LUN_DIRECTORY) {
- struct unit_directory *parent_ud = container_of(
- ud->device.parent, struct unit_directory, device);
- sbp2_parse_unit_directory(lu, parent_ud);
- } else {
- lu->management_agent_addr = management_agent_addr;
- lu->workarounds = workarounds;
- if (ud->flags & UNIT_DIRECTORY_HAS_LUN)
- lu->lun = ORB_SET_LUN(ud->lun);
- }
-}
-
-#define SBP2_PAYLOAD_TO_BYTES(p) (1 << ((p) + 2))
-
-/*
- * This function is called in order to determine the max speed and packet
- * size we can use in our ORBs. Note, that we (the driver and host) only
- * initiate the transaction. The SBP-2 device actually transfers the data
- * (by reading from the DMA area we tell it). This means that the SBP-2
- * device decides the actual maximum data it can transfer. We just tell it
- * the speed that it needs to use, and the max_rec the host supports, and
- * it takes care of the rest.
- */
-static int sbp2_max_speed_and_size(struct sbp2_lu *lu)
-{
- struct sbp2_fwhost_info *hi = lu->hi;
- u8 payload;
-
- lu->speed_code = hi->host->speed[NODEID_TO_NODE(lu->ne->nodeid)];
-
- if (lu->speed_code > sbp2_max_speed) {
- lu->speed_code = sbp2_max_speed;
- SBP2_INFO("Reducing speed to %s",
- hpsb_speedto_str[sbp2_max_speed]);
- }
-
- /* Payload size is the lesser of what our speed supports and what
- * our host supports. */
- payload = min(sbp2_speedto_max_payload[lu->speed_code],
- (u8) (hi->host->csr.max_rec - 1));
-
- /* If physical DMA is off, work around limitation in ohci1394:
- * packet size must not exceed PAGE_SIZE */
- if (lu->ne->host->low_addr_space < (1ULL << 32))
- while (SBP2_PAYLOAD_TO_BYTES(payload) + 24 > PAGE_SIZE &&
- payload)
- payload--;
-
- SBP2_INFO("Node " NODE_BUS_FMT ": Max speed [%s] - Max payload [%u]",
- NODE_BUS_ARGS(hi->host, lu->ne->nodeid),
- hpsb_speedto_str[lu->speed_code],
- SBP2_PAYLOAD_TO_BYTES(payload));
-
- lu->max_payload_size = payload;
- return 0;
-}
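
For reference, a standalone sketch of what the payload codes above translate to in bytes, before the host's max_rec and the PAGE_SIZE limitation are applied (speed names per the max_speed parameter description):

	#include <stdio.h>

	#define PAYLOAD_TO_BYTES(p) (1 << ((p) + 2))   /* same as SBP2_PAYLOAD_TO_BYTES */

	int main(void)
	{
		static const unsigned char speedto_max_payload[] = {
			0x7, 0x8, 0x9, 0xa, 0xa, 0xa   /* S100..S3200, from above */
		};
		static const char *names[] = {
			"S100", "S200", "S400", "S800", "S1600", "S3200"
		};
		int i;

		for (i = 0; i < 6; i++)
			printf("%-5s -> payload code %d -> %d bytes\n", names[i],
			       (int)speedto_max_payload[i],
			       PAYLOAD_TO_BYTES(speedto_max_payload[i]));
		return 0;
	}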
-
-static int sbp2_agent_reset(struct sbp2_lu *lu, int wait)
-{
- quadlet_t data;
- u64 addr;
- int retval;
- unsigned long flags;
-
- /* flush lu->protocol_work */
- if (wait)
- flush_scheduled_work();
-
- data = ntohl(SBP2_AGENT_RESET_DATA);
- addr = lu->command_block_agent_addr + SBP2_AGENT_RESET_OFFSET;
-
- if (wait)
- retval = hpsb_node_write(lu->ne, addr, &data, 4);
- else
- retval = sbp2util_node_write_no_wait(lu->ne, addr, &data, 4);
-
- if (retval < 0) {
- SBP2_ERR("hpsb_node_write failed.\n");
- return -EIO;
- }
-
- /* make sure that the ORB_POINTER is written on next command */
- spin_lock_irqsave(&lu->cmd_orb_lock, flags);
- lu->last_orb = NULL;
- spin_unlock_irqrestore(&lu->cmd_orb_lock, flags);
-
- return 0;
-}
-
-static int sbp2_prep_command_orb_sg(struct sbp2_command_orb *orb,
- struct sbp2_fwhost_info *hi,
- struct sbp2_command_info *cmd,
- unsigned int sg_count,
- struct scatterlist *sg,
- u32 orb_direction,
- enum dma_data_direction dma_dir)
-{
- struct device *dmadev = hi->host->device.parent;
- struct sbp2_unrestricted_page_table *pt;
- int i, n;
-
- n = dma_map_sg(dmadev, sg, sg_count, dma_dir);
- if (n == 0)
- return -ENOMEM;
-
- orb->data_descriptor_hi = ORB_SET_NODE_ID(hi->host->node_id);
- orb->misc |= ORB_SET_DIRECTION(orb_direction);
-
- /* special case if only one element (and less than 64KB in size) */
- if (n == 1) {
- orb->misc |= ORB_SET_DATA_SIZE(sg_dma_len(sg));
- orb->data_descriptor_lo = sg_dma_address(sg);
- } else {
- pt = &cmd->scatter_gather_element[0];
-
- dma_sync_single_for_cpu(dmadev, cmd->sge_dma,
- sizeof(cmd->scatter_gather_element),
- DMA_TO_DEVICE);
-
- for_each_sg(sg, sg, n, i) {
- pt[i].high = cpu_to_be32(sg_dma_len(sg) << 16);
- pt[i].low = cpu_to_be32(sg_dma_address(sg));
- }
-
- orb->misc |= ORB_SET_PAGE_TABLE_PRESENT(0x1) |
- ORB_SET_DATA_SIZE(n);
- orb->data_descriptor_lo = cmd->sge_dma;
-
- dma_sync_single_for_device(dmadev, cmd->sge_dma,
- sizeof(cmd->scatter_gather_element),
- DMA_TO_DEVICE);
- }
- return 0;
-}
-
-static int sbp2_create_command_orb(struct sbp2_lu *lu,
- struct sbp2_command_info *cmd,
- struct scsi_cmnd *SCpnt)
-{
- struct device *dmadev = lu->hi->host->device.parent;
- struct sbp2_command_orb *orb = &cmd->command_orb;
- unsigned int scsi_request_bufflen = scsi_bufflen(SCpnt);
- enum dma_data_direction dma_dir = SCpnt->sc_data_direction;
- u32 orb_direction;
- int ret;
-
- dma_sync_single_for_cpu(dmadev, cmd->command_orb_dma,
- sizeof(struct sbp2_command_orb), DMA_TO_DEVICE);
- /*
- * Set up our command ORB.
- *
- * NOTE: We're doing unrestricted page tables (s/g), as this gives the
- * best performance (at least with the devices I have). This means
- * that data_size becomes the number of s/g elements, and
- * page_size should be zero (for unrestricted).
- */
- orb->next_ORB_hi = ORB_SET_NULL_PTR(1);
- orb->next_ORB_lo = 0x0;
- orb->misc = ORB_SET_MAX_PAYLOAD(lu->max_payload_size);
- orb->misc |= ORB_SET_SPEED(lu->speed_code);
- orb->misc |= ORB_SET_NOTIFY(1);
-
- if (dma_dir == DMA_NONE)
- orb_direction = ORB_DIRECTION_NO_DATA_TRANSFER;
- else if (dma_dir == DMA_TO_DEVICE && scsi_request_bufflen)
- orb_direction = ORB_DIRECTION_WRITE_TO_MEDIA;
- else if (dma_dir == DMA_FROM_DEVICE && scsi_request_bufflen)
- orb_direction = ORB_DIRECTION_READ_FROM_MEDIA;
- else {
- SBP2_INFO("Falling back to DMA_NONE");
- orb_direction = ORB_DIRECTION_NO_DATA_TRANSFER;
- }
-
- /* set up our page table stuff */
- if (orb_direction == ORB_DIRECTION_NO_DATA_TRANSFER) {
- orb->data_descriptor_hi = 0x0;
- orb->data_descriptor_lo = 0x0;
- orb->misc |= ORB_SET_DIRECTION(1);
- ret = 0;
- } else {
- ret = sbp2_prep_command_orb_sg(orb, lu->hi, cmd,
- scsi_sg_count(SCpnt),
- scsi_sglist(SCpnt),
- orb_direction, dma_dir);
- }
- sbp2util_cpu_to_be32_buffer(orb, sizeof(*orb));
-
- memset(orb->cdb, 0, sizeof(orb->cdb));
- memcpy(orb->cdb, SCpnt->cmnd, SCpnt->cmd_len);
-
- dma_sync_single_for_device(dmadev, cmd->command_orb_dma,
- sizeof(struct sbp2_command_orb), DMA_TO_DEVICE);
- return ret;
-}
-
-static void sbp2_link_orb_command(struct sbp2_lu *lu,
- struct sbp2_command_info *cmd)
-{
- struct sbp2_fwhost_info *hi = lu->hi;
- struct sbp2_command_orb *last_orb;
- dma_addr_t last_orb_dma;
- u64 addr = lu->command_block_agent_addr;
- quadlet_t data[2];
- size_t length;
- unsigned long flags;
-
- /* check to see if there are any previous orbs to use */
- spin_lock_irqsave(&lu->cmd_orb_lock, flags);
- last_orb = lu->last_orb;
- last_orb_dma = lu->last_orb_dma;
- if (!last_orb) {
- /*
- * last_orb == NULL means: We know that the target's fetch agent
- * is not active right now.
- */
- addr += SBP2_ORB_POINTER_OFFSET;
- data[0] = ORB_SET_NODE_ID(hi->host->node_id);
- data[1] = cmd->command_orb_dma;
- sbp2util_cpu_to_be32_buffer(data, 8);
- length = 8;
- } else {
- /*
- * last_orb != NULL means: We know that the target's fetch agent
- * is (very probably) not dead or in reset state right now.
- * We have an ORB already sent that we can append a new one to.
- * The target's fetch agent may or may not have read this
- * previous ORB yet.
- */
- dma_sync_single_for_cpu(hi->host->device.parent, last_orb_dma,
- sizeof(struct sbp2_command_orb),
- DMA_TO_DEVICE);
- last_orb->next_ORB_lo = cpu_to_be32(cmd->command_orb_dma);
- wmb();
- /* Tells hardware that this pointer is valid */
- last_orb->next_ORB_hi = 0;
- dma_sync_single_for_device(hi->host->device.parent,
- last_orb_dma,
- sizeof(struct sbp2_command_orb),
- DMA_TO_DEVICE);
- addr += SBP2_DOORBELL_OFFSET;
- data[0] = 0;
- length = 4;
- }
- lu->last_orb = &cmd->command_orb;
- lu->last_orb_dma = cmd->command_orb_dma;
- spin_unlock_irqrestore(&lu->cmd_orb_lock, flags);
-
- if (sbp2util_node_write_no_wait(lu->ne, addr, data, length)) {
- /*
- * sbp2util_node_write_no_wait failed. We certainly ran out
- * of transaction labels, perhaps just because there were no
- * context switches which gave khpsbpkt a chance to collect
- * free tlabels. Try again in non-atomic context. If necessary,
- * the workqueue job will sleep until it is guaranteed to get a tlabel.
- * We do not accept new commands until the job is over.
- */
- scsi_block_requests(lu->shost);
- PREPARE_WORK(&lu->protocol_work,
- last_orb ? sbp2util_write_doorbell:
- sbp2util_write_orb_pointer);
- schedule_work(&lu->protocol_work);
- }
-}
-
-static int sbp2_send_command(struct sbp2_lu *lu, struct scsi_cmnd *SCpnt,
- void (*done)(struct scsi_cmnd *))
-{
- struct sbp2_command_info *cmd;
-
- cmd = sbp2util_allocate_command_orb(lu, SCpnt, done);
- if (!cmd)
- return -EIO;
-
- if (sbp2_create_command_orb(lu, cmd, SCpnt))
- return -ENOMEM;
-
- sbp2_link_orb_command(lu, cmd);
- return 0;
-}
-
-/*
- * Translates SBP-2 status into SCSI sense data for check conditions
- */
-static unsigned int sbp2_status_to_sense_data(unchar *sbp2_status,
- unchar *sense_data)
-{
- /* OK, it's pretty ugly... ;-) */
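- /* Repack the sense information from the SBP-2 status block into
- * fixed-format SCSI sense data: response code 0x70 (current error),
- * additional sense length 10. The low six bits of status byte 8 are
- * returned to the caller as the SCSI status. */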
- sense_data[0] = 0x70;
- sense_data[1] = 0x0;
- sense_data[2] = sbp2_status[9];
- sense_data[3] = sbp2_status[12];
- sense_data[4] = sbp2_status[13];
- sense_data[5] = sbp2_status[14];
- sense_data[6] = sbp2_status[15];
- sense_data[7] = 10;
- sense_data[8] = sbp2_status[16];
- sense_data[9] = sbp2_status[17];
- sense_data[10] = sbp2_status[18];
- sense_data[11] = sbp2_status[19];
- sense_data[12] = sbp2_status[10];
- sense_data[13] = sbp2_status[11];
- sense_data[14] = sbp2_status[20];
- sense_data[15] = sbp2_status[21];
-
- return sbp2_status[8] & 0x3f;
-}
-
-static int sbp2_handle_status_write(struct hpsb_host *host, int nodeid,
- int destid, quadlet_t *data, u64 addr,
- size_t length, u16 fl)
-{
- struct sbp2_fwhost_info *hi;
- struct sbp2_lu *lu = NULL, *lu_tmp;
- struct scsi_cmnd *SCpnt = NULL;
- struct sbp2_status_block *sb;
- u32 scsi_status = SBP2_SCSI_STATUS_GOOD;
- struct sbp2_command_info *cmd;
- unsigned long flags;
-
- if (unlikely(length < 8 || length > sizeof(struct sbp2_status_block))) {
- SBP2_ERR("Wrong size of status block");
- return RCODE_ADDRESS_ERROR;
- }
- if (unlikely(!host)) {
- SBP2_ERR("host is NULL - this is bad!");
- return RCODE_ADDRESS_ERROR;
- }
- hi = hpsb_get_hostinfo(&sbp2_highlevel, host);
- if (unlikely(!hi)) {
- SBP2_ERR("host info is NULL - this is bad!");
- return RCODE_ADDRESS_ERROR;
- }
-
- /* Find the unit which wrote the status. */
- read_lock_irqsave(&sbp2_hi_logical_units_lock, flags);
- list_for_each_entry(lu_tmp, &hi->logical_units, lu_list) {
- if (lu_tmp->ne->nodeid == nodeid &&
- lu_tmp->status_fifo_addr == addr) {
- lu = lu_tmp;
- break;
- }
- }
- read_unlock_irqrestore(&sbp2_hi_logical_units_lock, flags);
-
- if (unlikely(!lu)) {
- SBP2_ERR("lu is NULL - device is gone?");
- return RCODE_ADDRESS_ERROR;
- }
-
- /* Put response into lu status fifo buffer. The first two quadlets
- * arrive in big endian byte order. Often the target writes only a
- * truncated status block, minimally the first two quadlets. The rest
- * is implied to be zeros. */
- sb = &lu->status_block;
- memset(sb->command_set_dependent, 0, sizeof(sb->command_set_dependent));
- memcpy(sb, data, length);
- sbp2util_be32_to_cpu_buffer(sb, 8);
-
- /* Ignore unsolicited status. Handle command ORB status. */
- if (unlikely(STATUS_GET_SRC(sb->ORB_offset_hi_misc) == 2))
- cmd = NULL;
- else
- cmd = sbp2util_find_command_for_orb(lu, sb->ORB_offset_lo);
- if (cmd) {
- /* Grab SCSI command pointers and check status. */
- /*
- * FIXME: If the src field in the status is 1, the ORB DMA must
- * not be reused until status for a subsequent ORB is received.
- */
- SCpnt = cmd->Current_SCpnt;
- spin_lock_irqsave(&lu->cmd_orb_lock, flags);
- sbp2util_mark_command_completed(lu, cmd);
- spin_unlock_irqrestore(&lu->cmd_orb_lock, flags);
-
- if (SCpnt) {
- u32 h = sb->ORB_offset_hi_misc;
- u32 r = STATUS_GET_RESP(h);
-
- if (r != RESP_STATUS_REQUEST_COMPLETE) {
- SBP2_INFO("resp 0x%x, sbp_status 0x%x",
- r, STATUS_GET_SBP_STATUS(h));
- scsi_status =
- r == RESP_STATUS_TRANSPORT_FAILURE ?
- SBP2_SCSI_STATUS_BUSY :
- SBP2_SCSI_STATUS_COMMAND_TERMINATED;
- }
-
- if (STATUS_GET_LEN(h) > 1)
- scsi_status = sbp2_status_to_sense_data(
- (unchar *)sb, SCpnt->sense_buffer);
-
- if (STATUS_TEST_DEAD(h))
- sbp2_agent_reset(lu, 0);
- }
-
- /* Check here to see if there are no commands in-use. If there
- * are none, we know that the fetch agent left the active state
- * _and_ that we did not reactivate it yet. Therefore clear
- * last_orb so that next time we write directly to the
- * ORB_POINTER register. That way the fetch agent does not need
- * to refetch the next_ORB. */
- spin_lock_irqsave(&lu->cmd_orb_lock, flags);
- if (list_empty(&lu->cmd_orb_inuse))
- lu->last_orb = NULL;
- spin_unlock_irqrestore(&lu->cmd_orb_lock, flags);
-
- } else {
- /* It's probably status after a management request. */
- if ((sb->ORB_offset_lo == lu->reconnect_orb_dma) ||
- (sb->ORB_offset_lo == lu->login_orb_dma) ||
- (sb->ORB_offset_lo == lu->query_logins_orb_dma) ||
- (sb->ORB_offset_lo == lu->logout_orb_dma)) {
- lu->access_complete = 1;
- wake_up_interruptible(&sbp2_access_wq);
- }
- }
-
- if (SCpnt)
- sbp2scsi_complete_command(lu, scsi_status, SCpnt,
- cmd->Current_done);
- return RCODE_COMPLETE;
-}
-
-/**************************************
- * SCSI interface related section
- **************************************/
-
-static int sbp2scsi_queuecommand(struct scsi_cmnd *SCpnt,
- void (*done)(struct scsi_cmnd *))
-{
- struct sbp2_lu *lu = (struct sbp2_lu *)SCpnt->device->host->hostdata[0];
- struct sbp2_fwhost_info *hi;
- int result = DID_NO_CONNECT << 16;
-
- if (unlikely(!sbp2util_node_is_available(lu)))
- goto done;
-
- hi = lu->hi;
-
- if (unlikely(!hi)) {
- SBP2_ERR("sbp2_fwhost_info is NULL - this is bad!");
- goto done;
- }
-
- /* Multiple units are currently represented to the SCSI core as separate
- * targets, not as one target with multiple LUs. Therefore return
- * selection time-out to any IO directed at non-zero LUNs. */
- if (unlikely(SCpnt->device->lun))
- goto done;
-
- if (unlikely(!hpsb_node_entry_valid(lu->ne))) {
- SBP2_ERR("Bus reset in progress - rejecting command");
- result = DID_BUS_BUSY << 16;
- goto done;
- }
-
- /* Bidirectional commands are not yet implemented,
- * and unknown transfer direction not handled. */
- if (unlikely(SCpnt->sc_data_direction == DMA_BIDIRECTIONAL)) {
- SBP2_ERR("Cannot handle DMA_BIDIRECTIONAL - rejecting command");
- result = DID_ERROR << 16;
- goto done;
- }
-
- if (sbp2_send_command(lu, SCpnt, done)) {
- SBP2_ERR("Error sending SCSI command");
- sbp2scsi_complete_command(lu,
- SBP2_SCSI_STATUS_SELECTION_TIMEOUT,
- SCpnt, done);
- }
- return 0;
-
-done:
- SCpnt->result = result;
- done(SCpnt);
- return 0;
-}
-
-static void sbp2scsi_complete_all_commands(struct sbp2_lu *lu, u32 status)
-{
- struct list_head *lh;
- struct sbp2_command_info *cmd;
- unsigned long flags;
-
- spin_lock_irqsave(&lu->cmd_orb_lock, flags);
- while (!list_empty(&lu->cmd_orb_inuse)) {
- lh = lu->cmd_orb_inuse.next;
- cmd = list_entry(lh, struct sbp2_command_info, list);
- sbp2util_mark_command_completed(lu, cmd);
- if (cmd->Current_SCpnt) {
- cmd->Current_SCpnt->result = status << 16;
- cmd->Current_done(cmd->Current_SCpnt);
- }
- }
- spin_unlock_irqrestore(&lu->cmd_orb_lock, flags);
-
- return;
-}
-
-/*
- * Complete a regular SCSI command. Can be called in atomic context.
- */
-static void sbp2scsi_complete_command(struct sbp2_lu *lu, u32 scsi_status,
- struct scsi_cmnd *SCpnt,
- void (*done)(struct scsi_cmnd *))
-{
- if (!SCpnt) {
- SBP2_ERR("SCpnt is NULL");
- return;
- }
-
- switch (scsi_status) {
- case SBP2_SCSI_STATUS_GOOD:
- SCpnt->result = DID_OK << 16;
- break;
-
- case SBP2_SCSI_STATUS_BUSY:
- SBP2_ERR("SBP2_SCSI_STATUS_BUSY");
- SCpnt->result = DID_BUS_BUSY << 16;
- break;
-
- case SBP2_SCSI_STATUS_CHECK_CONDITION:
- SCpnt->result = CHECK_CONDITION << 1 | DID_OK << 16;
- break;
-
- case SBP2_SCSI_STATUS_SELECTION_TIMEOUT:
- SBP2_ERR("SBP2_SCSI_STATUS_SELECTION_TIMEOUT");
- SCpnt->result = DID_NO_CONNECT << 16;
- scsi_print_command(SCpnt);
- break;
-
- case SBP2_SCSI_STATUS_CONDITION_MET:
- case SBP2_SCSI_STATUS_RESERVATION_CONFLICT:
- case SBP2_SCSI_STATUS_COMMAND_TERMINATED:
- SBP2_ERR("Bad SCSI status = %x", scsi_status);
- SCpnt->result = DID_ERROR << 16;
- scsi_print_command(SCpnt);
- break;
-
- default:
- SBP2_ERR("Unsupported SCSI status = %x", scsi_status);
- SCpnt->result = DID_ERROR << 16;
- }
-
- /* If a bus reset is in progress and there was an error, complete
- * the command as busy so that it will get retried. */
- if (!hpsb_node_entry_valid(lu->ne)
- && (scsi_status != SBP2_SCSI_STATUS_GOOD)) {
- SBP2_ERR("Completing command with busy (bus reset)");
- SCpnt->result = DID_BUS_BUSY << 16;
- }
-
- /* Tell the SCSI stack that we're done with this command. */
- done(SCpnt);
-}
-
-static int sbp2scsi_slave_alloc(struct scsi_device *sdev)
-{
- struct sbp2_lu *lu = (struct sbp2_lu *)sdev->host->hostdata[0];
-
- if (sdev->lun != 0 || sdev->id != lu->ud->id || sdev->channel != 0)
- return -ENODEV;
-
- lu->sdev = sdev;
- sdev->allow_restart = 1;
-
- /* SBP-2 requires quadlet alignment of the data buffers. */
- blk_queue_update_dma_alignment(sdev->request_queue, 4 - 1);
-
- if (lu->workarounds & SBP2_WORKAROUND_INQUIRY_36)
- sdev->inquiry_len = 36;
- return 0;
-}
-
-static int sbp2scsi_slave_configure(struct scsi_device *sdev)
-{
- struct sbp2_lu *lu = (struct sbp2_lu *)sdev->host->hostdata[0];
-
- sdev->use_10_for_rw = 1;
-
- if (sbp2_exclusive_login)
- sdev->manage_start_stop = 1;
- if (sdev->type == TYPE_ROM)
- sdev->use_10_for_ms = 1;
- if (sdev->type == TYPE_DISK &&
- lu->workarounds & SBP2_WORKAROUND_MODE_SENSE_8)
- sdev->skip_ms_page_8 = 1;
- if (lu->workarounds & SBP2_WORKAROUND_FIX_CAPACITY)
- sdev->fix_capacity = 1;
- if (lu->workarounds & SBP2_WORKAROUND_POWER_CONDITION)
- sdev->start_stop_pwr_cond = 1;
- if (lu->workarounds & SBP2_WORKAROUND_128K_MAX_TRANS)
- blk_queue_max_hw_sectors(sdev->request_queue, 128 * 1024 / 512);
-
- blk_queue_max_segment_size(sdev->request_queue, SBP2_MAX_SEG_SIZE);
- return 0;
-}
-
-static void sbp2scsi_slave_destroy(struct scsi_device *sdev)
-{
- ((struct sbp2_lu *)sdev->host->hostdata[0])->sdev = NULL;
- return;
-}
-
-/*
- * Called by scsi stack when something has really gone wrong.
- * Usually called when a command has timed out for some reason.
- */
-static int sbp2scsi_abort(struct scsi_cmnd *SCpnt)
-{
- struct sbp2_lu *lu = (struct sbp2_lu *)SCpnt->device->host->hostdata[0];
- struct sbp2_command_info *cmd;
- unsigned long flags;
-
- SBP2_INFO("aborting sbp2 command");
- scsi_print_command(SCpnt);
-
- if (sbp2util_node_is_available(lu)) {
- sbp2_agent_reset(lu, 1);
-
- /* Return a matching command structure to the free pool. */
- spin_lock_irqsave(&lu->cmd_orb_lock, flags);
- cmd = sbp2util_find_command_for_SCpnt(lu, SCpnt);
- if (cmd) {
- sbp2util_mark_command_completed(lu, cmd);
- if (cmd->Current_SCpnt) {
- cmd->Current_SCpnt->result = DID_ABORT << 16;
- cmd->Current_done(cmd->Current_SCpnt);
- }
- }
- spin_unlock_irqrestore(&lu->cmd_orb_lock, flags);
-
- sbp2scsi_complete_all_commands(lu, DID_BUS_BUSY);
- }
-
- return SUCCESS;
-}
-
-/*
- * Called by scsi stack when something has really gone wrong.
- */
-static int sbp2scsi_reset(struct scsi_cmnd *SCpnt)
-{
- struct sbp2_lu *lu = (struct sbp2_lu *)SCpnt->device->host->hostdata[0];
-
- SBP2_INFO("reset requested");
-
- if (sbp2util_node_is_available(lu)) {
- SBP2_INFO("generating sbp2 fetch agent reset");
- sbp2_agent_reset(lu, 1);
- }
-
- return SUCCESS;
-}
-
-static ssize_t sbp2_sysfs_ieee1394_id_show(struct device *dev,
- struct device_attribute *attr,
- char *buf)
-{
- struct scsi_device *sdev;
- struct sbp2_lu *lu;
-
- if (!(sdev = to_scsi_device(dev)))
- return 0;
-
- if (!(lu = (struct sbp2_lu *)sdev->host->hostdata[0]))
- return 0;
-
- if (sbp2_long_sysfs_ieee1394_id)
- return sprintf(buf, "%016Lx:%06x:%04x\n",
- (unsigned long long)lu->ne->guid,
- lu->ud->directory_id, ORB_SET_LUN(lu->lun));
- else
- return sprintf(buf, "%016Lx:%d:%d\n",
- (unsigned long long)lu->ne->guid,
- lu->ud->id, ORB_SET_LUN(lu->lun));
-}
-
-MODULE_AUTHOR("Ben Collins <bcollins@debian.org>");
-MODULE_DESCRIPTION("IEEE-1394 SBP-2 protocol driver");
-MODULE_SUPPORTED_DEVICE(SBP2_DEVICE_NAME);
-MODULE_LICENSE("GPL");
-
-static int sbp2_module_init(void)
-{
- int ret;
-
- if (sbp2_serialize_io) {
- sbp2_shost_template.can_queue = 1;
- sbp2_shost_template.cmd_per_lun = 1;
- }
-
- sbp2_shost_template.max_sectors = sbp2_max_sectors;
-
- hpsb_register_highlevel(&sbp2_highlevel);
- ret = hpsb_register_protocol(&sbp2_driver);
- if (ret) {
- SBP2_ERR("Failed to register protocol");
- hpsb_unregister_highlevel(&sbp2_highlevel);
- return ret;
- }
- return 0;
-}
-
-static void __exit sbp2_module_exit(void)
-{
- hpsb_unregister_protocol(&sbp2_driver);
- hpsb_unregister_highlevel(&sbp2_highlevel);
-}
-
-module_init(sbp2_module_init);
-module_exit(sbp2_module_exit);
diff --git a/drivers/ieee1394/sbp2.h b/drivers/ieee1394/sbp2.h
deleted file mode 100644
index 64a3a66a8a39..000000000000
--- a/drivers/ieee1394/sbp2.h
+++ /dev/null
@@ -1,346 +0,0 @@
-/*
- * sbp2.h - Defines and prototypes for sbp2.c
- *
- * Copyright (C) 2000 James Goodwin, Filanet Corporation (www.filanet.com)
- * jamesg@filanet.com
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software Foundation,
- * Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
- */
-
-#ifndef SBP2_H
-#define SBP2_H
-
-#define SBP2_DEVICE_NAME "sbp2"
-
-/*
- * There is no transport protocol limit to the CDB length, but we implement
- * a fixed length only. 16 bytes is enough for disks larger than 2 TB.
- */
-#define SBP2_MAX_CDB_SIZE 16
-
-/*
- * SBP-2 specific definitions
- */
-
-#define ORB_DIRECTION_WRITE_TO_MEDIA 0x0
-#define ORB_DIRECTION_READ_FROM_MEDIA 0x1
-#define ORB_DIRECTION_NO_DATA_TRANSFER 0x2
-
-#define ORB_SET_NULL_PTR(v) (((v) & 0x1) << 31)
-#define ORB_SET_NOTIFY(v) (((v) & 0x1) << 31)
-#define ORB_SET_RQ_FMT(v) (((v) & 0x3) << 29)
-#define ORB_SET_NODE_ID(v) (((v) & 0xffff) << 16)
-#define ORB_SET_STATUS_FIFO_HI(v, id) ((v) >> 32 | ORB_SET_NODE_ID(id))
-#define ORB_SET_STATUS_FIFO_LO(v) ((v) & 0xffffffff)
-#define ORB_SET_DATA_SIZE(v) ((v) & 0xffff)
-#define ORB_SET_PAGE_SIZE(v) (((v) & 0x7) << 16)
-#define ORB_SET_PAGE_TABLE_PRESENT(v) (((v) & 0x1) << 19)
-#define ORB_SET_MAX_PAYLOAD(v) (((v) & 0xf) << 20)
-#define ORB_SET_SPEED(v) (((v) & 0x7) << 24)
-#define ORB_SET_DIRECTION(v) (((v) & 0x1) << 27)
-
-struct sbp2_command_orb {
- u32 next_ORB_hi;
- u32 next_ORB_lo;
- u32 data_descriptor_hi;
- u32 data_descriptor_lo;
- u32 misc;
- u8 cdb[SBP2_MAX_CDB_SIZE];
-} __attribute__((packed));
-
-#define SBP2_LOGIN_REQUEST 0x0
-#define SBP2_QUERY_LOGINS_REQUEST 0x1
-#define SBP2_RECONNECT_REQUEST 0x3
-#define SBP2_SET_PASSWORD_REQUEST 0x4
-#define SBP2_LOGOUT_REQUEST 0x7
-#define SBP2_ABORT_TASK_REQUEST 0xb
-#define SBP2_ABORT_TASK_SET 0xc
-#define SBP2_LOGICAL_UNIT_RESET 0xe
-#define SBP2_TARGET_RESET_REQUEST 0xf
-
-#define ORB_SET_LUN(v) ((v) & 0xffff)
-#define ORB_SET_FUNCTION(v) (((v) & 0xf) << 16)
-#define ORB_SET_RECONNECT(v) (((v) & 0xf) << 20)
-#define ORB_SET_EXCLUSIVE(v) ((v) ? 1 << 28 : 0)
-#define ORB_SET_LOGIN_RESP_LENGTH(v) ((v) & 0xffff)
-#define ORB_SET_PASSWD_LENGTH(v) (((v) & 0xffff) << 16)
-
-struct sbp2_login_orb {
- u32 password_hi;
- u32 password_lo;
- u32 login_response_hi;
- u32 login_response_lo;
- u32 lun_misc;
- u32 passwd_resp_lengths;
- u32 status_fifo_hi;
- u32 status_fifo_lo;
-} __attribute__((packed));
-
-#define RESPONSE_GET_LOGIN_ID(v) ((v) & 0xffff)
-#define RESPONSE_GET_LENGTH(v) (((v) >> 16) & 0xffff)
-#define RESPONSE_GET_RECONNECT_HOLD(v) ((v) & 0xffff)
-
-struct sbp2_login_response {
- u32 length_login_ID;
- u32 command_block_agent_hi;
- u32 command_block_agent_lo;
- u32 reconnect_hold;
-} __attribute__((packed));
-
-#define ORB_SET_LOGIN_ID(v) ((v) & 0xffff)
-#define ORB_SET_QUERY_LOGINS_RESP_LENGTH(v) ((v) & 0xffff)
-
-struct sbp2_query_logins_orb {
- u32 reserved1;
- u32 reserved2;
- u32 query_response_hi;
- u32 query_response_lo;
- u32 lun_misc;
- u32 reserved_resp_length;
- u32 status_fifo_hi;
- u32 status_fifo_lo;
-} __attribute__((packed));
-
-#define RESPONSE_GET_MAX_LOGINS(v) ((v) & 0xffff)
-#define RESPONSE_GET_ACTIVE_LOGINS(v) ((RESPONSE_GET_LENGTH((v)) - 4) / 12)
-
-struct sbp2_query_logins_response {
- u32 length_max_logins;
- u32 misc_IDs;
- u32 initiator_misc_hi;
- u32 initiator_misc_lo;
-} __attribute__((packed));
-
-struct sbp2_reconnect_orb {
- u32 reserved1;
- u32 reserved2;
- u32 reserved3;
- u32 reserved4;
- u32 login_ID_misc;
- u32 reserved5;
- u32 status_fifo_hi;
- u32 status_fifo_lo;
-} __attribute__((packed));
-
-struct sbp2_logout_orb {
- u32 reserved1;
- u32 reserved2;
- u32 reserved3;
- u32 reserved4;
- u32 login_ID_misc;
- u32 reserved5;
- u32 status_fifo_hi;
- u32 status_fifo_lo;
-} __attribute__((packed));
-
-struct sbp2_unrestricted_page_table {
- __be32 high;
- __be32 low;
-};
-
-#define RESP_STATUS_REQUEST_COMPLETE 0x0
-#define RESP_STATUS_TRANSPORT_FAILURE 0x1
-#define RESP_STATUS_ILLEGAL_REQUEST 0x2
-#define RESP_STATUS_VENDOR_DEPENDENT 0x3
-
-#define SBP2_STATUS_NO_ADDITIONAL_INFO 0x0
-#define SBP2_STATUS_REQ_TYPE_NOT_SUPPORTED 0x1
-#define SBP2_STATUS_SPEED_NOT_SUPPORTED 0x2
-#define SBP2_STATUS_PAGE_SIZE_NOT_SUPPORTED 0x3
-#define SBP2_STATUS_ACCESS_DENIED 0x4
-#define SBP2_STATUS_LU_NOT_SUPPORTED 0x5
-#define SBP2_STATUS_MAX_PAYLOAD_TOO_SMALL 0x6
-#define SBP2_STATUS_RESOURCES_UNAVAILABLE 0x8
-#define SBP2_STATUS_FUNCTION_REJECTED 0x9
-#define SBP2_STATUS_LOGIN_ID_NOT_RECOGNIZED 0xa
-#define SBP2_STATUS_DUMMY_ORB_COMPLETED 0xb
-#define SBP2_STATUS_REQUEST_ABORTED 0xc
-#define SBP2_STATUS_UNSPECIFIED_ERROR 0xff
-
-#define SFMT_CURRENT_ERROR 0x0
-#define SFMT_DEFERRED_ERROR 0x1
-#define SFMT_VENDOR_DEPENDENT_STATUS 0x3
-
-#define STATUS_GET_SRC(v) (((v) >> 30) & 0x3)
-#define STATUS_GET_RESP(v) (((v) >> 28) & 0x3)
-#define STATUS_GET_LEN(v) (((v) >> 24) & 0x7)
-#define STATUS_GET_SBP_STATUS(v) (((v) >> 16) & 0xff)
-#define STATUS_GET_ORB_OFFSET_HI(v) ((v) & 0x0000ffff)
-#define STATUS_TEST_DEAD(v) ((v) & 0x08000000)
-/* test 'resp' | 'dead' | 'sbp2_status' */
-#define STATUS_TEST_RDS(v) ((v) & 0x38ff0000)
-
-struct sbp2_status_block {
- u32 ORB_offset_hi_misc;
- u32 ORB_offset_lo;
- u8 command_set_dependent[24];
-} __attribute__((packed));
-
-
-/*
- * SBP2 related configuration ROM definitions
- */
-
-#define SBP2_UNIT_DIRECTORY_OFFSET_KEY 0xd1
-#define SBP2_CSR_OFFSET_KEY 0x54
-#define SBP2_UNIT_SPEC_ID_KEY 0x12
-#define SBP2_UNIT_SW_VERSION_KEY 0x13
-#define SBP2_COMMAND_SET_SPEC_ID_KEY 0x38
-#define SBP2_COMMAND_SET_KEY 0x39
-#define SBP2_UNIT_CHARACTERISTICS_KEY 0x3a
-#define SBP2_DEVICE_TYPE_AND_LUN_KEY 0x14
-#define SBP2_FIRMWARE_REVISION_KEY 0x3c
-
-#define SBP2_AGENT_STATE_OFFSET 0x00ULL
-#define SBP2_AGENT_RESET_OFFSET 0x04ULL
-#define SBP2_ORB_POINTER_OFFSET 0x08ULL
-#define SBP2_DOORBELL_OFFSET 0x10ULL
-#define SBP2_UNSOLICITED_STATUS_ENABLE_OFFSET 0x14ULL
-#define SBP2_UNSOLICITED_STATUS_VALUE 0xf
-
-#define SBP2_BUSY_TIMEOUT_ADDRESS 0xfffff0000210ULL
-/* biggest possible value for Single Phase Retry count is 0xf */
-#define SBP2_BUSY_TIMEOUT_VALUE 0xf
-
-#define SBP2_AGENT_RESET_DATA 0xf
-
-#define SBP2_UNIT_SPEC_ID_ENTRY 0x0000609e
-#define SBP2_SW_VERSION_ENTRY 0x00010483
-
-/*
- * The default maximum s/g segment size of a FireWire controller is
- * usually 0x10000, but SBP-2 only allows 0xffff. Since buffers have to
- * be quadlet-aligned, we set the length limit to 0xffff & ~3.
- */
-#define SBP2_MAX_SEG_SIZE 0xfffc
-
-/*
- * There is no real limitation of the queue depth (i.e. length of the linked
- * list of command ORBs) at the target. The chosen depth is merely an
- * implementation detail of the sbp2 driver.
- */
-#define SBP2_MAX_CMDS 8
-
-#define SBP2_SCSI_STATUS_GOOD 0x0
-#define SBP2_SCSI_STATUS_CHECK_CONDITION 0x2
-#define SBP2_SCSI_STATUS_CONDITION_MET 0x4
-#define SBP2_SCSI_STATUS_BUSY 0x8
-#define SBP2_SCSI_STATUS_RESERVATION_CONFLICT 0x18
-#define SBP2_SCSI_STATUS_COMMAND_TERMINATED 0x22
-#define SBP2_SCSI_STATUS_SELECTION_TIMEOUT 0xff
-
-
-/*
- * Representations of commands and devices
- */
-
-/* Per SCSI command */
-struct sbp2_command_info {
- struct list_head list;
- struct sbp2_command_orb command_orb;
- dma_addr_t command_orb_dma;
- struct scsi_cmnd *Current_SCpnt;
- void (*Current_done)(struct scsi_cmnd *);
-
- /* Also need s/g structure for each sbp2 command */
- struct sbp2_unrestricted_page_table
- scatter_gather_element[SG_ALL] __attribute__((aligned(8)));
- dma_addr_t sge_dma;
-};
-
-/* Per FireWire host */
-struct sbp2_fwhost_info {
- struct hpsb_host *host;
- struct list_head logical_units;
-};
-
-/* Per logical unit */
-struct sbp2_lu {
- /* Operation request blocks */
- struct sbp2_command_orb *last_orb;
- dma_addr_t last_orb_dma;
- struct sbp2_login_orb *login_orb;
- dma_addr_t login_orb_dma;
- struct sbp2_login_response *login_response;
- dma_addr_t login_response_dma;
- struct sbp2_query_logins_orb *query_logins_orb;
- dma_addr_t query_logins_orb_dma;
- struct sbp2_query_logins_response *query_logins_response;
- dma_addr_t query_logins_response_dma;
- struct sbp2_reconnect_orb *reconnect_orb;
- dma_addr_t reconnect_orb_dma;
- struct sbp2_logout_orb *logout_orb;
- dma_addr_t logout_orb_dma;
- struct sbp2_status_block status_block;
-
- /* How to talk to the unit */
- u64 management_agent_addr;
- u64 command_block_agent_addr;
- u32 speed_code;
- u32 max_payload_size;
- u16 lun;
-
- /* Address for the unit to write status blocks to */
- u64 status_fifo_addr;
-
- /* Waitqueue flag for logins, reconnects, logouts, query logins */
- unsigned int access_complete:1;
-
- /* Pool of command ORBs for this logical unit */
- spinlock_t cmd_orb_lock;
- struct list_head cmd_orb_inuse;
- struct list_head cmd_orb_completed;
-
- /* Backlink to FireWire host; list of units attached to the host */
- struct sbp2_fwhost_info *hi;
- struct list_head lu_list;
-
- /* IEEE 1394 core's device representations */
- struct node_entry *ne;
- struct unit_directory *ud;
-
- /* SCSI core's device representations */
- struct scsi_device *sdev;
- struct Scsi_Host *shost;
-
- /* Device-specific workarounds/brokenness */
- unsigned workarounds;
-
- /* Connection state */
- atomic_t state;
-
- /* For deferred requests to the fetch agent */
- struct work_struct protocol_work;
-};
-
-/* For use in sbp2_lu.state */
-enum sbp2lu_state_types {
- SBP2LU_STATE_RUNNING, /* all normal */
- SBP2LU_STATE_IN_RESET, /* between bus reset and reconnect */
- SBP2LU_STATE_IN_SHUTDOWN /* when sbp2_remove was called */
-};
-
-/* For use in sbp2_lu.workarounds and in the corresponding
- * module load parameter */
-#define SBP2_WORKAROUND_128K_MAX_TRANS 0x1
-#define SBP2_WORKAROUND_INQUIRY_36 0x2
-#define SBP2_WORKAROUND_MODE_SENSE_8 0x4
-#define SBP2_WORKAROUND_FIX_CAPACITY 0x8
-#define SBP2_WORKAROUND_DELAY_INQUIRY 0x10
-#define SBP2_INQUIRY_DELAY 12
-#define SBP2_WORKAROUND_POWER_CONDITION 0x20
-#define SBP2_WORKAROUND_OVERRIDE 0x100
-
-#endif /* SBP2_H */
diff --git a/drivers/ieee1394/video1394.c b/drivers/ieee1394/video1394.c
deleted file mode 100644
index 5c74f796d7f1..000000000000
--- a/drivers/ieee1394/video1394.c
+++ /dev/null
@@ -1,1528 +0,0 @@
-/*
- * video1394.c - video driver for OHCI 1394 boards
- * Copyright (C)1999,2000 Sebastien Rougeaux <sebastien.rougeaux@anu.edu.au>
- * Peter Schlaile <udbz@rz.uni-karlsruhe.de>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software Foundation,
- * Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
- *
- * NOTES:
- *
- * ioctl return codes:
- * EFAULT only for an invalid argp address
- * EINVAL for out of range values
- * EBUSY when trying to use an already used resource
- * ESRCH when trying to free/stop a not used resource
- * EAGAIN for resource allocation failure that could perhaps succeed later
- * ENOTTY for unsupported ioctl request
- *
- */
-#include <linux/kernel.h>
-#include <linux/list.h>
-#include <linux/sched.h>
-#include <linux/slab.h>
-#include <linux/interrupt.h>
-#include <linux/wait.h>
-#include <linux/errno.h>
-#include <linux/module.h>
-#include <linux/init.h>
-#include <linux/pci.h>
-#include <linux/fs.h>
-#include <linux/poll.h>
-#include <linux/delay.h>
-#include <linux/bitops.h>
-#include <linux/types.h>
-#include <linux/vmalloc.h>
-#include <linux/timex.h>
-#include <linux/mm.h>
-#include <linux/compat.h>
-#include <linux/cdev.h>
-
-#include "dma.h"
-#include "highlevel.h"
-#include "hosts.h"
-#include "ieee1394.h"
-#include "ieee1394_core.h"
-#include "ieee1394_hotplug.h"
-#include "ieee1394_types.h"
-#include "nodemgr.h"
-#include "ohci1394.h"
-#include "video1394.h"
-
-#define ISO_CHANNELS 64
-
-struct it_dma_prg {
- struct dma_cmd begin;
- quadlet_t data[4];
- struct dma_cmd end;
- quadlet_t pad[4]; /* FIXME: quick hack for memory alignment */
-};
-
-struct dma_iso_ctx {
- struct ti_ohci *ohci;
- int type; /* OHCI_ISO_TRANSMIT or OHCI_ISO_RECEIVE */
- struct ohci1394_iso_tasklet iso_tasklet;
- int channel;
- int ctx;
- int last_buffer;
- int * next_buffer; /* For ISO Transmit of video packets
- to write the correct SYT field
- into the next block */
- unsigned int num_desc;
- unsigned int buf_size;
- unsigned int frame_size;
- unsigned int packet_size;
- unsigned int left_size;
- unsigned int nb_cmd;
-
- struct dma_region dma;
-
- struct dma_prog_region *prg_reg;
-
- struct dma_cmd **ir_prg;
- struct it_dma_prg **it_prg;
-
- unsigned int *buffer_status;
- unsigned int *buffer_prg_assignment;
- struct timeval *buffer_time; /* time when the buffer was received */
- unsigned int *last_used_cmd; /* For ISO Transmit with
- variable-sized packets only! */
- int ctrlClear;
- int ctrlSet;
- int cmdPtr;
- int ctxMatch;
- wait_queue_head_t waitq;
- spinlock_t lock;
- unsigned int syt_offset;
- int flags;
-
- struct list_head link;
-};
-
-
-struct file_ctx {
- struct ti_ohci *ohci;
- struct list_head context_list;
- struct dma_iso_ctx *current_ctx;
-};
-
-#ifdef CONFIG_IEEE1394_VERBOSEDEBUG
-#define VIDEO1394_DEBUG
-#endif
-
-#ifdef DBGMSG
-#undef DBGMSG
-#endif
-
-#ifdef VIDEO1394_DEBUG
-#define DBGMSG(card, fmt, args...) \
-printk(KERN_INFO "video1394_%d: " fmt "\n" , card , ## args)
-#else
-#define DBGMSG(card, fmt, args...) do {} while (0)
-#endif
-
-/* print general (card independent) information */
-#define PRINT_G(level, fmt, args...) \
-printk(level "video1394: " fmt "\n" , ## args)
-
-/* print card specific information */
-#define PRINT(level, card, fmt, args...) \
-printk(level "video1394_%d: " fmt "\n" , card , ## args)
-
-static void wakeup_dma_ir_ctx(unsigned long l);
-static void wakeup_dma_it_ctx(unsigned long l);
-
-static struct hpsb_highlevel video1394_highlevel;
-
-static int free_dma_iso_ctx(struct dma_iso_ctx *d)
-{
- int i;
-
- DBGMSG(d->ohci->host->id, "Freeing dma_iso_ctx %d", d->ctx);
-
- ohci1394_stop_context(d->ohci, d->ctrlClear, NULL);
- if (d->iso_tasklet.link.next != NULL)
- ohci1394_unregister_iso_tasklet(d->ohci, &d->iso_tasklet);
-
- dma_region_free(&d->dma);
-
- if (d->prg_reg) {
- for (i = 0; i < d->num_desc; i++)
- dma_prog_region_free(&d->prg_reg[i]);
- kfree(d->prg_reg);
- }
-
- kfree(d->ir_prg);
- kfree(d->it_prg);
- kfree(d->buffer_status);
- kfree(d->buffer_prg_assignment);
- kfree(d->buffer_time);
- kfree(d->last_used_cmd);
- kfree(d->next_buffer);
- list_del(&d->link);
- kfree(d);
-
- return 0;
-}
-
-static struct dma_iso_ctx *
-alloc_dma_iso_ctx(struct ti_ohci *ohci, int type, int num_desc,
- int buf_size, int channel, unsigned int packet_size)
-{
- struct dma_iso_ctx *d;
- int i;
-
- d = kzalloc(sizeof(*d), GFP_KERNEL);
- if (!d) {
- PRINT(KERN_ERR, ohci->host->id, "Failed to allocate dma_iso_ctx");
- return NULL;
- }
-
- d->ohci = ohci;
- d->type = type;
- d->channel = channel;
- d->num_desc = num_desc;
- d->frame_size = buf_size;
- d->buf_size = PAGE_ALIGN(buf_size);
- d->last_buffer = -1;
- INIT_LIST_HEAD(&d->link);
- init_waitqueue_head(&d->waitq);
-
- /* Init the regions for easy cleanup */
- dma_region_init(&d->dma);
-
- if (dma_region_alloc(&d->dma, (d->num_desc - 1) * d->buf_size, ohci->dev,
- PCI_DMA_BIDIRECTIONAL)) {
- PRINT(KERN_ERR, ohci->host->id, "Failed to allocate dma buffer");
- free_dma_iso_ctx(d);
- return NULL;
- }
-
- if (type == OHCI_ISO_RECEIVE)
- ohci1394_init_iso_tasklet(&d->iso_tasklet, type,
- wakeup_dma_ir_ctx,
- (unsigned long) d);
- else
- ohci1394_init_iso_tasklet(&d->iso_tasklet, type,
- wakeup_dma_it_ctx,
- (unsigned long) d);
-
- if (ohci1394_register_iso_tasklet(ohci, &d->iso_tasklet) < 0) {
- PRINT(KERN_ERR, ohci->host->id, "no free iso %s contexts",
- type == OHCI_ISO_RECEIVE ? "receive" : "transmit");
- free_dma_iso_ctx(d);
- return NULL;
- }
- d->ctx = d->iso_tasklet.context;
-
- d->prg_reg = kmalloc(d->num_desc * sizeof(*d->prg_reg), GFP_KERNEL);
- if (!d->prg_reg) {
- PRINT(KERN_ERR, ohci->host->id, "Failed to allocate ir prg regs");
- free_dma_iso_ctx(d);
- return NULL;
- }
- /* Makes for easier cleanup */
- for (i = 0; i < d->num_desc; i++)
- dma_prog_region_init(&d->prg_reg[i]);
-
- if (type == OHCI_ISO_RECEIVE) {
- d->ctrlSet = OHCI1394_IsoRcvContextControlSet+32*d->ctx;
- d->ctrlClear = OHCI1394_IsoRcvContextControlClear+32*d->ctx;
- d->cmdPtr = OHCI1394_IsoRcvCommandPtr+32*d->ctx;
- d->ctxMatch = OHCI1394_IsoRcvContextMatch+32*d->ctx;
-
- d->ir_prg = kzalloc(d->num_desc * sizeof(*d->ir_prg),
- GFP_KERNEL);
-
- if (!d->ir_prg) {
- PRINT(KERN_ERR, ohci->host->id, "Failed to allocate dma ir prg");
- free_dma_iso_ctx(d);
- return NULL;
- }
-
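- /* One INPUT_MORE command per page of the buffer plus one, since the
- * first page is split by initialize_dma_ir_prg() into a 4-byte
- * descriptor followed by a (PAGE_SIZE - 4)-byte descriptor; the last
- * descriptor reads whatever remains of the frame (left_size). */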
- d->nb_cmd = d->buf_size / PAGE_SIZE + 1;
- d->left_size = (d->frame_size % PAGE_SIZE) ?
- d->frame_size % PAGE_SIZE : PAGE_SIZE;
-
- for (i = 0;i < d->num_desc; i++) {
- if (dma_prog_region_alloc(&d->prg_reg[i], d->nb_cmd *
- sizeof(struct dma_cmd), ohci->dev)) {
- PRINT(KERN_ERR, ohci->host->id, "Failed to allocate dma ir prg");
- free_dma_iso_ctx(d);
- return NULL;
- }
- d->ir_prg[i] = (struct dma_cmd *)d->prg_reg[i].kvirt;
- }
-
- } else { /* OHCI_ISO_TRANSMIT */
- d->ctrlSet = OHCI1394_IsoXmitContextControlSet+16*d->ctx;
- d->ctrlClear = OHCI1394_IsoXmitContextControlClear+16*d->ctx;
- d->cmdPtr = OHCI1394_IsoXmitCommandPtr+16*d->ctx;
-
- d->it_prg = kzalloc(d->num_desc * sizeof(*d->it_prg),
- GFP_KERNEL);
-
- if (!d->it_prg) {
- PRINT(KERN_ERR, ohci->host->id,
- "Failed to allocate dma it prg");
- free_dma_iso_ctx(d);
- return NULL;
- }
-
- d->packet_size = packet_size;
-
- if (PAGE_SIZE % packet_size || packet_size>4096) {
- PRINT(KERN_ERR, ohci->host->id,
- "Packet size %d (page_size: %ld) "
- "not yet supported\n",
- packet_size, PAGE_SIZE);
- free_dma_iso_ctx(d);
- return NULL;
- }
-
- d->nb_cmd = d->frame_size / d->packet_size;
- if (d->frame_size % d->packet_size) {
- d->nb_cmd++;
- d->left_size = d->frame_size % d->packet_size;
- } else
- d->left_size = d->packet_size;
-
- for (i = 0; i < d->num_desc; i++) {
- if (dma_prog_region_alloc(&d->prg_reg[i], d->nb_cmd *
- sizeof(struct it_dma_prg), ohci->dev)) {
- PRINT(KERN_ERR, ohci->host->id, "Failed to allocate dma it prg");
- free_dma_iso_ctx(d);
- return NULL;
- }
- d->it_prg[i] = (struct it_dma_prg *)d->prg_reg[i].kvirt;
- }
- }
-
- d->buffer_status =
- kzalloc(d->num_desc * sizeof(*d->buffer_status), GFP_KERNEL);
- d->buffer_prg_assignment =
- kzalloc(d->num_desc * sizeof(*d->buffer_prg_assignment), GFP_KERNEL);
- d->buffer_time =
- kzalloc(d->num_desc * sizeof(*d->buffer_time), GFP_KERNEL);
- d->last_used_cmd =
- kzalloc(d->num_desc * sizeof(*d->last_used_cmd), GFP_KERNEL);
- d->next_buffer =
- kzalloc(d->num_desc * sizeof(*d->next_buffer), GFP_KERNEL);
-
- if (!d->buffer_status || !d->buffer_prg_assignment || !d->buffer_time ||
- !d->last_used_cmd || !d->next_buffer) {
- PRINT(KERN_ERR, ohci->host->id,
- "Failed to allocate dma_iso_ctx member");
- free_dma_iso_ctx(d);
- return NULL;
- }
-
- spin_lock_init(&d->lock);
-
- DBGMSG(ohci->host->id, "Iso %s DMA: %d buffers "
- "of size %d allocated for a frame size %d, each with %d prgs",
- (type == OHCI_ISO_RECEIVE) ? "receive" : "transmit",
- d->num_desc - 1, d->buf_size, d->frame_size, d->nb_cmd);
-
- return d;
-}
-
-static void reset_ir_status(struct dma_iso_ctx *d, int n)
-{
- int i;
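- /* Reload each descriptor's status quadlet with its full residual
- * count; this also clears the upper 16 bits (xferStatus), which is
- * what wakeup_dma_ir_ctx() tests to detect a completed frame. */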
- d->ir_prg[n][0].status = cpu_to_le32(4);
- d->ir_prg[n][1].status = cpu_to_le32(PAGE_SIZE-4);
- for (i = 2; i < d->nb_cmd - 1; i++)
- d->ir_prg[n][i].status = cpu_to_le32(PAGE_SIZE);
- d->ir_prg[n][i].status = cpu_to_le32(d->left_size);
-}
-
-static void reprogram_dma_ir_prg(struct dma_iso_ctx *d, int n, int buffer, int flags)
-{
- struct dma_cmd *ir_prg = d->ir_prg[n];
- unsigned long buf = (unsigned long)d->dma.kvirt + buffer * d->buf_size;
- int i;
-
- d->buffer_prg_assignment[n] = buffer;
-
- ir_prg[0].address = cpu_to_le32(dma_region_offset_to_bus(&d->dma, buf -
- (unsigned long)d->dma.kvirt));
- ir_prg[1].address = cpu_to_le32(dma_region_offset_to_bus(&d->dma,
- (buf + 4) - (unsigned long)d->dma.kvirt));
-
- for (i=2;i<d->nb_cmd-1;i++) {
- ir_prg[i].address = cpu_to_le32(dma_region_offset_to_bus(&d->dma,
- (buf+(i-1)*PAGE_SIZE) -
- (unsigned long)d->dma.kvirt));
- }
-
- ir_prg[i].control = cpu_to_le32(DMA_CTL_INPUT_MORE | DMA_CTL_UPDATE |
- DMA_CTL_IRQ | DMA_CTL_BRANCH | d->left_size);
- ir_prg[i].address = cpu_to_le32(dma_region_offset_to_bus(&d->dma,
- (buf+(i-1)*PAGE_SIZE) - (unsigned long)d->dma.kvirt));
-}
-
-static void initialize_dma_ir_prg(struct dma_iso_ctx *d, int n, int flags)
-{
- struct dma_cmd *ir_prg = d->ir_prg[n];
- struct dma_prog_region *ir_reg = &d->prg_reg[n];
- unsigned long buf = (unsigned long)d->dma.kvirt;
- int i;
-
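- /* Branch addresses below carry the descriptor-block size Z in their
- * low four bits; a block of one 16-byte INPUT_MORE descriptor gives
- * the "| 0x1". */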
- /* the first descriptor will read only 4 bytes */
- ir_prg[0].control = cpu_to_le32(DMA_CTL_INPUT_MORE | DMA_CTL_UPDATE |
- DMA_CTL_BRANCH | 4);
-
- /* set the sync flag */
- if (flags & VIDEO1394_SYNC_FRAMES)
- ir_prg[0].control |= cpu_to_le32(DMA_CTL_WAIT);
-
- ir_prg[0].address = cpu_to_le32(dma_region_offset_to_bus(&d->dma, buf -
- (unsigned long)d->dma.kvirt));
- ir_prg[0].branchAddress = cpu_to_le32((dma_prog_region_offset_to_bus(ir_reg,
- 1 * sizeof(struct dma_cmd)) & 0xfffffff0) | 0x1);
-
- /* More than one DMA page per frame (a single-page frame has d->nb_cmd == 2) */
- if (d->nb_cmd > 2) {
- /* The second descriptor will read PAGE_SIZE-4 bytes */
- ir_prg[1].control = cpu_to_le32(DMA_CTL_INPUT_MORE | DMA_CTL_UPDATE |
- DMA_CTL_BRANCH | (PAGE_SIZE-4));
- ir_prg[1].address = cpu_to_le32(dma_region_offset_to_bus(&d->dma, (buf + 4) -
- (unsigned long)d->dma.kvirt));
- ir_prg[1].branchAddress = cpu_to_le32((dma_prog_region_offset_to_bus(ir_reg,
- 2 * sizeof(struct dma_cmd)) & 0xfffffff0) | 0x1);
-
- for (i = 2; i < d->nb_cmd - 1; i++) {
- ir_prg[i].control = cpu_to_le32(DMA_CTL_INPUT_MORE | DMA_CTL_UPDATE |
- DMA_CTL_BRANCH | PAGE_SIZE);
- ir_prg[i].address = cpu_to_le32(dma_region_offset_to_bus(&d->dma,
- (buf+(i-1)*PAGE_SIZE) -
- (unsigned long)d->dma.kvirt));
-
- ir_prg[i].branchAddress =
- cpu_to_le32((dma_prog_region_offset_to_bus(ir_reg,
- (i + 1) * sizeof(struct dma_cmd)) & 0xfffffff0) | 0x1);
- }
-
- /* The last descriptor will generate an interrupt */
- ir_prg[i].control = cpu_to_le32(DMA_CTL_INPUT_MORE | DMA_CTL_UPDATE |
- DMA_CTL_IRQ | DMA_CTL_BRANCH | d->left_size);
- ir_prg[i].address = cpu_to_le32(dma_region_offset_to_bus(&d->dma,
- (buf+(i-1)*PAGE_SIZE) -
- (unsigned long)d->dma.kvirt));
- } else {
- /* Only one DMA page is used. Read d->left_size immediately and */
- /* generate an interrupt as this is also the last page. */
- ir_prg[1].control = cpu_to_le32(DMA_CTL_INPUT_MORE | DMA_CTL_UPDATE |
- DMA_CTL_IRQ | DMA_CTL_BRANCH | (d->left_size-4));
- ir_prg[1].address = cpu_to_le32(dma_region_offset_to_bus(&d->dma,
- (buf + 4) - (unsigned long)d->dma.kvirt));
- }
-}
-
-static void initialize_dma_ir_ctx(struct dma_iso_ctx *d, int tag, int flags)
-{
- struct ti_ohci *ohci = (struct ti_ohci *)d->ohci;
- int i;
-
- d->flags = flags;
-
- ohci1394_stop_context(ohci, d->ctrlClear, NULL);
-
- for (i=0;i<d->num_desc;i++) {
- initialize_dma_ir_prg(d, i, flags);
- reset_ir_status(d, i);
- }
-
- /* reset the ctrl register */
- reg_write(ohci, d->ctrlClear, 0xf0000000);
-
- /* Set bufferFill */
- reg_write(ohci, d->ctrlSet, 0x80000000);
-
- /* Set isoch header */
- if (flags & VIDEO1394_INCLUDE_ISO_HEADERS)
- reg_write(ohci, d->ctrlSet, 0x40000000);
-
- /* Set the context match register to match on all tags,
- sync for sync tag, and listen to d->channel */
- reg_write(ohci, d->ctxMatch, 0xf0000000|((tag&0xf)<<8)|d->channel);
-
- /* Set up isoRecvIntMask to generate interrupts */
- reg_write(ohci, OHCI1394_IsoRecvIntMaskSet, 1<<d->ctx);
-}
-
-/* find which context is listening to this channel */
-static struct dma_iso_ctx *
-find_ctx(struct list_head *list, int type, int channel)
-{
- struct dma_iso_ctx *ctx;
-
- list_for_each_entry(ctx, list, link) {
- if (ctx->type == type && ctx->channel == channel)
- return ctx;
- }
-
- return NULL;
-}
-
-static void wakeup_dma_ir_ctx(unsigned long l)
-{
- struct dma_iso_ctx *d = (struct dma_iso_ctx *) l;
- int i;
-
- spin_lock(&d->lock);
-
- for (i = 0; i < d->num_desc; i++) {
- if (d->ir_prg[i][d->nb_cmd-1].status & cpu_to_le32(0xFFFF0000)) {
- reset_ir_status(d, i);
- d->buffer_status[d->buffer_prg_assignment[i]] = VIDEO1394_BUFFER_READY;
- do_gettimeofday(&d->buffer_time[d->buffer_prg_assignment[i]]);
- dma_region_sync_for_cpu(&d->dma,
- d->buffer_prg_assignment[i] * d->buf_size,
- d->buf_size);
- }
- }
-
- spin_unlock(&d->lock);
-
- if (waitqueue_active(&d->waitq))
- wake_up_interruptible(&d->waitq);
-}
-
-static inline void put_timestamp(struct ti_ohci *ohci, struct dma_iso_ctx * d,
- int n)
-{
- unsigned char* buf = d->dma.kvirt + n * d->buf_size;
- u32 cycleTimer;
- u32 timeStamp;
-
- if (n == -1) {
- return;
- }
-
- cycleTimer = reg_read(ohci, OHCI1394_IsochronousCycleTimer);
-
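- /* Build the 16-bit SYT timestamp: add syt_offset to the 12-bit cycle
- * offset (0..3071), fold the overflow into the cycle-count field at
- * bit 12, and merge in the low four bits of the current cycle count. */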
- timeStamp = ((cycleTimer & 0x0fff) + d->syt_offset); /* 11059 = 450 us */
- timeStamp = (timeStamp % 3072 + ((timeStamp / 3072) << 12)
- + (cycleTimer & 0xf000)) & 0xffff;
-
- buf[6] = timeStamp >> 8;
- buf[7] = timeStamp & 0xff;
-
- /* if the first packet is an empty packet, put the timestamp into the next full one too */
- if ( (le32_to_cpu(d->it_prg[n][0].data[1]) >>16) == 0x008) {
- buf += d->packet_size;
- buf[6] = timeStamp >> 8;
- buf[7] = timeStamp & 0xff;
- }
-
- /* do the next buffer frame too in case of irq latency */
- n = d->next_buffer[n];
- if (n == -1) {
- return;
- }
- buf = d->dma.kvirt + n * d->buf_size;
-
- timeStamp += (d->last_used_cmd[n] << 12) & 0xffff;
-
- buf[6] = timeStamp >> 8;
- buf[7] = timeStamp & 0xff;
-
- /* if the first packet is an empty packet, put the timestamp into the next full one too */
- if ( (le32_to_cpu(d->it_prg[n][0].data[1]) >>16) == 0x008) {
- buf += d->packet_size;
- buf[6] = timeStamp >> 8;
- buf[7] = timeStamp & 0xff;
- }
-
-#if 0
- printk("curr: %d, next: %d, cycleTimer: %08x timeStamp: %08x\n",
- curr, n, cycleTimer, timeStamp);
-#endif
-}
-
-static void wakeup_dma_it_ctx(unsigned long l)
-{
- struct dma_iso_ctx *d = (struct dma_iso_ctx *) l;
- struct ti_ohci *ohci = d->ohci;
- int i;
-
- spin_lock(&d->lock);
-
- for (i = 0; i < d->num_desc; i++) {
- if (d->it_prg[i][d->last_used_cmd[i]].end.status &
- cpu_to_le32(0xFFFF0000)) {
- int next = d->next_buffer[i];
- put_timestamp(ohci, d, next);
- d->it_prg[i][d->last_used_cmd[i]].end.status = 0;
- d->buffer_status[d->buffer_prg_assignment[i]] = VIDEO1394_BUFFER_READY;
- }
- }
-
- spin_unlock(&d->lock);
-
- if (waitqueue_active(&d->waitq))
- wake_up_interruptible(&d->waitq);
-}
-
-static void reprogram_dma_it_prg(struct dma_iso_ctx *d, int n, int buffer)
-{
- struct it_dma_prg *it_prg = d->it_prg[n];
- unsigned long buf = (unsigned long)d->dma.kvirt + buffer * d->buf_size;
- int i;
-
- d->buffer_prg_assignment[n] = buffer;
- for (i=0;i<d->nb_cmd;i++) {
- it_prg[i].end.address =
- cpu_to_le32(dma_region_offset_to_bus(&d->dma,
- (buf+i*d->packet_size) - (unsigned long)d->dma.kvirt));
- }
-}
-
-static void initialize_dma_it_prg(struct dma_iso_ctx *d, int n, int sync_tag)
-{
- struct it_dma_prg *it_prg = d->it_prg[n];
- struct dma_prog_region *it_reg = &d->prg_reg[n];
- unsigned long buf = (unsigned long)d->dma.kvirt;
- int i;
- d->last_used_cmd[n] = d->nb_cmd - 1;
- for (i=0;i<d->nb_cmd;i++) {
-
- it_prg[i].begin.control = cpu_to_le32(DMA_CTL_OUTPUT_MORE |
- DMA_CTL_IMMEDIATE | 8) ;
- it_prg[i].begin.address = 0;
-
- it_prg[i].begin.status = 0;
-
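- /* The 8 bytes in data[0..1] are emitted by the OUTPUT_MORE-immediate
- * descriptor and form the iso packet header: speed, tag, channel and
- * tcode in data[0] (plus the sync field on the first packet), payload
- * length in the upper 16 bits of data[1]. */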
- it_prg[i].data[0] = cpu_to_le32(
- (IEEE1394_SPEED_100 << 16)
- | (/* tag */ 1 << 14)
- | (d->channel << 8)
- | (TCODE_ISO_DATA << 4));
- if (i==0) it_prg[i].data[0] |= cpu_to_le32(sync_tag);
- it_prg[i].data[1] = cpu_to_le32(d->packet_size << 16);
- it_prg[i].data[2] = 0;
- it_prg[i].data[3] = 0;
-
- it_prg[i].end.control = cpu_to_le32(DMA_CTL_OUTPUT_LAST |
- DMA_CTL_BRANCH);
- it_prg[i].end.address =
- cpu_to_le32(dma_region_offset_to_bus(&d->dma, (buf+i*d->packet_size) -
- (unsigned long)d->dma.kvirt));
-
- if (i<d->nb_cmd-1) {
- it_prg[i].end.control |= cpu_to_le32(d->packet_size);
- it_prg[i].begin.branchAddress =
- cpu_to_le32((dma_prog_region_offset_to_bus(it_reg, (i + 1) *
- sizeof(struct it_dma_prg)) & 0xfffffff0) | 0x3);
- it_prg[i].end.branchAddress =
- cpu_to_le32((dma_prog_region_offset_to_bus(it_reg, (i + 1) *
- sizeof(struct it_dma_prg)) & 0xfffffff0) | 0x3);
- } else {
- /* the last prg generates an interrupt */
- it_prg[i].end.control |= cpu_to_le32(DMA_CTL_UPDATE |
- DMA_CTL_IRQ | d->left_size);
- /* the last prg doesn't branch */
- it_prg[i].begin.branchAddress = 0;
- it_prg[i].end.branchAddress = 0;
- }
- it_prg[i].end.status = 0;
- }
-}
-
-static void initialize_dma_it_prg_var_packet_queue(
- struct dma_iso_ctx *d, int n, unsigned int * packet_sizes,
- struct ti_ohci *ohci)
-{
- struct it_dma_prg *it_prg = d->it_prg[n];
- struct dma_prog_region *it_reg = &d->prg_reg[n];
- int i;
-
-#if 0
- if (n != -1) {
- put_timestamp(ohci, d, n);
- }
-#endif
- d->last_used_cmd[n] = d->nb_cmd - 1;
-
- for (i = 0; i < d->nb_cmd; i++) {
- unsigned int size;
- if (packet_sizes[i] > d->packet_size) {
- size = d->packet_size;
- } else {
- size = packet_sizes[i];
- }
- it_prg[i].data[1] = cpu_to_le32(size << 16);
- it_prg[i].end.control = cpu_to_le32(DMA_CTL_OUTPUT_LAST | DMA_CTL_BRANCH);
-
- if (i < d->nb_cmd-1 && packet_sizes[i+1] != 0) {
- it_prg[i].end.control |= cpu_to_le32(size);
- it_prg[i].begin.branchAddress =
- cpu_to_le32((dma_prog_region_offset_to_bus(it_reg, (i + 1) *
- sizeof(struct it_dma_prg)) & 0xfffffff0) | 0x3);
- it_prg[i].end.branchAddress =
- cpu_to_le32((dma_prog_region_offset_to_bus(it_reg, (i + 1) *
- sizeof(struct it_dma_prg)) & 0xfffffff0) | 0x3);
- } else {
- /* the last prg generates an interrupt */
- it_prg[i].end.control |= cpu_to_le32(DMA_CTL_UPDATE |
- DMA_CTL_IRQ | size);
- /* the last prg doesn't branch */
- it_prg[i].begin.branchAddress = 0;
- it_prg[i].end.branchAddress = 0;
- d->last_used_cmd[n] = i;
- break;
- }
- }
-}
-
-static void initialize_dma_it_ctx(struct dma_iso_ctx *d, int sync_tag,
- unsigned int syt_offset, int flags)
-{
- struct ti_ohci *ohci = (struct ti_ohci *)d->ohci;
- int i;
-
- d->flags = flags;
- d->syt_offset = (syt_offset == 0 ? 11000 : syt_offset);
-
- ohci1394_stop_context(ohci, d->ctrlClear, NULL);
-
- for (i=0;i<d->num_desc;i++)
- initialize_dma_it_prg(d, i, sync_tag);
-
- /* Set up isoRecvIntMask to generate interrupts */
- reg_write(ohci, OHCI1394_IsoXmitIntMaskSet, 1<<d->ctx);
-}
-
-static inline unsigned video1394_buffer_state(struct dma_iso_ctx *d,
- unsigned int buffer)
-{
- unsigned long flags;
- unsigned int ret;
- spin_lock_irqsave(&d->lock, flags);
- ret = d->buffer_status[buffer];
- spin_unlock_irqrestore(&d->lock, flags);
- return ret;
-}
-
-static long video1394_ioctl(struct file *file,
- unsigned int cmd, unsigned long arg)
-{
- struct file_ctx *ctx = file->private_data;
- struct ti_ohci *ohci = ctx->ohci;
- unsigned long flags;
- void __user *argp = (void __user *)arg;
-
- switch(cmd)
- {
- case VIDEO1394_IOC_LISTEN_CHANNEL:
- case VIDEO1394_IOC_TALK_CHANNEL:
- {
- struct video1394_mmap v;
- u64 mask;
- struct dma_iso_ctx *d;
- int i;
-
- if (copy_from_user(&v, argp, sizeof(v)))
- return -EFAULT;
-
- /* if channel < 0, find lowest available one */
- if (v.channel < 0) {
- mask = (u64)0x1;
- for (i=0; ; i++) {
- if (i == ISO_CHANNELS) {
- PRINT(KERN_ERR, ohci->host->id,
- "No free channel found");
- return -EAGAIN;
- }
- if (!(ohci->ISO_channel_usage & mask)) {
- v.channel = i;
- PRINT(KERN_INFO, ohci->host->id, "Found free channel %d", i);
- break;
- }
- mask = mask << 1;
- }
- } else if (v.channel >= ISO_CHANNELS) {
- PRINT(KERN_ERR, ohci->host->id,
- "Iso channel %d out of bounds", v.channel);
- return -EINVAL;
- } else {
- mask = (u64)0x1<<v.channel;
- }
- DBGMSG(ohci->host->id, "mask: %08X%08X usage: %08X%08X\n",
- (u32)(mask>>32),(u32)(mask&0xffffffff),
- (u32)(ohci->ISO_channel_usage>>32),
- (u32)(ohci->ISO_channel_usage&0xffffffff));
- if (ohci->ISO_channel_usage & mask) {
- PRINT(KERN_ERR, ohci->host->id,
- "Channel %d is already taken", v.channel);
- return -EBUSY;
- }
-
- if (v.buf_size == 0 || v.buf_size > VIDEO1394_MAX_SIZE) {
- PRINT(KERN_ERR, ohci->host->id,
- "Invalid %d length buffer requested",v.buf_size);
- return -EINVAL;
- }
-
- if (v.nb_buffers == 0 || v.nb_buffers > VIDEO1394_MAX_SIZE) {
- PRINT(KERN_ERR, ohci->host->id,
- "Invalid %d buffers requested",v.nb_buffers);
- return -EINVAL;
- }
-
- if (v.nb_buffers * v.buf_size > VIDEO1394_MAX_SIZE) {
- PRINT(KERN_ERR, ohci->host->id,
- "%d buffers of size %d bytes is too big",
- v.nb_buffers, v.buf_size);
- return -EINVAL;
- }
-
- if (cmd == VIDEO1394_IOC_LISTEN_CHANNEL) {
- d = alloc_dma_iso_ctx(ohci, OHCI_ISO_RECEIVE,
- v.nb_buffers + 1, v.buf_size,
- v.channel, 0);
-
- if (d == NULL) {
- PRINT(KERN_ERR, ohci->host->id,
- "Couldn't allocate ir context");
- return -EAGAIN;
- }
- initialize_dma_ir_ctx(d, v.sync_tag, v.flags);
-
- ctx->current_ctx = d;
-
- v.buf_size = d->buf_size;
- list_add_tail(&d->link, &ctx->context_list);
-
- DBGMSG(ohci->host->id,
- "iso context %d listen on channel %d",
- d->ctx, v.channel);
- }
- else {
- d = alloc_dma_iso_ctx(ohci, OHCI_ISO_TRANSMIT,
- v.nb_buffers + 1, v.buf_size,
- v.channel, v.packet_size);
-
- if (d == NULL) {
- PRINT(KERN_ERR, ohci->host->id,
- "Couldn't allocate it context");
- return -EAGAIN;
- }
- initialize_dma_it_ctx(d, v.sync_tag,
- v.syt_offset, v.flags);
-
- ctx->current_ctx = d;
-
- v.buf_size = d->buf_size;
-
- list_add_tail(&d->link, &ctx->context_list);
-
- DBGMSG(ohci->host->id,
- "Iso context %d talk on channel %d", d->ctx,
- v.channel);
- }
-
- if (copy_to_user(argp, &v, sizeof(v))) {
- /* FIXME : free allocated dma resources */
- return -EFAULT;
- }
-
- ohci->ISO_channel_usage |= mask;
-
- return 0;
- }
- case VIDEO1394_IOC_UNLISTEN_CHANNEL:
- case VIDEO1394_IOC_UNTALK_CHANNEL:
- {
- int channel;
- u64 mask;
- struct dma_iso_ctx *d;
-
- if (copy_from_user(&channel, argp, sizeof(int)))
- return -EFAULT;
-
- if (channel < 0 || channel >= ISO_CHANNELS) {
- PRINT(KERN_ERR, ohci->host->id,
- "Iso channel %d out of bound", channel);
- return -EINVAL;
- }
- mask = (u64)0x1<<channel;
- if (!(ohci->ISO_channel_usage & mask)) {
- PRINT(KERN_ERR, ohci->host->id,
- "Channel %d is not being used", channel);
- return -ESRCH;
- }
-
- /* Mark this channel as unused */
- ohci->ISO_channel_usage &= ~mask;
-
- if (cmd == VIDEO1394_IOC_UNLISTEN_CHANNEL)
- d = find_ctx(&ctx->context_list, OHCI_ISO_RECEIVE, channel);
- else
- d = find_ctx(&ctx->context_list, OHCI_ISO_TRANSMIT, channel);
-
- if (d == NULL) return -ESRCH;
- DBGMSG(ohci->host->id, "Iso context %d "
- "stop talking on channel %d", d->ctx, channel);
- free_dma_iso_ctx(d);
-
- return 0;
- }
- case VIDEO1394_IOC_LISTEN_QUEUE_BUFFER:
- {
- struct video1394_wait v;
- struct dma_iso_ctx *d;
- int next_prg;
-
- if (unlikely(copy_from_user(&v, argp, sizeof(v))))
- return -EFAULT;
-
- d = find_ctx(&ctx->context_list, OHCI_ISO_RECEIVE, v.channel);
- if (unlikely(d == NULL))
- return -EFAULT;
-
- if (unlikely(v.buffer >= d->num_desc - 1)) {
- PRINT(KERN_ERR, ohci->host->id,
- "Buffer %d out of range",v.buffer);
- return -EINVAL;
- }
-
- spin_lock_irqsave(&d->lock,flags);
-
- if (unlikely(d->buffer_status[v.buffer]==VIDEO1394_BUFFER_QUEUED)) {
- PRINT(KERN_ERR, ohci->host->id,
- "Buffer %d is already used",v.buffer);
- spin_unlock_irqrestore(&d->lock,flags);
- return -EBUSY;
- }
-
- d->buffer_status[v.buffer]=VIDEO1394_BUFFER_QUEUED;
-
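- /* Append this buffer's program to the running chain: the previous
- * program's final branchAddress is pointed at the new program below,
- * while the new program's final branch stays cleared so the context
- * stalls there until another buffer is queued. */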
- next_prg = (d->last_buffer + 1) % d->num_desc;
- if (d->last_buffer>=0)
- d->ir_prg[d->last_buffer][d->nb_cmd-1].branchAddress =
- cpu_to_le32((dma_prog_region_offset_to_bus(&d->prg_reg[next_prg], 0)
- & 0xfffffff0) | 0x1);
-
- d->last_buffer = next_prg;
- reprogram_dma_ir_prg(d, d->last_buffer, v.buffer, d->flags);
-
- d->ir_prg[d->last_buffer][d->nb_cmd-1].branchAddress = 0;
-
- spin_unlock_irqrestore(&d->lock,flags);
-
- if (!(reg_read(ohci, d->ctrlSet) & 0x8000))
- {
- DBGMSG(ohci->host->id, "Starting iso DMA ctx=%d",d->ctx);
-
- /* Tell the controller where the first program is */
- reg_write(ohci, d->cmdPtr,
- dma_prog_region_offset_to_bus(&d->prg_reg[d->last_buffer], 0) | 0x1);
-
- /* Run IR context */
- reg_write(ohci, d->ctrlSet, 0x8000);
- }
- else {
- /* Wake up dma context if necessary */
- if (!(reg_read(ohci, d->ctrlSet) & 0x400)) {
- DBGMSG(ohci->host->id,
- "Waking up iso dma ctx=%d", d->ctx);
- reg_write(ohci, d->ctrlSet, 0x1000);
- }
- }
- return 0;
-
- }
- case VIDEO1394_IOC_LISTEN_WAIT_BUFFER:
- case VIDEO1394_IOC_LISTEN_POLL_BUFFER:
- {
- struct video1394_wait v;
- struct dma_iso_ctx *d;
- int i = 0;
-
- if (unlikely(copy_from_user(&v, argp, sizeof(v))))
- return -EFAULT;
-
- d = find_ctx(&ctx->context_list, OHCI_ISO_RECEIVE, v.channel);
- if (unlikely(d == NULL))
- return -EFAULT;
-
- if (unlikely(v.buffer > d->num_desc - 1)) {
- PRINT(KERN_ERR, ohci->host->id,
- "Buffer %d out of range",v.buffer);
- return -EINVAL;
- }
-
- /*
- * Changed so that this returns the last received frame.
- */
- spin_lock_irqsave(&d->lock, flags);
- switch(d->buffer_status[v.buffer]) {
- case VIDEO1394_BUFFER_READY:
- d->buffer_status[v.buffer]=VIDEO1394_BUFFER_FREE;
- break;
- case VIDEO1394_BUFFER_QUEUED:
- if (cmd == VIDEO1394_IOC_LISTEN_POLL_BUFFER) {
- /* for polling, return error code EINTR */
- spin_unlock_irqrestore(&d->lock, flags);
- return -EINTR;
- }
-
- spin_unlock_irqrestore(&d->lock, flags);
- wait_event_interruptible(d->waitq,
- video1394_buffer_state(d, v.buffer) ==
- VIDEO1394_BUFFER_READY);
- if (signal_pending(current))
- return -EINTR;
- spin_lock_irqsave(&d->lock, flags);
- d->buffer_status[v.buffer]=VIDEO1394_BUFFER_FREE;
- break;
- default:
- PRINT(KERN_ERR, ohci->host->id,
- "Buffer %d is not queued",v.buffer);
- spin_unlock_irqrestore(&d->lock, flags);
- return -ESRCH;
- }
-
- /* set time of buffer */
- v.filltime = d->buffer_time[v.buffer];
-
- /*
- * Look ahead to see how many more buffers have been received
- */
- i=0;
- while (d->buffer_status[(v.buffer+1)%(d->num_desc - 1)]==
- VIDEO1394_BUFFER_READY) {
- v.buffer=(v.buffer+1)%(d->num_desc - 1);
- i++;
- }
- spin_unlock_irqrestore(&d->lock, flags);
-
- v.buffer=i;
- if (unlikely(copy_to_user(argp, &v, sizeof(v))))
- return -EFAULT;
-
- return 0;
- }
- case VIDEO1394_IOC_TALK_QUEUE_BUFFER:
- {
- struct video1394_wait v;
- unsigned int *psizes = NULL;
- struct dma_iso_ctx *d;
- int next_prg;
-
- if (copy_from_user(&v, argp, sizeof(v)))
- return -EFAULT;
-
- d = find_ctx(&ctx->context_list, OHCI_ISO_TRANSMIT, v.channel);
- if (d == NULL) return -EFAULT;
-
- if (v.buffer >= d->num_desc - 1) {
- PRINT(KERN_ERR, ohci->host->id,
- "Buffer %d out of range",v.buffer);
- return -EINVAL;
- }
-
- if (d->flags & VIDEO1394_VARIABLE_PACKET_SIZE) {
- int buf_size = d->nb_cmd * sizeof(*psizes);
- struct video1394_queue_variable __user *p = argp;
- unsigned int __user *qv;
-
- if (get_user(qv, &p->packet_sizes))
- return -EFAULT;
-
- psizes = memdup_user(qv, buf_size);
- if (IS_ERR(psizes))
- return PTR_ERR(psizes);
- }
-
- spin_lock_irqsave(&d->lock,flags);
-
- /* last_buffer is last_prg */
- next_prg = (d->last_buffer + 1) % d->num_desc;
- if (d->buffer_status[v.buffer]!=VIDEO1394_BUFFER_FREE) {
- PRINT(KERN_ERR, ohci->host->id,
- "Buffer %d is already used",v.buffer);
- spin_unlock_irqrestore(&d->lock,flags);
- kfree(psizes);
- return -EBUSY;
- }
-
- if (d->flags & VIDEO1394_VARIABLE_PACKET_SIZE) {
- initialize_dma_it_prg_var_packet_queue(
- d, next_prg, psizes, ohci);
- }
-
- d->buffer_status[v.buffer]=VIDEO1394_BUFFER_QUEUED;
-
- if (d->last_buffer >= 0) {
- d->it_prg[d->last_buffer]
- [ d->last_used_cmd[d->last_buffer] ].end.branchAddress =
- cpu_to_le32((dma_prog_region_offset_to_bus(&d->prg_reg[next_prg],
- 0) & 0xfffffff0) | 0x3);
-
- d->it_prg[d->last_buffer]
- [ d->last_used_cmd[d->last_buffer] ].begin.branchAddress =
- cpu_to_le32((dma_prog_region_offset_to_bus(&d->prg_reg[next_prg],
- 0) & 0xfffffff0) | 0x3);
- d->next_buffer[d->last_buffer] = (v.buffer + 1) % (d->num_desc - 1);
- }
- d->last_buffer = next_prg;
- reprogram_dma_it_prg(d, d->last_buffer, v.buffer);
- d->next_buffer[d->last_buffer] = -1;
-
- d->it_prg[d->last_buffer][d->last_used_cmd[d->last_buffer]].end.branchAddress = 0;
-
- spin_unlock_irqrestore(&d->lock,flags);
-
- if (!(reg_read(ohci, d->ctrlSet) & 0x8000))
- {
- DBGMSG(ohci->host->id, "Starting iso transmit DMA ctx=%d",
- d->ctx);
- put_timestamp(ohci, d, d->last_buffer);
- dma_region_sync_for_device(&d->dma,
- v.buffer * d->buf_size, d->buf_size);
-
- /* Tell the controller where the first program is */
- reg_write(ohci, d->cmdPtr,
- dma_prog_region_offset_to_bus(&d->prg_reg[next_prg], 0) | 0x3);
-
- /* Run IT context */
- reg_write(ohci, d->ctrlSet, 0x8000);
- }
- else {
- /* Wake up dma context if necessary */
- if (!(reg_read(ohci, d->ctrlSet) & 0x400)) {
- DBGMSG(ohci->host->id,
- "Waking up iso transmit dma ctx=%d",
- d->ctx);
- put_timestamp(ohci, d, d->last_buffer);
- dma_region_sync_for_device(&d->dma,
- v.buffer * d->buf_size, d->buf_size);
-
- reg_write(ohci, d->ctrlSet, 0x1000);
- }
- }
-
- kfree(psizes);
- return 0;
-
- }
- case VIDEO1394_IOC_TALK_WAIT_BUFFER:
- {
- struct video1394_wait v;
- struct dma_iso_ctx *d;
-
- if (copy_from_user(&v, argp, sizeof(v)))
- return -EFAULT;
-
- d = find_ctx(&ctx->context_list, OHCI_ISO_TRANSMIT, v.channel);
- if (d == NULL) return -EFAULT;
-
- if (v.buffer >= d->num_desc - 1) {
- PRINT(KERN_ERR, ohci->host->id,
- "Buffer %d out of range",v.buffer);
- return -EINVAL;
- }
-
- switch(d->buffer_status[v.buffer]) {
- case VIDEO1394_BUFFER_READY:
- d->buffer_status[v.buffer]=VIDEO1394_BUFFER_FREE;
- return 0;
- case VIDEO1394_BUFFER_QUEUED:
- wait_event_interruptible(d->waitq,
- (d->buffer_status[v.buffer] == VIDEO1394_BUFFER_READY));
- if (signal_pending(current))
- return -EINTR;
- d->buffer_status[v.buffer]=VIDEO1394_BUFFER_FREE;
- return 0;
- default:
- PRINT(KERN_ERR, ohci->host->id,
- "Buffer %d is not queued",v.buffer);
- return -ESRCH;
- }
- }
- default:
- return -ENOTTY;
- }
-}
-
-/*
- * This maps the vmalloced and reserved buffer to user space.
- *
- * FIXME:
- * - PAGE_READONLY should suffice!?
- * - remap_pfn_range is kind of inefficient for page by page remapping.
- * But e.g. pte_alloc() does not work in modules ... :-(
- */
-
-static int video1394_mmap(struct file *file, struct vm_area_struct *vma)
-{
- struct file_ctx *ctx = file->private_data;
-
- if (ctx->current_ctx == NULL) {
- PRINT(KERN_ERR, ctx->ohci->host->id,
- "Current iso context not set");
- return -EINVAL;
- }
-
- return dma_region_mmap(&ctx->current_ctx->dma, file, vma);
-}
-
-static unsigned int video1394_poll(struct file *file, poll_table *pt)
-{
- struct file_ctx *ctx;
- unsigned int mask = 0;
- unsigned long flags;
- struct dma_iso_ctx *d;
- int i;
-
- ctx = file->private_data;
- d = ctx->current_ctx;
- if (d == NULL) {
- PRINT(KERN_ERR, ctx->ohci->host->id,
- "Current iso context not set");
- return POLLERR;
- }
-
- poll_wait(file, &d->waitq, pt);
-
- spin_lock_irqsave(&d->lock, flags);
- for (i = 0; i < d->num_desc; i++) {
- if (d->buffer_status[i] == VIDEO1394_BUFFER_READY) {
- mask |= POLLIN | POLLRDNORM;
- break;
- }
- }
- spin_unlock_irqrestore(&d->lock, flags);
-
- return mask;
-}
-
-static int video1394_open(struct inode *inode, struct file *file)
-{
- int i = ieee1394_file_to_instance(file);
- struct ti_ohci *ohci;
- struct file_ctx *ctx;
-
- ohci = hpsb_get_hostinfo_bykey(&video1394_highlevel, i);
- if (ohci == NULL)
- return -EIO;
-
- ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
- if (!ctx) {
- PRINT(KERN_ERR, ohci->host->id, "Cannot malloc file_ctx");
- return -ENOMEM;
- }
-
- ctx->ohci = ohci;
- INIT_LIST_HEAD(&ctx->context_list);
- ctx->current_ctx = NULL;
- file->private_data = ctx;
-
- return nonseekable_open(inode, file);
-}
-
-static int video1394_release(struct inode *inode, struct file *file)
-{
- struct file_ctx *ctx = file->private_data;
- struct ti_ohci *ohci = ctx->ohci;
- struct list_head *lh, *next;
- u64 mask;
-
- list_for_each_safe(lh, next, &ctx->context_list) {
- struct dma_iso_ctx *d;
- d = list_entry(lh, struct dma_iso_ctx, link);
- mask = (u64) 1 << d->channel;
-
- if (!(ohci->ISO_channel_usage & mask))
- PRINT(KERN_ERR, ohci->host->id, "On release: Channel %d "
- "is not being used", d->channel);
- else
- ohci->ISO_channel_usage &= ~mask;
- DBGMSG(ohci->host->id, "On release: Iso %s context "
- "%d stop listening on channel %d",
- d->type == OHCI_ISO_RECEIVE ? "receive" : "transmit",
- d->ctx, d->channel);
- free_dma_iso_ctx(d);
- }
-
- kfree(ctx);
- file->private_data = NULL;
-
- return 0;
-}
-
-#ifdef CONFIG_COMPAT
-static long video1394_compat_ioctl(struct file *f, unsigned cmd, unsigned long arg);
-#endif
-
-static struct cdev video1394_cdev;
-static const struct file_operations video1394_fops=
-{
- .owner = THIS_MODULE,
- .unlocked_ioctl = video1394_ioctl,
-#ifdef CONFIG_COMPAT
- .compat_ioctl = video1394_compat_ioctl,
-#endif
- .poll = video1394_poll,
- .mmap = video1394_mmap,
- .open = video1394_open,
- .release = video1394_release,
- .llseek = no_llseek,
-};
-
-/*** HOTPLUG STUFF **********************************************************/
-/*
- * Export information about protocols/devices supported by this driver.
- */
-#ifdef MODULE
-static const struct ieee1394_device_id video1394_id_table[] = {
- {
- .match_flags = IEEE1394_MATCH_SPECIFIER_ID | IEEE1394_MATCH_VERSION,
- .specifier_id = CAMERA_UNIT_SPEC_ID_ENTRY & 0xffffff,
- .version = CAMERA_SW_VERSION_ENTRY & 0xffffff
- },
- {
- .match_flags = IEEE1394_MATCH_SPECIFIER_ID | IEEE1394_MATCH_VERSION,
- .specifier_id = CAMERA_UNIT_SPEC_ID_ENTRY & 0xffffff,
- .version = (CAMERA_SW_VERSION_ENTRY + 1) & 0xffffff
- },
- {
- .match_flags = IEEE1394_MATCH_SPECIFIER_ID | IEEE1394_MATCH_VERSION,
- .specifier_id = CAMERA_UNIT_SPEC_ID_ENTRY & 0xffffff,
- .version = (CAMERA_SW_VERSION_ENTRY + 2) & 0xffffff
- },
- { }
-};
-
-MODULE_DEVICE_TABLE(ieee1394, video1394_id_table);
-#endif /* MODULE */
-
-static struct hpsb_protocol_driver video1394_driver = {
- .name = VIDEO1394_DRIVER_NAME,
-};
-
-
-static void video1394_add_host (struct hpsb_host *host)
-{
- struct ti_ohci *ohci;
- int minor;
-
- /* We only work with the OHCI-1394 driver */
- if (strcmp(host->driver->name, OHCI1394_DRIVER_NAME))
- return;
-
- ohci = (struct ti_ohci *)host->hostdata;
-
- if (!hpsb_create_hostinfo(&video1394_highlevel, host, 0)) {
- PRINT(KERN_ERR, ohci->host->id, "Cannot allocate hostinfo");
- return;
- }
-
- hpsb_set_hostinfo(&video1394_highlevel, host, ohci);
- hpsb_set_hostinfo_key(&video1394_highlevel, host, ohci->host->id);
-
- minor = IEEE1394_MINOR_BLOCK_VIDEO1394 * 16 + ohci->host->id;
- device_create(hpsb_protocol_class, NULL, MKDEV(IEEE1394_MAJOR, minor),
- NULL, "%s-%d", VIDEO1394_DRIVER_NAME, ohci->host->id);
-}
-
-
-static void video1394_remove_host (struct hpsb_host *host)
-{
- struct ti_ohci *ohci = hpsb_get_hostinfo(&video1394_highlevel, host);
-
- if (ohci)
- device_destroy(hpsb_protocol_class, MKDEV(IEEE1394_MAJOR,
- IEEE1394_MINOR_BLOCK_VIDEO1394 * 16 + ohci->host->id));
- return;
-}
-
-
-static struct hpsb_highlevel video1394_highlevel = {
- .name = VIDEO1394_DRIVER_NAME,
- .add_host = video1394_add_host,
- .remove_host = video1394_remove_host,
-};
-
-MODULE_AUTHOR("Sebastien Rougeaux <sebastien.rougeaux@anu.edu.au>");
-MODULE_DESCRIPTION("driver for digital video on OHCI board");
-MODULE_SUPPORTED_DEVICE(VIDEO1394_DRIVER_NAME);
-MODULE_LICENSE("GPL");
-
-#ifdef CONFIG_COMPAT
-
-#define VIDEO1394_IOC32_LISTEN_QUEUE_BUFFER \
- _IOW ('#', 0x12, struct video1394_wait32)
-#define VIDEO1394_IOC32_LISTEN_WAIT_BUFFER \
- _IOWR('#', 0x13, struct video1394_wait32)
-#define VIDEO1394_IOC32_TALK_WAIT_BUFFER \
- _IOW ('#', 0x17, struct video1394_wait32)
-#define VIDEO1394_IOC32_LISTEN_POLL_BUFFER \
- _IOWR('#', 0x18, struct video1394_wait32)
-
-struct video1394_wait32 {
- u32 channel;
- u32 buffer;
- struct compat_timeval filltime;
-};
-
-static int video1394_wr_wait32(struct file *file, unsigned int cmd, unsigned long arg)
-{
- struct video1394_wait32 __user *argp = (void __user *)arg;
- struct video1394_wait32 wait32;
- struct video1394_wait wait;
- mm_segment_t old_fs;
- int ret;
-
- if (copy_from_user(&wait32, argp, sizeof(wait32)))
- return -EFAULT;
-
- wait.channel = wait32.channel;
- wait.buffer = wait32.buffer;
- wait.filltime.tv_sec = (time_t)wait32.filltime.tv_sec;
- wait.filltime.tv_usec = (suseconds_t)wait32.filltime.tv_usec;
-
- old_fs = get_fs();
- set_fs(KERNEL_DS);
- if (cmd == VIDEO1394_IOC32_LISTEN_WAIT_BUFFER)
- ret = video1394_ioctl(file,
- VIDEO1394_IOC_LISTEN_WAIT_BUFFER,
- (unsigned long) &wait);
- else
- ret = video1394_ioctl(file,
- VIDEO1394_IOC_LISTEN_POLL_BUFFER,
- (unsigned long) &wait);
- set_fs(old_fs);
-
- if (!ret) {
- wait32.channel = wait.channel;
- wait32.buffer = wait.buffer;
- wait32.filltime.tv_sec = (int)wait.filltime.tv_sec;
- wait32.filltime.tv_usec = (int)wait.filltime.tv_usec;
-
- if (copy_to_user(argp, &wait32, sizeof(wait32)))
- ret = -EFAULT;
- }
-
- return ret;
-}
-
-static int video1394_w_wait32(struct file *file, unsigned int cmd, unsigned long arg)
-{
- struct video1394_wait32 wait32;
- struct video1394_wait wait;
- mm_segment_t old_fs;
- int ret;
-
- if (copy_from_user(&wait32, (void __user *)arg, sizeof(wait32)))
- return -EFAULT;
-
- wait.channel = wait32.channel;
- wait.buffer = wait32.buffer;
- wait.filltime.tv_sec = (time_t)wait32.filltime.tv_sec;
- wait.filltime.tv_usec = (suseconds_t)wait32.filltime.tv_usec;
-
- old_fs = get_fs();
- set_fs(KERNEL_DS);
- if (cmd == VIDEO1394_IOC32_LISTEN_QUEUE_BUFFER)
- ret = video1394_ioctl(file,
- VIDEO1394_IOC_LISTEN_QUEUE_BUFFER,
- (unsigned long) &wait);
- else
- ret = video1394_ioctl(file,
- VIDEO1394_IOC_TALK_WAIT_BUFFER,
- (unsigned long) &wait);
- set_fs(old_fs);
-
- return ret;
-}
-
-static int video1394_queue_buf32(struct file *file, unsigned int cmd, unsigned long arg)
-{
- return -EFAULT; /* ??? was there before. */
-
- return video1394_ioctl(file,
- VIDEO1394_IOC_TALK_QUEUE_BUFFER, arg);
-}
-
-static long video1394_compat_ioctl(struct file *f, unsigned cmd, unsigned long arg)
-{
- switch (cmd) {
- case VIDEO1394_IOC_LISTEN_CHANNEL:
- case VIDEO1394_IOC_UNLISTEN_CHANNEL:
- case VIDEO1394_IOC_TALK_CHANNEL:
- case VIDEO1394_IOC_UNTALK_CHANNEL:
- return video1394_ioctl(f, cmd, arg);
-
- case VIDEO1394_IOC32_LISTEN_QUEUE_BUFFER:
- return video1394_w_wait32(f, cmd, arg);
- case VIDEO1394_IOC32_LISTEN_WAIT_BUFFER:
- return video1394_wr_wait32(f, cmd, arg);
- case VIDEO1394_IOC_TALK_QUEUE_BUFFER:
- return video1394_queue_buf32(f, cmd, arg);
- case VIDEO1394_IOC32_TALK_WAIT_BUFFER:
- return video1394_w_wait32(f, cmd, arg);
- case VIDEO1394_IOC32_LISTEN_POLL_BUFFER:
- return video1394_wr_wait32(f, cmd, arg);
- default:
- return -ENOIOCTLCMD;
- }
-}
-
-#endif /* CONFIG_COMPAT */
-
-static void __exit video1394_exit_module (void)
-{
- hpsb_unregister_protocol(&video1394_driver);
- hpsb_unregister_highlevel(&video1394_highlevel);
- cdev_del(&video1394_cdev);
- PRINT_G(KERN_INFO, "Removed " VIDEO1394_DRIVER_NAME " module");
-}
-
-static int __init video1394_init_module (void)
-{
- int ret;
-
- hpsb_init_highlevel(&video1394_highlevel);
-
- cdev_init(&video1394_cdev, &video1394_fops);
- video1394_cdev.owner = THIS_MODULE;
- ret = cdev_add(&video1394_cdev, IEEE1394_VIDEO1394_DEV, 16);
- if (ret) {
- PRINT_G(KERN_ERR, "video1394: unable to get minor device block");
- return ret;
- }
-
- hpsb_register_highlevel(&video1394_highlevel);
-
- ret = hpsb_register_protocol(&video1394_driver);
- if (ret) {
- PRINT_G(KERN_ERR, "video1394: failed to register protocol");
- hpsb_unregister_highlevel(&video1394_highlevel);
- cdev_del(&video1394_cdev);
- return ret;
- }
-
- PRINT_G(KERN_INFO, "Installed " VIDEO1394_DRIVER_NAME " module");
- return 0;
-}
-
-
-module_init(video1394_init_module);
-module_exit(video1394_exit_module);
diff --git a/drivers/ieee1394/video1394.h b/drivers/ieee1394/video1394.h
deleted file mode 100644
index 9a89d9cc3c85..000000000000
--- a/drivers/ieee1394/video1394.h
+++ /dev/null
@@ -1,67 +0,0 @@
-/*
- * video1394.h - driver for OHCI 1394 boards
- * Copyright (C)1999,2000 Sebastien Rougeaux <sebastien.rougeaux@anu.edu.au>
- * Peter Schlaile <udbz@rz.uni-karlsruhe.de>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software Foundation,
- * Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
- */
-
-#ifndef _VIDEO_1394_H
-#define _VIDEO_1394_H
-
-#include "ieee1394-ioctl.h"
-
-#define VIDEO1394_DRIVER_NAME "video1394"
-
-#define VIDEO1394_MAX_SIZE 0x4000000
-
-enum {
- VIDEO1394_BUFFER_FREE = 0,
- VIDEO1394_BUFFER_QUEUED,
- VIDEO1394_BUFFER_READY
-};
-
-#define VIDEO1394_SYNC_FRAMES 0x00000001
-#define VIDEO1394_INCLUDE_ISO_HEADERS 0x00000002
-#define VIDEO1394_VARIABLE_PACKET_SIZE 0x00000004
-
-struct video1394_mmap {
- int channel; /* -1 to find an open channel in LISTEN/TALK */
- unsigned int sync_tag;
- unsigned int nb_buffers;
- unsigned int buf_size;
- unsigned int packet_size; /* For VARIABLE_PACKET_SIZE:
- Maximum packet size */
- unsigned int fps;
- unsigned int syt_offset;
- unsigned int flags;
-};
-
-/* For TALK_QUEUE_BUFFER with VIDEO1394_VARIABLE_PACKET_SIZE use */
-struct video1394_queue_variable {
- unsigned int channel;
- unsigned int buffer;
- unsigned int __user * packet_sizes; /* Buffer of size:
- buf_size / packet_size */
-};
-
-struct video1394_wait {
- unsigned int channel;
- unsigned int buffer;
- struct timeval filltime; /* time of buffer full */
-};
-
-
-#endif
diff --git a/drivers/infiniband/core/agent.c b/drivers/infiniband/core/agent.c
index ae7c2880e624..91916a8d5de4 100644
--- a/drivers/infiniband/core/agent.c
+++ b/drivers/infiniband/core/agent.c
@@ -59,8 +59,8 @@ __ib_get_agent_port(struct ib_device *device, int port_num)
struct ib_agent_port_private *entry;
list_for_each_entry(entry, &ib_agent_port_list, port_list) {
- if (entry->agent[0]->device == device &&
- entry->agent[0]->port_num == port_num)
+ if (entry->agent[1]->device == device &&
+ entry->agent[1]->port_num == port_num)
return entry;
}
return NULL;
@@ -155,14 +155,16 @@ int ib_agent_port_open(struct ib_device *device, int port_num)
goto error1;
}
- /* Obtain send only MAD agent for SMI QP */
- port_priv->agent[0] = ib_register_mad_agent(device, port_num,
- IB_QPT_SMI, NULL, 0,
- &agent_send_handler,
- NULL, NULL);
- if (IS_ERR(port_priv->agent[0])) {
- ret = PTR_ERR(port_priv->agent[0]);
- goto error2;
+ if (rdma_port_get_link_layer(device, port_num) == IB_LINK_LAYER_INFINIBAND) {
+ /* Obtain send only MAD agent for SMI QP */
+ port_priv->agent[0] = ib_register_mad_agent(device, port_num,
+ IB_QPT_SMI, NULL, 0,
+ &agent_send_handler,
+ NULL, NULL);
+ if (IS_ERR(port_priv->agent[0])) {
+ ret = PTR_ERR(port_priv->agent[0]);
+ goto error2;
+ }
}
/* Obtain send only MAD agent for GSI QP */
@@ -182,7 +184,8 @@ int ib_agent_port_open(struct ib_device *device, int port_num)
return 0;
error3:
- ib_unregister_mad_agent(port_priv->agent[0]);
+ if (port_priv->agent[0])
+ ib_unregister_mad_agent(port_priv->agent[0]);
error2:
kfree(port_priv);
error1:
@@ -205,7 +208,9 @@ int ib_agent_port_close(struct ib_device *device, int port_num)
spin_unlock_irqrestore(&ib_agent_port_list_lock, flags);
ib_unregister_mad_agent(port_priv->agent[1]);
- ib_unregister_mad_agent(port_priv->agent[0]);
+ if (port_priv->agent[0])
+ ib_unregister_mad_agent(port_priv->agent[0]);
+
kfree(port_priv);
return 0;
}
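
The agent.c hunk above registers the SMI (QP0) MAD agent only when the port's link layer is InfiniBand; RoCE ports carry no QP0, so agent[0] may legitimately stay NULL, which is also why __ib_get_agent_port() now matches on agent[1] (the GSI agent, always present). A minimal sketch of the resulting guard pattern, with error handling and locking elided (not additional driver code):

	/* Register the QP0 agent only where an SMI actually exists. */
	if (rdma_port_get_link_layer(device, port_num) == IB_LINK_LAYER_INFINIBAND) {
		port_priv->agent[0] = ib_register_mad_agent(device, port_num,
							    IB_QPT_SMI, NULL, 0,
							    &agent_send_handler,
							    NULL, NULL);
		if (IS_ERR(port_priv->agent[0]))
			return PTR_ERR(port_priv->agent[0]);
	}

	/* Teardown paths must tolerate the missing agent. */
	if (port_priv->agent[0])
		ib_unregister_mad_agent(port_priv->agent[0]);
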
diff --git a/drivers/infiniband/core/cma.c b/drivers/infiniband/core/cma.c
index b930b8110a63..6884da24fde1 100644
--- a/drivers/infiniband/core/cma.c
+++ b/drivers/infiniband/core/cma.c
@@ -59,6 +59,7 @@ MODULE_LICENSE("Dual BSD/GPL");
#define CMA_CM_RESPONSE_TIMEOUT 20
#define CMA_MAX_CM_RETRIES 15
#define CMA_CM_MRA_SETTING (IB_CM_MRA_FLAG_DELAY | 24)
+#define CMA_IBOE_PACKET_LIFETIME 18
static void cma_add_one(struct ib_device *device);
static void cma_remove_one(struct ib_device *device);
@@ -157,6 +158,7 @@ struct cma_multicast {
struct list_head list;
void *context;
struct sockaddr_storage addr;
+ struct kref mcref;
};
struct cma_work {
@@ -173,6 +175,12 @@ struct cma_ndev_work {
struct rdma_cm_event event;
};
+struct iboe_mcast_work {
+ struct work_struct work;
+ struct rdma_id_private *id;
+ struct cma_multicast *mc;
+};
+
union cma_ip_addr {
struct in6_addr ip6;
struct {
@@ -281,6 +289,8 @@ static void cma_attach_to_dev(struct rdma_id_private *id_priv,
atomic_inc(&cma_dev->refcount);
id_priv->cma_dev = cma_dev;
id_priv->id.device = cma_dev->device;
+ id_priv->id.route.addr.dev_addr.transport =
+ rdma_node_get_transport(cma_dev->device->node_type);
list_add_tail(&id_priv->list, &cma_dev->id_list);
}
@@ -290,6 +300,14 @@ static inline void cma_deref_dev(struct cma_device *cma_dev)
complete(&cma_dev->comp);
}
+static inline void release_mc(struct kref *kref)
+{
+ struct cma_multicast *mc = container_of(kref, struct cma_multicast, mcref);
+
+ kfree(mc->multicast.ib);
+ kfree(mc);
+}
+
static void cma_detach_from_dev(struct rdma_id_private *id_priv)
{
list_del(&id_priv->list);
@@ -323,22 +341,63 @@ static int cma_set_qkey(struct rdma_id_private *id_priv)
return ret;
}
+static int find_gid_port(struct ib_device *device, union ib_gid *gid, u8 port_num)
+{
+ int i;
+ int err;
+ struct ib_port_attr props;
+ union ib_gid tmp;
+
+ err = ib_query_port(device, port_num, &props);
+ if (err)
+ return 1;
+
+ for (i = 0; i < props.gid_tbl_len; ++i) {
+ err = ib_query_gid(device, port_num, i, &tmp);
+ if (err)
+ return 1;
+ if (!memcmp(&tmp, gid, sizeof tmp))
+ return 0;
+ }
+
+ return -EAGAIN;
+}
+
static int cma_acquire_dev(struct rdma_id_private *id_priv)
{
struct rdma_dev_addr *dev_addr = &id_priv->id.route.addr.dev_addr;
struct cma_device *cma_dev;
- union ib_gid gid;
+ union ib_gid gid, iboe_gid;
int ret = -ENODEV;
+ u8 port;
+ enum rdma_link_layer dev_ll = dev_addr->dev_type == ARPHRD_INFINIBAND ?
+ IB_LINK_LAYER_INFINIBAND : IB_LINK_LAYER_ETHERNET;
- rdma_addr_get_sgid(dev_addr, &gid);
+ iboe_addr_get_sgid(dev_addr, &iboe_gid);
+ memcpy(&gid, dev_addr->src_dev_addr +
+ rdma_addr_gid_offset(dev_addr), sizeof gid);
list_for_each_entry(cma_dev, &dev_list, list) {
- ret = ib_find_cached_gid(cma_dev->device, &gid,
- &id_priv->id.port_num, NULL);
- if (!ret) {
- cma_attach_to_dev(id_priv, cma_dev);
- break;
+ for (port = 1; port <= cma_dev->device->phys_port_cnt; ++port) {
+ if (rdma_port_get_link_layer(cma_dev->device, port) == dev_ll) {
+ if (rdma_node_get_transport(cma_dev->device->node_type) == RDMA_TRANSPORT_IB &&
+ rdma_port_get_link_layer(cma_dev->device, port) == IB_LINK_LAYER_ETHERNET)
+ ret = find_gid_port(cma_dev->device, &iboe_gid, port);
+ else
+ ret = find_gid_port(cma_dev->device, &gid, port);
+
+ if (!ret) {
+ id_priv->id.port_num = port;
+ goto out;
+ } else if (ret == 1)
+ break;
+ }
}
}
+
+out:
+ if (!ret)
+ cma_attach_to_dev(id_priv, cma_dev);
+
return ret;
}
@@ -556,10 +615,16 @@ static int cma_ib_init_qp_attr(struct rdma_id_private *id_priv,
{
struct rdma_dev_addr *dev_addr = &id_priv->id.route.addr.dev_addr;
int ret;
+ u16 pkey;
+
+ if (rdma_port_get_link_layer(id_priv->id.device, id_priv->id.port_num) ==
+ IB_LINK_LAYER_INFINIBAND)
+ pkey = ib_addr_get_pkey(dev_addr);
+ else
+ pkey = 0xffff;
ret = ib_find_cached_pkey(id_priv->id.device, id_priv->id.port_num,
- ib_addr_get_pkey(dev_addr),
- &qp_attr->pkey_index);
+ pkey, &qp_attr->pkey_index);
if (ret)
return ret;
@@ -737,8 +802,8 @@ static inline int cma_user_data_offset(enum rdma_port_space ps)
static void cma_cancel_route(struct rdma_id_private *id_priv)
{
- switch (rdma_node_get_transport(id_priv->id.device->node_type)) {
- case RDMA_TRANSPORT_IB:
+ switch (rdma_port_get_link_layer(id_priv->id.device, id_priv->id.port_num)) {
+ case IB_LINK_LAYER_INFINIBAND:
if (id_priv->query)
ib_sa_cancel_query(id_priv->query_id, id_priv->query);
break;
@@ -816,8 +881,17 @@ static void cma_leave_mc_groups(struct rdma_id_private *id_priv)
mc = container_of(id_priv->mc_list.next,
struct cma_multicast, list);
list_del(&mc->list);
- ib_sa_free_multicast(mc->multicast.ib);
- kfree(mc);
+ switch (rdma_port_get_link_layer(id_priv->cma_dev->device, id_priv->id.port_num)) {
+ case IB_LINK_LAYER_INFINIBAND:
+ ib_sa_free_multicast(mc->multicast.ib);
+ kfree(mc);
+ break;
+ case IB_LINK_LAYER_ETHERNET:
+ kref_put(&mc->mcref, release_mc);
+ break;
+ default:
+ break;
+ }
}
}
@@ -833,7 +907,7 @@ void rdma_destroy_id(struct rdma_cm_id *id)
mutex_lock(&lock);
if (id_priv->cma_dev) {
mutex_unlock(&lock);
- switch (rdma_node_get_transport(id->device->node_type)) {
+ switch (rdma_node_get_transport(id_priv->id.device->node_type)) {
case RDMA_TRANSPORT_IB:
if (id_priv->cm_id.ib && !IS_ERR(id_priv->cm_id.ib))
ib_destroy_cm_id(id_priv->cm_id.ib);
@@ -1708,6 +1782,81 @@ static int cma_resolve_iw_route(struct rdma_id_private *id_priv, int timeout_ms)
return 0;
}
+static int cma_resolve_iboe_route(struct rdma_id_private *id_priv)
+{
+ struct rdma_route *route = &id_priv->id.route;
+ struct rdma_addr *addr = &route->addr;
+ struct cma_work *work;
+ int ret;
+ struct sockaddr_in *src_addr = (struct sockaddr_in *)&route->addr.src_addr;
+ struct sockaddr_in *dst_addr = (struct sockaddr_in *)&route->addr.dst_addr;
+ struct net_device *ndev = NULL;
+ u16 vid;
+
+ if (src_addr->sin_family != dst_addr->sin_family)
+ return -EINVAL;
+
+ work = kzalloc(sizeof *work, GFP_KERNEL);
+ if (!work)
+ return -ENOMEM;
+
+ work->id = id_priv;
+ INIT_WORK(&work->work, cma_work_handler);
+
+ route->path_rec = kzalloc(sizeof *route->path_rec, GFP_KERNEL);
+ if (!route->path_rec) {
+ ret = -ENOMEM;
+ goto err1;
+ }
+
+ route->num_paths = 1;
+
+ if (addr->dev_addr.bound_dev_if)
+ ndev = dev_get_by_index(&init_net, addr->dev_addr.bound_dev_if);
+ if (!ndev) {
+ ret = -ENODEV;
+ goto err2;
+ }
+
+ vid = rdma_vlan_dev_vlan_id(ndev);
+
+ iboe_mac_vlan_to_ll(&route->path_rec->sgid, addr->dev_addr.src_dev_addr, vid);
+ iboe_mac_vlan_to_ll(&route->path_rec->dgid, addr->dev_addr.dst_dev_addr, vid);
+
+ route->path_rec->hop_limit = 1;
+ route->path_rec->reversible = 1;
+ route->path_rec->pkey = cpu_to_be16(0xffff);
+ route->path_rec->mtu_selector = IB_SA_EQ;
+ route->path_rec->sl = id_priv->tos >> 5;
+
+ route->path_rec->mtu = iboe_get_mtu(ndev->mtu);
+ route->path_rec->rate_selector = IB_SA_EQ;
+ route->path_rec->rate = iboe_get_rate(ndev);
+ dev_put(ndev);
+ route->path_rec->packet_life_time_selector = IB_SA_EQ;
+ route->path_rec->packet_life_time = CMA_IBOE_PACKET_LIFETIME;
+ if (!route->path_rec->mtu) {
+ ret = -EINVAL;
+ goto err2;
+ }
+
+ work->old_state = CMA_ROUTE_QUERY;
+ work->new_state = CMA_ROUTE_RESOLVED;
+ work->event.event = RDMA_CM_EVENT_ROUTE_RESOLVED;
+ work->event.status = 0;
+
+ queue_work(cma_wq, &work->work);
+
+ return 0;
+
+err2:
+ kfree(route->path_rec);
+ route->path_rec = NULL;
+err1:
+ kfree(work);
+ return ret;
+}
+
int rdma_resolve_route(struct rdma_cm_id *id, int timeout_ms)
{
struct rdma_id_private *id_priv;
@@ -1720,7 +1869,16 @@ int rdma_resolve_route(struct rdma_cm_id *id, int timeout_ms)
atomic_inc(&id_priv->refcount);
switch (rdma_node_get_transport(id->device->node_type)) {
case RDMA_TRANSPORT_IB:
- ret = cma_resolve_ib_route(id_priv, timeout_ms);
+ switch (rdma_port_get_link_layer(id->device, id->port_num)) {
+ case IB_LINK_LAYER_INFINIBAND:
+ ret = cma_resolve_ib_route(id_priv, timeout_ms);
+ break;
+ case IB_LINK_LAYER_ETHERNET:
+ ret = cma_resolve_iboe_route(id_priv);
+ break;
+ default:
+ ret = -ENOSYS;
+ }
break;
case RDMA_TRANSPORT_IWARP:
ret = cma_resolve_iw_route(id_priv, timeout_ms);
@@ -1773,7 +1931,7 @@ port_found:
goto out;
id_priv->id.route.addr.dev_addr.dev_type =
- (rdma_node_get_transport(cma_dev->device->node_type) == RDMA_TRANSPORT_IB) ?
+ (rdma_port_get_link_layer(cma_dev->device, p) == IB_LINK_LAYER_INFINIBAND) ?
ARPHRD_INFINIBAND : ARPHRD_ETHER;
rdma_addr_set_sgid(&id_priv->id.route.addr.dev_addr, &gid);
@@ -2758,6 +2916,102 @@ static int cma_join_ib_multicast(struct rdma_id_private *id_priv,
return 0;
}
+static void iboe_mcast_work_handler(struct work_struct *work)
+{
+ struct iboe_mcast_work *mw = container_of(work, struct iboe_mcast_work, work);
+ struct cma_multicast *mc = mw->mc;
+ struct ib_sa_multicast *m = mc->multicast.ib;
+
+ mc->multicast.ib->context = mc;
+ cma_ib_mc_handler(0, m);
+ kref_put(&mc->mcref, release_mc);
+ kfree(mw);
+}
+
+static void cma_iboe_set_mgid(struct sockaddr *addr, union ib_gid *mgid)
+{
+ struct sockaddr_in *sin = (struct sockaddr_in *)addr;
+ struct sockaddr_in6 *sin6 = (struct sockaddr_in6 *)addr;
+
+ if (cma_any_addr(addr)) {
+ memset(mgid, 0, sizeof *mgid);
+ } else if (addr->sa_family == AF_INET6) {
+ memcpy(mgid, &sin6->sin6_addr, sizeof *mgid);
+ } else {
+ mgid->raw[0] = 0xff;
+ mgid->raw[1] = 0x0e;
+ mgid->raw[2] = 0;
+ mgid->raw[3] = 0;
+ mgid->raw[4] = 0;
+ mgid->raw[5] = 0;
+ mgid->raw[6] = 0;
+ mgid->raw[7] = 0;
+ mgid->raw[8] = 0;
+ mgid->raw[9] = 0;
+ mgid->raw[10] = 0xff;
+ mgid->raw[11] = 0xff;
+ *(__be32 *)(&mgid->raw[12]) = sin->sin_addr.s_addr;
+ }
+}
+
+static int cma_iboe_join_multicast(struct rdma_id_private *id_priv,
+ struct cma_multicast *mc)
+{
+ struct iboe_mcast_work *work;
+ struct rdma_dev_addr *dev_addr = &id_priv->id.route.addr.dev_addr;
+ int err;
+ struct sockaddr *addr = (struct sockaddr *)&mc->addr;
+ struct net_device *ndev = NULL;
+
+ if (cma_zero_addr((struct sockaddr *)&mc->addr))
+ return -EINVAL;
+
+ work = kzalloc(sizeof *work, GFP_KERNEL);
+ if (!work)
+ return -ENOMEM;
+
+ mc->multicast.ib = kzalloc(sizeof(struct ib_sa_multicast), GFP_KERNEL);
+ if (!mc->multicast.ib) {
+ err = -ENOMEM;
+ goto out1;
+ }
+
+ cma_iboe_set_mgid(addr, &mc->multicast.ib->rec.mgid);
+
+ mc->multicast.ib->rec.pkey = cpu_to_be16(0xffff);
+ if (id_priv->id.ps == RDMA_PS_UDP)
+ mc->multicast.ib->rec.qkey = cpu_to_be32(RDMA_UDP_QKEY);
+
+ if (dev_addr->bound_dev_if)
+ ndev = dev_get_by_index(&init_net, dev_addr->bound_dev_if);
+ if (!ndev) {
+ err = -ENODEV;
+ goto out2;
+ }
+ mc->multicast.ib->rec.rate = iboe_get_rate(ndev);
+ mc->multicast.ib->rec.hop_limit = 1;
+ mc->multicast.ib->rec.mtu = iboe_get_mtu(ndev->mtu);
+ dev_put(ndev);
+ if (!mc->multicast.ib->rec.mtu) {
+ err = -EINVAL;
+ goto out2;
+ }
+ iboe_addr_get_sgid(dev_addr, &mc->multicast.ib->rec.port_gid);
+ work->id = id_priv;
+ work->mc = mc;
+ INIT_WORK(&work->work, iboe_mcast_work_handler);
+ kref_get(&mc->mcref);
+ queue_work(cma_wq, &work->work);
+
+ return 0;
+
+out2:
+ kfree(mc->multicast.ib);
+out1:
+ kfree(work);
+ return err;
+}
+
int rdma_join_multicast(struct rdma_cm_id *id, struct sockaddr *addr,
void *context)
{
@@ -2784,7 +3038,17 @@ int rdma_join_multicast(struct rdma_cm_id *id, struct sockaddr *addr,
switch (rdma_node_get_transport(id->device->node_type)) {
case RDMA_TRANSPORT_IB:
- ret = cma_join_ib_multicast(id_priv, mc);
+ switch (rdma_port_get_link_layer(id->device, id->port_num)) {
+ case IB_LINK_LAYER_INFINIBAND:
+ ret = cma_join_ib_multicast(id_priv, mc);
+ break;
+ case IB_LINK_LAYER_ETHERNET:
+ kref_init(&mc->mcref);
+ ret = cma_iboe_join_multicast(id_priv, mc);
+ break;
+ default:
+ ret = -EINVAL;
+ }
break;
default:
ret = -ENOSYS;
@@ -2817,8 +3081,19 @@ void rdma_leave_multicast(struct rdma_cm_id *id, struct sockaddr *addr)
ib_detach_mcast(id->qp,
&mc->multicast.ib->rec.mgid,
mc->multicast.ib->rec.mlid);
- ib_sa_free_multicast(mc->multicast.ib);
- kfree(mc);
+ if (rdma_node_get_transport(id_priv->cma_dev->device->node_type) == RDMA_TRANSPORT_IB) {
+ switch (rdma_port_get_link_layer(id->device, id->port_num)) {
+ case IB_LINK_LAYER_INFINIBAND:
+ ib_sa_free_multicast(mc->multicast.ib);
+ kfree(mc);
+ break;
+ case IB_LINK_LAYER_ETHERNET:
+ kref_put(&mc->mcref, release_mc);
+ break;
+ default:
+ break;
+ }
+ }
return;
}
}
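
For reference, cma_iboe_set_mgid() added above builds the multicast GID directly from the socket address instead of going through the SA. A worked illustration of the IPv4 case, using the group address 239.1.2.3 (0xef010203) purely as an example:

	/*
	 * cma_iboe_set_mgid() byte layout for an AF_INET group 239.1.2.3:
	 *   raw[] = ff 0e 00 00  00 00 00 00  00 00 ff ff  ef 01 02 03
	 * i.e. the GID ff0e::ffff:239.1.2.3.  AF_INET6 addresses are copied
	 * verbatim into the MGID, and the wildcard address yields the
	 * all-zero GID.
	 */
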
diff --git a/drivers/infiniband/core/iwcm.c b/drivers/infiniband/core/iwcm.c
index bfead5bc25f6..2a1e9ae134b4 100644
--- a/drivers/infiniband/core/iwcm.c
+++ b/drivers/infiniband/core/iwcm.c
@@ -506,6 +506,8 @@ int iw_cm_accept(struct iw_cm_id *cm_id,
qp = cm_id->device->iwcm->get_qp(cm_id->device, iw_param->qpn);
if (!qp) {
spin_unlock_irqrestore(&cm_id_priv->lock, flags);
+ clear_bit(IWCM_F_CONNECT_WAIT, &cm_id_priv->flags);
+ wake_up_all(&cm_id_priv->connect_wait);
return -EINVAL;
}
cm_id->device->iwcm->add_ref(qp);
@@ -565,6 +567,8 @@ int iw_cm_connect(struct iw_cm_id *cm_id, struct iw_cm_conn_param *iw_param)
qp = cm_id->device->iwcm->get_qp(cm_id->device, iw_param->qpn);
if (!qp) {
spin_unlock_irqrestore(&cm_id_priv->lock, flags);
+ clear_bit(IWCM_F_CONNECT_WAIT, &cm_id_priv->flags);
+ wake_up_all(&cm_id_priv->connect_wait);
return -EINVAL;
}
cm_id->device->iwcm->add_ref(qp);
diff --git a/drivers/infiniband/core/mad.c b/drivers/infiniband/core/mad.c
index ef1304f151dc..822cfdcd9f78 100644
--- a/drivers/infiniband/core/mad.c
+++ b/drivers/infiniband/core/mad.c
@@ -2598,6 +2598,9 @@ static void cleanup_recv_queue(struct ib_mad_qp_info *qp_info)
struct ib_mad_private *recv;
struct ib_mad_list_head *mad_list;
+ if (!qp_info->qp)
+ return;
+
while (!list_empty(&qp_info->recv_queue.list)) {
mad_list = list_entry(qp_info->recv_queue.list.next,
@@ -2639,6 +2642,9 @@ static int ib_mad_port_start(struct ib_mad_port_private *port_priv)
for (i = 0; i < IB_MAD_QPS_CORE; i++) {
qp = port_priv->qp_info[i].qp;
+ if (!qp)
+ continue;
+
/*
* PKey index for QP1 is irrelevant but
* one is needed for the Reset to Init transition
@@ -2680,6 +2686,9 @@ static int ib_mad_port_start(struct ib_mad_port_private *port_priv)
}
for (i = 0; i < IB_MAD_QPS_CORE; i++) {
+ if (!port_priv->qp_info[i].qp)
+ continue;
+
ret = ib_mad_post_receive_mads(&port_priv->qp_info[i], NULL);
if (ret) {
printk(KERN_ERR PFX "Couldn't post receive WRs\n");
@@ -2758,6 +2767,9 @@ error:
static void destroy_mad_qp(struct ib_mad_qp_info *qp_info)
{
+ if (!qp_info->qp)
+ return;
+
ib_destroy_qp(qp_info->qp);
kfree(qp_info->snoop_table);
}
@@ -2773,6 +2785,7 @@ static int ib_mad_port_open(struct ib_device *device,
struct ib_mad_port_private *port_priv;
unsigned long flags;
char name[sizeof "ib_mad123"];
+ int has_smi;
/* Create new device info */
port_priv = kzalloc(sizeof *port_priv, GFP_KERNEL);
@@ -2788,7 +2801,11 @@ static int ib_mad_port_open(struct ib_device *device,
init_mad_qp(port_priv, &port_priv->qp_info[0]);
init_mad_qp(port_priv, &port_priv->qp_info[1]);
- cq_size = (mad_sendq_size + mad_recvq_size) * 2;
+ cq_size = mad_sendq_size + mad_recvq_size;
+ has_smi = rdma_port_get_link_layer(device, port_num) == IB_LINK_LAYER_INFINIBAND;
+ if (has_smi)
+ cq_size *= 2;
+
port_priv->cq = ib_create_cq(port_priv->device,
ib_mad_thread_completion_handler,
NULL, port_priv, cq_size, 0);
@@ -2812,9 +2829,11 @@ static int ib_mad_port_open(struct ib_device *device,
goto error5;
}
- ret = create_mad_qp(&port_priv->qp_info[0], IB_QPT_SMI);
- if (ret)
- goto error6;
+ if (has_smi) {
+ ret = create_mad_qp(&port_priv->qp_info[0], IB_QPT_SMI);
+ if (ret)
+ goto error6;
+ }
ret = create_mad_qp(&port_priv->qp_info[1], IB_QPT_GSI);
if (ret)
goto error7;
diff --git a/drivers/infiniband/core/multicast.c b/drivers/infiniband/core/multicast.c
index a519801dcfb7..68b4162fd9d2 100644
--- a/drivers/infiniband/core/multicast.c
+++ b/drivers/infiniband/core/multicast.c
@@ -774,6 +774,10 @@ static void mcast_event_handler(struct ib_event_handler *handler,
int index;
dev = container_of(handler, struct mcast_device, event_handler);
+ if (rdma_port_get_link_layer(dev->device, event->element.port_num) !=
+ IB_LINK_LAYER_INFINIBAND)
+ return;
+
index = event->element.port_num - dev->start_port;
switch (event->event) {
@@ -796,6 +800,7 @@ static void mcast_add_one(struct ib_device *device)
struct mcast_device *dev;
struct mcast_port *port;
int i;
+ int count = 0;
if (rdma_node_get_transport(device->node_type) != RDMA_TRANSPORT_IB)
return;
@@ -813,6 +818,9 @@ static void mcast_add_one(struct ib_device *device)
}
for (i = 0; i <= dev->end_port - dev->start_port; i++) {
+ if (rdma_port_get_link_layer(device, dev->start_port + i) !=
+ IB_LINK_LAYER_INFINIBAND)
+ continue;
port = &dev->port[i];
port->dev = dev;
port->port_num = dev->start_port + i;
@@ -820,6 +828,12 @@ static void mcast_add_one(struct ib_device *device)
port->table = RB_ROOT;
init_completion(&port->comp);
atomic_set(&port->refcount, 1);
+ ++count;
+ }
+
+ if (!count) {
+ kfree(dev);
+ return;
}
dev->device = device;
@@ -843,9 +857,12 @@ static void mcast_remove_one(struct ib_device *device)
flush_workqueue(mcast_wq);
for (i = 0; i <= dev->end_port - dev->start_port; i++) {
- port = &dev->port[i];
- deref_port(port);
- wait_for_completion(&port->comp);
+ if (rdma_port_get_link_layer(device, dev->start_port + i) ==
+ IB_LINK_LAYER_INFINIBAND) {
+ port = &dev->port[i];
+ deref_port(port);
+ wait_for_completion(&port->comp);
+ }
}
kfree(dev);
diff --git a/drivers/infiniband/core/sa_query.c b/drivers/infiniband/core/sa_query.c
index 7e1ffd8ccd5c..91a660310b7c 100644
--- a/drivers/infiniband/core/sa_query.c
+++ b/drivers/infiniband/core/sa_query.c
@@ -416,6 +416,9 @@ static void ib_sa_event(struct ib_event_handler *handler, struct ib_event *event
struct ib_sa_port *port =
&sa_dev->port[event->element.port_num - sa_dev->start_port];
+ if (rdma_port_get_link_layer(handler->device, port->port_num) != IB_LINK_LAYER_INFINIBAND)
+ return;
+
spin_lock_irqsave(&port->ah_lock, flags);
if (port->sm_ah)
kref_put(&port->sm_ah->ref, free_sm_ah);
@@ -493,6 +496,7 @@ int ib_init_ah_from_path(struct ib_device *device, u8 port_num,
{
int ret;
u16 gid_index;
+ int force_grh;
memset(ah_attr, 0, sizeof *ah_attr);
ah_attr->dlid = be16_to_cpu(rec->dlid);
@@ -502,7 +506,9 @@ int ib_init_ah_from_path(struct ib_device *device, u8 port_num,
ah_attr->port_num = port_num;
ah_attr->static_rate = rec->rate;
- if (rec->hop_limit > 1) {
+ force_grh = rdma_port_get_link_layer(device, port_num) == IB_LINK_LAYER_ETHERNET;
+
+ if (rec->hop_limit > 1 || force_grh) {
ah_attr->ah_flags = IB_AH_GRH;
ah_attr->grh.dgid = rec->dgid;
@@ -1007,7 +1013,7 @@ static void ib_sa_add_one(struct ib_device *device)
e = device->phys_port_cnt;
}
- sa_dev = kmalloc(sizeof *sa_dev +
+ sa_dev = kzalloc(sizeof *sa_dev +
(e - s + 1) * sizeof (struct ib_sa_port),
GFP_KERNEL);
if (!sa_dev)
@@ -1017,9 +1023,12 @@ static void ib_sa_add_one(struct ib_device *device)
sa_dev->end_port = e;
for (i = 0; i <= e - s; ++i) {
+ spin_lock_init(&sa_dev->port[i].ah_lock);
+ if (rdma_port_get_link_layer(device, i + 1) != IB_LINK_LAYER_INFINIBAND)
+ continue;
+
sa_dev->port[i].sm_ah = NULL;
sa_dev->port[i].port_num = i + s;
- spin_lock_init(&sa_dev->port[i].ah_lock);
sa_dev->port[i].agent =
ib_register_mad_agent(device, i + s, IB_QPT_GSI,
@@ -1045,13 +1054,15 @@ static void ib_sa_add_one(struct ib_device *device)
goto err;
for (i = 0; i <= e - s; ++i)
- update_sm_ah(&sa_dev->port[i].update_task);
+ if (rdma_port_get_link_layer(device, i + 1) == IB_LINK_LAYER_INFINIBAND)
+ update_sm_ah(&sa_dev->port[i].update_task);
return;
err:
while (--i >= 0)
- ib_unregister_mad_agent(sa_dev->port[i].agent);
+ if (rdma_port_get_link_layer(device, i + 1) == IB_LINK_LAYER_INFINIBAND)
+ ib_unregister_mad_agent(sa_dev->port[i].agent);
kfree(sa_dev);
@@ -1071,9 +1082,12 @@ static void ib_sa_remove_one(struct ib_device *device)
flush_scheduled_work();
for (i = 0; i <= sa_dev->end_port - sa_dev->start_port; ++i) {
- ib_unregister_mad_agent(sa_dev->port[i].agent);
- if (sa_dev->port[i].sm_ah)
- kref_put(&sa_dev->port[i].sm_ah->ref, free_sm_ah);
+ if (rdma_port_get_link_layer(device, i + 1) == IB_LINK_LAYER_INFINIBAND) {
+ ib_unregister_mad_agent(sa_dev->port[i].agent);
+ if (sa_dev->port[i].sm_ah)
+ kref_put(&sa_dev->port[i].sm_ah->ref, free_sm_ah);
+ }
+
}
kfree(sa_dev);
diff --git a/drivers/infiniband/core/sysfs.c b/drivers/infiniband/core/sysfs.c
index 3627300e2a10..9ab5df72df7b 100644
--- a/drivers/infiniband/core/sysfs.c
+++ b/drivers/infiniband/core/sysfs.c
@@ -222,6 +222,19 @@ static ssize_t phys_state_show(struct ib_port *p, struct port_attribute *unused,
}
}
+static ssize_t link_layer_show(struct ib_port *p, struct port_attribute *unused,
+ char *buf)
+{
+ switch (rdma_port_get_link_layer(p->ibdev, p->port_num)) {
+ case IB_LINK_LAYER_INFINIBAND:
+ return sprintf(buf, "%s\n", "InfiniBand");
+ case IB_LINK_LAYER_ETHERNET:
+ return sprintf(buf, "%s\n", "Ethernet");
+ default:
+ return sprintf(buf, "%s\n", "Unknown");
+ }
+}
+
static PORT_ATTR_RO(state);
static PORT_ATTR_RO(lid);
static PORT_ATTR_RO(lid_mask_count);
@@ -230,6 +243,7 @@ static PORT_ATTR_RO(sm_sl);
static PORT_ATTR_RO(cap_mask);
static PORT_ATTR_RO(rate);
static PORT_ATTR_RO(phys_state);
+static PORT_ATTR_RO(link_layer);
static struct attribute *port_default_attrs[] = {
&port_attr_state.attr,
@@ -240,6 +254,7 @@ static struct attribute *port_default_attrs[] = {
&port_attr_cap_mask.attr,
&port_attr_rate.attr,
&port_attr_phys_state.attr,
+ &port_attr_link_layer.attr,
NULL
};
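
The sysfs hunk above exposes the port's link layer as a read-only attribute next to state, rate and phys_state, so userspace can tell RoCE ports from native IB ports without new verbs. The path below follows the standard ib_core sysfs layout; the device name is chosen only for illustration:

	/* e.g.  cat /sys/class/infiniband/<device>/ports/1/link_layer
	 * prints "InfiniBand", "Ethernet" or "Unknown", matching
	 * link_layer_show() above. */
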
diff --git a/drivers/infiniband/core/ucma.c b/drivers/infiniband/core/ucma.c
index ac7edc24165c..ca12acf38379 100644
--- a/drivers/infiniband/core/ucma.c
+++ b/drivers/infiniband/core/ucma.c
@@ -40,6 +40,7 @@
#include <linux/in6.h>
#include <linux/miscdevice.h>
#include <linux/slab.h>
+#include <linux/sysctl.h>
#include <rdma/rdma_user_cm.h>
#include <rdma/ib_marshall.h>
@@ -50,8 +51,24 @@ MODULE_AUTHOR("Sean Hefty");
MODULE_DESCRIPTION("RDMA Userspace Connection Manager Access");
MODULE_LICENSE("Dual BSD/GPL");
-enum {
- UCMA_MAX_BACKLOG = 128
+static unsigned int max_backlog = 1024;
+
+static struct ctl_table_header *ucma_ctl_table_hdr;
+static ctl_table ucma_ctl_table[] = {
+ {
+ .procname = "max_backlog",
+ .data = &max_backlog,
+ .maxlen = sizeof max_backlog,
+ .mode = 0644,
+ .proc_handler = proc_dointvec,
+ },
+ { }
+};
+
+static struct ctl_path ucma_ctl_path[] = {
+ { .procname = "net" },
+ { .procname = "rdma_ucm" },
+ { }
};
struct ucma_file {
@@ -583,6 +600,42 @@ static void ucma_copy_ib_route(struct rdma_ucm_query_route_resp *resp,
}
}
+static void ucma_copy_iboe_route(struct rdma_ucm_query_route_resp *resp,
+ struct rdma_route *route)
+{
+ struct rdma_dev_addr *dev_addr;
+ struct net_device *dev;
+ u16 vid = 0;
+
+ resp->num_paths = route->num_paths;
+ switch (route->num_paths) {
+ case 0:
+ dev_addr = &route->addr.dev_addr;
+ dev = dev_get_by_index(&init_net, dev_addr->bound_dev_if);
+ if (dev) {
+ vid = rdma_vlan_dev_vlan_id(dev);
+ dev_put(dev);
+ }
+
+ iboe_mac_vlan_to_ll((union ib_gid *) &resp->ib_route[0].dgid,
+ dev_addr->dst_dev_addr, vid);
+ iboe_addr_get_sgid(dev_addr,
+ (union ib_gid *) &resp->ib_route[0].sgid);
+ resp->ib_route[0].pkey = cpu_to_be16(0xffff);
+ break;
+ case 2:
+ ib_copy_path_rec_to_user(&resp->ib_route[1],
+ &route->path_rec[1]);
+ /* fall through */
+ case 1:
+ ib_copy_path_rec_to_user(&resp->ib_route[0],
+ &route->path_rec[0]);
+ break;
+ default:
+ break;
+ }
+}
+
static ssize_t ucma_query_route(struct ucma_file *file,
const char __user *inbuf,
int in_len, int out_len)
@@ -617,12 +670,17 @@ static ssize_t ucma_query_route(struct ucma_file *file,
resp.node_guid = (__force __u64) ctx->cm_id->device->node_guid;
resp.port_num = ctx->cm_id->port_num;
- switch (rdma_node_get_transport(ctx->cm_id->device->node_type)) {
- case RDMA_TRANSPORT_IB:
- ucma_copy_ib_route(&resp, &ctx->cm_id->route);
- break;
- default:
- break;
+ if (rdma_node_get_transport(ctx->cm_id->device->node_type) == RDMA_TRANSPORT_IB) {
+ switch (rdma_port_get_link_layer(ctx->cm_id->device, ctx->cm_id->port_num)) {
+ case IB_LINK_LAYER_INFINIBAND:
+ ucma_copy_ib_route(&resp, &ctx->cm_id->route);
+ break;
+ case IB_LINK_LAYER_ETHERNET:
+ ucma_copy_iboe_route(&resp, &ctx->cm_id->route);
+ break;
+ default:
+ break;
+ }
}
out:
@@ -686,8 +744,8 @@ static ssize_t ucma_listen(struct ucma_file *file, const char __user *inbuf,
if (IS_ERR(ctx))
return PTR_ERR(ctx);
- ctx->backlog = cmd.backlog > 0 && cmd.backlog < UCMA_MAX_BACKLOG ?
- cmd.backlog : UCMA_MAX_BACKLOG;
+ ctx->backlog = cmd.backlog > 0 && cmd.backlog < max_backlog ?
+ cmd.backlog : max_backlog;
ret = rdma_listen(ctx->cm_id, ctx->backlog);
ucma_put_ctx(ctx);
return ret;
@@ -1279,16 +1337,26 @@ static int __init ucma_init(void)
ret = device_create_file(ucma_misc.this_device, &dev_attr_abi_version);
if (ret) {
printk(KERN_ERR "rdma_ucm: couldn't create abi_version attr\n");
- goto err;
+ goto err1;
+ }
+
+ ucma_ctl_table_hdr = register_sysctl_paths(ucma_ctl_path, ucma_ctl_table);
+ if (!ucma_ctl_table_hdr) {
+ printk(KERN_ERR "rdma_ucm: couldn't register sysctl paths\n");
+ ret = -ENOMEM;
+ goto err2;
}
return 0;
-err:
+err2:
+ device_remove_file(ucma_misc.this_device, &dev_attr_abi_version);
+err1:
misc_deregister(&ucma_misc);
return ret;
}
static void __exit ucma_cleanup(void)
{
+ unregister_sysctl_table(ucma_ctl_table_hdr);
device_remove_file(ucma_misc.this_device, &dev_attr_abi_version);
misc_deregister(&ucma_misc);
idr_destroy(&ctx_idr);
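
The ucma change above replaces the hard-coded UCMA_MAX_BACKLOG of 128 with a sysctl-backed limit defaulting to 1024, registered under net/rdma_ucm. A usage illustration, with values taken from the ctl_table above:

	/*
	 *   # sysctl net.rdma_ucm.max_backlog          -> 1024 (default)
	 *   # sysctl -w net.rdma_ucm.max_backlog=4096
	 * Backlogs requested through ucma_listen() are clamped to this value
	 * before rdma_listen() is called.
	 */
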
diff --git a/drivers/infiniband/core/ud_header.c b/drivers/infiniband/core/ud_header.c
index 650b501eb142..9b737ff133e2 100644
--- a/drivers/infiniband/core/ud_header.c
+++ b/drivers/infiniband/core/ud_header.c
@@ -33,6 +33,7 @@
#include <linux/errno.h>
#include <linux/string.h>
+#include <linux/if_ether.h>
#include <rdma/ib_pack.h>
@@ -80,6 +81,40 @@ static const struct ib_field lrh_table[] = {
.size_bits = 16 }
};
+static const struct ib_field eth_table[] = {
+ { STRUCT_FIELD(eth, dmac_h),
+ .offset_words = 0,
+ .offset_bits = 0,
+ .size_bits = 32 },
+ { STRUCT_FIELD(eth, dmac_l),
+ .offset_words = 1,
+ .offset_bits = 0,
+ .size_bits = 16 },
+ { STRUCT_FIELD(eth, smac_h),
+ .offset_words = 1,
+ .offset_bits = 16,
+ .size_bits = 16 },
+ { STRUCT_FIELD(eth, smac_l),
+ .offset_words = 2,
+ .offset_bits = 0,
+ .size_bits = 32 },
+ { STRUCT_FIELD(eth, type),
+ .offset_words = 3,
+ .offset_bits = 0,
+ .size_bits = 16 }
+};
+
+static const struct ib_field vlan_table[] = {
+ { STRUCT_FIELD(vlan, tag),
+ .offset_words = 0,
+ .offset_bits = 0,
+ .size_bits = 16 },
+ { STRUCT_FIELD(vlan, type),
+ .offset_words = 0,
+ .offset_bits = 16,
+ .size_bits = 16 }
+};
+
static const struct ib_field grh_table[] = {
{ STRUCT_FIELD(grh, ip_version),
.offset_words = 0,
@@ -180,38 +215,43 @@ static const struct ib_field deth_table[] = {
/**
* ib_ud_header_init - Initialize UD header structure
* @payload_bytes:Length of packet payload
+ * @lrh_present: specify if LRH is present
+ * @eth_present: specify if Eth header is present
+ * @vlan_present: packet is tagged vlan
* @grh_present:GRH flag (if non-zero, GRH will be included)
- * @immediate_present: specify if immediate data should be used
+ * @immediate_present: specify if immediate data is present
* @header:Structure to initialize
- *
- * ib_ud_header_init() initializes the lrh.link_version, lrh.link_next_header,
- * lrh.packet_length, grh.ip_version, grh.payload_length,
- * grh.next_header, bth.opcode, bth.pad_count and
- * bth.transport_header_version fields of a &struct ib_ud_header given
- * the payload length and whether a GRH will be included.
*/
void ib_ud_header_init(int payload_bytes,
+ int lrh_present,
+ int eth_present,
+ int vlan_present,
int grh_present,
int immediate_present,
struct ib_ud_header *header)
{
- u16 packet_length;
-
memset(header, 0, sizeof *header);
- header->lrh.link_version = 0;
- header->lrh.link_next_header =
- grh_present ? IB_LNH_IBA_GLOBAL : IB_LNH_IBA_LOCAL;
- packet_length = (IB_LRH_BYTES +
- IB_BTH_BYTES +
- IB_DETH_BYTES +
- payload_bytes +
- 4 + /* ICRC */
- 3) / 4; /* round up */
-
- header->grh_present = grh_present;
+ if (lrh_present) {
+ u16 packet_length;
+
+ header->lrh.link_version = 0;
+ header->lrh.link_next_header =
+ grh_present ? IB_LNH_IBA_GLOBAL : IB_LNH_IBA_LOCAL;
+ packet_length = (IB_LRH_BYTES +
+ IB_BTH_BYTES +
+ IB_DETH_BYTES +
+ (grh_present ? IB_GRH_BYTES : 0) +
+ payload_bytes +
+ 4 + /* ICRC */
+ 3) / 4; /* round up */
+ header->lrh.packet_length = cpu_to_be16(packet_length);
+ }
+
+ if (vlan_present)
+ header->eth.type = cpu_to_be16(ETH_P_8021Q);
+
if (grh_present) {
- packet_length += IB_GRH_BYTES / 4;
header->grh.ip_version = 6;
header->grh.payload_length =
cpu_to_be16((IB_BTH_BYTES +
@@ -222,15 +262,18 @@ void ib_ud_header_init(int payload_bytes,
header->grh.next_header = 0x1b;
}
- header->lrh.packet_length = cpu_to_be16(packet_length);
-
- header->immediate_present = immediate_present;
if (immediate_present)
header->bth.opcode = IB_OPCODE_UD_SEND_ONLY_WITH_IMMEDIATE;
else
header->bth.opcode = IB_OPCODE_UD_SEND_ONLY;
header->bth.pad_count = (4 - payload_bytes) & 3;
header->bth.transport_header_version = 0;
+
+ header->lrh_present = lrh_present;
+ header->eth_present = eth_present;
+ header->vlan_present = vlan_present;
+ header->grh_present = grh_present;
+ header->immediate_present = immediate_present;
}
EXPORT_SYMBOL(ib_ud_header_init);
@@ -247,10 +290,21 @@ int ib_ud_header_pack(struct ib_ud_header *header,
{
int len = 0;
- ib_pack(lrh_table, ARRAY_SIZE(lrh_table),
- &header->lrh, buf);
- len += IB_LRH_BYTES;
-
+ if (header->lrh_present) {
+ ib_pack(lrh_table, ARRAY_SIZE(lrh_table),
+ &header->lrh, buf + len);
+ len += IB_LRH_BYTES;
+ }
+ if (header->eth_present) {
+ ib_pack(eth_table, ARRAY_SIZE(eth_table),
+ &header->eth, buf + len);
+ len += IB_ETH_BYTES;
+ }
+ if (header->vlan_present) {
+ ib_pack(vlan_table, ARRAY_SIZE(vlan_table),
+ &header->vlan, buf + len);
+ len += IB_VLAN_BYTES;
+ }
if (header->grh_present) {
ib_pack(grh_table, ARRAY_SIZE(grh_table),
&header->grh, buf + len);
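
ib_ud_header_init() above gains per-section flags so the same packer can emit native IB (LRH-led) and RoCE (Ethernet/VLAN-led) UD headers, and ib_ud_header_pack() then writes only the sections marked present. A hedged call sketch, with payload_bytes, grh_present and vlan_present standing in for caller state; the argument order follows the new prototype:

	struct ib_ud_header hdr;

	/* Native IB port: LRH, optional GRH, no Ethernet framing. */
	ib_ud_header_init(payload_bytes, 1, 0, 0, grh_present, 0, &hdr);

	/* RoCE port: Ethernet (optionally VLAN-tagged) framing; the GRH is
	 * always carried, as ib_init_ah_from_path() now forces it on
	 * Ethernet link layers. */
	ib_ud_header_init(payload_bytes, 0, 1, vlan_present, 1, 0, &hdr);
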
diff --git a/drivers/infiniband/core/user_mad.c b/drivers/infiniband/core/user_mad.c
index 5fa856909511..cd1996d0ad08 100644
--- a/drivers/infiniband/core/user_mad.c
+++ b/drivers/infiniband/core/user_mad.c
@@ -1022,7 +1022,7 @@ static int ib_umad_init_port(struct ib_device *device, int port_num,
port->ib_dev = device;
port->port_num = port_num;
- init_MUTEX(&port->sm_sem);
+ sema_init(&port->sm_sem, 1);
mutex_init(&port->file_mutex);
INIT_LIST_HEAD(&port->file_list);
diff --git a/drivers/infiniband/core/uverbs_cmd.c b/drivers/infiniband/core/uverbs_cmd.c
index 6fcfbeb24a23..b342248aec05 100644
--- a/drivers/infiniband/core/uverbs_cmd.c
+++ b/drivers/infiniband/core/uverbs_cmd.c
@@ -460,6 +460,8 @@ ssize_t ib_uverbs_query_port(struct ib_uverbs_file *file,
resp.active_width = attr.active_width;
resp.active_speed = attr.active_speed;
resp.phys_state = attr.phys_state;
+ resp.link_layer = rdma_port_get_link_layer(file->device->ib_dev,
+ cmd.port_num);
if (copy_to_user((void __user *) (unsigned long) cmd.response,
&resp, sizeof resp))
diff --git a/drivers/infiniband/core/uverbs_marshall.c b/drivers/infiniband/core/uverbs_marshall.c
index 5440da0e59b4..1b1146f87124 100644
--- a/drivers/infiniband/core/uverbs_marshall.c
+++ b/drivers/infiniband/core/uverbs_marshall.c
@@ -40,18 +40,21 @@ void ib_copy_ah_attr_to_user(struct ib_uverbs_ah_attr *dst,
dst->grh.sgid_index = src->grh.sgid_index;
dst->grh.hop_limit = src->grh.hop_limit;
dst->grh.traffic_class = src->grh.traffic_class;
+ memset(&dst->grh.reserved, 0, sizeof(dst->grh.reserved));
dst->dlid = src->dlid;
dst->sl = src->sl;
dst->src_path_bits = src->src_path_bits;
dst->static_rate = src->static_rate;
dst->is_global = src->ah_flags & IB_AH_GRH ? 1 : 0;
dst->port_num = src->port_num;
+ dst->reserved = 0;
}
EXPORT_SYMBOL(ib_copy_ah_attr_to_user);
void ib_copy_qp_attr_to_user(struct ib_uverbs_qp_attr *dst,
struct ib_qp_attr *src)
{
+ dst->qp_state = src->qp_state;
dst->cur_qp_state = src->cur_qp_state;
dst->path_mtu = src->path_mtu;
dst->path_mig_state = src->path_mig_state;
@@ -83,6 +86,7 @@ void ib_copy_qp_attr_to_user(struct ib_uverbs_qp_attr *dst,
dst->rnr_retry = src->rnr_retry;
dst->alt_port_num = src->alt_port_num;
dst->alt_timeout = src->alt_timeout;
+ memset(dst->reserved, 0, sizeof(dst->reserved));
}
EXPORT_SYMBOL(ib_copy_qp_attr_to_user);
diff --git a/drivers/infiniband/core/verbs.c b/drivers/infiniband/core/verbs.c
index e0fa22238715..af7a8b08b2e9 100644
--- a/drivers/infiniband/core/verbs.c
+++ b/drivers/infiniband/core/verbs.c
@@ -94,6 +94,22 @@ rdma_node_get_transport(enum rdma_node_type node_type)
}
EXPORT_SYMBOL(rdma_node_get_transport);
+enum rdma_link_layer rdma_port_get_link_layer(struct ib_device *device, u8 port_num)
+{
+ if (device->get_link_layer)
+ return device->get_link_layer(device, port_num);
+
+ switch (rdma_node_get_transport(device->node_type)) {
+ case RDMA_TRANSPORT_IB:
+ return IB_LINK_LAYER_INFINIBAND;
+ case RDMA_TRANSPORT_IWARP:
+ return IB_LINK_LAYER_ETHERNET;
+ default:
+ return IB_LINK_LAYER_UNSPECIFIED;
+ }
+}
+EXPORT_SYMBOL(rdma_port_get_link_layer);
+
/* Protection domains */
struct ib_pd *ib_alloc_pd(struct ib_device *device)
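
rdma_port_get_link_layer() above is the dispatch point the rest of this series keys off: devices that implement get_link_layer() answer per port, everything else is inferred from the node transport. A minimal helper sketch showing the pattern used in cma.c and mad.c earlier in the patch (the helper name is illustrative, not part of the patch):

	/* A RoCE/IBoE port: IB transport carried over an Ethernet link layer. */
	static bool port_is_iboe(struct ib_device *device, u8 port_num)
	{
		return rdma_node_get_transport(device->node_type) == RDMA_TRANSPORT_IB &&
		       rdma_port_get_link_layer(device, port_num) == IB_LINK_LAYER_ETHERNET;
	}
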
diff --git a/drivers/infiniband/hw/amso1100/Kbuild b/drivers/infiniband/hw/amso1100/Kbuild
index 06964c4af849..950dfabcd89d 100644
--- a/drivers/infiniband/hw/amso1100/Kbuild
+++ b/drivers/infiniband/hw/amso1100/Kbuild
@@ -1,6 +1,4 @@
-ifdef CONFIG_INFINIBAND_AMSO1100_DEBUG
-EXTRA_CFLAGS += -DDEBUG
-endif
+ccflags-$(CONFIG_INFINIBAND_AMSO1100_DEBUG) := -DDEBUG
obj-$(CONFIG_INFINIBAND_AMSO1100) += iw_c2.o
diff --git a/drivers/infiniband/hw/amso1100/c2_intr.c b/drivers/infiniband/hw/amso1100/c2_intr.c
index 3b5095470cb3..0ebe4e806b86 100644
--- a/drivers/infiniband/hw/amso1100/c2_intr.c
+++ b/drivers/infiniband/hw/amso1100/c2_intr.c
@@ -62,8 +62,8 @@ void c2_rnic_interrupt(struct c2_dev *c2dev)
static void handle_mq(struct c2_dev *c2dev, u32 mq_index)
{
if (c2dev->qptr_array[mq_index] == NULL) {
- pr_debug(KERN_INFO "handle_mq: stray activity for mq_index=%d\n",
- mq_index);
+ pr_debug("handle_mq: stray activity for mq_index=%d\n",
+ mq_index);
return;
}
diff --git a/drivers/infiniband/hw/cxgb3/Makefile b/drivers/infiniband/hw/cxgb3/Makefile
index 7e7b5a66f042..621619c794e5 100644
--- a/drivers/infiniband/hw/cxgb3/Makefile
+++ b/drivers/infiniband/hw/cxgb3/Makefile
@@ -1,10 +1,8 @@
-EXTRA_CFLAGS += -Idrivers/net/cxgb3
+ccflags-y := -Idrivers/net/cxgb3
obj-$(CONFIG_INFINIBAND_CXGB3) += iw_cxgb3.o
iw_cxgb3-y := iwch_cm.o iwch_ev.o iwch_cq.o iwch_qp.o iwch_mem.o \
iwch_provider.o iwch.o cxio_hal.o cxio_resource.o
-ifdef CONFIG_INFINIBAND_CXGB3_DEBUG
-EXTRA_CFLAGS += -DDEBUG
-endif
+ccflags-$(CONFIG_INFINIBAND_CXGB3_DEBUG) += -DDEBUG
diff --git a/drivers/infiniband/hw/cxgb3/cxio_hal.c b/drivers/infiniband/hw/cxgb3/cxio_hal.c
index 005b7b52bc1e..09dda0b8740e 100644
--- a/drivers/infiniband/hw/cxgb3/cxio_hal.c
+++ b/drivers/infiniband/hw/cxgb3/cxio_hal.c
@@ -160,6 +160,7 @@ int cxio_create_cq(struct cxio_rdev *rdev_p, struct t3_cq *cq, int kernel)
struct rdma_cq_setup setup;
int size = (1UL << (cq->size_log2)) * sizeof(struct t3_cqe);
+ size += 1; /* one extra page for storing cq-in-err state */
cq->cqid = cxio_hal_get_cqid(rdev_p->rscp);
if (!cq->cqid)
return -ENOMEM;
diff --git a/drivers/infiniband/hw/cxgb3/cxio_wr.h b/drivers/infiniband/hw/cxgb3/cxio_wr.h
index e5ddb63e7d23..4bb997aa39d0 100644
--- a/drivers/infiniband/hw/cxgb3/cxio_wr.h
+++ b/drivers/infiniband/hw/cxgb3/cxio_wr.h
@@ -728,6 +728,22 @@ struct t3_cq {
#define CQ_VLD_ENTRY(ptr,size_log2,cqe) (Q_GENBIT(ptr,size_log2) == \
CQE_GENBIT(*cqe))
+struct t3_cq_status_page {
+ u32 cq_err;
+};
+
+static inline int cxio_cq_in_error(struct t3_cq *cq)
+{
+ return ((struct t3_cq_status_page *)
+ &cq->queue[1 << cq->size_log2])->cq_err;
+}
+
+static inline void cxio_set_cq_in_error(struct t3_cq *cq)
+{
+ ((struct t3_cq_status_page *)
+ &cq->queue[1 << cq->size_log2])->cq_err = 1;
+}
+
static inline void cxio_set_wq_in_error(struct t3_wq *wq)
{
wq->queue->wq_in_err.err |= 1;
diff --git a/drivers/infiniband/hw/cxgb3/iwch_cm.c b/drivers/infiniband/hw/cxgb3/iwch_cm.c
index 13c88871dc3b..d02dcc6e5963 100644
--- a/drivers/infiniband/hw/cxgb3/iwch_cm.c
+++ b/drivers/infiniband/hw/cxgb3/iwch_cm.c
@@ -1093,8 +1093,8 @@ static int tx_ack(struct t3cdev *tdev, struct sk_buff *skb, void *ctx)
PDBG("%s ep %p credits %u\n", __func__, ep, credits);
if (credits == 0) {
- PDBG(KERN_ERR "%s 0 credit ack ep %p state %u\n",
- __func__, ep, state_read(&ep->com));
+ PDBG("%s 0 credit ack ep %p state %u\n",
+ __func__, ep, state_read(&ep->com));
return CPL_RET_BUF_DONE;
}
diff --git a/drivers/infiniband/hw/cxgb3/iwch_ev.c b/drivers/infiniband/hw/cxgb3/iwch_ev.c
index 6afc89e7572c..71e0d845da3d 100644
--- a/drivers/infiniband/hw/cxgb3/iwch_ev.c
+++ b/drivers/infiniband/hw/cxgb3/iwch_ev.c
@@ -76,6 +76,14 @@ static void post_qp_event(struct iwch_dev *rnicp, struct iwch_cq *chp,
atomic_inc(&qhp->refcnt);
spin_unlock(&rnicp->lock);
+ if (qhp->attr.state == IWCH_QP_STATE_RTS) {
+ attrs.next_state = IWCH_QP_STATE_TERMINATE;
+ iwch_modify_qp(qhp->rhp, qhp, IWCH_QP_ATTR_NEXT_STATE,
+ &attrs, 1);
+ if (send_term)
+ iwch_post_terminate(qhp, rsp_msg);
+ }
+
event.event = ib_event;
event.device = chp->ibcq.device;
if (ib_event == IB_EVENT_CQ_ERR)
@@ -86,13 +94,7 @@ static void post_qp_event(struct iwch_dev *rnicp, struct iwch_cq *chp,
if (qhp->ibqp.event_handler)
(*qhp->ibqp.event_handler)(&event, qhp->ibqp.qp_context);
- if (qhp->attr.state == IWCH_QP_STATE_RTS) {
- attrs.next_state = IWCH_QP_STATE_TERMINATE;
- iwch_modify_qp(qhp->rhp, qhp, IWCH_QP_ATTR_NEXT_STATE,
- &attrs, 1);
- if (send_term)
- iwch_post_terminate(qhp, rsp_msg);
- }
+ (*chp->ibcq.comp_handler)(&chp->ibcq, chp->ibcq.cq_context);
if (atomic_dec_and_test(&qhp->refcnt))
wake_up(&qhp->wait);
@@ -179,7 +181,6 @@ void iwch_ev_dispatch(struct cxio_rdev *rdev_p, struct sk_buff *skb)
case TPT_ERR_BOUND:
case TPT_ERR_INVALIDATE_SHARED_MR:
case TPT_ERR_INVALIDATE_MR_WITH_MW_BOUND:
- (*chp->ibcq.comp_handler)(&chp->ibcq, chp->ibcq.cq_context);
post_qp_event(rnicp, chp, rsp_msg, IB_EVENT_QP_ACCESS_ERR, 1);
break;
diff --git a/drivers/infiniband/hw/cxgb3/iwch_provider.c b/drivers/infiniband/hw/cxgb3/iwch_provider.c
index fca0b4b747e4..2e2741307af4 100644
--- a/drivers/infiniband/hw/cxgb3/iwch_provider.c
+++ b/drivers/infiniband/hw/cxgb3/iwch_provider.c
@@ -154,6 +154,8 @@ static struct ib_cq *iwch_create_cq(struct ib_device *ibdev, int entries, int ve
struct iwch_create_cq_resp uresp;
struct iwch_create_cq_req ureq;
struct iwch_ucontext *ucontext = NULL;
+ static int warned;
+ size_t resplen;
PDBG("%s ib_dev %p entries %d\n", __func__, ibdev, entries);
rhp = to_iwch_dev(ibdev);
@@ -217,15 +219,26 @@ static struct ib_cq *iwch_create_cq(struct ib_device *ibdev, int entries, int ve
uresp.key = ucontext->key;
ucontext->key += PAGE_SIZE;
spin_unlock(&ucontext->mmap_lock);
- if (ib_copy_to_udata(udata, &uresp, sizeof (uresp))) {
+ mm->key = uresp.key;
+ mm->addr = virt_to_phys(chp->cq.queue);
+ if (udata->outlen < sizeof uresp) {
+ if (!warned++)
+ printk(KERN_WARNING MOD "Warning - "
+ "downlevel libcxgb3 (non-fatal).\n");
+ mm->len = PAGE_ALIGN((1UL << uresp.size_log2) *
+ sizeof(struct t3_cqe));
+ resplen = sizeof(struct iwch_create_cq_resp_v0);
+ } else {
+ mm->len = PAGE_ALIGN(((1UL << uresp.size_log2) + 1) *
+ sizeof(struct t3_cqe));
+ uresp.memsize = mm->len;
+ resplen = sizeof uresp;
+ }
+ if (ib_copy_to_udata(udata, &uresp, resplen)) {
kfree(mm);
iwch_destroy_cq(&chp->ibcq);
return ERR_PTR(-EFAULT);
}
- mm->key = uresp.key;
- mm->addr = virt_to_phys(chp->cq.queue);
- mm->len = PAGE_ALIGN((1UL << uresp.size_log2) *
- sizeof (struct t3_cqe));
insert_mmap(ucontext, mm);
}
PDBG("created cqid 0x%0x chp %p size 0x%0x, dma_addr 0x%0llx\n",
@@ -1414,6 +1427,7 @@ int iwch_register_device(struct iwch_dev *dev)
dev->ibdev.post_send = iwch_post_send;
dev->ibdev.post_recv = iwch_post_receive;
dev->ibdev.get_protocol_stats = iwch_get_mib;
+ dev->ibdev.uverbs_abi_ver = IWCH_UVERBS_ABI_VERSION;
dev->ibdev.iwcm = kmalloc(sizeof(struct iw_cm_verbs), GFP_KERNEL);
if (!dev->ibdev.iwcm)
diff --git a/drivers/infiniband/hw/cxgb3/iwch_qp.c b/drivers/infiniband/hw/cxgb3/iwch_qp.c
index c64d27bf2c15..0993137181d7 100644
--- a/drivers/infiniband/hw/cxgb3/iwch_qp.c
+++ b/drivers/infiniband/hw/cxgb3/iwch_qp.c
@@ -802,14 +802,12 @@ int iwch_post_terminate(struct iwch_qp *qhp, struct respQ_msg_t *rsp_msg)
/*
* Assumes qhp lock is held.
*/
-static void __flush_qp(struct iwch_qp *qhp, unsigned long *flag)
+static void __flush_qp(struct iwch_qp *qhp, struct iwch_cq *rchp,
+ struct iwch_cq *schp, unsigned long *flag)
{
- struct iwch_cq *rchp, *schp;
int count;
int flushed;
- rchp = get_chp(qhp->rhp, qhp->attr.rcq);
- schp = get_chp(qhp->rhp, qhp->attr.scq);
PDBG("%s qhp %p rchp %p schp %p\n", __func__, qhp, rchp, schp);
/* take a ref on the qhp since we must release the lock */
@@ -847,10 +845,23 @@ static void __flush_qp(struct iwch_qp *qhp, unsigned long *flag)
static void flush_qp(struct iwch_qp *qhp, unsigned long *flag)
{
- if (qhp->ibqp.uobject)
+ struct iwch_cq *rchp, *schp;
+
+ rchp = get_chp(qhp->rhp, qhp->attr.rcq);
+ schp = get_chp(qhp->rhp, qhp->attr.scq);
+
+ if (qhp->ibqp.uobject) {
cxio_set_wq_in_error(&qhp->wq);
- else
- __flush_qp(qhp, flag);
+ cxio_set_cq_in_error(&rchp->cq);
+ (*rchp->ibcq.comp_handler)(&rchp->ibcq, rchp->ibcq.cq_context);
+ if (schp != rchp) {
+ cxio_set_cq_in_error(&schp->cq);
+ (*schp->ibcq.comp_handler)(&schp->ibcq,
+ schp->ibcq.cq_context);
+ }
+ return;
+ }
+ __flush_qp(qhp, rchp, schp, flag);
}
diff --git a/drivers/infiniband/hw/cxgb3/iwch_user.h b/drivers/infiniband/hw/cxgb3/iwch_user.h
index cb7086f558c1..a277c31fcaf7 100644
--- a/drivers/infiniband/hw/cxgb3/iwch_user.h
+++ b/drivers/infiniband/hw/cxgb3/iwch_user.h
@@ -45,10 +45,18 @@ struct iwch_create_cq_req {
__u64 user_rptr_addr;
};
+struct iwch_create_cq_resp_v0 {
+ __u64 key;
+ __u32 cqid;
+ __u32 size_log2;
+};
+
struct iwch_create_cq_resp {
__u64 key;
__u32 cqid;
__u32 size_log2;
+ __u32 memsize;
+ __u32 reserved;
};
struct iwch_create_qp_resp {
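
The grown iwch_create_cq_resp above pairs with the udata->outlen check in iwch_create_cq(): an old libcxgb3 passes a v0-sized response buffer, so the kernel replies with the frozen v0 layout and the shorter mmap length, while newer userspace receives memsize covering the extra CQ-in-error status slot. A summary of the selection logic shown earlier in this patch:

	/* In iwch_create_cq():
	 *   udata->outlen < sizeof(struct iwch_create_cq_resp)
	 *       -> reply with iwch_create_cq_resp_v0; mm->len covers only
	 *          (1 << size_log2) CQEs (downlevel libcxgb3 warning printed);
	 *   otherwise
	 *       -> full reply with memsize; mm->len additionally maps the
	 *          slot read by cxio_cq_in_error()/cxio_set_cq_in_error().
	 * The reserved field keeps the new struct size a multiple of 8 bytes.
	 */
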
diff --git a/drivers/infiniband/hw/cxgb4/Makefile b/drivers/infiniband/hw/cxgb4/Makefile
index e31a499f0172..cd20b1342aec 100644
--- a/drivers/infiniband/hw/cxgb4/Makefile
+++ b/drivers/infiniband/hw/cxgb4/Makefile
@@ -1,4 +1,4 @@
-EXTRA_CFLAGS += -Idrivers/net/cxgb4
+ccflags-y := -Idrivers/net/cxgb4
obj-$(CONFIG_INFINIBAND_CXGB4) += iw_cxgb4.o
diff --git a/drivers/infiniband/hw/cxgb4/cm.c b/drivers/infiniband/hw/cxgb4/cm.c
index 32d352a88d50..0dc62b1438be 100644
--- a/drivers/infiniband/hw/cxgb4/cm.c
+++ b/drivers/infiniband/hw/cxgb4/cm.c
@@ -117,9 +117,9 @@ static int rcv_win = 256 * 1024;
module_param(rcv_win, int, 0644);
MODULE_PARM_DESC(rcv_win, "TCP receive window in bytes (default=256KB)");
-static int snd_win = 32 * 1024;
+static int snd_win = 128 * 1024;
module_param(snd_win, int, 0644);
-MODULE_PARM_DESC(snd_win, "TCP send window in bytes (default=32KB)");
+MODULE_PARM_DESC(snd_win, "TCP send window in bytes (default=128KB)");
static struct workqueue_struct *workq;
@@ -172,7 +172,7 @@ static int c4iw_l2t_send(struct c4iw_rdev *rdev, struct sk_buff *skb,
error = cxgb4_l2t_send(rdev->lldi.ports[0], skb, l2e);
if (error < 0)
kfree_skb(skb);
- return error;
+ return error < 0 ? error : 0;
}
int c4iw_ofld_send(struct c4iw_rdev *rdev, struct sk_buff *skb)
@@ -187,7 +187,7 @@ int c4iw_ofld_send(struct c4iw_rdev *rdev, struct sk_buff *skb)
error = cxgb4_ofld_send(rdev->lldi.ports[0], skb);
if (error < 0)
kfree_skb(skb);
- return error;
+ return error < 0 ? error : 0;
}
static void release_tid(struct c4iw_rdev *rdev, u32 hwtid, struct sk_buff *skb)
@@ -219,12 +219,11 @@ static void set_emss(struct c4iw_ep *ep, u16 opt)
static enum c4iw_ep_state state_read(struct c4iw_ep_common *epc)
{
- unsigned long flags;
enum c4iw_ep_state state;
- spin_lock_irqsave(&epc->lock, flags);
+ mutex_lock(&epc->mutex);
state = epc->state;
- spin_unlock_irqrestore(&epc->lock, flags);
+ mutex_unlock(&epc->mutex);
return state;
}
@@ -235,12 +234,10 @@ static void __state_set(struct c4iw_ep_common *epc, enum c4iw_ep_state new)
static void state_set(struct c4iw_ep_common *epc, enum c4iw_ep_state new)
{
- unsigned long flags;
-
- spin_lock_irqsave(&epc->lock, flags);
+ mutex_lock(&epc->mutex);
PDBG("%s - %s -> %s\n", __func__, states[epc->state], states[new]);
__state_set(epc, new);
- spin_unlock_irqrestore(&epc->lock, flags);
+ mutex_unlock(&epc->mutex);
return;
}
@@ -251,8 +248,8 @@ static void *alloc_ep(int size, gfp_t gfp)
epc = kzalloc(size, gfp);
if (epc) {
kref_init(&epc->kref);
- spin_lock_init(&epc->lock);
- init_waitqueue_head(&epc->waitq);
+ mutex_init(&epc->mutex);
+ c4iw_init_wr_wait(&epc->wr_wait);
}
PDBG("%s alloc ep %p\n", __func__, epc);
return epc;
@@ -1131,7 +1128,6 @@ static int abort_rpl(struct c4iw_dev *dev, struct sk_buff *skb)
{
struct c4iw_ep *ep;
struct cpl_abort_rpl_rss *rpl = cplhdr(skb);
- unsigned long flags;
int release = 0;
unsigned int tid = GET_TID(rpl);
struct tid_info *t = dev->rdev.lldi.tids;
@@ -1139,7 +1135,7 @@ static int abort_rpl(struct c4iw_dev *dev, struct sk_buff *skb)
ep = lookup_tid(t, tid);
PDBG("%s ep %p tid %u\n", __func__, ep, ep->hwtid);
BUG_ON(!ep);
- spin_lock_irqsave(&ep->com.lock, flags);
+ mutex_lock(&ep->com.mutex);
switch (ep->com.state) {
case ABORTING:
__state_set(&ep->com, DEAD);
@@ -1150,7 +1146,7 @@ static int abort_rpl(struct c4iw_dev *dev, struct sk_buff *skb)
__func__, ep, ep->com.state);
break;
}
- spin_unlock_irqrestore(&ep->com.lock, flags);
+ mutex_unlock(&ep->com.mutex);
if (release)
release_ep_resources(ep);
@@ -1213,9 +1209,9 @@ static int pass_open_rpl(struct c4iw_dev *dev, struct sk_buff *skb)
}
PDBG("%s ep %p status %d error %d\n", __func__, ep,
rpl->status, status2errno(rpl->status));
- ep->com.rpl_err = status2errno(rpl->status);
- ep->com.rpl_done = 1;
- wake_up(&ep->com.waitq);
+ ep->com.wr_wait.ret = status2errno(rpl->status);
+ ep->com.wr_wait.done = 1;
+ wake_up(&ep->com.wr_wait.wait);
return 0;
}
@@ -1249,9 +1245,9 @@ static int close_listsrv_rpl(struct c4iw_dev *dev, struct sk_buff *skb)
struct c4iw_listen_ep *ep = lookup_stid(t, stid);
PDBG("%s ep %p\n", __func__, ep);
- ep->com.rpl_err = status2errno(rpl->status);
- ep->com.rpl_done = 1;
- wake_up(&ep->com.waitq);
+ ep->com.wr_wait.ret = status2errno(rpl->status);
+ ep->com.wr_wait.done = 1;
+ wake_up(&ep->com.wr_wait.wait);
return 0;
}
@@ -1478,7 +1474,6 @@ static int peer_close(struct c4iw_dev *dev, struct sk_buff *skb)
struct cpl_peer_close *hdr = cplhdr(skb);
struct c4iw_ep *ep;
struct c4iw_qp_attributes attrs;
- unsigned long flags;
int disconnect = 1;
int release = 0;
int closing = 0;
@@ -1489,7 +1484,7 @@ static int peer_close(struct c4iw_dev *dev, struct sk_buff *skb)
PDBG("%s ep %p tid %u\n", __func__, ep, ep->hwtid);
dst_confirm(ep->dst);
- spin_lock_irqsave(&ep->com.lock, flags);
+ mutex_lock(&ep->com.mutex);
switch (ep->com.state) {
case MPA_REQ_WAIT:
__state_set(&ep->com, CLOSING);
@@ -1507,17 +1502,17 @@ static int peer_close(struct c4iw_dev *dev, struct sk_buff *skb)
* in rdma connection migration (see c4iw_accept_cr()).
*/
__state_set(&ep->com, CLOSING);
- ep->com.rpl_done = 1;
- ep->com.rpl_err = -ECONNRESET;
+ ep->com.wr_wait.done = 1;
+ ep->com.wr_wait.ret = -ECONNRESET;
PDBG("waking up ep %p tid %u\n", ep, ep->hwtid);
- wake_up(&ep->com.waitq);
+ wake_up(&ep->com.wr_wait.wait);
break;
case MPA_REP_SENT:
__state_set(&ep->com, CLOSING);
- ep->com.rpl_done = 1;
- ep->com.rpl_err = -ECONNRESET;
+ ep->com.wr_wait.done = 1;
+ ep->com.wr_wait.ret = -ECONNRESET;
PDBG("waking up ep %p tid %u\n", ep, ep->hwtid);
- wake_up(&ep->com.waitq);
+ wake_up(&ep->com.wr_wait.wait);
break;
case FPDU_MODE:
start_ep_timer(ep);
@@ -1550,7 +1545,7 @@ static int peer_close(struct c4iw_dev *dev, struct sk_buff *skb)
default:
BUG_ON(1);
}
- spin_unlock_irqrestore(&ep->com.lock, flags);
+ mutex_unlock(&ep->com.mutex);
if (closing) {
attrs.next_state = C4IW_QP_STATE_CLOSING;
c4iw_modify_qp(ep->com.qp->rhp, ep->com.qp,
@@ -1581,7 +1576,6 @@ static int peer_abort(struct c4iw_dev *dev, struct sk_buff *skb)
struct c4iw_qp_attributes attrs;
int ret;
int release = 0;
- unsigned long flags;
struct tid_info *t = dev->rdev.lldi.tids;
unsigned int tid = GET_TID(req);
@@ -1591,9 +1585,17 @@ static int peer_abort(struct c4iw_dev *dev, struct sk_buff *skb)
ep->hwtid);
return 0;
}
- spin_lock_irqsave(&ep->com.lock, flags);
PDBG("%s ep %p tid %u state %u\n", __func__, ep, ep->hwtid,
ep->com.state);
+
+ /*
+ * Wake up any threads in rdma_init() or rdma_fini().
+ */
+ ep->com.wr_wait.done = 1;
+ ep->com.wr_wait.ret = -ECONNRESET;
+ wake_up(&ep->com.wr_wait.wait);
+
+ mutex_lock(&ep->com.mutex);
switch (ep->com.state) {
case CONNECTING:
break;
@@ -1605,23 +1607,8 @@ static int peer_abort(struct c4iw_dev *dev, struct sk_buff *skb)
connect_reply_upcall(ep, -ECONNRESET);
break;
case MPA_REP_SENT:
- ep->com.rpl_done = 1;
- ep->com.rpl_err = -ECONNRESET;
- PDBG("waking up ep %p\n", ep);
- wake_up(&ep->com.waitq);
break;
case MPA_REQ_RCVD:
-
- /*
- * We're gonna mark this puppy DEAD, but keep
- * the reference on it until the ULP accepts or
- * rejects the CR. Also wake up anyone waiting
- * in rdma connection migration (see c4iw_accept_cr()).
- */
- ep->com.rpl_done = 1;
- ep->com.rpl_err = -ECONNRESET;
- PDBG("waking up ep %p tid %u\n", ep, ep->hwtid);
- wake_up(&ep->com.waitq);
break;
case MORIBUND:
case CLOSING:
@@ -1644,7 +1631,7 @@ static int peer_abort(struct c4iw_dev *dev, struct sk_buff *skb)
break;
case DEAD:
PDBG("%s PEER_ABORT IN DEAD STATE!!!!\n", __func__);
- spin_unlock_irqrestore(&ep->com.lock, flags);
+ mutex_unlock(&ep->com.mutex);
return 0;
default:
BUG_ON(1);
@@ -1655,7 +1642,7 @@ static int peer_abort(struct c4iw_dev *dev, struct sk_buff *skb)
__state_set(&ep->com, DEAD);
release = 1;
}
- spin_unlock_irqrestore(&ep->com.lock, flags);
+ mutex_unlock(&ep->com.mutex);
rpl_skb = get_skb(skb, sizeof(*rpl), GFP_KERNEL);
if (!rpl_skb) {
@@ -1681,7 +1668,6 @@ static int close_con_rpl(struct c4iw_dev *dev, struct sk_buff *skb)
struct c4iw_ep *ep;
struct c4iw_qp_attributes attrs;
struct cpl_close_con_rpl *rpl = cplhdr(skb);
- unsigned long flags;
int release = 0;
struct tid_info *t = dev->rdev.lldi.tids;
unsigned int tid = GET_TID(rpl);
@@ -1692,7 +1678,7 @@ static int close_con_rpl(struct c4iw_dev *dev, struct sk_buff *skb)
BUG_ON(!ep);
/* The cm_id may be null if we failed to connect */
- spin_lock_irqsave(&ep->com.lock, flags);
+ mutex_lock(&ep->com.mutex);
switch (ep->com.state) {
case CLOSING:
__state_set(&ep->com, MORIBUND);
@@ -1717,7 +1703,7 @@ static int close_con_rpl(struct c4iw_dev *dev, struct sk_buff *skb)
BUG_ON(1);
break;
}
- spin_unlock_irqrestore(&ep->com.lock, flags);
+ mutex_unlock(&ep->com.mutex);
if (release)
release_ep_resources(ep);
return 0;
@@ -1725,23 +1711,24 @@ static int close_con_rpl(struct c4iw_dev *dev, struct sk_buff *skb)
static int terminate(struct c4iw_dev *dev, struct sk_buff *skb)
{
- struct c4iw_ep *ep;
- struct cpl_rdma_terminate *term = cplhdr(skb);
+ struct cpl_rdma_terminate *rpl = cplhdr(skb);
struct tid_info *t = dev->rdev.lldi.tids;
- unsigned int tid = GET_TID(term);
+ unsigned int tid = GET_TID(rpl);
+ struct c4iw_ep *ep;
+ struct c4iw_qp_attributes attrs;
ep = lookup_tid(t, tid);
+ BUG_ON(!ep);
- if (state_read(&ep->com) != FPDU_MODE)
- return 0;
+ if (ep->com.qp) {
+ printk(KERN_WARNING MOD "TERM received tid %u qpid %u\n", tid,
+ ep->com.qp->wq.sq.qid);
+ attrs.next_state = C4IW_QP_STATE_TERMINATE;
+ c4iw_modify_qp(ep->com.qp->rhp, ep->com.qp,
+ C4IW_QP_ATTR_NEXT_STATE, &attrs, 1);
+ } else
+ printk(KERN_WARNING MOD "TERM received tid %u no qp\n", tid);
- PDBG("%s ep %p tid %u\n", __func__, ep, ep->hwtid);
- skb_pull(skb, sizeof *term);
- PDBG("%s saving %d bytes of term msg\n", __func__, skb->len);
- skb_copy_from_linear_data(skb, ep->com.qp->attr.terminate_buffer,
- skb->len);
- ep->com.qp->attr.terminate_msg_len = skb->len;
- ep->com.qp->attr.is_terminate_local = 0;
return 0;
}
@@ -1762,8 +1749,8 @@ static int fw4_ack(struct c4iw_dev *dev, struct sk_buff *skb)
ep = lookup_tid(t, tid);
PDBG("%s ep %p tid %u credits %u\n", __func__, ep, ep->hwtid, credits);
if (credits == 0) {
- PDBG(KERN_ERR "%s 0 credit ack ep %p tid %u state %u\n",
- __func__, ep, ep->hwtid, state_read(&ep->com));
+ PDBG("%s 0 credit ack ep %p tid %u state %u\n",
+ __func__, ep, ep->hwtid, state_read(&ep->com));
return 0;
}
@@ -2042,6 +2029,7 @@ int c4iw_create_listen(struct iw_cm_id *cm_id, int backlog)
}
state_set(&ep->com, LISTEN);
+ c4iw_init_wr_wait(&ep->com.wr_wait);
err = cxgb4_create_server(ep->com.dev->rdev.lldi.ports[0], ep->stid,
ep->com.local_addr.sin_addr.s_addr,
ep->com.local_addr.sin_port,
@@ -2050,15 +2038,8 @@ int c4iw_create_listen(struct iw_cm_id *cm_id, int backlog)
goto fail3;
/* wait for pass_open_rpl */
- wait_event_timeout(ep->com.waitq, ep->com.rpl_done, C4IW_WR_TO);
- if (ep->com.rpl_done)
- err = ep->com.rpl_err;
- else {
- printk(KERN_ERR MOD "Device %s not responding!\n",
- pci_name(ep->com.dev->rdev.lldi.pdev));
- ep->com.dev->rdev.flags = T4_FATAL_ERROR;
- err = -EIO;
- }
+ err = c4iw_wait_for_reply(&ep->com.dev->rdev, &ep->com.wr_wait, 0, 0,
+ __func__);
if (!err) {
cm_id->provider_data = ep;
goto out;
@@ -2082,20 +2063,12 @@ int c4iw_destroy_listen(struct iw_cm_id *cm_id)
might_sleep();
state_set(&ep->com, DEAD);
- ep->com.rpl_done = 0;
- ep->com.rpl_err = 0;
+ c4iw_init_wr_wait(&ep->com.wr_wait);
err = listen_stop(ep);
if (err)
goto done;
- wait_event_timeout(ep->com.waitq, ep->com.rpl_done, C4IW_WR_TO);
- if (ep->com.rpl_done)
- err = ep->com.rpl_err;
- else {
- printk(KERN_ERR MOD "Device %s not responding!\n",
- pci_name(ep->com.dev->rdev.lldi.pdev));
- ep->com.dev->rdev.flags = T4_FATAL_ERROR;
- err = -EIO;
- }
+ err = c4iw_wait_for_reply(&ep->com.dev->rdev, &ep->com.wr_wait, 0, 0,
+ __func__);
cxgb4_free_stid(ep->com.dev->rdev.lldi.tids, ep->stid, PF_INET);
done:
cm_id->rem_ref(cm_id);
@@ -2106,12 +2079,11 @@ done:
int c4iw_ep_disconnect(struct c4iw_ep *ep, int abrupt, gfp_t gfp)
{
int ret = 0;
- unsigned long flags;
int close = 0;
int fatal = 0;
struct c4iw_rdev *rdev;
- spin_lock_irqsave(&ep->com.lock, flags);
+ mutex_lock(&ep->com.mutex);
PDBG("%s ep %p state %s, abrupt %d\n", __func__, ep,
states[ep->com.state], abrupt);
@@ -2158,7 +2130,7 @@ int c4iw_ep_disconnect(struct c4iw_ep *ep, int abrupt, gfp_t gfp)
break;
}
- spin_unlock_irqrestore(&ep->com.lock, flags);
+ mutex_unlock(&ep->com.mutex);
if (close) {
if (abrupt)
ret = abort_connection(ep, NULL, gfp);
@@ -2172,6 +2144,13 @@ int c4iw_ep_disconnect(struct c4iw_ep *ep, int abrupt, gfp_t gfp)
return ret;
}
+static int async_event(struct c4iw_dev *dev, struct sk_buff *skb)
+{
+ struct cpl_fw6_msg *rpl = cplhdr(skb);
+ c4iw_ev_dispatch(dev, (struct t4_cqe *)&rpl->data[0]);
+ return 0;
+}
+
/*
* These are the real handlers that are called from a
* work queue.
@@ -2190,7 +2169,8 @@ static c4iw_handler_func work_handlers[NUM_CPL_CMDS] = {
[CPL_ABORT_REQ_RSS] = peer_abort,
[CPL_CLOSE_CON_RPL] = close_con_rpl,
[CPL_RDMA_TERMINATE] = terminate,
- [CPL_FW4_ACK] = fw4_ack
+ [CPL_FW4_ACK] = fw4_ack,
+ [CPL_FW6_MSG] = async_event
};
static void process_timeout(struct c4iw_ep *ep)
@@ -2198,7 +2178,7 @@ static void process_timeout(struct c4iw_ep *ep)
struct c4iw_qp_attributes attrs;
int abort = 1;
- spin_lock_irq(&ep->com.lock);
+ mutex_lock(&ep->com.mutex);
PDBG("%s ep %p tid %u state %d\n", __func__, ep, ep->hwtid,
ep->com.state);
switch (ep->com.state) {
@@ -2225,7 +2205,7 @@ static void process_timeout(struct c4iw_ep *ep)
WARN_ON(1);
abort = 0;
}
- spin_unlock_irq(&ep->com.lock);
+ mutex_unlock(&ep->com.mutex);
if (abort)
abort_connection(ep, NULL, GFP_KERNEL);
c4iw_put_ep(&ep->com);
@@ -2309,6 +2289,7 @@ static int set_tcb_rpl(struct c4iw_dev *dev, struct sk_buff *skb)
printk(KERN_ERR MOD "Unexpected SET_TCB_RPL status %u "
"for tid %u\n", rpl->status, GET_TID(rpl));
}
+ kfree_skb(skb);
return 0;
}
@@ -2323,20 +2304,25 @@ static int fw6_msg(struct c4iw_dev *dev, struct sk_buff *skb)
switch (rpl->type) {
case 1:
ret = (int)((be64_to_cpu(rpl->data[0]) >> 8) & 0xff);
- wr_waitp = (__force struct c4iw_wr_wait *)rpl->data[1];
+ wr_waitp = (struct c4iw_wr_wait *)(__force unsigned long) rpl->data[1];
PDBG("%s wr_waitp %p ret %u\n", __func__, wr_waitp, ret);
if (wr_waitp) {
- wr_waitp->ret = ret;
+ if (ret)
+ wr_waitp->ret = -ret;
+ else
+ wr_waitp->ret = 0;
wr_waitp->done = 1;
wake_up(&wr_waitp->wait);
}
+ kfree_skb(skb);
break;
case 2:
- c4iw_ev_dispatch(dev, (struct t4_cqe *)&rpl->data[0]);
+ sched(dev, skb);
break;
default:
printk(KERN_ERR MOD "%s unexpected fw6 msg type %u\n", __func__,
rpl->type);
+ kfree_skb(skb);
break;
}
return 0;
diff --git a/drivers/infiniband/hw/cxgb4/cq.c b/drivers/infiniband/hw/cxgb4/cq.c
index b3daf39eed4a..8d8f8add6fcd 100644
--- a/drivers/infiniband/hw/cxgb4/cq.c
+++ b/drivers/infiniband/hw/cxgb4/cq.c
@@ -55,7 +55,7 @@ static int destroy_cq(struct c4iw_rdev *rdev, struct t4_cq *cq,
V_FW_RI_RES_WR_NRES(1) |
FW_WR_COMPL(1));
res_wr->len16_pkd = cpu_to_be32(DIV_ROUND_UP(wr_len, 16));
- res_wr->cookie = (u64)&wr_wait;
+ res_wr->cookie = (unsigned long) &wr_wait;
res = res_wr->res;
res->u.cq.restype = FW_RI_RES_TYPE_CQ;
res->u.cq.op = FW_RI_RES_OP_RESET;
@@ -64,14 +64,7 @@ static int destroy_cq(struct c4iw_rdev *rdev, struct t4_cq *cq,
c4iw_init_wr_wait(&wr_wait);
ret = c4iw_ofld_send(rdev, skb);
if (!ret) {
- wait_event_timeout(wr_wait.wait, wr_wait.done, C4IW_WR_TO);
- if (!wr_wait.done) {
- printk(KERN_ERR MOD "Device %s not responding!\n",
- pci_name(rdev->lldi.pdev));
- rdev->flags = T4_FATAL_ERROR;
- ret = -EIO;
- } else
- ret = wr_wait.ret;
+ ret = c4iw_wait_for_reply(rdev, &wr_wait, 0, 0, __func__);
}
kfree(cq->sw_queue);
@@ -132,7 +125,7 @@ static int create_cq(struct c4iw_rdev *rdev, struct t4_cq *cq,
V_FW_RI_RES_WR_NRES(1) |
FW_WR_COMPL(1));
res_wr->len16_pkd = cpu_to_be32(DIV_ROUND_UP(wr_len, 16));
- res_wr->cookie = (u64)&wr_wait;
+ res_wr->cookie = (unsigned long) &wr_wait;
res = res_wr->res;
res->u.cq.restype = FW_RI_RES_TYPE_CQ;
res->u.cq.op = FW_RI_RES_OP_WRITE;
@@ -157,14 +150,7 @@ static int create_cq(struct c4iw_rdev *rdev, struct t4_cq *cq,
if (ret)
goto err4;
PDBG("%s wait_event wr_wait %p\n", __func__, &wr_wait);
- wait_event_timeout(wr_wait.wait, wr_wait.done, C4IW_WR_TO);
- if (!wr_wait.done) {
- printk(KERN_ERR MOD "Device %s not responding!\n",
- pci_name(rdev->lldi.pdev));
- rdev->flags = T4_FATAL_ERROR;
- ret = -EIO;
- } else
- ret = wr_wait.ret;
+ ret = c4iw_wait_for_reply(rdev, &wr_wait, 0, 0, __func__);
if (ret)
goto err4;
@@ -476,6 +462,11 @@ static int poll_cq(struct t4_wq *wq, struct t4_cq *cq, struct t4_cqe *cqe,
goto proc_cqe;
}
+ if (CQE_OPCODE(hw_cqe) == FW_RI_TERMINATE) {
+ ret = -EAGAIN;
+ goto skip_cqe;
+ }
+
/*
* RECV completion.
*/
@@ -696,6 +687,7 @@ static int c4iw_poll_cq_one(struct c4iw_cq *chp, struct ib_wc *wc)
case T4_ERR_MSN_RANGE:
case T4_ERR_IRD_OVERFLOW:
case T4_ERR_OPCODE:
+ case T4_ERR_INTERNAL_ERR:
wc->status = IB_WC_FATAL_ERR;
break;
case T4_ERR_SWFLUSH:
diff --git a/drivers/infiniband/hw/cxgb4/device.c b/drivers/infiniband/hw/cxgb4/device.c
index 9bbf491d5d9e..54fbc1118abe 100644
--- a/drivers/infiniband/hw/cxgb4/device.c
+++ b/drivers/infiniband/hw/cxgb4/device.c
@@ -49,29 +49,33 @@ static DEFINE_MUTEX(dev_mutex);
static struct dentry *c4iw_debugfs_root;
-struct debugfs_qp_data {
+struct c4iw_debugfs_data {
struct c4iw_dev *devp;
char *buf;
int bufsize;
int pos;
};
-static int count_qps(int id, void *p, void *data)
+static int count_idrs(int id, void *p, void *data)
{
- struct c4iw_qp *qp = p;
int *countp = data;
- if (id != qp->wq.sq.qid)
- return 0;
-
*countp = *countp + 1;
return 0;
}
-static int dump_qps(int id, void *p, void *data)
+static ssize_t debugfs_read(struct file *file, char __user *buf, size_t count,
+ loff_t *ppos)
+{
+ struct c4iw_debugfs_data *d = file->private_data;
+
+ return simple_read_from_buffer(buf, count, ppos, d->buf, d->pos);
+}
+
+static int dump_qp(int id, void *p, void *data)
{
struct c4iw_qp *qp = p;
- struct debugfs_qp_data *qpd = data;
+ struct c4iw_debugfs_data *qpd = data;
int space;
int cc;
@@ -101,7 +105,7 @@ static int dump_qps(int id, void *p, void *data)
static int qp_release(struct inode *inode, struct file *file)
{
- struct debugfs_qp_data *qpd = file->private_data;
+ struct c4iw_debugfs_data *qpd = file->private_data;
if (!qpd) {
printk(KERN_INFO "%s null qpd?\n", __func__);
return 0;
@@ -113,7 +117,7 @@ static int qp_release(struct inode *inode, struct file *file)
static int qp_open(struct inode *inode, struct file *file)
{
- struct debugfs_qp_data *qpd;
+ struct c4iw_debugfs_data *qpd;
int ret = 0;
int count = 1;
@@ -126,7 +130,7 @@ static int qp_open(struct inode *inode, struct file *file)
qpd->pos = 0;
spin_lock_irq(&qpd->devp->lock);
- idr_for_each(&qpd->devp->qpidr, count_qps, &count);
+ idr_for_each(&qpd->devp->qpidr, count_idrs, &count);
spin_unlock_irq(&qpd->devp->lock);
qpd->bufsize = count * 128;
@@ -137,7 +141,7 @@ static int qp_open(struct inode *inode, struct file *file)
}
spin_lock_irq(&qpd->devp->lock);
- idr_for_each(&qpd->devp->qpidr, dump_qps, qpd);
+ idr_for_each(&qpd->devp->qpidr, dump_qp, qpd);
spin_unlock_irq(&qpd->devp->lock);
qpd->buf[qpd->pos++] = 0;
@@ -149,43 +153,86 @@ out:
return ret;
}
-static ssize_t qp_read(struct file *file, char __user *buf, size_t count,
- loff_t *ppos)
+static const struct file_operations qp_debugfs_fops = {
+ .owner = THIS_MODULE,
+ .open = qp_open,
+ .release = qp_release,
+ .read = debugfs_read,
+ .llseek = default_llseek,
+};
+
+static int dump_stag(int id, void *p, void *data)
{
- struct debugfs_qp_data *qpd = file->private_data;
- loff_t pos = *ppos;
- loff_t avail = qpd->pos;
+ struct c4iw_debugfs_data *stagd = data;
+ int space;
+ int cc;
- if (pos < 0)
- return -EINVAL;
- if (pos >= avail)
+ space = stagd->bufsize - stagd->pos - 1;
+ if (space == 0)
+ return 1;
+
+ cc = snprintf(stagd->buf + stagd->pos, space, "0x%x\n", id<<8);
+ if (cc < space)
+ stagd->pos += cc;
+ return 0;
+}
+
+static int stag_release(struct inode *inode, struct file *file)
+{
+ struct c4iw_debugfs_data *stagd = file->private_data;
+ if (!stagd) {
+ printk(KERN_INFO "%s null stagd?\n", __func__);
return 0;
- if (count > avail - pos)
- count = avail - pos;
+ }
+ kfree(stagd->buf);
+ kfree(stagd);
+ return 0;
+}
- while (count) {
- size_t len = 0;
+static int stag_open(struct inode *inode, struct file *file)
+{
+ struct c4iw_debugfs_data *stagd;
+ int ret = 0;
+ int count = 1;
- len = min((int)count, (int)qpd->pos - (int)pos);
- if (copy_to_user(buf, qpd->buf + pos, len))
- return -EFAULT;
- if (len == 0)
- return -EINVAL;
+ stagd = kmalloc(sizeof *stagd, GFP_KERNEL);
+ if (!stagd) {
+ ret = -ENOMEM;
+ goto out;
+ }
+ stagd->devp = inode->i_private;
+ stagd->pos = 0;
+
+ spin_lock_irq(&stagd->devp->lock);
+ idr_for_each(&stagd->devp->mmidr, count_idrs, &count);
+ spin_unlock_irq(&stagd->devp->lock);
- buf += len;
- pos += len;
- count -= len;
+ stagd->bufsize = count * sizeof("0x12345678\n");
+ stagd->buf = kmalloc(stagd->bufsize, GFP_KERNEL);
+ if (!stagd->buf) {
+ ret = -ENOMEM;
+ goto err1;
}
- count = pos - *ppos;
- *ppos = pos;
- return count;
+
+ spin_lock_irq(&stagd->devp->lock);
+ idr_for_each(&stagd->devp->mmidr, dump_stag, stagd);
+ spin_unlock_irq(&stagd->devp->lock);
+
+ stagd->buf[stagd->pos++] = 0;
+ file->private_data = stagd;
+ goto out;
+err1:
+ kfree(stagd);
+out:
+ return ret;
}
-static const struct file_operations qp_debugfs_fops = {
+static const struct file_operations stag_debugfs_fops = {
.owner = THIS_MODULE,
- .open = qp_open,
- .release = qp_release,
- .read = qp_read,
+ .open = stag_open,
+ .release = stag_release,
+ .read = debugfs_read,
+ .llseek = default_llseek,
};
static int setup_debugfs(struct c4iw_dev *devp)
@@ -199,6 +246,11 @@ static int setup_debugfs(struct c4iw_dev *devp)
(void *)devp, &qp_debugfs_fops);
if (de && de->d_inode)
de->d_inode->i_size = 4096;
+
+ de = debugfs_create_file("stags", S_IWUSR, devp->debugfs_root,
+ (void *)devp, &stag_debugfs_fops);
+ if (de && de->d_inode)
+ de->d_inode->i_size = 4096;
return 0;
}
@@ -290,7 +342,14 @@ static int c4iw_rdev_open(struct c4iw_rdev *rdev)
printk(KERN_ERR MOD "error %d initializing rqt pool\n", err);
goto err3;
}
+ err = c4iw_ocqp_pool_create(rdev);
+ if (err) {
+ printk(KERN_ERR MOD "error %d initializing ocqp pool\n", err);
+ goto err4;
+ }
return 0;
+err4:
+ c4iw_rqtpool_destroy(rdev);
err3:
c4iw_pblpool_destroy(rdev);
err2:
@@ -317,6 +376,7 @@ static void c4iw_remove(struct c4iw_dev *dev)
idr_destroy(&dev->cqidr);
idr_destroy(&dev->qpidr);
idr_destroy(&dev->mmidr);
+ iounmap(dev->rdev.oc_mw_kva);
ib_dealloc_device(&dev->ibdev);
}
@@ -332,6 +392,17 @@ static struct c4iw_dev *c4iw_alloc(const struct cxgb4_lld_info *infop)
}
devp->rdev.lldi = *infop;
+ devp->rdev.oc_mw_pa = pci_resource_start(devp->rdev.lldi.pdev, 2) +
+ (pci_resource_len(devp->rdev.lldi.pdev, 2) -
+ roundup_pow_of_two(devp->rdev.lldi.vr->ocq.size));
+ devp->rdev.oc_mw_kva = ioremap_wc(devp->rdev.oc_mw_pa,
+ devp->rdev.lldi.vr->ocq.size);
+
+ printk(KERN_INFO MOD "ocq memory: "
+ "hw_start 0x%x size %u mw_pa 0x%lx mw_kva %p\n",
+ devp->rdev.lldi.vr->ocq.start, devp->rdev.lldi.vr->ocq.size,
+ devp->rdev.oc_mw_pa, devp->rdev.oc_mw_kva);
+
mutex_lock(&dev_mutex);
ret = c4iw_rdev_open(&devp->rdev);
@@ -383,46 +454,6 @@ out:
return dev;
}
-static struct sk_buff *t4_pktgl_to_skb(const struct pkt_gl *gl,
- unsigned int skb_len,
- unsigned int pull_len)
-{
- struct sk_buff *skb;
- struct skb_shared_info *ssi;
-
- if (gl->tot_len <= 512) {
- skb = alloc_skb(gl->tot_len, GFP_ATOMIC);
- if (unlikely(!skb))
- goto out;
- __skb_put(skb, gl->tot_len);
- skb_copy_to_linear_data(skb, gl->va, gl->tot_len);
- } else {
- skb = alloc_skb(skb_len, GFP_ATOMIC);
- if (unlikely(!skb))
- goto out;
- __skb_put(skb, pull_len);
- skb_copy_to_linear_data(skb, gl->va, pull_len);
-
- ssi = skb_shinfo(skb);
- ssi->frags[0].page = gl->frags[0].page;
- ssi->frags[0].page_offset = gl->frags[0].page_offset + pull_len;
- ssi->frags[0].size = gl->frags[0].size - pull_len;
- if (gl->nfrags > 1)
- memcpy(&ssi->frags[1], &gl->frags[1],
- (gl->nfrags - 1) * sizeof(skb_frag_t));
- ssi->nr_frags = gl->nfrags;
-
- skb->len = gl->tot_len;
- skb->data_len = skb->len - pull_len;
- skb->truesize += skb->data_len;
-
- /* Get a reference for the last page, we don't own it */
- get_page(gl->frags[gl->nfrags - 1].page);
- }
-out:
- return skb;
-}
-
static int c4iw_uld_rx_handler(void *handle, const __be64 *rsp,
const struct pkt_gl *gl)
{
@@ -447,7 +478,7 @@ static int c4iw_uld_rx_handler(void *handle, const __be64 *rsp,
c4iw_ev_handler(dev, qid);
return 0;
} else {
- skb = t4_pktgl_to_skb(gl, 128, 128);
+ skb = cxgb4_pktgl_to_skb(gl, 128, 128);
if (unlikely(!skb))
goto nomem;
}
diff --git a/drivers/infiniband/hw/cxgb4/ev.c b/drivers/infiniband/hw/cxgb4/ev.c
index 491e76a0327f..c13041a0aeba 100644
--- a/drivers/infiniband/hw/cxgb4/ev.c
+++ b/drivers/infiniband/hw/cxgb4/ev.c
@@ -60,7 +60,7 @@ static void post_qp_event(struct c4iw_dev *dev, struct c4iw_cq *chp,
if (qhp->attr.state == C4IW_QP_STATE_RTS) {
attrs.next_state = C4IW_QP_STATE_TERMINATE;
c4iw_modify_qp(qhp->rhp, qhp, C4IW_QP_ATTR_NEXT_STATE,
- &attrs, 1);
+ &attrs, 0);
}
event.event = ib_event;
diff --git a/drivers/infiniband/hw/cxgb4/iw_cxgb4.h b/drivers/infiniband/hw/cxgb4/iw_cxgb4.h
index ed459b8f800f..16032cdb4337 100644
--- a/drivers/infiniband/hw/cxgb4/iw_cxgb4.h
+++ b/drivers/infiniband/hw/cxgb4/iw_cxgb4.h
@@ -46,6 +46,7 @@
#include <linux/timer.h>
#include <linux/io.h>
#include <linux/kfifo.h>
+#include <linux/mutex.h>
#include <asm/byteorder.h>
@@ -79,21 +80,6 @@ static inline void *cplhdr(struct sk_buff *skb)
return skb->data;
}
-#define C4IW_WR_TO (10*HZ)
-
-struct c4iw_wr_wait {
- wait_queue_head_t wait;
- int done;
- int ret;
-};
-
-static inline void c4iw_init_wr_wait(struct c4iw_wr_wait *wr_waitp)
-{
- wr_waitp->ret = 0;
- wr_waitp->done = 0;
- init_waitqueue_head(&wr_waitp->wait);
-}
-
struct c4iw_resource {
struct kfifo tpt_fifo;
spinlock_t tpt_fifo_lock;
@@ -127,8 +113,11 @@ struct c4iw_rdev {
struct c4iw_dev_ucontext uctx;
struct gen_pool *pbl_pool;
struct gen_pool *rqt_pool;
+ struct gen_pool *ocqp_pool;
u32 flags;
struct cxgb4_lld_info lldi;
+ unsigned long oc_mw_pa;
+ void __iomem *oc_mw_kva;
};
static inline int c4iw_fatal_error(struct c4iw_rdev *rdev)
@@ -141,6 +130,44 @@ static inline int c4iw_num_stags(struct c4iw_rdev *rdev)
return min((int)T4_MAX_NUM_STAG, (int)(rdev->lldi.vr->stag.size >> 5));
}
+#define C4IW_WR_TO (10*HZ)
+
+struct c4iw_wr_wait {
+ wait_queue_head_t wait;
+ int done;
+ int ret;
+};
+
+static inline void c4iw_init_wr_wait(struct c4iw_wr_wait *wr_waitp)
+{
+ wr_waitp->ret = 0;
+ wr_waitp->done = 0;
+ init_waitqueue_head(&wr_waitp->wait);
+}
+
+static inline int c4iw_wait_for_reply(struct c4iw_rdev *rdev,
+ struct c4iw_wr_wait *wr_waitp,
+ u32 hwtid, u32 qpid,
+ const char *func)
+{
+ unsigned to = C4IW_WR_TO;
+ do {
+
+ wait_event_timeout(wr_waitp->wait, wr_waitp->done, to);
+ if (!wr_waitp->done) {
+ printk(KERN_ERR MOD "%s - Device %s not responding - "
+ "tid %u qpid %u\n", func,
+ pci_name(rdev->lldi.pdev), hwtid, qpid);
+ to = to << 2;
+ }
+ } while (!wr_waitp->done);
+ if (wr_waitp->ret)
+ printk(KERN_WARNING MOD "%s: FW reply %d tid %u qpid %u\n",
+ pci_name(rdev->lldi.pdev), wr_waitp->ret, hwtid, qpid);
+ return wr_waitp->ret;
+}
+
+
struct c4iw_dev {
struct ib_device ibdev;
struct c4iw_rdev rdev;
@@ -327,6 +354,7 @@ struct c4iw_qp {
struct c4iw_qp_attributes attr;
struct t4_wq wq;
spinlock_t lock;
+ struct mutex mutex;
atomic_t refcnt;
wait_queue_head_t wait;
struct timer_list timer;
@@ -579,12 +607,10 @@ struct c4iw_ep_common {
struct c4iw_dev *dev;
enum c4iw_ep_state state;
struct kref kref;
- spinlock_t lock;
+ struct mutex mutex;
struct sockaddr_in local_addr;
struct sockaddr_in remote_addr;
- wait_queue_head_t waitq;
- int rpl_done;
- int rpl_err;
+ struct c4iw_wr_wait wr_wait;
unsigned long flags;
};
@@ -654,8 +680,10 @@ int c4iw_init_resource(struct c4iw_rdev *rdev, u32 nr_tpt, u32 nr_pdid);
int c4iw_init_ctrl_qp(struct c4iw_rdev *rdev);
int c4iw_pblpool_create(struct c4iw_rdev *rdev);
int c4iw_rqtpool_create(struct c4iw_rdev *rdev);
+int c4iw_ocqp_pool_create(struct c4iw_rdev *rdev);
void c4iw_pblpool_destroy(struct c4iw_rdev *rdev);
void c4iw_rqtpool_destroy(struct c4iw_rdev *rdev);
+void c4iw_ocqp_pool_destroy(struct c4iw_rdev *rdev);
void c4iw_destroy_resource(struct c4iw_resource *rscp);
int c4iw_destroy_ctrl_qp(struct c4iw_rdev *rdev);
int c4iw_register_device(struct c4iw_dev *dev);
@@ -721,6 +749,8 @@ u32 c4iw_rqtpool_alloc(struct c4iw_rdev *rdev, int size);
void c4iw_rqtpool_free(struct c4iw_rdev *rdev, u32 addr, int size);
u32 c4iw_pblpool_alloc(struct c4iw_rdev *rdev, int size);
void c4iw_pblpool_free(struct c4iw_rdev *rdev, u32 addr, int size);
+u32 c4iw_ocqp_pool_alloc(struct c4iw_rdev *rdev, int size);
+void c4iw_ocqp_pool_free(struct c4iw_rdev *rdev, u32 addr, int size);
int c4iw_ofld_send(struct c4iw_rdev *rdev, struct sk_buff *skb);
void c4iw_flush_hw_cq(struct t4_cq *cq);
void c4iw_count_rcqes(struct t4_cq *cq, struct t4_wq *wq, int *count);
diff --git a/drivers/infiniband/hw/cxgb4/mem.c b/drivers/infiniband/hw/cxgb4/mem.c
index 269373a62f22..273ffe49525a 100644
--- a/drivers/infiniband/hw/cxgb4/mem.c
+++ b/drivers/infiniband/hw/cxgb4/mem.c
@@ -71,7 +71,7 @@ static int write_adapter_mem(struct c4iw_rdev *rdev, u32 addr, u32 len,
if (i == (num_wqe-1)) {
req->wr.wr_hi = cpu_to_be32(FW_WR_OP(FW_ULPTX_WR) |
FW_WR_COMPL(1));
- req->wr.wr_lo = (__force __be64)&wr_wait;
+ req->wr.wr_lo = (__force __be64)(unsigned long) &wr_wait;
} else
req->wr.wr_hi = cpu_to_be32(FW_WR_OP(FW_ULPTX_WR));
req->wr.wr_mid = cpu_to_be32(
@@ -103,14 +103,7 @@ static int write_adapter_mem(struct c4iw_rdev *rdev, u32 addr, u32 len,
len -= C4IW_MAX_INLINE_SIZE;
}
- wait_event_timeout(wr_wait.wait, wr_wait.done, C4IW_WR_TO);
- if (!wr_wait.done) {
- printk(KERN_ERR MOD "Device %s not responding!\n",
- pci_name(rdev->lldi.pdev));
- rdev->flags = T4_FATAL_ERROR;
- ret = -EIO;
- } else
- ret = wr_wait.ret;
+ ret = c4iw_wait_for_reply(rdev, &wr_wait, 0, 0, __func__);
return ret;
}
diff --git a/drivers/infiniband/hw/cxgb4/provider.c b/drivers/infiniband/hw/cxgb4/provider.c
index 8f645c83a125..f66dd8bf5128 100644
--- a/drivers/infiniband/hw/cxgb4/provider.c
+++ b/drivers/infiniband/hw/cxgb4/provider.c
@@ -54,9 +54,9 @@
#include "iw_cxgb4.h"
-static int fastreg_support;
+static int fastreg_support = 1;
module_param(fastreg_support, int, 0644);
-MODULE_PARM_DESC(fastreg_support, "Advertise fastreg support (default=0)");
+MODULE_PARM_DESC(fastreg_support, "Advertise fastreg support (default=1)");
static int c4iw_modify_port(struct ib_device *ibdev,
u8 port, int port_modify_mask,
@@ -149,19 +149,28 @@ static int c4iw_mmap(struct ib_ucontext *context, struct vm_area_struct *vma)
addr = mm->addr;
kfree(mm);
- if ((addr >= pci_resource_start(rdev->lldi.pdev, 2)) &&
- (addr < (pci_resource_start(rdev->lldi.pdev, 2) +
- pci_resource_len(rdev->lldi.pdev, 2)))) {
+ if ((addr >= pci_resource_start(rdev->lldi.pdev, 0)) &&
+ (addr < (pci_resource_start(rdev->lldi.pdev, 0) +
+ pci_resource_len(rdev->lldi.pdev, 0)))) {
/*
- * Map T4 DB register.
+ * MA_SYNC register...
*/
- if (vma->vm_flags & VM_READ)
- return -EPERM;
-
vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
- vma->vm_flags |= VM_DONTCOPY | VM_DONTEXPAND;
- vma->vm_flags &= ~VM_MAYREAD;
+ ret = io_remap_pfn_range(vma, vma->vm_start,
+ addr >> PAGE_SHIFT,
+ len, vma->vm_page_prot);
+ } else if ((addr >= pci_resource_start(rdev->lldi.pdev, 2)) &&
+ (addr < (pci_resource_start(rdev->lldi.pdev, 2) +
+ pci_resource_len(rdev->lldi.pdev, 2)))) {
+
+ /*
+ * Map user DB or OCQP memory...
+ */
+ if (addr >= rdev->oc_mw_pa)
+ vma->vm_page_prot = t4_pgprot_wc(vma->vm_page_prot);
+ else
+ vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
ret = io_remap_pfn_range(vma, vma->vm_start,
addr >> PAGE_SHIFT,
len, vma->vm_page_prot);
@@ -382,7 +391,17 @@ static ssize_t show_board(struct device *dev, struct device_attribute *attr,
static int c4iw_get_mib(struct ib_device *ibdev,
union rdma_protocol_stats *stats)
{
- return -ENOSYS;
+ struct tp_tcp_stats v4, v6;
+ struct c4iw_dev *c4iw_dev = to_c4iw_dev(ibdev);
+
+ cxgb4_get_tcp_stats(c4iw_dev->rdev.lldi.pdev, &v4, &v6);
+ memset(stats, 0, sizeof *stats);
+ stats->iw.tcpInSegs = v4.tcpInSegs + v6.tcpInSegs;
+ stats->iw.tcpOutSegs = v4.tcpOutSegs + v6.tcpOutSegs;
+ stats->iw.tcpRetransSegs = v4.tcpRetransSegs + v6.tcpRetransSegs;
+ stats->iw.tcpOutRsts = v4.tcpOutRsts + v6.tcpOutSegs;
+
+ return 0;
}
static DEVICE_ATTR(hw_rev, S_IRUGO, show_rev, NULL);
@@ -472,6 +491,7 @@ int c4iw_register_device(struct c4iw_dev *dev)
dev->ibdev.post_send = c4iw_post_send;
dev->ibdev.post_recv = c4iw_post_receive;
dev->ibdev.get_protocol_stats = c4iw_get_mib;
+ dev->ibdev.uverbs_abi_ver = C4IW_UVERBS_ABI_VERSION;
dev->ibdev.iwcm = kmalloc(sizeof(struct iw_cm_verbs), GFP_KERNEL);
if (!dev->ibdev.iwcm)
diff --git a/drivers/infiniband/hw/cxgb4/qp.c b/drivers/infiniband/hw/cxgb4/qp.c
index 93f6e5bf0ec5..057cb2505ea1 100644
--- a/drivers/infiniband/hw/cxgb4/qp.c
+++ b/drivers/infiniband/hw/cxgb4/qp.c
@@ -31,6 +31,63 @@
*/
#include "iw_cxgb4.h"
+static int ocqp_support;
+module_param(ocqp_support, int, 0644);
+MODULE_PARM_DESC(ocqp_support, "Support on-chip SQs (default=0)");
+
+static void set_state(struct c4iw_qp *qhp, enum c4iw_qp_state state)
+{
+ unsigned long flag;
+ spin_lock_irqsave(&qhp->lock, flag);
+ qhp->attr.state = state;
+ spin_unlock_irqrestore(&qhp->lock, flag);
+}
+
+static void dealloc_oc_sq(struct c4iw_rdev *rdev, struct t4_sq *sq)
+{
+ c4iw_ocqp_pool_free(rdev, sq->dma_addr, sq->memsize);
+}
+
+static void dealloc_host_sq(struct c4iw_rdev *rdev, struct t4_sq *sq)
+{
+ dma_free_coherent(&(rdev->lldi.pdev->dev), sq->memsize, sq->queue,
+ pci_unmap_addr(sq, mapping));
+}
+
+static void dealloc_sq(struct c4iw_rdev *rdev, struct t4_sq *sq)
+{
+ if (t4_sq_onchip(sq))
+ dealloc_oc_sq(rdev, sq);
+ else
+ dealloc_host_sq(rdev, sq);
+}
+
+static int alloc_oc_sq(struct c4iw_rdev *rdev, struct t4_sq *sq)
+{
+ if (!ocqp_support || !t4_ocqp_supported())
+ return -ENOSYS;
+ sq->dma_addr = c4iw_ocqp_pool_alloc(rdev, sq->memsize);
+ if (!sq->dma_addr)
+ return -ENOMEM;
+ sq->phys_addr = rdev->oc_mw_pa + sq->dma_addr -
+ rdev->lldi.vr->ocq.start;
+ sq->queue = (__force union t4_wr *)(rdev->oc_mw_kva + sq->dma_addr -
+ rdev->lldi.vr->ocq.start);
+ sq->flags |= T4_SQ_ONCHIP;
+ return 0;
+}
+
+static int alloc_host_sq(struct c4iw_rdev *rdev, struct t4_sq *sq)
+{
+ sq->queue = dma_alloc_coherent(&(rdev->lldi.pdev->dev), sq->memsize,
+ &(sq->dma_addr), GFP_KERNEL);
+ if (!sq->queue)
+ return -ENOMEM;
+ sq->phys_addr = virt_to_phys(sq->queue);
+ pci_unmap_addr_set(sq, mapping, sq->dma_addr);
+ return 0;
+}
+
static int destroy_qp(struct c4iw_rdev *rdev, struct t4_wq *wq,
struct c4iw_dev_ucontext *uctx)
{
@@ -41,9 +98,7 @@ static int destroy_qp(struct c4iw_rdev *rdev, struct t4_wq *wq,
dma_free_coherent(&(rdev->lldi.pdev->dev),
wq->rq.memsize, wq->rq.queue,
dma_unmap_addr(&wq->rq, mapping));
- dma_free_coherent(&(rdev->lldi.pdev->dev),
- wq->sq.memsize, wq->sq.queue,
- dma_unmap_addr(&wq->sq, mapping));
+ dealloc_sq(rdev, &wq->sq);
c4iw_rqtpool_free(rdev, wq->rq.rqt_hwaddr, wq->rq.rqt_size);
kfree(wq->rq.sw_rq);
kfree(wq->sq.sw_sq);
@@ -93,11 +148,12 @@ static int create_qp(struct c4iw_rdev *rdev, struct t4_wq *wq,
if (!wq->rq.rqt_hwaddr)
goto err4;
- wq->sq.queue = dma_alloc_coherent(&(rdev->lldi.pdev->dev),
- wq->sq.memsize, &(wq->sq.dma_addr),
- GFP_KERNEL);
- if (!wq->sq.queue)
- goto err5;
+ if (user) {
+ if (alloc_oc_sq(rdev, &wq->sq) && alloc_host_sq(rdev, &wq->sq))
+ goto err5;
+ } else
+ if (alloc_host_sq(rdev, &wq->sq))
+ goto err5;
memset(wq->sq.queue, 0, wq->sq.memsize);
dma_unmap_addr_set(&wq->sq, mapping, wq->sq.dma_addr);
@@ -144,7 +200,7 @@ static int create_qp(struct c4iw_rdev *rdev, struct t4_wq *wq,
V_FW_RI_RES_WR_NRES(2) |
FW_WR_COMPL(1));
res_wr->len16_pkd = cpu_to_be32(DIV_ROUND_UP(wr_len, 16));
- res_wr->cookie = (u64)&wr_wait;
+ res_wr->cookie = (unsigned long) &wr_wait;
res = res_wr->res;
res->u.sqrq.restype = FW_RI_RES_TYPE_SQ;
res->u.sqrq.op = FW_RI_RES_OP_WRITE;
@@ -158,6 +214,7 @@ static int create_qp(struct c4iw_rdev *rdev, struct t4_wq *wq,
V_FW_RI_RES_WR_HOSTFCMODE(0) | /* no host cidx updates */
V_FW_RI_RES_WR_CPRIO(0) | /* don't keep in chip cache */
V_FW_RI_RES_WR_PCIECHN(0) | /* set by uP at ri_init time */
+ t4_sq_onchip(&wq->sq) ? F_FW_RI_RES_WR_ONCHIP : 0 |
V_FW_RI_RES_WR_IQID(scq->cqid));
res->u.sqrq.dcaen_to_eqsize = cpu_to_be32(
V_FW_RI_RES_WR_DCAEN(0) |
@@ -198,14 +255,7 @@ static int create_qp(struct c4iw_rdev *rdev, struct t4_wq *wq,
ret = c4iw_ofld_send(rdev, skb);
if (ret)
goto err7;
- wait_event_timeout(wr_wait.wait, wr_wait.done, C4IW_WR_TO);
- if (!wr_wait.done) {
- printk(KERN_ERR MOD "Device %s not responding!\n",
- pci_name(rdev->lldi.pdev));
- rdev->flags = T4_FATAL_ERROR;
- ret = -EIO;
- } else
- ret = wr_wait.ret;
+ ret = c4iw_wait_for_reply(rdev, &wr_wait, 0, wq->sq.qid, __func__);
if (ret)
goto err7;
@@ -219,9 +269,7 @@ err7:
wq->rq.memsize, wq->rq.queue,
dma_unmap_addr(&wq->rq, mapping));
err6:
- dma_free_coherent(&(rdev->lldi.pdev->dev),
- wq->sq.memsize, wq->sq.queue,
- dma_unmap_addr(&wq->sq, mapping));
+ dealloc_sq(rdev, &wq->sq);
err5:
c4iw_rqtpool_free(rdev, wq->rq.rqt_hwaddr, wq->rq.rqt_size);
err4:
@@ -263,6 +311,9 @@ static int build_immd(struct t4_sq *sq, struct fw_ri_immd *immdp,
rem -= len;
}
}
+ len = roundup(plen + sizeof *immdp, 16) - (plen + sizeof *immdp);
+ if (len)
+ memset(dstp, 0, len);
immdp->op = FW_RI_DATA_IMMD;
immdp->r1 = 0;
immdp->r2 = 0;
@@ -292,6 +343,7 @@ static int build_isgl(__be64 *queue_start, __be64 *queue_end,
if (++flitp == queue_end)
flitp = queue_start;
}
+ *flitp = (__force __be64)0;
isglp->op = FW_RI_DATA_ISGL;
isglp->r1 = 0;
isglp->nsge = cpu_to_be16(num_sge);
@@ -453,13 +505,15 @@ static int build_rdma_recv(struct c4iw_qp *qhp, union t4_recv_wr *wqe,
return 0;
}
-static int build_fastreg(union t4_wr *wqe, struct ib_send_wr *wr, u8 *len16)
+static int build_fastreg(struct t4_sq *sq, union t4_wr *wqe,
+ struct ib_send_wr *wr, u8 *len16)
{
struct fw_ri_immd *imdp;
__be64 *p;
int i;
int pbllen = roundup(wr->wr.fast_reg.page_list_len * sizeof(u64), 32);
+ int rem;
if (wr->wr.fast_reg.page_list_len > T4_MAX_FR_DEPTH)
return -EINVAL;
@@ -474,32 +528,28 @@ static int build_fastreg(union t4_wr *wqe, struct ib_send_wr *wr, u8 *len16)
wqe->fr.va_hi = cpu_to_be32(wr->wr.fast_reg.iova_start >> 32);
wqe->fr.va_lo_fbo = cpu_to_be32(wr->wr.fast_reg.iova_start &
0xffffffff);
- if (pbllen > T4_MAX_FR_IMMD) {
- struct c4iw_fr_page_list *c4pl =
- to_c4iw_fr_page_list(wr->wr.fast_reg.page_list);
- struct fw_ri_dsgl *sglp;
-
- sglp = (struct fw_ri_dsgl *)(&wqe->fr + 1);
- sglp->op = FW_RI_DATA_DSGL;
- sglp->r1 = 0;
- sglp->nsge = cpu_to_be16(1);
- sglp->addr0 = cpu_to_be64(c4pl->dma_addr);
- sglp->len0 = cpu_to_be32(pbllen);
-
- *len16 = DIV_ROUND_UP(sizeof wqe->fr + sizeof *sglp, 16);
- } else {
- imdp = (struct fw_ri_immd *)(&wqe->fr + 1);
- imdp->op = FW_RI_DATA_IMMD;
- imdp->r1 = 0;
- imdp->r2 = 0;
- imdp->immdlen = cpu_to_be32(pbllen);
- p = (__be64 *)(imdp + 1);
- for (i = 0; i < wr->wr.fast_reg.page_list_len; i++, p++)
- *p = cpu_to_be64(
- (u64)wr->wr.fast_reg.page_list->page_list[i]);
- *len16 = DIV_ROUND_UP(sizeof wqe->fr + sizeof *imdp + pbllen,
- 16);
+ WARN_ON(pbllen > T4_MAX_FR_IMMD);
+ imdp = (struct fw_ri_immd *)(&wqe->fr + 1);
+ imdp->op = FW_RI_DATA_IMMD;
+ imdp->r1 = 0;
+ imdp->r2 = 0;
+ imdp->immdlen = cpu_to_be32(pbllen);
+ p = (__be64 *)(imdp + 1);
+ rem = pbllen;
+ for (i = 0; i < wr->wr.fast_reg.page_list_len; i++) {
+ *p = cpu_to_be64((u64)wr->wr.fast_reg.page_list->page_list[i]);
+ rem -= sizeof *p;
+ if (++p == (__be64 *)&sq->queue[sq->size])
+ p = (__be64 *)sq->queue;
}
+ BUG_ON(rem < 0);
+ while (rem) {
+ *p = 0;
+ rem -= sizeof *p;
+ if (++p == (__be64 *)&sq->queue[sq->size])
+ p = (__be64 *)sq->queue;
+ }
+ *len16 = DIV_ROUND_UP(sizeof wqe->fr + sizeof *imdp + pbllen, 16);
return 0;
}
@@ -587,7 +637,7 @@ int c4iw_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
fw_opcode = FW_RI_RDMA_READ_WR;
swsqe->opcode = FW_RI_READ_REQ;
if (wr->opcode == IB_WR_RDMA_READ_WITH_INV)
- fw_flags |= FW_RI_RDMA_READ_INVALIDATE;
+ fw_flags = FW_RI_RDMA_READ_INVALIDATE;
else
fw_flags = 0;
err = build_rdma_read(wqe, wr, &len16);
@@ -600,7 +650,7 @@ int c4iw_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
case IB_WR_FAST_REG_MR:
fw_opcode = FW_RI_FR_NSMR_WR;
swsqe->opcode = FW_RI_FAST_REGISTER;
- err = build_fastreg(wqe, wr, &len16);
+ err = build_fastreg(&qhp->wq.sq, wqe, wr, &len16);
break;
case IB_WR_LOCAL_INV:
if (wr->send_flags & IB_SEND_FENCE)
@@ -905,46 +955,38 @@ static void post_terminate(struct c4iw_qp *qhp, struct t4_cqe *err_cqe,
* Assumes qhp lock is held.
*/
static void __flush_qp(struct c4iw_qp *qhp, struct c4iw_cq *rchp,
- struct c4iw_cq *schp, unsigned long *flag)
+ struct c4iw_cq *schp)
{
int count;
int flushed;
+ unsigned long flag;
PDBG("%s qhp %p rchp %p schp %p\n", __func__, qhp, rchp, schp);
- /* take a ref on the qhp since we must release the lock */
- atomic_inc(&qhp->refcnt);
- spin_unlock_irqrestore(&qhp->lock, *flag);
/* locking hierarchy: cq lock first, then qp lock. */
- spin_lock_irqsave(&rchp->lock, *flag);
+ spin_lock_irqsave(&rchp->lock, flag);
spin_lock(&qhp->lock);
c4iw_flush_hw_cq(&rchp->cq);
c4iw_count_rcqes(&rchp->cq, &qhp->wq, &count);
flushed = c4iw_flush_rq(&qhp->wq, &rchp->cq, count);
spin_unlock(&qhp->lock);
- spin_unlock_irqrestore(&rchp->lock, *flag);
+ spin_unlock_irqrestore(&rchp->lock, flag);
if (flushed)
(*rchp->ibcq.comp_handler)(&rchp->ibcq, rchp->ibcq.cq_context);
/* locking hierarchy: cq lock first, then qp lock. */
- spin_lock_irqsave(&schp->lock, *flag);
+ spin_lock_irqsave(&schp->lock, flag);
spin_lock(&qhp->lock);
c4iw_flush_hw_cq(&schp->cq);
c4iw_count_scqes(&schp->cq, &qhp->wq, &count);
flushed = c4iw_flush_sq(&qhp->wq, &schp->cq, count);
spin_unlock(&qhp->lock);
- spin_unlock_irqrestore(&schp->lock, *flag);
+ spin_unlock_irqrestore(&schp->lock, flag);
if (flushed)
(*schp->ibcq.comp_handler)(&schp->ibcq, schp->ibcq.cq_context);
-
- /* deref */
- if (atomic_dec_and_test(&qhp->refcnt))
- wake_up(&qhp->wait);
-
- spin_lock_irqsave(&qhp->lock, *flag);
}
-static void flush_qp(struct c4iw_qp *qhp, unsigned long *flag)
+static void flush_qp(struct c4iw_qp *qhp)
{
struct c4iw_cq *rchp, *schp;
@@ -958,7 +1000,7 @@ static void flush_qp(struct c4iw_qp *qhp, unsigned long *flag)
t4_set_cq_in_error(&schp->cq);
return;
}
- __flush_qp(qhp, rchp, schp, flag);
+ __flush_qp(qhp, rchp, schp);
}
static int rdma_fini(struct c4iw_dev *rhp, struct c4iw_qp *qhp,
@@ -966,7 +1008,6 @@ static int rdma_fini(struct c4iw_dev *rhp, struct c4iw_qp *qhp,
{
struct fw_ri_wr *wqe;
int ret;
- struct c4iw_wr_wait wr_wait;
struct sk_buff *skb;
PDBG("%s qhp %p qid 0x%x tid %u\n", __func__, qhp, qhp->wq.sq.qid,
@@ -985,28 +1026,16 @@ static int rdma_fini(struct c4iw_dev *rhp, struct c4iw_qp *qhp,
wqe->flowid_len16 = cpu_to_be32(
FW_WR_FLOWID(ep->hwtid) |
FW_WR_LEN16(DIV_ROUND_UP(sizeof *wqe, 16)));
- wqe->cookie = (u64)&wr_wait;
+ wqe->cookie = (unsigned long) &ep->com.wr_wait;
wqe->u.fini.type = FW_RI_TYPE_FINI;
- c4iw_init_wr_wait(&wr_wait);
+ c4iw_init_wr_wait(&ep->com.wr_wait);
ret = c4iw_ofld_send(&rhp->rdev, skb);
if (ret)
goto out;
- wait_event_timeout(wr_wait.wait, wr_wait.done, C4IW_WR_TO);
- if (!wr_wait.done) {
- printk(KERN_ERR MOD "Device %s not responding!\n",
- pci_name(rhp->rdev.lldi.pdev));
- rhp->rdev.flags = T4_FATAL_ERROR;
- ret = -EIO;
- } else {
- ret = wr_wait.ret;
- if (ret)
- printk(KERN_WARNING MOD
- "%s: Abnormal close qpid %d ret %u\n",
- pci_name(rhp->rdev.lldi.pdev), qhp->wq.sq.qid,
- ret);
- }
+ ret = c4iw_wait_for_reply(&rhp->rdev, &ep->com.wr_wait, qhp->ep->hwtid,
+ qhp->wq.sq.qid, __func__);
out:
PDBG("%s ret %d\n", __func__, ret);
return ret;
@@ -1040,7 +1069,6 @@ static int rdma_init(struct c4iw_dev *rhp, struct c4iw_qp *qhp)
{
struct fw_ri_wr *wqe;
int ret;
- struct c4iw_wr_wait wr_wait;
struct sk_buff *skb;
PDBG("%s qhp %p qid 0x%x tid %u\n", __func__, qhp, qhp->wq.sq.qid,
@@ -1060,7 +1088,7 @@ static int rdma_init(struct c4iw_dev *rhp, struct c4iw_qp *qhp)
FW_WR_FLOWID(qhp->ep->hwtid) |
FW_WR_LEN16(DIV_ROUND_UP(sizeof *wqe, 16)));
- wqe->cookie = (u64)&wr_wait;
+ wqe->cookie = (unsigned long) &qhp->ep->com.wr_wait;
wqe->u.init.type = FW_RI_TYPE_INIT;
wqe->u.init.mpareqbit_p2ptype =
@@ -1097,19 +1125,13 @@ static int rdma_init(struct c4iw_dev *rhp, struct c4iw_qp *qhp)
if (qhp->attr.mpa_attr.initiator)
build_rtr_msg(qhp->attr.mpa_attr.p2p_type, &wqe->u.init);
- c4iw_init_wr_wait(&wr_wait);
+ c4iw_init_wr_wait(&qhp->ep->com.wr_wait);
ret = c4iw_ofld_send(&rhp->rdev, skb);
if (ret)
goto out;
- wait_event_timeout(wr_wait.wait, wr_wait.done, C4IW_WR_TO);
- if (!wr_wait.done) {
- printk(KERN_ERR MOD "Device %s not responding!\n",
- pci_name(rhp->rdev.lldi.pdev));
- rhp->rdev.flags = T4_FATAL_ERROR;
- ret = -EIO;
- } else
- ret = wr_wait.ret;
+ ret = c4iw_wait_for_reply(&rhp->rdev, &qhp->ep->com.wr_wait,
+ qhp->ep->hwtid, qhp->wq.sq.qid, __func__);
out:
PDBG("%s ret %d\n", __func__, ret);
return ret;
@@ -1122,7 +1144,6 @@ int c4iw_modify_qp(struct c4iw_dev *rhp, struct c4iw_qp *qhp,
{
int ret = 0;
struct c4iw_qp_attributes newattr = qhp->attr;
- unsigned long flag;
int disconnect = 0;
int terminate = 0;
int abort = 0;
@@ -1133,7 +1154,7 @@ int c4iw_modify_qp(struct c4iw_dev *rhp, struct c4iw_qp *qhp,
qhp, qhp->wq.sq.qid, qhp->wq.rq.qid, qhp->ep, qhp->attr.state,
(mask & C4IW_QP_ATTR_NEXT_STATE) ? attrs->next_state : -1);
- spin_lock_irqsave(&qhp->lock, flag);
+ mutex_lock(&qhp->mutex);
/* Process attr changes if in IDLE */
if (mask & C4IW_QP_ATTR_VALID_MODIFY) {
@@ -1184,7 +1205,7 @@ int c4iw_modify_qp(struct c4iw_dev *rhp, struct c4iw_qp *qhp,
qhp->attr.mpa_attr = attrs->mpa_attr;
qhp->attr.llp_stream_handle = attrs->llp_stream_handle;
qhp->ep = qhp->attr.llp_stream_handle;
- qhp->attr.state = C4IW_QP_STATE_RTS;
+ set_state(qhp, C4IW_QP_STATE_RTS);
/*
* Ref the endpoint here and deref when we
@@ -1193,15 +1214,13 @@ int c4iw_modify_qp(struct c4iw_dev *rhp, struct c4iw_qp *qhp,
* transition.
*/
c4iw_get_ep(&qhp->ep->com);
- spin_unlock_irqrestore(&qhp->lock, flag);
ret = rdma_init(rhp, qhp);
- spin_lock_irqsave(&qhp->lock, flag);
if (ret)
goto err;
break;
case C4IW_QP_STATE_ERROR:
- qhp->attr.state = C4IW_QP_STATE_ERROR;
- flush_qp(qhp, &flag);
+ set_state(qhp, C4IW_QP_STATE_ERROR);
+ flush_qp(qhp);
break;
default:
ret = -EINVAL;
@@ -1212,38 +1231,38 @@ int c4iw_modify_qp(struct c4iw_dev *rhp, struct c4iw_qp *qhp,
switch (attrs->next_state) {
case C4IW_QP_STATE_CLOSING:
BUG_ON(atomic_read(&qhp->ep->com.kref.refcount) < 2);
- qhp->attr.state = C4IW_QP_STATE_CLOSING;
+ set_state(qhp, C4IW_QP_STATE_CLOSING);
ep = qhp->ep;
if (!internal) {
abort = 0;
disconnect = 1;
- c4iw_get_ep(&ep->com);
+ c4iw_get_ep(&qhp->ep->com);
}
- spin_unlock_irqrestore(&qhp->lock, flag);
ret = rdma_fini(rhp, qhp, ep);
- spin_lock_irqsave(&qhp->lock, flag);
if (ret) {
- c4iw_get_ep(&ep->com);
+ if (internal)
+ c4iw_get_ep(&qhp->ep->com);
disconnect = abort = 1;
goto err;
}
break;
case C4IW_QP_STATE_TERMINATE:
- qhp->attr.state = C4IW_QP_STATE_TERMINATE;
+ set_state(qhp, C4IW_QP_STATE_TERMINATE);
if (qhp->ibqp.uobject)
t4_set_wq_in_error(&qhp->wq);
ep = qhp->ep;
- c4iw_get_ep(&ep->com);
- terminate = 1;
+ if (!internal)
+ terminate = 1;
disconnect = 1;
+ c4iw_get_ep(&qhp->ep->com);
break;
case C4IW_QP_STATE_ERROR:
- qhp->attr.state = C4IW_QP_STATE_ERROR;
+ set_state(qhp, C4IW_QP_STATE_ERROR);
if (!internal) {
abort = 1;
disconnect = 1;
ep = qhp->ep;
- c4iw_get_ep(&ep->com);
+ c4iw_get_ep(&qhp->ep->com);
}
goto err;
break;
@@ -1259,8 +1278,8 @@ int c4iw_modify_qp(struct c4iw_dev *rhp, struct c4iw_qp *qhp,
}
switch (attrs->next_state) {
case C4IW_QP_STATE_IDLE:
- flush_qp(qhp, &flag);
- qhp->attr.state = C4IW_QP_STATE_IDLE;
+ flush_qp(qhp);
+ set_state(qhp, C4IW_QP_STATE_IDLE);
qhp->attr.llp_stream_handle = NULL;
c4iw_put_ep(&qhp->ep->com);
qhp->ep = NULL;
@@ -1282,7 +1301,7 @@ int c4iw_modify_qp(struct c4iw_dev *rhp, struct c4iw_qp *qhp,
ret = -EINVAL;
goto out;
}
- qhp->attr.state = C4IW_QP_STATE_IDLE;
+ set_state(qhp, C4IW_QP_STATE_IDLE);
break;
case C4IW_QP_STATE_TERMINATE:
if (!internal) {
@@ -1305,15 +1324,16 @@ err:
/* disassociate the LLP connection */
qhp->attr.llp_stream_handle = NULL;
- ep = qhp->ep;
+ if (!ep)
+ ep = qhp->ep;
qhp->ep = NULL;
- qhp->attr.state = C4IW_QP_STATE_ERROR;
+ set_state(qhp, C4IW_QP_STATE_ERROR);
free = 1;
wake_up(&qhp->wait);
BUG_ON(!ep);
- flush_qp(qhp, &flag);
+ flush_qp(qhp);
out:
- spin_unlock_irqrestore(&qhp->lock, flag);
+ mutex_unlock(&qhp->mutex);
if (terminate)
post_terminate(qhp, NULL, internal ? GFP_ATOMIC : GFP_KERNEL);
@@ -1335,7 +1355,6 @@ out:
*/
if (free)
c4iw_put_ep(&ep->com);
-
PDBG("%s exit state %d\n", __func__, qhp->attr.state);
return ret;
}
@@ -1380,7 +1399,7 @@ struct ib_qp *c4iw_create_qp(struct ib_pd *pd, struct ib_qp_init_attr *attrs,
int sqsize, rqsize;
struct c4iw_ucontext *ucontext;
int ret;
- struct c4iw_mm_entry *mm1, *mm2, *mm3, *mm4;
+ struct c4iw_mm_entry *mm1, *mm2, *mm3, *mm4, *mm5 = NULL;
PDBG("%s ib_pd %p\n", __func__, pd);
@@ -1450,6 +1469,7 @@ struct ib_qp *c4iw_create_qp(struct ib_pd *pd, struct ib_qp_init_attr *attrs,
qhp->attr.max_ord = 1;
qhp->attr.max_ird = 1;
spin_lock_init(&qhp->lock);
+ mutex_init(&qhp->mutex);
init_waitqueue_head(&qhp->wait);
atomic_set(&qhp->refcnt, 1);
@@ -1478,7 +1498,15 @@ struct ib_qp *c4iw_create_qp(struct ib_pd *pd, struct ib_qp_init_attr *attrs,
ret = -ENOMEM;
goto err6;
}
-
+ if (t4_sq_onchip(&qhp->wq.sq)) {
+ mm5 = kmalloc(sizeof *mm5, GFP_KERNEL);
+ if (!mm5) {
+ ret = -ENOMEM;
+ goto err7;
+ }
+ uresp.flags = C4IW_QPF_ONCHIP;
+ } else
+ uresp.flags = 0;
uresp.qid_mask = rhp->rdev.qpmask;
uresp.sqid = qhp->wq.sq.qid;
uresp.sq_size = qhp->wq.sq.size;
@@ -1487,6 +1515,10 @@ struct ib_qp *c4iw_create_qp(struct ib_pd *pd, struct ib_qp_init_attr *attrs,
uresp.rq_size = qhp->wq.rq.size;
uresp.rq_memsize = qhp->wq.rq.memsize;
spin_lock(&ucontext->mmap_lock);
+ if (mm5) {
+ uresp.ma_sync_key = ucontext->key;
+ ucontext->key += PAGE_SIZE;
+ }
uresp.sq_key = ucontext->key;
ucontext->key += PAGE_SIZE;
uresp.rq_key = ucontext->key;
@@ -1498,9 +1530,9 @@ struct ib_qp *c4iw_create_qp(struct ib_pd *pd, struct ib_qp_init_attr *attrs,
spin_unlock(&ucontext->mmap_lock);
ret = ib_copy_to_udata(udata, &uresp, sizeof uresp);
if (ret)
- goto err7;
+ goto err8;
mm1->key = uresp.sq_key;
- mm1->addr = virt_to_phys(qhp->wq.sq.queue);
+ mm1->addr = qhp->wq.sq.phys_addr;
mm1->len = PAGE_ALIGN(qhp->wq.sq.memsize);
insert_mmap(ucontext, mm1);
mm2->key = uresp.rq_key;
@@ -1515,6 +1547,13 @@ struct ib_qp *c4iw_create_qp(struct ib_pd *pd, struct ib_qp_init_attr *attrs,
mm4->addr = qhp->wq.rq.udb;
mm4->len = PAGE_SIZE;
insert_mmap(ucontext, mm4);
+ if (mm5) {
+ mm5->key = uresp.ma_sync_key;
+ mm5->addr = (pci_resource_start(rhp->rdev.lldi.pdev, 0)
+ + A_PCIE_MA_SYNC) & PAGE_MASK;
+ mm5->len = PAGE_SIZE;
+ insert_mmap(ucontext, mm5);
+ }
}
qhp->ibqp.qp_num = qhp->wq.sq.qid;
init_timer(&(qhp->timer));
@@ -1522,6 +1561,8 @@ struct ib_qp *c4iw_create_qp(struct ib_pd *pd, struct ib_qp_init_attr *attrs,
__func__, qhp, qhp->attr.sq_num_entries, qhp->attr.rq_num_entries,
qhp->wq.sq.qid);
return &qhp->ibqp;
+err8:
+ kfree(mm5);
err7:
kfree(mm4);
err6:
diff --git a/drivers/infiniband/hw/cxgb4/resource.c b/drivers/infiniband/hw/cxgb4/resource.c
index 83b23dfa250d..4fb50d58b493 100644
--- a/drivers/infiniband/hw/cxgb4/resource.c
+++ b/drivers/infiniband/hw/cxgb4/resource.c
@@ -311,6 +311,9 @@ u32 c4iw_pblpool_alloc(struct c4iw_rdev *rdev, int size)
{
unsigned long addr = gen_pool_alloc(rdev->pbl_pool, size);
PDBG("%s addr 0x%x size %d\n", __func__, (u32)addr, size);
+ if (!addr && printk_ratelimit())
+ printk(KERN_WARNING MOD "%s: Out of PBL memory\n",
+ pci_name(rdev->lldi.pdev));
return (u32)addr;
}
@@ -370,6 +373,9 @@ u32 c4iw_rqtpool_alloc(struct c4iw_rdev *rdev, int size)
{
unsigned long addr = gen_pool_alloc(rdev->rqt_pool, size << 6);
PDBG("%s addr 0x%x size %d\n", __func__, (u32)addr, size << 6);
+ if (!addr && printk_ratelimit())
+ printk(KERN_WARNING MOD "%s: Out of RQT memory\n",
+ pci_name(rdev->lldi.pdev));
return (u32)addr;
}
@@ -416,3 +422,59 @@ void c4iw_rqtpool_destroy(struct c4iw_rdev *rdev)
{
gen_pool_destroy(rdev->rqt_pool);
}
+
+/*
+ * On-Chip QP Memory.
+ */
+#define MIN_OCQP_SHIFT 12 /* 4KB == min ocqp size */
+
+u32 c4iw_ocqp_pool_alloc(struct c4iw_rdev *rdev, int size)
+{
+ unsigned long addr = gen_pool_alloc(rdev->ocqp_pool, size);
+ PDBG("%s addr 0x%x size %d\n", __func__, (u32)addr, size);
+ return (u32)addr;
+}
+
+void c4iw_ocqp_pool_free(struct c4iw_rdev *rdev, u32 addr, int size)
+{
+ PDBG("%s addr 0x%x size %d\n", __func__, addr, size);
+ gen_pool_free(rdev->ocqp_pool, (unsigned long)addr, size);
+}
+
+int c4iw_ocqp_pool_create(struct c4iw_rdev *rdev)
+{
+ unsigned start, chunk, top;
+
+ rdev->ocqp_pool = gen_pool_create(MIN_OCQP_SHIFT, -1);
+ if (!rdev->ocqp_pool)
+ return -ENOMEM;
+
+ start = rdev->lldi.vr->ocq.start;
+ chunk = rdev->lldi.vr->ocq.size;
+ top = start + chunk;
+
+ while (start < top) {
+ chunk = min(top - start + 1, chunk);
+ if (gen_pool_add(rdev->ocqp_pool, start, chunk, -1)) {
+ PDBG("%s failed to add OCQP chunk (%x/%x)\n",
+ __func__, start, chunk);
+ if (chunk <= 1024 << MIN_OCQP_SHIFT) {
+ printk(KERN_WARNING MOD
+ "Failed to add all OCQP chunks (%x/%x)\n",
+ start, top - start);
+ return 0;
+ }
+ chunk >>= 1;
+ } else {
+ PDBG("%s added OCQP chunk (%x/%x)\n",
+ __func__, start, chunk);
+ start += chunk;
+ }
+ }
+ return 0;
+}
+
+void c4iw_ocqp_pool_destroy(struct c4iw_rdev *rdev)
+{
+ gen_pool_destroy(rdev->ocqp_pool);
+}
diff --git a/drivers/infiniband/hw/cxgb4/t4.h b/drivers/infiniband/hw/cxgb4/t4.h
index 24f369046ef3..70004425d695 100644
--- a/drivers/infiniband/hw/cxgb4/t4.h
+++ b/drivers/infiniband/hw/cxgb4/t4.h
@@ -52,6 +52,7 @@
#define T4_STAG_UNSET 0xffffffff
#define T4_FW_MAJ 0
#define T4_EQ_STATUS_ENTRIES (L1_CACHE_BYTES > 64 ? 2 : 1)
+#define A_PCIE_MA_SYNC 0x30b4
struct t4_status_page {
__be32 rsvd1; /* flit 0 - hw owns */
@@ -65,7 +66,7 @@ struct t4_status_page {
#define T4_EQ_ENTRY_SIZE 64
-#define T4_SQ_NUM_SLOTS 4
+#define T4_SQ_NUM_SLOTS 5
#define T4_SQ_NUM_BYTES (T4_EQ_ENTRY_SIZE * T4_SQ_NUM_SLOTS)
#define T4_MAX_SEND_SGE ((T4_SQ_NUM_BYTES - sizeof(struct fw_ri_send_wr) - \
sizeof(struct fw_ri_isgl)) / sizeof(struct fw_ri_sge))
@@ -78,7 +79,7 @@ struct t4_status_page {
sizeof(struct fw_ri_rdma_write_wr) - \
sizeof(struct fw_ri_isgl)) / sizeof(struct fw_ri_sge))
#define T4_MAX_FR_IMMD ((T4_SQ_NUM_BYTES - sizeof(struct fw_ri_fr_nsmr_wr) - \
- sizeof(struct fw_ri_immd)))
+ sizeof(struct fw_ri_immd)) & ~31UL)
#define T4_MAX_FR_DEPTH (T4_MAX_FR_IMMD / sizeof(u64))
#define T4_RQ_NUM_SLOTS 2
@@ -266,10 +267,36 @@ struct t4_swsqe {
u16 idx;
};
+static inline pgprot_t t4_pgprot_wc(pgprot_t prot)
+{
+#if defined(__i386__) || defined(__x86_64__)
+ return pgprot_writecombine(prot);
+#elif defined(CONFIG_PPC64)
+ return __pgprot((pgprot_val(prot) | _PAGE_NO_CACHE) &
+ ~(pgprot_t)_PAGE_GUARDED);
+#else
+ return pgprot_noncached(prot);
+#endif
+}
+
+static inline int t4_ocqp_supported(void)
+{
+#if defined(__i386__) || defined(__x86_64__) || defined(CONFIG_PPC64)
+ return 1;
+#else
+ return 0;
+#endif
+}
+
+enum {
+ T4_SQ_ONCHIP = (1<<0),
+};
+
struct t4_sq {
union t4_wr *queue;
dma_addr_t dma_addr;
DEFINE_DMA_UNMAP_ADDR(mapping);
+ unsigned long phys_addr;
struct t4_swsqe *sw_sq;
struct t4_swsqe *oldest_read;
u64 udb;
@@ -280,6 +307,7 @@ struct t4_sq {
u16 cidx;
u16 pidx;
u16 wq_pidx;
+ u16 flags;
};
struct t4_swrqe {
@@ -350,6 +378,11 @@ static inline void t4_rq_consume(struct t4_wq *wq)
wq->rq.cidx = 0;
}
+static inline int t4_sq_onchip(struct t4_sq *sq)
+{
+ return sq->flags & T4_SQ_ONCHIP;
+}
+
static inline int t4_sq_empty(struct t4_wq *wq)
{
return wq->sq.in_use == 0;
@@ -396,30 +429,27 @@ static inline void t4_ring_rq_db(struct t4_wq *wq, u16 inc)
static inline int t4_wq_in_error(struct t4_wq *wq)
{
- return wq->sq.queue[wq->sq.size].status.qp_err;
+ return wq->rq.queue[wq->rq.size].status.qp_err;
}
static inline void t4_set_wq_in_error(struct t4_wq *wq)
{
- wq->sq.queue[wq->sq.size].status.qp_err = 1;
wq->rq.queue[wq->rq.size].status.qp_err = 1;
}
static inline void t4_disable_wq_db(struct t4_wq *wq)
{
- wq->sq.queue[wq->sq.size].status.db_off = 1;
wq->rq.queue[wq->rq.size].status.db_off = 1;
}
static inline void t4_enable_wq_db(struct t4_wq *wq)
{
- wq->sq.queue[wq->sq.size].status.db_off = 0;
wq->rq.queue[wq->rq.size].status.db_off = 0;
}
static inline int t4_wq_db_enabled(struct t4_wq *wq)
{
- return !wq->sq.queue[wq->sq.size].status.db_off;
+ return !wq->rq.queue[wq->rq.size].status.db_off;
}
struct t4_cq {
diff --git a/drivers/infiniband/hw/cxgb4/user.h b/drivers/infiniband/hw/cxgb4/user.h
index ed6414abde02..e6669d54770e 100644
--- a/drivers/infiniband/hw/cxgb4/user.h
+++ b/drivers/infiniband/hw/cxgb4/user.h
@@ -50,7 +50,13 @@ struct c4iw_create_cq_resp {
__u32 qid_mask;
};
+
+enum {
+ C4IW_QPF_ONCHIP = (1<<0)
+};
+
struct c4iw_create_qp_resp {
+ __u64 ma_sync_key;
__u64 sq_key;
__u64 rq_key;
__u64 sq_db_gts_key;
@@ -62,5 +68,6 @@ struct c4iw_create_qp_resp {
__u32 sq_size;
__u32 rq_size;
__u32 qid_mask;
+ __u32 flags;
};
#endif
diff --git a/drivers/infiniband/hw/ehca/ehca_mrmw.c b/drivers/infiniband/hw/ehca/ehca_mrmw.c
index 53f4cd4fc19a..43cae84005f0 100644
--- a/drivers/infiniband/hw/ehca/ehca_mrmw.c
+++ b/drivers/infiniband/hw/ehca/ehca_mrmw.c
@@ -171,7 +171,7 @@ struct ib_mr *ehca_get_dma_mr(struct ib_pd *pd, int mr_access_flags)
}
ret = ehca_reg_maxmr(shca, e_maxmr,
- (void *)ehca_map_vaddr((void *)KERNELBASE),
+ (void *)ehca_map_vaddr((void *)(KERNELBASE + PHYSICAL_START)),
mr_access_flags, e_pd,
&e_maxmr->ib.ib_mr.lkey,
&e_maxmr->ib.ib_mr.rkey);
@@ -1636,7 +1636,7 @@ int ehca_reg_internal_maxmr(
/* register internal max-MR on HCA */
size_maxmr = ehca_mr_len;
- iova_start = (u64 *)ehca_map_vaddr((void *)KERNELBASE);
+ iova_start = (u64 *)ehca_map_vaddr((void *)(KERNELBASE + PHYSICAL_START));
ib_pbuf.addr = 0;
ib_pbuf.size = size_maxmr;
num_kpages = NUM_CHUNKS(((u64)iova_start % PAGE_SIZE) + size_maxmr,
@@ -2209,7 +2209,7 @@ int ehca_mr_is_maxmr(u64 size,
{
/* a MR is treated as max-MR only if it fits following: */
if ((size == ehca_mr_len) &&
- (iova_start == (void *)ehca_map_vaddr((void *)KERNELBASE))) {
+ (iova_start == (void *)ehca_map_vaddr((void *)(KERNELBASE + PHYSICAL_START)))) {
ehca_gen_dbg("this is a max-MR");
return 1;
} else
diff --git a/drivers/infiniband/hw/ipath/Makefile b/drivers/infiniband/hw/ipath/Makefile
index fa3df82681df..4496f2820c92 100644
--- a/drivers/infiniband/hw/ipath/Makefile
+++ b/drivers/infiniband/hw/ipath/Makefile
@@ -1,4 +1,4 @@
-EXTRA_CFLAGS += -DIPATH_IDSTR='"QLogic kernel.org driver"' \
+ccflags-y := -DIPATH_IDSTR='"QLogic kernel.org driver"' \
-DIPATH_KERN_TYPE=0
obj-$(CONFIG_INFINIBAND_IPATH) += ib_ipath.o
diff --git a/drivers/infiniband/hw/ipath/ipath_file_ops.c b/drivers/infiniband/hw/ipath/ipath_file_ops.c
index 6078992da3f0..9292a15ad7c4 100644
--- a/drivers/infiniband/hw/ipath/ipath_file_ops.c
+++ b/drivers/infiniband/hw/ipath/ipath_file_ops.c
@@ -40,7 +40,6 @@
#include <linux/highmem.h>
#include <linux/io.h>
#include <linux/jiffies.h>
-#include <linux/smp_lock.h>
#include <asm/pgtable.h>
#include "ipath_kernel.h"
diff --git a/drivers/infiniband/hw/ipath/ipath_fs.c b/drivers/infiniband/hw/ipath/ipath_fs.c
index d13e72685dcf..8c8afc716b98 100644
--- a/drivers/infiniband/hw/ipath/ipath_fs.c
+++ b/drivers/infiniband/hw/ipath/ipath_fs.c
@@ -57,6 +57,7 @@ static int ipathfs_mknod(struct inode *dir, struct dentry *dentry,
goto bail;
}
+ inode->i_ino = get_next_ino();
inode->i_mode = mode;
inode->i_atime = inode->i_mtime = inode->i_ctime = CURRENT_TIME;
inode->i_private = data;
@@ -361,13 +362,13 @@ bail:
return ret;
}
-static int ipathfs_get_sb(struct file_system_type *fs_type, int flags,
- const char *dev_name, void *data, struct vfsmount *mnt)
+static struct dentry *ipathfs_mount(struct file_system_type *fs_type,
+ int flags, const char *dev_name, void *data)
{
- int ret = get_sb_single(fs_type, flags, data,
- ipathfs_fill_super, mnt);
- if (ret >= 0)
- ipath_super = mnt->mnt_sb;
+ struct dentry *ret;
+ ret = mount_single(fs_type, flags, data, ipathfs_fill_super);
+ if (!IS_ERR(ret))
+ ipath_super = ret->d_sb;
return ret;
}
@@ -410,7 +411,7 @@ bail:
static struct file_system_type ipathfs_fs_type = {
.owner = THIS_MODULE,
.name = "ipathfs",
- .get_sb = ipathfs_get_sb,
+ .mount = ipathfs_mount,
.kill_sb = ipathfs_kill_super,
};
diff --git a/drivers/infiniband/hw/mlx4/ah.c b/drivers/infiniband/hw/mlx4/ah.c
index 11a236f8d884..4b8f9c49397e 100644
--- a/drivers/infiniband/hw/mlx4/ah.c
+++ b/drivers/infiniband/hw/mlx4/ah.c
@@ -30,66 +30,163 @@
* SOFTWARE.
*/
+#include <rdma/ib_addr.h>
+#include <rdma/ib_cache.h>
+
#include <linux/slab.h>
+#include <linux/inet.h>
+#include <linux/string.h>
#include "mlx4_ib.h"
-struct ib_ah *mlx4_ib_create_ah(struct ib_pd *pd, struct ib_ah_attr *ah_attr)
+int mlx4_ib_resolve_grh(struct mlx4_ib_dev *dev, const struct ib_ah_attr *ah_attr,
+ u8 *mac, int *is_mcast, u8 port)
{
- struct mlx4_dev *dev = to_mdev(pd->device)->dev;
- struct mlx4_ib_ah *ah;
+ struct in6_addr in6;
- ah = kmalloc(sizeof *ah, GFP_ATOMIC);
- if (!ah)
- return ERR_PTR(-ENOMEM);
+ *is_mcast = 0;
- memset(&ah->av, 0, sizeof ah->av);
+ memcpy(&in6, ah_attr->grh.dgid.raw, sizeof in6);
+ if (rdma_link_local_addr(&in6))
+ rdma_get_ll_mac(&in6, mac);
+ else if (rdma_is_multicast_addr(&in6)) {
+ rdma_get_mcast_mac(&in6, mac);
+ *is_mcast = 1;
+ } else
+ return -EINVAL;
- ah->av.port_pd = cpu_to_be32(to_mpd(pd)->pdn | (ah_attr->port_num << 24));
- ah->av.g_slid = ah_attr->src_path_bits;
- ah->av.dlid = cpu_to_be16(ah_attr->dlid);
- if (ah_attr->static_rate) {
- ah->av.stat_rate = ah_attr->static_rate + MLX4_STAT_RATE_OFFSET;
- while (ah->av.stat_rate > IB_RATE_2_5_GBPS + MLX4_STAT_RATE_OFFSET &&
- !(1 << ah->av.stat_rate & dev->caps.stat_rate_support))
- --ah->av.stat_rate;
- }
- ah->av.sl_tclass_flowlabel = cpu_to_be32(ah_attr->sl << 28);
+ return 0;
+}
+
+static struct ib_ah *create_ib_ah(struct ib_pd *pd, struct ib_ah_attr *ah_attr,
+ struct mlx4_ib_ah *ah)
+{
+ struct mlx4_dev *dev = to_mdev(pd->device)->dev;
+
+ ah->av.ib.port_pd = cpu_to_be32(to_mpd(pd)->pdn | (ah_attr->port_num << 24));
+ ah->av.ib.g_slid = ah_attr->src_path_bits;
if (ah_attr->ah_flags & IB_AH_GRH) {
- ah->av.g_slid |= 0x80;
- ah->av.gid_index = ah_attr->grh.sgid_index;
- ah->av.hop_limit = ah_attr->grh.hop_limit;
- ah->av.sl_tclass_flowlabel |=
+ ah->av.ib.g_slid |= 0x80;
+ ah->av.ib.gid_index = ah_attr->grh.sgid_index;
+ ah->av.ib.hop_limit = ah_attr->grh.hop_limit;
+ ah->av.ib.sl_tclass_flowlabel |=
cpu_to_be32((ah_attr->grh.traffic_class << 20) |
ah_attr->grh.flow_label);
- memcpy(ah->av.dgid, ah_attr->grh.dgid.raw, 16);
+ memcpy(ah->av.ib.dgid, ah_attr->grh.dgid.raw, 16);
+ }
+
+ ah->av.ib.dlid = cpu_to_be16(ah_attr->dlid);
+ if (ah_attr->static_rate) {
+ ah->av.ib.stat_rate = ah_attr->static_rate + MLX4_STAT_RATE_OFFSET;
+ while (ah->av.ib.stat_rate > IB_RATE_2_5_GBPS + MLX4_STAT_RATE_OFFSET &&
+ !(1 << ah->av.ib.stat_rate & dev->caps.stat_rate_support))
+ --ah->av.ib.stat_rate;
}
+ ah->av.ib.sl_tclass_flowlabel = cpu_to_be32(ah_attr->sl << 28);
return &ah->ibah;
}
+static struct ib_ah *create_iboe_ah(struct ib_pd *pd, struct ib_ah_attr *ah_attr,
+ struct mlx4_ib_ah *ah)
+{
+ struct mlx4_ib_dev *ibdev = to_mdev(pd->device);
+ struct mlx4_dev *dev = ibdev->dev;
+ union ib_gid sgid;
+ u8 mac[6];
+ int err;
+ int is_mcast;
+ u16 vlan_tag;
+
+ err = mlx4_ib_resolve_grh(ibdev, ah_attr, mac, &is_mcast, ah_attr->port_num);
+ if (err)
+ return ERR_PTR(err);
+
+ memcpy(ah->av.eth.mac, mac, 6);
+ err = ib_get_cached_gid(pd->device, ah_attr->port_num, ah_attr->grh.sgid_index, &sgid);
+ if (err)
+ return ERR_PTR(err);
+ vlan_tag = rdma_get_vlan_id(&sgid);
+ if (vlan_tag < 0x1000)
+ vlan_tag |= (ah_attr->sl & 7) << 13;
+ ah->av.eth.port_pd = cpu_to_be32(to_mpd(pd)->pdn | (ah_attr->port_num << 24));
+ ah->av.eth.gid_index = ah_attr->grh.sgid_index;
+ ah->av.eth.vlan = cpu_to_be16(vlan_tag);
+ if (ah_attr->static_rate) {
+ ah->av.eth.stat_rate = ah_attr->static_rate + MLX4_STAT_RATE_OFFSET;
+ while (ah->av.eth.stat_rate > IB_RATE_2_5_GBPS + MLX4_STAT_RATE_OFFSET &&
+ !(1 << ah->av.eth.stat_rate & dev->caps.stat_rate_support))
+ --ah->av.eth.stat_rate;
+ }
+
+ /*
+ * HW requires multicast LID so we just choose one.
+ */
+ if (is_mcast)
+ ah->av.ib.dlid = cpu_to_be16(0xc000);
+
+ memcpy(ah->av.eth.dgid, ah_attr->grh.dgid.raw, 16);
+ ah->av.eth.sl_tclass_flowlabel = cpu_to_be32(ah_attr->sl << 28);
+
+ return &ah->ibah;
+}
+
+struct ib_ah *mlx4_ib_create_ah(struct ib_pd *pd, struct ib_ah_attr *ah_attr)
+{
+ struct mlx4_ib_ah *ah;
+ struct ib_ah *ret;
+
+ ah = kzalloc(sizeof *ah, GFP_ATOMIC);
+ if (!ah)
+ return ERR_PTR(-ENOMEM);
+
+ if (rdma_port_get_link_layer(pd->device, ah_attr->port_num) == IB_LINK_LAYER_ETHERNET) {
+ if (!(ah_attr->ah_flags & IB_AH_GRH)) {
+ ret = ERR_PTR(-EINVAL);
+ } else {
+ /*
+ * TBD: need to handle the case when we get
+ * called in an atomic context and there we
+ * might sleep. We don't expect this
+ * currently since we're working with link
+ * local addresses which we can translate
+ * without going to sleep.
+ */
+ ret = create_iboe_ah(pd, ah_attr, ah);
+ }
+
+ if (IS_ERR(ret))
+ kfree(ah);
+
+ return ret;
+ } else
+ return create_ib_ah(pd, ah_attr, ah); /* never fails */
+}
+
int mlx4_ib_query_ah(struct ib_ah *ibah, struct ib_ah_attr *ah_attr)
{
struct mlx4_ib_ah *ah = to_mah(ibah);
+ enum rdma_link_layer ll;
memset(ah_attr, 0, sizeof *ah_attr);
- ah_attr->dlid = be16_to_cpu(ah->av.dlid);
- ah_attr->sl = be32_to_cpu(ah->av.sl_tclass_flowlabel) >> 28;
- ah_attr->port_num = be32_to_cpu(ah->av.port_pd) >> 24;
- if (ah->av.stat_rate)
- ah_attr->static_rate = ah->av.stat_rate - MLX4_STAT_RATE_OFFSET;
- ah_attr->src_path_bits = ah->av.g_slid & 0x7F;
+ ah_attr->sl = be32_to_cpu(ah->av.ib.sl_tclass_flowlabel) >> 28;
+ ah_attr->port_num = be32_to_cpu(ah->av.ib.port_pd) >> 24;
+ ll = rdma_port_get_link_layer(ibah->device, ah_attr->port_num);
+ ah_attr->dlid = ll == IB_LINK_LAYER_INFINIBAND ? be16_to_cpu(ah->av.ib.dlid) : 0;
+ if (ah->av.ib.stat_rate)
+ ah_attr->static_rate = ah->av.ib.stat_rate - MLX4_STAT_RATE_OFFSET;
+ ah_attr->src_path_bits = ah->av.ib.g_slid & 0x7F;
if (mlx4_ib_ah_grh_present(ah)) {
ah_attr->ah_flags = IB_AH_GRH;
ah_attr->grh.traffic_class =
- be32_to_cpu(ah->av.sl_tclass_flowlabel) >> 20;
+ be32_to_cpu(ah->av.ib.sl_tclass_flowlabel) >> 20;
ah_attr->grh.flow_label =
- be32_to_cpu(ah->av.sl_tclass_flowlabel) & 0xfffff;
- ah_attr->grh.hop_limit = ah->av.hop_limit;
- ah_attr->grh.sgid_index = ah->av.gid_index;
- memcpy(ah_attr->grh.dgid.raw, ah->av.dgid, 16);
+ be32_to_cpu(ah->av.ib.sl_tclass_flowlabel) & 0xfffff;
+ ah_attr->grh.hop_limit = ah->av.ib.hop_limit;
+ ah_attr->grh.sgid_index = ah->av.ib.gid_index;
+ memcpy(ah_attr->grh.dgid.raw, ah->av.ib.dgid, 16);
}
return 0;
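[Annotation] mlx4_ib_resolve_grh() above relies on an IBoE link-local GID carrying a modified EUI-64 interface ID, so the destination MAC can be recovered from the GID itself (rdma_get_ll_mac()), while multicast GIDs map to multicast MACs. A small stand-alone sketch of that inverse mapping, assuming the standard EUI-64 layout, is shown below; it is illustrative and not code from the patch.

#include <stdint.h>

/* Recover a unicast MAC from the low 8 bytes of a link-local GID:
 * the interface ID is mac[0..2] (byte 0 with the U/L bit flipped),
 * then the 0xff,0xfe filler, then mac[3..5]. */
static void ll_gid_to_mac(const uint8_t gid[16], uint8_t mac[6])
{
	mac[0] = gid[8] ^ 0x02;	/* flip the universal/local bit back */
	mac[1] = gid[9];
	mac[2] = gid[10];
	mac[3] = gid[13];	/* gid[11..12] hold the 0xff,0xfe filler */
	mac[4] = gid[14];
	mac[5] = gid[15];
}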
diff --git a/drivers/infiniband/hw/mlx4/mad.c b/drivers/infiniband/hw/mlx4/mad.c
index f38d5b118927..c9a8dd63b9e2 100644
--- a/drivers/infiniband/hw/mlx4/mad.c
+++ b/drivers/infiniband/hw/mlx4/mad.c
@@ -311,19 +311,25 @@ int mlx4_ib_mad_init(struct mlx4_ib_dev *dev)
struct ib_mad_agent *agent;
int p, q;
int ret;
+ enum rdma_link_layer ll;
- for (p = 0; p < dev->num_ports; ++p)
+ for (p = 0; p < dev->num_ports; ++p) {
+ ll = rdma_port_get_link_layer(&dev->ib_dev, p + 1);
for (q = 0; q <= 1; ++q) {
- agent = ib_register_mad_agent(&dev->ib_dev, p + 1,
- q ? IB_QPT_GSI : IB_QPT_SMI,
- NULL, 0, send_handler,
- NULL, NULL);
- if (IS_ERR(agent)) {
- ret = PTR_ERR(agent);
- goto err;
- }
- dev->send_agent[p][q] = agent;
+ if (ll == IB_LINK_LAYER_INFINIBAND) {
+ agent = ib_register_mad_agent(&dev->ib_dev, p + 1,
+ q ? IB_QPT_GSI : IB_QPT_SMI,
+ NULL, 0, send_handler,
+ NULL, NULL);
+ if (IS_ERR(agent)) {
+ ret = PTR_ERR(agent);
+ goto err;
+ }
+ dev->send_agent[p][q] = agent;
+ } else
+ dev->send_agent[p][q] = NULL;
}
+ }
return 0;
@@ -344,8 +350,10 @@ void mlx4_ib_mad_cleanup(struct mlx4_ib_dev *dev)
for (p = 0; p < dev->num_ports; ++p) {
for (q = 0; q <= 1; ++q) {
agent = dev->send_agent[p][q];
- dev->send_agent[p][q] = NULL;
- ib_unregister_mad_agent(agent);
+ if (agent) {
+ dev->send_agent[p][q] = NULL;
+ ib_unregister_mad_agent(agent);
+ }
}
if (dev->sm_ah[p])
diff --git a/drivers/infiniband/hw/mlx4/main.c b/drivers/infiniband/hw/mlx4/main.c
index 4e94e360e43b..30e09caf0da9 100644
--- a/drivers/infiniband/hw/mlx4/main.c
+++ b/drivers/infiniband/hw/mlx4/main.c
@@ -35,9 +35,14 @@
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/errno.h>
+#include <linux/netdevice.h>
+#include <linux/inetdevice.h>
+#include <linux/rtnetlink.h>
+#include <linux/if_vlan.h>
#include <rdma/ib_smi.h>
#include <rdma/ib_user_verbs.h>
+#include <rdma/ib_addr.h>
#include <linux/mlx4/driver.h>
#include <linux/mlx4/cmd.h>
@@ -58,6 +63,15 @@ static const char mlx4_ib_version[] =
DRV_NAME ": Mellanox ConnectX InfiniBand driver v"
DRV_VERSION " (" DRV_RELDATE ")\n";
+struct update_gid_work {
+ struct work_struct work;
+ union ib_gid gids[128];
+ struct mlx4_ib_dev *dev;
+ int port;
+};
+
+static struct workqueue_struct *wq;
+
static void init_query_mad(struct ib_smp *mad)
{
mad->base_version = 1;
@@ -66,6 +80,8 @@ static void init_query_mad(struct ib_smp *mad)
mad->method = IB_MGMT_METHOD_GET;
}
+static union ib_gid zgid;
+
static int mlx4_ib_query_device(struct ib_device *ibdev,
struct ib_device_attr *props)
{
@@ -135,7 +151,7 @@ static int mlx4_ib_query_device(struct ib_device *ibdev,
props->max_srq = dev->dev->caps.num_srqs - dev->dev->caps.reserved_srqs;
props->max_srq_wr = dev->dev->caps.max_srq_wqes - 1;
props->max_srq_sge = dev->dev->caps.max_srq_sge;
- props->max_fast_reg_page_list_len = PAGE_SIZE / sizeof (u64);
+ props->max_fast_reg_page_list_len = MLX4_MAX_FAST_REG_PAGES;
props->local_ca_ack_delay = dev->dev->caps.local_ca_ack_delay;
props->atomic_cap = dev->dev->caps.flags & MLX4_DEV_CAP_FLAG_ATOMIC ?
IB_ATOMIC_HCA : IB_ATOMIC_NONE;
@@ -154,28 +170,19 @@ out:
return err;
}
-static int mlx4_ib_query_port(struct ib_device *ibdev, u8 port,
- struct ib_port_attr *props)
+static enum rdma_link_layer
+mlx4_ib_port_link_layer(struct ib_device *device, u8 port_num)
{
- struct ib_smp *in_mad = NULL;
- struct ib_smp *out_mad = NULL;
- int err = -ENOMEM;
-
- in_mad = kzalloc(sizeof *in_mad, GFP_KERNEL);
- out_mad = kmalloc(sizeof *out_mad, GFP_KERNEL);
- if (!in_mad || !out_mad)
- goto out;
-
- memset(props, 0, sizeof *props);
-
- init_query_mad(in_mad);
- in_mad->attr_id = IB_SMP_ATTR_PORT_INFO;
- in_mad->attr_mod = cpu_to_be32(port);
+ struct mlx4_dev *dev = to_mdev(device)->dev;
- err = mlx4_MAD_IFC(to_mdev(ibdev), 1, 1, port, NULL, NULL, in_mad, out_mad);
- if (err)
- goto out;
+ return dev->caps.port_mask & (1 << (port_num - 1)) ?
+ IB_LINK_LAYER_INFINIBAND : IB_LINK_LAYER_ETHERNET;
+}
+static int ib_link_query_port(struct ib_device *ibdev, u8 port,
+ struct ib_port_attr *props,
+ struct ib_smp *out_mad)
+{
props->lid = be16_to_cpup((__be16 *) (out_mad->data + 16));
props->lmc = out_mad->data[34] & 0x7;
props->sm_lid = be16_to_cpup((__be16 *) (out_mad->data + 18));
@@ -196,6 +203,80 @@ static int mlx4_ib_query_port(struct ib_device *ibdev, u8 port,
props->max_vl_num = out_mad->data[37] >> 4;
props->init_type_reply = out_mad->data[41] >> 4;
+ return 0;
+}
+
+static u8 state_to_phys_state(enum ib_port_state state)
+{
+ return state == IB_PORT_ACTIVE ? 5 : 3;
+}
+
+static int eth_link_query_port(struct ib_device *ibdev, u8 port,
+ struct ib_port_attr *props,
+ struct ib_smp *out_mad)
+{
+ struct mlx4_ib_iboe *iboe = &to_mdev(ibdev)->iboe;
+ struct net_device *ndev;
+ enum ib_mtu tmp;
+
+ props->active_width = IB_WIDTH_1X;
+ props->active_speed = 4;
+ props->port_cap_flags = IB_PORT_CM_SUP;
+ props->gid_tbl_len = to_mdev(ibdev)->dev->caps.gid_table_len[port];
+ props->max_msg_sz = to_mdev(ibdev)->dev->caps.max_msg_sz;
+ props->pkey_tbl_len = 1;
+ props->bad_pkey_cntr = be16_to_cpup((__be16 *) (out_mad->data + 46));
+ props->qkey_viol_cntr = be16_to_cpup((__be16 *) (out_mad->data + 48));
+ props->max_mtu = IB_MTU_2048;
+ props->subnet_timeout = 0;
+ props->max_vl_num = out_mad->data[37] >> 4;
+ props->init_type_reply = 0;
+ props->state = IB_PORT_DOWN;
+ props->phys_state = state_to_phys_state(props->state);
+ props->active_mtu = IB_MTU_256;
+ spin_lock(&iboe->lock);
+ ndev = iboe->netdevs[port - 1];
+ if (!ndev)
+ goto out;
+
+ tmp = iboe_get_mtu(ndev->mtu);
+ props->active_mtu = tmp ? min(props->max_mtu, tmp) : IB_MTU_256;
+
+ props->state = (netif_running(ndev) && netif_carrier_ok(ndev)) ?
+ IB_PORT_ACTIVE : IB_PORT_DOWN;
+ props->phys_state = state_to_phys_state(props->state);
+
+out:
+ spin_unlock(&iboe->lock);
+ return 0;
+}
+
+static int mlx4_ib_query_port(struct ib_device *ibdev, u8 port,
+ struct ib_port_attr *props)
+{
+ struct ib_smp *in_mad = NULL;
+ struct ib_smp *out_mad = NULL;
+ int err = -ENOMEM;
+
+ in_mad = kzalloc(sizeof *in_mad, GFP_KERNEL);
+ out_mad = kmalloc(sizeof *out_mad, GFP_KERNEL);
+ if (!in_mad || !out_mad)
+ goto out;
+
+ memset(props, 0, sizeof *props);
+
+ init_query_mad(in_mad);
+ in_mad->attr_id = IB_SMP_ATTR_PORT_INFO;
+ in_mad->attr_mod = cpu_to_be32(port);
+
+ err = mlx4_MAD_IFC(to_mdev(ibdev), 1, 1, port, NULL, NULL, in_mad, out_mad);
+ if (err)
+ goto out;
+
+ err = mlx4_ib_port_link_layer(ibdev, port) == IB_LINK_LAYER_INFINIBAND ?
+ ib_link_query_port(ibdev, port, props, out_mad) :
+ eth_link_query_port(ibdev, port, props, out_mad);
+
out:
kfree(in_mad);
kfree(out_mad);
@@ -203,8 +284,8 @@ out:
return err;
}
-static int mlx4_ib_query_gid(struct ib_device *ibdev, u8 port, int index,
- union ib_gid *gid)
+static int __mlx4_ib_query_gid(struct ib_device *ibdev, u8 port, int index,
+ union ib_gid *gid)
{
struct ib_smp *in_mad = NULL;
struct ib_smp *out_mad = NULL;
@@ -241,6 +322,25 @@ out:
return err;
}
+static int iboe_query_gid(struct ib_device *ibdev, u8 port, int index,
+ union ib_gid *gid)
+{
+ struct mlx4_ib_dev *dev = to_mdev(ibdev);
+
+ *gid = dev->iboe.gid_table[port - 1][index];
+
+ return 0;
+}
+
+static int mlx4_ib_query_gid(struct ib_device *ibdev, u8 port, int index,
+ union ib_gid *gid)
+{
+ if (rdma_port_get_link_layer(ibdev, port) == IB_LINK_LAYER_INFINIBAND)
+ return __mlx4_ib_query_gid(ibdev, port, index, gid);
+ else
+ return iboe_query_gid(ibdev, port, index, gid);
+}
+
static int mlx4_ib_query_pkey(struct ib_device *ibdev, u8 port, u16 index,
u16 *pkey)
{
@@ -272,14 +372,32 @@ out:
static int mlx4_ib_modify_device(struct ib_device *ibdev, int mask,
struct ib_device_modify *props)
{
+ struct mlx4_cmd_mailbox *mailbox;
+
if (mask & ~IB_DEVICE_MODIFY_NODE_DESC)
return -EOPNOTSUPP;
- if (mask & IB_DEVICE_MODIFY_NODE_DESC) {
- spin_lock(&to_mdev(ibdev)->sm_lock);
- memcpy(ibdev->node_desc, props->node_desc, 64);
- spin_unlock(&to_mdev(ibdev)->sm_lock);
- }
+ if (!(mask & IB_DEVICE_MODIFY_NODE_DESC))
+ return 0;
+
+ spin_lock(&to_mdev(ibdev)->sm_lock);
+ memcpy(ibdev->node_desc, props->node_desc, 64);
+ spin_unlock(&to_mdev(ibdev)->sm_lock);
+
+ /*
+ * If possible, pass node desc to FW, so it can generate
+ * a 144 trap. If cmd fails, just ignore.
+ */
+ mailbox = mlx4_alloc_cmd_mailbox(to_mdev(ibdev)->dev);
+ if (IS_ERR(mailbox))
+ return 0;
+
+ memset(mailbox->buf, 0, 256);
+ memcpy(mailbox->buf, props->node_desc, 64);
+ mlx4_cmd(to_mdev(ibdev)->dev, mailbox->dma, 1, 0,
+ MLX4_CMD_SET_NODE, MLX4_CMD_TIME_CLASS_A);
+
+ mlx4_free_cmd_mailbox(to_mdev(ibdev)->dev, mailbox);
return 0;
}
@@ -289,6 +407,7 @@ static int mlx4_SET_PORT(struct mlx4_ib_dev *dev, u8 port, int reset_qkey_viols,
{
struct mlx4_cmd_mailbox *mailbox;
int err;
+ u8 is_eth = dev->dev->caps.port_type[port] == MLX4_PORT_TYPE_ETH;
mailbox = mlx4_alloc_cmd_mailbox(dev->dev);
if (IS_ERR(mailbox))
@@ -304,7 +423,7 @@ static int mlx4_SET_PORT(struct mlx4_ib_dev *dev, u8 port, int reset_qkey_viols,
((__be32 *) mailbox->buf)[1] = cpu_to_be32(cap_mask);
}
- err = mlx4_cmd(dev->dev, mailbox->dma, port, 0, MLX4_CMD_SET_PORT,
+ err = mlx4_cmd(dev->dev, mailbox->dma, port, is_eth, MLX4_CMD_SET_PORT,
MLX4_CMD_TIME_CLASS_B);
mlx4_free_cmd_mailbox(dev->dev, mailbox);
@@ -447,18 +566,132 @@ static int mlx4_ib_dealloc_pd(struct ib_pd *pd)
return 0;
}
+static int add_gid_entry(struct ib_qp *ibqp, union ib_gid *gid)
+{
+ struct mlx4_ib_qp *mqp = to_mqp(ibqp);
+ struct mlx4_ib_dev *mdev = to_mdev(ibqp->device);
+ struct mlx4_ib_gid_entry *ge;
+
+ ge = kzalloc(sizeof *ge, GFP_KERNEL);
+ if (!ge)
+ return -ENOMEM;
+
+ ge->gid = *gid;
+ if (mlx4_ib_add_mc(mdev, mqp, gid)) {
+ ge->port = mqp->port;
+ ge->added = 1;
+ }
+
+ mutex_lock(&mqp->mutex);
+ list_add_tail(&ge->list, &mqp->gid_list);
+ mutex_unlock(&mqp->mutex);
+
+ return 0;
+}
+
+int mlx4_ib_add_mc(struct mlx4_ib_dev *mdev, struct mlx4_ib_qp *mqp,
+ union ib_gid *gid)
+{
+ u8 mac[6];
+ struct net_device *ndev;
+ int ret = 0;
+
+ if (!mqp->port)
+ return 0;
+
+ spin_lock(&mdev->iboe.lock);
+ ndev = mdev->iboe.netdevs[mqp->port - 1];
+ if (ndev)
+ dev_hold(ndev);
+ spin_unlock(&mdev->iboe.lock);
+
+ if (ndev) {
+ rdma_get_mcast_mac((struct in6_addr *)gid, mac);
+ rtnl_lock();
+ dev_mc_add(mdev->iboe.netdevs[mqp->port - 1], mac);
+ ret = 1;
+ rtnl_unlock();
+ dev_put(ndev);
+ }
+
+ return ret;
+}
+
static int mlx4_ib_mcg_attach(struct ib_qp *ibqp, union ib_gid *gid, u16 lid)
{
- return mlx4_multicast_attach(to_mdev(ibqp->device)->dev,
- &to_mqp(ibqp)->mqp, gid->raw,
- !!(to_mqp(ibqp)->flags &
- MLX4_IB_QP_BLOCK_MULTICAST_LOOPBACK));
+ int err;
+ struct mlx4_ib_dev *mdev = to_mdev(ibqp->device);
+ struct mlx4_ib_qp *mqp = to_mqp(ibqp);
+
+ err = mlx4_multicast_attach(mdev->dev, &mqp->mqp, gid->raw, !!(mqp->flags &
+ MLX4_IB_QP_BLOCK_MULTICAST_LOOPBACK));
+ if (err)
+ return err;
+
+ err = add_gid_entry(ibqp, gid);
+ if (err)
+ goto err_add;
+
+ return 0;
+
+err_add:
+ mlx4_multicast_detach(mdev->dev, &mqp->mqp, gid->raw);
+ return err;
+}
+
+static struct mlx4_ib_gid_entry *find_gid_entry(struct mlx4_ib_qp *qp, u8 *raw)
+{
+ struct mlx4_ib_gid_entry *ge;
+ struct mlx4_ib_gid_entry *tmp;
+ struct mlx4_ib_gid_entry *ret = NULL;
+
+ list_for_each_entry_safe(ge, tmp, &qp->gid_list, list) {
+ if (!memcmp(raw, ge->gid.raw, 16)) {
+ ret = ge;
+ break;
+ }
+ }
+
+ return ret;
}
static int mlx4_ib_mcg_detach(struct ib_qp *ibqp, union ib_gid *gid, u16 lid)
{
- return mlx4_multicast_detach(to_mdev(ibqp->device)->dev,
- &to_mqp(ibqp)->mqp, gid->raw);
+ int err;
+ struct mlx4_ib_dev *mdev = to_mdev(ibqp->device);
+ struct mlx4_ib_qp *mqp = to_mqp(ibqp);
+ u8 mac[6];
+ struct net_device *ndev;
+ struct mlx4_ib_gid_entry *ge;
+
+ err = mlx4_multicast_detach(mdev->dev,
+ &mqp->mqp, gid->raw);
+ if (err)
+ return err;
+
+ mutex_lock(&mqp->mutex);
+ ge = find_gid_entry(mqp, gid->raw);
+ if (ge) {
+ spin_lock(&mdev->iboe.lock);
+ ndev = ge->added ? mdev->iboe.netdevs[ge->port - 1] : NULL;
+ if (ndev)
+ dev_hold(ndev);
+ spin_unlock(&mdev->iboe.lock);
+ rdma_get_mcast_mac((struct in6_addr *)gid, mac);
+ if (ndev) {
+ rtnl_lock();
+ dev_mc_del(mdev->iboe.netdevs[ge->port - 1], mac);
+ rtnl_unlock();
+ dev_put(ndev);
+ }
+ list_del(&ge->list);
+ kfree(ge);
+ } else
+ printk(KERN_WARNING "could not find mgid entry\n");
+
+ mutex_unlock(&mqp->mutex);
+
+ return 0;
}
static int init_node_data(struct mlx4_ib_dev *dev)
@@ -543,15 +776,215 @@ static struct device_attribute *mlx4_class_attributes[] = {
&dev_attr_board_id
};
+static void mlx4_addrconf_ifid_eui48(u8 *eui, u16 vlan_id, struct net_device *dev)
+{
+ memcpy(eui, dev->dev_addr, 3);
+ memcpy(eui + 5, dev->dev_addr + 3, 3);
+ if (vlan_id < 0x1000) {
+ eui[3] = vlan_id >> 8;
+ eui[4] = vlan_id & 0xff;
+ } else {
+ eui[3] = 0xff;
+ eui[4] = 0xfe;
+ }
+ eui[0] ^= 2;
+}
+
+static void update_gids_task(struct work_struct *work)
+{
+ struct update_gid_work *gw = container_of(work, struct update_gid_work, work);
+ struct mlx4_cmd_mailbox *mailbox;
+ union ib_gid *gids;
+ int err;
+ struct mlx4_dev *dev = gw->dev->dev;
+ struct ib_event event;
+
+ mailbox = mlx4_alloc_cmd_mailbox(dev);
+ if (IS_ERR(mailbox)) {
+ printk(KERN_WARNING "update gid table failed %ld\n", PTR_ERR(mailbox));
+ return;
+ }
+
+ gids = mailbox->buf;
+ memcpy(gids, gw->gids, sizeof gw->gids);
+
+ err = mlx4_cmd(dev, mailbox->dma, MLX4_SET_PORT_GID_TABLE << 8 | gw->port,
+ 1, MLX4_CMD_SET_PORT, MLX4_CMD_TIME_CLASS_B);
+ if (err)
+ printk(KERN_WARNING "set port command failed\n");
+ else {
+ memcpy(gw->dev->iboe.gid_table[gw->port - 1], gw->gids, sizeof gw->gids);
+ event.device = &gw->dev->ib_dev;
+ event.element.port_num = gw->port;
+ event.event = IB_EVENT_LID_CHANGE;
+ ib_dispatch_event(&event);
+ }
+
+ mlx4_free_cmd_mailbox(dev, mailbox);
+ kfree(gw);
+}
+
+static int update_ipv6_gids(struct mlx4_ib_dev *dev, int port, int clear)
+{
+ struct net_device *ndev = dev->iboe.netdevs[port - 1];
+ struct update_gid_work *work;
+ struct net_device *tmp;
+ int i;
+ u8 *hits;
+ int ret;
+ union ib_gid gid;
+ int free;
+ int found;
+ int need_update = 0;
+ u16 vid;
+
+ work = kzalloc(sizeof *work, GFP_ATOMIC);
+ if (!work)
+ return -ENOMEM;
+
+ hits = kzalloc(128, GFP_ATOMIC);
+ if (!hits) {
+ ret = -ENOMEM;
+ goto out;
+ }
+
+ read_lock(&dev_base_lock);
+ for_each_netdev(&init_net, tmp) {
+ if (ndev && (tmp == ndev || rdma_vlan_dev_real_dev(tmp) == ndev)) {
+ gid.global.subnet_prefix = cpu_to_be64(0xfe80000000000000LL);
+ vid = rdma_vlan_dev_vlan_id(tmp);
+ mlx4_addrconf_ifid_eui48(&gid.raw[8], vid, ndev);
+ found = 0;
+ free = -1;
+ for (i = 0; i < 128; ++i) {
+ if (free < 0 &&
+ !memcmp(&dev->iboe.gid_table[port - 1][i], &zgid, sizeof zgid))
+ free = i;
+ if (!memcmp(&dev->iboe.gid_table[port - 1][i], &gid, sizeof gid)) {
+ hits[i] = 1;
+ found = 1;
+ break;
+ }
+ }
+
+ if (!found) {
+ if (tmp == ndev &&
+ (memcmp(&dev->iboe.gid_table[port - 1][0],
+ &gid, sizeof gid) ||
+ !memcmp(&dev->iboe.gid_table[port - 1][0],
+ &zgid, sizeof gid))) {
+ dev->iboe.gid_table[port - 1][0] = gid;
+ ++need_update;
+ hits[0] = 1;
+ } else if (free >= 0) {
+ dev->iboe.gid_table[port - 1][free] = gid;
+ hits[free] = 1;
+ ++need_update;
+ }
+ }
+ }
+ }
+ read_unlock(&dev_base_lock);
+
+ for (i = 0; i < 128; ++i)
+ if (!hits[i]) {
+ if (memcmp(&dev->iboe.gid_table[port - 1][i], &zgid, sizeof zgid))
+ ++need_update;
+ dev->iboe.gid_table[port - 1][i] = zgid;
+ }
+
+ if (need_update) {
+ memcpy(work->gids, dev->iboe.gid_table[port - 1], sizeof work->gids);
+ INIT_WORK(&work->work, update_gids_task);
+ work->port = port;
+ work->dev = dev;
+ queue_work(wq, &work->work);
+ } else
+ kfree(work);
+
+ kfree(hits);
+ return 0;
+
+out:
+ kfree(work);
+ return ret;
+}
+
+static void handle_en_event(struct mlx4_ib_dev *dev, int port, unsigned long event)
+{
+ switch (event) {
+ case NETDEV_UP:
+ case NETDEV_CHANGEADDR:
+ update_ipv6_gids(dev, port, 0);
+ break;
+
+ case NETDEV_DOWN:
+ update_ipv6_gids(dev, port, 1);
+ dev->iboe.netdevs[port - 1] = NULL;
+ }
+}
+
+static void netdev_added(struct mlx4_ib_dev *dev, int port)
+{
+ update_ipv6_gids(dev, port, 0);
+}
+
+static void netdev_removed(struct mlx4_ib_dev *dev, int port)
+{
+ update_ipv6_gids(dev, port, 1);
+}
+
+static int mlx4_ib_netdev_event(struct notifier_block *this, unsigned long event,
+ void *ptr)
+{
+ struct net_device *dev = ptr;
+ struct mlx4_ib_dev *ibdev;
+ struct net_device *oldnd;
+ struct mlx4_ib_iboe *iboe;
+ int port;
+
+ if (!net_eq(dev_net(dev), &init_net))
+ return NOTIFY_DONE;
+
+ ibdev = container_of(this, struct mlx4_ib_dev, iboe.nb);
+ iboe = &ibdev->iboe;
+
+ spin_lock(&iboe->lock);
+ mlx4_foreach_ib_transport_port(port, ibdev->dev) {
+ oldnd = iboe->netdevs[port - 1];
+ iboe->netdevs[port - 1] =
+ mlx4_get_protocol_dev(ibdev->dev, MLX4_PROTOCOL_EN, port);
+ if (oldnd != iboe->netdevs[port - 1]) {
+ if (iboe->netdevs[port - 1])
+ netdev_added(ibdev, port);
+ else
+ netdev_removed(ibdev, port);
+ }
+ }
+
+ if (dev == iboe->netdevs[0] ||
+ (iboe->netdevs[0] && rdma_vlan_dev_real_dev(dev) == iboe->netdevs[0]))
+ handle_en_event(ibdev, 1, event);
+ else if (dev == iboe->netdevs[1]
+ || (iboe->netdevs[1] && rdma_vlan_dev_real_dev(dev) == iboe->netdevs[1]))
+ handle_en_event(ibdev, 2, event);
+
+ spin_unlock(&iboe->lock);
+
+ return NOTIFY_DONE;
+}
+
static void *mlx4_ib_add(struct mlx4_dev *dev)
{
struct mlx4_ib_dev *ibdev;
int num_ports = 0;
int i;
+ int err;
+ struct mlx4_ib_iboe *iboe;
printk_once(KERN_INFO "%s", mlx4_ib_version);
- mlx4_foreach_port(i, dev, MLX4_PORT_TYPE_IB)
+ mlx4_foreach_ib_transport_port(i, dev)
num_ports++;
/* No point in registering a device with no ports... */
@@ -564,6 +997,8 @@ static void *mlx4_ib_add(struct mlx4_dev *dev)
return NULL;
}
+ iboe = &ibdev->iboe;
+
if (mlx4_pd_alloc(dev, &ibdev->priv_pdn))
goto err_dealloc;
@@ -612,6 +1047,7 @@ static void *mlx4_ib_add(struct mlx4_dev *dev)
ibdev->ib_dev.query_device = mlx4_ib_query_device;
ibdev->ib_dev.query_port = mlx4_ib_query_port;
+ ibdev->ib_dev.get_link_layer = mlx4_ib_port_link_layer;
ibdev->ib_dev.query_gid = mlx4_ib_query_gid;
ibdev->ib_dev.query_pkey = mlx4_ib_query_pkey;
ibdev->ib_dev.modify_device = mlx4_ib_modify_device;
@@ -656,6 +1092,8 @@ static void *mlx4_ib_add(struct mlx4_dev *dev)
ibdev->ib_dev.unmap_fmr = mlx4_ib_unmap_fmr;
ibdev->ib_dev.dealloc_fmr = mlx4_ib_fmr_dealloc;
+ spin_lock_init(&iboe->lock);
+
if (init_node_data(ibdev))
goto err_map;
@@ -668,16 +1106,28 @@ static void *mlx4_ib_add(struct mlx4_dev *dev)
if (mlx4_ib_mad_init(ibdev))
goto err_reg;
+ if (dev->caps.flags & MLX4_DEV_CAP_FLAG_IBOE && !iboe->nb.notifier_call) {
+ iboe->nb.notifier_call = mlx4_ib_netdev_event;
+ err = register_netdevice_notifier(&iboe->nb);
+ if (err)
+ goto err_reg;
+ }
+
for (i = 0; i < ARRAY_SIZE(mlx4_class_attributes); ++i) {
if (device_create_file(&ibdev->ib_dev.dev,
mlx4_class_attributes[i]))
- goto err_reg;
+ goto err_notif;
}
ibdev->ib_active = true;
return ibdev;
+err_notif:
+ if (unregister_netdevice_notifier(&ibdev->iboe.nb))
+ printk(KERN_WARNING "failure unregistering notifier\n");
+ flush_workqueue(wq);
+
err_reg:
ib_unregister_device(&ibdev->ib_dev);
@@ -703,11 +1153,16 @@ static void mlx4_ib_remove(struct mlx4_dev *dev, void *ibdev_ptr)
mlx4_ib_mad_cleanup(ibdev);
ib_unregister_device(&ibdev->ib_dev);
+ if (ibdev->iboe.nb.notifier_call) {
+ if (unregister_netdevice_notifier(&ibdev->iboe.nb))
+ printk(KERN_WARNING "failure unregistering notifier\n");
+ ibdev->iboe.nb.notifier_call = NULL;
+ }
+ iounmap(ibdev->uar_map);
- for (p = 1; p <= ibdev->num_ports; ++p)
+ mlx4_foreach_port(p, dev, MLX4_PORT_TYPE_IB)
mlx4_CLOSE_PORT(dev, p);
- iounmap(ibdev->uar_map);
mlx4_uar_free(dev, &ibdev->priv_uar);
mlx4_pd_free(dev, ibdev->priv_pdn);
ib_dealloc_device(&ibdev->ib_dev);
@@ -747,19 +1202,33 @@ static void mlx4_ib_event(struct mlx4_dev *dev, void *ibdev_ptr,
}
static struct mlx4_interface mlx4_ib_interface = {
- .add = mlx4_ib_add,
- .remove = mlx4_ib_remove,
- .event = mlx4_ib_event
+ .add = mlx4_ib_add,
+ .remove = mlx4_ib_remove,
+ .event = mlx4_ib_event,
+ .protocol = MLX4_PROTOCOL_IB
};
static int __init mlx4_ib_init(void)
{
- return mlx4_register_interface(&mlx4_ib_interface);
+ int err;
+
+ wq = create_singlethread_workqueue("mlx4_ib");
+ if (!wq)
+ return -ENOMEM;
+
+ err = mlx4_register_interface(&mlx4_ib_interface);
+ if (err) {
+ destroy_workqueue(wq);
+ return err;
+ }
+
+ return 0;
}
static void __exit mlx4_ib_cleanup(void)
{
mlx4_unregister_interface(&mlx4_ib_interface);
+ destroy_workqueue(wq);
}
module_init(mlx4_ib_init);
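[Annotation] update_ipv6_gids() fills each per-port IBoE GID table entry with the link-local prefix plus an interface ID derived from the netdev MAC and, for VLAN devices, the VLAN ID (mlx4_addrconf_ifid_eui48() above builds only the low 8 bytes). The sketch below stitches the two halves into a full 16-byte GID for illustration; the function is hypothetical, but the byte layout follows the hunk.

#include <stdint.h>
#include <string.h>

/* fe80::/64 prefix + EUI-64-style interface ID; vlan_id >= 0x1000 means
 * "no VLAN" and selects the standard 0xff,0xfe filler, as above. */
static void make_iboe_gid(const uint8_t mac[6], uint16_t vlan_id, uint8_t gid[16])
{
	static const uint8_t prefix[8] = { 0xfe, 0x80, 0, 0, 0, 0, 0, 0 };

	memcpy(gid, prefix, 8);
	memcpy(gid + 8, mac, 3);
	memcpy(gid + 13, mac + 3, 3);
	if (vlan_id < 0x1000) {
		gid[11] = vlan_id >> 8;
		gid[12] = vlan_id & 0xff;
	} else {
		gid[11] = 0xff;
		gid[12] = 0xfe;
	}
	gid[8] ^= 2;	/* set the universal/local bit */
}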
diff --git a/drivers/infiniband/hw/mlx4/mlx4_ib.h b/drivers/infiniband/hw/mlx4/mlx4_ib.h
index 3486d7675e56..2a322f21049f 100644
--- a/drivers/infiniband/hw/mlx4/mlx4_ib.h
+++ b/drivers/infiniband/hw/mlx4/mlx4_ib.h
@@ -112,6 +112,13 @@ enum mlx4_ib_qp_flags {
MLX4_IB_QP_BLOCK_MULTICAST_LOOPBACK = 1 << 1,
};
+struct mlx4_ib_gid_entry {
+ struct list_head list;
+ union ib_gid gid;
+ int added;
+ u8 port;
+};
+
struct mlx4_ib_qp {
struct ib_qp ibqp;
struct mlx4_qp mqp;
@@ -138,6 +145,8 @@ struct mlx4_ib_qp {
u8 resp_depth;
u8 sq_no_prefetch;
u8 state;
+ int mlx_type;
+ struct list_head gid_list;
};
struct mlx4_ib_srq {
@@ -157,7 +166,14 @@ struct mlx4_ib_srq {
struct mlx4_ib_ah {
struct ib_ah ibah;
- struct mlx4_av av;
+ union mlx4_ext_av av;
+};
+
+struct mlx4_ib_iboe {
+ spinlock_t lock;
+ struct net_device *netdevs[MLX4_MAX_PORTS];
+ struct notifier_block nb;
+ union ib_gid gid_table[MLX4_MAX_PORTS][128];
};
struct mlx4_ib_dev {
@@ -176,6 +192,7 @@ struct mlx4_ib_dev {
struct mutex cap_mask_mutex;
bool ib_active;
+ struct mlx4_ib_iboe iboe;
};
static inline struct mlx4_ib_dev *to_mdev(struct ib_device *ibdev)
@@ -314,9 +331,20 @@ int mlx4_ib_map_phys_fmr(struct ib_fmr *ibfmr, u64 *page_list, int npages,
int mlx4_ib_unmap_fmr(struct list_head *fmr_list);
int mlx4_ib_fmr_dealloc(struct ib_fmr *fmr);
+int mlx4_ib_resolve_grh(struct mlx4_ib_dev *dev, const struct ib_ah_attr *ah_attr,
+ u8 *mac, int *is_mcast, u8 port);
+
static inline int mlx4_ib_ah_grh_present(struct mlx4_ib_ah *ah)
{
- return !!(ah->av.g_slid & 0x80);
+ u8 port = be32_to_cpu(ah->av.ib.port_pd) >> 24 & 3;
+
+ if (rdma_port_get_link_layer(ah->ibah.device, port) == IB_LINK_LAYER_ETHERNET)
+ return 1;
+
+ return !!(ah->av.ib.g_slid & 0x80);
}
+int mlx4_ib_add_mc(struct mlx4_ib_dev *mdev, struct mlx4_ib_qp *mqp,
+ union ib_gid *gid);
+
#endif /* MLX4_IB_H */
diff --git a/drivers/infiniband/hw/mlx4/mr.c b/drivers/infiniband/hw/mlx4/mr.c
index 1d27b9a8e2d6..dca55b19a6f1 100644
--- a/drivers/infiniband/hw/mlx4/mr.c
+++ b/drivers/infiniband/hw/mlx4/mr.c
@@ -226,7 +226,7 @@ struct ib_fast_reg_page_list *mlx4_ib_alloc_fast_reg_page_list(struct ib_device
struct mlx4_ib_fast_reg_page_list *mfrpl;
int size = page_list_len * sizeof (u64);
- if (size > PAGE_SIZE)
+ if (page_list_len > MLX4_MAX_FAST_REG_PAGES)
return ERR_PTR(-EINVAL);
mfrpl = kmalloc(sizeof *mfrpl, GFP_KERNEL);
diff --git a/drivers/infiniband/hw/mlx4/qp.c b/drivers/infiniband/hw/mlx4/qp.c
index 6a60827b2301..2001f20a4361 100644
--- a/drivers/infiniband/hw/mlx4/qp.c
+++ b/drivers/infiniband/hw/mlx4/qp.c
@@ -33,9 +33,11 @@
#include <linux/log2.h>
#include <linux/slab.h>
+#include <linux/netdevice.h>
#include <rdma/ib_cache.h>
#include <rdma/ib_pack.h>
+#include <rdma/ib_addr.h>
#include <linux/mlx4/qp.h>
@@ -48,17 +50,26 @@ enum {
enum {
MLX4_IB_DEFAULT_SCHED_QUEUE = 0x83,
- MLX4_IB_DEFAULT_QP0_SCHED_QUEUE = 0x3f
+ MLX4_IB_DEFAULT_QP0_SCHED_QUEUE = 0x3f,
+ MLX4_IB_LINK_TYPE_IB = 0,
+ MLX4_IB_LINK_TYPE_ETH = 1
};
enum {
/*
- * Largest possible UD header: send with GRH and immediate data.
+ * Largest possible UD header: send with GRH and immediate
+ * data plus 18 bytes for an Ethernet header with VLAN/802.1Q
+ * tag. (LRH would only use 8 bytes, so Ethernet is the
+ * biggest case)
*/
- MLX4_IB_UD_HEADER_SIZE = 72,
+ MLX4_IB_UD_HEADER_SIZE = 82,
MLX4_IB_LSO_HEADER_SPARE = 128,
};
+enum {
+ MLX4_IB_IBOE_ETHERTYPE = 0x8915
+};
+
struct mlx4_ib_sqp {
struct mlx4_ib_qp qp;
int pkey_index;
@@ -462,6 +473,7 @@ static int create_qp_common(struct mlx4_ib_dev *dev, struct ib_pd *pd,
mutex_init(&qp->mutex);
spin_lock_init(&qp->sq.lock);
spin_lock_init(&qp->rq.lock);
+ INIT_LIST_HEAD(&qp->gid_list);
qp->state = IB_QPS_RESET;
if (init_attr->sq_sig_type == IB_SIGNAL_ALL_WR)
@@ -649,6 +661,16 @@ static void mlx4_ib_unlock_cqs(struct mlx4_ib_cq *send_cq, struct mlx4_ib_cq *re
}
}
+static void del_gid_entries(struct mlx4_ib_qp *qp)
+{
+ struct mlx4_ib_gid_entry *ge, *tmp;
+
+ list_for_each_entry_safe(ge, tmp, &qp->gid_list, list) {
+ list_del(&ge->list);
+ kfree(ge);
+ }
+}
+
static void destroy_qp_common(struct mlx4_ib_dev *dev, struct mlx4_ib_qp *qp,
int is_user)
{
@@ -695,6 +717,8 @@ static void destroy_qp_common(struct mlx4_ib_dev *dev, struct mlx4_ib_qp *qp,
if (!qp->ibqp.srq)
mlx4_db_free(dev->dev, &qp->db);
}
+
+ del_gid_entries(qp);
}
struct ib_qp *mlx4_ib_create_qp(struct ib_pd *pd,
@@ -852,6 +876,14 @@ static void mlx4_set_sched(struct mlx4_qp_path *path, u8 port)
static int mlx4_set_path(struct mlx4_ib_dev *dev, const struct ib_ah_attr *ah,
struct mlx4_qp_path *path, u8 port)
{
+ int err;
+ int is_eth = rdma_port_get_link_layer(&dev->ib_dev, port) ==
+ IB_LINK_LAYER_ETHERNET;
+ u8 mac[6];
+ int is_mcast;
+ u16 vlan_tag;
+ int vidx;
+
path->grh_mylmc = ah->src_path_bits & 0x7f;
path->rlid = cpu_to_be16(ah->dlid);
if (ah->static_rate) {
@@ -879,12 +911,49 @@ static int mlx4_set_path(struct mlx4_ib_dev *dev, const struct ib_ah_attr *ah,
memcpy(path->rgid, ah->grh.dgid.raw, 16);
}
- path->sched_queue = MLX4_IB_DEFAULT_SCHED_QUEUE |
- ((port - 1) << 6) | ((ah->sl & 0xf) << 2);
+ if (is_eth) {
+ path->sched_queue = MLX4_IB_DEFAULT_SCHED_QUEUE |
+ ((port - 1) << 6) | ((ah->sl & 7) << 3) | ((ah->sl & 8) >> 1);
+
+ if (!(ah->ah_flags & IB_AH_GRH))
+ return -1;
+
+ err = mlx4_ib_resolve_grh(dev, ah, mac, &is_mcast, port);
+ if (err)
+ return err;
+
+ memcpy(path->dmac, mac, 6);
+ path->ackto = MLX4_IB_LINK_TYPE_ETH;
+ /* use index 0 into MAC table for IBoE */
+ path->grh_mylmc &= 0x80;
+
+ vlan_tag = rdma_get_vlan_id(&dev->iboe.gid_table[port - 1][ah->grh.sgid_index]);
+ if (vlan_tag < 0x1000) {
+ if (mlx4_find_cached_vlan(dev->dev, port, vlan_tag, &vidx))
+ return -ENOENT;
+
+ path->vlan_index = vidx;
+ path->fl = 1 << 6;
+ }
+ } else
+ path->sched_queue = MLX4_IB_DEFAULT_SCHED_QUEUE |
+ ((port - 1) << 6) | ((ah->sl & 0xf) << 2);
return 0;
}
+static void update_mcg_macs(struct mlx4_ib_dev *dev, struct mlx4_ib_qp *qp)
+{
+ struct mlx4_ib_gid_entry *ge, *tmp;
+
+ list_for_each_entry_safe(ge, tmp, &qp->gid_list, list) {
+ if (!ge->added && mlx4_ib_add_mc(dev, qp, &ge->gid)) {
+ ge->added = 1;
+ ge->port = qp->port;
+ }
+ }
+}
+
static int __mlx4_ib_modify_qp(struct ib_qp *ibqp,
const struct ib_qp_attr *attr, int attr_mask,
enum ib_qp_state cur_state, enum ib_qp_state new_state)
@@ -980,7 +1049,7 @@ static int __mlx4_ib_modify_qp(struct ib_qp *ibqp,
}
if (attr_mask & IB_QP_TIMEOUT) {
- context->pri_path.ackto = attr->timeout << 3;
+ context->pri_path.ackto |= attr->timeout << 3;
optpar |= MLX4_QP_OPTPAR_ACK_TIMEOUT;
}
@@ -1118,8 +1187,10 @@ static int __mlx4_ib_modify_qp(struct ib_qp *ibqp,
qp->atomic_rd_en = attr->qp_access_flags;
if (attr_mask & IB_QP_MAX_DEST_RD_ATOMIC)
qp->resp_depth = attr->max_dest_rd_atomic;
- if (attr_mask & IB_QP_PORT)
+ if (attr_mask & IB_QP_PORT) {
qp->port = attr->port_num;
+ update_mcg_macs(dev, qp);
+ }
if (attr_mask & IB_QP_ALT_PATH)
qp->alt_port = attr->alt_port_num;
@@ -1221,40 +1292,59 @@ static int build_mlx_header(struct mlx4_ib_sqp *sqp, struct ib_send_wr *wr,
struct mlx4_wqe_mlx_seg *mlx = wqe;
struct mlx4_wqe_inline_seg *inl = wqe + sizeof *mlx;
struct mlx4_ib_ah *ah = to_mah(wr->wr.ud.ah);
+ union ib_gid sgid;
u16 pkey;
int send_size;
int header_size;
int spc;
int i;
+ int is_eth;
+ int is_vlan = 0;
+ int is_grh;
+ u16 vlan;
send_size = 0;
for (i = 0; i < wr->num_sge; ++i)
send_size += wr->sg_list[i].length;
- ib_ud_header_init(send_size, mlx4_ib_ah_grh_present(ah), 0, &sqp->ud_header);
+ is_eth = rdma_port_get_link_layer(sqp->qp.ibqp.device, sqp->qp.port) == IB_LINK_LAYER_ETHERNET;
+ is_grh = mlx4_ib_ah_grh_present(ah);
+ if (is_eth) {
+ ib_get_cached_gid(ib_dev, be32_to_cpu(ah->av.ib.port_pd) >> 24,
+ ah->av.ib.gid_index, &sgid);
+ vlan = rdma_get_vlan_id(&sgid);
+ is_vlan = vlan < 0x1000;
+ }
+ ib_ud_header_init(send_size, !is_eth, is_eth, is_vlan, is_grh, 0, &sqp->ud_header);
+
+ if (!is_eth) {
+ sqp->ud_header.lrh.service_level =
+ be32_to_cpu(ah->av.ib.sl_tclass_flowlabel) >> 28;
+ sqp->ud_header.lrh.destination_lid = ah->av.ib.dlid;
+ sqp->ud_header.lrh.source_lid = cpu_to_be16(ah->av.ib.g_slid & 0x7f);
+ }
- sqp->ud_header.lrh.service_level =
- be32_to_cpu(ah->av.sl_tclass_flowlabel) >> 28;
- sqp->ud_header.lrh.destination_lid = ah->av.dlid;
- sqp->ud_header.lrh.source_lid = cpu_to_be16(ah->av.g_slid & 0x7f);
- if (mlx4_ib_ah_grh_present(ah)) {
+ if (is_grh) {
sqp->ud_header.grh.traffic_class =
- (be32_to_cpu(ah->av.sl_tclass_flowlabel) >> 20) & 0xff;
+ (be32_to_cpu(ah->av.ib.sl_tclass_flowlabel) >> 20) & 0xff;
sqp->ud_header.grh.flow_label =
- ah->av.sl_tclass_flowlabel & cpu_to_be32(0xfffff);
- sqp->ud_header.grh.hop_limit = ah->av.hop_limit;
- ib_get_cached_gid(ib_dev, be32_to_cpu(ah->av.port_pd) >> 24,
- ah->av.gid_index, &sqp->ud_header.grh.source_gid);
+ ah->av.ib.sl_tclass_flowlabel & cpu_to_be32(0xfffff);
+ sqp->ud_header.grh.hop_limit = ah->av.ib.hop_limit;
+ ib_get_cached_gid(ib_dev, be32_to_cpu(ah->av.ib.port_pd) >> 24,
+ ah->av.ib.gid_index, &sqp->ud_header.grh.source_gid);
memcpy(sqp->ud_header.grh.destination_gid.raw,
- ah->av.dgid, 16);
+ ah->av.ib.dgid, 16);
}
mlx->flags &= cpu_to_be32(MLX4_WQE_CTRL_CQ_UPDATE);
- mlx->flags |= cpu_to_be32((!sqp->qp.ibqp.qp_num ? MLX4_WQE_MLX_VL15 : 0) |
- (sqp->ud_header.lrh.destination_lid ==
- IB_LID_PERMISSIVE ? MLX4_WQE_MLX_SLR : 0) |
- (sqp->ud_header.lrh.service_level << 8));
- mlx->rlid = sqp->ud_header.lrh.destination_lid;
+
+ if (!is_eth) {
+ mlx->flags |= cpu_to_be32((!sqp->qp.ibqp.qp_num ? MLX4_WQE_MLX_VL15 : 0) |
+ (sqp->ud_header.lrh.destination_lid ==
+ IB_LID_PERMISSIVE ? MLX4_WQE_MLX_SLR : 0) |
+ (sqp->ud_header.lrh.service_level << 8));
+ mlx->rlid = sqp->ud_header.lrh.destination_lid;
+ }
switch (wr->opcode) {
case IB_WR_SEND:
@@ -1270,9 +1360,29 @@ static int build_mlx_header(struct mlx4_ib_sqp *sqp, struct ib_send_wr *wr,
return -EINVAL;
}
- sqp->ud_header.lrh.virtual_lane = !sqp->qp.ibqp.qp_num ? 15 : 0;
- if (sqp->ud_header.lrh.destination_lid == IB_LID_PERMISSIVE)
- sqp->ud_header.lrh.source_lid = IB_LID_PERMISSIVE;
+ if (is_eth) {
+ u8 *smac;
+
+ memcpy(sqp->ud_header.eth.dmac_h, ah->av.eth.mac, 6);
+ /* FIXME: cache smac value? */
+ smac = to_mdev(sqp->qp.ibqp.device)->iboe.netdevs[sqp->qp.port - 1]->dev_addr;
+ memcpy(sqp->ud_header.eth.smac_h, smac, 6);
+ if (!memcmp(sqp->ud_header.eth.smac_h, sqp->ud_header.eth.dmac_h, 6))
+ mlx->flags |= cpu_to_be32(MLX4_WQE_CTRL_FORCE_LOOPBACK);
+ if (!is_vlan) {
+ sqp->ud_header.eth.type = cpu_to_be16(MLX4_IB_IBOE_ETHERTYPE);
+ } else {
+ u16 pcp;
+
+ sqp->ud_header.vlan.type = cpu_to_be16(MLX4_IB_IBOE_ETHERTYPE);
+ pcp = (be32_to_cpu(ah->av.ib.sl_tclass_flowlabel) >> 27 & 3) << 13;
+ sqp->ud_header.vlan.tag = cpu_to_be16(vlan | pcp);
+ }
+ } else {
+ sqp->ud_header.lrh.virtual_lane = !sqp->qp.ibqp.qp_num ? 15 : 0;
+ if (sqp->ud_header.lrh.destination_lid == IB_LID_PERMISSIVE)
+ sqp->ud_header.lrh.source_lid = IB_LID_PERMISSIVE;
+ }
sqp->ud_header.bth.solicited_event = !!(wr->send_flags & IB_SEND_SOLICITED);
if (!sqp->qp.ibqp.qp_num)
ib_get_cached_pkey(ib_dev, sqp->qp.port, sqp->pkey_index, &pkey);
@@ -1429,11 +1539,14 @@ static void set_masked_atomic_seg(struct mlx4_wqe_masked_atomic_seg *aseg,
}
static void set_datagram_seg(struct mlx4_wqe_datagram_seg *dseg,
- struct ib_send_wr *wr)
+ struct ib_send_wr *wr, __be16 *vlan)
{
memcpy(dseg->av, &to_mah(wr->wr.ud.ah)->av, sizeof (struct mlx4_av));
dseg->dqpn = cpu_to_be32(wr->wr.ud.remote_qpn);
dseg->qkey = cpu_to_be32(wr->wr.ud.remote_qkey);
+ dseg->vlan = to_mah(wr->wr.ud.ah)->av.eth.vlan;
+ memcpy(dseg->mac, to_mah(wr->wr.ud.ah)->av.eth.mac, 6);
+ *vlan = dseg->vlan;
}
static void set_mlx_icrc_seg(void *dseg)
@@ -1536,6 +1649,7 @@ int mlx4_ib_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
__be32 uninitialized_var(lso_hdr_sz);
__be32 blh;
int i;
+ __be16 vlan = cpu_to_be16(0xffff);
spin_lock_irqsave(&qp->sq.lock, flags);
@@ -1639,7 +1753,7 @@ int mlx4_ib_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
break;
case IB_QPT_UD:
- set_datagram_seg(wqe, wr);
+ set_datagram_seg(wqe, wr, &vlan);
wqe += sizeof (struct mlx4_wqe_datagram_seg);
size += sizeof (struct mlx4_wqe_datagram_seg) / 16;
@@ -1702,6 +1816,11 @@ int mlx4_ib_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
ctrl->fence_size = (wr->send_flags & IB_SEND_FENCE ?
MLX4_WQE_CTRL_FENCE : 0) | size;
+ if (be16_to_cpu(vlan) < 0x1000) {
+ ctrl->ins_vlan = 1 << 6;
+ ctrl->vlan_tag = vlan;
+ }
+
/*
* Make sure descriptor is fully written before
* setting ownership bit (because HW can start
@@ -1866,17 +1985,27 @@ static int to_ib_qp_access_flags(int mlx4_flags)
return ib_flags;
}
-static void to_ib_ah_attr(struct mlx4_dev *dev, struct ib_ah_attr *ib_ah_attr,
+static void to_ib_ah_attr(struct mlx4_ib_dev *ibdev, struct ib_ah_attr *ib_ah_attr,
struct mlx4_qp_path *path)
{
+ struct mlx4_dev *dev = ibdev->dev;
+ int is_eth;
+
memset(ib_ah_attr, 0, sizeof *ib_ah_attr);
ib_ah_attr->port_num = path->sched_queue & 0x40 ? 2 : 1;
if (ib_ah_attr->port_num == 0 || ib_ah_attr->port_num > dev->caps.num_ports)
return;
+ is_eth = rdma_port_get_link_layer(&ibdev->ib_dev, ib_ah_attr->port_num) ==
+ IB_LINK_LAYER_ETHERNET;
+ if (is_eth)
+ ib_ah_attr->sl = ((path->sched_queue >> 3) & 0x7) |
+ ((path->sched_queue & 4) << 1);
+ else
+ ib_ah_attr->sl = (path->sched_queue >> 2) & 0xf;
+
ib_ah_attr->dlid = be16_to_cpu(path->rlid);
- ib_ah_attr->sl = (path->sched_queue >> 2) & 0xf;
ib_ah_attr->src_path_bits = path->grh_mylmc & 0x7f;
ib_ah_attr->static_rate = path->static_rate ? path->static_rate - 5 : 0;
ib_ah_attr->ah_flags = (path->grh_mylmc & (1 << 7)) ? IB_AH_GRH : 0;
@@ -1929,8 +2058,8 @@ int mlx4_ib_query_qp(struct ib_qp *ibqp, struct ib_qp_attr *qp_attr, int qp_attr
to_ib_qp_access_flags(be32_to_cpu(context.params2));
if (qp->ibqp.qp_type == IB_QPT_RC || qp->ibqp.qp_type == IB_QPT_UC) {
- to_ib_ah_attr(dev->dev, &qp_attr->ah_attr, &context.pri_path);
- to_ib_ah_attr(dev->dev, &qp_attr->alt_ah_attr, &context.alt_path);
+ to_ib_ah_attr(dev, &qp_attr->ah_attr, &context.pri_path);
+ to_ib_ah_attr(dev, &qp_attr->alt_ah_attr, &context.alt_path);
qp_attr->alt_pkey_index = context.alt_path.pkey_index & 0x7f;
qp_attr->alt_port_num = qp_attr->alt_ah_attr.port_num;
}
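[Annotation] For Ethernet ports mlx4_set_path() packs the 4-bit service level differently from IB: the low three SL bits go into sched_queue[5:3] and SL bit 3 into sched_queue[2], and to_ib_ah_attr() undoes that packing when a QP is queried. A throwaway round-trip check of just those two expressions is sketched below; it is illustrative and not part of the patch.

#include <assert.h>
#include <stdint.h>

static uint8_t eth_sl_encode(uint8_t sl)
{
	return ((sl & 7) << 3) | ((sl & 8) >> 1);
}

static uint8_t eth_sl_decode(uint8_t sched_queue)
{
	return ((sched_queue >> 3) & 0x7) | ((sched_queue & 4) << 1);
}

int main(void)
{
	for (uint8_t sl = 0; sl < 16; ++sl)
		assert(eth_sl_decode(eth_sl_encode(sl)) == sl);
	return 0;
}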
diff --git a/drivers/infiniband/hw/mthca/mthca_qp.c b/drivers/infiniband/hw/mthca/mthca_qp.c
index d2d172e6289c..a34c9d38e822 100644
--- a/drivers/infiniband/hw/mthca/mthca_qp.c
+++ b/drivers/infiniband/hw/mthca/mthca_qp.c
@@ -1493,7 +1493,7 @@ static int build_mlx_header(struct mthca_dev *dev, struct mthca_sqp *sqp,
int err;
u16 pkey;
- ib_ud_header_init(256, /* assume a MAD */
+ ib_ud_header_init(256, /* assume a MAD */ 1, 0, 0,
mthca_ah_grh_present(to_mah(wr->wr.ud.ah)), 0,
&sqp->ud_header);
diff --git a/drivers/infiniband/hw/nes/nes_cm.c b/drivers/infiniband/hw/nes/nes_cm.c
index 6220d9d75b58..25ad0f9944c0 100644
--- a/drivers/infiniband/hw/nes/nes_cm.c
+++ b/drivers/infiniband/hw/nes/nes_cm.c
@@ -1424,7 +1424,6 @@ static void handle_rst_pkt(struct nes_cm_node *cm_node, struct sk_buff *skb,
{
int reset = 0; /* whether to send reset in case of err.. */
- int passive_state;
atomic_inc(&cm_resets_recvd);
nes_debug(NES_DBG_CM, "Received Reset, cm_node = %p, state = %u."
" refcnt=%d\n", cm_node, cm_node->state,
@@ -1439,7 +1438,7 @@ static void handle_rst_pkt(struct nes_cm_node *cm_node, struct sk_buff *skb,
active_open_err(cm_node, skb, reset);
break;
case NES_CM_STATE_MPAREQ_RCVD:
- passive_state = atomic_add_return(1, &cm_node->passive_state);
+ atomic_inc(&cm_node->passive_state);
dev_kfree_skb_any(skb);
break;
case NES_CM_STATE_ESTABLISHED:
diff --git a/drivers/infiniband/hw/nes/nes_nic.c b/drivers/infiniband/hw/nes/nes_nic.c
index 10560c796fd6..3892e2c0e95a 100644
--- a/drivers/infiniband/hw/nes/nes_nic.c
+++ b/drivers/infiniband/hw/nes/nes_nic.c
@@ -271,6 +271,7 @@ static int nes_netdev_stop(struct net_device *netdev)
if (netif_msg_ifdown(nesvnic))
printk(KERN_INFO PFX "%s: disabling interface\n", netdev->name);
+ netif_carrier_off(netdev);
/* Disable network packets */
napi_disable(&nesvnic->napi);
diff --git a/drivers/infiniband/hw/nes/nes_verbs.c b/drivers/infiniband/hw/nes/nes_verbs.c
index 546fc22405fe..99933e4e48ff 100644
--- a/drivers/infiniband/hw/nes/nes_verbs.c
+++ b/drivers/infiniband/hw/nes/nes_verbs.c
@@ -476,9 +476,9 @@ static struct ib_fast_reg_page_list *nes_alloc_fast_reg_page_list(
}
nes_debug(NES_DBG_MR, "nes_alloc_fast_reg_pbl: nes_frpl = %p, "
"ibfrpl = %p, ibfrpl.page_list = %p, pbl.kva = %p, "
- "pbl.paddr= %p\n", pnesfrpl, &pnesfrpl->ibfrpl,
+ "pbl.paddr = %llx\n", pnesfrpl, &pnesfrpl->ibfrpl,
pnesfrpl->ibfrpl.page_list, pnesfrpl->nes_wqe_pbl.kva,
- (void *)pnesfrpl->nes_wqe_pbl.paddr);
+ (unsigned long long) pnesfrpl->nes_wqe_pbl.paddr);
return pifrpl;
}
@@ -584,7 +584,9 @@ static int nes_query_port(struct ib_device *ibdev, u8 port, struct ib_port_attr
props->lmc = 0;
props->sm_lid = 0;
props->sm_sl = 0;
- if (nesvnic->linkup)
+ if (netif_queue_stopped(netdev))
+ props->state = IB_PORT_DOWN;
+ else if (nesvnic->linkup)
props->state = IB_PORT_ACTIVE;
else
props->state = IB_PORT_DOWN;
@@ -3483,13 +3485,13 @@ static int nes_post_send(struct ib_qp *ibqp, struct ib_send_wr *ib_wr,
for (i = 0; i < ib_wr->wr.fast_reg.page_list_len; i++)
dst_page_list[i] = cpu_to_le64(src_page_list[i]);
- nes_debug(NES_DBG_IW_TX, "SQ_FMR: iova_start: %p, "
- "length: %d, rkey: %0x, pgl_paddr: %p, "
+ nes_debug(NES_DBG_IW_TX, "SQ_FMR: iova_start: %llx, "
+ "length: %d, rkey: %0x, pgl_paddr: %llx, "
"page_list_len: %u, wqe_misc: %x\n",
- (void *)ib_wr->wr.fast_reg.iova_start,
+ (unsigned long long) ib_wr->wr.fast_reg.iova_start,
ib_wr->wr.fast_reg.length,
ib_wr->wr.fast_reg.rkey,
- (void *)pnesfrpl->nes_wqe_pbl.paddr,
+ (unsigned long long) pnesfrpl->nes_wqe_pbl.paddr,
ib_wr->wr.fast_reg.page_list_len,
wqe_misc);
break;
diff --git a/drivers/infiniband/hw/qib/qib.h b/drivers/infiniband/hw/qib/qib.h
index 61de0654820e..64c9e7d02d4a 100644
--- a/drivers/infiniband/hw/qib/qib.h
+++ b/drivers/infiniband/hw/qib/qib.h
@@ -1406,7 +1406,7 @@ extern struct mutex qib_mutex;
*/
#define qib_early_err(dev, fmt, ...) \
do { \
- dev_info(dev, KERN_ERR QIB_DRV_NAME ": " fmt, ##__VA_ARGS__); \
+ dev_err(dev, fmt, ##__VA_ARGS__); \
} while (0)
#define qib_dev_err(dd, fmt, ...) \
diff --git a/drivers/infiniband/hw/qib/qib_fs.c b/drivers/infiniband/hw/qib/qib_fs.c
index a0e6613e8be6..f99bddc01716 100644
--- a/drivers/infiniband/hw/qib/qib_fs.c
+++ b/drivers/infiniband/hw/qib/qib_fs.c
@@ -58,6 +58,7 @@ static int qibfs_mknod(struct inode *dir, struct dentry *dentry,
goto bail;
}
+ inode->i_ino = get_next_ino();
inode->i_mode = mode;
inode->i_uid = 0;
inode->i_gid = 0;
@@ -554,13 +555,13 @@ bail:
return ret;
}
-static int qibfs_get_sb(struct file_system_type *fs_type, int flags,
- const char *dev_name, void *data, struct vfsmount *mnt)
+static struct dentry *qibfs_mount(struct file_system_type *fs_type, int flags,
+ const char *dev_name, void *data)
{
- int ret = get_sb_single(fs_type, flags, data,
- qibfs_fill_super, mnt);
- if (ret >= 0)
- qib_super = mnt->mnt_sb;
+ struct dentry *ret;
+ ret = mount_single(fs_type, flags, data, qibfs_fill_super);
+ if (!IS_ERR(ret))
+ qib_super = ret->d_sb;
return ret;
}
@@ -602,7 +603,7 @@ int qibfs_remove(struct qib_devdata *dd)
static struct file_system_type qibfs_fs_type = {
.owner = THIS_MODULE,
.name = "ipathfs",
- .get_sb = qibfs_get_sb,
+ .mount = qibfs_mount,
.kill_sb = qibfs_kill_super,
};
diff --git a/drivers/infiniband/hw/qib/qib_init.c b/drivers/infiniband/hw/qib/qib_init.c
index f1d16d3a01f6..f3b503936043 100644
--- a/drivers/infiniband/hw/qib/qib_init.c
+++ b/drivers/infiniband/hw/qib/qib_init.c
@@ -1243,6 +1243,7 @@ static int __devinit qib_init_one(struct pci_dev *pdev,
qib_early_err(&pdev->dev, "QLogic PCIE device 0x%x cannot "
"work if CONFIG_PCI_MSI is not enabled\n",
ent->device);
+ dd = ERR_PTR(-ENODEV);
#endif
break;
diff --git a/drivers/infiniband/hw/qib/qib_pcie.c b/drivers/infiniband/hw/qib/qib_pcie.c
index 7fa6e5592630..48b6674cbc49 100644
--- a/drivers/infiniband/hw/qib/qib_pcie.c
+++ b/drivers/infiniband/hw/qib/qib_pcie.c
@@ -103,16 +103,20 @@ int qib_pcie_init(struct pci_dev *pdev, const struct pci_device_id *ent)
ret = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32));
} else
ret = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64));
- if (ret)
+ if (ret) {
qib_early_err(&pdev->dev,
"Unable to set DMA consistent mask: %d\n", ret);
+ goto bail;
+ }
pci_set_master(pdev);
ret = pci_enable_pcie_error_reporting(pdev);
- if (ret)
+ if (ret) {
qib_early_err(&pdev->dev,
"Unable to enable pcie error reporting: %d\n",
ret);
+ ret = 0;
+ }
goto done;
bail:
diff --git a/drivers/infiniband/hw/qib/qib_rc.c b/drivers/infiniband/hw/qib/qib_rc.c
index a0931119bd78..955fb7157793 100644
--- a/drivers/infiniband/hw/qib/qib_rc.c
+++ b/drivers/infiniband/hw/qib/qib_rc.c
@@ -2068,7 +2068,10 @@ send_last:
goto nack_op_err;
if (!ret)
goto rnr_nak;
- goto send_last_imm;
+ wc.ex.imm_data = ohdr->u.rc.imm_data;
+ hdrsize += 4;
+ wc.wc_flags = IB_WC_WITH_IMM;
+ goto send_last;
case OP(RDMA_READ_REQUEST): {
struct qib_ack_entry *e;
diff --git a/drivers/infiniband/hw/qib/qib_uc.c b/drivers/infiniband/hw/qib/qib_uc.c
index b9c8b6346c1b..32ccf3c824ca 100644
--- a/drivers/infiniband/hw/qib/qib_uc.c
+++ b/drivers/infiniband/hw/qib/qib_uc.c
@@ -457,8 +457,10 @@ rdma_first:
}
if (opcode == OP(RDMA_WRITE_ONLY))
goto rdma_last;
- else if (opcode == OP(RDMA_WRITE_ONLY_WITH_IMMEDIATE))
+ else if (opcode == OP(RDMA_WRITE_ONLY_WITH_IMMEDIATE)) {
+ wc.ex.imm_data = ohdr->u.rc.imm_data;
goto rdma_last_imm;
+ }
/* FALLTHROUGH */
case OP(RDMA_WRITE_MIDDLE):
/* Check for invalid length PMTU or posted rwqe len. */
@@ -471,8 +473,8 @@ rdma_first:
break;
case OP(RDMA_WRITE_LAST_WITH_IMMEDIATE):
-rdma_last_imm:
wc.ex.imm_data = ohdr->u.imm_data;
+rdma_last_imm:
hdrsize += 4;
wc.wc_flags = IB_WC_WITH_IMM;
diff --git a/drivers/infiniband/ulp/ipoib/ipoib_ib.c b/drivers/infiniband/ulp/ipoib/ipoib_ib.c
index ec6b4fbe25e4..dfa71903d6e4 100644
--- a/drivers/infiniband/ulp/ipoib/ipoib_ib.c
+++ b/drivers/infiniband/ulp/ipoib/ipoib_ib.c
@@ -223,6 +223,7 @@ static void ipoib_ib_handle_rx_wc(struct net_device *dev, struct ib_wc *wc)
unsigned int wr_id = wc->wr_id & ~IPOIB_OP_RECV;
struct sk_buff *skb;
u64 mapping[IPOIB_UD_RX_SG];
+ union ib_gid *dgid;
ipoib_dbg_data(priv, "recv completion: id %d, status: %d\n",
wr_id, wc->status);
@@ -271,6 +272,16 @@ static void ipoib_ib_handle_rx_wc(struct net_device *dev, struct ib_wc *wc)
ipoib_ud_dma_unmap_rx(priv, mapping);
ipoib_ud_skb_put_frags(priv, skb, wc->byte_len);
+ /* First byte of dgid signals multicast when 0xff */
+ dgid = &((struct ib_grh *)skb->data)->dgid;
+
+ if (!(wc->wc_flags & IB_WC_GRH) || dgid->raw[0] != 0xff)
+ skb->pkt_type = PACKET_HOST;
+ else if (memcmp(dgid, dev->broadcast + 4, sizeof(union ib_gid)) == 0)
+ skb->pkt_type = PACKET_BROADCAST;
+ else
+ skb->pkt_type = PACKET_MULTICAST;
+
skb_pull(skb, IB_GRH_BYTES);
skb->protocol = ((struct ipoib_header *) skb->data)->proto;
@@ -281,9 +292,6 @@ static void ipoib_ib_handle_rx_wc(struct net_device *dev, struct ib_wc *wc)
dev->stats.rx_bytes += skb->len;
skb->dev = dev;
- /* XXX get correct PACKET_ type here */
- skb->pkt_type = PACKET_HOST;
-
if (test_bit(IPOIB_FLAG_CSUM, &priv->flags) && likely(wc->csum_ok))
skb->ip_summed = CHECKSUM_UNNECESSARY;
diff --git a/drivers/infiniband/ulp/ipoib/ipoib_main.c b/drivers/infiniband/ulp/ipoib/ipoib_main.c
index b4b22576f12a..9ff7bc73ed95 100644
--- a/drivers/infiniband/ulp/ipoib/ipoib_main.c
+++ b/drivers/infiniband/ulp/ipoib/ipoib_main.c
@@ -1240,6 +1240,7 @@ static struct net_device *ipoib_add_port(const char *format,
goto alloc_mem_failed;
SET_NETDEV_DEV(priv->dev, hca->dma_device);
+ priv->dev->dev_id = port - 1;
if (!ib_query_port(hca, port, &attr))
priv->max_ib_mtu = ib_mtu_enum_to_int(attr.max_mtu);
@@ -1362,6 +1363,8 @@ static void ipoib_add_one(struct ib_device *device)
}
for (p = s; p <= e; ++p) {
+ if (rdma_port_get_link_layer(device, p) != IB_LINK_LAYER_INFINIBAND)
+ continue;
dev = ipoib_add_port("ib%d", device, p);
if (!IS_ERR(dev)) {
priv = netdev_priv(dev);
@@ -1409,8 +1412,7 @@ static int __init ipoib_init_module(void)
ipoib_sendq_size = roundup_pow_of_two(ipoib_sendq_size);
ipoib_sendq_size = min(ipoib_sendq_size, IPOIB_MAX_QUEUE_SIZE);
- ipoib_sendq_size = max(ipoib_sendq_size, max(2 * MAX_SEND_CQE,
- IPOIB_MIN_QUEUE_SIZE));
+ ipoib_sendq_size = max3(ipoib_sendq_size, 2 * MAX_SEND_CQE, IPOIB_MIN_QUEUE_SIZE);
#ifdef CONFIG_INFINIBAND_IPOIB_CM
ipoib_max_conn_qp = min(ipoib_max_conn_qp, IPOIB_CM_MAX_CONN_QP);
#endif
diff --git a/drivers/infiniband/ulp/srp/ib_srp.c b/drivers/infiniband/ulp/srp/ib_srp.c
index 7f8f16bad753..1e1e347a7715 100644
--- a/drivers/infiniband/ulp/srp/ib_srp.c
+++ b/drivers/infiniband/ulp/srp/ib_srp.c
@@ -291,7 +291,7 @@ static void srp_free_target_ib(struct srp_target_port *target)
for (i = 0; i < SRP_RQ_SIZE; ++i)
srp_free_iu(target->srp_host, target->rx_ring[i]);
- for (i = 0; i < SRP_SQ_SIZE + 1; ++i)
+ for (i = 0; i < SRP_SQ_SIZE; ++i)
srp_free_iu(target->srp_host, target->tx_ring[i]);
}
@@ -811,6 +811,75 @@ static int srp_map_data(struct scsi_cmnd *scmnd, struct srp_target_port *target,
return len;
}
+/*
+ * Must be called with target->scsi_host->host_lock held to protect
+ * req_lim and tx_head. Lock cannot be dropped between call here and
+ * call to __srp_post_send().
+ *
+ * Note:
+ * An upper limit for the number of allocated information units for each
+ * request type is:
+ * - SRP_IU_CMD: SRP_CMD_SQ_SIZE, since the SCSI mid-layer never queues
+ * more than Scsi_Host.can_queue requests.
+ * - SRP_IU_TSK_MGMT: SRP_TSK_MGMT_SQ_SIZE.
+ * - SRP_IU_RSP: 1, since a conforming SRP target never sends more than
+ * one unanswered SRP request to an initiator.
+ */
+static struct srp_iu *__srp_get_tx_iu(struct srp_target_port *target,
+ enum srp_iu_type iu_type)
+{
+ s32 rsv = (iu_type == SRP_IU_TSK_MGMT) ? 0 : SRP_TSK_MGMT_SQ_SIZE;
+ struct srp_iu *iu;
+
+ srp_send_completion(target->send_cq, target);
+
+ if (target->tx_head - target->tx_tail >= SRP_SQ_SIZE)
+ return NULL;
+
+ /* Initiator responses to target requests do not consume credits */
+ if (target->req_lim <= rsv && iu_type != SRP_IU_RSP) {
+ ++target->zero_req_lim;
+ return NULL;
+ }
+
+ iu = target->tx_ring[target->tx_head & SRP_SQ_MASK];
+ iu->type = iu_type;
+ return iu;
+}
+
+/*
+ * Must be called with target->scsi_host->host_lock held to protect
+ * req_lim and tx_head.
+ */
+static int __srp_post_send(struct srp_target_port *target,
+ struct srp_iu *iu, int len)
+{
+ struct ib_sge list;
+ struct ib_send_wr wr, *bad_wr;
+ int ret = 0;
+
+ list.addr = iu->dma;
+ list.length = len;
+ list.lkey = target->srp_host->srp_dev->mr->lkey;
+
+ wr.next = NULL;
+ wr.wr_id = target->tx_head & SRP_SQ_MASK;
+ wr.sg_list = &list;
+ wr.num_sge = 1;
+ wr.opcode = IB_WR_SEND;
+ wr.send_flags = IB_SEND_SIGNALED;
+
+ ret = ib_post_send(target->qp, &wr, &bad_wr);
+
+ if (!ret) {
+ ++target->tx_head;
+ if (iu->type != SRP_IU_RSP)
+ --target->req_lim;
+ }
+
+ return ret;
+}
+
static int srp_post_recv(struct srp_target_port *target)
{
unsigned long flags;
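[Annotation] The relocated helpers above now take an IU type so that SRP_TSK_MGMT_SQ_SIZE send-queue slots stay reserved for task management and initiator responses (SRP_IU_RSP) bypass the credit check entirely; srp_response_common() further down shows the response path. For a normal command the calling pattern has the same shape, sketched below under the stated host_lock rule; the function is hypothetical and the DMA syncs around the memcpy are omitted for brevity.

/* Illustrative sketch only: grab a command IU and post it while holding
 * scsi_host->host_lock, as the comment on __srp_get_tx_iu() requires. */
static int example_post_cmd(struct srp_target_port *target,
			    const void *cmd, int len)
{
	unsigned long flags;
	struct srp_iu *iu;
	int err = -ENOMEM;

	spin_lock_irqsave(target->scsi_host->host_lock, flags);
	iu = __srp_get_tx_iu(target, SRP_IU_CMD);
	if (iu) {
		memcpy(iu->buf, cmd, len);
		err = __srp_post_send(target, iu, len);
	}
	spin_unlock_irqrestore(target->scsi_host->host_lock, flags);
	return err;
}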
@@ -822,7 +891,7 @@ static int srp_post_recv(struct srp_target_port *target)
spin_lock_irqsave(target->scsi_host->host_lock, flags);
- next = target->rx_head & (SRP_RQ_SIZE - 1);
+ next = target->rx_head & SRP_RQ_MASK;
wr.wr_id = next;
iu = target->rx_ring[next];
@@ -896,6 +965,71 @@ static void srp_process_rsp(struct srp_target_port *target, struct srp_rsp *rsp)
spin_unlock_irqrestore(target->scsi_host->host_lock, flags);
}
+static int srp_response_common(struct srp_target_port *target, s32 req_delta,
+ void *rsp, int len)
+{
+ struct ib_device *dev;
+ unsigned long flags;
+ struct srp_iu *iu;
+ int err = 1;
+
+ dev = target->srp_host->srp_dev->dev;
+
+ spin_lock_irqsave(target->scsi_host->host_lock, flags);
+ target->req_lim += req_delta;
+
+ iu = __srp_get_tx_iu(target, SRP_IU_RSP);
+ if (!iu) {
+ shost_printk(KERN_ERR, target->scsi_host, PFX
+ "no IU available to send response\n");
+ goto out;
+ }
+
+ ib_dma_sync_single_for_cpu(dev, iu->dma, len, DMA_TO_DEVICE);
+ memcpy(iu->buf, rsp, len);
+ ib_dma_sync_single_for_device(dev, iu->dma, len, DMA_TO_DEVICE);
+
+ err = __srp_post_send(target, iu, len);
+ if (err)
+ shost_printk(KERN_ERR, target->scsi_host, PFX
+ "unable to post response: %d\n", err);
+
+out:
+ spin_unlock_irqrestore(target->scsi_host->host_lock, flags);
+ return err;
+}
+
+static void srp_process_cred_req(struct srp_target_port *target,
+ struct srp_cred_req *req)
+{
+ struct srp_cred_rsp rsp = {
+ .opcode = SRP_CRED_RSP,
+ .tag = req->tag,
+ };
+ s32 delta = be32_to_cpu(req->req_lim_delta);
+
+ if (srp_response_common(target, delta, &rsp, sizeof rsp))
+ shost_printk(KERN_ERR, target->scsi_host, PFX
+ "problems processing SRP_CRED_REQ\n");
+}
+
+static void srp_process_aer_req(struct srp_target_port *target,
+ struct srp_aer_req *req)
+{
+ struct srp_aer_rsp rsp = {
+ .opcode = SRP_AER_RSP,
+ .tag = req->tag,
+ };
+ s32 delta = be32_to_cpu(req->req_lim_delta);
+
+ shost_printk(KERN_ERR, target->scsi_host, PFX
+ "ignoring AER for LUN %llu\n", be64_to_cpu(req->lun));
+
+ if (srp_response_common(target, delta, &rsp, sizeof rsp))
+ shost_printk(KERN_ERR, target->scsi_host, PFX
+ "problems processing SRP_AER_REQ\n");
+}
+
static void srp_handle_recv(struct srp_target_port *target, struct ib_wc *wc)
{
struct ib_device *dev;
@@ -923,6 +1057,14 @@ static void srp_handle_recv(struct srp_target_port *target, struct ib_wc *wc)
srp_process_rsp(target, iu->buf);
break;
+ case SRP_CRED_REQ:
+ srp_process_cred_req(target, iu->buf);
+ break;
+
+ case SRP_AER_REQ:
+ srp_process_aer_req(target, iu->buf);
+ break;
+
case SRP_T_LOGOUT:
/* XXX Handle target logout */
shost_printk(KERN_WARNING, target->scsi_host,
@@ -981,62 +1123,7 @@ static void srp_send_completion(struct ib_cq *cq, void *target_ptr)
}
}
-/*
- * Must be called with target->scsi_host->host_lock held to protect
- * req_lim and tx_head. Lock cannot be dropped between call here and
- * call to __srp_post_send().
- */
-static struct srp_iu *__srp_get_tx_iu(struct srp_target_port *target,
- enum srp_request_type req_type)
-{
- s32 min = (req_type == SRP_REQ_TASK_MGMT) ? 1 : 2;
-
- srp_send_completion(target->send_cq, target);
-
- if (target->tx_head - target->tx_tail >= SRP_SQ_SIZE)
- return NULL;
-
- if (target->req_lim < min) {
- ++target->zero_req_lim;
- return NULL;
- }
-
- return target->tx_ring[target->tx_head & SRP_SQ_SIZE];
-}
-
-/*
- * Must be called with target->scsi_host->host_lock held to protect
- * req_lim and tx_head.
- */
-static int __srp_post_send(struct srp_target_port *target,
- struct srp_iu *iu, int len)
-{
- struct ib_sge list;
- struct ib_send_wr wr, *bad_wr;
- int ret = 0;
-
- list.addr = iu->dma;
- list.length = len;
- list.lkey = target->srp_host->srp_dev->mr->lkey;
-
- wr.next = NULL;
- wr.wr_id = target->tx_head & SRP_SQ_SIZE;
- wr.sg_list = &list;
- wr.num_sge = 1;
- wr.opcode = IB_WR_SEND;
- wr.send_flags = IB_SEND_SIGNALED;
-
- ret = ib_post_send(target->qp, &wr, &bad_wr);
-
- if (!ret) {
- ++target->tx_head;
- --target->req_lim;
- }
-
- return ret;
-}
-
-static int srp_queuecommand(struct scsi_cmnd *scmnd,
+static int srp_queuecommand_lck(struct scsi_cmnd *scmnd,
void (*done)(struct scsi_cmnd *))
{
struct srp_target_port *target = host_to_target(scmnd->device->host);
@@ -1056,7 +1143,7 @@ static int srp_queuecommand(struct scsi_cmnd *scmnd,
return 0;
}
- iu = __srp_get_tx_iu(target, SRP_REQ_NORMAL);
+ iu = __srp_get_tx_iu(target, SRP_IU_CMD);
if (!iu)
goto err;
@@ -1064,7 +1151,7 @@ static int srp_queuecommand(struct scsi_cmnd *scmnd,
ib_dma_sync_single_for_cpu(dev, iu->dma, srp_max_iu_len,
DMA_TO_DEVICE);
- req = list_entry(target->free_reqs.next, struct srp_request, list);
+ req = list_first_entry(&target->free_reqs, struct srp_request, list);
scmnd->scsi_done = done;
scmnd->result = 0;
@@ -1109,6 +1196,8 @@ err:
return SCSI_MLQUEUE_HOST_BUSY;
}
+static DEF_SCSI_QCMD(srp_queuecommand)
+
static int srp_alloc_iu_bufs(struct srp_target_port *target)
{
int i;
@@ -1121,7 +1210,7 @@ static int srp_alloc_iu_bufs(struct srp_target_port *target)
goto err;
}
- for (i = 0; i < SRP_SQ_SIZE + 1; ++i) {
+ for (i = 0; i < SRP_SQ_SIZE; ++i) {
target->tx_ring[i] = srp_alloc_iu(target->srp_host,
srp_max_iu_len,
GFP_KERNEL, DMA_TO_DEVICE);
@@ -1137,7 +1226,7 @@ err:
target->rx_ring[i] = NULL;
}
- for (i = 0; i < SRP_SQ_SIZE + 1; ++i) {
+ for (i = 0; i < SRP_SQ_SIZE; ++i) {
srp_free_iu(target->srp_host, target->tx_ring[i]);
target->tx_ring[i] = NULL;
}
@@ -1252,8 +1341,13 @@ static int srp_cm_handler(struct ib_cm_id *cm_id, struct ib_cm_event *event)
target->max_ti_iu_len = be32_to_cpu(rsp->max_ti_iu_len);
target->req_lim = be32_to_cpu(rsp->req_lim_delta);
- target->scsi_host->can_queue = min(target->req_lim,
- target->scsi_host->can_queue);
+ /*
+ * Reserve credits for task management so we don't
+ * bounce requests back to the SCSI mid-layer.
+ */
+ target->scsi_host->can_queue
+ = min(target->req_lim - SRP_TSK_MGMT_SQ_SIZE,
+ target->scsi_host->can_queue);
} else {
shost_printk(KERN_WARNING, target->scsi_host,
PFX "Unhandled RSP opcode %#x\n", opcode);
@@ -1350,6 +1444,7 @@ static int srp_cm_handler(struct ib_cm_id *cm_id, struct ib_cm_event *event)
static int srp_send_tsk_mgmt(struct srp_target_port *target,
struct srp_request *req, u8 func)
{
+ struct ib_device *dev = target->srp_host->srp_dev->dev;
struct srp_iu *iu;
struct srp_tsk_mgmt *tsk_mgmt;
@@ -1363,10 +1458,12 @@ static int srp_send_tsk_mgmt(struct srp_target_port *target,
init_completion(&req->done);
- iu = __srp_get_tx_iu(target, SRP_REQ_TASK_MGMT);
+ iu = __srp_get_tx_iu(target, SRP_IU_TSK_MGMT);
if (!iu)
goto out;
+ ib_dma_sync_single_for_cpu(dev, iu->dma, sizeof *tsk_mgmt,
+ DMA_TO_DEVICE);
tsk_mgmt = iu->buf;
memset(tsk_mgmt, 0, sizeof *tsk_mgmt);
@@ -1376,6 +1473,8 @@ static int srp_send_tsk_mgmt(struct srp_target_port *target,
tsk_mgmt->tsk_mgmt_func = func;
tsk_mgmt->task_tag = req->index;
+ ib_dma_sync_single_for_device(dev, iu->dma, sizeof *tsk_mgmt,
+ DMA_TO_DEVICE);
if (__srp_post_send(target, iu, sizeof *tsk_mgmt))
goto out;
@@ -1626,9 +1725,9 @@ static struct scsi_host_template srp_template = {
.eh_abort_handler = srp_abort,
.eh_device_reset_handler = srp_reset_device,
.eh_host_reset_handler = srp_reset_host,
- .can_queue = SRP_SQ_SIZE,
+ .can_queue = SRP_CMD_SQ_SIZE,
.this_id = -1,
- .cmd_per_lun = SRP_SQ_SIZE,
+ .cmd_per_lun = SRP_CMD_SQ_SIZE,
.use_clustering = ENABLE_CLUSTERING,
.shost_attrs = srp_host_attrs
};
@@ -1813,7 +1912,7 @@ static int srp_parse_options(const char *buf, struct srp_target_port *target)
printk(KERN_WARNING PFX "bad max cmd_per_lun parameter '%s'\n", p);
goto out;
}
- target->scsi_host->cmd_per_lun = min(token, SRP_SQ_SIZE);
+ target->scsi_host->cmd_per_lun = min(token, SRP_CMD_SQ_SIZE);
break;
case SRP_OPT_IO_CLASS:
@@ -1891,7 +1990,7 @@ static ssize_t srp_create_target(struct device *dev,
INIT_LIST_HEAD(&target->free_reqs);
INIT_LIST_HEAD(&target->req_queue);
- for (i = 0; i < SRP_SQ_SIZE; ++i) {
+ for (i = 0; i < SRP_CMD_SQ_SIZE; ++i) {
target->req_ring[i].index = i;
list_add_tail(&target->req_ring[i].list, &target->free_reqs);
}
@@ -2159,6 +2258,9 @@ static int __init srp_init_module(void)
{
int ret;
+ BUILD_BUG_ON_NOT_POWER_OF_2(SRP_SQ_SIZE);
+ BUILD_BUG_ON_NOT_POWER_OF_2(SRP_RQ_SIZE);
+
if (srp_sg_tablesize > 255) {
printk(KERN_WARNING PFX "Clamping srp_sg_tablesize to 255\n");
srp_sg_tablesize = 255;
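
The reworked __srp_get_tx_iu() above makes the credit policy explicit: the send ring must have room, responses (SRP_IU_RSP) never consume a credit, and ordinary commands must leave SRP_TSK_MGMT_SQ_SIZE credits behind so task management can still get through. A minimal userspace sketch of just that decision follows; the helper name tx_iu_available() is hypothetical, and the constants are copied from the new ib_srp.h values (SRP_RQ_SHIFT = 6).

/*
 * Standalone sketch of the credit check in __srp_get_tx_iu(); the helper
 * name tx_iu_available() is hypothetical.
 */
#include <stdbool.h>
#include <stdio.h>

#define SRP_SQ_SIZE          64
#define SRP_TSK_MGMT_SQ_SIZE 1

enum srp_iu_type { SRP_IU_CMD, SRP_IU_TSK_MGMT, SRP_IU_RSP };

static bool tx_iu_available(unsigned int tx_head, unsigned int tx_tail,
                            int req_lim, enum srp_iu_type type)
{
        int rsv = (type == SRP_IU_TSK_MGMT) ? 0 : SRP_TSK_MGMT_SQ_SIZE;

        if (tx_head - tx_tail >= SRP_SQ_SIZE)
                return false;           /* send ring is full */
        if (req_lim <= rsv && type != SRP_IU_RSP)
                return false;           /* out of (unreserved) credits */
        return true;
}

int main(void)
{
        /* one credit left: a command must wait, task management may go */
        printf("cmd: %d\n", tx_iu_available(10, 5, 1, SRP_IU_CMD));      /* 0 */
        printf("tsk: %d\n", tx_iu_available(10, 5, 1, SRP_IU_TSK_MGMT)); /* 1 */
        printf("rsp: %d\n", tx_iu_available(10, 5, 0, SRP_IU_RSP));      /* 1 */
        return 0;
}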
diff --git a/drivers/infiniband/ulp/srp/ib_srp.h b/drivers/infiniband/ulp/srp/ib_srp.h
index 5a80eac6fdaa..ed0dce9e479f 100644
--- a/drivers/infiniband/ulp/srp/ib_srp.h
+++ b/drivers/infiniband/ulp/srp/ib_srp.h
@@ -59,7 +59,14 @@ enum {
SRP_RQ_SHIFT = 6,
SRP_RQ_SIZE = 1 << SRP_RQ_SHIFT,
- SRP_SQ_SIZE = SRP_RQ_SIZE - 1,
+ SRP_RQ_MASK = SRP_RQ_SIZE - 1,
+
+ SRP_SQ_SIZE = SRP_RQ_SIZE,
+ SRP_SQ_MASK = SRP_SQ_SIZE - 1,
+ SRP_RSP_SQ_SIZE = 1,
+ SRP_REQ_SQ_SIZE = SRP_SQ_SIZE - SRP_RSP_SQ_SIZE,
+ SRP_TSK_MGMT_SQ_SIZE = 1,
+ SRP_CMD_SQ_SIZE = SRP_REQ_SQ_SIZE - SRP_TSK_MGMT_SQ_SIZE,
SRP_TAG_TSK_MGMT = 1 << (SRP_RQ_SHIFT + 1),
@@ -75,9 +82,10 @@ enum srp_target_state {
SRP_TARGET_REMOVED
};
-enum srp_request_type {
- SRP_REQ_NORMAL,
- SRP_REQ_TASK_MGMT,
+enum srp_iu_type {
+ SRP_IU_CMD,
+ SRP_IU_TSK_MGMT,
+ SRP_IU_RSP,
};
struct srp_device {
@@ -144,11 +152,11 @@ struct srp_target_port {
unsigned tx_head;
unsigned tx_tail;
- struct srp_iu *tx_ring[SRP_SQ_SIZE + 1];
+ struct srp_iu *tx_ring[SRP_SQ_SIZE];
struct list_head free_reqs;
struct list_head req_queue;
- struct srp_request req_ring[SRP_SQ_SIZE];
+ struct srp_request req_ring[SRP_CMD_SQ_SIZE];
struct work_struct work;
@@ -164,6 +172,7 @@ struct srp_iu {
void *buf;
size_t size;
enum dma_data_direction direction;
+ enum srp_iu_type type;
};
#endif /* IB_SRP_H */
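
The new enum above derives everything from SRP_RQ_SHIFT: 64-entry rings, power-of-two masks, and a command queue of 62 after reserving one slot for a response IU and one for task management. The standalone check below (not driver code) recomputes those values and verifies the masking identity that the BUILD_BUG_ON_NOT_POWER_OF_2() calls in srp_init_module() guard: "head & MASK" only equals "head % SIZE" when SIZE is a power of two, which is what the tx_ring/wr_id indexing in __srp_post_send() relies on.

/* Standalone check of the ring-size arithmetic in ib_srp.h. */
#include <assert.h>
#include <stdio.h>

enum {
        SRP_RQ_SHIFT            = 6,
        SRP_RQ_SIZE             = 1 << SRP_RQ_SHIFT,
        SRP_RQ_MASK             = SRP_RQ_SIZE - 1,

        SRP_SQ_SIZE             = SRP_RQ_SIZE,
        SRP_SQ_MASK             = SRP_SQ_SIZE - 1,
        SRP_RSP_SQ_SIZE         = 1,
        SRP_REQ_SQ_SIZE         = SRP_SQ_SIZE - SRP_RSP_SQ_SIZE,
        SRP_TSK_MGMT_SQ_SIZE    = 1,
        SRP_CMD_SQ_SIZE         = SRP_REQ_SQ_SIZE - SRP_TSK_MGMT_SQ_SIZE,
};

int main(void)
{
        unsigned int head;

        /* runtime stand-in for BUILD_BUG_ON_NOT_POWER_OF_2(SRP_SQ_SIZE) */
        assert((SRP_SQ_SIZE & (SRP_SQ_SIZE - 1)) == 0);

        for (head = 0; head < 3 * SRP_SQ_SIZE; head++)
                assert((head & SRP_SQ_MASK) == (head % SRP_SQ_SIZE));

        printf("SQ=%d CMD=%d (1 RSP + 1 TSK_MGMT slot reserved)\n",
               SRP_SQ_SIZE, SRP_CMD_SQ_SIZE);   /* SQ=64 CMD=62 */
        return 0;
}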
diff --git a/drivers/input/evdev.c b/drivers/input/evdev.c
index 535fea4fe67f..e3f7fc6f9565 100644
--- a/drivers/input/evdev.c
+++ b/drivers/input/evdev.c
@@ -534,6 +534,80 @@ static int handle_eviocgbit(struct input_dev *dev,
}
#undef OLD_KEY_MAX
+static int evdev_handle_get_keycode(struct input_dev *dev,
+ void __user *p, size_t size)
+{
+ struct input_keymap_entry ke;
+ int error;
+
+ memset(&ke, 0, sizeof(ke));
+
+ if (size == sizeof(unsigned int[2])) {
+ /* legacy case */
+ int __user *ip = (int __user *)p;
+
+ if (copy_from_user(ke.scancode, p, sizeof(unsigned int)))
+ return -EFAULT;
+
+ ke.len = sizeof(unsigned int);
+ ke.flags = 0;
+
+ error = input_get_keycode(dev, &ke);
+ if (error)
+ return error;
+
+ if (put_user(ke.keycode, ip + 1))
+ return -EFAULT;
+
+ } else {
+ size = min(size, sizeof(ke));
+
+ if (copy_from_user(&ke, p, size))
+ return -EFAULT;
+
+ error = input_get_keycode(dev, &ke);
+ if (error)
+ return error;
+
+ if (copy_to_user(p, &ke, size))
+ return -EFAULT;
+ }
+ return 0;
+}
+
+static int evdev_handle_set_keycode(struct input_dev *dev,
+ void __user *p, size_t size)
+{
+ struct input_keymap_entry ke;
+
+ memset(&ke, 0, sizeof(ke));
+
+ if (size == sizeof(unsigned int[2])) {
+ /* legacy case */
+ int __user *ip = (int __user *)p;
+
+ if (copy_from_user(ke.scancode, p, sizeof(unsigned int)))
+ return -EFAULT;
+
+ if (get_user(ke.keycode, ip + 1))
+ return -EFAULT;
+
+ ke.len = sizeof(unsigned int);
+ ke.flags = 0;
+
+ } else {
+ size = min(size, sizeof(ke));
+
+ if (copy_from_user(&ke, p, size))
+ return -EFAULT;
+
+ if (ke.len > sizeof(ke.scancode))
+ return -EINVAL;
+ }
+
+ return input_set_keycode(dev, &ke);
+}
+
static long evdev_do_ioctl(struct file *file, unsigned int cmd,
void __user *p, int compat_mode)
{
@@ -580,25 +654,6 @@ static long evdev_do_ioctl(struct file *file, unsigned int cmd,
return 0;
- case EVIOCGKEYCODE:
- if (get_user(t, ip))
- return -EFAULT;
-
- error = input_get_keycode(dev, t, &v);
- if (error)
- return error;
-
- if (put_user(v, ip + 1))
- return -EFAULT;
-
- return 0;
-
- case EVIOCSKEYCODE:
- if (get_user(t, ip) || get_user(v, ip + 1))
- return -EFAULT;
-
- return input_set_keycode(dev, t, v);
-
case EVIOCRMFF:
return input_ff_erase(dev, (int)(unsigned long) p, file);
@@ -620,7 +675,6 @@ static long evdev_do_ioctl(struct file *file, unsigned int cmd,
/* Now check variable-length commands */
#define EVIOC_MASK_SIZE(nr) ((nr) & ~(_IOC_SIZEMASK << _IOC_SIZESHIFT))
-
switch (EVIOC_MASK_SIZE(cmd)) {
case EVIOCGKEY(0):
@@ -654,6 +708,12 @@ static long evdev_do_ioctl(struct file *file, unsigned int cmd,
return -EFAULT;
return error;
+
+ case EVIOC_MASK_SIZE(EVIOCGKEYCODE):
+ return evdev_handle_get_keycode(dev, p, size);
+
+ case EVIOC_MASK_SIZE(EVIOCSKEYCODE):
+ return evdev_handle_set_keycode(dev, p, size);
}
/* Multi-number variable-length handlers */
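
EVIOCGKEYCODE and EVIOCSKEYCODE are now dispatched through EVIOC_MASK_SIZE(), which clears the size field of the ioctl number so the legacy two-unsigned-int form and the struct input_keymap_entry form land in the same switch case; evdev_handle_get_keycode()/evdev_handle_set_keycode() then use the size argument to tell the layouts apart. A Linux-only userspace sketch of that masking is below; it assumes a current <linux/input.h> that provides struct input_keymap_entry and pulls in the _IOR()/_IOC_SIZE() helpers, and it builds the two ioctl numbers by hand to mirror the two forms of EVIOCGKEYCODE.

/* Linux-only sketch of the size-masked ioctl dispatch above. */
#include <linux/input.h>
#include <stdio.h>

#define EVIOC_MASK_SIZE(nr)     ((nr) & ~(_IOC_SIZEMASK << _IOC_SIZESHIFT))

int main(void)
{
        unsigned int legacy = _IOR('E', 0x04, unsigned int[2]);
        unsigned int keymap = _IOR('E', 0x04, struct input_keymap_entry);

        printf("same switch case: %s\n",
               EVIOC_MASK_SIZE(legacy) == EVIOC_MASK_SIZE(keymap) ? "yes" : "no");
        printf("legacy payload %u bytes, keymap-entry payload %u bytes\n",
               (unsigned int)_IOC_SIZE(legacy),
               (unsigned int)_IOC_SIZE(keymap));
        return 0;
}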
diff --git a/drivers/input/gameport/emu10k1-gp.c b/drivers/input/gameport/emu10k1-gp.c
index 7392992da424..422aa0a6b77f 100644
--- a/drivers/input/gameport/emu10k1-gp.c
+++ b/drivers/input/gameport/emu10k1-gp.c
@@ -59,44 +59,52 @@ MODULE_DEVICE_TABLE(pci, emu_tbl);
static int __devinit emu_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
{
- int ioport, iolen;
struct emu *emu;
struct gameport *port;
-
- if (pci_enable_device(pdev))
- return -EBUSY;
-
- ioport = pci_resource_start(pdev, 0);
- iolen = pci_resource_len(pdev, 0);
-
- if (!request_region(ioport, iolen, "emu10k1-gp"))
- return -EBUSY;
+ int error;
emu = kzalloc(sizeof(struct emu), GFP_KERNEL);
port = gameport_allocate_port();
if (!emu || !port) {
printk(KERN_ERR "emu10k1-gp: Memory allocation failed\n");
- release_region(ioport, iolen);
- kfree(emu);
- gameport_free_port(port);
- return -ENOMEM;
+ error = -ENOMEM;
+ goto err_out_free;
}
- emu->io = ioport;
- emu->size = iolen;
+ error = pci_enable_device(pdev);
+ if (error)
+ goto err_out_free;
+
+ emu->io = pci_resource_start(pdev, 0);
+ emu->size = pci_resource_len(pdev, 0);
+
emu->dev = pdev;
emu->gameport = port;
gameport_set_name(port, "EMU10K1");
gameport_set_phys(port, "pci%s/gameport0", pci_name(pdev));
port->dev.parent = &pdev->dev;
- port->io = ioport;
+ port->io = emu->io;
+
+ if (!request_region(emu->io, emu->size, "emu10k1-gp")) {
+ printk(KERN_ERR "emu10k1-gp: unable to grab region 0x%x-0x%x\n",
+ emu->io, emu->io + emu->size - 1);
+ error = -EBUSY;
+ goto err_out_disable_dev;
+ }
pci_set_drvdata(pdev, emu);
gameport_register_port(port);
return 0;
+
+ err_out_disable_dev:
+ pci_disable_device(pdev);
+ err_out_free:
+ gameport_free_port(port);
+ kfree(emu);
+ return error;
}
static void __devexit emu_remove(struct pci_dev *pdev)
@@ -106,6 +114,8 @@ static void __devexit emu_remove(struct pci_dev *pdev)
gameport_unregister_port(emu->gameport);
release_region(emu->io, emu->size);
kfree(emu);
+
+ pci_disable_device(pdev);
}
static struct pci_driver emu_driver = {
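
The emu_probe() rework above follows the usual acquire-in-order, unwind-in-reverse pattern: allocate first, then pci_enable_device(), then request_region(), with each failure jumping to a label that releases only what was already acquired (and emu_remove() now calls pci_disable_device() to match). The toy program below sketches that control flow; acquire()/release() are hypothetical stand-ins for the PCI and resource calls, not the real API.

/* Toy illustration of goto-based error unwinding. */
#include <stdio.h>
#include <stdlib.h>

static int acquire(const char *what, int fail)
{
        if (fail) {
                fprintf(stderr, "failed to acquire %s\n", what);
                return -1;
        }
        printf("acquired %s\n", what);
        return 0;
}

static void release(const char *what)
{
        printf("released %s\n", what);
}

static int probe(int fail_at)
{
        void *priv = malloc(16);        /* kzalloc() stand-in */
        int error;

        if (!priv)
                return -1;

        error = acquire("device", fail_at == 1);        /* pci_enable_device() */
        if (error)
                goto err_free;

        error = acquire("io region", fail_at == 2);     /* request_region() */
        if (error)
                goto err_disable_dev;

        printf("probe ok\n");
        free(priv);             /* the real driver keeps this as drvdata */
        return 0;

err_disable_dev:
        release("device");
err_free:
        free(priv);
        return error;
}

int main(void)
{
        probe(0);       /* everything succeeds */
        probe(2);       /* io region fails: device released, memory freed */
        return 0;
}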
diff --git a/drivers/input/gameport/fm801-gp.c b/drivers/input/gameport/fm801-gp.c
index 14d3f3e208a2..a3b70ff21018 100644
--- a/drivers/input/gameport/fm801-gp.c
+++ b/drivers/input/gameport/fm801-gp.c
@@ -133,11 +133,11 @@ static void __devexit fm801_gp_remove(struct pci_dev *pci)
{
struct fm801_gp *gp = pci_get_drvdata(pci);
- if (gp) {
- gameport_unregister_port(gp->gameport);
- release_resource(gp->res_port);
- kfree(gp);
- }
+ gameport_unregister_port(gp->gameport);
+ release_resource(gp->res_port);
+ kfree(gp);
+
+ pci_disable_device(pci);
}
static const struct pci_device_id fm801_gp_id_table[] = {
diff --git a/drivers/input/input.c b/drivers/input/input.c
index 7919c2537225..db409d6bd5d2 100644
--- a/drivers/input/input.c
+++ b/drivers/input/input.c
@@ -24,7 +24,6 @@
#include <linux/device.h>
#include <linux/mutex.h>
#include <linux/rcupdate.h>
-#include <linux/smp_lock.h>
#include "input-compat.h"
MODULE_AUTHOR("Vojtech Pavlik <vojtech@suse.cz>");
@@ -74,6 +73,7 @@ static int input_defuzz_abs_event(int value, int old_val, int fuzz)
* dev->event_lock held and interrupts disabled.
*/
static void input_pass_event(struct input_dev *dev,
+ struct input_handler *src_handler,
unsigned int type, unsigned int code, int value)
{
struct input_handler *handler;
@@ -92,6 +92,15 @@ static void input_pass_event(struct input_dev *dev,
continue;
handler = handle->handler;
+
+ /*
+ * If this is the handler that injected this
+ * particular event we want to skip it to avoid
+ * filters firing again and again.
+ */
+ if (handler == src_handler)
+ continue;
+
if (!handler->filter) {
if (filtered)
break;
@@ -121,7 +130,7 @@ static void input_repeat_key(unsigned long data)
if (test_bit(dev->repeat_key, dev->key) &&
is_event_supported(dev->repeat_key, dev->keybit, KEY_MAX)) {
- input_pass_event(dev, EV_KEY, dev->repeat_key, 2);
+ input_pass_event(dev, NULL, EV_KEY, dev->repeat_key, 2);
if (dev->sync) {
/*
@@ -130,7 +139,7 @@ static void input_repeat_key(unsigned long data)
* Otherwise assume that the driver will send
* SYN_REPORT once it's done.
*/
- input_pass_event(dev, EV_SYN, SYN_REPORT, 1);
+ input_pass_event(dev, NULL, EV_SYN, SYN_REPORT, 1);
}
if (dev->rep[REP_PERIOD])
@@ -163,6 +172,7 @@ static void input_stop_autorepeat(struct input_dev *dev)
#define INPUT_PASS_TO_ALL (INPUT_PASS_TO_HANDLERS | INPUT_PASS_TO_DEVICE)
static int input_handle_abs_event(struct input_dev *dev,
+ struct input_handler *src_handler,
unsigned int code, int *pval)
{
bool is_mt_event;
@@ -171,7 +181,7 @@ static int input_handle_abs_event(struct input_dev *dev,
if (code == ABS_MT_SLOT) {
/*
* "Stage" the event; we'll flush it later, when we
- * get actiual touch data.
+ * get actual touch data.
*/
if (*pval >= 0 && *pval < dev->mtsize)
dev->slot = *pval;
@@ -188,7 +198,7 @@ static int input_handle_abs_event(struct input_dev *dev,
pold = &mtslot->abs[code - ABS_MT_FIRST];
} else {
/*
- * Bypass filtering for multitouch events when
+ * Bypass filtering for multi-touch events when
* not employing slots.
*/
pold = NULL;
@@ -206,13 +216,15 @@ static int input_handle_abs_event(struct input_dev *dev,
/* Flush pending "slot" event */
if (is_mt_event && dev->slot != input_abs_get_val(dev, ABS_MT_SLOT)) {
input_abs_set_val(dev, ABS_MT_SLOT, dev->slot);
- input_pass_event(dev, EV_ABS, ABS_MT_SLOT, dev->slot);
+ input_pass_event(dev, src_handler,
+ EV_ABS, ABS_MT_SLOT, dev->slot);
}
return INPUT_PASS_TO_HANDLERS;
}
static void input_handle_event(struct input_dev *dev,
+ struct input_handler *src_handler,
unsigned int type, unsigned int code, int value)
{
int disposition = INPUT_IGNORE_EVENT;
@@ -265,7 +277,8 @@ static void input_handle_event(struct input_dev *dev,
case EV_ABS:
if (is_event_supported(code, dev->absbit, ABS_MAX))
- disposition = input_handle_abs_event(dev, code, &value);
+ disposition = input_handle_abs_event(dev, src_handler,
+ code, &value);
break;
@@ -323,7 +336,7 @@ static void input_handle_event(struct input_dev *dev,
dev->event(dev, type, code, value);
if (disposition & INPUT_PASS_TO_HANDLERS)
- input_pass_event(dev, type, code, value);
+ input_pass_event(dev, src_handler, type, code, value);
}
/**
@@ -352,7 +365,7 @@ void input_event(struct input_dev *dev,
spin_lock_irqsave(&dev->event_lock, flags);
add_input_randomness(type, code, value);
- input_handle_event(dev, type, code, value);
+ input_handle_event(dev, NULL, type, code, value);
spin_unlock_irqrestore(&dev->event_lock, flags);
}
}
@@ -382,7 +395,8 @@ void input_inject_event(struct input_handle *handle,
rcu_read_lock();
grab = rcu_dereference(dev->grab);
if (!grab || grab == handle)
- input_handle_event(dev, type, code, value);
+ input_handle_event(dev, handle->handler,
+ type, code, value);
rcu_read_unlock();
spin_unlock_irqrestore(&dev->event_lock, flags);
@@ -595,10 +609,10 @@ static void input_dev_release_keys(struct input_dev *dev)
for (code = 0; code <= KEY_MAX; code++) {
if (is_event_supported(code, dev->keybit, KEY_MAX) &&
__test_and_clear_bit(code, dev->key)) {
- input_pass_event(dev, EV_KEY, code, 0);
+ input_pass_event(dev, NULL, EV_KEY, code, 0);
}
}
- input_pass_event(dev, EV_SYN, SYN_REPORT, 1);
+ input_pass_event(dev, NULL, EV_SYN, SYN_REPORT, 1);
}
}
@@ -634,78 +648,141 @@ static void input_disconnect_device(struct input_dev *dev)
spin_unlock_irq(&dev->event_lock);
}
-static int input_fetch_keycode(struct input_dev *dev, int scancode)
+/**
+ * input_scancode_to_scalar() - converts scancode in &struct input_keymap_entry
+ * @ke: keymap entry containing scancode to be converted.
+ * @scancode: pointer to the location where converted scancode should
+ * be stored.
+ *
+ * This function is used to convert scancode stored in &struct input_keymap_entry
+ * into scalar form understood by legacy keymap handling methods. These
+ * methods expect scancodes to be represented as 'unsigned int'.
+ */
+int input_scancode_to_scalar(const struct input_keymap_entry *ke,
+ unsigned int *scancode)
+{
+ switch (ke->len) {
+ case 1:
+ *scancode = *((u8 *)ke->scancode);
+ break;
+
+ case 2:
+ *scancode = *((u16 *)ke->scancode);
+ break;
+
+ case 4:
+ *scancode = *((u32 *)ke->scancode);
+ break;
+
+ default:
+ return -EINVAL;
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL(input_scancode_to_scalar);
+
+/*
+ * Those routines handle the default case where no [gs]etkeycode() is
+ * defined. In this case, an array indexed by the scancode is used.
+ */
+
+static unsigned int input_fetch_keycode(struct input_dev *dev,
+ unsigned int index)
{
switch (dev->keycodesize) {
- case 1:
- return ((u8 *)dev->keycode)[scancode];
+ case 1:
+ return ((u8 *)dev->keycode)[index];
- case 2:
- return ((u16 *)dev->keycode)[scancode];
+ case 2:
+ return ((u16 *)dev->keycode)[index];
- default:
- return ((u32 *)dev->keycode)[scancode];
+ default:
+ return ((u32 *)dev->keycode)[index];
}
}
static int input_default_getkeycode(struct input_dev *dev,
- unsigned int scancode,
- unsigned int *keycode)
+ struct input_keymap_entry *ke)
{
+ unsigned int index;
+ int error;
+
if (!dev->keycodesize)
return -EINVAL;
- if (scancode >= dev->keycodemax)
+ if (ke->flags & INPUT_KEYMAP_BY_INDEX)
+ index = ke->index;
+ else {
+ error = input_scancode_to_scalar(ke, &index);
+ if (error)
+ return error;
+ }
+
+ if (index >= dev->keycodemax)
return -EINVAL;
- *keycode = input_fetch_keycode(dev, scancode);
+ ke->keycode = input_fetch_keycode(dev, index);
+ ke->index = index;
+ ke->len = sizeof(index);
+ memcpy(ke->scancode, &index, sizeof(index));
return 0;
}
static int input_default_setkeycode(struct input_dev *dev,
- unsigned int scancode,
- unsigned int keycode)
+ const struct input_keymap_entry *ke,
+ unsigned int *old_keycode)
{
- int old_keycode;
+ unsigned int index;
+ int error;
int i;
- if (scancode >= dev->keycodemax)
+ if (!dev->keycodesize)
return -EINVAL;
- if (!dev->keycodesize)
+ if (ke->flags & INPUT_KEYMAP_BY_INDEX) {
+ index = ke->index;
+ } else {
+ error = input_scancode_to_scalar(ke, &index);
+ if (error)
+ return error;
+ }
+
+ if (index >= dev->keycodemax)
return -EINVAL;
- if (dev->keycodesize < sizeof(keycode) && (keycode >> (dev->keycodesize * 8)))
+ if (dev->keycodesize < sizeof(ke->keycode) &&
+ (ke->keycode >> (dev->keycodesize * 8)))
return -EINVAL;
switch (dev->keycodesize) {
case 1: {
u8 *k = (u8 *)dev->keycode;
- old_keycode = k[scancode];
- k[scancode] = keycode;
+ *old_keycode = k[index];
+ k[index] = ke->keycode;
break;
}
case 2: {
u16 *k = (u16 *)dev->keycode;
- old_keycode = k[scancode];
- k[scancode] = keycode;
+ *old_keycode = k[index];
+ k[index] = ke->keycode;
break;
}
default: {
u32 *k = (u32 *)dev->keycode;
- old_keycode = k[scancode];
- k[scancode] = keycode;
+ *old_keycode = k[index];
+ k[index] = ke->keycode;
break;
}
}
- __clear_bit(old_keycode, dev->keybit);
- __set_bit(keycode, dev->keybit);
+ __clear_bit(*old_keycode, dev->keybit);
+ __set_bit(ke->keycode, dev->keybit);
for (i = 0; i < dev->keycodemax; i++) {
- if (input_fetch_keycode(dev, i) == old_keycode) {
- __set_bit(old_keycode, dev->keybit);
+ if (input_fetch_keycode(dev, i) == *old_keycode) {
+ __set_bit(*old_keycode, dev->keybit);
break; /* Setting the bit twice is useless, so break */
}
}
@@ -716,53 +793,86 @@ static int input_default_setkeycode(struct input_dev *dev,
/**
* input_get_keycode - retrieve keycode currently mapped to a given scancode
* @dev: input device which keymap is being queried
- * @scancode: scancode (or its equivalent for device in question) for which
- * keycode is needed
- * @keycode: result
+ * @ke: keymap entry
*
* This function should be called by anyone interested in retrieving current
- * keymap. Presently keyboard and evdev handlers use it.
+ * keymap. Presently evdev handlers use it.
*/
-int input_get_keycode(struct input_dev *dev,
- unsigned int scancode, unsigned int *keycode)
+int input_get_keycode(struct input_dev *dev, struct input_keymap_entry *ke)
{
unsigned long flags;
int retval;
spin_lock_irqsave(&dev->event_lock, flags);
- retval = dev->getkeycode(dev, scancode, keycode);
- spin_unlock_irqrestore(&dev->event_lock, flags);
+ if (dev->getkeycode) {
+ /*
+ * Support for legacy drivers, that don't implement the new
+ * ioctls
+ */
+ u32 scancode = ke->index;
+
+ memcpy(ke->scancode, &scancode, sizeof(scancode));
+ ke->len = sizeof(scancode);
+ retval = dev->getkeycode(dev, scancode, &ke->keycode);
+ } else {
+ retval = dev->getkeycode_new(dev, ke);
+ }
+
+ spin_unlock_irqrestore(&dev->event_lock, flags);
return retval;
}
EXPORT_SYMBOL(input_get_keycode);
/**
- * input_get_keycode - assign new keycode to a given scancode
+ * input_set_keycode - attribute a keycode to a given scancode
* @dev: input device which keymap is being updated
- * @scancode: scancode (or its equivalent for device in question)
- * @keycode: new keycode to be assigned to the scancode
+ * @ke: new keymap entry
*
* This function should be called by anyone needing to update current
* keymap. Presently keyboard and evdev handlers use it.
*/
int input_set_keycode(struct input_dev *dev,
- unsigned int scancode, unsigned int keycode)
+ const struct input_keymap_entry *ke)
{
unsigned long flags;
unsigned int old_keycode;
int retval;
- if (keycode > KEY_MAX)
+ if (ke->keycode > KEY_MAX)
return -EINVAL;
spin_lock_irqsave(&dev->event_lock, flags);
- retval = dev->getkeycode(dev, scancode, &old_keycode);
- if (retval)
- goto out;
+ if (dev->setkeycode) {
+ /*
+ * Support for legacy drivers, that don't implement the new
+ * ioctls
+ */
+ unsigned int scancode;
+
+ retval = input_scancode_to_scalar(ke, &scancode);
+ if (retval)
+ goto out;
+
+ /*
+ * We need to know the old scancode, in order to generate a
+ * keyup effect, if the set operation happens successfully
+ */
+ if (!dev->getkeycode) {
+ retval = -EINVAL;
+ goto out;
+ }
+
+ retval = dev->getkeycode(dev, scancode, &old_keycode);
+ if (retval)
+ goto out;
+
+ retval = dev->setkeycode(dev, scancode, ke->keycode);
+ } else {
+ retval = dev->setkeycode_new(dev, ke, &old_keycode);
+ }
- retval = dev->setkeycode(dev, scancode, keycode);
if (retval)
goto out;
@@ -777,9 +887,9 @@ int input_set_keycode(struct input_dev *dev,
!is_event_supported(old_keycode, dev->keybit, KEY_MAX) &&
__test_and_clear_bit(old_keycode, dev->key)) {
- input_pass_event(dev, EV_KEY, old_keycode, 0);
+ input_pass_event(dev, NULL, EV_KEY, old_keycode, 0);
if (dev->sync)
- input_pass_event(dev, EV_SYN, SYN_REPORT, 1);
+ input_pass_event(dev, NULL, EV_SYN, SYN_REPORT, 1);
}
out:
@@ -1469,8 +1579,7 @@ static int input_dev_uevent(struct device *device, struct kobj_uevent_env *env)
} \
} while (0)
-#ifdef CONFIG_PM
-static void input_dev_reset(struct input_dev *dev, bool activate)
+static void input_dev_toggle(struct input_dev *dev, bool activate)
{
if (!dev->event)
return;
@@ -1484,12 +1593,44 @@ static void input_dev_reset(struct input_dev *dev, bool activate)
}
}
+/**
+ * input_reset_device() - reset/restore the state of input device
+ * @dev: input device whose state needs to be reset
+ *
+ * This function tries to reset the state of an opened input device and
+ * bring internal state and state of the hardware in sync with each other.
+ * We mark all keys as released, restore LED state, repeat rate, etc.
+ */
+void input_reset_device(struct input_dev *dev)
+{
+ mutex_lock(&dev->mutex);
+
+ if (dev->users) {
+ input_dev_toggle(dev, true);
+
+ /*
+ * Keys that have been pressed at suspend time are unlikely
+ * to be still pressed when we resume.
+ */
+ spin_lock_irq(&dev->event_lock);
+ input_dev_release_keys(dev);
+ spin_unlock_irq(&dev->event_lock);
+ }
+
+ mutex_unlock(&dev->mutex);
+}
+EXPORT_SYMBOL(input_reset_device);
+
+#ifdef CONFIG_PM
static int input_dev_suspend(struct device *dev)
{
struct input_dev *input_dev = to_input_dev(dev);
mutex_lock(&input_dev->mutex);
- input_dev_reset(input_dev, false);
+
+ if (input_dev->users)
+ input_dev_toggle(input_dev, false);
+
mutex_unlock(&input_dev->mutex);
return 0;
@@ -1499,18 +1640,7 @@ static int input_dev_resume(struct device *dev)
{
struct input_dev *input_dev = to_input_dev(dev);
- mutex_lock(&input_dev->mutex);
- input_dev_reset(input_dev, true);
-
- /*
- * Keys that have been pressed at suspend time are unlikely
- * to be still pressed when we resume.
- */
- spin_lock_irq(&input_dev->event_lock);
- input_dev_release_keys(input_dev);
- spin_unlock_irq(&input_dev->event_lock);
-
- mutex_unlock(&input_dev->mutex);
+ input_reset_device(input_dev);
return 0;
}
@@ -1601,7 +1731,7 @@ EXPORT_SYMBOL(input_free_device);
*
* This function allocates all necessary memory for MT slot handling in the
* input device, and adds ABS_MT_SLOT to the device capabilities. All slots
- * are initially marked as unused iby setting ABS_MT_TRACKING_ID to -1.
+ * are initially marked as unused by setting ABS_MT_TRACKING_ID to -1.
*/
int input_mt_create_slots(struct input_dev *dev, unsigned int num_slots)
{
@@ -1759,11 +1889,11 @@ int input_register_device(struct input_dev *dev)
dev->rep[REP_PERIOD] = 33;
}
- if (!dev->getkeycode)
- dev->getkeycode = input_default_getkeycode;
+ if (!dev->getkeycode && !dev->getkeycode_new)
+ dev->getkeycode_new = input_default_getkeycode;
- if (!dev->setkeycode)
- dev->setkeycode = input_default_setkeycode;
+ if (!dev->setkeycode && !dev->setkeycode_new)
+ dev->setkeycode_new = input_default_setkeycode;
dev_set_name(&dev->dev, "input%ld",
(unsigned long) atomic_inc_return(&input_no) - 1);
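
input_scancode_to_scalar() above is the bridge between the new length-tagged scancodes carried in struct input_keymap_entry and the legacy [gs]etkeycode() methods, which still expect a plain unsigned int. A userspace sketch of the same collapse follows; the struct is a cut-down stand-in for struct input_keymap_entry, and memcpy() is used instead of the kernel's pointer casts to stay alignment-clean.

/* Userspace sketch of the scancode-to-scalar collapse above. */
#include <stdint.h>
#include <stdio.h>
#include <string.h>

struct keymap_entry {
        uint8_t len;
        uint8_t scancode[32];
};

static int scancode_to_scalar(const struct keymap_entry *ke,
                              unsigned int *scancode)
{
        switch (ke->len) {
        case 1: {
                uint8_t v;
                memcpy(&v, ke->scancode, sizeof(v));
                *scancode = v;
                break;
        }
        case 2: {
                uint16_t v;
                memcpy(&v, ke->scancode, sizeof(v));
                *scancode = v;
                break;
        }
        case 4: {
                uint32_t v;
                memcpy(&v, ke->scancode, sizeof(v));
                *scancode = v;
                break;
        }
        default:
                return -1;      /* not representable as a legacy scalar */
        }

        return 0;
}

int main(void)
{
        struct keymap_entry ke = { .len = 2 };
        uint16_t raw = 0x1c;    /* e.g. a 16-bit hardware scancode */
        unsigned int scalar;

        memcpy(ke.scancode, &raw, sizeof(raw));
        if (!scancode_to_scalar(&ke, &scalar))
                printf("scalar scancode: 0x%x\n", scalar);      /* 0x1c */
        return 0;
}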
diff --git a/drivers/input/joystick/turbografx.c b/drivers/input/joystick/turbografx.c
index d53b9e900234..27b6a3ce18ca 100644
--- a/drivers/input/joystick/turbografx.c
+++ b/drivers/input/joystick/turbografx.c
@@ -245,6 +245,7 @@ static struct tgfx __init *tgfx_probe(int parport, int *n_buttons, int n_devs)
goto err_free_tgfx;
}
+ parport_put_port(pp);
return tgfx;
err_free_dev:
diff --git a/drivers/input/keyboard/Kconfig b/drivers/input/keyboard/Kconfig
index aa037fec2f86..3a87f3ba5f75 100644
--- a/drivers/input/keyboard/Kconfig
+++ b/drivers/input/keyboard/Kconfig
@@ -179,6 +179,22 @@ config KEYBOARD_GPIO
To compile this driver as a module, choose M here: the
module will be called gpio_keys.
+config KEYBOARD_GPIO_POLLED
+ tristate "Polled GPIO buttons"
+ depends on GENERIC_GPIO
+ select INPUT_POLLDEV
+ help
+ This driver implements support for buttons connected
+ to GPIO pins that are not capable of generating interrupts.
+
+ Say Y here if your device has buttons connected
+ directly to such GPIO pins. Your board-specific
+ setup logic must also provide a platform device,
+ with configuration data saying which GPIOs are used.
+
+ To compile this driver as a module, choose M here: the
+ module will be called gpio_keys_polled.
+
config KEYBOARD_TCA6416
tristate "TCA6416 Keypad Support"
depends on I2C
@@ -327,6 +343,16 @@ config KEYBOARD_NEWTON
To compile this driver as a module, choose M here: the
module will be called newtonkbd.
+config KEYBOARD_NOMADIK
+ tristate "ST-Ericsson Nomadik SKE keyboard"
+ depends on PLAT_NOMADIK
+ help
+ Say Y here if you want to use a keypad provided on the SKE controller
+ used on the Ux500 and Nomadik platforms
+
+ To compile this driver as a module, choose M here: the
+ module will be called nmk-ske-keypad.
+
config KEYBOARD_OPENCORES
tristate "OpenCores Keyboard Controller"
help
@@ -424,6 +450,24 @@ config KEYBOARD_OMAP
To compile this driver as a module, choose M here: the
module will be called omap-keypad.
+config KEYBOARD_OMAP4
+ tristate "TI OMAP4 keypad support"
+ depends on ARCH_OMAP4
+ help
+ Say Y here if you want to use the OMAP4 keypad.
+
+ To compile this driver as a module, choose M here: the
+ module will be called omap4-keypad.
+
+config KEYBOARD_TNETV107X
+ tristate "TI TNETV107X keypad support"
+ depends on ARCH_DAVINCI_TNETV107X
+ help
+ Say Y here if you want to use the TNETV107X keypad.
+
+ To compile this driver as a module, choose M here: the
+ module will be called tnetv107x-keypad.
+
config KEYBOARD_TWL4030
tristate "TI TWL4030/TWL5030/TPS659x0 keypad support"
depends on TWL4030_CORE
diff --git a/drivers/input/keyboard/Makefile b/drivers/input/keyboard/Makefile
index 504b591be0cd..622de73a445d 100644
--- a/drivers/input/keyboard/Makefile
+++ b/drivers/input/keyboard/Makefile
@@ -14,6 +14,7 @@ obj-$(CONFIG_KEYBOARD_BFIN) += bf54x-keys.o
obj-$(CONFIG_KEYBOARD_DAVINCI) += davinci_keyscan.o
obj-$(CONFIG_KEYBOARD_EP93XX) += ep93xx_keypad.o
obj-$(CONFIG_KEYBOARD_GPIO) += gpio_keys.o
+obj-$(CONFIG_KEYBOARD_GPIO_POLLED) += gpio_keys_polled.o
obj-$(CONFIG_KEYBOARD_TCA6416) += tca6416-keypad.o
obj-$(CONFIG_KEYBOARD_HIL) += hil_kbd.o
obj-$(CONFIG_KEYBOARD_HIL_OLD) += hilkbd.o
@@ -28,7 +29,9 @@ obj-$(CONFIG_KEYBOARD_MATRIX) += matrix_keypad.o
obj-$(CONFIG_KEYBOARD_MAX7359) += max7359_keypad.o
obj-$(CONFIG_KEYBOARD_MCS) += mcs_touchkey.o
obj-$(CONFIG_KEYBOARD_NEWTON) += newtonkbd.o
+obj-$(CONFIG_KEYBOARD_NOMADIK) += nomadik-ske-keypad.o
obj-$(CONFIG_KEYBOARD_OMAP) += omap-keypad.o
+obj-$(CONFIG_KEYBOARD_OMAP4) += omap4-keypad.o
obj-$(CONFIG_KEYBOARD_OPENCORES) += opencores-kbd.o
obj-$(CONFIG_KEYBOARD_PXA27x) += pxa27x_keypad.o
obj-$(CONFIG_KEYBOARD_PXA930_ROTARY) += pxa930_rotary.o
@@ -38,6 +41,7 @@ obj-$(CONFIG_KEYBOARD_SH_KEYSC) += sh_keysc.o
obj-$(CONFIG_KEYBOARD_STMPE) += stmpe-keypad.o
obj-$(CONFIG_KEYBOARD_STOWAWAY) += stowaway.o
obj-$(CONFIG_KEYBOARD_SUNKBD) += sunkbd.o
+obj-$(CONFIG_KEYBOARD_TNETV107X) += tnetv107x-keypad.o
obj-$(CONFIG_KEYBOARD_TWL4030) += twl4030_keypad.o
obj-$(CONFIG_KEYBOARD_XTKBD) += xtkbd.o
obj-$(CONFIG_KEYBOARD_W90P910) += w90p910_keypad.o
diff --git a/drivers/input/keyboard/adp5588-keys.c b/drivers/input/keyboard/adp5588-keys.c
index d6918cb966c0..af45d275f686 100644
--- a/drivers/input/keyboard/adp5588-keys.c
+++ b/drivers/input/keyboard/adp5588-keys.c
@@ -4,7 +4,7 @@
* I2C QWERTY Keypad and IO Expander
* Bugs: Enter bugs at http://blackfin.uclinux.org/
*
- * Copyright (C) 2008-2009 Analog Devices Inc.
+ * Copyright (C) 2008-2010 Analog Devices Inc.
* Licensed under the GPL-2 or later.
*/
@@ -24,29 +24,6 @@
#include <linux/i2c/adp5588.h>
- /* Configuration Register1 */
-#define AUTO_INC (1 << 7)
-#define GPIEM_CFG (1 << 6)
-#define OVR_FLOW_M (1 << 5)
-#define INT_CFG (1 << 4)
-#define OVR_FLOW_IEN (1 << 3)
-#define K_LCK_IM (1 << 2)
-#define GPI_IEN (1 << 1)
-#define KE_IEN (1 << 0)
-
-/* Interrupt Status Register */
-#define CMP2_INT (1 << 5)
-#define CMP1_INT (1 << 4)
-#define OVR_FLOW_INT (1 << 3)
-#define K_LCK_INT (1 << 2)
-#define GPI_INT (1 << 1)
-#define KE_INT (1 << 0)
-
-/* Key Lock and Event Counter Register */
-#define K_LCK_EN (1 << 6)
-#define LCK21 0x30
-#define KEC 0xF
-
/* Key Event Register xy */
#define KEY_EV_PRESSED (1 << 7)
#define KEY_EV_MASK (0x7F)
@@ -55,10 +32,6 @@
#define KEYP_MAX_EVENT 10
-#define MAXGPIO 18
-#define ADP_BANK(offs) ((offs) >> 3)
-#define ADP_BIT(offs) (1u << ((offs) & 0x7))
-
/*
* Early pre 4.0 Silicon required to delay readout by at least 25ms,
* since the Event Counter Register updated 25ms after the interrupt
@@ -75,7 +48,7 @@ struct adp5588_kpad {
const struct adp5588_gpi_map *gpimap;
unsigned short gpimapsize;
#ifdef CONFIG_GPIOLIB
- unsigned char gpiomap[MAXGPIO];
+ unsigned char gpiomap[ADP5588_MAXGPIO];
bool export_gpio;
struct gpio_chip gc;
struct mutex gpio_lock; /* Protect cached dir, dat_out */
@@ -103,8 +76,8 @@ static int adp5588_write(struct i2c_client *client, u8 reg, u8 val)
static int adp5588_gpio_get_value(struct gpio_chip *chip, unsigned off)
{
struct adp5588_kpad *kpad = container_of(chip, struct adp5588_kpad, gc);
- unsigned int bank = ADP_BANK(kpad->gpiomap[off]);
- unsigned int bit = ADP_BIT(kpad->gpiomap[off]);
+ unsigned int bank = ADP5588_BANK(kpad->gpiomap[off]);
+ unsigned int bit = ADP5588_BIT(kpad->gpiomap[off]);
return !!(adp5588_read(kpad->client, GPIO_DAT_STAT1 + bank) & bit);
}
@@ -113,8 +86,8 @@ static void adp5588_gpio_set_value(struct gpio_chip *chip,
unsigned off, int val)
{
struct adp5588_kpad *kpad = container_of(chip, struct adp5588_kpad, gc);
- unsigned int bank = ADP_BANK(kpad->gpiomap[off]);
- unsigned int bit = ADP_BIT(kpad->gpiomap[off]);
+ unsigned int bank = ADP5588_BANK(kpad->gpiomap[off]);
+ unsigned int bit = ADP5588_BIT(kpad->gpiomap[off]);
mutex_lock(&kpad->gpio_lock);
@@ -132,8 +105,8 @@ static void adp5588_gpio_set_value(struct gpio_chip *chip,
static int adp5588_gpio_direction_input(struct gpio_chip *chip, unsigned off)
{
struct adp5588_kpad *kpad = container_of(chip, struct adp5588_kpad, gc);
- unsigned int bank = ADP_BANK(kpad->gpiomap[off]);
- unsigned int bit = ADP_BIT(kpad->gpiomap[off]);
+ unsigned int bank = ADP5588_BANK(kpad->gpiomap[off]);
+ unsigned int bit = ADP5588_BIT(kpad->gpiomap[off]);
int ret;
mutex_lock(&kpad->gpio_lock);
@@ -150,8 +123,8 @@ static int adp5588_gpio_direction_output(struct gpio_chip *chip,
unsigned off, int val)
{
struct adp5588_kpad *kpad = container_of(chip, struct adp5588_kpad, gc);
- unsigned int bank = ADP_BANK(kpad->gpiomap[off]);
- unsigned int bit = ADP_BIT(kpad->gpiomap[off]);
+ unsigned int bank = ADP5588_BANK(kpad->gpiomap[off]);
+ unsigned int bit = ADP5588_BIT(kpad->gpiomap[off]);
int ret;
mutex_lock(&kpad->gpio_lock);
@@ -176,7 +149,7 @@ static int adp5588_gpio_direction_output(struct gpio_chip *chip,
static int __devinit adp5588_build_gpiomap(struct adp5588_kpad *kpad,
const struct adp5588_kpad_platform_data *pdata)
{
- bool pin_used[MAXGPIO];
+ bool pin_used[ADP5588_MAXGPIO];
int n_unused = 0;
int i;
@@ -191,7 +164,7 @@ static int __devinit adp5588_build_gpiomap(struct adp5588_kpad *kpad,
for (i = 0; i < kpad->gpimapsize; i++)
pin_used[kpad->gpimap[i].pin - GPI_PIN_BASE] = true;
- for (i = 0; i < MAXGPIO; i++)
+ for (i = 0; i < ADP5588_MAXGPIO; i++)
if (!pin_used[i])
kpad->gpiomap[n_unused++] = i;
@@ -234,7 +207,7 @@ static int __devinit adp5588_gpio_add(struct adp5588_kpad *kpad)
return error;
}
- for (i = 0; i <= ADP_BANK(MAXGPIO); i++) {
+ for (i = 0; i <= ADP5588_BANK(ADP5588_MAXGPIO); i++) {
kpad->dat_out[i] = adp5588_read(kpad->client,
GPIO_DAT_OUT1 + i);
kpad->dir[i] = adp5588_read(kpad->client, GPIO_DIR1 + i);
@@ -318,11 +291,11 @@ static void adp5588_work(struct work_struct *work)
status = adp5588_read(client, INT_STAT);
- if (status & OVR_FLOW_INT) /* Unlikely and should never happen */
+ if (status & ADP5588_OVR_FLOW_INT) /* Unlikely and should never happen */
dev_err(&client->dev, "Event Overflow Error\n");
- if (status & KE_INT) {
- ev_cnt = adp5588_read(client, KEY_LCK_EC_STAT) & KEC;
+ if (status & ADP5588_KE_INT) {
+ ev_cnt = adp5588_read(client, KEY_LCK_EC_STAT) & ADP5588_KEC;
if (ev_cnt) {
adp5588_report_events(kpad, ev_cnt);
input_sync(kpad->input);
@@ -360,7 +333,7 @@ static int __devinit adp5588_setup(struct i2c_client *client)
if (pdata->en_keylock) {
ret |= adp5588_write(client, UNLOCK1, pdata->unlock_key1);
ret |= adp5588_write(client, UNLOCK2, pdata->unlock_key2);
- ret |= adp5588_write(client, KEY_LCK_EC_STAT, K_LCK_EN);
+ ret |= adp5588_write(client, KEY_LCK_EC_STAT, ADP5588_K_LCK_EN);
}
for (i = 0; i < KEYP_MAX_EVENT; i++)
@@ -384,7 +357,7 @@ static int __devinit adp5588_setup(struct i2c_client *client)
}
if (gpio_data) {
- for (i = 0; i <= ADP_BANK(MAXGPIO); i++) {
+ for (i = 0; i <= ADP5588_BANK(ADP5588_MAXGPIO); i++) {
int pull_mask = gpio_data->pullup_dis_mask;
ret |= adp5588_write(client, GPIO_PULL1 + i,
@@ -392,11 +365,14 @@ static int __devinit adp5588_setup(struct i2c_client *client)
}
}
- ret |= adp5588_write(client, INT_STAT, CMP2_INT | CMP1_INT |
- OVR_FLOW_INT | K_LCK_INT |
- GPI_INT | KE_INT); /* Status is W1C */
+ ret |= adp5588_write(client, INT_STAT,
+ ADP5588_CMP2_INT | ADP5588_CMP1_INT |
+ ADP5588_OVR_FLOW_INT | ADP5588_K_LCK_INT |
+ ADP5588_GPI_INT | ADP5588_KE_INT); /* Status is W1C */
- ret |= adp5588_write(client, CFG, INT_CFG | OVR_FLOW_IEN | KE_IEN);
+ ret |= adp5588_write(client, CFG, ADP5588_INT_CFG |
+ ADP5588_OVR_FLOW_IEN |
+ ADP5588_KE_IEN);
if (ret < 0) {
dev_err(&client->dev, "Write Error\n");
@@ -660,7 +636,7 @@ static const struct dev_pm_ops adp5588_dev_pm_ops = {
#endif
static const struct i2c_device_id adp5588_id[] = {
- { KBUILD_MODNAME, 0 },
+ { "adp5588-keys", 0 },
{ "adp5587-keys", 0 },
{ }
};
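
The driver-local MAXGPIO/ADP_BANK()/ADP_BIT() macros removed above are replaced by ADP5588_MAXGPIO/ADP5588_BANK()/ADP5588_BIT() from <linux/i2c/adp5588.h>. Per the removed definitions, the split is offset >> 3 to pick the per-8-GPIO register bank and 1 << (offset & 7) for the bit inside that register. Standalone check below, with the macro bodies copied from the removed lines (the header versions are assumed to be equivalent).

/* Standalone check of the GPIO bank/bit split. */
#include <stdio.h>

#define ADP5588_MAXGPIO         18
#define ADP5588_BANK(offs)      ((offs) >> 3)
#define ADP5588_BIT(offs)       (1u << ((offs) & 0x7))

int main(void)
{
        unsigned int offs;

        for (offs = 0; offs < ADP5588_MAXGPIO; offs++)
                printf("gpio %2u -> bank %u, bit mask 0x%02x\n",
                       offs, ADP5588_BANK(offs), ADP5588_BIT(offs));
        /* e.g. gpio 10 -> bank 1, bit mask 0x04 */
        return 0;
}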
diff --git a/drivers/input/keyboard/atkbd.c b/drivers/input/keyboard/atkbd.c
index d358ef8623f4..11478eb2c27d 100644
--- a/drivers/input/keyboard/atkbd.c
+++ b/drivers/input/keyboard/atkbd.c
@@ -63,6 +63,10 @@ static bool atkbd_extra;
module_param_named(extra, atkbd_extra, bool, 0);
MODULE_PARM_DESC(extra, "Enable extra LEDs and keys on IBM RapidAcces, EzKey and similar keyboards");
+static bool atkbd_terminal;
+module_param_named(terminal, atkbd_terminal, bool, 0);
+MODULE_PARM_DESC(terminal, "Enable break codes on an IBM Terminal keyboard connected via AT/PS2");
+
/*
* Scancode to keycode tables. These are just the default setting, and
* are loadable via a userland utility.
@@ -136,7 +140,8 @@ static const unsigned short atkbd_unxlate_table[128] = {
#define ATKBD_CMD_ENABLE 0x00f4
#define ATKBD_CMD_RESET_DIS 0x00f5 /* Reset to defaults and disable */
#define ATKBD_CMD_RESET_DEF 0x00f6 /* Reset to defaults */
-#define ATKBD_CMD_SETALL_MBR 0x00fa
+#define ATKBD_CMD_SETALL_MB 0x00f8 /* Set all keys to give break codes */
+#define ATKBD_CMD_SETALL_MBR 0x00fa /* ... and repeat */
#define ATKBD_CMD_RESET_BAT 0x02ff
#define ATKBD_CMD_RESEND 0x00fe
#define ATKBD_CMD_EX_ENABLE 0x10ea
@@ -764,6 +769,11 @@ static int atkbd_select_set(struct atkbd *atkbd, int target_set, int allow_extra
}
}
+ if (atkbd_terminal) {
+ ps2_command(ps2dev, param, ATKBD_CMD_SETALL_MB);
+ return 3;
+ }
+
if (target_set != 3)
return 2;
diff --git a/drivers/input/keyboard/gpio_keys_polled.c b/drivers/input/keyboard/gpio_keys_polled.c
new file mode 100644
index 000000000000..4c17aff20657
--- /dev/null
+++ b/drivers/input/keyboard/gpio_keys_polled.c
@@ -0,0 +1,261 @@
+/*
+ * Driver for buttons on GPIO lines not capable of generating interrupts
+ *
+ * Copyright (C) 2007-2010 Gabor Juhos <juhosg@openwrt.org>
+ * Copyright (C) 2010 Nuno Goncalves <nunojpg@gmail.com>
+ *
+ * This file was based on: /drivers/input/misc/cobalt_btns.c
+ * Copyright (C) 2007 Yoichi Yuasa <yoichi_yuasa@tripeaks.co.jp>
+ *
+ * also was based on: /drivers/input/keyboard/gpio_keys.c
+ * Copyright 2005 Phil Blundell
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/slab.h>
+#include <linux/input.h>
+#include <linux/input-polldev.h>
+#include <linux/ioport.h>
+#include <linux/platform_device.h>
+#include <linux/gpio.h>
+#include <linux/gpio_keys.h>
+
+#define DRV_NAME "gpio-keys-polled"
+
+struct gpio_keys_button_data {
+ int last_state;
+ int count;
+ int threshold;
+ int can_sleep;
+};
+
+struct gpio_keys_polled_dev {
+ struct input_polled_dev *poll_dev;
+ struct device *dev;
+ struct gpio_keys_platform_data *pdata;
+ struct gpio_keys_button_data data[0];
+};
+
+static void gpio_keys_polled_check_state(struct input_dev *input,
+ struct gpio_keys_button *button,
+ struct gpio_keys_button_data *bdata)
+{
+ int state;
+
+ if (bdata->can_sleep)
+ state = !!gpio_get_value_cansleep(button->gpio);
+ else
+ state = !!gpio_get_value(button->gpio);
+
+ if (state != bdata->last_state) {
+ unsigned int type = button->type ?: EV_KEY;
+
+ input_event(input, type, button->code,
+ !!(state ^ button->active_low));
+ input_sync(input);
+ bdata->count = 0;
+ bdata->last_state = state;
+ }
+}
+
+static void gpio_keys_polled_poll(struct input_polled_dev *dev)
+{
+ struct gpio_keys_polled_dev *bdev = dev->private;
+ struct gpio_keys_platform_data *pdata = bdev->pdata;
+ struct input_dev *input = dev->input;
+ int i;
+
+ for (i = 0; i < bdev->pdata->nbuttons; i++) {
+ struct gpio_keys_button_data *bdata = &bdev->data[i];
+
+ if (bdata->count < bdata->threshold)
+ bdata->count++;
+ else
+ gpio_keys_polled_check_state(input, &pdata->buttons[i],
+ bdata);
+ }
+}
+
+static void gpio_keys_polled_open(struct input_polled_dev *dev)
+{
+ struct gpio_keys_polled_dev *bdev = dev->private;
+ struct gpio_keys_platform_data *pdata = bdev->pdata;
+
+ if (pdata->enable)
+ pdata->enable(bdev->dev);
+}
+
+static void gpio_keys_polled_close(struct input_polled_dev *dev)
+{
+ struct gpio_keys_polled_dev *bdev = dev->private;
+ struct gpio_keys_platform_data *pdata = bdev->pdata;
+
+ if (pdata->disable)
+ pdata->disable(bdev->dev);
+}
+
+static int __devinit gpio_keys_polled_probe(struct platform_device *pdev)
+{
+ struct gpio_keys_platform_data *pdata = pdev->dev.platform_data;
+ struct device *dev = &pdev->dev;
+ struct gpio_keys_polled_dev *bdev;
+ struct input_polled_dev *poll_dev;
+ struct input_dev *input;
+ int error;
+ int i;
+
+ if (!pdata || !pdata->poll_interval)
+ return -EINVAL;
+
+ bdev = kzalloc(sizeof(struct gpio_keys_polled_dev) +
+ pdata->nbuttons * sizeof(struct gpio_keys_button_data),
+ GFP_KERNEL);
+ if (!bdev) {
+ dev_err(dev, "no memory for private data\n");
+ return -ENOMEM;
+ }
+
+ poll_dev = input_allocate_polled_device();
+ if (!poll_dev) {
+ dev_err(dev, "no memory for polled device\n");
+ error = -ENOMEM;
+ goto err_free_bdev;
+ }
+
+ poll_dev->private = bdev;
+ poll_dev->poll = gpio_keys_polled_poll;
+ poll_dev->poll_interval = pdata->poll_interval;
+ poll_dev->open = gpio_keys_polled_open;
+ poll_dev->close = gpio_keys_polled_close;
+
+ input = poll_dev->input;
+
+ input->evbit[0] = BIT(EV_KEY);
+ input->name = pdev->name;
+ input->phys = DRV_NAME"/input0";
+ input->dev.parent = &pdev->dev;
+
+ input->id.bustype = BUS_HOST;
+ input->id.vendor = 0x0001;
+ input->id.product = 0x0001;
+ input->id.version = 0x0100;
+
+ for (i = 0; i < pdata->nbuttons; i++) {
+ struct gpio_keys_button *button = &pdata->buttons[i];
+ struct gpio_keys_button_data *bdata = &bdev->data[i];
+ unsigned int gpio = button->gpio;
+ unsigned int type = button->type ?: EV_KEY;
+
+ if (button->wakeup) {
+ dev_err(dev, DRV_NAME " does not support wakeup\n");
+ error = -EINVAL;
+ goto err_free_gpio;
+ }
+
+ error = gpio_request(gpio,
+ button->desc ? button->desc : DRV_NAME);
+ if (error) {
+ dev_err(dev, "unable to claim gpio %u, err=%d\n",
+ gpio, error);
+ goto err_free_gpio;
+ }
+
+ error = gpio_direction_input(gpio);
+ if (error) {
+ dev_err(dev,
+ "unable to set direction on gpio %u, err=%d\n",
+ gpio, error);
+ goto err_free_gpio;
+ }
+
+ bdata->can_sleep = gpio_cansleep(gpio);
+ bdata->last_state = -1;
+ bdata->threshold = DIV_ROUND_UP(button->debounce_interval,
+ pdata->poll_interval);
+
+ input_set_capability(input, type, button->code);
+ }
+
+ bdev->poll_dev = poll_dev;
+ bdev->dev = dev;
+ bdev->pdata = pdata;
+ platform_set_drvdata(pdev, bdev);
+
+ error = input_register_polled_device(poll_dev);
+ if (error) {
+ dev_err(dev, "unable to register polled device, err=%d\n",
+ error);
+ goto err_free_gpio;
+ }
+
+ /* report initial state of the buttons */
+ for (i = 0; i < pdata->nbuttons; i++)
+ gpio_keys_polled_check_state(input, &pdata->buttons[i],
+ &bdev->data[i]);
+
+ return 0;
+
+err_free_gpio:
+ while (--i >= 0)
+ gpio_free(pdata->buttons[i].gpio);
+
+ input_free_polled_device(poll_dev);
+
+err_free_bdev:
+ kfree(bdev);
+
+ platform_set_drvdata(pdev, NULL);
+ return error;
+}
+
+static int __devexit gpio_keys_polled_remove(struct platform_device *pdev)
+{
+ struct gpio_keys_polled_dev *bdev = platform_get_drvdata(pdev);
+ struct gpio_keys_platform_data *pdata = bdev->pdata;
+ int i;
+
+ input_unregister_polled_device(bdev->poll_dev);
+
+ for (i = 0; i < pdata->nbuttons; i++)
+ gpio_free(pdata->buttons[i].gpio);
+
+ input_free_polled_device(bdev->poll_dev);
+
+ kfree(bdev);
+ platform_set_drvdata(pdev, NULL);
+
+ return 0;
+}
+
+static struct platform_driver gpio_keys_polled_driver = {
+ .probe = gpio_keys_polled_probe,
+ .remove = __devexit_p(gpio_keys_polled_remove),
+ .driver = {
+ .name = DRV_NAME,
+ .owner = THIS_MODULE,
+ },
+};
+
+static int __init gpio_keys_polled_init(void)
+{
+ return platform_driver_register(&gpio_keys_polled_driver);
+}
+
+static void __exit gpio_keys_polled_exit(void)
+{
+ platform_driver_unregister(&gpio_keys_polled_driver);
+}
+
+module_init(gpio_keys_polled_init);
+module_exit(gpio_keys_polled_exit);
+
+MODULE_LICENSE("GPL v2");
+MODULE_AUTHOR("Gabor Juhos <juhosg@openwrt.org>");
+MODULE_DESCRIPTION("Polled GPIO Buttons driver");
+MODULE_ALIAS("platform:" DRV_NAME);
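
gpio_keys_polled_poll() debounces in software: after a reported state change resets bdata->count, the next DIV_ROUND_UP(debounce_interval, poll_interval) polling cycles are skipped before the line is sampled again. The sketch below walks through that counter with example interval values (both in milliseconds; in the driver they come from gpio_keys_platform_data).

/* Walk-through of the polling debounce counter used above. */
#include <stdio.h>

#define DIV_ROUND_UP(n, d)      (((n) + (d) - 1) / (d))

int main(void)
{
        unsigned int poll_interval = 20;        /* ms between polls */
        unsigned int debounce_interval = 50;    /* ms of required stability */
        unsigned int threshold = DIV_ROUND_UP(debounce_interval, poll_interval);
        unsigned int count = 0, cycle;

        for (cycle = 1; cycle <= 5; cycle++) {
                if (count < threshold) {
                        count++;        /* still settling: skip the GPIO read */
                        printf("cycle %u: debouncing (%u/%u)\n",
                               cycle, count, threshold);
                } else {
                        printf("cycle %u: sample GPIO and report changes\n",
                               cycle);
                }
        }
        return 0;
}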
diff --git a/drivers/input/keyboard/hil_kbd.c b/drivers/input/keyboard/hil_kbd.c
index 19fa94af207a..fed31e0947a1 100644
--- a/drivers/input/keyboard/hil_kbd.c
+++ b/drivers/input/keyboard/hil_kbd.c
@@ -570,6 +570,8 @@ static struct serio_device_id hil_dev_ids[] = {
{ 0 }
};
+MODULE_DEVICE_TABLE(serio, hil_dev_ids);
+
static struct serio_driver hil_serio_drv = {
.driver = {
.name = "hil_dev",
diff --git a/drivers/input/keyboard/jornada680_kbd.c b/drivers/input/keyboard/jornada680_kbd.c
index 5fc976dbce0b..7197c5698747 100644
--- a/drivers/input/keyboard/jornada680_kbd.c
+++ b/drivers/input/keyboard/jornada680_kbd.c
@@ -139,35 +139,35 @@ static void jornada_scan_keyb(unsigned char *s)
}, *y = matrix_PDE;
/* Save these control reg bits */
- dc_static = (ctrl_inw(PDCR) & (~0xcc0c));
- ec_static = (ctrl_inw(PECR) & (~0xf0cf));
+ dc_static = (__raw_readw(PDCR) & (~0xcc0c));
+ ec_static = (__raw_readw(PECR) & (~0xf0cf));
for (i = 0; i < 8; i++) {
/* disable output for all but the one we want to scan */
- ctrl_outw((dc_static | *y++), PDCR);
- ctrl_outw((ec_static | *y++), PECR);
+ __raw_writew((dc_static | *y++), PDCR);
+ __raw_writew((ec_static | *y++), PECR);
udelay(5);
/* Get scanline row */
- ctrl_outb(*t++, PDDR);
- ctrl_outb(*t++, PEDR);
+ __raw_writeb(*t++, PDDR);
+ __raw_writeb(*t++, PEDR);
udelay(50);
/* Read data */
- *s++ = ctrl_inb(PCDR);
- *s++ = ctrl_inb(PFDR);
+ *s++ = __raw_readb(PCDR);
+ *s++ = __raw_readb(PFDR);
}
/* Scan no lines */
- ctrl_outb(0xff, PDDR);
- ctrl_outb(0xff, PEDR);
+ __raw_writeb(0xff, PDDR);
+ __raw_writeb(0xff, PEDR);
/* Enable all scanlines */
- ctrl_outw((dc_static | (0x5555 & 0xcc0c)),PDCR);
- ctrl_outw((ec_static | (0x5555 & 0xf0cf)),PECR);
+ __raw_writew((dc_static | (0x5555 & 0xcc0c)),PDCR);
+ __raw_writew((ec_static | (0x5555 & 0xf0cf)),PECR);
/* Ignore extra keys and events */
- *s++ = ctrl_inb(PGDR);
- *s++ = ctrl_inb(PHDR);
+ *s++ = __raw_readb(PGDR);
+ *s++ = __raw_readb(PHDR);
}
static void jornadakbd680_poll(struct input_polled_dev *dev)
diff --git a/drivers/input/keyboard/nomadik-ske-keypad.c b/drivers/input/keyboard/nomadik-ske-keypad.c
new file mode 100644
index 000000000000..6e0f23091360
--- /dev/null
+++ b/drivers/input/keyboard/nomadik-ske-keypad.c
@@ -0,0 +1,408 @@
+/*
+ * Copyright (C) ST-Ericsson SA 2010
+ *
+ * Author: Naveen Kumar G <naveen.gaddipati@stericsson.com> for ST-Ericsson
+ * Author: Sundar Iyer <sundar.iyer@stericsson.com> for ST-Ericsson
+ *
+ * License terms:GNU General Public License (GPL) version 2
+ *
+ * Keypad controller driver for the SKE (Scroll Key Encoder) module used in
+ * the Nomadik 8815 and Ux500 platforms.
+ */
+
+#include <linux/platform_device.h>
+#include <linux/interrupt.h>
+#include <linux/spinlock.h>
+#include <linux/io.h>
+#include <linux/delay.h>
+#include <linux/input.h>
+#include <linux/slab.h>
+#include <linux/clk.h>
+
+#include <plat/ske.h>
+
+/* SKE_CR bits */
+#define SKE_KPMLT (0x1 << 6)
+#define SKE_KPCN (0x7 << 3)
+#define SKE_KPASEN (0x1 << 2)
+#define SKE_KPASON (0x1 << 7)
+
+/* SKE_IMSC bits */
+#define SKE_KPIMA (0x1 << 2)
+
+/* SKE_ICR bits */
+#define SKE_KPICS (0x1 << 3)
+#define SKE_KPICA (0x1 << 2)
+
+/* SKE_RIS bits */
+#define SKE_KPRISA (0x1 << 2)
+
+#define SKE_KEYPAD_ROW_SHIFT 3
+#define SKE_KPD_KEYMAP_SIZE (8 * 8)
+
+/* keypad auto scan registers */
+#define SKE_ASR0 0x20
+#define SKE_ASR1 0x24
+#define SKE_ASR2 0x28
+#define SKE_ASR3 0x2C
+
+#define SKE_NUM_ASRX_REGISTERS (4)
+
+/**
+ * struct ske_keypad - data structure used by keypad driver
+ * @irq: irq no
+ * @reg_base: ske registers base address
+ * @input: pointer to input device object
+ * @board: keypad platform device
+ * @keymap: matrix scan code table for keycodes
+ * @clk: clock structure pointer
+ */
+struct ske_keypad {
+ int irq;
+ void __iomem *reg_base;
+ struct input_dev *input;
+ const struct ske_keypad_platform_data *board;
+ unsigned short keymap[SKE_KPD_KEYMAP_SIZE];
+ struct clk *clk;
+ spinlock_t ske_keypad_lock;
+};
+
+static void ske_keypad_set_bits(struct ske_keypad *keypad, u16 addr,
+ u8 mask, u8 data)
+{
+ u32 ret;
+
+ spin_lock(&keypad->ske_keypad_lock);
+
+ ret = readl(keypad->reg_base + addr);
+ ret &= ~mask;
+ ret |= data;
+ writel(ret, keypad->reg_base + addr);
+
+ spin_unlock(&keypad->ske_keypad_lock);
+}
+
+/*
+ * ske_keypad_chip_init: init keypad controller configuration
+ *
+ * Enable Multi key press detection, auto scan mode
+ */
+static int __devinit ske_keypad_chip_init(struct ske_keypad *keypad)
+{
+ u32 value;
+ int timeout = 50;
+
+ /* check SKE_RIS to be 0 */
+ while ((readl(keypad->reg_base + SKE_RIS) != 0x00000000) && timeout--)
+ cpu_relax();
+
+ if (!timeout)
+ return -EINVAL;
+
+ /*
+ * set debounce value
+ * keypad debounce is configured in DBCR[15:8]
+ * dbounce value in steps of 32/32.768 ms
+ */
+ spin_lock(&keypad->ske_keypad_lock);
+ value = readl(keypad->reg_base + SKE_DBCR);
+ value = value & 0xff;
+ value |= ((keypad->board->debounce_ms * 32000)/32768) << 8;
+ writel(value, keypad->reg_base + SKE_DBCR);
+ spin_unlock(&keypad->ske_keypad_lock);
+
+ /* enable multi key detection */
+ ske_keypad_set_bits(keypad, SKE_CR, 0x0, SKE_KPMLT);
+
+ /*
+ * set up the number of columns
+ * KPCN[5:3] defines no. of keypad columns to be auto scanned
+ */
+ value = (keypad->board->kcol - 1) << 3;
+ ske_keypad_set_bits(keypad, SKE_CR, SKE_KPCN, value);
+
+ /* clear keypad interrupt for auto(and pending SW) scans */
+ ske_keypad_set_bits(keypad, SKE_ICR, 0x0, SKE_KPICA | SKE_KPICS);
+
+ /* un-mask keypad interrupts */
+ ske_keypad_set_bits(keypad, SKE_IMSC, 0x0, SKE_KPIMA);
+
+ /* enable automatic scan */
+ ske_keypad_set_bits(keypad, SKE_CR, 0x0, SKE_KPASEN);
+
+ return 0;
+}
+
+static void ske_keypad_read_data(struct ske_keypad *keypad)
+{
+ struct input_dev *input = keypad->input;
+ u16 status;
+ int col = 0, row = 0, code;
+ int ske_asr, ske_ris, key_pressed, i;
+
+ /*
+ * Read the auto scan registers
+ *
+ * Each SKE_ASRx (x=0 to x=3) contains two row values.
+ * lower byte contains row value for column 2*x,
+ * upper byte contains row value for column 2*x + 1
+ */
+ for (i = 0; i < SKE_NUM_ASRX_REGISTERS; i++) {
+ ske_asr = readl(keypad->reg_base + SKE_ASR0 + (4 * i));
+ if (!ske_asr)
+ continue;
+
+ /* now that ASRx is non-zero, find out the column x and row y */
+ if (ske_asr & 0xff) {
+ col = i * 2;
+ status = ske_asr & 0xff;
+ } else {
+ col = (i * 2) + 1;
+ status = (ske_asr & 0xff00) >> 8;
+ }
+
+ /* find out the row */
+ row = __ffs(status);
+
+ code = MATRIX_SCAN_CODE(row, col, SKE_KEYPAD_ROW_SHIFT);
+ ske_ris = readl(keypad->reg_base + SKE_RIS);
+ key_pressed = ske_ris & SKE_KPRISA;
+
+ input_event(input, EV_MSC, MSC_SCAN, code);
+ input_report_key(input, keypad->keymap[code], key_pressed);
+ input_sync(input);
+ }
+}
+
+static irqreturn_t ske_keypad_irq(int irq, void *dev_id)
+{
+ struct ske_keypad *keypad = dev_id;
+ int retries = 20;
+
+ /* disable auto scan interrupt; mask the interrupt generated */
+ ske_keypad_set_bits(keypad, SKE_IMSC, ~SKE_KPIMA, 0x0);
+ ske_keypad_set_bits(keypad, SKE_ICR, 0x0, SKE_KPICA);
+
+ while ((readl(keypad->reg_base + SKE_CR) & SKE_KPASON) && --retries)
+ msleep(5);
+
+ if (retries) {
+ /* SKEx registers are stable and can be read */
+ ske_keypad_read_data(keypad);
+ }
+
+ /* enable auto scan interrupts */
+ ske_keypad_set_bits(keypad, SKE_IMSC, 0x0, SKE_KPIMA);
+
+ return IRQ_HANDLED;
+}
+
+static int __devinit ske_keypad_probe(struct platform_device *pdev)
+{
+ const struct ske_keypad_platform_data *plat = pdev->dev.platform_data;
+ struct ske_keypad *keypad;
+ struct input_dev *input;
+ struct resource *res;
+ int irq;
+ int error;
+
+ if (!plat) {
+ dev_err(&pdev->dev, "invalid keypad platform data\n");
+ return -EINVAL;
+ }
+
+ irq = platform_get_irq(pdev, 0);
+ if (irq < 0) {
+ dev_err(&pdev->dev, "failed to get keypad irq\n");
+ return -EINVAL;
+ }
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (!res) {
+ dev_err(&pdev->dev, "missing platform resources\n");
+ return -EINVAL;
+ }
+
+ keypad = kzalloc(sizeof(struct ske_keypad), GFP_KERNEL);
+ input = input_allocate_device();
+ if (!keypad || !input) {
+ dev_err(&pdev->dev, "failed to allocate keypad memory\n");
+ error = -ENOMEM;
+ goto err_free_mem;
+ }
+
+ keypad->irq = irq;
+ keypad->board = plat;
+ keypad->input = input;
+ spin_lock_init(&keypad->ske_keypad_lock);
+
+ if (!request_mem_region(res->start, resource_size(res), pdev->name)) {
+ dev_err(&pdev->dev, "failed to request I/O memory\n");
+ error = -EBUSY;
+ goto err_free_mem;
+ }
+
+ keypad->reg_base = ioremap(res->start, resource_size(res));
+ if (!keypad->reg_base) {
+ dev_err(&pdev->dev, "failed to remap I/O memory\n");
+ error = -ENXIO;
+ goto err_free_mem_region;
+ }
+
+ keypad->clk = clk_get(&pdev->dev, NULL);
+ if (IS_ERR(keypad->clk)) {
+ dev_err(&pdev->dev, "failed to get clk\n");
+ error = PTR_ERR(keypad->clk);
+ goto err_iounmap;
+ }
+
+ input->id.bustype = BUS_HOST;
+ input->name = "ux500-ske-keypad";
+ input->dev.parent = &pdev->dev;
+
+ input->keycode = keypad->keymap;
+ input->keycodesize = sizeof(keypad->keymap[0]);
+ input->keycodemax = ARRAY_SIZE(keypad->keymap);
+
+ input_set_capability(input, EV_MSC, MSC_SCAN);
+
+ __set_bit(EV_KEY, input->evbit);
+ if (!plat->no_autorepeat)
+ __set_bit(EV_REP, input->evbit);
+
+ matrix_keypad_build_keymap(plat->keymap_data, SKE_KEYPAD_ROW_SHIFT,
+ input->keycode, input->keybit);
+
+ clk_enable(keypad->clk);
+
+ /* go through board initialization helpers */
+ if (keypad->board->init)
+ keypad->board->init();
+
+ error = ske_keypad_chip_init(keypad);
+ if (error) {
+ dev_err(&pdev->dev, "unable to init keypad hardware\n");
+ goto err_clk_disable;
+ }
+
+ error = request_threaded_irq(keypad->irq, NULL, ske_keypad_irq,
+ IRQF_ONESHOT, "ske-keypad", keypad);
+ if (error) {
+ dev_err(&pdev->dev, "allocate irq %d failed\n", keypad->irq);
+ goto err_clk_disable;
+ }
+
+ error = input_register_device(input);
+ if (error) {
+ dev_err(&pdev->dev,
+ "unable to register input device: %d\n", error);
+ goto err_free_irq;
+ }
+
+ if (plat->wakeup_enable)
+ device_init_wakeup(&pdev->dev, true);
+
+ platform_set_drvdata(pdev, keypad);
+
+ return 0;
+
+err_free_irq:
+ free_irq(keypad->irq, keypad);
+err_clk_disable:
+ clk_disable(keypad->clk);
+ clk_put(keypad->clk);
+err_iounmap:
+ iounmap(keypad->reg_base);
+err_free_mem_region:
+ release_mem_region(res->start, resource_size(res));
+err_free_mem:
+ input_free_device(input);
+ kfree(keypad);
+ return error;
+}
+
+static int __devexit ske_keypad_remove(struct platform_device *pdev)
+{
+ struct ske_keypad *keypad = platform_get_drvdata(pdev);
+ struct resource *res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+
+ free_irq(keypad->irq, keypad);
+
+ input_unregister_device(keypad->input);
+
+ clk_disable(keypad->clk);
+ clk_put(keypad->clk);
+
+ if (keypad->board->exit)
+ keypad->board->exit();
+
+ iounmap(keypad->reg_base);
+ release_mem_region(res->start, resource_size(res));
+ kfree(keypad);
+
+ return 0;
+}
+
+#ifdef CONFIG_PM
+static int ske_keypad_suspend(struct device *dev)
+{
+ struct platform_device *pdev = to_platform_device(dev);
+ struct ske_keypad *keypad = platform_get_drvdata(pdev);
+ int irq = platform_get_irq(pdev, 0);
+
+ if (device_may_wakeup(dev))
+ enable_irq_wake(irq);
+ else
+ ske_keypad_set_bits(keypad, SKE_IMSC, ~SKE_KPIMA, 0x0);
+
+ return 0;
+}
+
+static int ske_keypad_resume(struct device *dev)
+{
+ struct platform_device *pdev = to_platform_device(dev);
+ struct ske_keypad *keypad = platform_get_drvdata(pdev);
+ int irq = platform_get_irq(pdev, 0);
+
+ if (device_may_wakeup(dev))
+ disable_irq_wake(irq);
+ else
+ ske_keypad_set_bits(keypad, SKE_IMSC, 0x0, SKE_KPIMA);
+
+ return 0;
+}
+
+static const struct dev_pm_ops ske_keypad_dev_pm_ops = {
+ .suspend = ske_keypad_suspend,
+ .resume = ske_keypad_resume,
+};
+#endif
+
+static struct platform_driver ske_keypad_driver = {
+ .driver = {
+ .name = "nmk-ske-keypad",
+ .owner = THIS_MODULE,
+#ifdef CONFIG_PM
+ .pm = &ske_keypad_dev_pm_ops,
+#endif
+ },
+ .probe = ske_keypad_probe,
+ .remove = __devexit_p(ske_keypad_remove),
+};
+
+static int __init ske_keypad_init(void)
+{
+ return platform_driver_probe(&ske_keypad_driver, ske_keypad_probe);
+}
+module_init(ske_keypad_init);
+
+static void __exit ske_keypad_exit(void)
+{
+ platform_driver_unregister(&ske_keypad_driver);
+}
+module_exit(ske_keypad_exit);
+
+MODULE_LICENSE("GPL v2");
+MODULE_AUTHOR("Naveen Kumar <naveen.gaddipati@stericsson.com> / Sundar Iyer <sundar.iyer@stericsson.com>");
+MODULE_DESCRIPTION("Nomadik Scroll-Key-Encoder Keypad Driver");
+MODULE_ALIAS("platform:nomadik-ske-keypad");
diff --git a/drivers/input/keyboard/omap4-keypad.c b/drivers/input/keyboard/omap4-keypad.c
new file mode 100644
index 000000000000..45bd0977d006
--- /dev/null
+++ b/drivers/input/keyboard/omap4-keypad.c
@@ -0,0 +1,318 @@
+/*
+ * OMAP4 Keypad Driver
+ *
+ * Copyright (C) 2010 Texas Instruments
+ *
+ * Author: Abraham Arce <x0066660@ti.com>
+ * Initial Code: Syed Rafiuddin <rafiuddin.syed@ti.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ */
+
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/interrupt.h>
+#include <linux/platform_device.h>
+#include <linux/errno.h>
+#include <linux/io.h>
+#include <linux/input.h>
+#include <linux/slab.h>
+
+#include <plat/omap4-keypad.h>
+
+/* OMAP4 registers */
+#define OMAP4_KBD_REVISION 0x00
+#define OMAP4_KBD_SYSCONFIG 0x10
+#define OMAP4_KBD_SYSSTATUS 0x14
+#define OMAP4_KBD_IRQSTATUS 0x18
+#define OMAP4_KBD_IRQENABLE 0x1C
+#define OMAP4_KBD_WAKEUPENABLE 0x20
+#define OMAP4_KBD_PENDING 0x24
+#define OMAP4_KBD_CTRL 0x28
+#define OMAP4_KBD_DEBOUNCINGTIME 0x2C
+#define OMAP4_KBD_LONGKEYTIME 0x30
+#define OMAP4_KBD_TIMEOUT 0x34
+#define OMAP4_KBD_STATEMACHINE 0x38
+#define OMAP4_KBD_ROWINPUTS 0x3C
+#define OMAP4_KBD_COLUMNOUTPUTS 0x40
+#define OMAP4_KBD_FULLCODE31_0 0x44
+#define OMAP4_KBD_FULLCODE63_32 0x48
+
+/* OMAP4 bit definitions */
+#define OMAP4_DEF_IRQENABLE_EVENTEN (1 << 0)
+#define OMAP4_DEF_IRQENABLE_LONGKEY (1 << 1)
+#define OMAP4_DEF_IRQENABLE_TIMEOUTEN (1 << 2)
+#define OMAP4_DEF_WUP_EVENT_ENA (1 << 0)
+#define OMAP4_DEF_WUP_LONG_KEY_ENA (1 << 1)
+#define OMAP4_DEF_CTRL_NOSOFTMODE (1 << 1)
+#define OMAP4_DEF_CTRLPTVVALUE (1 << 2)
+#define OMAP4_DEF_CTRLPTV (1 << 1)
+
+/* OMAP4 values */
+#define OMAP4_VAL_IRQDISABLE 0x00
+#define OMAP4_VAL_DEBOUNCINGTIME 0x07
+#define OMAP4_VAL_FUNCTIONALCFG 0x1E
+
+#define OMAP4_MASK_IRQSTATUSDISABLE 0xFFFF
+
+struct omap4_keypad {
+ struct input_dev *input;
+
+ void __iomem *base;
+ int irq;
+
+ unsigned int rows;
+ unsigned int cols;
+ unsigned int row_shift;
+ unsigned char key_state[8];
+ unsigned short keymap[];
+};
+
+static void __devinit omap4_keypad_config(struct omap4_keypad *keypad_data)
+{
+ __raw_writel(OMAP4_VAL_FUNCTIONALCFG,
+ keypad_data->base + OMAP4_KBD_CTRL);
+ __raw_writel(OMAP4_VAL_DEBOUNCINGTIME,
+ keypad_data->base + OMAP4_KBD_DEBOUNCINGTIME);
+ __raw_writel(OMAP4_VAL_IRQDISABLE,
+ keypad_data->base + OMAP4_KBD_IRQSTATUS);
+ __raw_writel(OMAP4_DEF_IRQENABLE_EVENTEN | OMAP4_DEF_IRQENABLE_LONGKEY,
+ keypad_data->base + OMAP4_KBD_IRQENABLE);
+ __raw_writel(OMAP4_DEF_WUP_EVENT_ENA | OMAP4_DEF_WUP_LONG_KEY_ENA,
+ keypad_data->base + OMAP4_KBD_WAKEUPENABLE);
+}
+
+/* Interrupt handler */
+static irqreturn_t omap4_keypad_interrupt(int irq, void *dev_id)
+{
+ struct omap4_keypad *keypad_data = dev_id;
+ struct input_dev *input_dev = keypad_data->input;
+ unsigned char key_state[ARRAY_SIZE(keypad_data->key_state)];
+ unsigned int col, row, code, changed;
+ u32 *new_state = (u32 *) key_state;
+
+ /* Disable interrupts */
+ __raw_writel(OMAP4_VAL_IRQDISABLE,
+ keypad_data->base + OMAP4_KBD_IRQENABLE);
+
+ *new_state = __raw_readl(keypad_data->base + OMAP4_KBD_FULLCODE31_0);
+ *(new_state + 1) = __raw_readl(keypad_data->base
+ + OMAP4_KBD_FULLCODE63_32);
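+ /*
+ * As the loop below interprets it, key_state[] now holds one byte per
+ * row with bit N set while the key in column N is down; the XOR against
+ * the cached state reports only the keys that changed.
+ */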
+
+ for (row = 0; row < keypad_data->rows; row++) {
+ changed = key_state[row] ^ keypad_data->key_state[row];
+ if (!changed)
+ continue;
+
+ for (col = 0; col < keypad_data->cols; col++) {
+ if (changed & (1 << col)) {
+ code = MATRIX_SCAN_CODE(row, col,
+ keypad_data->row_shift);
+ input_event(input_dev, EV_MSC, MSC_SCAN, code);
+ input_report_key(input_dev,
+ keypad_data->keymap[code],
+ key_state[row] & (1 << col));
+ }
+ }
+ }
+
+ input_sync(input_dev);
+
+ memcpy(keypad_data->key_state, key_state,
+ sizeof(keypad_data->key_state));
+
+ /* clear pending interrupts */
+ __raw_writel(__raw_readl(keypad_data->base + OMAP4_KBD_IRQSTATUS),
+ keypad_data->base + OMAP4_KBD_IRQSTATUS);
+
+ /* enable interrupts */
+ __raw_writel(OMAP4_DEF_IRQENABLE_EVENTEN | OMAP4_DEF_IRQENABLE_LONGKEY,
+ keypad_data->base + OMAP4_KBD_IRQENABLE);
+
+ return IRQ_HANDLED;
+}
+
+static int __devinit omap4_keypad_probe(struct platform_device *pdev)
+{
+ const struct omap4_keypad_platform_data *pdata;
+ struct omap4_keypad *keypad_data;
+ struct input_dev *input_dev;
+ struct resource *res;
+ resource_size_t size;
+ unsigned int row_shift, max_keys;
+ int irq;
+ int error;
+
+ /* platform data */
+ pdata = pdev->dev.platform_data;
+ if (!pdata) {
+ dev_err(&pdev->dev, "no platform data defined\n");
+ return -EINVAL;
+ }
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (!res) {
+ dev_err(&pdev->dev, "no base address specified\n");
+ return -EINVAL;
+ }
+
+ irq = platform_get_irq(pdev, 0);
+ if (irq < 0) {
+ dev_err(&pdev->dev, "no keyboard irq assigned\n");
+ return -EINVAL;
+ }
+
+ if (!pdata->keymap_data) {
+ dev_err(&pdev->dev, "no keymap data defined\n");
+ return -EINVAL;
+ }
+
+ row_shift = get_count_order(pdata->cols);
+ max_keys = pdata->rows << row_shift;
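+ /*
+ * For example, an 8x8 board gives row_shift = get_count_order(8) = 3
+ * and max_keys = 8 << 3 = 64 keymap slots.
+ */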
+
+ keypad_data = kzalloc(sizeof(struct omap4_keypad) +
+ max_keys * sizeof(keypad_data->keymap[0]),
+ GFP_KERNEL);
+ if (!keypad_data) {
+ dev_err(&pdev->dev, "keypad_data memory allocation failed\n");
+ return -ENOMEM;
+ }
+
+ size = resource_size(res);
+
+ res = request_mem_region(res->start, size, pdev->name);
+ if (!res) {
+ dev_err(&pdev->dev, "can't request mem region\n");
+ error = -EBUSY;
+ goto err_free_keypad;
+ }
+
+ keypad_data->base = ioremap(res->start, resource_size(res));
+ if (!keypad_data->base) {
+ dev_err(&pdev->dev, "can't ioremap mem resource\n");
+ error = -ENOMEM;
+ goto err_release_mem;
+ }
+
+ keypad_data->irq = irq;
+ keypad_data->row_shift = row_shift;
+ keypad_data->rows = pdata->rows;
+ keypad_data->cols = pdata->cols;
+
+ /* input device allocation */
+ keypad_data->input = input_dev = input_allocate_device();
+ if (!input_dev) {
+ error = -ENOMEM;
+ goto err_unmap;
+ }
+
+ input_dev->name = pdev->name;
+ input_dev->dev.parent = &pdev->dev;
+ input_dev->id.bustype = BUS_HOST;
+ input_dev->id.vendor = 0x0001;
+ input_dev->id.product = 0x0001;
+ input_dev->id.version = 0x0001;
+
+ input_dev->keycode = keypad_data->keymap;
+ input_dev->keycodesize = sizeof(keypad_data->keymap[0]);
+ input_dev->keycodemax = max_keys;
+
+ __set_bit(EV_KEY, input_dev->evbit);
+ __set_bit(EV_REP, input_dev->evbit);
+
+ input_set_capability(input_dev, EV_MSC, MSC_SCAN);
+
+ input_set_drvdata(input_dev, keypad_data);
+
+ matrix_keypad_build_keymap(pdata->keymap_data, row_shift,
+ input_dev->keycode, input_dev->keybit);
+
+ omap4_keypad_config(keypad_data);
+
+ error = request_irq(keypad_data->irq, omap4_keypad_interrupt,
+ IRQF_TRIGGER_RISING,
+ "omap4-keypad", keypad_data);
+ if (error) {
+ dev_err(&pdev->dev, "failed to register interrupt\n");
+ goto err_free_input;
+ }
+
+ error = input_register_device(keypad_data->input);
+ if (error < 0) {
+ dev_err(&pdev->dev, "failed to register input device\n");
+ goto err_free_irq;
+ }
+
+
+ platform_set_drvdata(pdev, keypad_data);
+ return 0;
+
+err_free_irq:
+ free_irq(keypad_data->irq, keypad_data);
+err_free_input:
+ input_free_device(input_dev);
+err_unmap:
+ iounmap(keypad_data->base);
+err_release_mem:
+ release_mem_region(res->start, size);
+err_free_keypad:
+ kfree(keypad_data);
+ return error;
+}
+
+static int __devexit omap4_keypad_remove(struct platform_device *pdev)
+{
+ struct omap4_keypad *keypad_data = platform_get_drvdata(pdev);
+ struct resource *res;
+
+ free_irq(keypad_data->irq, keypad_data);
+ input_unregister_device(keypad_data->input);
+
+ iounmap(keypad_data->base);
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ release_mem_region(res->start, resource_size(res));
+
+ kfree(keypad_data);
+ platform_set_drvdata(pdev, NULL);
+
+ return 0;
+}
+
+static struct platform_driver omap4_keypad_driver = {
+ .probe = omap4_keypad_probe,
+ .remove = __devexit_p(omap4_keypad_remove),
+ .driver = {
+ .name = "omap4-keypad",
+ .owner = THIS_MODULE,
+ },
+};
+
+static int __init omap4_keypad_init(void)
+{
+ return platform_driver_register(&omap4_keypad_driver);
+}
+module_init(omap4_keypad_init);
+
+static void __exit omap4_keypad_exit(void)
+{
+ platform_driver_unregister(&omap4_keypad_driver);
+}
+module_exit(omap4_keypad_exit);
+
+MODULE_AUTHOR("Texas Instruments");
+MODULE_DESCRIPTION("OMAP4 Keypad Driver");
+MODULE_LICENSE("GPL");
+MODULE_ALIAS("platform:omap4-keypad");
diff --git a/drivers/input/keyboard/tnetv107x-keypad.c b/drivers/input/keyboard/tnetv107x-keypad.c
new file mode 100644
index 000000000000..b4a81ebfab92
--- /dev/null
+++ b/drivers/input/keyboard/tnetv107x-keypad.c
@@ -0,0 +1,340 @@
+/*
+ * Texas Instruments TNETV107X Keypad Driver
+ *
+ * Copyright (C) 2010 Texas Instruments
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation version 2.
+ *
+ * This program is distributed "as is" WITHOUT ANY WARRANTY of any
+ * kind, whether express or implied; without even the implied warranty
+ * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/kernel.h>
+#include <linux/errno.h>
+#include <linux/input.h>
+#include <linux/platform_device.h>
+#include <linux/interrupt.h>
+#include <linux/slab.h>
+#include <linux/delay.h>
+#include <linux/io.h>
+#include <linux/clk.h>
+#include <linux/input/matrix_keypad.h>
+
+#define BITS(x) (BIT(x) - 1)
+
+#define KEYPAD_ROWS 9
+#define KEYPAD_COLS 9
+
+#define DEBOUNCE_MIN 0x400ul
+#define DEBOUNCE_MAX 0x3ffffffful
+
+struct keypad_regs {
+ u32 rev;
+ u32 mode;
+ u32 mask;
+ u32 pol;
+ u32 dclock;
+ u32 rclock;
+ u32 stable_cnt;
+ u32 in_en;
+ u32 out;
+ u32 out_en;
+ u32 in;
+ u32 lock;
+ u32 pres[3];
+};
+
+#define keypad_read(kp, reg) __raw_readl(&(kp)->regs->reg)
+#define keypad_write(kp, reg, val) __raw_writel(val, &(kp)->regs->reg)
+
+struct keypad_data {
+ struct input_dev *input_dev;
+ struct resource *res;
+ struct keypad_regs __iomem *regs;
+ struct clk *clk;
+ struct device *dev;
+ spinlock_t lock;
+ int irq_press;
+ int irq_release;
+ int rows, cols, row_shift;
+ int debounce_ms, active_low;
+ u32 prev_keys[3];
+ unsigned short keycodes[];
+};
+
+static irqreturn_t keypad_irq(int irq, void *data)
+{
+ struct keypad_data *kp = data;
+ int i, bit, val, row, col, code;
+ unsigned long flags;
+ u32 curr_keys[3];
+ u32 change;
+
+ spin_lock_irqsave(&kp->lock, flags);
+
+ memset(curr_keys, 0, sizeof(curr_keys));
+ if (irq == kp->irq_press)
+ for (i = 0; i < 3; i++)
+ curr_keys[i] = keypad_read(kp, pres[i]);
+
+ for (i = 0; i < 3; i++) {
+ change = curr_keys[i] ^ kp->prev_keys[i];
+
+ while (change) {
+ bit = fls(change) - 1;
+ change ^= BIT(bit);
+ val = curr_keys[i] & BIT(bit);
+ bit += i * 32;
+ row = bit / KEYPAD_COLS;
+ col = bit % KEYPAD_COLS;
+
+ code = MATRIX_SCAN_CODE(row, col, kp->row_shift);
+ input_event(kp->input_dev, EV_MSC, MSC_SCAN, code);
+ input_report_key(kp->input_dev, kp->keycodes[code],
+ val);
+ }
+ }
+ input_sync(kp->input_dev);
+ memcpy(kp->prev_keys, curr_keys, sizeof(curr_keys));
+
+ if (irq == kp->irq_press)
+ keypad_write(kp, lock, 0); /* Allow hardware updates */
+
+ spin_unlock_irqrestore(&kp->lock, flags);
+
+ return IRQ_HANDLED;
+}
+
+static int keypad_start(struct input_dev *dev)
+{
+ struct keypad_data *kp = input_get_drvdata(dev);
+ unsigned long mask, debounce, clk_rate_khz;
+ unsigned long flags;
+
+ clk_enable(kp->clk);
+ clk_rate_khz = clk_get_rate(kp->clk) / 1000;
+
+ spin_lock_irqsave(&kp->lock, flags);
+
+ /* Initialize device registers */
+ keypad_write(kp, mode, 0);
+
+ mask = BITS(kp->rows) << KEYPAD_COLS;
+ mask |= BITS(kp->cols);
+ keypad_write(kp, mask, ~mask);
+
+ keypad_write(kp, pol, kp->active_low ? 0 : 0x3ffff);
+ keypad_write(kp, stable_cnt, 3);
+
+ debounce = kp->debounce_ms * clk_rate_khz;
+ debounce = clamp(debounce, DEBOUNCE_MIN, DEBOUNCE_MAX);
+ keypad_write(kp, dclock, debounce);
+ keypad_write(kp, rclock, 4 * debounce);
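+ /*
+ * Illustrative numbers for the writes above (the functional clock rate
+ * is board specific): at 25 MHz and debounce_ms = 10, dclock becomes
+ * 10 * 25000 = 250000 cycles and rclock four times that.
+ */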
+
+ keypad_write(kp, in_en, 1);
+
+ spin_unlock_irqrestore(&kp->lock, flags);
+
+ return 0;
+}
+
+static void keypad_stop(struct input_dev *dev)
+{
+ struct keypad_data *kp = input_get_drvdata(dev);
+
+ synchronize_irq(kp->irq_press);
+ synchronize_irq(kp->irq_release);
+ clk_disable(kp->clk);
+}
+
+static int __devinit keypad_probe(struct platform_device *pdev)
+{
+ const struct matrix_keypad_platform_data *pdata;
+ const struct matrix_keymap_data *keymap_data;
+ struct device *dev = &pdev->dev;
+ struct keypad_data *kp;
+ int error = 0, sz, row_shift;
+ u32 rev = 0;
+
+ pdata = pdev->dev.platform_data;
+ if (!pdata) {
+ dev_err(dev, "cannot find device data\n");
+ return -EINVAL;
+ }
+
+ keymap_data = pdata->keymap_data;
+ if (!keymap_data) {
+ dev_err(dev, "cannot find keymap data\n");
+ return -EINVAL;
+ }
+
+ row_shift = get_count_order(pdata->num_col_gpios);
+ sz = offsetof(struct keypad_data, keycodes);
+ sz += (pdata->num_row_gpios << row_shift) * sizeof(kp->keycodes[0]);
+ kp = kzalloc(sz, GFP_KERNEL);
+ if (!kp) {
+ dev_err(dev, "cannot allocate device info\n");
+ return -ENOMEM;
+ }
+
+ kp->dev = dev;
+ kp->rows = pdata->num_row_gpios;
+ kp->cols = pdata->num_col_gpios;
+ kp->row_shift = row_shift;
+ platform_set_drvdata(pdev, kp);
+ spin_lock_init(&kp->lock);
+
+ kp->irq_press = platform_get_irq_byname(pdev, "press");
+ kp->irq_release = platform_get_irq_byname(pdev, "release");
+ if (kp->irq_press < 0 || kp->irq_release < 0) {
+ dev_err(dev, "cannot determine device interrupts\n");
+ error = -ENODEV;
+ goto error_res;
+ }
+
+ kp->res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (!kp->res) {
+ dev_err(dev, "cannot determine register area\n");
+ error = -ENODEV;
+ goto error_res;
+ }
+
+ if (!request_mem_region(kp->res->start, resource_size(kp->res),
+ pdev->name)) {
+ dev_err(dev, "cannot claim register memory\n");
+ kp->res = NULL;
+ error = -EINVAL;
+ goto error_res;
+ }
+
+ kp->regs = ioremap(kp->res->start, resource_size(kp->res));
+ if (!kp->regs) {
+ dev_err(dev, "cannot map register memory\n");
+ error = -ENOMEM;
+ goto error_map;
+ }
+
+ kp->clk = clk_get(dev, NULL);
+ if (IS_ERR(kp->clk)) {
+ dev_err(dev, "cannot claim device clock\n");
+ error = PTR_ERR(kp->clk);
+ goto error_clk;
+ }
+
+ error = request_threaded_irq(kp->irq_press, NULL, keypad_irq, 0,
+ dev_name(dev), kp);
+ if (error < 0) {
+ dev_err(kp->dev, "Could not allocate keypad press key irq\n");
+ goto error_irq_press;
+ }
+
+ error = request_threaded_irq(kp->irq_release, NULL, keypad_irq, 0,
+ dev_name(dev), kp);
+ if (error < 0) {
+ dev_err(kp->dev, "Could not allocate keypad release key irq\n");
+ goto error_irq_release;
+ }
+
+ kp->input_dev = input_allocate_device();
+ if (!kp->input_dev) {
+ dev_err(dev, "cannot allocate input device\n");
+ error = -ENOMEM;
+ goto error_input;
+ }
+ input_set_drvdata(kp->input_dev, kp);
+
+ kp->input_dev->name = pdev->name;
+ kp->input_dev->dev.parent = &pdev->dev;
+ kp->input_dev->open = keypad_start;
+ kp->input_dev->close = keypad_stop;
+ kp->input_dev->evbit[0] = BIT_MASK(EV_KEY);
+ if (!pdata->no_autorepeat)
+ kp->input_dev->evbit[0] |= BIT_MASK(EV_REP);
+
+ clk_enable(kp->clk);
+ rev = keypad_read(kp, rev);
+ kp->input_dev->id.bustype = BUS_HOST;
+ kp->input_dev->id.product = ((rev >> 8) & 0x07);
+ kp->input_dev->id.version = ((rev >> 16) & 0xfff);
+ clk_disable(kp->clk);
+
+ kp->input_dev->keycode = kp->keycodes;
+ kp->input_dev->keycodesize = sizeof(kp->keycodes[0]);
+ kp->input_dev->keycodemax = kp->rows << kp->row_shift;
+
+ matrix_keypad_build_keymap(keymap_data, kp->row_shift, kp->keycodes,
+ kp->input_dev->keybit);
+
+ input_set_capability(kp->input_dev, EV_MSC, MSC_SCAN);
+
+ error = input_register_device(kp->input_dev);
+ if (error < 0) {
+ dev_err(dev, "Could not register input device\n");
+ goto error_reg;
+ }
+
+ return 0;
+
+
+error_reg:
+ input_free_device(kp->input_dev);
+error_input:
+ free_irq(kp->irq_release, kp);
+error_irq_release:
+ free_irq(kp->irq_press, kp);
+error_irq_press:
+ clk_put(kp->clk);
+error_clk:
+ iounmap(kp->regs);
+error_map:
+ release_mem_region(kp->res->start, resource_size(kp->res));
+error_res:
+ platform_set_drvdata(pdev, NULL);
+ kfree(kp);
+ return error;
+}
+
+static int __devexit keypad_remove(struct platform_device *pdev)
+{
+ struct keypad_data *kp = platform_get_drvdata(pdev);
+
+ free_irq(kp->irq_press, kp);
+ free_irq(kp->irq_release, kp);
+ input_unregister_device(kp->input_dev);
+ clk_put(kp->clk);
+ iounmap(kp->regs);
+ release_mem_region(kp->res->start, resource_size(kp->res));
+ platform_set_drvdata(pdev, NULL);
+ kfree(kp);
+
+ return 0;
+}
+
+static struct platform_driver keypad_driver = {
+ .probe = keypad_probe,
+ .remove = __devexit_p(keypad_remove),
+ .driver.name = "tnetv107x-keypad",
+ .driver.owner = THIS_MODULE,
+};
+
+static int __init keypad_init(void)
+{
+ return platform_driver_register(&keypad_driver);
+}
+
+static void __exit keypad_exit(void)
+{
+ platform_driver_unregister(&keypad_driver);
+}
+
+module_init(keypad_init);
+module_exit(keypad_exit);
+
+MODULE_AUTHOR("Cyril Chemparathy");
+MODULE_DESCRIPTION("TNETV107X Keypad Driver");
+MODULE_ALIAS("platform: tnetv107x-keypad");
+MODULE_LICENSE("GPL");
diff --git a/drivers/input/keyboard/twl4030_keypad.c b/drivers/input/keyboard/twl4030_keypad.c
index fb16b5e5ea13..09bef79d9da1 100644
--- a/drivers/input/keyboard/twl4030_keypad.c
+++ b/drivers/input/keyboard/twl4030_keypad.c
@@ -406,23 +406,22 @@ static int __devinit twl4030_kp_probe(struct platform_device *pdev)
if (error) {
dev_info(kp->dbg_dev, "request_irq failed for irq no=%d\n",
kp->irq);
- goto err3;
+ goto err2;
}
/* Enable KP and TO interrupts now. */
reg = (u8) ~(KEYP_IMR1_KP | KEYP_IMR1_TO);
if (twl4030_kpwrite_u8(kp, reg, KEYP_IMR1)) {
error = -EIO;
- goto err4;
+ goto err3;
}
platform_set_drvdata(pdev, kp);
return 0;
-err4:
+err3:
/* mask all events - we don't care about the result */
(void) twl4030_kpwrite_u8(kp, 0xff, KEYP_IMR1);
-err3:
free_irq(kp->irq, NULL);
err2:
input_unregister_device(input);
diff --git a/drivers/input/misc/Kconfig b/drivers/input/misc/Kconfig
index b49e23379723..b99b8cbde02f 100644
--- a/drivers/input/misc/Kconfig
+++ b/drivers/input/misc/Kconfig
@@ -22,6 +22,16 @@ config INPUT_88PM860X_ONKEY
To compile this driver as a module, choose M here: the module
will be called 88pm860x_onkey.
+config INPUT_AB8500_PONKEY
+ tristate "AB8500 Pon (PowerOn) Key"
+ depends on AB8500_CORE
+ help
+ Say Y here to use the PowerOn Key for ST-Ericsson's AB8500
+ Mix-Sig PMIC.
+
+ To compile this driver as a module, choose M here: the module
+ will be called ab8500-ponkey.
+
config INPUT_AD714X
tristate "Analog Devices AD714x Capacitance Touch Sensor"
help
diff --git a/drivers/input/misc/Makefile b/drivers/input/misc/Makefile
index 19ccca78fa76..1fe1f6c8b737 100644
--- a/drivers/input/misc/Makefile
+++ b/drivers/input/misc/Makefile
@@ -5,6 +5,7 @@
# Each configuration option enables a list of files.
obj-$(CONFIG_INPUT_88PM860X_ONKEY) += 88pm860x_onkey.o
+obj-$(CONFIG_INPUT_AB8500_PONKEY) += ab8500-ponkey.o
obj-$(CONFIG_INPUT_AD714X) += ad714x.o
obj-$(CONFIG_INPUT_AD714X_I2C) += ad714x-i2c.o
obj-$(CONFIG_INPUT_AD714X_SPI) += ad714x-spi.o
diff --git a/drivers/input/misc/ab8500-ponkey.c b/drivers/input/misc/ab8500-ponkey.c
new file mode 100644
index 000000000000..3d3288a78fdc
--- /dev/null
+++ b/drivers/input/misc/ab8500-ponkey.c
@@ -0,0 +1,157 @@
+/*
+ * Copyright (C) ST-Ericsson SA 2010
+ *
+ * License Terms: GNU General Public License v2
+ * Author: Sundar Iyer <sundar.iyer@stericsson.com> for ST-Ericsson
+ *
+ * AB8500 Power-On Key handler
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/input.h>
+#include <linux/interrupt.h>
+#include <linux/mfd/ab8500.h>
+#include <linux/slab.h>
+
+/**
+ * struct ab8500_ponkey - ab8500 ponkey information
+ * @idev: pointer to input device
+ * @ab8500: ab8500 parent
+ * @irq_dbf: irq number for falling transition
+ * @irq_dbr: irq number for rising transition
+ */
+struct ab8500_ponkey {
+ struct input_dev *idev;
+ struct ab8500 *ab8500;
+ int irq_dbf;
+ int irq_dbr;
+};
+
+/* AB8500 gives us an interrupt when ONKEY is held */
+static irqreturn_t ab8500_ponkey_handler(int irq, void *data)
+{
+ struct ab8500_ponkey *ponkey = data;
+
+ if (irq == ponkey->irq_dbf)
+ input_report_key(ponkey->idev, KEY_POWER, true);
+ else if (irq == ponkey->irq_dbr)
+ input_report_key(ponkey->idev, KEY_POWER, false);
+
+ input_sync(ponkey->idev);
+
+ return IRQ_HANDLED;
+}
+
+static int __devinit ab8500_ponkey_probe(struct platform_device *pdev)
+{
+ struct ab8500 *ab8500 = dev_get_drvdata(pdev->dev.parent);
+ struct ab8500_ponkey *ponkey;
+ struct input_dev *input;
+ int irq_dbf, irq_dbr;
+ int error;
+
+ irq_dbf = platform_get_irq_byname(pdev, "ONKEY_DBF");
+ if (irq_dbf < 0) {
+ dev_err(&pdev->dev, "No IRQ for ONKEY_DBF, error=%d\n", irq_dbf);
+ return irq_dbf;
+ }
+
+ irq_dbr = platform_get_irq_byname(pdev, "ONKEY_DBR");
+ if (irq_dbr < 0) {
+ dev_err(&pdev->dev, "No IRQ for ONKEY_DBR, error=%d\n", irq_dbr);
+ return irq_dbr;
+ }
+
+ ponkey = kzalloc(sizeof(struct ab8500_ponkey), GFP_KERNEL);
+ input = input_allocate_device();
+ if (!ponkey || !input) {
+ error = -ENOMEM;
+ goto err_free_mem;
+ }
+
+ ponkey->idev = input;
+ ponkey->ab8500 = ab8500;
+ ponkey->irq_dbf = irq_dbf;
+ ponkey->irq_dbr = irq_dbr;
+
+ input->name = "AB8500 POn(PowerOn) Key";
+ input->dev.parent = &pdev->dev;
+
+ input_set_capability(input, EV_KEY, KEY_POWER);
+
+ error = request_any_context_irq(ponkey->irq_dbf, ab8500_ponkey_handler,
+ 0, "ab8500-ponkey-dbf", ponkey);
+ if (error < 0) {
+ dev_err(ab8500->dev, "Failed to request dbf IRQ#%d: %d\n",
+ ponkey->irq_dbf, error);
+ goto err_free_mem;
+ }
+
+ error = request_any_context_irq(ponkey->irq_dbr, ab8500_ponkey_handler,
+ 0, "ab8500-ponkey-dbr", ponkey);
+ if (error < 0) {
+ dev_err(ab8500->dev, "Failed to request dbr IRQ#%d: %d\n",
+ ponkey->irq_dbr, error);
+ goto err_free_dbf_irq;
+ }
+
+ error = input_register_device(ponkey->idev);
+ if (error) {
+ dev_err(ab8500->dev, "Can't register input device: %d\n", error);
+ goto err_free_dbr_irq;
+ }
+
+ platform_set_drvdata(pdev, ponkey);
+ return 0;
+
+err_free_dbr_irq:
+ free_irq(ponkey->irq_dbr, ponkey);
+err_free_dbf_irq:
+ free_irq(ponkey->irq_dbf, ponkey);
+err_free_mem:
+ input_free_device(input);
+ kfree(ponkey);
+
+ return error;
+}
+
+static int __devexit ab8500_ponkey_remove(struct platform_device *pdev)
+{
+ struct ab8500_ponkey *ponkey = platform_get_drvdata(pdev);
+
+ free_irq(ponkey->irq_dbf, ponkey);
+ free_irq(ponkey->irq_dbr, ponkey);
+ input_unregister_device(ponkey->idev);
+ kfree(ponkey);
+
+ platform_set_drvdata(pdev, NULL);
+
+ return 0;
+}
+
+static struct platform_driver ab8500_ponkey_driver = {
+ .driver = {
+ .name = "ab8500-poweron-key",
+ .owner = THIS_MODULE,
+ },
+ .probe = ab8500_ponkey_probe,
+ .remove = __devexit_p(ab8500_ponkey_remove),
+};
+
+static int __init ab8500_ponkey_init(void)
+{
+ return platform_driver_register(&ab8500_ponkey_driver);
+}
+module_init(ab8500_ponkey_init);
+
+static void __exit ab8500_ponkey_exit(void)
+{
+ platform_driver_unregister(&ab8500_ponkey_driver);
+}
+module_exit(ab8500_ponkey_exit);
+
+MODULE_LICENSE("GPL v2");
+MODULE_AUTHOR("Sundar Iyer <sundar.iyer@stericsson.com>");
+MODULE_DESCRIPTION("ST-Ericsson AB8500 Power-ON(Pon) Key driver");
diff --git a/drivers/input/misc/ati_remote2.c b/drivers/input/misc/ati_remote2.c
index 23257652b8e8..0b0e9be63542 100644
--- a/drivers/input/misc/ati_remote2.c
+++ b/drivers/input/misc/ati_remote2.c
@@ -483,51 +483,88 @@ static void ati_remote2_complete_key(struct urb *urb)
}
static int ati_remote2_getkeycode(struct input_dev *idev,
- unsigned int scancode, unsigned int *keycode)
+ struct input_keymap_entry *ke)
{
struct ati_remote2 *ar2 = input_get_drvdata(idev);
unsigned int mode;
- int index;
+ int offset;
+ unsigned int index;
+ unsigned int scancode;
+
+ if (ke->flags & INPUT_KEYMAP_BY_INDEX) {
+ index = ke->index;
+ if (index >= ATI_REMOTE2_MODES *
+ ARRAY_SIZE(ati_remote2_key_table))
+ return -EINVAL;
+
+ mode = ke->index / ARRAY_SIZE(ati_remote2_key_table);
+ offset = ke->index % ARRAY_SIZE(ati_remote2_key_table);
+ scancode = (mode << 8) + ati_remote2_key_table[offset].hw_code;
+ } else {
+ if (input_scancode_to_scalar(ke, &scancode))
+ return -EINVAL;
+
+ mode = scancode >> 8;
+ if (mode > ATI_REMOTE2_PC)
+ return -EINVAL;
+
+ offset = ati_remote2_lookup(scancode & 0xff);
+ if (offset < 0)
+ return -EINVAL;
+
+ index = mode * ARRAY_SIZE(ati_remote2_key_table) + offset;
+ }
- mode = scancode >> 8;
- if (mode > ATI_REMOTE2_PC || !((1 << mode) & ar2->mode_mask))
- return -EINVAL;
+ ke->keycode = ar2->keycode[mode][offset];
+ ke->len = sizeof(scancode);
+ memcpy(&ke->scancode, &scancode, sizeof(scancode));
+ ke->index = index;
- index = ati_remote2_lookup(scancode & 0xFF);
- if (index < 0)
- return -EINVAL;
-
- *keycode = ar2->keycode[mode][index];
return 0;
}
static int ati_remote2_setkeycode(struct input_dev *idev,
- unsigned int scancode, unsigned int keycode)
+ const struct input_keymap_entry *ke,
+ unsigned int *old_keycode)
{
struct ati_remote2 *ar2 = input_get_drvdata(idev);
- unsigned int mode, old_keycode;
- int index;
-
- mode = scancode >> 8;
- if (mode > ATI_REMOTE2_PC || !((1 << mode) & ar2->mode_mask))
- return -EINVAL;
-
- index = ati_remote2_lookup(scancode & 0xFF);
- if (index < 0)
- return -EINVAL;
+ unsigned int mode;
+ int offset;
+ unsigned int index;
+ unsigned int scancode;
+
+ if (ke->flags & INPUT_KEYMAP_BY_INDEX) {
+ if (ke->index >= ATI_REMOTE2_MODES *
+ ARRAY_SIZE(ati_remote2_key_table))
+ return -EINVAL;
+
+ mode = ke->index / ARRAY_SIZE(ati_remote2_key_table);
+ offset = ke->index % ARRAY_SIZE(ati_remote2_key_table);
+ } else {
+ if (input_scancode_to_scalar(ke, &scancode))
+ return -EINVAL;
+
+ mode = scancode >> 8;
+ if (mode > ATI_REMOTE2_PC)
+ return -EINVAL;
+
+ offset = ati_remote2_lookup(scancode & 0xff);
+ if (offset < 0)
+ return -EINVAL;
+ }
- old_keycode = ar2->keycode[mode][index];
- ar2->keycode[mode][index] = keycode;
- __set_bit(keycode, idev->keybit);
+ *old_keycode = ar2->keycode[mode][offset];
+ ar2->keycode[mode][offset] = ke->keycode;
+ __set_bit(ke->keycode, idev->keybit);
for (mode = 0; mode < ATI_REMOTE2_MODES; mode++) {
for (index = 0; index < ARRAY_SIZE(ati_remote2_key_table); index++) {
- if (ar2->keycode[mode][index] == old_keycode)
+ if (ar2->keycode[mode][index] == *old_keycode)
return 0;
}
}
- __clear_bit(old_keycode, idev->keybit);
+ __clear_bit(*old_keycode, idev->keybit);
return 0;
}
@@ -575,8 +612,8 @@ static int ati_remote2_input_init(struct ati_remote2 *ar2)
idev->open = ati_remote2_open;
idev->close = ati_remote2_close;
- idev->getkeycode = ati_remote2_getkeycode;
- idev->setkeycode = ati_remote2_setkeycode;
+ idev->getkeycode_new = ati_remote2_getkeycode;
+ idev->setkeycode_new = ati_remote2_setkeycode;
idev->name = ar2->name;
idev->phys = ar2->phys;
diff --git a/drivers/input/misc/max8925_onkey.c b/drivers/input/misc/max8925_onkey.c
index 80af44608018..7de0ded4ccc3 100644
--- a/drivers/input/misc/max8925_onkey.c
+++ b/drivers/input/misc/max8925_onkey.c
@@ -27,27 +27,37 @@
#include <linux/mfd/max8925.h>
#include <linux/slab.h>
+#define SW_INPUT (1 << 7) /* 0/1 -- up/down */
#define HARDRESET_EN (1 << 7)
#define PWREN_EN (1 << 7)
struct max8925_onkey_info {
struct input_dev *idev;
struct i2c_client *i2c;
- int irq;
+ struct device *dev;
+ int irq[2];
};
/*
- * MAX8925 gives us an interrupt when ONKEY is held for 3 seconds.
+ * MAX8925 gives us an interrupt when ONKEY is pressed or released.
* max8925_set_bits() operates I2C bus and may sleep. So implement
* it in thread IRQ handler.
*/
static irqreturn_t max8925_onkey_handler(int irq, void *data)
{
struct max8925_onkey_info *info = data;
-
- input_report_key(info->idev, KEY_POWER, 1);
+ int ret, event;
+
+ ret = max8925_reg_read(info->i2c, MAX8925_ON_OFF_STATUS);
+ if (ret & SW_INPUT)
+ event = 1;
+ else
+ event = 0;
+ input_report_key(info->idev, KEY_POWER, event);
input_sync(info->idev);
+ dev_dbg(info->dev, "onkey event:%d\n", event);
+
/* Enable hardreset to halt if system isn't shutdown on time */
max8925_set_bits(info->i2c, MAX8925_SYSENSEL,
HARDRESET_EN, HARDRESET_EN);
@@ -59,14 +69,42 @@ static int __devinit max8925_onkey_probe(struct platform_device *pdev)
{
struct max8925_chip *chip = dev_get_drvdata(pdev->dev.parent);
struct max8925_onkey_info *info;
- int error;
+ int irq[2], error;
+
+ irq[0] = platform_get_irq(pdev, 0);
+ if (irq[0] < 0) {
+ dev_err(&pdev->dev, "No IRQ resource!\n");
+ return -EINVAL;
+ }
+ irq[1] = platform_get_irq(pdev, 1);
+ if (irq[1] < 0) {
+ dev_err(&pdev->dev, "No IRQ resource!\n");
+ return -EINVAL;
+ }
info = kzalloc(sizeof(struct max8925_onkey_info), GFP_KERNEL);
if (!info)
return -ENOMEM;
info->i2c = chip->i2c;
- info->irq = chip->irq_base + MAX8925_IRQ_GPM_SW_3SEC;
+ info->dev = &pdev->dev;
+ irq[0] += chip->irq_base;
+ irq[1] += chip->irq_base;
+
+ error = request_threaded_irq(irq[0], NULL, max8925_onkey_handler,
+ IRQF_ONESHOT, "onkey-down", info);
+ if (error < 0) {
+ dev_err(chip->dev, "Failed to request IRQ: #%d: %d\n",
+ irq[0], error);
+ goto out;
+ }
+ error = request_threaded_irq(irq[1], NULL, max8925_onkey_handler,
+ IRQF_ONESHOT, "onkey-up", info);
+ if (error < 0) {
+ dev_err(chip->dev, "Failed to request IRQ: #%d: %d\n",
+ irq[1], error);
+ goto out_irq;
+ }
info->idev = input_allocate_device();
if (!info->idev) {
@@ -79,32 +117,29 @@ static int __devinit max8925_onkey_probe(struct platform_device *pdev)
info->idev->phys = "max8925_on/input0";
info->idev->id.bustype = BUS_I2C;
info->idev->dev.parent = &pdev->dev;
+ info->irq[0] = irq[0];
+ info->irq[1] = irq[1];
info->idev->evbit[0] = BIT_MASK(EV_KEY);
info->idev->keybit[BIT_WORD(KEY_POWER)] = BIT_MASK(KEY_POWER);
- error = request_threaded_irq(info->irq, NULL, max8925_onkey_handler,
- IRQF_ONESHOT, "onkey", info);
- if (error < 0) {
- dev_err(chip->dev, "Failed to request IRQ: #%d: %d\n",
- info->irq, error);
- goto out_irq;
- }
error = input_register_device(info->idev);
if (error) {
dev_err(chip->dev, "Can't register input device: %d\n", error);
- goto out;
+ goto out_reg;
}
platform_set_drvdata(pdev, info);
return 0;
-out:
- free_irq(info->irq, info);
-out_irq:
+out_reg:
input_free_device(info->idev);
out_input:
+ free_irq(info->irq[1], info);
+out_irq:
+ free_irq(info->irq[0], info);
+out:
kfree(info);
return error;
}
@@ -113,7 +148,8 @@ static int __devexit max8925_onkey_remove(struct platform_device *pdev)
{
struct max8925_onkey_info *info = platform_get_drvdata(pdev);
- free_irq(info->irq, info);
+ free_irq(info->irq[0], info);
+ free_irq(info->irq[1], info);
input_unregister_device(info->idev);
kfree(info);
diff --git a/drivers/input/misc/pcf8574_keypad.c b/drivers/input/misc/pcf8574_keypad.c
index 4b42ffc0532a..d1583aea1721 100644
--- a/drivers/input/misc/pcf8574_keypad.c
+++ b/drivers/input/misc/pcf8574_keypad.c
@@ -127,14 +127,6 @@ static int __devinit pcf8574_kp_probe(struct i2c_client *client, const struct i2
idev->id.product = 0x0001;
idev->id.version = 0x0100;
- input_set_drvdata(idev, lp);
-
- ret = input_register_device(idev);
- if (ret) {
- dev_err(&client->dev, "input_register_device() failed\n");
- goto fail_register;
- }
-
lp->laststate = read_state(lp);
ret = request_threaded_irq(client->irq, NULL, pcf8574_kp_irq_handler,
@@ -142,16 +134,21 @@ static int __devinit pcf8574_kp_probe(struct i2c_client *client, const struct i2
DRV_NAME, lp);
if (ret) {
dev_err(&client->dev, "IRQ %d is not free\n", client->irq);
- goto fail_irq;
+ goto fail_free_device;
+ }
+
+ ret = input_register_device(idev);
+ if (ret) {
+ dev_err(&client->dev, "input_register_device() failed\n");
+ goto fail_free_irq;
}
i2c_set_clientdata(client, lp);
return 0;
- fail_irq:
- input_unregister_device(idev);
- fail_register:
- input_set_drvdata(idev, NULL);
+ fail_free_irq:
+ free_irq(client->irq, lp);
+ fail_free_device:
input_free_device(idev);
fail_allocate:
kfree(lp);
diff --git a/drivers/input/misc/powermate.c b/drivers/input/misc/powermate.c
index bf170f6b4422..f45947190e4f 100644
--- a/drivers/input/misc/powermate.c
+++ b/drivers/input/misc/powermate.c
@@ -280,7 +280,7 @@ static int powermate_alloc_buffers(struct usb_device *udev, struct powermate_dev
pm->configcr = kmalloc(sizeof(*(pm->configcr)), GFP_KERNEL);
if (!pm->configcr)
- return -1;
+ return -ENOMEM;
return 0;
}
diff --git a/drivers/input/misc/twl4030-vibra.c b/drivers/input/misc/twl4030-vibra.c
index 4f9b2afc24e8..014dd4ad0d4f 100644
--- a/drivers/input/misc/twl4030-vibra.c
+++ b/drivers/input/misc/twl4030-vibra.c
@@ -271,7 +271,7 @@ static struct platform_driver twl4030_vibra_driver = {
.probe = twl4030_vibra_probe,
.remove = __devexit_p(twl4030_vibra_remove),
.driver = {
- .name = "twl4030_codec_vibra",
+ .name = "twl4030-vibra",
.owner = THIS_MODULE,
#ifdef CONFIG_PM
.pm = &twl4030_vibra_pm_ops,
@@ -291,7 +291,7 @@ static void __exit twl4030_vibra_exit(void)
}
module_exit(twl4030_vibra_exit);
-MODULE_ALIAS("platform:twl4030_codec_vibra");
+MODULE_ALIAS("platform:twl4030-vibra");
MODULE_DESCRIPTION("TWL4030 Vibra driver");
MODULE_LICENSE("GPL");
diff --git a/drivers/input/mouse/appletouch.c b/drivers/input/mouse/appletouch.c
index a9cf76831634..b77f9991278e 100644
--- a/drivers/input/mouse/appletouch.c
+++ b/drivers/input/mouse/appletouch.c
@@ -630,7 +630,7 @@ static void atp_complete_geyser_3_4(struct urb *urb)
/* Just update the base values (i.e. touchpad in untouched state) */
if (dev->data[dev->info->datalen - 1] & ATP_STATUS_BASE_UPDATE) {
- dprintk(KERN_DEBUG "appletouch: updated base values\n");
+ dprintk("appletouch: updated base values\n");
memcpy(dev->xy_old, dev->xy_cur, sizeof(dev->xy_old));
goto exit;
diff --git a/drivers/input/mouse/elantech.c b/drivers/input/mouse/elantech.c
index 48311204ba51..04d9bf320a4f 100644
--- a/drivers/input/mouse/elantech.c
+++ b/drivers/input/mouse/elantech.c
@@ -699,7 +699,7 @@ int elantech_init(struct psmouse *psmouse)
psmouse->private = etd = kzalloc(sizeof(struct elantech_data), GFP_KERNEL);
if (!etd)
- return -1;
+ return -ENOMEM;
etd->parity[0] = 1;
for (i = 1; i < 256; i++)
diff --git a/drivers/input/mouse/psmouse-base.c b/drivers/input/mouse/psmouse-base.c
index 73a7af2542a8..cd9d0c97e429 100644
--- a/drivers/input/mouse/psmouse-base.c
+++ b/drivers/input/mouse/psmouse-base.c
@@ -1584,10 +1584,10 @@ static ssize_t psmouse_attr_set_protocol(struct psmouse *psmouse, void *data, co
if (!new_dev)
return -ENOMEM;
- while (serio->child) {
+ while (!list_empty(&serio->children)) {
if (++retry > 3) {
printk(KERN_WARNING
- "psmouse: failed to destroy child port, "
+ "psmouse: failed to destroy children ports, "
"protocol change aborted.\n");
input_free_device(new_dev);
return -EIO;
diff --git a/drivers/input/mouse/synaptics.c b/drivers/input/mouse/synaptics.c
index 96b70a43515f..2e300a460556 100644
--- a/drivers/input/mouse/synaptics.c
+++ b/drivers/input/mouse/synaptics.c
@@ -294,7 +294,29 @@ static int synaptics_pt_write(struct serio *serio, unsigned char c)
return 0;
}
-static inline int synaptics_is_pt_packet(unsigned char *buf)
+static int synaptics_pt_start(struct serio *serio)
+{
+ struct psmouse *parent = serio_get_drvdata(serio->parent);
+ struct synaptics_data *priv = parent->private;
+
+ serio_pause_rx(parent->ps2dev.serio);
+ priv->pt_port = serio;
+ serio_continue_rx(parent->ps2dev.serio);
+
+ return 0;
+}
+
+static void synaptics_pt_stop(struct serio *serio)
+{
+ struct psmouse *parent = serio_get_drvdata(serio->parent);
+ struct synaptics_data *priv = parent->private;
+
+ serio_pause_rx(parent->ps2dev.serio);
+ priv->pt_port = NULL;
+ serio_continue_rx(parent->ps2dev.serio);
+}
+
+static int synaptics_is_pt_packet(unsigned char *buf)
{
return (buf[0] & 0xFC) == 0x84 && (buf[3] & 0xCC) == 0xC4;
}
@@ -315,9 +337,8 @@ static void synaptics_pass_pt_packet(struct serio *ptport, unsigned char *packet
static void synaptics_pt_activate(struct psmouse *psmouse)
{
- struct serio *ptport = psmouse->ps2dev.serio->child;
- struct psmouse *child = serio_get_drvdata(ptport);
struct synaptics_data *priv = psmouse->private;
+ struct psmouse *child = serio_get_drvdata(priv->pt_port);
/* adjust the touchpad to child's choice of protocol */
if (child) {
@@ -345,6 +366,8 @@ static void synaptics_pt_create(struct psmouse *psmouse)
strlcpy(serio->name, "Synaptics pass-through", sizeof(serio->name));
strlcpy(serio->phys, "synaptics-pt/serio0", sizeof(serio->name));
serio->write = synaptics_pt_write;
+ serio->start = synaptics_pt_start;
+ serio->stop = synaptics_pt_stop;
serio->parent = psmouse->ps2dev.serio;
psmouse->pt_activate = synaptics_pt_activate;
@@ -578,9 +601,10 @@ static psmouse_ret_t synaptics_process_byte(struct psmouse *psmouse)
if (unlikely(priv->pkt_type == SYN_NEWABS))
priv->pkt_type = synaptics_detect_pkt_type(psmouse);
- if (SYN_CAP_PASS_THROUGH(priv->capabilities) && synaptics_is_pt_packet(psmouse->packet)) {
- if (psmouse->ps2dev.serio->child)
- synaptics_pass_pt_packet(psmouse->ps2dev.serio->child, psmouse->packet);
+ if (SYN_CAP_PASS_THROUGH(priv->capabilities) &&
+ synaptics_is_pt_packet(psmouse->packet)) {
+ if (priv->pt_port)
+ synaptics_pass_pt_packet(priv->pt_port, psmouse->packet);
} else
synaptics_process_packet(psmouse);
@@ -731,7 +755,7 @@ int synaptics_init(struct psmouse *psmouse)
psmouse->private = priv = kzalloc(sizeof(struct synaptics_data), GFP_KERNEL);
if (!priv)
- return -1;
+ return -ENOMEM;
psmouse_reset(psmouse);
diff --git a/drivers/input/mouse/synaptics.h b/drivers/input/mouse/synaptics.h
index b6aa7d20d8a3..0aefaa885871 100644
--- a/drivers/input/mouse/synaptics.h
+++ b/drivers/input/mouse/synaptics.h
@@ -51,7 +51,8 @@
#define SYN_EXT_CAP_REQUESTS(c) (((c) & 0x700000) >> 20)
#define SYN_CAP_MULTI_BUTTON_NO(ec) (((ec) & 0x00f000) >> 12)
#define SYN_CAP_PRODUCT_ID(ec) (((ec) & 0xff0000) >> 16)
-#define SYN_CAP_CLICKPAD(ex0c) ((ex0c) & 0x100100)
+#define SYN_CAP_CLICKPAD(ex0c) ((ex0c) & 0x100000) /* 1-button ClickPad */
+#define SYN_CAP_CLICKPAD2BTN(ex0c) ((ex0c) & 0x000100) /* 2-button ClickPad */
#define SYN_CAP_MAX_DIMENSIONS(ex0c) ((ex0c) & 0x020000)
/* synaptics modes query bits */
@@ -110,6 +111,8 @@ struct synaptics_data {
unsigned char pkt_type; /* packet type - old, new, etc */
unsigned char mode; /* current mode byte */
int scroll;
+
+ struct serio *pt_port; /* Pass-through serio port */
};
void synaptics_module_init(void);
diff --git a/drivers/input/mouse/trackpoint.c b/drivers/input/mouse/trackpoint.c
index 0643e49ca603..54b2fa892e19 100644
--- a/drivers/input/mouse/trackpoint.c
+++ b/drivers/input/mouse/trackpoint.c
@@ -303,7 +303,7 @@ int trackpoint_detect(struct psmouse *psmouse, bool set_properties)
psmouse->private = kzalloc(sizeof(struct trackpoint_data), GFP_KERNEL);
if (!psmouse->private)
- return -1;
+ return -ENOMEM;
psmouse->vendor = "IBM";
psmouse->name = "TrackPoint";
diff --git a/drivers/input/mousedev.c b/drivers/input/mousedev.c
index 31ec7265aac6..2a00ddf4f23a 100644
--- a/drivers/input/mousedev.c
+++ b/drivers/input/mousedev.c
@@ -867,7 +867,7 @@ static struct mousedev *mousedev_create(struct input_dev *dev,
spin_lock_init(&mousedev->client_lock);
mutex_init(&mousedev->mutex);
lockdep_set_subclass(&mousedev->mutex,
- minor == MOUSEDEV_MIX ? MOUSEDEV_MIX : 0);
+ minor == MOUSEDEV_MIX ? SINGLE_DEPTH_NESTING : 0);
init_waitqueue_head(&mousedev->wait);
if (minor == MOUSEDEV_MIX)
diff --git a/drivers/input/serio/Kconfig b/drivers/input/serio/Kconfig
index 3bfe8fafc6ad..6256233d2bfb 100644
--- a/drivers/input/serio/Kconfig
+++ b/drivers/input/serio/Kconfig
@@ -226,4 +226,13 @@ config SERIO_AMS_DELTA
To compile this driver as a module, choose M here;
the module will be called ams_delta_serio.
+config SERIO_PS2MULT
+ tristate "TQC PS/2 multiplexer"
+ help
+ Say Y here if you have a PS/2 line multiplexer like the one
+ present on TQC boards.
+
+ To compile this driver as a module, choose M here: the
+ module will be called ps2mult.
+
endif
diff --git a/drivers/input/serio/Makefile b/drivers/input/serio/Makefile
index 84c80bf7185e..dbbe37616c92 100644
--- a/drivers/input/serio/Makefile
+++ b/drivers/input/serio/Makefile
@@ -18,6 +18,7 @@ obj-$(CONFIG_SERIO_GSCPS2) += gscps2.o
obj-$(CONFIG_HP_SDC) += hp_sdc.o
obj-$(CONFIG_HIL_MLC) += hp_sdc_mlc.o hil_mlc.o
obj-$(CONFIG_SERIO_PCIPS2) += pcips2.o
+obj-$(CONFIG_SERIO_PS2MULT) += ps2mult.o
obj-$(CONFIG_SERIO_MACEPS2) += maceps2.o
obj-$(CONFIG_SERIO_LIBPS2) += libps2.o
obj-$(CONFIG_SERIO_RAW) += serio_raw.o
diff --git a/drivers/input/serio/gscps2.c b/drivers/input/serio/gscps2.c
index 3c287dd879d3..4225f5d6b15f 100644
--- a/drivers/input/serio/gscps2.c
+++ b/drivers/input/serio/gscps2.c
@@ -358,7 +358,7 @@ static int __devinit gscps2_probe(struct parisc_device *dev)
gscps2_reset(ps2port);
ps2port->id = readb(ps2port->addr + GSC_ID) & 0x0f;
- snprintf(serio->name, sizeof(serio->name), "GSC PS/2 %s",
+ snprintf(serio->name, sizeof(serio->name), "gsc-ps2-%s",
(ps2port->id == GSC_ID_KEYBOARD) ? "keyboard" : "mouse");
strlcpy(serio->phys, dev_name(&dev->dev), sizeof(serio->phys));
serio->id.type = SERIO_8042;
diff --git a/drivers/input/serio/i8042-x86ia64io.h b/drivers/input/serio/i8042-x86ia64io.h
index ed7ad7416b24..a5475b577086 100644
--- a/drivers/input/serio/i8042-x86ia64io.h
+++ b/drivers/input/serio/i8042-x86ia64io.h
@@ -351,6 +351,17 @@ static const struct dmi_system_id __initconst i8042_dmi_nomux_table[] = {
},
},
{
+ /*
+ * Most (all?) VAIOs do not have external PS/2 ports, nor do
+ * they implement active multiplexing properly, and
+ * MUX discovery usually messes up keyboard/touchpad.
+ */
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Sony Corporation"),
+ DMI_MATCH(DMI_BOARD_NAME, "VAIO"),
+ },
+ },
+ {
/* Amoi M636/A737 */
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "Amoi Electronics CO.,LTD."),
diff --git a/drivers/input/serio/i8042.c b/drivers/input/serio/i8042.c
index f58513160480..18db5a8c7478 100644
--- a/drivers/input/serio/i8042.c
+++ b/drivers/input/serio/i8042.c
@@ -1063,7 +1063,7 @@ static long i8042_panic_blink(int state)
#ifdef CONFIG_X86
static void i8042_dritek_enable(void)
{
- char param = 0x90;
+ unsigned char param = 0x90;
int error;
error = i8042_command(&param, 0x1059);
diff --git a/drivers/input/serio/ps2mult.c b/drivers/input/serio/ps2mult.c
new file mode 100644
index 000000000000..6bce22e4e495
--- /dev/null
+++ b/drivers/input/serio/ps2mult.c
@@ -0,0 +1,318 @@
+/*
+ * TQC PS/2 Multiplexer driver
+ *
+ * Copyright (C) 2010 Dmitry Eremin-Solenikov
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published by
+ * the Free Software Foundation.
+ */
+
+
+#include <linux/kernel.h>
+#include <linux/slab.h>
+#include <linux/module.h>
+#include <linux/serio.h>
+
+MODULE_AUTHOR("Dmitry Eremin-Solenikov <dbaryshkov@gmail.com>");
+MODULE_DESCRIPTION("TQC PS/2 Multiplexer driver");
+MODULE_LICENSE("GPL");
+
+#define PS2MULT_KB_SELECTOR 0xA0
+#define PS2MULT_MS_SELECTOR 0xA1
+#define PS2MULT_ESCAPE 0x7D
+#define PS2MULT_BSYNC 0x7E
+#define PS2MULT_SESSION_START 0x55
+#define PS2MULT_SESSION_END 0x56
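+
+/*
+ * Framing, as inferred from the handlers below rather than from a TQC
+ * datasheet: a selector byte switches the active stream, data bytes that
+ * collide with any of the control bytes above are prefixed with
+ * PS2MULT_ESCAPE, and PS2MULT_BSYNC marks that received bytes now belong
+ * to the port last selected for output.
+ */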
+
+struct ps2mult_port {
+ struct serio *serio;
+ unsigned char sel;
+ bool registered;
+};
+
+#define PS2MULT_NUM_PORTS 2
+#define PS2MULT_KBD_PORT 0
+#define PS2MULT_MOUSE_PORT 1
+
+struct ps2mult {
+ struct serio *mx_serio;
+ struct ps2mult_port ports[PS2MULT_NUM_PORTS];
+
+ spinlock_t lock;
+ struct ps2mult_port *in_port;
+ struct ps2mult_port *out_port;
+ bool escape;
+};
+
+/* First MUST come PS2MULT_NUM_PORTS selectors */
+static const unsigned char ps2mult_controls[] = {
+ PS2MULT_KB_SELECTOR, PS2MULT_MS_SELECTOR,
+ PS2MULT_ESCAPE, PS2MULT_BSYNC,
+ PS2MULT_SESSION_START, PS2MULT_SESSION_END,
+};
+
+static const struct serio_device_id ps2mult_serio_ids[] = {
+ {
+ .type = SERIO_RS232,
+ .proto = SERIO_PS2MULT,
+ .id = SERIO_ANY,
+ .extra = SERIO_ANY,
+ },
+ { 0 }
+};
+
+MODULE_DEVICE_TABLE(serio, ps2mult_serio_ids);
+
+static void ps2mult_select_port(struct ps2mult *psm, struct ps2mult_port *port)
+{
+ struct serio *mx_serio = psm->mx_serio;
+
+ serio_write(mx_serio, port->sel);
+ psm->out_port = port;
+ dev_dbg(&mx_serio->dev, "switched to sel %02x\n", port->sel);
+}
+
+static int ps2mult_serio_write(struct serio *serio, unsigned char data)
+{
+ struct serio *mx_port = serio->parent;
+ struct ps2mult *psm = serio_get_drvdata(mx_port);
+ struct ps2mult_port *port = serio->port_data;
+ bool need_escape;
+ unsigned long flags;
+
+ spin_lock_irqsave(&psm->lock, flags);
+
+ if (psm->out_port != port)
+ ps2mult_select_port(psm, port);
+
+ need_escape = memchr(ps2mult_controls, data, sizeof(ps2mult_controls));
+
+ dev_dbg(&serio->dev,
+ "write: %s%02x\n", need_escape ? "ESC " : "", data);
+
+ if (need_escape)
+ serio_write(mx_port, PS2MULT_ESCAPE);
+
+ serio_write(mx_port, data);
+
+ spin_unlock_irqrestore(&psm->lock, flags);
+
+ return 0;
+}
+
+static int ps2mult_serio_start(struct serio *serio)
+{
+ struct ps2mult *psm = serio_get_drvdata(serio->parent);
+ struct ps2mult_port *port = serio->port_data;
+ unsigned long flags;
+
+ spin_lock_irqsave(&psm->lock, flags);
+ port->registered = true;
+ spin_unlock_irqrestore(&psm->lock, flags);
+
+ return 0;
+}
+
+static void ps2mult_serio_stop(struct serio *serio)
+{
+ struct ps2mult *psm = serio_get_drvdata(serio->parent);
+ struct ps2mult_port *port = serio->port_data;
+ unsigned long flags;
+
+ spin_lock_irqsave(&psm->lock, flags);
+ port->registered = false;
+ spin_unlock_irqrestore(&psm->lock, flags);
+}
+
+static int ps2mult_create_port(struct ps2mult *psm, int i)
+{
+ struct serio *mx_serio = psm->mx_serio;
+ struct serio *serio;
+
+ serio = kzalloc(sizeof(struct serio), GFP_KERNEL);
+ if (!serio)
+ return -ENOMEM;
+
+ strlcpy(serio->name, "TQC PS/2 Multiplexer", sizeof(serio->name));
+ snprintf(serio->phys, sizeof(serio->phys),
+ "%s/port%d", mx_serio->phys, i);
+ serio->id.type = SERIO_8042;
+ serio->write = ps2mult_serio_write;
+ serio->start = ps2mult_serio_start;
+ serio->stop = ps2mult_serio_stop;
+ serio->parent = psm->mx_serio;
+ serio->port_data = &psm->ports[i];
+
+ psm->ports[i].serio = serio;
+
+ return 0;
+}
+
+static void ps2mult_reset(struct ps2mult *psm)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&psm->lock, flags);
+
+ serio_write(psm->mx_serio, PS2MULT_SESSION_END);
+ serio_write(psm->mx_serio, PS2MULT_SESSION_START);
+
+ ps2mult_select_port(psm, &psm->ports[PS2MULT_KBD_PORT]);
+
+ spin_unlock_irqrestore(&psm->lock, flags);
+}
+
+static int ps2mult_connect(struct serio *serio, struct serio_driver *drv)
+{
+ struct ps2mult *psm;
+ int i;
+ int error;
+
+ if (!serio->write)
+ return -EINVAL;
+
+ psm = kzalloc(sizeof(*psm), GFP_KERNEL);
+ if (!psm)
+ return -ENOMEM;
+
+ spin_lock_init(&psm->lock);
+ psm->mx_serio = serio;
+
+ for (i = 0; i < PS2MULT_NUM_PORTS; i++) {
+ psm->ports[i].sel = ps2mult_controls[i];
+ error = ps2mult_create_port(psm, i);
+ if (error)
+ goto err_out;
+ }
+
+ psm->in_port = psm->out_port = &psm->ports[PS2MULT_KBD_PORT];
+
+ serio_set_drvdata(serio, psm);
+ error = serio_open(serio, drv);
+ if (error)
+ goto err_out;
+
+ ps2mult_reset(psm);
+
+ for (i = 0; i < PS2MULT_NUM_PORTS; i++) {
+ struct serio *s = psm->ports[i].serio;
+
+ dev_info(&serio->dev, "%s port at %s\n", s->name, serio->phys);
+ serio_register_port(s);
+ }
+
+ return 0;
+
+err_out:
+ while (--i >= 0)
+ kfree(psm->ports[i].serio);
+ kfree(psm);
+ return error;
+}
+
+static void ps2mult_disconnect(struct serio *serio)
+{
+ struct ps2mult *psm = serio_get_drvdata(serio);
+
+ /* Note that serio core already takes care of child ports */
+ serio_write(serio, PS2MULT_SESSION_END);
+ serio_close(serio);
+ kfree(psm);
+
+ serio_set_drvdata(serio, NULL);
+}
+
+static int ps2mult_reconnect(struct serio *serio)
+{
+ struct ps2mult *psm = serio_get_drvdata(serio);
+
+ ps2mult_reset(psm);
+
+ return 0;
+}
+
+static irqreturn_t ps2mult_interrupt(struct serio *serio,
+ unsigned char data, unsigned int dfl)
+{
+ struct ps2mult *psm = serio_get_drvdata(serio);
+ struct ps2mult_port *in_port;
+ unsigned long flags;
+
+ dev_dbg(&serio->dev, "Received %02x flags %02x\n", data, dfl);
+
+ spin_lock_irqsave(&psm->lock, flags);
+
+ if (psm->escape) {
+ psm->escape = false;
+ in_port = psm->in_port;
+ if (in_port->registered)
+ serio_interrupt(in_port->serio, data, dfl);
+ goto out;
+ }
+
+ switch (data) {
+ case PS2MULT_ESCAPE:
+ dev_dbg(&serio->dev, "ESCAPE\n");
+ psm->escape = true;
+ break;
+
+ case PS2MULT_BSYNC:
+ dev_dbg(&serio->dev, "BSYNC\n");
+ psm->in_port = psm->out_port;
+ break;
+
+ case PS2MULT_SESSION_START:
+ dev_dbg(&serio->dev, "SS\n");
+ break;
+
+ case PS2MULT_SESSION_END:
+ dev_dbg(&serio->dev, "SE\n");
+ break;
+
+ case PS2MULT_KB_SELECTOR:
+ dev_dbg(&serio->dev, "KB\n");
+ psm->in_port = &psm->ports[PS2MULT_KBD_PORT];
+ break;
+
+ case PS2MULT_MS_SELECTOR:
+ dev_dbg(&serio->dev, "MS\n");
+ psm->in_port = &psm->ports[PS2MULT_MOUSE_PORT];
+ break;
+
+ default:
+ in_port = psm->in_port;
+ if (in_port->registered)
+ serio_interrupt(in_port->serio, data, dfl);
+ break;
+ }
+
+ out:
+ spin_unlock_irqrestore(&psm->lock, flags);
+ return IRQ_HANDLED;
+}
+
+static struct serio_driver ps2mult_drv = {
+ .driver = {
+ .name = "ps2mult",
+ },
+ .description = "TQC PS/2 Multiplexer driver",
+ .id_table = ps2mult_serio_ids,
+ .interrupt = ps2mult_interrupt,
+ .connect = ps2mult_connect,
+ .disconnect = ps2mult_disconnect,
+ .reconnect = ps2mult_reconnect,
+};
+
+static int __init ps2mult_init(void)
+{
+ return serio_register_driver(&ps2mult_drv);
+}
+
+static void __exit ps2mult_exit(void)
+{
+ serio_unregister_driver(&ps2mult_drv);
+}
+
+module_init(ps2mult_init);
+module_exit(ps2mult_exit);
diff --git a/drivers/input/serio/serio.c b/drivers/input/serio/serio.c
index c3b626e9eae7..405bf214527c 100644
--- a/drivers/input/serio/serio.c
+++ b/drivers/input/serio/serio.c
@@ -37,7 +37,6 @@
#include <linux/slab.h>
#include <linux/kthread.h>
#include <linux/mutex.h>
-#include <linux/freezer.h>
MODULE_AUTHOR("Vojtech Pavlik <vojtech@ucw.cz>");
MODULE_DESCRIPTION("Serio abstraction core");
@@ -56,7 +55,7 @@ static struct bus_type serio_bus;
static void serio_add_port(struct serio *serio);
static int serio_reconnect_port(struct serio *serio);
static void serio_disconnect_port(struct serio *serio);
-static void serio_reconnect_chain(struct serio *serio);
+static void serio_reconnect_subtree(struct serio *serio);
static void serio_attach_driver(struct serio_driver *drv);
static int serio_connect_driver(struct serio *serio, struct serio_driver *drv)
@@ -152,7 +151,7 @@ static void serio_find_driver(struct serio *serio)
enum serio_event_type {
SERIO_RESCAN_PORT,
SERIO_RECONNECT_PORT,
- SERIO_RECONNECT_CHAIN,
+ SERIO_RECONNECT_SUBTREE,
SERIO_REGISTER_PORT,
SERIO_ATTACH_DRIVER,
};
@@ -292,8 +291,8 @@ static void serio_handle_event(void)
serio_find_driver(event->object);
break;
- case SERIO_RECONNECT_CHAIN:
- serio_reconnect_chain(event->object);
+ case SERIO_RECONNECT_SUBTREE:
+ serio_reconnect_subtree(event->object);
break;
case SERIO_ATTACH_DRIVER:
@@ -330,12 +329,10 @@ static void serio_remove_pending_events(void *object)
}
/*
- * Destroy child serio port (if any) that has not been fully registered yet.
+ * Locate child serio port (if any) that has not been fully registered yet.
*
- * Note that we rely on the fact that port can have only one child and therefore
- * only one child registration request can be pending. Additionally, children
- * are registered by driver's connect() handler so there can't be a grandchild
- * pending registration together with a child.
+ * Children are registered by driver's connect() handler so there can't be a
+ * grandchild pending registration together with a child.
*/
static struct serio *serio_get_pending_child(struct serio *parent)
{
@@ -449,7 +446,7 @@ static ssize_t serio_rebind_driver(struct device *dev, struct device_attribute *
if (!strncmp(buf, "none", count)) {
serio_disconnect_port(serio);
} else if (!strncmp(buf, "reconnect", count)) {
- serio_reconnect_chain(serio);
+ serio_reconnect_subtree(serio);
} else if (!strncmp(buf, "rescan", count)) {
serio_disconnect_port(serio);
serio_find_driver(serio);
@@ -516,6 +513,8 @@ static void serio_init_port(struct serio *serio)
__module_get(THIS_MODULE);
INIT_LIST_HEAD(&serio->node);
+ INIT_LIST_HEAD(&serio->child_node);
+ INIT_LIST_HEAD(&serio->children);
spin_lock_init(&serio->lock);
mutex_init(&serio->drv_mutex);
device_initialize(&serio->dev);
@@ -538,12 +537,13 @@ static void serio_init_port(struct serio *serio)
*/
static void serio_add_port(struct serio *serio)
{
+ struct serio *parent = serio->parent;
int error;
- if (serio->parent) {
- serio_pause_rx(serio->parent);
- serio->parent->child = serio;
- serio_continue_rx(serio->parent);
+ if (parent) {
+ serio_pause_rx(parent);
+ list_add_tail(&serio->child_node, &parent->children);
+ serio_continue_rx(parent);
}
list_add_tail(&serio->node, &serio_list);
@@ -559,15 +559,14 @@ static void serio_add_port(struct serio *serio)
}
/*
- * serio_destroy_port() completes deregistration process and removes
+ * serio_destroy_port() completes unregistration process and removes
* port from the system
*/
static void serio_destroy_port(struct serio *serio)
{
struct serio *child;
- child = serio_get_pending_child(serio);
- if (child) {
+ while ((child = serio_get_pending_child(serio)) != NULL) {
serio_remove_pending_events(child);
put_device(&child->dev);
}
@@ -577,7 +576,7 @@ static void serio_destroy_port(struct serio *serio)
if (serio->parent) {
serio_pause_rx(serio->parent);
- serio->parent->child = NULL;
+ list_del_init(&serio->child_node);
serio_continue_rx(serio->parent);
serio->parent = NULL;
}
@@ -609,46 +608,82 @@ static int serio_reconnect_port(struct serio *serio)
}
/*
- * Reconnect serio port and all its children (re-initialize attached devices)
+ * Reconnect serio port and all its children (re-initialize attached
+ * devices).
*/
-static void serio_reconnect_chain(struct serio *serio)
+static void serio_reconnect_subtree(struct serio *root)
{
+ struct serio *s = root;
+ int error;
+
do {
- if (serio_reconnect_port(serio)) {
- /* Ok, old children are now gone, we are done */
- break;
+ error = serio_reconnect_port(s);
+ if (!error) {
+ /*
+ * Reconnect was successful, move on to do the
+ * first child.
+ */
+ if (!list_empty(&s->children)) {
+ s = list_first_entry(&s->children,
+ struct serio, child_node);
+ continue;
+ }
}
- serio = serio->child;
- } while (serio);
+
+ /*
+ * Either it was a leaf node or reconnect failed and it
+ * became a leaf node. Continue reconnecting starting with
+ * the next sibling of the parent node.
+ */
+ while (s != root) {
+ struct serio *parent = s->parent;
+
+ if (!list_is_last(&s->child_node, &parent->children)) {
+ s = list_entry(s->child_node.next,
+ struct serio, child_node);
+ break;
+ }
+
+ s = parent;
+ }
+ } while (s != root);
}
/*
* serio_disconnect_port() unbinds a port from its driver. As a side effect
- * all child ports are unbound and destroyed.
+ * all children ports are unbound and destroyed.
*/
static void serio_disconnect_port(struct serio *serio)
{
- struct serio *s, *parent;
+ struct serio *s = serio;
+
+ /*
+ * Children ports should be disconnected and destroyed
+ * first; we travel the tree in depth-first order.
+ */
+ while (!list_empty(&serio->children)) {
+
+ /* Locate a leaf */
+ while (!list_empty(&s->children))
+ s = list_first_entry(&s->children,
+ struct serio, child_node);
- if (serio->child) {
/*
- * Children ports should be disconnected and destroyed
- * first, staring with the leaf one, since we don't want
- * to do recursion
+ * Prune this leaf node unless it is the one we
+ * started with.
*/
- for (s = serio; s->child; s = s->child)
- /* empty */;
-
- do {
- parent = s->parent;
+ if (s != serio) {
+ struct serio *parent = s->parent;
device_release_driver(&s->dev);
serio_destroy_port(s);
- } while ((s = parent) != serio);
+
+ s = parent;
+ }
}
/*
- * Ok, no children left, now disconnect this port
+ * OK, no children left, now disconnect this port.
*/
device_release_driver(&serio->dev);
}
@@ -661,7 +696,7 @@ EXPORT_SYMBOL(serio_rescan);
void serio_reconnect(struct serio *serio)
{
- serio_queue_event(serio, NULL, SERIO_RECONNECT_CHAIN);
+ serio_queue_event(serio, NULL, SERIO_RECONNECT_SUBTREE);
}
EXPORT_SYMBOL(serio_reconnect);
@@ -689,14 +724,16 @@ void serio_unregister_port(struct serio *serio)
EXPORT_SYMBOL(serio_unregister_port);
/*
- * Safely unregisters child port if one is present.
+ * Safely unregisters children ports if they are present.
*/
void serio_unregister_child_port(struct serio *serio)
{
+ struct serio *s, *next;
+
mutex_lock(&serio_mutex);
- if (serio->child) {
- serio_disconnect_port(serio->child);
- serio_destroy_port(serio->child);
+ list_for_each_entry_safe(s, next, &serio->children, child_node) {
+ serio_disconnect_port(s);
+ serio_destroy_port(s);
}
mutex_unlock(&serio_mutex);
}
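The serio core changes above replace the single serio->child pointer with a children list plus child_node links, so a port driver can publish more than one downstream port, which is exactly what ps2mult needs. A minimal sketch of how a connect() handler populates that list, assuming the usual port-allocation conventions; the two-port loop, the SERIO_8042 type and the name strings are illustrative, not from this patch:

/* Sketch: registering two child ports beneath one parent serio port. */
static int example_mux_connect(struct serio *parent, struct serio_driver *drv)
{
	struct serio *child;
	int i;

	for (i = 0; i < 2; i++) {
		child = kzalloc(sizeof(*child), GFP_KERNEL);
		if (!child)
			return -ENOMEM;  /* already-registered ports are torn down on disconnect */

		snprintf(child->name, sizeof(child->name), "example mux port %d", i);
		snprintf(child->phys, sizeof(child->phys), "%s/port%d", parent->phys, i);
		child->id.type = SERIO_8042;       /* illustrative port type */
		child->parent = parent;            /* serio_add_port() links it into
						      parent->children */
		child->port_data = parent->port_data;

		serio_register_port(child);        /* registration completes on the
						      serio kthread */
	}

	return 0;
}

serio_unregister_child_port(), as reworked above, then walks parent->children with list_for_each_entry_safe() and disconnects and destroys every port registered this way.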
diff --git a/drivers/input/serio/serio_raw.c b/drivers/input/serio/serio_raw.c
index cd82bb125915..b7ba4597f7f0 100644
--- a/drivers/input/serio/serio_raw.c
+++ b/drivers/input/serio/serio_raw.c
@@ -11,7 +11,6 @@
#include <linux/sched.h>
#include <linux/slab.h>
-#include <linux/smp_lock.h>
#include <linux/poll.h>
#include <linux/module.h>
#include <linux/serio.h>
diff --git a/drivers/input/sparse-keymap.c b/drivers/input/sparse-keymap.c
index 014248344763..a29a7812bd46 100644
--- a/drivers/input/sparse-keymap.c
+++ b/drivers/input/sparse-keymap.c
@@ -22,6 +22,37 @@ MODULE_DESCRIPTION("Generic support for sparse keymaps");
MODULE_LICENSE("GPL v2");
MODULE_VERSION("0.1");
+static unsigned int sparse_keymap_get_key_index(struct input_dev *dev,
+ const struct key_entry *k)
+{
+ struct key_entry *key;
+ unsigned int idx = 0;
+
+ for (key = dev->keycode; key->type != KE_END; key++) {
+ if (key->type == KE_KEY) {
+ if (key == k)
+ break;
+ idx++;
+ }
+ }
+
+ return idx;
+}
+
+static struct key_entry *sparse_keymap_entry_by_index(struct input_dev *dev,
+ unsigned int index)
+{
+ struct key_entry *key;
+ unsigned int key_cnt = 0;
+
+ for (key = dev->keycode; key->type != KE_END; key++)
+ if (key->type == KE_KEY)
+ if (key_cnt++ == index)
+ return key;
+
+ return NULL;
+}
+
/**
* sparse_keymap_entry_from_scancode - perform sparse keymap lookup
* @dev: Input device using sparse keymap
@@ -64,16 +95,36 @@ struct key_entry *sparse_keymap_entry_from_keycode(struct input_dev *dev,
}
EXPORT_SYMBOL(sparse_keymap_entry_from_keycode);
+static struct key_entry *sparse_keymap_locate(struct input_dev *dev,
+ const struct input_keymap_entry *ke)
+{
+ struct key_entry *key;
+ unsigned int scancode;
+
+ if (ke->flags & INPUT_KEYMAP_BY_INDEX)
+ key = sparse_keymap_entry_by_index(dev, ke->index);
+ else if (input_scancode_to_scalar(ke, &scancode) == 0)
+ key = sparse_keymap_entry_from_scancode(dev, scancode);
+ else
+ key = NULL;
+
+ return key;
+}
+
static int sparse_keymap_getkeycode(struct input_dev *dev,
- unsigned int scancode,
- unsigned int *keycode)
+ struct input_keymap_entry *ke)
{
const struct key_entry *key;
if (dev->keycode) {
- key = sparse_keymap_entry_from_scancode(dev, scancode);
+ key = sparse_keymap_locate(dev, ke);
if (key && key->type == KE_KEY) {
- *keycode = key->keycode;
+ ke->keycode = key->keycode;
+ if (!(ke->flags & INPUT_KEYMAP_BY_INDEX))
+ ke->index =
+ sparse_keymap_get_key_index(dev, key);
+ ke->len = sizeof(key->code);
+ memcpy(ke->scancode, &key->code, sizeof(key->code));
return 0;
}
}
@@ -82,20 +133,19 @@ static int sparse_keymap_getkeycode(struct input_dev *dev,
}
static int sparse_keymap_setkeycode(struct input_dev *dev,
- unsigned int scancode,
- unsigned int keycode)
+ const struct input_keymap_entry *ke,
+ unsigned int *old_keycode)
{
struct key_entry *key;
- int old_keycode;
if (dev->keycode) {
- key = sparse_keymap_entry_from_scancode(dev, scancode);
+ key = sparse_keymap_locate(dev, ke);
if (key && key->type == KE_KEY) {
- old_keycode = key->keycode;
- key->keycode = keycode;
- set_bit(keycode, dev->keybit);
- if (!sparse_keymap_entry_from_keycode(dev, old_keycode))
- clear_bit(old_keycode, dev->keybit);
+ *old_keycode = key->keycode;
+ key->keycode = ke->keycode;
+ set_bit(ke->keycode, dev->keybit);
+ if (!sparse_keymap_entry_from_keycode(dev, *old_keycode))
+ clear_bit(*old_keycode, dev->keybit);
return 0;
}
}
@@ -159,15 +209,14 @@ int sparse_keymap_setup(struct input_dev *dev,
dev->keycode = map;
dev->keycodemax = map_size;
- dev->getkeycode = sparse_keymap_getkeycode;
- dev->setkeycode = sparse_keymap_setkeycode;
+ dev->getkeycode_new = sparse_keymap_getkeycode;
+ dev->setkeycode_new = sparse_keymap_setkeycode;
return 0;
err_out:
kfree(map);
return error;
-
}
EXPORT_SYMBOL(sparse_keymap_setup);
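The get/setkeycode rework above is what connects sparse keymaps to the new index-based keymap interface: entries can now be addressed either by scancode or by their position among the KE_KEY entries. A small user-space sketch of how this ends up being exercised, assuming the EVIOCGKEYCODE_V2 ioctl from the same kernel series; the event-node path is purely illustrative:

/* Sketch: query the first KE_KEY entry of a sparse keymap by index. */
#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <linux/input.h>

int main(void)
{
	struct input_keymap_entry ke;
	int fd = open("/dev/input/event0", O_RDONLY);   /* illustrative node */

	if (fd < 0)
		return 1;

	memset(&ke, 0, sizeof(ke));
	ke.flags = INPUT_KEYMAP_BY_INDEX;   /* look up by index, not scancode */
	ke.index = 0;

	if (ioctl(fd, EVIOCGKEYCODE_V2, &ke) == 0)
		printf("entry 0: keycode %u, scancode length %u\n",
		       ke.keycode, (unsigned int)ke.len);

	close(fd);
	return 0;
}

Leaving ke.flags at 0 and filling ke.scancode/ke.len instead takes the scancode branch of sparse_keymap_locate() shown above.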
diff --git a/drivers/input/tablet/Kconfig b/drivers/input/tablet/Kconfig
index effb49ea24aa..58a87755b936 100644
--- a/drivers/input/tablet/Kconfig
+++ b/drivers/input/tablet/Kconfig
@@ -49,6 +49,17 @@ config TABLET_USB_GTCO
To compile this driver as a module, choose M here: the
module will be called gtco.
+config TABLET_USB_HANWANG
+ tristate "Hanwang Art Master III tablet support (USB)"
+ depends on USB_ARCH_HAS_HCD
+ select USB
+ help
+ Say Y here if you want to use the USB version of the Hanwang Art
+ Master III tablet.
+
+ To compile this driver as a module, choose M here: the
+ module will be called hanwang.
+
config TABLET_USB_KBTAB
tristate "KB Gear JamStudio tablet support (USB)"
depends on USB_ARCH_HAS_HCD
diff --git a/drivers/input/tablet/Makefile b/drivers/input/tablet/Makefile
index ce8b9a9cfa40..3f6c25220638 100644
--- a/drivers/input/tablet/Makefile
+++ b/drivers/input/tablet/Makefile
@@ -8,5 +8,6 @@ wacom-objs := wacom_wac.o wacom_sys.o
obj-$(CONFIG_TABLET_USB_ACECAD) += acecad.o
obj-$(CONFIG_TABLET_USB_AIPTEK) += aiptek.o
obj-$(CONFIG_TABLET_USB_GTCO) += gtco.o
+obj-$(CONFIG_TABLET_USB_HANWANG) += hanwang.o
obj-$(CONFIG_TABLET_USB_KBTAB) += kbtab.o
obj-$(CONFIG_TABLET_USB_WACOM) += wacom.o
diff --git a/drivers/input/tablet/acecad.c b/drivers/input/tablet/acecad.c
index aea9a9399a36..d94f7e9aa997 100644
--- a/drivers/input/tablet/acecad.c
+++ b/drivers/input/tablet/acecad.c
@@ -229,12 +229,13 @@ static int usb_acecad_probe(struct usb_interface *intf, const struct usb_device_
err = input_register_device(acecad->input);
if (err)
- goto fail2;
+ goto fail3;
usb_set_intfdata(intf, acecad);
return 0;
+ fail3: usb_free_urb(acecad->irq);
fail2: usb_free_coherent(dev, 8, acecad->data, acecad->data_dma);
fail1: input_free_device(input_dev);
kfree(acecad);
diff --git a/drivers/input/tablet/aiptek.c b/drivers/input/tablet/aiptek.c
index 57b25b84d1fc..0a619c558bfb 100644
--- a/drivers/input/tablet/aiptek.c
+++ b/drivers/input/tablet/aiptek.c
@@ -1097,7 +1097,7 @@ store_tabletPointerMode(struct device *dev, struct device_attribute *attr, const
}
static DEVICE_ATTR(pointer_mode,
- S_IRUGO | S_IWUGO,
+ S_IRUGO | S_IWUSR,
show_tabletPointerMode, store_tabletPointerMode);
/***********************************************************************
@@ -1134,7 +1134,7 @@ store_tabletCoordinateMode(struct device *dev, struct device_attribute *attr, co
}
static DEVICE_ATTR(coordinate_mode,
- S_IRUGO | S_IWUGO,
+ S_IRUGO | S_IWUSR,
show_tabletCoordinateMode, store_tabletCoordinateMode);
/***********************************************************************
@@ -1176,7 +1176,7 @@ store_tabletToolMode(struct device *dev, struct device_attribute *attr, const ch
}
static DEVICE_ATTR(tool_mode,
- S_IRUGO | S_IWUGO,
+ S_IRUGO | S_IWUSR,
show_tabletToolMode, store_tabletToolMode);
/***********************************************************************
@@ -1219,7 +1219,7 @@ store_tabletXtilt(struct device *dev, struct device_attribute *attr, const char
}
static DEVICE_ATTR(xtilt,
- S_IRUGO | S_IWUGO, show_tabletXtilt, store_tabletXtilt);
+ S_IRUGO | S_IWUSR, show_tabletXtilt, store_tabletXtilt);
/***********************************************************************
* support routines for the 'ytilt' file. Note that this file
@@ -1261,7 +1261,7 @@ store_tabletYtilt(struct device *dev, struct device_attribute *attr, const char
}
static DEVICE_ATTR(ytilt,
- S_IRUGO | S_IWUGO, show_tabletYtilt, store_tabletYtilt);
+ S_IRUGO | S_IWUSR, show_tabletYtilt, store_tabletYtilt);
/***********************************************************************
* support routines for the 'jitter' file. Note that this file
@@ -1288,7 +1288,7 @@ store_tabletJitterDelay(struct device *dev, struct device_attribute *attr, const
}
static DEVICE_ATTR(jitter,
- S_IRUGO | S_IWUGO,
+ S_IRUGO | S_IWUSR,
show_tabletJitterDelay, store_tabletJitterDelay);
/***********************************************************************
@@ -1317,7 +1317,7 @@ store_tabletProgrammableDelay(struct device *dev, struct device_attribute *attr,
}
static DEVICE_ATTR(delay,
- S_IRUGO | S_IWUGO,
+ S_IRUGO | S_IWUSR,
show_tabletProgrammableDelay, store_tabletProgrammableDelay);
/***********************************************************************
@@ -1406,7 +1406,7 @@ store_tabletStylusUpper(struct device *dev, struct device_attribute *attr, const
}
static DEVICE_ATTR(stylus_upper,
- S_IRUGO | S_IWUGO,
+ S_IRUGO | S_IWUSR,
show_tabletStylusUpper, store_tabletStylusUpper);
/***********************************************************************
@@ -1437,7 +1437,7 @@ store_tabletStylusLower(struct device *dev, struct device_attribute *attr, const
}
static DEVICE_ATTR(stylus_lower,
- S_IRUGO | S_IWUGO,
+ S_IRUGO | S_IWUSR,
show_tabletStylusLower, store_tabletStylusLower);
/***********************************************************************
@@ -1475,7 +1475,7 @@ store_tabletMouseLeft(struct device *dev, struct device_attribute *attr, const c
}
static DEVICE_ATTR(mouse_left,
- S_IRUGO | S_IWUGO,
+ S_IRUGO | S_IWUSR,
show_tabletMouseLeft, store_tabletMouseLeft);
/***********************************************************************
@@ -1505,7 +1505,7 @@ store_tabletMouseMiddle(struct device *dev, struct device_attribute *attr, const
}
static DEVICE_ATTR(mouse_middle,
- S_IRUGO | S_IWUGO,
+ S_IRUGO | S_IWUSR,
show_tabletMouseMiddle, store_tabletMouseMiddle);
/***********************************************************************
@@ -1535,7 +1535,7 @@ store_tabletMouseRight(struct device *dev, struct device_attribute *attr, const
}
static DEVICE_ATTR(mouse_right,
- S_IRUGO | S_IWUGO,
+ S_IRUGO | S_IWUSR,
show_tabletMouseRight, store_tabletMouseRight);
/***********************************************************************
@@ -1567,7 +1567,7 @@ store_tabletWheel(struct device *dev, struct device_attribute *attr, const char
}
static DEVICE_ATTR(wheel,
- S_IRUGO | S_IWUGO, show_tabletWheel, store_tabletWheel);
+ S_IRUGO | S_IWUSR, show_tabletWheel, store_tabletWheel);
/***********************************************************************
* support routines for the 'execute' file. Note that this file
@@ -1600,7 +1600,7 @@ store_tabletExecute(struct device *dev, struct device_attribute *attr, const cha
}
static DEVICE_ATTR(execute,
- S_IRUGO | S_IWUGO, show_tabletExecute, store_tabletExecute);
+ S_IRUGO | S_IWUSR, show_tabletExecute, store_tabletExecute);
/***********************************************************************
* support routines for the 'odm_code' file. Note that this file
diff --git a/drivers/input/tablet/hanwang.c b/drivers/input/tablet/hanwang.c
new file mode 100644
index 000000000000..6504b627b234
--- /dev/null
+++ b/drivers/input/tablet/hanwang.c
@@ -0,0 +1,446 @@
+/*
+ * USB Hanwang tablet support
+ *
+ * Copyright (c) 2010 Xing Wei <weixing@hanwang.com.cn>
+ *
+ */
+
+/*
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ *
+ */
+
+#include <linux/types.h>
+#include <linux/kernel.h>
+#include <linux/slab.h>
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/usb/input.h>
+
+#define DRIVER_AUTHOR "Xing Wei <weixing@hanwang.com.cn>"
+#define DRIVER_DESC "USB Hanwang tablet driver"
+#define DRIVER_LICENSE "GPL"
+
+MODULE_AUTHOR(DRIVER_AUTHOR);
+MODULE_DESCRIPTION(DRIVER_DESC);
+MODULE_LICENSE(DRIVER_LICENSE);
+
+#define USB_VENDOR_ID_HANWANG 0x0b57
+#define HANWANG_TABLET_INT_CLASS 0x0003
+#define HANWANG_TABLET_INT_SUB_CLASS 0x0001
+#define HANWANG_TABLET_INT_PROTOCOL 0x0002
+
+#define ART_MASTER_PKGLEN_MAX 10
+
+/* device IDs */
+#define STYLUS_DEVICE_ID 0x02
+#define TOUCH_DEVICE_ID 0x03
+#define CURSOR_DEVICE_ID 0x06
+#define ERASER_DEVICE_ID 0x0A
+#define PAD_DEVICE_ID 0x0F
+
+/* match vendor and interface info */
+#define HANWANG_TABLET_DEVICE(vend, cl, sc, pr) \
+ .match_flags = USB_DEVICE_ID_MATCH_VENDOR \
+ | USB_DEVICE_ID_MATCH_INT_INFO, \
+ .idVendor = (vend), \
+ .bInterfaceClass = (cl), \
+ .bInterfaceSubClass = (sc), \
+ .bInterfaceProtocol = (pr)
+
+enum hanwang_tablet_type {
+ HANWANG_ART_MASTER_III,
+ HANWANG_ART_MASTER_HD,
+};
+
+struct hanwang {
+ unsigned char *data;
+ dma_addr_t data_dma;
+ struct input_dev *dev;
+ struct usb_device *usbdev;
+ struct urb *irq;
+ const struct hanwang_features *features;
+ unsigned int current_tool;
+ unsigned int current_id;
+ char name[64];
+ char phys[32];
+};
+
+struct hanwang_features {
+ unsigned short pid;
+ char *name;
+ enum hanwang_tablet_type type;
+ int pkg_len;
+ int max_x;
+ int max_y;
+ int max_tilt_x;
+ int max_tilt_y;
+ int max_pressure;
+};
+
+static const struct hanwang_features features_array[] = {
+ { 0x8528, "Hanwang Art Master III 0906", HANWANG_ART_MASTER_III,
+ ART_MASTER_PKGLEN_MAX, 0x5757, 0x3692, 0x3f, 0x7f, 2048 },
+ { 0x8529, "Hanwang Art Master III 0604", HANWANG_ART_MASTER_III,
+ ART_MASTER_PKGLEN_MAX, 0x3d84, 0x2672, 0x3f, 0x7f, 2048 },
+ { 0x852a, "Hanwang Art Master III 1308", HANWANG_ART_MASTER_III,
+ ART_MASTER_PKGLEN_MAX, 0x7f00, 0x4f60, 0x3f, 0x7f, 2048 },
+ { 0x8401, "Hanwang Art Master HD 5012", HANWANG_ART_MASTER_HD,
+ ART_MASTER_PKGLEN_MAX, 0x678e, 0x4150, 0x3f, 0x7f, 1024 },
+};
+
+static const int hw_eventtypes[] = {
+ EV_KEY, EV_ABS, EV_MSC,
+};
+
+static const int hw_absevents[] = {
+ ABS_X, ABS_Y, ABS_TILT_X, ABS_TILT_Y, ABS_WHEEL,
+ ABS_RX, ABS_RY, ABS_PRESSURE, ABS_MISC,
+};
+
+static const int hw_btnevents[] = {
+ BTN_STYLUS, BTN_STYLUS2, BTN_TOOL_PEN, BTN_TOOL_RUBBER,
+ BTN_TOOL_MOUSE, BTN_TOOL_FINGER,
+ BTN_0, BTN_1, BTN_2, BTN_3, BTN_4, BTN_5, BTN_6, BTN_7, BTN_8,
+};
+
+static const int hw_mscevents[] = {
+ MSC_SERIAL,
+};
+
+static void hanwang_parse_packet(struct hanwang *hanwang)
+{
+ unsigned char *data = hanwang->data;
+ struct input_dev *input_dev = hanwang->dev;
+ struct usb_device *dev = hanwang->usbdev;
+ enum hanwang_tablet_type type = hanwang->features->type;
+ int i;
+ u16 x, y, p;
+
+ switch (data[0]) {
+ case 0x02: /* data packet */
+ switch (data[1]) {
+ case 0x80: /* tool prox out */
+ hanwang->current_id = 0;
+ input_report_key(input_dev, hanwang->current_tool, 0);
+ break;
+
+ case 0xc2: /* first time tool prox in */
+ switch (data[3] & 0xf0) {
+ case 0x20: /* art_master III */
+ case 0x30: /* art_master_HD */
+ hanwang->current_id = STYLUS_DEVICE_ID;
+ hanwang->current_tool = BTN_TOOL_PEN;
+ input_report_key(input_dev, BTN_TOOL_PEN, 1);
+ break;
+ case 0xa0: /* art_master III */
+ case 0xb0: /* art_master_HD */
+ hanwang->current_id = ERASER_DEVICE_ID;
+ hanwang->current_tool = BTN_TOOL_RUBBER;
+ input_report_key(input_dev, BTN_TOOL_RUBBER, 1);
+ break;
+ default:
+ hanwang->current_id = 0;
+ dev_dbg(&dev->dev,
+ "unknown tablet tool %02x ", data[0]);
+ break;
+ }
+ break;
+
+ default: /* tool data packet */
+ x = (data[2] << 8) | data[3];
+ y = (data[4] << 8) | data[5];
+
+ switch (type) {
+ case HANWANG_ART_MASTER_III:
+ p = (data[6] << 3) |
+ ((data[7] & 0xc0) >> 5) |
+ (data[1] & 0x01);
+ break;
+
+ case HANWANG_ART_MASTER_HD:
+ p = (data[7] >> 6) | (data[6] << 2);
+ break;
+
+ default:
+ p = 0;
+ break;
+ }
+
+ input_report_abs(input_dev, ABS_X,
+ le16_to_cpup((__le16 *)&x));
+ input_report_abs(input_dev, ABS_Y,
+ le16_to_cpup((__le16 *)&y));
+ input_report_abs(input_dev, ABS_PRESSURE,
+ le16_to_cpup((__le16 *)&p));
+ input_report_abs(input_dev, ABS_TILT_X, data[7] & 0x3f);
+ input_report_abs(input_dev, ABS_TILT_Y, data[8] & 0x7f);
+ input_report_key(input_dev, BTN_STYLUS, data[1] & 0x02);
+ input_report_key(input_dev, BTN_STYLUS2, data[1] & 0x04);
+ break;
+ }
+ input_report_abs(input_dev, ABS_MISC, hanwang->current_id);
+ input_event(input_dev, EV_MSC, MSC_SERIAL,
+ hanwang->features->pid);
+ break;
+
+ case 0x0c:
+ /* roll wheel */
+ hanwang->current_id = PAD_DEVICE_ID;
+
+ switch (type) {
+ case HANWANG_ART_MASTER_III:
+ input_report_key(input_dev, BTN_TOOL_FINGER, data[1] ||
+ data[2] || data[3]);
+ input_report_abs(input_dev, ABS_WHEEL, data[1]);
+ input_report_key(input_dev, BTN_0, data[2]);
+ for (i = 0; i < 8; i++)
+ input_report_key(input_dev,
+ BTN_1 + i, data[3] & (1 << i));
+ break;
+
+ case HANWANG_ART_MASTER_HD:
+ input_report_key(input_dev, BTN_TOOL_FINGER, data[1] ||
+ data[2] || data[3] || data[4] ||
+ data[5] || data[6]);
+ input_report_abs(input_dev, ABS_RX,
+ ((data[1] & 0x1f) << 8) | data[2]);
+ input_report_abs(input_dev, ABS_RY,
+ ((data[3] & 0x1f) << 8) | data[4]);
+ input_report_key(input_dev, BTN_0, data[5] & 0x01);
+ for (i = 0; i < 4; i++) {
+ input_report_key(input_dev,
+ BTN_1 + i, data[5] & (1 << i));
+ input_report_key(input_dev,
+ BTN_5 + i, data[6] & (1 << i));
+ }
+ break;
+ }
+
+ input_report_abs(input_dev, ABS_MISC, hanwang->current_id);
+ input_event(input_dev, EV_MSC, MSC_SERIAL, 0xffffffff);
+ break;
+
+ default:
+ dev_dbg(&dev->dev, "error packet %02x ", data[0]);
+ break;
+ }
+
+ input_sync(input_dev);
+}
+
+static void hanwang_irq(struct urb *urb)
+{
+ struct hanwang *hanwang = urb->context;
+ struct usb_device *dev = hanwang->usbdev;
+ int retval;
+
+ switch (urb->status) {
+ case 0:
+ /* success */;
+ hanwang_parse_packet(hanwang);
+ break;
+ case -ECONNRESET:
+ case -ENOENT:
+ case -ESHUTDOWN:
+ /* this urb is terminated, clean up */
+ dev_err(&dev->dev, "%s - urb shutting down with status: %d",
+ __func__, urb->status);
+ return;
+ default:
+ dev_err(&dev->dev, "%s - nonzero urb status received: %d",
+ __func__, urb->status);
+ break;
+ }
+
+ retval = usb_submit_urb(urb, GFP_ATOMIC);
+ if (retval)
+ dev_err(&dev->dev, "%s - usb_submit_urb failed with result %d",
+ __func__, retval);
+}
+
+static int hanwang_open(struct input_dev *dev)
+{
+ struct hanwang *hanwang = input_get_drvdata(dev);
+
+ hanwang->irq->dev = hanwang->usbdev;
+ if (usb_submit_urb(hanwang->irq, GFP_KERNEL))
+ return -EIO;
+
+ return 0;
+}
+
+static void hanwang_close(struct input_dev *dev)
+{
+ struct hanwang *hanwang = input_get_drvdata(dev);
+
+ usb_kill_urb(hanwang->irq);
+}
+
+static bool get_features(struct usb_device *dev, struct hanwang *hanwang)
+{
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(features_array); i++) {
+ if (le16_to_cpu(dev->descriptor.idProduct) ==
+ features_array[i].pid) {
+ hanwang->features = &features_array[i];
+ return true;
+ }
+ }
+
+ return false;
+}
+
+
+static int hanwang_probe(struct usb_interface *intf, const struct usb_device_id *id)
+{
+ struct usb_device *dev = interface_to_usbdev(intf);
+ struct usb_endpoint_descriptor *endpoint;
+ struct hanwang *hanwang;
+ struct input_dev *input_dev;
+ int error;
+ int i;
+
+ hanwang = kzalloc(sizeof(struct hanwang), GFP_KERNEL);
+ input_dev = input_allocate_device();
+ if (!hanwang || !input_dev) {
+ error = -ENOMEM;
+ goto fail1;
+ }
+
+ if (!get_features(dev, hanwang)) {
+ error = -ENXIO;
+ goto fail1;
+ }
+
+ hanwang->data = usb_alloc_coherent(dev, hanwang->features->pkg_len,
+ GFP_KERNEL, &hanwang->data_dma);
+ if (!hanwang->data) {
+ error = -ENOMEM;
+ goto fail1;
+ }
+
+ hanwang->irq = usb_alloc_urb(0, GFP_KERNEL);
+ if (!hanwang->irq) {
+ error = -ENOMEM;
+ goto fail2;
+ }
+
+ hanwang->usbdev = dev;
+ hanwang->dev = input_dev;
+
+ usb_make_path(dev, hanwang->phys, sizeof(hanwang->phys));
+ strlcat(hanwang->phys, "/input0", sizeof(hanwang->phys));
+
+ strlcpy(hanwang->name, hanwang->features->name, sizeof(hanwang->name));
+ input_dev->name = hanwang->name;
+ input_dev->phys = hanwang->phys;
+ usb_to_input_id(dev, &input_dev->id);
+ input_dev->dev.parent = &intf->dev;
+
+ input_set_drvdata(input_dev, hanwang);
+
+ input_dev->open = hanwang_open;
+ input_dev->close = hanwang_close;
+
+ for (i = 0; i < ARRAY_SIZE(hw_eventtypes); ++i)
+ __set_bit(hw_eventtypes[i], input_dev->evbit);
+
+ for (i = 0; i < ARRAY_SIZE(hw_absevents); ++i)
+ __set_bit(hw_absevents[i], input_dev->absbit);
+
+ for (i = 0; i < ARRAY_SIZE(hw_btnevents); ++i)
+ __set_bit(hw_btnevents[i], input_dev->keybit);
+
+ for (i = 0; i < ARRAY_SIZE(hw_mscevents); ++i)
+ __set_bit(hw_mscevents[i], input_dev->mscbit);
+
+ input_set_abs_params(input_dev, ABS_X,
+ 0, hanwang->features->max_x, 4, 0);
+ input_set_abs_params(input_dev, ABS_Y,
+ 0, hanwang->features->max_y, 4, 0);
+ input_set_abs_params(input_dev, ABS_TILT_X,
+ 0, hanwang->features->max_tilt_x, 0, 0);
+ input_set_abs_params(input_dev, ABS_TILT_Y,
+ 0, hanwang->features->max_tilt_y, 0, 0);
+ input_set_abs_params(input_dev, ABS_PRESSURE,
+ 0, hanwang->features->max_pressure, 0, 0);
+
+ endpoint = &intf->cur_altsetting->endpoint[0].desc;
+ usb_fill_int_urb(hanwang->irq, dev,
+ usb_rcvintpipe(dev, endpoint->bEndpointAddress),
+ hanwang->data, hanwang->features->pkg_len,
+ hanwang_irq, hanwang, endpoint->bInterval);
+ hanwang->irq->transfer_dma = hanwang->data_dma;
+ hanwang->irq->transfer_flags |= URB_NO_TRANSFER_DMA_MAP;
+
+ error = input_register_device(hanwang->dev);
+ if (error)
+ goto fail3;
+
+ usb_set_intfdata(intf, hanwang);
+
+ return 0;
+
+ fail3: usb_free_urb(hanwang->irq);
+ fail2: usb_free_coherent(dev, hanwang->features->pkg_len,
+ hanwang->data, hanwang->data_dma);
+ fail1: input_free_device(input_dev);
+ kfree(hanwang);
+ return error;
+
+}
+
+static void hanwang_disconnect(struct usb_interface *intf)
+{
+ struct hanwang *hanwang = usb_get_intfdata(intf);
+
+ input_unregister_device(hanwang->dev);
+ usb_free_urb(hanwang->irq);
+ usb_free_coherent(interface_to_usbdev(intf),
+ hanwang->features->pkg_len, hanwang->data,
+ hanwang->data_dma);
+ kfree(hanwang);
+ usb_set_intfdata(intf, NULL);
+}
+
+static const struct usb_device_id hanwang_ids[] = {
+ { HANWANG_TABLET_DEVICE(USB_VENDOR_ID_HANWANG, HANWANG_TABLET_INT_CLASS,
+ HANWANG_TABLET_INT_SUB_CLASS, HANWANG_TABLET_INT_PROTOCOL) },
+ {}
+};
+
+MODULE_DEVICE_TABLE(usb, hanwang_ids);
+
+static struct usb_driver hanwang_driver = {
+ .name = "hanwang",
+ .probe = hanwang_probe,
+ .disconnect = hanwang_disconnect,
+ .id_table = hanwang_ids,
+};
+
+static int __init hanwang_init(void)
+{
+ return usb_register(&hanwang_driver);
+}
+
+static void __exit hanwang_exit(void)
+{
+ usb_deregister(&hanwang_driver);
+}
+
+module_init(hanwang_init);
+module_exit(hanwang_exit);
diff --git a/drivers/input/tablet/wacom.h b/drivers/input/tablet/wacom.h
index 284dfaab6b8c..de5adb109030 100644
--- a/drivers/input/tablet/wacom.h
+++ b/drivers/input/tablet/wacom.h
@@ -118,6 +118,7 @@ struct wacom {
extern const struct usb_device_id wacom_ids[];
void wacom_wac_irq(struct wacom_wac *wacom_wac, size_t len);
+void wacom_setup_device_quirks(struct wacom_features *features);
void wacom_setup_input_capabilities(struct input_dev *input_dev,
struct wacom_wac *wacom_wac);
#endif
diff --git a/drivers/input/tablet/wacom_sys.c b/drivers/input/tablet/wacom_sys.c
index b35876ee6908..fc381498b798 100644
--- a/drivers/input/tablet/wacom_sys.c
+++ b/drivers/input/tablet/wacom_sys.c
@@ -120,14 +120,16 @@ static int wacom_open(struct input_dev *dev)
out:
mutex_unlock(&wacom->lock);
- if (retval)
- usb_autopm_put_interface(wacom->intf);
+ usb_autopm_put_interface(wacom->intf);
return retval;
}
static void wacom_close(struct input_dev *dev)
{
struct wacom *wacom = input_get_drvdata(dev);
+ int autopm_error;
+
+ autopm_error = usb_autopm_get_interface(wacom->intf);
mutex_lock(&wacom->lock);
usb_kill_urb(wacom->irq);
@@ -135,7 +137,8 @@ static void wacom_close(struct input_dev *dev)
wacom->intf->needs_remote_wakeup = 0;
mutex_unlock(&wacom->lock);
- usb_autopm_put_interface(wacom->intf);
+ if (!autopm_error)
+ usb_autopm_put_interface(wacom->intf);
}
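The wacom_open()/wacom_close() changes above keep the runtime-PM reference count balanced: open now drops the reference it took regardless of the outcome, and close only drops one if its own usb_autopm_get_interface() succeeded. The same pattern in isolation, with the URB and mutex work elided (function names other than the autopm calls are illustrative):

/* Sketch: balanced autopm references around open/close. */
static int example_open(struct usb_interface *intf)
{
	int retval = usb_autopm_get_interface(intf);

	if (retval)
		return retval;

	/* ... take the lock, submit the URB, set needs_remote_wakeup ... */

	usb_autopm_put_interface(intf);      /* always undo the get above */
	return 0;
}

static void example_close(struct usb_interface *intf)
{
	int autopm_error = usb_autopm_get_interface(intf);

	/* ... take the lock, kill the URB, clear needs_remote_wakeup ... */

	if (!autopm_error)
		usb_autopm_put_interface(intf);  /* only put what we actually got */
}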
static int wacom_parse_hid(struct usb_interface *intf, struct hid_descriptor *hid_desc,
@@ -196,17 +199,30 @@ static int wacom_parse_hid(struct usb_interface *intf, struct hid_descriptor *hi
features->pktlen = WACOM_PKGLEN_TPC2FG;
features->device_type = BTN_TOOL_TRIPLETAP;
}
- features->x_max =
- get_unaligned_le16(&report[i + 3]);
- features->x_phy =
- get_unaligned_le16(&report[i + 6]);
- features->unit = report[i + 9];
- features->unitExpo = report[i + 11];
- i += 12;
+ if (features->type == BAMBOO_PT) {
+ /* need to reset back */
+ features->pktlen = WACOM_PKGLEN_BBTOUCH;
+ features->device_type = BTN_TOOL_TRIPLETAP;
+ features->x_phy =
+ get_unaligned_le16(&report[i + 5]);
+ features->x_max =
+ get_unaligned_le16(&report[i + 8]);
+ i += 15;
+ } else {
+ features->x_max =
+ get_unaligned_le16(&report[i + 3]);
+ features->x_phy =
+ get_unaligned_le16(&report[i + 6]);
+ features->unit = report[i + 9];
+ features->unitExpo = report[i + 11];
+ i += 12;
+ }
} else if (pen) {
/* penabled only accepts exact bytes of data */
if (features->type == TABLETPC2FG)
features->pktlen = WACOM_PKGLEN_GRAPHIRE;
+ if (features->type == BAMBOO_PT)
+ features->pktlen = WACOM_PKGLEN_BBFUN;
features->device_type = BTN_TOOL_PEN;
features->x_max =
get_unaligned_le16(&report[i + 3]);
@@ -235,6 +251,15 @@ static int wacom_parse_hid(struct usb_interface *intf, struct hid_descriptor *hi
features->y_phy =
get_unaligned_le16(&report[i + 6]);
i += 7;
+ } else if (features->type == BAMBOO_PT) {
+ /* need to reset back */
+ features->pktlen = WACOM_PKGLEN_BBTOUCH;
+ features->device_type = BTN_TOOL_TRIPLETAP;
+ features->y_phy =
+ get_unaligned_le16(&report[i + 3]);
+ features->y_max =
+ get_unaligned_le16(&report[i + 6]);
+ i += 12;
} else {
features->y_max =
features->x_max;
@@ -246,6 +271,8 @@ static int wacom_parse_hid(struct usb_interface *intf, struct hid_descriptor *hi
/* penabled only accepts exact bytes of data */
if (features->type == TABLETPC2FG)
features->pktlen = WACOM_PKGLEN_GRAPHIRE;
+ if (features->type == BAMBOO_PT)
+ features->pktlen = WACOM_PKGLEN_BBFUN;
features->device_type = BTN_TOOL_PEN;
features->y_max =
get_unaligned_le16(&report[i + 3]);
@@ -296,8 +323,9 @@ static int wacom_query_tablet_data(struct usb_interface *intf, struct wacom_feat
if (!rep_data)
return error;
- /* ask to report tablet data if it is 2FGT or not a Tablet PC */
- if (features->device_type == BTN_TOOL_TRIPLETAP) {
+ /* ask to report tablet data if it is a 2FGT Tablet PC or
+ * not a Tablet PC */
+ if (features->type == TABLETPC2FG) {
do {
rep_data[0] = 3;
rep_data[1] = 4;
@@ -309,7 +337,7 @@ static int wacom_query_tablet_data(struct usb_interface *intf, struct wacom_feat
WAC_HID_FEATURE_REPORT, report_id,
rep_data, 3);
} while ((error < 0 || rep_data[1] != 4) && limit++ < 5);
- } else if (features->type != TABLETPC && features->type != TABLETPC2FG) {
+ } else if (features->type != TABLETPC) {
do {
rep_data[0] = 2;
rep_data[1] = 2;
@@ -334,11 +362,16 @@ static int wacom_retrieve_hid_descriptor(struct usb_interface *intf,
struct usb_host_interface *interface = intf->cur_altsetting;
struct hid_descriptor *hid_desc;
- /* default device to penabled */
+ /* default features */
features->device_type = BTN_TOOL_PEN;
-
- /* only Tablet PCs need to retrieve the info */
- if ((features->type != TABLETPC) && (features->type != TABLETPC2FG))
+ features->x_fuzz = 4;
+ features->y_fuzz = 4;
+ features->pressure_fuzz = 0;
+ features->distance_fuzz = 0;
+
+ /* only Tablet PCs and Bamboo P&T need to retrieve the info */
+ if ((features->type != TABLETPC) && (features->type != TABLETPC2FG) &&
+ (features->type != BAMBOO_PT))
goto out;
if (usb_get_extra_descriptor(interface, HID_DEVICET_HID, &hid_desc)) {
@@ -353,12 +386,6 @@ static int wacom_retrieve_hid_descriptor(struct usb_interface *intf,
if (error)
goto out;
- /* touch device found but size is not defined. use default */
- if (features->device_type == BTN_TOOL_DOUBLETAP && !features->x_max) {
- features->x_max = 1023;
- features->y_max = 1023;
- }
-
out:
return error;
}
@@ -494,9 +521,11 @@ static int wacom_probe(struct usb_interface *intf, const struct usb_device_id *i
if (error)
goto fail2;
+ wacom_setup_device_quirks(features);
+
strlcpy(wacom_wac->name, features->name, sizeof(wacom_wac->name));
- if (features->type == TABLETPC || features->type == TABLETPC2FG) {
+ if (features->quirks & WACOM_QUIRK_MULTI_INPUT) {
/* Append the device type to the name */
strlcat(wacom_wac->name,
features->device_type == BTN_TOOL_PEN ?
diff --git a/drivers/input/tablet/wacom_wac.c b/drivers/input/tablet/wacom_wac.c
index 47fd7a041c52..4852b440960a 100644
--- a/drivers/input/tablet/wacom_wac.c
+++ b/drivers/input/tablet/wacom_wac.c
@@ -857,6 +857,134 @@ static int wacom_tpc_irq(struct wacom_wac *wacom, size_t len)
return retval;
}
+static int wacom_bpt_touch(struct wacom_wac *wacom)
+{
+ struct wacom_features *features = &wacom->features;
+ struct input_dev *input = wacom->input;
+ unsigned char *data = wacom->data;
+ int sp = 0, sx = 0, sy = 0, count = 0;
+ int i;
+
+ for (i = 0; i < 2; i++) {
+ int p = data[9 * i + 2];
+ input_mt_slot(input, i);
+ /*
+ * Touch events need to be disabled while stylus is
+ * in proximity because user's hand is resting on touchpad
+ * and sending unwanted events. User expects tablet buttons
+ * to continue working though.
+ */
+ if (p && !wacom->shared->stylus_in_proximity) {
+ int x = get_unaligned_be16(&data[9 * i + 3]) & 0x7ff;
+ int y = get_unaligned_be16(&data[9 * i + 5]) & 0x7ff;
+ if (features->quirks & WACOM_QUIRK_BBTOUCH_LOWRES) {
+ x <<= 5;
+ y <<= 5;
+ }
+ input_report_abs(input, ABS_MT_PRESSURE, p);
+ input_report_abs(input, ABS_MT_POSITION_X, x);
+ input_report_abs(input, ABS_MT_POSITION_Y, y);
+ if (wacom->id[i] < 0)
+ wacom->id[i] = wacom->trk_id++ & MAX_TRACKING_ID;
+ if (!count++)
+ sp = p, sx = x, sy = y;
+ } else {
+ wacom->id[i] = -1;
+ }
+ input_report_abs(input, ABS_MT_TRACKING_ID, wacom->id[i]);
+ }
+
+ input_report_key(input, BTN_TOUCH, count > 0);
+ input_report_key(input, BTN_TOOL_FINGER, count == 1);
+ input_report_key(input, BTN_TOOL_DOUBLETAP, count == 2);
+
+ input_report_abs(input, ABS_PRESSURE, sp);
+ input_report_abs(input, ABS_X, sx);
+ input_report_abs(input, ABS_Y, sy);
+
+ input_report_key(input, BTN_LEFT, (data[1] & 0x08) != 0);
+ input_report_key(input, BTN_FORWARD, (data[1] & 0x04) != 0);
+ input_report_key(input, BTN_BACK, (data[1] & 0x02) != 0);
+ input_report_key(input, BTN_RIGHT, (data[1] & 0x01) != 0);
+
+ input_sync(input);
+
+ return 0;
+}
+
+static int wacom_bpt_pen(struct wacom_wac *wacom)
+{
+ struct input_dev *input = wacom->input;
+ unsigned char *data = wacom->data;
+ int prox = 0, x = 0, y = 0, p = 0, d = 0, pen = 0, btn1 = 0, btn2 = 0;
+
+ /*
+ * Similar to Graphire protocol, data[1] & 0x20 is proximity and
+ * data[1] & 0x18 is tool ID. 0x30 is safety check to ignore
+ * 2 unused tool ID's.
+ */
+ prox = (data[1] & 0x30) == 0x30;
+
+ /*
+ * All reports shared between PEN and RUBBER tool must be
+ * forced to a known starting value (zero) when transitioning to
+ * out-of-prox.
+ *
+ * If not reset then, to userspace, it will look like lost events
+ * if new tool comes in-prox with same values as previous tool sent.
+ *
+ * Hardware does report zero in most out-of-prox cases but not all.
+ */
+ if (prox) {
+ if (!wacom->shared->stylus_in_proximity) {
+ if (data[1] & 0x08) {
+ wacom->tool[0] = BTN_TOOL_RUBBER;
+ wacom->id[0] = ERASER_DEVICE_ID;
+ } else {
+ wacom->tool[0] = BTN_TOOL_PEN;
+ wacom->id[0] = STYLUS_DEVICE_ID;
+ }
+ wacom->shared->stylus_in_proximity = true;
+ }
+ x = le16_to_cpup((__le16 *)&data[2]);
+ y = le16_to_cpup((__le16 *)&data[4]);
+ p = le16_to_cpup((__le16 *)&data[6]);
+ d = data[8];
+ pen = data[1] & 0x01;
+ btn1 = data[1] & 0x02;
+ btn2 = data[1] & 0x04;
+ }
+
+ input_report_key(input, BTN_TOUCH, pen);
+ input_report_key(input, BTN_STYLUS, btn1);
+ input_report_key(input, BTN_STYLUS2, btn2);
+
+ input_report_abs(input, ABS_X, x);
+ input_report_abs(input, ABS_Y, y);
+ input_report_abs(input, ABS_PRESSURE, p);
+ input_report_abs(input, ABS_DISTANCE, d);
+
+ if (!prox) {
+ wacom->id[0] = 0;
+ wacom->shared->stylus_in_proximity = false;
+ }
+
+ input_report_key(input, wacom->tool[0], prox); /* PEN or RUBBER */
+ input_report_abs(input, ABS_MISC, wacom->id[0]); /* TOOL ID */
+
+ return 1;
+}
+
+static int wacom_bpt_irq(struct wacom_wac *wacom, size_t len)
+{
+ if (len == WACOM_PKGLEN_BBTOUCH)
+ return wacom_bpt_touch(wacom);
+ else if (len == WACOM_PKGLEN_BBFUN)
+ return wacom_bpt_pen(wacom);
+
+ return 0;
+}
+
void wacom_wac_irq(struct wacom_wac *wacom_wac, size_t len)
{
bool sync;
@@ -902,6 +1030,10 @@ void wacom_wac_irq(struct wacom_wac *wacom_wac, size_t len)
sync = wacom_tpc_irq(wacom_wac, len);
break;
+ case BAMBOO_PT:
+ sync = wacom_bpt_irq(wacom_wac, len);
+ break;
+
default:
sync = false;
break;
@@ -911,26 +1043,17 @@ void wacom_wac_irq(struct wacom_wac *wacom_wac, size_t len)
input_sync(wacom_wac->input);
}
-static void wacom_setup_intuos(struct wacom_wac *wacom_wac)
+static void wacom_setup_cintiq(struct wacom_wac *wacom_wac)
{
struct input_dev *input_dev = wacom_wac->input;
input_set_capability(input_dev, EV_MSC, MSC_SERIAL);
- input_set_capability(input_dev, EV_REL, REL_WHEEL);
-
- __set_bit(BTN_LEFT, input_dev->keybit);
- __set_bit(BTN_RIGHT, input_dev->keybit);
- __set_bit(BTN_MIDDLE, input_dev->keybit);
- __set_bit(BTN_SIDE, input_dev->keybit);
- __set_bit(BTN_EXTRA, input_dev->keybit);
__set_bit(BTN_TOOL_RUBBER, input_dev->keybit);
__set_bit(BTN_TOOL_PEN, input_dev->keybit);
- __set_bit(BTN_TOOL_MOUSE, input_dev->keybit);
__set_bit(BTN_TOOL_BRUSH, input_dev->keybit);
__set_bit(BTN_TOOL_PENCIL, input_dev->keybit);
__set_bit(BTN_TOOL_AIRBRUSH, input_dev->keybit);
- __set_bit(BTN_TOOL_LENS, input_dev->keybit);
__set_bit(BTN_STYLUS, input_dev->keybit);
__set_bit(BTN_STYLUS2, input_dev->keybit);
@@ -939,10 +1062,55 @@ static void wacom_setup_intuos(struct wacom_wac *wacom_wac)
input_set_abs_params(input_dev, ABS_WHEEL, 0, 1023, 0, 0);
input_set_abs_params(input_dev, ABS_TILT_X, 0, 127, 0, 0);
input_set_abs_params(input_dev, ABS_TILT_Y, 0, 127, 0, 0);
+}
+
+static void wacom_setup_intuos(struct wacom_wac *wacom_wac)
+{
+ struct input_dev *input_dev = wacom_wac->input;
+
+ input_set_capability(input_dev, EV_REL, REL_WHEEL);
+
+ wacom_setup_cintiq(wacom_wac);
+
+ __set_bit(BTN_LEFT, input_dev->keybit);
+ __set_bit(BTN_RIGHT, input_dev->keybit);
+ __set_bit(BTN_MIDDLE, input_dev->keybit);
+ __set_bit(BTN_SIDE, input_dev->keybit);
+ __set_bit(BTN_EXTRA, input_dev->keybit);
+ __set_bit(BTN_TOOL_MOUSE, input_dev->keybit);
+ __set_bit(BTN_TOOL_LENS, input_dev->keybit);
+
input_set_abs_params(input_dev, ABS_RZ, -900, 899, 0, 0);
input_set_abs_params(input_dev, ABS_THROTTLE, -1023, 1023, 0, 0);
}
+void wacom_setup_device_quirks(struct wacom_features *features)
+{
+
+ /* touch device found but size is not defined. use default */
+ if (features->device_type == BTN_TOOL_DOUBLETAP && !features->x_max) {
+ features->x_max = 1023;
+ features->y_max = 1023;
+ }
+
+ /* these devices have multiple inputs */
+ if (features->type == TABLETPC || features->type == TABLETPC2FG ||
+ features->type == BAMBOO_PT)
+ features->quirks |= WACOM_QUIRK_MULTI_INPUT;
+
+ /* quirks for bamboo touch */
+ if (features->type == BAMBOO_PT &&
+ features->device_type == BTN_TOOL_TRIPLETAP) {
+ features->x_max <<= 5;
+ features->y_max <<= 5;
+ features->x_fuzz <<= 5;
+ features->y_fuzz <<= 5;
+ features->pressure_max = 256;
+ features->pressure_fuzz = 16;
+ features->quirks |= WACOM_QUIRK_BBTOUCH_LOWRES;
+ }
+}
+
void wacom_setup_input_capabilities(struct input_dev *input_dev,
struct wacom_wac *wacom_wac)
{
@@ -953,9 +1121,12 @@ void wacom_setup_input_capabilities(struct input_dev *input_dev,
__set_bit(BTN_TOUCH, input_dev->keybit);
- input_set_abs_params(input_dev, ABS_X, 0, features->x_max, 4, 0);
- input_set_abs_params(input_dev, ABS_Y, 0, features->y_max, 4, 0);
- input_set_abs_params(input_dev, ABS_PRESSURE, 0, features->pressure_max, 0, 0);
+ input_set_abs_params(input_dev, ABS_X, 0, features->x_max,
+ features->x_fuzz, 0);
+ input_set_abs_params(input_dev, ABS_Y, 0, features->y_max,
+ features->y_fuzz, 0);
+ input_set_abs_params(input_dev, ABS_PRESSURE, 0, features->pressure_max,
+ features->pressure_fuzz, 0);
__set_bit(ABS_MISC, input_dev->absbit);
@@ -1005,9 +1176,19 @@ void wacom_setup_input_capabilities(struct input_dev *input_dev,
__set_bit(BTN_9, input_dev->keybit);
/* fall through */
+ case CINTIQ:
+ for (i = 0; i < 8; i++)
+ __set_bit(BTN_0 + i, input_dev->keybit);
+ __set_bit(BTN_TOOL_FINGER, input_dev->keybit);
+
+ input_set_abs_params(input_dev, ABS_RX, 0, 4096, 0, 0);
+ input_set_abs_params(input_dev, ABS_RY, 0, 4096, 0, 0);
+ input_set_abs_params(input_dev, ABS_Z, -900, 899, 0, 0);
+ wacom_setup_cintiq(wacom_wac);
+ break;
+
case INTUOS3:
case INTUOS3L:
- case CINTIQ:
__set_bit(BTN_4, input_dev->keybit);
__set_bit(BTN_5, input_dev->keybit);
__set_bit(BTN_6, input_dev->keybit);
@@ -1078,6 +1259,38 @@ void wacom_setup_input_capabilities(struct input_dev *input_dev,
case PENPARTNER:
__set_bit(BTN_TOOL_RUBBER, input_dev->keybit);
break;
+
+ case BAMBOO_PT:
+ __clear_bit(ABS_MISC, input_dev->absbit);
+
+ if (features->device_type == BTN_TOOL_TRIPLETAP) {
+ __set_bit(BTN_LEFT, input_dev->keybit);
+ __set_bit(BTN_FORWARD, input_dev->keybit);
+ __set_bit(BTN_BACK, input_dev->keybit);
+ __set_bit(BTN_RIGHT, input_dev->keybit);
+
+ __set_bit(BTN_TOOL_FINGER, input_dev->keybit);
+ __set_bit(BTN_TOOL_DOUBLETAP, input_dev->keybit);
+
+ input_mt_create_slots(input_dev, 2);
+ input_set_abs_params(input_dev, ABS_MT_POSITION_X,
+ 0, features->x_max,
+ features->x_fuzz, 0);
+ input_set_abs_params(input_dev, ABS_MT_POSITION_Y,
+ 0, features->y_max,
+ features->y_fuzz, 0);
+ input_set_abs_params(input_dev, ABS_MT_PRESSURE,
+ 0, features->pressure_max,
+ features->pressure_fuzz, 0);
+ input_set_abs_params(input_dev, ABS_MT_TRACKING_ID, 0,
+ MAX_TRACKING_ID, 0, 0);
+ } else if (features->device_type == BTN_TOOL_PEN) {
+ __set_bit(BTN_TOOL_RUBBER, input_dev->keybit);
+ __set_bit(BTN_TOOL_PEN, input_dev->keybit);
+ __set_bit(BTN_STYLUS, input_dev->keybit);
+ __set_bit(BTN_STYLUS2, input_dev->keybit);
+ }
+ break;
}
}
@@ -1215,6 +1428,20 @@ static const struct wacom_features wacom_features_0xE3 =
{ "Wacom ISDv4 E3", WACOM_PKGLEN_TPC2FG, 26202, 16325, 255, 0, TABLETPC2FG };
static const struct wacom_features wacom_features_0x47 =
{ "Wacom Intuos2 6x8", WACOM_PKGLEN_INTUOS, 20320, 16240, 1023, 31, INTUOS };
+static struct wacom_features wacom_features_0xD0 =
+ { "Wacom Bamboo 2FG", WACOM_PKGLEN_BBFUN, 14720, 9200, 1023, 63, BAMBOO_PT };
+static struct wacom_features wacom_features_0xD1 =
+ { "Wacom Bamboo 2FG 4x5", WACOM_PKGLEN_BBFUN, 14720, 9200, 1023, 63, BAMBOO_PT };
+static struct wacom_features wacom_features_0xD2 =
+ { "Wacom Bamboo Craft", WACOM_PKGLEN_BBFUN, 14720, 9200, 1023, 63, BAMBOO_PT };
+static struct wacom_features wacom_features_0xD3 =
+ { "Wacom Bamboo 2FG 6x8", WACOM_PKGLEN_BBFUN, 21648, 13530, 1023, 63, BAMBOO_PT };
+static struct wacom_features wacom_features_0xD8 =
+ { "Wacom Bamboo Comic 2FG", WACOM_PKGLEN_BBFUN, 21648, 13530, 1023, 63, BAMBOO_PT };
+static struct wacom_features wacom_features_0xDA =
+ { "Wacom Bamboo 2FG 4x5 SE", WACOM_PKGLEN_BBFUN, 14720, 9200, 1023, 63, BAMBOO_PT };
+static struct wacom_features wacom_features_0xDB =
+ { "Wacom Bamboo 2FG 6x8 SE", WACOM_PKGLEN_BBFUN, 21648, 13530, 1023, 63, BAMBOO_PT };
#define USB_DEVICE_WACOM(prod) \
USB_DEVICE(USB_VENDOR_ID_WACOM, prod), \
@@ -1279,6 +1506,13 @@ const struct usb_device_id wacom_ids[] = {
{ USB_DEVICE_WACOM(0xC6) },
{ USB_DEVICE_WACOM(0xC7) },
{ USB_DEVICE_WACOM(0xCE) },
+ { USB_DEVICE_WACOM(0xD0) },
+ { USB_DEVICE_WACOM(0xD1) },
+ { USB_DEVICE_WACOM(0xD2) },
+ { USB_DEVICE_WACOM(0xD3) },
+ { USB_DEVICE_WACOM(0xD8) },
+ { USB_DEVICE_WACOM(0xDA) },
+ { USB_DEVICE_WACOM(0xDB) },
{ USB_DEVICE_WACOM(0xF0) },
{ USB_DEVICE_WACOM(0xCC) },
{ USB_DEVICE_WACOM(0x90) },
diff --git a/drivers/input/tablet/wacom_wac.h b/drivers/input/tablet/wacom_wac.h
index 99e1a54cd305..00ca01541d89 100644
--- a/drivers/input/tablet/wacom_wac.h
+++ b/drivers/input/tablet/wacom_wac.h
@@ -21,6 +21,7 @@
#define WACOM_PKGLEN_INTUOS 10
#define WACOM_PKGLEN_TPC1FG 5
#define WACOM_PKGLEN_TPC2FG 14
+#define WACOM_PKGLEN_BBTOUCH 20
/* device IDs */
#define STYLUS_DEVICE_ID 0x02
@@ -37,6 +38,13 @@
#define WACOM_REPORT_TPC1FG 6
#define WACOM_REPORT_TPC2FG 13
+/* device quirks */
+#define WACOM_QUIRK_MULTI_INPUT 0x0001
+#define WACOM_QUIRK_BBTOUCH_LOWRES 0x0002
+
+/* largest reported tracking id */
+#define MAX_TRACKING_ID 0xfff
+
enum {
PENPARTNER = 0,
GRAPHIRE,
@@ -44,6 +52,7 @@ enum {
PTU,
PL,
DTU,
+ BAMBOO_PT,
INTUOS,
INTUOS3S,
INTUOS3,
@@ -73,6 +82,11 @@ struct wacom_features {
int y_phy;
unsigned char unit;
unsigned char unitExpo;
+ int x_fuzz;
+ int y_fuzz;
+ int pressure_fuzz;
+ int distance_fuzz;
+ unsigned quirks;
};
struct wacom_shared {
@@ -86,6 +100,7 @@ struct wacom_wac {
int id[3];
__u32 serial[2];
int last_finger;
+ int trk_id;
struct wacom_features features;
struct wacom_shared *shared;
struct input_dev *input;
diff --git a/drivers/input/touchscreen/Kconfig b/drivers/input/touchscreen/Kconfig
index 0069d9703fda..06ea8da95c62 100644
--- a/drivers/input/touchscreen/Kconfig
+++ b/drivers/input/touchscreen/Kconfig
@@ -98,6 +98,18 @@ config TOUCHSCREEN_BITSY
To compile this driver as a module, choose M here: the
module will be called h3600_ts_input.
+config TOUCHSCREEN_BU21013
+ tristate "BU21013 based touch panel controllers"
+ depends on I2C
+ help
+ Say Y here if you have a bu21013 touchscreen connected to
+ your system.
+
+ If unsure, say N.
+
+ To compile this driver as a module, choose M here: the
+ module will be called bu21013_ts.
+
config TOUCHSCREEN_CY8CTMG110
tristate "cy8ctmg110 touchscreen"
depends on I2C
@@ -214,6 +226,16 @@ config TOUCHSCREEN_WACOM_W8001
To compile this driver as a module, choose M here: the
module will be called wacom_w8001.
+config TOUCHSCREEN_LPC32XX
+ tristate "LPC32XX touchscreen controller"
+ depends on ARCH_LPC32XX
+ help
+ Say Y here if you have a LPC32XX device and want
+ to support the built-in touchscreen.
+
+ To compile this driver as a module, choose M here: the
+ module will be called lpc32xx_ts.
+
config TOUCHSCREEN_MCS5000
tristate "MELFAS MCS-5000 touchscreen"
depends on I2C
@@ -250,6 +272,18 @@ config TOUCHSCREEN_INEXIO
To compile this driver as a module, choose M here: the
module will be called inexio.
+config TOUCHSCREEN_INTEL_MID
+ tristate "Intel MID platform resistive touchscreen"
+ depends on INTEL_SCU_IPC
+ help
+ Say Y here if you have a Intel MID based touchscreen in
+ your system.
+
+ If unsure, say N.
+
+ To compile this driver as a module, choose M here: the
+ module will be called intel_mid_touch.
+
config TOUCHSCREEN_MK712
tristate "ICS MicroClock MK712 touchscreen"
help
@@ -328,6 +362,15 @@ config TOUCHSCREEN_MIGOR
To compile this driver as a module, choose M here: the
module will be called migor_ts.
+config TOUCHSCREEN_TNETV107X
+ tristate "TI TNETV107X touchscreen support"
+ depends on ARCH_DAVINCI_TNETV107X
+ help
+ Say Y here if you want to use the TNETV107X touchscreen.
+
+ To compile this driver as a module, choose M here: the
+ module will be called tnetv107x-ts.
+
config TOUCHSCREEN_TOUCHRIGHT
tristate "Touchright serial touchscreen"
select SERIO
diff --git a/drivers/input/touchscreen/Makefile b/drivers/input/touchscreen/Makefile
index 28217e1dcafd..7cc1b4f4b677 100644
--- a/drivers/input/touchscreen/Makefile
+++ b/drivers/input/touchscreen/Makefile
@@ -14,6 +14,7 @@ obj-$(CONFIG_TOUCHSCREEN_AD7879_SPI) += ad7879-spi.o
obj-$(CONFIG_TOUCHSCREEN_ADS7846) += ads7846.o
obj-$(CONFIG_TOUCHSCREEN_ATMEL_TSADCC) += atmel_tsadcc.o
obj-$(CONFIG_TOUCHSCREEN_BITSY) += h3600_ts_input.o
+obj-$(CONFIG_TOUCHSCREEN_BU21013) += bu21013_ts.o
obj-$(CONFIG_TOUCHSCREEN_CY8CTMG110) += cy8ctmg110_ts.o
obj-$(CONFIG_TOUCHSCREEN_DA9034) += da9034-ts.o
obj-$(CONFIG_TOUCHSCREEN_DYNAPRO) += dynapro.o
@@ -23,6 +24,8 @@ obj-$(CONFIG_TOUCHSCREEN_EETI) += eeti_ts.o
obj-$(CONFIG_TOUCHSCREEN_ELO) += elo.o
obj-$(CONFIG_TOUCHSCREEN_FUJITSU) += fujitsu_ts.o
obj-$(CONFIG_TOUCHSCREEN_INEXIO) += inexio.o
+obj-$(CONFIG_TOUCHSCREEN_INTEL_MID) += intel-mid-touch.o
+obj-$(CONFIG_TOUCHSCREEN_LPC32XX) += lpc32xx_ts.o
obj-$(CONFIG_TOUCHSCREEN_MC13783) += mc13783_ts.o
obj-$(CONFIG_TOUCHSCREEN_MCS5000) += mcs5000_ts.o
obj-$(CONFIG_TOUCHSCREEN_MIGOR) += migor_ts.o
@@ -37,6 +40,7 @@ obj-$(CONFIG_TOUCHSCREEN_PENMOUNT) += penmount.o
obj-$(CONFIG_TOUCHSCREEN_QT602240) += qt602240_ts.o
obj-$(CONFIG_TOUCHSCREEN_S3C2410) += s3c2410_ts.o
obj-$(CONFIG_TOUCHSCREEN_STMPE) += stmpe-ts.o
+obj-$(CONFIG_TOUCHSCREEN_TNETV107X) += tnetv107x-ts.o
obj-$(CONFIG_TOUCHSCREEN_TOUCHIT213) += touchit213.o
obj-$(CONFIG_TOUCHSCREEN_TOUCHRIGHT) += touchright.o
obj-$(CONFIG_TOUCHSCREEN_TOUCHWIN) += touchwin.o
diff --git a/drivers/input/touchscreen/ad7877.c b/drivers/input/touchscreen/ad7877.c
index 5f0221cffef9..a1952fcc083e 100644
--- a/drivers/input/touchscreen/ad7877.c
+++ b/drivers/input/touchscreen/ad7877.c
@@ -191,13 +191,12 @@ struct ad7877 {
struct spi_message msg;
struct mutex mutex;
- unsigned disabled:1; /* P: mutex */
- unsigned gpio3:1; /* P: mutex */
- unsigned gpio4:1; /* P: mutex */
+ bool disabled; /* P: mutex */
+ bool gpio3; /* P: mutex */
+ bool gpio4; /* P: mutex */
spinlock_t lock;
struct timer_list timer; /* P: lock */
- unsigned pending:1; /* P: lock */
/*
* DMA (thus cache coherency maintenance) requires the
@@ -206,8 +205,8 @@ struct ad7877 {
u16 conversion_data[AD7877_NR_SENSE] ____cacheline_aligned;
};
-static int gpio3;
-module_param(gpio3, int, 0);
+static bool gpio3;
+module_param(gpio3, bool, 0);
MODULE_PARM_DESC(gpio3, "If gpio3 is set to 1 AUX3 acts as GPIO3");
/*
@@ -230,6 +229,7 @@ static int ad7877_read(struct spi_device *spi, u16 reg)
AD7877_READADD(reg));
req->xfer[0].tx_buf = &req->command;
req->xfer[0].len = 2;
+ req->xfer[0].cs_change = 1;
req->xfer[1].rx_buf = &req->sample;
req->xfer[1].len = 2;
@@ -295,20 +295,25 @@ static int ad7877_read_adc(struct spi_device *spi, unsigned command)
req->xfer[0].tx_buf = &req->reset;
req->xfer[0].len = 2;
+ req->xfer[0].cs_change = 1;
req->xfer[1].tx_buf = &req->ref_on;
req->xfer[1].len = 2;
req->xfer[1].delay_usecs = ts->vref_delay_usecs;
+ req->xfer[1].cs_change = 1;
req->xfer[2].tx_buf = &req->command;
req->xfer[2].len = 2;
req->xfer[2].delay_usecs = ts->vref_delay_usecs;
+ req->xfer[2].cs_change = 1;
req->xfer[3].rx_buf = &req->sample;
req->xfer[3].len = 2;
+ req->xfer[3].cs_change = 1;
req->xfer[4].tx_buf = &ts->cmd_crtl2; /*REF OFF*/
req->xfer[4].len = 2;
+ req->xfer[4].cs_change = 1;
req->xfer[5].tx_buf = &ts->cmd_crtl1; /*DEFAULT*/
req->xfer[5].len = 2;
@@ -327,7 +332,7 @@ static int ad7877_read_adc(struct spi_device *spi, unsigned command)
return status ? : sample;
}
-static void ad7877_rx(struct ad7877 *ts)
+static int ad7877_process_data(struct ad7877 *ts)
{
struct input_dev *input_dev = ts->input;
unsigned Rt;
@@ -354,11 +359,25 @@ static void ad7877_rx(struct ad7877 *ts)
Rt /= z1;
Rt = (Rt + 2047) >> 12;
+ /*
+ * Sample found inconsistent, pressure is beyond
+ * the maximum. Don't report it to user space.
+ */
+ if (Rt > ts->pressure_max)
+ return -EINVAL;
+
+ if (!timer_pending(&ts->timer))
+ input_report_key(input_dev, BTN_TOUCH, 1);
+
input_report_abs(input_dev, ABS_X, x);
input_report_abs(input_dev, ABS_Y, y);
input_report_abs(input_dev, ABS_PRESSURE, Rt);
input_sync(input_dev);
+
+ return 0;
}
+
+ return -EINVAL;
}
static inline void ad7877_ts_event_release(struct ad7877 *ts)
@@ -366,72 +385,56 @@ static inline void ad7877_ts_event_release(struct ad7877 *ts)
struct input_dev *input_dev = ts->input;
input_report_abs(input_dev, ABS_PRESSURE, 0);
+ input_report_key(input_dev, BTN_TOUCH, 0);
input_sync(input_dev);
}
static void ad7877_timer(unsigned long handle)
{
struct ad7877 *ts = (void *)handle;
+ unsigned long flags;
+ spin_lock_irqsave(&ts->lock, flags);
ad7877_ts_event_release(ts);
+ spin_unlock_irqrestore(&ts->lock, flags);
}
static irqreturn_t ad7877_irq(int irq, void *handle)
{
struct ad7877 *ts = handle;
unsigned long flags;
- int status;
+ int error;
- /*
- * The repeated conversion sequencer controlled by TMR kicked off
- * too fast. We ignore the last and process the sample sequence
- * currently in the queue. It can't be older than 9.4ms, and we
- * need to avoid that ts->msg doesn't get issued twice while in work.
- */
+ error = spi_sync(ts->spi, &ts->msg);
+ if (error) {
+ dev_err(&ts->spi->dev, "spi_sync --> %d\n", error);
+ goto out;
+ }
spin_lock_irqsave(&ts->lock, flags);
- if (!ts->pending) {
- ts->pending = 1;
-
- status = spi_async(ts->spi, &ts->msg);
- if (status)
- dev_err(&ts->spi->dev, "spi_sync --> %d\n", status);
- }
+ error = ad7877_process_data(ts);
+ if (!error)
+ mod_timer(&ts->timer, jiffies + TS_PEN_UP_TIMEOUT);
spin_unlock_irqrestore(&ts->lock, flags);
+out:
return IRQ_HANDLED;
}
-static void ad7877_callback(void *_ts)
-{
- struct ad7877 *ts = _ts;
-
- spin_lock_irq(&ts->lock);
-
- ad7877_rx(ts);
- ts->pending = 0;
- mod_timer(&ts->timer, jiffies + TS_PEN_UP_TIMEOUT);
-
- spin_unlock_irq(&ts->lock);
-}
-
static void ad7877_disable(struct ad7877 *ts)
{
mutex_lock(&ts->mutex);
if (!ts->disabled) {
- ts->disabled = 1;
+ ts->disabled = true;
disable_irq(ts->spi->irq);
- /* Wait for spi_async callback */
- while (ts->pending)
- msleep(1);
-
if (del_timer_sync(&ts->timer))
ad7877_ts_event_release(ts);
}
- /* we know the chip's in lowpower mode since we always
+ /*
+ * We know the chip's in lowpower mode since we always
* leave it that way after every request
*/
@@ -443,7 +446,7 @@ static void ad7877_enable(struct ad7877 *ts)
mutex_lock(&ts->mutex);
if (ts->disabled) {
- ts->disabled = 0;
+ ts->disabled = false;
enable_irq(ts->spi->irq);
}
@@ -453,7 +456,7 @@ static void ad7877_enable(struct ad7877 *ts)
#define SHOW(name) static ssize_t \
name ## _show(struct device *dev, struct device_attribute *attr, char *buf) \
{ \
- struct ad7877 *ts = dev_get_drvdata(dev); \
+ struct ad7877 *ts = dev_get_drvdata(dev); \
ssize_t v = ad7877_read_adc(ts->spi, \
AD7877_READ_CHAN(name)); \
if (v < 0) \
@@ -473,7 +476,7 @@ SHOW(temp2)
static ssize_t ad7877_disable_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
- struct ad7877 *ts = dev_get_drvdata(dev);
+ struct ad7877 *ts = dev_get_drvdata(dev);
return sprintf(buf, "%u\n", ts->disabled);
}
@@ -503,7 +506,7 @@ static DEVICE_ATTR(disable, 0664, ad7877_disable_show, ad7877_disable_store);
static ssize_t ad7877_dac_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
- struct ad7877 *ts = dev_get_drvdata(dev);
+ struct ad7877 *ts = dev_get_drvdata(dev);
return sprintf(buf, "%u\n", ts->dac);
}
@@ -533,7 +536,7 @@ static DEVICE_ATTR(dac, 0664, ad7877_dac_show, ad7877_dac_store);
static ssize_t ad7877_gpio3_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
- struct ad7877 *ts = dev_get_drvdata(dev);
+ struct ad7877 *ts = dev_get_drvdata(dev);
return sprintf(buf, "%u\n", ts->gpio3);
}
@@ -564,7 +567,7 @@ static DEVICE_ATTR(gpio3, 0664, ad7877_gpio3_show, ad7877_gpio3_store);
static ssize_t ad7877_gpio4_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
- struct ad7877 *ts = dev_get_drvdata(dev);
+ struct ad7877 *ts = dev_get_drvdata(dev);
return sprintf(buf, "%u\n", ts->gpio4);
}
@@ -597,16 +600,35 @@ static struct attribute *ad7877_attributes[] = {
&dev_attr_temp2.attr,
&dev_attr_aux1.attr,
&dev_attr_aux2.attr,
+ &dev_attr_aux3.attr,
&dev_attr_bat1.attr,
&dev_attr_bat2.attr,
&dev_attr_disable.attr,
&dev_attr_dac.attr,
+ &dev_attr_gpio3.attr,
&dev_attr_gpio4.attr,
NULL
};
+static mode_t ad7877_attr_is_visible(struct kobject *kobj,
+ struct attribute *attr, int n)
+{
+ mode_t mode = attr->mode;
+
+ if (attr == &dev_attr_aux3.attr) {
+ if (gpio3)
+ mode = 0;
+ } else if (attr == &dev_attr_gpio3.attr) {
+ if (!gpio3)
+ mode = 0;
+ }
+
+ return mode;
+}
+
static const struct attribute_group ad7877_attr_group = {
- .attrs = ad7877_attributes,
+ .is_visible = ad7877_attr_is_visible,
+ .attrs = ad7877_attributes,
};
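The hunk above trades conditional device_create_file() calls for a group-wide .is_visible callback, so every attribute is listed once and the callback decides at registration time which ones show up. A hedged sketch of the same pattern (names with an example_ prefix are hypothetical; struct attribute_group, sysfs_create_group() and the mode_t-returning callback are the real interfaces of this kernel era):

static mode_t example_attr_is_visible(struct kobject *kobj,
				      struct attribute *attr, int n)
{
	/* Returning 0 hides the attribute; attr->mode keeps it. */
	if (attr == &dev_attr_gpio3.attr && !gpio3)
		return 0;

	return attr->mode;
}

static const struct attribute_group example_attr_group = {
	.is_visible	= example_attr_is_visible,
	.attrs		= example_attributes,	/* hypothetical full list */
};

/* probe() then needs only one call for the whole set: */
/*	err = sysfs_create_group(&spi->dev.kobj, &example_attr_group); */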
static void ad7877_setup_ts_def_msg(struct spi_device *spi, struct ad7877 *ts)
@@ -635,22 +657,25 @@ static void ad7877_setup_ts_def_msg(struct spi_device *spi, struct ad7877 *ts)
spi_message_init(m);
- m->complete = ad7877_callback;
m->context = ts;
ts->xfer[0].tx_buf = &ts->cmd_crtl1;
ts->xfer[0].len = 2;
+ ts->xfer[0].cs_change = 1;
spi_message_add_tail(&ts->xfer[0], m);
ts->xfer[1].tx_buf = &ts->cmd_dummy; /* Send ZERO */
ts->xfer[1].len = 2;
+ ts->xfer[1].cs_change = 1;
spi_message_add_tail(&ts->xfer[1], m);
- for (i = 0; i < 11; i++) {
+ for (i = 0; i < AD7877_NR_SENSE; i++) {
ts->xfer[i + 2].rx_buf = &ts->conversion_data[AD7877_SEQ_YPOS + i];
ts->xfer[i + 2].len = 2;
+ if (i < (AD7877_NR_SENSE - 1))
+ ts->xfer[i + 2].cs_change = 1;
spi_message_add_tail(&ts->xfer[i + 2], m);
}
}
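The cs_change flags added above make chip select toggle between the command word and each conversion word, which is how the AD7877 latches every 16-bit frame; only the final transfer leaves /CS alone. A hedged two-transfer illustration (example_build_msg() is hypothetical and the xfer[] array is assumed zero-initialized by the caller; the spi_message_* calls and the cs_change field are the real SPI API):

static void example_build_msg(struct spi_device *spi, struct spi_message *m,
			      struct spi_transfer *xfer, u16 *cmd, u16 *result)
{
	spi_message_init(m);

	xfer[0].tx_buf = cmd;
	xfer[0].len = 2;
	xfer[0].cs_change = 1;		/* drop /CS before the next word */
	spi_message_add_tail(&xfer[0], m);

	xfer[1].rx_buf = result;
	xfer[1].len = 2;		/* last transfer: keep /CS as-is */
	spi_message_add_tail(&xfer[1], m);
}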
@@ -718,6 +743,8 @@ static int __devinit ad7877_probe(struct spi_device *spi)
input_dev->phys = ts->phys;
input_dev->dev.parent = &spi->dev;
+ __set_bit(EV_KEY, input_dev->evbit);
+ __set_bit(BTN_TOUCH, input_dev->keybit);
__set_bit(EV_ABS, input_dev->evbit);
__set_bit(ABS_X, input_dev->absbit);
__set_bit(ABS_Y, input_dev->absbit);
@@ -752,8 +779,9 @@ static int __devinit ad7877_probe(struct spi_device *spi)
/* Request AD7877 /DAV GPIO interrupt */
- err = request_irq(spi->irq, ad7877_irq, IRQF_TRIGGER_FALLING,
- spi->dev.driver->name, ts);
+ err = request_threaded_irq(spi->irq, NULL, ad7877_irq,
+ IRQF_TRIGGER_FALLING | IRQF_ONESHOT,
+ spi->dev.driver->name, ts);
if (err) {
dev_dbg(&spi->dev, "irq %d busy?\n", spi->irq);
goto err_free_mem;
@@ -763,20 +791,12 @@ static int __devinit ad7877_probe(struct spi_device *spi)
if (err)
goto err_free_irq;
- err = device_create_file(&spi->dev,
- gpio3 ? &dev_attr_gpio3 : &dev_attr_aux3);
- if (err)
- goto err_remove_attr_group;
-
err = input_register_device(input_dev);
if (err)
- goto err_remove_attr;
+ goto err_remove_attr_group;
return 0;
-err_remove_attr:
- device_remove_file(&spi->dev,
- gpio3 ? &dev_attr_gpio3 : &dev_attr_aux3);
err_remove_attr_group:
sysfs_remove_group(&spi->dev.kobj, &ad7877_attr_group);
err_free_irq:
@@ -790,11 +810,9 @@ err_free_mem:
static int __devexit ad7877_remove(struct spi_device *spi)
{
- struct ad7877 *ts = dev_get_drvdata(&spi->dev);
+ struct ad7877 *ts = dev_get_drvdata(&spi->dev);
sysfs_remove_group(&spi->dev.kobj, &ad7877_attr_group);
- device_remove_file(&spi->dev,
- gpio3 ? &dev_attr_gpio3 : &dev_attr_aux3);
ad7877_disable(ts);
free_irq(ts->spi->irq, ts);
diff --git a/drivers/input/touchscreen/ad7879.c b/drivers/input/touchscreen/ad7879.c
index ba6f0bd1e762..bc3b5187f3a3 100644
--- a/drivers/input/touchscreen/ad7879.c
+++ b/drivers/input/touchscreen/ad7879.c
@@ -129,6 +129,9 @@ struct ad7879 {
u16 cmd_crtl1;
u16 cmd_crtl2;
u16 cmd_crtl3;
+ int x;
+ int y;
+ int Rt;
};
static int ad7879_read(struct ad7879 *ts, u8 reg)
@@ -175,13 +178,32 @@ static int ad7879_report(struct ad7879 *ts)
Rt /= z1;
Rt = (Rt + 2047) >> 12;
- if (!timer_pending(&ts->timer))
+ /*
+ * Sample found inconsistent, pressure is beyond
+ * the maximum. Don't report it to user space.
+ */
+ if (Rt > ts->pressure_max)
+ return -EINVAL;
+
+ /*
+ * Note that we delay reporting events by one sample.
+ * This is done to avoid reporting the last sample of the
+ * touch sequence, which may be incomplete if the finger
+ * leaves the surface before the last reading is taken.
+ */
+ if (timer_pending(&ts->timer)) {
+ /* Touch continues */
input_report_key(input_dev, BTN_TOUCH, 1);
+ input_report_abs(input_dev, ABS_X, ts->x);
+ input_report_abs(input_dev, ABS_Y, ts->y);
+ input_report_abs(input_dev, ABS_PRESSURE, ts->Rt);
+ input_sync(input_dev);
+ }
+
+ ts->x = x;
+ ts->y = y;
+ ts->Rt = Rt;
- input_report_abs(input_dev, ABS_X, x);
- input_report_abs(input_dev, ABS_Y, y);
- input_report_abs(input_dev, ABS_PRESSURE, Rt);
- input_sync(input_dev);
return 0;
}
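The comment block above describes buffering each sample and reporting it only on the next pass, so a possibly truncated final reading of a touch sequence never reaches user space. A hedged, driver-independent sketch of that one-sample delay (plain C, hypothetical names; the real driver uses timer_pending() as its "touch continues" test):

#include <stdbool.h>

struct delayed_report {
	bool have_prev;
	int prev_x, prev_y, prev_rt;
};

static void push_sample(struct delayed_report *d, bool touch_continues,
			int x, int y, int rt,
			void (*report)(int x, int y, int rt))
{
	/* Report the previous sample only while the touch is ongoing. */
	if (touch_continues && d->have_prev)
		report(d->prev_x, d->prev_y, d->prev_rt);

	/* Hold the current sample until the next pass. */
	d->prev_x = x;
	d->prev_y = y;
	d->prev_rt = rt;
	d->have_prev = true;
}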
diff --git a/drivers/input/touchscreen/ads7846.c b/drivers/input/touchscreen/ads7846.c
index 16031933a8f6..14ea54b78e46 100644
--- a/drivers/input/touchscreen/ads7846.c
+++ b/drivers/input/touchscreen/ads7846.c
@@ -17,9 +17,11 @@
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*/
+#include <linux/types.h>
#include <linux/hwmon.h>
#include <linux/init.h>
#include <linux/err.h>
+#include <linux/sched.h>
#include <linux/delay.h>
#include <linux/input.h>
#include <linux/interrupt.h>
@@ -52,22 +54,23 @@
* files.
*/
-#define TS_POLL_DELAY (1 * 1000000) /* ns delay before the first sample */
-#define TS_POLL_PERIOD (5 * 1000000) /* ns delay between samples */
+#define TS_POLL_DELAY 1 /* ms delay before the first sample */
+#define TS_POLL_PERIOD 5 /* ms delay between samples */
/* this driver doesn't aim at the peak continuous sample rate */
#define SAMPLE_BITS (8 /*cmd*/ + 16 /*sample*/ + 2 /* before, after */)
struct ts_event {
- /* For portability, we can't read 12 bit values using SPI (which
- * would make the controller deliver them as native byteorder u16
+ /*
+ * For portability, we can't read 12 bit values using SPI (which
+ * would make the controller deliver them as native byte order u16
* with msbs zeroed). Instead, we read them as two 8-bit values,
* *** WHICH NEED BYTESWAPPING *** and range adjustment.
*/
u16 x;
u16 y;
u16 z1, z2;
- int ignore;
+ bool ignore;
u8 x_buf[3];
u8 y_buf[3];
};
@@ -110,8 +113,11 @@ struct ads7846 {
struct spi_transfer xfer[18];
struct spi_message msg[5];
- struct spi_message *last_msg;
- int msg_idx;
+ int msg_count;
+ wait_queue_head_t wait;
+
+ bool pendown;
+
int read_cnt;
int read_rep;
int last_read;
@@ -122,14 +128,10 @@ struct ads7846 {
u16 penirq_recheck_delay_usecs;
- spinlock_t lock;
- struct hrtimer timer;
- unsigned pendown:1; /* P: lock */
- unsigned pending:1; /* P: lock */
-// FIXME remove "irq_disabled"
- unsigned irq_disabled:1; /* P: lock */
- unsigned disabled:1;
- unsigned is_suspended:1;
+ struct mutex lock;
+ bool stopped; /* P: lock */
+ bool disabled; /* P: lock */
+ bool suspended; /* P: lock */
int (*filter)(void *data, int data_idx, int *val);
void *filter_data;
@@ -165,7 +167,7 @@ struct ads7846 {
#define ADS_12_BIT (0 << 3)
#define ADS_SER (1 << 2) /* non-differential */
#define ADS_DFR (0 << 2) /* differential */
-#define ADS_PD10_PDOWN (0 << 0) /* lowpower mode + penirq */
+#define ADS_PD10_PDOWN (0 << 0) /* low power mode + penirq */
#define ADS_PD10_ADC_ON (1 << 0) /* ADC on */
#define ADS_PD10_REF_ON (2 << 0) /* vREF on + penirq */
#define ADS_PD10_ALL_ON (3 << 0) /* ADC + vREF on */
@@ -193,6 +195,78 @@ struct ads7846 {
#define REF_ON (READ_12BIT_DFR(x, 1, 1))
#define REF_OFF (READ_12BIT_DFR(y, 0, 0))
+/* Must be called with ts->lock held */
+static void ads7846_stop(struct ads7846 *ts)
+{
+ if (!ts->disabled && !ts->suspended) {
+ /* Signal IRQ thread to stop polling and disable the handler. */
+ ts->stopped = true;
+ mb();
+ wake_up(&ts->wait);
+ disable_irq(ts->spi->irq);
+ }
+}
+
+/* Must be called with ts->lock held */
+static void ads7846_restart(struct ads7846 *ts)
+{
+ if (!ts->disabled && !ts->suspended) {
+ /* Tell IRQ thread that it may poll the device. */
+ ts->stopped = false;
+ mb();
+ enable_irq(ts->spi->irq);
+ }
+}
+
+/* Must be called with ts->lock held */
+static void __ads7846_disable(struct ads7846 *ts)
+{
+ ads7846_stop(ts);
+ regulator_disable(ts->reg);
+
+ /*
+ * We know the chip's in low power mode since we always
+ * leave it that way after every request
+ */
+}
+
+/* Must be called with ts->lock held */
+static void __ads7846_enable(struct ads7846 *ts)
+{
+ regulator_enable(ts->reg);
+ ads7846_restart(ts);
+}
+
+static void ads7846_disable(struct ads7846 *ts)
+{
+ mutex_lock(&ts->lock);
+
+ if (!ts->disabled) {
+
+ if (!ts->suspended)
+ __ads7846_disable(ts);
+
+ ts->disabled = true;
+ }
+
+ mutex_unlock(&ts->lock);
+}
+
+static void ads7846_enable(struct ads7846 *ts)
+{
+ mutex_lock(&ts->lock);
+
+ if (ts->disabled) {
+
+ ts->disabled = false;
+
+ if (!ts->suspended)
+ __ads7846_enable(ts);
+ }
+
+ mutex_unlock(&ts->lock);
+}
+
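ads7846_stop() and ads7846_restart() above form a small pause/resume handshake with the polling IRQ thread: setting the flag and waking the wait queue breaks the thread out of its sleep, after which disable_irq() can safely wait for the handler to finish. A hedged generic version (struct example_poller with stopped/wait/irq members is hypothetical; mb(), wake_up(), disable_irq() and enable_irq() are the real primitives):

static void example_pause(struct example_poller *p)
{
	p->stopped = true;
	mb();				/* flag visible before the wake-up */
	wake_up(&p->wait);
	disable_irq(p->irq);		/* waits for the thread to return */
}

static void example_resume(struct example_poller *p)
{
	p->stopped = false;
	mb();
	enable_irq(p->irq);
}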
/*--------------------------------------------------------------------------*/
/*
@@ -219,23 +293,15 @@ struct ads7845_ser_req {
struct spi_transfer xfer[2];
};
-static void ads7846_enable(struct ads7846 *ts);
-static void ads7846_disable(struct ads7846 *ts);
-
-static int device_suspended(struct device *dev)
-{
- struct ads7846 *ts = dev_get_drvdata(dev);
- return ts->is_suspended || ts->disabled;
-}
-
static int ads7846_read12_ser(struct device *dev, unsigned command)
{
- struct spi_device *spi = to_spi_device(dev);
- struct ads7846 *ts = dev_get_drvdata(dev);
- struct ser_req *req = kzalloc(sizeof *req, GFP_KERNEL);
- int status;
- int use_internal;
+ struct spi_device *spi = to_spi_device(dev);
+ struct ads7846 *ts = dev_get_drvdata(dev);
+ struct ser_req *req;
+ int status;
+ int use_internal;
+ req = kzalloc(sizeof *req, GFP_KERNEL);
if (!req)
return -ENOMEM;
@@ -282,11 +348,11 @@ static int ads7846_read12_ser(struct device *dev, unsigned command)
CS_CHANGE(req->xfer[5]);
spi_message_add_tail(&req->xfer[5], &req->msg);
- ts->irq_disabled = 1;
- disable_irq(spi->irq);
+ mutex_lock(&ts->lock);
+ ads7846_stop(ts);
status = spi_sync(spi, &req->msg);
- ts->irq_disabled = 0;
- enable_irq(spi->irq);
+ ads7846_restart(ts);
+ mutex_unlock(&ts->lock);
if (status == 0) {
/* on-wire is a must-ignore bit, a BE12 value, then padding */
@@ -301,11 +367,12 @@ static int ads7846_read12_ser(struct device *dev, unsigned command)
static int ads7845_read12_ser(struct device *dev, unsigned command)
{
- struct spi_device *spi = to_spi_device(dev);
- struct ads7846 *ts = dev_get_drvdata(dev);
- struct ads7845_ser_req *req = kzalloc(sizeof *req, GFP_KERNEL);
- int status;
+ struct spi_device *spi = to_spi_device(dev);
+ struct ads7846 *ts = dev_get_drvdata(dev);
+ struct ads7845_ser_req *req;
+ int status;
+ req = kzalloc(sizeof *req, GFP_KERNEL);
if (!req)
return -ENOMEM;
@@ -317,11 +384,11 @@ static int ads7845_read12_ser(struct device *dev, unsigned command)
req->xfer[0].len = 3;
spi_message_add_tail(&req->xfer[0], &req->msg);
- ts->irq_disabled = 1;
- disable_irq(spi->irq);
+ mutex_lock(&ts->lock);
+ ads7846_stop(ts);
status = spi_sync(spi, &req->msg);
- ts->irq_disabled = 0;
- enable_irq(spi->irq);
+ ads7846_restart(ts);
+ mutex_unlock(&ts->lock);
if (status == 0) {
/* BE12 value, then padding */
@@ -374,6 +441,7 @@ static inline unsigned vaux_adjust(struct ads7846 *ts, ssize_t v)
/* external resistors may scale vAUX into 0..vREF */
retval *= ts->vref_mv;
retval = retval >> 12;
+
return retval;
}
@@ -384,13 +452,13 @@ static inline unsigned vbatt_adjust(struct ads7846 *ts, ssize_t v)
/* ads7846 has a resistor ladder to scale this signal down */
if (ts->model == 7846)
retval *= 4;
+
return retval;
}
SHOW(in0_input, vaux, vaux_adjust)
SHOW(in1_input, vbatt, vbatt_adjust)
-
static struct attribute *ads7846_attributes[] = {
&dev_attr_temp0.attr,
&dev_attr_temp1.attr,
@@ -498,17 +566,12 @@ static inline void ads784x_hwmon_unregister(struct spi_device *spi,
}
#endif
-static int is_pen_down(struct device *dev)
-{
- struct ads7846 *ts = dev_get_drvdata(dev);
-
- return ts->pendown;
-}
-
static ssize_t ads7846_pen_down_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
- return sprintf(buf, "%u\n", is_pen_down(dev));
+ struct ads7846 *ts = dev_get_drvdata(dev);
+
+ return sprintf(buf, "%u\n", ts->pendown);
}
static DEVICE_ATTR(pen_down, S_IRUGO, ads7846_pen_down_show, NULL);
@@ -516,7 +579,7 @@ static DEVICE_ATTR(pen_down, S_IRUGO, ads7846_pen_down_show, NULL);
static ssize_t ads7846_disable_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
- struct ads7846 *ts = dev_get_drvdata(dev);
+ struct ads7846 *ts = dev_get_drvdata(dev);
return sprintf(buf, "%u\n", ts->disabled);
}
@@ -531,15 +594,11 @@ static ssize_t ads7846_disable_store(struct device *dev,
if (strict_strtoul(buf, 10, &i))
return -EINVAL;
- spin_lock_irq(&ts->lock);
-
if (i)
ads7846_disable(ts);
else
ads7846_enable(ts);
- spin_unlock_irq(&ts->lock);
-
return count;
}
@@ -569,23 +628,141 @@ static void null_wait_for_sync(void)
{
}
-/*
- * PENIRQ only kicks the timer. The timer only reissues the SPI transfer,
- * to retrieve touchscreen status.
- *
- * The SPI transfer completion callback does the real work. It reports
- * touchscreen events and reactivates the timer (or IRQ) as appropriate.
- */
+static int ads7846_debounce_filter(void *ads, int data_idx, int *val)
+{
+ struct ads7846 *ts = ads;
+
+ if (!ts->read_cnt || (abs(ts->last_read - *val) > ts->debounce_tol)) {
+ /* Start over collecting consistent readings. */
+ ts->read_rep = 0;
+ /*
+ * Repeat it, if this was the first read or the read
+ * wasn't consistent enough.
+ */
+ if (ts->read_cnt < ts->debounce_max) {
+ ts->last_read = *val;
+ ts->read_cnt++;
+ return ADS7846_FILTER_REPEAT;
+ } else {
+ /*
+ * Maximum number of debounce attempts reached and still
+ * not enough consistent readings. Abort
+ * the whole sample, repeat it in the next sampling
+ * period.
+ */
+ ts->read_cnt = 0;
+ return ADS7846_FILTER_IGNORE;
+ }
+ } else {
+ if (++ts->read_rep > ts->debounce_rep) {
+ /*
+ * Got a good reading for this coordinate,
+ * go for the next one.
+ */
+ ts->read_cnt = 0;
+ ts->read_rep = 0;
+ return ADS7846_FILTER_OK;
+ } else {
+ /* Read more values that are consistent. */
+ ts->read_cnt++;
+ return ADS7846_FILTER_REPEAT;
+ }
+ }
+}
+
+static int ads7846_no_filter(void *ads, int data_idx, int *val)
+{
+ return ADS7846_FILTER_OK;
+}
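ads7846_debounce_filter() above is a small per-channel state machine. The following stand-alone, hedged user-space simulation (made-up constants and sample values, simplified enum names) shows how its three verdicts drive re-reads until a value stabilizes:

#include <stdio.h>
#include <stdlib.h>

enum { FILTER_OK, FILTER_IGNORE, FILTER_REPEAT };

struct debounce {
	int read_cnt, read_rep, last_read;
	int tol, max, rep;
};

static int debounce_filter(struct debounce *d, int *val)
{
	if (!d->read_cnt || abs(d->last_read - *val) > d->tol) {
		d->read_rep = 0;
		if (d->read_cnt < d->max) {
			d->last_read = *val;
			d->read_cnt++;
			return FILTER_REPEAT;	/* read the channel again */
		}
		d->read_cnt = 0;
		return FILTER_IGNORE;		/* give up on this sample */
	}

	if (++d->read_rep > d->rep) {
		d->read_cnt = d->read_rep = 0;
		return FILTER_OK;		/* stable, move to next channel */
	}

	d->read_cnt++;
	return FILTER_REPEAT;
}

int main(void)
{
	struct debounce d = { .tol = 3, .max = 10, .rep = 1 };
	int samples[] = { 100, 130, 131, 131 };
	int i;

	for (i = 0; i < 4; i++) {
		int v = samples[i];
		printf("sample %d -> verdict %d\n", v, debounce_filter(&d, &v));
	}

	return 0;
}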
+
+static int ads7846_get_value(struct ads7846 *ts, struct spi_message *m)
+{
+ struct spi_transfer *t =
+ list_entry(m->transfers.prev, struct spi_transfer, transfer_list);
+
+ if (ts->model == 7845) {
+ return be16_to_cpup((__be16 *)&(((char*)t->rx_buf)[1])) >> 3;
+ } else {
+ /*
+ * adjust: on-wire is a must-ignore bit, a BE12 value, then
+ * padding; built from two 8 bit values written msb-first.
+ */
+ return be16_to_cpup((__be16 *)t->rx_buf) >> 3;
+ }
+}
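ads7846_get_value() depends on the wire format described in the struct ts_event comment: a leading must-ignore bit, a big-endian 12-bit value, then padding bits. A hedged stand-alone helper showing the same conversion that be16_to_cpup(buf) >> 3 performs in the driver:

#include <stdint.h>

static uint16_t decode_ads7846_sample(const uint8_t buf[2])
{
	/* Two bytes MSB-first; shift out the padding, mask the busy bit. */
	return (uint16_t)((((unsigned int)buf[0] << 8) | buf[1]) >> 3) & 0x0fff;
}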
+
+static void ads7846_update_value(struct spi_message *m, int val)
+{
+ struct spi_transfer *t =
+ list_entry(m->transfers.prev, struct spi_transfer, transfer_list);
+
+ *(u16 *)t->rx_buf = val;
+}
+
+static void ads7846_read_state(struct ads7846 *ts)
+{
+ struct ads7846_packet *packet = ts->packet;
+ struct spi_message *m;
+ int msg_idx = 0;
+ int val;
+ int action;
+ int error;
+
+ while (msg_idx < ts->msg_count) {
+
+ ts->wait_for_sync();
+
+ m = &ts->msg[msg_idx];
+ error = spi_sync(ts->spi, m);
+ if (error) {
+ dev_err(&ts->spi->dev, "spi_async --> %d\n", error);
+ packet->tc.ignore = true;
+ return;
+ }
+
+ /*
+ * Last message is power down request, no need to convert
+ * or filter the value.
+ */
+ if (msg_idx < ts->msg_count - 1) {
-static void ads7846_rx(void *ads)
+ val = ads7846_get_value(ts, m);
+
+ action = ts->filter(ts->filter_data, msg_idx, &val);
+ switch (action) {
+ case ADS7846_FILTER_REPEAT:
+ continue;
+
+ case ADS7846_FILTER_IGNORE:
+ packet->tc.ignore = true;
+ msg_idx = ts->msg_count - 1;
+ continue;
+
+ case ADS7846_FILTER_OK:
+ ads7846_update_value(m, val);
+ packet->tc.ignore = false;
+ msg_idx++;
+ break;
+
+ default:
+ BUG();
+ }
+ } else {
+ msg_idx++;
+ }
+ }
+}
+
+static void ads7846_report_state(struct ads7846 *ts)
{
- struct ads7846 *ts = ads;
- struct ads7846_packet *packet = ts->packet;
- unsigned Rt;
- u16 x, y, z1, z2;
+ struct ads7846_packet *packet = ts->packet;
+ unsigned int Rt;
+ u16 x, y, z1, z2;
- /* ads7846_rx_val() did in-place conversion (including byteswap) from
- * on-the-wire format as part of debouncing to get stable readings.
+ /*
+ * ads7846_get_value() does in-place conversion (including byte swap)
+ * from on-the-wire format as part of debouncing to get stable
+ * readings.
*/
if (ts->model == 7845) {
x = *(u16 *)packet->tc.x_buf;
@@ -623,19 +800,19 @@ static void ads7846_rx(void *ads)
Rt = 0;
}
- /* Sample found inconsistent by debouncing or pressure is beyond
+ /*
+ * Sample found inconsistent by debouncing or pressure is beyond
* the maximum. Don't report it to user space; repeat the
* measurement at least once more.
*/
if (packet->tc.ignore || Rt > ts->pressure_max) {
dev_vdbg(&ts->spi->dev, "ignored %d pressure %d\n",
packet->tc.ignore, Rt);
- hrtimer_start(&ts->timer, ktime_set(0, TS_POLL_PERIOD),
- HRTIMER_MODE_REL);
return;
}
- /* Maybe check the pendown state before reporting. This discards
+ /*
+ * Maybe check the pendown state before reporting. This discards
* false readings when the pen is lifted.
*/
if (ts->penirq_recheck_delay_usecs) {
@@ -644,8 +821,9 @@ static void ads7846_rx(void *ads)
Rt = 0;
}
- /* NOTE: We can't rely on the pressure to determine the pen down
- * state, even this controller has a pressure sensor. The pressure
+ /*
+ * NOTE: We can't rely on the pressure to determine the pen down
+ * state, even though this controller has a pressure sensor. The pressure
* value can fluctuate for quite a while after lifting the pen and
* in some cases may not even settle at the expected value.
*
@@ -655,15 +833,15 @@ static void ads7846_rx(void *ads)
if (Rt) {
struct input_dev *input = ts->input;
+ if (ts->swap_xy)
+ swap(x, y);
+
if (!ts->pendown) {
input_report_key(input, BTN_TOUCH, 1);
- ts->pendown = 1;
+ ts->pendown = true;
dev_vdbg(&ts->spi->dev, "DOWN\n");
}
- if (ts->swap_xy)
- swap(x, y);
-
input_report_abs(input, ABS_X, x);
input_report_abs(input, ABS_Y, y);
input_report_abs(input, ABS_PRESSURE, ts->pressure_max - Rt);
@@ -671,246 +849,94 @@ static void ads7846_rx(void *ads)
input_sync(input);
dev_vdbg(&ts->spi->dev, "%4d/%4d/%4d\n", x, y, Rt);
}
-
- hrtimer_start(&ts->timer, ktime_set(0, TS_POLL_PERIOD),
- HRTIMER_MODE_REL);
-}
-
-static int ads7846_debounce(void *ads, int data_idx, int *val)
-{
- struct ads7846 *ts = ads;
-
- if (!ts->read_cnt || (abs(ts->last_read - *val) > ts->debounce_tol)) {
- /* Start over collecting consistent readings. */
- ts->read_rep = 0;
- /* Repeat it, if this was the first read or the read
- * wasn't consistent enough. */
- if (ts->read_cnt < ts->debounce_max) {
- ts->last_read = *val;
- ts->read_cnt++;
- return ADS7846_FILTER_REPEAT;
- } else {
- /* Maximum number of debouncing reached and still
- * not enough number of consistent readings. Abort
- * the whole sample, repeat it in the next sampling
- * period.
- */
- ts->read_cnt = 0;
- return ADS7846_FILTER_IGNORE;
- }
- } else {
- if (++ts->read_rep > ts->debounce_rep) {
- /* Got a good reading for this coordinate,
- * go for the next one. */
- ts->read_cnt = 0;
- ts->read_rep = 0;
- return ADS7846_FILTER_OK;
- } else {
- /* Read more values that are consistent. */
- ts->read_cnt++;
- return ADS7846_FILTER_REPEAT;
- }
- }
}
-static int ads7846_no_filter(void *ads, int data_idx, int *val)
+static irqreturn_t ads7846_hard_irq(int irq, void *handle)
{
- return ADS7846_FILTER_OK;
-}
-
-static void ads7846_rx_val(void *ads)
-{
- struct ads7846 *ts = ads;
- struct ads7846_packet *packet = ts->packet;
- struct spi_message *m;
- struct spi_transfer *t;
- int val;
- int action;
- int status;
-
- m = &ts->msg[ts->msg_idx];
- t = list_entry(m->transfers.prev, struct spi_transfer, transfer_list);
-
- if (ts->model == 7845) {
- val = be16_to_cpup((__be16 *)&(((char*)t->rx_buf)[1])) >> 3;
- } else {
- /* adjust: on-wire is a must-ignore bit, a BE12 value, then
- * padding; built from two 8 bit values written msb-first.
- */
- val = be16_to_cpup((__be16 *)t->rx_buf) >> 3;
- }
+ struct ads7846 *ts = handle;
- action = ts->filter(ts->filter_data, ts->msg_idx, &val);
- switch (action) {
- case ADS7846_FILTER_REPEAT:
- break;
- case ADS7846_FILTER_IGNORE:
- packet->tc.ignore = 1;
- /* Last message will contain ads7846_rx() as the
- * completion function.
- */
- m = ts->last_msg;
- break;
- case ADS7846_FILTER_OK:
- *(u16 *)t->rx_buf = val;
- packet->tc.ignore = 0;
- m = &ts->msg[++ts->msg_idx];
- break;
- default:
- BUG();
- }
- ts->wait_for_sync();
- status = spi_async(ts->spi, m);
- if (status)
- dev_err(&ts->spi->dev, "spi_async --> %d\n",
- status);
+ return get_pendown_state(ts) ? IRQ_WAKE_THREAD : IRQ_HANDLED;
}
-static enum hrtimer_restart ads7846_timer(struct hrtimer *handle)
-{
- struct ads7846 *ts = container_of(handle, struct ads7846, timer);
- int status = 0;
-
- spin_lock(&ts->lock);
-
- if (unlikely(!get_pendown_state(ts) ||
- device_suspended(&ts->spi->dev))) {
- if (ts->pendown) {
- struct input_dev *input = ts->input;
-
- input_report_key(input, BTN_TOUCH, 0);
- input_report_abs(input, ABS_PRESSURE, 0);
- input_sync(input);
-
- ts->pendown = 0;
- dev_vdbg(&ts->spi->dev, "UP\n");
- }
-
- /* measurement cycle ended */
- if (!device_suspended(&ts->spi->dev)) {
- ts->irq_disabled = 0;
- enable_irq(ts->spi->irq);
- }
- ts->pending = 0;
- } else {
- /* pen is still down, continue with the measurement */
- ts->msg_idx = 0;
- ts->wait_for_sync();
- status = spi_async(ts->spi, &ts->msg[0]);
- if (status)
- dev_err(&ts->spi->dev, "spi_async --> %d\n", status);
- }
-
- spin_unlock(&ts->lock);
- return HRTIMER_NORESTART;
-}
static irqreturn_t ads7846_irq(int irq, void *handle)
{
struct ads7846 *ts = handle;
- unsigned long flags;
-
- spin_lock_irqsave(&ts->lock, flags);
- if (likely(get_pendown_state(ts))) {
- if (!ts->irq_disabled) {
- /* The ARM do_simple_IRQ() dispatcher doesn't act
- * like the other dispatchers: it will report IRQs
- * even after they've been disabled. We work around
- * that here. (The "generic irq" framework may help...)
- */
- ts->irq_disabled = 1;
- disable_irq_nosync(ts->spi->irq);
- ts->pending = 1;
- hrtimer_start(&ts->timer, ktime_set(0, TS_POLL_DELAY),
- HRTIMER_MODE_REL);
- }
- }
- spin_unlock_irqrestore(&ts->lock, flags);
- return IRQ_HANDLED;
-}
+ /* Start with a small delay before checking pendown state */
+ msleep(TS_POLL_DELAY);
-/*--------------------------------------------------------------------------*/
+ while (!ts->stopped && get_pendown_state(ts)) {
-/* Must be called with ts->lock held */
-static void ads7846_disable(struct ads7846 *ts)
-{
- if (ts->disabled)
- return;
+ /* pen is down, continue with the measurement */
+ ads7846_read_state(ts);
- ts->disabled = 1;
+ if (!ts->stopped)
+ ads7846_report_state(ts);
- /* are we waiting for IRQ, or polling? */
- if (!ts->pending) {
- ts->irq_disabled = 1;
- disable_irq(ts->spi->irq);
- } else {
- /* the timer will run at least once more, and
- * leave everything in a clean state, IRQ disabled
- */
- while (ts->pending) {
- spin_unlock_irq(&ts->lock);
- msleep(1);
- spin_lock_irq(&ts->lock);
- }
+ wait_event_timeout(ts->wait, ts->stopped,
+ msecs_to_jiffies(TS_POLL_PERIOD));
}
- regulator_disable(ts->reg);
-
- /* we know the chip's in lowpower mode since we always
- * leave it that way after every request
- */
-}
+ if (ts->pendown) {
+ struct input_dev *input = ts->input;
-/* Must be called with ts->lock held */
-static void ads7846_enable(struct ads7846 *ts)
-{
- if (!ts->disabled)
- return;
+ input_report_key(input, BTN_TOUCH, 0);
+ input_report_abs(input, ABS_PRESSURE, 0);
+ input_sync(input);
- regulator_enable(ts->reg);
+ ts->pendown = false;
+ dev_vdbg(&ts->spi->dev, "UP\n");
+ }
- ts->disabled = 0;
- ts->irq_disabled = 0;
- enable_irq(ts->spi->irq);
+ return IRQ_HANDLED;
}
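The hard/threaded split above is the stock request_threaded_irq() pattern: the primary handler runs in hard-IRQ context, only checks whether the event is really ours and returns IRQ_WAKE_THREAD, while the thread is free to sleep in spi_sync(). A hedged, generic fragment (example_* names are hypothetical; assumes the usual <linux/interrupt.h> declarations):

static irqreturn_t example_hard_irq(int irq, void *dev_id)
{
	struct example_ts *ts = dev_id;		/* hypothetical */

	return example_pen_is_down(ts) ? IRQ_WAKE_THREAD : IRQ_HANDLED;
}

static irqreturn_t example_thread_irq(int irq, void *dev_id)
{
	struct example_ts *ts = dev_id;

	example_sample_and_report(ts);		/* may sleep, e.g. spi_sync() */

	return IRQ_HANDLED;
}

/* err = request_threaded_irq(irq, example_hard_irq, example_thread_irq,
 *			       IRQF_TRIGGER_FALLING | IRQF_ONESHOT,
 *			       "example_ts", ts);
 */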
static int ads7846_suspend(struct spi_device *spi, pm_message_t message)
{
struct ads7846 *ts = dev_get_drvdata(&spi->dev);
- spin_lock_irq(&ts->lock);
+ mutex_lock(&ts->lock);
- ts->is_suspended = 1;
- ads7846_disable(ts);
+ if (!ts->suspended) {
- spin_unlock_irq(&ts->lock);
+ if (!ts->disabled)
+ __ads7846_disable(ts);
- if (device_may_wakeup(&ts->spi->dev))
- enable_irq_wake(ts->spi->irq);
+ if (device_may_wakeup(&ts->spi->dev))
+ enable_irq_wake(ts->spi->irq);
- return 0;
+ ts->suspended = true;
+ }
+
+ mutex_unlock(&ts->lock);
+ return 0;
}
static int ads7846_resume(struct spi_device *spi)
{
struct ads7846 *ts = dev_get_drvdata(&spi->dev);
- if (device_may_wakeup(&ts->spi->dev))
- disable_irq_wake(ts->spi->irq);
+ mutex_lock(&ts->lock);
+
+ if (ts->suspended) {
- spin_lock_irq(&ts->lock);
+ ts->suspended = false;
- ts->is_suspended = 0;
- ads7846_enable(ts);
+ if (device_may_wakeup(&ts->spi->dev))
+ disable_irq_wake(ts->spi->irq);
- spin_unlock_irq(&ts->lock);
+ if (!ts->disabled)
+ __ads7846_enable(ts);
+ }
+
+ mutex_unlock(&ts->lock);
return 0;
}
-static int __devinit setup_pendown(struct spi_device *spi, struct ads7846 *ts)
+static int __devinit ads7846_setup_pendown(struct spi_device *spi, struct ads7846 *ts)
{
struct ads7846_platform_data *pdata = spi->dev.platform_data;
int err;
@@ -932,146 +958,40 @@ static int __devinit setup_pendown(struct spi_device *spi, struct ads7846 *ts)
err = gpio_request(pdata->gpio_pendown, "ads7846_pendown");
if (err) {
dev_err(&spi->dev, "failed to request pendown GPIO%d\n",
- pdata->gpio_pendown);
+ pdata->gpio_pendown);
return err;
}
ts->gpio_pendown = pdata->gpio_pendown;
+
return 0;
}
-static int __devinit ads7846_probe(struct spi_device *spi)
+/*
+ * Set up the transfers to read touchscreen state; this assumes we
+ * use formula #2 for pressure, not #3.
+ */
+static void __devinit ads7846_setup_spi_msg(struct ads7846 *ts,
+ const struct ads7846_platform_data *pdata)
{
- struct ads7846 *ts;
- struct ads7846_packet *packet;
- struct input_dev *input_dev;
- const struct ads7846_platform_data *pdata = spi->dev.platform_data;
- struct spi_message *m;
- struct spi_transfer *x;
- unsigned long irq_flags;
- int vref;
- int err;
-
- if (!spi->irq) {
- dev_dbg(&spi->dev, "no IRQ?\n");
- return -ENODEV;
- }
-
- if (!pdata) {
- dev_dbg(&spi->dev, "no platform data?\n");
- return -ENODEV;
- }
-
- /* don't exceed max specified sample rate */
- if (spi->max_speed_hz > (125000 * SAMPLE_BITS)) {
- dev_dbg(&spi->dev, "f(sample) %d KHz?\n",
- (spi->max_speed_hz/SAMPLE_BITS)/1000);
- return -EINVAL;
- }
-
- /* We'd set TX wordsize 8 bits and RX wordsize to 13 bits ... except
- * that even if the hardware can do that, the SPI controller driver
- * may not. So we stick to very-portable 8 bit words, both RX and TX.
- */
- spi->bits_per_word = 8;
- spi->mode = SPI_MODE_0;
- err = spi_setup(spi);
- if (err < 0)
- return err;
-
- ts = kzalloc(sizeof(struct ads7846), GFP_KERNEL);
- packet = kzalloc(sizeof(struct ads7846_packet), GFP_KERNEL);
- input_dev = input_allocate_device();
- if (!ts || !packet || !input_dev) {
- err = -ENOMEM;
- goto err_free_mem;
- }
-
- dev_set_drvdata(&spi->dev, ts);
-
- ts->packet = packet;
- ts->spi = spi;
- ts->input = input_dev;
- ts->vref_mv = pdata->vref_mv;
- ts->swap_xy = pdata->swap_xy;
-
- hrtimer_init(&ts->timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
- ts->timer.function = ads7846_timer;
-
- spin_lock_init(&ts->lock);
-
- ts->model = pdata->model ? : 7846;
- ts->vref_delay_usecs = pdata->vref_delay_usecs ? : 100;
- ts->x_plate_ohms = pdata->x_plate_ohms ? : 400;
- ts->pressure_max = pdata->pressure_max ? : ~0;
-
- if (pdata->filter != NULL) {
- if (pdata->filter_init != NULL) {
- err = pdata->filter_init(pdata, &ts->filter_data);
- if (err < 0)
- goto err_free_mem;
- }
- ts->filter = pdata->filter;
- ts->filter_cleanup = pdata->filter_cleanup;
- } else if (pdata->debounce_max) {
- ts->debounce_max = pdata->debounce_max;
- if (ts->debounce_max < 2)
- ts->debounce_max = 2;
- ts->debounce_tol = pdata->debounce_tol;
- ts->debounce_rep = pdata->debounce_rep;
- ts->filter = ads7846_debounce;
- ts->filter_data = ts;
- } else
- ts->filter = ads7846_no_filter;
-
- err = setup_pendown(spi, ts);
- if (err)
- goto err_cleanup_filter;
-
- if (pdata->penirq_recheck_delay_usecs)
- ts->penirq_recheck_delay_usecs =
- pdata->penirq_recheck_delay_usecs;
-
- ts->wait_for_sync = pdata->wait_for_sync ? : null_wait_for_sync;
-
- snprintf(ts->phys, sizeof(ts->phys), "%s/input0", dev_name(&spi->dev));
- snprintf(ts->name, sizeof(ts->name), "ADS%d Touchscreen", ts->model);
-
- input_dev->name = ts->name;
- input_dev->phys = ts->phys;
- input_dev->dev.parent = &spi->dev;
-
- input_dev->evbit[0] = BIT_MASK(EV_KEY) | BIT_MASK(EV_ABS);
- input_dev->keybit[BIT_WORD(BTN_TOUCH)] = BIT_MASK(BTN_TOUCH);
- input_set_abs_params(input_dev, ABS_X,
- pdata->x_min ? : 0,
- pdata->x_max ? : MAX_12BIT,
- 0, 0);
- input_set_abs_params(input_dev, ABS_Y,
- pdata->y_min ? : 0,
- pdata->y_max ? : MAX_12BIT,
- 0, 0);
- input_set_abs_params(input_dev, ABS_PRESSURE,
- pdata->pressure_min, pdata->pressure_max, 0, 0);
-
- vref = pdata->keep_vref_on;
+ struct spi_message *m = &ts->msg[0];
+ struct spi_transfer *x = ts->xfer;
+ struct ads7846_packet *packet = ts->packet;
+ int vref = pdata->keep_vref_on;
if (ts->model == 7873) {
- /* The AD7873 is almost identical to the ADS7846
+ /*
+ * The AD7873 is almost identical to the ADS7846
* keep VREF off during differential/ratiometric
- * conversion modes
+ * conversion modes.
*/
ts->model = 7846;
vref = 0;
}
- /* set up the transfers to read touchscreen state; this assumes we
- * use formula #2 for pressure, not #3.
- */
- m = &ts->msg[0];
- x = ts->xfer;
-
+ ts->msg_count = 1;
spi_message_init(m);
+ m->context = ts;
if (ts->model == 7845) {
packet->read_y_cmd[0] = READ_Y(vref);
@@ -1094,7 +1014,8 @@ static int __devinit ads7846_probe(struct spi_device *spi)
spi_message_add_tail(x, m);
}
- /* the first sample after switching drivers can be low quality;
+ /*
+ * The first sample after switching drivers can be low quality;
* optionally discard it, using a second one after the signals
* have had enough time to stabilize.
*/
@@ -1112,11 +1033,10 @@ static int __devinit ads7846_probe(struct spi_device *spi)
spi_message_add_tail(x, m);
}
- m->complete = ads7846_rx_val;
- m->context = ts;
-
+ ts->msg_count++;
m++;
spi_message_init(m);
+ m->context = ts;
if (ts->model == 7845) {
x++;
@@ -1156,13 +1076,12 @@ static int __devinit ads7846_probe(struct spi_device *spi)
spi_message_add_tail(x, m);
}
- m->complete = ads7846_rx_val;
- m->context = ts;
-
/* turn y+ off, x- on; we'll use formula #2 */
if (ts->model == 7846) {
+ ts->msg_count++;
m++;
spi_message_init(m);
+ m->context = ts;
x++;
packet->read_z1 = READ_Z1(vref);
@@ -1190,11 +1109,10 @@ static int __devinit ads7846_probe(struct spi_device *spi)
spi_message_add_tail(x, m);
}
- m->complete = ads7846_rx_val;
- m->context = ts;
-
+ ts->msg_count++;
m++;
spi_message_init(m);
+ m->context = ts;
x++;
packet->read_z2 = READ_Z2(vref);
@@ -1221,14 +1139,13 @@ static int __devinit ads7846_probe(struct spi_device *spi)
x->len = 2;
spi_message_add_tail(x, m);
}
-
- m->complete = ads7846_rx_val;
- m->context = ts;
}
/* power down */
+ ts->msg_count++;
m++;
spi_message_init(m);
+ m->context = ts;
if (ts->model == 7845) {
x++;
@@ -1251,11 +1168,119 @@ static int __devinit ads7846_probe(struct spi_device *spi)
CS_CHANGE(*x);
spi_message_add_tail(x, m);
+}
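The message set built above feeds X, Z1 and Z2 into pressure formula #2 of the ADS7846 data sheet; the report path turns them into a touch resistance with plain integer math. A hedged stand-alone version of that arithmetic (a wider type is used here to be explicit about the intermediate products):

static unsigned int example_touch_resistance(unsigned int x_plate_ohms,
					     unsigned int x,
					     unsigned int z1, unsigned int z2)
{
	unsigned long long rt;

	if (!z1)
		return 0;			/* no valid pressure reading */

	rt = z2;
	rt -= z1;
	rt *= x;
	rt *= x_plate_ohms;
	rt /= z1;

	return (unsigned int)((rt + 2047) >> 12);	/* back to 12-bit scale */
}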
- m->complete = ads7846_rx;
- m->context = ts;
+static int __devinit ads7846_probe(struct spi_device *spi)
+{
+ struct ads7846 *ts;
+ struct ads7846_packet *packet;
+ struct input_dev *input_dev;
+ struct ads7846_platform_data *pdata = spi->dev.platform_data;
+ unsigned long irq_flags;
+ int err;
+
+ if (!spi->irq) {
+ dev_dbg(&spi->dev, "no IRQ?\n");
+ return -ENODEV;
+ }
+
+ if (!pdata) {
+ dev_dbg(&spi->dev, "no platform data?\n");
+ return -ENODEV;
+ }
+
+ /* don't exceed max specified sample rate */
+ if (spi->max_speed_hz > (125000 * SAMPLE_BITS)) {
+ dev_dbg(&spi->dev, "f(sample) %d KHz?\n",
+ (spi->max_speed_hz/SAMPLE_BITS)/1000);
+ return -EINVAL;
+ }
+
+ /* We'd set TX word size 8 bits and RX word size to 13 bits ... except
+ * that even if the hardware can do that, the SPI controller driver
+ * may not. So we stick to very-portable 8 bit words, both RX and TX.
+ */
+ spi->bits_per_word = 8;
+ spi->mode = SPI_MODE_0;
+ err = spi_setup(spi);
+ if (err < 0)
+ return err;
- ts->last_msg = m;
+ ts = kzalloc(sizeof(struct ads7846), GFP_KERNEL);
+ packet = kzalloc(sizeof(struct ads7846_packet), GFP_KERNEL);
+ input_dev = input_allocate_device();
+ if (!ts || !packet || !input_dev) {
+ err = -ENOMEM;
+ goto err_free_mem;
+ }
+
+ dev_set_drvdata(&spi->dev, ts);
+
+ ts->packet = packet;
+ ts->spi = spi;
+ ts->input = input_dev;
+ ts->vref_mv = pdata->vref_mv;
+ ts->swap_xy = pdata->swap_xy;
+
+ mutex_init(&ts->lock);
+ init_waitqueue_head(&ts->wait);
+
+ ts->model = pdata->model ? : 7846;
+ ts->vref_delay_usecs = pdata->vref_delay_usecs ? : 100;
+ ts->x_plate_ohms = pdata->x_plate_ohms ? : 400;
+ ts->pressure_max = pdata->pressure_max ? : ~0;
+
+ if (pdata->filter != NULL) {
+ if (pdata->filter_init != NULL) {
+ err = pdata->filter_init(pdata, &ts->filter_data);
+ if (err < 0)
+ goto err_free_mem;
+ }
+ ts->filter = pdata->filter;
+ ts->filter_cleanup = pdata->filter_cleanup;
+ } else if (pdata->debounce_max) {
+ ts->debounce_max = pdata->debounce_max;
+ if (ts->debounce_max < 2)
+ ts->debounce_max = 2;
+ ts->debounce_tol = pdata->debounce_tol;
+ ts->debounce_rep = pdata->debounce_rep;
+ ts->filter = ads7846_debounce_filter;
+ ts->filter_data = ts;
+ } else {
+ ts->filter = ads7846_no_filter;
+ }
+
+ err = ads7846_setup_pendown(spi, ts);
+ if (err)
+ goto err_cleanup_filter;
+
+ if (pdata->penirq_recheck_delay_usecs)
+ ts->penirq_recheck_delay_usecs =
+ pdata->penirq_recheck_delay_usecs;
+
+ ts->wait_for_sync = pdata->wait_for_sync ? : null_wait_for_sync;
+
+ snprintf(ts->phys, sizeof(ts->phys), "%s/input0", dev_name(&spi->dev));
+ snprintf(ts->name, sizeof(ts->name), "ADS%d Touchscreen", ts->model);
+
+ input_dev->name = ts->name;
+ input_dev->phys = ts->phys;
+ input_dev->dev.parent = &spi->dev;
+
+ input_dev->evbit[0] = BIT_MASK(EV_KEY) | BIT_MASK(EV_ABS);
+ input_dev->keybit[BIT_WORD(BTN_TOUCH)] = BIT_MASK(BTN_TOUCH);
+ input_set_abs_params(input_dev, ABS_X,
+ pdata->x_min ? : 0,
+ pdata->x_max ? : MAX_12BIT,
+ 0, 0);
+ input_set_abs_params(input_dev, ABS_Y,
+ pdata->y_min ? : 0,
+ pdata->y_max ? : MAX_12BIT,
+ 0, 0);
+ input_set_abs_params(input_dev, ABS_PRESSURE,
+ pdata->pressure_min, pdata->pressure_max, 0, 0);
+
+ ads7846_setup_spi_msg(ts, pdata);
ts->reg = regulator_get(&spi->dev, "vcc");
if (IS_ERR(ts->reg)) {
@@ -1271,16 +1296,17 @@ static int __devinit ads7846_probe(struct spi_device *spi)
}
irq_flags = pdata->irq_flags ? : IRQF_TRIGGER_FALLING;
+ irq_flags |= IRQF_ONESHOT;
- err = request_irq(spi->irq, ads7846_irq, irq_flags,
- spi->dev.driver->name, ts);
-
+ err = request_threaded_irq(spi->irq, ads7846_hard_irq, ads7846_irq,
+ irq_flags, spi->dev.driver->name, ts);
if (err && !pdata->irq_flags) {
dev_info(&spi->dev,
"trying pin change workaround on irq %d\n", spi->irq);
- err = request_irq(spi->irq, ads7846_irq,
- IRQF_TRIGGER_FALLING | IRQF_TRIGGER_RISING,
- spi->dev.driver->name, ts);
+ irq_flags |= IRQF_TRIGGER_RISING;
+ err = request_threaded_irq(spi->irq,
+ ads7846_hard_irq, ads7846_irq,
+ irq_flags, spi->dev.driver->name, ts);
}
if (err) {
@@ -1294,7 +1320,8 @@ static int __devinit ads7846_probe(struct spi_device *spi)
dev_info(&spi->dev, "touchscreen, irq %d\n", spi->irq);
- /* take a first sample, leaving nPENIRQ active and vREF off; avoid
+ /*
+ * Take a first sample, leaving nPENIRQ active and vREF off; avoid
* the touchscreen, in case it's not connected.
*/
if (ts->model == 7845)
@@ -1340,20 +1367,18 @@ static int __devinit ads7846_probe(struct spi_device *spi)
static int __devexit ads7846_remove(struct spi_device *spi)
{
- struct ads7846 *ts = dev_get_drvdata(&spi->dev);
+ struct ads7846 *ts = dev_get_drvdata(&spi->dev);
device_init_wakeup(&spi->dev, false);
- ads784x_hwmon_unregister(spi, ts);
- input_unregister_device(ts->input);
-
- ads7846_suspend(spi, PMSG_SUSPEND);
-
sysfs_remove_group(&spi->dev.kobj, &ads784x_attr_group);
+ ads7846_disable(ts);
free_irq(ts->spi->irq, ts);
- /* suspend left the IRQ disabled */
- enable_irq(ts->spi->irq);
+
+ input_unregister_device(ts->input);
+
+ ads784x_hwmon_unregister(spi, ts);
regulator_disable(ts->reg);
regulator_put(ts->reg);
@@ -1368,6 +1393,7 @@ static int __devexit ads7846_remove(struct spi_device *spi)
kfree(ts);
dev_dbg(&spi->dev, "unregistered touchscreen\n");
+
return 0;
}
diff --git a/drivers/input/touchscreen/bu21013_ts.c b/drivers/input/touchscreen/bu21013_ts.c
new file mode 100644
index 000000000000..2ca9e5d66460
--- /dev/null
+++ b/drivers/input/touchscreen/bu21013_ts.c
@@ -0,0 +1,648 @@
+/*
+ * Copyright (C) ST-Ericsson SA 2010
+ * Author: Naveen Kumar G <naveen.gaddipati@stericsson.com> for ST-Ericsson
+ * License terms: GNU General Public License (GPL) version 2
+ */
+
+#include <linux/kernel.h>
+#include <linux/delay.h>
+#include <linux/interrupt.h>
+#include <linux/i2c.h>
+#include <linux/workqueue.h>
+#include <linux/input.h>
+#include <linux/input/bu21013.h>
+#include <linux/slab.h>
+
+#define PEN_DOWN_INTR 0
+#define MAX_FINGERS 2
+#define RESET_DELAY 30
+#define PENUP_TIMEOUT (10)
+#define DELTA_MIN 16
+#define MASK_BITS 0x03
+#define SHIFT_8 8
+#define SHIFT_2 2
+#define LENGTH_OF_BUFFER 11
+#define I2C_RETRY_COUNT 5
+
+#define BU21013_SENSORS_BTN_0_7_REG 0x70
+#define BU21013_SENSORS_BTN_8_15_REG 0x71
+#define BU21013_SENSORS_BTN_16_23_REG 0x72
+#define BU21013_X1_POS_MSB_REG 0x73
+#define BU21013_X1_POS_LSB_REG 0x74
+#define BU21013_Y1_POS_MSB_REG 0x75
+#define BU21013_Y1_POS_LSB_REG 0x76
+#define BU21013_X2_POS_MSB_REG 0x77
+#define BU21013_X2_POS_LSB_REG 0x78
+#define BU21013_Y2_POS_MSB_REG 0x79
+#define BU21013_Y2_POS_LSB_REG 0x7A
+#define BU21013_INT_CLR_REG 0xE8
+#define BU21013_INT_MODE_REG 0xE9
+#define BU21013_GAIN_REG 0xEA
+#define BU21013_OFFSET_MODE_REG 0xEB
+#define BU21013_XY_EDGE_REG 0xEC
+#define BU21013_RESET_REG 0xED
+#define BU21013_CALIB_REG 0xEE
+#define BU21013_DONE_REG 0xEF
+#define BU21013_SENSOR_0_7_REG 0xF0
+#define BU21013_SENSOR_8_15_REG 0xF1
+#define BU21013_SENSOR_16_23_REG 0xF2
+#define BU21013_POS_MODE1_REG 0xF3
+#define BU21013_POS_MODE2_REG 0xF4
+#define BU21013_CLK_MODE_REG 0xF5
+#define BU21013_IDLE_REG 0xFA
+#define BU21013_FILTER_REG 0xFB
+#define BU21013_TH_ON_REG 0xFC
+#define BU21013_TH_OFF_REG 0xFD
+
+
+#define BU21013_RESET_ENABLE 0x01
+
+#define BU21013_SENSORS_EN_0_7 0x3F
+#define BU21013_SENSORS_EN_8_15 0xFC
+#define BU21013_SENSORS_EN_16_23 0x1F
+
+#define BU21013_POS_MODE1_0 0x02
+#define BU21013_POS_MODE1_1 0x04
+#define BU21013_POS_MODE1_2 0x08
+
+#define BU21013_POS_MODE2_ZERO 0x01
+#define BU21013_POS_MODE2_AVG1 0x02
+#define BU21013_POS_MODE2_AVG2 0x04
+#define BU21013_POS_MODE2_EN_XY 0x08
+#define BU21013_POS_MODE2_EN_RAW 0x10
+#define BU21013_POS_MODE2_MULTI 0x80
+
+#define BU21013_CLK_MODE_DIV 0x01
+#define BU21013_CLK_MODE_EXT 0x02
+#define BU21013_CLK_MODE_CALIB 0x80
+
+#define BU21013_IDLET_0 0x01
+#define BU21013_IDLET_1 0x02
+#define BU21013_IDLET_2 0x04
+#define BU21013_IDLET_3 0x08
+#define BU21013_IDLE_INTERMIT_EN 0x10
+
+#define BU21013_DELTA_0_6 0x7F
+#define BU21013_FILTER_EN 0x80
+
+#define BU21013_INT_MODE_LEVEL 0x00
+#define BU21013_INT_MODE_EDGE 0x01
+
+#define BU21013_GAIN_0 0x01
+#define BU21013_GAIN_1 0x02
+#define BU21013_GAIN_2 0x04
+
+#define BU21013_OFFSET_MODE_DEFAULT 0x00
+#define BU21013_OFFSET_MODE_MOVE 0x01
+#define BU21013_OFFSET_MODE_DISABLE 0x02
+
+#define BU21013_TH_ON_0 0x01
+#define BU21013_TH_ON_1 0x02
+#define BU21013_TH_ON_2 0x04
+#define BU21013_TH_ON_3 0x08
+#define BU21013_TH_ON_4 0x10
+#define BU21013_TH_ON_5 0x20
+#define BU21013_TH_ON_6 0x40
+#define BU21013_TH_ON_7 0x80
+#define BU21013_TH_ON_MAX 0xFF
+
+#define BU21013_TH_OFF_0 0x01
+#define BU21013_TH_OFF_1 0x02
+#define BU21013_TH_OFF_2 0x04
+#define BU21013_TH_OFF_3 0x08
+#define BU21013_TH_OFF_4 0x10
+#define BU21013_TH_OFF_5 0x20
+#define BU21013_TH_OFF_6 0x40
+#define BU21013_TH_OFF_7 0x80
+#define BU21013_TH_OFF_MAX 0xFF
+
+#define BU21013_X_EDGE_0 0x01
+#define BU21013_X_EDGE_1 0x02
+#define BU21013_X_EDGE_2 0x04
+#define BU21013_X_EDGE_3 0x08
+#define BU21013_Y_EDGE_0 0x10
+#define BU21013_Y_EDGE_1 0x20
+#define BU21013_Y_EDGE_2 0x40
+#define BU21013_Y_EDGE_3 0x80
+
+#define BU21013_DONE 0x01
+#define BU21013_NUMBER_OF_X_SENSORS (6)
+#define BU21013_NUMBER_OF_Y_SENSORS (11)
+
+#define DRIVER_TP "bu21013_tp"
+
+/**
+ * struct bu21013_ts_data - touch panel data structure
+ * @client: pointer to the i2c client
+ * @wait: wait queue used to signal the interrupt thread to stop
+ * @touch_stopped: touch stop flag
+ * @chip: pointer to the touch panel controller
+ * @in_dev: pointer to the input device structure
+ * @intr_pin: interrupt pin value
+ *
+ * Touch panel device data structure
+ */
+struct bu21013_ts_data {
+ struct i2c_client *client;
+ wait_queue_head_t wait;
+ bool touch_stopped;
+ const struct bu21013_platform_device *chip;
+ struct input_dev *in_dev;
+ unsigned int intr_pin;
+};
+
+/**
+ * bu21013_read_block_data(): read the touch co-ordinates
+ * @data: bu21013_ts_data structure pointer
+ * @buf: byte pointer
+ *
+ * Read the touch co-ordinates using i2c read block into buffer
+ * and returns integer.
+ */
+static int bu21013_read_block_data(struct bu21013_ts_data *data, u8 *buf)
+{
+ int ret, i;
+
+ for (i = 0; i < I2C_RETRY_COUNT; i++) {
+ ret = i2c_smbus_read_i2c_block_data
+ (data->client, BU21013_SENSORS_BTN_0_7_REG,
+ LENGTH_OF_BUFFER, buf);
+ if (ret == LENGTH_OF_BUFFER)
+ return 0;
+ }
+ return -EINVAL;
+}
+
+/**
+ * bu21013_do_touch_report(): Get the touch co-ordinates
+ * @data: bu21013_ts_data structure pointer
+ *
+ * Reads the touch co-ordinates from the touch sensor registers,
+ * reports them to the input subsystem and returns 0 on success or a
+ * negative error code.
+ */
+static int bu21013_do_touch_report(struct bu21013_ts_data *data)
+{
+ u8 buf[LENGTH_OF_BUFFER];
+ unsigned int pos_x[2], pos_y[2];
+ bool has_x_sensors, has_y_sensors;
+ int finger_down_count = 0;
+ int i;
+
+ if (data == NULL)
+ return -EINVAL;
+
+ if (bu21013_read_block_data(data, buf) < 0)
+ return -EINVAL;
+
+ has_x_sensors = hweight32(buf[0] & BU21013_SENSORS_EN_0_7);
+ has_y_sensors = hweight32(((buf[1] & BU21013_SENSORS_EN_8_15) |
+ ((buf[2] & BU21013_SENSORS_EN_16_23) << SHIFT_8)) >> SHIFT_2);
+ if (!has_x_sensors || !has_y_sensors)
+ return 0;
+
+ for (i = 0; i < MAX_FINGERS; i++) {
+ const u8 *p = &buf[4 * i + 3];
+ unsigned int x = p[0] << SHIFT_2 | (p[1] & MASK_BITS);
+ unsigned int y = p[2] << SHIFT_2 | (p[3] & MASK_BITS);
+ if (x == 0 || y == 0)
+ continue;
+ pos_x[finger_down_count] = x;
+ pos_y[finger_down_count] = y;
+ finger_down_count++;
+ }
+
+ if (finger_down_count) {
+ if (finger_down_count == 2 &&
+ (abs(pos_x[0] - pos_x[1]) < DELTA_MIN ||
+ abs(pos_y[0] - pos_y[1]) < DELTA_MIN)) {
+ return 0;
+ }
+
+ for (i = 0; i < finger_down_count; i++) {
+ if (data->chip->x_flip)
+ pos_x[i] = data->chip->touch_x_max - pos_x[i];
+ if (data->chip->y_flip)
+ pos_y[i] = data->chip->touch_y_max - pos_y[i];
+
+ input_report_abs(data->in_dev,
+ ABS_MT_POSITION_X, pos_x[i]);
+ input_report_abs(data->in_dev,
+ ABS_MT_POSITION_Y, pos_y[i]);
+ input_mt_sync(data->in_dev);
+ }
+ } else
+ input_mt_sync(data->in_dev);
+
+ input_sync(data->in_dev);
+
+ return 0;
+}
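Each finger position above is assembled from an MSB/LSB register pair: eight high bits from the *_POS_MSB register and the two low bits from the *_POS_LSB register. A hedged helper spelling out what p[0] << SHIFT_2 | (p[1] & MASK_BITS) computes:

#include <stdint.h>

static unsigned int example_bu21013_coord(uint8_t msb, uint8_t lsb)
{
	/* 10-bit coordinate: 8 bits from MSB, 2 bits from LSB. */
	return ((unsigned int)msb << 2) | (lsb & 0x03);
}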
+/**
+ * bu21013_gpio_irq() - gpio thread function for touch interrupt
+ * @irq: irq value
+ * @device_data: void pointer
+ *
+ * This is the threaded interrupt handler for the touch interrupt;
+ * it polls the controller while the pen is down and returns irqreturn_t.
+ */
+static irqreturn_t bu21013_gpio_irq(int irq, void *device_data)
+{
+ struct bu21013_ts_data *data = device_data;
+ struct i2c_client *i2c = data->client;
+ int retval;
+
+ do {
+ retval = bu21013_do_touch_report(data);
+ if (retval < 0) {
+ dev_err(&i2c->dev, "bu21013_do_touch_report failed\n");
+ return IRQ_NONE;
+ }
+
+ data->intr_pin = data->chip->irq_read_val();
+ if (data->intr_pin == PEN_DOWN_INTR)
+ wait_event_timeout(data->wait, data->touch_stopped,
+ msecs_to_jiffies(2));
+ } while (!data->intr_pin && !data->touch_stopped);
+
+ return IRQ_HANDLED;
+}
+
+/**
+ * bu21013_init_chip() - power on sequence for the bu21013 controller
+ * @data: device structure pointer
+ *
+ * Powers on and configures the bu21013 controller;
+ * returns 0 on success or a negative error code.
+ */
+static int bu21013_init_chip(struct bu21013_ts_data *data)
+{
+ int retval;
+ struct i2c_client *i2c = data->client;
+
+ retval = i2c_smbus_write_byte_data(i2c, BU21013_RESET_REG,
+ BU21013_RESET_ENABLE);
+ if (retval < 0) {
+ dev_err(&i2c->dev, "BU21013_RESET reg write failed\n");
+ return retval;
+ }
+ msleep(RESET_DELAY);
+
+ retval = i2c_smbus_write_byte_data(i2c, BU21013_SENSOR_0_7_REG,
+ BU21013_SENSORS_EN_0_7);
+ if (retval < 0) {
+ dev_err(&i2c->dev, "BU21013_SENSOR_0_7 reg write failed\n");
+ return retval;
+ }
+
+ retval = i2c_smbus_write_byte_data(i2c, BU21013_SENSOR_8_15_REG,
+ BU21013_SENSORS_EN_8_15);
+ if (retval < 0) {
+ dev_err(&i2c->dev, "BU21013_SENSOR_8_15 reg write failed\n");
+ return retval;
+ }
+
+ retval = i2c_smbus_write_byte_data(i2c, BU21013_SENSOR_16_23_REG,
+ BU21013_SENSORS_EN_16_23);
+ if (retval < 0) {
+ dev_err(&i2c->dev, "BU21013_SENSOR_16_23 reg write failed\n");
+ return retval;
+ }
+
+ retval = i2c_smbus_write_byte_data(i2c, BU21013_POS_MODE1_REG,
+ (BU21013_POS_MODE1_0 | BU21013_POS_MODE1_1));
+ if (retval < 0) {
+ dev_err(&i2c->dev, "BU21013_POS_MODE1 reg write failed\n");
+ return retval;
+ }
+
+ retval = i2c_smbus_write_byte_data(i2c, BU21013_POS_MODE2_REG,
+ (BU21013_POS_MODE2_ZERO | BU21013_POS_MODE2_AVG1 |
+ BU21013_POS_MODE2_AVG2 | BU21013_POS_MODE2_EN_RAW |
+ BU21013_POS_MODE2_MULTI));
+ if (retval < 0) {
+ dev_err(&i2c->dev, "BU21013_POS_MODE2 reg write failed\n");
+ return retval;
+ }
+
+ if (data->chip->ext_clk)
+ retval = i2c_smbus_write_byte_data(i2c, BU21013_CLK_MODE_REG,
+ (BU21013_CLK_MODE_EXT | BU21013_CLK_MODE_CALIB));
+ else
+ retval = i2c_smbus_write_byte_data(i2c, BU21013_CLK_MODE_REG,
+ (BU21013_CLK_MODE_DIV | BU21013_CLK_MODE_CALIB));
+ if (retval < 0) {
+ dev_err(&i2c->dev, "BU21013_CLK_MODE reg write failed\n");
+ return retval;
+ }
+
+ retval = i2c_smbus_write_byte_data(i2c, BU21013_IDLE_REG,
+ (BU21013_IDLET_0 | BU21013_IDLE_INTERMIT_EN));
+ if (retval < 0) {
+ dev_err(&i2c->dev, "BU21013_IDLE reg write failed\n");
+ return retval;
+ }
+
+ retval = i2c_smbus_write_byte_data(i2c, BU21013_INT_MODE_REG,
+ BU21013_INT_MODE_LEVEL);
+ if (retval < 0) {
+ dev_err(&i2c->dev, "BU21013_INT_MODE reg write failed\n");
+ return retval;
+ }
+
+ retval = i2c_smbus_write_byte_data(i2c, BU21013_FILTER_REG,
+ (BU21013_DELTA_0_6 |
+ BU21013_FILTER_EN));
+ if (retval < 0) {
+ dev_err(&i2c->dev, "BU21013_FILTER reg write failed\n");
+ return retval;
+ }
+
+ retval = i2c_smbus_write_byte_data(i2c, BU21013_TH_ON_REG,
+ BU21013_TH_ON_5);
+ if (retval < 0) {
+ dev_err(&i2c->dev, "BU21013_TH_ON reg write failed\n");
+ return retval;
+ }
+
+ retval = i2c_smbus_write_byte_data(i2c, BU21013_TH_OFF_REG,
+ BU21013_TH_OFF_4 | BU21013_TH_OFF_3);
+ if (retval < 0) {
+ dev_err(&i2c->dev, "BU21013_TH_OFF reg write failed\n");
+ return retval;
+ }
+
+ retval = i2c_smbus_write_byte_data(i2c, BU21013_GAIN_REG,
+ (BU21013_GAIN_0 | BU21013_GAIN_1));
+ if (retval < 0) {
+ dev_err(&i2c->dev, "BU21013_GAIN reg write failed\n");
+ return retval;
+ }
+
+ retval = i2c_smbus_write_byte_data(i2c, BU21013_OFFSET_MODE_REG,
+ BU21013_OFFSET_MODE_DEFAULT);
+ if (retval < 0) {
+ dev_err(&i2c->dev, "BU21013_OFFSET_MODE reg write failed\n");
+ return retval;
+ }
+
+ retval = i2c_smbus_write_byte_data(i2c, BU21013_XY_EDGE_REG,
+ (BU21013_X_EDGE_0 | BU21013_X_EDGE_2 |
+ BU21013_Y_EDGE_1 | BU21013_Y_EDGE_3));
+ if (retval < 0) {
+ dev_err(&i2c->dev, "BU21013_XY_EDGE reg write failed\n");
+ return retval;
+ }
+
+ retval = i2c_smbus_write_byte_data(i2c, BU21013_DONE_REG,
+ BU21013_DONE);
+ if (retval < 0) {
+ dev_err(&i2c->dev, "BU21013_REG_DONE reg write failed\n");
+ return retval;
+ }
+
+ return 0;
+}
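bu21013_init_chip() above is a straight sequence of register writes with identical error handling. As a hedged design note, the same initialization could be table-driven, which keeps the error path in one place; the sketch below is not part of the driver and only lists a few of the registers defined above:

static int example_bu21013_write_table(struct i2c_client *i2c)
{
	static const struct { u8 reg; u8 val; } init[] = {
		{ BU21013_SENSOR_0_7_REG,	BU21013_SENSORS_EN_0_7 },
		{ BU21013_SENSOR_8_15_REG,	BU21013_SENSORS_EN_8_15 },
		{ BU21013_SENSOR_16_23_REG,	BU21013_SENSORS_EN_16_23 },
		{ BU21013_INT_MODE_REG,		BU21013_INT_MODE_LEVEL },
	};
	int i, ret;

	for (i = 0; i < ARRAY_SIZE(init); i++) {
		ret = i2c_smbus_write_byte_data(i2c, init[i].reg, init[i].val);
		if (ret < 0)
			return ret;		/* same handling for every write */
	}

	return 0;
}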
+
+/**
+ * bu21013_free_irq() - frees IRQ registered for touchscreen
+ * @bu21013_data: device structure pointer
+ *
+ * Signals the interrupt thread to stop processing and
+ * frees the interrupt.
+ */
+static void bu21013_free_irq(struct bu21013_ts_data *bu21013_data)
+{
+ bu21013_data->touch_stopped = true;
+ wake_up(&bu21013_data->wait);
+ free_irq(bu21013_data->chip->irq, bu21013_data);
+}
+
+/**
+ * bu21013_probe() - initializes the i2c-client touchscreen driver
+ * @client: i2c client structure pointer
+ * @id: i2c device id pointer
+ *
+ * Initializes the i2c-client touchscreen
+ * driver and returns 0 on success or a negative error code.
+ */
+static int __devinit bu21013_probe(struct i2c_client *client,
+ const struct i2c_device_id *id)
+{
+ struct bu21013_ts_data *bu21013_data;
+ struct input_dev *in_dev;
+ const struct bu21013_platform_device *pdata =
+ client->dev.platform_data;
+ int error;
+
+ if (!i2c_check_functionality(client->adapter,
+ I2C_FUNC_SMBUS_BYTE_DATA)) {
+ dev_err(&client->dev, "i2c smbus byte data not supported\n");
+ return -EIO;
+ }
+
+ if (!pdata) {
+ dev_err(&client->dev, "platform data not defined\n");
+ return -EINVAL;
+ }
+
+ bu21013_data = kzalloc(sizeof(struct bu21013_ts_data), GFP_KERNEL);
+ in_dev = input_allocate_device();
+ if (!bu21013_data || !in_dev) {
+ dev_err(&client->dev, "device memory alloc failed\n");
+ error = -ENOMEM;
+ goto err_free_mem;
+ }
+
+ bu21013_data->in_dev = in_dev;
+ bu21013_data->chip = pdata;
+ bu21013_data->client = client;
+ bu21013_data->touch_stopped = false;
+ init_waitqueue_head(&bu21013_data->wait);
+
+ /* configure the gpio pins */
+ if (pdata->cs_en) {
+ error = pdata->cs_en(pdata->cs_pin);
+ if (error < 0) {
+ dev_err(&client->dev, "chip init failed\n");
+ goto err_free_mem;
+ }
+ }
+
+ /* configure the touch panel controller */
+ error = bu21013_init_chip(bu21013_data);
+ if (error) {
+ dev_err(&client->dev, "error in bu21013 config\n");
+ goto err_cs_disable;
+ }
+
+ /* register the device to input subsystem */
+ in_dev->name = DRIVER_TP;
+ in_dev->id.bustype = BUS_I2C;
+ in_dev->dev.parent = &client->dev;
+
+ __set_bit(EV_SYN, in_dev->evbit);
+ __set_bit(EV_KEY, in_dev->evbit);
+ __set_bit(EV_ABS, in_dev->evbit);
+
+ input_set_abs_params(in_dev, ABS_MT_POSITION_X, 0,
+ pdata->x_max_res, 0, 0);
+ input_set_abs_params(in_dev, ABS_MT_POSITION_Y, 0,
+ pdata->y_max_res, 0, 0);
+ input_set_drvdata(in_dev, bu21013_data);
+
+ error = request_threaded_irq(pdata->irq, NULL, bu21013_gpio_irq,
+ IRQF_TRIGGER_FALLING | IRQF_SHARED,
+ DRIVER_TP, bu21013_data);
+ if (error) {
+ dev_err(&client->dev, "request irq %d failed\n", pdata->irq);
+ goto err_cs_disable;
+ }
+
+ error = input_register_device(in_dev);
+ if (error) {
+ dev_err(&client->dev, "failed to register input device\n");
+ goto err_free_irq;
+ }
+
+ device_init_wakeup(&client->dev, pdata->wakeup);
+ i2c_set_clientdata(client, bu21013_data);
+
+ return 0;
+
+err_free_irq:
+ bu21013_free_irq(bu21013_data);
+err_cs_disable:
+ pdata->cs_dis(pdata->cs_pin);
+err_free_mem:
+ input_free_device(in_dev);
+ kfree(bu21013_data);
+
+ return error;
+}
+/**
+ * bu21013_remove() - removes the i2c-client touchscreen driver
+ * @client: i2c client structure pointer
+ *
+ * Removes the i2c-client
+ * touchscreen driver and returns 0.
+ */
+static int __devexit bu21013_remove(struct i2c_client *client)
+{
+ struct bu21013_ts_data *bu21013_data = i2c_get_clientdata(client);
+
+ bu21013_free_irq(bu21013_data);
+
+ bu21013_data->chip->cs_dis(bu21013_data->chip->cs_pin);
+
+ input_unregister_device(bu21013_data->in_dev);
+ kfree(bu21013_data);
+
+ device_init_wakeup(&client->dev, false);
+
+ return 0;
+}
+
+#ifdef CONFIG_PM
+/**
+ * bu21013_suspend() - suspend the touch screen controller
+ * @dev: pointer to device structure
+ *
+ * Suspends the
+ * touch panel controller and returns 0.
+ */
+static int bu21013_suspend(struct device *dev)
+{
+ struct bu21013_ts_data *bu21013_data = dev_get_drvdata(dev);
+ struct i2c_client *client = bu21013_data->client;
+
+ bu21013_data->touch_stopped = true;
+ if (device_may_wakeup(&client->dev))
+ enable_irq_wake(bu21013_data->chip->irq);
+ else
+ disable_irq(bu21013_data->chip->irq);
+
+ return 0;
+}
+
+/**
+ * bu21013_resume() - resume the touch screen controller
+ * @dev: pointer to device structure
+ *
+ * Resumes the touch panel
+ * controller and returns 0 on success or a negative error code.
+ */
+static int bu21013_resume(struct device *dev)
+{
+ struct bu21013_ts_data *bu21013_data = dev_get_drvdata(dev);
+ struct i2c_client *client = bu21013_data->client;
+ int retval;
+
+ retval = bu21013_init_chip(bu21013_data);
+ if (retval < 0) {
+ dev_err(&client->dev, "bu21013 controller config failed\n");
+ return retval;
+ }
+
+ bu21013_data->touch_stopped = false;
+
+ if (device_may_wakeup(&client->dev))
+ disable_irq_wake(bu21013_data->chip->irq);
+ else
+ enable_irq(bu21013_data->chip->irq);
+
+ return 0;
+}
+
+static const struct dev_pm_ops bu21013_dev_pm_ops = {
+ .suspend = bu21013_suspend,
+ .resume = bu21013_resume,
+};
+#endif
+
+static const struct i2c_device_id bu21013_id[] = {
+ { DRIVER_TP, 0 },
+ { }
+};
+MODULE_DEVICE_TABLE(i2c, bu21013_id);
+
+static struct i2c_driver bu21013_driver = {
+ .driver = {
+ .name = DRIVER_TP,
+ .owner = THIS_MODULE,
+#ifdef CONFIG_PM
+ .pm = &bu21013_dev_pm_ops,
+#endif
+ },
+ .probe = bu21013_probe,
+ .remove = __devexit_p(bu21013_remove),
+ .id_table = bu21013_id,
+};
+
+/**
+ * bu21013_init() - initializes the bu21013 touchscreen driver
+ *
+ * Registers the bu21013 i2c driver
+ * and returns 0 on success or a negative error code.
+ */
+static int __init bu21013_init(void)
+{
+ return i2c_add_driver(&bu21013_driver);
+}
+
+/**
+ * bu21013_exit() - de-initializes the bu21013 touchscreen driver
+ *
+ * De-initializes (unregisters) the bu21013
+ * touchscreen driver.
+ */
+static void __exit bu21013_exit(void)
+{
+ i2c_del_driver(&bu21013_driver);
+}
+
+module_init(bu21013_init);
+module_exit(bu21013_exit);
+
+MODULE_LICENSE("GPL v2");
+MODULE_AUTHOR("Naveen Kumar G <naveen.gaddipati@stericsson.com>");
+MODULE_DESCRIPTION("bu21013 touch screen controller driver");
diff --git a/drivers/input/touchscreen/cy8ctmg110_ts.c b/drivers/input/touchscreen/cy8ctmg110_ts.c
index 5ec0946938fe..d0c3a7229adf 100644
--- a/drivers/input/touchscreen/cy8ctmg110_ts.c
+++ b/drivers/input/touchscreen/cy8ctmg110_ts.c
@@ -206,9 +206,9 @@ static int __devinit cy8ctmg110_probe(struct i2c_client *client,
input_dev->keybit[BIT_WORD(BTN_TOUCH)] = BIT_MASK(BTN_TOUCH);
input_set_abs_params(input_dev, ABS_X,
- CY8CTMG110_X_MIN, CY8CTMG110_X_MAX, 0, 0);
+ CY8CTMG110_X_MIN, CY8CTMG110_X_MAX, 4, 0);
input_set_abs_params(input_dev, ABS_Y,
- CY8CTMG110_Y_MIN, CY8CTMG110_Y_MAX, 0, 0);
+ CY8CTMG110_Y_MIN, CY8CTMG110_Y_MAX, 4, 0);
if (ts->reset_pin) {
err = gpio_request(ts->reset_pin, NULL);
diff --git a/drivers/input/touchscreen/hp680_ts_input.c b/drivers/input/touchscreen/hp680_ts_input.c
index a89700e7ace4..dd4e8f020b99 100644
--- a/drivers/input/touchscreen/hp680_ts_input.c
+++ b/drivers/input/touchscreen/hp680_ts_input.c
@@ -28,29 +28,29 @@ static void do_softint(struct work_struct *work)
u8 scpdr;
int touched = 0;
- if (ctrl_inb(PHDR) & PHDR_TS_PEN_DOWN) {
- scpdr = ctrl_inb(SCPDR);
+ if (__raw_readb(PHDR) & PHDR_TS_PEN_DOWN) {
+ scpdr = __raw_readb(SCPDR);
scpdr |= SCPDR_TS_SCAN_ENABLE;
scpdr &= ~SCPDR_TS_SCAN_Y;
- ctrl_outb(scpdr, SCPDR);
+ __raw_writeb(scpdr, SCPDR);
udelay(30);
absy = adc_single(ADC_CHANNEL_TS_Y);
- scpdr = ctrl_inb(SCPDR);
+ scpdr = __raw_readb(SCPDR);
scpdr |= SCPDR_TS_SCAN_Y;
scpdr &= ~SCPDR_TS_SCAN_X;
- ctrl_outb(scpdr, SCPDR);
+ __raw_writeb(scpdr, SCPDR);
udelay(30);
absx = adc_single(ADC_CHANNEL_TS_X);
- scpdr = ctrl_inb(SCPDR);
+ scpdr = __raw_readb(SCPDR);
scpdr |= SCPDR_TS_SCAN_X;
scpdr &= ~SCPDR_TS_SCAN_ENABLE;
- ctrl_outb(scpdr, SCPDR);
+ __raw_writeb(scpdr, SCPDR);
udelay(100);
- touched = ctrl_inb(PHDR) & PHDR_TS_PEN_DOWN;
+ touched = __raw_readb(PHDR) & PHDR_TS_PEN_DOWN;
}
if (touched) {
@@ -107,8 +107,7 @@ static int __init hp680_ts_init(void)
return 0;
fail2: free_irq(HP680_TS_IRQ, NULL);
- cancel_delayed_work(&work);
- flush_scheduled_work();
+ cancel_delayed_work_sync(&work);
fail1: input_free_device(hp680_ts_dev);
return err;
}
@@ -116,8 +115,7 @@ static int __init hp680_ts_init(void)
static void __exit hp680_ts_exit(void)
{
free_irq(HP680_TS_IRQ, NULL);
- cancel_delayed_work(&work);
- flush_scheduled_work();
+ cancel_delayed_work_sync(&work);
input_unregister_device(hp680_ts_dev);
}
diff --git a/drivers/input/touchscreen/intel-mid-touch.c b/drivers/input/touchscreen/intel-mid-touch.c
new file mode 100644
index 000000000000..c0307b22d86f
--- /dev/null
+++ b/drivers/input/touchscreen/intel-mid-touch.c
@@ -0,0 +1,687 @@
+/*
+ * Intel MID Resistive Touch Screen Driver
+ *
+ * Copyright (C) 2008 Intel Corp
+ *
+ * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; version 2 of the License.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write to the Free Software Foundation, Inc.,
+ * 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA.
+ *
+ * Questions/Comments/Bug fixes to Sreedhara (sreedhara.ds@intel.com)
+ * Ramesh Agarwal (ramesh.agarwal@intel.com)
+ * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+ *
+ * TODO:
+ * review conversion of r/m/w sequences
+ */
+
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/input.h>
+#include <linux/interrupt.h>
+#include <linux/err.h>
+#include <linux/param.h>
+#include <linux/slab.h>
+#include <linux/platform_device.h>
+#include <linux/irq.h>
+#include <linux/delay.h>
+#include <asm/intel_scu_ipc.h>
+
+/* PMIC Interrupt registers */
+#define PMIC_REG_ID1 0x00 /* PMIC ID1 register */
+
+/* PMIC Interrupt registers */
+#define PMIC_REG_INT 0x04 /* PMIC interrupt register */
+#define PMIC_REG_MINT 0x05 /* PMIC interrupt mask register */
+
+/* ADC Interrupt registers */
+#define PMIC_REG_ADCINT 0x5F /* ADC interrupt register */
+#define PMIC_REG_MADCINT 0x60 /* ADC interrupt mask register */
+
+/* ADC Control registers */
+#define PMIC_REG_ADCCNTL1 0x61 /* ADC control register */
+
+/* ADC Channel Selection registers */
+#define PMICADDR0 0xA4
+#define END_OF_CHANNEL 0x1F
+
+/* ADC Result register */
+#define PMIC_REG_ADCSNS0H 0x64
+
+/* ADC channels for touch screen */
+#define MRST_TS_CHAN10 0xA /* Touch screen X+ connection */
+#define MRST_TS_CHAN11 0xB /* Touch screen X- connection */
+#define MRST_TS_CHAN12 0xC /* Touch screen Y+ connection */
+#define MRST_TS_CHAN13 0xD /* Touch screen Y- connection */
+
+/* Touch screen channel BIAS constants */
+#define MRST_XBIAS 0x20
+#define MRST_YBIAS 0x40
+#define MRST_ZBIAS 0x80
+
+/* Touch screen coordinates */
+#define MRST_X_MIN 10
+#define MRST_X_MAX 1024
+#define MRST_X_FUZZ 5
+#define MRST_Y_MIN 10
+#define MRST_Y_MAX 1024
+#define MRST_Y_FUZZ 5
+#define MRST_PRESSURE_MIN 0
+#define MRST_PRESSURE_NOMINAL 50
+#define MRST_PRESSURE_MAX 100
+
+#define WAIT_ADC_COMPLETION 10 /* msec */
+
+/* PMIC ADC round robin delays */
+#define ADC_LOOP_DELAY0 0x0 /* Continuous loop */
+#define ADC_LOOP_DELAY1 0x1 /* 4.5 ms approximate */
+
+/* PMIC Vendor Identifiers */
+#define PMIC_VENDOR_FS 0 /* PMIC vendor FreeScale */
+#define PMIC_VENDOR_MAXIM 1 /* PMIC vendor MAXIM */
+#define PMIC_VENDOR_NEC 2 /* PMIC vendor NEC */
+#define MRSTOUCH_MAX_CHANNELS 32 /* Maximum ADC channels */
+
+/* Touch screen device structure */
+struct mrstouch_dev {
+ struct device *dev; /* device associated with touch screen */
+ struct input_dev *input;
+ char phys[32];
+ u16 asr; /* Address selection register */
+ int irq;
+ unsigned int vendor; /* PMIC vendor */
+ unsigned int rev; /* PMIC revision */
+
+ int (*read_prepare)(struct mrstouch_dev *tsdev);
+ int (*read)(struct mrstouch_dev *tsdev, u16 *x, u16 *y, u16 *z);
+ int (*read_finish)(struct mrstouch_dev *tsdev);
+};
+
+
+/*************************** NEC and Maxim Interface ************************/
+
+static int mrstouch_nec_adc_read_prepare(struct mrstouch_dev *tsdev)
+{
+ return intel_scu_ipc_update_register(PMIC_REG_MADCINT, 0, 0x20);
+}
+
+/*
+ * Enables PENDET interrupt.
+ */
+static int mrstouch_nec_adc_read_finish(struct mrstouch_dev *tsdev)
+{
+ int err;
+
+ err = intel_scu_ipc_update_register(PMIC_REG_MADCINT, 0x20, 0x20);
+ if (!err)
+ err = intel_scu_ipc_update_register(PMIC_REG_ADCCNTL1, 0, 0x05);
+
+ return err;
+}
+
+/*
+ * Reads the PMIC ADC touch screen result.
+ * Reads the ADC storage registers for the higher 7 and lower 3 bits,
+ * combines the two readings into a single value and turns off the gain bit
+ */
+static int mrstouch_ts_chan_read(u16 offset, u16 chan, u16 *vp, u16 *vm)
+{
+ int err;
+ u16 result;
+ u32 res;
+
+ result = PMIC_REG_ADCSNS0H + offset;
+
+ if (chan == MRST_TS_CHAN12)
+ result += 4;
+
+ err = intel_scu_ipc_ioread32(result, &res);
+ if (err)
+ return err;
+
+ /* Mash the bits up */
+
+ *vp = (res & 0xFF) << 3; /* Highest 7 bits */
+ *vp |= (res >> 8) & 0x07; /* Lower 3 bits */
+ *vp &= 0x3FF;
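+ /*
+ * Illustrative example (made-up register value, not from a datasheet):
+ * if the low 16 bits of 'res' were 0x03AB, then vp becomes
+ * ((0xAB << 3) | 0x03) & 0x3FF = 0x15B, a 10-bit sample rebuilt from
+ * the two result bytes.
+ */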
+
+ res >>= 16;
+
+ *vm = (res & 0xFF) << 3; /* Highest 7 bits */
+ *vm |= (res >> 8) & 0x07; /* Lower 3 bits */
+ *vm &= 0x3FF;
+
+ return 0;
+}
+
+/*
+ * Enables X, Y and Z bias values
+ * Enables YPYM for X channels and XPXM for Y channels
+ */
+static int mrstouch_ts_bias_set(uint offset, uint bias)
+{
+ int count;
+ u16 chan, start;
+ u16 reg[4];
+ u8 data[4];
+
+ chan = PMICADDR0 + offset;
+ start = MRST_TS_CHAN10;
+
+ for (count = 0; count <= 3; count++) {
+ reg[count] = chan++;
+ data[count] = bias | (start + count);
+ }
+
+ return intel_scu_ipc_writev(reg, data, 4);
+}
+
+/* To read touch screen channel values */
+static int mrstouch_nec_adc_read(struct mrstouch_dev *tsdev,
+ u16 *x, u16 *y, u16 *z)
+{
+ int err;
+ u16 xm, ym, zm;
+
+ /* configure Y bias for X channels */
+ err = mrstouch_ts_bias_set(tsdev->asr, MRST_YBIAS);
+ if (err)
+ goto ipc_error;
+
+ msleep(WAIT_ADC_COMPLETION);
+
+ /* read x+ and x- channels */
+ err = mrstouch_ts_chan_read(tsdev->asr, MRST_TS_CHAN10, x, &xm);
+ if (err)
+ goto ipc_error;
+
+ /* configure x bias for y channels */
+ err = mrstouch_ts_bias_set(tsdev->asr, MRST_XBIAS);
+ if (err)
+ goto ipc_error;
+
+ msleep(WAIT_ADC_COMPLETION);
+
+ /* read y+ and y- channels */
+ err = mrstouch_ts_chan_read(tsdev->asr, MRST_TS_CHAN12, y, &ym);
+ if (err)
+ goto ipc_error;
+
+ /* configure z bias for x and y channels */
+ err = mrstouch_ts_bias_set(tsdev->asr, MRST_ZBIAS);
+ if (err)
+ goto ipc_error;
+
+ msleep(WAIT_ADC_COMPLETION);
+
+ /* read z+ and z- channels */
+ err = mrstouch_ts_chan_read(tsdev->asr, MRST_TS_CHAN10, z, &zm);
+ if (err)
+ goto ipc_error;
+
+ return 0;
+
+ipc_error:
+ dev_err(tsdev->dev, "ipc error during adc read\n");
+ return err;
+}
+
+
+/*************************** Freescale Interface ************************/
+
+static int mrstouch_fs_adc_read_prepare(struct mrstouch_dev *tsdev)
+{
+ int err, count;
+ u16 chan;
+ u16 reg[5];
+ u8 data[5];
+
+ /* Stop the ADC */
+ err = intel_scu_ipc_update_register(PMIC_REG_MADCINT, 0x00, 0x02);
+ if (err)
+ goto ipc_error;
+
+ chan = PMICADDR0 + tsdev->asr;
+
+ /* Set X BIAS */
+ for (count = 0; count <= 3; count++) {
+ reg[count] = chan++;
+ data[count] = 0x2A;
+ }
+ reg[count] = chan++; /* Dummy */
+ data[count] = 0;
+
+ err = intel_scu_ipc_writev(reg, data, 5);
+ if (err)
+ goto ipc_error;
+
+ msleep(WAIT_ADC_COMPLETION);
+
+ /* Set Y BIAS */
+ for (count = 0; count <= 3; count++) {
+ reg[count] = chan++;
+ data[count] = 0x4A;
+ }
+ reg[count] = chan++; /* Dummy */
+ data[count] = 0;
+
+ err = intel_scu_ipc_writev(reg, data, 5);
+ if (err)
+ goto ipc_error;
+
+ msleep(WAIT_ADC_COMPLETION);
+
+ /* Set Z BIAS */
+ err = intel_scu_ipc_iowrite32(chan + 2, 0x8A8A8A8A);
+ if (err)
+ goto ipc_error;
+
+ msleep(WAIT_ADC_COMPLETION);
+
+ return 0;
+
+ipc_error:
+ dev_err(tsdev->dev, "ipc error during %s\n", __func__);
+ return err;
+}
+
+static int mrstouch_fs_adc_read(struct mrstouch_dev *tsdev,
+ u16 *x, u16 *y, u16 *z)
+{
+ int err;
+ u16 result;
+ u16 reg[4];
+ u8 data[4];
+
+ result = PMIC_REG_ADCSNS0H + tsdev->asr;
+
+ reg[0] = result + 4;
+ reg[1] = result + 5;
+ reg[2] = result + 16;
+ reg[3] = result + 17;
+
+ err = intel_scu_ipc_readv(reg, data, 4);
+ if (err)
+ goto ipc_error;
+
+ *x = data[0] << 3; /* Higher 7 bits */
+ *x |= data[1] & 0x7; /* Lower 3 bits */
+ *x &= 0x3FF;
+
+ *y = data[2] << 3; /* Higher 7 bits */
+ *y |= data[3] & 0x7; /* Lower 3 bits */
+ *y &= 0x3FF;
+
+ /* Read Z value */
+ reg[0] = result + 28;
+ reg[1] = result + 29;
+
+ err = intel_scu_ipc_readv(reg, data, 4);
+ if (err)
+ goto ipc_error;
+
+ *z = data[0] << 3; /* Higher 7 bits */
+ *z |= data[1] & 0x7; /* Lower 3 bits */
+ *z &= 0x3FF;
+
+ return 0;
+
+ipc_error:
+ dev_err(tsdev->dev, "ipc error during %s\n", __func__);
+ return err;
+}
+
+static int mrstouch_fs_adc_read_finish(struct mrstouch_dev *tsdev)
+{
+ int err, count;
+ u16 chan;
+ u16 reg[5];
+ u8 data[5];
+
+ /* Clear all TS channels */
+ chan = PMICADDR0 + tsdev->asr;
+ for (count = 0; count <= 4; count++) {
+ reg[count] = chan++;
+ data[count] = 0;
+ }
+ err = intel_scu_ipc_writev(reg, data, 5);
+ if (err)
+ goto ipc_error;
+
+ for (count = 0; count <= 4; count++) {
+ reg[count] = chan++;
+ data[count] = 0;
+ }
+ err = intel_scu_ipc_writev(reg, data, 5);
+ if (err)
+ goto ipc_error;
+
+ err = intel_scu_ipc_iowrite32(chan + 2, 0x00000000);
+ if (err)
+ goto ipc_error;
+
+ /* Start ADC */
+ err = intel_scu_ipc_update_register(PMIC_REG_MADCINT, 0x02, 0x02);
+ if (err)
+ goto ipc_error;
+
+ return 0;
+
+ipc_error:
+ dev_err(tsdev->dev, "ipc error during %s\n", __func__);
+ return err;
+}
+
+static void mrstouch_report_event(struct input_dev *input,
+ unsigned int x, unsigned int y, unsigned int z)
+{
+ if (z > MRST_PRESSURE_NOMINAL) {
+ /* Pen touched, report button touch and coordinates */
+ input_report_key(input, BTN_TOUCH, 1);
+ input_report_abs(input, ABS_X, x);
+ input_report_abs(input, ABS_Y, y);
+ } else {
+ input_report_key(input, BTN_TOUCH, 0);
+ }
+
+ input_report_abs(input, ABS_PRESSURE, z);
+ input_sync(input);
+}
+
+/* PENDET interrupt handler */
+static irqreturn_t mrstouch_pendet_irq(int irq, void *dev_id)
+{
+ struct mrstouch_dev *tsdev = dev_id;
+ u16 x, y, z;
+
+ /*
+ * Should we lower thread priority? Probably not, since we are
+ * not spinning but sleeping...
+ */
+
+ if (tsdev->read_prepare(tsdev))
+ goto out;
+
+ do {
+ if (tsdev->read(tsdev, &x, &y, &z))
+ break;
+
+ mrstouch_report_event(tsdev->input, x, y, z);
+ } while (z > MRST_PRESSURE_NOMINAL);
+
+ tsdev->read_finish(tsdev);
+
+out:
+ return IRQ_HANDLED;
+}
+
+/* Utility to read PMIC ID */
+static int __devinit mrstouch_read_pmic_id(uint *vendor, uint *rev)
+{
+ int err;
+ u8 r;
+
+ err = intel_scu_ipc_ioread8(PMIC_REG_ID1, &r);
+ if (err)
+ return err;
+
+ *vendor = r & 0x7;
+ *rev = (r >> 3) & 0x7;
+
+ return 0;
+}
+
+/*
+ * Parse ADC channels to find the end of the channels configured by other ADC users.
+ * NEC and MAXIM require 4 channels and FreeScale needs 18 channels.
+ */
+static int __devinit mrstouch_chan_parse(struct mrstouch_dev *tsdev)
+{
+ int err, i, found;
+ u8 r8;
+
+ found = -1;
+
+ for (i = 0; i < MRSTOUCH_MAX_CHANNELS; i++) {
+ if (found >= 0)
+ break;
+
+ err = intel_scu_ipc_ioread8(PMICADDR0 + i, &r8);
+ if (err)
+ return err;
+
+ if (r8 == END_OF_CHANNEL) {
+ found = i;
+ break;
+ }
+ }
+ if (found < 0)
+ return 0;
+
+ if (tsdev->vendor == PMIC_VENDOR_FS) {
+ if (found && found > (MRSTOUCH_MAX_CHANNELS - 18))
+ return -ENOSPC;
+ } else {
+ if (found && found > (MRSTOUCH_MAX_CHANNELS - 4))
+ return -ENOSPC;
+ }
+ return found;
+}
+
+
+/*
+ * Writes touch screen channels to ADC address selection registers
+ */
+static int __devinit mrstouch_ts_chan_set(uint offset)
+{
+ u16 chan;
+
+ int ret, count;
+
+ chan = PMICADDR0 + offset;
+ for (count = 0; count <= 3; count++) {
+ ret = intel_scu_ipc_iowrite8(chan++, MRST_TS_CHAN10 + count);
+ if (ret)
+ return ret;
+ }
+ return intel_scu_ipc_iowrite8(chan++, END_OF_CHANNEL);
+}
+
+/* Initialize ADC */
+static int __devinit mrstouch_adc_init(struct mrstouch_dev *tsdev)
+{
+ int err, start;
+ u8 ra, rm;
+
+ err = mrstouch_read_pmic_id(&tsdev->vendor, &tsdev->rev);
+ if (err) {
+ dev_err(tsdev->dev, "Unable to read PMIC id\n");
+ return err;
+ }
+
+ switch (tsdev->vendor) {
+ case PMIC_VENDOR_NEC:
+ case PMIC_VENDOR_MAXIM:
+ tsdev->read_prepare = mrstouch_nec_adc_read_prepare;
+ tsdev->read = mrstouch_nec_adc_read;
+ tsdev->read_finish = mrstouch_nec_adc_read_finish;
+ break;
+
+ case PMIC_VENDOR_FS:
+ tsdev->read_prepare = mrstouch_fs_adc_read_prepare;
+ tsdev->read = mrstouch_fs_adc_read;
+ tsdev->read_finish = mrstouch_fs_adc_read_finish;
+ break;
+
+ default:
+ dev_err(tsdev->dev,
+ "Unsupported touchscreen: %d\n", tsdev->vendor);
+ return -ENXIO;
+ }
+
+ start = mrstouch_chan_parse(tsdev);
+ if (start < 0) {
+ dev_err(tsdev->dev, "Unable to parse channels\n");
+ return start;
+ }
+
+ tsdev->asr = start;
+
+ /*
+ * ADC power on, start, enable PENDET and set loop delay
+ * ADC loop delay is set to 4.5 ms approximately
+ * Loop delay more than this results in jitter in adc readings
+ * Setting loop delay to 0 (continuous loop) in MAXIM stops PENDET
+ * interrupt generation sometimes.
+ */
+
+ if (tsdev->vendor == PMIC_VENDOR_FS) {
+ ra = 0xE0 | ADC_LOOP_DELAY0;
+ rm = 0x5;
+ } else {
+ /* NEC and MAXIM are not consistent with loop delay 0 */
+ ra = 0xE0 | ADC_LOOP_DELAY1;
+ rm = 0x0;
+
+ /* configure touch screen channels */
+ err = mrstouch_ts_chan_set(tsdev->asr);
+ if (err)
+ return err;
+ }
+
+ err = intel_scu_ipc_update_register(PMIC_REG_ADCCNTL1, ra, 0xE7);
+ if (err)
+ return err;
+
+ err = intel_scu_ipc_update_register(PMIC_REG_MADCINT, rm, 0x03);
+ if (err)
+ return err;
+
+ return 0;
+}
+
+
+/* Probe function for touch screen driver */
+static int __devinit mrstouch_probe(struct platform_device *pdev)
+{
+ struct mrstouch_dev *tsdev;
+ struct input_dev *input;
+ int err;
+ int irq;
+
+ irq = platform_get_irq(pdev, 0);
+ if (irq < 0) {
+ dev_err(&pdev->dev, "no interrupt assigned\n");
+ return -EINVAL;
+ }
+
+ tsdev = kzalloc(sizeof(struct mrstouch_dev), GFP_KERNEL);
+ input = input_allocate_device();
+ if (!tsdev || !input) {
+ dev_err(&pdev->dev, "unable to allocate memory\n");
+ err = -ENOMEM;
+ goto err_free_mem;
+ }
+
+ tsdev->dev = &pdev->dev;
+ tsdev->input = input;
+ tsdev->irq = irq;
+
+ snprintf(tsdev->phys, sizeof(tsdev->phys),
+ "%s/input0", dev_name(tsdev->dev));
+
+ err = mrstouch_adc_init(tsdev);
+ if (err) {
+ dev_err(&pdev->dev, "ADC initialization failed\n");
+ goto err_free_mem;
+ }
+
+ input->name = "mrst_touchscreen";
+ input->phys = tsdev->phys;
+ input->dev.parent = tsdev->dev;
+
+ input->id.vendor = tsdev->vendor;
+ input->id.version = tsdev->rev;
+
+ input->evbit[0] = BIT_MASK(EV_KEY) | BIT_MASK(EV_ABS);
+ input->keybit[BIT_WORD(BTN_TOUCH)] = BIT_MASK(BTN_TOUCH);
+
+ input_set_abs_params(tsdev->input, ABS_X,
+ MRST_X_MIN, MRST_X_MAX, MRST_X_FUZZ, 0);
+ input_set_abs_params(tsdev->input, ABS_Y,
+ MRST_Y_MIN, MRST_Y_MAX, MRST_Y_FUZZ, 0);
+ input_set_abs_params(tsdev->input, ABS_PRESSURE,
+ MRST_PRESSURE_MIN, MRST_PRESSURE_MAX, 0, 0);
+
+ err = request_threaded_irq(tsdev->irq, NULL, mrstouch_pendet_irq,
+ 0, "mrstouch", tsdev);
+ if (err) {
+ dev_err(tsdev->dev, "unable to allocate irq\n");
+ goto err_free_mem;
+ }
+
+ err = input_register_device(tsdev->input);
+ if (err) {
+ dev_err(tsdev->dev, "unable to register input device\n");
+ goto err_free_irq;
+ }
+
+ platform_set_drvdata(pdev, tsdev);
+ return 0;
+
+err_free_irq:
+ free_irq(tsdev->irq, tsdev);
+err_free_mem:
+ input_free_device(input);
+ kfree(tsdev);
+ return err;
+}
+
+static int __devexit mrstouch_remove(struct platform_device *pdev)
+{
+ struct mrstouch_dev *tsdev = platform_get_drvdata(pdev);
+
+ free_irq(tsdev->irq, tsdev);
+ input_unregister_device(tsdev->input);
+ kfree(tsdev);
+
+ platform_set_drvdata(pdev, NULL);
+
+ return 0;
+}
+
+static struct platform_driver mrstouch_driver = {
+ .driver = {
+ .name = "pmic_touch",
+ .owner = THIS_MODULE,
+ },
+ .probe = mrstouch_probe,
+ .remove = __devexit_p(mrstouch_remove),
+};
+
+static int __init mrstouch_init(void)
+{
+ return platform_driver_register(&mrstouch_driver);
+}
+module_init(mrstouch_init);
+
+static void __exit mrstouch_exit(void)
+{
+ platform_driver_unregister(&mrstouch_driver);
+}
+module_exit(mrstouch_exit);
+
+MODULE_AUTHOR("Sreedhara Murthy. D.S, sreedhara.ds@intel.com");
+MODULE_DESCRIPTION("Intel Moorestown Resistive Touch Screen Driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/input/touchscreen/lpc32xx_ts.c b/drivers/input/touchscreen/lpc32xx_ts.c
new file mode 100644
index 000000000000..dcf803f5a1f7
--- /dev/null
+++ b/drivers/input/touchscreen/lpc32xx_ts.c
@@ -0,0 +1,411 @@
+/*
+ * LPC32xx built-in touchscreen driver
+ *
+ * Copyright (C) 2010 NXP Semiconductors
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/platform_device.h>
+#include <linux/init.h>
+#include <linux/input.h>
+#include <linux/interrupt.h>
+#include <linux/module.h>
+#include <linux/clk.h>
+#include <linux/io.h>
+#include <linux/slab.h>
+
+/*
+ * Touchscreen controller register offsets
+ */
+#define LPC32XX_TSC_STAT 0x00
+#define LPC32XX_TSC_SEL 0x04
+#define LPC32XX_TSC_CON 0x08
+#define LPC32XX_TSC_FIFO 0x0C
+#define LPC32XX_TSC_DTR 0x10
+#define LPC32XX_TSC_RTR 0x14
+#define LPC32XX_TSC_UTR 0x18
+#define LPC32XX_TSC_TTR 0x1C
+#define LPC32XX_TSC_DXP 0x20
+#define LPC32XX_TSC_MIN_X 0x24
+#define LPC32XX_TSC_MAX_X 0x28
+#define LPC32XX_TSC_MIN_Y 0x2C
+#define LPC32XX_TSC_MAX_Y 0x30
+#define LPC32XX_TSC_AUX_UTR 0x34
+#define LPC32XX_TSC_AUX_MIN 0x38
+#define LPC32XX_TSC_AUX_MAX 0x3C
+
+#define LPC32XX_TSC_STAT_FIFO_OVRRN (1 << 8)
+#define LPC32XX_TSC_STAT_FIFO_EMPTY (1 << 7)
+
+#define LPC32XX_TSC_SEL_DEFVAL 0x0284
+
+#define LPC32XX_TSC_ADCCON_IRQ_TO_FIFO_4 (0x1 << 11)
+#define LPC32XX_TSC_ADCCON_X_SAMPLE_SIZE(s) ((10 - (s)) << 7)
+#define LPC32XX_TSC_ADCCON_Y_SAMPLE_SIZE(s) ((10 - (s)) << 4)
+#define LPC32XX_TSC_ADCCON_POWER_UP (1 << 2)
+#define LPC32XX_TSC_ADCCON_AUTO_EN (1 << 0)
+
+#define LPC32XX_TSC_FIFO_TS_P_LEVEL (1 << 31)
+#define LPC32XX_TSC_FIFO_NORMALIZE_X_VAL(x) (((x) & 0x03FF0000) >> 16)
+#define LPC32XX_TSC_FIFO_NORMALIZE_Y_VAL(y) ((y) & 0x000003FF)
+
+#define LPC32XX_TSC_ADCDAT_VALUE_MASK 0x000003FF
+
+#define LPC32XX_TSC_MIN_XY_VAL 0x0
+#define LPC32XX_TSC_MAX_XY_VAL 0x3FF
+
+#define MOD_NAME "ts-lpc32xx"
+
+#define tsc_readl(dev, reg) \
+ __raw_readl((dev)->tsc_base + (reg))
+#define tsc_writel(dev, reg, val) \
+ __raw_writel((val), (dev)->tsc_base + (reg))
+
+struct lpc32xx_tsc {
+ struct input_dev *dev;
+ void __iomem *tsc_base;
+ int irq;
+ struct clk *clk;
+};
+
+static void lpc32xx_fifo_clear(struct lpc32xx_tsc *tsc)
+{
+ while (!(tsc_readl(tsc, LPC32XX_TSC_STAT) &
+ LPC32XX_TSC_STAT_FIFO_EMPTY))
+ tsc_readl(tsc, LPC32XX_TSC_FIFO);
+}
+
+static irqreturn_t lpc32xx_ts_interrupt(int irq, void *dev_id)
+{
+ u32 tmp, rv[4], xs[4], ys[4];
+ int idx;
+ struct lpc32xx_tsc *tsc = dev_id;
+ struct input_dev *input = tsc->dev;
+
+ tmp = tsc_readl(tsc, LPC32XX_TSC_STAT);
+
+ if (tmp & LPC32XX_TSC_STAT_FIFO_OVRRN) {
+ /* FIFO overflow - throw away samples */
+ lpc32xx_fifo_clear(tsc);
+ return IRQ_HANDLED;
+ }
+
+ /*
+ * Gather and normalize 4 samples. Pen-up events may have fewer
+ * than 4 samples, but it's OK to pop 4 and let the last-sample
+ * pen status check drop the samples.
+ */
+ idx = 0;
+ while (idx < 4 &&
+ !(tsc_readl(tsc, LPC32XX_TSC_STAT) &
+ LPC32XX_TSC_STAT_FIFO_EMPTY)) {
+ tmp = tsc_readl(tsc, LPC32XX_TSC_FIFO);
+ xs[idx] = LPC32XX_TSC_ADCDAT_VALUE_MASK -
+ LPC32XX_TSC_FIFO_NORMALIZE_X_VAL(tmp);
+ ys[idx] = LPC32XX_TSC_ADCDAT_VALUE_MASK -
+ LPC32XX_TSC_FIFO_NORMALIZE_Y_VAL(tmp);
+ rv[idx] = tmp;
+ idx++;
+ }
+
+ /* Data is only valid if pen is still down in last sample */
+ if (!(rv[3] & LPC32XX_TSC_FIFO_TS_P_LEVEL) && idx == 4) {
+ /* Use average of 2nd and 3rd sample for position */
+ input_report_abs(input, ABS_X, (xs[1] + xs[2]) / 2);
+ input_report_abs(input, ABS_Y, (ys[1] + ys[2]) / 2);
+ input_report_key(input, BTN_TOUCH, 1);
+ } else {
+ input_report_key(input, BTN_TOUCH, 0);
+ }
+
+ input_sync(input);
+
+ return IRQ_HANDLED;
+}
+
+static void lpc32xx_stop_tsc(struct lpc32xx_tsc *tsc)
+{
+ /* Disable auto mode */
+ tsc_writel(tsc, LPC32XX_TSC_CON,
+ tsc_readl(tsc, LPC32XX_TSC_CON) &
+ ~LPC32XX_TSC_ADCCON_AUTO_EN);
+
+ clk_disable(tsc->clk);
+}
+
+static void lpc32xx_setup_tsc(struct lpc32xx_tsc *tsc)
+{
+ u32 tmp;
+
+ clk_enable(tsc->clk);
+
+ tmp = tsc_readl(tsc, LPC32XX_TSC_CON) & ~LPC32XX_TSC_ADCCON_POWER_UP;
+
+ /* Set the TSC FIFO depth to 4 samples @ 10-bits per sample (max) */
+ tmp = LPC32XX_TSC_ADCCON_IRQ_TO_FIFO_4 |
+ LPC32XX_TSC_ADCCON_X_SAMPLE_SIZE(10) |
+ LPC32XX_TSC_ADCCON_Y_SAMPLE_SIZE(10);
+ tsc_writel(tsc, LPC32XX_TSC_CON, tmp);
+
+ /* These values are all preset */
+ tsc_writel(tsc, LPC32XX_TSC_SEL, LPC32XX_TSC_SEL_DEFVAL);
+ tsc_writel(tsc, LPC32XX_TSC_MIN_X, LPC32XX_TSC_MIN_XY_VAL);
+ tsc_writel(tsc, LPC32XX_TSC_MAX_X, LPC32XX_TSC_MAX_XY_VAL);
+ tsc_writel(tsc, LPC32XX_TSC_MIN_Y, LPC32XX_TSC_MIN_XY_VAL);
+ tsc_writel(tsc, LPC32XX_TSC_MAX_Y, LPC32XX_TSC_MAX_XY_VAL);
+
+ /* Aux support is not used */
+ tsc_writel(tsc, LPC32XX_TSC_AUX_UTR, 0);
+ tsc_writel(tsc, LPC32XX_TSC_AUX_MIN, 0);
+ tsc_writel(tsc, LPC32XX_TSC_AUX_MAX, 0);
+
+ /*
+ * Set the sample rate to about 240 Hz per X/Y pair. A single measurement
+ * consists of 4 pairs, which gives about a 60 Hz sample rate based on
+ * a stable 32768 Hz clock source. Values are in clocks.
+ * Rate is 32768 / (RTR + XCONV + RTR + YCONV + DXP + TTR + UTR) / 4
+ */
+ tsc_writel(tsc, LPC32XX_TSC_RTR, 0x2);
+ tsc_writel(tsc, LPC32XX_TSC_DTR, 0x2);
+ tsc_writel(tsc, LPC32XX_TSC_TTR, 0x10);
+ tsc_writel(tsc, LPC32XX_TSC_DXP, 0x4);
+ tsc_writel(tsc, LPC32XX_TSC_UTR, 88);
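+ /*
+ * Worked example of the rate formula above (editorial sketch; the
+ * ~12 clocks assumed per 10-bit conversion are not spelled out here):
+ * 2 + 12 + 2 + 12 + 4 + 16 + 88 = 136 clocks per X/Y pair, so
+ * 32768 / 136 = ~240 Hz per pair and ~60 Hz per 4-pair measurement.
+ */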
+
+ lpc32xx_fifo_clear(tsc);
+
+ /* Enable automatic ts event capture */
+ tsc_writel(tsc, LPC32XX_TSC_CON, tmp | LPC32XX_TSC_ADCCON_AUTO_EN);
+}
+
+static int lpc32xx_ts_open(struct input_dev *dev)
+{
+ struct lpc32xx_tsc *tsc = input_get_drvdata(dev);
+
+ lpc32xx_setup_tsc(tsc);
+
+ return 0;
+}
+
+static void lpc32xx_ts_close(struct input_dev *dev)
+{
+ struct lpc32xx_tsc *tsc = input_get_drvdata(dev);
+
+ lpc32xx_stop_tsc(tsc);
+}
+
+static int __devinit lpc32xx_ts_probe(struct platform_device *pdev)
+{
+ struct lpc32xx_tsc *tsc;
+ struct input_dev *input;
+ struct resource *res;
+ resource_size_t size;
+ int irq;
+ int error;
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (!res) {
+ dev_err(&pdev->dev, "Can't get memory resource\n");
+ return -ENOENT;
+ }
+
+ irq = platform_get_irq(pdev, 0);
+ if (irq < 0) {
+ dev_err(&pdev->dev, "Can't get interrupt resource\n");
+ return irq;
+ }
+
+ tsc = kzalloc(sizeof(*tsc), GFP_KERNEL);
+ input = input_allocate_device();
+ if (!tsc || !input) {
+ dev_err(&pdev->dev, "failed allocating memory\n");
+ error = -ENOMEM;
+ goto err_free_mem;
+ }
+
+ tsc->dev = input;
+ tsc->irq = irq;
+
+ size = resource_size(res);
+
+ if (!request_mem_region(res->start, size, pdev->name)) {
+ dev_err(&pdev->dev, "TSC registers are not free\n");
+ error = -EBUSY;
+ goto err_free_mem;
+ }
+
+ tsc->tsc_base = ioremap(res->start, size);
+ if (!tsc->tsc_base) {
+ dev_err(&pdev->dev, "Can't map memory\n");
+ error = -ENOMEM;
+ goto err_release_mem;
+ }
+
+ tsc->clk = clk_get(&pdev->dev, NULL);
+ if (IS_ERR(tsc->clk)) {
+ dev_err(&pdev->dev, "failed getting clock\n");
+ error = PTR_ERR(tsc->clk);
+ goto err_unmap;
+ }
+
+ input->name = MOD_NAME;
+ input->phys = "lpc32xx/input0";
+ input->id.bustype = BUS_HOST;
+ input->id.vendor = 0x0001;
+ input->id.product = 0x0002;
+ input->id.version = 0x0100;
+ input->dev.parent = &pdev->dev;
+ input->open = lpc32xx_ts_open;
+ input->close = lpc32xx_ts_close;
+
+ input->evbit[0] = BIT_MASK(EV_KEY) | BIT_MASK(EV_ABS);
+ input->keybit[BIT_WORD(BTN_TOUCH)] = BIT_MASK(BTN_TOUCH);
+ input_set_abs_params(input, ABS_X, LPC32XX_TSC_MIN_XY_VAL,
+ LPC32XX_TSC_MAX_XY_VAL, 0, 0);
+ input_set_abs_params(input, ABS_Y, LPC32XX_TSC_MIN_XY_VAL,
+ LPC32XX_TSC_MAX_XY_VAL, 0, 0);
+
+ input_set_drvdata(input, tsc);
+
+ error = request_irq(tsc->irq, lpc32xx_ts_interrupt,
+ IRQF_DISABLED, pdev->name, tsc);
+ if (error) {
+ dev_err(&pdev->dev, "failed requesting interrupt\n");
+ goto err_put_clock;
+ }
+
+ error = input_register_device(input);
+ if (error) {
+ dev_err(&pdev->dev, "failed registering input device\n");
+ goto err_free_irq;
+ }
+
+ platform_set_drvdata(pdev, tsc);
+ device_init_wakeup(&pdev->dev, 1);
+
+ return 0;
+
+err_free_irq:
+ free_irq(tsc->irq, tsc);
+err_put_clock:
+ clk_put(tsc->clk);
+err_unmap:
+ iounmap(tsc->tsc_base);
+err_release_mem:
+ release_mem_region(res->start, size);
+err_free_mem:
+ input_free_device(input);
+ kfree(tsc);
+
+ return error;
+}
+
+static int __devexit lpc32xx_ts_remove(struct platform_device *pdev)
+{
+ struct lpc32xx_tsc *tsc = platform_get_drvdata(pdev);
+ struct resource *res;
+
+ device_init_wakeup(&pdev->dev, 0);
+ free_irq(tsc->irq, tsc);
+
+ input_unregister_device(tsc->dev);
+
+ clk_put(tsc->clk);
+
+ iounmap(tsc->tsc_base);
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ release_mem_region(res->start, resource_size(res));
+
+ kfree(tsc);
+
+ return 0;
+}
+
+#ifdef CONFIG_PM
+static int lpc32xx_ts_suspend(struct device *dev)
+{
+ struct lpc32xx_tsc *tsc = dev_get_drvdata(dev);
+ struct input_dev *input = tsc->dev;
+
+ /*
+ * Suspend and resume can be called when the device hasn't been
+ * enabled. If there are no users that have the device open, then
+ * avoid calling the TSC stop and start functions as the TSC
+ * isn't yet clocked.
+ */
+ mutex_lock(&input->mutex);
+
+ if (input->users) {
+ if (device_may_wakeup(dev))
+ enable_irq_wake(tsc->irq);
+ else
+ lpc32xx_stop_tsc(tsc);
+ }
+
+ mutex_unlock(&input->mutex);
+
+ return 0;
+}
+
+static int lpc32xx_ts_resume(struct device *dev)
+{
+ struct lpc32xx_tsc *tsc = dev_get_drvdata(dev);
+ struct input_dev *input = tsc->dev;
+
+ mutex_lock(&input->mutex);
+
+ if (input->users) {
+ if (device_may_wakeup(dev))
+ disable_irq_wake(tsc->irq);
+ else
+ lpc32xx_setup_tsc(tsc);
+ }
+
+ mutex_unlock(&input->mutex);
+
+ return 0;
+}
+
+static const struct dev_pm_ops lpc32xx_ts_pm_ops = {
+ .suspend = lpc32xx_ts_suspend,
+ .resume = lpc32xx_ts_resume,
+};
+#define LPC32XX_TS_PM_OPS (&lpc32xx_ts_pm_ops)
+#else
+#define LPC32XX_TS_PM_OPS NULL
+#endif
+
+static struct platform_driver lpc32xx_ts_driver = {
+ .probe = lpc32xx_ts_probe,
+ .remove = __devexit_p(lpc32xx_ts_remove),
+ .driver = {
+ .name = MOD_NAME,
+ .owner = THIS_MODULE,
+ .pm = LPC32XX_TS_PM_OPS,
+ },
+};
+
+static int __init lpc32xx_ts_init(void)
+{
+ return platform_driver_register(&lpc32xx_ts_driver);
+}
+module_init(lpc32xx_ts_init);
+
+static void __exit lpc32xx_ts_exit(void)
+{
+ platform_driver_unregister(&lpc32xx_ts_driver);
+}
+module_exit(lpc32xx_ts_exit);
+
+MODULE_AUTHOR("Kevin Wells <kevin.wells@nxp.com");
+MODULE_DESCRIPTION("LPC32XX TSC Driver");
+MODULE_LICENSE("GPL");
+MODULE_ALIAS("platform:lpc32xx_ts");
diff --git a/drivers/input/touchscreen/s3c2410_ts.c b/drivers/input/touchscreen/s3c2410_ts.c
index 6085d12fd561..8feb7f3c8be1 100644
--- a/drivers/input/touchscreen/s3c2410_ts.c
+++ b/drivers/input/touchscreen/s3c2410_ts.c
@@ -350,7 +350,7 @@ static int __devinit s3c2410ts_probe(struct platform_device *pdev)
err_tcirq:
free_irq(ts.irq_tc, ts.input);
err_inputdev:
- input_unregister_device(ts.input);
+ input_free_device(ts.input);
err_iomap:
iounmap(ts.io);
err_clk:
diff --git a/drivers/input/touchscreen/stmpe-ts.c b/drivers/input/touchscreen/stmpe-ts.c
index 656148ec0027..ae88e13c99ff 100644
--- a/drivers/input/touchscreen/stmpe-ts.c
+++ b/drivers/input/touchscreen/stmpe-ts.c
@@ -268,7 +268,7 @@ static int __devinit stmpe_input_probe(struct platform_device *pdev)
struct stmpe_touch *ts;
struct input_dev *idev;
struct stmpe_ts_platform_data *ts_pdata = NULL;
- int ret = 0;
+ int ret;
int ts_irq;
ts_irq = platform_get_irq_byname(pdev, "FIFO_TH");
@@ -276,12 +276,16 @@ static int __devinit stmpe_input_probe(struct platform_device *pdev)
return ts_irq;
ts = kzalloc(sizeof(*ts), GFP_KERNEL);
- if (!ts)
+ if (!ts) {
+ ret = -ENOMEM;
goto err_out;
+ }
idev = input_allocate_device();
- if (!idev)
+ if (!idev) {
+ ret = -ENOMEM;
goto err_free_ts;
+ }
platform_set_drvdata(pdev, ts);
ts->stmpe = stmpe;
@@ -361,7 +365,6 @@ static int __devexit stmpe_ts_remove(struct platform_device *pdev)
platform_set_drvdata(pdev, NULL);
input_unregister_device(ts->idev);
- input_free_device(ts->idev);
kfree(ts);
diff --git a/drivers/input/touchscreen/tnetv107x-ts.c b/drivers/input/touchscreen/tnetv107x-ts.c
new file mode 100644
index 000000000000..cf1dba2e267c
--- /dev/null
+++ b/drivers/input/touchscreen/tnetv107x-ts.c
@@ -0,0 +1,396 @@
+/*
+ * Texas Instruments TNETV107X Touchscreen Driver
+ *
+ * Copyright (C) 2010 Texas Instruments
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation version 2.
+ *
+ * This program is distributed "as is" WITHOUT ANY WARRANTY of any
+ * kind, whether express or implied; without even the implied warranty
+ * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/kernel.h>
+#include <linux/errno.h>
+#include <linux/input.h>
+#include <linux/platform_device.h>
+#include <linux/interrupt.h>
+#include <linux/slab.h>
+#include <linux/delay.h>
+#include <linux/ctype.h>
+#include <linux/io.h>
+#include <linux/clk.h>
+
+#include <mach/tnetv107x.h>
+
+#define TSC_PENUP_POLL (HZ / 5)
+#define IDLE_TIMEOUT 100 /* msec */
+
+/*
+ * The first and last samples of a touch interval are usually garbage and need
+ * to be filtered out with these devices. The following definitions control
+ * the number of samples skipped.
+ */
+#define TSC_HEAD_SKIP 1
+#define TSC_TAIL_SKIP 1
+#define TSC_SKIP (TSC_HEAD_SKIP + TSC_TAIL_SKIP + 1)
+#define TSC_SAMPLES (TSC_SKIP + 1)
+
+/* Register Offsets */
+struct tsc_regs {
+ u32 rev;
+ u32 tscm;
+ u32 bwcm;
+ u32 swc;
+ u32 adcchnl;
+ u32 adcdata;
+ u32 chval[4];
+};
+
+/* TSC Mode Configuration Register (tscm) bits */
+#define WMODE BIT(0)
+#define TSKIND BIT(1)
+#define ZMEASURE_EN BIT(2)
+#define IDLE BIT(3)
+#define TSC_EN BIT(4)
+#define STOP BIT(5)
+#define ONE_SHOT BIT(6)
+#define SINGLE BIT(7)
+#define AVG BIT(8)
+#define AVGNUM(x) (((x) & 0x03) << 9)
+#define PVSTC(x) (((x) & 0x07) << 11)
+#define PON BIT(14)
+#define PONBG BIT(15)
+#define AFERST BIT(16)
+
+/* ADC DATA Capture Register bits */
+#define DATA_VALID BIT(16)
+
+/* Register Access Macros */
+#define tsc_read(ts, reg) __raw_readl(&(ts)->regs->reg)
+#define tsc_write(ts, reg, val) __raw_writel(val, &(ts)->regs->reg);
+#define tsc_set_bits(ts, reg, val) \
+ tsc_write(ts, reg, tsc_read(ts, reg) | (val))
+#define tsc_clr_bits(ts, reg, val) \
+ tsc_write(ts, reg, tsc_read(ts, reg) & ~(val))
+
+struct sample {
+ int x, y, p;
+};
+
+struct tsc_data {
+ struct input_dev *input_dev;
+ struct resource *res;
+ struct tsc_regs __iomem *regs;
+ struct timer_list timer;
+ spinlock_t lock;
+ struct clk *clk;
+ struct device *dev;
+ int sample_count;
+ struct sample samples[TSC_SAMPLES];
+ int tsc_irq;
+};
+
+static int tsc_read_sample(struct tsc_data *ts, struct sample* sample)
+{
+ int x, y, z1, z2, t, p = 0;
+ u32 val;
+
+ val = tsc_read(ts, chval[0]);
+ if (val & DATA_VALID)
+ x = val & 0xffff;
+ else
+ return -EINVAL;
+
+ y = tsc_read(ts, chval[1]) & 0xffff;
+ z1 = tsc_read(ts, chval[2]) & 0xffff;
+ z2 = tsc_read(ts, chval[3]) & 0xffff;
+
+ if (z1) {
+ t = ((600 * x) * (z2 - z1));
+ p = t / (u32) (z1 << 12);
+ if (p < 0)
+ p = 0;
+ }
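+ /*
+ * The expression above matches the usual 4-wire resistive pressure
+ * estimate, R_touch ~= R_xplate * (x / 4096) * (z2 - z1) / z1; the
+ * constant 600 is presumably the X-plate resistance in ohms (an
+ * assumption, not stated in this driver).
+ */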
+
+ sample->x = x;
+ sample->y = y;
+ sample->p = p;
+
+ return 0;
+}
+
+static void tsc_poll(unsigned long data)
+{
+ struct tsc_data *ts = (struct tsc_data *)data;
+ unsigned long flags;
+ int i, val, x, y, p;
+
+ spin_lock_irqsave(&ts->lock, flags);
+
+ if (ts->sample_count >= TSC_SKIP) {
+ input_report_abs(ts->input_dev, ABS_PRESSURE, 0);
+ input_report_key(ts->input_dev, BTN_TOUCH, 0);
+ input_sync(ts->input_dev);
+ } else if (ts->sample_count > 0) {
+ /*
+ * A touch event lasted less than our skip count. Salvage and
+ * report anyway.
+ */
+ for (i = 0, val = 0; i < ts->sample_count; i++)
+ val += ts->samples[i].x;
+ x = val / ts->sample_count;
+
+ for (i = 0, val = 0; i < ts->sample_count; i++)
+ val += ts->samples[i].y;
+ y = val / ts->sample_count;
+
+ for (i = 0, val = 0; i < ts->sample_count; i++)
+ val += ts->samples[i].p;
+ p = val / ts->sample_count;
+
+ input_report_abs(ts->input_dev, ABS_X, x);
+ input_report_abs(ts->input_dev, ABS_Y, y);
+ input_report_abs(ts->input_dev, ABS_PRESSURE, p);
+ input_report_key(ts->input_dev, BTN_TOUCH, 1);
+ input_sync(ts->input_dev);
+ }
+
+ ts->sample_count = 0;
+
+ spin_unlock_irqrestore(&ts->lock, flags);
+}
+
+static irqreturn_t tsc_irq(int irq, void *dev_id)
+{
+ struct tsc_data *ts = (struct tsc_data *)dev_id;
+ struct sample *sample;
+ int index;
+
+ spin_lock(&ts->lock);
+
+ index = ts->sample_count % TSC_SAMPLES;
+ sample = &ts->samples[index];
+ if (tsc_read_sample(ts, sample) < 0)
+ goto out;
+
+ if (++ts->sample_count >= TSC_SKIP) {
+ index = (ts->sample_count - TSC_TAIL_SKIP - 1) % TSC_SAMPLES;
+ sample = &ts->samples[index];
+
+ input_report_abs(ts->input_dev, ABS_X, sample->x);
+ input_report_abs(ts->input_dev, ABS_Y, sample->y);
+ input_report_abs(ts->input_dev, ABS_PRESSURE, sample->p);
+ if (ts->sample_count == TSC_SKIP)
+ input_report_key(ts->input_dev, BTN_TOUCH, 1);
+ input_sync(ts->input_dev);
+ }
+ mod_timer(&ts->timer, jiffies + TSC_PENUP_POLL);
+out:
+ spin_unlock(&ts->lock);
+ return IRQ_HANDLED;
+}
+
+static int tsc_start(struct input_dev *dev)
+{
+ struct tsc_data *ts = input_get_drvdata(dev);
+ unsigned long timeout = jiffies + msecs_to_jiffies(IDLE_TIMEOUT);
+ u32 val;
+
+ clk_enable(ts->clk);
+
+ /* Go to idle mode, before any initialization */
+ while (time_after(timeout, jiffies)) {
+ if (tsc_read(ts, tscm) & IDLE)
+ break;
+ }
+
+ if (time_before(timeout, jiffies)) {
+ dev_warn(ts->dev, "timeout waiting for idle\n");
+ clk_disable(ts->clk);
+ return -EIO;
+ }
+
+ /* Configure TSC Control register*/
+ val = (PONBG | PON | PVSTC(4) | ONE_SHOT | ZMEASURE_EN);
+ tsc_write(ts, tscm, val);
+
+ /* Bring TSC out of reset: Clear AFE reset bit */
+ val &= ~(AFERST);
+ tsc_write(ts, tscm, val);
+
+ /* Configure all pins for hardware control*/
+ tsc_write(ts, bwcm, 0);
+
+ /* Finally enable the TSC */
+ tsc_set_bits(ts, tscm, TSC_EN);
+
+ return 0;
+}
+
+static void tsc_stop(struct input_dev *dev)
+{
+ struct tsc_data *ts = input_get_drvdata(dev);
+
+ tsc_clr_bits(ts, tscm, TSC_EN);
+ synchronize_irq(ts->tsc_irq);
+ del_timer_sync(&ts->timer);
+ clk_disable(ts->clk);
+}
+
+static int __devinit tsc_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct tsc_data *ts;
+ int error = 0;
+ u32 rev = 0;
+
+ ts = kzalloc(sizeof(struct tsc_data), GFP_KERNEL);
+ if (!ts) {
+ dev_err(dev, "cannot allocate device info\n");
+ return -ENOMEM;
+ }
+
+ ts->dev = dev;
+ spin_lock_init(&ts->lock);
+ setup_timer(&ts->timer, tsc_poll, (unsigned long)ts);
+ platform_set_drvdata(pdev, ts);
+
+ ts->tsc_irq = platform_get_irq(pdev, 0);
+ if (ts->tsc_irq < 0) {
+ dev_err(dev, "cannot determine device interrupt\n");
+ error = -ENODEV;
+ goto error_res;
+ }
+
+ ts->res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (!ts->res) {
+ dev_err(dev, "cannot determine register area\n");
+ error = -ENODEV;
+ goto error_res;
+ }
+
+ if (!request_mem_region(ts->res->start, resource_size(ts->res),
+ pdev->name)) {
+ dev_err(dev, "cannot claim register memory\n");
+ ts->res = NULL;
+ error = -EINVAL;
+ goto error_res;
+ }
+
+ ts->regs = ioremap(ts->res->start, resource_size(ts->res));
+ if (!ts->regs) {
+ dev_err(dev, "cannot map register memory\n");
+ error = -ENOMEM;
+ goto error_map;
+ }
+
+ ts->clk = clk_get(dev, NULL);
+ if (!ts->clk) {
+ dev_err(dev, "cannot claim device clock\n");
+ error = -EINVAL;
+ goto error_clk;
+ }
+
+ error = request_threaded_irq(ts->tsc_irq, NULL, tsc_irq, 0,
+ dev_name(dev), ts);
+ if (error < 0) {
+ dev_err(ts->dev, "Could not allocate ts irq\n");
+ goto error_irq;
+ }
+
+ ts->input_dev = input_allocate_device();
+ if (!ts->input_dev) {
+ dev_err(dev, "cannot allocate input device\n");
+ error = -ENOMEM;
+ goto error_input;
+ }
+ input_set_drvdata(ts->input_dev, ts);
+
+ ts->input_dev->name = pdev->name;
+ ts->input_dev->id.bustype = BUS_HOST;
+ ts->input_dev->dev.parent = &pdev->dev;
+ ts->input_dev->open = tsc_start;
+ ts->input_dev->close = tsc_stop;
+
+ clk_enable(ts->clk);
+ rev = tsc_read(ts, rev);
+ ts->input_dev->id.product = ((rev >> 8) & 0x07);
+ ts->input_dev->id.version = ((rev >> 16) & 0xfff);
+ clk_disable(ts->clk);
+
+ __set_bit(EV_KEY, ts->input_dev->evbit);
+ __set_bit(EV_ABS, ts->input_dev->evbit);
+ __set_bit(BTN_TOUCH, ts->input_dev->keybit);
+
+ input_set_abs_params(ts->input_dev, ABS_X, 0, 0xffff, 5, 0);
+ input_set_abs_params(ts->input_dev, ABS_Y, 0, 0xffff, 5, 0);
+ input_set_abs_params(ts->input_dev, ABS_PRESSURE, 0, 4095, 128, 0);
+
+ error = input_register_device(ts->input_dev);
+ if (error < 0) {
+ dev_err(dev, "failed input device registration\n");
+ goto error_reg;
+ }
+
+ return 0;
+
+error_reg:
+ input_free_device(ts->input_dev);
+error_input:
+ free_irq(ts->tsc_irq, ts);
+error_irq:
+ clk_put(ts->clk);
+error_clk:
+ iounmap(ts->regs);
+error_map:
+ release_mem_region(ts->res->start, resource_size(ts->res));
+error_res:
+ platform_set_drvdata(pdev, NULL);
+ kfree(ts);
+
+ return error;
+}
+
+static int __devexit tsc_remove(struct platform_device *pdev)
+{
+ struct tsc_data *ts = platform_get_drvdata(pdev);
+
+ input_unregister_device(ts->input_dev);
+ free_irq(ts->tsc_irq, ts);
+ clk_put(ts->clk);
+ iounmap(ts->regs);
+ release_mem_region(ts->res->start, resource_size(ts->res));
+ platform_set_drvdata(pdev, NULL);
+ kfree(ts);
+
+ return 0;
+}
+
+static struct platform_driver tsc_driver = {
+ .probe = tsc_probe,
+ .remove = __devexit_p(tsc_remove),
+ .driver.name = "tnetv107x-ts",
+ .driver.owner = THIS_MODULE,
+};
+
+static int __init tsc_init(void)
+{
+ return platform_driver_register(&tsc_driver);
+}
+
+static void __exit tsc_exit(void)
+{
+ platform_driver_unregister(&tsc_driver);
+}
+
+module_init(tsc_init);
+module_exit(tsc_exit);
+
+MODULE_AUTHOR("Cyril Chemparathy");
+MODULE_DESCRIPTION("TNETV107X Touchscreen Driver");
+MODULE_ALIAS("platform: tnetv107x-ts");
+MODULE_LICENSE("GPL");
diff --git a/drivers/input/touchscreen/tps6507x-ts.c b/drivers/input/touchscreen/tps6507x-ts.c
index a644d18c04dc..c8c136cf7bbc 100644
--- a/drivers/input/touchscreen/tps6507x-ts.c
+++ b/drivers/input/touchscreen/tps6507x-ts.c
@@ -335,6 +335,7 @@ static int tps6507x_ts_probe(struct platform_device *pdev)
dev_err(tsc->dev, "schedule failed");
goto err2;
}
+ platform_set_drvdata(pdev, tps6507x_dev);
return 0;
@@ -358,7 +359,7 @@ static int __devexit tps6507x_ts_remove(struct platform_device *pdev)
cancel_delayed_work_sync(&tsc->work);
destroy_workqueue(tsc->wq);
- input_free_device(input_dev);
+ input_unregister_device(input_dev);
tps6507x_dev->ts = NULL;
kfree(tsc);
diff --git a/drivers/input/touchscreen/tsc2007.c b/drivers/input/touchscreen/tsc2007.c
index be23780e8a3e..80467f262331 100644
--- a/drivers/input/touchscreen/tsc2007.c
+++ b/drivers/input/touchscreen/tsc2007.c
@@ -265,7 +265,7 @@ static int __devinit tsc2007_probe(struct i2c_client *client,
const struct i2c_device_id *id)
{
struct tsc2007 *ts;
- struct tsc2007_platform_data *pdata = pdata = client->dev.platform_data;
+ struct tsc2007_platform_data *pdata = client->dev.platform_data;
struct input_dev *input_dev;
int err;
diff --git a/drivers/input/touchscreen/usbtouchscreen.c b/drivers/input/touchscreen/usbtouchscreen.c
index f45f80f6d336..73fd6642b681 100644
--- a/drivers/input/touchscreen/usbtouchscreen.c
+++ b/drivers/input/touchscreen/usbtouchscreen.c
@@ -178,6 +178,7 @@ static const struct usb_device_id usbtouch_devices[] = {
#ifdef CONFIG_TOUCHSCREEN_USB_ITM
{USB_DEVICE(0x0403, 0xf9e9), .driver_info = DEVTYPE_ITM},
+ {USB_DEVICE(0x16e3, 0xf9e9), .driver_info = DEVTYPE_ITM},
#endif
#ifdef CONFIG_TOUCHSCREEN_USB_ETURBO
diff --git a/drivers/input/touchscreen/wacom_w8001.c b/drivers/input/touchscreen/wacom_w8001.c
index 56dc35c94bb1..9ae4c7b16ba7 100644
--- a/drivers/input/touchscreen/wacom_w8001.c
+++ b/drivers/input/touchscreen/wacom_w8001.c
@@ -2,6 +2,7 @@
* Wacom W8001 penabled serial touchscreen driver
*
* Copyright (c) 2008 Jaya Kumar
+ * Copyright (c) 2010 Red Hat, Inc.
*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file COPYING in the main directory of this archive for
@@ -30,11 +31,24 @@ MODULE_LICENSE("GPL");
#define W8001_LEAD_BYTE 0x80
#define W8001_TAB_MASK 0x40
#define W8001_TAB_BYTE 0x40
+/* set in first byte of touch data packets */
+#define W8001_TOUCH_MASK (0x10 | W8001_LEAD_MASK)
+#define W8001_TOUCH_BYTE (0x10 | W8001_LEAD_BYTE)
#define W8001_QUERY_PACKET 0x20
#define W8001_CMD_START '1'
#define W8001_CMD_QUERY '*'
+#define W8001_CMD_TOUCHQUERY '%'
+
+/* length of data packets in bytes, depends on device. */
+#define W8001_PKTLEN_TOUCH93 5
+#define W8001_PKTLEN_TOUCH9A 7
+#define W8001_PKTLEN_TPCPEN 9
+#define W8001_PKTLEN_TPCCTL 11 /* control packet */
+#define W8001_PKTLEN_TOUCH2FG 13
+
+#define MAX_TRACKING_ID 0xFF /* arbitrarily chosen */
struct w8001_coord {
u8 rdy;
@@ -48,6 +62,15 @@ struct w8001_coord {
u8 tilt_y;
};
+/* touch query reply packet */
+struct w8001_touch_query {
+ u8 panel_res;
+ u8 capacity_res;
+ u8 sensor_id;
+ u16 x;
+ u16 y;
+};
+
/*
* Per-touchscreen data.
*/
@@ -62,6 +85,9 @@ struct w8001 {
unsigned char response[W8001_MAX_LENGTH];
unsigned char data[W8001_MAX_LENGTH];
char phys[32];
+ int type;
+ unsigned int pktlen;
+ int trkid[2];
};
static void parse_data(u8 *data, struct w8001_coord *coord)
@@ -88,11 +114,98 @@ static void parse_data(u8 *data, struct w8001_coord *coord)
coord->tilt_y = data[8] & 0x7F;
}
+static void parse_touch(struct w8001 *w8001)
+{
+ static int trkid;
+ struct input_dev *dev = w8001->dev;
+ unsigned char *data = w8001->data;
+ int i;
+
+ for (i = 0; i < 2; i++) {
+ input_mt_slot(dev, i);
+
+ if (data[0] & (1 << i)) {
+ int x = (data[6 * i + 1] << 7) | (data[6 * i + 2]);
+ int y = (data[6 * i + 3] << 7) | (data[6 * i + 4]);
+ /* data[5,6] and [11,12] is finger capacity */
+
+ input_report_abs(dev, ABS_MT_POSITION_X, x);
+ input_report_abs(dev, ABS_MT_POSITION_Y, y);
+ input_report_abs(dev, ABS_MT_TOOL_TYPE, MT_TOOL_FINGER);
+ if (w8001->trkid[i] < 0)
+ w8001->trkid[i] = trkid++ & MAX_TRACKING_ID;
+ } else {
+ w8001->trkid[i] = -1;
+ }
+ input_report_abs(dev, ABS_MT_TRACKING_ID, w8001->trkid[i]);
+ }
+
+ input_sync(dev);
+}
+
+static void parse_touchquery(u8 *data, struct w8001_touch_query *query)
+{
+ memset(query, 0, sizeof(*query));
+
+ query->panel_res = data[1];
+ query->sensor_id = data[2] & 0x7;
+ query->capacity_res = data[7];
+
+ query->x = data[3] << 9;
+ query->x |= data[4] << 2;
+ query->x |= (data[2] >> 5) & 0x3;
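+ /*
+ * Example with made-up bytes: data[2..4] = 0x65, 0x12, 0x34 gives
+ * x = (0x12 << 9) | (0x34 << 2) | ((0x65 >> 5) & 0x3) = 0x24D3, i.e.
+ * the coordinate is rebuilt from two 7-bit payload bytes plus two
+ * bits of data[2].
+ */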
+
+ query->y = data[5] << 9;
+ query->y |= data[6] << 2;
+ query->y |= (data[2] >> 3) & 0x3;
+}
+
+static void report_pen_events(struct w8001 *w8001, struct w8001_coord *coord)
+{
+ struct input_dev *dev = w8001->dev;
+
+ /*
+ * We have 1 bit for proximity (rdy) and 3 bits for tip, side,
+ * side2/eraser. If rdy && f2 are set, this can be either pen + side2,
+ * or eraser. assume
+ * - if dev is already in proximity and f2 is toggled → pen + side2
+ * - if dev comes into proximity with f2 set → eraser
+ * If f2 disappears after assuming eraser, fake proximity out for
+ * eraser and in for pen.
+ */
+
+ if (!w8001->type) {
+ w8001->type = coord->f2 ? BTN_TOOL_RUBBER : BTN_TOOL_PEN;
+ } else if (w8001->type == BTN_TOOL_RUBBER) {
+ if (!coord->f2) {
+ input_report_abs(dev, ABS_PRESSURE, 0);
+ input_report_key(dev, BTN_TOUCH, 0);
+ input_report_key(dev, BTN_STYLUS, 0);
+ input_report_key(dev, BTN_STYLUS2, 0);
+ input_report_key(dev, BTN_TOOL_RUBBER, 0);
+ input_sync(dev);
+ w8001->type = BTN_TOOL_PEN;
+ }
+ } else {
+ input_report_key(dev, BTN_STYLUS2, coord->f2);
+ }
+
+ input_report_abs(dev, ABS_X, coord->x);
+ input_report_abs(dev, ABS_Y, coord->y);
+ input_report_abs(dev, ABS_PRESSURE, coord->pen_pressure);
+ input_report_key(dev, BTN_TOUCH, coord->tsw);
+ input_report_key(dev, BTN_STYLUS, coord->f1);
+ input_report_key(dev, w8001->type, coord->rdy);
+ input_sync(dev);
+
+ if (!coord->rdy)
+ w8001->type = 0;
+}
+
static irqreturn_t w8001_interrupt(struct serio *serio,
unsigned char data, unsigned int flags)
{
struct w8001 *w8001 = serio_get_drvdata(serio);
- struct input_dev *dev = w8001->dev;
struct w8001_coord coord;
unsigned char tmp;
@@ -105,26 +218,45 @@ static irqreturn_t w8001_interrupt(struct serio *serio,
}
break;
- case 8:
+ case W8001_PKTLEN_TOUCH93 - 1:
+ case W8001_PKTLEN_TOUCH9A - 1:
+ /* ignore one-finger touch packet. */
+ if (w8001->pktlen == w8001->idx)
+ w8001->idx = 0;
+ break;
+
+ /* Pen coordinates packet */
+ case W8001_PKTLEN_TPCPEN - 1:
tmp = w8001->data[0] & W8001_TAB_MASK;
if (unlikely(tmp == W8001_TAB_BYTE))
break;
+ tmp = (w8001->data[0] & W8001_TOUCH_BYTE);
+ if (tmp == W8001_TOUCH_BYTE)
+ break;
+
w8001->idx = 0;
parse_data(w8001->data, &coord);
- input_report_abs(dev, ABS_X, coord.x);
- input_report_abs(dev, ABS_Y, coord.y);
- input_report_abs(dev, ABS_PRESSURE, coord.pen_pressure);
- input_report_key(dev, BTN_TOUCH, coord.tsw);
- input_sync(dev);
+ report_pen_events(w8001, &coord);
break;
- case 10:
+ /* control packet */
+ case W8001_PKTLEN_TPCCTL - 1:
+ tmp = (w8001->data[0] & W8001_TOUCH_MASK);
+ if (tmp == W8001_TOUCH_BYTE)
+ break;
+
w8001->idx = 0;
memcpy(w8001->response, w8001->data, W8001_MAX_LENGTH);
w8001->response_type = W8001_QUERY_PACKET;
complete(&w8001->cmd_done);
break;
+
+ /* 2 finger touch packet */
+ case W8001_PKTLEN_TOUCH2FG - 1:
+ w8001->idx = 0;
+ parse_touch(w8001);
+ break;
}
return IRQ_HANDLED;
@@ -167,6 +299,38 @@ static int w8001_setup(struct w8001 *w8001)
input_set_abs_params(dev, ABS_TILT_X, 0, coord.tilt_x, 0, 0);
input_set_abs_params(dev, ABS_TILT_Y, 0, coord.tilt_y, 0, 0);
+ error = w8001_command(w8001, W8001_CMD_TOUCHQUERY, true);
+ if (!error) {
+ struct w8001_touch_query touch;
+
+ parse_touchquery(w8001->response, &touch);
+
+ switch (touch.sensor_id) {
+ case 0:
+ case 2:
+ w8001->pktlen = W8001_PKTLEN_TOUCH93;
+ break;
+ case 1:
+ case 3:
+ case 4:
+ w8001->pktlen = W8001_PKTLEN_TOUCH9A;
+ break;
+ case 5:
+ w8001->pktlen = W8001_PKTLEN_TOUCH2FG;
+
+ input_mt_create_slots(dev, 2);
+ input_set_abs_params(dev, ABS_MT_TRACKING_ID,
+ 0, MAX_TRACKING_ID, 0, 0);
+ input_set_abs_params(dev, ABS_MT_POSITION_X,
+ 0, touch.x, 0, 0);
+ input_set_abs_params(dev, ABS_MT_POSITION_Y,
+ 0, touch.y, 0, 0);
+ input_set_abs_params(dev, ABS_MT_TOOL_TYPE,
+ 0, 0, 0, 0);
+ break;
+ }
+ }
+
return w8001_command(w8001, W8001_CMD_START, false);
}
@@ -208,6 +372,7 @@ static int w8001_connect(struct serio *serio, struct serio_driver *drv)
w8001->serio = serio;
w8001->id = serio->id.id;
w8001->dev = input_dev;
+ w8001->trkid[0] = w8001->trkid[1] = -1;
init_completion(&w8001->cmd_done);
snprintf(w8001->phys, sizeof(w8001->phys), "%s/input0", serio->phys);
@@ -221,6 +386,10 @@ static int w8001_connect(struct serio *serio, struct serio_driver *drv)
input_dev->evbit[0] = BIT_MASK(EV_KEY) | BIT_MASK(EV_ABS);
input_dev->keybit[BIT_WORD(BTN_TOUCH)] = BIT_MASK(BTN_TOUCH);
+ input_dev->keybit[BIT_WORD(BTN_TOOL_PEN)] |= BIT_MASK(BTN_TOOL_PEN);
+ input_dev->keybit[BIT_WORD(BTN_TOOL_RUBBER)] |= BIT_MASK(BTN_TOOL_RUBBER);
+ input_dev->keybit[BIT_WORD(BTN_STYLUS)] |= BIT_MASK(BTN_STYLUS);
+ input_dev->keybit[BIT_WORD(BTN_STYLUS2)] |= BIT_MASK(BTN_STYLUS2);
serio_set_drvdata(serio, w8001);
err = serio_open(serio, drv);
diff --git a/drivers/input/touchscreen/wm97xx-core.c b/drivers/input/touchscreen/wm97xx-core.c
index cbfef1ea7e30..6b75c9f660ae 100644
--- a/drivers/input/touchscreen/wm97xx-core.c
+++ b/drivers/input/touchscreen/wm97xx-core.c
@@ -125,6 +125,8 @@ int wm97xx_read_aux_adc(struct wm97xx *wm, u16 adcsel)
{
int power_adc = 0, auxval;
u16 power = 0;
+ int rc = 0;
+ int timeout = 0;
/* get codec */
mutex_lock(&wm->codec_mutex);
@@ -143,7 +145,9 @@ int wm97xx_read_aux_adc(struct wm97xx *wm, u16 adcsel)
/* Turn polling mode on to read AUX ADC */
wm->pen_probably_down = 1;
- wm->codec->poll_sample(wm, adcsel, &auxval);
+
+ while (rc != RC_VALID && timeout++ < 5)
+ rc = wm->codec->poll_sample(wm, adcsel, &auxval);
if (power_adc)
wm97xx_reg_write(wm, AC97_EXTENDED_MID, power | 0x8000);
@@ -152,8 +156,15 @@ int wm97xx_read_aux_adc(struct wm97xx *wm, u16 adcsel)
wm->pen_probably_down = 0;
+ if (timeout >= 5) {
+ dev_err(wm->dev,
+ "timeout reading auxadc %d, disabling digitiser\n",
+ adcsel);
+ wm->codec->dig_enable(wm, false);
+ }
+
mutex_unlock(&wm->codec_mutex);
- return auxval & 0xfff;
+ return (rc == RC_VALID ? auxval & 0xfff : -EBUSY);
}
EXPORT_SYMBOL_GPL(wm97xx_read_aux_adc);
@@ -684,8 +695,7 @@ static int wm97xx_probe(struct device *dev)
touch_reg_err:
platform_device_put(wm->touch_dev);
touch_err:
- platform_device_unregister(wm->battery_dev);
- wm->battery_dev = NULL;
+ platform_device_del(wm->battery_dev);
batt_reg_err:
platform_device_put(wm->battery_dev);
batt_err:
diff --git a/drivers/input/xen-kbdfront.c b/drivers/input/xen-kbdfront.c
index ebb11907d402..e0c024db2ca5 100644
--- a/drivers/input/xen-kbdfront.c
+++ b/drivers/input/xen-kbdfront.c
@@ -276,6 +276,8 @@ static void xenkbd_backend_changed(struct xenbus_device *dev,
switch (backend_state) {
case XenbusStateInitialising:
case XenbusStateInitialised:
+ case XenbusStateReconfiguring:
+ case XenbusStateReconfigured:
case XenbusStateUnknown:
case XenbusStateClosed:
break;
diff --git a/drivers/isdn/capi/capifs.c b/drivers/isdn/capi/capifs.c
index 2b83850997c3..b4faed7fe0d3 100644
--- a/drivers/isdn/capi/capifs.c
+++ b/drivers/isdn/capi/capifs.c
@@ -125,16 +125,16 @@ fail:
return -ENOMEM;
}
-static int capifs_get_sb(struct file_system_type *fs_type,
- int flags, const char *dev_name, void *data, struct vfsmount *mnt)
+static struct dentry *capifs_mount(struct file_system_type *fs_type,
+ int flags, const char *dev_name, void *data)
{
- return get_sb_single(fs_type, flags, data, capifs_fill_super, mnt);
+ return mount_single(fs_type, flags, data, capifs_fill_super);
}
static struct file_system_type capifs_fs_type = {
.owner = THIS_MODULE,
.name = "capifs",
- .get_sb = capifs_get_sb,
+ .mount = capifs_mount,
.kill_sb = kill_anon_super,
};
diff --git a/drivers/isdn/hardware/mISDN/mISDNinfineon.c b/drivers/isdn/hardware/mISDN/mISDNinfineon.c
index af25e1f3efd4..e90db8870b6c 100644
--- a/drivers/isdn/hardware/mISDN/mISDNinfineon.c
+++ b/drivers/isdn/hardware/mISDN/mISDNinfineon.c
@@ -563,7 +563,7 @@ reset_inf(struct inf_hw *hw)
mdelay(10);
hw->ipac.isac.adf2 = 0x87;
hw->ipac.hscx[0].slot = 0x1f;
- hw->ipac.hscx[0].slot = 0x23;
+ hw->ipac.hscx[1].slot = 0x23;
break;
case INF_GAZEL_R753:
val = inl((u32)hw->cfg.start + GAZEL_CNTRL);
diff --git a/drivers/isdn/hisax/isar.c b/drivers/isdn/hisax/isar.c
index 40b914bded8c..2e72227bd071 100644
--- a/drivers/isdn/hisax/isar.c
+++ b/drivers/isdn/hisax/isar.c
@@ -1427,8 +1427,8 @@ modeisar(struct BCState *bcs, int mode, int bc)
&bcs->hw.isar.reg->Flags))
bcs->hw.isar.dpath = 1;
else {
- printk(KERN_WARNING"isar modeisar analog funktions only with DP1\n");
- debugl1(cs, "isar modeisar analog funktions only with DP1");
+ printk(KERN_WARNING"isar modeisar analog functions only with DP1\n");
+ debugl1(cs, "isar modeisar analog functions only with DP1");
return(1);
}
break;
diff --git a/drivers/isdn/hisax/l3_1tr6.c b/drivers/isdn/hisax/l3_1tr6.c
index b0554f80bfb3..ee4dae1382e0 100644
--- a/drivers/isdn/hisax/l3_1tr6.c
+++ b/drivers/isdn/hisax/l3_1tr6.c
@@ -164,11 +164,9 @@ l3_1tr6_setup(struct l3_process *pc, u_char pr, void *arg)
char tmp[80];
struct sk_buff *skb = arg;
- p = skb->data;
-
/* Channel Identification */
- p = skb->data;
- if ((p = findie(p, skb->len, WE0_chanID, 0))) {
+ p = findie(skb->data, skb->len, WE0_chanID, 0);
+ if (p) {
if (p[1] != 1) {
l3_1tr6_error(pc, "setup wrong chanID len", skb);
return;
diff --git a/drivers/isdn/icn/icn.c b/drivers/isdn/icn/icn.c
index 2e847a90bad0..f2b5bab5e6a1 100644
--- a/drivers/isdn/icn/icn.c
+++ b/drivers/isdn/icn/icn.c
@@ -1627,7 +1627,7 @@ __setup("icn=", icn_setup);
static int __init icn_init(void)
{
char *p;
- char rev[10];
+ char rev[20];
memset(&dev, 0, sizeof(icn_dev));
dev.memaddr = (membase & 0x0ffc000);
@@ -1637,9 +1637,10 @@ static int __init icn_init(void)
spin_lock_init(&dev.devlock);
if ((p = strchr(revision, ':'))) {
- strcpy(rev, p + 1);
+ strncpy(rev, p + 1, 20);
p = strchr(rev, '$');
- *p = 0;
+ if (p)
+ *p = 0;
} else
strcpy(rev, " ??? ");
printk(KERN_NOTICE "ICN-ISDN-driver Rev%smem=0x%08lx\n", rev,
diff --git a/drivers/isdn/mISDN/socket.c b/drivers/isdn/mISDN/socket.c
index 3232206406b1..7446d8b4282d 100644
--- a/drivers/isdn/mISDN/socket.c
+++ b/drivers/isdn/mISDN/socket.c
@@ -392,6 +392,7 @@ data_sock_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg)
if (dev) {
struct mISDN_devinfo di;
+ memset(&di, 0, sizeof(di));
di.id = dev->id;
di.Dprotocols = dev->Dprotocols;
di.Bprotocols = dev->Bprotocols | get_all_Bprotocols();
@@ -672,6 +673,7 @@ base_sock_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg)
if (dev) {
struct mISDN_devinfo di;
+ memset(&di, 0, sizeof(di));
di.id = dev->id;
di.Dprotocols = dev->Dprotocols;
di.Bprotocols = dev->Bprotocols | get_all_Bprotocols();
diff --git a/drivers/leds/Kconfig b/drivers/leds/Kconfig
index cc2a88d5192f..6f190f4cdbc0 100644
--- a/drivers/leds/Kconfig
+++ b/drivers/leds/Kconfig
@@ -7,20 +7,20 @@ menuconfig NEW_LEDS
This is not related to standard keyboard LEDs which are controlled
via the input system.
-if NEW_LEDS
-
config LEDS_CLASS
- tristate "LED Class Support"
+ bool "LED Class Support"
+ depends on NEW_LEDS
help
This option enables the led sysfs class in /sys/class/leds. You'll
need this to do anything useful with LEDs. If unsure, say N.
-if LEDS_CLASS
+if NEW_LEDS
comment "LED drivers"
config LEDS_88PM860X
tristate "LED Support for Marvell 88PM860x PMIC"
+ depends on LEDS_CLASS
depends on MFD_88PM860X
help
This option enables support for on-chip LED drivers found on Marvell
@@ -28,6 +28,7 @@ config LEDS_88PM860X
config LEDS_ATMEL_PWM
tristate "LED Support using Atmel PWM outputs"
+ depends on LEDS_CLASS
depends on ATMEL_PWM
help
This option enables support for LEDs driven using outputs
@@ -35,6 +36,7 @@ config LEDS_ATMEL_PWM
config LEDS_LOCOMO
tristate "LED Support for Locomo device"
+ depends on LEDS_CLASS
depends on SHARP_LOCOMO
help
This option enables support for the LEDs on Sharp Locomo.
@@ -42,6 +44,7 @@ config LEDS_LOCOMO
config LEDS_MIKROTIK_RB532
tristate "LED Support for Mikrotik Routerboard 532"
+ depends on LEDS_CLASS
depends on MIKROTIK_RB532
help
This option enables support for the so called "User LED" of
@@ -49,6 +52,7 @@ config LEDS_MIKROTIK_RB532
config LEDS_S3C24XX
tristate "LED Support for Samsung S3C24XX GPIO LEDs"
+ depends on LEDS_CLASS
depends on ARCH_S3C2410
help
This option enables support for LEDs connected to GPIO lines
@@ -56,12 +60,14 @@ config LEDS_S3C24XX
config LEDS_AMS_DELTA
tristate "LED Support for the Amstrad Delta (E3)"
+ depends on LEDS_CLASS
depends on MACH_AMS_DELTA
help
This option enables support for the LEDs on Amstrad Delta (E3).
config LEDS_NET48XX
tristate "LED Support for Soekris net48xx series Error LED"
+ depends on LEDS_CLASS
depends on SCx200_GPIO
help
This option enables support for the Soekris net4801 and net4826 error
@@ -79,18 +85,21 @@ config LEDS_NET5501
config LEDS_FSG
tristate "LED Support for the Freecom FSG-3"
+ depends on LEDS_CLASS
depends on MACH_FSG
help
This option enables support for the LEDs on the Freecom FSG-3.
config LEDS_WRAP
tristate "LED Support for the WRAP series LEDs"
+ depends on LEDS_CLASS
depends on SCx200_GPIO
help
This option enables support for the PCEngines WRAP programmable LEDs.
config LEDS_ALIX2
tristate "LED Support for ALIX.2 and ALIX.3 series"
+ depends on LEDS_CLASS
depends on X86 && !GPIO_CS5535 && !CS5535_GPIO
help
This option enables support for the PCEngines ALIX.2 and ALIX.3 LEDs.
@@ -98,12 +107,14 @@ config LEDS_ALIX2
config LEDS_H1940
tristate "LED Support for iPAQ H1940 device"
+ depends on LEDS_CLASS
depends on ARCH_H1940
help
This option enables support for the LEDs on the h1940.
config LEDS_COBALT_QUBE
tristate "LED Support for the Cobalt Qube series front LED"
+ depends on LEDS_CLASS
depends on MIPS_COBALT
help
This option enables support for the front LED on Cobalt Qube series
@@ -117,6 +128,7 @@ config LEDS_COBALT_RAQ
config LEDS_SUNFIRE
tristate "LED support for SunFire servers."
+ depends on LEDS_CLASS
depends on SPARC64
select LEDS_TRIGGERS
help
@@ -125,6 +137,7 @@ config LEDS_SUNFIRE
config LEDS_HP6XX
tristate "LED Support for the HP Jornada 6xx"
+ depends on LEDS_CLASS
depends on SH_HP6XX
help
This option enables LED support for the handheld
@@ -132,6 +145,7 @@ config LEDS_HP6XX
config LEDS_PCA9532
tristate "LED driver for PCA9532 dimmer"
+ depends on LEDS_CLASS
depends on I2C && INPUT && EXPERIMENTAL
help
This option enables support for NXP pca9532
@@ -140,6 +154,7 @@ config LEDS_PCA9532
config LEDS_GPIO
tristate "LED Support for GPIO connected LEDs"
+ depends on LEDS_CLASS
depends on GENERIC_GPIO
help
This option enables support for the LEDs connected to GPIO
@@ -167,6 +182,7 @@ config LEDS_GPIO_OF
config LEDS_LP3944
tristate "LED Support for N.S. LP3944 (Fun Light) I2C chip"
+ depends on LEDS_CLASS
depends on I2C
help
This option enables support for LEDs connected to the National
@@ -176,8 +192,27 @@ config LEDS_LP3944
To compile this driver as a module, choose M here: the
module will be called leds-lp3944.
+config LEDS_LP5521
+ tristate "LED Support for N.S. LP5521 LED driver chip"
+ depends on LEDS_CLASS && I2C
+ help
+ If you say yes here you get support for the National Semiconductor
+ LP5521 LED driver. It is a 3-channel chip with programmable engines.
+ The driver provides direct control via the LED class and an interface
+ for programming the engines.
+
+config LEDS_LP5523
+ tristate "LED Support for N.S. LP5523 LED driver chip"
+ depends on LEDS_CLASS && I2C
+ help
+ If you say yes here you get support for the National Semiconductor
+ LP5523 LED driver. It is a 9-channel chip with programmable engines.
+ The driver provides direct control via the LED class and an interface
+ for programming the engines.
+
config LEDS_CLEVO_MAIL
tristate "Mail LED on Clevo notebook"
+ depends on LEDS_CLASS
depends on X86 && SERIO_I8042 && DMI
help
This driver makes the mail LED accessible from userspace
@@ -208,6 +243,7 @@ config LEDS_CLEVO_MAIL
config LEDS_PCA955X
tristate "LED Support for PCA955x I2C chips"
+ depends on LEDS_CLASS
depends on I2C
help
This option enables support for LEDs connected to PCA955x
@@ -216,6 +252,7 @@ config LEDS_PCA955X
config LEDS_WM831X_STATUS
tristate "LED support for status LEDs on WM831x PMICs"
+ depends on LEDS_CLASS
depends on MFD_WM831X
help
This option enables support for the status LEDs of the WM831x
@@ -223,6 +260,7 @@ config LEDS_WM831X_STATUS
config LEDS_WM8350
tristate "LED Support for WM8350 AudioPlus PMIC"
+ depends on LEDS_CLASS
depends on MFD_WM8350
help
This option enables support for LEDs driven by the Wolfson
@@ -230,6 +268,7 @@ config LEDS_WM8350
config LEDS_DA903X
tristate "LED Support for DA9030/DA9034 PMIC"
+ depends on LEDS_CLASS
depends on PMIC_DA903X
help
This option enables support for on-chip LED drivers found
@@ -237,6 +276,7 @@ config LEDS_DA903X
config LEDS_DAC124S085
tristate "LED Support for DAC124S085 SPI DAC"
+ depends on LEDS_CLASS
depends on SPI
help
This option enables support for DAC124S085 SPI DAC from NatSemi,
@@ -244,18 +284,21 @@ config LEDS_DAC124S085
config LEDS_PWM
tristate "PWM driven LED Support"
+ depends on LEDS_CLASS
depends on HAVE_PWM
help
This option enables support for pwm driven LEDs
config LEDS_REGULATOR
tristate "REGULATOR driven LED support"
+ depends on LEDS_CLASS
depends on REGULATOR
help
This option enables support for regulator driven LEDs.
config LEDS_BD2802
tristate "LED driver for BD2802 RGB LED"
+ depends on LEDS_CLASS
depends on I2C
help
This option enables support for BD2802GU RGB LED driver chips
@@ -263,6 +306,7 @@ config LEDS_BD2802
config LEDS_INTEL_SS4200
tristate "LED driver for Intel NAS SS4200 series"
+ depends on LEDS_CLASS
depends on PCI && DMI
help
This option enables support for the Intel SS4200 series of
@@ -272,6 +316,7 @@ config LEDS_INTEL_SS4200
config LEDS_LT3593
tristate "LED driver for LT3593 controllers"
+ depends on LEDS_CLASS
depends on GENERIC_GPIO
help
This option enables support for LEDs driven by a Linear Technology
@@ -280,6 +325,7 @@ config LEDS_LT3593
config LEDS_ADP5520
tristate "LED Support for ADP5520/ADP5501 PMIC"
+ depends on LEDS_CLASS
depends on PMIC_ADP5520
help
This option enables support for on-chip LED drivers found
@@ -290,6 +336,7 @@ config LEDS_ADP5520
config LEDS_DELL_NETBOOKS
tristate "External LED on Dell Business Netbooks"
+ depends on LEDS_CLASS
depends on X86 && ACPI_WMI
help
This adds support for the Latitude 2100 and similar
@@ -297,6 +344,7 @@ config LEDS_DELL_NETBOOKS
config LEDS_MC13783
tristate "LED Support for MC13783 PMIC"
+ depends on LEDS_CLASS
depends on MFD_MC13783
help
This option enables support for on-chip LED drivers found
@@ -304,6 +352,7 @@ config LEDS_MC13783
config LEDS_NS2
tristate "LED support for Network Space v2 GPIO LEDs"
+ depends on LEDS_CLASS
depends on MACH_NETSPACE_V2 || MACH_INETSPACE_V2 || MACH_NETSPACE_MAX_V2 || D2NET_V2
default y
help
@@ -322,17 +371,17 @@ config LEDS_NETXBIG
config LEDS_TRIGGERS
bool "LED Trigger support"
+ depends on LEDS_CLASS
help
This option enables trigger support for the leds class.
These triggers allow kernel events to drive the LEDs and can
be configured via sysfs. If unsure, say Y.
-if LEDS_TRIGGERS
-
comment "LED Triggers"
config LEDS_TRIGGER_TIMER
tristate "LED Timer Trigger"
+ depends on LEDS_TRIGGERS
help
This allows LEDs to be controlled by a programmable timer
via sysfs. Some LED hardware can be programmed to start
@@ -344,12 +393,14 @@ config LEDS_TRIGGER_TIMER
config LEDS_TRIGGER_IDE_DISK
bool "LED IDE Disk Trigger"
depends on IDE_GD_ATA
+ depends on LEDS_TRIGGERS
help
This allows LEDs to be controlled by IDE disk activity.
If unsure, say Y.
config LEDS_TRIGGER_HEARTBEAT
tristate "LED Heartbeat Trigger"
+ depends on LEDS_TRIGGERS
help
This allows LEDs to be controlled by a CPU load average.
The flash frequency is a hyperbolic function of the 1-minute
@@ -358,6 +409,7 @@ config LEDS_TRIGGER_HEARTBEAT
config LEDS_TRIGGER_BACKLIGHT
tristate "LED backlight Trigger"
+ depends on LEDS_TRIGGERS
help
This allows LEDs to be controlled as a backlight device: they
turn off and on when the display is blanked and unblanked.
@@ -366,6 +418,7 @@ config LEDS_TRIGGER_BACKLIGHT
config LEDS_TRIGGER_GPIO
tristate "LED GPIO Trigger"
+ depends on LEDS_TRIGGERS
depends on GPIOLIB
help
This allows LEDs to be controlled by gpio events. It's good
@@ -378,6 +431,7 @@ config LEDS_TRIGGER_GPIO
config LEDS_TRIGGER_DEFAULT_ON
tristate "LED Default ON Trigger"
+ depends on LEDS_TRIGGERS
help
This allows LEDs to be initialised in the ON state.
If unsure, say Y.
@@ -385,8 +439,4 @@ config LEDS_TRIGGER_DEFAULT_ON
comment "iptables trigger is under Netfilter config (LED target)"
depends on LEDS_TRIGGERS
-endif # LEDS_TRIGGERS
-
-endif # LEDS_CLASS
-
endif # NEW_LEDS
diff --git a/drivers/leds/Makefile b/drivers/leds/Makefile
index 9c96db40ef6d..aae6989ff6b6 100644
--- a/drivers/leds/Makefile
+++ b/drivers/leds/Makefile
@@ -23,6 +23,8 @@ obj-$(CONFIG_LEDS_SUNFIRE) += leds-sunfire.o
obj-$(CONFIG_LEDS_PCA9532) += leds-pca9532.o
obj-$(CONFIG_LEDS_GPIO) += leds-gpio.o
obj-$(CONFIG_LEDS_LP3944) += leds-lp3944.o
+obj-$(CONFIG_LEDS_LP5521) += leds-lp5521.o
+obj-$(CONFIG_LEDS_LP5523) += leds-lp5523.o
obj-$(CONFIG_LEDS_CLEVO_MAIL) += leds-clevo-mail.o
obj-$(CONFIG_LEDS_HP6XX) += leds-hp6xx.o
obj-$(CONFIG_LEDS_FSG) += leds-fsg.o
diff --git a/drivers/leds/led-class.c b/drivers/leds/led-class.c
index 260660076507..211e21f34bd5 100644
--- a/drivers/leds/led-class.c
+++ b/drivers/leds/led-class.c
@@ -81,6 +81,79 @@ static struct device_attribute led_class_attrs[] = {
__ATTR_NULL,
};
+static void led_timer_function(unsigned long data)
+{
+ struct led_classdev *led_cdev = (void *)data;
+ unsigned long brightness;
+ unsigned long delay;
+
+ if (!led_cdev->blink_delay_on || !led_cdev->blink_delay_off) {
+ led_set_brightness(led_cdev, LED_OFF);
+ return;
+ }
+
+ brightness = led_get_brightness(led_cdev);
+ if (!brightness) {
+ /* Time to switch the LED on. */
+ brightness = led_cdev->blink_brightness;
+ delay = led_cdev->blink_delay_on;
+ } else {
+ /* Store the current brightness value to be able
+ * to restore it when the delay_off period is over.
+ */
+ led_cdev->blink_brightness = brightness;
+ brightness = LED_OFF;
+ delay = led_cdev->blink_delay_off;
+ }
+
+ led_set_brightness(led_cdev, brightness);
+
+ mod_timer(&led_cdev->blink_timer, jiffies + msecs_to_jiffies(delay));
+}
+
+static void led_stop_software_blink(struct led_classdev *led_cdev)
+{
+ /* deactivate previous settings */
+ del_timer_sync(&led_cdev->blink_timer);
+ led_cdev->blink_delay_on = 0;
+ led_cdev->blink_delay_off = 0;
+}
+
+static void led_set_software_blink(struct led_classdev *led_cdev,
+ unsigned long delay_on,
+ unsigned long delay_off)
+{
+ int current_brightness;
+
+ current_brightness = led_get_brightness(led_cdev);
+ if (current_brightness)
+ led_cdev->blink_brightness = current_brightness;
+ if (!led_cdev->blink_brightness)
+ led_cdev->blink_brightness = led_cdev->max_brightness;
+
+ if (delay_on == led_cdev->blink_delay_on &&
+ delay_off == led_cdev->blink_delay_off)
+ return;
+
+ led_stop_software_blink(led_cdev);
+
+ led_cdev->blink_delay_on = delay_on;
+ led_cdev->blink_delay_off = delay_off;
+
+ /* never on - don't blink */
+ if (!delay_on)
+ return;
+
+ /* never off - just set to brightness */
+ if (!delay_off) {
+ led_set_brightness(led_cdev, led_cdev->blink_brightness);
+ return;
+ }
+
+ mod_timer(&led_cdev->blink_timer, jiffies + 1);
+}
+
+
/**
* led_classdev_suspend - suspend an led_classdev.
* @led_cdev: the led_classdev to suspend.
@@ -148,6 +221,10 @@ int led_classdev_register(struct device *parent, struct led_classdev *led_cdev)
led_update_brightness(led_cdev);
+ init_timer(&led_cdev->blink_timer);
+ led_cdev->blink_timer.function = led_timer_function;
+ led_cdev->blink_timer.data = (unsigned long)led_cdev;
+
#ifdef CONFIG_LEDS_TRIGGERS
led_trigger_set_default(led_cdev);
#endif
@@ -157,7 +234,6 @@ int led_classdev_register(struct device *parent, struct led_classdev *led_cdev)
return 0;
}
-
EXPORT_SYMBOL_GPL(led_classdev_register);
/**
@@ -175,6 +251,9 @@ void led_classdev_unregister(struct led_classdev *led_cdev)
up_write(&led_cdev->trigger_lock);
#endif
+ /* Stop blinking */
+ led_brightness_set(led_cdev, LED_OFF);
+
device_unregister(led_cdev->dev);
down_write(&leds_list_lock);
@@ -183,6 +262,30 @@ void led_classdev_unregister(struct led_classdev *led_cdev)
}
EXPORT_SYMBOL_GPL(led_classdev_unregister);
+void led_blink_set(struct led_classdev *led_cdev,
+ unsigned long *delay_on,
+ unsigned long *delay_off)
+{
+ if (led_cdev->blink_set &&
+ led_cdev->blink_set(led_cdev, delay_on, delay_off))
+ return;
+
+ /* blink with 1 Hz as default if nothing specified */
+ if (!*delay_on && !*delay_off)
+ *delay_on = *delay_off = 500;
+
+ led_set_software_blink(led_cdev, *delay_on, *delay_off);
+}
+EXPORT_SYMBOL(led_blink_set);
+
+void led_brightness_set(struct led_classdev *led_cdev,
+ enum led_brightness brightness)
+{
+ led_stop_software_blink(led_cdev);
+ led_cdev->brightness_set(led_cdev, brightness);
+}
+EXPORT_SYMBOL(led_brightness_set);
+
static int __init leds_init(void)
{
leds_class = class_create(THIS_MODULE, "leds");
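The led_blink_set()/led_brightness_set() pair added above gives callers a single entry point for blinking: a driver's blink_set() hook is tried first and, failing that, the class falls back to the software blink_timer, with 0/0 delays meaning the default 1 Hz pattern. A minimal sketch of a consumer of this API follows; the function name is hypothetical and not part of the patch.

#include <linux/leds.h>

/* Hypothetical helper: blink 100ms on / 900ms off, then stop. */
static void demo_blink_and_stop(struct led_classdev *cdev)
{
	unsigned long delay_on = 100;	/* ms */
	unsigned long delay_off = 900;	/* ms */

	/* Passing 0/0 instead would request the default 500ms/500ms blink */
	led_blink_set(cdev, &delay_on, &delay_off);

	/* led_brightness_set() also cancels a pending software blink */
	led_brightness_set(cdev, LED_OFF);
}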
diff --git a/drivers/leds/led-triggers.c b/drivers/leds/led-triggers.c
index f1c00db88b5e..c41eb6180c9c 100644
--- a/drivers/leds/led-triggers.c
+++ b/drivers/leds/led-triggers.c
@@ -113,7 +113,7 @@ void led_trigger_set(struct led_classdev *led_cdev, struct led_trigger *trigger)
if (led_cdev->trigger->deactivate)
led_cdev->trigger->deactivate(led_cdev);
led_cdev->trigger = NULL;
- led_set_brightness(led_cdev, LED_OFF);
+ led_brightness_set(led_cdev, LED_OFF);
}
if (trigger) {
write_lock_irqsave(&trigger->leddev_list_lock, flags);
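The one-line change above routes trigger deactivation through led_brightness_set(), so a software blink started by the old trigger is cancelled together with the brightness. A hedged sketch of a trivial trigger that relies on this behaviour; the trigger name and function are hypothetical, not taken from the patch.

#include <linux/leds.h>

static void example_trig_activate(struct led_classdev *led_cdev)
{
	unsigned long delay_on = 0, delay_off = 0;	/* 0/0 -> 1 Hz default */

	/* may fall back to the software blink timer in led-class.c */
	led_blink_set(led_cdev, &delay_on, &delay_off);
}

static struct led_trigger example_trigger = {
	.name     = "example-blink",
	.activate = example_trig_activate,
	/* no .deactivate needed here: led_trigger_set() turns the LED off
	 * and thereby stops the software blink, as the hunk above shows */
};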
diff --git a/drivers/leds/leds-88pm860x.c b/drivers/leds/leds-88pm860x.c
index b7677106cff8..e672b44ee172 100644
--- a/drivers/leds/leds-88pm860x.c
+++ b/drivers/leds/leds-88pm860x.c
@@ -24,26 +24,17 @@
#define LED_CURRENT_MASK (0x07 << 5)
#define LED_BLINK_ON_MASK (0x07)
-#define LED_BLINK_PERIOD_MASK (0x0F << 3)
#define LED_BLINK_MASK (0x7F)
#define LED_BLINK_ON(x) ((x & 0x7) * 66 + 66)
-#define LED_BLINK_PERIOD(x) ((x & 0xF) * 530 + 930)
#define LED_BLINK_ON_MIN LED_BLINK_ON(0)
#define LED_BLINK_ON_MAX LED_BLINK_ON(0x7)
-#define LED_BLINK_PERIOD_MIN LED_BLINK_PERIOD(0)
-#define LED_BLINK_PERIOD_MAX LED_BLINK_PERIOD(0xE)
+#define LED_ON_CONTINUOUS (0x0F << 3)
#define LED_TO_ON(x) ((x - 66) / 66)
-#define LED_TO_PERIOD(x) ((x - 930) / 530)
#define LED1_BLINK_EN (1 << 1)
#define LED2_BLINK_EN (1 << 2)
-enum {
- SET_BRIGHTNESS,
- SET_BLINK,
-};
-
struct pm860x_led {
struct led_classdev cdev;
struct i2c_client *i2c;
@@ -54,8 +45,6 @@ struct pm860x_led {
int port;
int iset;
- int command;
- int offset;
unsigned char brightness;
unsigned char current_brightness;
@@ -95,10 +84,12 @@ static inline int __blink_off(int port)
case PM8606_LED1_GREEN:
case PM8606_LED1_BLUE:
ret = PM8606_RGB1A;
+ break;
case PM8606_LED2_RED:
case PM8606_LED2_GREEN:
case PM8606_LED2_BLUE:
ret = PM8606_RGB2A;
+ break;
}
return ret;
}
@@ -122,60 +113,35 @@ static inline int __blink_ctl_mask(int port)
return ret;
}
-static int __led_set(struct pm860x_led *led, int command)
+static void pm860x_led_work(struct work_struct *work)
{
- struct pm860x_chip *chip = led->chip;
- int mask, ret;
+ struct pm860x_led *led;
+ struct pm860x_chip *chip;
+ int mask;
+
+ led = container_of(work, struct pm860x_led, work);
+ chip = led->chip;
mutex_lock(&led->lock);
- switch (command) {
- case SET_BRIGHTNESS:
- if ((led->current_brightness == 0) && led->brightness) {
- if (led->iset) {
- ret = pm860x_set_bits(led->i2c, led->offset,
- LED_CURRENT_MASK, led->iset);
- if (ret < 0)
- goto out;
- }
- } else if (led->brightness == 0) {
- ret = pm860x_set_bits(led->i2c, led->offset,
- LED_CURRENT_MASK, 0);
- if (ret < 0)
- goto out;
+ if ((led->current_brightness == 0) && led->brightness) {
+ if (led->iset) {
+ pm860x_set_bits(led->i2c, __led_off(led->port),
+ LED_CURRENT_MASK, led->iset);
}
- ret = pm860x_set_bits(led->i2c, led->offset, LED_PWM_MASK,
- led->brightness);
- if (ret < 0)
- goto out;
- led->current_brightness = led->brightness;
- dev_dbg(chip->dev, "Update LED. (reg:%d, brightness:%d)\n",
- led->offset, led->brightness);
- break;
- case SET_BLINK:
- ret = pm860x_set_bits(led->i2c, led->offset,
- LED_BLINK_MASK, led->blink_data);
- if (ret < 0)
- goto out;
-
mask = __blink_ctl_mask(led->port);
- ret = pm860x_set_bits(led->i2c, PM8606_WLED3B, mask, mask);
- if (ret < 0)
- goto out;
- dev_dbg(chip->dev, "LED blink delay on:%dms, delay off:%dms\n",
- led->blink_on, led->blink_off);
- break;
+ pm860x_set_bits(led->i2c, PM8606_WLED3B, mask, mask);
+ } else if (led->brightness == 0) {
+ pm860x_set_bits(led->i2c, __led_off(led->port),
+ LED_CURRENT_MASK, 0);
+ mask = __blink_ctl_mask(led->port);
+ pm860x_set_bits(led->i2c, PM8606_WLED3B, mask, 0);
}
-out:
+ pm860x_set_bits(led->i2c, __led_off(led->port), LED_PWM_MASK,
+ led->brightness);
+ led->current_brightness = led->brightness;
+ dev_dbg(chip->dev, "Update LED. (reg:%d, brightness:%d)\n",
+ __led_off(led->port), led->brightness);
mutex_unlock(&led->lock);
- return 0;
-}
-
-static void pm860x_led_work(struct work_struct *work)
-{
- struct pm860x_led *led;
-
- led = container_of(work, struct pm860x_led, work);
- __led_set(led, led->command);
}
static void pm860x_led_set(struct led_classdev *cdev,
@@ -183,42 +149,10 @@ static void pm860x_led_set(struct led_classdev *cdev,
{
struct pm860x_led *data = container_of(cdev, struct pm860x_led, cdev);
- data->offset = __led_off(data->port);
data->brightness = value >> 3;
- data->command = SET_BRIGHTNESS;
schedule_work(&data->work);
}
-static int pm860x_led_blink(struct led_classdev *cdev,
- unsigned long *delay_on,
- unsigned long *delay_off)
-{
- struct pm860x_led *data = container_of(cdev, struct pm860x_led, cdev);
- int period, on;
-
- on = *delay_on;
- if ((on < LED_BLINK_ON_MIN) || (on > LED_BLINK_ON_MAX))
- return -EINVAL;
-
- on = LED_TO_ON(on);
- on = LED_BLINK_ON(on);
-
- period = on + *delay_off;
- if ((period < LED_BLINK_PERIOD_MIN) || (period > LED_BLINK_PERIOD_MAX))
- return -EINVAL;
- period = LED_TO_PERIOD(period);
- period = LED_BLINK_PERIOD(period);
-
- data->offset = __blink_off(data->port);
- data->blink_on = on;
- data->blink_off = period - data->blink_on;
- data->blink_data = (period << 3) | data->blink_on;
- data->command = SET_BLINK;
- schedule_work(&data->work);
-
- return 0;
-}
-
static int __check_device(struct pm860x_led_pdata *pdata, char *name)
{
struct pm860x_led_pdata *p = pdata;
@@ -257,7 +191,7 @@ static int pm860x_led_probe(struct platform_device *pdev)
pm860x_pdata = pdev->dev.parent->platform_data;
pdata = pm860x_pdata->led;
} else {
- dev_err(&pdev->dev, "missing platform data\n");
+ dev_err(&pdev->dev, "No platform data!\n");
return -EINVAL;
}
@@ -279,7 +213,6 @@ static int pm860x_led_probe(struct platform_device *pdev)
data->current_brightness = 0;
data->cdev.name = data->name;
data->cdev.brightness_set = pm860x_led_set;
- data->cdev.blink_set = pm860x_led_blink;
mutex_init(&data->lock);
INIT_WORK(&data->work, pm860x_led_work);
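The 88pm860x rework above drops the SET_BRIGHTNESS/SET_BLINK command dispatch and leaves a single work handler; brightness_set() only records the requested value and schedules the work, since it may be called from atomic context while the I2C accesses have to sleep. A generic sketch of that pattern, using hypothetical "foo" names rather than the driver's own:

#include <linux/leds.h>
#include <linux/mutex.h>
#include <linux/workqueue.h>

struct foo_led {
	struct led_classdev cdev;
	struct work_struct work;
	struct mutex lock;
	u8 brightness;
};

static void foo_led_work(struct work_struct *work)
{
	struct foo_led *led = container_of(work, struct foo_led, work);

	mutex_lock(&led->lock);
	/* the sleeping bus I/O (e.g. pm860x_set_bits()) belongs here */
	mutex_unlock(&led->lock);
}

static void foo_led_set(struct led_classdev *cdev, enum led_brightness value)
{
	struct foo_led *led = container_of(cdev, struct foo_led, cdev);

	led->brightness = value;	/* remember the request */
	schedule_work(&led->work);	/* defer the register writes */
}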
diff --git a/drivers/leds/leds-gpio.c b/drivers/leds/leds-gpio.c
index ea57e05d08f3..4d9fa38d9ff6 100644
--- a/drivers/leds/leds-gpio.c
+++ b/drivers/leds/leds-gpio.c
@@ -316,7 +316,7 @@ static struct of_platform_driver of_gpio_leds_driver = {
static int __init gpio_led_init(void)
{
- int ret;
+ int ret = 0;
#ifdef CONFIG_LEDS_GPIO_PLATFORM
ret = platform_driver_register(&gpio_led_driver);
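The "ret = 0" initialisation above matters because the platform-driver registration sits inside #ifdef CONFIG_LEDS_GPIO_PLATFORM; with that option disabled the old code could return an uninitialised value. A reduced sketch of the shape of the fix, mirroring gpio_led_init() with a hypothetical function name:

static int __init example_leds_init(void)
{
	int ret = 0;	/* without "= 0" this could be returned uninitialised */

#ifdef CONFIG_LEDS_GPIO_PLATFORM
	ret = platform_driver_register(&gpio_led_driver);
#endif
	return ret;
}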
diff --git a/drivers/leds/leds-lp5521.c b/drivers/leds/leds-lp5521.c
new file mode 100644
index 000000000000..33facd0c45d1
--- /dev/null
+++ b/drivers/leds/leds-lp5521.c
@@ -0,0 +1,837 @@
+/*
+ * LP5521 LED chip driver.
+ *
+ * Copyright (C) 2010 Nokia Corporation
+ *
+ * Contact: Samu Onkalo <samu.p.onkalo@nokia.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
+ * 02110-1301 USA
+ */
+
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/i2c.h>
+#include <linux/mutex.h>
+#include <linux/gpio.h>
+#include <linux/interrupt.h>
+#include <linux/delay.h>
+#include <linux/ctype.h>
+#include <linux/spinlock.h>
+#include <linux/wait.h>
+#include <linux/leds.h>
+#include <linux/leds-lp5521.h>
+#include <linux/workqueue.h>
+#include <linux/slab.h>
+
+#define LP5521_PROGRAM_LENGTH 32 /* in bytes */
+
+#define LP5521_MAX_LEDS 3 /* Maximum number of LEDs */
+#define LP5521_MAX_ENGINES 3 /* Maximum number of engines */
+
+#define LP5521_ENG_MASK_BASE 0x30 /* 00110000 */
+#define LP5521_ENG_STATUS_MASK 0x07 /* 00000111 */
+
+#define LP5521_CMD_LOAD 0x15 /* 00010101 */
+#define LP5521_CMD_RUN 0x2a /* 00101010 */
+#define LP5521_CMD_DIRECT 0x3f /* 00111111 */
+#define LP5521_CMD_DISABLED 0x00 /* 00000000 */
+
+/* Registers */
+#define LP5521_REG_ENABLE 0x00
+#define LP5521_REG_OP_MODE 0x01
+#define LP5521_REG_R_PWM 0x02
+#define LP5521_REG_G_PWM 0x03
+#define LP5521_REG_B_PWM 0x04
+#define LP5521_REG_R_CURRENT 0x05
+#define LP5521_REG_G_CURRENT 0x06
+#define LP5521_REG_B_CURRENT 0x07
+#define LP5521_REG_CONFIG 0x08
+#define LP5521_REG_R_CHANNEL_PC 0x09
+#define LP5521_REG_G_CHANNEL_PC 0x0A
+#define LP5521_REG_B_CHANNEL_PC 0x0B
+#define LP5521_REG_STATUS 0x0C
+#define LP5521_REG_RESET 0x0D
+#define LP5521_REG_GPO 0x0E
+#define LP5521_REG_R_PROG_MEM 0x10
+#define LP5521_REG_G_PROG_MEM 0x30
+#define LP5521_REG_B_PROG_MEM 0x50
+
+#define LP5521_PROG_MEM_BASE LP5521_REG_R_PROG_MEM
+#define LP5521_PROG_MEM_SIZE 0x20
+
+/* Base register to set LED current */
+#define LP5521_REG_LED_CURRENT_BASE LP5521_REG_R_CURRENT
+
+/* Base register to set the brightness */
+#define LP5521_REG_LED_PWM_BASE LP5521_REG_R_PWM
+
+/* Bits in ENABLE register */
+#define LP5521_MASTER_ENABLE 0x40 /* Chip master enable */
+#define LP5521_LOGARITHMIC_PWM 0x80 /* Logarithmic PWM adjustment */
+#define LP5521_EXEC_RUN 0x2A
+
+/* Bits in CONFIG register */
+#define LP5521_PWM_HF 0x40 /* PWM: 0 = 256Hz, 1 = 558Hz */
+#define LP5521_PWRSAVE_EN 0x20 /* 1 = Power save mode */
+#define LP5521_CP_MODE_OFF 0 /* Charge pump (CP) off */
+#define LP5521_CP_MODE_BYPASS 8 /* CP forced to bypass mode */
+#define LP5521_CP_MODE_1X5 0x10 /* CP forced to 1.5x mode */
+#define LP5521_CP_MODE_AUTO 0x18 /* Automatic mode selection */
+#define LP5521_R_TO_BATT 4 /* R out: 0 = CP, 1 = Vbat */
+#define LP5521_CLK_SRC_EXT 0 /* Ext-clk source (CLK_32K) */
+#define LP5521_CLK_INT 1 /* Internal clock */
+#define LP5521_CLK_AUTO 2 /* Automatic clock selection */
+
+/* Status */
+#define LP5521_EXT_CLK_USED 0x08
+
+struct lp5521_engine {
+ const struct attribute_group *attributes;
+ int id;
+ u8 mode;
+ u8 prog_page;
+ u8 engine_mask;
+};
+
+struct lp5521_led {
+ int id;
+ u8 chan_nr;
+ u8 led_current;
+ u8 max_current;
+ struct led_classdev cdev;
+ struct work_struct brightness_work;
+ u8 brightness;
+};
+
+struct lp5521_chip {
+ struct lp5521_platform_data *pdata;
+ struct mutex lock; /* Serialize control */
+ struct i2c_client *client;
+ struct lp5521_engine engines[LP5521_MAX_ENGINES];
+ struct lp5521_led leds[LP5521_MAX_LEDS];
+ u8 num_channels;
+ u8 num_leds;
+};
+
+static inline struct lp5521_led *cdev_to_led(struct led_classdev *cdev)
+{
+ return container_of(cdev, struct lp5521_led, cdev);
+}
+
+static inline struct lp5521_chip *engine_to_lp5521(struct lp5521_engine *engine)
+{
+ return container_of(engine, struct lp5521_chip,
+ engines[engine->id - 1]);
+}
+
+static inline struct lp5521_chip *led_to_lp5521(struct lp5521_led *led)
+{
+ return container_of(led, struct lp5521_chip,
+ leds[led->id]);
+}
+
+static void lp5521_led_brightness_work(struct work_struct *work);
+
+static inline int lp5521_write(struct i2c_client *client, u8 reg, u8 value)
+{
+ return i2c_smbus_write_byte_data(client, reg, value);
+}
+
+static int lp5521_read(struct i2c_client *client, u8 reg, u8 *buf)
+{
+ s32 ret;
+
+ ret = i2c_smbus_read_byte_data(client, reg);
+ if (ret < 0)
+ return -EIO;
+
+ *buf = ret;
+ return 0;
+}
+
+static int lp5521_set_engine_mode(struct lp5521_engine *engine, u8 mode)
+{
+ struct lp5521_chip *chip = engine_to_lp5521(engine);
+ struct i2c_client *client = chip->client;
+ int ret;
+ u8 engine_state;
+
+ /* Only transition between RUN and DIRECT mode are handled here */
+ if (mode == LP5521_CMD_LOAD)
+ return 0;
+
+ if (mode == LP5521_CMD_DISABLED)
+ mode = LP5521_CMD_DIRECT;
+
+ ret = lp5521_read(client, LP5521_REG_OP_MODE, &engine_state);
+
+ /* set mode only for this engine */
+ engine_state &= ~(engine->engine_mask);
+ mode &= engine->engine_mask;
+ engine_state |= mode;
+ ret |= lp5521_write(client, LP5521_REG_OP_MODE, engine_state);
+
+ return ret;
+}
+
+static int lp5521_load_program(struct lp5521_engine *eng, const u8 *pattern)
+{
+ struct lp5521_chip *chip = engine_to_lp5521(eng);
+ struct i2c_client *client = chip->client;
+ int ret;
+ int addr;
+ u8 mode;
+
+ /* move current engine to direct mode and remember the state */
+ ret = lp5521_set_engine_mode(eng, LP5521_CMD_DIRECT);
+ /* Mode change requires min 500 us delay. 1 - 2 ms with margin */
+ usleep_range(1000, 2000);
+ ret |= lp5521_read(client, LP5521_REG_OP_MODE, &mode);
+
+ /* For loading, put all the engines into load mode */
+ lp5521_write(client, LP5521_REG_OP_MODE, LP5521_CMD_DIRECT);
+ /* Mode change requires min 500 us delay. 1 - 2 ms with margin */
+ usleep_range(1000, 2000);
+ lp5521_write(client, LP5521_REG_OP_MODE, LP5521_CMD_LOAD);
+ /* Mode change requires min 500 us delay. 1 - 2 ms with margin */
+ usleep_range(1000, 2000);
+
+ addr = LP5521_PROG_MEM_BASE + eng->prog_page * LP5521_PROG_MEM_SIZE;
+ i2c_smbus_write_i2c_block_data(client,
+ addr,
+ LP5521_PROG_MEM_SIZE,
+ pattern);
+
+ ret |= lp5521_write(client, LP5521_REG_OP_MODE, mode);
+ return ret;
+}
+
+static int lp5521_set_led_current(struct lp5521_chip *chip, int led, u8 curr)
+{
+ return lp5521_write(chip->client,
+ LP5521_REG_LED_CURRENT_BASE + chip->leds[led].chan_nr,
+ curr);
+}
+
+static void lp5521_init_engine(struct lp5521_chip *chip,
+ const struct attribute_group *attr_group)
+{
+ int i;
+ for (i = 0; i < ARRAY_SIZE(chip->engines); i++) {
+ chip->engines[i].id = i + 1;
+ chip->engines[i].engine_mask = LP5521_ENG_MASK_BASE >> (i * 2);
+ chip->engines[i].prog_page = i;
+ chip->engines[i].attributes = &attr_group[i];
+ }
+}
+
+static int lp5521_configure(struct i2c_client *client,
+ const struct attribute_group *attr_group)
+{
+ struct lp5521_chip *chip = i2c_get_clientdata(client);
+ int ret;
+
+ lp5521_init_engine(chip, attr_group);
+
+ /* Set all PWMs to direct control mode */
+ ret = lp5521_write(client, LP5521_REG_OP_MODE, 0x3F);
+
+ /* Enable auto-powersave, set charge pump to auto, red to battery */
+ ret |= lp5521_write(client, LP5521_REG_CONFIG,
+ LP5521_PWRSAVE_EN | LP5521_CP_MODE_AUTO | LP5521_R_TO_BATT);
+
+ /* Initialize all channels PWM to zero -> leds off */
+ ret |= lp5521_write(client, LP5521_REG_R_PWM, 0);
+ ret |= lp5521_write(client, LP5521_REG_G_PWM, 0);
+ ret |= lp5521_write(client, LP5521_REG_B_PWM, 0);
+
+ /* Engines are set to the run state when OP_MODE enables them */
+ ret |= lp5521_write(client, LP5521_REG_ENABLE,
+ LP5521_MASTER_ENABLE | LP5521_LOGARITHMIC_PWM |
+ LP5521_EXEC_RUN);
+ /* enable takes 500us. 1 - 2 ms leaves some margin */
+ usleep_range(1000, 2000);
+
+ return ret;
+}
+
+static int lp5521_run_selftest(struct lp5521_chip *chip, char *buf)
+{
+ int ret;
+ u8 status;
+
+ ret = lp5521_read(chip->client, LP5521_REG_STATUS, &status);
+ if (ret < 0)
+ return ret;
+
+ /* Check that ext clock is really in use if requested */
+ if (chip->pdata && chip->pdata->clock_mode == LP5521_CLOCK_EXT)
+ if ((status & LP5521_EXT_CLK_USED) == 0)
+ return -EIO;
+ return 0;
+}
+
+static void lp5521_set_brightness(struct led_classdev *cdev,
+ enum led_brightness brightness)
+{
+ struct lp5521_led *led = cdev_to_led(cdev);
+ led->brightness = (u8)brightness;
+ schedule_work(&led->brightness_work);
+}
+
+static void lp5521_led_brightness_work(struct work_struct *work)
+{
+ struct lp5521_led *led = container_of(work,
+ struct lp5521_led,
+ brightness_work);
+ struct lp5521_chip *chip = led_to_lp5521(led);
+ struct i2c_client *client = chip->client;
+
+ mutex_lock(&chip->lock);
+ lp5521_write(client, LP5521_REG_LED_PWM_BASE + led->chan_nr,
+ led->brightness);
+ mutex_unlock(&chip->lock);
+}
+
+/* Detect the chip by setting its ENABLE register and reading it back. */
+static int lp5521_detect(struct i2c_client *client)
+{
+ int ret;
+ u8 buf;
+
+ ret = lp5521_write(client, LP5521_REG_ENABLE,
+ LP5521_MASTER_ENABLE | LP5521_LOGARITHMIC_PWM);
+ if (ret)
+ return ret;
+ /* enable takes 500us. 1 - 2 ms leaves some margin */
+ usleep_range(1000, 2000);
+ ret = lp5521_read(client, LP5521_REG_ENABLE, &buf);
+ if (ret)
+ return ret;
+ if (buf != (LP5521_MASTER_ENABLE | LP5521_LOGARITHMIC_PWM))
+ return -ENODEV;
+
+ return 0;
+}
+
+/* Set engine mode and create appropriate sysfs attributes, if required. */
+static int lp5521_set_mode(struct lp5521_engine *engine, u8 mode)
+{
+ struct lp5521_chip *chip = engine_to_lp5521(engine);
+ struct i2c_client *client = chip->client;
+ struct device *dev = &client->dev;
+ int ret = 0;
+
+ /* if in that mode already do nothing, except for run */
+ if (mode == engine->mode && mode != LP5521_CMD_RUN)
+ return 0;
+
+ if (mode == LP5521_CMD_RUN) {
+ ret = lp5521_set_engine_mode(engine, LP5521_CMD_RUN);
+ } else if (mode == LP5521_CMD_LOAD) {
+ lp5521_set_engine_mode(engine, LP5521_CMD_DISABLED);
+ lp5521_set_engine_mode(engine, LP5521_CMD_LOAD);
+
+ ret = sysfs_create_group(&dev->kobj, engine->attributes);
+ if (ret)
+ return ret;
+ } else if (mode == LP5521_CMD_DISABLED) {
+ lp5521_set_engine_mode(engine, LP5521_CMD_DISABLED);
+ }
+
+ /* remove load attribute from sysfs if not in load mode */
+ if (engine->mode == LP5521_CMD_LOAD && mode != LP5521_CMD_LOAD)
+ sysfs_remove_group(&dev->kobj, engine->attributes);
+
+ engine->mode = mode;
+
+ return ret;
+}
+
+static int lp5521_do_store_load(struct lp5521_engine *engine,
+ const char *buf, size_t len)
+{
+ struct lp5521_chip *chip = engine_to_lp5521(engine);
+ struct i2c_client *client = chip->client;
+ int ret, nrchars, offset = 0, i = 0;
+ char c[3];
+ unsigned cmd;
+ u8 pattern[LP5521_PROGRAM_LENGTH] = {0};
+
+ while ((offset < len - 1) && (i < LP5521_PROGRAM_LENGTH)) {
+ /* separate sscanfs because the length specifier works only for %s */
+ ret = sscanf(buf + offset, "%2s%n ", c, &nrchars);
+ ret = sscanf(c, "%2x", &cmd);
+ if (ret != 1)
+ goto fail;
+ pattern[i] = (u8)cmd;
+
+ offset += nrchars;
+ i++;
+ }
+
+ /* Each instruction is 16bit long. Check that length is even */
+ if (i % 2)
+ goto fail;
+
+ mutex_lock(&chip->lock);
+ ret = lp5521_load_program(engine, pattern);
+ mutex_unlock(&chip->lock);
+
+ if (ret) {
+ dev_err(&client->dev, "failed loading pattern\n");
+ return ret;
+ }
+
+ return len;
+fail:
+ dev_err(&client->dev, "wrong pattern format\n");
+ return -EINVAL;
+}
+
+static ssize_t store_engine_load(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t len, int nr)
+{
+ struct i2c_client *client = to_i2c_client(dev);
+ struct lp5521_chip *chip = i2c_get_clientdata(client);
+ return lp5521_do_store_load(&chip->engines[nr - 1], buf, len);
+}
+
+#define store_load(nr) \
+static ssize_t store_engine##nr##_load(struct device *dev, \
+ struct device_attribute *attr, \
+ const char *buf, size_t len) \
+{ \
+ return store_engine_load(dev, attr, buf, len, nr); \
+}
+store_load(1)
+store_load(2)
+store_load(3)
+
+static ssize_t show_engine_mode(struct device *dev,
+ struct device_attribute *attr,
+ char *buf, int nr)
+{
+ struct i2c_client *client = to_i2c_client(dev);
+ struct lp5521_chip *chip = i2c_get_clientdata(client);
+ switch (chip->engines[nr - 1].mode) {
+ case LP5521_CMD_RUN:
+ return sprintf(buf, "run\n");
+ case LP5521_CMD_LOAD:
+ return sprintf(buf, "load\n");
+ case LP5521_CMD_DISABLED:
+ return sprintf(buf, "disabled\n");
+ default:
+ return sprintf(buf, "disabled\n");
+ }
+}
+
+#define show_mode(nr) \
+static ssize_t show_engine##nr##_mode(struct device *dev, \
+ struct device_attribute *attr, \
+ char *buf) \
+{ \
+ return show_engine_mode(dev, attr, buf, nr); \
+}
+show_mode(1)
+show_mode(2)
+show_mode(3)
+
+static ssize_t store_engine_mode(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t len, int nr)
+{
+ struct i2c_client *client = to_i2c_client(dev);
+ struct lp5521_chip *chip = i2c_get_clientdata(client);
+ struct lp5521_engine *engine = &chip->engines[nr - 1];
+ mutex_lock(&chip->lock);
+
+ if (!strncmp(buf, "run", 3))
+ lp5521_set_mode(engine, LP5521_CMD_RUN);
+ else if (!strncmp(buf, "load", 4))
+ lp5521_set_mode(engine, LP5521_CMD_LOAD);
+ else if (!strncmp(buf, "disabled", 8))
+ lp5521_set_mode(engine, LP5521_CMD_DISABLED);
+
+ mutex_unlock(&chip->lock);
+ return len;
+}
+
+#define store_mode(nr) \
+static ssize_t store_engine##nr##_mode(struct device *dev, \
+ struct device_attribute *attr, \
+ const char *buf, size_t len) \
+{ \
+ return store_engine_mode(dev, attr, buf, len, nr); \
+}
+store_mode(1)
+store_mode(2)
+store_mode(3)
+
+static ssize_t show_max_current(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct led_classdev *led_cdev = dev_get_drvdata(dev);
+ struct lp5521_led *led = cdev_to_led(led_cdev);
+
+ return sprintf(buf, "%d\n", led->max_current);
+}
+
+static ssize_t show_current(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct led_classdev *led_cdev = dev_get_drvdata(dev);
+ struct lp5521_led *led = cdev_to_led(led_cdev);
+
+ return sprintf(buf, "%d\n", led->led_current);
+}
+
+static ssize_t store_current(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t len)
+{
+ struct led_classdev *led_cdev = dev_get_drvdata(dev);
+ struct lp5521_led *led = cdev_to_led(led_cdev);
+ struct lp5521_chip *chip = led_to_lp5521(led);
+ ssize_t ret;
+ unsigned long curr;
+
+ if (strict_strtoul(buf, 0, &curr))
+ return -EINVAL;
+
+ if (curr > led->max_current)
+ return -EINVAL;
+
+ mutex_lock(&chip->lock);
+ ret = lp5521_set_led_current(chip, led->id, curr);
+ mutex_unlock(&chip->lock);
+
+ if (ret < 0)
+ return ret;
+
+ led->led_current = (u8)curr;
+
+ return len;
+}
+
+static ssize_t lp5521_selftest(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct i2c_client *client = to_i2c_client(dev);
+ struct lp5521_chip *chip = i2c_get_clientdata(client);
+ int ret;
+
+ mutex_lock(&chip->lock);
+ ret = lp5521_run_selftest(chip, buf);
+ mutex_unlock(&chip->lock);
+ return sprintf(buf, "%s\n", ret ? "FAIL" : "OK");
+}
+
+/* led class device attributes */
+static DEVICE_ATTR(led_current, S_IRUGO | S_IWUGO, show_current, store_current);
+static DEVICE_ATTR(max_current, S_IRUGO , show_max_current, NULL);
+
+static struct attribute *lp5521_led_attributes[] = {
+ &dev_attr_led_current.attr,
+ &dev_attr_max_current.attr,
+ NULL,
+};
+
+static struct attribute_group lp5521_led_attribute_group = {
+ .attrs = lp5521_led_attributes
+};
+
+/* device attributes */
+static DEVICE_ATTR(engine1_mode, S_IRUGO | S_IWUGO,
+ show_engine1_mode, store_engine1_mode);
+static DEVICE_ATTR(engine2_mode, S_IRUGO | S_IWUGO,
+ show_engine2_mode, store_engine2_mode);
+static DEVICE_ATTR(engine3_mode, S_IRUGO | S_IWUGO,
+ show_engine3_mode, store_engine3_mode);
+static DEVICE_ATTR(engine1_load, S_IWUGO, NULL, store_engine1_load);
+static DEVICE_ATTR(engine2_load, S_IWUGO, NULL, store_engine2_load);
+static DEVICE_ATTR(engine3_load, S_IWUGO, NULL, store_engine3_load);
+static DEVICE_ATTR(selftest, S_IRUGO, lp5521_selftest, NULL);
+
+static struct attribute *lp5521_attributes[] = {
+ &dev_attr_engine1_mode.attr,
+ &dev_attr_engine2_mode.attr,
+ &dev_attr_engine3_mode.attr,
+ &dev_attr_selftest.attr,
+ NULL
+};
+
+static struct attribute *lp5521_engine1_attributes[] = {
+ &dev_attr_engine1_load.attr,
+ NULL
+};
+
+static struct attribute *lp5521_engine2_attributes[] = {
+ &dev_attr_engine2_load.attr,
+ NULL
+};
+
+static struct attribute *lp5521_engine3_attributes[] = {
+ &dev_attr_engine3_load.attr,
+ NULL
+};
+
+static const struct attribute_group lp5521_group = {
+ .attrs = lp5521_attributes,
+};
+
+static const struct attribute_group lp5521_engine_group[] = {
+ {.attrs = lp5521_engine1_attributes },
+ {.attrs = lp5521_engine2_attributes },
+ {.attrs = lp5521_engine3_attributes },
+};
+
+static int lp5521_register_sysfs(struct i2c_client *client)
+{
+ struct device *dev = &client->dev;
+ return sysfs_create_group(&dev->kobj, &lp5521_group);
+}
+
+static void lp5521_unregister_sysfs(struct i2c_client *client)
+{
+ struct lp5521_chip *chip = i2c_get_clientdata(client);
+ struct device *dev = &client->dev;
+ int i;
+
+ sysfs_remove_group(&dev->kobj, &lp5521_group);
+
+ for (i = 0; i < ARRAY_SIZE(chip->engines); i++) {
+ if (chip->engines[i].mode == LP5521_CMD_LOAD)
+ sysfs_remove_group(&dev->kobj,
+ chip->engines[i].attributes);
+ }
+
+ for (i = 0; i < chip->num_leds; i++)
+ sysfs_remove_group(&chip->leds[i].cdev.dev->kobj,
+ &lp5521_led_attribute_group);
+}
+
+static int __init lp5521_init_led(struct lp5521_led *led,
+ struct i2c_client *client,
+ int chan, struct lp5521_platform_data *pdata)
+{
+ struct device *dev = &client->dev;
+ char name[32];
+ int res;
+
+ if (chan >= LP5521_MAX_LEDS)
+ return -EINVAL;
+
+ if (pdata->led_config[chan].led_current == 0)
+ return 0;
+
+ led->led_current = pdata->led_config[chan].led_current;
+ led->max_current = pdata->led_config[chan].max_current;
+ led->chan_nr = pdata->led_config[chan].chan_nr;
+
+ if (led->chan_nr >= LP5521_MAX_LEDS) {
+ dev_err(dev, "Use channel numbers between 0 and %d\n",
+ LP5521_MAX_LEDS - 1);
+ return -EINVAL;
+ }
+
+ snprintf(name, sizeof(name), "%s:channel%d", client->name, chan);
+ led->cdev.brightness_set = lp5521_set_brightness;
+ led->cdev.name = name;
+ res = led_classdev_register(dev, &led->cdev);
+ if (res < 0) {
+ dev_err(dev, "couldn't register led on channel %d\n", chan);
+ return res;
+ }
+
+ res = sysfs_create_group(&led->cdev.dev->kobj,
+ &lp5521_led_attribute_group);
+ if (res < 0) {
+ dev_err(dev, "couldn't register current attribute\n");
+ led_classdev_unregister(&led->cdev);
+ return res;
+ }
+ return 0;
+}
+
+static int lp5521_probe(struct i2c_client *client,
+ const struct i2c_device_id *id)
+{
+ struct lp5521_chip *chip;
+ struct lp5521_platform_data *pdata;
+ int ret, i, led;
+
+ chip = kzalloc(sizeof(*chip), GFP_KERNEL);
+ if (!chip)
+ return -ENOMEM;
+
+ i2c_set_clientdata(client, chip);
+ chip->client = client;
+
+ pdata = client->dev.platform_data;
+
+ if (!pdata) {
+ dev_err(&client->dev, "no platform data\n");
+ ret = -EINVAL;
+ goto fail1;
+ }
+
+ mutex_init(&chip->lock);
+
+ chip->pdata = pdata;
+
+ if (pdata->setup_resources) {
+ ret = pdata->setup_resources();
+ if (ret < 0)
+ goto fail1;
+ }
+
+ if (pdata->enable) {
+ pdata->enable(0);
+ usleep_range(1000, 2000); /* Keep enable down at least 1ms */
+ pdata->enable(1);
+ usleep_range(1000, 2000); /* 500us abs min. */
+ }
+
+ lp5521_write(client, LP5521_REG_RESET, 0xff);
+ usleep_range(10000, 20000); /*
+ * Exact value is not available. 10 - 20ms
+ * appears to be enough for reset.
+ */
+ ret = lp5521_detect(client);
+
+ if (ret) {
+ dev_err(&client->dev, "Chip not found\n");
+ goto fail2;
+ }
+
+ dev_info(&client->dev, "%s programmable led chip found\n", id->name);
+
+ ret = lp5521_configure(client, lp5521_engine_group);
+ if (ret < 0) {
+ dev_err(&client->dev, "error configuring chip\n");
+ goto fail2;
+ }
+
+ /* Initialize leds */
+ chip->num_channels = pdata->num_channels;
+ chip->num_leds = 0;
+ led = 0;
+ for (i = 0; i < pdata->num_channels; i++) {
+ /* Do not initialize channels that are not connected */
+ if (pdata->led_config[i].led_current == 0)
+ continue;
+
+ ret = lp5521_init_led(&chip->leds[led], client, i, pdata);
+ if (ret) {
+ dev_err(&client->dev, "error initializing leds\n");
+ goto fail3;
+ }
+ chip->num_leds++;
+
+ chip->leds[led].id = led;
+ /* Set initial LED current */
+ lp5521_set_led_current(chip, led,
+ chip->leds[led].led_current);
+
+ INIT_WORK(&(chip->leds[led].brightness_work),
+ lp5521_led_brightness_work);
+
+ led++;
+ }
+
+ ret = lp5521_register_sysfs(client);
+ if (ret) {
+ dev_err(&client->dev, "registering sysfs failed\n");
+ goto fail3;
+ }
+ return ret;
+fail3:
+ for (i = 0; i < chip->num_leds; i++) {
+ led_classdev_unregister(&chip->leds[i].cdev);
+ cancel_work_sync(&chip->leds[i].brightness_work);
+ }
+fail2:
+ if (pdata->enable)
+ pdata->enable(0);
+ if (pdata->release_resources)
+ pdata->release_resources();
+fail1:
+ kfree(chip);
+ return ret;
+}
+
+static int lp5521_remove(struct i2c_client *client)
+{
+ struct lp5521_chip *chip = i2c_get_clientdata(client);
+ int i;
+
+ lp5521_unregister_sysfs(client);
+
+ for (i = 0; i < chip->num_leds; i++) {
+ led_classdev_unregister(&chip->leds[i].cdev);
+ cancel_work_sync(&chip->leds[i].brightness_work);
+ }
+
+ if (chip->pdata->enable)
+ chip->pdata->enable(0);
+ if (chip->pdata->release_resources)
+ chip->pdata->release_resources();
+ kfree(chip);
+ return 0;
+}
+
+static const struct i2c_device_id lp5521_id[] = {
+ { "lp5521", 0 }, /* Three channel chip */
+ { }
+};
+MODULE_DEVICE_TABLE(i2c, lp5521_id);
+
+static struct i2c_driver lp5521_driver = {
+ .driver = {
+ .name = "lp5521",
+ },
+ .probe = lp5521_probe,
+ .remove = lp5521_remove,
+ .id_table = lp5521_id,
+};
+
+static int __init lp5521_init(void)
+{
+ int ret;
+
+ ret = i2c_add_driver(&lp5521_driver);
+
+ if (ret < 0)
+ printk(KERN_ALERT "Adding lp5521 driver failed\n");
+
+ return ret;
+}
+
+static void __exit lp5521_exit(void)
+{
+ i2c_del_driver(&lp5521_driver);
+}
+
+module_init(lp5521_init);
+module_exit(lp5521_exit);
+
+MODULE_AUTHOR("Mathias Nyman, Yuri Zaporozhets, Samu Onkalo");
+MODULE_DESCRIPTION("LP5521 LED engine");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/leds/leds-lp5523.c b/drivers/leds/leds-lp5523.c
new file mode 100644
index 000000000000..0cc4ead2fd8b
--- /dev/null
+++ b/drivers/leds/leds-lp5523.c
@@ -0,0 +1,1069 @@
+/*
+ * lp5523.c - LP5523 LED Driver
+ *
+ * Copyright (C) 2010 Nokia Corporation
+ *
+ * Contact: Samu Onkalo <samu.p.onkalo@nokia.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
+ * 02110-1301 USA
+ */
+
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/i2c.h>
+#include <linux/mutex.h>
+#include <linux/gpio.h>
+#include <linux/interrupt.h>
+#include <linux/delay.h>
+#include <linux/ctype.h>
+#include <linux/spinlock.h>
+#include <linux/wait.h>
+#include <linux/leds.h>
+#include <linux/leds-lp5523.h>
+#include <linux/workqueue.h>
+#include <linux/slab.h>
+
+#define LP5523_REG_ENABLE 0x00
+#define LP5523_REG_OP_MODE 0x01
+#define LP5523_REG_RATIOMETRIC_MSB 0x02
+#define LP5523_REG_RATIOMETRIC_LSB 0x03
+#define LP5523_REG_ENABLE_LEDS_MSB 0x04
+#define LP5523_REG_ENABLE_LEDS_LSB 0x05
+#define LP5523_REG_LED_CNTRL_BASE 0x06
+#define LP5523_REG_LED_PWM_BASE 0x16
+#define LP5523_REG_LED_CURRENT_BASE 0x26
+#define LP5523_REG_CONFIG 0x36
+#define LP5523_REG_CHANNEL1_PC 0x37
+#define LP5523_REG_CHANNEL2_PC 0x38
+#define LP5523_REG_CHANNEL3_PC 0x39
+#define LP5523_REG_STATUS 0x3a
+#define LP5523_REG_GPO 0x3b
+#define LP5523_REG_VARIABLE 0x3c
+#define LP5523_REG_RESET 0x3d
+#define LP5523_REG_TEMP_CTRL 0x3e
+#define LP5523_REG_TEMP_READ 0x3f
+#define LP5523_REG_TEMP_WRITE 0x40
+#define LP5523_REG_LED_TEST_CTRL 0x41
+#define LP5523_REG_LED_TEST_ADC 0x42
+#define LP5523_REG_ENG1_VARIABLE 0x45
+#define LP5523_REG_ENG2_VARIABLE 0x46
+#define LP5523_REG_ENG3_VARIABLE 0x47
+#define LP5523_REG_MASTER_FADER1 0x48
+#define LP5523_REG_MASTER_FADER2 0x49
+#define LP5523_REG_MASTER_FADER3 0x4a
+#define LP5523_REG_CH1_PROG_START 0x4c
+#define LP5523_REG_CH2_PROG_START 0x4d
+#define LP5523_REG_CH3_PROG_START 0x4e
+#define LP5523_REG_PROG_PAGE_SEL 0x4f
+#define LP5523_REG_PROG_MEM 0x50
+
+#define LP5523_CMD_LOAD 0x15 /* 00010101 */
+#define LP5523_CMD_RUN 0x2a /* 00101010 */
+#define LP5523_CMD_DISABLED 0x00 /* 00000000 */
+
+#define LP5523_ENABLE 0x40
+#define LP5523_AUTO_INC 0x40
+#define LP5523_PWR_SAVE 0x20
+#define LP5523_PWM_PWR_SAVE 0x04
+#define LP5523_CP_1 0x08
+#define LP5523_CP_1_5 0x10
+#define LP5523_CP_AUTO 0x18
+#define LP5523_INT_CLK 0x01
+#define LP5523_AUTO_CLK 0x02
+#define LP5523_EN_LEDTEST 0x80
+#define LP5523_LEDTEST_DONE 0x80
+
+#define LP5523_DEFAULT_CURRENT 50 /* microAmps */
+#define LP5523_PROGRAM_LENGTH 32 /* in bytes */
+#define LP5523_PROGRAM_PAGES 6
+#define LP5523_ADC_SHORTCIRC_LIM 80
+
+#define LP5523_LEDS 9
+#define LP5523_ENGINES 3
+
+#define LP5523_ENG_MASK_BASE 0x30 /* 00110000 */
+
+#define LP5523_ENG_STATUS_MASK 0x07 /* 00000111 */
+
+#define LP5523_IRQ_FLAGS IRQF_TRIGGER_FALLING
+
+#define LP5523_EXT_CLK_USED 0x08
+
+#define LED_ACTIVE(mux, led) (!!(mux & (0x0001 << led)))
+#define SHIFT_MASK(id) (((id) - 1) * 2)
+
+struct lp5523_engine {
+ const struct attribute_group *attributes;
+ int id;
+ u8 mode;
+ u8 prog_page;
+ u8 mux_page;
+ u16 led_mux;
+ u8 engine_mask;
+};
+
+struct lp5523_led {
+ int id;
+ u8 chan_nr;
+ u8 led_current;
+ u8 max_current;
+ struct led_classdev cdev;
+ struct work_struct brightness_work;
+ u8 brightness;
+};
+
+struct lp5523_chip {
+ struct mutex lock; /* Serialize control */
+ struct i2c_client *client;
+ struct lp5523_engine engines[LP5523_ENGINES];
+ struct lp5523_led leds[LP5523_LEDS];
+ struct lp5523_platform_data *pdata;
+ u8 num_channels;
+ u8 num_leds;
+};
+
+static inline struct lp5523_led *cdev_to_led(struct led_classdev *cdev)
+{
+ return container_of(cdev, struct lp5523_led, cdev);
+}
+
+static inline struct lp5523_chip *engine_to_lp5523(struct lp5523_engine *engine)
+{
+ return container_of(engine, struct lp5523_chip,
+ engines[engine->id - 1]);
+}
+
+static inline struct lp5523_chip *led_to_lp5523(struct lp5523_led *led)
+{
+ return container_of(led, struct lp5523_chip,
+ leds[led->id]);
+}
+
+static int lp5523_set_mode(struct lp5523_engine *engine, u8 mode);
+static int lp5523_set_engine_mode(struct lp5523_engine *engine, u8 mode);
+static int lp5523_load_program(struct lp5523_engine *engine, u8 *pattern);
+
+static void lp5523_led_brightness_work(struct work_struct *work);
+
+static int lp5523_write(struct i2c_client *client, u8 reg, u8 value)
+{
+ return i2c_smbus_write_byte_data(client, reg, value);
+}
+
+static int lp5523_read(struct i2c_client *client, u8 reg, u8 *buf)
+{
+ s32 ret = i2c_smbus_read_byte_data(client, reg);
+
+ if (ret < 0)
+ return -EIO;
+
+ *buf = ret;
+ return 0;
+}
+
+static int lp5523_detect(struct i2c_client *client)
+{
+ int ret;
+ u8 buf;
+
+ ret = lp5523_write(client, LP5523_REG_ENABLE, 0x40);
+ if (ret)
+ return ret;
+ ret = lp5523_read(client, LP5523_REG_ENABLE, &buf);
+ if (ret)
+ return ret;
+ if (buf == 0x40)
+ return 0;
+ else
+ return -ENODEV;
+}
+
+static int lp5523_configure(struct i2c_client *client)
+{
+ struct lp5523_chip *chip = i2c_get_clientdata(client);
+ int ret = 0;
+ u8 status;
+
+ /* one pattern per engine setting led mux start and stop addresses */
+ u8 pattern[][LP5523_PROGRAM_LENGTH] = {
+ { 0x9c, 0x30, 0x9c, 0xb0, 0x9d, 0x80, 0xd8, 0x00, 0},
+ { 0x9c, 0x40, 0x9c, 0xc0, 0x9d, 0x80, 0xd8, 0x00, 0},
+ { 0x9c, 0x50, 0x9c, 0xd0, 0x9d, 0x80, 0xd8, 0x00, 0},
+ };
+
+ ret |= lp5523_write(client, LP5523_REG_ENABLE, LP5523_ENABLE);
+ /* Chip startup time is 500 us, 1 - 2 ms gives some margin */
+ usleep_range(1000, 2000);
+
+ ret |= lp5523_write(client, LP5523_REG_CONFIG,
+ LP5523_AUTO_INC | LP5523_PWR_SAVE |
+ LP5523_CP_AUTO | LP5523_AUTO_CLK |
+ LP5523_PWM_PWR_SAVE);
+
+ /* turn on all leds */
+ ret |= lp5523_write(client, LP5523_REG_ENABLE_LEDS_MSB, 0x01);
+ ret |= lp5523_write(client, LP5523_REG_ENABLE_LEDS_LSB, 0xff);
+
+ /* hardcode 32 bytes of memory for each engine from program memory */
+ ret |= lp5523_write(client, LP5523_REG_CH1_PROG_START, 0x00);
+ ret |= lp5523_write(client, LP5523_REG_CH2_PROG_START, 0x10);
+ ret |= lp5523_write(client, LP5523_REG_CH3_PROG_START, 0x20);
+
+ /* write led mux address space for each channel */
+ ret |= lp5523_load_program(&chip->engines[0], pattern[0]);
+ ret |= lp5523_load_program(&chip->engines[1], pattern[1]);
+ ret |= lp5523_load_program(&chip->engines[2], pattern[2]);
+
+ if (ret) {
+ dev_err(&client->dev, "could not load mux programs\n");
+ return -1;
+ }
+
+ /* set all engines exec state and mode to run 00101010 */
+ ret |= lp5523_write(client, LP5523_REG_ENABLE,
+ (LP5523_CMD_RUN | LP5523_ENABLE));
+
+ ret |= lp5523_write(client, LP5523_REG_OP_MODE, LP5523_CMD_RUN);
+
+ if (ret) {
+ dev_err(&client->dev, "could not start mux programs\n");
+ return -1;
+ }
+
+ /* Let the programs run for a couple of ms and check the engine status */
+ usleep_range(3000, 6000);
+ lp5523_read(client, LP5523_REG_STATUS, &status);
+ status &= LP5523_ENG_STATUS_MASK;
+
+ if (status == LP5523_ENG_STATUS_MASK) {
+ dev_dbg(&client->dev, "all engines configured\n");
+ } else {
+ dev_info(&client->dev, "status == %x\n", status);
+ dev_err(&client->dev, "cound not configure LED engine\n");
+ return -1;
+ }
+
+ dev_info(&client->dev, "disabling engines\n");
+
+ ret |= lp5523_write(client, LP5523_REG_OP_MODE, LP5523_CMD_DISABLED);
+
+ return ret;
+}
+
+static int lp5523_set_engine_mode(struct lp5523_engine *engine, u8 mode)
+{
+ struct lp5523_chip *chip = engine_to_lp5523(engine);
+ struct i2c_client *client = chip->client;
+ int ret;
+ u8 engine_state;
+
+ ret = lp5523_read(client, LP5523_REG_OP_MODE, &engine_state);
+ if (ret)
+ goto fail;
+
+ engine_state &= ~(engine->engine_mask);
+
+ /* set mode only for this engine */
+ mode &= engine->engine_mask;
+
+ engine_state |= mode;
+
+ ret |= lp5523_write(client, LP5523_REG_OP_MODE, engine_state);
+fail:
+ return ret;
+}
+
+static int lp5523_load_mux(struct lp5523_engine *engine, u16 mux)
+{
+ struct lp5523_chip *chip = engine_to_lp5523(engine);
+ struct i2c_client *client = chip->client;
+ int ret = 0;
+
+ ret |= lp5523_set_engine_mode(engine, LP5523_CMD_LOAD);
+
+ ret |= lp5523_write(client, LP5523_REG_PROG_PAGE_SEL, engine->mux_page);
+ ret |= lp5523_write(client, LP5523_REG_PROG_MEM,
+ (u8)(mux >> 8));
+ ret |= lp5523_write(client, LP5523_REG_PROG_MEM + 1, (u8)(mux));
+ engine->led_mux = mux;
+
+ return ret;
+}
+
+static int lp5523_load_program(struct lp5523_engine *engine, u8 *pattern)
+{
+ struct lp5523_chip *chip = engine_to_lp5523(engine);
+ struct i2c_client *client = chip->client;
+
+ int ret = 0;
+
+ ret |= lp5523_set_engine_mode(engine, LP5523_CMD_LOAD);
+
+ ret |= lp5523_write(client, LP5523_REG_PROG_PAGE_SEL,
+ engine->prog_page);
+ ret |= i2c_smbus_write_i2c_block_data(client, LP5523_REG_PROG_MEM,
+ LP5523_PROGRAM_LENGTH, pattern);
+
+ return ret;
+}
+
+static int lp5523_run_program(struct lp5523_engine *engine)
+{
+ struct lp5523_chip *chip = engine_to_lp5523(engine);
+ struct i2c_client *client = chip->client;
+ int ret;
+
+ ret = lp5523_write(client, LP5523_REG_ENABLE,
+ LP5523_CMD_RUN | LP5523_ENABLE);
+ if (ret)
+ goto fail;
+
+ ret = lp5523_set_engine_mode(engine, LP5523_CMD_RUN);
+fail:
+ return ret;
+}
+
+static int lp5523_mux_parse(const char *buf, u16 *mux, size_t len)
+{
+ int i;
+ u16 tmp_mux = 0;
+ len = len < LP5523_LEDS ? len : LP5523_LEDS;
+ for (i = 0; i < len; i++) {
+ switch (buf[i]) {
+ case '1':
+ tmp_mux |= (1 << i);
+ break;
+ case '0':
+ break;
+ case '\n':
+ i = len;
+ break;
+ default:
+ return -1;
+ }
+ }
+ *mux = tmp_mux;
+
+ return 0;
+}
+
+static void lp5523_mux_to_array(u16 led_mux, char *array)
+{
+ int i, pos = 0;
+ for (i = 0; i < LP5523_LEDS; i++)
+ pos += sprintf(array + pos, "%x", LED_ACTIVE(led_mux, i));
+
+ array[pos] = '\0';
+}
+
+/*--------------------------------------------------------------*/
+/* Sysfs interface */
+/*--------------------------------------------------------------*/
+
+static ssize_t show_engine_leds(struct device *dev,
+ struct device_attribute *attr,
+ char *buf, int nr)
+{
+ struct i2c_client *client = to_i2c_client(dev);
+ struct lp5523_chip *chip = i2c_get_clientdata(client);
+ char mux[LP5523_LEDS + 1];
+
+ lp5523_mux_to_array(chip->engines[nr - 1].led_mux, mux);
+
+ return sprintf(buf, "%s\n", mux);
+}
+
+#define show_leds(nr) \
+static ssize_t show_engine##nr##_leds(struct device *dev, \
+ struct device_attribute *attr, \
+ char *buf) \
+{ \
+ return show_engine_leds(dev, attr, buf, nr); \
+}
+show_leds(1)
+show_leds(2)
+show_leds(3)
+
+static ssize_t store_engine_leds(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t len, int nr)
+{
+ struct i2c_client *client = to_i2c_client(dev);
+ struct lp5523_chip *chip = i2c_get_clientdata(client);
+ u16 mux = 0;
+
+ if (lp5523_mux_parse(buf, &mux, len))
+ return -EINVAL;
+
+ if (lp5523_load_mux(&chip->engines[nr - 1], mux))
+ return -EINVAL;
+
+ return len;
+}
+
+#define store_leds(nr) \
+static ssize_t store_engine##nr##_leds(struct device *dev, \
+ struct device_attribute *attr, \
+ const char *buf, size_t len) \
+{ \
+ return store_engine_leds(dev, attr, buf, len, nr); \
+}
+store_leds(1)
+store_leds(2)
+store_leds(3)
+
+static ssize_t lp5523_selftest(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct i2c_client *client = to_i2c_client(dev);
+ struct lp5523_chip *chip = i2c_get_clientdata(client);
+ int i, ret, pos = 0;
+ int led = 0;
+ u8 status, adc, vdd;
+
+ mutex_lock(&chip->lock);
+
+ ret = lp5523_read(chip->client, LP5523_REG_STATUS, &status);
+ if (ret < 0)
+ goto fail;
+
+ /* Check that ext clock is really in use if requested */
+ if ((chip->pdata) && (chip->pdata->clock_mode == LP5523_CLOCK_EXT))
+ if ((status & LP5523_EXT_CLK_USED) == 0)
+ goto fail;
+
+ /* Measure VDD (i.e. VBAT) first (channel 16 corresponds to VDD) */
+ lp5523_write(chip->client, LP5523_REG_LED_TEST_CTRL,
+ LP5523_EN_LEDTEST | 16);
+ usleep_range(3000, 6000); /* ADC conversion time is typically 2.7 ms */
+ ret = lp5523_read(chip->client, LP5523_REG_STATUS, &status);
+ if (!(status & LP5523_LEDTEST_DONE))
+ usleep_range(3000, 6000); /* Was not ready. Wait a little bit */
+
+ ret |= lp5523_read(chip->client, LP5523_REG_LED_TEST_ADC, &vdd);
+ vdd--; /* There may be some fluctuation in measurement */
+
+ for (i = 0; i < LP5523_LEDS; i++) {
+ /* Skip non-existing channels */
+ if (chip->pdata->led_config[i].led_current == 0)
+ continue;
+
+ /* Set default current */
+ lp5523_write(chip->client,
+ LP5523_REG_LED_CURRENT_BASE + i,
+ chip->pdata->led_config[i].led_current);
+
+ lp5523_write(chip->client, LP5523_REG_LED_PWM_BASE + i, 0xff);
+ /* let current stabilize 2 - 4ms before measurements start */
+ usleep_range(2000, 4000);
+ lp5523_write(chip->client,
+ LP5523_REG_LED_TEST_CTRL,
+ LP5523_EN_LEDTEST | i);
+ /* ADC conversion time is 2.7 ms typically */
+ usleep_range(3000, 6000);
+ ret = lp5523_read(chip->client, LP5523_REG_STATUS, &status);
+ if (!(status & LP5523_LEDTEST_DONE))
+ usleep_range(3000, 6000);/* Was not ready. Wait. */
+ ret |= lp5523_read(chip->client, LP5523_REG_LED_TEST_ADC, &adc);
+
+ if (adc >= vdd || adc < LP5523_ADC_SHORTCIRC_LIM)
+ pos += sprintf(buf + pos, "LED %d FAIL\n", i);
+
+ lp5523_write(chip->client, LP5523_REG_LED_PWM_BASE + i, 0x00);
+
+ /* Restore current */
+ lp5523_write(chip->client,
+ LP5523_REG_LED_CURRENT_BASE + i,
+ chip->leds[led].led_current);
+ led++;
+ }
+ if (pos == 0)
+ pos = sprintf(buf, "OK\n");
+ goto release_lock;
+fail:
+ pos = sprintf(buf, "FAIL\n");
+
+release_lock:
+ mutex_unlock(&chip->lock);
+
+ return pos;
+}
+
+static void lp5523_set_brightness(struct led_classdev *cdev,
+ enum led_brightness brightness)
+{
+ struct lp5523_led *led = cdev_to_led(cdev);
+
+ led->brightness = (u8)brightness;
+
+ schedule_work(&led->brightness_work);
+}
+
+static void lp5523_led_brightness_work(struct work_struct *work)
+{
+ struct lp5523_led *led = container_of(work,
+ struct lp5523_led,
+ brightness_work);
+ struct lp5523_chip *chip = led_to_lp5523(led);
+ struct i2c_client *client = chip->client;
+
+ mutex_lock(&chip->lock);
+
+ lp5523_write(client, LP5523_REG_LED_PWM_BASE + led->chan_nr,
+ led->brightness);
+
+ mutex_unlock(&chip->lock);
+}
+
+static int lp5523_do_store_load(struct lp5523_engine *engine,
+ const char *buf, size_t len)
+{
+ struct lp5523_chip *chip = engine_to_lp5523(engine);
+ struct i2c_client *client = chip->client;
+ int ret, nrchars, offset = 0, i = 0;
+ char c[3];
+ unsigned cmd;
+ u8 pattern[LP5523_PROGRAM_LENGTH] = {0};
+
+ while ((offset < len - 1) && (i < LP5523_PROGRAM_LENGTH)) {
+ /* separate sscanfs because the length specifier works only for %s */
+ ret = sscanf(buf + offset, "%2s%n ", c, &nrchars);
+ ret = sscanf(c, "%2x", &cmd);
+ if (ret != 1)
+ goto fail;
+ pattern[i] = (u8)cmd;
+
+ offset += nrchars;
+ i++;
+ }
+
+ /* Each instruction is 16bit long. Check that length is even */
+ if (i % 2)
+ goto fail;
+
+ mutex_lock(&chip->lock);
+
+ ret = lp5523_load_program(engine, pattern);
+ mutex_unlock(&chip->lock);
+
+ if (ret) {
+ dev_err(&client->dev, "failed loading pattern\n");
+ return ret;
+ }
+
+ return len;
+fail:
+ dev_err(&client->dev, "wrong pattern format\n");
+ return -EINVAL;
+}
+
+static ssize_t store_engine_load(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t len, int nr)
+{
+ struct i2c_client *client = to_i2c_client(dev);
+ struct lp5523_chip *chip = i2c_get_clientdata(client);
+ return lp5523_do_store_load(&chip->engines[nr - 1], buf, len);
+}
+
+#define store_load(nr) \
+static ssize_t store_engine##nr##_load(struct device *dev, \
+ struct device_attribute *attr, \
+ const char *buf, size_t len) \
+{ \
+ return store_engine_load(dev, attr, buf, len, nr); \
+}
+store_load(1)
+store_load(2)
+store_load(3)
+
+static ssize_t show_engine_mode(struct device *dev,
+ struct device_attribute *attr,
+ char *buf, int nr)
+{
+ struct i2c_client *client = to_i2c_client(dev);
+ struct lp5523_chip *chip = i2c_get_clientdata(client);
+ switch (chip->engines[nr - 1].mode) {
+ case LP5523_CMD_RUN:
+ return sprintf(buf, "run\n");
+ case LP5523_CMD_LOAD:
+ return sprintf(buf, "load\n");
+ case LP5523_CMD_DISABLED:
+ return sprintf(buf, "disabled\n");
+ default:
+ return sprintf(buf, "disabled\n");
+ }
+}
+
+#define show_mode(nr) \
+static ssize_t show_engine##nr##_mode(struct device *dev, \
+ struct device_attribute *attr, \
+ char *buf) \
+{ \
+ return show_engine_mode(dev, attr, buf, nr); \
+}
+show_mode(1)
+show_mode(2)
+show_mode(3)
+
+static ssize_t store_engine_mode(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t len, int nr)
+{
+ struct i2c_client *client = to_i2c_client(dev);
+ struct lp5523_chip *chip = i2c_get_clientdata(client);
+ struct lp5523_engine *engine = &chip->engines[nr - 1];
+ mutex_lock(&chip->lock);
+
+ if (!strncmp(buf, "run", 3))
+ lp5523_set_mode(engine, LP5523_CMD_RUN);
+ else if (!strncmp(buf, "load", 4))
+ lp5523_set_mode(engine, LP5523_CMD_LOAD);
+ else if (!strncmp(buf, "disabled", 8))
+ lp5523_set_mode(engine, LP5523_CMD_DISABLED);
+
+ mutex_unlock(&chip->lock);
+ return len;
+}
+
+#define store_mode(nr) \
+static ssize_t store_engine##nr##_mode(struct device *dev, \
+ struct device_attribute *attr, \
+ const char *buf, size_t len) \
+{ \
+ return store_engine_mode(dev, attr, buf, len, nr); \
+}
+store_mode(1)
+store_mode(2)
+store_mode(3)
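
Taken together, the mode and load attributes give userspace a three-step programming flow: switch the engine to "load" (which is also what makes the engineN_load and engineN_leds files appear, see lp5523_set_mode() further down), write the program as hex byte pairs with an even total byte count, then switch to "run". The following userspace sketch is purely illustrative; the sysfs path and the pattern bytes are placeholder assumptions, not values taken from this patch.

/* Hypothetical userspace sketch (not part of the patch). */
#include <stdio.h>

#define LP5523_DEV "/sys/bus/i2c/devices/2-0032"	/* assumed device path */

static int write_attr(const char *attr, const char *val)
{
	char path[128];
	FILE *f;

	snprintf(path, sizeof(path), LP5523_DEV "/%s", attr);
	f = fopen(path, "w");
	if (!f)
		return -1;
	fputs(val, f);
	return fclose(f);
}

int main(void)
{
	write_attr("engine1_mode", "load");
	/* arbitrary example bytes: six bytes = three 16-bit instructions */
	write_attr("engine1_load", "9d80 40ff 6000");
	write_attr("engine1_mode", "run");
	return 0;
}

A real program would normally also set the engine-to-LED mux through the matching engineN_leds attribute before starting the engine.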
+
+static ssize_t show_max_current(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct led_classdev *led_cdev = dev_get_drvdata(dev);
+ struct lp5523_led *led = cdev_to_led(led_cdev);
+
+ return sprintf(buf, "%d\n", led->max_current);
+}
+
+static ssize_t show_current(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct led_classdev *led_cdev = dev_get_drvdata(dev);
+ struct lp5523_led *led = cdev_to_led(led_cdev);
+
+ return sprintf(buf, "%d\n", led->led_current);
+}
+
+static ssize_t store_current(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t len)
+{
+ struct led_classdev *led_cdev = dev_get_drvdata(dev);
+ struct lp5523_led *led = cdev_to_led(led_cdev);
+ struct lp5523_chip *chip = led_to_lp5523(led);
+ ssize_t ret;
+ unsigned long curr;
+
+ if (strict_strtoul(buf, 0, &curr))
+ return -EINVAL;
+
+ if (curr > led->max_current)
+ return -EINVAL;
+
+ mutex_lock(&chip->lock);
+ ret = lp5523_write(chip->client,
+ LP5523_REG_LED_CURRENT_BASE + led->chan_nr,
+ (u8)curr);
+ mutex_unlock(&chip->lock);
+
+ if (ret < 0)
+ return ret;
+
+ led->led_current = (u8)curr;
+
+ return len;
+}
+
+/* led class device attributes */
+static DEVICE_ATTR(led_current, S_IRUGO | S_IWUGO, show_current, store_current);
+static DEVICE_ATTR(max_current, S_IRUGO, show_max_current, NULL);
+
+static struct attribute *lp5523_led_attributes[] = {
+ &dev_attr_led_current.attr,
+ &dev_attr_max_current.attr,
+ NULL,
+};
+
+static struct attribute_group lp5523_led_attribute_group = {
+ .attrs = lp5523_led_attributes
+};
+
+/* device attributes */
+static DEVICE_ATTR(engine1_mode, S_IRUGO | S_IWUGO,
+ show_engine1_mode, store_engine1_mode);
+static DEVICE_ATTR(engine2_mode, S_IRUGO | S_IWUGO,
+ show_engine2_mode, store_engine2_mode);
+static DEVICE_ATTR(engine3_mode, S_IRUGO | S_IWUGO,
+ show_engine3_mode, store_engine3_mode);
+static DEVICE_ATTR(engine1_leds, S_IRUGO | S_IWUGO,
+ show_engine1_leds, store_engine1_leds);
+static DEVICE_ATTR(engine2_leds, S_IRUGO | S_IWUGO,
+ show_engine2_leds, store_engine2_leds);
+static DEVICE_ATTR(engine3_leds, S_IRUGO | S_IWUGO,
+ show_engine3_leds, store_engine3_leds);
+static DEVICE_ATTR(engine1_load, S_IWUGO, NULL, store_engine1_load);
+static DEVICE_ATTR(engine2_load, S_IWUGO, NULL, store_engine2_load);
+static DEVICE_ATTR(engine3_load, S_IWUGO, NULL, store_engine3_load);
+static DEVICE_ATTR(selftest, S_IRUGO, lp5523_selftest, NULL);
+
+static struct attribute *lp5523_attributes[] = {
+ &dev_attr_engine1_mode.attr,
+ &dev_attr_engine2_mode.attr,
+ &dev_attr_engine3_mode.attr,
+ &dev_attr_selftest.attr,
+ NULL
+};
+
+static struct attribute *lp5523_engine1_attributes[] = {
+ &dev_attr_engine1_load.attr,
+ &dev_attr_engine1_leds.attr,
+ NULL
+};
+
+static struct attribute *lp5523_engine2_attributes[] = {
+ &dev_attr_engine2_load.attr,
+ &dev_attr_engine2_leds.attr,
+ NULL
+};
+
+static struct attribute *lp5523_engine3_attributes[] = {
+ &dev_attr_engine3_load.attr,
+ &dev_attr_engine3_leds.attr,
+ NULL
+};
+
+static const struct attribute_group lp5523_group = {
+ .attrs = lp5523_attributes,
+};
+
+static const struct attribute_group lp5523_engine_group[] = {
+ {.attrs = lp5523_engine1_attributes },
+ {.attrs = lp5523_engine2_attributes },
+ {.attrs = lp5523_engine3_attributes },
+};
+
+static int lp5523_register_sysfs(struct i2c_client *client)
+{
+ struct device *dev = &client->dev;
+ int ret;
+
+ ret = sysfs_create_group(&dev->kobj, &lp5523_group);
+ if (ret < 0)
+ return ret;
+
+ return 0;
+}
+
+static void lp5523_unregister_sysfs(struct i2c_client *client)
+{
+ struct lp5523_chip *chip = i2c_get_clientdata(client);
+ struct device *dev = &client->dev;
+ int i;
+
+ sysfs_remove_group(&dev->kobj, &lp5523_group);
+
+ for (i = 0; i < ARRAY_SIZE(chip->engines); i++)
+ if (chip->engines[i].mode == LP5523_CMD_LOAD)
+ sysfs_remove_group(&dev->kobj, &lp5523_engine_group[i]);
+
+ for (i = 0; i < chip->num_leds; i++)
+ sysfs_remove_group(&chip->leds[i].cdev.dev->kobj,
+ &lp5523_led_attribute_group);
+}
+
+/*--------------------------------------------------------------*/
+/* Set chip operating mode */
+/*--------------------------------------------------------------*/
+static int lp5523_set_mode(struct lp5523_engine *engine, u8 mode)
+{
+ /* engine to chip */
+ struct lp5523_chip *chip = engine_to_lp5523(engine);
+ struct i2c_client *client = chip->client;
+ struct device *dev = &client->dev;
+ int ret = 0;
+
+ /* if already in that mode, do nothing (RUN is always re-applied) */
+ if (mode == engine->mode && mode != LP5523_CMD_RUN)
+ return 0;
+
+ if (mode == LP5523_CMD_RUN) {
+ ret = lp5523_run_program(engine);
+ } else if (mode == LP5523_CMD_LOAD) {
+ lp5523_set_engine_mode(engine, LP5523_CMD_DISABLED);
+ lp5523_set_engine_mode(engine, LP5523_CMD_LOAD);
+
+ ret = sysfs_create_group(&dev->kobj, engine->attributes);
+ if (ret)
+ return ret;
+ } else if (mode == LP5523_CMD_DISABLED) {
+ lp5523_set_engine_mode(engine, LP5523_CMD_DISABLED);
+ }
+
+ /* remove load attribute from sysfs if not in load mode */
+ if (engine->mode == LP5523_CMD_LOAD && mode != LP5523_CMD_LOAD)
+ sysfs_remove_group(&dev->kobj, engine->attributes);
+
+ engine->mode = mode;
+
+ return ret;
+}
+
+/*--------------------------------------------------------------*/
+/* Probe, Attach, Remove */
+/*--------------------------------------------------------------*/
+static int __init lp5523_init_engine(struct lp5523_engine *engine, int id)
+{
+ if (id < 1 || id > LP5523_ENGINES)
+ return -1;
+ engine->id = id;
+ engine->engine_mask = LP5523_ENG_MASK_BASE >> SHIFT_MASK(id);
+ engine->prog_page = id - 1;
+ engine->mux_page = id + 2;
+ engine->attributes = &lp5523_engine_group[id - 1];
+
+ return 0;
+}
+
+static int __init lp5523_init_led(struct lp5523_led *led, struct device *dev,
+ int chan, struct lp5523_platform_data *pdata)
+{
+ char name[32];
+ int res;
+
+ if (chan >= LP5523_LEDS)
+ return -EINVAL;
+
+ if (pdata->led_config[chan].led_current) {
+ led->led_current = pdata->led_config[chan].led_current;
+ led->max_current = pdata->led_config[chan].max_current;
+ led->chan_nr = pdata->led_config[chan].chan_nr;
+
+ if (led->chan_nr >= LP5523_LEDS) {
+ dev_err(dev, "Use channel numbers between 0 and %d\n",
+ LP5523_LEDS - 1);
+ return -EINVAL;
+ }
+
+ snprintf(name, 32, "lp5523:channel%d", chan);
+
+ led->cdev.name = name;
+ led->cdev.brightness_set = lp5523_set_brightness;
+ res = led_classdev_register(dev, &led->cdev);
+ if (res < 0) {
+ dev_err(dev, "couldn't register led on channel %d\n",
+ chan);
+ return res;
+ }
+ res = sysfs_create_group(&led->cdev.dev->kobj,
+ &lp5523_led_attribute_group);
+ if (res < 0) {
+ dev_err(dev, "couldn't register current attribute\n");
+ led_classdev_unregister(&led->cdev);
+ return res;
+ }
+ } else {
+ led->led_current = 0;
+ }
+ return 0;
+}
+
+static struct i2c_driver lp5523_driver;
+
+static int lp5523_probe(struct i2c_client *client,
+ const struct i2c_device_id *id)
+{
+ struct lp5523_chip *chip;
+ struct lp5523_platform_data *pdata;
+ int ret, i, led;
+
+ chip = kzalloc(sizeof(*chip), GFP_KERNEL);
+ if (!chip)
+ return -ENOMEM;
+
+ i2c_set_clientdata(client, chip);
+ chip->client = client;
+
+ pdata = client->dev.platform_data;
+
+ if (!pdata) {
+ dev_err(&client->dev, "no platform data\n");
+ ret = -EINVAL;
+ goto fail1;
+ }
+
+ mutex_init(&chip->lock);
+
+ chip->pdata = pdata;
+
+ if (pdata->setup_resources) {
+ ret = pdata->setup_resources();
+ if (ret < 0)
+ goto fail1;
+ }
+
+ if (pdata->enable) {
+ pdata->enable(0);
+ usleep_range(1000, 2000); /* Keep enable down at least 1ms */
+ pdata->enable(1);
+ usleep_range(1000, 2000); /* 500us abs min. */
+ }
+
+ lp5523_write(client, LP5523_REG_RESET, 0xff);
+ /* Exact value is not available; 10 - 20 ms appears to be enough for reset. */
+ usleep_range(10000, 20000);
+ ret = lp5523_detect(client);
+ if (ret)
+ goto fail2;
+
+ dev_info(&client->dev, "LP5523 programmable LED chip found\n");
+
+ /* Initialize engines */
+ for (i = 0; i < ARRAY_SIZE(chip->engines); i++) {
+ ret = lp5523_init_engine(&chip->engines[i], i + 1);
+ if (ret) {
+ dev_err(&client->dev, "error initializing engine\n");
+ goto fail2;
+ }
+ }
+ ret = lp5523_configure(client);
+ if (ret < 0) {
+ dev_err(&client->dev, "error configuring chip\n");
+ goto fail2;
+ }
+
+ /* Initialize leds */
+ chip->num_channels = pdata->num_channels;
+ chip->num_leds = 0;
+ led = 0;
+ for (i = 0; i < pdata->num_channels; i++) {
+ /* Do not initialize channels that are not connected */
+ if (pdata->led_config[i].led_current == 0)
+ continue;
+
+ ret = lp5523_init_led(&chip->leds[led], &client->dev, i, pdata);
+ if (ret) {
+ dev_err(&client->dev, "error initializing leds\n");
+ goto fail3;
+ }
+ chip->num_leds++;
+
+ chip->leds[led].id = led;
+ /* Set LED current */
+ lp5523_write(client,
+ LP5523_REG_LED_CURRENT_BASE + chip->leds[led].chan_nr,
+ chip->leds[led].led_current);
+
+ INIT_WORK(&(chip->leds[led].brightness_work),
+ lp5523_led_brightness_work);
+
+ led++;
+ }
+
+ ret = lp5523_register_sysfs(client);
+ if (ret) {
+ dev_err(&client->dev, "registering sysfs failed\n");
+ goto fail3;
+ }
+ return ret;
+fail3:
+ for (i = 0; i < chip->num_leds; i++) {
+ led_classdev_unregister(&chip->leds[i].cdev);
+ cancel_work_sync(&chip->leds[i].brightness_work);
+ }
+fail2:
+ if (pdata->enable)
+ pdata->enable(0);
+ if (pdata->release_resources)
+ pdata->release_resources();
+fail1:
+ kfree(chip);
+ return ret;
+}
+
+static int lp5523_remove(struct i2c_client *client)
+{
+ struct lp5523_chip *chip = i2c_get_clientdata(client);
+ int i;
+
+ lp5523_unregister_sysfs(client);
+
+ for (i = 0; i < chip->num_leds; i++) {
+ led_classdev_unregister(&chip->leds[i].cdev);
+ cancel_work_sync(&chip->leds[i].brightness_work);
+ }
+
+ if (chip->pdata->enable)
+ chip->pdata->enable(0);
+ if (chip->pdata->release_resources)
+ chip->pdata->release_resources();
+ kfree(chip);
+ return 0;
+}
+
+static const struct i2c_device_id lp5523_id[] = {
+ { "lp5523", 0 },
+ { }
+};
+
+MODULE_DEVICE_TABLE(i2c, lp5523_id);
+
+static struct i2c_driver lp5523_driver = {
+ .driver = {
+ .name = "lp5523",
+ },
+ .probe = lp5523_probe,
+ .remove = lp5523_remove,
+ .id_table = lp5523_id,
+};
+
+static int __init lp5523_init(void)
+{
+ int ret;
+
+ ret = i2c_add_driver(&lp5523_driver);
+
+ if (ret < 0)
+ printk(KERN_ERR "Adding lp5523 driver failed\n");
+
+ return ret;
+}
+
+static void __exit lp5523_exit(void)
+{
+ i2c_del_driver(&lp5523_driver);
+}
+
+module_init(lp5523_init);
+module_exit(lp5523_exit);
+
+MODULE_AUTHOR("Mathias Nyman <mathias.nyman@nokia.com>");
+MODULE_DESCRIPTION("LP5523 LED engine");
+MODULE_LICENSE("GPL");
diff --git a/drivers/leds/leds-net5501.c b/drivers/leds/leds-net5501.c
index 3063f591f0dc..1739557a9038 100644
--- a/drivers/leds/leds-net5501.c
+++ b/drivers/leds/leds-net5501.c
@@ -92,3 +92,5 @@ unmap:
}
arch_initcall(soekris_init);
+
+MODULE_LICENSE("GPL");
diff --git a/drivers/leds/leds-ss4200.c b/drivers/leds/leds-ss4200.c
index a688293abd0b..614ebebaaa28 100644
--- a/drivers/leds/leds-ss4200.c
+++ b/drivers/leds/leds-ss4200.c
@@ -102,6 +102,7 @@ static struct dmi_system_id __initdata nas_led_whitelist[] = {
DMI_MATCH(DMI_PRODUCT_VERSION, "1.00.00")
}
},
+ {}
};
/*
diff --git a/drivers/leds/ledtrig-timer.c b/drivers/leds/ledtrig-timer.c
index 82b77bd482ff..b09bcbeade9c 100644
--- a/drivers/leds/ledtrig-timer.c
+++ b/drivers/leds/ledtrig-timer.c
@@ -12,73 +12,25 @@
*/
#include <linux/module.h>
-#include <linux/jiffies.h>
#include <linux/kernel.h>
#include <linux/init.h>
-#include <linux/list.h>
-#include <linux/spinlock.h>
#include <linux/device.h>
-#include <linux/sysdev.h>
-#include <linux/timer.h>
#include <linux/ctype.h>
#include <linux/leds.h>
-#include <linux/slab.h>
#include "leds.h"
-struct timer_trig_data {
- int brightness_on; /* LED brightness during "on" period.
- * (LED_OFF < brightness_on <= LED_FULL)
- */
- unsigned long delay_on; /* milliseconds on */
- unsigned long delay_off; /* milliseconds off */
- struct timer_list timer;
-};
-
-static void led_timer_function(unsigned long data)
-{
- struct led_classdev *led_cdev = (struct led_classdev *) data;
- struct timer_trig_data *timer_data = led_cdev->trigger_data;
- unsigned long brightness;
- unsigned long delay;
-
- if (!timer_data->delay_on || !timer_data->delay_off) {
- led_set_brightness(led_cdev, LED_OFF);
- return;
- }
-
- brightness = led_get_brightness(led_cdev);
- if (!brightness) {
- /* Time to switch the LED on. */
- brightness = timer_data->brightness_on;
- delay = timer_data->delay_on;
- } else {
- /* Store the current brightness value to be able
- * to restore it when the delay_off period is over.
- */
- timer_data->brightness_on = brightness;
- brightness = LED_OFF;
- delay = timer_data->delay_off;
- }
-
- led_set_brightness(led_cdev, brightness);
-
- mod_timer(&timer_data->timer, jiffies + msecs_to_jiffies(delay));
-}
-
static ssize_t led_delay_on_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct led_classdev *led_cdev = dev_get_drvdata(dev);
- struct timer_trig_data *timer_data = led_cdev->trigger_data;
- return sprintf(buf, "%lu\n", timer_data->delay_on);
+ return sprintf(buf, "%lu\n", led_cdev->blink_delay_on);
}
static ssize_t led_delay_on_store(struct device *dev,
struct device_attribute *attr, const char *buf, size_t size)
{
struct led_classdev *led_cdev = dev_get_drvdata(dev);
- struct timer_trig_data *timer_data = led_cdev->trigger_data;
int ret = -EINVAL;
char *after;
unsigned long state = simple_strtoul(buf, &after, 10);
@@ -88,21 +40,7 @@ static ssize_t led_delay_on_store(struct device *dev,
count++;
if (count == size) {
- if (timer_data->delay_on != state) {
- /* the new value differs from the previous */
- timer_data->delay_on = state;
-
- /* deactivate previous settings */
- del_timer_sync(&timer_data->timer);
-
- /* try to activate hardware acceleration, if any */
- if (!led_cdev->blink_set ||
- led_cdev->blink_set(led_cdev,
- &timer_data->delay_on, &timer_data->delay_off)) {
- /* no hardware acceleration, blink via timer */
- mod_timer(&timer_data->timer, jiffies + 1);
- }
- }
+ led_blink_set(led_cdev, &state, &led_cdev->blink_delay_off);
ret = count;
}
@@ -113,16 +51,14 @@ static ssize_t led_delay_off_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct led_classdev *led_cdev = dev_get_drvdata(dev);
- struct timer_trig_data *timer_data = led_cdev->trigger_data;
- return sprintf(buf, "%lu\n", timer_data->delay_off);
+ return sprintf(buf, "%lu\n", led_cdev->blink_delay_off);
}
static ssize_t led_delay_off_store(struct device *dev,
struct device_attribute *attr, const char *buf, size_t size)
{
struct led_classdev *led_cdev = dev_get_drvdata(dev);
- struct timer_trig_data *timer_data = led_cdev->trigger_data;
int ret = -EINVAL;
char *after;
unsigned long state = simple_strtoul(buf, &after, 10);
@@ -132,21 +68,7 @@ static ssize_t led_delay_off_store(struct device *dev,
count++;
if (count == size) {
- if (timer_data->delay_off != state) {
- /* the new value differs from the previous */
- timer_data->delay_off = state;
-
- /* deactivate previous settings */
- del_timer_sync(&timer_data->timer);
-
- /* try to activate hardware acceleration, if any */
- if (!led_cdev->blink_set ||
- led_cdev->blink_set(led_cdev,
- &timer_data->delay_on, &timer_data->delay_off)) {
- /* no hardware acceleration, blink via timer */
- mod_timer(&timer_data->timer, jiffies + 1);
- }
- }
+ led_blink_set(led_cdev, &led_cdev->blink_delay_on, &state);
ret = count;
}
@@ -158,60 +80,34 @@ static DEVICE_ATTR(delay_off, 0644, led_delay_off_show, led_delay_off_store);
static void timer_trig_activate(struct led_classdev *led_cdev)
{
- struct timer_trig_data *timer_data;
int rc;
- timer_data = kzalloc(sizeof(struct timer_trig_data), GFP_KERNEL);
- if (!timer_data)
- return;
-
- timer_data->brightness_on = led_get_brightness(led_cdev);
- if (timer_data->brightness_on == LED_OFF)
- timer_data->brightness_on = led_cdev->max_brightness;
- led_cdev->trigger_data = timer_data;
-
- init_timer(&timer_data->timer);
- timer_data->timer.function = led_timer_function;
- timer_data->timer.data = (unsigned long) led_cdev;
+ led_cdev->trigger_data = NULL;
rc = device_create_file(led_cdev->dev, &dev_attr_delay_on);
if (rc)
- goto err_out;
+ return;
rc = device_create_file(led_cdev->dev, &dev_attr_delay_off);
if (rc)
goto err_out_delayon;
- /* If there is hardware support for blinking, start one
- * user friendly blink rate chosen by the driver.
- */
- if (led_cdev->blink_set)
- led_cdev->blink_set(led_cdev,
- &timer_data->delay_on, &timer_data->delay_off);
+ led_cdev->trigger_data = (void *)1;
return;
err_out_delayon:
device_remove_file(led_cdev->dev, &dev_attr_delay_on);
-err_out:
- led_cdev->trigger_data = NULL;
- kfree(timer_data);
}
static void timer_trig_deactivate(struct led_classdev *led_cdev)
{
- struct timer_trig_data *timer_data = led_cdev->trigger_data;
- unsigned long on = 0, off = 0;
-
- if (timer_data) {
+ if (led_cdev->trigger_data) {
device_remove_file(led_cdev->dev, &dev_attr_delay_on);
device_remove_file(led_cdev->dev, &dev_attr_delay_off);
- del_timer_sync(&timer_data->timer);
- kfree(timer_data);
}
- /* If there is hardware support for blinking, stop it */
- if (led_cdev->blink_set)
- led_cdev->blink_set(led_cdev, &on, &off);
+ /* Stop blinking */
+ led_brightness_set(led_cdev, LED_OFF);
}
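
The rework drops the trigger's private timer_trig_data and timer entirely and leans on infrastructure added to the LED core elsewhere in this series: blink_delay_on/blink_delay_off fields in struct led_classdev and a led_blink_set() helper. Roughly, the helper is expected to take over the logic the old store handlers carried locally, preferring a driver's hardware blink hook and otherwise blinking from a core-owned timer. The sketch below is an approximation for orientation only, not the upstream implementation.

/* Approximate contract of the core helper (simplified sketch). */
#include <linux/leds.h>

void led_blink_set(struct led_classdev *led_cdev,
		   unsigned long *delay_on, unsigned long *delay_off)
{
	/* Prefer hardware-accelerated blinking when the driver offers it. */
	if (led_cdev->blink_set &&
	    !led_cdev->blink_set(led_cdev, delay_on, delay_off))
		return;

	/*
	 * Otherwise remember the delays (so the show() handlers above can
	 * report them) and blink from a timer owned by the LED core.
	 */
	led_cdev->blink_delay_on = *delay_on;
	led_cdev->blink_delay_off = *delay_off;
	/* ... arm the core-owned blink timer here ... */
}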
static struct led_trigger timer_led_trigger = {
diff --git a/drivers/macintosh/Kconfig b/drivers/macintosh/Kconfig
index fd85bde283a0..fa51af11c6f1 100644
--- a/drivers/macintosh/Kconfig
+++ b/drivers/macintosh/Kconfig
@@ -102,6 +102,7 @@ config ADB_PMU_LED
config ADB_PMU_LED_IDE
bool "Use front LED as IDE LED by default"
depends on ADB_PMU_LED
+ depends on LEDS_CLASS
select LEDS_TRIGGERS
select LEDS_TRIGGER_IDE_DISK
help
@@ -256,4 +257,30 @@ config PMAC_RACKMETER
This driver provides some support to control the front panel
blue LEDs "vu-meter" of the XServer macs.
+config SENSORS_AMS
+ tristate "Apple Motion Sensor driver"
+ depends on PPC_PMAC && !PPC64 && INPUT && ((ADB_PMU && I2C = y) || (ADB_PMU && !I2C) || I2C) && EXPERIMENTAL
+ select INPUT_POLLDEV
+ help
+ Support for the motion sensor included in PowerBooks. Includes
+ implementations for PMU and I2C.
+
+ This driver can also be built as a module. If so, the module
+ will be called ams.
+
+config SENSORS_AMS_PMU
+ bool "PMU variant"
+ depends on SENSORS_AMS && ADB_PMU
+ default y
+ help
+ PMU variant of motion sensor, found in late 2005 PowerBooks.
+
+config SENSORS_AMS_I2C
+ bool "I2C variant"
+ depends on SENSORS_AMS && I2C
+ default y
+ help
+ I2C variant of motion sensor, found in early 2005 PowerBooks and
+ iBooks.
+
endif # MACINTOSH_DRIVERS
diff --git a/drivers/macintosh/Makefile b/drivers/macintosh/Makefile
index e3132efa17c0..6652a6ebb6fa 100644
--- a/drivers/macintosh/Makefile
+++ b/drivers/macintosh/Makefile
@@ -48,3 +48,5 @@ obj-$(CONFIG_WINDFARM_PM121) += windfarm_pm121.o windfarm_smu_sat.o \
windfarm_max6690_sensor.o \
windfarm_lm75_sensor.o windfarm_pid.o
obj-$(CONFIG_PMAC_RACKMETER) += rack-meter.o
+
+obj-$(CONFIG_SENSORS_AMS) += ams/
diff --git a/drivers/macintosh/adb-iop.c b/drivers/macintosh/adb-iop.c
index 444696625171..f5f4da3d0b67 100644
--- a/drivers/macintosh/adb-iop.c
+++ b/drivers/macintosh/adb-iop.c
@@ -80,7 +80,7 @@ static void adb_iop_end_req(struct adb_request *req, int state)
static void adb_iop_complete(struct iop_msg *msg)
{
struct adb_request *req;
- uint flags;
+ unsigned long flags;
local_irq_save(flags);
@@ -103,7 +103,7 @@ static void adb_iop_listen(struct iop_msg *msg)
{
struct adb_iopmsg *amsg = (struct adb_iopmsg *) msg->message;
struct adb_request *req;
- uint flags;
+ unsigned long flags;
#ifdef DEBUG_ADB_IOP
int i;
#endif
diff --git a/drivers/hwmon/ams/Makefile b/drivers/macintosh/ams/Makefile
index 41c95b2089dc..41c95b2089dc 100644
--- a/drivers/hwmon/ams/Makefile
+++ b/drivers/macintosh/ams/Makefile
diff --git a/drivers/hwmon/ams/ams-core.c b/drivers/macintosh/ams/ams-core.c
index 2ad62c339cd2..2ad62c339cd2 100644
--- a/drivers/hwmon/ams/ams-core.c
+++ b/drivers/macintosh/ams/ams-core.c
diff --git a/drivers/hwmon/ams/ams-i2c.c b/drivers/macintosh/ams/ams-i2c.c
index abeecd27b484..abeecd27b484 100644
--- a/drivers/hwmon/ams/ams-i2c.c
+++ b/drivers/macintosh/ams/ams-i2c.c
diff --git a/drivers/hwmon/ams/ams-input.c b/drivers/macintosh/ams/ams-input.c
index 8a712392cd38..8a712392cd38 100644
--- a/drivers/hwmon/ams/ams-input.c
+++ b/drivers/macintosh/ams/ams-input.c
diff --git a/drivers/hwmon/ams/ams-pmu.c b/drivers/macintosh/ams/ams-pmu.c
index 4f61b3ee1b08..4f61b3ee1b08 100644
--- a/drivers/hwmon/ams/ams-pmu.c
+++ b/drivers/macintosh/ams/ams-pmu.c
diff --git a/drivers/hwmon/ams/ams.h b/drivers/macintosh/ams/ams.h
index 90f094d45450..90f094d45450 100644
--- a/drivers/hwmon/ams/ams.h
+++ b/drivers/macintosh/ams/ams.h
diff --git a/drivers/macintosh/windfarm_pm121.c b/drivers/macintosh/windfarm_pm121.c
index 947d4afa25ca..30e6195e19d4 100644
--- a/drivers/macintosh/windfarm_pm121.c
+++ b/drivers/macintosh/windfarm_pm121.c
@@ -482,7 +482,7 @@ static s32 pm121_correct(s32 new_setpoint,
new_min += correction->offset;
new_min = (new_min >> 16) + min;
- return max(new_setpoint, max(new_min, 0));
+ return max3(new_setpoint, new_min, 0);
}
static s32 pm121_connect(unsigned int control_id, s32 setpoint)
diff --git a/drivers/md/bitmap.c b/drivers/md/bitmap.c
index e4fb58db5454..5a1ffe3527aa 100644
--- a/drivers/md/bitmap.c
+++ b/drivers/md/bitmap.c
@@ -212,7 +212,7 @@ static struct page *read_sb_page(mddev_t *mddev, loff_t offset,
target = rdev->sb_start + offset + index * (PAGE_SIZE/512);
- if (sync_page_io(rdev->bdev, target,
+ if (sync_page_io(rdev, target,
roundup(size, bdev_logical_block_size(rdev->bdev)),
page, READ)) {
page->index = index;
@@ -343,7 +343,7 @@ static void write_page(struct bitmap *bitmap, struct page *page, int wait)
atomic_inc(&bitmap->pending_writes);
set_buffer_locked(bh);
set_buffer_mapped(bh);
- submit_bh(WRITE, bh);
+ submit_bh(WRITE | REQ_UNPLUG | REQ_SYNC, bh);
bh = bh->b_this_page;
}
@@ -1101,7 +1101,7 @@ static void bitmap_count_page(struct bitmap *bitmap, sector_t offset, int inc)
bitmap_checkfree(bitmap, page);
}
static bitmap_counter_t *bitmap_get_counter(struct bitmap *bitmap,
- sector_t offset, int *blocks,
+ sector_t offset, sector_t *blocks,
int create);
/*
@@ -1115,7 +1115,7 @@ void bitmap_daemon_work(mddev_t *mddev)
unsigned long j;
unsigned long flags;
struct page *page = NULL, *lastpage = NULL;
- int blocks;
+ sector_t blocks;
void *paddr;
struct dm_dirty_log *log = mddev->bitmap_info.log;
@@ -1258,7 +1258,7 @@ void bitmap_daemon_work(mddev_t *mddev)
}
static bitmap_counter_t *bitmap_get_counter(struct bitmap *bitmap,
- sector_t offset, int *blocks,
+ sector_t offset, sector_t *blocks,
int create)
__releases(bitmap->lock)
__acquires(bitmap->lock)
@@ -1316,7 +1316,7 @@ int bitmap_startwrite(struct bitmap *bitmap, sector_t offset, unsigned long sect
}
while (sectors) {
- int blocks;
+ sector_t blocks;
bitmap_counter_t *bmc;
spin_lock_irq(&bitmap->lock);
@@ -1381,7 +1381,7 @@ void bitmap_endwrite(struct bitmap *bitmap, sector_t offset, unsigned long secto
success = 0;
while (sectors) {
- int blocks;
+ sector_t blocks;
unsigned long flags;
bitmap_counter_t *bmc;
@@ -1423,7 +1423,7 @@ void bitmap_endwrite(struct bitmap *bitmap, sector_t offset, unsigned long secto
}
EXPORT_SYMBOL(bitmap_endwrite);
-static int __bitmap_start_sync(struct bitmap *bitmap, sector_t offset, int *blocks,
+static int __bitmap_start_sync(struct bitmap *bitmap, sector_t offset, sector_t *blocks,
int degraded)
{
bitmap_counter_t *bmc;
@@ -1452,7 +1452,7 @@ static int __bitmap_start_sync(struct bitmap *bitmap, sector_t offset, int *bloc
return rv;
}
-int bitmap_start_sync(struct bitmap *bitmap, sector_t offset, int *blocks,
+int bitmap_start_sync(struct bitmap *bitmap, sector_t offset, sector_t *blocks,
int degraded)
{
/* bitmap_start_sync must always report on multiples of whole
@@ -1463,7 +1463,7 @@ int bitmap_start_sync(struct bitmap *bitmap, sector_t offset, int *blocks,
* Return the 'or' of the result.
*/
int rv = 0;
- int blocks1;
+ sector_t blocks1;
*blocks = 0;
while (*blocks < (PAGE_SIZE>>9)) {
@@ -1476,7 +1476,7 @@ int bitmap_start_sync(struct bitmap *bitmap, sector_t offset, int *blocks,
}
EXPORT_SYMBOL(bitmap_start_sync);
-void bitmap_end_sync(struct bitmap *bitmap, sector_t offset, int *blocks, int aborted)
+void bitmap_end_sync(struct bitmap *bitmap, sector_t offset, sector_t *blocks, int aborted)
{
bitmap_counter_t *bmc;
unsigned long flags;
@@ -1515,7 +1515,7 @@ void bitmap_close_sync(struct bitmap *bitmap)
* RESYNC bit wherever it is still on
*/
sector_t sector = 0;
- int blocks;
+ sector_t blocks;
if (!bitmap)
return;
while (sector < bitmap->mddev->resync_max_sectors) {
@@ -1528,7 +1528,7 @@ EXPORT_SYMBOL(bitmap_close_sync);
void bitmap_cond_end_sync(struct bitmap *bitmap, sector_t sector)
{
sector_t s = 0;
- int blocks;
+ sector_t blocks;
if (!bitmap)
return;
@@ -1562,7 +1562,7 @@ static void bitmap_set_memory_bits(struct bitmap *bitmap, sector_t offset, int n
* be 0 at this point
*/
- int secs;
+ sector_t secs;
bitmap_counter_t *bmc;
spin_lock_irq(&bitmap->lock);
bmc = bitmap_get_counter(bitmap, offset, &secs, 1);
@@ -1790,7 +1790,7 @@ int bitmap_load(mddev_t *mddev)
* All chunks should be clean, but some might need_sync.
*/
while (sector < mddev->resync_max_sectors) {
- int blocks;
+ sector_t blocks;
bitmap_start_sync(bitmap, sector, &blocks, 0);
sector += blocks;
}
diff --git a/drivers/md/bitmap.h b/drivers/md/bitmap.h
index e872a7bad6b8..931a7a7c3796 100644
--- a/drivers/md/bitmap.h
+++ b/drivers/md/bitmap.h
@@ -271,8 +271,8 @@ int bitmap_startwrite(struct bitmap *bitmap, sector_t offset,
unsigned long sectors, int behind);
void bitmap_endwrite(struct bitmap *bitmap, sector_t offset,
unsigned long sectors, int success, int behind);
-int bitmap_start_sync(struct bitmap *bitmap, sector_t offset, int *blocks, int degraded);
-void bitmap_end_sync(struct bitmap *bitmap, sector_t offset, int *blocks, int aborted);
+int bitmap_start_sync(struct bitmap *bitmap, sector_t offset, sector_t *blocks, int degraded);
+void bitmap_end_sync(struct bitmap *bitmap, sector_t offset, sector_t *blocks, int aborted);
void bitmap_close_sync(struct bitmap *bitmap);
void bitmap_cond_end_sync(struct bitmap *bitmap, sector_t sector);
diff --git a/drivers/md/dm-snap-persistent.c b/drivers/md/dm-snap-persistent.c
index 0b61792a2780..2129cdb115dc 100644
--- a/drivers/md/dm-snap-persistent.c
+++ b/drivers/md/dm-snap-persistent.c
@@ -254,7 +254,7 @@ static int chunk_io(struct pstore *ps, void *area, chunk_t chunk, int rw,
* Issue the synchronous I/O from a different thread
* to avoid generic_make_request recursion.
*/
- INIT_WORK_ON_STACK(&req.work, do_metadata);
+ INIT_WORK_ONSTACK(&req.work, do_metadata);
queue_work(ps->metadata_wq, &req.work);
flush_workqueue(ps->metadata_wq);
diff --git a/drivers/md/faulty.c b/drivers/md/faulty.c
index 1a8987884614..339fdc670751 100644
--- a/drivers/md/faulty.c
+++ b/drivers/md/faulty.c
@@ -210,7 +210,7 @@ static int make_request(mddev_t *mddev, struct bio *bio)
}
}
if (failit) {
- struct bio *b = bio_clone(bio, GFP_NOIO);
+ struct bio *b = bio_clone_mddev(bio, GFP_NOIO, mddev);
b->bi_bdev = conf->rdev->bdev;
b->bi_private = bio;
b->bi_end_io = faulty_fail;
diff --git a/drivers/md/md.c b/drivers/md/md.c
index 225815197a3d..84c46a161927 100644
--- a/drivers/md/md.c
+++ b/drivers/md/md.c
@@ -57,8 +57,6 @@
#define DEBUG 0
#define dprintk(x...) ((void)(DEBUG && printk(x)))
-static DEFINE_MUTEX(md_mutex);
-
#ifndef MODULE
static void autostart_arrays(int part);
#endif
@@ -69,6 +67,8 @@ static DEFINE_SPINLOCK(pers_lock);
static void md_print_devices(void);
static DECLARE_WAIT_QUEUE_HEAD(resync_wait);
+static struct workqueue_struct *md_wq;
+static struct workqueue_struct *md_misc_wq;
#define MD_BUG(x...) { printk("md: bug in file %s, line %d\n", __FILE__, __LINE__); md_print_devices(); }
@@ -149,6 +149,72 @@ static const struct block_device_operations md_fops;
static int start_readonly;
+/* bio_clone_mddev
+ * like bio_clone, but with a local bio set
+ */
+
+static void mddev_bio_destructor(struct bio *bio)
+{
+ mddev_t *mddev, **mddevp;
+
+ mddevp = (void*)bio;
+ mddev = mddevp[-1];
+
+ bio_free(bio, mddev->bio_set);
+}
+
+struct bio *bio_alloc_mddev(gfp_t gfp_mask, int nr_iovecs,
+ mddev_t *mddev)
+{
+ struct bio *b;
+ mddev_t **mddevp;
+
+ if (!mddev || !mddev->bio_set)
+ return bio_alloc(gfp_mask, nr_iovecs);
+
+ b = bio_alloc_bioset(gfp_mask, nr_iovecs,
+ mddev->bio_set);
+ if (!b)
+ return NULL;
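+ /*
+ * mddev->bio_set is created with front_pad = sizeof(mddev_t *)
+ * (see md_run below), so each bio allocated from it is preceded
+ * by room for one pointer: stash the owning mddev there so the
+ * destructor can find the right bio_set again.
+ */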
+ mddevp = (void*)b;
+ mddevp[-1] = mddev;
+ b->bi_destructor = mddev_bio_destructor;
+ return b;
+}
+EXPORT_SYMBOL_GPL(bio_alloc_mddev);
+
+struct bio *bio_clone_mddev(struct bio *bio, gfp_t gfp_mask,
+ mddev_t *mddev)
+{
+ struct bio *b;
+ mddev_t **mddevp;
+
+ if (!mddev || !mddev->bio_set)
+ return bio_clone(bio, gfp_mask);
+
+ b = bio_alloc_bioset(gfp_mask, bio->bi_max_vecs,
+ mddev->bio_set);
+ if (!b)
+ return NULL;
+ mddevp = (void*)b;
+ mddevp[-1] = mddev;
+ b->bi_destructor = mddev_bio_destructor;
+ __bio_clone(b, bio);
+ if (bio_integrity(bio)) {
+ int ret;
+
+ ret = bio_integrity_clone(b, bio, gfp_mask, mddev->bio_set);
+
+ if (ret < 0) {
+ bio_put(b);
+ return NULL;
+ }
+ }
+
+ return b;
+}
+EXPORT_SYMBOL_GPL(bio_clone_mddev);
+
/*
* We have a system wide 'event count' that is incremented
* on any 'interesting' event, and readers of /proc/mdstat
@@ -300,7 +366,7 @@ static void md_end_flush(struct bio *bio, int err)
if (atomic_dec_and_test(&mddev->flush_pending)) {
/* The pre-request flush has finished */
- schedule_work(&mddev->flush_work);
+ queue_work(md_wq, &mddev->flush_work);
}
bio_put(bio);
}
@@ -321,7 +387,7 @@ static void submit_flushes(mddev_t *mddev)
atomic_inc(&rdev->nr_pending);
atomic_inc(&rdev->nr_pending);
rcu_read_unlock();
- bi = bio_alloc(GFP_KERNEL, 0);
+ bi = bio_alloc_mddev(GFP_KERNEL, 0, mddev);
bi->bi_end_io = md_end_flush;
bi->bi_private = rdev;
bi->bi_bdev = rdev->bdev;
@@ -369,7 +435,7 @@ void md_flush_request(mddev_t *mddev, struct bio *bio)
submit_flushes(mddev);
if (atomic_dec_and_test(&mddev->flush_pending))
- schedule_work(&mddev->flush_work);
+ queue_work(md_wq, &mddev->flush_work);
}
EXPORT_SYMBOL(md_flush_request);
@@ -428,6 +494,8 @@ static void mddev_delayed_delete(struct work_struct *ws);
static void mddev_put(mddev_t *mddev)
{
+ struct bio_set *bs = NULL;
+
if (!atomic_dec_and_lock(&mddev->active, &all_mddevs_lock))
return;
if (!mddev->raid_disks && list_empty(&mddev->disks) &&
@@ -435,19 +503,22 @@ static void mddev_put(mddev_t *mddev)
/* Array is not configured at all, and not held active,
* so destroy it */
list_del(&mddev->all_mddevs);
+ bs = mddev->bio_set;
+ mddev->bio_set = NULL;
if (mddev->gendisk) {
- /* we did a probe so need to clean up.
- * Call schedule_work inside the spinlock
- * so that flush_scheduled_work() after
- * mddev_find will succeed in waiting for the
- * work to be done.
+ /* We did a probe so need to clean up. Call
+ * queue_work inside the spinlock so that
+ * flush_workqueue() after mddev_find will
+ * succeed in waiting for the work to be done.
*/
INIT_WORK(&mddev->del_work, mddev_delayed_delete);
- schedule_work(&mddev->del_work);
+ queue_work(md_misc_wq, &mddev->del_work);
} else
kfree(mddev);
}
spin_unlock(&all_mddevs_lock);
+ if (bs)
+ bioset_free(bs);
}
void mddev_init(mddev_t *mddev)
@@ -635,7 +706,7 @@ static struct mdk_personality *find_pers(int level, char *clevel)
/* return the offset of the super block in 512byte sectors */
static inline sector_t calc_dev_sboffset(struct block_device *bdev)
{
- sector_t num_sectors = bdev->bd_inode->i_size / 512;
+ sector_t num_sectors = i_size_read(bdev->bd_inode) / 512;
return MD_NEW_SIZE_SECTORS(num_sectors);
}
@@ -691,7 +762,7 @@ void md_super_write(mddev_t *mddev, mdk_rdev_t *rdev,
* if zero is reached.
* If an error occurred, call md_error
*/
- struct bio *bio = bio_alloc(GFP_NOIO, 1);
+ struct bio *bio = bio_alloc_mddev(GFP_NOIO, 1, mddev);
bio->bi_bdev = rdev->bdev;
bio->bi_sector = sector;
@@ -722,16 +793,16 @@ static void bi_complete(struct bio *bio, int error)
complete((struct completion*)bio->bi_private);
}
-int sync_page_io(struct block_device *bdev, sector_t sector, int size,
- struct page *page, int rw)
+int sync_page_io(mdk_rdev_t *rdev, sector_t sector, int size,
+ struct page *page, int rw)
{
- struct bio *bio = bio_alloc(GFP_NOIO, 1);
+ struct bio *bio = bio_alloc_mddev(GFP_NOIO, 1, rdev->mddev);
struct completion event;
int ret;
rw |= REQ_SYNC | REQ_UNPLUG;
- bio->bi_bdev = bdev;
+ bio->bi_bdev = rdev->bdev;
bio->bi_sector = sector;
bio_add_page(bio, page, size, 0);
init_completion(&event);
@@ -757,7 +828,7 @@ static int read_disk_sb(mdk_rdev_t * rdev, int size)
return 0;
- if (!sync_page_io(rdev->bdev, rdev->sb_start, size, rdev->sb_page, READ))
+ if (!sync_page_io(rdev, rdev->sb_start, size, rdev->sb_page, READ))
goto fail;
rdev->sb_loaded = 1;
return 0;
@@ -1266,7 +1337,7 @@ super_90_rdev_size_change(mdk_rdev_t *rdev, sector_t num_sectors)
md_super_write(rdev->mddev, rdev, rdev->sb_start, rdev->sb_size,
rdev->sb_page);
md_super_wait(rdev->mddev);
- return num_sectors / 2; /* kB for sysfs */
+ return num_sectors;
}
@@ -1315,7 +1386,7 @@ static int super_1_load(mdk_rdev_t *rdev, mdk_rdev_t *refdev, int minor_version)
*/
switch(minor_version) {
case 0:
- sb_start = rdev->bdev->bd_inode->i_size >> 9;
+ sb_start = i_size_read(rdev->bdev->bd_inode) >> 9;
sb_start -= 8*2;
sb_start &= ~(sector_t)(4*2-1);
break;
@@ -1401,7 +1472,7 @@ static int super_1_load(mdk_rdev_t *rdev, mdk_rdev_t *refdev, int minor_version)
ret = 0;
}
if (minor_version)
- rdev->sectors = (rdev->bdev->bd_inode->i_size >> 9) -
+ rdev->sectors = (i_size_read(rdev->bdev->bd_inode) >> 9) -
le64_to_cpu(sb->data_offset);
else
rdev->sectors = rdev->sb_start;
@@ -1609,7 +1680,7 @@ super_1_rdev_size_change(mdk_rdev_t *rdev, sector_t num_sectors)
return 0; /* component must fit device */
if (rdev->sb_start < rdev->data_offset) {
/* minor versions 1 and 2; superblock before data */
- max_sectors = rdev->bdev->bd_inode->i_size >> 9;
+ max_sectors = i_size_read(rdev->bdev->bd_inode) >> 9;
max_sectors -= rdev->data_offset;
if (!num_sectors || num_sectors > max_sectors)
num_sectors = max_sectors;
@@ -1619,7 +1690,7 @@ super_1_rdev_size_change(mdk_rdev_t *rdev, sector_t num_sectors)
} else {
/* minor version 0; superblock after data */
sector_t sb_start;
- sb_start = (rdev->bdev->bd_inode->i_size >> 9) - 8*2;
+ sb_start = (i_size_read(rdev->bdev->bd_inode) >> 9) - 8*2;
sb_start &= ~(sector_t)(4*2 - 1);
max_sectors = rdev->sectors + sb_start - rdev->sb_start;
if (!num_sectors || num_sectors > max_sectors)
@@ -1633,7 +1704,7 @@ super_1_rdev_size_change(mdk_rdev_t *rdev, sector_t num_sectors)
md_super_write(rdev->mddev, rdev, rdev->sb_start, rdev->sb_size,
rdev->sb_page);
md_super_wait(rdev->mddev);
- return num_sectors / 2; /* kB for sysfs */
+ return num_sectors;
}
static struct super_type super_types[] = {
@@ -1850,7 +1921,7 @@ static void unbind_rdev_from_array(mdk_rdev_t * rdev)
synchronize_rcu();
INIT_WORK(&rdev->del_work, md_delayed_delete);
kobject_get(&rdev->kobj);
- schedule_work(&rdev->del_work);
+ queue_work(md_misc_wq, &rdev->del_work);
}
/*
@@ -2108,6 +2179,8 @@ repeat:
if (!mddev->persistent) {
clear_bit(MD_CHANGE_CLEAN, &mddev->flags);
clear_bit(MD_CHANGE_DEVS, &mddev->flags);
+ if (!mddev->external)
+ clear_bit(MD_CHANGE_PENDING, &mddev->flags);
wake_up(&mddev->sb_wait);
return;
}
@@ -2511,7 +2584,7 @@ rdev_size_store(mdk_rdev_t *rdev, const char *buf, size_t len)
if (!sectors)
return -EBUSY;
} else if (!sectors)
- sectors = (rdev->bdev->bd_inode->i_size >> 9) -
+ sectors = (i_size_read(rdev->bdev->bd_inode) >> 9) -
rdev->data_offset;
}
if (sectors < my_mddev->dev_sectors)
@@ -2724,7 +2797,7 @@ static mdk_rdev_t *md_import_device(dev_t newdev, int super_format, int super_mi
kobject_init(&rdev->kobj, &rdev_ktype);
- size = rdev->bdev->bd_inode->i_size >> BLOCK_SIZE_BITS;
+ size = i_size_read(rdev->bdev->bd_inode) >> BLOCK_SIZE_BITS;
if (!size) {
printk(KERN_WARNING
"md: %s has zero or unknown size, marking faulty!\n",
@@ -4192,10 +4265,10 @@ static int md_alloc(dev_t dev, char *name)
shift = partitioned ? MdpMinorShift : 0;
unit = MINOR(mddev->unit) >> shift;
- /* wait for any previous instance if this device
- * to be completed removed (mddev_delayed_delete).
+ /* wait for any previous instance of this device to be
+ * completely removed (mddev_delayed_delete).
*/
- flush_scheduled_work();
+ flush_workqueue(md_misc_wq);
mutex_lock(&disks_mutex);
error = -EEXIST;
@@ -4265,6 +4338,8 @@ static int md_alloc(dev_t dev, char *name)
if (mddev->kobj.sd &&
sysfs_create_group(&mddev->kobj, &md_bitmap_group))
printk(KERN_DEBUG "pointless warning\n");
+
+ blk_queue_flush(mddev->queue, REQ_FLUSH | REQ_FUA);
abort:
mutex_unlock(&disks_mutex);
if (!error && mddev->kobj.sd) {
@@ -4378,6 +4453,9 @@ int md_run(mddev_t *mddev)
sysfs_notify_dirent_safe(rdev->sysfs_state);
}
+ if (mddev->bio_set == NULL)
+ mddev->bio_set = bioset_create(BIO_POOL_SIZE, sizeof(mddev));
+
spin_lock(&pers_lock);
pers = find_pers(mddev->level, mddev->clevel);
if (!pers || !try_module_get(pers->owner)) {
@@ -5159,8 +5237,8 @@ static int add_new_disk(mddev_t * mddev, mdu_disk_info_t *info)
if (!mddev->persistent) {
printk(KERN_INFO "md: nonpersistent superblock ...\n");
- rdev->sb_start = rdev->bdev->bd_inode->i_size / 512;
- } else
+ rdev->sb_start = i_size_read(rdev->bdev->bd_inode) / 512;
+ } else
rdev->sb_start = calc_dev_sboffset(rdev->bdev);
rdev->sectors = rdev->sb_start;
@@ -5230,7 +5308,7 @@ static int hot_add_disk(mddev_t * mddev, dev_t dev)
if (mddev->persistent)
rdev->sb_start = calc_dev_sboffset(rdev->bdev);
else
- rdev->sb_start = rdev->bdev->bd_inode->i_size / 512;
+ rdev->sb_start = i_size_read(rdev->bdev->bd_inode) / 512;
rdev->sectors = rdev->sb_start;
@@ -5885,16 +5963,14 @@ static int md_open(struct block_device *bdev, fmode_t mode)
mddev_t *mddev = mddev_find(bdev->bd_dev);
int err;
- mutex_lock(&md_mutex);
if (mddev->gendisk != bdev->bd_disk) {
/* we are racing with mddev_put which is discarding this
* bd_disk.
*/
mddev_put(mddev);
/* Wait until bdev->bd_disk is definitely gone */
- flush_scheduled_work();
+ flush_workqueue(md_misc_wq);
/* Then retry the open from the top */
- mutex_unlock(&md_mutex);
return -ERESTARTSYS;
}
BUG_ON(mddev != bdev->bd_disk->private_data);
@@ -5908,7 +5984,6 @@ static int md_open(struct block_device *bdev, fmode_t mode)
check_disk_size_change(mddev->gendisk, bdev);
out:
- mutex_unlock(&md_mutex);
return err;
}
@@ -5917,10 +5992,8 @@ static int md_release(struct gendisk *disk, fmode_t mode)
mddev_t *mddev = disk->private_data;
BUG_ON(!mddev);
- mutex_lock(&md_mutex);
atomic_dec(&mddev->openers);
mddev_put(mddev);
- mutex_unlock(&md_mutex);
return 0;
}
@@ -6052,7 +6125,7 @@ void md_error(mddev_t *mddev, mdk_rdev_t *rdev)
set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
md_wakeup_thread(mddev->thread);
if (mddev->event_work.func)
- schedule_work(&mddev->event_work);
+ queue_work(md_misc_wq, &mddev->event_work);
md_new_event_inintr(mddev);
}
@@ -7212,12 +7285,23 @@ static void md_geninit(void)
static int __init md_init(void)
{
- if (register_blkdev(MD_MAJOR, "md"))
- return -1;
- if ((mdp_major=register_blkdev(0, "mdp"))<=0) {
- unregister_blkdev(MD_MAJOR, "md");
- return -1;
- }
+ int ret = -ENOMEM;
+
+ md_wq = alloc_workqueue("md", WQ_RESCUER, 0);
+ if (!md_wq)
+ goto err_wq;
+
+ md_misc_wq = alloc_workqueue("md_misc", 0, 0);
+ if (!md_misc_wq)
+ goto err_misc_wq;
+
+ if ((ret = register_blkdev(MD_MAJOR, "md")) < 0)
+ goto err_md;
+
+ if ((ret = register_blkdev(0, "mdp")) < 0)
+ goto err_mdp;
+ mdp_major = ret;
+
blk_register_region(MKDEV(MD_MAJOR, 0), 1UL<<MINORBITS, THIS_MODULE,
md_probe, NULL, NULL);
blk_register_region(MKDEV(mdp_major, 0), 1UL<<MINORBITS, THIS_MODULE,
@@ -7228,8 +7312,16 @@ static int __init md_init(void)
md_geninit();
return 0;
-}
+err_mdp:
+ unregister_blkdev(MD_MAJOR, "md");
+err_md:
+ destroy_workqueue(md_misc_wq);
+err_misc_wq:
+ destroy_workqueue(md_wq);
+err_wq:
+ return ret;
+}
#ifndef MODULE
@@ -7316,6 +7408,8 @@ static __exit void md_exit(void)
export_array(mddev);
mddev->hold_active = 0;
}
+ destroy_workqueue(md_misc_wq);
+ destroy_workqueue(md_wq);
}
subsys_initcall(md_init);
diff --git a/drivers/md/md.h b/drivers/md/md.h
index 112a2c32db0c..d05bab55df4e 100644
--- a/drivers/md/md.h
+++ b/drivers/md/md.h
@@ -331,6 +331,8 @@ struct mddev_s
struct attribute_group *to_remove;
struct plug_handle *plug; /* if used by personality */
+ struct bio_set *bio_set;
+
/* Generic flush handling.
* The last to finish preflush schedules a worker to submit
* the rest of the request (without the REQ_FLUSH flag).
@@ -495,7 +497,7 @@ extern void md_flush_request(mddev_t *mddev, struct bio *bio);
extern void md_super_write(mddev_t *mddev, mdk_rdev_t *rdev,
sector_t sector, int size, struct page *page);
extern void md_super_wait(mddev_t *mddev);
-extern int sync_page_io(struct block_device *bdev, sector_t sector, int size,
+extern int sync_page_io(mdk_rdev_t *rdev, sector_t sector, int size,
struct page *page, int rw);
extern void md_do_sync(mddev_t *mddev);
extern void md_new_event(mddev_t *mddev);
@@ -517,4 +519,8 @@ extern void md_rdev_init(mdk_rdev_t *rdev);
extern void mddev_suspend(mddev_t *mddev);
extern void mddev_resume(mddev_t *mddev);
+extern struct bio *bio_clone_mddev(struct bio *bio, gfp_t gfp_mask,
+ mddev_t *mddev);
+extern struct bio *bio_alloc_mddev(gfp_t gfp_mask, int nr_iovecs,
+ mddev_t *mddev);
#endif /* _MD_MD_H */
diff --git a/drivers/md/raid1.c b/drivers/md/raid1.c
index 378a25894c57..845cf95b612c 100644
--- a/drivers/md/raid1.c
+++ b/drivers/md/raid1.c
@@ -100,7 +100,7 @@ static void * r1buf_pool_alloc(gfp_t gfp_flags, void *data)
* Allocate bios : 1 for reading, n-1 for writing
*/
for (j = pi->raid_disks ; j-- ; ) {
- bio = bio_alloc(gfp_flags, RESYNC_PAGES);
+ bio = bio_kmalloc(gfp_flags, RESYNC_PAGES);
if (!bio)
goto out_free_bio;
r1_bio->bios[j] = bio;
@@ -306,6 +306,28 @@ static void raid1_end_read_request(struct bio *bio, int error)
rdev_dec_pending(conf->mirrors[mirror].rdev, conf->mddev);
}
+static void r1_bio_write_done(r1bio_t *r1_bio, int vcnt, struct bio_vec *bv,
+ int behind)
+{
+ if (atomic_dec_and_test(&r1_bio->remaining))
+ {
+ /* it really is the end of this request */
+ if (test_bit(R1BIO_BehindIO, &r1_bio->state)) {
+ /* free extra copy of the data pages */
+ int i = vcnt;
+ while (i--)
+ safe_put_page(bv[i].bv_page);
+ }
+ /* clear the bitmap if all writes complete successfully */
+ bitmap_endwrite(r1_bio->mddev->bitmap, r1_bio->sector,
+ r1_bio->sectors,
+ !test_bit(R1BIO_Degraded, &r1_bio->state),
+ behind);
+ md_write_end(r1_bio->mddev);
+ raid_end_bio_io(r1_bio);
+ }
+}
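
r1_bio_write_done() is called from every per-mirror write completion and, further down, once by make_request() itself: ->remaining now starts at 1, so the submitting thread holds a reference until all mirror bios have been queued, which also covers the case where the completions race ahead of submission. The standalone sketch below shows the shape of that reference-counting pattern in plain C; all names are illustrative, not raid1 code.

/* Minimal standalone illustration of the completion-counting pattern. */
#include <stdatomic.h>
#include <stdio.h>

static atomic_int remaining;

static void complete_all(void)
{
	printf("all sub-requests finished\n");
}

static void put_ref(void)
{
	if (atomic_fetch_sub(&remaining, 1) == 1)
		complete_all();
}

static void sub_request_done(void)	/* would run from each async completion */
{
	put_ref();
}

int main(void)
{
	int i;

	atomic_store(&remaining, 1);	/* submitter's own reference */
	for (i = 0; i < 3; i++) {
		atomic_fetch_add(&remaining, 1);	/* one ref per sub-request */
		sub_request_done();	/* stands in for the async completion */
	}
	put_ref();			/* drop the submitter's ref last */
	return 0;
}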
+
static void raid1_end_write_request(struct bio *bio, int error)
{
int uptodate = test_bit(BIO_UPTODATE, &bio->bi_flags);
@@ -373,21 +395,7 @@ static void raid1_end_write_request(struct bio *bio, int error)
* Let's see if all mirrored write operations have finished
* already.
*/
- if (atomic_dec_and_test(&r1_bio->remaining)) {
- if (test_bit(R1BIO_BehindIO, &r1_bio->state)) {
- /* free extra copy of the data pages */
- int i = bio->bi_vcnt;
- while (i--)
- safe_put_page(bio->bi_io_vec[i].bv_page);
- }
- /* clear the bitmap if all writes complete successfully */
- bitmap_endwrite(r1_bio->mddev->bitmap, r1_bio->sector,
- r1_bio->sectors,
- !test_bit(R1BIO_Degraded, &r1_bio->state),
- behind);
- md_write_end(r1_bio->mddev);
- raid_end_bio_io(r1_bio);
- }
+ r1_bio_write_done(r1_bio, bio->bi_vcnt, bio->bi_io_vec, behind);
if (to_put)
bio_put(to_put);
@@ -411,11 +419,13 @@ static void raid1_end_write_request(struct bio *bio, int error)
static int read_balance(conf_t *conf, r1bio_t *r1_bio)
{
const sector_t this_sector = r1_bio->sector;
- int new_disk = conf->last_used, disk = new_disk;
- int wonly_disk = -1;
const int sectors = r1_bio->sectors;
+ int new_disk = -1;
+ int start_disk;
+ int i;
sector_t new_distance, current_distance;
mdk_rdev_t *rdev;
+ int choose_first;
rcu_read_lock();
/*
@@ -426,54 +436,33 @@ static int read_balance(conf_t *conf, r1bio_t *r1_bio)
retry:
if (conf->mddev->recovery_cp < MaxSector &&
(this_sector + sectors >= conf->next_resync)) {
- /* Choose the first operational device, for consistancy */
- new_disk = 0;
-
- for (rdev = rcu_dereference(conf->mirrors[new_disk].rdev);
- r1_bio->bios[new_disk] == IO_BLOCKED ||
- !rdev || !test_bit(In_sync, &rdev->flags)
- || test_bit(WriteMostly, &rdev->flags);
- rdev = rcu_dereference(conf->mirrors[++new_disk].rdev)) {
-
- if (rdev && test_bit(In_sync, &rdev->flags) &&
- r1_bio->bios[new_disk] != IO_BLOCKED)
- wonly_disk = new_disk;
-
- if (new_disk == conf->raid_disks - 1) {
- new_disk = wonly_disk;
- break;
- }
- }
- goto rb_out;
+ choose_first = 1;
+ start_disk = 0;
+ } else {
+ choose_first = 0;
+ start_disk = conf->last_used;
}
-
/* make sure the disk is operational */
- for (rdev = rcu_dereference(conf->mirrors[new_disk].rdev);
- r1_bio->bios[new_disk] == IO_BLOCKED ||
- !rdev || !test_bit(In_sync, &rdev->flags) ||
- test_bit(WriteMostly, &rdev->flags);
- rdev = rcu_dereference(conf->mirrors[new_disk].rdev)) {
-
- if (rdev && test_bit(In_sync, &rdev->flags) &&
- r1_bio->bios[new_disk] != IO_BLOCKED)
- wonly_disk = new_disk;
-
- if (new_disk <= 0)
- new_disk = conf->raid_disks;
- new_disk--;
- if (new_disk == disk) {
- new_disk = wonly_disk;
+ for (i = 0 ; i < conf->raid_disks ; i++) {
+ int disk = start_disk + i;
+ if (disk >= conf->raid_disks)
+ disk -= conf->raid_disks;
+
+ rdev = rcu_dereference(conf->mirrors[disk].rdev);
+ if (r1_bio->bios[disk] == IO_BLOCKED
+ || rdev == NULL
+ || !test_bit(In_sync, &rdev->flags))
+ continue;
+
+ new_disk = disk;
+ if (!test_bit(WriteMostly, &rdev->flags))
break;
- }
}
- if (new_disk < 0)
+ if (new_disk < 0 || choose_first)
goto rb_out;
- disk = new_disk;
- /* now disk == new_disk == starting point for search */
-
/*
* Don't change to another disk for sequential reads:
*/
@@ -482,20 +471,21 @@ static int read_balance(conf_t *conf, r1bio_t *r1_bio)
if (this_sector == conf->mirrors[new_disk].head_position)
goto rb_out;
- current_distance = abs(this_sector - conf->mirrors[disk].head_position);
-
- /* Find the disk whose head is closest */
+ current_distance = abs(this_sector
+ - conf->mirrors[new_disk].head_position);
- do {
- if (disk <= 0)
- disk = conf->raid_disks;
- disk--;
+ /* look for a better disk - i.e. head is closer */
+ start_disk = new_disk;
+ for (i = 1; i < conf->raid_disks; i++) {
+ int disk = start_disk + i;
+ if (disk >= conf->raid_disks)
+ disk -= conf->raid_disks;
rdev = rcu_dereference(conf->mirrors[disk].rdev);
-
- if (!rdev || r1_bio->bios[disk] == IO_BLOCKED ||
- !test_bit(In_sync, &rdev->flags) ||
- test_bit(WriteMostly, &rdev->flags))
+ if (r1_bio->bios[disk] == IO_BLOCKED
+ || rdev == NULL
+ || !test_bit(In_sync, &rdev->flags)
+ || test_bit(WriteMostly, &rdev->flags))
continue;
if (!atomic_read(&rdev->nr_pending)) {
@@ -507,11 +497,9 @@ static int read_balance(conf_t *conf, r1bio_t *r1_bio)
current_distance = new_distance;
new_disk = disk;
}
- } while (disk != conf->last_used);
+ }
rb_out:
-
-
if (new_disk >= 0) {
rdev = rcu_dereference(conf->mirrors[new_disk].rdev);
if (!rdev)
@@ -658,7 +646,7 @@ static void raise_barrier(conf_t *conf)
/* block any new IO from starting */
conf->barrier++;
- /* No wait for all pending IO to complete */
+ /* Now wait for all pending IO to complete */
wait_event_lock_irq(conf->wait_barrier,
!conf->nr_pending && conf->barrier < RESYNC_DEPTH,
conf->resync_lock,
@@ -735,23 +723,26 @@ static void unfreeze_array(conf_t *conf)
}
-/* duplicate the data pages for behind I/O */
-static struct page **alloc_behind_pages(struct bio *bio)
+/* duplicate the data pages for behind I/O
+ * We return a list of bio_vec rather than just page pointers
+ * as it makes freeing easier
+ */
+static struct bio_vec *alloc_behind_pages(struct bio *bio)
{
int i;
struct bio_vec *bvec;
- struct page **pages = kzalloc(bio->bi_vcnt * sizeof(struct page *),
+ struct bio_vec *pages = kzalloc(bio->bi_vcnt * sizeof(struct bio_vec),
GFP_NOIO);
if (unlikely(!pages))
goto do_sync_io;
bio_for_each_segment(bvec, bio, i) {
- pages[i] = alloc_page(GFP_NOIO);
- if (unlikely(!pages[i]))
+ pages[i].bv_page = alloc_page(GFP_NOIO);
+ if (unlikely(!pages[i].bv_page))
goto do_sync_io;
- memcpy(kmap(pages[i]) + bvec->bv_offset,
+ memcpy(kmap(pages[i].bv_page) + bvec->bv_offset,
kmap(bvec->bv_page) + bvec->bv_offset, bvec->bv_len);
- kunmap(pages[i]);
+ kunmap(pages[i].bv_page);
kunmap(bvec->bv_page);
}
@@ -759,8 +750,8 @@ static struct page **alloc_behind_pages(struct bio *bio)
do_sync_io:
if (pages)
- for (i = 0; i < bio->bi_vcnt && pages[i]; i++)
- put_page(pages[i]);
+ for (i = 0; i < bio->bi_vcnt && pages[i].bv_page; i++)
+ put_page(pages[i].bv_page);
kfree(pages);
PRINTK("%dB behind alloc failed, doing sync I/O\n", bio->bi_size);
return NULL;
@@ -775,8 +766,7 @@ static int make_request(mddev_t *mddev, struct bio * bio)
int i, targets = 0, disks;
struct bitmap *bitmap;
unsigned long flags;
- struct bio_list bl;
- struct page **behind_pages = NULL;
+ struct bio_vec *behind_pages = NULL;
const int rw = bio_data_dir(bio);
const unsigned long do_sync = (bio->bi_rw & REQ_SYNC);
const unsigned long do_flush_fua = (bio->bi_rw & (REQ_FLUSH | REQ_FUA));
@@ -851,7 +841,7 @@ static int make_request(mddev_t *mddev, struct bio * bio)
}
r1_bio->read_disk = rdisk;
- read_bio = bio_clone(bio, GFP_NOIO);
+ read_bio = bio_clone_mddev(bio, GFP_NOIO, mddev);
r1_bio->bios[rdisk] = read_bio;
@@ -873,13 +863,6 @@ static int make_request(mddev_t *mddev, struct bio * bio)
* bios[x] to bio
*/
disks = conf->raid_disks;
-#if 0
- { static int first=1;
- if (first) printk("First Write sector %llu disks %d\n",
- (unsigned long long)r1_bio->sector, disks);
- first = 0;
- }
-#endif
retry_write:
blocked_rdev = NULL;
rcu_read_lock();
@@ -937,16 +920,17 @@ static int make_request(mddev_t *mddev, struct bio * bio)
(behind_pages = alloc_behind_pages(bio)) != NULL)
set_bit(R1BIO_BehindIO, &r1_bio->state);
- atomic_set(&r1_bio->remaining, 0);
+ atomic_set(&r1_bio->remaining, 1);
atomic_set(&r1_bio->behind_remaining, 0);
- bio_list_init(&bl);
+ bitmap_startwrite(bitmap, bio->bi_sector, r1_bio->sectors,
+ test_bit(R1BIO_BehindIO, &r1_bio->state));
for (i = 0; i < disks; i++) {
struct bio *mbio;
if (!r1_bio->bios[i])
continue;
- mbio = bio_clone(bio, GFP_NOIO);
+ mbio = bio_clone_mddev(bio, GFP_NOIO, mddev);
r1_bio->bios[i] = mbio;
mbio->bi_sector = r1_bio->sector + conf->mirrors[i].rdev->data_offset;
@@ -963,39 +947,29 @@ static int make_request(mddev_t *mddev, struct bio * bio)
* we clear any unused pointer in the io_vec, rather
* than leave them unchanged. This is important
* because when we come to free the pages, we won't
- * know the originial bi_idx, so we just free
+ * know the original bi_idx, so we just free
* them all
*/
__bio_for_each_segment(bvec, mbio, j, 0)
- bvec->bv_page = behind_pages[j];
+ bvec->bv_page = behind_pages[j].bv_page;
if (test_bit(WriteMostly, &conf->mirrors[i].rdev->flags))
atomic_inc(&r1_bio->behind_remaining);
}
atomic_inc(&r1_bio->remaining);
-
- bio_list_add(&bl, mbio);
+ spin_lock_irqsave(&conf->device_lock, flags);
+ bio_list_add(&conf->pending_bio_list, mbio);
+ blk_plug_device(mddev->queue);
+ spin_unlock_irqrestore(&conf->device_lock, flags);
}
+ r1_bio_write_done(r1_bio, bio->bi_vcnt, behind_pages, behind_pages != NULL);
kfree(behind_pages); /* the behind pages are attached to the bios now */
- bitmap_startwrite(bitmap, bio->bi_sector, r1_bio->sectors,
- test_bit(R1BIO_BehindIO, &r1_bio->state));
- spin_lock_irqsave(&conf->device_lock, flags);
- bio_list_merge(&conf->pending_bio_list, &bl);
- bio_list_init(&bl);
-
- blk_plug_device(mddev->queue);
- spin_unlock_irqrestore(&conf->device_lock, flags);
-
- /* In case raid1d snuck into freeze_array */
+ /* In case raid1d snuck in to freeze_array */
wake_up(&conf->wait_barrier);
if (do_sync)
md_wakeup_thread(mddev->thread);
-#if 0
- while ((bio = bio_list_pop(&bl)) != NULL)
- generic_make_request(bio);
-#endif
return 0;
}
@@ -1183,10 +1157,11 @@ static int raid1_remove_disk(mddev_t *mddev, int number)
err = -EBUSY;
goto abort;
}
- /* Only remove non-faulty devices is recovery
+ /* Only remove non-faulty devices if recovery
* is not possible.
*/
if (!test_bit(Faulty, &rdev->flags) &&
+ !mddev->recovery_disabled &&
mddev->degraded < conf->raid_disks) {
err = -EBUSY;
goto abort;
@@ -1245,7 +1220,7 @@ static void end_sync_write(struct bio *bio, int error)
break;
}
if (!uptodate) {
- int sync_blocks = 0;
+ sector_t sync_blocks = 0;
sector_t s = r1_bio->sector;
long sectors_to_go = r1_bio->sectors;
/* make sure these bits doesn't get cleared. */
@@ -1388,7 +1363,7 @@ static void sync_request_write(mddev_t *mddev, r1bio_t *r1_bio)
* active, and resync is currently active
*/
rdev = conf->mirrors[d].rdev;
- if (sync_page_io(rdev->bdev,
+ if (sync_page_io(rdev,
sect + rdev->data_offset,
s<<9,
bio->bi_io_vec[idx].bv_page,
@@ -1414,7 +1389,7 @@ static void sync_request_write(mddev_t *mddev, r1bio_t *r1_bio)
continue;
rdev = conf->mirrors[d].rdev;
atomic_add(s, &rdev->corrected_errors);
- if (sync_page_io(rdev->bdev,
+ if (sync_page_io(rdev,
sect + rdev->data_offset,
s<<9,
bio->bi_io_vec[idx].bv_page,
@@ -1429,7 +1404,7 @@ static void sync_request_write(mddev_t *mddev, r1bio_t *r1_bio)
if (r1_bio->bios[d]->bi_end_io != end_sync_read)
continue;
rdev = conf->mirrors[d].rdev;
- if (sync_page_io(rdev->bdev,
+ if (sync_page_io(rdev,
sect + rdev->data_offset,
s<<9,
bio->bi_io_vec[idx].bv_page,
@@ -1513,7 +1488,7 @@ static void fix_read_error(conf_t *conf, int read_disk,
rdev = conf->mirrors[d].rdev;
if (rdev &&
test_bit(In_sync, &rdev->flags) &&
- sync_page_io(rdev->bdev,
+ sync_page_io(rdev,
sect + rdev->data_offset,
s<<9,
conf->tmppage, READ))
@@ -1539,7 +1514,7 @@ static void fix_read_error(conf_t *conf, int read_disk,
rdev = conf->mirrors[d].rdev;
if (rdev &&
test_bit(In_sync, &rdev->flags)) {
- if (sync_page_io(rdev->bdev,
+ if (sync_page_io(rdev,
sect + rdev->data_offset,
s<<9, conf->tmppage, WRITE)
== 0)
@@ -1556,7 +1531,7 @@ static void fix_read_error(conf_t *conf, int read_disk,
rdev = conf->mirrors[d].rdev;
if (rdev &&
test_bit(In_sync, &rdev->flags)) {
- if (sync_page_io(rdev->bdev,
+ if (sync_page_io(rdev,
sect + rdev->data_offset,
s<<9, conf->tmppage, READ)
== 0)
@@ -1646,7 +1621,8 @@ static void raid1d(mddev_t *mddev)
mddev->ro ? IO_BLOCKED : NULL;
r1_bio->read_disk = disk;
bio_put(bio);
- bio = bio_clone(r1_bio->master_bio, GFP_NOIO);
+ bio = bio_clone_mddev(r1_bio->master_bio,
+ GFP_NOIO, mddev);
r1_bio->bios[r1_bio->read_disk] = bio;
rdev = conf->mirrors[disk].rdev;
if (printk_ratelimit())
@@ -1705,7 +1681,7 @@ static sector_t sync_request(mddev_t *mddev, sector_t sector_nr, int *skipped, i
int i;
int wonly = -1;
int write_targets = 0, read_targets = 0;
- int sync_blocks;
+ sector_t sync_blocks;
int still_degraded = 0;
if (!conf->r1buf_pool)
@@ -1755,11 +1731,11 @@ static sector_t sync_request(mddev_t *mddev, sector_t sector_nr, int *skipped, i
msleep_interruptible(1000);
bitmap_cond_end_sync(mddev->bitmap, sector_nr);
+ r1_bio = mempool_alloc(conf->r1buf_pool, GFP_NOIO);
raise_barrier(conf);
conf->next_resync = sector_nr;
- r1_bio = mempool_alloc(conf->r1buf_pool, GFP_NOIO);
rcu_read_lock();
/*
* If we get a correctably read error during resync or recovery,
@@ -1971,7 +1947,6 @@ static conf_t *setup_conf(mddev_t *mddev)
init_waitqueue_head(&conf->wait_barrier);
bio_list_init(&conf->pending_bio_list);
- bio_list_init(&conf->flushing_bio_list);
conf->last_used = -1;
for (i = 0; i < conf->raid_disks; i++) {
diff --git a/drivers/md/raid1.h b/drivers/md/raid1.h
index adf8cfd73313..cbfdf1a6acd9 100644
--- a/drivers/md/raid1.h
+++ b/drivers/md/raid1.h
@@ -35,8 +35,6 @@ struct r1_private_data_s {
struct list_head retry_list;
/* queue pending writes and submit them on unplug */
struct bio_list pending_bio_list;
- /* queue of writes that have been unplugged */
- struct bio_list flushing_bio_list;
/* for use when syncing mirrors: */
diff --git a/drivers/md/raid10.c b/drivers/md/raid10.c
index f0d082f749be..c67aa54694ae 100644
--- a/drivers/md/raid10.c
+++ b/drivers/md/raid10.c
@@ -120,7 +120,7 @@ static void * r10buf_pool_alloc(gfp_t gfp_flags, void *data)
* Allocate bios.
*/
for (j = nalloc ; j-- ; ) {
- bio = bio_alloc(gfp_flags, RESYNC_PAGES);
+ bio = bio_kmalloc(gfp_flags, RESYNC_PAGES);
if (!bio)
goto out_free_bio;
r10_bio->devs[j].bio = bio;
@@ -801,7 +801,6 @@ static int make_request(mddev_t *mddev, struct bio * bio)
const int rw = bio_data_dir(bio);
const unsigned long do_sync = (bio->bi_rw & REQ_SYNC);
const unsigned long do_fua = (bio->bi_rw & REQ_FUA);
- struct bio_list bl;
unsigned long flags;
mdk_rdev_t *blocked_rdev;
@@ -890,7 +889,7 @@ static int make_request(mddev_t *mddev, struct bio * bio)
}
mirror = conf->mirrors + disk;
- read_bio = bio_clone(bio, GFP_NOIO);
+ read_bio = bio_clone_mddev(bio, GFP_NOIO, mddev);
r10_bio->devs[slot].bio = read_bio;
@@ -950,16 +949,16 @@ static int make_request(mddev_t *mddev, struct bio * bio)
goto retry_write;
}
- atomic_set(&r10_bio->remaining, 0);
+ atomic_set(&r10_bio->remaining, 1);
+ bitmap_startwrite(mddev->bitmap, bio->bi_sector, r10_bio->sectors, 0);
- bio_list_init(&bl);
for (i = 0; i < conf->copies; i++) {
struct bio *mbio;
int d = r10_bio->devs[i].devnum;
if (!r10_bio->devs[i].bio)
continue;
- mbio = bio_clone(bio, GFP_NOIO);
+ mbio = bio_clone_mddev(bio, GFP_NOIO, mddev);
r10_bio->devs[i].bio = mbio;
mbio->bi_sector = r10_bio->devs[i].addr+
@@ -970,22 +969,22 @@ static int make_request(mddev_t *mddev, struct bio * bio)
mbio->bi_private = r10_bio;
atomic_inc(&r10_bio->remaining);
- bio_list_add(&bl, mbio);
+ spin_lock_irqsave(&conf->device_lock, flags);
+ bio_list_add(&conf->pending_bio_list, mbio);
+ blk_plug_device(mddev->queue);
+ spin_unlock_irqrestore(&conf->device_lock, flags);
}
- if (unlikely(!atomic_read(&r10_bio->remaining))) {
- /* the array is dead */
+ if (atomic_dec_and_test(&r10_bio->remaining)) {
+ /* This matches the end of raid10_end_write_request() */
+ bitmap_endwrite(r10_bio->mddev->bitmap, r10_bio->sector,
+ r10_bio->sectors,
+ !test_bit(R10BIO_Degraded, &r10_bio->state),
+ 0);
md_write_end(mddev);
raid_end_bio_io(r10_bio);
- return 0;
}
- bitmap_startwrite(mddev->bitmap, bio->bi_sector, r10_bio->sectors, 0);
- spin_lock_irqsave(&conf->device_lock, flags);
- bio_list_merge(&conf->pending_bio_list, &bl);
- blk_plug_device(mddev->queue);
- spin_unlock_irqrestore(&conf->device_lock, flags);
-
/* In case raid10d snuck in to freeze_array */
wake_up(&conf->wait_barrier);
@@ -1558,7 +1557,7 @@ static void fix_read_error(conf_t *conf, mddev_t *mddev, r10bio_t *r10_bio)
test_bit(In_sync, &rdev->flags)) {
atomic_inc(&rdev->nr_pending);
rcu_read_unlock();
- success = sync_page_io(rdev->bdev,
+ success = sync_page_io(rdev,
r10_bio->devs[sl].addr +
sect + rdev->data_offset,
s<<9,
@@ -1597,7 +1596,7 @@ static void fix_read_error(conf_t *conf, mddev_t *mddev, r10bio_t *r10_bio)
atomic_inc(&rdev->nr_pending);
rcu_read_unlock();
atomic_add(s, &rdev->corrected_errors);
- if (sync_page_io(rdev->bdev,
+ if (sync_page_io(rdev,
r10_bio->devs[sl].addr +
sect + rdev->data_offset,
s<<9, conf->tmppage, WRITE)
@@ -1634,7 +1633,7 @@ static void fix_read_error(conf_t *conf, mddev_t *mddev, r10bio_t *r10_bio)
char b[BDEVNAME_SIZE];
atomic_inc(&rdev->nr_pending);
rcu_read_unlock();
- if (sync_page_io(rdev->bdev,
+ if (sync_page_io(rdev,
r10_bio->devs[sl].addr +
sect + rdev->data_offset,
s<<9, conf->tmppage,
@@ -1747,7 +1746,8 @@ static void raid10d(mddev_t *mddev)
mdname(mddev),
bdevname(rdev->bdev,b),
(unsigned long long)r10_bio->sector);
- bio = bio_clone(r10_bio->master_bio, GFP_NOIO);
+ bio = bio_clone_mddev(r10_bio->master_bio,
+ GFP_NOIO, mddev);
r10_bio->devs[r10_bio->read_slot].bio = bio;
bio->bi_sector = r10_bio->devs[r10_bio->read_slot].addr
+ rdev->data_offset;
@@ -1820,7 +1820,7 @@ static sector_t sync_request(mddev_t *mddev, sector_t sector_nr, int *skipped, i
int disk;
int i;
int max_sync;
- int sync_blocks;
+ sector_t sync_blocks;
sector_t sectors_skipped = 0;
int chunks_skipped = 0;
diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c
index 31140d1259dc..dc574f303f8b 100644
--- a/drivers/md/raid5.c
+++ b/drivers/md/raid5.c
@@ -3876,9 +3876,9 @@ static int chunk_aligned_read(mddev_t *mddev, struct bio * raid_bio)
return 0;
}
/*
- * use bio_clone to make a copy of the bio
+ * use bio_clone_mddev to make a copy of the bio
*/
- align_bi = bio_clone(raid_bio, GFP_NOIO);
+ align_bi = bio_clone_mddev(raid_bio, GFP_NOIO, mddev);
if (!align_bi)
return 0;
/*
@@ -4360,7 +4360,7 @@ static inline sector_t sync_request(mddev_t *mddev, sector_t sector_nr, int *ski
raid5_conf_t *conf = mddev->private;
struct stripe_head *sh;
sector_t max_sector = mddev->dev_sectors;
- int sync_blocks;
+ sector_t sync_blocks;
int still_degraded = 0;
int i;
diff --git a/drivers/media/IR/Kconfig b/drivers/media/IR/Kconfig
index 490c57cc4cfe..aa4163eb7a83 100644
--- a/drivers/media/IR/Kconfig
+++ b/drivers/media/IR/Kconfig
@@ -79,6 +79,18 @@ config IR_SONY_DECODER
Enable this option if you have an infrared remote control which
uses the Sony protocol, and you need software decoding support.
+config IR_RC5_SZ_DECODER
+ tristate "Enable IR raw decoder for the RC-5 (streamzap) protocol"
+ depends on IR_CORE
+ select BITREVERSE
+ default y
+
+ ---help---
+ Enable this option if you have an infrared remote control which
+ uses the RC-5 (streamzap) protocol, and you need software decoding
+ support. (The Streamzap PC Remote uses an IR protocol that is
+ almost standard RC-5, but not quite, as it uses an additional bit.)
+
config IR_LIRC_CODEC
tristate "Enable IR to LIRC bridge"
depends on IR_CORE
@@ -89,6 +101,20 @@ config IR_LIRC_CODEC
Enable this option to pass raw IR to and from userspace via
the LIRC interface.
+config IR_ENE
+ tristate "ENE eHome Receiver/Transceiver (pnp id: ENE0100/ENE02xxx)"
+ depends on PNP
+ depends on IR_CORE
+ ---help---
+ Say Y here to enable support for integrated infrared receiver
+ /transceiver made by ENE.
+
+ You can see if you have it by looking at lspnp output.
+ Output should include ENE0100 ENE0200 or something similar.
+
+ To compile this driver as a module, choose M here: the
+ module will be called ene_ir.
+
config IR_IMON
tristate "SoundGraph iMON Receiver and Display"
depends on USB_ARCH_HAS_HCD
@@ -113,19 +139,18 @@ config IR_MCEUSB
To compile this driver as a module, choose M here: the
module will be called mceusb.
-config IR_ENE
- tristate "ENE eHome Receiver/Transciever (pnp id: ENE0100/ENE02xxx)"
+config IR_NUVOTON
+ tristate "Nuvoton w836x7hg Consumer Infrared Transceiver"
depends on PNP
depends on IR_CORE
---help---
Say Y here to enable support for integrated infrared receiver
- /transciever made by ENE.
-
- You can see if you have it by looking at lspnp output.
- Output should include ENE0100 ENE0200 or something similiar.
+ /transceiver made by Nuvoton (formerly Winbond). This chip is
+ found in the ASRock ION 330HT, as well as assorted Intel
+ DP55-series motherboards (and of course, possibly others).
To compile this driver as a module, choose M here: the
- module will be called ene_ir.
+ module will be called nuvoton-cir.
config IR_STREAMZAP
tristate "Streamzap PC Remote IR Receiver"
diff --git a/drivers/media/IR/Makefile b/drivers/media/IR/Makefile
index 53676838fe97..f9574adab82a 100644
--- a/drivers/media/IR/Makefile
+++ b/drivers/media/IR/Makefile
@@ -11,10 +11,12 @@ obj-$(CONFIG_IR_RC5_DECODER) += ir-rc5-decoder.o
obj-$(CONFIG_IR_RC6_DECODER) += ir-rc6-decoder.o
obj-$(CONFIG_IR_JVC_DECODER) += ir-jvc-decoder.o
obj-$(CONFIG_IR_SONY_DECODER) += ir-sony-decoder.o
+obj-$(CONFIG_IR_RC5_SZ_DECODER) += ir-rc5-sz-decoder.o
obj-$(CONFIG_IR_LIRC_CODEC) += ir-lirc-codec.o
# stand-alone IR receivers/transmitters
obj-$(CONFIG_IR_IMON) += imon.o
obj-$(CONFIG_IR_MCEUSB) += mceusb.o
+obj-$(CONFIG_IR_NUVOTON) += nuvoton-cir.o
obj-$(CONFIG_IR_ENE) += ene_ir.o
obj-$(CONFIG_IR_STREAMZAP) += streamzap.o
diff --git a/drivers/media/IR/ene_ir.c b/drivers/media/IR/ene_ir.c
index 5447750f5e38..7637babcd262 100644
--- a/drivers/media/IR/ene_ir.c
+++ b/drivers/media/IR/ene_ir.c
@@ -1,5 +1,5 @@
/*
- * driver for ENE KB3926 B/C/D CIR (pnp id: ENE0XXX)
+ * driver for ENE KB3926 B/C/D/E/F CIR (pnp id: ENE0XXX)
*
* Copyright (C) 2010 Maxim Levitsky <maximlevitsky@gmail.com>
*
@@ -17,6 +17,17 @@
* along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307
* USA
+ *
+ * Special thanks to:
+ * Sami R. <maesesami@gmail.com> for a lot of help in debugging and therefore
+ * bringing to life support for transmission & learning mode.
+ *
+ * Charlie Andrews <charliethepilot@googlemail.com> for lots of help in
+ * bringing up the support for the new firmware buffer that is popular
+ * on the latest notebooks
+ *
+ * ENE for partial device documentation
+ *
*/
#include <linux/kernel.h>
@@ -31,51 +42,59 @@
#include <media/ir-common.h>
#include "ene_ir.h"
-
-static int sample_period = -1;
-static int enable_idle = 1;
-static int input = 1;
+static int sample_period;
+static bool learning_mode_force;
static int debug;
-static int txsim;
+static bool txsim;
-static int ene_irq_status(struct ene_device *dev);
+static void ene_set_reg_addr(struct ene_device *dev, u16 reg)
+{
+ outb(reg >> 8, dev->hw_io + ENE_ADDR_HI);
+ outb(reg & 0xFF, dev->hw_io + ENE_ADDR_LO);
+}
/* read a hardware register */
-static u8 ene_hw_read_reg(struct ene_device *dev, u16 reg)
+static u8 ene_read_reg(struct ene_device *dev, u16 reg)
{
u8 retval;
- outb(reg >> 8, dev->hw_io + ENE_ADDR_HI);
- outb(reg & 0xFF, dev->hw_io + ENE_ADDR_LO);
+ ene_set_reg_addr(dev, reg);
retval = inb(dev->hw_io + ENE_IO);
-
- ene_dbg_verbose("reg %04x == %02x", reg, retval);
+ dbg_regs("reg %04x == %02x", reg, retval);
return retval;
}
/* write a hardware register */
-static void ene_hw_write_reg(struct ene_device *dev, u16 reg, u8 value)
+static void ene_write_reg(struct ene_device *dev, u16 reg, u8 value)
{
- outb(reg >> 8, dev->hw_io + ENE_ADDR_HI);
- outb(reg & 0xFF, dev->hw_io + ENE_ADDR_LO);
+ dbg_regs("reg %04x <- %02x", reg, value);
+ ene_set_reg_addr(dev, reg);
outb(value, dev->hw_io + ENE_IO);
-
- ene_dbg_verbose("reg %04x <- %02x", reg, value);
}
-/* change specific bits in hardware register */
-static void ene_hw_write_reg_mask(struct ene_device *dev,
- u16 reg, u8 value, u8 mask)
+/* Set bits in hardware register */
+static void ene_set_reg_mask(struct ene_device *dev, u16 reg, u8 mask)
{
- u8 regvalue;
-
- outb(reg >> 8, dev->hw_io + ENE_ADDR_HI);
- outb(reg & 0xFF, dev->hw_io + ENE_ADDR_LO);
+ dbg_regs("reg %04x |= %02x", reg, mask);
+ ene_set_reg_addr(dev, reg);
+ outb(inb(dev->hw_io + ENE_IO) | mask, dev->hw_io + ENE_IO);
+}
- regvalue = inb(dev->hw_io + ENE_IO) & ~mask;
- regvalue |= (value & mask);
- outb(regvalue, dev->hw_io + ENE_IO);
+/* Clear bits in hardware register */
+static void ene_clear_reg_mask(struct ene_device *dev, u16 reg, u8 mask)
+{
+ dbg_regs("reg %04x &= ~%02x ", reg, mask);
+ ene_set_reg_addr(dev, reg);
+ outb(inb(dev->hw_io + ENE_IO) & ~mask, dev->hw_io + ENE_IO);
+}
- ene_dbg_verbose("reg %04x <- %02x (mask=%02x)", reg, value, mask);
+/* A helper to set/clear a bit in register according to boolean variable */
+static void ene_set_clear_reg_mask(struct ene_device *dev, u16 reg, u8 mask,
+ bool set)
+{
+ if (set)
+ ene_set_reg_mask(dev, reg, mask);
+ else
+ ene_clear_reg_mask(dev, reg, mask);
}
/* detect hardware features */
@@ -83,194 +102,378 @@ static int ene_hw_detect(struct ene_device *dev)
{
u8 chip_major, chip_minor;
u8 hw_revision, old_ver;
- u8 tmp;
- u8 fw_capabilities;
- int pll_freq;
+ u8 fw_reg2, fw_reg1;
- tmp = ene_hw_read_reg(dev, ENE_HW_UNK);
- ene_hw_write_reg(dev, ENE_HW_UNK, tmp & ~ENE_HW_UNK_CLR);
+ ene_clear_reg_mask(dev, ENE_ECSTS, ENE_ECSTS_RSRVD);
+ chip_major = ene_read_reg(dev, ENE_ECVER_MAJOR);
+ chip_minor = ene_read_reg(dev, ENE_ECVER_MINOR);
+ ene_set_reg_mask(dev, ENE_ECSTS, ENE_ECSTS_RSRVD);
- chip_major = ene_hw_read_reg(dev, ENE_HW_VER_MAJOR);
- chip_minor = ene_hw_read_reg(dev, ENE_HW_VER_MINOR);
+ hw_revision = ene_read_reg(dev, ENE_ECHV);
+ old_ver = ene_read_reg(dev, ENE_HW_VER_OLD);
- ene_hw_write_reg(dev, ENE_HW_UNK, tmp);
- hw_revision = ene_hw_read_reg(dev, ENE_HW_VERSION);
- old_ver = ene_hw_read_reg(dev, ENE_HW_VER_OLD);
+ dev->pll_freq = (ene_read_reg(dev, ENE_PLLFRH) << 4) +
+ (ene_read_reg(dev, ENE_PLLFRL) >> 4);
- pll_freq = (ene_hw_read_reg(dev, ENE_PLLFRH) << 4) +
- (ene_hw_read_reg(dev, ENE_PLLFRL) >> 4);
-
- if (pll_freq != 1000)
- dev->rx_period_adjust = 4;
- else
- dev->rx_period_adjust = 2;
-
-
- ene_printk(KERN_NOTICE, "PLL freq = %d\n", pll_freq);
+ if (sample_period != ENE_DEFAULT_SAMPLE_PERIOD)
+ dev->rx_period_adjust =
+ dev->pll_freq == ENE_DEFAULT_PLL_FREQ ? 2 : 4;
if (hw_revision == 0xFF) {
-
- ene_printk(KERN_WARNING, "device seems to be disabled\n");
- ene_printk(KERN_WARNING,
- "send a mail to lirc-list@lists.sourceforge.net\n");
- ene_printk(KERN_WARNING, "please attach output of acpidump\n");
+ ene_warn("device seems to be disabled");
+ ene_warn("send a mail to lirc-list@lists.sourceforge.net");
+ ene_warn("please attach output of acpidump and dmidecode");
return -ENODEV;
}
+ ene_notice("chip is 0x%02x%02x - kbver = 0x%02x, rev = 0x%02x",
+ chip_major, chip_minor, old_ver, hw_revision);
+
+ ene_notice("PLL freq = %d", dev->pll_freq);
+
if (chip_major == 0x33) {
- ene_printk(KERN_WARNING, "chips 0x33xx aren't supported\n");
+ ene_warn("chips 0x33xx aren't supported");
return -ENODEV;
}
if (chip_major == 0x39 && chip_minor == 0x26 && hw_revision == 0xC0) {
dev->hw_revision = ENE_HW_C;
+ ene_notice("KB3926C detected");
} else if (old_ver == 0x24 && hw_revision == 0xC0) {
dev->hw_revision = ENE_HW_B;
- ene_printk(KERN_NOTICE, "KB3926B detected\n");
+ ene_notice("KB3926B detected");
} else {
dev->hw_revision = ENE_HW_D;
- ene_printk(KERN_WARNING,
- "unknown ENE chip detected, assuming KB3926D\n");
- ene_printk(KERN_WARNING,
- "driver support might be not complete");
-
+ ene_notice("KB3926D or higher detected");
}
- ene_printk(KERN_DEBUG,
- "chip is 0x%02x%02x - kbver = 0x%02x, rev = 0x%02x\n",
- chip_major, chip_minor, old_ver, hw_revision);
-
/* detect features hardware supports */
if (dev->hw_revision < ENE_HW_C)
return 0;
- fw_capabilities = ene_hw_read_reg(dev, ENE_FW2);
- ene_dbg("Firmware capabilities: %02x", fw_capabilities);
+ fw_reg1 = ene_read_reg(dev, ENE_FW1);
+ fw_reg2 = ene_read_reg(dev, ENE_FW2);
+
+ ene_notice("Firmware regs: %02x %02x", fw_reg1, fw_reg2);
- dev->hw_gpio40_learning = fw_capabilities & ENE_FW2_GP40_AS_LEARN;
- dev->hw_learning_and_tx_capable = fw_capabilities & ENE_FW2_LEARNING;
+ dev->hw_use_gpio_0a = !!(fw_reg2 & ENE_FW2_GP0A);
+ dev->hw_learning_and_tx_capable = !!(fw_reg2 & ENE_FW2_LEARNING);
+ dev->hw_extra_buffer = !!(fw_reg1 & ENE_FW1_HAS_EXTRA_BUF);
- dev->hw_fan_as_normal_input = dev->hw_learning_and_tx_capable &&
- (fw_capabilities & ENE_FW2_FAN_AS_NRML_IN);
+ if (dev->hw_learning_and_tx_capable)
+ dev->hw_fan_input = !!(fw_reg2 & ENE_FW2_FAN_INPUT);
- ene_printk(KERN_NOTICE, "hardware features:\n");
- ene_printk(KERN_NOTICE,
- "learning and transmit %s, gpio40_learn %s, fan_in %s\n",
- dev->hw_learning_and_tx_capable ? "on" : "off",
- dev->hw_gpio40_learning ? "on" : "off",
- dev->hw_fan_as_normal_input ? "on" : "off");
+ ene_notice("Hardware features:");
if (dev->hw_learning_and_tx_capable) {
- ene_printk(KERN_WARNING,
- "Device supports transmitting, but that support is\n");
- ene_printk(KERN_WARNING,
- "lightly tested. Please test it and mail\n");
- ene_printk(KERN_WARNING,
- "lirc-list@lists.sourceforge.net\n");
+ ene_notice("* Supports transmitting & learning mode");
+ ene_notice(" This feature is rare and therefore,");
+ ene_notice(" you are welcome to test it,");
+ ene_notice(" and/or contact the author via:");
+ ene_notice(" lirc-list@lists.sourceforge.net");
+ ene_notice(" or maximlevitsky@gmail.com");
+
+ ene_notice("* Uses GPIO %s for IR raw input",
+ dev->hw_use_gpio_0a ? "40" : "0A");
+
+ if (dev->hw_fan_input)
+ ene_notice("* Uses unused fan feedback input as source"
+ " of demodulated IR data");
}
+
+ if (!dev->hw_fan_input)
+ ene_notice("* Uses GPIO %s for IR demodulated input",
+ dev->hw_use_gpio_0a ? "0A" : "40");
+
+ if (dev->hw_extra_buffer)
+ ene_notice("* Uses new style input buffer");
return 0;
}
-/* this enables/disables IR input via gpio40*/
-static void ene_enable_gpio40_receive(struct ene_device *dev, int enable)
+/* Read properties of hw sample buffer */
+static void ene_rx_setup_hw_buffer(struct ene_device *dev)
{
- ene_hw_write_reg_mask(dev, ENE_CIR_CONF2, enable ?
- 0 : ENE_CIR_CONF2_GPIO40DIS,
- ENE_CIR_CONF2_GPIO40DIS);
+ u16 tmp;
+
+ ene_rx_read_hw_pointer(dev);
+ dev->r_pointer = dev->w_pointer;
+
+ if (!dev->hw_extra_buffer) {
+ dev->buffer_len = ENE_FW_PACKET_SIZE * 2;
+ return;
+ }
+
+ tmp = ene_read_reg(dev, ENE_FW_SAMPLE_BUFFER);
+ tmp |= ene_read_reg(dev, ENE_FW_SAMPLE_BUFFER+1) << 8;
+ dev->extra_buf1_address = tmp;
+
+ dev->extra_buf1_len = ene_read_reg(dev, ENE_FW_SAMPLE_BUFFER + 2);
+
+ tmp = ene_read_reg(dev, ENE_FW_SAMPLE_BUFFER + 3);
+ tmp |= ene_read_reg(dev, ENE_FW_SAMPLE_BUFFER + 4) << 8;
+ dev->extra_buf2_address = tmp;
+
+ dev->extra_buf2_len = ene_read_reg(dev, ENE_FW_SAMPLE_BUFFER + 5);
+
+ dev->buffer_len = dev->extra_buf1_len + dev->extra_buf2_len + 8;
+
+ ene_notice("Hardware uses 2 extended buffers:");
+ ene_notice(" 0x%04x - len : %d", dev->extra_buf1_address,
+ dev->extra_buf1_len);
+ ene_notice(" 0x%04x - len : %d", dev->extra_buf2_address,
+ dev->extra_buf2_len);
+
+ ene_notice("Total buffer len = %d", dev->buffer_len);
+
+ if (dev->buffer_len > 64 || dev->buffer_len < 16)
+ goto error;
+
+ if (dev->extra_buf1_address > 0xFBFC ||
+ dev->extra_buf1_address < 0xEC00)
+ goto error;
+
+ if (dev->extra_buf2_address > 0xFBFC ||
+ dev->extra_buf2_address < 0xEC00)
+ goto error;
+
+ if (dev->r_pointer > dev->buffer_len)
+ goto error;
+
+ ene_set_reg_mask(dev, ENE_FW1, ENE_FW1_EXTRA_BUF_HND);
+ return;
+error:
+ ene_warn("Error validating extra buffers, device probably won't work");
+ dev->hw_extra_buffer = false;
+ ene_clear_reg_mask(dev, ENE_FW1, ENE_FW1_EXTRA_BUF_HND);
}
-/* this enables/disables IR via standard input */
-static void ene_enable_normal_receive(struct ene_device *dev, int enable)
+
+/* Restore the pointers to extra buffers - to make module reload work */
+static void ene_rx_restore_hw_buffer(struct ene_device *dev)
{
- ene_hw_write_reg(dev, ENE_CIR_CONF1, enable ? ENE_CIR_CONF1_RX_ON : 0);
+ if (!dev->hw_extra_buffer)
+ return;
+
+ ene_write_reg(dev, ENE_FW_SAMPLE_BUFFER + 0,
+ dev->extra_buf1_address & 0xFF);
+ ene_write_reg(dev, ENE_FW_SAMPLE_BUFFER + 1,
+ dev->extra_buf1_address >> 8);
+ ene_write_reg(dev, ENE_FW_SAMPLE_BUFFER + 2, dev->extra_buf1_len);
+
+ ene_write_reg(dev, ENE_FW_SAMPLE_BUFFER + 3,
+ dev->extra_buf2_address & 0xFF);
+ ene_write_reg(dev, ENE_FW_SAMPLE_BUFFER + 4,
+ dev->extra_buf2_address >> 8);
+ ene_write_reg(dev, ENE_FW_SAMPLE_BUFFER + 5,
+ dev->extra_buf2_len);
+ ene_clear_reg_mask(dev, ENE_FW1, ENE_FW1_EXTRA_BUF_HND);
}
-/* this enables/disables IR input via unused fan tachtometer input */
-static void ene_enable_fan_receive(struct ene_device *dev, int enable)
+/* Read hardware write pointer */
+static void ene_rx_read_hw_pointer(struct ene_device *dev)
{
- if (!enable)
- ene_hw_write_reg(dev, ENE_FAN_AS_IN1, 0);
- else {
- ene_hw_write_reg(dev, ENE_FAN_AS_IN1, ENE_FAN_AS_IN1_EN);
- ene_hw_write_reg(dev, ENE_FAN_AS_IN2, ENE_FAN_AS_IN2_EN);
- }
- dev->rx_fan_input_inuse = enable;
+ if (dev->hw_extra_buffer)
+ dev->w_pointer = ene_read_reg(dev, ENE_FW_RX_POINTER);
+ else
+ dev->w_pointer = ene_read_reg(dev, ENE_FW2)
+ & ENE_FW2_BUF_WPTR ? 0 : ENE_FW_PACKET_SIZE;
+
+ dbg_verbose("RB: HW write pointer: %02x, driver read pointer: %02x",
+ dev->w_pointer, dev->r_pointer);
}
+/* Gets address of next sample from HW ring buffer */
+static int ene_rx_get_sample_reg(struct ene_device *dev)
+{
+ int r_pointer;
+
+ if (dev->r_pointer == dev->w_pointer) {
+ dbg_verbose("RB: hit end, try update w_pointer");
+ ene_rx_read_hw_pointer(dev);
+ }
+
+ if (dev->r_pointer == dev->w_pointer) {
+ dbg_verbose("RB: end of data at %d", dev->r_pointer);
+ return 0;
+ }
+
+ dbg_verbose("RB: reading at offset %d", dev->r_pointer);
+ r_pointer = dev->r_pointer;
+
+ dev->r_pointer++;
+ if (dev->r_pointer == dev->buffer_len)
+ dev->r_pointer = 0;
+
+ dbg_verbose("RB: next read will be from offset %d", dev->r_pointer);
+
+ if (r_pointer < 8) {
+ dbg_verbose("RB: read at main buffer at %d", r_pointer);
+ return ENE_FW_SAMPLE_BUFFER + r_pointer;
+ }
+
+ r_pointer -= 8;
+
+ if (r_pointer < dev->extra_buf1_len) {
+ dbg_verbose("RB: read at 1st extra buffer at %d", r_pointer);
+ return dev->extra_buf1_address + r_pointer;
+ }
+
+ r_pointer -= dev->extra_buf1_len;
+
+ if (r_pointer < dev->extra_buf2_len) {
+ dbg_verbose("RB: read at 2nd extra buffer at %d", r_pointer);
+ return dev->extra_buf2_address + r_pointer;
+ }
+
+ dbg("attempt to read beyond ring buffer end");
+ return 0;
+}
/* Sense current received carrier */
-static int ene_rx_sense_carrier(struct ene_device *dev)
+void ene_rx_sense_carrier(struct ene_device *dev)
{
- int period = ene_hw_read_reg(dev, ENE_RX_CARRIER);
- int carrier;
- ene_dbg("RX: hardware carrier period = %02x", period);
+ DEFINE_IR_RAW_EVENT(ev);
- if (!(period & ENE_RX_CARRIER_VALID))
- return 0;
+ int carrier, duty_cycle;
+ int period = ene_read_reg(dev, ENE_CIRCAR_PRD);
+ int hperiod = ene_read_reg(dev, ENE_CIRCAR_HPRD);
+
+ if (!(period & ENE_CIRCAR_PRD_VALID))
+ return;
- period &= ~ENE_RX_CARRIER_VALID;
+ period &= ~ENE_CIRCAR_PRD_VALID;
if (!period)
- return 0;
+ return;
+
+ dbg("RX: hardware carrier period = %02x", period);
+ dbg("RX: hardware carrier pulse period = %02x", hperiod);
carrier = 2000000 / period;
- ene_dbg("RX: sensed carrier = %d Hz", carrier);
- return carrier;
+ duty_cycle = (hperiod * 100) / period;
+ dbg("RX: sensed carrier = %d Hz, duty cycle %d%%",
+ carrier, duty_cycle);
+ if (dev->carrier_detect_enabled) {
+ ev.carrier_report = true;
+ ev.carrier = carrier;
+ ev.duty_cycle = duty_cycle;
+ ir_raw_event_store(dev->idev, &ev);
+ }
}
-/* determine which input to use*/
-static void ene_rx_set_inputs(struct ene_device *dev)
+/* this enables/disables the CIR RX engine */
+static void ene_rx_enable_cir_engine(struct ene_device *dev, bool enable)
{
- int learning_mode = dev->learning_enabled;
-
- ene_dbg("RX: setup receiver, learning mode = %d", learning_mode);
+ ene_set_clear_reg_mask(dev, ENE_CIRCFG,
+ ENE_CIRCFG_RX_EN | ENE_CIRCFG_RX_IRQ, enable);
+}
- ene_enable_normal_receive(dev, 1);
+/* this selects input for CIR engine. Either GPIO 0A or GPIO40 */
+static void ene_rx_select_input(struct ene_device *dev, bool gpio_0a)
+{
+ ene_set_clear_reg_mask(dev, ENE_CIRCFG2, ENE_CIRCFG2_GPIO0A, gpio_0a);
+}
- /* old hardware doesn't support learning mode for sure */
- if (dev->hw_revision <= ENE_HW_B)
+/*
+ * this enables alternative input via fan tachometer sensor and bypasses
+ * the hw CIR engine
+ */
+static void ene_rx_enable_fan_input(struct ene_device *dev, bool enable)
+{
+ if (!dev->hw_fan_input)
return;
- /* receiver not learning capable, still set gpio40 correctly */
- if (!dev->hw_learning_and_tx_capable) {
- ene_enable_gpio40_receive(dev, !dev->hw_gpio40_learning);
- return;
+ if (!enable)
+ ene_write_reg(dev, ENE_FAN_AS_IN1, 0);
+ else {
+ ene_write_reg(dev, ENE_FAN_AS_IN1, ENE_FAN_AS_IN1_EN);
+ ene_write_reg(dev, ENE_FAN_AS_IN2, ENE_FAN_AS_IN2_EN);
}
+}
+
+/* setup the receiver for RX */
+static void ene_rx_setup(struct ene_device *dev)
+{
+ bool learning_mode = dev->learning_mode_enabled ||
+ dev->carrier_detect_enabled;
+ int sample_period_adjust = 0;
+
+ dbg("RX: setup receiver, learning mode = %d", learning_mode);
+
+
+ /* This selects RLC input and clears CFG2 settings */
+ ene_write_reg(dev, ENE_CIRCFG2, 0x00);
+
+ /* set sample period*/
+ if (sample_period == ENE_DEFAULT_SAMPLE_PERIOD)
+ sample_period_adjust =
+ dev->pll_freq == ENE_DEFAULT_PLL_FREQ ? 1 : 2;
+
+ ene_write_reg(dev, ENE_CIRRLC_CFG,
+ (sample_period + sample_period_adjust) |
+ ENE_CIRRLC_CFG_OVERFLOW);
+ /* revB doesn't support inputs */
+ if (dev->hw_revision < ENE_HW_C)
+ goto select_timeout;
- /* enable learning mode */
if (learning_mode) {
- ene_enable_gpio40_receive(dev, dev->hw_gpio40_learning);
- /* fan input is not used for learning */
- if (dev->hw_fan_as_normal_input)
- ene_enable_fan_receive(dev, 0);
+ WARN_ON(!dev->hw_learning_and_tx_capable);
- /* disable learning mode */
- } else {
- if (dev->hw_fan_as_normal_input) {
- ene_enable_fan_receive(dev, 1);
- ene_enable_normal_receive(dev, 0);
- } else
- ene_enable_gpio40_receive(dev,
- !dev->hw_gpio40_learning);
- }
+ /* Enable the opposite of the normal input:
+ that means that if GPIO40 is normally used, use GPIO0A
+ and vice versa.
+ This input will carry the non-demodulated
+ signal, and we will tell the hw to demodulate it itself */
+ ene_rx_select_input(dev, !dev->hw_use_gpio_0a);
+ dev->rx_fan_input_inuse = false;
- /* set few additional settings for this mode */
- ene_hw_write_reg_mask(dev, ENE_CIR_CONF1, learning_mode ?
- ENE_CIR_CONF1_LEARN1 : 0, ENE_CIR_CONF1_LEARN1);
+ /* Enable carrier demodulation */
+ ene_set_reg_mask(dev, ENE_CIRCFG, ENE_CIRCFG_CARR_DEMOD);
- ene_hw_write_reg_mask(dev, ENE_CIR_CONF2, learning_mode ?
- ENE_CIR_CONF2_LEARN2 : 0, ENE_CIR_CONF2_LEARN2);
+ /* Enable carrier detection */
+ ene_write_reg(dev, ENE_CIRCAR_PULS, 0x63);
+ ene_set_clear_reg_mask(dev, ENE_CIRCFG2, ENE_CIRCFG2_CARR_DETECT,
+ dev->carrier_detect_enabled || debug);
+ } else {
+ if (dev->hw_fan_input)
+ dev->rx_fan_input_inuse = true;
+ else
+ ene_rx_select_input(dev, dev->hw_use_gpio_0a);
+
+ /* Disable carrier detection & demodulation */
+ ene_clear_reg_mask(dev, ENE_CIRCFG, ENE_CIRCFG_CARR_DEMOD);
+ ene_clear_reg_mask(dev, ENE_CIRCFG2, ENE_CIRCFG2_CARR_DETECT);
+ }
+select_timeout:
if (dev->rx_fan_input_inuse) {
- dev->props->rx_resolution = ENE_SAMPLE_PERIOD_FAN * 1000;
+ dev->props->rx_resolution = MS_TO_NS(ENE_FW_SAMPLE_PERIOD_FAN);
- dev->props->timeout =
- ENE_FAN_VALUE_MASK * ENE_SAMPLE_PERIOD_FAN * 1000;
+ /* Fan input doesn't support timeouts; it just ends the
+ input with a maximum sample */
+ dev->props->min_timeout = dev->props->max_timeout =
+ MS_TO_NS(ENE_FW_SMPL_BUF_FAN_MSK *
+ ENE_FW_SAMPLE_PERIOD_FAN);
} else {
- dev->props->rx_resolution = sample_period * 1000;
- dev->props->timeout = ENE_MAXGAP * 1000;
+ dev->props->rx_resolution = MS_TO_NS(sample_period);
+
+ /* Theoretically the timeout is unlimited, but we cap it
+ * because it was seen that on one device, it
+ * would stop sending spaces after around 250 msec.
+ * Besides, this is close to 2^32 anyway and timeout is u32.
+ */
+ dev->props->min_timeout = MS_TO_NS(127 * sample_period);
+ dev->props->max_timeout = MS_TO_NS(200000);
}
+
+ if (dev->hw_learning_and_tx_capable)
+ dev->props->tx_resolution = MS_TO_NS(sample_period);
+
+ if (dev->props->timeout > dev->props->max_timeout)
+ dev->props->timeout = dev->props->max_timeout;
+ if (dev->props->timeout < dev->props->min_timeout)
+ dev->props->timeout = dev->props->min_timeout;
}
/* Enable the device for receive */
@@ -278,145 +481,157 @@ static void ene_rx_enable(struct ene_device *dev)
{
u8 reg_value;
+ /* Enable system interrupt */
if (dev->hw_revision < ENE_HW_C) {
- ene_hw_write_reg(dev, ENEB_IRQ, dev->irq << 1);
- ene_hw_write_reg(dev, ENEB_IRQ_UNK1, 0x01);
+ ene_write_reg(dev, ENEB_IRQ, dev->irq << 1);
+ ene_write_reg(dev, ENEB_IRQ_UNK1, 0x01);
} else {
- reg_value = ene_hw_read_reg(dev, ENEC_IRQ) & 0xF0;
- reg_value |= ENEC_IRQ_UNK_EN;
- reg_value &= ~ENEC_IRQ_STATUS;
- reg_value |= (dev->irq & ENEC_IRQ_MASK);
- ene_hw_write_reg(dev, ENEC_IRQ, reg_value);
- ene_hw_write_reg(dev, ENE_TX_UNK1, 0x63);
+ reg_value = ene_read_reg(dev, ENE_IRQ) & 0xF0;
+ reg_value |= ENE_IRQ_UNK_EN;
+ reg_value &= ~ENE_IRQ_STATUS;
+ reg_value |= (dev->irq & ENE_IRQ_MASK);
+ ene_write_reg(dev, ENE_IRQ, reg_value);
}
- ene_hw_write_reg(dev, ENE_CIR_CONF2, 0x00);
- ene_rx_set_inputs(dev);
-
- /* set sampling period */
- ene_hw_write_reg(dev, ENE_CIR_SAMPLE_PERIOD, sample_period);
+ /* Enable inputs */
+ ene_rx_enable_fan_input(dev, dev->rx_fan_input_inuse);
+ ene_rx_enable_cir_engine(dev, !dev->rx_fan_input_inuse);
/* ack any pending irqs - just in case */
ene_irq_status(dev);
/* enable firmware bits */
- ene_hw_write_reg_mask(dev, ENE_FW1,
- ENE_FW1_ENABLE | ENE_FW1_IRQ,
- ENE_FW1_ENABLE | ENE_FW1_IRQ);
+ ene_set_reg_mask(dev, ENE_FW1, ENE_FW1_ENABLE | ENE_FW1_IRQ);
/* enter idle mode */
- ir_raw_event_set_idle(dev->idev, 1);
- ir_raw_event_reset(dev->idev);
-
+ ir_raw_event_set_idle(dev->idev, true);
+ dev->rx_enabled = true;
}
/* Disable the device receiver */
static void ene_rx_disable(struct ene_device *dev)
{
/* disable inputs */
- ene_enable_normal_receive(dev, 0);
-
- if (dev->hw_fan_as_normal_input)
- ene_enable_fan_receive(dev, 0);
+ ene_rx_enable_cir_engine(dev, false);
+ ene_rx_enable_fan_input(dev, false);
/* disable hardware IRQ and firmware flag */
- ene_hw_write_reg_mask(dev, ENE_FW1, 0, ENE_FW1_ENABLE | ENE_FW1_IRQ);
+ ene_clear_reg_mask(dev, ENE_FW1, ENE_FW1_ENABLE | ENE_FW1_IRQ);
- ir_raw_event_set_idle(dev->idev, 1);
- ir_raw_event_reset(dev->idev);
+ ir_raw_event_set_idle(dev->idev, true);
+ dev->rx_enabled = false;
}
+/* This resets the receiver. Useful to stop a stream of spaces at the end of
+ * a transmission
+ */
+static void ene_rx_reset(struct ene_device *dev)
+{
+ ene_clear_reg_mask(dev, ENE_CIRCFG, ENE_CIRCFG_RX_EN);
+ ene_set_reg_mask(dev, ENE_CIRCFG, ENE_CIRCFG_RX_EN);
+}
-/* prepare transmission */
-static void ene_tx_prepare(struct ene_device *dev)
+/* Set up the TX carrier frequency and duty cycle */
+static void ene_tx_set_carrier(struct ene_device *dev)
{
- u8 conf1;
+ u8 tx_puls_width;
+ unsigned long flags;
- conf1 = ene_hw_read_reg(dev, ENE_CIR_CONF1);
- dev->saved_conf1 = conf1;
+ spin_lock_irqsave(&dev->hw_lock, flags);
- if (dev->hw_revision == ENE_HW_C)
- conf1 &= ~ENE_CIR_CONF1_TX_CLEAR;
+ ene_set_clear_reg_mask(dev, ENE_CIRCFG,
+ ENE_CIRCFG_TX_CARR, dev->tx_period > 0);
- /* Enable TX engine */
- conf1 |= ENE_CIR_CONF1_TX_ON;
+ if (!dev->tx_period)
+ goto unlock;
- /* Set carrier */
- if (dev->tx_period) {
+ BUG_ON(dev->tx_duty_cycle >= 100 || dev->tx_duty_cycle <= 0);
- /* NOTE: duty cycle handling is just a guess, it might
- not be aviable. Default values were tested */
- int tx_period_in500ns = dev->tx_period * 2;
+ tx_puls_width = dev->tx_period / (100 / dev->tx_duty_cycle);
- int tx_pulse_width_in_500ns =
- tx_period_in500ns / (100 / dev->tx_duty_cycle);
+ if (!tx_puls_width)
+ tx_puls_width = 1;
- if (!tx_pulse_width_in_500ns)
- tx_pulse_width_in_500ns = 1;
+ dbg("TX: pulse distance = %d * 500 ns", dev->tx_period);
+ dbg("TX: pulse width = %d * 500 ns", tx_puls_width);
- ene_dbg("TX: pulse distance = %d * 500 ns", tx_period_in500ns);
- ene_dbg("TX: pulse width = %d * 500 ns",
- tx_pulse_width_in_500ns);
+ ene_write_reg(dev, ENE_CIRMOD_PRD, dev->tx_period | ENE_CIRMOD_PRD_POL);
+ ene_write_reg(dev, ENE_CIRMOD_HPRD, tx_puls_width);
+unlock:
+ spin_unlock_irqrestore(&dev->hw_lock, flags);
+}
- ene_hw_write_reg(dev, ENE_TX_PERIOD, ENE_TX_PERIOD_UNKBIT |
- tx_period_in500ns);
+/* Enable/disable transmitters */
+static void ene_tx_set_transmitters(struct ene_device *dev)
+{
+ unsigned long flags;
- ene_hw_write_reg(dev, ENE_TX_PERIOD_PULSE,
- tx_pulse_width_in_500ns);
+ spin_lock_irqsave(&dev->hw_lock, flags);
+ ene_set_clear_reg_mask(dev, ENE_GPIOFS8, ENE_GPIOFS8_GPIO41,
+ !!(dev->transmitter_mask & 0x01));
+ ene_set_clear_reg_mask(dev, ENE_GPIOFS1, ENE_GPIOFS1_GPIO0D,
+ !!(dev->transmitter_mask & 0x02));
+ spin_unlock_irqrestore(&dev->hw_lock, flags);
+}
- conf1 |= ENE_CIR_CONF1_TX_CARR;
- } else
- conf1 &= ~ENE_CIR_CONF1_TX_CARR;
+/* prepare transmission */
+static void ene_tx_enable(struct ene_device *dev)
+{
+ u8 conf1 = ene_read_reg(dev, ENE_CIRCFG);
+ u8 fwreg2 = ene_read_reg(dev, ENE_FW2);
+
+ dev->saved_conf1 = conf1;
+
+ /* Show information about currently connected transmitter jacks */
+ if (fwreg2 & ENE_FW2_EMMITER1_CONN)
+ dbg("TX: Transmitter #1 is connected");
+
+ if (fwreg2 & ENE_FW2_EMMITER2_CONN)
+ dbg("TX: Transmitter #2 is connected");
- ene_hw_write_reg(dev, ENE_CIR_CONF1, conf1);
+ if (!(fwreg2 & (ENE_FW2_EMMITER1_CONN | ENE_FW2_EMMITER2_CONN)))
+ ene_warn("TX: transmitter cable isn't connected!");
+ /* disable receive on revc */
+ if (dev->hw_revision == ENE_HW_C)
+ conf1 &= ~ENE_CIRCFG_RX_EN;
+
+ /* Enable TX engine */
+ conf1 |= ENE_CIRCFG_TX_EN | ENE_CIRCFG_TX_IRQ;
+ ene_write_reg(dev, ENE_CIRCFG, conf1);
}
/* end transmission */
-static void ene_tx_complete(struct ene_device *dev)
+static void ene_tx_disable(struct ene_device *dev)
{
- ene_hw_write_reg(dev, ENE_CIR_CONF1, dev->saved_conf1);
+ ene_write_reg(dev, ENE_CIRCFG, dev->saved_conf1);
dev->tx_buffer = NULL;
}
-/* set transmit mask */
-static void ene_tx_hw_set_transmiter_mask(struct ene_device *dev)
-{
- u8 txport1 = ene_hw_read_reg(dev, ENE_TX_PORT1) & ~ENE_TX_PORT1_EN;
- u8 txport2 = ene_hw_read_reg(dev, ENE_TX_PORT2) & ~ENE_TX_PORT2_EN;
-
- if (dev->transmitter_mask & 0x01)
- txport1 |= ENE_TX_PORT1_EN;
-
- if (dev->transmitter_mask & 0x02)
- txport2 |= ENE_TX_PORT2_EN;
-
- ene_hw_write_reg(dev, ENE_TX_PORT1, txport1);
- ene_hw_write_reg(dev, ENE_TX_PORT2, txport2);
-}
/* TX one sample - must be called with dev->hw_lock*/
static void ene_tx_sample(struct ene_device *dev)
{
u8 raw_tx;
u32 sample;
+ bool pulse = dev->tx_sample_pulse;
if (!dev->tx_buffer) {
- ene_dbg("TX: attempt to transmit NULL buffer");
+ ene_warn("TX: BUG: attempt to transmit NULL buffer");
return;
}
/* Grab next TX sample */
if (!dev->tx_sample) {
-again:
- if (dev->tx_pos == dev->tx_len + 1) {
+
+ if (dev->tx_pos == dev->tx_len) {
if (!dev->tx_done) {
- ene_dbg("TX: no more data to send");
- dev->tx_done = 1;
+ dbg("TX: no more data to send");
+ dev->tx_done = true;
goto exit;
} else {
- ene_dbg("TX: last sample sent by hardware");
- ene_tx_complete(dev);
+ dbg("TX: last sample sent by hardware");
+ ene_tx_disable(dev);
complete(&dev->tx_complete);
return;
}
@@ -425,23 +640,23 @@ again:
sample = dev->tx_buffer[dev->tx_pos++];
dev->tx_sample_pulse = !dev->tx_sample_pulse;
- ene_dbg("TX: sample %8d (%s)", sample, dev->tx_sample_pulse ?
- "pulse" : "space");
+ dev->tx_sample = DIV_ROUND_CLOSEST(sample, sample_period);
- dev->tx_sample = DIV_ROUND_CLOSEST(sample, ENE_TX_SMPL_PERIOD);
-
- /* guard against too short samples */
if (!dev->tx_sample)
- goto again;
+ dev->tx_sample = 1;
}
- raw_tx = min(dev->tx_sample , (unsigned int)ENE_TX_SMLP_MASK);
+ raw_tx = min(dev->tx_sample , (unsigned int)ENE_CIRRLC_OUT_MASK);
dev->tx_sample -= raw_tx;
- if (dev->tx_sample_pulse)
- raw_tx |= ENE_TX_PULSE_MASK;
+ dbg("TX: sample %8d (%s)", raw_tx * sample_period,
+ pulse ? "pulse" : "space");
+ if (pulse)
+ raw_tx |= ENE_CIRRLC_OUT_PULSE;
+
+ ene_write_reg(dev,
+ dev->tx_reg ? ENE_CIRRLC_OUT1 : ENE_CIRRLC_OUT0, raw_tx);
- ene_hw_write_reg(dev, ENE_TX_INPUT1 + dev->tx_reg, raw_tx);
dev->tx_reg = !dev->tx_reg;
exit:
/* simulate TX done interrupt */
@@ -466,76 +681,59 @@ static int ene_irq_status(struct ene_device *dev)
{
u8 irq_status;
u8 fw_flags1, fw_flags2;
- int cur_rx_pointer;
int retval = 0;
- fw_flags2 = ene_hw_read_reg(dev, ENE_FW2);
- cur_rx_pointer = !!(fw_flags2 & ENE_FW2_BUF_HIGH);
+ fw_flags2 = ene_read_reg(dev, ENE_FW2);
if (dev->hw_revision < ENE_HW_C) {
- irq_status = ene_hw_read_reg(dev, ENEB_IRQ_STATUS);
+ irq_status = ene_read_reg(dev, ENEB_IRQ_STATUS);
if (!(irq_status & ENEB_IRQ_STATUS_IR))
return 0;
- ene_hw_write_reg(dev, ENEB_IRQ_STATUS,
- irq_status & ~ENEB_IRQ_STATUS_IR);
- dev->rx_pointer = cur_rx_pointer;
+ ene_clear_reg_mask(dev, ENEB_IRQ_STATUS, ENEB_IRQ_STATUS_IR);
return ENE_IRQ_RX;
}
- irq_status = ene_hw_read_reg(dev, ENEC_IRQ);
-
- if (!(irq_status & ENEC_IRQ_STATUS))
+ irq_status = ene_read_reg(dev, ENE_IRQ);
+ if (!(irq_status & ENE_IRQ_STATUS))
return 0;
/* original driver does that twice - a workaround ? */
- ene_hw_write_reg(dev, ENEC_IRQ, irq_status & ~ENEC_IRQ_STATUS);
- ene_hw_write_reg(dev, ENEC_IRQ, irq_status & ~ENEC_IRQ_STATUS);
+ ene_write_reg(dev, ENE_IRQ, irq_status & ~ENE_IRQ_STATUS);
+ ene_write_reg(dev, ENE_IRQ, irq_status & ~ENE_IRQ_STATUS);
- /* clear unknown flag in F8F9 */
- if (fw_flags2 & ENE_FW2_IRQ_CLR)
- ene_hw_write_reg(dev, ENE_FW2, fw_flags2 & ~ENE_FW2_IRQ_CLR);
+ /* check RX interrupt */
+ if (fw_flags2 & ENE_FW2_RXIRQ) {
+ retval |= ENE_IRQ_RX;
+ ene_write_reg(dev, ENE_FW2, fw_flags2 & ~ENE_FW2_RXIRQ);
+ }
- /* check if this is a TX interrupt */
- fw_flags1 = ene_hw_read_reg(dev, ENE_FW1);
+ /* check TX interrupt */
+ fw_flags1 = ene_read_reg(dev, ENE_FW1);
if (fw_flags1 & ENE_FW1_TXIRQ) {
- ene_hw_write_reg(dev, ENE_FW1, fw_flags1 & ~ENE_FW1_TXIRQ);
+ ene_write_reg(dev, ENE_FW1, fw_flags1 & ~ENE_FW1_TXIRQ);
retval |= ENE_IRQ_TX;
}
- /* Check if this is RX interrupt */
- if (dev->rx_pointer != cur_rx_pointer) {
- retval |= ENE_IRQ_RX;
- dev->rx_pointer = cur_rx_pointer;
-
- } else if (!(retval & ENE_IRQ_TX)) {
- ene_dbg("RX: interrupt without change in RX pointer(%d)",
- dev->rx_pointer);
- retval |= ENE_IRQ_RX;
- }
-
- if ((retval & ENE_IRQ_RX) && (retval & ENE_IRQ_TX))
- ene_dbg("both RX and TX interrupt at same time");
-
return retval;
}
/* interrupt handler */
static irqreturn_t ene_isr(int irq, void *data)
{
- u16 hw_value;
- int i, hw_sample;
- int pulse;
- int irq_status;
+ u16 hw_value, reg;
+ int hw_sample, irq_status;
+ bool pulse;
unsigned long flags;
- int carrier = 0;
irqreturn_t retval = IRQ_NONE;
struct ene_device *dev = (struct ene_device *)data;
- struct ir_raw_event ev;
-
+ DEFINE_IR_RAW_EVENT(ev);
spin_lock_irqsave(&dev->hw_lock, flags);
+
+ dbg_verbose("ISR called");
+ ene_rx_read_hw_pointer(dev);
irq_status = ene_irq_status(dev);
if (!irq_status)
@@ -544,9 +742,9 @@ static irqreturn_t ene_isr(int irq, void *data)
retval = IRQ_HANDLED;
if (irq_status & ENE_IRQ_TX) {
-
+ dbg_verbose("TX interrupt");
if (!dev->hw_learning_and_tx_capable) {
- ene_dbg("TX interrupt on unsupported device!");
+ dbg("TX interrupt on unsupported device!");
goto unlock;
}
ene_tx_sample(dev);
@@ -555,48 +753,57 @@ static irqreturn_t ene_isr(int irq, void *data)
if (!(irq_status & ENE_IRQ_RX))
goto unlock;
+ dbg_verbose("RX interrupt");
- if (dev->carrier_detect_enabled || debug)
- carrier = ene_rx_sense_carrier(dev);
-#if 0
- /* TODO */
- if (dev->carrier_detect_enabled && carrier)
- ir_raw_event_report_frequency(dev->idev, carrier);
-#endif
+ if (dev->hw_learning_and_tx_capable)
+ ene_rx_sense_carrier(dev);
+
+ /* On hardware that doesn't support the extra buffer we need to trust
+ the interrupt and not track the read pointer */
+ if (!dev->hw_extra_buffer)
+ dev->r_pointer = dev->w_pointer == 0 ? ENE_FW_PACKET_SIZE : 0;
+
+ while (1) {
+
+ reg = ene_rx_get_sample_reg(dev);
+
+ dbg_verbose("next sample to read at: %04x", reg);
+ if (!reg)
+ break;
- for (i = 0; i < ENE_SAMPLES_SIZE; i++) {
- hw_value = ene_hw_read_reg(dev,
- ENE_SAMPLE_BUFFER + dev->rx_pointer * 4 + i);
+ hw_value = ene_read_reg(dev, reg);
if (dev->rx_fan_input_inuse) {
+
+ int offset = ENE_FW_SMPL_BUF_FAN - ENE_FW_SAMPLE_BUFFER;
+
/* read high part of the sample */
- hw_value |= ene_hw_read_reg(dev,
- ENE_SAMPLE_BUFFER_FAN +
- dev->rx_pointer * 4 + i) << 8;
- pulse = hw_value & ENE_FAN_SMPL_PULS_MSK;
+ hw_value |= ene_read_reg(dev, reg + offset) << 8;
+ pulse = hw_value & ENE_FW_SMPL_BUF_FAN_PLS;
/* clear space bit, and other unused bits */
- hw_value &= ENE_FAN_VALUE_MASK;
- hw_sample = hw_value * ENE_SAMPLE_PERIOD_FAN;
+ hw_value &= ENE_FW_SMPL_BUF_FAN_MSK;
+ hw_sample = hw_value * ENE_FW_SAMPLE_PERIOD_FAN;
} else {
- pulse = !(hw_value & ENE_SAMPLE_SPC_MASK);
- hw_value &= ENE_SAMPLE_VALUE_MASK;
+ pulse = !(hw_value & ENE_FW_SAMPLE_SPACE);
+ hw_value &= ~ENE_FW_SAMPLE_SPACE;
hw_sample = hw_value * sample_period;
if (dev->rx_period_adjust) {
- hw_sample *= (100 - dev->rx_period_adjust);
- hw_sample /= 100;
+ hw_sample *= 100;
+ hw_sample /= (100 + dev->rx_period_adjust);
}
}
- /* no more data */
- if (!(hw_value))
- break;
- ene_dbg("RX: %d (%s)", hw_sample, pulse ? "pulse" : "space");
+ if (!dev->hw_extra_buffer && !hw_sample) {
+ dev->r_pointer = dev->w_pointer;
+ continue;
+ }
+ dbg("RX: %d (%s)", hw_sample, pulse ? "pulse" : "space");
- ev.duration = hw_sample * 1000;
+ ev.duration = MS_TO_NS(hw_sample);
ev.pulse = pulse;
ir_raw_event_store_with_filter(dev->idev, &ev);
}
@@ -608,19 +815,26 @@ unlock:
}
/* Initialize default settings */
-static void ene_setup_settings(struct ene_device *dev)
+static void ene_setup_default_settings(struct ene_device *dev)
{
dev->tx_period = 32;
- dev->tx_duty_cycle = 25; /*%*/
- dev->transmitter_mask = 3;
+ dev->tx_duty_cycle = 50; /*%*/
+ dev->transmitter_mask = 0x03;
+ dev->learning_mode_enabled = learning_mode_force;
- /* Force learning mode if (input == 2), otherwise
- let user set it with LIRC_SET_REC_CARRIER */
- dev->learning_enabled =
- (input == 2 && dev->hw_learning_and_tx_capable);
+ /* Set reasonable default timeout */
+ dev->props->timeout = MS_TO_NS(150000);
+}
- dev->rx_pointer = -1;
+/* Upload all hardware settings at once. Used at load and resume time */
+static void ene_setup_hw_settings(struct ene_device *dev)
+{
+ if (dev->hw_learning_and_tx_capable) {
+ ene_tx_set_carrier(dev);
+ ene_tx_set_transmitters(dev);
+ }
+ ene_rx_setup(dev);
}
/* outside interface: called on first open*/
@@ -630,8 +844,6 @@ static int ene_open(void *data)
unsigned long flags;
spin_lock_irqsave(&dev->hw_lock, flags);
- dev->in_use = 1;
- ene_setup_settings(dev);
ene_rx_enable(dev);
spin_unlock_irqrestore(&dev->hw_lock, flags);
return 0;
@@ -645,7 +857,6 @@ static void ene_close(void *data)
spin_lock_irqsave(&dev->hw_lock, flags);
ene_rx_disable(dev);
- dev->in_use = 0;
spin_unlock_irqrestore(&dev->hw_lock, flags);
}
@@ -653,19 +864,17 @@ static void ene_close(void *data)
static int ene_set_tx_mask(void *data, u32 tx_mask)
{
struct ene_device *dev = (struct ene_device *)data;
- unsigned long flags;
- ene_dbg("TX: attempt to set transmitter mask %02x", tx_mask);
+ dbg("TX: attempt to set transmitter mask %02x", tx_mask);
/* invalid txmask */
- if (!tx_mask || tx_mask & ~0x3) {
- ene_dbg("TX: invalid mask");
+ if (!tx_mask || tx_mask & ~0x03) {
+ dbg("TX: invalid mask");
/* return count of transmitters */
return 2;
}
- spin_lock_irqsave(&dev->hw_lock, flags);
dev->transmitter_mask = tx_mask;
- spin_unlock_irqrestore(&dev->hw_lock, flags);
+ ene_tx_set_transmitters(dev);
return 0;
}
@@ -673,66 +882,76 @@ static int ene_set_tx_mask(void *data, u32 tx_mask)
static int ene_set_tx_carrier(void *data, u32 carrier)
{
struct ene_device *dev = (struct ene_device *)data;
- unsigned long flags;
- u32 period = 1000000 / carrier; /* (1 / freq) (* # usec in 1 sec) */
-
- ene_dbg("TX: attempt to set tx carrier to %d kHz", carrier);
+ u32 period = 2000000 / carrier;
- if (period && (period > ENE_TX_PERIOD_MAX ||
- period < ENE_TX_PERIOD_MIN)) {
+ dbg("TX: attempt to set tx carrier to %d kHz", carrier);
- ene_dbg("TX: out of range %d-%d carrier, "
- "falling back to 32 kHz",
- 1000 / ENE_TX_PERIOD_MIN,
- 1000 / ENE_TX_PERIOD_MAX);
+ if (period && (period > ENE_CIRMOD_PRD_MAX ||
+ period < ENE_CIRMOD_PRD_MIN)) {
- period = 32; /* this is just a coincidence!!! */
+ dbg("TX: out of range %d-%d kHz carrier",
+ 2000 / ENE_CIRMOD_PRD_MIN, 2000 / ENE_CIRMOD_PRD_MAX);
+ return -1;
}
- ene_dbg("TX: set carrier to %d kHz", carrier);
- spin_lock_irqsave(&dev->hw_lock, flags);
dev->tx_period = period;
- spin_unlock_irqrestore(&dev->hw_lock, flags);
+ ene_tx_set_carrier(dev);
return 0;
}
+/* outside interface: set tx duty cycle */
+static int ene_set_tx_duty_cycle(void *data, u32 duty_cycle)
+{
+ struct ene_device *dev = (struct ene_device *)data;
+ dbg("TX: setting duty cycle to %d%%", duty_cycle);
+ dev->tx_duty_cycle = duty_cycle;
+ ene_tx_set_carrier(dev);
+ return 0;
+}
/* outside interface: enable learning mode */
static int ene_set_learning_mode(void *data, int enable)
{
struct ene_device *dev = (struct ene_device *)data;
unsigned long flags;
- if (enable == dev->learning_enabled)
+ if (enable == dev->learning_mode_enabled)
return 0;
spin_lock_irqsave(&dev->hw_lock, flags);
- dev->learning_enabled = enable;
- ene_rx_set_inputs(dev);
+ dev->learning_mode_enabled = enable;
+ ene_rx_disable(dev);
+ ene_rx_setup(dev);
+ ene_rx_enable(dev);
spin_unlock_irqrestore(&dev->hw_lock, flags);
return 0;
}
-/* outside interface: set rec carrier */
-static int ene_set_rec_carrier(void *data, u32 min, u32 max)
+static int ene_set_carrier_report(void *data, int enable)
{
struct ene_device *dev = (struct ene_device *)data;
- ene_set_learning_mode(dev,
- max > ENE_NORMAL_RX_HI || min < ENE_NORMAL_RX_LOW);
+ unsigned long flags;
+
+ if (enable == dev->carrier_detect_enabled)
+ return 0;
+
+ spin_lock_irqsave(&dev->hw_lock, flags);
+ dev->carrier_detect_enabled = enable;
+ ene_rx_disable(dev);
+ ene_rx_setup(dev);
+ ene_rx_enable(dev);
+ spin_unlock_irqrestore(&dev->hw_lock, flags);
return 0;
}
/* outside interface: enable or disable idle mode */
-static void ene_rx_set_idle(void *data, int idle)
+static void ene_set_idle(void *data, bool idle)
{
- struct ene_device *dev = (struct ene_device *)data;
- ene_dbg("%sabling idle mode", idle ? "en" : "dis");
-
- ene_hw_write_reg_mask(dev, ENE_CIR_SAMPLE_PERIOD,
- (enable_idle && idle) ? 0 : ENE_CIR_SAMPLE_OVERFLOW,
- ENE_CIR_SAMPLE_OVERFLOW);
+ if (idle) {
+ ene_rx_reset((struct ene_device *)data);
+ dbg("RX: end of data");
+ }
}
-
/* outside interface: transmit */
static int ene_transmit(void *data, int *buf, u32 n)
{
@@ -747,12 +966,11 @@ static int ene_transmit(void *data, int *buf, u32 n)
dev->tx_sample = 0;
dev->tx_sample_pulse = 0;
- ene_dbg("TX: %d samples", dev->tx_len);
+ dbg("TX: %d samples", dev->tx_len);
spin_lock_irqsave(&dev->hw_lock, flags);
- ene_tx_hw_set_transmiter_mask(dev);
- ene_tx_prepare(dev);
+ ene_tx_enable(dev);
/* Transmit first two samples */
ene_tx_sample(dev);
@@ -761,16 +979,15 @@ static int ene_transmit(void *data, int *buf, u32 n)
spin_unlock_irqrestore(&dev->hw_lock, flags);
if (wait_for_completion_timeout(&dev->tx_complete, 2 * HZ) == 0) {
- ene_dbg("TX: timeout");
+ dbg("TX: timeout");
spin_lock_irqsave(&dev->hw_lock, flags);
- ene_tx_complete(dev);
+ ene_tx_disable(dev);
spin_unlock_irqrestore(&dev->hw_lock, flags);
} else
- ene_dbg("TX: done");
+ dbg("TX: done");
return n;
}
-
/* probe entry */
static int ene_probe(struct pnp_dev *pnp_dev, const struct pnp_device_id *id)
{
@@ -785,121 +1002,103 @@ static int ene_probe(struct pnp_dev *pnp_dev, const struct pnp_device_id *id)
dev = kzalloc(sizeof(struct ene_device), GFP_KERNEL);
if (!input_dev || !ir_props || !dev)
- goto error;
+ goto error1;
/* validate resources */
error = -ENODEV;
if (!pnp_port_valid(pnp_dev, 0) ||
- pnp_port_len(pnp_dev, 0) < ENE_MAX_IO)
+ pnp_port_len(pnp_dev, 0) < ENE_IO_SIZE)
goto error;
if (!pnp_irq_valid(pnp_dev, 0))
goto error;
- dev->hw_io = pnp_port_start(pnp_dev, 0);
- dev->irq = pnp_irq(pnp_dev, 0);
spin_lock_init(&dev->hw_lock);
/* claim the resources */
error = -EBUSY;
- if (!request_region(dev->hw_io, ENE_MAX_IO, ENE_DRIVER_NAME))
+ dev->hw_io = pnp_port_start(pnp_dev, 0);
+ if (!request_region(dev->hw_io, ENE_IO_SIZE, ENE_DRIVER_NAME)) {
+ dev->hw_io = -1;
+ dev->irq = -1;
goto error;
+ }
+ dev->irq = pnp_irq(pnp_dev, 0);
if (request_irq(dev->irq, ene_isr,
- IRQF_SHARED, ENE_DRIVER_NAME, (void *)dev))
+ IRQF_SHARED, ENE_DRIVER_NAME, (void *)dev)) {
+ dev->irq = -1;
goto error;
+ }
pnp_set_drvdata(pnp_dev, dev);
dev->pnp_dev = pnp_dev;
+ /* don't allow too short/long sample periods */
+ if (sample_period < 5 || sample_period > 0x7F)
+ sample_period = ENE_DEFAULT_SAMPLE_PERIOD;
+
/* detect hardware version and features */
error = ene_hw_detect(dev);
if (error)
goto error;
- ene_setup_settings(dev);
-
if (!dev->hw_learning_and_tx_capable && txsim) {
- dev->hw_learning_and_tx_capable = 1;
+ dev->hw_learning_and_tx_capable = true;
setup_timer(&dev->tx_sim_timer, ene_tx_irqsim,
(long unsigned int)dev);
- ene_printk(KERN_WARNING,
- "Simulation of TX activated\n");
+ ene_warn("Simulation of TX activated");
}
+ if (!dev->hw_learning_and_tx_capable)
+ learning_mode_force = false;
+
ir_props->driver_type = RC_DRIVER_IR_RAW;
ir_props->allowed_protos = IR_TYPE_ALL;
ir_props->priv = dev;
ir_props->open = ene_open;
ir_props->close = ene_close;
- ir_props->min_timeout = ENE_MINGAP * 1000;
- ir_props->max_timeout = ENE_MAXGAP * 1000;
- ir_props->timeout = ENE_MAXGAP * 1000;
-
- if (dev->hw_revision == ENE_HW_B)
- ir_props->s_idle = ene_rx_set_idle;
-
+ ir_props->s_idle = ene_set_idle;
dev->props = ir_props;
dev->idev = input_dev;
- /* don't allow too short/long sample periods */
- if (sample_period < 5 || sample_period > 0x7F)
- sample_period = -1;
-
- /* choose default sample period */
- if (sample_period == -1) {
-
- sample_period = 50;
-
- /* on revB, hardware idle mode eats first sample
- if we set too low sample period */
- if (dev->hw_revision == ENE_HW_B && enable_idle)
- sample_period = 75;
- }
-
- ir_props->rx_resolution = sample_period * 1000;
-
if (dev->hw_learning_and_tx_capable) {
-
ir_props->s_learning_mode = ene_set_learning_mode;
-
- if (input == 0)
- ir_props->s_rx_carrier_range = ene_set_rec_carrier;
-
init_completion(&dev->tx_complete);
ir_props->tx_ir = ene_transmit;
ir_props->s_tx_mask = ene_set_tx_mask;
ir_props->s_tx_carrier = ene_set_tx_carrier;
- ir_props->tx_resolution = ENE_TX_SMPL_PERIOD * 1000;
- /* ir_props->s_carrier_report = ene_set_carrier_report; */
+ ir_props->s_tx_duty_cycle = ene_set_tx_duty_cycle;
+ ir_props->s_carrier_report = ene_set_carrier_report;
}
+ ene_rx_setup_hw_buffer(dev);
+ ene_setup_default_settings(dev);
+ ene_setup_hw_settings(dev);
- device_set_wakeup_capable(&pnp_dev->dev, 1);
- device_set_wakeup_enable(&pnp_dev->dev, 1);
+ device_set_wakeup_capable(&pnp_dev->dev, true);
+ device_set_wakeup_enable(&pnp_dev->dev, true);
if (dev->hw_learning_and_tx_capable)
input_dev->name = "ENE eHome Infrared Remote Transceiver";
else
input_dev->name = "ENE eHome Infrared Remote Receiver";
-
error = -ENODEV;
if (ir_input_register(input_dev, RC_MAP_RC6_MCE, ir_props,
ENE_DRIVER_NAME))
goto error;
-
- ene_printk(KERN_NOTICE, "driver has been succesfully loaded\n");
+ ene_notice("driver has been successfully loaded");
return 0;
error:
- if (dev->irq)
+ if (dev && dev->irq >= 0)
free_irq(dev->irq, dev);
- if (dev->hw_io)
- release_region(dev->hw_io, ENE_MAX_IO);
-
+ if (dev && dev->hw_io >= 0)
+ release_region(dev->hw_io, ENE_IO_SIZE);
+error1:
input_free_device(input_dev);
kfree(ir_props);
kfree(dev);
@@ -914,10 +1113,11 @@ static void ene_remove(struct pnp_dev *pnp_dev)
spin_lock_irqsave(&dev->hw_lock, flags);
ene_rx_disable(dev);
+ ene_rx_restore_hw_buffer(dev);
spin_unlock_irqrestore(&dev->hw_lock, flags);
free_irq(dev->irq, dev);
- release_region(dev->hw_io, ENE_MAX_IO);
+ release_region(dev->hw_io, ENE_IO_SIZE);
ir_input_unregister(dev->idev);
kfree(dev->props);
kfree(dev);
@@ -927,28 +1127,29 @@ static void ene_remove(struct pnp_dev *pnp_dev)
static void ene_enable_wake(struct ene_device *dev, int enable)
{
enable = enable && device_may_wakeup(&dev->pnp_dev->dev);
-
- ene_dbg("wake on IR %s", enable ? "enabled" : "disabled");
-
- ene_hw_write_reg_mask(dev, ENE_FW1, enable ?
- ENE_FW1_WAKE : 0, ENE_FW1_WAKE);
+ dbg("wake on IR %s", enable ? "enabled" : "disabled");
+ ene_set_clear_reg_mask(dev, ENE_FW1, ENE_FW1_WAKE, enable);
}
#ifdef CONFIG_PM
static int ene_suspend(struct pnp_dev *pnp_dev, pm_message_t state)
{
struct ene_device *dev = pnp_get_drvdata(pnp_dev);
- ene_enable_wake(dev, 1);
+ ene_enable_wake(dev, true);
+
+ /* TODO: add support for wake pattern */
return 0;
}
static int ene_resume(struct pnp_dev *pnp_dev)
{
struct ene_device *dev = pnp_get_drvdata(pnp_dev);
- if (dev->in_use)
+ ene_setup_hw_settings(dev);
+
+ if (dev->rx_enabled)
ene_rx_enable(dev);
- ene_enable_wake(dev, 0);
+ ene_enable_wake(dev, false);
return 0;
}
#endif
@@ -956,7 +1157,7 @@ static int ene_resume(struct pnp_dev *pnp_dev)
static void ene_shutdown(struct pnp_dev *pnp_dev)
{
struct ene_device *dev = pnp_get_drvdata(pnp_dev);
- ene_enable_wake(dev, 1);
+ ene_enable_wake(dev, true);
}
static const struct pnp_device_id ene_ids[] = {
@@ -994,18 +1195,11 @@ static void ene_exit(void)
module_param(sample_period, int, S_IRUGO);
MODULE_PARM_DESC(sample_period, "Hardware sample period (50 us default)");
-module_param(enable_idle, bool, S_IRUGO | S_IWUSR);
-MODULE_PARM_DESC(enable_idle,
- "Enables turning off signal sampling after long inactivity time; "
- "if disabled might help detecting input signal (default: enabled)"
- " (KB3926B only)");
-
-module_param(input, bool, S_IRUGO);
-MODULE_PARM_DESC(input, "select which input to use "
- "0 - auto, 1 - standard, 2 - wideband(KB3926C+)");
+module_param(learning_mode_force, bool, S_IRUGO);
+MODULE_PARM_DESC(learning_mode_force, "Enable learning mode by default");
module_param(debug, int, S_IRUGO | S_IWUSR);
-MODULE_PARM_DESC(debug, "Enable debug (debug=2 verbose debug output)");
+MODULE_PARM_DESC(debug, "Debug level");
module_param(txsim, bool, S_IRUGO);
MODULE_PARM_DESC(txsim,
@@ -1013,8 +1207,8 @@ MODULE_PARM_DESC(txsim,
MODULE_DEVICE_TABLE(pnp, ene_ids);
MODULE_DESCRIPTION
- ("Infrared input driver for KB3926B/KB3926C/KB3926D "
- "(aka ENE0100/ENE0200/ENE0201) CIR port");
+ ("Infrared input driver for KB3926B/C/D/E/F "
+ "(aka ENE0100/ENE0200/ENE0201/ENE0202) CIR port");
MODULE_AUTHOR("Maxim Levitsky");
MODULE_LICENSE("GPL");
diff --git a/drivers/media/IR/ene_ir.h b/drivers/media/IR/ene_ir.h
index 54c76af0d033..f5870667a433 100644
--- a/drivers/media/IR/ene_ir.h
+++ b/drivers/media/IR/ene_ir.h
@@ -1,5 +1,5 @@
/*
- * driver for ENE KB3926 B/C/D CIR (also known as ENE0XXX)
+ * driver for ENE KB3926 B/C/D/E/F CIR (also known as ENE0XXX)
*
* Copyright (C) 2010 Maxim Levitsky <maximlevitsky@gmail.com>
*
@@ -26,43 +26,50 @@
#define ENE_ADDR_HI 1 /* hi byte of register address */
#define ENE_ADDR_LO 2 /* low byte of register address */
#define ENE_IO 3 /* read/write window */
-#define ENE_MAX_IO 4
-
-/* 8 bytes of samples, divided in 2 halfs*/
-#define ENE_SAMPLE_BUFFER 0xF8F0 /* regular sample buffer */
-#define ENE_SAMPLE_SPC_MASK 0x80 /* sample is space */
-#define ENE_SAMPLE_VALUE_MASK 0x7F
-#define ENE_SAMPLE_OVERFLOW 0x7F
-#define ENE_SAMPLES_SIZE 4
-
-/* fan input sample buffer */
-#define ENE_SAMPLE_BUFFER_FAN 0xF8FB /* this buffer holds high byte of */
- /* each sample of normal buffer */
-#define ENE_FAN_SMPL_PULS_MSK 0x8000 /* this bit of combined sample */
- /* if set, says that sample is pulse */
-#define ENE_FAN_VALUE_MASK 0x0FFF /* mask for valid bits of the value */
-
-/* first firmware register */
-#define ENE_FW1 0xF8F8
+#define ENE_IO_SIZE 4
+
+/* 8 bytes of samples, divided into 2 packets */
+#define ENE_FW_SAMPLE_BUFFER 0xF8F0 /* sample buffer */
+#define ENE_FW_SAMPLE_SPACE 0x80 /* sample is space */
+#define ENE_FW_PACKET_SIZE 4
+
+/* first firmware flag register */
+#define ENE_FW1 0xF8F8 /* flagr */
#define ENE_FW1_ENABLE 0x01 /* enable fw processing */
#define ENE_FW1_TXIRQ 0x02 /* TX interrupt pending */
+#define ENE_FW1_HAS_EXTRA_BUF 0x04 /* fw uses extra buffer*/
+#define ENE_FW1_EXTRA_BUF_HND 0x08 /* extra buffer handshake bit*/
+#define ENE_FW1_LED_ON 0x10 /* turn on a led */
+
+#define ENE_FW1_WPATTERN 0x20 /* enable wake pattern */
#define ENE_FW1_WAKE 0x40 /* enable wake from S3 */
#define ENE_FW1_IRQ 0x80 /* enable interrupt */
-/* second firmware register */
-#define ENE_FW2 0xF8F9
-#define ENE_FW2_BUF_HIGH 0x01 /* which half of the buffer to read */
-#define ENE_FW2_IRQ_CLR 0x04 /* clear this on IRQ */
-#define ENE_FW2_GP40_AS_LEARN 0x08 /* normal input is used as */
- /* learning input */
-#define ENE_FW2_FAN_AS_NRML_IN 0x40 /* fan is used as normal input */
+/* second firmware flag register */
+#define ENE_FW2 0xF8F9 /* flagw */
+#define ENE_FW2_BUF_WPTR 0x01 /* which half of the buffer to read */
+#define ENE_FW2_RXIRQ 0x04 /* RX IRQ pending*/
+#define ENE_FW2_GP0A 0x08 /* Use GPIO0A for demodulated input */
+#define ENE_FW2_EMMITER1_CONN 0x10 /* TX emmiter 1 connected */
+#define ENE_FW2_EMMITER2_CONN 0x20 /* TX emmiter 2 connected */
+
+#define ENE_FW2_FAN_INPUT 0x40 /* fan input used for demodulated data*/
#define ENE_FW2_LEARNING 0x80 /* hardware supports learning and TX */
+/* firmware RX pointer for new style buffer */
+#define ENE_FW_RX_POINTER 0xF8FA
+
+/* high parts of samples for fan input (8 samples)*/
+#define ENE_FW_SMPL_BUF_FAN 0xF8FB
+#define ENE_FW_SMPL_BUF_FAN_PLS 0x8000 /* combined sample is pulse */
+#define ENE_FW_SMPL_BUF_FAN_MSK 0x0FFF /* combined sample maximum value */
+#define ENE_FW_SAMPLE_PERIOD_FAN 61 /* fan input has fixed sample period */
+
/* transmitter ports */
-#define ENE_TX_PORT2 0xFC01 /* this enables one or both */
-#define ENE_TX_PORT2_EN 0x20 /* TX ports */
-#define ENE_TX_PORT1 0xFC08
-#define ENE_TX_PORT1_EN 0x02
+#define ENE_GPIOFS1 0xFC01
+#define ENE_GPIOFS1_GPIO0D 0x20 /* enable tx output on GPIO0D */
+#define ENE_GPIOFS8 0xFC08
+#define ENE_GPIOFS8_GPIO41 0x02 /* enable tx output on GPIO40 */
/* IRQ registers block (for revision B) */
#define ENEB_IRQ 0xFD09 /* IRQ number */
@@ -70,97 +77,99 @@
#define ENEB_IRQ_STATUS 0xFD80 /* irq status */
#define ENEB_IRQ_STATUS_IR 0x20 /* IR irq */
-/* fan as input settings - only if learning capable */
+/* fan as input settings */
#define ENE_FAN_AS_IN1 0xFE30 /* fan init reg 1 */
#define ENE_FAN_AS_IN1_EN 0xCD
#define ENE_FAN_AS_IN2 0xFE31 /* fan init reg 2 */
#define ENE_FAN_AS_IN2_EN 0x03
-#define ENE_SAMPLE_PERIOD_FAN 61 /* fan input has fixed sample period */
/* IRQ registers block (for revision C,D) */
-#define ENEC_IRQ 0xFE9B /* new irq settings register */
-#define ENEC_IRQ_MASK 0x0F /* irq number mask */
-#define ENEC_IRQ_UNK_EN 0x10 /* always enabled */
-#define ENEC_IRQ_STATUS 0x20 /* irq status and ACK */
-
-/* CIR block settings */
-#define ENE_CIR_CONF1 0xFEC0
-#define ENE_CIR_CONF1_TX_CLEAR 0x01 /* clear that on revC */
- /* while transmitting */
-#define ENE_CIR_CONF1_RX_ON 0x07 /* normal receiver enabled */
-#define ENE_CIR_CONF1_LEARN1 0x08 /* enabled on learning mode */
-#define ENE_CIR_CONF1_TX_ON 0x30 /* enabled on transmit */
-#define ENE_CIR_CONF1_TX_CARR 0x80 /* send TX carrier or not */
-
-#define ENE_CIR_CONF2 0xFEC1 /* unknown setting = 0 */
-#define ENE_CIR_CONF2_LEARN2 0x10 /* set on enable learning */
-#define ENE_CIR_CONF2_GPIO40DIS 0x20 /* disable input via gpio40 */
-
-#define ENE_CIR_SAMPLE_PERIOD 0xFEC8 /* sample period in us */
-#define ENE_CIR_SAMPLE_OVERFLOW 0x80 /* interrupt on overflows if set */
-
-
-/* Two byte tx buffer */
-#define ENE_TX_INPUT1 0xFEC9
-#define ENE_TX_INPUT2 0xFECA
-#define ENE_TX_PULSE_MASK 0x80 /* Transmitted sample is pulse */
-#define ENE_TX_SMLP_MASK 0x7F
-#define ENE_TX_SMPL_PERIOD 50 /* transmit sample period - fixed */
+#define ENE_IRQ 0xFE9B /* new irq settings register */
+#define ENE_IRQ_MASK 0x0F /* irq number mask */
+#define ENE_IRQ_UNK_EN 0x10 /* always enabled */
+#define ENE_IRQ_STATUS 0x20 /* irq status and ACK */
+
+/* CIR Config register #1 */
+#define ENE_CIRCFG 0xFEC0
+#define ENE_CIRCFG_RX_EN 0x01 /* RX enable */
+#define ENE_CIRCFG_RX_IRQ 0x02 /* Enable hardware interrupt */
+#define ENE_CIRCFG_REV_POL 0x04 /* Input polarity reversed */
+#define ENE_CIRCFG_CARR_DEMOD 0x08 /* Enable carrier demodulator */
+
+#define ENE_CIRCFG_TX_EN 0x10 /* TX enable */
+#define ENE_CIRCFG_TX_IRQ 0x20 /* Send interrupt on TX done */
+#define ENE_CIRCFG_TX_POL_REV 0x40 /* TX polarity reversed */
+#define ENE_CIRCFG_TX_CARR 0x80 /* send TX carrier or not */
+
+/* CIR config register #2 */
+#define ENE_CIRCFG2 0xFEC1
+#define ENE_CIRCFG2_RLC 0x00
+#define ENE_CIRCFG2_RC5 0x01
+#define ENE_CIRCFG2_RC6 0x02
+#define ENE_CIRCFG2_NEC 0x03
+#define ENE_CIRCFG2_CARR_DETECT 0x10 /* Enable carrier detection */
+#define ENE_CIRCFG2_GPIO0A 0x20 /* Use GPIO0A instead of GPIO40 for input */
+#define ENE_CIRCFG2_FAST_SAMPL1 0x40 /* Fast leading pulse detection for RC6 */
+#define ENE_CIRCFG2_FAST_SAMPL2 0x80 /* Fast data detection for RC6 */
+
+/* Knobs for protocol decoding - will document when/if we use them */
+#define ENE_CIRPF 0xFEC2
+#define ENE_CIRHIGH 0xFEC3
+#define ENE_CIRBIT 0xFEC4
+#define ENE_CIRSTART 0xFEC5
+#define ENE_CIRSTART2 0xFEC6
+
+/* Actual register which contains RLC RX data - read by firmware */
+#define ENE_CIRDAT_IN 0xFEC7
+
+
+/* RLC configuration - sample period (1us resolution) + idle mode */
+#define ENE_CIRRLC_CFG 0xFEC8
+#define ENE_CIRRLC_CFG_OVERFLOW 0x80 /* interrupt on overflows if set */
+#define ENE_DEFAULT_SAMPLE_PERIOD 50
+
+/* Two byte RLC TX buffer */
+#define ENE_CIRRLC_OUT0 0xFEC9
+#define ENE_CIRRLC_OUT1 0xFECA
+#define ENE_CIRRLC_OUT_PULSE 0x80 /* Transmitted sample is pulse */
+#define ENE_CIRRLC_OUT_MASK 0x7F
+
+
+/* Carrier detect setting
+ * Low nibble - number of carrier pulses to average
+ * High nibble - number of initial carrier pulses to discard
+ */
+#define ENE_CIRCAR_PULS 0xFECB
+/* detected RX carrier period (resolution: 500 ns) */
+#define ENE_CIRCAR_PRD 0xFECC
+#define ENE_CIRCAR_PRD_VALID 0x80 /* register content valid */
-/* Unknown TX setting - TX sample period ??? */
-#define ENE_TX_UNK1 0xFECB /* set to 0x63 */
+/* detected RX carrier pulse width (resolution: 500 ns) */
+#define ENE_CIRCAR_HPRD 0xFECD
-/* Current received carrier period */
-#define ENE_RX_CARRIER 0xFECC /* RX period (500 ns) */
-#define ENE_RX_CARRIER_VALID 0x80 /* Register content valid */
+/* TX period (resolution: 500 ns, minimum 2)*/
+#define ENE_CIRMOD_PRD 0xFECE
+#define ENE_CIRMOD_PRD_POL 0x80 /* TX carrier polarity*/
+#define ENE_CIRMOD_PRD_MAX 0x7F /* 15.87 kHz */
+#define ENE_CIRMOD_PRD_MIN 0x02 /* 1 MHz */
-/* TX period (1/carrier) */
-#define ENE_TX_PERIOD 0xFECE /* TX period (500 ns) */
-#define ENE_TX_PERIOD_UNKBIT 0x80 /* This bit set on transmit*/
-#define ENE_TX_PERIOD_PULSE 0xFECF /* TX pulse period (500 ns)*/
+/* TX pulse width (resolution: 500 ns)*/
+#define ENE_CIRMOD_HPRD 0xFECF
/* Hardware versions */
-#define ENE_HW_VERSION 0xFF00 /* hardware revision */
+#define ENE_ECHV 0xFF00 /* hardware revision */
#define ENE_PLLFRH 0xFF16
#define ENE_PLLFRL 0xFF17
+#define ENE_DEFAULT_PLL_FREQ 1000
-#define ENE_HW_UNK 0xFF1D
-#define ENE_HW_UNK_CLR 0x04
-#define ENE_HW_VER_MAJOR 0xFF1E /* chip version */
-#define ENE_HW_VER_MINOR 0xFF1F
-#define ENE_HW_VER_OLD 0xFD00
-
-/* Normal/Learning carrier ranges - only valid if we have learning input*/
-/* TODO: test */
-#define ENE_NORMAL_RX_LOW 34
-#define ENE_NORMAL_RX_HI 38
+#define ENE_ECSTS 0xFF1D
+#define ENE_ECSTS_RSRVD 0x04
-/* Tx carrier range */
-/* Hardware might be able to do more, but this range is enough for
- all purposes */
-#define ENE_TX_PERIOD_MAX 32 /* corresponds to 29.4 kHz */
-#define ENE_TX_PERIOD_MIN 16 /* corrsponds to 62.5 kHz */
-
-
-
-/* Minimal and maximal gaps */
-
-/* Normal case:
- Minimal gap is 0x7F * sample period
- Maximum gap depends on hardware.
- For KB3926B, it is unlimited, for newer models its around
- 250000, after which HW stops sending samples, and that is
- not possible to change */
-
-/* Fan case:
- Both minimal and maximal gaps are same, and equal to 0xFFF * 0x61
- And there is nothing to change this setting
-*/
-
-#define ENE_MAXGAP 250000
-#define ENE_MINGAP (127 * sample_period)
+#define ENE_ECVER_MAJOR 0xFF1E /* chip version */
+#define ENE_ECVER_MINOR 0xFF1F
+#define ENE_HW_VER_OLD 0xFD00
/******************************************************************************/
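The ENE_CIRMOD_PRD comments above state that the TX modulation period is kept in 500 ns units, bounded by ENE_CIRMOD_PRD_MIN/MAX. As a rough illustration only (not part of the patch; the helper name, clamping policy and the user-space wrapper are assumptions), a sketch of how a carrier frequency could be mapped onto that register value:

#include <stdio.h>

#define ENE_CIRMOD_PRD_MAX	0x7F	/* 15.87 kHz */
#define ENE_CIRMOD_PRD_MIN	0x02	/* 1 MHz */

/* period in 500 ns units: 10^9 / (carrier_hz * 500) == 2000000 / carrier_hz */
static unsigned int carrier_to_prd(unsigned int carrier_hz)
{
	unsigned int prd;

	if (!carrier_hz)
		return ENE_CIRMOD_PRD_MAX;

	prd = 2000000 / carrier_hz;
	if (prd < ENE_CIRMOD_PRD_MIN)
		prd = ENE_CIRMOD_PRD_MIN;
	if (prd > ENE_CIRMOD_PRD_MAX)
		prd = ENE_CIRMOD_PRD_MAX;
	return prd;
}

int main(void)
{
	/* a 36 kHz carrier -> 55 units, i.e. a 27.5 us modulation period */
	printf("36 kHz -> 0x%02x\n", carrier_to_prd(36000));
	return 0;
}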
@@ -171,46 +180,60 @@
#define ENE_HW_B 1 /* 3926B */
#define ENE_HW_C 2 /* 3926C */
-#define ENE_HW_D 3 /* 3926D */
+#define ENE_HW_D 3 /* 3926D or later */
#define ene_printk(level, text, ...) \
- printk(level ENE_DRIVER_NAME ": " text, ## __VA_ARGS__)
+ printk(level ENE_DRIVER_NAME ": " text "\n", ## __VA_ARGS__)
-#define ene_dbg(text, ...) \
- if (debug) \
- printk(KERN_DEBUG \
- ENE_DRIVER_NAME ": " text "\n" , ## __VA_ARGS__)
+#define ene_notice(text, ...) ene_printk(KERN_NOTICE, text, ## __VA_ARGS__)
+#define ene_warn(text, ...) ene_printk(KERN_WARNING, text, ## __VA_ARGS__)
-#define ene_dbg_verbose(text, ...) \
- if (debug > 1) \
- printk(KERN_DEBUG \
- ENE_DRIVER_NAME ": " text "\n" , ## __VA_ARGS__)
+#define __dbg(level, format, ...) \
+ do { \
+ if (debug >= level) \
+ printk(KERN_DEBUG ENE_DRIVER_NAME \
+ ": " format "\n", ## __VA_ARGS__); \
+ } while (0)
+
+
+#define dbg(format, ...) __dbg(1, format, ## __VA_ARGS__)
+#define dbg_verbose(format, ...) __dbg(2, format, ## __VA_ARGS__)
+#define dbg_regs(format, ...) __dbg(3, format, ## __VA_ARGS__)
+
+#define MS_TO_NS(msec) ((msec) * 1000)
struct ene_device {
struct pnp_dev *pnp_dev;
struct input_dev *idev;
struct ir_dev_props *props;
- int in_use;
/* hw IO settings */
- unsigned long hw_io;
+ long hw_io;
int irq;
spinlock_t hw_lock;
/* HW features */
int hw_revision; /* hardware revision */
- bool hw_learning_and_tx_capable; /* learning capable */
- bool hw_gpio40_learning; /* gpio40 is learning */
- bool hw_fan_as_normal_input; /* fan input is used as */
- /* regular input */
+ bool hw_use_gpio_0a; /* gpio0a is demodulated input*/
+ bool hw_extra_buffer; /* hardware has 'extra buffer' */
+ bool hw_fan_input; /* fan input is IR data source */
+ bool hw_learning_and_tx_capable; /* learning & tx capable */
+ int pll_freq;
+ int buffer_len;
+
+ /* Extra RX buffer location */
+ int extra_buf1_address;
+ int extra_buf1_len;
+ int extra_buf2_address;
+ int extra_buf2_len;
+
/* HW state*/
- int rx_pointer; /* hw pointer to rx buffer */
+ int r_pointer; /* pointer to next sample to read */
+ int w_pointer; /* pointer to next sample hw will write */
bool rx_fan_input_inuse; /* is fan input in use for rx*/
int tx_reg; /* current reg used for TX */
u8 saved_conf1; /* saved FEC0 reg */
-
- /* TX sample handling */
unsigned int tx_sample; /* current sample for TX */
bool tx_sample_pulse; /* current sample is pulse */
@@ -229,7 +252,11 @@ struct ene_device {
int transmitter_mask;
/* RX settings */
- bool learning_enabled; /* learning input enabled */
+ bool learning_mode_enabled; /* learning input enabled */
bool carrier_detect_enabled; /* carrier detect enabled */
int rx_period_adjust;
+ bool rx_enabled;
};
+
+static int ene_irq_status(struct ene_device *dev);
+static void ene_rx_read_hw_pointer(struct ene_device *dev);
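The header now funnels all debug output through a single level check driven by the debug module parameter (the __dbg/dbg/dbg_verbose/dbg_regs hunk above). A minimal user-space sketch of the same pattern, with a plain global standing in for the module parameter; the printf backend and driver name are illustrative, not the driver's:

#include <stdio.h>

static int debug = 2;	/* stand-in for module_param(debug, int, ...) */

#define DRIVER_NAME "ene_ir"

#define __dbg(level, format, ...)					\
	do {								\
		if (debug >= level)					\
			printf(DRIVER_NAME ": " format "\n",		\
			       ##__VA_ARGS__);				\
	} while (0)

#define dbg(format, ...)	 __dbg(1, format, ##__VA_ARGS__)
#define dbg_verbose(format, ...) __dbg(2, format, ##__VA_ARGS__)
#define dbg_regs(format, ...)	 __dbg(3, format, ##__VA_ARGS__)

int main(void)
{
	dbg("printed when debug >= 1");
	dbg_verbose("printed when debug >= 2");
	dbg_regs("suppressed here: needs debug >= 3");
	return 0;
}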
diff --git a/drivers/media/IR/imon.c b/drivers/media/IR/imon.c
index faed5a332c71..bc118066bc38 100644
--- a/drivers/media/IR/imon.c
+++ b/drivers/media/IR/imon.c
@@ -1,7 +1,7 @@
/*
* imon.c: input and display driver for SoundGraph iMON IR/VFD/LCD
*
- * Copyright(C) 2009 Jarod Wilson <jarod@wilsonet.com>
+ * Copyright(C) 2010 Jarod Wilson <jarod@wilsonet.com>
* Portions based on the original lirc_imon driver,
* Copyright(C) 2004 Venky Raju(dev@venky.ws)
*
@@ -26,6 +26,8 @@
* Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*/
+#define pr_fmt(fmt) KBUILD_MODNAME ":%s: " fmt, __func__
+
#include <linux/errno.h>
#include <linux/init.h>
#include <linux/kernel.h>
@@ -44,7 +46,7 @@
#define MOD_AUTHOR "Jarod Wilson <jarod@wilsonet.com>"
#define MOD_DESC "Driver for SoundGraph iMON MultiMedia IR/Display"
#define MOD_NAME "imon"
-#define MOD_VERSION "0.9.1"
+#define MOD_VERSION "0.9.2"
#define DISPLAY_MINOR_BASE 144
#define DEVICE_NAME "lcd%d"
@@ -121,21 +123,26 @@ struct imon_context {
u16 vendor; /* usb vendor ID */
u16 product; /* usb product ID */
- struct input_dev *idev; /* input device for remote */
+ struct input_dev *rdev; /* input device for remote */
+ struct input_dev *idev; /* input device for panel & IR mouse */
struct input_dev *touch; /* input device for touchscreen */
+ spinlock_t kc_lock; /* make sure we get keycodes right */
u32 kc; /* current input keycode */
u32 last_keycode; /* last reported input keycode */
+ u32 rc_scancode; /* the computed remote scancode */
+ u8 rc_toggle; /* the computed remote toggle bit */
u64 ir_type; /* iMON or MCE (RC6) IR protocol? */
- u8 mce_toggle_bit; /* last mce toggle bit */
bool release_code; /* some keys send a release code */
u8 display_type; /* store the display type */
bool pad_mouse; /* toggle kbd(0)/mouse(1) mode */
+ char name_rdev[128]; /* rc input device name */
+ char phys_rdev[64]; /* rc input device phys path */
+
char name_idev[128]; /* input device name */
char phys_idev[64]; /* input device phys path */
- struct timer_list itimer; /* input device timer, need for rc6 */
char name_touch[128]; /* touch screen name */
char phys_touch[64]; /* touch screen phys path */
@@ -289,6 +296,9 @@ static const struct {
{ 0x000100000000ffeell, KEY_VOLUMEUP },
{ 0x010000000000ffeell, KEY_VOLUMEDOWN },
{ 0x000000000100ffeell, KEY_MUTE },
+ /* 0xffdc iMON MCE VFD */
+ { 0x00010000ffffffeell, KEY_VOLUMEUP },
+ { 0x01000000ffffffeell, KEY_VOLUMEDOWN },
/* iMON Knob values */
{ 0x000100ffffffffeell, KEY_VOLUMEUP },
{ 0x010000ffffffffeell, KEY_VOLUMEDOWN },
@@ -307,7 +317,7 @@ MODULE_DEVICE_TABLE(usb, imon_usb_id_table);
static bool debug;
module_param(debug, bool, S_IRUGO | S_IWUSR);
-MODULE_PARM_DESC(debug, "Debug messages: 0=no, 1=yes(default: no)");
+MODULE_PARM_DESC(debug, "Debug messages: 0=no, 1=yes (default: no)");
/* lcd, vfd, vga or none? should be auto-detected, but can be overridden... */
static int display_type;
@@ -365,15 +375,14 @@ static int display_open(struct inode *inode, struct file *file)
subminor = iminor(inode);
interface = usb_find_interface(&imon_driver, subminor);
if (!interface) {
- err("%s: could not find interface for minor %d",
- __func__, subminor);
+ pr_err("could not find interface for minor %d\n", subminor);
retval = -ENODEV;
goto exit;
}
ictx = usb_get_intfdata(interface);
if (!ictx) {
- err("%s: no context found for minor %d", __func__, subminor);
+ pr_err("no context found for minor %d\n", subminor);
retval = -ENODEV;
goto exit;
}
@@ -381,10 +390,10 @@ static int display_open(struct inode *inode, struct file *file)
mutex_lock(&ictx->lock);
if (!ictx->display_supported) {
- err("%s: display not supported by device", __func__);
+ pr_err("display not supported by device\n");
retval = -ENODEV;
} else if (ictx->display_isopen) {
- err("%s: display port is already open", __func__);
+ pr_err("display port is already open\n");
retval = -EBUSY;
} else {
ictx->display_isopen = true;
@@ -411,17 +420,17 @@ static int display_close(struct inode *inode, struct file *file)
ictx = file->private_data;
if (!ictx) {
- err("%s: no context for device", __func__);
+ pr_err("no context for device\n");
return -ENODEV;
}
mutex_lock(&ictx->lock);
if (!ictx->display_supported) {
- err("%s: display not supported by device", __func__);
+ pr_err("display not supported by device\n");
retval = -ENODEV;
} else if (!ictx->display_isopen) {
- err("%s: display is not open", __func__);
+ pr_err("display is not open\n");
retval = -EIO;
} else {
ictx->display_isopen = false;
@@ -500,19 +509,19 @@ static int send_packet(struct imon_context *ictx)
if (retval) {
ictx->tx.busy = false;
smp_rmb(); /* ensure later readers know we're not busy */
- err("%s: error submitting urb(%d)", __func__, retval);
+ pr_err("error submitting urb(%d)\n", retval);
} else {
/* Wait for transmission to complete (or abort) */
mutex_unlock(&ictx->lock);
retval = wait_for_completion_interruptible(
&ictx->tx.finished);
if (retval)
- err("%s: task interrupted", __func__);
+ pr_err("task interrupted\n");
mutex_lock(&ictx->lock);
retval = ictx->tx.status;
if (retval)
- err("%s: packet tx failed (%d)", __func__, retval);
+ pr_err("packet tx failed (%d)\n", retval);
}
kfree(control_req);
@@ -544,12 +553,12 @@ static int send_associate_24g(struct imon_context *ictx)
0x00, 0x00, 0x00, 0x20 };
if (!ictx) {
- err("%s: no context for device", __func__);
+ pr_err("no context for device\n");
return -ENODEV;
}
if (!ictx->dev_present_intf0) {
- err("%s: no iMON device present", __func__);
+ pr_err("no iMON device present\n");
return -ENODEV;
}
@@ -577,7 +586,7 @@ static int send_set_imon_clock(struct imon_context *ictx,
int i;
if (!ictx) {
- err("%s: no context for device", __func__);
+ pr_err("no context for device\n");
return -ENODEV;
}
@@ -638,8 +647,7 @@ static int send_set_imon_clock(struct imon_context *ictx,
memcpy(ictx->usb_tx_buf, clock_enable_pkt[i], 8);
retval = send_packet(ictx);
if (retval) {
- err("%s: send_packet failed for packet %d",
- __func__, i);
+ pr_err("send_packet failed for packet %d\n", i);
break;
}
}
@@ -778,7 +786,7 @@ static struct attribute *imon_display_sysfs_entries[] = {
NULL
};
-static struct attribute_group imon_display_attribute_group = {
+static struct attribute_group imon_display_attr_group = {
.attrs = imon_display_sysfs_entries
};
@@ -787,7 +795,7 @@ static struct attribute *imon_rf_sysfs_entries[] = {
NULL
};
-static struct attribute_group imon_rf_attribute_group = {
+static struct attribute_group imon_rf_attr_group = {
.attrs = imon_rf_sysfs_entries
};
@@ -815,20 +823,20 @@ static ssize_t vfd_write(struct file *file, const char *buf,
ictx = file->private_data;
if (!ictx) {
- err("%s: no context for device", __func__);
+ pr_err("no context for device\n");
return -ENODEV;
}
mutex_lock(&ictx->lock);
if (!ictx->dev_present_intf0) {
- err("%s: no iMON device present", __func__);
+ pr_err("no iMON device present\n");
retval = -ENODEV;
goto exit;
}
if (n_bytes <= 0 || n_bytes > 32) {
- err("%s: invalid payload size", __func__);
+ pr_err("invalid payload size\n");
retval = -EINVAL;
goto exit;
}
@@ -854,8 +862,7 @@ static ssize_t vfd_write(struct file *file, const char *buf,
retval = send_packet(ictx);
if (retval) {
- err("%s: send packet failed for packet #%d",
- __func__, seq/2);
+ pr_err("send packet failed for packet #%d\n", seq / 2);
goto exit;
} else {
seq += 2;
@@ -869,8 +876,7 @@ static ssize_t vfd_write(struct file *file, const char *buf,
ictx->usb_tx_buf[7] = (unsigned char) seq;
retval = send_packet(ictx);
if (retval)
- err("%s: send packet failed for packet #%d",
- __func__, seq / 2);
+ pr_err("send packet failed for packet #%d\n", seq / 2);
exit:
mutex_unlock(&ictx->lock);
@@ -899,21 +905,20 @@ static ssize_t lcd_write(struct file *file, const char *buf,
ictx = file->private_data;
if (!ictx) {
- err("%s: no context for device", __func__);
+ pr_err("no context for device\n");
return -ENODEV;
}
mutex_lock(&ictx->lock);
if (!ictx->display_supported) {
- err("%s: no iMON display present", __func__);
+ pr_err("no iMON display present\n");
retval = -ENODEV;
goto exit;
}
if (n_bytes != 8) {
- err("%s: invalid payload size: %d (expecting 8)",
- __func__, (int) n_bytes);
+ pr_err("invalid payload size: %d (expected 8)\n", (int)n_bytes);
retval = -EINVAL;
goto exit;
}
@@ -925,7 +930,7 @@ static ssize_t lcd_write(struct file *file, const char *buf,
retval = send_packet(ictx);
if (retval) {
- err("%s: send packet failed!", __func__);
+ pr_err("send packet failed!\n");
goto exit;
} else {
dev_dbg(ictx->dev, "%s: write %d bytes to LCD\n",
@@ -958,17 +963,6 @@ static void usb_tx_callback(struct urb *urb)
}
/**
- * mce/rc6 keypresses have no distinct release code, use timer
- */
-static void imon_mce_timeout(unsigned long data)
-{
- struct imon_context *ictx = (struct imon_context *)data;
-
- input_report_key(ictx->idev, ictx->last_keycode, 0);
- input_sync(ictx->idev);
-}
-
-/**
* report touchscreen input
*/
static void imon_touch_display_timeout(unsigned long data)
@@ -1008,14 +1002,11 @@ int imon_ir_change_protocol(void *priv, u64 ir_type)
dev_dbg(dev, "Configuring IR receiver for MCE protocol\n");
ir_proto_packet[0] = 0x01;
pad_mouse = false;
- init_timer(&ictx->itimer);
- ictx->itimer.data = (unsigned long)ictx;
- ictx->itimer.function = imon_mce_timeout;
break;
case IR_TYPE_UNKNOWN:
case IR_TYPE_OTHER:
dev_dbg(dev, "Configuring IR receiver for iMON protocol\n");
- if (pad_stabilize)
+ if (pad_stabilize && !nomouse)
pad_mouse = true;
else {
dev_dbg(dev, "PAD stabilize functionality disabled\n");
@@ -1027,7 +1018,7 @@ int imon_ir_change_protocol(void *priv, u64 ir_type)
default:
dev_warn(dev, "Unsupported IR protocol specified, overriding "
"to iMON IR protocol\n");
- if (pad_stabilize)
+ if (pad_stabilize && !nomouse)
pad_mouse = true;
else {
dev_dbg(dev, "PAD stabilize functionality disabled\n");
@@ -1149,20 +1140,21 @@ static int stabilize(int a, int b, u16 timeout, u16 threshold)
return result;
}
-static u32 imon_remote_key_lookup(struct imon_context *ictx, u32 hw_code)
+static u32 imon_remote_key_lookup(struct imon_context *ictx, u32 scancode)
{
- u32 scancode = be32_to_cpu(hw_code);
u32 keycode;
u32 release;
bool is_release_code = false;
/* Look for the initial press of a button */
- keycode = ir_g_keycode_from_table(ictx->idev, scancode);
+ keycode = ir_g_keycode_from_table(ictx->rdev, scancode);
+ ictx->rc_toggle = 0x0;
+ ictx->rc_scancode = scancode;
/* Look for the release of a button */
if (keycode == KEY_RESERVED) {
release = scancode & ~0x4000;
- keycode = ir_g_keycode_from_table(ictx->idev, release);
+ keycode = ir_g_keycode_from_table(ictx->rdev, release);
if (keycode != KEY_RESERVED)
is_release_code = true;
}
@@ -1172,9 +1164,8 @@ static u32 imon_remote_key_lookup(struct imon_context *ictx, u32 hw_code)
return keycode;
}
-static u32 imon_mce_key_lookup(struct imon_context *ictx, u32 hw_code)
+static u32 imon_mce_key_lookup(struct imon_context *ictx, u32 scancode)
{
- u32 scancode = be32_to_cpu(hw_code);
u32 keycode;
#define MCE_KEY_MASK 0x7000
@@ -1188,18 +1179,21 @@ static u32 imon_mce_key_lookup(struct imon_context *ictx, u32 hw_code)
* but we can't or them into all codes, as some keys are decoded in
* a different way w/o the same use of the toggle bit...
*/
- if ((scancode >> 24) & 0x80)
+ if (scancode & 0x80000000)
scancode = scancode | MCE_KEY_MASK | MCE_TOGGLE_BIT;
- keycode = ir_g_keycode_from_table(ictx->idev, scancode);
+ ictx->rc_scancode = scancode;
+ keycode = ir_g_keycode_from_table(ictx->rdev, scancode);
+
+ /* not used in mce mode, but make sure we know it's false */
+ ictx->release_code = false;
return keycode;
}
-static u32 imon_panel_key_lookup(u64 hw_code)
+static u32 imon_panel_key_lookup(u64 code)
{
int i;
- u64 code = be64_to_cpu(hw_code);
u32 keycode = KEY_RESERVED;
for (i = 0; i < ARRAY_SIZE(imon_panel_key_table); i++) {
@@ -1219,6 +1213,9 @@ static bool imon_mouse_event(struct imon_context *ictx,
u8 right_shift = 1;
bool mouse_input = true;
int dir = 0;
+ unsigned long flags;
+
+ spin_lock_irqsave(&ictx->kc_lock, flags);
/* newer iMON device PAD or mouse button */
if (ictx->product != 0xffdc && (buf[0] & 0x01) && len == 5) {
@@ -1250,6 +1247,8 @@ static bool imon_mouse_event(struct imon_context *ictx,
} else
mouse_input = false;
+ spin_unlock_irqrestore(&ictx->kc_lock, flags);
+
if (mouse_input) {
dev_dbg(ictx->dev, "sending mouse data via input subsystem\n");
@@ -1264,7 +1263,9 @@ static bool imon_mouse_event(struct imon_context *ictx,
buf[1] >> right_shift & 0x1);
}
input_sync(ictx->idev);
+ spin_lock_irqsave(&ictx->kc_lock, flags);
ictx->last_keycode = ictx->kc;
+ spin_unlock_irqrestore(&ictx->kc_lock, flags);
}
return mouse_input;
@@ -1286,8 +1287,8 @@ static void imon_pad_to_keys(struct imon_context *ictx, unsigned char *buf)
int dir = 0;
char rel_x = 0x00, rel_y = 0x00;
u16 timeout, threshold;
- u64 temp_key;
- u32 remote_key;
+ u32 scancode = KEY_RESERVED;
+ unsigned long flags;
/*
* The imon directional pad functions more like a touchpad. Bytes 3 & 4
@@ -1311,26 +1312,36 @@ static void imon_pad_to_keys(struct imon_context *ictx, unsigned char *buf)
dir = stabilize((int)rel_x, (int)rel_y,
timeout, threshold);
if (!dir) {
+ spin_lock_irqsave(&ictx->kc_lock,
+ flags);
ictx->kc = KEY_UNKNOWN;
+ spin_unlock_irqrestore(&ictx->kc_lock,
+ flags);
return;
}
buf[2] = dir & 0xFF;
buf[3] = (dir >> 8) & 0xFF;
- memcpy(&temp_key, buf, sizeof(temp_key));
- remote_key = (u32) (le64_to_cpu(temp_key)
- & 0xffffffff);
- ictx->kc = imon_remote_key_lookup(ictx,
- remote_key);
+ scancode = be32_to_cpu(*((u32 *)buf));
}
} else {
+ /*
+ * Hack alert: instead of using keycodes, we have
+ * to use hard-coded scancodes here...
+ */
if (abs(rel_y) > abs(rel_x)) {
buf[2] = (rel_y > 0) ? 0x7F : 0x80;
buf[3] = 0;
- ictx->kc = (rel_y > 0) ? KEY_DOWN : KEY_UP;
+ if (rel_y > 0)
+ scancode = 0x01007f00; /* KEY_DOWN */
+ else
+ scancode = 0x01008000; /* KEY_UP */
} else {
buf[2] = 0;
buf[3] = (rel_x > 0) ? 0x7F : 0x80;
- ictx->kc = (rel_x > 0) ? KEY_RIGHT : KEY_LEFT;
+ if (rel_x > 0)
+ scancode = 0x0100007f; /* KEY_RIGHT */
+ else
+ scancode = 0x01000080; /* KEY_LEFT */
}
}
@@ -1367,34 +1378,56 @@ static void imon_pad_to_keys(struct imon_context *ictx, unsigned char *buf)
dir = stabilize((int)rel_x, (int)rel_y,
timeout, threshold);
if (!dir) {
+ spin_lock_irqsave(&ictx->kc_lock, flags);
ictx->kc = KEY_UNKNOWN;
+ spin_unlock_irqrestore(&ictx->kc_lock, flags);
return;
}
buf[2] = dir & 0xFF;
buf[3] = (dir >> 8) & 0xFF;
- memcpy(&temp_key, buf, sizeof(temp_key));
- remote_key = (u32) (le64_to_cpu(temp_key) & 0xffffffff);
- ictx->kc = imon_remote_key_lookup(ictx, remote_key);
+ scancode = be32_to_cpu(*((u32 *)buf));
} else {
+ /*
+ * Hack alert: instead of using keycodes, we have
+ * to use hard-coded scancodes here...
+ */
if (abs(rel_y) > abs(rel_x)) {
buf[2] = (rel_y > 0) ? 0x7F : 0x80;
buf[3] = 0;
- ictx->kc = (rel_y > 0) ? KEY_DOWN : KEY_UP;
+ if (rel_y > 0)
+ scancode = 0x01007f00; /* KEY_DOWN */
+ else
+ scancode = 0x01008000; /* KEY_UP */
} else {
buf[2] = 0;
buf[3] = (rel_x > 0) ? 0x7F : 0x80;
- ictx->kc = (rel_x > 0) ? KEY_RIGHT : KEY_LEFT;
+ if (rel_x > 0)
+ scancode = 0x0100007f; /* KEY_RIGHT */
+ else
+ scancode = 0x01000080; /* KEY_LEFT */
}
}
}
+
+ if (scancode) {
+ spin_lock_irqsave(&ictx->kc_lock, flags);
+ ictx->kc = imon_remote_key_lookup(ictx, scancode);
+ spin_unlock_irqrestore(&ictx->kc_lock, flags);
+ }
}
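imon_pad_to_keys() now hands imon_remote_key_lookup() a scancode built from the first four buffer bytes in big-endian order, substituting hard-coded values such as 0x01007f00 for pure directional presses. A small user-space sketch of that byte-to-scancode folding, assuming the leading bytes are as implied by the hard-coded values above (the helper name is illustrative):

#include <stdint.h>
#include <stdio.h>

/* equivalent of be32_to_cpu(*(u32 *)buf) on the raw PAD buffer */
static uint32_t pad_buf_to_scancode(const uint8_t *buf)
{
	return ((uint32_t)buf[0] << 24) | ((uint32_t)buf[1] << 16) |
	       ((uint32_t)buf[2] << 8)  |  (uint32_t)buf[3];
}

int main(void)
{
	/* synthesized "straight down" press: buf[2] = 0x7F, buf[3] = 0 */
	uint8_t buf[4] = { 0x01, 0x00, 0x7f, 0x00 };

	/* prints 0x01007f00, the hard-coded KEY_DOWN scancode */
	printf("scancode = 0x%08x\n",
	       (unsigned int)pad_buf_to_scancode(buf));
	return 0;
}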
+/**
+ * figure out if this is a press or a release. We don't actually
+ * care about repeats, as those will be auto-generated within the IR
+ * subsystem for repeating scancodes.
+ */
static int imon_parse_press_type(struct imon_context *ictx,
unsigned char *buf, u8 ktype)
{
int press_type = 0;
- int rep_delay = ictx->idev->rep[REP_DELAY];
- int rep_period = ictx->idev->rep[REP_PERIOD];
+ unsigned long flags;
+
+ spin_lock_irqsave(&ictx->kc_lock, flags);
/* key release of 0x02XXXXXX key */
if (ictx->kc == KEY_RESERVED && buf[0] == 0x02 && buf[3] == 0x00)
@@ -1410,22 +1443,10 @@ static int imon_parse_press_type(struct imon_context *ictx,
buf[2] == 0x81 && buf[3] == 0xb7)
ictx->kc = ictx->last_keycode;
- /* mce-specific button handling */
+ /* mce-specific button handling, no keyup events */
else if (ktype == IMON_KEY_MCE) {
- /* initial press */
- if (ictx->kc != ictx->last_keycode
- || buf[2] != ictx->mce_toggle_bit) {
- ictx->last_keycode = ictx->kc;
- ictx->mce_toggle_bit = buf[2];
- press_type = 1;
- mod_timer(&ictx->itimer,
- jiffies + msecs_to_jiffies(rep_delay));
- /* repeat */
- } else {
- press_type = 2;
- mod_timer(&ictx->itimer,
- jiffies + msecs_to_jiffies(rep_period));
- }
+ ictx->rc_toggle = buf[2];
+ press_type = 1;
/* incoherent or irrelevant data */
} else if (ictx->kc == KEY_RESERVED)
@@ -1439,6 +1460,8 @@ static int imon_parse_press_type(struct imon_context *ictx,
else
press_type = 1;
+ spin_unlock_irqrestore(&ictx->kc_lock, flags);
+
return press_type;
}
@@ -1451,41 +1474,45 @@ static void imon_incoming_packet(struct imon_context *ictx,
int len = urb->actual_length;
unsigned char *buf = urb->transfer_buffer;
struct device *dev = ictx->dev;
+ unsigned long flags;
u32 kc;
bool norelease = false;
int i;
- u64 temp_key;
- u64 panel_key = 0;
- u32 remote_key = 0;
- struct input_dev *idev = NULL;
+ u64 scancode;
+ struct input_dev *rdev = NULL;
+ struct ir_input_dev *irdev = NULL;
int press_type = 0;
int msec;
struct timeval t;
static struct timeval prev_time = { 0, 0 };
- u8 ktype = IMON_KEY_IMON;
+ u8 ktype;
- idev = ictx->idev;
+ rdev = ictx->rdev;
+ irdev = input_get_drvdata(rdev);
/* filter out junk data on the older 0xffdc imon devices */
if ((buf[0] == 0xff) && (buf[1] == 0xff) && (buf[2] == 0xff))
return;
/* Figure out what key was pressed */
- memcpy(&temp_key, buf, sizeof(temp_key));
if (len == 8 && buf[7] == 0xee) {
+ scancode = be64_to_cpu(*((u64 *)buf));
ktype = IMON_KEY_PANEL;
- panel_key = le64_to_cpu(temp_key);
- kc = imon_panel_key_lookup(panel_key);
+ kc = imon_panel_key_lookup(scancode);
} else {
- remote_key = (u32) (le64_to_cpu(temp_key) & 0xffffffff);
+ scancode = be32_to_cpu(*((u32 *)buf));
if (ictx->ir_type == IR_TYPE_RC6) {
+ ktype = IMON_KEY_IMON;
if (buf[0] == 0x80)
ktype = IMON_KEY_MCE;
- kc = imon_mce_key_lookup(ictx, remote_key);
- } else
- kc = imon_remote_key_lookup(ictx, remote_key);
+ kc = imon_mce_key_lookup(ictx, scancode);
+ } else {
+ ktype = IMON_KEY_IMON;
+ kc = imon_remote_key_lookup(ictx, scancode);
+ }
}
+ spin_lock_irqsave(&ictx->kc_lock, flags);
/* keyboard/mouse mode toggle button */
if (kc == KEY_KEYBOARD && !ictx->release_code) {
ictx->last_keycode = kc;
@@ -1493,6 +1520,7 @@ static void imon_incoming_packet(struct imon_context *ictx,
ictx->pad_mouse = ~(ictx->pad_mouse) & 0x1;
dev_dbg(dev, "toggling to %s mode\n",
ictx->pad_mouse ? "mouse" : "keyboard");
+ spin_unlock_irqrestore(&ictx->kc_lock, flags);
return;
} else {
ictx->pad_mouse = 0;
@@ -1501,11 +1529,13 @@ static void imon_incoming_packet(struct imon_context *ictx,
}
ictx->kc = kc;
+ spin_unlock_irqrestore(&ictx->kc_lock, flags);
/* send touchscreen events through input subsystem if touchpad data */
if (ictx->display_type == IMON_DISPLAY_TYPE_VGA && len == 8 &&
buf[7] == 0x86) {
imon_touch_event(ictx, buf);
+ return;
/* look for mouse events with pad in mouse mode */
} else if (ictx->pad_mouse) {
@@ -1533,36 +1563,55 @@ static void imon_incoming_packet(struct imon_context *ictx,
if (press_type < 0)
goto not_input_data;
+ spin_lock_irqsave(&ictx->kc_lock, flags);
if (ictx->kc == KEY_UNKNOWN)
goto unknown_key;
+ spin_unlock_irqrestore(&ictx->kc_lock, flags);
+
+ if (ktype != IMON_KEY_PANEL) {
+ if (press_type == 0)
+ ir_keyup(irdev);
+ else {
+ ir_keydown(rdev, ictx->rc_scancode, ictx->rc_toggle);
+ spin_lock_irqsave(&ictx->kc_lock, flags);
+ ictx->last_keycode = ictx->kc;
+ spin_unlock_irqrestore(&ictx->kc_lock, flags);
+ }
+ return;
+ }
- /* KEY_MUTE repeats from MCE and knob need to be suppressed */
- if ((ictx->kc == KEY_MUTE && ictx->kc == ictx->last_keycode)
- && (buf[7] == 0xee || ktype == IMON_KEY_MCE)) {
+ /* Only panel type events left to process now */
+ spin_lock_irqsave(&ictx->kc_lock, flags);
+
+ /* KEY_MUTE repeats from knob need to be suppressed */
+ if (ictx->kc == KEY_MUTE && ictx->kc == ictx->last_keycode) {
do_gettimeofday(&t);
msec = tv2int(&t, &prev_time);
prev_time = t;
- if (msec < idev->rep[REP_DELAY])
+ if (msec < ictx->idev->rep[REP_DELAY]) {
+ spin_unlock_irqrestore(&ictx->kc_lock, flags);
return;
+ }
}
+ kc = ictx->kc;
- input_report_key(idev, ictx->kc, press_type);
- input_sync(idev);
+ spin_unlock_irqrestore(&ictx->kc_lock, flags);
- /* panel keys and some remote keys don't generate a release */
- if (panel_key || norelease) {
- input_report_key(idev, ictx->kc, 0);
- input_sync(idev);
- }
+ input_report_key(ictx->idev, kc, press_type);
+ input_sync(ictx->idev);
- ictx->last_keycode = ictx->kc;
+ /* panel keys don't generate a release */
+ input_report_key(ictx->idev, kc, 0);
+ input_sync(ictx->idev);
+
+ ictx->last_keycode = kc;
return;
unknown_key:
+ spin_unlock_irqrestore(&ictx->kc_lock, flags);
dev_info(dev, "%s: unknown keypress, code 0x%llx\n", __func__,
- (panel_key ? be64_to_cpu(panel_key) :
- be32_to_cpu(remote_key)));
+ (long long)scancode);
return;
not_input_data:
@@ -1653,31 +1702,205 @@ static void usb_rx_callback_intf1(struct urb *urb)
usb_submit_urb(ictx->rx_urb_intf1, GFP_ATOMIC);
}
+/*
+ * The 0x15c2:0xffdc device ID was used for umpteen different imon
+ * devices, and all of them constantly spew interrupts, even when there
+ * is no actual data to report. However, byte 6 of this buffer looks like
+ * its unique across device variants, so we're trying to key off that to
+ * figure out which display type (if any) and what IR protocol the device
+ * actually supports. These devices have their IR protocol hard-coded into
+ * their firmware; they can't be changed on the fly like the newer hardware.
+ */
+static void imon_get_ffdc_type(struct imon_context *ictx)
+{
+ u8 ffdc_cfg_byte = ictx->usb_rx_buf[6];
+ u8 detected_display_type = IMON_DISPLAY_TYPE_NONE;
+ u64 allowed_protos = IR_TYPE_OTHER;
+
+ switch (ffdc_cfg_byte) {
+ /* iMON Knob, no display, iMON IR + vol knob */
+ case 0x21:
+ dev_info(ictx->dev, "0xffdc iMON Knob, iMON IR");
+ ictx->display_supported = false;
+ break;
+ /* iMON 2.4G LT (usb stick), no display, iMON RF */
+ case 0x4e:
+ dev_info(ictx->dev, "0xffdc iMON 2.4G LT, iMON RF");
+ ictx->display_supported = false;
+ ictx->rf_device = true;
+ break;
+ /* iMON VFD, no IR (does have vol knob tho) */
+ case 0x35:
+ dev_info(ictx->dev, "0xffdc iMON VFD + knob, no IR");
+ detected_display_type = IMON_DISPLAY_TYPE_VFD;
+ break;
+ /* iMON VFD, iMON IR */
+ case 0x24:
+ case 0x85:
+ dev_info(ictx->dev, "0xffdc iMON VFD, iMON IR");
+ detected_display_type = IMON_DISPLAY_TYPE_VFD;
+ break;
+ /* iMON VFD, MCE IR */
+ case 0x9e:
+ dev_info(ictx->dev, "0xffdc iMON VFD, MCE IR");
+ detected_display_type = IMON_DISPLAY_TYPE_VFD;
+ allowed_protos = IR_TYPE_RC6;
+ break;
+ /* iMON LCD, MCE IR */
+ case 0x9f:
+ dev_info(ictx->dev, "0xffdc iMON LCD, MCE IR");
+ detected_display_type = IMON_DISPLAY_TYPE_LCD;
+ allowed_protos = IR_TYPE_RC6;
+ break;
+ default:
+ dev_info(ictx->dev, "Unknown 0xffdc device, "
+ "defaulting to VFD and iMON IR");
+ detected_display_type = IMON_DISPLAY_TYPE_VFD;
+ break;
+ }
+
+ printk(KERN_CONT " (id 0x%02x)\n", ffdc_cfg_byte);
+
+ ictx->display_type = detected_display_type;
+ ictx->props->allowed_protos = allowed_protos;
+ ictx->ir_type = allowed_protos;
+}
+
+static void imon_set_display_type(struct imon_context *ictx)
+{
+ u8 configured_display_type = IMON_DISPLAY_TYPE_VFD;
+
+ /*
+ * Try to auto-detect the type of display if the user hasn't set
+ * it by hand via the display_type modparam. Default is VFD.
+ */
+
+ if (display_type == IMON_DISPLAY_TYPE_AUTO) {
+ switch (ictx->product) {
+ case 0xffdc:
+ /* set in imon_get_ffdc_type() */
+ configured_display_type = ictx->display_type;
+ break;
+ case 0x0034:
+ case 0x0035:
+ configured_display_type = IMON_DISPLAY_TYPE_VGA;
+ break;
+ case 0x0038:
+ case 0x0039:
+ case 0x0045:
+ configured_display_type = IMON_DISPLAY_TYPE_LCD;
+ break;
+ case 0x003c:
+ case 0x0041:
+ case 0x0042:
+ case 0x0043:
+ configured_display_type = IMON_DISPLAY_TYPE_NONE;
+ ictx->display_supported = false;
+ break;
+ case 0x0036:
+ case 0x0044:
+ default:
+ configured_display_type = IMON_DISPLAY_TYPE_VFD;
+ break;
+ }
+ } else {
+ configured_display_type = display_type;
+ if (display_type == IMON_DISPLAY_TYPE_NONE)
+ ictx->display_supported = false;
+ else
+ ictx->display_supported = true;
+ dev_info(ictx->dev, "%s: overriding display type to %d via "
+ "modparam\n", __func__, display_type);
+ }
+
+ ictx->display_type = configured_display_type;
+}
+
+static struct input_dev *imon_init_rdev(struct imon_context *ictx)
+{
+ struct input_dev *rdev;
+ struct ir_dev_props *props;
+ int ret;
+ char *ir_codes = NULL;
+ const unsigned char fp_packet[] = { 0x40, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x88 };
+
+ rdev = input_allocate_device();
+ props = kzalloc(sizeof(*props), GFP_KERNEL);
+ if (!rdev || !props) {
+ dev_err(ictx->dev, "remote control dev allocation failed\n");
+ goto out;
+ }
+
+ snprintf(ictx->name_rdev, sizeof(ictx->name_rdev),
+ "iMON Remote (%04x:%04x)", ictx->vendor, ictx->product);
+ usb_make_path(ictx->usbdev_intf0, ictx->phys_rdev,
+ sizeof(ictx->phys_rdev));
+ strlcat(ictx->phys_rdev, "/input0", sizeof(ictx->phys_rdev));
+
+ rdev->name = ictx->name_rdev;
+ rdev->phys = ictx->phys_rdev;
+ usb_to_input_id(ictx->usbdev_intf0, &rdev->id);
+ rdev->dev.parent = ictx->dev;
+ rdev->evbit[0] = BIT_MASK(EV_KEY) | BIT_MASK(EV_REP);
+ input_set_drvdata(rdev, ictx);
+
+ props->priv = ictx;
+ props->driver_type = RC_DRIVER_SCANCODE;
+ props->allowed_protos = IR_TYPE_OTHER | IR_TYPE_RC6; /* iMON PAD or MCE */
+ props->change_protocol = imon_ir_change_protocol;
+ ictx->props = props;
+
+ /* Enable front-panel buttons and/or knobs */
+ memcpy(ictx->usb_tx_buf, &fp_packet, sizeof(fp_packet));
+ ret = send_packet(ictx);
+ /* Not fatal, but warn about it */
+ if (ret)
+ dev_info(ictx->dev, "panel buttons/knobs setup failed\n");
+
+ if (ictx->product == 0xffdc)
+ imon_get_ffdc_type(ictx);
+
+ imon_set_display_type(ictx);
+
+ if (ictx->ir_type == IR_TYPE_RC6)
+ ir_codes = RC_MAP_IMON_MCE;
+ else
+ ir_codes = RC_MAP_IMON_PAD;
+
+ ret = ir_input_register(rdev, ir_codes, props, MOD_NAME);
+ if (ret < 0) {
+ dev_err(ictx->dev, "remote input dev register failed\n");
+ goto out;
+ }
+
+ return rdev;
+
+out:
+ kfree(props);
+ input_free_device(rdev);
+ return NULL;
+}
+
static struct input_dev *imon_init_idev(struct imon_context *ictx)
{
struct input_dev *idev;
- struct ir_dev_props *props;
int ret, i;
idev = input_allocate_device();
if (!idev) {
- dev_err(ictx->dev, "remote input dev allocation failed\n");
- goto idev_alloc_failed;
- }
-
- props = kzalloc(sizeof(struct ir_dev_props), GFP_KERNEL);
- if (!props) {
- dev_err(ictx->dev, "remote ir dev props allocation failed\n");
- goto props_alloc_failed;
+ dev_err(ictx->dev, "input dev allocation failed\n");
+ goto out;
}
snprintf(ictx->name_idev, sizeof(ictx->name_idev),
- "iMON Remote (%04x:%04x)", ictx->vendor, ictx->product);
+ "iMON Panel, Knob and Mouse(%04x:%04x)",
+ ictx->vendor, ictx->product);
idev->name = ictx->name_idev;
usb_make_path(ictx->usbdev_intf0, ictx->phys_idev,
sizeof(ictx->phys_idev));
- strlcat(ictx->phys_idev, "/input0", sizeof(ictx->phys_idev));
+ strlcat(ictx->phys_idev, "/input1", sizeof(ictx->phys_idev));
idev->phys = ictx->phys_idev;
idev->evbit[0] = BIT_MASK(EV_KEY) | BIT_MASK(EV_REP) | BIT_MASK(EV_REL);
@@ -1693,30 +1916,20 @@ static struct input_dev *imon_init_idev(struct imon_context *ictx)
__set_bit(kc, idev->keybit);
}
- props->priv = ictx;
- props->driver_type = RC_DRIVER_SCANCODE;
- /* IR_TYPE_OTHER maps to iMON PAD remote, IR_TYPE_RC6 to MCE remote */
- props->allowed_protos = IR_TYPE_OTHER | IR_TYPE_RC6;
- props->change_protocol = imon_ir_change_protocol;
- ictx->props = props;
-
usb_to_input_id(ictx->usbdev_intf0, &idev->id);
idev->dev.parent = ictx->dev;
+ input_set_drvdata(idev, ictx);
- ret = ir_input_register(idev, RC_MAP_IMON_PAD, props, MOD_NAME);
+ ret = input_register_device(idev);
if (ret < 0) {
- dev_err(ictx->dev, "remote input dev register failed\n");
- goto idev_register_failed;
+ dev_err(ictx->dev, "input dev register failed\n");
+ goto out;
}
return idev;
-idev_register_failed:
- kfree(props);
-props_alloc_failed:
+out:
input_free_device(idev);
-idev_alloc_failed:
-
return NULL;
}
@@ -1738,7 +1951,7 @@ static struct input_dev *imon_init_touch(struct imon_context *ictx)
usb_make_path(ictx->usbdev_intf1, ictx->phys_touch,
sizeof(ictx->phys_touch));
- strlcat(ictx->phys_touch, "/input1", sizeof(ictx->phys_touch));
+ strlcat(ictx->phys_touch, "/input2", sizeof(ictx->phys_touch));
touch->phys = ictx->phys_touch;
touch->evbit[0] =
@@ -1850,7 +2063,7 @@ static bool imon_find_endpoints(struct imon_context *ictx,
/* Input endpoint is mandatory */
if (!ir_ep_found)
- err("%s: no valid input (IR) endpoint found.", __func__);
+ pr_err("no valid input (IR) endpoint found\n");
ictx->tx_control = tx_control;
@@ -1888,6 +2101,7 @@ static struct imon_context *imon_init_intf0(struct usb_interface *intf)
}
mutex_init(&ictx->lock);
+ spin_lock_init(&ictx->kc_lock);
mutex_lock(&ictx->lock);
@@ -1913,6 +2127,12 @@ static struct imon_context *imon_init_intf0(struct usb_interface *intf)
goto idev_setup_failed;
}
+ ictx->rdev = imon_init_rdev(ictx);
+ if (!ictx->rdev) {
+ dev_err(dev, "%s: rc device setup failed\n", __func__);
+ goto rdev_setup_failed;
+ }
+
usb_fill_int_urb(ictx->rx_urb_intf0, ictx->usbdev_intf0,
usb_rcvintpipe(ictx->usbdev_intf0,
ictx->rx_endpoint_intf0->bEndpointAddress),
@@ -1922,15 +2142,16 @@ static struct imon_context *imon_init_intf0(struct usb_interface *intf)
ret = usb_submit_urb(ictx->rx_urb_intf0, GFP_KERNEL);
if (ret) {
- err("%s: usb_submit_urb failed for intf0 (%d)",
- __func__, ret);
+ pr_err("usb_submit_urb failed for intf0 (%d)\n", ret);
goto urb_submit_failed;
}
return ictx;
urb_submit_failed:
- ir_input_unregister(ictx->idev);
+ ir_input_unregister(ictx->rdev);
+rdev_setup_failed:
+ input_unregister_device(ictx->idev);
idev_setup_failed:
find_endpoint_failed:
mutex_unlock(&ictx->lock);
@@ -1954,7 +2175,7 @@ static struct imon_context *imon_init_intf1(struct usb_interface *intf,
rx_urb = usb_alloc_urb(0, GFP_KERNEL);
if (!rx_urb) {
- err("%s: usb_alloc_urb failed for IR urb", __func__);
+ pr_err("usb_alloc_urb failed for IR urb\n");
goto rx_urb_alloc_failed;
}
@@ -1992,8 +2213,7 @@ static struct imon_context *imon_init_intf1(struct usb_interface *intf,
ret = usb_submit_urb(ictx->rx_urb_intf1, GFP_KERNEL);
if (ret) {
- err("%s: usb_submit_urb failed for intf1 (%d)",
- __func__, ret);
+ pr_err("usb_submit_urb failed for intf1 (%d)\n", ret);
goto urb_submit_failed;
}
@@ -2012,116 +2232,6 @@ rx_urb_alloc_failed:
return NULL;
}
-/*
- * The 0x15c2:0xffdc device ID was used for umpteen different imon
- * devices, and all of them constantly spew interrupts, even when there
- * is no actual data to report. However, byte 6 of this buffer looks like
- * its unique across device variants, so we're trying to key off that to
- * figure out which display type (if any) and what IR protocol the device
- * actually supports. These devices have their IR protocol hard-coded into
- * their firmware, they can't be changed on the fly like the newer hardware.
- */
-static void imon_get_ffdc_type(struct imon_context *ictx)
-{
- u8 ffdc_cfg_byte = ictx->usb_rx_buf[6];
- u8 detected_display_type = IMON_DISPLAY_TYPE_NONE;
- u64 allowed_protos = IR_TYPE_OTHER;
-
- switch (ffdc_cfg_byte) {
- /* iMON Knob, no display, iMON IR + vol knob */
- case 0x21:
- dev_info(ictx->dev, "0xffdc iMON Knob, iMON IR");
- ictx->display_supported = false;
- break;
- /* iMON 2.4G LT (usb stick), no display, iMON RF */
- case 0x4e:
- dev_info(ictx->dev, "0xffdc iMON 2.4G LT, iMON RF");
- ictx->display_supported = false;
- ictx->rf_device = true;
- break;
- /* iMON VFD, no IR (does have vol knob tho) */
- case 0x35:
- dev_info(ictx->dev, "0xffdc iMON VFD + knob, no IR");
- detected_display_type = IMON_DISPLAY_TYPE_VFD;
- break;
- /* iMON VFD, iMON IR */
- case 0x24:
- case 0x85:
- dev_info(ictx->dev, "0xffdc iMON VFD, iMON IR");
- detected_display_type = IMON_DISPLAY_TYPE_VFD;
- break;
- /* iMON LCD, MCE IR */
- case 0x9e:
- case 0x9f:
- dev_info(ictx->dev, "0xffdc iMON LCD, MCE IR");
- detected_display_type = IMON_DISPLAY_TYPE_LCD;
- allowed_protos = IR_TYPE_RC6;
- break;
- default:
- dev_info(ictx->dev, "Unknown 0xffdc device, "
- "defaulting to VFD and iMON IR");
- detected_display_type = IMON_DISPLAY_TYPE_VFD;
- break;
- }
-
- printk(KERN_CONT " (id 0x%02x)\n", ffdc_cfg_byte);
-
- ictx->display_type = detected_display_type;
- ictx->props->allowed_protos = allowed_protos;
- ictx->ir_type = allowed_protos;
-}
-
-static void imon_set_display_type(struct imon_context *ictx,
- struct usb_interface *intf)
-{
- u8 configured_display_type = IMON_DISPLAY_TYPE_VFD;
-
- /*
- * Try to auto-detect the type of display if the user hasn't set
- * it by hand via the display_type modparam. Default is VFD.
- */
-
- if (display_type == IMON_DISPLAY_TYPE_AUTO) {
- switch (ictx->product) {
- case 0xffdc:
- /* set in imon_get_ffdc_type() */
- configured_display_type = ictx->display_type;
- break;
- case 0x0034:
- case 0x0035:
- configured_display_type = IMON_DISPLAY_TYPE_VGA;
- break;
- case 0x0038:
- case 0x0039:
- case 0x0045:
- configured_display_type = IMON_DISPLAY_TYPE_LCD;
- break;
- case 0x003c:
- case 0x0041:
- case 0x0042:
- case 0x0043:
- configured_display_type = IMON_DISPLAY_TYPE_NONE;
- ictx->display_supported = false;
- break;
- case 0x0036:
- case 0x0044:
- default:
- configured_display_type = IMON_DISPLAY_TYPE_VFD;
- break;
- }
- } else {
- configured_display_type = display_type;
- if (display_type == IMON_DISPLAY_TYPE_NONE)
- ictx->display_supported = false;
- else
- ictx->display_supported = true;
- dev_info(ictx->dev, "%s: overriding display type to %d via "
- "modparam\n", __func__, display_type);
- }
-
- ictx->display_type = configured_display_type;
-}
-
static void imon_init_display(struct imon_context *ictx,
struct usb_interface *intf)
{
@@ -2130,8 +2240,7 @@ static void imon_init_display(struct imon_context *ictx,
dev_dbg(ictx->dev, "Registering iMON display with sysfs\n");
/* set up sysfs entry for built-in clock */
- ret = sysfs_create_group(&intf->dev.kobj,
- &imon_display_attribute_group);
+ ret = sysfs_create_group(&intf->dev.kobj, &imon_display_attr_group);
if (ret)
dev_err(ictx->dev, "Could not create display sysfs "
"entries(%d)", ret);
@@ -2162,8 +2271,6 @@ static int __devinit imon_probe(struct usb_interface *interface,
struct imon_context *ictx = NULL;
struct imon_context *first_if_ctx = NULL;
u16 vendor, product;
- const unsigned char fp_packet[] = { 0x40, 0x00, 0x00, 0x00,
- 0x00, 0x00, 0x00, 0x88 };
code_length = BUF_CHUNK_SIZE * 8;
@@ -2185,7 +2292,7 @@ static int __devinit imon_probe(struct usb_interface *interface,
if (ifnum == 0) {
ictx = imon_init_intf0(interface);
if (!ictx) {
- err("%s: failed to initialize context!\n", __func__);
+ pr_err("failed to initialize context!\n");
ret = -ENODEV;
goto fail;
}
@@ -2194,7 +2301,7 @@ static int __devinit imon_probe(struct usb_interface *interface,
/* this is the secondary interface on the device */
ictx = imon_init_intf1(interface, first_if_ctx);
if (!ictx) {
- err("%s: failed to attach to context!\n", __func__);
+ pr_err("failed to attach to context!\n");
ret = -ENODEV;
goto fail;
}
@@ -2204,39 +2311,18 @@ static int __devinit imon_probe(struct usb_interface *interface,
usb_set_intfdata(interface, ictx);
if (ifnum == 0) {
- /* Enable front-panel buttons and/or knobs */
- memcpy(ictx->usb_tx_buf, &fp_packet, sizeof(fp_packet));
- ret = send_packet(ictx);
- /* Not fatal, but warn about it */
- if (ret)
- dev_info(dev, "failed to enable panel buttons "
- "and/or knobs\n");
-
- if (product == 0xffdc)
- imon_get_ffdc_type(ictx);
-
- imon_set_display_type(ictx, interface);
-
if (product == 0xffdc && ictx->rf_device) {
sysfs_err = sysfs_create_group(&interface->dev.kobj,
- &imon_rf_attribute_group);
+ &imon_rf_attr_group);
if (sysfs_err)
- err("%s: Could not create RF sysfs entries(%d)",
- __func__, sysfs_err);
+ pr_err("Could not create RF sysfs entries(%d)\n",
+ sysfs_err);
}
if (ictx->display_supported)
imon_init_display(ictx, interface);
}
- /* set IR protocol/remote type */
- ret = imon_ir_change_protocol(ictx, ictx->ir_type);
- if (ret) {
- dev_warn(dev, "%s: failed to set IR protocol, falling back "
- "to standard iMON protocol mode\n", __func__);
- ictx->ir_type = IR_TYPE_OTHER;
- }
-
dev_info(dev, "iMON device (%04x:%04x, intf%d) on "
"usb<%d:%d> initialized\n", vendor, product, ifnum,
usbdev->bus->busnum, usbdev->devnum);
@@ -2275,10 +2361,8 @@ static void __devexit imon_disconnect(struct usb_interface *interface)
* sysfs_remove_group is safe to call even if sysfs_create_group
* hasn't been called
*/
- sysfs_remove_group(&interface->dev.kobj,
- &imon_display_attribute_group);
- sysfs_remove_group(&interface->dev.kobj,
- &imon_rf_attribute_group);
+ sysfs_remove_group(&interface->dev.kobj, &imon_display_attr_group);
+ sysfs_remove_group(&interface->dev.kobj, &imon_rf_attr_group);
usb_set_intfdata(interface, NULL);
@@ -2291,7 +2375,8 @@ static void __devexit imon_disconnect(struct usb_interface *interface)
if (ifnum == 0) {
ictx->dev_present_intf0 = false;
usb_kill_urb(ictx->rx_urb_intf0);
- ir_input_unregister(ictx->idev);
+ input_unregister_device(ictx->idev);
+ ir_input_unregister(ictx->rdev);
if (ictx->display_supported) {
if (ictx->display_type == IMON_DISPLAY_TYPE_LCD)
usb_deregister_dev(interface, &imon_lcd_class);
@@ -2311,11 +2396,8 @@ static void __devexit imon_disconnect(struct usb_interface *interface)
mutex_unlock(&ictx->lock);
if (!ictx->display_isopen)
free_imon_context(ictx);
- } else {
- if (ictx->ir_type == IR_TYPE_RC6)
- del_timer_sync(&ictx->itimer);
+ } else
mutex_unlock(&ictx->lock);
- }
mutex_unlock(&driver_lock);
@@ -2372,7 +2454,7 @@ static int __init imon_init(void)
rc = usb_register(&imon_driver);
if (rc) {
- err("%s: usb register failed(%d)", __func__, rc);
+ pr_err("usb register failed(%d)\n", rc);
rc = -ENODEV;
}
diff --git a/drivers/media/IR/ir-core-priv.h b/drivers/media/IR/ir-core-priv.h
index a85a8c7c905a..81c936bd793f 100644
--- a/drivers/media/IR/ir-core-priv.h
+++ b/drivers/media/IR/ir-core-priv.h
@@ -17,6 +17,7 @@
#define _IR_RAW_EVENT
#include <linux/slab.h>
+#include <linux/spinlock.h>
#include <media/ir-core.h>
struct ir_raw_handler {
@@ -33,6 +34,7 @@ struct ir_raw_handler {
struct ir_raw_event_ctrl {
struct list_head list; /* to keep track of raw clients */
struct task_struct *thread;
+ spinlock_t lock;
struct kfifo kfifo; /* fifo for the pulse/space durations */
ktime_t last_event; /* when last event occurred */
enum raw_event_type last_type; /* last event type */
@@ -76,10 +78,22 @@ struct ir_raw_event_ctrl {
bool first;
bool toggle;
} jvc;
+ struct rc5_sz_dec {
+ int state;
+ u32 bits;
+ unsigned count;
+ unsigned wanted_bits;
+ } rc5_sz;
struct lirc_codec {
struct ir_input_dev *ir_dev;
struct lirc_driver *drv;
int carrier_low;
+
+ ktime_t gap_start;
+ u64 gap_duration;
+ bool gap;
+ bool send_timeout_reports;
+
} lirc;
};
@@ -107,13 +121,19 @@ static inline void decrease_duration(struct ir_raw_event *ev, unsigned duration)
ev->duration -= duration;
}
+/* Returns true if event is normal pulse/space event */
+static inline bool is_timing_event(struct ir_raw_event ev)
+{
+ return !ev.carrier_report && !ev.reset;
+}
+
#define TO_US(duration) DIV_ROUND_CLOSEST((duration), 1000)
#define TO_STR(is_pulse) ((is_pulse) ? "pulse" : "space")
-#define IS_RESET(ev) (ev.duration == 0)
/*
* Routines from ir-sysfs.c - Meant to be called only internally inside
* ir-core
*/
+int ir_register_input(struct input_dev *input_dev);
int ir_register_class(struct input_dev *input_dev);
void ir_unregister_class(struct input_dev *input_dev);
diff --git a/drivers/media/IR/ir-jvc-decoder.c b/drivers/media/IR/ir-jvc-decoder.c
index 77a89c4de014..63dca6e5458b 100644
--- a/drivers/media/IR/ir-jvc-decoder.c
+++ b/drivers/media/IR/ir-jvc-decoder.c
@@ -50,8 +50,9 @@ static int ir_jvc_decode(struct input_dev *input_dev, struct ir_raw_event ev)
if (!(ir_dev->raw->enabled_protocols & IR_TYPE_JVC))
return 0;
- if (IS_RESET(ev)) {
- data->state = STATE_INACTIVE;
+ if (!is_timing_event(ev)) {
+ if (ev.reset)
+ data->state = STATE_INACTIVE;
return 0;
}
diff --git a/drivers/media/IR/ir-keytable.c b/drivers/media/IR/ir-keytable.c
index 7961d59f5cac..f60107c3b091 100644
--- a/drivers/media/IR/ir-keytable.c
+++ b/drivers/media/IR/ir-keytable.c
@@ -25,14 +25,56 @@
#define IR_KEYPRESS_TIMEOUT 250
/**
+ * ir_create_table() - initializes a scancode table
+ * @rc_tab: the ir_scancode_table to initialize
+ * @name: name to assign to the table
+ * @ir_type: ir type to assign to the new table
+ * @size: initial size of the table
+ * @return: zero on success or a negative error code
+ *
+ * This routine will initialize the ir_scancode_table and will allocate
+ * memory to hold at least the specified number of elements.
+ */
+static int ir_create_table(struct ir_scancode_table *rc_tab,
+ const char *name, u64 ir_type, size_t size)
+{
+ rc_tab->name = name;
+ rc_tab->ir_type = ir_type;
+ rc_tab->alloc = roundup_pow_of_two(size * sizeof(struct ir_scancode));
+ rc_tab->size = rc_tab->alloc / sizeof(struct ir_scancode);
+ rc_tab->scan = kmalloc(rc_tab->alloc, GFP_KERNEL);
+ if (!rc_tab->scan)
+ return -ENOMEM;
+
+ IR_dprintk(1, "Allocated space for %u keycode entries (%u bytes)\n",
+ rc_tab->size, rc_tab->alloc);
+ return 0;
+}
+
+/**
+ * ir_free_table() - frees memory allocated by a scancode table
+ * @rc_tab: the table whose mappings need to be freed
+ *
+ * This routine will free memory allocated for key mappings used by the given
+ * scancode table.
+ */
+static void ir_free_table(struct ir_scancode_table *rc_tab)
+{
+ rc_tab->size = 0;
+ kfree(rc_tab->scan);
+ rc_tab->scan = NULL;
+}
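ir_create_table() above rounds the byte allocation up to a power of two and then recomputes how many entries fit in it. A quick user-space check of that sizing rule, with local stand-ins for the kernel's roundup_pow_of_two() and struct ir_scancode (field layout and sizes are illustrative):

#include <stdio.h>
#include <stddef.h>

struct ir_scancode {
	unsigned int scancode;
	unsigned int keycode;
};

static size_t roundup_pow_of_two(size_t n)
{
	size_t p = 1;

	while (p < n)
		p <<= 1;
	return p;
}

int main(void)
{
	size_t requested = 100;	/* entries asked for */
	size_t alloc = roundup_pow_of_two(requested *
					  sizeof(struct ir_scancode));
	size_t size = alloc / sizeof(struct ir_scancode);

	/* 100 * 8 = 800 bytes -> 1024 bytes -> room for 128 entries */
	printf("alloc = %zu bytes, size = %zu entries\n", alloc, size);
	return 0;
}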
+
+/**
* ir_resize_table() - resizes a scancode table if necessary
* @rc_tab: the ir_scancode_table to resize
+ * @gfp_flags: gfp flags to use when allocating memory
* @return: zero on success or a negative error code
*
* This routine will shrink the ir_scancode_table if it has lots of
* unused entries and grow it if it is full.
*/
-static int ir_resize_table(struct ir_scancode_table *rc_tab)
+static int ir_resize_table(struct ir_scancode_table *rc_tab, gfp_t gfp_flags)
{
unsigned int oldalloc = rc_tab->alloc;
unsigned int newalloc = oldalloc;
@@ -57,7 +99,7 @@ static int ir_resize_table(struct ir_scancode_table *rc_tab)
if (newalloc == oldalloc)
return 0;
- newscan = kmalloc(newalloc, GFP_ATOMIC);
+ newscan = kmalloc(newalloc, gfp_flags);
if (!newscan) {
IR_dprintk(1, "Failed to kmalloc %u bytes\n", newalloc);
return -ENOMEM;
@@ -72,26 +114,78 @@ static int ir_resize_table(struct ir_scancode_table *rc_tab)
}
/**
- * ir_do_setkeycode() - internal function to set a keycode in the
- * scancode->keycode table
+ * ir_update_mapping() - set a keycode in the scancode->keycode table
* @dev: the struct input_dev device descriptor
- * @rc_tab: the struct ir_scancode_table to set the keycode in
- * @scancode: the scancode for the ir command
- * @keycode: the keycode for the ir command
- * @resize: whether the keytable may be shrunk
- * @return: -EINVAL if the keycode could not be inserted, otherwise zero.
+ * @rc_tab: scancode table to be adjusted
+ * @index: index of the mapping that needs to be updated
+ * @keycode: the desired keycode
+ * @return: previous keycode assigned to the mapping
+ *
+ * This routine is used to update the scancode->keycode mapping at the given
+ * position.
+ */
+static unsigned int ir_update_mapping(struct input_dev *dev,
+ struct ir_scancode_table *rc_tab,
+ unsigned int index,
+ unsigned int new_keycode)
+{
+ int old_keycode = rc_tab->scan[index].keycode;
+ int i;
+
+ /* Did the user wish to remove the mapping? */
+ if (new_keycode == KEY_RESERVED || new_keycode == KEY_UNKNOWN) {
+ IR_dprintk(1, "#%d: Deleting scan 0x%04x\n",
+ index, rc_tab->scan[index].scancode);
+ rc_tab->len--;
+ memmove(&rc_tab->scan[index], &rc_tab->scan[index + 1],
+ (rc_tab->len - index) * sizeof(struct ir_scancode));
+ } else {
+ IR_dprintk(1, "#%d: %s scan 0x%04x with key 0x%04x\n",
+ index,
+ old_keycode == KEY_RESERVED ? "New" : "Replacing",
+ rc_tab->scan[index].scancode, new_keycode);
+ rc_tab->scan[index].keycode = new_keycode;
+ __set_bit(new_keycode, dev->keybit);
+ }
+
+ if (old_keycode != KEY_RESERVED) {
+ /* A previous mapping was updated... */
+ __clear_bit(old_keycode, dev->keybit);
+ /* ... but another scancode might use the same keycode */
+ for (i = 0; i < rc_tab->len; i++) {
+ if (rc_tab->scan[i].keycode == old_keycode) {
+ __set_bit(old_keycode, dev->keybit);
+ break;
+ }
+ }
+
+ /* Possibly shrink the keytable, failure is not a problem */
+ ir_resize_table(rc_tab, GFP_ATOMIC);
+ }
+
+ return old_keycode;
+}
+
+/**
+ * ir_establish_scancode() - locate or add a scancode in the scancode->keycode table
+ * @ir_dev: the struct ir_input_dev device descriptor
+ * @rc_tab: scancode table to be searched
+ * @scancode: the desired scancode
+ * @resize: controls whether we are allowed to resize the table to
+ * accommodate scancodes that are not yet present
+ * @return: index of the mapping containing scancode in question
+ * or -1U in case of failure.
*
- * This routine is used internally to manipulate the scancode->keycode table.
- * The caller has to hold @rc_tab->lock.
+ * This routine is used to locate the given scancode in the ir_scancode_table.
+ * If the scancode is not yet present, the routine will allocate a new slot
+ * for it.
*/
-static int ir_do_setkeycode(struct input_dev *dev,
- struct ir_scancode_table *rc_tab,
- unsigned scancode, unsigned keycode,
- bool resize)
+static unsigned int ir_establish_scancode(struct ir_input_dev *ir_dev,
+ struct ir_scancode_table *rc_tab,
+ unsigned int scancode,
+ bool resize)
{
unsigned int i;
- int old_keycode = KEY_RESERVED;
- struct ir_input_dev *ir_dev = input_get_drvdata(dev);
/*
* Unfortunately, some hardware-based IR decoders don't provide
@@ -100,65 +194,34 @@ static int ir_do_setkeycode(struct input_dev *dev,
* the provided IR with another one, it is needed to allow loading
* IR tables from other remotes. So,
*/
- if (ir_dev->props && ir_dev->props->scanmask) {
+ if (ir_dev->props && ir_dev->props->scanmask)
scancode &= ir_dev->props->scanmask;
- }
/* First check if we already have a mapping for this ir command */
for (i = 0; i < rc_tab->len; i++) {
+ if (rc_tab->scan[i].scancode == scancode)
+ return i;
+
/* Keytable is sorted from lowest to highest scancode */
- if (rc_tab->scan[i].scancode > scancode)
+ if (rc_tab->scan[i].scancode >= scancode)
break;
- else if (rc_tab->scan[i].scancode < scancode)
- continue;
-
- old_keycode = rc_tab->scan[i].keycode;
- rc_tab->scan[i].keycode = keycode;
-
- /* Did the user wish to remove the mapping? */
- if (keycode == KEY_RESERVED || keycode == KEY_UNKNOWN) {
- IR_dprintk(1, "#%d: Deleting scan 0x%04x\n",
- i, scancode);
- rc_tab->len--;
- memmove(&rc_tab->scan[i], &rc_tab->scan[i + 1],
- (rc_tab->len - i) * sizeof(struct ir_scancode));
- }
-
- /* Possibly shrink the keytable, failure is not a problem */
- ir_resize_table(rc_tab);
- break;
}
- if (old_keycode == KEY_RESERVED && keycode != KEY_RESERVED) {
- /* No previous mapping found, we might need to grow the table */
- if (resize && ir_resize_table(rc_tab))
- return -ENOMEM;
-
- IR_dprintk(1, "#%d: New scan 0x%04x with key 0x%04x\n",
- i, scancode, keycode);
+ /* No previous mapping found, we might need to grow the table */
+ if (rc_tab->size == rc_tab->len) {
+ if (!resize || ir_resize_table(rc_tab, GFP_ATOMIC))
+ return -1U;
+ }
- /* i is the proper index to insert our new keycode */
+ /* i is the proper index to insert our new keycode */
+ if (i < rc_tab->len)
memmove(&rc_tab->scan[i + 1], &rc_tab->scan[i],
(rc_tab->len - i) * sizeof(struct ir_scancode));
- rc_tab->scan[i].scancode = scancode;
- rc_tab->scan[i].keycode = keycode;
- rc_tab->len++;
- set_bit(keycode, dev->keybit);
- } else {
- IR_dprintk(1, "#%d: Replacing scan 0x%04x with key 0x%04x\n",
- i, scancode, keycode);
- /* A previous mapping was updated... */
- clear_bit(old_keycode, dev->keybit);
- /* ...but another scancode might use the same keycode */
- for (i = 0; i < rc_tab->len; i++) {
- if (rc_tab->scan[i].keycode == old_keycode) {
- set_bit(old_keycode, dev->keybit);
- break;
- }
- }
- }
+ rc_tab->scan[i].scancode = scancode;
+ rc_tab->scan[i].keycode = KEY_RESERVED;
+ rc_tab->len++;
- return 0;
+ return i;
}
/**
@@ -171,17 +234,41 @@ static int ir_do_setkeycode(struct input_dev *dev,
 * This routine is used to handle the evdev EVIOCSKEYCODE ioctl.
*/
static int ir_setkeycode(struct input_dev *dev,
- unsigned int scancode, unsigned int keycode)
+ const struct input_keymap_entry *ke,
+ unsigned int *old_keycode)
{
- int rc;
- unsigned long flags;
struct ir_input_dev *ir_dev = input_get_drvdata(dev);
struct ir_scancode_table *rc_tab = &ir_dev->rc_tab;
+ unsigned int index;
+ unsigned int scancode;
+ int retval;
+ unsigned long flags;
spin_lock_irqsave(&rc_tab->lock, flags);
- rc = ir_do_setkeycode(dev, rc_tab, scancode, keycode, true);
+
+ if (ke->flags & INPUT_KEYMAP_BY_INDEX) {
+ index = ke->index;
+ if (index >= rc_tab->len) {
+ retval = -EINVAL;
+ goto out;
+ }
+ } else {
+ retval = input_scancode_to_scalar(ke, &scancode);
+ if (retval)
+ goto out;
+
+ index = ir_establish_scancode(ir_dev, rc_tab, scancode, true);
+ if (index >= rc_tab->len) {
+ retval = -ENOMEM;
+ goto out;
+ }
+ }
+
+ *old_keycode = ir_update_mapping(dev, rc_tab, index, ke->keycode);
+
+out:
spin_unlock_irqrestore(&rc_tab->lock, flags);
- return rc;
+ return retval;
}
/**
@@ -189,32 +276,73 @@ static int ir_setkeycode(struct input_dev *dev,
* @dev: the struct input_dev device descriptor
* @to: the struct ir_scancode_table to copy entries to
* @from: the struct ir_scancode_table to copy entries from
- * @return: -EINVAL if all keycodes could not be inserted, otherwise zero.
+ * @return: -ENOMEM if not all keycodes could be inserted, otherwise zero.
*
* This routine is used to handle table initialization.
*/
-static int ir_setkeytable(struct input_dev *dev,
- struct ir_scancode_table *to,
+static int ir_setkeytable(struct ir_input_dev *ir_dev,
const struct ir_scancode_table *from)
{
- struct ir_input_dev *ir_dev = input_get_drvdata(dev);
struct ir_scancode_table *rc_tab = &ir_dev->rc_tab;
- unsigned long flags;
- unsigned int i;
- int rc = 0;
+ unsigned int i, index;
+ int rc;
+
+ rc = ir_create_table(&ir_dev->rc_tab,
+ from->name, from->ir_type, from->size);
+ if (rc)
+ return rc;
+
+ IR_dprintk(1, "Allocated space for %u keycode entries (%u bytes)\n",
+ rc_tab->size, rc_tab->alloc);
- spin_lock_irqsave(&rc_tab->lock, flags);
for (i = 0; i < from->size; i++) {
- rc = ir_do_setkeycode(dev, to, from->scan[i].scancode,
- from->scan[i].keycode, false);
- if (rc)
+ index = ir_establish_scancode(ir_dev, rc_tab,
+ from->scan[i].scancode, false);
+ if (index >= rc_tab->len) {
+ rc = -ENOMEM;
break;
+ }
+
+ ir_update_mapping(ir_dev->input_dev, rc_tab, index,
+ from->scan[i].keycode);
}
- spin_unlock_irqrestore(&rc_tab->lock, flags);
+
+ if (rc)
+ ir_free_table(rc_tab);
+
return rc;
}
/**
+ * ir_lookup_by_scancode() - locate mapping by scancode
+ * @rc_tab: the &struct ir_scancode_table to search
+ * @scancode: scancode to look for in the table
+ * @return: index in the table, -1U if not found
+ *
+ * This routine performs a binary search in the RC keymap table for
+ * the given scancode.
+ */
+static unsigned int ir_lookup_by_scancode(const struct ir_scancode_table *rc_tab,
+ unsigned int scancode)
+{
+ int start = 0;
+ int end = rc_tab->len - 1;
+ int mid;
+
+ while (start <= end) {
+ mid = (start + end) / 2;
+ if (rc_tab->scan[mid].scancode < scancode)
+ start = mid + 1;
+ else if (rc_tab->scan[mid].scancode > scancode)
+ end = mid - 1;
+ else
+ return mid;
+ }
+
+ return -1U;
+}
+
+/**
* ir_getkeycode() - get a keycode from the scancode->keycode table
* @dev: the struct input_dev device descriptor
* @scancode: the desired scancode
@@ -224,36 +352,48 @@ static int ir_setkeytable(struct input_dev *dev,
 * This routine is used to handle the evdev EVIOCGKEYCODE ioctl.
*/
static int ir_getkeycode(struct input_dev *dev,
- unsigned int scancode, unsigned int *keycode)
+ struct input_keymap_entry *ke)
{
- int start, end, mid;
- unsigned long flags;
- int key = KEY_RESERVED;
struct ir_input_dev *ir_dev = input_get_drvdata(dev);
struct ir_scancode_table *rc_tab = &ir_dev->rc_tab;
+ struct ir_scancode *entry;
+ unsigned long flags;
+ unsigned int index;
+ unsigned int scancode;
+ int retval;
spin_lock_irqsave(&rc_tab->lock, flags);
- start = 0;
- end = rc_tab->len - 1;
- while (start <= end) {
- mid = (start + end) / 2;
- if (rc_tab->scan[mid].scancode < scancode)
- start = mid + 1;
- else if (rc_tab->scan[mid].scancode > scancode)
- end = mid - 1;
- else {
- key = rc_tab->scan[mid].keycode;
- break;
- }
+
+ if (ke->flags & INPUT_KEYMAP_BY_INDEX) {
+ index = ke->index;
+ } else {
+ retval = input_scancode_to_scalar(ke, &scancode);
+ if (retval)
+ goto out;
+
+ index = ir_lookup_by_scancode(rc_tab, scancode);
}
- spin_unlock_irqrestore(&rc_tab->lock, flags);
- if (key == KEY_RESERVED)
- IR_dprintk(1, "unknown key for scancode 0x%04x\n",
- scancode);
+ if (index >= rc_tab->len) {
+ if (!(ke->flags & INPUT_KEYMAP_BY_INDEX))
+ IR_dprintk(1, "unknown key for scancode 0x%04x\n",
+ scancode);
+ retval = -EINVAL;
+ goto out;
+ }
- *keycode = key;
- return 0;
+ entry = &rc_tab->scan[index];
+
+ ke->index = index;
+ ke->keycode = entry->keycode;
+ ke->len = sizeof(entry->scancode);
+ memcpy(ke->scancode, &entry->scancode, sizeof(entry->scancode));
+
+ retval = 0;
+
+out:
+ spin_unlock_irqrestore(&rc_tab->lock, flags);
+ return retval;
}
/**
@@ -268,12 +408,24 @@ static int ir_getkeycode(struct input_dev *dev,
*/
u32 ir_g_keycode_from_table(struct input_dev *dev, u32 scancode)
{
- int keycode;
+ struct ir_input_dev *ir_dev = input_get_drvdata(dev);
+ struct ir_scancode_table *rc_tab = &ir_dev->rc_tab;
+ unsigned int keycode;
+ unsigned int index;
+ unsigned long flags;
+
+ spin_lock_irqsave(&rc_tab->lock, flags);
+
+ index = ir_lookup_by_scancode(rc_tab, scancode);
+ keycode = index < rc_tab->len ?
+ rc_tab->scan[index].keycode : KEY_RESERVED;
+
+ spin_unlock_irqrestore(&rc_tab->lock, flags);
- ir_getkeycode(dev, scancode, &keycode);
if (keycode != KEY_RESERVED)
IR_dprintk(1, "%s: scancode 0x%04x keycode 0x%02x\n",
dev->name, scancode, keycode);
+
return keycode;
}
EXPORT_SYMBOL_GPL(ir_g_keycode_from_table);
@@ -285,7 +437,7 @@ EXPORT_SYMBOL_GPL(ir_g_keycode_from_table);
* This routine is used to signal that a key has been released on the
* remote control. It reports a keyup input event via input_report_key().
*/
-static void ir_keyup(struct ir_input_dev *ir)
+void ir_keyup(struct ir_input_dev *ir)
{
if (!ir->keypressed)
return;
@@ -295,6 +447,7 @@ static void ir_keyup(struct ir_input_dev *ir)
input_sync(ir->input_dev);
ir->keypressed = false;
}
+EXPORT_SYMBOL_GPL(ir_keyup);
/**
* ir_timer_keyup() - generates a keyup event after a timeout
@@ -453,8 +606,8 @@ int __ir_input_register(struct input_dev *input_dev,
goto out_dev;
}
- input_dev->getkeycode = ir_getkeycode;
- input_dev->setkeycode = ir_setkeycode;
+ input_dev->getkeycode_new = ir_getkeycode;
+ input_dev->setkeycode_new = ir_setkeycode;
input_set_drvdata(input_dev, ir_dev);
ir_dev->input_dev = input_dev;
@@ -462,12 +615,6 @@ int __ir_input_register(struct input_dev *input_dev,
spin_lock_init(&ir_dev->keylock);
setup_timer(&ir_dev->timer_keyup, ir_timer_keyup, (unsigned long)ir_dev);
- ir_dev->rc_tab.name = rc_tab->name;
- ir_dev->rc_tab.ir_type = rc_tab->ir_type;
- ir_dev->rc_tab.alloc = roundup_pow_of_two(rc_tab->size *
- sizeof(struct ir_scancode));
- ir_dev->rc_tab.scan = kmalloc(ir_dev->rc_tab.alloc, GFP_KERNEL);
- ir_dev->rc_tab.size = ir_dev->rc_tab.alloc / sizeof(struct ir_scancode);
if (props) {
ir_dev->props = props;
if (props->open)
@@ -476,23 +623,14 @@ int __ir_input_register(struct input_dev *input_dev,
input_dev->close = ir_close;
}
- if (!ir_dev->rc_tab.scan) {
- rc = -ENOMEM;
- goto out_name;
- }
-
- IR_dprintk(1, "Allocated space for %u keycode entries (%u bytes)\n",
- ir_dev->rc_tab.size, ir_dev->rc_tab.alloc);
-
set_bit(EV_KEY, input_dev->evbit);
set_bit(EV_REP, input_dev->evbit);
set_bit(EV_MSC, input_dev->evbit);
set_bit(MSC_SCAN, input_dev->mscbit);
- if (ir_setkeytable(input_dev, &ir_dev->rc_tab, rc_tab)) {
- rc = -ENOMEM;
- goto out_table;
- }
+ rc = ir_setkeytable(ir_dev, rc_tab);
+ if (rc)
+ goto out_name;
rc = ir_register_class(input_dev);
if (rc < 0)
@@ -505,6 +643,10 @@ int __ir_input_register(struct input_dev *input_dev,
goto out_event;
}
+ rc = ir_register_input(input_dev);
+ if (rc < 0)
+ goto out_event;
+
IR_dprintk(1, "Registered input device on %s for %s remote%s.\n",
driver_name, rc_tab->name,
(ir_dev->props && ir_dev->props->driver_type == RC_DRIVER_IR_RAW) ?
@@ -522,7 +664,7 @@ int __ir_input_register(struct input_dev *input_dev,
out_event:
ir_unregister_class(input_dev);
out_table:
- kfree(ir_dev->rc_tab.scan);
+ ir_free_table(&ir_dev->rc_tab);
out_name:
kfree(ir_dev->driver_name);
out_dev:
@@ -540,7 +682,6 @@ EXPORT_SYMBOL_GPL(__ir_input_register);
void ir_input_unregister(struct input_dev *input_dev)
{
struct ir_input_dev *ir_dev = input_get_drvdata(input_dev);
- struct ir_scancode_table *rc_tab;
if (!ir_dev)
return;
@@ -552,10 +693,7 @@ void ir_input_unregister(struct input_dev *input_dev)
if (ir_dev->props->driver_type == RC_DRIVER_IR_RAW)
ir_raw_event_unregister(input_dev);
- rc_tab = &ir_dev->rc_tab;
- rc_tab->size = 0;
- kfree(rc_tab->scan);
- rc_tab->scan = NULL;
+ ir_free_table(&ir_dev->rc_tab);
ir_unregister_class(input_dev);
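
For context, here is a minimal user-space sketch of the index-aware keymap interface that the ir-keytable.c hunks above wire up: getkeycode_new/setkeycode_new are reached through the struct input_keymap_entry evdev ioctls. The event-node path and the scancode value below are assumptions, not part of the patch.

#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <linux/input.h>

int main(void)
{
	struct input_keymap_entry ke;
	__u32 scancode = 0x0805;			/* hypothetical NEC scancode */
	int fd = open("/dev/input/event5", O_RDWR);	/* assumed device node */

	if (fd < 0)
		return 1;

	/* map scancode -> keycode; this reaches ir_setkeycode()/ir_establish_scancode() */
	memset(&ke, 0, sizeof(ke));
	ke.len = sizeof(scancode);
	memcpy(ke.scancode, &scancode, sizeof(scancode));
	ke.keycode = KEY_VOLUMEUP;
	if (ioctl(fd, EVIOCSKEYCODE_V2, &ke))
		perror("EVIOCSKEYCODE_V2");

	/* read a mapping back by table index, exercising INPUT_KEYMAP_BY_INDEX */
	memset(&ke, 0, sizeof(ke));
	ke.flags = INPUT_KEYMAP_BY_INDEX;
	ke.index = 0;
	if (ioctl(fd, EVIOCGKEYCODE_V2, &ke) == 0)
		printf("slot 0 -> keycode %u\n", ke.keycode);

	close(fd);
	return 0;
}
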
diff --git a/drivers/media/IR/ir-lirc-codec.c b/drivers/media/IR/ir-lirc-codec.c
index 1983cd3f3994..9fc0db9d344d 100644
--- a/drivers/media/IR/ir-lirc-codec.c
+++ b/drivers/media/IR/ir-lirc-codec.c
@@ -32,6 +32,7 @@
static int ir_lirc_decode(struct input_dev *input_dev, struct ir_raw_event ev)
{
struct ir_input_dev *ir_dev = input_get_drvdata(input_dev);
+ struct lirc_codec *lirc = &ir_dev->raw->lirc;
int sample;
if (!(ir_dev->raw->enabled_protocols & IR_TYPE_LIRC))
@@ -40,21 +41,57 @@ static int ir_lirc_decode(struct input_dev *input_dev, struct ir_raw_event ev)
if (!ir_dev->raw->lirc.drv || !ir_dev->raw->lirc.drv->rbuf)
return -EINVAL;
- if (IS_RESET(ev))
+ /* Packet start */
+ if (ev.reset)
return 0;
- IR_dprintk(2, "LIRC data transfer started (%uus %s)\n",
- TO_US(ev.duration), TO_STR(ev.pulse));
+ /* Carrier reports */
+ if (ev.carrier_report) {
+ sample = LIRC_FREQUENCY(ev.carrier);
+
+ /* Packet end */
+ } else if (ev.timeout) {
+
+ if (lirc->gap)
+ return 0;
+
+ lirc->gap_start = ktime_get();
+ lirc->gap = true;
+ lirc->gap_duration = ev.duration;
+
+ if (!lirc->send_timeout_reports)
+ return 0;
+
+ sample = LIRC_TIMEOUT(ev.duration / 1000);
- sample = ev.duration / 1000;
- if (ev.pulse)
- sample |= PULSE_BIT;
+ /* Normal sample */
+ } else {
+
+ if (lirc->gap) {
+ int gap_sample;
+
+ lirc->gap_duration += ktime_to_ns(ktime_sub(ktime_get(),
+ lirc->gap_start));
+
+ /* Convert to us and cap by LIRC_VALUE_MASK */
+ do_div(lirc->gap_duration, 1000);
+ lirc->gap_duration = min(lirc->gap_duration,
+ (u64)LIRC_VALUE_MASK);
+
+ gap_sample = LIRC_SPACE(lirc->gap_duration);
+ lirc_buffer_write(ir_dev->raw->lirc.drv->rbuf,
+ (unsigned char *) &gap_sample);
+ lirc->gap = false;
+ }
+
+ sample = ev.pulse ? LIRC_PULSE(ev.duration / 1000) :
+ LIRC_SPACE(ev.duration / 1000);
+ }
lirc_buffer_write(ir_dev->raw->lirc.drv->rbuf,
(unsigned char *) &sample);
wake_up(&ir_dev->raw->lirc.drv->rbuf->wait_poll);
-
return 0;
}
@@ -102,7 +139,7 @@ static long ir_lirc_ioctl(struct file *filep, unsigned int cmd,
struct ir_input_dev *ir_dev;
int ret = 0;
void *drv_data;
- unsigned long val = 0;
+ __u32 val = 0, tmp;
lirc = lirc_get_pdata(filep);
if (!lirc)
@@ -115,7 +152,7 @@ static long ir_lirc_ioctl(struct file *filep, unsigned int cmd,
drv_data = ir_dev->props->priv;
if (_IOC_DIR(cmd) & _IOC_WRITE) {
- ret = get_user(val, (unsigned long *)arg);
+ ret = get_user(val, (__u32 *)arg);
if (ret)
return ret;
}
@@ -130,22 +167,20 @@ static long ir_lirc_ioctl(struct file *filep, unsigned int cmd,
case LIRC_SET_SEND_MODE:
if (val != (LIRC_MODE_PULSE & LIRC_CAN_SEND_MASK))
return -EINVAL;
- break;
+ return 0;
/* TX settings */
case LIRC_SET_TRANSMITTER_MASK:
- if (ir_dev->props->s_tx_mask)
- ret = ir_dev->props->s_tx_mask(drv_data, (u32)val);
- else
+ if (!ir_dev->props->s_tx_mask)
return -EINVAL;
- break;
+
+ return ir_dev->props->s_tx_mask(drv_data, val);
case LIRC_SET_SEND_CARRIER:
- if (ir_dev->props->s_tx_carrier)
- ir_dev->props->s_tx_carrier(drv_data, (u32)val);
- else
+ if (!ir_dev->props->s_tx_carrier)
return -EINVAL;
- break;
+
+ return ir_dev->props->s_tx_carrier(drv_data, val);
case LIRC_SET_SEND_DUTY_CYCLE:
if (!ir_dev->props->s_tx_duty_cycle)
@@ -154,39 +189,42 @@ static long ir_lirc_ioctl(struct file *filep, unsigned int cmd,
if (val <= 0 || val >= 100)
return -EINVAL;
- ir_dev->props->s_tx_duty_cycle(ir_dev->props->priv, val);
- break;
+ return ir_dev->props->s_tx_duty_cycle(drv_data, val);
/* RX settings */
case LIRC_SET_REC_CARRIER:
- if (ir_dev->props->s_rx_carrier_range)
- ret = ir_dev->props->s_rx_carrier_range(
- ir_dev->props->priv,
- ir_dev->raw->lirc.carrier_low, val);
- else
+ if (!ir_dev->props->s_rx_carrier_range)
return -ENOSYS;
- if (!ret)
- ir_dev->raw->lirc.carrier_low = 0;
- break;
+ if (val <= 0)
+ return -EINVAL;
+
+ return ir_dev->props->s_rx_carrier_range(drv_data,
+ ir_dev->raw->lirc.carrier_low, val);
case LIRC_SET_REC_CARRIER_RANGE:
- if (val >= 0)
- ir_dev->raw->lirc.carrier_low = val;
- break;
+ if (val <= 0)
+ return -EINVAL;
+ ir_dev->raw->lirc.carrier_low = val;
+ return 0;
case LIRC_GET_REC_RESOLUTION:
val = ir_dev->props->rx_resolution;
break;
case LIRC_SET_WIDEBAND_RECEIVER:
- if (ir_dev->props->s_learning_mode)
- return ir_dev->props->s_learning_mode(
- ir_dev->props->priv, !!val);
- else
+ if (!ir_dev->props->s_learning_mode)
return -ENOSYS;
+ return ir_dev->props->s_learning_mode(drv_data, !!val);
+
+ case LIRC_SET_MEASURE_CARRIER_MODE:
+ if (!ir_dev->props->s_carrier_report)
+ return -ENOSYS;
+
+ return ir_dev->props->s_carrier_report(drv_data, !!val);
+
/* Generic timeout support */
case LIRC_GET_MIN_TIMEOUT:
if (!ir_dev->props->max_timeout)
@@ -201,10 +239,20 @@ static long ir_lirc_ioctl(struct file *filep, unsigned int cmd,
break;
case LIRC_SET_REC_TIMEOUT:
- if (val < ir_dev->props->min_timeout ||
- val > ir_dev->props->max_timeout)
- return -EINVAL;
- ir_dev->props->timeout = val * 1000;
+ if (!ir_dev->props->max_timeout)
+ return -ENOSYS;
+
+ tmp = val * 1000;
+
+ if (tmp < ir_dev->props->min_timeout ||
+ tmp > ir_dev->props->max_timeout)
+ return -EINVAL;
+
+ ir_dev->props->timeout = tmp;
+ break;
+
+ case LIRC_SET_REC_TIMEOUT_REPORTS:
+ lirc->send_timeout_reports = !!val;
break;
default:
@@ -212,7 +260,7 @@ static long ir_lirc_ioctl(struct file *filep, unsigned int cmd,
}
if (_IOC_DIR(cmd) & _IOC_READ)
- ret = put_user(val, (unsigned long *)arg);
+ ret = put_user(val, (__u32 *)arg);
return ret;
}
@@ -231,6 +279,9 @@ static struct file_operations lirc_fops = {
.owner = THIS_MODULE,
.write = ir_lirc_transmit_ir,
.unlocked_ioctl = ir_lirc_ioctl,
+#ifdef CONFIG_COMPAT
+ .compat_ioctl = ir_lirc_ioctl,
+#endif
.read = lirc_dev_fop_read,
.poll = lirc_dev_fop_poll,
.open = lirc_dev_fop_open,
@@ -278,6 +329,10 @@ static int ir_lirc_register(struct input_dev *input_dev)
if (ir_dev->props->s_learning_mode)
features |= LIRC_CAN_USE_WIDEBAND_RECEIVER;
+ if (ir_dev->props->s_carrier_report)
+ features |= LIRC_CAN_MEASURE_CARRIER;
+
+
if (ir_dev->props->max_timeout)
features |= LIRC_CAN_SET_REC_TIMEOUT;
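
As a rough illustration of the ioctl-width change above (unsigned long becomes __u32) and of the new timeout handling, a hedged user-space sketch follows; the device path is an assumption and the lirc.h header location may differ by kernel version.

#include <fcntl.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <linux/lirc.h>

/* Set the receive timeout (in microseconds) and ask for timeout reports. */
static int set_rx_timeout(const char *dev, __u32 timeout_us)
{
	__u32 enable = 1;
	int ret, fd = open(dev, O_RDWR);

	if (fd < 0)
		return -1;

	/* the driver multiplies this by 1000 and stores nanoseconds */
	ret = ioctl(fd, LIRC_SET_REC_TIMEOUT, &timeout_us);
	if (!ret)
		ret = ioctl(fd, LIRC_SET_REC_TIMEOUT_REPORTS, &enable);

	close(fd);
	return ret;
}

A call such as set_rx_timeout("/dev/lirc0", 100000) would then enable the timeout/gap reporting path added in ir_lirc_decode() above.
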
diff --git a/drivers/media/IR/ir-nec-decoder.c b/drivers/media/IR/ir-nec-decoder.c
index d597421d6547..70993f79c8a2 100644
--- a/drivers/media/IR/ir-nec-decoder.c
+++ b/drivers/media/IR/ir-nec-decoder.c
@@ -54,8 +54,9 @@ static int ir_nec_decode(struct input_dev *input_dev, struct ir_raw_event ev)
if (!(ir_dev->raw->enabled_protocols & IR_TYPE_NEC))
return 0;
- if (IS_RESET(ev)) {
- data->state = STATE_INACTIVE;
+ if (!is_timing_event(ev)) {
+ if (ev.reset)
+ data->state = STATE_INACTIVE;
return 0;
}
diff --git a/drivers/media/IR/ir-raw-event.c b/drivers/media/IR/ir-raw-event.c
index 8e0e1b1f8c87..a06a07e4e0b1 100644
--- a/drivers/media/IR/ir-raw-event.c
+++ b/drivers/media/IR/ir-raw-event.c
@@ -39,22 +39,34 @@ static int ir_raw_event_thread(void *data)
struct ir_raw_event ev;
struct ir_raw_handler *handler;
struct ir_raw_event_ctrl *raw = (struct ir_raw_event_ctrl *)data;
+ int retval;
while (!kthread_should_stop()) {
- try_to_freeze();
- mutex_lock(&ir_raw_handler_lock);
+ spin_lock_irq(&raw->lock);
+ retval = kfifo_out(&raw->kfifo, &ev, sizeof(ev));
+
+ if (!retval) {
+ set_current_state(TASK_INTERRUPTIBLE);
+
+ if (kthread_should_stop())
+ set_current_state(TASK_RUNNING);
- while (kfifo_out(&raw->kfifo, &ev, sizeof(ev)) == sizeof(ev)) {
- list_for_each_entry(handler, &ir_raw_handler_list, list)
- handler->decode(raw->input_dev, ev);
- raw->prev_ev = ev;
+ spin_unlock_irq(&raw->lock);
+ schedule();
+ continue;
}
- mutex_unlock(&ir_raw_handler_lock);
+ spin_unlock_irq(&raw->lock);
+
- set_current_state(TASK_INTERRUPTIBLE);
- schedule();
+ BUG_ON(retval != sizeof(ev));
+
+ mutex_lock(&ir_raw_handler_lock);
+ list_for_each_entry(handler, &ir_raw_handler_list, list)
+ handler->decode(raw->input_dev, ev);
+ raw->prev_ev = ev;
+ mutex_unlock(&ir_raw_handler_lock);
}
return 0;
@@ -77,7 +89,7 @@ int ir_raw_event_store(struct input_dev *input_dev, struct ir_raw_event *ev)
if (!ir->raw)
return -EINVAL;
- IR_dprintk(2, "sample: (05%dus %s)\n",
+ IR_dprintk(2, "sample: (%05dus %s)\n",
TO_US(ev->duration), TO_STR(ev->pulse));
if (kfifo_in(&ir->raw->kfifo, ev, sizeof(*ev)) != sizeof(*ev))
@@ -162,7 +174,7 @@ int ir_raw_event_store_with_filter(struct input_dev *input_dev,
if (ir->idle && !ev->pulse)
return 0;
else if (ir->idle)
- ir_raw_event_set_idle(input_dev, 0);
+ ir_raw_event_set_idle(input_dev, false);
if (!raw->this_ev.duration) {
raw->this_ev = *ev;
@@ -175,48 +187,35 @@ int ir_raw_event_store_with_filter(struct input_dev *input_dev,
 /* Enter idle mode if necessary */
if (!ev->pulse && ir->props->timeout &&
- raw->this_ev.duration >= ir->props->timeout)
- ir_raw_event_set_idle(input_dev, 1);
+ raw->this_ev.duration >= ir->props->timeout) {
+ ir_raw_event_set_idle(input_dev, true);
+ }
return 0;
}
EXPORT_SYMBOL_GPL(ir_raw_event_store_with_filter);
-void ir_raw_event_set_idle(struct input_dev *input_dev, int idle)
+/**
+ * ir_raw_event_set_idle() - hint the IR core whether the device is
+ * receiving IR data or not
+ * @input_dev: the struct input_dev device descriptor
+ * @idle: the hint value
+ */
+void ir_raw_event_set_idle(struct input_dev *input_dev, bool idle)
{
struct ir_input_dev *ir = input_get_drvdata(input_dev);
struct ir_raw_event_ctrl *raw = ir->raw;
- ktime_t now;
- u64 delta;
- if (!ir->props)
+ if (!ir->props || !ir->raw)
return;
- if (!ir->raw)
- goto out;
+ IR_dprintk(2, "%s idle mode\n", idle ? "enter" : "leave");
if (idle) {
- IR_dprintk(2, "enter idle mode\n");
- raw->last_event = ktime_get();
- } else {
- IR_dprintk(2, "exit idle mode\n");
-
- now = ktime_get();
- delta = ktime_to_ns(ktime_sub(now, ir->raw->last_event));
-
- WARN_ON(raw->this_ev.pulse);
-
- raw->this_ev.duration =
- min(raw->this_ev.duration + delta,
- (u64)IR_MAX_DURATION);
-
+ raw->this_ev.timeout = true;
ir_raw_event_store(input_dev, &raw->this_ev);
-
- if (raw->this_ev.duration == IR_MAX_DURATION)
- ir_raw_event_reset(input_dev);
-
- raw->this_ev.duration = 0;
+ init_ir_raw_event(&raw->this_ev);
}
-out:
+
if (ir->props->s_idle)
ir->props->s_idle(ir->props->priv, idle);
ir->idle = idle;
@@ -232,11 +231,14 @@ EXPORT_SYMBOL_GPL(ir_raw_event_set_idle);
void ir_raw_event_handle(struct input_dev *input_dev)
{
struct ir_input_dev *ir = input_get_drvdata(input_dev);
+ unsigned long flags;
if (!ir->raw)
return;
+ spin_lock_irqsave(&ir->raw->lock, flags);
wake_up_process(ir->raw->thread);
+ spin_unlock_irqrestore(&ir->raw->lock, flags);
}
EXPORT_SYMBOL_GPL(ir_raw_event_handle);
@@ -275,6 +277,7 @@ int ir_raw_event_register(struct input_dev *input_dev)
return rc;
}
+ spin_lock_init(&ir->raw->lock);
ir->raw->thread = kthread_run(ir_raw_event_thread, ir->raw,
"rc%u", (unsigned int)ir->devno);
diff --git a/drivers/media/IR/ir-rc5-decoder.c b/drivers/media/IR/ir-rc5-decoder.c
index df4770d978ad..572ed4ca8c68 100644
--- a/drivers/media/IR/ir-rc5-decoder.c
+++ b/drivers/media/IR/ir-rc5-decoder.c
@@ -55,8 +55,9 @@ static int ir_rc5_decode(struct input_dev *input_dev, struct ir_raw_event ev)
if (!(ir_dev->raw->enabled_protocols & IR_TYPE_RC5))
return 0;
- if (IS_RESET(ev)) {
- data->state = STATE_INACTIVE;
+ if (!is_timing_event(ev)) {
+ if (ev.reset)
+ data->state = STATE_INACTIVE;
return 0;
}
diff --git a/drivers/media/IR/ir-rc5-sz-decoder.c b/drivers/media/IR/ir-rc5-sz-decoder.c
new file mode 100644
index 000000000000..7c413501a3f7
--- /dev/null
+++ b/drivers/media/IR/ir-rc5-sz-decoder.c
@@ -0,0 +1,154 @@
+/* ir-rc5-sz-decoder.c - handle RC5 Streamzap IR Pulse/Space protocol
+ *
+ * Copyright (C) 2010 by Mauro Carvalho Chehab <mchehab@redhat.com>
+ * Copyright (C) 2010 by Jarod Wilson <jarod@redhat.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation version 2 of the License.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+/*
+ * This code handles the 15 bit RC5-ish protocol used by the Streamzap
+ * PC Remote.
+ * It considers a carrier of 36 kHz, with a total of 15 bits, where
+ * the first two bits are start bits, and a third one is a filler bit.
+ */
+
+#include "ir-core-priv.h"
+
+#define RC5_SZ_NBITS 15
+#define RC5_UNIT 888888 /* ns */
+#define RC5_BIT_START (1 * RC5_UNIT)
+#define RC5_BIT_END (1 * RC5_UNIT)
+
+enum rc5_sz_state {
+ STATE_INACTIVE,
+ STATE_BIT_START,
+ STATE_BIT_END,
+ STATE_FINISHED,
+};
+
+/**
+ * ir_rc5_sz_decode() - Decode one RC-5 Streamzap pulse or space
+ * @input_dev: the struct input_dev descriptor of the device
+ * @ev: the struct ir_raw_event descriptor of the pulse/space
+ *
+ * This function returns -EINVAL if the pulse violates the state machine
+ */
+static int ir_rc5_sz_decode(struct input_dev *input_dev, struct ir_raw_event ev)
+{
+ struct ir_input_dev *ir_dev = input_get_drvdata(input_dev);
+ struct rc5_sz_dec *data = &ir_dev->raw->rc5_sz;
+ u8 toggle, command, system;
+ u32 scancode;
+
+ if (!(ir_dev->raw->enabled_protocols & IR_TYPE_RC5_SZ))
+ return 0;
+
+ if (!is_timing_event(ev)) {
+ if (ev.reset)
+ data->state = STATE_INACTIVE;
+ return 0;
+ }
+
+ if (!geq_margin(ev.duration, RC5_UNIT, RC5_UNIT / 2))
+ goto out;
+
+again:
+ IR_dprintk(2, "RC5-sz decode started at state %i (%uus %s)\n",
+ data->state, TO_US(ev.duration), TO_STR(ev.pulse));
+
+ if (!geq_margin(ev.duration, RC5_UNIT, RC5_UNIT / 2))
+ return 0;
+
+ switch (data->state) {
+
+ case STATE_INACTIVE:
+ if (!ev.pulse)
+ break;
+
+ data->state = STATE_BIT_START;
+ data->count = 1;
+ data->wanted_bits = RC5_SZ_NBITS;
+ decrease_duration(&ev, RC5_BIT_START);
+ goto again;
+
+ case STATE_BIT_START:
+ if (!eq_margin(ev.duration, RC5_BIT_START, RC5_UNIT / 2))
+ break;
+
+ data->bits <<= 1;
+ if (!ev.pulse)
+ data->bits |= 1;
+ data->count++;
+ data->state = STATE_BIT_END;
+ return 0;
+
+ case STATE_BIT_END:
+ if (!is_transition(&ev, &ir_dev->raw->prev_ev))
+ break;
+
+ if (data->count == data->wanted_bits)
+ data->state = STATE_FINISHED;
+ else
+ data->state = STATE_BIT_START;
+
+ decrease_duration(&ev, RC5_BIT_END);
+ goto again;
+
+ case STATE_FINISHED:
+ if (ev.pulse)
+ break;
+
+ /* RC5-sz */
+ command = (data->bits & 0x0003F) >> 0;
+ system = (data->bits & 0x02FC0) >> 6;
+ toggle = (data->bits & 0x01000) ? 1 : 0;
+ scancode = system << 6 | command;
+
+ IR_dprintk(1, "RC5-sz scancode 0x%04x (toggle: %u)\n",
+ scancode, toggle);
+
+ ir_keydown(input_dev, scancode, toggle);
+ data->state = STATE_INACTIVE;
+ return 0;
+ }
+
+out:
+ IR_dprintk(1, "RC5-sz decode failed at state %i (%uus %s)\n",
+ data->state, TO_US(ev.duration), TO_STR(ev.pulse));
+ data->state = STATE_INACTIVE;
+ return -EINVAL;
+}
+
+static struct ir_raw_handler rc5_sz_handler = {
+ .protocols = IR_TYPE_RC5_SZ,
+ .decode = ir_rc5_sz_decode,
+};
+
+static int __init ir_rc5_sz_decode_init(void)
+{
+ ir_raw_handler_register(&rc5_sz_handler);
+
+ printk(KERN_INFO "IR RC5 (streamzap) protocol handler initialized\n");
+ return 0;
+}
+
+static void __exit ir_rc5_sz_decode_exit(void)
+{
+ ir_raw_handler_unregister(&rc5_sz_handler);
+}
+
+module_init(ir_rc5_sz_decode_init);
+module_exit(ir_rc5_sz_decode_exit);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Jarod Wilson <jarod@redhat.com>");
+MODULE_AUTHOR("Red Hat Inc. (http://www.redhat.com)");
+MODULE_DESCRIPTION("RC5 (streamzap) IR protocol decoder");
diff --git a/drivers/media/IR/ir-rc6-decoder.c b/drivers/media/IR/ir-rc6-decoder.c
index f1624b8279bc..d25da91f44ff 100644
--- a/drivers/media/IR/ir-rc6-decoder.c
+++ b/drivers/media/IR/ir-rc6-decoder.c
@@ -85,8 +85,9 @@ static int ir_rc6_decode(struct input_dev *input_dev, struct ir_raw_event ev)
if (!(ir_dev->raw->enabled_protocols & IR_TYPE_RC6))
return 0;
- if (IS_RESET(ev)) {
- data->state = STATE_INACTIVE;
+ if (!is_timing_event(ev)) {
+ if (ev.reset)
+ data->state = STATE_INACTIVE;
return 0;
}
diff --git a/drivers/media/IR/ir-sony-decoder.c b/drivers/media/IR/ir-sony-decoder.c
index b9074f07c7a0..2d15730822bc 100644
--- a/drivers/media/IR/ir-sony-decoder.c
+++ b/drivers/media/IR/ir-sony-decoder.c
@@ -48,8 +48,9 @@ static int ir_sony_decode(struct input_dev *input_dev, struct ir_raw_event ev)
if (!(ir_dev->raw->enabled_protocols & IR_TYPE_SONY))
return 0;
- if (IS_RESET(ev)) {
- data->state = STATE_INACTIVE;
+ if (!is_timing_event(ev)) {
+ if (ev.reset)
+ data->state = STATE_INACTIVE;
return 0;
}
diff --git a/drivers/media/IR/ir-sysfs.c b/drivers/media/IR/ir-sysfs.c
index 46d42467f9b4..38423a8da871 100644
--- a/drivers/media/IR/ir-sysfs.c
+++ b/drivers/media/IR/ir-sysfs.c
@@ -43,6 +43,7 @@ static struct {
{ IR_TYPE_RC6, "rc-6" },
{ IR_TYPE_JVC, "jvc" },
{ IR_TYPE_SONY, "sony" },
+ { IR_TYPE_RC5_SZ, "rc-5-sz" },
{ IR_TYPE_LIRC, "lirc" },
};
@@ -67,6 +68,10 @@ static ssize_t show_protocols(struct device *d,
char *tmp = buf;
int i;
+ /* Device is being removed */
+ if (!ir_dev)
+ return -EINVAL;
+
if (ir_dev->props && ir_dev->props->driver_type == RC_DRIVER_SCANCODE) {
enabled = ir_dev->rc_tab.ir_type;
allowed = ir_dev->props->allowed_protos;
@@ -122,6 +127,10 @@ static ssize_t store_protocols(struct device *d,
int rc, i, count = 0;
unsigned long flags;
+ /* Device is being removed */
+ if (!ir_dev)
+ return -EINVAL;
+
if (ir_dev->props && ir_dev->props->driver_type == RC_DRIVER_SCANCODE)
type = ir_dev->rc_tab.ir_type;
else if (ir_dev->raw)
@@ -256,8 +265,6 @@ static struct device_type rc_dev_type = {
*/
int ir_register_class(struct input_dev *input_dev)
{
- int rc;
- const char *path;
struct ir_input_dev *ir_dev = input_get_drvdata(input_dev);
int devno = find_first_zero_bit(&ir_core_dev_number,
IRRCV_NUM_DEVICES);
@@ -266,17 +273,28 @@ int ir_register_class(struct input_dev *input_dev)
return devno;
ir_dev->dev.type = &rc_dev_type;
+ ir_dev->devno = devno;
ir_dev->dev.class = &ir_input_class;
ir_dev->dev.parent = input_dev->dev.parent;
+ input_dev->dev.parent = &ir_dev->dev;
dev_set_name(&ir_dev->dev, "rc%d", devno);
dev_set_drvdata(&ir_dev->dev, ir_dev);
- rc = device_register(&ir_dev->dev);
- if (rc)
- return rc;
+ return device_register(&ir_dev->dev);
+};
+
+/**
+ * ir_register_input - registers ir input device with input subsystem
+ * @input_dev: the struct input_dev descriptor of the device
+ */
+
+int ir_register_input(struct input_dev *input_dev)
+{
+ struct ir_input_dev *ir_dev = input_get_drvdata(input_dev);
+ int rc;
+ const char *path;
- input_dev->dev.parent = &ir_dev->dev;
rc = input_register_device(input_dev);
if (rc < 0) {
device_del(&ir_dev->dev);
@@ -292,11 +310,9 @@ int ir_register_class(struct input_dev *input_dev)
path ? path : "N/A");
kfree(path);
- ir_dev->devno = devno;
- set_bit(devno, &ir_core_dev_number);
-
+ set_bit(ir_dev->devno, &ir_core_dev_number);
return 0;
-};
+}
/**
 * ir_unregister_class() - removes the sysfs entries for
@@ -309,6 +325,7 @@ void ir_unregister_class(struct input_dev *input_dev)
{
struct ir_input_dev *ir_dev = input_get_drvdata(input_dev);
+ input_set_drvdata(input_dev, NULL);
clear_bit(ir_dev->devno, &ir_core_dev_number);
input_unregister_device(input_dev);
device_del(&ir_dev->dev);
diff --git a/drivers/media/IR/keymaps/Makefile b/drivers/media/IR/keymaps/Makefile
index 950e5d953c6f..3194d391bbd4 100644
--- a/drivers/media/IR/keymaps/Makefile
+++ b/drivers/media/IR/keymaps/Makefile
@@ -1,4 +1,6 @@
obj-$(CONFIG_RC_MAP) += rc-adstech-dvb-t-pci.o \
+ rc-alink-dtu-m.o \
+ rc-anysee.o \
rc-apac-viewcomp.o \
rc-asus-pc39.o \
rc-ati-tv-wonder-hd-600.o \
@@ -8,7 +10,9 @@ obj-$(CONFIG_RC_MAP) += rc-adstech-dvb-t-pci.o \
rc-avermedia-dvbt.o \
rc-avermedia-m135a.o \
rc-avermedia-m733a-rm-k6.o \
+ rc-avermedia-rm-ks.o \
rc-avertv-303.o \
+ rc-azurewave-ad-tu700.o \
rc-behold.o \
rc-behold-columbus.o \
rc-budget-ci-old.o \
@@ -16,6 +20,8 @@ obj-$(CONFIG_RC_MAP) += rc-adstech-dvb-t-pci.o \
rc-cinergy.o \
rc-dib0700-nec.o \
rc-dib0700-rc5.o \
+ rc-digitalnow-tinytwin.o \
+ rc-digittrade.o \
rc-dm1105-nec.o \
rc-dntv-live-dvb-t.o \
rc-dntv-live-dvbt-pro.o \
@@ -38,8 +44,12 @@ obj-$(CONFIG_RC_MAP) += rc-adstech-dvb-t-pci.o \
rc-kaiomy.o \
rc-kworld-315u.o \
rc-kworld-plus-tv-analog.o \
+ rc-leadtek-y04g0051.o \
rc-lirc.o \
+ rc-lme2510.o \
rc-manli.o \
+ rc-msi-digivox-ii.o \
+ rc-msi-digivox-iii.o \
rc-msi-tvanywhere.o \
rc-msi-tvanywhere-plus.o \
rc-nebula.o \
@@ -58,14 +68,18 @@ obj-$(CONFIG_RC_MAP) += rc-adstech-dvb-t-pci.o \
rc-purpletv.o \
rc-pv951.o \
rc-rc5-hauppauge-new.o \
- rc-rc5-streamzap.o \
rc-rc5-tv.o \
rc-rc6-mce.o \
rc-real-audio-220-32-keys.o \
+ rc-streamzap.o \
rc-tbs-nec.o \
rc-terratec-cinergy-xs.o \
+ rc-terratec-slim.o \
rc-tevii-nec.o \
+ rc-total-media-in-hand.o \
+ rc-trekstor.o \
rc-tt-1500.o \
+ rc-twinhan1027.o \
rc-videomate-s350.o \
rc-videomate-tv-pvr.o \
rc-winfast.o \
diff --git a/drivers/media/IR/keymaps/rc-alink-dtu-m.c b/drivers/media/IR/keymaps/rc-alink-dtu-m.c
new file mode 100644
index 000000000000..ddfee7f8093d
--- /dev/null
+++ b/drivers/media/IR/keymaps/rc-alink-dtu-m.c
@@ -0,0 +1,68 @@
+/*
+ * A-Link DTU(m) remote controller keytable
+ *
+ * Copyright (C) 2010 Antti Palosaari <crope@iki.fi>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
+ */
+
+#include <media/rc-map.h>
+
+/* A-Link DTU(m) slim remote, 6 rows, 3 columns. */
+static struct ir_scancode alink_dtu_m[] = {
+ { 0x0800, KEY_VOLUMEUP },
+ { 0x0801, KEY_1 },
+ { 0x0802, KEY_3 },
+ { 0x0803, KEY_7 },
+ { 0x0804, KEY_9 },
+ { 0x0805, KEY_NEW }, /* symbol: PIP */
+ { 0x0806, KEY_0 },
+ { 0x0807, KEY_CHANNEL }, /* JUMP */
+ { 0x080d, KEY_5 },
+ { 0x080f, KEY_2 },
+ { 0x0812, KEY_POWER2 },
+ { 0x0814, KEY_CHANNELUP },
+ { 0x0816, KEY_VOLUMEDOWN },
+ { 0x0818, KEY_6 },
+ { 0x081a, KEY_MUTE },
+ { 0x081b, KEY_8 },
+ { 0x081c, KEY_4 },
+ { 0x081d, KEY_CHANNELDOWN },
+};
+
+static struct rc_keymap alink_dtu_m_map = {
+ .map = {
+ .scan = alink_dtu_m,
+ .size = ARRAY_SIZE(alink_dtu_m),
+ .ir_type = IR_TYPE_NEC,
+ .name = RC_MAP_ALINK_DTU_M,
+ }
+};
+
+static int __init init_rc_map_alink_dtu_m(void)
+{
+ return ir_register_map(&alink_dtu_m_map);
+}
+
+static void __exit exit_rc_map_alink_dtu_m(void)
+{
+ ir_unregister_map(&alink_dtu_m_map);
+}
+
+module_init(init_rc_map_alink_dtu_m)
+module_exit(exit_rc_map_alink_dtu_m)
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Antti Palosaari <crope@iki.fi>");
diff --git a/drivers/media/IR/keymaps/rc-anysee.c b/drivers/media/IR/keymaps/rc-anysee.c
new file mode 100644
index 000000000000..30d70498cfed
--- /dev/null
+++ b/drivers/media/IR/keymaps/rc-anysee.c
@@ -0,0 +1,93 @@
+/*
+ * Anysee remote controller keytable
+ *
+ * Copyright (C) 2010 Antti Palosaari <crope@iki.fi>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
+ */
+
+#include <media/rc-map.h>
+
+static struct ir_scancode anysee[] = {
+ { 0x0800, KEY_0 },
+ { 0x0801, KEY_1 },
+ { 0x0802, KEY_2 },
+ { 0x0803, KEY_3 },
+ { 0x0804, KEY_4 },
+ { 0x0805, KEY_5 },
+ { 0x0806, KEY_6 },
+ { 0x0807, KEY_7 },
+ { 0x0808, KEY_8 },
+ { 0x0809, KEY_9 },
+ { 0x080a, KEY_POWER2 }, /* [red power button] */
+ { 0x080b, KEY_VIDEO }, /* [*] MODE */
+ { 0x080c, KEY_CHANNEL }, /* [symbol counterclockwise arrow] */
+ { 0x080d, KEY_NEXT }, /* [>>|] */
+ { 0x080e, KEY_MENU }, /* MENU */
+ { 0x080f, KEY_EPG }, /* [EPG] */
+ { 0x0810, KEY_CLEAR }, /* EXIT */
+ { 0x0811, KEY_CHANNELUP },
+ { 0x0812, KEY_VOLUMEDOWN },
+ { 0x0813, KEY_VOLUMEUP },
+ { 0x0814, KEY_CHANNELDOWN },
+ { 0x0815, KEY_OK },
+ { 0x0816, KEY_RADIO }, /* [symbol TV/radio] */
+ { 0x0817, KEY_INFO }, /* [i] */
+ { 0x0818, KEY_PREVIOUS }, /* [|<<] */
+ { 0x0819, KEY_FAVORITES }, /* FAV. */
+ { 0x081a, KEY_SUBTITLE }, /* Subtitle */
+ { 0x081b, KEY_CAMERA }, /* [symbol camera] */
+ { 0x081c, KEY_YELLOW },
+ { 0x081d, KEY_RED },
+ { 0x081e, KEY_LANGUAGE }, /* [symbol Second Audio Program] */
+ { 0x081f, KEY_GREEN },
+ { 0x0820, KEY_SLEEP }, /* Sleep */
+ { 0x0821, KEY_SCREEN }, /* 16:9 / 4:3 */
+ { 0x0822, KEY_ZOOM }, /* SIZE */
+ { 0x0824, KEY_FN }, /* [F1] */
+ { 0x0825, KEY_FN }, /* [F2] */
+ { 0x0842, KEY_MUTE }, /* symbol mute */
+ { 0x0844, KEY_BLUE },
+ { 0x0847, KEY_TEXT }, /* TEXT */
+ { 0x0848, KEY_STOP },
+ { 0x0849, KEY_RECORD },
+ { 0x0850, KEY_PLAY },
+ { 0x0851, KEY_PAUSE },
+};
+
+static struct rc_keymap anysee_map = {
+ .map = {
+ .scan = anysee,
+ .size = ARRAY_SIZE(anysee),
+ .ir_type = IR_TYPE_NEC,
+ .name = RC_MAP_ANYSEE,
+ }
+};
+
+static int __init init_rc_map_anysee(void)
+{
+ return ir_register_map(&anysee_map);
+}
+
+static void __exit exit_rc_map_anysee(void)
+{
+ ir_unregister_map(&anysee_map);
+}
+
+module_init(init_rc_map_anysee)
+module_exit(exit_rc_map_anysee)
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Antti Palosaari <crope@iki.fi>");
diff --git a/drivers/media/IR/keymaps/rc-asus-pc39.c b/drivers/media/IR/keymaps/rc-asus-pc39.c
index 2aa068cd6c75..2996e0a3b8d5 100644
--- a/drivers/media/IR/keymaps/rc-asus-pc39.c
+++ b/drivers/media/IR/keymaps/rc-asus-pc39.c
@@ -20,56 +20,56 @@
static struct ir_scancode asus_pc39[] = {
/* Keys 0 to 9 */
- { 0x15, KEY_0 },
- { 0x29, KEY_1 },
- { 0x2d, KEY_2 },
- { 0x2b, KEY_3 },
- { 0x09, KEY_4 },
- { 0x0d, KEY_5 },
- { 0x0b, KEY_6 },
- { 0x31, KEY_7 },
- { 0x35, KEY_8 },
- { 0x33, KEY_9 },
+ { 0x082a, KEY_0 },
+ { 0x0816, KEY_1 },
+ { 0x0812, KEY_2 },
+ { 0x0814, KEY_3 },
+ { 0x0836, KEY_4 },
+ { 0x0832, KEY_5 },
+ { 0x0834, KEY_6 },
+ { 0x080e, KEY_7 },
+ { 0x080a, KEY_8 },
+ { 0x080c, KEY_9 },
- { 0x3e, KEY_RADIO }, /* radio */
- { 0x03, KEY_MENU }, /* dvd/menu */
- { 0x2a, KEY_VOLUMEUP },
- { 0x19, KEY_VOLUMEDOWN },
- { 0x37, KEY_UP },
- { 0x3b, KEY_DOWN },
- { 0x27, KEY_LEFT },
- { 0x2f, KEY_RIGHT },
- { 0x25, KEY_VIDEO }, /* video */
- { 0x39, KEY_AUDIO }, /* music */
+ { 0x0801, KEY_RADIO }, /* radio */
+ { 0x083c, KEY_MENU }, /* dvd/menu */
+ { 0x0815, KEY_VOLUMEUP },
+ { 0x0826, KEY_VOLUMEDOWN },
+ { 0x0808, KEY_UP },
+ { 0x0804, KEY_DOWN },
+ { 0x0818, KEY_LEFT },
+ { 0x0810, KEY_RIGHT },
+ { 0x081a, KEY_VIDEO }, /* video */
+ { 0x0806, KEY_AUDIO }, /* music */
- { 0x21, KEY_TV }, /* tv */
- { 0x1d, KEY_EXIT }, /* back */
- { 0x0a, KEY_CHANNELUP }, /* channel / program + */
- { 0x1b, KEY_CHANNELDOWN }, /* channel / program - */
- { 0x1a, KEY_ENTER }, /* enter */
+ { 0x081e, KEY_TV }, /* tv */
+ { 0x0822, KEY_EXIT }, /* back */
+ { 0x0835, KEY_CHANNELUP }, /* channel / program + */
+ { 0x0824, KEY_CHANNELDOWN }, /* channel / program - */
+ { 0x0825, KEY_ENTER }, /* enter */
- { 0x06, KEY_PAUSE }, /* play/pause */
- { 0x1e, KEY_PREVIOUS }, /* rew */
- { 0x26, KEY_NEXT }, /* forward */
- { 0x0e, KEY_REWIND }, /* backward << */
- { 0x3a, KEY_FASTFORWARD }, /* forward >> */
- { 0x36, KEY_STOP },
- { 0x2e, KEY_RECORD }, /* recording */
- { 0x16, KEY_POWER }, /* the button that reads "close" */
+ { 0x0839, KEY_PAUSE }, /* play/pause */
+ { 0x0821, KEY_PREVIOUS }, /* rew */
+ { 0x0819, KEY_NEXT }, /* forward */
+ { 0x0831, KEY_REWIND }, /* backward << */
+ { 0x0805, KEY_FASTFORWARD }, /* forward >> */
+ { 0x0809, KEY_STOP },
+ { 0x0811, KEY_RECORD }, /* recording */
+ { 0x0829, KEY_POWER }, /* the button that reads "close" */
- { 0x11, KEY_ZOOM }, /* full screen */
- { 0x13, KEY_MACRO }, /* recall */
- { 0x23, KEY_HOME }, /* home */
- { 0x05, KEY_PVR }, /* picture */
- { 0x3d, KEY_MUTE }, /* mute */
- { 0x01, KEY_DVD }, /* dvd */
+ { 0x082e, KEY_ZOOM }, /* full screen */
+ { 0x082c, KEY_MACRO }, /* recall */
+ { 0x081c, KEY_HOME }, /* home */
+ { 0x083a, KEY_PVR }, /* picture */
+ { 0x0802, KEY_MUTE }, /* mute */
+ { 0x083e, KEY_DVD }, /* dvd */
};
static struct rc_keymap asus_pc39_map = {
.map = {
.scan = asus_pc39,
.size = ARRAY_SIZE(asus_pc39),
- .ir_type = IR_TYPE_UNKNOWN, /* Legacy IR type */
+ .ir_type = IR_TYPE_RC5,
.name = RC_MAP_ASUS_PC39,
}
};
diff --git a/drivers/media/IR/keymaps/rc-avermedia-rm-ks.c b/drivers/media/IR/keymaps/rc-avermedia-rm-ks.c
new file mode 100644
index 000000000000..9ee60906c861
--- /dev/null
+++ b/drivers/media/IR/keymaps/rc-avermedia-rm-ks.c
@@ -0,0 +1,79 @@
+/*
+ * AverMedia RM-KS remote controller keytable
+ *
+ * Copyright (C) 2010 Antti Palosaari <crope@iki.fi>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
+ */
+
+#include <media/rc-map.h>
+
+/* Initial keytable is from Jose Alberto Reguero <jareguero@telefonica.net>
+ and Felipe Morales Moreno <felipe.morales.moreno@gmail.com> */
+/* FIXME: mappings are not 100% correct? */
+static struct ir_scancode avermedia_rm_ks[] = {
+ { 0x0501, KEY_POWER2 },
+ { 0x0502, KEY_CHANNELUP },
+ { 0x0503, KEY_CHANNELDOWN },
+ { 0x0504, KEY_VOLUMEUP },
+ { 0x0505, KEY_VOLUMEDOWN },
+ { 0x0506, KEY_MUTE },
+ { 0x0507, KEY_RIGHT },
+ { 0x0508, KEY_PROG1 },
+ { 0x0509, KEY_1 },
+ { 0x050a, KEY_2 },
+ { 0x050b, KEY_3 },
+ { 0x050c, KEY_4 },
+ { 0x050d, KEY_5 },
+ { 0x050e, KEY_6 },
+ { 0x050f, KEY_7 },
+ { 0x0510, KEY_8 },
+ { 0x0511, KEY_9 },
+ { 0x0512, KEY_0 },
+ { 0x0513, KEY_AUDIO },
+ { 0x0515, KEY_EPG },
+ { 0x0516, KEY_PLAY },
+ { 0x0517, KEY_RECORD },
+ { 0x0518, KEY_STOP },
+ { 0x051c, KEY_BACK },
+ { 0x051d, KEY_FORWARD },
+ { 0x054d, KEY_LEFT },
+ { 0x0556, KEY_ZOOM },
+};
+
+static struct rc_keymap avermedia_rm_ks_map = {
+ .map = {
+ .scan = avermedia_rm_ks,
+ .size = ARRAY_SIZE(avermedia_rm_ks),
+ .ir_type = IR_TYPE_NEC,
+ .name = RC_MAP_AVERMEDIA_RM_KS,
+ }
+};
+
+static int __init init_rc_map_avermedia_rm_ks(void)
+{
+ return ir_register_map(&avermedia_rm_ks_map);
+}
+
+static void __exit exit_rc_map_avermedia_rm_ks(void)
+{
+ ir_unregister_map(&avermedia_rm_ks_map);
+}
+
+module_init(init_rc_map_avermedia_rm_ks)
+module_exit(exit_rc_map_avermedia_rm_ks)
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Antti Palosaari <crope@iki.fi>");
diff --git a/drivers/media/IR/keymaps/rc-azurewave-ad-tu700.c b/drivers/media/IR/keymaps/rc-azurewave-ad-tu700.c
new file mode 100644
index 000000000000..e0876147d471
--- /dev/null
+++ b/drivers/media/IR/keymaps/rc-azurewave-ad-tu700.c
@@ -0,0 +1,102 @@
+/*
+ * TwinHan AzureWave AD-TU700(704J) remote controller keytable
+ *
+ * Copyright (C) 2010 Antti Palosaari <crope@iki.fi>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
+ */
+
+#include <media/rc-map.h>
+
+static struct ir_scancode azurewave_ad_tu700[] = {
+ { 0x0000, KEY_TAB }, /* Tab */
+ { 0x0001, KEY_2 },
+ { 0x0002, KEY_CHANNELDOWN },
+ { 0x0003, KEY_1 },
+ { 0x0004, KEY_MENU }, /* Record List */
+ { 0x0005, KEY_CHANNELUP },
+ { 0x0006, KEY_3 },
+ { 0x0007, KEY_SLEEP }, /* Hibernate */
+ { 0x0008, KEY_VIDEO }, /* A/V */
+ { 0x0009, KEY_4 },
+ { 0x000a, KEY_VOLUMEDOWN },
+ { 0x000c, KEY_CANCEL }, /* Cancel */
+ { 0x000d, KEY_7 },
+ { 0x000e, KEY_AGAIN }, /* Recall */
+ { 0x000f, KEY_TEXT }, /* Teletext */
+ { 0x0010, KEY_MUTE },
+ { 0x0011, KEY_RECORD },
+ { 0x0012, KEY_FASTFORWARD }, /* FF >> */
+ { 0x0013, KEY_BACK }, /* Back */
+ { 0x0014, KEY_PLAY },
+ { 0x0015, KEY_0 },
+ { 0x0016, KEY_POWER2 }, /* [red power button] */
+ { 0x0017, KEY_FAVORITES }, /* Favorite List */
+ { 0x0018, KEY_RED },
+ { 0x0019, KEY_8 },
+ { 0x001a, KEY_STOP },
+ { 0x001b, KEY_9 },
+ { 0x001c, KEY_EPG }, /* Info/EPG */
+ { 0x001d, KEY_5 },
+ { 0x001e, KEY_VOLUMEUP },
+ { 0x001f, KEY_6 },
+ { 0x0040, KEY_REWIND }, /* FR << */
+ { 0x0041, KEY_PREVIOUS }, /* Replay */
+ { 0x0042, KEY_NEXT }, /* Skip */
+ { 0x0043, KEY_SUBTITLE }, /* Subtitle / CC */
+ { 0x0045, KEY_KPPLUS }, /* Zoom+ */
+ { 0x0046, KEY_KPMINUS }, /* Zoom- */
+ { 0x0047, KEY_NEW }, /* PIP */
+ { 0x0048, KEY_INFO }, /* Preview */
+ { 0x0049, KEY_MODE }, /* L/R */
+ { 0x004a, KEY_CLEAR }, /* Clear */
+ { 0x004b, KEY_UP }, /* up arrow */
+ { 0x004c, KEY_PAUSE },
+ { 0x004d, KEY_ZOOM }, /* Full Screen */
+ { 0x004e, KEY_LEFT }, /* left arrow */
+ { 0x004f, KEY_OK }, /* Enter / ok */
+ { 0x0050, KEY_LANGUAGE }, /* SAP */
+ { 0x0051, KEY_DOWN }, /* down arrow */
+ { 0x0052, KEY_RIGHT }, /* right arrow */
+ { 0x0053, KEY_GREEN },
+ { 0x0054, KEY_CAMERA }, /* Capture */
+ { 0x005e, KEY_YELLOW },
+ { 0x005f, KEY_BLUE },
+};
+
+static struct rc_keymap azurewave_ad_tu700_map = {
+ .map = {
+ .scan = azurewave_ad_tu700,
+ .size = ARRAY_SIZE(azurewave_ad_tu700),
+ .ir_type = IR_TYPE_NEC,
+ .name = RC_MAP_AZUREWAVE_AD_TU700,
+ }
+};
+
+static int __init init_rc_map_azurewave_ad_tu700(void)
+{
+ return ir_register_map(&azurewave_ad_tu700_map);
+}
+
+static void __exit exit_rc_map_azurewave_ad_tu700(void)
+{
+ ir_unregister_map(&azurewave_ad_tu700_map);
+}
+
+module_init(init_rc_map_azurewave_ad_tu700)
+module_exit(exit_rc_map_azurewave_ad_tu700)
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Antti Palosaari <crope@iki.fi>");
diff --git a/drivers/media/IR/keymaps/rc-digitalnow-tinytwin.c b/drivers/media/IR/keymaps/rc-digitalnow-tinytwin.c
new file mode 100644
index 000000000000..63e469e2dd21
--- /dev/null
+++ b/drivers/media/IR/keymaps/rc-digitalnow-tinytwin.c
@@ -0,0 +1,98 @@
+/*
+ * DigitalNow TinyTwin remote controller keytable
+ *
+ * Copyright (C) 2010 Antti Palosaari <crope@iki.fi>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
+ */
+
+#include <media/rc-map.h>
+
+static struct ir_scancode digitalnow_tinytwin[] = {
+ { 0x0000, KEY_MUTE }, /* [symbol speaker] */
+ { 0x0001, KEY_VOLUMEUP },
+ { 0x0002, KEY_POWER2 }, /* TV [power button] */
+ { 0x0003, KEY_2 },
+ { 0x0004, KEY_3 },
+ { 0x0005, KEY_4 },
+ { 0x0006, KEY_6 },
+ { 0x0007, KEY_7 },
+ { 0x0008, KEY_8 },
+ { 0x0009, KEY_NUMERIC_STAR }, /* [*] */
+ { 0x000a, KEY_0 },
+ { 0x000b, KEY_NUMERIC_POUND }, /* [#] */
+ { 0x000c, KEY_RIGHT }, /* [right arrow] */
+ { 0x000d, KEY_HOMEPAGE }, /* [symbol home] Start */
+ { 0x000e, KEY_RED }, /* [red] Videos */
+ { 0x0010, KEY_POWER }, /* PC [power button] */
+ { 0x0011, KEY_YELLOW }, /* [yellow] Pictures */
+ { 0x0012, KEY_DOWN }, /* [down arrow] */
+ { 0x0013, KEY_GREEN }, /* [green] Music */
+ { 0x0014, KEY_CYCLEWINDOWS }, /* BACK */
+ { 0x0015, KEY_FAVORITES }, /* MORE */
+ { 0x0016, KEY_UP }, /* [up arrow] */
+ { 0x0017, KEY_LEFT }, /* [left arrow] */
+ { 0x0018, KEY_OK }, /* OK */
+ { 0x0019, KEY_BLUE }, /* [blue] MyTV */
+ { 0x001a, KEY_REWIND }, /* REW [<<] */
+ { 0x001b, KEY_PLAY }, /* PLAY */
+ { 0x001c, KEY_5 },
+ { 0x001d, KEY_9 },
+ { 0x001e, KEY_VOLUMEDOWN },
+ { 0x001f, KEY_1 },
+ { 0x0040, KEY_STOP }, /* STOP */
+ { 0x0042, KEY_PAUSE }, /* PAUSE */
+ { 0x0043, KEY_SCREEN }, /* Aspect */
+ { 0x0044, KEY_FORWARD }, /* FWD [>>] */
+ { 0x0045, KEY_NEXT }, /* SKIP */
+ { 0x0048, KEY_RECORD }, /* RECORD */
+ { 0x0049, KEY_VIDEO }, /* RTV */
+ { 0x004a, KEY_EPG }, /* Guide */
+ { 0x004b, KEY_CHANNELUP },
+ { 0x004c, KEY_HELP }, /* Help */
+ { 0x004d, KEY_RADIO }, /* Radio */
+ { 0x004f, KEY_CHANNELDOWN },
+ { 0x0050, KEY_DVD }, /* DVD */
+ { 0x0051, KEY_AUDIO }, /* Audio */
+ { 0x0052, KEY_TITLE }, /* Title */
+ { 0x0053, KEY_NEW }, /* [symbol PIP?] */
+ { 0x0057, KEY_MENU }, /* Mouse */
+ { 0x005a, KEY_PREVIOUS }, /* REPLAY */
+};
+
+static struct rc_keymap digitalnow_tinytwin_map = {
+ .map = {
+ .scan = digitalnow_tinytwin,
+ .size = ARRAY_SIZE(digitalnow_tinytwin),
+ .ir_type = IR_TYPE_NEC,
+ .name = RC_MAP_DIGITALNOW_TINYTWIN,
+ }
+};
+
+static int __init init_rc_map_digitalnow_tinytwin(void)
+{
+ return ir_register_map(&digitalnow_tinytwin_map);
+}
+
+static void __exit exit_rc_map_digitalnow_tinytwin(void)
+{
+ ir_unregister_map(&digitalnow_tinytwin_map);
+}
+
+module_init(init_rc_map_digitalnow_tinytwin)
+module_exit(exit_rc_map_digitalnow_tinytwin)
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Antti Palosaari <crope@iki.fi>");
diff --git a/drivers/media/IR/keymaps/rc-digittrade.c b/drivers/media/IR/keymaps/rc-digittrade.c
new file mode 100644
index 000000000000..5dece78e19c5
--- /dev/null
+++ b/drivers/media/IR/keymaps/rc-digittrade.c
@@ -0,0 +1,82 @@
+/*
+ * Digittrade DVB-T USB Stick remote controller keytable
+ *
+ * Copyright (C) 2010 Antti Palosaari <crope@iki.fi>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
+ */
+
+#include <media/rc-map.h>
+
+/* Digittrade DVB-T USB Stick remote controller. */
+/* Imported from af9015.h.
+ Initial keytable was from Alain Kalker <miki@dds.nl> */
+
+/* Digittrade DVB-T USB Stick */
+static struct ir_scancode digittrade[] = {
+ { 0x0000, KEY_9 },
+ { 0x0001, KEY_EPG }, /* EPG */
+ { 0x0002, KEY_VOLUMEDOWN }, /* Vol Dn */
+ { 0x0003, KEY_TEXT }, /* TELETEXT */
+ { 0x0004, KEY_8 },
+ { 0x0005, KEY_MUTE }, /* MUTE */
+ { 0x0006, KEY_POWER2 }, /* POWER */
+ { 0x0009, KEY_ZOOM }, /* FULLSCREEN */
+ { 0x000a, KEY_RECORD }, /* RECORD */
+ { 0x000d, KEY_SUBTITLE }, /* SUBTITLE */
+ { 0x000e, KEY_STOP }, /* STOP */
+ { 0x0010, KEY_OK }, /* RETURN */
+ { 0x0011, KEY_2 },
+ { 0x0012, KEY_4 },
+ { 0x0015, KEY_3 },
+ { 0x0016, KEY_5 },
+ { 0x0017, KEY_CHANNELDOWN }, /* Ch Dn */
+ { 0x0019, KEY_CHANNELUP }, /* CH Up */
+ { 0x001a, KEY_PAUSE }, /* PAUSE */
+ { 0x001b, KEY_1 },
+ { 0x001d, KEY_AUDIO }, /* DUAL SOUND */
+ { 0x001e, KEY_PLAY }, /* PLAY */
+ { 0x001f, KEY_CAMERA }, /* SNAPSHOT */
+ { 0x0040, KEY_VOLUMEUP }, /* Vol Up */
+ { 0x0048, KEY_7 },
+ { 0x004c, KEY_6 },
+ { 0x004d, KEY_PLAYPAUSE }, /* TIMESHIFT */
+ { 0x0054, KEY_0 },
+};
+
+static struct rc_keymap digittrade_map = {
+ .map = {
+ .scan = digittrade,
+ .size = ARRAY_SIZE(digittrade),
+ .ir_type = IR_TYPE_NEC,
+ .name = RC_MAP_DIGITTRADE,
+ }
+};
+
+static int __init init_rc_map_digittrade(void)
+{
+ return ir_register_map(&digittrade_map);
+}
+
+static void __exit exit_rc_map_digittrade(void)
+{
+ ir_unregister_map(&digittrade_map);
+}
+
+module_init(init_rc_map_digittrade)
+module_exit(exit_rc_map_digittrade)
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Antti Palosaari <crope@iki.fi>");
diff --git a/drivers/media/IR/keymaps/rc-leadtek-y04g0051.c b/drivers/media/IR/keymaps/rc-leadtek-y04g0051.c
new file mode 100644
index 000000000000..7521315fd876
--- /dev/null
+++ b/drivers/media/IR/keymaps/rc-leadtek-y04g0051.c
@@ -0,0 +1,99 @@
+/*
+ * LeadTek Y04G0051 remote controller keytable
+ *
+ * Copyright (C) 2010 Antti Palosaari <crope@iki.fi>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
+ */
+
+#include <media/rc-map.h>
+
+static struct ir_scancode leadtek_y04g0051[] = {
+ { 0x0300, KEY_POWER2 },
+ { 0x0303, KEY_SCREEN },
+ { 0x0304, KEY_RIGHT },
+ { 0x0305, KEY_1 },
+ { 0x0306, KEY_2 },
+ { 0x0307, KEY_3 },
+ { 0x0308, KEY_LEFT },
+ { 0x0309, KEY_4 },
+ { 0x030a, KEY_5 },
+ { 0x030b, KEY_6 },
+ { 0x030c, KEY_UP },
+ { 0x030d, KEY_7 },
+ { 0x030e, KEY_8 },
+ { 0x030f, KEY_9 },
+ { 0x0310, KEY_DOWN },
+ { 0x0311, KEY_AGAIN },
+ { 0x0312, KEY_0 },
+ { 0x0313, KEY_OK }, /* 1st ok */
+ { 0x0314, KEY_MUTE },
+ { 0x0316, KEY_OK }, /* 2nd ok */
+ { 0x031e, KEY_VIDEO }, /* 2nd video */
+ { 0x031b, KEY_AUDIO },
+ { 0x031f, KEY_TEXT },
+ { 0x0340, KEY_SLEEP },
+ { 0x0341, KEY_DOT },
+ { 0x0342, KEY_REWIND },
+ { 0x0343, KEY_PLAY },
+ { 0x0344, KEY_FASTFORWARD },
+ { 0x0345, KEY_TIME },
+ { 0x0346, KEY_STOP }, /* 2nd stop */
+ { 0x0347, KEY_RECORD },
+ { 0x0348, KEY_CAMERA },
+ { 0x0349, KEY_ESC },
+ { 0x034a, KEY_NEW },
+ { 0x034b, KEY_RED },
+ { 0x034c, KEY_GREEN },
+ { 0x034d, KEY_YELLOW },
+ { 0x034e, KEY_BLUE },
+ { 0x034f, KEY_MENU },
+ { 0x0350, KEY_STOP }, /* 1st stop */
+ { 0x0351, KEY_CHANNEL },
+ { 0x0352, KEY_VIDEO }, /* 1st video */
+ { 0x0353, KEY_EPG },
+ { 0x0354, KEY_PREVIOUS },
+ { 0x0355, KEY_NEXT },
+ { 0x0356, KEY_TV },
+ { 0x035a, KEY_VOLUMEDOWN },
+ { 0x035b, KEY_CHANNELUP },
+ { 0x035e, KEY_VOLUMEUP },
+ { 0x035f, KEY_CHANNELDOWN },
+};
+
+static struct rc_keymap leadtek_y04g0051_map = {
+ .map = {
+ .scan = leadtek_y04g0051,
+ .size = ARRAY_SIZE(leadtek_y04g0051),
+ .ir_type = IR_TYPE_NEC,
+ .name = RC_MAP_LEADTEK_Y04G0051,
+ }
+};
+
+static int __init init_rc_map_leadtek_y04g0051(void)
+{
+ return ir_register_map(&leadtek_y04g0051_map);
+}
+
+static void __exit exit_rc_map_leadtek_y04g0051(void)
+{
+ ir_unregister_map(&leadtek_y04g0051_map);
+}
+
+module_init(init_rc_map_leadtek_y04g0051)
+module_exit(exit_rc_map_leadtek_y04g0051)
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Antti Palosaari <crope@iki.fi>");
diff --git a/drivers/media/IR/keymaps/rc-lme2510.c b/drivers/media/IR/keymaps/rc-lme2510.c
new file mode 100644
index 000000000000..40dcf0b4e21a
--- /dev/null
+++ b/drivers/media/IR/keymaps/rc-lme2510.c
@@ -0,0 +1,68 @@
+/* LME2510 remote control
+ *
+ *
+ * Copyright (C) 2010 Malcolm Priestley (tvboxspy@gmail.com)
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#include <media/rc-map.h>
+
+
+static struct ir_scancode lme2510_rc[] = {
+ { 0xba45, KEY_0 },
+ { 0xa05f, KEY_1 },
+ { 0xaf50, KEY_2 },
+ { 0xa25d, KEY_3 },
+ { 0xbe41, KEY_4 },
+ { 0xf50a, KEY_5 },
+ { 0xbd42, KEY_6 },
+ { 0xb847, KEY_7 },
+ { 0xb649, KEY_8 },
+ { 0xfa05, KEY_9 },
+ { 0xbc43, KEY_POWER },
+ { 0xb946, KEY_SUBTITLE },
+ { 0xf906, KEY_PAUSE },
+ { 0xfc03, KEY_MEDIA_REPEAT},
+ { 0xfd02, KEY_PAUSE },
+ { 0xa15e, KEY_VOLUMEUP },
+ { 0xa35c, KEY_VOLUMEDOWN },
+ { 0xf609, KEY_CHANNELUP },
+ { 0xe51a, KEY_CHANNELDOWN },
+ { 0xe11e, KEY_PLAY },
+ { 0xe41b, KEY_ZOOM },
+ { 0xa659, KEY_MUTE },
+ { 0xa55a, KEY_TV },
+ { 0xe718, KEY_RECORD },
+ { 0xf807, KEY_EPG },
+ { 0xfe01, KEY_STOP },
+
+};
+
+static struct rc_keymap lme2510_map = {
+ .map = {
+ .scan = lme2510_rc,
+ .size = ARRAY_SIZE(lme2510_rc),
+ .ir_type = IR_TYPE_UNKNOWN,
+ .name = RC_MAP_LME2510,
+ }
+};
+
+static int __init init_rc_lme2510_map(void)
+{
+ return ir_register_map(&lme2510_map);
+}
+
+static void __exit exit_rc_lme2510_map(void)
+{
+ ir_unregister_map(&lme2510_map);
+}
+
+module_init(init_rc_lme2510_map)
+module_exit(exit_rc_lme2510_map)
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Malcolm Priestley tvboxspy@gmail.com");
diff --git a/drivers/media/IR/keymaps/rc-msi-digivox-ii.c b/drivers/media/IR/keymaps/rc-msi-digivox-ii.c
new file mode 100644
index 000000000000..67237fbf9e4b
--- /dev/null
+++ b/drivers/media/IR/keymaps/rc-msi-digivox-ii.c
@@ -0,0 +1,67 @@
+/*
+ * MSI DIGIVOX mini II remote controller keytable
+ *
+ * Copyright (C) 2010 Antti Palosaari <crope@iki.fi>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
+ */
+
+#include <media/rc-map.h>
+
+static struct ir_scancode msi_digivox_ii[] = {
+ { 0x0002, KEY_2 },
+ { 0x0003, KEY_UP }, /* up */
+ { 0x0004, KEY_3 },
+ { 0x0005, KEY_CHANNELDOWN },
+ { 0x0008, KEY_5 },
+ { 0x0009, KEY_0 },
+ { 0x000b, KEY_8 },
+ { 0x000d, KEY_DOWN }, /* down */
+ { 0x0010, KEY_9 },
+ { 0x0011, KEY_7 },
+ { 0x0014, KEY_VOLUMEUP },
+ { 0x0015, KEY_CHANNELUP },
+ { 0x0016, KEY_OK },
+ { 0x0017, KEY_POWER2 },
+ { 0x001a, KEY_1 },
+ { 0x001c, KEY_4 },
+ { 0x001d, KEY_6 },
+ { 0x001f, KEY_VOLUMEDOWN },
+};
+
+static struct rc_keymap msi_digivox_ii_map = {
+ .map = {
+ .scan = msi_digivox_ii,
+ .size = ARRAY_SIZE(msi_digivox_ii),
+ .ir_type = IR_TYPE_NEC,
+ .name = RC_MAP_MSI_DIGIVOX_II,
+ }
+};
+
+static int __init init_rc_map_msi_digivox_ii(void)
+{
+ return ir_register_map(&msi_digivox_ii_map);
+}
+
+static void __exit exit_rc_map_msi_digivox_ii(void)
+{
+ ir_unregister_map(&msi_digivox_ii_map);
+}
+
+module_init(init_rc_map_msi_digivox_ii)
+module_exit(exit_rc_map_msi_digivox_ii)
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Antti Palosaari <crope@iki.fi>");
diff --git a/drivers/media/IR/keymaps/rc-msi-digivox-iii.c b/drivers/media/IR/keymaps/rc-msi-digivox-iii.c
new file mode 100644
index 000000000000..882056e52ef9
--- /dev/null
+++ b/drivers/media/IR/keymaps/rc-msi-digivox-iii.c
@@ -0,0 +1,85 @@
+/*
+ * MSI DIGIVOX mini III remote controller keytable
+ *
+ * Copyright (C) 2010 Antti Palosaari <crope@iki.fi>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
+ */
+
+#include <media/rc-map.h>
+
+/* MSI DIGIVOX mini III */
+/* Uses NEC extended 0x61d6. */
+/* This remote seems to be the same as rc-kworld-315u.c. Anyhow, add a new
+ keymap since rc-kworld-315u.c lacks the NEC extended address byte. */
+static struct ir_scancode msi_digivox_iii[] = {
+ { 0x61d601, KEY_VIDEO }, /* Source */
+ { 0x61d602, KEY_3 },
+ { 0x61d603, KEY_POWER }, /* ShutDown */
+ { 0x61d604, KEY_1 },
+ { 0x61d605, KEY_5 },
+ { 0x61d606, KEY_6 },
+ { 0x61d607, KEY_CHANNELDOWN }, /* CH- */
+ { 0x61d608, KEY_2 },
+ { 0x61d609, KEY_CHANNELUP }, /* CH+ */
+ { 0x61d60a, KEY_9 },
+ { 0x61d60b, KEY_ZOOM }, /* Zoom */
+ { 0x61d60c, KEY_7 },
+ { 0x61d60d, KEY_8 },
+ { 0x61d60e, KEY_VOLUMEUP }, /* Vol+ */
+ { 0x61d60f, KEY_4 },
+ { 0x61d610, KEY_ESC }, /* [back up arrow] */
+ { 0x61d611, KEY_0 },
+ { 0x61d612, KEY_OK }, /* [enter arrow] */
+ { 0x61d613, KEY_VOLUMEDOWN }, /* Vol- */
+ { 0x61d614, KEY_RECORD }, /* Rec */
+ { 0x61d615, KEY_STOP }, /* Stop */
+ { 0x61d616, KEY_PLAY }, /* Play */
+ { 0x61d617, KEY_MUTE }, /* Mute */
+ { 0x61d618, KEY_UP },
+ { 0x61d619, KEY_DOWN },
+ { 0x61d61a, KEY_LEFT },
+ { 0x61d61b, KEY_RIGHT },
+ { 0x61d61c, KEY_RED },
+ { 0x61d61d, KEY_GREEN },
+ { 0x61d61e, KEY_YELLOW },
+ { 0x61d61f, KEY_BLUE },
+ { 0x61d643, KEY_POWER2 }, /* [red power button] */
+};
+
+static struct rc_keymap msi_digivox_iii_map = {
+ .map = {
+ .scan = msi_digivox_iii,
+ .size = ARRAY_SIZE(msi_digivox_iii),
+ .ir_type = IR_TYPE_NEC,
+ .name = RC_MAP_MSI_DIGIVOX_III,
+ }
+};
+
+static int __init init_rc_map_msi_digivox_iii(void)
+{
+ return ir_register_map(&msi_digivox_iii_map);
+}
+
+static void __exit exit_rc_map_msi_digivox_iii(void)
+{
+ ir_unregister_map(&msi_digivox_iii_map);
+}
+
+module_init(init_rc_map_msi_digivox_iii)
+module_exit(exit_rc_map_msi_digivox_iii)
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Antti Palosaari <crope@iki.fi>");
diff --git a/drivers/media/IR/keymaps/rc-rc5-streamzap.c b/drivers/media/IR/keymaps/rc-rc5-streamzap.c
deleted file mode 100644
index 4c19c58b46d8..000000000000
--- a/drivers/media/IR/keymaps/rc-rc5-streamzap.c
+++ /dev/null
@@ -1,81 +0,0 @@
-/* rc-rc5-streamzap.c - Keytable for Streamzap PC Remote, for use
- * with the Streamzap PC Remote IR Receiver.
- *
- * Copyright (c) 2010 by Jarod Wilson <jarod@redhat.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- */
-
-#include <media/rc-map.h>
-
-static struct ir_scancode rc5_streamzap[] = {
-/*
- * FIXME: The Streamzap remote isn't actually true RC-5, it has an extra
- * bit in it, which presently throws the in-kernel RC-5 decoder for a loop.
- * We either have to enhance the decoder to support it, add a new decoder,
- * or just rely on lirc userspace decoding.
- */
- { 0x00, KEY_NUMERIC_0 },
- { 0x01, KEY_NUMERIC_1 },
- { 0x02, KEY_NUMERIC_2 },
- { 0x03, KEY_NUMERIC_3 },
- { 0x04, KEY_NUMERIC_4 },
- { 0x05, KEY_NUMERIC_5 },
- { 0x06, KEY_NUMERIC_6 },
- { 0x07, KEY_NUMERIC_7 },
- { 0x08, KEY_NUMERIC_8 },
- { 0x0a, KEY_POWER },
- { 0x0b, KEY_MUTE },
- { 0x0c, KEY_CHANNELUP },
- { 0x0d, KEY_VOLUMEUP },
- { 0x0e, KEY_CHANNELDOWN },
- { 0x0f, KEY_VOLUMEDOWN },
- { 0x10, KEY_UP },
- { 0x11, KEY_LEFT },
- { 0x12, KEY_OK },
- { 0x13, KEY_RIGHT },
- { 0x14, KEY_DOWN },
- { 0x15, KEY_MENU },
- { 0x16, KEY_EXIT },
- { 0x17, KEY_PLAY },
- { 0x18, KEY_PAUSE },
- { 0x19, KEY_STOP },
- { 0x1a, KEY_BACK },
- { 0x1b, KEY_FORWARD },
- { 0x1c, KEY_RECORD },
- { 0x1d, KEY_REWIND },
- { 0x1e, KEY_FASTFORWARD },
- { 0x20, KEY_RED },
- { 0x21, KEY_GREEN },
- { 0x22, KEY_YELLOW },
- { 0x23, KEY_BLUE },
-
-};
-
-static struct rc_keymap rc5_streamzap_map = {
- .map = {
- .scan = rc5_streamzap,
- .size = ARRAY_SIZE(rc5_streamzap),
- .ir_type = IR_TYPE_RC5,
- .name = RC_MAP_RC5_STREAMZAP,
- }
-};
-
-static int __init init_rc_map_rc5_streamzap(void)
-{
- return ir_register_map(&rc5_streamzap_map);
-}
-
-static void __exit exit_rc_map_rc5_streamzap(void)
-{
- ir_unregister_map(&rc5_streamzap_map);
-}
-
-module_init(init_rc_map_rc5_streamzap)
-module_exit(exit_rc_map_rc5_streamzap)
-
-MODULE_LICENSE("GPL");
-MODULE_AUTHOR("Jarod Wilson <jarod@redhat.com>");
diff --git a/drivers/media/IR/keymaps/rc-rc6-mce.c b/drivers/media/IR/keymaps/rc-rc6-mce.c
index 39557ad401b6..1b7adabbcee9 100644
--- a/drivers/media/IR/keymaps/rc-rc6-mce.c
+++ b/drivers/media/IR/keymaps/rc-rc6-mce.c
@@ -12,76 +12,78 @@
#include <media/rc-map.h>
static struct ir_scancode rc6_mce[] = {
- { 0x800f0415, KEY_REWIND },
- { 0x800f0414, KEY_FASTFORWARD },
- { 0x800f041b, KEY_PREVIOUS },
- { 0x800f041a, KEY_NEXT },
+ { 0x800f0400, KEY_NUMERIC_0 },
+ { 0x800f0401, KEY_NUMERIC_1 },
+ { 0x800f0402, KEY_NUMERIC_2 },
+ { 0x800f0403, KEY_NUMERIC_3 },
+ { 0x800f0404, KEY_NUMERIC_4 },
+ { 0x800f0405, KEY_NUMERIC_5 },
+ { 0x800f0406, KEY_NUMERIC_6 },
+ { 0x800f0407, KEY_NUMERIC_7 },
+ { 0x800f0408, KEY_NUMERIC_8 },
+ { 0x800f0409, KEY_NUMERIC_9 },
+
+ { 0x800f040a, KEY_DELETE },
+ { 0x800f040b, KEY_ENTER },
+ { 0x800f040c, KEY_POWER },
+ { 0x800f040d, KEY_PROG1 }, /* Windows MCE button */
+ { 0x800f040e, KEY_MUTE },
+ { 0x800f040f, KEY_INFO },
+
+ { 0x800f0410, KEY_VOLUMEUP },
+ { 0x800f0411, KEY_VOLUMEDOWN },
+ { 0x800f0412, KEY_CHANNELUP },
+ { 0x800f0413, KEY_CHANNELDOWN },
+
+ { 0x800f0414, KEY_FASTFORWARD },
+ { 0x800f0415, KEY_REWIND },
{ 0x800f0416, KEY_PLAY },
+ { 0x800f0417, KEY_RECORD },
{ 0x800f0418, KEY_PAUSE },
{ 0x800f046e, KEY_PLAYPAUSE },
{ 0x800f0419, KEY_STOP },
- { 0x800f0417, KEY_RECORD },
+ { 0x800f041a, KEY_NEXT },
+ { 0x800f041b, KEY_PREVIOUS },
+ { 0x800f041c, KEY_NUMERIC_POUND },
+ { 0x800f041d, KEY_NUMERIC_STAR },
{ 0x800f041e, KEY_UP },
{ 0x800f041f, KEY_DOWN },
{ 0x800f0420, KEY_LEFT },
{ 0x800f0421, KEY_RIGHT },
- { 0x800f040b, KEY_ENTER },
{ 0x800f0422, KEY_OK },
{ 0x800f0423, KEY_EXIT },
- { 0x800f040a, KEY_DELETE },
+ { 0x800f0424, KEY_DVD },
+ { 0x800f0425, KEY_TUNER }, /* LiveTV */
+ { 0x800f0426, KEY_EPG }, /* Guide */
+ { 0x800f0427, KEY_ZOOM }, /* Aspect */
- { 0x800f040e, KEY_MUTE },
- { 0x800f0410, KEY_VOLUMEUP },
- { 0x800f0411, KEY_VOLUMEDOWN },
- { 0x800f0412, KEY_CHANNELUP },
- { 0x800f0413, KEY_CHANNELDOWN },
{ 0x800f043a, KEY_BRIGHTNESSUP },
- { 0x800f0480, KEY_BRIGHTNESSDOWN },
-
- { 0x800f0401, KEY_NUMERIC_1 },
- { 0x800f0402, KEY_NUMERIC_2 },
- { 0x800f0403, KEY_NUMERIC_3 },
- { 0x800f0404, KEY_NUMERIC_4 },
- { 0x800f0405, KEY_NUMERIC_5 },
- { 0x800f0406, KEY_NUMERIC_6 },
- { 0x800f0407, KEY_NUMERIC_7 },
- { 0x800f0408, KEY_NUMERIC_8 },
- { 0x800f0409, KEY_NUMERIC_9 },
- { 0x800f0400, KEY_NUMERIC_0 },
-
- { 0x800f041d, KEY_NUMERIC_STAR },
- { 0x800f041c, KEY_NUMERIC_POUND },
{ 0x800f0446, KEY_TV },
- { 0x800f0447, KEY_AUDIO }, /* My Music */
- { 0x800f0448, KEY_PVR }, /* RecordedTV */
+ { 0x800f0447, KEY_AUDIO }, /* My Music */
+ { 0x800f0448, KEY_PVR }, /* RecordedTV */
{ 0x800f0449, KEY_CAMERA },
{ 0x800f044a, KEY_VIDEO },
- { 0x800f0424, KEY_DVD },
- { 0x800f0425, KEY_TUNER }, /* LiveTV */
- { 0x800f0450, KEY_RADIO },
-
{ 0x800f044c, KEY_LANGUAGE },
- { 0x800f0427, KEY_ZOOM }, /* Aspect */
+ { 0x800f044d, KEY_TITLE },
+ { 0x800f044e, KEY_PRINT }, /* Print - HP OEM version of remote */
+ { 0x800f0450, KEY_RADIO },
+
+ { 0x800f045a, KEY_SUBTITLE }, /* Caption/Teletext */
{ 0x800f045b, KEY_RED },
{ 0x800f045c, KEY_GREEN },
{ 0x800f045d, KEY_YELLOW },
{ 0x800f045e, KEY_BLUE },
- { 0x800f040f, KEY_INFO },
- { 0x800f0426, KEY_EPG }, /* Guide */
- { 0x800f045a, KEY_SUBTITLE }, /* Caption/Teletext */
- { 0x800f044d, KEY_TITLE },
-
- { 0x800f044e, KEY_PRINT }, /* Print - HP OEM version of remote */
-
- { 0x800f040c, KEY_POWER },
- { 0x800f040d, KEY_PROG1 }, /* Windows MCE button */
+ { 0x800f046e, KEY_PLAYPAUSE },
+ { 0x800f046f, KEY_MEDIA }, /* Start media application (NEW) */
+ { 0x800f0480, KEY_BRIGHTNESSDOWN },
+ { 0x800f0481, KEY_PLAYPAUSE },
};
static struct rc_keymap rc6_mce_map = {
diff --git a/drivers/media/IR/keymaps/rc-streamzap.c b/drivers/media/IR/keymaps/rc-streamzap.c
new file mode 100644
index 000000000000..df32013a321c
--- /dev/null
+++ b/drivers/media/IR/keymaps/rc-streamzap.c
@@ -0,0 +1,82 @@
+/* rc-streamzap.c - Keytable for Streamzap PC Remote, for use
+ * with the Streamzap PC Remote IR Receiver.
+ *
+ * Copyright (c) 2010 by Jarod Wilson <jarod@redhat.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#include <media/rc-map.h>
+
+static struct ir_scancode streamzap[] = {
+/*
+ * The Streamzap remote is almost, but not quite, RC-5, as it has an extra
+ * bit in it, which throws the in-kernel RC-5 decoder for a loop. Currently,
+ * an additional RC-5-sz decoder is being deployed to support it, but it
+ * may be possible to merge it back with the standard RC-5 decoder.
+ */
+ { 0x28c0, KEY_NUMERIC_0 },
+ { 0x28c1, KEY_NUMERIC_1 },
+ { 0x28c2, KEY_NUMERIC_2 },
+ { 0x28c3, KEY_NUMERIC_3 },
+ { 0x28c4, KEY_NUMERIC_4 },
+ { 0x28c5, KEY_NUMERIC_5 },
+ { 0x28c6, KEY_NUMERIC_6 },
+ { 0x28c7, KEY_NUMERIC_7 },
+ { 0x28c8, KEY_NUMERIC_8 },
+ { 0x28c9, KEY_NUMERIC_9 },
+ { 0x28ca, KEY_POWER },
+ { 0x28cb, KEY_MUTE },
+ { 0x28cc, KEY_CHANNELUP },
+ { 0x28cd, KEY_VOLUMEUP },
+ { 0x28ce, KEY_CHANNELDOWN },
+ { 0x28cf, KEY_VOLUMEDOWN },
+ { 0x28d0, KEY_UP },
+ { 0x28d1, KEY_LEFT },
+ { 0x28d2, KEY_OK },
+ { 0x28d3, KEY_RIGHT },
+ { 0x28d4, KEY_DOWN },
+ { 0x28d5, KEY_MENU },
+ { 0x28d6, KEY_EXIT },
+ { 0x28d7, KEY_PLAY },
+ { 0x28d8, KEY_PAUSE },
+ { 0x28d9, KEY_STOP },
+ { 0x28da, KEY_BACK },
+ { 0x28db, KEY_FORWARD },
+ { 0x28dc, KEY_RECORD },
+ { 0x28dd, KEY_REWIND },
+ { 0x28de, KEY_FASTFORWARD },
+ { 0x28e0, KEY_RED },
+ { 0x28e1, KEY_GREEN },
+ { 0x28e2, KEY_YELLOW },
+ { 0x28e3, KEY_BLUE },
+
+};
+
+static struct rc_keymap streamzap_map = {
+ .map = {
+ .scan = streamzap,
+ .size = ARRAY_SIZE(streamzap),
+ .ir_type = IR_TYPE_RC5_SZ,
+ .name = RC_MAP_STREAMZAP,
+ }
+};
+
+static int __init init_rc_map_streamzap(void)
+{
+ return ir_register_map(&streamzap_map);
+}
+
+static void __exit exit_rc_map_streamzap(void)
+{
+ ir_unregister_map(&streamzap_map);
+}
+
+module_init(init_rc_map_streamzap)
+module_exit(exit_rc_map_streamzap)
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Jarod Wilson <jarod@redhat.com>");
diff --git a/drivers/media/IR/keymaps/rc-terratec-slim.c b/drivers/media/IR/keymaps/rc-terratec-slim.c
new file mode 100644
index 000000000000..10dee4c1deff
--- /dev/null
+++ b/drivers/media/IR/keymaps/rc-terratec-slim.c
@@ -0,0 +1,79 @@
+/*
+ * TerraTec remote controller keytable
+ *
+ * Copyright (C) 2010 Antti Palosaari <crope@iki.fi>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
+ */
+
+#include <media/rc-map.h>
+
+/* TerraTec slim remote, 7 rows, 4 columns. */
+/* Uses NEC extended 0x02bd. */
+static struct ir_scancode terratec_slim[] = {
+ { 0x02bd00, KEY_1 },
+ { 0x02bd01, KEY_2 },
+ { 0x02bd02, KEY_3 },
+ { 0x02bd03, KEY_4 },
+ { 0x02bd04, KEY_5 },
+ { 0x02bd05, KEY_6 },
+ { 0x02bd06, KEY_7 },
+ { 0x02bd07, KEY_8 },
+ { 0x02bd08, KEY_9 },
+ { 0x02bd09, KEY_0 },
+ { 0x02bd0a, KEY_MUTE },
+ { 0x02bd0b, KEY_NEW }, /* symbol: PIP */
+ { 0x02bd0e, KEY_VOLUMEDOWN },
+ { 0x02bd0f, KEY_PLAYPAUSE },
+ { 0x02bd10, KEY_RIGHT },
+ { 0x02bd11, KEY_LEFT },
+ { 0x02bd12, KEY_UP },
+ { 0x02bd13, KEY_DOWN },
+ { 0x02bd15, KEY_OK },
+ { 0x02bd16, KEY_STOP },
+ { 0x02bd17, KEY_CAMERA }, /* snapshot */
+ { 0x02bd18, KEY_CHANNELUP },
+ { 0x02bd19, KEY_RECORD },
+ { 0x02bd1a, KEY_CHANNELDOWN },
+ { 0x02bd1c, KEY_ESC },
+ { 0x02bd1f, KEY_VOLUMEUP },
+ { 0x02bd44, KEY_EPG },
+ { 0x02bd45, KEY_POWER2 }, /* [red power button] */
+};
+
+static struct rc_keymap terratec_slim_map = {
+ .map = {
+ .scan = terratec_slim,
+ .size = ARRAY_SIZE(terratec_slim),
+ .ir_type = IR_TYPE_NEC,
+ .name = RC_MAP_TERRATEC_SLIM,
+ }
+};
+
+static int __init init_rc_map_terratec_slim(void)
+{
+ return ir_register_map(&terratec_slim_map);
+}
+
+static void __exit exit_rc_map_terratec_slim(void)
+{
+ ir_unregister_map(&terratec_slim_map);
+}
+
+module_init(init_rc_map_terratec_slim)
+module_exit(exit_rc_map_terratec_slim)
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Antti Palosaari <crope@iki.fi>");
diff --git a/drivers/media/IR/keymaps/rc-total-media-in-hand.c b/drivers/media/IR/keymaps/rc-total-media-in-hand.c
new file mode 100644
index 000000000000..fd1985763781
--- /dev/null
+++ b/drivers/media/IR/keymaps/rc-total-media-in-hand.c
@@ -0,0 +1,85 @@
+/*
+ * Total Media In Hand remote controller keytable
+ *
+ * Copyright (C) 2010 Antti Palosaari <crope@iki.fi>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
+ */
+
+#include <media/rc-map.h>
+
+/* Uses NEC extended 0x02bd */
+static struct ir_scancode total_media_in_hand[] = {
+ { 0x02bd00, KEY_1 },
+ { 0x02bd01, KEY_2 },
+ { 0x02bd02, KEY_3 },
+ { 0x02bd03, KEY_4 },
+ { 0x02bd04, KEY_5 },
+ { 0x02bd05, KEY_6 },
+ { 0x02bd06, KEY_7 },
+ { 0x02bd07, KEY_8 },
+ { 0x02bd08, KEY_9 },
+ { 0x02bd09, KEY_0 },
+ { 0x02bd0a, KEY_MUTE },
+ { 0x02bd0b, KEY_CYCLEWINDOWS }, /* yellow, [min / max] */
+ { 0x02bd0c, KEY_VIDEO }, /* TV / AV */
+ { 0x02bd0e, KEY_VOLUMEDOWN },
+ { 0x02bd0f, KEY_TIME }, /* TimeShift */
+ { 0x02bd10, KEY_RIGHT }, /* right arrow */
+ { 0x02bd11, KEY_LEFT }, /* left arrow */
+ { 0x02bd12, KEY_UP }, /* up arrow */
+ { 0x02bd13, KEY_DOWN }, /* down arrow */
+ { 0x02bd14, KEY_POWER2 }, /* [red] */
+ { 0x02bd15, KEY_OK }, /* OK */
+ { 0x02bd16, KEY_STOP },
+ { 0x02bd17, KEY_CAMERA }, /* Snapshot */
+ { 0x02bd18, KEY_CHANNELUP },
+ { 0x02bd19, KEY_RECORD },
+ { 0x02bd1a, KEY_CHANNELDOWN },
+ { 0x02bd1c, KEY_ESC }, /* Esc */
+ { 0x02bd1e, KEY_PLAY },
+ { 0x02bd1f, KEY_VOLUMEUP },
+ { 0x02bd40, KEY_PAUSE },
+ { 0x02bd41, KEY_FASTFORWARD }, /* FF >> */
+ { 0x02bd42, KEY_REWIND }, /* FR << */
+ { 0x02bd43, KEY_ZOOM }, /* [window + mouse pointer] */
+ { 0x02bd44, KEY_SHUFFLE }, /* Shuffle */
+ { 0x02bd45, KEY_INFO }, /* [red (I)] */
+};
+
+static struct rc_keymap total_media_in_hand_map = {
+ .map = {
+ .scan = total_media_in_hand,
+ .size = ARRAY_SIZE(total_media_in_hand),
+ .ir_type = IR_TYPE_NEC,
+ .name = RC_MAP_TOTAL_MEDIA_IN_HAND,
+ }
+};
+
+static int __init init_rc_map_total_media_in_hand(void)
+{
+ return ir_register_map(&total_media_in_hand_map);
+}
+
+static void __exit exit_rc_map_total_media_in_hand(void)
+{
+ ir_unregister_map(&total_media_in_hand_map);
+}
+
+module_init(init_rc_map_total_media_in_hand)
+module_exit(exit_rc_map_total_media_in_hand)
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Antti Palosaari <crope@iki.fi>");
diff --git a/drivers/media/IR/keymaps/rc-trekstor.c b/drivers/media/IR/keymaps/rc-trekstor.c
new file mode 100644
index 000000000000..91092caca452
--- /dev/null
+++ b/drivers/media/IR/keymaps/rc-trekstor.c
@@ -0,0 +1,80 @@
+/*
+ * TrekStor remote controller keytable
+ *
+ * Copyright (C) 2010 Antti Palosaari <crope@iki.fi>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
+ */
+
+#include <media/rc-map.h>
+
+/* TrekStor DVB-T USB Stick remote controller. */
+/* Imported from af9015.h.
+ Initial keytable was from Marc Schneider <macke@macke.org> */
+static struct ir_scancode trekstor[] = {
+ { 0x0084, KEY_0 },
+ { 0x0085, KEY_MUTE }, /* Mute */
+ { 0x0086, KEY_HOMEPAGE }, /* Home */
+ { 0x0087, KEY_UP }, /* Up */
+ { 0x0088, KEY_OK }, /* OK */
+ { 0x0089, KEY_RIGHT }, /* Right */
+ { 0x008a, KEY_FASTFORWARD }, /* Fast forward */
+ { 0x008b, KEY_VOLUMEUP }, /* Volume + */
+ { 0x008c, KEY_DOWN }, /* Down */
+ { 0x008d, KEY_PLAY }, /* Play/Pause */
+ { 0x008e, KEY_STOP }, /* Stop */
+ { 0x008f, KEY_EPG }, /* Info/EPG */
+ { 0x0090, KEY_7 },
+ { 0x0091, KEY_4 },
+ { 0x0092, KEY_1 },
+ { 0x0093, KEY_CHANNELDOWN }, /* Channel - */
+ { 0x0094, KEY_8 },
+ { 0x0095, KEY_5 },
+ { 0x0096, KEY_2 },
+ { 0x0097, KEY_CHANNELUP }, /* Channel + */
+ { 0x0098, KEY_9 },
+ { 0x0099, KEY_6 },
+ { 0x009a, KEY_3 },
+ { 0x009b, KEY_VOLUMEDOWN }, /* Volume - */
+ { 0x009c, KEY_TV }, /* TV */
+ { 0x009d, KEY_RECORD }, /* Record */
+ { 0x009e, KEY_REWIND }, /* Rewind */
+ { 0x009f, KEY_LEFT }, /* Left */
+};
+
+static struct rc_keymap trekstor_map = {
+ .map = {
+ .scan = trekstor,
+ .size = ARRAY_SIZE(trekstor),
+ .ir_type = IR_TYPE_NEC,
+ .name = RC_MAP_TREKSTOR,
+ }
+};
+
+static int __init init_rc_map_trekstor(void)
+{
+ return ir_register_map(&trekstor_map);
+}
+
+static void __exit exit_rc_map_trekstor(void)
+{
+ ir_unregister_map(&trekstor_map);
+}
+
+module_init(init_rc_map_trekstor)
+module_exit(exit_rc_map_trekstor)
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Antti Palosaari <crope@iki.fi>");
diff --git a/drivers/media/IR/keymaps/rc-twinhan1027.c b/drivers/media/IR/keymaps/rc-twinhan1027.c
new file mode 100644
index 000000000000..0b5d356c2d84
--- /dev/null
+++ b/drivers/media/IR/keymaps/rc-twinhan1027.c
@@ -0,0 +1,87 @@
+#include <media/rc-map.h>
+
+static struct ir_scancode twinhan_vp1027[] = {
+ { 0x16, KEY_POWER2 },
+ { 0x17, KEY_FAVORITES },
+ { 0x0f, KEY_TEXT },
+ { 0x48, KEY_INFO},
+ { 0x1c, KEY_EPG },
+ { 0x04, KEY_LIST },
+
+ { 0x03, KEY_1 },
+ { 0x01, KEY_2 },
+ { 0x06, KEY_3 },
+ { 0x09, KEY_4 },
+ { 0x1d, KEY_5 },
+ { 0x1f, KEY_6 },
+ { 0x0d, KEY_7 },
+ { 0x19, KEY_8 },
+ { 0x1b, KEY_9 },
+ { 0x15, KEY_0 },
+
+ { 0x0c, KEY_CANCEL },
+ { 0x4a, KEY_CLEAR },
+ { 0x13, KEY_BACKSPACE },
+ { 0x00, KEY_TAB },
+
+ { 0x4b, KEY_UP },
+ { 0x51, KEY_DOWN },
+ { 0x4e, KEY_LEFT },
+ { 0x52, KEY_RIGHT },
+ { 0x4f, KEY_ENTER },
+
+ { 0x1e, KEY_VOLUMEUP },
+ { 0x0a, KEY_VOLUMEDOWN },
+ { 0x02, KEY_CHANNELDOWN },
+ { 0x05, KEY_CHANNELUP },
+ { 0x11, KEY_RECORD },
+
+ { 0x14, KEY_PLAY },
+ { 0x4c, KEY_PAUSE },
+ { 0x1a, KEY_STOP },
+ { 0x40, KEY_REWIND },
+ { 0x12, KEY_FASTFORWARD },
+ { 0x41, KEY_PREVIOUSSONG },
+ { 0x42, KEY_NEXTSONG },
+ { 0x54, KEY_SAVE },
+ { 0x50, KEY_LANGUAGE },
+ { 0x47, KEY_MEDIA },
+ { 0x4d, KEY_SCREEN },
+ { 0x43, KEY_SUBTITLE },
+ { 0x10, KEY_MUTE },
+ { 0x49, KEY_AUDIO },
+ { 0x07, KEY_SLEEP },
+ { 0x08, KEY_VIDEO },
+ { 0x0e, KEY_AGAIN },
+ { 0x45, KEY_EQUAL },
+ { 0x46, KEY_MINUS },
+ { 0x18, KEY_RED },
+ { 0x53, KEY_GREEN },
+ { 0x5e, KEY_YELLOW },
+ { 0x5f, KEY_BLUE },
+};
+
+static struct rc_keymap twinhan_vp1027_map = {
+ .map = {
+ .scan = twinhan_vp1027,
+ .size = ARRAY_SIZE(twinhan_vp1027),
+ .ir_type = IR_TYPE_UNKNOWN, /* Legacy IR type */
+ .name = RC_MAP_TWINHAN_VP1027_DVBS,
+ }
+};
+
+static int __init init_rc_map_twinhan_vp1027(void)
+{
+ return ir_register_map(&twinhan_vp1027_map);
+}
+
+static void __exit exit_rc_map_twinhan_vp1027(void)
+{
+ ir_unregister_map(&twinhan_vp1027_map);
+}
+
+module_init(init_rc_map_twinhan_vp1027)
+module_exit(exit_rc_map_twinhan_vp1027)
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Sergey Ivanov <123kash@gmail.com>");
diff --git a/drivers/media/IR/lirc_dev.c b/drivers/media/IR/lirc_dev.c
index 0acf6396e068..8418b14ee4d2 100644
--- a/drivers/media/IR/lirc_dev.c
+++ b/drivers/media/IR/lirc_dev.c
@@ -27,7 +27,6 @@
#include <linux/fs.h>
#include <linux/poll.h>
#include <linux/completion.h>
-#include <linux/errno.h>
#include <linux/mutex.h>
#include <linux/wait.h>
#include <linux/unistd.h>
@@ -58,13 +57,12 @@ struct irctl {
struct task_struct *task;
long jiffies_to_wait;
-
- struct cdev cdev;
};
static DEFINE_MUTEX(lirc_dev_lock);
static struct irctl *irctls[MAX_IRCTL_DEVICES];
+static struct cdev cdevs[MAX_IRCTL_DEVICES];
/* Only used for sysfs but defined to void otherwise */
static struct class *lirc_class;
@@ -72,15 +70,13 @@ static struct class *lirc_class;
/* helper function
* initializes the irctl structure
*/
-static void init_irctl(struct irctl *ir)
+static void lirc_irctl_init(struct irctl *ir)
{
- dev_dbg(ir->d.dev, LOGHEAD "initializing irctl\n",
- ir->d.name, ir->d.minor);
mutex_init(&ir->irctl_lock);
ir->d.minor = NOPLUG;
}
-static void cleanup(struct irctl *ir)
+static void lirc_irctl_cleanup(struct irctl *ir)
{
dev_dbg(ir->d.dev, LOGHEAD "cleaning up\n", ir->d.name, ir->d.minor);
@@ -97,7 +93,7 @@ static void cleanup(struct irctl *ir)
* reads key codes from driver and puts them into buffer
* returns 0 on success
*/
-static int add_to_buf(struct irctl *ir)
+static int lirc_add_to_buf(struct irctl *ir)
{
if (ir->d.add_to_buf) {
int res = -ENODATA;
@@ -140,7 +136,7 @@ static int lirc_thread(void *irctl)
}
if (kthread_should_stop())
break;
- if (!add_to_buf(ir))
+ if (!lirc_add_to_buf(ir))
wake_up_interruptible(&ir->buf->wait_poll);
} else {
set_current_state(TASK_INTERRUPTIBLE);
@@ -155,12 +151,15 @@ static int lirc_thread(void *irctl)
}
-static struct file_operations fops = {
+static struct file_operations lirc_dev_fops = {
.owner = THIS_MODULE,
.read = lirc_dev_fop_read,
.write = lirc_dev_fop_write,
.poll = lirc_dev_fop_poll,
.unlocked_ioctl = lirc_dev_fop_ioctl,
+#ifdef CONFIG_COMPAT
+ .compat_ioctl = lirc_dev_fop_ioctl,
+#endif
.open = lirc_dev_fop_open,
.release = lirc_dev_fop_close,
.llseek = noop_llseek,
@@ -170,19 +169,20 @@ static int lirc_cdev_add(struct irctl *ir)
{
int retval;
struct lirc_driver *d = &ir->d;
+ struct cdev *cdev = &cdevs[d->minor];
if (d->fops) {
- cdev_init(&ir->cdev, d->fops);
- ir->cdev.owner = d->owner;
+ cdev_init(cdev, d->fops);
+ cdev->owner = d->owner;
} else {
- cdev_init(&ir->cdev, &fops);
- ir->cdev.owner = THIS_MODULE;
+ cdev_init(cdev, &lirc_dev_fops);
+ cdev->owner = THIS_MODULE;
}
- kobject_set_name(&ir->cdev.kobj, "lirc%d", d->minor);
+ kobject_set_name(&cdev->kobj, "lirc%d", d->minor);
- retval = cdev_add(&ir->cdev, MKDEV(MAJOR(lirc_base_dev), d->minor), 1);
+ retval = cdev_add(cdev, MKDEV(MAJOR(lirc_base_dev), d->minor), 1);
if (retval)
- kobject_put(&ir->cdev.kobj);
+ kobject_put(&cdev->kobj);
return retval;
}
@@ -203,6 +203,12 @@ int lirc_register_driver(struct lirc_driver *d)
goto out;
}
+ if (!d->dev) {
+ printk(KERN_ERR "%s: dev pointer not filled in!\n", __func__);
+ err = -EINVAL;
+ goto out;
+ }
+
if (MAX_IRCTL_DEVICES <= d->minor) {
dev_err(d->dev, "lirc_dev: lirc_register_driver: "
"\"minor\" must be between 0 and %d (%d)!\n",
@@ -278,7 +284,7 @@ int lirc_register_driver(struct lirc_driver *d)
err = -ENOMEM;
goto out_lock;
}
- init_irctl(ir);
+ lirc_irctl_init(ir);
irctls[minor] = ir;
d->minor = minor;
@@ -317,7 +323,6 @@ int lirc_register_driver(struct lirc_driver *d)
d->features = LIRC_CAN_REC_LIRCCODE;
ir->d = *d;
- ir->d.minor = minor;
device_create(lirc_class, ir->d.dev,
MKDEV(MAJOR(lirc_base_dev), ir->d.minor), NULL,
@@ -358,21 +363,28 @@ EXPORT_SYMBOL(lirc_register_driver);
int lirc_unregister_driver(int minor)
{
struct irctl *ir;
+ struct cdev *cdev;
if (minor < 0 || minor >= MAX_IRCTL_DEVICES) {
- printk(KERN_ERR "lirc_dev: lirc_unregister_driver: "
- "\"minor (%d)\" must be between 0 and %d!\n",
- minor, MAX_IRCTL_DEVICES-1);
+ printk(KERN_ERR "lirc_dev: %s: minor (%d) must be between "
+ "0 and %d!\n", __func__, minor, MAX_IRCTL_DEVICES-1);
return -EBADRQC;
}
ir = irctls[minor];
+ if (!ir) {
+ printk(KERN_ERR "lirc_dev: %s: failed to get irctl struct "
+ "for minor %d!\n", __func__, minor);
+ return -ENOENT;
+ }
+
+ cdev = &cdevs[minor];
mutex_lock(&lirc_dev_lock);
if (ir->d.minor != minor) {
- printk(KERN_ERR "lirc_dev: lirc_unregister_driver: "
- "minor (%d) device not registered!", minor);
+ printk(KERN_ERR "lirc_dev: %s: minor (%d) device not "
+ "registered!\n", __func__, minor);
mutex_unlock(&lirc_dev_lock);
return -ENOENT;
}
@@ -391,12 +403,11 @@ int lirc_unregister_driver(int minor)
wake_up_interruptible(&ir->buf->wait_poll);
mutex_lock(&ir->irctl_lock);
ir->d.set_use_dec(ir->d.data);
- module_put(ir->d.owner);
+ module_put(cdev->owner);
mutex_unlock(&ir->irctl_lock);
- cdev_del(&ir->cdev);
} else {
- cleanup(ir);
- cdev_del(&ir->cdev);
+ lirc_irctl_cleanup(ir);
+ cdev_del(cdev);
kfree(ir);
irctls[minor] = NULL;
}
@@ -410,6 +421,7 @@ EXPORT_SYMBOL(lirc_unregister_driver);
int lirc_dev_fop_open(struct inode *inode, struct file *file)
{
struct irctl *ir;
+ struct cdev *cdev;
int retval = 0;
if (iminor(inode) >= MAX_IRCTL_DEVICES) {
@@ -426,7 +438,6 @@ int lirc_dev_fop_open(struct inode *inode, struct file *file)
retval = -ENODEV;
goto error;
}
- file->private_data = ir;
dev_dbg(ir->d.dev, LOGHEAD "open called\n", ir->d.name, ir->d.minor);
@@ -440,13 +451,14 @@ int lirc_dev_fop_open(struct inode *inode, struct file *file)
goto error;
}
- if (try_module_get(ir->d.owner)) {
- ++ir->open;
+ cdev = &cdevs[iminor(inode)];
+ if (try_module_get(cdev->owner)) {
+ ir->open++;
retval = ir->d.set_use_inc(ir->d.data);
if (retval) {
- module_put(ir->d.owner);
- --ir->open;
+ module_put(cdev->owner);
+ ir->open--;
} else {
lirc_buffer_clear(ir->buf);
}
@@ -470,17 +482,24 @@ EXPORT_SYMBOL(lirc_dev_fop_open);
int lirc_dev_fop_close(struct inode *inode, struct file *file)
{
struct irctl *ir = irctls[iminor(inode)];
+ struct cdev *cdev = &cdevs[iminor(inode)];
+
+ if (!ir) {
+ printk(KERN_ERR "%s: called with invalid irctl\n", __func__);
+ return -EINVAL;
+ }
dev_dbg(ir->d.dev, LOGHEAD "close called\n", ir->d.name, ir->d.minor);
WARN_ON(mutex_lock_killable(&lirc_dev_lock));
- --ir->open;
+ ir->open--;
if (ir->attached) {
ir->d.set_use_dec(ir->d.data);
- module_put(ir->d.owner);
+ module_put(cdev->owner);
} else {
- cleanup(ir);
+ lirc_irctl_cleanup(ir);
+ cdev_del(cdev);
irctls[ir->d.minor] = NULL;
kfree(ir);
}
@@ -496,6 +515,11 @@ unsigned int lirc_dev_fop_poll(struct file *file, poll_table *wait)
struct irctl *ir = irctls[iminor(file->f_dentry->d_inode)];
unsigned int ret;
+ if (!ir) {
+ printk(KERN_ERR "%s: called with invalid irctl\n", __func__);
+ return POLLERR;
+ }
+
dev_dbg(ir->d.dev, LOGHEAD "poll called\n", ir->d.name, ir->d.minor);
if (!ir->attached) {
@@ -522,9 +546,14 @@ EXPORT_SYMBOL(lirc_dev_fop_poll);
long lirc_dev_fop_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
{
- unsigned long mode;
+ __u32 mode;
int result = 0;
- struct irctl *ir = file->private_data;
+ struct irctl *ir = irctls[iminor(file->f_dentry->d_inode)];
+
+ if (!ir) {
+ printk(KERN_ERR "lirc_dev: %s: no irctl found!\n", __func__);
+ return -ENODEV;
+ }
dev_dbg(ir->d.dev, LOGHEAD "ioctl called (0x%x)\n",
ir->d.name, ir->d.minor, cmd);
@@ -539,7 +568,7 @@ long lirc_dev_fop_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
switch (cmd) {
case LIRC_GET_FEATURES:
- result = put_user(ir->d.features, (unsigned long *)arg);
+ result = put_user(ir->d.features, (__u32 *)arg);
break;
case LIRC_GET_REC_MODE:
if (!(ir->d.features & LIRC_CAN_REC_MASK)) {
@@ -549,7 +578,7 @@ long lirc_dev_fop_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
result = put_user(LIRC_REC2MODE
(ir->d.features & LIRC_CAN_REC_MASK),
- (unsigned long *)arg);
+ (__u32 *)arg);
break;
case LIRC_SET_REC_MODE:
if (!(ir->d.features & LIRC_CAN_REC_MASK)) {
@@ -557,7 +586,7 @@ long lirc_dev_fop_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
break;
}
- result = get_user(mode, (unsigned long *)arg);
+ result = get_user(mode, (__u32 *)arg);
if (!result && !(LIRC_MODE2REC(mode) & ir->d.features))
result = -EINVAL;
/*
@@ -566,7 +595,7 @@ long lirc_dev_fop_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
*/
break;
case LIRC_GET_LENGTH:
- result = put_user(ir->d.code_length, (unsigned long *)arg);
+ result = put_user(ir->d.code_length, (__u32 *)arg);
break;
case LIRC_GET_MIN_TIMEOUT:
if (!(ir->d.features & LIRC_CAN_SET_REC_TIMEOUT) ||
@@ -575,7 +604,7 @@ long lirc_dev_fop_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
break;
}
- result = put_user(ir->d.min_timeout, (unsigned long *)arg);
+ result = put_user(ir->d.min_timeout, (__u32 *)arg);
break;
case LIRC_GET_MAX_TIMEOUT:
if (!(ir->d.features & LIRC_CAN_SET_REC_TIMEOUT) ||
@@ -584,7 +613,7 @@ long lirc_dev_fop_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
break;
}
- result = put_user(ir->d.max_timeout, (unsigned long *)arg);
+ result = put_user(ir->d.max_timeout, (__u32 *)arg);
break;
default:
result = -EINVAL;
@@ -605,12 +634,21 @@ ssize_t lirc_dev_fop_read(struct file *file,
loff_t *ppos)
{
struct irctl *ir = irctls[iminor(file->f_dentry->d_inode)];
- unsigned char buf[ir->chunk_size];
+ unsigned char *buf;
int ret = 0, written = 0;
DECLARE_WAITQUEUE(wait, current);
+ if (!ir) {
+ printk(KERN_ERR "%s: called with invalid irctl\n", __func__);
+ return -ENODEV;
+ }
+
dev_dbg(ir->d.dev, LOGHEAD "read called\n", ir->d.name, ir->d.minor);
+ buf = kzalloc(ir->chunk_size, GFP_KERNEL);
+ if (!buf)
+ return -ENOMEM;
+
if (mutex_lock_interruptible(&ir->irctl_lock))
return -ERESTARTSYS;
if (!ir->attached) {
@@ -682,6 +720,7 @@ ssize_t lirc_dev_fop_read(struct file *file,
mutex_unlock(&ir->irctl_lock);
out_unlocked:
+ kfree(buf);
dev_dbg(ir->d.dev, LOGHEAD "read result = %s (%d)\n",
ir->d.name, ir->d.minor, ret ? "-EFAULT" : "OK", ret);
@@ -710,6 +749,11 @@ ssize_t lirc_dev_fop_write(struct file *file, const char *buffer,
{
struct irctl *ir = irctls[iminor(file->f_dentry->d_inode)];
+ if (!ir) {
+ printk(KERN_ERR "%s: called with invalid irctl\n", __func__);
+ return -ENODEV;
+ }
+
dev_dbg(ir->d.dev, LOGHEAD "write called\n", ir->d.name, ir->d.minor);
if (!ir->attached)
diff --git a/drivers/media/IR/mceusb.c b/drivers/media/IR/mceusb.c
index bc620e10ef77..9dce684fd231 100644
--- a/drivers/media/IR/mceusb.c
+++ b/drivers/media/IR/mceusb.c
@@ -46,24 +46,58 @@
"device driver"
#define DRIVER_NAME "mceusb"
-#define USB_BUFLEN 32 /* USB reception buffer length */
-#define USB_CTRL_MSG_SZ 2 /* Size of usb ctrl msg on gen1 hw */
-#define MCE_G1_INIT_MSGS 40 /* Init messages on gen1 hw to throw out */
+#define USB_BUFLEN 32 /* USB reception buffer length */
+#define USB_CTRL_MSG_SZ 2 /* Size of usb ctrl msg on gen1 hw */
+#define MCE_G1_INIT_MSGS 40 /* Init messages on gen1 hw to throw out */
/* MCE constants */
-#define MCE_CMDBUF_SIZE 384 /* MCE Command buffer length */
-#define MCE_TIME_UNIT 50 /* Approx 50us resolution */
-#define MCE_CODE_LENGTH 5 /* Normal length of packet (with header) */
-#define MCE_PACKET_SIZE 4 /* Normal length of packet (without header) */
-#define MCE_PACKET_HEADER 0x84 /* Actual header format is 0x80 + num_bytes */
-#define MCE_CONTROL_HEADER 0x9F /* MCE status header */
-#define MCE_TX_HEADER_LENGTH 3 /* # of bytes in the initializing tx header */
-#define MCE_MAX_CHANNELS 2 /* Two transmitters, hardware dependent? */
-#define MCE_DEFAULT_TX_MASK 0x03 /* Val opts: TX1=0x01, TX2=0x02, ALL=0x03 */
-#define MCE_PULSE_BIT 0x80 /* Pulse bit, MSB set == PULSE else SPACE */
-#define MCE_PULSE_MASK 0x7F /* Pulse mask */
-#define MCE_MAX_PULSE_LENGTH 0x7F /* Longest transmittable pulse symbol */
-#define MCE_PACKET_LENGTH_MASK 0x1F /* Packet length mask */
+#define MCE_CMDBUF_SIZE 384 /* MCE Command buffer length */
+#define MCE_TIME_UNIT 50 /* Approx 50us resolution */
+#define MCE_CODE_LENGTH 5 /* Normal length of packet (with header) */
+#define MCE_PACKET_SIZE 4 /* Normal length of packet (without header) */
+#define MCE_IRDATA_HEADER 0x84 /* Actual header format is 0x80 + num_bytes */
+#define MCE_IRDATA_TRAILER 0x80 /* End of IR data */
+#define MCE_TX_HEADER_LENGTH 3 /* # of bytes in the initializing tx header */
+#define MCE_MAX_CHANNELS 2 /* Two transmitters, hardware dependent? */
+#define MCE_DEFAULT_TX_MASK 0x03 /* Vals: TX1=0x01, TX2=0x02, ALL=0x03 */
+#define MCE_PULSE_BIT 0x80 /* Pulse bit, MSB set == PULSE else SPACE */
+#define MCE_PULSE_MASK 0x7f /* Pulse mask */
+#define MCE_MAX_PULSE_LENGTH 0x7f /* Longest transmittable pulse symbol */
+
+#define MCE_HW_CMD_HEADER 0xff /* MCE hardware command header */
+#define MCE_COMMAND_HEADER 0x9f /* MCE command header */
+#define MCE_COMMAND_MASK 0xe0 /* Mask out command bits */
+#define MCE_COMMAND_NULL 0x00 /* These show up in various places... */
+/* if buf[i] & MCE_COMMAND_MASK == 0x80 and buf[i] != MCE_COMMAND_HEADER,
+ * then we're looking at a raw IR data sample */
+#define MCE_COMMAND_IRDATA 0x80
+#define MCE_PACKET_LENGTH_MASK 0x1f /* Packet length mask */
+
+/* Sub-commands, which follow MCE_COMMAND_HEADER or MCE_HW_CMD_HEADER */
+#define MCE_CMD_PING 0x03 /* Ping device */
+#define MCE_CMD_UNKNOWN 0x04 /* Unknown */
+#define MCE_CMD_UNKNOWN2 0x05 /* Unknown */
+#define MCE_CMD_S_CARRIER 0x06 /* Set TX carrier frequency */
+#define MCE_CMD_G_CARRIER 0x07 /* Get TX carrier frequency */
+#define MCE_CMD_S_TXMASK 0x08 /* Set TX port bitmask */
+#define MCE_CMD_UNKNOWN3 0x09 /* Unknown */
+#define MCE_CMD_UNKNOWN4 0x0a /* Unknown */
+#define MCE_CMD_G_REVISION 0x0b /* Get hw/sw revision */
+#define MCE_CMD_S_TIMEOUT 0x0c /* Set RX timeout value */
+#define MCE_CMD_G_TIMEOUT 0x0d /* Get RX timeout value */
+#define MCE_CMD_UNKNOWN5 0x0e /* Unknown */
+#define MCE_CMD_UNKNOWN6 0x0f /* Unknown */
+#define MCE_CMD_G_RXPORTSTS 0x11 /* Get RX port status */
+#define MCE_CMD_G_TXMASK 0x13 /* Get TX port bitmask */
+#define MCE_CMD_S_RXSENSOR 0x14 /* Set RX sensor (std/learning) */
+#define MCE_CMD_G_RXSENSOR 0x15 /* Get RX sensor (std/learning) */
+#define MCE_CMD_TX_PORTS 0x16 /* Get number of TX ports */
+#define MCE_CMD_G_WAKESRC 0x17 /* Get wake source */
+#define MCE_CMD_UNKNOWN7 0x18 /* Unknown */
+#define MCE_CMD_UNKNOWN8 0x19 /* Unknown */
+#define MCE_CMD_UNKNOWN9 0x1b /* Unknown */
+#define MCE_CMD_DEVICE_RESET 0xaa /* Reset the hardware */
+#define MCE_RSP_CMD_INVALID 0xfe /* Invalid command issued */
/* module parameters */
@@ -104,14 +138,64 @@ static int debug;
#define VENDOR_NORTHSTAR 0x04eb
#define VENDOR_REALTEK 0x0bda
#define VENDOR_TIVO 0x105a
+#define VENDOR_CONEXANT 0x0572
+
+enum mceusb_model_type {
+ MCE_GEN2 = 0, /* Most boards */
+ MCE_GEN1,
+ MCE_GEN3,
+ MCE_GEN2_TX_INV,
+ POLARIS_EVK,
+};
+
+struct mceusb_model {
+ u32 mce_gen1:1;
+ u32 mce_gen2:1;
+ u32 mce_gen3:1;
+ u32 tx_mask_inverted:1;
+ u32 is_polaris:1;
+
+ const char *rc_map; /* Allows specifying a per-board map */
+ const char *name; /* per-board name */
+};
+
+static const struct mceusb_model mceusb_model[] = {
+ [MCE_GEN1] = {
+ .mce_gen1 = 1,
+ .tx_mask_inverted = 1,
+ },
+ [MCE_GEN2] = {
+ .mce_gen2 = 1,
+ },
+ [MCE_GEN2_TX_INV] = {
+ .mce_gen2 = 1,
+ .tx_mask_inverted = 1,
+ },
+ [MCE_GEN3] = {
+ .mce_gen3 = 1,
+ .tx_mask_inverted = 1,
+ },
+ [POLARIS_EVK] = {
+ .is_polaris = 1,
+ /*
+ * In fact, the EVK is shipped without
+ * remotes, but we should have something handy
+ * to allow testing it.
+ */
+ .rc_map = RC_MAP_RC5_HAUPPAUGE_NEW,
+ .name = "cx231xx MCE IR",
+ },
+};
static struct usb_device_id mceusb_dev_table[] = {
/* Original Microsoft MCE IR Transceiver (often HP-branded) */
- { USB_DEVICE(VENDOR_MICROSOFT, 0x006d) },
+ { USB_DEVICE(VENDOR_MICROSOFT, 0x006d),
+ .driver_info = MCE_GEN1 },
/* Philips Infrared Transceiver - Sahara branded */
{ USB_DEVICE(VENDOR_PHILIPS, 0x0608) },
/* Philips Infrared Transceiver - HP branded */
- { USB_DEVICE(VENDOR_PHILIPS, 0x060c) },
+ { USB_DEVICE(VENDOR_PHILIPS, 0x060c),
+ .driver_info = MCE_GEN2_TX_INV },
/* Philips SRM5100 */
{ USB_DEVICE(VENDOR_PHILIPS, 0x060d) },
/* Philips Infrared Transceiver - Omaura */
@@ -127,11 +211,14 @@ static struct usb_device_id mceusb_dev_table[] = {
/* Realtek MCE IR Receiver */
{ USB_DEVICE(VENDOR_REALTEK, 0x0161) },
/* SMK/Toshiba G83C0004D410 */
- { USB_DEVICE(VENDOR_SMK, 0x031d) },
+ { USB_DEVICE(VENDOR_SMK, 0x031d),
+ .driver_info = MCE_GEN2_TX_INV },
/* SMK eHome Infrared Transceiver (Sony VAIO) */
- { USB_DEVICE(VENDOR_SMK, 0x0322) },
+ { USB_DEVICE(VENDOR_SMK, 0x0322),
+ .driver_info = MCE_GEN2_TX_INV },
/* bundled with Hauppauge PVR-150 */
- { USB_DEVICE(VENDOR_SMK, 0x0334) },
+ { USB_DEVICE(VENDOR_SMK, 0x0334),
+ .driver_info = MCE_GEN2_TX_INV },
/* SMK eHome Infrared Transceiver */
{ USB_DEVICE(VENDOR_SMK, 0x0338) },
/* Tatung eHome Infrared Transceiver */
@@ -145,17 +232,23 @@ static struct usb_device_id mceusb_dev_table[] = {
/* Mitsumi */
{ USB_DEVICE(VENDOR_MITSUMI, 0x2501) },
/* Topseed eHome Infrared Transceiver */
- { USB_DEVICE(VENDOR_TOPSEED, 0x0001) },
+ { USB_DEVICE(VENDOR_TOPSEED, 0x0001),
+ .driver_info = MCE_GEN2_TX_INV },
/* Topseed HP eHome Infrared Transceiver */
- { USB_DEVICE(VENDOR_TOPSEED, 0x0006) },
+ { USB_DEVICE(VENDOR_TOPSEED, 0x0006),
+ .driver_info = MCE_GEN2_TX_INV },
/* Topseed eHome Infrared Transceiver */
- { USB_DEVICE(VENDOR_TOPSEED, 0x0007) },
+ { USB_DEVICE(VENDOR_TOPSEED, 0x0007),
+ .driver_info = MCE_GEN2_TX_INV },
/* Topseed eHome Infrared Transceiver */
- { USB_DEVICE(VENDOR_TOPSEED, 0x0008) },
+ { USB_DEVICE(VENDOR_TOPSEED, 0x0008),
+ .driver_info = MCE_GEN3 },
/* Topseed eHome Infrared Transceiver */
- { USB_DEVICE(VENDOR_TOPSEED, 0x000a) },
+ { USB_DEVICE(VENDOR_TOPSEED, 0x000a),
+ .driver_info = MCE_GEN2_TX_INV },
/* Topseed eHome Infrared Transceiver */
- { USB_DEVICE(VENDOR_TOPSEED, 0x0011) },
+ { USB_DEVICE(VENDOR_TOPSEED, 0x0011),
+ .driver_info = MCE_GEN2_TX_INV },
/* Ricavision internal Infrared Transceiver */
{ USB_DEVICE(VENDOR_RICAVISION, 0x0010) },
/* Itron ione Libra Q-11 */
@@ -185,7 +278,8 @@ static struct usb_device_id mceusb_dev_table[] = {
/* Fintek eHome Infrared Transceiver (in the AOpen MP45) */
{ USB_DEVICE(VENDOR_FINTEK, 0x0702) },
/* Pinnacle Remote Kit */
- { USB_DEVICE(VENDOR_PINNACLE, 0x0225) },
+ { USB_DEVICE(VENDOR_PINNACLE, 0x0225),
+ .driver_info = MCE_GEN3 },
/* Elitegroup Computer Systems IR */
{ USB_DEVICE(VENDOR_ECS, 0x0f38) },
/* Wistron Corp. eHome Infrared Receiver */
@@ -198,37 +292,13 @@ static struct usb_device_id mceusb_dev_table[] = {
{ USB_DEVICE(VENDOR_NORTHSTAR, 0xe004) },
/* TiVo PC IR Receiver */
{ USB_DEVICE(VENDOR_TIVO, 0x2000) },
+ /* Conexant SDK */
+ { USB_DEVICE(VENDOR_CONEXANT, 0x58a1),
+ .driver_info = POLARIS_EVK },
/* Terminating entry */
{ }
};
-static struct usb_device_id gen3_list[] = {
- { USB_DEVICE(VENDOR_PINNACLE, 0x0225) },
- { USB_DEVICE(VENDOR_TOPSEED, 0x0008) },
- {}
-};
-
-static struct usb_device_id microsoft_gen1_list[] = {
- { USB_DEVICE(VENDOR_MICROSOFT, 0x006d) },
- {}
-};
-
-static struct usb_device_id std_tx_mask_list[] = {
- { USB_DEVICE(VENDOR_MICROSOFT, 0x006d) },
- { USB_DEVICE(VENDOR_PHILIPS, 0x060c) },
- { USB_DEVICE(VENDOR_SMK, 0x031d) },
- { USB_DEVICE(VENDOR_SMK, 0x0322) },
- { USB_DEVICE(VENDOR_SMK, 0x0334) },
- { USB_DEVICE(VENDOR_TOPSEED, 0x0001) },
- { USB_DEVICE(VENDOR_TOPSEED, 0x0006) },
- { USB_DEVICE(VENDOR_TOPSEED, 0x0007) },
- { USB_DEVICE(VENDOR_TOPSEED, 0x0008) },
- { USB_DEVICE(VENDOR_TOPSEED, 0x000a) },
- { USB_DEVICE(VENDOR_TOPSEED, 0x0011) },
- { USB_DEVICE(VENDOR_PINNACLE, 0x0225) },
- {}
-};
-
/* data structure for each usb transceiver */
struct mceusb_dev {
/* ir-core bits */
@@ -248,8 +318,15 @@ struct mceusb_dev {
/* buffers and dma */
unsigned char *buf_in;
unsigned int len_in;
- u8 cmd; /* MCE command type */
- u8 rem; /* Remaining IR data bytes in packet */
+
+ enum {
+ CMD_HEADER = 0,
+ SUBCMD,
+ CMD_DATA,
+ PARSE_IRDATA,
+ } parser_state;
+ u8 cmd, rem; /* Remaining IR data bytes in packet */
+
dma_addr_t dma_in;
dma_addr_t dma_out;
@@ -257,7 +334,6 @@ struct mceusb_dev {
u32 connected:1;
u32 tx_mask_inverted:1;
u32 microsoft_gen1:1;
- u32 reserved:29;
} flags;
/* transmit support */
@@ -267,6 +343,7 @@ struct mceusb_dev {
char name[128];
char phys[64];
+ enum mceusb_model_type model;
};
/*
@@ -291,43 +368,81 @@ struct mceusb_dev {
* - SET_RX_TIMEOUT sets the receiver timeout
* - SET_RX_SENSOR sets which receiver sensor to use
*/
-static char DEVICE_RESET[] = {0x00, 0xff, 0xaa};
-static char GET_REVISION[] = {0xff, 0x0b};
-static char GET_UNKNOWN[] = {0xff, 0x18};
-static char GET_UNKNOWN2[] = {0x9f, 0x05};
-static char GET_CARRIER_FREQ[] = {0x9f, 0x07};
-static char GET_RX_TIMEOUT[] = {0x9f, 0x0d};
-static char GET_TX_BITMASK[] = {0x9f, 0x13};
-static char GET_RX_SENSOR[] = {0x9f, 0x15};
+static char DEVICE_RESET[] = {MCE_COMMAND_NULL, MCE_HW_CMD_HEADER,
+ MCE_CMD_DEVICE_RESET};
+static char GET_REVISION[] = {MCE_HW_CMD_HEADER, MCE_CMD_G_REVISION};
+static char GET_UNKNOWN[] = {MCE_HW_CMD_HEADER, MCE_CMD_UNKNOWN7};
+static char GET_UNKNOWN2[] = {MCE_COMMAND_HEADER, MCE_CMD_UNKNOWN2};
+static char GET_CARRIER_FREQ[] = {MCE_COMMAND_HEADER, MCE_CMD_G_CARRIER};
+static char GET_RX_TIMEOUT[] = {MCE_COMMAND_HEADER, MCE_CMD_G_TIMEOUT};
+static char GET_TX_BITMASK[] = {MCE_COMMAND_HEADER, MCE_CMD_G_TXMASK};
+static char GET_RX_SENSOR[] = {MCE_COMMAND_HEADER, MCE_CMD_G_RXSENSOR};
/* sub in desired values in lower byte or bytes for full command */
/* FIXME: make use of these for transmit.
-static char SET_CARRIER_FREQ[] = {0x9f, 0x06, 0x00, 0x00};
-static char SET_TX_BITMASK[] = {0x9f, 0x08, 0x00};
-static char SET_RX_TIMEOUT[] = {0x9f, 0x0c, 0x00, 0x00};
-static char SET_RX_SENSOR[] = {0x9f, 0x14, 0x00};
+static char SET_CARRIER_FREQ[] = {MCE_COMMAND_HEADER,
+ MCE_CMD_S_CARRIER, 0x00, 0x00};
+static char SET_TX_BITMASK[] = {MCE_COMMAND_HEADER, MCE_CMD_S_TXMASK, 0x00};
+static char SET_RX_TIMEOUT[] = {MCE_COMMAND_HEADER,
+ MCE_CMD_S_TIMEOUT, 0x00, 0x00};
+static char SET_RX_SENSOR[] = {MCE_COMMAND_HEADER,
+ MCE_CMD_S_RXSENSOR, 0x00};
*/
+static int mceusb_cmdsize(u8 cmd, u8 subcmd)
+{
+ int datasize = 0;
+
+ switch (cmd) {
+ case MCE_COMMAND_NULL:
+ if (subcmd == MCE_HW_CMD_HEADER)
+ datasize = 1;
+ break;
+ case MCE_HW_CMD_HEADER:
+ switch (subcmd) {
+ case MCE_CMD_G_REVISION:
+ datasize = 2;
+ break;
+ }
+ case MCE_COMMAND_HEADER:
+ switch (subcmd) {
+ case MCE_CMD_UNKNOWN:
+ case MCE_CMD_S_CARRIER:
+ case MCE_CMD_S_TIMEOUT:
+ case MCE_CMD_G_RXSENSOR:
+ datasize = 2;
+ break;
+ case MCE_CMD_S_TXMASK:
+ case MCE_CMD_S_RXSENSOR:
+ datasize = 1;
+ break;
+ }
+ }
+ return datasize;
+}
+
static void mceusb_dev_printdata(struct mceusb_dev *ir, char *buf,
- int len, bool out)
+ int offset, int len, bool out)
{
char codes[USB_BUFLEN * 3 + 1];
char inout[9];
- int i;
u8 cmd, subcmd, data1, data2;
struct device *dev = ir->dev;
- int idx = 0;
+ int i, start, skip = 0;
+
+ if (!debug)
+ return;
/* skip meaningless 0xb1 0x60 header bytes on orig receiver */
if (ir->flags.microsoft_gen1 && !out)
- idx = 2;
+ skip = 2;
- if (len <= idx)
+ if (len <= skip)
return;
for (i = 0; i < len && i < USB_BUFLEN; i++)
- snprintf(codes + i * 3, 4, "%02x ", buf[i] & 0xFF);
+ snprintf(codes + i * 3, 4, "%02x ", buf[i + offset] & 0xff);
- dev_info(dev, "%sx data: %s (length=%d)\n",
+ dev_info(dev, "%sx data: %s(length=%d)\n",
(out ? "t" : "r"), codes, len);
if (out)
@@ -335,91 +450,93 @@ static void mceusb_dev_printdata(struct mceusb_dev *ir, char *buf,
else
strcpy(inout, "Got\0");
- cmd = buf[idx] & 0xff;
- subcmd = buf[idx + 1] & 0xff;
- data1 = buf[idx + 2] & 0xff;
- data2 = buf[idx + 3] & 0xff;
+ start = offset + skip;
+ cmd = buf[start] & 0xff;
+ subcmd = buf[start + 1] & 0xff;
+ data1 = buf[start + 2] & 0xff;
+ data2 = buf[start + 3] & 0xff;
switch (cmd) {
- case 0x00:
- if (subcmd == 0xff && data1 == 0xaa)
+ case MCE_COMMAND_NULL:
+ if ((subcmd == MCE_HW_CMD_HEADER) &&
+ (data1 == MCE_CMD_DEVICE_RESET))
dev_info(dev, "Device reset requested\n");
else
dev_info(dev, "Unknown command 0x%02x 0x%02x\n",
cmd, subcmd);
break;
- case 0xff:
+ case MCE_HW_CMD_HEADER:
switch (subcmd) {
- case 0x0b:
+ case MCE_CMD_G_REVISION:
if (len == 2)
dev_info(dev, "Get hw/sw rev?\n");
else
dev_info(dev, "hw/sw rev 0x%02x 0x%02x "
"0x%02x 0x%02x\n", data1, data2,
- buf[idx + 4], buf[idx + 5]);
+ buf[start + 4], buf[start + 5]);
break;
- case 0xaa:
+ case MCE_CMD_DEVICE_RESET:
dev_info(dev, "Device reset requested\n");
break;
- case 0xfe:
+ case MCE_RSP_CMD_INVALID:
dev_info(dev, "Previous command not supported\n");
break;
- case 0x18:
- case 0x1b:
+ case MCE_CMD_UNKNOWN7:
+ case MCE_CMD_UNKNOWN9:
default:
dev_info(dev, "Unknown command 0x%02x 0x%02x\n",
cmd, subcmd);
break;
}
break;
- case 0x9f:
+ case MCE_COMMAND_HEADER:
switch (subcmd) {
- case 0x03:
+ case MCE_CMD_PING:
dev_info(dev, "Ping\n");
break;
- case 0x04:
+ case MCE_CMD_UNKNOWN:
dev_info(dev, "Resp to 9f 05 of 0x%02x 0x%02x\n",
data1, data2);
break;
- case 0x06:
+ case MCE_CMD_S_CARRIER:
dev_info(dev, "%s carrier mode and freq of "
"0x%02x 0x%02x\n", inout, data1, data2);
break;
- case 0x07:
+ case MCE_CMD_G_CARRIER:
dev_info(dev, "Get carrier mode and freq\n");
break;
- case 0x08:
+ case MCE_CMD_S_TXMASK:
dev_info(dev, "%s transmit blaster mask of 0x%02x\n",
inout, data1);
break;
- case 0x0c:
+ case MCE_CMD_S_TIMEOUT:
/* value is in units of 50us, so x*50/100 or x/2 ms */
dev_info(dev, "%s receive timeout of %d ms\n",
inout, ((data1 << 8) | data2) / 2);
break;
- case 0x0d:
+ case MCE_CMD_G_TIMEOUT:
dev_info(dev, "Get receive timeout\n");
break;
- case 0x13:
+ case MCE_CMD_G_TXMASK:
dev_info(dev, "Get transmit blaster mask\n");
break;
- case 0x14:
+ case MCE_CMD_S_RXSENSOR:
dev_info(dev, "%s %s-range receive sensor in use\n",
inout, data1 == 0x02 ? "short" : "long");
break;
- case 0x15:
+ case MCE_CMD_G_RXSENSOR:
if (len == 2)
dev_info(dev, "Get receive sensor\n");
else
dev_info(dev, "Received pulse count is %d\n",
((data1 << 8) | data2));
break;
- case 0xfe:
+ case MCE_RSP_CMD_INVALID:
dev_info(dev, "Error! Hardware is likely wedged...\n");
break;
- case 0x05:
- case 0x09:
- case 0x0f:
+ case MCE_CMD_UNKNOWN2:
+ case MCE_CMD_UNKNOWN3:
+ case MCE_CMD_UNKNOWN5:
default:
dev_info(dev, "Unknown command 0x%02x 0x%02x\n",
cmd, subcmd);
@@ -429,6 +546,12 @@ static void mceusb_dev_printdata(struct mceusb_dev *ir, char *buf,
default:
break;
}
+
+ if (cmd == MCE_IRDATA_TRAILER)
+ dev_info(dev, "End of raw IR data\n");
+ else if ((cmd != MCE_COMMAND_HEADER) &&
+ ((cmd & MCE_COMMAND_MASK) == MCE_COMMAND_IRDATA))
+ dev_info(dev, "Raw IR data, %d pulse/space samples\n", ir->rem);
}
static void mce_async_callback(struct urb *urb, struct pt_regs *regs)
@@ -446,9 +569,7 @@ static void mce_async_callback(struct urb *urb, struct pt_regs *regs)
dev_dbg(ir->dev, "callback called (status=%d len=%d)\n",
urb->status, len);
- if (debug)
- mceusb_dev_printdata(ir, urb->transfer_buffer,
- len, true);
+ mceusb_dev_printdata(ir, urb->transfer_buffer, 0, len, true);
}
}
@@ -536,8 +657,8 @@ static int mceusb_tx_ir(void *priv, int *txbuf, u32 n)
return -ENOMEM;
/* MCE tx init header */
- cmdbuf[cmdcount++] = MCE_CONTROL_HEADER;
- cmdbuf[cmdcount++] = 0x08;
+ cmdbuf[cmdcount++] = MCE_COMMAND_HEADER;
+ cmdbuf[cmdcount++] = MCE_CMD_S_TXMASK;
cmdbuf[cmdcount++] = ir->tx_mask;
/* Generate mce packet data */
@@ -551,7 +672,7 @@ static int mceusb_tx_ir(void *priv, int *txbuf, u32 n)
if ((cmdcount < MCE_CMDBUF_SIZE) &&
(cmdcount - MCE_TX_HEADER_LENGTH) %
MCE_CODE_LENGTH == 0)
- cmdbuf[cmdcount++] = MCE_PACKET_HEADER;
+ cmdbuf[cmdcount++] = MCE_IRDATA_HEADER;
/* Insert mce packet data */
if (cmdcount < MCE_CMDBUF_SIZE)
@@ -570,7 +691,8 @@ static int mceusb_tx_ir(void *priv, int *txbuf, u32 n)
/* Fix packet length in last header */
cmdbuf[cmdcount - (cmdcount - MCE_TX_HEADER_LENGTH) % MCE_CODE_LENGTH] =
- 0x80 + (cmdcount - MCE_TX_HEADER_LENGTH) % MCE_CODE_LENGTH - 1;
+ MCE_COMMAND_IRDATA + (cmdcount - MCE_TX_HEADER_LENGTH) %
+ MCE_CODE_LENGTH - 1;
/* Check if we have room for the empty packet at the end */
if (cmdcount >= MCE_CMDBUF_SIZE) {
@@ -579,7 +701,7 @@ static int mceusb_tx_ir(void *priv, int *txbuf, u32 n)
}
/* All mce commands end with an empty packet (0x80) */
- cmdbuf[cmdcount++] = 0x80;
+ cmdbuf[cmdcount++] = MCE_IRDATA_TRAILER;
/* Transmit the command to the mce device */
mce_async_out(ir, cmdbuf, cmdcount);
@@ -608,7 +730,8 @@ static int mceusb_set_tx_mask(void *priv, u32 mask)
struct mceusb_dev *ir = priv;
if (ir->flags.tx_mask_inverted)
- ir->tx_mask = (mask != 0x03 ? mask ^ 0x03 : mask) << 1;
+ ir->tx_mask = (mask != MCE_DEFAULT_TX_MASK ?
+ mask ^ MCE_DEFAULT_TX_MASK : mask) << 1;
else
ir->tx_mask = mask;
@@ -621,7 +744,8 @@ static int mceusb_set_tx_carrier(void *priv, u32 carrier)
struct mceusb_dev *ir = priv;
int clk = 10000000;
int prescaler = 0, divisor = 0;
- unsigned char cmdbuf[4] = { 0x9f, 0x06, 0x00, 0x00 };
+ unsigned char cmdbuf[4] = { MCE_COMMAND_HEADER,
+ MCE_CMD_S_CARRIER, 0x00, 0x00 };
/* Carrier has changed */
if (ir->carrier != carrier) {
@@ -629,7 +753,7 @@ static int mceusb_set_tx_carrier(void *priv, u32 carrier)
if (carrier == 0) {
ir->carrier = carrier;
cmdbuf[2] = 0x01;
- cmdbuf[3] = 0x80;
+ cmdbuf[3] = MCE_IRDATA_TRAILER;
dev_dbg(ir->dev, "%s: disabling carrier "
"modulation\n", __func__);
mce_async_out(ir, cmdbuf, sizeof(cmdbuf));
@@ -638,7 +762,7 @@ static int mceusb_set_tx_carrier(void *priv, u32 carrier)
for (prescaler = 0; prescaler < 4; ++prescaler) {
divisor = (clk >> (2 * prescaler)) / carrier;
- if (divisor <= 0xFF) {
+ if (divisor <= 0xff) {
ir->carrier = carrier;
cmdbuf[2] = prescaler;
cmdbuf[3] = divisor;
@@ -660,47 +784,36 @@ static int mceusb_set_tx_carrier(void *priv, u32 carrier)
static void mceusb_process_ir_data(struct mceusb_dev *ir, int buf_len)
{
- struct ir_raw_event rawir = { .pulse = false, .duration = 0 };
- int i, start_index = 0;
- u8 hdr = MCE_CONTROL_HEADER;
+ DEFINE_IR_RAW_EVENT(rawir);
+ int i = 0;
/* skip meaningless 0xb1 0x60 header bytes on orig receiver */
if (ir->flags.microsoft_gen1)
- start_index = 2;
-
- for (i = start_index; i < buf_len;) {
- if (ir->rem == 0) {
- /* decode mce packets of the form (84),AA,BB,CC,DD */
- /* IR data packets can span USB messages - rem */
- hdr = ir->buf_in[i];
- ir->rem = (hdr & MCE_PACKET_LENGTH_MASK);
- ir->cmd = (hdr & ~MCE_PACKET_LENGTH_MASK);
- dev_dbg(ir->dev, "New data. rem: 0x%02x, cmd: 0x%02x\n",
- ir->rem, ir->cmd);
- i++;
- }
-
- /* don't process MCE commands */
- if (hdr == MCE_CONTROL_HEADER || hdr == 0xff) {
- ir->rem = 0;
- return;
- }
-
- for (; (ir->rem > 0) && (i < buf_len); i++) {
+ i = 2;
+
+ for (; i < buf_len; i++) {
+ switch (ir->parser_state) {
+ case SUBCMD:
+ ir->rem = mceusb_cmdsize(ir->cmd, ir->buf_in[i]);
+ mceusb_dev_printdata(ir, ir->buf_in, i - 1,
+ ir->rem + 2, false);
+ ir->parser_state = CMD_DATA;
+ break;
+ case PARSE_IRDATA:
ir->rem--;
-
rawir.pulse = ((ir->buf_in[i] & MCE_PULSE_BIT) != 0);
rawir.duration = (ir->buf_in[i] & MCE_PULSE_MASK)
* MCE_TIME_UNIT * 1000;
if ((ir->buf_in[i] & MCE_PULSE_MASK) == 0x7f) {
- if (ir->rawir.pulse == rawir.pulse)
+ if (ir->rawir.pulse == rawir.pulse) {
ir->rawir.duration += rawir.duration;
- else {
+ } else {
ir->rawir.duration = rawir.duration;
ir->rawir.pulse = rawir.pulse;
}
- continue;
+ if (ir->rem)
+ break;
}
rawir.duration += ir->rawir.duration;
ir->rawir.duration = 0;
@@ -711,14 +824,40 @@ static void mceusb_process_ir_data(struct mceusb_dev *ir, int buf_len)
rawir.duration);
ir_raw_event_store(ir->idev, &rawir);
+ break;
+ case CMD_DATA:
+ ir->rem--;
+ break;
+ case CMD_HEADER:
+ /* decode mce packets of the form (84),AA,BB,CC,DD */
+ /* IR data packets can span USB messages - rem */
+ ir->cmd = ir->buf_in[i];
+ if ((ir->cmd == MCE_COMMAND_HEADER) ||
+ ((ir->cmd & MCE_COMMAND_MASK) !=
+ MCE_COMMAND_IRDATA)) {
+ ir->parser_state = SUBCMD;
+ continue;
+ }
+ ir->rem = (ir->cmd & MCE_PACKET_LENGTH_MASK);
+ mceusb_dev_printdata(ir, ir->buf_in, i, ir->rem + 1, false);
+ if (ir->rem) {
+ ir->parser_state = PARSE_IRDATA;
+ break;
+ }
+ /*
+ * a packet with len=0 (e.g. 0x80) means end of
+ * data. We could use it to trigger the call to
+ * ir_raw_event_handle(). For now, we don't need to
+ * use it.
+ */
+ break;
}
- if (ir->buf_in[i] == 0x80 || ir->buf_in[i] == 0x9f)
- ir->rem = 0;
-
- dev_dbg(ir->dev, "calling ir_raw_event_handle\n");
- ir_raw_event_handle(ir->idev);
+ if (ir->parser_state != CMD_HEADER && !ir->rem)
+ ir->parser_state = CMD_HEADER;
}
+ dev_dbg(ir->dev, "processed IR data, calling ir_raw_event_handle\n");
+ ir_raw_event_handle(ir->idev);
}
static void mceusb_dev_recv(struct urb *urb, struct pt_regs *regs)
@@ -737,9 +876,6 @@ static void mceusb_dev_recv(struct urb *urb, struct pt_regs *regs)
buf_len = urb->actual_length;
- if (debug)
- mceusb_dev_printdata(ir, urb->transfer_buffer, buf_len, false);
-
if (ir->send_flags == RECV_FLAG_IN_PROGRESS) {
ir->send_flags = SEND_FLAG_COMPLETE;
dev_dbg(ir->dev, "setup answer received %d bytes\n",
@@ -760,6 +896,7 @@ static void mceusb_dev_recv(struct urb *urb, struct pt_regs *regs)
case -EPIPE:
default:
+ dev_dbg(ir->dev, "Error: urb status = %d\n", urb->status);
break;
}
@@ -865,6 +1002,8 @@ static struct input_dev *mceusb_init_input_dev(struct mceusb_dev *ir)
struct input_dev *idev;
struct ir_dev_props *props;
struct device *dev = ir->dev;
+ const char *rc_map = RC_MAP_RC6_MCE;
+ const char *name = "Media Center Ed. eHome Infrared Remote Transceiver";
int ret = -ENODEV;
idev = input_allocate_device();
@@ -880,8 +1019,11 @@ static struct input_dev *mceusb_init_input_dev(struct mceusb_dev *ir)
goto props_alloc_failed;
}
- snprintf(ir->name, sizeof(ir->name), "Media Center Ed. eHome "
- "Infrared Remote Transceiver (%04x:%04x)",
+ if (mceusb_model[ir->model].name)
+ name = mceusb_model[ir->model].name;
+
+ snprintf(ir->name, sizeof(ir->name), "%s (%04x:%04x)",
+ name,
le16_to_cpu(ir->usbdev->descriptor.idVendor),
le16_to_cpu(ir->usbdev->descriptor.idProduct));
@@ -899,7 +1041,10 @@ static struct input_dev *mceusb_init_input_dev(struct mceusb_dev *ir)
ir->props = props;
- ret = ir_input_register(idev, RC_MAP_RC6_MCE, props, DRIVER_NAME);
+ if (mceusb_model[ir->model].rc_map)
+ rc_map = mceusb_model[ir->model].rc_map;
+
+ ret = ir_input_register(idev, rc_map, props, DRIVER_NAME);
if (ret < 0) {
dev_err(dev, "remote input device register failed\n");
goto irdev_failed;
@@ -926,17 +1071,26 @@ static int __devinit mceusb_dev_probe(struct usb_interface *intf,
struct mceusb_dev *ir = NULL;
int pipe, maxp, i;
char buf[63], name[128] = "";
+ enum mceusb_model_type model = id->driver_info;
bool is_gen3;
bool is_microsoft_gen1;
bool tx_mask_inverted;
+ bool is_polaris;
dev_dbg(&intf->dev, ": %s called\n", __func__);
idesc = intf->cur_altsetting;
- is_gen3 = usb_match_id(intf, gen3_list) ? 1 : 0;
- is_microsoft_gen1 = usb_match_id(intf, microsoft_gen1_list) ? 1 : 0;
- tx_mask_inverted = usb_match_id(intf, std_tx_mask_list) ? 0 : 1;
+ is_gen3 = mceusb_model[model].mce_gen3;
+ is_microsoft_gen1 = mceusb_model[model].mce_gen1;
+ tx_mask_inverted = mceusb_model[model].tx_mask_inverted;
+ is_polaris = mceusb_model[model].is_polaris;
+
+ if (is_polaris) {
+ /* Interface 0 is IR */
+ if (idesc->desc.bInterfaceNumber)
+ return -ENODEV;
+ }
/* step through the endpoints to find first bulk in and out endpoint */
for (i = 0; i < idesc->desc.bNumEndpoints; ++i) {
@@ -997,6 +1151,9 @@ static int __devinit mceusb_dev_probe(struct usb_interface *intf,
ir->len_in = maxp;
ir->flags.microsoft_gen1 = is_microsoft_gen1;
ir->flags.tx_mask_inverted = tx_mask_inverted;
+ ir->model = model;
+
+ init_ir_raw_event(&ir->rawir);
/* Saving usb interface data for use by the transmitter routine */
ir->usb_ep_in = ep_in;
diff --git a/drivers/media/IR/nuvoton-cir.c b/drivers/media/IR/nuvoton-cir.c
new file mode 100644
index 000000000000..301be53aee85
--- /dev/null
+++ b/drivers/media/IR/nuvoton-cir.c
@@ -0,0 +1,1246 @@
+/*
+ * Driver for Nuvoton Technology Corporation w83667hg/w83677hg-i CIR
+ *
+ * Copyright (C) 2010 Jarod Wilson <jarod@redhat.com>
+ * Copyright (C) 2009 Nuvoton PS Team
+ *
+ * Special thanks to Nuvoton for providing hardware, spec sheets and
+ * sample code upon which portions of this driver are based. Indirect
+ * thanks also to Maxim Levitsky, whose ene_ir driver this driver is
+ * modeled after.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation; either version 2 of the
+ * License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307
+ * USA
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/pnp.h>
+#include <linux/io.h>
+#include <linux/interrupt.h>
+#include <linux/sched.h>
+#include <linux/slab.h>
+#include <linux/input.h>
+#include <media/ir-core.h>
+#include <linux/pci_ids.h>
+
+#include "nuvoton-cir.h"
+
+static char *chip_id = "w836x7hg";
+
+/* write val to config reg */
+static inline void nvt_cr_write(struct nvt_dev *nvt, u8 val, u8 reg)
+{
+ outb(reg, nvt->cr_efir);
+ outb(val, nvt->cr_efdr);
+}
+
+/* read val from config reg */
+static inline u8 nvt_cr_read(struct nvt_dev *nvt, u8 reg)
+{
+ outb(reg, nvt->cr_efir);
+ return inb(nvt->cr_efdr);
+}
+
+/* update config register bit without changing other bits */
+static inline void nvt_set_reg_bit(struct nvt_dev *nvt, u8 val, u8 reg)
+{
+ u8 tmp = nvt_cr_read(nvt, reg) | val;
+ nvt_cr_write(nvt, tmp, reg);
+}
+
+/* clear config register bit without changing other bits */
+static inline void nvt_clear_reg_bit(struct nvt_dev *nvt, u8 val, u8 reg)
+{
+ u8 tmp = nvt_cr_read(nvt, reg) & ~val;
+ nvt_cr_write(nvt, tmp, reg);
+}
+
+/* enter extended function mode */
+static inline void nvt_efm_enable(struct nvt_dev *nvt)
+{
+ /* Enabling Extended Function Mode explicitly requires writing 2x */
+ outb(EFER_EFM_ENABLE, nvt->cr_efir);
+ outb(EFER_EFM_ENABLE, nvt->cr_efir);
+}
+
+/* exit extended function mode */
+static inline void nvt_efm_disable(struct nvt_dev *nvt)
+{
+ outb(EFER_EFM_DISABLE, nvt->cr_efir);
+}
+
+/*
+ * When you want to address a specific logical device, write its logical
+ * device number to CR_LOGICAL_DEV_SEL, then enable/disable by writing
+ * 0x1/0x0 respectively to CR_LOGICAL_DEV_EN.
+ */
+static inline void nvt_select_logical_dev(struct nvt_dev *nvt, u8 ldev)
+{
+ outb(CR_LOGICAL_DEV_SEL, nvt->cr_efir);
+ outb(ldev, nvt->cr_efdr);
+}
+
+/* write val to cir config register */
+static inline void nvt_cir_reg_write(struct nvt_dev *nvt, u8 val, u8 offset)
+{
+ outb(val, nvt->cir_addr + offset);
+}
+
+/* read val from cir config register */
+static u8 nvt_cir_reg_read(struct nvt_dev *nvt, u8 offset)
+{
+ u8 val;
+
+ val = inb(nvt->cir_addr + offset);
+
+ return val;
+}
+
+/* write val to cir wake register */
+static inline void nvt_cir_wake_reg_write(struct nvt_dev *nvt,
+ u8 val, u8 offset)
+{
+ outb(val, nvt->cir_wake_addr + offset);
+}
+
+/* read val from cir wake config register */
+static u8 nvt_cir_wake_reg_read(struct nvt_dev *nvt, u8 offset)
+{
+ u8 val;
+
+ val = inb(nvt->cir_wake_addr + offset);
+
+ return val;
+}
+
+#define pr_reg(text, ...) \
+ printk(KERN_INFO KBUILD_MODNAME ": " text, ## __VA_ARGS__)
+
+/* dump current cir register contents */
+static void cir_dump_regs(struct nvt_dev *nvt)
+{
+ nvt_efm_enable(nvt);
+ nvt_select_logical_dev(nvt, LOGICAL_DEV_CIR);
+
+ pr_reg("%s: Dump CIR logical device registers:\n", NVT_DRIVER_NAME);
+ pr_reg(" * CR CIR ACTIVE : 0x%x\n",
+ nvt_cr_read(nvt, CR_LOGICAL_DEV_EN));
+ pr_reg(" * CR CIR BASE ADDR: 0x%x\n",
+ (nvt_cr_read(nvt, CR_CIR_BASE_ADDR_HI) << 8) |
+ nvt_cr_read(nvt, CR_CIR_BASE_ADDR_LO));
+ pr_reg(" * CR CIR IRQ NUM: 0x%x\n",
+ nvt_cr_read(nvt, CR_CIR_IRQ_RSRC));
+
+ nvt_efm_disable(nvt);
+
+ pr_reg("%s: Dump CIR registers:\n", NVT_DRIVER_NAME);
+ pr_reg(" * IRCON: 0x%x\n", nvt_cir_reg_read(nvt, CIR_IRCON));
+ pr_reg(" * IRSTS: 0x%x\n", nvt_cir_reg_read(nvt, CIR_IRSTS));
+ pr_reg(" * IREN: 0x%x\n", nvt_cir_reg_read(nvt, CIR_IREN));
+ pr_reg(" * RXFCONT: 0x%x\n", nvt_cir_reg_read(nvt, CIR_RXFCONT));
+ pr_reg(" * CP: 0x%x\n", nvt_cir_reg_read(nvt, CIR_CP));
+ pr_reg(" * CC: 0x%x\n", nvt_cir_reg_read(nvt, CIR_CC));
+ pr_reg(" * SLCH: 0x%x\n", nvt_cir_reg_read(nvt, CIR_SLCH));
+ pr_reg(" * SLCL: 0x%x\n", nvt_cir_reg_read(nvt, CIR_SLCL));
+ pr_reg(" * FIFOCON: 0x%x\n", nvt_cir_reg_read(nvt, CIR_FIFOCON));
+ pr_reg(" * IRFIFOSTS: 0x%x\n", nvt_cir_reg_read(nvt, CIR_IRFIFOSTS));
+ pr_reg(" * SRXFIFO: 0x%x\n", nvt_cir_reg_read(nvt, CIR_SRXFIFO));
+ pr_reg(" * TXFCONT: 0x%x\n", nvt_cir_reg_read(nvt, CIR_TXFCONT));
+ pr_reg(" * STXFIFO: 0x%x\n", nvt_cir_reg_read(nvt, CIR_STXFIFO));
+ pr_reg(" * FCCH: 0x%x\n", nvt_cir_reg_read(nvt, CIR_FCCH));
+ pr_reg(" * FCCL: 0x%x\n", nvt_cir_reg_read(nvt, CIR_FCCL));
+ pr_reg(" * IRFSM: 0x%x\n", nvt_cir_reg_read(nvt, CIR_IRFSM));
+}
+
+/* dump current cir wake register contents */
+static void cir_wake_dump_regs(struct nvt_dev *nvt)
+{
+ u8 i, fifo_len;
+
+ nvt_efm_enable(nvt);
+ nvt_select_logical_dev(nvt, LOGICAL_DEV_CIR_WAKE);
+
+ pr_reg("%s: Dump CIR WAKE logical device registers:\n",
+ NVT_DRIVER_NAME);
+ pr_reg(" * CR CIR WAKE ACTIVE : 0x%x\n",
+ nvt_cr_read(nvt, CR_LOGICAL_DEV_EN));
+ pr_reg(" * CR CIR WAKE BASE ADDR: 0x%x\n",
+ (nvt_cr_read(nvt, CR_CIR_BASE_ADDR_HI) << 8) |
+ nvt_cr_read(nvt, CR_CIR_BASE_ADDR_LO));
+ pr_reg(" * CR CIR WAKE IRQ NUM: 0x%x\n",
+ nvt_cr_read(nvt, CR_CIR_IRQ_RSRC));
+
+ nvt_efm_disable(nvt);
+
+ pr_reg("%s: Dump CIR WAKE registers\n", NVT_DRIVER_NAME);
+ pr_reg(" * IRCON: 0x%x\n",
+ nvt_cir_wake_reg_read(nvt, CIR_WAKE_IRCON));
+ pr_reg(" * IRSTS: 0x%x\n",
+ nvt_cir_wake_reg_read(nvt, CIR_WAKE_IRSTS));
+ pr_reg(" * IREN: 0x%x\n",
+ nvt_cir_wake_reg_read(nvt, CIR_WAKE_IREN));
+ pr_reg(" * FIFO CMP DEEP: 0x%x\n",
+ nvt_cir_wake_reg_read(nvt, CIR_WAKE_FIFO_CMP_DEEP));
+ pr_reg(" * FIFO CMP TOL: 0x%x\n",
+ nvt_cir_wake_reg_read(nvt, CIR_WAKE_FIFO_CMP_TOL));
+ pr_reg(" * FIFO COUNT: 0x%x\n",
+ nvt_cir_wake_reg_read(nvt, CIR_WAKE_FIFO_COUNT));
+ pr_reg(" * SLCH: 0x%x\n",
+ nvt_cir_wake_reg_read(nvt, CIR_WAKE_SLCH));
+ pr_reg(" * SLCL: 0x%x\n",
+ nvt_cir_wake_reg_read(nvt, CIR_WAKE_SLCL));
+ pr_reg(" * FIFOCON: 0x%x\n",
+ nvt_cir_wake_reg_read(nvt, CIR_WAKE_FIFOCON));
+ pr_reg(" * SRXFSTS: 0x%x\n",
+ nvt_cir_wake_reg_read(nvt, CIR_WAKE_SRXFSTS));
+ pr_reg(" * SAMPLE RX FIFO: 0x%x\n",
+ nvt_cir_wake_reg_read(nvt, CIR_WAKE_SAMPLE_RX_FIFO));
+ pr_reg(" * WR FIFO DATA: 0x%x\n",
+ nvt_cir_wake_reg_read(nvt, CIR_WAKE_WR_FIFO_DATA));
+ pr_reg(" * RD FIFO ONLY: 0x%x\n",
+ nvt_cir_wake_reg_read(nvt, CIR_WAKE_RD_FIFO_ONLY));
+ pr_reg(" * RD FIFO ONLY IDX: 0x%x\n",
+ nvt_cir_wake_reg_read(nvt, CIR_WAKE_RD_FIFO_ONLY_IDX));
+ pr_reg(" * FIFO IGNORE: 0x%x\n",
+ nvt_cir_wake_reg_read(nvt, CIR_WAKE_FIFO_IGNORE));
+ pr_reg(" * IRFSM: 0x%x\n",
+ nvt_cir_wake_reg_read(nvt, CIR_WAKE_IRFSM));
+
+ fifo_len = nvt_cir_wake_reg_read(nvt, CIR_WAKE_FIFO_COUNT);
+ pr_reg("%s: Dump CIR WAKE FIFO (len %d)\n", NVT_DRIVER_NAME, fifo_len);
+ pr_reg("* Contents = ");
+ for (i = 0; i < fifo_len; i++)
+ printk(KERN_CONT "%02x ",
+ nvt_cir_wake_reg_read(nvt, CIR_WAKE_RD_FIFO_ONLY));
+ printk(KERN_CONT "\n");
+}
+
+/* detect hardware features */
+static int nvt_hw_detect(struct nvt_dev *nvt)
+{
+ unsigned long flags;
+ u8 chip_major, chip_minor;
+ int ret = 0;
+
+ nvt_efm_enable(nvt);
+
+ /* Check if we're wired for the alternate EFER setup */
+ chip_major = nvt_cr_read(nvt, CR_CHIP_ID_HI);
+ if (chip_major == 0xff) {
+ nvt->cr_efir = CR_EFIR2;
+ nvt->cr_efdr = CR_EFDR2;
+ nvt_efm_enable(nvt);
+ chip_major = nvt_cr_read(nvt, CR_CHIP_ID_HI);
+ }
+
+ chip_minor = nvt_cr_read(nvt, CR_CHIP_ID_LO);
+ nvt_dbg("%s: chip id: 0x%02x 0x%02x", chip_id, chip_major, chip_minor);
+
+ if (chip_major != CHIP_ID_HIGH ||
+ (chip_minor != CHIP_ID_LOW && chip_minor != CHIP_ID_LOW2))
+ ret = -ENODEV;
+
+ nvt_efm_disable(nvt);
+
+ spin_lock_irqsave(&nvt->nvt_lock, flags);
+ nvt->chip_major = chip_major;
+ nvt->chip_minor = chip_minor;
+ spin_unlock_irqrestore(&nvt->nvt_lock, flags);
+
+ return ret;
+}
+
+static void nvt_cir_ldev_init(struct nvt_dev *nvt)
+{
+ u8 val;
+
+ /* output pin selection (Pin95=CIRRX, Pin96=CIRTX1), WB enabled */
+ val = nvt_cr_read(nvt, CR_OUTPUT_PIN_SEL);
+ val &= OUTPUT_PIN_SEL_MASK;
+ val |= (OUTPUT_ENABLE_CIR | OUTPUT_ENABLE_CIRWB);
+ nvt_cr_write(nvt, val, CR_OUTPUT_PIN_SEL);
+
+ /* Select CIR logical device and enable */
+ nvt_select_logical_dev(nvt, LOGICAL_DEV_CIR);
+ nvt_cr_write(nvt, LOGICAL_DEV_ENABLE, CR_LOGICAL_DEV_EN);
+
+ nvt_cr_write(nvt, nvt->cir_addr >> 8, CR_CIR_BASE_ADDR_HI);
+ nvt_cr_write(nvt, nvt->cir_addr & 0xff, CR_CIR_BASE_ADDR_LO);
+
+ nvt_cr_write(nvt, nvt->cir_irq, CR_CIR_IRQ_RSRC);
+
+ nvt_dbg("CIR initialized, base io port address: 0x%lx, irq: %d",
+ nvt->cir_addr, nvt->cir_irq);
+}
+
+static void nvt_cir_wake_ldev_init(struct nvt_dev *nvt)
+{
+ /* Select ACPI logical device, enable it and CIR Wake */
+ nvt_select_logical_dev(nvt, LOGICAL_DEV_ACPI);
+ nvt_cr_write(nvt, LOGICAL_DEV_ENABLE, CR_LOGICAL_DEV_EN);
+
+ /* Enable CIR Wake via PSOUT# (Pin60) */
+ nvt_set_reg_bit(nvt, CIR_WAKE_ENABLE_BIT, CR_ACPI_CIR_WAKE);
+
+ /* enable cir interrupt of mouse/keyboard IRQ event */
+ nvt_set_reg_bit(nvt, CIR_INTR_MOUSE_IRQ_BIT, CR_ACPI_IRQ_EVENTS);
+
+ /* enable pme interrupt of cir wakeup event */
+ nvt_set_reg_bit(nvt, PME_INTR_CIR_PASS_BIT, CR_ACPI_IRQ_EVENTS2);
+
+ /* Select CIR Wake logical device and enable */
+ nvt_select_logical_dev(nvt, LOGICAL_DEV_CIR_WAKE);
+ nvt_cr_write(nvt, LOGICAL_DEV_ENABLE, CR_LOGICAL_DEV_EN);
+
+ nvt_cr_write(nvt, nvt->cir_wake_addr >> 8, CR_CIR_BASE_ADDR_HI);
+ nvt_cr_write(nvt, nvt->cir_wake_addr & 0xff, CR_CIR_BASE_ADDR_LO);
+
+ nvt_cr_write(nvt, nvt->cir_wake_irq, CR_CIR_IRQ_RSRC);
+
+ nvt_dbg("CIR Wake initialized, base io port address: 0x%lx, irq: %d",
+ nvt->cir_wake_addr, nvt->cir_wake_irq);
+}
+
+/* clear out the hardware's cir rx fifo */
+static void nvt_clear_cir_fifo(struct nvt_dev *nvt)
+{
+ u8 val;
+
+ val = nvt_cir_reg_read(nvt, CIR_FIFOCON);
+ nvt_cir_reg_write(nvt, val | CIR_FIFOCON_RXFIFOCLR, CIR_FIFOCON);
+}
+
+/* clear out the hardware's cir wake rx fifo */
+static void nvt_clear_cir_wake_fifo(struct nvt_dev *nvt)
+{
+ u8 val;
+
+ val = nvt_cir_wake_reg_read(nvt, CIR_WAKE_FIFOCON);
+ nvt_cir_wake_reg_write(nvt, val | CIR_WAKE_FIFOCON_RXFIFOCLR,
+ CIR_WAKE_FIFOCON);
+}
+
+/* clear out the hardware's cir tx fifo */
+static void nvt_clear_tx_fifo(struct nvt_dev *nvt)
+{
+ u8 val;
+
+ val = nvt_cir_reg_read(nvt, CIR_FIFOCON);
+ nvt_cir_reg_write(nvt, val | CIR_FIFOCON_TXFIFOCLR, CIR_FIFOCON);
+}
+
+/* enable RX Trigger Level Reach and Packet End interrupts */
+static void nvt_set_cir_iren(struct nvt_dev *nvt)
+{
+ u8 iren;
+
+ iren = CIR_IREN_RTR | CIR_IREN_PE;
+ nvt_cir_reg_write(nvt, iren, CIR_IREN);
+}
+
+static void nvt_cir_regs_init(struct nvt_dev *nvt)
+{
+ /* set sample limit count (PE interrupt raised when reached) */
+ nvt_cir_reg_write(nvt, CIR_RX_LIMIT_COUNT >> 8, CIR_SLCH);
+ nvt_cir_reg_write(nvt, CIR_RX_LIMIT_COUNT & 0xff, CIR_SLCL);
+
+ /* set fifo irq trigger levels */
+ nvt_cir_reg_write(nvt, CIR_FIFOCON_TX_TRIGGER_LEV |
+ CIR_FIFOCON_RX_TRIGGER_LEV, CIR_FIFOCON);
+
+ /*
+ * Enable TX and RX, specify carrier on = low, off = high, and set
+ * sample period (currently 50us)
+ */
+ nvt_cir_reg_write(nvt,
+ CIR_IRCON_TXEN | CIR_IRCON_RXEN |
+ CIR_IRCON_RXINV | CIR_IRCON_SAMPLE_PERIOD_SEL,
+ CIR_IRCON);
+
+ /* clear hardware rx and tx fifos */
+ nvt_clear_cir_fifo(nvt);
+ nvt_clear_tx_fifo(nvt);
+
+ /* clear any and all stray interrupts */
+ nvt_cir_reg_write(nvt, 0xff, CIR_IRSTS);
+
+ /* and finally, enable interrupts */
+ nvt_set_cir_iren(nvt);
+}
+
+static void nvt_cir_wake_regs_init(struct nvt_dev *nvt)
+{
+ /* set number of bytes needed for wake key comparison (default 67) */
+ nvt_cir_wake_reg_write(nvt, CIR_WAKE_FIFO_LEN, CIR_WAKE_FIFO_CMP_DEEP);
+
+ /* set tolerance/variance allowed per byte during wake compare */
+ nvt_cir_wake_reg_write(nvt, CIR_WAKE_CMP_TOLERANCE,
+ CIR_WAKE_FIFO_CMP_TOL);
+
+ /* set sample limit count (PE interrupt raised when reached) */
+ nvt_cir_wake_reg_write(nvt, CIR_RX_LIMIT_COUNT >> 8, CIR_WAKE_SLCH);
+ nvt_cir_wake_reg_write(nvt, CIR_RX_LIMIT_COUNT & 0xff, CIR_WAKE_SLCL);
+
+ /* set cir wake fifo rx trigger level (currently 67) */
+ nvt_cir_wake_reg_write(nvt, CIR_WAKE_FIFOCON_RX_TRIGGER_LEV,
+ CIR_WAKE_FIFOCON);
+
+ /*
+ * Enable TX and RX, specify carrier on = low, off = high, and set
+ * sample period (currently 50us)
+ */
+ nvt_cir_wake_reg_write(nvt, CIR_WAKE_IRCON_MODE0 | CIR_WAKE_IRCON_RXEN |
+ CIR_WAKE_IRCON_R | CIR_WAKE_IRCON_RXINV |
+ CIR_WAKE_IRCON_SAMPLE_PERIOD_SEL,
+ CIR_WAKE_IRCON);
+
+ /* clear cir wake rx fifo */
+ nvt_clear_cir_wake_fifo(nvt);
+
+ /* clear any and all stray interrupts */
+ nvt_cir_wake_reg_write(nvt, 0xff, CIR_WAKE_IRSTS);
+}
+
+static void nvt_enable_wake(struct nvt_dev *nvt)
+{
+ nvt_efm_enable(nvt);
+
+ nvt_select_logical_dev(nvt, LOGICAL_DEV_ACPI);
+ nvt_set_reg_bit(nvt, CIR_WAKE_ENABLE_BIT, CR_ACPI_CIR_WAKE);
+ nvt_set_reg_bit(nvt, CIR_INTR_MOUSE_IRQ_BIT, CR_ACPI_IRQ_EVENTS);
+ nvt_set_reg_bit(nvt, PME_INTR_CIR_PASS_BIT, CR_ACPI_IRQ_EVENTS2);
+
+ nvt_select_logical_dev(nvt, LOGICAL_DEV_CIR_WAKE);
+ nvt_cr_write(nvt, LOGICAL_DEV_ENABLE, CR_LOGICAL_DEV_EN);
+
+ nvt_efm_disable(nvt);
+
+ nvt_cir_wake_reg_write(nvt, CIR_WAKE_IRCON_MODE0 | CIR_WAKE_IRCON_RXEN |
+ CIR_WAKE_IRCON_R | CIR_WAKE_IRCON_RXINV |
+ CIR_WAKE_IRCON_SAMPLE_PERIOD_SEL,
+ CIR_WAKE_IRCON);
+ nvt_cir_wake_reg_write(nvt, 0xff, CIR_WAKE_IRSTS);
+ nvt_cir_wake_reg_write(nvt, 0, CIR_WAKE_IREN);
+}
+
+/* rx carrier detect only works in learning mode, must be called w/nvt_lock */
+static u32 nvt_rx_carrier_detect(struct nvt_dev *nvt)
+{
+ u32 count, carrier, duration = 0;
+ int i;
+
+ count = nvt_cir_reg_read(nvt, CIR_FCCL) |
+ nvt_cir_reg_read(nvt, CIR_FCCH) << 8;
+
+ for (i = 0; i < nvt->pkts; i++) {
+ if (nvt->buf[i] & BUF_PULSE_BIT)
+ duration += nvt->buf[i] & BUF_LEN_MASK;
+ }
+
+ duration *= SAMPLE_PERIOD;
+
+ if (!count || !duration) {
+ nvt_pr(KERN_NOTICE, "Unable to determine carrier! (c:%u, d:%u)",
+ count, duration);
+ return 0;
+ }
+
+ carrier = (count * 1000000) / duration;
+
+ if ((carrier > MAX_CARRIER) || (carrier < MIN_CARRIER))
+ nvt_dbg("WTF? Carrier frequency out of range!");
+
+ nvt_dbg("Carrier frequency: %u (count %u, duration %u)",
+ carrier, count, duration);
+
+ return carrier;
+}
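+
+/*
+ * Worked example: if CIR_FCCH/CIR_FCCL read back a count of 114 and the
+ * accumulated pulse samples sum to 3000us, the computation above yields
+ * 114 * 1000000 / 3000 = 38000, i.e. a 38kHz carrier.
+ */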
+
+/*
+ * set carrier frequency
+ *
+ * set carrier on 2 registers: CP & CC
+ * always set CP as 0x81
+ * set CC by SPEC, CC = 3MHz/carrier - 1
+ */
+static int nvt_set_tx_carrier(void *data, u32 carrier)
+{
+ struct nvt_dev *nvt = data;
+ u16 val;
+
+ nvt_cir_reg_write(nvt, 1, CIR_CP);
+ val = 3000000 / (carrier) - 1;
+ nvt_cir_reg_write(nvt, val & 0xff, CIR_CC);
+
+ nvt_dbg("cp: 0x%x cc: 0x%x\n",
+ nvt_cir_reg_read(nvt, CIR_CP), nvt_cir_reg_read(nvt, CIR_CC));
+
+ return 0;
+}
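+
+/*
+ * Worked example: requesting a 36kHz carrier (typical for RC6/MCE remotes)
+ * gives CC = 3000000 / 36000 - 1 = 82 (0x52), so the hardware divides its
+ * 3MHz reference by CC + 1 = 83, i.e. roughly 36.1kHz.
+ */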
+
+/*
+ * nvt_tx_ir
+ *
+ * 1) clean TX fifo first (handled by AP)
+ * 2) copy data from user space
+ * 3) disable RX interrupts, enable TX interrupts: TTR & TFU
+ * 4) send 9 packets to TX FIFO to open TTR
+ * in interrupt_handler:
+ * 5) send all data out
+ * go back to write():
+ * 6) disable TX interrupts, re-enable RX interrupts
+ *
+ * The key problem with this function is that user space data may be larger
+ * than the driver's data buffer length. nvt_tx_ir() therefore copies at most
+ * TX_BUF_LEN bytes into buf and tracks how much has been sent so far in
+ * cur_buf_num. The driver's buffer count may also be larger than TXFCONT
+ * (0xff), so the interrupt handler has to keep setting TXFCONT to 0xff
+ * until buf_count drops below 0xff.
+ */
+static int nvt_tx_ir(void *priv, int *txbuf, u32 n)
+{
+ struct nvt_dev *nvt = priv;
+ unsigned long flags;
+ size_t cur_count;
+ unsigned int i;
+ u8 iren;
+ int ret;
+
+ spin_lock_irqsave(&nvt->tx.lock, flags);
+
+ if (n >= TX_BUF_LEN) {
+ nvt->tx.buf_count = cur_count = TX_BUF_LEN;
+ ret = TX_BUF_LEN;
+ } else {
+ nvt->tx.buf_count = cur_count = n;
+ ret = n;
+ }
+
+ memcpy(nvt->tx.buf, txbuf, nvt->tx.buf_count);
+
+ nvt->tx.cur_buf_num = 0;
+
+ /* save currently enabled interrupts */
+ iren = nvt_cir_reg_read(nvt, CIR_IREN);
+
+ /* now disable all interrupts, save TFU & TTR */
+ nvt_cir_reg_write(nvt, CIR_IREN_TFU | CIR_IREN_TTR, CIR_IREN);
+
+ nvt->tx.tx_state = ST_TX_REPLY;
+
+ nvt_cir_reg_write(nvt, CIR_FIFOCON_TX_TRIGGER_LEV_8 |
+ CIR_FIFOCON_RXFIFOCLR, CIR_FIFOCON);
+
+ /* trigger TTR interrupt by writing out ones (yes, it's ugly) */
+ for (i = 0; i < 9; i++)
+ nvt_cir_reg_write(nvt, 0x01, CIR_STXFIFO);
+
+ spin_unlock_irqrestore(&nvt->tx.lock, flags);
+
+ wait_event(nvt->tx.queue, nvt->tx.tx_state == ST_TX_REQUEST);
+
+ spin_lock_irqsave(&nvt->tx.lock, flags);
+ nvt->tx.tx_state = ST_TX_NONE;
+ spin_unlock_irqrestore(&nvt->tx.lock, flags);
+
+ /* restore enabled interrupts to prior state */
+ nvt_cir_reg_write(nvt, iren, CIR_IREN);
+
+ return ret;
+}
+
+/* dump contents of the last rx buffer we got from the hw rx fifo */
+static void nvt_dump_rx_buf(struct nvt_dev *nvt)
+{
+ int i;
+
+ printk(KERN_DEBUG "%s (len %d): ", __func__, nvt->pkts);
+ for (i = 0; (i < nvt->pkts) && (i < RX_BUF_LEN); i++)
+ printk(KERN_CONT "0x%02x ", nvt->buf[i]);
+ printk(KERN_CONT "\n");
+}
+
+/*
+ * Process raw data in rx driver buffer, store it in raw IR event kfifo,
+ * trigger decode when appropriate.
+ *
+ * We get IR data samples one byte at a time. If the msb is set, it's a pulse,
+ * otherwise it's a space. The lower 7 bits are the count of SAMPLE_PERIOD
+ * (default 50us) intervals for that pulse/space. A discrete signal is
+ * followed by a series of 0x7f packets, then either 0x7<something> or 0x80
+ * to signal more IR coming (repeats) or end of IR, respectively. We store
+ * sample data in the raw event kfifo until we see 0x7<something> (except f)
+ * or 0x80, at which time, we trigger a decode operation.
+ */
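+/*
+ * For example, a sample byte of 0xc8 has the msb set and a count of 0x48
+ * (72), i.e. a pulse of 72 * 50us = 3.6ms, while 0x32 (count 50, msb clear)
+ * would be a 2.5ms space.
+ */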
+static void nvt_process_rx_ir_data(struct nvt_dev *nvt)
+{
+ DEFINE_IR_RAW_EVENT(rawir);
+ unsigned int count;
+ u32 carrier;
+ u8 sample;
+ int i;
+
+ nvt_dbg_verbose("%s firing", __func__);
+
+ if (debug)
+ nvt_dump_rx_buf(nvt);
+
+ if (nvt->carrier_detect_enabled)
+ carrier = nvt_rx_carrier_detect(nvt);
+
+ count = nvt->pkts;
+ nvt_dbg_verbose("Processing buffer of len %d", count);
+
+ for (i = 0; i < count; i++) {
+ nvt->pkts--;
+ sample = nvt->buf[i];
+
+ rawir.pulse = ((sample & BUF_PULSE_BIT) != 0);
+ rawir.duration = (sample & BUF_LEN_MASK)
+ * SAMPLE_PERIOD * 1000;
+
+ if ((sample & BUF_LEN_MASK) == BUF_LEN_MASK) {
+ if (nvt->rawir.pulse == rawir.pulse)
+ nvt->rawir.duration += rawir.duration;
+ else {
+ nvt->rawir.duration = rawir.duration;
+ nvt->rawir.pulse = rawir.pulse;
+ }
+ continue;
+ }
+
+ rawir.duration += nvt->rawir.duration;
+
+ init_ir_raw_event(&nvt->rawir);
+ nvt->rawir.duration = 0;
+ nvt->rawir.pulse = rawir.pulse;
+
+ if (sample == BUF_PULSE_BIT)
+ rawir.pulse = false;
+
+ if (rawir.duration) {
+ nvt_dbg("Storing %s with duration %d",
+ rawir.pulse ? "pulse" : "space",
+ rawir.duration);
+
+ ir_raw_event_store(nvt->rdev, &rawir);
+ }
+
+ /*
+ * BUF_PULSE_BIT indicates end of IR data, BUF_REPEAT_BYTE
+ * indicates end of IR signal, but new data incoming. In both
+ * cases, it means we're ready to call ir_raw_event_handle
+ */
+ if (sample == BUF_PULSE_BIT || ((sample != BUF_LEN_MASK) &&
+ (sample & BUF_REPEAT_MASK) == BUF_REPEAT_BYTE))
+ ir_raw_event_handle(nvt->rdev);
+ }
+
+ if (nvt->pkts) {
+ nvt_dbg("Odd, pkts should be 0 now... (its %u)", nvt->pkts);
+ nvt->pkts = 0;
+ }
+
+ nvt_dbg_verbose("%s done", __func__);
+}
+
+static void nvt_handle_rx_fifo_overrun(struct nvt_dev *nvt)
+{
+ nvt_pr(KERN_WARNING, "RX FIFO overrun detected, flushing data!");
+
+ nvt->pkts = 0;
+ nvt_clear_cir_fifo(nvt);
+ ir_raw_event_reset(nvt->rdev);
+}
+
+/* copy data from hardware rx fifo into driver buffer */
+static void nvt_get_rx_ir_data(struct nvt_dev *nvt)
+{
+ unsigned long flags;
+ u8 fifocount, val;
+ unsigned int b_idx;
+ bool overrun = false;
+ int i;
+
+ /* Get count of how many bytes to read from RX FIFO */
+ fifocount = nvt_cir_reg_read(nvt, CIR_RXFCONT);
+ /* if we get 0xff, probably means the logical dev is disabled */
+ if (fifocount == 0xff)
+ return;
+ /* watch out for a fifo overrun condition */
+ else if (fifocount > RX_BUF_LEN) {
+ overrun = true;
+ fifocount = RX_BUF_LEN;
+ }
+
+ nvt_dbg("attempting to fetch %u bytes from hw rx fifo", fifocount);
+
+ spin_lock_irqsave(&nvt->nvt_lock, flags);
+
+ b_idx = nvt->pkts;
+
+ /* This should never happen, but let's check anyway... */
+ if (b_idx + fifocount > RX_BUF_LEN) {
+ nvt_process_rx_ir_data(nvt);
+ b_idx = 0;
+ }
+
+ /* Read fifocount bytes from CIR Sample RX FIFO register */
+ for (i = 0; i < fifocount; i++) {
+ val = nvt_cir_reg_read(nvt, CIR_SRXFIFO);
+ nvt->buf[b_idx + i] = val;
+ }
+
+ nvt->pkts += fifocount;
+ nvt_dbg("%s: pkts now %d", __func__, nvt->pkts);
+
+ nvt_process_rx_ir_data(nvt);
+
+ if (overrun)
+ nvt_handle_rx_fifo_overrun(nvt);
+
+ spin_unlock_irqrestore(&nvt->nvt_lock, flags);
+}
+
+static void nvt_cir_log_irqs(u8 status, u8 iren)
+{
+ nvt_pr(KERN_INFO, "IRQ 0x%02x (IREN 0x%02x) :%s%s%s%s%s%s%s%s%s",
+ status, iren,
+ status & CIR_IRSTS_RDR ? " RDR" : "",
+ status & CIR_IRSTS_RTR ? " RTR" : "",
+ status & CIR_IRSTS_PE ? " PE" : "",
+ status & CIR_IRSTS_RFO ? " RFO" : "",
+ status & CIR_IRSTS_TE ? " TE" : "",
+ status & CIR_IRSTS_TTR ? " TTR" : "",
+ status & CIR_IRSTS_TFU ? " TFU" : "",
+ status & CIR_IRSTS_GH ? " GH" : "",
+ status & ~(CIR_IRSTS_RDR | CIR_IRSTS_RTR | CIR_IRSTS_PE |
+ CIR_IRSTS_RFO | CIR_IRSTS_TE | CIR_IRSTS_TTR |
+ CIR_IRSTS_TFU | CIR_IRSTS_GH) ? " ?" : "");
+}
+
+static bool nvt_cir_tx_inactive(struct nvt_dev *nvt)
+{
+ unsigned long flags;
+ bool tx_inactive;
+ u8 tx_state;
+
+ spin_lock_irqsave(&nvt->tx.lock, flags);
+ tx_state = nvt->tx.tx_state;
+ spin_unlock_irqrestore(&nvt->tx.lock, flags);
+
+ tx_inactive = (tx_state == ST_TX_NONE);
+
+ return tx_inactive;
+}
+
+/* interrupt service routine for incoming and outgoing CIR data */
+static irqreturn_t nvt_cir_isr(int irq, void *data)
+{
+ struct nvt_dev *nvt = data;
+ u8 status, iren, cur_state;
+ unsigned long flags;
+
+ nvt_dbg_verbose("%s firing", __func__);
+
+ nvt_efm_enable(nvt);
+ nvt_select_logical_dev(nvt, LOGICAL_DEV_CIR);
+ nvt_efm_disable(nvt);
+
+ /*
+ * Get IR Status register contents. Write 1 to ack/clear
+ *
+ * bit: reg name - description
+ * 7: CIR_IRSTS_RDR - RX Data Ready
+ * 6: CIR_IRSTS_RTR - RX FIFO Trigger Level Reach
+ * 5: CIR_IRSTS_PE - Packet End
+ * 4: CIR_IRSTS_RFO - RX FIFO Overrun (RDR will also be set)
+ * 3: CIR_IRSTS_TE - TX FIFO Empty
+ * 2: CIR_IRSTS_TTR - TX FIFO Trigger Level Reach
+ * 1: CIR_IRSTS_TFU - TX FIFO Underrun
+ * 0: CIR_IRSTS_GH - Min Length Detected
+ */
+ status = nvt_cir_reg_read(nvt, CIR_IRSTS);
+ if (!status) {
+ nvt_dbg_verbose("%s exiting, IRSTS 0x0", __func__);
+ nvt_cir_reg_write(nvt, 0xff, CIR_IRSTS);
+ return IRQ_RETVAL(IRQ_NONE);
+ }
+
+ /* ack/clear all irq flags we've got */
+ nvt_cir_reg_write(nvt, status, CIR_IRSTS);
+ nvt_cir_reg_write(nvt, 0, CIR_IRSTS);
+
+ /* Interrupt may be shared with CIR Wake, bail if CIR not enabled */
+ iren = nvt_cir_reg_read(nvt, CIR_IREN);
+ if (!iren) {
+ nvt_dbg_verbose("%s exiting, CIR not enabled", __func__);
+ return IRQ_RETVAL(IRQ_NONE);
+ }
+
+ if (debug)
+ nvt_cir_log_irqs(status, iren);
+
+ if (status & CIR_IRSTS_RTR) {
+ /* FIXME: add code for study/learn mode */
+ /* We only do rx if not tx'ing */
+ if (nvt_cir_tx_inactive(nvt))
+ nvt_get_rx_ir_data(nvt);
+ }
+
+ if (status & CIR_IRSTS_PE) {
+ if (nvt_cir_tx_inactive(nvt))
+ nvt_get_rx_ir_data(nvt);
+
+ spin_lock_irqsave(&nvt->nvt_lock, flags);
+
+ cur_state = nvt->study_state;
+
+ spin_unlock_irqrestore(&nvt->nvt_lock, flags);
+
+ if (cur_state == ST_STUDY_NONE)
+ nvt_clear_cir_fifo(nvt);
+ }
+
+ if (status & CIR_IRSTS_TE)
+ nvt_clear_tx_fifo(nvt);
+
+ if (status & CIR_IRSTS_TTR) {
+ unsigned int pos, count;
+ u8 tmp;
+
+ spin_lock_irqsave(&nvt->tx.lock, flags);
+
+ pos = nvt->tx.cur_buf_num;
+ count = nvt->tx.buf_count;
+
+ /* Write data into the hardware tx fifo while pos < count */
+ if (pos < count) {
+ nvt_cir_reg_write(nvt, nvt->tx.buf[pos], CIR_STXFIFO);
+ nvt->tx.cur_buf_num++;
+ /* Disable TX FIFO Trigger Level Reach (TTR) interrupt */
+ } else {
+ tmp = nvt_cir_reg_read(nvt, CIR_IREN);
+ nvt_cir_reg_write(nvt, tmp & ~CIR_IREN_TTR, CIR_IREN);
+ }
+
+ spin_unlock_irqrestore(&nvt->tx.lock, flags);
+
+ }
+
+ if (status & CIR_IRSTS_TFU) {
+ spin_lock_irqsave(&nvt->tx.lock, flags);
+ if (nvt->tx.tx_state == ST_TX_REPLY) {
+ nvt->tx.tx_state = ST_TX_REQUEST;
+ wake_up(&nvt->tx.queue);
+ }
+ spin_unlock_irqrestore(&nvt->tx.lock, flags);
+ }
+
+ nvt_dbg_verbose("%s done", __func__);
+ return IRQ_RETVAL(IRQ_HANDLED);
+}
+
+/* Interrupt service routine for CIR Wake */
+static irqreturn_t nvt_cir_wake_isr(int irq, void *data)
+{
+ u8 status, iren, val;
+ struct nvt_dev *nvt = data;
+ unsigned long flags;
+
+ nvt_dbg_wake("%s firing", __func__);
+
+ status = nvt_cir_wake_reg_read(nvt, CIR_WAKE_IRSTS);
+ if (!status)
+ return IRQ_RETVAL(IRQ_NONE);
+
+ if (status & CIR_WAKE_IRSTS_IR_PENDING)
+ nvt_clear_cir_wake_fifo(nvt);
+
+ nvt_cir_wake_reg_write(nvt, status, CIR_WAKE_IRSTS);
+ nvt_cir_wake_reg_write(nvt, 0, CIR_WAKE_IRSTS);
+
+ /* Interrupt may be shared with CIR, bail if Wake not enabled */
+ iren = nvt_cir_wake_reg_read(nvt, CIR_WAKE_IREN);
+ if (!iren) {
+ nvt_dbg_wake("%s exiting, wake not enabled", __func__);
+ return IRQ_RETVAL(IRQ_HANDLED);
+ }
+
+ if ((status & CIR_WAKE_IRSTS_PE) &&
+ (nvt->wake_state == ST_WAKE_START)) {
+ while (nvt_cir_wake_reg_read(nvt, CIR_WAKE_RD_FIFO_ONLY_IDX)) {
+ val = nvt_cir_wake_reg_read(nvt, CIR_WAKE_RD_FIFO_ONLY);
+ nvt_dbg("setting wake up key: 0x%x", val);
+ }
+
+ nvt_cir_wake_reg_write(nvt, 0, CIR_WAKE_IREN);
+ spin_lock_irqsave(&nvt->nvt_lock, flags);
+ nvt->wake_state = ST_WAKE_FINISH;
+ spin_unlock_irqrestore(&nvt->nvt_lock, flags);
+ }
+
+ nvt_dbg_wake("%s done", __func__);
+ return IRQ_RETVAL(IRQ_HANDLED);
+}
+
+static void nvt_enable_cir(struct nvt_dev *nvt)
+{
+ /* set function enable flags */
+ nvt_cir_reg_write(nvt, CIR_IRCON_TXEN | CIR_IRCON_RXEN |
+ CIR_IRCON_RXINV | CIR_IRCON_SAMPLE_PERIOD_SEL,
+ CIR_IRCON);
+
+ nvt_efm_enable(nvt);
+
+ /* enable the CIR logical device */
+ nvt_select_logical_dev(nvt, LOGICAL_DEV_CIR);
+ nvt_cr_write(nvt, LOGICAL_DEV_ENABLE, CR_LOGICAL_DEV_EN);
+
+ nvt_efm_disable(nvt);
+
+ /* clear all pending interrupts */
+ nvt_cir_reg_write(nvt, 0xff, CIR_IRSTS);
+
+ /* enable interrupts */
+ nvt_set_cir_iren(nvt);
+}
+
+static void nvt_disable_cir(struct nvt_dev *nvt)
+{
+ /* disable CIR interrupts */
+ nvt_cir_reg_write(nvt, 0, CIR_IREN);
+
+ /* clear any and all pending interrupts */
+ nvt_cir_reg_write(nvt, 0xff, CIR_IRSTS);
+
+ /* clear all function enable flags */
+ nvt_cir_reg_write(nvt, 0, CIR_IRCON);
+
+ /* clear hardware rx and tx fifos */
+ nvt_clear_cir_fifo(nvt);
+ nvt_clear_tx_fifo(nvt);
+
+ nvt_efm_enable(nvt);
+
+ /* disable the CIR logical device */
+ nvt_select_logical_dev(nvt, LOGICAL_DEV_CIR);
+ nvt_cr_write(nvt, LOGICAL_DEV_DISABLE, CR_LOGICAL_DEV_EN);
+
+ nvt_efm_disable(nvt);
+}
+
+static int nvt_open(void *data)
+{
+ struct nvt_dev *nvt = (struct nvt_dev *)data;
+ unsigned long flags;
+
+ spin_lock_irqsave(&nvt->nvt_lock, flags);
+ nvt->in_use = true;
+ nvt_enable_cir(nvt);
+ spin_unlock_irqrestore(&nvt->nvt_lock, flags);
+
+ return 0;
+}
+
+static void nvt_close(void *data)
+{
+ struct nvt_dev *nvt = (struct nvt_dev *)data;
+ unsigned long flags;
+
+ spin_lock_irqsave(&nvt->nvt_lock, flags);
+ nvt->in_use = false;
+ nvt_disable_cir(nvt);
+ spin_unlock_irqrestore(&nvt->nvt_lock, flags);
+}
+
+/* Allocate memory, probe hardware, and initialize everything */
+static int nvt_probe(struct pnp_dev *pdev, const struct pnp_device_id *dev_id)
+{
+ struct nvt_dev *nvt = NULL;
+ struct input_dev *rdev = NULL;
+ struct ir_dev_props *props = NULL;
+ int ret = -ENOMEM;
+
+ nvt = kzalloc(sizeof(struct nvt_dev), GFP_KERNEL);
+ if (!nvt)
+ return ret;
+
+ props = kzalloc(sizeof(struct ir_dev_props), GFP_KERNEL);
+ if (!props)
+ goto failure;
+
+ /* input device for IR remote (and tx) */
+ rdev = input_allocate_device();
+ if (!rdev)
+ goto failure;
+
+ ret = -ENODEV;
+ /* validate pnp resources */
+ if (!pnp_port_valid(pdev, 0) ||
+ pnp_port_len(pdev, 0) < CIR_IOREG_LENGTH) {
+ dev_err(&pdev->dev, "IR PNP Port not valid!\n");
+ goto failure;
+ }
+
+ if (!pnp_irq_valid(pdev, 0)) {
+ dev_err(&pdev->dev, "PNP IRQ not valid!\n");
+ goto failure;
+ }
+
+ if (!pnp_port_valid(pdev, 1) ||
+ pnp_port_len(pdev, 1) < CIR_IOREG_LENGTH) {
+ dev_err(&pdev->dev, "Wake PNP Port not valid!\n");
+ goto failure;
+ }
+
+ nvt->cir_addr = pnp_port_start(pdev, 0);
+ nvt->cir_irq = pnp_irq(pdev, 0);
+
+ nvt->cir_wake_addr = pnp_port_start(pdev, 1);
+ /* irq is always shared between cir and cir wake */
+ nvt->cir_wake_irq = nvt->cir_irq;
+
+ nvt->cr_efir = CR_EFIR;
+ nvt->cr_efdr = CR_EFDR;
+
+ spin_lock_init(&nvt->nvt_lock);
+ spin_lock_init(&nvt->tx.lock);
+ init_ir_raw_event(&nvt->rawir);
+
+ ret = -EBUSY;
+ /* now claim resources */
+ if (!request_region(nvt->cir_addr,
+ CIR_IOREG_LENGTH, NVT_DRIVER_NAME))
+ goto failure;
+
+ if (request_irq(nvt->cir_irq, nvt_cir_isr, IRQF_SHARED,
+ NVT_DRIVER_NAME, (void *)nvt))
+ goto failure;
+
+ if (!request_region(nvt->cir_wake_addr,
+ CIR_IOREG_LENGTH, NVT_DRIVER_NAME))
+ goto failure;
+
+ if (request_irq(nvt->cir_wake_irq, nvt_cir_wake_isr, IRQF_SHARED,
+ NVT_DRIVER_NAME, (void *)nvt))
+ goto failure;
+
+ pnp_set_drvdata(pdev, nvt);
+ nvt->pdev = pdev;
+
+ init_waitqueue_head(&nvt->tx.queue);
+
+ ret = nvt_hw_detect(nvt);
+ if (ret)
+ goto failure;
+
+ /* Initialize CIR & CIR Wake Logical Devices */
+ nvt_efm_enable(nvt);
+ nvt_cir_ldev_init(nvt);
+ nvt_cir_wake_ldev_init(nvt);
+ nvt_efm_disable(nvt);
+
+ /* Initialize CIR & CIR Wake Config Registers */
+ nvt_cir_regs_init(nvt);
+ nvt_cir_wake_regs_init(nvt);
+
+ /* Set up ir-core props */
+ props->priv = nvt;
+ props->driver_type = RC_DRIVER_IR_RAW;
+ props->allowed_protos = IR_TYPE_ALL;
+ props->open = nvt_open;
+ props->close = nvt_close;
+#if 0
+ props->min_timeout = XYZ;
+ props->max_timeout = XYZ;
+ props->timeout = XYZ;
+ /* rx resolution is hardwired to 50us atm, 1, 25, 100 also possible */
+ props->rx_resolution = XYZ;
+
+ /* tx bits */
+ props->tx_resolution = XYZ;
+#endif
+ props->tx_ir = nvt_tx_ir;
+ props->s_tx_carrier = nvt_set_tx_carrier;
+
+ rdev->name = "Nuvoton w836x7hg Infrared Remote Transceiver";
+ rdev->id.bustype = BUS_HOST;
+ rdev->id.vendor = PCI_VENDOR_ID_WINBOND2;
+ rdev->id.product = nvt->chip_major;
+ rdev->id.version = nvt->chip_minor;
+
+ nvt->props = props;
+ nvt->rdev = rdev;
+
+ device_set_wakeup_capable(&pdev->dev, 1);
+ device_set_wakeup_enable(&pdev->dev, 1);
+
+ ret = ir_input_register(rdev, RC_MAP_RC6_MCE, props, NVT_DRIVER_NAME);
+ if (ret)
+ goto failure;
+
+ nvt_pr(KERN_NOTICE, "driver has been successfully loaded\n");
+ if (debug) {
+ cir_dump_regs(nvt);
+ cir_wake_dump_regs(nvt);
+ }
+
+ return 0;
+
+failure:
+ if (nvt->cir_irq)
+ free_irq(nvt->cir_irq, nvt);
+ if (nvt->cir_addr)
+ release_region(nvt->cir_addr, CIR_IOREG_LENGTH);
+
+ if (nvt->cir_wake_irq)
+ free_irq(nvt->cir_wake_irq, nvt);
+ if (nvt->cir_wake_addr)
+ release_region(nvt->cir_wake_addr, CIR_IOREG_LENGTH);
+
+ input_free_device(rdev);
+ kfree(props);
+ kfree(nvt);
+
+ return ret;
+}
+
+static void __devexit nvt_remove(struct pnp_dev *pdev)
+{
+ struct nvt_dev *nvt = pnp_get_drvdata(pdev);
+ unsigned long flags;
+
+ spin_lock_irqsave(&nvt->nvt_lock, flags);
+ /* disable CIR */
+ nvt_cir_reg_write(nvt, 0, CIR_IREN);
+ nvt_disable_cir(nvt);
+ /* enable CIR Wake (for IR power-on) */
+ nvt_enable_wake(nvt);
+ spin_unlock_irqrestore(&nvt->nvt_lock, flags);
+
+ /* free resources */
+ free_irq(nvt->cir_irq, nvt);
+ free_irq(nvt->cir_wake_irq, nvt);
+ release_region(nvt->cir_addr, CIR_IOREG_LENGTH);
+ release_region(nvt->cir_wake_addr, CIR_IOREG_LENGTH);
+
+ ir_input_unregister(nvt->rdev);
+
+ kfree(nvt->props);
+ kfree(nvt);
+}
+
+static int nvt_suspend(struct pnp_dev *pdev, pm_message_t state)
+{
+ struct nvt_dev *nvt = pnp_get_drvdata(pdev);
+ unsigned long flags;
+
+ nvt_dbg("%s called", __func__);
+
+ /* zero out misc state tracking */
+ spin_lock_irqsave(&nvt->nvt_lock, flags);
+ nvt->study_state = ST_STUDY_NONE;
+ nvt->wake_state = ST_WAKE_NONE;
+ spin_unlock_irqrestore(&nvt->nvt_lock, flags);
+
+ spin_lock_irqsave(&nvt->tx.lock, flags);
+ nvt->tx.tx_state = ST_TX_NONE;
+ spin_unlock_irqrestore(&nvt->tx.lock, flags);
+
+ /* disable all CIR interrupts */
+ nvt_cir_reg_write(nvt, 0, CIR_IREN);
+
+ nvt_efm_enable(nvt);
+
+ /* disable cir logical dev */
+ nvt_select_logical_dev(nvt, LOGICAL_DEV_CIR);
+ nvt_cr_write(nvt, LOGICAL_DEV_DISABLE, CR_LOGICAL_DEV_EN);
+
+ nvt_efm_disable(nvt);
+
+ /* make sure wake is enabled */
+ nvt_enable_wake(nvt);
+
+ return 0;
+}
+
+static int nvt_resume(struct pnp_dev *pdev)
+{
+ int ret = 0;
+ struct nvt_dev *nvt = pnp_get_drvdata(pdev);
+
+ nvt_dbg("%s called", __func__);
+
+ /* open interrupt */
+ nvt_set_cir_iren(nvt);
+
+ /* Enable CIR logical device */
+ nvt_efm_enable(nvt);
+ nvt_select_logical_dev(nvt, LOGICAL_DEV_CIR);
+ nvt_cr_write(nvt, LOGICAL_DEV_ENABLE, CR_LOGICAL_DEV_EN);
+
+ nvt_efm_disable(nvt);
+
+ nvt_cir_regs_init(nvt);
+ nvt_cir_wake_regs_init(nvt);
+
+ return ret;
+}
+
+static void nvt_shutdown(struct pnp_dev *pdev)
+{
+ struct nvt_dev *nvt = pnp_get_drvdata(pdev);
+ nvt_enable_wake(nvt);
+}
+
+static const struct pnp_device_id nvt_ids[] = {
+ { "WEC0530", 0 }, /* CIR */
+ { "NTN0530", 0 }, /* CIR for new chip's pnp id*/
+ { "", 0 },
+};
+
+static struct pnp_driver nvt_driver = {
+ .name = NVT_DRIVER_NAME,
+ .id_table = nvt_ids,
+ .flags = PNP_DRIVER_RES_DO_NOT_CHANGE,
+ .probe = nvt_probe,
+ .remove = __devexit_p(nvt_remove),
+ .suspend = nvt_suspend,
+ .resume = nvt_resume,
+ .shutdown = nvt_shutdown,
+};
+
+int nvt_init(void)
+{
+ return pnp_register_driver(&nvt_driver);
+}
+
+void nvt_exit(void)
+{
+ pnp_unregister_driver(&nvt_driver);
+}
+
+module_param(debug, int, S_IRUGO | S_IWUSR);
+MODULE_PARM_DESC(debug, "Enable debugging output");
+
+MODULE_DEVICE_TABLE(pnp, nvt_ids);
+MODULE_DESCRIPTION("Nuvoton W83667HG-A & W83677HG-I CIR driver");
+
+MODULE_AUTHOR("Jarod Wilson <jarod@redhat.com>");
+MODULE_LICENSE("GPL");
+
+module_init(nvt_init);
+module_exit(nvt_exit);
diff --git a/drivers/media/IR/nuvoton-cir.h b/drivers/media/IR/nuvoton-cir.h
new file mode 100644
index 000000000000..62dc53017c8e
--- /dev/null
+++ b/drivers/media/IR/nuvoton-cir.h
@@ -0,0 +1,408 @@
+/*
+ * Driver for Nuvoton Technology Corporation w83667hg/w83677hg-i CIR
+ *
+ * Copyright (C) 2010 Jarod Wilson <jarod@redhat.com>
+ * Copyright (C) 2009 Nuvoton PS Team
+ *
+ * Special thanks to Nuvoton for providing hardware, spec sheets and
+ * sample code upon which portions of this driver are based. Indirect
+ * thanks also to Maxim Levitsky, whose ene_ir driver this driver is
+ * modeled after.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation; either version 2 of the
+ * License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307
+ * USA
+ */
+
+#include <linux/spinlock.h>
+#include <linux/ioctl.h>
+
+/* platform driver name to register */
+#define NVT_DRIVER_NAME "nuvoton-cir"
+
+/* debugging module parameter */
+static int debug;
+
+
+#define nvt_pr(level, text, ...) \
+ printk(level KBUILD_MODNAME ": " text, ## __VA_ARGS__)
+
+#define nvt_dbg(text, ...) \
+ if (debug) \
+ printk(KERN_DEBUG \
+ KBUILD_MODNAME ": " text "\n" , ## __VA_ARGS__)
+
+#define nvt_dbg_verbose(text, ...) \
+ if (debug > 1) \
+ printk(KERN_DEBUG \
+ KBUILD_MODNAME ": " text "\n" , ## __VA_ARGS__)
+
+#define nvt_dbg_wake(text, ...) \
+ if (debug > 2) \
+ printk(KERN_DEBUG \
+ KBUILD_MODNAME ": " text "\n" , ## __VA_ARGS__)
+
+
+/*
+ * Original lirc driver said min value of 76, and recommended value of 256
+ * for the buffer length, but then used 2048. Never mind that the size of the
+ * RX FIFO is 32 bytes... So I'm using 32 for RX and 256 for TX atm, but I'm
+ * not sure if maybe that TX value is off by a factor of 8 (bits vs. bytes),
+ * and I don't have TX-capable hardware to test/debug on...
+ */
+#define TX_BUF_LEN 256
+#define RX_BUF_LEN 32
+
+struct nvt_dev {
+ struct pnp_dev *pdev;
+ struct input_dev *rdev;
+ struct ir_dev_props *props;
+ struct ir_raw_event rawir;
+
+ spinlock_t nvt_lock;
+ bool in_use;
+
+ /* for rx */
+ u8 buf[RX_BUF_LEN];
+ unsigned int pkts;
+
+ struct {
+ spinlock_t lock;
+ u8 buf[TX_BUF_LEN];
+ unsigned int buf_count;
+ unsigned int cur_buf_num;
+ wait_queue_head_t queue;
+ u8 tx_state;
+ } tx;
+
+ /* EFER Config register index/data pair */
+ u8 cr_efir;
+ u8 cr_efdr;
+
+ /* hardware I/O settings */
+ unsigned long cir_addr;
+ unsigned long cir_wake_addr;
+ int cir_irq;
+ int cir_wake_irq;
+
+ /* hardware id */
+ u8 chip_major;
+ u8 chip_minor;
+
+ /* hardware features */
+ bool hw_learning_capable;
+ bool hw_tx_capable;
+
+ /* rx settings */
+ bool learning_enabled;
+ bool carrier_detect_enabled;
+
+ /* track cir wake state */
+ u8 wake_state;
+ /* for study */
+ u8 study_state;
+ /* carrier period = 1 / frequency */
+ u32 carrier;
+};
+
+/* study states */
+#define ST_STUDY_NONE 0x0
+#define ST_STUDY_START 0x1
+#define ST_STUDY_CARRIER 0x2
+#define ST_STUDY_ALL_RECV 0x4
+
+/* wake states */
+#define ST_WAKE_NONE 0x0
+#define ST_WAKE_START 0x1
+#define ST_WAKE_FINISH 0x2
+
+/* receive states */
+#define ST_RX_WAIT_7F 0x1
+#define ST_RX_WAIT_HEAD 0x2
+#define ST_RX_WAIT_SILENT_END 0x4
+
+/* send states */
+#define ST_TX_NONE 0x0
+#define ST_TX_REQUEST 0x2
+#define ST_TX_REPLY 0x4
+
+/* buffer packet constants */
+#define BUF_PULSE_BIT 0x80
+#define BUF_LEN_MASK 0x7f
+#define BUF_REPEAT_BYTE 0x70
+#define BUF_REPEAT_MASK 0xf0
+
+/* CIR settings */
+
+/* total length of CIR and CIR WAKE */
+#define CIR_IOREG_LENGTH 0x0f
+
+/* RX limit length, 8 high bits for SLCH, 8 low bits for SLCL (0x7d0 = 2000) */
+#define CIR_RX_LIMIT_COUNT 0x7d0
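+/* with the default 50us sample period, 0x7d0 (2000) samples works out to a
+ * 100ms limit before the packet-end (PE) interrupt fires */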
+
+/* CIR Regs */
+#define CIR_IRCON 0x00
+#define CIR_IRSTS 0x01
+#define CIR_IREN 0x02
+#define CIR_RXFCONT 0x03
+#define CIR_CP 0x04
+#define CIR_CC 0x05
+#define CIR_SLCH 0x06
+#define CIR_SLCL 0x07
+#define CIR_FIFOCON 0x08
+#define CIR_IRFIFOSTS 0x09
+#define CIR_SRXFIFO 0x0a
+#define CIR_TXFCONT 0x0b
+#define CIR_STXFIFO 0x0c
+#define CIR_FCCH 0x0d
+#define CIR_FCCL 0x0e
+#define CIR_IRFSM 0x0f
+
+/* CIR IRCON settings */
+#define CIR_IRCON_RECV 0x80
+#define CIR_IRCON_WIREN 0x40
+#define CIR_IRCON_TXEN 0x20
+#define CIR_IRCON_RXEN 0x10
+#define CIR_IRCON_WRXINV 0x08
+#define CIR_IRCON_RXINV 0x04
+
+#define CIR_IRCON_SAMPLE_PERIOD_SEL_1 0x00
+#define CIR_IRCON_SAMPLE_PERIOD_SEL_25 0x01
+#define CIR_IRCON_SAMPLE_PERIOD_SEL_50 0x02
+#define CIR_IRCON_SAMPLE_PERIOD_SEL_100 0x03
+
+/* FIXME: make this a runtime option */
+/* select sample period as 50us */
+#define CIR_IRCON_SAMPLE_PERIOD_SEL CIR_IRCON_SAMPLE_PERIOD_SEL_50
+
+/* CIR IRSTS settings */
+#define CIR_IRSTS_RDR 0x80
+#define CIR_IRSTS_RTR 0x40
+#define CIR_IRSTS_PE 0x20
+#define CIR_IRSTS_RFO 0x10
+#define CIR_IRSTS_TE 0x08
+#define CIR_IRSTS_TTR 0x04
+#define CIR_IRSTS_TFU 0x02
+#define CIR_IRSTS_GH 0x01
+
+/* CIR IREN settings */
+#define CIR_IREN_RDR 0x80
+#define CIR_IREN_RTR 0x40
+#define CIR_IREN_PE 0x20
+#define CIR_IREN_RFO 0x10
+#define CIR_IREN_TE 0x08
+#define CIR_IREN_TTR 0x04
+#define CIR_IREN_TFU 0x02
+#define CIR_IREN_GH 0x01
+
+/* CIR FIFOCON settings */
+#define CIR_FIFOCON_TXFIFOCLR 0x80
+
+#define CIR_FIFOCON_TX_TRIGGER_LEV_31 0x00
+#define CIR_FIFOCON_TX_TRIGGER_LEV_24 0x10
+#define CIR_FIFOCON_TX_TRIGGER_LEV_16 0x20
+#define CIR_FIFOCON_TX_TRIGGER_LEV_8 0x30
+
+/* FIXME: make this a runtime option */
+/* select TX trigger level as 16 */
+#define CIR_FIFOCON_TX_TRIGGER_LEV CIR_FIFOCON_TX_TRIGGER_LEV_16
+
+#define CIR_FIFOCON_RXFIFOCLR 0x08
+
+#define CIR_FIFOCON_RX_TRIGGER_LEV_1 0x00
+#define CIR_FIFOCON_RX_TRIGGER_LEV_8 0x01
+#define CIR_FIFOCON_RX_TRIGGER_LEV_16 0x02
+#define CIR_FIFOCON_RX_TRIGGER_LEV_24 0x03
+
+/* FIXME: make this a runtime option */
+/* select RX trigger level as 24 */
+#define CIR_FIFOCON_RX_TRIGGER_LEV CIR_FIFOCON_RX_TRIGGER_LEV_24
+
+/* CIR IRFIFOSTS settings */
+#define CIR_IRFIFOSTS_IR_PENDING 0x80
+#define CIR_IRFIFOSTS_RX_GS 0x40
+#define CIR_IRFIFOSTS_RX_FTA 0x20
+#define CIR_IRFIFOSTS_RX_EMPTY 0x10
+#define CIR_IRFIFOSTS_RX_FULL 0x08
+#define CIR_IRFIFOSTS_TX_FTA 0x04
+#define CIR_IRFIFOSTS_TX_EMPTY 0x02
+#define CIR_IRFIFOSTS_TX_FULL 0x01
+
+
+/* CIR WAKE UP Regs */
+#define CIR_WAKE_IRCON 0x00
+#define CIR_WAKE_IRSTS 0x01
+#define CIR_WAKE_IREN 0x02
+#define CIR_WAKE_FIFO_CMP_DEEP 0x03
+#define CIR_WAKE_FIFO_CMP_TOL 0x04
+#define CIR_WAKE_FIFO_COUNT 0x05
+#define CIR_WAKE_SLCH 0x06
+#define CIR_WAKE_SLCL 0x07
+#define CIR_WAKE_FIFOCON 0x08
+#define CIR_WAKE_SRXFSTS 0x09
+#define CIR_WAKE_SAMPLE_RX_FIFO 0x0a
+#define CIR_WAKE_WR_FIFO_DATA 0x0b
+#define CIR_WAKE_RD_FIFO_ONLY 0x0c
+#define CIR_WAKE_RD_FIFO_ONLY_IDX 0x0d
+#define CIR_WAKE_FIFO_IGNORE 0x0e
+#define CIR_WAKE_IRFSM 0x0f
+
+/* CIR WAKE UP IRCON settings */
+#define CIR_WAKE_IRCON_DEC_RST 0x80
+#define CIR_WAKE_IRCON_MODE1 0x40
+#define CIR_WAKE_IRCON_MODE0 0x20
+#define CIR_WAKE_IRCON_RXEN 0x10
+#define CIR_WAKE_IRCON_R 0x08
+#define CIR_WAKE_IRCON_RXINV 0x04
+
+/* FIXME/jarod: make this a runtime option */
+/* select the same sample period as the CIR registers */
+#define CIR_WAKE_IRCON_SAMPLE_PERIOD_SEL CIR_IRCON_SAMPLE_PERIOD_SEL_50
+
+/* CIR WAKE IRSTS Bits */
+#define CIR_WAKE_IRSTS_RDR 0x80
+#define CIR_WAKE_IRSTS_RTR 0x40
+#define CIR_WAKE_IRSTS_PE 0x20
+#define CIR_WAKE_IRSTS_RFO 0x10
+#define CIR_WAKE_IRSTS_GH 0x08
+#define CIR_WAKE_IRSTS_IR_PENDING 0x01
+
+/* CIR WAKE UP IREN Bits */
+#define CIR_WAKE_IREN_RDR 0x80
+#define CIR_WAKE_IREN_RTR 0x40
+#define CIR_WAKE_IREN_PE 0x20
+#define CIR_WAKE_IREN_RFO 0x10
+#define CIR_WAKE_IREN_TE 0x08
+#define CIR_WAKE_IREN_TTR 0x04
+#define CIR_WAKE_IREN_TFU 0x02
+#define CIR_WAKE_IREN_GH 0x01
+
+/* CIR WAKE FIFOCON settings */
+#define CIR_WAKE_FIFOCON_RXFIFOCLR 0x08
+
+#define CIR_WAKE_FIFOCON_RX_TRIGGER_LEV_67 0x00
+#define CIR_WAKE_FIFOCON_RX_TRIGGER_LEV_66 0x01
+#define CIR_WAKE_FIFOCON_RX_TRIGGER_LEV_65 0x02
+#define CIR_WAKE_FIFOCON_RX_TRIGGER_LEV_64 0x03
+
+/* FIXME: make this a runtime option */
+/* select WAKE UP RX trigger level as 67 */
+#define CIR_WAKE_FIFOCON_RX_TRIGGER_LEV CIR_WAKE_FIFOCON_RX_TRIGGER_LEV_67
+
+/* CIR WAKE SRXFSTS settings */
+#define CIR_WAKE_IRFIFOSTS_RX_GS 0x80
+#define CIR_WAKE_IRFIFOSTS_RX_FTA 0x40
+#define CIR_WAKE_IRFIFOSTS_RX_EMPTY 0x20
+#define CIR_WAKE_IRFIFOSTS_RX_FULL 0x10
+
+/* CIR Wake FIFO buffer is 67 bytes long */
+#define CIR_WAKE_FIFO_LEN 67
+/* CIR Wake byte comparison tolerance */
+#define CIR_WAKE_CMP_TOLERANCE 5
+
+/*
+ * Extended Function Enable Registers:
+ * Extended Function Index Register
+ * Extended Function Data Register
+ */
+#define CR_EFIR 0x2e
+#define CR_EFDR 0x2f
+
+/* Possible alternate EFER values, depends on how the chip is wired */
+#define CR_EFIR2 0x4e
+#define CR_EFDR2 0x4f
+
+/* Extended Function Mode enable/disable magic values */
+#define EFER_EFM_ENABLE 0x87
+#define EFER_EFM_DISABLE 0xaa
+
+/* Chip IDs found in CR_CHIP_ID_{HI,LO} */
+#define CHIP_ID_HIGH 0xb4
+#define CHIP_ID_LOW 0x72
+#define CHIP_ID_LOW2 0x73
+
+/* Config regs we need to care about */
+#define CR_SOFTWARE_RESET 0x02
+#define CR_LOGICAL_DEV_SEL 0x07
+#define CR_CHIP_ID_HI 0x20
+#define CR_CHIP_ID_LO 0x21
+#define CR_DEV_POWER_DOWN 0x22 /* bit 2 is CIR power, default power on */
+#define CR_OUTPUT_PIN_SEL 0x27
+#define CR_LOGICAL_DEV_EN 0x30 /* valid for all logical devices */
+/* next three regs valid for both the CIR and CIR_WAKE logical devices */
+#define CR_CIR_BASE_ADDR_HI 0x60
+#define CR_CIR_BASE_ADDR_LO 0x61
+#define CR_CIR_IRQ_RSRC 0x70
+/* next three regs valid only for ACPI logical dev */
+#define CR_ACPI_CIR_WAKE 0xe0
+#define CR_ACPI_IRQ_EVENTS 0xf6
+#define CR_ACPI_IRQ_EVENTS2 0xf7
+
+/* Logical devices that we need to care about */
+#define LOGICAL_DEV_LPT 0x01
+#define LOGICAL_DEV_CIR 0x06
+#define LOGICAL_DEV_ACPI 0x0a
+#define LOGICAL_DEV_CIR_WAKE 0x0e
+
+#define LOGICAL_DEV_DISABLE 0x00
+#define LOGICAL_DEV_ENABLE 0x01
+
+#define CIR_WAKE_ENABLE_BIT 0x08
+#define CIR_INTR_MOUSE_IRQ_BIT 0x80
+#define PME_INTR_CIR_PASS_BIT 0x08
+
+#define OUTPUT_PIN_SEL_MASK 0xbc
+#define OUTPUT_ENABLE_CIR 0x01 /* Pin95=CIRRX, Pin96=CIRTX1 */
+#define OUTPUT_ENABLE_CIRWB 0x40 /* enable wide-band sensor */
+
+/* MCE CIR signal lengths, relative to the sample period */
+
+/* MCE CIR controller signal length: about 43ms
+ * 43ms / 50us (sample period) * 0.85 (inaccuracy)
+ */
+#define CONTROLLER_BUF_LEN_MIN 830
+
+/* MCE CIR keyboard signal length: about 26ms
+ * 26ms / 50us (sample period) * 0.85 (inaccuracy)
+ */
+#define KEYBOARD_BUF_LEN_MAX 650
+#define KEYBOARD_BUF_LEN_MIN 610
+
+/* MCE CIR mouse signal length: about 24ms
+ * 24ms / 50us (sample period) * 0.85 (inaccuracy)
+ */
+#define MOUSE_BUF_LEN_MIN 565
+
+#define CIR_SAMPLE_PERIOD 50
+#define CIR_SAMPLE_LOW_INACCURACY 0.85
+
+/* maximum silence time that the driver will send to lirc */
+#define MAX_SILENCE_TIME 60000
+
+#if CIR_IRCON_SAMPLE_PERIOD_SEL == CIR_IRCON_SAMPLE_PERIOD_SEL_100
+#define SAMPLE_PERIOD 100
+
+#elif CIR_IRCON_SAMPLE_PERIOD_SEL == CIR_IRCON_SAMPLE_PERIOD_SEL_50
+#define SAMPLE_PERIOD 50
+
+#elif CIR_IRCON_SAMPLE_PERIOD_SEL == CIR_IRCON_SAMPLE_PERIOD_SEL_25
+#define SAMPLE_PERIOD 25
+
+#else
+#define SAMPLE_PERIOD 1
+#endif
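+
+/*
+ * Example: with the default 50us SAMPLE_PERIOD, a saturated count field
+ * (BUF_LEN_MASK, 0x7f = 127) covers 127 * 50us = 6.35ms, so longer pulses
+ * and spaces arrive as runs of saturated samples that
+ * nvt_process_rx_ir_data() accumulates into a single event.
+ */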
+
+/* valid carrier value range, per the Vista MCE definition */
+#define MAX_CARRIER 60000
+#define MIN_CARRIER 30000
diff --git a/drivers/media/IR/streamzap.c b/drivers/media/IR/streamzap.c
index 058e29fd478c..548381c35bfd 100644
--- a/drivers/media/IR/streamzap.c
+++ b/drivers/media/IR/streamzap.c
@@ -38,7 +38,7 @@
#include <linux/input.h>
#include <media/ir-core.h>
-#define DRIVER_VERSION "1.60"
+#define DRIVER_VERSION "1.61"
#define DRIVER_NAME "streamzap"
#define DRIVER_DESC "Streamzap Remote Control driver"
@@ -61,14 +61,21 @@ static struct usb_device_id streamzap_table[] = {
MODULE_DEVICE_TABLE(usb, streamzap_table);
-#define STREAMZAP_PULSE_MASK 0xf0
-#define STREAMZAP_SPACE_MASK 0x0f
-#define STREAMZAP_TIMEOUT 0xff
-#define STREAMZAP_RESOLUTION 256
+#define SZ_PULSE_MASK 0xf0
+#define SZ_SPACE_MASK 0x0f
+#define SZ_TIMEOUT 0xff
+#define SZ_RESOLUTION 256
/* number of samples buffered */
#define SZ_BUF_LEN 128
+/* from ir-rc5-sz-decoder.c */
+#ifdef CONFIG_IR_RC5_SZ_DECODER_MODULE
+#define load_rc5_sz_decode() request_module("ir-rc5-sz-decoder")
+#else
+#define load_rc5_sz_decode() 0
+#endif
+
enum StreamzapDecoderState {
PulseSpace,
FullPulse,
@@ -81,7 +88,6 @@ struct streamzap_ir {
/* ir-core */
struct ir_dev_props *props;
- struct ir_raw_event rawir;
/* core device info */
struct device *dev;
@@ -98,17 +104,6 @@ struct streamzap_ir {
dma_addr_t dma_in;
unsigned int buf_in_len;
- /* timer used to support delay buffering */
- struct timer_list delay_timer;
- bool timer_running;
- spinlock_t timer_lock;
- struct timer_list flush_timer;
- bool flush;
-
- /* delay buffer */
- struct kfifo fifo;
- bool fifo_initialized;
-
/* track what state we're in */
enum StreamzapDecoderState decoder_state;
/* tracks whether we are currently receiving some signal */
@@ -118,7 +113,7 @@ struct streamzap_ir {
/* start time of signal; necessary for gap tracking */
struct timeval signal_last;
struct timeval signal_start;
- /* bool timeout_enabled; */
+ bool timeout_enabled;
char name[128];
char phys[64];
@@ -143,122 +138,16 @@ static struct usb_driver streamzap_driver = {
.id_table = streamzap_table,
};
-static void streamzap_stop_timer(struct streamzap_ir *sz)
-{
- unsigned long flags;
-
- spin_lock_irqsave(&sz->timer_lock, flags);
- if (sz->timer_running) {
- sz->timer_running = false;
- spin_unlock_irqrestore(&sz->timer_lock, flags);
- del_timer_sync(&sz->delay_timer);
- } else {
- spin_unlock_irqrestore(&sz->timer_lock, flags);
- }
-}
-
-static void streamzap_flush_timeout(unsigned long arg)
-{
- struct streamzap_ir *sz = (struct streamzap_ir *)arg;
-
- dev_info(sz->dev, "%s: callback firing\n", __func__);
-
- /* finally start accepting data */
- sz->flush = false;
-}
-
-static void streamzap_delay_timeout(unsigned long arg)
-{
- struct streamzap_ir *sz = (struct streamzap_ir *)arg;
- struct ir_raw_event rawir = { .pulse = false, .duration = 0 };
- unsigned long flags;
- int len, ret;
- static unsigned long delay;
- bool wake = false;
-
- /* deliver data every 10 ms */
- delay = msecs_to_jiffies(10);
-
- spin_lock_irqsave(&sz->timer_lock, flags);
-
- if (kfifo_len(&sz->fifo) > 0) {
- ret = kfifo_out(&sz->fifo, &rawir, sizeof(rawir));
- if (ret != sizeof(rawir))
- dev_err(sz->dev, "Problem w/kfifo_out...\n");
- ir_raw_event_store(sz->idev, &rawir);
- wake = true;
- }
-
- len = kfifo_len(&sz->fifo);
- if (len > 0) {
- while ((len < SZ_BUF_LEN / 2) &&
- (len < SZ_BUF_LEN * sizeof(int))) {
- ret = kfifo_out(&sz->fifo, &rawir, sizeof(rawir));
- if (ret != sizeof(rawir))
- dev_err(sz->dev, "Problem w/kfifo_out...\n");
- ir_raw_event_store(sz->idev, &rawir);
- wake = true;
- len = kfifo_len(&sz->fifo);
- }
- if (sz->timer_running)
- mod_timer(&sz->delay_timer, jiffies + delay);
-
- } else {
- sz->timer_running = false;
- }
-
- if (wake)
- ir_raw_event_handle(sz->idev);
-
- spin_unlock_irqrestore(&sz->timer_lock, flags);
-}
-
-static void streamzap_flush_delay_buffer(struct streamzap_ir *sz)
+static void sz_push(struct streamzap_ir *sz, struct ir_raw_event rawir)
{
- struct ir_raw_event rawir = { .pulse = false, .duration = 0 };
- bool wake = false;
- int ret;
-
- while (kfifo_len(&sz->fifo) > 0) {
- ret = kfifo_out(&sz->fifo, &rawir, sizeof(rawir));
- if (ret != sizeof(rawir))
- dev_err(sz->dev, "Problem w/kfifo_out...\n");
- ir_raw_event_store(sz->idev, &rawir);
- wake = true;
- }
-
- if (wake)
- ir_raw_event_handle(sz->idev);
-}
-
-static void sz_push(struct streamzap_ir *sz)
-{
- struct ir_raw_event rawir = { .pulse = false, .duration = 0 };
- unsigned long flags;
- int ret;
-
- spin_lock_irqsave(&sz->timer_lock, flags);
- if (kfifo_len(&sz->fifo) >= sizeof(int) * SZ_BUF_LEN) {
- ret = kfifo_out(&sz->fifo, &rawir, sizeof(rawir));
- if (ret != sizeof(rawir))
- dev_err(sz->dev, "Problem w/kfifo_out...\n");
- ir_raw_event_store(sz->idev, &rawir);
- }
-
- kfifo_in(&sz->fifo, &sz->rawir, sizeof(rawir));
-
- if (!sz->timer_running) {
- sz->delay_timer.expires = jiffies + (HZ / 10);
- add_timer(&sz->delay_timer);
- sz->timer_running = true;
- }
-
- spin_unlock_irqrestore(&sz->timer_lock, flags);
+ ir_raw_event_store(sz->idev, &rawir);
}
static void sz_push_full_pulse(struct streamzap_ir *sz,
unsigned char value)
{
+ DEFINE_IR_RAW_EVENT(rawir);
+
if (sz->idle) {
long deltv;
@@ -266,57 +155,59 @@ static void sz_push_full_pulse(struct streamzap_ir *sz,
do_gettimeofday(&sz->signal_start);
deltv = sz->signal_start.tv_sec - sz->signal_last.tv_sec;
- sz->rawir.pulse = false;
+ rawir.pulse = false;
if (deltv > 15) {
/* really long time */
- sz->rawir.duration = IR_MAX_DURATION;
+ rawir.duration = IR_MAX_DURATION;
} else {
- sz->rawir.duration = (int)(deltv * 1000000 +
+ rawir.duration = (int)(deltv * 1000000 +
sz->signal_start.tv_usec -
sz->signal_last.tv_usec);
- sz->rawir.duration -= sz->sum;
- sz->rawir.duration *= 1000;
- sz->rawir.duration &= IR_MAX_DURATION;
+ rawir.duration -= sz->sum;
+ rawir.duration *= 1000;
+ rawir.duration &= IR_MAX_DURATION;
}
- dev_dbg(sz->dev, "ls %u\n", sz->rawir.duration);
- sz_push(sz);
+ dev_dbg(sz->dev, "ls %u\n", rawir.duration);
+ sz_push(sz, rawir);
- sz->idle = 0;
+ sz->idle = false;
sz->sum = 0;
}
- sz->rawir.pulse = true;
- sz->rawir.duration = ((int) value) * STREAMZAP_RESOLUTION;
- sz->rawir.duration += STREAMZAP_RESOLUTION / 2;
- sz->sum += sz->rawir.duration;
- sz->rawir.duration *= 1000;
- sz->rawir.duration &= IR_MAX_DURATION;
- dev_dbg(sz->dev, "p %u\n", sz->rawir.duration);
- sz_push(sz);
+ rawir.pulse = true;
+ rawir.duration = ((int) value) * SZ_RESOLUTION;
+ rawir.duration += SZ_RESOLUTION / 2;
+ sz->sum += rawir.duration;
+ rawir.duration *= 1000;
+ rawir.duration &= IR_MAX_DURATION;
+ dev_dbg(sz->dev, "p %u\n", rawir.duration);
+ sz_push(sz, rawir);
}
static void sz_push_half_pulse(struct streamzap_ir *sz,
unsigned char value)
{
- sz_push_full_pulse(sz, (value & STREAMZAP_PULSE_MASK) >> 4);
+ sz_push_full_pulse(sz, (value & SZ_PULSE_MASK) >> 4);
}
static void sz_push_full_space(struct streamzap_ir *sz,
unsigned char value)
{
- sz->rawir.pulse = false;
- sz->rawir.duration = ((int) value) * STREAMZAP_RESOLUTION;
- sz->rawir.duration += STREAMZAP_RESOLUTION / 2;
- sz->sum += sz->rawir.duration;
- sz->rawir.duration *= 1000;
- dev_dbg(sz->dev, "s %u\n", sz->rawir.duration);
- sz_push(sz);
+ DEFINE_IR_RAW_EVENT(rawir);
+
+ rawir.pulse = false;
+ rawir.duration = ((int) value) * SZ_RESOLUTION;
+ rawir.duration += SZ_RESOLUTION / 2;
+ sz->sum += rawir.duration;
+ rawir.duration *= 1000;
+ dev_dbg(sz->dev, "s %u\n", rawir.duration);
+ sz_push(sz, rawir);
}
static void sz_push_half_space(struct streamzap_ir *sz,
unsigned long value)
{
- sz_push_full_space(sz, value & STREAMZAP_SPACE_MASK);
+ sz_push_full_space(sz, value & SZ_SPACE_MASK);
}
/**
@@ -330,10 +221,8 @@ static void streamzap_callback(struct urb *urb)
struct streamzap_ir *sz;
unsigned int i;
int len;
- #if 0
- static int timeout = (((STREAMZAP_TIMEOUT * STREAMZAP_RESOLUTION) &
+ static int timeout = (((SZ_TIMEOUT * SZ_RESOLUTION * 1000) &
IR_MAX_DURATION) | 0x03000000);
- #endif
if (!urb)
return;
@@ -356,57 +245,53 @@ static void streamzap_callback(struct urb *urb)
}
dev_dbg(sz->dev, "%s: received urb, len %d\n", __func__, len);
- if (!sz->flush) {
- for (i = 0; i < urb->actual_length; i++) {
- dev_dbg(sz->dev, "%d: %x\n", i,
- (unsigned char)sz->buf_in[i]);
- switch (sz->decoder_state) {
- case PulseSpace:
- if ((sz->buf_in[i] & STREAMZAP_PULSE_MASK) ==
- STREAMZAP_PULSE_MASK) {
- sz->decoder_state = FullPulse;
- continue;
- } else if ((sz->buf_in[i] & STREAMZAP_SPACE_MASK)
- == STREAMZAP_SPACE_MASK) {
- sz_push_half_pulse(sz, sz->buf_in[i]);
- sz->decoder_state = FullSpace;
- continue;
- } else {
- sz_push_half_pulse(sz, sz->buf_in[i]);
- sz_push_half_space(sz, sz->buf_in[i]);
- }
- break;
- case FullPulse:
- sz_push_full_pulse(sz, sz->buf_in[i]);
- sz->decoder_state = IgnorePulse;
- break;
- case FullSpace:
- if (sz->buf_in[i] == STREAMZAP_TIMEOUT) {
- sz->idle = 1;
- streamzap_stop_timer(sz);
- #if 0
- if (sz->timeout_enabled) {
- sz->rawir.pulse = false;
- sz->rawir.duration = timeout;
- sz->rawir.duration *= 1000;
- sz_push(sz);
- }
- #endif
- streamzap_flush_delay_buffer(sz);
- } else
- sz_push_full_space(sz, sz->buf_in[i]);
- sz->decoder_state = PulseSpace;
- break;
- case IgnorePulse:
- if ((sz->buf_in[i]&STREAMZAP_SPACE_MASK) ==
- STREAMZAP_SPACE_MASK) {
- sz->decoder_state = FullSpace;
- continue;
- }
+ for (i = 0; i < len; i++) {
+ dev_dbg(sz->dev, "sz idx %d: %x\n",
+ i, (unsigned char)sz->buf_in[i]);
+ switch (sz->decoder_state) {
+ case PulseSpace:
+ if ((sz->buf_in[i] & SZ_PULSE_MASK) ==
+ SZ_PULSE_MASK) {
+ sz->decoder_state = FullPulse;
+ continue;
+ } else if ((sz->buf_in[i] & SZ_SPACE_MASK)
+ == SZ_SPACE_MASK) {
+ sz_push_half_pulse(sz, sz->buf_in[i]);
+ sz->decoder_state = FullSpace;
+ continue;
+ } else {
+ sz_push_half_pulse(sz, sz->buf_in[i]);
sz_push_half_space(sz, sz->buf_in[i]);
- sz->decoder_state = PulseSpace;
- break;
}
+ break;
+ case FullPulse:
+ sz_push_full_pulse(sz, sz->buf_in[i]);
+ sz->decoder_state = IgnorePulse;
+ break;
+ case FullSpace:
+ if (sz->buf_in[i] == SZ_TIMEOUT) {
+ DEFINE_IR_RAW_EVENT(rawir);
+
+ rawir.pulse = false;
+ rawir.duration = timeout;
+ sz->idle = true;
+ if (sz->timeout_enabled)
+ sz_push(sz, rawir);
+ ir_raw_event_handle(sz->idev);
+ } else {
+ sz_push_full_space(sz, sz->buf_in[i]);
+ }
+ sz->decoder_state = PulseSpace;
+ break;
+ case IgnorePulse:
+ if ((sz->buf_in[i] & SZ_SPACE_MASK) ==
+ SZ_SPACE_MASK) {
+ sz->decoder_state = FullSpace;
+ continue;
+ }
+ sz_push_half_space(sz, sz->buf_in[i]);
+ sz->decoder_state = PulseSpace;
+ break;
}
}
@@ -446,12 +331,11 @@ static struct input_dev *streamzap_init_input_dev(struct streamzap_ir *sz)
props->priv = sz;
props->driver_type = RC_DRIVER_IR_RAW;
- /* FIXME: not sure about supported protocols, check on this */
- props->allowed_protos = IR_TYPE_RC5 | IR_TYPE_RC6;
+ props->allowed_protos = IR_TYPE_ALL;
sz->props = props;
- ret = ir_input_register(idev, RC_MAP_RC5_STREAMZAP, props, DRIVER_NAME);
+ ret = ir_input_register(idev, RC_MAP_STREAMZAP, props, DRIVER_NAME);
if (ret < 0) {
dev_err(dev, "remote input device register failed\n");
goto irdev_failed;
@@ -467,29 +351,6 @@ idev_alloc_failed:
return NULL;
}
-static int streamzap_delay_buf_init(struct streamzap_ir *sz)
-{
- int ret;
-
- ret = kfifo_alloc(&sz->fifo, sizeof(int) * SZ_BUF_LEN,
- GFP_KERNEL);
- if (ret == 0)
- sz->fifo_initialized = 1;
-
- return ret;
-}
-
-static void streamzap_start_flush_timer(struct streamzap_ir *sz)
-{
- sz->flush_timer.expires = jiffies + HZ;
- sz->flush = true;
- add_timer(&sz->flush_timer);
-
- sz->urb_in->dev = sz->usbdev;
- if (usb_submit_urb(sz->urb_in, GFP_ATOMIC))
- dev_err(sz->dev, "urb submit failed\n");
-}
-
/**
* streamzap_probe
*
@@ -575,35 +436,21 @@ static int __devinit streamzap_probe(struct usb_interface *intf,
snprintf(name + strlen(name), sizeof(name) - strlen(name),
" %s", buf);
- retval = streamzap_delay_buf_init(sz);
- if (retval) {
- dev_err(&intf->dev, "%s: delay buffer init failed\n", __func__);
- goto free_urb_in;
- }
-
sz->idev = streamzap_init_input_dev(sz);
if (!sz->idev)
goto input_dev_fail;
sz->idle = true;
sz->decoder_state = PulseSpace;
+ /* FIXME: don't yet have a way to set this */
+ sz->timeout_enabled = true;
#if 0
/* not yet supported, depends on patches from maxim */
/* see also: LIRC_GET_REC_RESOLUTION and LIRC_SET_REC_TIMEOUT */
- sz->timeout_enabled = false;
- sz->min_timeout = STREAMZAP_TIMEOUT * STREAMZAP_RESOLUTION * 1000;
- sz->max_timeout = STREAMZAP_TIMEOUT * STREAMZAP_RESOLUTION * 1000;
+ sz->min_timeout = SZ_TIMEOUT * SZ_RESOLUTION * 1000;
+ sz->max_timeout = SZ_TIMEOUT * SZ_RESOLUTION * 1000;
#endif
- init_timer(&sz->delay_timer);
- sz->delay_timer.function = streamzap_delay_timeout;
- sz->delay_timer.data = (unsigned long)sz;
- spin_lock_init(&sz->timer_lock);
-
- init_timer(&sz->flush_timer);
- sz->flush_timer.function = streamzap_flush_timeout;
- sz->flush_timer.data = (unsigned long)sz;
-
do_gettimeofday(&sz->signal_start);
/* Complete final initialisations */
@@ -615,16 +462,18 @@ static int __devinit streamzap_probe(struct usb_interface *intf,
usb_set_intfdata(intf, sz);
- streamzap_start_flush_timer(sz);
+ if (usb_submit_urb(sz->urb_in, GFP_ATOMIC))
+ dev_err(sz->dev, "urb submit failed\n");
dev_info(sz->dev, "Registered %s on usb%d:%d\n", name,
usbdev->bus->busnum, usbdev->devnum);
+ /* Load the streamzap not-quite-rc5 decoder too */
+ load_rc5_sz_decode();
+
return 0;
input_dev_fail:
- kfifo_free(&sz->fifo);
-free_urb_in:
usb_free_urb(sz->urb_in);
free_buf_in:
usb_free_coherent(usbdev, maxp, sz->buf_in, sz->dma_in);
@@ -654,13 +503,6 @@ static void streamzap_disconnect(struct usb_interface *interface)
if (!sz)
return;
- if (sz->flush) {
- sz->flush = false;
- del_timer_sync(&sz->flush_timer);
- }
-
- streamzap_stop_timer(sz);
-
sz->usbdev = NULL;
ir_input_unregister(sz->idev);
usb_kill_urb(sz->urb_in);
@@ -674,13 +516,6 @@ static int streamzap_suspend(struct usb_interface *intf, pm_message_t message)
{
struct streamzap_ir *sz = usb_get_intfdata(intf);
- if (sz->flush) {
- sz->flush = false;
- del_timer_sync(&sz->flush_timer);
- }
-
- streamzap_stop_timer(sz);
-
usb_kill_urb(sz->urb_in);
return 0;
@@ -690,13 +525,6 @@ static int streamzap_resume(struct usb_interface *intf)
{
struct streamzap_ir *sz = usb_get_intfdata(intf);
- if (sz->fifo_initialized)
- kfifo_reset(&sz->fifo);
-
- sz->flush_timer.expires = jiffies + HZ;
- sz->flush = true;
- add_timer(&sz->flush_timer);
-
if (usb_submit_urb(sz->urb_in, GFP_ATOMIC)) {
dev_err(sz->dev, "Error sumbiting urb\n");
return -EIO;
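
For reference, here is a minimal userspace sketch of the duration arithmetic the reworked streamzap code above performs before handing each sample to ir-core; SZ_RESOLUTION is assumed to be the driver's 256 us tick and the IR_MAX_DURATION masking is left out, so treat it as an illustration only, not driver code.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define SZ_RESOLUTION 256	/* assumed tick size, in microseconds */

struct raw_event {
	bool pulse;
	uint32_t duration;	/* nanoseconds, as ir-core expects */
};

/* Mirrors the math in sz_push_full_pulse()/sz_push_full_space(). */
static struct raw_event sz_sample(unsigned char value, bool pulse)
{
	struct raw_event ev;

	ev.pulse = pulse;
	ev.duration = (uint32_t)value * SZ_RESOLUTION;	/* ticks -> us */
	ev.duration += SZ_RESOLUTION / 2;		/* round to mid-tick */
	ev.duration *= 1000;				/* us -> ns */
	return ev;
}

int main(void)
{
	struct raw_event ev = sz_sample(0x28, true);

	/* 0x28 ticks: (40 * 256 + 128) us = 10368 us = 10368000 ns */
	printf("%s %u ns\n", ev.pulse ? "pulse" : "space",
	       (unsigned int)ev.duration);
	return 0;
}
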
diff --git a/drivers/media/Kconfig b/drivers/media/Kconfig
index bad2cedb8d96..a28541b2b1a2 100644
--- a/drivers/media/Kconfig
+++ b/drivers/media/Kconfig
@@ -19,7 +19,6 @@ comment "Multimedia core support"
config VIDEO_DEV
tristate "Video For Linux"
- depends on BKL # used in many drivers for ioctl handling, need to kill
---help---
V4L core support for video capture and overlay devices, webcams and
AM/FM radio cards.
diff --git a/drivers/media/common/saa7146_fops.c b/drivers/media/common/saa7146_fops.c
index 4da2a54cb8bd..e3fedc60fe77 100644
--- a/drivers/media/common/saa7146_fops.c
+++ b/drivers/media/common/saa7146_fops.c
@@ -56,7 +56,7 @@ void saa7146_dma_free(struct saa7146_dev *dev,struct videobuf_queue *q,
BUG_ON(in_interrupt());
- videobuf_waiton(&buf->vb,0,0);
+ videobuf_waiton(q, &buf->vb, 0, 0);
videobuf_dma_unmap(q->dev, dma);
videobuf_dma_free(dma);
buf->vb.state = VIDEOBUF_NEEDS_INIT;
diff --git a/drivers/media/common/saa7146_i2c.c b/drivers/media/common/saa7146_i2c.c
index 48cb154c7a46..74ee172b5bc9 100644
--- a/drivers/media/common/saa7146_i2c.c
+++ b/drivers/media/common/saa7146_i2c.c
@@ -391,7 +391,6 @@ static int saa7146_i2c_xfer(struct i2c_adapter* adapter, struct i2c_msg *msg, in
/*****************************************************************************/
/* i2c-adapter helper functions */
-#include <linux/i2c-id.h>
/* exported algorithm data */
static struct i2c_algorithm saa7146_algo = {
@@ -414,7 +413,6 @@ int saa7146_i2c_adapter_prepare(struct saa7146_dev *dev, struct i2c_adapter *i2c
i2c_adapter->dev.parent = &dev->pci->dev;
i2c_adapter->algo = &saa7146_algo;
i2c_adapter->algo_data = NULL;
- i2c_adapter->id = I2C_HW_SAA7146;
i2c_adapter->timeout = SAA7146_I2C_TIMEOUT;
i2c_adapter->retries = SAA7146_I2C_RETRIES;
}
diff --git a/drivers/media/common/saa7146_vbi.c b/drivers/media/common/saa7146_vbi.c
index 8224c301d050..2d4533ab22b7 100644
--- a/drivers/media/common/saa7146_vbi.c
+++ b/drivers/media/common/saa7146_vbi.c
@@ -412,7 +412,7 @@ static int vbi_open(struct saa7146_dev *dev, struct file *file)
V4L2_BUF_TYPE_VBI_CAPTURE,
V4L2_FIELD_SEQ_TB, // FIXME: does this really work?
sizeof(struct saa7146_buf),
- file);
+ file, NULL);
init_timer(&fh->vbi_read_timeout);
fh->vbi_read_timeout.function = vbi_read_timeout;
diff --git a/drivers/media/common/saa7146_video.c b/drivers/media/common/saa7146_video.c
index a212a91a30f0..741c5732b430 100644
--- a/drivers/media/common/saa7146_video.c
+++ b/drivers/media/common/saa7146_video.c
@@ -1386,7 +1386,7 @@ static int video_open(struct saa7146_dev *dev, struct file *file)
V4L2_BUF_TYPE_VIDEO_CAPTURE,
V4L2_FIELD_INTERLACED,
sizeof(struct saa7146_buf),
- file);
+ file, NULL);
return 0;
}
diff --git a/drivers/media/common/tuners/Kconfig b/drivers/media/common/tuners/Kconfig
index b3ed5daaacf2..78b089526e02 100644
--- a/drivers/media/common/tuners/Kconfig
+++ b/drivers/media/common/tuners/Kconfig
@@ -31,7 +31,7 @@ config MEDIA_TUNER
select MEDIA_TUNER_TDA9887 if !MEDIA_TUNER_CUSTOMISE
select MEDIA_TUNER_MC44S803 if !MEDIA_TUNER_CUSTOMISE
-menuconfig MEDIA_TUNER_CUSTOMISE
+config MEDIA_TUNER_CUSTOMISE
bool "Customize analog and hybrid tuner modules to build"
depends on MEDIA_TUNER
default y if EMBEDDED
@@ -44,7 +44,8 @@ menuconfig MEDIA_TUNER_CUSTOMISE
If unsure say N.
-if MEDIA_TUNER_CUSTOMISE
+menu "Customize TV tuners"
+ visible if MEDIA_TUNER_CUSTOMISE
config MEDIA_TUNER_SIMPLE
tristate "Simple tuner support"
@@ -179,4 +180,10 @@ config MEDIA_TUNER_MAX2165
help
A driver for the silicon tuner MAX2165 from Maxim.
-endif # MEDIA_TUNER_CUSTOMISE
+config MEDIA_TUNER_TDA18218
+ tristate "NXP TDA18218 silicon tuner"
+ depends on VIDEO_MEDIA && I2C
+ default m if MEDIA_TUNER_CUSTOMISE
+ help
+ NXP TDA18218 silicon tuner driver.
+endmenu
diff --git a/drivers/media/common/tuners/Makefile b/drivers/media/common/tuners/Makefile
index a5438523f30d..96da03d349ca 100644
--- a/drivers/media/common/tuners/Makefile
+++ b/drivers/media/common/tuners/Makefile
@@ -24,6 +24,7 @@ obj-$(CONFIG_MEDIA_TUNER_MXL5005S) += mxl5005s.o
obj-$(CONFIG_MEDIA_TUNER_MXL5007T) += mxl5007t.o
obj-$(CONFIG_MEDIA_TUNER_MC44S803) += mc44s803.o
obj-$(CONFIG_MEDIA_TUNER_MAX2165) += max2165.o
+obj-$(CONFIG_MEDIA_TUNER_TDA18218) += tda18218.o
EXTRA_CFLAGS += -Idrivers/media/dvb/dvb-core
EXTRA_CFLAGS += -Idrivers/media/dvb/frontends
diff --git a/drivers/media/common/tuners/tda18218.c b/drivers/media/common/tuners/tda18218.c
new file mode 100644
index 000000000000..8da1fdeddaa7
--- /dev/null
+++ b/drivers/media/common/tuners/tda18218.c
@@ -0,0 +1,334 @@
+/*
+ * NXP TDA18218HN silicon tuner driver
+ *
+ * Copyright (C) 2010 Antti Palosaari <crope@iki.fi>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ */
+
+#include "tda18218.h"
+#include "tda18218_priv.h"
+
+static int debug;
+module_param(debug, int, 0644);
+MODULE_PARM_DESC(debug, "Turn on/off debugging (default:off).");
+
+/* write multiple registers */
+static int tda18218_wr_regs(struct tda18218_priv *priv, u8 reg, u8 *val, u8 len)
+{
+ int ret;
+ u8 buf[1+len], quotient, remainder, i, msg_len, msg_len_max;
+ struct i2c_msg msg[1] = {
+ {
+ .addr = priv->cfg->i2c_address,
+ .flags = 0,
+ .buf = buf,
+ }
+ };
+
+ msg_len_max = priv->cfg->i2c_wr_max - 1;
+ quotient = len / msg_len_max;
+ remainder = len % msg_len_max;
+ msg_len = msg_len_max;
+ for (i = 0; (i <= quotient && remainder); i++) {
+ if (i == quotient) /* set len of the last msg */
+ msg_len = remainder;
+
+ msg[0].len = msg_len + 1;
+ buf[0] = reg + i * msg_len_max;
+ memcpy(&buf[1], &val[i * msg_len_max], msg_len);
+
+ ret = i2c_transfer(priv->i2c, msg, 1);
+ if (ret != 1)
+ break;
+ }
+
+ if (ret == 1) {
+ ret = 0;
+ } else {
+ warn("i2c wr failed ret:%d reg:%02x len:%d", ret, reg, len);
+ ret = -EREMOTEIO;
+ }
+
+ return ret;
+}
+
+/* read multiple registers */
+static int tda18218_rd_regs(struct tda18218_priv *priv, u8 reg, u8 *val, u8 len)
+{
+ int ret;
+ u8 buf[reg+len]; /* we must start read always from reg 0x00 */
+ struct i2c_msg msg[2] = {
+ {
+ .addr = priv->cfg->i2c_address,
+ .flags = 0,
+ .len = 1,
+ .buf = "\x00",
+ }, {
+ .addr = priv->cfg->i2c_address,
+ .flags = I2C_M_RD,
+ .len = sizeof(buf),
+ .buf = buf,
+ }
+ };
+
+ ret = i2c_transfer(priv->i2c, msg, 2);
+ if (ret == 2) {
+ memcpy(val, &buf[reg], len);
+ ret = 0;
+ } else {
+ warn("i2c rd failed ret:%d reg:%02x len:%d", ret, reg, len);
+ ret = -EREMOTEIO;
+ }
+
+ return ret;
+}
+
+/* write single register */
+static int tda18218_wr_reg(struct tda18218_priv *priv, u8 reg, u8 val)
+{
+ return tda18218_wr_regs(priv, reg, &val, 1);
+}
+
+/* read single register */
+
+static int tda18218_rd_reg(struct tda18218_priv *priv, u8 reg, u8 *val)
+{
+ return tda18218_rd_regs(priv, reg, val, 1);
+}
+
+static int tda18218_set_params(struct dvb_frontend *fe,
+ struct dvb_frontend_parameters *params)
+{
+ struct tda18218_priv *priv = fe->tuner_priv;
+ int ret;
+ u8 buf[3], i, BP_Filter, LP_Fc;
+ u32 LO_Frac;
+ /* TODO: find out correct AGC algorithm */
+ u8 agc[][2] = {
+ { R20_AGC11, 0x60 },
+ { R23_AGC21, 0x02 },
+ { R20_AGC11, 0xa0 },
+ { R23_AGC21, 0x09 },
+ { R20_AGC11, 0xe0 },
+ { R23_AGC21, 0x0c },
+ { R20_AGC11, 0x40 },
+ { R23_AGC21, 0x01 },
+ { R20_AGC11, 0x80 },
+ { R23_AGC21, 0x08 },
+ { R20_AGC11, 0xc0 },
+ { R23_AGC21, 0x0b },
+ { R24_AGC22, 0x1c },
+ { R24_AGC22, 0x0c },
+ };
+
+ if (fe->ops.i2c_gate_ctrl)
+ fe->ops.i2c_gate_ctrl(fe, 1); /* open I2C-gate */
+
+ /* low-pass filter cut-off frequency */
+ switch (params->u.ofdm.bandwidth) {
+ case BANDWIDTH_6_MHZ:
+ LP_Fc = 0;
+ LO_Frac = params->frequency + 4000000;
+ break;
+ case BANDWIDTH_7_MHZ:
+ LP_Fc = 1;
+ LO_Frac = params->frequency + 3500000;
+ break;
+ case BANDWIDTH_8_MHZ:
+ default:
+ LP_Fc = 2;
+ LO_Frac = params->frequency + 4000000;
+ break;
+ }
+
+ /* band-pass filter */
+ if (LO_Frac < 188000000)
+ BP_Filter = 3;
+ else if (LO_Frac < 253000000)
+ BP_Filter = 4;
+ else if (LO_Frac < 343000000)
+ BP_Filter = 5;
+ else
+ BP_Filter = 6;
+
+ buf[0] = (priv->regs[R1A_IF1] & ~7) | BP_Filter; /* BP_Filter */
+ buf[1] = (priv->regs[R1B_IF2] & ~3) | LP_Fc; /* LP_Fc */
+ buf[2] = priv->regs[R1C_AGC2B];
+ ret = tda18218_wr_regs(priv, R1A_IF1, buf, 3);
+ if (ret)
+ goto error;
+
+ buf[0] = (LO_Frac / 1000) >> 12; /* LO_Frac_0 */
+ buf[1] = (LO_Frac / 1000) >> 4; /* LO_Frac_1 */
+ buf[2] = (LO_Frac / 1000) << 4 |
+ (priv->regs[R0C_MD5] & 0x0f); /* LO_Frac_2 */
+ ret = tda18218_wr_regs(priv, R0A_MD3, buf, 3);
+ if (ret)
+ goto error;
+
+ buf[0] = priv->regs[R0F_MD8] | (1 << 6); /* Freq_prog_Start */
+ ret = tda18218_wr_regs(priv, R0F_MD8, buf, 1);
+ if (ret)
+ goto error;
+
+ buf[0] = priv->regs[R0F_MD8] & ~(1 << 6); /* Freq_prog_Start */
+ ret = tda18218_wr_regs(priv, R0F_MD8, buf, 1);
+ if (ret)
+ goto error;
+
+ /* trigger AGC */
+ for (i = 0; i < ARRAY_SIZE(agc); i++) {
+ ret = tda18218_wr_reg(priv, agc[i][0], agc[i][1]);
+ if (ret)
+ goto error;
+ }
+
+error:
+ if (fe->ops.i2c_gate_ctrl)
+ fe->ops.i2c_gate_ctrl(fe, 0); /* close I2C-gate */
+
+ if (ret)
+ dbg("%s: failed ret:%d", __func__, ret);
+
+ return ret;
+}
+
+static int tda18218_sleep(struct dvb_frontend *fe)
+{
+ struct tda18218_priv *priv = fe->tuner_priv;
+ int ret;
+
+ if (fe->ops.i2c_gate_ctrl)
+ fe->ops.i2c_gate_ctrl(fe, 1); /* open I2C-gate */
+
+ /* standby */
+ ret = tda18218_wr_reg(priv, R17_PD1, priv->regs[R17_PD1] | (1 << 0));
+
+ if (fe->ops.i2c_gate_ctrl)
+ fe->ops.i2c_gate_ctrl(fe, 0); /* close I2C-gate */
+
+ if (ret)
+ dbg("%s: failed ret:%d", __func__, ret);
+
+ return ret;
+}
+
+static int tda18218_init(struct dvb_frontend *fe)
+{
+ struct tda18218_priv *priv = fe->tuner_priv;
+ int ret;
+
+ /* TODO: calibrations */
+
+ if (fe->ops.i2c_gate_ctrl)
+ fe->ops.i2c_gate_ctrl(fe, 1); /* open I2C-gate */
+
+ ret = tda18218_wr_regs(priv, R00_ID, priv->regs, TDA18218_NUM_REGS);
+
+ if (fe->ops.i2c_gate_ctrl)
+ fe->ops.i2c_gate_ctrl(fe, 0); /* close I2C-gate */
+
+ if (ret)
+ dbg("%s: failed ret:%d", __func__, ret);
+
+ return ret;
+}
+
+static int tda18218_release(struct dvb_frontend *fe)
+{
+ kfree(fe->tuner_priv);
+ fe->tuner_priv = NULL;
+ return 0;
+}
+
+static const struct dvb_tuner_ops tda18218_tuner_ops = {
+ .info = {
+ .name = "NXP TDA18218",
+
+ .frequency_min = 174000000,
+ .frequency_max = 864000000,
+ .frequency_step = 1000,
+ },
+
+ .release = tda18218_release,
+ .init = tda18218_init,
+ .sleep = tda18218_sleep,
+
+ .set_params = tda18218_set_params,
+};
+
+struct dvb_frontend *tda18218_attach(struct dvb_frontend *fe,
+ struct i2c_adapter *i2c, struct tda18218_config *cfg)
+{
+ struct tda18218_priv *priv = NULL;
+ u8 val;
+ int ret;
+ /* chip default registers values */
+ static u8 def_regs[] = {
+ 0xc0, 0x88, 0x00, 0x8e, 0x03, 0x00, 0x00, 0xd0, 0x00, 0x40,
+ 0x00, 0x00, 0x07, 0xff, 0x84, 0x09, 0x00, 0x13, 0x00, 0x00,
+ 0x01, 0x84, 0x09, 0xf0, 0x19, 0x0a, 0x8e, 0x69, 0x98, 0x01,
+ 0x00, 0x58, 0x10, 0x40, 0x8c, 0x00, 0x0c, 0x48, 0x85, 0xc9,
+ 0xa7, 0x00, 0x00, 0x00, 0x30, 0x81, 0x80, 0x00, 0x39, 0x00,
+ 0x8a, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xf6, 0xf6
+ };
+
+ priv = kzalloc(sizeof(struct tda18218_priv), GFP_KERNEL);
+ if (priv == NULL)
+ return NULL;
+
+ priv->cfg = cfg;
+ priv->i2c = i2c;
+ fe->tuner_priv = priv;
+
+ if (fe->ops.i2c_gate_ctrl)
+ fe->ops.i2c_gate_ctrl(fe, 1); /* open I2C-gate */
+
+ /* check if the tuner is there */
+ ret = tda18218_rd_reg(priv, R00_ID, &val);
+ dbg("%s: ret:%d chip ID:%02x", __func__, ret, val);
+ if (ret || val != def_regs[R00_ID]) {
+ kfree(priv);
+ return NULL;
+ }
+
+ info("NXP TDA18218HN successfully identified.");
+
+ memcpy(&fe->ops.tuner_ops, &tda18218_tuner_ops,
+ sizeof(struct dvb_tuner_ops));
+ memcpy(priv->regs, def_regs, sizeof(def_regs));
+
+ /* loop-through enabled chip default register values */
+ if (priv->cfg->loop_through) {
+ priv->regs[R17_PD1] = 0xb0;
+ priv->regs[R18_PD2] = 0x59;
+ }
+
+ /* standby */
+ ret = tda18218_wr_reg(priv, R17_PD1, priv->regs[R17_PD1] | (1 << 0));
+ if (ret)
+ dbg("%s: failed ret:%d", __func__, ret);
+
+ if (fe->ops.i2c_gate_ctrl)
+ fe->ops.i2c_gate_ctrl(fe, 0); /* close I2C-gate */
+
+ return fe;
+}
+EXPORT_SYMBOL(tda18218_attach);
+
+MODULE_DESCRIPTION("NXP TDA18218HN silicon tuner driver");
+MODULE_AUTHOR("Antti Palosaari <crope@iki.fi>");
+MODULE_LICENSE("GPL");
diff --git a/drivers/media/common/tuners/tda18218.h b/drivers/media/common/tuners/tda18218.h
new file mode 100644
index 000000000000..b4180d180029
--- /dev/null
+++ b/drivers/media/common/tuners/tda18218.h
@@ -0,0 +1,45 @@
+/*
+ * NXP TDA18218HN silicon tuner driver
+ *
+ * Copyright (C) 2010 Antti Palosaari <crope@iki.fi>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ */
+
+#ifndef TDA18218_H
+#define TDA18218_H
+
+#include "dvb_frontend.h"
+
+struct tda18218_config {
+ u8 i2c_address;
+ u8 i2c_wr_max;
+ u8 loop_through:1;
+};
+
+#if defined(CONFIG_MEDIA_TUNER_TDA18218) || \
+ (defined(CONFIG_MEDIA_TUNER_TDA18218_MODULE) && defined(MODULE))
+extern struct dvb_frontend *tda18218_attach(struct dvb_frontend *fe,
+ struct i2c_adapter *i2c, struct tda18218_config *cfg);
+#else
+static inline struct dvb_frontend *tda18218_attach(struct dvb_frontend *fe,
+ struct i2c_adapter *i2c, struct tda18218_config *cfg)
+{
+ printk(KERN_WARNING "%s: driver disabled by Kconfig\n", __func__);
+ return NULL;
+}
+#endif
+
+#endif
diff --git a/drivers/media/common/tuners/tda18218_priv.h b/drivers/media/common/tuners/tda18218_priv.h
new file mode 100644
index 000000000000..904e5365c78c
--- /dev/null
+++ b/drivers/media/common/tuners/tda18218_priv.h
@@ -0,0 +1,106 @@
+/*
+ * NXP TDA18218HN silicon tuner driver
+ *
+ * Copyright (C) 2010 Antti Palosaari <crope@iki.fi>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ */
+
+#ifndef TDA18218_PRIV_H
+#define TDA18218_PRIV_H
+
+#define LOG_PREFIX "tda18218"
+
+#undef dbg
+#define dbg(f, arg...) \
+ if (debug) \
+ printk(KERN_DEBUG LOG_PREFIX": " f "\n" , ## arg)
+#undef err
+#define err(f, arg...) printk(KERN_ERR LOG_PREFIX": " f "\n" , ## arg)
+#undef info
+#define info(f, arg...) printk(KERN_INFO LOG_PREFIX": " f "\n" , ## arg)
+#undef warn
+#define warn(f, arg...) printk(KERN_WARNING LOG_PREFIX": " f "\n" , ## arg)
+
+#define R00_ID 0x00 /* ID byte */
+#define R01_R1 0x01 /* Read byte 1 */
+#define R02_R2 0x02 /* Read byte 2 */
+#define R03_R3 0x03 /* Read byte 3 */
+#define R04_R4 0x04 /* Read byte 4 */
+#define R05_R5 0x05 /* Read byte 5 */
+#define R06_R6 0x06 /* Read byte 6 */
+#define R07_MD1 0x07 /* Main divider byte 1 */
+#define R08_PSM1 0x08 /* PSM byte 1 */
+#define R09_MD2 0x09 /* Main divider byte 2 */
+#define R0A_MD3 0x0a /* Main divider byte 3 */
+#define R0B_MD4 0x0b /* Main divider byte 4 */
+#define R0C_MD5 0x0c /* Main divider byte 5 */
+#define R0D_MD6 0x0d /* Main divider byte 6 */
+#define R0E_MD7 0x0e /* Main divider byte 7 */
+#define R0F_MD8 0x0f /* Main divider byte 8 */
+#define R10_CD1 0x10 /* Call divider byte 1 */
+#define R11_CD2 0x11 /* Call divider byte 2 */
+#define R12_CD3 0x12 /* Call divider byte 3 */
+#define R13_CD4 0x13 /* Call divider byte 4 */
+#define R14_CD5 0x14 /* Call divider byte 5 */
+#define R15_CD6 0x15 /* Call divider byte 6 */
+#define R16_CD7 0x16 /* Call divider byte 7 */
+#define R17_PD1 0x17 /* Power-down byte 1 */
+#define R18_PD2 0x18 /* Power-down byte 2 */
+#define R19_XTOUT 0x19 /* XTOUT byte */
+#define R1A_IF1 0x1a /* IF byte 1 */
+#define R1B_IF2 0x1b /* IF byte 2 */
+#define R1C_AGC2B 0x1c /* AGC2b byte */
+#define R1D_PSM2 0x1d /* PSM byte 2 */
+#define R1E_PSM3 0x1e /* PSM byte 3 */
+#define R1F_PSM4 0x1f /* PSM byte 4 */
+#define R20_AGC11 0x20 /* AGC1 byte 1 */
+#define R21_AGC12 0x21 /* AGC1 byte 2 */
+#define R22_AGC13 0x22 /* AGC1 byte 3 */
+#define R23_AGC21 0x23 /* AGC2 byte 1 */
+#define R24_AGC22 0x24 /* AGC2 byte 2 */
+#define R25_AAGC 0x25 /* Analog AGC byte */
+#define R26_RC 0x26 /* RC byte */
+#define R27_RSSI 0x27 /* RSSI byte */
+#define R28_IRCAL1 0x28 /* IR CAL byte 1 */
+#define R29_IRCAL2 0x29 /* IR CAL byte 2 */
+#define R2A_IRCAL3 0x2a /* IR CAL byte 3 */
+#define R2B_IRCAL4 0x2b /* IR CAL byte 4 */
+#define R2C_RFCAL1 0x2c /* RF CAL byte 1 */
+#define R2D_RFCAL2 0x2d /* RF CAL byte 2 */
+#define R2E_RFCAL3 0x2e /* RF CAL byte 3 */
+#define R2F_RFCAL4 0x2f /* RF CAL byte 4 */
+#define R30_RFCAL5 0x30 /* RF CAL byte 5 */
+#define R31_RFCAL6 0x31 /* RF CAL byte 6 */
+#define R32_RFCAL7 0x32 /* RF CAL byte 7 */
+#define R33_RFCAL8 0x33 /* RF CAL byte 8 */
+#define R34_RFCAL9 0x34 /* RF CAL byte 9 */
+#define R35_RFCAL10 0x35 /* RF CAL byte 10 */
+#define R36_RFCALRAM1 0x36 /* RF CAL RAM byte 1 */
+#define R37_RFCALRAM2 0x37 /* RF CAL RAM byte 2 */
+#define R38_MARGIN 0x38 /* Margin byte */
+#define R39_FMAX1 0x39 /* Fmax byte 1 */
+#define R3A_FMAX2 0x3a /* Fmax byte 2 */
+
+#define TDA18218_NUM_REGS 59
+
+struct tda18218_priv {
+ struct tda18218_config *cfg;
+ struct i2c_adapter *i2c;
+
+ u8 regs[TDA18218_NUM_REGS];
+};
+
+#endif
diff --git a/drivers/media/common/tuners/tda18271-common.c b/drivers/media/common/tuners/tda18271-common.c
index e1f678281a58..5466d47db899 100644
--- a/drivers/media/common/tuners/tda18271-common.c
+++ b/drivers/media/common/tuners/tda18271-common.c
@@ -193,25 +193,51 @@ int tda18271_write_regs(struct dvb_frontend *fe, int idx, int len)
unsigned char *regs = priv->tda18271_regs;
unsigned char buf[TDA18271_NUM_REGS + 1];
struct i2c_msg msg = { .addr = priv->i2c_props.addr, .flags = 0,
- .buf = buf, .len = len + 1 };
- int i, ret;
+ .buf = buf };
+ int i, ret = 1, max;
BUG_ON((len == 0) || (idx + len > sizeof(buf)));
- buf[0] = idx;
- for (i = 1; i <= len; i++)
- buf[i] = regs[idx - 1 + i];
+
+ switch (priv->small_i2c) {
+ case TDA18271_03_BYTE_CHUNK_INIT:
+ max = 3;
+ break;
+ case TDA18271_08_BYTE_CHUNK_INIT:
+ max = 8;
+ break;
+ case TDA18271_16_BYTE_CHUNK_INIT:
+ max = 16;
+ break;
+ case TDA18271_39_BYTE_CHUNK_INIT:
+ default:
+ max = 39;
+ }
tda18271_i2c_gate_ctrl(fe, 1);
+ while (len) {
+ if (max > len)
+ max = len;
+
+ buf[0] = idx;
+ for (i = 1; i <= max; i++)
+ buf[i] = regs[idx - 1 + i];
- /* write registers */
- ret = i2c_transfer(priv->i2c_props.adap, &msg, 1);
+ msg.len = max + 1;
+ /* write registers */
+ ret = i2c_transfer(priv->i2c_props.adap, &msg, 1);
+ if (ret != 1)
+ break;
+
+ idx += max;
+ len -= max;
+ }
tda18271_i2c_gate_ctrl(fe, 0);
if (ret != 1)
tda_err("ERROR: idx = 0x%x, len = %d, "
- "i2c_transfer returned: %d\n", idx, len, ret);
+ "i2c_transfer returned: %d\n", idx, max, ret);
return (ret == 1 ? 0 : ret);
}
@@ -326,24 +352,7 @@ int tda18271_init_regs(struct dvb_frontend *fe)
regs[R_EB22] = 0x48;
regs[R_EB23] = 0xb0;
- switch (priv->small_i2c) {
- case TDA18271_08_BYTE_CHUNK_INIT:
- tda18271_write_regs(fe, 0x00, 0x08);
- tda18271_write_regs(fe, 0x08, 0x08);
- tda18271_write_regs(fe, 0x10, 0x08);
- tda18271_write_regs(fe, 0x18, 0x08);
- tda18271_write_regs(fe, 0x20, 0x07);
- break;
- case TDA18271_16_BYTE_CHUNK_INIT:
- tda18271_write_regs(fe, 0x00, 0x10);
- tda18271_write_regs(fe, 0x10, 0x10);
- tda18271_write_regs(fe, 0x20, 0x07);
- break;
- case TDA18271_39_BYTE_CHUNK_INIT:
- default:
- tda18271_write_regs(fe, 0x00, TDA18271_NUM_REGS);
- break;
- }
+ tda18271_write_regs(fe, 0x00, TDA18271_NUM_REGS);
/* setup agc1 gain */
regs[R_EB17] = 0x00;
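
A quick sanity check on the rework above: with small_i2c = TDA18271_16_BYTE_CHUNK_INIT, the single tda18271_write_regs(fe, 0x00, TDA18271_NUM_REGS) call now produces the same 16 + 16 + 7 byte sequence that the deleted switch in tda18271_init_regs() used to spell out by hand (0x00, 0x10, 0x20). Below is a standalone sketch of just the splitting loop, assuming the usual 39-register map.

#include <stdio.h>

#define TDA18271_NUM_REGS 39	/* assumed full register map size */

/* Sketch of the while() loop added to tda18271_write_regs(). */
static void show_init_writes(int max)
{
	int idx = 0x00;
	int len = TDA18271_NUM_REGS;

	while (len) {
		if (max > len)
			max = len;
		printf("write idx 0x%02x, len %d\n", idx, max);
		idx += max;
		len -= max;
	}
}

int main(void)
{
	show_init_writes(16);	/* TDA18271_16_BYTE_CHUNK_INIT */
	return 0;
}
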
diff --git a/drivers/media/common/tuners/tda18271-fe.c b/drivers/media/common/tuners/tda18271-fe.c
index 7955e49a3440..9ad4454a148d 100644
--- a/drivers/media/common/tuners/tda18271-fe.c
+++ b/drivers/media/common/tuners/tda18271-fe.c
@@ -1156,7 +1156,6 @@ static int tda18271_get_id(struct dvb_frontend *fe)
struct tda18271_priv *priv = fe->tuner_priv;
unsigned char *regs = priv->tda18271_regs;
char *name;
- int ret = 0;
mutex_lock(&priv->lock);
tda18271_read_regs(fe);
@@ -1172,17 +1171,16 @@ static int tda18271_get_id(struct dvb_frontend *fe)
priv->id = TDA18271HDC2;
break;
default:
- name = "Unknown device";
- ret = -EINVAL;
- break;
+ tda_info("Unknown device (%i) detected @ %d-%04x, device not supported.\n",
+ regs[R_ID], i2c_adapter_id(priv->i2c_props.adap),
+ priv->i2c_props.addr);
+ return -EINVAL;
}
- tda_info("%s detected @ %d-%04x%s\n", name,
- i2c_adapter_id(priv->i2c_props.adap),
- priv->i2c_props.addr,
- (0 == ret) ? "" : ", device not supported.");
+ tda_info("%s detected @ %d-%04x\n", name,
+ i2c_adapter_id(priv->i2c_props.adap), priv->i2c_props.addr);
- return ret;
+ return 0;
}
static int tda18271_setup_configuration(struct dvb_frontend *fe,
diff --git a/drivers/media/common/tuners/tda18271.h b/drivers/media/common/tuners/tda18271.h
index d7fcc36dc6e6..3abb221f3d07 100644
--- a/drivers/media/common/tuners/tda18271.h
+++ b/drivers/media/common/tuners/tda18271.h
@@ -80,8 +80,9 @@ enum tda18271_output_options {
enum tda18271_small_i2c {
TDA18271_39_BYTE_CHUNK_INIT = 0,
- TDA18271_16_BYTE_CHUNK_INIT = 1,
- TDA18271_08_BYTE_CHUNK_INIT = 2,
+ TDA18271_16_BYTE_CHUNK_INIT = 16,
+ TDA18271_08_BYTE_CHUNK_INIT = 8,
+ TDA18271_03_BYTE_CHUNK_INIT = 3,
};
struct tda18271_config {
diff --git a/drivers/media/common/tuners/xc5000.c b/drivers/media/common/tuners/xc5000.c
index d2b2c12a5561..76ac5cd84af7 100644
--- a/drivers/media/common/tuners/xc5000.c
+++ b/drivers/media/common/tuners/xc5000.c
@@ -1042,7 +1042,7 @@ static const struct dvb_tuner_ops xc5000_tuner_ops = {
struct dvb_frontend *xc5000_attach(struct dvb_frontend *fe,
struct i2c_adapter *i2c,
- struct xc5000_config *cfg)
+ const struct xc5000_config *cfg)
{
struct xc5000_priv *priv = NULL;
int instance;
diff --git a/drivers/media/common/tuners/xc5000.h b/drivers/media/common/tuners/xc5000.h
index e6d7236c9ea1..3756e73649be 100644
--- a/drivers/media/common/tuners/xc5000.h
+++ b/drivers/media/common/tuners/xc5000.h
@@ -53,11 +53,11 @@ struct xc5000_config {
(defined(CONFIG_MEDIA_TUNER_XC5000_MODULE) && defined(MODULE))
extern struct dvb_frontend *xc5000_attach(struct dvb_frontend *fe,
struct i2c_adapter *i2c,
- struct xc5000_config *cfg);
+ const struct xc5000_config *cfg);
#else
static inline struct dvb_frontend *xc5000_attach(struct dvb_frontend *fe,
struct i2c_adapter *i2c,
- struct xc5000_config *cfg)
+ const struct xc5000_config *cfg)
{
printk(KERN_WARNING "%s: driver disabled by Kconfig\n", __func__);
return NULL;
diff --git a/drivers/media/dvb/b2c2/flexcop-i2c.c b/drivers/media/dvb/b2c2/flexcop-i2c.c
index fd1df2352764..965d5eb33752 100644
--- a/drivers/media/dvb/b2c2/flexcop-i2c.c
+++ b/drivers/media/dvb/b2c2/flexcop-i2c.c
@@ -245,9 +245,6 @@ int flexcop_i2c_init(struct flexcop_device *fc)
i2c_set_adapdata(&fc->fc_i2c_adap[1].i2c_adap, &fc->fc_i2c_adap[1]);
i2c_set_adapdata(&fc->fc_i2c_adap[2].i2c_adap, &fc->fc_i2c_adap[2]);
- fc->fc_i2c_adap[0].i2c_adap.class =
- fc->fc_i2c_adap[1].i2c_adap.class =
- fc->fc_i2c_adap[2].i2c_adap.class = I2C_CLASS_TV_DIGITAL;
fc->fc_i2c_adap[0].i2c_adap.algo =
fc->fc_i2c_adap[1].i2c_adap.algo =
fc->fc_i2c_adap[2].i2c_adap.algo = &flexcop_algo;
diff --git a/drivers/media/dvb/dm1105/dm1105.c b/drivers/media/dvb/dm1105/dm1105.c
index bca07c0bcd01..5d404f1bf036 100644
--- a/drivers/media/dvb/dm1105/dm1105.c
+++ b/drivers/media/dvb/dm1105/dm1105.c
@@ -862,7 +862,6 @@ static int __devinit dm1105_probe(struct pci_dev *pdev,
i2c_set_adapdata(&dev->i2c_adap, dev);
strcpy(dev->i2c_adap.name, DRIVER_NAME);
dev->i2c_adap.owner = THIS_MODULE;
- dev->i2c_adap.class = I2C_CLASS_TV_DIGITAL;
dev->i2c_adap.dev.parent = &pdev->dev;
dev->i2c_adap.algo = &dm1105_algo;
dev->i2c_adap.algo_data = dev;
diff --git a/drivers/media/dvb/dvb-core/dvb_ca_en50221.c b/drivers/media/dvb/dvb-core/dvb_ca_en50221.c
index 4d0646da6087..7ea517b7e186 100644
--- a/drivers/media/dvb/dvb-core/dvb_ca_en50221.c
+++ b/drivers/media/dvb/dvb-core/dvb_ca_en50221.c
@@ -36,7 +36,6 @@
#include <linux/delay.h>
#include <linux/spinlock.h>
#include <linux/sched.h>
-#include <linux/smp_lock.h>
#include <linux/kthread.h>
#include "dvb_ca_en50221.h"
diff --git a/drivers/media/dvb/dvb-core/dvb_frontend.c b/drivers/media/dvb/dvb-core/dvb_frontend.c
index 970c9b8882d4..cad6634610ea 100644
--- a/drivers/media/dvb/dvb-core/dvb_frontend.c
+++ b/drivers/media/dvb/dvb-core/dvb_frontend.c
@@ -36,7 +36,6 @@
#include <linux/list.h>
#include <linux/freezer.h>
#include <linux/jiffies.h>
-#include <linux/smp_lock.h>
#include <linux/kthread.h>
#include <asm/processor.h>
@@ -702,7 +701,7 @@ static void dvb_frontend_stop(struct dvb_frontend *fe)
kthread_stop(fepriv->thread);
- init_MUTEX (&fepriv->sem);
+ sema_init(&fepriv->sem, 1);
fepriv->state = FESTATE_IDLE;
/* paranoia check in case a signal arrived */
@@ -2062,7 +2061,7 @@ int dvb_register_frontend(struct dvb_adapter* dvb,
}
fepriv = fe->frontend_priv;
- init_MUTEX (&fepriv->sem);
+ sema_init(&fepriv->sem, 1);
init_waitqueue_head (&fepriv->wait_queue);
init_waitqueue_head (&fepriv->events.wait_queue);
mutex_init(&fepriv->events.mtx);
diff --git a/drivers/media/dvb/dvb-core/dvb_frontend.h b/drivers/media/dvb/dvb-core/dvb_frontend.h
index bf0e6bed28dd..f9f19be77181 100644
--- a/drivers/media/dvb/dvb-core/dvb_frontend.h
+++ b/drivers/media/dvb/dvb-core/dvb_frontend.h
@@ -260,7 +260,7 @@ struct dvb_frontend_ops {
int (*init)(struct dvb_frontend* fe);
int (*sleep)(struct dvb_frontend* fe);
- int (*write)(struct dvb_frontend* fe, u8* buf, int len);
+ int (*write)(struct dvb_frontend* fe, const u8 buf[], int len);
/* if this is set, it overrides the default swzigzag */
int (*tune)(struct dvb_frontend* fe,
diff --git a/drivers/media/dvb/dvb-usb/Kconfig b/drivers/media/dvb/dvb-usb/Kconfig
index fdc19bba2128..2525d3b3c88d 100644
--- a/drivers/media/dvb/dvb-usb/Kconfig
+++ b/drivers/media/dvb/dvb-usb/Kconfig
@@ -314,6 +314,8 @@ config DVB_USB_AF9015
select MEDIA_TUNER_TDA18271 if !MEDIA_TUNER_CUSTOMISE
select MEDIA_TUNER_MXL5005S if !MEDIA_TUNER_CUSTOMISE
select MEDIA_TUNER_MC44S803 if !MEDIA_TUNER_CUSTOMISE
+ select MEDIA_TUNER_TDA18218 if !MEDIA_TUNER_CUSTOMISE
+ select MEDIA_TUNER_MXL5007T if !MEDIA_TUNER_CUSTOMISE
help
Say Y here to support the Afatech AF9015 based DVB-T USB2.0 receiver
@@ -346,3 +348,13 @@ config DVB_USB_AZ6027
select DVB_STB6100 if !DVB_FE_CUSTOMISE
help
Say Y here to support the AZ6027 device
+
+config DVB_USB_LME2510
+ tristate "LME DM04/QQBOX DVB-S USB2.0 support"
+ depends on DVB_USB
+ select DVB_TDA10086 if !DVB_FE_CUSTOMISE
+ select DVB_TDA826X if !DVB_FE_CUSTOMISE
+ select DVB_STV0288 if !DVB_FE_CUSTOMISE
+ select DVB_IX2505V if !DVB_FE_CUSTOMISE
+ help
+ Say Y here to support the LME DM04/QQBOX DVB-S USB2.0.
diff --git a/drivers/media/dvb/dvb-usb/Makefile b/drivers/media/dvb/dvb-usb/Makefile
index 1a192453b0e7..5b1d12f2d591 100644
--- a/drivers/media/dvb/dvb-usb/Makefile
+++ b/drivers/media/dvb/dvb-usb/Makefile
@@ -88,6 +88,9 @@ obj-$(CONFIG_DVB_USB_EC168) += dvb-usb-ec168.o
dvb-usb-az6027-objs = az6027.o
obj-$(CONFIG_DVB_USB_AZ6027) += dvb-usb-az6027.o
+dvb-usb-lmedm04-objs = lmedm04.o
+obj-$(CONFIG_DVB_USB_LME2510) += dvb-usb-lmedm04.o
+
EXTRA_CFLAGS += -Idrivers/media/dvb/dvb-core/ -Idrivers/media/dvb/frontends/
# due to tuner-xc3028
EXTRA_CFLAGS += -Idrivers/media/common/tuners
diff --git a/drivers/media/dvb/dvb-usb/af9015.c b/drivers/media/dvb/dvb-usb/af9015.c
index ea1ed3b4592a..31c0a0ed39f5 100644
--- a/drivers/media/dvb/dvb-usb/af9015.c
+++ b/drivers/media/dvb/dvb-usb/af9015.c
@@ -31,6 +31,8 @@
#include "tda18271.h"
#include "mxl5005s.h"
#include "mc44s803.h"
+#include "tda18218.h"
+#include "mxl5007t.h"
static int dvb_usb_af9015_debug;
module_param_named(debug, dvb_usb_af9015_debug, int, 0644);
@@ -205,12 +207,18 @@ static int af9015_write_reg(struct dvb_usb_device *d, u16 addr, u8 val)
return af9015_write_regs(d, addr, &val, 1);
}
-static int af9015_read_reg(struct dvb_usb_device *d, u16 addr, u8 *val)
+static int af9015_read_regs(struct dvb_usb_device *d, u16 addr, u8 *val, u8 len)
{
- struct req_t req = {READ_MEMORY, AF9015_I2C_DEMOD, addr, 0, 0, 1, val};
+ struct req_t req = {READ_MEMORY, AF9015_I2C_DEMOD, addr, 0, 0, len,
+ val};
return af9015_ctrl_msg(d, &req);
}
+static int af9015_read_reg(struct dvb_usb_device *d, u16 addr, u8 *val)
+{
+ return af9015_read_regs(d, addr, val, 1);
+}
+
static int af9015_write_reg_i2c(struct dvb_usb_device *d, u8 addr, u16 reg,
u8 val)
{
@@ -241,7 +249,7 @@ static int af9015_i2c_xfer(struct i2c_adapter *adap, struct i2c_msg msg[],
struct dvb_usb_device *d = i2c_get_adapdata(adap);
int ret = 0, i = 0;
u16 addr;
- u8 mbox, addr_len;
+ u8 uninitialized_var(mbox), addr_len;
struct req_t req;
/* TODO: implement bus lock
@@ -280,7 +288,7 @@ Due to that the only way to select correct tuner is use demodulator I2C-gate.
} else {
addr = msg[i].buf[0];
addr_len = 1;
- mbox = 0;
+ /* mbox is don't care in that case */
}
if (num > i + 1 && (msg[i+1].flags & I2C_M_RD)) {
@@ -494,7 +502,8 @@ static int af9015_copy_firmware(struct dvb_usb_device *d)
/* wait 2nd demodulator ready */
msleep(100);
- ret = af9015_read_reg_i2c(d, 0x3a, 0x98be, &val);
+ ret = af9015_read_reg_i2c(d,
+ af9015_af9013_config[1].demod_address, 0x98be, &val);
if (ret)
goto error;
else
@@ -597,37 +606,6 @@ free:
return ret;
}
-static int af9015_download_ir_table(struct dvb_usb_device *d)
-{
- int i, packets = 0, ret;
- u16 addr = 0x9a56; /* ir-table start address */
- struct req_t req = {WRITE_MEMORY, 0, 0, 0, 0, 1, NULL};
- u8 *data = NULL;
- deb_info("%s:\n", __func__);
-
- data = af9015_config.ir_table;
- packets = af9015_config.ir_table_size;
-
- /* no remote */
- if (!packets)
- goto exit;
-
- /* load remote ir-table */
- for (i = 0; i < packets; i++) {
- req.addr = addr + i;
- req.data = &data[i];
- ret = af9015_ctrl_msg(d, &req);
- if (ret) {
- err("ir-table download failed at packet %d with " \
- "code %d", i, ret);
- return ret;
- }
- }
-
-exit:
- return 0;
-}
-
static int af9015_init(struct dvb_usb_device *d)
{
int ret;
@@ -637,10 +615,6 @@ static int af9015_init(struct dvb_usb_device *d)
if (ret)
goto error;
- ret = af9015_download_ir_table(d);
- if (ret)
- goto error;
-
error:
return ret;
}
@@ -733,125 +707,102 @@ error:
return ret;
}
-struct af9015_setup {
+struct af9015_rc_setup {
unsigned int id;
- struct ir_scancode *rc_key_map;
- unsigned int rc_key_map_size;
- u8 *ir_table;
- unsigned int ir_table_size;
+ char *rc_codes;
};
-static const struct af9015_setup *af9015_setup_match(unsigned int id,
- const struct af9015_setup *table)
+static char *af9015_rc_setup_match(unsigned int id,
+ const struct af9015_rc_setup *table)
{
- for (; table->rc_key_map; table++)
+ for (; table->rc_codes; table++)
if (table->id == id)
- return table;
+ return table->rc_codes;
return NULL;
}
-static const struct af9015_setup af9015_setup_modparam[] = {
- { AF9015_REMOTE_A_LINK_DTU_M,
- ir_codes_af9015_table_a_link, ARRAY_SIZE(ir_codes_af9015_table_a_link),
- af9015_ir_table_a_link, ARRAY_SIZE(af9015_ir_table_a_link) },
- { AF9015_REMOTE_MSI_DIGIVOX_MINI_II_V3,
- ir_codes_af9015_table_msi, ARRAY_SIZE(ir_codes_af9015_table_msi),
- af9015_ir_table_msi, ARRAY_SIZE(af9015_ir_table_msi) },
- { AF9015_REMOTE_MYGICTV_U718,
- ir_codes_af9015_table_mygictv, ARRAY_SIZE(ir_codes_af9015_table_mygictv),
- af9015_ir_table_mygictv, ARRAY_SIZE(af9015_ir_table_mygictv) },
- { AF9015_REMOTE_DIGITTRADE_DVB_T,
- ir_codes_af9015_table_digittrade, ARRAY_SIZE(ir_codes_af9015_table_digittrade),
- af9015_ir_table_digittrade, ARRAY_SIZE(af9015_ir_table_digittrade) },
- { AF9015_REMOTE_AVERMEDIA_KS,
- ir_codes_af9015_table_avermedia, ARRAY_SIZE(ir_codes_af9015_table_avermedia),
- af9015_ir_table_avermedia_ks, ARRAY_SIZE(af9015_ir_table_avermedia_ks) },
+static const struct af9015_rc_setup af9015_rc_setup_modparam[] = {
+ { AF9015_REMOTE_A_LINK_DTU_M, RC_MAP_ALINK_DTU_M },
+ { AF9015_REMOTE_MSI_DIGIVOX_MINI_II_V3, RC_MAP_MSI_DIGIVOX_II },
+ { AF9015_REMOTE_MYGICTV_U718, RC_MAP_TOTAL_MEDIA_IN_HAND },
+ { AF9015_REMOTE_DIGITTRADE_DVB_T, RC_MAP_DIGITTRADE },
+ { AF9015_REMOTE_AVERMEDIA_KS, RC_MAP_AVERMEDIA_RM_KS },
{ }
};
-/* don't add new entries here anymore, use hashes instead */
-static const struct af9015_setup af9015_setup_usbids[] = {
- { USB_VID_LEADTEK,
- ir_codes_af9015_table_leadtek, ARRAY_SIZE(ir_codes_af9015_table_leadtek),
- af9015_ir_table_leadtek, ARRAY_SIZE(af9015_ir_table_leadtek) },
- { USB_VID_VISIONPLUS,
- ir_codes_af9015_table_twinhan, ARRAY_SIZE(ir_codes_af9015_table_twinhan),
- af9015_ir_table_twinhan, ARRAY_SIZE(af9015_ir_table_twinhan) },
- { USB_VID_KWORLD_2, /* TODO: use correct rc keys */
- ir_codes_af9015_table_twinhan, ARRAY_SIZE(ir_codes_af9015_table_twinhan),
- af9015_ir_table_kworld, ARRAY_SIZE(af9015_ir_table_kworld) },
- { USB_VID_AVERMEDIA,
- ir_codes_af9015_table_avermedia, ARRAY_SIZE(ir_codes_af9015_table_avermedia),
- af9015_ir_table_avermedia, ARRAY_SIZE(af9015_ir_table_avermedia) },
- { USB_VID_MSI_2,
- ir_codes_af9015_table_msi_digivox_iii, ARRAY_SIZE(ir_codes_af9015_table_msi_digivox_iii),
- af9015_ir_table_msi_digivox_iii, ARRAY_SIZE(af9015_ir_table_msi_digivox_iii) },
+static const struct af9015_rc_setup af9015_rc_setup_hashes[] = {
+ { 0xb8feb708, RC_MAP_MSI_DIGIVOX_II },
+ { 0xa3703d00, RC_MAP_ALINK_DTU_M },
+ { 0x9b7dc64e, RC_MAP_TOTAL_MEDIA_IN_HAND }, /* MYGICTV U718 */
{ }
};
-static const struct af9015_setup af9015_setup_hashes[] = {
- { 0xb8feb708,
- ir_codes_af9015_table_msi, ARRAY_SIZE(ir_codes_af9015_table_msi),
- af9015_ir_table_msi, ARRAY_SIZE(af9015_ir_table_msi) },
- { 0xa3703d00,
- ir_codes_af9015_table_a_link, ARRAY_SIZE(ir_codes_af9015_table_a_link),
- af9015_ir_table_a_link, ARRAY_SIZE(af9015_ir_table_a_link) },
- { 0x9b7dc64e,
- ir_codes_af9015_table_mygictv, ARRAY_SIZE(ir_codes_af9015_table_mygictv),
- af9015_ir_table_mygictv, ARRAY_SIZE(af9015_ir_table_mygictv) },
+static const struct af9015_rc_setup af9015_rc_setup_usbids[] = {
+ { (USB_VID_TERRATEC << 16) + USB_PID_TERRATEC_CINERGY_T_STICK_DUAL_RC,
+ RC_MAP_TERRATEC_SLIM },
+ { (USB_VID_VISIONPLUS << 16) + USB_PID_AZUREWAVE_AD_TU700,
+ RC_MAP_AZUREWAVE_AD_TU700 },
+ { (USB_VID_VISIONPLUS << 16) + USB_PID_TINYTWIN,
+ RC_MAP_AZUREWAVE_AD_TU700 },
+ { (USB_VID_MSI_2 << 16) + USB_PID_MSI_DIGI_VOX_MINI_III,
+ RC_MAP_MSI_DIGIVOX_III },
+ { (USB_VID_LEADTEK << 16) + USB_PID_WINFAST_DTV_DONGLE_GOLD,
+ RC_MAP_LEADTEK_Y04G0051 },
+ { (USB_VID_AVERMEDIA << 16) + USB_PID_AVERMEDIA_VOLAR_X,
+ RC_MAP_AVERMEDIA_M135A },
+ { (USB_VID_AFATECH << 16) + USB_PID_TREKSTOR_DVBT,
+ RC_MAP_TREKSTOR },
+ { (USB_VID_KWORLD_2 << 16) + USB_PID_TINYTWIN_2,
+ RC_MAP_DIGITALNOW_TINYTWIN },
+ { (USB_VID_GTEK << 16) + USB_PID_TINYTWIN_3,
+ RC_MAP_DIGITALNOW_TINYTWIN },
{ }
};
static void af9015_set_remote_config(struct usb_device *udev,
struct dvb_usb_device_properties *props)
{
- const struct af9015_setup *table = NULL;
-
- if (dvb_usb_af9015_remote) {
- /* load remote defined as module param */
- table = af9015_setup_match(dvb_usb_af9015_remote,
- af9015_setup_modparam);
- } else {
- u16 vendor = le16_to_cpu(udev->descriptor.idVendor);
-
- table = af9015_setup_match(af9015_config.eeprom_sum,
- af9015_setup_hashes);
-
- if (!table && vendor == USB_VID_AFATECH) {
- /* Check USB manufacturer and product strings and try
- to determine correct remote in case of chip vendor
- reference IDs are used.
- DO NOT ADD ANYTHING NEW HERE. Use hashes instead.
- */
- char manufacturer[10];
- memset(manufacturer, 0, sizeof(manufacturer));
- usb_string(udev, udev->descriptor.iManufacturer,
- manufacturer, sizeof(manufacturer));
- if (!strcmp("MSI", manufacturer)) {
- /* iManufacturer 1 MSI
- iProduct 2 MSI K-VOX */
- table = af9015_setup_match(
- AF9015_REMOTE_MSI_DIGIVOX_MINI_II_V3,
- af9015_setup_modparam);
- } else if (udev->descriptor.idProduct ==
- cpu_to_le16(USB_PID_TREKSTOR_DVBT)) {
- table = &(const struct af9015_setup){ 0,
- ir_codes_af9015_table_trekstor,
- ARRAY_SIZE(ir_codes_af9015_table_trekstor),
- af9015_ir_table_trekstor,
- ARRAY_SIZE(af9015_ir_table_trekstor)
- };
- }
- } else if (!table)
- table = af9015_setup_match(vendor, af9015_setup_usbids);
+ u16 vid = le16_to_cpu(udev->descriptor.idVendor);
+ u16 pid = le16_to_cpu(udev->descriptor.idProduct);
+
+ /* try to load remote based module param */
+ props->rc.core.rc_codes = af9015_rc_setup_match(
+ dvb_usb_af9015_remote, af9015_rc_setup_modparam);
+
+ /* try to load remote based eeprom hash */
+ if (!props->rc.core.rc_codes)
+ props->rc.core.rc_codes = af9015_rc_setup_match(
+ af9015_config.eeprom_sum, af9015_rc_setup_hashes);
+
+ /* try to load remote based USB ID */
+ if (!props->rc.core.rc_codes)
+ props->rc.core.rc_codes = af9015_rc_setup_match(
+ (vid << 16) + pid, af9015_rc_setup_usbids);
+
+ /* try to load remote based USB iManufacturer string */
+ if (!props->rc.core.rc_codes && vid == USB_VID_AFATECH) {
+ /* Check USB manufacturer and product strings and try
+ to determine correct remote in case of chip vendor
+ reference IDs are used.
+ DO NOT ADD ANYTHING NEW HERE. Use hashes instead. */
+ char manufacturer[10];
+ memset(manufacturer, 0, sizeof(manufacturer));
+ usb_string(udev, udev->descriptor.iManufacturer,
+ manufacturer, sizeof(manufacturer));
+ if (!strcmp("MSI", manufacturer)) {
+ /* iManufacturer 1 MSI
+ iProduct 2 MSI K-VOX */
+ props->rc.core.rc_codes = af9015_rc_setup_match(
+ AF9015_REMOTE_MSI_DIGIVOX_MINI_II_V3,
+ af9015_rc_setup_modparam);
+ }
}
- if (table) {
- props->rc.legacy.rc_key_map = table->rc_key_map;
- props->rc.legacy.rc_key_map_size = table->rc_key_map_size;
- af9015_config.ir_table = table->ir_table;
- af9015_config.ir_table_size = table->ir_table_size;
- }
+ /* finally load "empty" just for leaving IR receiver enabled */
+ if (!props->rc.core.rc_codes)
+ props->rc.core.rc_codes = RC_MAP_EMPTY;
+
+ return;
}
static int af9015_read_config(struct usb_device *udev)
@@ -877,10 +828,9 @@ static int af9015_read_config(struct usb_device *udev)
deb_info("%s: IR mode:%d\n", __func__, val);
for (i = 0; i < af9015_properties_count; i++) {
- if (val == AF9015_IR_MODE_DISABLED) {
- af9015_properties[i].rc.legacy.rc_key_map = NULL;
- af9015_properties[i].rc.legacy.rc_key_map_size = 0;
- } else
+ if (val == AF9015_IR_MODE_DISABLED)
+ af9015_properties[i].rc.core.rc_codes = NULL;
+ else
af9015_set_remote_config(udev, &af9015_properties[i]);
}
@@ -992,20 +942,19 @@ static int af9015_read_config(struct usb_device *udev)
case AF9013_TUNER_MT2060_2:
case AF9013_TUNER_TDA18271:
case AF9013_TUNER_QT1010A:
+ case AF9013_TUNER_TDA18218:
af9015_af9013_config[i].rf_spec_inv = 1;
break;
case AF9013_TUNER_MXL5003D:
case AF9013_TUNER_MXL5005D:
case AF9013_TUNER_MXL5005R:
+ case AF9013_TUNER_MXL5007T:
af9015_af9013_config[i].rf_spec_inv = 0;
break;
case AF9013_TUNER_MC44S803:
af9015_af9013_config[i].gpio[1] = AF9013_GPIO_LO;
af9015_af9013_config[i].rf_spec_inv = 1;
break;
- case AF9013_TUNER_TDA18218:
- warn("tuner NXP TDA18218 not supported yet");
- return -ENODEV;
default:
warn("tuner id:%d not supported, please report!", val);
return -ENODEV;
@@ -1020,9 +969,13 @@ error:
err("eeprom read failed:%d", ret);
/* AverMedia AVerTV Volar Black HD (A850) device have bad EEPROM
- content :-( Override some wrong values here. */
+ content :-( Override some wrong values here. Ditto for the
+ AVerTV Red HD+ (A850T) device. */
if (le16_to_cpu(udev->descriptor.idVendor) == USB_VID_AVERMEDIA &&
- le16_to_cpu(udev->descriptor.idProduct) == USB_PID_AVERMEDIA_A850) {
+ ((le16_to_cpu(udev->descriptor.idProduct) ==
+ USB_PID_AVERMEDIA_A850) ||
+ (le16_to_cpu(udev->descriptor.idProduct) ==
+ USB_PID_AVERMEDIA_A850T))) {
deb_info("%s: AverMedia A850: overriding config\n", __func__);
/* disable dual mode */
af9015_config.dual_mode = 0;
@@ -1059,36 +1012,53 @@ static int af9015_identify_state(struct usb_device *udev,
return ret;
}
-static int af9015_rc_query(struct dvb_usb_device *d, u32 *event, int *state)
+static int af9015_rc_query(struct dvb_usb_device *d)
{
- u8 buf[8];
- struct req_t req = {GET_IR_CODE, 0, 0, 0, 0, sizeof(buf), buf};
- struct ir_scancode *keymap = d->props.rc.legacy.rc_key_map;
- int i, ret;
-
- memset(buf, 0, sizeof(buf));
+ struct af9015_state *priv = d->priv;
+ int ret;
+ u8 buf[16];
- ret = af9015_ctrl_msg(d, &req);
+ /* read registers needed to detect remote controller code */
+ ret = af9015_read_regs(d, 0x98d9, buf, sizeof(buf));
if (ret)
- return ret;
+ goto error;
- *event = 0;
- *state = REMOTE_NO_KEY_PRESSED;
+ if (buf[14] || buf[15]) {
+ deb_rc("%s: key pressed %02x %02x %02x %02x\n", __func__,
+ buf[12], buf[13], buf[14], buf[15]);
- for (i = 0; i < d->props.rc.legacy.rc_key_map_size; i++) {
- if (!buf[1] && rc5_custom(&keymap[i]) == buf[0] &&
- rc5_data(&keymap[i]) == buf[2]) {
- *event = keymap[i].keycode;
- *state = REMOTE_KEY_PRESSED;
- break;
+ /* clean IR code from mem */
+ ret = af9015_write_regs(d, 0x98e5, "\x00\x00\x00\x00", 4);
+ if (ret)
+ goto error;
+
+ if (buf[14] == (u8) ~buf[15]) {
+ if (buf[12] == (u8) ~buf[13]) {
+ /* NEC */
+ priv->rc_keycode = buf[12] << 8 | buf[14];
+ } else {
+ /* NEC extended */
+ priv->rc_keycode = buf[12] << 16 |
+ buf[13] << 8 | buf[14];
+ }
+ ir_keydown(d->rc_input_dev, priv->rc_keycode, 0);
+ } else {
+ priv->rc_keycode = 0; /* clear just for sure */
}
+ } else if (priv->rc_repeat != buf[6] || buf[0]) {
+ deb_rc("%s: key repeated\n", __func__);
+ ir_keydown(d->rc_input_dev, priv->rc_keycode, 0);
+ } else {
+ deb_rc("%s: no key press\n", __func__);
}
- if (!buf[1])
- deb_rc("%s: %02x %02x %02x %02x %02x %02x %02x %02x\n",
- __func__, buf[0], buf[1], buf[2], buf[3], buf[4],
- buf[5], buf[6], buf[7]);
- return 0;
+ priv->rc_repeat = buf[6];
+
+error:
+ if (ret)
+ err("%s: failed:%d", __func__, ret);
+
+ return ret;
}
/* init 2nd I2C adapter */
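
For clarity, here is a minimal standalone sketch of the scancode handling the new af9015_rc_query() applies to the four NEC bytes it reads (address, inverted address, command, inverted command); it illustrates the same checks and is not the driver code itself.

#include <stdint.h>
#include <stdio.h>

/* Build a scancode the way af9015_rc_query() does from the NEC bytes. */
static int nec_scancode(uint8_t addr, uint8_t addr_inv,
			uint8_t cmd, uint8_t cmd_inv, uint32_t *scancode)
{
	if (cmd != (uint8_t)~cmd_inv)
		return -1;				/* corrupted frame */

	if (addr == (uint8_t)~addr_inv)			/* plain NEC */
		*scancode = addr << 8 | cmd;
	else						/* NEC extended */
		*scancode = (uint32_t)addr << 16 | addr_inv << 8 | cmd;

	return 0;
}

int main(void)
{
	uint32_t code;

	if (!nec_scancode(0x04, 0xfb, 0x08, 0xf7, &code))
		printf("scancode 0x%06x\n", (unsigned int)code);	/* plain NEC: 0x0408 */
	return 0;
}
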
@@ -1100,11 +1070,6 @@ static int af9015_i2c_init(struct dvb_usb_device *d)
strncpy(state->i2c_adap.name, d->desc->name,
sizeof(state->i2c_adap.name));
-#ifdef I2C_ADAP_CLASS_TV_DIGITAL
- state->i2c_adap.class = I2C_ADAP_CLASS_TV_DIGITAL,
-#else
- state->i2c_adap.class = I2C_CLASS_TV_DIGITAL,
-#endif
state->i2c_adap.algo = d->props.i2c_algo;
state->i2c_adap.algo_data = NULL;
state->i2c_adap.dev.parent = &d->udev->dev;
@@ -1166,7 +1131,7 @@ static struct qt1010_config af9015_qt1010_config = {
static struct tda18271_config af9015_tda18271_config = {
.gate = TDA18271_GATE_DIGITAL,
- .small_i2c = 1,
+ .small_i2c = TDA18271_16_BYTE_CHUNK_INIT,
};
static struct mxl5005s_config af9015_mxl5003_config = {
@@ -1208,12 +1173,22 @@ static struct mc44s803_config af9015_mc44s803_config = {
.dig_out = 1,
};
+static struct tda18218_config af9015_tda18218_config = {
+ .i2c_address = 0xc0,
+ .i2c_wr_max = 21, /* max wr bytes AF9015 I2C adap can handle at once */
+};
+
+static struct mxl5007t_config af9015_mxl5007t_config = {
+ .xtal_freq_hz = MxL_XTAL_24_MHZ,
+ .if_freq_hz = MxL_IF_4_57_MHZ,
+};
+
static int af9015_tuner_attach(struct dvb_usb_adapter *adap)
{
struct af9015_state *state = adap->dev->priv;
struct i2c_adapter *i2c_adap;
int ret;
- deb_info("%s: \n", __func__);
+ deb_info("%s:\n", __func__);
/* select I2C adapter */
if (adap->id == 0)
@@ -1238,6 +1213,10 @@ static int af9015_tuner_attach(struct dvb_usb_adapter *adap)
ret = dvb_attach(tda18271_attach, adap->fe, 0xc0, i2c_adap,
&af9015_tda18271_config) == NULL ? -ENODEV : 0;
break;
+ case AF9013_TUNER_TDA18218:
+ ret = dvb_attach(tda18218_attach, adap->fe, i2c_adap,
+ &af9015_tda18218_config) == NULL ? -ENODEV : 0;
+ break;
case AF9013_TUNER_MXL5003D:
ret = dvb_attach(mxl5005s_attach, adap->fe, i2c_adap,
&af9015_mxl5003_config) == NULL ? -ENODEV : 0;
@@ -1255,6 +1234,10 @@ static int af9015_tuner_attach(struct dvb_usb_adapter *adap)
ret = dvb_attach(mc44s803_attach, adap->fe, i2c_adap,
&af9015_mc44s803_config) == NULL ? -ENODEV : 0;
break;
+ case AF9013_TUNER_MXL5007T:
+ ret = dvb_attach(mxl5007t_attach, adap->fe, i2c_adap,
+ 0xc0, &af9015_mxl5007t_config) == NULL ? -ENODEV : 0;
+ break;
case AF9013_TUNER_UNKNOWN:
default:
ret = -ENODEV;
@@ -1300,10 +1283,16 @@ static struct usb_device_id af9015_usb_table[] = {
/* 30 */{USB_DEVICE(USB_VID_KWORLD_2, USB_PID_KWORLD_UB383_T)},
{USB_DEVICE(USB_VID_KWORLD_2, USB_PID_KWORLD_395U_4)},
{USB_DEVICE(USB_VID_AVERMEDIA, USB_PID_AVERMEDIA_A815M)},
+ {USB_DEVICE(USB_VID_TERRATEC, USB_PID_TERRATEC_CINERGY_T_STICK_RC)},
+ {USB_DEVICE(USB_VID_TERRATEC,
+ USB_PID_TERRATEC_CINERGY_T_STICK_DUAL_RC)},
+/* 35 */{USB_DEVICE(USB_VID_AVERMEDIA, USB_PID_AVERMEDIA_A850T)},
+ {USB_DEVICE(USB_VID_GTEK, USB_PID_TINYTWIN_3)},
{0},
};
MODULE_DEVICE_TABLE(usb, af9015_usb_table);
+#define AF9015_RC_INTERVAL 500
static struct dvb_usb_device_properties af9015_properties[] = {
{
.caps = DVB_USB_IS_AN_I2C_ADAPTER,
@@ -1354,14 +1343,19 @@ static struct dvb_usb_device_properties af9015_properties[] = {
.identify_state = af9015_identify_state,
- .rc.legacy = {
+ .rc.core = {
+ .protocol = IR_TYPE_NEC,
+ .module_name = "af9015",
.rc_query = af9015_rc_query,
- .rc_interval = 150,
+ .rc_interval = AF9015_RC_INTERVAL,
+ .rc_props = {
+ .allowed_protos = IR_TYPE_NEC,
+ },
},
.i2c_algo = &af9015_i2c_algo,
- .num_device_descs = 9, /* max 9 */
+ .num_device_descs = 12, /* check max from dvb-usb.h */
.devices = {
{
.name = "Afatech AF9015 DVB-T USB2.0 stick",
@@ -1389,7 +1383,8 @@ static struct dvb_usb_device_properties af9015_properties[] = {
{
.name = "DigitalNow TinyTwin DVB-T Receiver",
.cold_ids = {&af9015_usb_table[5],
- &af9015_usb_table[28], NULL},
+ &af9015_usb_table[28],
+ &af9015_usb_table[36], NULL},
.warm_ids = {NULL},
},
{
@@ -1413,6 +1408,21 @@ static struct dvb_usb_device_properties af9015_properties[] = {
.cold_ids = {&af9015_usb_table[9], NULL},
.warm_ids = {NULL},
},
+ {
+ .name = "TerraTec Cinergy T Stick RC",
+ .cold_ids = {&af9015_usb_table[33], NULL},
+ .warm_ids = {NULL},
+ },
+ {
+ .name = "TerraTec Cinergy T Stick Dual RC",
+ .cold_ids = {&af9015_usb_table[34], NULL},
+ .warm_ids = {NULL},
+ },
+ {
+ .name = "AverMedia AVerTV Red HD+ (A850T)",
+ .cold_ids = {&af9015_usb_table[35], NULL},
+ .warm_ids = {NULL},
+ },
}
}, {
.caps = DVB_USB_IS_AN_I2C_ADAPTER,
@@ -1463,14 +1473,19 @@ static struct dvb_usb_device_properties af9015_properties[] = {
.identify_state = af9015_identify_state,
- .rc.legacy = {
+ .rc.core = {
+ .protocol = IR_TYPE_NEC,
+ .module_name = "af9015",
.rc_query = af9015_rc_query,
- .rc_interval = 150,
+ .rc_interval = AF9015_RC_INTERVAL,
+ .rc_props = {
+ .allowed_protos = IR_TYPE_NEC,
+ },
},
.i2c_algo = &af9015_i2c_algo,
- .num_device_descs = 9, /* max 9 */
+ .num_device_descs = 9, /* check max from dvb-usb.h */
.devices = {
{
.name = "Xtensions XD-380",
@@ -1572,14 +1587,19 @@ static struct dvb_usb_device_properties af9015_properties[] = {
.identify_state = af9015_identify_state,
- .rc.legacy = {
+ .rc.core = {
+ .protocol = IR_TYPE_NEC,
+ .module_name = "af9015",
.rc_query = af9015_rc_query,
- .rc_interval = 150,
+ .rc_interval = AF9015_RC_INTERVAL,
+ .rc_props = {
+ .allowed_protos = IR_TYPE_NEC,
+ },
},
.i2c_algo = &af9015_i2c_algo,
- .num_device_descs = 9, /* max 9 */
+ .num_device_descs = 9, /* check max from dvb-usb.h */
.devices = {
{
.name = "AverMedia AVerTV Volar GPS 805 (A805)",
@@ -1672,7 +1692,7 @@ static int af9015_usb_probe(struct usb_interface *intf,
static void af9015_i2c_exit(struct dvb_usb_device *d)
{
struct af9015_state *state = d->priv;
- deb_info("%s: \n", __func__);
+ deb_info("%s:\n", __func__);
/* remove 2nd I2C adapter */
if (d->state & DVB_USB_STATE_I2C)
@@ -1682,7 +1702,7 @@ static void af9015_i2c_exit(struct dvb_usb_device *d)
static void af9015_usb_device_exit(struct usb_interface *intf)
{
struct dvb_usb_device *d = usb_get_intfdata(intf);
- deb_info("%s: \n", __func__);
+ deb_info("%s:\n", __func__);
/* remove 2nd I2C adapter */
if (d != NULL && d->desc != NULL)
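The af9015 hunks above convert the driver's remote handling from the legacy interface (rc.legacy with an in-driver keymap) to rc-core (rc.core): each properties block now only declares the protocol (IR_TYPE_NEC), a module name, the poll callback and the AF9015_RC_INTERVAL poll period, and leaves keymap handling to ir-core. The af9015_rc_query body is not part of these hunks, so the following is only a sketch of the shape such an rc.core poll callback takes; it reports a composed NEC scancode through ir_keydown(), just as the anysee conversion further below does explicitly:

static int example_rc_query(struct dvb_usb_device *d)
{
	u8 addr = 0x00, cmd = 0x00;

	/* a real driver reads the NEC address/command pair from the hardware */

	/* compose the 16-bit scancode and hand it to ir-core */
	ir_keydown(d->rc_input_dev, addr << 8 | cmd, 0);

	return 0;
}

The rc_props.allowed_protos = IR_TYPE_NEC field in the hunks above simply declares which protocol this receiver delivers.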
diff --git a/drivers/media/dvb/dvb-usb/af9015.h b/drivers/media/dvb/dvb-usb/af9015.h
index c8e9349742ee..f20cfa6ed690 100644
--- a/drivers/media/dvb/dvb-usb/af9015.h
+++ b/drivers/media/dvb/dvb-usb/af9015.h
@@ -100,6 +100,8 @@ enum af9015_ir_mode {
struct af9015_state {
struct i2c_adapter i2c_adap; /* I2C adapter for 2nd FE */
+ u8 rc_repeat;
+ u32 rc_keycode;
};
struct af9015_config {
@@ -108,8 +110,6 @@ struct af9015_config {
u16 firmware_size;
u16 firmware_checksum;
u32 eeprom_sum;
- u8 *ir_table;
- u16 ir_table_size;
};
enum af9015_remote {
@@ -121,735 +121,4 @@ enum af9015_remote {
/* 5 */ AF9015_REMOTE_AVERMEDIA_KS,
};
-/* LeadTek - Y04G0051 */
-/* Leadtek WinFast DTV Dongle Gold */
-static struct ir_scancode ir_codes_af9015_table_leadtek[] = {
- { 0x001e, KEY_1 },
- { 0x001f, KEY_2 },
- { 0x0020, KEY_3 },
- { 0x0021, KEY_4 },
- { 0x0022, KEY_5 },
- { 0x0023, KEY_6 },
- { 0x0024, KEY_7 },
- { 0x0025, KEY_8 },
- { 0x0026, KEY_9 },
- { 0x0027, KEY_0 },
- { 0x0028, KEY_OK },
- { 0x004f, KEY_RIGHT },
- { 0x0050, KEY_LEFT },
- { 0x0051, KEY_DOWN },
- { 0x0052, KEY_UP },
- { 0x011a, KEY_POWER2 },
- { 0x04b4, KEY_TV },
- { 0x04b3, KEY_RED },
- { 0x04b2, KEY_GREEN },
- { 0x04b1, KEY_YELLOW },
- { 0x04b0, KEY_BLUE },
- { 0x003d, KEY_TEXT },
- { 0x0113, KEY_SLEEP },
- { 0x0010, KEY_MUTE },
- { 0x0105, KEY_ESC },
- { 0x0009, KEY_SCREEN },
- { 0x010f, KEY_MENU },
- { 0x003f, KEY_CHANNEL },
- { 0x0013, KEY_REWIND },
- { 0x0012, KEY_PLAY },
- { 0x0011, KEY_FASTFORWARD },
- { 0x0005, KEY_PREVIOUS },
- { 0x0029, KEY_STOP },
- { 0x002b, KEY_NEXT },
- { 0x0041, KEY_EPG },
- { 0x0019, KEY_VIDEO },
- { 0x0016, KEY_AUDIO },
- { 0x0037, KEY_DOT },
- { 0x002a, KEY_AGAIN },
- { 0x002c, KEY_CAMERA },
- { 0x003c, KEY_NEW },
- { 0x0115, KEY_RECORD },
- { 0x010b, KEY_TIME },
- { 0x0043, KEY_VOLUMEUP },
- { 0x0042, KEY_VOLUMEDOWN },
- { 0x004b, KEY_CHANNELUP },
- { 0x004e, KEY_CHANNELDOWN },
-};
-
-static u8 af9015_ir_table_leadtek[] = {
- 0x03, 0xfc, 0x00, 0xff, 0x1a, 0x01, 0x00, /* KEY_POWER2 */
- 0x03, 0xfc, 0x56, 0xa9, 0xb4, 0x04, 0x00, /* KEY_TV */
- 0x03, 0xfc, 0x4b, 0xb4, 0xb3, 0x04, 0x00, /* KEY_RED */
- 0x03, 0xfc, 0x4c, 0xb3, 0xb2, 0x04, 0x00, /* KEY_GREEN */
- 0x03, 0xfc, 0x4d, 0xb2, 0xb1, 0x04, 0x00, /* KEY_YELLOW */
- 0x03, 0xfc, 0x4e, 0xb1, 0xb0, 0x04, 0x00, /* KEY_BLUE */
- 0x03, 0xfc, 0x1f, 0xe0, 0x3d, 0x00, 0x00, /* KEY_TEXT */
- 0x03, 0xfc, 0x40, 0xbf, 0x13, 0x01, 0x00, /* KEY_SLEEP */
- 0x03, 0xfc, 0x14, 0xeb, 0x10, 0x00, 0x00, /* KEY_MUTE */
- 0x03, 0xfc, 0x49, 0xb6, 0x05, 0x01, 0x00, /* KEY_ESC */
- 0x03, 0xfc, 0x50, 0xaf, 0x29, 0x00, 0x00, /* KEY_STOP (1)*/
- 0x03, 0xfc, 0x0c, 0xf3, 0x52, 0x00, 0x00, /* KEY_UP */
- 0x03, 0xfc, 0x03, 0xfc, 0x09, 0x00, 0x00, /* KEY_SCREEN */
- 0x03, 0xfc, 0x08, 0xf7, 0x50, 0x00, 0x00, /* KEY_LEFT */
- 0x03, 0xfc, 0x13, 0xec, 0x28, 0x00, 0x00, /* KEY_OK (1) */
- 0x03, 0xfc, 0x04, 0xfb, 0x4f, 0x00, 0x00, /* KEY_RIGHT */
- 0x03, 0xfc, 0x4f, 0xb0, 0x0f, 0x01, 0x00, /* KEY_MENU */
- 0x03, 0xfc, 0x10, 0xef, 0x51, 0x00, 0x00, /* KEY_DOWN */
- 0x03, 0xfc, 0x51, 0xae, 0x3f, 0x00, 0x00, /* KEY_CHANNEL */
- 0x03, 0xfc, 0x42, 0xbd, 0x13, 0x00, 0x00, /* KEY_REWIND */
- 0x03, 0xfc, 0x43, 0xbc, 0x12, 0x00, 0x00, /* KEY_PLAY */
- 0x03, 0xfc, 0x44, 0xbb, 0x11, 0x00, 0x00, /* KEY_FASTFORWARD */
- 0x03, 0xfc, 0x52, 0xad, 0x19, 0x00, 0x00, /* KEY_VIDEO (1) */
- 0x03, 0xfc, 0x54, 0xab, 0x05, 0x00, 0x00, /* KEY_PREVIOUS */
- 0x03, 0xfc, 0x46, 0xb9, 0x29, 0x00, 0x00, /* KEY_STOP (2) */
- 0x03, 0xfc, 0x55, 0xaa, 0x2b, 0x00, 0x00, /* KEY_NEXT */
- 0x03, 0xfc, 0x53, 0xac, 0x41, 0x00, 0x00, /* KEY_EPG */
- 0x03, 0xfc, 0x05, 0xfa, 0x1e, 0x00, 0x00, /* KEY_1 */
- 0x03, 0xfc, 0x06, 0xf9, 0x1f, 0x00, 0x00, /* KEY_2 */
- 0x03, 0xfc, 0x07, 0xf8, 0x20, 0x00, 0x00, /* KEY_3 */
- 0x03, 0xfc, 0x1e, 0xe1, 0x19, 0x00, 0x00, /* KEY_VIDEO (2) */
- 0x03, 0xfc, 0x09, 0xf6, 0x21, 0x00, 0x00, /* KEY_4 */
- 0x03, 0xfc, 0x0a, 0xf5, 0x22, 0x00, 0x00, /* KEY_5 */
- 0x03, 0xfc, 0x0b, 0xf4, 0x23, 0x00, 0x00, /* KEY_6 */
- 0x03, 0xfc, 0x1b, 0xe4, 0x16, 0x00, 0x00, /* KEY_AUDIO */
- 0x03, 0xfc, 0x0d, 0xf2, 0x24, 0x00, 0x00, /* KEY_7 */
- 0x03, 0xfc, 0x0e, 0xf1, 0x25, 0x00, 0x00, /* KEY_8 */
- 0x03, 0xfc, 0x0f, 0xf0, 0x26, 0x00, 0x00, /* KEY_9 */
- 0x03, 0xfc, 0x16, 0xe9, 0x28, 0x00, 0x00, /* KEY_OK (2) */
- 0x03, 0xfc, 0x41, 0xbe, 0x37, 0x00, 0x00, /* KEY_DOT */
- 0x03, 0xfc, 0x12, 0xed, 0x27, 0x00, 0x00, /* KEY_0 */
- 0x03, 0xfc, 0x11, 0xee, 0x2a, 0x00, 0x00, /* KEY_AGAIN */
- 0x03, 0xfc, 0x48, 0xb7, 0x2c, 0x00, 0x00, /* KEY_CAMERA */
- 0x03, 0xfc, 0x4a, 0xb5, 0x3c, 0x00, 0x00, /* KEY_NEW */
- 0x03, 0xfc, 0x47, 0xb8, 0x15, 0x01, 0x00, /* KEY_RECORD */
- 0x03, 0xfc, 0x45, 0xba, 0x0b, 0x01, 0x00, /* KEY_TIME */
- 0x03, 0xfc, 0x5e, 0xa1, 0x43, 0x00, 0x00, /* KEY_VOLUMEUP */
- 0x03, 0xfc, 0x5a, 0xa5, 0x42, 0x00, 0x00, /* KEY_VOLUMEDOWN */
- 0x03, 0xfc, 0x5b, 0xa4, 0x4b, 0x00, 0x00, /* KEY_CHANNELUP */
- 0x03, 0xfc, 0x5f, 0xa0, 0x4e, 0x00, 0x00, /* KEY_CHANNELDOWN */
-};
-
-/* TwinHan AzureWave AD-TU700(704J) */
-static struct ir_scancode ir_codes_af9015_table_twinhan[] = {
- { 0x053f, KEY_POWER },
- { 0x0019, KEY_FAVORITES }, /* Favorite List */
- { 0x0004, KEY_TEXT }, /* Teletext */
- { 0x000e, KEY_POWER },
- { 0x000e, KEY_INFO }, /* Preview */
- { 0x0008, KEY_EPG }, /* Info/EPG */
- { 0x000f, KEY_LIST }, /* Record List */
- { 0x001e, KEY_1 },
- { 0x001f, KEY_2 },
- { 0x0020, KEY_3 },
- { 0x0021, KEY_4 },
- { 0x0022, KEY_5 },
- { 0x0023, KEY_6 },
- { 0x0024, KEY_7 },
- { 0x0025, KEY_8 },
- { 0x0026, KEY_9 },
- { 0x0027, KEY_0 },
- { 0x0029, KEY_CANCEL }, /* Cancel */
- { 0x004c, KEY_CLEAR }, /* Clear */
- { 0x002a, KEY_BACK }, /* Back */
- { 0x002b, KEY_TAB }, /* Tab */
- { 0x0052, KEY_UP }, /* up arrow */
- { 0x0051, KEY_DOWN }, /* down arrow */
- { 0x004f, KEY_RIGHT }, /* right arrow */
- { 0x0050, KEY_LEFT }, /* left arrow */
- { 0x0028, KEY_ENTER }, /* Enter / ok */
- { 0x0252, KEY_VOLUMEUP },
- { 0x0251, KEY_VOLUMEDOWN },
- { 0x004e, KEY_CHANNELDOWN },
- { 0x004b, KEY_CHANNELUP },
- { 0x004a, KEY_RECORD },
- { 0x0111, KEY_PLAY },
- { 0x0017, KEY_PAUSE },
- { 0x000c, KEY_REWIND }, /* FR << */
- { 0x0011, KEY_FASTFORWARD }, /* FF >> */
- { 0x0115, KEY_PREVIOUS }, /* Replay */
- { 0x010e, KEY_NEXT }, /* Skip */
- { 0x0013, KEY_CAMERA }, /* Capture */
- { 0x010f, KEY_LANGUAGE }, /* SAP */
- { 0x0113, KEY_TV2 }, /* PIP */
- { 0x001d, KEY_ZOOM }, /* Full Screen */
- { 0x0117, KEY_SUBTITLE }, /* Subtitle / CC */
- { 0x0010, KEY_MUTE },
- { 0x0119, KEY_AUDIO }, /* L/R */ /* TODO better event */
- { 0x0116, KEY_SLEEP }, /* Hibernate */
- { 0x0116, KEY_SWITCHVIDEOMODE },
- /* A/V */ /* TODO does not work */
- { 0x0006, KEY_AGAIN }, /* Recall */
- { 0x0116, KEY_KPPLUS }, /* Zoom+ */ /* TODO does not work */
- { 0x0116, KEY_KPMINUS }, /* Zoom- */ /* TODO does not work */
- { 0x0215, KEY_RED },
- { 0x020a, KEY_GREEN },
- { 0x021c, KEY_YELLOW },
- { 0x0205, KEY_BLUE },
-};
-
-static u8 af9015_ir_table_twinhan[] = {
- 0x00, 0xff, 0x16, 0xe9, 0x3f, 0x05, 0x00,
- 0x00, 0xff, 0x07, 0xf8, 0x16, 0x01, 0x00,
- 0x00, 0xff, 0x14, 0xeb, 0x11, 0x01, 0x00,
- 0x00, 0xff, 0x1a, 0xe5, 0x4d, 0x00, 0x00,
- 0x00, 0xff, 0x4c, 0xb3, 0x17, 0x00, 0x00,
- 0x00, 0xff, 0x12, 0xed, 0x11, 0x00, 0x00,
- 0x00, 0xff, 0x40, 0xbf, 0x0c, 0x00, 0x00,
- 0x00, 0xff, 0x11, 0xee, 0x4a, 0x00, 0x00,
- 0x00, 0xff, 0x54, 0xab, 0x13, 0x00, 0x00,
- 0x00, 0xff, 0x41, 0xbe, 0x15, 0x01, 0x00,
- 0x00, 0xff, 0x42, 0xbd, 0x0e, 0x01, 0x00,
- 0x00, 0xff, 0x43, 0xbc, 0x17, 0x01, 0x00,
- 0x00, 0xff, 0x50, 0xaf, 0x0f, 0x01, 0x00,
- 0x00, 0xff, 0x4d, 0xb2, 0x1d, 0x00, 0x00,
- 0x00, 0xff, 0x47, 0xb8, 0x13, 0x01, 0x00,
- 0x00, 0xff, 0x05, 0xfa, 0x4b, 0x00, 0x00,
- 0x00, 0xff, 0x02, 0xfd, 0x4e, 0x00, 0x00,
- 0x00, 0xff, 0x0e, 0xf1, 0x06, 0x00, 0x00,
- 0x00, 0xff, 0x1e, 0xe1, 0x52, 0x02, 0x00,
- 0x00, 0xff, 0x0a, 0xf5, 0x51, 0x02, 0x00,
- 0x00, 0xff, 0x10, 0xef, 0x10, 0x00, 0x00,
- 0x00, 0xff, 0x49, 0xb6, 0x19, 0x01, 0x00,
- 0x00, 0xff, 0x15, 0xea, 0x27, 0x00, 0x00,
- 0x00, 0xff, 0x03, 0xfc, 0x1e, 0x00, 0x00,
- 0x00, 0xff, 0x01, 0xfe, 0x1f, 0x00, 0x00,
- 0x00, 0xff, 0x06, 0xf9, 0x20, 0x00, 0x00,
- 0x00, 0xff, 0x09, 0xf6, 0x21, 0x00, 0x00,
- 0x00, 0xff, 0x1d, 0xe2, 0x22, 0x00, 0x00,
- 0x00, 0xff, 0x1f, 0xe0, 0x23, 0x00, 0x00,
- 0x00, 0xff, 0x0d, 0xf2, 0x24, 0x00, 0x00,
- 0x00, 0xff, 0x19, 0xe6, 0x25, 0x00, 0x00,
- 0x00, 0xff, 0x1b, 0xe4, 0x26, 0x00, 0x00,
- 0x00, 0xff, 0x00, 0xff, 0x2b, 0x00, 0x00,
- 0x00, 0xff, 0x4a, 0xb5, 0x4c, 0x00, 0x00,
- 0x00, 0xff, 0x4b, 0xb4, 0x52, 0x00, 0x00,
- 0x00, 0xff, 0x51, 0xae, 0x51, 0x00, 0x00,
- 0x00, 0xff, 0x52, 0xad, 0x4f, 0x00, 0x00,
- 0x00, 0xff, 0x4e, 0xb1, 0x50, 0x00, 0x00,
- 0x00, 0xff, 0x0c, 0xf3, 0x29, 0x00, 0x00,
- 0x00, 0xff, 0x4f, 0xb0, 0x28, 0x00, 0x00,
- 0x00, 0xff, 0x13, 0xec, 0x2a, 0x00, 0x00,
- 0x00, 0xff, 0x17, 0xe8, 0x19, 0x00, 0x00,
- 0x00, 0xff, 0x04, 0xfb, 0x0f, 0x00, 0x00,
- 0x00, 0xff, 0x48, 0xb7, 0x0e, 0x00, 0x00,
- 0x00, 0xff, 0x0f, 0xf0, 0x04, 0x00, 0x00,
- 0x00, 0xff, 0x1c, 0xe3, 0x08, 0x00, 0x00,
- 0x00, 0xff, 0x18, 0xe7, 0x15, 0x02, 0x00,
- 0x00, 0xff, 0x53, 0xac, 0x0a, 0x02, 0x00,
- 0x00, 0xff, 0x5e, 0xa1, 0x1c, 0x02, 0x00,
- 0x00, 0xff, 0x5f, 0xa0, 0x05, 0x02, 0x00,
-};
-
-/* A-Link DTU(m) */
-static struct ir_scancode ir_codes_af9015_table_a_link[] = {
- { 0x001e, KEY_1 },
- { 0x001f, KEY_2 },
- { 0x0020, KEY_3 },
- { 0x0021, KEY_4 },
- { 0x0022, KEY_5 },
- { 0x0023, KEY_6 },
- { 0x0024, KEY_7 },
- { 0x0025, KEY_8 },
- { 0x0026, KEY_9 },
- { 0x0027, KEY_0 },
- { 0x002e, KEY_CHANNELUP },
- { 0x002d, KEY_CHANNELDOWN },
- { 0x0428, KEY_ZOOM },
- { 0x0041, KEY_MUTE },
- { 0x0042, KEY_VOLUMEDOWN },
- { 0x0043, KEY_VOLUMEUP },
- { 0x0044, KEY_GOTO }, /* jump */
- { 0x0545, KEY_POWER },
-};
-
-static u8 af9015_ir_table_a_link[] = {
- 0x08, 0xf7, 0x12, 0xed, 0x45, 0x05, 0x00, /* power */
- 0x08, 0xf7, 0x1a, 0xe5, 0x41, 0x00, 0x00, /* mute */
- 0x08, 0xf7, 0x01, 0xfe, 0x1e, 0x00, 0x00, /* 1 */
- 0x08, 0xf7, 0x1c, 0xe3, 0x21, 0x00, 0x00, /* 4 */
- 0x08, 0xf7, 0x03, 0xfc, 0x24, 0x00, 0x00, /* 7 */
- 0x08, 0xf7, 0x05, 0xfa, 0x28, 0x04, 0x00, /* zoom */
- 0x08, 0xf7, 0x00, 0xff, 0x43, 0x00, 0x00, /* volume up */
- 0x08, 0xf7, 0x16, 0xe9, 0x42, 0x00, 0x00, /* volume down */
- 0x08, 0xf7, 0x0f, 0xf0, 0x1f, 0x00, 0x00, /* 2 */
- 0x08, 0xf7, 0x0d, 0xf2, 0x22, 0x00, 0x00, /* 5 */
- 0x08, 0xf7, 0x1b, 0xe4, 0x25, 0x00, 0x00, /* 8 */
- 0x08, 0xf7, 0x06, 0xf9, 0x27, 0x00, 0x00, /* 0 */
- 0x08, 0xf7, 0x14, 0xeb, 0x2e, 0x00, 0x00, /* channel up */
- 0x08, 0xf7, 0x1d, 0xe2, 0x2d, 0x00, 0x00, /* channel down */
- 0x08, 0xf7, 0x02, 0xfd, 0x20, 0x00, 0x00, /* 3 */
- 0x08, 0xf7, 0x18, 0xe7, 0x23, 0x00, 0x00, /* 6 */
- 0x08, 0xf7, 0x04, 0xfb, 0x26, 0x00, 0x00, /* 9 */
- 0x08, 0xf7, 0x07, 0xf8, 0x44, 0x00, 0x00, /* jump */
-};
-
-/* MSI DIGIVOX mini II V3.0 */
-static struct ir_scancode ir_codes_af9015_table_msi[] = {
- { 0x001e, KEY_1 },
- { 0x001f, KEY_2 },
- { 0x0020, KEY_3 },
- { 0x0021, KEY_4 },
- { 0x0022, KEY_5 },
- { 0x0023, KEY_6 },
- { 0x0024, KEY_7 },
- { 0x0025, KEY_8 },
- { 0x0026, KEY_9 },
- { 0x0027, KEY_0 },
- { 0x030f, KEY_CHANNELUP },
- { 0x030e, KEY_CHANNELDOWN },
- { 0x0042, KEY_VOLUMEDOWN },
- { 0x0043, KEY_VOLUMEUP },
- { 0x0545, KEY_POWER },
- { 0x0052, KEY_UP }, /* up */
- { 0x0051, KEY_DOWN }, /* down */
- { 0x0028, KEY_ENTER },
-};
-
-static u8 af9015_ir_table_msi[] = {
- 0x03, 0xfc, 0x17, 0xe8, 0x45, 0x05, 0x00, /* power */
- 0x03, 0xfc, 0x0d, 0xf2, 0x51, 0x00, 0x00, /* down */
- 0x03, 0xfc, 0x03, 0xfc, 0x52, 0x00, 0x00, /* up */
- 0x03, 0xfc, 0x1a, 0xe5, 0x1e, 0x00, 0x00, /* 1 */
- 0x03, 0xfc, 0x02, 0xfd, 0x1f, 0x00, 0x00, /* 2 */
- 0x03, 0xfc, 0x04, 0xfb, 0x20, 0x00, 0x00, /* 3 */
- 0x03, 0xfc, 0x1c, 0xe3, 0x21, 0x00, 0x00, /* 4 */
- 0x03, 0xfc, 0x08, 0xf7, 0x22, 0x00, 0x00, /* 5 */
- 0x03, 0xfc, 0x1d, 0xe2, 0x23, 0x00, 0x00, /* 6 */
- 0x03, 0xfc, 0x11, 0xee, 0x24, 0x00, 0x00, /* 7 */
- 0x03, 0xfc, 0x0b, 0xf4, 0x25, 0x00, 0x00, /* 8 */
- 0x03, 0xfc, 0x10, 0xef, 0x26, 0x00, 0x00, /* 9 */
- 0x03, 0xfc, 0x09, 0xf6, 0x27, 0x00, 0x00, /* 0 */
- 0x03, 0xfc, 0x14, 0xeb, 0x43, 0x00, 0x00, /* volume up */
- 0x03, 0xfc, 0x1f, 0xe0, 0x42, 0x00, 0x00, /* volume down */
- 0x03, 0xfc, 0x15, 0xea, 0x0f, 0x03, 0x00, /* channel up */
- 0x03, 0xfc, 0x05, 0xfa, 0x0e, 0x03, 0x00, /* channel down */
- 0x03, 0xfc, 0x16, 0xe9, 0x28, 0x00, 0x00, /* enter */
-};
-
-/* MYGICTV U718 */
-static struct ir_scancode ir_codes_af9015_table_mygictv[] = {
- { 0x003d, KEY_SWITCHVIDEOMODE },
- /* TV / AV */
- { 0x0545, KEY_POWER },
- { 0x001e, KEY_1 },
- { 0x001f, KEY_2 },
- { 0x0020, KEY_3 },
- { 0x0021, KEY_4 },
- { 0x0022, KEY_5 },
- { 0x0023, KEY_6 },
- { 0x0024, KEY_7 },
- { 0x0025, KEY_8 },
- { 0x0026, KEY_9 },
- { 0x0027, KEY_0 },
- { 0x0041, KEY_MUTE },
- { 0x002a, KEY_ESC }, /* Esc */
- { 0x002e, KEY_CHANNELUP },
- { 0x002d, KEY_CHANNELDOWN },
- { 0x0042, KEY_VOLUMEDOWN },
- { 0x0043, KEY_VOLUMEUP },
- { 0x0052, KEY_UP }, /* up arrow */
- { 0x0051, KEY_DOWN }, /* down arrow */
- { 0x004f, KEY_RIGHT }, /* right arrow */
- { 0x0050, KEY_LEFT }, /* left arrow */
- { 0x0028, KEY_ENTER }, /* ok */
- { 0x0115, KEY_RECORD },
- { 0x0313, KEY_PLAY },
- { 0x0113, KEY_PAUSE },
- { 0x0116, KEY_STOP },
- { 0x0307, KEY_REWIND }, /* FR << */
- { 0x0309, KEY_FASTFORWARD }, /* FF >> */
- { 0x003b, KEY_TIME }, /* TimeShift */
- { 0x003e, KEY_CAMERA }, /* Snapshot */
- { 0x0316, KEY_CYCLEWINDOWS }, /* yellow, min / max */
- { 0x0000, KEY_ZOOM }, /* 'select' (?) */
- { 0x0316, KEY_SHUFFLE }, /* Shuffle */
- { 0x0345, KEY_POWER },
-};
-
-static u8 af9015_ir_table_mygictv[] = {
- 0x02, 0xbd, 0x0c, 0xf3, 0x3d, 0x00, 0x00, /* TV / AV */
- 0x02, 0xbd, 0x14, 0xeb, 0x45, 0x05, 0x00, /* power */
- 0x02, 0xbd, 0x00, 0xff, 0x1e, 0x00, 0x00, /* 1 */
- 0x02, 0xbd, 0x01, 0xfe, 0x1f, 0x00, 0x00, /* 2 */
- 0x02, 0xbd, 0x02, 0xfd, 0x20, 0x00, 0x00, /* 3 */
- 0x02, 0xbd, 0x03, 0xfc, 0x21, 0x00, 0x00, /* 4 */
- 0x02, 0xbd, 0x04, 0xfb, 0x22, 0x00, 0x00, /* 5 */
- 0x02, 0xbd, 0x05, 0xfa, 0x23, 0x00, 0x00, /* 6 */
- 0x02, 0xbd, 0x06, 0xf9, 0x24, 0x00, 0x00, /* 7 */
- 0x02, 0xbd, 0x07, 0xf8, 0x25, 0x00, 0x00, /* 8 */
- 0x02, 0xbd, 0x08, 0xf7, 0x26, 0x00, 0x00, /* 9 */
- 0x02, 0xbd, 0x09, 0xf6, 0x27, 0x00, 0x00, /* 0 */
- 0x02, 0xbd, 0x0a, 0xf5, 0x41, 0x00, 0x00, /* mute */
- 0x02, 0xbd, 0x1c, 0xe3, 0x2a, 0x00, 0x00, /* esc */
- 0x02, 0xbd, 0x1f, 0xe0, 0x43, 0x00, 0x00, /* volume up */
- 0x02, 0xbd, 0x12, 0xed, 0x52, 0x00, 0x00, /* up arrow */
- 0x02, 0xbd, 0x11, 0xee, 0x50, 0x00, 0x00, /* left arrow */
- 0x02, 0xbd, 0x15, 0xea, 0x28, 0x00, 0x00, /* ok */
- 0x02, 0xbd, 0x10, 0xef, 0x4f, 0x00, 0x00, /* right arrow */
- 0x02, 0xbd, 0x13, 0xec, 0x51, 0x00, 0x00, /* down arrow */
- 0x02, 0xbd, 0x0e, 0xf1, 0x42, 0x00, 0x00, /* volume down */
- 0x02, 0xbd, 0x19, 0xe6, 0x15, 0x01, 0x00, /* record */
- 0x02, 0xbd, 0x1e, 0xe1, 0x13, 0x03, 0x00, /* play */
- 0x02, 0xbd, 0x16, 0xe9, 0x16, 0x01, 0x00, /* stop */
- 0x02, 0xbd, 0x0b, 0xf4, 0x28, 0x04, 0x00, /* yellow, min / max */
- 0x02, 0xbd, 0x0f, 0xf0, 0x3b, 0x00, 0x00, /* time shift */
- 0x02, 0xbd, 0x18, 0xe7, 0x2e, 0x00, 0x00, /* channel up */
- 0x02, 0xbd, 0x1a, 0xe5, 0x2d, 0x00, 0x00, /* channel down */
- 0x02, 0xbd, 0x17, 0xe8, 0x3e, 0x00, 0x00, /* snapshot */
- 0x02, 0xbd, 0x40, 0xbf, 0x13, 0x01, 0x00, /* pause */
- 0x02, 0xbd, 0x41, 0xbe, 0x09, 0x03, 0x00, /* FF >> */
- 0x02, 0xbd, 0x42, 0xbd, 0x07, 0x03, 0x00, /* FR << */
- 0x02, 0xbd, 0x43, 0xbc, 0x00, 0x00, 0x00, /* 'select' (?) */
- 0x02, 0xbd, 0x44, 0xbb, 0x16, 0x03, 0x00, /* shuffle */
- 0x02, 0xbd, 0x45, 0xba, 0x45, 0x03, 0x00, /* power */
-};
-
-/* KWorld PlusTV Dual DVB-T Stick (DVB-T 399U) */
-static u8 af9015_ir_table_kworld[] = {
- 0x86, 0x6b, 0x0c, 0xf3, 0x2e, 0x07, 0x00,
- 0x86, 0x6b, 0x16, 0xe9, 0x2d, 0x07, 0x00,
- 0x86, 0x6b, 0x1d, 0xe2, 0x37, 0x07, 0x00,
- 0x86, 0x6b, 0x00, 0xff, 0x1e, 0x07, 0x00,
- 0x86, 0x6b, 0x01, 0xfe, 0x1f, 0x07, 0x00,
- 0x86, 0x6b, 0x02, 0xfd, 0x20, 0x07, 0x00,
- 0x86, 0x6b, 0x03, 0xfc, 0x21, 0x07, 0x00,
- 0x86, 0x6b, 0x04, 0xfb, 0x22, 0x07, 0x00,
- 0x86, 0x6b, 0x05, 0xfa, 0x23, 0x07, 0x00,
- 0x86, 0x6b, 0x06, 0xf9, 0x24, 0x07, 0x00,
- 0x86, 0x6b, 0x07, 0xf8, 0x25, 0x07, 0x00,
- 0x86, 0x6b, 0x08, 0xf7, 0x26, 0x07, 0x00,
- 0x86, 0x6b, 0x09, 0xf6, 0x4d, 0x07, 0x00,
- 0x86, 0x6b, 0x0a, 0xf5, 0x4e, 0x07, 0x00,
- 0x86, 0x6b, 0x14, 0xeb, 0x4f, 0x07, 0x00,
- 0x86, 0x6b, 0x1e, 0xe1, 0x50, 0x07, 0x00,
- 0x86, 0x6b, 0x17, 0xe8, 0x52, 0x07, 0x00,
- 0x86, 0x6b, 0x1f, 0xe0, 0x51, 0x07, 0x00,
- 0x86, 0x6b, 0x0e, 0xf1, 0x0b, 0x07, 0x00,
- 0x86, 0x6b, 0x20, 0xdf, 0x0c, 0x07, 0x00,
- 0x86, 0x6b, 0x42, 0xbd, 0x0d, 0x07, 0x00,
- 0x86, 0x6b, 0x0b, 0xf4, 0x0e, 0x07, 0x00,
- 0x86, 0x6b, 0x43, 0xbc, 0x0f, 0x07, 0x00,
- 0x86, 0x6b, 0x10, 0xef, 0x10, 0x07, 0x00,
- 0x86, 0x6b, 0x21, 0xde, 0x11, 0x07, 0x00,
- 0x86, 0x6b, 0x13, 0xec, 0x12, 0x07, 0x00,
- 0x86, 0x6b, 0x11, 0xee, 0x13, 0x07, 0x00,
- 0x86, 0x6b, 0x12, 0xed, 0x14, 0x07, 0x00,
- 0x86, 0x6b, 0x19, 0xe6, 0x15, 0x07, 0x00,
- 0x86, 0x6b, 0x1a, 0xe5, 0x16, 0x07, 0x00,
- 0x86, 0x6b, 0x1b, 0xe4, 0x17, 0x07, 0x00,
- 0x86, 0x6b, 0x4b, 0xb4, 0x18, 0x07, 0x00,
- 0x86, 0x6b, 0x40, 0xbf, 0x19, 0x07, 0x00,
- 0x86, 0x6b, 0x44, 0xbb, 0x1a, 0x07, 0x00,
- 0x86, 0x6b, 0x41, 0xbe, 0x1b, 0x07, 0x00,
- 0x86, 0x6b, 0x22, 0xdd, 0x1c, 0x07, 0x00,
- 0x86, 0x6b, 0x15, 0xea, 0x1d, 0x07, 0x00,
- 0x86, 0x6b, 0x0f, 0xf0, 0x3f, 0x07, 0x00,
- 0x86, 0x6b, 0x1c, 0xe3, 0x40, 0x07, 0x00,
- 0x86, 0x6b, 0x4a, 0xb5, 0x41, 0x07, 0x00,
- 0x86, 0x6b, 0x48, 0xb7, 0x42, 0x07, 0x00,
- 0x86, 0x6b, 0x49, 0xb6, 0x43, 0x07, 0x00,
- 0x86, 0x6b, 0x18, 0xe7, 0x44, 0x07, 0x00,
- 0x86, 0x6b, 0x23, 0xdc, 0x45, 0x07, 0x00,
-};
-
-/* AverMedia Volar X */
-static struct ir_scancode ir_codes_af9015_table_avermedia[] = {
- { 0x053d, KEY_PROG1 }, /* SOURCE */
- { 0x0512, KEY_POWER }, /* POWER */
- { 0x051e, KEY_1 }, /* 1 */
- { 0x051f, KEY_2 }, /* 2 */
- { 0x0520, KEY_3 }, /* 3 */
- { 0x0521, KEY_4 }, /* 4 */
- { 0x0522, KEY_5 }, /* 5 */
- { 0x0523, KEY_6 }, /* 6 */
- { 0x0524, KEY_7 }, /* 7 */
- { 0x0525, KEY_8 }, /* 8 */
- { 0x0526, KEY_9 }, /* 9 */
- { 0x053f, KEY_LEFT }, /* L / DISPLAY */
- { 0x0527, KEY_0 }, /* 0 */
- { 0x050f, KEY_RIGHT }, /* R / CH RTN */
- { 0x0518, KEY_PROG2 }, /* SNAP SHOT */
- { 0x051c, KEY_PROG3 }, /* 16-CH PREV */
- { 0x052d, KEY_VOLUMEDOWN }, /* VOL DOWN */
- { 0x053e, KEY_ZOOM }, /* FULL SCREEN */
- { 0x052e, KEY_VOLUMEUP }, /* VOL UP */
- { 0x0510, KEY_MUTE }, /* MUTE */
- { 0x0504, KEY_AUDIO }, /* AUDIO */
- { 0x0515, KEY_RECORD }, /* RECORD */
- { 0x0511, KEY_PLAY }, /* PLAY */
- { 0x0516, KEY_STOP }, /* STOP */
- { 0x050c, KEY_PLAYPAUSE }, /* TIMESHIFT / PAUSE */
- { 0x0505, KEY_BACK }, /* << / RED */
- { 0x0509, KEY_FORWARD }, /* >> / YELLOW */
- { 0x0517, KEY_TEXT }, /* TELETEXT */
- { 0x050a, KEY_EPG }, /* EPG */
- { 0x0513, KEY_MENU }, /* MENU */
-
- { 0x050e, KEY_CHANNELUP }, /* CH UP */
- { 0x050d, KEY_CHANNELDOWN }, /* CH DOWN */
- { 0x0519, KEY_FIRST }, /* |<< / GREEN */
- { 0x0508, KEY_LAST }, /* >>| / BLUE */
-};
-
-static u8 af9015_ir_table_avermedia[] = {
- 0x02, 0xfd, 0x00, 0xff, 0x12, 0x05, 0x00,
- 0x02, 0xfd, 0x01, 0xfe, 0x3d, 0x05, 0x00,
- 0x02, 0xfd, 0x03, 0xfc, 0x17, 0x05, 0x00,
- 0x02, 0xfd, 0x04, 0xfb, 0x0a, 0x05, 0x00,
- 0x02, 0xfd, 0x05, 0xfa, 0x1e, 0x05, 0x00,
- 0x02, 0xfd, 0x06, 0xf9, 0x1f, 0x05, 0x00,
- 0x02, 0xfd, 0x07, 0xf8, 0x20, 0x05, 0x00,
- 0x02, 0xfd, 0x09, 0xf6, 0x21, 0x05, 0x00,
- 0x02, 0xfd, 0x0a, 0xf5, 0x22, 0x05, 0x00,
- 0x02, 0xfd, 0x0b, 0xf4, 0x23, 0x05, 0x00,
- 0x02, 0xfd, 0x0d, 0xf2, 0x24, 0x05, 0x00,
- 0x02, 0xfd, 0x0e, 0xf1, 0x25, 0x05, 0x00,
- 0x02, 0xfd, 0x0f, 0xf0, 0x26, 0x05, 0x00,
- 0x02, 0xfd, 0x11, 0xee, 0x27, 0x05, 0x00,
- 0x02, 0xfd, 0x08, 0xf7, 0x04, 0x05, 0x00,
- 0x02, 0xfd, 0x0c, 0xf3, 0x3e, 0x05, 0x00,
- 0x02, 0xfd, 0x10, 0xef, 0x1c, 0x05, 0x00,
- 0x02, 0xfd, 0x12, 0xed, 0x3f, 0x05, 0x00,
- 0x02, 0xfd, 0x13, 0xec, 0x0f, 0x05, 0x00,
- 0x02, 0xfd, 0x14, 0xeb, 0x10, 0x05, 0x00,
- 0x02, 0xfd, 0x15, 0xea, 0x13, 0x05, 0x00,
- 0x02, 0xfd, 0x17, 0xe8, 0x18, 0x05, 0x00,
- 0x02, 0xfd, 0x18, 0xe7, 0x11, 0x05, 0x00,
- 0x02, 0xfd, 0x19, 0xe6, 0x15, 0x05, 0x00,
- 0x02, 0xfd, 0x1a, 0xe5, 0x0c, 0x05, 0x00,
- 0x02, 0xfd, 0x1b, 0xe4, 0x16, 0x05, 0x00,
- 0x02, 0xfd, 0x1c, 0xe3, 0x09, 0x05, 0x00,
- 0x02, 0xfd, 0x1d, 0xe2, 0x05, 0x05, 0x00,
- 0x02, 0xfd, 0x1e, 0xe1, 0x2d, 0x05, 0x00,
- 0x02, 0xfd, 0x1f, 0xe0, 0x2e, 0x05, 0x00,
- 0x03, 0xfc, 0x00, 0xff, 0x08, 0x05, 0x00,
- 0x03, 0xfc, 0x01, 0xfe, 0x19, 0x05, 0x00,
- 0x03, 0xfc, 0x02, 0xfd, 0x0d, 0x05, 0x00,
- 0x03, 0xfc, 0x03, 0xfc, 0x0e, 0x05, 0x00,
-};
-
-static u8 af9015_ir_table_avermedia_ks[] = {
- 0x05, 0xfa, 0x01, 0xfe, 0x12, 0x05, 0x00,
- 0x05, 0xfa, 0x02, 0xfd, 0x0e, 0x05, 0x00,
- 0x05, 0xfa, 0x03, 0xfc, 0x0d, 0x05, 0x00,
- 0x05, 0xfa, 0x04, 0xfb, 0x2e, 0x05, 0x00,
- 0x05, 0xfa, 0x05, 0xfa, 0x2d, 0x05, 0x00,
- 0x05, 0xfa, 0x06, 0xf9, 0x10, 0x05, 0x00,
- 0x05, 0xfa, 0x07, 0xf8, 0x0f, 0x05, 0x00,
- 0x05, 0xfa, 0x08, 0xf7, 0x3d, 0x05, 0x00,
- 0x05, 0xfa, 0x09, 0xf6, 0x1e, 0x05, 0x00,
- 0x05, 0xfa, 0x0a, 0xf5, 0x1f, 0x05, 0x00,
- 0x05, 0xfa, 0x0b, 0xf4, 0x20, 0x05, 0x00,
- 0x05, 0xfa, 0x0c, 0xf3, 0x21, 0x05, 0x00,
- 0x05, 0xfa, 0x0d, 0xf2, 0x22, 0x05, 0x00,
- 0x05, 0xfa, 0x0e, 0xf1, 0x23, 0x05, 0x00,
- 0x05, 0xfa, 0x0f, 0xf0, 0x24, 0x05, 0x00,
- 0x05, 0xfa, 0x10, 0xef, 0x25, 0x05, 0x00,
- 0x05, 0xfa, 0x11, 0xee, 0x26, 0x05, 0x00,
- 0x05, 0xfa, 0x12, 0xed, 0x27, 0x05, 0x00,
- 0x05, 0xfa, 0x13, 0xec, 0x04, 0x05, 0x00,
- 0x05, 0xfa, 0x15, 0xea, 0x0a, 0x05, 0x00,
- 0x05, 0xfa, 0x16, 0xe9, 0x11, 0x05, 0x00,
- 0x05, 0xfa, 0x17, 0xe8, 0x15, 0x05, 0x00,
- 0x05, 0xfa, 0x18, 0xe7, 0x16, 0x05, 0x00,
- 0x05, 0xfa, 0x1c, 0xe3, 0x05, 0x05, 0x00,
- 0x05, 0xfa, 0x1d, 0xe2, 0x09, 0x05, 0x00,
- 0x05, 0xfa, 0x4d, 0xb2, 0x3f, 0x05, 0x00,
- 0x05, 0xfa, 0x56, 0xa9, 0x3e, 0x05, 0x00
-};
-
-/* Digittrade DVB-T USB Stick */
-static struct ir_scancode ir_codes_af9015_table_digittrade[] = {
- { 0x010f, KEY_LAST }, /* RETURN */
- { 0x0517, KEY_TEXT }, /* TELETEXT */
- { 0x0108, KEY_EPG }, /* EPG */
- { 0x0513, KEY_POWER }, /* POWER */
- { 0x0109, KEY_ZOOM }, /* FULLSCREEN */
- { 0x0040, KEY_AUDIO }, /* DUAL SOUND */
- { 0x002c, KEY_PRINT }, /* SNAPSHOT */
- { 0x0516, KEY_SUBTITLE }, /* SUBTITLE */
- { 0x0052, KEY_CHANNELUP }, /* CH Up */
- { 0x0051, KEY_CHANNELDOWN },/* Ch Dn */
- { 0x0057, KEY_VOLUMEUP }, /* Vol Up */
- { 0x0056, KEY_VOLUMEDOWN }, /* Vol Dn */
- { 0x0110, KEY_MUTE }, /* MUTE */
- { 0x0027, KEY_0 },
- { 0x001e, KEY_1 },
- { 0x001f, KEY_2 },
- { 0x0020, KEY_3 },
- { 0x0021, KEY_4 },
- { 0x0022, KEY_5 },
- { 0x0023, KEY_6 },
- { 0x0024, KEY_7 },
- { 0x0025, KEY_8 },
- { 0x0026, KEY_9 },
- { 0x0117, KEY_PLAYPAUSE }, /* TIMESHIFT */
- { 0x0115, KEY_RECORD }, /* RECORD */
- { 0x0313, KEY_PLAY }, /* PLAY */
- { 0x0116, KEY_STOP }, /* STOP */
- { 0x0113, KEY_PAUSE }, /* PAUSE */
-};
-
-static u8 af9015_ir_table_digittrade[] = {
- 0x00, 0xff, 0x06, 0xf9, 0x13, 0x05, 0x00,
- 0x00, 0xff, 0x4d, 0xb2, 0x17, 0x01, 0x00,
- 0x00, 0xff, 0x1f, 0xe0, 0x2c, 0x00, 0x00,
- 0x00, 0xff, 0x0a, 0xf5, 0x15, 0x01, 0x00,
- 0x00, 0xff, 0x0e, 0xf1, 0x16, 0x01, 0x00,
- 0x00, 0xff, 0x09, 0xf6, 0x09, 0x01, 0x00,
- 0x00, 0xff, 0x01, 0xfe, 0x08, 0x01, 0x00,
- 0x00, 0xff, 0x05, 0xfa, 0x10, 0x01, 0x00,
- 0x00, 0xff, 0x02, 0xfd, 0x56, 0x00, 0x00,
- 0x00, 0xff, 0x40, 0xbf, 0x57, 0x00, 0x00,
- 0x00, 0xff, 0x19, 0xe6, 0x52, 0x00, 0x00,
- 0x00, 0xff, 0x17, 0xe8, 0x51, 0x00, 0x00,
- 0x00, 0xff, 0x10, 0xef, 0x0f, 0x01, 0x00,
- 0x00, 0xff, 0x54, 0xab, 0x27, 0x00, 0x00,
- 0x00, 0xff, 0x1b, 0xe4, 0x1e, 0x00, 0x00,
- 0x00, 0xff, 0x11, 0xee, 0x1f, 0x00, 0x00,
- 0x00, 0xff, 0x15, 0xea, 0x20, 0x00, 0x00,
- 0x00, 0xff, 0x12, 0xed, 0x21, 0x00, 0x00,
- 0x00, 0xff, 0x16, 0xe9, 0x22, 0x00, 0x00,
- 0x00, 0xff, 0x4c, 0xb3, 0x23, 0x00, 0x00,
- 0x00, 0xff, 0x48, 0xb7, 0x24, 0x00, 0x00,
- 0x00, 0xff, 0x04, 0xfb, 0x25, 0x00, 0x00,
- 0x00, 0xff, 0x00, 0xff, 0x26, 0x00, 0x00,
- 0x00, 0xff, 0x1e, 0xe1, 0x13, 0x03, 0x00,
- 0x00, 0xff, 0x1a, 0xe5, 0x13, 0x01, 0x00,
- 0x00, 0xff, 0x03, 0xfc, 0x17, 0x05, 0x00,
- 0x00, 0xff, 0x0d, 0xf2, 0x16, 0x05, 0x00,
- 0x00, 0xff, 0x1d, 0xe2, 0x40, 0x00, 0x00,
-};
-
-/* TREKSTOR DVB-T USB Stick */
-static struct ir_scancode ir_codes_af9015_table_trekstor[] = {
- { 0x0704, KEY_AGAIN }, /* Home */
- { 0x0705, KEY_MUTE }, /* Mute */
- { 0x0706, KEY_UP }, /* Up */
- { 0x0707, KEY_DOWN }, /* Down */
- { 0x0709, KEY_RIGHT }, /* Right */
- { 0x070a, KEY_ENTER }, /* OK */
- { 0x070b, KEY_FASTFORWARD }, /* Fast forward */
- { 0x070c, KEY_REWIND }, /* Rewind */
- { 0x070d, KEY_PLAY }, /* Play/Pause */
- { 0x070e, KEY_VOLUMEUP }, /* Volume + */
- { 0x070f, KEY_VOLUMEDOWN }, /* Volume - */
- { 0x0710, KEY_RECORD }, /* Record */
- { 0x0711, KEY_STOP }, /* Stop */
- { 0x0712, KEY_ZOOM }, /* TV */
- { 0x0713, KEY_EPG }, /* Info/EPG */
- { 0x0714, KEY_CHANNELDOWN }, /* Channel - */
- { 0x0715, KEY_CHANNELUP }, /* Channel + */
- { 0x071e, KEY_1 },
- { 0x071f, KEY_2 },
- { 0x0720, KEY_3 },
- { 0x0721, KEY_4 },
- { 0x0722, KEY_5 },
- { 0x0723, KEY_6 },
- { 0x0724, KEY_7 },
- { 0x0725, KEY_8 },
- { 0x0726, KEY_9 },
- { 0x0708, KEY_LEFT }, /* LEFT */
- { 0x0727, KEY_0 },
-};
-
-static u8 af9015_ir_table_trekstor[] = {
- 0x00, 0xff, 0x86, 0x79, 0x04, 0x07, 0x00,
- 0x00, 0xff, 0x85, 0x7a, 0x05, 0x07, 0x00,
- 0x00, 0xff, 0x87, 0x78, 0x06, 0x07, 0x00,
- 0x00, 0xff, 0x8c, 0x73, 0x07, 0x07, 0x00,
- 0x00, 0xff, 0x89, 0x76, 0x09, 0x07, 0x00,
- 0x00, 0xff, 0x88, 0x77, 0x0a, 0x07, 0x00,
- 0x00, 0xff, 0x8a, 0x75, 0x0b, 0x07, 0x00,
- 0x00, 0xff, 0x9e, 0x61, 0x0c, 0x07, 0x00,
- 0x00, 0xff, 0x8d, 0x72, 0x0d, 0x07, 0x00,
- 0x00, 0xff, 0x8b, 0x74, 0x0e, 0x07, 0x00,
- 0x00, 0xff, 0x9b, 0x64, 0x0f, 0x07, 0x00,
- 0x00, 0xff, 0x9d, 0x62, 0x10, 0x07, 0x00,
- 0x00, 0xff, 0x8e, 0x71, 0x11, 0x07, 0x00,
- 0x00, 0xff, 0x9c, 0x63, 0x12, 0x07, 0x00,
- 0x00, 0xff, 0x8f, 0x70, 0x13, 0x07, 0x00,
- 0x00, 0xff, 0x93, 0x6c, 0x14, 0x07, 0x00,
- 0x00, 0xff, 0x97, 0x68, 0x15, 0x07, 0x00,
- 0x00, 0xff, 0x92, 0x6d, 0x1e, 0x07, 0x00,
- 0x00, 0xff, 0x96, 0x69, 0x1f, 0x07, 0x00,
- 0x00, 0xff, 0x9a, 0x65, 0x20, 0x07, 0x00,
- 0x00, 0xff, 0x91, 0x6e, 0x21, 0x07, 0x00,
- 0x00, 0xff, 0x95, 0x6a, 0x22, 0x07, 0x00,
- 0x00, 0xff, 0x99, 0x66, 0x23, 0x07, 0x00,
- 0x00, 0xff, 0x90, 0x6f, 0x24, 0x07, 0x00,
- 0x00, 0xff, 0x94, 0x6b, 0x25, 0x07, 0x00,
- 0x00, 0xff, 0x98, 0x67, 0x26, 0x07, 0x00,
- 0x00, 0xff, 0x9f, 0x60, 0x08, 0x07, 0x00,
- 0x00, 0xff, 0x84, 0x7b, 0x27, 0x07, 0x00,
-};
-
-/* MSI DIGIVOX mini III */
-static struct ir_scancode ir_codes_af9015_table_msi_digivox_iii[] = {
- { 0x0713, KEY_POWER }, /* [red power button] */
- { 0x073b, KEY_VIDEO }, /* Source */
- { 0x073e, KEY_ZOOM }, /* Zoom */
- { 0x070b, KEY_POWER2 }, /* ShutDown */
- { 0x071e, KEY_1 },
- { 0x071f, KEY_2 },
- { 0x0720, KEY_3 },
- { 0x0721, KEY_4 },
- { 0x0722, KEY_5 },
- { 0x0723, KEY_6 },
- { 0x0724, KEY_7 },
- { 0x0725, KEY_8 },
- { 0x0726, KEY_9 },
- { 0x0727, KEY_0 },
- { 0x0752, KEY_CHANNELUP }, /* CH+ */
- { 0x0751, KEY_CHANNELDOWN }, /* CH- */
- { 0x0750, KEY_VOLUMEUP }, /* Vol+ */
- { 0x074f, KEY_VOLUMEDOWN }, /* Vol- */
- { 0x0705, KEY_ESC }, /* [back up arrow] */
- { 0x0708, KEY_OK }, /* [enter arrow] */
- { 0x073f, KEY_RECORD }, /* Rec */
- { 0x0716, KEY_STOP }, /* Stop */
- { 0x072a, KEY_PLAY }, /* Play */
- { 0x073c, KEY_MUTE }, /* Mute */
- { 0x0718, KEY_UP },
- { 0x0707, KEY_DOWN },
- { 0x070f, KEY_LEFT },
- { 0x0715, KEY_RIGHT },
- { 0x0736, KEY_RED },
- { 0x0737, KEY_GREEN },
- { 0x072d, KEY_YELLOW },
- { 0x072e, KEY_BLUE },
-};
-
-static u8 af9015_ir_table_msi_digivox_iii[] = {
- 0x61, 0xd6, 0x43, 0xbc, 0x13, 0x07, 0x00, /* KEY_POWER */
- 0x61, 0xd6, 0x01, 0xfe, 0x3b, 0x07, 0x00, /* KEY_VIDEO */
- 0x61, 0xd6, 0x0b, 0xf4, 0x3e, 0x07, 0x00, /* KEY_ZOOM */
- 0x61, 0xd6, 0x03, 0xfc, 0x0b, 0x07, 0x00, /* KEY_POWER2 */
- 0x61, 0xd6, 0x04, 0xfb, 0x1e, 0x07, 0x00, /* KEY_1 */
- 0x61, 0xd6, 0x08, 0xf7, 0x1f, 0x07, 0x00, /* KEY_2 */
- 0x61, 0xd6, 0x02, 0xfd, 0x20, 0x07, 0x00, /* KEY_3 */
- 0x61, 0xd6, 0x0f, 0xf0, 0x21, 0x07, 0x00, /* KEY_4 */
- 0x61, 0xd6, 0x05, 0xfa, 0x22, 0x07, 0x00, /* KEY_5 */
- 0x61, 0xd6, 0x06, 0xf9, 0x23, 0x07, 0x00, /* KEY_6 */
- 0x61, 0xd6, 0x0c, 0xf3, 0x24, 0x07, 0x00, /* KEY_7 */
- 0x61, 0xd6, 0x0d, 0xf2, 0x25, 0x07, 0x00, /* KEY_8 */
- 0x61, 0xd6, 0x0a, 0xf5, 0x26, 0x07, 0x00, /* KEY_9 */
- 0x61, 0xd6, 0x11, 0xee, 0x27, 0x07, 0x00, /* KEY_0 */
- 0x61, 0xd6, 0x09, 0xf6, 0x52, 0x07, 0x00, /* KEY_CHANNELUP */
- 0x61, 0xd6, 0x07, 0xf8, 0x51, 0x07, 0x00, /* KEY_CHANNELDOWN */
- 0x61, 0xd6, 0x0e, 0xf1, 0x50, 0x07, 0x00, /* KEY_VOLUMEUP */
- 0x61, 0xd6, 0x13, 0xec, 0x4f, 0x07, 0x00, /* KEY_VOLUMEDOWN */
- 0x61, 0xd6, 0x10, 0xef, 0x05, 0x07, 0x00, /* KEY_ESC */
- 0x61, 0xd6, 0x12, 0xed, 0x08, 0x07, 0x00, /* KEY_OK */
- 0x61, 0xd6, 0x14, 0xeb, 0x3f, 0x07, 0x00, /* KEY_RECORD */
- 0x61, 0xd6, 0x15, 0xea, 0x16, 0x07, 0x00, /* KEY_STOP */
- 0x61, 0xd6, 0x16, 0xe9, 0x2a, 0x07, 0x00, /* KEY_PLAY */
- 0x61, 0xd6, 0x17, 0xe8, 0x3c, 0x07, 0x00, /* KEY_MUTE */
- 0x61, 0xd6, 0x18, 0xe7, 0x18, 0x07, 0x00, /* KEY_UP */
- 0x61, 0xd6, 0x19, 0xe6, 0x07, 0x07, 0x00, /* KEY_DOWN */
- 0x61, 0xd6, 0x1a, 0xe5, 0x0f, 0x07, 0x00, /* KEY_LEFT */
- 0x61, 0xd6, 0x1b, 0xe4, 0x15, 0x07, 0x00, /* KEY_RIGHT */
- 0x61, 0xd6, 0x1c, 0xe3, 0x36, 0x07, 0x00, /* KEY_RED */
- 0x61, 0xd6, 0x1d, 0xe2, 0x37, 0x07, 0x00, /* KEY_GREEN */
- 0x61, 0xd6, 0x1e, 0xe1, 0x2d, 0x07, 0x00, /* KEY_YELLOW */
- 0x61, 0xd6, 0x1f, 0xe0, 0x2e, 0x07, 0x00, /* KEY_BLUE */
-};
-
#endif
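All of the scancode tables deleted above share the struct ir_scancode layout, and after the rc-core conversion the same data is meant to live in shared keymaps referenced by name rather than in per-driver arrays (the anysee conversion below, for example, points at RC_MAP_ANYSEE). As a reminder of the shape involved, re-using two entries from the removed LeadTek table:

/* illustrative fragment only; the real tables are the ones removed above */
static struct ir_scancode example_keymap[] = {
	{ 0x001e, KEY_1 },	/* NEC command 0x1e maps to the digit 1 */
	{ 0x0027, KEY_0 },
};

A converted driver no longer carries such an array itself; it sets .rc.core.rc_codes to an RC_MAP_* name and ir-core is expected to resolve that name to the matching keymap when the input device is registered.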
diff --git a/drivers/media/dvb/dvb-usb/anysee.c b/drivers/media/dvb/dvb-usb/anysee.c
index 4685259e1614..1759d26bca42 100644
--- a/drivers/media/dvb/dvb-usb/anysee.c
+++ b/drivers/media/dvb/dvb-usb/anysee.c
@@ -354,7 +354,7 @@ static int anysee_frontend_attach(struct dvb_usb_adapter *adap)
static int anysee_tuner_attach(struct dvb_usb_adapter *adap)
{
struct anysee_state *state = adap->dev->priv;
- deb_info("%s: \n", __func__);
+ deb_info("%s:\n", __func__);
switch (state->tuner) {
case DVB_PLL_THOMSON_DTT7579:
@@ -374,78 +374,32 @@ static int anysee_tuner_attach(struct dvb_usb_adapter *adap)
return 0;
}
-static int anysee_rc_query(struct dvb_usb_device *d, u32 *event, int *state)
+static int anysee_rc_query(struct dvb_usb_device *d)
{
u8 buf[] = {CMD_GET_IR_CODE};
- struct ir_scancode *keymap = d->props.rc.legacy.rc_key_map;
u8 ircode[2];
- int i, ret;
+ int ret;
+
+ /* The remote controller is plain NEC using address byte 0x08.
+ The Anysee RC query returns only two bytes, status and code;
+ the address byte is dropped. It also returns nothing for NEC
+ remotes whose address byte is not 0x08, so the device cannot
+ be used as a generic NEC receiver. It might be possible to
+ work around that by reading the whole code directly from
+ device memory... */
- ret = anysee_ctrl_msg(d, buf, sizeof(buf), &ircode[0], 2);
+ ret = anysee_ctrl_msg(d, buf, sizeof(buf), ircode, sizeof(ircode));
if (ret)
return ret;
- *event = 0;
- *state = REMOTE_NO_KEY_PRESSED;
-
- for (i = 0; i < d->props.rc.legacy.rc_key_map_size; i++) {
- if (rc5_custom(&keymap[i]) == ircode[0] &&
- rc5_data(&keymap[i]) == ircode[1]) {
- *event = keymap[i].keycode;
- *state = REMOTE_KEY_PRESSED;
- return 0;
- }
+ if (ircode[0]) {
+ deb_rc("%s: key pressed %02x\n", __func__, ircode[1]);
+ ir_keydown(d->rc_input_dev, 0x08 << 8 | ircode[1], 0);
}
+
return 0;
}
-static struct ir_scancode ir_codes_anysee_table[] = {
- { 0x0100, KEY_0 },
- { 0x0101, KEY_1 },
- { 0x0102, KEY_2 },
- { 0x0103, KEY_3 },
- { 0x0104, KEY_4 },
- { 0x0105, KEY_5 },
- { 0x0106, KEY_6 },
- { 0x0107, KEY_7 },
- { 0x0108, KEY_8 },
- { 0x0109, KEY_9 },
- { 0x010a, KEY_POWER },
- { 0x010b, KEY_DOCUMENTS }, /* * */
- { 0x0119, KEY_FAVORITES },
- { 0x0120, KEY_SLEEP },
- { 0x0121, KEY_MODE }, /* 4:3 / 16:9 select */
- { 0x0122, KEY_ZOOM },
- { 0x0147, KEY_TEXT },
- { 0x0116, KEY_TV }, /* TV / radio select */
- { 0x011e, KEY_LANGUAGE }, /* Second Audio Program */
- { 0x011a, KEY_SUBTITLE },
- { 0x011b, KEY_CAMERA }, /* screenshot */
- { 0x0142, KEY_MUTE },
- { 0x010e, KEY_MENU },
- { 0x010f, KEY_EPG },
- { 0x0117, KEY_INFO },
- { 0x0110, KEY_EXIT },
- { 0x0113, KEY_VOLUMEUP },
- { 0x0112, KEY_VOLUMEDOWN },
- { 0x0111, KEY_CHANNELUP },
- { 0x0114, KEY_CHANNELDOWN },
- { 0x0115, KEY_OK },
- { 0x011d, KEY_RED },
- { 0x011f, KEY_GREEN },
- { 0x011c, KEY_YELLOW },
- { 0x0144, KEY_BLUE },
- { 0x010c, KEY_SHUFFLE }, /* snapshot */
- { 0x0148, KEY_STOP },
- { 0x0150, KEY_PLAY },
- { 0x0151, KEY_PAUSE },
- { 0x0149, KEY_RECORD },
- { 0x0118, KEY_PREVIOUS }, /* |<< */
- { 0x010d, KEY_NEXT }, /* >>| */
- { 0x0124, KEY_PROG1 }, /* F1 */
- { 0x0125, KEY_PROG2 }, /* F2 */
-};
-
/* DVB USB Driver stuff */
static struct dvb_usb_device_properties anysee_properties;
@@ -520,11 +474,12 @@ static struct dvb_usb_device_properties anysee_properties = {
}
},
- .rc.legacy = {
- .rc_key_map = ir_codes_anysee_table,
- .rc_key_map_size = ARRAY_SIZE(ir_codes_anysee_table),
+ .rc.core = {
+ .rc_codes = RC_MAP_ANYSEE,
+ .protocol = IR_TYPE_OTHER,
+ .module_name = "anysee",
.rc_query = anysee_rc_query,
- .rc_interval = 200, /* windows driver uses 500ms */
+ .rc_interval = 250, /* windows driver uses 500ms */
},
.i2c_algo = &anysee_i2c_algo,
diff --git a/drivers/media/dvb/dvb-usb/dvb-usb-i2c.c b/drivers/media/dvb/dvb-usb/dvb-usb-i2c.c
index cead089bbb4f..88e4a62abc44 100644
--- a/drivers/media/dvb/dvb-usb/dvb-usb-i2c.c
+++ b/drivers/media/dvb/dvb-usb/dvb-usb-i2c.c
@@ -20,7 +20,6 @@ int dvb_usb_i2c_init(struct dvb_usb_device *d)
}
strlcpy(d->i2c_adap.name, d->desc->name, sizeof(d->i2c_adap.name));
- d->i2c_adap.class = I2C_CLASS_TV_DIGITAL,
d->i2c_adap.algo = d->props.i2c_algo;
d->i2c_adap.algo_data = NULL;
d->i2c_adap.dev.parent = &d->udev->dev;
diff --git a/drivers/media/dvb/dvb-usb/dvb-usb-ids.h b/drivers/media/dvb/dvb-usb/dvb-usb-ids.h
index 1a774d58d664..192a40ce583d 100644
--- a/drivers/media/dvb/dvb-usb/dvb-usb-ids.h
+++ b/drivers/media/dvb/dvb-usb/dvb-usb-ids.h
@@ -32,6 +32,7 @@
#define USB_VID_EMPIA 0xeb1a
#define USB_VID_GENPIX 0x09c0
#define USB_VID_GRANDTEC 0x5032
+#define USB_VID_GTEK 0x1f4d
#define USB_VID_HANFTEK 0x15f4
#define USB_VID_HAUPPAUGE 0x2040
#define USB_VID_HYPER_PALTEK 0x1025
@@ -133,6 +134,8 @@
#define USB_PID_KWORLD_VSTREAM_WARM 0x17df
#define USB_PID_TERRATEC_CINERGY_T_USB_XE 0x0055
#define USB_PID_TERRATEC_CINERGY_T_USB_XE_REV2 0x0069
+#define USB_PID_TERRATEC_CINERGY_T_STICK_RC 0x0097
+#define USB_PID_TERRATEC_CINERGY_T_STICK_DUAL_RC 0x0099
#define USB_PID_TWINHAN_VP7041_COLD 0x3201
#define USB_PID_TWINHAN_VP7041_WARM 0x3202
#define USB_PID_TWINHAN_VP7020_COLD 0x3203
@@ -143,6 +146,7 @@
#define USB_PID_TWINHAN_VP7021_WARM 0x3208
#define USB_PID_TINYTWIN 0x3226
#define USB_PID_TINYTWIN_2 0xe402
+#define USB_PID_TINYTWIN_3 0x9016
#define USB_PID_DNTV_TINYUSB2_COLD 0x3223
#define USB_PID_DNTV_TINYUSB2_WARM 0x3224
#define USB_PID_ULTIMA_TVBOX_COLD 0x8105
@@ -196,6 +200,7 @@
#define USB_PID_AVERMEDIA_A309 0xa309
#define USB_PID_AVERMEDIA_A310 0xa310
#define USB_PID_AVERMEDIA_A850 0x850a
+#define USB_PID_AVERMEDIA_A850T 0x850b
#define USB_PID_AVERMEDIA_A805 0xa805
#define USB_PID_AVERMEDIA_A815M 0x815a
#define USB_PID_TECHNOTREND_CONNECT_S2400 0x3006
@@ -268,6 +273,7 @@
#define USB_PID_GENPIX_8PSK_REV_2 0x0202
#define USB_PID_GENPIX_SKYWALKER_1 0x0203
#define USB_PID_GENPIX_SKYWALKER_CW3K 0x0204
+#define USB_PID_GENPIX_SKYWALKER_2 0x0206
#define USB_PID_SIGMATEK_DVB_110 0x6610
#define USB_PID_MSI_DIGI_VOX_MINI_II 0x1513
#define USB_PID_MSI_DIGIVOX_DUO 0x8801
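Each identifier added to this header is consumed twice in its driver: once as a match entry in the usb_device_id table and once, by index, from a device description. The af9015 hunks above show the full pattern for the new GTEK TinyTwin variant; reduced to its three parts (all taken from those hunks) it is:

/* 1. the ID definition (this file) */
#define USB_PID_TINYTWIN_3	0x9016

/* 2. the match entry; its position in af9015_usb_table is what matters */
{USB_DEVICE(USB_VID_GTEK, USB_PID_TINYTWIN_3)},	/* entry 36 */

/* 3. the device description referring back to that entry by index */
.cold_ids = {&af9015_usb_table[5],
	&af9015_usb_table[28],
	&af9015_usb_table[36], NULL},

This index coupling is why new table entries are appended just before the terminating {0} and why num_device_descs is raised from 9 to 12 in the first af9015 properties block.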
diff --git a/drivers/media/dvb/dvb-usb/friio-fe.c b/drivers/media/dvb/dvb-usb/friio-fe.c
index 93c21ddd0b77..015b4e8af1a5 100644
--- a/drivers/media/dvb/dvb-usb/friio-fe.c
+++ b/drivers/media/dvb/dvb-usb/friio-fe.c
@@ -75,7 +75,7 @@ static int jdvbt90502_single_reg_write(struct jdvbt90502_state *state,
return 0;
}
-static int _jdvbt90502_write(struct dvb_frontend *fe, u8 *buf, int len)
+static int _jdvbt90502_write(struct dvb_frontend *fe, const u8 buf[], int len)
{
struct jdvbt90502_state *state = fe->demodulator_priv;
int err, i;
diff --git a/drivers/media/dvb/dvb-usb/gp8psk-fe.c b/drivers/media/dvb/dvb-usb/gp8psk-fe.c
index dbdb5347b2a8..60d11e57e7d0 100644
--- a/drivers/media/dvb/dvb-usb/gp8psk-fe.c
+++ b/drivers/media/dvb/dvb-usb/gp8psk-fe.c
@@ -109,7 +109,7 @@ static int gp8psk_fe_read_signal_strength(struct dvb_frontend* fe, u16 *strength
static int gp8psk_fe_get_tune_settings(struct dvb_frontend* fe, struct dvb_frontend_tune_settings *tune)
{
- tune->min_delay_ms = 200;
+ tune->min_delay_ms = 800;
return 0;
}
@@ -334,7 +334,7 @@ success:
static struct dvb_frontend_ops gp8psk_fe_ops = {
.info = {
- .name = "Genpix 8psk-to-USB2 DVB-S",
+ .name = "Genpix DVB-S",
.type = FE_QPSK,
.frequency_min = 800000,
.frequency_max = 2250000,
diff --git a/drivers/media/dvb/dvb-usb/gp8psk.c b/drivers/media/dvb/dvb-usb/gp8psk.c
index 45106ac49674..c821293dbc22 100644
--- a/drivers/media/dvb/dvb-usb/gp8psk.c
+++ b/drivers/media/dvb/dvb-usb/gp8psk.c
@@ -227,6 +227,7 @@ static struct usb_device_id gp8psk_usb_table [] = {
{ USB_DEVICE(USB_VID_GENPIX, USB_PID_GENPIX_8PSK_REV_1_WARM) },
{ USB_DEVICE(USB_VID_GENPIX, USB_PID_GENPIX_8PSK_REV_2) },
{ USB_DEVICE(USB_VID_GENPIX, USB_PID_GENPIX_SKYWALKER_1) },
+ { USB_DEVICE(USB_VID_GENPIX, USB_PID_GENPIX_SKYWALKER_2) },
/* { USB_DEVICE(USB_VID_GENPIX, USB_PID_GENPIX_SKYWALKER_CW3K) }, */
{ 0 },
};
@@ -258,7 +259,7 @@ static struct dvb_usb_device_properties gp8psk_properties = {
.generic_bulk_ctrl_endpoint = 0x01,
- .num_device_descs = 3,
+ .num_device_descs = 4,
.devices = {
{ .name = "Genpix 8PSK-to-USB2 Rev.1 DVB-S receiver",
.cold_ids = { &gp8psk_usb_table[0], NULL },
@@ -272,6 +273,10 @@ static struct dvb_usb_device_properties gp8psk_properties = {
.cold_ids = { NULL },
.warm_ids = { &gp8psk_usb_table[3], NULL },
},
+ { .name = "Genpix SkyWalker-2 DVB-S receiver",
+ .cold_ids = { NULL },
+ .warm_ids = { &gp8psk_usb_table[4], NULL },
+ },
{ NULL },
}
};
@@ -306,6 +311,6 @@ module_init(gp8psk_usb_module_init);
module_exit(gp8psk_usb_module_exit);
MODULE_AUTHOR("Alan Nisota <alannisota@gamil.com>");
-MODULE_DESCRIPTION("Driver for Genpix 8psk-to-USB2 DVB-S");
+MODULE_DESCRIPTION("Driver for Genpix DVB-S");
MODULE_VERSION("1.1");
MODULE_LICENSE("GPL");
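The SkyWalker-2 is added as a warm-only device: in dvb-usb, cold_ids are the USB IDs a box presents before its firmware has been uploaded and warm_ids are the IDs it presents once it is ready, so a device that never shows up under a separate pre-firmware ID lists only warm IDs, exactly as the new entry above does:

{ .name = "Genpix SkyWalker-2 DVB-S receiver",
	.cold_ids = { NULL },	/* no separate pre-firmware ID to match */
	.warm_ids = { &gp8psk_usb_table[4], NULL },	/* the table entry added above */
},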
diff --git a/drivers/media/dvb/dvb-usb/lmedm04.c b/drivers/media/dvb/dvb-usb/lmedm04.c
new file mode 100644
index 000000000000..d939fbbf9fe6
--- /dev/null
+++ b/drivers/media/dvb/dvb-usb/lmedm04.c
@@ -0,0 +1,1088 @@
+/* DVB USB compliant linux driver for
+ *
+ * DM04/QQBOX DVB-S USB BOX LME2510C + SHARP:BS2F7HZ7395
+ * LME2510C + LG TDQY-P001F
+ * LME2510 + LG TDQY-P001F
+ *
+ * MVB7395 (LME2510C+SHARP:BS2F7HZ7395)
+ * SHARP:BS2F7HZ7395 = (STV0288+Sharp IX2505V)
+ *
+ * MV001F (LME2510+LGTDQY-P001F)
+ * LG TDQY - P001F =(TDA8263 + TDA10086H)
+ *
+ * MVB0001F (LME2510C+LGTDQT-P001F)
+ *
+ * For firmware see Documentation/dvb/lmedm04.txt
+ *
+ * I2C addresses:
+ * 0xd0 - STV0288 - Demodulator
+ * 0xc0 - Sharp IX2505V - Tuner
+ * --or--
+ * 0x1c - TDA10086 - Demodulator
+ * 0xc0 - TDA8263 - Tuner
+ *
+ * ***Please Note***
+ * There are other variants of the DM04
+ * ***NOT SUPPORTED***
+ * MV0194 (LME2510+SHARP0194)
+ * MVB0194 (LME2510C+SHARP0194)
+ *
+ *
+ * VID = 3344 PID LME2510=1122 LME2510C=1120
+ *
+ * Copyright (C) 2010 Malcolm Priestley (tvboxspy@gmail.com)
+ * LME2510(C)(C) Leaguerme (Shenzhen) MicroElectronics Co., Ltd.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License Version 2, as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ *
+ *
+ * see Documentation/dvb/README.dvb-usb for more information
+ *
+ * Known Issues :
+ * LME2510: Non Intel USB chipsets fail to maintain High Speed on
+ * Boot or Hot Plug.
+ *
+ * QQbox suffers from noise on LNB voltage.
+ *
+ * PID functions have been removed from this driver version due to
+ * problems with different firmware and application versions.
+ */
+#define DVB_USB_LOG_PREFIX "LME2510(C)"
+#include <linux/usb.h>
+#include <linux/usb/input.h>
+#include <media/ir-core.h>
+
+#include "dvb-usb.h"
+#include "lmedm04.h"
+#include "tda826x.h"
+#include "tda10086.h"
+#include "stv0288.h"
+#include "ix2505v.h"
+
+
+
+/* debug */
+static int dvb_usb_lme2510_debug;
+#define l_dprintk(var, level, args...) do { \
+ if ((var >= level)) \
+ printk(KERN_DEBUG DVB_USB_LOG_PREFIX ": " args); \
+} while (0)
+
+#define deb_info(level, args...) l_dprintk(dvb_usb_lme2510_debug, level, args)
+#define debug_data_snipet(level, name, p) \
+ deb_info(level, name" (%02x%02x%02x%02x%02x%02x%02x%02x)", \
+ *p, *(p+1), *(p+2), *(p+3), *(p+4), \
+ *(p+5), *(p+6), *(p+7));
+
+
+module_param_named(debug, dvb_usb_lme2510_debug, int, 0644);
+MODULE_PARM_DESC(debug, "set debugging level (1=info (or-able))."
+ DVB_USB_DEBUG_STATUS);
+
+static int dvb_usb_lme2510_firmware;
+module_param_named(firmware, dvb_usb_lme2510_firmware, int, 0644);
+MODULE_PARM_DESC(firmware, "set default firmware 0=Sharp7395 1=LG");
+
+
+DVB_DEFINE_MOD_OPT_ADAPTER_NR(adapter_nr);
+#define TUNER_LG 0x1
+#define TUNER_S7395 0x2
+
+struct lme2510_state {
+ u8 id;
+ u8 tuner_config;
+ u8 signal_lock;
+ u8 signal_level;
+ u8 signal_sn;
+ u8 time_key;
+ u8 i2c_talk_onoff;
+ u8 i2c_gate;
+ u8 i2c_tuner_gate_w;
+ u8 i2c_tuner_gate_r;
+ u8 i2c_tuner_addr;
+ u8 stream_on;
+ u8 one_tune;
+ void *buffer;
+ struct urb *lme_urb;
+ void *usb_buffer;
+
+};
+
+static int lme2510_bulk_write(struct usb_device *dev,
+ u8 *snd, int len, u8 pipe)
+{
+ int ret, actual_l;
+
+ ret = usb_bulk_msg(dev, usb_sndbulkpipe(dev, pipe),
+ snd, len , &actual_l, 500);
+ return ret;
+}
+
+static int lme2510_bulk_read(struct usb_device *dev,
+ u8 *rev, int len, u8 pipe)
+{
+ int ret, actual_l;
+
+ ret = usb_bulk_msg(dev, usb_rcvbulkpipe(dev, pipe),
+ rev, len , &actual_l, 500);
+ return ret;
+}
+
+static int lme2510_usb_talk(struct dvb_usb_device *d,
+ u8 *wbuf, int wlen, u8 *rbuf, int rlen)
+{
+ struct lme2510_state *st = d->priv;
+ u8 *buff;
+ int ret = 0;
+
+ if (st->usb_buffer == NULL) {
+ st->usb_buffer = kmalloc(512, GFP_KERNEL);
+ if (st->usb_buffer == NULL) {
+ info("MEM Error no memory");
+ return -ENOMEM;
+ }
+ }
+ buff = st->usb_buffer;
+
+ /* the read/write is capped at 512 bytes */
+ memcpy(buff, wbuf, (wlen > 512) ? 512 : wlen);
+
+ ret = mutex_lock_interruptible(&d->usb_mutex);
+
+ if (ret < 0)
+ return -EAGAIN;
+
+ ret |= usb_clear_halt(d->udev, usb_sndbulkpipe(d->udev, 0x01));
+
+ ret |= lme2510_bulk_write(d->udev, buff, wlen , 0x01);
+
+ msleep(12);
+
+ ret |= usb_clear_halt(d->udev, usb_rcvbulkpipe(d->udev, 0x01));
+
+ ret |= lme2510_bulk_read(d->udev, buff, (rlen > 512) ?
+ 512 : rlen , 0x01);
+
+ if (rlen > 0)
+ memcpy(rbuf, buff, rlen);
+
+ mutex_unlock(&d->usb_mutex);
+
+ return (ret < 0) ? -ENODEV : 0;
+}
+
+static int lme2510_usb_talk_restart(struct dvb_usb_device *d,
+ u8 *wbuf, int wlen, u8 *rbuf, int rlen) {
+ static u8 stream_on[] = LME_ST_ON_W;
+ int ret;
+ u8 rbuff[10];
+ /*Send Normal Command*/
+ ret = lme2510_usb_talk(d, wbuf, wlen, rbuf, rlen);
+ /*Restart Stream Command*/
+ ret |= lme2510_usb_talk(d, stream_on, sizeof(stream_on),
+ rbuff, sizeof(rbuff));
+ return ret;
+}
+static int lme2510_remote_keypress(struct dvb_usb_adapter *adap, u16 keypress)
+{
+ struct dvb_usb_device *d = adap->dev;
+
+ deb_info(1, "INT Key Keypress =%04x", keypress);
+
+ if (keypress > 0)
+ ir_keydown(d->rc_input_dev, keypress, 0);
+
+ return 0;
+}
+
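+/*
+ * lme2510_int_response() below parses the interrupt URB as 8-byte records
+ * whose first byte selects the type: 0xaa carries a remote keypress in
+ * bytes 4-5, 0xbb carries tuner status (lock/level/SNR, laid out per tuner
+ * type), 0xcc carries control data and anything else is logged as unknown.
+ */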
+static void lme2510_int_response(struct urb *lme_urb)
+{
+ struct dvb_usb_adapter *adap = lme_urb->context;
+ struct lme2510_state *st = adap->dev->priv;
+ static u8 *ibuf, *rbuf;
+ int i = 0, offset;
+
+ switch (lme_urb->status) {
+ case 0:
+ case -ETIMEDOUT:
+ break;
+ case -ECONNRESET:
+ case -ENOENT:
+ case -ESHUTDOWN:
+ return;
+ default:
+ info("Error %x", lme_urb->status);
+ break;
+ }
+
+ rbuf = (u8 *) lme_urb->transfer_buffer;
+
+ offset = ((lme_urb->actual_length/8) > 4)
+ ? 4 : (lme_urb->actual_length/8) ;
+
+ for (i = 0; i < offset; ++i) {
+ ibuf = (u8 *)&rbuf[i*8];
+ deb_info(5, "INT O/S C =%02x C/O=%02x Type =%02x%02x",
+ offset, i, ibuf[0], ibuf[1]);
+
+ switch (ibuf[0]) {
+ case 0xaa:
+ debug_data_snipet(1, "INT Remote data snippet in", ibuf);
+ lme2510_remote_keypress(adap,
+ (u16)(ibuf[4]<<8)+ibuf[5]);
+ break;
+ case 0xbb:
+ switch (st->tuner_config) {
+ case TUNER_LG:
+ if (ibuf[2] > 0)
+ st->signal_lock = ibuf[2];
+ st->signal_level = ibuf[4];
+ st->signal_sn = ibuf[3];
+ st->time_key = ibuf[7];
+ break;
+ case TUNER_S7395:
+ /* Tweak for earlier firmware */
+ if (ibuf[1] == 0x03) {
+ st->signal_level = ibuf[3];
+ st->signal_sn = ibuf[4];
+ } else {
+ st->signal_level = ibuf[4];
+ st->signal_sn = ibuf[5];
+ }
+ break;
+ default:
+ break;
+ }
+ debug_data_snipet(5, "INT Remote data snippet in", ibuf);
+ break;
+ case 0xcc:
+ debug_data_snipet(1, "INT Control data snipet", ibuf);
+ break;
+ default:
+ debug_data_snipet(1, "INT Unknown data snipet", ibuf);
+ break;
+ }
+ }
+ usb_submit_urb(lme_urb, GFP_ATOMIC);
+}
+
+static int lme2510_int_read(struct dvb_usb_adapter *adap)
+{
+ struct lme2510_state *lme_int = adap->dev->priv;
+
+ lme_int->lme_urb = usb_alloc_urb(0, GFP_ATOMIC);
+
+ if (lme_int->lme_urb == NULL)
+ return -ENOMEM;
+
+ lme_int->buffer = usb_alloc_coherent(adap->dev->udev, 5000, GFP_ATOMIC,
+ &lme_int->lme_urb->transfer_dma);
+
+ if (lme_int->buffer == NULL)
+ return -ENOMEM;
+
+ usb_fill_int_urb(lme_int->lme_urb,
+ adap->dev->udev,
+ usb_rcvintpipe(adap->dev->udev, 0xa),
+ lme_int->buffer,
+ 4096,
+ lme2510_int_response,
+ adap,
+ 11);
+
+ lme_int->lme_urb->transfer_flags |= URB_NO_TRANSFER_DMA_MAP;
+
+ usb_submit_urb(lme_int->lme_urb, GFP_ATOMIC);
+ info("INT Interupt Service Started");
+
+ return 0;
+}
+
+static int lme2510_return_status(struct usb_device *dev)
+{
+ int ret = 0;
+ u8 data[10] = {0};
+
+ ret |= usb_control_msg(dev, usb_rcvctrlpipe(dev, 0),
+ 0x06, 0x80, 0x0302, 0x00, data, 0x0006, 200);
+ info("Firmware Status: %x (%x)", ret , data[2]);
+
+ return (ret < 0) ? -ENODEV : data[2];
+}
+
+static int lme2510_msg(struct dvb_usb_device *d,
+ u8 *wbuf, int wlen, u8 *rbuf, int rlen)
+{
+ int ret = 0;
+ struct lme2510_state *st = d->priv;
+
+ if (mutex_lock_interruptible(&d->i2c_mutex) < 0)
+ return -EAGAIN;
+
+ if (st->i2c_talk_onoff == 1) {
+
+ ret = lme2510_usb_talk(d, wbuf, wlen, rbuf, rlen);
+
+ switch (st->tuner_config) {
+ case TUNER_LG:
+ if (wbuf[2] == 0x1c) {
+ if (wbuf[3] == 0x0e) {
+ st->signal_lock = rbuf[1];
+ if ((st->stream_on & 1) &&
+ (st->signal_lock & 0x10)) {
+ lme2510_usb_talk_restart(d,
+ wbuf, wlen, rbuf, rlen);
+ st->i2c_talk_onoff = 0;
+ }
+ msleep(80);
+ }
+ }
+ break;
+ case TUNER_S7395:
+ if (wbuf[2] == 0xd0) {
+ if (wbuf[3] == 0x24) {
+ st->signal_lock = rbuf[1];
+ if ((st->stream_on & 1) &&
+ (st->signal_lock & 0x8)) {
+ lme2510_usb_talk_restart(d,
+ wbuf, wlen, rbuf, rlen);
+ st->i2c_talk_onoff = 0;
+ }
+ }
+ if ((wbuf[3] != 0x6) & (wbuf[3] != 0x5))
+ msleep(5);
+
+
+ }
+ break;
+ default:
+ break;
+ }
+ } else {
+ switch (st->tuner_config) {
+ case TUNER_LG:
+ switch (wbuf[3]) {
+ case 0x0e:
+ rbuf[0] = 0x55;
+ rbuf[1] = st->signal_lock;
+ break;
+ case 0x43:
+ rbuf[0] = 0x55;
+ rbuf[1] = st->signal_level;
+ break;
+ case 0x1c:
+ rbuf[0] = 0x55;
+ rbuf[1] = st->signal_sn;
+ break;
+ /*DiSEqC functions as per TDA10086*/
+ case 0x36:
+ case 0x48:
+ case 0x49:
+ case 0x4a:
+ case 0x4b:
+ case 0x4c:
+ case 0x4d:
+ if (wbuf[2] == 0x1c)
+ lme2510_usb_talk_restart(d,
+ wbuf, wlen, rbuf, rlen);
+ default:
+ break;
+ }
+ break;
+ case TUNER_S7395:
+ switch (wbuf[3]) {
+ case 0x10:
+ rbuf[0] = 0x55;
+ rbuf[1] = (st->signal_level & 0x80)
+ ? 0 : (st->signal_level * 2);
+ break;
+ case 0x2d:
+ rbuf[0] = 0x55;
+ rbuf[1] = st->signal_sn;
+ break;
+ case 0x24:
+ rbuf[0] = 0x55;
+ rbuf[1] = (st->signal_level & 0x80)
+ ? 0 : st->signal_lock;
+ break;
+ case 0x6:
+ if (wbuf[2] == 0xd0)
+ lme2510_usb_talk(d,
+ wbuf, wlen, rbuf, rlen);
+ break;
+ case 0x1:
+ if (st->one_tune > 0)
+ break;
+ st->one_tune++;
+ st->i2c_talk_onoff = 1;
+ /*DiSEqC functions as per STV0288*/
+ case 0x5:
+ case 0x7:
+ case 0x8:
+ case 0x9:
+ case 0xa:
+ case 0xb:
+ if (wbuf[2] == 0xd0)
+ lme2510_usb_talk_restart(d,
+ wbuf, wlen, rbuf, rlen);
+ break;
+ default:
+ rbuf[0] = 0x55;
+ rbuf[1] = 0x00;
+ break;
+ }
+ break;
+ default:
+ break;
+
+ }
+
+ deb_info(4, "I2C From Interupt Message out(%02x) in(%02x)",
+ wbuf[3], rbuf[1]);
+
+ }
+
+ mutex_unlock(&d->i2c_mutex);
+
+ return ret;
+}
+
+
+static int lme2510_i2c_xfer(struct i2c_adapter *adap, struct i2c_msg msg[],
+ int num)
+{
+ struct dvb_usb_device *d = i2c_get_adapdata(adap);
+ struct lme2510_state *st = d->priv;
+ static u8 obuf[64], ibuf[512];
+ int i, read, read_o;
+ u16 len;
+ u8 gate = st->i2c_gate;
+
+ if (gate == 0)
+ gate = 5;
+
+ if (num > 2)
+ warn("more than 2 i2c messages"
+ "at a time is not handled yet. TODO.");
+
+ for (i = 0; i < num; i++) {
+ read_o = 1 & (msg[i].flags & I2C_M_RD);
+ read = i+1 < num && (msg[i+1].flags & I2C_M_RD);
+ read |= read_o;
+ gate = (msg[i].addr == st->i2c_tuner_addr)
+ ? (read) ? st->i2c_tuner_gate_r
+ : st->i2c_tuner_gate_w
+ : st->i2c_gate;
+ obuf[0] = gate | (read << 7);
+
+ if (gate == 5)
+ obuf[1] = (read) ? 2 : msg[i].len + 1;
+ else
+ obuf[1] = msg[i].len + read + 1;
+
+ obuf[2] = msg[i].addr;
+ if (read) {
+ if (read_o)
+ len = 3;
+ else {
+ memcpy(&obuf[3], msg[i].buf, msg[i].len);
+ obuf[msg[i].len+3] = msg[i+1].len;
+ len = msg[i].len+4;
+ }
+ } else {
+ memcpy(&obuf[3], msg[i].buf, msg[i].len);
+ len = msg[i].len+3;
+ }
+
+ if (lme2510_msg(d, obuf, len, ibuf, 512) < 0) {
+ deb_info(1, "i2c transfer failed.");
+ return -EAGAIN;
+ }
+
+ if (read) {
+ if (read_o)
+ memcpy(msg[i].buf, &ibuf[1], msg[i].len);
+ else {
+ memcpy(msg[i+1].buf, &ibuf[1], msg[i+1].len);
+ i++;
+ }
+ }
+ }
+ return i;
+}
+
+static u32 lme2510_i2c_func(struct i2c_adapter *adapter)
+{
+ return I2C_FUNC_I2C;
+}
+
+static struct i2c_algorithm lme2510_i2c_algo = {
+ .master_xfer = lme2510_i2c_xfer,
+ .functionality = lme2510_i2c_func,
+};
+
+/* Callbacks for DVB USB */
+static int lme2510_identify_state(struct usb_device *udev,
+ struct dvb_usb_device_properties *props,
+ struct dvb_usb_device_description **desc,
+ int *cold)
+{
+ if (lme2510_return_status(udev) == 0x44)
+ *cold = 1;
+ else
+ *cold = 0;
+ return 0;
+}
+
+static int lme2510_streaming_ctrl(struct dvb_usb_adapter *adap, int onoff)
+{
+ struct lme2510_state *st = adap->dev->priv;
+ static u8 stream_on[] = LME_ST_ON_W;
+ static u8 clear_reg_3[] = LME_CLEAR_PID;
+ static u8 rbuf[1];
+ static u8 timeout;
+ int ret = 0, len = 2, rlen = sizeof(rbuf);
+
+ deb_info(1, "STM (%02x)", onoff);
+
+ if (onoff == 1) {
+ st->i2c_talk_onoff = 0;
+ timeout = 0;
+ /* wait for I2C to be free */
+ while (mutex_lock_interruptible(&adap->dev->i2c_mutex) < 0) {
+ timeout++;
+ if (timeout > 5)
+ return -ENODEV;
+ }
+ msleep(100);
+ ret |= lme2510_usb_talk(adap->dev,
+ stream_on, len, rbuf, rlen);
+ st->stream_on = 1;
+ st->one_tune = 0;
+ mutex_unlock(&adap->dev->i2c_mutex);
+ } else {
+ deb_info(1, "STM Steam Off");
+ ret |= lme2510_usb_talk(adap->dev, clear_reg_3,
+ sizeof(clear_reg_3), rbuf, rlen);
+ st->stream_on = 0;
+ st->i2c_talk_onoff = 1;
+ }
+
+ return (ret < 0) ? -ENODEV : 0;
+}
+
+static int lme2510_int_service(struct dvb_usb_adapter *adap)
+{
+ struct dvb_usb_device *d = adap->dev;
+ struct input_dev *input_dev;
+ char *ir_codes = RC_MAP_LME2510;
+ int ret = 0;
+
+ info("STA Configuring Remote");
+
+ usb_make_path(d->udev, d->rc_phys, sizeof(d->rc_phys));
+
+ strlcat(d->rc_phys, "/ir0", sizeof(d->rc_phys));
+
+ input_dev = input_allocate_device();
+ if (!input_dev)
+ return -ENOMEM;
+
+ input_dev->name = "LME2510 Remote Control";
+ input_dev->phys = d->rc_phys;
+
+ usb_to_input_id(d->udev, &input_dev->id);
+
+ ret |= ir_input_register(input_dev, ir_codes, NULL, "LME 2510");
+
+ if (ret) {
+ input_free_device(input_dev);
+ return ret;
+ }
+
+ d->rc_input_dev = input_dev;
+ /* Start the Interrupt */
+ ret = lme2510_int_read(adap);
+
+ if (ret < 0) {
+ ir_input_unregister(input_dev);
+ input_free_device(input_dev);
+ }
+ return (ret < 0) ? -ENODEV : 0;
+}
+
+static u8 check_sum(u8 *p, u8 len)
+{
+ u8 sum = 0;
+ while (len--)
+ sum += *p++;
+ return sum;
+}
+
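+/*
+ * lme2510_download_firmware() below pushes the image in two sections over
+ * bulk endpoint 1: each packet carries a section byte (1 for the first 512
+ * bytes, 2 for the remainder, bit 7 set on a section's last packet), a
+ * length byte, the payload and a one-byte additive checksum, and the device
+ * acks every packet with 0x88; a final 0x8a write then resets the device.
+ */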
+static int lme2510_download_firmware(struct usb_device *dev,
+ const struct firmware *fw)
+{
+ int ret = 0;
+ u8 data[512] = {0};
+ u16 j, wlen, len_in, start, end;
+ u8 packet_size, dlen, i;
+ u8 *fw_data;
+
+ packet_size = 0x31;
+ len_in = 1;
+
+
+ info("FRM Starting Firmware Download");
+
+ for (i = 1; i < 3; i++) {
+ start = (i == 1) ? 0 : 512;
+ end = (i == 1) ? 512 : fw->size;
+ for (j = start; j < end; j += (packet_size+1)) {
+ fw_data = (u8 *)(fw->data + j);
+ if ((end - j) > packet_size) {
+ data[0] = i;
+ dlen = packet_size;
+ } else {
+ data[0] = i | 0x80;
+ dlen = (u8)(end - j)-1;
+ }
+ data[1] = dlen;
+ memcpy(&data[2], fw_data, dlen+1);
+ wlen = (u8) dlen + 4;
+ data[wlen-1] = check_sum(fw_data, dlen+1);
+ deb_info(1, "Data S=%02x:E=%02x CS= %02x", data[3],
+ data[dlen+2], data[dlen+3]);
+ ret |= lme2510_bulk_write(dev, data, wlen, 1);
+ ret |= lme2510_bulk_read(dev, data, len_in , 1);
+ ret |= (data[0] == 0x88) ? 0 : -1;
+ }
+ }
+ usb_control_msg(dev, usb_rcvctrlpipe(dev, 0),
+ 0x06, 0x80, 0x0200, 0x00, data, 0x0109, 1000);
+
+
+ data[0] = 0x8a;
+ len_in = 1;
+ msleep(2000);
+ ret |= lme2510_bulk_write(dev, data , len_in, 1); /*Resetting*/
+ ret |= lme2510_bulk_read(dev, data, len_in, 1);
+ msleep(400);
+
+ if (ret < 0)
+ info("FRM Firmware Download Failed (%04x)" , ret);
+ else
+ info("FRM Firmware Download Completed - Resetting Device");
+
+
+ return (ret < 0) ? -ENODEV : 0;
+}
+
+/* Default firmware for LME2510C */
+const char lme_firmware[50] = "dvb-usb-lme2510c-s7395.fw";
+
+static void lme_coldreset(struct usb_device *dev)
+{
+ int ret = 0, len_in;
+ u8 data[512] = {0};
+
+ data[0] = 0x0a;
+ len_in = 1;
+ info("FRM Firmware Cold Reset");
+ ret |= lme2510_bulk_write(dev, data , len_in, 1); /*Cold Resetting*/
+ ret |= lme2510_bulk_read(dev, data, len_in, 1);
+ return;
+}
+
+static void lme_firmware_switch(struct usb_device *udev, int cold)
+{
+ const struct firmware *fw = NULL;
+ char lme2510c_s7395[] = "dvb-usb-lme2510c-s7395.fw";
+ char lme2510c_lg[] = "dvb-usb-lme2510c-lg.fw";
+ char *firm_msg[] = {"Loading", "Switching to"};
+ int ret;
+
+ if (udev->descriptor.idProduct == 0x1122)
+ return;
+
+ switch (dvb_usb_lme2510_firmware) {
+ case 0:
+ default:
+ memcpy(&lme_firmware, lme2510c_s7395, sizeof(lme2510c_s7395));
+ ret = request_firmware(&fw, lme_firmware, &udev->dev);
+ if (ret == 0) {
+ info("FRM %s S7395 Firmware", firm_msg[cold]);
+ break;
+ }
+ if (cold == 0)
+ dvb_usb_lme2510_firmware = 1;
+ else
+ cold = 0;
+ case 1:
+ memcpy(&lme_firmware, lme2510c_lg, sizeof(lme2510c_lg));
+ ret = request_firmware(&fw, lme_firmware, &udev->dev);
+ if (ret == 0) {
+ info("FRM %s LG Firmware", firm_msg[cold]);
+ break;
+ }
+ info("FRM No Firmware Found - please install");
+ dvb_usb_lme2510_firmware = 0;
+ cold = 0;
+ break;
+ }
+ release_firmware(fw);
+ if (cold)
+ lme_coldreset(udev);
+ return;
+}
+
+static int lme2510_kill_urb(struct usb_data_stream *stream)
+{
+ int i;
+ for (i = 0; i < stream->urbs_submitted; i++) {
+ deb_info(3, "killing URB no. %d.", i);
+
+ /* stop the URB */
+ usb_kill_urb(stream->urb_list[i]);
+ }
+ stream->urbs_submitted = 0;
+ return 0;
+}
+
+static struct tda10086_config tda10086_config = {
+ .demod_address = 0x1c,
+ .invert = 0,
+ .diseqc_tone = 1,
+ .xtal_freq = TDA10086_XTAL_16M,
+};
+
+static struct stv0288_config lme_config = {
+ .demod_address = 0xd0,
+ .min_delay_ms = 15,
+ .inittab = s7395_inittab,
+};
+
+static struct ix2505v_config lme_tuner = {
+ .tuner_address = 0xc0,
+ .min_delay_ms = 100,
+ .tuner_gain = 0x0,
+ .tuner_chargepump = 0x3,
+};
+
+static int dm04_lme2510_set_voltage(struct dvb_frontend *fe,
+ fe_sec_voltage_t voltage)
+{
+ struct dvb_usb_adapter *adap = fe->dvb->priv;
+ struct lme2510_state *st = adap->dev->priv;
+ static u8 voltage_low[] = LME_VOLTAGE_L;
+ static u8 voltage_high[] = LME_VOLTAGE_H;
+ static u8 lnb_on[] = LNB_ON;
+ static u8 lnb_off[] = LNB_OFF;
+ static u8 rbuf[1];
+ int ret = 0, len = 3, rlen = 1;
+
+ if (st->stream_on == 1)
+ return 0;
+
+ ret |= lme2510_usb_talk(adap->dev, lnb_on, len, rbuf, rlen);
+
+ switch (voltage) {
+ case SEC_VOLTAGE_18:
+ ret |= lme2510_usb_talk(adap->dev,
+ voltage_high, len, rbuf, rlen);
+ break;
+
+ case SEC_VOLTAGE_OFF:
+ ret |= lme2510_usb_talk(adap->dev,
+ lnb_off, len, rbuf, rlen);
+ case SEC_VOLTAGE_13:
+ default:
+ ret |= lme2510_usb_talk(adap->dev,
+ voltage_low, len, rbuf, rlen);
+ break;
+ }
+ st->i2c_talk_onoff = 1;
+ return (ret < 0) ? -ENODEV : 0;
+}
+
+static int dm04_lme2510_frontend_attach(struct dvb_usb_adapter *adap)
+{
+ int ret = 0;
+ struct lme2510_state *st = adap->dev->priv;
+
+ /* Interrupt Start */
+ ret = lme2510_int_service(adap);
+ if (ret < 0) {
+ info("INT Unable to start Interupt Service");
+ return -ENODEV;
+ }
+
+ st->i2c_talk_onoff = 1;
+ st->i2c_gate = 4;
+
+ adap->fe = dvb_attach(tda10086_attach, &tda10086_config,
+ &adap->dev->i2c_adap);
+
+ if (adap->fe) {
+ info("TUN Found Frontend TDA10086");
+ memcpy(&adap->fe->ops.info.name,
+ &"DM04_LG_TDQY-P001F DVB-S", 24);
+ adap->fe->ops.set_voltage = dm04_lme2510_set_voltage;
+ st->i2c_tuner_gate_w = 4;
+ st->i2c_tuner_gate_r = 4;
+ st->i2c_tuner_addr = 0xc0;
+ if (dvb_attach(tda826x_attach, adap->fe, 0xc0,
+ &adap->dev->i2c_adap, 1)) {
+ info("TUN TDA8263 Found");
+ st->tuner_config = TUNER_LG;
+ if (dvb_usb_lme2510_firmware != 1) {
+ dvb_usb_lme2510_firmware = 1;
+ lme_firmware_switch(adap->dev->udev, 1);
+ }
+ return 0;
+ }
+ kfree(adap->fe);
+ adap->fe = NULL;
+ }
+ st->i2c_gate = 5;
+ adap->fe = dvb_attach(stv0288_attach, &lme_config,
+ &adap->dev->i2c_adap);
+
+ if (adap->fe) {
+ info("FE Found Stv0288");
+ memcpy(&adap->fe->ops.info.name,
+ &"DM04_SHARP:BS2F7HZ7395", 22);
+ adap->fe->ops.set_voltage = dm04_lme2510_set_voltage;
+ st->i2c_tuner_gate_w = 4;
+ st->i2c_tuner_gate_r = 5;
+ st->i2c_tuner_addr = 0xc0;
+ if (dvb_attach(ix2505v_attach, adap->fe, &lme_tuner,
+ &adap->dev->i2c_adap)) {
+ st->tuner_config = TUNER_S7395;
+ info("TUN Sharp IX2505V silicon tuner");
+ if (dvb_usb_lme2510_firmware != 0) {
+ dvb_usb_lme2510_firmware = 0;
+ lme_firmware_switch(adap->dev->udev, 1);
+ }
+ return 0;
+ }
+ kfree(adap->fe);
+ adap->fe = NULL;
+ }
+
+ info("DM04 Not Supported");
+ return -ENODEV;
+}
+
+static int lme2510_powerup(struct dvb_usb_device *d, int onoff)
+{
+ struct lme2510_state *st = d->priv;
+ st->i2c_talk_onoff = 1;
+ return 0;
+}
+
+/* DVB USB Driver stuff */
+static struct dvb_usb_device_properties lme2510_properties;
+static struct dvb_usb_device_properties lme2510c_properties;
+
+static int lme2510_probe(struct usb_interface *intf,
+ const struct usb_device_id *id)
+{
+ struct usb_device *udev = interface_to_usbdev(intf);
+ int ret = 0;
+
+ usb_reset_configuration(udev);
+
+ usb_set_interface(udev, intf->cur_altsetting->desc.bInterfaceNumber, 1);
+
+ if (udev->speed != USB_SPEED_HIGH) {
+ ret = usb_reset_device(udev);
+ info("DEV Failed to connect in HIGH SPEED mode");
+ return -ENODEV;
+ }
+
+ lme_firmware_switch(udev, 0);
+
+ if (0 == dvb_usb_device_init(intf, &lme2510_properties,
+ THIS_MODULE, NULL, adapter_nr)) {
+ info("DEV registering device driver");
+ return 0;
+ }
+ if (0 == dvb_usb_device_init(intf, &lme2510c_properties,
+ THIS_MODULE, NULL, adapter_nr)) {
+ info("DEV registering device driver");
+ return 0;
+ }
+
+ info("DEV lme2510 Error");
+ return -ENODEV;
+
+}
+
+static struct usb_device_id lme2510_table[] = {
+ { USB_DEVICE(0x3344, 0x1122) }, /* LME2510 */
+ { USB_DEVICE(0x3344, 0x1120) }, /* LME2510C */
+ {} /* Terminating entry */
+};
+
+MODULE_DEVICE_TABLE(usb, lme2510_table);
+
+static struct dvb_usb_device_properties lme2510_properties = {
+ .caps = DVB_USB_IS_AN_I2C_ADAPTER,
+ .usb_ctrl = DEVICE_SPECIFIC,
+ .download_firmware = lme2510_download_firmware,
+ .firmware = "dvb-usb-lme2510-lg.fw",
+
+ .size_of_priv = sizeof(struct lme2510_state),
+ .num_adapters = 1,
+ .adapter = {
+ {
+ .streaming_ctrl = lme2510_streaming_ctrl,
+ .frontend_attach = dm04_lme2510_frontend_attach,
+ /* parameter for the MPEG2-data transfer */
+ .stream = {
+ .type = USB_BULK,
+ .count = 10,
+ .endpoint = 0x06,
+ .u = {
+ .bulk = {
+ .buffersize = 4096,
+
+ }
+ }
+ }
+ }
+ },
+ .power_ctrl = lme2510_powerup,
+ .identify_state = lme2510_identify_state,
+ .i2c_algo = &lme2510_i2c_algo,
+ .generic_bulk_ctrl_endpoint = 0,
+ .num_device_descs = 1,
+ .devices = {
+ { "DM04 LME2510 DVB-S USB 2.0",
+ { &lme2510_table[0], NULL },
+ },
+
+ }
+};
+
+static struct dvb_usb_device_properties lme2510c_properties = {
+ .caps = DVB_USB_IS_AN_I2C_ADAPTER,
+ .usb_ctrl = DEVICE_SPECIFIC,
+ .download_firmware = lme2510_download_firmware,
+ .firmware = lme_firmware,
+ .size_of_priv = sizeof(struct lme2510_state),
+ .num_adapters = 1,
+ .adapter = {
+ {
+ .streaming_ctrl = lme2510_streaming_ctrl,
+ .frontend_attach = dm04_lme2510_frontend_attach,
+ /* parameter for the MPEG2-data transfer */
+ .stream = {
+ .type = USB_BULK,
+ .count = 10,
+ .endpoint = 0x8,
+ .u = {
+ .bulk = {
+ .buffersize = 4096,
+
+ }
+ }
+ }
+ }
+ },
+ .power_ctrl = lme2510_powerup,
+ .identify_state = lme2510_identify_state,
+ .i2c_algo = &lme2510_i2c_algo,
+ .generic_bulk_ctrl_endpoint = 0,
+ .num_device_descs = 1,
+ .devices = {
+ { "DM04 LME2510C USB2.0",
+ { &lme2510_table[1], NULL },
+ },
+ }
+};
+
+void *lme2510_exit_int(struct dvb_usb_device *d)
+{
+ struct lme2510_state *st = d->priv;
+ struct dvb_usb_adapter *adap = &d->adapter[0];
+ void *buffer = NULL;
+
+ if (adap != NULL) {
+ lme2510_kill_urb(&adap->stream);
+ adap->feedcount = 0;
+ }
+
+ if (st->lme_urb != NULL) {
+ st->i2c_talk_onoff = 1;
+ st->signal_lock = 0;
+ st->signal_level = 0;
+ st->signal_sn = 0;
+ buffer = st->usb_buffer;
+ usb_kill_urb(st->lme_urb);
+ usb_free_coherent(d->udev, 5000, st->buffer,
+ st->lme_urb->transfer_dma);
+ info("Interupt Service Stopped");
+ ir_input_unregister(d->rc_input_dev);
+ info("Remote Stopped");
+ }
+ return buffer;
+}
+
+void lme2510_exit(struct usb_interface *intf)
+{
+ struct dvb_usb_device *d = usb_get_intfdata(intf);
+ void *usb_buffer;
+
+ if (d != NULL) {
+ usb_buffer = lme2510_exit_int(d);
+ dvb_usb_device_exit(intf);
+ kfree(usb_buffer);
+ }
+}
+
+static struct usb_driver lme2510_driver = {
+ .name = "LME2510C_DVBS",
+ .probe = lme2510_probe,
+ .disconnect = lme2510_exit,
+ .id_table = lme2510_table,
+};
+
+/* module stuff */
+static int __init lme2510_module_init(void)
+{
+ int result = usb_register(&lme2510_driver);
+ if (result) {
+ err("usb_register failed. Error number %d", result);
+ return result;
+ }
+
+ return 0;
+}
+
+static void __exit lme2510_module_exit(void)
+{
+ /* deregister this driver from the USB subsystem */
+ usb_deregister(&lme2510_driver);
+}
+
+module_init(lme2510_module_init);
+module_exit(lme2510_module_exit);
+
+MODULE_AUTHOR("Malcolm Priestley <tvboxspy@gmail.com>");
+MODULE_DESCRIPTION("LM2510(C) DVB-S USB2.0");
+MODULE_VERSION("1.60");
+MODULE_LICENSE("GPL");
diff --git a/drivers/media/dvb/dvb-usb/lmedm04.h b/drivers/media/dvb/dvb-usb/lmedm04.h
new file mode 100644
index 000000000000..e6af16c1e3e5
--- /dev/null
+++ b/drivers/media/dvb/dvb-usb/lmedm04.h
@@ -0,0 +1,173 @@
+/* DVB USB compliant linux driver for
+ *
+ * DM04/QQBOX DVB-S USB BOX LME2510C + SHARP:BS2F7HZ7395
+ * LME2510C + LG TDQY-P001F
+ * LME2510 + LG TDQY-P001F
+ *
+ * MVB7395 (LME2510C+SHARP:BS2F7HZ7395)
+ * SHARP:BS2F7HZ7395 = (STV0288+Sharp IX2505V)
+ *
+ * MVB001F (LME2510+LGTDQT-P001F)
+ * LG TDQY - P001F =(TDA8263 + TDA10086H)
+ *
+ * MVB0001F (LME2510C+LGTDQT-P001F)
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the Free
+ * Software Foundation, version 2.
+ *
+ * see Documentation/dvb/README.dvb-usb for more information
+ */
+#ifndef _DVB_USB_LME2510_H_
+#define _DVB_USB_LME2510_H_
+
+/* Streamer & PID
+ *
+ * Note: These commands do not actually stop the streaming
+ * but form some kind of packet filtering/stream count
+ * or tuning related functions.
+ * 06 XX
+ * offset 1 = 00 Enable Streaming
+ *
+ *
+ * PID
+ * 03 XX XX ----> reg number ---> setting....20 XX
+ * offset 1 = length
+ * offset 2 = start of data
+ * end byte -1 = 20
+ * end byte = clear pid always a0, otherwise 9c, 9a ??
+ *
+*/
+#define LME_ST_ON_W {0x06, 0x00}
+#define LME_CLEAR_PID {0x03, 0x02, 0x20, 0xa0}
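+
+/* Worked example against the layout above: LME_CLEAR_PID = {0x03, 0x02,
+ * 0x20, 0xa0} reads as command 0x03, length 0x02, the 0x20 marker at
+ * end byte -1 and the 0xa0 "clear pid" end byte.
+ */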
+
+/* LNB Voltage
+ * 07 XX XX
+ * offset 1 = 01
+ * offset 2 = 00=Voltage low 01=Voltage high
+ *
+ * LNB Power
+ * 03 01 XX
+ * offset 2 = 00=ON 01=OFF
+ */
+
+#define LME_VOLTAGE_L {0x07, 0x01, 0x00}
+#define LME_VOLTAGE_H {0x07, 0x01, 0x01}
+#define LNB_ON {0x3a, 0x01, 0x00}
+#define LNB_OFF {0x3a, 0x01, 0x01}
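+
+/* Usage sketch, mirroring dm04_lme2510_set_voltage() in lmedm04.c:
+ *
+ * static u8 voltage_high[] = LME_VOLTAGE_H;
+ * static u8 rbuf[1];
+ *
+ * lme2510_usb_talk(adap->dev, voltage_high, sizeof(voltage_high),
+ * rbuf, sizeof(rbuf));
+ */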
+
+/* Initial stv0288 settings for 7395 Frontend */
+static u8 s7395_inittab[] = {
+ 0x01, 0x15,
+ 0x02, 0x20,
+ 0x03, 0xa0,
+ 0x04, 0xa0,
+ 0x05, 0x12,
+ 0x06, 0x00,
+ 0x09, 0x00,
+ 0x0a, 0x04,
+ 0x0b, 0x00,
+ 0x0c, 0x00,
+ 0x0d, 0x00,
+ 0x0e, 0xc1,
+ 0x0f, 0x54,
+ 0x11, 0x7a,
+ 0x12, 0x03,
+ 0x13, 0x48,
+ 0x14, 0x84,
+ 0x15, 0xc5,
+ 0x16, 0xb8,
+ 0x17, 0x9c,
+ 0x18, 0x00,
+ 0x19, 0xa6,
+ 0x1a, 0x88,
+ 0x1b, 0x8f,
+ 0x1c, 0xf0,
+ 0x20, 0x0b,
+ 0x21, 0x54,
+ 0x22, 0xff,
+ 0x23, 0x01,
+ 0x28, 0x46,
+ 0x29, 0x66,
+ 0x2a, 0x90,
+ 0x2b, 0xfa,
+ 0x2c, 0xd9,
+ 0x30, 0x0,
+ 0x31, 0x1e,
+ 0x32, 0x14,
+ 0x33, 0x0f,
+ 0x34, 0x09,
+ 0x35, 0x0c,
+ 0x36, 0x05,
+ 0x37, 0x2f,
+ 0x38, 0x16,
+ 0x39, 0xbd,
+ 0x3a, 0x0,
+ 0x3b, 0x13,
+ 0x3c, 0x11,
+ 0x3d, 0x30,
+ 0x40, 0x63,
+ 0x41, 0x04,
+ 0x42, 0x60,
+ 0x43, 0x00,
+ 0x44, 0x00,
+ 0x45, 0x00,
+ 0x46, 0x00,
+ 0x47, 0x00,
+ 0x4a, 0x00,
+ 0x50, 0x12,
+ 0x51, 0x36,
+ 0x52, 0x21,
+ 0x53, 0x94,
+ 0x54, 0xb2,
+ 0x55, 0x29,
+ 0x56, 0x64,
+ 0x57, 0x2b,
+ 0x58, 0x54,
+ 0x59, 0x86,
+ 0x5a, 0x00,
+ 0x5b, 0x9b,
+ 0x5c, 0x08,
+ 0x5d, 0x7f,
+ 0x5e, 0xff,
+ 0x5f, 0x8d,
+ 0x70, 0x0,
+ 0x71, 0x0,
+ 0x72, 0x0,
+ 0x74, 0x0,
+ 0x75, 0x0,
+ 0x76, 0x0,
+ 0x81, 0x0,
+ 0x82, 0x3f,
+ 0x83, 0x3f,
+ 0x84, 0x0,
+ 0x85, 0x0,
+ 0x88, 0x0,
+ 0x89, 0x0,
+ 0x8a, 0x0,
+ 0x8b, 0x0,
+ 0x8c, 0x0,
+ 0x90, 0x0,
+ 0x91, 0x0,
+ 0x92, 0x0,
+ 0x93, 0x0,
+ 0x94, 0x1c,
+ 0x97, 0x0,
+ 0xa0, 0x48,
+ 0xa1, 0x0,
+ 0xb0, 0xb8,
+ 0xb1, 0x3a,
+ 0xb2, 0x10,
+ 0xb3, 0x82,
+ 0xb4, 0x80,
+ 0xb5, 0x82,
+ 0xb6, 0x82,
+ 0xb7, 0x82,
+ 0xb8, 0x20,
+ 0xb9, 0x0,
+ 0xf0, 0x0,
+ 0xf1, 0x0,
+ 0xf2, 0xc0,
+ 0xff, 0xff,
+};
+#endif
diff --git a/drivers/media/dvb/firewire/firedtv-avc.c b/drivers/media/dvb/firewire/firedtv-avc.c
index 28294af752db..f0f1842fab60 100644
--- a/drivers/media/dvb/firewire/firedtv-avc.c
+++ b/drivers/media/dvb/firewire/firedtv-avc.c
@@ -24,6 +24,8 @@
#include <linux/wait.h>
#include <linux/workqueue.h>
+#include <dvb_frontend.h>
+
#include "firedtv.h"
#define FCP_COMMAND_REGISTER 0xfffff0000b00ULL
@@ -130,6 +132,20 @@ MODULE_PARM_DESC(debug, "Verbose logging (none = 0"
", FCP payloads = " __stringify(AVC_DEBUG_FCP_PAYLOADS)
", or a combination, or all = -1)");
+/*
+ * This is a workaround since there is no vendor specific command to retrieve
+ * ca_info using AVC. If this parameter is not used, ca_system_id will be
+ * filled with application_manufacturer from ca_app_info.
+ * Digital Everywhere have said that adding ca_info is on their TODO list.
+ */
+static unsigned int num_fake_ca_system_ids;
+static int fake_ca_system_ids[4] = { -1, -1, -1, -1 };
+module_param_array(fake_ca_system_ids, int, &num_fake_ca_system_ids, 0644);
+MODULE_PARM_DESC(fake_ca_system_ids, "If your CAM application manufacturer "
+ "does not have the same ca_system_id as your CAS, you can "
+ "override what ca_system_ids are presented to the "
+ "application by setting this field to an array of ids.");
+
static const char *debug_fcp_ctype(unsigned int ctype)
{
static const char *ctypes[] = {
@@ -368,10 +384,30 @@ static int avc_tuner_tuneqpsk(struct firedtv *fdtv,
c->operand[12] = 0;
if (fdtv->type == FIREDTV_DVB_S2) {
- c->operand[13] = 0x1;
- c->operand[14] = 0xff;
- c->operand[15] = 0xff;
-
+ if (fdtv->fe.dtv_property_cache.delivery_system == SYS_DVBS2) {
+ switch (fdtv->fe.dtv_property_cache.modulation) {
+ case QAM_16: c->operand[13] = 0x1; break;
+ case QPSK: c->operand[13] = 0x2; break;
+ case PSK_8: c->operand[13] = 0x3; break;
+ default: c->operand[13] = 0x2; break;
+ }
+ switch (fdtv->fe.dtv_property_cache.rolloff) {
+ case ROLLOFF_AUTO: c->operand[14] = 0x2; break;
+ case ROLLOFF_35: c->operand[14] = 0x2; break;
+ case ROLLOFF_20: c->operand[14] = 0x0; break;
+ case ROLLOFF_25: c->operand[14] = 0x1; break;
+ /* case ROLLOFF_NONE: c->operand[14] = 0xff; break; */
+ }
+ switch (fdtv->fe.dtv_property_cache.pilot) {
+ case PILOT_AUTO: c->operand[15] = 0x0; break;
+ case PILOT_OFF: c->operand[15] = 0x0; break;
+ case PILOT_ON: c->operand[15] = 0x1; break;
+ }
+ } else {
+ c->operand[13] = 0x1; /* auto modulation */
+ c->operand[14] = 0xff; /* disable rolloff */
+ c->operand[15] = 0xff; /* disable pilot */
+ }
return 16;
} else {
return 13;
@@ -977,7 +1013,7 @@ int avc_ca_info(struct firedtv *fdtv, char *app_info, unsigned int *len)
{
struct avc_command_frame *c = (void *)fdtv->avc_data;
struct avc_response_frame *r = (void *)fdtv->avc_data;
- int pos, ret;
+ int i, pos, ret;
mutex_lock(&fdtv->avc_mutex);
@@ -1004,9 +1040,18 @@ int avc_ca_info(struct firedtv *fdtv, char *app_info, unsigned int *len)
app_info[0] = (EN50221_TAG_CA_INFO >> 16) & 0xff;
app_info[1] = (EN50221_TAG_CA_INFO >> 8) & 0xff;
app_info[2] = (EN50221_TAG_CA_INFO >> 0) & 0xff;
- app_info[3] = 2;
- app_info[4] = r->operand[pos + 0];
- app_info[5] = r->operand[pos + 1];
+ if (num_fake_ca_system_ids == 0) {
+ app_info[3] = 2;
+ app_info[4] = r->operand[pos + 0];
+ app_info[5] = r->operand[pos + 1];
+ } else {
+ app_info[3] = num_fake_ca_system_ids * 2;
+ for (i = 0; i < num_fake_ca_system_ids; i++) {
+ app_info[4 + i * 2] =
+ (fake_ca_system_ids[i] >> 8) & 0xff;
+ app_info[5 + i * 2] = fake_ca_system_ids[i] & 0xff;
+ }
+ }
*len = app_info[3] + 4;
out:
mutex_unlock(&fdtv->avc_mutex);
diff --git a/drivers/media/dvb/firewire/firedtv-fe.c b/drivers/media/dvb/firewire/firedtv-fe.c
index e49cdc88b0c7..d10920e2f3a2 100644
--- a/drivers/media/dvb/firewire/firedtv-fe.c
+++ b/drivers/media/dvb/firewire/firedtv-fe.c
@@ -155,6 +155,16 @@ static int fdtv_get_frontend(struct dvb_frontend *fe,
return -EOPNOTSUPP;
}
+static int fdtv_get_property(struct dvb_frontend *fe, struct dtv_property *tvp)
+{
+ return 0;
+}
+
+static int fdtv_set_property(struct dvb_frontend *fe, struct dtv_property *tvp)
+{
+ return 0;
+}
+
void fdtv_frontend_init(struct firedtv *fdtv)
{
struct dvb_frontend_ops *ops = &fdtv->fe.ops;
@@ -166,6 +176,9 @@ void fdtv_frontend_init(struct firedtv *fdtv)
ops->set_frontend = fdtv_set_frontend;
ops->get_frontend = fdtv_get_frontend;
+ ops->get_property = fdtv_get_property;
+ ops->set_property = fdtv_set_property;
+
ops->read_status = fdtv_read_status;
ops->read_ber = fdtv_read_ber;
ops->read_signal_strength = fdtv_read_signal_strength;
@@ -179,7 +192,6 @@ void fdtv_frontend_init(struct firedtv *fdtv)
switch (fdtv->type) {
case FIREDTV_DVB_S:
- case FIREDTV_DVB_S2:
fi->type = FE_QPSK;
fi->frequency_min = 950000;
@@ -188,7 +200,7 @@ void fdtv_frontend_init(struct firedtv *fdtv)
fi->symbol_rate_min = 1000000;
fi->symbol_rate_max = 40000000;
- fi->caps = FE_CAN_INVERSION_AUTO |
+ fi->caps = FE_CAN_INVERSION_AUTO |
FE_CAN_FEC_1_2 |
FE_CAN_FEC_2_3 |
FE_CAN_FEC_3_4 |
@@ -198,6 +210,26 @@ void fdtv_frontend_init(struct firedtv *fdtv)
FE_CAN_QPSK;
break;
+ case FIREDTV_DVB_S2:
+ fi->type = FE_QPSK;
+
+ fi->frequency_min = 950000;
+ fi->frequency_max = 2150000;
+ fi->frequency_stepsize = 125;
+ fi->symbol_rate_min = 1000000;
+ fi->symbol_rate_max = 40000000;
+
+ fi->caps = FE_CAN_INVERSION_AUTO |
+ FE_CAN_FEC_1_2 |
+ FE_CAN_FEC_2_3 |
+ FE_CAN_FEC_3_4 |
+ FE_CAN_FEC_5_6 |
+ FE_CAN_FEC_7_8 |
+ FE_CAN_FEC_AUTO |
+ FE_CAN_QPSK |
+ FE_CAN_2G_MODULATION;
+ break;
+
case FIREDTV_DVB_C:
fi->type = FE_QAM;
diff --git a/drivers/media/dvb/frontends/Kconfig b/drivers/media/dvb/frontends/Kconfig
index b5f6a04f9c12..96b27016670e 100644
--- a/drivers/media/dvb/frontends/Kconfig
+++ b/drivers/media/dvb/frontends/Kconfig
@@ -12,9 +12,8 @@ config DVB_FE_CUSTOMISE
If unsure say N.
-if DVB_FE_CUSTOMISE
-
menu "Customise DVB Frontends"
+ visible if DVB_FE_CUSTOMISE
comment "Multistandard (satellite) frontends"
depends on DVB_CORE
@@ -257,6 +256,13 @@ config DVB_CX22702
help
A DVB-T tuner module. Say Y when you want to support this frontend.
+config DVB_S5H1432
+ tristate "Samsung s5h1432 demodulator (OFDM)"
+ depends on DVB_CORE && I2C
+ default m if DVB_FE_CUSTOMISE
+ help
+ A DVB-T tuner module. Say Y when you want to support this frontend.
+
config DVB_DRX397XD
tristate "Micronas DRX3975D/DRX3977D based"
depends on DVB_CORE && I2C
@@ -455,16 +461,8 @@ config DVB_LGDT330X
An ATSC 8VSB and QAM64/256 tuner module. Say Y when you want
to support this frontend.
-config DVB_LGDT3304
- tristate "LG Electronics LGDT3304"
- depends on DVB_CORE && I2C
- default m if DVB_FE_CUSTOMISE
- help
- An ATSC 8VSB and QAM64/256 tuner module. Say Y when you want
- to support this frontend.
-
config DVB_LGDT3305
- tristate "LG Electronics LGDT3305 based"
+ tristate "LG Electronics LGDT3304 and LGDT3305 based"
depends on DVB_CORE && I2C
default m if DVB_FE_CUSTOMISE
help
@@ -607,11 +605,16 @@ config DVB_TDA665x
Currently supported tuners:
* Panasonic ENV57H12D5 (ET-50DT)
+config DVB_IX2505V
+ tristate "Sharp IX2505V silicon tuner"
+ depends on DVB_CORE && I2C
+ default m if DVB_FE_CUSTOMISE
+ help
+ A DVB-S tuner module. Say Y when you want to support this frontend.
+
comment "Tools to develop new frontends"
config DVB_DUMMY_FE
tristate "Dummy frontend driver"
default n
endmenu
-
-endif
diff --git a/drivers/media/dvb/frontends/Makefile b/drivers/media/dvb/frontends/Makefile
index 874e8ada4d1d..9a31985c0dfb 100644
--- a/drivers/media/dvb/frontends/Makefile
+++ b/drivers/media/dvb/frontends/Makefile
@@ -16,6 +16,7 @@ obj-$(CONFIG_DVB_STB0899) += stb0899.o
obj-$(CONFIG_DVB_STB6100) += stb6100.o
obj-$(CONFIG_DVB_SP8870) += sp8870.o
obj-$(CONFIG_DVB_CX22700) += cx22700.o
+obj-$(CONFIG_DVB_S5H1432) += s5h1432.o
obj-$(CONFIG_DVB_CX24110) += cx24110.o
obj-$(CONFIG_DVB_TDA8083) += tda8083.o
obj-$(CONFIG_DVB_L64781) += l64781.o
@@ -45,7 +46,6 @@ obj-$(CONFIG_DVB_OR51132) += or51132.o
obj-$(CONFIG_DVB_BCM3510) += bcm3510.o
obj-$(CONFIG_DVB_S5H1420) += s5h1420.o
obj-$(CONFIG_DVB_LGDT330X) += lgdt330x.o
-obj-$(CONFIG_DVB_LGDT3304) += lgdt3304.o
obj-$(CONFIG_DVB_LGDT3305) += lgdt3305.o
obj-$(CONFIG_DVB_CX24123) += cx24123.o
obj-$(CONFIG_DVB_LNBP21) += lnbp21.o
@@ -82,3 +82,4 @@ obj-$(CONFIG_DVB_ISL6423) += isl6423.o
obj-$(CONFIG_DVB_EC100) += ec100.o
obj-$(CONFIG_DVB_DS3000) += ds3000.o
obj-$(CONFIG_DVB_MB86A16) += mb86a16.o
+obj-$(CONFIG_DVB_IX2505V) += ix2505v.o
diff --git a/drivers/media/dvb/frontends/af9013.c b/drivers/media/dvb/frontends/af9013.c
index dac917f7bb7f..e2a95c07bab4 100644
--- a/drivers/media/dvb/frontends/af9013.c
+++ b/drivers/media/dvb/frontends/af9013.c
@@ -42,6 +42,8 @@ struct af9013_state {
struct af9013_config config;
+ /* tuner/demod RF and IF AGC limits used for signal strength calc */
+ u8 signal_strength_en, rf_50, rf_80, if_50, if_80;
u16 signal_strength;
u32 ber;
u32 ucblocks;
@@ -220,184 +222,37 @@ static u32 af913_div(u32 a, u32 b, u32 x)
static int af9013_set_coeff(struct af9013_state *state, fe_bandwidth_t bw)
{
- int ret = 0;
- u8 i = 0;
- u8 buf[24];
- u32 uninitialized_var(ns_coeff1_2048nu);
- u32 uninitialized_var(ns_coeff1_8191nu);
- u32 uninitialized_var(ns_coeff1_8192nu);
- u32 uninitialized_var(ns_coeff1_8193nu);
- u32 uninitialized_var(ns_coeff2_2k);
- u32 uninitialized_var(ns_coeff2_8k);
-
+ int ret, i, j, found;
deb_info("%s: adc_clock:%d bw:%d\n", __func__,
state->config.adc_clock, bw);
- switch (state->config.adc_clock) {
- case 28800: /* 28.800 MHz */
- switch (bw) {
- case BANDWIDTH_6_MHZ:
- ns_coeff1_2048nu = 0x01e79e7a;
- ns_coeff1_8191nu = 0x0079eb6e;
- ns_coeff1_8192nu = 0x0079e79e;
- ns_coeff1_8193nu = 0x0079e3cf;
- ns_coeff2_2k = 0x00f3cf3d;
- ns_coeff2_8k = 0x003cf3cf;
- break;
- case BANDWIDTH_7_MHZ:
- ns_coeff1_2048nu = 0x0238e38e;
- ns_coeff1_8191nu = 0x008e3d55;
- ns_coeff1_8192nu = 0x008e38e4;
- ns_coeff1_8193nu = 0x008e3472;
- ns_coeff2_2k = 0x011c71c7;
- ns_coeff2_8k = 0x00471c72;
+ /* lookup coeff from table */
+ for (i = 0, found = 0; i < ARRAY_SIZE(coeff_table); i++) {
+ if (coeff_table[i].adc_clock == state->config.adc_clock &&
+ coeff_table[i].bw == bw) {
+ found = 1;
break;
- case BANDWIDTH_8_MHZ:
- ns_coeff1_2048nu = 0x028a28a3;
- ns_coeff1_8191nu = 0x00a28f3d;
- ns_coeff1_8192nu = 0x00a28a29;
- ns_coeff1_8193nu = 0x00a28514;
- ns_coeff2_2k = 0x01451451;
- ns_coeff2_8k = 0x00514514;
- break;
- default:
- ret = -EINVAL;
}
- break;
- case 20480: /* 20.480 MHz */
- switch (bw) {
- case BANDWIDTH_6_MHZ:
- ns_coeff1_2048nu = 0x02adb6dc;
- ns_coeff1_8191nu = 0x00ab7313;
- ns_coeff1_8192nu = 0x00ab6db7;
- ns_coeff1_8193nu = 0x00ab685c;
- ns_coeff2_2k = 0x0156db6e;
- ns_coeff2_8k = 0x0055b6dc;
- break;
- case BANDWIDTH_7_MHZ:
- ns_coeff1_2048nu = 0x03200001;
- ns_coeff1_8191nu = 0x00c80640;
- ns_coeff1_8192nu = 0x00c80000;
- ns_coeff1_8193nu = 0x00c7f9c0;
- ns_coeff2_2k = 0x01900000;
- ns_coeff2_8k = 0x00640000;
- break;
- case BANDWIDTH_8_MHZ:
- ns_coeff1_2048nu = 0x03924926;
- ns_coeff1_8191nu = 0x00e4996e;
- ns_coeff1_8192nu = 0x00e49249;
- ns_coeff1_8193nu = 0x00e48b25;
- ns_coeff2_2k = 0x01c92493;
- ns_coeff2_8k = 0x00724925;
- break;
- default:
- ret = -EINVAL;
- }
- break;
- case 28000: /* 28.000 MHz */
- switch (bw) {
- case BANDWIDTH_6_MHZ:
- ns_coeff1_2048nu = 0x01f58d10;
- ns_coeff1_8191nu = 0x007d672f;
- ns_coeff1_8192nu = 0x007d6344;
- ns_coeff1_8193nu = 0x007d5f59;
- ns_coeff2_2k = 0x00fac688;
- ns_coeff2_8k = 0x003eb1a2;
- break;
- case BANDWIDTH_7_MHZ:
- ns_coeff1_2048nu = 0x02492492;
- ns_coeff1_8191nu = 0x00924db7;
- ns_coeff1_8192nu = 0x00924925;
- ns_coeff1_8193nu = 0x00924492;
- ns_coeff2_2k = 0x01249249;
- ns_coeff2_8k = 0x00492492;
- break;
- case BANDWIDTH_8_MHZ:
- ns_coeff1_2048nu = 0x029cbc15;
- ns_coeff1_8191nu = 0x00a7343f;
- ns_coeff1_8192nu = 0x00a72f05;
- ns_coeff1_8193nu = 0x00a729cc;
- ns_coeff2_2k = 0x014e5e0a;
- ns_coeff2_8k = 0x00539783;
- break;
- default:
- ret = -EINVAL;
- }
- break;
- case 25000: /* 25.000 MHz */
- switch (bw) {
- case BANDWIDTH_6_MHZ:
- ns_coeff1_2048nu = 0x0231bcb5;
- ns_coeff1_8191nu = 0x008c7391;
- ns_coeff1_8192nu = 0x008c6f2d;
- ns_coeff1_8193nu = 0x008c6aca;
- ns_coeff2_2k = 0x0118de5b;
- ns_coeff2_8k = 0x00463797;
- break;
- case BANDWIDTH_7_MHZ:
- ns_coeff1_2048nu = 0x028f5c29;
- ns_coeff1_8191nu = 0x00a3dc29;
- ns_coeff1_8192nu = 0x00a3d70a;
- ns_coeff1_8193nu = 0x00a3d1ec;
- ns_coeff2_2k = 0x0147ae14;
- ns_coeff2_8k = 0x0051eb85;
- break;
- case BANDWIDTH_8_MHZ:
- ns_coeff1_2048nu = 0x02ecfb9d;
- ns_coeff1_8191nu = 0x00bb44c1;
- ns_coeff1_8192nu = 0x00bb3ee7;
- ns_coeff1_8193nu = 0x00bb390d;
- ns_coeff2_2k = 0x01767dce;
- ns_coeff2_8k = 0x005d9f74;
- break;
- default:
- ret = -EINVAL;
- }
- break;
- default:
- err("invalid xtal");
- return -EINVAL;
}
- if (ret) {
- err("invalid bandwidth");
- return ret;
+
+ if (!found) {
+ err("invalid bw or clock");
+ ret = -EINVAL;
+ goto error;
}
- buf[i++] = (u8) ((ns_coeff1_2048nu & 0x03000000) >> 24);
- buf[i++] = (u8) ((ns_coeff1_2048nu & 0x00ff0000) >> 16);
- buf[i++] = (u8) ((ns_coeff1_2048nu & 0x0000ff00) >> 8);
- buf[i++] = (u8) ((ns_coeff1_2048nu & 0x000000ff));
- buf[i++] = (u8) ((ns_coeff2_2k & 0x01c00000) >> 22);
- buf[i++] = (u8) ((ns_coeff2_2k & 0x003fc000) >> 14);
- buf[i++] = (u8) ((ns_coeff2_2k & 0x00003fc0) >> 6);
- buf[i++] = (u8) ((ns_coeff2_2k & 0x0000003f));
- buf[i++] = (u8) ((ns_coeff1_8191nu & 0x03000000) >> 24);
- buf[i++] = (u8) ((ns_coeff1_8191nu & 0x00ffc000) >> 16);
- buf[i++] = (u8) ((ns_coeff1_8191nu & 0x0000ff00) >> 8);
- buf[i++] = (u8) ((ns_coeff1_8191nu & 0x000000ff));
- buf[i++] = (u8) ((ns_coeff1_8192nu & 0x03000000) >> 24);
- buf[i++] = (u8) ((ns_coeff1_8192nu & 0x00ffc000) >> 16);
- buf[i++] = (u8) ((ns_coeff1_8192nu & 0x0000ff00) >> 8);
- buf[i++] = (u8) ((ns_coeff1_8192nu & 0x000000ff));
- buf[i++] = (u8) ((ns_coeff1_8193nu & 0x03000000) >> 24);
- buf[i++] = (u8) ((ns_coeff1_8193nu & 0x00ffc000) >> 16);
- buf[i++] = (u8) ((ns_coeff1_8193nu & 0x0000ff00) >> 8);
- buf[i++] = (u8) ((ns_coeff1_8193nu & 0x000000ff));
- buf[i++] = (u8) ((ns_coeff2_8k & 0x01c00000) >> 22);
- buf[i++] = (u8) ((ns_coeff2_8k & 0x003fc000) >> 14);
- buf[i++] = (u8) ((ns_coeff2_8k & 0x00003fc0) >> 6);
- buf[i++] = (u8) ((ns_coeff2_8k & 0x0000003f));
-
- deb_info("%s: coeff:", __func__);
- debug_dump(buf, sizeof(buf), deb_info);
+ deb_info("%s: coeff: ", __func__);
+ debug_dump(coeff_table[i].val, sizeof(coeff_table[i].val), deb_info);
/* program */
- for (i = 0; i < sizeof(buf); i++) {
- ret = af9013_write_reg(state, 0xae00 + i, buf[i]);
+ for (j = 0; j < sizeof(coeff_table[i].val); j++) {
+ ret = af9013_write_reg(state, 0xae00 + j,
+ coeff_table[i].val[j]);
if (ret)
break;
}
+error:
return ret;
}
@@ -486,6 +341,19 @@ static int af9013_set_freq_ctrl(struct af9013_state *state, fe_bandwidth_t bw)
if_sample_freq = 4300000; /* 4.3 MHz */
break;
}
+ } else if (state->config.tuner == AF9013_TUNER_TDA18218) {
+ switch (bw) {
+ case BANDWIDTH_6_MHZ:
+ if_sample_freq = 3000000; /* 3 MHz */
+ break;
+ case BANDWIDTH_7_MHZ:
+ if_sample_freq = 3500000; /* 3.5 MHz */
+ break;
+ case BANDWIDTH_8_MHZ:
+ default:
+ if_sample_freq = 4000000; /* 4 MHz */
+ break;
+ }
}
while (if_sample_freq > (adc_freq / 2))
@@ -1097,45 +965,31 @@ static int af9013_update_signal_strength(struct dvb_frontend *fe)
{
struct af9013_state *state = fe->demodulator_priv;
int ret;
- u8 tmp0;
- u8 rf_gain, rf_50, rf_80, if_gain, if_50, if_80;
+ u8 rf_gain, if_gain;
int signal_strength;
deb_info("%s\n", __func__);
- state->signal_strength = 0;
-
- ret = af9013_read_reg_bits(state, 0x9bee, 0, 1, &tmp0);
- if (ret)
- goto error;
- if (tmp0) {
- ret = af9013_read_reg(state, 0x9bbd, &rf_50);
- if (ret)
- goto error;
- ret = af9013_read_reg(state, 0x9bd0, &rf_80);
- if (ret)
- goto error;
- ret = af9013_read_reg(state, 0x9be2, &if_50);
- if (ret)
- goto error;
- ret = af9013_read_reg(state, 0x9be4, &if_80);
- if (ret)
- goto error;
+ if (state->signal_strength_en) {
ret = af9013_read_reg(state, 0xd07c, &rf_gain);
if (ret)
goto error;
ret = af9013_read_reg(state, 0xd07d, &if_gain);
if (ret)
goto error;
- signal_strength = (0xffff / (9 * (rf_50 + if_50) - \
- 11 * (rf_80 + if_80))) * (10 * (rf_gain + if_gain) - \
- 11 * (rf_80 + if_80));
+ signal_strength = (0xffff / \
+ (9 * (state->rf_50 + state->if_50) - \
+ 11 * (state->rf_80 + state->if_80))) * \
+ (10 * (rf_gain + if_gain) - \
+ 11 * (state->rf_80 + state->if_80));
if (signal_strength < 0)
signal_strength = 0;
else if (signal_strength > 0xffff)
signal_strength = 0xffff;
state->signal_strength = signal_strength;
+ } else {
+ state->signal_strength = 0;
}
error:
@@ -1368,6 +1222,7 @@ static int af9013_init(struct dvb_frontend *fe)
break;
case AF9013_TUNER_MXL5005D:
case AF9013_TUNER_MXL5005R:
+ case AF9013_TUNER_MXL5007T:
len = ARRAY_SIZE(tuner_init_mxl5005);
init = tuner_init_mxl5005;
break;
@@ -1393,6 +1248,7 @@ static int af9013_init(struct dvb_frontend *fe)
init = tuner_init_mt2060_2;
break;
case AF9013_TUNER_TDA18271:
+ case AF9013_TUNER_TDA18218:
len = ARRAY_SIZE(tuner_init_tda18271);
init = tuner_init_tda18271;
break;
@@ -1438,6 +1294,27 @@ static int af9013_init(struct dvb_frontend *fe)
if (ret)
goto error;
+ /* read values needed for signal strength calculation */
+ ret = af9013_read_reg_bits(state, 0x9bee, 0, 1,
+ &state->signal_strength_en);
+ if (ret)
+ goto error;
+
+ if (state->signal_strength_en) {
+ ret = af9013_read_reg(state, 0x9bbd, &state->rf_50);
+ if (ret)
+ goto error;
+ ret = af9013_read_reg(state, 0x9bd0, &state->rf_80);
+ if (ret)
+ goto error;
+ ret = af9013_read_reg(state, 0x9be2, &state->if_50);
+ if (ret)
+ goto error;
+ ret = af9013_read_reg(state, 0x9be4, &state->if_80);
+ if (ret)
+ goto error;
+ }
+
error:
return ret;
}
diff --git a/drivers/media/dvb/frontends/af9013.h b/drivers/media/dvb/frontends/af9013.h
index 72c71bb5d117..e53d873f7555 100644
--- a/drivers/media/dvb/frontends/af9013.h
+++ b/drivers/media/dvb/frontends/af9013.h
@@ -44,6 +44,7 @@ enum af9013_tuner {
AF9013_TUNER_MT2060_2 = 147, /* Microtune */
AF9013_TUNER_TDA18271 = 156, /* NXP */
AF9013_TUNER_QT1010A = 162, /* Quantek */
+ AF9013_TUNER_MXL5007T = 177, /* MaxLinear */
AF9013_TUNER_TDA18218 = 179, /* NXP */
};
diff --git a/drivers/media/dvb/frontends/af9013_priv.h b/drivers/media/dvb/frontends/af9013_priv.h
index 0fd42b7e248e..e00b2a4a2db6 100644
--- a/drivers/media/dvb/frontends/af9013_priv.h
+++ b/drivers/media/dvb/frontends/af9013_priv.h
@@ -60,6 +60,56 @@ struct snr_table {
u8 snr;
};
+struct coeff {
+ u32 adc_clock;
+ fe_bandwidth_t bw;
+ u8 val[24];
+};
+
+/* pre-calculated coeff lookup table */
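+/* Each val[] packs, in register write order, ns_coeff1_2048nu, ns_coeff2_2k,
+ * ns_coeff1_8191nu, ns_coeff1_8192nu, ns_coeff1_8193nu and ns_coeff2_8k:
+ * the same 24 bytes the removed switch in af9013_set_coeff() used to
+ * assemble before writing registers 0xae00 onwards.
+ */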
+static struct coeff coeff_table[] = {
+ /* 28.800 MHz */
+ { 28800, BANDWIDTH_8_MHZ, { 0x02, 0x8a, 0x28, 0xa3, 0x05, 0x14,
+ 0x51, 0x11, 0x00, 0xa2, 0x8f, 0x3d, 0x00, 0xa2, 0x8a,
+ 0x29, 0x00, 0xa2, 0x85, 0x14, 0x01, 0x45, 0x14, 0x14 } },
+ { 28800, BANDWIDTH_7_MHZ, { 0x02, 0x38, 0xe3, 0x8e, 0x04, 0x71,
+ 0xc7, 0x07, 0x00, 0x8e, 0x3d, 0x55, 0x00, 0x8e, 0x38,
+ 0xe4, 0x00, 0x8e, 0x34, 0x72, 0x01, 0x1c, 0x71, 0x32 } },
+ { 28800, BANDWIDTH_6_MHZ, { 0x01, 0xe7, 0x9e, 0x7a, 0x03, 0xcf,
+ 0x3c, 0x3d, 0x00, 0x79, 0xeb, 0x6e, 0x00, 0x79, 0xe7,
+ 0x9e, 0x00, 0x79, 0xe3, 0xcf, 0x00, 0xf3, 0xcf, 0x0f } },
+ /* 20.480 MHz */
+ { 20480, BANDWIDTH_8_MHZ, { 0x03, 0x92, 0x49, 0x26, 0x07, 0x24,
+ 0x92, 0x13, 0x00, 0xe4, 0x99, 0x6e, 0x00, 0xe4, 0x92,
+ 0x49, 0x00, 0xe4, 0x8b, 0x25, 0x01, 0xc9, 0x24, 0x25 } },
+ { 20480, BANDWIDTH_7_MHZ, { 0x03, 0x20, 0x00, 0x01, 0x06, 0x40,
+ 0x00, 0x00, 0x00, 0xc8, 0x06, 0x40, 0x00, 0xc8, 0x00,
+ 0x00, 0x00, 0xc7, 0xf9, 0xc0, 0x01, 0x90, 0x00, 0x00 } },
+ { 20480, BANDWIDTH_6_MHZ, { 0x02, 0xad, 0xb6, 0xdc, 0x05, 0x5b,
+ 0x6d, 0x2e, 0x00, 0xab, 0x73, 0x13, 0x00, 0xab, 0x6d,
+ 0xb7, 0x00, 0xab, 0x68, 0x5c, 0x01, 0x56, 0xdb, 0x1c } },
+ /* 28.000 MHz */
+ { 28000, BANDWIDTH_8_MHZ, { 0x02, 0x9c, 0xbc, 0x15, 0x05, 0x39,
+ 0x78, 0x0a, 0x00, 0xa7, 0x34, 0x3f, 0x00, 0xa7, 0x2f,
+ 0x05, 0x00, 0xa7, 0x29, 0xcc, 0x01, 0x4e, 0x5e, 0x03 } },
+ { 28000, BANDWIDTH_7_MHZ, { 0x02, 0x49, 0x24, 0x92, 0x04, 0x92,
+ 0x49, 0x09, 0x00, 0x92, 0x4d, 0xb7, 0x00, 0x92, 0x49,
+ 0x25, 0x00, 0x92, 0x44, 0x92, 0x01, 0x24, 0x92, 0x12 } },
+ { 28000, BANDWIDTH_6_MHZ, { 0x01, 0xf5, 0x8d, 0x10, 0x03, 0xeb,
+ 0x1a, 0x08, 0x00, 0x7d, 0x67, 0x2f, 0x00, 0x7d, 0x63,
+ 0x44, 0x00, 0x7d, 0x5f, 0x59, 0x00, 0xfa, 0xc6, 0x22 } },
+ /* 25.000 MHz */
+ { 25000, BANDWIDTH_8_MHZ, { 0x02, 0xec, 0xfb, 0x9d, 0x05, 0xd9,
+ 0xf7, 0x0e, 0x00, 0xbb, 0x44, 0xc1, 0x00, 0xbb, 0x3e,
+ 0xe7, 0x00, 0xbb, 0x39, 0x0d, 0x01, 0x76, 0x7d, 0x34 } },
+ { 25000, BANDWIDTH_7_MHZ, { 0x02, 0x8f, 0x5c, 0x29, 0x05, 0x1e,
+ 0xb8, 0x14, 0x00, 0xa3, 0xdc, 0x29, 0x00, 0xa3, 0xd7,
+ 0x0a, 0x00, 0xa3, 0xd1, 0xec, 0x01, 0x47, 0xae, 0x05 } },
+ { 25000, BANDWIDTH_6_MHZ, { 0x02, 0x31, 0xbc, 0xb5, 0x04, 0x63,
+ 0x79, 0x1b, 0x00, 0x8c, 0x73, 0x91, 0x00, 0x8c, 0x6f,
+ 0x2d, 0x00, 0x8c, 0x6a, 0xca, 0x01, 0x18, 0xde, 0x17 } },
+};
+
/* QPSK SNR lookup table */
static struct snr_table qpsk_snr_table[] = {
{ 0x0b4771, 0 },
@@ -480,9 +530,10 @@ static struct regdesc tuner_init_mxl5003d[] = {
{ 0x9bd9, 0, 8, 0x08 },
};
-/* MaxLinear MXL5005 tuner init
+/* MaxLinear MXL5005S & MXL5007T tuner init
AF9013_TUNER_MXL5005D = 13
- AF9013_TUNER_MXL5005R = 30 */
+ AF9013_TUNER_MXL5005R = 30
+ AF9013_TUNER_MXL5007T = 177 */
static struct regdesc tuner_init_mxl5005[] = {
{ 0x9bd5, 0, 8, 0x01 },
{ 0x9bd6, 0, 8, 0x07 },
@@ -791,8 +842,9 @@ static struct regdesc tuner_init_unknown[] = {
{ 0x9bd9, 0, 8, 0x08 },
};
-/* NXP TDA18271 tuner init
- AF9013_TUNER_TDA18271 = 156 */
+/* NXP TDA18271 & TDA18218 tuner init
+ AF9013_TUNER_TDA18271 = 156
+ AF9013_TUNER_TDA18218 = 179 */
static struct regdesc tuner_init_tda18271[] = {
{ 0x9bd5, 0, 8, 0x01 },
{ 0x9bd6, 0, 8, 0x04 },
diff --git a/drivers/media/dvb/frontends/au8522_decoder.c b/drivers/media/dvb/frontends/au8522_decoder.c
index 29cdbfe36852..6d9c5943eb3d 100644
--- a/drivers/media/dvb/frontends/au8522_decoder.c
+++ b/drivers/media/dvb/frontends/au8522_decoder.c
@@ -36,7 +36,6 @@
#include <linux/delay.h>
#include <media/v4l2-common.h>
#include <media/v4l2-chip-ident.h>
-#include <media/v4l2-i2c-drv.h>
#include <media/v4l2-device.h>
#include "au8522.h"
#include "au8522_priv.h"
@@ -831,9 +830,25 @@ static const struct i2c_device_id au8522_id[] = {
MODULE_DEVICE_TABLE(i2c, au8522_id);
-static struct v4l2_i2c_driver_data v4l2_i2c_data = {
- .name = "au8522",
- .probe = au8522_probe,
- .remove = au8522_remove,
- .id_table = au8522_id,
+static struct i2c_driver au8522_driver = {
+ .driver = {
+ .owner = THIS_MODULE,
+ .name = "au8522",
+ },
+ .probe = au8522_probe,
+ .remove = au8522_remove,
+ .id_table = au8522_id,
};
+
+static __init int init_au8522(void)
+{
+ return i2c_add_driver(&au8522_driver);
+}
+
+static __exit void exit_au8522(void)
+{
+ i2c_del_driver(&au8522_driver);
+}
+
+module_init(init_au8522);
+module_exit(exit_au8522);
diff --git a/drivers/media/dvb/frontends/cx22702.c b/drivers/media/dvb/frontends/cx22702.c
index 00b5c7e91d5d..ff6c4983051c 100644
--- a/drivers/media/dvb/frontends/cx22702.c
+++ b/drivers/media/dvb/frontends/cx22702.c
@@ -54,7 +54,7 @@ MODULE_PARM_DESC(debug, "Enable verbose debug messages");
#define dprintk if (debug) printk
/* Register values to initialise the demod */
-static u8 init_tab[] = {
+static const u8 init_tab[] = {
0x00, 0x00, /* Stop aquisition */
0x0B, 0x06,
0x09, 0x01,
@@ -92,52 +92,56 @@ static int cx22702_writereg(struct cx22702_state *state, u8 reg, u8 data)
ret = i2c_transfer(state->i2c, &msg, 1);
- if (ret != 1)
+ if (unlikely(ret != 1)) {
printk(KERN_ERR
"%s: error (reg == 0x%02x, val == 0x%02x, ret == %i)\n",
__func__, reg, data, ret);
+ return -1;
+ }
- return (ret != 1) ? -1 : 0;
+ return 0;
}
static u8 cx22702_readreg(struct cx22702_state *state, u8 reg)
{
int ret;
- u8 b0[] = { reg };
- u8 b1[] = { 0 };
+ u8 data;
struct i2c_msg msg[] = {
{ .addr = state->config->demod_address, .flags = 0,
- .buf = b0, .len = 1 },
+ .buf = &reg, .len = 1 },
{ .addr = state->config->demod_address, .flags = I2C_M_RD,
- .buf = b1, .len = 1 } };
+ .buf = &data, .len = 1 } };
ret = i2c_transfer(state->i2c, msg, 2);
- if (ret != 2)
- printk(KERN_ERR "%s: readreg error (ret == %i)\n",
- __func__, ret);
+ if (unlikely(ret != 2)) {
+ printk(KERN_ERR "%s: error (reg == 0x%02x, ret == %i)\n",
+ __func__, reg, ret);
+ return 0;
+ }
- return b1[0];
+ return data;
}
static int cx22702_set_inversion(struct cx22702_state *state, int inversion)
{
u8 val;
+ val = cx22702_readreg(state, 0x0C);
switch (inversion) {
case INVERSION_AUTO:
return -EOPNOTSUPP;
case INVERSION_ON:
- val = cx22702_readreg(state, 0x0C);
- return cx22702_writereg(state, 0x0C, val | 0x01);
+ val |= 0x01;
+ break;
case INVERSION_OFF:
- val = cx22702_readreg(state, 0x0C);
- return cx22702_writereg(state, 0x0C, val & 0xfe);
+ val &= 0xfe;
+ break;
default:
return -EINVAL;
}
-
+ return cx22702_writereg(state, 0x0C, val);
}
/* Retrieve the demod settings */
@@ -244,13 +248,15 @@ static int cx22702_get_tps(struct cx22702_state *state,
static int cx22702_i2c_gate_ctrl(struct dvb_frontend *fe, int enable)
{
struct cx22702_state *state = fe->demodulator_priv;
+ u8 val;
+
dprintk("%s(%d)\n", __func__, enable);
+ val = cx22702_readreg(state, 0x0D);
if (enable)
- return cx22702_writereg(state, 0x0D,
- cx22702_readreg(state, 0x0D) & 0xfe);
+ val &= 0xfe;
else
- return cx22702_writereg(state, 0x0D,
- cx22702_readreg(state, 0x0D) | 1);
+ val |= 0x01;
+ return cx22702_writereg(state, 0x0D, val);
}
/* Talk to the demod, set the FEC, GUARD, QAM settings etc */
@@ -270,23 +276,21 @@ static int cx22702_set_tps(struct dvb_frontend *fe,
cx22702_set_inversion(state, p->inversion);
/* set bandwidth */
+ val = cx22702_readreg(state, 0x0C) & 0xcf;
switch (p->u.ofdm.bandwidth) {
case BANDWIDTH_6_MHZ:
- cx22702_writereg(state, 0x0C,
- (cx22702_readreg(state, 0x0C) & 0xcf) | 0x20);
+ val |= 0x20;
break;
case BANDWIDTH_7_MHZ:
- cx22702_writereg(state, 0x0C,
- (cx22702_readreg(state, 0x0C) & 0xcf) | 0x10);
+ val |= 0x10;
break;
case BANDWIDTH_8_MHZ:
- cx22702_writereg(state, 0x0C,
- cx22702_readreg(state, 0x0C) & 0xcf);
break;
default:
dprintk("%s: invalid bandwidth\n", __func__);
return -EINVAL;
}
+ cx22702_writereg(state, 0x0C, val);
p->u.ofdm.code_rate_LP = FEC_AUTO; /* temp hack as manual not working */
@@ -312,33 +316,31 @@ static int cx22702_set_tps(struct dvb_frontend *fe,
}
/* manually programmed values */
- val = 0;
- switch (p->u.ofdm.constellation) {
+ switch (p->u.ofdm.constellation) { /* mask 0x18 */
case QPSK:
- val = (val & 0xe7);
+ val = 0x00;
break;
case QAM_16:
- val = (val & 0xe7) | 0x08;
+ val = 0x08;
break;
case QAM_64:
- val = (val & 0xe7) | 0x10;
+ val = 0x10;
break;
default:
dprintk("%s: invalid constellation\n", __func__);
return -EINVAL;
}
- switch (p->u.ofdm.hierarchy_information) {
+ switch (p->u.ofdm.hierarchy_information) { /* mask 0x07 */
case HIERARCHY_NONE:
- val = (val & 0xf8);
break;
case HIERARCHY_1:
- val = (val & 0xf8) | 1;
+ val |= 0x01;
break;
case HIERARCHY_2:
- val = (val & 0xf8) | 2;
+ val |= 0x02;
break;
case HIERARCHY_4:
- val = (val & 0xf8) | 3;
+ val |= 0x03;
break;
default:
dprintk("%s: invalid hierarchy\n", __func__);
@@ -346,44 +348,42 @@ static int cx22702_set_tps(struct dvb_frontend *fe,
}
cx22702_writereg(state, 0x06, val);
- val = 0;
- switch (p->u.ofdm.code_rate_HP) {
+ switch (p->u.ofdm.code_rate_HP) { /* mask 0x38 */
case FEC_NONE:
case FEC_1_2:
- val = (val & 0xc7);
+ val = 0x00;
break;
case FEC_2_3:
- val = (val & 0xc7) | 0x08;
+ val = 0x08;
break;
case FEC_3_4:
- val = (val & 0xc7) | 0x10;
+ val = 0x10;
break;
case FEC_5_6:
- val = (val & 0xc7) | 0x18;
+ val = 0x18;
break;
case FEC_7_8:
- val = (val & 0xc7) | 0x20;
+ val = 0x20;
break;
default:
dprintk("%s: invalid code_rate_HP\n", __func__);
return -EINVAL;
}
- switch (p->u.ofdm.code_rate_LP) {
+ switch (p->u.ofdm.code_rate_LP) { /* mask 0x07 */
case FEC_NONE:
case FEC_1_2:
- val = (val & 0xf8);
break;
case FEC_2_3:
- val = (val & 0xf8) | 1;
+ val |= 0x01;
break;
case FEC_3_4:
- val = (val & 0xf8) | 2;
+ val |= 0x02;
break;
case FEC_5_6:
- val = (val & 0xf8) | 3;
+ val |= 0x03;
break;
case FEC_7_8:
- val = (val & 0xf8) | 4;
+ val |= 0x04;
break;
default:
dprintk("%s: invalid code_rate_LP\n", __func__);
@@ -391,30 +391,28 @@ static int cx22702_set_tps(struct dvb_frontend *fe,
}
cx22702_writereg(state, 0x07, val);
- val = 0;
- switch (p->u.ofdm.guard_interval) {
+ switch (p->u.ofdm.guard_interval) { /* mask 0x0c */
case GUARD_INTERVAL_1_32:
- val = (val & 0xf3);
+ val = 0x00;
break;
case GUARD_INTERVAL_1_16:
- val = (val & 0xf3) | 0x04;
+ val = 0x04;
break;
case GUARD_INTERVAL_1_8:
- val = (val & 0xf3) | 0x08;
+ val = 0x08;
break;
case GUARD_INTERVAL_1_4:
- val = (val & 0xf3) | 0x0c;
+ val = 0x0c;
break;
default:
dprintk("%s: invalid guard_interval\n", __func__);
return -EINVAL;
}
- switch (p->u.ofdm.transmission_mode) {
+ switch (p->u.ofdm.transmission_mode) { /* mask 0x03 */
case TRANSMISSION_MODE_2K:
- val = (val & 0xfc);
break;
case TRANSMISSION_MODE_8K:
- val = (val & 0xfc) | 1;
+ val |= 0x1;
break;
default:
dprintk("%s: invalid transmission_mode\n", __func__);
@@ -505,7 +503,7 @@ static int cx22702_read_signal_strength(struct dvb_frontend *fe,
{
struct cx22702_state *state = fe->demodulator_priv;
- u16 rs_ber = 0;
+ u16 rs_ber;
rs_ber = cx22702_readreg(state, 0x23);
*signal_strength = (rs_ber << 8) | rs_ber;
@@ -516,7 +514,7 @@ static int cx22702_read_snr(struct dvb_frontend *fe, u16 *snr)
{
struct cx22702_state *state = fe->demodulator_priv;
- u16 rs_ber = 0;
+ u16 rs_ber;
if (cx22702_readreg(state, 0xE4) & 0x02) {
/* Realtime statistics */
rs_ber = (cx22702_readreg(state, 0xDE) & 0x7F) << 7
@@ -572,7 +570,7 @@ static void cx22702_release(struct dvb_frontend *fe)
kfree(state);
}
-static struct dvb_frontend_ops cx22702_ops;
+static const struct dvb_frontend_ops cx22702_ops;
struct dvb_frontend *cx22702_attach(const struct cx22702_config *config,
struct i2c_adapter *i2c)
@@ -587,7 +585,6 @@ struct dvb_frontend *cx22702_attach(const struct cx22702_config *config,
/* setup the state */
state->config = config;
state->i2c = i2c;
- state->prevUCBlocks = 0;
/* check if the demod is there */
if (cx22702_readreg(state, 0x1f) != 0x3)
@@ -605,7 +602,7 @@ error:
}
EXPORT_SYMBOL(cx22702_attach);
-static struct dvb_frontend_ops cx22702_ops = {
+static const struct dvb_frontend_ops cx22702_ops = {
.info = {
.name = "Conexant CX22702 DVB-T",
diff --git a/drivers/media/dvb/frontends/cx24110.c b/drivers/media/dvb/frontends/cx24110.c
index 00a4e8f03304..7a1a5bc337d8 100644
--- a/drivers/media/dvb/frontends/cx24110.c
+++ b/drivers/media/dvb/frontends/cx24110.c
@@ -310,7 +310,7 @@ static int cx24110_set_symbolrate (struct cx24110_state* state, u32 srate)
}
-static int _cx24110_pll_write (struct dvb_frontend* fe, u8 *buf, int len)
+static int _cx24110_pll_write (struct dvb_frontend* fe, const u8 buf[], int len)
{
struct cx24110_state *state = fe->demodulator_priv;
diff --git a/drivers/media/dvb/frontends/cx24123.c b/drivers/media/dvb/frontends/cx24123.c
index d8f921b6fafd..fad6a990a39b 100644
--- a/drivers/media/dvb/frontends/cx24123.c
+++ b/drivers/media/dvb/frontends/cx24123.c
@@ -1108,7 +1108,6 @@ struct dvb_frontend *cx24123_attach(const struct cx24123_config *config,
strlcpy(state->tuner_i2c_adapter.name, "CX24123 tuner I2C bus",
sizeof(state->tuner_i2c_adapter.name));
- state->tuner_i2c_adapter.class = I2C_CLASS_TV_DIGITAL,
state->tuner_i2c_adapter.algo = &cx24123_tuner_i2c_algo;
state->tuner_i2c_adapter.algo_data = NULL;
i2c_set_adapdata(&state->tuner_i2c_adapter, state);
diff --git a/drivers/media/dvb/frontends/dibx000_common.c b/drivers/media/dvb/frontends/dibx000_common.c
index 980e02f1575e..2311c0a3406c 100644
--- a/drivers/media/dvb/frontends/dibx000_common.c
+++ b/drivers/media/dvb/frontends/dibx000_common.c
@@ -130,7 +130,7 @@ static int i2c_adapter_init(struct i2c_adapter *i2c_adap,
struct dibx000_i2c_master *mst)
{
strncpy(i2c_adap->name, name, sizeof(i2c_adap->name));
- i2c_adap->class = I2C_CLASS_TV_DIGITAL, i2c_adap->algo = algo;
+ i2c_adap->algo = algo;
i2c_adap->algo_data = NULL;
i2c_set_adapdata(i2c_adap, mst);
if (i2c_add_adapter(i2c_adap) < 0)
diff --git a/drivers/media/dvb/frontends/drx397xD.c b/drivers/media/dvb/frontends/drx397xD.c
index f74cca6dc26b..a05007c80985 100644
--- a/drivers/media/dvb/frontends/drx397xD.c
+++ b/drivers/media/dvb/frontends/drx397xD.c
@@ -232,7 +232,7 @@ static int write_fw(struct drx397xD_state *s, enum blob_ix ix)
exit_rc:
read_unlock(&fw[s->chip_rev].lock);
- return 0;
+ return rc;
}
/* Function is not endian safe, use the RD16 wrapper below */
diff --git a/drivers/media/dvb/frontends/ix2505v.c b/drivers/media/dvb/frontends/ix2505v.c
new file mode 100644
index 000000000000..55f2eba7bc96
--- /dev/null
+++ b/drivers/media/dvb/frontends/ix2505v.c
@@ -0,0 +1,323 @@
+/**
+ * Driver for Sharp IX2505V (marked B0017) DVB-S silicon tuner
+ *
+ * Copyright (C) 2010 Malcolm Priestley
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License Version 2, as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ *
+ */
+
+#include <linux/module.h>
+#include <linux/dvb/frontend.h>
+#include <linux/slab.h>
+#include <linux/types.h>
+
+#include "ix2505v.h"
+
+static int ix2505v_debug;
+#define dprintk(level, args...) do { \
+ if (ix2505v_debug & level) \
+ printk(KERN_DEBUG "ix2505v: " args); \
+} while (0)
+
+#define deb_info(args...) dprintk(0x01, args)
+#define deb_i2c(args...) dprintk(0x02, args)
+
+struct ix2505v_state {
+ struct i2c_adapter *i2c;
+ const struct ix2505v_config *config;
+ u32 frequency;
+};
+
+/**
+ * Data read format of the Sharp IX2505V B0017
+ *
+ * byte1: 1 | 1 | 0 | 0 | 0 | MA1 | MA0 | 1
+ * byte2: POR | FL | RD2 | RD1 | RD0 | X | X | X
+ *
+ * byte1 = address
+ * byte2;
+ * POR = Power on Reset (VCC H=<2.2v L=>2.2v)
+ * FL = Phase Lock (H=lock L=unlock)
+ * RD0-2 = Reserved internal operations
+ *
+ * Only POR can be used to check the tuner is present
+ *
+ * Caution: after byte2 the I2C reverts to write mode continuing to read
+ * may corrupt tuning data.
+ *
+ */
+
+static int ix2505v_read_status_reg(struct ix2505v_state *state)
+{
+ u8 addr = state->config->tuner_address;
+ u8 b2[] = {0};
+ int ret;
+
+ struct i2c_msg msg[1] = {
+ { .addr = addr, .flags = I2C_M_RD, .buf = b2, .len = 1 }
+ };
+
+ ret = i2c_transfer(state->i2c, msg, 1);
+ deb_i2c("Read %s ", __func__);
+
+ return (ret == 1) ? (int) b2[0] : -1;
+}
+
+static int ix2505v_write(struct ix2505v_state *state, u8 buf[], u8 count)
+{
+ struct i2c_msg msg[1] = {
+ { .addr = state->config->tuner_address, .flags = 0,
+ .buf = buf, .len = count },
+ };
+
+ int ret;
+
+ ret = i2c_transfer(state->i2c, msg, 1);
+
+ if (ret != 1) {
+ deb_i2c("%s: i2c error, ret=%d\n", __func__, ret);
+ return -EIO;
+ }
+
+ return 0;
+}
+
+static int ix2505v_release(struct dvb_frontend *fe)
+{
+ struct ix2505v_state *state = fe->tuner_priv;
+
+ fe->tuner_priv = NULL;
+ kfree(state);
+
+ return 0;
+}
+
+/**
+ * Data write format of the Sharp IX2505V B0017
+ *
+ * byte1: 1 | 1 | 0 | 0 | 0 | 0(MA1)| 0(MA0)| 0
+ * byte2: 0 | BG1 | BG2 | N8 | N7 | N6 | N5 | N4
+ * byte3: N3 | N2 | N1 | A5 | A4 | A3 | A2 | A1
+ * byte4: 1 | 1(C1) | 1(C0) | PD5 | PD4 | TM | 0(RTS)| 1(REF)
+ * byte5: BA2 | BA1 | BA0 | PSC | PD3 |PD2/TS2|DIV/TS1|PD0/TS0
+ *
+ * byte1 = address
+ *
+ * Write order
+ * 1) byte1 -> byte2 -> byte3 -> byte4 -> byte5
+ * 2) byte1 -> byte4 -> byte5 -> byte2 -> byte3
+ * 3) byte1 -> byte2 -> byte3 -> byte4
+ * 4) byte1 -> byte4 -> byte5 -> byte2
+ * 5) byte1 -> byte2 -> byte3
+ * 6) byte1 -> byte4 -> byte5
+ * 7) byte1 -> byte2
+ * 8) byte1 -> byte4
+ *
+ * Recommended Setup
+ * 1 -> 8 -> 6
+ */
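+
+/* ix2505v_set_params() below follows the recommended 1 -> 8 -> 6 sequence:
+ * one full write of bytes 2 to 5, then byte 4 alone to set TM, then bytes 4
+ * and 5 again with the low pass filter bits filled in.
+ */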
+
+static int ix2505v_set_params(struct dvb_frontend *fe,
+ struct dvb_frontend_parameters *params)
+{
+ struct ix2505v_state *state = fe->tuner_priv;
+ u32 frequency = params->frequency;
+ u32 b_w = (params->u.qpsk.symbol_rate * 27) / 32000;
+ u32 div_factor, N, A, x;
+ int ret = 0, len;
+ u8 gain, cc, ref, psc, local_osc, lpf;
+ u8 data[4] = {0};
+
+ if ((frequency < fe->ops.info.frequency_min)
+ || (frequency > fe->ops.info.frequency_max))
+ return -EINVAL;
+
+ if (state->config->tuner_gain)
+ gain = (state->config->tuner_gain < 4)
+ ? state->config->tuner_gain : 0;
+ else
+ gain = 0x0;
+
+ if (state->config->tuner_chargepump)
+ cc = state->config->tuner_chargepump;
+ else
+ cc = 0x3;
+
+ ref = 8; /* REF =1 */
+ psc = 32; /* PSC = 0 */
+
+ div_factor = (frequency * ref) / 40; /* local osc = 4 MHz */
+ x = div_factor / psc;
+ N = x / 100;
+ A = ((x - (N * 100)) * psc) / 100;
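+ /* e.g. frequency = 1550000 kHz with ref = 8, psc = 32 gives
+ * div_factor = 310000, x = 9687, N = 96, A = 27 (illustrative only) */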
+
+ data[0] = ((gain & 0x3) << 5) | (N >> 3);
+ data[1] = (N << 5) | (A & 0x1f);
+ data[2] = 0x81 | ((cc & 0x3) << 5); /*PD5,PD4 & TM = 0|C1,C0|REF=1*/
+
+ deb_info("Frq=%d x=%d N=%d A=%d\n", frequency, x, N, A);
+
+ if (frequency <= 1065000)
+ local_osc = (6 << 5) | 2;
+ else if (frequency <= 1170000)
+ local_osc = (7 << 5) | 2;
+ else if (frequency <= 1300000)
+ local_osc = (1 << 5);
+ else if (frequency <= 1445000)
+ local_osc = (2 << 5);
+ else if (frequency <= 1607000)
+ local_osc = (3 << 5);
+ else if (frequency <= 1778000)
+ local_osc = (4 << 5);
+ else if (frequency <= 1942000)
+ local_osc = (5 << 5);
+ else /*frequency up to 2150000*/
+ local_osc = (6 << 5);
+
+ data[3] = local_osc; /* all other bits set 0 */
+
+ if (b_w <= 10000)
+ lpf = 0xc;
+ else if (b_w <= 12000)
+ lpf = 0x2;
+ else if (b_w <= 14000)
+ lpf = 0xa;
+ else if (b_w <= 16000)
+ lpf = 0x6;
+ else if (b_w <= 18000)
+ lpf = 0xe;
+ else if (b_w <= 20000)
+ lpf = 0x1;
+ else if (b_w <= 22000)
+ lpf = 0x9;
+ else if (b_w <= 24000)
+ lpf = 0x5;
+ else if (b_w <= 26000)
+ lpf = 0xd;
+ else if (b_w <= 28000)
+ lpf = 0x3;
+ else
+ lpf = 0xb;
+
+ deb_info("Osc=%x b_w=%x lpf=%x\n", local_osc, b_w, lpf);
+ deb_info("Data 0=[%x%x%x%x]\n", data[0], data[1], data[2], data[3]);
+
+ if (fe->ops.i2c_gate_ctrl)
+ fe->ops.i2c_gate_ctrl(fe, 1);
+
+ len = sizeof(data);
+
+ ret |= ix2505v_write(state, data, len);
+
+ data[2] |= 0x4; /* set TM = 1 other bits same */
+
+ len = 1;
+ ret |= ix2505v_write(state, &data[2], len); /* write byte 4 only */
+
+ msleep(10);
+
+ data[2] |= ((lpf >> 2) & 0x3) << 3; /* lpf */
+ data[3] |= (lpf & 0x3) << 2;
+
+ deb_info("Data 2=[%x%x]\n", data[2], data[3]);
+
+ len = 2;
+ ret |= ix2505v_write(state, &data[2], len); /* write byte 4 & 5 */
+
+ if (fe->ops.i2c_gate_ctrl)
+ fe->ops.i2c_gate_ctrl(fe, 0);
+
+ if (state->config->min_delay_ms)
+ msleep(state->config->min_delay_ms);
+
+ state->frequency = frequency;
+
+ return ret;
+}
+
+static int ix2505v_get_frequency(struct dvb_frontend *fe, u32 *frequency)
+{
+ struct ix2505v_state *state = fe->tuner_priv;
+
+ *frequency = state->frequency;
+
+ return 0;
+}
+
+static struct dvb_tuner_ops ix2505v_tuner_ops = {
+ .info = {
+ .name = "Sharp IX2505V (B0017)",
+ .frequency_min = 950000,
+ .frequency_max = 2175000
+ },
+ .release = ix2505v_release,
+ .set_params = ix2505v_set_params,
+ .get_frequency = ix2505v_get_frequency,
+};
+
+struct dvb_frontend *ix2505v_attach(struct dvb_frontend *fe,
+ const struct ix2505v_config *config,
+ struct i2c_adapter *i2c)
+{
+ struct ix2505v_state *state = NULL;
+ int ret;
+
+ if (NULL == config) {
+ deb_i2c("%s: no config ", __func__);
+ goto error;
+ }
+
+ state = kzalloc(sizeof(struct ix2505v_state), GFP_KERNEL);
+ if (NULL == state)
+ return NULL;
+
+ state->config = config;
+ state->i2c = i2c;
+
+ if (state->config->tuner_write_only) {
+ if (fe->ops.i2c_gate_ctrl)
+ fe->ops.i2c_gate_ctrl(fe, 1);
+
+ ret = ix2505v_read_status_reg(state);
+
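+ /* POR (bit 7 of the status byte) set means no usable tuner */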
+ if (ret & 0x80) {
+ deb_i2c("%s: No IX2505V found\n", __func__);
+ goto error;
+ }
+
+ if (fe->ops.i2c_gate_ctrl)
+ fe->ops.i2c_gate_ctrl(fe, 0);
+ }
+
+ fe->tuner_priv = state;
+
+ memcpy(&fe->ops.tuner_ops, &ix2505v_tuner_ops,
+ sizeof(struct dvb_tuner_ops));
+ deb_i2c("%s: initialization (%s addr=0x%02x) ok\n",
+ __func__, fe->ops.tuner_ops.info.name, config->tuner_address);
+
+ return fe;
+
+error:
+ ix2505v_release(fe);
+ return NULL;
+}
+EXPORT_SYMBOL(ix2505v_attach);
+
+module_param_named(debug, ix2505v_debug, int, 0644);
+MODULE_PARM_DESC(debug, "Turn on/off frontend debugging (default:off).");
+MODULE_DESCRIPTION("DVB IX2505V tuner driver");
+MODULE_AUTHOR("Malcolm Priestley");
+MODULE_LICENSE("GPL");
diff --git a/drivers/media/dvb/frontends/ix2505v.h b/drivers/media/dvb/frontends/ix2505v.h
new file mode 100644
index 000000000000..67e89d616d50
--- /dev/null
+++ b/drivers/media/dvb/frontends/ix2505v.h
@@ -0,0 +1,64 @@
+/**
+ * Driver for Sharp IX2505V (marked B0017) DVB-S silicon tuner
+ *
+ * Copyright (C) 2010 Malcolm Priestley
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License Version 2, as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ */
+
+#ifndef DVB_IX2505V_H
+#define DVB_IX2505V_H
+
+#include <linux/i2c.h>
+#include "dvb_frontend.h"
+
+/**
+ * Attach a ix2505v tuner to the supplied frontend structure.
+ *
+ * @param fe Frontend to attach to.
+ * @param config ix2505v_config structure
+ * @return FE pointer on success, NULL on failure.
+ */
+
+struct ix2505v_config {
+ u8 tuner_address;
+
+ /* Baseband AMP gain control 0/1 = 0dB (default), 2 = -2dB, 3 = -4dB */
+ u8 tuner_gain;
+
+ /*Charge pump output +/- 0=120 1=260 2=555 3=1200(default) */
+ u8 tuner_chargepump;
+
+ /* delay after tune */
+ int min_delay_ms;
+
+ /* disables reads */
+ u8 tuner_write_only;
+
+};
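+
+/* Example configuration, as used for the DM04/QQBOX tuner in lmedm04.c:
+ *
+ * static struct ix2505v_config lme_tuner = {
+ * .tuner_address = 0xc0,
+ * .min_delay_ms = 100,
+ * .tuner_gain = 0x0,
+ * .tuner_chargepump = 0x3,
+ * };
+ */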
+
+#if defined(CONFIG_DVB_IX2505V) || \
+ (defined(CONFIG_DVB_IX2505V_MODULE) && defined(MODULE))
+extern struct dvb_frontend *ix2505v_attach(struct dvb_frontend *fe,
+ const struct ix2505v_config *config, struct i2c_adapter *i2c);
+#else
+static inline struct dvb_frontend *ix2505v_attach(struct dvb_frontend *fe,
+ const struct ix2505v_config *config, struct i2c_adapter *i2c)
+{
+ printk(KERN_WARNING "%s: driver disabled by Kconfig\n", __func__);
+ return NULL;
+}
+#endif
+
+#endif /* DVB_IX2505V_H */
diff --git a/drivers/media/dvb/frontends/lgdt3304.c b/drivers/media/dvb/frontends/lgdt3304.c
deleted file mode 100644
index 45a529b06b9d..000000000000
--- a/drivers/media/dvb/frontends/lgdt3304.c
+++ /dev/null
@@ -1,380 +0,0 @@
-/*
- * Driver for LG ATSC lgdt3304 driver
- *
- * Copyright (C) 2008 Markus Rechberger <mrechberger@sundtek.de>
- *
- */
-
-#include <linux/kernel.h>
-#include <linux/module.h>
-#include <linux/slab.h>
-#include <linux/delay.h>
-#include "dvb_frontend.h"
-#include "lgdt3304.h"
-
-static unsigned int debug = 0;
-module_param(debug, int, 0644);
-MODULE_PARM_DESC(debug,"lgdt3304 debugging (default off)");
-
-#define dprintk(fmt, args...) if (debug) do {\
- printk("lgdt3304 debug: " fmt, ##args); } while (0)
-
-struct lgdt3304_state
-{
- struct dvb_frontend frontend;
- fe_modulation_t current_modulation;
- __u32 snr;
- __u32 current_frequency;
- __u8 addr;
- struct i2c_adapter *i2c;
-};
-
-static int i2c_write_demod_bytes (struct dvb_frontend *fe, __u8 *buf, int len)
-{
- struct lgdt3304_state *state = fe->demodulator_priv;
- struct i2c_msg i2cmsgs = {
- .addr = state->addr,
- .flags = 0,
- .len = 3,
- .buf = buf
- };
- int i;
- int err;
-
- for (i=0; i<len-1; i+=3){
- if((err = i2c_transfer(state->i2c, &i2cmsgs, 1))<0) {
- printk("%s i2c_transfer error %d\n", __func__, err);
- if (err < 0)
- return err;
- else
- return -EREMOTEIO;
- }
- i2cmsgs.buf += 3;
- }
- return 0;
-}
-
-static int lgdt3304_i2c_read_reg(struct dvb_frontend *fe, unsigned int reg)
-{
- struct lgdt3304_state *state = fe->demodulator_priv;
- struct i2c_msg i2cmsgs[2];
- int ret;
- __u8 buf;
-
- __u8 regbuf[2] = { reg>>8, reg&0xff };
-
- i2cmsgs[0].addr = state->addr;
- i2cmsgs[0].flags = 0;
- i2cmsgs[0].len = 2;
- i2cmsgs[0].buf = regbuf;
-
- i2cmsgs[1].addr = state->addr;
- i2cmsgs[1].flags = I2C_M_RD;
- i2cmsgs[1].len = 1;
- i2cmsgs[1].buf = &buf;
-
- if((ret = i2c_transfer(state->i2c, i2cmsgs, 2))<0) {
- printk("%s i2c_transfer error %d\n", __func__, ret);
- return ret;
- }
-
- return buf;
-}
-
-static int lgdt3304_i2c_write_reg(struct dvb_frontend *fe, int reg, int val)
-{
- struct lgdt3304_state *state = fe->demodulator_priv;
- char buffer[3] = { reg>>8, reg&0xff, val };
- int ret;
-
- struct i2c_msg i2cmsgs = {
- .addr = state->addr,
- .flags = 0,
- .len = 3,
- .buf=buffer
- };
- ret = i2c_transfer(state->i2c, &i2cmsgs, 1);
- if (ret != 1) {
- printk("%s i2c_transfer error %d\n", __func__, ret);
- return ret;
- }
-
- return 0;
-}
-
-
-static int lgdt3304_soft_Reset(struct dvb_frontend *fe)
-{
- lgdt3304_i2c_write_reg(fe, 0x0002, 0x9a);
- lgdt3304_i2c_write_reg(fe, 0x0002, 0x9b);
- mdelay(200);
- return 0;
-}
-
-static int lgdt3304_set_parameters(struct dvb_frontend *fe, struct dvb_frontend_parameters *param) {
- int err = 0;
-
- static __u8 lgdt3304_vsb8_data[] = {
- /* 16bit , 8bit */
- /* regs , val */
- 0x00, 0x00, 0x02,
- 0x00, 0x00, 0x13,
- 0x00, 0x0d, 0x02,
- 0x00, 0x0e, 0x02,
- 0x00, 0x12, 0x32,
- 0x00, 0x13, 0xc4,
- 0x01, 0x12, 0x17,
- 0x01, 0x13, 0x15,
- 0x01, 0x14, 0x18,
- 0x01, 0x15, 0xff,
- 0x01, 0x16, 0x2c,
- 0x02, 0x14, 0x67,
- 0x02, 0x24, 0x8d,
- 0x04, 0x27, 0x12,
- 0x04, 0x28, 0x4f,
- 0x03, 0x08, 0x80,
- 0x03, 0x09, 0x00,
- 0x03, 0x0d, 0x00,
- 0x03, 0x0e, 0x1c,
- 0x03, 0x14, 0xe1,
- 0x05, 0x0e, 0x5b,
- };
-
- /* not yet tested .. */
- static __u8 lgdt3304_qam64_data[] = {
- /* 16bit , 8bit */
- /* regs , val */
- 0x00, 0x00, 0x18,
- 0x00, 0x0d, 0x02,
- //0x00, 0x0e, 0x02,
- 0x00, 0x12, 0x2a,
- 0x00, 0x13, 0x00,
- 0x03, 0x14, 0xe3,
- 0x03, 0x0e, 0x1c,
- 0x03, 0x08, 0x66,
- 0x03, 0x09, 0x66,
- 0x03, 0x0a, 0x08,
- 0x03, 0x0b, 0x9b,
- 0x05, 0x0e, 0x5b,
- };
-
-
- /* tested with KWorld a340 */
- static __u8 lgdt3304_qam256_data[] = {
- /* 16bit , 8bit */
- /* regs , val */
- 0x00, 0x00, 0x01, //0x19,
- 0x00, 0x12, 0x2a,
- 0x00, 0x13, 0x80,
- 0x00, 0x0d, 0x02,
- 0x03, 0x14, 0xe3,
-
- 0x03, 0x0e, 0x1c,
- 0x03, 0x08, 0x66,
- 0x03, 0x09, 0x66,
- 0x03, 0x0a, 0x08,
- 0x03, 0x0b, 0x9b,
-
- 0x03, 0x0d, 0x14,
- //0x05, 0x0e, 0x5b,
- 0x01, 0x06, 0x4a,
- 0x01, 0x07, 0x3d,
- 0x01, 0x08, 0x70,
- 0x01, 0x09, 0xa3,
-
- 0x05, 0x04, 0xfd,
-
- 0x00, 0x0d, 0x82,
-
- 0x05, 0x0e, 0x5b,
-
- 0x05, 0x0e, 0x5b,
-
- 0x00, 0x02, 0x9a,
-
- 0x00, 0x02, 0x9b,
-
- 0x00, 0x00, 0x01,
- 0x00, 0x12, 0x2a,
- 0x00, 0x13, 0x80,
- 0x00, 0x0d, 0x02,
- 0x03, 0x14, 0xe3,
-
- 0x03, 0x0e, 0x1c,
- 0x03, 0x08, 0x66,
- 0x03, 0x09, 0x66,
- 0x03, 0x0a, 0x08,
- 0x03, 0x0b, 0x9b,
-
- 0x03, 0x0d, 0x14,
- 0x01, 0x06, 0x4a,
- 0x01, 0x07, 0x3d,
- 0x01, 0x08, 0x70,
- 0x01, 0x09, 0xa3,
-
- 0x05, 0x04, 0xfd,
-
- 0x00, 0x0d, 0x82,
-
- 0x05, 0x0e, 0x5b,
- };
-
- struct lgdt3304_state *state = fe->demodulator_priv;
- if (state->current_modulation != param->u.vsb.modulation) {
- switch(param->u.vsb.modulation) {
- case VSB_8:
- err = i2c_write_demod_bytes(fe, lgdt3304_vsb8_data,
- sizeof(lgdt3304_vsb8_data));
- break;
- case QAM_64:
- err = i2c_write_demod_bytes(fe, lgdt3304_qam64_data,
- sizeof(lgdt3304_qam64_data));
- break;
- case QAM_256:
- err = i2c_write_demod_bytes(fe, lgdt3304_qam256_data,
- sizeof(lgdt3304_qam256_data));
- break;
- default:
- break;
- }
-
- if (err) {
- printk("%s error setting modulation\n", __func__);
- } else {
- state->current_modulation = param->u.vsb.modulation;
- }
- }
- state->current_frequency = param->frequency;
-
- lgdt3304_soft_Reset(fe);
-
-
- if (fe->ops.tuner_ops.set_params)
- fe->ops.tuner_ops.set_params(fe, param);
-
- return 0;
-}
-
-static int lgdt3304_init(struct dvb_frontend *fe) {
- return 0;
-}
-
-static int lgdt3304_sleep(struct dvb_frontend *fe) {
- return 0;
-}
-
-
-static int lgdt3304_read_status(struct dvb_frontend *fe, fe_status_t *status)
-{
- struct lgdt3304_state *state = fe->demodulator_priv;
- int r011d;
- int qam_lck;
-
- *status = 0;
- dprintk("lgdt read status\n");
-
- r011d = lgdt3304_i2c_read_reg(fe, 0x011d);
-
- dprintk("%02x\n", r011d);
-
- switch(state->current_modulation) {
- case VSB_8:
- if (r011d & 0x80) {
- dprintk("VSB Locked\n");
- *status |= FE_HAS_CARRIER;
- *status |= FE_HAS_LOCK;
- *status |= FE_HAS_SYNC;
- *status |= FE_HAS_SIGNAL;
- }
- break;
- case QAM_64:
- case QAM_256:
- qam_lck = r011d & 0x7;
- switch(qam_lck) {
- case 0x0: dprintk("Unlock\n");
- break;
- case 0x4: dprintk("1st Lock in acquisition state\n");
- break;
- case 0x6: dprintk("2nd Lock in acquisition state\n");
- break;
- case 0x7: dprintk("Final Lock in good reception state\n");
- *status |= FE_HAS_CARRIER;
- *status |= FE_HAS_LOCK;
- *status |= FE_HAS_SYNC;
- *status |= FE_HAS_SIGNAL;
- break;
- }
- break;
- default:
- printk("%s unhandled modulation\n", __func__);
- }
-
-
- return 0;
-}
-
-static int lgdt3304_read_ber(struct dvb_frontend *fe, __u32 *ber)
-{
- dprintk("read ber\n");
- return 0;
-}
-
-static int lgdt3304_read_snr(struct dvb_frontend *fe, __u16 *snr)
-{
- dprintk("read snr\n");
- return 0;
-}
-
-static int lgdt3304_read_ucblocks(struct dvb_frontend *fe, __u32 *ucblocks)
-{
- dprintk("read ucblocks\n");
- return 0;
-}
-
-static void lgdt3304_release(struct dvb_frontend *fe)
-{
- struct lgdt3304_state *state = (struct lgdt3304_state *)fe->demodulator_priv;
- kfree(state);
-}
-
-static struct dvb_frontend_ops demod_lgdt3304={
- .info = {
- .name = "LG 3304",
- .type = FE_ATSC,
- .frequency_min = 54000000,
- .frequency_max = 858000000,
- .frequency_stepsize = 62500,
- .symbol_rate_min = 5056941,
- .symbol_rate_max = 10762000,
- .caps = FE_CAN_QAM_64 | FE_CAN_QAM_256 | FE_CAN_8VSB
- },
- .init = lgdt3304_init,
- .sleep = lgdt3304_sleep,
- .set_frontend = lgdt3304_set_parameters,
- .read_snr = lgdt3304_read_snr,
- .read_ber = lgdt3304_read_ber,
- .read_status = lgdt3304_read_status,
- .read_ucblocks = lgdt3304_read_ucblocks,
- .release = lgdt3304_release,
-};
-
-struct dvb_frontend* lgdt3304_attach(const struct lgdt3304_config *config,
- struct i2c_adapter *i2c)
-{
-
- struct lgdt3304_state *state;
- state = kzalloc(sizeof(struct lgdt3304_state), GFP_KERNEL);
- if (state == NULL)
- return NULL;
- state->addr = config->i2c_address;
- state->i2c = i2c;
-
- memcpy(&state->frontend.ops, &demod_lgdt3304, sizeof(struct dvb_frontend_ops));
- state->frontend.demodulator_priv = state;
- return &state->frontend;
-}
-
-EXPORT_SYMBOL_GPL(lgdt3304_attach);
-MODULE_AUTHOR("Markus Rechberger <mrechberger@empiatech.com>");
-MODULE_DESCRIPTION("LGE LGDT3304 DVB-T demodulator driver");
-MODULE_LICENSE("GPL");
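The deleted lgdt3304 driver programmed the demod from flat byte tables of {reg_hi, reg_lo, value} triplets: i2c_write_demod_bytes() keeps the I2C message length at 3 and advances the buffer pointer by 3 per transfer. A minimal sketch of that triplet walk, for illustration only and not part of this patch (demod_write3() is a hypothetical stand-in for the driver's actual i2c_transfer() call):

static int write_reg_triplets(struct dvb_frontend *fe, const u8 *tbl, int len)
{
	int i, err;

	for (i = 0; i + 2 < len; i += 3) {
		/* tbl[i], tbl[i+1] form the 16-bit register, tbl[i+2] the value */
		err = demod_write3(fe, &tbl[i]);
		if (err)
			return err;
	}
	return 0;
}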
diff --git a/drivers/media/dvb/frontends/lgdt3304.h b/drivers/media/dvb/frontends/lgdt3304.h
deleted file mode 100644
index fc409fe59acb..000000000000
--- a/drivers/media/dvb/frontends/lgdt3304.h
+++ /dev/null
@@ -1,45 +0,0 @@
-/*
- * Driver for DVB-T lgdt3304 demodulator
- *
- * Copyright (C) 2008 Markus Rechberger <mrechberger@gmail.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- *
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.=
- */
-
-#ifndef LGDT3304_H
-#define LGDT3304_H
-
-#include <linux/dvb/frontend.h>
-
-struct lgdt3304_config
-{
- /* demodulator's I2C address */
- u8 i2c_address;
-};
-
-#if defined(CONFIG_DVB_LGDT3304) || (defined(CONFIG_DVB_LGDT3304_MODULE) && defined(MODULE))
-extern struct dvb_frontend* lgdt3304_attach(const struct lgdt3304_config *config,
- struct i2c_adapter *i2c);
-#else
-static inline struct dvb_frontend* lgdt3304_attach(const struct lgdt3304_config *config,
- struct i2c_adapter *i2c)
-{
- printk(KERN_WARNING "%s: driver disabled by Kconfig\n", __func__);
- return NULL;
-}
-#endif /* CONFIG_DVB_LGDT */
-
-#endif /* LGDT3304_H */
diff --git a/drivers/media/dvb/frontends/lgs8gxx.c b/drivers/media/dvb/frontends/lgs8gxx.c
index 5ea28ae2ba8f..0fcddc4569d2 100644
--- a/drivers/media/dvb/frontends/lgs8gxx.c
+++ b/drivers/media/dvb/frontends/lgs8gxx.c
@@ -662,7 +662,7 @@ static void lgs8gxx_release(struct dvb_frontend *fe)
}
-static int lgs8gxx_write(struct dvb_frontend *fe, u8 *buf, int len)
+static int lgs8gxx_write(struct dvb_frontend *fe, const u8 buf[], int len)
{
struct lgs8gxx_state *priv = fe->demodulator_priv;
diff --git a/drivers/media/dvb/frontends/mt352.c b/drivers/media/dvb/frontends/mt352.c
index beba5aa0db50..319672f8e1a7 100644
--- a/drivers/media/dvb/frontends/mt352.c
+++ b/drivers/media/dvb/frontends/mt352.c
@@ -69,7 +69,7 @@ static int mt352_single_write(struct dvb_frontend *fe, u8 reg, u8 val)
return 0;
}
-static int _mt352_write(struct dvb_frontend* fe, u8* ibuf, int ilen)
+static int _mt352_write(struct dvb_frontend* fe, const u8 ibuf[], int ilen)
{
int err,i;
for (i=0; i < ilen-1; i++)
diff --git a/drivers/media/dvb/frontends/mt352.h b/drivers/media/dvb/frontends/mt352.h
index 595092f9f0c4..ca2562d6f289 100644
--- a/drivers/media/dvb/frontends/mt352.h
+++ b/drivers/media/dvb/frontends/mt352.h
@@ -63,7 +63,7 @@ static inline struct dvb_frontend* mt352_attach(const struct mt352_config* confi
}
#endif // CONFIG_DVB_MT352
-static inline int mt352_write(struct dvb_frontend *fe, u8 *buf, int len) {
+static inline int mt352_write(struct dvb_frontend *fe, const u8 buf[], int len) {
int r = 0;
if (fe->ops.write)
r = fe->ops.write(fe, buf, len);
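With the write callback taking const u8 buf[], callers of mt352_write() (and the other converted demod write hooks) can hand over read-only initialization tables without casts. A minimal sketch under that assumption, not part of this patch; the register/value bytes below are made up for the example:

static const u8 example_init_tab[] = {
	0x89, 0x38, 0x2d,	/* hypothetical register followed by values */
	0x50, 0x80,
};

static int example_demod_init(struct dvb_frontend *fe)
{
	return mt352_write(fe, example_init_tab, sizeof(example_init_tab));
}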
diff --git a/drivers/media/dvb/frontends/s5h1420.c b/drivers/media/dvb/frontends/s5h1420.c
index 2e9fd2893ede..e87b747ea99c 100644
--- a/drivers/media/dvb/frontends/s5h1420.c
+++ b/drivers/media/dvb/frontends/s5h1420.c
@@ -920,7 +920,6 @@ struct dvb_frontend *s5h1420_attach(const struct s5h1420_config *config,
/* create tuner i2c adapter */
strlcpy(state->tuner_i2c_adapter.name, "S5H1420-PN1010 tuner I2C bus",
sizeof(state->tuner_i2c_adapter.name));
- state->tuner_i2c_adapter.class = I2C_CLASS_TV_DIGITAL,
state->tuner_i2c_adapter.algo = &s5h1420_tuner_i2c_algo;
state->tuner_i2c_adapter.algo_data = NULL;
i2c_set_adapdata(&state->tuner_i2c_adapter, state);
diff --git a/drivers/media/dvb/frontends/s5h1432.c b/drivers/media/dvb/frontends/s5h1432.c
new file mode 100644
index 000000000000..0c6dcb90d168
--- /dev/null
+++ b/drivers/media/dvb/frontends/s5h1432.c
@@ -0,0 +1,415 @@
+/*
+ * Samsung s5h1432 DVB-T demodulator driver
+ *
+ * Copyright (C) 2009 Bill Liu <Bill.Liu@Conexant.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ */
+
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/string.h>
+#include <linux/slab.h>
+#include <linux/delay.h>
+#include "dvb_frontend.h"
+#include "s5h1432.h"
+
+struct s5h1432_state {
+
+ struct i2c_adapter *i2c;
+
+ /* configuration settings */
+ const struct s5h1432_config *config;
+
+ struct dvb_frontend frontend;
+
+ fe_modulation_t current_modulation;
+ unsigned int first_tune:1;
+
+ u32 current_frequency;
+ int if_freq;
+
+ u8 inversion;
+};
+
+static int debug;
+
+#define dprintk(arg...) do { \
+ if (debug) \
+ printk(arg); \
+ } while (0)
+
+static int s5h1432_writereg(struct s5h1432_state *state,
+ u8 addr, u8 reg, u8 data)
+{
+ int ret;
+ u8 buf[] = { reg, data };
+
+ struct i2c_msg msg = {.addr = addr, .flags = 0, .buf = buf, .len = 2 };
+
+ ret = i2c_transfer(state->i2c, &msg, 1);
+
+ if (ret != 1)
+ printk(KERN_ERR "%s: writereg error 0x%02x 0x%02x 0x%04x, "
+ "ret == %i)\n", __func__, addr, reg, data, ret);
+
+ return (ret != 1) ? -1 : 0;
+}
+
+static u8 s5h1432_readreg(struct s5h1432_state *state, u8 addr, u8 reg)
+{
+ int ret;
+ u8 b0[] = { reg };
+ u8 b1[] = { 0 };
+
+ struct i2c_msg msg[] = {
+ {.addr = addr, .flags = 0, .buf = b0, .len = 1},
+ {.addr = addr, .flags = I2C_M_RD, .buf = b1, .len = 1}
+ };
+
+ ret = i2c_transfer(state->i2c, msg, 2);
+
+ if (ret != 2)
+ printk(KERN_ERR "%s: readreg error (ret == %i)\n",
+ __func__, ret);
+ return b1[0];
+}
+
+static int s5h1432_sleep(struct dvb_frontend *fe)
+{
+ return 0;
+}
+
+static int s5h1432_set_channel_bandwidth(struct dvb_frontend *fe,
+ u32 bandwidth)
+{
+ struct s5h1432_state *state = fe->demodulator_priv;
+
+ u8 reg = 0;
+
+ /* Register [0x2E] bit 3:2 : 8MHz = 0; 7MHz = 1; 6MHz = 2 */
+ reg = s5h1432_readreg(state, S5H1432_I2C_TOP_ADDR, 0x2E);
+ reg &= ~(0x0C);
+ switch (bandwidth) {
+ case 6:
+ reg |= 0x08;
+ break;
+ case 7:
+ reg |= 0x04;
+ break;
+ case 8:
+ reg |= 0x00;
+ break;
+ default:
+ return 0;
+ }
+ s5h1432_writereg(state, S5H1432_I2C_TOP_ADDR, 0x2E, reg);
+ return 1;
+}
+
+static int s5h1432_set_IF(struct dvb_frontend *fe, u32 ifFreqHz)
+{
+ struct s5h1432_state *state = fe->demodulator_priv;
+
+ switch (ifFreqHz) {
+ case TAIWAN_HI_IF_FREQ_44_MHZ:
+ s5h1432_writereg(state, S5H1432_I2C_TOP_ADDR, 0xe4, 0x55);
+ s5h1432_writereg(state, S5H1432_I2C_TOP_ADDR, 0xe5, 0x55);
+ s5h1432_writereg(state, S5H1432_I2C_TOP_ADDR, 0xe7, 0x15);
+ break;
+ case EUROPE_HI_IF_FREQ_36_MHZ:
+ s5h1432_writereg(state, S5H1432_I2C_TOP_ADDR, 0xe4, 0x00);
+ s5h1432_writereg(state, S5H1432_I2C_TOP_ADDR, 0xe5, 0x00);
+ s5h1432_writereg(state, S5H1432_I2C_TOP_ADDR, 0xe7, 0x40);
+ break;
+ case IF_FREQ_6_MHZ:
+ s5h1432_writereg(state, S5H1432_I2C_TOP_ADDR, 0xe4, 0x00);
+ s5h1432_writereg(state, S5H1432_I2C_TOP_ADDR, 0xe5, 0x00);
+ s5h1432_writereg(state, S5H1432_I2C_TOP_ADDR, 0xe7, 0xe0);
+ break;
+ case IF_FREQ_3point3_MHZ:
+ s5h1432_writereg(state, S5H1432_I2C_TOP_ADDR, 0xe4, 0x66);
+ s5h1432_writereg(state, S5H1432_I2C_TOP_ADDR, 0xe5, 0x66);
+ s5h1432_writereg(state, S5H1432_I2C_TOP_ADDR, 0xe7, 0xEE);
+ break;
+ case IF_FREQ_3point5_MHZ:
+ s5h1432_writereg(state, S5H1432_I2C_TOP_ADDR, 0xe4, 0x55);
+ s5h1432_writereg(state, S5H1432_I2C_TOP_ADDR, 0xe5, 0x55);
+ s5h1432_writereg(state, S5H1432_I2C_TOP_ADDR, 0xe7, 0xED);
+ break;
+ case IF_FREQ_4_MHZ:
+ s5h1432_writereg(state, S5H1432_I2C_TOP_ADDR, 0xe4, 0xAA);
+ s5h1432_writereg(state, S5H1432_I2C_TOP_ADDR, 0xe5, 0xAA);
+ s5h1432_writereg(state, S5H1432_I2C_TOP_ADDR, 0xe7, 0xEA);
+ break;
+ default:
+ {
+ u32 value = 0;
+ value = (u32) (((48000 - (ifFreqHz / 1000)) * 512 *
+ (u32) 32768) / (48 * 1000));
+ printk(KERN_INFO
+ "Default IFFreq %d :reg value = 0x%x\n",
+ ifFreqHz, value);
+ s5h1432_writereg(state, S5H1432_I2C_TOP_ADDR, 0xe4,
+ (u8) value & 0xFF);
+ s5h1432_writereg(state, S5H1432_I2C_TOP_ADDR, 0xe5,
+ (u8) (value >> 8) & 0xFF);
+ s5h1432_writereg(state, S5H1432_I2C_TOP_ADDR, 0xe7,
+ (u8) (value >> 16) & 0xFF);
+ break;
+ }
+
+ }
+
+ return 1;
+}
+
+/* Talk to the demod, set the FEC, GUARD, QAM settings etc */
+static int s5h1432_set_frontend(struct dvb_frontend *fe,
+ struct dvb_frontend_parameters *p)
+{
+ u32 dvb_bandwidth = 8;
+ struct s5h1432_state *state = fe->demodulator_priv;
+
+ if (p->frequency == state->current_frequency) {
+ /*current_frequency = p->frequency; */
+ /*state->current_frequency = p->frequency; */
+ } else {
+ fe->ops.tuner_ops.set_params(fe, p);
+ msleep(300);
+ s5h1432_set_channel_bandwidth(fe, dvb_bandwidth);
+ switch (p->u.ofdm.bandwidth) {
+ case BANDWIDTH_6_MHZ:
+ dvb_bandwidth = 6;
+ s5h1432_set_IF(fe, IF_FREQ_4_MHZ);
+ break;
+ case BANDWIDTH_7_MHZ:
+ dvb_bandwidth = 7;
+ s5h1432_set_IF(fe, IF_FREQ_4_MHZ);
+ break;
+ case BANDWIDTH_8_MHZ:
+ dvb_bandwidth = 8;
+ s5h1432_set_IF(fe, IF_FREQ_4_MHZ);
+ break;
+ default:
+ return 0;
+ }
+ /*fe->ops.tuner_ops.set_params(fe, p); */
+/*Soft Reset chip*/
+ msleep(30);
+ s5h1432_writereg(state, S5H1432_I2C_TOP_ADDR, 0x09, 0x1a);
+ msleep(30);
+ s5h1432_writereg(state, S5H1432_I2C_TOP_ADDR, 0x09, 0x1b);
+
+ s5h1432_set_channel_bandwidth(fe, dvb_bandwidth);
+ switch (p->u.ofdm.bandwidth) {
+ case BANDWIDTH_6_MHZ:
+ dvb_bandwidth = 6;
+ s5h1432_set_IF(fe, IF_FREQ_4_MHZ);
+ break;
+ case BANDWIDTH_7_MHZ:
+ dvb_bandwidth = 7;
+ s5h1432_set_IF(fe, IF_FREQ_4_MHZ);
+ break;
+ case BANDWIDTH_8_MHZ:
+ dvb_bandwidth = 8;
+ s5h1432_set_IF(fe, IF_FREQ_4_MHZ);
+ break;
+ default:
+ return 0;
+ }
+ /*fe->ops.tuner_ops.set_params(fe,p); */
+ /*Soft Reset chip*/
+ msleep(30);
+ s5h1432_writereg(state, S5H1432_I2C_TOP_ADDR, 0x09, 0x1a);
+ msleep(30);
+ s5h1432_writereg(state, S5H1432_I2C_TOP_ADDR, 0x09, 0x1b);
+
+ }
+
+ state->current_frequency = p->frequency;
+
+ return 0;
+}
+
+static int s5h1432_init(struct dvb_frontend *fe)
+{
+ struct s5h1432_state *state = fe->demodulator_priv;
+
+ u8 reg = 0;
+ state->current_frequency = 0;
+ printk(KERN_INFO " s5h1432_init().\n");
+
+ /*Set VSB mode as default, this also does a soft reset */
+ /*Initialize registers */
+
+ s5h1432_writereg(state, S5H1432_I2C_TOP_ADDR, 0x04, 0xa8);
+ s5h1432_writereg(state, S5H1432_I2C_TOP_ADDR, 0x05, 0x01);
+ s5h1432_writereg(state, S5H1432_I2C_TOP_ADDR, 0x07, 0x70);
+ s5h1432_writereg(state, S5H1432_I2C_TOP_ADDR, 0x19, 0x80);
+ s5h1432_writereg(state, S5H1432_I2C_TOP_ADDR, 0x1b, 0x9D);
+ s5h1432_writereg(state, S5H1432_I2C_TOP_ADDR, 0x1c, 0x30);
+ s5h1432_writereg(state, S5H1432_I2C_TOP_ADDR, 0x1d, 0x20);
+ s5h1432_writereg(state, S5H1432_I2C_TOP_ADDR, 0x1e, 0x1B);
+ s5h1432_writereg(state, S5H1432_I2C_TOP_ADDR, 0x2e, 0x40);
+ s5h1432_writereg(state, S5H1432_I2C_TOP_ADDR, 0x42, 0x84);
+ s5h1432_writereg(state, S5H1432_I2C_TOP_ADDR, 0x50, 0x5a);
+ s5h1432_writereg(state, S5H1432_I2C_TOP_ADDR, 0x5a, 0xd3);
+ s5h1432_writereg(state, S5H1432_I2C_TOP_ADDR, 0x68, 0x50);
+ s5h1432_writereg(state, S5H1432_I2C_TOP_ADDR, 0xb8, 0x3c);
+ s5h1432_writereg(state, S5H1432_I2C_TOP_ADDR, 0xc4, 0x10);
+ s5h1432_writereg(state, S5H1432_I2C_TOP_ADDR, 0xcc, 0x9c);
+ s5h1432_writereg(state, S5H1432_I2C_TOP_ADDR, 0xDA, 0x00);
+ s5h1432_writereg(state, S5H1432_I2C_TOP_ADDR, 0xe1, 0x94);
+ /* s5h1432_writereg(state, S5H1432_I2C_TOP_ADDR, 0xf4, 0xa1); */
+ s5h1432_writereg(state, S5H1432_I2C_TOP_ADDR, 0xf9, 0x00);
+
+ /*For NXP tuner*/
+
+ /*Set 3.3MHz as default IF frequency */
+ s5h1432_writereg(state, S5H1432_I2C_TOP_ADDR, 0xe4, 0x66);
+ s5h1432_writereg(state, S5H1432_I2C_TOP_ADDR, 0xe5, 0x66);
+ s5h1432_writereg(state, S5H1432_I2C_TOP_ADDR, 0xe7, 0xEE);
+ /* Set reg 0x1E to get the full dynamic range */
+ s5h1432_writereg(state, S5H1432_I2C_TOP_ADDR, 0x1e, 0x31);
+
+ /* Mode setting in demod */
+ reg = s5h1432_readreg(state, S5H1432_I2C_TOP_ADDR, 0x42);
+ reg |= 0x80;
+ s5h1432_writereg(state, S5H1432_I2C_TOP_ADDR, 0x42, reg);
+ /* Serial mode */
+
+ /* Soft Reset chip */
+
+ s5h1432_writereg(state, S5H1432_I2C_TOP_ADDR, 0x09, 0x1a);
+ msleep(30);
+ s5h1432_writereg(state, S5H1432_I2C_TOP_ADDR, 0x09, 0x1b);
+
+
+ return 0;
+}
+
+static int s5h1432_read_status(struct dvb_frontend *fe, fe_status_t *status)
+{
+ return 0;
+}
+
+static int s5h1432_read_signal_strength(struct dvb_frontend *fe,
+ u16 *signal_strength)
+{
+ return 0;
+}
+
+static int s5h1432_read_snr(struct dvb_frontend *fe, u16 *snr)
+{
+ return 0;
+}
+
+static int s5h1432_read_ucblocks(struct dvb_frontend *fe, u32 *ucblocks)
+{
+
+ return 0;
+}
+
+static int s5h1432_read_ber(struct dvb_frontend *fe, u32 *ber)
+{
+ return 0;
+}
+
+static int s5h1432_get_frontend(struct dvb_frontend *fe,
+ struct dvb_frontend_parameters *p)
+{
+ return 0;
+}
+
+static int s5h1432_get_tune_settings(struct dvb_frontend *fe,
+ struct dvb_frontend_tune_settings *tune)
+{
+ return 0;
+}
+
+static void s5h1432_release(struct dvb_frontend *fe)
+{
+ struct s5h1432_state *state = fe->demodulator_priv;
+ kfree(state);
+}
+
+static struct dvb_frontend_ops s5h1432_ops;
+
+struct dvb_frontend *s5h1432_attach(const struct s5h1432_config *config,
+ struct i2c_adapter *i2c)
+{
+ struct s5h1432_state *state = NULL;
+
+ printk(KERN_INFO " Enter s5h1432_attach(). attach success!\n");
+ /* allocate memory for the internal state */
+ state = kmalloc(sizeof(struct s5h1432_state), GFP_KERNEL);
+ if (state == NULL)
+ goto error;
+
+ /* setup the state */
+ state->config = config;
+ state->i2c = i2c;
+ state->current_modulation = QAM_16;
+ state->inversion = state->config->inversion;
+
+ /* create dvb_frontend */
+ memcpy(&state->frontend.ops, &s5h1432_ops,
+ sizeof(struct dvb_frontend_ops));
+
+ state->frontend.demodulator_priv = state;
+
+ return &state->frontend;
+
+error:
+ kfree(state);
+ return NULL;
+}
+EXPORT_SYMBOL(s5h1432_attach);
+
+static struct dvb_frontend_ops s5h1432_ops = {
+
+ .info = {
+ .name = "Samsung s5h1432 DVB-T Frontend",
+ .type = FE_OFDM,
+ .frequency_min = 177000000,
+ .frequency_max = 858000000,
+ .frequency_stepsize = 166666,
+ .caps = FE_CAN_FEC_1_2 | FE_CAN_FEC_2_3 | FE_CAN_FEC_3_4 |
+ FE_CAN_FEC_5_6 | FE_CAN_FEC_7_8 | FE_CAN_FEC_AUTO |
+ FE_CAN_QPSK | FE_CAN_QAM_16 | FE_CAN_QAM_64 | FE_CAN_QAM_AUTO |
+ FE_CAN_HIERARCHY_AUTO | FE_CAN_GUARD_INTERVAL_AUTO |
+ FE_CAN_TRANSMISSION_MODE_AUTO | FE_CAN_RECOVER},
+
+ .init = s5h1432_init,
+ .sleep = s5h1432_sleep,
+ .set_frontend = s5h1432_set_frontend,
+ .get_frontend = s5h1432_get_frontend,
+ .get_tune_settings = s5h1432_get_tune_settings,
+ .read_status = s5h1432_read_status,
+ .read_ber = s5h1432_read_ber,
+ .read_signal_strength = s5h1432_read_signal_strength,
+ .read_snr = s5h1432_read_snr,
+ .read_ucblocks = s5h1432_read_ucblocks,
+ .release = s5h1432_release,
+};
+
+module_param(debug, int, 0644);
+MODULE_PARM_DESC(debug, "Enable verbose debug messages");
+
+MODULE_DESCRIPTION("Samsung s5h1432 DVB-T Demodulator driver");
+MODULE_AUTHOR("Bill Liu");
+MODULE_LICENSE("GPL");
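Each hard-coded entry in s5h1432_set_IF() above is the 24-bit value (48000 - if_khz) * 2^24 / 48000 split across registers 0xe4 (bits 7:0), 0xe5 (bits 15:8) and 0xe7 (bits 23:16); the 3.3 MHz case, for example, is 44700 * 16777216 / 48000 = 0xEE6666, matching the 0x66/0x66/0xEE writes. An illustrative sketch of the same computation, not part of this patch, using a 64-bit intermediate since the 32-bit product in the default branch can exceed 32 bits for typical IF values; div_u64() is from <linux/math64.h>:

static u32 s5h1432_if_word(u32 if_khz)
{
	/* (48000 - if_khz) / 48000 scaled to 24 bits; 512 * 32768 == 2^24 */
	return (u32)div_u64((u64)(48000 - if_khz) * 512 * 32768, 48000);
}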
diff --git a/drivers/media/dvb/frontends/s5h1432.h b/drivers/media/dvb/frontends/s5h1432.h
new file mode 100644
index 000000000000..b57438c32546
--- /dev/null
+++ b/drivers/media/dvb/frontends/s5h1432.h
@@ -0,0 +1,91 @@
+/*
+ * Samsung s5h1432 VSB/QAM demodulator driver
+ *
+ * Copyright (C) 2009 Bill Liu <Bill.Liu@Conexant.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ *
+ */
+
+#ifndef __S5H1432_H__
+#define __S5H1432_H__
+
+#include <linux/dvb/frontend.h>
+
+#define S5H1432_I2C_TOP_ADDR (0x02 >> 1)
+
+#define TAIWAN_HI_IF_FREQ_44_MHZ 44000000
+#define EUROPE_HI_IF_FREQ_36_MHZ 36000000
+#define IF_FREQ_6_MHZ 6000000
+#define IF_FREQ_3point3_MHZ 3300000
+#define IF_FREQ_3point5_MHZ 3500000
+#define IF_FREQ_4_MHZ 4000000
+
+struct s5h1432_config {
+
+ /* serial/parallel output */
+#define S5H1432_PARALLEL_OUTPUT 0
+#define S5H1432_SERIAL_OUTPUT 1
+ u8 output_mode;
+
+ /* GPIO Setting */
+#define S5H1432_GPIO_OFF 0
+#define S5H1432_GPIO_ON 1
+ u8 gpio;
+
+ /* MPEG signal timing */
+#define S5H1432_MPEGTIMING_CONTINOUS_INVERTING_CLOCK 0
+#define S5H1432_MPEGTIMING_CONTINOUS_NONINVERTING_CLOCK 1
+#define S5H1432_MPEGTIMING_NONCONTINOUS_INVERTING_CLOCK 2
+#define S5H1432_MPEGTIMING_NONCONTINOUS_NONINVERTING_CLOCK 3
+ u16 mpeg_timing;
+
+ /* IF Freq for QAM and VSB in KHz */
+#define S5H1432_IF_3250 3250
+#define S5H1432_IF_3500 3500
+#define S5H1432_IF_4000 4000
+#define S5H1432_IF_5380 5380
+#define S5H1432_IF_44000 44000
+#define S5H1432_VSB_IF_DEFAULT s5h1432_IF_44000
+#define S5H1432_QAM_IF_DEFAULT s5h1432_IF_44000
+ u16 qam_if;
+ u16 vsb_if;
+
+ /* Spectral Inversion */
+#define S5H1432_INVERSION_OFF 0
+#define S5H1432_INVERSION_ON 1
+ u8 inversion;
+
+ /* Return lock status based on tuner lock, or demod lock */
+#define S5H1432_TUNERLOCKING 0
+#define S5H1432_DEMODLOCKING 1
+ u8 status_mode;
+};
+
+#if defined(CONFIG_DVB_S5H1432) || \
+ (defined(CONFIG_DVB_S5H1432_MODULE) && defined(MODULE))
+extern struct dvb_frontend *s5h1432_attach(const struct s5h1432_config *config,
+ struct i2c_adapter *i2c);
+#else
+static inline struct dvb_frontend *s5h1432_attach(const struct s5h1432_config
+ *config,
+ struct i2c_adapter *i2c)
+{
+ printk(KERN_WARNING "%s: driver disabled by Kconfig\n", __func__);
+ return NULL;
+}
+#endif /* CONFIG_DVB_s5h1432 */
+
+#endif /* __s5h1432_H__ */
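A bridge driver would wire this frontend up roughly as below; s5h1432_attach() returns NULL both when allocation fails and, via the inline stub above, when the driver is disabled by Kconfig. This is an illustrative sketch only: struct example_dev, its members and the chosen config values are hypothetical, while the field names and macros come from the header above.

static const struct s5h1432_config example_s5h1432_cfg = {
	.output_mode = S5H1432_SERIAL_OUTPUT,
	.vsb_if      = S5H1432_IF_4000,
	.qam_if      = S5H1432_IF_4000,
	.inversion   = S5H1432_INVERSION_OFF,
	.status_mode = S5H1432_DEMODLOCKING,
};

static int example_attach_demod(struct example_dev *dev)
{
	dev->fe = s5h1432_attach(&example_s5h1432_cfg, &dev->i2c_adap);
	if (!dev->fe)
		return -ENODEV;	/* not found, or driver disabled by Kconfig */
	return 0;
}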
diff --git a/drivers/media/dvb/frontends/si21xx.c b/drivers/media/dvb/frontends/si21xx.c
index d21a327db629..4b0c99a08a85 100644
--- a/drivers/media/dvb/frontends/si21xx.c
+++ b/drivers/media/dvb/frontends/si21xx.c
@@ -268,7 +268,7 @@ static int si21_writereg(struct si21xx_state *state, u8 reg, u8 data)
return (ret != 1) ? -EREMOTEIO : 0;
}
-static int si21_write(struct dvb_frontend *fe, u8 *buf, int len)
+static int si21_write(struct dvb_frontend *fe, const u8 buf[], int len)
{
struct si21xx_state *state = fe->demodulator_priv;
diff --git a/drivers/media/dvb/frontends/stb6100.c b/drivers/media/dvb/frontends/stb6100.c
index f73c13323e90..80a9e4cba631 100644
--- a/drivers/media/dvb/frontends/stb6100.c
+++ b/drivers/media/dvb/frontends/stb6100.c
@@ -506,7 +506,7 @@ static struct dvb_tuner_ops stb6100_ops = {
};
struct dvb_frontend *stb6100_attach(struct dvb_frontend *fe,
- struct stb6100_config *config,
+ const struct stb6100_config *config,
struct i2c_adapter *i2c)
{
struct stb6100_state *state = NULL;
diff --git a/drivers/media/dvb/frontends/stb6100.h b/drivers/media/dvb/frontends/stb6100.h
index 395d056599a6..2ab096614b3f 100644
--- a/drivers/media/dvb/frontends/stb6100.h
+++ b/drivers/media/dvb/frontends/stb6100.h
@@ -97,13 +97,13 @@ struct stb6100_state {
#if defined(CONFIG_DVB_STB6100) || (defined(CONFIG_DVB_STB6100_MODULE) && defined(MODULE))
extern struct dvb_frontend *stb6100_attach(struct dvb_frontend *fe,
- struct stb6100_config *config,
+ const struct stb6100_config *config,
struct i2c_adapter *i2c);
#else
static inline struct dvb_frontend *stb6100_attach(struct dvb_frontend *fe,
- struct stb6100_config *config,
+ const struct stb6100_config *config,
struct i2c_adapter *i2c)
{
printk(KERN_WARNING "%s: Driver disabled by Kconfig\n", __func__);
diff --git a/drivers/media/dvb/frontends/stv0288.c b/drivers/media/dvb/frontends/stv0288.c
index 2930a5d6768a..63db8fd2754c 100644
--- a/drivers/media/dvb/frontends/stv0288.c
+++ b/drivers/media/dvb/frontends/stv0288.c
@@ -6,6 +6,8 @@
Copyright (C) 2008 Igor M. Liplianin <liplianin@me.by>
Removed stb6000 specific tuner code and revised some
procedures.
+ 2010-09-01 Josef Pavlik <josef@pavlik.it>
+ Fixed diseqc_msg, diseqc_burst and set_tone problems
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
@@ -78,7 +80,7 @@ static int stv0288_writeregI(struct stv0288_state *state, u8 reg, u8 data)
return (ret != 1) ? -EREMOTEIO : 0;
}
-static int stv0288_write(struct dvb_frontend *fe, u8 *buf, int len)
+static int stv0288_write(struct dvb_frontend *fe, const u8 buf[], int len)
{
struct stv0288_state *state = fe->demodulator_priv;
@@ -156,14 +158,13 @@ static int stv0288_send_diseqc_msg(struct dvb_frontend *fe,
stv0288_writeregI(state, 0x09, 0);
msleep(30);
- stv0288_writeregI(state, 0x05, 0x16);
+ stv0288_writeregI(state, 0x05, 0x12);/* modulated mode, single shot */
for (i = 0; i < m->msg_len; i++) {
if (stv0288_writeregI(state, 0x06, m->msg[i]))
return -EREMOTEIO;
- msleep(12);
}
-
+ msleep(m->msg_len*12);
return 0;
}
@@ -174,13 +175,14 @@ static int stv0288_send_diseqc_burst(struct dvb_frontend *fe,
dprintk("%s\n", __func__);
- if (stv0288_writeregI(state, 0x05, 0x16))/* burst mode */
+ if (stv0288_writeregI(state, 0x05, 0x03))/* burst mode, single shot */
return -EREMOTEIO;
if (stv0288_writeregI(state, 0x06, burst == SEC_MINI_A ? 0x00 : 0xff))
return -EREMOTEIO;
- if (stv0288_writeregI(state, 0x06, 0x12))
+ msleep(15);
+ if (stv0288_writeregI(state, 0x05, 0x12))
return -EREMOTEIO;
return 0;
@@ -192,18 +194,19 @@ static int stv0288_set_tone(struct dvb_frontend *fe, fe_sec_tone_mode_t tone)
switch (tone) {
case SEC_TONE_ON:
- if (stv0288_writeregI(state, 0x05, 0x10))/* burst mode */
+ if (stv0288_writeregI(state, 0x05, 0x10))/* cont carrier */
return -EREMOTEIO;
- return stv0288_writeregI(state, 0x06, 0xff);
+ break;
case SEC_TONE_OFF:
- if (stv0288_writeregI(state, 0x05, 0x13))/* burst mode */
+ if (stv0288_writeregI(state, 0x05, 0x12))/* burst mode off*/
return -EREMOTEIO;
- return stv0288_writeregI(state, 0x06, 0x00);
+ break;
default:
return -EINVAL;
}
+ return 0;
}
static u8 stv0288_inittab[] = {
@@ -486,7 +489,7 @@ static int stv0288_set_frontend(struct dvb_frontend *fe,
tda[2] = 0x0; /* CFRL */
for (tm = -6; tm < 7;) {
/* Viterbi status */
- if (stv0288_readreg(state, 0x24) & 0x80)
+ if (stv0288_readreg(state, 0x24) & 0x8)
break;
tda[2] += 40;
diff --git a/drivers/media/dvb/frontends/stv0299.c b/drivers/media/dvb/frontends/stv0299.c
index 968874469726..4e3db3a42e06 100644
--- a/drivers/media/dvb/frontends/stv0299.c
+++ b/drivers/media/dvb/frontends/stv0299.c
@@ -92,7 +92,7 @@ static int stv0299_writeregI (struct stv0299_state* state, u8 reg, u8 data)
return (ret != 1) ? -EREMOTEIO : 0;
}
-static int stv0299_write(struct dvb_frontend* fe, u8 *buf, int len)
+static int stv0299_write(struct dvb_frontend* fe, const u8 buf[], int len)
{
struct stv0299_state* state = fe->demodulator_priv;
diff --git a/drivers/media/dvb/frontends/stv0299.h b/drivers/media/dvb/frontends/stv0299.h
index 0fd96e22b650..ba219b767a69 100644
--- a/drivers/media/dvb/frontends/stv0299.h
+++ b/drivers/media/dvb/frontends/stv0299.h
@@ -65,7 +65,7 @@ struct stv0299_config
* First of each pair is the register, second is the value.
* List should be terminated with an 0xff, 0xff pair.
*/
- u8* inittab;
+ const u8* inittab;
/* master clock to use */
u32 mclk;
diff --git a/drivers/media/dvb/frontends/tda1004x.c b/drivers/media/dvb/frontends/tda1004x.c
index f2a8abe0a243..ea485d923550 100644
--- a/drivers/media/dvb/frontends/tda1004x.c
+++ b/drivers/media/dvb/frontends/tda1004x.c
@@ -598,7 +598,7 @@ static int tda1004x_decode_fec(int tdafec)
return -1;
}
-static int tda1004x_write(struct dvb_frontend* fe, u8 *buf, int len)
+static int tda1004x_write(struct dvb_frontend* fe, const u8 buf[], int len)
{
struct tda1004x_state* state = fe->demodulator_priv;
diff --git a/drivers/media/dvb/frontends/zl10353.c b/drivers/media/dvb/frontends/zl10353.c
index 8c612719adfc..adbbf6d3d044 100644
--- a/drivers/media/dvb/frontends/zl10353.c
+++ b/drivers/media/dvb/frontends/zl10353.c
@@ -64,7 +64,7 @@ static int zl10353_single_write(struct dvb_frontend *fe, u8 reg, u8 val)
return 0;
}
-static int zl10353_write(struct dvb_frontend *fe, u8 *ibuf, int ilen)
+static int zl10353_write(struct dvb_frontend *fe, const u8 ibuf[], int ilen)
{
int err, i;
for (i = 0; i < ilen - 1; i++)
diff --git a/drivers/media/dvb/mantis/mantis_core.c b/drivers/media/dvb/mantis/mantis_core.c
index 8113b23ce448..22524a8e6f61 100644
--- a/drivers/media/dvb/mantis/mantis_core.c
+++ b/drivers/media/dvb/mantis/mantis_core.c
@@ -91,10 +91,7 @@ static int get_mac_address(struct mantis_pci *mantis)
return err;
}
dprintk(verbose, MANTIS_ERROR, 0,
- " MAC Address=[%02x:%02x:%02x:%02x:%02x:%02x]\n",
- mantis->mac_address[0], mantis->mac_address[1],
- mantis->mac_address[2], mantis->mac_address[3],
- mantis->mac_address[4], mantis->mac_address[5]);
+ " MAC Address=[%pM]\n", mantis->mac_address);
return 0;
}
diff --git a/drivers/media/dvb/mantis/mantis_i2c.c b/drivers/media/dvb/mantis/mantis_i2c.c
index 7870bcf9689a..e7794517fe26 100644
--- a/drivers/media/dvb/mantis/mantis_i2c.c
+++ b/drivers/media/dvb/mantis/mantis_i2c.c
@@ -229,7 +229,6 @@ int __devinit mantis_i2c_init(struct mantis_pci *mantis)
i2c_set_adapdata(i2c_adapter, mantis);
i2c_adapter->owner = THIS_MODULE;
- i2c_adapter->class = I2C_CLASS_TV_DIGITAL;
i2c_adapter->algo = &mantis_algo;
i2c_adapter->algo_data = NULL;
i2c_adapter->timeout = 500;
diff --git a/drivers/media/dvb/mantis/mantis_ioc.c b/drivers/media/dvb/mantis/mantis_ioc.c
index de148ded52d8..fe31cfb0b158 100644
--- a/drivers/media/dvb/mantis/mantis_ioc.c
+++ b/drivers/media/dvb/mantis/mantis_ioc.c
@@ -68,14 +68,7 @@ int mantis_get_mac(struct mantis_pci *mantis)
return err;
}
- dprintk(MANTIS_ERROR, 0,
- " MAC Address=[%02x:%02x:%02x:%02x:%02x:%02x]\n",
- mac_addr[0],
- mac_addr[1],
- mac_addr[2],
- mac_addr[3],
- mac_addr[4],
- mac_addr[5]);
+ dprintk(MANTIS_ERROR, 0, " MAC Address=[%pM]\n", mac_addr);
return 0;
}
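The %pM printk extension used in the two mantis hunks above takes a pointer to a 6-byte address and prints it as colon-separated hex, replacing the open-coded six-argument format. A small illustration, not part of this patch (the address bytes are made up):

u8 mac[6] = { 0x00, 0x1a, 0x2b, 0x3c, 0x4d, 0x5e };

printk(KERN_INFO "MAC Address=[%pM]\n", mac);	/* prints 00:1a:2b:3c:4d:5e */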
diff --git a/drivers/media/dvb/ngene/ngene-core.c b/drivers/media/dvb/ngene/ngene-core.c
index 4caeb163a666..3a7ef71087be 100644
--- a/drivers/media/dvb/ngene/ngene-core.c
+++ b/drivers/media/dvb/ngene/ngene-core.c
@@ -34,7 +34,6 @@
#include <linux/io.h>
#include <asm/div64.h>
#include <linux/pci.h>
-#include <linux/smp_lock.h>
#include <linux/timer.h>
#include <linux/byteorder/generic.h>
#include <linux/firmware.h>
diff --git a/drivers/media/dvb/ngene/ngene-dvb.c b/drivers/media/dvb/ngene/ngene-dvb.c
index 48f980b21d66..3832e5983c19 100644
--- a/drivers/media/dvb/ngene/ngene-dvb.c
+++ b/drivers/media/dvb/ngene/ngene-dvb.c
@@ -35,7 +35,6 @@
#include <linux/io.h>
#include <asm/div64.h>
#include <linux/pci.h>
-#include <linux/smp_lock.h>
#include <linux/timer.h>
#include <linux/byteorder/generic.h>
#include <linux/firmware.h>
diff --git a/drivers/media/dvb/ngene/ngene-i2c.c b/drivers/media/dvb/ngene/ngene-i2c.c
index 477fe0aade86..d28554f8ce99 100644
--- a/drivers/media/dvb/ngene/ngene-i2c.c
+++ b/drivers/media/dvb/ngene/ngene-i2c.c
@@ -37,7 +37,6 @@
#include <asm/div64.h>
#include <linux/pci.h>
#include <linux/pci_ids.h>
-#include <linux/smp_lock.h>
#include <linux/timer.h>
#include <linux/byteorder/generic.h>
#include <linux/firmware.h>
@@ -165,7 +164,6 @@ int ngene_i2c_init(struct ngene *dev, int dev_nr)
struct i2c_adapter *adap = &(dev->channel[dev_nr].i2c_adapter);
i2c_set_adapdata(adap, &(dev->channel[dev_nr]));
- adap->class = I2C_CLASS_TV_DIGITAL | I2C_CLASS_TV_ANALOG;
strcpy(adap->name, "nGene");
diff --git a/drivers/media/dvb/pluto2/pluto2.c b/drivers/media/dvb/pluto2/pluto2.c
index 1c798219dc7c..6ca6713d527a 100644
--- a/drivers/media/dvb/pluto2/pluto2.c
+++ b/drivers/media/dvb/pluto2/pluto2.c
@@ -647,7 +647,6 @@ static int __devinit pluto2_probe(struct pci_dev *pdev,
i2c_set_adapdata(&pluto->i2c_adap, pluto);
strcpy(pluto->i2c_adap.name, DRIVER_NAME);
pluto->i2c_adap.owner = THIS_MODULE;
- pluto->i2c_adap.class = I2C_CLASS_TV_DIGITAL;
pluto->i2c_adap.dev.parent = &pdev->dev;
pluto->i2c_adap.algo_data = &pluto->i2c_bit;
pluto->i2c_bit.data = pluto;
diff --git a/drivers/media/dvb/pt1/pt1.c b/drivers/media/dvb/pt1/pt1.c
index 69ad94934ec2..0486919c1d0f 100644
--- a/drivers/media/dvb/pt1/pt1.c
+++ b/drivers/media/dvb/pt1/pt1.c
@@ -1087,7 +1087,6 @@ pt1_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
pt1_update_power(pt1);
i2c_adap = &pt1->i2c_adap;
- i2c_adap->class = I2C_CLASS_TV_DIGITAL;
i2c_adap->algo = &pt1_i2c_algo;
i2c_adap->algo_data = NULL;
i2c_adap->dev.parent = &pdev->dev;
diff --git a/drivers/media/dvb/siano/smscoreapi.c b/drivers/media/dvb/siano/smscoreapi.c
index ff3b0fa901b3..135e45bd00c7 100644
--- a/drivers/media/dvb/siano/smscoreapi.c
+++ b/drivers/media/dvb/siano/smscoreapi.c
@@ -1504,8 +1504,7 @@ int smscore_gpio_set_level(struct smscore_device_t *coredev, u8 PinNum,
u32 msgData[3]; /* keep it 3 ! */
} *pMsg;
- if ((NewLevel > 1) || (PinNum > MAX_GPIO_PIN_NUMBER) ||
- (PinNum > MAX_GPIO_PIN_NUMBER))
+ if ((NewLevel > 1) || (PinNum > MAX_GPIO_PIN_NUMBER))
return -EINVAL;
totalLen = sizeof(struct SmsMsgHdr_ST) +
diff --git a/drivers/media/dvb/siano/smsir.c b/drivers/media/dvb/siano/smsir.c
index d0e4639ee9db..a27c44a8af5a 100644
--- a/drivers/media/dvb/siano/smsir.c
+++ b/drivers/media/dvb/siano/smsir.c
@@ -40,7 +40,7 @@ void sms_ir_event(struct smscore_device_t *coredev, const char *buf, int len)
const s32 *samples = (const void *)buf;
for (i = 0; i < len >> 2; i++) {
- struct ir_raw_event ev;
+ DEFINE_IR_RAW_EVENT(ev);
ev.duration = abs(samples[i]) * 1000; /* Convert to ns */
ev.pulse = (samples[i] > 0) ? false : true;
diff --git a/drivers/media/dvb/ttpci/av7110.c b/drivers/media/dvb/ttpci/av7110.c
index a12b88f53ed9..fc0a60f8a1e1 100644
--- a/drivers/media/dvb/ttpci/av7110.c
+++ b/drivers/media/dvb/ttpci/av7110.c
@@ -2472,7 +2472,6 @@ static int __devinit av7110_attach(struct saa7146_dev* dev,
get recognized before the main driver is fully loaded */
saa7146_write(dev, GPIO_CTRL, 0x500000);
- av7110->i2c_adap.class = I2C_CLASS_TV_DIGITAL;
strlcpy(av7110->i2c_adap.name, pci_ext->ext_priv, sizeof(av7110->i2c_adap.name));
saa7146_i2c_adapter_prepare(dev, &av7110->i2c_adap, SAA7146_I2C_BUS_BIT_RATE_120); /* 275 kHz */
@@ -2886,7 +2885,7 @@ MODULE_DEVICE_TABLE(pci, pci_tbl);
static struct saa7146_extension av7110_extension_driver = {
- .name = "dvb",
+ .name = "av7110",
.flags = SAA7146_USE_I2C_IRQ,
.module = THIS_MODULE,
diff --git a/drivers/media/dvb/ttpci/av7110_av.c b/drivers/media/dvb/ttpci/av7110_av.c
index 244d5d51f5f9..952b33dbac4f 100644
--- a/drivers/media/dvb/ttpci/av7110_av.c
+++ b/drivers/media/dvb/ttpci/av7110_av.c
@@ -245,8 +245,11 @@ int av7110_pes_play(void *dest, struct dvb_ringbuffer *buf, int dlen)
return -1;
}
while (1) {
- if ((len = dvb_ringbuffer_avail(buf)) < 6)
+ len = dvb_ringbuffer_avail(buf);
+ if (len < 6) {
+ wake_up(&buf->queue);
return -1;
+ }
sync = DVB_RINGBUFFER_PEEK(buf, 0) << 24;
sync |= DVB_RINGBUFFER_PEEK(buf, 1) << 16;
sync |= DVB_RINGBUFFER_PEEK(buf, 2) << 8;
diff --git a/drivers/media/dvb/ttpci/budget-core.c b/drivers/media/dvb/ttpci/budget-core.c
index 054661315311..37666d4edab6 100644
--- a/drivers/media/dvb/ttpci/budget-core.c
+++ b/drivers/media/dvb/ttpci/budget-core.c
@@ -495,8 +495,6 @@ int ttpci_budget_init(struct budget *budget, struct saa7146_dev *dev,
if (bi->type != BUDGET_FS_ACTIVY)
saa7146_write(dev, GPIO_CTRL, 0x500000); /* GPIO 3 = 1 */
- budget->i2c_adap.class = I2C_CLASS_TV_DIGITAL;
-
strlcpy(budget->i2c_adap.name, budget->card->name, sizeof(budget->i2c_adap.name));
saa7146_i2c_adapter_prepare(dev, &budget->i2c_adap, SAA7146_I2C_BUS_BIT_RATE_120);
diff --git a/drivers/media/dvb/ttusb-budget/dvb-ttusb-budget.c b/drivers/media/dvb/ttusb-budget/dvb-ttusb-budget.c
index 4a3f2b8ea37d..40625b26ac10 100644
--- a/drivers/media/dvb/ttusb-budget/dvb-ttusb-budget.c
+++ b/drivers/media/dvb/ttusb-budget/dvb-ttusb-budget.c
@@ -1694,7 +1694,6 @@ static int ttusb_probe(struct usb_interface *intf, const struct usb_device_id *i
i2c_set_adapdata(&ttusb->i2c_adap, ttusb);
- ttusb->i2c_adap.class = I2C_CLASS_TV_DIGITAL;
ttusb->i2c_adap.algo = &ttusb_dec_algo;
ttusb->i2c_adap.algo_data = NULL;
ttusb->i2c_adap.dev.parent = &udev->dev;
diff --git a/drivers/media/radio/radio-cadet.c b/drivers/media/radio/radio-cadet.c
index 482d0f3be5ff..b701ea6e7c73 100644
--- a/drivers/media/radio/radio-cadet.c
+++ b/drivers/media/radio/radio-cadet.c
@@ -374,7 +374,8 @@ static int vidioc_g_tuner(struct file *file, void *priv,
switch (v->index) {
case 0:
strlcpy(v->name, "FM", sizeof(v->name));
- v->capability = V4L2_TUNER_CAP_STEREO | V4L2_TUNER_CAP_RDS;
+ v->capability = V4L2_TUNER_CAP_STEREO | V4L2_TUNER_CAP_RDS |
+ V4L2_TUNER_CAP_RDS_BLOCK_IO;
v->rangelow = 1400; /* 87.5 MHz */
v->rangehigh = 1728; /* 108.0 MHz */
v->rxsubchans = cadet_getstereo(dev);
diff --git a/drivers/media/radio/radio-mr800.c b/drivers/media/radio/radio-mr800.c
index 353b82855949..e6b2d085a449 100644
--- a/drivers/media/radio/radio-mr800.c
+++ b/drivers/media/radio/radio-mr800.c
@@ -58,7 +58,6 @@
#include <linux/module.h>
#include <linux/init.h>
#include <linux/slab.h>
-#include <linux/smp_lock.h>
#include <linux/input.h>
#include <linux/videodev2.h>
#include <media/v4l2-device.h>
@@ -176,8 +175,6 @@ static int amradio_set_mute(struct amradio_device *radio, char argument)
int retval;
int size;
- BUG_ON(!mutex_is_locked(&radio->lock));
-
radio->buffer[0] = 0x00;
radio->buffer[1] = 0x55;
radio->buffer[2] = 0xaa;
@@ -207,8 +204,6 @@ static int amradio_setfreq(struct amradio_device *radio, int freq)
int size;
unsigned short freq_send = 0x10 + (freq >> 3) / 25;
- BUG_ON(!mutex_is_locked(&radio->lock));
-
radio->buffer[0] = 0x00;
radio->buffer[1] = 0x55;
radio->buffer[2] = 0xaa;
@@ -253,8 +248,6 @@ static int amradio_set_stereo(struct amradio_device *radio, char argument)
int retval;
int size;
- BUG_ON(!mutex_is_locked(&radio->lock));
-
radio->buffer[0] = 0x00;
radio->buffer[1] = 0x55;
radio->buffer[2] = 0xaa;
@@ -290,11 +283,13 @@ static void usb_amradio_disconnect(struct usb_interface *intf)
struct amradio_device *radio = to_amradio_dev(usb_get_intfdata(intf));
mutex_lock(&radio->lock);
- radio->usbdev = NULL;
- mutex_unlock(&radio->lock);
-
+ /* increase the device node's refcount */
+ get_device(&radio->videodev.dev);
v4l2_device_disconnect(&radio->v4l2_dev);
video_unregister_device(&radio->videodev);
+ mutex_unlock(&radio->lock);
+ /* decrease the device node's refcount, allowing it to be released */
+ put_device(&radio->videodev.dev);
}
/* vidioc_querycap - query device capabilities */
@@ -503,28 +498,18 @@ out:
static int usb_amradio_open(struct file *file)
{
struct amradio_device *radio = video_drvdata(file);
- int retval = 0;
-
- mutex_lock(&radio->lock);
-
- if (!radio->usbdev) {
- retval = -EIO;
- goto unlock;
- }
+ int retval;
file->private_data = radio;
retval = usb_autopm_get_interface(radio->intf);
if (retval)
- goto unlock;
+ return retval;
if (unlikely(!radio->initialized)) {
retval = usb_amradio_init(radio);
if (retval)
usb_autopm_put_interface(radio->intf);
}
-
-unlock:
- mutex_unlock(&radio->lock);
return retval;
}
@@ -532,37 +517,10 @@ unlock:
static int usb_amradio_close(struct file *file)
{
struct amradio_device *radio = file->private_data;
- int retval = 0;
-
- mutex_lock(&radio->lock);
- if (!radio->usbdev)
- retval = -EIO;
- else
+ if (video_is_registered(&radio->videodev))
usb_autopm_put_interface(radio->intf);
-
- mutex_unlock(&radio->lock);
- return retval;
-}
-
-static long usb_amradio_ioctl(struct file *file, unsigned int cmd,
- unsigned long arg)
-{
- struct amradio_device *radio = file->private_data;
- long retval = 0;
-
- mutex_lock(&radio->lock);
-
- if (!radio->usbdev) {
- retval = -EIO;
- goto unlock;
- }
-
- retval = video_ioctl2(file, cmd, arg);
-
-unlock:
- mutex_unlock(&radio->lock);
- return retval;
+ return 0;
}
/* Suspend device - stop device. Need to be checked and fixed */
@@ -571,15 +529,13 @@ static int usb_amradio_suspend(struct usb_interface *intf, pm_message_t message)
struct amradio_device *radio = to_amradio_dev(usb_get_intfdata(intf));
mutex_lock(&radio->lock);
-
if (!radio->muted && radio->initialized) {
amradio_set_mute(radio, AMRADIO_STOP);
radio->muted = 0;
}
+ mutex_unlock(&radio->lock);
dev_info(&intf->dev, "going into suspend..\n");
-
- mutex_unlock(&radio->lock);
return 0;
}
@@ -589,7 +545,6 @@ static int usb_amradio_resume(struct usb_interface *intf)
struct amradio_device *radio = to_amradio_dev(usb_get_intfdata(intf));
mutex_lock(&radio->lock);
-
if (unlikely(!radio->initialized))
goto unlock;
@@ -604,9 +559,9 @@ static int usb_amradio_resume(struct usb_interface *intf)
amradio_set_mute(radio, AMRADIO_START);
unlock:
- dev_info(&intf->dev, "coming out of suspend..\n");
-
mutex_unlock(&radio->lock);
+
+ dev_info(&intf->dev, "coming out of suspend..\n");
return 0;
}
@@ -615,7 +570,7 @@ static const struct v4l2_file_operations usb_amradio_fops = {
.owner = THIS_MODULE,
.open = usb_amradio_open,
.release = usb_amradio_close,
- .ioctl = usb_amradio_ioctl,
+ .unlocked_ioctl = video_ioctl2,
};
static const struct v4l2_ioctl_ops usb_amradio_ioctl_ops = {
@@ -671,19 +626,20 @@ static int usb_amradio_probe(struct usb_interface *intf,
goto err_v4l2;
}
+ mutex_init(&radio->lock);
+
strlcpy(radio->videodev.name, radio->v4l2_dev.name,
sizeof(radio->videodev.name));
radio->videodev.v4l2_dev = &radio->v4l2_dev;
radio->videodev.fops = &usb_amradio_fops;
radio->videodev.ioctl_ops = &usb_amradio_ioctl_ops;
radio->videodev.release = usb_amradio_video_device_release;
+ radio->videodev.lock = &radio->lock;
radio->usbdev = interface_to_usbdev(intf);
radio->intf = intf;
radio->curfreq = 95.16 * FREQ_MUL;
- mutex_init(&radio->lock);
-
video_set_drvdata(&radio->videodev, radio);
retval = video_register_device(&radio->videodev, VFL_TYPE_RADIO,
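radio-mr800 drops its private ioctl wrapper here: with .unlocked_ioctl = video_ioctl2 and videodev.lock pointing at the driver mutex, the V4L2 core takes that mutex around ioctl calls itself, which is why mutex_init() now has to run before the video_device is handed out. Condensed from the probe path above for illustration only:

mutex_init(&radio->lock);		/* must exist before it is handed to the core */
radio->videodev.lock = &radio->lock;	/* core serializes ioctls on this */
radio->videodev.fops = &usb_amradio_fops;	/* .unlocked_ioctl = video_ioctl2 */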
diff --git a/drivers/media/radio/radio-si4713.c b/drivers/media/radio/radio-si4713.c
index 13554ab13f76..03829e6818bd 100644
--- a/drivers/media/radio/radio-si4713.c
+++ b/drivers/media/radio/radio-si4713.c
@@ -291,19 +291,19 @@ static int radio_si4713_pdriver_probe(struct platform_device *pdev)
goto unregister_v4l2_dev;
}
- sd = v4l2_i2c_new_subdev_board(&rsdev->v4l2_dev, adapter, "si4713_i2c",
+ sd = v4l2_i2c_new_subdev_board(&rsdev->v4l2_dev, adapter,
pdata->subdev_board_info, NULL);
if (!sd) {
dev_err(&pdev->dev, "Cannot get v4l2 subdevice\n");
rval = -ENODEV;
- goto unregister_v4l2_dev;
+ goto put_adapter;
}
rsdev->radio_dev = video_device_alloc();
if (!rsdev->radio_dev) {
dev_err(&pdev->dev, "Failed to alloc video device.\n");
rval = -ENOMEM;
- goto unregister_v4l2_dev;
+ goto put_adapter;
}
memcpy(rsdev->radio_dev, &radio_si4713_vdev_template,
@@ -320,6 +320,8 @@ static int radio_si4713_pdriver_probe(struct platform_device *pdev)
free_vdev:
video_device_release(rsdev->radio_dev);
+put_adapter:
+ i2c_put_adapter(adapter);
unregister_v4l2_dev:
v4l2_device_unregister(&rsdev->v4l2_dev);
free_rsdev:
@@ -335,8 +337,12 @@ static int __exit radio_si4713_pdriver_remove(struct platform_device *pdev)
struct radio_si4713_device *rsdev = container_of(v4l2_dev,
struct radio_si4713_device,
v4l2_dev);
+ struct v4l2_subdev *sd = list_entry(v4l2_dev->subdevs.next,
+ struct v4l2_subdev, list);
+ struct i2c_client *client = v4l2_get_subdevdata(sd);
video_unregister_device(rsdev->radio_dev);
+ i2c_put_adapter(client->adapter);
v4l2_device_unregister(&rsdev->v4l2_dev);
kfree(rsdev);
diff --git a/drivers/media/radio/si470x/radio-si470x-common.c b/drivers/media/radio/si470x/radio-si470x-common.c
index 9927a595b426..ac76dfe5b3fa 100644
--- a/drivers/media/radio/si470x/radio-si470x-common.c
+++ b/drivers/media/radio/si470x/radio-si470x-common.c
@@ -408,17 +408,15 @@ done:
/*
* si470x_rds_on - switch on rds reception
*/
-int si470x_rds_on(struct si470x_device *radio)
+static int si470x_rds_on(struct si470x_device *radio)
{
int retval;
/* sysconfig 1 */
- mutex_lock(&radio->lock);
radio->registers[SYSCONFIG1] |= SYSCONFIG1_RDS;
retval = si470x_set_register(radio, SYSCONFIG1);
if (retval < 0)
radio->registers[SYSCONFIG1] &= ~SYSCONFIG1_RDS;
- mutex_unlock(&radio->lock);
return retval;
}
@@ -440,6 +438,7 @@ static ssize_t si470x_fops_read(struct file *file, char __user *buf,
unsigned int block_count = 0;
/* switch on rds reception */
+ mutex_lock(&radio->lock);
if ((radio->registers[SYSCONFIG1] & SYSCONFIG1_RDS) == 0)
si470x_rds_on(radio);
@@ -480,9 +479,9 @@ static ssize_t si470x_fops_read(struct file *file, char __user *buf,
buf += 3;
retval += 3;
}
- mutex_unlock(&radio->lock);
done:
+ mutex_unlock(&radio->lock);
return retval;
}
@@ -497,8 +496,11 @@ static unsigned int si470x_fops_poll(struct file *file,
int retval = 0;
/* switch on rds reception */
+
+ mutex_lock(&radio->lock);
if ((radio->registers[SYSCONFIG1] & SYSCONFIG1_RDS) == 0)
si470x_rds_on(radio);
+ mutex_unlock(&radio->lock);
poll_wait(file, &radio->read_queue, pts);
@@ -516,7 +518,7 @@ static const struct v4l2_file_operations si470x_fops = {
.owner = THIS_MODULE,
.read = si470x_fops_read,
.poll = si470x_fops_poll,
- .ioctl = video_ioctl2,
+ .unlocked_ioctl = video_ioctl2,
.open = si470x_fops_open,
.release = si470x_fops_release,
};
@@ -572,6 +574,7 @@ static int si470x_vidioc_g_ctrl(struct file *file, void *priv,
struct si470x_device *radio = video_drvdata(file);
int retval = 0;
+ mutex_lock(&radio->lock);
/* safety checks */
retval = si470x_disconnect_check(radio);
if (retval)
@@ -594,6 +597,8 @@ done:
if (retval < 0)
dev_warn(&radio->videodev->dev,
"get control failed with %d\n", retval);
+
+ mutex_unlock(&radio->lock);
return retval;
}
@@ -607,6 +612,7 @@ static int si470x_vidioc_s_ctrl(struct file *file, void *priv,
struct si470x_device *radio = video_drvdata(file);
int retval = 0;
+ mutex_lock(&radio->lock);
/* safety checks */
retval = si470x_disconnect_check(radio);
if (retval)
@@ -633,6 +639,7 @@ done:
if (retval < 0)
dev_warn(&radio->videodev->dev,
"set control failed with %d\n", retval);
+ mutex_unlock(&radio->lock);
return retval;
}
@@ -662,6 +669,7 @@ static int si470x_vidioc_g_tuner(struct file *file, void *priv,
struct si470x_device *radio = video_drvdata(file);
int retval = 0;
+ mutex_lock(&radio->lock);
/* safety checks */
retval = si470x_disconnect_check(radio);
if (retval)
@@ -681,7 +689,7 @@ static int si470x_vidioc_g_tuner(struct file *file, void *priv,
tuner->type = V4L2_TUNER_RADIO;
#if defined(CONFIG_USB_SI470X) || defined(CONFIG_USB_SI470X_MODULE)
tuner->capability = V4L2_TUNER_CAP_LOW | V4L2_TUNER_CAP_STEREO |
- V4L2_TUNER_CAP_RDS;
+ V4L2_TUNER_CAP_RDS | V4L2_TUNER_CAP_RDS_BLOCK_IO;
#else
tuner->capability = V4L2_TUNER_CAP_LOW | V4L2_TUNER_CAP_STEREO;
#endif
@@ -737,6 +745,7 @@ done:
if (retval < 0)
dev_warn(&radio->videodev->dev,
"get tuner failed with %d\n", retval);
+ mutex_unlock(&radio->lock);
return retval;
}
@@ -750,6 +759,7 @@ static int si470x_vidioc_s_tuner(struct file *file, void *priv,
struct si470x_device *radio = video_drvdata(file);
int retval = 0;
+ mutex_lock(&radio->lock);
/* safety checks */
retval = si470x_disconnect_check(radio);
if (retval)
@@ -776,6 +786,7 @@ done:
if (retval < 0)
dev_warn(&radio->videodev->dev,
"set tuner failed with %d\n", retval);
+ mutex_unlock(&radio->lock);
return retval;
}
@@ -790,6 +801,7 @@ static int si470x_vidioc_g_frequency(struct file *file, void *priv,
int retval = 0;
/* safety checks */
+ mutex_lock(&radio->lock);
retval = si470x_disconnect_check(radio);
if (retval)
goto done;
@@ -806,6 +818,7 @@ done:
if (retval < 0)
dev_warn(&radio->videodev->dev,
"get frequency failed with %d\n", retval);
+ mutex_unlock(&radio->lock);
return retval;
}
@@ -819,6 +832,7 @@ static int si470x_vidioc_s_frequency(struct file *file, void *priv,
struct si470x_device *radio = video_drvdata(file);
int retval = 0;
+ mutex_lock(&radio->lock);
/* safety checks */
retval = si470x_disconnect_check(radio);
if (retval)
@@ -835,6 +849,7 @@ done:
if (retval < 0)
dev_warn(&radio->videodev->dev,
"set frequency failed with %d\n", retval);
+ mutex_unlock(&radio->lock);
return retval;
}
@@ -848,6 +863,7 @@ static int si470x_vidioc_s_hw_freq_seek(struct file *file, void *priv,
struct si470x_device *radio = video_drvdata(file);
int retval = 0;
+ mutex_lock(&radio->lock);
/* safety checks */
retval = si470x_disconnect_check(radio);
if (retval)
@@ -864,6 +880,7 @@ done:
if (retval < 0)
dev_warn(&radio->videodev->dev,
"set hardware frequency seek failed with %d\n", retval);
+ mutex_unlock(&radio->lock);
return retval;
}
diff --git a/drivers/media/radio/si470x/radio-si470x-usb.c b/drivers/media/radio/si470x/radio-si470x-usb.c
index 5ec13e50a9f0..392e84fe90ef 100644
--- a/drivers/media/radio/si470x/radio-si470x-usb.c
+++ b/drivers/media/radio/si470x/radio-si470x-usb.c
@@ -517,7 +517,7 @@ int si470x_fops_open(struct file *file)
struct si470x_device *radio = video_drvdata(file);
int retval;
- lock_kernel();
+ mutex_lock(&radio->lock);
radio->users++;
retval = usb_autopm_get_interface(radio->intf);
@@ -558,7 +558,7 @@ int si470x_fops_open(struct file *file)
}
done:
- unlock_kernel();
+ mutex_unlock(&radio->lock);
return retval;
}
@@ -577,7 +577,7 @@ int si470x_fops_release(struct file *file)
goto done;
}
- mutex_lock(&radio->disconnect_lock);
+ mutex_lock(&radio->lock);
radio->users--;
if (radio->users == 0) {
/* shutdown interrupt handler */
@@ -591,7 +591,7 @@ int si470x_fops_release(struct file *file)
video_unregister_device(radio->videodev);
kfree(radio->int_in_buffer);
kfree(radio->buffer);
- mutex_unlock(&radio->disconnect_lock);
+ mutex_unlock(&radio->lock);
kfree(radio);
goto done;
}
@@ -603,7 +603,7 @@ int si470x_fops_release(struct file *file)
retval = si470x_stop(radio);
usb_autopm_put_interface(radio->intf);
}
- mutex_unlock(&radio->disconnect_lock);
+ mutex_unlock(&radio->lock);
done:
return retval;
}
@@ -661,7 +661,6 @@ static int si470x_usb_driver_probe(struct usb_interface *intf,
radio->disconnected = 0;
radio->usbdev = interface_to_usbdev(intf);
radio->intf = intf;
- mutex_init(&radio->disconnect_lock);
mutex_init(&radio->lock);
iface_desc = intf->cur_altsetting;
@@ -830,7 +829,7 @@ static void si470x_usb_driver_disconnect(struct usb_interface *intf)
{
struct si470x_device *radio = usb_get_intfdata(intf);
- mutex_lock(&radio->disconnect_lock);
+ mutex_lock(&radio->lock);
radio->disconnected = 1;
usb_set_intfdata(intf, NULL);
if (radio->users == 0) {
@@ -843,10 +842,10 @@ static void si470x_usb_driver_disconnect(struct usb_interface *intf)
kfree(radio->int_in_buffer);
video_unregister_device(radio->videodev);
kfree(radio->buffer);
- mutex_unlock(&radio->disconnect_lock);
+ mutex_unlock(&radio->lock);
kfree(radio);
} else {
- mutex_unlock(&radio->disconnect_lock);
+ mutex_unlock(&radio->lock);
}
}
diff --git a/drivers/media/radio/si470x/radio-si470x.h b/drivers/media/radio/si470x/radio-si470x.h
index 3cd0a29cd6e7..b9914d7a0c9f 100644
--- a/drivers/media/radio/si470x/radio-si470x.h
+++ b/drivers/media/radio/si470x/radio-si470x.h
@@ -31,7 +31,6 @@
#include <linux/init.h>
#include <linux/sched.h>
#include <linux/slab.h>
-#include <linux/smp_lock.h>
#include <linux/input.h>
#include <linux/version.h>
#include <linux/videodev2.h>
@@ -177,7 +176,6 @@ struct si470x_device {
/* driver management */
unsigned char disconnected;
- struct mutex disconnect_lock;
#endif
#if defined(CONFIG_I2C_SI470X) || defined(CONFIG_I2C_SI470X_MODULE)
@@ -221,7 +219,6 @@ int si470x_disconnect_check(struct si470x_device *radio);
int si470x_set_freq(struct si470x_device *radio, unsigned int freq);
int si470x_start(struct si470x_device *radio);
int si470x_stop(struct si470x_device *radio);
-int si470x_rds_on(struct si470x_device *radio);
int si470x_fops_open(struct file *file);
int si470x_fops_release(struct file *file);
int si470x_vidioc_querycap(struct file *file, void *priv,
diff --git a/drivers/media/radio/si4713-i2c.c b/drivers/media/radio/si4713-i2c.c
index fc7f4b794649..a6e6f1987a3a 100644
--- a/drivers/media/radio/si4713-i2c.c
+++ b/drivers/media/radio/si4713-i2c.c
@@ -1804,7 +1804,7 @@ static int si4713_g_modulator(struct v4l2_subdev *sd, struct v4l2_modulator *vm)
strncpy(vm->name, "FM Modulator", 32);
vm->capability = V4L2_TUNER_CAP_STEREO | V4L2_TUNER_CAP_LOW |
- V4L2_TUNER_CAP_RDS;
+ V4L2_TUNER_CAP_RDS | V4L2_TUNER_CAP_RDS_CONTROLS;
/* Report current frequency range limits */
vm->rangelow = si4713_to_v4l2(FREQ_RANGE_LOW);
diff --git a/drivers/media/radio/tef6862.c b/drivers/media/radio/tef6862.c
index 90cae90277e7..7c0d77751f6e 100644
--- a/drivers/media/radio/tef6862.c
+++ b/drivers/media/radio/tef6862.c
@@ -22,7 +22,6 @@
#include <linux/kernel.h>
#include <linux/interrupt.h>
#include <linux/i2c.h>
-#include <linux/i2c-id.h>
#include <linux/slab.h>
#include <media/v4l2-ioctl.h>
#include <media/v4l2-device.h>
diff --git a/drivers/media/video/Kconfig b/drivers/media/video/Kconfig
index d000522cb0f4..6830d2848bd7 100644
--- a/drivers/media/video/Kconfig
+++ b/drivers/media/video/Kconfig
@@ -112,7 +112,7 @@ config VIDEO_IR_I2C
#
menu "Encoders/decoders and other helper chips"
- depends on !VIDEO_HELPER_CHIPS_AUTO
+ visible if !VIDEO_HELPER_CHIPS_AUTO
comment "Audio decoders"
@@ -539,7 +539,7 @@ config VIDEO_VIU
config VIDEO_VIVI
tristate "Virtual Video Driver"
depends on VIDEO_DEV && VIDEO_V4L2 && !SPARC32 && !SPARC64
- depends on (FRAMEBUFFER_CONSOLE || STI_CONSOLE) && FONTS
+ depends on FRAMEBUFFER_CONSOLE || STI_CONSOLE
select FONT_8x16
select VIDEOBUF_VMALLOC
default n
@@ -599,68 +599,8 @@ config VIDEO_W9966
Check out <file:Documentation/video4linux/w9966.txt> for more
information.
-config VIDEO_CPIA
- tristate "CPiA Video For Linux (DEPRECATED)"
- depends on VIDEO_V4L1
- default n
- ---help---
- This driver is DEPRECATED please use the gspca cpia1 module
- instead. Note that you need atleast version 0.6.4 of libv4l for
- the cpia1 gspca module.
-
- This is the video4linux driver for cameras based on Vision's CPiA
- (Colour Processor Interface ASIC), such as the Creative Labs Video
- Blaster Webcam II. If you have one of these cameras, say Y here
- and select parallel port and/or USB lowlevel support below,
- otherwise say N. This will not work with the Creative Webcam III.
-
- Please read <file:Documentation/video4linux/README.cpia> for more
- information.
-
- This driver is also available as a module (cpia).
-
-config VIDEO_CPIA_PP
- tristate "CPiA Parallel Port Lowlevel Support"
- depends on PARPORT_1284 && VIDEO_CPIA && PARPORT
- help
- This is the lowlevel parallel port support for cameras based on
- Vision's CPiA (Colour Processor Interface ASIC), such as the
- Creative Webcam II. If you have the parallel port version of one
- of these cameras, say Y here, otherwise say N. It is also available
- as a module (cpia_pp).
-
-config VIDEO_CPIA_USB
- tristate "CPiA USB Lowlevel Support"
- depends on VIDEO_CPIA && USB
- help
- This is the lowlevel USB support for cameras based on Vision's CPiA
- (Colour Processor Interface ASIC), such as the Creative Webcam II.
- If you have the USB version of one of these cameras, say Y here,
- otherwise say N. This will not work with the Creative Webcam III.
- It is also available as a module (cpia_usb).
-
source "drivers/media/video/cpia2/Kconfig"
-config VIDEO_SAA5246A
- tristate "SAA5246A, SAA5281 Teletext processor"
- depends on I2C && VIDEO_V4L2
- help
- Support for I2C bus based teletext using the SAA5246A or SAA5281
- chip. Useful only if you live in Europe.
-
- To compile this driver as a module, choose M here: the
- module will be called saa5246a.
-
-config VIDEO_SAA5249
- tristate "SAA5249 Teletext processor"
- depends on I2C && VIDEO_V4L2
- help
- Support for I2C bus based teletext using the SAA5249 chip. At the
- moment this is only useful on some European WinTV cards.
-
- To compile this driver as a module, choose M here: the
- module will be called saa5249.
-
config VIDEO_VINO
tristate "SGI Vino Video For Linux (EXPERIMENTAL)"
depends on I2C && SGI_IP22 && EXPERIMENTAL && VIDEO_V4L2
@@ -669,14 +609,6 @@ config VIDEO_VINO
Say Y here to build in support for the Vino video input system found
on SGI Indy machines.
-config VIDEO_STRADIS
- tristate "Stradis 4:2:2 MPEG-2 video driver (EXPERIMENTAL)"
- depends on EXPERIMENTAL && PCI && VIDEO_V4L1 && VIRT_TO_BUS
- help
- Say Y here to enable support for the Stradis 4:2:2 MPEG-2 video
- driver for PCI. There is a product page at
- <http://www.stradis.com/>.
-
source "drivers/media/video/zoran/Kconfig"
config VIDEO_MEYE
@@ -774,6 +706,22 @@ config VIDEO_CAFE_CCIC
CMOS camera controller. This is the controller found on first-
generation OLPC systems.
+config VIDEO_SR030PC30
+ tristate "SR030PC30 VGA camera sensor support"
+ depends on I2C && VIDEO_V4L2
+ ---help---
+ This driver supports the SR030PC30 VGA camera from Siliconfile
+
+config VIDEO_VIA_CAMERA
+ tristate "VIAFB camera controller support"
+ depends on FB_VIA
+ select VIDEOBUF_DMA_SG
+ select VIDEO_OV7670
+ help
+ Driver support for the integrated camera controller in VIA
+ Chrome9 chipsets. Currently only tested on OLPC xo-1.5 systems
+ with ov7670 sensors.
+
config SOC_CAMERA
tristate "SoC camera support"
depends on VIDEO_V4L2 && HAS_DMA && I2C
@@ -783,6 +731,12 @@ config SOC_CAMERA
over a bus like PCI or USB. For example some i2c camera connected
directly to the data bus of an SoC.
+config SOC_CAMERA_IMX074
+ tristate "imx074 support"
+ depends on SOC_CAMERA && I2C
+ help
+ This driver supports IMX074 cameras from Sony
+
config SOC_CAMERA_MT9M001
tristate "mt9m001 support"
depends on SOC_CAMERA && I2C
@@ -835,6 +789,12 @@ config SOC_CAMERA_PLATFORM
help
This is a generic SoC camera platform driver, useful for testing
+config SOC_CAMERA_OV6650
+ tristate "ov6650 sensor support"
+ depends on SOC_CAMERA && I2C
+ ---help---
+ This is a V4L2 SoC camera driver for the OmniVision OV6650 sensor
+
config SOC_CAMERA_OV772X
tristate "ov772x camera support"
depends on SOC_CAMERA && I2C
@@ -890,6 +850,14 @@ config VIDEO_SH_MOBILE_CEU
---help---
This is a v4l2 driver for the SuperH Mobile CEU Interface
+config VIDEO_OMAP1
+ tristate "OMAP1 Camera Interface driver"
+ depends on VIDEO_DEV && ARCH_OMAP1 && SOC_CAMERA
+ select VIDEOBUF_DMA_CONTIG
+ select VIDEOBUF_DMA_SG
+ ---help---
+ This is a v4l2 driver for the TI OMAP1 camera interface
+
config VIDEO_OMAP2
tristate "OMAP2 Camera Capture Interface driver"
depends on VIDEO_DEV && ARCH_OMAP2
diff --git a/drivers/media/video/Makefile b/drivers/media/video/Makefile
index 40f98fba5f88..af79d476a4c8 100644
--- a/drivers/media/video/Makefile
+++ b/drivers/media/video/Makefile
@@ -33,8 +33,6 @@ obj-$(CONFIG_VIDEO_TVAUDIO) += tvaudio.o
obj-$(CONFIG_VIDEO_TDA7432) += tda7432.o
obj-$(CONFIG_VIDEO_TDA9875) += tda9875.o
obj-$(CONFIG_VIDEO_SAA6588) += saa6588.o
-obj-$(CONFIG_VIDEO_SAA5246A) += saa5246a.o
-obj-$(CONFIG_VIDEO_SAA5249) += saa5249.o
obj-$(CONFIG_VIDEO_TDA9840) += tda9840.o
obj-$(CONFIG_VIDEO_TEA6415C) += tea6415c.o
obj-$(CONFIG_VIDEO_TEA6420) += tea6420.o
@@ -73,12 +71,15 @@ obj-$(CONFIG_VIDEO_OV7670) += ov7670.o
obj-$(CONFIG_VIDEO_TCM825X) += tcm825x.o
obj-$(CONFIG_VIDEO_TVEEPROM) += tveeprom.o
obj-$(CONFIG_VIDEO_MT9V011) += mt9v011.o
+obj-$(CONFIG_VIDEO_SR030PC30) += sr030pc30.o
+obj-$(CONFIG_SOC_CAMERA_IMX074) += imx074.o
obj-$(CONFIG_SOC_CAMERA_MT9M001) += mt9m001.o
obj-$(CONFIG_SOC_CAMERA_MT9M111) += mt9m111.o
obj-$(CONFIG_SOC_CAMERA_MT9T031) += mt9t031.o
obj-$(CONFIG_SOC_CAMERA_MT9T112) += mt9t112.o
obj-$(CONFIG_SOC_CAMERA_MT9V022) += mt9v022.o
+obj-$(CONFIG_SOC_CAMERA_OV6650) += ov6650.o
obj-$(CONFIG_SOC_CAMERA_OV772X) += ov772x.o
obj-$(CONFIG_SOC_CAMERA_OV9640) += ov9640.o
obj-$(CONFIG_SOC_CAMERA_RJ54N1) += rj54n1cb0c.o
@@ -93,10 +94,6 @@ obj-$(CONFIG_VIDEO_BWQCAM) += bw-qcam.o
obj-$(CONFIG_VIDEO_W9966) += w9966.o
obj-$(CONFIG_VIDEO_PMS) += pms.o
obj-$(CONFIG_VIDEO_VINO) += vino.o
-obj-$(CONFIG_VIDEO_STRADIS) += stradis.o
-obj-$(CONFIG_VIDEO_CPIA) += cpia.o
-obj-$(CONFIG_VIDEO_CPIA_PP) += cpia_pp.o
-obj-$(CONFIG_VIDEO_CPIA_USB) += cpia_usb.o
obj-$(CONFIG_VIDEO_MEYE) += meye.o
obj-$(CONFIG_VIDEO_SAA7134) += saa7134/
obj-$(CONFIG_VIDEO_CX88) += cx88/
@@ -125,6 +122,8 @@ obj-$(CONFIG_VIDEO_CX2341X) += cx2341x.o
obj-$(CONFIG_VIDEO_CAFE_CCIC) += cafe_ccic.o
+obj-$(CONFIG_VIDEO_VIA_CAMERA) += via-camera.o
+
obj-$(CONFIG_USB_DABUSB) += dabusb.o
obj-$(CONFIG_USB_SE401) += se401.o
obj-$(CONFIG_USB_ZR364XX) += zr364xx.o
@@ -163,6 +162,7 @@ obj-$(CONFIG_VIDEO_MX3) += mx3_camera.o
obj-$(CONFIG_VIDEO_PXA27x) += pxa_camera.o
obj-$(CONFIG_VIDEO_SH_MOBILE_CSI2) += sh_mobile_csi2.o
obj-$(CONFIG_VIDEO_SH_MOBILE_CEU) += sh_mobile_ceu_camera.o
+obj-$(CONFIG_VIDEO_OMAP1) += omap1_camera.o
obj-$(CONFIG_VIDEO_SAMSUNG_S5P_FIMC) += s5p-fimc/
obj-$(CONFIG_ARCH_DAVINCI) += davinci/
diff --git a/drivers/media/video/adv7170.c b/drivers/media/video/adv7170.c
index 48e89fbf391b..23ba5c37c3e4 100644
--- a/drivers/media/video/adv7170.c
+++ b/drivers/media/video/adv7170.c
@@ -34,11 +34,9 @@
#include <linux/ioctl.h>
#include <asm/uaccess.h>
#include <linux/i2c.h>
-#include <linux/i2c-id.h>
#include <linux/videodev2.h>
#include <media/v4l2-device.h>
#include <media/v4l2-chip-ident.h>
-#include <media/v4l2-i2c-drv.h>
MODULE_DESCRIPTION("Analog Devices ADV7170 video encoder driver");
MODULE_AUTHOR("Maxim Yevtyushkin");
@@ -337,9 +335,25 @@ static const struct i2c_device_id adv7170_id[] = {
};
MODULE_DEVICE_TABLE(i2c, adv7170_id);
-static struct v4l2_i2c_driver_data v4l2_i2c_data = {
- .name = "adv7170",
- .probe = adv7170_probe,
- .remove = adv7170_remove,
- .id_table = adv7170_id,
+static struct i2c_driver adv7170_driver = {
+ .driver = {
+ .owner = THIS_MODULE,
+ .name = "adv7170",
+ },
+ .probe = adv7170_probe,
+ .remove = adv7170_remove,
+ .id_table = adv7170_id,
};
+
+static __init int init_adv7170(void)
+{
+ return i2c_add_driver(&adv7170_driver);
+}
+
+static __exit void exit_adv7170(void)
+{
+ i2c_del_driver(&adv7170_driver);
+}
+
+module_init(init_adv7170);
+module_exit(exit_adv7170);
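The adv7170 hunk above is the first of several identical conversions in this series (adv7175, bt819, bt856 and bt866 follow the same shape): with the v4l2-i2c-drv.h wrapper gone, each driver declares a plain struct i2c_driver and registers it from its own module init/exit functions. Below is a minimal, hypothetical sketch of that boilerplate with "foo" placeholders standing in for a real driver; later kernels can collapse the init/exit pair further with the module_i2c_driver() helper.

/*
 * Illustrative sketch only, not part of the patch.  "foo" and its
 * probe/remove/id_table symbols are placeholders for a real driver.
 */
#include <linux/module.h>
#include <linux/i2c.h>

static int foo_probe(struct i2c_client *client, const struct i2c_device_id *id)
{
	return 0;
}

static int foo_remove(struct i2c_client *client)
{
	return 0;
}

static const struct i2c_device_id foo_id[] = {
	{ "foo", 0 },
	{ }
};
MODULE_DEVICE_TABLE(i2c, foo_id);

static struct i2c_driver foo_driver = {
	.driver = {
		.owner = THIS_MODULE,
		.name  = "foo",
	},
	.probe    = foo_probe,
	.remove   = foo_remove,
	.id_table = foo_id,
};

static int __init foo_init(void)
{
	return i2c_add_driver(&foo_driver);
}

static void __exit foo_exit(void)
{
	i2c_del_driver(&foo_driver);
}

module_init(foo_init);
module_exit(foo_exit);

MODULE_LICENSE("GPL");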
diff --git a/drivers/media/video/adv7175.c b/drivers/media/video/adv7175.c
index f1ba0d742c65..f318b51448b3 100644
--- a/drivers/media/video/adv7175.c
+++ b/drivers/media/video/adv7175.c
@@ -30,11 +30,9 @@
#include <linux/ioctl.h>
#include <asm/uaccess.h>
#include <linux/i2c.h>
-#include <linux/i2c-id.h>
#include <linux/videodev2.h>
#include <media/v4l2-device.h>
#include <media/v4l2-chip-ident.h>
-#include <media/v4l2-i2c-drv.h>
MODULE_DESCRIPTION("Analog Devices ADV7175 video encoder driver");
MODULE_AUTHOR("Dave Perks");
@@ -376,9 +374,25 @@ static const struct i2c_device_id adv7175_id[] = {
};
MODULE_DEVICE_TABLE(i2c, adv7175_id);
-static struct v4l2_i2c_driver_data v4l2_i2c_data = {
- .name = "adv7175",
- .probe = adv7175_probe,
- .remove = adv7175_remove,
- .id_table = adv7175_id,
+static struct i2c_driver adv7175_driver = {
+ .driver = {
+ .owner = THIS_MODULE,
+ .name = "adv7175",
+ },
+ .probe = adv7175_probe,
+ .remove = adv7175_remove,
+ .id_table = adv7175_id,
};
+
+static __init int init_adv7175(void)
+{
+ return i2c_add_driver(&adv7175_driver);
+}
+
+static __exit void exit_adv7175(void)
+{
+ i2c_del_driver(&adv7175_driver);
+}
+
+module_init(init_adv7175);
+module_exit(exit_adv7175);
diff --git a/drivers/media/video/adv7180.c b/drivers/media/video/adv7180.c
index 23e610f62736..d2138d06bcad 100644
--- a/drivers/media/video/adv7180.c
+++ b/drivers/media/video/adv7180.c
@@ -22,7 +22,6 @@
#include <linux/kernel.h>
#include <linux/interrupt.h>
#include <linux/i2c.h>
-#include <linux/i2c-id.h>
#include <linux/slab.h>
#include <media/v4l2-ioctl.h>
#include <linux/videodev2.h>
diff --git a/drivers/media/video/au0828/au0828-cards.c b/drivers/media/video/au0828/au0828-cards.c
index 57dd9195daf5..01be89fa5c78 100644
--- a/drivers/media/video/au0828/au0828-cards.c
+++ b/drivers/media/video/au0828/au0828-cards.c
@@ -212,7 +212,7 @@ void au0828_card_setup(struct au0828_dev *dev)
be abstracted out if we ever need to support a different
demod) */
sd = v4l2_i2c_new_subdev(&dev->v4l2_dev, &dev->i2c_adap,
- "au8522", "au8522", 0x8e >> 1, NULL);
+ "au8522", 0x8e >> 1, NULL);
if (sd == NULL)
printk(KERN_ERR "analog subdev registration failed\n");
}
@@ -221,7 +221,7 @@ void au0828_card_setup(struct au0828_dev *dev)
if (dev->board.tuner_type != TUNER_ABSENT) {
/* Load the tuner module, which does the attach */
sd = v4l2_i2c_new_subdev(&dev->v4l2_dev, &dev->i2c_adap,
- "tuner", "tuner", dev->board.tuner_addr, NULL);
+ "tuner", dev->board.tuner_addr, NULL);
if (sd == NULL)
printk(KERN_ERR "tuner subdev registration fail\n");
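The au0828 hunks above (and the many similar ones further down) are a pure API adaptation: v4l2_i2c_new_subdev() no longer takes a separate module-name string, and the module is loaded through the I2C device-name alias instead. A hedged sketch of a caller against the reduced signature follows; demo_register_demod() and its parameters are illustrative, and only the "au8522" name and the 0x8e >> 1 address come from the hunk above.

/*
 * Sketch only: registering the analog demod subdev with the reduced
 * v4l2_i2c_new_subdev() signature (no module-name argument).
 */
#include <linux/errno.h>
#include <linux/i2c.h>
#include <media/v4l2-device.h>
#include <media/v4l2-common.h>

static int demo_register_demod(struct v4l2_device *v4l2_dev,
			       struct i2c_adapter *adap)
{
	struct v4l2_subdev *sd;

	/* module loading relies on the "au8522" i2c alias, not a name string */
	sd = v4l2_i2c_new_subdev(v4l2_dev, adap, "au8522", 0x8e >> 1, NULL);

	return sd ? 0 : -ENODEV;
}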
diff --git a/drivers/media/video/au0828/au0828-video.c b/drivers/media/video/au0828/au0828-video.c
index 7989a7ba7c40..162fd5f9d448 100644
--- a/drivers/media/video/au0828/au0828-video.c
+++ b/drivers/media/video/au0828/au0828-video.c
@@ -965,7 +965,7 @@ static int au0828_v4l2_open(struct file *filp)
NULL, &dev->slock,
V4L2_BUF_TYPE_VIDEO_CAPTURE,
V4L2_FIELD_INTERLACED,
- sizeof(struct au0828_buffer), fh);
+ sizeof(struct au0828_buffer), fh, NULL);
/* VBI Setup */
dev->vbi_width = 720;
@@ -974,7 +974,7 @@ static int au0828_v4l2_open(struct file *filp)
NULL, &dev->slock,
V4L2_BUF_TYPE_VBI_CAPTURE,
V4L2_FIELD_SEQ_TB,
- sizeof(struct au0828_buffer), fh);
+ sizeof(struct au0828_buffer), fh, NULL);
return ret;
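Likewise, the two au0828-video.c hunks above only track an interface change: videobuf_queue_sg_init() gained a trailing ext_lock argument, and passing NULL keeps the queue serialised by its internal vb_lock as before. A minimal sketch of the new call is shown below; demo_init_queue() and its parameters are placeholders, and sizeof(struct videobuf_buffer) stands in for the driver's real per-buffer size.

/*
 * Sketch only: videobuf S/G queue setup after the added ext_lock
 * parameter.  Passing NULL means no external serialisation mutex.
 */
#include <linux/device.h>
#include <linux/spinlock.h>
#include <linux/videodev2.h>
#include <media/videobuf-dma-sg.h>

static void demo_init_queue(struct videobuf_queue *q,
			    struct videobuf_queue_ops *ops,
			    struct device *dev, spinlock_t *irqlock,
			    void *priv)
{
	videobuf_queue_sg_init(q, ops, dev, irqlock,
			       V4L2_BUF_TYPE_VIDEO_CAPTURE,
			       V4L2_FIELD_INTERLACED,
			       sizeof(struct videobuf_buffer),
			       priv, NULL /* ext_lock */);
}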
diff --git a/drivers/media/video/bt819.c b/drivers/media/video/bt819.c
index 770cb9accf81..c38300fc0b1d 100644
--- a/drivers/media/video/bt819.c
+++ b/drivers/media/video/bt819.c
@@ -33,12 +33,10 @@
#include <linux/ioctl.h>
#include <linux/delay.h>
#include <linux/i2c.h>
-#include <linux/i2c-id.h>
#include <linux/videodev2.h>
#include <linux/slab.h>
#include <media/v4l2-device.h>
#include <media/v4l2-chip-ident.h>
-#include <media/v4l2-i2c-drv.h>
#include <media/bt819.h>
MODULE_DESCRIPTION("Brooktree-819 video decoder driver");
@@ -537,9 +535,25 @@ static const struct i2c_device_id bt819_id[] = {
};
MODULE_DEVICE_TABLE(i2c, bt819_id);
-static struct v4l2_i2c_driver_data v4l2_i2c_data = {
- .name = "bt819",
- .probe = bt819_probe,
- .remove = bt819_remove,
- .id_table = bt819_id,
+static struct i2c_driver bt819_driver = {
+ .driver = {
+ .owner = THIS_MODULE,
+ .name = "bt819",
+ },
+ .probe = bt819_probe,
+ .remove = bt819_remove,
+ .id_table = bt819_id,
};
+
+static __init int init_bt819(void)
+{
+ return i2c_add_driver(&bt819_driver);
+}
+
+static __exit void exit_bt819(void)
+{
+ i2c_del_driver(&bt819_driver);
+}
+
+module_init(init_bt819);
+module_exit(exit_bt819);
diff --git a/drivers/media/video/bt856.c b/drivers/media/video/bt856.c
index ae3337392505..a43059d4c799 100644
--- a/drivers/media/video/bt856.c
+++ b/drivers/media/video/bt856.c
@@ -34,11 +34,9 @@
#include <linux/ioctl.h>
#include <asm/uaccess.h>
#include <linux/i2c.h>
-#include <linux/i2c-id.h>
#include <linux/videodev2.h>
#include <media/v4l2-device.h>
#include <media/v4l2-chip-ident.h>
-#include <media/v4l2-i2c-drv.h>
MODULE_DESCRIPTION("Brooktree-856A video encoder driver");
MODULE_AUTHOR("Mike Bernson & Dave Perks");
@@ -262,9 +260,25 @@ static const struct i2c_device_id bt856_id[] = {
};
MODULE_DEVICE_TABLE(i2c, bt856_id);
-static struct v4l2_i2c_driver_data v4l2_i2c_data = {
- .name = "bt856",
- .probe = bt856_probe,
- .remove = bt856_remove,
- .id_table = bt856_id,
+static struct i2c_driver bt856_driver = {
+ .driver = {
+ .owner = THIS_MODULE,
+ .name = "bt856",
+ },
+ .probe = bt856_probe,
+ .remove = bt856_remove,
+ .id_table = bt856_id,
};
+
+static __init int init_bt856(void)
+{
+ return i2c_add_driver(&bt856_driver);
+}
+
+static __exit void exit_bt856(void)
+{
+ i2c_del_driver(&bt856_driver);
+}
+
+module_init(init_bt856);
+module_exit(exit_bt856);
diff --git a/drivers/media/video/bt866.c b/drivers/media/video/bt866.c
index 62ac422bb159..4e5dcea0501d 100644
--- a/drivers/media/video/bt866.c
+++ b/drivers/media/video/bt866.c
@@ -34,11 +34,9 @@
#include <linux/ioctl.h>
#include <asm/uaccess.h>
#include <linux/i2c.h>
-#include <linux/i2c-id.h>
#include <linux/videodev2.h>
#include <media/v4l2-device.h>
#include <media/v4l2-chip-ident.h>
-#include <media/v4l2-i2c-drv.h>
MODULE_DESCRIPTION("Brooktree-866 video encoder driver");
MODULE_AUTHOR("Mike Bernson & Dave Perks");
@@ -232,9 +230,25 @@ static const struct i2c_device_id bt866_id[] = {
};
MODULE_DEVICE_TABLE(i2c, bt866_id);
-static struct v4l2_i2c_driver_data v4l2_i2c_data = {
- .name = "bt866",
- .probe = bt866_probe,
- .remove = bt866_remove,
- .id_table = bt866_id,
+static struct i2c_driver bt866_driver = {
+ .driver = {
+ .owner = THIS_MODULE,
+ .name = "bt866",
+ },
+ .probe = bt866_probe,
+ .remove = bt866_remove,
+ .id_table = bt866_id,
};
+
+static __init int init_bt866(void)
+{
+ return i2c_add_driver(&bt866_driver);
+}
+
+static __exit void exit_bt866(void)
+{
+ i2c_del_driver(&bt866_driver);
+}
+
+module_init(init_bt866);
+module_exit(exit_bt866);
diff --git a/drivers/media/video/bt8xx/bttv-cards.c b/drivers/media/video/bt8xx/bttv-cards.c
index 7af56cde0c79..49efcf660ba6 100644
--- a/drivers/media/video/bt8xx/bttv-cards.c
+++ b/drivers/media/video/bt8xx/bttv-cards.c
@@ -3529,7 +3529,7 @@ void __devinit bttv_init_card2(struct bttv *btv)
struct v4l2_subdev *sd;
sd = v4l2_i2c_new_subdev(&btv->c.v4l2_dev,
- &btv->c.i2c_adap, "saa6588", "saa6588", 0, addrs);
+ &btv->c.i2c_adap, "saa6588", 0, addrs);
btv->has_saa6588 = (sd != NULL);
}
@@ -3554,7 +3554,7 @@ void __devinit bttv_init_card2(struct bttv *btv)
};
btv->sd_msp34xx = v4l2_i2c_new_subdev(&btv->c.v4l2_dev,
- &btv->c.i2c_adap, "msp3400", "msp3400", 0, addrs);
+ &btv->c.i2c_adap, "msp3400", 0, addrs);
if (btv->sd_msp34xx)
return;
goto no_audio;
@@ -3568,7 +3568,7 @@ void __devinit bttv_init_card2(struct bttv *btv)
};
if (v4l2_i2c_new_subdev(&btv->c.v4l2_dev,
- &btv->c.i2c_adap, "tda7432", "tda7432", 0, addrs))
+ &btv->c.i2c_adap, "tda7432", 0, addrs))
return;
goto no_audio;
}
@@ -3576,7 +3576,7 @@ void __devinit bttv_init_card2(struct bttv *btv)
case 3: {
/* The user specified that we should probe for tvaudio */
btv->sd_tvaudio = v4l2_i2c_new_subdev(&btv->c.v4l2_dev,
- &btv->c.i2c_adap, "tvaudio", "tvaudio", 0, tvaudio_addrs());
+ &btv->c.i2c_adap, "tvaudio", 0, tvaudio_addrs());
if (btv->sd_tvaudio)
return;
goto no_audio;
@@ -3596,11 +3596,11 @@ void __devinit bttv_init_card2(struct bttv *btv)
found is really something else (e.g. a tea6300). */
if (!bttv_tvcards[btv->c.type].no_msp34xx) {
btv->sd_msp34xx = v4l2_i2c_new_subdev(&btv->c.v4l2_dev,
- &btv->c.i2c_adap, "msp3400", "msp3400",
+ &btv->c.i2c_adap, "msp3400",
0, I2C_ADDRS(I2C_ADDR_MSP3400 >> 1));
} else if (bttv_tvcards[btv->c.type].msp34xx_alt) {
btv->sd_msp34xx = v4l2_i2c_new_subdev(&btv->c.v4l2_dev,
- &btv->c.i2c_adap, "msp3400", "msp3400",
+ &btv->c.i2c_adap, "msp3400",
0, I2C_ADDRS(I2C_ADDR_MSP3400_ALT >> 1));
}
@@ -3616,13 +3616,13 @@ void __devinit bttv_init_card2(struct bttv *btv)
};
if (v4l2_i2c_new_subdev(&btv->c.v4l2_dev,
- &btv->c.i2c_adap, "tda7432", "tda7432", 0, addrs))
+ &btv->c.i2c_adap, "tda7432", 0, addrs))
return;
}
/* Now see if we can find one of the tvaudio devices. */
btv->sd_tvaudio = v4l2_i2c_new_subdev(&btv->c.v4l2_dev,
- &btv->c.i2c_adap, "tvaudio", "tvaudio", 0, tvaudio_addrs());
+ &btv->c.i2c_adap, "tvaudio", 0, tvaudio_addrs());
if (btv->sd_tvaudio)
return;
@@ -3646,13 +3646,13 @@ void __devinit bttv_init_tuner(struct bttv *btv)
/* Load tuner module before issuing tuner config call! */
if (bttv_tvcards[btv->c.type].has_radio)
v4l2_i2c_new_subdev(&btv->c.v4l2_dev,
- &btv->c.i2c_adap, "tuner", "tuner",
+ &btv->c.i2c_adap, "tuner",
0, v4l2_i2c_tuner_addrs(ADDRS_RADIO));
v4l2_i2c_new_subdev(&btv->c.v4l2_dev,
- &btv->c.i2c_adap, "tuner", "tuner",
+ &btv->c.i2c_adap, "tuner",
0, v4l2_i2c_tuner_addrs(ADDRS_DEMOD));
v4l2_i2c_new_subdev(&btv->c.v4l2_dev,
- &btv->c.i2c_adap, "tuner", "tuner",
+ &btv->c.i2c_adap, "tuner",
0, v4l2_i2c_tuner_addrs(ADDRS_TV_WITH_DEMOD));
tun_setup.mode_mask = T_ANALOG_TV | T_DIGITAL_TV;
diff --git a/drivers/media/video/bt8xx/bttv-driver.c b/drivers/media/video/bt8xx/bttv-driver.c
index 38c7f78ad9cf..a529619e51f6 100644
--- a/drivers/media/video/bt8xx/bttv-driver.c
+++ b/drivers/media/video/bt8xx/bttv-driver.c
@@ -42,7 +42,6 @@
#include <linux/fs.h>
#include <linux/kernel.h>
#include <linux/sched.h>
-#include <linux/smp_lock.h>
#include <linux/interrupt.h>
#include <linux/kdev_t.h>
#include "bttvp.h"
@@ -842,7 +841,7 @@ static const struct v4l2_queryctrl *ctrl_by_id(int id)
RESOURCE_OVERLAY)
static
-int check_alloc_btres(struct bttv *btv, struct bttv_fh *fh, int bit)
+int check_alloc_btres_lock(struct bttv *btv, struct bttv_fh *fh, int bit)
{
int xbits; /* mutual exclusive resources */
@@ -935,7 +934,7 @@ disclaim_video_lines(struct bttv *btv)
}
static
-void free_btres(struct bttv *btv, struct bttv_fh *fh, int bits)
+void free_btres_lock(struct bttv *btv, struct bttv_fh *fh, int bits)
{
if ((fh->resources & bits) != bits) {
/* trying to free ressources not allocated by us ... */
@@ -1682,7 +1681,7 @@ bttv_switch_overlay(struct bttv *btv, struct bttv_fh *fh,
kfree(old);
}
if (NULL == new)
- free_btres(btv,fh,RESOURCE_OVERLAY);
+ free_btres_lock(btv,fh,RESOURCE_OVERLAY);
dprintk("switch_overlay: done\n");
return retval;
}
@@ -1859,21 +1858,25 @@ static int bttv_s_std(struct file *file, void *priv, v4l2_std_id *id)
unsigned int i;
int err;
+ mutex_lock(&btv->lock);
err = v4l2_prio_check(&btv->prio, fh->prio);
- if (0 != err)
- return err;
+ if (err)
+ goto err;
for (i = 0; i < BTTV_TVNORMS; i++)
if (*id & bttv_tvnorms[i].v4l2_id)
break;
- if (i == BTTV_TVNORMS)
- return -EINVAL;
+ if (i == BTTV_TVNORMS) {
+ err = -EINVAL;
+ goto err;
+ }
- mutex_lock(&btv->lock);
set_tvnorm(btv, i);
+
+err:
mutex_unlock(&btv->lock);
- return 0;
+ return err;
}
static int bttv_querystd(struct file *file, void *f, v4l2_std_id *id)
@@ -1893,10 +1896,13 @@ static int bttv_enum_input(struct file *file, void *priv,
{
struct bttv_fh *fh = priv;
struct bttv *btv = fh->btv;
- int n;
+ int rc = 0;
- if (i->index >= bttv_tvcards[btv->c.type].video_inputs)
- return -EINVAL;
+ mutex_lock(&btv->lock);
+ if (i->index >= bttv_tvcards[btv->c.type].video_inputs) {
+ rc = -EINVAL;
+ goto err;
+ }
i->type = V4L2_INPUT_TYPE_CAMERA;
i->audioset = 1;
@@ -1919,10 +1925,12 @@ static int bttv_enum_input(struct file *file, void *priv,
i->status |= V4L2_IN_ST_NO_H_LOCK;
}
- for (n = 0; n < BTTV_TVNORMS; n++)
- i->std |= bttv_tvnorms[n].v4l2_id;
+ i->std = BTTV_NORMS;
- return 0;
+err:
+ mutex_unlock(&btv->lock);
+
+ return rc;
}
static int bttv_g_input(struct file *file, void *priv, unsigned int *i)
@@ -1930,7 +1938,10 @@ static int bttv_g_input(struct file *file, void *priv, unsigned int *i)
struct bttv_fh *fh = priv;
struct bttv *btv = fh->btv;
+ mutex_lock(&btv->lock);
*i = btv->input;
+ mutex_unlock(&btv->lock);
+
return 0;
}
@@ -1941,15 +1952,19 @@ static int bttv_s_input(struct file *file, void *priv, unsigned int i)
int err;
+ mutex_lock(&btv->lock);
err = v4l2_prio_check(&btv->prio, fh->prio);
- if (0 != err)
- return err;
+ if (unlikely(err))
+ goto err;
- if (i > bttv_tvcards[btv->c.type].video_inputs)
- return -EINVAL;
+ if (i > bttv_tvcards[btv->c.type].video_inputs) {
+ err = -EINVAL;
+ goto err;
+ }
- mutex_lock(&btv->lock);
set_input(btv, i, btv->tvnorm);
+
+err:
mutex_unlock(&btv->lock);
return 0;
}
@@ -1961,22 +1976,25 @@ static int bttv_s_tuner(struct file *file, void *priv,
struct bttv *btv = fh->btv;
int err;
- err = v4l2_prio_check(&btv->prio, fh->prio);
- if (0 != err)
- return err;
-
- if (btv->tuner_type == TUNER_ABSENT)
- return -EINVAL;
-
- if (0 != t->index)
+ if (unlikely(0 != t->index))
return -EINVAL;
mutex_lock(&btv->lock);
+ if (unlikely(btv->tuner_type == TUNER_ABSENT)) {
+ err = -EINVAL;
+ goto err;
+ }
+
+ err = v4l2_prio_check(&btv->prio, fh->prio);
+ if (unlikely(err))
+ goto err;
+
bttv_call_all(btv, tuner, s_tuner, t);
if (btv->audio_mode_gpio)
btv->audio_mode_gpio(btv, t, 1);
+err:
mutex_unlock(&btv->lock);
return 0;
@@ -1988,8 +2006,10 @@ static int bttv_g_frequency(struct file *file, void *priv,
struct bttv_fh *fh = priv;
struct bttv *btv = fh->btv;
+ mutex_lock(&btv->lock);
f->type = btv->radio_user ? V4L2_TUNER_RADIO : V4L2_TUNER_ANALOG_TV;
f->frequency = btv->freq;
+ mutex_unlock(&btv->lock);
return 0;
}
@@ -2001,21 +2021,26 @@ static int bttv_s_frequency(struct file *file, void *priv,
struct bttv *btv = fh->btv;
int err;
- err = v4l2_prio_check(&btv->prio, fh->prio);
- if (0 != err)
- return err;
-
if (unlikely(f->tuner != 0))
return -EINVAL;
- if (unlikely(f->type != (btv->radio_user
- ? V4L2_TUNER_RADIO : V4L2_TUNER_ANALOG_TV)))
- return -EINVAL;
+
mutex_lock(&btv->lock);
+ err = v4l2_prio_check(&btv->prio, fh->prio);
+ if (unlikely(err))
+ goto err;
+
+ if (unlikely(f->type != (btv->radio_user
+ ? V4L2_TUNER_RADIO : V4L2_TUNER_ANALOG_TV))) {
+ err = -EINVAL;
+ goto err;
+ }
btv->freq = f->frequency;
bttv_call_all(btv, tuner, s_frequency, f);
if (btv->has_matchbox && btv->radio_user)
tea5757_set_freq(btv, btv->freq);
+err:
mutex_unlock(&btv->lock);
+
return 0;
}
@@ -2124,7 +2149,7 @@ bttv_crop_adjust (struct bttv_crop * c,
also adjust the current cropping parameters to get closer to the
desired image size. */
static int
-limit_scaled_size (struct bttv_fh * fh,
+limit_scaled_size_lock (struct bttv_fh * fh,
__s32 * width,
__s32 * height,
enum v4l2_field field,
@@ -2238,7 +2263,7 @@ limit_scaled_size (struct bttv_fh * fh,
may also adjust the current cropping parameters to get closer
to the desired window size. */
static int
-verify_window (struct bttv_fh * fh,
+verify_window_lock (struct bttv_fh * fh,
struct v4l2_window * win,
int adjust_size,
int adjust_crop)
@@ -2257,7 +2282,9 @@ verify_window (struct bttv_fh * fh,
if (V4L2_FIELD_ANY == field) {
__s32 height2;
+ mutex_lock(&fh->btv->lock);
height2 = fh->btv->crop[!!fh->do_crop].rect.height >> 1;
+ mutex_unlock(&fh->btv->lock);
field = (win->w.height > height2)
? V4L2_FIELD_INTERLACED
: V4L2_FIELD_TOP;
@@ -2292,7 +2319,7 @@ verify_window (struct bttv_fh * fh,
win->w.width -= win->w.left & ~width_mask;
win->w.left = (win->w.left - width_mask - 1) & width_mask;
- rc = limit_scaled_size(fh, &win->w.width, &win->w.height,
+ rc = limit_scaled_size_lock(fh, &win->w.width, &win->w.height,
field, width_mask,
/* width_bias: round down */ 0,
adjust_size, adjust_crop);
@@ -2303,7 +2330,7 @@ verify_window (struct bttv_fh * fh,
return 0;
}
-static int setup_window(struct bttv_fh *fh, struct bttv *btv,
+static int setup_window_lock(struct bttv_fh *fh, struct bttv *btv,
struct v4l2_window *win, int fixup)
{
struct v4l2_clip *clips = NULL;
@@ -2313,7 +2340,7 @@ static int setup_window(struct bttv_fh *fh, struct bttv *btv,
return -EINVAL;
if (!(fh->ovfmt->flags & FORMAT_FLAGS_PACKED))
return -EINVAL;
- retval = verify_window(fh, win,
+ retval = verify_window_lock(fh, win,
/* adjust_size */ fixup,
/* adjust_crop */ fixup);
if (0 != retval)
@@ -2332,6 +2359,8 @@ static int setup_window(struct bttv_fh *fh, struct bttv *btv,
return -EFAULT;
}
}
+
+ mutex_lock(&fh->cap.vb_lock);
/* clip against screen */
if (NULL != btv->fbuf.base)
n = btcx_screen_clips(btv->fbuf.fmt.width, btv->fbuf.fmt.height,
@@ -2354,7 +2383,6 @@ static int setup_window(struct bttv_fh *fh, struct bttv *btv,
BUG();
}
- mutex_lock(&fh->cap.vb_lock);
kfree(fh->ov.clips);
fh->ov.clips = clips;
fh->ov.nclips = n;
@@ -2362,6 +2390,14 @@ static int setup_window(struct bttv_fh *fh, struct bttv *btv,
fh->ov.w = win->w;
fh->ov.field = win->field;
fh->ov.setup_ok = 1;
+
+ /*
+ * FIXME: btv is protected by btv->lock mutex, while btv->init
+ * is protected by fh->cap.vb_lock. This seems to open the
+ * possibility of race conditions. It would probably be better
+ * to unify those locks, or to store the init values that are
+ * consumed by the videobuf callbacks in some other way.
+ */
btv->init.ov.w.width = win->w.width;
btv->init.ov.w.height = win->w.height;
btv->init.ov.field = win->field;
@@ -2490,7 +2526,9 @@ static int bttv_try_fmt_vid_cap(struct file *file, void *priv,
if (V4L2_FIELD_ANY == field) {
__s32 height2;
+ mutex_lock(&btv->lock);
height2 = btv->crop[!!fh->do_crop].rect.height >> 1;
+ mutex_unlock(&btv->lock);
field = (f->fmt.pix.height > height2)
? V4L2_FIELD_INTERLACED
: V4L2_FIELD_BOTTOM;
@@ -2516,7 +2554,7 @@ static int bttv_try_fmt_vid_cap(struct file *file, void *priv,
width = f->fmt.pix.width;
height = f->fmt.pix.height;
- rc = limit_scaled_size(fh, &width, &height, field,
+ rc = limit_scaled_size_lock(fh, &width, &height, field,
/* width_mask: 4 pixels */ ~3,
/* width_bias: nearest */ 2,
/* adjust_size */ 1,
@@ -2536,7 +2574,7 @@ static int bttv_try_fmt_vid_overlay(struct file *file, void *priv,
{
struct bttv_fh *fh = priv;
- return verify_window(fh, &f->fmt.win,
+ return verify_window_lock(fh, &f->fmt.win,
/* adjust_size */ 1,
/* adjust_crop */ 0);
}
@@ -2563,7 +2601,7 @@ static int bttv_s_fmt_vid_cap(struct file *file, void *priv,
height = f->fmt.pix.height;
field = f->fmt.pix.field;
- retval = limit_scaled_size(fh, &width, &height, f->fmt.pix.field,
+ retval = limit_scaled_size_lock(fh, &width, &height, f->fmt.pix.field,
/* width_mask: 4 pixels */ ~3,
/* width_bias: nearest */ 2,
/* adjust_size */ 1,
@@ -2601,7 +2639,7 @@ static int bttv_s_fmt_vid_overlay(struct file *file, void *priv,
return -EINVAL;
}
- return setup_window(fh, btv, &f->fmt.win, 1);
+ return setup_window_lock(fh, btv, &f->fmt.win, 1);
}
#ifdef CONFIG_VIDEO_V4L1_COMPAT
@@ -2651,11 +2689,15 @@ static int bttv_querycap(struct file *file, void *priv,
V4L2_CAP_VBI_CAPTURE |
V4L2_CAP_READWRITE |
V4L2_CAP_STREAMING;
- if (btv->has_saa6588)
- cap->capabilities |= V4L2_CAP_RDS_CAPTURE;
if (no_overlay <= 0)
cap->capabilities |= V4L2_CAP_VIDEO_OVERLAY;
+ /*
+ * No need to lock here: those vars are initialized during board
+ * probe and remain untouched for the rest of the driver's lifecycle
+ */
+ if (btv->has_saa6588)
+ cap->capabilities |= V4L2_CAP_RDS_CAPTURE;
if (btv->tuner_type != TUNER_ABSENT)
cap->capabilities |= V4L2_CAP_TUNER;
return 0;
@@ -2730,19 +2772,25 @@ static int bttv_overlay(struct file *file, void *f, unsigned int on)
struct bttv_fh *fh = f;
struct bttv *btv = fh->btv;
struct bttv_buffer *new;
- int retval;
+ int retval = 0;
if (on) {
+ mutex_lock(&fh->cap.vb_lock);
/* verify args */
- if (NULL == btv->fbuf.base)
+ if (unlikely(!btv->fbuf.base)) {
+ mutex_unlock(&fh->cap.vb_lock);
return -EINVAL;
- if (!fh->ov.setup_ok) {
+ }
+ if (unlikely(!fh->ov.setup_ok)) {
dprintk("bttv%d: overlay: !setup_ok\n", btv->c.nr);
- return -EINVAL;
+ retval = -EINVAL;
}
+ if (retval)
+ return retval;
+ mutex_unlock(&fh->cap.vb_lock);
}
- if (!check_alloc_btres(btv, fh, RESOURCE_OVERLAY))
+ if (!check_alloc_btres_lock(btv, fh, RESOURCE_OVERLAY))
return -EBUSY;
mutex_lock(&fh->cap.vb_lock);
@@ -2785,7 +2833,7 @@ static int bttv_s_fbuf(struct file *file, void *f,
__s32 width = fb->fmt.width;
__s32 height = fb->fmt.height;
- retval = limit_scaled_size(fh, &width, &height,
+ retval = limit_scaled_size_lock(fh, &width, &height,
V4L2_FIELD_INTERLACED,
/* width_mask */ ~3,
/* width_bias */ 2,
@@ -2852,7 +2900,7 @@ static int bttv_qbuf(struct file *file, void *priv, struct v4l2_buffer *b)
struct bttv *btv = fh->btv;
int res = bttv_resource(fh);
- if (!check_alloc_btres(btv, fh, res))
+ if (!check_alloc_btres_lock(btv, fh, res))
return -EBUSY;
return videobuf_qbuf(bttv_queue(fh), b);
@@ -2872,7 +2920,7 @@ static int bttv_streamon(struct file *file, void *priv,
struct bttv *btv = fh->btv;
int res = bttv_resource(fh);
- if (!check_alloc_btres(btv, fh, res))
+ if (!check_alloc_btres_lock(btv, fh, res))
return -EBUSY;
return videobuf_streamon(bttv_queue(fh));
}
@@ -2890,7 +2938,7 @@ static int bttv_streamoff(struct file *file, void *priv,
retval = videobuf_streamoff(bttv_queue(fh));
if (retval < 0)
return retval;
- free_btres(btv, fh, res);
+ free_btres_lock(btv, fh, res);
return 0;
}
@@ -2907,6 +2955,7 @@ static int bttv_queryctrl(struct file *file, void *priv,
c->id >= V4L2_CID_PRIVATE_LASTP1))
return -EINVAL;
+ mutex_lock(&btv->lock);
if (!btv->volume_gpio && (c->id == V4L2_CID_AUDIO_VOLUME))
*c = no_ctl;
else {
@@ -2914,6 +2963,7 @@ static int bttv_queryctrl(struct file *file, void *priv,
*c = (NULL != ctrl) ? *ctrl : no_ctl;
}
+ mutex_unlock(&btv->lock);
return 0;
}
@@ -2924,8 +2974,11 @@ static int bttv_g_parm(struct file *file, void *f,
struct bttv_fh *fh = f;
struct bttv *btv = fh->btv;
+ mutex_lock(&btv->lock);
v4l2_video_std_frame_period(bttv_tvnorms[btv->tvnorm].v4l2_id,
&parm->parm.capture.timeperframe);
+ mutex_unlock(&btv->lock);
+
return 0;
}
@@ -2961,7 +3014,9 @@ static int bttv_g_priority(struct file *file, void *f, enum v4l2_priority *p)
struct bttv_fh *fh = f;
struct bttv *btv = fh->btv;
+ mutex_lock(&btv->lock);
*p = v4l2_prio_max(&btv->prio);
+ mutex_unlock(&btv->lock);
return 0;
}
@@ -2971,8 +3026,13 @@ static int bttv_s_priority(struct file *file, void *f,
{
struct bttv_fh *fh = f;
struct bttv *btv = fh->btv;
+ int rc;
- return v4l2_prio_change(&btv->prio, &fh->prio, prio);
+ mutex_lock(&btv->lock);
+ rc = v4l2_prio_change(&btv->prio, &fh->prio, prio);
+ mutex_unlock(&btv->lock);
+
+ return rc;
}
static int bttv_cropcap(struct file *file, void *priv,
@@ -2985,7 +3045,9 @@ static int bttv_cropcap(struct file *file, void *priv,
cap->type != V4L2_BUF_TYPE_VIDEO_OVERLAY)
return -EINVAL;
+ mutex_lock(&btv->lock);
*cap = bttv_tvnorms[btv->tvnorm].cropcap;
+ mutex_unlock(&btv->lock);
return 0;
}
@@ -3003,7 +3065,9 @@ static int bttv_g_crop(struct file *file, void *f, struct v4l2_crop *crop)
inconsistent with fh->width or fh->height and apps
do not expect a change here. */
+ mutex_lock(&btv->lock);
crop->c = btv->crop[!!fh->do_crop].rect;
+ mutex_unlock(&btv->lock);
return 0;
}
@@ -3024,14 +3088,15 @@ static int bttv_s_crop(struct file *file, void *f, struct v4l2_crop *crop)
crop->type != V4L2_BUF_TYPE_VIDEO_OVERLAY)
return -EINVAL;
- retval = v4l2_prio_check(&btv->prio, fh->prio);
- if (0 != retval)
- return retval;
-
/* Make sure tvnorm, vbi_end and the current cropping
parameters remain consistent until we're done. Note
- read() may change vbi_end in check_alloc_btres(). */
+ read() may change vbi_end in check_alloc_btres_lock(). */
mutex_lock(&btv->lock);
+ retval = v4l2_prio_check(&btv->prio, fh->prio);
+ if (0 != retval) {
+ mutex_unlock(&btv->lock);
+ return retval;
+ }
retval = -EBUSY;
@@ -3128,17 +3193,17 @@ static ssize_t bttv_read(struct file *file, char __user *data,
switch (fh->type) {
case V4L2_BUF_TYPE_VIDEO_CAPTURE:
- if (!check_alloc_btres(fh->btv, fh, RESOURCE_VIDEO_READ)) {
+ if (!check_alloc_btres_lock(fh->btv, fh, RESOURCE_VIDEO_READ)) {
/* VIDEO_READ in use by another fh,
or VIDEO_STREAM by any fh. */
return -EBUSY;
}
retval = videobuf_read_one(&fh->cap, data, count, ppos,
file->f_flags & O_NONBLOCK);
- free_btres(fh->btv, fh, RESOURCE_VIDEO_READ);
+ free_btres_lock(fh->btv, fh, RESOURCE_VIDEO_READ);
break;
case V4L2_BUF_TYPE_VBI_CAPTURE:
- if (!check_alloc_btres(fh->btv,fh,RESOURCE_VBI))
+ if (!check_alloc_btres_lock(fh->btv,fh,RESOURCE_VBI))
return -EBUSY;
retval = videobuf_read_stream(&fh->vbi, data, count, ppos, 1,
file->f_flags & O_NONBLOCK);
@@ -3157,20 +3222,19 @@ static unsigned int bttv_poll(struct file *file, poll_table *wait)
unsigned int rc = POLLERR;
if (V4L2_BUF_TYPE_VBI_CAPTURE == fh->type) {
- if (!check_alloc_btres(fh->btv,fh,RESOURCE_VBI))
+ if (!check_alloc_btres_lock(fh->btv,fh,RESOURCE_VBI))
return POLLERR;
return videobuf_poll_stream(file, &fh->vbi, wait);
}
+ mutex_lock(&fh->cap.vb_lock);
if (check_btres(fh,RESOURCE_VIDEO_STREAM)) {
- mutex_lock(&fh->cap.vb_lock);
/* streaming capture */
if (list_empty(&fh->cap.stream))
goto err;
buf = list_entry(fh->cap.stream.next,struct bttv_buffer,vb.stream);
} else {
/* read() capture */
- mutex_lock(&fh->cap.vb_lock);
if (NULL == fh->cap.read_buf) {
/* need to capture a new frame */
if (locked_btres(fh->btv,RESOURCE_VIDEO_STREAM))
@@ -3188,7 +3252,6 @@ static unsigned int bttv_poll(struct file *file, poll_table *wait)
fh->cap.ops->buf_queue(&fh->cap,fh->cap.read_buf);
fh->cap.read_off = 0;
}
- mutex_unlock(&fh->cap.vb_lock);
buf = (struct bttv_buffer*)fh->cap.read_buf;
}
@@ -3221,21 +3284,32 @@ static int bttv_open(struct file *file)
return -ENODEV;
}
- lock_kernel();
-
dprintk(KERN_DEBUG "bttv%d: open called (type=%s)\n",
btv->c.nr,v4l2_type_names[type]);
/* allocate per filehandle data */
- fh = kmalloc(sizeof(*fh),GFP_KERNEL);
- if (NULL == fh) {
- unlock_kernel();
+ fh = kmalloc(sizeof(*fh), GFP_KERNEL);
+ if (unlikely(!fh))
return -ENOMEM;
- }
file->private_data = fh;
+
+ /*
+ * btv is protected by btv->lock mutex, while btv->init and other
+ * streaming vars are protected by fh->cap.vb_lock. We need to take
+ * care with both locks to avoid trouble. However, vb_lock is also used
+ * inside videobuf, without taking buf->lock. So it is a very bad
+ * idea to hold both locks at the same time.
+ * Let's first copy btv->init into fh while holding cap.vb_lock, and then work
+ * with the rest of init, holding btv->lock.
+ */
+ mutex_lock(&fh->cap.vb_lock);
*fh = btv->init;
+ mutex_unlock(&fh->cap.vb_lock);
+
fh->type = type;
fh->ov.setup_ok = 0;
+
+ mutex_lock(&btv->lock);
v4l2_prio_open(&btv->prio, &fh->prio);
videobuf_queue_sg_init(&fh->cap, &bttv_video_qops,
@@ -3243,13 +3317,13 @@ static int bttv_open(struct file *file)
V4L2_BUF_TYPE_VIDEO_CAPTURE,
V4L2_FIELD_INTERLACED,
sizeof(struct bttv_buffer),
- fh);
+ fh, NULL);
videobuf_queue_sg_init(&fh->vbi, &bttv_vbi_qops,
&btv->c.pci->dev, &btv->s_lock,
V4L2_BUF_TYPE_VBI_CAPTURE,
V4L2_FIELD_SEQ_TB,
sizeof(struct bttv_buffer),
- fh);
+ fh, NULL);
set_tvnorm(btv,btv->tvnorm);
set_input(btv, btv->input, btv->tvnorm);
@@ -3272,7 +3346,7 @@ static int bttv_open(struct file *file)
bttv_vbi_fmt_reset(&fh->vbi_fmt, btv->tvnorm);
bttv_field_count(btv);
- unlock_kernel();
+ mutex_unlock(&btv->lock);
return 0;
}
@@ -3281,6 +3355,7 @@ static int bttv_release(struct file *file)
struct bttv_fh *fh = file->private_data;
struct bttv *btv = fh->btv;
+ mutex_lock(&btv->lock);
/* turn off overlay */
if (check_btres(fh, RESOURCE_OVERLAY))
bttv_switch_overlay(btv,fh,NULL);
@@ -3288,25 +3363,32 @@ static int bttv_release(struct file *file)
/* stop video capture */
if (check_btres(fh, RESOURCE_VIDEO_STREAM)) {
videobuf_streamoff(&fh->cap);
- free_btres(btv,fh,RESOURCE_VIDEO_STREAM);
+ free_btres_lock(btv,fh,RESOURCE_VIDEO_STREAM);
}
if (fh->cap.read_buf) {
buffer_release(&fh->cap,fh->cap.read_buf);
kfree(fh->cap.read_buf);
}
if (check_btres(fh, RESOURCE_VIDEO_READ)) {
- free_btres(btv, fh, RESOURCE_VIDEO_READ);
+ free_btres_lock(btv, fh, RESOURCE_VIDEO_READ);
}
/* stop vbi capture */
if (check_btres(fh, RESOURCE_VBI)) {
videobuf_stop(&fh->vbi);
- free_btres(btv,fh,RESOURCE_VBI);
+ free_btres_lock(btv,fh,RESOURCE_VBI);
}
/* free stuff */
+
+ /*
+ * videobuf uses cap.vb_lock - we should avoid holding btv->lock,
+ * otherwise we may run into deadlocks
+ */
+ mutex_unlock(&btv->lock);
videobuf_mmap_free(&fh->cap);
videobuf_mmap_free(&fh->vbi);
+ mutex_lock(&btv->lock);
v4l2_prio_close(&btv->prio, fh->prio);
file->private_data = NULL;
kfree(fh);
@@ -3316,6 +3398,7 @@ static int bttv_release(struct file *file)
if (!btv->users)
audio_mute(btv, 1);
+ mutex_unlock(&btv->lock);
return 0;
}
@@ -3333,13 +3416,13 @@ bttv_mmap(struct file *file, struct vm_area_struct *vma)
static const struct v4l2_file_operations bttv_fops =
{
- .owner = THIS_MODULE,
- .open = bttv_open,
- .release = bttv_release,
- .ioctl = video_ioctl2,
- .read = bttv_read,
- .mmap = bttv_mmap,
- .poll = bttv_poll,
+ .owner = THIS_MODULE,
+ .open = bttv_open,
+ .release = bttv_release,
+ .unlocked_ioctl = video_ioctl2,
+ .read = bttv_read,
+ .mmap = bttv_mmap,
+ .poll = bttv_poll,
};
static const struct v4l2_ioctl_ops bttv_ioctl_ops = {
@@ -3412,21 +3495,19 @@ static int radio_open(struct file *file)
dprintk("bttv: open dev=%s\n", video_device_node_name(vdev));
- lock_kernel();
-
dprintk("bttv%d: open called (radio)\n",btv->c.nr);
/* allocate per filehandle data */
fh = kmalloc(sizeof(*fh), GFP_KERNEL);
- if (NULL == fh) {
- unlock_kernel();
+ if (unlikely(!fh))
return -ENOMEM;
- }
file->private_data = fh;
+ mutex_lock(&fh->cap.vb_lock);
*fh = btv->init;
- v4l2_prio_open(&btv->prio, &fh->prio);
+ mutex_unlock(&fh->cap.vb_lock);
mutex_lock(&btv->lock);
+ v4l2_prio_open(&btv->prio, &fh->prio);
btv->radio_user++;
@@ -3434,7 +3515,6 @@ static int radio_open(struct file *file)
audio_input(btv,TVAUDIO_INPUT_RADIO);
mutex_unlock(&btv->lock);
- unlock_kernel();
return 0;
}
@@ -3444,6 +3524,7 @@ static int radio_release(struct file *file)
struct bttv *btv = fh->btv;
struct rds_command cmd;
+ mutex_lock(&btv->lock);
v4l2_prio_close(&btv->prio, fh->prio);
file->private_data = NULL;
kfree(fh);
@@ -3451,6 +3532,7 @@ static int radio_release(struct file *file)
btv->radio_user--;
bttv_call_all(btv, core, ioctl, RDS_CMD_CLOSE, &cmd);
+ mutex_unlock(&btv->lock);
return 0;
}
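Taken together, the bttv-driver.c hunks above follow one theme: with the BKL gone (lock_kernel()/unlock_kernel() removed, .ioctl replaced by .unlocked_ioctl), every path now takes the per-device btv->lock or the queue's vb_lock explicitly, and helpers that acquire a lock are renamed with a _lock suffix. Below is a reduced, hypothetical sketch of that open() shape; demo_dev, demo_fh and demo_device are placeholders, not bttv symbols.

/*
 * Sketch only: an open() path serialised by a per-device mutex instead
 * of the Big Kernel Lock.
 */
#include <linux/fs.h>
#include <linux/mutex.h>
#include <linux/slab.h>

struct demo_fh {
	int type;
};

struct demo_dev {
	struct mutex lock;
	int users;
};

/* stand-in for the device set up at probe time */
static struct demo_dev demo_device = {
	.lock = __MUTEX_INITIALIZER(demo_device.lock),
};

static int demo_open(struct file *file)
{
	struct demo_dev *dev = &demo_device;
	struct demo_fh *fh;

	fh = kmalloc(sizeof(*fh), GFP_KERNEL);
	if (unlikely(!fh))
		return -ENOMEM;
	file->private_data = fh;

	/* state shared between openers is now guarded by dev->lock */
	mutex_lock(&dev->lock);
	dev->users++;
	mutex_unlock(&dev->lock);

	return 0;
}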
diff --git a/drivers/media/video/bt8xx/bttv-i2c.c b/drivers/media/video/bt8xx/bttv-i2c.c
index 685d6597ee79..d49b675045fe 100644
--- a/drivers/media/video/bt8xx/bttv-i2c.c
+++ b/drivers/media/video/bt8xx/bttv-i2c.c
@@ -121,9 +121,8 @@ bttv_i2c_wait_done(struct bttv *btv)
/* timeout */
if (wait_event_interruptible_timeout(btv->i2c_queue,
- btv->i2c_done, msecs_to_jiffies(85)) == -ERESTARTSYS)
-
- rc = -EIO;
+ btv->i2c_done, msecs_to_jiffies(85)) == -ERESTARTSYS)
+ rc = -EIO;
if (btv->i2c_done & BT848_INT_RACK)
rc = 1;
@@ -390,41 +389,3 @@ int __devinit init_bttv_i2c(struct bttv *btv)
return btv->i2c_rc;
}
-
-/* Instantiate the I2C IR receiver device, if present */
-void __devinit init_bttv_i2c_ir(struct bttv *btv)
-{
- if (0 == btv->i2c_rc) {
- struct i2c_board_info info;
- /* The external IR receiver is at i2c address 0x34 (0x35 for
- reads). Future Hauppauge cards will have an internal
- receiver at 0x30 (0x31 for reads). In theory, both can be
- fitted, and Hauppauge suggest an external overrides an
- internal.
-
- That's why we probe 0x1a (~0x34) first. CB
- */
- const unsigned short addr_list[] = {
- 0x1a, 0x18, 0x4b, 0x64, 0x30, 0x71,
- I2C_CLIENT_END
- };
-
- memset(&info, 0, sizeof(struct i2c_board_info));
- strlcpy(info.type, "ir_video", I2C_NAME_SIZE);
- i2c_new_probed_device(&btv->c.i2c_adap, &info, addr_list, NULL);
- }
-}
-
-int __devexit fini_bttv_i2c(struct bttv *btv)
-{
- if (0 != btv->i2c_rc)
- return 0;
-
- return i2c_del_adapter(&btv->c.i2c_adap);
-}
-
-/*
- * Local variables:
- * c-basic-offset: 8
- * End:
- */
diff --git a/drivers/media/video/bt8xx/bttv-input.c b/drivers/media/video/bt8xx/bttv-input.c
index f68717a4bdec..6bf05a7dc5f9 100644
--- a/drivers/media/video/bt8xx/bttv-input.c
+++ b/drivers/media/video/bt8xx/bttv-input.c
@@ -245,6 +245,83 @@ static void bttv_ir_stop(struct bttv *btv)
}
}
+/*
+ * Get_key functions used by I2C remotes
+ */
+
+static int get_key_pv951(struct IR_i2c *ir, u32 *ir_key, u32 *ir_raw)
+{
+ unsigned char b;
+
+ /* poll IR chip */
+ if (1 != i2c_master_recv(ir->c, &b, 1)) {
+ dprintk(KERN_INFO DEVNAME ": read error\n");
+ return -EIO;
+ }
+
+ /* ignore 0xaa */
+ if (b==0xaa)
+ return 0;
+ dprintk(KERN_INFO DEVNAME ": key %02x\n", b);
+
+ *ir_key = b;
+ *ir_raw = b;
+ return 1;
+}
+
+/* Instantiate the I2C IR receiver device, if present */
+void __devinit init_bttv_i2c_ir(struct bttv *btv)
+{
+ const unsigned short addr_list[] = {
+ 0x1a, 0x18, 0x64, 0x30, 0x71,
+ I2C_CLIENT_END
+ };
+ struct i2c_board_info info;
+
+ if (0 != btv->i2c_rc)
+ return;
+
+ memset(&info, 0, sizeof(struct i2c_board_info));
+ memset(&btv->init_data, 0, sizeof(btv->init_data));
+ strlcpy(info.type, "ir_video", I2C_NAME_SIZE);
+
+ switch (btv->c.type) {
+ case BTTV_BOARD_PV951:
+ btv->init_data.name = "PV951";
+ btv->init_data.get_key = get_key_pv951;
+ btv->init_data.ir_codes = RC_MAP_PV951;
+ btv->init_data.type = IR_TYPE_OTHER;
+ info.addr = 0x4b;
+ break;
+ default:
+ /*
+ * The external IR receiver is at i2c address 0x34 (0x35 for
+ * reads). Future Hauppauge cards will have an internal
+ * receiver at 0x30 (0x31 for reads). In theory, both can be
+ * fitted, and Hauppauge suggest an external overrides an
+ * internal.
+ * That's why we probe 0x1a (~0x34) first. CB
+ */
+
+ i2c_new_probed_device(&btv->c.i2c_adap, &info, addr_list, NULL);
+ return;
+ }
+
+ if (btv->init_data.name)
+ info.platform_data = &btv->init_data;
+ i2c_new_device(&btv->c.i2c_adap, &info);
+
+ return;
+}
+
+int __devexit fini_bttv_i2c(struct bttv *btv)
+{
+ if (0 != btv->i2c_rc)
+ return 0;
+
+ return i2c_del_adapter(&btv->c.i2c_adap);
+}
+
int bttv_input_init(struct bttv *btv)
{
struct card_ir *ir;
@@ -420,10 +497,3 @@ void bttv_input_fini(struct bttv *btv)
kfree(btv->remote);
btv->remote = NULL;
}
-
-
-/*
- * Local variables:
- * c-basic-offset: 8
- * End:
- */
diff --git a/drivers/media/video/bt8xx/bttv-risc.c b/drivers/media/video/bt8xx/bttv-risc.c
index 0fa9f39f37a3..9b57d091da48 100644
--- a/drivers/media/video/bt8xx/bttv-risc.c
+++ b/drivers/media/video/bt8xx/bttv-risc.c
@@ -582,7 +582,7 @@ bttv_dma_free(struct videobuf_queue *q,struct bttv *btv, struct bttv_buffer *buf
struct videobuf_dmabuf *dma=videobuf_to_dma(&buf->vb);
BUG_ON(in_interrupt());
- videobuf_waiton(&buf->vb,0,0);
+ videobuf_waiton(q, &buf->vb, 0, 0);
videobuf_dma_unmap(q->dev, dma);
videobuf_dma_free(dma);
btcx_riscmem_free(btv->c.pci,&buf->bottom);
diff --git a/drivers/media/video/bt8xx/bttv.h b/drivers/media/video/bt8xx/bttv.h
index 3ec2402c6b4a..6fd2a8ebda1e 100644
--- a/drivers/media/video/bt8xx/bttv.h
+++ b/drivers/media/video/bt8xx/bttv.h
@@ -18,7 +18,6 @@
#include <linux/i2c.h>
#include <media/v4l2-device.h>
#include <media/ir-common.h>
-#include <media/ir-kbd-i2c.h>
#include <media/i2c-addr.h>
#include <media/tuner.h>
diff --git a/drivers/media/video/bt8xx/bttvp.h b/drivers/media/video/bt8xx/bttvp.h
index 6cccc2a17eee..d1e26a448ed2 100644
--- a/drivers/media/video/bt8xx/bttvp.h
+++ b/drivers/media/video/bt8xx/bttvp.h
@@ -42,7 +42,7 @@
#include <media/videobuf-dma-sg.h>
#include <media/tveeprom.h>
#include <media/ir-common.h>
-
+#include <media/ir-kbd-i2c.h>
#include "bt848.h"
#include "bttv.h"
@@ -271,6 +271,12 @@ int bttv_sub_del_devices(struct bttv_core *core);
extern int no_overlay;
/* ---------------------------------------------------------- */
+/* bttv-input.c */
+
+extern void init_bttv_i2c_ir(struct bttv *btv);
+extern int fini_bttv_i2c(struct bttv *btv);
+
+/* ---------------------------------------------------------- */
/* bttv-driver.c */
/* insmod options */
@@ -279,8 +285,6 @@ extern unsigned int bttv_debug;
extern unsigned int bttv_gpio;
extern void bttv_gpio_tracking(struct bttv *btv, char *comment);
extern int init_bttv_i2c(struct bttv *btv);
-extern void init_bttv_i2c_ir(struct bttv *btv);
-extern int fini_bttv_i2c(struct bttv *btv);
#define bttv_printk if (bttv_verbose) printk
#define dprintk if (bttv_debug >= 1) printk
@@ -366,6 +370,9 @@ struct bttv {
int has_remote;
struct card_ir *remote;
+ /* I2C remote data */
+ struct IR_i2c_init_data init_data;
+
/* locking */
spinlock_t s_lock;
struct mutex lock;
diff --git a/drivers/media/video/cafe_ccic.c b/drivers/media/video/cafe_ccic.c
index 9536f1a40dd2..260c666ce931 100644
--- a/drivers/media/video/cafe_ccic.c
+++ b/drivers/media/video/cafe_ccic.c
@@ -25,6 +25,7 @@
#include <linux/module.h>
#include <linux/init.h>
#include <linux/fs.h>
+#include <linux/dmi.h>
#include <linux/mm.h>
#include <linux/pci.h>
#include <linux/i2c.h>
@@ -46,6 +47,7 @@
#include <asm/uaccess.h>
#include <asm/io.h>
+#include "ov7670.h"
#include "cafe_ccic-regs.h"
#define CAFE_VERSION 0x000002
@@ -180,6 +182,7 @@ struct cafe_camera
/* Current operating parameters */
u32 sensor_type; /* Currently ov7670 only */
struct v4l2_pix_format pix_format;
+ enum v4l2_mbus_pixelcode mbus_code;
/* Locks */
struct mutex s_mutex; /* Access to this structure */
@@ -207,6 +210,49 @@ static inline struct cafe_camera *to_cam(struct v4l2_device *dev)
return container_of(dev, struct cafe_camera, v4l2_dev);
}
+static struct cafe_format_struct {
+ __u8 *desc;
+ __u32 pixelformat;
+ int bpp; /* Bytes per pixel */
+ enum v4l2_mbus_pixelcode mbus_code;
+} cafe_formats[] = {
+ {
+ .desc = "YUYV 4:2:2",
+ .pixelformat = V4L2_PIX_FMT_YUYV,
+ .mbus_code = V4L2_MBUS_FMT_YUYV8_2X8,
+ .bpp = 2,
+ },
+ {
+ .desc = "RGB 444",
+ .pixelformat = V4L2_PIX_FMT_RGB444,
+ .mbus_code = V4L2_MBUS_FMT_RGB444_2X8_PADHI_LE,
+ .bpp = 2,
+ },
+ {
+ .desc = "RGB 565",
+ .pixelformat = V4L2_PIX_FMT_RGB565,
+ .mbus_code = V4L2_MBUS_FMT_RGB565_2X8_LE,
+ .bpp = 2,
+ },
+ {
+ .desc = "Raw RGB Bayer",
+ .pixelformat = V4L2_PIX_FMT_SBGGR8,
+ .mbus_code = V4L2_MBUS_FMT_SBGGR8_1X8,
+ .bpp = 1
+ },
+};
+#define N_CAFE_FMTS ARRAY_SIZE(cafe_formats)
+
+static struct cafe_format_struct *cafe_find_format(u32 pixelformat)
+{
+ unsigned i;
+
+ for (i = 0; i < N_CAFE_FMTS; i++)
+ if (cafe_formats[i].pixelformat == pixelformat)
+ return cafe_formats + i;
+ /* Not found? Then return the first format. */
+ return cafe_formats;
+}
/*
* Start over with DMA buffers - dev_lock needed.
@@ -319,7 +365,6 @@ static int cafe_smbus_write_data(struct cafe_camera *cam,
{
unsigned int rval;
unsigned long flags;
- DEFINE_WAIT(the_wait);
spin_lock_irqsave(&cam->dev_lock, flags);
rval = TWSIC0_EN | ((addr << TWSIC0_SID_SHIFT) & TWSIC0_SID);
@@ -334,28 +379,27 @@ static int cafe_smbus_write_data(struct cafe_camera *cam,
cafe_reg_write(cam, REG_TWSIC1, rval);
spin_unlock_irqrestore(&cam->dev_lock, flags);
+ /* Unfortunately, reading TWSIC1 too soon after sending a command
+ * causes the device to die.
+ * Use a busy-wait because we often send a large quantity of small
+ * commands at once; using msleep() would cause a lot of context
+ * switches which take longer than 2ms, resulting in noticeable
+ * boot-time and capture-start delays.
+ */
+ mdelay(2);
+
/*
- * Time to wait for the write to complete. THIS IS A RACY
- * WAY TO DO IT, but the sad fact is that reading the TWSIC1
- * register too quickly after starting the operation sends
- * the device into a place that may be kinder and better, but
- * which is absolutely useless for controlling the sensor. In
- * practice we have plenty of time to get into our sleep state
- * before the interrupt hits, and the worst case is that we
- * time out and then see that things completed, so this seems
- * the best way for now.
+ * Another sad fact is that sometimes, commands silently complete but
+ * cafe_smbus_write_done() never becomes aware of this.
+ * This happens at random and can apparently occur with any
+ * command.
+ * We don't understand why this is. We work around this issue
+ * with the timeout in the wait below, assuming that all commands
+ * complete within the timeout.
*/
- do {
- prepare_to_wait(&cam->smbus_wait, &the_wait,
- TASK_UNINTERRUPTIBLE);
- schedule_timeout(1); /* even 1 jiffy is too long */
- finish_wait(&cam->smbus_wait, &the_wait);
- } while (!cafe_smbus_write_done(cam));
-
-#ifdef IF_THE_CAFE_HARDWARE_WORKED_RIGHT
wait_event_timeout(cam->smbus_wait, cafe_smbus_write_done(cam),
CAFE_SMBUS_TIMEOUT);
-#endif
+
spin_lock_irqsave(&cam->dev_lock, flags);
rval = cafe_reg_read(cam, REG_TWSIC1);
spin_unlock_irqrestore(&cam->dev_lock, flags);
@@ -812,15 +856,15 @@ static int cafe_cam_set_flip(struct cafe_camera *cam)
static int cafe_cam_configure(struct cafe_camera *cam)
{
- struct v4l2_format fmt;
+ struct v4l2_mbus_framefmt mbus_fmt;
int ret;
if (cam->state != S_IDLE)
return -EINVAL;
- fmt.fmt.pix = cam->pix_format;
+ v4l2_fill_mbus_format(&mbus_fmt, &cam->pix_format, cam->mbus_code);
ret = sensor_call(cam, core, init, 0);
if (ret == 0)
- ret = sensor_call(cam, video, s_fmt, &fmt);
+ ret = sensor_call(cam, video, s_mbus_fmt, &mbus_fmt);
/*
* OV7670 does weird things if flip is set *before* format...
*/
@@ -1481,7 +1525,7 @@ static int cafe_vidioc_querycap(struct file *file, void *priv,
/*
* The default format we use until somebody says otherwise.
*/
-static struct v4l2_pix_format cafe_def_pix_format = {
+static const struct v4l2_pix_format cafe_def_pix_format = {
.width = VGA_WIDTH,
.height = VGA_HEIGHT,
.pixelformat = V4L2_PIX_FMT_YUYV,
@@ -1490,28 +1534,38 @@ static struct v4l2_pix_format cafe_def_pix_format = {
.sizeimage = VGA_WIDTH*VGA_HEIGHT*2,
};
+static const enum v4l2_mbus_pixelcode cafe_def_mbus_code =
+ V4L2_MBUS_FMT_YUYV8_2X8;
+
static int cafe_vidioc_enum_fmt_vid_cap(struct file *filp,
void *priv, struct v4l2_fmtdesc *fmt)
{
- struct cafe_camera *cam = priv;
- int ret;
-
- mutex_lock(&cam->s_mutex);
- ret = sensor_call(cam, video, enum_fmt, fmt);
- mutex_unlock(&cam->s_mutex);
- return ret;
+ if (fmt->index >= N_CAFE_FMTS)
+ return -EINVAL;
+ strlcpy(fmt->description, cafe_formats[fmt->index].desc,
+ sizeof(fmt->description));
+ fmt->pixelformat = cafe_formats[fmt->index].pixelformat;
+ return 0;
}
-
static int cafe_vidioc_try_fmt_vid_cap(struct file *filp, void *priv,
struct v4l2_format *fmt)
{
struct cafe_camera *cam = priv;
+ struct cafe_format_struct *f;
+ struct v4l2_pix_format *pix = &fmt->fmt.pix;
+ struct v4l2_mbus_framefmt mbus_fmt;
int ret;
+ f = cafe_find_format(pix->pixelformat);
+ pix->pixelformat = f->pixelformat;
+ v4l2_fill_mbus_format(&mbus_fmt, pix, f->mbus_code);
mutex_lock(&cam->s_mutex);
- ret = sensor_call(cam, video, try_fmt, fmt);
+ ret = sensor_call(cam, video, try_mbus_fmt, &mbus_fmt);
mutex_unlock(&cam->s_mutex);
+ v4l2_fill_pix_format(pix, &mbus_fmt);
+ pix->bytesperline = pix->width * f->bpp;
+ pix->sizeimage = pix->height * pix->bytesperline;
return ret;
}
@@ -1519,6 +1573,7 @@ static int cafe_vidioc_s_fmt_vid_cap(struct file *filp, void *priv,
struct v4l2_format *fmt)
{
struct cafe_camera *cam = priv;
+ struct cafe_format_struct *f;
int ret;
/*
@@ -1527,6 +1582,9 @@ static int cafe_vidioc_s_fmt_vid_cap(struct file *filp, void *priv,
*/
if (cam->state != S_IDLE || cam->n_sbufs > 0)
return -EBUSY;
+
+ f = cafe_find_format(fmt->fmt.pix.pixelformat);
+
/*
* See if the formatting works in principle.
*/
@@ -1539,6 +1597,8 @@ static int cafe_vidioc_s_fmt_vid_cap(struct file *filp, void *priv,
*/
mutex_lock(&cam->s_mutex);
cam->pix_format = fmt->fmt.pix;
+ cam->mbus_code = f->mbus_code;
+
/*
* Make sure we have appropriate DMA buffers.
*/
@@ -1652,6 +1712,30 @@ static int cafe_vidioc_g_chip_ident(struct file *file, void *priv,
return sensor_call(cam, core, g_chip_ident, chip);
}
+static int cafe_vidioc_enum_framesizes(struct file *filp, void *priv,
+ struct v4l2_frmsizeenum *sizes)
+{
+ struct cafe_camera *cam = priv;
+ int ret;
+
+ mutex_lock(&cam->s_mutex);
+ ret = sensor_call(cam, video, enum_framesizes, sizes);
+ mutex_unlock(&cam->s_mutex);
+ return ret;
+}
+
+static int cafe_vidioc_enum_frameintervals(struct file *filp, void *priv,
+ struct v4l2_frmivalenum *interval)
+{
+ struct cafe_camera *cam = priv;
+ int ret;
+
+ mutex_lock(&cam->s_mutex);
+ ret = sensor_call(cam, video, enum_frameintervals, interval);
+ mutex_unlock(&cam->s_mutex);
+ return ret;
+}
+
#ifdef CONFIG_VIDEO_ADV_DEBUG
static int cafe_vidioc_g_register(struct file *file, void *priv,
struct v4l2_dbg_register *reg)
@@ -1715,6 +1799,8 @@ static const struct v4l2_ioctl_ops cafe_v4l_ioctl_ops = {
.vidioc_s_ctrl = cafe_vidioc_s_ctrl,
.vidioc_g_parm = cafe_vidioc_g_parm,
.vidioc_s_parm = cafe_vidioc_s_parm,
+ .vidioc_enum_framesizes = cafe_vidioc_enum_framesizes,
+ .vidioc_enum_frameintervals = cafe_vidioc_enum_frameintervals,
.vidioc_g_chip_ident = cafe_vidioc_g_chip_ident,
#ifdef CONFIG_VIDEO_ADV_DEBUG
.vidioc_g_register = cafe_vidioc_g_register,
@@ -1890,11 +1976,33 @@ static irqreturn_t cafe_irq(int irq, void *data)
* PCI interface stuff.
*/
+static const struct dmi_system_id olpc_xo1_dmi[] = {
+ {
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "OLPC"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "XO"),
+ DMI_MATCH(DMI_PRODUCT_VERSION, "1"),
+ },
+ },
+ { }
+};
+
static int cafe_pci_probe(struct pci_dev *pdev,
const struct pci_device_id *id)
{
int ret;
struct cafe_camera *cam;
+ struct ov7670_config sensor_cfg = {
+ /* This controller only does SMBUS */
+ .use_smbus = true,
+
+ /*
+ * Exclude QCIF mode, because it only captures a tiny portion
+ * of the sensor FOV
+ */
+ .min_width = 320,
+ .min_height = 240,
+ };
/*
* Start putting together one of our big camera structures.
@@ -1915,6 +2023,7 @@ static int cafe_pci_probe(struct pci_dev *pdev,
init_waitqueue_head(&cam->iowait);
cam->pdev = pdev;
cam->pix_format = cafe_def_pix_format;
+ cam->mbus_code = cafe_def_mbus_code;
INIT_LIST_HEAD(&cam->dev_list);
INIT_LIST_HEAD(&cam->sb_avail);
INIT_LIST_HEAD(&cam->sb_full);
@@ -1951,13 +2060,18 @@ static int cafe_pci_probe(struct pci_dev *pdev,
if (ret)
goto out_freeirq;
+ /* Apply XO-1 clock speed */
+ if (dmi_check_system(olpc_xo1_dmi))
+ sensor_cfg.clock_speed = 45;
+
cam->sensor_addr = 0x42;
- cam->sensor = v4l2_i2c_new_subdev(&cam->v4l2_dev, &cam->i2c_adapter,
- "ov7670", "ov7670", cam->sensor_addr, NULL);
+ cam->sensor = v4l2_i2c_new_subdev_cfg(&cam->v4l2_dev, &cam->i2c_adapter,
+ "ov7670", 0, &sensor_cfg, cam->sensor_addr, NULL);
if (cam->sensor == NULL) {
ret = -ENODEV;
goto out_smbus;
}
+
ret = cafe_cam_init(cam);
if (ret)
goto out_smbus;
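The probe path above now hands board-specific limits to the sensor through struct ov7670_config and the _cfg variant of the subdev helper, so the XO-1 clock speed and the QCIF exclusion live in the bridge driver rather than in ov7670 itself. A hedged sketch of that call shape follows; demo_load_sensor() is illustrative, and only the "ov7670" name, the 0x42 address and the config fields are taken from the hunks above.

/*
 * Sketch only: instantiating the ov7670 subdev with per-board
 * configuration passed as platform data.
 */
#include <linux/i2c.h>
#include <media/v4l2-device.h>
#include <media/v4l2-common.h>
#include "ov7670.h"

static struct v4l2_subdev *demo_load_sensor(struct v4l2_device *v4l2_dev,
					    struct i2c_adapter *adap)
{
	static struct ov7670_config cfg = {
		.use_smbus  = true,	/* the CAFE controller only does SMBus */
		.min_width  = 320,	/* exclude QCIF: tiny crop of the sensor */
		.min_height = 240,
	};

	return v4l2_i2c_new_subdev_cfg(v4l2_dev, adap, "ov7670",
				       0 /* irq */, &cfg, 0x42, NULL);
}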
diff --git a/drivers/media/video/cpia2/Kconfig b/drivers/media/video/cpia2/Kconfig
index e39a96152004..66e9283f5993 100644
--- a/drivers/media/video/cpia2/Kconfig
+++ b/drivers/media/video/cpia2/Kconfig
@@ -1,6 +1,6 @@
config VIDEO_CPIA2
tristate "CPiA2 Video For Linux"
- depends on VIDEO_DEV && USB && VIDEO_V4L1
+ depends on VIDEO_DEV && USB && VIDEO_V4L2
---help---
This is the video4linux driver for cameras based on Vision's CPiA2
(Colour Processor Interface ASIC), such as the Digital Blue QX5
diff --git a/drivers/media/video/cpia2/cpia2.h b/drivers/media/video/cpia2/cpia2.h
index 8d2dfc128821..916c13d5cf7d 100644
--- a/drivers/media/video/cpia2/cpia2.h
+++ b/drivers/media/video/cpia2/cpia2.h
@@ -32,7 +32,7 @@
#define __CPIA2_H__
#include <linux/version.h>
-#include <linux/videodev.h>
+#include <linux/videodev2.h>
#include <media/v4l2-common.h>
#include <linux/usb.h>
#include <linux/poll.h>
@@ -43,7 +43,7 @@
/* define for verbose debug output */
//#define _CPIA2_DEBUG_
-#define CPIA2_MAJ_VER 2
+#define CPIA2_MAJ_VER 3
#define CPIA2_MIN_VER 0
#define CPIA2_PATCH_VER 0
@@ -396,8 +396,8 @@ struct camera_data {
/* v4l */
int video_size; /* VIDEO_SIZE_ */
struct video_device *vdev; /* v4l videodev */
- struct video_picture vp; /* v4l camera settings */
- struct video_window vw; /* v4l capture area */
+ u32 width;
+ u32 height; /* Its size */
__u32 pixelformat; /* Format fourcc */
/* USB */
diff --git a/drivers/media/video/cpia2/cpia2_core.c b/drivers/media/video/cpia2/cpia2_core.c
index 1cc0df8befff..9606bc01b803 100644
--- a/drivers/media/video/cpia2/cpia2_core.c
+++ b/drivers/media/video/cpia2/cpia2_core.c
@@ -1058,44 +1058,44 @@ static int set_vw_size(struct camera_data *cam, int size)
DBG("Setting size to VGA\n");
cam->params.roi.width = STV_IMAGE_VGA_COLS;
cam->params.roi.height = STV_IMAGE_VGA_ROWS;
- cam->vw.width = STV_IMAGE_VGA_COLS;
- cam->vw.height = STV_IMAGE_VGA_ROWS;
+ cam->width = STV_IMAGE_VGA_COLS;
+ cam->height = STV_IMAGE_VGA_ROWS;
break;
case VIDEOSIZE_CIF:
DBG("Setting size to CIF\n");
cam->params.roi.width = STV_IMAGE_CIF_COLS;
cam->params.roi.height = STV_IMAGE_CIF_ROWS;
- cam->vw.width = STV_IMAGE_CIF_COLS;
- cam->vw.height = STV_IMAGE_CIF_ROWS;
+ cam->width = STV_IMAGE_CIF_COLS;
+ cam->height = STV_IMAGE_CIF_ROWS;
break;
case VIDEOSIZE_QVGA:
DBG("Setting size to QVGA\n");
cam->params.roi.width = STV_IMAGE_QVGA_COLS;
cam->params.roi.height = STV_IMAGE_QVGA_ROWS;
- cam->vw.width = STV_IMAGE_QVGA_COLS;
- cam->vw.height = STV_IMAGE_QVGA_ROWS;
+ cam->width = STV_IMAGE_QVGA_COLS;
+ cam->height = STV_IMAGE_QVGA_ROWS;
break;
case VIDEOSIZE_288_216:
cam->params.roi.width = 288;
cam->params.roi.height = 216;
- cam->vw.width = 288;
- cam->vw.height = 216;
+ cam->width = 288;
+ cam->height = 216;
break;
case VIDEOSIZE_256_192:
- cam->vw.width = 256;
- cam->vw.height = 192;
+ cam->width = 256;
+ cam->height = 192;
cam->params.roi.width = 256;
cam->params.roi.height = 192;
break;
case VIDEOSIZE_224_168:
- cam->vw.width = 224;
- cam->vw.height = 168;
+ cam->width = 224;
+ cam->height = 168;
cam->params.roi.width = 224;
cam->params.roi.height = 168;
break;
case VIDEOSIZE_192_144:
- cam->vw.width = 192;
- cam->vw.height = 144;
+ cam->width = 192;
+ cam->height = 144;
cam->params.roi.width = 192;
cam->params.roi.height = 144;
break;
@@ -1103,8 +1103,8 @@ static int set_vw_size(struct camera_data *cam, int size)
DBG("Setting size to QCIF\n");
cam->params.roi.width = STV_IMAGE_QCIF_COLS;
cam->params.roi.height = STV_IMAGE_QCIF_ROWS;
- cam->vw.width = STV_IMAGE_QCIF_COLS;
- cam->vw.height = STV_IMAGE_QCIF_ROWS;
+ cam->width = STV_IMAGE_QCIF_COLS;
+ cam->height = STV_IMAGE_QCIF_ROWS;
break;
default:
retval = -EINVAL;
@@ -2224,23 +2224,8 @@ static void reset_camera_struct(struct camera_data *cam)
cam->params.roi.height = STV_IMAGE_CIF_ROWS;
}
- /***
- * Fill in the v4l structures. video_cap is filled in inside the VIDIOCCAP
- * Ioctl. Here, just do the window and picture stucts.
- ***/
- cam->vp.palette = (u16) VIDEO_PALETTE_RGB24; /* Is this right? */
- cam->vp.brightness = (u16) cam->params.color_params.brightness * 256;
- cam->vp.colour = (u16) cam->params.color_params.saturation * 256;
- cam->vp.contrast = (u16) cam->params.color_params.contrast * 256;
-
- cam->vw.x = 0;
- cam->vw.y = 0;
- cam->vw.width = cam->params.roi.width;
- cam->vw.height = cam->params.roi.height;
- cam->vw.flags = 0;
- cam->vw.clipcount = 0;
-
- return;
+ cam->width = cam->params.roi.width;
+ cam->height = cam->params.roi.height;
}
/******************************************************************************
diff --git a/drivers/media/video/cpia2/cpia2_v4l.c b/drivers/media/video/cpia2/cpia2_v4l.c
index 5520789854da..46b433bbf2c1 100644
--- a/drivers/media/video/cpia2/cpia2_v4l.c
+++ b/drivers/media/video/cpia2/cpia2_v4l.c
@@ -37,7 +37,7 @@
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/init.h>
-#include <linux/videodev.h>
+#include <linux/videodev2.h>
#include <linux/stringify.h>
#include <media/v4l2-ioctl.h>
@@ -391,113 +391,6 @@ static unsigned int cpia2_v4l_poll(struct file *filp, struct poll_table_struct *
}
-/******************************************************************************
- *
- * ioctl_cap_query
- *
- *****************************************************************************/
-static int ioctl_cap_query(void *arg, struct camera_data *cam)
-{
- struct video_capability *vc;
- int retval = 0;
- vc = arg;
-
- if (cam->params.pnp_id.product == 0x151)
- strcpy(vc->name, "QX5 Microscope");
- else
- strcpy(vc->name, "CPiA2 Camera");
-
- vc->type = VID_TYPE_CAPTURE | VID_TYPE_MJPEG_ENCODER;
- vc->channels = 1;
- vc->audios = 0;
- vc->minwidth = 176; /* VIDEOSIZE_QCIF */
- vc->minheight = 144;
- switch (cam->params.version.sensor_flags) {
- case CPIA2_VP_SENSOR_FLAGS_500:
- vc->maxwidth = STV_IMAGE_VGA_COLS;
- vc->maxheight = STV_IMAGE_VGA_ROWS;
- break;
- case CPIA2_VP_SENSOR_FLAGS_410:
- vc->maxwidth = STV_IMAGE_CIF_COLS;
- vc->maxheight = STV_IMAGE_CIF_ROWS;
- break;
- default:
- return -EINVAL;
- }
-
- return retval;
-}
-
-/******************************************************************************
- *
- * ioctl_get_channel
- *
- *****************************************************************************/
-static int ioctl_get_channel(void *arg)
-{
- int retval = 0;
- struct video_channel *v;
- v = arg;
-
- if (v->channel != 0)
- return -EINVAL;
-
- v->channel = 0;
- strcpy(v->name, "Camera");
- v->tuners = 0;
- v->flags = 0;
- v->type = VIDEO_TYPE_CAMERA;
- v->norm = 0;
-
- return retval;
-}
-
-/******************************************************************************
- *
- * ioctl_set_channel
- *
- *****************************************************************************/
-static int ioctl_set_channel(void *arg)
-{
- struct video_channel *v;
- int retval = 0;
- v = arg;
-
- if (retval == 0 && v->channel != 0)
- retval = -EINVAL;
-
- return retval;
-}
-
-/******************************************************************************
- *
- * ioctl_set_image_prop
- *
- *****************************************************************************/
-static int ioctl_set_image_prop(void *arg, struct camera_data *cam)
-{
- struct video_picture *vp;
- int retval = 0;
- vp = arg;
-
- /* brightness, color, contrast need no check 0-65535 */
- memcpy(&cam->vp, vp, sizeof(*vp));
-
- /* update cam->params.colorParams */
- cam->params.color_params.brightness = vp->brightness / 256;
- cam->params.color_params.saturation = vp->colour / 256;
- cam->params.color_params.contrast = vp->contrast / 256;
-
- DBG("Requested params: bright 0x%X, sat 0x%X, contrast 0x%X\n",
- cam->params.color_params.brightness,
- cam->params.color_params.saturation,
- cam->params.color_params.contrast);
-
- cpia2_set_color_params(cam);
-
- return retval;
-}
-
static int sync(struct camera_data *cam, int frame_nr)
{
struct framebuf *frame = &cam->buffers[frame_nr];
@@ -526,61 +419,10 @@ static int sync(struct camera_data *cam, int frame_nr)
/******************************************************************************
*
- * ioctl_set_window_size
- *
- *****************************************************************************/
-static int ioctl_set_window_size(void *arg, struct camera_data *cam,
- struct cpia2_fh *fh)
-{
- /* copy_from_user, check validity, copy to internal structure */
- struct video_window *vw;
- int frame, err;
- vw = arg;
-
- if (vw->clipcount != 0) /* clipping not supported */
- return -EINVAL;
-
- if (vw->clips != NULL) /* clipping not supported */
- return -EINVAL;
-
- /* Ensure that only this process can change the format. */
- err = v4l2_prio_change(&cam->prio, &fh->prio, V4L2_PRIORITY_RECORD);
- if(err != 0)
- return err;
-
- cam->pixelformat = V4L2_PIX_FMT_JPEG;
-
- /* Be sure to supply the Huffman tables, this isn't MJPEG */
- cam->params.compression.inhibit_htables = 0;
-
- /* we set the video window to something smaller or equal to what
- * is requested by the user???
- */
- DBG("Requested width = %d, height = %d\n", vw->width, vw->height);
- if (vw->width != cam->vw.width || vw->height != cam->vw.height) {
- cam->vw.width = vw->width;
- cam->vw.height = vw->height;
- cam->params.roi.width = vw->width;
- cam->params.roi.height = vw->height;
- cpia2_set_format(cam);
- }
-
- for (frame = 0; frame < cam->num_frames; ++frame) {
- if (cam->buffers[frame].status == FRAME_READING)
- if ((err = sync(cam, frame)) < 0)
- return err;
-
- cam->buffers[frame].status = FRAME_EMPTY;
- }
-
- return 0;
-}
-
-/******************************************************************************
- *
* ioctl_get_mbuf
*
*****************************************************************************/
+#ifdef CONFIG_VIDEO_V4L1_COMPAT
static int ioctl_get_mbuf(void *arg, struct camera_data *cam)
{
struct video_mbuf *vm;
@@ -595,66 +437,7 @@ static int ioctl_get_mbuf(void *arg, struct camera_data *cam)
return 0;
}
-
-/******************************************************************************
- *
- * ioctl_mcapture
- *
- *****************************************************************************/
-static int ioctl_mcapture(void *arg, struct camera_data *cam,
- struct cpia2_fh *fh)
-{
- struct video_mmap *vm;
- int video_size, err;
- vm = arg;
-
- if (vm->frame < 0 || vm->frame >= cam->num_frames)
- return -EINVAL;
-
- /* set video size */
- video_size = cpia2_match_video_size(vm->width, vm->height);
- if (cam->video_size < 0) {
- return -EINVAL;
- }
-
- /* Ensure that only this process can change the format. */
- err = v4l2_prio_change(&cam->prio, &fh->prio, V4L2_PRIORITY_RECORD);
- if(err != 0)
- return err;
-
- if (video_size != cam->video_size) {
- cam->video_size = video_size;
- cam->params.roi.width = vm->width;
- cam->params.roi.height = vm->height;
- cpia2_set_format(cam);
- }
-
- if (cam->buffers[vm->frame].status == FRAME_READING)
- if ((err=sync(cam, vm->frame)) < 0)
- return err;
-
- cam->buffers[vm->frame].status = FRAME_EMPTY;
-
- return cpia2_usb_stream_start(cam,cam->params.camera_state.stream_mode);
-}
-
-/******************************************************************************
- *
- * ioctl_sync
- *
- *****************************************************************************/
-static int ioctl_sync(void *arg, struct camera_data *cam)
-{
- int frame;
-
- frame = *(int*)arg;
-
- if (frame < 0 || frame >= cam->num_frames)
- return -EINVAL;
-
- return sync(cam, frame);
-}
-
+#endif
/******************************************************************************
*
@@ -897,10 +680,10 @@ static int ioctl_set_fmt(void *arg,struct camera_data *cam, struct cpia2_fh *fh)
*/
DBG("Requested width = %d, height = %d\n",
f->fmt.pix.width, f->fmt.pix.height);
- if (f->fmt.pix.width != cam->vw.width ||
- f->fmt.pix.height != cam->vw.height) {
- cam->vw.width = f->fmt.pix.width;
- cam->vw.height = f->fmt.pix.height;
+ if (f->fmt.pix.width != cam->width ||
+ f->fmt.pix.height != cam->height) {
+ cam->width = f->fmt.pix.width;
+ cam->height = f->fmt.pix.height;
cam->params.roi.width = f->fmt.pix.width;
cam->params.roi.height = f->fmt.pix.height;
cpia2_set_format(cam);
@@ -932,8 +715,8 @@ static int ioctl_get_fmt(void *arg,struct camera_data *cam)
if (f->type != V4L2_BUF_TYPE_VIDEO_CAPTURE)
return -EINVAL;
- f->fmt.pix.width = cam->vw.width;
- f->fmt.pix.height = cam->vw.height;
+ f->fmt.pix.width = cam->width;
+ f->fmt.pix.height = cam->height;
f->fmt.pix.pixelformat = cam->pixelformat;
f->fmt.pix.field = V4L2_FIELD_NONE;
f->fmt.pix.bytesperline = 0;
@@ -962,12 +745,12 @@ static int ioctl_cropcap(void *arg,struct camera_data *cam)
c->bounds.left = 0;
c->bounds.top = 0;
- c->bounds.width = cam->vw.width;
- c->bounds.height = cam->vw.height;
+ c->bounds.width = cam->width;
+ c->bounds.height = cam->height;
c->defrect.left = 0;
c->defrect.top = 0;
- c->defrect.width = cam->vw.width;
- c->defrect.height = cam->vw.height;
+ c->defrect.width = cam->width;
+ c->defrect.height = cam->height;
c->pixelaspect.numerator = 1;
c->pixelaspect.denominator = 1;
@@ -1587,8 +1370,6 @@ static long cpia2_do_ioctl(struct file *file, unsigned int cmd, void *arg)
/* Priority check */
switch (cmd) {
- case VIDIOCSWIN:
- case VIDIOCMCAPTURE:
case VIDIOC_S_FMT:
{
struct cpia2_fh *fh = file->private_data;
@@ -1599,8 +1380,8 @@ static long cpia2_do_ioctl(struct file *file, unsigned int cmd, void *arg)
}
break;
}
+#ifdef CONFIG_VIDEO_V4L1_COMPAT
case VIDIOCGMBUF:
- case VIDIOCSYNC:
{
struct cpia2_fh *fh = file->private_data;
if(fh->prio != V4L2_PRIORITY_RECORD) {
@@ -1609,68 +1390,21 @@ static long cpia2_do_ioctl(struct file *file, unsigned int cmd, void *arg)
}
break;
}
+#endif
default:
break;
}
switch (cmd) {
- case VIDIOCGCAP: /* query capabilities */
- retval = ioctl_cap_query(arg, cam);
- break;
-
- case VIDIOCGCHAN: /* get video source - we are a camera, nothing else */
- retval = ioctl_get_channel(arg);
- break;
- case VIDIOCSCHAN: /* set video source - we are a camera, nothing else */
- retval = ioctl_set_channel(arg);
- break;
- case VIDIOCGPICT: /* image properties */
- memcpy(arg, &cam->vp, sizeof(struct video_picture));
- break;
- case VIDIOCSPICT:
- retval = ioctl_set_image_prop(arg, cam);
- break;
- case VIDIOCGWIN: /* get/set capture window */
- memcpy(arg, &cam->vw, sizeof(struct video_window));
- break;
- case VIDIOCSWIN:
- retval = ioctl_set_window_size(arg, cam, file->private_data);
- break;
- case VIDIOCGMBUF: /* mmap interface */
- retval = ioctl_get_mbuf(arg, cam);
- break;
- case VIDIOCMCAPTURE:
- retval = ioctl_mcapture(arg, cam, file->private_data);
- break;
- case VIDIOCSYNC:
- retval = ioctl_sync(arg, cam);
- break;
- /* pointless to implement overlay with this camera */
- case VIDIOCCAPTURE:
- case VIDIOCGFBUF:
- case VIDIOCSFBUF:
- case VIDIOCKEY:
- retval = -EINVAL;
- break;
-
- /* tuner interface - we have none */
- case VIDIOCGTUNER:
- case VIDIOCSTUNER:
- case VIDIOCGFREQ:
- case VIDIOCSFREQ:
- retval = -EINVAL;
- break;
-
- /* audio interface - we have none */
- case VIDIOCGAUDIO:
- case VIDIOCSAUDIO:
- retval = -EINVAL;
- break;
-
/* CPIA2 extension to Video4Linux API */
case CPIA2_IOC_SET_GPIO:
retval = ioctl_set_gpio(arg, cam);
break;
+#ifdef CONFIG_VIDEO_V4L1_COMPAT
+ case VIDIOCGMBUF: /* mmap interface */
+ retval = ioctl_get_mbuf(arg, cam);
+ break;
+#endif
case VIDIOC_QUERYCAP:
retval = ioctl_querycap(arg,cam);
break;
@@ -1874,21 +1608,8 @@ static int cpia2_mmap(struct file *file, struct vm_area_struct *area)
*****************************************************************************/
static void reset_camera_struct_v4l(struct camera_data *cam)
{
- /***
- * Fill in the v4l structures. video_cap is filled in inside the VIDIOCCAP
- * Ioctl. Here, just do the window and picture stucts.
- ***/
- cam->vp.palette = (u16) VIDEO_PALETTE_RGB24; /* Is this right? */
- cam->vp.brightness = (u16) cam->params.color_params.brightness * 256;
- cam->vp.colour = (u16) cam->params.color_params.saturation * 256;
- cam->vp.contrast = (u16) cam->params.color_params.contrast * 256;
-
- cam->vw.x = 0;
- cam->vw.y = 0;
- cam->vw.width = cam->params.roi.width;
- cam->vw.height = cam->params.roi.height;
- cam->vw.flags = 0;
- cam->vw.clipcount = 0;
+ cam->width = cam->params.roi.width;
+ cam->height = cam->params.roi.height;
cam->frame_size = buffer_size;
cam->num_frames = num_buffers;
@@ -1902,13 +1623,12 @@ static void reset_camera_struct_v4l(struct camera_data *cam)
cam->pixelformat = V4L2_PIX_FMT_JPEG;
v4l2_prio_init(&cam->prio);
- return;
}
/***
* The v4l video device structure initialized for this device
***/
-static const struct v4l2_file_operations fops_template = {
+static const struct v4l2_file_operations cpia2_fops = {
.owner = THIS_MODULE,
.open = cpia2_open,
.release = cpia2_close,
@@ -1920,9 +1640,9 @@ static const struct v4l2_file_operations fops_template = {
static struct video_device cpia2_template = {
/* I could not find any place for the old .initialize initializer?? */
- .name= "CPiA2 Camera",
- .fops= &fops_template,
- .release= video_device_release,
+ .name = "CPiA2 Camera",
+ .fops = &cpia2_fops,
+ .release = video_device_release,
};
/******************************************************************************
diff --git a/drivers/media/video/cpia2/cpia2dev.h b/drivers/media/video/cpia2/cpia2dev.h
index d58097ce0d5e..f66691fe5a35 100644
--- a/drivers/media/video/cpia2/cpia2dev.h
+++ b/drivers/media/video/cpia2/cpia2dev.h
@@ -29,14 +29,14 @@
#ifndef CPIA2_DEV_HEADER
#define CPIA2_DEV_HEADER
-#include <linux/videodev.h>
+#include <linux/videodev2.h>
/***
* The following defines are ioctl numbers based on video4linux private ioctls,
* which can range from 192 (BASE_VIDIOCPRIVATE) to 255. All of these take int
* args
*/
-#define CPIA2_IOC_SET_GPIO _IOW('v', BASE_VIDIOCPRIVATE + 17, __u32)
+#define CPIA2_IOC_SET_GPIO _IOW('v', BASE_VIDIOC_PRIVATE + 17, __u32)
/* V4L2 driver specific controls */
#define CPIA2_CID_TARGET_KB (V4L2_CID_PRIVATE_BASE+0)
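The help text above describes how the CPIA2 private ioctl is built on top of BASE_VIDIOC_PRIVATE with _IOW. A minimal user-space sketch of issuing it follows; the /dev/video0 path and the GPIO value are illustrative assumptions, and BASE_VIDIOC_PRIVATE is taken from the linux/videodev2.h headers of this era.

/* sketch: drive the CPIA2 private GPIO ioctl from user space */
#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <linux/videodev2.h>

#define CPIA2_IOC_SET_GPIO _IOW('v', BASE_VIDIOC_PRIVATE + 17, __u32)

int main(void)
{
	__u32 gpio_val = 0x01;			/* example value only */
	int fd = open("/dev/video0", O_RDWR);	/* assumed cpia2 node */

	if (fd < 0) {
		perror("open");
		return 1;
	}
	if (ioctl(fd, CPIA2_IOC_SET_GPIO, &gpio_val) < 0)
		perror("CPIA2_IOC_SET_GPIO");
	close(fd);
	return 0;
}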
diff --git a/drivers/media/video/cs5345.c b/drivers/media/video/cs5345.c
index 8362db509e2c..9358fe77e562 100644
--- a/drivers/media/video/cs5345.c
+++ b/drivers/media/video/cs5345.c
@@ -25,7 +25,6 @@
#include <linux/slab.h>
#include <media/v4l2-device.h>
#include <media/v4l2-chip-ident.h>
-#include <media/v4l2-i2c-drv.h>
MODULE_DESCRIPTION("i2c device driver for cs5345 Audio ADC");
MODULE_AUTHOR("Hans Verkuil");
@@ -209,9 +208,25 @@ static const struct i2c_device_id cs5345_id[] = {
};
MODULE_DEVICE_TABLE(i2c, cs5345_id);
-static struct v4l2_i2c_driver_data v4l2_i2c_data = {
- .name = "cs5345",
- .probe = cs5345_probe,
- .remove = cs5345_remove,
- .id_table = cs5345_id,
+static struct i2c_driver cs5345_driver = {
+ .driver = {
+ .owner = THIS_MODULE,
+ .name = "cs5345",
+ },
+ .probe = cs5345_probe,
+ .remove = cs5345_remove,
+ .id_table = cs5345_id,
};
+
+static __init int init_cs5345(void)
+{
+ return i2c_add_driver(&cs5345_driver);
+}
+
+static __exit void exit_cs5345(void)
+{
+ i2c_del_driver(&cs5345_driver);
+}
+
+module_init(init_cs5345);
+module_exit(exit_cs5345);
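The cs5345 and cs53l32a conversions follow the same pattern: drop the v4l2-i2c-drv wrapper, declare a plain struct i2c_driver, and register it with an explicit init/exit pair. As a side note, not part of this series: on kernels that later gained the module_i2c_driver() helper, that boilerplate collapses to a single line, roughly as sketched below (the _sketch name is hypothetical).

#include <linux/module.h>
#include <linux/i2c.h>

static struct i2c_driver cs5345_driver_sketch = {
	.driver = {
		.name = "cs5345",
	},
	/* .probe, .remove and .id_table would be filled in as above */
};

/* expands to the same module_init()/module_exit() registration pair */
module_i2c_driver(cs5345_driver_sketch);

MODULE_LICENSE("GPL");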
diff --git a/drivers/media/video/cs53l32a.c b/drivers/media/video/cs53l32a.c
index cc9e84d75ea7..d93e5ab45fd3 100644
--- a/drivers/media/video/cs53l32a.c
+++ b/drivers/media/video/cs53l32a.c
@@ -30,7 +30,6 @@
#include <media/v4l2-device.h>
#include <media/v4l2-chip-ident.h>
#include <media/v4l2-ctrls.h>
-#include <media/v4l2-i2c-drv.h>
MODULE_DESCRIPTION("i2c device driver for cs53l32a Audio ADC");
MODULE_AUTHOR("Martin Vaughan");
@@ -239,9 +238,25 @@ static const struct i2c_device_id cs53l32a_id[] = {
};
MODULE_DEVICE_TABLE(i2c, cs53l32a_id);
-static struct v4l2_i2c_driver_data v4l2_i2c_data = {
- .name = "cs53l32a",
- .remove = cs53l32a_remove,
- .probe = cs53l32a_probe,
- .id_table = cs53l32a_id,
+static struct i2c_driver cs53l32a_driver = {
+ .driver = {
+ .owner = THIS_MODULE,
+ .name = "cs53l32a",
+ },
+ .probe = cs53l32a_probe,
+ .remove = cs53l32a_remove,
+ .id_table = cs53l32a_id,
};
+
+static __init int init_cs53l32a(void)
+{
+ return i2c_add_driver(&cs53l32a_driver);
+}
+
+static __exit void exit_cs53l32a(void)
+{
+ i2c_del_driver(&cs53l32a_driver);
+}
+
+module_init(init_cs53l32a);
+module_exit(exit_cs53l32a);
diff --git a/drivers/media/video/cx18/cx18-driver.h b/drivers/media/video/cx18/cx18-driver.h
index 9bc51a99376b..77be58c1096b 100644
--- a/drivers/media/video/cx18/cx18-driver.h
+++ b/drivers/media/video/cx18/cx18-driver.h
@@ -674,18 +674,25 @@ static inline int cx18_raw_vbi(const struct cx18 *cx)
/* Call the specified callback for all subdevs with a grp_id bit matching the
* mask in hw (if 0, then match them all). Ignore any errors. */
-#define cx18_call_hw(cx, hw, o, f, args...) \
- __v4l2_device_call_subdevs(&(cx)->v4l2_dev, \
- !(hw) || (sd->grp_id & (hw)), o, f , ##args)
+#define cx18_call_hw(cx, hw, o, f, args...) \
+ do { \
+ struct v4l2_subdev *__sd; \
+ __v4l2_device_call_subdevs_p(&(cx)->v4l2_dev, __sd, \
+ !(hw) || (__sd->grp_id & (hw)), o, f , ##args); \
+ } while (0)
#define cx18_call_all(cx, o, f, args...) cx18_call_hw(cx, 0, o, f , ##args)
/* Call the specified callback for all subdevs with a grp_id bit matching the
* mask in hw (if 0, then match them all). If the callback returns an error
* other than 0 or -ENOIOCTLCMD, then return with that error code. */
-#define cx18_call_hw_err(cx, hw, o, f, args...) \
- __v4l2_device_call_subdevs_until_err( \
- &(cx)->v4l2_dev, !(hw) || (sd->grp_id & (hw)), o, f , ##args)
+#define cx18_call_hw_err(cx, hw, o, f, args...) \
+({ \
+ struct v4l2_subdev *__sd; \
+ __v4l2_device_call_subdevs_until_err_p(&(cx)->v4l2_dev, \
+ __sd, !(hw) || (__sd->grp_id & (hw)), o, f, \
+ ##args); \
+})
#define cx18_call_all_err(cx, o, f, args...) \
cx18_call_hw_err(cx, 0, o, f , ##args)
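The rewritten macros must declare the subdev iterator themselves because the _p helper variants expect the caller to provide it; wrapping the call in its own block keeps __sd out of the caller's scope. A standalone, simplified model of the same "call every subdev whose grp_id matches the mask" idea is sketched below; the names are illustrative and not the real V4L2 helpers.

#include <stdio.h>

struct subdev {
	unsigned int grp_id;
	const char *name;
};

/* visit every subdev whose grp_id matches the mask (mask 0 matches all) */
#define for_each_matching_subdev(sd, list, n, mask)			\
	for ((sd) = (list); (sd) < (list) + (n); (sd)++)		\
		if (!(mask) || ((sd)->grp_id & (mask)))

/* analogue of cx18_call_hw(): the iterator lives in the macro's own block */
#define call_hw(list, n, mask, op)					\
	do {								\
		struct subdev *__sd;					\
		for_each_matching_subdev(__sd, list, n, mask)		\
			op(__sd);					\
	} while (0)

static void log_status(struct subdev *sd)
{
	printf("%s: status ok\n", sd->name);
}

int main(void)
{
	struct subdev subs[] = { { 0x1, "tuner" }, { 0x4, "cs5345" } };

	call_hw(subs, 2, 0x4, log_status);	/* only the cs5345 subdev */
	call_hw(subs, 2, 0, log_status);	/* mask 0 visits them all */
	return 0;
}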
diff --git a/drivers/media/video/cx18/cx18-i2c.c b/drivers/media/video/cx18/cx18-i2c.c
index 73ce90c2f577..e71a026f3419 100644
--- a/drivers/media/video/cx18/cx18-i2c.c
+++ b/drivers/media/video/cx18/cx18-i2c.c
@@ -71,19 +71,6 @@ static const u8 hw_bus[] = {
};
/* This array should match the CX18_HW_ defines */
-static const char * const hw_modules[] = {
- "tuner", /* CX18_HW_TUNER */
- NULL, /* CX18_HW_TVEEPROM */
- "cs5345", /* CX18_HW_CS5345 */
- NULL, /* CX18_HW_DVB */
- NULL, /* CX18_HW_418_AV */
- NULL, /* CX18_HW_GPIO_MUX */
- NULL, /* CX18_HW_GPIO_RESET_CTRL */
- NULL, /* CX18_HW_Z8F0811_IR_TX_HAUP */
- NULL, /* CX18_HW_Z8F0811_IR_RX_HAUP */
-};
-
-/* This array should match the CX18_HW_ defines */
static const char * const hw_devicenames[] = {
"tuner",
"tveeprom",
@@ -126,7 +113,6 @@ int cx18_i2c_register(struct cx18 *cx, unsigned idx)
struct v4l2_subdev *sd;
int bus = hw_bus[idx];
struct i2c_adapter *adap = &cx->i2c_adap[bus];
- const char *mod = hw_modules[idx];
const char *type = hw_devicenames[idx];
u32 hw = 1 << idx;
@@ -136,15 +122,15 @@ int cx18_i2c_register(struct cx18 *cx, unsigned idx)
if (hw == CX18_HW_TUNER) {
/* special tuner group handling */
sd = v4l2_i2c_new_subdev(&cx->v4l2_dev,
- adap, mod, type, 0, cx->card_i2c->radio);
+ adap, type, 0, cx->card_i2c->radio);
if (sd != NULL)
sd->grp_id = hw;
sd = v4l2_i2c_new_subdev(&cx->v4l2_dev,
- adap, mod, type, 0, cx->card_i2c->demod);
+ adap, type, 0, cx->card_i2c->demod);
if (sd != NULL)
sd->grp_id = hw;
sd = v4l2_i2c_new_subdev(&cx->v4l2_dev,
- adap, mod, type, 0, cx->card_i2c->tv);
+ adap, type, 0, cx->card_i2c->tv);
if (sd != NULL)
sd->grp_id = hw;
return sd != NULL ? 0 : -1;
@@ -158,7 +144,8 @@ int cx18_i2c_register(struct cx18 *cx, unsigned idx)
return -1;
/* It's an I2C device other than an analog tuner or IR chip */
- sd = v4l2_i2c_new_subdev(&cx->v4l2_dev, adap, mod, type, hw_addrs[idx], NULL);
+ sd = v4l2_i2c_new_subdev(&cx->v4l2_dev, adap, type, hw_addrs[idx],
+ NULL);
if (sd != NULL)
sd->grp_id = hw;
return sd != NULL ? 0 : -1;
diff --git a/drivers/media/video/cx18/cx18-ioctl.c b/drivers/media/video/cx18/cx18-ioctl.c
index d6792405f8d3..7150195740dc 100644
--- a/drivers/media/video/cx18/cx18-ioctl.c
+++ b/drivers/media/video/cx18/cx18-ioctl.c
@@ -40,7 +40,6 @@
#include "cx18-av-core.h"
#include <media/tveeprom.h>
#include <media/v4l2-chip-ident.h>
-#include <linux/i2c-id.h>
u16 cx18_service2vbi(int type)
{
diff --git a/drivers/media/video/cx231xx/Kconfig b/drivers/media/video/cx231xx/Kconfig
index 5ac7eceececa..bb04914983fd 100644
--- a/drivers/media/video/cx231xx/Kconfig
+++ b/drivers/media/video/cx231xx/Kconfig
@@ -6,6 +6,7 @@ config VIDEO_CX231XX
depends on VIDEO_IR
select VIDEOBUF_VMALLOC
select VIDEO_CX25840
+ select VIDEO_CX2341X
---help---
This is a video4linux driver for Conexant 231xx USB based TV cards.
diff --git a/drivers/media/video/cx231xx/Makefile b/drivers/media/video/cx231xx/Makefile
index 6f2b57384488..a6bc4cc54677 100644
--- a/drivers/media/video/cx231xx/Makefile
+++ b/drivers/media/video/cx231xx/Makefile
@@ -1,5 +1,5 @@
cx231xx-objs := cx231xx-video.o cx231xx-i2c.o cx231xx-cards.o cx231xx-core.o \
- cx231xx-avcore.o cx231xx-pcb-cfg.o cx231xx-vbi.o
+ cx231xx-avcore.o cx231xx-417.o cx231xx-pcb-cfg.o cx231xx-vbi.o
cx231xx-alsa-objs := cx231xx-audio.o
diff --git a/drivers/media/video/cx231xx/cx231xx-417.c b/drivers/media/video/cx231xx/cx231xx-417.c
new file mode 100644
index 000000000000..4c7cac3b6254
--- /dev/null
+++ b/drivers/media/video/cx231xx/cx231xx-417.c
@@ -0,0 +1,2192 @@
+/*
+ *
+ * Support for a cx23417 mpeg encoder via cx231xx host port.
+ *
+ * (c) 2004 Jelle Foks <jelle@foks.us>
+ * (c) 2004 Gerd Knorr <kraxel@bytesex.org>
+ * (c) 2008 Steven Toth <stoth@linuxtv.org>
+ * - CX23885/7/8 support
+ *
+ * Includes parts from the ivtv driver( http://ivtv.sourceforge.net/),
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ */
+
+#include <linux/module.h>
+#include <linux/moduleparam.h>
+#include <linux/init.h>
+#include <linux/fs.h>
+#include <linux/delay.h>
+#include <linux/device.h>
+#include <linux/firmware.h>
+#include <linux/vmalloc.h>
+#include <media/v4l2-common.h>
+#include <media/v4l2-ioctl.h>
+#include <media/cx2341x.h>
+#include <linux/usb.h>
+
+#include "cx231xx.h"
+/*#include "cx23885-ioctl.h"*/
+
+#define CX231xx_FIRM_IMAGE_SIZE 376836
+#define CX231xx_FIRM_IMAGE_NAME "v4l-cx23885-enc.fw"
+
+/* for polaris ITVC */
+#define ITVC_WRITE_DIR 0x03FDFC00
+#define ITVC_READ_DIR 0x0001FC00
+
+#define MCI_MEMORY_DATA_BYTE0 0x00
+#define MCI_MEMORY_DATA_BYTE1 0x08
+#define MCI_MEMORY_DATA_BYTE2 0x10
+#define MCI_MEMORY_DATA_BYTE3 0x18
+
+#define MCI_MEMORY_ADDRESS_BYTE2 0x20
+#define MCI_MEMORY_ADDRESS_BYTE1 0x28
+#define MCI_MEMORY_ADDRESS_BYTE0 0x30
+
+#define MCI_REGISTER_DATA_BYTE0 0x40
+#define MCI_REGISTER_DATA_BYTE1 0x48
+#define MCI_REGISTER_DATA_BYTE2 0x50
+#define MCI_REGISTER_DATA_BYTE3 0x58
+
+#define MCI_REGISTER_ADDRESS_BYTE0 0x60
+#define MCI_REGISTER_ADDRESS_BYTE1 0x68
+
+#define MCI_REGISTER_MODE 0x70
+
+/* Read and write modes for polaris ITVC */
+#define MCI_MODE_REGISTER_READ 0x000
+#define MCI_MODE_REGISTER_WRITE 0x100
+#define MCI_MODE_MEMORY_READ 0x000
+#define MCI_MODE_MEMORY_WRITE 0x4000
+
+static unsigned int mpegbufs = 8;
+module_param(mpegbufs, int, 0644);
+MODULE_PARM_DESC(mpegbufs, "number of mpeg buffers, range 2-32");
+static unsigned int mpeglines = 128;
+module_param(mpeglines, int, 0644);
+MODULE_PARM_DESC(mpeglines, "number of lines in an MPEG buffer, range 2-32");
+static unsigned int mpeglinesize = 512;
+module_param(mpeglinesize, int, 0644);
+MODULE_PARM_DESC(mpeglinesize,
+ "number of bytes in each line of an MPEG buffer, range 512-1024");
+
+static unsigned int v4l_debug = 1;
+module_param(v4l_debug, int, 0644);
+MODULE_PARM_DESC(v4l_debug, "enable V4L debug messages");
+struct cx231xx_dmaqueue *dma_qq;
+#define dprintk(level, fmt, arg...)\
+ do { if (v4l_debug >= level) \
+ printk(KERN_INFO "%s: " fmt, \
+ (dev) ? dev->name : "cx231xx[?]", ## arg); \
+ } while (0)
+
+static struct cx231xx_tvnorm cx231xx_tvnorms[] = {
+ {
+ .name = "NTSC-M",
+ .id = V4L2_STD_NTSC_M,
+ }, {
+ .name = "NTSC-JP",
+ .id = V4L2_STD_NTSC_M_JP,
+ }, {
+ .name = "PAL-BG",
+ .id = V4L2_STD_PAL_BG,
+ }, {
+ .name = "PAL-DK",
+ .id = V4L2_STD_PAL_DK,
+ }, {
+ .name = "PAL-I",
+ .id = V4L2_STD_PAL_I,
+ }, {
+ .name = "PAL-M",
+ .id = V4L2_STD_PAL_M,
+ }, {
+ .name = "PAL-N",
+ .id = V4L2_STD_PAL_N,
+ }, {
+ .name = "PAL-Nc",
+ .id = V4L2_STD_PAL_Nc,
+ }, {
+ .name = "PAL-60",
+ .id = V4L2_STD_PAL_60,
+ }, {
+ .name = "SECAM-L",
+ .id = V4L2_STD_SECAM_L,
+ }, {
+ .name = "SECAM-DK",
+ .id = V4L2_STD_SECAM_DK,
+ }
+};
+
+/* ------------------------------------------------------------------ */
+enum cx231xx_capture_type {
+ CX231xx_MPEG_CAPTURE,
+ CX231xx_RAW_CAPTURE,
+ CX231xx_RAW_PASSTHRU_CAPTURE
+};
+enum cx231xx_capture_bits {
+ CX231xx_RAW_BITS_NONE = 0x00,
+ CX231xx_RAW_BITS_YUV_CAPTURE = 0x01,
+ CX231xx_RAW_BITS_PCM_CAPTURE = 0x02,
+ CX231xx_RAW_BITS_VBI_CAPTURE = 0x04,
+ CX231xx_RAW_BITS_PASSTHRU_CAPTURE = 0x08,
+ CX231xx_RAW_BITS_TO_HOST_CAPTURE = 0x10
+};
+enum cx231xx_capture_end {
+ CX231xx_END_AT_GOP, /* stop at the end of gop, generate irq */
+ CX231xx_END_NOW, /* stop immediately, no irq */
+};
+enum cx231xx_framerate {
+ CX231xx_FRAMERATE_NTSC_30, /* NTSC: 30fps */
+ CX231xx_FRAMERATE_PAL_25 /* PAL: 25fps */
+};
+enum cx231xx_stream_port {
+ CX231xx_OUTPUT_PORT_MEMORY,
+ CX231xx_OUTPUT_PORT_STREAMING,
+ CX231xx_OUTPUT_PORT_SERIAL
+};
+enum cx231xx_data_xfer_status {
+ CX231xx_MORE_BUFFERS_FOLLOW,
+ CX231xx_LAST_BUFFER,
+};
+enum cx231xx_picture_mask {
+ CX231xx_PICTURE_MASK_NONE,
+ CX231xx_PICTURE_MASK_I_FRAMES,
+ CX231xx_PICTURE_MASK_I_P_FRAMES = 0x3,
+ CX231xx_PICTURE_MASK_ALL_FRAMES = 0x7,
+};
+enum cx231xx_vbi_mode_bits {
+ CX231xx_VBI_BITS_SLICED,
+ CX231xx_VBI_BITS_RAW,
+};
+enum cx231xx_vbi_insertion_bits {
+ CX231xx_VBI_BITS_INSERT_IN_XTENSION_USR_DATA,
+ CX231xx_VBI_BITS_INSERT_IN_PRIVATE_PACKETS = 0x1 << 1,
+ CX231xx_VBI_BITS_SEPARATE_STREAM = 0x2 << 1,
+ CX231xx_VBI_BITS_SEPARATE_STREAM_USR_DATA = 0x4 << 1,
+ CX231xx_VBI_BITS_SEPARATE_STREAM_PRV_DATA = 0x5 << 1,
+};
+enum cx231xx_dma_unit {
+ CX231xx_DMA_BYTES,
+ CX231xx_DMA_FRAMES,
+};
+enum cx231xx_dma_transfer_status_bits {
+ CX231xx_DMA_TRANSFER_BITS_DONE = 0x01,
+ CX231xx_DMA_TRANSFER_BITS_ERROR = 0x04,
+ CX231xx_DMA_TRANSFER_BITS_LL_ERROR = 0x10,
+};
+enum cx231xx_pause {
+ CX231xx_PAUSE_ENCODING,
+ CX231xx_RESUME_ENCODING,
+};
+enum cx231xx_copyright {
+ CX231xx_COPYRIGHT_OFF,
+ CX231xx_COPYRIGHT_ON,
+};
+enum cx231xx_notification_type {
+ CX231xx_NOTIFICATION_REFRESH,
+};
+enum cx231xx_notification_status {
+ CX231xx_NOTIFICATION_OFF,
+ CX231xx_NOTIFICATION_ON,
+};
+enum cx231xx_notification_mailbox {
+ CX231xx_NOTIFICATION_NO_MAILBOX = -1,
+};
+enum cx231xx_field1_lines {
+ CX231xx_FIELD1_SAA7114 = 0x00EF, /* 239 */
+ CX231xx_FIELD1_SAA7115 = 0x00F0, /* 240 */
+ CX231xx_FIELD1_MICRONAS = 0x0105, /* 261 */
+};
+enum cx231xx_field2_lines {
+ CX231xx_FIELD2_SAA7114 = 0x00EF, /* 239 */
+ CX231xx_FIELD2_SAA7115 = 0x00F0, /* 240 */
+ CX231xx_FIELD2_MICRONAS = 0x0106, /* 262 */
+};
+enum cx231xx_custom_data_type {
+ CX231xx_CUSTOM_EXTENSION_USR_DATA,
+ CX231xx_CUSTOM_PRIVATE_PACKET,
+};
+enum cx231xx_mute {
+ CX231xx_UNMUTE,
+ CX231xx_MUTE,
+};
+enum cx231xx_mute_video_mask {
+ CX231xx_MUTE_VIDEO_V_MASK = 0x0000FF00,
+ CX231xx_MUTE_VIDEO_U_MASK = 0x00FF0000,
+ CX231xx_MUTE_VIDEO_Y_MASK = 0xFF000000,
+};
+enum cx231xx_mute_video_shift {
+ CX231xx_MUTE_VIDEO_V_SHIFT = 8,
+ CX231xx_MUTE_VIDEO_U_SHIFT = 16,
+ CX231xx_MUTE_VIDEO_Y_SHIFT = 24,
+};
+
+/* defines below are from ivtv-driver.h */
+#define IVTV_CMD_HW_BLOCKS_RST 0xFFFFFFFF
+
+/* Firmware API commands */
+#define IVTV_API_STD_TIMEOUT 500
+
+/* Registers */
+/* IVTV_REG_OFFSET */
+#define IVTV_REG_ENC_SDRAM_REFRESH (0x07F8)
+#define IVTV_REG_ENC_SDRAM_PRECHARGE (0x07FC)
+#define IVTV_REG_SPU (0x9050)
+#define IVTV_REG_HW_BLOCKS (0x9054)
+#define IVTV_REG_VPU (0x9058)
+#define IVTV_REG_APU (0xA064)
+
+/*
+ * Bit definitions for MC417_RWD and MC417_OEN registers
+ *
+ * bits 31-16
+ *+-----------+
+ *| Reserved  |
+ *+-----------+
+ *   bit 15  bit 14  bit 13  bit 12  bit 11  bit 10  bit 9   bit 8
+ *+-------+-------+-------+-------+-------+-------+-------+-------+
+ *| MIWR# | MIRD# | MICS# |MIRDY# |MIADDR3|MIADDR2|MIADDR1|MIADDR0|
+ *+-------+-------+-------+-------+-------+-------+-------+-------+
+ *   bit 7   bit 6   bit 5   bit 4   bit 3   bit 2   bit 1   bit 0
+ *+-------+-------+-------+-------+-------+-------+-------+-------+
+ *|MIDATA7|MIDATA6|MIDATA5|MIDATA4|MIDATA3|MIDATA2|MIDATA1|MIDATA0|
+ *+-------+-------+-------+-------+-------+-------+-------+-------+
+ */
+#define MC417_MIWR 0x8000
+#define MC417_MIRD 0x4000
+#define MC417_MICS 0x2000
+#define MC417_MIRDY 0x1000
+#define MC417_MIADDR 0x0F00
+#define MC417_MIDATA 0x00FF
+
+
+/* Bit definitions for MC417_CTL register ****
+ *bits 31-6 bits 5-4 bit 3 bits 2-1 Bit 0
+ *+--------+-------------+--------+--------------+------------+
+ *|Reserved|MC417_SPD_CTL|Reserved|MC417_GPIO_SEL|UART_GPIO_EN|
+ *+--------+-------------+--------+--------------+------------+
+ */
+#define MC417_SPD_CTL(x) (((x) << 4) & 0x00000030)
+#define MC417_GPIO_SEL(x) (((x) << 1) & 0x00000006)
+#define MC417_UART_GPIO_EN 0x00000001
+
+/* Values for speed control */
+#define MC417_SPD_CTL_SLOW 0x1
+#define MC417_SPD_CTL_MEDIUM 0x0
+#define MC417_SPD_CTL_FAST 0x3 /* b'1x, but we use b'11 */
+
+/* Values for GPIO select */
+#define MC417_GPIO_SEL_GPIO3 0x3
+#define MC417_GPIO_SEL_GPIO2 0x2
+#define MC417_GPIO_SEL_GPIO1 0x1
+#define MC417_GPIO_SEL_GPIO0 0x0
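A quick standalone check of the MC417_CTL field encodings defined above; only the three macros are copied from the patch, and the expected composite value is worked out by hand.

#include <assert.h>

#define MC417_SPD_CTL(x)	(((x) << 4) & 0x00000030)
#define MC417_GPIO_SEL(x)	(((x) << 1) & 0x00000006)
#define MC417_UART_GPIO_EN	0x00000001

int main(void)
{
	/* fast speed (0x3), GPIO2 (0x2), UART GPIO enabled */
	unsigned int ctl = MC417_SPD_CTL(0x3) | MC417_GPIO_SEL(0x2) |
			   MC417_UART_GPIO_EN;

	assert(ctl == 0x35);	/* 0x30 | 0x04 | 0x01 */
	return 0;
}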
+
+
+#define CX23417_GPIO_MASK 0xFC0003FF
+static int setITVCReg(struct cx231xx *dev, u32 gpio_direction, u32 value)
+{
+ int status = 0;
+ u32 _gpio_direction = 0;
+
+ _gpio_direction = _gpio_direction & CX23417_GPIO_MASK;
+ _gpio_direction = _gpio_direction|gpio_direction;
+ status = cx231xx_send_gpio_cmd(dev, _gpio_direction,
+ (u8 *)&value, 4, 0, 0);
+ return status;
+}
+static int getITVCReg(struct cx231xx *dev, u32 gpio_direction, u32 *pValue)
+{
+ int status = 0;
+ u32 _gpio_direction = 0;
+
+ _gpio_direction = _gpio_direction & CX23417_GPIO_MASK;
+ _gpio_direction = _gpio_direction|gpio_direction;
+
+ status = cx231xx_send_gpio_cmd(dev, _gpio_direction,
+ (u8 *)pValue, 4, 0, 1);
+ return status;
+}
+
+static int waitForMciComplete(struct cx231xx *dev)
+{
+ u32 gpio;
+ u32 gpio_driection = 0;
+ u8 count = 0;
+ getITVCReg(dev, gpio_driection, &gpio);
+
+ while (!(gpio&0x020000)) {
+ msleep(10);
+
+ getITVCReg(dev, gpio_driection, &gpio);
+
+ if (count++ > 100) {
+ dprintk(3, "ERROR: Timeout - gpio=%x\n", gpio);
+ return -1;
+ }
+ }
+ return 0;
+}
+
+static int mc417_register_write(struct cx231xx *dev, u16 address, u32 value)
+{
+ u32 temp;
+ int status = 0;
+
+ temp = 0x82|MCI_REGISTER_DATA_BYTE0|((value&0x000000FF)<<8);
+ temp = temp<<10;
+ status = setITVCReg(dev, ITVC_WRITE_DIR, temp);
+ if (status < 0)
+ return status;
+ temp = temp|((0x05)<<10);
+ setITVCReg(dev, ITVC_WRITE_DIR, temp);
+
+ /*write data byte 1;*/
+ temp = 0x82|MCI_REGISTER_DATA_BYTE1|(value&0x0000FF00);
+ temp = temp<<10;
+ setITVCReg(dev, ITVC_WRITE_DIR, temp);
+ temp = temp|((0x05)<<10);
+ setITVCReg(dev, ITVC_WRITE_DIR, temp);
+
+ /*write data byte 2;*/
+ temp = 0x82|MCI_REGISTER_DATA_BYTE2|((value&0x00FF0000)>>8);
+ temp = temp<<10;
+ setITVCReg(dev, ITVC_WRITE_DIR, temp);
+ temp = temp|((0x05)<<10);
+ setITVCReg(dev, ITVC_WRITE_DIR, temp);
+
+ /*write data byte 3;*/
+ temp = 0x82|MCI_REGISTER_DATA_BYTE3|((value&0xFF000000)>>16);
+ temp = temp<<10;
+ setITVCReg(dev, ITVC_WRITE_DIR, temp);
+ temp = temp|((0x05)<<10);
+ setITVCReg(dev, ITVC_WRITE_DIR, temp);
+
+ /*write address byte 0;*/
+ temp = 0x82|MCI_REGISTER_ADDRESS_BYTE0|((address&0x000000FF)<<8);
+ temp = temp<<10;
+ setITVCReg(dev, ITVC_WRITE_DIR, temp);
+ temp = temp|((0x05)<<10);
+ setITVCReg(dev, ITVC_WRITE_DIR, temp);
+
+ /*write address byte 1;*/
+ temp = 0x82|MCI_REGISTER_ADDRESS_BYTE1|(address&0x0000FF00);
+ temp = temp<<10;
+ setITVCReg(dev, ITVC_WRITE_DIR, temp);
+ temp = temp|((0x05)<<10);
+ setITVCReg(dev, ITVC_WRITE_DIR, temp);
+
+ /*Write that the mode is write.*/
+ temp = 0x82 | MCI_REGISTER_MODE | MCI_MODE_REGISTER_WRITE;
+ temp = temp<<10;
+ setITVCReg(dev, ITVC_WRITE_DIR, temp);
+ temp = temp|((0x05)<<10);
+ setITVCReg(dev, ITVC_WRITE_DIR, temp);
+
+ return waitForMciComplete(dev);
+}
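Each step in the routine above builds one GPIO word the same way: control bits 0x82, the MCI lane select, the payload byte shifted into bits 15..8, everything shifted left by 10 to reach the bus pins, and then the word re-sent with 0x05 << 10 acting as the strobe. A small user-space sketch of that encoding (the sample data byte is arbitrary):

#include <assert.h>
#include <stdint.h>

#define MCI_REGISTER_DATA_BYTE0	0x40	/* lane select, as defined above */

int main(void)
{
	uint32_t value = 0xAB;		/* arbitrary low data byte */
	uint32_t word;

	word = 0x82 | MCI_REGISTER_DATA_BYTE0 | ((value & 0xFF) << 8);
	word <<= 10;			/* line the pattern up with the GPIO pins */
	assert(word == 0x2AF0800);
	word |= 0x05 << 10;		/* second write acts as the strobe */
	assert(word == 0x2AF1C00);
	return 0;
}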
+
+static int mc417_register_read(struct cx231xx *dev, u16 address, u32 *value)
+{
+ /*write address byte 0;*/
+ u32 temp;
+ u32 return_value = 0;
+ int ret = 0;
+
+ temp = 0x82 | MCI_REGISTER_ADDRESS_BYTE0 | ((address & 0x00FF) << 8);
+ temp = temp << 10;
+ setITVCReg(dev, ITVC_WRITE_DIR, temp);
+ temp = temp | ((0x05) << 10);
+ setITVCReg(dev, ITVC_WRITE_DIR, temp);
+
+ /*write address byte 1;*/
+ temp = 0x82 | MCI_REGISTER_ADDRESS_BYTE1 | (address & 0xFF00);
+ temp = temp << 10;
+ setITVCReg(dev, ITVC_WRITE_DIR, temp);
+ temp = temp | ((0x05) << 10);
+ setITVCReg(dev, ITVC_WRITE_DIR, temp);
+
+ /*write that the mode is read;*/
+ temp = 0x82 | MCI_REGISTER_MODE | MCI_MODE_REGISTER_READ;
+ temp = temp << 10;
+ setITVCReg(dev, ITVC_WRITE_DIR, temp);
+ temp = temp | ((0x05) << 10);
+ setITVCReg(dev, ITVC_WRITE_DIR, temp);
+
+ /*wait for the MIRDY line to be asserted ,
+ signalling that the read is done;*/
+ ret = waitForMciComplete(dev);
+
+ /*switch the DATA- GPIO to input mode;*/
+
+ /*Read data byte 0;*/
+ temp = (0x82 | MCI_REGISTER_DATA_BYTE0) << 10;
+ setITVCReg(dev, ITVC_READ_DIR, temp);
+ temp = ((0x81 | MCI_REGISTER_DATA_BYTE0) << 10);
+ setITVCReg(dev, ITVC_READ_DIR, temp);
+ getITVCReg(dev, ITVC_READ_DIR, &temp);
+ return_value |= ((temp & 0x03FC0000) >> 18);
+ setITVCReg(dev, ITVC_READ_DIR, (0x87 << 10));
+
+ /* Read data byte 1;*/
+ temp = (0x82 | MCI_REGISTER_DATA_BYTE1) << 10;
+ setITVCReg(dev, ITVC_READ_DIR, temp);
+ temp = ((0x81 | MCI_REGISTER_DATA_BYTE1) << 10);
+ setITVCReg(dev, ITVC_READ_DIR, temp);
+ getITVCReg(dev, ITVC_READ_DIR, &temp);
+
+ return_value |= ((temp & 0x03FC0000) >> 10);
+ setITVCReg(dev, ITVC_READ_DIR, (0x87 << 10));
+
+ /*Read data byte 2;*/
+ temp = (0x82 | MCI_REGISTER_DATA_BYTE2) << 10;
+ setITVCReg(dev, ITVC_READ_DIR, temp);
+ temp = ((0x81 | MCI_REGISTER_DATA_BYTE2) << 10);
+ setITVCReg(dev, ITVC_READ_DIR, temp);
+ getITVCReg(dev, ITVC_READ_DIR, &temp);
+ return_value |= ((temp & 0x03FC0000) >> 2);
+ setITVCReg(dev, ITVC_READ_DIR, (0x87 << 10));
+
+ /*Read data byte 3;*/
+ temp = (0x82 | MCI_REGISTER_DATA_BYTE3) << 10;
+ setITVCReg(dev, ITVC_READ_DIR, temp);
+ temp = ((0x81 | MCI_REGISTER_DATA_BYTE3) << 10);
+ setITVCReg(dev, ITVC_READ_DIR, temp);
+ getITVCReg(dev, ITVC_READ_DIR, &temp);
+ return_value |= ((temp & 0x03FC0000) << 6);
+ setITVCReg(dev, ITVC_READ_DIR, (0x87 << 10));
+
+ *value = return_value;
+
+
+ return ret;
+}
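On the read side each data byte comes back in bits 25..18 of the GPIO word and is shifted into its final position, >>18, >>10, >>2 and <<6 for bytes 0 through 3. The user-space sketch below reassembles 0x12345678 with exactly those shifts (the lane() helper simply fakes what getITVCReg() would return).

#include <assert.h>
#include <stdint.h>

/* pretend GPIO readback: the requested byte sits in bits 25..18 */
static uint32_t lane(uint32_t byte_val)
{
	return (byte_val & 0xFF) << 18;
}

int main(void)
{
	uint32_t value = 0;

	value |= (lane(0x78) & 0x03FC0000) >> 18;	/* data byte 0 */
	value |= (lane(0x56) & 0x03FC0000) >> 10;	/* data byte 1 */
	value |= (lane(0x34) & 0x03FC0000) >> 2;	/* data byte 2 */
	value |= (lane(0x12) & 0x03FC0000) << 6;	/* data byte 3 */

	assert(value == 0x12345678);
	return 0;
}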
+
+static int mc417_memory_write(struct cx231xx *dev, u32 address, u32 value)
+{
+ /*write data byte 0;*/
+
+ u32 temp;
+ int ret = 0;
+
+ temp = 0x82 | MCI_MEMORY_DATA_BYTE0|((value & 0x000000FF) << 8);
+ temp = temp << 10;
+ ret = setITVCReg(dev, ITVC_WRITE_DIR, temp);
+ if (ret < 0)
+ return ret;
+ temp = temp | ((0x05) << 10);
+ setITVCReg(dev, ITVC_WRITE_DIR, temp);
+
+ /*write data byte 1;*/
+ temp = 0x82 | MCI_MEMORY_DATA_BYTE1 | (value & 0x0000FF00);
+ temp = temp << 10;
+ setITVCReg(dev, ITVC_WRITE_DIR, temp);
+ temp = temp | ((0x05) << 10);
+ setITVCReg(dev, ITVC_WRITE_DIR, temp);
+
+ /*write data byte 2;*/
+ temp = 0x82|MCI_MEMORY_DATA_BYTE2|((value&0x00FF0000)>>8);
+ temp = temp<<10;
+ setITVCReg(dev, ITVC_WRITE_DIR, temp);
+ temp = temp|((0x05)<<10);
+ setITVCReg(dev, ITVC_WRITE_DIR, temp);
+
+ /*write data byte 3;*/
+ temp = 0x82|MCI_MEMORY_DATA_BYTE3|((value&0xFF000000)>>16);
+ temp = temp<<10;
+ setITVCReg(dev, ITVC_WRITE_DIR, temp);
+ temp = temp|((0x05)<<10);
+ setITVCReg(dev, ITVC_WRITE_DIR, temp);
+
+ /* write address byte 2;*/
+ temp = 0x82|MCI_MEMORY_ADDRESS_BYTE2 | MCI_MODE_MEMORY_WRITE |
+ ((address & 0x003F0000)>>8);
+ temp = temp<<10;
+ setITVCReg(dev, ITVC_WRITE_DIR, temp);
+ temp = temp|((0x05)<<10);
+ setITVCReg(dev, ITVC_WRITE_DIR, temp);
+
+ /* write address byte 1;*/
+ temp = 0x82|MCI_MEMORY_ADDRESS_BYTE1 | (address & 0xFF00);
+ temp = temp<<10;
+ setITVCReg(dev, ITVC_WRITE_DIR, temp);
+ temp = temp|((0x05)<<10);
+ setITVCReg(dev, ITVC_WRITE_DIR, temp);
+
+ /* write address byte 0;*/
+ temp = 0x82|MCI_MEMORY_ADDRESS_BYTE0|((address & 0x00FF)<<8);
+ temp = temp<<10;
+ setITVCReg(dev, ITVC_WRITE_DIR, temp);
+ temp = temp|((0x05)<<10);
+ setITVCReg(dev, ITVC_WRITE_DIR, temp);
+
+ /*wait for MIRDY line;*/
+ waitForMciComplete(dev);
+
+ return 0;
+}
+
+static int mc417_memory_read(struct cx231xx *dev, u32 address, u32 *value)
+{
+ u32 temp = 0;
+ u32 return_value = 0;
+ int ret = 0;
+
+ /*write address byte 2;*/
+ temp = 0x82|MCI_MEMORY_ADDRESS_BYTE2 | MCI_MODE_MEMORY_READ |
+ ((address & 0x003F0000)>>8);
+ temp = temp<<10;
+ ret = setITVCReg(dev, ITVC_WRITE_DIR, temp);
+ if (ret < 0)
+ return ret;
+ temp = temp|((0x05)<<10);
+ setITVCReg(dev, ITVC_WRITE_DIR, temp);
+
+ /*write address byte 1*/
+ temp = 0x82|MCI_MEMORY_ADDRESS_BYTE1 | (address & 0xFF00);
+ temp = temp<<10;
+ setITVCReg(dev, ITVC_WRITE_DIR, temp);
+ temp = temp|((0x05)<<10);
+ setITVCReg(dev, ITVC_WRITE_DIR, temp);
+
+ /*write address byte 0*/
+ temp = 0x82|MCI_MEMORY_ADDRESS_BYTE0 | ((address & 0x00FF)<<8);
+ temp = temp<<10;
+ setITVCReg(dev, ITVC_WRITE_DIR, temp);
+ temp = temp|((0x05)<<10);
+ setITVCReg(dev, ITVC_WRITE_DIR, temp);
+
+ /*Wait for MIRDY line*/
+ ret = waitForMciComplete(dev);
+
+
+ /*Read data byte 3;*/
+ temp = (0x82|MCI_MEMORY_DATA_BYTE3)<<10;
+ setITVCReg(dev, ITVC_READ_DIR, temp);
+ temp = ((0x81|MCI_MEMORY_DATA_BYTE3)<<10);
+ setITVCReg(dev, ITVC_READ_DIR, temp);
+ getITVCReg(dev, ITVC_READ_DIR, &temp);
+ return_value |= ((temp&0x03FC0000)<<6);
+ setITVCReg(dev, ITVC_READ_DIR, (0x87<<10));
+
+ /*Read data byte 2;*/
+ temp = (0x82|MCI_MEMORY_DATA_BYTE2)<<10;
+ setITVCReg(dev, ITVC_READ_DIR, temp);
+ temp = ((0x81|MCI_MEMORY_DATA_BYTE2)<<10);
+ setITVCReg(dev, ITVC_READ_DIR, temp);
+ getITVCReg(dev, ITVC_READ_DIR, &temp);
+ return_value |= ((temp&0x03FC0000)>>2);
+ setITVCReg(dev, ITVC_READ_DIR, (0x87<<10));
+
+ /* Read data byte 1;*/
+ temp = (0x82|MCI_MEMORY_DATA_BYTE1)<<10;
+ setITVCReg(dev, ITVC_READ_DIR, temp);
+ temp = ((0x81|MCI_MEMORY_DATA_BYTE1)<<10);
+ setITVCReg(dev, ITVC_READ_DIR, temp);
+ getITVCReg(dev, ITVC_READ_DIR, &temp);
+ return_value |= ((temp&0x03FC0000)>>10);
+ setITVCReg(dev, ITVC_READ_DIR, (0x87<<10));
+
+ /*Read data byte 0;*/
+ temp = (0x82|MCI_MEMORY_DATA_BYTE0)<<10;
+ setITVCReg(dev, ITVC_READ_DIR, temp);
+ temp = ((0x81|MCI_MEMORY_DATA_BYTE0)<<10);
+ setITVCReg(dev, ITVC_READ_DIR, temp);
+ getITVCReg(dev, ITVC_READ_DIR, &temp);
+ return_value |= ((temp&0x03FC0000)>>18);
+ setITVCReg(dev, ITVC_READ_DIR, (0x87<<10));
+
+ *value = return_value;
+ return ret;
+}
+
+/* ------------------------------------------------------------------ */
+
+/* MPEG encoder API */
+static char *cmd_to_str(int cmd)
+{
+ switch (cmd) {
+ case CX2341X_ENC_PING_FW:
+ return "PING_FW";
+ case CX2341X_ENC_START_CAPTURE:
+ return "START_CAPTURE";
+ case CX2341X_ENC_STOP_CAPTURE:
+ return "STOP_CAPTURE";
+ case CX2341X_ENC_SET_AUDIO_ID:
+ return "SET_AUDIO_ID";
+ case CX2341X_ENC_SET_VIDEO_ID:
+ return "SET_VIDEO_ID";
+ case CX2341X_ENC_SET_PCR_ID:
+ return "SET_PCR_PID";
+ case CX2341X_ENC_SET_FRAME_RATE:
+ return "SET_FRAME_RATE";
+ case CX2341X_ENC_SET_FRAME_SIZE:
+ return "SET_FRAME_SIZE";
+ case CX2341X_ENC_SET_BIT_RATE:
+ return "SET_BIT_RATE";
+ case CX2341X_ENC_SET_GOP_PROPERTIES:
+ return "SET_GOP_PROPERTIES";
+ case CX2341X_ENC_SET_ASPECT_RATIO:
+ return "SET_ASPECT_RATIO";
+ case CX2341X_ENC_SET_DNR_FILTER_MODE:
+ return "SET_DNR_FILTER_PROPS";
+ case CX2341X_ENC_SET_DNR_FILTER_PROPS:
+ return "SET_DNR_FILTER_PROPS";
+ case CX2341X_ENC_SET_CORING_LEVELS:
+ return "SET_CORING_LEVELS";
+ case CX2341X_ENC_SET_SPATIAL_FILTER_TYPE:
+ return "SET_SPATIAL_FILTER_TYPE";
+ case CX2341X_ENC_SET_VBI_LINE:
+ return "SET_VBI_LINE";
+ case CX2341X_ENC_SET_STREAM_TYPE:
+ return "SET_STREAM_TYPE";
+ case CX2341X_ENC_SET_OUTPUT_PORT:
+ return "SET_OUTPUT_PORT";
+ case CX2341X_ENC_SET_AUDIO_PROPERTIES:
+ return "SET_AUDIO_PROPERTIES";
+ case CX2341X_ENC_HALT_FW:
+ return "HALT_FW";
+ case CX2341X_ENC_GET_VERSION:
+ return "GET_VERSION";
+ case CX2341X_ENC_SET_GOP_CLOSURE:
+ return "SET_GOP_CLOSURE";
+ case CX2341X_ENC_GET_SEQ_END:
+ return "GET_SEQ_END";
+ case CX2341X_ENC_SET_PGM_INDEX_INFO:
+ return "SET_PGM_INDEX_INFO";
+ case CX2341X_ENC_SET_VBI_CONFIG:
+ return "SET_VBI_CONFIG";
+ case CX2341X_ENC_SET_DMA_BLOCK_SIZE:
+ return "SET_DMA_BLOCK_SIZE";
+ case CX2341X_ENC_GET_PREV_DMA_INFO_MB_10:
+ return "GET_PREV_DMA_INFO_MB_10";
+ case CX2341X_ENC_GET_PREV_DMA_INFO_MB_9:
+ return "GET_PREV_DMA_INFO_MB_9";
+ case CX2341X_ENC_SCHED_DMA_TO_HOST:
+ return "SCHED_DMA_TO_HOST";
+ case CX2341X_ENC_INITIALIZE_INPUT:
+ return "INITIALIZE_INPUT";
+ case CX2341X_ENC_SET_FRAME_DROP_RATE:
+ return "SET_FRAME_DROP_RATE";
+ case CX2341X_ENC_PAUSE_ENCODER:
+ return "PAUSE_ENCODER";
+ case CX2341X_ENC_REFRESH_INPUT:
+ return "REFRESH_INPUT";
+ case CX2341X_ENC_SET_COPYRIGHT:
+ return "SET_COPYRIGHT";
+ case CX2341X_ENC_SET_EVENT_NOTIFICATION:
+ return "SET_EVENT_NOTIFICATION";
+ case CX2341X_ENC_SET_NUM_VSYNC_LINES:
+ return "SET_NUM_VSYNC_LINES";
+ case CX2341X_ENC_SET_PLACEHOLDER:
+ return "SET_PLACEHOLDER";
+ case CX2341X_ENC_MUTE_VIDEO:
+ return "MUTE_VIDEO";
+ case CX2341X_ENC_MUTE_AUDIO:
+ return "MUTE_AUDIO";
+ case CX2341X_ENC_MISC:
+ return "MISC";
+ default:
+ return "UNKNOWN";
+ }
+}
+
+static int cx231xx_mbox_func(void *priv,
+ u32 command,
+ int in,
+ int out,
+ u32 data[CX2341X_MBOX_MAX_DATA])
+{
+ struct cx231xx *dev = priv;
+ unsigned long timeout;
+ u32 value, flag, retval = 0;
+ int i;
+
+ dprintk(3, "%s: command(0x%X) = %s\n", __func__, command,
+ cmd_to_str(command));
+
+ /* this may not be 100% safe if we can't read any memory location
+ without side effects */
+ mc417_memory_read(dev, dev->cx23417_mailbox - 4, &value);
+ if (value != 0x12345678) {
+ dprintk(3,
+ "Firmware and/or mailbox pointer not initialized "
+ "or corrupted, signature = 0x%x, cmd = %s\n", value,
+ cmd_to_str(command));
+ return -1;
+ }
+
+ /* This read looks at 32 bits, but flag is only 8 bits.
+ * Seems we also bail if CMD or TIMEOUT bytes are set???
+ */
+ mc417_memory_read(dev, dev->cx23417_mailbox, &flag);
+ if (flag) {
+ dprintk(3, "ERROR: Mailbox appears to be in use "
+ "(%x), cmd = %s\n", flag, cmd_to_str(command));
+ return -1;
+ }
+
+ flag |= 1; /* tell 'em we're working on it */
+ mc417_memory_write(dev, dev->cx23417_mailbox, flag);
+
+ /* write command + args + fill remaining with zeros */
+ /* command code */
+ mc417_memory_write(dev, dev->cx23417_mailbox + 1, command);
+ mc417_memory_write(dev, dev->cx23417_mailbox + 3,
+ IVTV_API_STD_TIMEOUT); /* timeout */
+ for (i = 0; i < in; i++) {
+ mc417_memory_write(dev, dev->cx23417_mailbox + 4 + i, data[i]);
+ dprintk(3, "API Input %d = %d\n", i, data[i]);
+ }
+ for (; i < CX2341X_MBOX_MAX_DATA; i++)
+ mc417_memory_write(dev, dev->cx23417_mailbox + 4 + i, 0);
+
+ flag |= 3; /* tell 'em we're done writing */
+ mc417_memory_write(dev, dev->cx23417_mailbox, flag);
+
+ /* wait for firmware to handle the API command */
+ timeout = jiffies + msecs_to_jiffies(10);
+ for (;;) {
+ mc417_memory_read(dev, dev->cx23417_mailbox, &flag);
+ if (0 != (flag & 4))
+ break;
+ if (time_after(jiffies, timeout)) {
+ dprintk(3, "ERROR: API Mailbox timeout\n");
+ return -1;
+ }
+ udelay(10);
+ }
+
+ /* read output values */
+ for (i = 0; i < out; i++) {
+ mc417_memory_read(dev, dev->cx23417_mailbox + 4 + i, data + i);
+ dprintk(3, "API Output %d = %d\n", i, data[i]);
+ }
+
+ mc417_memory_read(dev, dev->cx23417_mailbox + 2, &retval);
+ dprintk(3, "API result = %d\n", retval);
+
+ flag = 0;
+ mc417_memory_write(dev, dev->cx23417_mailbox, flag);
+
+ return retval;
+}
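The mailbox flag word acts as a small state machine: bit 0 claims the mailbox, writing 3 tells the firmware the command and arguments are complete, and the firmware sets bit 2 when it has processed the request. A reduced user-space model of that handshake, purely illustrative since the real peer is the cx23417 firmware:

#include <assert.h>
#include <stdint.h>

static uint32_t flag;			/* stands in for mailbox word 0 */

static void firmware_side(void)
{
	if ((flag & 3) == 3)
		flag |= 4;		/* command handled */
}

int main(void)
{
	assert(flag == 0);		/* mailbox free */
	flag |= 1;			/* claim it */
	/* command, timeout and arguments would be written here */
	flag |= 3;			/* arguments complete */
	firmware_side();		/* the driver polls this with a timeout */
	assert(flag & 4);		/* done bit set */
	flag = 0;			/* release the mailbox */
	return 0;
}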
+
+/* We don't need to call the API often, so using just one
+ * mailbox will probably suffice
+ */
+static int cx231xx_api_cmd(struct cx231xx *dev,
+ u32 command,
+ u32 inputcnt,
+ u32 outputcnt,
+ ...)
+{
+ u32 data[CX2341X_MBOX_MAX_DATA];
+ va_list vargs;
+ int i, err;
+
+ dprintk(3, "%s() cmds = 0x%08x\n", __func__, command);
+
+ va_start(vargs, outputcnt);
+ for (i = 0; i < inputcnt; i++)
+ data[i] = va_arg(vargs, int);
+
+ err = cx231xx_mbox_func(dev, command, inputcnt, outputcnt, data);
+ for (i = 0; i < outputcnt; i++) {
+ int *vptr = va_arg(vargs, int *);
+ *vptr = data[i];
+ }
+ va_end(vargs);
+
+ return err;
+}
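The wrapper packs inputcnt integers into the mailbox data array and unpacks outputcnt results into caller-supplied pointers, which is how the later calls in this file pass frame sizes in and read the firmware version back. A standalone sketch of the same vararg marshalling with a stub in place of the mailbox function:

#include <assert.h>
#include <stdarg.h>

#define MAX_DATA 16

/* stub firmware: reply to any command with the sum of its inputs */
static int mbox_func(unsigned int cmd, int in, int out, int data[MAX_DATA])
{
	int i, sum = 0;

	(void)cmd;
	for (i = 0; i < in; i++)
		sum += data[i];
	if (out > 0)
		data[0] = sum;
	return 0;
}

static int api_cmd(unsigned int cmd, int inputcnt, int outputcnt, ...)
{
	int data[MAX_DATA];
	va_list vargs;
	int i, err;

	va_start(vargs, outputcnt);
	for (i = 0; i < inputcnt; i++)
		data[i] = va_arg(vargs, int);
	err = mbox_func(cmd, inputcnt, outputcnt, data);
	for (i = 0; i < outputcnt; i++)
		*va_arg(vargs, int *) = data[i];
	va_end(vargs);

	return err;
}

int main(void)
{
	int result = 0;

	assert(api_cmd(0x1234, 2, 1, 40, 2, &result) == 0);
	assert(result == 42);
	return 0;
}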
+
+static int cx231xx_find_mailbox(struct cx231xx *dev)
+{
+ u32 signature[4] = {
+ 0x12345678, 0x34567812, 0x56781234, 0x78123456
+ };
+ int signaturecnt = 0;
+ u32 value;
+ int i;
+ int ret = 0;
+
+ dprintk(2, "%s()\n", __func__);
+
+ for (i = 0; i < 0x100; i++) {/*CX231xx_FIRM_IMAGE_SIZE*/
+ ret = mc417_memory_read(dev, i, &value);
+ if (ret < 0)
+ return ret;
+ if (value == signature[signaturecnt])
+ signaturecnt++;
+ else
+ signaturecnt = 0;
+ if (4 == signaturecnt) {
+ dprintk(1, "Mailbox signature found at 0x%x\n", i+1);
+ return i+1;
+ }
+ }
+ dprintk(3, "Mailbox signature values not found!\n");
+ return -1;
+}
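The scan above walks encoder memory until it has matched all four signature words in sequence and then returns the address just past them, which is where the mailbox lives. The same logic over a plain array, as a user-space sketch:

#include <assert.h>
#include <stdint.h>

static int find_mailbox(const uint32_t *mem, int len)
{
	static const uint32_t sig[4] = {
		0x12345678, 0x34567812, 0x56781234, 0x78123456
	};
	int hits = 0, i;

	for (i = 0; i < len; i++) {
		hits = (mem[i] == sig[hits]) ? hits + 1 : 0;
		if (hits == 4)
			return i + 1;	/* mailbox follows the signature */
	}
	return -1;
}

int main(void)
{
	uint32_t mem[] = { 0, 0x12345678, 0x34567812, 0x56781234,
			   0x78123456, 0xdeadbeef };

	assert(find_mailbox(mem, 6) == 5);
	return 0;
}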
+
+static void mciWriteMemoryToGPIO(struct cx231xx *dev, u32 address, u32 value,
+ u32 *p_fw_image)
+{
+
+ u32 temp = 0;
+ int i = 0;
+
+ temp = 0x82|MCI_MEMORY_DATA_BYTE0|((value&0x000000FF)<<8);
+ temp = temp<<10;
+ *p_fw_image = temp;
+ p_fw_image++;
+ temp = temp|((0x05)<<10);
+ *p_fw_image = temp;
+ p_fw_image++;
+
+ /*write data byte 1;*/
+ temp = 0x82|MCI_MEMORY_DATA_BYTE1|(value&0x0000FF00);
+ temp = temp<<10;
+ *p_fw_image = temp;
+ p_fw_image++;
+ temp = temp|((0x05)<<10);
+ *p_fw_image = temp;
+ p_fw_image++;
+
+ /*write data byte 2;*/
+ temp = 0x82|MCI_MEMORY_DATA_BYTE2|((value&0x00FF0000)>>8);
+ temp = temp<<10;
+ *p_fw_image = temp;
+ p_fw_image++;
+ temp = temp|((0x05)<<10);
+ *p_fw_image = temp;
+ p_fw_image++;
+
+ /*write data byte 3;*/
+ temp = 0x82|MCI_MEMORY_DATA_BYTE3|((value&0xFF000000)>>16);
+ temp = temp<<10;
+ *p_fw_image = temp;
+ p_fw_image++;
+ temp = temp|((0x05)<<10);
+ *p_fw_image = temp;
+ p_fw_image++;
+
+ /* write address byte 2;*/
+ temp = 0x82|MCI_MEMORY_ADDRESS_BYTE2 | MCI_MODE_MEMORY_WRITE |
+ ((address & 0x003F0000)>>8);
+ temp = temp<<10;
+ *p_fw_image = temp;
+ p_fw_image++;
+ temp = temp|((0x05)<<10);
+ *p_fw_image = temp;
+ p_fw_image++;
+
+ /* write address byte 1;*/
+ temp = 0x82|MCI_MEMORY_ADDRESS_BYTE1 | (address & 0xFF00);
+ temp = temp<<10;
+ *p_fw_image = temp;
+ p_fw_image++;
+ temp = temp|((0x05)<<10);
+ *p_fw_image = temp;
+ p_fw_image++;
+
+ /* write address byte 0;*/
+ temp = 0x82|MCI_MEMORY_ADDRESS_BYTE0|((address & 0x00FF)<<8);
+ temp = temp<<10;
+ *p_fw_image = temp;
+ p_fw_image++;
+ temp = temp|((0x05)<<10);
+ *p_fw_image = temp;
+ p_fw_image++;
+
+ for (i = 0; i < 6; i++) {
+ *p_fw_image = 0xFFFFFFFF;
+ p_fw_image++;
+ }
+}
+
+
+static int cx231xx_load_firmware(struct cx231xx *dev)
+{
+ static const unsigned char magic[8] = {
+ 0xa7, 0x0d, 0x00, 0x00, 0x66, 0xbb, 0x55, 0xaa
+ };
+ const struct firmware *firmware;
+ int i, retval = 0;
+ u32 value = 0;
+ u32 gpio_output = 0;
+ /*u32 checksum = 0;*/
+ /*u32 *dataptr;*/
+ u32 transfer_size = 0;
+ u32 fw_data = 0;
+ u32 address = 0;
+ /*u32 current_fw[800];*/
+ u32 *p_current_fw, *p_fw;
+ u32 *p_fw_data;
+ int frame = 0;
+ u16 _buffer_size = 4096;
+ u8 *p_buffer;
+
+ p_current_fw = (u32 *)vmalloc(1884180*4);
+ p_fw = p_current_fw;
+ if (p_current_fw == 0) {
+ dprintk(2, "FAIL!!!\n");
+ return -1;
+ }
+
+ p_buffer = (u8 *)vmalloc(4096);
+ if (p_buffer == 0) {
+ dprintk(2, "FAIL!!!\n");
+ return -1;
+ }
+
+ dprintk(2, "%s()\n", __func__);
+
+ /* Save GPIO settings before reset of APU */
+ retval |= mc417_memory_read(dev, 0x9020, &gpio_output);
+ retval |= mc417_memory_read(dev, 0x900C, &value);
+
+ retval = mc417_register_write(dev,
+ IVTV_REG_VPU, 0xFFFFFFED);
+ retval |= mc417_register_write(dev,
+ IVTV_REG_HW_BLOCKS, IVTV_CMD_HW_BLOCKS_RST);
+ retval |= mc417_register_write(dev,
+ IVTV_REG_ENC_SDRAM_REFRESH, 0x80000800);
+ retval |= mc417_register_write(dev,
+ IVTV_REG_ENC_SDRAM_PRECHARGE, 0x1A);
+ retval |= mc417_register_write(dev,
+ IVTV_REG_APU, 0);
+
+ if (retval != 0) {
+ printk(KERN_ERR "%s: Error with mc417_register_write\n",
+ __func__);
+ return -1;
+ }
+
+ retval = request_firmware(&firmware, CX231xx_FIRM_IMAGE_NAME,
+ &dev->udev->dev);
+
+ if (retval != 0) {
+ printk(KERN_ERR
+ "ERROR: Hotplug firmware request failed (%s).\n",
+ CX231xx_FIRM_IMAGE_NAME);
+ printk(KERN_ERR "Please fix your hotplug setup, the board will "
+ "not work without firmware loaded!\n");
+ return -1;
+ }
+
+ if (firmware->size != CX231xx_FIRM_IMAGE_SIZE) {
+ printk(KERN_ERR "ERROR: Firmware size mismatch "
+ "(have %zd, expected %d)\n",
+ firmware->size, CX231xx_FIRM_IMAGE_SIZE);
+ release_firmware(firmware);
+ return -1;
+ }
+
+ if (0 != memcmp(firmware->data, magic, 8)) {
+ printk(KERN_ERR
+ "ERROR: Firmware magic mismatch, wrong file?\n");
+ release_firmware(firmware);
+ return -1;
+ }
+
+ initGPIO(dev);
+
+ /* transfer to the chip */
+ dprintk(2, "Loading firmware to GPIO...\n");
+ p_fw_data = (u32 *)firmware->data;
+ dprintk(2, "firmware->size=%zd\n", firmware->size);
+ for (transfer_size = 0; transfer_size < firmware->size;
+ transfer_size += 4) {
+ fw_data = *p_fw_data;
+
+ mciWriteMemoryToGPIO(dev, address, fw_data, p_current_fw);
+ address = address + 1;
+ p_current_fw += 20;
+ p_fw_data += 1;
+ }
+
+ /*download the firmware by ep5-out*/
+
+ for (frame = 0; frame < (int)(CX231xx_FIRM_IMAGE_SIZE*20/_buffer_size);
+ frame++) {
+ for (i = 0; i < _buffer_size; i++) {
+ *(p_buffer + i) = (u8)(*(p_fw + (frame * 128 * 8 + (i / 4))) & 0x000000FF);
+ i++;
+ *(p_buffer + i) = (u8)((*(p_fw + (frame * 128 * 8 + (i / 4))) & 0x0000FF00) >> 8);
+ i++;
+ *(p_buffer + i) = (u8)((*(p_fw + (frame * 128 * 8 + (i / 4))) & 0x00FF0000) >> 16);
+ i++;
+ *(p_buffer + i) = (u8)((*(p_fw + (frame * 128 * 8 + (i / 4))) & 0xFF000000) >> 24);
+ }
+ cx231xx_ep5_bulkout(dev, p_buffer, _buffer_size);
+ }
+
+ p_current_fw = p_fw;
+ vfree(p_current_fw);
+ p_current_fw = NULL;
+ uninitGPIO(dev);
+ release_firmware(firmware);
+ dprintk(1, "Firmware upload successful.\n");
+
+ retval |= mc417_register_write(dev, IVTV_REG_HW_BLOCKS,
+ IVTV_CMD_HW_BLOCKS_RST);
+ if (retval < 0) {
+ printk(KERN_ERR "%s: Error with mc417_register_write\n",
+ __func__);
+ return retval;
+ }
+ /* F/W power up disturbs the GPIOs, restore state */
+ retval |= mc417_register_write(dev, 0x9020, gpio_output);
+ retval |= mc417_register_write(dev, 0x900C, value);
+
+ retval |= mc417_register_read(dev, IVTV_REG_VPU, &value);
+ retval |= mc417_register_write(dev, IVTV_REG_VPU, value & 0xFFFFFFE8);
+
+ if (retval < 0) {
+ printk(KERN_ERR "%s: Error with mc417_register_write\n",
+ __func__);
+ return retval;
+ }
+ return 0;
+}
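Before anything is transferred the firmware blob is sanity-checked: it must be exactly CX231xx_FIRM_IMAGE_SIZE bytes long and begin with the 8-byte magic sequence. A standalone sketch of just those two checks:

#include <assert.h>
#include <stddef.h>
#include <string.h>

#define CX231xx_FIRM_IMAGE_SIZE 376836

static const unsigned char magic[8] = {
	0xa7, 0x0d, 0x00, 0x00, 0x66, 0xbb, 0x55, 0xaa
};

static int firmware_looks_valid(const unsigned char *data, size_t size)
{
	return size == CX231xx_FIRM_IMAGE_SIZE && memcmp(data, magic, 8) == 0;
}

int main(void)
{
	static unsigned char blob[CX231xx_FIRM_IMAGE_SIZE];	/* zero filled */

	memcpy(blob, magic, sizeof(magic));
	assert(firmware_looks_valid(blob, sizeof(blob)));
	assert(!firmware_looks_valid(blob, 100));		/* wrong size */
	return 0;
}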
+
+static void cx231xx_417_check_encoder(struct cx231xx *dev)
+{
+ u32 status, seq;
+
+ status = 0;
+ seq = 0;
+ cx231xx_api_cmd(dev, CX2341X_ENC_GET_SEQ_END, 0, 2, &status, &seq);
+ dprintk(1, "%s() status = %d, seq = %d\n", __func__, status, seq);
+}
+
+static void cx231xx_codec_settings(struct cx231xx *dev)
+{
+ dprintk(1, "%s()\n", __func__);
+
+ /* assign frame size */
+ cx231xx_api_cmd(dev, CX2341X_ENC_SET_FRAME_SIZE, 2, 0,
+ dev->ts1.height, dev->ts1.width);
+
+ dev->mpeg_params.width = dev->ts1.width;
+ dev->mpeg_params.height = dev->ts1.height;
+
+ cx2341x_update(dev, cx231xx_mbox_func, NULL, &dev->mpeg_params);
+
+ cx231xx_api_cmd(dev, CX2341X_ENC_MISC, 2, 0, 3, 1);
+ cx231xx_api_cmd(dev, CX2341X_ENC_MISC, 2, 0, 4, 1);
+}
+
+static int cx231xx_initialize_codec(struct cx231xx *dev)
+{
+ int version;
+ int retval;
+ u32 i, data[7];
+ u32 val = 0;
+
+ dprintk(1, "%s()\n", __func__);
+ cx231xx_disable656(dev);
+ retval = cx231xx_api_cmd(dev, CX2341X_ENC_PING_FW, 0, 0); /* ping */
+ if (retval < 0) {
+ dprintk(2, "%s() PING OK\n", __func__);
+ retval = cx231xx_load_firmware(dev);
+ if (retval < 0) {
+ printk(KERN_ERR "%s() f/w load failed\n", __func__);
+ return retval;
+ }
+ retval = cx231xx_find_mailbox(dev);
+ if (retval < 0) {
+ printk(KERN_ERR "%s() mailbox < 0, error\n",
+ __func__);
+ return -1;
+ }
+ dev->cx23417_mailbox = retval;
+ retval = cx231xx_api_cmd(dev, CX2341X_ENC_PING_FW, 0, 0);
+ if (retval < 0) {
+ printk(KERN_ERR
+ "ERROR: cx23417 firmware ping failed!\n");
+ return -1;
+ }
+ retval = cx231xx_api_cmd(dev, CX2341X_ENC_GET_VERSION, 0, 1,
+ &version);
+ if (retval < 0) {
+ printk(KERN_ERR "ERROR: cx23417 firmware get encoder :"
+ "version failed!\n");
+ return -1;
+ }
+ dprintk(1, "cx23417 firmware version is 0x%08x\n", version);
+ msleep(200);
+ }
+
+ for (i = 0; i < 1; i++) {
+ retval = mc417_register_read(dev, 0x20f8, &val);
+ dprintk(3, "***before enable656() VIM Capture Lines =%d ***\n",
+ val);
+ if (retval < 0)
+ return retval;
+ }
+
+ cx231xx_enable656(dev);
+ /* stop mpeg capture */
+ cx231xx_api_cmd(dev, CX2341X_ENC_STOP_CAPTURE,
+ 3, 0, 1, 3, 4);
+
+ cx231xx_codec_settings(dev);
+ msleep(60);
+
+/* cx231xx_api_cmd(dev, CX2341X_ENC_SET_NUM_VSYNC_LINES, 2, 0,
+ CX231xx_FIELD1_SAA7115, CX231xx_FIELD2_SAA7115);
+ cx231xx_api_cmd(dev, CX2341X_ENC_SET_PLACEHOLDER, 12, 0,
+ CX231xx_CUSTOM_EXTENSION_USR_DATA, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0);
+*/
+ /* Setup to capture VBI */
+ data[0] = 0x0001BD00;
+ data[1] = 1; /* frames per interrupt */
+ data[2] = 4; /* total bufs */
+ data[3] = 0x91559155; /* start codes */
+ data[4] = 0x206080C0; /* stop codes */
+ data[5] = 6; /* lines */
+ data[6] = 64; /* BPL */
+/*
+ cx231xx_api_cmd(dev, CX2341X_ENC_SET_VBI_CONFIG, 7, 0, data[0], data[1],
+ data[2], data[3], data[4], data[5], data[6]);
+
+ for (i = 2; i <= 24; i++) {
+ int valid;
+
+ valid = ((i >= 19) && (i <= 21));
+ cx231xx_api_cmd(dev, CX2341X_ENC_SET_VBI_LINE, 5, 0, i,
+ valid, 0 , 0, 0);
+ cx231xx_api_cmd(dev, CX2341X_ENC_SET_VBI_LINE, 5, 0,
+ i | 0x80000000, valid, 0, 0, 0);
+ }
+*/
+/* cx231xx_api_cmd(dev, CX2341X_ENC_MUTE_AUDIO, 1, 0, CX231xx_UNMUTE);
+ msleep(60);
+*/
+ /* initialize the video input */
+ retval = cx231xx_api_cmd(dev, CX2341X_ENC_INITIALIZE_INPUT, 0, 0);
+ if (retval < 0)
+ return retval;
+ msleep(60);
+
+ /* Enable VIP style pixel invalidation so we work with scaled mode */
+ mc417_memory_write(dev, 2120, 0x00000080);
+
+ /* start capturing to the host interface */
+ retval = cx231xx_api_cmd(dev, CX2341X_ENC_START_CAPTURE, 2, 0,
+ CX231xx_MPEG_CAPTURE, CX231xx_RAW_BITS_NONE);
+ if (retval < 0)
+ return retval;
+ msleep(10);
+
+ for (i = 0; i < 1; i++) {
+ mc417_register_read(dev, 0x20f8, &val);
+ dprintk(3, "***VIM Capture Lines =%d ***\n", val);
+ }
+
+ return 0;
+}
+
+/* ------------------------------------------------------------------ */
+
+static int bb_buf_setup(struct videobuf_queue *q,
+ unsigned int *count, unsigned int *size)
+{
+ struct cx231xx_fh *fh = q->priv_data;
+
+ fh->dev->ts1.ts_packet_size = mpeglinesize;
+ fh->dev->ts1.ts_packet_count = mpeglines;
+
+ *size = fh->dev->ts1.ts_packet_size * fh->dev->ts1.ts_packet_count;
+ *count = mpegbufs;
+
+ return 0;
+}
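With the module defaults above each MPEG videobuf buffer holds mpeglines * mpeglinesize = 64 KiB of stream data and mpegbufs = 8 such buffers are requested; a trivial standalone check of that arithmetic:

#include <assert.h>

int main(void)
{
	unsigned int mpegbufs = 8, mpeglines = 128, mpeglinesize = 512;

	assert(mpeglines * mpeglinesize == 64 * 1024);		/* per buffer */
	assert(mpegbufs * mpeglines * mpeglinesize == 512 * 1024);
	return 0;
}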
+static void free_buffer(struct videobuf_queue *vq, struct cx231xx_buffer *buf)
+{
+ struct cx231xx_fh *fh = vq->priv_data;
+ struct cx231xx *dev = fh->dev;
+ unsigned long flags = 0;
+
+ if (in_interrupt())
+ BUG();
+
+ spin_lock_irqsave(&dev->video_mode.slock, flags);
+ if (dev->USE_ISO) {
+ if (dev->video_mode.isoc_ctl.buf == buf)
+ dev->video_mode.isoc_ctl.buf = NULL;
+ } else {
+ if (dev->video_mode.bulk_ctl.buf == buf)
+ dev->video_mode.bulk_ctl.buf = NULL;
+ }
+ spin_unlock_irqrestore(&dev->video_mode.slock, flags);
+ videobuf_waiton(vq, &buf->vb, 0, 0);
+ videobuf_vmalloc_free(&buf->vb);
+ buf->vb.state = VIDEOBUF_NEEDS_INIT;
+}
+
+static void buffer_copy(struct cx231xx *dev, char *data, int len, struct urb *urb,
+ struct cx231xx_dmaqueue *dma_q)
+{
+ void *vbuf;
+ struct cx231xx_buffer *buf;
+ u32 tail_data = 0;
+ char *p_data;
+
+ if (dma_q->mpeg_buffer_done == 0) {
+ if (list_empty(&dma_q->active))
+ return;
+
+ buf = list_entry(dma_q->active.next,
+ struct cx231xx_buffer, vb.queue);
+ dev->video_mode.isoc_ctl.buf = buf;
+ dma_q->mpeg_buffer_done = 1;
+ }
+ /* Fill buffer */
+ buf = dev->video_mode.isoc_ctl.buf;
+ vbuf = videobuf_to_vmalloc(&buf->vb);
+
+ if ((dma_q->mpeg_buffer_completed+len) <
+ mpeglines*mpeglinesize) {
+ if (dma_q->add_ps_package_head ==
+ CX231XX_NEED_ADD_PS_PACKAGE_HEAD) {
+ memcpy(vbuf+dma_q->mpeg_buffer_completed,
+ dma_q->ps_head, 3);
+ dma_q->mpeg_buffer_completed =
+ dma_q->mpeg_buffer_completed + 3;
+ dma_q->add_ps_package_head =
+ CX231XX_NONEED_PS_PACKAGE_HEAD;
+ }
+ memcpy(vbuf+dma_q->mpeg_buffer_completed, data, len);
+ dma_q->mpeg_buffer_completed =
+ dma_q->mpeg_buffer_completed + len;
+ } else {
+ dma_q->mpeg_buffer_done = 0;
+
+ tail_data =
+ mpeglines*mpeglinesize - dma_q->mpeg_buffer_completed;
+ memcpy(vbuf+dma_q->mpeg_buffer_completed,
+ data, tail_data);
+
+ buf->vb.state = VIDEOBUF_DONE;
+ buf->vb.field_count++;
+ do_gettimeofday(&buf->vb.ts);
+ list_del(&buf->vb.queue);
+ wake_up(&buf->vb.done);
+ dma_q->mpeg_buffer_completed = 0;
+
+ if (len - tail_data > 0) {
+ p_data = data + tail_data;
+ dma_q->left_data_count = len - tail_data;
+ memcpy(dma_q->p_left_data,
+ p_data, len - tail_data);
+ }
+
+ }
+
+ return;
+}
+
+static void buffer_filled(char *data, int len, struct urb *urb,
+ struct cx231xx_dmaqueue *dma_q)
+{
+ void *vbuf;
+ struct cx231xx_buffer *buf;
+
+ if (list_empty(&dma_q->active))
+ return;
+
+
+ buf = list_entry(dma_q->active.next,
+ struct cx231xx_buffer, vb.queue);
+
+
+ /* Fill buffer */
+ vbuf = videobuf_to_vmalloc(&buf->vb);
+ memcpy(vbuf, data, len);
+ buf->vb.state = VIDEOBUF_DONE;
+ buf->vb.field_count++;
+ do_gettimeofday(&buf->vb.ts);
+ list_del(&buf->vb.queue);
+ wake_up(&buf->vb.done);
+
+ return;
+}
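+/*
+ * Isochronous completion path: flush bytes left over from the previous
+ * URB first, then feed each iso packet to buffer_copy().
+ */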
+static inline int cx231xx_isoc_copy(struct cx231xx *dev, struct urb *urb)
+{
+ struct cx231xx_dmaqueue *dma_q = urb->context;
+ unsigned char *p_buffer;
+ u32 buffer_size = 0;
+ u32 i = 0;
+
+ for (i = 0; i < urb->number_of_packets; i++) {
+ if (dma_q->left_data_count > 0) {
+ buffer_copy(dev, dma_q->p_left_data,
+ dma_q->left_data_count, urb, dma_q);
+ dma_q->mpeg_buffer_completed = dma_q->left_data_count;
+ dma_q->left_data_count = 0;
+ }
+
+ p_buffer = urb->transfer_buffer +
+ urb->iso_frame_desc[i].offset;
+ buffer_size = urb->iso_frame_desc[i].actual_length;
+
+ if (buffer_size > 0)
+ buffer_copy(dev, p_buffer, buffer_size, urb, dma_q);
+ }
+
+ return 0;
+}
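+/*
+ * Bulk completion path: prepend the 3 PS-header bytes saved from the
+ * previous transfer, pass the block to buffer_filled() and keep the
+ * trailing 3 bytes of this transfer for the next URB.
+ */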
+static inline int cx231xx_bulk_copy(struct cx231xx *dev, struct urb *urb)
+{
+
+ /*char *outp;*/
+ /*struct cx231xx_buffer *buf;*/
+ struct cx231xx_dmaqueue *dma_q = urb->context;
+ unsigned char *p_buffer, *buffer;
+ u32 buffer_size = 0;
+
+ p_buffer = urb->transfer_buffer;
+ buffer_size = urb->actual_length;
+
+ buffer = kmalloc(buffer_size, GFP_ATOMIC);
+ if (!buffer)
+ return -ENOMEM;
+
+ memcpy(buffer, dma_q->ps_head, 3);
+ memcpy(buffer+3, p_buffer, buffer_size-3);
+ memcpy(dma_q->ps_head, p_buffer+buffer_size-3, 3);
+
+ p_buffer = buffer;
+ buffer_filled(p_buffer, buffer_size, urb, dma_q);
+
+ kfree(buffer);
+ return 0;
+}
+
+static int bb_buf_prepare(struct videobuf_queue *q,
+ struct videobuf_buffer *vb, enum v4l2_field field)
+{
+ struct cx231xx_fh *fh = q->priv_data;
+ struct cx231xx_buffer *buf =
+ container_of(vb, struct cx231xx_buffer, vb);
+ struct cx231xx *dev = fh->dev;
+ int rc = 0, urb_init = 0;
+ int size = fh->dev->ts1.ts_packet_size * fh->dev->ts1.ts_packet_count;
+
+ dma_qq = &dev->video_mode.vidq;
+
+ if (0 != buf->vb.baddr && buf->vb.bsize < size)
+ return -EINVAL;
+ buf->vb.width = fh->dev->ts1.ts_packet_size;
+ buf->vb.height = fh->dev->ts1.ts_packet_count;
+ buf->vb.size = size;
+ buf->vb.field = field;
+
+ if (VIDEOBUF_NEEDS_INIT == buf->vb.state) {
+ rc = videobuf_iolock(q, &buf->vb, NULL);
+ if (rc < 0)
+ goto fail;
+ }
+
+ if (dev->USE_ISO) {
+ if (!dev->video_mode.isoc_ctl.num_bufs)
+ urb_init = 1;
+ } else {
+ if (!dev->video_mode.bulk_ctl.num_bufs)
+ urb_init = 1;
+ }
+ /*cx231xx_info("urb_init=%d dev->video_mode.max_pkt_size=%d\n",
+ urb_init, dev->video_mode.max_pkt_size);*/
+ dev->mode_tv = 1;
+
+ if (urb_init) {
+ rc = cx231xx_set_mode(dev, CX231XX_DIGITAL_MODE);
+ rc = cx231xx_unmute_audio(dev);
+ if (dev->USE_ISO) {
+ cx231xx_set_alt_setting(dev, INDEX_TS1, 4);
+ rc = cx231xx_init_isoc(dev, mpeglines,
+ mpegbufs,
+ dev->ts1_mode.max_pkt_size,
+ cx231xx_isoc_copy);
+ } else {
+ cx231xx_set_alt_setting(dev, INDEX_TS1, 0);
+ rc = cx231xx_init_bulk(dev, mpeglines,
+ mpegbufs,
+ dev->ts1_mode.max_pkt_size,
+ cx231xx_bulk_copy);
+ }
+ if (rc < 0)
+ goto fail;
+ }
+
+ buf->vb.state = VIDEOBUF_PREPARED;
+ return 0;
+
+fail:
+ free_buffer(q, buf);
+ return rc;
+}
+
+static void bb_buf_queue(struct videobuf_queue *q,
+ struct videobuf_buffer *vb)
+{
+ struct cx231xx_fh *fh = q->priv_data;
+
+ struct cx231xx_buffer *buf =
+ container_of(vb, struct cx231xx_buffer, vb);
+ struct cx231xx *dev = fh->dev;
+ struct cx231xx_dmaqueue *vidq = &dev->video_mode.vidq;
+
+ buf->vb.state = VIDEOBUF_QUEUED;
+ list_add_tail(&buf->vb.queue, &vidq->active);
+
+}
+
+static void bb_buf_release(struct videobuf_queue *q,
+ struct videobuf_buffer *vb)
+{
+ struct cx231xx_buffer *buf =
+ container_of(vb, struct cx231xx_buffer, vb);
+ /*struct cx231xx_fh *fh = q->priv_data;*/
+ /*struct cx231xx *dev = (struct cx231xx *)fh->dev;*/
+
+ free_buffer(q, buf);
+}
+
+static struct videobuf_queue_ops cx231xx_qops = {
+ .buf_setup = bb_buf_setup,
+ .buf_prepare = bb_buf_prepare,
+ .buf_queue = bb_buf_queue,
+ .buf_release = bb_buf_release,
+};
+
+/* ------------------------------------------------------------------ */
+
+static const u32 *ctrl_classes[] = {
+ cx2341x_mpeg_ctrls,
+ NULL
+};
+
+static int cx231xx_queryctrl(struct cx231xx *dev,
+ struct v4l2_queryctrl *qctrl)
+{
+ qctrl->id = v4l2_ctrl_next(ctrl_classes, qctrl->id);
+ if (qctrl->id == 0)
+ return -EINVAL;
+
+ /* MPEG V4L2 controls */
+ if (cx2341x_ctrl_query(&dev->mpeg_params, qctrl))
+ qctrl->flags |= V4L2_CTRL_FLAG_DISABLED;
+
+ return 0;
+}
+
+static int cx231xx_querymenu(struct cx231xx *dev,
+ struct v4l2_querymenu *qmenu)
+{
+ struct v4l2_queryctrl qctrl;
+
+ qctrl.id = qmenu->id;
+ cx231xx_queryctrl(dev, &qctrl);
+ return v4l2_ctrl_query_menu(qmenu, &qctrl,
+ cx2341x_ctrl_get_menu(&dev->mpeg_params, qmenu->id));
+}
+
+static int vidioc_g_std(struct file *file, void *fh0, v4l2_std_id *norm)
+{
+ struct cx231xx_fh *fh = file->private_data;
+ struct cx231xx *dev = fh->dev;
+
+ *norm = dev->encodernorm.id;
+ return 0;
+}
+static int vidioc_s_std(struct file *file, void *priv, v4l2_std_id *id)
+{
+ struct cx231xx_fh *fh = file->private_data;
+ struct cx231xx *dev = fh->dev;
+ unsigned int i;
+
+ for (i = 0; i < ARRAY_SIZE(cx231xx_tvnorms); i++)
+ if (*id & cx231xx_tvnorms[i].id)
+ break;
+ if (i == ARRAY_SIZE(cx231xx_tvnorms))
+ return -EINVAL;
+ dev->encodernorm = cx231xx_tvnorms[i];
+
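+ /* 0xb000 covers the NTSC-M family (NTSC-M, -JP, -KR) of standards */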
+ if (dev->encodernorm.id & 0xb000) {
+ dprintk(3, "encodernorm set to NTSC\n");
+ dev->norm = V4L2_STD_NTSC;
+ dev->ts1.height = 480;
+ dev->mpeg_params.is_50hz = 0;
+ } else {
+ dprintk(3, "encodernorm set to PAL\n");
+ dev->norm = V4L2_STD_PAL_B;
+ dev->ts1.height = 576;
+ dev->mpeg_params.is_50hz = 1;
+ }
+ call_all(dev, core, s_std, dev->norm);
+ /* do mode control overrides */
+ cx231xx_do_mode_ctrl_overrides(dev);
+
+ dprintk(3, "exit vidioc_s_std() i=0x%x\n", i);
+ return 0;
+}
+static int vidioc_g_audio(struct file *file, void *fh,
+ struct v4l2_audio *a)
+{
+ struct v4l2_audio *vin = a;
+
+ int ret = -EINVAL;
+ if (vin->index > 0)
+ return ret;
+ strlcpy(vin->name, "VideoGrabber Audio", sizeof(vin->name));
+ vin->capability = V4L2_AUDCAP_STEREO;
+ return 0;
+}
+static int vidioc_enumaudio(struct file *file, void *fh,
+ struct v4l2_audio *a)
+{
+ struct v4l2_audio *vin = a;
+
+ int ret = -EINVAL;
+
+ if (vin->index > 0)
+ return ret;
+ strlcpy(vin->name, "VideoGrabber Audio", sizeof(vin->name));
+ vin->capability = V4L2_AUDCAP_STEREO;
+
+ return 0;
+}
+static const char *iname[] = {
+ [CX231XX_VMUX_COMPOSITE1] = "Composite1",
+ [CX231XX_VMUX_SVIDEO] = "S-Video",
+ [CX231XX_VMUX_TELEVISION] = "Television",
+ [CX231XX_VMUX_CABLE] = "Cable TV",
+ [CX231XX_VMUX_DVB] = "DVB",
+ [CX231XX_VMUX_DEBUG] = "for debug only",
+};
+static int vidioc_enum_input(struct file *file, void *priv,
+ struct v4l2_input *i)
+{
+ struct cx231xx_fh *fh = file->private_data;
+ struct cx231xx *dev = fh->dev;
+ struct cx231xx_input *input;
+ int n;
+ dprintk(3, "enter vidioc_enum_input()i->index=%d\n", i->index);
+
+ if (i->index >= 4)
+ return -EINVAL;
+
+
+ input = &cx231xx_boards[dev->model].input[i->index];
+
+ if (input->type == 0)
+ return -EINVAL;
+
+ /* FIXME
+ * strcpy(i->name, input->name); */
+
+ n = i->index;
+ strcpy(i->name, iname[INPUT(n)->type]);
+
+ if (input->type == CX231XX_VMUX_TELEVISION ||
+ input->type == CX231XX_VMUX_CABLE)
+ i->type = V4L2_INPUT_TYPE_TUNER;
+ else
+ i->type = V4L2_INPUT_TYPE_CAMERA;
+
+
+ return 0;
+}
+
+static int vidioc_g_input(struct file *file, void *priv, unsigned int *i)
+{
+ *i = 0;
+ return 0;
+}
+
+static int vidioc_s_input(struct file *file, void *priv, unsigned int i)
+{
+ struct cx231xx_fh *fh = file->private_data;
+ struct cx231xx *dev = fh->dev;
+
+ dprintk(3, "enter vidioc_s_input() i=%d\n", i);
+
+ mutex_lock(&dev->lock);
+
+ video_mux(dev, i);
+
+ mutex_unlock(&dev->lock);
+
+ if (i >= 4)
+ return -EINVAL;
+ dev->input = i;
+ dprintk(3, "exit vidioc_s_input()\n");
+ return 0;
+}
+
+static int vidioc_g_tuner(struct file *file, void *priv,
+ struct v4l2_tuner *t)
+{
+ return 0;
+}
+
+static int vidioc_s_tuner(struct file *file, void *priv,
+ struct v4l2_tuner *t)
+{
+ return 0;
+}
+
+static int vidioc_g_frequency(struct file *file, void *priv,
+ struct v4l2_frequency *f)
+{
+ return 0;
+}
+
+static int vidioc_s_frequency(struct file *file, void *priv,
+ struct v4l2_frequency *f)
+{
+
+
+ return 0;
+}
+
+static int vidioc_s_ctrl(struct file *file, void *priv,
+ struct v4l2_control *ctl)
+{
+ struct cx231xx_fh *fh = file->private_data;
+ struct cx231xx *dev = fh->dev;
+ dprintk(3, "enter vidioc_s_ctrl()\n");
+ /* Update the A/V core */
+ call_all(dev, core, s_ctrl, ctl);
+ dprintk(3, "exit vidioc_s_ctrl()\n");
+ return 0;
+}
+static struct v4l2_capability pvr_capability = {
+ .driver = "cx231xx",
+ .card = "VideoGrabber",
+ .bus_info = "usb",
+ .version = 1,
+ .capabilities = (V4L2_CAP_VIDEO_CAPTURE |
+ V4L2_CAP_TUNER | V4L2_CAP_AUDIO | V4L2_CAP_RADIO |
+ V4L2_CAP_STREAMING | V4L2_CAP_READWRITE),
+ .reserved = {0, 0, 0, 0}
+};
+static int vidioc_querycap(struct file *file, void *priv,
+ struct v4l2_capability *cap)
+{
+ memcpy(cap, &pvr_capability, sizeof(struct v4l2_capability));
+ return 0;
+}
+
+static int vidioc_enum_fmt_vid_cap(struct file *file, void *priv,
+ struct v4l2_fmtdesc *f)
+{
+
+ if (f->index != 0)
+ return -EINVAL;
+
+ strlcpy(f->description, "MPEG", sizeof(f->description));
+ f->pixelformat = V4L2_PIX_FMT_MPEG;
+
+ return 0;
+}
+
+static int vidioc_g_fmt_vid_cap(struct file *file, void *priv,
+ struct v4l2_format *f)
+{
+ struct cx231xx_fh *fh = file->private_data;
+ struct cx231xx *dev = fh->dev;
+ dprintk(3, "enter vidioc_g_fmt_vid_cap()\n");
+ f->fmt.pix.pixelformat = V4L2_PIX_FMT_MPEG;
+ f->fmt.pix.bytesperline = 0;
+ f->fmt.pix.sizeimage =
+ dev->ts1.ts_packet_size * dev->ts1.ts_packet_count;
+ f->fmt.pix.colorspace = 0;
+ f->fmt.pix.width = dev->ts1.width;
+ f->fmt.pix.height = dev->ts1.height;
+ f->fmt.pix.field = fh->vidq.field;
+ dprintk(1, "VIDIOC_G_FMT: w: %d, h: %d, f: %d\n",
+ dev->ts1.width, dev->ts1.height, fh->vidq.field);
+ dprintk(3, "exit vidioc_g_fmt_vid_cap()\n");
+ return 0;
+}
+
+static int vidioc_try_fmt_vid_cap(struct file *file, void *priv,
+ struct v4l2_format *f)
+{
+ struct cx231xx_fh *fh = file->private_data;
+ struct cx231xx *dev = fh->dev;
+ dprintk(3, "enter vidioc_try_fmt_vid_cap()\n");
+ f->fmt.pix.pixelformat = V4L2_PIX_FMT_MPEG;
+ f->fmt.pix.bytesperline = 0;
+ f->fmt.pix.sizeimage =
+ dev->ts1.ts_packet_size * dev->ts1.ts_packet_count;
+ f->fmt.pix.colorspace = 0;
+ dprintk(1, "VIDIOC_TRY_FMT: w: %d, h: %d, f: %d\n",
+ dev->ts1.width, dev->ts1.height, fh->vidq.field);
+ dprintk(3, "exit vidioc_try_fmt_vid_cap()\n");
+ return 0;
+}
+
+static int vidioc_s_fmt_vid_cap(struct file *file, void *priv,
+ struct v4l2_format *f)
+{
+
+ return 0;
+}
+
+static int vidioc_reqbufs(struct file *file, void *priv,
+ struct v4l2_requestbuffers *p)
+{
+ struct cx231xx_fh *fh = file->private_data;
+
+ return videobuf_reqbufs(&fh->vidq, p);
+}
+
+static int vidioc_querybuf(struct file *file, void *priv,
+ struct v4l2_buffer *p)
+{
+ struct cx231xx_fh *fh = file->private_data;
+
+ return videobuf_querybuf(&fh->vidq, p);
+}
+
+static int vidioc_qbuf(struct file *file, void *priv,
+ struct v4l2_buffer *p)
+{
+ struct cx231xx_fh *fh = file->private_data;
+
+ return videobuf_qbuf(&fh->vidq, p);
+}
+
+static int vidioc_dqbuf(struct file *file, void *priv, struct v4l2_buffer *b)
+{
+ struct cx231xx_fh *fh = priv;
+
+ return videobuf_dqbuf(&fh->vidq, b, file->f_flags & O_NONBLOCK);
+}
+
+
+static int vidioc_streamon(struct file *file, void *priv,
+ enum v4l2_buf_type i)
+{
+ struct cx231xx_fh *fh = file->private_data;
+
+ struct cx231xx *dev = fh->dev;
+ int rc = 0;
+ dprintk(3, "enter vidioc_streamon()\n");
+ cx231xx_set_alt_setting(dev, INDEX_TS1, 0);
+ rc = cx231xx_set_mode(dev, CX231XX_DIGITAL_MODE);
+ if (dev->USE_ISO)
+ rc = cx231xx_init_isoc(dev, CX231XX_NUM_PACKETS,
+ CX231XX_NUM_BUFS,
+ dev->video_mode.max_pkt_size,
+ cx231xx_isoc_copy);
+ else {
+ rc = cx231xx_init_bulk(dev, 320,
+ 5,
+ dev->ts1_mode.max_pkt_size,
+ cx231xx_bulk_copy);
+ }
+ dprintk(3, "exit vidioc_streamon()\n");
+ return videobuf_streamon(&fh->vidq);
+}
+
+static int vidioc_streamoff(struct file *file, void *priv, enum v4l2_buf_type i)
+{
+ struct cx231xx_fh *fh = file->private_data;
+
+ return videobuf_streamoff(&fh->vidq);
+}
+
+static int vidioc_g_ext_ctrls(struct file *file, void *priv,
+ struct v4l2_ext_controls *f)
+{
+ struct cx231xx_fh *fh = priv;
+ struct cx231xx *dev = fh->dev;
+ dprintk(3, "enter vidioc_g_ext_ctrls()\n");
+ if (f->ctrl_class != V4L2_CTRL_CLASS_MPEG)
+ return -EINVAL;
+ dprintk(3, "exit vidioc_g_ext_ctrls()\n");
+ return cx2341x_ext_ctrls(&dev->mpeg_params, 0, f, VIDIOC_G_EXT_CTRLS);
+}
+
+static int vidioc_s_ext_ctrls(struct file *file, void *priv,
+ struct v4l2_ext_controls *f)
+{
+ struct cx231xx_fh *fh = priv;
+ struct cx231xx *dev = fh->dev;
+ struct cx2341x_mpeg_params p;
+ int err;
+ dprintk(3, "enter vidioc_s_ext_ctrls()\n");
+ if (f->ctrl_class != V4L2_CTRL_CLASS_MPEG)
+ return -EINVAL;
+
+ p = dev->mpeg_params;
+ err = cx2341x_ext_ctrls(&p, 0, f, VIDIOC_TRY_EXT_CTRLS);
+ if (err == 0) {
+ err = cx2341x_update(dev, cx231xx_mbox_func,
+ &dev->mpeg_params, &p);
+ dev->mpeg_params = p;
+ }
+
+ return err;
+}
+
+static int vidioc_try_ext_ctrls(struct file *file, void *priv,
+ struct v4l2_ext_controls *f)
+{
+ struct cx231xx_fh *fh = priv;
+ struct cx231xx *dev = fh->dev;
+ struct cx2341x_mpeg_params p;
+ int err;
+ dprintk(3, "enter vidioc_try_ext_ctrls()\n");
+ if (f->ctrl_class != V4L2_CTRL_CLASS_MPEG)
+ return -EINVAL;
+
+ p = dev->mpeg_params;
+ err = cx2341x_ext_ctrls(&p, 0, f, VIDIOC_TRY_EXT_CTRLS);
+ dprintk(3, "exit vidioc_try_ext_ctrls() err=%d\n", err);
+ return err;
+}
+
+static int vidioc_log_status(struct file *file, void *priv)
+{
+ struct cx231xx_fh *fh = priv;
+ struct cx231xx *dev = fh->dev;
+ char name[32 + 2];
+
+ snprintf(name, sizeof(name), "%s/2", dev->name);
+ dprintk(3,
+ "%s/2: ============ START LOG STATUS ============\n",
+ dev->name);
+ call_all(dev, core, log_status);
+ cx2341x_log_status(&dev->mpeg_params, name);
+ dprintk(3,
+ "%s/2: ============= END LOG STATUS =============\n",
+ dev->name);
+ return 0;
+}
+
+static int vidioc_querymenu(struct file *file, void *priv,
+ struct v4l2_querymenu *a)
+{
+ struct cx231xx_fh *fh = priv;
+ struct cx231xx *dev = fh->dev;
+ dprintk(3, "enter vidioc_querymenu()\n");
+ dprintk(3, "exit vidioc_querymenu()\n");
+ return cx231xx_querymenu(dev, a);
+}
+
+static int vidioc_queryctrl(struct file *file, void *priv,
+ struct v4l2_queryctrl *c)
+{
+ struct cx231xx_fh *fh = priv;
+ struct cx231xx *dev = fh->dev;
+ dprintk(3, "enter vidioc_queryctrl()\n");
+ dprintk(3, "exit vidioc_queryctrl()\n");
+ return cx231xx_queryctrl(dev, c);
+}
+
+static int mpeg_open(struct file *file)
+{
+ int minor = video_devdata(file)->minor;
+ struct cx231xx *h, *dev = NULL;
+ /*struct list_head *list;*/
+ struct cx231xx_fh *fh;
+ /*u32 value = 0;*/
+
+ dprintk(2, "%s()\n", __func__);
+
+ list_for_each_entry(h, &cx231xx_devlist, devlist) {
+ if (h->v4l_device->minor == minor)
+ dev = h;
+ }
+
+ if (dev == NULL)
+ return -ENODEV;
+
+ mutex_lock(&dev->lock);
+
+ /* allocate + initialize per filehandle data */
+ fh = kzalloc(sizeof(*fh), GFP_KERNEL);
+ if (NULL == fh) {
+ mutex_unlock(&dev->lock);
+ return -ENOMEM;
+ }
+
+ file->private_data = fh;
+ fh->dev = dev;
+
+
+ videobuf_queue_vmalloc_init(&fh->vidq, &cx231xx_qops,
+ NULL, &dev->video_mode.slock,
+ V4L2_BUF_TYPE_VIDEO_CAPTURE, V4L2_FIELD_INTERLACED,
+ sizeof(struct cx231xx_buffer), fh, NULL);
+/*
+ videobuf_queue_sg_init(&fh->vidq, &cx231xx_qops,
+ &dev->udev->dev, &dev->ts1.slock,
+ V4L2_BUF_TYPE_VIDEO_CAPTURE,
+ V4L2_FIELD_INTERLACED,
+ sizeof(struct cx231xx_buffer),
+ fh, NULL);
+*/
+
+
+ cx231xx_set_alt_setting(dev, INDEX_VANC, 1);
+ cx231xx_set_gpio_value(dev, 2, 0);
+
+ cx231xx_initialize_codec(dev);
+
+ mutex_unlock(&dev->lock);
+ cx231xx_start_TS1(dev);
+
+ return 0;
+}
+
+static int mpeg_release(struct file *file)
+{
+ struct cx231xx_fh *fh = file->private_data;
+ struct cx231xx *dev = fh->dev;
+
+ dprintk(3, "mpeg_release()! dev=0x%p\n", dev);
+
+ if (!dev) {
+ dprintk(3, "abort!!!\n");
+ return 0;
+ }
+
+ mutex_lock(&dev->lock);
+
+ cx231xx_stop_TS1(dev);
+
+ /* do this before setting alternate! */
+ if (dev->USE_ISO)
+ cx231xx_uninit_isoc(dev);
+ else
+ cx231xx_uninit_bulk(dev);
+ cx231xx_set_mode(dev, CX231XX_SUSPEND);
+
+ cx231xx_api_cmd(fh->dev, CX2341X_ENC_STOP_CAPTURE, 3, 0,
+ CX231xx_END_NOW, CX231xx_MPEG_CAPTURE,
+ CX231xx_RAW_BITS_NONE);
+
+ /* FIXME: Review this crap */
+ /* Shut device down on last close */
+ if (atomic_cmpxchg(&fh->v4l_reading, 1, 0) == 1) {
+ if (atomic_dec_return(&dev->v4l_reader_count) == 0) {
+ /* stop mpeg capture */
+
+ msleep(500);
+ cx231xx_417_check_encoder(dev);
+
+ }
+ }
+
+ if (fh->vidq.streaming)
+ videobuf_streamoff(&fh->vidq);
+ if (fh->vidq.reading)
+ videobuf_read_stop(&fh->vidq);
+
+ videobuf_mmap_free(&fh->vidq);
+ file->private_data = NULL;
+ kfree(fh);
+ mutex_unlock(&dev->lock);
+ return 0;
+}
+
+static ssize_t mpeg_read(struct file *file, char __user *data,
+ size_t count, loff_t *ppos)
+{
+ struct cx231xx_fh *fh = file->private_data;
+ struct cx231xx *dev = fh->dev;
+
+
+ /* Deal with A/V decoder and mpeg encoder sync issues. */
+ /* Start mpeg encoder on first read. */
+ if (atomic_cmpxchg(&fh->v4l_reading, 0, 1) == 0) {
+ if (atomic_inc_return(&dev->v4l_reader_count) == 1) {
+ if (cx231xx_initialize_codec(dev) < 0)
+ return -EINVAL;
+ }
+ }
+
+ return videobuf_read_stream(&fh->vidq, data, count, ppos, 0,
+ file->f_flags & O_NONBLOCK);
+}
+
+static unsigned int mpeg_poll(struct file *file,
+ struct poll_table_struct *wait)
+{
+ struct cx231xx_fh *fh = file->private_data;
+ /*struct cx231xx *dev = fh->dev;*/
+
+ /*dprintk(2, "%s\n", __func__);*/
+
+ return videobuf_poll_stream(file, &fh->vidq, wait);
+}
+
+static int mpeg_mmap(struct file *file, struct vm_area_struct *vma)
+{
+ struct cx231xx_fh *fh = file->private_data;
+ struct cx231xx *dev = fh->dev;
+
+ dprintk(2, "%s()\n", __func__);
+
+ return videobuf_mmap_mapper(&fh->vidq, vma);
+}
+
+static struct v4l2_file_operations mpeg_fops = {
+ .owner = THIS_MODULE,
+ .open = mpeg_open,
+ .release = mpeg_release,
+ .read = mpeg_read,
+ .poll = mpeg_poll,
+ .mmap = mpeg_mmap,
+ .ioctl = video_ioctl2,
+};
+
+static const struct v4l2_ioctl_ops mpeg_ioctl_ops = {
+ .vidioc_s_std = vidioc_s_std,
+ .vidioc_g_std = vidioc_g_std,
+ .vidioc_enum_input = vidioc_enum_input,
+ .vidioc_enumaudio = vidioc_enumaudio,
+ .vidioc_g_audio = vidioc_g_audio,
+ .vidioc_g_input = vidioc_g_input,
+ .vidioc_s_input = vidioc_s_input,
+ .vidioc_g_tuner = vidioc_g_tuner,
+ .vidioc_s_tuner = vidioc_s_tuner,
+ .vidioc_g_frequency = vidioc_g_frequency,
+ .vidioc_s_frequency = vidioc_s_frequency,
+ .vidioc_s_ctrl = vidioc_s_ctrl,
+ .vidioc_querycap = vidioc_querycap,
+ .vidioc_enum_fmt_vid_cap = vidioc_enum_fmt_vid_cap,
+ .vidioc_g_fmt_vid_cap = vidioc_g_fmt_vid_cap,
+ .vidioc_try_fmt_vid_cap = vidioc_try_fmt_vid_cap,
+ .vidioc_s_fmt_vid_cap = vidioc_s_fmt_vid_cap,
+ .vidioc_reqbufs = vidioc_reqbufs,
+ .vidioc_querybuf = vidioc_querybuf,
+ .vidioc_qbuf = vidioc_qbuf,
+ .vidioc_dqbuf = vidioc_dqbuf,
+ .vidioc_streamon = vidioc_streamon,
+ .vidioc_streamoff = vidioc_streamoff,
+ .vidioc_g_ext_ctrls = vidioc_g_ext_ctrls,
+ .vidioc_s_ext_ctrls = vidioc_s_ext_ctrls,
+ .vidioc_try_ext_ctrls = vidioc_try_ext_ctrls,
+ .vidioc_log_status = vidioc_log_status,
+ .vidioc_querymenu = vidioc_querymenu,
+ .vidioc_queryctrl = vidioc_queryctrl,
+/* .vidioc_g_chip_ident = cx231xx_g_chip_ident,*/
+#ifdef CONFIG_VIDEO_ADV_DEBUG
+/* .vidioc_g_register = cx231xx_g_register,*/
+/* .vidioc_s_register = cx231xx_s_register,*/
+#endif
+};
+
+static struct video_device cx231xx_mpeg_template = {
+ .name = "cx231xx",
+ .fops = &mpeg_fops,
+ .ioctl_ops = &mpeg_ioctl_ops,
+ .minor = -1,
+ .tvnorms = CX231xx_NORMS,
+ .current_norm = V4L2_STD_NTSC_M,
+};
+
+void cx231xx_417_unregister(struct cx231xx *dev)
+{
+ dprintk(1, "%s()\n", __func__);
+
+ if (dev->v4l_device) {
+ if (-1 != dev->v4l_device->minor)
+ video_unregister_device(dev->v4l_device);
+ else
+ video_device_release(dev->v4l_device);
+ dev->v4l_device = NULL;
+ }
+}
+
+static struct video_device *cx231xx_video_dev_alloc(
+ struct cx231xx *dev,
+ struct usb_device *usbdev,
+ struct video_device *template,
+ char *type)
+{
+ struct video_device *vfd;
+
+ dprintk(1, "%s()\n", __func__);
+ vfd = video_device_alloc();
+ if (NULL == vfd)
+ return NULL;
+ *vfd = *template;
+ vfd->minor = -1;
+ snprintf(vfd->name, sizeof(vfd->name), "%s %s (%s)", dev->name,
+ type, cx231xx_boards[dev->model].name);
+
+ vfd->v4l2_dev = &dev->v4l2_dev;
+ vfd->release = video_device_release;
+
+ return vfd;
+
+}
+
+int cx231xx_417_register(struct cx231xx *dev)
+{
+ /* FIXME: Port1 hardcoded here */
+ int err = -ENODEV;
+ struct cx231xx_tsport *tsport = &dev->ts1;
+
+ dprintk(1, "%s()\n", __func__);
+
+ /* Set default TV standard */
+ dev->encodernorm = cx231xx_tvnorms[0];
+
+ if (dev->encodernorm.id & V4L2_STD_525_60)
+ tsport->height = 480;
+ else
+ tsport->height = 576;
+
+ tsport->width = 720;
+ cx2341x_fill_defaults(&dev->mpeg_params);
+ dev->norm = V4L2_STD_NTSC;
+
+ dev->mpeg_params.port = CX2341X_PORT_SERIAL;
+
+ /* Allocate and initialize V4L video device */
+ dev->v4l_device = cx231xx_video_dev_alloc(dev,
+ dev->udev, &cx231xx_mpeg_template, "mpeg");
+ if (dev->v4l_device == NULL)
+ return -ENOMEM;
+
+ err = video_register_device(dev->v4l_device,
+ VFL_TYPE_GRABBER, -1);
+ if (err < 0) {
+ dprintk(3, "%s: can't register mpeg device\n", dev->name);
+ return err;
+ }
+
+ dprintk(3, "%s: registered device video%d [mpeg]\n",
+ dev->name, dev->v4l_device->num);
+
+ return 0;
+}
diff --git a/drivers/media/video/cx231xx/cx231xx-audio.c b/drivers/media/video/cx231xx/cx231xx-audio.c
index 7cae95a2245e..30d13c15739a 100644
--- a/drivers/media/video/cx231xx/cx231xx-audio.c
+++ b/drivers/media/video/cx231xx/cx231xx-audio.c
@@ -75,6 +75,30 @@ static int cx231xx_isoc_audio_deinit(struct cx231xx *dev)
return 0;
}
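+/*
+ * Tear down the bulk audio path: kill (or, in IRQ context, unlink) every
+ * pending URB, then free the URB and its transfer buffer.
+ */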
+static int cx231xx_bulk_audio_deinit(struct cx231xx *dev)
+{
+ int i;
+
+ dprintk("Stopping bulk\n");
+
+ for (i = 0; i < CX231XX_AUDIO_BUFS; i++) {
+ if (dev->adev.urb[i]) {
+ if (!irqs_disabled())
+ usb_kill_urb(dev->adev.urb[i]);
+ else
+ usb_unlink_urb(dev->adev.urb[i]);
+
+ usb_free_urb(dev->adev.urb[i]);
+ dev->adev.urb[i] = NULL;
+
+ kfree(dev->adev.transfer_buffer[i]);
+ dev->adev.transfer_buffer[i] = NULL;
+ }
+ }
+
+ return 0;
+}
+
static void cx231xx_audio_isocirq(struct urb *urb)
{
struct cx231xx *dev = urb->context;
@@ -100,6 +124,9 @@ static void cx231xx_audio_isocirq(struct urb *urb)
break;
}
+ if (atomic_read(&dev->stream_started) == 0)
+ return;
+
if (dev->adev.capture_pcm_substream) {
substream = dev->adev.capture_pcm_substream;
runtime = substream->runtime;
@@ -158,14 +185,95 @@ static void cx231xx_audio_isocirq(struct urb *urb)
return;
}
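+/*
+ * Bulk-in audio completion handler; mirrors the isoc handler, but the
+ * whole transfer buffer is one contiguous block of PCM samples.
+ */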
+static void cx231xx_audio_bulkirq(struct urb *urb)
+{
+ struct cx231xx *dev = urb->context;
+ unsigned int oldptr;
+ int period_elapsed = 0;
+ int status;
+ unsigned char *cp;
+ unsigned int stride;
+ struct snd_pcm_substream *substream;
+ struct snd_pcm_runtime *runtime;
+
+ switch (urb->status) {
+ case 0: /* success */
+ case -ETIMEDOUT: /* NAK */
+ break;
+ case -ECONNRESET: /* kill */
+ case -ENOENT:
+ case -ESHUTDOWN:
+ return;
+ default: /* error */
+ dprintk("urb completition error %d.\n", urb->status);
+ break;
+ }
+
+ if (atomic_read(&dev->stream_started) == 0)
+ return;
+
+ if (dev->adev.capture_pcm_substream) {
+ substream = dev->adev.capture_pcm_substream;
+ runtime = substream->runtime;
+ stride = runtime->frame_bits >> 3;
+
+ if (1) {
+ int length = urb->actual_length /
+ stride;
+ cp = (unsigned char *)urb->transfer_buffer;
+
+ oldptr = dev->adev.hwptr_done_capture;
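+ /* copy into the ALSA ring buffer, wrapping at buffer_size */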
+ if (oldptr + length >= runtime->buffer_size) {
+ unsigned int cnt;
+
+ cnt = runtime->buffer_size - oldptr;
+ memcpy(runtime->dma_area + oldptr * stride, cp,
+ cnt * stride);
+ memcpy(runtime->dma_area, cp + cnt * stride,
+ length * stride - cnt * stride);
+ } else {
+ memcpy(runtime->dma_area + oldptr * stride, cp,
+ length * stride);
+ }
+
+ snd_pcm_stream_lock(substream);
+
+ dev->adev.hwptr_done_capture += length;
+ if (dev->adev.hwptr_done_capture >=
+ runtime->buffer_size)
+ dev->adev.hwptr_done_capture -=
+ runtime->buffer_size;
+
+ dev->adev.capture_transfer_done += length;
+ if (dev->adev.capture_transfer_done >=
+ runtime->period_size) {
+ dev->adev.capture_transfer_done -=
+ runtime->period_size;
+ period_elapsed = 1;
+ }
+ snd_pcm_stream_unlock(substream);
+ }
+ if (period_elapsed)
+ snd_pcm_period_elapsed(substream);
+ }
+ urb->status = 0;
+
+ status = usb_submit_urb(urb, GFP_ATOMIC);
+ if (status < 0) {
+ cx231xx_errdev("resubmit of audio urb failed (error=%i)\n",
+ status);
+ }
+ return;
+}
+
static int cx231xx_init_audio_isoc(struct cx231xx *dev)
{
int i, errCode;
int sb_size;
- cx231xx_info("%s: Starting AUDIO transfers\n", __func__);
+ cx231xx_info("%s: Starting ISO AUDIO transfers\n", __func__);
- sb_size = CX231XX_NUM_AUDIO_PACKETS * dev->adev.max_pkt_size;
+ sb_size = CX231XX_ISO_NUM_AUDIO_PACKETS * dev->adev.max_pkt_size;
for (i = 0; i < CX231XX_AUDIO_BUFS; i++) {
struct urb *urb;
@@ -176,7 +284,7 @@ static int cx231xx_init_audio_isoc(struct cx231xx *dev)
return -ENOMEM;
memset(dev->adev.transfer_buffer[i], 0x80, sb_size);
- urb = usb_alloc_urb(CX231XX_NUM_AUDIO_PACKETS, GFP_ATOMIC);
+ urb = usb_alloc_urb(CX231XX_ISO_NUM_AUDIO_PACKETS, GFP_ATOMIC);
if (!urb) {
cx231xx_errdev("usb_alloc_urb failed!\n");
for (j = 0; j < i; j++) {
@@ -194,10 +302,10 @@ static int cx231xx_init_audio_isoc(struct cx231xx *dev)
urb->transfer_buffer = dev->adev.transfer_buffer[i];
urb->interval = 1;
urb->complete = cx231xx_audio_isocirq;
- urb->number_of_packets = CX231XX_NUM_AUDIO_PACKETS;
+ urb->number_of_packets = CX231XX_ISO_NUM_AUDIO_PACKETS;
urb->transfer_buffer_length = sb_size;
- for (j = k = 0; j < CX231XX_NUM_AUDIO_PACKETS;
+ for (j = k = 0; j < CX231XX_ISO_NUM_AUDIO_PACKETS;
j++, k += dev->adev.max_pkt_size) {
urb->iso_frame_desc[j].offset = k;
urb->iso_frame_desc[j].length = dev->adev.max_pkt_size;
@@ -216,27 +324,56 @@ static int cx231xx_init_audio_isoc(struct cx231xx *dev)
return errCode;
}
-static int cx231xx_cmd(struct cx231xx *dev, int cmd, int arg)
+static int cx231xx_init_audio_bulk(struct cx231xx *dev)
{
- dprintk("%s transfer\n", (dev->adev.capture_stream == STREAM_ON) ?
- "stop" : "start");
+ int i, errCode;
+ int sb_size;
- switch (cmd) {
- case CX231XX_CAPTURE_STREAM_EN:
- if (dev->adev.capture_stream == STREAM_OFF && arg == 1) {
- dev->adev.capture_stream = STREAM_ON;
- cx231xx_init_audio_isoc(dev);
- } else if (dev->adev.capture_stream == STREAM_ON && arg == 0) {
- dev->adev.capture_stream = STREAM_OFF;
- cx231xx_isoc_audio_deinit(dev);
- } else {
- cx231xx_errdev("An underrun very likely occurred. "
- "Ignoring it.\n");
+ cx231xx_info("%s: Starting BULK AUDIO transfers\n", __func__);
+
+ sb_size = CX231XX_NUM_AUDIO_PACKETS * dev->adev.max_pkt_size;
+
+ for (i = 0; i < CX231XX_AUDIO_BUFS; i++) {
+ struct urb *urb;
+ int j;
+
+ dev->adev.transfer_buffer[i] = kmalloc(sb_size, GFP_ATOMIC);
+ if (!dev->adev.transfer_buffer[i])
+ return -ENOMEM;
+
+ memset(dev->adev.transfer_buffer[i], 0x80, sb_size);
+ urb = usb_alloc_urb(CX231XX_NUM_AUDIO_PACKETS, GFP_ATOMIC);
+ if (!urb) {
+ cx231xx_errdev("usb_alloc_urb failed!\n");
+ for (j = 0; j < i; j++) {
+ usb_free_urb(dev->adev.urb[j]);
+ kfree(dev->adev.transfer_buffer[j]);
+ }
+ return -ENOMEM;
}
- return 0;
- default:
- return -EINVAL;
+
+ urb->dev = dev->udev;
+ urb->context = dev;
+ urb->pipe = usb_rcvbulkpipe(dev->udev,
+ dev->adev.end_point_addr);
+ urb->transfer_flags = 0;
+ urb->transfer_buffer = dev->adev.transfer_buffer[i];
+ urb->complete = cx231xx_audio_bulkirq;
+ urb->transfer_buffer_length = sb_size;
+
+ dev->adev.urb[i] = urb;
+
}
+
+ for (i = 0; i < CX231XX_AUDIO_BUFS; i++) {
+ errCode = usb_submit_urb(dev->adev.urb[i], GFP_ATOMIC);
+ if (errCode < 0) {
+ cx231xx_bulk_audio_deinit(dev);
+ return errCode;
+ }
+ }
+
+ return errCode;
}
static int snd_pcm_alloc_vmalloc_buffer(struct snd_pcm_substream *subs,
@@ -300,19 +437,24 @@ static int snd_cx231xx_capture_open(struct snd_pcm_substream *substream)
/* set alternate setting for audio interface */
/* 1 - 48000 samples per sec */
- ret = cx231xx_set_alt_setting(dev, INDEX_AUDIO, 1);
+ mutex_lock(&dev->lock);
+ if (dev->USE_ISO)
+ ret = cx231xx_set_alt_setting(dev, INDEX_AUDIO, 1);
+ else
+ ret = cx231xx_set_alt_setting(dev, INDEX_AUDIO, 0);
+ mutex_unlock(&dev->lock);
if (ret < 0) {
cx231xx_errdev("failed to set alternate setting !\n");
return ret;
}
- /* inform hardware to start streaming */
- ret = cx231xx_capture_start(dev, 1, Audio);
-
runtime->hw = snd_cx231xx_hw_capture;
mutex_lock(&dev->lock);
+ /* inform hardware to start streaming */
+ ret = cx231xx_capture_start(dev, 1, Audio);
+
dev->adev.users++;
mutex_unlock(&dev->lock);
@@ -330,20 +472,21 @@ static int snd_cx231xx_pcm_close(struct snd_pcm_substream *substream)
dprintk("closing device\n");
+ /* inform hardware to stop streaming */
+ mutex_lock(&dev->lock);
+ ret = cx231xx_capture_start(dev, 0, Audio);
+
/* set alternate setting for audio interface */
/* 1 - 48000 samples per sec */
ret = cx231xx_set_alt_setting(dev, INDEX_AUDIO, 0);
if (ret < 0) {
cx231xx_errdev("failed to set alternate setting !\n");
+ mutex_unlock(&dev->lock);
return ret;
}
- /* inform hardware to start streaming */
- ret = cx231xx_capture_start(dev, 0, Audio);
-
dev->mute = 1;
- mutex_lock(&dev->lock);
dev->adev.users--;
mutex_unlock(&dev->lock);
@@ -352,7 +495,10 @@ static int snd_cx231xx_pcm_close(struct snd_pcm_substream *substream)
dprintk("disabling audio stream!\n");
dev->adev.shutdown = 0;
dprintk("released lock\n");
- cx231xx_cmd(dev, CX231XX_CAPTURE_STREAM_EN, 0);
+ if (atomic_read(&dev->stream_started) > 0) {
+ atomic_set(&dev->stream_started, 0);
+ schedule_work(&dev->wq_trigger);
+ }
}
return 0;
}
@@ -383,43 +529,64 @@ static int snd_cx231xx_hw_capture_free(struct snd_pcm_substream *substream)
dprintk("Stop capture, if needed\n");
- if (dev->adev.capture_stream == STREAM_ON)
- cx231xx_cmd(dev, CX231XX_CAPTURE_STREAM_EN, CX231XX_STOP_AUDIO);
+ if (atomic_read(&dev->stream_started) > 0) {
+ atomic_set(&dev->stream_started, 0);
+ schedule_work(&dev->wq_trigger);
+ }
return 0;
}
static int snd_cx231xx_prepare(struct snd_pcm_substream *substream)
{
+ struct cx231xx *dev = snd_pcm_substream_chip(substream);
+
+ dev->adev.hwptr_done_capture = 0;
+ dev->adev.capture_transfer_done = 0;
+
return 0;
}
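+/*
+ * The PCM trigger callback runs under a spinlock, so URB setup and
+ * teardown are deferred to this workqueue handler.
+ */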
+static void audio_trigger(struct work_struct *work)
+{
+ struct cx231xx *dev = container_of(work, struct cx231xx, wq_trigger);
+
+ if (atomic_read(&dev->stream_started)) {
+ dprintk("starting capture");
+ if (is_fw_load(dev) == 0)
+ cx25840_call(dev, core, load_fw);
+ if (dev->USE_ISO)
+ cx231xx_init_audio_isoc(dev);
+ else
+ cx231xx_init_audio_bulk(dev);
+ } else {
+ dprintk("stopping capture");
+ cx231xx_isoc_audio_deinit(dev);
+ }
+}
+
static int snd_cx231xx_capture_trigger(struct snd_pcm_substream *substream,
int cmd)
{
struct cx231xx *dev = snd_pcm_substream_chip(substream);
int retval;
- dprintk("Should %s capture\n", (cmd == SNDRV_PCM_TRIGGER_START) ?
- "start" : "stop");
-
spin_lock(&dev->adev.slock);
switch (cmd) {
case SNDRV_PCM_TRIGGER_START:
- cx231xx_cmd(dev, CX231XX_CAPTURE_STREAM_EN,
- CX231XX_START_AUDIO);
- retval = 0;
+ atomic_set(&dev->stream_started, 1);
break;
case SNDRV_PCM_TRIGGER_STOP:
- cx231xx_cmd(dev, CX231XX_CAPTURE_STREAM_EN, CX231XX_STOP_AUDIO);
- retval = 0;
+ atomic_set(&dev->stream_started, 0);
break;
default:
retval = -EINVAL;
}
-
spin_unlock(&dev->adev.slock);
- return retval;
+
+ schedule_work(&dev->wq_trigger);
+
+ return 0;
}
static snd_pcm_uframes_t snd_cx231xx_capture_pointer(struct snd_pcm_substream
@@ -495,10 +662,13 @@ static int cx231xx_audio_init(struct cx231xx *dev)
pcm->info_flags = 0;
pcm->private_data = dev;
strcpy(pcm->name, "Conexant cx231xx Capture");
+ snd_card_set_dev(card, &dev->udev->dev);
strcpy(card->driver, "Cx231xx-Audio");
strcpy(card->shortname, "Cx231xx Audio");
strcpy(card->longname, "Conexant cx231xx Audio");
+ INIT_WORK(&dev->wq_trigger, audio_trigger);
+
err = snd_card_register(card);
if (err < 0) {
snd_card_free(card);
diff --git a/drivers/media/video/cx231xx/cx231xx-avcore.c b/drivers/media/video/cx231xx/cx231xx-avcore.c
index c2174413ab29..cf50fafa8abb 100644
--- a/drivers/media/video/cx231xx/cx231xx-avcore.c
+++ b/drivers/media/video/cx231xx/cx231xx-avcore.c
@@ -31,13 +31,16 @@
#include <linux/i2c.h>
#include <linux/mm.h>
#include <linux/mutex.h>
+#include <media/tuner.h>
#include <media/v4l2-common.h>
#include <media/v4l2-ioctl.h>
#include <media/v4l2-chip-ident.h>
#include "cx231xx.h"
+#include "cx231xx-dif.h"
+#define TUNER_MODE_FM_RADIO 0
/******************************************************************************
-: BLOCK ARRANGEMENT :-
I2S block ----------------------|
@@ -50,6 +53,57 @@
[Video]
*******************************************************************************/
+/******************************************************************************
+ * VERVE REGISTER *
+ * *
+ ******************************************************************************/
+static int verve_write_byte(struct cx231xx *dev, u8 saddr, u8 data)
+{
+ return cx231xx_write_i2c_data(dev, VERVE_I2C_ADDRESS,
+ saddr, 1, data, 1);
+}
+
+static int verve_read_byte(struct cx231xx *dev, u8 saddr, u8 *data)
+{
+ int status;
+ u32 temp = 0;
+
+ status = cx231xx_read_i2c_data(dev, VERVE_I2C_ADDRESS,
+ saddr, 1, &temp, 1);
+ *data = (u8) temp;
+ return status;
+}
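+/*
+ * Program the GPIO direction mask, rewrite VERVE register 0x07, start
+ * capture on channel 2 and set the EP mode / GBULK bit-enable registers.
+ */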
+void initGPIO(struct cx231xx *dev)
+{
+ u32 _gpio_direction = 0;
+ u32 value = 0;
+ u8 val = 0;
+
+ _gpio_direction = _gpio_direction & 0xFC0003FF;
+ _gpio_direction = _gpio_direction | 0x03FDFC00;
+ cx231xx_send_gpio_cmd(dev, _gpio_direction, (u8 *)&value, 4, 0, 0);
+
+ verve_read_byte(dev, 0x07, &val);
+ cx231xx_info(" verve_read_byte address0x07=0x%x\n", val);
+ verve_write_byte(dev, 0x07, 0xF4);
+ verve_read_byte(dev, 0x07, &val);
+ cx231xx_info(" verve_read_byte address0x07=0x%x\n", val);
+
+ cx231xx_capture_start(dev, 1, 2);
+
+ cx231xx_mode_register(dev, EP_MODE_SET, 0x0500FE00);
+ cx231xx_mode_register(dev, GBULK_BIT_EN, 0xFFFDFFFF);
+
+}
+void uninitGPIO(struct cx231xx *dev)
+{
+ u8 value[4] = { 0, 0, 0, 0 };
+
+ cx231xx_capture_start(dev, 0, 2);
+ verve_write_byte(dev, 0x07, 0x14);
+ cx231xx_write_ctrl_reg(dev, VRT_SET_REGISTER,
+ 0x68, value, 4);
+}
/******************************************************************************
* A F E - B L O C K C O N T R O L functions *
@@ -258,7 +312,7 @@ int cx231xx_afe_set_mode(struct cx231xx *dev, enum AFE_MODE mode)
switch (mode) {
case AFE_MODE_LOW_IF:
- /* SetupAFEforLowIF(); */
+ cx231xx_Setup_AFE_for_LowIF(dev);
break;
case AFE_MODE_BASEBAND:
status = cx231xx_afe_setup_AFE_for_baseband(dev);
@@ -291,8 +345,15 @@ int cx231xx_afe_update_power_control(struct cx231xx *dev,
int status = 0;
switch (dev->model) {
+ case CX231XX_BOARD_CNXT_CARRAERA:
case CX231XX_BOARD_CNXT_RDE_250:
+ case CX231XX_BOARD_CNXT_SHELBY:
case CX231XX_BOARD_CNXT_RDU_250:
+ case CX231XX_BOARD_CNXT_RDE_253S:
+ case CX231XX_BOARD_CNXT_RDU_253S:
+ case CX231XX_BOARD_CNXT_VIDEO_GRABBER:
+ case CX231XX_BOARD_HAUPPAUGE_EXETER:
+ case CX231XX_BOARD_HAUPPAUGE_USBLIVE2:
if (avmode == POLARIS_AVMODE_ANALOGT_TV) {
while (afe_power_status != (FLD_PWRDN_TUNING_BIAS |
FLD_PWRDN_ENABLE_PLL)) {
@@ -483,6 +544,17 @@ static int vid_blk_read_word(struct cx231xx *dev, u16 saddr, u32 *data)
return cx231xx_read_i2c_data(dev, VID_BLK_I2C_ADDRESS,
saddr, 2, data, 4);
}
+int cx231xx_check_fw(struct cx231xx *dev)
+{
+ u8 temp = 0;
+ int status = 0;
+ status = vid_blk_read_byte(dev, DL_CTL_ADDRESS_LOW, &temp);
+ if (status < 0)
+ return status;
+ else
+ return temp;
+
+}
int cx231xx_set_video_input_mux(struct cx231xx *dev, u8 input)
{
@@ -521,9 +593,15 @@ int cx231xx_set_video_input_mux(struct cx231xx *dev, u8 input)
return status;
}
}
- status = cx231xx_set_decoder_video_input(dev,
+ if (dev->tuner_type == TUNER_NXP_TDA18271)
+ status = cx231xx_set_decoder_video_input(dev,
+ CX231XX_VMUX_TELEVISION,
+ INPUT(input)->vmux);
+ else
+ status = cx231xx_set_decoder_video_input(dev,
CX231XX_VMUX_COMPOSITE1,
INPUT(input)->vmux);
+
break;
default:
cx231xx_errdev("%s: set_power_mode : Unknown Input %d !\n",
@@ -578,12 +656,12 @@ int cx231xx_set_decoder_video_input(struct cx231xx *dev,
value |= (1 << 7);
status = vid_blk_write_word(dev, OUT_CTRL1, value);
- /* Set vip 1.1 output mode */
+ /* Set output mode */
status = cx231xx_read_modify_write_i2c_dword(dev,
VID_BLK_I2C_ADDRESS,
OUT_CTRL1,
FLD_OUT_MODE,
- OUT_MODE_VIP11);
+ dev->board.output_mode);
/* Tell DIF object to go to baseband mode */
status = cx231xx_dif_set_standard(dev, DIF_USE_BASEBAND);
@@ -681,7 +759,9 @@ int cx231xx_set_decoder_video_input(struct cx231xx *dev,
case CX231XX_VMUX_CABLE:
default:
switch (dev->model) {
+ case CX231XX_BOARD_CNXT_CARRAERA:
case CX231XX_BOARD_CNXT_RDE_250:
+ case CX231XX_BOARD_CNXT_SHELBY:
case CX231XX_BOARD_CNXT_RDU_250:
/* Disable the use of DIF */
@@ -699,11 +779,11 @@ int cx231xx_set_decoder_video_input(struct cx231xx *dev,
value |= (1 << 7);
status = vid_blk_write_word(dev, OUT_CTRL1, value);
- /* Set vip 1.1 output mode */
+ /* Set output mode */
status = cx231xx_read_modify_write_i2c_dword(dev,
VID_BLK_I2C_ADDRESS,
OUT_CTRL1, FLD_OUT_MODE,
- OUT_MODE_VIP11);
+ dev->board.output_mode);
/* Tell DIF object to go to baseband mode */
status = cx231xx_dif_set_standard(dev,
@@ -790,11 +870,11 @@ int cx231xx_set_decoder_video_input(struct cx231xx *dev,
(FLD_OEF_AGC_IF);
status = vid_blk_write_word(dev, PIN_CTRL, value);
- /* Set vip 1.1 output mode */
+ /* Set output mode */
status = cx231xx_read_modify_write_i2c_dword(dev,
VID_BLK_I2C_ADDRESS,
OUT_CTRL1, FLD_OUT_MODE,
- OUT_MODE_VIP11);
+ dev->board.output_mode);
/* Disable auto config of registers */
status = cx231xx_read_modify_write_i2c_dword(dev,
@@ -816,9 +896,21 @@ int cx231xx_set_decoder_video_input(struct cx231xx *dev,
/* Set VGA_SEL (for audio control) (bit 7-8) */
status = vid_blk_read_word(dev, AFE_CTRL, &value);
+ /*Set Func mode:01-DIF 10-baseband 11-YUV*/
+ value &= (~(FLD_FUNC_MODE));
+ value |= 0x800000;
+
value |= FLD_VGA_SEL_CH3 | FLD_VGA_SEL_CH2;
status = vid_blk_write_word(dev, AFE_CTRL, value);
+
+ if (dev->tuner_type == TUNER_NXP_TDA18271) {
+ status = vid_blk_read_word(dev, PIN_CTRL,
+ &value);
+ status = vid_blk_write_word(dev, PIN_CTRL,
+ (value & 0xFFFFFFEF));
+ }
+
break;
}
@@ -840,6 +932,39 @@ int cx231xx_set_decoder_video_input(struct cx231xx *dev,
return status;
}
+void cx231xx_enable656(struct cx231xx *dev)
+{
+ u8 temp = 0;
+ int status;
+ /*enable TS1 data[0:7] as output to export 656*/
+
+ status = vid_blk_write_byte(dev, TS1_PIN_CTL0, 0xFF);
+
+ /*enable TS1 clock as output to export 656*/
+
+ status = vid_blk_read_byte(dev, TS1_PIN_CTL1, &temp);
+ temp = temp|0x04;
+
+ status = vid_blk_write_byte(dev, TS1_PIN_CTL1, temp);
+
+}
+EXPORT_SYMBOL_GPL(cx231xx_enable656);
+
+void cx231xx_disable656(struct cx231xx *dev)
+{
+ u8 temp = 0;
+ int status;
+
+
+ status = vid_blk_write_byte(dev, TS1_PIN_CTL0, 0x00);
+
+ status = vid_blk_read_byte(dev, TS1_PIN_CTL1, &temp);
+ temp = temp&0xFB;
+
+ status = vid_blk_write_byte(dev, TS1_PIN_CTL1, temp);
+}
+EXPORT_SYMBOL_GPL(cx231xx_disable656);
+
/*
* Handle any video-mode specific overrides that are different
* on a per video standards basis after touching the MODE_CTRL
@@ -868,12 +993,12 @@ int cx231xx_do_mode_ctrl_overrides(struct cx231xx *dev)
VID_BLK_I2C_ADDRESS,
VERT_TIM_CTRL,
FLD_VACTIVE_CNT,
- 0x1E6000);
+ 0x1E7000);
status = cx231xx_read_modify_write_i2c_dword(dev,
VID_BLK_I2C_ADDRESS,
VERT_TIM_CTRL,
FLD_V656BLANK_CNT,
- 0x1E000000);
+ 0x1C000000);
status = cx231xx_read_modify_write_i2c_dword(dev,
VID_BLK_I2C_ADDRESS,
@@ -881,12 +1006,27 @@ int cx231xx_do_mode_ctrl_overrides(struct cx231xx *dev)
FLD_HBLANK_CNT,
cx231xx_set_field
(FLD_HBLANK_CNT, 0x79));
+
} else if (dev->norm & V4L2_STD_SECAM) {
cx231xx_info("do_mode_ctrl_overrides SECAM\n");
status = cx231xx_read_modify_write_i2c_dword(dev,
VID_BLK_I2C_ADDRESS,
VERT_TIM_CTRL,
- FLD_VBLANK_CNT, 0x24);
+ FLD_VBLANK_CNT, 0x20);
+ status = cx231xx_read_modify_write_i2c_dword(dev,
+ VID_BLK_I2C_ADDRESS,
+ VERT_TIM_CTRL,
+ FLD_VACTIVE_CNT,
+ cx231xx_set_field
+ (FLD_VACTIVE_CNT,
+ 0x244));
+ status = cx231xx_read_modify_write_i2c_dword(dev,
+ VID_BLK_I2C_ADDRESS,
+ VERT_TIM_CTRL,
+ FLD_V656BLANK_CNT,
+ cx231xx_set_field
+ (FLD_V656BLANK_CNT,
+ 0x24));
/* Adjust the active video horizontal start point */
status = cx231xx_read_modify_write_i2c_dword(dev,
VID_BLK_I2C_ADDRESS,
@@ -899,7 +1039,21 @@ int cx231xx_do_mode_ctrl_overrides(struct cx231xx *dev)
status = cx231xx_read_modify_write_i2c_dword(dev,
VID_BLK_I2C_ADDRESS,
VERT_TIM_CTRL,
- FLD_VBLANK_CNT, 0x24);
+ FLD_VBLANK_CNT, 0x20);
+ status = cx231xx_read_modify_write_i2c_dword(dev,
+ VID_BLK_I2C_ADDRESS,
+ VERT_TIM_CTRL,
+ FLD_VACTIVE_CNT,
+ cx231xx_set_field
+ (FLD_VACTIVE_CNT,
+ 0x244));
+ status = cx231xx_read_modify_write_i2c_dword(dev,
+ VID_BLK_I2C_ADDRESS,
+ VERT_TIM_CTRL,
+ FLD_V656BLANK_CNT,
+ cx231xx_set_field
+ (FLD_V656BLANK_CNT,
+ 0x24));
/* Adjust the active video horizontal start point */
status = cx231xx_read_modify_write_i2c_dword(dev,
VID_BLK_I2C_ADDRESS,
@@ -907,11 +1061,28 @@ int cx231xx_do_mode_ctrl_overrides(struct cx231xx *dev)
FLD_HBLANK_CNT,
cx231xx_set_field
(FLD_HBLANK_CNT, 0x85));
+
}
return status;
}
+int cx231xx_unmute_audio(struct cx231xx *dev)
+{
+ return vid_blk_write_byte(dev, PATH1_VOL_CTL, 0x24);
+}
+EXPORT_SYMBOL_GPL(cx231xx_unmute_audio);
+
+int stopAudioFirmware(struct cx231xx *dev)
+{
+ return vid_blk_write_byte(dev, DL_CTL_CONTROL, 0x03);
+}
+
+int restartAudioFirmware(struct cx231xx *dev)
+{
+ return vid_blk_write_byte(dev, DL_CTL_CONTROL, 0x13);
+}
+
int cx231xx_set_audio_input(struct cx231xx *dev, u8 input)
{
int status = 0;
@@ -970,6 +1141,7 @@ int cx231xx_set_audio_decoder_input(struct cx231xx *dev,
/* unmute all, AC97 in, independence mode
adr 08d0, data 0x00063073 */
+ status = vid_blk_write_word(dev, DL_CTL, 0x3000001);
status = vid_blk_write_word(dev, PATH1_CTL1, 0x00063073);
/* set AVC maximum threshold, adr 08d4, dat ffff0024 */
@@ -985,7 +1157,7 @@ int cx231xx_set_audio_decoder_input(struct cx231xx *dev,
case AUDIO_INPUT_TUNER_TV:
default:
-
+ status = stopAudioFirmware(dev);
/* Setup SRC sources and clocks */
status = vid_blk_write_word(dev, BAND_OUT_SEL,
cx231xx_set_field(FLD_SRC6_IN_SEL, 0x00) |
@@ -1013,18 +1185,32 @@ int cx231xx_set_audio_decoder_input(struct cx231xx *dev,
status = vid_blk_write_word(dev, PATH1_CTL1, 0x1F063870);
/* setAudioStandard(_audio_standard); */
-
status = vid_blk_write_word(dev, PATH1_CTL1, 0x00063870);
- switch (dev->model) {
- case CX231XX_BOARD_CNXT_RDE_250:
- case CX231XX_BOARD_CNXT_RDU_250:
+
+ status = restartAudioFirmware(dev);
+
+ switch (dev->board.tuner_type) {
+ case TUNER_XC5000:
+ /* SIF passthrough at 28.6363 MHz sample rate */
status = cx231xx_read_modify_write_i2c_dword(dev,
VID_BLK_I2C_ADDRESS,
CHIP_CTRL,
FLD_SIF_EN,
cx231xx_set_field(FLD_SIF_EN, 1));
break;
+ case TUNER_NXP_TDA18271:
+ /* Normal mode: SIF passthrough at 14.32 MHz */
+ status = cx231xx_read_modify_write_i2c_dword(dev,
+ VID_BLK_I2C_ADDRESS,
+ CHIP_CTRL,
+ FLD_SIF_EN,
+ cx231xx_set_field(FLD_SIF_EN, 0));
+ break;
default:
+ /* Hint for people adding new boards: this tuner type is not
+ handled here yet, so SIF routing is left unconfigured. */
+ printk(KERN_INFO "Unknown tuner type configuring SIF\n");
break;
}
break;
@@ -1049,18 +1235,6 @@ int cx231xx_set_audio_decoder_input(struct cx231xx *dev,
return status;
}
-/* Set resolution of the video */
-int cx231xx_resolution_set(struct cx231xx *dev)
-{
- /* set horzontal scale */
- int status = vid_blk_write_word(dev, HSCALE_CTRL, dev->hscale);
- if (status)
- return status;
-
- /* set vertical scale */
- return vid_blk_write_word(dev, VSCALE_CTRL, dev->vscale);
-}
-
/******************************************************************************
* C H I P Specific C O N T R O L functions *
******************************************************************************/
@@ -1094,34 +1268,350 @@ int cx231xx_set_agc_analog_digital_mux_select(struct cx231xx *dev,
return status;
}
-int cx231xx_enable_i2c_for_tuner(struct cx231xx *dev, u8 I2CIndex)
+int cx231xx_enable_i2c_port_3(struct cx231xx *dev, bool is_port_3)
{
u8 value[4] = { 0, 0, 0, 0 };
int status = 0;
-
- cx231xx_info("Changing the i2c port for tuner to %d\n", I2CIndex);
+ bool current_is_port_3;
status = cx231xx_read_ctrl_reg(dev, VRT_GET_REGISTER,
PWR_CTL_EN, value, 4);
if (status < 0)
return status;
- if (I2CIndex == I2C_1) {
- if (value[0] & I2C_DEMOD_EN) {
- value[0] &= ~I2C_DEMOD_EN;
- status = cx231xx_write_ctrl_reg(dev, VRT_SET_REGISTER,
- PWR_CTL_EN, value, 4);
- }
+ current_is_port_3 = value[0] & I2C_DEMOD_EN ? true : false;
+
+ /* Just return, if already using the right port */
+ if (current_is_port_3 == is_port_3)
+ return 0;
+
+ if (is_port_3)
+ value[0] |= I2C_DEMOD_EN;
+ else
+ value[0] &= ~I2C_DEMOD_EN;
+
+ cx231xx_info("Changing the i2c master port to %d\n",
+ is_port_3 ? 3 : 1);
+
+ status = cx231xx_write_ctrl_reg(dev, VRT_SET_REGISTER,
+ PWR_CTL_EN, value, 4);
+
+ return status;
+
+}
+EXPORT_SYMBOL_GPL(cx231xx_enable_i2c_port_3);
+
+void update_HH_register_after_set_DIF(struct cx231xx *dev)
+{
+/*
+ u8 status = 0;
+ u32 value = 0;
+
+ vid_blk_write_word(dev, PIN_CTRL, 0xA0FFF82F);
+ vid_blk_write_word(dev, DIF_MISC_CTRL, 0x0A203F11);
+ vid_blk_write_word(dev, DIF_SRC_PHASE_INC, 0x1BEFBF06);
+
+ status = vid_blk_read_word(dev, AFE_CTRL_C2HH_SRC_CTRL, &value);
+ vid_blk_write_word(dev, AFE_CTRL_C2HH_SRC_CTRL, 0x4485D390);
+ status = vid_blk_read_word(dev, AFE_CTRL_C2HH_SRC_CTRL, &value);
+*/
+}
+
+void cx231xx_dump_HH_reg(struct cx231xx *dev)
+{
+ u8 status = 0;
+ u32 value = 0;
+ u16 i = 0;
+
+ value = 0x45005390;
+ status = vid_blk_write_word(dev, 0x104, value);
+
+ for (i = 0x100; i < 0x140; i++) {
+ status = vid_blk_read_word(dev, i, &value);
+ cx231xx_info("reg0x%x=0x%x\n", i, value);
+ i = i+3;
+ }
+
+ for (i = 0x300; i < 0x400; i++) {
+ status = vid_blk_read_word(dev, i, &value);
+ cx231xx_info("reg0x%x=0x%x\n", i, value);
+ i = i+3;
+ }
+
+ for (i = 0x400; i < 0x440; i++) {
+ status = vid_blk_read_word(dev, i, &value);
+ cx231xx_info("reg0x%x=0x%x\n", i, value);
+ i = i+3;
+ }
+
+ status = vid_blk_read_word(dev, AFE_CTRL_C2HH_SRC_CTRL, &value);
+ cx231xx_info("AFE_CTRL_C2HH_SRC_CTRL=0x%x\n", value);
+ vid_blk_write_word(dev, AFE_CTRL_C2HH_SRC_CTRL, 0x4485D390);
+ status = vid_blk_read_word(dev, AFE_CTRL_C2HH_SRC_CTRL, &value);
+ cx231xx_info("AFE_CTRL_C2HH_SRC_CTRL=0x%x\n", value);
+}
+
+void cx231xx_dump_SC_reg(struct cx231xx *dev)
+{
+ u8 value[4] = { 0, 0, 0, 0 };
+ int status = 0;
+ cx231xx_info("cx231xx_dump_SC_reg %s!\n", __TIME__);
+
+ status = cx231xx_read_ctrl_reg(dev, VRT_GET_REGISTER, BOARD_CFG_STAT,
+ value, 4);
+ cx231xx_info("reg0x%x=0x%x 0x%x 0x%x 0x%x\n", BOARD_CFG_STAT, value[0],
+ value[1], value[2], value[3]);
+ status = cx231xx_read_ctrl_reg(dev, VRT_GET_REGISTER, TS_MODE_REG,
+ value, 4);
+ cx231xx_info("reg0x%x=0x%x 0x%x 0x%x 0x%x\n", TS_MODE_REG, value[0],
+ value[1], value[2], value[3]);
+ status = cx231xx_read_ctrl_reg(dev, VRT_GET_REGISTER, TS1_CFG_REG,
+ value, 4);
+ cx231xx_info("reg0x%x=0x%x 0x%x 0x%x 0x%x\n", TS1_CFG_REG, value[0],
+ value[1], value[2], value[3]);
+ status = cx231xx_read_ctrl_reg(dev, VRT_GET_REGISTER, TS1_LENGTH_REG,
+ value, 4);
+ cx231xx_info("reg0x%x=0x%x 0x%x 0x%x 0x%x\n", TS1_LENGTH_REG, value[0],
+ value[1], value[2], value[3]);
+
+ status = cx231xx_read_ctrl_reg(dev, VRT_GET_REGISTER, TS2_CFG_REG,
+ value, 4);
+ cx231xx_info("reg0x%x=0x%x 0x%x 0x%x 0x%x\n", TS2_CFG_REG, value[0],
+ value[1], value[2], value[3]);
+ status = cx231xx_read_ctrl_reg(dev, VRT_GET_REGISTER, TS2_LENGTH_REG,
+ value, 4);
+ cx231xx_info("reg0x%x=0x%x 0x%x 0x%x 0x%x\n", TS2_LENGTH_REG, value[0],
+ value[1], value[2], value[3]);
+ status = cx231xx_read_ctrl_reg(dev, VRT_GET_REGISTER, EP_MODE_SET,
+ value, 4);
+ cx231xx_info("reg0x%x=0x%x 0x%x 0x%x 0x%x\n", EP_MODE_SET, value[0],
+ value[1], value[2], value[3]);
+ status = cx231xx_read_ctrl_reg(dev, VRT_GET_REGISTER, CIR_PWR_PTN1,
+ value, 4);
+ cx231xx_info("reg0x%x=0x%x 0x%x 0x%x 0x%x\n", CIR_PWR_PTN1, value[0],
+ value[1], value[2], value[3]);
+
+ status = cx231xx_read_ctrl_reg(dev, VRT_GET_REGISTER, CIR_PWR_PTN2,
+ value, 4);
+ cx231xx_info("reg0x%x=0x%x 0x%x 0x%x 0x%x\n", CIR_PWR_PTN2, value[0],
+ value[1], value[2], value[3]);
+ status = cx231xx_read_ctrl_reg(dev, VRT_GET_REGISTER, CIR_PWR_PTN3,
+ value, 4);
+ cx231xx_info("reg0x%x=0x%x 0x%x 0x%x 0x%x\n", CIR_PWR_PTN3, value[0],
+ value[1], value[2], value[3]);
+ status = cx231xx_read_ctrl_reg(dev, VRT_GET_REGISTER, CIR_PWR_MASK0,
+ value, 4);
+ cx231xx_info("reg0x%x=0x%x 0x%x 0x%x 0x%x\n", CIR_PWR_MASK0, value[0],
+ value[1], value[2], value[3]);
+ status = cx231xx_read_ctrl_reg(dev, VRT_GET_REGISTER, CIR_PWR_MASK1,
+ value, 4);
+ cx231xx_info("reg0x%x=0x%x 0x%x 0x%x 0x%x\n", CIR_PWR_MASK1, value[0],
+ value[1], value[2], value[3]);
+
+ status = cx231xx_read_ctrl_reg(dev, VRT_GET_REGISTER, CIR_PWR_MASK2,
+ value, 4);
+ cx231xx_info("reg0x%x=0x%x 0x%x 0x%x 0x%x\n", CIR_PWR_MASK2, value[0],
+ value[1], value[2], value[3]);
+ status = cx231xx_read_ctrl_reg(dev, VRT_GET_REGISTER, CIR_GAIN,
+ value, 4);
+ cx231xx_info("reg0x%x=0x%x 0x%x 0x%x 0x%x\n", CIR_GAIN, value[0],
+ value[1], value[2], value[3]);
+ status = cx231xx_read_ctrl_reg(dev, VRT_GET_REGISTER, CIR_CAR_REG,
+ value, 4);
+ cx231xx_info("reg0x%x=0x%x 0x%x 0x%x 0x%x\n", CIR_CAR_REG, value[0],
+ value[1], value[2], value[3]);
+ status = cx231xx_read_ctrl_reg(dev, VRT_GET_REGISTER, CIR_OT_CFG1,
+ value, 4);
+ cx231xx_info("reg0x%x=0x%x 0x%x 0x%x 0x%x\n", CIR_OT_CFG1, value[0],
+ value[1], value[2], value[3]);
+
+ status = cx231xx_read_ctrl_reg(dev, VRT_GET_REGISTER, CIR_OT_CFG2,
+ value, 4);
+ cx231xx_info("reg0x%x=0x%x 0x%x 0x%x 0x%x\n", CIR_OT_CFG2, value[0],
+ value[1], value[2], value[3]);
+ status = cx231xx_read_ctrl_reg(dev, VRT_GET_REGISTER, PWR_CTL_EN,
+ value, 4);
+ cx231xx_info("reg0x%x=0x%x 0x%x 0x%x 0x%x\n", PWR_CTL_EN, value[0],
+ value[1], value[2], value[3]);
+
+
+}
+
+void cx231xx_Setup_AFE_for_LowIF(struct cx231xx *dev)
+
+{
+ u8 status = 0;
+ u8 value = 0;
+
+
+
+ status = afe_read_byte(dev, ADC_STATUS2_CH3, &value);
+ value = (value & 0xFE)|0x01;
+ status = afe_write_byte(dev, ADC_STATUS2_CH3, value);
+
+ status = afe_read_byte(dev, ADC_STATUS2_CH3, &value);
+ value = (value & 0xFE)|0x00;
+ status = afe_write_byte(dev, ADC_STATUS2_CH3, value);
+
+
+/*
+ Configure colibri for low-IF mode.
+
+ FIXME: ntf_mode is 2'b00 by default. Setting it to 0x1 would halve
+ the differential IF input, as a workaround for the low-IF AGC defect.
+*/
+
+ status = afe_read_byte(dev, ADC_NTF_PRECLMP_EN_CH3, &value);
+ value = (value & 0xFC)|0x00;
+ status = afe_write_byte(dev, ADC_NTF_PRECLMP_EN_CH3, value);
+
+ status = afe_read_byte(dev, ADC_INPUT_CH3, &value);
+ value = (value & 0xF9)|0x02;
+ status = afe_write_byte(dev, ADC_INPUT_CH3, value);
+
+ status = afe_read_byte(dev, ADC_FB_FRCRST_CH3, &value);
+ value = (value & 0xFB)|0x04;
+ status = afe_write_byte(dev, ADC_FB_FRCRST_CH3, value);
+
+ status = afe_read_byte(dev, ADC_DCSERVO_DEM_CH3, &value);
+ value = (value & 0xFC)|0x03;
+ status = afe_write_byte(dev, ADC_DCSERVO_DEM_CH3, value);
+
+ status = afe_read_byte(dev, ADC_CTRL_DAC1_CH3, &value);
+ value = (value & 0xFB)|0x04;
+ status = afe_write_byte(dev, ADC_CTRL_DAC1_CH3, value);
+
+ status = afe_read_byte(dev, ADC_CTRL_DAC23_CH3, &value);
+ value = (value & 0xF8)|0x06;
+ status = afe_write_byte(dev, ADC_CTRL_DAC23_CH3, value);
+
+ status = afe_read_byte(dev, ADC_CTRL_DAC23_CH3, &value);
+ value = (value & 0x8F)|0x40;
+ status = afe_write_byte(dev, ADC_CTRL_DAC23_CH3, value);
+
+ status = afe_read_byte(dev, ADC_PWRDN_CLAMP_CH3, &value);
+ value = (value & 0xDF)|0x20;
+ status = afe_write_byte(dev, ADC_PWRDN_CLAMP_CH3, value);
+}
+
+void cx231xx_set_Colibri_For_LowIF(struct cx231xx *dev, u32 if_freq,
+ u8 spectral_invert, u32 mode)
+{
+ u32 colibri_carrier_offset = 0;
+ u8 status = 0;
+ u32 func_mode = 0x01; /* Device has a DIF if this function is called */
+ u32 standard = 0;
+ u8 value[4] = { 0, 0, 0, 0 };
+
+ cx231xx_info("Enter cx231xx_set_Colibri_For_LowIF()\n");
+ value[0] = (u8) 0x6F;
+ value[1] = (u8) 0x6F;
+ value[2] = (u8) 0x6F;
+ value[3] = (u8) 0x6F;
+ status = cx231xx_write_ctrl_reg(dev, VRT_SET_REGISTER,
+ PWR_CTL_EN, value, 4);
+
+ /*Set colibri for low IF*/
+ status = cx231xx_afe_set_mode(dev, AFE_MODE_LOW_IF);
+
+ /* Set C2HH for low IF operation.*/
+ standard = dev->norm;
+ status = cx231xx_dif_configure_C2HH_for_low_IF(dev, dev->active_mode,
+ func_mode, standard);
+
+ /* Get colibri offsets.*/
+ colibri_carrier_offset = cx231xx_Get_Colibri_CarrierOffset(mode,
+ standard);
+
+ cx231xx_info("colibri_carrier_offset=%d, standard=0x%x\n",
+ colibri_carrier_offset, standard);
+
+ /* Set the band Pass filter for DIF*/
+ cx231xx_set_DIF_bandpass(dev, (if_freq+colibri_carrier_offset),
+ spectral_invert, mode);
+}
+
+u32 cx231xx_Get_Colibri_CarrierOffset(u32 mode, u32 standard)
+{
+ u32 colibri_carrier_offset = 0;
+
+ if (mode == TUNER_MODE_FM_RADIO) {
+ colibri_carrier_offset = 1100000;
+ } else if (standard & (V4L2_STD_MN | V4L2_STD_NTSC_M_JP)) {
+ colibri_carrier_offset = 4832000; /*4.83MHz */
+ } else if (standard & (V4L2_STD_PAL_B | V4L2_STD_PAL_G)) {
+ colibri_carrier_offset = 2700000; /*2.70MHz */
+ } else if (standard & (V4L2_STD_PAL_D | V4L2_STD_PAL_I
+ | V4L2_STD_SECAM)) {
+ colibri_carrier_offset = 2100000; /*2.10MHz */
+ }
+
+ return colibri_carrier_offset;
+}
+
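+/*
+ * Program the DIF PLL frequency word (for TV mode: if_freq * 2^28 /
+ * 50 MHz), set or clear spectral inversion, then load the band-pass
+ * coefficients whose entry in Dif_set_array matches the rounded IF.
+ */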
+void cx231xx_set_DIF_bandpass(struct cx231xx *dev, u32 if_freq,
+ u8 spectral_invert, u32 mode)
+{
+ unsigned long pll_freq_word;
+ int status = 0;
+ u32 dif_misc_ctrl_value = 0;
+ u64 pll_freq_u64 = 0;
+ u32 i = 0;
+
+ cx231xx_info("if_freq=%d;spectral_invert=0x%x;mode=0x%x\n",
+ if_freq, spectral_invert, mode);
+
+
+ if (mode == TUNER_MODE_FM_RADIO) {
+ pll_freq_word = 0x905A1CAC;
+ status = vid_blk_write_word(dev, DIF_PLL_FREQ_WORD, pll_freq_word);
+
+ } else /*KSPROPERTY_TUNER_MODE_TV*/{
+ /* Calculate the PLL frequency word based on the adjusted if_freq*/
+ pll_freq_word = if_freq;
+ pll_freq_u64 = (u64)pll_freq_word << 28L;
+ do_div(pll_freq_u64, 50000000);
+ pll_freq_word = (u32)pll_freq_u64;
+ /*pll_freq_word = 0x3463497;*/
+ status = vid_blk_write_word(dev, DIF_PLL_FREQ_WORD, pll_freq_word);
+
+ if (spectral_invert) {
+ if_freq -= 400000;
+ /* Enable Spectral Invert*/
+ status = vid_blk_read_word(dev, DIF_MISC_CTRL,
+ &dif_misc_ctrl_value);
+ dif_misc_ctrl_value = dif_misc_ctrl_value | 0x00200000;
+ status = vid_blk_write_word(dev, DIF_MISC_CTRL,
+ dif_misc_ctrl_value);
} else {
- if (!(value[0] & I2C_DEMOD_EN)) {
- value[0] |= I2C_DEMOD_EN;
- status = cx231xx_write_ctrl_reg(dev, VRT_SET_REGISTER,
- PWR_CTL_EN, value, 4);
- }
+ if_freq += 400000;
+ /* Disable Spectral Invert*/
+ status = vid_blk_read_word(dev, DIF_MISC_CTRL,
+ &dif_misc_ctrl_value);
+ dif_misc_ctrl_value = dif_misc_ctrl_value & 0xFFDFFFFF;
+ status = vid_blk_write_word(dev, DIF_MISC_CTRL,
+ dif_misc_ctrl_value);
}
- return status;
+ if_freq = (if_freq/100000)*100000;
+ if (if_freq < 3000000)
+ if_freq = 3000000;
+
+ if (if_freq > 16000000)
+ if_freq = 16000000;
+ }
+
+ cx231xx_info("Enter IF=%zd\n",
+ sizeof(Dif_set_array)/sizeof(struct dif_settings));
+ for (i = 0; i < sizeof(Dif_set_array)/sizeof(struct dif_settings); i++) {
+ if (Dif_set_array[i].if_freq == if_freq) {
+ status = vid_blk_write_word(dev,
+ Dif_set_array[i].register_address, Dif_set_array[i].value);
+ }
+ }
}
/******************************************************************************
@@ -1132,6 +1622,7 @@ int cx231xx_dif_configure_C2HH_for_low_IF(struct cx231xx *dev, u32 mode,
{
int status = 0;
+
if (mode == V4L2_TUNER_RADIO) {
/* C2HH */
/* lo if big signal */
@@ -1174,6 +1665,7 @@ int cx231xx_dif_configure_C2HH_for_low_IF(struct cx231xx *dev, u32 mode,
VID_BLK_I2C_ADDRESS, 32,
AUD_IO_CTRL, 0, 31, 0x00000003);
} else if ((standard == V4L2_STD_PAL_I) |
+ (standard & V4L2_STD_PAL_D) |
(standard & V4L2_STD_SECAM)) {
/* C2HH setup */
/* lo if big signal */
@@ -1232,10 +1724,18 @@ int cx231xx_dif_set_standard(struct cx231xx *dev, u32 standard)
dev->norm = standard;
switch (dev->model) {
+ case CX231XX_BOARD_CNXT_CARRAERA:
case CX231XX_BOARD_CNXT_RDE_250:
+ case CX231XX_BOARD_CNXT_SHELBY:
case CX231XX_BOARD_CNXT_RDU_250:
+ case CX231XX_BOARD_CNXT_VIDEO_GRABBER:
+ case CX231XX_BOARD_HAUPPAUGE_EXETER:
func_mode = 0x03;
break;
+ case CX231XX_BOARD_CNXT_RDE_253S:
+ case CX231XX_BOARD_CNXT_RDU_253S:
+ func_mode = 0x01;
+ break;
default:
func_mode = 0x01;
}
@@ -1617,17 +2117,27 @@ int cx231xx_tuner_post_channel_change(struct cx231xx *dev)
{
int status = 0;
u32 dwval;
-
+ cx231xx_info("cx231xx_tuner_post_channel_change dev->tuner_type =0%d\n",
+ dev->tuner_type);
/* Set the RF and IF k_agc values to 4 for PAL/NTSC and 8 for
* SECAM L/B/D standards */
status = vid_blk_read_word(dev, DIF_AGC_IF_REF, &dwval);
dwval &= ~(FLD_DIF_K_AGC_RF | FLD_DIF_K_AGC_IF);
if (dev->norm & (V4L2_STD_SECAM_L | V4L2_STD_SECAM_B |
- V4L2_STD_SECAM_D))
- dwval |= 0x88000000;
- else
- dwval |= 0x44000000;
+ V4L2_STD_SECAM_D)) {
+ if (dev->tuner_type == TUNER_NXP_TDA18271) {
+ dwval &= ~FLD_DIF_IF_REF;
+ dwval |= 0x88000300;
+ } else
+ dwval |= 0x88000000;
+ } else {
+ if (dev->tuner_type == TUNER_NXP_TDA18271) {
+ dwval &= ~FLD_DIF_IF_REF;
+ dwval |= 0xCC000300;
+ } else
+ dwval |= 0x44000000;
+ }
status = vid_blk_write_word(dev, DIF_AGC_IF_REF, dwval);
@@ -1714,8 +2224,6 @@ int cx231xx_set_power_mode(struct cx231xx *dev, enum AV_MODE mode)
return 0;
}
- cx231xx_info(" setPowerMode::mode = %d\n", mode);
-
status = cx231xx_read_ctrl_reg(dev, VRT_GET_REGISTER, PWR_CTL_EN, value,
4);
if (status < 0)
@@ -1761,7 +2269,7 @@ int cx231xx_set_power_mode(struct cx231xx *dev, enum AV_MODE mode)
case POLARIS_AVMODE_ANALOGT_TV:
- tmp &= (~PWR_DEMOD_EN);
+ tmp |= PWR_DEMOD_EN;
tmp |= (I2C_DEMOD_EN);
value[0] = (u8) tmp;
value[1] = (u8) (tmp >> 8);
@@ -1814,14 +2322,18 @@ int cx231xx_set_power_mode(struct cx231xx *dev, enum AV_MODE mode)
msleep(PWR_SLEEP_INTERVAL);
}
- if ((dev->model == CX231XX_BOARD_CNXT_RDE_250) ||
- (dev->model == CX231XX_BOARD_CNXT_RDU_250)) {
- /* tuner path to channel 1 from port 3 */
- cx231xx_enable_i2c_for_tuner(dev, I2C_3);
+ if (dev->board.tuner_type != TUNER_ABSENT) {
+ /* Enable tuner */
+ cx231xx_enable_i2c_port_3(dev, true);
+
+ /* reset the Tuner */
+ if (dev->board.tuner_gpio)
+ cx231xx_gpio_set(dev, dev->board.tuner_gpio);
if (dev->cx231xx_reset_analog_tuner)
dev->cx231xx_reset_analog_tuner(dev);
}
+
break;
case POLARIS_AVMODE_DIGITAL:
@@ -1856,6 +2368,7 @@ int cx231xx_set_power_mode(struct cx231xx *dev, enum AV_MODE mode)
msleep(PWR_SLEEP_INTERVAL);
}
+ tmp &= (~PWR_AV_MODE);
tmp |= POLARIS_AVMODE_DIGITAL | I2C_DEMOD_EN;
value[0] = (u8) tmp;
value[1] = (u8) (tmp >> 8);
@@ -1876,10 +2389,19 @@ int cx231xx_set_power_mode(struct cx231xx *dev, enum AV_MODE mode)
msleep(PWR_SLEEP_INTERVAL);
}
- if ((dev->model == CX231XX_BOARD_CNXT_RDE_250) ||
- (dev->model == CX231XX_BOARD_CNXT_RDU_250)) {
- /* tuner path to channel 1 from port 3 */
- cx231xx_enable_i2c_for_tuner(dev, I2C_3);
+ if (dev->board.tuner_type != TUNER_ABSENT) {
+ /*
+ * Enable tuner
+ * Hauppauge Exeter seems to need to do something different!
+ */
+ if (dev->model == CX231XX_BOARD_HAUPPAUGE_EXETER)
+ cx231xx_enable_i2c_port_3(dev, false);
+ else
+ cx231xx_enable_i2c_port_3(dev, true);
+
+ /* reset the Tuner */
+ if (dev->board.tuner_gpio)
+ cx231xx_gpio_set(dev, dev->board.tuner_gpio);
if (dev->cx231xx_reset_analog_tuner)
dev->cx231xx_reset_analog_tuner(dev);
@@ -1913,9 +2435,6 @@ int cx231xx_set_power_mode(struct cx231xx *dev, enum AV_MODE mode)
status = cx231xx_read_ctrl_reg(dev, VRT_GET_REGISTER, PWR_CTL_EN, value,
4);
- cx231xx_info(" The data of PWR_CTL_EN register 0x74"
- "=0x%0x,0x%0x,0x%0x,0x%0x\n",
- value[0], value[1], value[2], value[3]);
return status;
}
@@ -2000,6 +2519,8 @@ int cx231xx_stop_stream(struct cx231xx *dev, u32 ep_mask)
int cx231xx_initialize_stream_xfer(struct cx231xx *dev, u32 media_type)
{
int status = 0;
+ u32 value = 0;
+ u8 val[4] = { 0, 0, 0, 0 };
if (dev->udev->speed == USB_SPEED_HIGH) {
switch (media_type) {
@@ -2026,10 +2547,36 @@ int cx231xx_initialize_stream_xfer(struct cx231xx *dev, u32 media_type)
break;
case 4: /* ts1 */
- cx231xx_info("%s: set ts1 registers\n", __func__);
+ cx231xx_info("%s: set ts1 registers", __func__);
+
+ if (dev->model == CX231XX_BOARD_CNXT_VIDEO_GRABBER) {
+ cx231xx_info(" MPEG\n");
+ value &= 0xFFFFFFFC;
+ value |= 0x3;
+
+ status = cx231xx_mode_register(dev, TS_MODE_REG, value);
+
+ val[0] = 0x04;
+ val[1] = 0xA3;
+ val[2] = 0x3B;
+ val[3] = 0x00;
+ status = cx231xx_write_ctrl_reg(dev, VRT_SET_REGISTER,
+ TS1_CFG_REG, val, 4);
+
+ val[0] = 0x00;
+ val[1] = 0x08;
+ val[2] = 0x00;
+ val[3] = 0x08;
+ status = cx231xx_write_ctrl_reg(dev, VRT_SET_REGISTER,
+ TS1_LENGTH_REG, val, 4);
+
+ } else {
+ cx231xx_info(" BDA\n");
status = cx231xx_mode_register(dev, TS_MODE_REG, 0x101);
- status = cx231xx_mode_register(dev, TS1_CFG_REG, 0x400);
+ status = cx231xx_mode_register(dev, TS1_CFG_REG, 0x010);
+ }
break;
+
case 6: /* ts1 parallel mode */
cx231xx_info("%s: set ts1 parrallel mode registers\n",
__func__);
@@ -2128,7 +2675,7 @@ EXPORT_SYMBOL_GPL(cx231xx_capture_start);
/*****************************************************************************
* G P I O B I T control functions *
******************************************************************************/
-int cx231xx_set_gpio_bit(struct cx231xx *dev, u32 gpio_bit, u8 * gpio_val)
+int cx231xx_set_gpio_bit(struct cx231xx *dev, u32 gpio_bit, u8 *gpio_val)
{
int status = 0;
@@ -2137,7 +2684,7 @@ int cx231xx_set_gpio_bit(struct cx231xx *dev, u32 gpio_bit, u8 * gpio_val)
return status;
}
-int cx231xx_get_gpio_bit(struct cx231xx *dev, u32 gpio_bit, u8 * gpio_val)
+int cx231xx_get_gpio_bit(struct cx231xx *dev, u32 gpio_bit, u8 *gpio_val)
{
int status = 0;
@@ -2344,7 +2891,7 @@ int cx231xx_gpio_i2c_write_byte(struct cx231xx *dev, u8 data)
return status;
}
-int cx231xx_gpio_i2c_read_byte(struct cx231xx *dev, u8 * buf)
+int cx231xx_gpio_i2c_read_byte(struct cx231xx *dev, u8 *buf)
{
u8 value = 0;
int status = 0;
@@ -2494,7 +3041,7 @@ int cx231xx_gpio_i2c_write_nak(struct cx231xx *dev)
/* cx231xx_gpio_i2c_read
* Function to read data from gpio based I2C interface
*/
-int cx231xx_gpio_i2c_read(struct cx231xx *dev, u8 dev_addr, u8 * buf, u8 len)
+int cx231xx_gpio_i2c_read(struct cx231xx *dev, u8 dev_addr, u8 *buf, u8 len)
{
int status = 0;
int i = 0;
@@ -2538,7 +3085,7 @@ int cx231xx_gpio_i2c_read(struct cx231xx *dev, u8 dev_addr, u8 * buf, u8 len)
/* cx231xx_gpio_i2c_write
* Function to write data to gpio based I2C interface
*/
-int cx231xx_gpio_i2c_write(struct cx231xx *dev, u8 dev_addr, u8 * buf, u8 len)
+int cx231xx_gpio_i2c_write(struct cx231xx *dev, u8 dev_addr, u8 *buf, u8 len)
{
int status = 0;
int i = 0;
diff --git a/drivers/media/video/cx231xx/cx231xx-cards.c b/drivers/media/video/cx231xx/cx231xx-cards.c
index f2a4900014bc..2c78d188bb06 100644
--- a/drivers/media/video/cx231xx/cx231xx-cards.c
+++ b/drivers/media/video/cx231xx/cx231xx-cards.c
@@ -41,6 +41,10 @@ static int tuner = -1;
module_param(tuner, int, 0444);
MODULE_PARM_DESC(tuner, "tuner type");
+static int transfer_mode = 1;
+module_param(transfer_mode, int, 0444);
+MODULE_PARM_DESC(transfer_mode, "transfer mode (1-ISO or 0-BULK)");
+
static unsigned int disable_ir;
module_param(disable_ir, int, 0444);
MODULE_PARM_DESC(disable_ir, "disable infrared remote support");
@@ -86,8 +90,8 @@ struct cx231xx_board cx231xx_boards[] = {
}
},
},
- [CX231XX_BOARD_CNXT_RDE_250] = {
- .name = "Conexant Hybrid TV - RDE250",
+ [CX231XX_BOARD_CNXT_CARRAERA] = {
+ .name = "Conexant Hybrid TV - CARRAERA",
.tuner_type = TUNER_XC5000,
.tuner_addr = 0x61,
.tuner_gpio = RDE250_XCV_TUNER,
@@ -95,6 +99,7 @@ struct cx231xx_board cx231xx_boards[] = {
.tuner_scl_gpio = 0x1a,
.tuner_sda_gpio = 0x1b,
.decoder = CX231XX_AVDECODER,
+ .output_mode = OUT_MODE_VIP11,
.demod_xfer_mode = 0,
.ctl_pin_status_mask = 0xFFFFFFC4,
.agc_analog_digital_select_gpio = 0x0c,
@@ -125,9 +130,8 @@ struct cx231xx_board cx231xx_boards[] = {
}
},
},
-
- [CX231XX_BOARD_CNXT_RDU_250] = {
- .name = "Conexant Hybrid TV - RDU250",
+ [CX231XX_BOARD_CNXT_SHELBY] = {
+ .name = "Conexant Hybrid TV - SHELBY",
.tuner_type = TUNER_XC5000,
.tuner_addr = 0x61,
.tuner_gpio = RDE250_XCV_TUNER,
@@ -135,6 +139,7 @@ struct cx231xx_board cx231xx_boards[] = {
.tuner_scl_gpio = 0x1a,
.tuner_sda_gpio = 0x1b,
.decoder = CX231XX_AVDECODER,
+ .output_mode = OUT_MODE_VIP11,
.demod_xfer_mode = 0,
.ctl_pin_status_mask = 0xFFFFFFC4,
.agc_analog_digital_select_gpio = 0x0c,
@@ -165,6 +170,231 @@ struct cx231xx_board cx231xx_boards[] = {
}
},
},
+ [CX231XX_BOARD_CNXT_RDE_253S] = {
+ .name = "Conexant Hybrid TV - RDE253S",
+ .tuner_type = TUNER_NXP_TDA18271,
+ .tuner_addr = 0x60,
+ .tuner_gpio = RDE250_XCV_TUNER,
+ .tuner_sif_gpio = 0x05,
+ .tuner_scl_gpio = 0x1a,
+ .tuner_sda_gpio = 0x1b,
+ .decoder = CX231XX_AVDECODER,
+ .output_mode = OUT_MODE_VIP11,
+ .demod_xfer_mode = 0,
+ .ctl_pin_status_mask = 0xFFFFFFC4,
+ .agc_analog_digital_select_gpio = 0x1c,
+ .gpio_pin_status_mask = 0x4001000,
+ .tuner_i2c_master = 1,
+ .demod_i2c_master = 2,
+ .has_dvb = 1,
+ .demod_addr = 0x02,
+ .norm = V4L2_STD_PAL,
+
+ .input = {{
+ .type = CX231XX_VMUX_TELEVISION,
+ .vmux = CX231XX_VIN_3_1,
+ .amux = CX231XX_AMUX_VIDEO,
+ .gpio = NULL,
+ }, {
+ .type = CX231XX_VMUX_COMPOSITE1,
+ .vmux = CX231XX_VIN_2_1,
+ .amux = CX231XX_AMUX_LINE_IN,
+ .gpio = NULL,
+ }, {
+ .type = CX231XX_VMUX_SVIDEO,
+ .vmux = CX231XX_VIN_1_1 |
+ (CX231XX_VIN_1_2 << 8) |
+ CX25840_SVIDEO_ON,
+ .amux = CX231XX_AMUX_LINE_IN,
+ .gpio = NULL,
+ }
+ },
+ },
+
+ [CX231XX_BOARD_CNXT_RDU_253S] = {
+ .name = "Conexant Hybrid TV - RDU253S",
+ .tuner_type = TUNER_NXP_TDA18271,
+ .tuner_addr = 0x60,
+ .tuner_gpio = RDE250_XCV_TUNER,
+ .tuner_sif_gpio = 0x05,
+ .tuner_scl_gpio = 0x1a,
+ .tuner_sda_gpio = 0x1b,
+ .decoder = CX231XX_AVDECODER,
+ .output_mode = OUT_MODE_VIP11,
+ .demod_xfer_mode = 0,
+ .ctl_pin_status_mask = 0xFFFFFFC4,
+ .agc_analog_digital_select_gpio = 0x1c,
+ .gpio_pin_status_mask = 0x4001000,
+ .tuner_i2c_master = 1,
+ .demod_i2c_master = 2,
+ .has_dvb = 1,
+ .demod_addr = 0x02,
+ .norm = V4L2_STD_PAL,
+
+ .input = {{
+ .type = CX231XX_VMUX_TELEVISION,
+ .vmux = CX231XX_VIN_3_1,
+ .amux = CX231XX_AMUX_VIDEO,
+ .gpio = NULL,
+ }, {
+ .type = CX231XX_VMUX_COMPOSITE1,
+ .vmux = CX231XX_VIN_2_1,
+ .amux = CX231XX_AMUX_LINE_IN,
+ .gpio = NULL,
+ }, {
+ .type = CX231XX_VMUX_SVIDEO,
+ .vmux = CX231XX_VIN_1_1 |
+ (CX231XX_VIN_1_2 << 8) |
+ CX25840_SVIDEO_ON,
+ .amux = CX231XX_AMUX_LINE_IN,
+ .gpio = NULL,
+ }
+ },
+ },
+ [CX231XX_BOARD_CNXT_VIDEO_GRABBER] = {
+ .name = "Conexant VIDEO GRABBER",
+ .tuner_type = TUNER_ABSENT,
+ .decoder = CX231XX_AVDECODER,
+ .output_mode = OUT_MODE_VIP11,
+ .ctl_pin_status_mask = 0xFFFFFFC4,
+ .agc_analog_digital_select_gpio = 0x1c,
+ .gpio_pin_status_mask = 0x4001000,
+ .norm = V4L2_STD_PAL,
+
+ .input = {{
+ .type = CX231XX_VMUX_COMPOSITE1,
+ .vmux = CX231XX_VIN_2_1,
+ .amux = CX231XX_AMUX_LINE_IN,
+ .gpio = NULL,
+ }, {
+ .type = CX231XX_VMUX_SVIDEO,
+ .vmux = CX231XX_VIN_1_1 |
+ (CX231XX_VIN_1_2 << 8) |
+ CX25840_SVIDEO_ON,
+ .amux = CX231XX_AMUX_LINE_IN,
+ .gpio = NULL,
+ }
+ },
+ },
+ [CX231XX_BOARD_CNXT_RDE_250] = {
+ .name = "Conexant Hybrid TV - rde 250",
+ .tuner_type = TUNER_XC5000,
+ .tuner_addr = 0x61,
+ .tuner_gpio = RDE250_XCV_TUNER,
+ .tuner_sif_gpio = 0x05,
+ .tuner_scl_gpio = 0x1a,
+ .tuner_sda_gpio = 0x1b,
+ .decoder = CX231XX_AVDECODER,
+ .output_mode = OUT_MODE_VIP11,
+ .demod_xfer_mode = 0,
+ .ctl_pin_status_mask = 0xFFFFFFC4,
+ .agc_analog_digital_select_gpio = 0x0c,
+ .gpio_pin_status_mask = 0x4001000,
+ .tuner_i2c_master = 1,
+ .demod_i2c_master = 2,
+ .has_dvb = 1,
+ .demod_addr = 0x02,
+ .norm = V4L2_STD_PAL,
+
+ .input = {{
+ .type = CX231XX_VMUX_TELEVISION,
+ .vmux = CX231XX_VIN_2_1,
+ .amux = CX231XX_AMUX_VIDEO,
+ .gpio = NULL,
+ }
+ },
+ },
+ [CX231XX_BOARD_CNXT_RDU_250] = {
+ .name = "Conexant Hybrid TV - RDU 250",
+ .tuner_type = TUNER_XC5000,
+ .tuner_addr = 0x61,
+ .tuner_gpio = RDE250_XCV_TUNER,
+ .tuner_sif_gpio = 0x05,
+ .tuner_scl_gpio = 0x1a,
+ .tuner_sda_gpio = 0x1b,
+ .decoder = CX231XX_AVDECODER,
+ .output_mode = OUT_MODE_VIP11,
+ .demod_xfer_mode = 0,
+ .ctl_pin_status_mask = 0xFFFFFFC4,
+ .agc_analog_digital_select_gpio = 0x0c,
+ .gpio_pin_status_mask = 0x4001000,
+ .tuner_i2c_master = 1,
+ .demod_i2c_master = 2,
+ .has_dvb = 1,
+ .demod_addr = 0x32,
+ .norm = V4L2_STD_NTSC,
+
+ .input = {{
+ .type = CX231XX_VMUX_TELEVISION,
+ .vmux = CX231XX_VIN_2_1,
+ .amux = CX231XX_AMUX_VIDEO,
+ .gpio = NULL,
+ }
+ },
+ },
+ [CX231XX_BOARD_HAUPPAUGE_EXETER] = {
+ .name = "Hauppauge EXETER",
+ .tuner_type = TUNER_NXP_TDA18271,
+ .tuner_addr = 0x60,
+ .tuner_gpio = RDE250_XCV_TUNER,
+ .tuner_sif_gpio = 0x05,
+ .tuner_scl_gpio = 0x1a,
+ .tuner_sda_gpio = 0x1b,
+ .decoder = CX231XX_AVDECODER,
+ .output_mode = OUT_MODE_VIP11,
+ .demod_xfer_mode = 0,
+ .ctl_pin_status_mask = 0xFFFFFFC4,
+ .agc_analog_digital_select_gpio = 0x0c,
+ .gpio_pin_status_mask = 0x4001000,
+ .tuner_i2c_master = 1,
+ .demod_i2c_master = 2,
+ .has_dvb = 1,
+ .demod_addr = 0x0e,
+ .norm = V4L2_STD_NTSC,
+
+ .input = {{
+ .type = CX231XX_VMUX_TELEVISION,
+ .vmux = CX231XX_VIN_3_1,
+ .amux = CX231XX_AMUX_VIDEO,
+ .gpio = 0,
+ }, {
+ .type = CX231XX_VMUX_COMPOSITE1,
+ .vmux = CX231XX_VIN_2_1,
+ .amux = CX231XX_AMUX_LINE_IN,
+ .gpio = 0,
+ }, {
+ .type = CX231XX_VMUX_SVIDEO,
+ .vmux = CX231XX_VIN_1_1 |
+ (CX231XX_VIN_1_2 << 8) |
+ CX25840_SVIDEO_ON,
+ .amux = CX231XX_AMUX_LINE_IN,
+ .gpio = 0,
+ } },
+ },
+ [CX231XX_BOARD_HAUPPAUGE_USBLIVE2] = {
+ .name = "Hauppauge USB Live 2",
+ .tuner_type = TUNER_ABSENT,
+ .decoder = CX231XX_AVDECODER,
+ .output_mode = OUT_MODE_VIP11,
+ .demod_xfer_mode = 0,
+ .ctl_pin_status_mask = 0xFFFFFFC4,
+ .agc_analog_digital_select_gpio = 0x0c,
+ .gpio_pin_status_mask = 0x4001000,
+ .norm = V4L2_STD_NTSC,
+ .input = {{
+ .type = CX231XX_VMUX_COMPOSITE1,
+ .vmux = CX231XX_VIN_2_1,
+ .amux = CX231XX_AMUX_LINE_IN,
+ .gpio = 0,
+ }, {
+ .type = CX231XX_VMUX_SVIDEO,
+ .vmux = CX231XX_VIN_1_1 |
+ (CX231XX_VIN_1_2 << 8) |
+ CX25840_SVIDEO_ON,
+ .amux = CX231XX_AMUX_LINE_IN,
+ .gpio = 0,
+ } },
+ },
};
const unsigned int cx231xx_bcount = ARRAY_SIZE(cx231xx_boards);
@@ -172,12 +402,28 @@ const unsigned int cx231xx_bcount = ARRAY_SIZE(cx231xx_boards);
struct usb_device_id cx231xx_id_table[] = {
{USB_DEVICE(0x0572, 0x5A3C),
.driver_info = CX231XX_BOARD_UNKNOWN},
+ {USB_DEVICE_VER(USB_VID_PIXELVIEW, USB_PID_PIXELVIEW_SBTVD, 0x4000,0x4fff),
+ .driver_info = CX231XX_BOARD_UNKNOWN},
{USB_DEVICE(0x0572, 0x58A2),
- .driver_info = CX231XX_BOARD_CNXT_RDE_250},
+ .driver_info = CX231XX_BOARD_CNXT_CARRAERA},
{USB_DEVICE(0x0572, 0x58A1),
+ .driver_info = CX231XX_BOARD_CNXT_SHELBY},
+ {USB_DEVICE(0x0572, 0x58A4),
+ .driver_info = CX231XX_BOARD_CNXT_RDE_253S},
+ {USB_DEVICE(0x0572, 0x58A5),
+ .driver_info = CX231XX_BOARD_CNXT_RDU_253S},
+ {USB_DEVICE(0x0572, 0x58A6),
+ .driver_info = CX231XX_BOARD_CNXT_VIDEO_GRABBER},
+ {USB_DEVICE(0x0572, 0x589E),
+ .driver_info = CX231XX_BOARD_CNXT_RDE_250},
+ {USB_DEVICE(0x0572, 0x58A0),
.driver_info = CX231XX_BOARD_CNXT_RDU_250},
- {USB_DEVICE_VER(USB_VID_PIXELVIEW, USB_PID_PIXELVIEW_SBTVD, 0x4000,0x4fff),
- .driver_info = CX231XX_BOARD_UNKNOWN},
+ {USB_DEVICE(0x2040, 0xb120),
+ .driver_info = CX231XX_BOARD_HAUPPAUGE_EXETER},
+ {USB_DEVICE(0x2040, 0xb140),
+ .driver_info = CX231XX_BOARD_HAUPPAUGE_EXETER},
+ {USB_DEVICE(0x2040, 0xc200),
+ .driver_info = CX231XX_BOARD_HAUPPAUGE_USBLIVE2},
{},
};
@@ -212,6 +458,23 @@ int cx231xx_tuner_callback(void *ptr, int component, int command, int arg)
}
EXPORT_SYMBOL_GPL(cx231xx_tuner_callback);
+void cx231xx_reset_out(struct cx231xx *dev)
+{
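+ /* pulse the CX23417 reset GPIO: high, low, then high again,
+ with 200 ms settle delays */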
+ cx231xx_set_gpio_value(dev, CX23417_RESET, 1);
+ msleep(200);
+ cx231xx_set_gpio_value(dev, CX23417_RESET, 0);
+ msleep(200);
+ cx231xx_set_gpio_value(dev, CX23417_RESET, 1);
+}
+void cx231xx_enable_OSC(struct cx231xx *dev)
+{
+ cx231xx_set_gpio_value(dev, CX23417_OSC_EN, 1);
+}
+void cx231xx_sleep_s5h1432(struct cx231xx *dev)
+{
+ cx231xx_set_gpio_value(dev, SLEEP_S5H1432, 0);
+}
+
static inline void cx231xx_set_model(struct cx231xx *dev)
{
memcpy(&dev->board, &cx231xx_boards[dev->model], sizeof(dev->board));
@@ -232,13 +495,11 @@ void cx231xx_pre_card_setup(struct cx231xx *dev)
if (dev->board.tuner_gpio) {
cx231xx_set_gpio_direction(dev, dev->board.tuner_gpio->bit, 1);
cx231xx_set_gpio_value(dev, dev->board.tuner_gpio->bit, 1);
+ }
+ if (dev->board.tuner_sif_gpio >= 0)
cx231xx_set_gpio_direction(dev, dev->board.tuner_sif_gpio, 1);
- /* request some modules if any required */
-
- /* reset the Tuner */
- cx231xx_gpio_set(dev, dev->board.tuner_gpio);
- }
+ /* request some modules if any required */
/* set the mode to Analog mode initially */
cx231xx_set_mode(dev, CX231XX_ANALOG_MODE);
@@ -286,26 +547,6 @@ static void cx231xx_config_tuner(struct cx231xx *dev)
}
-/* ----------------------------------------------------------------------- */
-void cx231xx_register_i2c_ir(struct cx231xx *dev)
-{
- if (disable_ir)
- return;
-
- /* REVISIT: instantiate IR device */
-
- /* detect & configure */
- switch (dev->model) {
-
- case CX231XX_BOARD_CNXT_RDE_250:
- break;
- case CX231XX_BOARD_CNXT_RDU_250:
- break;
- default:
- break;
- }
-}
-
void cx231xx_card_setup(struct cx231xx *dev)
{
@@ -319,29 +560,24 @@ void cx231xx_card_setup(struct cx231xx *dev)
if (dev->board.decoder == CX231XX_AVDECODER) {
dev->sd_cx25840 = v4l2_i2c_new_subdev(&dev->v4l2_dev,
&dev->i2c_bus[0].i2c_adap,
- "cx25840", "cx25840", 0x88 >> 1, NULL);
+ "cx25840", 0x88 >> 1, NULL);
if (dev->sd_cx25840 == NULL)
cx231xx_info("cx25840 subdev registration failure\n");
cx25840_call(dev, core, load_fw);
}
+ /* Initialize the tuner */
if (dev->board.tuner_type != TUNER_ABSENT) {
- dev->sd_tuner = v4l2_i2c_new_subdev(&dev->v4l2_dev,
- &dev->i2c_bus[1].i2c_adap,
- "tuner", "tuner", 0xc2 >> 1, NULL);
+ dev->sd_tuner = v4l2_i2c_new_subdev(&dev->v4l2_dev,
+ &dev->i2c_bus[dev->board.tuner_i2c_master].i2c_adap,
+ "tuner",
+ dev->tuner_addr, NULL);
if (dev->sd_tuner == NULL)
cx231xx_info("tuner subdev registration failure\n");
-
- cx231xx_config_tuner(dev);
+ else
+ cx231xx_config_tuner(dev);
}
-
- cx231xx_config_tuner(dev);
-
-#if 0
- /* TBD IR will be added later */
- cx231xx_ir_init(dev);
-#endif
}
/*
@@ -375,12 +611,6 @@ void cx231xx_config_i2c(struct cx231xx *dev)
*/
void cx231xx_release_resources(struct cx231xx *dev)
{
-
-#if 0 /* TBD IR related */
- if (dev->ir)
- cx231xx_ir_fini(dev);
-#endif
-
cx231xx_release_analog_resources(dev);
cx231xx_remove_from_devlist(dev);
@@ -409,6 +639,7 @@ static int cx231xx_init_dev(struct cx231xx **devhandle, struct usb_device *udev,
mutex_init(&dev->lock);
mutex_init(&dev->ctrl_urb_lock);
mutex_init(&dev->gpio_i2c_lock);
+ mutex_init(&dev->i2c_lock);
spin_lock_init(&dev->video_mode.slock);
spin_lock_init(&dev->vbi_mode.slock);
@@ -427,6 +658,13 @@ static int cx231xx_init_dev(struct cx231xx **devhandle, struct usb_device *udev,
/* Query cx231xx to find what pcb config it is related to */
initialize_cx231xx(dev);
+ /* To work around error -71 on EP0 for the VideoGrabber,
+ the alt setting needs to be set here. */
+ if (dev->model == CX231XX_BOARD_CNXT_VIDEO_GRABBER ||
+ dev->model == CX231XX_BOARD_HAUPPAUGE_USBLIVE2) {
+ cx231xx_set_alt_setting(dev, INDEX_VIDEO, 3);
+ cx231xx_set_alt_setting(dev, INDEX_VANC, 1);
+ }
/* Cx231xx pre card setup */
cx231xx_pre_card_setup(dev);
@@ -442,6 +680,7 @@ static int cx231xx_init_dev(struct cx231xx **devhandle, struct usb_device *udev,
/* register i2c bus */
errCode = cx231xx_dev_init(dev);
if (errCode < 0) {
+ cx231xx_dev_uninit(dev);
cx231xx_errdev("%s: cx231xx_i2c_register - errCode [%d]!\n",
__func__, errCode);
return errCode;
@@ -460,8 +699,6 @@ static int cx231xx_init_dev(struct cx231xx **devhandle, struct usb_device *udev,
dev->width = maxw;
dev->height = maxh;
dev->interlaced = 0;
- dev->hscale = 0;
- dev->vscale = 0;
dev->video_input = 0;
errCode = cx231xx_config(dev);
@@ -480,9 +717,17 @@ static int cx231xx_init_dev(struct cx231xx **devhandle, struct usb_device *udev,
INIT_LIST_HEAD(&dev->vbi_mode.vidq.queued);
/* Reset other chips required if they are tied up with GPIO pins */
-
cx231xx_add_into_devlist(dev);
+ if (dev->model == CX231XX_BOARD_CNXT_VIDEO_GRABBER) {
+ printk(KERN_INFO "attach 417 %d\n", dev->model);
+ if (cx231xx_417_register(dev) < 0) {
+ printk(KERN_ERR
+ "%s() Failed to register 417 on VID_B\n",
+ __func__);
+ }
+ }
+
retval = cx231xx_register_analog_devices(dev);
if (retval < 0) {
cx231xx_release_resources(dev);
@@ -537,13 +782,12 @@ static int cx231xx_usb_probe(struct usb_interface *interface,
char *speed;
char descr[255] = "";
struct usb_interface *lif = NULL;
- int skip_interface = 0;
struct usb_interface_assoc_descriptor *assoc_desc;
udev = usb_get_dev(interface_to_usbdev(interface));
ifnum = interface->altsetting[0].desc.bInterfaceNumber;
- if (!ifnum) {
+ if (ifnum == 1) {
/*
* Interface number 0 - IR interface
*/
@@ -552,8 +796,8 @@ static int cx231xx_usb_probe(struct usb_interface *interface,
cx231xx_devused |= 1 << nr;
if (nr >= CX231XX_MAXBOARDS) {
- cx231xx_err(DRIVER_NAME ": Supports only %i cx231xx boards.\n",
- CX231XX_MAXBOARDS);
+ cx231xx_err(DRIVER_NAME
+ ": Supports only %i cx231xx boards.\n", CX231XX_MAXBOARDS);
cx231xx_devused &= ~(1 << nr);
return -ENOMEM;
}
@@ -578,6 +822,7 @@ static int cx231xx_usb_probe(struct usb_interface *interface,
dev->xc_fw_load_done = 0;
dev->has_alsa_audio = 1;
dev->power_mode = -1;
+ atomic_set(&dev->devlist_count, 0);
/* 0 - vbi ; 1 -sliced cc mode */
dev->vbi_or_sliced_cc_mode = 0;
@@ -591,6 +836,11 @@ static int cx231xx_usb_probe(struct usb_interface *interface,
/* store the current interface */
lif = interface;
+ /*mode_tv: digital=1 or analog=0*/
+ dev->mode_tv = 0;
+
+ dev->USE_ISO = transfer_mode;
+
switch (udev->speed) {
case USB_SPEED_LOW:
speed = "1.5";
@@ -624,13 +874,6 @@ static int cx231xx_usb_probe(struct usb_interface *interface,
le16_to_cpu(udev->descriptor.idVendor),
le16_to_cpu(udev->descriptor.idProduct),
dev->max_iad_interface_count);
- } else {
- /* Get dev structure first */
- dev = usb_get_intfdata(udev->actconfig->interface[0]);
- if (dev == NULL) {
- cx231xx_err(DRIVER_NAME ": out of first interface!\n");
- return -ENODEV;
- }
/* store the interface 0 back */
lif = udev->actconfig->interface[0];
@@ -641,35 +884,21 @@ static int cx231xx_usb_probe(struct usb_interface *interface,
/* get device number */
nr = dev->devno;
- /*
- * set skip interface, for all interfaces but
- * interface 1 and the last one
- */
- if ((ifnum != 1) && ((dev->interface_count - 1)
- != dev->max_iad_interface_count))
- skip_interface = 1;
-
- if (ifnum == 1) {
- assoc_desc = udev->actconfig->intf_assoc[0];
- if (assoc_desc->bFirstInterface != ifnum) {
- cx231xx_err(DRIVER_NAME ": Not found "
- "matching IAD interface\n");
- return -ENODEV;
- }
+ assoc_desc = udev->actconfig->intf_assoc[0];
+ if (assoc_desc->bFirstInterface != ifnum) {
+ cx231xx_err(DRIVER_NAME ": Not found "
+ "matching IAD interface\n");
+ return -ENODEV;
}
- }
-
- if (skip_interface)
+ } else {
return -ENODEV;
+ }
cx231xx_info("registering interface %d\n", ifnum);
/* save our data pointer in this interface device */
usb_set_intfdata(lif, dev);
- if ((dev->interface_count - 1) != dev->max_iad_interface_count)
- return 0;
-
/*
* AV device initialization - only done at the last interface
*/
@@ -680,15 +909,18 @@ static int cx231xx_usb_probe(struct usb_interface *interface,
cx231xx_errdev("v4l2_device_register failed\n");
cx231xx_devused &= ~(1 << nr);
kfree(dev);
+ dev = NULL;
return -EIO;
}
-
/* allocate device struct */
retval = cx231xx_init_dev(&dev, udev, nr);
if (retval) {
cx231xx_devused &= ~(1 << dev->devno);
v4l2_device_unregister(&dev->v4l2_dev);
kfree(dev);
+ dev = NULL;
+ usb_set_intfdata(lif, NULL);
+
return retval;
}
@@ -711,6 +943,7 @@ static int cx231xx_usb_probe(struct usb_interface *interface,
cx231xx_devused &= ~(1 << nr);
v4l2_device_unregister(&dev->v4l2_dev);
kfree(dev);
+ dev = NULL;
return -ENOMEM;
}
@@ -744,6 +977,7 @@ static int cx231xx_usb_probe(struct usb_interface *interface,
cx231xx_devused &= ~(1 << nr);
v4l2_device_unregister(&dev->v4l2_dev);
kfree(dev);
+ dev = NULL;
return -ENOMEM;
}
@@ -778,6 +1012,7 @@ static int cx231xx_usb_probe(struct usb_interface *interface,
cx231xx_devused &= ~(1 << nr);
v4l2_device_unregister(&dev->v4l2_dev);
kfree(dev);
+ dev = NULL;
return -ENOMEM;
}
@@ -813,6 +1048,7 @@ static int cx231xx_usb_probe(struct usb_interface *interface,
cx231xx_devused &= ~(1 << nr);
v4l2_device_unregister(&dev->v4l2_dev);
kfree(dev);
+ dev = NULL;
return -ENOMEM;
}
@@ -827,6 +1063,15 @@ static int cx231xx_usb_probe(struct usb_interface *interface,
}
}
+ if (dev->model == CX231XX_BOARD_CNXT_VIDEO_GRABBER) {
+ cx231xx_enable_OSC(dev);
+ cx231xx_reset_out(dev);
+ cx231xx_set_alt_setting(dev, INDEX_VIDEO, 3);
+ }
+
+ if (dev->model == CX231XX_BOARD_CNXT_RDE_253S)
+ cx231xx_sleep_s5h1432(dev);
+
/* load other modules required */
request_modules(dev);
@@ -867,7 +1112,10 @@ static void cx231xx_usb_disconnect(struct usb_interface *interface)
video_device_node_name(dev->vdev));
dev->state |= DEV_MISCONFIGURED;
- cx231xx_uninit_isoc(dev);
+ if (dev->USE_ISO)
+ cx231xx_uninit_isoc(dev);
+ else
+ cx231xx_uninit_bulk(dev);
dev->state |= DEV_DISCONNECTED;
wake_up_interruptible(&dev->wait_frame);
wake_up_interruptible(&dev->wait_stream);
@@ -886,6 +1134,7 @@ static void cx231xx_usb_disconnect(struct usb_interface *interface)
kfree(dev->sliced_cc_mode.alt_max_pkt_size);
kfree(dev->ts1_mode.alt_max_pkt_size);
kfree(dev);
+ dev = NULL;
}
}
diff --git a/drivers/media/video/cx231xx/cx231xx-conf-reg.h b/drivers/media/video/cx231xx/cx231xx-conf-reg.h
index 31a8759f6e54..25593f212abf 100644
--- a/drivers/media/video/cx231xx/cx231xx-conf-reg.h
+++ b/drivers/media/video/cx231xx/cx231xx-conf-reg.h
@@ -39,6 +39,7 @@
#define CIR_CAR_REG 0x38
#define CIR_OT_CFG1 0x40
#define CIR_OT_CFG2 0x44
+#define GBULK_BIT_EN 0x68
#define PWR_CTL_EN 0x74
/* Polaris Endpoints capture mask for register EP_MODE_SET */
diff --git a/drivers/media/video/cx231xx/cx231xx-core.c b/drivers/media/video/cx231xx/cx231xx-core.c
index 912a4d740206..4af46fca9b0a 100644
--- a/drivers/media/video/cx231xx/cx231xx-core.c
+++ b/drivers/media/video/cx231xx/cx231xx-core.c
@@ -27,6 +27,7 @@
#include <linux/usb.h>
#include <linux/vmalloc.h>
#include <media/v4l2-common.h>
+#include <media/tuner.h>
#include "cx231xx.h"
#include "cx231xx-reg.h"
@@ -46,11 +47,6 @@ static unsigned int reg_debug;
module_param(reg_debug, int, 0644);
MODULE_PARM_DESC(reg_debug, "enable debug messages [URB reg]");
-#define cx231xx_regdbg(fmt, arg...) do {\
- if (reg_debug) \
- printk(KERN_INFO "%s %s :"fmt, \
- dev->name, __func__ , ##arg); } while (0)
-
static int alt = CX231XX_PINOUT;
module_param(alt, int, 0644);
MODULE_PARM_DESC(alt, "alternate setting to use for video endpoint");
@@ -64,7 +60,7 @@ MODULE_PARM_DESC(alt, "alternate setting to use for video endpoint");
* Device control list functions *
******************************************************************/
-static LIST_HEAD(cx231xx_devlist);
+LIST_HEAD(cx231xx_devlist);
static DEFINE_MUTEX(cx231xx_devlist_mutex);
/*
@@ -74,33 +70,39 @@ static DEFINE_MUTEX(cx231xx_devlist_mutex);
*/
void cx231xx_remove_from_devlist(struct cx231xx *dev)
{
- mutex_lock(&cx231xx_devlist_mutex);
- list_del(&dev->devlist);
- mutex_unlock(&cx231xx_devlist_mutex);
+ if (dev == NULL)
+ return;
+ if (dev->udev == NULL)
+ return;
+
+ if (atomic_read(&dev->devlist_count) > 0) {
+ mutex_lock(&cx231xx_devlist_mutex);
+ list_del(&dev->devlist);
+ atomic_dec(&dev->devlist_count);
+ mutex_unlock(&cx231xx_devlist_mutex);
+ }
};
void cx231xx_add_into_devlist(struct cx231xx *dev)
{
mutex_lock(&cx231xx_devlist_mutex);
list_add_tail(&dev->devlist, &cx231xx_devlist);
+ atomic_inc(&dev->devlist_count);
mutex_unlock(&cx231xx_devlist_mutex);
};
static LIST_HEAD(cx231xx_extension_devlist);
-static DEFINE_MUTEX(cx231xx_extension_devlist_lock);
int cx231xx_register_extension(struct cx231xx_ops *ops)
{
struct cx231xx *dev = NULL;
mutex_lock(&cx231xx_devlist_mutex);
- mutex_lock(&cx231xx_extension_devlist_lock);
list_add_tail(&ops->next, &cx231xx_extension_devlist);
list_for_each_entry(dev, &cx231xx_devlist, devlist)
ops->init(dev);
printk(KERN_INFO DRIVER_NAME ": %s initialized\n", ops->name);
- mutex_unlock(&cx231xx_extension_devlist_lock);
mutex_unlock(&cx231xx_devlist_mutex);
return 0;
}
@@ -114,10 +116,9 @@ void cx231xx_unregister_extension(struct cx231xx_ops *ops)
list_for_each_entry(dev, &cx231xx_devlist, devlist)
ops->fini(dev);
- mutex_lock(&cx231xx_extension_devlist_lock);
+
printk(KERN_INFO DRIVER_NAME ": %s removed\n", ops->name);
list_del(&ops->next);
- mutex_unlock(&cx231xx_extension_devlist_lock);
mutex_unlock(&cx231xx_devlist_mutex);
}
EXPORT_SYMBOL(cx231xx_unregister_extension);
@@ -126,28 +127,28 @@ void cx231xx_init_extension(struct cx231xx *dev)
{
struct cx231xx_ops *ops = NULL;
- mutex_lock(&cx231xx_extension_devlist_lock);
+ mutex_lock(&cx231xx_devlist_mutex);
if (!list_empty(&cx231xx_extension_devlist)) {
list_for_each_entry(ops, &cx231xx_extension_devlist, next) {
if (ops->init)
ops->init(dev);
}
}
- mutex_unlock(&cx231xx_extension_devlist_lock);
+ mutex_unlock(&cx231xx_devlist_mutex);
}
void cx231xx_close_extension(struct cx231xx *dev)
{
struct cx231xx_ops *ops = NULL;
- mutex_lock(&cx231xx_extension_devlist_lock);
+ mutex_lock(&cx231xx_devlist_mutex);
if (!list_empty(&cx231xx_extension_devlist)) {
list_for_each_entry(ops, &cx231xx_extension_devlist, next) {
if (ops->fini)
ops->fini(dev);
}
}
- mutex_unlock(&cx231xx_extension_devlist_lock);
+ mutex_unlock(&cx231xx_devlist_mutex);
}
/****************************************************************
@@ -234,6 +235,66 @@ int cx231xx_send_usb_command(struct cx231xx_i2c *i2c_bus,
EXPORT_SYMBOL_GPL(cx231xx_send_usb_command);
/*
+ * Sends/Receives URB control messages, making sure to use a kmalloc'ed
+ * buffer (dev->urb_buf) for all operations rather than stack buffers,
+ * which aren't safe for use with USB due to DMA restrictions.
+ * Also implements the debug code for control URBs.
+ */
+static int __usb_control_msg(struct cx231xx *dev, unsigned int pipe,
+ __u8 request, __u8 requesttype, __u16 value, __u16 index,
+ void *data, __u16 size, int timeout)
+{
+ int rc, i;
+
+ if (reg_debug) {
+ printk(KERN_DEBUG "%s: (pipe 0x%08x): "
+ "%s: %02x %02x %02x %02x %02x %02x %02x %02x ",
+ dev->name,
+ pipe,
+ (requesttype & USB_DIR_IN) ? "IN" : "OUT",
+ requesttype,
+ request,
+ value & 0xff, value >> 8,
+ index & 0xff, index >> 8,
+ size & 0xff, size >> 8);
+ if (!(requesttype & USB_DIR_IN)) {
+ printk(KERN_CONT ">>>");
+ for (i = 0; i < size; i++)
+ printk(KERN_CONT " %02x",
+ ((unsigned char *)data)[i]);
+ }
+ }
+
+ /* Do the real call to usb_control_msg */
+ mutex_lock(&dev->ctrl_urb_lock);
+ if (!(requesttype & USB_DIR_IN) && size)
+ memcpy(dev->urb_buf, data, size);
+ rc = usb_control_msg(dev->udev, pipe, request, requesttype, value,
+ index, dev->urb_buf, size, timeout);
+ if ((requesttype & USB_DIR_IN) && size)
+ memcpy(data, dev->urb_buf, size);
+ mutex_unlock(&dev->ctrl_urb_lock);
+
+ if (reg_debug) {
+ if (unlikely(rc < 0)) {
+ printk(KERN_CONT "FAILED!\n");
+ return rc;
+ }
+
+ if ((requesttype & USB_DIR_IN)) {
+ printk(KERN_CONT "<<<");
+ for (i = 0; i < size; i++)
+ printk(KERN_CONT " %02x",
+ ((unsigned char *)data)[i]);
+ }
+ printk(KERN_CONT "\n");
+ }
+
+ return rc;
+}
+
+
+/*
* cx231xx_read_ctrl_reg()
* reads data from the usb device specifying bRequest and wValue
*/
@@ -270,39 +331,9 @@ int cx231xx_read_ctrl_reg(struct cx231xx *dev, u8 req, u16 reg,
if (val == 0xFF)
return -EINVAL;
- if (reg_debug) {
- cx231xx_isocdbg("(pipe 0x%08x): "
- "IN: %02x %02x %02x %02x %02x %02x %02x %02x ",
- pipe,
- USB_DIR_IN | USB_TYPE_VENDOR | USB_RECIP_DEVICE,
- req, 0, val,
- reg & 0xff, reg >> 8, len & 0xff, len >> 8);
- }
-
- mutex_lock(&dev->ctrl_urb_lock);
- ret = usb_control_msg(dev->udev, pipe, req,
+ ret = __usb_control_msg(dev, pipe, req,
USB_DIR_IN | USB_TYPE_VENDOR | USB_RECIP_DEVICE,
- val, reg, dev->urb_buf, len, HZ);
- if (ret < 0) {
- cx231xx_isocdbg(" failed!\n");
- /* mutex_unlock(&dev->ctrl_urb_lock); */
- return ret;
- }
-
- if (len)
- memcpy(buf, dev->urb_buf, len);
-
- mutex_unlock(&dev->ctrl_urb_lock);
-
- if (reg_debug) {
- int byte;
-
- cx231xx_isocdbg("<<<");
- for (byte = 0; byte < len; byte++)
- cx231xx_isocdbg(" %02x", (unsigned char)buf[byte]);
- cx231xx_isocdbg("\n");
- }
-
+ val, reg, buf, len, HZ);
return ret;
}
@@ -311,6 +342,8 @@ int cx231xx_send_vendor_cmd(struct cx231xx *dev,
{
int ret;
int pipe = 0;
+ int unsend_size = 0;
+ u8 *pdata;
if (dev->state & DEV_DISCONNECTED)
return -ENODEV;
@@ -323,31 +356,54 @@ int cx231xx_send_vendor_cmd(struct cx231xx *dev,
else
pipe = usb_sndctrlpipe(dev->udev, 0);
- if (reg_debug) {
- int byte;
+ /*
+ * If the cx23102 reads more than 4 bytes over the I2C bus,
+ * the transfer needs to be chopped into 4-byte requests
+ */
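+ /* wValue bits 1 and 6 appear to mark the first/middle/last
+ 4-byte chunk of the split transfer */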
+ if ((ven_req->wLength > 4) && ((ven_req->bRequest == 0x4) ||
+ (ven_req->bRequest == 0x5) ||
+ (ven_req->bRequest == 0x6))) {
+ unsend_size = 0;
+ pdata = ven_req->pBuff;
+
+
+ unsend_size = ven_req->wLength;
+
+ /* the first package */
+ ven_req->wValue = ven_req->wValue & 0xFFFB;
+ ven_req->wValue = (ven_req->wValue & 0xFFBD) | 0x2;
+ ret = __usb_control_msg(dev, pipe, ven_req->bRequest,
+ ven_req->direction | USB_TYPE_VENDOR | USB_RECIP_DEVICE,
+ ven_req->wValue, ven_req->wIndex, pdata,
+ 0x0004, HZ);
+ unsend_size = unsend_size - 4;
+
+ /* the middle package */
+ ven_req->wValue = (ven_req->wValue & 0xFFBD) | 0x42;
+ while (unsend_size - 4 > 0) {
+ pdata = pdata + 4;
+ ret = __usb_control_msg(dev, pipe,
+ ven_req->bRequest,
+ ven_req->direction | USB_TYPE_VENDOR | USB_RECIP_DEVICE,
+ ven_req->wValue, ven_req->wIndex, pdata,
+ 0x0004, HZ);
+ unsend_size = unsend_size - 4;
+ }
- cx231xx_isocdbg("(pipe 0x%08x): "
- "OUT: %02x %02x %02x %04x %04x %04x >>>",
- pipe,
- ven_req->
- direction | USB_TYPE_VENDOR | USB_RECIP_DEVICE,
- ven_req->bRequest, 0, ven_req->wValue,
- ven_req->wIndex, ven_req->wLength);
-
- for (byte = 0; byte < ven_req->wLength; byte++)
- cx231xx_isocdbg(" %02x",
- (unsigned char)ven_req->pBuff[byte]);
- cx231xx_isocdbg("\n");
+ /* the last package */
+ ven_req->wValue = (ven_req->wValue & 0xFFBD) | 0x40;
+ pdata = pdata + 4;
+ ret = __usb_control_msg(dev, pipe, ven_req->bRequest,
+ ven_req->direction | USB_TYPE_VENDOR | USB_RECIP_DEVICE,
+ ven_req->wValue, ven_req->wIndex, pdata,
+ unsend_size, HZ);
+ } else {
+ ret = __usb_control_msg(dev, pipe, ven_req->bRequest,
+ ven_req->direction | USB_TYPE_VENDOR | USB_RECIP_DEVICE,
+ ven_req->wValue, ven_req->wIndex,
+ ven_req->pBuff, ven_req->wLength, HZ);
}
- mutex_lock(&dev->ctrl_urb_lock);
- ret = usb_control_msg(dev->udev, pipe, ven_req->bRequest,
- ven_req->
- direction | USB_TYPE_VENDOR | USB_RECIP_DEVICE,
- ven_req->wValue, ven_req->wIndex, ven_req->pBuff,
- ven_req->wLength, HZ);
- mutex_unlock(&dev->ctrl_urb_lock);
-
return ret;
}
@@ -403,12 +459,9 @@ int cx231xx_write_ctrl_reg(struct cx231xx *dev, u8 req, u16 reg, char *buf,
cx231xx_isocdbg("\n");
}
- mutex_lock(&dev->ctrl_urb_lock);
- memcpy(dev->urb_buf, buf, len);
- ret = usb_control_msg(dev->udev, pipe, req,
+ ret = __usb_control_msg(dev, pipe, req,
USB_DIR_OUT | USB_TYPE_VENDOR | USB_RECIP_DEVICE,
- val, reg, dev->urb_buf, len, HZ);
- mutex_unlock(&dev->ctrl_urb_lock);
+ val, reg, buf, len, HZ);
return ret;
}
@@ -444,6 +497,11 @@ int cx231xx_set_video_alternate(struct cx231xx *dev)
dev->video_mode.alt = 0;
}
+ if (dev->USE_ISO == 0)
+ dev->video_mode.alt = 0;
+
+ cx231xx_coredbg("dev->video_mode.alt= %d\n", dev->video_mode.alt);
+
/* Get the correct video interface Index */
usb_interface_index =
dev->current_pcb_config.hs_config_info[0].interface_info.
@@ -452,15 +510,13 @@ int cx231xx_set_video_alternate(struct cx231xx *dev)
if (dev->video_mode.alt != prev_alt) {
cx231xx_coredbg("minimum isoc packet size: %u (alt=%d)\n",
min_pkt_size, dev->video_mode.alt);
- dev->video_mode.max_pkt_size =
- dev->video_mode.alt_max_pkt_size[dev->video_mode.alt];
+
+ if (dev->video_mode.alt_max_pkt_size != NULL)
+ dev->video_mode.max_pkt_size =
+ dev->video_mode.alt_max_pkt_size[dev->video_mode.alt];
cx231xx_coredbg("setting alternate %d with wMaxPacketSize=%u\n",
dev->video_mode.alt,
dev->video_mode.max_pkt_size);
- cx231xx_info
- (" setting alt %d with wMaxPktSize=%u , Interface = %d\n",
- dev->video_mode.alt, dev->video_mode.max_pkt_size,
- usb_interface_index);
errCode =
usb_set_interface(dev->udev, usb_interface_index,
dev->video_mode.alt);
@@ -485,7 +541,7 @@ int cx231xx_set_alt_setting(struct cx231xx *dev, u8 index, u8 alt)
usb_interface_index =
dev->current_pcb_config.hs_config_info[0].interface_info.
ts1_index + 1;
- dev->video_mode.alt = alt;
+ dev->ts1_mode.alt = alt;
if (dev->ts1_mode.alt_max_pkt_size != NULL)
max_pkt_size = dev->ts1_mode.max_pkt_size =
dev->ts1_mode.alt_max_pkt_size[dev->ts1_mode.alt];
@@ -542,12 +598,16 @@ int cx231xx_set_alt_setting(struct cx231xx *dev, u8 index, u8 alt)
cx231xx_errdev
("can't change interface %d alt no. to %d: Max. Pkt size = 0\n",
usb_interface_index, alt);
- return -1;
+ /* To work around error -71 on EP0 for the video grabber,
+ the following code is needed. */
+ if (dev->model != CX231XX_BOARD_CNXT_VIDEO_GRABBER &&
+ dev->model != CX231XX_BOARD_HAUPPAUGE_USBLIVE2)
+ return -1;
}
- cx231xx_info
- (" setting alternate %d with wMaxPacketSize=%u , Interface = %d\n",
- alt, max_pkt_size, usb_interface_index);
+ cx231xx_coredbg("setting alternate %d with wMaxPacketSize=%u,"
+ "Interface = %d\n", alt, max_pkt_size,
+ usb_interface_index);
if (usb_interface_index > 0) {
status = usb_set_interface(dev->udev, usb_interface_index, alt);
@@ -584,8 +644,56 @@ int cx231xx_gpio_set(struct cx231xx *dev, struct cx231xx_reg_seq *gpio)
return rc;
}
+int cx231xx_demod_reset(struct cx231xx *dev)
+{
+
+ u8 status = 0;
+ u8 value[4] = { 0, 0, 0, 0 };
+
+ status = cx231xx_read_ctrl_reg(dev, VRT_GET_REGISTER, PWR_CTL_EN,
+ value, 4);
+
+ cx231xx_coredbg("reg0x%x=0x%x 0x%x 0x%x 0x%x\n", PWR_CTL_EN,
+ value[0], value[1], value[2], value[3]);
+
+ cx231xx_coredbg("Enter cx231xx_demod_reset()\n");
+
+ value[1] = (u8) 0x3;
+ status = cx231xx_write_ctrl_reg(dev, VRT_SET_REGISTER,
+ PWR_CTL_EN, value, 4);
+ msleep(10);
+
+ value[1] = (u8) 0x0;
+ status = cx231xx_write_ctrl_reg(dev, VRT_SET_REGISTER,
+ PWR_CTL_EN, value, 4);
+ msleep(10);
+
+ value[1] = (u8) 0x3;
+ status = cx231xx_write_ctrl_reg(dev, VRT_SET_REGISTER,
+ PWR_CTL_EN, value, 4);
+ msleep(10);
+
+
+
+ status = cx231xx_read_ctrl_reg(dev, VRT_GET_REGISTER, PWR_CTL_EN,
+ value, 4);
+
+ cx231xx_coredbg("reg0x%x=0x%x 0x%x 0x%x 0x%x\n", PWR_CTL_EN,
+ value[0], value[1], value[2], value[3]);
+
+ return status;
+}
+EXPORT_SYMBOL_GPL(cx231xx_demod_reset);
+int is_fw_load(struct cx231xx *dev)
+{
+ return cx231xx_check_fw(dev);
+}
+EXPORT_SYMBOL_GPL(is_fw_load);
+
int cx231xx_set_mode(struct cx231xx *dev, enum cx231xx_mode set_mode)
{
+ int errCode = 0;
+
if (dev->mode == set_mode)
return 0;
@@ -600,15 +708,75 @@ int cx231xx_set_mode(struct cx231xx *dev, enum cx231xx_mode set_mode)
dev->mode = set_mode;
- if (dev->mode == CX231XX_DIGITAL_MODE)
- ;/* Set Digital power mode */
- else
- ;/* Set Analog Power mode */
+ if (dev->mode == CX231XX_DIGITAL_MODE)/* Set Digital power mode */ {
+ /* set AGC mode to Digital */
+ switch (dev->model) {
+ case CX231XX_BOARD_CNXT_CARRAERA:
+ case CX231XX_BOARD_CNXT_RDE_250:
+ case CX231XX_BOARD_CNXT_SHELBY:
+ case CX231XX_BOARD_CNXT_RDU_250:
+ errCode = cx231xx_set_agc_analog_digital_mux_select(dev, 0);
+ break;
+ case CX231XX_BOARD_CNXT_RDE_253S:
+ case CX231XX_BOARD_CNXT_RDU_253S:
+ errCode = cx231xx_set_agc_analog_digital_mux_select(dev, 1);
+ break;
+ case CX231XX_BOARD_HAUPPAUGE_EXETER:
+ errCode = cx231xx_set_power_mode(dev,
+ POLARIS_AVMODE_DIGITAL);
+ break;
+ default:
+ break;
+ }
+ } else/* Set Analog Power mode */ {
+ /* set AGC mode to Analog */
+ switch (dev->model) {
+ case CX231XX_BOARD_CNXT_CARRAERA:
+ case CX231XX_BOARD_CNXT_RDE_250:
+ case CX231XX_BOARD_CNXT_SHELBY:
+ case CX231XX_BOARD_CNXT_RDU_250:
+ errCode = cx231xx_set_agc_analog_digital_mux_select(dev, 1);
+ break;
+ case CX231XX_BOARD_CNXT_RDE_253S:
+ case CX231XX_BOARD_CNXT_RDU_253S:
+ case CX231XX_BOARD_HAUPPAUGE_EXETER:
+ errCode = cx231xx_set_agc_analog_digital_mux_select(dev, 0);
+ break;
+ default:
+ break;
+ }
+ }
return 0;
}
EXPORT_SYMBOL_GPL(cx231xx_set_mode);
+int cx231xx_ep5_bulkout(struct cx231xx *dev, u8 *firmware, u16 size)
+{
+ int errCode = 0;
+ int actlen, ret = -ENOMEM;
+ u32 *buffer;
+
+ buffer = kzalloc(4096, GFP_KERNEL);
+ if (buffer == NULL) {
+ cx231xx_info("out of mem\n");
+ return -ENOMEM;
+ }
+ memcpy(&buffer[0], firmware, 4096);
+
+ ret = usb_bulk_msg(dev->udev, usb_sndbulkpipe(dev->udev, 5),
+ buffer, 4096, &actlen, 2000);
+
+ if (ret)
+ cx231xx_info("bulk message failed: %d (%d/%d)", ret,
+ size, actlen);
+ else {
+ errCode = actlen != size ? -1 : 0;
+ }
+ kfree(buffer);
+ return 0;
+}
+
/*****************************************************************
* URB Streaming functions *
******************************************************************/
@@ -616,7 +784,7 @@ EXPORT_SYMBOL_GPL(cx231xx_set_mode);
/*
* IRQ callback, called by URB callback
*/
-static void cx231xx_irq_callback(struct urb *urb)
+static void cx231xx_isoc_irq_callback(struct urb *urb)
{
struct cx231xx_dmaqueue *dma_q = urb->context;
struct cx231xx_video_mode *vmode =
@@ -655,12 +823,54 @@ static void cx231xx_irq_callback(struct urb *urb)
urb->status);
}
}
+/*****************************************************************
+* URB Streaming functions *
+******************************************************************/
/*
+ * IRQ callback, called by URB callback
+ */
+static void cx231xx_bulk_irq_callback(struct urb *urb)
+{
+ struct cx231xx_dmaqueue *dma_q = urb->context;
+ struct cx231xx_video_mode *vmode =
+ container_of(dma_q, struct cx231xx_video_mode, vidq);
+ struct cx231xx *dev = container_of(vmode, struct cx231xx, video_mode);
+ int rc;
+
+ switch (urb->status) {
+ case 0: /* success */
+ case -ETIMEDOUT: /* NAK */
+ break;
+ case -ECONNRESET: /* kill */
+ case -ENOENT:
+ case -ESHUTDOWN:
+ return;
+ default: /* error */
+ cx231xx_isocdbg("urb completition error %d.\n", urb->status);
+ break;
+ }
+
+ /* Copy data from URB */
+ spin_lock(&dev->video_mode.slock);
+ rc = dev->video_mode.bulk_ctl.bulk_copy(dev, urb);
+ spin_unlock(&dev->video_mode.slock);
+
+ /* Reset urb buffers */
+ urb->status = 0;
+
+ urb->status = usb_submit_urb(urb, GFP_ATOMIC);
+ if (urb->status) {
+ cx231xx_isocdbg("urb resubmit failed (error=%i)\n",
+ urb->status);
+ }
+}
+/*
* Stop and Deallocate URBs
*/
void cx231xx_uninit_isoc(struct cx231xx *dev)
{
+ struct cx231xx_dmaqueue *dma_q = &dev->video_mode.vidq;
struct urb *urb;
int i;
@@ -690,16 +900,71 @@ void cx231xx_uninit_isoc(struct cx231xx *dev)
kfree(dev->video_mode.isoc_ctl.urb);
kfree(dev->video_mode.isoc_ctl.transfer_buffer);
+ kfree(dma_q->p_left_data);
dev->video_mode.isoc_ctl.urb = NULL;
dev->video_mode.isoc_ctl.transfer_buffer = NULL;
dev->video_mode.isoc_ctl.num_bufs = 0;
+ dma_q->p_left_data = NULL;
+
+ if (dev->mode_tv == 0)
+ cx231xx_capture_start(dev, 0, Raw_Video);
+ else
+ cx231xx_capture_start(dev, 0, TS1_serial_mode);
+
- cx231xx_capture_start(dev, 0, Raw_Video);
}
EXPORT_SYMBOL_GPL(cx231xx_uninit_isoc);
/*
+ * Stop and Deallocate URBs
+ */
+void cx231xx_uninit_bulk(struct cx231xx *dev)
+{
+ struct urb *urb;
+ int i;
+
+ cx231xx_isocdbg("cx231xx: called cx231xx_uninit_bulk\n");
+
+ dev->video_mode.bulk_ctl.nfields = -1;
+ for (i = 0; i < dev->video_mode.bulk_ctl.num_bufs; i++) {
+ urb = dev->video_mode.bulk_ctl.urb[i];
+ if (urb) {
+ if (!irqs_disabled())
+ usb_kill_urb(urb);
+ else
+ usb_unlink_urb(urb);
+
+ if (dev->video_mode.bulk_ctl.transfer_buffer[i]) {
+ usb_free_coherent(dev->udev,
+ urb->transfer_buffer_length,
+ dev->video_mode.bulk_ctl.
+ transfer_buffer[i],
+ urb->transfer_dma);
+ }
+ usb_free_urb(urb);
+ dev->video_mode.bulk_ctl.urb[i] = NULL;
+ }
+ dev->video_mode.bulk_ctl.transfer_buffer[i] = NULL;
+ }
+
+ kfree(dev->video_mode.bulk_ctl.urb);
+ kfree(dev->video_mode.bulk_ctl.transfer_buffer);
+
+ dev->video_mode.bulk_ctl.urb = NULL;
+ dev->video_mode.bulk_ctl.transfer_buffer = NULL;
+ dev->video_mode.bulk_ctl.num_bufs = 0;
+
+ if (dev->mode_tv == 0)
+ cx231xx_capture_start(dev, 0, Raw_Video);
+ else
+ cx231xx_capture_start(dev, 0, TS1_serial_mode);
+
+
+}
+EXPORT_SYMBOL_GPL(cx231xx_uninit_bulk);
+
+/*
* Allocate URBs and start IRQ
*/
int cx231xx_init_isoc(struct cx231xx *dev, int max_packets,
@@ -713,15 +978,16 @@ int cx231xx_init_isoc(struct cx231xx *dev, int max_packets,
int j, k;
int rc;
- cx231xx_isocdbg("cx231xx: called cx231xx_prepare_isoc\n");
+ /* De-allocates all pending stuff */
+ cx231xx_uninit_isoc(dev);
- dev->video_input = dev->video_input > 2 ? 2 : dev->video_input;
+ dma_q->p_left_data = kzalloc(4096, GFP_KERNEL);
+ if (dma_q->p_left_data == NULL) {
+ cx231xx_info("out of mem\n");
+ return -ENOMEM;
+ }
- cx231xx_info("Setting Video mux to %d\n", dev->video_input);
- video_mux(dev, dev->video_input);
- /* De-allocates all pending stuff */
- cx231xx_uninit_isoc(dev);
dev->video_mode.isoc_ctl.isoc_copy = isoc_copy;
dev->video_mode.isoc_ctl.num_bufs = num_bufs;
@@ -733,6 +999,14 @@ int cx231xx_init_isoc(struct cx231xx *dev, int max_packets,
dma_q->lines_per_field = dev->height / 2;
dma_q->bytes_left_in_line = dev->width << 1;
dma_q->lines_completed = 0;
+ dma_q->mpeg_buffer_done = 0;
+ dma_q->left_data_count = 0;
+ dma_q->mpeg_buffer_completed = 0;
+ dma_q->add_ps_package_head = CX231XX_NEED_ADD_PS_PACKAGE_HEAD;
+ dma_q->ps_head[0] = 0x00;
+ dma_q->ps_head[1] = 0x00;
+ dma_q->ps_head[2] = 0x01;
+ dma_q->ps_head[3] = 0xBA;
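+ /* 0x000001BA is the MPEG-2 program stream pack header start code */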
for (i = 0; i < 8; i++)
dma_q->partial_buf[i] = 0;
@@ -756,6 +1030,12 @@ int cx231xx_init_isoc(struct cx231xx *dev, int max_packets,
sb_size = max_packets * dev->video_mode.isoc_ctl.max_pkt_size;
+ if (dev->mode_tv == 1)
+ dev->video_mode.end_point_addr = 0x81;
+ else
+ dev->video_mode.end_point_addr = 0x84;
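+ /* TS1 (digital) capture uses EP 0x81, raw video uses EP 0x84 */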
+
+
/* allocate urbs and transfer buffers */
for (i = 0; i < dev->video_mode.isoc_ctl.num_bufs; i++) {
urb = usb_alloc_urb(max_packets, GFP_KERNEL);
@@ -784,7 +1064,7 @@ int cx231xx_init_isoc(struct cx231xx *dev, int max_packets,
usb_fill_int_urb(urb, dev->udev, pipe,
dev->video_mode.isoc_ctl.transfer_buffer[i],
- sb_size, cx231xx_irq_callback, dma_q, 1);
+ sb_size, cx231xx_isoc_irq_callback, dma_q, 1);
urb->number_of_packets = max_packets;
urb->transfer_flags = URB_ISO_ASAP;
@@ -812,12 +1092,176 @@ int cx231xx_init_isoc(struct cx231xx *dev, int max_packets,
}
}
- cx231xx_capture_start(dev, 1, Raw_Video);
+ if (dev->mode_tv == 0)
+ cx231xx_capture_start(dev, 1, Raw_Video);
+ else
+ cx231xx_capture_start(dev, 1, TS1_serial_mode);
return 0;
}
EXPORT_SYMBOL_GPL(cx231xx_init_isoc);
+/*
+ * Allocate URBs and start IRQ
+ */
+int cx231xx_init_bulk(struct cx231xx *dev, int max_packets,
+ int num_bufs, int max_pkt_size,
+ int (*bulk_copy) (struct cx231xx *dev, struct urb *urb))
+{
+ struct cx231xx_dmaqueue *dma_q = &dev->video_mode.vidq;
+ int i;
+ int sb_size, pipe;
+ struct urb *urb;
+ int rc;
+
+ dev->video_input = dev->video_input > 2 ? 2 : dev->video_input;
+
+ cx231xx_coredbg("Setting Video mux to %d\n", dev->video_input);
+
+ video_mux(dev, dev->video_input);
+
+ /* De-allocates all pending stuff */
+ cx231xx_uninit_bulk(dev);
+
+ dev->video_mode.bulk_ctl.bulk_copy = bulk_copy;
+ dev->video_mode.bulk_ctl.num_bufs = num_bufs;
+ dma_q->pos = 0;
+ dma_q->is_partial_line = 0;
+ dma_q->last_sav = 0;
+ dma_q->current_field = -1;
+ dma_q->field1_done = 0;
+ dma_q->lines_per_field = dev->height / 2;
+ dma_q->bytes_left_in_line = dev->width << 1;
+ dma_q->lines_completed = 0;
+ dma_q->mpeg_buffer_done = 0;
+ dma_q->left_data_count = 0;
+ dma_q->mpeg_buffer_completed = 0;
+ dma_q->ps_head[0] = 0x00;
+ dma_q->ps_head[1] = 0x00;
+ dma_q->ps_head[2] = 0x01;
+ dma_q->ps_head[3] = 0xBA;
+ for (i = 0; i < 8; i++)
+ dma_q->partial_buf[i] = 0;
+
+ dev->video_mode.bulk_ctl.urb =
+ kzalloc(sizeof(void *) * num_bufs, GFP_KERNEL);
+ if (!dev->video_mode.bulk_ctl.urb) {
+ cx231xx_errdev("cannot alloc memory for usb buffers\n");
+ return -ENOMEM;
+ }
+
+ dev->video_mode.bulk_ctl.transfer_buffer =
+ kzalloc(sizeof(void *) * num_bufs, GFP_KERNEL);
+ if (!dev->video_mode.bulk_ctl.transfer_buffer) {
+ cx231xx_errdev("cannot allocate memory for usbtransfer\n");
+ kfree(dev->video_mode.bulk_ctl.urb);
+ return -ENOMEM;
+ }
+
+ dev->video_mode.bulk_ctl.max_pkt_size = max_pkt_size;
+ dev->video_mode.bulk_ctl.buf = NULL;
+
+ sb_size = max_packets * dev->video_mode.bulk_ctl.max_pkt_size;
+
+ if (dev->mode_tv == 1)
+ dev->video_mode.end_point_addr = 0x81;
+ else
+ dev->video_mode.end_point_addr = 0x84;
+
+
+ /* allocate urbs and transfer buffers */
+ for (i = 0; i < dev->video_mode.bulk_ctl.num_bufs; i++) {
+ urb = usb_alloc_urb(0, GFP_KERNEL);
+ if (!urb) {
+ cx231xx_err("cannot alloc bulk_ctl.urb %i\n", i);
+ cx231xx_uninit_bulk(dev);
+ return -ENOMEM;
+ }
+ dev->video_mode.bulk_ctl.urb[i] = urb;
+ urb->transfer_flags = 0;
+
+ dev->video_mode.bulk_ctl.transfer_buffer[i] =
+ usb_alloc_coherent(dev->udev, sb_size, GFP_KERNEL,
+ &urb->transfer_dma);
+ if (!dev->video_mode.bulk_ctl.transfer_buffer[i]) {
+ cx231xx_err("unable to allocate %i bytes for transfer"
+ " buffer %i%s\n",
+ sb_size, i,
+ in_interrupt() ? " while in int" : "");
+ cx231xx_uninit_bulk(dev);
+ return -ENOMEM;
+ }
+ memset(dev->video_mode.bulk_ctl.transfer_buffer[i], 0, sb_size);
+
+ pipe = usb_rcvbulkpipe(dev->udev,
+ dev->video_mode.end_point_addr);
+ usb_fill_bulk_urb(urb, dev->udev, pipe,
+ dev->video_mode.bulk_ctl.transfer_buffer[i],
+ sb_size, cx231xx_bulk_irq_callback, dma_q);
+ }
+
+ init_waitqueue_head(&dma_q->wq);
+
+ /* submit urbs and enables IRQ */
+ for (i = 0; i < dev->video_mode.bulk_ctl.num_bufs; i++) {
+ rc = usb_submit_urb(dev->video_mode.bulk_ctl.urb[i],
+ GFP_ATOMIC);
+ if (rc) {
+ cx231xx_err("submit of urb %i failed (error=%i)\n", i,
+ rc);
+ cx231xx_uninit_bulk(dev);
+ return rc;
+ }
+ }
+
+ if (dev->mode_tv == 0)
+ cx231xx_capture_start(dev, 1, Raw_Video);
+ else
+ cx231xx_capture_start(dev, 1, TS1_serial_mode);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(cx231xx_init_bulk);
+void cx231xx_stop_TS1(struct cx231xx *dev)
+{
+ int status = 0;
+ u8 val[4] = { 0, 0, 0, 0 };
+
+ val[0] = 0x00;
+ val[1] = 0x03;
+ val[2] = 0x00;
+ val[3] = 0x00;
+ status = cx231xx_write_ctrl_reg(dev, VRT_SET_REGISTER,
+ TS_MODE_REG, val, 4);
+
+ val[0] = 0x00;
+ val[1] = 0x70;
+ val[2] = 0x04;
+ val[3] = 0x00;
+ status = cx231xx_write_ctrl_reg(dev, VRT_SET_REGISTER,
+ TS1_CFG_REG, val, 4);
+}
+/* EXPORT_SYMBOL_GPL(cx231xx_stop_TS1); */
+void cx231xx_start_TS1(struct cx231xx *dev)
+{
+ int status = 0;
+ u8 val[4] = { 0, 0, 0, 0 };
+
+ val[0] = 0x03;
+ val[1] = 0x03;
+ val[2] = 0x00;
+ val[3] = 0x00;
+ status = cx231xx_write_ctrl_reg(dev, VRT_SET_REGISTER,
+ TS_MODE_REG, val, 4);
+
+ val[0] = 0x04;
+ val[1] = 0xA3;
+ val[2] = 0x3B;
+ val[3] = 0x00;
+ status = cx231xx_write_ctrl_reg(dev, VRT_SET_REGISTER,
+ TS1_CFG_REG, val, 4);
+}
+/* EXPORT_SYMBOL_GPL(cx231xx_start_TS1); */
/*****************************************************************
* Device Init/UnInit functions *
******************************************************************/
@@ -830,14 +1274,14 @@ int cx231xx_dev_init(struct cx231xx *dev)
/* External Master 1 Bus */
dev->i2c_bus[0].nr = 0;
dev->i2c_bus[0].dev = dev;
- dev->i2c_bus[0].i2c_period = I2C_SPEED_1M; /* 1MHz */
+ dev->i2c_bus[0].i2c_period = I2C_SPEED_100K; /* 100 KHz */
dev->i2c_bus[0].i2c_nostop = 0;
dev->i2c_bus[0].i2c_reserve = 0;
/* External Master 2 Bus */
dev->i2c_bus[1].nr = 1;
dev->i2c_bus[1].dev = dev;
- dev->i2c_bus[1].i2c_period = I2C_SPEED_1M; /* 1MHz */
+ dev->i2c_bus[1].i2c_period = I2C_SPEED_100K; /* 100 KHz */
dev->i2c_bus[1].i2c_nostop = 0;
dev->i2c_bus[1].i2c_reserve = 0;
@@ -856,14 +1300,34 @@ int cx231xx_dev_init(struct cx231xx *dev)
/* init hardware */
/* Note : with out calling set power mode function,
afe can not be set up correctly */
- errCode = cx231xx_set_power_mode(dev, POLARIS_AVMODE_ANALOGT_TV);
- if (errCode < 0) {
- cx231xx_errdev
- ("%s: Failed to set Power - errCode [%d]!\n",
- __func__, errCode);
- return errCode;
+ if (dev->model == CX231XX_BOARD_CNXT_VIDEO_GRABBER ||
+ dev->model == CX231XX_BOARD_HAUPPAUGE_USBLIVE2) {
+ errCode = cx231xx_set_power_mode(dev,
+ POLARIS_AVMODE_ENXTERNAL_AV);
+ if (errCode < 0) {
+ cx231xx_errdev
+ ("%s: Failed to set Power - errCode [%d]!\n",
+ __func__, errCode);
+ return errCode;
+ }
+ } else {
+ errCode = cx231xx_set_power_mode(dev,
+ POLARIS_AVMODE_ANALOGT_TV);
+ if (errCode < 0) {
+ cx231xx_errdev
+ ("%s: Failed to set Power - errCode [%d]!\n",
+ __func__, errCode);
+ return errCode;
+ }
}
+ /* reset the Tuner */
+ if ((dev->model == CX231XX_BOARD_CNXT_CARRAERA) ||
+ (dev->model == CX231XX_BOARD_CNXT_RDE_250) ||
+ (dev->model == CX231XX_BOARD_CNXT_SHELBY) ||
+ (dev->model == CX231XX_BOARD_CNXT_RDU_250))
+ cx231xx_gpio_set(dev, dev->board.tuner_gpio);
+
/* initialize Colibri block */
errCode = cx231xx_afe_init_super_block(dev, 0x23c);
if (errCode < 0) {
@@ -907,7 +1371,21 @@ int cx231xx_dev_init(struct cx231xx *dev)
}
/* set AGC mode to Analog */
+ switch (dev->model) {
+ case CX231XX_BOARD_CNXT_CARRAERA:
+ case CX231XX_BOARD_CNXT_RDE_250:
+ case CX231XX_BOARD_CNXT_SHELBY:
+ case CX231XX_BOARD_CNXT_RDU_250:
errCode = cx231xx_set_agc_analog_digital_mux_select(dev, 1);
+ break;
+ case CX231XX_BOARD_CNXT_RDE_253S:
+ case CX231XX_BOARD_CNXT_RDU_253S:
+ case CX231XX_BOARD_HAUPPAUGE_EXETER:
+ errCode = cx231xx_set_agc_analog_digital_mux_select(dev, 0);
+ break;
+ default:
+ break;
+ }
if (errCode < 0) {
cx231xx_errdev
("%s: cx231xx_AGC mode to Analog - errCode [%d]!\n",
@@ -923,7 +1401,7 @@ int cx231xx_dev_init(struct cx231xx *dev)
cx231xx_set_alt_setting(dev, INDEX_TS1, 0);
/* set the I2C master port to 3 on channel 1 */
- errCode = cx231xx_enable_i2c_for_tuner(dev, I2C_3);
+ errCode = cx231xx_enable_i2c_port_3(dev, true);
return errCode;
}
@@ -941,7 +1419,7 @@ EXPORT_SYMBOL_GPL(cx231xx_dev_uninit);
/*****************************************************************
* G P I O related functions *
******************************************************************/
-int cx231xx_send_gpio_cmd(struct cx231xx *dev, u32 gpio_bit, u8 * gpio_val,
+int cx231xx_send_gpio_cmd(struct cx231xx *dev, u32 gpio_bit, u8 *gpio_val,
u8 len, u8 request, u8 direction)
{
int status = 0;
@@ -1026,6 +1504,91 @@ int cx231xx_mode_register(struct cx231xx *dev, u16 address, u32 mode)
/*****************************************************************
* I 2 C Internal C O N T R O L functions *
*****************************************************************/
+int cx231xx_read_i2c_master(struct cx231xx *dev, u8 dev_addr, u16 saddr,
+ u8 saddr_len, u32 *data, u8 data_len, int master)
+{
+ int status = 0;
+ struct cx231xx_i2c_xfer_data req_data;
+ u8 value[64] = "0";
+
+ if (saddr_len == 0)
+ saddr = 0;
+ else if (saddr_len == 1)
+ saddr &= 0xff;
+
+ /* prepare xfer_data struct */
+ req_data.dev_addr = dev_addr >> 1;
+ req_data.direction = I2C_M_RD;
+ req_data.saddr_len = saddr_len;
+ req_data.saddr_dat = saddr;
+ req_data.buf_size = data_len;
+ req_data.p_buffer = (u8 *) value;
+
+ /* usb send command */
+ if (master == 0)
+ status = dev->cx231xx_send_usb_command(&dev->i2c_bus[0],
+ &req_data);
+ else if (master == 1)
+ status = dev->cx231xx_send_usb_command(&dev->i2c_bus[1],
+ &req_data);
+ else if (master == 2)
+ status = dev->cx231xx_send_usb_command(&dev->i2c_bus[2],
+ &req_data);
+
+ if (status >= 0) {
+ /* Copy the data read back to main buffer */
+ if (data_len == 1)
+ *data = value[0];
+ else if (data_len == 4)
+ *data =
+ value[0] | value[1] << 8 | value[2] << 16 | value[3]
+ << 24;
+ else if (data_len > 4)
+ *data = value[saddr];
+ }
+
+ return status;
+}
+
+int cx231xx_write_i2c_master(struct cx231xx *dev, u8 dev_addr, u16 saddr,
+ u8 saddr_len, u32 data, u8 data_len, int master)
+{
+ int status = 0;
+ u8 value[4] = { 0, 0, 0, 0 };
+ struct cx231xx_i2c_xfer_data req_data;
+
+ value[0] = (u8) data;
+ value[1] = (u8) (data >> 8);
+ value[2] = (u8) (data >> 16);
+ value[3] = (u8) (data >> 24);
+
+ if (saddr_len == 0)
+ saddr = 0;
+ else if (saddr_len == 1)
+ saddr &= 0xff;
+
+ /* prepare xfer_data struct */
+ req_data.dev_addr = dev_addr >> 1;
+ req_data.direction = 0;
+ req_data.saddr_len = saddr_len;
+ req_data.saddr_dat = saddr;
+ req_data.buf_size = data_len;
+ req_data.p_buffer = value;
+
+ /* usb send command */
+ if (master == 0)
+ status = dev->cx231xx_send_usb_command(&dev->i2c_bus[0],
+ &req_data);
+ else if (master == 1)
+ status = dev->cx231xx_send_usb_command(&dev->i2c_bus[1],
+ &req_data);
+ else if (master == 2)
+ status = dev->cx231xx_send_usb_command(&dev->i2c_bus[2],
+ &req_data);
+
+ return status;
+}
+
int cx231xx_read_i2c_data(struct cx231xx *dev, u8 dev_addr, u16 saddr,
u8 saddr_len, u32 *data, u8 data_len)
{
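
For illustration only (this block is not part of the patch): the new cx231xx_read_i2c_master()/cx231xx_write_i2c_master() helpers take the same arguments as the existing cx231xx_read_i2c_data()/cx231xx_write_i2c_data() pair plus a trailing master index that selects the I2C port. A minimal usage sketch, assuming a hypothetical device at 8-bit bus address 0x88 with a one-byte sub-address, reading a single byte through master port 1:

static int example_read_byte_on_master1(struct cx231xx *dev)
{
	u32 val = 0;
	int status;

	/* dev_addr is the 8-bit bus address (the helper shifts it right by
	 * one), saddr_len = 1 sub-address byte, data_len = 1, master = 1 */
	status = cx231xx_read_i2c_master(dev, 0x88, 0x00, 1, &val, 1, 1);
	if (status < 0)
		return status;

	return val & 0xff;
}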
diff --git a/drivers/media/video/cx231xx/cx231xx-dif.h b/drivers/media/video/cx231xx/cx231xx-dif.h
new file mode 100644
index 000000000000..2b63c2f6d3b0
--- /dev/null
+++ b/drivers/media/video/cx231xx/cx231xx-dif.h
@@ -0,0 +1,3178 @@
+/*
+ * cx231xx-dif.h - driver for Conexant Cx23100/101/102 USB video capture devices
+ *
+ * Copyright (C) 2009 <Bill.Liu@conexant.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ */
+
+#ifndef _CX231XX_DIF_H
+#define _CX231XX_DIF_H
+
+#include "cx231xx-reg.h"
+
+struct dif_settings {
+ u32 if_freq;
+ u32 register_address;
+ u32 value;
+};
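
A sketch of how the Dif_set_array[] table that follows might be consumed (illustrative only, not part of the patch; dif_set_bpf_coefficients() is a hypothetical helper name and vid_blk_write_word() stands in for whatever 32-bit register accessor the driver actually uses): walk the table and program every DIF_BPF_COEFF* register whose if_freq field matches the IF frequency currently in use.

static void dif_set_bpf_coefficients(struct cx231xx *dev, u32 if_freq)
{
	unsigned int i;

	/* Each IF frequency contributes 19 consecutive register/value pairs
	 * in the table; matching on if_freq programs exactly that group. */
	for (i = 0; i < ARRAY_SIZE(Dif_set_array); i++) {
		if (Dif_set_array[i].if_freq != if_freq)
			continue;
		vid_blk_write_word(dev, Dif_set_array[i].register_address,
				   Dif_set_array[i].value);
	}
}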
+
+static struct dif_settings Dif_set_array[] = {
+
+/*case 3000000:*/
+/* BEGIN - DIF BPF register values from 30_quant.dat*/
+{3000000, DIF_BPF_COEFF01, 0x00000002},
+{3000000, DIF_BPF_COEFF23, 0x00080012},
+{3000000, DIF_BPF_COEFF45, 0x001e0024},
+{3000000, DIF_BPF_COEFF67, 0x001bfff8},
+{3000000, DIF_BPF_COEFF89, 0xffb4ff50},
+{3000000, DIF_BPF_COEFF1011, 0xfed8fe68},
+{3000000, DIF_BPF_COEFF1213, 0xfe24fe34},
+{3000000, DIF_BPF_COEFF1415, 0xfebaffc7},
+{3000000, DIF_BPF_COEFF1617, 0x014d031f},
+{3000000, DIF_BPF_COEFF1819, 0x04f0065d},
+{3000000, DIF_BPF_COEFF2021, 0x07010688},
+{3000000, DIF_BPF_COEFF2223, 0x04c901d6},
+{3000000, DIF_BPF_COEFF2425, 0xfe00f9d3},
+{3000000, DIF_BPF_COEFF2627, 0xf600f342},
+{3000000, DIF_BPF_COEFF2829, 0xf235f337},
+{3000000, DIF_BPF_COEFF3031, 0xf64efb22},
+{3000000, DIF_BPF_COEFF3233, 0x0105070f},
+{3000000, DIF_BPF_COEFF3435, 0x0c460fce},
+{3000000, DIF_BPF_COEFF36, 0x110d0000},
+/* END - DIF BPF register values from 30_quant.dat*/
+
+
+/*case 3100000:*/
+/* BEGIN - DIF BPF register values from 31_quant.dat*/
+{3100000, DIF_BPF_COEFF01, 0x00000001},
+{3100000, DIF_BPF_COEFF23, 0x00070012},
+{3100000, DIF_BPF_COEFF45, 0x00220032},
+{3100000, DIF_BPF_COEFF67, 0x00370026},
+{3100000, DIF_BPF_COEFF89, 0xfff0ff91},
+{3100000, DIF_BPF_COEFF1011, 0xff0efe7c},
+{3100000, DIF_BPF_COEFF1213, 0xfe01fdcc},
+{3100000, DIF_BPF_COEFF1415, 0xfe0afedb},
+{3100000, DIF_BPF_COEFF1617, 0x00440224},
+{3100000, DIF_BPF_COEFF1819, 0x0434060c},
+{3100000, DIF_BPF_COEFF2021, 0x0738074e},
+{3100000, DIF_BPF_COEFF2223, 0x06090361},
+{3100000, DIF_BPF_COEFF2425, 0xff99fb39},
+{3100000, DIF_BPF_COEFF2627, 0xf6fef3b6},
+{3100000, DIF_BPF_COEFF2829, 0xf21af2a5},
+{3100000, DIF_BPF_COEFF3031, 0xf573fa33},
+{3100000, DIF_BPF_COEFF3233, 0x0034067d},
+{3100000, DIF_BPF_COEFF3435, 0x0bfb0fb9},
+{3100000, DIF_BPF_COEFF36, 0x110d0000},
+/* END - DIF BPF register values from 31_quant.dat*/
+
+
+/*case 3200000:*/
+/* BEGIN - DIF BPF register values from 32_quant.dat*/
+{3200000, DIF_BPF_COEFF01, 0x00000000},
+{3200000, DIF_BPF_COEFF23, 0x0004000e},
+{3200000, DIF_BPF_COEFF45, 0x00200038},
+{3200000, DIF_BPF_COEFF67, 0x004c004f},
+{3200000, DIF_BPF_COEFF89, 0x002fffdf},
+{3200000, DIF_BPF_COEFF1011, 0xff5cfeb6},
+{3200000, DIF_BPF_COEFF1213, 0xfe0dfd92},
+{3200000, DIF_BPF_COEFF1415, 0xfd7ffe03},
+{3200000, DIF_BPF_COEFF1617, 0xff36010a},
+{3200000, DIF_BPF_COEFF1819, 0x03410575},
+{3200000, DIF_BPF_COEFF2021, 0x072607d2},
+{3200000, DIF_BPF_COEFF2223, 0x071804d5},
+{3200000, DIF_BPF_COEFF2425, 0x0134fcb7},
+{3200000, DIF_BPF_COEFF2627, 0xf81ff451},
+{3200000, DIF_BPF_COEFF2829, 0xf223f22e},
+{3200000, DIF_BPF_COEFF3031, 0xf4a7f94b},
+{3200000, DIF_BPF_COEFF3233, 0xff6405e8},
+{3200000, DIF_BPF_COEFF3435, 0x0bae0fa4},
+{3200000, DIF_BPF_COEFF36, 0x110d0000},
+/* END - DIF BPF register values from 32_quant.dat*/
+
+
+/*case 3300000:*/
+/* BEGIN - DIF BPF register values from 33_quant.dat*/
+{3300000, DIF_BPF_COEFF01, 0x0000ffff},
+{3300000, DIF_BPF_COEFF23, 0x00000008},
+{3300000, DIF_BPF_COEFF45, 0x001a0036},
+{3300000, DIF_BPF_COEFF67, 0x0056006d},
+{3300000, DIF_BPF_COEFF89, 0x00670030},
+{3300000, DIF_BPF_COEFF1011, 0xffbdff10},
+{3300000, DIF_BPF_COEFF1213, 0xfe46fd8d},
+{3300000, DIF_BPF_COEFF1415, 0xfd25fd4f},
+{3300000, DIF_BPF_COEFF1617, 0xfe35ffe0},
+{3300000, DIF_BPF_COEFF1819, 0x0224049f},
+{3300000, DIF_BPF_COEFF2021, 0x06c9080e},
+{3300000, DIF_BPF_COEFF2223, 0x07ef0627},
+{3300000, DIF_BPF_COEFF2425, 0x02c9fe45},
+{3300000, DIF_BPF_COEFF2627, 0xf961f513},
+{3300000, DIF_BPF_COEFF2829, 0xf250f1d2},
+{3300000, DIF_BPF_COEFF3031, 0xf3ecf869},
+{3300000, DIF_BPF_COEFF3233, 0xfe930552},
+{3300000, DIF_BPF_COEFF3435, 0x0b5f0f8f},
+{3300000, DIF_BPF_COEFF36, 0x110d0000},
+/* END - DIF BPF register values from 33_quant.dat*/
+
+
+/*case 3400000:*/
+/* BEGIN - DIF BPF register values from 34_quant.dat*/
+{3400000, DIF_BPF_COEFF01, 0xfffffffe},
+{3400000, DIF_BPF_COEFF23, 0xfffd0001},
+{3400000, DIF_BPF_COEFF45, 0x000f002c},
+{3400000, DIF_BPF_COEFF67, 0x0054007d},
+{3400000, DIF_BPF_COEFF89, 0x0093007c},
+{3400000, DIF_BPF_COEFF1011, 0x0024ff82},
+{3400000, DIF_BPF_COEFF1213, 0xfea6fdbb},
+{3400000, DIF_BPF_COEFF1415, 0xfd03fcca},
+{3400000, DIF_BPF_COEFF1617, 0xfd51feb9},
+{3400000, DIF_BPF_COEFF1819, 0x00eb0392},
+{3400000, DIF_BPF_COEFF2021, 0x06270802},
+{3400000, DIF_BPF_COEFF2223, 0x08880750},
+{3400000, DIF_BPF_COEFF2425, 0x044dffdb},
+{3400000, DIF_BPF_COEFF2627, 0xfabdf5f8},
+{3400000, DIF_BPF_COEFF2829, 0xf2a0f193},
+{3400000, DIF_BPF_COEFF3031, 0xf342f78f},
+{3400000, DIF_BPF_COEFF3233, 0xfdc404b9},
+{3400000, DIF_BPF_COEFF3435, 0x0b0e0f78},
+{3400000, DIF_BPF_COEFF36, 0x110d0000},
+/* END - DIF BPF register values from 34_quant.dat*/
+
+
+/*case 3500000:*/
+/* BEGIN - DIF BPF register values from 35_quant.dat*/
+{3500000, DIF_BPF_COEFF01, 0xfffffffd},
+{3500000, DIF_BPF_COEFF23, 0xfffafff9},
+{3500000, DIF_BPF_COEFF45, 0x0002001b},
+{3500000, DIF_BPF_COEFF67, 0x0046007d},
+{3500000, DIF_BPF_COEFF89, 0x00ad00ba},
+{3500000, DIF_BPF_COEFF1011, 0x00870000},
+{3500000, DIF_BPF_COEFF1213, 0xff26fe1a},
+{3500000, DIF_BPF_COEFF1415, 0xfd1bfc7e},
+{3500000, DIF_BPF_COEFF1617, 0xfc99fda4},
+{3500000, DIF_BPF_COEFF1819, 0xffa5025c},
+{3500000, DIF_BPF_COEFF2021, 0x054507ad},
+{3500000, DIF_BPF_COEFF2223, 0x08dd0847},
+{3500000, DIF_BPF_COEFF2425, 0x05b80172},
+{3500000, DIF_BPF_COEFF2627, 0xfc2ef6ff},
+{3500000, DIF_BPF_COEFF2829, 0xf313f170},
+{3500000, DIF_BPF_COEFF3031, 0xf2abf6bd},
+{3500000, DIF_BPF_COEFF3233, 0xfcf6041f},
+{3500000, DIF_BPF_COEFF3435, 0x0abc0f61},
+{3500000, DIF_BPF_COEFF36, 0x110d0000},
+/* END - DIF BPF register values from 35_quant.dat*/
+
+
+/*case 3600000:*/
+/* BEGIN - DIF BPF register values from 36_quant.dat*/
+{3600000, DIF_BPF_COEFF01, 0xfffffffd},
+{3600000, DIF_BPF_COEFF23, 0xfff8fff3},
+{3600000, DIF_BPF_COEFF45, 0xfff50006},
+{3600000, DIF_BPF_COEFF67, 0x002f006c},
+{3600000, DIF_BPF_COEFF89, 0x00b200e3},
+{3600000, DIF_BPF_COEFF1011, 0x00dc007e},
+{3600000, DIF_BPF_COEFF1213, 0xffb9fea0},
+{3600000, DIF_BPF_COEFF1415, 0xfd6bfc71},
+{3600000, DIF_BPF_COEFF1617, 0xfc17fcb1},
+{3600000, DIF_BPF_COEFF1819, 0xfe65010b},
+{3600000, DIF_BPF_COEFF2021, 0x042d0713},
+{3600000, DIF_BPF_COEFF2223, 0x08ec0906},
+{3600000, DIF_BPF_COEFF2425, 0x07020302},
+{3600000, DIF_BPF_COEFF2627, 0xfdaff823},
+{3600000, DIF_BPF_COEFF2829, 0xf3a7f16a},
+{3600000, DIF_BPF_COEFF3031, 0xf228f5f5},
+{3600000, DIF_BPF_COEFF3233, 0xfc2a0384},
+{3600000, DIF_BPF_COEFF3435, 0x0a670f4a},
+{3600000, DIF_BPF_COEFF36, 0x110d0000},
+/* END - DIF BPF register values from 36_quant.dat*/
+
+
+/*case 3700000:*/
+/* BEGIN - DIF BPF register values from 37_quant.dat*/
+{3700000, DIF_BPF_COEFF01, 0x0000fffd},
+{3700000, DIF_BPF_COEFF23, 0xfff7ffef},
+{3700000, DIF_BPF_COEFF45, 0xffe9fff1},
+{3700000, DIF_BPF_COEFF67, 0x0010004d},
+{3700000, DIF_BPF_COEFF89, 0x00a100f2},
+{3700000, DIF_BPF_COEFF1011, 0x011a00f0},
+{3700000, DIF_BPF_COEFF1213, 0x0053ff44},
+{3700000, DIF_BPF_COEFF1415, 0xfdedfca2},
+{3700000, DIF_BPF_COEFF1617, 0xfbd3fbef},
+{3700000, DIF_BPF_COEFF1819, 0xfd39ffae},
+{3700000, DIF_BPF_COEFF2021, 0x02ea0638},
+{3700000, DIF_BPF_COEFF2223, 0x08b50987},
+{3700000, DIF_BPF_COEFF2425, 0x08230483},
+{3700000, DIF_BPF_COEFF2627, 0xff39f960},
+{3700000, DIF_BPF_COEFF2829, 0xf45bf180},
+{3700000, DIF_BPF_COEFF3031, 0xf1b8f537},
+{3700000, DIF_BPF_COEFF3233, 0xfb6102e7},
+{3700000, DIF_BPF_COEFF3435, 0x0a110f32},
+{3700000, DIF_BPF_COEFF36, 0x110d0000},
+/* END - DIF BPF register values from 37_quant.dat*/
+
+
+/*case 3800000:*/
+/* BEGIN - DIF BPF register values from 38_quant.dat*/
+{3800000, DIF_BPF_COEFF01, 0x0000fffe},
+{3800000, DIF_BPF_COEFF23, 0xfff9ffee},
+{3800000, DIF_BPF_COEFF45, 0xffe1ffdd},
+{3800000, DIF_BPF_COEFF67, 0xfff00024},
+{3800000, DIF_BPF_COEFF89, 0x007c00e5},
+{3800000, DIF_BPF_COEFF1011, 0x013a014a},
+{3800000, DIF_BPF_COEFF1213, 0x00e6fff8},
+{3800000, DIF_BPF_COEFF1415, 0xfe98fd0f},
+{3800000, DIF_BPF_COEFF1617, 0xfbd3fb67},
+{3800000, DIF_BPF_COEFF1819, 0xfc32fe54},
+{3800000, DIF_BPF_COEFF2021, 0x01880525},
+{3800000, DIF_BPF_COEFF2223, 0x083909c7},
+{3800000, DIF_BPF_COEFF2425, 0x091505ee},
+{3800000, DIF_BPF_COEFF2627, 0x00c7fab3},
+{3800000, DIF_BPF_COEFF2829, 0xf52df1b4},
+{3800000, DIF_BPF_COEFF3031, 0xf15df484},
+{3800000, DIF_BPF_COEFF3233, 0xfa9b0249},
+{3800000, DIF_BPF_COEFF3435, 0x09ba0f19},
+{3800000, DIF_BPF_COEFF36, 0x110d0000},
+/* END - DIF BPF register values from 38_quant.dat*/
+
+
+/*case 3900000:*/
+/* BEGIN - DIF BPF register values from 39_quant.dat*/
+{3900000, DIF_BPF_COEFF01, 0x00000000},
+{3900000, DIF_BPF_COEFF23, 0xfffbfff0},
+{3900000, DIF_BPF_COEFF45, 0xffdeffcf},
+{3900000, DIF_BPF_COEFF67, 0xffd1fff6},
+{3900000, DIF_BPF_COEFF89, 0x004800be},
+{3900000, DIF_BPF_COEFF1011, 0x01390184},
+{3900000, DIF_BPF_COEFF1213, 0x016300ac},
+{3900000, DIF_BPF_COEFF1415, 0xff5efdb1},
+{3900000, DIF_BPF_COEFF1617, 0xfc17fb23},
+{3900000, DIF_BPF_COEFF1819, 0xfb5cfd0d},
+{3900000, DIF_BPF_COEFF2021, 0x001703e4},
+{3900000, DIF_BPF_COEFF2223, 0x077b09c4},
+{3900000, DIF_BPF_COEFF2425, 0x09d2073c},
+{3900000, DIF_BPF_COEFF2627, 0x0251fc18},
+{3900000, DIF_BPF_COEFF2829, 0xf61cf203},
+{3900000, DIF_BPF_COEFF3031, 0xf118f3dc},
+{3900000, DIF_BPF_COEFF3233, 0xf9d801aa},
+{3900000, DIF_BPF_COEFF3435, 0x09600eff},
+{3900000, DIF_BPF_COEFF36, 0x110d0000},
+/* END - DIF BPF register values from 39_quant.dat*/
+
+
+/*case 4000000:*/
+/* BEGIN - DIF BPF register values from 40_quant.dat*/
+{4000000, DIF_BPF_COEFF01, 0x00000001},
+{4000000, DIF_BPF_COEFF23, 0xfffefff4},
+{4000000, DIF_BPF_COEFF45, 0xffe1ffc8},
+{4000000, DIF_BPF_COEFF67, 0xffbaffca},
+{4000000, DIF_BPF_COEFF89, 0x000b0082},
+{4000000, DIF_BPF_COEFF1011, 0x01170198},
+{4000000, DIF_BPF_COEFF1213, 0x01c10152},
+{4000000, DIF_BPF_COEFF1415, 0x0030fe7b},
+{4000000, DIF_BPF_COEFF1617, 0xfc99fb24},
+{4000000, DIF_BPF_COEFF1819, 0xfac3fbe9},
+{4000000, DIF_BPF_COEFF2021, 0xfea5027f},
+{4000000, DIF_BPF_COEFF2223, 0x0683097f},
+{4000000, DIF_BPF_COEFF2425, 0x0a560867},
+{4000000, DIF_BPF_COEFF2627, 0x03d2fd89},
+{4000000, DIF_BPF_COEFF2829, 0xf723f26f},
+{4000000, DIF_BPF_COEFF3031, 0xf0e8f341},
+{4000000, DIF_BPF_COEFF3233, 0xf919010a},
+{4000000, DIF_BPF_COEFF3435, 0x09060ee5},
+{4000000, DIF_BPF_COEFF36, 0x110d0000},
+/* END - DIF BPF register values from 40_quant.dat*/
+
+
+/*case 4100000:*/
+/* BEGIN - DIF BPF register values from 41_quant.dat*/
+{4100000, DIF_BPF_COEFF01, 0x00010002},
+{4100000, DIF_BPF_COEFF23, 0x0002fffb},
+{4100000, DIF_BPF_COEFF45, 0xffe8ffca},
+{4100000, DIF_BPF_COEFF67, 0xffacffa4},
+{4100000, DIF_BPF_COEFF89, 0xffcd0036},
+{4100000, DIF_BPF_COEFF1011, 0x00d70184},
+{4100000, DIF_BPF_COEFF1213, 0x01f601dc},
+{4100000, DIF_BPF_COEFF1415, 0x00ffff60},
+{4100000, DIF_BPF_COEFF1617, 0xfd51fb6d},
+{4100000, DIF_BPF_COEFF1819, 0xfa6efaf5},
+{4100000, DIF_BPF_COEFF2021, 0xfd410103},
+{4100000, DIF_BPF_COEFF2223, 0x055708f9},
+{4100000, DIF_BPF_COEFF2425, 0x0a9e0969},
+{4100000, DIF_BPF_COEFF2627, 0x0543ff02},
+{4100000, DIF_BPF_COEFF2829, 0xf842f2f5},
+{4100000, DIF_BPF_COEFF3031, 0xf0cef2b2},
+{4100000, DIF_BPF_COEFF3233, 0xf85e006b},
+{4100000, DIF_BPF_COEFF3435, 0x08aa0ecb},
+{4100000, DIF_BPF_COEFF36, 0x110d0000},
+/* END - DIF BPF register values from 41_quant.dat*/
+
+
+/*case 4200000:*/
+/* BEGIN - DIF BPF register values from 42_quant.dat*/
+{4200000, DIF_BPF_COEFF01, 0x00010003},
+{4200000, DIF_BPF_COEFF23, 0x00050003},
+{4200000, DIF_BPF_COEFF45, 0xfff3ffd3},
+{4200000, DIF_BPF_COEFF67, 0xffaaff8b},
+{4200000, DIF_BPF_COEFF89, 0xff95ffe5},
+{4200000, DIF_BPF_COEFF1011, 0x0080014a},
+{4200000, DIF_BPF_COEFF1213, 0x01fe023f},
+{4200000, DIF_BPF_COEFF1415, 0x01ba0050},
+{4200000, DIF_BPF_COEFF1617, 0xfe35fbf8},
+{4200000, DIF_BPF_COEFF1819, 0xfa62fa3b},
+{4200000, DIF_BPF_COEFF2021, 0xfbf9ff7e},
+{4200000, DIF_BPF_COEFF2223, 0x04010836},
+{4200000, DIF_BPF_COEFF2425, 0x0aa90a3d},
+{4200000, DIF_BPF_COEFF2627, 0x069f007f},
+{4200000, DIF_BPF_COEFF2829, 0xf975f395},
+{4200000, DIF_BPF_COEFF3031, 0xf0cbf231},
+{4200000, DIF_BPF_COEFF3233, 0xf7a9ffcb},
+{4200000, DIF_BPF_COEFF3435, 0x084c0eaf},
+{4200000, DIF_BPF_COEFF36, 0x110d0000},
+/* END - DIF BPF register values from 42_quant.dat*/
+
+
+/*case 4300000:*/
+/* BEGIN - DIF BPF register values from 43_quant.dat*/
+{4300000, DIF_BPF_COEFF01, 0x00010003},
+{4300000, DIF_BPF_COEFF23, 0x0008000a},
+{4300000, DIF_BPF_COEFF45, 0x0000ffe4},
+{4300000, DIF_BPF_COEFF67, 0xffb4ff81},
+{4300000, DIF_BPF_COEFF89, 0xff6aff96},
+{4300000, DIF_BPF_COEFF1011, 0x001c00f0},
+{4300000, DIF_BPF_COEFF1213, 0x01d70271},
+{4300000, DIF_BPF_COEFF1415, 0x0254013b},
+{4300000, DIF_BPF_COEFF1617, 0xff36fcbd},
+{4300000, DIF_BPF_COEFF1819, 0xfa9ff9c5},
+{4300000, DIF_BPF_COEFF2021, 0xfadbfdfe},
+{4300000, DIF_BPF_COEFF2223, 0x028c073b},
+{4300000, DIF_BPF_COEFF2425, 0x0a750adf},
+{4300000, DIF_BPF_COEFF2627, 0x07e101fa},
+{4300000, DIF_BPF_COEFF2829, 0xfab8f44e},
+{4300000, DIF_BPF_COEFF3031, 0xf0ddf1be},
+{4300000, DIF_BPF_COEFF3233, 0xf6f9ff2b},
+{4300000, DIF_BPF_COEFF3435, 0x07ed0e94},
+{4300000, DIF_BPF_COEFF36, 0x110d0000},
+/* END - DIF BPF register values from 43_quant.dat*/
+
+
+/*case 4400000:*/
+/* BEGIN - DIF BPF register values from 44_quant.dat*/
+{4400000, DIF_BPF_COEFF01, 0x00000003},
+{4400000, DIF_BPF_COEFF23, 0x0009000f},
+{4400000, DIF_BPF_COEFF45, 0x000efff8},
+{4400000, DIF_BPF_COEFF67, 0xffc9ff87},
+{4400000, DIF_BPF_COEFF89, 0xff52ff54},
+{4400000, DIF_BPF_COEFF1011, 0xffb5007e},
+{4400000, DIF_BPF_COEFF1213, 0x01860270},
+{4400000, DIF_BPF_COEFF1415, 0x02c00210},
+{4400000, DIF_BPF_COEFF1617, 0x0044fdb2},
+{4400000, DIF_BPF_COEFF1819, 0xfb22f997},
+{4400000, DIF_BPF_COEFF2021, 0xf9f2fc90},
+{4400000, DIF_BPF_COEFF2223, 0x0102060f},
+{4400000, DIF_BPF_COEFF2425, 0x0a050b4c},
+{4400000, DIF_BPF_COEFF2627, 0x0902036e},
+{4400000, DIF_BPF_COEFF2829, 0xfc0af51e},
+{4400000, DIF_BPF_COEFF3031, 0xf106f15a},
+{4400000, DIF_BPF_COEFF3233, 0xf64efe8b},
+{4400000, DIF_BPF_COEFF3435, 0x078d0e77},
+{4400000, DIF_BPF_COEFF36, 0x110d0000},
+/* END - DIF BPF register values from 44_quant.dat*/
+
+
+/*case 4500000:*/
+/* BEGIN - DIF BPF register values from 45_quant.dat*/
+{4500000, DIF_BPF_COEFF01, 0x00000002},
+{4500000, DIF_BPF_COEFF23, 0x00080012},
+{4500000, DIF_BPF_COEFF45, 0x0019000e},
+{4500000, DIF_BPF_COEFF67, 0xffe5ff9e},
+{4500000, DIF_BPF_COEFF89, 0xff4fff25},
+{4500000, DIF_BPF_COEFF1011, 0xff560000},
+{4500000, DIF_BPF_COEFF1213, 0x0112023b},
+{4500000, DIF_BPF_COEFF1415, 0x02f702c0},
+{4500000, DIF_BPF_COEFF1617, 0x014dfec8},
+{4500000, DIF_BPF_COEFF1819, 0xfbe5f9b3},
+{4500000, DIF_BPF_COEFF2021, 0xf947fb41},
+{4500000, DIF_BPF_COEFF2223, 0xff7004b9},
+{4500000, DIF_BPF_COEFF2425, 0x095a0b81},
+{4500000, DIF_BPF_COEFF2627, 0x0a0004d8},
+{4500000, DIF_BPF_COEFF2829, 0xfd65f603},
+{4500000, DIF_BPF_COEFF3031, 0xf144f104},
+{4500000, DIF_BPF_COEFF3233, 0xf5aafdec},
+{4500000, DIF_BPF_COEFF3435, 0x072b0e5a},
+{4500000, DIF_BPF_COEFF36, 0x110d0000},
+/* END - DIF BPF register values from 45_quant.dat*/
+
+
+/*case 4600000:*/
+/* BEGIN - DIF BPF register values from 46_quant.dat*/
+{4600000, DIF_BPF_COEFF01, 0x00000001},
+{4600000, DIF_BPF_COEFF23, 0x00060012},
+{4600000, DIF_BPF_COEFF45, 0x00200022},
+{4600000, DIF_BPF_COEFF67, 0x0005ffc1},
+{4600000, DIF_BPF_COEFF89, 0xff61ff10},
+{4600000, DIF_BPF_COEFF1011, 0xff09ff82},
+{4600000, DIF_BPF_COEFF1213, 0x008601d7},
+{4600000, DIF_BPF_COEFF1415, 0x02f50340},
+{4600000, DIF_BPF_COEFF1617, 0x0241fff0},
+{4600000, DIF_BPF_COEFF1819, 0xfcddfa19},
+{4600000, DIF_BPF_COEFF2021, 0xf8e2fa1e},
+{4600000, DIF_BPF_COEFF2223, 0xfde30343},
+{4600000, DIF_BPF_COEFF2425, 0x08790b7f},
+{4600000, DIF_BPF_COEFF2627, 0x0ad50631},
+{4600000, DIF_BPF_COEFF2829, 0xfec7f6fc},
+{4600000, DIF_BPF_COEFF3031, 0xf198f0bd},
+{4600000, DIF_BPF_COEFF3233, 0xf50dfd4e},
+{4600000, DIF_BPF_COEFF3435, 0x06c90e3d},
+{4600000, DIF_BPF_COEFF36, 0x110d0000},
+/* END - DIF BPF register values from 46_quant.dat*/
+
+
+/*case 4700000:*/
+/* BEGIN - DIF BPF register values from 47_quant.dat*/
+{4700000, DIF_BPF_COEFF01, 0x0000ffff},
+{4700000, DIF_BPF_COEFF23, 0x0003000f},
+{4700000, DIF_BPF_COEFF45, 0x00220030},
+{4700000, DIF_BPF_COEFF67, 0x0025ffed},
+{4700000, DIF_BPF_COEFF89, 0xff87ff15},
+{4700000, DIF_BPF_COEFF1011, 0xfed6ff10},
+{4700000, DIF_BPF_COEFF1213, 0xffed014c},
+{4700000, DIF_BPF_COEFF1415, 0x02b90386},
+{4700000, DIF_BPF_COEFF1617, 0x03110119},
+{4700000, DIF_BPF_COEFF1819, 0xfdfefac4},
+{4700000, DIF_BPF_COEFF2021, 0xf8c6f92f},
+{4700000, DIF_BPF_COEFF2223, 0xfc6701b7},
+{4700000, DIF_BPF_COEFF2425, 0x07670b44},
+{4700000, DIF_BPF_COEFF2627, 0x0b7e0776},
+{4700000, DIF_BPF_COEFF2829, 0x002df807},
+{4700000, DIF_BPF_COEFF3031, 0xf200f086},
+{4700000, DIF_BPF_COEFF3233, 0xf477fcb1},
+{4700000, DIF_BPF_COEFF3435, 0x06650e1e},
+{4700000, DIF_BPF_COEFF36, 0x110d0000},
+/* END - DIF BPF register values from 47_quant.dat*/
+
+
+/*case 4800000:*/
+/* BEGIN - DIF BPF register values from 48_quant.dat*/
+{4800000, DIF_BPF_COEFF01, 0xfffffffe},
+{4800000, DIF_BPF_COEFF23, 0xffff0009},
+{4800000, DIF_BPF_COEFF45, 0x001e0038},
+{4800000, DIF_BPF_COEFF67, 0x003f001b},
+{4800000, DIF_BPF_COEFF89, 0xffbcff36},
+{4800000, DIF_BPF_COEFF1011, 0xfec2feb6},
+{4800000, DIF_BPF_COEFF1213, 0xff5600a5},
+{4800000, DIF_BPF_COEFF1415, 0x0248038d},
+{4800000, DIF_BPF_COEFF1617, 0x03b00232},
+{4800000, DIF_BPF_COEFF1819, 0xff39fbab},
+{4800000, DIF_BPF_COEFF2021, 0xf8f4f87f},
+{4800000, DIF_BPF_COEFF2223, 0xfb060020},
+{4800000, DIF_BPF_COEFF2425, 0x062a0ad2},
+{4800000, DIF_BPF_COEFF2627, 0x0bf908a3},
+{4800000, DIF_BPF_COEFF2829, 0x0192f922},
+{4800000, DIF_BPF_COEFF3031, 0xf27df05e},
+{4800000, DIF_BPF_COEFF3233, 0xf3e8fc14},
+{4800000, DIF_BPF_COEFF3435, 0x06000e00},
+{4800000, DIF_BPF_COEFF36, 0x110d0000},
+/* END - DIF BPF register values from 48_quant.dat*/
+
+
+/*case 4900000:*/
+/* BEGIN - DIF BPF register values from 49_quant.dat*/
+{4900000, DIF_BPF_COEFF01, 0xfffffffd},
+{4900000, DIF_BPF_COEFF23, 0xfffc0002},
+{4900000, DIF_BPF_COEFF45, 0x00160037},
+{4900000, DIF_BPF_COEFF67, 0x00510046},
+{4900000, DIF_BPF_COEFF89, 0xfff9ff6d},
+{4900000, DIF_BPF_COEFF1011, 0xfed0fe7c},
+{4900000, DIF_BPF_COEFF1213, 0xfecefff0},
+{4900000, DIF_BPF_COEFF1415, 0x01aa0356},
+{4900000, DIF_BPF_COEFF1617, 0x0413032b},
+{4900000, DIF_BPF_COEFF1819, 0x007ffcc5},
+{4900000, DIF_BPF_COEFF2021, 0xf96cf812},
+{4900000, DIF_BPF_COEFF2223, 0xf9cefe87},
+{4900000, DIF_BPF_COEFF2425, 0x04c90a2c},
+{4900000, DIF_BPF_COEFF2627, 0x0c4309b4},
+{4900000, DIF_BPF_COEFF2829, 0x02f3fa4a},
+{4900000, DIF_BPF_COEFF3031, 0xf30ef046},
+{4900000, DIF_BPF_COEFF3233, 0xf361fb7a},
+{4900000, DIF_BPF_COEFF3435, 0x059b0de0},
+{4900000, DIF_BPF_COEFF36, 0x110d0000},
+/* END - DIF BPF register values from 49_quant.dat*/
+
+
+/*case 5000000:*/
+/* BEGIN - DIF BPF register values from 50_quant.dat*/
+{5000000, DIF_BPF_COEFF01, 0xfffffffd},
+{5000000, DIF_BPF_COEFF23, 0xfff9fffa},
+{5000000, DIF_BPF_COEFF45, 0x000a002d},
+{5000000, DIF_BPF_COEFF67, 0x00570067},
+{5000000, DIF_BPF_COEFF89, 0x0037ffb5},
+{5000000, DIF_BPF_COEFF1011, 0xfefffe68},
+{5000000, DIF_BPF_COEFF1213, 0xfe62ff3d},
+{5000000, DIF_BPF_COEFF1415, 0x00ec02e3},
+{5000000, DIF_BPF_COEFF1617, 0x043503f6},
+{5000000, DIF_BPF_COEFF1819, 0x01befe05},
+{5000000, DIF_BPF_COEFF2021, 0xfa27f7ee},
+{5000000, DIF_BPF_COEFF2223, 0xf8c6fcf8},
+{5000000, DIF_BPF_COEFF2425, 0x034c0954},
+{5000000, DIF_BPF_COEFF2627, 0x0c5c0aa4},
+{5000000, DIF_BPF_COEFF2829, 0x044cfb7e},
+{5000000, DIF_BPF_COEFF3031, 0xf3b1f03f},
+{5000000, DIF_BPF_COEFF3233, 0xf2e2fae1},
+{5000000, DIF_BPF_COEFF3435, 0x05340dc0},
+{5000000, DIF_BPF_COEFF36, 0x110d0000},
+/* END - DIF BPF register values from 50_quant.dat*/
+
+
+/*case 5100000:*/
+/* BEGIN - DIF BPF register values from 51_quant.dat*/
+{5100000, DIF_BPF_COEFF01, 0x0000fffd},
+{5100000, DIF_BPF_COEFF23, 0xfff8fff4},
+{5100000, DIF_BPF_COEFF45, 0xfffd001e},
+{5100000, DIF_BPF_COEFF67, 0x0051007b},
+{5100000, DIF_BPF_COEFF89, 0x006e0006},
+{5100000, DIF_BPF_COEFF1011, 0xff48fe7c},
+{5100000, DIF_BPF_COEFF1213, 0xfe1bfe9a},
+{5100000, DIF_BPF_COEFF1415, 0x001d023e},
+{5100000, DIF_BPF_COEFF1617, 0x04130488},
+{5100000, DIF_BPF_COEFF1819, 0x02e6ff5b},
+{5100000, DIF_BPF_COEFF2021, 0xfb1ef812},
+{5100000, DIF_BPF_COEFF2223, 0xf7f7fb7f},
+{5100000, DIF_BPF_COEFF2425, 0x01bc084e},
+{5100000, DIF_BPF_COEFF2627, 0x0c430b72},
+{5100000, DIF_BPF_COEFF2829, 0x059afcba},
+{5100000, DIF_BPF_COEFF3031, 0xf467f046},
+{5100000, DIF_BPF_COEFF3233, 0xf26cfa4a},
+{5100000, DIF_BPF_COEFF3435, 0x04cd0da0},
+{5100000, DIF_BPF_COEFF36, 0x110d0000},
+/* END - DIF BPF register values from 51_quant.dat*/
+
+
+/*case 5200000:*/
+/* BEGIN - DIF BPF register values from 52_quant.dat*/
+{5200000, DIF_BPF_COEFF01, 0x0000fffe},
+{5200000, DIF_BPF_COEFF23, 0xfff8ffef},
+{5200000, DIF_BPF_COEFF45, 0xfff00009},
+{5200000, DIF_BPF_COEFF67, 0x003f007f},
+{5200000, DIF_BPF_COEFF89, 0x00980056},
+{5200000, DIF_BPF_COEFF1011, 0xffa5feb6},
+{5200000, DIF_BPF_COEFF1213, 0xfe00fe15},
+{5200000, DIF_BPF_COEFF1415, 0xff4b0170},
+{5200000, DIF_BPF_COEFF1617, 0x03b004d7},
+{5200000, DIF_BPF_COEFF1819, 0x03e800b9},
+{5200000, DIF_BPF_COEFF2021, 0xfc48f87f},
+{5200000, DIF_BPF_COEFF2223, 0xf768fa23},
+{5200000, DIF_BPF_COEFF2425, 0x0022071f},
+{5200000, DIF_BPF_COEFF2627, 0x0bf90c1b},
+{5200000, DIF_BPF_COEFF2829, 0x06dafdfd},
+{5200000, DIF_BPF_COEFF3031, 0xf52df05e},
+{5200000, DIF_BPF_COEFF3233, 0xf1fef9b5},
+{5200000, DIF_BPF_COEFF3435, 0x04640d7f},
+{5200000, DIF_BPF_COEFF36, 0x110d0000},
+/* END - DIF BPF register values from 52_quant.dat*/
+
+
+/*case 5300000:*/
+/* BEGIN - DIF BPF register values from 53_quant.dat*/
+{5300000, DIF_BPF_COEFF01, 0x0000ffff},
+{5300000, DIF_BPF_COEFF23, 0xfff9ffee},
+{5300000, DIF_BPF_COEFF45, 0xffe6fff3},
+{5300000, DIF_BPF_COEFF67, 0x00250072},
+{5300000, DIF_BPF_COEFF89, 0x00af009c},
+{5300000, DIF_BPF_COEFF1011, 0x000cff10},
+{5300000, DIF_BPF_COEFF1213, 0xfe13fdb8},
+{5300000, DIF_BPF_COEFF1415, 0xfe870089},
+{5300000, DIF_BPF_COEFF1617, 0x031104e1},
+{5300000, DIF_BPF_COEFF1819, 0x04b8020f},
+{5300000, DIF_BPF_COEFF2021, 0xfd98f92f},
+{5300000, DIF_BPF_COEFF2223, 0xf71df8f0},
+{5300000, DIF_BPF_COEFF2425, 0xfe8805ce},
+{5300000, DIF_BPF_COEFF2627, 0x0b7e0c9c},
+{5300000, DIF_BPF_COEFF2829, 0x0808ff44},
+{5300000, DIF_BPF_COEFF3031, 0xf603f086},
+{5300000, DIF_BPF_COEFF3233, 0xf19af922},
+{5300000, DIF_BPF_COEFF3435, 0x03fb0d5e},
+{5300000, DIF_BPF_COEFF36, 0x110d0000},
+/* END - DIF BPF register values from 53_quant.dat*/
+
+
+/*case 5400000:*/
+/* BEGIN - DIF BPF register values from 54_quant.dat*/
+{5400000, DIF_BPF_COEFF01, 0x00000001},
+{5400000, DIF_BPF_COEFF23, 0xfffcffef},
+{5400000, DIF_BPF_COEFF45, 0xffe0ffe0},
+{5400000, DIF_BPF_COEFF67, 0x00050056},
+{5400000, DIF_BPF_COEFF89, 0x00b000d1},
+{5400000, DIF_BPF_COEFF1011, 0x0071ff82},
+{5400000, DIF_BPF_COEFF1213, 0xfe53fd8c},
+{5400000, DIF_BPF_COEFF1415, 0xfddfff99},
+{5400000, DIF_BPF_COEFF1617, 0x024104a3},
+{5400000, DIF_BPF_COEFF1819, 0x054a034d},
+{5400000, DIF_BPF_COEFF2021, 0xff01fa1e},
+{5400000, DIF_BPF_COEFF2223, 0xf717f7ed},
+{5400000, DIF_BPF_COEFF2425, 0xfcf50461},
+{5400000, DIF_BPF_COEFF2627, 0x0ad50cf4},
+{5400000, DIF_BPF_COEFF2829, 0x0921008d},
+{5400000, DIF_BPF_COEFF3031, 0xf6e7f0bd},
+{5400000, DIF_BPF_COEFF3233, 0xf13ff891},
+{5400000, DIF_BPF_COEFF3435, 0x03920d3b},
+{5400000, DIF_BPF_COEFF36, 0x110d0000},
+/* END - DIF BPF register values from 54_quant.dat*/
+
+
+/*case 5500000:*/
+/* BEGIN - DIF BPF register values from 55_quant.dat*/
+{5500000, DIF_BPF_COEFF01, 0x00010002},
+{5500000, DIF_BPF_COEFF23, 0xfffffff3},
+{5500000, DIF_BPF_COEFF45, 0xffdeffd1},
+{5500000, DIF_BPF_COEFF67, 0xffe5002f},
+{5500000, DIF_BPF_COEFF89, 0x009c00ed},
+{5500000, DIF_BPF_COEFF1011, 0x00cb0000},
+{5500000, DIF_BPF_COEFF1213, 0xfebafd94},
+{5500000, DIF_BPF_COEFF1415, 0xfd61feb0},
+{5500000, DIF_BPF_COEFF1617, 0x014d0422},
+{5500000, DIF_BPF_COEFF1819, 0x05970464},
+{5500000, DIF_BPF_COEFF2021, 0x0074fb41},
+{5500000, DIF_BPF_COEFF2223, 0xf759f721},
+{5500000, DIF_BPF_COEFF2425, 0xfb7502de},
+{5500000, DIF_BPF_COEFF2627, 0x0a000d21},
+{5500000, DIF_BPF_COEFF2829, 0x0a2201d4},
+{5500000, DIF_BPF_COEFF3031, 0xf7d9f104},
+{5500000, DIF_BPF_COEFF3233, 0xf0edf804},
+{5500000, DIF_BPF_COEFF3435, 0x03280d19},
+{5500000, DIF_BPF_COEFF36, 0x110d0000},
+/* END - DIF BPF register values from 55_quant.dat*/
+
+
+/*case 5600000:*/
+/* BEGIN - DIF BPF register values from 56_quant.dat*/
+{5600000, DIF_BPF_COEFF01, 0x00010003},
+{5600000, DIF_BPF_COEFF23, 0x0003fffa},
+{5600000, DIF_BPF_COEFF45, 0xffe3ffc9},
+{5600000, DIF_BPF_COEFF67, 0xffc90002},
+{5600000, DIF_BPF_COEFF89, 0x007500ef},
+{5600000, DIF_BPF_COEFF1011, 0x010e007e},
+{5600000, DIF_BPF_COEFF1213, 0xff3dfdcf},
+{5600000, DIF_BPF_COEFF1415, 0xfd16fddd},
+{5600000, DIF_BPF_COEFF1617, 0x00440365},
+{5600000, DIF_BPF_COEFF1819, 0x059b0548},
+{5600000, DIF_BPF_COEFF2021, 0x01e3fc90},
+{5600000, DIF_BPF_COEFF2223, 0xf7dff691},
+{5600000, DIF_BPF_COEFF2425, 0xfa0f014d},
+{5600000, DIF_BPF_COEFF2627, 0x09020d23},
+{5600000, DIF_BPF_COEFF2829, 0x0b0a0318},
+{5600000, DIF_BPF_COEFF3031, 0xf8d7f15a},
+{5600000, DIF_BPF_COEFF3233, 0xf0a5f779},
+{5600000, DIF_BPF_COEFF3435, 0x02bd0cf6},
+{5600000, DIF_BPF_COEFF36, 0x110d0000},
+/* END - DIF BPF register values from 56_quant.dat*/
+
+
+/*case 5700000:*/
+/* BEGIN - DIF BPF register values from 57_quant.dat*/
+{5700000, DIF_BPF_COEFF01, 0x00010003},
+{5700000, DIF_BPF_COEFF23, 0x00060001},
+{5700000, DIF_BPF_COEFF45, 0xffecffc9},
+{5700000, DIF_BPF_COEFF67, 0xffb4ffd4},
+{5700000, DIF_BPF_COEFF89, 0x004000d5},
+{5700000, DIF_BPF_COEFF1011, 0x013600f0},
+{5700000, DIF_BPF_COEFF1213, 0xffd3fe39},
+{5700000, DIF_BPF_COEFF1415, 0xfd04fd31},
+{5700000, DIF_BPF_COEFF1617, 0xff360277},
+{5700000, DIF_BPF_COEFF1819, 0x055605ef},
+{5700000, DIF_BPF_COEFF2021, 0x033efdfe},
+{5700000, DIF_BPF_COEFF2223, 0xf8a5f642},
+{5700000, DIF_BPF_COEFF2425, 0xf8cbffb6},
+{5700000, DIF_BPF_COEFF2627, 0x07e10cfb},
+{5700000, DIF_BPF_COEFF2829, 0x0bd50456},
+{5700000, DIF_BPF_COEFF3031, 0xf9dff1be},
+{5700000, DIF_BPF_COEFF3233, 0xf067f6f2},
+{5700000, DIF_BPF_COEFF3435, 0x02520cd2},
+{5700000, DIF_BPF_COEFF36, 0x110d0000},
+/* END - DIF BPF register values from 57_quant.dat*/
+
+
+/*case 5800000:*/
+/* BEGIN - DIF BPF register values from 58_quant.dat*/
+{5800000, DIF_BPF_COEFF01, 0x00000003},
+{5800000, DIF_BPF_COEFF23, 0x00080009},
+{5800000, DIF_BPF_COEFF45, 0xfff8ffd2},
+{5800000, DIF_BPF_COEFF67, 0xffaaffac},
+{5800000, DIF_BPF_COEFF89, 0x000200a3},
+{5800000, DIF_BPF_COEFF1011, 0x013c014a},
+{5800000, DIF_BPF_COEFF1213, 0x006dfec9},
+{5800000, DIF_BPF_COEFF1415, 0xfd2bfcb7},
+{5800000, DIF_BPF_COEFF1617, 0xfe350165},
+{5800000, DIF_BPF_COEFF1819, 0x04cb0651},
+{5800000, DIF_BPF_COEFF2021, 0x0477ff7e},
+{5800000, DIF_BPF_COEFF2223, 0xf9a5f635},
+{5800000, DIF_BPF_COEFF2425, 0xf7b1fe20},
+{5800000, DIF_BPF_COEFF2627, 0x069f0ca8},
+{5800000, DIF_BPF_COEFF2829, 0x0c81058b},
+{5800000, DIF_BPF_COEFF3031, 0xfaf0f231},
+{5800000, DIF_BPF_COEFF3233, 0xf033f66d},
+{5800000, DIF_BPF_COEFF3435, 0x01e60cae},
+{5800000, DIF_BPF_COEFF36, 0x110d0000},
+/* END - DIF BPF register values from 58_quant.dat*/
+
+
+/*case 5900000:*/
+/* BEGIN - DIF BPF register values from 59_quant.dat*/
+{5900000, DIF_BPF_COEFF01, 0x00000002},
+{5900000, DIF_BPF_COEFF23, 0x0009000e},
+{5900000, DIF_BPF_COEFF45, 0x0005ffe1},
+{5900000, DIF_BPF_COEFF67, 0xffacff90},
+{5900000, DIF_BPF_COEFF89, 0xffc5005f},
+{5900000, DIF_BPF_COEFF1011, 0x01210184},
+{5900000, DIF_BPF_COEFF1213, 0x00fcff72},
+{5900000, DIF_BPF_COEFF1415, 0xfd8afc77},
+{5900000, DIF_BPF_COEFF1617, 0xfd51003f},
+{5900000, DIF_BPF_COEFF1819, 0x04020669},
+{5900000, DIF_BPF_COEFF2021, 0x05830103},
+{5900000, DIF_BPF_COEFF2223, 0xfad7f66b},
+{5900000, DIF_BPF_COEFF2425, 0xf6c8fc93},
+{5900000, DIF_BPF_COEFF2627, 0x05430c2b},
+{5900000, DIF_BPF_COEFF2829, 0x0d0d06b5},
+{5900000, DIF_BPF_COEFF3031, 0xfc08f2b2},
+{5900000, DIF_BPF_COEFF3233, 0xf00af5ec},
+{5900000, DIF_BPF_COEFF3435, 0x017b0c89},
+{5900000, DIF_BPF_COEFF36, 0x110d0000},
+/* END - DIF BPF register values from 59_quant.dat*/
+
+
+/*case 6000000:*/
+/* BEGIN - DIF BPF register values from 60_quant.dat*/
+{6000000, DIF_BPF_COEFF01, 0x00000001},
+{6000000, DIF_BPF_COEFF23, 0x00070012},
+{6000000, DIF_BPF_COEFF45, 0x0012fff5},
+{6000000, DIF_BPF_COEFF67, 0xffbaff82},
+{6000000, DIF_BPF_COEFF89, 0xff8e000f},
+{6000000, DIF_BPF_COEFF1011, 0x00e80198},
+{6000000, DIF_BPF_COEFF1213, 0x01750028},
+{6000000, DIF_BPF_COEFF1415, 0xfe18fc75},
+{6000000, DIF_BPF_COEFF1617, 0xfc99ff15},
+{6000000, DIF_BPF_COEFF1819, 0x03050636},
+{6000000, DIF_BPF_COEFF2021, 0x0656027f},
+{6000000, DIF_BPF_COEFF2223, 0xfc32f6e2},
+{6000000, DIF_BPF_COEFF2425, 0xf614fb17},
+{6000000, DIF_BPF_COEFF2627, 0x03d20b87},
+{6000000, DIF_BPF_COEFF2829, 0x0d7707d2},
+{6000000, DIF_BPF_COEFF3031, 0xfd26f341},
+{6000000, DIF_BPF_COEFF3233, 0xefeaf56f},
+{6000000, DIF_BPF_COEFF3435, 0x010f0c64},
+{6000000, DIF_BPF_COEFF36, 0x110d0000},
+/* END - DIF BPF register values from 60_quant.dat*/
+
+
+/*case 6100000:*/
+/* BEGIN - DIF BPF register values from 61_quant.dat*/
+{6100000, DIF_BPF_COEFF01, 0xffff0000},
+{6100000, DIF_BPF_COEFF23, 0x00050012},
+{6100000, DIF_BPF_COEFF45, 0x001c000b},
+{6100000, DIF_BPF_COEFF67, 0xffd1ff84},
+{6100000, DIF_BPF_COEFF89, 0xff66ffbe},
+{6100000, DIF_BPF_COEFF1011, 0x00960184},
+{6100000, DIF_BPF_COEFF1213, 0x01cd00da},
+{6100000, DIF_BPF_COEFF1415, 0xfeccfcb2},
+{6100000, DIF_BPF_COEFF1617, 0xfc17fdf9},
+{6100000, DIF_BPF_COEFF1819, 0x01e005bc},
+{6100000, DIF_BPF_COEFF2021, 0x06e703e4},
+{6100000, DIF_BPF_COEFF2223, 0xfdabf798},
+{6100000, DIF_BPF_COEFF2425, 0xf599f9b3},
+{6100000, DIF_BPF_COEFF2627, 0x02510abd},
+{6100000, DIF_BPF_COEFF2829, 0x0dbf08df},
+{6100000, DIF_BPF_COEFF3031, 0xfe48f3dc},
+{6100000, DIF_BPF_COEFF3233, 0xefd5f4f6},
+{6100000, DIF_BPF_COEFF3435, 0x00a20c3e},
+{6100000, DIF_BPF_COEFF36, 0x110d0000},
+/* END - DIF BPF register values from 61_quant.dat*/
+
+
+/*case 6200000:*/
+/* BEGIN - DIF BPF register values from 62_quant.dat*/
+{6200000, DIF_BPF_COEFF01, 0xfffffffe},
+{6200000, DIF_BPF_COEFF23, 0x0002000f},
+{6200000, DIF_BPF_COEFF45, 0x0021001f},
+{6200000, DIF_BPF_COEFF67, 0xfff0ff97},
+{6200000, DIF_BPF_COEFF89, 0xff50ff74},
+{6200000, DIF_BPF_COEFF1011, 0x0034014a},
+{6200000, DIF_BPF_COEFF1213, 0x01fa0179},
+{6200000, DIF_BPF_COEFF1415, 0xff97fd2a},
+{6200000, DIF_BPF_COEFF1617, 0xfbd3fcfa},
+{6200000, DIF_BPF_COEFF1819, 0x00a304fe},
+{6200000, DIF_BPF_COEFF2021, 0x07310525},
+{6200000, DIF_BPF_COEFF2223, 0xff37f886},
+{6200000, DIF_BPF_COEFF2425, 0xf55cf86e},
+{6200000, DIF_BPF_COEFF2627, 0x00c709d0},
+{6200000, DIF_BPF_COEFF2829, 0x0de209db},
+{6200000, DIF_BPF_COEFF3031, 0xff6df484},
+{6200000, DIF_BPF_COEFF3233, 0xefcbf481},
+{6200000, DIF_BPF_COEFF3435, 0x00360c18},
+{6200000, DIF_BPF_COEFF36, 0x110d0000},
+/* END - DIF BPF register values from 62_quant.dat*/
+
+
+/*case 6300000:*/
+/* BEGIN - DIF BPF register values from 63_quant.dat*/
+{6300000, DIF_BPF_COEFF01, 0xfffffffd},
+{6300000, DIF_BPF_COEFF23, 0xfffe000a},
+{6300000, DIF_BPF_COEFF45, 0x0021002f},
+{6300000, DIF_BPF_COEFF67, 0x0010ffb8},
+{6300000, DIF_BPF_COEFF89, 0xff50ff3b},
+{6300000, DIF_BPF_COEFF1011, 0xffcc00f0},
+{6300000, DIF_BPF_COEFF1213, 0x01fa01fa},
+{6300000, DIF_BPF_COEFF1415, 0x0069fdd4},
+{6300000, DIF_BPF_COEFF1617, 0xfbd3fc26},
+{6300000, DIF_BPF_COEFF1819, 0xff5d0407},
+{6300000, DIF_BPF_COEFF2021, 0x07310638},
+{6300000, DIF_BPF_COEFF2223, 0x00c9f9a8},
+{6300000, DIF_BPF_COEFF2425, 0xf55cf74e},
+{6300000, DIF_BPF_COEFF2627, 0xff3908c3},
+{6300000, DIF_BPF_COEFF2829, 0x0de20ac3},
+{6300000, DIF_BPF_COEFF3031, 0x0093f537},
+{6300000, DIF_BPF_COEFF3233, 0xefcbf410},
+{6300000, DIF_BPF_COEFF3435, 0xffca0bf2},
+{6300000, DIF_BPF_COEFF36, 0x110d0000},
+/* END - DIF BPF register values from 63_quant.dat*/
+
+
+/*case 6400000:*/
+/* BEGIN - DIF BPF register values from 64_quant.dat*/
+{6400000, DIF_BPF_COEFF01, 0xfffffffd},
+{6400000, DIF_BPF_COEFF23, 0xfffb0003},
+{6400000, DIF_BPF_COEFF45, 0x001c0037},
+{6400000, DIF_BPF_COEFF67, 0x002fffe2},
+{6400000, DIF_BPF_COEFF89, 0xff66ff17},
+{6400000, DIF_BPF_COEFF1011, 0xff6a007e},
+{6400000, DIF_BPF_COEFF1213, 0x01cd0251},
+{6400000, DIF_BPF_COEFF1415, 0x0134fea5},
+{6400000, DIF_BPF_COEFF1617, 0xfc17fb8b},
+{6400000, DIF_BPF_COEFF1819, 0xfe2002e0},
+{6400000, DIF_BPF_COEFF2021, 0x06e70713},
+{6400000, DIF_BPF_COEFF2223, 0x0255faf5},
+{6400000, DIF_BPF_COEFF2425, 0xf599f658},
+{6400000, DIF_BPF_COEFF2627, 0xfdaf0799},
+{6400000, DIF_BPF_COEFF2829, 0x0dbf0b96},
+{6400000, DIF_BPF_COEFF3031, 0x01b8f5f5},
+{6400000, DIF_BPF_COEFF3233, 0xefd5f3a3},
+{6400000, DIF_BPF_COEFF3435, 0xff5e0bca},
+{6400000, DIF_BPF_COEFF36, 0x110d0000},
+/* END - DIF BPF register values from 64_quant.dat*/
+
+
+/*case 6500000:*/
+/* BEGIN - DIF BPF register values from 65_quant.dat*/
+{6500000, DIF_BPF_COEFF01, 0x0000fffd},
+{6500000, DIF_BPF_COEFF23, 0xfff9fffb},
+{6500000, DIF_BPF_COEFF45, 0x00120037},
+{6500000, DIF_BPF_COEFF67, 0x00460010},
+{6500000, DIF_BPF_COEFF89, 0xff8eff0f},
+{6500000, DIF_BPF_COEFF1011, 0xff180000},
+{6500000, DIF_BPF_COEFF1213, 0x01750276},
+{6500000, DIF_BPF_COEFF1415, 0x01e8ff8d},
+{6500000, DIF_BPF_COEFF1617, 0xfc99fb31},
+{6500000, DIF_BPF_COEFF1819, 0xfcfb0198},
+{6500000, DIF_BPF_COEFF2021, 0x065607ad},
+{6500000, DIF_BPF_COEFF2223, 0x03cefc64},
+{6500000, DIF_BPF_COEFF2425, 0xf614f592},
+{6500000, DIF_BPF_COEFF2627, 0xfc2e0656},
+{6500000, DIF_BPF_COEFF2829, 0x0d770c52},
+{6500000, DIF_BPF_COEFF3031, 0x02daf6bd},
+{6500000, DIF_BPF_COEFF3233, 0xefeaf33b},
+{6500000, DIF_BPF_COEFF3435, 0xfef10ba3},
+{6500000, DIF_BPF_COEFF36, 0x110d0000},
+/* END - DIF BPF register values from 65_quant.dat*/
+
+
+/*case 6600000:*/
+/* BEGIN - DIF BPF register values from 66_quant.dat*/
+{6600000, DIF_BPF_COEFF01, 0x0000fffe},
+{6600000, DIF_BPF_COEFF23, 0xfff7fff5},
+{6600000, DIF_BPF_COEFF45, 0x0005002f},
+{6600000, DIF_BPF_COEFF67, 0x0054003c},
+{6600000, DIF_BPF_COEFF89, 0xffc5ff22},
+{6600000, DIF_BPF_COEFF1011, 0xfedfff82},
+{6600000, DIF_BPF_COEFF1213, 0x00fc0267},
+{6600000, DIF_BPF_COEFF1415, 0x0276007e},
+{6600000, DIF_BPF_COEFF1617, 0xfd51fb1c},
+{6600000, DIF_BPF_COEFF1819, 0xfbfe003e},
+{6600000, DIF_BPF_COEFF2021, 0x05830802},
+{6600000, DIF_BPF_COEFF2223, 0x0529fdec},
+{6600000, DIF_BPF_COEFF2425, 0xf6c8f4fe},
+{6600000, DIF_BPF_COEFF2627, 0xfabd04ff},
+{6600000, DIF_BPF_COEFF2829, 0x0d0d0cf6},
+{6600000, DIF_BPF_COEFF3031, 0x03f8f78f},
+{6600000, DIF_BPF_COEFF3233, 0xf00af2d7},
+{6600000, DIF_BPF_COEFF3435, 0xfe850b7b},
+{6600000, DIF_BPF_COEFF36, 0x110d0000},
+/* END - DIF BPF register values from 66_quant.dat*/
+
+
+/*case 6700000:*/
+/* BEGIN - DIF BPF register values from 67_quant.dat*/
+{6700000, DIF_BPF_COEFF01, 0x0000ffff},
+{6700000, DIF_BPF_COEFF23, 0xfff8fff0},
+{6700000, DIF_BPF_COEFF45, 0xfff80020},
+{6700000, DIF_BPF_COEFF67, 0x00560060},
+{6700000, DIF_BPF_COEFF89, 0x0002ff4e},
+{6700000, DIF_BPF_COEFF1011, 0xfec4ff10},
+{6700000, DIF_BPF_COEFF1213, 0x006d0225},
+{6700000, DIF_BPF_COEFF1415, 0x02d50166},
+{6700000, DIF_BPF_COEFF1617, 0xfe35fb4e},
+{6700000, DIF_BPF_COEFF1819, 0xfb35fee1},
+{6700000, DIF_BPF_COEFF2021, 0x0477080e},
+{6700000, DIF_BPF_COEFF2223, 0x065bff82},
+{6700000, DIF_BPF_COEFF2425, 0xf7b1f4a0},
+{6700000, DIF_BPF_COEFF2627, 0xf9610397},
+{6700000, DIF_BPF_COEFF2829, 0x0c810d80},
+{6700000, DIF_BPF_COEFF3031, 0x0510f869},
+{6700000, DIF_BPF_COEFF3233, 0xf033f278},
+{6700000, DIF_BPF_COEFF3435, 0xfe1a0b52},
+{6700000, DIF_BPF_COEFF36, 0x110d0000},
+/* END - DIF BPF register values from 67_quant.dat*/
+
+
+/*case 6800000:*/
+/* BEGIN - DIF BPF register values from 68_quant.dat*/
+{6800000, DIF_BPF_COEFF01, 0x00010000},
+{6800000, DIF_BPF_COEFF23, 0xfffaffee},
+{6800000, DIF_BPF_COEFF45, 0xffec000c},
+{6800000, DIF_BPF_COEFF67, 0x004c0078},
+{6800000, DIF_BPF_COEFF89, 0x0040ff8e},
+{6800000, DIF_BPF_COEFF1011, 0xfecafeb6},
+{6800000, DIF_BPF_COEFF1213, 0xffd301b6},
+{6800000, DIF_BPF_COEFF1415, 0x02fc0235},
+{6800000, DIF_BPF_COEFF1617, 0xff36fbc5},
+{6800000, DIF_BPF_COEFF1819, 0xfaaafd90},
+{6800000, DIF_BPF_COEFF2021, 0x033e07d2},
+{6800000, DIF_BPF_COEFF2223, 0x075b011b},
+{6800000, DIF_BPF_COEFF2425, 0xf8cbf47a},
+{6800000, DIF_BPF_COEFF2627, 0xf81f0224},
+{6800000, DIF_BPF_COEFF2829, 0x0bd50def},
+{6800000, DIF_BPF_COEFF3031, 0x0621f94b},
+{6800000, DIF_BPF_COEFF3233, 0xf067f21e},
+{6800000, DIF_BPF_COEFF3435, 0xfdae0b29},
+{6800000, DIF_BPF_COEFF36, 0x110d0000},
+/* END - DIF BPF register values from 68_quant.dat*/
+
+
+/*case 6900000:*/
+/* BEGIN - DIF BPF register values from 69_quant.dat*/
+{6900000, DIF_BPF_COEFF01, 0x00010001},
+{6900000, DIF_BPF_COEFF23, 0xfffdffef},
+{6900000, DIF_BPF_COEFF45, 0xffe3fff6},
+{6900000, DIF_BPF_COEFF67, 0x0037007f},
+{6900000, DIF_BPF_COEFF89, 0x0075ffdc},
+{6900000, DIF_BPF_COEFF1011, 0xfef2fe7c},
+{6900000, DIF_BPF_COEFF1213, 0xff3d0122},
+{6900000, DIF_BPF_COEFF1415, 0x02ea02dd},
+{6900000, DIF_BPF_COEFF1617, 0x0044fc79},
+{6900000, DIF_BPF_COEFF1819, 0xfa65fc5d},
+{6900000, DIF_BPF_COEFF2021, 0x01e3074e},
+{6900000, DIF_BPF_COEFF2223, 0x082102ad},
+{6900000, DIF_BPF_COEFF2425, 0xfa0ff48c},
+{6900000, DIF_BPF_COEFF2627, 0xf6fe00a9},
+{6900000, DIF_BPF_COEFF2829, 0x0b0a0e43},
+{6900000, DIF_BPF_COEFF3031, 0x0729fa33},
+{6900000, DIF_BPF_COEFF3233, 0xf0a5f1c9},
+{6900000, DIF_BPF_COEFF3435, 0xfd430b00},
+{6900000, DIF_BPF_COEFF36, 0x110d0000},
+/* END - DIF BPF register values from 69_quant.dat*/
+
+
+/*case 7000000:*/
+/* BEGIN - DIF BPF register values from 70_quant.dat*/
+{7000000, DIF_BPF_COEFF01, 0x00010002},
+{7000000, DIF_BPF_COEFF23, 0x0001fff3},
+{7000000, DIF_BPF_COEFF45, 0xffdeffe2},
+{7000000, DIF_BPF_COEFF67, 0x001b0076},
+{7000000, DIF_BPF_COEFF89, 0x009c002d},
+{7000000, DIF_BPF_COEFF1011, 0xff35fe68},
+{7000000, DIF_BPF_COEFF1213, 0xfeba0076},
+{7000000, DIF_BPF_COEFF1415, 0x029f0352},
+{7000000, DIF_BPF_COEFF1617, 0x014dfd60},
+{7000000, DIF_BPF_COEFF1819, 0xfa69fb53},
+{7000000, DIF_BPF_COEFF2021, 0x00740688},
+{7000000, DIF_BPF_COEFF2223, 0x08a7042d},
+{7000000, DIF_BPF_COEFF2425, 0xfb75f4d6},
+{7000000, DIF_BPF_COEFF2627, 0xf600ff2d},
+{7000000, DIF_BPF_COEFF2829, 0x0a220e7a},
+{7000000, DIF_BPF_COEFF3031, 0x0827fb22},
+{7000000, DIF_BPF_COEFF3233, 0xf0edf17a},
+{7000000, DIF_BPF_COEFF3435, 0xfcd80ad6},
+{7000000, DIF_BPF_COEFF36, 0x110d0000},
+/* END - DIF BPF register values from 70_quant.dat*/
+
+
+/*case 7100000:*/
+/* BEGIN - DIF BPF register values from 71_quant.dat*/
+{7100000, DIF_BPF_COEFF01, 0x00000003},
+{7100000, DIF_BPF_COEFF23, 0x0004fff9},
+{7100000, DIF_BPF_COEFF45, 0xffe0ffd2},
+{7100000, DIF_BPF_COEFF67, 0xfffb005e},
+{7100000, DIF_BPF_COEFF89, 0x00b0007a},
+{7100000, DIF_BPF_COEFF1011, 0xff8ffe7c},
+{7100000, DIF_BPF_COEFF1213, 0xfe53ffc1},
+{7100000, DIF_BPF_COEFF1415, 0x0221038c},
+{7100000, DIF_BPF_COEFF1617, 0x0241fe6e},
+{7100000, DIF_BPF_COEFF1819, 0xfab6fa80},
+{7100000, DIF_BPF_COEFF2021, 0xff010587},
+{7100000, DIF_BPF_COEFF2223, 0x08e90590},
+{7100000, DIF_BPF_COEFF2425, 0xfcf5f556},
+{7100000, DIF_BPF_COEFF2627, 0xf52bfdb3},
+{7100000, DIF_BPF_COEFF2829, 0x09210e95},
+{7100000, DIF_BPF_COEFF3031, 0x0919fc15},
+{7100000, DIF_BPF_COEFF3233, 0xf13ff12f},
+{7100000, DIF_BPF_COEFF3435, 0xfc6e0aab},
+{7100000, DIF_BPF_COEFF36, 0x110d0000},
+/* END - DIF BPF register values from 71_quant.dat*/
+
+
+/*case 7200000:*/
+/* BEGIN - DIF BPF register values from 72_quant.dat*/
+{7200000, DIF_BPF_COEFF01, 0x00000003},
+{7200000, DIF_BPF_COEFF23, 0x00070000},
+{7200000, DIF_BPF_COEFF45, 0xffe6ffc9},
+{7200000, DIF_BPF_COEFF67, 0xffdb0039},
+{7200000, DIF_BPF_COEFF89, 0x00af00b8},
+{7200000, DIF_BPF_COEFF1011, 0xfff4feb6},
+{7200000, DIF_BPF_COEFF1213, 0xfe13ff10},
+{7200000, DIF_BPF_COEFF1415, 0x01790388},
+{7200000, DIF_BPF_COEFF1617, 0x0311ff92},
+{7200000, DIF_BPF_COEFF1819, 0xfb48f9ed},
+{7200000, DIF_BPF_COEFF2021, 0xfd980453},
+{7200000, DIF_BPF_COEFF2223, 0x08e306cd},
+{7200000, DIF_BPF_COEFF2425, 0xfe88f60a},
+{7200000, DIF_BPF_COEFF2627, 0xf482fc40},
+{7200000, DIF_BPF_COEFF2829, 0x08080e93},
+{7200000, DIF_BPF_COEFF3031, 0x09fdfd0c},
+{7200000, DIF_BPF_COEFF3233, 0xf19af0ea},
+{7200000, DIF_BPF_COEFF3435, 0xfc050a81},
+{7200000, DIF_BPF_COEFF36, 0x110d0000},
+/* END - DIF BPF register values from 72_quant.dat*/
+
+
+/*case 7300000:*/
+/* BEGIN - DIF BPF register values from 73_quant.dat*/
+{7300000, DIF_BPF_COEFF01, 0x00000002},
+{7300000, DIF_BPF_COEFF23, 0x00080008},
+{7300000, DIF_BPF_COEFF45, 0xfff0ffc9},
+{7300000, DIF_BPF_COEFF67, 0xffc1000d},
+{7300000, DIF_BPF_COEFF89, 0x009800e2},
+{7300000, DIF_BPF_COEFF1011, 0x005bff10},
+{7300000, DIF_BPF_COEFF1213, 0xfe00fe74},
+{7300000, DIF_BPF_COEFF1415, 0x00b50345},
+{7300000, DIF_BPF_COEFF1617, 0x03b000bc},
+{7300000, DIF_BPF_COEFF1819, 0xfc18f9a1},
+{7300000, DIF_BPF_COEFF2021, 0xfc4802f9},
+{7300000, DIF_BPF_COEFF2223, 0x089807dc},
+{7300000, DIF_BPF_COEFF2425, 0x0022f6f0},
+{7300000, DIF_BPF_COEFF2627, 0xf407fada},
+{7300000, DIF_BPF_COEFF2829, 0x06da0e74},
+{7300000, DIF_BPF_COEFF3031, 0x0ad3fe06},
+{7300000, DIF_BPF_COEFF3233, 0xf1fef0ab},
+{7300000, DIF_BPF_COEFF3435, 0xfb9c0a55},
+{7300000, DIF_BPF_COEFF36, 0x110d0000},
+/* END - DIF BPF register values from 73_quant.dat*/
+
+
+/*case 7400000:*/
+/* BEGIN - DIF BPF register values from 74_quant.dat*/
+{7400000, DIF_BPF_COEFF01, 0x00000001},
+{7400000, DIF_BPF_COEFF23, 0x0008000e},
+{7400000, DIF_BPF_COEFF45, 0xfffdffd0},
+{7400000, DIF_BPF_COEFF67, 0xffafffdf},
+{7400000, DIF_BPF_COEFF89, 0x006e00f2},
+{7400000, DIF_BPF_COEFF1011, 0x00b8ff82},
+{7400000, DIF_BPF_COEFF1213, 0xfe1bfdf8},
+{7400000, DIF_BPF_COEFF1415, 0xffe302c8},
+{7400000, DIF_BPF_COEFF1617, 0x041301dc},
+{7400000, DIF_BPF_COEFF1819, 0xfd1af99e},
+{7400000, DIF_BPF_COEFF2021, 0xfb1e0183},
+{7400000, DIF_BPF_COEFF2223, 0x080908b5},
+{7400000, DIF_BPF_COEFF2425, 0x01bcf801},
+{7400000, DIF_BPF_COEFF2627, 0xf3bdf985},
+{7400000, DIF_BPF_COEFF2829, 0x059a0e38},
+{7400000, DIF_BPF_COEFF3031, 0x0b99ff03},
+{7400000, DIF_BPF_COEFF3233, 0xf26cf071},
+{7400000, DIF_BPF_COEFF3435, 0xfb330a2a},
+{7400000, DIF_BPF_COEFF36, 0x110d0000},
+/* END - DIF BPF register values from 74_quant.dat*/
+
+
+/*case 7500000:*/
+/* BEGIN - DIF BPF register values from 75_quant.dat*/
+{7500000, DIF_BPF_COEFF01, 0xffff0000},
+{7500000, DIF_BPF_COEFF23, 0x00070011},
+{7500000, DIF_BPF_COEFF45, 0x000affdf},
+{7500000, DIF_BPF_COEFF67, 0xffa9ffb5},
+{7500000, DIF_BPF_COEFF89, 0x003700e6},
+{7500000, DIF_BPF_COEFF1011, 0x01010000},
+{7500000, DIF_BPF_COEFF1213, 0xfe62fda8},
+{7500000, DIF_BPF_COEFF1415, 0xff140219},
+{7500000, DIF_BPF_COEFF1617, 0x043502e1},
+{7500000, DIF_BPF_COEFF1819, 0xfe42f9e6},
+{7500000, DIF_BPF_COEFF2021, 0xfa270000},
+{7500000, DIF_BPF_COEFF2223, 0x073a0953},
+{7500000, DIF_BPF_COEFF2425, 0x034cf939},
+{7500000, DIF_BPF_COEFF2627, 0xf3a4f845},
+{7500000, DIF_BPF_COEFF2829, 0x044c0de1},
+{7500000, DIF_BPF_COEFF3031, 0x0c4f0000},
+{7500000, DIF_BPF_COEFF3233, 0xf2e2f03c},
+{7500000, DIF_BPF_COEFF3435, 0xfacc09fe},
+{7500000, DIF_BPF_COEFF36, 0x110d0000},
+/* END - DIF BPF register values from 75_quant.dat*/
+
+
+/*case 7600000:*/
+/* BEGIN - DIF BPF register values from 76_quant.dat*/
+{7600000, DIF_BPF_COEFF01, 0xffffffff},
+{7600000, DIF_BPF_COEFF23, 0x00040012},
+{7600000, DIF_BPF_COEFF45, 0x0016fff3},
+{7600000, DIF_BPF_COEFF67, 0xffafff95},
+{7600000, DIF_BPF_COEFF89, 0xfff900c0},
+{7600000, DIF_BPF_COEFF1011, 0x0130007e},
+{7600000, DIF_BPF_COEFF1213, 0xfecefd89},
+{7600000, DIF_BPF_COEFF1415, 0xfe560146},
+{7600000, DIF_BPF_COEFF1617, 0x041303bc},
+{7600000, DIF_BPF_COEFF1819, 0xff81fa76},
+{7600000, DIF_BPF_COEFF2021, 0xf96cfe7d},
+{7600000, DIF_BPF_COEFF2223, 0x063209b1},
+{7600000, DIF_BPF_COEFF2425, 0x04c9fa93},
+{7600000, DIF_BPF_COEFF2627, 0xf3bdf71e},
+{7600000, DIF_BPF_COEFF2829, 0x02f30d6e},
+{7600000, DIF_BPF_COEFF3031, 0x0cf200fd},
+{7600000, DIF_BPF_COEFF3233, 0xf361f00e},
+{7600000, DIF_BPF_COEFF3435, 0xfa6509d1},
+{7600000, DIF_BPF_COEFF36, 0x110d0000},
+/* END - DIF BPF register values from 76_quant.dat*/
+
+
+/*case 7700000:*/
+/* BEGIN - DIF BPF register values from 77_quant.dat*/
+{7700000, DIF_BPF_COEFF01, 0xfffffffe},
+{7700000, DIF_BPF_COEFF23, 0x00010010},
+{7700000, DIF_BPF_COEFF45, 0x001e0008},
+{7700000, DIF_BPF_COEFF67, 0xffc1ff84},
+{7700000, DIF_BPF_COEFF89, 0xffbc0084},
+{7700000, DIF_BPF_COEFF1011, 0x013e00f0},
+{7700000, DIF_BPF_COEFF1213, 0xff56fd9f},
+{7700000, DIF_BPF_COEFF1415, 0xfdb8005c},
+{7700000, DIF_BPF_COEFF1617, 0x03b00460},
+{7700000, DIF_BPF_COEFF1819, 0x00c7fb45},
+{7700000, DIF_BPF_COEFF2021, 0xf8f4fd07},
+{7700000, DIF_BPF_COEFF2223, 0x04fa09ce},
+{7700000, DIF_BPF_COEFF2425, 0x062afc07},
+{7700000, DIF_BPF_COEFF2627, 0xf407f614},
+{7700000, DIF_BPF_COEFF2829, 0x01920ce0},
+{7700000, DIF_BPF_COEFF3031, 0x0d8301fa},
+{7700000, DIF_BPF_COEFF3233, 0xf3e8efe5},
+{7700000, DIF_BPF_COEFF3435, 0xfa0009a4},
+{7700000, DIF_BPF_COEFF36, 0x110d0000},
+/* END - DIF BPF register values from 77_quant.dat*/
+
+
+/*case 7800000:*/
+/* BEGIN - DIF BPF register values from 78_quant.dat*/
+{7800000, DIF_BPF_COEFF01, 0x0000fffd},
+{7800000, DIF_BPF_COEFF23, 0xfffd000b},
+{7800000, DIF_BPF_COEFF45, 0x0022001d},
+{7800000, DIF_BPF_COEFF67, 0xffdbff82},
+{7800000, DIF_BPF_COEFF89, 0xff870039},
+{7800000, DIF_BPF_COEFF1011, 0x012a014a},
+{7800000, DIF_BPF_COEFF1213, 0xffedfde7},
+{7800000, DIF_BPF_COEFF1415, 0xfd47ff6b},
+{7800000, DIF_BPF_COEFF1617, 0x031104c6},
+{7800000, DIF_BPF_COEFF1819, 0x0202fc4c},
+{7800000, DIF_BPF_COEFF2021, 0xf8c6fbad},
+{7800000, DIF_BPF_COEFF2223, 0x039909a7},
+{7800000, DIF_BPF_COEFF2425, 0x0767fd8e},
+{7800000, DIF_BPF_COEFF2627, 0xf482f52b},
+{7800000, DIF_BPF_COEFF2829, 0x002d0c39},
+{7800000, DIF_BPF_COEFF3031, 0x0e0002f4},
+{7800000, DIF_BPF_COEFF3233, 0xf477efc2},
+{7800000, DIF_BPF_COEFF3435, 0xf99b0977},
+{7800000, DIF_BPF_COEFF36, 0x110d0000},
+/* END - DIF BPF register values from 78_quant.dat*/
+
+
+/*case 7900000:*/
+/* BEGIN - DIF BPF register values from 79_quant.dat*/
+{7900000, DIF_BPF_COEFF01, 0x0000fffd},
+{7900000, DIF_BPF_COEFF23, 0xfffa0004},
+{7900000, DIF_BPF_COEFF45, 0x0020002d},
+{7900000, DIF_BPF_COEFF67, 0xfffbff91},
+{7900000, DIF_BPF_COEFF89, 0xff61ffe8},
+{7900000, DIF_BPF_COEFF1011, 0x00f70184},
+{7900000, DIF_BPF_COEFF1213, 0x0086fe5c},
+{7900000, DIF_BPF_COEFF1415, 0xfd0bfe85},
+{7900000, DIF_BPF_COEFF1617, 0x024104e5},
+{7900000, DIF_BPF_COEFF1819, 0x0323fd7d},
+{7900000, DIF_BPF_COEFF2021, 0xf8e2fa79},
+{7900000, DIF_BPF_COEFF2223, 0x021d093f},
+{7900000, DIF_BPF_COEFF2425, 0x0879ff22},
+{7900000, DIF_BPF_COEFF2627, 0xf52bf465},
+{7900000, DIF_BPF_COEFF2829, 0xfec70b79},
+{7900000, DIF_BPF_COEFF3031, 0x0e6803eb},
+{7900000, DIF_BPF_COEFF3233, 0xf50defa5},
+{7900000, DIF_BPF_COEFF3435, 0xf937094a},
+{7900000, DIF_BPF_COEFF36, 0x110d0000},
+/* END - DIF BPF register values from 79_quant.dat*/
+
+
+/*case 8000000:*/
+/* BEGIN - DIF BPF register values from 80_quant.dat*/
+{8000000, DIF_BPF_COEFF01, 0x0000fffe},
+{8000000, DIF_BPF_COEFF23, 0xfff8fffd},
+{8000000, DIF_BPF_COEFF45, 0x00190036},
+{8000000, DIF_BPF_COEFF67, 0x001bffaf},
+{8000000, DIF_BPF_COEFF89, 0xff4fff99},
+{8000000, DIF_BPF_COEFF1011, 0x00aa0198},
+{8000000, DIF_BPF_COEFF1213, 0x0112fef3},
+{8000000, DIF_BPF_COEFF1415, 0xfd09fdb9},
+{8000000, DIF_BPF_COEFF1617, 0x014d04be},
+{8000000, DIF_BPF_COEFF1819, 0x041bfecc},
+{8000000, DIF_BPF_COEFF2021, 0xf947f978},
+{8000000, DIF_BPF_COEFF2223, 0x00900897},
+{8000000, DIF_BPF_COEFF2425, 0x095a00b9},
+{8000000, DIF_BPF_COEFF2627, 0xf600f3c5},
+{8000000, DIF_BPF_COEFF2829, 0xfd650aa3},
+{8000000, DIF_BPF_COEFF3031, 0x0ebc04de},
+{8000000, DIF_BPF_COEFF3233, 0xf5aaef8e},
+{8000000, DIF_BPF_COEFF3435, 0xf8d5091c},
+{8000000, DIF_BPF_COEFF36, 0x110d0000},
+/* END - DIF BPF register values from 80_quant.dat*/
+
+
+/*case 8100000:*/
+/* BEGIN - DIF BPF register values from 81_quant.dat*/
+{8100000, DIF_BPF_COEFF01, 0x0000ffff},
+{8100000, DIF_BPF_COEFF23, 0xfff7fff6},
+{8100000, DIF_BPF_COEFF45, 0x000e0038},
+{8100000, DIF_BPF_COEFF67, 0x0037ffd7},
+{8100000, DIF_BPF_COEFF89, 0xff52ff56},
+{8100000, DIF_BPF_COEFF1011, 0x004b0184},
+{8100000, DIF_BPF_COEFF1213, 0x0186ffa1},
+{8100000, DIF_BPF_COEFF1415, 0xfd40fd16},
+{8100000, DIF_BPF_COEFF1617, 0x00440452},
+{8100000, DIF_BPF_COEFF1819, 0x04de0029},
+{8100000, DIF_BPF_COEFF2021, 0xf9f2f8b2},
+{8100000, DIF_BPF_COEFF2223, 0xfefe07b5},
+{8100000, DIF_BPF_COEFF2425, 0x0a05024d},
+{8100000, DIF_BPF_COEFF2627, 0xf6fef34d},
+{8100000, DIF_BPF_COEFF2829, 0xfc0a09b8},
+{8100000, DIF_BPF_COEFF3031, 0x0efa05cd},
+{8100000, DIF_BPF_COEFF3233, 0xf64eef7d},
+{8100000, DIF_BPF_COEFF3435, 0xf87308ed},
+{8100000, DIF_BPF_COEFF36, 0x110d0000},
+/* END - DIF BPF register values from 81_quant.dat*/
+
+
+/*case 8200000:*/
+/* BEGIN - DIF BPF register values from 82_quant.dat*/
+{8200000, DIF_BPF_COEFF01, 0x00010000},
+{8200000, DIF_BPF_COEFF23, 0xfff8fff0},
+{8200000, DIF_BPF_COEFF45, 0x00000031},
+{8200000, DIF_BPF_COEFF67, 0x004c0005},
+{8200000, DIF_BPF_COEFF89, 0xff6aff27},
+{8200000, DIF_BPF_COEFF1011, 0xffe4014a},
+{8200000, DIF_BPF_COEFF1213, 0x01d70057},
+{8200000, DIF_BPF_COEFF1415, 0xfdacfca6},
+{8200000, DIF_BPF_COEFF1617, 0xff3603a7},
+{8200000, DIF_BPF_COEFF1819, 0x05610184},
+{8200000, DIF_BPF_COEFF2021, 0xfadbf82e},
+{8200000, DIF_BPF_COEFF2223, 0xfd74069f},
+{8200000, DIF_BPF_COEFF2425, 0x0a7503d6},
+{8200000, DIF_BPF_COEFF2627, 0xf81ff2ff},
+{8200000, DIF_BPF_COEFF2829, 0xfab808b9},
+{8200000, DIF_BPF_COEFF3031, 0x0f2306b5},
+{8200000, DIF_BPF_COEFF3233, 0xf6f9ef72},
+{8200000, DIF_BPF_COEFF3435, 0xf81308bf},
+{8200000, DIF_BPF_COEFF36, 0x110d0000},
+/* END - DIF BPF register values from 82_quant.dat*/
+
+
+/*case 8300000:*/
+/* BEGIN - DIF BPF register values from 83_quant.dat*/
+{8300000, DIF_BPF_COEFF01, 0x00010001},
+{8300000, DIF_BPF_COEFF23, 0xfffbffee},
+{8300000, DIF_BPF_COEFF45, 0xfff30022},
+{8300000, DIF_BPF_COEFF67, 0x00560032},
+{8300000, DIF_BPF_COEFF89, 0xff95ff10},
+{8300000, DIF_BPF_COEFF1011, 0xff8000f0},
+{8300000, DIF_BPF_COEFF1213, 0x01fe0106},
+{8300000, DIF_BPF_COEFF1415, 0xfe46fc71},
+{8300000, DIF_BPF_COEFF1617, 0xfe3502c7},
+{8300000, DIF_BPF_COEFF1819, 0x059e02ce},
+{8300000, DIF_BPF_COEFF2021, 0xfbf9f7f2},
+{8300000, DIF_BPF_COEFF2223, 0xfbff055b},
+{8300000, DIF_BPF_COEFF2425, 0x0aa9054c},
+{8300000, DIF_BPF_COEFF2627, 0xf961f2db},
+{8300000, DIF_BPF_COEFF2829, 0xf97507aa},
+{8300000, DIF_BPF_COEFF3031, 0x0f350797},
+{8300000, DIF_BPF_COEFF3233, 0xf7a9ef6d},
+{8300000, DIF_BPF_COEFF3435, 0xf7b40890},
+{8300000, DIF_BPF_COEFF36, 0x110d0000},
+/* END - DIF BPF register values from 83_quant.dat*/
+
+
+/*case 8400000:*/
+/* BEGIN - DIF BPF register values from 84_quant.dat*/
+{8400000, DIF_BPF_COEFF01, 0x00010002},
+{8400000, DIF_BPF_COEFF23, 0xfffeffee},
+{8400000, DIF_BPF_COEFF45, 0xffe8000f},
+{8400000, DIF_BPF_COEFF67, 0x00540058},
+{8400000, DIF_BPF_COEFF89, 0xffcdff14},
+{8400000, DIF_BPF_COEFF1011, 0xff29007e},
+{8400000, DIF_BPF_COEFF1213, 0x01f6019e},
+{8400000, DIF_BPF_COEFF1415, 0xff01fc7c},
+{8400000, DIF_BPF_COEFF1617, 0xfd5101bf},
+{8400000, DIF_BPF_COEFF1819, 0x059203f6},
+{8400000, DIF_BPF_COEFF2021, 0xfd41f7fe},
+{8400000, DIF_BPF_COEFF2223, 0xfaa903f3},
+{8400000, DIF_BPF_COEFF2425, 0x0a9e06a9},
+{8400000, DIF_BPF_COEFF2627, 0xfabdf2e2},
+{8400000, DIF_BPF_COEFF2829, 0xf842068b},
+{8400000, DIF_BPF_COEFF3031, 0x0f320871},
+{8400000, DIF_BPF_COEFF3233, 0xf85eef6e},
+{8400000, DIF_BPF_COEFF3435, 0xf7560860},
+{8400000, DIF_BPF_COEFF36, 0x110d0000},
+/* END - DIF BPF register values from 84_quant.dat*/
+
+
+/*case 8500000:*/
+/* BEGIN - DIF BPF register values from 85_quant.dat*/
+{8500000, DIF_BPF_COEFF01, 0x00000003},
+{8500000, DIF_BPF_COEFF23, 0x0002fff2},
+{8500000, DIF_BPF_COEFF45, 0xffe1fff9},
+{8500000, DIF_BPF_COEFF67, 0x00460073},
+{8500000, DIF_BPF_COEFF89, 0x000bff34},
+{8500000, DIF_BPF_COEFF1011, 0xfee90000},
+{8500000, DIF_BPF_COEFF1213, 0x01c10215},
+{8500000, DIF_BPF_COEFF1415, 0xffd0fcc5},
+{8500000, DIF_BPF_COEFF1617, 0xfc99009d},
+{8500000, DIF_BPF_COEFF1819, 0x053d04f1},
+{8500000, DIF_BPF_COEFF2021, 0xfea5f853},
+{8500000, DIF_BPF_COEFF2223, 0xf97d0270},
+{8500000, DIF_BPF_COEFF2425, 0x0a5607e4},
+{8500000, DIF_BPF_COEFF2627, 0xfc2ef314},
+{8500000, DIF_BPF_COEFF2829, 0xf723055f},
+{8500000, DIF_BPF_COEFF3031, 0x0f180943},
+{8500000, DIF_BPF_COEFF3233, 0xf919ef75},
+{8500000, DIF_BPF_COEFF3435, 0xf6fa0830},
+{8500000, DIF_BPF_COEFF36, 0x110d0000},
+/* END - DIF BPF register values from 85_quant.dat*/
+
+
+/*case 8600000:*/
+/* BEGIN - DIF BPF register values from 86_quant.dat*/
+{8600000, DIF_BPF_COEFF01, 0x00000003},
+{8600000, DIF_BPF_COEFF23, 0x0005fff8},
+{8600000, DIF_BPF_COEFF45, 0xffdeffe4},
+{8600000, DIF_BPF_COEFF67, 0x002f007f},
+{8600000, DIF_BPF_COEFF89, 0x0048ff6b},
+{8600000, DIF_BPF_COEFF1011, 0xfec7ff82},
+{8600000, DIF_BPF_COEFF1213, 0x0163025f},
+{8600000, DIF_BPF_COEFF1415, 0x00a2fd47},
+{8600000, DIF_BPF_COEFF1617, 0xfc17ff73},
+{8600000, DIF_BPF_COEFF1819, 0x04a405b2},
+{8600000, DIF_BPF_COEFF2021, 0x0017f8ed},
+{8600000, DIF_BPF_COEFF2223, 0xf88500dc},
+{8600000, DIF_BPF_COEFF2425, 0x09d208f9},
+{8600000, DIF_BPF_COEFF2627, 0xfdaff370},
+{8600000, DIF_BPF_COEFF2829, 0xf61c0429},
+{8600000, DIF_BPF_COEFF3031, 0x0ee80a0b},
+{8600000, DIF_BPF_COEFF3233, 0xf9d8ef82},
+{8600000, DIF_BPF_COEFF3435, 0xf6a00800},
+{8600000, DIF_BPF_COEFF36, 0x110d0000},
+/* END - DIF BPF register values from 86_quant.dat*/
+
+
+/*case 8700000:*/
+/* BEGIN - DIF BPF register values from 87_quant.dat*/
+{8700000, DIF_BPF_COEFF01, 0x00000003},
+{8700000, DIF_BPF_COEFF23, 0x0007ffff},
+{8700000, DIF_BPF_COEFF45, 0xffe1ffd4},
+{8700000, DIF_BPF_COEFF67, 0x0010007a},
+{8700000, DIF_BPF_COEFF89, 0x007cffb2},
+{8700000, DIF_BPF_COEFF1011, 0xfec6ff10},
+{8700000, DIF_BPF_COEFF1213, 0x00e60277},
+{8700000, DIF_BPF_COEFF1415, 0x0168fdf9},
+{8700000, DIF_BPF_COEFF1617, 0xfbd3fe50},
+{8700000, DIF_BPF_COEFF1819, 0x03ce0631},
+{8700000, DIF_BPF_COEFF2021, 0x0188f9c8},
+{8700000, DIF_BPF_COEFF2223, 0xf7c7ff43},
+{8700000, DIF_BPF_COEFF2425, 0x091509e3},
+{8700000, DIF_BPF_COEFF2627, 0xff39f3f6},
+{8700000, DIF_BPF_COEFF2829, 0xf52d02ea},
+{8700000, DIF_BPF_COEFF3031, 0x0ea30ac9},
+{8700000, DIF_BPF_COEFF3233, 0xfa9bef95},
+{8700000, DIF_BPF_COEFF3435, 0xf64607d0},
+{8700000, DIF_BPF_COEFF36, 0x110d0000},
+/* END - DIF BPF register values from 87_quant.dat*/
+
+
+/*case 8800000:*/
+/* BEGIN - DIF BPF register values from 88_quant.dat*/
+{8800000, DIF_BPF_COEFF01, 0x00000002},
+{8800000, DIF_BPF_COEFF23, 0x00090007},
+{8800000, DIF_BPF_COEFF45, 0xffe9ffca},
+{8800000, DIF_BPF_COEFF67, 0xfff00065},
+{8800000, DIF_BPF_COEFF89, 0x00a10003},
+{8800000, DIF_BPF_COEFF1011, 0xfee6feb6},
+{8800000, DIF_BPF_COEFF1213, 0x0053025b},
+{8800000, DIF_BPF_COEFF1415, 0x0213fed0},
+{8800000, DIF_BPF_COEFF1617, 0xfbd3fd46},
+{8800000, DIF_BPF_COEFF1819, 0x02c70668},
+{8800000, DIF_BPF_COEFF2021, 0x02eafadb},
+{8800000, DIF_BPF_COEFF2223, 0xf74bfdae},
+{8800000, DIF_BPF_COEFF2425, 0x08230a9c},
+{8800000, DIF_BPF_COEFF2627, 0x00c7f4a3},
+{8800000, DIF_BPF_COEFF2829, 0xf45b01a6},
+{8800000, DIF_BPF_COEFF3031, 0x0e480b7c},
+{8800000, DIF_BPF_COEFF3233, 0xfb61efae},
+{8800000, DIF_BPF_COEFF3435, 0xf5ef079f},
+{8800000, DIF_BPF_COEFF36, 0x110d0000},
+/* END - DIF BPF register values from 88_quant.dat*/
+
+
+/*case 8900000:*/
+/* BEGIN - DIF BPF register values from 89_quant.dat*/
+{8900000, DIF_BPF_COEFF01, 0xffff0000},
+{8900000, DIF_BPF_COEFF23, 0x0008000d},
+{8900000, DIF_BPF_COEFF45, 0xfff5ffc8},
+{8900000, DIF_BPF_COEFF67, 0xffd10043},
+{8900000, DIF_BPF_COEFF89, 0x00b20053},
+{8900000, DIF_BPF_COEFF1011, 0xff24fe7c},
+{8900000, DIF_BPF_COEFF1213, 0xffb9020c},
+{8900000, DIF_BPF_COEFF1415, 0x0295ffbb},
+{8900000, DIF_BPF_COEFF1617, 0xfc17fc64},
+{8900000, DIF_BPF_COEFF1819, 0x019b0654},
+{8900000, DIF_BPF_COEFF2021, 0x042dfc1c},
+{8900000, DIF_BPF_COEFF2223, 0xf714fc2a},
+{8900000, DIF_BPF_COEFF2425, 0x07020b21},
+{8900000, DIF_BPF_COEFF2627, 0x0251f575},
+{8900000, DIF_BPF_COEFF2829, 0xf3a7005e},
+{8900000, DIF_BPF_COEFF3031, 0x0dd80c24},
+{8900000, DIF_BPF_COEFF3233, 0xfc2aefcd},
+{8900000, DIF_BPF_COEFF3435, 0xf599076e},
+{8900000, DIF_BPF_COEFF36, 0x110d0000},
+/* END - DIF BPF register values from 89_quant.dat*/
+
+
+/*case 9000000:*/
+/* BEGIN - DIF BPF register values from 90_quant.dat*/
+{9000000, DIF_BPF_COEFF01, 0xffffffff},
+{9000000, DIF_BPF_COEFF23, 0x00060011},
+{9000000, DIF_BPF_COEFF45, 0x0002ffcf},
+{9000000, DIF_BPF_COEFF67, 0xffba0018},
+{9000000, DIF_BPF_COEFF89, 0x00ad009a},
+{9000000, DIF_BPF_COEFF1011, 0xff79fe68},
+{9000000, DIF_BPF_COEFF1213, 0xff260192},
+{9000000, DIF_BPF_COEFF1415, 0x02e500ab},
+{9000000, DIF_BPF_COEFF1617, 0xfc99fbb6},
+{9000000, DIF_BPF_COEFF1819, 0x005b05f7},
+{9000000, DIF_BPF_COEFF2021, 0x0545fd81},
+{9000000, DIF_BPF_COEFF2223, 0xf723fabf},
+{9000000, DIF_BPF_COEFF2425, 0x05b80b70},
+{9000000, DIF_BPF_COEFF2627, 0x03d2f669},
+{9000000, DIF_BPF_COEFF2829, 0xf313ff15},
+{9000000, DIF_BPF_COEFF3031, 0x0d550cbf},
+{9000000, DIF_BPF_COEFF3233, 0xfcf6eff2},
+{9000000, DIF_BPF_COEFF3435, 0xf544073d},
+{9000000, DIF_BPF_COEFF36, 0x110d0000},
+/* END - DIF BPF register values from 90_quant.dat*/
+
+
+/*case 9100000:*/
+/* BEGIN - DIF BPF register values from 91_quant.dat*/
+{9100000, DIF_BPF_COEFF01, 0xfffffffe},
+{9100000, DIF_BPF_COEFF23, 0x00030012},
+{9100000, DIF_BPF_COEFF45, 0x000fffdd},
+{9100000, DIF_BPF_COEFF67, 0xffacffea},
+{9100000, DIF_BPF_COEFF89, 0x009300cf},
+{9100000, DIF_BPF_COEFF1011, 0xffdcfe7c},
+{9100000, DIF_BPF_COEFF1213, 0xfea600f7},
+{9100000, DIF_BPF_COEFF1415, 0x02fd0190},
+{9100000, DIF_BPF_COEFF1617, 0xfd51fb46},
+{9100000, DIF_BPF_COEFF1819, 0xff150554},
+{9100000, DIF_BPF_COEFF2021, 0x0627fefd},
+{9100000, DIF_BPF_COEFF2223, 0xf778f978},
+{9100000, DIF_BPF_COEFF2425, 0x044d0b87},
+{9100000, DIF_BPF_COEFF2627, 0x0543f77d},
+{9100000, DIF_BPF_COEFF2829, 0xf2a0fdcf},
+{9100000, DIF_BPF_COEFF3031, 0x0cbe0d4e},
+{9100000, DIF_BPF_COEFF3233, 0xfdc4f01d},
+{9100000, DIF_BPF_COEFF3435, 0xf4f2070b},
+{9100000, DIF_BPF_COEFF36, 0x110d0000},
+/* END - DIF BPF register values from 91_quant.dat*/
+
+
+/*case 9200000:*/
+/* BEGIN - DIF BPF register values from 92_quant.dat*/
+{9200000, DIF_BPF_COEFF01, 0x0000fffd},
+{9200000, DIF_BPF_COEFF23, 0x00000010},
+{9200000, DIF_BPF_COEFF45, 0x001afff0},
+{9200000, DIF_BPF_COEFF67, 0xffaaffbf},
+{9200000, DIF_BPF_COEFF89, 0x006700ed},
+{9200000, DIF_BPF_COEFF1011, 0x0043feb6},
+{9200000, DIF_BPF_COEFF1213, 0xfe460047},
+{9200000, DIF_BPF_COEFF1415, 0x02db0258},
+{9200000, DIF_BPF_COEFF1617, 0xfe35fb1b},
+{9200000, DIF_BPF_COEFF1819, 0xfddc0473},
+{9200000, DIF_BPF_COEFF2021, 0x06c90082},
+{9200000, DIF_BPF_COEFF2223, 0xf811f85e},
+{9200000, DIF_BPF_COEFF2425, 0x02c90b66},
+{9200000, DIF_BPF_COEFF2627, 0x069ff8ad},
+{9200000, DIF_BPF_COEFF2829, 0xf250fc8d},
+{9200000, DIF_BPF_COEFF3031, 0x0c140dcf},
+{9200000, DIF_BPF_COEFF3233, 0xfe93f04d},
+{9200000, DIF_BPF_COEFF3435, 0xf4a106d9},
+{9200000, DIF_BPF_COEFF36, 0x110d0000},
+/* END - DIF BPF register values from 92_quant.dat*/
+
+
+/*case 9300000:*/
+/* BEGIN - DIF BPF register values from 93_quant.dat*/
+{9300000, DIF_BPF_COEFF01, 0x0000fffd},
+{9300000, DIF_BPF_COEFF23, 0xfffc000c},
+{9300000, DIF_BPF_COEFF45, 0x00200006},
+{9300000, DIF_BPF_COEFF67, 0xffb4ff9c},
+{9300000, DIF_BPF_COEFF89, 0x002f00ef},
+{9300000, DIF_BPF_COEFF1011, 0x00a4ff10},
+{9300000, DIF_BPF_COEFF1213, 0xfe0dff92},
+{9300000, DIF_BPF_COEFF1415, 0x028102f7},
+{9300000, DIF_BPF_COEFF1617, 0xff36fb37},
+{9300000, DIF_BPF_COEFF1819, 0xfcbf035e},
+{9300000, DIF_BPF_COEFF2021, 0x07260202},
+{9300000, DIF_BPF_COEFF2223, 0xf8e8f778},
+{9300000, DIF_BPF_COEFF2425, 0x01340b0d},
+{9300000, DIF_BPF_COEFF2627, 0x07e1f9f4},
+{9300000, DIF_BPF_COEFF2829, 0xf223fb51},
+{9300000, DIF_BPF_COEFF3031, 0x0b590e42},
+{9300000, DIF_BPF_COEFF3233, 0xff64f083},
+{9300000, DIF_BPF_COEFF3435, 0xf45206a7},
+{9300000, DIF_BPF_COEFF36, 0x110d0000},
+/* END - DIF BPF register values from 93_quant.dat*/
+
+
+/*case 9400000:*/
+/* BEGIN - DIF BPF register values from 94_quant.dat*/
+{9400000, DIF_BPF_COEFF01, 0x0000fffd},
+{9400000, DIF_BPF_COEFF23, 0xfff90005},
+{9400000, DIF_BPF_COEFF45, 0x0022001a},
+{9400000, DIF_BPF_COEFF67, 0xffc9ff86},
+{9400000, DIF_BPF_COEFF89, 0xfff000d7},
+{9400000, DIF_BPF_COEFF1011, 0x00f2ff82},
+{9400000, DIF_BPF_COEFF1213, 0xfe01fee5},
+{9400000, DIF_BPF_COEFF1415, 0x01f60362},
+{9400000, DIF_BPF_COEFF1617, 0x0044fb99},
+{9400000, DIF_BPF_COEFF1819, 0xfbcc0222},
+{9400000, DIF_BPF_COEFF2021, 0x07380370},
+{9400000, DIF_BPF_COEFF2223, 0xf9f7f6cc},
+{9400000, DIF_BPF_COEFF2425, 0xff990a7e},
+{9400000, DIF_BPF_COEFF2627, 0x0902fb50},
+{9400000, DIF_BPF_COEFF2829, 0xf21afa1f},
+{9400000, DIF_BPF_COEFF3031, 0x0a8d0ea6},
+{9400000, DIF_BPF_COEFF3233, 0x0034f0bf},
+{9400000, DIF_BPF_COEFF3435, 0xf4050675},
+{9400000, DIF_BPF_COEFF36, 0x110d0000},
+/* END - DIF BPF register values from 94_quant.dat*/
+
+
+/*case 9500000:*/
+/* BEGIN - DIF BPF register values from 95_quant.dat*/
+{9500000, DIF_BPF_COEFF01, 0x0000fffe},
+{9500000, DIF_BPF_COEFF23, 0xfff8fffe},
+{9500000, DIF_BPF_COEFF45, 0x001e002b},
+{9500000, DIF_BPF_COEFF67, 0xffe5ff81},
+{9500000, DIF_BPF_COEFF89, 0xffb400a5},
+{9500000, DIF_BPF_COEFF1011, 0x01280000},
+{9500000, DIF_BPF_COEFF1213, 0xfe24fe50},
+{9500000, DIF_BPF_COEFF1415, 0x01460390},
+{9500000, DIF_BPF_COEFF1617, 0x014dfc3a},
+{9500000, DIF_BPF_COEFF1819, 0xfb1000ce},
+{9500000, DIF_BPF_COEFF2021, 0x070104bf},
+{9500000, DIF_BPF_COEFF2223, 0xfb37f65f},
+{9500000, DIF_BPF_COEFF2425, 0xfe0009bc},
+{9500000, DIF_BPF_COEFF2627, 0x0a00fcbb},
+{9500000, DIF_BPF_COEFF2829, 0xf235f8f8},
+{9500000, DIF_BPF_COEFF3031, 0x09b20efc},
+{9500000, DIF_BPF_COEFF3233, 0x0105f101},
+{9500000, DIF_BPF_COEFF3435, 0xf3ba0642},
+{9500000, DIF_BPF_COEFF36, 0x110d0000},
+/* END - DIF BPF register values from 95_quant.dat*/
+
+
+/*case 9600000:*/
+/* BEGIN - DIF BPF register values from 96_quant.dat*/
+{9600000, DIF_BPF_COEFF01, 0x0001ffff},
+{9600000, DIF_BPF_COEFF23, 0xfff8fff7},
+{9600000, DIF_BPF_COEFF45, 0x00150036},
+{9600000, DIF_BPF_COEFF67, 0x0005ff8c},
+{9600000, DIF_BPF_COEFF89, 0xff810061},
+{9600000, DIF_BPF_COEFF1011, 0x013d007e},
+{9600000, DIF_BPF_COEFF1213, 0xfe71fddf},
+{9600000, DIF_BPF_COEFF1415, 0x007c0380},
+{9600000, DIF_BPF_COEFF1617, 0x0241fd13},
+{9600000, DIF_BPF_COEFF1819, 0xfa94ff70},
+{9600000, DIF_BPF_COEFF2021, 0x068005e2},
+{9600000, DIF_BPF_COEFF2223, 0xfc9bf633},
+{9600000, DIF_BPF_COEFF2425, 0xfc7308ca},
+{9600000, DIF_BPF_COEFF2627, 0x0ad5fe30},
+{9600000, DIF_BPF_COEFF2829, 0xf274f7e0},
+{9600000, DIF_BPF_COEFF3031, 0x08c90f43},
+{9600000, DIF_BPF_COEFF3233, 0x01d4f147},
+{9600000, DIF_BPF_COEFF3435, 0xf371060f},
+{9600000, DIF_BPF_COEFF36, 0x110d0000},
+/* END - DIF BPF register values from 96_quant.dat*/
+
+
+/*case 9700000:*/
+/* BEGIN - DIF BPF register values from 97_quant.dat*/
+{9700000, DIF_BPF_COEFF01, 0x00010001},
+{9700000, DIF_BPF_COEFF23, 0xfff9fff1},
+{9700000, DIF_BPF_COEFF45, 0x00090038},
+{9700000, DIF_BPF_COEFF67, 0x0025ffa7},
+{9700000, DIF_BPF_COEFF89, 0xff5e0012},
+{9700000, DIF_BPF_COEFF1011, 0x013200f0},
+{9700000, DIF_BPF_COEFF1213, 0xfee3fd9b},
+{9700000, DIF_BPF_COEFF1415, 0xffaa0331},
+{9700000, DIF_BPF_COEFF1617, 0x0311fe15},
+{9700000, DIF_BPF_COEFF1819, 0xfa60fe18},
+{9700000, DIF_BPF_COEFF2021, 0x05bd06d1},
+{9700000, DIF_BPF_COEFF2223, 0xfe1bf64a},
+{9700000, DIF_BPF_COEFF2425, 0xfafa07ae},
+{9700000, DIF_BPF_COEFF2627, 0x0b7effab},
+{9700000, DIF_BPF_COEFF2829, 0xf2d5f6d7},
+{9700000, DIF_BPF_COEFF3031, 0x07d30f7a},
+{9700000, DIF_BPF_COEFF3233, 0x02a3f194},
+{9700000, DIF_BPF_COEFF3435, 0xf32905dc},
+{9700000, DIF_BPF_COEFF36, 0x110d0000},
+/* END - DIF BPF register values from 97_quant.dat*/
+
+
+/*case 9800000:*/
+/* BEGIN - DIF BPF register values from 98_quant.dat*/
+{9800000, DIF_BPF_COEFF01, 0x00010002},
+{9800000, DIF_BPF_COEFF23, 0xfffcffee},
+{9800000, DIF_BPF_COEFF45, 0xfffb0032},
+{9800000, DIF_BPF_COEFF67, 0x003fffcd},
+{9800000, DIF_BPF_COEFF89, 0xff4effc1},
+{9800000, DIF_BPF_COEFF1011, 0x0106014a},
+{9800000, DIF_BPF_COEFF1213, 0xff6efd8a},
+{9800000, DIF_BPF_COEFF1415, 0xfedd02aa},
+{9800000, DIF_BPF_COEFF1617, 0x03b0ff34},
+{9800000, DIF_BPF_COEFF1819, 0xfa74fcd7},
+{9800000, DIF_BPF_COEFF2021, 0x04bf0781},
+{9800000, DIF_BPF_COEFF2223, 0xffaaf6a3},
+{9800000, DIF_BPF_COEFF2425, 0xf99e066b},
+{9800000, DIF_BPF_COEFF2627, 0x0bf90128},
+{9800000, DIF_BPF_COEFF2829, 0xf359f5e1},
+{9800000, DIF_BPF_COEFF3031, 0x06d20fa2},
+{9800000, DIF_BPF_COEFF3233, 0x0370f1e5},
+{9800000, DIF_BPF_COEFF3435, 0xf2e405a8},
+{9800000, DIF_BPF_COEFF36, 0x110d0000},
+/* END - DIF BPF register values from 98_quant.dat*/
+
+
+/*case 9900000:*/
+/* BEGIN - DIF BPF register values from 99_quant.dat*/
+{9900000, DIF_BPF_COEFF01, 0x00000003},
+{9900000, DIF_BPF_COEFF23, 0xffffffee},
+{9900000, DIF_BPF_COEFF45, 0xffef0024},
+{9900000, DIF_BPF_COEFF67, 0x0051fffa},
+{9900000, DIF_BPF_COEFF89, 0xff54ff77},
+{9900000, DIF_BPF_COEFF1011, 0x00be0184},
+{9900000, DIF_BPF_COEFF1213, 0x0006fdad},
+{9900000, DIF_BPF_COEFF1415, 0xfe2701f3},
+{9900000, DIF_BPF_COEFF1617, 0x0413005e},
+{9900000, DIF_BPF_COEFF1819, 0xfad1fbba},
+{9900000, DIF_BPF_COEFF2021, 0x039007ee},
+{9900000, DIF_BPF_COEFF2223, 0x013bf73d},
+{9900000, DIF_BPF_COEFF2425, 0xf868050a},
+{9900000, DIF_BPF_COEFF2627, 0x0c4302a1},
+{9900000, DIF_BPF_COEFF2829, 0xf3fdf4fe},
+{9900000, DIF_BPF_COEFF3031, 0x05c70fba},
+{9900000, DIF_BPF_COEFF3233, 0x043bf23c},
+{9900000, DIF_BPF_COEFF3435, 0xf2a10575},
+{9900000, DIF_BPF_COEFF36, 0x110d0000},
+/* END - DIF BPF register values from 99_quant.dat*/
+
+
+/*case 10000000:*/
+/* BEGIN - DIF BPF register values from 100_quant.dat*/
+{10000000, DIF_BPF_COEFF01, 0x00000003},
+{10000000, DIF_BPF_COEFF23, 0x0003fff1},
+{10000000, DIF_BPF_COEFF45, 0xffe50011},
+{10000000, DIF_BPF_COEFF67, 0x00570027},
+{10000000, DIF_BPF_COEFF89, 0xff70ff3c},
+{10000000, DIF_BPF_COEFF1011, 0x00620198},
+{10000000, DIF_BPF_COEFF1213, 0x009efe01},
+{10000000, DIF_BPF_COEFF1415, 0xfd95011a},
+{10000000, DIF_BPF_COEFF1617, 0x04350183},
+{10000000, DIF_BPF_COEFF1819, 0xfb71fad0},
+{10000000, DIF_BPF_COEFF2021, 0x023c0812},
+{10000000, DIF_BPF_COEFF2223, 0x02c3f811},
+{10000000, DIF_BPF_COEFF2425, 0xf75e0390},
+{10000000, DIF_BPF_COEFF2627, 0x0c5c0411},
+{10000000, DIF_BPF_COEFF2829, 0xf4c1f432},
+{10000000, DIF_BPF_COEFF3031, 0x04b30fc1},
+{10000000, DIF_BPF_COEFF3233, 0x0503f297},
+{10000000, DIF_BPF_COEFF3435, 0xf2610541},
+{10000000, DIF_BPF_COEFF36, 0x110d0000},
+/* END - DIF BPF register values from 100_quant.dat*/
+
+
+/*case 10100000:*/
+/* BEGIN - DIF BPF register values from 101_quant.dat*/
+{10100000, DIF_BPF_COEFF01, 0x00000003},
+{10100000, DIF_BPF_COEFF23, 0x0006fff7},
+{10100000, DIF_BPF_COEFF45, 0xffdffffc},
+{10100000, DIF_BPF_COEFF67, 0x00510050},
+{10100000, DIF_BPF_COEFF89, 0xff9dff18},
+{10100000, DIF_BPF_COEFF1011, 0xfffc0184},
+{10100000, DIF_BPF_COEFF1213, 0x0128fe80},
+{10100000, DIF_BPF_COEFF1415, 0xfd32002e},
+{10100000, DIF_BPF_COEFF1617, 0x04130292},
+{10100000, DIF_BPF_COEFF1819, 0xfc4dfa21},
+{10100000, DIF_BPF_COEFF2021, 0x00d107ee},
+{10100000, DIF_BPF_COEFF2223, 0x0435f91c},
+{10100000, DIF_BPF_COEFF2425, 0xf6850205},
+{10100000, DIF_BPF_COEFF2627, 0x0c430573},
+{10100000, DIF_BPF_COEFF2829, 0xf5a1f37d},
+{10100000, DIF_BPF_COEFF3031, 0x03990fba},
+{10100000, DIF_BPF_COEFF3233, 0x05c7f2f8},
+{10100000, DIF_BPF_COEFF3435, 0xf222050d},
+{10100000, DIF_BPF_COEFF36, 0x110d0000},
+/* END - DIF BPF register values from 101_quant.dat*/
+
+
+/*case 10200000:*/
+/* BEGIN - DIF BPF register values from 102_quant.dat*/
+{10200000, DIF_BPF_COEFF01, 0x00000002},
+{10200000, DIF_BPF_COEFF23, 0x0008fffe},
+{10200000, DIF_BPF_COEFF45, 0xffdfffe7},
+{10200000, DIF_BPF_COEFF67, 0x003f006e},
+{10200000, DIF_BPF_COEFF89, 0xffd6ff0f},
+{10200000, DIF_BPF_COEFF1011, 0xff96014a},
+{10200000, DIF_BPF_COEFF1213, 0x0197ff1f},
+{10200000, DIF_BPF_COEFF1415, 0xfd05ff3e},
+{10200000, DIF_BPF_COEFF1617, 0x03b0037c},
+{10200000, DIF_BPF_COEFF1819, 0xfd59f9b7},
+{10200000, DIF_BPF_COEFF2021, 0xff5d0781},
+{10200000, DIF_BPF_COEFF2223, 0x0585fa56},
+{10200000, DIF_BPF_COEFF2425, 0xf5e4006f},
+{10200000, DIF_BPF_COEFF2627, 0x0bf906c4},
+{10200000, DIF_BPF_COEFF2829, 0xf69df2e0},
+{10200000, DIF_BPF_COEFF3031, 0x02790fa2},
+{10200000, DIF_BPF_COEFF3233, 0x0688f35d},
+{10200000, DIF_BPF_COEFF3435, 0xf1e604d8},
+{10200000, DIF_BPF_COEFF36, 0x110d0000},
+/* END - DIF BPF register values from 102_quant.dat*/
+
+
+/*case 10300000:*/
+/* BEGIN - DIF BPF register values from 103_quant.dat*/
+{10300000, DIF_BPF_COEFF01, 0xffff0001},
+{10300000, DIF_BPF_COEFF23, 0x00090005},
+{10300000, DIF_BPF_COEFF45, 0xffe4ffd6},
+{10300000, DIF_BPF_COEFF67, 0x0025007e},
+{10300000, DIF_BPF_COEFF89, 0x0014ff20},
+{10300000, DIF_BPF_COEFF1011, 0xff3c00f0},
+{10300000, DIF_BPF_COEFF1213, 0x01e1ffd0},
+{10300000, DIF_BPF_COEFF1415, 0xfd12fe5c},
+{10300000, DIF_BPF_COEFF1617, 0x03110433},
+{10300000, DIF_BPF_COEFF1819, 0xfe88f996},
+{10300000, DIF_BPF_COEFF2021, 0xfdf106d1},
+{10300000, DIF_BPF_COEFF2223, 0x06aafbb7},
+{10300000, DIF_BPF_COEFF2425, 0xf57efed8},
+{10300000, DIF_BPF_COEFF2627, 0x0b7e07ff},
+{10300000, DIF_BPF_COEFF2829, 0xf7b0f25e},
+{10300000, DIF_BPF_COEFF3031, 0x01560f7a},
+{10300000, DIF_BPF_COEFF3233, 0x0745f3c7},
+{10300000, DIF_BPF_COEFF3435, 0xf1ac04a4},
+{10300000, DIF_BPF_COEFF36, 0x110d0000},
+/* END - DIF BPF register values from 103_quant.dat*/
+
+
+/*case 10400000:*/
+/* BEGIN - DIF BPF register values from 104_quant.dat*/
+{10400000, DIF_BPF_COEFF01, 0xffffffff},
+{10400000, DIF_BPF_COEFF23, 0x0008000c},
+{10400000, DIF_BPF_COEFF45, 0xffedffcb},
+{10400000, DIF_BPF_COEFF67, 0x0005007d},
+{10400000, DIF_BPF_COEFF89, 0x0050ff4c},
+{10400000, DIF_BPF_COEFF1011, 0xfef6007e},
+{10400000, DIF_BPF_COEFF1213, 0x01ff0086},
+{10400000, DIF_BPF_COEFF1415, 0xfd58fd97},
+{10400000, DIF_BPF_COEFF1617, 0x024104ad},
+{10400000, DIF_BPF_COEFF1819, 0xffcaf9c0},
+{10400000, DIF_BPF_COEFF2021, 0xfc9905e2},
+{10400000, DIF_BPF_COEFF2223, 0x079afd35},
+{10400000, DIF_BPF_COEFF2425, 0xf555fd46},
+{10400000, DIF_BPF_COEFF2627, 0x0ad50920},
+{10400000, DIF_BPF_COEFF2829, 0xf8d9f1f6},
+{10400000, DIF_BPF_COEFF3031, 0x00310f43},
+{10400000, DIF_BPF_COEFF3233, 0x07fdf435},
+{10400000, DIF_BPF_COEFF3435, 0xf174046f},
+{10400000, DIF_BPF_COEFF36, 0x110d0000},
+/* END - DIF BPF register values from 104_quant.dat*/
+
+
+/*case 10500000:*/
+/* BEGIN - DIF BPF register values from 105_quant.dat*/
+{10500000, DIF_BPF_COEFF01, 0xfffffffe},
+{10500000, DIF_BPF_COEFF23, 0x00050011},
+{10500000, DIF_BPF_COEFF45, 0xfffaffc8},
+{10500000, DIF_BPF_COEFF67, 0xffe5006b},
+{10500000, DIF_BPF_COEFF89, 0x0082ff8c},
+{10500000, DIF_BPF_COEFF1011, 0xfecc0000},
+{10500000, DIF_BPF_COEFF1213, 0x01f00130},
+{10500000, DIF_BPF_COEFF1415, 0xfdd2fcfc},
+{10500000, DIF_BPF_COEFF1617, 0x014d04e3},
+{10500000, DIF_BPF_COEFF1819, 0x010efa32},
+{10500000, DIF_BPF_COEFF2021, 0xfb6404bf},
+{10500000, DIF_BPF_COEFF2223, 0x084efec5},
+{10500000, DIF_BPF_COEFF2425, 0xf569fbc2},
+{10500000, DIF_BPF_COEFF2627, 0x0a000a23},
+{10500000, DIF_BPF_COEFF2829, 0xfa15f1ab},
+{10500000, DIF_BPF_COEFF3031, 0xff0b0efc},
+{10500000, DIF_BPF_COEFF3233, 0x08b0f4a7},
+{10500000, DIF_BPF_COEFF3435, 0xf13f043a},
+{10500000, DIF_BPF_COEFF36, 0x110d0000},
+/* END - DIF BPF register values from 105_quant.dat*/
+
+
+/*case 10600000:*/
+/* BEGIN - DIF BPF register values from 106_quant.dat*/
+{10600000, DIF_BPF_COEFF01, 0x0000fffd},
+{10600000, DIF_BPF_COEFF23, 0x00020012},
+{10600000, DIF_BPF_COEFF45, 0x0007ffcd},
+{10600000, DIF_BPF_COEFF67, 0xffc9004c},
+{10600000, DIF_BPF_COEFF89, 0x00a4ffd9},
+{10600000, DIF_BPF_COEFF1011, 0xfec3ff82},
+{10600000, DIF_BPF_COEFF1213, 0x01b401c1},
+{10600000, DIF_BPF_COEFF1415, 0xfe76fc97},
+{10600000, DIF_BPF_COEFF1617, 0x004404d2},
+{10600000, DIF_BPF_COEFF1819, 0x0245fae8},
+{10600000, DIF_BPF_COEFF2021, 0xfa5f0370},
+{10600000, DIF_BPF_COEFF2223, 0x08c1005f},
+{10600000, DIF_BPF_COEFF2425, 0xf5bcfa52},
+{10600000, DIF_BPF_COEFF2627, 0x09020b04},
+{10600000, DIF_BPF_COEFF2829, 0xfb60f17b},
+{10600000, DIF_BPF_COEFF3031, 0xfde70ea6},
+{10600000, DIF_BPF_COEFF3233, 0x095df51e},
+{10600000, DIF_BPF_COEFF3435, 0xf10c0405},
+{10600000, DIF_BPF_COEFF36, 0x110d0000},
+/* END - DIF BPF register values from 106_quant.dat*/
+
+
+/*case 10700000:*/
+/* BEGIN - DIF BPF register values from 107_quant.dat*/
+{10700000, DIF_BPF_COEFF01, 0x0000fffd},
+{10700000, DIF_BPF_COEFF23, 0xffff0011},
+{10700000, DIF_BPF_COEFF45, 0x0014ffdb},
+{10700000, DIF_BPF_COEFF67, 0xffb40023},
+{10700000, DIF_BPF_COEFF89, 0x00b2002a},
+{10700000, DIF_BPF_COEFF1011, 0xfedbff10},
+{10700000, DIF_BPF_COEFF1213, 0x0150022d},
+{10700000, DIF_BPF_COEFF1415, 0xff38fc6f},
+{10700000, DIF_BPF_COEFF1617, 0xff36047b},
+{10700000, DIF_BPF_COEFF1819, 0x035efbda},
+{10700000, DIF_BPF_COEFF2021, 0xf9940202},
+{10700000, DIF_BPF_COEFF2223, 0x08ee01f5},
+{10700000, DIF_BPF_COEFF2425, 0xf649f8fe},
+{10700000, DIF_BPF_COEFF2627, 0x07e10bc2},
+{10700000, DIF_BPF_COEFF2829, 0xfcb6f169},
+{10700000, DIF_BPF_COEFF3031, 0xfcc60e42},
+{10700000, DIF_BPF_COEFF3233, 0x0a04f599},
+{10700000, DIF_BPF_COEFF3435, 0xf0db03d0},
+{10700000, DIF_BPF_COEFF36, 0x110d0000},
+/* END - DIF BPF register values from 107_quant.dat*/
+
+
+/*case 10800000:*/
+/* BEGIN - DIF BPF register values from 108_quant.dat*/
+{10800000, DIF_BPF_COEFF01, 0x0000fffd},
+{10800000, DIF_BPF_COEFF23, 0xfffb000d},
+{10800000, DIF_BPF_COEFF45, 0x001dffed},
+{10800000, DIF_BPF_COEFF67, 0xffaafff5},
+{10800000, DIF_BPF_COEFF89, 0x00aa0077},
+{10800000, DIF_BPF_COEFF1011, 0xff13feb6},
+{10800000, DIF_BPF_COEFF1213, 0x00ce026b},
+{10800000, DIF_BPF_COEFF1415, 0x000afc85},
+{10800000, DIF_BPF_COEFF1617, 0xfe3503e3},
+{10800000, DIF_BPF_COEFF1819, 0x044cfcfb},
+{10800000, DIF_BPF_COEFF2021, 0xf90c0082},
+{10800000, DIF_BPF_COEFF2223, 0x08d5037f},
+{10800000, DIF_BPF_COEFF2425, 0xf710f7cc},
+{10800000, DIF_BPF_COEFF2627, 0x069f0c59},
+{10800000, DIF_BPF_COEFF2829, 0xfe16f173},
+{10800000, DIF_BPF_COEFF3031, 0xfbaa0dcf},
+{10800000, DIF_BPF_COEFF3233, 0x0aa5f617},
+{10800000, DIF_BPF_COEFF3435, 0xf0ad039b},
+{10800000, DIF_BPF_COEFF36, 0x110d0000},
+/* END - DIF BPF register values from 108_quant.dat*/
+
+
+/*case 10900000:*/
+/* BEGIN - DIF BPF register values from 109_quant.dat*/
+{10900000, DIF_BPF_COEFF01, 0x0000fffe},
+{10900000, DIF_BPF_COEFF23, 0xfff90006},
+{10900000, DIF_BPF_COEFF45, 0x00210003},
+{10900000, DIF_BPF_COEFF67, 0xffacffc8},
+{10900000, DIF_BPF_COEFF89, 0x008e00b6},
+{10900000, DIF_BPF_COEFF1011, 0xff63fe7c},
+{10900000, DIF_BPF_COEFF1213, 0x003a0275},
+{10900000, DIF_BPF_COEFF1415, 0x00dafcda},
+{10900000, DIF_BPF_COEFF1617, 0xfd510313},
+{10900000, DIF_BPF_COEFF1819, 0x0501fe40},
+{10900000, DIF_BPF_COEFF2021, 0xf8cbfefd},
+{10900000, DIF_BPF_COEFF2223, 0x087604f0},
+{10900000, DIF_BPF_COEFF2425, 0xf80af6c2},
+{10900000, DIF_BPF_COEFF2627, 0x05430cc8},
+{10900000, DIF_BPF_COEFF2829, 0xff7af19a},
+{10900000, DIF_BPF_COEFF3031, 0xfa940d4e},
+{10900000, DIF_BPF_COEFF3233, 0x0b3ff699},
+{10900000, DIF_BPF_COEFF3435, 0xf0810365},
+{10900000, DIF_BPF_COEFF36, 0x110d0000},
+/* END - DIF BPF register values from 109_quant.dat*/
+
+
+/*case 11000000:*/
+/* BEGIN - DIF BPF register values from 110_quant.dat*/
+{11000000, DIF_BPF_COEFF01, 0x0001ffff},
+{11000000, DIF_BPF_COEFF23, 0xfff8ffff},
+{11000000, DIF_BPF_COEFF45, 0x00210018},
+{11000000, DIF_BPF_COEFF67, 0xffbaffa3},
+{11000000, DIF_BPF_COEFF89, 0x006000e1},
+{11000000, DIF_BPF_COEFF1011, 0xffc4fe68},
+{11000000, DIF_BPF_COEFF1213, 0xffa0024b},
+{11000000, DIF_BPF_COEFF1415, 0x019afd66},
+{11000000, DIF_BPF_COEFF1617, 0xfc990216},
+{11000000, DIF_BPF_COEFF1819, 0x0575ff99},
+{11000000, DIF_BPF_COEFF2021, 0xf8d4fd81},
+{11000000, DIF_BPF_COEFF2223, 0x07d40640},
+{11000000, DIF_BPF_COEFF2425, 0xf932f5e6},
+{11000000, DIF_BPF_COEFF2627, 0x03d20d0d},
+{11000000, DIF_BPF_COEFF2829, 0x00dff1de},
+{11000000, DIF_BPF_COEFF3031, 0xf9860cbf},
+{11000000, DIF_BPF_COEFF3233, 0x0bd1f71e},
+{11000000, DIF_BPF_COEFF3435, 0xf058032f},
+{11000000, DIF_BPF_COEFF36, 0x110d0000},
+/* END - DIF BPF register values from 110_quant.dat*/
+
+
+/*case 11100000:*/
+/* BEGIN - DIF BPF register values from 111_quant.dat*/
+{11100000, DIF_BPF_COEFF01, 0x00010000},
+{11100000, DIF_BPF_COEFF23, 0xfff8fff8},
+{11100000, DIF_BPF_COEFF45, 0x001b0029},
+{11100000, DIF_BPF_COEFF67, 0xffd1ff8a},
+{11100000, DIF_BPF_COEFF89, 0x002600f2},
+{11100000, DIF_BPF_COEFF1011, 0x002cfe7c},
+{11100000, DIF_BPF_COEFF1213, 0xff0f01f0},
+{11100000, DIF_BPF_COEFF1415, 0x023bfe20},
+{11100000, DIF_BPF_COEFF1617, 0xfc1700fa},
+{11100000, DIF_BPF_COEFF1819, 0x05a200f7},
+{11100000, DIF_BPF_COEFF2021, 0xf927fc1c},
+{11100000, DIF_BPF_COEFF2223, 0x06f40765},
+{11100000, DIF_BPF_COEFF2425, 0xfa82f53b},
+{11100000, DIF_BPF_COEFF2627, 0x02510d27},
+{11100000, DIF_BPF_COEFF2829, 0x0243f23d},
+{11100000, DIF_BPF_COEFF3031, 0xf8810c24},
+{11100000, DIF_BPF_COEFF3233, 0x0c5cf7a7},
+{11100000, DIF_BPF_COEFF3435, 0xf03102fa},
+{11100000, DIF_BPF_COEFF36, 0x110d0000},
+/* END - DIF BPF register values from 111_quant.dat*/
+
+
+/*case 11200000:*/
+/* BEGIN - DIF BPF register values from 112_quant.dat*/
+{11200000, DIF_BPF_COEFF01, 0x00010002},
+{11200000, DIF_BPF_COEFF23, 0xfffafff2},
+{11200000, DIF_BPF_COEFF45, 0x00110035},
+{11200000, DIF_BPF_COEFF67, 0xfff0ff81},
+{11200000, DIF_BPF_COEFF89, 0xffe700e7},
+{11200000, DIF_BPF_COEFF1011, 0x008ffeb6},
+{11200000, DIF_BPF_COEFF1213, 0xfe94016d},
+{11200000, DIF_BPF_COEFF1415, 0x02b0fefb},
+{11200000, DIF_BPF_COEFF1617, 0xfbd3ffd1},
+{11200000, DIF_BPF_COEFF1819, 0x05850249},
+{11200000, DIF_BPF_COEFF2021, 0xf9c1fadb},
+{11200000, DIF_BPF_COEFF2223, 0x05de0858},
+{11200000, DIF_BPF_COEFF2425, 0xfbf2f4c4},
+{11200000, DIF_BPF_COEFF2627, 0x00c70d17},
+{11200000, DIF_BPF_COEFF2829, 0x03a0f2b8},
+{11200000, DIF_BPF_COEFF3031, 0xf7870b7c},
+{11200000, DIF_BPF_COEFF3233, 0x0cdff833},
+{11200000, DIF_BPF_COEFF3435, 0xf00d02c4},
+{11200000, DIF_BPF_COEFF36, 0x110d0000},
+/* END - DIF BPF register values from 112_quant.dat*/
+
+
+/*case 11300000:*/
+/* BEGIN - DIF BPF register values from 113_quant.dat*/
+{11300000, DIF_BPF_COEFF01, 0x00000003},
+{11300000, DIF_BPF_COEFF23, 0xfffdffee},
+{11300000, DIF_BPF_COEFF45, 0x00040038},
+{11300000, DIF_BPF_COEFF67, 0x0010ff88},
+{11300000, DIF_BPF_COEFF89, 0xffac00c2},
+{11300000, DIF_BPF_COEFF1011, 0x00e2ff10},
+{11300000, DIF_BPF_COEFF1213, 0xfe3900cb},
+{11300000, DIF_BPF_COEFF1415, 0x02f1ffe9},
+{11300000, DIF_BPF_COEFF1617, 0xfbd3feaa},
+{11300000, DIF_BPF_COEFF1819, 0x05210381},
+{11300000, DIF_BPF_COEFF2021, 0xfa9cf9c8},
+{11300000, DIF_BPF_COEFF2223, 0x04990912},
+{11300000, DIF_BPF_COEFF2425, 0xfd7af484},
+{11300000, DIF_BPF_COEFF2627, 0xff390cdb},
+{11300000, DIF_BPF_COEFF2829, 0x04f4f34d},
+{11300000, DIF_BPF_COEFF3031, 0xf69a0ac9},
+{11300000, DIF_BPF_COEFF3233, 0x0d5af8c1},
+{11300000, DIF_BPF_COEFF3435, 0xefec028e},
+{11300000, DIF_BPF_COEFF36, 0x110d0000},
+/* END - DIF BPF register values from 113_quant.dat*/
+
+
+/*case 11400000:*/
+/* BEGIN - DIF BPF register values from 114_quant.dat*/
+{11400000, DIF_BPF_COEFF01, 0x00000003},
+{11400000, DIF_BPF_COEFF23, 0x0000ffee},
+{11400000, DIF_BPF_COEFF45, 0xfff60033},
+{11400000, DIF_BPF_COEFF67, 0x002fff9f},
+{11400000, DIF_BPF_COEFF89, 0xff7b0087},
+{11400000, DIF_BPF_COEFF1011, 0x011eff82},
+{11400000, DIF_BPF_COEFF1213, 0xfe080018},
+{11400000, DIF_BPF_COEFF1415, 0x02f900d8},
+{11400000, DIF_BPF_COEFF1617, 0xfc17fd96},
+{11400000, DIF_BPF_COEFF1819, 0x04790490},
+{11400000, DIF_BPF_COEFF2021, 0xfbadf8ed},
+{11400000, DIF_BPF_COEFF2223, 0x032f098e},
+{11400000, DIF_BPF_COEFF2425, 0xff10f47d},
+{11400000, DIF_BPF_COEFF2627, 0xfdaf0c75},
+{11400000, DIF_BPF_COEFF2829, 0x063cf3fc},
+{11400000, DIF_BPF_COEFF3031, 0xf5ba0a0b},
+{11400000, DIF_BPF_COEFF3233, 0x0dccf952},
+{11400000, DIF_BPF_COEFF3435, 0xefcd0258},
+{11400000, DIF_BPF_COEFF36, 0x110d0000},
+/* END - DIF BPF register values from 114_quant.dat*/
+
+
+/*case 11500000:*/
+/* BEGIN - DIF BPF register values from 115_quant.dat*/
+{11500000, DIF_BPF_COEFF01, 0x00000003},
+{11500000, DIF_BPF_COEFF23, 0x0004fff1},
+{11500000, DIF_BPF_COEFF45, 0xffea0026},
+{11500000, DIF_BPF_COEFF67, 0x0046ffc3},
+{11500000, DIF_BPF_COEFF89, 0xff5a003c},
+{11500000, DIF_BPF_COEFF1011, 0x013b0000},
+{11500000, DIF_BPF_COEFF1213, 0xfe04ff63},
+{11500000, DIF_BPF_COEFF1415, 0x02c801b8},
+{11500000, DIF_BPF_COEFF1617, 0xfc99fca6},
+{11500000, DIF_BPF_COEFF1819, 0x0397056a},
+{11500000, DIF_BPF_COEFF2021, 0xfcecf853},
+{11500000, DIF_BPF_COEFF2223, 0x01ad09c9},
+{11500000, DIF_BPF_COEFF2425, 0x00acf4ad},
+{11500000, DIF_BPF_COEFF2627, 0xfc2e0be7},
+{11500000, DIF_BPF_COEFF2829, 0x0773f4c2},
+{11500000, DIF_BPF_COEFF3031, 0xf4e90943},
+{11500000, DIF_BPF_COEFF3233, 0x0e35f9e6},
+{11500000, DIF_BPF_COEFF3435, 0xefb10221},
+{11500000, DIF_BPF_COEFF36, 0x110d0000},
+/* END - DIF BPF register values from 115_quant.dat*/
+
+
+/*case 11600000:*/
+/* BEGIN - DIF BPF register values from 116_quant.dat*/
+{11600000, DIF_BPF_COEFF01, 0x00000002},
+{11600000, DIF_BPF_COEFF23, 0x0007fff6},
+{11600000, DIF_BPF_COEFF45, 0xffe20014},
+{11600000, DIF_BPF_COEFF67, 0x0054ffee},
+{11600000, DIF_BPF_COEFF89, 0xff4effeb},
+{11600000, DIF_BPF_COEFF1011, 0x0137007e},
+{11600000, DIF_BPF_COEFF1213, 0xfe2efebb},
+{11600000, DIF_BPF_COEFF1415, 0x0260027a},
+{11600000, DIF_BPF_COEFF1617, 0xfd51fbe6},
+{11600000, DIF_BPF_COEFF1819, 0x02870605},
+{11600000, DIF_BPF_COEFF2021, 0xfe4af7fe},
+{11600000, DIF_BPF_COEFF2223, 0x001d09c1},
+{11600000, DIF_BPF_COEFF2425, 0x0243f515},
+{11600000, DIF_BPF_COEFF2627, 0xfabd0b32},
+{11600000, DIF_BPF_COEFF2829, 0x0897f59e},
+{11600000, DIF_BPF_COEFF3031, 0xf4280871},
+{11600000, DIF_BPF_COEFF3233, 0x0e95fa7c},
+{11600000, DIF_BPF_COEFF3435, 0xef9701eb},
+{11600000, DIF_BPF_COEFF36, 0x110d0000},
+/* END - DIF BPF register values from 116_quant.dat*/
+
+
+/*case 11700000:*/
+/* BEGIN - DIF BPF register values from 117_quant.dat*/
+{11700000, DIF_BPF_COEFF01, 0xffff0001},
+{11700000, DIF_BPF_COEFF23, 0x0008fffd},
+{11700000, DIF_BPF_COEFF45, 0xffdeffff},
+{11700000, DIF_BPF_COEFF67, 0x0056001d},
+{11700000, DIF_BPF_COEFF89, 0xff57ff9c},
+{11700000, DIF_BPF_COEFF1011, 0x011300f0},
+{11700000, DIF_BPF_COEFF1213, 0xfe82fe2e},
+{11700000, DIF_BPF_COEFF1415, 0x01ca0310},
+{11700000, DIF_BPF_COEFF1617, 0xfe35fb62},
+{11700000, DIF_BPF_COEFF1819, 0x0155065a},
+{11700000, DIF_BPF_COEFF2021, 0xffbaf7f2},
+{11700000, DIF_BPF_COEFF2223, 0xfe8c0977},
+{11700000, DIF_BPF_COEFF2425, 0x03cef5b2},
+{11700000, DIF_BPF_COEFF2627, 0xf9610a58},
+{11700000, DIF_BPF_COEFF2829, 0x09a5f68f},
+{11700000, DIF_BPF_COEFF3031, 0xf3790797},
+{11700000, DIF_BPF_COEFF3233, 0x0eebfb14},
+{11700000, DIF_BPF_COEFF3435, 0xef8001b5},
+{11700000, DIF_BPF_COEFF36, 0x110d0000},
+/* END - DIF BPF register values from 117_quant.dat*/
+
+
+/*case 11800000:*/
+/* BEGIN - DIF BPF register values from 118_quant.dat*/
+{11800000, DIF_BPF_COEFF01, 0xffff0000},
+{11800000, DIF_BPF_COEFF23, 0x00080004},
+{11800000, DIF_BPF_COEFF45, 0xffe0ffe9},
+{11800000, DIF_BPF_COEFF67, 0x004c0047},
+{11800000, DIF_BPF_COEFF89, 0xff75ff58},
+{11800000, DIF_BPF_COEFF1011, 0x00d1014a},
+{11800000, DIF_BPF_COEFF1213, 0xfef9fdc8},
+{11800000, DIF_BPF_COEFF1415, 0x0111036f},
+{11800000, DIF_BPF_COEFF1617, 0xff36fb21},
+{11800000, DIF_BPF_COEFF1819, 0x00120665},
+{11800000, DIF_BPF_COEFF2021, 0x012df82e},
+{11800000, DIF_BPF_COEFF2223, 0xfd0708ec},
+{11800000, DIF_BPF_COEFF2425, 0x0542f682},
+{11800000, DIF_BPF_COEFF2627, 0xf81f095c},
+{11800000, DIF_BPF_COEFF2829, 0x0a9af792},
+{11800000, DIF_BPF_COEFF3031, 0xf2db06b5},
+{11800000, DIF_BPF_COEFF3233, 0x0f38fbad},
+{11800000, DIF_BPF_COEFF3435, 0xef6c017e},
+{11800000, DIF_BPF_COEFF36, 0x110d0000},
+/* END - DIF BPF register values from 118_quant.dat*/
+
+
+/*case 11900000:*/
+/* BEGIN - DIF BPF register values from 119_quant.dat*/
+{11900000, DIF_BPF_COEFF01, 0xffffffff},
+{11900000, DIF_BPF_COEFF23, 0x0007000b},
+{11900000, DIF_BPF_COEFF45, 0xffe7ffd8},
+{11900000, DIF_BPF_COEFF67, 0x00370068},
+{11900000, DIF_BPF_COEFF89, 0xffa4ff28},
+{11900000, DIF_BPF_COEFF1011, 0x00790184},
+{11900000, DIF_BPF_COEFF1213, 0xff87fd91},
+{11900000, DIF_BPF_COEFF1415, 0x00430392},
+{11900000, DIF_BPF_COEFF1617, 0x0044fb26},
+{11900000, DIF_BPF_COEFF1819, 0xfece0626},
+{11900000, DIF_BPF_COEFF2021, 0x0294f8b2},
+{11900000, DIF_BPF_COEFF2223, 0xfb990825},
+{11900000, DIF_BPF_COEFF2425, 0x0698f77f},
+{11900000, DIF_BPF_COEFF2627, 0xf6fe0842},
+{11900000, DIF_BPF_COEFF2829, 0x0b73f8a7},
+{11900000, DIF_BPF_COEFF3031, 0xf25105cd},
+{11900000, DIF_BPF_COEFF3233, 0x0f7bfc48},
+{11900000, DIF_BPF_COEFF3435, 0xef5a0148},
+{11900000, DIF_BPF_COEFF36, 0x110d0000},
+/* END - DIF BPF register values from 119_quant.dat*/
+
+
+/*case 12000000:*/
+/* BEGIN - DIF BPF register values from 120_quant.dat*/
+{12000000, DIF_BPF_COEFF01, 0x0000fffe},
+{12000000, DIF_BPF_COEFF23, 0x00050010},
+{12000000, DIF_BPF_COEFF45, 0xfff2ffcc},
+{12000000, DIF_BPF_COEFF67, 0x001b007b},
+{12000000, DIF_BPF_COEFF89, 0xffdfff10},
+{12000000, DIF_BPF_COEFF1011, 0x00140198},
+{12000000, DIF_BPF_COEFF1213, 0x0020fd8e},
+{12000000, DIF_BPF_COEFF1415, 0xff710375},
+{12000000, DIF_BPF_COEFF1617, 0x014dfb73},
+{12000000, DIF_BPF_COEFF1819, 0xfd9a059f},
+{12000000, DIF_BPF_COEFF2021, 0x03e0f978},
+{12000000, DIF_BPF_COEFF2223, 0xfa4e0726},
+{12000000, DIF_BPF_COEFF2425, 0x07c8f8a7},
+{12000000, DIF_BPF_COEFF2627, 0xf600070c},
+{12000000, DIF_BPF_COEFF2829, 0x0c2ff9c9},
+{12000000, DIF_BPF_COEFF3031, 0xf1db04de},
+{12000000, DIF_BPF_COEFF3233, 0x0fb4fce5},
+{12000000, DIF_BPF_COEFF3435, 0xef4b0111},
+{12000000, DIF_BPF_COEFF36, 0x110d0000},
+/* END - DIF BPF register values from 120_quant.dat*/
+
+
+/*case 12100000:*/
+/* BEGIN - DIF BPF register values from 121_quant.dat*/
+{12100000, DIF_BPF_COEFF01, 0x0000fffd},
+{12100000, DIF_BPF_COEFF23, 0x00010012},
+{12100000, DIF_BPF_COEFF45, 0xffffffc8},
+{12100000, DIF_BPF_COEFF67, 0xfffb007e},
+{12100000, DIF_BPF_COEFF89, 0x001dff14},
+{12100000, DIF_BPF_COEFF1011, 0xffad0184},
+{12100000, DIF_BPF_COEFF1213, 0x00b7fdbe},
+{12100000, DIF_BPF_COEFF1415, 0xfea9031b},
+{12100000, DIF_BPF_COEFF1617, 0x0241fc01},
+{12100000, DIF_BPF_COEFF1819, 0xfc8504d6},
+{12100000, DIF_BPF_COEFF2021, 0x0504fa79},
+{12100000, DIF_BPF_COEFF2223, 0xf93005f6},
+{12100000, DIF_BPF_COEFF2425, 0x08caf9f2},
+{12100000, DIF_BPF_COEFF2627, 0xf52b05c0},
+{12100000, DIF_BPF_COEFF2829, 0x0ccbfaf9},
+{12100000, DIF_BPF_COEFF3031, 0xf17903eb},
+{12100000, DIF_BPF_COEFF3233, 0x0fe3fd83},
+{12100000, DIF_BPF_COEFF3435, 0xef3f00db},
+{12100000, DIF_BPF_COEFF36, 0x110d0000},
+/* END - DIF BPF register values from 121_quant.dat*/
+
+
+/*case 12200000:*/
+/* BEGIN - DIF BPF register values from 122_quant.dat*/
+{12200000, DIF_BPF_COEFF01, 0x0000fffd},
+{12200000, DIF_BPF_COEFF23, 0xfffe0011},
+{12200000, DIF_BPF_COEFF45, 0x000cffcc},
+{12200000, DIF_BPF_COEFF67, 0xffdb0071},
+{12200000, DIF_BPF_COEFF89, 0x0058ff32},
+{12200000, DIF_BPF_COEFF1011, 0xff4f014a},
+{12200000, DIF_BPF_COEFF1213, 0x013cfe1f},
+{12200000, DIF_BPF_COEFF1415, 0xfdfb028a},
+{12200000, DIF_BPF_COEFF1617, 0x0311fcc9},
+{12200000, DIF_BPF_COEFF1819, 0xfb9d03d6},
+{12200000, DIF_BPF_COEFF2021, 0x05f4fbad},
+{12200000, DIF_BPF_COEFF2223, 0xf848049d},
+{12200000, DIF_BPF_COEFF2425, 0x0999fb5b},
+{12200000, DIF_BPF_COEFF2627, 0xf4820461},
+{12200000, DIF_BPF_COEFF2829, 0x0d46fc32},
+{12200000, DIF_BPF_COEFF3031, 0xf12d02f4},
+{12200000, DIF_BPF_COEFF3233, 0x1007fe21},
+{12200000, DIF_BPF_COEFF3435, 0xef3600a4},
+{12200000, DIF_BPF_COEFF36, 0x110d0000},
+/* END - DIF BPF register values from 122_quant.dat*/
+
+
+/*case 12300000:*/
+/* BEGIN - DIF BPF register values from 123_quant.dat*/
+{12300000, DIF_BPF_COEFF01, 0x0000fffe},
+{12300000, DIF_BPF_COEFF23, 0xfffa000e},
+{12300000, DIF_BPF_COEFF45, 0x0017ffd9},
+{12300000, DIF_BPF_COEFF67, 0xffc10055},
+{12300000, DIF_BPF_COEFF89, 0x0088ff68},
+{12300000, DIF_BPF_COEFF1011, 0xff0400f0},
+{12300000, DIF_BPF_COEFF1213, 0x01a6fea7},
+{12300000, DIF_BPF_COEFF1415, 0xfd7501cc},
+{12300000, DIF_BPF_COEFF1617, 0x03b0fdc0},
+{12300000, DIF_BPF_COEFF1819, 0xfaef02a8},
+{12300000, DIF_BPF_COEFF2021, 0x06a7fd07},
+{12300000, DIF_BPF_COEFF2223, 0xf79d0326},
+{12300000, DIF_BPF_COEFF2425, 0x0a31fcda},
+{12300000, DIF_BPF_COEFF2627, 0xf40702f3},
+{12300000, DIF_BPF_COEFF2829, 0x0d9ffd72},
+{12300000, DIF_BPF_COEFF3031, 0xf0f601fa},
+{12300000, DIF_BPF_COEFF3233, 0x1021fec0},
+{12300000, DIF_BPF_COEFF3435, 0xef2f006d},
+{12300000, DIF_BPF_COEFF36, 0x110d0000},
+/* END - DIF BPF register values from 123_quant.dat*/
+
+
+/*case 12400000:*/
+/* BEGIN - DIF BPF register values from 124_quant.dat*/
+{12400000, DIF_BPF_COEFF01, 0x0001ffff},
+{12400000, DIF_BPF_COEFF23, 0xfff80007},
+{12400000, DIF_BPF_COEFF45, 0x001fffeb},
+{12400000, DIF_BPF_COEFF67, 0xffaf002d},
+{12400000, DIF_BPF_COEFF89, 0x00a8ffb0},
+{12400000, DIF_BPF_COEFF1011, 0xfed3007e},
+{12400000, DIF_BPF_COEFF1213, 0x01e9ff4c},
+{12400000, DIF_BPF_COEFF1415, 0xfd2000ee},
+{12400000, DIF_BPF_COEFF1617, 0x0413fed8},
+{12400000, DIF_BPF_COEFF1819, 0xfa82015c},
+{12400000, DIF_BPF_COEFF2021, 0x0715fe7d},
+{12400000, DIF_BPF_COEFF2223, 0xf7340198},
+{12400000, DIF_BPF_COEFF2425, 0x0a8dfe69},
+{12400000, DIF_BPF_COEFF2627, 0xf3bd017c},
+{12400000, DIF_BPF_COEFF2829, 0x0dd5feb8},
+{12400000, DIF_BPF_COEFF3031, 0xf0d500fd},
+{12400000, DIF_BPF_COEFF3233, 0x1031ff60},
+{12400000, DIF_BPF_COEFF3435, 0xef2b0037},
+{12400000, DIF_BPF_COEFF36, 0x110d0000},
+/* END - DIF BPF register values from 124_quant.dat*/
+
+
+/*case 12500000:*/
+/* BEGIN - DIF BPF register values from 125_quant.dat*/
+{12500000, DIF_BPF_COEFF01, 0x00010000},
+{12500000, DIF_BPF_COEFF23, 0xfff70000},
+{12500000, DIF_BPF_COEFF45, 0x00220000},
+{12500000, DIF_BPF_COEFF67, 0xffa90000},
+{12500000, DIF_BPF_COEFF89, 0x00b30000},
+{12500000, DIF_BPF_COEFF1011, 0xfec20000},
+{12500000, DIF_BPF_COEFF1213, 0x02000000},
+{12500000, DIF_BPF_COEFF1415, 0xfd030000},
+{12500000, DIF_BPF_COEFF1617, 0x04350000},
+{12500000, DIF_BPF_COEFF1819, 0xfa5e0000},
+{12500000, DIF_BPF_COEFF2021, 0x073b0000},
+{12500000, DIF_BPF_COEFF2223, 0xf7110000},
+{12500000, DIF_BPF_COEFF2425, 0x0aac0000},
+{12500000, DIF_BPF_COEFF2627, 0xf3a40000},
+{12500000, DIF_BPF_COEFF2829, 0x0de70000},
+{12500000, DIF_BPF_COEFF3031, 0xf0c90000},
+{12500000, DIF_BPF_COEFF3233, 0x10360000},
+{12500000, DIF_BPF_COEFF3435, 0xef290000},
+{12500000, DIF_BPF_COEFF36, 0x110d0000},
+/* END - DIF BPF register values from 125_quant.dat*/
+
+
+/*case 12600000:*/
+/* BEGIN - DIF BPF register values from 126_quant.dat*/
+{12600000, DIF_BPF_COEFF01, 0x00010001},
+{12600000, DIF_BPF_COEFF23, 0xfff8fff9},
+{12600000, DIF_BPF_COEFF45, 0x001f0015},
+{12600000, DIF_BPF_COEFF67, 0xffafffd3},
+{12600000, DIF_BPF_COEFF89, 0x00a80050},
+{12600000, DIF_BPF_COEFF1011, 0xfed3ff82},
+{12600000, DIF_BPF_COEFF1213, 0x01e900b4},
+{12600000, DIF_BPF_COEFF1415, 0xfd20ff12},
+{12600000, DIF_BPF_COEFF1617, 0x04130128},
+{12600000, DIF_BPF_COEFF1819, 0xfa82fea4},
+{12600000, DIF_BPF_COEFF2021, 0x07150183},
+{12600000, DIF_BPF_COEFF2223, 0xf734fe68},
+{12600000, DIF_BPF_COEFF2425, 0x0a8d0197},
+{12600000, DIF_BPF_COEFF2627, 0xf3bdfe84},
+{12600000, DIF_BPF_COEFF2829, 0x0dd50148},
+{12600000, DIF_BPF_COEFF3031, 0xf0d5ff03},
+{12600000, DIF_BPF_COEFF3233, 0x103100a0},
+{12600000, DIF_BPF_COEFF3435, 0xef2bffc9},
+{12600000, DIF_BPF_COEFF36, 0x110d0000},
+/* END - DIF BPF register values from 126_quant.dat*/
+
+
+/*case 12700000:*/
+/* BEGIN - DIF BPF register values from 127_quant.dat*/
+{12700000, DIF_BPF_COEFF01, 0x00000002},
+{12700000, DIF_BPF_COEFF23, 0xfffafff2},
+{12700000, DIF_BPF_COEFF45, 0x00170027},
+{12700000, DIF_BPF_COEFF67, 0xffc1ffab},
+{12700000, DIF_BPF_COEFF89, 0x00880098},
+{12700000, DIF_BPF_COEFF1011, 0xff04ff10},
+{12700000, DIF_BPF_COEFF1213, 0x01a60159},
+{12700000, DIF_BPF_COEFF1415, 0xfd75fe34},
+{12700000, DIF_BPF_COEFF1617, 0x03b00240},
+{12700000, DIF_BPF_COEFF1819, 0xfaeffd58},
+{12700000, DIF_BPF_COEFF2021, 0x06a702f9},
+{12700000, DIF_BPF_COEFF2223, 0xf79dfcda},
+{12700000, DIF_BPF_COEFF2425, 0x0a310326},
+{12700000, DIF_BPF_COEFF2627, 0xf407fd0d},
+{12700000, DIF_BPF_COEFF2829, 0x0d9f028e},
+{12700000, DIF_BPF_COEFF3031, 0xf0f6fe06},
+{12700000, DIF_BPF_COEFF3233, 0x10210140},
+{12700000, DIF_BPF_COEFF3435, 0xef2fff93},
+{12700000, DIF_BPF_COEFF36, 0x110d0000},
+/* END - DIF BPF register values from 127_quant.dat*/
+
+
+/*case 12800000:*/
+/* BEGIN - DIF BPF register values from 128_quant.dat*/
+{12800000, DIF_BPF_COEFF01, 0x00000003},
+{12800000, DIF_BPF_COEFF23, 0xfffeffef},
+{12800000, DIF_BPF_COEFF45, 0x000c0034},
+{12800000, DIF_BPF_COEFF67, 0xffdbff8f},
+{12800000, DIF_BPF_COEFF89, 0x005800ce},
+{12800000, DIF_BPF_COEFF1011, 0xff4ffeb6},
+{12800000, DIF_BPF_COEFF1213, 0x013c01e1},
+{12800000, DIF_BPF_COEFF1415, 0xfdfbfd76},
+{12800000, DIF_BPF_COEFF1617, 0x03110337},
+{12800000, DIF_BPF_COEFF1819, 0xfb9dfc2a},
+{12800000, DIF_BPF_COEFF2021, 0x05f40453},
+{12800000, DIF_BPF_COEFF2223, 0xf848fb63},
+{12800000, DIF_BPF_COEFF2425, 0x099904a5},
+{12800000, DIF_BPF_COEFF2627, 0xf482fb9f},
+{12800000, DIF_BPF_COEFF2829, 0x0d4603ce},
+{12800000, DIF_BPF_COEFF3031, 0xf12dfd0c},
+{12800000, DIF_BPF_COEFF3233, 0x100701df},
+{12800000, DIF_BPF_COEFF3435, 0xef36ff5c},
+{12800000, DIF_BPF_COEFF36, 0x110d0000},
+/* END - DIF BPF register values from 128_quant.dat*/
+
+
+/*case 12900000:*/
+/* BEGIN - DIF BPF register values from 129_quant.dat*/
+{12900000, DIF_BPF_COEFF01, 0x00000003},
+{12900000, DIF_BPF_COEFF23, 0x0001ffee},
+{12900000, DIF_BPF_COEFF45, 0xffff0038},
+{12900000, DIF_BPF_COEFF67, 0xfffbff82},
+{12900000, DIF_BPF_COEFF89, 0x001d00ec},
+{12900000, DIF_BPF_COEFF1011, 0xffadfe7c},
+{12900000, DIF_BPF_COEFF1213, 0x00b70242},
+{12900000, DIF_BPF_COEFF1415, 0xfea9fce5},
+{12900000, DIF_BPF_COEFF1617, 0x024103ff},
+{12900000, DIF_BPF_COEFF1819, 0xfc85fb2a},
+{12900000, DIF_BPF_COEFF2021, 0x05040587},
+{12900000, DIF_BPF_COEFF2223, 0xf930fa0a},
+{12900000, DIF_BPF_COEFF2425, 0x08ca060e},
+{12900000, DIF_BPF_COEFF2627, 0xf52bfa40},
+{12900000, DIF_BPF_COEFF2829, 0x0ccb0507},
+{12900000, DIF_BPF_COEFF3031, 0xf179fc15},
+{12900000, DIF_BPF_COEFF3233, 0x0fe3027d},
+{12900000, DIF_BPF_COEFF3435, 0xef3fff25},
+{12900000, DIF_BPF_COEFF36, 0x110d0000},
+/* END - DIF BPF register values from 129_quant.dat*/
+
+
+/*case 13000000:*/
+/* BEGIN - DIF BPF register values from 130_quant.dat*/
+{13000000, DIF_BPF_COEFF01, 0x00000002},
+{13000000, DIF_BPF_COEFF23, 0x0005fff0},
+{13000000, DIF_BPF_COEFF45, 0xfff20034},
+{13000000, DIF_BPF_COEFF67, 0x001bff85},
+{13000000, DIF_BPF_COEFF89, 0xffdf00f0},
+{13000000, DIF_BPF_COEFF1011, 0x0014fe68},
+{13000000, DIF_BPF_COEFF1213, 0x00200272},
+{13000000, DIF_BPF_COEFF1415, 0xff71fc8b},
+{13000000, DIF_BPF_COEFF1617, 0x014d048d},
+{13000000, DIF_BPF_COEFF1819, 0xfd9afa61},
+{13000000, DIF_BPF_COEFF2021, 0x03e00688},
+{13000000, DIF_BPF_COEFF2223, 0xfa4ef8da},
+{13000000, DIF_BPF_COEFF2425, 0x07c80759},
+{13000000, DIF_BPF_COEFF2627, 0xf600f8f4},
+{13000000, DIF_BPF_COEFF2829, 0x0c2f0637},
+{13000000, DIF_BPF_COEFF3031, 0xf1dbfb22},
+{13000000, DIF_BPF_COEFF3233, 0x0fb4031b},
+{13000000, DIF_BPF_COEFF3435, 0xef4bfeef},
+{13000000, DIF_BPF_COEFF36, 0x110d0000},
+/* END - DIF BPF register values from 130_quant.dat*/
+
+
+/*case 13100000:*/
+/* BEGIN - DIF BPF register values from 131_quant.dat*/
+{13100000, DIF_BPF_COEFF01, 0xffff0001},
+{13100000, DIF_BPF_COEFF23, 0x0007fff5},
+{13100000, DIF_BPF_COEFF45, 0xffe70028},
+{13100000, DIF_BPF_COEFF67, 0x0037ff98},
+{13100000, DIF_BPF_COEFF89, 0xffa400d8},
+{13100000, DIF_BPF_COEFF1011, 0x0079fe7c},
+{13100000, DIF_BPF_COEFF1213, 0xff87026f},
+{13100000, DIF_BPF_COEFF1415, 0x0043fc6e},
+{13100000, DIF_BPF_COEFF1617, 0x004404da},
+{13100000, DIF_BPF_COEFF1819, 0xfecef9da},
+{13100000, DIF_BPF_COEFF2021, 0x0294074e},
+{13100000, DIF_BPF_COEFF2223, 0xfb99f7db},
+{13100000, DIF_BPF_COEFF2425, 0x06980881},
+{13100000, DIF_BPF_COEFF2627, 0xf6fef7be},
+{13100000, DIF_BPF_COEFF2829, 0x0b730759},
+{13100000, DIF_BPF_COEFF3031, 0xf251fa33},
+{13100000, DIF_BPF_COEFF3233, 0x0f7b03b8},
+{13100000, DIF_BPF_COEFF3435, 0xef5afeb8},
+{13100000, DIF_BPF_COEFF36, 0x110d0000},
+/* END - DIF BPF register values from 131_quant.dat*/
+
+
+/*case 13200000:*/
+/* BEGIN - DIF BPF register values from 132_quant.dat*/
+{13200000, DIF_BPF_COEFF01, 0xffff0000},
+{13200000, DIF_BPF_COEFF23, 0x0008fffc},
+{13200000, DIF_BPF_COEFF45, 0xffe00017},
+{13200000, DIF_BPF_COEFF67, 0x004cffb9},
+{13200000, DIF_BPF_COEFF89, 0xff7500a8},
+{13200000, DIF_BPF_COEFF1011, 0x00d1feb6},
+{13200000, DIF_BPF_COEFF1213, 0xfef90238},
+{13200000, DIF_BPF_COEFF1415, 0x0111fc91},
+{13200000, DIF_BPF_COEFF1617, 0xff3604df},
+{13200000, DIF_BPF_COEFF1819, 0x0012f99b},
+{13200000, DIF_BPF_COEFF2021, 0x012d07d2},
+{13200000, DIF_BPF_COEFF2223, 0xfd07f714},
+{13200000, DIF_BPF_COEFF2425, 0x0542097e},
+{13200000, DIF_BPF_COEFF2627, 0xf81ff6a4},
+{13200000, DIF_BPF_COEFF2829, 0x0a9a086e},
+{13200000, DIF_BPF_COEFF3031, 0xf2dbf94b},
+{13200000, DIF_BPF_COEFF3233, 0x0f380453},
+{13200000, DIF_BPF_COEFF3435, 0xef6cfe82},
+{13200000, DIF_BPF_COEFF36, 0x110d0000},
+/* END - DIF BPF register values from 132_quant.dat*/
+
+
+/*case 13300000:*/
+/* BEGIN - DIF BPF register values from 133_quant.dat*/
+{13300000, DIF_BPF_COEFF01, 0xffffffff},
+{13300000, DIF_BPF_COEFF23, 0x00080003},
+{13300000, DIF_BPF_COEFF45, 0xffde0001},
+{13300000, DIF_BPF_COEFF67, 0x0056ffe3},
+{13300000, DIF_BPF_COEFF89, 0xff570064},
+{13300000, DIF_BPF_COEFF1011, 0x0113ff10},
+{13300000, DIF_BPF_COEFF1213, 0xfe8201d2},
+{13300000, DIF_BPF_COEFF1415, 0x01cafcf0},
+{13300000, DIF_BPF_COEFF1617, 0xfe35049e},
+{13300000, DIF_BPF_COEFF1819, 0x0155f9a6},
+{13300000, DIF_BPF_COEFF2021, 0xffba080e},
+{13300000, DIF_BPF_COEFF2223, 0xfe8cf689},
+{13300000, DIF_BPF_COEFF2425, 0x03ce0a4e},
+{13300000, DIF_BPF_COEFF2627, 0xf961f5a8},
+{13300000, DIF_BPF_COEFF2829, 0x09a50971},
+{13300000, DIF_BPF_COEFF3031, 0xf379f869},
+{13300000, DIF_BPF_COEFF3233, 0x0eeb04ec},
+{13300000, DIF_BPF_COEFF3435, 0xef80fe4b},
+{13300000, DIF_BPF_COEFF36, 0x110d0000},
+/* END - DIF BPF register values from 133_quant.dat*/
+
+
+/*case 13400000:*/
+/* BEGIN - DIF BPF register values from 134_quant.dat*/
+{13400000, DIF_BPF_COEFF01, 0x0000fffe},
+{13400000, DIF_BPF_COEFF23, 0x0007000a},
+{13400000, DIF_BPF_COEFF45, 0xffe2ffec},
+{13400000, DIF_BPF_COEFF67, 0x00540012},
+{13400000, DIF_BPF_COEFF89, 0xff4e0015},
+{13400000, DIF_BPF_COEFF1011, 0x0137ff82},
+{13400000, DIF_BPF_COEFF1213, 0xfe2e0145},
+{13400000, DIF_BPF_COEFF1415, 0x0260fd86},
+{13400000, DIF_BPF_COEFF1617, 0xfd51041a},
+{13400000, DIF_BPF_COEFF1819, 0x0287f9fb},
+{13400000, DIF_BPF_COEFF2021, 0xfe4a0802},
+{13400000, DIF_BPF_COEFF2223, 0x001df63f},
+{13400000, DIF_BPF_COEFF2425, 0x02430aeb},
+{13400000, DIF_BPF_COEFF2627, 0xfabdf4ce},
+{13400000, DIF_BPF_COEFF2829, 0x08970a62},
+{13400000, DIF_BPF_COEFF3031, 0xf428f78f},
+{13400000, DIF_BPF_COEFF3233, 0x0e950584},
+{13400000, DIF_BPF_COEFF3435, 0xef97fe15},
+{13400000, DIF_BPF_COEFF36, 0x110d0000},
+/* END - DIF BPF register values from 134_quant.dat*/
+
+
+/*case 13500000:*/
+/* BEGIN - DIF BPF register values from 135_quant.dat*/
+{13500000, DIF_BPF_COEFF01, 0x0000fffd},
+{13500000, DIF_BPF_COEFF23, 0x0004000f},
+{13500000, DIF_BPF_COEFF45, 0xffeaffda},
+{13500000, DIF_BPF_COEFF67, 0x0046003d},
+{13500000, DIF_BPF_COEFF89, 0xff5affc4},
+{13500000, DIF_BPF_COEFF1011, 0x013b0000},
+{13500000, DIF_BPF_COEFF1213, 0xfe04009d},
+{13500000, DIF_BPF_COEFF1415, 0x02c8fe48},
+{13500000, DIF_BPF_COEFF1617, 0xfc99035a},
+{13500000, DIF_BPF_COEFF1819, 0x0397fa96},
+{13500000, DIF_BPF_COEFF2021, 0xfcec07ad},
+{13500000, DIF_BPF_COEFF2223, 0x01adf637},
+{13500000, DIF_BPF_COEFF2425, 0x00ac0b53},
+{13500000, DIF_BPF_COEFF2627, 0xfc2ef419},
+{13500000, DIF_BPF_COEFF2829, 0x07730b3e},
+{13500000, DIF_BPF_COEFF3031, 0xf4e9f6bd},
+{13500000, DIF_BPF_COEFF3233, 0x0e35061a},
+{13500000, DIF_BPF_COEFF3435, 0xefb1fddf},
+{13500000, DIF_BPF_COEFF36, 0x110d0000},
+/* END - DIF BPF register values from 135_quant.dat*/
+
+
+/*case 13600000:*/
+/* BEGIN - DIF BPF register values from 136_quant.dat*/
+{13600000, DIF_BPF_COEFF01, 0x0000fffd},
+{13600000, DIF_BPF_COEFF23, 0x00000012},
+{13600000, DIF_BPF_COEFF45, 0xfff6ffcd},
+{13600000, DIF_BPF_COEFF67, 0x002f0061},
+{13600000, DIF_BPF_COEFF89, 0xff7bff79},
+{13600000, DIF_BPF_COEFF1011, 0x011e007e},
+{13600000, DIF_BPF_COEFF1213, 0xfe08ffe8},
+{13600000, DIF_BPF_COEFF1415, 0x02f9ff28},
+{13600000, DIF_BPF_COEFF1617, 0xfc17026a},
+{13600000, DIF_BPF_COEFF1819, 0x0479fb70},
+{13600000, DIF_BPF_COEFF2021, 0xfbad0713},
+{13600000, DIF_BPF_COEFF2223, 0x032ff672},
+{13600000, DIF_BPF_COEFF2425, 0xff100b83},
+{13600000, DIF_BPF_COEFF2627, 0xfdaff38b},
+{13600000, DIF_BPF_COEFF2829, 0x063c0c04},
+{13600000, DIF_BPF_COEFF3031, 0xf5baf5f5},
+{13600000, DIF_BPF_COEFF3233, 0x0dcc06ae},
+{13600000, DIF_BPF_COEFF3435, 0xefcdfda8},
+{13600000, DIF_BPF_COEFF36, 0x110d0000},
+/* END - DIF BPF register values from 136_quant.dat*/
+
+
+/*case 13700000:*/
+/* BEGIN - DIF BPF register values from 137_quant.dat*/
+{13700000, DIF_BPF_COEFF01, 0x0000fffd},
+{13700000, DIF_BPF_COEFF23, 0xfffd0012},
+{13700000, DIF_BPF_COEFF45, 0x0004ffc8},
+{13700000, DIF_BPF_COEFF67, 0x00100078},
+{13700000, DIF_BPF_COEFF89, 0xffacff3e},
+{13700000, DIF_BPF_COEFF1011, 0x00e200f0},
+{13700000, DIF_BPF_COEFF1213, 0xfe39ff35},
+{13700000, DIF_BPF_COEFF1415, 0x02f10017},
+{13700000, DIF_BPF_COEFF1617, 0xfbd30156},
+{13700000, DIF_BPF_COEFF1819, 0x0521fc7f},
+{13700000, DIF_BPF_COEFF2021, 0xfa9c0638},
+{13700000, DIF_BPF_COEFF2223, 0x0499f6ee},
+{13700000, DIF_BPF_COEFF2425, 0xfd7a0b7c},
+{13700000, DIF_BPF_COEFF2627, 0xff39f325},
+{13700000, DIF_BPF_COEFF2829, 0x04f40cb3},
+{13700000, DIF_BPF_COEFF3031, 0xf69af537},
+{13700000, DIF_BPF_COEFF3233, 0x0d5a073f},
+{13700000, DIF_BPF_COEFF3435, 0xefecfd72},
+{13700000, DIF_BPF_COEFF36, 0x110d0000},
+/* END - DIF BPF register values from 137_quant.dat*/
+
+
+/*case 13800000:*/
+/* BEGIN - DIF BPF register values from 138_quant.dat*/
+{13800000, DIF_BPF_COEFF01, 0x0001fffe},
+{13800000, DIF_BPF_COEFF23, 0xfffa000e},
+{13800000, DIF_BPF_COEFF45, 0x0011ffcb},
+{13800000, DIF_BPF_COEFF67, 0xfff0007f},
+{13800000, DIF_BPF_COEFF89, 0xffe7ff19},
+{13800000, DIF_BPF_COEFF1011, 0x008f014a},
+{13800000, DIF_BPF_COEFF1213, 0xfe94fe93},
+{13800000, DIF_BPF_COEFF1415, 0x02b00105},
+{13800000, DIF_BPF_COEFF1617, 0xfbd3002f},
+{13800000, DIF_BPF_COEFF1819, 0x0585fdb7},
+{13800000, DIF_BPF_COEFF2021, 0xf9c10525},
+{13800000, DIF_BPF_COEFF2223, 0x05def7a8},
+{13800000, DIF_BPF_COEFF2425, 0xfbf20b3c},
+{13800000, DIF_BPF_COEFF2627, 0x00c7f2e9},
+{13800000, DIF_BPF_COEFF2829, 0x03a00d48},
+{13800000, DIF_BPF_COEFF3031, 0xf787f484},
+{13800000, DIF_BPF_COEFF3233, 0x0cdf07cd},
+{13800000, DIF_BPF_COEFF3435, 0xf00dfd3c},
+{13800000, DIF_BPF_COEFF36, 0x110d0000},
+/* END - DIF BPF register values from 138_quant.dat*/
+
+
+/*case 13900000:*/
+/* BEGIN - DIF BPF register values from 139_quant.dat*/
+{13900000, DIF_BPF_COEFF01, 0x00010000},
+{13900000, DIF_BPF_COEFF23, 0xfff80008},
+{13900000, DIF_BPF_COEFF45, 0x001bffd7},
+{13900000, DIF_BPF_COEFF67, 0xffd10076},
+{13900000, DIF_BPF_COEFF89, 0x0026ff0e},
+{13900000, DIF_BPF_COEFF1011, 0x002c0184},
+{13900000, DIF_BPF_COEFF1213, 0xff0ffe10},
+{13900000, DIF_BPF_COEFF1415, 0x023b01e0},
+{13900000, DIF_BPF_COEFF1617, 0xfc17ff06},
+{13900000, DIF_BPF_COEFF1819, 0x05a2ff09},
+{13900000, DIF_BPF_COEFF2021, 0xf92703e4},
+{13900000, DIF_BPF_COEFF2223, 0x06f4f89b},
+{13900000, DIF_BPF_COEFF2425, 0xfa820ac5},
+{13900000, DIF_BPF_COEFF2627, 0x0251f2d9},
+{13900000, DIF_BPF_COEFF2829, 0x02430dc3},
+{13900000, DIF_BPF_COEFF3031, 0xf881f3dc},
+{13900000, DIF_BPF_COEFF3233, 0x0c5c0859},
+{13900000, DIF_BPF_COEFF3435, 0xf031fd06},
+{13900000, DIF_BPF_COEFF36, 0x110d0000},
+/* END - DIF BPF register values from 139_quant.dat*/
+
+
+/*case 14000000:*/
+/* BEGIN - DIF BPF register values from 140_quant.dat*/
+{14000000, DIF_BPF_COEFF01, 0x00010001},
+{14000000, DIF_BPF_COEFF23, 0xfff80001},
+{14000000, DIF_BPF_COEFF45, 0x0021ffe8},
+{14000000, DIF_BPF_COEFF67, 0xffba005d},
+{14000000, DIF_BPF_COEFF89, 0x0060ff1f},
+{14000000, DIF_BPF_COEFF1011, 0xffc40198},
+{14000000, DIF_BPF_COEFF1213, 0xffa0fdb5},
+{14000000, DIF_BPF_COEFF1415, 0x019a029a},
+{14000000, DIF_BPF_COEFF1617, 0xfc99fdea},
+{14000000, DIF_BPF_COEFF1819, 0x05750067},
+{14000000, DIF_BPF_COEFF2021, 0xf8d4027f},
+{14000000, DIF_BPF_COEFF2223, 0x07d4f9c0},
+{14000000, DIF_BPF_COEFF2425, 0xf9320a1a},
+{14000000, DIF_BPF_COEFF2627, 0x03d2f2f3},
+{14000000, DIF_BPF_COEFF2829, 0x00df0e22},
+{14000000, DIF_BPF_COEFF3031, 0xf986f341},
+{14000000, DIF_BPF_COEFF3233, 0x0bd108e2},
+{14000000, DIF_BPF_COEFF3435, 0xf058fcd1},
+{14000000, DIF_BPF_COEFF36, 0x110d0000},
+/* END - DIF BPF register values from 140_quant.dat*/
+
+
+/*case 14100000:*/
+/* BEGIN - DIF BPF register values from 141_quant.dat*/
+{14100000, DIF_BPF_COEFF01, 0x00000002},
+{14100000, DIF_BPF_COEFF23, 0xfff9fffa},
+{14100000, DIF_BPF_COEFF45, 0x0021fffd},
+{14100000, DIF_BPF_COEFF67, 0xffac0038},
+{14100000, DIF_BPF_COEFF89, 0x008eff4a},
+{14100000, DIF_BPF_COEFF1011, 0xff630184},
+{14100000, DIF_BPF_COEFF1213, 0x003afd8b},
+{14100000, DIF_BPF_COEFF1415, 0x00da0326},
+{14100000, DIF_BPF_COEFF1617, 0xfd51fced},
+{14100000, DIF_BPF_COEFF1819, 0x050101c0},
+{14100000, DIF_BPF_COEFF2021, 0xf8cb0103},
+{14100000, DIF_BPF_COEFF2223, 0x0876fb10},
+{14100000, DIF_BPF_COEFF2425, 0xf80a093e},
+{14100000, DIF_BPF_COEFF2627, 0x0543f338},
+{14100000, DIF_BPF_COEFF2829, 0xff7a0e66},
+{14100000, DIF_BPF_COEFF3031, 0xfa94f2b2},
+{14100000, DIF_BPF_COEFF3233, 0x0b3f0967},
+{14100000, DIF_BPF_COEFF3435, 0xf081fc9b},
+{14100000, DIF_BPF_COEFF36, 0x110d0000},
+/* END - DIF BPF register values from 141_quant.dat*/
+
+
+/*case 14200000:*/
+/* BEGIN - DIF BPF register values from 142_quant.dat*/
+{14200000, DIF_BPF_COEFF01, 0x00000003},
+{14200000, DIF_BPF_COEFF23, 0xfffbfff3},
+{14200000, DIF_BPF_COEFF45, 0x001d0013},
+{14200000, DIF_BPF_COEFF67, 0xffaa000b},
+{14200000, DIF_BPF_COEFF89, 0x00aaff89},
+{14200000, DIF_BPF_COEFF1011, 0xff13014a},
+{14200000, DIF_BPF_COEFF1213, 0x00cefd95},
+{14200000, DIF_BPF_COEFF1415, 0x000a037b},
+{14200000, DIF_BPF_COEFF1617, 0xfe35fc1d},
+{14200000, DIF_BPF_COEFF1819, 0x044c0305},
+{14200000, DIF_BPF_COEFF2021, 0xf90cff7e},
+{14200000, DIF_BPF_COEFF2223, 0x08d5fc81},
+{14200000, DIF_BPF_COEFF2425, 0xf7100834},
+{14200000, DIF_BPF_COEFF2627, 0x069ff3a7},
+{14200000, DIF_BPF_COEFF2829, 0xfe160e8d},
+{14200000, DIF_BPF_COEFF3031, 0xfbaaf231},
+{14200000, DIF_BPF_COEFF3233, 0x0aa509e9},
+{14200000, DIF_BPF_COEFF3435, 0xf0adfc65},
+{14200000, DIF_BPF_COEFF36, 0x110d0000},
+/* END - DIF BPF register values from 142_quant.dat*/
+
+
+/*case 14300000:*/
+/* BEGIN - DIF BPF register values from 143_quant.dat*/
+{14300000, DIF_BPF_COEFF01, 0x00000003},
+{14300000, DIF_BPF_COEFF23, 0xffffffef},
+{14300000, DIF_BPF_COEFF45, 0x00140025},
+{14300000, DIF_BPF_COEFF67, 0xffb4ffdd},
+{14300000, DIF_BPF_COEFF89, 0x00b2ffd6},
+{14300000, DIF_BPF_COEFF1011, 0xfedb00f0},
+{14300000, DIF_BPF_COEFF1213, 0x0150fdd3},
+{14300000, DIF_BPF_COEFF1415, 0xff380391},
+{14300000, DIF_BPF_COEFF1617, 0xff36fb85},
+{14300000, DIF_BPF_COEFF1819, 0x035e0426},
+{14300000, DIF_BPF_COEFF2021, 0xf994fdfe},
+{14300000, DIF_BPF_COEFF2223, 0x08eefe0b},
+{14300000, DIF_BPF_COEFF2425, 0xf6490702},
+{14300000, DIF_BPF_COEFF2627, 0x07e1f43e},
+{14300000, DIF_BPF_COEFF2829, 0xfcb60e97},
+{14300000, DIF_BPF_COEFF3031, 0xfcc6f1be},
+{14300000, DIF_BPF_COEFF3233, 0x0a040a67},
+{14300000, DIF_BPF_COEFF3435, 0xf0dbfc30},
+{14300000, DIF_BPF_COEFF36, 0x110d0000},
+/* END - DIF BPF register values from 143_quant.dat*/
+
+
+/*case 14400000:*/
+/* BEGIN - DIF BPF register values from 144_quant.dat*/
+{14400000, DIF_BPF_COEFF01, 0x00000003},
+{14400000, DIF_BPF_COEFF23, 0x0002ffee},
+{14400000, DIF_BPF_COEFF45, 0x00070033},
+{14400000, DIF_BPF_COEFF67, 0xffc9ffb4},
+{14400000, DIF_BPF_COEFF89, 0x00a40027},
+{14400000, DIF_BPF_COEFF1011, 0xfec3007e},
+{14400000, DIF_BPF_COEFF1213, 0x01b4fe3f},
+{14400000, DIF_BPF_COEFF1415, 0xfe760369},
+{14400000, DIF_BPF_COEFF1617, 0x0044fb2e},
+{14400000, DIF_BPF_COEFF1819, 0x02450518},
+{14400000, DIF_BPF_COEFF2021, 0xfa5ffc90},
+{14400000, DIF_BPF_COEFF2223, 0x08c1ffa1},
+{14400000, DIF_BPF_COEFF2425, 0xf5bc05ae},
+{14400000, DIF_BPF_COEFF2627, 0x0902f4fc},
+{14400000, DIF_BPF_COEFF2829, 0xfb600e85},
+{14400000, DIF_BPF_COEFF3031, 0xfde7f15a},
+{14400000, DIF_BPF_COEFF3233, 0x095d0ae2},
+{14400000, DIF_BPF_COEFF3435, 0xf10cfbfb},
+{14400000, DIF_BPF_COEFF36, 0x110d0000},
+/* END - DIF BPF register values from 144_quant.dat*/
+
+
+/*case 14500000:*/
+/* BEGIN - DIF BPF register values from 145_quant.dat*/
+{14500000, DIF_BPF_COEFF01, 0xffff0002},
+{14500000, DIF_BPF_COEFF23, 0x0005ffef},
+{14500000, DIF_BPF_COEFF45, 0xfffa0038},
+{14500000, DIF_BPF_COEFF67, 0xffe5ff95},
+{14500000, DIF_BPF_COEFF89, 0x00820074},
+{14500000, DIF_BPF_COEFF1011, 0xfecc0000},
+{14500000, DIF_BPF_COEFF1213, 0x01f0fed0},
+{14500000, DIF_BPF_COEFF1415, 0xfdd20304},
+{14500000, DIF_BPF_COEFF1617, 0x014dfb1d},
+{14500000, DIF_BPF_COEFF1819, 0x010e05ce},
+{14500000, DIF_BPF_COEFF2021, 0xfb64fb41},
+{14500000, DIF_BPF_COEFF2223, 0x084e013b},
+{14500000, DIF_BPF_COEFF2425, 0xf569043e},
+{14500000, DIF_BPF_COEFF2627, 0x0a00f5dd},
+{14500000, DIF_BPF_COEFF2829, 0xfa150e55},
+{14500000, DIF_BPF_COEFF3031, 0xff0bf104},
+{14500000, DIF_BPF_COEFF3233, 0x08b00b59},
+{14500000, DIF_BPF_COEFF3435, 0xf13ffbc6},
+{14500000, DIF_BPF_COEFF36, 0x110d0000},
+/* END - DIF BPF register values from 145_quant.dat*/
+
+
+/*case 14600000:*/
+/* BEGIN - DIF BPF register values from 146_quant.dat*/
+{14600000, DIF_BPF_COEFF01, 0xffff0001},
+{14600000, DIF_BPF_COEFF23, 0x0008fff4},
+{14600000, DIF_BPF_COEFF45, 0xffed0035},
+{14600000, DIF_BPF_COEFF67, 0x0005ff83},
+{14600000, DIF_BPF_COEFF89, 0x005000b4},
+{14600000, DIF_BPF_COEFF1011, 0xfef6ff82},
+{14600000, DIF_BPF_COEFF1213, 0x01ffff7a},
+{14600000, DIF_BPF_COEFF1415, 0xfd580269},
+{14600000, DIF_BPF_COEFF1617, 0x0241fb53},
+{14600000, DIF_BPF_COEFF1819, 0xffca0640},
+{14600000, DIF_BPF_COEFF2021, 0xfc99fa1e},
+{14600000, DIF_BPF_COEFF2223, 0x079a02cb},
+{14600000, DIF_BPF_COEFF2425, 0xf55502ba},
+{14600000, DIF_BPF_COEFF2627, 0x0ad5f6e0},
+{14600000, DIF_BPF_COEFF2829, 0xf8d90e0a},
+{14600000, DIF_BPF_COEFF3031, 0x0031f0bd},
+{14600000, DIF_BPF_COEFF3233, 0x07fd0bcb},
+{14600000, DIF_BPF_COEFF3435, 0xf174fb91},
+{14600000, DIF_BPF_COEFF36, 0x110d0000},
+/* END - DIF BPF register values from 146_quant.dat*/
+
+
+/*case 14700000:*/
+/* BEGIN - DIF BPF register values from 147_quant.dat*/
+{14700000, DIF_BPF_COEFF01, 0xffffffff},
+{14700000, DIF_BPF_COEFF23, 0x0009fffb},
+{14700000, DIF_BPF_COEFF45, 0xffe4002a},
+{14700000, DIF_BPF_COEFF67, 0x0025ff82},
+{14700000, DIF_BPF_COEFF89, 0x001400e0},
+{14700000, DIF_BPF_COEFF1011, 0xff3cff10},
+{14700000, DIF_BPF_COEFF1213, 0x01e10030},
+{14700000, DIF_BPF_COEFF1415, 0xfd1201a4},
+{14700000, DIF_BPF_COEFF1617, 0x0311fbcd},
+{14700000, DIF_BPF_COEFF1819, 0xfe88066a},
+{14700000, DIF_BPF_COEFF2021, 0xfdf1f92f},
+{14700000, DIF_BPF_COEFF2223, 0x06aa0449},
+{14700000, DIF_BPF_COEFF2425, 0xf57e0128},
+{14700000, DIF_BPF_COEFF2627, 0x0b7ef801},
+{14700000, DIF_BPF_COEFF2829, 0xf7b00da2},
+{14700000, DIF_BPF_COEFF3031, 0x0156f086},
+{14700000, DIF_BPF_COEFF3233, 0x07450c39},
+{14700000, DIF_BPF_COEFF3435, 0xf1acfb5c},
+{14700000, DIF_BPF_COEFF36, 0x110d0000},
+/* END - DIF BPF register values from 147_quant.dat*/
+
+
+/*case 14800000:*/
+/* BEGIN - DIF BPF register values from 148_quant.dat*/
+{14800000, DIF_BPF_COEFF01, 0x0000fffe},
+{14800000, DIF_BPF_COEFF23, 0x00080002},
+{14800000, DIF_BPF_COEFF45, 0xffdf0019},
+{14800000, DIF_BPF_COEFF67, 0x003fff92},
+{14800000, DIF_BPF_COEFF89, 0xffd600f1},
+{14800000, DIF_BPF_COEFF1011, 0xff96feb6},
+{14800000, DIF_BPF_COEFF1213, 0x019700e1},
+{14800000, DIF_BPF_COEFF1415, 0xfd0500c2},
+{14800000, DIF_BPF_COEFF1617, 0x03b0fc84},
+{14800000, DIF_BPF_COEFF1819, 0xfd590649},
+{14800000, DIF_BPF_COEFF2021, 0xff5df87f},
+{14800000, DIF_BPF_COEFF2223, 0x058505aa},
+{14800000, DIF_BPF_COEFF2425, 0xf5e4ff91},
+{14800000, DIF_BPF_COEFF2627, 0x0bf9f93c},
+{14800000, DIF_BPF_COEFF2829, 0xf69d0d20},
+{14800000, DIF_BPF_COEFF3031, 0x0279f05e},
+{14800000, DIF_BPF_COEFF3233, 0x06880ca3},
+{14800000, DIF_BPF_COEFF3435, 0xf1e6fb28},
+{14800000, DIF_BPF_COEFF36, 0x110d0000},
+/* END - DIF BPF register values from 148_quant.dat*/
+
+
+/*case 14900000:*/
+/* BEGIN - DIF BPF register values from 149_quant.dat*/
+{14900000, DIF_BPF_COEFF01, 0x0000fffd},
+{14900000, DIF_BPF_COEFF23, 0x00060009},
+{14900000, DIF_BPF_COEFF45, 0xffdf0004},
+{14900000, DIF_BPF_COEFF67, 0x0051ffb0},
+{14900000, DIF_BPF_COEFF89, 0xff9d00e8},
+{14900000, DIF_BPF_COEFF1011, 0xfffcfe7c},
+{14900000, DIF_BPF_COEFF1213, 0x01280180},
+{14900000, DIF_BPF_COEFF1415, 0xfd32ffd2},
+{14900000, DIF_BPF_COEFF1617, 0x0413fd6e},
+{14900000, DIF_BPF_COEFF1819, 0xfc4d05df},
+{14900000, DIF_BPF_COEFF2021, 0x00d1f812},
+{14900000, DIF_BPF_COEFF2223, 0x043506e4},
+{14900000, DIF_BPF_COEFF2425, 0xf685fdfb},
+{14900000, DIF_BPF_COEFF2627, 0x0c43fa8d},
+{14900000, DIF_BPF_COEFF2829, 0xf5a10c83},
+{14900000, DIF_BPF_COEFF3031, 0x0399f046},
+{14900000, DIF_BPF_COEFF3233, 0x05c70d08},
+{14900000, DIF_BPF_COEFF3435, 0xf222faf3},
+{14900000, DIF_BPF_COEFF36, 0x110d0000},
+/* END - DIF BPF register values from 149_quant.dat*/
+
+
+/*case 15000000:*/
+/* BEGIN - DIF BPF register values from 150_quant.dat*/
+{15000000, DIF_BPF_COEFF01, 0x0000fffd},
+{15000000, DIF_BPF_COEFF23, 0x0003000f},
+{15000000, DIF_BPF_COEFF45, 0xffe5ffef},
+{15000000, DIF_BPF_COEFF67, 0x0057ffd9},
+{15000000, DIF_BPF_COEFF89, 0xff7000c4},
+{15000000, DIF_BPF_COEFF1011, 0x0062fe68},
+{15000000, DIF_BPF_COEFF1213, 0x009e01ff},
+{15000000, DIF_BPF_COEFF1415, 0xfd95fee6},
+{15000000, DIF_BPF_COEFF1617, 0x0435fe7d},
+{15000000, DIF_BPF_COEFF1819, 0xfb710530},
+{15000000, DIF_BPF_COEFF2021, 0x023cf7ee},
+{15000000, DIF_BPF_COEFF2223, 0x02c307ef},
+{15000000, DIF_BPF_COEFF2425, 0xf75efc70},
+{15000000, DIF_BPF_COEFF2627, 0x0c5cfbef},
+{15000000, DIF_BPF_COEFF2829, 0xf4c10bce},
+{15000000, DIF_BPF_COEFF3031, 0x04b3f03f},
+{15000000, DIF_BPF_COEFF3233, 0x05030d69},
+{15000000, DIF_BPF_COEFF3435, 0xf261fabf},
+{15000000, DIF_BPF_COEFF36, 0x110d0000},
+/* END - DIF BPF register values from 150_quant.dat*/
+
+
+/*case 15100000:*/
+/* BEGIN - DIF BPF register values from 151_quant.dat*/
+{15100000, DIF_BPF_COEFF01, 0x0000fffd},
+{15100000, DIF_BPF_COEFF23, 0xffff0012},
+{15100000, DIF_BPF_COEFF45, 0xffefffdc},
+{15100000, DIF_BPF_COEFF67, 0x00510006},
+{15100000, DIF_BPF_COEFF89, 0xff540089},
+{15100000, DIF_BPF_COEFF1011, 0x00befe7c},
+{15100000, DIF_BPF_COEFF1213, 0x00060253},
+{15100000, DIF_BPF_COEFF1415, 0xfe27fe0d},
+{15100000, DIF_BPF_COEFF1617, 0x0413ffa2},
+{15100000, DIF_BPF_COEFF1819, 0xfad10446},
+{15100000, DIF_BPF_COEFF2021, 0x0390f812},
+{15100000, DIF_BPF_COEFF2223, 0x013b08c3},
+{15100000, DIF_BPF_COEFF2425, 0xf868faf6},
+{15100000, DIF_BPF_COEFF2627, 0x0c43fd5f},
+{15100000, DIF_BPF_COEFF2829, 0xf3fd0b02},
+{15100000, DIF_BPF_COEFF3031, 0x05c7f046},
+{15100000, DIF_BPF_COEFF3233, 0x043b0dc4},
+{15100000, DIF_BPF_COEFF3435, 0xf2a1fa8b},
+{15100000, DIF_BPF_COEFF36, 0x110d0000},
+/* END - DIF BPF register values from 151_quant.dat*/
+
+
+/*case 15200000:*/
+/* BEGIN - DIF BPF register values from 152_quant.dat*/
+{15200000, DIF_BPF_COEFF01, 0x0001fffe},
+{15200000, DIF_BPF_COEFF23, 0xfffc0012},
+{15200000, DIF_BPF_COEFF45, 0xfffbffce},
+{15200000, DIF_BPF_COEFF67, 0x003f0033},
+{15200000, DIF_BPF_COEFF89, 0xff4e003f},
+{15200000, DIF_BPF_COEFF1011, 0x0106feb6},
+{15200000, DIF_BPF_COEFF1213, 0xff6e0276},
+{15200000, DIF_BPF_COEFF1415, 0xfeddfd56},
+{15200000, DIF_BPF_COEFF1617, 0x03b000cc},
+{15200000, DIF_BPF_COEFF1819, 0xfa740329},
+{15200000, DIF_BPF_COEFF2021, 0x04bff87f},
+{15200000, DIF_BPF_COEFF2223, 0xffaa095d},
+{15200000, DIF_BPF_COEFF2425, 0xf99ef995},
+{15200000, DIF_BPF_COEFF2627, 0x0bf9fed8},
+{15200000, DIF_BPF_COEFF2829, 0xf3590a1f},
+{15200000, DIF_BPF_COEFF3031, 0x06d2f05e},
+{15200000, DIF_BPF_COEFF3233, 0x03700e1b},
+{15200000, DIF_BPF_COEFF3435, 0xf2e4fa58},
+{15200000, DIF_BPF_COEFF36, 0x110d0000},
+/* END - DIF BPF register values from 152_quant.dat*/
+
+
+/*case 15300000:*/
+/* BEGIN - DIF BPF register values from 153_quant.dat*/
+{15300000, DIF_BPF_COEFF01, 0x0001ffff},
+{15300000, DIF_BPF_COEFF23, 0xfff9000f},
+{15300000, DIF_BPF_COEFF45, 0x0009ffc8},
+{15300000, DIF_BPF_COEFF67, 0x00250059},
+{15300000, DIF_BPF_COEFF89, 0xff5effee},
+{15300000, DIF_BPF_COEFF1011, 0x0132ff10},
+{15300000, DIF_BPF_COEFF1213, 0xfee30265},
+{15300000, DIF_BPF_COEFF1415, 0xffaafccf},
+{15300000, DIF_BPF_COEFF1617, 0x031101eb},
+{15300000, DIF_BPF_COEFF1819, 0xfa6001e8},
+{15300000, DIF_BPF_COEFF2021, 0x05bdf92f},
+{15300000, DIF_BPF_COEFF2223, 0xfe1b09b6},
+{15300000, DIF_BPF_COEFF2425, 0xfafaf852},
+{15300000, DIF_BPF_COEFF2627, 0x0b7e0055},
+{15300000, DIF_BPF_COEFF2829, 0xf2d50929},
+{15300000, DIF_BPF_COEFF3031, 0x07d3f086},
+{15300000, DIF_BPF_COEFF3233, 0x02a30e6c},
+{15300000, DIF_BPF_COEFF3435, 0xf329fa24},
+{15300000, DIF_BPF_COEFF36, 0x110d0000},
+/* END - DIF BPF register values from 153_quant.dat*/
+
+
+/*case 15400000:*/
+/* BEGIN - DIF BPF register values from 154_quant.dat*/
+{15400000, DIF_BPF_COEFF01, 0x00010001},
+{15400000, DIF_BPF_COEFF23, 0xfff80009},
+{15400000, DIF_BPF_COEFF45, 0x0015ffca},
+{15400000, DIF_BPF_COEFF67, 0x00050074},
+{15400000, DIF_BPF_COEFF89, 0xff81ff9f},
+{15400000, DIF_BPF_COEFF1011, 0x013dff82},
+{15400000, DIF_BPF_COEFF1213, 0xfe710221},
+{15400000, DIF_BPF_COEFF1415, 0x007cfc80},
+{15400000, DIF_BPF_COEFF1617, 0x024102ed},
+{15400000, DIF_BPF_COEFF1819, 0xfa940090},
+{15400000, DIF_BPF_COEFF2021, 0x0680fa1e},
+{15400000, DIF_BPF_COEFF2223, 0xfc9b09cd},
+{15400000, DIF_BPF_COEFF2425, 0xfc73f736},
+{15400000, DIF_BPF_COEFF2627, 0x0ad501d0},
+{15400000, DIF_BPF_COEFF2829, 0xf2740820},
+{15400000, DIF_BPF_COEFF3031, 0x08c9f0bd},
+{15400000, DIF_BPF_COEFF3233, 0x01d40eb9},
+{15400000, DIF_BPF_COEFF3435, 0xf371f9f1},
+{15400000, DIF_BPF_COEFF36, 0x110d0000},
+/* END - DIF BPF register values from 154_quant.dat*/
+
+
+/*case 15500000:*/
+/* BEGIN - DIF BPF register values from 155_quant.dat*/
+{15500000, DIF_BPF_COEFF01, 0x00000002},
+{15500000, DIF_BPF_COEFF23, 0xfff80002},
+{15500000, DIF_BPF_COEFF45, 0x001effd5},
+{15500000, DIF_BPF_COEFF67, 0xffe5007f},
+{15500000, DIF_BPF_COEFF89, 0xffb4ff5b},
+{15500000, DIF_BPF_COEFF1011, 0x01280000},
+{15500000, DIF_BPF_COEFF1213, 0xfe2401b0},
+{15500000, DIF_BPF_COEFF1415, 0x0146fc70},
+{15500000, DIF_BPF_COEFF1617, 0x014d03c6},
+{15500000, DIF_BPF_COEFF1819, 0xfb10ff32},
+{15500000, DIF_BPF_COEFF2021, 0x0701fb41},
+{15500000, DIF_BPF_COEFF2223, 0xfb3709a1},
+{15500000, DIF_BPF_COEFF2425, 0xfe00f644},
+{15500000, DIF_BPF_COEFF2627, 0x0a000345},
+{15500000, DIF_BPF_COEFF2829, 0xf2350708},
+{15500000, DIF_BPF_COEFF3031, 0x09b2f104},
+{15500000, DIF_BPF_COEFF3233, 0x01050eff},
+{15500000, DIF_BPF_COEFF3435, 0xf3baf9be},
+{15500000, DIF_BPF_COEFF36, 0x110d0000},
+/* END - DIF BPF register values from 155_quant.dat*/
+
+
+/*case 15600000:*/
+/* BEGIN - DIF BPF register values from 156_quant.dat*/
+{15600000, DIF_BPF_COEFF01, 0x00000003},
+{15600000, DIF_BPF_COEFF23, 0xfff9fffb},
+{15600000, DIF_BPF_COEFF45, 0x0022ffe6},
+{15600000, DIF_BPF_COEFF67, 0xffc9007a},
+{15600000, DIF_BPF_COEFF89, 0xfff0ff29},
+{15600000, DIF_BPF_COEFF1011, 0x00f2007e},
+{15600000, DIF_BPF_COEFF1213, 0xfe01011b},
+{15600000, DIF_BPF_COEFF1415, 0x01f6fc9e},
+{15600000, DIF_BPF_COEFF1617, 0x00440467},
+{15600000, DIF_BPF_COEFF1819, 0xfbccfdde},
+{15600000, DIF_BPF_COEFF2021, 0x0738fc90},
+{15600000, DIF_BPF_COEFF2223, 0xf9f70934},
+{15600000, DIF_BPF_COEFF2425, 0xff99f582},
+{15600000, DIF_BPF_COEFF2627, 0x090204b0},
+{15600000, DIF_BPF_COEFF2829, 0xf21a05e1},
+{15600000, DIF_BPF_COEFF3031, 0x0a8df15a},
+{15600000, DIF_BPF_COEFF3233, 0x00340f41},
+{15600000, DIF_BPF_COEFF3435, 0xf405f98b},
+{15600000, DIF_BPF_COEFF36, 0x110d0000},
+/* END - DIF BPF register values from 156_quant.dat*/
+
+
+/*case 15700000:*/
+/* BEGIN - DIF BPF register values from 157_quant.dat*/
+{15700000, DIF_BPF_COEFF01, 0x00000003},
+{15700000, DIF_BPF_COEFF23, 0xfffcfff4},
+{15700000, DIF_BPF_COEFF45, 0x0020fffa},
+{15700000, DIF_BPF_COEFF67, 0xffb40064},
+{15700000, DIF_BPF_COEFF89, 0x002fff11},
+{15700000, DIF_BPF_COEFF1011, 0x00a400f0},
+{15700000, DIF_BPF_COEFF1213, 0xfe0d006e},
+{15700000, DIF_BPF_COEFF1415, 0x0281fd09},
+{15700000, DIF_BPF_COEFF1617, 0xff3604c9},
+{15700000, DIF_BPF_COEFF1819, 0xfcbffca2},
+{15700000, DIF_BPF_COEFF2021, 0x0726fdfe},
+{15700000, DIF_BPF_COEFF2223, 0xf8e80888},
+{15700000, DIF_BPF_COEFF2425, 0x0134f4f3},
+{15700000, DIF_BPF_COEFF2627, 0x07e1060c},
+{15700000, DIF_BPF_COEFF2829, 0xf22304af},
+{15700000, DIF_BPF_COEFF3031, 0x0b59f1be},
+{15700000, DIF_BPF_COEFF3233, 0xff640f7d},
+{15700000, DIF_BPF_COEFF3435, 0xf452f959},
+{15700000, DIF_BPF_COEFF36, 0x110d0000},
+/* END - DIF BPF register values from 157_quant.dat*/
+
+
+/*case 15800000:*/
+/* BEGIN - DIF BPF register values from 158_quant.dat*/
+{15800000, DIF_BPF_COEFF01, 0x00000003},
+{15800000, DIF_BPF_COEFF23, 0x0000fff0},
+{15800000, DIF_BPF_COEFF45, 0x001a0010},
+{15800000, DIF_BPF_COEFF67, 0xffaa0041},
+{15800000, DIF_BPF_COEFF89, 0x0067ff13},
+{15800000, DIF_BPF_COEFF1011, 0x0043014a},
+{15800000, DIF_BPF_COEFF1213, 0xfe46ffb9},
+{15800000, DIF_BPF_COEFF1415, 0x02dbfda8},
+{15800000, DIF_BPF_COEFF1617, 0xfe3504e5},
+{15800000, DIF_BPF_COEFF1819, 0xfddcfb8d},
+{15800000, DIF_BPF_COEFF2021, 0x06c9ff7e},
+{15800000, DIF_BPF_COEFF2223, 0xf81107a2},
+{15800000, DIF_BPF_COEFF2425, 0x02c9f49a},
+{15800000, DIF_BPF_COEFF2627, 0x069f0753},
+{15800000, DIF_BPF_COEFF2829, 0xf2500373},
+{15800000, DIF_BPF_COEFF3031, 0x0c14f231},
+{15800000, DIF_BPF_COEFF3233, 0xfe930fb3},
+{15800000, DIF_BPF_COEFF3435, 0xf4a1f927},
+{15800000, DIF_BPF_COEFF36, 0x110d0000},
+/* END - DIF BPF register values from 158_quant.dat*/
+
+
+/*case 15900000:*/
+/* BEGIN - DIF BPF register values from 159_quant.dat*/
+{15900000, DIF_BPF_COEFF01, 0xffff0002},
+{15900000, DIF_BPF_COEFF23, 0x0003ffee},
+{15900000, DIF_BPF_COEFF45, 0x000f0023},
+{15900000, DIF_BPF_COEFF67, 0xffac0016},
+{15900000, DIF_BPF_COEFF89, 0x0093ff31},
+{15900000, DIF_BPF_COEFF1011, 0xffdc0184},
+{15900000, DIF_BPF_COEFF1213, 0xfea6ff09},
+{15900000, DIF_BPF_COEFF1415, 0x02fdfe70},
+{15900000, DIF_BPF_COEFF1617, 0xfd5104ba},
+{15900000, DIF_BPF_COEFF1819, 0xff15faac},
+{15900000, DIF_BPF_COEFF2021, 0x06270103},
+{15900000, DIF_BPF_COEFF2223, 0xf7780688},
+{15900000, DIF_BPF_COEFF2425, 0x044df479},
+{15900000, DIF_BPF_COEFF2627, 0x05430883},
+{15900000, DIF_BPF_COEFF2829, 0xf2a00231},
+{15900000, DIF_BPF_COEFF3031, 0x0cbef2b2},
+{15900000, DIF_BPF_COEFF3233, 0xfdc40fe3},
+{15900000, DIF_BPF_COEFF3435, 0xf4f2f8f5},
+{15900000, DIF_BPF_COEFF36, 0x110d0000},
+/* END - DIF BPF register values from 159_quant.dat*/
+
+
+/*case 16000000:*/
+/* BEGIN - DIF BPF register values from 160_quant.dat*/
+{16000000, DIF_BPF_COEFF01, 0xffff0001},
+{16000000, DIF_BPF_COEFF23, 0x0006ffef},
+{16000000, DIF_BPF_COEFF45, 0x00020031},
+{16000000, DIF_BPF_COEFF67, 0xffbaffe8},
+{16000000, DIF_BPF_COEFF89, 0x00adff66},
+{16000000, DIF_BPF_COEFF1011, 0xff790198},
+{16000000, DIF_BPF_COEFF1213, 0xff26fe6e},
+{16000000, DIF_BPF_COEFF1415, 0x02e5ff55},
+{16000000, DIF_BPF_COEFF1617, 0xfc99044a},
+{16000000, DIF_BPF_COEFF1819, 0x005bfa09},
+{16000000, DIF_BPF_COEFF2021, 0x0545027f},
+{16000000, DIF_BPF_COEFF2223, 0xf7230541},
+{16000000, DIF_BPF_COEFF2425, 0x05b8f490},
+{16000000, DIF_BPF_COEFF2627, 0x03d20997},
+{16000000, DIF_BPF_COEFF2829, 0xf31300eb},
+{16000000, DIF_BPF_COEFF3031, 0x0d55f341},
+{16000000, DIF_BPF_COEFF3233, 0xfcf6100e},
+{16000000, DIF_BPF_COEFF3435, 0xf544f8c3},
+{16000000, DIF_BPF_COEFF36, 0x110d0000},
+/* END - DIF BPF register values from 160_quant.dat*/
+};
+
+#endif
diff --git a/drivers/media/video/cx231xx/cx231xx-dvb.c b/drivers/media/video/cx231xx/cx231xx-dvb.c
index 4ea3776b39fb..5feb3ee640d9 100644
--- a/drivers/media/video/cx231xx/cx231xx-dvb.c
+++ b/drivers/media/video/cx231xx/cx231xx-dvb.c
@@ -29,6 +29,10 @@
#include "xc5000.h"
#include "dvb_dummy_fe.h"
+#include "s5h1432.h"
+#include "tda18271.h"
+#include "s5h1411.h"
+#include "lgdt3305.h"
MODULE_DESCRIPTION("driver for cx231xx based DVB cards");
MODULE_AUTHOR("Srinivasa Deevi <srinivasa.deevi@conexant.com>");
@@ -65,6 +69,72 @@ struct cx231xx_dvb {
struct dvb_net net;
};
+static struct s5h1432_config dvico_s5h1432_config = {
+ .output_mode = S5H1432_SERIAL_OUTPUT,
+ .gpio = S5H1432_GPIO_ON,
+ .qam_if = S5H1432_IF_4000,
+ .vsb_if = S5H1432_IF_4000,
+ .inversion = S5H1432_INVERSION_OFF,
+ .status_mode = S5H1432_DEMODLOCKING,
+ .mpeg_timing = S5H1432_MPEGTIMING_CONTINOUS_NONINVERTING_CLOCK,
+};
+
+static struct tda18271_std_map cnxt_rde253s_tda18271_std_map = {
+ .dvbt_6 = { .if_freq = 4000, .agc_mode = 3, .std = 4,
+ .if_lvl = 1, .rfagc_top = 0x37, },
+ .dvbt_7 = { .if_freq = 4000, .agc_mode = 3, .std = 5,
+ .if_lvl = 1, .rfagc_top = 0x37, },
+ .dvbt_8 = { .if_freq = 4000, .agc_mode = 3, .std = 6,
+ .if_lvl = 1, .rfagc_top = 0x37, },
+};
+
+static struct tda18271_config cnxt_rde253s_tunerconfig = {
+ .std_map = &cnxt_rde253s_tda18271_std_map,
+ .gate = TDA18271_GATE_ANALOG,
+};
+
+static struct s5h1411_config tda18271_s5h1411_config = {
+ .output_mode = S5H1411_SERIAL_OUTPUT,
+ .gpio = S5H1411_GPIO_OFF,
+ .vsb_if = S5H1411_IF_3250,
+ .qam_if = S5H1411_IF_4000,
+ .inversion = S5H1411_INVERSION_ON,
+ .status_mode = S5H1411_DEMODLOCKING,
+ .mpeg_timing = S5H1411_MPEGTIMING_CONTINOUS_NONINVERTING_CLOCK,
+};
+static struct s5h1411_config xc5000_s5h1411_config = {
+ .output_mode = S5H1411_SERIAL_OUTPUT,
+ .gpio = S5H1411_GPIO_OFF,
+ .vsb_if = S5H1411_IF_3250,
+ .qam_if = S5H1411_IF_3250,
+ .inversion = S5H1411_INVERSION_OFF,
+ .status_mode = S5H1411_DEMODLOCKING,
+ .mpeg_timing = S5H1411_MPEGTIMING_CONTINOUS_NONINVERTING_CLOCK,
+};
+
+static struct lgdt3305_config hcw_lgdt3305_config = {
+ .i2c_addr = 0x0e,
+ .mpeg_mode = LGDT3305_MPEG_SERIAL,
+ .tpclk_edge = LGDT3305_TPCLK_FALLING_EDGE,
+ .tpvalid_polarity = LGDT3305_TP_VALID_HIGH,
+ .deny_i2c_rptr = 1,
+ .spectral_inversion = 1,
+ .qam_if_khz = 4000,
+ .vsb_if_khz = 3250,
+};
+
+static struct tda18271_std_map hauppauge_tda18271_std_map = {
+ .atsc_6 = { .if_freq = 3250, .agc_mode = 3, .std = 4,
+ .if_lvl = 1, .rfagc_top = 0x58, },
+ .qam_6 = { .if_freq = 4000, .agc_mode = 3, .std = 5,
+ .if_lvl = 1, .rfagc_top = 0x58, },
+};
+
+static struct tda18271_config hcw_tda18271_config = {
+ .std_map = &hauppauge_tda18271_std_map,
+ .gate = TDA18271_GATE_DIGITAL,
+};
+
static inline void print_err_status(struct cx231xx *dev, int packet, int status)
{
char *errmsg = "Unknown";
@@ -128,11 +198,33 @@ static inline int dvb_isoc_copy(struct cx231xx *dev, struct urb *urb)
continue;
}
- dvb_dmx_swfilter(&dev->dvb->demux, urb->transfer_buffer +
- urb->iso_frame_desc[i].offset,
- urb->iso_frame_desc[i].actual_length);
+ dvb_dmx_swfilter(&dev->dvb->demux,
+ urb->transfer_buffer +
+ urb->iso_frame_desc[i].offset,
+ urb->iso_frame_desc[i].actual_length);
+ }
+
+ return 0;
+}
+
+static inline int dvb_bulk_copy(struct cx231xx *dev, struct urb *urb)
+{
+ if (!dev)
+ return 0;
+
+ if ((dev->state & DEV_DISCONNECTED) || (dev->state & DEV_MISCONFIGURED))
+ return 0;
+
+ if (urb->status < 0) {
+ print_err_status(dev, -1, urb->status);
+ if (urb->status == -ENOENT)
+ return 0;
}
+ /* Feed the transport payload into the kernel demux */
+ dvb_dmx_swfilter(&dev->dvb->demux,
+ urb->transfer_buffer, urb->actual_length);
+
return 0;
}
@@ -141,21 +233,44 @@ static int start_streaming(struct cx231xx_dvb *dvb)
int rc;
struct cx231xx *dev = dvb->adapter.priv;
- usb_set_interface(dev->udev, 0, 1);
- rc = cx231xx_set_mode(dev, CX231XX_DIGITAL_MODE);
- if (rc < 0)
- return rc;
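+ /* The TS1 interface can be streamed either isochronously (alternate
+ setting 4) or over the bulk endpoint (alternate setting 0); set up
+ the matching URB machinery below. */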
+ if (dev->USE_ISO) {
+ cx231xx_info("DVB transfer mode is ISO.\n");
+ mutex_lock(&dev->i2c_lock);
+ cx231xx_enable_i2c_port_3(dev, false);
+ cx231xx_set_alt_setting(dev, INDEX_TS1, 4);
+ cx231xx_enable_i2c_port_3(dev, true);
+ mutex_unlock(&dev->i2c_lock);
+ rc = cx231xx_set_mode(dev, CX231XX_DIGITAL_MODE);
+ if (rc < 0)
+ return rc;
+ dev->mode_tv = 1;
+ return cx231xx_init_isoc(dev, CX231XX_DVB_MAX_PACKETS,
+ CX231XX_DVB_NUM_BUFS,
+ dev->ts1_mode.max_pkt_size,
+ dvb_isoc_copy);
+ } else {
+ cx231xx_info("DVB transfer mode is BULK.\n");
+ cx231xx_set_alt_setting(dev, INDEX_TS1, 0);
+ rc = cx231xx_set_mode(dev, CX231XX_DIGITAL_MODE);
+ if (rc < 0)
+ return rc;
+ dev->mode_tv = 1;
+ return cx231xx_init_bulk(dev, CX231XX_DVB_MAX_PACKETS,
+ CX231XX_DVB_NUM_BUFS,
+ dev->ts1_mode.max_pkt_size,
+ dvb_bulk_copy);
+ }
- return cx231xx_init_isoc(dev, CX231XX_DVB_MAX_PACKETS,
- CX231XX_DVB_NUM_BUFS,
- CX231XX_DVB_MAX_PACKETSIZE, dvb_isoc_copy);
}
static int stop_streaming(struct cx231xx_dvb *dvb)
{
struct cx231xx *dev = dvb->adapter.priv;
- cx231xx_uninit_isoc(dev);
+ if (dev->USE_ISO)
+ cx231xx_uninit_isoc(dev);
+ else
+ cx231xx_uninit_bulk(dev);
cx231xx_set_mode(dev, CX231XX_SUSPEND);
@@ -216,7 +331,11 @@ static int cx231xx_dvb_bus_ctrl(struct dvb_frontend *fe, int acquire)
static struct xc5000_config cnxt_rde250_tunerconfig = {
.i2c_address = 0x61,
- .if_khz = 5380,
+ .if_khz = 4000,
+};
+static struct xc5000_config cnxt_rdu250_tunerconfig = {
+ .i2c_address = 0x61,
+ .if_khz = 3250,
};
/* ------------------------------------------------------------------ */
@@ -228,7 +347,7 @@ static int attach_xc5000(u8 addr, struct cx231xx *dev)
struct xc5000_config cfg;
memset(&cfg, 0, sizeof(cfg));
- cfg.i2c_adap = &dev->i2c_bus[1].i2c_adap;
+ cfg.i2c_adap = &dev->i2c_bus[dev->board.tuner_i2c_master].i2c_adap;
cfg.i2c_addr = addr;
if (!dev->dvb->frontend) {
@@ -268,7 +387,6 @@ int cx231xx_set_analog_freq(struct cx231xx *dev, u32 freq)
/*params.audmode = ; */
/* Set the analog parameters to set the frequency */
- cx231xx_info("Setting Frequency for XC5000\n");
dops->set_analog_params(dev->dvb->frontend, &params);
}
@@ -445,19 +563,21 @@ static int dvb_init(struct cx231xx *dev)
dev->cx231xx_set_analog_freq = cx231xx_set_analog_freq;
dev->cx231xx_reset_analog_tuner = cx231xx_reset_analog_tuner;
+ mutex_lock(&dev->lock);
cx231xx_set_mode(dev, CX231XX_DIGITAL_MODE);
+ cx231xx_demod_reset(dev);
/* init frontend */
switch (dev->model) {
+ case CX231XX_BOARD_CNXT_CARRAERA:
case CX231XX_BOARD_CNXT_RDE_250:
- /* dev->dvb->frontend = dvb_attach(s5h1411_attach,
- &dvico_s5h1411_config,
- &dev->i2c_bus[1].i2c_adap); */
- dev->dvb->frontend = dvb_attach(dvb_dummy_fe_ofdm_attach);
+ dev->dvb->frontend = dvb_attach(s5h1432_attach,
+ &dvico_s5h1432_config,
+ &dev->i2c_bus[dev->board.demod_i2c_master].i2c_adap);
if (dev->dvb->frontend == NULL) {
printk(DRIVER_NAME
- ": Failed to attach dummy front end\n");
+ ": Failed to attach s5h1432 front end\n");
result = -EINVAL;
goto out_free;
}
@@ -466,16 +586,19 @@ static int dvb_init(struct cx231xx *dev)
dvb->frontend->callback = cx231xx_tuner_callback;
if (!dvb_attach(xc5000_attach, dev->dvb->frontend,
- &dev->i2c_bus[1].i2c_adap,
+ &dev->i2c_bus[dev->board.tuner_i2c_master].i2c_adap,
&cnxt_rde250_tunerconfig)) {
result = -EINVAL;
goto out_free;
}
break;
+ case CX231XX_BOARD_CNXT_SHELBY:
case CX231XX_BOARD_CNXT_RDU_250:
- dev->dvb->frontend = dvb_attach(dvb_dummy_fe_ofdm_attach);
+ dev->dvb->frontend = dvb_attach(s5h1411_attach,
+ &xc5000_s5h1411_config,
+ &dev->i2c_bus[dev->board.demod_i2c_master].i2c_adap);
if (dev->dvb->frontend == NULL) {
printk(DRIVER_NAME
@@ -488,12 +611,82 @@ static int dvb_init(struct cx231xx *dev)
dvb->frontend->callback = cx231xx_tuner_callback;
if (!dvb_attach(xc5000_attach, dev->dvb->frontend,
- &dev->i2c_bus[1].i2c_adap,
- &cnxt_rde250_tunerconfig)) {
+ &dev->i2c_bus[dev->board.tuner_i2c_master].i2c_adap,
+ &cnxt_rdu250_tunerconfig)) {
+ result = -EINVAL;
+ goto out_free;
+ }
+ break;
+ case CX231XX_BOARD_CNXT_RDE_253S:
+
+ dev->dvb->frontend = dvb_attach(s5h1432_attach,
+ &dvico_s5h1432_config,
+ &dev->i2c_bus[dev->board.demod_i2c_master].i2c_adap);
+
+ if (dev->dvb->frontend == NULL) {
+ printk(DRIVER_NAME
+ ": Failed to attach s5h1432 front end\n");
+ result = -EINVAL;
+ goto out_free;
+ }
+
+ /* define general-purpose callback pointer */
+ dvb->frontend->callback = cx231xx_tuner_callback;
+
+ if (!dvb_attach(tda18271_attach, dev->dvb->frontend,
+ 0x60, &dev->i2c_bus[dev->board.tuner_i2c_master].i2c_adap,
+ &cnxt_rde253s_tunerconfig)) {
+ result = -EINVAL;
+ goto out_free;
+ }
+ break;
+ case CX231XX_BOARD_CNXT_RDU_253S:
+
+ dev->dvb->frontend = dvb_attach(s5h1411_attach,
+ &tda18271_s5h1411_config,
+ &dev->i2c_bus[dev->board.demod_i2c_master].i2c_adap);
+
+ if (dev->dvb->frontend == NULL) {
+ printk(DRIVER_NAME
+ ": Failed to attach dummy front end\n");
+ result = -EINVAL;
+ goto out_free;
+ }
+
+ /* define general-purpose callback pointer */
+ dvb->frontend->callback = cx231xx_tuner_callback;
+
+ if (!dvb_attach(tda18271_attach, dev->dvb->frontend,
+ 0x60, &dev->i2c_bus[dev->board.tuner_i2c_master].i2c_adap,
+ &cnxt_rde253s_tunerconfig)) {
result = -EINVAL;
goto out_free;
}
break;
+ case CX231XX_BOARD_HAUPPAUGE_EXETER:
+
+ printk(KERN_INFO "%s: looking for tuner / demod on i2c bus: %d\n",
+ __func__, i2c_adapter_id(&dev->i2c_bus[dev->board.tuner_i2c_master].i2c_adap));
+
+ dev->dvb->frontend = dvb_attach(lgdt3305_attach,
+ &hcw_lgdt3305_config,
+ &dev->i2c_bus[dev->board.tuner_i2c_master].i2c_adap);
+
+ if (dev->dvb->frontend == NULL) {
+ printk(DRIVER_NAME
+ ": Failed to attach LG3305 front end\n");
+ result = -EINVAL;
+ goto out_free;
+ }
+
+ /* define general-purpose callback pointer */
+ dvb->frontend->callback = cx231xx_tuner_callback;
+
+ dvb_attach(tda18271_attach, dev->dvb->frontend,
+ 0x60, &dev->i2c_bus[dev->board.tuner_i2c_master].i2c_adap,
+ &hcw_tda18271_config);
+ break;
+
default:
printk(KERN_ERR "%s/2: The frontend of your DVB/ATSC card"
@@ -513,15 +706,18 @@ static int dvb_init(struct cx231xx *dev)
if (result < 0)
goto out_free;
- cx231xx_set_mode(dev, CX231XX_SUSPEND);
+
printk(KERN_INFO "Successfully loaded cx231xx-dvb\n");
- return 0;
-out_free:
+ret:
cx231xx_set_mode(dev, CX231XX_SUSPEND);
+ mutex_unlock(&dev->lock);
+ return result;
+
+out_free:
kfree(dvb);
dev->dvb = NULL;
- return result;
+ goto ret;
}
static int dvb_fini(struct cx231xx *dev)
diff --git a/drivers/media/video/cx231xx/cx231xx-i2c.c b/drivers/media/video/cx231xx/cx231xx-i2c.c
index 58d9cc0867b9..835670623dfb 100644
--- a/drivers/media/video/cx231xx/cx231xx-i2c.c
+++ b/drivers/media/video/cx231xx/cx231xx-i2c.c
@@ -359,7 +359,7 @@ static int cx231xx_i2c_xfer(struct i2c_adapter *i2c_adap,
if (num <= 0)
return 0;
-
+ mutex_lock(&dev->i2c_lock);
for (i = 0; i < num; i++) {
addr = msgs[i].addr >> 1;
@@ -372,6 +372,7 @@ static int cx231xx_i2c_xfer(struct i2c_adapter *i2c_adap,
rc = cx231xx_i2c_check_for_device(i2c_adap, &msgs[i]);
if (rc < 0) {
dprintk2(2, " no device\n");
+ mutex_unlock(&dev->i2c_lock);
return rc;
}
@@ -384,7 +385,7 @@ static int cx231xx_i2c_xfer(struct i2c_adapter *i2c_adap,
}
} else if (i + 1 < num && (msgs[i + 1].flags & I2C_M_RD) &&
msgs[i].addr == msgs[i + 1].addr
- && (msgs[i].len <= 2) && (bus->nr < 2)) {
+ && (msgs[i].len <= 2) && (bus->nr < 3)) {
/* read bytes */
rc = cx231xx_i2c_recv_bytes_with_saddr(i2c_adap,
&msgs[i],
@@ -407,10 +408,11 @@ static int cx231xx_i2c_xfer(struct i2c_adapter *i2c_adap,
if (i2c_debug >= 2)
printk("\n");
}
-
+ mutex_unlock(&dev->i2c_lock);
return num;
err:
dprintk2(2, " ERROR: %i\n", rc);
+ mutex_unlock(&dev->i2c_lock);
return rc;
}
@@ -507,9 +509,6 @@ int cx231xx_i2c_register(struct cx231xx_i2c *bus)
if (0 == bus->i2c_rc) {
if (i2c_scan)
cx231xx_do_i2c_scan(dev, &bus->i2c_client);
-
- /* Instantiate the IR receiver device, if present */
- cx231xx_register_i2c_ir(dev);
} else
cx231xx_warn("%s: i2c bus %d register FAILED\n",
dev->name, bus->nr);
diff --git a/drivers/media/video/cx231xx/cx231xx-input.c b/drivers/media/video/cx231xx/cx231xx-input.c
deleted file mode 100644
index fd099153b746..000000000000
--- a/drivers/media/video/cx231xx/cx231xx-input.c
+++ /dev/null
@@ -1,222 +0,0 @@
-/*
- handle cx231xx IR remotes via linux kernel input layer.
-
- Copyright (C) 2008 <srinivasa.deevi at conexant dot com>
- Based on em28xx driver
-
- < This is a place holder for IR now.>
-
- This program is free software; you can redistribute it and/or modify
- it under the terms of the GNU General Public License as published by
- the Free Software Foundation; either version 2 of the License, or
- (at your option) any later version.
-
- This program is distributed in the hope that it will be useful,
- but WITHOUT ANY WARRANTY; without even the implied warranty of
- MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- GNU General Public License for more details.
-
- You should have received a copy of the GNU General Public License
- along with this program; if not, write to the Free Software
- Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
- */
-
-#include <linux/module.h>
-#include <linux/init.h>
-#include <linux/delay.h>
-#include <linux/interrupt.h>
-#include <linux/input.h>
-#include <linux/usb.h>
-#include <linux/slab.h>
-
-#include "cx231xx.h"
-
-static unsigned int ir_debug;
-module_param(ir_debug, int, 0644);
-MODULE_PARM_DESC(ir_debug, "enable debug messages [IR]");
-
-#define MODULE_NAME "cx231xx"
-
-#define i2cdprintk(fmt, arg...) \
- if (ir_debug) { \
- printk(KERN_DEBUG "%s/ir: " fmt, ir->name , ## arg); \
- }
-
-#define dprintk(fmt, arg...) \
- if (ir_debug) { \
- printk(KERN_DEBUG "%s/ir: " fmt, ir->name , ## arg); \
- }
-
-/**********************************************************
- Polling structure used by cx231xx IR's
- **********************************************************/
-
-struct cx231xx_ir_poll_result {
- unsigned int toggle_bit:1;
- unsigned int read_count:7;
- u8 rc_address;
- u8 rc_data[4];
-};
-
-struct cx231xx_IR {
- struct cx231xx *dev;
- struct input_dev *input;
- char name[32];
- char phys[32];
-
- /* poll external decoder */
- int polling;
- struct work_struct work;
- struct timer_list timer;
- unsigned int last_readcount;
-
- int (*get_key) (struct cx231xx_IR *, struct cx231xx_ir_poll_result *);
-};
-
-/**********************************************************
- Polling code for cx231xx
- **********************************************************/
-
-static void cx231xx_ir_handle_key(struct cx231xx_IR *ir)
-{
- int result;
- struct cx231xx_ir_poll_result poll_result;
-
- /* read the registers containing the IR status */
- result = ir->get_key(ir, &poll_result);
- if (result < 0) {
- dprintk("ir->get_key() failed %d\n", result);
- return;
- }
-
- dprintk("ir->get_key result tb=%02x rc=%02x lr=%02x data=%02x\n",
- poll_result.toggle_bit, poll_result.read_count,
- ir->last_readcount, poll_result.rc_data[0]);
-
- if (poll_result.read_count > 0 &&
- poll_result.read_count != ir->last_readcount)
- ir_keydown(ir->input,
- poll_result.rc_data[0],
- poll_result.toggle_bit);
-
- if (ir->dev->chip_id == CHIP_ID_EM2874)
- /* The em2874 clears the readcount field every time the
- register is read. The em2860/2880 datasheet says that it
- is supposed to clear the readcount, but it doesn't. So with
- the em2874, we are looking for a non-zero read count as
- opposed to a readcount that is incrementing */
- ir->last_readcount = 0;
- else
- ir->last_readcount = poll_result.read_count;
-
- }
-}
-
-static void ir_timer(unsigned long data)
-{
- struct cx231xx_IR *ir = (struct cx231xx_IR *)data;
-
- schedule_work(&ir->work);
-}
-
-static void cx231xx_ir_work(struct work_struct *work)
-{
- struct cx231xx_IR *ir = container_of(work, struct cx231xx_IR, work);
-
- cx231xx_ir_handle_key(ir);
- mod_timer(&ir->timer, jiffies + msecs_to_jiffies(ir->polling));
-}
-
-void cx231xx_ir_start(struct cx231xx_IR *ir)
-{
- setup_timer(&ir->timer, ir_timer, (unsigned long)ir);
- INIT_WORK(&ir->work, cx231xx_ir_work);
- schedule_work(&ir->work);
-}
-
-static void cx231xx_ir_stop(struct cx231xx_IR *ir)
-{
- del_timer_sync(&ir->timer);
- flush_scheduled_work();
-}
-
-int cx231xx_ir_init(struct cx231xx *dev)
-{
- struct cx231xx_IR *ir;
- struct input_dev *input_dev;
- u8 ir_config;
- int err = -ENOMEM;
-
- if (dev->board.ir_codes == NULL) {
- /* No remote control support */
- return 0;
- }
-
- ir = kzalloc(sizeof(*ir), GFP_KERNEL);
- input_dev = input_allocate_device();
- if (!ir || !input_dev)
- goto err_out_free;
-
- ir->input = input_dev;
-
- /* Setup the proper handler based on the chip */
- switch (dev->chip_id) {
- default:
- printk("Unrecognized cx231xx chip id: IR not supported\n");
- goto err_out_free;
- }
-
- /* This is how often we ask the chip for IR information */
- ir->polling = 100; /* ms */
-
- /* init input device */
- snprintf(ir->name, sizeof(ir->name), "cx231xx IR (%s)", dev->name);
-
- usb_make_path(dev->udev, ir->phys, sizeof(ir->phys));
- strlcat(ir->phys, "/input0", sizeof(ir->phys));
-
- input_dev->name = ir->name;
- input_dev->phys = ir->phys;
- input_dev->id.bustype = BUS_USB;
- input_dev->id.version = 1;
- input_dev->id.vendor = le16_to_cpu(dev->udev->descriptor.idVendor);
- input_dev->id.product = le16_to_cpu(dev->udev->descriptor.idProduct);
-
- input_dev->dev.parent = &dev->udev->dev;
- /* record handles to ourself */
- ir->dev = dev;
- dev->ir = ir;
-
- cx231xx_ir_start(ir);
-
- /* all done */
- err = __ir_input_register(ir->input, dev->board.ir_codes,
- NULL, MODULE_NAME);
- if (err)
- goto err_out_stop;
-
- return 0;
-err_out_stop:
- cx231xx_ir_stop(ir);
- dev->ir = NULL;
-err_out_free:
- kfree(ir);
- return err;
-}
-
-int cx231xx_ir_fini(struct cx231xx *dev)
-{
- struct cx231xx_IR *ir = dev->ir;
-
- /* skip detach on non attached boards */
- if (!ir)
- return 0;
-
- cx231xx_ir_stop(ir);
- ir_input_unregister(ir->input);
- kfree(ir);
-
- /* done */
- dev->ir = NULL;
- return 0;
-}
diff --git a/drivers/media/video/cx231xx/cx231xx-vbi.c b/drivers/media/video/cx231xx/cx231xx-vbi.c
index 689c5e25776c..1d914488dbb3 100644
--- a/drivers/media/video/cx231xx/cx231xx-vbi.c
+++ b/drivers/media/video/cx231xx/cx231xx-vbi.c
@@ -102,7 +102,7 @@ static inline int cx231xx_isoc_vbi_copy(struct cx231xx *dev, struct urb *urb)
return 0;
}
- buf = dev->vbi_mode.isoc_ctl.buf;
+ buf = dev->vbi_mode.bulk_ctl.buf;
/* get buffer pointer and length */
p_buffer = urb->transfer_buffer;
@@ -180,7 +180,7 @@ vbi_buffer_setup(struct videobuf_queue *vq, unsigned int *count,
height = ((dev->norm & V4L2_STD_625_50) ?
PAL_VBI_LINES : NTSC_VBI_LINES);
- *size = (dev->width * height * 2);
+ *size = (dev->width * height * 2 * 2);
if (0 == *count)
*count = CX231XX_DEF_VBI_BUF;
@@ -209,8 +209,8 @@ static void free_buffer(struct videobuf_queue *vq, struct cx231xx_buffer *buf)
VIDEOBUF_ACTIVE, it won't be, though.
*/
spin_lock_irqsave(&dev->vbi_mode.slock, flags);
- if (dev->vbi_mode.isoc_ctl.buf == buf)
- dev->vbi_mode.isoc_ctl.buf = NULL;
+ if (dev->vbi_mode.bulk_ctl.buf == buf)
+ dev->vbi_mode.bulk_ctl.buf = NULL;
spin_unlock_irqrestore(&dev->vbi_mode.slock, flags);
videobuf_vmalloc_free(&buf->vb);
@@ -230,7 +230,7 @@ vbi_buffer_prepare(struct videobuf_queue *vq, struct videobuf_buffer *vb,
height = ((dev->norm & V4L2_STD_625_50) ?
PAL_VBI_LINES : NTSC_VBI_LINES);
- buf->vb.size = ((dev->width << 1) * height);
+ buf->vb.size = ((dev->width << 1) * height * 2);
if (0 != buf->vb.baddr && buf->vb.bsize < buf->vb.size)
return -EINVAL;
@@ -246,7 +246,7 @@ vbi_buffer_prepare(struct videobuf_queue *vq, struct videobuf_buffer *vb,
goto fail;
}
- if (!dev->vbi_mode.isoc_ctl.num_bufs)
+ if (!dev->vbi_mode.bulk_ctl.num_bufs)
urb_init = 1;
if (urb_init) {
@@ -328,7 +328,7 @@ static void cx231xx_irq_vbi_callback(struct urb *urb)
/* Copy data from URB */
spin_lock(&dev->vbi_mode.slock);
- rc = dev->vbi_mode.isoc_ctl.isoc_copy(dev, urb);
+ rc = dev->vbi_mode.bulk_ctl.bulk_copy(dev, urb);
spin_unlock(&dev->vbi_mode.slock);
/* Reset status */
@@ -351,34 +351,34 @@ void cx231xx_uninit_vbi_isoc(struct cx231xx *dev)
cx231xx_info(DRIVER_NAME "cx231xx: called cx231xx_uninit_vbi_isoc\n");
- dev->vbi_mode.isoc_ctl.nfields = -1;
- for (i = 0; i < dev->vbi_mode.isoc_ctl.num_bufs; i++) {
- urb = dev->vbi_mode.isoc_ctl.urb[i];
+ dev->vbi_mode.bulk_ctl.nfields = -1;
+ for (i = 0; i < dev->vbi_mode.bulk_ctl.num_bufs; i++) {
+ urb = dev->vbi_mode.bulk_ctl.urb[i];
if (urb) {
if (!irqs_disabled())
usb_kill_urb(urb);
else
usb_unlink_urb(urb);
- if (dev->vbi_mode.isoc_ctl.transfer_buffer[i]) {
+ if (dev->vbi_mode.bulk_ctl.transfer_buffer[i]) {
- kfree(dev->vbi_mode.isoc_ctl.
+ kfree(dev->vbi_mode.bulk_ctl.
transfer_buffer[i]);
- dev->vbi_mode.isoc_ctl.transfer_buffer[i] =
+ dev->vbi_mode.bulk_ctl.transfer_buffer[i] =
NULL;
}
usb_free_urb(urb);
- dev->vbi_mode.isoc_ctl.urb[i] = NULL;
+ dev->vbi_mode.bulk_ctl.urb[i] = NULL;
}
- dev->vbi_mode.isoc_ctl.transfer_buffer[i] = NULL;
+ dev->vbi_mode.bulk_ctl.transfer_buffer[i] = NULL;
}
- kfree(dev->vbi_mode.isoc_ctl.urb);
- kfree(dev->vbi_mode.isoc_ctl.transfer_buffer);
+ kfree(dev->vbi_mode.bulk_ctl.urb);
+ kfree(dev->vbi_mode.bulk_ctl.transfer_buffer);
- dev->vbi_mode.isoc_ctl.urb = NULL;
- dev->vbi_mode.isoc_ctl.transfer_buffer = NULL;
- dev->vbi_mode.isoc_ctl.num_bufs = 0;
+ dev->vbi_mode.bulk_ctl.urb = NULL;
+ dev->vbi_mode.bulk_ctl.transfer_buffer = NULL;
+ dev->vbi_mode.bulk_ctl.num_bufs = 0;
cx231xx_capture_start(dev, 0, Vbi);
}
@@ -389,7 +389,7 @@ EXPORT_SYMBOL_GPL(cx231xx_uninit_vbi_isoc);
*/
int cx231xx_init_vbi_isoc(struct cx231xx *dev, int max_packets,
int num_bufs, int max_pkt_size,
- int (*isoc_copy) (struct cx231xx *dev,
+ int (*bulk_copy) (struct cx231xx *dev,
struct urb *urb))
{
struct cx231xx_dmaqueue *dma_q = &dev->vbi_mode.vidq;
@@ -408,8 +408,8 @@ int cx231xx_init_vbi_isoc(struct cx231xx *dev, int max_packets,
usb_rcvbulkpipe(dev->udev,
dev->vbi_mode.end_point_addr));
- dev->vbi_mode.isoc_ctl.isoc_copy = isoc_copy;
- dev->vbi_mode.isoc_ctl.num_bufs = num_bufs;
+ dev->vbi_mode.bulk_ctl.bulk_copy = bulk_copy;
+ dev->vbi_mode.bulk_ctl.num_bufs = num_bufs;
dma_q->pos = 0;
dma_q->is_partial_line = 0;
dma_q->last_sav = 0;
@@ -421,42 +421,42 @@ int cx231xx_init_vbi_isoc(struct cx231xx *dev, int max_packets,
for (i = 0; i < 8; i++)
dma_q->partial_buf[i] = 0;
- dev->vbi_mode.isoc_ctl.urb = kzalloc(sizeof(void *) * num_bufs,
+ dev->vbi_mode.bulk_ctl.urb = kzalloc(sizeof(void *) * num_bufs,
GFP_KERNEL);
- if (!dev->vbi_mode.isoc_ctl.urb) {
+ if (!dev->vbi_mode.bulk_ctl.urb) {
cx231xx_errdev("cannot alloc memory for usb buffers\n");
return -ENOMEM;
}
- dev->vbi_mode.isoc_ctl.transfer_buffer =
+ dev->vbi_mode.bulk_ctl.transfer_buffer =
kzalloc(sizeof(void *) * num_bufs, GFP_KERNEL);
- if (!dev->vbi_mode.isoc_ctl.transfer_buffer) {
+ if (!dev->vbi_mode.bulk_ctl.transfer_buffer) {
cx231xx_errdev("cannot allocate memory for usbtransfer\n");
- kfree(dev->vbi_mode.isoc_ctl.urb);
+ kfree(dev->vbi_mode.bulk_ctl.urb);
return -ENOMEM;
}
- dev->vbi_mode.isoc_ctl.max_pkt_size = max_pkt_size;
- dev->vbi_mode.isoc_ctl.buf = NULL;
+ dev->vbi_mode.bulk_ctl.max_pkt_size = max_pkt_size;
+ dev->vbi_mode.bulk_ctl.buf = NULL;
- sb_size = max_packets * dev->vbi_mode.isoc_ctl.max_pkt_size;
+ sb_size = max_packets * dev->vbi_mode.bulk_ctl.max_pkt_size;
/* allocate urbs and transfer buffers */
- for (i = 0; i < dev->vbi_mode.isoc_ctl.num_bufs; i++) {
+ for (i = 0; i < dev->vbi_mode.bulk_ctl.num_bufs; i++) {
urb = usb_alloc_urb(0, GFP_KERNEL);
if (!urb) {
cx231xx_err(DRIVER_NAME
- ": cannot alloc isoc_ctl.urb %i\n", i);
+ ": cannot alloc bulk_ctl.urb %i\n", i);
cx231xx_uninit_vbi_isoc(dev);
return -ENOMEM;
}
- dev->vbi_mode.isoc_ctl.urb[i] = urb;
+ dev->vbi_mode.bulk_ctl.urb[i] = urb;
urb->transfer_flags = 0;
- dev->vbi_mode.isoc_ctl.transfer_buffer[i] =
+ dev->vbi_mode.bulk_ctl.transfer_buffer[i] =
kzalloc(sb_size, GFP_KERNEL);
- if (!dev->vbi_mode.isoc_ctl.transfer_buffer[i]) {
+ if (!dev->vbi_mode.bulk_ctl.transfer_buffer[i]) {
cx231xx_err(DRIVER_NAME
": unable to allocate %i bytes for transfer"
" buffer %i%s\n", sb_size, i,
@@ -467,15 +467,15 @@ int cx231xx_init_vbi_isoc(struct cx231xx *dev, int max_packets,
pipe = usb_rcvbulkpipe(dev->udev, dev->vbi_mode.end_point_addr);
usb_fill_bulk_urb(urb, dev->udev, pipe,
- dev->vbi_mode.isoc_ctl.transfer_buffer[i],
+ dev->vbi_mode.bulk_ctl.transfer_buffer[i],
sb_size, cx231xx_irq_vbi_callback, dma_q);
}
init_waitqueue_head(&dma_q->wq);
/* submit urbs and enables IRQ */
- for (i = 0; i < dev->vbi_mode.isoc_ctl.num_bufs; i++) {
- rc = usb_submit_urb(dev->vbi_mode.isoc_ctl.urb[i], GFP_ATOMIC);
+ for (i = 0; i < dev->vbi_mode.bulk_ctl.num_bufs; i++) {
+ rc = usb_submit_urb(dev->vbi_mode.bulk_ctl.urb[i], GFP_ATOMIC);
if (rc) {
cx231xx_err(DRIVER_NAME
": submit of urb %i failed (error=%i)\n", i,
@@ -536,7 +536,7 @@ static inline void vbi_buffer_filled(struct cx231xx *dev,
buf->vb.field_count++;
do_gettimeofday(&buf->vb.ts);
- dev->vbi_mode.isoc_ctl.buf = NULL;
+ dev->vbi_mode.bulk_ctl.buf = NULL;
list_del(&buf->vb.queue);
wake_up(&buf->vb.done);
@@ -549,11 +549,16 @@ u32 cx231xx_copy_vbi_line(struct cx231xx *dev, struct cx231xx_dmaqueue *dma_q,
struct cx231xx_buffer *buf;
u32 _line_size = dev->width * 2;
- if (dma_q->current_field != field_number)
+ if (dma_q->current_field == -1) {
+ /* Just starting up */
cx231xx_reset_vbi_buffer(dev, dma_q);
+ }
+
+ if (dma_q->current_field != field_number)
+ dma_q->lines_completed = 0;
/* get the buffer pointer */
- buf = dev->vbi_mode.isoc_ctl.buf;
+ buf = dev->vbi_mode.bulk_ctl.buf;
/* Remember the field number for next time */
dma_q->current_field = field_number;
@@ -597,8 +602,8 @@ u32 cx231xx_copy_vbi_line(struct cx231xx *dev, struct cx231xx_dmaqueue *dma_q,
vbi_buffer_filled(dev, dma_q, buf);
dma_q->pos = 0;
- buf = NULL;
dma_q->lines_completed = 0;
+ cx231xx_reset_vbi_buffer(dev, dma_q);
}
}
@@ -618,7 +623,7 @@ static inline void get_next_vbi_buf(struct cx231xx_dmaqueue *dma_q,
if (list_empty(&dma_q->active)) {
cx231xx_err(DRIVER_NAME ": No active queue to serve\n");
- dev->vbi_mode.isoc_ctl.buf = NULL;
+ dev->vbi_mode.bulk_ctl.buf = NULL;
*buf = NULL;
return;
}
@@ -630,7 +635,7 @@ static inline void get_next_vbi_buf(struct cx231xx_dmaqueue *dma_q,
outp = videobuf_to_vmalloc(&(*buf)->vb);
memset(outp, 0, (*buf)->vb.size);
- dev->vbi_mode.isoc_ctl.buf = *buf;
+ dev->vbi_mode.bulk_ctl.buf = *buf;
return;
}
@@ -640,7 +645,7 @@ void cx231xx_reset_vbi_buffer(struct cx231xx *dev,
{
struct cx231xx_buffer *buf;
- buf = dev->vbi_mode.isoc_ctl.buf;
+ buf = dev->vbi_mode.bulk_ctl.buf;
if (buf == NULL) {
/* first try to get the buffer */
@@ -664,7 +669,7 @@ int cx231xx_do_vbi_copy(struct cx231xx *dev, struct cx231xx_dmaqueue *dma_q,
void *startwrite;
int offset, lencopy;
- buf = dev->vbi_mode.isoc_ctl.buf;
+ buf = dev->vbi_mode.bulk_ctl.buf;
if (buf == NULL)
return -EINVAL;
@@ -679,6 +684,11 @@ int cx231xx_do_vbi_copy(struct cx231xx *dev, struct cx231xx_dmaqueue *dma_q,
offset = (dma_q->lines_completed * _line_size) +
current_line_bytes_copied;
+ if (dma_q->current_field == 2) {
+ /* Populate the second half of the frame */
+ offset += (dev->width * 2 * dma_q->lines_per_field);
+ }
+
/* prepare destination address */
startwrite = p_out_buffer + offset;
@@ -697,5 +707,8 @@ u8 cx231xx_is_vbi_buffer_done(struct cx231xx *dev,
height = ((dev->norm & V4L2_STD_625_50) ?
PAL_VBI_LINES : NTSC_VBI_LINES);
- return (dma_q->lines_completed == height) ? 1 : 0;
+ if (dma_q->lines_completed == height && dma_q->current_field == 2)
+ return 1;
+ else
+ return 0;
}
diff --git a/drivers/media/video/cx231xx/cx231xx-vbi.h b/drivers/media/video/cx231xx/cx231xx-vbi.h
index 89c7fe80b261..16c7d20a22a4 100644
--- a/drivers/media/video/cx231xx/cx231xx-vbi.h
+++ b/drivers/media/video/cx231xx/cx231xx-vbi.h
@@ -41,7 +41,7 @@ extern struct videobuf_queue_ops cx231xx_vbi_qops;
/* stream functions */
int cx231xx_init_vbi_isoc(struct cx231xx *dev, int max_packets,
int num_bufs, int max_pkt_size,
- int (*isoc_copy) (struct cx231xx *dev,
+ int (*bulk_copy) (struct cx231xx *dev,
struct urb *urb));
void cx231xx_uninit_vbi_isoc(struct cx231xx *dev);
diff --git a/drivers/media/video/cx231xx/cx231xx-video.c b/drivers/media/video/cx231xx/cx231xx-video.c
index e76014561aa7..b13b69fb2af6 100644
--- a/drivers/media/video/cx231xx/cx231xx-video.c
+++ b/drivers/media/video/cx231xx/cx231xx-video.c
@@ -237,7 +237,10 @@ static inline void buffer_filled(struct cx231xx *dev,
buf->vb.field_count++;
do_gettimeofday(&buf->vb.ts);
- dev->video_mode.isoc_ctl.buf = NULL;
+ if (dev->USE_ISO)
+ dev->video_mode.isoc_ctl.buf = NULL;
+ else
+ dev->video_mode.bulk_ctl.buf = NULL;
list_del(&buf->vb.queue);
wake_up(&buf->vb.done);
@@ -295,7 +298,10 @@ static inline void get_next_buf(struct cx231xx_dmaqueue *dma_q,
if (list_empty(&dma_q->active)) {
cx231xx_isocdbg("No active queue to serve\n");
- dev->video_mode.isoc_ctl.buf = NULL;
+ if (dev->USE_ISO)
+ dev->video_mode.isoc_ctl.buf = NULL;
+ else
+ dev->video_mode.bulk_ctl.buf = NULL;
*buf = NULL;
return;
}
@@ -307,7 +313,10 @@ static inline void get_next_buf(struct cx231xx_dmaqueue *dma_q,
outp = videobuf_to_vmalloc(&(*buf)->vb);
memset(outp, 0, (*buf)->vb.size);
- dev->video_mode.isoc_ctl.buf = *buf;
+ if (dev->USE_ISO)
+ dev->video_mode.isoc_ctl.buf = *buf;
+ else
+ dev->video_mode.bulk_ctl.buf = *buf;
return;
}
@@ -418,6 +427,93 @@ static inline int cx231xx_isoc_copy(struct cx231xx *dev, struct urb *urb)
return rc;
}
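+/* Bulk analog video path: scan the URB payload for SAV/EAV codes and
+ hand each completed line to the videobuf layer, mirroring the isoc
+ copy routine above. */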
+static inline int cx231xx_bulk_copy(struct cx231xx *dev, struct urb *urb)
+{
+ struct cx231xx_buffer *buf;
+ struct cx231xx_dmaqueue *dma_q = urb->context;
+ unsigned char *outp = NULL;
+ int rc = 1;
+ unsigned char *p_buffer;
+ u32 bytes_parsed = 0, buffer_size = 0;
+ u8 sav_eav = 0;
+
+ if (!dev)
+ return 0;
+
+ if ((dev->state & DEV_DISCONNECTED) || (dev->state & DEV_MISCONFIGURED))
+ return 0;
+
+ if (urb->status < 0) {
+ print_err_status(dev, -1, urb->status);
+ if (urb->status == -ENOENT)
+ return 0;
+ }
+
+ buf = dev->video_mode.bulk_ctl.buf;
+ if (buf != NULL)
+ outp = videobuf_to_vmalloc(&buf->vb);
+
+ if (1) {
+
+ /* get buffer pointer and length */
+ p_buffer = urb->transfer_buffer;
+ buffer_size = urb->actual_length;
+ bytes_parsed = 0;
+
+ if (dma_q->is_partial_line) {
+ /* Handle the case of a partial line */
+ sav_eav = dma_q->last_sav;
+ } else {
+ /* Check for a SAV/EAV overlapping
+ the buffer boundary */
+ sav_eav =
+ cx231xx_find_boundary_SAV_EAV(p_buffer,
+ dma_q->partial_buf,
+ &bytes_parsed);
+ }
+
+ sav_eav &= 0xF0;
+ /* Get the first line if we have some portion of an SAV/EAV from
+ the last buffer or a partial line */
+ if (sav_eav) {
+ bytes_parsed += cx231xx_get_video_line(dev, dma_q,
+ sav_eav, /* SAV/EAV */
+ p_buffer + bytes_parsed, /* p_buffer */
+ buffer_size - bytes_parsed);/* buf size */
+ }
+
+ /* Now parse data that is completely in this buffer */
+ /* dma_q->is_partial_line = 0; */
+
+ while (bytes_parsed < buffer_size) {
+ u32 bytes_used = 0;
+
+ sav_eav = cx231xx_find_next_SAV_EAV(
+ p_buffer + bytes_parsed, /* p_buffer */
+ buffer_size - bytes_parsed, /* buf size */
+ &bytes_used);/* bytes used to get SAV/EAV */
+
+ bytes_parsed += bytes_used;
+
+ sav_eav &= 0xF0;
+ if (sav_eav && (bytes_parsed < buffer_size)) {
+ bytes_parsed += cx231xx_get_video_line(dev,
+ dma_q, sav_eav, /* SAV/EAV */
+ p_buffer + bytes_parsed,/* p_buffer */
+ buffer_size - bytes_parsed);/*buf size*/
+ }
+ }
+
+ /* Save the last four bytes of the buffer so we can check the
+ buffer boundary condition next time */
+ memcpy(dma_q->partial_buf, p_buffer + buffer_size - 4, 4);
+ bytes_parsed = 0;
+
+ }
+ return rc;
+}
+
+
u8 cx231xx_find_boundary_SAV_EAV(u8 *p_buffer, u8 *partial_buf,
u32 *p_bytes_used)
{
@@ -533,7 +629,10 @@ u32 cx231xx_copy_video_line(struct cx231xx *dev,
cx231xx_reset_video_buffer(dev, dma_q);
/* get the buffer pointer */
- buf = dev->video_mode.isoc_ctl.buf;
+ if (dev->USE_ISO)
+ buf = dev->video_mode.isoc_ctl.buf;
+ else
+ buf = dev->video_mode.bulk_ctl.buf;
/* Remember the field number for next time */
dma_q->current_field = field_number;
@@ -596,7 +695,10 @@ void cx231xx_reset_video_buffer(struct cx231xx *dev,
dma_q->field1_done = 0;
}
- buf = dev->video_mode.isoc_ctl.buf;
+ if (dev->USE_ISO)
+ buf = dev->video_mode.isoc_ctl.buf;
+ else
+ buf = dev->video_mode.bulk_ctl.buf;
if (buf == NULL) {
u8 *outp = NULL;
@@ -626,7 +728,10 @@ int cx231xx_do_copy(struct cx231xx *dev, struct cx231xx_dmaqueue *dma_q,
void *startwrite;
int offset, lencopy;
- buf = dev->video_mode.isoc_ctl.buf;
+ if (dev->USE_ISO)
+ buf = dev->video_mode.isoc_ctl.buf;
+ else
+ buf = dev->video_mode.bulk_ctl.buf;
if (buf == NULL)
return -1;
@@ -691,7 +796,6 @@ buffer_setup(struct videobuf_queue *vq, unsigned int *count, unsigned int *size)
{
struct cx231xx_fh *fh = vq->priv_data;
struct cx231xx *dev = fh->dev;
- struct v4l2_frequency f;
*size = (fh->dev->width * fh->dev->height * dev->format->depth + 7)>>3;
if (0 == *count)
@@ -700,13 +804,6 @@ buffer_setup(struct videobuf_queue *vq, unsigned int *count, unsigned int *size)
if (*count < CX231XX_MIN_BUF)
*count = CX231XX_MIN_BUF;
- /* Ask tuner to go to analog mode */
- memset(&f, 0, sizeof(f));
- f.frequency = dev->ctl_freq;
- f.type = fh->radio ? V4L2_TUNER_RADIO : V4L2_TUNER_ANALOG_TV;
-
- call_all(dev, tuner, s_frequency, &f);
-
return 0;
}
@@ -730,8 +827,13 @@ static void free_buffer(struct videobuf_queue *vq, struct cx231xx_buffer *buf)
VIDEOBUF_ACTIVE, it won't be, though.
*/
spin_lock_irqsave(&dev->video_mode.slock, flags);
- if (dev->video_mode.isoc_ctl.buf == buf)
- dev->video_mode.isoc_ctl.buf = NULL;
+ if (dev->USE_ISO) {
+ if (dev->video_mode.isoc_ctl.buf == buf)
+ dev->video_mode.isoc_ctl.buf = NULL;
+ } else {
+ if (dev->video_mode.bulk_ctl.buf == buf)
+ dev->video_mode.bulk_ctl.buf = NULL;
+ }
spin_unlock_irqrestore(&dev->video_mode.slock, flags);
videobuf_vmalloc_free(&buf->vb);
@@ -764,14 +866,27 @@ buffer_prepare(struct videobuf_queue *vq, struct videobuf_buffer *vb,
goto fail;
}
- if (!dev->video_mode.isoc_ctl.num_bufs)
- urb_init = 1;
-
+ if (dev->USE_ISO) {
+ if (!dev->video_mode.isoc_ctl.num_bufs)
+ urb_init = 1;
+ } else {
+ if (!dev->video_mode.bulk_ctl.num_bufs)
+ urb_init = 1;
+ }
+ /*cx231xx_info("urb_init=%d dev->video_mode.max_pkt_size=%d\n",
+ urb_init, dev->video_mode.max_pkt_size);*/
if (urb_init) {
- rc = cx231xx_init_isoc(dev, CX231XX_NUM_PACKETS,
+ dev->mode_tv = 0;
+ if (dev->USE_ISO)
+ rc = cx231xx_init_isoc(dev, CX231XX_NUM_PACKETS,
CX231XX_NUM_BUFS,
dev->video_mode.max_pkt_size,
cx231xx_isoc_copy);
+ else
+ rc = cx231xx_init_bulk(dev, CX231XX_NUM_PACKETS,
+ CX231XX_NUM_BUFS,
+ dev->video_mode.max_pkt_size,
+ cx231xx_bulk_copy);
if (rc < 0)
goto fail;
}
@@ -894,22 +1009,6 @@ static int check_dev(struct cx231xx *dev)
return 0;
}
-static void get_scale(struct cx231xx *dev,
- unsigned int width, unsigned int height,
- unsigned int *hscale, unsigned int *vscale)
-{
- unsigned int maxw = norm_maxw(dev);
- unsigned int maxh = norm_maxh(dev);
-
- *hscale = (((unsigned long)maxw) << 12) / width - 4096L;
- if (*hscale >= 0x4000)
- *hscale = 0x3fff;
-
- *vscale = (((unsigned long)maxh) << 12) / height - 4096L;
- if (*vscale >= 0x4000)
- *vscale = 0x3fff;
-}
-
/* ------------------------------------------------------------------
IOCTL vidioc handling
------------------------------------------------------------------*/
@@ -920,8 +1019,6 @@ static int vidioc_g_fmt_vid_cap(struct file *file, void *priv,
struct cx231xx_fh *fh = priv;
struct cx231xx *dev = fh->dev;
- mutex_lock(&dev->lock);
-
f->fmt.pix.width = dev->width;
f->fmt.pix.height = dev->height;
f->fmt.pix.pixelformat = dev->format->fourcc;
@@ -931,8 +1028,6 @@ static int vidioc_g_fmt_vid_cap(struct file *file, void *priv,
f->fmt.pix.field = V4L2_FIELD_INTERLACED;
- mutex_unlock(&dev->lock);
-
return 0;
}
@@ -956,7 +1051,6 @@ static int vidioc_try_fmt_vid_cap(struct file *file, void *priv,
unsigned int height = f->fmt.pix.height;
unsigned int maxw = norm_maxw(dev);
unsigned int maxh = norm_maxh(dev);
- unsigned int hscale, vscale;
struct cx231xx_fmt *fmt;
fmt = format_by_fourcc(f->fmt.pix.pixelformat);
@@ -970,11 +1064,6 @@ static int vidioc_try_fmt_vid_cap(struct file *file, void *priv,
height must be even because of interlacing */
v4l_bound_align_image(&width, 48, maxw, 1, &height, 32, maxh, 1, 0);
- get_scale(dev, width, height, &hscale, &vscale);
-
- width = (((unsigned long)maxw) << 12) / (hscale + 4096L);
- height = (((unsigned long)maxh) << 12) / (vscale + 4096L);
-
f->fmt.pix.width = width;
f->fmt.pix.height = height;
f->fmt.pix.pixelformat = fmt->fourcc;
@@ -999,47 +1088,35 @@ static int vidioc_s_fmt_vid_cap(struct file *file, void *priv,
if (rc < 0)
return rc;
- mutex_lock(&dev->lock);
-
vidioc_try_fmt_vid_cap(file, priv, f);
fmt = format_by_fourcc(f->fmt.pix.pixelformat);
- if (!fmt) {
- rc = -EINVAL;
- goto out;
- }
+ if (!fmt)
+ return -EINVAL;
if (videobuf_queue_is_busy(&fh->vb_vidq)) {
cx231xx_errdev("%s queue busy\n", __func__);
- rc = -EBUSY;
- goto out;
+ return -EBUSY;
}
if (dev->stream_on && !fh->stream_on) {
cx231xx_errdev("%s device in use by another fh\n", __func__);
- rc = -EBUSY;
- goto out;
+ return -EBUSY;
}
/* set new image size */
dev->width = f->fmt.pix.width;
dev->height = f->fmt.pix.height;
dev->format = fmt;
- get_scale(dev, dev->width, dev->height, &dev->hscale, &dev->vscale);
v4l2_fill_mbus_format(&mbus_fmt, &f->fmt.pix, V4L2_MBUS_FMT_FIXED);
call_all(dev, video, s_mbus_fmt, &mbus_fmt);
v4l2_fill_pix_format(&f->fmt.pix, &mbus_fmt);
- /* Set the correct alternate setting for this resolution */
- cx231xx_resolution_set(dev);
-
-out:
- mutex_unlock(&dev->lock);
return rc;
}
-static int vidioc_g_std(struct file *file, void *priv, v4l2_std_id * id)
+static int vidioc_g_std(struct file *file, void *priv, v4l2_std_id *id)
{
struct cx231xx_fh *fh = priv;
struct cx231xx *dev = fh->dev;
@@ -1052,6 +1129,7 @@ static int vidioc_s_std(struct file *file, void *priv, v4l2_std_id *norm)
{
struct cx231xx_fh *fh = priv;
struct cx231xx *dev = fh->dev;
+ struct v4l2_mbus_framefmt mbus_fmt;
struct v4l2_format f;
int rc;
@@ -1061,7 +1139,6 @@ static int vidioc_s_std(struct file *file, void *priv, v4l2_std_id *norm)
cx231xx_info("vidioc_s_std : 0x%x\n", (unsigned int)*norm);
- mutex_lock(&dev->lock);
dev->norm = *norm;
/* Adjusts width/height, if needed */
@@ -1069,16 +1146,18 @@ static int vidioc_s_std(struct file *file, void *priv, v4l2_std_id *norm)
f.fmt.pix.height = dev->height;
vidioc_try_fmt_vid_cap(file, priv, &f);
- /* set new image size */
- dev->width = f.fmt.pix.width;
- dev->height = f.fmt.pix.height;
- get_scale(dev, dev->width, dev->height, &dev->hscale, &dev->vscale);
-
call_all(dev, core, s_std, dev->norm);
- mutex_unlock(&dev->lock);
+ /* We need to reset basic properties in the decoder related to
+ resolution (since a standard change affects things like the number
+ of lines in VACT, etc) */
+ v4l2_fill_mbus_format(&mbus_fmt, &f.fmt.pix, V4L2_MBUS_FMT_FIXED);
+ call_all(dev, video, s_mbus_fmt, &mbus_fmt);
+ v4l2_fill_pix_format(&f.fmt.pix, &mbus_fmt);
- cx231xx_resolution_set(dev);
+ /* set new image size */
+ dev->width = f.fmt.pix.width;
+ dev->height = f.fmt.pix.height;
/* do mode control overrides */
cx231xx_do_mode_ctrl_overrides(dev);
@@ -1138,6 +1217,7 @@ static int vidioc_s_input(struct file *file, void *priv, unsigned int i)
struct cx231xx *dev = fh->dev;
int rc;
+ dev->mode_tv = 0;
rc = check_dev(dev);
if (rc < 0)
return rc;
@@ -1147,11 +1227,16 @@ static int vidioc_s_input(struct file *file, void *priv, unsigned int i)
if (0 == INPUT(i)->type)
return -EINVAL;
- mutex_lock(&dev->lock);
-
video_mux(dev, i);
- mutex_unlock(&dev->lock);
+ if (INPUT(i)->type == CX231XX_VMUX_TELEVISION ||
+ INPUT(i)->type == CX231XX_VMUX_CABLE) {
+ /* There's a tuner, so reset the standard and put it on the
+ last known frequency (since it was probably powered down
+ until now) */
+ call_all(dev, core, s_std, dev->norm);
+ }
+
return 0;
}
@@ -1227,9 +1312,7 @@ static int vidioc_queryctrl(struct file *file, void *priv,
}
*qc = cx231xx_ctls[i].v;
- mutex_lock(&dev->lock);
call_all(dev, core, queryctrl, qc);
- mutex_unlock(&dev->lock);
if (qc->type)
return 0;
@@ -1248,9 +1331,7 @@ static int vidioc_g_ctrl(struct file *file, void *priv,
if (rc < 0)
return rc;
- mutex_lock(&dev->lock);
call_all(dev, core, g_ctrl, ctrl);
- mutex_unlock(&dev->lock);
return rc;
}
@@ -1265,9 +1346,7 @@ static int vidioc_s_ctrl(struct file *file, void *priv,
if (rc < 0)
return rc;
- mutex_lock(&dev->lock);
call_all(dev, core, s_ctrl, ctrl);
- mutex_unlock(&dev->lock);
return rc;
}
@@ -1307,9 +1386,7 @@ static int vidioc_s_tuner(struct file *file, void *priv, struct v4l2_tuner *t)
if (0 != t->index)
return -EINVAL;
#if 0
- mutex_lock(&dev->lock);
call_all(dev, tuner, s_tuner, t);
- mutex_unlock(&dev->lock);
#endif
return 0;
}
@@ -1320,14 +1397,11 @@ static int vidioc_g_frequency(struct file *file, void *priv,
struct cx231xx_fh *fh = priv;
struct cx231xx *dev = fh->dev;
- mutex_lock(&dev->lock);
f->type = fh->radio ? V4L2_TUNER_RADIO : V4L2_TUNER_ANALOG_TV;
f->frequency = dev->ctl_freq;
call_all(dev, tuner, g_frequency, f);
- mutex_unlock(&dev->lock);
-
return 0;
}
@@ -1337,6 +1411,11 @@ static int vidioc_s_frequency(struct file *file, void *priv,
struct cx231xx_fh *fh = priv;
struct cx231xx *dev = fh->dev;
int rc;
+ u32 if_frequency = 5400000;
+
+ cx231xx_info("Enter vidioc_s_frequency()f->frequency=%d;f->type=%d\n",
+ f->frequency, f->type);
+ /*cx231xx_info("f->type: 1-radio 2-analogTV 3-digitalTV\n");*/
rc = check_dev(dev);
if (rc < 0)
@@ -1353,21 +1432,34 @@ static int vidioc_s_frequency(struct file *file, void *priv,
/* set pre channel change settings in DIF first */
rc = cx231xx_tuner_pre_channel_change(dev);
- mutex_lock(&dev->lock);
-
dev->ctl_freq = f->frequency;
-
- if (dev->tuner_type == TUNER_XC5000) {
- if (dev->cx231xx_set_analog_freq != NULL)
- dev->cx231xx_set_analog_freq(dev, f->frequency);
- } else
- call_all(dev, tuner, s_frequency, f);
-
- mutex_unlock(&dev->lock);
+ call_all(dev, tuner, s_frequency, f);
/* set post channel change settings in DIF first */
rc = cx231xx_tuner_post_channel_change(dev);
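+ /* With the NXP TDA18271 the low-IF path must track the video IF of
+ the selected TV standard, so map dev->norm to an IF value and
+ reprogram the Colibri/DIF blocks accordingly. */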
+ if (dev->tuner_type == TUNER_NXP_TDA18271) {
+ if (dev->norm & (V4L2_STD_MN | V4L2_STD_NTSC_443))
+ if_frequency = 5400000; /*5.4MHz */
+ else if (dev->norm & V4L2_STD_B)
+ if_frequency = 6000000; /*6.0MHz */
+ else if (dev->norm & (V4L2_STD_PAL_DK | V4L2_STD_SECAM_DK))
+ if_frequency = 6900000; /*6.9MHz */
+ else if (dev->norm & V4L2_STD_GH)
+ if_frequency = 7100000; /*7.1MHz */
+ else if (dev->norm & V4L2_STD_PAL_I)
+ if_frequency = 7250000; /*7.25MHz */
+ else if (dev->norm & V4L2_STD_SECAM_L)
+ if_frequency = 6900000; /*6.9MHz */
+ else if (dev->norm & V4L2_STD_SECAM_LC)
+ if_frequency = 1250000; /*1.25MHz */
+
+ cx231xx_info("if_frequency is set to %d\n", if_frequency);
+ cx231xx_set_Colibri_For_LowIF(dev, if_frequency, 1, 1);
+
+ update_HH_register_after_set_DIF(dev);
+ }
+
cx231xx_info("Set New FREQUENCY to %d\n", f->frequency);
return rc;
@@ -1445,17 +1537,92 @@ static int vidioc_g_register(struct file *file, void *priv,
case V4L2_CHIP_MATCH_I2C_DRIVER:
call_all(dev, core, g_register, reg);
return 0;
- case V4L2_CHIP_MATCH_I2C_ADDR:
- /* Not supported yet */
- return -EINVAL;
+ case V4L2_CHIP_MATCH_I2C_ADDR:/*for register debug*/
+ switch (reg->match.addr) {
+ case 0: /* Cx231xx - internal registers */
+ ret = cx231xx_read_ctrl_reg(dev, VRT_GET_REGISTER,
+ (u16)reg->reg, value, 4);
+ reg->val = value[0] | value[1] << 8 |
+ value[2] << 16 | value[3] << 24;
+
+ break;
+ case 0x600:/* AFE - read byte */
+ ret = cx231xx_read_i2c_master(dev, AFE_DEVICE_ADDRESS,
+ (u16)reg->reg, 2,
+ &data, 1 , 0);
+ reg->val = le32_to_cpu(data & 0xff);
+ break;
+
+ case 0x880:/* Video Block - read byte */
+ if (reg->reg < 0x0b) {
+ ret = cx231xx_read_i2c_master(dev,
+ VID_BLK_I2C_ADDRESS,
+ (u16)reg->reg, 2,
+ &data, 1 , 0);
+ reg->val = le32_to_cpu(data & 0xff);
+ } else {
+ ret = cx231xx_read_i2c_master(dev,
+ VID_BLK_I2C_ADDRESS,
+ (u16)reg->reg, 2,
+ &data, 4 , 0);
+ reg->val = le32_to_cpu(data);
+ }
+ break;
+ case 0x980:
+ ret = cx231xx_read_i2c_master(dev,
+ I2S_BLK_DEVICE_ADDRESS,
+ (u16)reg->reg, 1,
+ &data, 1 , 0);
+ reg->val = le32_to_cpu(data & 0xff);
+ break;
+ case 0x400:
+ ret =
+ cx231xx_read_i2c_master(dev, 0x40,
+ (u16)reg->reg, 1,
+ &data, 1 , 0);
+ reg->val = le32_to_cpu(data & 0xff);
+ break;
+ case 0xc01:
+ ret =
+ cx231xx_read_i2c_master(dev, 0xc0,
+ (u16)reg->reg, 2,
+ &data, 38, 1);
+ reg->val = le32_to_cpu(data);
+ break;
+ case 0x022:
+ ret =
+ cx231xx_read_i2c_master(dev, 0x02,
+ (u16)reg->reg, 1,
+ &data, 1, 2);
+ reg->val = le32_to_cpu(data & 0xff);
+ break;
+ case 0x322:
+ ret = cx231xx_read_i2c_master(dev,
+ 0x32,
+ (u16)reg->reg, 1,
+ &data, 4 , 2);
+ reg->val = le32_to_cpu(data);
+ break;
+ case 0x342:
+ ret = cx231xx_read_i2c_master(dev,
+ 0x34,
+ (u16)reg->reg, 1,
+ &data, 4 , 2);
+ reg->val = le32_to_cpu(data);
+ break;
+
+ default:
+ cx231xx_info("no match device address!!\n");
+ break;
+ }
+ return ret < 0 ? ret : 0;
+ /*return -EINVAL;*/
default:
if (!v4l2_chip_match_host(&reg->match))
return -EINVAL;
}
- mutex_lock(&dev->lock);
call_all(dev, core, g_register, reg);
- mutex_unlock(&dev->lock);
return ret;
}
@@ -1531,14 +1698,96 @@ static int vidioc_s_register(struct file *file, void *priv,
}
}
return ret < 0 ? ret : 0;
+ case V4L2_CHIP_MATCH_I2C_ADDR:
+ {
+ value = (u32) buf & 0xffffffff;
+
+ switch (reg->match.addr) {
+ case 0:/*cx231xx internal registers*/
+ data[0] = (u8) value;
+ data[1] = (u8) (value >> 8);
+ data[2] = (u8) (value >> 16);
+ data[3] = (u8) (value >> 24);
+ ret = cx231xx_write_ctrl_reg(dev,
+ VRT_SET_REGISTER,
+ (u16)reg->reg, data,
+ 4);
+ break;
+ case 0x600:/* AFE - write byte */
+ ret = cx231xx_write_i2c_master(dev,
+ AFE_DEVICE_ADDRESS,
+ (u16)reg->reg, 2,
+ value, 1 , 0);
+ break;
+ case 0x880:/* Video Block - write byte */
+ if (reg->reg < 0x0b)
+ cx231xx_write_i2c_master(dev,
+ VID_BLK_I2C_ADDRESS,
+ (u16)reg->reg, 2,
+ value, 1, 0);
+ else
+ cx231xx_write_i2c_master(dev,
+ VID_BLK_I2C_ADDRESS,
+ (u16)reg->reg, 2,
+ value, 4, 0);
+ break;
+ case 0x980:
+ ret =
+ cx231xx_write_i2c_master(dev,
+ I2S_BLK_DEVICE_ADDRESS,
+ (u16)reg->reg, 1,
+ value, 1, 0);
+ break;
+ case 0x400:
+ ret =
+ cx231xx_write_i2c_master(dev,
+ 0x40,
+ (u16)reg->reg, 1,
+ value, 1, 0);
+ break;
+ case 0xc01:
+ ret =
+ cx231xx_write_i2c_master(dev,
+ 0xc0,
+ (u16)reg->reg, 1,
+ value, 1, 1);
+ break;
+
+ case 0x022:
+ ret =
+ cx231xx_write_i2c_master(dev,
+ 0x02,
+ (u16)reg->reg, 1,
+ value, 1, 2);
+ break;
+ case 0x322:
+ ret =
+ cx231xx_write_i2c_master(dev,
+ 0x32,
+ (u16)reg->reg, 1,
+ value, 4, 2);
+ break;
+
+ case 0x342:
+ ret =
+ cx231xx_write_i2c_master(dev,
+ 0x34,
+ (u16)reg->reg, 1,
+ value, 4, 2);
+ break;
+ default:
+ cx231xx_info("no match device address, "
+ "the value is %x\n", reg->match.addr);
+ break;
+
+ }
+
+ }
default:
break;
}
- mutex_lock(&dev->lock);
call_all(dev, core, s_register, reg);
- mutex_unlock(&dev->lock);
return ret;
}
@@ -1575,7 +1824,6 @@ static int vidioc_streamon(struct file *file, void *priv,
if (rc < 0)
return rc;
- mutex_lock(&dev->lock);
rc = res_get(fh);
if (likely(rc >= 0))
@@ -1583,8 +1831,6 @@ static int vidioc_streamon(struct file *file, void *priv,
call_all(dev, video, s_stream, 1);
- mutex_unlock(&dev->lock);
-
return rc;
}
@@ -1605,15 +1851,11 @@ static int vidioc_streamoff(struct file *file, void *priv,
if (type != fh->type)
return -EINVAL;
- mutex_lock(&dev->lock);
-
cx25840_call(dev, video, s_stream, 0);
videobuf_streamoff(&fh->vb_vidq);
res_free(fh);
- mutex_unlock(&dev->lock);
-
return 0;
}
@@ -1668,8 +1910,6 @@ static int vidioc_g_fmt_sliced_vbi_cap(struct file *file, void *priv,
if (rc < 0)
return rc;
- mutex_lock(&dev->lock);
-
f->fmt.sliced.service_set = 0;
call_all(dev, vbi, g_sliced_fmt, &f->fmt.sliced);
@@ -1677,7 +1917,6 @@ static int vidioc_g_fmt_sliced_vbi_cap(struct file *file, void *priv,
if (f->fmt.sliced.service_set == 0)
rc = -EINVAL;
- mutex_unlock(&dev->lock);
return rc;
}
@@ -1692,9 +1931,7 @@ static int vidioc_try_set_sliced_vbi_cap(struct file *file, void *priv,
if (rc < 0)
return rc;
- mutex_lock(&dev->lock);
call_all(dev, vbi, g_sliced_fmt, &f->fmt.sliced);
- mutex_unlock(&dev->lock);
if (f->fmt.sliced.service_set == 0)
return -EINVAL;
@@ -1709,12 +1946,10 @@ static int vidioc_g_fmt_vbi_cap(struct file *file, void *priv,
{
struct cx231xx_fh *fh = priv;
struct cx231xx *dev = fh->dev;
-
- f->fmt.vbi.sampling_rate = (dev->norm & V4L2_STD_625_50) ?
- 35468950 : 28636363;
+ f->fmt.vbi.sampling_rate = 6750000 * 4;
f->fmt.vbi.samples_per_line = VBI_LINE_LENGTH;
f->fmt.vbi.sample_format = V4L2_PIX_FMT_GREY;
- f->fmt.vbi.offset = 64 * 4;
+ f->fmt.vbi.offset = 0;
f->fmt.vbi.start[0] = (dev->norm & V4L2_STD_625_50) ?
PAL_VBI_START_LINE : NTSC_VBI_START_LINE;
f->fmt.vbi.count[0] = (dev->norm & V4L2_STD_625_50) ?
@@ -1739,11 +1974,10 @@ static int vidioc_try_fmt_vbi_cap(struct file *file, void *priv,
}
f->type = V4L2_BUF_TYPE_VBI_CAPTURE;
- f->fmt.vbi.sampling_rate = (dev->norm & V4L2_STD_625_50) ?
- 35468950 : 28636363;
+ f->fmt.vbi.sampling_rate = 6750000 * 4;
f->fmt.vbi.samples_per_line = VBI_LINE_LENGTH;
f->fmt.vbi.sample_format = V4L2_PIX_FMT_GREY;
- f->fmt.vbi.offset = 244;
+ f->fmt.vbi.offset = 0;
f->fmt.vbi.flags = 0;
f->fmt.vbi.start[0] = (dev->norm & V4L2_STD_625_50) ?
PAL_VBI_START_LINE : NTSC_VBI_START_LINE;
@@ -1847,9 +2081,7 @@ static int radio_g_tuner(struct file *file, void *priv, struct v4l2_tuner *t)
strcpy(t->name, "Radio");
t->type = V4L2_TUNER_RADIO;
- mutex_lock(&dev->lock);
call_all(dev, tuner, s_tuner, t);
- mutex_unlock(&dev->lock);
return 0;
}
@@ -1880,9 +2112,7 @@ static int radio_s_tuner(struct file *file, void *priv, struct v4l2_tuner *t)
if (0 != t->index)
return -EINVAL;
- mutex_lock(&dev->lock);
call_all(dev, tuner, s_tuner, t);
- mutex_unlock(&dev->lock);
return 0;
}
@@ -1941,8 +2171,6 @@ static int cx231xx_v4l2_open(struct file *filp)
break;
}
- mutex_lock(&dev->lock);
-
cx231xx_videodbg("open dev=%s type=%s users=%d\n",
video_device_node_name(vdev), v4l2_type_names[fh_type],
dev->users);
@@ -1952,7 +2180,6 @@ static int cx231xx_v4l2_open(struct file *filp)
if (errCode < 0) {
cx231xx_errdev
("Device locked on digital mode. Can't open analog\n");
- mutex_unlock(&dev->lock);
return -EBUSY;
}
#endif
@@ -1960,7 +2187,6 @@ static int cx231xx_v4l2_open(struct file *filp)
fh = kzalloc(sizeof(struct cx231xx_fh), GFP_KERNEL);
if (!fh) {
cx231xx_errdev("cx231xx-video.c: Out of memory?!\n");
- mutex_unlock(&dev->lock);
return -ENOMEM;
}
fh->dev = dev;
@@ -1971,16 +2197,18 @@ static int cx231xx_v4l2_open(struct file *filp)
if (fh->type == V4L2_BUF_TYPE_VIDEO_CAPTURE && dev->users == 0) {
dev->width = norm_maxw(dev);
dev->height = norm_maxh(dev);
- dev->hscale = 0;
- dev->vscale = 0;
/* Power up in Analog TV mode */
- cx231xx_set_power_mode(dev, POLARIS_AVMODE_ANALOGT_TV);
+ if (dev->model == CX231XX_BOARD_CNXT_VIDEO_GRABBER ||
+ dev->model == CX231XX_BOARD_HAUPPAUGE_USBLIVE2)
+ cx231xx_set_power_mode(dev,
+ POLARIS_AVMODE_ENXTERNAL_AV);
+ else
+ cx231xx_set_power_mode(dev, POLARIS_AVMODE_ANALOGT_TV);
#if 0
cx231xx_set_mode(dev, CX231XX_ANALOG_MODE);
#endif
- cx231xx_resolution_set(dev);
/* set video alternate setting */
cx231xx_set_video_alternate(dev);
@@ -1991,7 +2219,6 @@ static int cx231xx_v4l2_open(struct file *filp)
/* device needs to be initialized before isoc transfer */
dev->video_input = dev->video_input > 2 ? 2 : dev->video_input;
- video_mux(dev, dev->video_input);
}
if (fh->radio) {
@@ -2008,20 +2235,22 @@ static int cx231xx_v4l2_open(struct file *filp)
videobuf_queue_vmalloc_init(&fh->vb_vidq, &cx231xx_video_qops,
NULL, &dev->video_mode.slock,
fh->type, V4L2_FIELD_INTERLACED,
- sizeof(struct cx231xx_buffer), fh);
+ sizeof(struct cx231xx_buffer),
+ fh, &dev->lock);
if (fh->type == V4L2_BUF_TYPE_VBI_CAPTURE) {
/* Set the required alternate setting VBI interface works in
Bulk mode only */
- cx231xx_set_alt_setting(dev, INDEX_VANC, 0);
+ if (dev->model != CX231XX_BOARD_CNXT_VIDEO_GRABBER &&
+ dev->model != CX231XX_BOARD_HAUPPAUGE_USBLIVE2)
+ cx231xx_set_alt_setting(dev, INDEX_VANC, 0);
videobuf_queue_vmalloc_init(&fh->vb_vidq, &cx231xx_vbi_qops,
NULL, &dev->vbi_mode.slock,
fh->type, V4L2_FIELD_SEQ_TB,
- sizeof(struct cx231xx_buffer), fh);
+ sizeof(struct cx231xx_buffer),
+ fh, &dev->lock);
}
- mutex_unlock(&dev->lock);
-
return errCode;
}
@@ -2054,6 +2283,10 @@ void cx231xx_release_analog_resources(struct cx231xx *dev)
if (dev->vdev) {
cx231xx_info("V4L2 device %s deregistered\n",
video_device_node_name(dev->vdev));
+
+ if (dev->model == CX231XX_BOARD_CNXT_VIDEO_GRABBER)
+ cx231xx_417_unregister(dev);
+
if (video_is_registered(dev->vdev))
video_unregister_device(dev->vdev);
else
@@ -2074,39 +2307,44 @@ static int cx231xx_v4l2_close(struct file *filp)
cx231xx_videodbg("users=%d\n", dev->users);
- mutex_lock(&dev->lock);
-
+ cx231xx_videodbg("users=%d\n", dev->users);
if (res_check(fh))
res_free(fh);
- if (fh->type == V4L2_BUF_TYPE_VBI_CAPTURE) {
- videobuf_stop(&fh->vb_vidq);
- videobuf_mmap_free(&fh->vb_vidq);
-
- /* the device is already disconnect,
- free the remaining resources */
- if (dev->state & DEV_DISCONNECTED) {
- cx231xx_release_resources(dev);
- mutex_unlock(&dev->lock);
- kfree(dev);
- return 0;
- }
+ /* To work around error -71 on EP0 for the Video Grabber boards,
+ the following must be skipped. */
+ if (dev->model != CX231XX_BOARD_CNXT_VIDEO_GRABBER &&
+ dev->model != CX231XX_BOARD_HAUPPAUGE_USBLIVE2)
+ if (fh->type == V4L2_BUF_TYPE_VBI_CAPTURE) {
+ videobuf_stop(&fh->vb_vidq);
+ videobuf_mmap_free(&fh->vb_vidq);
+
+ /* the device is already disconnected,
+ free the remaining resources */
+ if (dev->state & DEV_DISCONNECTED) {
+ if (atomic_read(&dev->devlist_count) > 0) {
+ cx231xx_release_resources(dev);
+ kfree(dev);
+ dev = NULL;
+ return 0;
+ }
+ return 0;
+ }
- /* do this before setting alternate! */
- cx231xx_uninit_vbi_isoc(dev);
+ /* do this before setting alternate! */
+ cx231xx_uninit_vbi_isoc(dev);
- /* set alternate 0 */
- if (!dev->vbi_or_sliced_cc_mode)
- cx231xx_set_alt_setting(dev, INDEX_VANC, 0);
- else
- cx231xx_set_alt_setting(dev, INDEX_HANC, 0);
+ /* set alternate 0 */
+ if (!dev->vbi_or_sliced_cc_mode)
+ cx231xx_set_alt_setting(dev, INDEX_VANC, 0);
+ else
+ cx231xx_set_alt_setting(dev, INDEX_HANC, 0);
- kfree(fh);
- dev->users--;
- wake_up_interruptible_nr(&dev->open, 1);
- mutex_unlock(&dev->lock);
- return 0;
- }
+ kfree(fh);
+ dev->users--;
+ wake_up_interruptible_nr(&dev->open, 1);
+ return 0;
+ }
if (dev->users == 1) {
videobuf_stop(&fh->vb_vidq);
@@ -2116,8 +2354,8 @@ static int cx231xx_v4l2_close(struct file *filp)
free the remaining resources */
if (dev->state & DEV_DISCONNECTED) {
cx231xx_release_resources(dev);
- mutex_unlock(&dev->lock);
kfree(dev);
+ dev = NULL;
return 0;
}
@@ -2125,7 +2363,10 @@ static int cx231xx_v4l2_close(struct file *filp)
call_all(dev, core, s_power, 0);
/* do this before setting alternate! */
- cx231xx_uninit_isoc(dev);
+ if (dev->USE_ISO)
+ cx231xx_uninit_isoc(dev);
+ else
+ cx231xx_uninit_bulk(dev);
cx231xx_set_mode(dev, CX231XX_SUSPEND);
/* set alternate 0 */
@@ -2134,7 +2375,6 @@ static int cx231xx_v4l2_close(struct file *filp)
kfree(fh);
dev->users--;
wake_up_interruptible_nr(&dev->open, 1);
- mutex_unlock(&dev->lock);
return 0;
}
@@ -2156,9 +2396,7 @@ cx231xx_v4l2_read(struct file *filp, char __user *buf, size_t count,
if ((fh->type == V4L2_BUF_TYPE_VIDEO_CAPTURE) ||
(fh->type == V4L2_BUF_TYPE_VBI_CAPTURE)) {
- mutex_lock(&dev->lock);
rc = res_get(fh);
- mutex_unlock(&dev->lock);
if (unlikely(rc < 0))
return rc;
@@ -2173,7 +2411,7 @@ cx231xx_v4l2_read(struct file *filp, char __user *buf, size_t count,
* cx231xx_v4l2_poll()
* will allocate buffers when called for the first time
*/
-static unsigned int cx231xx_v4l2_poll(struct file *filp, poll_table * wait)
+static unsigned int cx231xx_v4l2_poll(struct file *filp, poll_table *wait)
{
struct cx231xx_fh *fh = filp->private_data;
struct cx231xx *dev = fh->dev;
@@ -2183,9 +2421,7 @@ static unsigned int cx231xx_v4l2_poll(struct file *filp, poll_table * wait)
if (rc < 0)
return rc;
- mutex_lock(&dev->lock);
rc = res_get(fh);
- mutex_unlock(&dev->lock);
if (unlikely(rc < 0))
return POLLERR;
@@ -2210,9 +2446,7 @@ static int cx231xx_v4l2_mmap(struct file *filp, struct vm_area_struct *vma)
if (rc < 0)
return rc;
- mutex_lock(&dev->lock);
rc = res_get(fh);
- mutex_unlock(&dev->lock);
if (unlikely(rc < 0))
return rc;
@@ -2234,7 +2468,7 @@ static const struct v4l2_file_operations cx231xx_v4l_fops = {
.read = cx231xx_v4l2_read,
.poll = cx231xx_v4l2_poll,
.mmap = cx231xx_v4l2_mmap,
- .ioctl = video_ioctl2,
+ .unlocked_ioctl = video_ioctl2,
};
static const struct v4l2_ioctl_ops video_ioctl_ops = {
@@ -2336,6 +2570,7 @@ static struct video_device *cx231xx_vdev_init(struct cx231xx *dev,
vfd->v4l2_dev = &dev->v4l2_dev;
vfd->release = video_device_release;
vfd->debug = video_debug;
+ vfd->lock = &dev->lock;
snprintf(vfd->name, sizeof(vfd->name), "%s %s", dev->name, type_name);
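For context (not part of the patch): switching to .unlocked_ioctl and setting vfd->lock = &dev->lock, as the hunks above do, lets the V4L2 core of this kernel generation take dev->lock around the file operations itself, which is why the hand-rolled mutex_lock()/mutex_unlock() pairs are dropped throughout this file. A minimal sketch of that registration pattern, with hypothetical names apart from video_ioctl2 and dev->lock:

/* Sketch only -- not part of the patch.  Registration pattern assumed by
 * the locking removal in this file: the V4L2 core serializes fops on
 * vfd->lock, and videobuf is handed the same mutex at queue init time. */
static const struct v4l2_file_operations example_fops = {
	.owner          = THIS_MODULE,
	.open           = cx231xx_v4l2_open,
	.release        = cx231xx_v4l2_close,
	.unlocked_ioctl = video_ioctl2,	/* core-locked ioctl path */
};

static void example_setup_vdev(struct cx231xx *dev, struct video_device *vfd)
{
	vfd->fops = &example_fops;
	vfd->lock = &dev->lock;	/* taken by the core around every fop */
}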
@@ -2358,12 +2593,12 @@ int cx231xx_register_analog_devices(struct cx231xx *dev)
dev->width = norm_maxw(dev);
dev->height = norm_maxh(dev);
dev->interlaced = 0;
- dev->hscale = 0;
- dev->vscale = 0;
/* Analog specific initialization */
dev->format = &format[0];
- /* video_mux(dev, dev->video_input); */
+
+ /* Set the initial input */
+ video_mux(dev, dev->video_input);
/* Audio defaults */
dev->mute = 1;
diff --git a/drivers/media/video/cx231xx/cx231xx.h b/drivers/media/video/cx231xx/cx231xx.h
index 38d417191a65..d067df9b81e7 100644
--- a/drivers/media/video/cx231xx/cx231xx.h
+++ b/drivers/media/video/cx231xx/cx231xx.h
@@ -27,16 +27,15 @@
#include <linux/ioctl.h>
#include <linux/i2c.h>
#include <linux/i2c-algo-bit.h>
+#include <linux/workqueue.h>
#include <linux/mutex.h>
+#include <media/cx2341x.h>
#include <media/videobuf-vmalloc.h>
#include <media/v4l2-device.h>
#include <media/ir-core.h>
-#if defined(CONFIG_VIDEO_CX231XX_DVB) || \
- defined(CONFIG_VIDEO_CX231XX_DVB_MODULE)
#include <media/videobuf-dvb.h>
-#endif
#include "cx231xx-reg.h"
#include "cx231xx-pcb-cfg.h"
@@ -49,12 +48,20 @@
#define AFE_DEVICE_ADDRESS 0x60
#define I2S_BLK_DEVICE_ADDRESS 0x98
#define VID_BLK_I2C_ADDRESS 0x88
+#define VERVE_I2C_ADDRESS 0x40
#define DIF_USE_BASEBAND 0xFFFFFFFF
/* Boards supported by driver */
#define CX231XX_BOARD_UNKNOWN 0
-#define CX231XX_BOARD_CNXT_RDE_250 1
-#define CX231XX_BOARD_CNXT_RDU_250 2
+#define CX231XX_BOARD_CNXT_CARRAERA 1
+#define CX231XX_BOARD_CNXT_SHELBY 2
+#define CX231XX_BOARD_CNXT_RDE_253S 3
+#define CX231XX_BOARD_CNXT_RDU_253S 4
+#define CX231XX_BOARD_CNXT_VIDEO_GRABBER 5
+#define CX231XX_BOARD_CNXT_RDE_250 6
+#define CX231XX_BOARD_CNXT_RDU_250 7
+#define CX231XX_BOARD_HAUPPAUGE_EXETER 8
+#define CX231XX_BOARD_HAUPPAUGE_USBLIVE2 9
/* Limits minimum and default number of buffers */
#define CX231XX_MIN_BUF 4
@@ -95,6 +102,24 @@
#define CX231XX_URB_TIMEOUT \
msecs_to_jiffies(CX231XX_NUM_BUFS * CX231XX_NUM_PACKETS)
+#define CX231xx_NORMS (\
+ V4L2_STD_NTSC_M | V4L2_STD_NTSC_M_JP | V4L2_STD_NTSC_443 | \
+ V4L2_STD_PAL_BG | V4L2_STD_PAL_DK | V4L2_STD_PAL_I | \
+ V4L2_STD_PAL_M | V4L2_STD_PAL_N | V4L2_STD_PAL_Nc | \
+ V4L2_STD_PAL_60 | V4L2_STD_SECAM_L | V4L2_STD_SECAM_DK)
+#define CX231xx_VERSION_CODE KERNEL_VERSION(0, 0, 2)
+
+#define SLEEP_S5H1432 30
+#define CX23417_OSC_EN 8
+#define CX23417_RESET 9
+
+struct cx23417_fmt {
+ char *name;
+ u32 fourcc; /* v4l2 format id */
+ int depth;
+ int flags;
+ u32 cxformat;
+};
enum cx231xx_mode {
CX231XX_SUSPEND,
CX231XX_ANALOG_MODE,
@@ -114,7 +139,7 @@ enum cx231xx_stream_state {
struct cx231xx;
-struct cx231xx_usb_isoc_ctl {
+struct cx231xx_isoc_ctl {
/* max packet size of isoc transaction */
int max_pkt_size;
@@ -148,6 +173,40 @@ struct cx231xx_usb_isoc_ctl {
int (*isoc_copy) (struct cx231xx *dev, struct urb *urb);
};
+struct cx231xx_bulk_ctl {
+ /* max packet size of bulk transaction */
+ int max_pkt_size;
+
+ /* number of allocated urbs */
+ int num_bufs;
+
+ /* urb for bulk transfers */
+ struct urb **urb;
+
+ /* transfer buffers for bulk transfer */
+ char **transfer_buffer;
+
+ /* Last buffer command and region */
+ u8 cmd;
+ int pos, size, pktsize;
+
+ /* Last field: ODD or EVEN? */
+ int field;
+
+ /* Stores incomplete commands */
+ u32 tmp_buf;
+ int tmp_buf_len;
+
+ /* Stores already requested buffers */
+ struct cx231xx_buffer *buf;
+
+ /* Stores the number of received fields */
+ int nfields;
+
+ /* bulk urb callback */
+ int (*bulk_copy) (struct cx231xx *dev, struct urb *urb);
+};
+
struct cx231xx_fmt {
char *name;
u32 fourcc; /* v4l2 format id */
@@ -165,6 +224,11 @@ struct cx231xx_buffer {
int receiving;
};
+enum ps_package_head {
+ CX231XX_NEED_ADD_PS_PACKAGE_HEAD = 0,
+ CX231XX_NONEED_PS_PACKAGE_HEAD
+};
+
struct cx231xx_dmaqueue {
struct list_head active;
struct list_head queued;
@@ -181,6 +245,14 @@ struct cx231xx_dmaqueue {
u32 lines_completed;
u8 field1_done;
u32 lines_per_field;
+
+ /*Mpeg2 control buffer*/
+ u8 *p_left_data;
+ u32 left_data_count;
+ u8 mpeg_buffer_done;
+ u32 mpeg_buffer_completed;
+ enum ps_package_head add_ps_package_head;
+ char ps_head[10];
};
/* inputs */
@@ -259,9 +331,10 @@ struct cx231xx_board {
struct cx231xx_reg_seq *dvb_gpio;
struct cx231xx_reg_seq *suspend_gpio;
struct cx231xx_reg_seq *tuner_gpio;
- u8 tuner_sif_gpio;
- u8 tuner_scl_gpio;
- u8 tuner_sda_gpio;
+ /* Negative means don't use it */
+ s8 tuner_sif_gpio;
+ s8 tuner_scl_gpio;
+ s8 tuner_sda_gpio;
/* PIN ctrl */
u32 ctl_pin_status_mask;
@@ -279,6 +352,7 @@ struct cx231xx_board {
unsigned char xclk, i2c_speed;
enum cx231xx_decoder decoder;
+ int output_mode;
struct cx231xx_input input[MAX_CX231XX_INPUT];
struct cx231xx_input radio;
@@ -309,10 +383,8 @@ enum AUDIO_INPUT {
};
#define CX231XX_AUDIO_BUFS 5
-#define CX231XX_NUM_AUDIO_PACKETS 64
-#define CX231XX_CAPTURE_STREAM_EN 1
-#define CX231XX_STOP_AUDIO 0
-#define CX231XX_START_AUDIO 1
+#define CX231XX_NUM_AUDIO_PACKETS 16
+#define CX231XX_ISO_NUM_AUDIO_PACKETS 64
/* cx231xx extensions */
#define CX231XX_AUDIO 0x10
@@ -330,7 +402,7 @@ struct cx231xx_audio {
struct snd_card *sndcard;
int users, shutdown;
- enum cx231xx_stream_state capture_stream;
+ /* locks */
spinlock_t slock;
int alt; /* alternate */
@@ -350,6 +422,28 @@ struct cx231xx_fh {
struct videobuf_queue vb_vidq;
enum v4l2_buf_type type;
+
+
+
+/* The following is copied from cx23885.h */
+ u32 resources;
+
+ /* video overlay */
+ struct v4l2_window win;
+ struct v4l2_clip *clips;
+ unsigned int nclips;
+
+ /* video capture */
+ struct cx23417_fmt *fmt;
+ unsigned int width, height;
+
+ /* vbi capture */
+ struct videobuf_queue vidq;
+ struct videobuf_queue vbiq;
+
+ /* MPEG Encoder specifics ONLY */
+
+ atomic_t v4l_reading;
};
/*****************************************************************/
@@ -403,6 +497,13 @@ struct VENDOR_REQUEST_IN {
u8 *pBuff;
};
+struct cx231xx_tvnorm {
+ char *name;
+ v4l2_std_id id;
+ u32 cxiformat;
+ u32 cxoformat;
+};
+
struct cx231xx_ctrl {
struct v4l2_queryctrl v;
u32 off;
@@ -424,7 +525,9 @@ enum TRANSFER_TYPE {
struct cx231xx_video_mode {
/* Isoc control struct */
struct cx231xx_dmaqueue vidq;
- struct cx231xx_usb_isoc_ctl isoc_ctl;
+ struct cx231xx_isoc_ctl isoc_ctl;
+ struct cx231xx_bulk_ctl bulk_ctl;
+ /* locks */
spinlock_t slock;
/* usb transfer */
@@ -434,6 +537,64 @@ struct cx231xx_video_mode {
unsigned int *alt_max_pkt_size; /* array of wMaxPacketSize */
u16 end_point_addr;
};
+/*
+struct cx23885_dmaqueue {
+ struct list_head active;
+ struct list_head queued;
+ struct timer_list timeout;
+ struct btcx_riscmem stopper;
+ u32 count;
+};
+*/
+struct cx231xx_tsport {
+ struct cx231xx *dev;
+
+ int nr;
+ int sram_chno;
+
+ struct videobuf_dvb_frontends frontends;
+
+ /* dma queues */
+
+ u32 ts_packet_size;
+ u32 ts_packet_count;
+
+ int width;
+ int height;
+
+ /* locks */
+ spinlock_t slock;
+
+ /* registers */
+ u32 reg_gpcnt;
+ u32 reg_gpcnt_ctl;
+ u32 reg_dma_ctl;
+ u32 reg_lngth;
+ u32 reg_hw_sop_ctrl;
+ u32 reg_gen_ctrl;
+ u32 reg_bd_pkt_status;
+ u32 reg_sop_status;
+ u32 reg_fifo_ovfl_stat;
+ u32 reg_vld_misc;
+ u32 reg_ts_clk_en;
+ u32 reg_ts_int_msk;
+ u32 reg_ts_int_stat;
+ u32 reg_src_sel;
+
+ /* Default register vals */
+ int pci_irqmask;
+ u32 dma_ctl_val;
+ u32 ts_int_msk_val;
+ u32 gen_ctrl_val;
+ u32 ts_clk_en_val;
+ u32 src_sel_val;
+ u32 vld_misc_val;
+ u32 hw_sop_ctrl_val;
+
+ /* Allow a single tsport to have multiple frontends */
+ u32 num_frontends;
+ void *port_priv;
+};
/* main device struct */
struct cx231xx {
@@ -457,6 +618,9 @@ struct cx231xx {
struct cx231xx_IR *ir;
+ struct work_struct wq_trigger; /* Trigger to start/stop audio for alsa module */
+ atomic_t stream_started; /* stream should be running if true */
+
struct list_head devlist;
int tuner_type; /* type of the tuner */
@@ -465,7 +629,9 @@ struct cx231xx {
/* I2C adapters: Master 1 & 2 (External) & Master 3 (Internal only) */
struct cx231xx_i2c i2c_bus[3];
unsigned int xc_fw_load_done:1;
+ /* locks */
struct mutex gpio_i2c_lock;
+ struct mutex i2c_lock;
/* video for linux */
int users; /* user count for exclusive use */
@@ -479,8 +645,6 @@ struct cx231xx {
/* frame properties */
int width; /* current frame width */
int height; /* current frame height */
- unsigned hscale; /* horizontal scale factor (see datasheet) */
- unsigned vscale; /* vertical scale factor (see datasheet) */
int interlaced; /* 1=interlace fileds, 0=just top fileds */
struct cx231xx_audio adev;
@@ -505,6 +669,8 @@ struct cx231xx {
struct cx231xx_video_mode sliced_cc_mode;
struct cx231xx_video_mode ts1_mode;
+ atomic_t devlist_count;
+
struct usb_device *udev; /* the usb device */
char urb_buf[URB_MAX_CTRL_SIZE]; /* urb control msg buffer */
@@ -550,8 +716,24 @@ struct cx231xx {
u8 vbi_or_sliced_cc_mode; /* 0 - vbi ; 1 - sliced cc mode */
enum cx231xx_std_mode std_mode; /* 0 - Air; 1 - cable */
+ /*mode: digital=1 or analog=0*/
+ u8 mode_tv;
+
+ u8 USE_ISO;
+ struct cx231xx_tvnorm encodernorm;
+ struct cx231xx_tsport ts1, ts2;
+ struct cx2341x_mpeg_params mpeg_params;
+ struct video_device *v4l_device;
+ atomic_t v4l_reader_count;
+ u32 freq;
+ unsigned int input;
+ u32 cx23417_mailbox;
+ u32 __iomem *lmmio;
+ u8 __iomem *bmmio;
};
+extern struct list_head cx231xx_devlist;
+
#define cx25840_call(cx231xx, o, f, args...) \
v4l2_subdev_call(cx231xx->sd_cx25840, o, f, ##args)
#define tuner_call(cx231xx, o, f, args...) \
@@ -577,6 +759,10 @@ int cx231xx_i2c_register(struct cx231xx_i2c *bus);
int cx231xx_i2c_unregister(struct cx231xx_i2c *bus);
/* Internal block control functions */
+int cx231xx_read_i2c_master(struct cx231xx *dev, u8 dev_addr, u16 saddr,
+ u8 saddr_len, u32 *data, u8 data_len, int master);
+int cx231xx_write_i2c_master(struct cx231xx *dev, u8 dev_addr, u16 saddr,
+ u8 saddr_len, u32 data, u8 data_len, int master);
int cx231xx_read_i2c_data(struct cx231xx *dev, u8 dev_addr,
u16 saddr, u8 saddr_len, u32 *data, u8 data_len);
int cx231xx_write_i2c_data(struct cx231xx *dev, u8 dev_addr,
@@ -588,6 +774,9 @@ int cx231xx_read_modify_write_i2c_dword(struct cx231xx *dev, u8 dev_addr,
u16 saddr, u32 mask, u32 value);
u32 cx231xx_set_field(u32 field_mask, u32 data);
+/*verve r/w*/
+void initGPIO(struct cx231xx *dev);
+void uninitGPIO(struct cx231xx *dev);
/* afe related functions */
int cx231xx_afe_init_super_block(struct cx231xx *dev, u32 ref_count);
int cx231xx_afe_init_channels(struct cx231xx *dev);
@@ -607,6 +796,19 @@ int cx231xx_i2s_blk_set_audio_input(struct cx231xx *dev, u8 audio_input);
/* DIF related functions */
int cx231xx_dif_configure_C2HH_for_low_IF(struct cx231xx *dev, u32 mode,
u32 function_mode, u32 standard);
+void cx231xx_set_Colibri_For_LowIF(struct cx231xx *dev, u32 if_freq,
+ u8 spectral_invert, u32 mode);
+u32 cx231xx_Get_Colibri_CarrierOffset(u32 mode, u32 standard);
+void cx231xx_set_DIF_bandpass(struct cx231xx *dev, u32 if_freq,
+ u8 spectral_invert, u32 mode);
+void cx231xx_Setup_AFE_for_LowIF(struct cx231xx *dev);
+void reset_s5h1432_demod(struct cx231xx *dev);
+void cx231xx_dump_HH_reg(struct cx231xx *dev);
+void update_HH_register_after_set_DIF(struct cx231xx *dev);
+void cx231xx_dump_SC_reg(struct cx231xx *dev);
+
+
+
int cx231xx_dif_set_standard(struct cx231xx *dev, u32 standard);
int cx231xx_tuner_pre_channel_change(struct cx231xx *dev);
int cx231xx_tuner_post_channel_change(struct cx231xx *dev);
@@ -672,15 +874,28 @@ int cx231xx_set_audio_decoder_input(struct cx231xx *dev,
enum AUDIO_INPUT audio_input);
int cx231xx_capture_start(struct cx231xx *dev, int start, u8 media_type);
-int cx231xx_resolution_set(struct cx231xx *dev);
int cx231xx_set_video_alternate(struct cx231xx *dev);
int cx231xx_set_alt_setting(struct cx231xx *dev, u8 index, u8 alt);
+int is_fw_load(struct cx231xx *dev);
+int cx231xx_check_fw(struct cx231xx *dev);
int cx231xx_init_isoc(struct cx231xx *dev, int max_packets,
int num_bufs, int max_pkt_size,
int (*isoc_copy) (struct cx231xx *dev,
struct urb *urb));
+int cx231xx_init_bulk(struct cx231xx *dev, int max_packets,
+ int num_bufs, int max_pkt_size,
+ int (*bulk_copy) (struct cx231xx *dev,
+ struct urb *urb));
+void cx231xx_stop_TS1(struct cx231xx *dev);
+void cx231xx_start_TS1(struct cx231xx *dev);
void cx231xx_uninit_isoc(struct cx231xx *dev);
+void cx231xx_uninit_bulk(struct cx231xx *dev);
int cx231xx_set_mode(struct cx231xx *dev, enum cx231xx_mode set_mode);
+int cx231xx_unmute_audio(struct cx231xx *dev);
+int cx231xx_ep5_bulkout(struct cx231xx *dev, u8 *firmware, u16 size);
+void cx231xx_disable656(struct cx231xx *dev);
+void cx231xx_enable656(struct cx231xx *dev);
+int cx231xx_demod_reset(struct cx231xx *dev);
int cx231xx_gpio_set(struct cx231xx *dev, struct cx231xx_reg_seq *gpio);
/* Device list functions */
@@ -712,7 +927,7 @@ int cx231xx_power_suspend(struct cx231xx *dev);
int cx231xx_init_ctrl_pin_status(struct cx231xx *dev);
int cx231xx_set_agc_analog_digital_mux_select(struct cx231xx *dev,
u8 analog_or_digital);
-int cx231xx_enable_i2c_for_tuner(struct cx231xx *dev, u8 I2CIndex);
+int cx231xx_enable_i2c_port_3(struct cx231xx *dev, bool is_port_3);
/* video audio decoder related functions */
void video_mux(struct cx231xx *dev, int index);
@@ -733,12 +948,11 @@ extern void cx231xx_card_setup(struct cx231xx *dev);
extern struct cx231xx_board cx231xx_boards[];
extern struct usb_device_id cx231xx_id_table[];
extern const unsigned int cx231xx_bcount;
-void cx231xx_register_i2c_ir(struct cx231xx *dev);
int cx231xx_tuner_callback(void *ptr, int component, int command, int arg);
-/* Provided by cx231xx-input.c */
-int cx231xx_ir_init(struct cx231xx *dev);
-int cx231xx_ir_fini(struct cx231xx *dev);
+/* cx231xx-417.c */
+extern int cx231xx_417_register(struct cx231xx *dev);
+extern void cx231xx_417_unregister(struct cx231xx *dev);
/* printk macros */
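As a usage note (not part of the patch): the bulk prototypes added above mirror the existing isoc ones, so a caller can select the transfer mode from the new dev->USE_ISO flag, matching the uninit selection made in cx231xx_v4l2_close(). A minimal sketch with a hypothetical wrapper name and copy callback:

/* Sketch only -- not part of the patch.  Picks the isoc or the newly
 * added bulk transfer path based on dev->USE_ISO; wrapper name and the
 * copy callback are hypothetical. */
static int example_init_transfers(struct cx231xx *dev, int max_pkt_size,
				  int (*copy_cb)(struct cx231xx *dev,
						 struct urb *urb))
{
	if (dev->USE_ISO)
		return cx231xx_init_isoc(dev, CX231XX_NUM_PACKETS,
					 CX231XX_NUM_BUFS, max_pkt_size,
					 copy_cb);

	return cx231xx_init_bulk(dev, CX231XX_NUM_PACKETS,
				 CX231XX_NUM_BUFS, max_pkt_size, copy_cb);
}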
diff --git a/drivers/media/video/cx23885/cx23885-417.c b/drivers/media/video/cx23885/cx23885-417.c
index 53a67824071b..9a98dc55f657 100644
--- a/drivers/media/video/cx23885/cx23885-417.c
+++ b/drivers/media/video/cx23885/cx23885-417.c
@@ -31,7 +31,6 @@
#include <linux/delay.h>
#include <linux/device.h>
#include <linux/firmware.h>
-#include <linux/smp_lock.h>
#include <linux/slab.h>
#include <media/v4l2-common.h>
#include <media/v4l2-ioctl.h>
@@ -1576,12 +1575,8 @@ static int mpeg_open(struct file *file)
/* allocate + initialize per filehandle data */
fh = kzalloc(sizeof(*fh), GFP_KERNEL);
- if (NULL == fh) {
- unlock_kernel();
+ if (!fh)
return -ENOMEM;
- }
-
- lock_kernel();
file->private_data = fh;
fh->dev = dev;
@@ -1591,9 +1586,7 @@ static int mpeg_open(struct file *file)
V4L2_BUF_TYPE_VIDEO_CAPTURE,
V4L2_FIELD_INTERLACED,
sizeof(struct cx23885_buffer),
- fh);
- unlock_kernel();
-
+ fh, NULL);
return 0;
}
diff --git a/drivers/media/video/cx23885/cx23885-cards.c b/drivers/media/video/cx23885/cx23885-cards.c
index e76ce8709afd..8861309268b1 100644
--- a/drivers/media/video/cx23885/cx23885-cards.c
+++ b/drivers/media/video/cx23885/cx23885-cards.c
@@ -1247,7 +1247,7 @@ void cx23885_card_setup(struct cx23885_dev *dev)
case CX23885_BOARD_LEADTEK_WINFAST_PXTV1200:
dev->sd_cx25840 = v4l2_i2c_new_subdev(&dev->v4l2_dev,
&dev->i2c_bus[2].i2c_adap,
- "cx25840", "cx25840", 0x88 >> 1, NULL);
+ "cx25840", 0x88 >> 1, NULL);
if (dev->sd_cx25840) {
dev->sd_cx25840->grp_id = CX23885_HW_AV_CORE;
v4l2_subdev_call(dev->sd_cx25840, core, load_fw);
diff --git a/drivers/media/video/cx23885/cx23885-core.c b/drivers/media/video/cx23885/cx23885-core.c
index f6b62e7398af..359882419b7f 100644
--- a/drivers/media/video/cx23885/cx23885-core.c
+++ b/drivers/media/video/cx23885/cx23885-core.c
@@ -815,6 +815,7 @@ static void cx23885_dev_checkrevision(struct cx23885_dev *dev)
case 0x0e:
/* CX23887-15Z */
dev->hwrevision = 0xc0;
+ break;
case 0x0f:
/* CX23887-14Z */
dev->hwrevision = 0xb1;
@@ -1221,7 +1222,7 @@ void cx23885_free_buffer(struct videobuf_queue *q, struct cx23885_buffer *buf)
struct videobuf_dmabuf *dma = videobuf_to_dma(&buf->vb);
BUG_ON(in_interrupt());
- videobuf_waiton(&buf->vb, 0, 0);
+ videobuf_waiton(q, &buf->vb, 0, 0);
videobuf_dma_unmap(q->dev, dma);
videobuf_dma_free(dma);
btcx_riscmem_free(to_pci_dev(q->dev), &buf->risc);
diff --git a/drivers/media/video/cx23885/cx23885-dvb.c b/drivers/media/video/cx23885/cx23885-dvb.c
index 3d70af283881..5958cb882e93 100644
--- a/drivers/media/video/cx23885/cx23885-dvb.c
+++ b/drivers/media/video/cx23885/cx23885-dvb.c
@@ -1017,10 +1017,7 @@ static int dvb_register(struct cx23885_tsport *port)
/* Read entire EEPROM */
dev->i2c_bus[0].i2c_client.addr = 0xa0 >> 1;
tveeprom_read(&dev->i2c_bus[0].i2c_client, eeprom, sizeof(eeprom));
- printk(KERN_INFO "TeVii S470 MAC= "
- "%02X:%02X:%02X:%02X:%02X:%02X\n",
- eeprom[0xa0], eeprom[0xa1], eeprom[0xa2],
- eeprom[0xa3], eeprom[0xa4], eeprom[0xa5]);
+ printk(KERN_INFO "TeVii S470 MAC= %pM\n", eeprom + 0xa0);
memcpy(port->frontends.adapter.proposed_mac, eeprom + 0xa0, 6);
break;
}
@@ -1074,7 +1071,7 @@ int cx23885_dvb_register(struct cx23885_tsport *port)
videobuf_queue_sg_init(&fe0->dvb.dvbq, &dvb_qops,
&dev->pci->dev, &port->slock,
V4L2_BUF_TYPE_VIDEO_CAPTURE, V4L2_FIELD_TOP,
- sizeof(struct cx23885_buffer), port);
+ sizeof(struct cx23885_buffer), port, NULL);
}
err = dvb_register(port);
if (err != 0)
diff --git a/drivers/media/video/cx23885/cx23885-video.c b/drivers/media/video/cx23885/cx23885-video.c
index da66e5f8d91d..8b2fb8a4375c 100644
--- a/drivers/media/video/cx23885/cx23885-video.c
+++ b/drivers/media/video/cx23885/cx23885-video.c
@@ -26,7 +26,6 @@
#include <linux/kmod.h>
#include <linux/kernel.h>
#include <linux/slab.h>
-#include <linux/smp_lock.h>
#include <linux/interrupt.h>
#include <linux/delay.h>
#include <linux/kthread.h>
@@ -743,8 +742,6 @@ static int video_open(struct file *file)
if (NULL == fh)
return -ENOMEM;
- lock_kernel();
-
file->private_data = fh;
fh->dev = dev;
fh->radio = radio;
@@ -758,12 +755,10 @@ static int video_open(struct file *file)
V4L2_BUF_TYPE_VIDEO_CAPTURE,
V4L2_FIELD_INTERLACED,
sizeof(struct cx23885_buffer),
- fh);
+ fh, NULL);
dprintk(1, "post videobuf_queue_init()\n");
- unlock_kernel();
-
return 0;
}
@@ -1165,9 +1160,10 @@ static int cx23885_enum_input(struct cx23885_dev *dev, struct v4l2_input *i)
i->type = V4L2_INPUT_TYPE_CAMERA;
strcpy(i->name, iname[INPUT(n)->type]);
if ((CX23885_VMUX_TELEVISION == INPUT(n)->type) ||
- (CX23885_VMUX_CABLE == INPUT(n)->type))
+ (CX23885_VMUX_CABLE == INPUT(n)->type)) {
i->type = V4L2_INPUT_TYPE_TUNER;
i->std = CX23885_NORMS;
+ }
return 0;
}
@@ -1511,11 +1507,11 @@ int cx23885_video_register(struct cx23885_dev *dev)
if (dev->tuner_addr)
sd = v4l2_i2c_new_subdev(&dev->v4l2_dev,
&dev->i2c_bus[1].i2c_adap,
- "tuner", "tuner", dev->tuner_addr, NULL);
+ "tuner", dev->tuner_addr, NULL);
else
sd = v4l2_i2c_new_subdev(&dev->v4l2_dev,
&dev->i2c_bus[1].i2c_adap,
- "tuner", "tuner", 0, v4l2_i2c_tuner_addrs(ADDRS_TV));
+ "tuner", 0, v4l2_i2c_tuner_addrs(ADDRS_TV));
if (sd) {
struct tuner_setup tun_setup;
diff --git a/drivers/media/video/cx23885/cx23888-ir.c b/drivers/media/video/cx23885/cx23888-ir.c
index 2502a0a67097..e78e3e4c8112 100644
--- a/drivers/media/video/cx23885/cx23888-ir.c
+++ b/drivers/media/video/cx23885/cx23888-ir.c
@@ -704,6 +704,7 @@ static int cx23888_ir_rx_read(struct v4l2_subdev *sd, u8 *buf, size_t count,
if (v > IR_MAX_DURATION)
v = IR_MAX_DURATION;
+ init_ir_raw_event(&p->ir_core_data);
p->ir_core_data.pulse = u;
p->ir_core_data.duration = v;
diff --git a/drivers/media/video/cx25840/cx25840-audio.c b/drivers/media/video/cx25840/cx25840-audio.c
index 6faad34df3ac..34b96c7cfd62 100644
--- a/drivers/media/video/cx25840/cx25840-audio.c
+++ b/drivers/media/video/cx25840/cx25840-audio.c
@@ -437,41 +437,45 @@ void cx25840_audio_set_path(struct i2c_client *client)
{
struct cx25840_state *state = to_state(i2c_get_clientdata(client));
- /* assert soft reset */
- cx25840_and_or(client, 0x810, ~0x1, 0x01);
+ if (!is_cx2583x(state)) {
+ /* assert soft reset */
+ cx25840_and_or(client, 0x810, ~0x1, 0x01);
- /* stop microcontroller */
- cx25840_and_or(client, 0x803, ~0x10, 0);
+ /* stop microcontroller */
+ cx25840_and_or(client, 0x803, ~0x10, 0);
- /* Mute everything to prevent the PFFT! */
- cx25840_write(client, 0x8d3, 0x1f);
+ /* Mute everything to prevent the PFFT! */
+ cx25840_write(client, 0x8d3, 0x1f);
- if (state->aud_input == CX25840_AUDIO_SERIAL) {
- /* Set Path1 to Serial Audio Input */
- cx25840_write4(client, 0x8d0, 0x01011012);
+ if (state->aud_input == CX25840_AUDIO_SERIAL) {
+ /* Set Path1 to Serial Audio Input */
+ cx25840_write4(client, 0x8d0, 0x01011012);
- /* The microcontroller should not be started for the
- * non-tuner inputs: autodetection is specific for
- * TV audio. */
- } else {
- /* Set Path1 to Analog Demod Main Channel */
- cx25840_write4(client, 0x8d0, 0x1f063870);
+ /* The microcontroller should not be started for the
+ * non-tuner inputs: autodetection is specific for
+ * TV audio. */
+ } else {
+ /* Set Path1 to Analog Demod Main Channel */
+ cx25840_write4(client, 0x8d0, 0x1f063870);
+ }
}
set_audclk_freq(client, state->audclk_freq);
- if (state->aud_input != CX25840_AUDIO_SERIAL) {
- /* When the microcontroller detects the
- * audio format, it will unmute the lines */
- cx25840_and_or(client, 0x803, ~0x10, 0x10);
- }
+ if (!is_cx2583x(state)) {
+ if (state->aud_input != CX25840_AUDIO_SERIAL) {
+ /* When the microcontroller detects the
+ * audio format, it will unmute the lines */
+ cx25840_and_or(client, 0x803, ~0x10, 0x10);
+ }
- /* deassert soft reset */
- cx25840_and_or(client, 0x810, ~0x1, 0x00);
+ /* deassert soft reset */
+ cx25840_and_or(client, 0x810, ~0x1, 0x00);
- /* Ensure the controller is running when we exit */
- if (is_cx2388x(state) || is_cx231xx(state))
- cx25840_and_or(client, 0x803, ~0x10, 0x10);
+ /* Ensure the controller is running when we exit */
+ if (is_cx2388x(state) || is_cx231xx(state))
+ cx25840_and_or(client, 0x803, ~0x10, 0x10);
+ }
}
static void set_volume(struct i2c_client *client, int volume)
diff --git a/drivers/media/video/cx25840/cx25840-core.c b/drivers/media/video/cx25840/cx25840-core.c
index f5a3e74c3c7c..dfb198d0415b 100644
--- a/drivers/media/video/cx25840/cx25840-core.c
+++ b/drivers/media/video/cx25840/cx25840-core.c
@@ -42,7 +42,6 @@
#include <linux/delay.h>
#include <media/v4l2-common.h>
#include <media/v4l2-chip-ident.h>
-#include <media/v4l2-i2c-drv.h>
#include <media/cx25840.h>
#include "cx25840-core.h"
@@ -871,6 +870,11 @@ static void input_change(struct i2c_client *client)
}
cx25840_and_or(client, 0x401, ~0x60, 0);
cx25840_and_or(client, 0x401, ~0x60, 0x60);
+
+ /* Don't write into audio registers on cx2583x chips */
+ if (is_cx2583x(state))
+ return;
+
cx25840_and_or(client, 0x810, ~0x01, 1);
if (state->radio) {
@@ -1029,10 +1033,8 @@ static int set_input(struct i2c_client *client, enum cx25840_video_input vid_inp
state->vid_input = vid_input;
state->aud_input = aud_input;
- if (!is_cx2583x(state)) {
- cx25840_audio_set_path(client);
- input_change(client);
- }
+ cx25840_audio_set_path(client);
+ input_change(client);
if (is_cx2388x(state)) {
/* Audio channel 1 src : Parallel 1 */
@@ -1553,18 +1555,14 @@ static int cx25840_s_audio_routing(struct v4l2_subdev *sd,
struct cx25840_state *state = to_state(sd);
struct i2c_client *client = v4l2_get_subdevdata(sd);
- if (is_cx2583x(state))
- return -EINVAL;
return set_input(client, state->vid_input, input);
}
static int cx25840_s_frequency(struct v4l2_subdev *sd, struct v4l2_frequency *freq)
{
- struct cx25840_state *state = to_state(sd);
struct i2c_client *client = v4l2_get_subdevdata(sd);
- if (!is_cx2583x(state))
- input_change(client);
+ input_change(client);
return 0;
}
@@ -2043,9 +2041,25 @@ static const struct i2c_device_id cx25840_id[] = {
};
MODULE_DEVICE_TABLE(i2c, cx25840_id);
-static struct v4l2_i2c_driver_data v4l2_i2c_data = {
- .name = "cx25840",
- .probe = cx25840_probe,
- .remove = cx25840_remove,
- .id_table = cx25840_id,
+static struct i2c_driver cx25840_driver = {
+ .driver = {
+ .owner = THIS_MODULE,
+ .name = "cx25840",
+ },
+ .probe = cx25840_probe,
+ .remove = cx25840_remove,
+ .id_table = cx25840_id,
};
+
+static __init int init_cx25840(void)
+{
+ return i2c_add_driver(&cx25840_driver);
+}
+
+static __exit void exit_cx25840(void)
+{
+ i2c_del_driver(&cx25840_driver);
+}
+
+module_init(init_cx25840);
+module_exit(exit_cx25840);
diff --git a/drivers/media/video/cx25840/cx25840-ir.c b/drivers/media/video/cx25840/cx25840-ir.c
index c2b4c14dc9ab..97a4e9b25fe4 100644
--- a/drivers/media/video/cx25840/cx25840-ir.c
+++ b/drivers/media/video/cx25840/cx25840-ir.c
@@ -706,6 +706,7 @@ static int cx25840_ir_rx_read(struct v4l2_subdev *sd, u8 *buf, size_t count,
if (v > IR_MAX_DURATION)
v = IR_MAX_DURATION;
+ init_ir_raw_event(&p->ir_core_data);
p->ir_core_data.pulse = u;
p->ir_core_data.duration = v;
diff --git a/drivers/media/video/cx88/cx88-alsa.c b/drivers/media/video/cx88/cx88-alsa.c
index 4f383cdf5296..4aaa47c0eabf 100644
--- a/drivers/media/video/cx88/cx88-alsa.c
+++ b/drivers/media/video/cx88/cx88-alsa.c
@@ -40,6 +40,7 @@
#include <sound/control.h>
#include <sound/initval.h>
#include <sound/tlv.h>
+#include <media/wm8775.h>
#include "cx88.h"
#include "cx88-reg.h"
@@ -94,7 +95,7 @@ typedef struct cx88_audio_dev snd_cx88_card_t;
****************************************************************************/
static int index[SNDRV_CARDS] = SNDRV_DEFAULT_IDX; /* Index 0-MAX */
-static char *id[SNDRV_CARDS] = SNDRV_DEFAULT_STR; /* ID for this card */
+static const char *id[SNDRV_CARDS] = SNDRV_DEFAULT_STR; /* ID for this card */
static int enable[SNDRV_CARDS] = {1, [1 ... (SNDRV_CARDS - 1)] = 1};
module_param_array(enable, bool, NULL, 0444);
@@ -131,7 +132,7 @@ static int _cx88_start_audio_dma(snd_cx88_card_t *chip)
{
struct cx88_audio_buffer *buf = chip->buf;
struct cx88_core *core=chip->core;
- struct sram_channel *audio_ch = &cx88_sram_channels[SRAM_CH25];
+ const struct sram_channel *audio_ch = &cx88_sram_channels[SRAM_CH25];
/* Make sure RISC/FIFO are off before changing FIFO/RISC settings */
cx_clear(MO_AUD_DMACNTRL, 0x11);
@@ -197,7 +198,7 @@ static int _cx88_stop_audio_dma(snd_cx88_card_t *chip)
/*
* BOARD Specific: IRQ dma bits
*/
-static char *cx88_aud_irqs[32] = {
+static const char *cx88_aud_irqs[32] = {
"dn_risci1", "up_risci1", "rds_dn_risc1", /* 0-2 */
NULL, /* reserved */
"dn_risci2", "up_risci2", "rds_dn_risc2", /* 4-6 */
@@ -308,7 +309,7 @@ static int dsp_buffer_free(snd_cx88_card_t *chip)
* Digital hardware definition
*/
#define DEFAULT_FIFO_SIZE 4096
-static struct snd_pcm_hardware snd_cx88_digital_hw = {
+static const struct snd_pcm_hardware snd_cx88_digital_hw = {
.info = SNDRV_PCM_INFO_MMAP |
SNDRV_PCM_INFO_INTERLEAVED |
SNDRV_PCM_INFO_BLOCK_TRANSFER |
@@ -533,7 +534,7 @@ static struct snd_pcm_ops snd_cx88_pcm_ops = {
/*
* create a PCM device
*/
-static int __devinit snd_cx88_pcm(snd_cx88_card_t *chip, int device, char *name)
+static int __devinit snd_cx88_pcm(snd_cx88_card_t *chip, int device, const char *name)
{
int err;
struct snd_pcm *pcm;
@@ -586,26 +587,47 @@ static int snd_cx88_volume_put(struct snd_kcontrol *kcontrol,
int left, right, v, b;
int changed = 0;
u32 old;
+ struct v4l2_control client_ctl;
+
+ /* Pass volume & balance onto any WM8775 */
+ if (value->value.integer.value[0] >= value->value.integer.value[1]) {
+ v = value->value.integer.value[0] << 10;
+ b = value->value.integer.value[0] ?
+ (0x8000 * value->value.integer.value[1]) / value->value.integer.value[0] :
+ 0x8000;
+ } else {
+ v = value->value.integer.value[1] << 10;
+ b = value->value.integer.value[1] ?
+ 0xffff - (0x8000 * value->value.integer.value[0]) / value->value.integer.value[1] :
+ 0x8000;
+ }
+ client_ctl.value = v;
+ client_ctl.id = V4L2_CID_AUDIO_VOLUME;
+ call_hw(core, WM8775_GID, core, s_ctrl, &client_ctl);
+
+ client_ctl.value = b;
+ client_ctl.id = V4L2_CID_AUDIO_BALANCE;
+ call_hw(core, WM8775_GID, core, s_ctrl, &client_ctl);
left = value->value.integer.value[0] & 0x3f;
right = value->value.integer.value[1] & 0x3f;
b = right - left;
if (b < 0) {
- v = 0x3f - left;
- b = (-b) | 0x40;
+ v = 0x3f - left;
+ b = (-b) | 0x40;
} else {
- v = 0x3f - right;
+ v = 0x3f - right;
}
/* Do we really know this will always be called with IRQs on? */
spin_lock_irq(&chip->reg_lock);
old = cx_read(AUD_VOL_CTL);
if (v != (old & 0x3f)) {
- cx_write(AUD_VOL_CTL, (old & ~0x3f) | v);
- changed = 1;
+ cx_swrite(SHADOW_AUD_VOL_CTL, AUD_VOL_CTL, (old & ~0x3f) | v);
+ changed = 1;
}
- if (cx_read(AUD_BAL_CTL) != b) {
- cx_write(AUD_BAL_CTL, b);
- changed = 1;
+ if ((cx_read(AUD_BAL_CTL) & 0x7f) != b) {
+ cx_write(AUD_BAL_CTL, b);
+ changed = 1;
}
spin_unlock_irq(&chip->reg_lock);
@@ -614,11 +636,11 @@ static int snd_cx88_volume_put(struct snd_kcontrol *kcontrol,
static const DECLARE_TLV_DB_SCALE(snd_cx88_db_scale, -6300, 100, 0);
-static struct snd_kcontrol_new snd_cx88_volume = {
+static const struct snd_kcontrol_new snd_cx88_volume = {
.iface = SNDRV_CTL_ELEM_IFACE_MIXER,
.access = SNDRV_CTL_ELEM_ACCESS_READWRITE |
SNDRV_CTL_ELEM_ACCESS_TLV_READ,
- .name = "Playback Volume",
+ .name = "Analog-TV Volume",
.info = snd_cx88_volume_info,
.get = snd_cx88_volume_get,
.put = snd_cx88_volume_put,
@@ -649,31 +671,74 @@ static int snd_cx88_switch_put(struct snd_kcontrol *kcontrol,
vol = cx_read(AUD_VOL_CTL);
if (value->value.integer.value[0] != !(vol & bit)) {
vol ^= bit;
- cx_write(AUD_VOL_CTL, vol);
+ cx_swrite(SHADOW_AUD_VOL_CTL, AUD_VOL_CTL, vol);
+ /* Pass mute onto any WM8775 */
+ if ((1<<6) == bit) {
+ struct v4l2_control client_ctl;
+ client_ctl.value = 0 != (vol & bit);
+ client_ctl.id = V4L2_CID_AUDIO_MUTE;
+ call_hw(core, WM8775_GID, core, s_ctrl, &client_ctl);
+ }
ret = 1;
}
spin_unlock_irq(&chip->reg_lock);
return ret;
}
-static struct snd_kcontrol_new snd_cx88_dac_switch = {
+static const struct snd_kcontrol_new snd_cx88_dac_switch = {
.iface = SNDRV_CTL_ELEM_IFACE_MIXER,
- .name = "Playback Switch",
+ .name = "Audio-Out Switch",
.info = snd_ctl_boolean_mono_info,
.get = snd_cx88_switch_get,
.put = snd_cx88_switch_put,
.private_value = (1<<8),
};
-static struct snd_kcontrol_new snd_cx88_source_switch = {
+static const struct snd_kcontrol_new snd_cx88_source_switch = {
.iface = SNDRV_CTL_ELEM_IFACE_MIXER,
- .name = "Capture Switch",
+ .name = "Analog-TV Switch",
.info = snd_ctl_boolean_mono_info,
.get = snd_cx88_switch_get,
.put = snd_cx88_switch_put,
.private_value = (1<<6),
};
+static int snd_cx88_alc_get(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_value *value)
+{
+ snd_cx88_card_t *chip = snd_kcontrol_chip(kcontrol);
+ struct cx88_core *core = chip->core;
+ struct v4l2_control client_ctl;
+
+ client_ctl.id = V4L2_CID_AUDIO_LOUDNESS;
+ call_hw(core, WM8775_GID, core, g_ctrl, &client_ctl);
+ value->value.integer.value[0] = client_ctl.value ? 1 : 0;
+
+ return 0;
+}
+
+static int snd_cx88_alc_put(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_value *value)
+{
+ snd_cx88_card_t *chip = snd_kcontrol_chip(kcontrol);
+ struct cx88_core *core = chip->core;
+ struct v4l2_control client_ctl;
+
+ client_ctl.value = 0 != value->value.integer.value[0];
+ client_ctl.id = V4L2_CID_AUDIO_LOUDNESS;
+ call_hw(core, WM8775_GID, core, s_ctrl, &client_ctl);
+
+ return 0;
+}
+
+static struct snd_kcontrol_new snd_cx88_alc_switch = {
+ .iface = SNDRV_CTL_ELEM_IFACE_MIXER,
+ .name = "Line-In ALC Switch",
+ .info = snd_ctl_boolean_mono_info,
+ .get = snd_cx88_alc_get,
+ .put = snd_cx88_alc_put,
+};
+
/****************************************************************************
Basic Flow for Sound Devices
****************************************************************************/
@@ -683,7 +748,7 @@ static struct snd_kcontrol_new snd_cx88_source_switch = {
* Only boards with eeprom and byte 1 at eeprom=1 have it
*/
-static struct pci_device_id cx88_audio_pci_tbl[] __devinitdata = {
+static const struct pci_device_id cx88_audio_pci_tbl[] __devinitdata = {
{0x14f1,0x8801,PCI_ANY_ID,PCI_ANY_ID,0,0,0},
{0x14f1,0x8811,PCI_ANY_ID,PCI_ANY_ID,0,0,0},
{0, }
@@ -795,6 +860,7 @@ static int __devinit cx88_audio_initdev(struct pci_dev *pci,
{
struct snd_card *card;
snd_cx88_card_t *chip;
+ struct v4l2_subdev *sd;
int err;
if (devno >= SNDRV_CARDS)
@@ -830,6 +896,15 @@ static int __devinit cx88_audio_initdev(struct pci_dev *pci,
if (err < 0)
goto error;
+ /* If there's a wm8775 then add a Line-In ALC switch */
+ list_for_each_entry(sd, &chip->core->v4l2_dev.subdevs, list) {
+ if (WM8775_GID == sd->grp_id) {
+ snd_ctl_add(card, snd_ctl_new1(&snd_cx88_alc_switch,
+ chip));
+ break;
+ }
+ }
+
strcpy (card->driver, "CX88x");
sprintf(card->shortname, "Conexant CX%x", pci->device);
sprintf(card->longname, "%s at %#llx",
diff --git a/drivers/media/video/cx88/cx88-blackbird.c b/drivers/media/video/cx88/cx88-blackbird.c
index 660b2a927feb..d7c94848249e 100644
--- a/drivers/media/video/cx88/cx88-blackbird.c
+++ b/drivers/media/video/cx88/cx88-blackbird.c
@@ -33,7 +33,6 @@
#include <linux/delay.h>
#include <linux/device.h>
#include <linux/firmware.h>
-#include <linux/smp_lock.h>
#include <media/v4l2-common.h>
#include <media/v4l2-ioctl.h>
#include <media/cx2341x.h>
@@ -1057,7 +1056,7 @@ static int mpeg_open(struct file *file)
dprintk( 1, "%s\n", __func__);
- lock_kernel();
+ mutex_lock(&dev->core->lock);
/* Make sure we can acquire the hardware */
drv = cx8802_get_driver(dev, CX88_MPEG_BLACKBIRD);
@@ -1065,7 +1064,7 @@ static int mpeg_open(struct file *file)
err = drv->request_acquire(drv);
if(err != 0) {
dprintk(1,"%s: Unable to acquire hardware, %d\n", __func__, err);
- unlock_kernel();
+ mutex_unlock(&dev->core->lock);
return err;
}
}
@@ -1073,7 +1072,7 @@ static int mpeg_open(struct file *file)
if (!atomic_read(&dev->core->mpeg_users) && blackbird_initialize_codec(dev) < 0) {
if (drv)
drv->request_release(drv);
- unlock_kernel();
+ mutex_unlock(&dev->core->lock);
return -EINVAL;
}
dprintk(1, "open dev=%s\n", video_device_node_name(vdev));
@@ -1083,7 +1082,7 @@ static int mpeg_open(struct file *file)
if (NULL == fh) {
if (drv)
drv->request_release(drv);
- unlock_kernel();
+ mutex_unlock(&dev->core->lock);
return -ENOMEM;
}
file->private_data = fh;
@@ -1094,15 +1093,14 @@ static int mpeg_open(struct file *file)
V4L2_BUF_TYPE_VIDEO_CAPTURE,
V4L2_FIELD_INTERLACED,
sizeof(struct cx88_buffer),
- fh);
+ fh, NULL);
/* FIXME: locking against other video device */
cx88_set_scale(dev->core, dev->width, dev->height,
fh->mpegq.field);
- unlock_kernel();
atomic_inc(&dev->core->mpeg_users);
-
+ mutex_unlock(&dev->core->lock);
return 0;
}
@@ -1120,8 +1118,11 @@ static int mpeg_release(struct file *file)
videobuf_stop(&fh->mpegq);
videobuf_mmap_free(&fh->mpegq);
+
+ mutex_lock(&dev->core->lock);
file->private_data = NULL;
kfree(fh);
+ mutex_unlock(&dev->core->lock);
/* Make sure we release the hardware */
drv = cx8802_get_driver(dev, CX88_MPEG_BLACKBIRD);
diff --git a/drivers/media/video/cx88/cx88-cards.c b/drivers/media/video/cx88/cx88-cards.c
index e8416b76da67..9b9e169cce90 100644
--- a/drivers/media/video/cx88/cx88-cards.c
+++ b/drivers/media/video/cx88/cx88-cards.c
@@ -970,15 +970,22 @@ static const struct cx88_board cx88_boards[] = {
.radio_type = UNSET,
.tuner_addr = ADDR_UNSET,
.radio_addr = ADDR_UNSET,
+ .audio_chip = V4L2_IDENT_WM8775,
.input = {{
.type = CX88_VMUX_DVB,
.vmux = 0,
+ /* 2: Line-In */
+ .audioroute = 2,
},{
.type = CX88_VMUX_COMPOSITE1,
.vmux = 1,
+ /* 2: Line-In */
+ .audioroute = 2,
},{
.type = CX88_VMUX_SVIDEO,
.vmux = 2,
+ /* 2: Line-In */
+ .audioroute = 2,
}},
.mpeg = CX88_MPEG_DVB,
},
@@ -2104,6 +2111,18 @@ static const struct cx88_board cx88_boards[] = {
} },
.mpeg = CX88_MPEG_DVB,
},
+ [CX88_BOARD_TWINHAN_VP1027_DVBS] = {
+ .name = "Twinhan VP-1027 DVB-S",
+ .tuner_type = TUNER_ABSENT,
+ .radio_type = UNSET,
+ .tuner_addr = ADDR_UNSET,
+ .radio_addr = ADDR_UNSET,
+ .input = {{
+ .type = CX88_VMUX_DVB,
+ .vmux = 0,
+ } },
+ .mpeg = CX88_MPEG_DVB,
+ },
};
/* ------------------------------------------------------------------ */
@@ -2576,6 +2595,10 @@ static const struct cx88_subid cx88_subids[] = {
.subvendor = 0xb034,
.subdevice = 0x3034,
.card = CX88_BOARD_PROF_7301,
+ }, {
+ .subvendor = 0x1822,
+ .subdevice = 0x0023,
+ .card = CX88_BOARD_TWINHAN_VP1027_DVBS,
},
};
@@ -2673,10 +2696,10 @@ static void hauppauge_eeprom(struct cx88_core *core, u8 *eeprom_data)
/* ----------------------------------------------------------------------- */
/* some GDI (was: Modular Technology) specific stuff */
-static struct {
+static const struct {
int id;
int fm;
- char *name;
+ const char *name;
} gdi_tuner[] = {
[ 0x01 ] = { .id = TUNER_ABSENT,
.name = "NTSC_M" },
@@ -2710,7 +2733,7 @@ static struct {
static void gdi_eeprom(struct cx88_core *core, u8 *eeprom_data)
{
- char *name = (eeprom_data[0x0d] < ARRAY_SIZE(gdi_tuner))
+ const char *name = (eeprom_data[0x0d] < ARRAY_SIZE(gdi_tuner))
? gdi_tuner[eeprom_data[0x0d]].name : NULL;
info_printk(core, "GDI: tuner=%s\n", name ? name : "unknown");
@@ -3070,6 +3093,13 @@ static void cx88_card_setup_pre_i2c(struct cx88_core *core)
cx_set(MO_GP1_IO, 0x10);
mdelay(50);
break;
+
+ case CX88_BOARD_TWINHAN_VP1027_DVBS:
+ cx_write(MO_GP0_IO, 0x00003230);
+ cx_write(MO_GP0_IO, 0x00003210);
+ msleep(1);
+ cx_write(MO_GP0_IO, 0x00001230);
+ break;
}
}
@@ -3485,19 +3515,18 @@ struct cx88_core *cx88_core_create(struct pci_dev *pci, int nr)
later code configures a tea5767.
*/
v4l2_i2c_new_subdev(&core->v4l2_dev, &core->i2c_adap,
- "tuner", "tuner",
- 0, v4l2_i2c_tuner_addrs(ADDRS_RADIO));
+ "tuner", 0, v4l2_i2c_tuner_addrs(ADDRS_RADIO));
if (has_demod)
v4l2_i2c_new_subdev(&core->v4l2_dev,
- &core->i2c_adap, "tuner", "tuner",
+ &core->i2c_adap, "tuner",
0, v4l2_i2c_tuner_addrs(ADDRS_DEMOD));
if (core->board.tuner_addr == ADDR_UNSET) {
v4l2_i2c_new_subdev(&core->v4l2_dev,
- &core->i2c_adap, "tuner", "tuner",
+ &core->i2c_adap, "tuner",
0, has_demod ? tv_addrs + 4 : tv_addrs);
} else {
v4l2_i2c_new_subdev(&core->v4l2_dev, &core->i2c_adap,
- "tuner", "tuner", core->board.tuner_addr, NULL);
+ "tuner", core->board.tuner_addr, NULL);
}
}
diff --git a/drivers/media/video/cx88/cx88-core.c b/drivers/media/video/cx88/cx88-core.c
index 85eb266fb351..2e145f0a5fd9 100644
--- a/drivers/media/video/cx88/cx88-core.c
+++ b/drivers/media/video/cx88/cx88-core.c
@@ -217,7 +217,7 @@ cx88_free_buffer(struct videobuf_queue *q, struct cx88_buffer *buf)
struct videobuf_dmabuf *dma=videobuf_to_dma(&buf->vb);
BUG_ON(in_interrupt());
- videobuf_waiton(&buf->vb,0,0);
+ videobuf_waiton(q, &buf->vb, 0, 0);
videobuf_dma_unmap(q->dev, dma);
videobuf_dma_free(dma);
btcx_riscmem_free(to_pci_dev(q->dev), &buf->risc);
@@ -253,7 +253,7 @@ cx88_free_buffer(struct videobuf_queue *q, struct cx88_buffer *buf)
* 0x0c00 - FIFOs
*/
-struct sram_channel cx88_sram_channels[] = {
+const struct sram_channel cx88_sram_channels[] = {
[SRAM_CH21] = {
.name = "video y / packed",
.cmds_start = 0x180040,
@@ -353,7 +353,7 @@ struct sram_channel cx88_sram_channels[] = {
};
int cx88_sram_channel_setup(struct cx88_core *core,
- struct sram_channel *ch,
+ const struct sram_channel *ch,
unsigned int bpl, u32 risc)
{
unsigned int i,lines;
@@ -394,7 +394,7 @@ int cx88_sram_channel_setup(struct cx88_core *core,
static int cx88_risc_decode(u32 risc)
{
- static char *instr[16] = {
+ static const char * const instr[16] = {
[ RISC_SYNC >> 28 ] = "sync",
[ RISC_WRITE >> 28 ] = "write",
[ RISC_WRITEC >> 28 ] = "writec",
@@ -406,14 +406,14 @@ static int cx88_risc_decode(u32 risc)
[ RISC_WRITECM >> 28 ] = "writecm",
[ RISC_WRITECR >> 28 ] = "writecr",
};
- static int incr[16] = {
+ static int const incr[16] = {
[ RISC_WRITE >> 28 ] = 2,
[ RISC_JUMP >> 28 ] = 2,
[ RISC_WRITERM >> 28 ] = 3,
[ RISC_WRITECM >> 28 ] = 3,
[ RISC_WRITECR >> 28 ] = 4,
};
- static char *bits[] = {
+ static const char * const bits[] = {
"12", "13", "14", "resync",
"cnt0", "cnt1", "18", "19",
"20", "21", "22", "23",
@@ -432,9 +432,9 @@ static int cx88_risc_decode(u32 risc)
void cx88_sram_channel_dump(struct cx88_core *core,
- struct sram_channel *ch)
+ const struct sram_channel *ch)
{
- static char *name[] = {
+ static const char * const name[] = {
"initial risc",
"cdt base",
"cdt size",
@@ -489,14 +489,14 @@ void cx88_sram_channel_dump(struct cx88_core *core,
core->name,cx_read(ch->cnt2_reg));
}
-static char *cx88_pci_irqs[32] = {
+static const char *cx88_pci_irqs[32] = {
"vid", "aud", "ts", "vip", "hst", "5", "6", "tm1",
"src_dma", "dst_dma", "risc_rd_err", "risc_wr_err",
"brdg_err", "src_dma_err", "dst_dma_err", "ipb_dma_err",
"i2c", "i2c_rack", "ir_smp", "gpio0", "gpio1"
};
-void cx88_print_irqbits(char *name, char *tag, char **strings,
+void cx88_print_irqbits(const char *name, const char *tag, const char *strings[],
int len, u32 bits, u32 mask)
{
unsigned int i;
@@ -770,7 +770,7 @@ static const u32 xtal = 28636363;
static int set_pll(struct cx88_core *core, int prescale, u32 ofreq)
{
- static u32 pre[] = { 0, 0, 0, 3, 2, 1 };
+ static const u32 pre[] = { 0, 0, 0, 3, 2, 1 };
u64 pll;
u32 reg;
int i;
@@ -879,7 +879,7 @@ static int set_tvaudio(struct cx88_core *core)
} else {
printk("%s/0: tvaudio support needs work for this tv norm [%s], sorry\n",
core->name, v4l2_norm_to_name(core->tvnorm));
- core->tvaudio = 0;
+ core->tvaudio = WW_NONE;
return 0;
}
@@ -1020,15 +1020,15 @@ int cx88_set_tvnorm(struct cx88_core *core, v4l2_std_id norm)
struct video_device *cx88_vdev_init(struct cx88_core *core,
struct pci_dev *pci,
- struct video_device *template,
- char *type)
+ const struct video_device *template_,
+ const char *type)
{
struct video_device *vfd;
vfd = video_device_alloc();
if (NULL == vfd)
return NULL;
- *vfd = *template;
+ *vfd = *template_;
vfd->v4l2_dev = &core->v4l2_dev;
vfd->parent = &pci->dev;
vfd->release = video_device_release;
diff --git a/drivers/media/video/cx88/cx88-dsp.c b/drivers/media/video/cx88/cx88-dsp.c
index a94e00a4ac5d..a9907265ff66 100644
--- a/drivers/media/video/cx88/cx88-dsp.c
+++ b/drivers/media/video/cx88/cx88-dsp.c
@@ -230,7 +230,7 @@ static s32 detect_btsc(struct cx88_core *core, s16 x[], u32 N)
static s16 *read_rds_samples(struct cx88_core *core, u32 *N)
{
- struct sram_channel *srch = &cx88_sram_channels[SRAM_CH27];
+ const struct sram_channel *srch = &cx88_sram_channels[SRAM_CH27];
s16 *samples;
unsigned int i;
@@ -292,11 +292,20 @@ s32 cx88_dsp_detect_stereo_sap(struct cx88_core *core)
switch (core->tvaudio) {
case WW_BG:
case WW_DK:
+ case WW_EIAJ:
+ case WW_M:
ret = detect_a2_a2m_eiaj(core, samples, N);
break;
case WW_BTSC:
ret = detect_btsc(core, samples, N);
break;
+ case WW_NONE:
+ case WW_I:
+ case WW_L:
+ case WW_I2SPT:
+ case WW_FM:
+ case WW_I2SADC:
+ break;
}
kfree(samples);
diff --git a/drivers/media/video/cx88/cx88-dvb.c b/drivers/media/video/cx88/cx88-dvb.c
index faa8e8163a4a..367a653f4c95 100644
--- a/drivers/media/video/cx88/cx88-dvb.c
+++ b/drivers/media/video/cx88/cx88-dvb.c
@@ -56,6 +56,7 @@
#include "stv0900.h"
#include "stb6100.h"
#include "stb6100_proc.h"
+#include "mb86a16.h"
MODULE_DESCRIPTION("driver for cx2388x based DVB cards");
MODULE_AUTHOR("Chris Pascoe <c.pascoe@itee.uq.edu.au>");
@@ -105,7 +106,7 @@ static void dvb_buf_release(struct videobuf_queue *q,
cx88_free_buffer(q, (struct cx88_buffer*)vb);
}
-static struct videobuf_queue_ops dvb_qops = {
+static const struct videobuf_queue_ops dvb_qops = {
.buf_setup = dvb_buf_setup,
.buf_prepare = dvb_buf_prepare,
.buf_queue = dvb_buf_queue,
@@ -167,12 +168,12 @@ static void cx88_dvb_gate_ctrl(struct cx88_core *core, int open)
static int dvico_fusionhdtv_demod_init(struct dvb_frontend* fe)
{
- static u8 clock_config [] = { CLOCK_CTL, 0x38, 0x39 };
- static u8 reset [] = { RESET, 0x80 };
- static u8 adc_ctl_1_cfg [] = { ADC_CTL_1, 0x40 };
- static u8 agc_cfg [] = { AGC_TARGET, 0x24, 0x20 };
- static u8 gpp_ctl_cfg [] = { GPP_CTL, 0x33 };
- static u8 capt_range_cfg[] = { CAPT_RANGE, 0x32 };
+ static const u8 clock_config [] = { CLOCK_CTL, 0x38, 0x39 };
+ static const u8 reset [] = { RESET, 0x80 };
+ static const u8 adc_ctl_1_cfg [] = { ADC_CTL_1, 0x40 };
+ static const u8 agc_cfg [] = { AGC_TARGET, 0x24, 0x20 };
+ static const u8 gpp_ctl_cfg [] = { GPP_CTL, 0x33 };
+ static const u8 capt_range_cfg[] = { CAPT_RANGE, 0x32 };
mt352_write(fe, clock_config, sizeof(clock_config));
udelay(200);
@@ -187,12 +188,12 @@ static int dvico_fusionhdtv_demod_init(struct dvb_frontend* fe)
static int dvico_dual_demod_init(struct dvb_frontend *fe)
{
- static u8 clock_config [] = { CLOCK_CTL, 0x38, 0x38 };
- static u8 reset [] = { RESET, 0x80 };
- static u8 adc_ctl_1_cfg [] = { ADC_CTL_1, 0x40 };
- static u8 agc_cfg [] = { AGC_TARGET, 0x28, 0x20 };
- static u8 gpp_ctl_cfg [] = { GPP_CTL, 0x33 };
- static u8 capt_range_cfg[] = { CAPT_RANGE, 0x32 };
+ static const u8 clock_config [] = { CLOCK_CTL, 0x38, 0x38 };
+ static const u8 reset [] = { RESET, 0x80 };
+ static const u8 adc_ctl_1_cfg [] = { ADC_CTL_1, 0x40 };
+ static const u8 agc_cfg [] = { AGC_TARGET, 0x28, 0x20 };
+ static const u8 gpp_ctl_cfg [] = { GPP_CTL, 0x33 };
+ static const u8 capt_range_cfg[] = { CAPT_RANGE, 0x32 };
mt352_write(fe, clock_config, sizeof(clock_config));
udelay(200);
@@ -208,13 +209,13 @@ static int dvico_dual_demod_init(struct dvb_frontend *fe)
static int dntv_live_dvbt_demod_init(struct dvb_frontend* fe)
{
- static u8 clock_config [] = { 0x89, 0x38, 0x39 };
- static u8 reset [] = { 0x50, 0x80 };
- static u8 adc_ctl_1_cfg [] = { 0x8E, 0x40 };
- static u8 agc_cfg [] = { 0x67, 0x10, 0x23, 0x00, 0xFF, 0xFF,
+ static const u8 clock_config [] = { 0x89, 0x38, 0x39 };
+ static const u8 reset [] = { 0x50, 0x80 };
+ static const u8 adc_ctl_1_cfg [] = { 0x8E, 0x40 };
+ static const u8 agc_cfg [] = { 0x67, 0x10, 0x23, 0x00, 0xFF, 0xFF,
0x00, 0xFF, 0x00, 0x40, 0x40 };
- static u8 dntv_extra[] = { 0xB5, 0x7A };
- static u8 capt_range_cfg[] = { 0x75, 0x32 };
+ static const u8 dntv_extra[] = { 0xB5, 0x7A };
+ static const u8 capt_range_cfg[] = { 0x75, 0x32 };
mt352_write(fe, clock_config, sizeof(clock_config));
udelay(2000);
@@ -229,37 +230,41 @@ static int dntv_live_dvbt_demod_init(struct dvb_frontend* fe)
return 0;
}
-static struct mt352_config dvico_fusionhdtv = {
+static const struct mt352_config dvico_fusionhdtv = {
.demod_address = 0x0f,
.demod_init = dvico_fusionhdtv_demod_init,
};
-static struct mt352_config dntv_live_dvbt_config = {
+static const struct mt352_config dntv_live_dvbt_config = {
.demod_address = 0x0f,
.demod_init = dntv_live_dvbt_demod_init,
};
-static struct mt352_config dvico_fusionhdtv_dual = {
+static const struct mt352_config dvico_fusionhdtv_dual = {
.demod_address = 0x0f,
.demod_init = dvico_dual_demod_init,
};
-static struct zl10353_config cx88_terratec_cinergy_ht_pci_mkii_config = {
+static const struct zl10353_config cx88_terratec_cinergy_ht_pci_mkii_config = {
.demod_address = (0x1e >> 1),
.no_tuner = 1,
.if2 = 45600,
};
+static struct mb86a16_config twinhan_vp1027 = {
+ .demod_address = 0x08,
+};
+
#if defined(CONFIG_VIDEO_CX88_VP3054) || (defined(CONFIG_VIDEO_CX88_VP3054_MODULE) && defined(MODULE))
static int dntv_live_dvbt_pro_demod_init(struct dvb_frontend* fe)
{
- static u8 clock_config [] = { 0x89, 0x38, 0x38 };
- static u8 reset [] = { 0x50, 0x80 };
- static u8 adc_ctl_1_cfg [] = { 0x8E, 0x40 };
- static u8 agc_cfg [] = { 0x67, 0x10, 0x20, 0x00, 0xFF, 0xFF,
+ static const u8 clock_config [] = { 0x89, 0x38, 0x38 };
+ static const u8 reset [] = { 0x50, 0x80 };
+ static const u8 adc_ctl_1_cfg [] = { 0x8E, 0x40 };
+ static const u8 agc_cfg [] = { 0x67, 0x10, 0x20, 0x00, 0xFF, 0xFF,
0x00, 0xFF, 0x00, 0x40, 0x40 };
- static u8 dntv_extra[] = { 0xB5, 0x7A };
- static u8 capt_range_cfg[] = { 0x75, 0x32 };
+ static const u8 dntv_extra[] = { 0xB5, 0x7A };
+ static const u8 capt_range_cfg[] = { 0x75, 0x32 };
mt352_write(fe, clock_config, sizeof(clock_config));
udelay(2000);
@@ -274,41 +279,41 @@ static int dntv_live_dvbt_pro_demod_init(struct dvb_frontend* fe)
return 0;
}
-static struct mt352_config dntv_live_dvbt_pro_config = {
+static const struct mt352_config dntv_live_dvbt_pro_config = {
.demod_address = 0x0f,
.no_tuner = 1,
.demod_init = dntv_live_dvbt_pro_demod_init,
};
#endif
-static struct zl10353_config dvico_fusionhdtv_hybrid = {
+static const struct zl10353_config dvico_fusionhdtv_hybrid = {
.demod_address = 0x0f,
.no_tuner = 1,
};
-static struct zl10353_config dvico_fusionhdtv_xc3028 = {
+static const struct zl10353_config dvico_fusionhdtv_xc3028 = {
.demod_address = 0x0f,
.if2 = 45600,
.no_tuner = 1,
};
-static struct mt352_config dvico_fusionhdtv_mt352_xc3028 = {
+static const struct mt352_config dvico_fusionhdtv_mt352_xc3028 = {
.demod_address = 0x0f,
.if2 = 4560,
.no_tuner = 1,
.demod_init = dvico_fusionhdtv_demod_init,
};
-static struct zl10353_config dvico_fusionhdtv_plus_v1_1 = {
+static const struct zl10353_config dvico_fusionhdtv_plus_v1_1 = {
.demod_address = 0x0f,
};
-static struct cx22702_config connexant_refboard_config = {
+static const struct cx22702_config connexant_refboard_config = {
.demod_address = 0x43,
.output_mode = CX22702_SERIAL_OUTPUT,
};
-static struct cx22702_config hauppauge_hvr_config = {
+static const struct cx22702_config hauppauge_hvr_config = {
.demod_address = 0x63,
.output_mode = CX22702_SERIAL_OUTPUT,
};
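Most of the hunks in this file only add "const" to demodulator and tuner config tables. A small standalone sketch of the effect, with a made-up struct standing in for the *_config types (whether each attach function accepts a const pointer is not shown here and is assumed):

/* Standalone sketch (illustrative only): "static const" places the table
 * in .rodata and turns an accidental write into a compile-time error.
 */
#include <stdio.h>

struct demod_config {		/* stand-in for the *_config structs above */
	unsigned char demod_address;
	int no_tuner;
};

static const struct demod_config example_cfg = {
	.demod_address = 0x0f,
	.no_tuner = 1,
};

int main(void)
{
	/* example_cfg.no_tuner = 0;   would not compile: assignment of
	 *                             member of read-only object        */
	printf("demod at 0x%02x\n", example_cfg.demod_address);
	return 0;
}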
@@ -320,7 +325,7 @@ static int or51132_set_ts_param(struct dvb_frontend* fe, int is_punctured)
return 0;
}
-static struct or51132_config pchdtv_hd3000 = {
+static const struct or51132_config pchdtv_hd3000 = {
.demod_address = 0x15,
.set_ts_params = or51132_set_ts_param,
};
@@ -355,14 +360,14 @@ static struct lgdt330x_config fusionhdtv_3_gold = {
.set_ts_params = lgdt330x_set_ts_param,
};
-static struct lgdt330x_config fusionhdtv_5_gold = {
+static const struct lgdt330x_config fusionhdtv_5_gold = {
.demod_address = 0x0e,
.demod_chip = LGDT3303,
.serial_mpeg = 0x40, /* TPSERIAL for 3303 in TOP_CONTROL */
.set_ts_params = lgdt330x_set_ts_param,
};
-static struct lgdt330x_config pchdtv_hd5500 = {
+static const struct lgdt330x_config pchdtv_hd5500 = {
.demod_address = 0x59,
.demod_chip = LGDT3303,
.serial_mpeg = 0x40, /* TPSERIAL for 3303 in TOP_CONTROL */
@@ -376,7 +381,7 @@ static int nxt200x_set_ts_param(struct dvb_frontend* fe, int is_punctured)
return 0;
}
-static struct nxt200x_config ati_hdtvwonder = {
+static const struct nxt200x_config ati_hdtvwonder = {
.demod_address = 0x0a,
.set_ts_params = nxt200x_set_ts_param,
};
@@ -429,15 +434,15 @@ static int tevii_dvbs_set_voltage(struct dvb_frontend *fe,
cx_set(MO_GP0_IO, 0x6040);
switch (voltage) {
- case SEC_VOLTAGE_13:
- cx_clear(MO_GP0_IO, 0x20);
- break;
- case SEC_VOLTAGE_18:
- cx_set(MO_GP0_IO, 0x20);
- break;
- case SEC_VOLTAGE_OFF:
- cx_clear(MO_GP0_IO, 0x20);
- break;
+ case SEC_VOLTAGE_13:
+ cx_clear(MO_GP0_IO, 0x20);
+ break;
+ case SEC_VOLTAGE_18:
+ cx_set(MO_GP0_IO, 0x20);
+ break;
+ case SEC_VOLTAGE_OFF:
+ cx_clear(MO_GP0_IO, 0x20);
+ break;
}
if (core->prev_set_voltage)
@@ -445,23 +450,49 @@ static int tevii_dvbs_set_voltage(struct dvb_frontend *fe,
return 0;
}
-static struct cx24123_config geniatech_dvbs_config = {
+static int vp1027_set_voltage(struct dvb_frontend *fe,
+ fe_sec_voltage_t voltage)
+{
+ struct cx8802_dev *dev = fe->dvb->priv;
+ struct cx88_core *core = dev->core;
+
+ switch (voltage) {
+ case SEC_VOLTAGE_13:
+ dprintk(1, "LNB SEC Voltage=13\n");
+ cx_write(MO_GP0_IO, 0x00001220);
+ break;
+ case SEC_VOLTAGE_18:
+ dprintk(1, "LNB SEC Voltage=18\n");
+ cx_write(MO_GP0_IO, 0x00001222);
+ break;
+ case SEC_VOLTAGE_OFF:
+ dprintk(1, "LNB Voltage OFF\n");
+ cx_write(MO_GP0_IO, 0x00001230);
+ break;
+ }
+
+ if (core->prev_set_voltage)
+ return core->prev_set_voltage(fe, voltage);
+ return 0;
+}
+
+static const struct cx24123_config geniatech_dvbs_config = {
.demod_address = 0x55,
.set_ts_params = cx24123_set_ts_param,
};
-static struct cx24123_config hauppauge_novas_config = {
+static const struct cx24123_config hauppauge_novas_config = {
.demod_address = 0x55,
.set_ts_params = cx24123_set_ts_param,
};
-static struct cx24123_config kworld_dvbs_100_config = {
+static const struct cx24123_config kworld_dvbs_100_config = {
.demod_address = 0x15,
.set_ts_params = cx24123_set_ts_param,
.lnb_polarity = 1,
};
-static struct s5h1409_config pinnacle_pctv_hd_800i_config = {
+static const struct s5h1409_config pinnacle_pctv_hd_800i_config = {
.demod_address = 0x32 >> 1,
.output_mode = S5H1409_PARALLEL_OUTPUT,
.gpio = S5H1409_GPIO_ON,
@@ -471,7 +502,7 @@ static struct s5h1409_config pinnacle_pctv_hd_800i_config = {
.mpeg_timing = S5H1409_MPEGTIMING_NONCONTINOUS_NONINVERTING_CLOCK,
};
-static struct s5h1409_config dvico_hdtv5_pci_nano_config = {
+static const struct s5h1409_config dvico_hdtv5_pci_nano_config = {
.demod_address = 0x32 >> 1,
.output_mode = S5H1409_SERIAL_OUTPUT,
.gpio = S5H1409_GPIO_OFF,
@@ -480,7 +511,7 @@ static struct s5h1409_config dvico_hdtv5_pci_nano_config = {
.mpeg_timing = S5H1409_MPEGTIMING_CONTINOUS_NONINVERTING_CLOCK,
};
-static struct s5h1409_config kworld_atsc_120_config = {
+static const struct s5h1409_config kworld_atsc_120_config = {
.demod_address = 0x32 >> 1,
.output_mode = S5H1409_SERIAL_OUTPUT,
.gpio = S5H1409_GPIO_OFF,
@@ -489,24 +520,24 @@ static struct s5h1409_config kworld_atsc_120_config = {
.mpeg_timing = S5H1409_MPEGTIMING_CONTINOUS_NONINVERTING_CLOCK,
};
-static struct xc5000_config pinnacle_pctv_hd_800i_tuner_config = {
+static const struct xc5000_config pinnacle_pctv_hd_800i_tuner_config = {
.i2c_address = 0x64,
.if_khz = 5380,
};
-static struct zl10353_config cx88_pinnacle_hybrid_pctv = {
+static const struct zl10353_config cx88_pinnacle_hybrid_pctv = {
.demod_address = (0x1e >> 1),
.no_tuner = 1,
.if2 = 45600,
};
-static struct zl10353_config cx88_geniatech_x8000_mt = {
+static const struct zl10353_config cx88_geniatech_x8000_mt = {
.demod_address = (0x1e >> 1),
.no_tuner = 1,
.disable_i2c_gate_ctrl = 1,
};
-static struct s5h1411_config dvico_fusionhdtv7_config = {
+static const struct s5h1411_config dvico_fusionhdtv7_config = {
.output_mode = S5H1411_SERIAL_OUTPUT,
.gpio = S5H1411_GPIO_ON,
.mpeg_timing = S5H1411_MPEGTIMING_CONTINOUS_NONINVERTING_CLOCK,
@@ -516,7 +547,7 @@ static struct s5h1411_config dvico_fusionhdtv7_config = {
.status_mode = S5H1411_DEMODLOCKING
};
-static struct xc5000_config dvico_fusionhdtv7_tuner_config = {
+static const struct xc5000_config dvico_fusionhdtv7_tuner_config = {
.i2c_address = 0xc2 >> 1,
.if_khz = 5380,
};
@@ -601,19 +632,19 @@ static int cx24116_reset_device(struct dvb_frontend *fe)
return 0;
}
-static struct cx24116_config hauppauge_hvr4000_config = {
+static const struct cx24116_config hauppauge_hvr4000_config = {
.demod_address = 0x05,
.set_ts_params = cx24116_set_ts_param,
.reset_device = cx24116_reset_device,
};
-static struct cx24116_config tevii_s460_config = {
+static const struct cx24116_config tevii_s460_config = {
.demod_address = 0x55,
.set_ts_params = cx24116_set_ts_param,
.reset_device = cx24116_reset_device,
};
-static struct stv0900_config prof_7301_stv0900_config = {
+static const struct stv0900_config prof_7301_stv0900_config = {
.demod_address = 0x6a,
/* demod_mode = 0,*/
.xtal = 27000000,
@@ -625,12 +656,12 @@ static struct stv0900_config prof_7301_stv0900_config = {
.set_ts_params = stv0900_set_ts_param,
};
-static struct stb6100_config prof_7301_stb6100_config = {
+static const struct stb6100_config prof_7301_stb6100_config = {
.tuner_address = 0x60,
.refclock = 27000000,
};
-static struct stv0299_config tevii_tuner_sharp_config = {
+static const struct stv0299_config tevii_tuner_sharp_config = {
.demod_address = 0x68,
.inittab = sharp_z0194a_inittab,
.mclk = 88000000UL,
@@ -643,7 +674,7 @@ static struct stv0299_config tevii_tuner_sharp_config = {
.set_ts_params = cx24116_set_ts_param,
};
-static struct stv0288_config tevii_tuner_earda_config = {
+static const struct stv0288_config tevii_tuner_earda_config = {
.demod_address = 0x68,
.min_delay_ms = 100,
.set_ts_params = cx24116_set_ts_param,
@@ -676,7 +707,7 @@ static int cx8802_alloc_frontends(struct cx8802_dev *dev)
-static u8 samsung_smt_7020_inittab[] = {
+static const u8 samsung_smt_7020_inittab[] = {
0x01, 0x15,
0x02, 0x00,
0x03, 0x00,
@@ -850,7 +881,7 @@ static int samsung_smt_7020_stv0299_set_symbol_rate(struct dvb_frontend *fe,
}
-static struct stv0299_config samsung_stv0299_config = {
+static const struct stv0299_config samsung_stv0299_config = {
.demod_address = 0x68,
.inittab = samsung_smt_7020_inittab,
.mclk = 88000000UL,
@@ -1416,6 +1447,18 @@ static int dvb_register(struct cx8802_dev *dev)
}
break;
+ case CX88_BOARD_TWINHAN_VP1027_DVBS:
+ dev->ts_gen_cntrl = 0x00;
+ fe0->dvb.frontend = dvb_attach(mb86a16_attach,
+ &twinhan_vp1027,
+ &core->i2c_adap);
+ if (fe0->dvb.frontend) {
+ core->prev_set_voltage =
+ fe0->dvb.frontend->ops.set_voltage;
+ fe0->dvb.frontend->ops.set_voltage =
+ vp1027_set_voltage;
+ }
+ break;
default:
printk(KERN_ERR "%s/2: The frontend of your DVB/ATSC card isn't supported yet\n",
@@ -1576,7 +1619,7 @@ static int cx8802_dvb_probe(struct cx8802_driver *drv)
V4L2_BUF_TYPE_VIDEO_CAPTURE,
V4L2_FIELD_TOP,
sizeof(struct cx88_buffer),
- dev);
+ dev, NULL);
/* init struct videobuf_dvb */
fe->dvb.name = dev->core->name;
}
diff --git a/drivers/media/video/cx88/cx88-i2c.c b/drivers/media/video/cx88/cx88-i2c.c
index 82db555b22dd..f53836bb6a5a 100644
--- a/drivers/media/video/cx88/cx88-i2c.c
+++ b/drivers/media/video/cx88/cx88-i2c.c
@@ -108,7 +108,7 @@ static const struct i2c_algo_bit_data cx8800_i2c_algo_template = {
/* ----------------------------------------------------------------------- */
-static char *i2c_devs[128] = {
+static const char * const i2c_devs[128] = {
[ 0x1c >> 1 ] = "lgdt330x",
[ 0x86 >> 1 ] = "tda9887/cx22702",
[ 0xa0 >> 1 ] = "eeprom",
@@ -117,7 +117,7 @@ static char *i2c_devs[128] = {
[ 0xc8 >> 1 ] = "xc5000",
};
-static void do_i2c_scan(char *name, struct i2c_client *c)
+static void do_i2c_scan(const char *name, struct i2c_client *c)
{
unsigned char buf;
int i,rc;
@@ -183,30 +183,3 @@ int cx88_i2c_init(struct cx88_core *core, struct pci_dev *pci)
return core->i2c_rc;
}
-
-void cx88_i2c_init_ir(struct cx88_core *core)
-{
- /* Instantiate the IR receiver device, if present */
- if (0 == core->i2c_rc) {
- struct i2c_board_info info;
- const unsigned short addr_list[] = {
- 0x18, 0x6b, 0x71,
- I2C_CLIENT_END
- };
-
- memset(&info, 0, sizeof(struct i2c_board_info));
- strlcpy(info.type, "ir_video", I2C_NAME_SIZE);
- /* Use quick read command for probe, some IR chips don't
- * support writes */
- i2c_new_probed_device(&core->i2c_adap, &info, addr_list,
- i2c_probe_func_quick_read);
- }
-}
-
-/* ----------------------------------------------------------------------- */
-
-/*
- * Local variables:
- * c-basic-offset: 8
- * End:
- */
diff --git a/drivers/media/video/cx88/cx88-input.c b/drivers/media/video/cx88/cx88-input.c
index eccc5e49a350..fc777bc6e716 100644
--- a/drivers/media/video/cx88/cx88-input.c
+++ b/drivers/media/video/cx88/cx88-input.c
@@ -405,6 +405,11 @@ int cx88_ir_init(struct cx88_core *core, struct pci_dev *pci)
ir->mask_keycode = 0x7e;
ir->polling = 100; /* ms */
break;
+ case CX88_BOARD_TWINHAN_VP1027_DVBS:
+ ir_codes = RC_MAP_TWINHAN_VP1027_DVBS;
+ ir_type = IR_TYPE_NEC;
+ ir->sampling = 0xff00; /* address */
+ break;
}
if (NULL == ir_codes) {
@@ -530,6 +535,7 @@ void cx88_ir_irq(struct cx88_core *core)
case CX88_BOARD_PROF_7300:
case CX88_BOARD_PROF_7301:
case CX88_BOARD_PROF_6200:
+ case CX88_BOARD_TWINHAN_VP1027_DVBS:
ircode = ir_decode_pulsedistance(ir->samples, ir->scount, 1, 4);
if (ircode == 0xffffffff) { /* decoding error */
@@ -609,13 +615,54 @@ void cx88_ir_irq(struct cx88_core *core)
return;
}
+
+void cx88_i2c_init_ir(struct cx88_core *core)
+{
+ struct i2c_board_info info;
+ const unsigned short addr_list[] = {
+ 0x18, 0x6b, 0x71,
+ I2C_CLIENT_END
+ };
+ const unsigned short *addrp;
+ /* Instantiate the IR receiver device, if present */
+ if (0 != core->i2c_rc)
+ return;
+
+ memset(&info, 0, sizeof(struct i2c_board_info));
+ strlcpy(info.type, "ir_video", I2C_NAME_SIZE);
+
+ /*
+ * We can't call i2c_new_probed_device() because it uses
+ * quick writes for probing and at least some RC receiver
+ * devices only reply to reads.
+ * Also, Hauppauge XVR needs to be specified, as address 0x71
+ * conflicts with another remote type used with saa7134
+ */
+ for (addrp = addr_list; *addrp != I2C_CLIENT_END; addrp++) {
+ info.platform_data = NULL;
+ memset(&core->init_data, 0, sizeof(core->init_data));
+
+ if (*addrp == 0x71) {
+ /* Hauppauge XVR */
+ core->init_data.name = "cx88 Hauppauge XVR remote";
+ core->init_data.ir_codes = RC_MAP_HAUPPAUGE_NEW;
+ core->init_data.type = IR_TYPE_RC5;
+ core->init_data.internal_get_key_func = IR_KBD_GET_KEY_HAUP_XVR;
+
+ info.platform_data = &core->init_data;
+ }
+ if (i2c_smbus_xfer(&core->i2c_adap, *addrp, 0,
+ I2C_SMBUS_READ, 0,
+ I2C_SMBUS_QUICK, NULL) >= 0) {
+ info.addr = *addrp;
+ i2c_new_device(&core->i2c_adap, &info);
+ break;
+ }
+ }
+}
+
/* ---------------------------------------------------------------------- */
MODULE_AUTHOR("Gerd Knorr, Pavel Machek, Chris Pascoe");
MODULE_DESCRIPTION("input driver for cx88 GPIO-based IR remote controls");
MODULE_LICENSE("GPL");
-/*
- * Local variables:
- * c-basic-offset: 8
- * End:
- */
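The cx88_i2c_init_ir() move above also changes how the IR receiver is probed: i2c_new_probed_device() uses SMBus quick writes, which some receivers ignore, so the new code issues a quick read per candidate address. A condensed sketch of that pattern, assuming a generic adapter and not intended as a drop-in for the function in the patch:

/* Hedged sketch of the quick-read probe loop used above. */
static void probe_ir_receiver(struct i2c_adapter *adap)
{
	struct i2c_board_info info;
	static const unsigned short addr_list[] = {
		0x18, 0x6b, 0x71, I2C_CLIENT_END
	};
	const unsigned short *addrp;

	memset(&info, 0, sizeof(info));
	strlcpy(info.type, "ir_video", I2C_NAME_SIZE);

	for (addrp = addr_list; *addrp != I2C_CLIENT_END; addrp++) {
		/* quick *read*: succeeds only if a device answers here */
		if (i2c_smbus_xfer(adap, *addrp, 0, I2C_SMBUS_READ, 0,
				   I2C_SMBUS_QUICK, NULL) >= 0) {
			info.addr = *addrp;
			i2c_new_device(adap, &info);
			break;
		}
	}
}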
diff --git a/drivers/media/video/cx88/cx88-mpeg.c b/drivers/media/video/cx88/cx88-mpeg.c
index 499f8d512ad6..f7d71acbb078 100644
--- a/drivers/media/video/cx88/cx88-mpeg.c
+++ b/drivers/media/video/cx88/cx88-mpeg.c
@@ -313,7 +313,7 @@ void cx8802_buf_queue(struct cx8802_dev *dev, struct cx88_buffer *buf)
/* ----------------------------------------------------------- */
-static void do_cancel_buffers(struct cx8802_dev *dev, char *reason, int restart)
+static void do_cancel_buffers(struct cx8802_dev *dev, const char *reason, int restart)
{
struct cx88_dmaqueue *q = &dev->mpegq;
struct cx88_buffer *buf;
@@ -358,7 +358,7 @@ static void cx8802_timeout(unsigned long data)
do_cancel_buffers(dev,"timeout",1);
}
-static char *cx88_mpeg_irqs[32] = {
+static const char *cx88_mpeg_irqs[32] = {
"ts_risci1", NULL, NULL, NULL,
"ts_risci2", NULL, NULL, NULL,
"ts_oflow", NULL, NULL, NULL,
@@ -849,7 +849,7 @@ static void __devexit cx8802_remove(struct pci_dev *pci_dev)
kfree(dev);
}
-static struct pci_device_id cx8802_pci_tbl[] = {
+static const struct pci_device_id cx8802_pci_tbl[] = {
{
.vendor = 0x14f1,
.device = 0x8802,
diff --git a/drivers/media/video/cx88/cx88-tvaudio.c b/drivers/media/video/cx88/cx88-tvaudio.c
index 239631568f3b..08220de3d74d 100644
--- a/drivers/media/video/cx88/cx88-tvaudio.c
+++ b/drivers/media/video/cx88/cx88-tvaudio.c
@@ -70,7 +70,7 @@ MODULE_PARM_DESC(radio_deemphasis, "Radio deemphasis time constant, "
/* ----------------------------------------------------------- */
-static char *aud_ctl_names[64] = {
+static const char * const aud_ctl_names[64] = {
[EN_BTSC_FORCE_MONO] = "BTSC_FORCE_MONO",
[EN_BTSC_FORCE_STEREO] = "BTSC_FORCE_STEREO",
[EN_BTSC_FORCE_SAP] = "BTSC_FORCE_SAP",
@@ -360,7 +360,15 @@ static void set_audio_standard_NICAM(struct cx88_core *core, u32 mode)
set_audio_registers(core, nicam_bgdki_common);
set_audio_registers(core, nicam_i);
break;
- default:
+ case WW_NONE:
+ case WW_BTSC:
+ case WW_BG:
+ case WW_DK:
+ case WW_EIAJ:
+ case WW_I2SPT:
+ case WW_FM:
+ case WW_I2SADC:
+ case WW_M:
dprintk("%s PAL-BGDK NICAM (status: known-good)\n", __func__);
set_audio_registers(core, nicam_bgdki_common);
set_audio_registers(core, nicam_default);
@@ -621,7 +629,13 @@ static void set_audio_standard_A2(struct cx88_core *core, u32 mode)
dprintk("%s AM-L (status: devel)\n", __func__);
set_audio_registers(core, am_l);
break;
- default:
+ case WW_NONE:
+ case WW_BTSC:
+ case WW_EIAJ:
+ case WW_I2SPT:
+ case WW_FM:
+ case WW_I2SADC:
+ case WW_M:
dprintk("%s Warning: wrong value\n", __func__);
return;
break;
@@ -779,7 +793,7 @@ void cx88_set_tvaudio(struct cx88_core *core)
set_audio_finish(core, EN_I2SIN_ENABLE);
break;
case WW_NONE:
- default:
+ case WW_I2SPT:
printk("%s/0: unknown tv audio mode [%d]\n",
core->name, core->tvaudio);
break;
@@ -795,8 +809,8 @@ void cx88_newstation(struct cx88_core *core)
void cx88_get_stereo(struct cx88_core *core, struct v4l2_tuner *t)
{
- static char *m[] = { "stereo", "dual mono", "mono", "sap" };
- static char *p[] = { "no pilot", "pilot c1", "pilot c2", "?" };
+ static const char * const m[] = { "stereo", "dual mono", "mono", "sap" };
+ static const char * const p[] = { "no pilot", "pilot c1", "pilot c2", "?" };
u32 reg, mode, pilot;
reg = cx_read(AUD_STATUS);
@@ -840,7 +854,12 @@ void cx88_get_stereo(struct cx88_core *core, struct v4l2_tuner *t)
break;
}
break;
- default:
+ case WW_NONE:
+ case WW_I:
+ case WW_L:
+ case WW_I2SPT:
+ case WW_FM:
+ case WW_I2SADC:
/* nothing */
break;
}
@@ -945,6 +964,9 @@ void cx88_set_stereo(struct cx88_core *core, u32 mode, int manual)
}
break;
case WW_I2SADC:
+ case WW_NONE:
+ case WW_EIAJ:
+ case WW_I2SPT:
/* DO NOTHING */
break;
}
@@ -1000,7 +1022,12 @@ int cx88_audio_thread(void *data)
/* automatically switch to best available mode */
cx88_set_stereo(core, mode, 0);
break;
- default:
+ case WW_NONE:
+ case WW_BTSC:
+ case WW_EIAJ:
+ case WW_I2SPT:
+ case WW_FM:
+ case WW_I2SADC:
hw_autodetect:
/* stereo autodetection is supported by hardware so
we don't need to do it manually. Do nothing. */
diff --git a/drivers/media/video/cx88/cx88-vbi.c b/drivers/media/video/cx88/cx88-vbi.c
index d9445b0e7ab2..f8f8389c0362 100644
--- a/drivers/media/video/cx88/cx88-vbi.c
+++ b/drivers/media/video/cx88/cx88-vbi.c
@@ -230,7 +230,7 @@ static void vbi_release(struct videobuf_queue *q, struct videobuf_buffer *vb)
cx88_free_buffer(q,buf);
}
-struct videobuf_queue_ops cx8800_vbi_qops = {
+const struct videobuf_queue_ops cx8800_vbi_qops = {
.buf_setup = vbi_setup,
.buf_prepare = vbi_prepare,
.buf_queue = vbi_queue,
diff --git a/drivers/media/video/cx88/cx88-video.c b/drivers/media/video/cx88/cx88-video.c
index 0fab65c3ab39..62cea9549404 100644
--- a/drivers/media/video/cx88/cx88-video.c
+++ b/drivers/media/video/cx88/cx88-video.c
@@ -31,7 +31,6 @@
#include <linux/kmod.h>
#include <linux/kernel.h>
#include <linux/slab.h>
-#include <linux/smp_lock.h>
#include <linux/interrupt.h>
#include <linux/dma-mapping.h>
#include <linux/delay.h>
@@ -41,6 +40,7 @@
#include "cx88.h"
#include <media/v4l2-common.h>
#include <media/v4l2-ioctl.h>
+#include <media/wm8775.h>
MODULE_DESCRIPTION("v4l2 driver module for cx2388x based TV cards");
MODULE_AUTHOR("Gerd Knorr <kraxel@bytesex.org> [SuSE Labs]");
@@ -78,7 +78,7 @@ MODULE_PARM_DESC(vid_limit,"capture memory limit in megabytes");
/* ------------------------------------------------------------------- */
/* static data */
-static struct cx8800_fmt formats[] = {
+static const struct cx8800_fmt formats[] = {
{
.name = "8 bpp, gray",
.fourcc = V4L2_PIX_FMT_GREY,
@@ -142,7 +142,7 @@ static struct cx8800_fmt formats[] = {
},
};
-static struct cx8800_fmt* format_by_fourcc(unsigned int fourcc)
+static const struct cx8800_fmt* format_by_fourcc(unsigned int fourcc)
{
unsigned int i;
@@ -159,7 +159,7 @@ static const struct v4l2_queryctrl no_ctl = {
.flags = V4L2_CTRL_FLAG_DISABLED,
};
-static struct cx88_ctrl cx8800_ctls[] = {
+static const struct cx88_ctrl cx8800_ctls[] = {
/* --- video --- */
{
.v = {
@@ -288,7 +288,7 @@ static struct cx88_ctrl cx8800_ctls[] = {
.shift = 0,
}
};
-static const int CX8800_CTLS = ARRAY_SIZE(cx8800_ctls);
+enum { CX8800_CTLS = ARRAY_SIZE(cx8800_ctls) };
/* Must be sorted from low to high control ID! */
const u32 cx88_user_ctrls[] = {
@@ -306,7 +306,7 @@ const u32 cx88_user_ctrls[] = {
};
EXPORT_SYMBOL(cx88_user_ctrls);
-static const u32 *ctrl_classes[] = {
+static const u32 * const ctrl_classes[] = {
cx88_user_ctrls,
NULL
};
@@ -710,7 +710,7 @@ static void buffer_release(struct videobuf_queue *q, struct videobuf_buffer *vb)
cx88_free_buffer(q,buf);
}
-static struct videobuf_queue_ops cx8800_video_qops = {
+static const struct videobuf_queue_ops cx8800_video_qops = {
.buf_setup = buffer_setup,
.buf_prepare = buffer_prepare,
.buf_queue = buffer_queue,
@@ -752,7 +752,7 @@ static int video_open(struct file *file)
{
struct video_device *vdev = video_devdata(file);
struct cx8800_dev *dev = video_drvdata(file);
- struct cx88_core *core;
+ struct cx88_core *core = dev->core;
struct cx8800_fh *fh;
enum v4l2_buf_type type = 0;
int radio = 0;
@@ -769,19 +769,14 @@ static int video_open(struct file *file)
break;
}
- lock_kernel();
-
- core = dev->core;
-
dprintk(1, "open dev=%s radio=%d type=%s\n",
video_device_node_name(vdev), radio, v4l2_type_names[type]);
/* allocate + initialize per filehandle data */
fh = kzalloc(sizeof(*fh),GFP_KERNEL);
- if (NULL == fh) {
- unlock_kernel();
+ if (unlikely(!fh))
return -ENOMEM;
- }
+
file->private_data = fh;
fh->dev = dev;
fh->radio = radio;
@@ -790,18 +785,20 @@ static int video_open(struct file *file)
fh->height = 240;
fh->fmt = format_by_fourcc(V4L2_PIX_FMT_BGR24);
+ mutex_lock(&core->lock);
+
videobuf_queue_sg_init(&fh->vidq, &cx8800_video_qops,
&dev->pci->dev, &dev->slock,
V4L2_BUF_TYPE_VIDEO_CAPTURE,
V4L2_FIELD_INTERLACED,
sizeof(struct cx88_buffer),
- fh);
+ fh, NULL);
videobuf_queue_sg_init(&fh->vbiq, &cx8800_vbi_qops,
&dev->pci->dev, &dev->slock,
V4L2_BUF_TYPE_VBI_CAPTURE,
V4L2_FIELD_SEQ_TB,
sizeof(struct cx88_buffer),
- fh);
+ fh, NULL);
if (fh->radio) {
dprintk(1,"video_open: setting radio device\n");
@@ -826,9 +823,9 @@ static int video_open(struct file *file)
}
call_all(core, tuner, s_radio);
}
- unlock_kernel();
atomic_inc(&core->users);
+ mutex_unlock(&core->lock);
return 0;
}
@@ -920,10 +917,11 @@ static int video_release(struct file *file)
videobuf_mmap_free(&fh->vidq);
videobuf_mmap_free(&fh->vbiq);
+
+ mutex_lock(&dev->core->lock);
file->private_data = NULL;
kfree(fh);
- mutex_lock(&dev->core->lock);
if(atomic_dec_and_test(&dev->core->users))
call_all(dev->core, core, s_power, 0);
mutex_unlock(&dev->core->lock);
@@ -944,7 +942,7 @@ video_mmap(struct file *file, struct vm_area_struct * vma)
int cx88_get_control (struct cx88_core *core, struct v4l2_control *ctl)
{
- struct cx88_ctrl *c = NULL;
+ const struct cx88_ctrl *c = NULL;
u32 value;
int i;
@@ -976,9 +974,10 @@ EXPORT_SYMBOL(cx88_get_control);
int cx88_set_control(struct cx88_core *core, struct v4l2_control *ctl)
{
- struct cx88_ctrl *c = NULL;
+ const struct cx88_ctrl *c = NULL;
u32 value,mask;
int i;
+ struct v4l2_control client_ctl;
for (i = 0; i < CX8800_CTLS; i++) {
if (cx8800_ctls[i].v.id == ctl->id) {
@@ -992,6 +991,27 @@ int cx88_set_control(struct cx88_core *core, struct v4l2_control *ctl)
ctl->value = c->v.minimum;
if (ctl->value > c->v.maximum)
ctl->value = c->v.maximum;
+
+ /* Pass changes onto any WM8775 */
+ client_ctl.id = ctl->id;
+ switch (ctl->id) {
+ case V4L2_CID_AUDIO_MUTE:
+ client_ctl.value = ctl->value;
+ break;
+ case V4L2_CID_AUDIO_VOLUME:
+ client_ctl.value = (ctl->value) ?
+ (0x90 + ctl->value) << 8 : 0;
+ break;
+ case V4L2_CID_AUDIO_BALANCE:
+ client_ctl.value = ctl->value << 9;
+ break;
+ default:
+ client_ctl.id = 0;
+ break;
+ }
+ if (client_ctl.id)
+ call_hw(core, WM8775_GID, core, s_ctrl, &client_ctl);
+
mask=c->mask;
switch (ctl->id) {
case V4L2_CID_AUDIO_BALANCE:
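The new block above rescales the bridge's control values before forwarding them to the WM8775 codec. A standalone sketch of just the arithmetic visible in the hunk; the exact register ranges on the codec side are an assumption here:

/* Standalone sketch of the value mapping applied before the control is
 * forwarded to the WM8775 subdevice.
 */
#include <stdio.h>

static int map_volume(int v4l2_vol)	/* 0 disables, otherwise offset+shift */
{
	return v4l2_vol ? (0x90 + v4l2_vol) << 8 : 0;
}

static int map_balance(int v4l2_bal)	/* plain left shift */
{
	return v4l2_bal << 9;
}

int main(void)
{
	printf("volume  0x3f -> 0x%x\n", map_volume(0x3f));
	printf("balance 0x20 -> 0x%x\n", map_balance(0x20));
	return 0;
}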
@@ -1072,7 +1092,7 @@ static int vidioc_try_fmt_vid_cap(struct file *file, void *priv,
struct v4l2_format *f)
{
struct cx88_core *core = ((struct cx8800_fh *)priv)->dev->core;
- struct cx8800_fmt *fmt;
+ const struct cx8800_fmt *fmt;
enum v4l2_field field;
unsigned int maxw, maxh;
@@ -1247,7 +1267,7 @@ static int vidioc_s_std (struct file *file, void *priv, v4l2_std_id *tvnorms)
/* only one input in this sample driver */
int cx88_enum_input (struct cx88_core *core,struct v4l2_input *i)
{
- static const char *iname[] = {
+ static const char * const iname[] = {
[ CX88_VMUX_COMPOSITE1 ] = "Composite1",
[ CX88_VMUX_COMPOSITE2 ] = "Composite2",
[ CX88_VMUX_COMPOSITE3 ] = "Composite3",
@@ -1267,9 +1287,10 @@ int cx88_enum_input (struct cx88_core *core,struct v4l2_input *i)
i->type = V4L2_INPUT_TYPE_CAMERA;
strcpy(i->name,iname[INPUT(n).type]);
if ((CX88_VMUX_TELEVISION == INPUT(n).type) ||
- (CX88_VMUX_CABLE == INPUT(n).type))
+ (CX88_VMUX_CABLE == INPUT(n).type)) {
i->type = V4L2_INPUT_TYPE_TUNER;
i->std = CX88_NORMS;
+ }
return 0;
}
EXPORT_SYMBOL(cx88_enum_input);
@@ -1537,7 +1558,9 @@ static int radio_queryctrl (struct file *file, void *priv,
if (c->id < V4L2_CID_BASE ||
c->id >= V4L2_CID_LASTP1)
return -EINVAL;
- if (c->id == V4L2_CID_AUDIO_MUTE) {
+ if (c->id == V4L2_CID_AUDIO_MUTE ||
+ c->id == V4L2_CID_AUDIO_VOLUME ||
+ c->id == V4L2_CID_AUDIO_BALANCE) {
for (i = 0; i < CX8800_CTLS; i++) {
if (cx8800_ctls[i].v.id == c->id)
break;
@@ -1578,7 +1601,7 @@ static void cx8800_vid_timeout(unsigned long data)
spin_unlock_irqrestore(&dev->slock,flags);
}
-static char *cx88_vid_irqs[32] = {
+static const char *cx88_vid_irqs[32] = {
"y_risci1", "u_risci1", "v_risci1", "vbi_risc1",
"y_risci2", "u_risci2", "v_risci2", "vbi_risc2",
"y_oflow", "u_oflow", "v_oflow", "vbi_oflow",
@@ -1723,7 +1746,7 @@ static const struct v4l2_ioctl_ops video_ioctl_ops = {
static struct video_device cx8800_vbi_template;
-static struct video_device cx8800_video_template = {
+static const struct video_device cx8800_video_template = {
.name = "cx8800-video",
.fops = &video_fops,
.ioctl_ops = &video_ioctl_ops,
@@ -1758,7 +1781,7 @@ static const struct v4l2_ioctl_ops radio_ioctl_ops = {
#endif
};
-static struct video_device cx8800_radio_template = {
+static const struct video_device cx8800_radio_template = {
.name = "cx8800-radio",
.fops = &radio_fops,
.ioctl_ops = &radio_ioctl_ops,
@@ -1872,20 +1895,19 @@ static int __devinit cx8800_initdev(struct pci_dev *pci_dev,
if (core->board.audio_chip == V4L2_IDENT_WM8775)
v4l2_i2c_new_subdev(&core->v4l2_dev, &core->i2c_adap,
- "wm8775", "wm8775", 0x36 >> 1, NULL);
+ "wm8775", 0x36 >> 1, NULL);
if (core->board.audio_chip == V4L2_IDENT_TVAUDIO) {
/* This probes for a tda9874 as is used on some
Pixelview Ultra boards. */
- v4l2_i2c_new_subdev(&core->v4l2_dev,
- &core->i2c_adap,
- "tvaudio", "tvaudio", 0, I2C_ADDRS(0xb0 >> 1));
+ v4l2_i2c_new_subdev(&core->v4l2_dev, &core->i2c_adap,
+ "tvaudio", 0, I2C_ADDRS(0xb0 >> 1));
}
switch (core->boardnr) {
case CX88_BOARD_DVICO_FUSIONHDTV_5_GOLD:
case CX88_BOARD_DVICO_FUSIONHDTV_7_GOLD: {
- static struct i2c_board_info rtc_info = {
+ static const struct i2c_board_info rtc_info = {
I2C_BOARD_INFO("isl1208", 0x6f)
};
@@ -2082,7 +2104,7 @@ static int cx8800_resume(struct pci_dev *pci_dev)
/* ----------------------------------------------------------- */
-static struct pci_device_id cx8800_pci_tbl[] = {
+static const struct pci_device_id cx8800_pci_tbl[] = {
{
.vendor = 0x14f1,
.device = 0x8800,
diff --git a/drivers/media/video/cx88/cx88-vp3054-i2c.c b/drivers/media/video/cx88/cx88-vp3054-i2c.c
index 794f2932b755..ec5476d8b10b 100644
--- a/drivers/media/video/cx88/cx88-vp3054-i2c.c
+++ b/drivers/media/video/cx88/cx88-vp3054-i2c.c
@@ -121,8 +121,6 @@ int vp3054_i2c_probe(struct cx8802_dev *dev)
memcpy(&vp3054_i2c->algo, &vp3054_i2c_algo_template,
sizeof(vp3054_i2c->algo));
- vp3054_i2c->adap.class |= I2C_CLASS_TV_DIGITAL;
-
vp3054_i2c->adap.dev.parent = &dev->pci->dev;
strlcpy(vp3054_i2c->adap.name, core->name,
sizeof(vp3054_i2c->adap.name));
diff --git a/drivers/media/video/cx88/cx88.h b/drivers/media/video/cx88/cx88.h
index 33d161a11725..e8c732e7ae4f 100644
--- a/drivers/media/video/cx88/cx88.h
+++ b/drivers/media/video/cx88/cx88.h
@@ -31,9 +31,8 @@
#include <media/videobuf-dma-sg.h>
#include <media/v4l2-chip-ident.h>
#include <media/cx2341x.h>
-#if defined(CONFIG_VIDEO_CX88_DVB) || defined(CONFIG_VIDEO_CX88_DVB_MODULE)
#include <media/videobuf-dvb.h>
-#endif
+#include <media/ir-kbd-i2c.h>
#include "btcx-risc.h"
#include "cx88-reg.h"
@@ -108,7 +107,7 @@ static unsigned int inline norm_maxh(v4l2_std_id norm)
/* static data */
struct cx8800_fmt {
- char *name;
+ const char *name;
u32 fourcc; /* v4l2 format id */
int depth;
int flags;
@@ -138,7 +137,7 @@ struct cx88_ctrl {
/* more */
struct sram_channel {
- char *name;
+ const char *name;
u32 cmds_start;
u32 ctrl_start;
u32 cdt;
@@ -149,7 +148,7 @@ struct sram_channel {
u32 cnt1_reg;
u32 cnt2_reg;
};
-extern struct sram_channel cx88_sram_channels[];
+extern const struct sram_channel cx88_sram_channels[];
/* ----------------------------------------------------------- */
/* card configuration */
@@ -240,6 +239,7 @@ extern struct sram_channel cx88_sram_channels[];
#define CX88_BOARD_WINFAST_DTV2000H_J 82
#define CX88_BOARD_PROF_7301 83
#define CX88_BOARD_SAMSUNG_SMT_7020 84
+#define CX88_BOARD_TWINHAN_VP1027_DVBS 85
enum cx88_itype {
CX88_VMUX_COMPOSITE1 = 1,
@@ -262,7 +262,7 @@ struct cx88_input {
};
struct cx88_board {
- char *name;
+ const char *name;
unsigned int tuner_type;
unsigned int radio_type;
unsigned char tuner_addr;
@@ -281,6 +281,20 @@ struct cx88_subid {
u32 card;
};
+enum cx88_tvaudio {
+ WW_NONE = 1,
+ WW_BTSC,
+ WW_BG,
+ WW_DK,
+ WW_I,
+ WW_L,
+ WW_EIAJ,
+ WW_I2SPT,
+ WW_FM,
+ WW_I2SADC,
+ WW_M
+};
+
#define INPUT(nr) (core->board.input[nr])
/* ----------------------------------------------------------- */
@@ -300,7 +314,7 @@ struct cx88_buffer {
/* cx88 specific */
unsigned int bpl;
struct btcx_riscmem risc;
- struct cx8800_fmt *fmt;
+ const struct cx8800_fmt *fmt;
u32 count;
};
@@ -352,7 +366,7 @@ struct cx88_core {
/* state info */
struct task_struct *kthread;
v4l2_std_id tvnorm;
- u32 tvaudio;
+ enum cx88_tvaudio tvaudio;
u32 audiomode_manual;
u32 audiomode_current;
u32 input;
@@ -363,6 +377,9 @@ struct cx88_core {
/* IR remote control state */
struct cx88_IR *ir;
+ /* I2C remote data */
+ struct IR_i2c_init_data init_data;
+
struct mutex lock;
/* various v4l controls */
u32 freq;
@@ -381,17 +398,19 @@ static inline struct cx88_core *to_core(struct v4l2_device *v4l2_dev)
return container_of(v4l2_dev, struct cx88_core, v4l2_dev);
}
-#define call_all(core, o, f, args...) \
+#define call_hw(core, grpid, o, f, args...) \
do { \
if (!core->i2c_rc) { \
if (core->gate_ctrl) \
core->gate_ctrl(core, 1); \
- v4l2_device_call_all(&core->v4l2_dev, 0, o, f, ##args); \
+ v4l2_device_call_all(&core->v4l2_dev, grpid, o, f, ##args); \
if (core->gate_ctrl) \
core->gate_ctrl(core, 0); \
} \
} while (0)
+#define call_all(core, o, f, args...) call_hw(core, 0, o, f, ##args)
+
struct cx8800_dev;
struct cx8802_dev;
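The new call_hw() macro above adds a group-id argument to the existing broadcast helper: grpid 0 (the call_all() wrapper) still reaches every subdevice, while a non-zero grpid only reaches subdevices registered with a matching grp_id. A short illustrative fragment, not taken verbatim from the patch, assuming the WM8775 subdevice was registered with grp_id set to WM8775_GID:

/* Hedged usage sketch for call_hw()/call_all(). */
static void forward_mute_to_wm8775(struct cx88_core *core, int mute)
{
	struct v4l2_control ctl = {
		.id    = V4L2_CID_AUDIO_MUTE,
		.value = mute,
	};

	/* call_all(core, core, s_ctrl, &ctl) would broadcast to every
	 * subdevice; a non-zero group id targets only the WM8775.      */
	call_hw(core, WM8775_GID, core, s_ctrl, &ctl);
}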
@@ -410,7 +429,7 @@ struct cx8800_fh {
unsigned int nclips;
/* video capture */
- struct cx8800_fmt *fmt;
+ const struct cx8800_fmt *fmt;
unsigned int width,height;
struct videobuf_queue vidq;
@@ -565,7 +584,7 @@ struct cx8802_dev {
/* ----------------------------------------------------------- */
/* cx88-core.c */
-extern void cx88_print_irqbits(char *name, char *tag, char **strings,
+extern void cx88_print_irqbits(const char *name, const char *tag, const char *strings[],
int len, u32 bits, u32 mask);
extern int cx88_core_irq(struct cx88_core *core, u32 status);
@@ -592,10 +611,10 @@ cx88_free_buffer(struct videobuf_queue *q, struct cx88_buffer *buf);
extern void cx88_risc_disasm(struct cx88_core *core,
struct btcx_riscmem *risc);
extern int cx88_sram_channel_setup(struct cx88_core *core,
- struct sram_channel *ch,
+ const struct sram_channel *ch,
unsigned int bpl, u32 risc);
extern void cx88_sram_channel_dump(struct cx88_core *core,
- struct sram_channel *ch);
+ const struct sram_channel *ch);
extern int cx88_set_scale(struct cx88_core *core, unsigned int width,
unsigned int height, enum v4l2_field field);
@@ -603,8 +622,8 @@ extern int cx88_set_tvnorm(struct cx88_core *core, v4l2_std_id norm);
extern struct video_device *cx88_vdev_init(struct cx88_core *core,
struct pci_dev *pci,
- struct video_device *template,
- char *type);
+ const struct video_device *template_,
+ const char *type);
extern struct cx88_core* cx88_core_get(struct pci_dev *pci);
extern void cx88_core_put(struct cx88_core *core,
struct pci_dev *pci);
@@ -630,13 +649,12 @@ int cx8800_restart_vbi_queue(struct cx8800_dev *dev,
struct cx88_dmaqueue *q);
void cx8800_vbi_timeout(unsigned long data);
-extern struct videobuf_queue_ops cx8800_vbi_qops;
+extern const struct videobuf_queue_ops cx8800_vbi_qops;
/* ----------------------------------------------------------- */
/* cx88-i2c.c */
extern int cx88_i2c_init(struct cx88_core *core, struct pci_dev *pci);
-extern void cx88_i2c_init_ir(struct cx88_core *core);
/* ----------------------------------------------------------- */
@@ -651,18 +669,6 @@ extern void cx88_setup_xc3028(struct cx88_core *core, struct xc2028_ctrl *ctl);
/* ----------------------------------------------------------- */
/* cx88-tvaudio.c */
-#define WW_NONE 1
-#define WW_BTSC 2
-#define WW_BG 3
-#define WW_DK 4
-#define WW_I 5
-#define WW_L 6
-#define WW_EIAJ 7
-#define WW_I2SPT 8
-#define WW_FM 9
-#define WW_I2SADC 10
-#define WW_M 11
-
void cx88_set_tvaudio(struct cx88_core *core);
void cx88_newstation(struct cx88_core *core);
void cx88_get_stereo(struct cx88_core *core, struct v4l2_tuner *t);
@@ -686,6 +692,7 @@ int cx88_ir_fini(struct cx88_core *core);
void cx88_ir_irq(struct cx88_core *core);
int cx88_ir_start(struct cx88_core *core);
void cx88_ir_stop(struct cx88_core *core);
+extern void cx88_i2c_init_ir(struct cx88_core *core);
/* ----------------------------------------------------------- */
/* cx88-mpeg.c */
@@ -705,10 +712,3 @@ int cx88_set_freq (struct cx88_core *core,struct v4l2_frequency *f);
int cx88_get_control(struct cx88_core *core, struct v4l2_control *ctl);
int cx88_set_control(struct cx88_core *core, struct v4l2_control *ctl);
int cx88_video_mux(struct cx88_core *core, unsigned int input);
-
-/*
- * Local variables:
- * c-basic-offset: 8
- * End:
- * kate: eol "unix"; indent-width 3; remove-trailing-space on; replace-trailing-space-save on; tab-width 8; replace-tabs off; space-indent off; mixed-indent off
- */
diff --git a/drivers/media/video/davinci/vpfe_capture.c b/drivers/media/video/davinci/vpfe_capture.c
index 1c2588247289..7333a9bb2549 100644
--- a/drivers/media/video/davinci/vpfe_capture.c
+++ b/drivers/media/video/davinci/vpfe_capture.c
@@ -370,7 +370,7 @@ static int vpfe_config_ccdc_image_format(struct vpfe_device *vpfe_dev)
* For a given standard, this functions sets up the default
* pix format & crop values in the vpfe device and ccdc. It first
* starts with defaults based values from the standard table.
- * It then checks if sub device support g_fmt and then override the
+ * It then checks if the sub device supports g_mbus_fmt and then overrides the
* values based on that.Sets crop values to match with scan resolution
* starting at 0,0. It calls vpfe_config_ccdc_image_format() set the
* values in ccdc
@@ -379,6 +379,8 @@ static int vpfe_config_image_format(struct vpfe_device *vpfe_dev,
const v4l2_std_id *std_id)
{
struct vpfe_subdev_info *sdinfo = vpfe_dev->current_subdev;
+ struct v4l2_mbus_framefmt mbus_fmt;
+ struct v4l2_pix_format *pix = &vpfe_dev->fmt.fmt.pix;
int i, ret = 0;
for (i = 0; i < ARRAY_SIZE(vpfe_standards); i++) {
@@ -403,29 +405,36 @@ static int vpfe_config_image_format(struct vpfe_device *vpfe_dev,
vpfe_dev->crop.left = 0;
vpfe_dev->crop.width = vpfe_dev->std_info.active_pixels;
vpfe_dev->crop.height = vpfe_dev->std_info.active_lines;
- vpfe_dev->fmt.fmt.pix.width = vpfe_dev->crop.width;
- vpfe_dev->fmt.fmt.pix.height = vpfe_dev->crop.height;
+ pix->width = vpfe_dev->crop.width;
+ pix->height = vpfe_dev->crop.height;
/* first field and frame format based on standard frame format */
if (vpfe_dev->std_info.frame_format) {
- vpfe_dev->fmt.fmt.pix.field = V4L2_FIELD_INTERLACED;
+ pix->field = V4L2_FIELD_INTERLACED;
/* assume V4L2_PIX_FMT_UYVY as default */
- vpfe_dev->fmt.fmt.pix.pixelformat = V4L2_PIX_FMT_UYVY;
+ pix->pixelformat = V4L2_PIX_FMT_UYVY;
+ v4l2_fill_mbus_format(&mbus_fmt, pix,
+ V4L2_MBUS_FMT_YUYV10_2X10);
} else {
- vpfe_dev->fmt.fmt.pix.field = V4L2_FIELD_NONE;
+ pix->field = V4L2_FIELD_NONE;
/* assume V4L2_PIX_FMT_SBGGR8 */
- vpfe_dev->fmt.fmt.pix.pixelformat = V4L2_PIX_FMT_SBGGR8;
+ pix->pixelformat = V4L2_PIX_FMT_SBGGR8;
+ v4l2_fill_mbus_format(&mbus_fmt, pix,
+ V4L2_MBUS_FMT_SBGGR8_1X8);
}
- /* if sub device supports g_fmt, override the defaults */
+ /* if sub device supports g_mbus_fmt, override the defaults */
ret = v4l2_device_call_until_err(&vpfe_dev->v4l2_dev,
- sdinfo->grp_id, video, g_fmt, &vpfe_dev->fmt);
+ sdinfo->grp_id, video, g_mbus_fmt, &mbus_fmt);
if (ret && ret != -ENOIOCTLCMD) {
v4l2_err(&vpfe_dev->v4l2_dev,
- "error in getting g_fmt from sub device\n");
+ "error in getting g_mbus_fmt from sub device\n");
return ret;
}
+ v4l2_fill_pix_format(pix, &mbus_fmt);
+ pix->bytesperline = pix->width * 2;
+ pix->sizeimage = pix->bytesperline * pix->height;
/* Sets the values in CCDC */
ret = vpfe_config_ccdc_image_format(vpfe_dev);
@@ -434,11 +443,8 @@ static int vpfe_config_image_format(struct vpfe_device *vpfe_dev,
/* Update the values of sizeimage and bytesperline */
if (!ret) {
- vpfe_dev->fmt.fmt.pix.bytesperline =
- ccdc_dev->hw_ops.get_line_length();
- vpfe_dev->fmt.fmt.pix.sizeimage =
- vpfe_dev->fmt.fmt.pix.bytesperline *
- vpfe_dev->fmt.fmt.pix.height;
+ pix->bytesperline = ccdc_dev->hw_ops.get_line_length();
+ pix->sizeimage = pix->bytesperline * pix->height;
}
return ret;
}
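The vpfe change above replaces the video g_fmt subdev call with g_mbus_fmt plus the v4l2_fill_mbus_format()/v4l2_fill_pix_format() helpers; the helpers translate width, height, field and colorspace between the two representations, while bytesperline and sizeimage remain the bridge driver's job. A condensed sketch of that round trip, reusing the media-bus code from the hunk and assuming 2 bytes per pixel as the patch does:

/* Hedged sketch of the pix <-> mbus round trip used above. */
static void sync_pix_with_subdev(struct v4l2_pix_format *pix,
				 struct v4l2_mbus_framefmt *mbus)
{
	v4l2_fill_mbus_format(mbus, pix, V4L2_MBUS_FMT_YUYV10_2X10);
	/* ... the subdev's g_mbus_fmt op would adjust *mbus here ... */
	v4l2_fill_pix_format(pix, mbus);
	pix->bytesperline = pix->width * 2;	/* 2 bytes/pixel assumed */
	pix->sizeimage    = pix->bytesperline * pix->height;
}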
@@ -1366,7 +1372,7 @@ static int vpfe_reqbufs(struct file *file, void *priv,
req_buf->type,
vpfe_dev->fmt.fmt.pix.field,
sizeof(struct videobuf_buffer),
- fh);
+ fh, NULL);
fh->io_allowed = 1;
vpfe_dev->io_usrs = 1;
@@ -1980,7 +1986,6 @@ static __init int vpfe_probe(struct platform_device *pdev)
vpfe_dev->sd[i] =
v4l2_i2c_new_subdev_board(&vpfe_dev->v4l2_dev,
i2c_adap,
- sdinfo->name,
&sdinfo->board_info,
NULL);
if (vpfe_dev->sd[i]) {
diff --git a/drivers/media/video/davinci/vpif_capture.c b/drivers/media/video/davinci/vpif_capture.c
index a7f48b53d3fc..193abab6b355 100644
--- a/drivers/media/video/davinci/vpif_capture.c
+++ b/drivers/media/video/davinci/vpif_capture.c
@@ -731,7 +731,6 @@ static int vpif_mmap(struct file *filep, struct vm_area_struct *vma)
*/
static unsigned int vpif_poll(struct file *filep, poll_table * wait)
{
- int err = 0;
struct vpif_fh *fh = filep->private_data;
struct channel_obj *channel = fh->channel;
struct common_obj *common = &(channel->common[VPIF_VIDEO_INDEX]);
@@ -739,8 +738,7 @@ static unsigned int vpif_poll(struct file *filep, poll_table * wait)
vpif_dbg(2, debug, "vpif_poll\n");
if (common->started)
- err = videobuf_poll_stream(filep, &common->buffer_queue, wait);
-
+ return videobuf_poll_stream(filep, &common->buffer_queue, wait);
return 0;
}
@@ -793,7 +791,7 @@ static int vpif_open(struct file *filep)
}
/* Allocate memory for the file handle object */
- fh = kmalloc(sizeof(struct vpif_fh), GFP_KERNEL);
+ fh = kzalloc(sizeof(struct vpif_fh), GFP_KERNEL);
if (NULL == fh) {
vpif_err("unable to allocate memory for file handle object\n");
ret = -ENOMEM;
@@ -929,7 +927,8 @@ static int vpif_reqbufs(struct file *file, void *priv,
&common->irqlock,
reqbuf->type,
common->fmt.fmt.pix.field,
- sizeof(struct videobuf_buffer), fh);
+ sizeof(struct videobuf_buffer), fh,
+ NULL);
/* Set io allowed member of file handle to TRUE */
fh->io_allowed[index] = 1;
@@ -1030,9 +1029,10 @@ static int vpif_qbuf(struct file *file, void *priv, struct v4l2_buffer *buf)
goto qbuf_exit;
if ((VIDEOBUF_NEEDS_INIT != buf1->state)
- && (buf1->baddr != tbuf.m.userptr))
+ && (buf1->baddr != tbuf.m.userptr)) {
vpif_buffer_release(&common->buffer_queue, buf1);
buf1->baddr = tbuf.m.userptr;
+ }
break;
default:
@@ -1994,7 +1994,7 @@ static __init int vpif_probe(struct platform_device *pdev)
config = pdev->dev.platform_data;
subdev_count = config->subdev_count;
- vpif_obj.sd = kmalloc(sizeof(struct v4l2_subdev *) * subdev_count,
+ vpif_obj.sd = kzalloc(sizeof(struct v4l2_subdev *) * subdev_count,
GFP_KERNEL);
if (vpif_obj.sd == NULL) {
vpif_err("unable to allocate memory for subdevice pointers\n");
@@ -2013,7 +2013,6 @@ static __init int vpif_probe(struct platform_device *pdev)
vpif_obj.sd[i] =
v4l2_i2c_new_subdev_board(&vpif_obj.v4l2_dev,
i2c_adap,
- subdevdata->name,
&subdevdata->board_info,
NULL);
@@ -2113,7 +2112,7 @@ static const struct dev_pm_ops vpif_dev_pm_ops = {
.resume = vpif_resume,
};
-static struct platform_driver vpif_driver = {
+static __refdata struct platform_driver vpif_driver = {
.driver = {
.name = "vpif_capture",
.owner = THIS_MODULE,
diff --git a/drivers/media/video/davinci/vpif_display.c b/drivers/media/video/davinci/vpif_display.c
index da07607cbc55..412c65d54fe1 100644
--- a/drivers/media/video/davinci/vpif_display.c
+++ b/drivers/media/video/davinci/vpif_display.c
@@ -600,7 +600,7 @@ static int vpif_open(struct file *filep)
ch = video_get_drvdata(vdev);
/* Allocate memory for the file handle object */
- fh = kmalloc(sizeof(struct vpif_fh), GFP_KERNEL);
+ fh = kzalloc(sizeof(struct vpif_fh), GFP_KERNEL);
if (fh == NULL) {
vpif_err("unable to allocate memory for file handle object\n");
return -ENOMEM;
@@ -853,7 +853,8 @@ static int vpif_reqbufs(struct file *file, void *priv,
&video_qops, NULL,
&common->irqlock,
reqbuf->type, field,
- sizeof(struct videobuf_buffer), fh);
+ sizeof(struct videobuf_buffer), fh,
+ NULL);
/* Set io allowed member of file handle to TRUE */
fh->io_allowed[index] = 1;
@@ -935,9 +936,10 @@ static int vpif_qbuf(struct file *file, void *priv, struct v4l2_buffer *buf)
goto qbuf_exit;
if ((VIDEOBUF_NEEDS_INIT != buf1->state)
- && (buf1->baddr != tbuf.m.userptr))
+ && (buf1->baddr != tbuf.m.userptr)) {
vpif_buffer_release(&common->buffer_queue, buf1);
buf1->baddr = tbuf.m.userptr;
+ }
break;
default:
@@ -1395,7 +1397,7 @@ static int initialize_vpif(void)
/* Allocate memory for six channel objects */
for (i = 0; i < VPIF_DISPLAY_MAX_DEVICES; i++) {
vpif_obj.dev[i] =
- kmalloc(sizeof(struct channel_obj), GFP_KERNEL);
+ kzalloc(sizeof(struct channel_obj), GFP_KERNEL);
/* If memory allocation fails, return error */
if (!vpif_obj.dev[i]) {
free_channel_objects_index = i;
@@ -1541,7 +1543,7 @@ static __init int vpif_probe(struct platform_device *pdev)
config = pdev->dev.platform_data;
subdev_count = config->subdev_count;
subdevdata = config->subdevinfo;
- vpif_obj.sd = kmalloc(sizeof(struct v4l2_subdev *) * subdev_count,
+ vpif_obj.sd = kzalloc(sizeof(struct v4l2_subdev *) * subdev_count,
GFP_KERNEL);
if (vpif_obj.sd == NULL) {
vpif_err("unable to allocate memory for subdevice pointers\n");
@@ -1551,7 +1553,7 @@ static __init int vpif_probe(struct platform_device *pdev)
for (i = 0; i < subdev_count; i++) {
vpif_obj.sd[i] = v4l2_i2c_new_subdev_board(&vpif_obj.v4l2_dev,
- i2c_adap, subdevdata[i].name,
+ i2c_adap,
&subdevdata[i].board_info,
NULL);
if (!vpif_obj.sd[i]) {
@@ -1610,7 +1612,7 @@ static int vpif_remove(struct platform_device *device)
return 0;
}
-static struct platform_driver vpif_driver = {
+static __refdata struct platform_driver vpif_driver = {
.driver = {
.name = "vpif_display",
.owner = THIS_MODULE,
diff --git a/drivers/media/video/em28xx/em28xx-audio.c b/drivers/media/video/em28xx/em28xx-audio.c
index e182abf476c9..3c48a72eb7de 100644
--- a/drivers/media/video/em28xx/em28xx-audio.c
+++ b/drivers/media/video/em28xx/em28xx-audio.c
@@ -102,6 +102,9 @@ static void em28xx_audio_isocirq(struct urb *urb)
break;
}
+ if (atomic_read(&dev->stream_started) == 0)
+ return;
+
if (dev->adev.capture_pcm_substream) {
substream = dev->adev.capture_pcm_substream;
runtime = substream->runtime;
@@ -217,31 +220,6 @@ static int em28xx_init_audio_isoc(struct em28xx *dev)
return 0;
}
-static int em28xx_cmd(struct em28xx *dev, int cmd, int arg)
-{
- dprintk("%s transfer\n", (dev->adev.capture_stream == STREAM_ON) ?
- "stop" : "start");
-
- switch (cmd) {
- case EM28XX_CAPTURE_STREAM_EN:
- if (dev->adev.capture_stream == STREAM_OFF &&
- arg == EM28XX_START_AUDIO) {
- dev->adev.capture_stream = STREAM_ON;
- em28xx_init_audio_isoc(dev);
- } else if (dev->adev.capture_stream == STREAM_ON &&
- arg == EM28XX_STOP_AUDIO) {
- dev->adev.capture_stream = STREAM_OFF;
- em28xx_deinit_isoc_audio(dev);
- } else {
- em28xx_errdev("An underrun very likely occurred. "
- "Ignoring it.\n");
- }
- return 0;
- default:
- return -EINVAL;
- }
-}
-
static int snd_pcm_alloc_vmalloc_buffer(struct snd_pcm_substream *subs,
size_t size)
{
@@ -303,7 +281,6 @@ static int snd_em28xx_capture_open(struct snd_pcm_substream *substream)
dev->mute = 0;
mutex_lock(&dev->lock);
ret = em28xx_audio_analog_set(dev);
- mutex_unlock(&dev->lock);
if (ret < 0)
goto err;
@@ -311,11 +288,10 @@ static int snd_em28xx_capture_open(struct snd_pcm_substream *substream)
if (dev->alt == 0 && dev->adev.users == 0) {
int errCode;
dev->alt = 7;
- errCode = usb_set_interface(dev->udev, 0, 7);
dprintk("changing alternate number to 7\n");
+ errCode = usb_set_interface(dev->udev, 0, 7);
}
- mutex_lock(&dev->lock);
dev->adev.users++;
mutex_unlock(&dev->lock);
@@ -325,6 +301,8 @@ static int snd_em28xx_capture_open(struct snd_pcm_substream *substream)
return 0;
err:
+ mutex_unlock(&dev->lock);
+
em28xx_err("Error while configuring em28xx mixer\n");
return ret;
}
@@ -338,6 +316,11 @@ static int snd_em28xx_pcm_close(struct snd_pcm_substream *substream)
dev->mute = 1;
mutex_lock(&dev->lock);
dev->adev.users--;
+ if (atomic_read(&dev->stream_started) > 0) {
+ atomic_set(&dev->stream_started, 0);
+ schedule_work(&dev->wq_trigger);
+ }
+
em28xx_audio_analog_set(dev);
if (substream->runtime->dma_area) {
dprintk("freeing\n");
@@ -375,8 +358,10 @@ static int snd_em28xx_hw_capture_free(struct snd_pcm_substream *substream)
dprintk("Stop capture, if needed\n");
- if (dev->adev.capture_stream == STREAM_ON)
- em28xx_cmd(dev, EM28XX_CAPTURE_STREAM_EN, EM28XX_STOP_AUDIO);
+ if (atomic_read(&dev->stream_started) > 0) {
+ atomic_set(&dev->stream_started, 0);
+ schedule_work(&dev->wq_trigger);
+ }
return 0;
}
@@ -391,31 +376,37 @@ static int snd_em28xx_prepare(struct snd_pcm_substream *substream)
return 0;
}
+static void audio_trigger(struct work_struct *work)
+{
+ struct em28xx *dev = container_of(work, struct em28xx, wq_trigger);
+
+ if (atomic_read(&dev->stream_started)) {
+ dprintk("starting capture");
+ em28xx_init_audio_isoc(dev);
+ } else {
+ dprintk("stopping capture");
+ em28xx_deinit_isoc_audio(dev);
+ }
+}
+
static int snd_em28xx_capture_trigger(struct snd_pcm_substream *substream,
int cmd)
{
struct em28xx *dev = snd_pcm_substream_chip(substream);
int retval;
- dprintk("Should %s capture\n", (cmd == SNDRV_PCM_TRIGGER_START) ?
- "start" : "stop");
-
- spin_lock(&dev->adev.slock);
switch (cmd) {
case SNDRV_PCM_TRIGGER_START:
- em28xx_cmd(dev, EM28XX_CAPTURE_STREAM_EN, EM28XX_START_AUDIO);
- retval = 0;
+ atomic_set(&dev->stream_started, 1);
break;
case SNDRV_PCM_TRIGGER_STOP:
- em28xx_cmd(dev, EM28XX_CAPTURE_STREAM_EN, EM28XX_STOP_AUDIO);
- retval = 0;
+ atomic_set(&dev->stream_started, 0);
break;
default:
retval = -EINVAL;
}
-
- spin_unlock(&dev->adev.slock);
- return retval;
+ schedule_work(&dev->wq_trigger);
+ return 0;
}
static snd_pcm_uframes_t snd_em28xx_capture_pointer(struct snd_pcm_substream
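The trigger rework above follows the usual pattern for ALSA .trigger callbacks, which run in atomic context and must not sleep: the callback only records the requested state in an atomic flag and schedules a work item, and the work handler does the sleeping USB start/stop. A condensed sketch with simplified names, not a drop-in for the em28xx code:

/* Hedged sketch of the deferred-trigger pattern; INIT_WORK(&dev->wq_trigger,
 * trigger_work) is assumed to have run at init time, as in the patch.
 */
struct capture_dev {
	atomic_t stream_started;
	struct work_struct wq_trigger;
};

static void trigger_work(struct work_struct *work)
{
	struct capture_dev *dev =
		container_of(work, struct capture_dev, wq_trigger);

	if (atomic_read(&dev->stream_started)) {
		/* start the isoc transfers here (may sleep) */
	} else {
		/* tear the isoc transfers down here (may sleep) */
	}
}

static void request_capture(struct capture_dev *dev, int start)
{
	atomic_set(&dev->stream_started, start ? 1 : 0);
	schedule_work(&dev->wq_trigger);	/* trigger_work() runs later */
}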
@@ -495,6 +486,8 @@ static int em28xx_audio_init(struct em28xx *dev)
strcpy(card->shortname, "Em28xx Audio");
strcpy(card->longname, "Empia Em28xx Audio");
+ INIT_WORK(&dev->wq_trigger, audio_trigger);
+
err = snd_card_register(card);
if (err < 0) {
snd_card_free(card);
diff --git a/drivers/media/video/em28xx/em28xx-cards.c b/drivers/media/video/em28xx/em28xx-cards.c
index e7efb4bffabd..f7e9168157a5 100644
--- a/drivers/media/video/em28xx/em28xx-cards.c
+++ b/drivers/media/video/em28xx/em28xx-cards.c
@@ -187,6 +187,18 @@ static struct em28xx_reg_seq pinnacle_hybrid_pro_digital[] = {
{ -1, -1, -1, -1},
};
+static struct em28xx_reg_seq terratec_cinergy_USB_XS_FR_analog[] = {
+ {EM28XX_R08_GPIO, 0x6d, ~EM_GPIO_4, 10},
+ {EM2880_R04_GPO, 0x00, 0xff, 10},
+ { -1, -1, -1, -1},
+};
+
+static struct em28xx_reg_seq terratec_cinergy_USB_XS_FR_digital[] = {
+ {EM28XX_R08_GPIO, 0x6e, ~EM_GPIO_4, 10},
+ {EM2880_R04_GPO, 0x08, 0xff, 10},
+ { -1, -1, -1, -1},
+};
+
/* eb1a:2868 Reddo DVB-C USB TV Box
GPIO4 - CU1216L NIM
Other GPIOs seems to be don't care. */
@@ -781,22 +793,22 @@ struct em28xx_board em28xx_boards[] = {
.tuner_gpio = default_tuner_gpio,
.decoder = EM28XX_TVP5150,
.has_dvb = 1,
- .dvb_gpio = default_digital,
+ .dvb_gpio = terratec_cinergy_USB_XS_FR_digital,
.input = { {
.type = EM28XX_VMUX_TELEVISION,
.vmux = TVP5150_COMPOSITE0,
.amux = EM28XX_AMUX_VIDEO,
- .gpio = default_analog,
+ .gpio = terratec_cinergy_USB_XS_FR_analog,
}, {
.type = EM28XX_VMUX_COMPOSITE1,
.vmux = TVP5150_COMPOSITE1,
.amux = EM28XX_AMUX_LINE_IN,
- .gpio = default_analog,
+ .gpio = terratec_cinergy_USB_XS_FR_analog,
}, {
.type = EM28XX_VMUX_SVIDEO,
.vmux = TVP5150_SVIDEO,
.amux = EM28XX_AMUX_LINE_IN,
- .gpio = default_analog,
+ .gpio = terratec_cinergy_USB_XS_FR_analog,
} },
},
[EM2880_BOARD_HAUPPAUGE_WINTV_HVR_900] = {
@@ -1648,6 +1660,22 @@ struct em28xx_board em28xx_boards[] = {
.gpio = terratec_av350_unmute_gpio,
} },
},
+
+ [EM2860_BOARD_ELGATO_VIDEO_CAPTURE] = {
+ .name = "Elgato Video Capture",
+ .decoder = EM28XX_SAA711X,
+ .tuner_type = TUNER_ABSENT, /* Capture only device */
+ .input = { {
+ .type = EM28XX_VMUX_COMPOSITE1,
+ .vmux = SAA7115_COMPOSITE0,
+ .amux = EM28XX_AMUX_LINE_IN,
+ }, {
+ .type = EM28XX_VMUX_SVIDEO,
+ .vmux = SAA7115_SVIDEO3,
+ .amux = EM28XX_AMUX_LINE_IN,
+ } },
+ },
+
[EM2882_BOARD_EVGA_INDTUBE] = {
.name = "Evga inDtube",
.tuner_type = TUNER_XC2028,
@@ -1772,6 +1800,8 @@ struct usb_device_id em28xx_id_table[] = {
.driver_info = EM2860_BOARD_TERRATEC_AV350 },
{ USB_DEVICE(0x0ccd, 0x0096),
.driver_info = EM2860_BOARD_TERRATEC_GRABBY },
+ { USB_DEVICE(0x0fd9, 0x0033),
+ .driver_info = EM2860_BOARD_ELGATO_VIDEO_CAPTURE},
{ USB_DEVICE(0x185b, 0x2870),
.driver_info = EM2870_BOARD_COMPRO_VIDEOMATE },
{ USB_DEVICE(0x185b, 0x2041),
@@ -2168,6 +2198,7 @@ static void em28xx_setup_xc3028(struct em28xx *dev, struct xc2028_ctrl *ctl)
ctl->demod = XC3028_FE_ZARLINK456;
break;
case EM2880_BOARD_TERRATEC_HYBRID_XS:
+ case EM2880_BOARD_TERRATEC_HYBRID_XS_FR:
case EM2881_BOARD_PINNACLE_HYBRID_PRO:
ctl->demod = XC3028_FE_ZARLINK456;
break;
@@ -2523,39 +2554,39 @@ void em28xx_card_setup(struct em28xx *dev)
/* request some modules */
if (dev->board.has_msp34xx)
v4l2_i2c_new_subdev(&dev->v4l2_dev, &dev->i2c_adap,
- "msp3400", "msp3400", 0, msp3400_addrs);
+ "msp3400", 0, msp3400_addrs);
if (dev->board.decoder == EM28XX_SAA711X)
v4l2_i2c_new_subdev(&dev->v4l2_dev, &dev->i2c_adap,
- "saa7115", "saa7115_auto", 0, saa711x_addrs);
+ "saa7115_auto", 0, saa711x_addrs);
if (dev->board.decoder == EM28XX_TVP5150)
v4l2_i2c_new_subdev(&dev->v4l2_dev, &dev->i2c_adap,
- "tvp5150", "tvp5150", 0, tvp5150_addrs);
+ "tvp5150", 0, tvp5150_addrs);
if (dev->em28xx_sensor == EM28XX_MT9V011) {
struct v4l2_subdev *sd;
sd = v4l2_i2c_new_subdev(&dev->v4l2_dev,
- &dev->i2c_adap, "mt9v011", "mt9v011", 0, mt9v011_addrs);
+ &dev->i2c_adap, "mt9v011", 0, mt9v011_addrs);
v4l2_subdev_call(sd, core, s_config, 0, &dev->sensor_xtal);
}
if (dev->board.adecoder == EM28XX_TVAUDIO)
v4l2_i2c_new_subdev(&dev->v4l2_dev, &dev->i2c_adap,
- "tvaudio", "tvaudio", dev->board.tvaudio_addr, NULL);
+ "tvaudio", dev->board.tvaudio_addr, NULL);
if (dev->board.tuner_type != TUNER_ABSENT) {
int has_demod = (dev->tda9887_conf & TDA9887_PRESENT);
if (dev->board.radio.type)
v4l2_i2c_new_subdev(&dev->v4l2_dev, &dev->i2c_adap,
- "tuner", "tuner", dev->board.radio_addr, NULL);
+ "tuner", dev->board.radio_addr, NULL);
if (has_demod)
v4l2_i2c_new_subdev(&dev->v4l2_dev,
- &dev->i2c_adap, "tuner", "tuner",
+ &dev->i2c_adap, "tuner",
0, v4l2_i2c_tuner_addrs(ADDRS_DEMOD));
if (dev->tuner_addr == 0) {
enum v4l2_i2c_tuner_type type =
@@ -2563,14 +2594,14 @@ void em28xx_card_setup(struct em28xx *dev)
struct v4l2_subdev *sd;
sd = v4l2_i2c_new_subdev(&dev->v4l2_dev,
- &dev->i2c_adap, "tuner", "tuner",
+ &dev->i2c_adap, "tuner",
0, v4l2_i2c_tuner_addrs(type));
if (sd)
dev->tuner_addr = v4l2_i2c_subdev_addr(sd);
} else {
v4l2_i2c_new_subdev(&dev->v4l2_dev, &dev->i2c_adap,
- "tuner", "tuner", dev->tuner_addr, NULL);
+ "tuner", dev->tuner_addr, NULL);
}
}
diff --git a/drivers/media/video/em28xx/em28xx-video.c b/drivers/media/video/em28xx/em28xx-video.c
index 7b9ec6e493e4..908e3bc88303 100644
--- a/drivers/media/video/em28xx/em28xx-video.c
+++ b/drivers/media/video/em28xx/em28xx-video.c
@@ -277,12 +277,13 @@ static void em28xx_copy_vbi(struct em28xx *dev,
{
void *startwrite, *startread;
int offset;
- int bytesperline = dev->vbi_width;
+ int bytesperline;
if (dev == NULL) {
em28xx_isocdbg("dev is null\n");
return;
}
+ bytesperline = dev->vbi_width;
if (dma_q == NULL) {
em28xx_isocdbg("dma_q is null\n");
@@ -862,17 +863,14 @@ static int res_get(struct em28xx_fh *fh, unsigned int bit)
return 1;
/* is it free? */
- mutex_lock(&dev->lock);
if (dev->resources & bit) {
/* no, someone else uses it */
- mutex_unlock(&dev->lock);
return 0;
}
/* it's free, grab it */
fh->resources |= bit;
dev->resources |= bit;
em28xx_videodbg("res: get %d\n", bit);
- mutex_unlock(&dev->lock);
return 1;
}
@@ -892,11 +890,9 @@ static void res_free(struct em28xx_fh *fh, unsigned int bits)
BUG_ON((fh->resources & bits) != bits);
- mutex_lock(&dev->lock);
fh->resources &= ~bits;
dev->resources &= ~bits;
em28xx_videodbg("res: put %d\n", bits);
- mutex_unlock(&dev->lock);
}
static int get_ressource(struct em28xx_fh *fh)
@@ -1023,8 +1019,6 @@ static int vidioc_g_fmt_vid_cap(struct file *file, void *priv,
struct em28xx_fh *fh = priv;
struct em28xx *dev = fh->dev;
- mutex_lock(&dev->lock);
-
f->fmt.pix.width = dev->width;
f->fmt.pix.height = dev->height;
f->fmt.pix.pixelformat = dev->format->fourcc;
@@ -1038,8 +1032,6 @@ static int vidioc_g_fmt_vid_cap(struct file *file, void *priv,
else
f->fmt.pix.field = dev->interlaced ?
V4L2_FIELD_INTERLACED : V4L2_FIELD_TOP;
-
- mutex_unlock(&dev->lock);
return 0;
}
@@ -1137,22 +1129,15 @@ static int vidioc_s_fmt_vid_cap(struct file *file, void *priv,
if (rc < 0)
return rc;
- mutex_lock(&dev->lock);
-
vidioc_try_fmt_vid_cap(file, priv, f);
if (videobuf_queue_is_busy(&fh->vb_vidq)) {
em28xx_errdev("%s queue busy\n", __func__);
- rc = -EBUSY;
- goto out;
+ return -EBUSY;
}
- rc = em28xx_set_video_format(dev, f->fmt.pix.pixelformat,
+ return em28xx_set_video_format(dev, f->fmt.pix.pixelformat,
f->fmt.pix.width, f->fmt.pix.height);
-
-out:
- mutex_unlock(&dev->lock);
- return rc;
}
static int vidioc_g_std(struct file *file, void *priv, v4l2_std_id *norm)
@@ -1181,7 +1166,6 @@ static int vidioc_s_std(struct file *file, void *priv, v4l2_std_id *norm)
if (rc < 0)
return rc;
- mutex_lock(&dev->lock);
dev->norm = *norm;
/* Adjusts width/height, if needed */
@@ -1197,7 +1181,6 @@ static int vidioc_s_std(struct file *file, void *priv, v4l2_std_id *norm)
em28xx_resolution_set(dev);
v4l2_device_call_all(&dev->v4l2_dev, 0, core, s_std, dev->norm);
- mutex_unlock(&dev->lock);
return 0;
}
@@ -1302,9 +1285,7 @@ static int vidioc_s_input(struct file *file, void *priv, unsigned int i)
dev->ctl_input = i;
- mutex_lock(&dev->lock);
video_mux(dev, dev->ctl_input);
- mutex_unlock(&dev->lock);
return 0;
}
@@ -1365,15 +1346,12 @@ static int vidioc_s_audio(struct file *file, void *priv, struct v4l2_audio *a)
if (0 == INPUT(a->index)->type)
return -EINVAL;
- mutex_lock(&dev->lock);
-
dev->ctl_ainput = INPUT(a->index)->amux;
dev->ctl_aoutput = INPUT(a->index)->aout;
if (!dev->ctl_aoutput)
dev->ctl_aoutput = EM28XX_AOUT_MASTER;
- mutex_unlock(&dev->lock);
return 0;
}
@@ -1393,17 +1371,15 @@ static int vidioc_queryctrl(struct file *file, void *priv,
qc->id = id;
- /* enumberate AC97 controls */
+ /* enumerate AC97 controls */
if (dev->audio_mode.ac97 != EM28XX_NO_AC97) {
rc = ac97_queryctrl(qc);
if (!rc)
return 0;
}
- /* enumberate V4L2 device controls */
- mutex_lock(&dev->lock);
+ /* enumerate V4L2 device controls */
v4l2_device_call_all(&dev->v4l2_dev, 0, core, queryctrl, qc);
- mutex_unlock(&dev->lock);
if (qc->type)
return 0;
@@ -1423,7 +1399,6 @@ static int vidioc_g_ctrl(struct file *file, void *priv,
return rc;
rc = 0;
- mutex_lock(&dev->lock);
/* Set an AC97 control */
if (dev->audio_mode.ac97 != EM28XX_NO_AC97)
@@ -1437,7 +1412,6 @@ static int vidioc_g_ctrl(struct file *file, void *priv,
rc = 0;
}
- mutex_unlock(&dev->lock);
return rc;
}
@@ -1452,8 +1426,6 @@ static int vidioc_s_ctrl(struct file *file, void *priv,
if (rc < 0)
return rc;
- mutex_lock(&dev->lock);
-
/* Set an AC97 control */
if (dev->audio_mode.ac97 != EM28XX_NO_AC97)
rc = ac97_set_ctrl(dev, ctrl);
@@ -1480,8 +1452,6 @@ static int vidioc_s_ctrl(struct file *file, void *priv,
rc = em28xx_audio_analog_set(dev);
}
}
-
- mutex_unlock(&dev->lock);
return rc;
}
@@ -1502,10 +1472,7 @@ static int vidioc_g_tuner(struct file *file, void *priv,
strcpy(t->name, "Tuner");
t->type = V4L2_TUNER_ANALOG_TV;
- mutex_lock(&dev->lock);
v4l2_device_call_all(&dev->v4l2_dev, 0, tuner, g_tuner, t);
- mutex_unlock(&dev->lock);
-
return 0;
}
@@ -1523,10 +1490,7 @@ static int vidioc_s_tuner(struct file *file, void *priv,
if (0 != t->index)
return -EINVAL;
- mutex_lock(&dev->lock);
v4l2_device_call_all(&dev->v4l2_dev, 0, tuner, s_tuner, t);
- mutex_unlock(&dev->lock);
-
return 0;
}
@@ -1536,11 +1500,8 @@ static int vidioc_g_frequency(struct file *file, void *priv,
struct em28xx_fh *fh = priv;
struct em28xx *dev = fh->dev;
- mutex_lock(&dev->lock);
f->type = fh->radio ? V4L2_TUNER_RADIO : V4L2_TUNER_ANALOG_TV;
f->frequency = dev->ctl_freq;
- mutex_unlock(&dev->lock);
-
return 0;
}
@@ -1563,13 +1524,9 @@ static int vidioc_s_frequency(struct file *file, void *priv,
if (unlikely(1 == fh->radio && f->type != V4L2_TUNER_RADIO))
return -EINVAL;
- mutex_lock(&dev->lock);
-
dev->ctl_freq = f->frequency;
v4l2_device_call_all(&dev->v4l2_dev, 0, tuner, s_frequency, f);
- mutex_unlock(&dev->lock);
-
return 0;
}
@@ -1610,9 +1567,7 @@ static int vidioc_g_register(struct file *file, void *priv,
switch (reg->match.type) {
case V4L2_CHIP_MATCH_AC97:
- mutex_lock(&dev->lock);
ret = em28xx_read_ac97(dev, reg->reg);
- mutex_unlock(&dev->lock);
if (ret < 0)
return ret;
@@ -1634,9 +1589,7 @@ static int vidioc_g_register(struct file *file, void *priv,
/* Match host */
reg->size = em28xx_reg_len(reg->reg);
if (reg->size == 1) {
- mutex_lock(&dev->lock);
ret = em28xx_read_reg(dev, reg->reg);
- mutex_unlock(&dev->lock);
if (ret < 0)
return ret;
@@ -1644,10 +1597,8 @@ static int vidioc_g_register(struct file *file, void *priv,
reg->val = ret;
} else {
__le16 val = 0;
- mutex_lock(&dev->lock);
ret = em28xx_read_reg_req_len(dev, USB_REQ_GET_STATUS,
reg->reg, (char *)&val, 2);
- mutex_unlock(&dev->lock);
if (ret < 0)
return ret;
@@ -1663,15 +1614,10 @@ static int vidioc_s_register(struct file *file, void *priv,
struct em28xx_fh *fh = priv;
struct em28xx *dev = fh->dev;
__le16 buf;
- int rc;
switch (reg->match.type) {
case V4L2_CHIP_MATCH_AC97:
- mutex_lock(&dev->lock);
- rc = em28xx_write_ac97(dev, reg->reg, reg->val);
- mutex_unlock(&dev->lock);
-
- return rc;
+ return em28xx_write_ac97(dev, reg->reg, reg->val);
case V4L2_CHIP_MATCH_I2C_DRIVER:
v4l2_device_call_all(&dev->v4l2_dev, 0, core, s_register, reg);
return 0;
@@ -1687,12 +1633,8 @@ static int vidioc_s_register(struct file *file, void *priv,
/* Match host */
buf = cpu_to_le16(reg->val);
- mutex_lock(&dev->lock);
- rc = em28xx_write_regs(dev, reg->reg, (char *)&buf,
+ return em28xx_write_regs(dev, reg->reg, (char *)&buf,
em28xx_reg_len(reg->reg));
- mutex_unlock(&dev->lock);
-
- return rc;
}
#endif
@@ -1829,16 +1771,12 @@ static int vidioc_g_fmt_sliced_vbi_cap(struct file *file, void *priv,
if (rc < 0)
return rc;
- mutex_lock(&dev->lock);
-
f->fmt.sliced.service_set = 0;
v4l2_device_call_all(&dev->v4l2_dev, 0, vbi, g_sliced_fmt, &f->fmt.sliced);
if (f->fmt.sliced.service_set == 0)
rc = -EINVAL;
- mutex_unlock(&dev->lock);
-
return rc;
}
@@ -1853,9 +1791,7 @@ static int vidioc_try_set_sliced_vbi_cap(struct file *file, void *priv,
if (rc < 0)
return rc;
- mutex_lock(&dev->lock);
v4l2_device_call_all(&dev->v4l2_dev, 0, vbi, g_sliced_fmt, &f->fmt.sliced);
- mutex_unlock(&dev->lock);
if (f->fmt.sliced.service_set == 0)
return -EINVAL;
@@ -2040,9 +1976,7 @@ static int radio_g_tuner(struct file *file, void *priv,
strcpy(t->name, "Radio");
t->type = V4L2_TUNER_RADIO;
- mutex_lock(&dev->lock);
v4l2_device_call_all(&dev->v4l2_dev, 0, tuner, g_tuner, t);
- mutex_unlock(&dev->lock);
return 0;
}
@@ -2075,9 +2009,7 @@ static int radio_s_tuner(struct file *file, void *priv,
if (0 != t->index)
return -EINVAL;
- mutex_lock(&dev->lock);
v4l2_device_call_all(&dev->v4l2_dev, 0, tuner, s_tuner, t);
- mutex_unlock(&dev->lock);
return 0;
}
@@ -2137,8 +2069,6 @@ static int em28xx_v4l2_open(struct file *filp)
break;
}
- mutex_lock(&dev->lock);
-
em28xx_videodbg("open dev=%s type=%s users=%d\n",
video_device_node_name(vdev), v4l2_type_names[fh_type],
dev->users);
@@ -2147,7 +2077,6 @@ static int em28xx_v4l2_open(struct file *filp)
fh = kzalloc(sizeof(struct em28xx_fh), GFP_KERNEL);
if (!fh) {
em28xx_errdev("em28xx-video.c: Out of memory?!\n");
- mutex_unlock(&dev->lock);
return -ENOMEM;
}
fh->dev = dev;
@@ -2181,15 +2110,13 @@ static int em28xx_v4l2_open(struct file *filp)
videobuf_queue_vmalloc_init(&fh->vb_vidq, &em28xx_video_qops,
NULL, &dev->slock,
V4L2_BUF_TYPE_VIDEO_CAPTURE, field,
- sizeof(struct em28xx_buffer), fh);
+ sizeof(struct em28xx_buffer), fh, &dev->lock);
videobuf_queue_vmalloc_init(&fh->vb_vbiq, &em28xx_vbi_qops,
NULL, &dev->slock,
V4L2_BUF_TYPE_VBI_CAPTURE,
V4L2_FIELD_SEQ_TB,
- sizeof(struct em28xx_buffer), fh);
-
- mutex_unlock(&dev->lock);
+ sizeof(struct em28xx_buffer), fh, &dev->lock);
return errCode;
}
@@ -2388,7 +2315,7 @@ static const struct v4l2_file_operations em28xx_v4l_fops = {
.read = em28xx_v4l2_read,
.poll = em28xx_v4l2_poll,
.mmap = em28xx_v4l2_mmap,
- .ioctl = video_ioctl2,
+ .unlocked_ioctl = video_ioctl2,
};
static const struct v4l2_ioctl_ops video_ioctl_ops = {
@@ -2496,6 +2423,7 @@ static struct video_device *em28xx_vdev_init(struct em28xx *dev,
vfd->v4l2_dev = &dev->v4l2_dev;
vfd->release = video_device_release;
vfd->debug = video_debug;
+ vfd->lock = &dev->lock;
snprintf(vfd->name, sizeof(vfd->name), "%s %s",
dev->name, type_name);
@@ -2516,6 +2444,7 @@ int em28xx_register_analog_devices(struct em28xx *dev)
/* set default norm */
dev->norm = em28xx_video_template.current_norm;
+ v4l2_device_call_all(&dev->v4l2_dev, 0, core, s_std, dev->norm);
dev->interlaced = EM28XX_INTERLACED_DEFAULT;
dev->ctl_input = 0;
diff --git a/drivers/media/video/em28xx/em28xx.h b/drivers/media/video/em28xx/em28xx.h
index 1c61a6b65d28..6a75e6a4fc21 100644
--- a/drivers/media/video/em28xx/em28xx.h
+++ b/drivers/media/video/em28xx/em28xx.h
@@ -25,12 +25,13 @@
#ifndef _EM28XX_H
#define _EM28XX_H
+#include <linux/workqueue.h>
+#include <linux/i2c.h>
+#include <linux/mutex.h>
#include <linux/videodev2.h>
+
#include <media/videobuf-vmalloc.h>
#include <media/v4l2-device.h>
-
-#include <linux/i2c.h>
-#include <linux/mutex.h>
#include <media/ir-kbd-i2c.h>
#include <media/ir-core.h>
#if defined(CONFIG_VIDEO_EM28XX_DVB) || defined(CONFIG_VIDEO_EM28XX_DVB_MODULE)
@@ -73,6 +74,7 @@
#define EM2820_BOARD_VIDEOLOGY_20K14XUSB 30
#define EM2821_BOARD_USBGEAR_VD204 31
#define EM2821_BOARD_SUPERCOMP_USB_2 32
+#define EM2860_BOARD_ELGATO_VIDEO_CAPTURE 33
#define EM2860_BOARD_TERRATEC_HYBRID_XS 34
#define EM2860_BOARD_TYPHOON_DVD_MAKER 35
#define EM2860_BOARD_NETGMBH_CAM 36
@@ -184,11 +186,6 @@ enum em28xx_mode {
EM28XX_DIGITAL_MODE,
};
-enum em28xx_stream_state {
- STREAM_OFF,
- STREAM_INTERRUPT,
- STREAM_ON,
-};
struct em28xx;
@@ -463,7 +460,6 @@ struct em28xx_audio {
struct snd_card *sndcard;
int users;
- enum em28xx_stream_state capture_stream;
spinlock_t slock;
};
@@ -505,6 +501,10 @@ struct em28xx {
unsigned int has_audio_class:1;
unsigned int has_alsa_audio:1;
+ /* Controls audio streaming */
+ struct work_struct wq_trigger; /* Trigger to start/stop audio for alsa module */
+ atomic_t stream_started; /* stream should be running if true */
+
struct em28xx_fmt *format;
struct em28xx_IR *ir;
diff --git a/drivers/media/video/fsl-viu.c b/drivers/media/video/fsl-viu.c
index 43d208f1f586..b8faff2dd711 100644
--- a/drivers/media/video/fsl-viu.c
+++ b/drivers/media/video/fsl-viu.c
@@ -22,6 +22,7 @@
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/of_platform.h>
+#include <linux/slab.h>
#include <linux/version.h>
#include <media/v4l2-common.h>
#include <media/v4l2-device.h>
@@ -425,7 +426,7 @@ static void free_buffer(struct videobuf_queue *vq, struct viu_buf *buf)
BUG_ON(in_interrupt());
- videobuf_waiton(&buf->vb, 0, 0);
+ videobuf_waiton(vq, &buf->vb, 0, 0);
if (vq->int_ops && vq->int_ops->vaddr)
vaddr = vq->int_ops->vaddr(vb);
@@ -1287,7 +1288,7 @@ static int viu_open(struct file *file)
videobuf_queue_dma_contig_init(&fh->vb_vidq, &viu_video_qops,
dev->dev, &fh->vbq_lock,
fh->type, V4L2_FIELD_INTERLACED,
- sizeof(struct viu_buf), fh);
+ sizeof(struct viu_buf), fh, NULL);
return 0;
}
@@ -1485,7 +1486,7 @@ static int __devinit viu_of_probe(struct platform_device *op,
ad = i2c_get_adapter(0);
viu_dev->decoder = v4l2_i2c_new_subdev(&viu_dev->v4l2_dev, ad,
- "saa7115", "saa7113", VIU_VIDEO_DECODER_ADDR, NULL);
+ "saa7113", VIU_VIDEO_DECODER_ADDR, NULL);
viu_dev->vidq.timeout.function = viu_vid_timeout;
viu_dev->vidq.timeout.data = (unsigned long)viu_dev;
diff --git a/drivers/media/video/gspca/Kconfig b/drivers/media/video/gspca/Kconfig
index 23db0c29f68c..dda56ff834f4 100644
--- a/drivers/media/video/gspca/Kconfig
+++ b/drivers/media/video/gspca/Kconfig
@@ -77,6 +77,15 @@ config USB_GSPCA_JEILINJ
To compile this driver as a module, choose M here: the
module will be called gspca_jeilinj.
+config USB_GSPCA_KONICA
+ tristate "Konica USB Camera V4L2 driver"
+ depends on VIDEO_V4L2 && USB_GSPCA
+ help
+ Say Y here if you want support for cameras based on the Konica chip.
+
+ To compile this driver as a module, choose M here: the
+ module will be called gspca_konica.
+
config USB_GSPCA_MARS
tristate "Mars USB Camera Driver"
depends on VIDEO_V4L2 && USB_GSPCA
@@ -337,6 +346,15 @@ config USB_GSPCA_VC032X
To compile this driver as a module, choose M here: the
module will be called gspca_vc032x.
+config USB_GSPCA_XIRLINK_CIT
+ tristate "Xirlink C-It USB Camera Driver"
+ depends on VIDEO_V4L2 && USB_GSPCA
+ help
+ Say Y here if you want support for Xirlink C-It based cameras.
+
+ To compile this driver as a module, choose M here: the
+ module will be called gspca_xirlink_cit.
+
config USB_GSPCA_ZC3XX
tristate "ZC3XX USB Camera Driver"
depends on VIDEO_V4L2 && USB_GSPCA
diff --git a/drivers/media/video/gspca/Makefile b/drivers/media/video/gspca/Makefile
index f6616db0b7f8..24e695b8b077 100644
--- a/drivers/media/video/gspca/Makefile
+++ b/drivers/media/video/gspca/Makefile
@@ -5,6 +5,7 @@ obj-$(CONFIG_USB_GSPCA_CPIA1) += gspca_cpia1.o
obj-$(CONFIG_USB_GSPCA_ETOMS) += gspca_etoms.o
obj-$(CONFIG_USB_GSPCA_FINEPIX) += gspca_finepix.o
obj-$(CONFIG_USB_GSPCA_JEILINJ) += gspca_jeilinj.o
+obj-$(CONFIG_USB_GSPCA_KONICA) += gspca_konica.o
obj-$(CONFIG_USB_GSPCA_MARS) += gspca_mars.o
obj-$(CONFIG_USB_GSPCA_MR97310A) += gspca_mr97310a.o
obj-$(CONFIG_USB_GSPCA_OV519) += gspca_ov519.o
@@ -33,6 +34,7 @@ obj-$(CONFIG_USB_GSPCA_STV0680) += gspca_stv0680.o
obj-$(CONFIG_USB_GSPCA_T613) += gspca_t613.o
obj-$(CONFIG_USB_GSPCA_TV8532) += gspca_tv8532.o
obj-$(CONFIG_USB_GSPCA_VC032X) += gspca_vc032x.o
+obj-$(CONFIG_USB_GSPCA_XIRLINK_CIT) += gspca_xirlink_cit.o
obj-$(CONFIG_USB_GSPCA_ZC3XX) += gspca_zc3xx.o
gspca_main-objs := gspca.o
@@ -42,6 +44,7 @@ gspca_cpia1-objs := cpia1.o
gspca_etoms-objs := etoms.o
gspca_finepix-objs := finepix.o
gspca_jeilinj-objs := jeilinj.o
+gspca_konica-objs := konica.o
gspca_mars-objs := mars.o
gspca_mr97310a-objs := mr97310a.o
gspca_ov519-objs := ov519.o
@@ -70,6 +73,7 @@ gspca_sunplus-objs := sunplus.o
gspca_t613-objs := t613.o
gspca_tv8532-objs := tv8532.o
gspca_vc032x-objs := vc032x.o
+gspca_xirlink_cit-objs := xirlink_cit.o
gspca_zc3xx-objs := zc3xx.o
obj-$(CONFIG_USB_M5602) += m5602/
diff --git a/drivers/media/video/gspca/benq.c b/drivers/media/video/gspca/benq.c
index fce8d9492641..629043933501 100644
--- a/drivers/media/video/gspca/benq.c
+++ b/drivers/media/video/gspca/benq.c
@@ -62,7 +62,7 @@ static void reg_w(struct gspca_dev *gspca_dev,
0,
500);
if (ret < 0) {
- PDEBUG(D_ERR, "reg_w err %d", ret);
+ err("reg_w err %d", ret);
gspca_dev->usb_err = ret;
}
}
@@ -152,7 +152,8 @@ static void sd_stopN(struct gspca_dev *gspca_dev)
reg_w(gspca_dev, 0x003c, 0x0005);
reg_w(gspca_dev, 0x003c, 0x0006);
reg_w(gspca_dev, 0x003c, 0x0007);
- usb_set_interface(gspca_dev->dev, gspca_dev->iface, gspca_dev->nbalt - 1);
+ usb_set_interface(gspca_dev->dev, gspca_dev->iface,
+ gspca_dev->nbalt - 1);
}
static void sd_pkt_scan(struct gspca_dev *gspca_dev,
@@ -180,7 +181,7 @@ static void sd_isoc_irq(struct urb *urb)
if (gspca_dev->frozen)
return;
#endif
- PDEBUG(D_ERR|D_PACK, "urb status: %d", urb->status);
+ err("urb status: %d", urb->status);
return;
}
@@ -208,8 +209,7 @@ static void sd_isoc_irq(struct urb *urb)
if (st == 0)
st = urb->iso_frame_desc[i].status;
if (st) {
- PDEBUG(D_ERR,
- "ISOC data error: [%d] status=%d",
+ err("ISOC data error: [%d] status=%d",
i, st);
gspca_dev->last_packet_type = DISCARD_PACKET;
continue;
@@ -256,10 +256,10 @@ static void sd_isoc_irq(struct urb *urb)
/* resubmit the URBs */
st = usb_submit_urb(urb0, GFP_ATOMIC);
if (st < 0)
- PDEBUG(D_ERR|D_PACK, "usb_submit_urb(0) ret %d", st);
+ err("usb_submit_urb(0) ret %d", st);
st = usb_submit_urb(urb, GFP_ATOMIC);
if (st < 0)
- PDEBUG(D_ERR|D_PACK, "usb_submit_urb() ret %d", st);
+ err("usb_submit_urb() ret %d", st);
}
/* sub-driver description */
@@ -304,18 +304,11 @@ static struct usb_driver sd_driver = {
/* -- module insert / remove -- */
static int __init sd_mod_init(void)
{
- int ret;
-
- ret = usb_register(&sd_driver);
- if (ret < 0)
- return ret;
- info("registered");
- return 0;
+ return usb_register(&sd_driver);
}
static void __exit sd_mod_exit(void)
{
usb_deregister(&sd_driver);
- info("deregistered");
}
module_init(sd_mod_init);
diff --git a/drivers/media/video/gspca/conex.c b/drivers/media/video/gspca/conex.c
index d6a75772f3f8..1eacb6c7926d 100644
--- a/drivers/media/video/gspca/conex.c
+++ b/drivers/media/video/gspca/conex.c
@@ -687,7 +687,7 @@ static void cx11646_jpeg(struct gspca_dev*gspca_dev)
reg_w_val(gspca_dev, 0x00c0, 0x00);
reg_r(gspca_dev, 0x0001, 1);
length = 8;
- switch (gspca_dev->cam.cam_mode[(int) gspca_dev->curr_mode].priv) {
+ switch (gspca_dev->cam.cam_mode[gspca_dev->curr_mode].priv) {
case 0:
for (i = 0; i < 27; i++) {
if (i == 26)
@@ -901,7 +901,7 @@ static void sd_pkt_scan(struct gspca_dev *gspca_dev,
gspca_frame_add(gspca_dev, INTER_PACKET, data, len);
}
-static void setbrightness(struct gspca_dev*gspca_dev)
+static void setbrightness(struct gspca_dev *gspca_dev)
{
struct sd *sd = (struct sd *) gspca_dev;
__u8 regE5cbx[] = { 0x88, 0x00, 0xd4, 0x01, 0x88, 0x01, 0x01, 0x01 };
@@ -924,7 +924,7 @@ static void setbrightness(struct gspca_dev*gspca_dev)
reg_w_val(gspca_dev, 0x0070, reg70);
}
-static void setcontrast(struct gspca_dev*gspca_dev)
+static void setcontrast(struct gspca_dev *gspca_dev)
{
struct sd *sd = (struct sd *) gspca_dev;
__u8 regE5acx[] = { 0x88, 0x0a, 0x0c, 0x01 }; /* seem MSB */
@@ -1068,17 +1068,11 @@ static struct usb_driver sd_driver = {
/* -- module insert / remove -- */
static int __init sd_mod_init(void)
{
- int ret;
- ret = usb_register(&sd_driver);
- if (ret < 0)
- return ret;
- PDEBUG(D_PROBE, "registered");
- return 0;
+ return usb_register(&sd_driver);
}
static void __exit sd_mod_exit(void)
{
usb_deregister(&sd_driver);
- PDEBUG(D_PROBE, "deregistered");
}
module_init(sd_mod_init);
diff --git a/drivers/media/video/gspca/cpia1.c b/drivers/media/video/gspca/cpia1.c
index 3747a1dcff54..9b121681d135 100644
--- a/drivers/media/video/gspca/cpia1.c
+++ b/drivers/media/video/gspca/cpia1.c
@@ -1,7 +1,7 @@
/*
* cpia CPiA (1) gspca driver
*
- * Copyright (C) 2010 Hans de Goede <hdgoede@redhat.com>
+ * Copyright (C) 2010 Hans de Goede <hdegoede@redhat.com>
*
* This module is adapted from the in kernel v4l1 cpia driver which is :
*
@@ -30,7 +30,7 @@
#include "gspca.h"
-MODULE_AUTHOR("Hans de Goede <hdgoede@redhat.com>");
+MODULE_AUTHOR("Hans de Goede <hdegoede@redhat.com>");
MODULE_DESCRIPTION("Vision CPiA");
MODULE_LICENSE("GPL");
@@ -373,9 +373,14 @@ static int sd_setfreq(struct gspca_dev *gspca_dev, __s32 val);
static int sd_getfreq(struct gspca_dev *gspca_dev, __s32 *val);
static int sd_setcomptarget(struct gspca_dev *gspca_dev, __s32 val);
static int sd_getcomptarget(struct gspca_dev *gspca_dev, __s32 *val);
+static int sd_setilluminator1(struct gspca_dev *gspca_dev, __s32 val);
+static int sd_getilluminator1(struct gspca_dev *gspca_dev, __s32 *val);
+static int sd_setilluminator2(struct gspca_dev *gspca_dev, __s32 val);
+static int sd_getilluminator2(struct gspca_dev *gspca_dev, __s32 *val);
static const struct ctrl sd_ctrls[] = {
{
+#define BRIGHTNESS_IDX 0
{
.id = V4L2_CID_BRIGHTNESS,
.type = V4L2_CTRL_TYPE_INTEGER,
@@ -390,6 +395,7 @@ static const struct ctrl sd_ctrls[] = {
.set = sd_setbrightness,
.get = sd_getbrightness,
},
+#define CONTRAST_IDX 1
{
{
.id = V4L2_CID_CONTRAST,
@@ -404,6 +410,7 @@ static const struct ctrl sd_ctrls[] = {
.set = sd_setcontrast,
.get = sd_getcontrast,
},
+#define SATURATION_IDX 2
{
{
.id = V4L2_CID_SATURATION,
@@ -418,6 +425,7 @@ static const struct ctrl sd_ctrls[] = {
.set = sd_setsaturation,
.get = sd_getsaturation,
},
+#define POWER_LINE_FREQUENCY_IDX 3
{
{
.id = V4L2_CID_POWER_LINE_FREQUENCY,
@@ -432,6 +440,37 @@ static const struct ctrl sd_ctrls[] = {
.set = sd_setfreq,
.get = sd_getfreq,
},
+#define ILLUMINATORS_1_IDX 4
+ {
+ {
+ .id = V4L2_CID_ILLUMINATORS_1,
+ .type = V4L2_CTRL_TYPE_BOOLEAN,
+ .name = "Illuminator 1",
+ .minimum = 0,
+ .maximum = 1,
+ .step = 1,
+#define ILLUMINATORS_1_DEF 0
+ .default_value = ILLUMINATORS_1_DEF,
+ },
+ .set = sd_setilluminator1,
+ .get = sd_getilluminator1,
+ },
+#define ILLUMINATORS_2_IDX 5
+ {
+ {
+ .id = V4L2_CID_ILLUMINATORS_2,
+ .type = V4L2_CTRL_TYPE_BOOLEAN,
+ .name = "Illuminator 2",
+ .minimum = 0,
+ .maximum = 1,
+ .step = 1,
+#define ILLUMINATORS_2_DEF 0
+ .default_value = ILLUMINATORS_2_DEF,
+ },
+ .set = sd_setilluminator2,
+ .get = sd_getilluminator2,
+ },
+#define COMP_TARGET_IDX 6
{
{
#define V4L2_CID_COMP_TARGET V4L2_CID_PRIVATE_BASE
@@ -510,7 +549,7 @@ retry:
gspca_dev->usb_buf, databytes, 1000);
if (ret < 0)
- PDEBUG(D_ERR, "usb_control_msg %02x, error %d", command[1],
+ err("usb_control_msg %02x, error %d", command[1],
ret);
if (ret == -EPIPE && retries > 0) {
@@ -1059,7 +1098,6 @@ static int command_resume(struct gspca_dev *gspca_dev)
0, sd->params.streamStartLine, 0, 0);
}
-#if 0 /* Currently unused */
static int command_setlights(struct gspca_dev *gspca_dev)
{
struct sd *sd = (struct sd *) gspca_dev;
@@ -1079,7 +1117,6 @@ static int command_setlights(struct gspca_dev *gspca_dev)
return do_command(gspca_dev, CPIA_COMMAND_WriteMCPort, 2, 0,
p1 | p2 | 0xE0, 0);
}
-#endif
static int set_flicker(struct gspca_dev *gspca_dev, int on, int apply)
{
@@ -1236,7 +1273,7 @@ static void monitor_exposure(struct gspca_dev *gspca_dev)
cmd[7] = 0;
ret = cpia_usb_transferCmd(gspca_dev, cmd);
if (ret) {
- PDEBUG(D_ERR, "ReadVPRegs(30,4,9,8) - failed: %d", ret);
+ err("ReadVPRegs(30,4,9,8) - failed: %d", ret);
return;
}
exp_acc = gspca_dev->usb_buf[0];
@@ -1716,7 +1753,9 @@ static void sd_stopN(struct gspca_dev *gspca_dev)
/* this function is called at probe and resume time */
static int sd_init(struct gspca_dev *gspca_dev)
{
+#ifdef GSPCA_DEBUG
struct sd *sd = (struct sd *) gspca_dev;
+#endif
int ret;
/* Start / Stop the camera to make sure we are talking to
@@ -1726,6 +1765,14 @@ static int sd_init(struct gspca_dev *gspca_dev)
if (ret)
return ret;
+ /* Ensure the QX3 illuminators' states are restored upon resume,
+ or disable the illuminator controls, if this isn't a QX3 */
+ if (sd->params.qx3.qx3_detected)
+ command_setlights(gspca_dev);
+ else
+ gspca_dev->ctrl_dis |=
+ ((1 << ILLUMINATORS_1_IDX) | (1 << ILLUMINATORS_2_IDX));
+
sd_stopN(gspca_dev);
PDEBUG(D_PROBE, "CPIA Version: %d.%02d (%d.%d)",
@@ -1929,6 +1976,72 @@ static int sd_getcomptarget(struct gspca_dev *gspca_dev, __s32 *val)
return 0;
}
+static int sd_setilluminator(struct gspca_dev *gspca_dev, __s32 val, int n)
+{
+ struct sd *sd = (struct sd *) gspca_dev;
+ int ret;
+
+ if (!sd->params.qx3.qx3_detected)
+ return -EINVAL;
+
+ switch (n) {
+ case 1:
+ sd->params.qx3.bottomlight = val ? 1 : 0;
+ break;
+ case 2:
+ sd->params.qx3.toplight = val ? 1 : 0;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ ret = command_setlights(gspca_dev);
+ if (ret && ret != -EINVAL)
+ ret = -EBUSY;
+
+ return ret;
+}
+
+static int sd_setilluminator1(struct gspca_dev *gspca_dev, __s32 val)
+{
+ return sd_setilluminator(gspca_dev, val, 1);
+}
+
+static int sd_setilluminator2(struct gspca_dev *gspca_dev, __s32 val)
+{
+ return sd_setilluminator(gspca_dev, val, 2);
+}
+
+static int sd_getilluminator(struct gspca_dev *gspca_dev, __s32 *val, int n)
+{
+ struct sd *sd = (struct sd *) gspca_dev;
+
+ if (!sd->params.qx3.qx3_detected)
+ return -EINVAL;
+
+ switch (n) {
+ case 1:
+ *val = sd->params.qx3.bottomlight;
+ break;
+ case 2:
+ *val = sd->params.qx3.toplight;
+ break;
+ default:
+ return -EINVAL;
+ }
+ return 0;
+}
+
+static int sd_getilluminator1(struct gspca_dev *gspca_dev, __s32 *val)
+{
+ return sd_getilluminator(gspca_dev, val, 1);
+}
+
+static int sd_getilluminator2(struct gspca_dev *gspca_dev, __s32 *val)
+{
+ return sd_getilluminator(gspca_dev, val, 2);
+}
+
static int sd_querymenu(struct gspca_dev *gspca_dev,
struct v4l2_querymenu *menu)
{
@@ -2004,17 +2117,11 @@ static struct usb_driver sd_driver = {
/* -- module insert / remove -- */
static int __init sd_mod_init(void)
{
- int ret;
- ret = usb_register(&sd_driver);
- if (ret < 0)
- return ret;
- PDEBUG(D_PROBE, "registered");
- return 0;
+ return usb_register(&sd_driver);
}
static void __exit sd_mod_exit(void)
{
usb_deregister(&sd_driver);
- PDEBUG(D_PROBE, "deregistered");
}
module_init(sd_mod_init);
diff --git a/drivers/media/video/gspca/etoms.c b/drivers/media/video/gspca/etoms.c
index ecd4d743d2bc..a594b36d6199 100644
--- a/drivers/media/video/gspca/etoms.c
+++ b/drivers/media/video/gspca/etoms.c
@@ -710,9 +710,9 @@ static void Et_setgainG(struct gspca_dev *gspca_dev, __u8 gain)
}
#define BLIMIT(bright) \
- (__u8)((bright > 0x1f)?0x1f:((bright < 4)?3:bright))
+ (u8)((bright > 0x1f) ? 0x1f : ((bright < 4) ? 3 : bright))
#define LIMIT(color) \
- (unsigned char)((color > 0xff)?0xff:((color < 0)?0:color))
+ (u8)((color > 0xff) ? 0xff : ((color < 0) ? 0 : color))
static void do_autogain(struct gspca_dev *gspca_dev)
{
@@ -896,18 +896,12 @@ static struct usb_driver sd_driver = {
/* -- module insert / remove -- */
static int __init sd_mod_init(void)
{
- int ret;
- ret = usb_register(&sd_driver);
- if (ret < 0)
- return ret;
- PDEBUG(D_PROBE, "registered");
- return 0;
+ return usb_register(&sd_driver);
}
static void __exit sd_mod_exit(void)
{
usb_deregister(&sd_driver);
- PDEBUG(D_PROBE, "deregistered");
}
module_init(sd_mod_init);
diff --git a/drivers/media/video/gspca/finepix.c b/drivers/media/video/gspca/finepix.c
index 5d90e7448579..d78226455d1f 100644
--- a/drivers/media/video/gspca/finepix.c
+++ b/drivers/media/video/gspca/finepix.c
@@ -182,7 +182,7 @@ static int sd_start(struct gspca_dev *gspca_dev)
/* Init the device */
ret = command(gspca_dev, 0);
if (ret < 0) {
- PDEBUG(D_STREAM, "init failed %d", ret);
+ err("init failed %d", ret);
return ret;
}
@@ -194,14 +194,14 @@ static int sd_start(struct gspca_dev *gspca_dev)
FPIX_MAX_TRANSFER, &len,
FPIX_TIMEOUT);
if (ret < 0) {
- PDEBUG(D_STREAM, "usb_bulk_msg failed %d", ret);
+ err("usb_bulk_msg failed %d", ret);
return ret;
}
/* Request a frame, but don't read it */
ret = command(gspca_dev, 1);
if (ret < 0) {
- PDEBUG(D_STREAM, "frame request failed %d", ret);
+ err("frame request failed %d", ret);
return ret;
}
@@ -291,19 +291,12 @@ static struct usb_driver sd_driver = {
/* -- module insert / remove -- */
static int __init sd_mod_init(void)
{
- int ret;
-
- ret = usb_register(&sd_driver);
- if (ret < 0)
- return ret;
- PDEBUG(D_PROBE, "registered");
- return 0;
+ return usb_register(&sd_driver);
}
static void __exit sd_mod_exit(void)
{
usb_deregister(&sd_driver);
- PDEBUG(D_PROBE, "deregistered");
}
module_init(sd_mod_init);
diff --git a/drivers/media/video/gspca/gl860/gl860-mi2020.c b/drivers/media/video/gspca/gl860/gl860-mi2020.c
index 57782e011c9e..2edda6b7d653 100644
--- a/drivers/media/video/gspca/gl860/gl860-mi2020.c
+++ b/drivers/media/video/gspca/gl860/gl860-mi2020.c
@@ -69,15 +69,15 @@ static u8 dat_multi5[] = { 0x8c, 0xa1, 0x03 };
static u8 dat_multi6[] = { 0x90, 0x00, 0x05 };
static struct validx tbl_init_at_startup[] = {
- {0x0000, 0x0000}, {0x0010, 0x0010}, {0x0008, 0x00c0}, {0x0001,0x00c1},
+ {0x0000, 0x0000}, {0x0010, 0x0010}, {0x0008, 0x00c0}, {0x0001, 0x00c1},
{0x0001, 0x00c2}, {0x0020, 0x0006}, {0x006a, 0x000d},
{53, 0xffff},
{0x0040, 0x0000}, {0x0063, 0x0006},
};
static struct validx tbl_common_0B[] = {
- {0x0002, 0x0004}, {0x006a, 0x0007}, {0x00ef, 0x0006}, {0x006a,0x000d},
- {0x0000, 0x00c0}, {0x0010, 0x0010}, {0x0003, 0x00c1}, {0x0042,0x00c2},
+ {0x0002, 0x0004}, {0x006a, 0x0007}, {0x00ef, 0x0006}, {0x006a, 0x000d},
+ {0x0000, 0x00c0}, {0x0010, 0x0010}, {0x0003, 0x00c1}, {0x0042, 0x00c2},
{0x0004, 0x00d8}, {0x0000, 0x0058}, {0x0041, 0x0000},
};
diff --git a/drivers/media/video/gspca/gl860/gl860.c b/drivers/media/video/gspca/gl860/gl860.c
index e86eb8b4aedc..b05bec7321b5 100644
--- a/drivers/media/video/gspca/gl860/gl860.c
+++ b/drivers/media/video/gspca/gl860/gl860.c
@@ -540,15 +540,12 @@ static int __init sd_mod_init(void)
if (usb_register(&sd_driver) < 0)
return -1;
- PDEBUG(D_PROBE, "driver registered");
-
return 0;
}
static void __exit sd_mod_exit(void)
{
usb_deregister(&sd_driver);
- PDEBUG(D_PROBE, "driver deregistered");
}
module_init(sd_mod_init);
@@ -588,8 +585,7 @@ int gl860_RTx(struct gspca_dev *gspca_dev,
}
if (r < 0)
- PDEBUG(D_ERR,
- "ctrl transfer failed %4d "
+ err("ctrl transfer failed %4d "
"[p%02x r%d v%04x i%04x len%d]",
r, pref, req, val, index, len);
else if (len > 1 && r < len)
diff --git a/drivers/media/video/gspca/gspca.c b/drivers/media/video/gspca/gspca.c
index 78abc1c1f9d5..8fe8fb486d62 100644
--- a/drivers/media/video/gspca/gspca.c
+++ b/drivers/media/video/gspca/gspca.c
@@ -148,7 +148,7 @@ static void int_irq(struct urb *urb)
if (ret == 0) {
ret = usb_submit_urb(urb, GFP_ATOMIC);
if (ret < 0)
- PDEBUG(D_ERR, "Resubmit URB failed with error %i", ret);
+ err("Resubmit URB failed with error %i", ret);
}
}
@@ -177,8 +177,8 @@ static int gspca_input_connect(struct gspca_dev *dev)
err = input_register_device(input_dev);
if (err) {
- PDEBUG(D_ERR, "Input device registration failed "
- "with error %i", err);
+ err("Input device registration failed with error %i",
+ err);
input_dev->dev.parent = NULL;
input_free_device(input_dev);
} else {
@@ -328,8 +328,7 @@ static void fill_frame(struct gspca_dev *gspca_dev,
}
st = urb->iso_frame_desc[i].status;
if (st) {
- PDEBUG(D_ERR,
- "ISOC data error: [%d] len=%d, status=%d",
+ err("ISOC data error: [%d] len=%d, status=%d",
i, len, st);
gspca_dev->last_packet_type = DISCARD_PACKET;
continue;
@@ -347,7 +346,7 @@ resubmit:
/* resubmit the URB */
st = usb_submit_urb(urb, GFP_ATOMIC);
if (st < 0)
- PDEBUG(D_ERR|D_PACK, "usb_submit_urb() ret %d", st);
+ err("usb_submit_urb() ret %d", st);
}
/*
@@ -401,7 +400,7 @@ resubmit:
if (gspca_dev->cam.bulk_nurbs != 0) {
st = usb_submit_urb(urb, GFP_ATOMIC);
if (st < 0)
- PDEBUG(D_ERR|D_PACK, "usb_submit_urb() ret %d", st);
+ err("usb_submit_urb() ret %d", st);
}
}
@@ -433,12 +432,13 @@ void gspca_frame_add(struct gspca_dev *gspca_dev,
/* if there are no queued buffer, discard the whole frame */
if (i == atomic_read(&gspca_dev->fr_q)) {
gspca_dev->last_packet_type = DISCARD_PACKET;
+ gspca_dev->sequence++;
return;
}
j = gspca_dev->fr_queue[i];
frame = &gspca_dev->frame[j];
frame->v4l2_buf.timestamp = ktime_to_timeval(ktime_get());
- frame->v4l2_buf.sequence = ++gspca_dev->sequence;
+ frame->v4l2_buf.sequence = gspca_dev->sequence++;
gspca_dev->image = frame->data;
gspca_dev->image_len = 0;
} else {
@@ -590,7 +590,7 @@ static int gspca_set_alt0(struct gspca_dev *gspca_dev)
return 0;
ret = usb_set_interface(gspca_dev->dev, gspca_dev->iface, 0);
if (ret < 0)
- PDEBUG(D_ERR|D_STREAM, "set alt 0 err %d", ret);
+ err("set alt 0 err %d", ret);
return ret;
}
@@ -652,7 +652,7 @@ static struct usb_host_endpoint *get_ep(struct gspca_dev *gspca_dev)
: USB_ENDPOINT_XFER_ISOC;
i = gspca_dev->alt; /* previous alt setting */
if (gspca_dev->cam.reverse_alts) {
- if (gspca_dev->audio)
+ if (gspca_dev->audio && i < gspca_dev->nbalt - 2)
i++;
while (++i < gspca_dev->nbalt) {
ep = alt_xfer(&intf->altsetting[i], xfer);
@@ -660,7 +660,7 @@ static struct usb_host_endpoint *get_ep(struct gspca_dev *gspca_dev)
break;
}
} else {
- if (gspca_dev->audio)
+ if (gspca_dev->audio && i > 1)
i--;
while (--i >= 0) {
ep = alt_xfer(&intf->altsetting[i], xfer);
@@ -850,8 +850,7 @@ static int gspca_init_transfer(struct gspca_dev *gspca_dev)
break;
gspca_stream_off(gspca_dev);
if (ret != -ENOSPC) {
- PDEBUG(D_ERR|D_STREAM,
- "usb_submit_urb alt %d err %d",
+ err("usb_submit_urb alt %d err %d",
gspca_dev->alt, ret);
goto out;
}
@@ -880,6 +879,7 @@ out:
static void gspca_set_default_mode(struct gspca_dev *gspca_dev)
{
+ struct gspca_ctrl *ctrl;
int i;
i = gspca_dev->cam.nmodes - 1; /* take the highest mode */
@@ -887,6 +887,16 @@ static void gspca_set_default_mode(struct gspca_dev *gspca_dev)
gspca_dev->width = gspca_dev->cam.cam_mode[i].width;
gspca_dev->height = gspca_dev->cam.cam_mode[i].height;
gspca_dev->pixfmt = gspca_dev->cam.cam_mode[i].pixelformat;
+
+ /* set the current control values to their default values
+ * which may have changed in sd_init() */
+ ctrl = gspca_dev->cam.ctrls;
+ if (ctrl != NULL) {
+ for (i = 0;
+ i < gspca_dev->sd_desc->nctrls;
+ i++, ctrl++)
+ ctrl->val = ctrl->def;
+ }
}
static int wxh_to_mode(struct gspca_dev *gspca_dev,
@@ -1310,7 +1320,7 @@ out:
return ret;
}
-static const struct ctrl *get_ctrl(struct gspca_dev *gspca_dev,
+static int get_ctrl(struct gspca_dev *gspca_dev,
int id)
{
const struct ctrl *ctrls;
@@ -1322,9 +1332,9 @@ static const struct ctrl *get_ctrl(struct gspca_dev *gspca_dev,
if (gspca_dev->ctrl_dis & (1 << i))
continue;
if (id == ctrls->qctrl.id)
- return ctrls;
+ return i;
}
- return NULL;
+ return -1;
}
static int vidioc_queryctrl(struct file *file, void *priv,
@@ -1332,34 +1342,40 @@ static int vidioc_queryctrl(struct file *file, void *priv,
{
struct gspca_dev *gspca_dev = priv;
const struct ctrl *ctrls;
- int i;
+ struct gspca_ctrl *gspca_ctrl;
+ int i, idx;
u32 id;
- ctrls = NULL;
id = q_ctrl->id;
if (id & V4L2_CTRL_FLAG_NEXT_CTRL) {
id &= V4L2_CTRL_ID_MASK;
id++;
+ idx = -1;
for (i = 0; i < gspca_dev->sd_desc->nctrls; i++) {
if (gspca_dev->ctrl_dis & (1 << i))
continue;
if (gspca_dev->sd_desc->ctrls[i].qctrl.id < id)
continue;
- if (ctrls && gspca_dev->sd_desc->ctrls[i].qctrl.id
- > ctrls->qctrl.id)
+ if (idx >= 0
+ && gspca_dev->sd_desc->ctrls[i].qctrl.id
+ > gspca_dev->sd_desc->ctrls[idx].qctrl.id)
continue;
- ctrls = &gspca_dev->sd_desc->ctrls[i];
+ idx = i;
}
- if (ctrls == NULL)
- return -EINVAL;
} else {
- ctrls = get_ctrl(gspca_dev, id);
- if (ctrls == NULL)
- return -EINVAL;
- i = ctrls - gspca_dev->sd_desc->ctrls;
+ idx = get_ctrl(gspca_dev, id);
}
- memcpy(q_ctrl, ctrls, sizeof *q_ctrl);
- if (gspca_dev->ctrl_inac & (1 << i))
+ if (idx < 0)
+ return -EINVAL;
+ ctrls = &gspca_dev->sd_desc->ctrls[idx];
+ memcpy(q_ctrl, &ctrls->qctrl, sizeof *q_ctrl);
+ if (gspca_dev->cam.ctrls != NULL) {
+ gspca_ctrl = &gspca_dev->cam.ctrls[idx];
+ q_ctrl->default_value = gspca_ctrl->def;
+ q_ctrl->minimum = gspca_ctrl->min;
+ q_ctrl->maximum = gspca_ctrl->max;
+ }
+ if (gspca_dev->ctrl_inac & (1 << idx))
q_ctrl->flags |= V4L2_CTRL_FLAG_INACTIVE;
return 0;
}
@@ -1369,23 +1385,46 @@ static int vidioc_s_ctrl(struct file *file, void *priv,
{
struct gspca_dev *gspca_dev = priv;
const struct ctrl *ctrls;
- int ret;
+ struct gspca_ctrl *gspca_ctrl;
+ int idx, ret;
- ctrls = get_ctrl(gspca_dev, ctrl->id);
- if (ctrls == NULL)
+ idx = get_ctrl(gspca_dev, ctrl->id);
+ if (idx < 0)
return -EINVAL;
-
- if (ctrl->value < ctrls->qctrl.minimum
- || ctrl->value > ctrls->qctrl.maximum)
- return -ERANGE;
+ if (gspca_dev->ctrl_inac & (1 << idx))
+ return -EINVAL;
+ ctrls = &gspca_dev->sd_desc->ctrls[idx];
+ if (gspca_dev->cam.ctrls != NULL) {
+ gspca_ctrl = &gspca_dev->cam.ctrls[idx];
+ if (ctrl->value < gspca_ctrl->min
+ || ctrl->value > gspca_ctrl->max)
+ return -ERANGE;
+ } else {
+ gspca_ctrl = NULL;
+ if (ctrl->value < ctrls->qctrl.minimum
+ || ctrl->value > ctrls->qctrl.maximum)
+ return -ERANGE;
+ }
PDEBUG(D_CONF, "set ctrl [%08x] = %d", ctrl->id, ctrl->value);
if (mutex_lock_interruptible(&gspca_dev->usb_lock))
return -ERESTARTSYS;
+ if (!gspca_dev->present) {
+ ret = -ENODEV;
+ goto out;
+ }
gspca_dev->usb_err = 0;
- if (gspca_dev->present)
+ if (ctrls->set != NULL) {
ret = ctrls->set(gspca_dev, ctrl->value);
- else
- ret = -ENODEV;
+ goto out;
+ }
+ if (gspca_ctrl != NULL) {
+ gspca_ctrl->val = ctrl->value;
+ if (ctrls->set_control != NULL
+ && gspca_dev->streaming)
+ ctrls->set_control(gspca_dev);
+ }
+ ret = gspca_dev->usb_err;
+out:
mutex_unlock(&gspca_dev->usb_lock);
return ret;
}
@@ -1395,19 +1434,28 @@ static int vidioc_g_ctrl(struct file *file, void *priv,
{
struct gspca_dev *gspca_dev = priv;
const struct ctrl *ctrls;
- int ret;
+ int idx, ret;
- ctrls = get_ctrl(gspca_dev, ctrl->id);
- if (ctrls == NULL)
+ idx = get_ctrl(gspca_dev, ctrl->id);
+ if (idx < 0)
return -EINVAL;
+ ctrls = &gspca_dev->sd_desc->ctrls[idx];
if (mutex_lock_interruptible(&gspca_dev->usb_lock))
return -ERESTARTSYS;
+ if (!gspca_dev->present) {
+ ret = -ENODEV;
+ goto out;
+ }
gspca_dev->usb_err = 0;
- if (gspca_dev->present)
+ if (ctrls->get != NULL) {
ret = ctrls->get(gspca_dev, &ctrl->value);
- else
- ret = -ENODEV;
+ goto out;
+ }
+ if (gspca_dev->cam.ctrls != NULL)
+ ctrl->value = gspca_dev->cam.ctrls[idx].val;
+ ret = 0;
+out:
mutex_unlock(&gspca_dev->usb_lock);
return ret;
}
@@ -2127,6 +2175,22 @@ static struct video_device gspca_template = {
.release = gspca_release,
};
+/* initialize the controls */
+static void ctrls_init(struct gspca_dev *gspca_dev)
+{
+ struct gspca_ctrl *ctrl;
+ int i;
+
+ for (i = 0, ctrl = gspca_dev->cam.ctrls;
+ i < gspca_dev->sd_desc->nctrls;
+ i++, ctrl++) {
+ ctrl->def = gspca_dev->sd_desc->ctrls[i].qctrl.default_value;
+ ctrl->val = ctrl->def;
+ ctrl->min = gspca_dev->sd_desc->ctrls[i].qctrl.minimum;
+ ctrl->max = gspca_dev->sd_desc->ctrls[i].qctrl.maximum;
+ }
+}
+
/*
* probe and create a new gspca device
*
@@ -2188,6 +2252,8 @@ int gspca_dev_probe2(struct usb_interface *intf,
ret = sd_desc->config(gspca_dev, id);
if (ret < 0)
goto out;
+ if (gspca_dev->cam.ctrls != NULL)
+ ctrls_init(gspca_dev);
ret = sd_desc->init(gspca_dev);
if (ret < 0)
goto out;
@@ -2243,7 +2309,7 @@ int gspca_dev_probe(struct usb_interface *intf,
/* we don't handle multi-config cameras */
if (dev->descriptor.bNumConfigurations != 1) {
- PDEBUG(D_ERR, "%04x:%04x too many config",
+ err("%04x:%04x too many config",
id->idVendor, id->idProduct);
return -ENODEV;
}
@@ -2428,7 +2494,7 @@ EXPORT_SYMBOL(gspca_auto_gain_n_exposure);
/* -- module insert / remove -- */
static int __init gspca_init(void)
{
- info("main v%d.%d.%d registered",
+ info("v%d.%d.%d registered",
(DRIVER_VERSION_NUMBER >> 16) & 0xff,
(DRIVER_VERSION_NUMBER >> 8) & 0xff,
DRIVER_VERSION_NUMBER & 0xff);
@@ -2436,7 +2502,6 @@ static int __init gspca_init(void)
}
static void __exit gspca_exit(void)
{
- info("main deregistered");
}
module_init(gspca_init);
diff --git a/drivers/media/video/gspca/gspca.h b/drivers/media/video/gspca/gspca.h
index b749c36d9f7e..d4d210b56b49 100644
--- a/drivers/media/video/gspca/gspca.h
+++ b/drivers/media/video/gspca/gspca.h
@@ -52,11 +52,20 @@ struct framerates {
int nrates;
};
+/* control definition */
+struct gspca_ctrl {
+ s16 val; /* current value */
+ s16 def; /* default value */
+ s16 min, max; /* minimum and maximum values */
+};
+
/* device information - set at probe time */
struct cam {
const struct v4l2_pix_format *cam_mode; /* size nmodes */
const struct framerates *mode_framerates; /* must have size nmode,
* just like cam_mode */
+ struct gspca_ctrl *ctrls; /* control table - size nctrls */
+ /* may be NULL */
u32 bulk_size; /* buffer size when image transfer by bulk */
u32 input_flags; /* value for ENUM_INPUT status flags */
u8 nmodes; /* size of cam_mode */
@@ -99,6 +108,7 @@ struct ctrl {
struct v4l2_queryctrl qctrl;
int (*set)(struct gspca_dev *, __s32);
int (*get)(struct gspca_dev *, __s32 *);
+ cam_v_op set_control;
};
/* subdriver description */
@@ -106,7 +116,7 @@ struct sd_desc {
/* information */
const char *name; /* sub-driver name */
/* controls */
- const struct ctrl *ctrls;
+ const struct ctrl *ctrls; /* static control definition */
int nctrls;
/* mandatory operations */
cam_cf_op config; /* called on probe */
diff --git a/drivers/media/video/gspca/jeilinj.c b/drivers/media/video/gspca/jeilinj.c
index 12d9cf4caba2..a35e87bb0388 100644
--- a/drivers/media/video/gspca/jeilinj.c
+++ b/drivers/media/video/gspca/jeilinj.c
@@ -82,7 +82,7 @@ static int jlj_write2(struct gspca_dev *gspca_dev, unsigned char *command)
usb_sndbulkpipe(gspca_dev->dev, 3),
gspca_dev->usb_buf, 2, NULL, 500);
if (retval < 0)
- PDEBUG(D_ERR, "command write [%02x] error %d",
+ err("command write [%02x] error %d",
gspca_dev->usb_buf[0], retval);
return retval;
}
@@ -97,7 +97,7 @@ static int jlj_read1(struct gspca_dev *gspca_dev, unsigned char response)
gspca_dev->usb_buf, 1, NULL, 500);
response = gspca_dev->usb_buf[0];
if (retval < 0)
- PDEBUG(D_ERR, "read command [%02x] error %d",
+ err("read command [%02x] error %d",
gspca_dev->usb_buf[0], retval);
return retval;
}
@@ -191,7 +191,7 @@ static void jlj_dostream(struct work_struct *work)
buffer = kmalloc(JEILINJ_MAX_TRANSFER, GFP_KERNEL | GFP_DMA);
if (!buffer) {
- PDEBUG(D_ERR, "Couldn't allocate USB buffer");
+ err("Couldn't allocate USB buffer");
goto quit_stream;
}
while (gspca_dev->present && gspca_dev->streaming) {
@@ -354,19 +354,12 @@ static struct usb_driver sd_driver = {
/* -- module insert / remove -- */
static int __init sd_mod_init(void)
{
- int ret;
-
- ret = usb_register(&sd_driver);
- if (ret < 0)
- return ret;
- PDEBUG(D_PROBE, "registered");
- return 0;
+ return usb_register(&sd_driver);
}
static void __exit sd_mod_exit(void)
{
usb_deregister(&sd_driver);
- PDEBUG(D_PROBE, "deregistered");
}
module_init(sd_mod_init);
diff --git a/drivers/media/video/gspca/konica.c b/drivers/media/video/gspca/konica.c
new file mode 100644
index 000000000000..d2ce65dcbfdc
--- /dev/null
+++ b/drivers/media/video/gspca/konica.c
@@ -0,0 +1,646 @@
+/*
+ * Driver for USB webcams based on the Konica chipset. This
+ * chipset is used in the Intel YC76 camera.
+ *
+ * Copyright (C) 2010 Hans de Goede <hdegoede@redhat.com>
+ *
+ * Based on the usbvideo v4l1 konicawc driver which is:
+ *
+ * Copyright (C) 2002 Simon Evans <spse@secret.org.uk>
+ *
+ * The code for making gspca work with a webcam with 2 isoc endpoints was
+ * taken from the benq gspca subdriver which is:
+ *
+ * Copyright (C) 2009 Jean-Francois Moine (http://moinejf.free.fr)
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ */
+
+#define MODULE_NAME "konica"
+
+#include <linux/input.h>
+#include "gspca.h"
+
+MODULE_AUTHOR("Hans de Goede <hdegoede@redhat.com>");
+MODULE_DESCRIPTION("Konica chipset USB Camera Driver");
+MODULE_LICENSE("GPL");
+
+#define WHITEBAL_REG 0x01
+#define BRIGHTNESS_REG 0x02
+#define SHARPNESS_REG 0x03
+#define CONTRAST_REG 0x04
+#define SATURATION_REG 0x05
+
+/* specific webcam descriptor */
+struct sd {
+ struct gspca_dev gspca_dev; /* !! must be the first item */
+ struct urb *last_data_urb;
+ u8 snapshot_pressed;
+ u8 brightness;
+ u8 contrast;
+ u8 saturation;
+ u8 whitebal;
+ u8 sharpness;
+};
+
+/* V4L2 controls supported by the driver */
+static int sd_setbrightness(struct gspca_dev *gspca_dev, __s32 val);
+static int sd_getbrightness(struct gspca_dev *gspca_dev, __s32 *val);
+static int sd_setcontrast(struct gspca_dev *gspca_dev, __s32 val);
+static int sd_getcontrast(struct gspca_dev *gspca_dev, __s32 *val);
+static int sd_setsaturation(struct gspca_dev *gspca_dev, __s32 val);
+static int sd_getsaturation(struct gspca_dev *gspca_dev, __s32 *val);
+static int sd_setwhitebal(struct gspca_dev *gspca_dev, __s32 val);
+static int sd_getwhitebal(struct gspca_dev *gspca_dev, __s32 *val);
+static int sd_setsharpness(struct gspca_dev *gspca_dev, __s32 val);
+static int sd_getsharpness(struct gspca_dev *gspca_dev, __s32 *val);
+
+static const struct ctrl sd_ctrls[] = {
+#define SD_BRIGHTNESS 0
+ {
+ {
+ .id = V4L2_CID_BRIGHTNESS,
+ .type = V4L2_CTRL_TYPE_INTEGER,
+ .name = "Brightness",
+ .minimum = 0,
+ .maximum = 9,
+ .step = 1,
+#define BRIGHTNESS_DEFAULT 4
+ .default_value = BRIGHTNESS_DEFAULT,
+ .flags = 0,
+ },
+ .set = sd_setbrightness,
+ .get = sd_getbrightness,
+ },
+#define SD_CONTRAST 1
+ {
+ {
+ .id = V4L2_CID_CONTRAST,
+ .type = V4L2_CTRL_TYPE_INTEGER,
+ .name = "Contrast",
+ .minimum = 0,
+ .maximum = 9,
+ .step = 4,
+#define CONTRAST_DEFAULT 10
+ .default_value = CONTRAST_DEFAULT,
+ .flags = 0,
+ },
+ .set = sd_setcontrast,
+ .get = sd_getcontrast,
+ },
+#define SD_SATURATION 2
+ {
+ {
+ .id = V4L2_CID_SATURATION,
+ .type = V4L2_CTRL_TYPE_INTEGER,
+ .name = "Saturation",
+ .minimum = 0,
+ .maximum = 9,
+ .step = 1,
+#define SATURATION_DEFAULT 4
+ .default_value = SATURATION_DEFAULT,
+ .flags = 0,
+ },
+ .set = sd_setsaturation,
+ .get = sd_getsaturation,
+ },
+#define SD_WHITEBAL 3
+ {
+ {
+ .id = V4L2_CID_WHITE_BALANCE_TEMPERATURE,
+ .type = V4L2_CTRL_TYPE_INTEGER,
+ .name = "White Balance",
+ .minimum = 0,
+ .maximum = 33,
+ .step = 1,
+#define WHITEBAL_DEFAULT 25
+ .default_value = WHITEBAL_DEFAULT,
+ .flags = 0,
+ },
+ .set = sd_setwhitebal,
+ .get = sd_getwhitebal,
+ },
+#define SD_SHARPNESS 4
+ {
+ {
+ .id = V4L2_CID_SHARPNESS,
+ .type = V4L2_CTRL_TYPE_INTEGER,
+ .name = "Sharpness",
+ .minimum = 0,
+ .maximum = 9,
+ .step = 1,
+#define SHARPNESS_DEFAULT 4
+ .default_value = SHARPNESS_DEFAULT,
+ .flags = 0,
+ },
+ .set = sd_setsharpness,
+ .get = sd_getsharpness,
+ },
+};
+
+/* .priv is what goes to register 8 for this mode, known working values:
+ 0x00 -> 176x144, cropped
+ 0x01 -> 176x144, cropped
+ 0x02 -> 176x144, cropped
+ 0x03 -> 176x144, cropped
+ 0x04 -> 176x144, binned
+ 0x05 -> 320x240
+ 0x06 -> 320x240
+ 0x07 -> 160x120, cropped
+ 0x08 -> 160x120, cropped
+ 0x09 -> 160x120, binned (note has 136 lines)
+ 0x0a -> 160x120, binned (note has 136 lines)
+ 0x0b -> 160x120, cropped
+*/
+static const struct v4l2_pix_format vga_mode[] = {
+ {160, 120, V4L2_PIX_FMT_KONICA420, V4L2_FIELD_NONE,
+ .bytesperline = 160,
+ .sizeimage = 160 * 136 * 3 / 2 + 960,
+ .colorspace = V4L2_COLORSPACE_SRGB,
+ .priv = 0x0a},
+ {176, 144, V4L2_PIX_FMT_KONICA420, V4L2_FIELD_NONE,
+ .bytesperline = 176,
+ .sizeimage = 176 * 144 * 3 / 2 + 960,
+ .colorspace = V4L2_COLORSPACE_SRGB,
+ .priv = 0x04},
+ {320, 240, V4L2_PIX_FMT_KONICA420, V4L2_FIELD_NONE,
+ .bytesperline = 320,
+ .sizeimage = 320 * 240 * 3 / 2 + 960,
+ .colorspace = V4L2_COLORSPACE_SRGB,
+ .priv = 0x05},
+};
+
+static void sd_isoc_irq(struct urb *urb);
+
+static void reg_w(struct gspca_dev *gspca_dev, u16 value, u16 index)
+{
+ struct usb_device *dev = gspca_dev->dev;
+ int ret;
+
+ if (gspca_dev->usb_err < 0)
+ return;
+ ret = usb_control_msg(dev, usb_sndctrlpipe(dev, 0),
+ 0x02,
+ USB_DIR_OUT | USB_TYPE_VENDOR | USB_RECIP_DEVICE,
+ value,
+ index,
+ NULL,
+ 0,
+ 1000);
+ if (ret < 0) {
+ err("reg_w err %d", ret);
+ gspca_dev->usb_err = ret;
+ }
+}
+
+static void reg_r(struct gspca_dev *gspca_dev, u16 value, u16 index)
+{
+ struct usb_device *dev = gspca_dev->dev;
+ int ret;
+
+ if (gspca_dev->usb_err < 0)
+ return;
+ ret = usb_control_msg(dev, usb_rcvctrlpipe(dev, 0),
+ 0x03,
+ USB_DIR_IN | USB_TYPE_VENDOR | USB_RECIP_DEVICE,
+ value,
+ index,
+ gspca_dev->usb_buf,
+ 2,
+ 1000);
+ if (ret < 0) {
+ err("reg_w err %d", ret);
+ gspca_dev->usb_err = ret;
+ }
+}
+
+static void konica_stream_on(struct gspca_dev *gspca_dev)
+{
+ reg_w(gspca_dev, 1, 0x0b);
+}
+
+static void konica_stream_off(struct gspca_dev *gspca_dev)
+{
+ reg_w(gspca_dev, 0, 0x0b);
+}
+
+/* this function is called at probe time */
+static int sd_config(struct gspca_dev *gspca_dev,
+ const struct usb_device_id *id)
+{
+ struct sd *sd = (struct sd *) gspca_dev;
+
+ gspca_dev->cam.cam_mode = vga_mode;
+ gspca_dev->cam.nmodes = ARRAY_SIZE(vga_mode);
+ gspca_dev->cam.no_urb_create = 1;
+ /* The highest alt setting has an isoc packetsize of 0, so we
+ don't want to use it */
+ gspca_dev->nbalt--;
+
+ sd->brightness = BRIGHTNESS_DEFAULT;
+ sd->contrast = CONTRAST_DEFAULT;
+ sd->saturation = SATURATION_DEFAULT;
+ sd->whitebal = WHITEBAL_DEFAULT;
+ sd->sharpness = SHARPNESS_DEFAULT;
+
+ return 0;
+}
+
+/* this function is called at probe and resume time */
+static int sd_init(struct gspca_dev *gspca_dev)
+{
+ /* HDG not sure if these 2 reads are needed */
+ reg_r(gspca_dev, 0, 0x10);
+ PDEBUG(D_PROBE, "Reg 0x10 reads: %02x %02x",
+ gspca_dev->usb_buf[0], gspca_dev->usb_buf[1]);
+ reg_r(gspca_dev, 0, 0x10);
+ PDEBUG(D_PROBE, "Reg 0x10 reads: %02x %02x",
+ gspca_dev->usb_buf[0], gspca_dev->usb_buf[1]);
+ reg_w(gspca_dev, 0, 0x0d);
+
+ return 0;
+}
+
+static int sd_start(struct gspca_dev *gspca_dev)
+{
+ struct sd *sd = (struct sd *) gspca_dev;
+ struct urb *urb;
+ int i, n, packet_size;
+ struct usb_host_interface *alt;
+ struct usb_interface *intf;
+
+ intf = usb_ifnum_to_if(sd->gspca_dev.dev, sd->gspca_dev.iface);
+ alt = usb_altnum_to_altsetting(intf, sd->gspca_dev.alt);
+ if (!alt) {
+ err("Couldn't get altsetting");
+ return -EIO;
+ }
+
+ packet_size = le16_to_cpu(alt->endpoint[0].desc.wMaxPacketSize);
+
+ reg_w(gspca_dev, sd->brightness, BRIGHTNESS_REG);
+ reg_w(gspca_dev, sd->whitebal, WHITEBAL_REG);
+ reg_w(gspca_dev, sd->contrast, CONTRAST_REG);
+ reg_w(gspca_dev, sd->saturation, SATURATION_REG);
+ reg_w(gspca_dev, sd->sharpness, SHARPNESS_REG);
+
+ n = gspca_dev->cam.cam_mode[gspca_dev->curr_mode].priv;
+ reg_w(gspca_dev, n, 0x08);
+
+ konica_stream_on(gspca_dev);
+
+ if (gspca_dev->usb_err)
+ return gspca_dev->usb_err;
+
+ /* create 4 URBs - 2 on endpoint 0x81 and 2 on endpoint 0x82 */
+#if MAX_NURBS < 4
+#error "Not enough URBs in the gspca table"
+#endif
+#define SD_NPKT 32
+ for (n = 0; n < 4; n++) {
+ i = n & 1 ? 0 : 1;
+ packet_size =
+ le16_to_cpu(alt->endpoint[i].desc.wMaxPacketSize);
+ urb = usb_alloc_urb(SD_NPKT, GFP_KERNEL);
+ if (!urb) {
+ err("usb_alloc_urb failed");
+ return -ENOMEM;
+ }
+ gspca_dev->urb[n] = urb;
+ urb->transfer_buffer = usb_alloc_coherent(gspca_dev->dev,
+ packet_size * SD_NPKT,
+ GFP_KERNEL,
+ &urb->transfer_dma);
+ if (urb->transfer_buffer == NULL) {
+ err("usb_buffer_alloc failed");
+ return -ENOMEM;
+ }
+
+ urb->dev = gspca_dev->dev;
+ urb->context = gspca_dev;
+ urb->transfer_buffer_length = packet_size * SD_NPKT;
+ urb->pipe = usb_rcvisocpipe(gspca_dev->dev,
+ n & 1 ? 0x81 : 0x82);
+ urb->transfer_flags = URB_ISO_ASAP
+ | URB_NO_TRANSFER_DMA_MAP;
+ urb->interval = 1;
+ urb->complete = sd_isoc_irq;
+ urb->number_of_packets = SD_NPKT;
+ for (i = 0; i < SD_NPKT; i++) {
+ urb->iso_frame_desc[i].length = packet_size;
+ urb->iso_frame_desc[i].offset = packet_size * i;
+ }
+ }
+
+ return 0;
+}
+
+static void sd_stopN(struct gspca_dev *gspca_dev)
+{
+ struct sd *sd = (struct sd *) gspca_dev;
+
+ konica_stream_off(gspca_dev);
+#if defined(CONFIG_INPUT) || defined(CONFIG_INPUT_MODULE)
+ /* Don't keep the button in the pressed state "forever" if it was
+ pressed when streaming is stopped */
+ if (sd->snapshot_pressed) {
+ input_report_key(gspca_dev->input_dev, KEY_CAMERA, 0);
+ input_sync(gspca_dev->input_dev);
+ sd->snapshot_pressed = 0;
+ }
+#endif
+}
+
+/* reception of an URB */
+static void sd_isoc_irq(struct urb *urb)
+{
+ struct gspca_dev *gspca_dev = (struct gspca_dev *) urb->context;
+ struct sd *sd = (struct sd *) gspca_dev;
+ struct urb *data_urb, *status_urb;
+ u8 *data;
+ int i, st;
+
+ PDEBUG(D_PACK, "sd isoc irq");
+ if (!gspca_dev->streaming)
+ return;
+
+ if (urb->status != 0) {
+ if (urb->status == -ESHUTDOWN)
+ return; /* disconnection */
+#ifdef CONFIG_PM
+ if (gspca_dev->frozen)
+ return;
+#endif
+ PDEBUG(D_ERR, "urb status: %d", urb->status);
+ st = usb_submit_urb(urb, GFP_ATOMIC);
+ if (st < 0)
+ err("resubmit urb error %d", st);
+ return;
+ }
+
+ /* if this is a data URB (ep 0x82), wait */
+ if (urb->transfer_buffer_length > 32) {
+ sd->last_data_urb = urb;
+ return;
+ }
+
+ status_urb = urb;
+ data_urb = sd->last_data_urb;
+ sd->last_data_urb = NULL;
+
+ if (!data_urb || data_urb->start_frame != status_urb->start_frame) {
+ PDEBUG(D_ERR|D_PACK, "lost sync on frames");
+ goto resubmit;
+ }
+
+ if (data_urb->number_of_packets != status_urb->number_of_packets) {
+ PDEBUG(D_ERR|D_PACK,
+ "no packets does not match, data: %d, status: %d",
+ data_urb->number_of_packets,
+ status_urb->number_of_packets);
+ goto resubmit;
+ }
+
+ for (i = 0; i < status_urb->number_of_packets; i++) {
+ if (data_urb->iso_frame_desc[i].status ||
+ status_urb->iso_frame_desc[i].status) {
+ PDEBUG(D_ERR|D_PACK,
+ "pkt %d data-status %d, status-status %d", i,
+ data_urb->iso_frame_desc[i].status,
+ status_urb->iso_frame_desc[i].status);
+ gspca_dev->last_packet_type = DISCARD_PACKET;
+ continue;
+ }
+
+ if (status_urb->iso_frame_desc[i].actual_length != 1) {
+ PDEBUG(D_ERR|D_PACK,
+ "bad status packet length %d",
+ status_urb->iso_frame_desc[i].actual_length);
+ gspca_dev->last_packet_type = DISCARD_PACKET;
+ continue;
+ }
+
+ st = *((u8 *)status_urb->transfer_buffer
+ + status_urb->iso_frame_desc[i].offset);
+
+ data = (u8 *)data_urb->transfer_buffer
+ + data_urb->iso_frame_desc[i].offset;
+
+ /* st: 0x80-0xff: frame start with frame number (ie 0-7f)
+ * otherwise:
+ * bit 0 0: keep packet
+ * 1: drop packet (padding data)
+ *
+ * bit 4 0 button not clicked
+ * 1 button clicked
+ * button is used to `take a picture' (in software)
+ */
+ if (st & 0x80) {
+ gspca_frame_add(gspca_dev, LAST_PACKET, NULL, 0);
+ gspca_frame_add(gspca_dev, FIRST_PACKET, NULL, 0);
+ } else {
+#if defined(CONFIG_INPUT) || defined(CONFIG_INPUT_MODULE)
+ u8 button_state = st & 0x40 ? 1 : 0;
+ if (sd->snapshot_pressed != button_state) {
+ input_report_key(gspca_dev->input_dev,
+ KEY_CAMERA,
+ button_state);
+ input_sync(gspca_dev->input_dev);
+ sd->snapshot_pressed = button_state;
+ }
+#endif
+ if (st & 0x01)
+ continue;
+ }
+ gspca_frame_add(gspca_dev, INTER_PACKET, data,
+ data_urb->iso_frame_desc[i].actual_length);
+ }
+
+resubmit:
+ if (data_urb) {
+ st = usb_submit_urb(data_urb, GFP_ATOMIC);
+ if (st < 0)
+ PDEBUG(D_ERR|D_PACK,
+ "usb_submit_urb(data_urb) ret %d", st);
+ }
+ st = usb_submit_urb(status_urb, GFP_ATOMIC);
+ if (st < 0)
+ err("usb_submit_urb(status_urb) ret %d", st);
+}
+
+static int sd_setbrightness(struct gspca_dev *gspca_dev, __s32 val)
+{
+ struct sd *sd = (struct sd *) gspca_dev;
+
+ sd->brightness = val;
+ if (gspca_dev->streaming) {
+ konica_stream_off(gspca_dev);
+ reg_w(gspca_dev, sd->brightness, BRIGHTNESS_REG);
+ konica_stream_on(gspca_dev);
+ }
+
+ return 0;
+}
+
+static int sd_getbrightness(struct gspca_dev *gspca_dev, __s32 *val)
+{
+ struct sd *sd = (struct sd *) gspca_dev;
+
+ *val = sd->brightness;
+
+ return 0;
+}
+
+static int sd_setcontrast(struct gspca_dev *gspca_dev, __s32 val)
+{
+ struct sd *sd = (struct sd *) gspca_dev;
+
+ sd->contrast = val;
+ if (gspca_dev->streaming) {
+ konica_stream_off(gspca_dev);
+ reg_w(gspca_dev, sd->contrast, CONTRAST_REG);
+ konica_stream_on(gspca_dev);
+ }
+
+ return 0;
+}
+
+static int sd_getcontrast(struct gspca_dev *gspca_dev, __s32 *val)
+{
+ struct sd *sd = (struct sd *) gspca_dev;
+
+ *val = sd->contrast;
+
+ return 0;
+}
+
+static int sd_setsaturation(struct gspca_dev *gspca_dev, __s32 val)
+{
+ struct sd *sd = (struct sd *) gspca_dev;
+
+ sd->saturation = val;
+ if (gspca_dev->streaming) {
+ konica_stream_off(gspca_dev);
+ reg_w(gspca_dev, sd->saturation, SATURATION_REG);
+ konica_stream_on(gspca_dev);
+ }
+ return 0;
+}
+
+static int sd_getsaturation(struct gspca_dev *gspca_dev, __s32 *val)
+{
+ struct sd *sd = (struct sd *) gspca_dev;
+
+ *val = sd->saturation;
+
+ return 0;
+}
+
+static int sd_setwhitebal(struct gspca_dev *gspca_dev, __s32 val)
+{
+ struct sd *sd = (struct sd *) gspca_dev;
+
+ sd->whitebal = val;
+ if (gspca_dev->streaming) {
+ konica_stream_off(gspca_dev);
+ reg_w(gspca_dev, sd->whitebal, WHITEBAL_REG);
+ konica_stream_on(gspca_dev);
+ }
+ return 0;
+}
+
+static int sd_getwhitebal(struct gspca_dev *gspca_dev, __s32 *val)
+{
+ struct sd *sd = (struct sd *) gspca_dev;
+
+ *val = sd->whitebal;
+
+ return 0;
+}
+
+static int sd_setsharpness(struct gspca_dev *gspca_dev, __s32 val)
+{
+ struct sd *sd = (struct sd *) gspca_dev;
+
+ sd->sharpness = val;
+ if (gspca_dev->streaming) {
+ konica_stream_off(gspca_dev);
+ reg_w(gspca_dev, sd->sharpness, SHARPNESS_REG);
+ konica_stream_on(gspca_dev);
+ }
+ return 0;
+}
+
+static int sd_getsharpness(struct gspca_dev *gspca_dev, __s32 *val)
+{
+ struct sd *sd = (struct sd *) gspca_dev;
+
+ *val = sd->sharpness;
+
+ return 0;
+}
+
+/* sub-driver description */
+static const struct sd_desc sd_desc = {
+ .name = MODULE_NAME,
+ .ctrls = sd_ctrls,
+ .nctrls = ARRAY_SIZE(sd_ctrls),
+ .config = sd_config,
+ .init = sd_init,
+ .start = sd_start,
+ .stopN = sd_stopN,
+#if defined(CONFIG_INPUT) || defined(CONFIG_INPUT_MODULE)
+ .other_input = 1,
+#endif
+};
+
+/* -- module initialisation -- */
+static const __devinitdata struct usb_device_id device_table[] = {
+ {USB_DEVICE(0x04c8, 0x0720)}, /* Intel YC 76 */
+ {}
+};
+MODULE_DEVICE_TABLE(usb, device_table);
+
+/* -- device connect -- */
+static int sd_probe(struct usb_interface *intf,
+ const struct usb_device_id *id)
+{
+ return gspca_dev_probe(intf, id, &sd_desc, sizeof(struct sd),
+ THIS_MODULE);
+}
+
+static struct usb_driver sd_driver = {
+ .name = MODULE_NAME,
+ .id_table = device_table,
+ .probe = sd_probe,
+ .disconnect = gspca_disconnect,
+#ifdef CONFIG_PM
+ .suspend = gspca_suspend,
+ .resume = gspca_resume,
+#endif
+};
+
+/* -- module insert / remove -- */
+static int __init sd_mod_init(void)
+{
+ return usb_register(&sd_driver);
+}
+static void __exit sd_mod_exit(void)
+{
+ usb_deregister(&sd_driver);
+}
+
+module_init(sd_mod_init);
+module_exit(sd_mod_exit);
diff --git a/drivers/media/video/gspca/m5602/m5602_core.c b/drivers/media/video/gspca/m5602/m5602_core.c
index b073d66acd04..c872b93a3351 100644
--- a/drivers/media/video/gspca/m5602/m5602_core.c
+++ b/drivers/media/video/gspca/m5602/m5602_core.c
@@ -406,18 +406,12 @@ static struct usb_driver sd_driver = {
/* -- module insert / remove -- */
static int __init mod_m5602_init(void)
{
- int ret;
- ret = usb_register(&sd_driver);
- if (ret < 0)
- return ret;
- PDEBUG(D_PROBE, "registered");
- return 0;
+ return usb_register(&sd_driver);
}
static void __exit mod_m5602_exit(void)
{
usb_deregister(&sd_driver);
- PDEBUG(D_PROBE, "deregistered");
}
module_init(mod_m5602_init);
diff --git a/drivers/media/video/gspca/m5602/m5602_mt9m111.c b/drivers/media/video/gspca/m5602/m5602_mt9m111.c
index c0722fa64606..0d605a52b924 100644
--- a/drivers/media/video/gspca/m5602/m5602_mt9m111.c
+++ b/drivers/media/video/gspca/m5602/m5602_mt9m111.c
@@ -109,14 +109,14 @@ static const struct ctrl mt9m111_ctrls[] = {
#define GREEN_BALANCE_IDX 4
{
{
- .id = M5602_V4L2_CID_GREEN_BALANCE,
- .type = V4L2_CTRL_TYPE_INTEGER,
- .name = "green balance",
- .minimum = 0x00,
- .maximum = 0x7ff,
- .step = 0x1,
- .default_value = MT9M111_GREEN_GAIN_DEFAULT,
- .flags = V4L2_CTRL_FLAG_SLIDER
+ .id = M5602_V4L2_CID_GREEN_BALANCE,
+ .type = V4L2_CTRL_TYPE_INTEGER,
+ .name = "green balance",
+ .minimum = 0x00,
+ .maximum = 0x7ff,
+ .step = 0x1,
+ .default_value = MT9M111_GREEN_GAIN_DEFAULT,
+ .flags = V4L2_CTRL_FLAG_SLIDER
},
.set = mt9m111_set_green_balance,
.get = mt9m111_get_green_balance
@@ -124,14 +124,14 @@ static const struct ctrl mt9m111_ctrls[] = {
#define BLUE_BALANCE_IDX 5
{
{
- .id = V4L2_CID_BLUE_BALANCE,
- .type = V4L2_CTRL_TYPE_INTEGER,
- .name = "blue balance",
- .minimum = 0x00,
- .maximum = 0x7ff,
- .step = 0x1,
- .default_value = MT9M111_BLUE_GAIN_DEFAULT,
- .flags = V4L2_CTRL_FLAG_SLIDER
+ .id = V4L2_CID_BLUE_BALANCE,
+ .type = V4L2_CTRL_TYPE_INTEGER,
+ .name = "blue balance",
+ .minimum = 0x00,
+ .maximum = 0x7ff,
+ .step = 0x1,
+ .default_value = MT9M111_BLUE_GAIN_DEFAULT,
+ .flags = V4L2_CTRL_FLAG_SLIDER
},
.set = mt9m111_set_blue_balance,
.get = mt9m111_get_blue_balance
@@ -139,14 +139,14 @@ static const struct ctrl mt9m111_ctrls[] = {
#define RED_BALANCE_IDX 5
{
{
- .id = V4L2_CID_RED_BALANCE,
- .type = V4L2_CTRL_TYPE_INTEGER,
- .name = "red balance",
- .minimum = 0x00,
- .maximum = 0x7ff,
- .step = 0x1,
- .default_value = MT9M111_RED_GAIN_DEFAULT,
- .flags = V4L2_CTRL_FLAG_SLIDER
+ .id = V4L2_CID_RED_BALANCE,
+ .type = V4L2_CTRL_TYPE_INTEGER,
+ .name = "red balance",
+ .minimum = 0x00,
+ .maximum = 0x7ff,
+ .step = 0x1,
+ .default_value = MT9M111_RED_GAIN_DEFAULT,
+ .flags = V4L2_CTRL_FLAG_SLIDER
},
.set = mt9m111_set_red_balance,
.get = mt9m111_get_red_balance
diff --git a/drivers/media/video/gspca/m5602/m5602_mt9m111.h b/drivers/media/video/gspca/m5602/m5602_mt9m111.h
index b3de77823091..b1f0c492036a 100644
--- a/drivers/media/video/gspca/m5602/m5602_mt9m111.h
+++ b/drivers/media/video/gspca/m5602/m5602_mt9m111.h
@@ -70,7 +70,7 @@
#define MT9M111_COLORPIPE 0x01
#define MT9M111_CAMERA_CONTROL 0x02
-#define MT9M111_RESET (1 << 0)
+#define MT9M111_RESET (1 << 0)
#define MT9M111_RESTART (1 << 1)
#define MT9M111_ANALOG_STANDBY (1 << 2)
#define MT9M111_CHIP_ENABLE (1 << 3)
@@ -97,7 +97,7 @@
#define MT9M111_2D_DEFECT_CORRECTION_ENABLE (1 << 0)
#define INITIAL_MAX_GAIN 64
-#define MT9M111_DEFAULT_GAIN 283
+#define MT9M111_DEFAULT_GAIN 283
#define MT9M111_GREEN_GAIN_DEFAULT 0x20
#define MT9M111_BLUE_GAIN_DEFAULT 0x20
#define MT9M111_RED_GAIN_DEFAULT 0x20
@@ -125,8 +125,7 @@ static const struct m5602_sensor mt9m111 = {
.start = mt9m111_start,
};
-static const unsigned char preinit_mt9m111[][4] =
-{
+static const unsigned char preinit_mt9m111[][4] = {
{BRIDGE, M5602_XB_MCU_CLK_DIV, 0x02, 0x00},
{BRIDGE, M5602_XB_MCU_CLK_CTRL, 0xb0, 0x00},
{BRIDGE, M5602_XB_SEN_CLK_DIV, 0x00, 0x00},
@@ -165,8 +164,7 @@ static const unsigned char preinit_mt9m111[][4] =
{BRIDGE, M5602_XB_I2C_CLK_DIV, 0x0a, 0x00}
};
-static const unsigned char init_mt9m111[][4] =
-{
+static const unsigned char init_mt9m111[][4] = {
{BRIDGE, M5602_XB_MCU_CLK_DIV, 0x02, 0x00},
{BRIDGE, M5602_XB_MCU_CLK_CTRL, 0xb0, 0x00},
{BRIDGE, M5602_XB_SEN_CLK_DIV, 0x00, 0x00},
@@ -257,8 +255,7 @@ static const unsigned char init_mt9m111[][4] =
{SENSOR, MT9M111_SC_SHUTTER_WIDTH, 0x01, 0x90},
};
-static const unsigned char start_mt9m111[][4] =
-{
+static const unsigned char start_mt9m111[][4] = {
{BRIDGE, M5602_XB_SEN_CLK_DIV, 0x06, 0x00},
{BRIDGE, M5602_XB_SEN_CLK_CTRL, 0xb0, 0x00},
{BRIDGE, M5602_XB_ADC_CTRL, 0xc0, 0x00},
@@ -271,5 +268,4 @@ static const unsigned char start_mt9m111[][4] =
{BRIDGE, M5602_XB_VSYNC_PARA, 0x00, 0x00},
{BRIDGE, M5602_XB_VSYNC_PARA, 0x00, 0x00},
};
-
#endif
diff --git a/drivers/media/video/gspca/m5602/m5602_ov7660.c b/drivers/media/video/gspca/m5602/m5602_ov7660.c
index 62c1cbf06666..b12f60464b3b 100644
--- a/drivers/media/video/gspca/m5602/m5602_ov7660.c
+++ b/drivers/media/video/gspca/m5602/m5602_ov7660.c
@@ -54,13 +54,13 @@ static const struct ctrl ov7660_ctrls[] = {
#define AUTO_WHITE_BALANCE_IDX 4
{
{
- .id = V4L2_CID_AUTO_WHITE_BALANCE,
- .type = V4L2_CTRL_TYPE_BOOLEAN,
- .name = "auto white balance",
- .minimum = 0,
- .maximum = 1,
- .step = 1,
- .default_value = 1
+ .id = V4L2_CID_AUTO_WHITE_BALANCE,
+ .type = V4L2_CTRL_TYPE_BOOLEAN,
+ .name = "auto white balance",
+ .minimum = 0,
+ .maximum = 1,
+ .step = 1,
+ .default_value = 1
},
.set = ov7660_set_auto_white_balance,
.get = ov7660_get_auto_white_balance
@@ -68,13 +68,13 @@ static const struct ctrl ov7660_ctrls[] = {
#define AUTO_GAIN_CTRL_IDX 5
{
{
- .id = V4L2_CID_AUTOGAIN,
- .type = V4L2_CTRL_TYPE_BOOLEAN,
- .name = "auto gain control",
- .minimum = 0,
- .maximum = 1,
- .step = 1,
- .default_value = 1
+ .id = V4L2_CID_AUTOGAIN,
+ .type = V4L2_CTRL_TYPE_BOOLEAN,
+ .name = "auto gain control",
+ .minimum = 0,
+ .maximum = 1,
+ .step = 1,
+ .default_value = 1
},
.set = ov7660_set_auto_gain,
.get = ov7660_get_auto_gain
@@ -82,13 +82,13 @@ static const struct ctrl ov7660_ctrls[] = {
#define AUTO_EXPOSURE_IDX 6
{
{
- .id = V4L2_CID_EXPOSURE_AUTO,
- .type = V4L2_CTRL_TYPE_BOOLEAN,
- .name = "auto exposure",
- .minimum = 0,
- .maximum = 1,
- .step = 1,
- .default_value = 1
+ .id = V4L2_CID_EXPOSURE_AUTO,
+ .type = V4L2_CTRL_TYPE_BOOLEAN,
+ .name = "auto exposure",
+ .minimum = 0,
+ .maximum = 1,
+ .step = 1,
+ .default_value = 1
},
.set = ov7660_set_auto_exposure,
.get = ov7660_get_auto_exposure
@@ -96,13 +96,13 @@ static const struct ctrl ov7660_ctrls[] = {
#define HFLIP_IDX 7
{
{
- .id = V4L2_CID_HFLIP,
- .type = V4L2_CTRL_TYPE_BOOLEAN,
- .name = "horizontal flip",
- .minimum = 0,
- .maximum = 1,
- .step = 1,
- .default_value = 0
+ .id = V4L2_CID_HFLIP,
+ .type = V4L2_CTRL_TYPE_BOOLEAN,
+ .name = "horizontal flip",
+ .minimum = 0,
+ .maximum = 1,
+ .step = 1,
+ .default_value = 0
},
.set = ov7660_set_hflip,
.get = ov7660_get_hflip
@@ -110,13 +110,13 @@ static const struct ctrl ov7660_ctrls[] = {
#define VFLIP_IDX 8
{
{
- .id = V4L2_CID_VFLIP,
- .type = V4L2_CTRL_TYPE_BOOLEAN,
- .name = "vertical flip",
- .minimum = 0,
- .maximum = 1,
- .step = 1,
- .default_value = 0
+ .id = V4L2_CID_VFLIP,
+ .type = V4L2_CTRL_TYPE_BOOLEAN,
+ .name = "vertical flip",
+ .minimum = 0,
+ .maximum = 1,
+ .step = 1,
+ .default_value = 0
},
.set = ov7660_set_vflip,
.get = ov7660_get_vflip
diff --git a/drivers/media/video/gspca/m5602/m5602_ov7660.h b/drivers/media/video/gspca/m5602/m5602_ov7660.h
index 4d9dcf29da2e..2efd607987ec 100644
--- a/drivers/media/video/gspca/m5602/m5602_ov7660.h
+++ b/drivers/media/video/gspca/m5602/m5602_ov7660.h
@@ -80,7 +80,7 @@
#define OV7660_DEFAULT_GAIN 0x0e
#define OV7660_DEFAULT_RED_GAIN 0x80
-#define OV7660_DEFAULT_BLUE_GAIN 0x80
+#define OV7660_DEFAULT_BLUE_GAIN 0x80
#define OV7660_DEFAULT_SATURATION 0x00
#define OV7660_DEFAULT_EXPOSURE 0x20
@@ -105,8 +105,7 @@ static const struct m5602_sensor ov7660 = {
.disconnect = ov7660_disconnect,
};
-static const unsigned char preinit_ov7660[][4] =
-{
+static const unsigned char preinit_ov7660[][4] = {
{BRIDGE, M5602_XB_MCU_CLK_DIV, 0x02},
{BRIDGE, M5602_XB_MCU_CLK_CTRL, 0xb0},
{BRIDGE, M5602_XB_SEN_CLK_DIV, 0x00},
@@ -140,8 +139,7 @@ static const unsigned char preinit_ov7660[][4] =
{BRIDGE, M5602_XB_GPIO_EN_L, 0x00}
};
-static const unsigned char init_ov7660[][4] =
-{
+static const unsigned char init_ov7660[][4] = {
{BRIDGE, M5602_XB_MCU_CLK_DIV, 0x02},
{BRIDGE, M5602_XB_MCU_CLK_CTRL, 0xb0},
{BRIDGE, M5602_XB_SEN_CLK_DIV, 0x00},
@@ -259,5 +257,4 @@ static const unsigned char init_ov7660[][4] =
{BRIDGE, M5602_XB_SEN_CLK_DIV, 0x00},
{BRIDGE, M5602_XB_SEN_CLK_CTRL, 0xb0},
};
-
#endif
diff --git a/drivers/media/video/gspca/m5602/m5602_ov9650.c b/drivers/media/video/gspca/m5602/m5602_ov9650.c
index 069ba0044f8b..8ded8b100576 100644
--- a/drivers/media/video/gspca/m5602/m5602_ov9650.c
+++ b/drivers/media/video/gspca/m5602/m5602_ov9650.c
@@ -121,8 +121,8 @@ static const struct ctrl ov9650_ctrls[] = {
.minimum = 0x00,
.maximum = 0x1ff,
.step = 0x4,
- .default_value = EXPOSURE_DEFAULT,
- .flags = V4L2_CTRL_FLAG_SLIDER
+ .default_value = EXPOSURE_DEFAULT,
+ .flags = V4L2_CTRL_FLAG_SLIDER
},
.set = ov9650_set_exposure,
.get = ov9650_get_exposure
@@ -146,13 +146,13 @@ static const struct ctrl ov9650_ctrls[] = {
{
{
.id = V4L2_CID_RED_BALANCE,
- .type = V4L2_CTRL_TYPE_INTEGER,
- .name = "red balance",
- .minimum = 0x00,
- .maximum = 0xff,
- .step = 0x1,
- .default_value = RED_GAIN_DEFAULT,
- .flags = V4L2_CTRL_FLAG_SLIDER
+ .type = V4L2_CTRL_TYPE_INTEGER,
+ .name = "red balance",
+ .minimum = 0x00,
+ .maximum = 0xff,
+ .step = 0x1,
+ .default_value = RED_GAIN_DEFAULT,
+ .flags = V4L2_CTRL_FLAG_SLIDER
},
.set = ov9650_set_red_balance,
.get = ov9650_get_red_balance
@@ -161,13 +161,13 @@ static const struct ctrl ov9650_ctrls[] = {
{
{
.id = V4L2_CID_BLUE_BALANCE,
- .type = V4L2_CTRL_TYPE_INTEGER,
- .name = "blue balance",
- .minimum = 0x00,
- .maximum = 0xff,
- .step = 0x1,
- .default_value = BLUE_GAIN_DEFAULT,
- .flags = V4L2_CTRL_FLAG_SLIDER
+ .type = V4L2_CTRL_TYPE_INTEGER,
+ .name = "blue balance",
+ .minimum = 0x00,
+ .maximum = 0xff,
+ .step = 0x1,
+ .default_value = BLUE_GAIN_DEFAULT,
+ .flags = V4L2_CTRL_FLAG_SLIDER
},
.set = ov9650_set_blue_balance,
.get = ov9650_get_blue_balance
@@ -175,13 +175,13 @@ static const struct ctrl ov9650_ctrls[] = {
#define HFLIP_IDX 4
{
{
- .id = V4L2_CID_HFLIP,
- .type = V4L2_CTRL_TYPE_BOOLEAN,
- .name = "horizontal flip",
- .minimum = 0,
- .maximum = 1,
- .step = 1,
- .default_value = 0
+ .id = V4L2_CID_HFLIP,
+ .type = V4L2_CTRL_TYPE_BOOLEAN,
+ .name = "horizontal flip",
+ .minimum = 0,
+ .maximum = 1,
+ .step = 1,
+ .default_value = 0
},
.set = ov9650_set_hflip,
.get = ov9650_get_hflip
@@ -189,13 +189,13 @@ static const struct ctrl ov9650_ctrls[] = {
#define VFLIP_IDX 5
{
{
- .id = V4L2_CID_VFLIP,
- .type = V4L2_CTRL_TYPE_BOOLEAN,
- .name = "vertical flip",
- .minimum = 0,
- .maximum = 1,
- .step = 1,
- .default_value = 0
+ .id = V4L2_CID_VFLIP,
+ .type = V4L2_CTRL_TYPE_BOOLEAN,
+ .name = "vertical flip",
+ .minimum = 0,
+ .maximum = 1,
+ .step = 1,
+ .default_value = 0
},
.set = ov9650_set_vflip,
.get = ov9650_get_vflip
@@ -203,13 +203,13 @@ static const struct ctrl ov9650_ctrls[] = {
#define AUTO_WHITE_BALANCE_IDX 6
{
{
- .id = V4L2_CID_AUTO_WHITE_BALANCE,
- .type = V4L2_CTRL_TYPE_BOOLEAN,
- .name = "auto white balance",
- .minimum = 0,
- .maximum = 1,
- .step = 1,
- .default_value = 1
+ .id = V4L2_CID_AUTO_WHITE_BALANCE,
+ .type = V4L2_CTRL_TYPE_BOOLEAN,
+ .name = "auto white balance",
+ .minimum = 0,
+ .maximum = 1,
+ .step = 1,
+ .default_value = 1
},
.set = ov9650_set_auto_white_balance,
.get = ov9650_get_auto_white_balance
@@ -217,13 +217,13 @@ static const struct ctrl ov9650_ctrls[] = {
#define AUTO_GAIN_CTRL_IDX 7
{
{
- .id = V4L2_CID_AUTOGAIN,
- .type = V4L2_CTRL_TYPE_BOOLEAN,
- .name = "auto gain control",
- .minimum = 0,
- .maximum = 1,
- .step = 1,
- .default_value = 1
+ .id = V4L2_CID_AUTOGAIN,
+ .type = V4L2_CTRL_TYPE_BOOLEAN,
+ .name = "auto gain control",
+ .minimum = 0,
+ .maximum = 1,
+ .step = 1,
+ .default_value = 1
},
.set = ov9650_set_auto_gain,
.get = ov9650_get_auto_gain
@@ -231,13 +231,13 @@ static const struct ctrl ov9650_ctrls[] = {
#define AUTO_EXPOSURE_IDX 8
{
{
- .id = V4L2_CID_EXPOSURE_AUTO,
- .type = V4L2_CTRL_TYPE_BOOLEAN,
- .name = "auto exposure",
- .minimum = 0,
- .maximum = 1,
- .step = 1,
- .default_value = 1
+ .id = V4L2_CID_EXPOSURE_AUTO,
+ .type = V4L2_CTRL_TYPE_BOOLEAN,
+ .name = "auto exposure",
+ .minimum = 0,
+ .maximum = 1,
+ .step = 1,
+ .default_value = 1
},
.set = ov9650_set_auto_exposure,
.get = ov9650_get_auto_exposure
diff --git a/drivers/media/video/gspca/m5602/m5602_ov9650.h b/drivers/media/video/gspca/m5602/m5602_ov9650.h
index c98c40d69e05..da9a129b739d 100644
--- a/drivers/media/video/gspca/m5602/m5602_ov9650.h
+++ b/drivers/media/video/gspca/m5602/m5602_ov9650.h
@@ -110,7 +110,7 @@
#define OV9650_VARIOPIXEL (1 << 2)
#define OV9650_SYSTEM_CLK_SEL (1 << 7)
-#define OV9650_SLAM_MODE (1 << 4)
+#define OV9650_SLAM_MODE (1 << 4)
#define OV9650_QVGA_VARIOPIXEL (1 << 7)
@@ -154,8 +154,7 @@ static const struct m5602_sensor ov9650 = {
.disconnect = ov9650_disconnect,
};
-static const unsigned char preinit_ov9650[][3] =
-{
+static const unsigned char preinit_ov9650[][3] = {
/* [INITCAM] */
{BRIDGE, M5602_XB_MCU_CLK_DIV, 0x02},
{BRIDGE, M5602_XB_MCU_CLK_CTRL, 0xb0},
@@ -180,8 +179,7 @@ static const unsigned char preinit_ov9650[][3] =
{SENSOR, OV9650_OFON, 0x40}
};
-static const unsigned char init_ov9650[][3] =
-{
+static const unsigned char init_ov9650[][3] = {
/* [INITCAM] */
{BRIDGE, M5602_XB_MCU_CLK_DIV, 0x02},
{BRIDGE, M5602_XB_MCU_CLK_CTRL, 0xb0},
@@ -297,8 +295,7 @@ static const unsigned char init_ov9650[][3] =
{SENSOR, OV9650_COM2, OV9650_SOFT_SLEEP | OV9650_OUTPUT_DRIVE_2X},
};
-static const unsigned char res_init_ov9650[][3] =
-{
+static const unsigned char res_init_ov9650[][3] = {
{SENSOR, OV9650_COM2, OV9650_OUTPUT_DRIVE_2X},
{BRIDGE, M5602_XB_LINE_OF_FRAME_H, 0x82},
@@ -307,5 +304,4 @@ static const unsigned char res_init_ov9650[][3] =
{BRIDGE, M5602_XB_PIX_OF_LINE_L, 0x00},
{BRIDGE, M5602_XB_SIG_INI, 0x01}
};
-
#endif
diff --git a/drivers/media/video/gspca/m5602/m5602_po1030.c b/drivers/media/video/gspca/m5602/m5602_po1030.c
index 925b87d66f40..1febd34c2f05 100644
--- a/drivers/media/video/gspca/m5602/m5602_po1030.c
+++ b/drivers/media/video/gspca/m5602/m5602_po1030.c
@@ -58,14 +58,14 @@ static const struct ctrl po1030_ctrls[] = {
#define GAIN_IDX 0
{
{
- .id = V4L2_CID_GAIN,
- .type = V4L2_CTRL_TYPE_INTEGER,
- .name = "gain",
- .minimum = 0x00,
- .maximum = 0x4f,
- .step = 0x1,
- .default_value = PO1030_GLOBAL_GAIN_DEFAULT,
- .flags = V4L2_CTRL_FLAG_SLIDER
+ .id = V4L2_CID_GAIN,
+ .type = V4L2_CTRL_TYPE_INTEGER,
+ .name = "gain",
+ .minimum = 0x00,
+ .maximum = 0x4f,
+ .step = 0x1,
+ .default_value = PO1030_GLOBAL_GAIN_DEFAULT,
+ .flags = V4L2_CTRL_FLAG_SLIDER
},
.set = po1030_set_gain,
.get = po1030_get_gain
@@ -73,14 +73,14 @@ static const struct ctrl po1030_ctrls[] = {
#define EXPOSURE_IDX 1
{
{
- .id = V4L2_CID_EXPOSURE,
- .type = V4L2_CTRL_TYPE_INTEGER,
- .name = "exposure",
- .minimum = 0x00,
- .maximum = 0x02ff,
- .step = 0x1,
- .default_value = PO1030_EXPOSURE_DEFAULT,
- .flags = V4L2_CTRL_FLAG_SLIDER
+ .id = V4L2_CID_EXPOSURE,
+ .type = V4L2_CTRL_TYPE_INTEGER,
+ .name = "exposure",
+ .minimum = 0x00,
+ .maximum = 0x02ff,
+ .step = 0x1,
+ .default_value = PO1030_EXPOSURE_DEFAULT,
+ .flags = V4L2_CTRL_FLAG_SLIDER
},
.set = po1030_set_exposure,
.get = po1030_get_exposure
@@ -88,14 +88,14 @@ static const struct ctrl po1030_ctrls[] = {
#define RED_BALANCE_IDX 2
{
{
- .id = V4L2_CID_RED_BALANCE,
- .type = V4L2_CTRL_TYPE_INTEGER,
- .name = "red balance",
- .minimum = 0x00,
- .maximum = 0xff,
- .step = 0x1,
- .default_value = PO1030_RED_GAIN_DEFAULT,
- .flags = V4L2_CTRL_FLAG_SLIDER
+ .id = V4L2_CID_RED_BALANCE,
+ .type = V4L2_CTRL_TYPE_INTEGER,
+ .name = "red balance",
+ .minimum = 0x00,
+ .maximum = 0xff,
+ .step = 0x1,
+ .default_value = PO1030_RED_GAIN_DEFAULT,
+ .flags = V4L2_CTRL_FLAG_SLIDER
},
.set = po1030_set_red_balance,
.get = po1030_get_red_balance
@@ -103,14 +103,14 @@ static const struct ctrl po1030_ctrls[] = {
#define BLUE_BALANCE_IDX 3
{
{
- .id = V4L2_CID_BLUE_BALANCE,
- .type = V4L2_CTRL_TYPE_INTEGER,
- .name = "blue balance",
- .minimum = 0x00,
- .maximum = 0xff,
- .step = 0x1,
- .default_value = PO1030_BLUE_GAIN_DEFAULT,
- .flags = V4L2_CTRL_FLAG_SLIDER
+ .id = V4L2_CID_BLUE_BALANCE,
+ .type = V4L2_CTRL_TYPE_INTEGER,
+ .name = "blue balance",
+ .minimum = 0x00,
+ .maximum = 0xff,
+ .step = 0x1,
+ .default_value = PO1030_BLUE_GAIN_DEFAULT,
+ .flags = V4L2_CTRL_FLAG_SLIDER
},
.set = po1030_set_blue_balance,
.get = po1030_get_blue_balance
@@ -118,13 +118,13 @@ static const struct ctrl po1030_ctrls[] = {
#define HFLIP_IDX 4
{
{
- .id = V4L2_CID_HFLIP,
- .type = V4L2_CTRL_TYPE_BOOLEAN,
- .name = "horizontal flip",
- .minimum = 0,
- .maximum = 1,
- .step = 1,
- .default_value = 0,
+ .id = V4L2_CID_HFLIP,
+ .type = V4L2_CTRL_TYPE_BOOLEAN,
+ .name = "horizontal flip",
+ .minimum = 0,
+ .maximum = 1,
+ .step = 1,
+ .default_value = 0,
},
.set = po1030_set_hflip,
.get = po1030_get_hflip
@@ -132,13 +132,13 @@ static const struct ctrl po1030_ctrls[] = {
#define VFLIP_IDX 5
{
{
- .id = V4L2_CID_VFLIP,
- .type = V4L2_CTRL_TYPE_BOOLEAN,
- .name = "vertical flip",
- .minimum = 0,
- .maximum = 1,
- .step = 1,
- .default_value = 0,
+ .id = V4L2_CID_VFLIP,
+ .type = V4L2_CTRL_TYPE_BOOLEAN,
+ .name = "vertical flip",
+ .minimum = 0,
+ .maximum = 1,
+ .step = 1,
+ .default_value = 0,
},
.set = po1030_set_vflip,
.get = po1030_get_vflip
@@ -146,13 +146,13 @@ static const struct ctrl po1030_ctrls[] = {
#define AUTO_WHITE_BALANCE_IDX 6
{
{
- .id = V4L2_CID_AUTO_WHITE_BALANCE,
- .type = V4L2_CTRL_TYPE_BOOLEAN,
- .name = "auto white balance",
- .minimum = 0,
- .maximum = 1,
- .step = 1,
- .default_value = 0,
+ .id = V4L2_CID_AUTO_WHITE_BALANCE,
+ .type = V4L2_CTRL_TYPE_BOOLEAN,
+ .name = "auto white balance",
+ .minimum = 0,
+ .maximum = 1,
+ .step = 1,
+ .default_value = 0,
},
.set = po1030_set_auto_white_balance,
.get = po1030_get_auto_white_balance
@@ -160,13 +160,13 @@ static const struct ctrl po1030_ctrls[] = {
#define AUTO_EXPOSURE_IDX 7
{
{
- .id = V4L2_CID_EXPOSURE_AUTO,
- .type = V4L2_CTRL_TYPE_BOOLEAN,
- .name = "auto exposure",
- .minimum = 0,
- .maximum = 1,
- .step = 1,
- .default_value = 0,
+ .id = V4L2_CID_EXPOSURE_AUTO,
+ .type = V4L2_CTRL_TYPE_BOOLEAN,
+ .name = "auto exposure",
+ .minimum = 0,
+ .maximum = 1,
+ .step = 1,
+ .default_value = 0,
},
.set = po1030_set_auto_exposure,
.get = po1030_get_auto_exposure
@@ -174,14 +174,14 @@ static const struct ctrl po1030_ctrls[] = {
#define GREEN_BALANCE_IDX 8
{
{
- .id = M5602_V4L2_CID_GREEN_BALANCE,
- .type = V4L2_CTRL_TYPE_INTEGER,
- .name = "green balance",
- .minimum = 0x00,
- .maximum = 0xff,
- .step = 0x1,
- .default_value = PO1030_GREEN_GAIN_DEFAULT,
- .flags = V4L2_CTRL_FLAG_SLIDER
+ .id = M5602_V4L2_CID_GREEN_BALANCE,
+ .type = V4L2_CTRL_TYPE_INTEGER,
+ .name = "green balance",
+ .minimum = 0x00,
+ .maximum = 0xff,
+ .step = 0x1,
+ .default_value = PO1030_GREEN_GAIN_DEFAULT,
+ .flags = V4L2_CTRL_FLAG_SLIDER
},
.set = po1030_set_green_balance,
.get = po1030_get_green_balance
diff --git a/drivers/media/video/gspca/m5602/m5602_po1030.h b/drivers/media/video/gspca/m5602/m5602_po1030.h
index 1ea380b2bbe7..338359596398 100644
--- a/drivers/media/video/gspca/m5602/m5602_po1030.h
+++ b/drivers/media/video/gspca/m5602/m5602_po1030.h
@@ -139,9 +139,9 @@
#define PO1030_GLOBAL_GAIN_DEFAULT 0x12
#define PO1030_EXPOSURE_DEFAULT 0x0085
-#define PO1030_BLUE_GAIN_DEFAULT 0x36
-#define PO1030_RED_GAIN_DEFAULT 0x36
-#define PO1030_GREEN_GAIN_DEFAULT 0x40
+#define PO1030_BLUE_GAIN_DEFAULT 0x36
+#define PO1030_RED_GAIN_DEFAULT 0x36
+#define PO1030_GREEN_GAIN_DEFAULT 0x40
/*****************************************************************************/
@@ -166,8 +166,7 @@ static const struct m5602_sensor po1030 = {
.disconnect = po1030_disconnect,
};
-static const unsigned char preinit_po1030[][3] =
-{
+static const unsigned char preinit_po1030[][3] = {
{BRIDGE, M5602_XB_MCU_CLK_DIV, 0x02},
{BRIDGE, M5602_XB_MCU_CLK_CTRL, 0xb0},
{BRIDGE, M5602_XB_SEN_CLK_DIV, 0x00},
@@ -193,8 +192,7 @@ static const unsigned char preinit_po1030[][3] =
{BRIDGE, M5602_XB_GPIO_DAT, 0x00}
};
-static const unsigned char init_po1030[][3] =
-{
+static const unsigned char init_po1030[][3] = {
{BRIDGE, M5602_XB_MCU_CLK_DIV, 0x02},
{BRIDGE, M5602_XB_MCU_CLK_CTRL, 0xb0},
{BRIDGE, M5602_XB_SEN_CLK_DIV, 0x00},
@@ -271,5 +269,4 @@ static const unsigned char init_po1030[][3] =
{BRIDGE, M5602_XB_GPIO_EN_H, 0x06},
{BRIDGE, M5602_XB_GPIO_EN_L, 0x00},
};
-
#endif
diff --git a/drivers/media/video/gspca/m5602/m5602_s5k4aa.c b/drivers/media/video/gspca/m5602/m5602_s5k4aa.c
index da0a38c78708..d27280be9852 100644
--- a/drivers/media/video/gspca/m5602/m5602_s5k4aa.c
+++ b/drivers/media/video/gspca/m5602/m5602_s5k4aa.c
@@ -143,13 +143,13 @@ static const struct ctrl s5k4aa_ctrls[] = {
#define VFLIP_IDX 0
{
{
- .id = V4L2_CID_VFLIP,
- .type = V4L2_CTRL_TYPE_BOOLEAN,
- .name = "vertical flip",
- .minimum = 0,
- .maximum = 1,
- .step = 1,
- .default_value = 0
+ .id = V4L2_CID_VFLIP,
+ .type = V4L2_CTRL_TYPE_BOOLEAN,
+ .name = "vertical flip",
+ .minimum = 0,
+ .maximum = 1,
+ .step = 1,
+ .default_value = 0
},
.set = s5k4aa_set_vflip,
.get = s5k4aa_get_vflip
@@ -157,13 +157,13 @@ static const struct ctrl s5k4aa_ctrls[] = {
#define HFLIP_IDX 1
{
{
- .id = V4L2_CID_HFLIP,
- .type = V4L2_CTRL_TYPE_BOOLEAN,
- .name = "horizontal flip",
- .minimum = 0,
- .maximum = 1,
- .step = 1,
- .default_value = 0
+ .id = V4L2_CID_HFLIP,
+ .type = V4L2_CTRL_TYPE_BOOLEAN,
+ .name = "horizontal flip",
+ .minimum = 0,
+ .maximum = 1,
+ .step = 1,
+ .default_value = 0
},
.set = s5k4aa_set_hflip,
.get = s5k4aa_get_hflip
diff --git a/drivers/media/video/gspca/m5602/m5602_s5k4aa.h b/drivers/media/video/gspca/m5602/m5602_s5k4aa.h
index 4440da4e7f0f..8cc7a3f6da72 100644
--- a/drivers/media/video/gspca/m5602/m5602_s5k4aa.h
+++ b/drivers/media/video/gspca/m5602/m5602_s5k4aa.h
@@ -83,8 +83,7 @@ static const struct m5602_sensor s5k4aa = {
.disconnect = s5k4aa_disconnect,
};
-static const unsigned char preinit_s5k4aa[][4] =
-{
+static const unsigned char preinit_s5k4aa[][4] = {
{BRIDGE, M5602_XB_MCU_CLK_DIV, 0x02, 0x00},
{BRIDGE, M5602_XB_MCU_CLK_CTRL, 0xb0, 0x00},
{BRIDGE, M5602_XB_SEN_CLK_DIV, 0x00, 0x00},
@@ -127,8 +126,7 @@ static const unsigned char preinit_s5k4aa[][4] =
{SENSOR, S5K4AA_PAGE_MAP, 0x00, 0x00}
};
-static const unsigned char init_s5k4aa[][4] =
-{
+static const unsigned char init_s5k4aa[][4] = {
{BRIDGE, M5602_XB_MCU_CLK_DIV, 0x02, 0x00},
{BRIDGE, M5602_XB_MCU_CLK_CTRL, 0xb0, 0x00},
{BRIDGE, M5602_XB_SEN_CLK_DIV, 0x00, 0x00},
@@ -179,8 +177,7 @@ static const unsigned char init_s5k4aa[][4] =
{SENSOR, 0x37, 0x00, 0x00},
};
-static const unsigned char VGA_s5k4aa[][4] =
-{
+static const unsigned char VGA_s5k4aa[][4] = {
{BRIDGE, M5602_XB_SEN_CLK_DIV, 0x06, 0x00},
{BRIDGE, M5602_XB_SEN_CLK_CTRL, 0xb0, 0x00},
{BRIDGE, M5602_XB_ADC_CTRL, 0xc0, 0x00},
@@ -235,8 +232,7 @@ static const unsigned char VGA_s5k4aa[][4] =
{SENSOR, 0x02, 0x0e, 0x00},
};
-static const unsigned char SXGA_s5k4aa[][4] =
-{
+static const unsigned char SXGA_s5k4aa[][4] = {
{BRIDGE, M5602_XB_SEN_CLK_DIV, 0x06, 0x00},
{BRIDGE, M5602_XB_SEN_CLK_CTRL, 0xb0, 0x00},
{BRIDGE, M5602_XB_ADC_CTRL, 0xc0, 0x00},
@@ -284,6 +280,4 @@ static const unsigned char SXGA_s5k4aa[][4] =
{SENSOR, S5K4AA_PAGE_MAP, 0x02, 0x00},
{SENSOR, 0x02, 0x0e, 0x00},
};
-
-
#endif
diff --git a/drivers/media/video/gspca/m5602/m5602_s5k83a.h b/drivers/media/video/gspca/m5602/m5602_s5k83a.h
index 7814b078acde..80a63a236e24 100644
--- a/drivers/media/video/gspca/m5602/m5602_s5k83a.h
+++ b/drivers/media/video/gspca/m5602/m5602_s5k83a.h
@@ -35,7 +35,7 @@
#define S5K83A_MAXIMUM_EXPOSURE 0x3c
#define S5K83A_FLIP_MASK 0x10
#define S5K83A_GPIO_LED_MASK 0x10
-#define S5K83A_GPIO_ROTATION_MASK 0x40
+#define S5K83A_GPIO_ROTATION_MASK 0x40
/*****************************************************************************/
@@ -67,8 +67,7 @@ struct s5k83a_priv {
s32 *settings;
};
-static const unsigned char preinit_s5k83a[][4] =
-{
+static const unsigned char preinit_s5k83a[][4] = {
{BRIDGE, M5602_XB_MCU_CLK_DIV, 0x02, 0x00},
{BRIDGE, M5602_XB_MCU_CLK_CTRL, 0xb0, 0x00},
{BRIDGE, M5602_XB_SEN_CLK_DIV, 0x00, 0x00},
@@ -108,8 +107,7 @@ static const unsigned char preinit_s5k83a[][4] =
/* This could probably be considerably shortened.
I don't have the hardware to experiment with it, patches welcome
*/
-static const unsigned char init_s5k83a[][4] =
-{
+static const unsigned char init_s5k83a[][4] = {
/* The following sequence is useless after a clean boot
but is necessary after resume from suspend */
{BRIDGE, M5602_XB_GPIO_DIR, 0x1d, 0x00},
@@ -166,8 +164,7 @@ static const unsigned char init_s5k83a[][4] =
{SENSOR, 0x00, 0x06, 0x00},
};
-static const unsigned char start_s5k83a[][4] =
-{
+static const unsigned char start_s5k83a[][4] = {
{BRIDGE, M5602_XB_SEN_CLK_DIV, 0x06, 0x00},
{BRIDGE, M5602_XB_SEN_CLK_CTRL, 0xb0, 0x00},
{BRIDGE, M5602_XB_ADC_CTRL, 0xc0, 0x00},
@@ -193,5 +190,4 @@ static const unsigned char start_s5k83a[][4] =
{BRIDGE, M5602_XB_SEN_CLK_DIV, 0x00, 0x00},
{BRIDGE, M5602_XB_SEN_CLK_CTRL, 0xb0, 0x00},
};
-
#endif
diff --git a/drivers/media/video/gspca/mars.c b/drivers/media/video/gspca/mars.c
index 031f7195ce0d..a81536e78698 100644
--- a/drivers/media/video/gspca/mars.c
+++ b/drivers/media/video/gspca/mars.c
@@ -28,14 +28,23 @@ MODULE_AUTHOR("Michel Xhaard <mxhaard@users.sourceforge.net>");
MODULE_DESCRIPTION("GSPCA/Mars USB Camera Driver");
MODULE_LICENSE("GPL");
+/* controls */
+enum e_ctrl {
+ BRIGHTNESS,
+ COLORS,
+ GAMMA,
+ SHARPNESS,
+ ILLUM_TOP,
+ ILLUM_BOT,
+ NCTRLS /* number of controls */
+};
+
/* specific webcam descriptor */
struct sd {
struct gspca_dev gspca_dev; /* !! must be the first item */
- u8 brightness;
- u8 colors;
- u8 gamma;
- u8 sharpness;
+ struct gspca_ctrl ctrls[NCTRLS];
+
u8 quality;
#define QUALITY_MIN 40
#define QUALITY_MAX 70
@@ -45,17 +54,15 @@ struct sd {
};
/* V4L2 controls supported by the driver */
-static int sd_setbrightness(struct gspca_dev *gspca_dev, __s32 val);
-static int sd_getbrightness(struct gspca_dev *gspca_dev, __s32 *val);
-static int sd_setcolors(struct gspca_dev *gspca_dev, __s32 val);
-static int sd_getcolors(struct gspca_dev *gspca_dev, __s32 *val);
-static int sd_setgamma(struct gspca_dev *gspca_dev, __s32 val);
-static int sd_getgamma(struct gspca_dev *gspca_dev, __s32 *val);
-static int sd_setsharpness(struct gspca_dev *gspca_dev, __s32 val);
-static int sd_getsharpness(struct gspca_dev *gspca_dev, __s32 *val);
-
-static const struct ctrl sd_ctrls[] = {
- {
+static void setbrightness(struct gspca_dev *gspca_dev);
+static void setcolors(struct gspca_dev *gspca_dev);
+static void setgamma(struct gspca_dev *gspca_dev);
+static void setsharpness(struct gspca_dev *gspca_dev);
+static int sd_setilluminator1(struct gspca_dev *gspca_dev, __s32 val);
+static int sd_setilluminator2(struct gspca_dev *gspca_dev, __s32 val);
+
+static const struct ctrl sd_ctrls[NCTRLS] = {
+[BRIGHTNESS] = {
{
.id = V4L2_CID_BRIGHTNESS,
.type = V4L2_CTRL_TYPE_INTEGER,
@@ -63,13 +70,11 @@ static const struct ctrl sd_ctrls[] = {
.minimum = 0,
.maximum = 30,
.step = 1,
-#define BRIGHTNESS_DEF 15
- .default_value = BRIGHTNESS_DEF,
+ .default_value = 15,
},
- .set = sd_setbrightness,
- .get = sd_getbrightness,
+ .set_control = setbrightness
},
- {
+[COLORS] = {
{
.id = V4L2_CID_SATURATION,
.type = V4L2_CTRL_TYPE_INTEGER,
@@ -77,13 +82,11 @@ static const struct ctrl sd_ctrls[] = {
.minimum = 1,
.maximum = 255,
.step = 1,
-#define COLOR_DEF 200
- .default_value = COLOR_DEF,
+ .default_value = 200,
},
- .set = sd_setcolors,
- .get = sd_getcolors,
+ .set_control = setcolors
},
- {
+[GAMMA] = {
{
.id = V4L2_CID_GAMMA,
.type = V4L2_CTRL_TYPE_INTEGER,
@@ -91,13 +94,11 @@ static const struct ctrl sd_ctrls[] = {
.minimum = 0,
.maximum = 3,
.step = 1,
-#define GAMMA_DEF 1
- .default_value = GAMMA_DEF,
+ .default_value = 1,
},
- .set = sd_setgamma,
- .get = sd_getgamma,
+ .set_control = setgamma
},
- {
+[SHARPNESS] = {
{
.id = V4L2_CID_SHARPNESS,
.type = V4L2_CTRL_TYPE_INTEGER,
@@ -105,11 +106,35 @@ static const struct ctrl sd_ctrls[] = {
.minimum = 0,
.maximum = 2,
.step = 1,
-#define SHARPNESS_DEF 1
- .default_value = SHARPNESS_DEF,
+ .default_value = 1,
+ },
+ .set_control = setsharpness
+ },
+[ILLUM_TOP] = {
+ {
+ .id = V4L2_CID_ILLUMINATORS_1,
+ .type = V4L2_CTRL_TYPE_BOOLEAN,
+ .name = "Top illuminator",
+ .minimum = 0,
+ .maximum = 1,
+ .step = 1,
+ .default_value = 0,
+ .flags = V4L2_CTRL_FLAG_UPDATE,
+ },
+ .set = sd_setilluminator1
+ },
+[ILLUM_BOT] = {
+ {
+ .id = V4L2_CID_ILLUMINATORS_2,
+ .type = V4L2_CTRL_TYPE_BOOLEAN,
+ .name = "Bottom illuminator",
+ .minimum = 0,
+ .maximum = 1,
+ .step = 1,
+ .default_value = 0,
+ .flags = V4L2_CTRL_FLAG_UPDATE,
},
- .set = sd_setsharpness,
- .get = sd_getsharpness,
+ .set = sd_setilluminator2
},
};
@@ -138,21 +163,25 @@ static const __u8 mi_data[0x20] = {
};
/* write <len> bytes from gspca_dev->usb_buf */
-static int reg_w(struct gspca_dev *gspca_dev,
+static void reg_w(struct gspca_dev *gspca_dev,
int len)
{
int alen, ret;
+ if (gspca_dev->usb_err < 0)
+ return;
+
ret = usb_bulk_msg(gspca_dev->dev,
usb_sndbulkpipe(gspca_dev->dev, 4),
gspca_dev->usb_buf,
len,
&alen,
500); /* timeout in milliseconds */
- if (ret < 0)
- PDEBUG(D_ERR, "reg write [%02x] error %d",
+ if (ret < 0) {
+ err("reg write [%02x] error %d",
gspca_dev->usb_buf[0], ret);
- return ret;
+ gspca_dev->usb_err = ret;
+ }
}
static void mi_w(struct gspca_dev *gspca_dev,
@@ -167,6 +196,59 @@ static void mi_w(struct gspca_dev *gspca_dev,
reg_w(gspca_dev, 4);
}
+static void setbrightness(struct gspca_dev *gspca_dev)
+{
+ struct sd *sd = (struct sd *) gspca_dev;
+
+ gspca_dev->usb_buf[0] = 0x61;
+ gspca_dev->usb_buf[1] = sd->ctrls[BRIGHTNESS].val;
+ reg_w(gspca_dev, 2);
+}
+
+static void setcolors(struct gspca_dev *gspca_dev)
+{
+ struct sd *sd = (struct sd *) gspca_dev;
+ s16 val;
+
+ val = sd->ctrls[COLORS].val;
+ gspca_dev->usb_buf[0] = 0x5f;
+ gspca_dev->usb_buf[1] = val << 3;
+ gspca_dev->usb_buf[2] = ((val >> 2) & 0xf8) | 0x04;
+ reg_w(gspca_dev, 3);
+}
+
+static void setgamma(struct gspca_dev *gspca_dev)
+{
+ struct sd *sd = (struct sd *) gspca_dev;
+
+ gspca_dev->usb_buf[0] = 0x06;
+ gspca_dev->usb_buf[1] = sd->ctrls[GAMMA].val * 0x40;
+ reg_w(gspca_dev, 2);
+}
+
+static void setsharpness(struct gspca_dev *gspca_dev)
+{
+ struct sd *sd = (struct sd *) gspca_dev;
+
+ gspca_dev->usb_buf[0] = 0x67;
+ gspca_dev->usb_buf[1] = sd->ctrls[SHARPNESS].val * 4 + 3;
+ reg_w(gspca_dev, 2);
+}
+
+static void setilluminators(struct gspca_dev *gspca_dev)
+{
+ struct sd *sd = (struct sd *) gspca_dev;
+
+ gspca_dev->usb_buf[0] = 0x22;
+ if (sd->ctrls[ILLUM_TOP].val)
+ gspca_dev->usb_buf[1] = 0x76;
+ else if (sd->ctrls[ILLUM_BOT].val)
+ gspca_dev->usb_buf[1] = 0x7a;
+ else
+ gspca_dev->usb_buf[1] = 0x7e;
+ reg_w(gspca_dev, 2);
+}
+
/* this function is called at probe time */
static int sd_config(struct gspca_dev *gspca_dev,
const struct usb_device_id *id)
@@ -177,10 +259,7 @@ static int sd_config(struct gspca_dev *gspca_dev,
cam = &gspca_dev->cam;
cam->cam_mode = vga_mode;
cam->nmodes = ARRAY_SIZE(vga_mode);
- sd->brightness = BRIGHTNESS_DEF;
- sd->colors = COLOR_DEF;
- sd->gamma = GAMMA_DEF;
- sd->sharpness = SHARPNESS_DEF;
+ cam->ctrls = sd->ctrls;
sd->quality = QUALITY_DEF;
gspca_dev->nbalt = 9; /* use the altsetting 08 */
return 0;
@@ -189,13 +268,13 @@ static int sd_config(struct gspca_dev *gspca_dev,
/* this function is called at probe and resume time */
static int sd_init(struct gspca_dev *gspca_dev)
{
+ gspca_dev->ctrl_inac = (1 << ILLUM_TOP) | (1 << ILLUM_BOT);
return 0;
}
static int sd_start(struct gspca_dev *gspca_dev)
{
struct sd *sd = (struct sd *) gspca_dev;
- int err_code;
u8 *data;
int i;
@@ -208,9 +287,7 @@ static int sd_start(struct gspca_dev *gspca_dev)
data[0] = 0x01; /* address */
data[1] = 0x01;
- err_code = reg_w(gspca_dev, 2);
- if (err_code < 0)
- return err_code;
+ reg_w(gspca_dev, 2);
/*
Initialize the MR97113 chip register
@@ -223,7 +300,7 @@ static int sd_start(struct gspca_dev *gspca_dev)
data[5] = 0x30; /* reg 4, MI, PAS5101 :
* 0x30 for 24mhz , 0x28 for 12mhz */
data[6] = 0x02; /* reg 5, H start - was 0x04 */
- data[7] = sd->gamma * 0x40; /* reg 0x06: gamma */
+ data[7] = sd->ctrls[GAMMA].val * 0x40; /* reg 0x06: gamma */
data[8] = 0x01; /* reg 7, V start - was 0x03 */
/* if (h_size == 320 ) */
/* data[9]= 0x56; * reg 8, 24MHz, 2:1 scale down */
@@ -232,16 +309,12 @@ static int sd_start(struct gspca_dev *gspca_dev)
/*jfm: from win trace*/
data[10] = 0x18;
- err_code = reg_w(gspca_dev, 11);
- if (err_code < 0)
- return err_code;
+ reg_w(gspca_dev, 11);
data[0] = 0x23; /* address */
data[1] = 0x09; /* reg 35, append frame header */
- err_code = reg_w(gspca_dev, 2);
- if (err_code < 0)
- return err_code;
+ reg_w(gspca_dev, 2);
data[0] = 0x3c; /* address */
/* if (gspca_dev->width == 1280) */
@@ -250,9 +323,7 @@ static int sd_start(struct gspca_dev *gspca_dev)
/* else */
data[1] = 50; /* 50 reg 60, pc-cam frame size
* (unit: 4KB) 200KB */
- err_code = reg_w(gspca_dev, 2);
- if (err_code < 0)
- return err_code;
+ reg_w(gspca_dev, 2);
/* auto dark-gain */
data[0] = 0x5e; /* address */
@@ -261,37 +332,29 @@ static int sd_start(struct gspca_dev *gspca_dev)
/* reg 0x5f/0x60 (LE) = saturation */
/* h (60): xxxx x100
* l (5f): xxxx x000 */
- data[2] = sd->colors << 3;
- data[3] = ((sd->colors >> 2) & 0xf8) | 0x04;
- data[4] = sd->brightness; /* reg 0x61 = brightness */
+ data[2] = sd->ctrls[COLORS].val << 3;
+ data[3] = ((sd->ctrls[COLORS].val >> 2) & 0xf8) | 0x04;
+ data[4] = sd->ctrls[BRIGHTNESS].val; /* reg 0x61 = brightness */
data[5] = 0x00;
- err_code = reg_w(gspca_dev, 6);
- if (err_code < 0)
- return err_code;
+ reg_w(gspca_dev, 6);
data[0] = 0x67;
/*jfm: from win trace*/
- data[1] = sd->sharpness * 4 + 3;
+ data[1] = sd->ctrls[SHARPNESS].val * 4 + 3;
data[2] = 0x14;
- err_code = reg_w(gspca_dev, 3);
- if (err_code < 0)
- return err_code;
+ reg_w(gspca_dev, 3);
data[0] = 0x69;
data[1] = 0x2f;
data[2] = 0x28;
data[3] = 0x42;
- err_code = reg_w(gspca_dev, 4);
- if (err_code < 0)
- return err_code;
+ reg_w(gspca_dev, 4);
data[0] = 0x63;
data[1] = 0x07;
- err_code = reg_w(gspca_dev, 2);
+ reg_w(gspca_dev, 2);
/*jfm: win trace - many writes here to reg 0x64*/
- if (err_code < 0)
- return err_code;
/* initialize the MI sensor */
for (i = 0; i < sizeof mi_data; i++)
@@ -300,18 +363,26 @@ static int sd_start(struct gspca_dev *gspca_dev)
data[0] = 0x00;
data[1] = 0x4d; /* ISOC transfering enable... */
reg_w(gspca_dev, 2);
- return 0;
+
+ gspca_dev->ctrl_inac = 0; /* activate the illuminator controls */
+ return gspca_dev->usb_err;
}
static void sd_stopN(struct gspca_dev *gspca_dev)
{
- int result;
+ struct sd *sd = (struct sd *) gspca_dev;
+
+ gspca_dev->ctrl_inac = (1 << ILLUM_TOP) | (1 << ILLUM_BOT);
+ if (sd->ctrls[ILLUM_TOP].val || sd->ctrls[ILLUM_BOT].val) {
+ sd->ctrls[ILLUM_TOP].val = 0;
+ sd->ctrls[ILLUM_BOT].val = 0;
+ setilluminators(gspca_dev);
+ msleep(20);
+ }
gspca_dev->usb_buf[0] = 1;
gspca_dev->usb_buf[1] = 0;
- result = reg_w(gspca_dev, 2);
- if (result < 0)
- PDEBUG(D_ERR, "Camera Stop failed");
+ reg_w(gspca_dev, 2);
}
static void sd_pkt_scan(struct gspca_dev *gspca_dev,
@@ -352,91 +423,28 @@ static void sd_pkt_scan(struct gspca_dev *gspca_dev,
gspca_frame_add(gspca_dev, INTER_PACKET, data, len);
}
-static int sd_setbrightness(struct gspca_dev *gspca_dev, __s32 val)
-{
- struct sd *sd = (struct sd *) gspca_dev;
-
- sd->brightness = val;
- if (gspca_dev->streaming) {
- gspca_dev->usb_buf[0] = 0x61;
- gspca_dev->usb_buf[1] = val;
- reg_w(gspca_dev, 2);
- }
- return 0;
-}
-
-static int sd_getbrightness(struct gspca_dev *gspca_dev, __s32 *val)
+static int sd_setilluminator1(struct gspca_dev *gspca_dev, __s32 val)
{
struct sd *sd = (struct sd *) gspca_dev;
- *val = sd->brightness;
- return 0;
+ /* only one illuminator may be on */
+ sd->ctrls[ILLUM_TOP].val = val;
+ if (val)
+ sd->ctrls[ILLUM_BOT].val = 0;
+ setilluminators(gspca_dev);
+ return gspca_dev->usb_err;
}
-static int sd_setcolors(struct gspca_dev *gspca_dev, __s32 val)
+static int sd_setilluminator2(struct gspca_dev *gspca_dev, __s32 val)
{
struct sd *sd = (struct sd *) gspca_dev;
- sd->colors = val;
- if (gspca_dev->streaming) {
-
- /* see sd_start */
- gspca_dev->usb_buf[0] = 0x5f;
- gspca_dev->usb_buf[1] = sd->colors << 3;
- gspca_dev->usb_buf[2] = ((sd->colors >> 2) & 0xf8) | 0x04;
- reg_w(gspca_dev, 3);
- }
- return 0;
-}
-
-static int sd_getcolors(struct gspca_dev *gspca_dev, __s32 *val)
-{
- struct sd *sd = (struct sd *) gspca_dev;
-
- *val = sd->colors;
- return 0;
-}
-
-static int sd_setgamma(struct gspca_dev *gspca_dev, __s32 val)
-{
- struct sd *sd = (struct sd *) gspca_dev;
-
- sd->gamma = val;
- if (gspca_dev->streaming) {
- gspca_dev->usb_buf[0] = 0x06;
- gspca_dev->usb_buf[1] = val * 0x40;
- reg_w(gspca_dev, 2);
- }
- return 0;
-}
-
-static int sd_getgamma(struct gspca_dev *gspca_dev, __s32 *val)
-{
- struct sd *sd = (struct sd *) gspca_dev;
-
- *val = sd->gamma;
- return 0;
-}
-
-static int sd_setsharpness(struct gspca_dev *gspca_dev, __s32 val)
-{
- struct sd *sd = (struct sd *) gspca_dev;
-
- sd->sharpness = val;
- if (gspca_dev->streaming) {
- gspca_dev->usb_buf[0] = 0x67;
- gspca_dev->usb_buf[1] = val * 4 + 3;
- reg_w(gspca_dev, 2);
- }
- return 0;
-}
-
-static int sd_getsharpness(struct gspca_dev *gspca_dev, __s32 *val)
-{
- struct sd *sd = (struct sd *) gspca_dev;
-
- *val = sd->sharpness;
- return 0;
+ /* only one illuminator may be on */
+ sd->ctrls[ILLUM_BOT].val = val;
+ if (val)
+ sd->ctrls[ILLUM_TOP].val = 0;
+ setilluminators(gspca_dev);
+ return gspca_dev->usb_err;
}
static int sd_set_jcomp(struct gspca_dev *gspca_dev,
@@ -471,7 +479,7 @@ static int sd_get_jcomp(struct gspca_dev *gspca_dev,
static const struct sd_desc sd_desc = {
.name = MODULE_NAME,
.ctrls = sd_ctrls,
- .nctrls = ARRAY_SIZE(sd_ctrls),
+ .nctrls = NCTRLS,
.config = sd_config,
.init = sd_init,
.start = sd_start,
@@ -510,18 +518,11 @@ static struct usb_driver sd_driver = {
/* -- module insert / remove -- */
static int __init sd_mod_init(void)
{
- int ret;
-
- ret = usb_register(&sd_driver);
- if (ret < 0)
- return ret;
- PDEBUG(D_PROBE, "registered");
- return 0;
+ return usb_register(&sd_driver);
}
static void __exit sd_mod_exit(void)
{
usb_deregister(&sd_driver);
- PDEBUG(D_PROBE, "deregistered");
}
module_init(sd_mod_init);
diff --git a/drivers/media/video/gspca/mr97310a.c b/drivers/media/video/gspca/mr97310a.c
index 33744e724eaa..7607a288b51c 100644
--- a/drivers/media/video/gspca/mr97310a.c
+++ b/drivers/media/video/gspca/mr97310a.c
@@ -9,14 +9,14 @@
* is Copyright (C) 2009 Theodore Kilgore <kilgota@auburn.edu>
*
* Support for the control settings for the CIF cameras is
- * Copyright (C) 2009 Hans de Goede <hdgoede@redhat.com> and
+ * Copyright (C) 2009 Hans de Goede <hdegoede@redhat.com> and
* Thomas Kaiser <thomas@kaiser-linux.li>
*
* Support for the control settings for the VGA cameras is
* Copyright (C) 2009 Theodore Kilgore <kilgota@auburn.edu>
*
* Several previously unsupported cameras are owned and have been tested by
- * Hans de Goede <hdgoede@redhat.com> and
+ * Hans de Goede <hdegoede@redhat.com> and
* Thomas Kaiser <thomas@kaiser-linux.li> and
* Theodore Kilgore <kilgota@auburn.edu> and
* Edmond Rodriguez <erodrig_97@yahoo.com> and
@@ -267,7 +267,7 @@ static int mr_write(struct gspca_dev *gspca_dev, int len)
usb_sndbulkpipe(gspca_dev->dev, 4),
gspca_dev->usb_buf, len, NULL, 500);
if (rc < 0)
- PDEBUG(D_ERR, "reg write [%02x] error %d",
+ err("reg write [%02x] error %d",
gspca_dev->usb_buf[0], rc);
return rc;
}
@@ -281,7 +281,7 @@ static int mr_read(struct gspca_dev *gspca_dev, int len)
usb_rcvbulkpipe(gspca_dev->dev, 3),
gspca_dev->usb_buf, len, NULL, 500);
if (rc < 0)
- PDEBUG(D_ERR, "reg read [%02x] error %d",
+ err("reg read [%02x] error %d",
gspca_dev->usb_buf[0], rc);
return rc;
}
@@ -540,7 +540,7 @@ static int sd_config(struct gspca_dev *gspca_dev,
sd->sensor_type = 1;
break;
default:
- PDEBUG(D_ERR, "Unknown CIF Sensor id : %02x",
+ err("Unknown CIF Sensor id : %02x",
gspca_dev->usb_buf[1]);
return -ENODEV;
}
@@ -575,10 +575,10 @@ static int sd_config(struct gspca_dev *gspca_dev,
sd->sensor_type = 2;
} else if ((gspca_dev->usb_buf[0] != 0x03) &&
(gspca_dev->usb_buf[0] != 0x04)) {
- PDEBUG(D_ERR, "Unknown VGA Sensor id Byte 0: %02x",
+ err("Unknown VGA Sensor id Byte 0: %02x",
gspca_dev->usb_buf[0]);
- PDEBUG(D_ERR, "Defaults assumed, may not work");
- PDEBUG(D_ERR, "Please report this");
+ err("Defaults assumed, may not work");
+ err("Please report this");
}
/* Sakar Digital color needs to be adjusted. */
if ((gspca_dev->usb_buf[0] == 0x03) &&
@@ -595,12 +595,10 @@ static int sd_config(struct gspca_dev *gspca_dev,
/* Nothing to do here. */
break;
default:
- PDEBUG(D_ERR,
- "Unknown VGA Sensor id Byte 1: %02x",
+ err("Unknown VGA Sensor id Byte 1: %02x",
gspca_dev->usb_buf[1]);
- PDEBUG(D_ERR,
- "Defaults assumed, may not work");
- PDEBUG(D_ERR, "Please report this");
+ err("Defaults assumed, may not work");
+ err("Please report this");
}
}
PDEBUG(D_PROBE, "MR97310A VGA camera detected, sensor: %d",
@@ -675,7 +673,7 @@ static int start_cif_cam(struct gspca_dev *gspca_dev)
struct sd *sd = (struct sd *) gspca_dev;
__u8 *data = gspca_dev->usb_buf;
int err_code;
- const __u8 startup_string[] = {
+ static const __u8 startup_string[] = {
0x00,
0x0d,
0x01,
@@ -721,7 +719,7 @@ static int start_cif_cam(struct gspca_dev *gspca_dev)
return err_code;
if (!sd->sensor_type) {
- const struct sensor_w_data cif_sensor0_init_data[] = {
+ static const struct sensor_w_data cif_sensor0_init_data[] = {
{0x02, 0x00, {0x03, 0x5a, 0xb5, 0x01,
0x0f, 0x14, 0x0f, 0x10}, 8},
{0x0c, 0x00, {0x04, 0x01, 0x01, 0x00, 0x1f}, 5},
@@ -742,7 +740,7 @@ static int start_cif_cam(struct gspca_dev *gspca_dev)
err_code = sensor_write_regs(gspca_dev, cif_sensor0_init_data,
ARRAY_SIZE(cif_sensor0_init_data));
} else { /* sd->sensor_type = 1 */
- const struct sensor_w_data cif_sensor1_init_data[] = {
+ static const struct sensor_w_data cif_sensor1_init_data[] = {
/* Reg 3,4, 7,8 get set by the controls */
{0x02, 0x00, {0x10}, 1},
{0x05, 0x01, {0x22}, 1}, /* 5/6 also seen as 65h/32h */
@@ -777,8 +775,9 @@ static int start_vga_cam(struct gspca_dev *gspca_dev)
struct sd *sd = (struct sd *) gspca_dev;
__u8 *data = gspca_dev->usb_buf;
int err_code;
- const __u8 startup_string[] = {0x00, 0x0d, 0x01, 0x00, 0x00, 0x2b,
- 0x00, 0x00, 0x00, 0x50, 0xc0};
+ static const __u8 startup_string[] =
+ {0x00, 0x0d, 0x01, 0x00, 0x00, 0x2b, 0x00, 0x00,
+ 0x00, 0x50, 0xc0};
/* What some of these mean is explained in start_cif_cam(), above */
memcpy(data, startup_string, 11);
@@ -830,7 +829,7 @@ static int start_vga_cam(struct gspca_dev *gspca_dev)
return err_code;
if (!sd->sensor_type) {
- const struct sensor_w_data vga_sensor0_init_data[] = {
+ static const struct sensor_w_data vga_sensor0_init_data[] = {
{0x01, 0x00, {0x0c, 0x00, 0x04}, 3},
{0x14, 0x00, {0x01, 0xe4, 0x02, 0x84}, 4},
{0x20, 0x00, {0x00, 0x80, 0x00, 0x08}, 4},
@@ -841,20 +840,20 @@ static int start_vga_cam(struct gspca_dev *gspca_dev)
err_code = sensor_write_regs(gspca_dev, vga_sensor0_init_data,
ARRAY_SIZE(vga_sensor0_init_data));
} else if (sd->sensor_type == 1) {
- const struct sensor_w_data color_adj[] = {
+ static const struct sensor_w_data color_adj[] = {
{0x02, 0x00, {0x06, 0x59, 0x0c, 0x16, 0x00,
/* adjusted blue, green, red gain correct
too much blue from the Sakar Digital */
0x05, 0x01, 0x04}, 8}
};
- const struct sensor_w_data color_no_adj[] = {
+ static const struct sensor_w_data color_no_adj[] = {
{0x02, 0x00, {0x06, 0x59, 0x0c, 0x16, 0x00,
/* default blue, green, red gain settings */
0x07, 0x00, 0x01}, 8}
};
- const struct sensor_w_data vga_sensor1_init_data[] = {
+ static const struct sensor_w_data vga_sensor1_init_data[] = {
{0x11, 0x04, {0x01}, 1},
{0x0a, 0x00, {0x00, 0x01, 0x00, 0x00, 0x01,
/* These settings may be better for some cameras */
@@ -879,7 +878,7 @@ static int start_vga_cam(struct gspca_dev *gspca_dev)
err_code = sensor_write_regs(gspca_dev, vga_sensor1_init_data,
ARRAY_SIZE(vga_sensor1_init_data));
} else { /* sensor type == 2 */
- const struct sensor_w_data vga_sensor2_init_data[] = {
+ static const struct sensor_w_data vga_sensor2_init_data[] = {
{0x01, 0x00, {0x48}, 1},
{0x02, 0x00, {0x22}, 1},
@@ -976,7 +975,7 @@ static void setbrightness(struct gspca_dev *gspca_dev)
u8 val;
u8 sign_reg = 7; /* This reg and the next one used on CIF cams. */
u8 value_reg = 8; /* VGA cams seem to use regs 0x0b and 0x0c */
- const u8 quick_clix_table[] =
+ static const u8 quick_clix_table[] =
/* 0 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 */
{ 0, 4, 8, 12, 1, 2, 3, 5, 6, 9, 7, 10, 13, 11, 14, 15};
/*
@@ -1261,18 +1260,11 @@ static struct usb_driver sd_driver = {
/* -- module insert / remove -- */
static int __init sd_mod_init(void)
{
- int ret;
-
- ret = usb_register(&sd_driver);
- if (ret < 0)
- return ret;
- PDEBUG(D_PROBE, "registered");
- return 0;
+ return usb_register(&sd_driver);
}
static void __exit sd_mod_exit(void)
{
usb_deregister(&sd_driver);
- PDEBUG(D_PROBE, "deregistered");
}
module_init(sd_mod_init);
diff --git a/drivers/media/video/gspca/ov519.c b/drivers/media/video/gspca/ov519.c
index 2b2cbdbf03fe..6cf6855aa506 100644
--- a/drivers/media/video/gspca/ov519.c
+++ b/drivers/media/video/gspca/ov519.c
@@ -57,10 +57,24 @@ static int frame_rate;
* are getting "Failed to read sensor ID..." */
static int i2c_detect_tries = 10;
+/* controls */
+enum e_ctrl {
+ BRIGHTNESS,
+ CONTRAST,
+ COLORS,
+ HFLIP,
+ VFLIP,
+ AUTOBRIGHT,
+ FREQ,
+ NCTRL /* number of controls */
+};
+
/* ov519 device descriptor */
struct sd {
struct gspca_dev gspca_dev; /* !! must be the first item */
+ struct gspca_ctrl ctrls[NCTRL];
+
__u8 packet_nr;
char bridge;
@@ -82,13 +96,6 @@ struct sd {
/* Determined by sensor type */
__u8 sif;
- __u8 brightness;
- __u8 contrast;
- __u8 colors;
- __u8 hflip;
- __u8 vflip;
- __u8 autobrightness;
- __u8 freq;
__u8 quality;
#define QUALITY_MIN 50
#define QUALITY_MAX 70
@@ -130,29 +137,16 @@ struct sd {
#include "w996Xcf.c"
/* V4L2 controls supported by the driver */
-static int sd_setbrightness(struct gspca_dev *gspca_dev, __s32 val);
-static int sd_getbrightness(struct gspca_dev *gspca_dev, __s32 *val);
-static int sd_setcontrast(struct gspca_dev *gspca_dev, __s32 val);
-static int sd_getcontrast(struct gspca_dev *gspca_dev, __s32 *val);
-static int sd_setcolors(struct gspca_dev *gspca_dev, __s32 val);
-static int sd_getcolors(struct gspca_dev *gspca_dev, __s32 *val);
-static int sd_sethflip(struct gspca_dev *gspca_dev, __s32 val);
-static int sd_gethflip(struct gspca_dev *gspca_dev, __s32 *val);
-static int sd_setvflip(struct gspca_dev *gspca_dev, __s32 val);
-static int sd_getvflip(struct gspca_dev *gspca_dev, __s32 *val);
-static int sd_setautobrightness(struct gspca_dev *gspca_dev, __s32 val);
-static int sd_getautobrightness(struct gspca_dev *gspca_dev, __s32 *val);
-static int sd_setfreq(struct gspca_dev *gspca_dev, __s32 val);
-static int sd_getfreq(struct gspca_dev *gspca_dev, __s32 *val);
static void setbrightness(struct gspca_dev *gspca_dev);
static void setcontrast(struct gspca_dev *gspca_dev);
static void setcolors(struct gspca_dev *gspca_dev);
-static void setautobrightness(struct sd *sd);
-static void setfreq(struct sd *sd);
+static void sethvflip(struct gspca_dev *gspca_dev);
+static void setautobright(struct gspca_dev *gspca_dev);
+static void setfreq(struct gspca_dev *gspca_dev);
+static void setfreq_i(struct sd *sd);
static const struct ctrl sd_ctrls[] = {
-#define BRIGHTNESS_IDX 0
- {
+[BRIGHTNESS] = {
{
.id = V4L2_CID_BRIGHTNESS,
.type = V4L2_CTRL_TYPE_INTEGER,
@@ -160,14 +154,11 @@ static const struct ctrl sd_ctrls[] = {
.minimum = 0,
.maximum = 255,
.step = 1,
-#define BRIGHTNESS_DEF 127
- .default_value = BRIGHTNESS_DEF,
+ .default_value = 127,
},
- .set = sd_setbrightness,
- .get = sd_getbrightness,
+ .set_control = setbrightness,
},
-#define CONTRAST_IDX 1
- {
+[CONTRAST] = {
{
.id = V4L2_CID_CONTRAST,
.type = V4L2_CTRL_TYPE_INTEGER,
@@ -175,14 +166,11 @@ static const struct ctrl sd_ctrls[] = {
.minimum = 0,
.maximum = 255,
.step = 1,
-#define CONTRAST_DEF 127
- .default_value = CONTRAST_DEF,
+ .default_value = 127,
},
- .set = sd_setcontrast,
- .get = sd_getcontrast,
+ .set_control = setcontrast,
},
-#define COLOR_IDX 2
- {
+[COLORS] = {
{
.id = V4L2_CID_SATURATION,
.type = V4L2_CTRL_TYPE_INTEGER,
@@ -190,15 +178,12 @@ static const struct ctrl sd_ctrls[] = {
.minimum = 0,
.maximum = 255,
.step = 1,
-#define COLOR_DEF 127
- .default_value = COLOR_DEF,
+ .default_value = 127,
},
- .set = sd_setcolors,
- .get = sd_getcolors,
+ .set_control = setcolors,
},
/* The flip controls work with ov7670 only */
-#define HFLIP_IDX 3
- {
+[HFLIP] = {
{
.id = V4L2_CID_HFLIP,
.type = V4L2_CTRL_TYPE_BOOLEAN,
@@ -206,14 +191,11 @@ static const struct ctrl sd_ctrls[] = {
.minimum = 0,
.maximum = 1,
.step = 1,
-#define HFLIP_DEF 0
- .default_value = HFLIP_DEF,
+ .default_value = 0,
},
- .set = sd_sethflip,
- .get = sd_gethflip,
+ .set_control = sethvflip,
},
-#define VFLIP_IDX 4
- {
+[VFLIP] = {
{
.id = V4L2_CID_VFLIP,
.type = V4L2_CTRL_TYPE_BOOLEAN,
@@ -221,14 +203,11 @@ static const struct ctrl sd_ctrls[] = {
.minimum = 0,
.maximum = 1,
.step = 1,
-#define VFLIP_DEF 0
- .default_value = VFLIP_DEF,
+ .default_value = 0,
},
- .set = sd_setvflip,
- .get = sd_getvflip,
+ .set_control = sethvflip,
},
-#define AUTOBRIGHT_IDX 5
- {
+[AUTOBRIGHT] = {
{
.id = V4L2_CID_AUTOBRIGHTNESS,
.type = V4L2_CTRL_TYPE_BOOLEAN,
@@ -236,14 +215,11 @@ static const struct ctrl sd_ctrls[] = {
.minimum = 0,
.maximum = 1,
.step = 1,
-#define AUTOBRIGHT_DEF 1
- .default_value = AUTOBRIGHT_DEF,
+ .default_value = 1,
},
- .set = sd_setautobrightness,
- .get = sd_getautobrightness,
+ .set_control = setautobright,
},
-#define FREQ_IDX 6
- {
+[FREQ] = {
{
.id = V4L2_CID_POWER_LINE_FREQUENCY,
.type = V4L2_CTRL_TYPE_MENU,
@@ -251,26 +227,9 @@ static const struct ctrl sd_ctrls[] = {
.minimum = 0,
.maximum = 2, /* 0: 0, 1: 50Hz, 2:60Hz */
.step = 1,
-#define FREQ_DEF 0
- .default_value = FREQ_DEF,
- },
- .set = sd_setfreq,
- .get = sd_getfreq,
- },
-#define OV7670_FREQ_IDX 7
- {
- {
- .id = V4L2_CID_POWER_LINE_FREQUENCY,
- .type = V4L2_CTRL_TYPE_MENU,
- .name = "Light frequency filter",
- .minimum = 0,
- .maximum = 3, /* 0: 0, 1: 50Hz, 2:60Hz 3: Auto Hz */
- .step = 1,
-#define OV7670_FREQ_DEF 3
- .default_value = OV7670_FREQ_DEF,
+ .default_value = 0,
},
- .set = sd_setfreq,
- .get = sd_getfreq,
+ .set_control = setfreq,
},
};
@@ -456,10 +415,10 @@ static const struct v4l2_pix_format ovfx2_ov3610_mode[] = {
/* Registers common to OV511 / OV518 */
#define R51x_FIFO_PSIZE 0x30 /* 2 bytes wide w/ OV518(+) */
-#define R51x_SYS_RESET 0x50
+#define R51x_SYS_RESET 0x50
/* Reset type flags */
#define OV511_RESET_OMNICE 0x08
-#define R51x_SYS_INIT 0x53
+#define R51x_SYS_INIT 0x53
#define R51x_SYS_SNAP 0x52
#define R51x_SYS_CUST_ID 0x5F
#define R51x_COMP_LUT_BEGIN 0x80
@@ -644,13 +603,11 @@ struct ov_i2c_regvals {
};
/* Settings for OV2610 camera chip */
-static const struct ov_i2c_regvals norm_2610[] =
-{
+static const struct ov_i2c_regvals norm_2610[] = {
{ 0x12, 0x80 }, /* reset */
};
-static const struct ov_i2c_regvals norm_3620b[] =
-{
+static const struct ov_i2c_regvals norm_3620b[] = {
/*
* From the datasheet: "Note that after writing to register COMH
* (0x12) to change the sensor mode, registers related to the
@@ -1900,7 +1857,7 @@ static int reg_w(struct sd *sd, __u16 index, __u16 value)
sd->gspca_dev.usb_buf, 1, 500);
leave:
if (ret < 0) {
- PDEBUG(D_ERR, "Write reg 0x%04x -> [0x%02x] failed",
+ err("Write reg 0x%04x -> [0x%02x] failed",
value, index);
return ret;
}
@@ -1938,7 +1895,7 @@ static int reg_r(struct sd *sd, __u16 index)
ret = sd->gspca_dev.usb_buf[0];
PDEBUG(D_USBI, "Read reg [0x%02X] -> 0x%04X", index, ret);
} else
- PDEBUG(D_ERR, "Read reg [0x%02x] failed", index);
+ err("Read reg [0x%02x] failed", index);
return ret;
}
@@ -1958,7 +1915,7 @@ static int reg_r8(struct sd *sd,
if (ret >= 0)
ret = sd->gspca_dev.usb_buf[0];
else
- PDEBUG(D_ERR, "Read reg 8 [0x%02x] failed", index);
+ err("Read reg 8 [0x%02x] failed", index);
return ret;
}
@@ -2006,7 +1963,7 @@ static int ov518_reg_w32(struct sd *sd, __u16 index, u32 value, int n)
0, index,
sd->gspca_dev.usb_buf, n, 500);
if (ret < 0) {
- PDEBUG(D_ERR, "Write reg32 [%02x] %08x failed", index, value);
+ err("Write reg32 [%02x] %08x failed", index, value);
return ret;
}
@@ -2203,7 +2160,7 @@ static int ovfx2_i2c_w(struct sd *sd, __u8 reg, __u8 value)
(__u16)value, (__u16)reg, NULL, 0, 500);
if (ret < 0) {
- PDEBUG(D_ERR, "i2c 0x%02x -> [0x%02x] failed", value, reg);
+ err("i2c 0x%02x -> [0x%02x] failed", value, reg);
return ret;
}
@@ -2225,7 +2182,7 @@ static int ovfx2_i2c_r(struct sd *sd, __u8 reg)
ret = sd->gspca_dev.usb_buf[0];
PDEBUG(D_USBI, "i2c [0x%02X] -> 0x%02X", reg, ret);
} else
- PDEBUG(D_ERR, "i2c read [0x%02x] failed", reg);
+ err("i2c read [0x%02x] failed", reg);
return ret;
}
@@ -2481,7 +2438,7 @@ static int ov_hires_configure(struct sd *sd)
int high, low;
if (sd->bridge != BRIDGE_OVFX2) {
- PDEBUG(D_ERR, "error hires sensors only supported with ovfx2");
+ err("error hires sensors only supported with ovfx2");
return -1;
}
@@ -2498,7 +2455,7 @@ static int ov_hires_configure(struct sd *sd)
PDEBUG(D_PROBE, "Sensor is an OV3610");
sd->sensor = SEN_OV3610;
} else {
- PDEBUG(D_ERR, "Error unknown sensor type: 0x%02x%02x",
+ err("Error unknown sensor type: 0x%02x%02x",
high, low);
return -1;
}
@@ -2526,7 +2483,7 @@ static int ov8xx0_configure(struct sd *sd)
if ((rc & 3) == 1) {
sd->sensor = SEN_OV8610;
} else {
- PDEBUG(D_ERR, "Unknown image sensor version: %d", rc & 3);
+ err("Unknown image sensor version: %d", rc & 3);
return -1;
}
@@ -2589,9 +2546,8 @@ static int ov7xx0_configure(struct sd *sd)
if (high == 0x76) {
switch (low) {
case 0x30:
- PDEBUG(D_PROBE, "Sensor is an OV7630/OV7635");
- PDEBUG(D_ERR,
- "7630 is not supported by this driver");
+ err("Sensor is an OV7630/OV7635");
+ err("7630 is not supported by this driver");
return -1;
case 0x40:
PDEBUG(D_PROBE, "Sensor is an OV7645");
@@ -2614,7 +2570,7 @@ static int ov7xx0_configure(struct sd *sd)
sd->sensor = SEN_OV7620;
}
} else {
- PDEBUG(D_ERR, "Unknown image sensor version: %d", rc & 3);
+ err("Unknown image sensor version: %d", rc & 3);
return -1;
}
@@ -2641,9 +2597,8 @@ static int ov6xx0_configure(struct sd *sd)
switch (rc) {
case 0x00:
sd->sensor = SEN_OV6630;
- PDEBUG(D_ERR,
- "WARNING: Sensor is an OV66308. Your camera may have");
- PDEBUG(D_ERR, "been misdetected in previous driver versions.");
+ warn("WARNING: Sensor is an OV66308. Your camera may have");
+ warn("been misdetected in previous driver versions.");
break;
case 0x01:
sd->sensor = SEN_OV6620;
@@ -2659,12 +2614,11 @@ static int ov6xx0_configure(struct sd *sd)
break;
case 0x90:
sd->sensor = SEN_OV6630;
- PDEBUG(D_ERR,
- "WARNING: Sensor is an OV66307. Your camera may have");
- PDEBUG(D_ERR, "been misdetected in previous driver versions.");
+ warn("WARNING: Sensor is an OV66307. Your camera may have");
+ warn("been misdetected in previous driver versions.");
break;
default:
- PDEBUG(D_ERR, "FATAL: Unknown sensor version: 0x%02x", rc);
+ err("FATAL: Unknown sensor version: 0x%02x", rc);
return -1;
}
@@ -2823,7 +2777,7 @@ static int ov511_configure(struct gspca_dev *gspca_dev)
};
const struct ov_regvals norm_511[] = {
- { R511_DRAM_FLOW_CTL, 0x01 },
+ { R511_DRAM_FLOW_CTL, 0x01 },
{ R51x_SYS_SNAP, 0x00 },
{ R51x_SYS_SNAP, 0x02 },
{ R51x_SYS_SNAP, 0x00 },
@@ -2907,7 +2861,7 @@ static int ov518_configure(struct gspca_dev *gspca_dev)
const struct ov_regvals norm_518[] = {
{ R51x_SYS_SNAP, 0x02 }, /* Reset */
{ R51x_SYS_SNAP, 0x01 }, /* Enable */
- { 0x31, 0x0f },
+ { 0x31, 0x0f },
{ 0x5d, 0x03 },
{ 0x24, 0x9f },
{ 0x25, 0x90 },
@@ -2920,7 +2874,7 @@ static int ov518_configure(struct gspca_dev *gspca_dev)
const struct ov_regvals norm_518_p[] = {
{ R51x_SYS_SNAP, 0x02 }, /* Reset */
{ R51x_SYS_SNAP, 0x01 }, /* Enable */
- { 0x31, 0x0f },
+ { 0x31, 0x0f },
{ 0x5d, 0x03 },
{ 0x24, 0x9f },
{ 0x25, 0x90 },
@@ -3082,7 +3036,7 @@ static int sd_config(struct gspca_dev *gspca_dev,
goto error;
}
} else {
- PDEBUG(D_ERR, "Can't determine sensor slave IDs");
+ err("Can't determine sensor slave IDs");
goto error;
}
@@ -3142,36 +3096,23 @@ static int sd_config(struct gspca_dev *gspca_dev,
goto error;
break;
}
- sd->brightness = BRIGHTNESS_DEF;
- if (sd->sensor == SEN_OV6630 || sd->sensor == SEN_OV66308AF)
- sd->contrast = 200; /* The default is too low for the ov6630 */
+ gspca_dev->cam.ctrls = sd->ctrls;
+ if (sd->sensor == SEN_OV7670)
+ gspca_dev->ctrl_dis = 1 << COLORS;
else
- sd->contrast = CONTRAST_DEF;
- sd->colors = COLOR_DEF;
- sd->hflip = HFLIP_DEF;
- sd->vflip = VFLIP_DEF;
- sd->autobrightness = AUTOBRIGHT_DEF;
- if (sd->sensor == SEN_OV7670) {
- sd->freq = OV7670_FREQ_DEF;
- gspca_dev->ctrl_dis = (1 << FREQ_IDX) | (1 << COLOR_IDX);
- } else {
- sd->freq = FREQ_DEF;
- gspca_dev->ctrl_dis = (1 << HFLIP_IDX) | (1 << VFLIP_IDX) |
- (1 << OV7670_FREQ_IDX);
- }
+ gspca_dev->ctrl_dis = (1 << HFLIP) | (1 << VFLIP);
sd->quality = QUALITY_DEF;
if (sd->sensor == SEN_OV7640 ||
sd->sensor == SEN_OV7648)
- gspca_dev->ctrl_dis |= (1 << AUTOBRIGHT_IDX) |
- (1 << CONTRAST_IDX);
+ gspca_dev->ctrl_dis |= (1 << AUTOBRIGHT) | (1 << CONTRAST);
if (sd->sensor == SEN_OV7670)
- gspca_dev->ctrl_dis |= 1 << AUTOBRIGHT_IDX;
+ gspca_dev->ctrl_dis |= 1 << AUTOBRIGHT;
/* OV8610 Frequency filter control should work but needs testing */
if (sd->sensor == SEN_OV8610)
- gspca_dev->ctrl_dis |= 1 << FREQ_IDX;
+ gspca_dev->ctrl_dis |= 1 << FREQ;
/* No controls for the OV2610/OV3610 */
if (sd->sensor == SEN_OV2610 || sd->sensor == SEN_OV3610)
- gspca_dev->ctrl_dis |= 0xFF;
+ gspca_dev->ctrl_dis |= (1 << NCTRL) - 1;
return 0;
error:
@@ -3206,6 +3147,8 @@ static int sd_init(struct gspca_dev *gspca_dev)
break;
case SEN_OV6630:
case SEN_OV66308AF:
+ sd->ctrls[CONTRAST].def = 200;
+ /* The default is too low for the ov6630 */
if (write_i2c_regvals(sd, norm_6x30, ARRAY_SIZE(norm_6x30)))
return -EIO;
break;
@@ -3228,6 +3171,8 @@ static int sd_init(struct gspca_dev *gspca_dev)
return -EIO;
break;
case SEN_OV7670:
+ sd->ctrls[FREQ].max = 3; /* auto */
+ sd->ctrls[FREQ].def = 3;
if (write_i2c_regvals(sd, norm_7670, ARRAY_SIZE(norm_7670)))
return -EIO;
break;
@@ -3253,7 +3198,7 @@ static int ov511_mode_init_regs(struct sd *sd)
intf = usb_ifnum_to_if(sd->gspca_dev.dev, sd->gspca_dev.iface);
alt = usb_altnum_to_altsetting(intf, sd->gspca_dev.alt);
if (!alt) {
- PDEBUG(D_ERR, "Couldn't get altsetting");
+ err("Couldn't get altsetting");
return -EIO;
}
@@ -3377,7 +3322,7 @@ static int ov518_mode_init_regs(struct sd *sd)
intf = usb_ifnum_to_if(sd->gspca_dev.dev, sd->gspca_dev.iface);
alt = usb_altnum_to_altsetting(intf, sd->gspca_dev.alt);
if (!alt) {
- PDEBUG(D_ERR, "Couldn't get altsetting");
+ err("Couldn't get altsetting");
return -EIO;
}
@@ -3706,7 +3651,7 @@ static int mode_init_ov_sensor_regs(struct sd *sd)
break;
case SEN_OV7610:
i2c_w_mask(sd, 0x14, qvga ? 0x20 : 0x00, 0x20);
- i2c_w(sd, 0x35, qvga?0x1e:0x9e);
+ i2c_w(sd, 0x35, qvga ? 0x1e : 0x9e);
i2c_w_mask(sd, 0x13, 0x00, 0x20); /* Select 16 bit data bus */
i2c_w_mask(sd, 0x12, 0x04, 0x06); /* AWB: 1 Test pattern: 0 */
break;
@@ -3798,15 +3743,17 @@ static int mode_init_ov_sensor_regs(struct sd *sd)
return 0;
}
-static void sethvflip(struct sd *sd)
+static void sethvflip(struct gspca_dev *gspca_dev)
{
+ struct sd *sd = (struct sd *) gspca_dev;
+
if (sd->sensor != SEN_OV7670)
return;
if (sd->gspca_dev.streaming)
ov51x_stop(sd);
i2c_w_mask(sd, OV7670_REG_MVFP,
- OV7670_MVFP_MIRROR * sd->hflip
- | OV7670_MVFP_VFLIP * sd->vflip,
+ OV7670_MVFP_MIRROR * sd->ctrls[HFLIP].val
+ | OV7670_MVFP_VFLIP * sd->ctrls[VFLIP].val,
OV7670_MVFP_MIRROR | OV7670_MVFP_VFLIP);
if (sd->gspca_dev.streaming)
ov51x_restart(sd);
@@ -3957,9 +3904,9 @@ static int sd_start(struct gspca_dev *gspca_dev)
setcontrast(gspca_dev);
setbrightness(gspca_dev);
setcolors(gspca_dev);
- sethvflip(sd);
- setautobrightness(sd);
- setfreq(sd);
+ sethvflip(gspca_dev);
+ setautobright(gspca_dev);
+ setfreq_i(sd);
/* Force clear snapshot state in case the snapshot button was
pressed while we weren't streaming */
@@ -4000,7 +3947,7 @@ static void ov51x_handle_button(struct gspca_dev *gspca_dev, u8 state)
struct sd *sd = (struct sd *) gspca_dev;
if (sd->snapshot_pressed != state) {
-#ifdef CONFIG_INPUT
+#if defined(CONFIG_INPUT) || defined(CONFIG_INPUT_MODULE)
input_report_key(gspca_dev->input_dev, KEY_CAMERA, state);
input_sync(gspca_dev->input_dev);
#endif
@@ -4214,7 +4161,7 @@ static void setbrightness(struct gspca_dev *gspca_dev)
struct sd *sd = (struct sd *) gspca_dev;
int val;
- val = sd->brightness;
+ val = sd->ctrls[BRIGHTNESS].val;
switch (sd->sensor) {
case SEN_OV8610:
case SEN_OV7610:
@@ -4229,7 +4176,7 @@ static void setbrightness(struct gspca_dev *gspca_dev)
case SEN_OV7620:
case SEN_OV7620AE:
/* 7620 doesn't like manual changes when in auto mode */
- if (!sd->autobrightness)
+ if (!sd->ctrls[AUTOBRIGHT].val)
i2c_w(sd, OV7610_REG_BRT, val);
break;
case SEN_OV7670:
@@ -4245,7 +4192,7 @@ static void setcontrast(struct gspca_dev *gspca_dev)
struct sd *sd = (struct sd *) gspca_dev;
int val;
- val = sd->contrast;
+ val = sd->ctrls[CONTRAST].val;
switch (sd->sensor) {
case SEN_OV7610:
case SEN_OV6620:
@@ -4287,7 +4234,7 @@ static void setcolors(struct gspca_dev *gspca_dev)
struct sd *sd = (struct sd *) gspca_dev;
int val;
- val = sd->colors;
+ val = sd->ctrls[COLORS].val;
switch (sd->sensor) {
case SEN_OV8610:
case SEN_OV7610:
@@ -4317,23 +4264,25 @@ static void setcolors(struct gspca_dev *gspca_dev)
}
}
-static void setautobrightness(struct sd *sd)
+static void setautobright(struct gspca_dev *gspca_dev)
{
+ struct sd *sd = (struct sd *) gspca_dev;
+
if (sd->sensor == SEN_OV7640 || sd->sensor == SEN_OV7648 ||
sd->sensor == SEN_OV7670 ||
sd->sensor == SEN_OV2610 || sd->sensor == SEN_OV3610)
return;
- i2c_w_mask(sd, 0x2d, sd->autobrightness ? 0x10 : 0x00, 0x10);
+ i2c_w_mask(sd, 0x2d, sd->ctrls[AUTOBRIGHT].val ? 0x10 : 0x00, 0x10);
}
-static void setfreq(struct sd *sd)
+static void setfreq_i(struct sd *sd)
{
if (sd->sensor == SEN_OV2610 || sd->sensor == SEN_OV3610)
return;
if (sd->sensor == SEN_OV7670) {
- switch (sd->freq) {
+ switch (sd->ctrls[FREQ].val) {
case 0: /* Banding filter disabled */
i2c_w_mask(sd, OV7670_REG_COM8, 0, OV7670_COM8_BFILT);
break;
@@ -4355,7 +4304,7 @@ static void setfreq(struct sd *sd)
break;
}
} else {
- switch (sd->freq) {
+ switch (sd->ctrls[FREQ].val) {
case 0: /* Banding filter disabled */
i2c_w_mask(sd, 0x2d, 0x00, 0x04);
i2c_w_mask(sd, 0x2a, 0x00, 0x80);
@@ -4387,135 +4336,15 @@ static void setfreq(struct sd *sd)
}
}
}
-
-static int sd_setbrightness(struct gspca_dev *gspca_dev, __s32 val)
-{
- struct sd *sd = (struct sd *) gspca_dev;
-
- sd->brightness = val;
- if (gspca_dev->streaming)
- setbrightness(gspca_dev);
- return 0;
-}
-
-static int sd_getbrightness(struct gspca_dev *gspca_dev, __s32 *val)
-{
- struct sd *sd = (struct sd *) gspca_dev;
-
- *val = sd->brightness;
- return 0;
-}
-
-static int sd_setcontrast(struct gspca_dev *gspca_dev, __s32 val)
-{
- struct sd *sd = (struct sd *) gspca_dev;
-
- sd->contrast = val;
- if (gspca_dev->streaming)
- setcontrast(gspca_dev);
- return 0;
-}
-
-static int sd_getcontrast(struct gspca_dev *gspca_dev, __s32 *val)
-{
- struct sd *sd = (struct sd *) gspca_dev;
-
- *val = sd->contrast;
- return 0;
-}
-
-static int sd_setcolors(struct gspca_dev *gspca_dev, __s32 val)
-{
- struct sd *sd = (struct sd *) gspca_dev;
-
- sd->colors = val;
- if (gspca_dev->streaming)
- setcolors(gspca_dev);
- return 0;
-}
-
-static int sd_getcolors(struct gspca_dev *gspca_dev, __s32 *val)
-{
- struct sd *sd = (struct sd *) gspca_dev;
-
- *val = sd->colors;
- return 0;
-}
-
-static int sd_sethflip(struct gspca_dev *gspca_dev, __s32 val)
-{
- struct sd *sd = (struct sd *) gspca_dev;
-
- sd->hflip = val;
- if (gspca_dev->streaming)
- sethvflip(sd);
- return 0;
-}
-
-static int sd_gethflip(struct gspca_dev *gspca_dev, __s32 *val)
-{
- struct sd *sd = (struct sd *) gspca_dev;
-
- *val = sd->hflip;
- return 0;
-}
-
-static int sd_setvflip(struct gspca_dev *gspca_dev, __s32 val)
-{
- struct sd *sd = (struct sd *) gspca_dev;
-
- sd->vflip = val;
- if (gspca_dev->streaming)
- sethvflip(sd);
- return 0;
-}
-
-static int sd_getvflip(struct gspca_dev *gspca_dev, __s32 *val)
-{
- struct sd *sd = (struct sd *) gspca_dev;
-
- *val = sd->vflip;
- return 0;
-}
-
-static int sd_setautobrightness(struct gspca_dev *gspca_dev, __s32 val)
-{
- struct sd *sd = (struct sd *) gspca_dev;
-
- sd->autobrightness = val;
- if (gspca_dev->streaming)
- setautobrightness(sd);
- return 0;
-}
-
-static int sd_getautobrightness(struct gspca_dev *gspca_dev, __s32 *val)
-{
- struct sd *sd = (struct sd *) gspca_dev;
-
- *val = sd->autobrightness;
- return 0;
-}
-
-static int sd_setfreq(struct gspca_dev *gspca_dev, __s32 val)
+static void setfreq(struct gspca_dev *gspca_dev)
{
struct sd *sd = (struct sd *) gspca_dev;
- sd->freq = val;
- if (gspca_dev->streaming) {
- setfreq(sd);
- /* Ugly but necessary */
- if (sd->bridge == BRIDGE_W9968CF)
- w9968cf_set_crop_window(sd);
- }
- return 0;
-}
-
-static int sd_getfreq(struct gspca_dev *gspca_dev, __s32 *val)
-{
- struct sd *sd = (struct sd *) gspca_dev;
+ setfreq_i(sd);
- *val = sd->freq;
- return 0;
+ /* Ugly but necessary */
+ if (sd->bridge == BRIDGE_W9968CF)
+ w9968cf_set_crop_window(sd);
}
static int sd_querymenu(struct gspca_dev *gspca_dev,
@@ -4601,7 +4430,7 @@ static const struct sd_desc sd_desc = {
.querymenu = sd_querymenu,
.get_jcomp = sd_get_jcomp,
.set_jcomp = sd_set_jcomp,
-#ifdef CONFIG_INPUT
+#if defined(CONFIG_INPUT) || defined(CONFIG_INPUT_MODULE)
.other_input = 1,
#endif
};
@@ -4663,17 +4492,11 @@ static struct usb_driver sd_driver = {
/* -- module insert / remove -- */
static int __init sd_mod_init(void)
{
- int ret;
- ret = usb_register(&sd_driver);
- if (ret < 0)
- return ret;
- PDEBUG(D_PROBE, "registered");
- return 0;
+ return usb_register(&sd_driver);
}
static void __exit sd_mod_exit(void)
{
usb_deregister(&sd_driver);
- PDEBUG(D_PROBE, "deregistered");
}
module_init(sd_mod_init);
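The ov519.c hunks above (and the larger sonixj.c conversion later in this patch) replace the per-control sd_set*/sd_get* callback pairs with a gspca_ctrl array that the gspca core manages itself: the driver publishes the array through cam.ctrls, supplies one .set_control callback per control, and reads the current value back from ctrls[].val. A minimal sketch of the shape of that interface, with a hypothetical single control and register write (not taken from any one driver), is:

enum e_ctrl {
	BRIGHTNESS,
	NCTRL			/* number of controls */
};

struct sd {
	struct gspca_dev gspca_dev;	/* !! must be the first item */
	struct gspca_ctrl ctrls[NCTRL];
};

static void setbrightness(struct gspca_dev *gspca_dev)
{
	struct sd *sd = (struct sd *) gspca_dev;

	/* the core has already stored the new value in .val */
	i2c_w(sd, 0x06, sd->ctrls[BRIGHTNESS].val);	/* hypothetical register */
}

static const struct ctrl sd_ctrls[NCTRL] = {
[BRIGHTNESS] = {
	{
		.id	 = V4L2_CID_BRIGHTNESS,
		.type	 = V4L2_CTRL_TYPE_INTEGER,
		.name	 = "Brightness",
		.minimum = 0,
		.maximum = 255,
		.step	 = 1,
		.default_value = 127,
	},
	.set_control = setbrightness
    },
};

static int sd_config(struct gspca_dev *gspca_dev,
			const struct usb_device_id *id)
{
	struct sd *sd = (struct sd *) gspca_dev;

	gspca_dev->cam.ctrls = sd->ctrls;	/* let the core get/set the values */
	return 0;
}

Per-model differences are then expressed with the ctrl_dis bitmask (1 << control index, or (1 << NCTRL) - 1 to disable everything) and by adjusting ctrls[x].def or .max in sd_init() before streaming starts, which is what the CONTRAST and FREQ tweaks above do.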
diff --git a/drivers/media/video/gspca/ov534.c b/drivers/media/video/gspca/ov534.c
index 96cb3a976581..88ef03f6235b 100644
--- a/drivers/media/video/gspca/ov534.c
+++ b/drivers/media/video/gspca/ov534.c
@@ -487,7 +487,7 @@ static void ov534_reg_write(struct gspca_dev *gspca_dev, u16 reg, u8 val)
USB_DIR_OUT | USB_TYPE_VENDOR | USB_RECIP_DEVICE,
0x00, reg, gspca_dev->usb_buf, 1, CTRL_TIMEOUT);
if (ret < 0)
- PDEBUG(D_ERR, "write failed");
+ err("write failed %d", ret);
}
static u8 ov534_reg_read(struct gspca_dev *gspca_dev, u16 reg)
@@ -502,7 +502,7 @@ static u8 ov534_reg_read(struct gspca_dev *gspca_dev, u16 reg)
0x00, reg, gspca_dev->usb_buf, 1, CTRL_TIMEOUT);
PDEBUG(D_USBI, "reg=0x%04x, data=0x%02x", reg, gspca_dev->usb_buf[0]);
if (ret < 0)
- PDEBUG(D_ERR, "read failed");
+ err("read failed %d", ret);
return gspca_dev->usb_buf[0];
}
@@ -564,7 +564,7 @@ static void sccb_reg_write(struct gspca_dev *gspca_dev, u8 reg, u8 val)
ov534_reg_write(gspca_dev, OV534_REG_OPERATION, OV534_OP_WRITE_3);
if (!sccb_check_status(gspca_dev))
- PDEBUG(D_ERR, "sccb_reg_write failed");
+ err("sccb_reg_write failed");
}
static u8 sccb_reg_read(struct gspca_dev *gspca_dev, u16 reg)
@@ -572,11 +572,11 @@ static u8 sccb_reg_read(struct gspca_dev *gspca_dev, u16 reg)
ov534_reg_write(gspca_dev, OV534_REG_SUBADDR, reg);
ov534_reg_write(gspca_dev, OV534_REG_OPERATION, OV534_OP_WRITE_2);
if (!sccb_check_status(gspca_dev))
- PDEBUG(D_ERR, "sccb_reg_read failed 1");
+ err("sccb_reg_read failed 1");
ov534_reg_write(gspca_dev, OV534_REG_OPERATION, OV534_OP_READ_2);
if (!sccb_check_status(gspca_dev))
- PDEBUG(D_ERR, "sccb_reg_read failed 2");
+ err("sccb_reg_read failed 2");
return ov534_reg_read(gspca_dev, OV534_REG_READ);
}
@@ -1327,19 +1327,12 @@ static struct usb_driver sd_driver = {
/* -- module insert / remove -- */
static int __init sd_mod_init(void)
{
- int ret;
-
- ret = usb_register(&sd_driver);
- if (ret < 0)
- return ret;
- PDEBUG(D_PROBE, "registered");
- return 0;
+ return usb_register(&sd_driver);
}
static void __exit sd_mod_exit(void)
{
usb_deregister(&sd_driver);
- PDEBUG(D_PROBE, "deregistered");
}
module_init(sd_mod_init);
diff --git a/drivers/media/video/gspca/ov534_9.c b/drivers/media/video/gspca/ov534_9.c
index bbe5a030e3b4..e831f0d280ea 100644
--- a/drivers/media/video/gspca/ov534_9.c
+++ b/drivers/media/video/gspca/ov534_9.c
@@ -785,7 +785,7 @@ static void reg_w_i(struct gspca_dev *gspca_dev, u16 reg, u8 val)
USB_DIR_OUT | USB_TYPE_VENDOR | USB_RECIP_DEVICE,
0x00, reg, gspca_dev->usb_buf, 1, CTRL_TIMEOUT);
if (ret < 0) {
- PDEBUG(D_ERR, "reg_w failed %d", ret);
+ err("reg_w failed %d", ret);
gspca_dev->usb_err = ret;
}
}
@@ -810,7 +810,7 @@ static u8 reg_r(struct gspca_dev *gspca_dev, u16 reg)
0x00, reg, gspca_dev->usb_buf, 1, CTRL_TIMEOUT);
PDEBUG(D_USBI, "reg_r [%04x] -> %02x", reg, gspca_dev->usb_buf[0]);
if (ret < 0) {
- PDEBUG(D_ERR, "reg_r err %d", ret);
+ err("reg_r err %d", ret);
gspca_dev->usb_err = ret;
}
return gspca_dev->usb_buf[0];
@@ -848,7 +848,7 @@ static void sccb_write(struct gspca_dev *gspca_dev, u8 reg, u8 val)
reg_w_i(gspca_dev, OV534_REG_OPERATION, OV534_OP_WRITE_3);
if (!sccb_check_status(gspca_dev))
- PDEBUG(D_ERR, "sccb_write failed");
+ err("sccb_write failed");
}
static u8 sccb_read(struct gspca_dev *gspca_dev, u16 reg)
@@ -856,11 +856,11 @@ static u8 sccb_read(struct gspca_dev *gspca_dev, u16 reg)
reg_w(gspca_dev, OV534_REG_SUBADDR, reg);
reg_w(gspca_dev, OV534_REG_OPERATION, OV534_OP_WRITE_2);
if (!sccb_check_status(gspca_dev))
- PDEBUG(D_ERR, "sccb_read failed 1");
+ err("sccb_read failed 1");
reg_w(gspca_dev, OV534_REG_OPERATION, OV534_OP_READ_2);
if (!sccb_check_status(gspca_dev))
- PDEBUG(D_ERR, "sccb_read failed 2");
+ err("sccb_read failed 2");
return reg_r(gspca_dev, OV534_REG_READ);
}
@@ -1458,19 +1458,12 @@ static struct usb_driver sd_driver = {
/* -- module insert / remove -- */
static int __init sd_mod_init(void)
{
- int ret;
-
- ret = usb_register(&sd_driver);
- if (ret < 0)
- return ret;
- PDEBUG(D_PROBE, "registered");
- return 0;
+ return usb_register(&sd_driver);
}
static void __exit sd_mod_exit(void)
{
usb_deregister(&sd_driver);
- PDEBUG(D_PROBE, "deregistered");
}
module_init(sd_mod_init);
diff --git a/drivers/media/video/gspca/pac207.c b/drivers/media/video/gspca/pac207.c
index a40f8893310d..15e97fa4c337 100644
--- a/drivers/media/video/gspca/pac207.c
+++ b/drivers/media/video/gspca/pac207.c
@@ -1,7 +1,7 @@
/*
* Pixart PAC207BCA library
*
- * Copyright (C) 2008 Hans de Goede <hdgoede@redhat.com>
+ * Copyright (C) 2008 Hans de Goede <hdegoede@redhat.com>
* Copyright (C) 2005 Thomas Kaiser thomas@kaiser-linux.li
* Copyleft (C) 2005 Michel Xhaard mxhaard@magic.fr
*
@@ -28,7 +28,7 @@
#include <linux/input.h>
#include "gspca.h"
-MODULE_AUTHOR("Hans de Goede <hdgoede@redhat.com>");
+MODULE_AUTHOR("Hans de Goede <hdegoede@redhat.com>");
MODULE_DESCRIPTION("Pixart PAC207");
MODULE_LICENSE("GPL");
@@ -45,7 +45,7 @@ MODULE_LICENSE("GPL");
#define PAC207_GAIN_MIN 0
#define PAC207_GAIN_MAX 31
-#define PAC207_GAIN_DEFAULT 9 /* power on default: 9 */
+#define PAC207_GAIN_DEFAULT 9 /* power on default: 9 */
#define PAC207_GAIN_KNEE 31
#define PAC207_AUTOGAIN_DEADZONE 30
@@ -178,8 +178,7 @@ static int pac207_write_regs(struct gspca_dev *gspca_dev, u16 index,
0x00, index,
gspca_dev->usb_buf, length, PAC207_CTRL_TIMEOUT);
if (err < 0)
- PDEBUG(D_ERR,
- "Failed to write registers to index 0x%04X, error %d)",
+ err("Failed to write registers to index 0x%04X, error %d)",
index, err);
return err;
@@ -195,7 +194,7 @@ static int pac207_write_reg(struct gspca_dev *gspca_dev, u16 index, u16 value)
USB_DIR_OUT | USB_TYPE_VENDOR | USB_RECIP_INTERFACE,
value, index, NULL, 0, PAC207_CTRL_TIMEOUT);
if (err)
- PDEBUG(D_ERR, "Failed to write a register (index 0x%04X,"
+ err("Failed to write a register (index 0x%04X,"
" value 0x%02X, error %d)", index, value, err);
return err;
@@ -211,8 +210,7 @@ static int pac207_read_reg(struct gspca_dev *gspca_dev, u16 index)
0x00, index,
gspca_dev->usb_buf, 1, PAC207_CTRL_TIMEOUT);
if (res < 0) {
- PDEBUG(D_ERR,
- "Failed to read a register (index 0x%04X, error %d)",
+ err("Failed to read a register (index 0x%04X, error %d)",
index, res);
return res;
}
@@ -496,7 +494,7 @@ static int sd_getautogain(struct gspca_dev *gspca_dev, __s32 *val)
return 0;
}
-#ifdef CONFIG_INPUT
+#if defined(CONFIG_INPUT) || defined(CONFIG_INPUT_MODULE)
static int sd_int_pkt_scan(struct gspca_dev *gspca_dev,
u8 *data, /* interrupt packet data */
int len) /* interrput packet length */
@@ -526,7 +524,7 @@ static const struct sd_desc sd_desc = {
.stopN = sd_stopN,
.dq_callback = pac207_do_auto_gain,
.pkt_scan = sd_pkt_scan,
-#ifdef CONFIG_INPUT
+#if defined(CONFIG_INPUT) || defined(CONFIG_INPUT_MODULE)
.int_pkt_scan = sd_int_pkt_scan,
#endif
};
@@ -572,17 +570,11 @@ static struct usb_driver sd_driver = {
/* -- module insert / remove -- */
static int __init sd_mod_init(void)
{
- int ret;
- ret = usb_register(&sd_driver);
- if (ret < 0)
- return ret;
- PDEBUG(D_PROBE, "registered");
- return 0;
+ return usb_register(&sd_driver);
}
static void __exit sd_mod_exit(void)
{
usb_deregister(&sd_driver);
- PDEBUG(D_PROBE, "deregistered");
}
module_init(sd_mod_init);
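The widened preprocessor guards in the pac207.c hunks above (and repeated across the other gspca drivers in this patch) keep the snapshot-button input reporting compiled in when the input core is modular: for a tristate option set to =m, Kconfig defines CONFIG_INPUT_MODULE rather than CONFIG_INPUT for code built outside that module, so a bare #ifdef CONFIG_INPUT would silently drop the button handling.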
diff --git a/drivers/media/video/gspca/pac7302.c b/drivers/media/video/gspca/pac7302.c
index a66df07d7625..55fbea7381b0 100644
--- a/drivers/media/video/gspca/pac7302.c
+++ b/drivers/media/video/gspca/pac7302.c
@@ -408,9 +408,8 @@ static void reg_w_buf(struct gspca_dev *gspca_dev,
index, gspca_dev->usb_buf, len,
500);
if (ret < 0) {
- PDEBUG(D_ERR, "reg_w_buf(): "
- "Failed to write registers to index 0x%x, error %i",
- index, ret);
+ err("reg_w_buf failed index 0x%02x, error %d",
+ index, ret);
gspca_dev->usb_err = ret;
}
}
@@ -432,9 +431,8 @@ static void reg_w(struct gspca_dev *gspca_dev,
0, index, gspca_dev->usb_buf, 1,
500);
if (ret < 0) {
- PDEBUG(D_ERR, "reg_w(): "
- "Failed to write register to index 0x%x, value 0x%x, error %i",
- index, value, ret);
+ err("reg_w() failed index 0x%02x, value 0x%02x, error %d",
+ index, value, ret);
gspca_dev->usb_err = ret;
}
}
@@ -468,10 +466,9 @@ static void reg_w_page(struct gspca_dev *gspca_dev,
0, index, gspca_dev->usb_buf, 1,
500);
if (ret < 0) {
- PDEBUG(D_ERR, "reg_w_page(): "
- "Failed to write register to index 0x%x, "
- "value 0x%x, error %i",
- index, page[index], ret);
+ err("reg_w_page() failed index 0x%02x, "
+ "value 0x%02x, error %d",
+ index, page[index], ret);
gspca_dev->usb_err = ret;
break;
}
@@ -900,9 +897,8 @@ static int sd_setcontrast(struct gspca_dev *gspca_dev, __s32 val)
struct sd *sd = (struct sd *) gspca_dev;
sd->contrast = val;
- if (gspca_dev->streaming) {
+ if (gspca_dev->streaming)
setbrightcont(gspca_dev);
- }
return gspca_dev->usb_err;
}
@@ -1135,7 +1131,7 @@ static int sd_chip_ident(struct gspca_dev *gspca_dev,
}
#endif
-#ifdef CONFIG_INPUT
+#if defined(CONFIG_INPUT) || defined(CONFIG_INPUT_MODULE)
static int sd_int_pkt_scan(struct gspca_dev *gspca_dev,
u8 *data, /* interrupt packet data */
int len) /* interrput packet length */
@@ -1182,7 +1178,7 @@ static const struct sd_desc sd_desc = {
.set_register = sd_dbg_s_register,
.get_chip_ident = sd_chip_ident,
#endif
-#ifdef CONFIG_INPUT
+#if defined(CONFIG_INPUT) || defined(CONFIG_INPUT_MODULE)
.int_pkt_scan = sd_int_pkt_scan,
#endif
};
@@ -1226,17 +1222,11 @@ static struct usb_driver sd_driver = {
/* -- module insert / remove -- */
static int __init sd_mod_init(void)
{
- int ret;
- ret = usb_register(&sd_driver);
- if (ret < 0)
- return ret;
- PDEBUG(D_PROBE, "registered");
- return 0;
+ return usb_register(&sd_driver);
}
static void __exit sd_mod_exit(void)
{
usb_deregister(&sd_driver);
- PDEBUG(D_PROBE, "deregistered");
}
module_init(sd_mod_init);
diff --git a/drivers/media/video/gspca/pac7311.c b/drivers/media/video/gspca/pac7311.c
index 1cb7e99e92bd..7657b43b3203 100644
--- a/drivers/media/video/gspca/pac7311.c
+++ b/drivers/media/video/gspca/pac7311.c
@@ -276,9 +276,8 @@ static void reg_w_buf(struct gspca_dev *gspca_dev,
index, gspca_dev->usb_buf, len,
500);
if (ret < 0) {
- PDEBUG(D_ERR, "reg_w_buf(): "
- "Failed to write registers to index 0x%x, error %i",
- index, ret);
+ err("reg_w_buf() failed index 0x%02x, error %d",
+ index, ret);
gspca_dev->usb_err = ret;
}
}
@@ -300,9 +299,8 @@ static void reg_w(struct gspca_dev *gspca_dev,
0, index, gspca_dev->usb_buf, 1,
500);
if (ret < 0) {
- PDEBUG(D_ERR, "reg_w(): "
- "Failed to write register to index 0x%x, value 0x%x, error %i",
- index, value, ret);
+ err("reg_w() failed index 0x%02x, value 0x%02x, error %d",
+ index, value, ret);
gspca_dev->usb_err = ret;
}
}
@@ -336,10 +334,9 @@ static void reg_w_page(struct gspca_dev *gspca_dev,
0, index, gspca_dev->usb_buf, 1,
500);
if (ret < 0) {
- PDEBUG(D_ERR, "reg_w_page(): "
- "Failed to write register to index 0x%x, "
- "value 0x%x, error %i",
- index, page[index], ret);
+ err("reg_w_page() failed index 0x%02x, "
+ "value 0x%02x, error %d",
+ index, page[index], ret);
gspca_dev->usb_err = ret;
break;
}
@@ -675,9 +672,8 @@ static int sd_setcontrast(struct gspca_dev *gspca_dev, __s32 val)
struct sd *sd = (struct sd *) gspca_dev;
sd->contrast = val;
- if (gspca_dev->streaming) {
+ if (gspca_dev->streaming)
setcontrast(gspca_dev);
- }
return gspca_dev->usb_err;
}
@@ -792,7 +788,7 @@ static int sd_getvflip(struct gspca_dev *gspca_dev, __s32 *val)
return 0;
}
-#ifdef CONFIG_INPUT
+#if defined(CONFIG_INPUT) || defined(CONFIG_INPUT_MODULE)
static int sd_int_pkt_scan(struct gspca_dev *gspca_dev,
u8 *data, /* interrupt packet data */
int len) /* interrupt packet length */
@@ -835,7 +831,7 @@ static const struct sd_desc sd_desc = {
.stop0 = sd_stop0,
.pkt_scan = sd_pkt_scan,
.dq_callback = do_autogain,
-#ifdef CONFIG_INPUT
+#if defined(CONFIG_INPUT) || defined(CONFIG_INPUT_MODULE)
.int_pkt_scan = sd_int_pkt_scan,
#endif
};
@@ -874,17 +870,11 @@ static struct usb_driver sd_driver = {
/* -- module insert / remove -- */
static int __init sd_mod_init(void)
{
- int ret;
- ret = usb_register(&sd_driver);
- if (ret < 0)
- return ret;
- PDEBUG(D_PROBE, "registered");
- return 0;
+ return usb_register(&sd_driver);
}
static void __exit sd_mod_exit(void)
{
usb_deregister(&sd_driver);
- PDEBUG(D_PROBE, "deregistered");
}
module_init(sd_mod_init);
diff --git a/drivers/media/video/gspca/sn9c2028.c b/drivers/media/video/gspca/sn9c2028.c
index 71d9447a7986..40a06680502d 100644
--- a/drivers/media/video/gspca/sn9c2028.c
+++ b/drivers/media/video/gspca/sn9c2028.c
@@ -75,7 +75,7 @@ static int sn9c2028_command(struct gspca_dev *gspca_dev, u8 *command)
USB_DIR_OUT | USB_TYPE_VENDOR | USB_RECIP_INTERFACE,
2, 0, gspca_dev->usb_buf, 6, 500);
if (rc < 0) {
- PDEBUG(D_ERR, "command write [%02x] error %d",
+ err("command write [%02x] error %d",
gspca_dev->usb_buf[0], rc);
return rc;
}
@@ -93,7 +93,7 @@ static int sn9c2028_read1(struct gspca_dev *gspca_dev)
USB_DIR_IN | USB_TYPE_VENDOR | USB_RECIP_INTERFACE,
1, 0, gspca_dev->usb_buf, 1, 500);
if (rc != 1) {
- PDEBUG(D_ERR, "read1 error %d", rc);
+ err("read1 error %d", rc);
return (rc < 0) ? rc : -EIO;
}
PDEBUG(D_USBI, "read1 response %02x", gspca_dev->usb_buf[0]);
@@ -109,7 +109,7 @@ static int sn9c2028_read4(struct gspca_dev *gspca_dev, u8 *reading)
USB_DIR_IN | USB_TYPE_VENDOR | USB_RECIP_INTERFACE,
4, 0, gspca_dev->usb_buf, 4, 500);
if (rc != 4) {
- PDEBUG(D_ERR, "read4 error %d", rc);
+ err("read4 error %d", rc);
return (rc < 0) ? rc : -EIO;
}
memcpy(reading, gspca_dev->usb_buf, 4);
@@ -131,7 +131,7 @@ static int sn9c2028_long_command(struct gspca_dev *gspca_dev, u8 *command)
for (i = 0; i < 256 && status < 2; i++)
status = sn9c2028_read1(gspca_dev);
if (status != 2) {
- PDEBUG(D_ERR, "long command status read error %d", status);
+ err("long command status read error %d", status);
return (status < 0) ? status : -EIO;
}
@@ -638,7 +638,7 @@ static int sd_start(struct gspca_dev *gspca_dev)
err_code = start_vivitar_cam(gspca_dev);
break;
default:
- PDEBUG(D_ERR, "Starting unknown camera, please report this");
+ err("Starting unknown camera, please report this");
return -ENXIO;
}
@@ -738,19 +738,12 @@ static struct usb_driver sd_driver = {
/* -- module insert / remove -- */
static int __init sd_mod_init(void)
{
- int ret;
-
- ret = usb_register(&sd_driver);
- if (ret < 0)
- return ret;
- PDEBUG(D_PROBE, "registered");
- return 0;
+ return usb_register(&sd_driver);
}
static void __exit sd_mod_exit(void)
{
usb_deregister(&sd_driver);
- PDEBUG(D_PROBE, "deregistered");
}
module_init(sd_mod_init);
diff --git a/drivers/media/video/gspca/sn9c20x.c b/drivers/media/video/gspca/sn9c20x.c
index 9052d5702556..6b155ae3a746 100644
--- a/drivers/media/video/gspca/sn9c20x.c
+++ b/drivers/media/video/gspca/sn9c20x.c
@@ -18,9 +18,7 @@
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*/
-#ifdef CONFIG_INPUT
#include <linux/input.h>
-#endif
#include "gspca.h"
#include "jpeg.h"
@@ -347,8 +345,8 @@ static const struct ctrl sd_ctrls[] = {
static const struct v4l2_pix_format vga_mode[] = {
{160, 120, V4L2_PIX_FMT_JPEG, V4L2_FIELD_NONE,
- .bytesperline = 240,
- .sizeimage = 240 * 120,
+ .bytesperline = 160,
+ .sizeimage = 160 * 120 * 4 / 8 + 590,
.colorspace = V4L2_COLORSPACE_JPEG,
.priv = 0 | MODE_JPEG},
{160, 120, V4L2_PIX_FMT_SBGGR8, V4L2_FIELD_NONE,
@@ -357,13 +355,13 @@ static const struct v4l2_pix_format vga_mode[] = {
.colorspace = V4L2_COLORSPACE_SRGB,
.priv = 0 | MODE_RAW},
{160, 120, V4L2_PIX_FMT_SN9C20X_I420, V4L2_FIELD_NONE,
- .bytesperline = 240,
+ .bytesperline = 160,
.sizeimage = 240 * 120,
.colorspace = V4L2_COLORSPACE_SRGB,
.priv = 0},
{320, 240, V4L2_PIX_FMT_JPEG, V4L2_FIELD_NONE,
- .bytesperline = 480,
- .sizeimage = 480 * 240 ,
+ .bytesperline = 320,
+ .sizeimage = 320 * 240 * 3 / 8 + 590,
.colorspace = V4L2_COLORSPACE_JPEG,
.priv = 1 | MODE_JPEG},
{320, 240, V4L2_PIX_FMT_SBGGR8, V4L2_FIELD_NONE,
@@ -372,13 +370,13 @@ static const struct v4l2_pix_format vga_mode[] = {
.colorspace = V4L2_COLORSPACE_SRGB,
.priv = 1 | MODE_RAW},
{320, 240, V4L2_PIX_FMT_SN9C20X_I420, V4L2_FIELD_NONE,
- .bytesperline = 480,
+ .bytesperline = 320,
.sizeimage = 480 * 240 ,
.colorspace = V4L2_COLORSPACE_SRGB,
.priv = 1},
{640, 480, V4L2_PIX_FMT_JPEG, V4L2_FIELD_NONE,
- .bytesperline = 960,
- .sizeimage = 960 * 480,
+ .bytesperline = 640,
+ .sizeimage = 640 * 480 * 3 / 8 + 590,
.colorspace = V4L2_COLORSPACE_JPEG,
.priv = 2 | MODE_JPEG},
{640, 480, V4L2_PIX_FMT_SBGGR8, V4L2_FIELD_NONE,
@@ -387,7 +385,7 @@ static const struct v4l2_pix_format vga_mode[] = {
.colorspace = V4L2_COLORSPACE_SRGB,
.priv = 2 | MODE_RAW},
{640, 480, V4L2_PIX_FMT_SN9C20X_I420, V4L2_FIELD_NONE,
- .bytesperline = 960,
+ .bytesperline = 640,
.sizeimage = 960 * 480,
.colorspace = V4L2_COLORSPACE_SRGB,
.priv = 2},
@@ -395,8 +393,8 @@ static const struct v4l2_pix_format vga_mode[] = {
static const struct v4l2_pix_format sxga_mode[] = {
{160, 120, V4L2_PIX_FMT_JPEG, V4L2_FIELD_NONE,
- .bytesperline = 240,
- .sizeimage = 240 * 120,
+ .bytesperline = 160,
+ .sizeimage = 160 * 120 * 4 / 8 + 590,
.colorspace = V4L2_COLORSPACE_JPEG,
.priv = 0 | MODE_JPEG},
{160, 120, V4L2_PIX_FMT_SBGGR8, V4L2_FIELD_NONE,
@@ -405,13 +403,13 @@ static const struct v4l2_pix_format sxga_mode[] = {
.colorspace = V4L2_COLORSPACE_SRGB,
.priv = 0 | MODE_RAW},
{160, 120, V4L2_PIX_FMT_SN9C20X_I420, V4L2_FIELD_NONE,
- .bytesperline = 240,
+ .bytesperline = 160,
.sizeimage = 240 * 120,
.colorspace = V4L2_COLORSPACE_SRGB,
.priv = 0},
{320, 240, V4L2_PIX_FMT_JPEG, V4L2_FIELD_NONE,
- .bytesperline = 480,
- .sizeimage = 480 * 240 ,
+ .bytesperline = 320,
+ .sizeimage = 320 * 240 * 3 / 8 + 590,
.colorspace = V4L2_COLORSPACE_JPEG,
.priv = 1 | MODE_JPEG},
{320, 240, V4L2_PIX_FMT_SBGGR8, V4L2_FIELD_NONE,
@@ -420,13 +418,13 @@ static const struct v4l2_pix_format sxga_mode[] = {
.colorspace = V4L2_COLORSPACE_SRGB,
.priv = 1 | MODE_RAW},
{320, 240, V4L2_PIX_FMT_SN9C20X_I420, V4L2_FIELD_NONE,
- .bytesperline = 480,
+ .bytesperline = 320,
.sizeimage = 480 * 240 ,
.colorspace = V4L2_COLORSPACE_SRGB,
.priv = 1},
{640, 480, V4L2_PIX_FMT_JPEG, V4L2_FIELD_NONE,
- .bytesperline = 960,
- .sizeimage = 960 * 480,
+ .bytesperline = 640,
+ .sizeimage = 640 * 480 * 3 / 8 + 590,
.colorspace = V4L2_COLORSPACE_JPEG,
.priv = 2 | MODE_JPEG},
{640, 480, V4L2_PIX_FMT_SBGGR8, V4L2_FIELD_NONE,
@@ -435,13 +433,13 @@ static const struct v4l2_pix_format sxga_mode[] = {
.colorspace = V4L2_COLORSPACE_SRGB,
.priv = 2 | MODE_RAW},
{640, 480, V4L2_PIX_FMT_SN9C20X_I420, V4L2_FIELD_NONE,
- .bytesperline = 960,
+ .bytesperline = 640,
.sizeimage = 960 * 480,
.colorspace = V4L2_COLORSPACE_SRGB,
.priv = 2},
{1280, 1024, V4L2_PIX_FMT_SBGGR8, V4L2_FIELD_NONE,
.bytesperline = 1280,
- .sizeimage = (1280 * 1024) + 64,
+ .sizeimage = 1280 * 1024,
.colorspace = V4L2_COLORSPACE_SRGB,
.priv = 3 | MODE_RAW | MODE_SXGA},
};
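The JPEG entries in the sn9c20x.c mode tables above switch sizeimage from the uncompressed YUV size to a compressed-size estimate of roughly 3 to 4 bits per pixel plus a fixed 590 byte allowance (presumably for the prepended JPEG header), while bytesperline now simply carries the line width. For the 640x480 JPEG modes that works out to 640 * 480 * 3 / 8 + 590 = 115790 bytes instead of the old 960 * 480 = 460800.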
@@ -1272,7 +1270,8 @@ static int soi968_init_sensor(struct gspca_dev *gspca_dev)
}
}
/* disable hflip and vflip */
- gspca_dev->ctrl_dis = (1 << HFLIP_IDX) | (1 << VFLIP_IDX) | (1 << EXPOSURE_IDX);
+ gspca_dev->ctrl_dis = (1 << HFLIP_IDX) | (1 << VFLIP_IDX)
+ | (1 << EXPOSURE_IDX);
sd->hstart = 60;
sd->vstart = 11;
return 0;
@@ -1351,7 +1350,9 @@ static int mt9v_init_sensor(struct gspca_dev *gspca_dev)
return -ENODEV;
}
}
- gspca_dev->ctrl_dis = (1 << EXPOSURE_IDX) | (1 << AUTOGAIN_IDX) | (1 << GAIN_IDX);
+ gspca_dev->ctrl_dis = (1 << EXPOSURE_IDX)
+ | (1 << AUTOGAIN_IDX)
+ | (1 << GAIN_IDX);
sd->hstart = 2;
sd->vstart = 2;
sd->sensor = SENSOR_MT9V111;
@@ -1395,7 +1396,8 @@ static int mt9m112_init_sensor(struct gspca_dev *gspca_dev)
return -ENODEV;
}
}
- gspca_dev->ctrl_dis = (1 << EXPOSURE_IDX) | (1 << AUTOGAIN_IDX) | (1 << GAIN_IDX);
+ gspca_dev->ctrl_dis = (1 << EXPOSURE_IDX) | (1 << AUTOGAIN_IDX)
+ | (1 << GAIN_IDX);
sd->hstart = 0;
sd->vstart = 2;
return 0;
@@ -1412,7 +1414,8 @@ static int mt9m111_init_sensor(struct gspca_dev *gspca_dev)
return -ENODEV;
}
}
- gspca_dev->ctrl_dis = (1 << EXPOSURE_IDX) | (1 << AUTOGAIN_IDX) | (1 << GAIN_IDX);
+ gspca_dev->ctrl_dis = (1 << EXPOSURE_IDX) | (1 << AUTOGAIN_IDX)
+ | (1 << GAIN_IDX);
sd->hstart = 0;
sd->vstart = 2;
return 0;
@@ -2304,7 +2307,7 @@ static void sd_dqcallback(struct gspca_dev *gspca_dev)
do_autoexposure(gspca_dev, avg_lum);
}
-#ifdef CONFIG_INPUT
+#if defined(CONFIG_INPUT) || defined(CONFIG_INPUT_MODULE)
static int sd_int_pkt_scan(struct gspca_dev *gspca_dev,
u8 *data, /* interrupt packet */
int len) /* interrupt packet length */
@@ -2386,7 +2389,7 @@ static const struct sd_desc sd_desc = {
.start = sd_start,
.stopN = sd_stopN,
.pkt_scan = sd_pkt_scan,
-#ifdef CONFIG_INPUT
+#if defined(CONFIG_INPUT) || defined(CONFIG_INPUT_MODULE)
.int_pkt_scan = sd_int_pkt_scan,
#endif
.dq_callback = sd_dqcallback,
@@ -2467,17 +2470,11 @@ static struct usb_driver sd_driver = {
/* -- module insert / remove -- */
static int __init sd_mod_init(void)
{
- int ret;
- ret = usb_register(&sd_driver);
- if (ret < 0)
- return ret;
- info("registered");
- return 0;
+ return usb_register(&sd_driver);
}
static void __exit sd_mod_exit(void)
{
usb_deregister(&sd_driver);
- info("deregistered");
}
module_init(sd_mod_init);
diff --git a/drivers/media/video/gspca/sonixb.c b/drivers/media/video/gspca/sonixb.c
index 204bb3af4559..706f96f92654 100644
--- a/drivers/media/video/gspca/sonixb.c
+++ b/drivers/media/video/gspca/sonixb.c
@@ -323,10 +323,9 @@ static const __u8 initOv6650[] = {
0x00, 0x01, 0x01, 0x0a, 0x16, 0x12, 0x68, 0x8b,
0x10, 0x1d, 0x10, 0x02, 0x02, 0x09, 0x07
};
-static const __u8 ov6650_sensor_init[][8] =
-{
+static const __u8 ov6650_sensor_init[][8] = {
/* Bright, contrast, etc are set through SCBB interface.
- * AVCAP on win2 do not send any data on this controls. */
+ * AVCAP on win2 do not send any data on this controls. */
/* Anyway, some registers appears to alter bright and constrat */
/* Reset sensor */
@@ -544,7 +543,7 @@ static const __u8 initTas5130[] = {
0x18, 0x10, 0x04, 0x03, 0x11, 0x0c
};
static const __u8 tas5130_sensor_init[][8] = {
-/* {0x30, 0x11, 0x00, 0x40, 0x47, 0x00, 0x00, 0x10},
+/* {0x30, 0x11, 0x00, 0x40, 0x47, 0x00, 0x00, 0x10},
* shutter 0x47 short exposure? */
{0x30, 0x11, 0x00, 0x40, 0x01, 0x00, 0x00, 0x10},
/* shutter 0x01 long exposure */
@@ -861,7 +860,7 @@ static void setexposure(struct gspca_dev *gspca_dev)
i2c[4] |= reg11 - 1;
/* If register 11 didn't change, don't change it */
- if (sd->reg11 == reg11 )
+ if (sd->reg11 == reg11)
i2c[0] = 0xa0;
if (i2c_w(gspca_dev, i2c) == 0)
@@ -1388,7 +1387,7 @@ static int sd_querymenu(struct gspca_dev *gspca_dev,
return -EINVAL;
}
-#ifdef CONFIG_INPUT
+#if defined(CONFIG_INPUT) || defined(CONFIG_INPUT_MODULE)
static int sd_int_pkt_scan(struct gspca_dev *gspca_dev,
u8 *data, /* interrupt packet data */
int len) /* interrupt packet length */
@@ -1419,7 +1418,7 @@ static const struct sd_desc sd_desc = {
.pkt_scan = sd_pkt_scan,
.querymenu = sd_querymenu,
.dq_callback = do_autogain,
-#ifdef CONFIG_INPUT
+#if defined(CONFIG_INPUT) || defined(CONFIG_INPUT_MODULE)
.int_pkt_scan = sd_int_pkt_scan,
#endif
};
@@ -1479,17 +1478,11 @@ static struct usb_driver sd_driver = {
/* -- module insert / remove -- */
static int __init sd_mod_init(void)
{
- int ret;
- ret = usb_register(&sd_driver);
- if (ret < 0)
- return ret;
- PDEBUG(D_PROBE, "registered");
- return 0;
+ return usb_register(&sd_driver);
}
static void __exit sd_mod_exit(void)
{
usb_deregister(&sd_driver);
- PDEBUG(D_PROBE, "deregistered");
}
module_init(sd_mod_init);
diff --git a/drivers/media/video/gspca/sonixj.c b/drivers/media/video/gspca/sonixj.c
index 370544361be2..330dadc00106 100644
--- a/drivers/media/video/gspca/sonixj.c
+++ b/drivers/media/video/gspca/sonixj.c
@@ -31,24 +31,32 @@ MODULE_AUTHOR("Jean-François Moine <http://moinejf.free.fr>");
MODULE_DESCRIPTION("GSPCA/SONIX JPEG USB Camera Driver");
MODULE_LICENSE("GPL");
+/* controls */
+enum e_ctrl {
+ BRIGHTNESS,
+ CONTRAST,
+ COLORS,
+ BLUE,
+ RED,
+ GAMMA,
+ AUTOGAIN,
+ HFLIP,
+ VFLIP,
+ SHARPNESS,
+ INFRARED,
+ FREQ,
+ NCTRLS /* number of controls */
+};
+
/* specific webcam descriptor */
struct sd {
struct gspca_dev gspca_dev; /* !! must be the first item */
+ struct gspca_ctrl ctrls[NCTRLS];
+
atomic_t avg_lum;
u32 exposure;
- u16 brightness;
- u8 contrast;
- u8 colors;
- u8 autogain;
- u8 blue;
- u8 red;
- u8 gamma;
- u8 vflip; /* ov7630/ov7648 only */
- u8 sharpness;
- u8 infrared; /* mt9v111 only */
- u8 freq; /* ov76xx only */
u8 quality; /* image quality */
#define QUALITY_MIN 60
#define QUALITY_MAX 95
@@ -75,6 +83,7 @@ enum sensors {
SENSOR_GC0307,
SENSOR_HV7131R,
SENSOR_MI0360,
+ SENSOR_MI0360B,
SENSOR_MO4000,
SENSOR_MT9V111,
SENSOR_OM6802,
@@ -88,48 +97,31 @@ enum sensors {
};
/* V4L2 controls supported by the driver */
-static int sd_setbrightness(struct gspca_dev *gspca_dev, __s32 val);
-static int sd_getbrightness(struct gspca_dev *gspca_dev, __s32 *val);
-static int sd_setcontrast(struct gspca_dev *gspca_dev, __s32 val);
-static int sd_getcontrast(struct gspca_dev *gspca_dev, __s32 *val);
-static int sd_setcolors(struct gspca_dev *gspca_dev, __s32 val);
-static int sd_getcolors(struct gspca_dev *gspca_dev, __s32 *val);
-static int sd_setblue_balance(struct gspca_dev *gspca_dev, __s32 val);
-static int sd_getblue_balance(struct gspca_dev *gspca_dev, __s32 *val);
-static int sd_setred_balance(struct gspca_dev *gspca_dev, __s32 val);
-static int sd_getred_balance(struct gspca_dev *gspca_dev, __s32 *val);
-static int sd_setgamma(struct gspca_dev *gspca_dev, __s32 val);
-static int sd_getgamma(struct gspca_dev *gspca_dev, __s32 *val);
-static int sd_setautogain(struct gspca_dev *gspca_dev, __s32 val);
-static int sd_getautogain(struct gspca_dev *gspca_dev, __s32 *val);
-static int sd_setvflip(struct gspca_dev *gspca_dev, __s32 val);
-static int sd_getvflip(struct gspca_dev *gspca_dev, __s32 *val);
-static int sd_setsharpness(struct gspca_dev *gspca_dev, __s32 val);
-static int sd_getsharpness(struct gspca_dev *gspca_dev, __s32 *val);
-static int sd_setinfrared(struct gspca_dev *gspca_dev, __s32 val);
-static int sd_getinfrared(struct gspca_dev *gspca_dev, __s32 *val);
-static int sd_setfreq(struct gspca_dev *gspca_dev, __s32 val);
-static int sd_getfreq(struct gspca_dev *gspca_dev, __s32 *val);
-
-static const struct ctrl sd_ctrls[] = {
-#define BRIGHTNESS_IDX 0
- {
+static void setbrightness(struct gspca_dev *gspca_dev);
+static void setcontrast(struct gspca_dev *gspca_dev);
+static void setcolors(struct gspca_dev *gspca_dev);
+static void setredblue(struct gspca_dev *gspca_dev);
+static void setgamma(struct gspca_dev *gspca_dev);
+static void setautogain(struct gspca_dev *gspca_dev);
+static void sethvflip(struct gspca_dev *gspca_dev);
+static void setsharpness(struct gspca_dev *gspca_dev);
+static void setinfrared(struct gspca_dev *gspca_dev);
+static void setfreq(struct gspca_dev *gspca_dev);
+
+static const struct ctrl sd_ctrls[NCTRLS] = {
+[BRIGHTNESS] = {
{
.id = V4L2_CID_BRIGHTNESS,
.type = V4L2_CTRL_TYPE_INTEGER,
.name = "Brightness",
.minimum = 0,
-#define BRIGHTNESS_MAX 0xffff
- .maximum = BRIGHTNESS_MAX,
+ .maximum = 0xff,
.step = 1,
-#define BRIGHTNESS_DEF 0x8000
- .default_value = BRIGHTNESS_DEF,
+ .default_value = 0x80,
},
- .set = sd_setbrightness,
- .get = sd_getbrightness,
+ .set_control = setbrightness
},
-#define CONTRAST_IDX 1
- {
+[CONTRAST] = {
{
.id = V4L2_CID_CONTRAST,
.type = V4L2_CTRL_TYPE_INTEGER,
@@ -138,14 +130,11 @@ static const struct ctrl sd_ctrls[] = {
#define CONTRAST_MAX 127
.maximum = CONTRAST_MAX,
.step = 1,
-#define CONTRAST_DEF 63
- .default_value = CONTRAST_DEF,
+ .default_value = 63,
},
- .set = sd_setcontrast,
- .get = sd_getcontrast,
+ .set_control = setcontrast
},
-#define COLOR_IDX 2
- {
+[COLORS] = {
{
.id = V4L2_CID_SATURATION,
.type = V4L2_CTRL_TYPE_INTEGER,
@@ -153,14 +142,12 @@ static const struct ctrl sd_ctrls[] = {
.minimum = 0,
.maximum = 40,
.step = 1,
-#define COLOR_DEF 25
- .default_value = COLOR_DEF,
+#define COLORS_DEF 25
+ .default_value = COLORS_DEF,
},
- .set = sd_setcolors,
- .get = sd_getcolors,
+ .set_control = setcolors
},
-#define BLUE_BALANCE_IDX 3
- {
+[BLUE] = {
{
.id = V4L2_CID_BLUE_BALANCE,
.type = V4L2_CTRL_TYPE_INTEGER,
@@ -168,14 +155,11 @@ static const struct ctrl sd_ctrls[] = {
.minimum = 24,
.maximum = 40,
.step = 1,
-#define BLUE_BALANCE_DEF 32
- .default_value = BLUE_BALANCE_DEF,
+ .default_value = 32,
},
- .set = sd_setblue_balance,
- .get = sd_getblue_balance,
+ .set_control = setredblue
},
-#define RED_BALANCE_IDX 4
- {
+[RED] = {
{
.id = V4L2_CID_RED_BALANCE,
.type = V4L2_CTRL_TYPE_INTEGER,
@@ -183,14 +167,11 @@ static const struct ctrl sd_ctrls[] = {
.minimum = 24,
.maximum = 40,
.step = 1,
-#define RED_BALANCE_DEF 32
- .default_value = RED_BALANCE_DEF,
+ .default_value = 32,
},
- .set = sd_setred_balance,
- .get = sd_getred_balance,
+ .set_control = setredblue
},
-#define GAMMA_IDX 5
- {
+[GAMMA] = {
{
.id = V4L2_CID_GAMMA,
.type = V4L2_CTRL_TYPE_INTEGER,
@@ -201,11 +182,9 @@ static const struct ctrl sd_ctrls[] = {
#define GAMMA_DEF 20
.default_value = GAMMA_DEF,
},
- .set = sd_setgamma,
- .get = sd_getgamma,
+ .set_control = setgamma
},
-#define AUTOGAIN_IDX 6
- {
+[AUTOGAIN] = {
{
.id = V4L2_CID_AUTOGAIN,
.type = V4L2_CTRL_TYPE_BOOLEAN,
@@ -213,15 +192,23 @@ static const struct ctrl sd_ctrls[] = {
.minimum = 0,
.maximum = 1,
.step = 1,
-#define AUTOGAIN_DEF 1
- .default_value = AUTOGAIN_DEF,
+ .default_value = 1
+ },
+ .set_control = setautogain
+ },
+[HFLIP] = {
+ {
+ .id = V4L2_CID_HFLIP,
+ .type = V4L2_CTRL_TYPE_BOOLEAN,
+ .name = "Mirror",
+ .minimum = 0,
+ .maximum = 1,
+ .step = 1,
+ .default_value = 0,
},
- .set = sd_setautogain,
- .get = sd_getautogain,
+ .set_control = sethvflip
},
-/* ov7630/ov7648 only */
-#define VFLIP_IDX 7
- {
+[VFLIP] = {
{
.id = V4L2_CID_VFLIP,
.type = V4L2_CTRL_TYPE_BOOLEAN,
@@ -229,14 +216,11 @@ static const struct ctrl sd_ctrls[] = {
.minimum = 0,
.maximum = 1,
.step = 1,
-#define VFLIP_DEF 0
- .default_value = VFLIP_DEF,
+ .default_value = 0,
},
- .set = sd_setvflip,
- .get = sd_getvflip,
+ .set_control = sethvflip
},
-#define SHARPNESS_IDX 8
- {
+[SHARPNESS] = {
{
.id = V4L2_CID_SHARPNESS,
.type = V4L2_CTRL_TYPE_INTEGER,
@@ -244,15 +228,12 @@ static const struct ctrl sd_ctrls[] = {
.minimum = 0,
.maximum = 255,
.step = 1,
-#define SHARPNESS_DEF 90
- .default_value = SHARPNESS_DEF,
+ .default_value = 90,
},
- .set = sd_setsharpness,
- .get = sd_getsharpness,
+ .set_control = setsharpness
},
/* mt9v111 only */
-#define INFRARED_IDX 9
- {
+[INFRARED] = {
{
.id = V4L2_CID_INFRARED,
.type = V4L2_CTRL_TYPE_BOOLEAN,
@@ -260,15 +241,12 @@ static const struct ctrl sd_ctrls[] = {
.minimum = 0,
.maximum = 1,
.step = 1,
-#define INFRARED_DEF 0
- .default_value = INFRARED_DEF,
+ .default_value = 0,
},
- .set = sd_setinfrared,
- .get = sd_getinfrared,
+ .set_control = setinfrared
},
/* ov7630/ov7648/ov7660 only */
-#define FREQ_IDX 10
- {
+[FREQ] = {
{
.id = V4L2_CID_POWER_LINE_FREQUENCY,
.type = V4L2_CTRL_TYPE_MENU,
@@ -276,69 +254,85 @@ static const struct ctrl sd_ctrls[] = {
.minimum = 0,
.maximum = 2, /* 0: 0, 1: 50Hz, 2:60Hz */
.step = 1,
-#define FREQ_DEF 1
- .default_value = FREQ_DEF,
+ .default_value = 1,
},
- .set = sd_setfreq,
- .get = sd_getfreq,
+ .set_control = setfreq
},
};
/* table of the disabled controls */
static const __u32 ctrl_dis[] = {
-[SENSOR_ADCM1700] = (1 << AUTOGAIN_IDX) |
- (1 << INFRARED_IDX) |
- (1 << VFLIP_IDX) |
- (1 << FREQ_IDX),
-
-[SENSOR_GC0307] = (1 << INFRARED_IDX) |
- (1 << VFLIP_IDX) |
- (1 << FREQ_IDX),
-
-[SENSOR_HV7131R] = (1 << INFRARED_IDX) |
- (1 << FREQ_IDX),
-
-[SENSOR_MI0360] = (1 << INFRARED_IDX) |
- (1 << VFLIP_IDX) |
- (1 << FREQ_IDX),
-
-[SENSOR_MO4000] = (1 << INFRARED_IDX) |
- (1 << VFLIP_IDX) |
- (1 << FREQ_IDX),
-
-[SENSOR_MT9V111] = (1 << VFLIP_IDX) |
- (1 << FREQ_IDX),
-
-[SENSOR_OM6802] = (1 << INFRARED_IDX) |
- (1 << VFLIP_IDX) |
- (1 << FREQ_IDX),
-
-[SENSOR_OV7630] = (1 << INFRARED_IDX),
-
-[SENSOR_OV7648] = (1 << INFRARED_IDX),
-
-[SENSOR_OV7660] = (1 << AUTOGAIN_IDX) |
- (1 << INFRARED_IDX) |
- (1 << VFLIP_IDX),
-
-[SENSOR_PO1030] = (1 << AUTOGAIN_IDX) |
- (1 << INFRARED_IDX) |
- (1 << VFLIP_IDX) |
- (1 << FREQ_IDX),
-
-[SENSOR_PO2030N] = (1 << AUTOGAIN_IDX) |
- (1 << INFRARED_IDX) |
- (1 << VFLIP_IDX) |
- (1 << FREQ_IDX),
-[SENSOR_SOI768] = (1 << AUTOGAIN_IDX) |
- (1 << INFRARED_IDX) |
- (1 << VFLIP_IDX) |
- (1 << FREQ_IDX),
-
-[SENSOR_SP80708] = (1 << AUTOGAIN_IDX) |
- (1 << INFRARED_IDX) |
- (1 << VFLIP_IDX) |
- (1 << FREQ_IDX),
+[SENSOR_ADCM1700] = (1 << AUTOGAIN) |
+ (1 << INFRARED) |
+ (1 << HFLIP) |
+ (1 << VFLIP) |
+ (1 << FREQ),
+
+[SENSOR_GC0307] = (1 << INFRARED) |
+ (1 << HFLIP) |
+ (1 << VFLIP) |
+ (1 << FREQ),
+
+[SENSOR_HV7131R] = (1 << INFRARED) |
+ (1 << HFLIP) |
+ (1 << FREQ),
+
+[SENSOR_MI0360] = (1 << INFRARED) |
+ (1 << HFLIP) |
+ (1 << VFLIP) |
+ (1 << FREQ),
+
+[SENSOR_MI0360B] = (1 << INFRARED) |
+ (1 << HFLIP) |
+ (1 << VFLIP) |
+ (1 << FREQ),
+
+[SENSOR_MO4000] = (1 << INFRARED) |
+ (1 << HFLIP) |
+ (1 << VFLIP) |
+ (1 << FREQ),
+
+[SENSOR_MT9V111] = (1 << HFLIP) |
+ (1 << VFLIP) |
+ (1 << FREQ),
+
+[SENSOR_OM6802] = (1 << INFRARED) |
+ (1 << HFLIP) |
+ (1 << VFLIP) |
+ (1 << FREQ),
+
+[SENSOR_OV7630] = (1 << INFRARED) |
+ (1 << HFLIP),
+
+[SENSOR_OV7648] = (1 << INFRARED) |
+ (1 << HFLIP),
+
+[SENSOR_OV7660] = (1 << AUTOGAIN) |
+ (1 << INFRARED) |
+ (1 << HFLIP) |
+ (1 << VFLIP),
+
+[SENSOR_PO1030] = (1 << AUTOGAIN) |
+ (1 << INFRARED) |
+ (1 << HFLIP) |
+ (1 << VFLIP) |
+ (1 << FREQ),
+
+[SENSOR_PO2030N] = (1 << AUTOGAIN) |
+ (1 << INFRARED) |
+ (1 << FREQ),
+
+[SENSOR_SOI768] = (1 << AUTOGAIN) |
+ (1 << INFRARED) |
+ (1 << HFLIP) |
+ (1 << VFLIP) |
+ (1 << FREQ),
+
+[SENSOR_SP80708] = (1 << AUTOGAIN) |
+ (1 << INFRARED) |
+ (1 << HFLIP) |
+ (1 << VFLIP) |
+ (1 << FREQ),
};
static const struct v4l2_pix_format cif_mode[] = {
@@ -411,6 +405,17 @@ static const u8 sn_mi0360[0x1c] = {
0x06, 0x00, 0x00, 0x00
};
+static const u8 sn_mi0360b[0x1c] = {
+/* reg0 reg1 reg2 reg3 reg4 reg5 reg6 reg7 */
+ 0x00, 0x61, 0x40, 0x00, 0x1a, 0x00, 0x00, 0x00,
+/* reg8 reg9 rega regb regc regd rege regf */
+ 0x81, 0x5d, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+/* reg10 reg11 reg12 reg13 reg14 reg15 reg16 reg17 */
+ 0x03, 0x00, 0x00, 0x02, 0x0a, 0x28, 0x1e, 0x40,
+/* reg18 reg19 reg1a reg1b */
+ 0x06, 0x00, 0x00, 0x00
+};
+
static const u8 sn_mo4000[0x1c] = {
/* reg0 reg1 reg2 reg3 reg4 reg5 reg6 reg7 */
0x00, 0x23, 0x60, 0x00, 0x1a, 0x00, 0x20, 0x18,
@@ -527,6 +532,7 @@ static const u8 *sn_tb[] = {
[SENSOR_GC0307] = sn_gc0307,
[SENSOR_HV7131R] = sn_hv7131,
[SENSOR_MI0360] = sn_mi0360,
+[SENSOR_MI0360B] = sn_mi0360b,
[SENSOR_MO4000] = sn_mo4000,
[SENSOR_MT9V111] = sn_mt9v111,
[SENSOR_OM6802] = sn_om6802,
@@ -572,20 +578,23 @@ static const u8 reg84[] = {
0x3e, 0x00, 0xcd, 0x0f, 0xf7, 0x0f, /* VR VG VB */
0x00, 0x00, 0x00 /* YUV offsets */
};
+
+#define DELAY 0xdd
+
static const u8 adcm1700_sensor_init[][8] = {
{0xa0, 0x51, 0xfe, 0x00, 0x00, 0x00, 0x00, 0x10},
{0xb0, 0x51, 0x04, 0x08, 0x00, 0x00, 0x00, 0x10}, /* reset */
- {0xdd, 0x80, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00},
+ {DELAY, 0x80, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00},
{0xb0, 0x51, 0x04, 0x00, 0x00, 0x00, 0x00, 0x10},
- {0xdd, 0x80, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00},
+ {DELAY, 0x80, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00},
{0xb0, 0x51, 0x0c, 0xe0, 0x2e, 0x00, 0x00, 0x10},
{0xb0, 0x51, 0x10, 0x02, 0x02, 0x00, 0x00, 0x10},
{0xb0, 0x51, 0x14, 0x0e, 0x0e, 0x00, 0x00, 0x10},
{0xb0, 0x51, 0x1c, 0x00, 0x80, 0x00, 0x00, 0x10},
{0xb0, 0x51, 0x20, 0x01, 0x00, 0x00, 0x00, 0x10},
- {0xdd, 0xff, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00},
+ {DELAY, 0xff, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00},
{0xb0, 0x51, 0x04, 0x04, 0x00, 0x00, 0x00, 0x10},
- {0xdd, 0xff, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00},
+ {DELAY, 0xff, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00},
{0xb0, 0x51, 0x04, 0x01, 0x00, 0x00, 0x00, 0x10},
{0xa0, 0x51, 0xfe, 0x10, 0x00, 0x00, 0x00, 0x10},
{0xb0, 0x51, 0x14, 0x01, 0x00, 0x00, 0x00, 0x10},
@@ -629,7 +638,7 @@ static const u8 gc0307_sensor_init[][8] = {
{0xa0, 0x21, 0x0e, 0x02, 0x00, 0x00, 0x00, 0x10},
{0xa0, 0x21, 0x0f, 0xb2, 0x00, 0x00, 0x00, 0x10},
{0xa0, 0x21, 0x12, 0x70, 0x00, 0x00, 0x00, 0x10},
- {0xdd, 0x0a, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00}, /*delay 10ms*/
+ {DELAY, 0x0a, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00}, /*delay 10ms*/
{0xa0, 0x21, 0x13, 0x00, 0x00, 0x00, 0x00, 0x10},
{0xa0, 0x21, 0x15, 0xb8, 0x00, 0x00, 0x00, 0x10},
{0xa0, 0x21, 0x16, 0x13, 0x00, 0x00, 0x00, 0x10},
@@ -747,6 +756,62 @@ static const u8 mi0360_sensor_init[][8] = {
{0xb1, 0x5d, 0x07, 0x00, 0x02, 0x00, 0x00, 0x10}, /* sensor on */
{}
};
+static const u8 mi0360b_sensor_init[][8] = {
+ {0xb1, 0x5d, 0x07, 0x00, 0x02, 0x00, 0x00, 0x10},
+ {0xb1, 0x5d, 0x0d, 0x00, 0x01, 0x00, 0x00, 0x10},
+ {DELAY, 0x14, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00}, /*delay 20ms*/
+ {0xb1, 0x5d, 0x0d, 0x00, 0x00, 0x00, 0x00, 0x10},
+ {DELAY, 0x14, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00}, /*delay 20ms*/
+ {0xd1, 0x5d, 0x01, 0x00, 0x08, 0x00, 0x16, 0x10},
+ {0xd1, 0x5d, 0x03, 0x01, 0xe2, 0x02, 0x82, 0x10},
+ {0xd1, 0x5d, 0x05, 0x00, 0x00, 0x00, 0x00, 0x10},
+ {0xb1, 0x5d, 0x0d, 0x00, 0x02, 0x00, 0x00, 0x10},
+ {0xd1, 0x5d, 0x0a, 0x00, 0x00, 0x00, 0x00, 0x10},
+ {0xd1, 0x5d, 0x0c, 0x00, 0x00, 0x00, 0x00, 0x10},
+ {0xd1, 0x5d, 0x0e, 0x00, 0x00, 0x00, 0x00, 0x10},
+ {0xd1, 0x5d, 0x10, 0x00, 0x00, 0x00, 0x00, 0x10},
+ {0xd1, 0x5d, 0x12, 0x00, 0x00, 0x00, 0x00, 0x10},
+ {0xd1, 0x5d, 0x14, 0x00, 0x00, 0x00, 0x00, 0x10},
+ {0xd1, 0x5d, 0x16, 0x00, 0x00, 0x00, 0x00, 0x10},
+ {0xd1, 0x5d, 0x18, 0x00, 0x00, 0x00, 0x00, 0x10},
+ {0xd1, 0x5d, 0x1a, 0x00, 0x00, 0x00, 0x00, 0x10},
+ {0xd1, 0x5d, 0x1c, 0x00, 0x00, 0x00, 0x00, 0x10},
+ {0xb1, 0x5d, 0x32, 0x00, 0x00, 0x00, 0x00, 0x10},
+ {0xd1, 0x5d, 0x20, 0x11, 0x01, 0x00, 0x00, 0x10},
+ {0xd1, 0x5d, 0x22, 0x00, 0x00, 0x00, 0x00, 0x10},
+ {0xd1, 0x5d, 0x24, 0x00, 0x00, 0x00, 0x00, 0x10},
+ {0xd1, 0x5d, 0x26, 0x00, 0x00, 0x00, 0x24, 0x10},
+ {0xd1, 0x5d, 0x2f, 0xf7, 0xb0, 0x00, 0x04, 0x10},
+ {0xd1, 0x5d, 0x31, 0x00, 0x00, 0x00, 0x00, 0x10},
+ {0xd1, 0x5d, 0x33, 0x00, 0x00, 0x01, 0x00, 0x10},
+ {0xb1, 0x5d, 0x3d, 0x06, 0x8f, 0x00, 0x00, 0x10},
+ {0xd1, 0x5d, 0x40, 0x01, 0xe0, 0x00, 0xd1, 0x10},
+ {0xb1, 0x5d, 0x44, 0x00, 0x82, 0x00, 0x00, 0x10},
+ {0xd1, 0x5d, 0x58, 0x00, 0x78, 0x00, 0x43, 0x10},
+ {0xd1, 0x5d, 0x5a, 0x00, 0x00, 0x00, 0x00, 0x10},
+ {0xd1, 0x5d, 0x5c, 0x00, 0x00, 0x00, 0x00, 0x10},
+ {0xd1, 0x5d, 0x5e, 0x00, 0x00, 0xa3, 0x1d, 0x10},
+ {0xb1, 0x5d, 0x62, 0x04, 0x11, 0x00, 0x00, 0x10},
+
+ {0xb1, 0x5d, 0x20, 0x11, 0x01, 0x00, 0x00, 0x10},
+ {0xb1, 0x5d, 0x20, 0x11, 0x01, 0x00, 0x00, 0x10},
+ {0xb1, 0x5d, 0x09, 0x00, 0x64, 0x00, 0x00, 0x10},
+ {0xd1, 0x5d, 0x2b, 0x00, 0x33, 0x00, 0xa0, 0x10},
+ {0xd1, 0x5d, 0x2d, 0x00, 0xa0, 0x00, 0x33, 0x10},
+ {}
+};
+static const u8 mi0360b_sensor_param1[][8] = {
+ {0xb1, 0x5d, 0x0a, 0x00, 0x00, 0x00, 0x00, 0x10},
+ {0xb1, 0x5d, 0x06, 0x00, 0x53, 0x00, 0x00, 0x10},
+ {0xb1, 0x5d, 0x05, 0x00, 0x09, 0x00, 0x00, 0x10},
+ {0xb1, 0x5d, 0x09, 0x02, 0x35, 0x00, 0x00, 0x10}, /* exposure 2 */
+
+ {0xd1, 0x5d, 0x2b, 0x00, 0xd1, 0x01, 0xc9, 0x10},
+ {0xd1, 0x5d, 0x2d, 0x00, 0xed, 0x00, 0xd1, 0x10},
+ {0xb1, 0x5d, 0x07, 0x00, 0x03, 0x00, 0x00, 0x10}, /* update */
+ {0xb1, 0x5d, 0x07, 0x00, 0x02, 0x00, 0x00, 0x10}, /* sensor on */
+ {}
+};
static const u8 mo4000_sensor_init[][8] = {
{0xa1, 0x21, 0x01, 0x02, 0x00, 0x00, 0x00, 0x10},
{0xa1, 0x21, 0x02, 0x00, 0x00, 0x00, 0x00, 0x10},
@@ -772,7 +837,7 @@ static const u8 mo4000_sensor_init[][8] = {
};
static const u8 mt9v111_sensor_init[][8] = {
{0xb1, 0x5c, 0x0d, 0x00, 0x01, 0x00, 0x00, 0x10}, /* reset? */
- {0xdd, 0x14, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00}, /* delay 20ms */
+ {DELAY, 0x14, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00}, /* delay 20ms */
{0xb1, 0x5c, 0x0d, 0x00, 0x00, 0x00, 0x00, 0x10},
{0xb1, 0x5c, 0x01, 0x00, 0x01, 0x00, 0x00, 0x10}, /* IFP select */
{0xb1, 0x5c, 0x08, 0x04, 0x80, 0x00, 0x00, 0x10}, /* output fmt ctrl */
@@ -860,10 +925,10 @@ static const u8 om6802_sensor_param1[][8] = {
static const u8 ov7630_sensor_init[][8] = {
{0xa1, 0x21, 0x76, 0x01, 0x00, 0x00, 0x00, 0x10},
{0xa1, 0x21, 0x12, 0xc8, 0x00, 0x00, 0x00, 0x10},
- {0xdd, 0x14, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00}, /* delay 20ms */
+ {DELAY, 0x14, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00}, /* delay 20ms */
{0xa1, 0x21, 0x12, 0x48, 0x00, 0x00, 0x00, 0x10},
{0xa1, 0x21, 0x12, 0xc8, 0x00, 0x00, 0x00, 0x10},
- {0xdd, 0x14, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00}, /* delay 20ms */
+ {DELAY, 0x14, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00}, /* delay 20ms */
{0xa1, 0x21, 0x12, 0x48, 0x00, 0x00, 0x00, 0x10},
/* win: i2c_r from 00 to 80 */
{0xd1, 0x21, 0x03, 0x80, 0x10, 0x20, 0x80, 0x10},
@@ -917,7 +982,7 @@ static const u8 ov7630_sensor_param1[][8] = {
static const u8 ov7648_sensor_init[][8] = {
{0xa1, 0x21, 0x76, 0x00, 0x00, 0x00, 0x00, 0x10},
{0xa1, 0x21, 0x12, 0x80, 0x00, 0x00, 0x00, 0x10}, /* reset */
- {0xdd, 0x14, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00}, /* delay 20ms */
+ {DELAY, 0x14, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00}, /* delay 20ms */
{0xa1, 0x21, 0x12, 0x00, 0x00, 0x00, 0x00, 0x10},
{0xd1, 0x21, 0x03, 0xa4, 0x30, 0x88, 0x00, 0x10},
{0xb1, 0x21, 0x11, 0x80, 0x08, 0x00, 0x00, 0x10},
@@ -966,7 +1031,7 @@ static const u8 ov7648_sensor_param1[][8] = {
static const u8 ov7660_sensor_init[][8] = {
{0xa1, 0x21, 0x12, 0x80, 0x00, 0x00, 0x00, 0x10}, /* reset SCCB */
- {0xdd, 0x14, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00}, /* delay 20ms */
+ {DELAY, 0x14, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00}, /* delay 20ms */
{0xa1, 0x21, 0x12, 0x05, 0x00, 0x00, 0x00, 0x10},
/* Outformat = rawRGB */
{0xa1, 0x21, 0x13, 0xb8, 0x00, 0x00, 0x00, 0x10}, /* init COM8 */
@@ -1062,7 +1127,7 @@ static const u8 ov7660_sensor_param1[][8] = {
static const u8 po1030_sensor_init[][8] = {
/* the sensor registers are described in m5602/m5602_po1030.h */
{0xa1, 0x6e, 0x3f, 0x20, 0x00, 0x00, 0x00, 0x10}, /* sensor reset */
- {0xdd, 0x14, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00}, /* delay 20ms */
+ {DELAY, 0x14, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00}, /* delay 20ms */
{0xa1, 0x6e, 0x3f, 0x00, 0x00, 0x00, 0x00, 0x10},
{0xa1, 0x6e, 0x3e, 0x00, 0x00, 0x00, 0x00, 0x10},
{0xd1, 0x6e, 0x04, 0x02, 0xb1, 0x02, 0x39, 0x10},
@@ -1116,10 +1181,10 @@ static const u8 po1030_sensor_param1[][8] = {
static const u8 po2030n_sensor_init[][8] = {
{0xa1, 0x6e, 0x1e, 0x1a, 0x00, 0x00, 0x00, 0x10},
{0xa1, 0x6e, 0x1f, 0x99, 0x00, 0x00, 0x00, 0x10},
- {0xdd, 0x0a, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00}, /* delay 10ms */
+ {DELAY, 0x0a, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00}, /* delay 10ms */
{0xa1, 0x6e, 0x1e, 0x0a, 0x00, 0x00, 0x00, 0x10},
{0xa1, 0x6e, 0x1f, 0x19, 0x00, 0x00, 0x00, 0x10},
- {0xdd, 0x0a, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00}, /* delay 10ms */
+ {DELAY, 0x0a, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00}, /* delay 10ms */
{0xa1, 0x6e, 0x20, 0x44, 0x00, 0x00, 0x00, 0x10},
{0xa1, 0x6e, 0x04, 0x03, 0x00, 0x00, 0x00, 0x10},
{0xa1, 0x6e, 0x05, 0x70, 0x00, 0x00, 0x00, 0x10},
@@ -1168,7 +1233,7 @@ static const u8 po2030n_sensor_init[][8] = {
};
static const u8 po2030n_sensor_param1[][8] = {
{0xa1, 0x6e, 0x1a, 0x01, 0x00, 0x00, 0x00, 0x10},
- {0xdd, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00}, /* delay 8ms */
+ {DELAY, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00}, /* delay 8ms */
{0xa1, 0x6e, 0x1b, 0xf4, 0x00, 0x00, 0x00, 0x10},
{0xa1, 0x6e, 0x15, 0x04, 0x00, 0x00, 0x00, 0x10},
{0xd1, 0x6e, 0x16, 0x50, 0x40, 0x49, 0x40, 0x10},
@@ -1182,16 +1247,16 @@ static const u8 po2030n_sensor_param1[][8] = {
{0xc1, 0x6e, 0x16, 0x52, 0x40, 0x48, 0x00, 0x10},
/*after start*/
{0xa1, 0x6e, 0x15, 0x0f, 0x00, 0x00, 0x00, 0x10},
- {0xdd, 0x05, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00}, /* delay 5ms */
+ {DELAY, 0x05, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00}, /* delay 5ms */
{0xa1, 0x6e, 0x1a, 0x05, 0x00, 0x00, 0x00, 0x10},
- {0xdd, 0x05, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00}, /* delay 5ms */
+ {DELAY, 0x05, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00}, /* delay 5ms */
{0xa1, 0x6e, 0x1b, 0x53, 0x00, 0x00, 0x00, 0x10},
{}
};
static const u8 soi768_sensor_init[][8] = {
{0xa1, 0x21, 0x12, 0x80, 0x00, 0x00, 0x00, 0x10}, /* reset */
- {0xdd, 0x60, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00}, /* delay 96ms */
+ {DELAY, 0x60, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00}, /* delay 96ms */
{0xa1, 0x21, 0x12, 0x00, 0x00, 0x00, 0x00, 0x10},
{0xa1, 0x21, 0x13, 0x80, 0x00, 0x00, 0x00, 0x10},
{0xa1, 0x21, 0x0f, 0x03, 0x00, 0x00, 0x00, 0x10},
@@ -1310,6 +1375,7 @@ static const u8 (*sensor_init[])[8] = {
[SENSOR_GC0307] = gc0307_sensor_init,
[SENSOR_HV7131R] = hv7131r_sensor_init,
[SENSOR_MI0360] = mi0360_sensor_init,
+[SENSOR_MI0360B] = mi0360b_sensor_init,
[SENSOR_MO4000] = mo4000_sensor_init,
[SENSOR_MT9V111] = mt9v111_sensor_init,
[SENSOR_OM6802] = om6802_sensor_init,
@@ -1326,13 +1392,17 @@ static const u8 (*sensor_init[])[8] = {
static void reg_r(struct gspca_dev *gspca_dev,
u16 value, int len)
{
+ int ret;
+
+ if (gspca_dev->usb_err < 0)
+ return;
#ifdef GSPCA_DEBUG
if (len > USB_BUF_SZ) {
err("reg_r: buffer overflow");
return;
}
#endif
- usb_control_msg(gspca_dev->dev,
+ ret = usb_control_msg(gspca_dev->dev,
usb_rcvctrlpipe(gspca_dev->dev, 0),
0,
USB_DIR_IN | USB_TYPE_VENDOR | USB_RECIP_INTERFACE,
@@ -1340,15 +1410,23 @@ static void reg_r(struct gspca_dev *gspca_dev,
gspca_dev->usb_buf, len,
500);
PDEBUG(D_USBI, "reg_r [%02x] -> %02x", value, gspca_dev->usb_buf[0]);
+ if (ret < 0) {
+ err("reg_r err %d", ret);
+ gspca_dev->usb_err = ret;
+ }
}
static void reg_w1(struct gspca_dev *gspca_dev,
u16 value,
u8 data)
{
+ int ret;
+
+ if (gspca_dev->usb_err < 0)
+ return;
PDEBUG(D_USBO, "reg_w1 [%04x] = %02x", value, data);
gspca_dev->usb_buf[0] = data;
- usb_control_msg(gspca_dev->dev,
+ ret = usb_control_msg(gspca_dev->dev,
usb_sndctrlpipe(gspca_dev->dev, 0),
0x08,
USB_DIR_OUT | USB_TYPE_VENDOR | USB_RECIP_INTERFACE,
@@ -1356,12 +1434,20 @@ static void reg_w1(struct gspca_dev *gspca_dev,
0,
gspca_dev->usb_buf, 1,
500);
+ if (ret < 0) {
+ err("reg_w1 err %d", ret);
+ gspca_dev->usb_err = ret;
+ }
}
static void reg_w(struct gspca_dev *gspca_dev,
u16 value,
const u8 *buffer,
int len)
{
+ int ret;
+
+ if (gspca_dev->usb_err < 0)
+ return;
PDEBUG(D_USBO, "reg_w [%04x] = %02x %02x ..",
value, buffer[0], buffer[1]);
#ifdef GSPCA_DEBUG
@@ -1371,20 +1457,27 @@ static void reg_w(struct gspca_dev *gspca_dev,
}
#endif
memcpy(gspca_dev->usb_buf, buffer, len);
- usb_control_msg(gspca_dev->dev,
+ ret = usb_control_msg(gspca_dev->dev,
usb_sndctrlpipe(gspca_dev->dev, 0),
0x08,
USB_DIR_OUT | USB_TYPE_VENDOR | USB_RECIP_INTERFACE,
value, 0,
gspca_dev->usb_buf, len,
500);
+ if (ret < 0) {
+ err("reg_w err %d", ret);
+ gspca_dev->usb_err = ret;
+ }
}
/* I2C write 1 byte */
static void i2c_w1(struct gspca_dev *gspca_dev, u8 reg, u8 val)
{
struct sd *sd = (struct sd *) gspca_dev;
+ int ret;
+ if (gspca_dev->usb_err < 0)
+ return;
PDEBUG(D_USBO, "i2c_w1 [%02x] = %02x", reg, val);
switch (sd->sensor) {
case SENSOR_ADCM1700:
@@ -1403,7 +1496,7 @@ static void i2c_w1(struct gspca_dev *gspca_dev, u8 reg, u8 val)
gspca_dev->usb_buf[5] = 0;
gspca_dev->usb_buf[6] = 0;
gspca_dev->usb_buf[7] = 0x10;
- usb_control_msg(gspca_dev->dev,
+ ret = usb_control_msg(gspca_dev->dev,
usb_sndctrlpipe(gspca_dev->dev, 0),
0x08,
USB_DIR_OUT | USB_TYPE_VENDOR | USB_RECIP_INTERFACE,
@@ -1411,16 +1504,24 @@ static void i2c_w1(struct gspca_dev *gspca_dev, u8 reg, u8 val)
0,
gspca_dev->usb_buf, 8,
500);
+ if (ret < 0) {
+ err("i2c_w1 err %d", ret);
+ gspca_dev->usb_err = ret;
+ }
}
/* I2C write 8 bytes */
static void i2c_w8(struct gspca_dev *gspca_dev,
const u8 *buffer)
{
+ int ret;
+
+ if (gspca_dev->usb_err < 0)
+ return;
PDEBUG(D_USBO, "i2c_w8 [%02x] = %02x ..",
buffer[2], buffer[3]);
memcpy(gspca_dev->usb_buf, buffer, 8);
- usb_control_msg(gspca_dev->dev,
+ ret = usb_control_msg(gspca_dev->dev,
usb_sndctrlpipe(gspca_dev->dev, 0),
0x08,
USB_DIR_OUT | USB_TYPE_VENDOR | USB_RECIP_INTERFACE,
@@ -1428,6 +1529,10 @@ static void i2c_w8(struct gspca_dev *gspca_dev,
gspca_dev->usb_buf, 8,
500);
msleep(2);
+ if (ret < 0) {
+ err("i2c_w8 err %d", ret);
+ gspca_dev->usb_err = ret;
+ }
}
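/*
 * Editor's sketch, not part of the patch: the helpers above now latch the
 * first USB error into gspca_dev->usb_err and become no-ops afterwards, so a
 * caller can fire a whole register sequence and check the status once.  The
 * function name below is made up for illustration; reg_w1() and the usb_err
 * field are the ones shown in this diff.
 */
static int example_sensor_setup(struct gspca_dev *gspca_dev)
{
	reg_w1(gspca_dev, 0x01, 0x61);	/* each call is skipped once a	*/
	reg_w1(gspca_dev, 0x17, 0x60);	/* previous transfer has failed	*/
	reg_w1(gspca_dev, 0x01, 0x40);
	return gspca_dev->usb_err;	/* 0 on success, first error otherwise */
}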
/* sensor read 'len' (1..5) bytes in gspca_dev->usb_buf */
@@ -1466,7 +1571,7 @@ static void i2c_w_seq(struct gspca_dev *gspca_dev,
const u8 (*data)[8])
{
while ((*data)[0] != 0) {
- if ((*data)[0] != 0xdd)
+ if ((*data)[0] != DELAY)
i2c_w8(gspca_dev, *data);
else
msleep((*data)[1]);
@@ -1529,7 +1634,13 @@ static void mi0360_probe(struct gspca_dev *gspca_dev)
if (val != 0xffff)
break;
}
+ if (gspca_dev->usb_err < 0)
+ return;
switch (val) {
+ case 0x8221:
+ PDEBUG(D_PROBE, "Sensor mi0360b");
+ sd->sensor = SENSOR_MI0360B;
+ break;
case 0x823a:
PDEBUG(D_PROBE, "Sensor mt9v111");
sd->sensor = SENSOR_MT9V111;
@@ -1556,6 +1667,8 @@ static void ov7630_probe(struct gspca_dev *gspca_dev)
val = (gspca_dev->usb_buf[3] << 8) | gspca_dev->usb_buf[4];
reg_w1(gspca_dev, 0x01, 0x29);
reg_w1(gspca_dev, 0x17, 0x42);
+ if (gspca_dev->usb_err < 0)
+ return;
if (val == 0x7628) { /* soi768 */
sd->sensor = SENSOR_SOI768;
/*fixme: only valid for 0c45:613e?*/
@@ -1593,13 +1706,14 @@ static void ov7648_probe(struct gspca_dev *gspca_dev)
val = (gspca_dev->usb_buf[3] << 8) | gspca_dev->usb_buf[4];
reg_w1(gspca_dev, 0x01, 0x29);
reg_w1(gspca_dev, 0x17, 0x42);
+ if (gspca_dev->usb_err < 0)
+ return;
if (val == 0x1030) { /* po1030 */
PDEBUG(D_PROBE, "Sensor po1030");
sd->sensor = SENSOR_PO1030;
return;
}
-
- PDEBUG(D_PROBE, "Unknown sensor %04x", val);
+ err("Unknown sensor %04x", val);
}
/* 0c45:6142 sensor may be po2030n, gc0305 or gc0307 */
@@ -1631,11 +1745,13 @@ static void po2030n_probe(struct gspca_dev *gspca_dev)
val = (gspca_dev->usb_buf[3] << 8) | gspca_dev->usb_buf[4];
reg_w1(gspca_dev, 0x01, 0x29);
reg_w1(gspca_dev, 0x17, 0x42);
+ if (gspca_dev->usb_err < 0)
+ return;
if (val == 0x2030) {
PDEBUG(D_PROBE, "Sensor po2030n");
/* sd->sensor = SENSOR_PO2030N; */
} else {
- PDEBUG(D_PROBE, "Unknown sensor ID %04x", val);
+ err("Unknown sensor ID %04x", val);
}
}
@@ -1697,6 +1813,12 @@ static void bridge_init(struct gspca_dev *gspca_dev,
reg_w1(gspca_dev, 0x01, 0x40);
msleep(50);
break;
+ case SENSOR_MI0360B:
+ reg_w1(gspca_dev, 0x01, 0x61);
+ reg_w1(gspca_dev, 0x17, 0x60);
+ reg_w1(gspca_dev, 0x01, 0x60);
+ reg_w1(gspca_dev, 0x01, 0x40);
+ break;
case SENSOR_MT9V111:
reg_w1(gspca_dev, 0x01, 0x61);
reg_w1(gspca_dev, 0x17, 0x61);
@@ -1762,8 +1884,7 @@ static void bridge_init(struct gspca_dev *gspca_dev,
reg_w1(gspca_dev, 0x01, 0x43);
reg_w1(gspca_dev, 0x17, 0x61);
reg_w1(gspca_dev, 0x01, 0x42);
- if (sd->sensor == SENSOR_HV7131R
- && sd->bridge == BRIDGE_SN9C102P)
+ if (sd->sensor == SENSOR_HV7131R)
hv7131r_probe(gspca_dev);
break;
}
@@ -1788,26 +1909,9 @@ static int sd_config(struct gspca_dev *gspca_dev,
cam->nmodes = ARRAY_SIZE(vga_mode);
}
cam->npkt = 24; /* 24 packets per ISOC message */
+ cam->ctrls = sd->ctrls;
- sd->brightness = BRIGHTNESS_DEF;
- sd->contrast = CONTRAST_DEF;
- sd->colors = COLOR_DEF;
- sd->blue = BLUE_BALANCE_DEF;
- sd->red = RED_BALANCE_DEF;
- sd->gamma = GAMMA_DEF;
- sd->autogain = AUTOGAIN_DEF;
sd->ag_cnt = -1;
- sd->vflip = VFLIP_DEF;
- switch (sd->sensor) {
- case SENSOR_OM6802:
- sd->sharpness = 0x10;
- break;
- default:
- sd->sharpness = SHARPNESS_DEF;
- break;
- }
- sd->infrared = INFRARED_DEF;
- sd->freq = FREQ_DEF;
sd->quality = QUALITY_DEF;
sd->jpegqual = 80;
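/*
 * Editor's note, not part of the patch: with cam->ctrls pointing at
 * sd->ctrls, the gspca core fills each entry from the sd_ctrls[] descriptors
 * and later stores user updates in sd->ctrls[<control>].val, which is why the
 * hand-maintained default assignments above can be dropped.  A setter then
 * reads, for example:
 *
 *	brightness = sd->ctrls[BRIGHTNESS].val;
 *
 * and a sensor-specific default is applied by patching the entry, as done for
 * SHARPNESS on the OM6802 in sd_init() below.
 */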
@@ -1828,6 +1932,8 @@ static int sd_init(struct gspca_dev *gspca_dev)
reg_w1(gspca_dev, 0xf1, gspca_dev->usb_buf[0]);
reg_r(gspca_dev, 0x00, 1); /* get sonix chip id */
regF1 = gspca_dev->usb_buf[0];
+ if (gspca_dev->usb_err < 0)
+ return gspca_dev->usb_err;
PDEBUG(D_PROBE, "Sonix chip id: %02x", regF1);
switch (sd->bridge) {
case BRIDGE_SN9C102P:
@@ -1871,6 +1977,9 @@ static int sd_init(struct gspca_dev *gspca_dev)
break;
}
+ if (sd->sensor == SENSOR_OM6802)
+ sd->ctrls[SHARPNESS].def = 0x10;
+
/* Note we do not disable the sensor clock here (power saving mode),
as that also disables the button on the cam. */
reg_w1(gspca_dev, 0xf1, 0x00);
@@ -1881,7 +1990,7 @@ static int sd_init(struct gspca_dev *gspca_dev)
gspca_dev->ctrl_dis = ctrl_dis[sd->sensor];
- return 0;
+ return gspca_dev->usb_err;
}
static u32 setexposure(struct gspca_dev *gspca_dev,
@@ -1912,7 +2021,8 @@ static u32 setexposure(struct gspca_dev *gspca_dev,
i2c_w8(gspca_dev, Expodoit);
break;
}
- case SENSOR_MI0360: {
+ case SENSOR_MI0360:
+ case SENSOR_MI0360B: {
u8 expoMi[] = /* exposure 0x0635 -> 4 fp/s 0x10 */
{ 0xb1, 0x5d, 0x09, 0x00, 0x00, 0x00, 0x00, 0x16 };
static const u8 doit[] = /* update sensor */
@@ -1991,16 +2101,18 @@ static void setbrightness(struct gspca_dev *gspca_dev)
{
struct sd *sd = (struct sd *) gspca_dev;
unsigned int expo;
+ int brightness;
u8 k2;
- k2 = ((int) sd->brightness - 0x8000) >> 10;
+ brightness = sd->ctrls[BRIGHTNESS].val;
+ k2 = (brightness - 0x80) >> 2;
switch (sd->sensor) {
case SENSOR_ADCM1700:
if (k2 > 0x1f)
k2 = 0; /* only positive Y offset */
break;
case SENSOR_HV7131R:
- expo = sd->brightness << 4;
+ expo = brightness << 12;
if (expo > 0x002dc6c0)
expo = 0x002dc6c0;
else if (expo < 0x02a0)
@@ -2009,18 +2121,22 @@ static void setbrightness(struct gspca_dev *gspca_dev)
break;
case SENSOR_MI0360:
case SENSOR_MO4000:
- expo = sd->brightness >> 4;
+ expo = brightness << 4;
+ sd->exposure = setexposure(gspca_dev, expo);
+ break;
+ case SENSOR_MI0360B:
+ expo = brightness << 2;
sd->exposure = setexposure(gspca_dev, expo);
break;
case SENSOR_GC0307:
case SENSOR_MT9V111:
- expo = sd->brightness >> 8;
+ expo = brightness;
sd->exposure = setexposure(gspca_dev, expo);
return; /* don't set the Y offset */
case SENSOR_OM6802:
- expo = sd->brightness >> 6;
+ expo = brightness << 2;
sd->exposure = setexposure(gspca_dev, expo);
- k2 = sd->brightness >> 11;
+ k2 = brightness >> 3;
break;
}
@@ -2033,7 +2149,8 @@ static void setcontrast(struct gspca_dev *gspca_dev)
u8 k2;
u8 contrast[6];
- k2 = sd->contrast * 0x30 / (CONTRAST_MAX + 1) + 0x10; /* 10..40 */
+ k2 = sd->ctrls[CONTRAST].val * 0x30 / (CONTRAST_MAX + 1)
+ + 0x10; /* 10..40 */
contrast[0] = (k2 + 1) / 2; /* red */
contrast[1] = 0;
contrast[2] = k2; /* green */
@@ -2046,15 +2163,25 @@ static void setcontrast(struct gspca_dev *gspca_dev)
static void setcolors(struct gspca_dev *gspca_dev)
{
struct sd *sd = (struct sd *) gspca_dev;
- int i, v;
+ int i, v, colors;
+ const s16 *uv;
u8 reg8a[12]; /* U & V gains */
- static const s16 uv[6] = { /* same as reg84 in signed decimal */
+ static const s16 uv_com[6] = { /* same as reg84 in signed decimal */
-24, -38, 64, /* UR UG UB */
62, -51, -9 /* VR VG VB */
};
+ static const s16 uv_mi0360b[6] = {
+ -20, -38, 64, /* UR UG UB */
+ 60, -51, -9 /* VR VG VB */
+ };
+ colors = sd->ctrls[COLORS].val;
+ if (sd->sensor == SENSOR_MI0360B)
+ uv = uv_mi0360b;
+ else
+ uv = uv_com;
for (i = 0; i < 6; i++) {
- v = uv[i] * sd->colors / COLOR_DEF;
+ v = uv[i] * colors / COLORS_DEF;
reg8a[i * 2] = v;
reg8a[i * 2 + 1] = (v >> 8) & 0x0f;
}
@@ -2065,15 +2192,15 @@ static void setredblue(struct gspca_dev *gspca_dev)
{
struct sd *sd = (struct sd *) gspca_dev;
- reg_w1(gspca_dev, 0x05, sd->red);
+ reg_w1(gspca_dev, 0x05, sd->ctrls[RED].val);
/* reg_w1(gspca_dev, 0x07, 32); */
- reg_w1(gspca_dev, 0x06, sd->blue);
+ reg_w1(gspca_dev, 0x06, sd->ctrls[BLUE].val);
}
static void setgamma(struct gspca_dev *gspca_dev)
{
struct sd *sd = (struct sd *) gspca_dev;
- int i;
+ int i, val;
u8 gamma[17];
const u8 *gamma_base;
static const u8 delta[17] = {
@@ -2086,6 +2213,7 @@ static void setgamma(struct gspca_dev *gspca_dev)
gamma_base = gamma_spec_0;
break;
case SENSOR_HV7131R:
+ case SENSOR_MI0360B:
case SENSOR_MT9V111:
gamma_base = gamma_spec_1;
break;
@@ -2100,9 +2228,10 @@ static void setgamma(struct gspca_dev *gspca_dev)
break;
}
+ val = sd->ctrls[GAMMA].val;
for (i = 0; i < sizeof gamma; i++)
gamma[i] = gamma_base[i]
- + delta[i] * (sd->gamma - GAMMA_DEF) / 32;
+ + delta[i] * (val - GAMMA_DEF) / 32;
reg_w(gspca_dev, 0x20, gamma, sizeof gamma);
}
@@ -2110,7 +2239,7 @@ static void setautogain(struct gspca_dev *gspca_dev)
{
struct sd *sd = (struct sd *) gspca_dev;
- if (gspca_dev->ctrl_dis & (1 << AUTOGAIN_IDX))
+ if (gspca_dev->ctrl_dis & (1 << AUTOGAIN))
return;
switch (sd->sensor) {
case SENSOR_OV7630:
@@ -2121,74 +2250,91 @@ static void setautogain(struct gspca_dev *gspca_dev)
comb = 0xc0;
else
comb = 0xa0;
- if (sd->autogain)
+ if (sd->ctrls[AUTOGAIN].val)
comb |= 0x03;
i2c_w1(&sd->gspca_dev, 0x13, comb);
return;
}
}
- if (sd->autogain)
+ if (sd->ctrls[AUTOGAIN].val)
sd->ag_cnt = AG_CNT_START;
else
sd->ag_cnt = -1;
}
-/* hv7131r/ov7630/ov7648 only */
-static void setvflip(struct sd *sd)
+static void sethvflip(struct gspca_dev *gspca_dev)
{
+ struct sd *sd = (struct sd *) gspca_dev;
u8 comn;
- if (sd->gspca_dev.ctrl_dis & (1 << VFLIP_IDX))
- return;
switch (sd->sensor) {
case SENSOR_HV7131R:
comn = 0x18; /* clkdiv = 1, ablcen = 1 */
- if (sd->vflip)
+ if (sd->ctrls[VFLIP].val)
comn |= 0x01;
- i2c_w1(&sd->gspca_dev, 0x01, comn); /* sctra */
+ i2c_w1(gspca_dev, 0x01, comn); /* sctra */
break;
case SENSOR_OV7630:
comn = 0x02;
- if (!sd->vflip)
+ if (!sd->ctrls[VFLIP].val)
comn |= 0x80;
- i2c_w1(&sd->gspca_dev, 0x75, comn);
+ i2c_w1(gspca_dev, 0x75, comn);
break;
- default:
-/* case SENSOR_OV7648: */
+ case SENSOR_OV7648:
comn = 0x06;
- if (sd->vflip)
+ if (sd->ctrls[VFLIP].val)
+ comn |= 0x80;
+ i2c_w1(gspca_dev, 0x75, comn);
+ break;
+ case SENSOR_PO2030N:
+ /* Reg. 0x1E: Timing Generator Control Register 2 (Tgcontrol2)
+ * (reset value: 0x0A)
+ * bit7: HM: Horizontal Mirror: 0: disable, 1: enable
+ * bit6: VM: Vertical Mirror: 0: disable, 1: enable
+ * bit5: ST: Shutter Selection: 0: electrical, 1: mechanical
+ * bit4: FT: Single Frame Transfer: 0: disable, 1: enable
+ * bit3-0: X
+ */
+ comn = 0x0a;
+ if (sd->ctrls[HFLIP].val)
comn |= 0x80;
- i2c_w1(&sd->gspca_dev, 0x75, comn);
+ if (sd->ctrls[VFLIP].val)
+ comn |= 0x40;
+ i2c_w1(&sd->gspca_dev, 0x1e, comn);
break;
}
}
-static void setsharpness(struct sd *sd)
+static void setsharpness(struct gspca_dev *gspca_dev)
{
- reg_w1(&sd->gspca_dev, 0x99, sd->sharpness);
+ struct sd *sd = (struct sd *) gspca_dev;
+
+ reg_w1(gspca_dev, 0x99, sd->ctrls[SHARPNESS].val);
}
-static void setinfrared(struct sd *sd)
+static void setinfrared(struct gspca_dev *gspca_dev)
{
- if (sd->gspca_dev.ctrl_dis & (1 << INFRARED_IDX))
+ struct sd *sd = (struct sd *) gspca_dev;
+
+ if (gspca_dev->ctrl_dis & (1 << INFRARED))
return;
/*fixme: different sequence for StarCam Clip and StarCam 370i */
/* Clip */
- i2c_w1(&sd->gspca_dev, 0x02, /* gpio */
- sd->infrared ? 0x66 : 0x64);
+ i2c_w1(gspca_dev, 0x02, /* gpio */
+ sd->ctrls[INFRARED].val ? 0x66 : 0x64);
}
static void setfreq(struct gspca_dev *gspca_dev)
{
struct sd *sd = (struct sd *) gspca_dev;
- if (gspca_dev->ctrl_dis & (1 << FREQ_IDX))
+ if (gspca_dev->ctrl_dis & (1 << FREQ))
return;
if (sd->sensor == SENSOR_OV7660) {
u8 com8;
com8 = 0xdf; /* auto gain/wb/expo */
- switch (sd->freq) {
+ switch (sd->ctrls[FREQ].val) {
case 0: /* Banding filter disabled */
i2c_w1(gspca_dev, 0x13, com8 | 0x20);
break;
@@ -2216,7 +2362,7 @@ static void setfreq(struct gspca_dev *gspca_dev)
break;
}
- switch (sd->freq) {
+ switch (sd->ctrls[FREQ].val) {
case 0: /* Banding filter disabled */
break;
case 1: /* 50 hz (filter on and framerate adj) */
@@ -2334,6 +2480,7 @@ static int sd_start(struct gspca_dev *gspca_dev)
reg17 = 0xa2;
break;
case SENSOR_MT9V111:
+ case SENSOR_MI0360B:
reg17 = 0xe0;
break;
case SENSOR_ADCM1700:
@@ -2375,6 +2522,7 @@ static int sd_start(struct gspca_dev *gspca_dev)
break;
case SENSOR_GC0307:
case SENSOR_MT9V111:
+ case SENSOR_MI0360B:
reg_w1(gspca_dev, 0x9a, 0x07);
break;
case SENSOR_OV7630:
@@ -2389,7 +2537,7 @@ static int sd_start(struct gspca_dev *gspca_dev)
reg_w1(gspca_dev, 0x9a, 0x08);
break;
}
- setsharpness(sd);
+ setsharpness(gspca_dev);
reg_w(gspca_dev, 0x84, reg84, sizeof reg84);
reg_w1(gspca_dev, 0x05, 0x20); /* red */
@@ -2414,6 +2562,11 @@ static int sd_start(struct gspca_dev *gspca_dev)
reg17 = 0xa2;
reg1 = 0x44;
break;
+ case SENSOR_MI0360B:
+ init = mi0360b_sensor_param1;
+ reg1 &= ~0x02; /* don't inverse pin S_PWR_DN */
+ reg17 = 0xe2;
+ break;
case SENSOR_MO4000:
if (mode) {
/* reg1 = 0x46; * 320 clk 48Mhz 60fp/s */
@@ -2474,8 +2627,7 @@ static int sd_start(struct gspca_dev *gspca_dev)
reg1 = 0x44;
reg17 = 0xa2;
break;
- default:
-/* case SENSOR_SP80708: */
+ case SENSOR_SP80708:
init = sp80708_sensor_param1;
if (mode) {
/*?? reg1 = 0x04; * 320 clk 48Mhz */
@@ -2526,7 +2678,6 @@ static int sd_start(struct gspca_dev *gspca_dev)
break;
}
-
/* here change size mode 0 -> VGA; 1 -> CIF */
sd->reg18 = sn9c1xx[0x18] | (mode << 4) | 0x40;
reg_w1(gspca_dev, 0x18, sd->reg18);
@@ -2535,13 +2686,13 @@ static int sd_start(struct gspca_dev *gspca_dev)
reg_w1(gspca_dev, 0x17, reg17);
reg_w1(gspca_dev, 0x01, reg1);
- setvflip(sd);
+ sethvflip(gspca_dev);
setbrightness(gspca_dev);
setcontrast(gspca_dev);
setcolors(gspca_dev);
setautogain(gspca_dev);
setfreq(gspca_dev);
- return 0;
+ return gspca_dev->usb_err;
}
static void sd_stopN(struct gspca_dev *gspca_dev)
@@ -2568,6 +2719,7 @@ static void sd_stopN(struct gspca_dev *gspca_dev)
data = 0x2b;
break;
case SENSOR_MI0360:
+ case SENSOR_MI0360B:
i2c_w8(gspca_dev, stopmi0360);
data = 0x29;
break;
@@ -2641,6 +2793,7 @@ static void do_autogain(struct gspca_dev *gspca_dev)
default:
/* case SENSOR_MO4000: */
/* case SENSOR_MI0360: */
+/* case SENSOR_MI0360B: */
/* case SENSOR_MT9V111: */
expotimes = sd->exposure;
expotimes += (luma_mean - delta) >> 6;
@@ -2663,236 +2816,52 @@ static void sd_pkt_scan(struct gspca_dev *gspca_dev,
struct sd *sd = (struct sd *) gspca_dev;
int sof, avg_lum;
- sof = len - 64;
- if (sof >= 0 && data[sof] == 0xff && data[sof + 1] == 0xd9) {
+ /* the image ends on a 64-byte block starting with
+ * ff d9 ff ff 00 c4 c4 96
+ * followed by various information, including luminosity */
+ /* this block may be split between two packets */
+ /* a new image always starts in a new packet */
+ switch (gspca_dev->last_packet_type) {
+ case DISCARD_PACKET: /* restart image building */
+ sof = len - 64;
+ if (sof >= 0 && data[sof] == 0xff && data[sof + 1] == 0xd9)
+ gspca_frame_add(gspca_dev, LAST_PACKET, NULL, 0);
+ return;
+ case LAST_PACKET: /* put the JPEG 422 header */
+ gspca_frame_add(gspca_dev, FIRST_PACKET,
+ sd->jpeg_hdr, JPEG_HDR_SZ);
+ break;
+ }
+ gspca_frame_add(gspca_dev, INTER_PACKET, data, len);
+
+ data = gspca_dev->image;
+ if (data == NULL)
+ return;
+ sof = gspca_dev->image_len - 64;
+ if (data[sof] != 0xff
+ || data[sof + 1] != 0xd9)
+ return;
- /* end of frame */
- gspca_frame_add(gspca_dev, LAST_PACKET,
- data, sof + 2);
- if (sd->ag_cnt < 0)
- return;
+ /* end of image found - remove the trailing data */
+ gspca_dev->image_len = sof + 2;
+ gspca_frame_add(gspca_dev, LAST_PACKET, NULL, 0);
+ if (sd->ag_cnt < 0)
+ return;
/* w1 w2 w3 */
/* w4 w5 w6 */
/* w7 w8 */
/* w4 */
- avg_lum = ((data[sof + 29] << 8) | data[sof + 30]) >> 6;
+ avg_lum = ((data[sof + 29] << 8) | data[sof + 30]) >> 6;
/* w6 */
- avg_lum += ((data[sof + 33] << 8) | data[sof + 34]) >> 6;
+ avg_lum += ((data[sof + 33] << 8) | data[sof + 34]) >> 6;
/* w2 */
- avg_lum += ((data[sof + 25] << 8) | data[sof + 26]) >> 6;
+ avg_lum += ((data[sof + 25] << 8) | data[sof + 26]) >> 6;
/* w8 */
- avg_lum += ((data[sof + 37] << 8) | data[sof + 38]) >> 6;
+ avg_lum += ((data[sof + 37] << 8) | data[sof + 38]) >> 6;
/* w5 */
- avg_lum += ((data[sof + 31] << 8) | data[sof + 32]) >> 4;
- avg_lum >>= 4;
- atomic_set(&sd->avg_lum, avg_lum);
- return;
- }
- if (gspca_dev->last_packet_type == LAST_PACKET) {
-
- /* put the JPEG 422 header */
- gspca_frame_add(gspca_dev, FIRST_PACKET,
- sd->jpeg_hdr, JPEG_HDR_SZ);
- }
- gspca_frame_add(gspca_dev, INTER_PACKET, data, len);
-}
-
-static int sd_setbrightness(struct gspca_dev *gspca_dev, __s32 val)
-{
- struct sd *sd = (struct sd *) gspca_dev;
-
- sd->brightness = val;
- if (gspca_dev->streaming)
- setbrightness(gspca_dev);
- return 0;
-}
-
-static int sd_getbrightness(struct gspca_dev *gspca_dev, __s32 *val)
-{
- struct sd *sd = (struct sd *) gspca_dev;
-
- *val = sd->brightness;
- return 0;
-}
-
-static int sd_setcontrast(struct gspca_dev *gspca_dev, __s32 val)
-{
- struct sd *sd = (struct sd *) gspca_dev;
-
- sd->contrast = val;
- if (gspca_dev->streaming)
- setcontrast(gspca_dev);
- return 0;
-}
-
-static int sd_getcontrast(struct gspca_dev *gspca_dev, __s32 *val)
-{
- struct sd *sd = (struct sd *) gspca_dev;
-
- *val = sd->contrast;
- return 0;
-}
-
-static int sd_setcolors(struct gspca_dev *gspca_dev, __s32 val)
-{
- struct sd *sd = (struct sd *) gspca_dev;
-
- sd->colors = val;
- if (gspca_dev->streaming)
- setcolors(gspca_dev);
- return 0;
-}
-
-static int sd_getcolors(struct gspca_dev *gspca_dev, __s32 *val)
-{
- struct sd *sd = (struct sd *) gspca_dev;
-
- *val = sd->colors;
- return 0;
-}
-
-static int sd_setblue_balance(struct gspca_dev *gspca_dev, __s32 val)
-{
- struct sd *sd = (struct sd *) gspca_dev;
-
- sd->blue = val;
- if (gspca_dev->streaming)
- setredblue(gspca_dev);
- return 0;
-}
-
-static int sd_getblue_balance(struct gspca_dev *gspca_dev, __s32 *val)
-{
- struct sd *sd = (struct sd *) gspca_dev;
-
- *val = sd->blue;
- return 0;
-}
-
-static int sd_setred_balance(struct gspca_dev *gspca_dev, __s32 val)
-{
- struct sd *sd = (struct sd *) gspca_dev;
-
- sd->red = val;
- if (gspca_dev->streaming)
- setredblue(gspca_dev);
- return 0;
-}
-
-static int sd_getred_balance(struct gspca_dev *gspca_dev, __s32 *val)
-{
- struct sd *sd = (struct sd *) gspca_dev;
-
- *val = sd->red;
- return 0;
-}
-
-static int sd_setgamma(struct gspca_dev *gspca_dev, __s32 val)
-{
- struct sd *sd = (struct sd *) gspca_dev;
-
- sd->gamma = val;
- if (gspca_dev->streaming)
- setgamma(gspca_dev);
- return 0;
-}
-
-static int sd_getgamma(struct gspca_dev *gspca_dev, __s32 *val)
-{
- struct sd *sd = (struct sd *) gspca_dev;
-
- *val = sd->gamma;
- return 0;
-}
-
-static int sd_setautogain(struct gspca_dev *gspca_dev, __s32 val)
-{
- struct sd *sd = (struct sd *) gspca_dev;
-
- sd->autogain = val;
- if (gspca_dev->streaming)
- setautogain(gspca_dev);
- return 0;
-}
-
-static int sd_getautogain(struct gspca_dev *gspca_dev, __s32 *val)
-{
- struct sd *sd = (struct sd *) gspca_dev;
-
- *val = sd->autogain;
- return 0;
-}
-
-static int sd_setsharpness(struct gspca_dev *gspca_dev, __s32 val)
-{
- struct sd *sd = (struct sd *) gspca_dev;
-
- sd->sharpness = val;
- if (gspca_dev->streaming)
- setsharpness(sd);
- return 0;
-}
-
-static int sd_getsharpness(struct gspca_dev *gspca_dev, __s32 *val)
-{
- struct sd *sd = (struct sd *) gspca_dev;
-
- *val = sd->sharpness;
- return 0;
-}
-
-static int sd_setvflip(struct gspca_dev *gspca_dev, __s32 val)
-{
- struct sd *sd = (struct sd *) gspca_dev;
-
- sd->vflip = val;
- if (gspca_dev->streaming)
- setvflip(sd);
- return 0;
-}
-
-static int sd_getvflip(struct gspca_dev *gspca_dev, __s32 *val)
-{
- struct sd *sd = (struct sd *) gspca_dev;
-
- *val = sd->vflip;
- return 0;
-}
-
-static int sd_setinfrared(struct gspca_dev *gspca_dev, __s32 val)
-{
- struct sd *sd = (struct sd *) gspca_dev;
-
- sd->infrared = val;
- if (gspca_dev->streaming)
- setinfrared(sd);
- return 0;
-}
-
-static int sd_getinfrared(struct gspca_dev *gspca_dev, __s32 *val)
-{
- struct sd *sd = (struct sd *) gspca_dev;
-
- *val = sd->infrared;
- return 0;
-}
-
-static int sd_setfreq(struct gspca_dev *gspca_dev, __s32 val)
-{
- struct sd *sd = (struct sd *) gspca_dev;
-
- sd->freq = val;
- if (gspca_dev->streaming)
- setfreq(gspca_dev);
- return 0;
-}
-
-static int sd_getfreq(struct gspca_dev *gspca_dev, __s32 *val)
-{
- struct sd *sd = (struct sd *) gspca_dev;
-
- *val = sd->freq;
- return 0;
+ avg_lum += ((data[sof + 31] << 8) | data[sof + 32]) >> 4;
+ avg_lum >>= 4;
+ atomic_set(&sd->avg_lum, avg_lum);
}
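/*
 * Editor's note, not part of the patch: the trailing status block appears to
 * carry one 16-bit word per luminosity window w1..w8.  The code above builds
 * a weighted mean in which the centre window w5 counts four times as much as
 * the outer windows w2, w4, w6 and w8:
 *
 *	avg_lum = (w4 >> 6) + (w6 >> 6) + (w2 >> 6) + (w8 >> 6) + (w5 >> 4);
 *	avg_lum >>= 4;	/* roughly (w2 + w4 + 4*w5 + w6 + w8) / 1024 */
 */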
static int sd_set_jcomp(struct gspca_dev *gspca_dev,
@@ -2944,7 +2913,7 @@ static int sd_querymenu(struct gspca_dev *gspca_dev,
return -EINVAL;
}
-#ifdef CONFIG_INPUT
+#if defined(CONFIG_INPUT) || defined(CONFIG_INPUT_MODULE)
static int sd_int_pkt_scan(struct gspca_dev *gspca_dev,
u8 *data, /* interrupt packet data */
int len) /* interrupt packet length */
@@ -2967,7 +2936,7 @@ static int sd_int_pkt_scan(struct gspca_dev *gspca_dev,
static const struct sd_desc sd_desc = {
.name = MODULE_NAME,
.ctrls = sd_ctrls,
- .nctrls = ARRAY_SIZE(sd_ctrls),
+ .nctrls = NCTRLS,
.config = sd_config,
.init = sd_init,
.start = sd_start,
@@ -2977,7 +2946,7 @@ static const struct sd_desc sd_desc = {
.get_jcomp = sd_get_jcomp,
.set_jcomp = sd_set_jcomp,
.querymenu = sd_querymenu,
-#ifdef CONFIG_INPUT
+#if defined(CONFIG_INPUT) || defined(CONFIG_INPUT_MODULE)
.int_pkt_scan = sd_int_pkt_scan,
#endif
};
@@ -3005,6 +2974,7 @@ static const __devinitdata struct usb_device_id device_table[] = {
{USB_DEVICE(0x0c45, 0x607c), BS(SN9C102P, HV7131R)},
/* {USB_DEVICE(0x0c45, 0x607e), BS(SN9C102P, OV7630)}, */
{USB_DEVICE(0x0c45, 0x60c0), BS(SN9C105, MI0360)},
+ /* or MT9V111 */
/* {USB_DEVICE(0x0c45, 0x60c2), BS(SN9C105, P1030xC)}, */
/* {USB_DEVICE(0x0c45, 0x60c8), BS(SN9C105, OM6802)}, */
/* {USB_DEVICE(0x0c45, 0x60cc), BS(SN9C105, HV7131GP)}, */
@@ -3019,7 +2989,7 @@ static const __devinitdata struct usb_device_id device_table[] = {
{USB_DEVICE(0x0c45, 0x60fe), BS(SN9C105, OV7630)},
#endif
{USB_DEVICE(0x0c45, 0x6100), BS(SN9C120, MI0360)}, /*sn9c128*/
-/* {USB_DEVICE(0x0c45, 0x6102), BS(SN9C120, PO2030N)}, * / GC0305*/
+ {USB_DEVICE(0x0c45, 0x6102), BS(SN9C120, PO2030N)}, /* /GC0305*/
/* {USB_DEVICE(0x0c45, 0x6108), BS(SN9C120, OM6802)}, */
{USB_DEVICE(0x0c45, 0x610a), BS(SN9C120, OV7648)}, /*sn9c128*/
{USB_DEVICE(0x0c45, 0x610b), BS(SN9C120, OV7660)}, /*sn9c128*/
@@ -3031,12 +3001,12 @@ static const __devinitdata struct usb_device_id device_table[] = {
{USB_DEVICE(0x0c45, 0x6128), BS(SN9C120, OM6802)}, /*sn9c325?*/
/*bw600.inf:*/
{USB_DEVICE(0x0c45, 0x612a), BS(SN9C120, OV7648)}, /*sn9c325?*/
+ {USB_DEVICE(0x0c45, 0x612b), BS(SN9C110, ADCM1700)},
{USB_DEVICE(0x0c45, 0x612c), BS(SN9C110, MO4000)},
{USB_DEVICE(0x0c45, 0x612e), BS(SN9C110, OV7630)},
/* {USB_DEVICE(0x0c45, 0x612f), BS(SN9C110, ICM105C)}, */
-#if !defined CONFIG_USB_SN9C102 && !defined CONFIG_USB_SN9C102_MODULE
{USB_DEVICE(0x0c45, 0x6130), BS(SN9C120, MI0360)},
-#endif
+ /* or MT9V111 / MI0360B */
/* {USB_DEVICE(0x0c45, 0x6132), BS(SN9C120, OV7670)}, */
{USB_DEVICE(0x0c45, 0x6138), BS(SN9C120, MO4000)},
{USB_DEVICE(0x0c45, 0x613a), BS(SN9C120, OV7648)},
@@ -3076,17 +3046,11 @@ static struct usb_driver sd_driver = {
/* -- module insert / remove -- */
static int __init sd_mod_init(void)
{
- int ret;
- ret = usb_register(&sd_driver);
- if (ret < 0)
- return ret;
- info("registered");
- return 0;
+ return usb_register(&sd_driver);
}
static void __exit sd_mod_exit(void)
{
usb_deregister(&sd_driver);
- info("deregistered");
}
module_init(sd_mod_init);
diff --git a/drivers/media/video/gspca/spca1528.c b/drivers/media/video/gspca/spca1528.c
index 3f514eb1d99d..e64338664410 100644
--- a/drivers/media/video/gspca/spca1528.c
+++ b/drivers/media/video/gspca/spca1528.c
@@ -171,7 +171,7 @@ static void reg_r(struct gspca_dev *gspca_dev,
PDEBUG(D_USBI, "GET %02x 0000 %04x %02x", req, index,
gspca_dev->usb_buf[0]);
if (ret < 0) {
- PDEBUG(D_ERR, "reg_r err %d", ret);
+ err("reg_r err %d", ret);
gspca_dev->usb_err = ret;
}
}
@@ -193,7 +193,7 @@ static void reg_w(struct gspca_dev *gspca_dev,
value, index,
NULL, 0, 500);
if (ret < 0) {
- PDEBUG(D_ERR, "reg_w err %d", ret);
+ err("reg_w err %d", ret);
gspca_dev->usb_err = ret;
}
}
@@ -217,7 +217,7 @@ static void reg_wb(struct gspca_dev *gspca_dev,
value, index,
gspca_dev->usb_buf, 1, 500);
if (ret < 0) {
- PDEBUG(D_ERR, "reg_w err %d", ret);
+ err("reg_w err %d", ret);
gspca_dev->usb_err = ret;
}
}
@@ -587,18 +587,11 @@ static struct usb_driver sd_driver = {
/* -- module insert / remove -- */
static int __init sd_mod_init(void)
{
- int ret;
-
- ret = usb_register(&sd_driver);
- if (ret < 0)
- return ret;
- info("registered");
- return 0;
+ return usb_register(&sd_driver);
}
static void __exit sd_mod_exit(void)
{
usb_deregister(&sd_driver);
- info("deregistered");
}
module_init(sd_mod_init);
diff --git a/drivers/media/video/gspca/spca500.c b/drivers/media/video/gspca/spca500.c
index c02beb6c1e93..8e202b9039f1 100644
--- a/drivers/media/video/gspca/spca500.c
+++ b/drivers/media/video/gspca/spca500.c
@@ -396,7 +396,7 @@ static int reg_w(struct gspca_dev *gspca_dev,
USB_DIR_OUT | USB_TYPE_VENDOR | USB_RECIP_DEVICE,
value, index, NULL, 0, 500);
if (ret < 0)
- PDEBUG(D_ERR, "reg write: error %d", ret);
+ err("reg write: error %d", ret);
return ret;
}
@@ -418,8 +418,8 @@ static int reg_r_12(struct gspca_dev *gspca_dev,
gspca_dev->usb_buf, length,
500); /* timeout */
if (ret < 0) {
- PDEBUG(D_ERR, "reg_r_12 err %d", ret);
- return -1;
+ err("reg_r_12 err %d", ret);
+ return ret;
}
return (gspca_dev->usb_buf[1] << 8) + gspca_dev->usb_buf[0];
}
@@ -1093,17 +1093,11 @@ static struct usb_driver sd_driver = {
/* -- module insert / remove -- */
static int __init sd_mod_init(void)
{
- int ret;
- ret = usb_register(&sd_driver);
- if (ret < 0)
- return ret;
- PDEBUG(D_PROBE, "registered");
- return 0;
+ return usb_register(&sd_driver);
}
static void __exit sd_mod_exit(void)
{
usb_deregister(&sd_driver);
- PDEBUG(D_PROBE, "deregistered");
}
module_init(sd_mod_init);
diff --git a/drivers/media/video/gspca/spca501.c b/drivers/media/video/gspca/spca501.c
index c99333933e32..642839a11e8d 100644
--- a/drivers/media/video/gspca/spca501.c
+++ b/drivers/media/video/gspca/spca501.c
@@ -1724,7 +1724,7 @@ static const __u16 spca501c_mysterious_init_data[][3] = {
{0x00, 0x0000, 0x0048},
{0x00, 0x0000, 0x0049},
{0x00, 0x0008, 0x004a},
-/* DSP Registers */
+/* DSP Registers */
{0x01, 0x00a6, 0x0000},
{0x01, 0x0028, 0x0001},
{0x01, 0x0000, 0x0002},
@@ -1788,7 +1788,7 @@ static const __u16 spca501c_mysterious_init_data[][3] = {
{0x05, 0x0022, 0x0004},
{0x05, 0x0025, 0x0001},
{0x05, 0x0000, 0x0000},
-/* Part 4 */
+/* Part 4 */
{0x05, 0x0026, 0x0001},
{0x05, 0x0001, 0x0000},
{0x05, 0x0027, 0x0001},
@@ -1806,7 +1806,7 @@ static const __u16 spca501c_mysterious_init_data[][3] = {
{0x05, 0x0001, 0x0000},
{0x05, 0x0027, 0x0001},
{0x05, 0x004e, 0x0000},
-/* Part 5 */
+/* Part 5 */
{0x01, 0x0003, 0x003f},
{0x01, 0x0001, 0x0056},
{0x01, 0x000f, 0x0008},
@@ -1852,7 +1852,7 @@ static int reg_write(struct usb_device *dev,
PDEBUG(D_USBO, "reg write: 0x%02x 0x%02x 0x%02x",
req, index, value);
if (ret < 0)
- PDEBUG(D_ERR, "reg write: error %d", ret);
+ err("reg write: error %d", ret);
return ret;
}
@@ -2189,17 +2189,11 @@ static struct usb_driver sd_driver = {
/* -- module insert / remove -- */
static int __init sd_mod_init(void)
{
- int ret;
- ret = usb_register(&sd_driver);
- if (ret < 0)
- return ret;
- PDEBUG(D_PROBE, "registered");
- return 0;
+ return usb_register(&sd_driver);
}
static void __exit sd_mod_exit(void)
{
usb_deregister(&sd_driver);
- PDEBUG(D_PROBE, "deregistered");
}
module_init(sd_mod_init);
diff --git a/drivers/media/video/gspca/spca505.c b/drivers/media/video/gspca/spca505.c
index c576eed73abe..bc9dd9034ab4 100644
--- a/drivers/media/video/gspca/spca505.c
+++ b/drivers/media/video/gspca/spca505.c
@@ -368,10 +368,6 @@ static const u8 spca505b_init_data[][3] = {
{0x08, 0x00, 0x00},
{0x08, 0x00, 0x01},
{0x08, 0x00, 0x02},
- {0x00, 0x01, 0x00},
- {0x00, 0x01, 0x01},
- {0x00, 0x01, 0x34},
- {0x00, 0x01, 0x35},
{0x06, 0x18, 0x08},
{0x06, 0xfc, 0x09},
{0x06, 0xfc, 0x0a},
@@ -582,7 +578,7 @@ static int reg_write(struct usb_device *dev,
PDEBUG(D_USBO, "reg write: 0x%02x,0x%02x:0x%02x, %d",
req, index, value, ret);
if (ret < 0)
- PDEBUG(D_ERR, "reg write: error %d", ret);
+ err("reg write: error %d", ret);
return ret;
}
@@ -689,8 +685,7 @@ static int sd_start(struct gspca_dev *gspca_dev)
return ret;
}
if (ret != 0x0101) {
- PDEBUG(D_ERR|D_CONF,
- "After vector read returns 0x%04x should be 0x0101",
+ err("After vector read returns 0x%04x should be 0x0101",
ret);
}
@@ -821,18 +816,11 @@ static struct usb_driver sd_driver = {
/* -- module insert / remove -- */
static int __init sd_mod_init(void)
{
- int ret;
-
- ret = usb_register(&sd_driver);
- if (ret < 0)
- return ret;
- PDEBUG(D_PROBE, "registered");
- return 0;
+ return usb_register(&sd_driver);
}
static void __exit sd_mod_exit(void)
{
usb_deregister(&sd_driver);
- PDEBUG(D_PROBE, "deregistered");
}
module_init(sd_mod_init);
diff --git a/drivers/media/video/gspca/spca508.c b/drivers/media/video/gspca/spca508.c
index edf0fe157501..7307638ac91d 100644
--- a/drivers/media/video/gspca/spca508.c
+++ b/drivers/media/video/gspca/spca508.c
@@ -92,8 +92,7 @@ static const struct v4l2_pix_format sif_mode[] = {
* Initialization data: this is the first set-up data written to the
* device (before the open data).
*/
-static const u16 spca508_init_data[][2] =
-{
+static const u16 spca508_init_data[][2] = {
{0x0000, 0x870b},
{0x0020, 0x8112}, /* Video drop enable, ISO streaming disable */
@@ -1276,7 +1275,7 @@ static int reg_write(struct usb_device *dev,
PDEBUG(D_USBO, "reg write i:0x%04x = 0x%02x",
index, value);
if (ret < 0)
- PDEBUG(D_ERR|D_USBO, "reg write: error %d", ret);
+ err("reg write: error %d", ret);
return ret;
}
@@ -1298,7 +1297,7 @@ static int reg_read(struct gspca_dev *gspca_dev,
PDEBUG(D_USBI, "reg read i:%04x --> %02x",
index, gspca_dev->usb_buf[0]);
if (ret < 0) {
- PDEBUG(D_ERR|D_USBI, "reg_read err %d", ret);
+ err("reg_read err %d", ret);
return ret;
}
return gspca_dev->usb_buf[0];
@@ -1543,18 +1542,11 @@ static struct usb_driver sd_driver = {
/* -- module insert / remove -- */
static int __init sd_mod_init(void)
{
- int ret;
-
- ret = usb_register(&sd_driver);
- if (ret < 0)
- return ret;
- PDEBUG(D_PROBE, "registered");
- return 0;
+ return usb_register(&sd_driver);
}
static void __exit sd_mod_exit(void)
{
usb_deregister(&sd_driver);
- PDEBUG(D_PROBE, "deregistered");
}
module_init(sd_mod_init);
diff --git a/drivers/media/video/gspca/spca561.c b/drivers/media/video/gspca/spca561.c
index 7bb2355005dc..ad73f4812c05 100644
--- a/drivers/media/video/gspca/spca561.c
+++ b/drivers/media/video/gspca/spca561.c
@@ -315,7 +315,7 @@ static void reg_w_val(struct usb_device *dev, __u16 index, __u8 value)
value, index, NULL, 0, 500);
PDEBUG(D_USBO, "reg write: 0x%02x:0x%02x", index, value);
if (ret < 0)
- PDEBUG(D_ERR, "reg write: error %d", ret);
+ err("reg write: error %d", ret);
}
static void write_vector(struct gspca_dev *gspca_dev,
@@ -787,7 +787,7 @@ static void sd_pkt_scan(struct gspca_dev *gspca_dev,
return;
}
-#ifdef CONFIG_INPUT
+#if defined(CONFIG_INPUT) || defined(CONFIG_INPUT_MODULE)
if (data[0] & 0x20) {
input_report_key(gspca_dev->input_dev, KEY_CAMERA, 1);
input_sync(gspca_dev->input_dev);
@@ -1037,7 +1037,7 @@ static const struct sd_desc sd_desc_12a = {
.start = sd_start_12a,
.stopN = sd_stopN,
.pkt_scan = sd_pkt_scan,
-#ifdef CONFIG_INPUT
+#if defined(CONFIG_INPUT) || defined(CONFIG_INPUT_MODULE)
.other_input = 1,
#endif
};
@@ -1051,7 +1051,7 @@ static const struct sd_desc sd_desc_72a = {
.stopN = sd_stopN,
.pkt_scan = sd_pkt_scan,
.dq_callback = do_autogain,
-#ifdef CONFIG_INPUT
+#if defined(CONFIG_INPUT) || defined(CONFIG_INPUT_MODULE)
.other_input = 1,
#endif
};
@@ -1107,17 +1107,11 @@ static struct usb_driver sd_driver = {
/* -- module insert / remove -- */
static int __init sd_mod_init(void)
{
- int ret;
- ret = usb_register(&sd_driver);
- if (ret < 0)
- return ret;
- PDEBUG(D_PROBE, "registered");
- return 0;
+ return usb_register(&sd_driver);
}
static void __exit sd_mod_exit(void)
{
usb_deregister(&sd_driver);
- PDEBUG(D_PROBE, "deregistered");
}
module_init(sd_mod_init);
diff --git a/drivers/media/video/gspca/sq905.c b/drivers/media/video/gspca/sq905.c
index 09b3f93fa4d6..404067745775 100644
--- a/drivers/media/video/gspca/sq905.c
+++ b/drivers/media/video/gspca/sq905.c
@@ -123,7 +123,7 @@ static int sq905_command(struct gspca_dev *gspca_dev, u16 index)
SQ905_COMMAND, index, gspca_dev->usb_buf, 1,
SQ905_CMD_TIMEOUT);
if (ret < 0) {
- PDEBUG(D_ERR, "%s: usb_control_msg failed (%d)",
+ err("%s: usb_control_msg failed (%d)",
__func__, ret);
return ret;
}
@@ -135,7 +135,7 @@ static int sq905_command(struct gspca_dev *gspca_dev, u16 index)
SQ905_PING, 0, gspca_dev->usb_buf, 1,
SQ905_CMD_TIMEOUT);
if (ret < 0) {
- PDEBUG(D_ERR, "%s: usb_control_msg failed 2 (%d)",
+ err("%s: usb_control_msg failed 2 (%d)",
__func__, ret);
return ret;
}
@@ -158,7 +158,7 @@ static int sq905_ack_frame(struct gspca_dev *gspca_dev)
SQ905_READ_DONE, 0, gspca_dev->usb_buf, 1,
SQ905_CMD_TIMEOUT);
if (ret < 0) {
- PDEBUG(D_ERR, "%s: usb_control_msg failed (%d)", __func__, ret);
+ err("%s: usb_control_msg failed (%d)", __func__, ret);
return ret;
}
@@ -186,7 +186,7 @@ sq905_read_data(struct gspca_dev *gspca_dev, u8 *data, int size, int need_lock)
if (need_lock)
mutex_unlock(&gspca_dev->usb_lock);
if (ret < 0) {
- PDEBUG(D_ERR, "%s: usb_control_msg failed (%d)", __func__, ret);
+ err("%s: usb_control_msg failed (%d)", __func__, ret);
return ret;
}
ret = usb_bulk_msg(gspca_dev->dev,
@@ -195,7 +195,7 @@ sq905_read_data(struct gspca_dev *gspca_dev, u8 *data, int size, int need_lock)
/* successful, it returns 0, otherwise negative */
if (ret < 0 || act_len != size) {
- PDEBUG(D_ERR, "bulk read fail (%d) len %d/%d",
+ err("bulk read fail (%d) len %d/%d",
ret, act_len, size);
return -EIO;
}
@@ -226,7 +226,7 @@ static void sq905_dostream(struct work_struct *work)
buffer = kmalloc(SQ905_MAX_TRANSFER, GFP_KERNEL | GFP_DMA);
if (!buffer) {
- PDEBUG(D_ERR, "Couldn't allocate USB buffer");
+ err("Couldn't allocate USB buffer");
goto quit_stream;
}
@@ -436,19 +436,12 @@ static struct usb_driver sd_driver = {
/* -- module insert / remove -- */
static int __init sd_mod_init(void)
{
- int ret;
-
- ret = usb_register(&sd_driver);
- if (ret < 0)
- return ret;
- PDEBUG(D_PROBE, "registered");
- return 0;
+ return usb_register(&sd_driver);
}
static void __exit sd_mod_exit(void)
{
usb_deregister(&sd_driver);
- PDEBUG(D_PROBE, "deregistered");
}
module_init(sd_mod_init);
diff --git a/drivers/media/video/gspca/sq905c.c b/drivers/media/video/gspca/sq905c.c
index 4c70628ca615..c2e88b5303cb 100644
--- a/drivers/media/video/gspca/sq905c.c
+++ b/drivers/media/video/gspca/sq905c.c
@@ -95,7 +95,7 @@ static int sq905c_command(struct gspca_dev *gspca_dev, u16 command, u16 index)
command, index, NULL, 0,
SQ905C_CMD_TIMEOUT);
if (ret < 0) {
- PDEBUG(D_ERR, "%s: usb_control_msg failed (%d)",
+ err("%s: usb_control_msg failed (%d)",
__func__, ret);
return ret;
}
@@ -115,7 +115,7 @@ static int sq905c_read(struct gspca_dev *gspca_dev, u16 command, u16 index,
command, index, gspca_dev->usb_buf, size,
SQ905C_CMD_TIMEOUT);
if (ret < 0) {
- PDEBUG(D_ERR, "%s: usb_control_msg failed (%d)",
+ err("%s: usb_control_msg failed (%d)",
__func__, ret);
return ret;
}
@@ -146,7 +146,7 @@ static void sq905c_dostream(struct work_struct *work)
buffer = kmalloc(SQ905C_MAX_TRANSFER, GFP_KERNEL | GFP_DMA);
if (!buffer) {
- PDEBUG(D_ERR, "Couldn't allocate USB buffer");
+ err("Couldn't allocate USB buffer");
goto quit_stream;
}
@@ -341,19 +341,12 @@ static struct usb_driver sd_driver = {
/* -- module insert / remove -- */
static int __init sd_mod_init(void)
{
- int ret;
-
- ret = usb_register(&sd_driver);
- if (ret < 0)
- return ret;
- PDEBUG(D_PROBE, "registered");
- return 0;
+ return usb_register(&sd_driver);
}
static void __exit sd_mod_exit(void)
{
usb_deregister(&sd_driver);
- PDEBUG(D_PROBE, "deregistered");
}
module_init(sd_mod_init);
diff --git a/drivers/media/video/gspca/sq930x.c b/drivers/media/video/gspca/sq930x.c
index 7ae6522d4edf..3e4b0b94c700 100644
--- a/drivers/media/video/gspca/sq930x.c
+++ b/drivers/media/video/gspca/sq930x.c
@@ -468,7 +468,7 @@ static void reg_r(struct gspca_dev *gspca_dev,
value, 0, gspca_dev->usb_buf, len,
500);
if (ret < 0) {
- PDEBUG(D_ERR, "reg_r %04x failed %d", value, ret);
+ err("reg_r %04x failed %d", value, ret);
gspca_dev->usb_err = ret;
}
}
@@ -488,7 +488,7 @@ static void reg_w(struct gspca_dev *gspca_dev, u16 value, u16 index)
500);
msleep(30);
if (ret < 0) {
- PDEBUG(D_ERR, "reg_w %04x %04x failed %d", value, index, ret);
+ err("reg_w %04x %04x failed %d", value, index, ret);
gspca_dev->usb_err = ret;
}
}
@@ -511,7 +511,7 @@ static void reg_wb(struct gspca_dev *gspca_dev, u16 value, u16 index,
1000);
msleep(30);
if (ret < 0) {
- PDEBUG(D_ERR, "reg_wb %04x %04x failed %d", value, index, ret);
+ err("reg_wb %04x %04x failed %d", value, index, ret);
gspca_dev->usb_err = ret;
}
}
@@ -556,7 +556,7 @@ static void i2c_write(struct sd *sd,
gspca_dev->usb_buf, buf - gspca_dev->usb_buf,
500);
if (ret < 0) {
- PDEBUG(D_ERR, "i2c_write failed %d", ret);
+ err("i2c_write failed %d", ret);
gspca_dev->usb_err = ret;
}
}
@@ -612,7 +612,7 @@ static void ucbus_write(struct gspca_dev *gspca_dev,
gspca_dev->usb_buf, buf - gspca_dev->usb_buf,
500);
if (ret < 0) {
- PDEBUG(D_ERR, "ucbus_write failed %d", ret);
+ err("ucbus_write failed %d", ret);
gspca_dev->usb_err = ret;
return;
}
@@ -688,7 +688,7 @@ static void cmos_probe(struct gspca_dev *gspca_dev)
break;
}
if (i >= ARRAY_SIZE(probe_order))
- PDEBUG(D_PROBE, "Unknown sensor");
+ err("Unknown sensor");
else
sd->sensor = probe_order[i];
}
@@ -1079,7 +1079,7 @@ static void sd_dq_callback(struct gspca_dev *gspca_dev)
gspca_dev->cam.bulk_nurbs = 1;
ret = usb_submit_urb(gspca_dev->urb[0], GFP_ATOMIC);
if (ret < 0)
- PDEBUG(D_ERR|D_PACK, "sd_dq_callback() err %d", ret);
+ err("sd_dq_callback() err %d", ret);
/* wait a little time, otherwise the webcam crashes */
msleep(100);
@@ -1185,18 +1185,11 @@ static struct usb_driver sd_driver = {
/* -- module insert / remove -- */
static int __init sd_mod_init(void)
{
- int ret;
-
- ret = usb_register(&sd_driver);
- if (ret < 0)
- return ret;
- info("registered");
- return 0;
+ return usb_register(&sd_driver);
}
static void __exit sd_mod_exit(void)
{
usb_deregister(&sd_driver);
- info("deregistered");
}
module_init(sd_mod_init);
diff --git a/drivers/media/video/gspca/stk014.c b/drivers/media/video/gspca/stk014.c
index 2aedf4b1bfa3..11a192b95ed4 100644
--- a/drivers/media/video/gspca/stk014.c
+++ b/drivers/media/video/gspca/stk014.c
@@ -27,14 +27,21 @@ MODULE_AUTHOR("Jean-Francois Moine <http://moinejf.free.fr>");
MODULE_DESCRIPTION("Syntek DV4000 (STK014) USB Camera Driver");
MODULE_LICENSE("GPL");
+/* controls */
+enum e_ctrl {
+ BRIGHTNESS,
+ CONTRAST,
+ COLORS,
+ LIGHTFREQ,
+ NCTRLS /* number of controls */
+};
+
/* specific webcam descriptor */
struct sd {
struct gspca_dev gspca_dev; /* !! must be the first item */
- unsigned char brightness;
- unsigned char contrast;
- unsigned char colors;
- unsigned char lightfreq;
+ struct gspca_ctrl ctrls[NCTRLS];
+
u8 quality;
#define QUALITY_MIN 70
#define QUALITY_MAX 95
@@ -44,17 +51,13 @@ struct sd {
};
/* V4L2 controls supported by the driver */
-static int sd_setbrightness(struct gspca_dev *gspca_dev, __s32 val);
-static int sd_getbrightness(struct gspca_dev *gspca_dev, __s32 *val);
-static int sd_setcontrast(struct gspca_dev *gspca_dev, __s32 val);
-static int sd_getcontrast(struct gspca_dev *gspca_dev, __s32 *val);
-static int sd_setcolors(struct gspca_dev *gspca_dev, __s32 val);
-static int sd_getcolors(struct gspca_dev *gspca_dev, __s32 *val);
-static int sd_setfreq(struct gspca_dev *gspca_dev, __s32 val);
-static int sd_getfreq(struct gspca_dev *gspca_dev, __s32 *val);
-
-static const struct ctrl sd_ctrls[] = {
- {
+static void setbrightness(struct gspca_dev *gspca_dev);
+static void setcontrast(struct gspca_dev *gspca_dev);
+static void setcolors(struct gspca_dev *gspca_dev);
+static void setlightfreq(struct gspca_dev *gspca_dev);
+
+static const struct ctrl sd_ctrls[NCTRLS] = {
+[BRIGHTNESS] = {
{
.id = V4L2_CID_BRIGHTNESS,
.type = V4L2_CTRL_TYPE_INTEGER,
@@ -62,13 +65,11 @@ static const struct ctrl sd_ctrls[] = {
.minimum = 0,
.maximum = 255,
.step = 1,
-#define BRIGHTNESS_DEF 127
- .default_value = BRIGHTNESS_DEF,
+ .default_value = 127,
},
- .set = sd_setbrightness,
- .get = sd_getbrightness,
+ .set_control = setbrightness
},
- {
+[CONTRAST] = {
{
.id = V4L2_CID_CONTRAST,
.type = V4L2_CTRL_TYPE_INTEGER,
@@ -76,13 +77,11 @@ static const struct ctrl sd_ctrls[] = {
.minimum = 0,
.maximum = 255,
.step = 1,
-#define CONTRAST_DEF 127
- .default_value = CONTRAST_DEF,
+ .default_value = 127,
},
- .set = sd_setcontrast,
- .get = sd_getcontrast,
+ .set_control = setcontrast
},
- {
+[COLORS] = {
{
.id = V4L2_CID_SATURATION,
.type = V4L2_CTRL_TYPE_INTEGER,
@@ -90,13 +89,11 @@ static const struct ctrl sd_ctrls[] = {
.minimum = 0,
.maximum = 255,
.step = 1,
-#define COLOR_DEF 127
- .default_value = COLOR_DEF,
+ .default_value = 127,
},
- .set = sd_setcolors,
- .get = sd_getcolors,
+ .set_control = setcolors
},
- {
+[LIGHTFREQ] = {
{
.id = V4L2_CID_POWER_LINE_FREQUENCY,
.type = V4L2_CTRL_TYPE_MENU,
@@ -104,11 +101,9 @@ static const struct ctrl sd_ctrls[] = {
.minimum = 1,
.maximum = 2, /* 0: 0, 1: 50Hz, 2:60Hz */
.step = 1,
-#define FREQ_DEF 1
- .default_value = FREQ_DEF,
+ .default_value = 1,
},
- .set = sd_setfreq,
- .get = sd_getfreq,
+ .set_control = setlightfreq
},
};
@@ -142,7 +137,7 @@ static u8 reg_r(struct gspca_dev *gspca_dev,
gspca_dev->usb_buf, 1,
500);
if (ret < 0) {
- PDEBUG(D_ERR, "reg_r err %d", ret);
+ err("reg_r err %d", ret);
gspca_dev->usb_err = ret;
return 0;
}
@@ -167,7 +162,7 @@ static void reg_w(struct gspca_dev *gspca_dev,
0,
500);
if (ret < 0) {
- PDEBUG(D_ERR, "reg_w err %d", ret);
+ err("reg_w err %d", ret);
gspca_dev->usb_err = ret;
}
}
@@ -197,7 +192,7 @@ static void rcv_val(struct gspca_dev *gspca_dev,
&alen,
500); /* timeout in milliseconds */
if (ret < 0) {
- PDEBUG(D_ERR, "rcv_val err %d", ret);
+ err("rcv_val err %d", ret);
gspca_dev->usb_err = ret;
}
}
@@ -240,7 +235,7 @@ static void snd_val(struct gspca_dev *gspca_dev,
&alen,
500); /* timeout in milliseconds */
if (ret < 0) {
- PDEBUG(D_ERR, "snd_val err %d", ret);
+ err("snd_val err %d", ret);
gspca_dev->usb_err = ret;
} else {
if (ads == 0x003f08) {
@@ -264,7 +259,7 @@ static void setbrightness(struct gspca_dev *gspca_dev)
int parval;
parval = 0x06000000 /* whiteness */
- + (sd->brightness << 16);
+ + (sd->ctrls[BRIGHTNESS].val << 16);
set_par(gspca_dev, parval);
}
@@ -274,7 +269,7 @@ static void setcontrast(struct gspca_dev *gspca_dev)
int parval;
parval = 0x07000000 /* contrast */
- + (sd->contrast << 16);
+ + (sd->ctrls[CONTRAST].val << 16);
set_par(gspca_dev, parval);
}
@@ -284,15 +279,15 @@ static void setcolors(struct gspca_dev *gspca_dev)
int parval;
parval = 0x08000000 /* saturation */
- + (sd->colors << 16);
+ + (sd->ctrls[COLORS].val << 16);
set_par(gspca_dev, parval);
}
-static void setfreq(struct gspca_dev *gspca_dev)
+static void setlightfreq(struct gspca_dev *gspca_dev)
{
struct sd *sd = (struct sd *) gspca_dev;
- set_par(gspca_dev, sd->lightfreq == 1
+ set_par(gspca_dev, sd->ctrls[LIGHTFREQ].val == 1
? 0x33640000 /* 50 Hz */
: 0x33780000); /* 60 Hz */
}
@@ -305,10 +300,7 @@ static int sd_config(struct gspca_dev *gspca_dev,
gspca_dev->cam.cam_mode = vga_mode;
gspca_dev->cam.nmodes = ARRAY_SIZE(vga_mode);
- sd->brightness = BRIGHTNESS_DEF;
- sd->contrast = CONTRAST_DEF;
- sd->colors = COLOR_DEF;
- sd->lightfreq = FREQ_DEF;
+ gspca_dev->cam.ctrls = sd->ctrls;
sd->quality = QUALITY_DEF;
return 0;
}
@@ -323,7 +315,7 @@ static int sd_init(struct gspca_dev *gspca_dev)
ret = reg_r(gspca_dev, 0x0740);
if (gspca_dev->usb_err >= 0) {
if (ret != 0xff) {
- PDEBUG(D_ERR|D_STREAM, "init reg: 0x%02x", ret);
+ err("init reg: 0x%02x", ret);
gspca_dev->usb_err = -EIO;
}
}
@@ -357,7 +349,7 @@ static int sd_start(struct gspca_dev *gspca_dev)
gspca_dev->iface,
gspca_dev->alt);
if (ret < 0) {
- PDEBUG(D_ERR|D_STREAM, "set intf %d %d failed",
+ err("set intf %d %d failed",
gspca_dev->iface, gspca_dev->alt);
gspca_dev->usb_err = ret;
goto out;
@@ -378,7 +370,7 @@ static int sd_start(struct gspca_dev *gspca_dev)
set_par(gspca_dev, 0x0a800000); /* Green ? */
set_par(gspca_dev, 0x0b800000); /* Blue ? */
set_par(gspca_dev, 0x0d030000); /* Gamma ? */
- setfreq(gspca_dev); /* light frequency */
+ setlightfreq(gspca_dev);
/* start the video flow */
set_par(gspca_dev, 0x01000000);
@@ -441,78 +433,6 @@ static void sd_pkt_scan(struct gspca_dev *gspca_dev,
gspca_frame_add(gspca_dev, INTER_PACKET, data, len);
}
-static int sd_setbrightness(struct gspca_dev *gspca_dev, __s32 val)
-{
- struct sd *sd = (struct sd *) gspca_dev;
-
- sd->brightness = val;
- if (gspca_dev->streaming)
- setbrightness(gspca_dev);
- return gspca_dev->usb_err;
-}
-
-static int sd_getbrightness(struct gspca_dev *gspca_dev, __s32 *val)
-{
- struct sd *sd = (struct sd *) gspca_dev;
-
- *val = sd->brightness;
- return 0;
-}
-
-static int sd_setcontrast(struct gspca_dev *gspca_dev, __s32 val)
-{
- struct sd *sd = (struct sd *) gspca_dev;
-
- sd->contrast = val;
- if (gspca_dev->streaming)
- setcontrast(gspca_dev);
- return gspca_dev->usb_err;
-}
-
-static int sd_getcontrast(struct gspca_dev *gspca_dev, __s32 *val)
-{
- struct sd *sd = (struct sd *) gspca_dev;
-
- *val = sd->contrast;
- return 0;
-}
-
-static int sd_setcolors(struct gspca_dev *gspca_dev, __s32 val)
-{
- struct sd *sd = (struct sd *) gspca_dev;
-
- sd->colors = val;
- if (gspca_dev->streaming)
- setcolors(gspca_dev);
- return gspca_dev->usb_err;
-}
-
-static int sd_getcolors(struct gspca_dev *gspca_dev, __s32 *val)
-{
- struct sd *sd = (struct sd *) gspca_dev;
-
- *val = sd->colors;
- return 0;
-}
-
-static int sd_setfreq(struct gspca_dev *gspca_dev, __s32 val)
-{
- struct sd *sd = (struct sd *) gspca_dev;
-
- sd->lightfreq = val;
- if (gspca_dev->streaming)
- setfreq(gspca_dev);
- return gspca_dev->usb_err;
-}
-
-static int sd_getfreq(struct gspca_dev *gspca_dev, __s32 *val)
-{
- struct sd *sd = (struct sd *) gspca_dev;
-
- *val = sd->lightfreq;
- return 0;
-}
-
static int sd_querymenu(struct gspca_dev *gspca_dev,
struct v4l2_querymenu *menu)
{
@@ -563,7 +483,7 @@ static int sd_get_jcomp(struct gspca_dev *gspca_dev,
static const struct sd_desc sd_desc = {
.name = MODULE_NAME,
.ctrls = sd_ctrls,
- .nctrls = ARRAY_SIZE(sd_ctrls),
+ .nctrls = NCTRLS,
.config = sd_config,
.init = sd_init,
.start = sd_start,
@@ -603,17 +523,11 @@ static struct usb_driver sd_driver = {
/* -- module insert / remove -- */
static int __init sd_mod_init(void)
{
- int ret;
- ret = usb_register(&sd_driver);
- if (ret < 0)
- return ret;
- info("registered");
- return 0;
+ return usb_register(&sd_driver);
}
static void __exit sd_mod_exit(void)
{
usb_deregister(&sd_driver);
- info("deregistered");
}
module_init(sd_mod_init);
diff --git a/drivers/media/video/gspca/stv0680.c b/drivers/media/video/gspca/stv0680.c
index e50dd7693f74..b199ad4666bd 100644
--- a/drivers/media/video/gspca/stv0680.c
+++ b/drivers/media/video/gspca/stv0680.c
@@ -1,7 +1,7 @@
/*
* STV0680 USB Camera Driver
*
- * Copyright (C) 2009 Hans de Goede <hdgoede@redhat.com>
+ * Copyright (C) 2009 Hans de Goede <hdegoede@redhat.com>
*
* This module is adapted from the in kernel v4l1 stv680 driver:
*
@@ -31,7 +31,7 @@
#include "gspca.h"
-MODULE_AUTHOR("Hans de Goede <hdgoede@redhat.com>");
+MODULE_AUTHOR("Hans de Goede <hdegoede@redhat.com>");
MODULE_DESCRIPTION("STV0680 USB Camera Driver");
MODULE_LICENSE("GPL");
@@ -79,8 +79,7 @@ static int stv_sndctrl(struct gspca_dev *gspca_dev, int set, u8 req, u16 val,
val, 0, gspca_dev->usb_buf, size, 500);
if ((ret < 0) && (req != 0x0a))
- PDEBUG(D_ERR,
- "usb_control_msg error %i, request = 0x%x, error = %i",
+ err("usb_control_msg error %i, request = 0x%x, error = %i",
set, req, ret);
return ret;
@@ -237,7 +236,7 @@ static int sd_config(struct gspca_dev *gspca_dev,
if (stv_sndctrl(gspca_dev, 2, 0x06, 0x0100, 0x12) != 0x12 ||
gspca_dev->usb_buf[8] != 0x53 || gspca_dev->usb_buf[9] != 0x05) {
- PDEBUG(D_ERR, "Could not get descriptor 0100.");
+ err("Could not get descriptor 0100.");
return stv0680_handle_error(gspca_dev, -EIO);
}
@@ -357,17 +356,11 @@ static struct usb_driver sd_driver = {
/* -- module insert / remove -- */
static int __init sd_mod_init(void)
{
- int ret;
- ret = usb_register(&sd_driver);
- if (ret < 0)
- return ret;
- PDEBUG(D_PROBE, "registered");
- return 0;
+ return usb_register(&sd_driver);
}
static void __exit sd_mod_exit(void)
{
usb_deregister(&sd_driver);
- PDEBUG(D_PROBE, "deregistered");
}
module_init(sd_mod_init);
diff --git a/drivers/media/video/gspca/stv06xx/stv06xx.c b/drivers/media/video/gspca/stv06xx/stv06xx.c
index 14f179a19485..086de44a6e57 100644
--- a/drivers/media/video/gspca/stv06xx/stv06xx.c
+++ b/drivers/media/video/gspca/stv06xx/stv06xx.c
@@ -189,7 +189,7 @@ int stv06xx_read_sensor(struct sd *sd, const u8 address, u16 *value)
0x04, 0x40, 0x1400, 0, buf, I2C_BUFFER_LENGTH,
STV06XX_URB_MSG_TIMEOUT);
if (err < 0) {
- PDEBUG(D_ERR, "I2C: Read error writing address: %d", err);
+ err("I2C: Read error writing address: %d", err);
return err;
}
@@ -428,7 +428,7 @@ frame_data:
}
}
-#ifdef CONFIG_INPUT
+#if defined(CONFIG_INPUT) || defined(CONFIG_INPUT_MODULE)
static int sd_int_pkt_scan(struct gspca_dev *gspca_dev,
u8 *data, /* interrupt packet data */
int len) /* interrupt packet length */
@@ -462,7 +462,7 @@ static const struct sd_desc sd_desc = {
.start = stv06xx_start,
.stopN = stv06xx_stopN,
.pkt_scan = stv06xx_pkt_scan,
-#ifdef CONFIG_INPUT
+#if defined(CONFIG_INPUT) || defined(CONFIG_INPUT_MODULE)
.int_pkt_scan = sd_int_pkt_scan,
#endif
};
@@ -562,17 +562,11 @@ static struct usb_driver sd_driver = {
/* -- module insert / remove -- */
static int __init sd_mod_init(void)
{
- int ret;
- ret = usb_register(&sd_driver);
- if (ret < 0)
- return ret;
- PDEBUG(D_PROBE, "registered");
- return 0;
+ return usb_register(&sd_driver);
}
static void __exit sd_mod_exit(void)
{
usb_deregister(&sd_driver);
- PDEBUG(D_PROBE, "deregistered");
}
module_init(sd_mod_init);
diff --git a/drivers/media/video/gspca/stv06xx/stv06xx.h b/drivers/media/video/gspca/stv06xx/stv06xx.h
index 053a27e3a400..e0f63c51f40d 100644
--- a/drivers/media/video/gspca/stv06xx/stv06xx.h
+++ b/drivers/media/video/gspca/stv06xx/stv06xx.h
@@ -37,7 +37,7 @@
#define STV_ISOC_ENDPOINT_ADDR 0x81
-#define STV_REG23 0x0423
+#define STV_REG23 0x0423
/* Control registers of the STV0600 ASIC */
#define STV_I2C_PARTNER 0x1420
diff --git a/drivers/media/video/gspca/stv06xx/stv06xx_hdcs.c b/drivers/media/video/gspca/stv06xx/stv06xx_hdcs.c
index 706e08dc5254..17531b41a073 100644
--- a/drivers/media/video/gspca/stv06xx/stv06xx_hdcs.c
+++ b/drivers/media/video/gspca/stv06xx/stv06xx_hdcs.c
@@ -39,8 +39,8 @@ static const struct ctrl hdcs1x00_ctrl[] = {
.minimum = 0x00,
.maximum = 0xff,
.step = 0x1,
- .default_value = HDCS_DEFAULT_EXPOSURE,
- .flags = V4L2_CTRL_FLAG_SLIDER
+ .default_value = HDCS_DEFAULT_EXPOSURE,
+ .flags = V4L2_CTRL_FLAG_SLIDER
},
.set = hdcs_set_exposure,
.get = hdcs_get_exposure
@@ -52,8 +52,8 @@ static const struct ctrl hdcs1x00_ctrl[] = {
.minimum = 0x00,
.maximum = 0xff,
.step = 0x1,
- .default_value = HDCS_DEFAULT_GAIN,
- .flags = V4L2_CTRL_FLAG_SLIDER
+ .default_value = HDCS_DEFAULT_GAIN,
+ .flags = V4L2_CTRL_FLAG_SLIDER
},
.set = hdcs_set_gain,
.get = hdcs_get_gain
@@ -83,8 +83,8 @@ static const struct ctrl hdcs1020_ctrl[] = {
.minimum = 0x00,
.maximum = 0xffff,
.step = 0x1,
- .default_value = HDCS_DEFAULT_EXPOSURE,
- .flags = V4L2_CTRL_FLAG_SLIDER
+ .default_value = HDCS_DEFAULT_EXPOSURE,
+ .flags = V4L2_CTRL_FLAG_SLIDER
},
.set = hdcs_set_exposure,
.get = hdcs_get_exposure
@@ -96,8 +96,8 @@ static const struct ctrl hdcs1020_ctrl[] = {
.minimum = 0x00,
.maximum = 0xff,
.step = 0x1,
- .default_value = HDCS_DEFAULT_GAIN,
- .flags = V4L2_CTRL_FLAG_SLIDER
+ .default_value = HDCS_DEFAULT_GAIN,
+ .flags = V4L2_CTRL_FLAG_SLIDER
},
.set = hdcs_set_gain,
.get = hdcs_get_gain
@@ -163,7 +163,8 @@ static int hdcs_reg_write_seq(struct sd *sd, u8 reg, u8 *vals, u8 len)
for (i = 0; i < len; i++) {
regs[2 * i] = reg;
regs[2 * i + 1] = vals[i];
- /* All addresses are shifted left one bit as bit 0 toggles r/w */
+ /* All addresses are shifted left one bit
+ * as bit 0 toggles r/w */
reg += 2;
}
diff --git a/drivers/media/video/gspca/stv06xx/stv06xx_hdcs.h b/drivers/media/video/gspca/stv06xx/stv06xx_hdcs.h
index 37b31c99d956..cf3d0ccc1121 100644
--- a/drivers/media/video/gspca/stv06xx/stv06xx_hdcs.h
+++ b/drivers/media/video/gspca/stv06xx/stv06xx_hdcs.h
@@ -37,7 +37,7 @@
#define HDCS_REG_CONTROL(sd) (IS_1020(sd) ? HDCS20_CONTROL : HDCS00_CONTROL)
#define HDCS_1X00_DEF_WIDTH 360
-#define HDCS_1X00_DEF_HEIGHT 296
+#define HDCS_1X00_DEF_HEIGHT 296
#define HDCS_1020_DEF_WIDTH 352
#define HDCS_1020_DEF_HEIGHT 292
diff --git a/drivers/media/video/gspca/stv06xx/stv06xx_st6422.c b/drivers/media/video/gspca/stv06xx/stv06xx_st6422.c
index c11f06e4ae76..3af53264a364 100644
--- a/drivers/media/video/gspca/stv06xx/stv06xx_st6422.c
+++ b/drivers/media/video/gspca/stv06xx/stv06xx_st6422.c
@@ -246,7 +246,7 @@ static int st6422_start(struct sd *sd)
intf = usb_ifnum_to_if(sd->gspca_dev.dev, sd->gspca_dev.iface);
alt = usb_altnum_to_altsetting(intf, sd->gspca_dev.alt);
if (!alt) {
- PDEBUG(D_ERR, "Couldn't get altsetting");
+ err("Couldn't get altsetting");
return -EIO;
}
diff --git a/drivers/media/video/gspca/stv06xx/stv06xx_vv6410.c b/drivers/media/video/gspca/stv06xx/stv06xx_vv6410.c
index 11a0c002f5dc..f8398434c328 100644
--- a/drivers/media/video/gspca/stv06xx/stv06xx_vv6410.c
+++ b/drivers/media/video/gspca/stv06xx/stv06xx_vv6410.c
@@ -66,7 +66,7 @@ static const struct ctrl vv6410_ctrl[] = {
.minimum = 0,
.maximum = 1,
.step = 1,
- .default_value = 0
+ .default_value = 0
},
.set = vv6410_set_vflip,
.get = vv6410_get_vflip
diff --git a/drivers/media/video/gspca/stv06xx/stv06xx_vv6410.h b/drivers/media/video/gspca/stv06xx/stv06xx_vv6410.h
index 96c61926d372..b3b5508473bc 100644
--- a/drivers/media/video/gspca/stv06xx/stv06xx_vv6410.h
+++ b/drivers/media/video/gspca/stv06xx/stv06xx_vv6410.h
@@ -157,8 +157,8 @@
/* Audio Amplifier Setup Register */
#define VV6410_AT1 0x79
-#define VV6410_HFLIP (1 << 3)
-#define VV6410_VFLIP (1 << 4)
+#define VV6410_HFLIP (1 << 3)
+#define VV6410_VFLIP (1 << 4)
#define VV6410_LOW_POWER_MODE (1 << 0)
#define VV6410_SOFT_RESET (1 << 2)
diff --git a/drivers/media/video/gspca/sunplus.c b/drivers/media/video/gspca/sunplus.c
index 9494f86b9a85..a9cbcd6011d9 100644
--- a/drivers/media/video/gspca/sunplus.c
+++ b/drivers/media/video/gspca/sunplus.c
@@ -343,7 +343,7 @@ static void reg_r(struct gspca_dev *gspca_dev,
len ? gspca_dev->usb_buf : NULL, len,
500);
if (ret < 0) {
- PDEBUG(D_ERR, "reg_r err %d", ret);
+ err("reg_r err %d", ret);
gspca_dev->usb_err = ret;
}
}
@@ -368,7 +368,7 @@ static void reg_w_1(struct gspca_dev *gspca_dev,
gspca_dev->usb_buf, 1,
500);
if (ret < 0) {
- PDEBUG(D_ERR, "reg_w_1 err %d", ret);
+ err("reg_w_1 err %d", ret);
gspca_dev->usb_err = ret;
}
}
@@ -388,7 +388,7 @@ static void reg_w_riv(struct gspca_dev *gspca_dev,
USB_DIR_OUT | USB_TYPE_VENDOR | USB_RECIP_DEVICE,
value, index, NULL, 0, 500);
if (ret < 0) {
- PDEBUG(D_ERR, "reg_w_riv err %d", ret);
+ err("reg_w_riv err %d", ret);
gspca_dev->usb_err = ret;
return;
}
@@ -413,7 +413,7 @@ static u8 reg_r_1(struct gspca_dev *gspca_dev,
gspca_dev->usb_buf, 1,
500); /* timeout */
if (ret < 0) {
- PDEBUG(D_ERR, "reg_r_1 err %d", ret);
+ err("reg_r_1 err %d", ret);
gspca_dev->usb_err = ret;
return 0;
}
@@ -440,7 +440,7 @@ static u16 reg_r_12(struct gspca_dev *gspca_dev,
gspca_dev->usb_buf, length,
500);
if (ret < 0) {
- PDEBUG(D_ERR, "reg_r_12 err %d", ret);
+ err("reg_r_12 err %d", ret);
gspca_dev->usb_err = ret;
return 0;
}
@@ -463,7 +463,7 @@ static void setup_qtable(struct gspca_dev *gspca_dev,
/* loop over y components */
for (i = 0; i < 64; i++)
- reg_w_riv(gspca_dev, 0x00, 0x2800 + i, qtable[0][i]);
+ reg_w_riv(gspca_dev, 0x00, 0x2800 + i, qtable[0][i]);
/* loop over c components */
for (i = 0; i < 64; i++)
@@ -712,8 +712,9 @@ static int sd_config(struct gspca_dev *gspca_dev,
sd->subtype = id->driver_info;
if (sd->subtype == AiptekMiniPenCam13) {
-/* try to get the firmware as some cam answer 2.0.1.2.2
- * and should be a spca504b then overwrite that setting */
+
+ /* try to get the firmware as some cam answer 2.0.1.2.2
+ * and should be a spca504b then overwrite that setting */
reg_r(gspca_dev, 0x20, 0, 1);
switch (gspca_dev->usb_buf[0]) {
case 1:
@@ -733,7 +734,7 @@ static int sd_config(struct gspca_dev *gspca_dev,
/* case BRIDGE_SPCA504: */
/* case BRIDGE_SPCA536: */
cam->cam_mode = vga_mode;
- cam->nmodes =ARRAY_SIZE(vga_mode);
+ cam->nmodes = ARRAY_SIZE(vga_mode);
break;
case BRIDGE_SPCA533:
cam->cam_mode = custom_mode;
@@ -1247,17 +1248,11 @@ static struct usb_driver sd_driver = {
/* -- module insert / remove -- */
static int __init sd_mod_init(void)
{
- int ret;
- ret = usb_register(&sd_driver);
- if (ret < 0)
- return ret;
- PDEBUG(D_PROBE, "registered");
- return 0;
+ return usb_register(&sd_driver);
}
static void __exit sd_mod_exit(void)
{
usb_deregister(&sd_driver);
- PDEBUG(D_PROBE, "deregistered");
}
module_init(sd_mod_init);
diff --git a/drivers/media/video/gspca/t613.c b/drivers/media/video/gspca/t613.c
index 3b3b983f2b9d..b45f4d0f3997 100644
--- a/drivers/media/video/gspca/t613.c
+++ b/drivers/media/video/gspca/t613.c
@@ -892,7 +892,7 @@ static int sd_init(struct gspca_dev *gspca_dev)
sd->sensor = SENSOR_OM6802;
break;
default:
- PDEBUG(D_ERR|D_PROBE, "unknown sensor %04x", sensor_id);
+ err("unknown sensor %04x", sensor_id);
return -EINVAL;
}
@@ -1444,17 +1444,11 @@ static struct usb_driver sd_driver = {
/* -- module insert / remove -- */
static int __init sd_mod_init(void)
{
- int ret;
- ret = usb_register(&sd_driver);
- if (ret < 0)
- return ret;
- PDEBUG(D_PROBE, "registered");
- return 0;
+ return usb_register(&sd_driver);
}
static void __exit sd_mod_exit(void)
{
usb_deregister(&sd_driver);
- PDEBUG(D_PROBE, "deregistered");
}
module_init(sd_mod_init);
diff --git a/drivers/media/video/gspca/tv8532.c b/drivers/media/video/gspca/tv8532.c
index d9c5bf3449d4..d9e3c6050781 100644
--- a/drivers/media/video/gspca/tv8532.c
+++ b/drivers/media/video/gspca/tv8532.c
@@ -421,18 +421,12 @@ static struct usb_driver sd_driver = {
/* -- module insert / remove -- */
static int __init sd_mod_init(void)
{
- int ret;
- ret = usb_register(&sd_driver);
- if (ret < 0)
- return ret;
- PDEBUG(D_PROBE, "registered");
- return 0;
+ return usb_register(&sd_driver);
}
static void __exit sd_mod_exit(void)
{
usb_deregister(&sd_driver);
- PDEBUG(D_PROBE, "deregistered");
}
module_init(sd_mod_init);
diff --git a/drivers/media/video/gspca/vc032x.c b/drivers/media/video/gspca/vc032x.c
index b16fd47e8ced..38a6efe1a5f9 100644
--- a/drivers/media/video/gspca/vc032x.c
+++ b/drivers/media/video/gspca/vc032x.c
@@ -3164,7 +3164,7 @@ static void reg_r_i(struct gspca_dev *gspca_dev,
index, gspca_dev->usb_buf, len,
500);
if (ret < 0) {
- PDEBUG(D_ERR, "reg_r err %d", ret);
+ err("reg_r err %d", ret);
gspca_dev->usb_err = ret;
}
}
@@ -3205,7 +3205,7 @@ static void reg_w_i(struct gspca_dev *gspca_dev,
value, index, NULL, 0,
500);
if (ret < 0) {
- PDEBUG(D_ERR, "reg_w err %d", ret);
+ err("reg_w err %d", ret);
gspca_dev->usb_err = ret;
}
}
@@ -3230,7 +3230,7 @@ static u16 read_sensor_register(struct gspca_dev *gspca_dev,
reg_r(gspca_dev, 0xa1, 0xb33f, 1);
if (!(gspca_dev->usb_buf[0] & 0x02)) {
- PDEBUG(D_ERR, "I2c Bus Busy Wait %02x",
+ err("I2c Bus Busy Wait %02x",
gspca_dev->usb_buf[0]);
return 0;
}
@@ -3344,7 +3344,7 @@ static void i2c_write(struct gspca_dev *gspca_dev,
msleep(20);
} while (--retry > 0);
if (retry <= 0)
- PDEBUG(D_ERR, "i2c_write timeout");
+ err("i2c_write timeout");
}
static void put_tab_to_reg(struct gspca_dev *gspca_dev,
@@ -3440,7 +3440,7 @@ static int sd_init(struct gspca_dev *gspca_dev)
switch (sensor) {
case -1:
- PDEBUG(D_PROBE, "Unknown sensor...");
+ err("Unknown sensor...");
return -EINVAL;
case SENSOR_HV7131R:
PDEBUG(D_PROBE, "Find Sensor HV7131R");
@@ -4226,18 +4226,11 @@ static struct usb_driver sd_driver = {
/* -- module insert / remove -- */
static int __init sd_mod_init(void)
{
- int ret;
-
- ret = usb_register(&sd_driver);
- if (ret < 0)
- return ret;
- PDEBUG(D_PROBE, "registered");
- return 0;
+ return usb_register(&sd_driver);
}
static void __exit sd_mod_exit(void)
{
usb_deregister(&sd_driver);
- PDEBUG(D_PROBE, "deregistered");
}
module_init(sd_mod_init);
diff --git a/drivers/media/video/gspca/w996Xcf.c b/drivers/media/video/gspca/w996Xcf.c
index 38a68591ce48..4066ac8c45a0 100644
--- a/drivers/media/video/gspca/w996Xcf.c
+++ b/drivers/media/video/gspca/w996Xcf.c
@@ -67,7 +67,7 @@ static int reg_w(struct sd *sd, __u16 index, __u16 value);
--------------------------------------------------------------------------*/
static int w9968cf_write_fsb(struct sd *sd, u16* data)
{
- struct usb_device* udev = sd->gspca_dev.dev;
+ struct usb_device *udev = sd->gspca_dev.dev;
u16 value;
int ret;
@@ -78,7 +78,7 @@ static int w9968cf_write_fsb(struct sd *sd, u16* data)
USB_TYPE_VENDOR | USB_DIR_OUT | USB_RECIP_DEVICE,
value, 0x06, sd->gspca_dev.usb_buf, 6, 500);
if (ret < 0) {
- PDEBUG(D_ERR, "Write FSB registers failed (%d)", ret);
+ err("Write FSB registers failed (%d)", ret);
return ret;
}
@@ -104,7 +104,7 @@ static int w9968cf_write_sb(struct sd *sd, u16 value)
udelay(W9968CF_I2C_BUS_DELAY);
if (ret < 0) {
- PDEBUG(D_ERR, "Write SB reg [01] %04x failed", value);
+ err("Write SB reg [01] %04x failed", value);
return ret;
}
@@ -130,7 +130,7 @@ static int w9968cf_read_sb(struct sd *sd)
ret = sd->gspca_dev.usb_buf[0] |
(sd->gspca_dev.usb_buf[1] << 8);
else
- PDEBUG(D_ERR, "Read SB reg [01] failed");
+ err("Read SB reg [01] failed");
udelay(W9968CF_I2C_BUS_DELAY);
@@ -437,7 +437,7 @@ static int w9968cf_set_crop_window(struct sd *sd)
if (sd->sensor == SEN_OV7620) {
/* Sigh, this is dependent on the clock / framerate changes
made by the frequency control, sick. */
- if (sd->freq == 1) {
+ if (sd->ctrls[FREQ].val == 1) {
start_cropx = 277;
start_cropy = 37;
} else {
diff --git a/drivers/media/video/gspca/xirlink_cit.c b/drivers/media/video/gspca/xirlink_cit.c
new file mode 100644
index 000000000000..8715577bc2d8
--- /dev/null
+++ b/drivers/media/video/gspca/xirlink_cit.c
@@ -0,0 +1,3253 @@
+/*
+ * USB IBM C-It Video Camera driver
+ *
+ * Supports Xirlink C-It Video Camera, IBM PC Camera,
+ * IBM NetCamera and Veo Stingray.
+ *
+ * Copyright (C) 2010 Hans de Goede <hdegoede@redhat.com>
+ *
+ * This driver is based on earlier work of:
+ *
+ * (C) Copyright 1999 Johannes Erdfelt
+ * (C) Copyright 1999 Randy Dunlap
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ *
+ */
+
+#define MODULE_NAME "xirlink-cit"
+
+#include "gspca.h"
+
+MODULE_AUTHOR("Hans de Goede <hdegoede@redhat.com>");
+MODULE_DESCRIPTION("Xirlink C-IT");
+MODULE_LICENSE("GPL");
+
+/* FIXME we should autodetect this */
+static int ibm_netcam_pro;
+module_param(ibm_netcam_pro, int, 0);
+MODULE_PARM_DESC(ibm_netcam_pro,
+ "Use IBM Netcamera Pro init sequences for Model 3 cams");
+
+/* FIXME this should be handled through the V4L2 input selection API */
+static int rca_input;
+module_param(rca_input, int, 0644);
+MODULE_PARM_DESC(rca_input,
+ "Use rca input instead of ccd sensor on Model 3 cams");
+
+/* specific webcam descriptor */
+struct sd {
+ struct gspca_dev gspca_dev; /* !! must be the first item */
+ u8 model;
+#define CIT_MODEL0 0 /* bcd version 0.01 cams, i.e. the xvp-500 */
+#define CIT_MODEL1 1 /* The model 1 - 4 nomenclature comes from the old */
+#define CIT_MODEL2 2 /* ibmcam driver */
+#define CIT_MODEL3 3
+#define CIT_MODEL4 4
+#define CIT_IBM_NETCAM_PRO 5
+ u8 input_index;
+ u8 stop_on_control_change;
+ u8 sof_read;
+ u8 sof_len;
+ u8 contrast;
+ u8 brightness;
+ u8 hue;
+ u8 sharpness;
+ u8 lighting;
+ u8 hflip;
+};
+
+/* V4L2 controls supported by the driver */
+static int sd_setbrightness(struct gspca_dev *gspca_dev, __s32 val);
+static int sd_getbrightness(struct gspca_dev *gspca_dev, __s32 *val);
+static int sd_setcontrast(struct gspca_dev *gspca_dev, __s32 val);
+static int sd_getcontrast(struct gspca_dev *gspca_dev, __s32 *val);
+static int sd_sethue(struct gspca_dev *gspca_dev, __s32 val);
+static int sd_gethue(struct gspca_dev *gspca_dev, __s32 *val);
+static int sd_setsharpness(struct gspca_dev *gspca_dev, __s32 val);
+static int sd_getsharpness(struct gspca_dev *gspca_dev, __s32 *val);
+static int sd_setlighting(struct gspca_dev *gspca_dev, __s32 val);
+static int sd_getlighting(struct gspca_dev *gspca_dev, __s32 *val);
+static int sd_sethflip(struct gspca_dev *gspca_dev, __s32 val);
+static int sd_gethflip(struct gspca_dev *gspca_dev, __s32 *val);
+static void sd_stop0(struct gspca_dev *gspca_dev);
+
+static const struct ctrl sd_ctrls[] = {
+#define SD_BRIGHTNESS 0
+ {
+ {
+ .id = V4L2_CID_BRIGHTNESS,
+ .type = V4L2_CTRL_TYPE_INTEGER,
+ .name = "Brightness",
+ .minimum = 0,
+ .maximum = 63,
+ .step = 1,
+#define BRIGHTNESS_DEFAULT 32
+ .default_value = BRIGHTNESS_DEFAULT,
+ .flags = 0,
+ },
+ .set = sd_setbrightness,
+ .get = sd_getbrightness,
+ },
+#define SD_CONTRAST 1
+ {
+ {
+ .id = V4L2_CID_CONTRAST,
+ .type = V4L2_CTRL_TYPE_INTEGER,
+ .name = "contrast",
+ .minimum = 0,
+ .maximum = 20,
+ .step = 1,
+#define CONTRAST_DEFAULT 10
+ .default_value = CONTRAST_DEFAULT,
+ .flags = 0,
+ },
+ .set = sd_setcontrast,
+ .get = sd_getcontrast,
+ },
+#define SD_HUE 2
+ {
+ {
+ .id = V4L2_CID_HUE,
+ .type = V4L2_CTRL_TYPE_INTEGER,
+ .name = "Hue",
+ .minimum = 0,
+ .maximum = 127,
+ .step = 1,
+#define HUE_DEFAULT 63
+ .default_value = HUE_DEFAULT,
+ .flags = 0,
+ },
+ .set = sd_sethue,
+ .get = sd_gethue,
+ },
+#define SD_SHARPNESS 3
+ {
+ {
+ .id = V4L2_CID_SHARPNESS,
+ .type = V4L2_CTRL_TYPE_INTEGER,
+ .name = "Sharpness",
+ .minimum = 0,
+ .maximum = 6,
+ .step = 1,
+#define SHARPNESS_DEFAULT 3
+ .default_value = SHARPNESS_DEFAULT,
+ .flags = 0,
+ },
+ .set = sd_setsharpness,
+ .get = sd_getsharpness,
+ },
+#define SD_LIGHTING 4
+ {
+ {
+ .id = V4L2_CID_BACKLIGHT_COMPENSATION,
+ .type = V4L2_CTRL_TYPE_INTEGER,
+ .name = "Lighting",
+ .minimum = 0,
+ .maximum = 2,
+ .step = 1,
+#define LIGHTING_DEFAULT 1
+ .default_value = LIGHTING_DEFAULT,
+ .flags = 0,
+ },
+ .set = sd_setlighting,
+ .get = sd_getlighting,
+ },
+#define SD_HFLIP 5
+ {
+ {
+ .id = V4L2_CID_HFLIP,
+ .type = V4L2_CTRL_TYPE_BOOLEAN,
+ .name = "Mirror",
+ .minimum = 0,
+ .maximum = 1,
+ .step = 1,
+#define HFLIP_DEFAULT 0
+ .default_value = HFLIP_DEFAULT,
+ },
+ .set = sd_sethflip,
+ .get = sd_gethflip,
+ },
+};
+
+static const struct v4l2_pix_format cif_yuv_mode[] = {
+ {176, 144, V4L2_PIX_FMT_CIT_YYVYUY, V4L2_FIELD_NONE,
+ .bytesperline = 176,
+ .sizeimage = 176 * 144 * 3 / 2,
+ .colorspace = V4L2_COLORSPACE_SRGB},
+ {352, 288, V4L2_PIX_FMT_CIT_YYVYUY, V4L2_FIELD_NONE,
+ .bytesperline = 352,
+ .sizeimage = 352 * 288 * 3 / 2,
+ .colorspace = V4L2_COLORSPACE_SRGB},
+};
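+/* Editorial note, not part of the original patch: sizeimage here is
+ width * height * 3 / 2, i.e. 12 bits per pixel, consistent with a
+ 4:2:0-style subsampled layout; e.g. 352 * 288 * 3 / 2 = 152064 bytes. */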
+
+static const struct v4l2_pix_format vga_yuv_mode[] = {
+ {160, 120, V4L2_PIX_FMT_CIT_YYVYUY, V4L2_FIELD_NONE,
+ .bytesperline = 160,
+ .sizeimage = 160 * 120 * 3 / 2,
+ .colorspace = V4L2_COLORSPACE_SRGB},
+ {320, 240, V4L2_PIX_FMT_CIT_YYVYUY, V4L2_FIELD_NONE,
+ .bytesperline = 320,
+ .sizeimage = 320 * 240 * 3 / 2,
+ .colorspace = V4L2_COLORSPACE_SRGB},
+ {640, 480, V4L2_PIX_FMT_CIT_YYVYUY, V4L2_FIELD_NONE,
+ .bytesperline = 640,
+ .sizeimage = 640 * 480 * 3 / 2,
+ .colorspace = V4L2_COLORSPACE_SRGB},
+};
+
+static const struct v4l2_pix_format model0_mode[] = {
+ {160, 120, V4L2_PIX_FMT_CIT_YYVYUY, V4L2_FIELD_NONE,
+ .bytesperline = 160,
+ .sizeimage = 160 * 120 * 3 / 2,
+ .colorspace = V4L2_COLORSPACE_SRGB},
+ {176, 144, V4L2_PIX_FMT_CIT_YYVYUY, V4L2_FIELD_NONE,
+ .bytesperline = 176,
+ .sizeimage = 176 * 144 * 3 / 2,
+ .colorspace = V4L2_COLORSPACE_SRGB},
+ {320, 240, V4L2_PIX_FMT_CIT_YYVYUY, V4L2_FIELD_NONE,
+ .bytesperline = 320,
+ .sizeimage = 320 * 240 * 3 / 2,
+ .colorspace = V4L2_COLORSPACE_SRGB},
+};
+
+static const struct v4l2_pix_format model2_mode[] = {
+ {160, 120, V4L2_PIX_FMT_CIT_YYVYUY, V4L2_FIELD_NONE,
+ .bytesperline = 160,
+ .sizeimage = 160 * 120 * 3 / 2,
+ .colorspace = V4L2_COLORSPACE_SRGB},
+ {176, 144, V4L2_PIX_FMT_CIT_YYVYUY, V4L2_FIELD_NONE,
+ .bytesperline = 176,
+ .sizeimage = 176 * 144 * 3 / 2,
+ .colorspace = V4L2_COLORSPACE_SRGB},
+ {320, 240, V4L2_PIX_FMT_SGRBG8, V4L2_FIELD_NONE,
+ .bytesperline = 320,
+ .sizeimage = 320 * 240,
+ .colorspace = V4L2_COLORSPACE_SRGB},
+ {352, 288, V4L2_PIX_FMT_SGRBG8, V4L2_FIELD_NONE,
+ .bytesperline = 352,
+ .sizeimage = 352 * 288,
+ .colorspace = V4L2_COLORSPACE_SRGB},
+};
+
+/*
+ * 01.01.08 - Added for RCA video in support -LO
+ * This struct is used to init the Model3 cam to use the RCA video in port
+ * instead of the CCD sensor.
+ */
+static const u16 rca_initdata[][3] = {
+ {0, 0x0000, 0x010c},
+ {0, 0x0006, 0x012c},
+ {0, 0x0078, 0x012d},
+ {0, 0x0046, 0x012f},
+ {0, 0xd141, 0x0124},
+ {0, 0x0000, 0x0127},
+ {0, 0xfea8, 0x0124},
+ {1, 0x0000, 0x0116},
+ {0, 0x0064, 0x0116},
+ {1, 0x0000, 0x0115},
+ {0, 0x0003, 0x0115},
+ {0, 0x0008, 0x0123},
+ {0, 0x0000, 0x0117},
+ {0, 0x0000, 0x0112},
+ {0, 0x0080, 0x0100},
+ {0, 0x0000, 0x0100},
+ {1, 0x0000, 0x0116},
+ {0, 0x0060, 0x0116},
+ {0, 0x0002, 0x0112},
+ {0, 0x0000, 0x0123},
+ {0, 0x0001, 0x0117},
+ {0, 0x0040, 0x0108},
+ {0, 0x0019, 0x012c},
+ {0, 0x0040, 0x0116},
+ {0, 0x000a, 0x0115},
+ {0, 0x000b, 0x0115},
+ {0, 0x0078, 0x012d},
+ {0, 0x0046, 0x012f},
+ {0, 0xd141, 0x0124},
+ {0, 0x0000, 0x0127},
+ {0, 0xfea8, 0x0124},
+ {0, 0x0064, 0x0116},
+ {0, 0x0000, 0x0115},
+ {0, 0x0001, 0x0115},
+ {0, 0xffff, 0x0124},
+ {0, 0xfff9, 0x0124},
+ {0, 0x0086, 0x0127},
+ {0, 0xfff8, 0x0124},
+ {0, 0xfffd, 0x0124},
+ {0, 0x00aa, 0x0127},
+ {0, 0xfff8, 0x0124},
+ {0, 0xfffd, 0x0124},
+ {0, 0x0000, 0x0127},
+ {0, 0xfff8, 0x0124},
+ {0, 0xfffd, 0x0124},
+ {0, 0xfffa, 0x0124},
+ {0, 0xffff, 0x0124},
+ {0, 0xfff9, 0x0124},
+ {0, 0x0086, 0x0127},
+ {0, 0xfff8, 0x0124},
+ {0, 0xfffd, 0x0124},
+ {0, 0x00f2, 0x0127},
+ {0, 0xfff8, 0x0124},
+ {0, 0xfffd, 0x0124},
+ {0, 0x000f, 0x0127},
+ {0, 0xfff8, 0x0124},
+ {0, 0xfffd, 0x0124},
+ {0, 0xfffa, 0x0124},
+ {0, 0xffff, 0x0124},
+ {0, 0xfff9, 0x0124},
+ {0, 0x0086, 0x0127},
+ {0, 0xfff8, 0x0124},
+ {0, 0xfffd, 0x0124},
+ {0, 0x00f8, 0x0127},
+ {0, 0xfff8, 0x0124},
+ {0, 0xfffd, 0x0124},
+ {0, 0x00fc, 0x0127},
+ {0, 0xfff8, 0x0124},
+ {0, 0xfffd, 0x0124},
+ {0, 0xfffa, 0x0124},
+ {0, 0xffff, 0x0124},
+ {0, 0xfff9, 0x0124},
+ {0, 0x0086, 0x0127},
+ {0, 0xfff8, 0x0124},
+ {0, 0xfffd, 0x0124},
+ {0, 0x00f9, 0x0127},
+ {0, 0xfff8, 0x0124},
+ {0, 0xfffd, 0x0124},
+ {0, 0x003c, 0x0127},
+ {0, 0xfff8, 0x0124},
+ {0, 0xfffd, 0x0124},
+ {0, 0xfffa, 0x0124},
+ {0, 0xffff, 0x0124},
+ {0, 0xfff9, 0x0124},
+ {0, 0x0086, 0x0127},
+ {0, 0xfff8, 0x0124},
+ {0, 0xfffd, 0x0124},
+ {0, 0x0027, 0x0127},
+ {0, 0xfff8, 0x0124},
+ {0, 0xfffd, 0x0124},
+ {0, 0x0019, 0x0127},
+ {0, 0xfff8, 0x0124},
+ {0, 0xfffd, 0x0124},
+ {0, 0xfffa, 0x0124},
+ {0, 0xfff9, 0x0124},
+ {0, 0x0086, 0x0127},
+ {0, 0xfff8, 0x0124},
+ {0, 0xfffd, 0x0124},
+ {0, 0x0037, 0x0127},
+ {0, 0xfff8, 0x0124},
+ {0, 0xfffd, 0x0124},
+ {0, 0x0000, 0x0127},
+ {0, 0xfff8, 0x0124},
+ {0, 0xfffd, 0x0124},
+ {0, 0x0021, 0x0127},
+ {0, 0xfff8, 0x0124},
+ {0, 0xfffd, 0x0124},
+ {0, 0xfffa, 0x0124},
+ {0, 0xfff9, 0x0124},
+ {0, 0x0086, 0x0127},
+ {0, 0xfff8, 0x0124},
+ {0, 0xfffd, 0x0124},
+ {0, 0x0038, 0x0127},
+ {0, 0xfff8, 0x0124},
+ {0, 0xfffd, 0x0124},
+ {0, 0x0006, 0x0127},
+ {0, 0xfff8, 0x0124},
+ {0, 0xfffd, 0x0124},
+ {0, 0x0045, 0x0127},
+ {0, 0xfff8, 0x0124},
+ {0, 0xfffd, 0x0124},
+ {0, 0xfffa, 0x0124},
+ {0, 0xfff9, 0x0124},
+ {0, 0x0086, 0x0127},
+ {0, 0xfff8, 0x0124},
+ {0, 0xfffd, 0x0124},
+ {0, 0x0037, 0x0127},
+ {0, 0xfff8, 0x0124},
+ {0, 0xfffd, 0x0124},
+ {0, 0x0001, 0x0127},
+ {0, 0xfff8, 0x0124},
+ {0, 0xfffd, 0x0124},
+ {0, 0x002a, 0x0127},
+ {0, 0xfff8, 0x0124},
+ {0, 0xfffd, 0x0124},
+ {0, 0xfffa, 0x0124},
+ {0, 0xfff9, 0x0124},
+ {0, 0x0086, 0x0127},
+ {0, 0xfff8, 0x0124},
+ {0, 0xfffd, 0x0124},
+ {0, 0x0038, 0x0127},
+ {0, 0xfff8, 0x0124},
+ {0, 0xfffd, 0x0124},
+ {0, 0x0000, 0x0127},
+ {0, 0xfff8, 0x0124},
+ {0, 0xfffd, 0x0124},
+ {0, 0x000e, 0x0127},
+ {0, 0xfff8, 0x0124},
+ {0, 0xfffd, 0x0124},
+ {0, 0xfffa, 0x0124},
+ {0, 0xfff9, 0x0124},
+ {0, 0x0086, 0x0127},
+ {0, 0xfff8, 0x0124},
+ {0, 0xfffd, 0x0124},
+ {0, 0x0037, 0x0127},
+ {0, 0xfff8, 0x0124},
+ {0, 0xfffd, 0x0124},
+ {0, 0x0001, 0x0127},
+ {0, 0xfff8, 0x0124},
+ {0, 0xfffd, 0x0124},
+ {0, 0x002b, 0x0127},
+ {0, 0xfff8, 0x0124},
+ {0, 0xfffd, 0x0124},
+ {0, 0xfffa, 0x0124},
+ {0, 0xfff9, 0x0124},
+ {0, 0x0086, 0x0127},
+ {0, 0xfff8, 0x0124},
+ {0, 0xfffd, 0x0124},
+ {0, 0x0038, 0x0127},
+ {0, 0xfff8, 0x0124},
+ {0, 0xfffd, 0x0124},
+ {0, 0x0001, 0x0127},
+ {0, 0xfff8, 0x0124},
+ {0, 0xfffd, 0x0124},
+ {0, 0x00f4, 0x0127},
+ {0, 0xfff8, 0x0124},
+ {0, 0xfffd, 0x0124},
+ {0, 0xfffa, 0x0124},
+ {0, 0xfff9, 0x0124},
+ {0, 0x0086, 0x0127},
+ {0, 0xfff8, 0x0124},
+ {0, 0xfffd, 0x0124},
+ {0, 0x0037, 0x0127},
+ {0, 0xfff8, 0x0124},
+ {0, 0xfffd, 0x0124},
+ {0, 0x0001, 0x0127},
+ {0, 0xfff8, 0x0124},
+ {0, 0xfffd, 0x0124},
+ {0, 0x002c, 0x0127},
+ {0, 0xfff8, 0x0124},
+ {0, 0xfffd, 0x0124},
+ {0, 0xfffa, 0x0124},
+ {0, 0xfff9, 0x0124},
+ {0, 0x0086, 0x0127},
+ {0, 0xfff8, 0x0124},
+ {0, 0xfffd, 0x0124},
+ {0, 0x0038, 0x0127},
+ {0, 0xfff8, 0x0124},
+ {0, 0xfffd, 0x0124},
+ {0, 0x0001, 0x0127},
+ {0, 0xfff8, 0x0124},
+ {0, 0xfffd, 0x0124},
+ {0, 0x0004, 0x0127},
+ {0, 0xfff8, 0x0124},
+ {0, 0xfffd, 0x0124},
+ {0, 0xfffa, 0x0124},
+ {0, 0xfff9, 0x0124},
+ {0, 0x0086, 0x0127},
+ {0, 0xfff8, 0x0124},
+ {0, 0xfffd, 0x0124},
+ {0, 0x0037, 0x0127},
+ {0, 0xfff8, 0x0124},
+ {0, 0xfffd, 0x0124},
+ {0, 0x0001, 0x0127},
+ {0, 0xfff8, 0x0124},
+ {0, 0xfffd, 0x0124},
+ {0, 0x002d, 0x0127},
+ {0, 0xfff8, 0x0124},
+ {0, 0xfffd, 0x0124},
+ {0, 0xfffa, 0x0124},
+ {0, 0xfff9, 0x0124},
+ {0, 0x0086, 0x0127},
+ {0, 0xfff8, 0x0124},
+ {0, 0xfffd, 0x0124},
+ {0, 0x0038, 0x0127},
+ {0, 0xfff8, 0x0124},
+ {0, 0xfffd, 0x0124},
+ {0, 0x0000, 0x0127},
+ {0, 0xfff8, 0x0124},
+ {0, 0xfffd, 0x0124},
+ {0, 0x0014, 0x0127},
+ {0, 0xfff8, 0x0124},
+ {0, 0xfffd, 0x0124},
+ {0, 0xfffa, 0x0124},
+ {0, 0xfff9, 0x0124},
+ {0, 0x0086, 0x0127},
+ {0, 0xfff8, 0x0124},
+ {0, 0xfffd, 0x0124},
+ {0, 0x0037, 0x0127},
+ {0, 0xfff8, 0x0124},
+ {0, 0xfffd, 0x0124},
+ {0, 0x0001, 0x0127},
+ {0, 0xfff8, 0x0124},
+ {0, 0xfffd, 0x0124},
+ {0, 0x002e, 0x0127},
+ {0, 0xfff8, 0x0124},
+ {0, 0xfffd, 0x0124},
+ {0, 0xfffa, 0x0124},
+ {0, 0xfff9, 0x0124},
+ {0, 0x0086, 0x0127},
+ {0, 0xfff8, 0x0124},
+ {0, 0xfffd, 0x0124},
+ {0, 0x0038, 0x0127},
+ {0, 0xfff8, 0x0124},
+ {0, 0xfffd, 0x0124},
+ {0, 0x0003, 0x0127},
+ {0, 0xfff8, 0x0124},
+ {0, 0xfffd, 0x0124},
+ {0, 0x0000, 0x0127},
+ {0, 0xfff8, 0x0124},
+ {0, 0xfffd, 0x0124},
+ {0, 0xfffa, 0x0124},
+ {0, 0xfff9, 0x0124},
+ {0, 0x0086, 0x0127},
+ {0, 0xfff8, 0x0124},
+ {0, 0xfffd, 0x0124},
+ {0, 0x0037, 0x0127},
+ {0, 0xfff8, 0x0124},
+ {0, 0xfffd, 0x0124},
+ {0, 0x0001, 0x0127},
+ {0, 0xfff8, 0x0124},
+ {0, 0xfffd, 0x0124},
+ {0, 0x002f, 0x0127},
+ {0, 0xfff8, 0x0124},
+ {0, 0xfffd, 0x0124},
+ {0, 0xfffa, 0x0124},
+ {0, 0xfff9, 0x0124},
+ {0, 0x0086, 0x0127},
+ {0, 0xfff8, 0x0124},
+ {0, 0xfffd, 0x0124},
+ {0, 0x0038, 0x0127},
+ {0, 0xfff8, 0x0124},
+ {0, 0xfffd, 0x0124},
+ {0, 0x0003, 0x0127},
+ {0, 0xfff8, 0x0124},
+ {0, 0xfffd, 0x0124},
+ {0, 0x0014, 0x0127},
+ {0, 0xfff8, 0x0124},
+ {0, 0xfffd, 0x0124},
+ {0, 0xfffa, 0x0124},
+ {0, 0xfff9, 0x0124},
+ {0, 0x0086, 0x0127},
+ {0, 0xfff8, 0x0124},
+ {0, 0xfffd, 0x0124},
+ {0, 0x0037, 0x0127},
+ {0, 0xfff8, 0x0124},
+ {0, 0xfffd, 0x0124},
+ {0, 0x0001, 0x0127},
+ {0, 0xfff8, 0x0124},
+ {0, 0xfffd, 0x0124},
+ {0, 0x0040, 0x0127},
+ {0, 0xfff8, 0x0124},
+ {0, 0xfffd, 0x0124},
+ {0, 0xfffa, 0x0124},
+ {0, 0xfff9, 0x0124},
+ {0, 0x0086, 0x0127},
+ {0, 0xfff8, 0x0124},
+ {0, 0xfffd, 0x0124},
+ {0, 0x0038, 0x0127},
+ {0, 0xfff8, 0x0124},
+ {0, 0xfffd, 0x0124},
+ {0, 0x0000, 0x0127},
+ {0, 0xfff8, 0x0124},
+ {0, 0xfffd, 0x0124},
+ {0, 0x0040, 0x0127},
+ {0, 0xfff8, 0x0124},
+ {0, 0xfffd, 0x0124},
+ {0, 0xfffa, 0x0124},
+ {0, 0xfff9, 0x0124},
+ {0, 0x0086, 0x0127},
+ {0, 0xfff8, 0x0124},
+ {0, 0xfffd, 0x0124},
+ {0, 0x0037, 0x0127},
+ {0, 0xfff8, 0x0124},
+ {0, 0xfffd, 0x0124},
+ {0, 0x0001, 0x0127},
+ {0, 0xfff8, 0x0124},
+ {0, 0xfffd, 0x0124},
+ {0, 0x0053, 0x0127},
+ {0, 0xfff8, 0x0124},
+ {0, 0xfffd, 0x0124},
+ {0, 0xfffa, 0x0124},
+ {0, 0xfff9, 0x0124},
+ {0, 0x0086, 0x0127},
+ {0, 0xfff8, 0x0124},
+ {0, 0xfffd, 0x0124},
+ {0, 0x0038, 0x0127},
+ {0, 0xfff8, 0x0124},
+ {0, 0xfffd, 0x0124},
+ {0, 0x0000, 0x0127},
+ {0, 0xfff8, 0x0124},
+ {0, 0xfffd, 0x0124},
+ {0, 0x0038, 0x0127},
+ {0, 0xfff8, 0x0124},
+ {0, 0xfffd, 0x0124},
+ {0, 0xfffa, 0x0124},
+ {0, 0x0000, 0x0101},
+ {0, 0x00a0, 0x0103},
+ {0, 0x0078, 0x0105},
+ {0, 0x0000, 0x010a},
+ {0, 0x0024, 0x010b},
+ {0, 0x0028, 0x0119},
+ {0, 0x0088, 0x011b},
+ {0, 0x0002, 0x011d},
+ {0, 0x0003, 0x011e},
+ {0, 0x0000, 0x0129},
+ {0, 0x00fc, 0x012b},
+ {0, 0x0008, 0x0102},
+ {0, 0x0000, 0x0104},
+ {0, 0x0008, 0x011a},
+ {0, 0x0028, 0x011c},
+ {0, 0x0021, 0x012a},
+ {0, 0x0000, 0x0118},
+ {0, 0x0000, 0x0132},
+ {0, 0x0000, 0x0109},
+ {0, 0xfff9, 0x0124},
+ {0, 0x0086, 0x0127},
+ {0, 0xfff8, 0x0124},
+ {0, 0xfffd, 0x0124},
+ {0, 0x0037, 0x0127},
+ {0, 0xfff8, 0x0124},
+ {0, 0xfffd, 0x0124},
+ {0, 0x0001, 0x0127},
+ {0, 0xfff8, 0x0124},
+ {0, 0xfffd, 0x0124},
+ {0, 0x0031, 0x0127},
+ {0, 0xfff8, 0x0124},
+ {0, 0xfffd, 0x0124},
+ {0, 0xfffa, 0x0124},
+ {0, 0xfff9, 0x0124},
+ {0, 0x0086, 0x0127},
+ {0, 0xfff8, 0x0124},
+ {0, 0xfffd, 0x0124},
+ {0, 0x0038, 0x0127},
+ {0, 0xfff8, 0x0124},
+ {0, 0xfffd, 0x0124},
+ {0, 0x0000, 0x0127},
+ {0, 0xfff8, 0x0124},
+ {0, 0xfffd, 0x0124},
+ {0, 0x0000, 0x0127},
+ {0, 0xfff8, 0x0124},
+ {0, 0xfffd, 0x0124},
+ {0, 0xfffa, 0x0124},
+ {0, 0xfff9, 0x0124},
+ {0, 0x0086, 0x0127},
+ {0, 0xfff8, 0x0124},
+ {0, 0xfffd, 0x0124},
+ {0, 0x0037, 0x0127},
+ {0, 0xfff8, 0x0124},
+ {0, 0xfffd, 0x0124},
+ {0, 0x0001, 0x0127},
+ {0, 0xfff8, 0x0124},
+ {0, 0xfffd, 0x0124},
+ {0, 0x0040, 0x0127},
+ {0, 0xfff8, 0x0124},
+ {0, 0xfffd, 0x0124},
+ {0, 0xfffa, 0x0124},
+ {0, 0xfff9, 0x0124},
+ {0, 0x0086, 0x0127},
+ {0, 0xfff8, 0x0124},
+ {0, 0xfffd, 0x0124},
+ {0, 0x0038, 0x0127},
+ {0, 0xfff8, 0x0124},
+ {0, 0xfffd, 0x0124},
+ {0, 0x0000, 0x0127},
+ {0, 0xfff8, 0x0124},
+ {0, 0xfffd, 0x0124},
+ {0, 0x0040, 0x0127},
+ {0, 0xfff8, 0x0124},
+ {0, 0xfffd, 0x0124},
+ {0, 0xfffa, 0x0124},
+ {0, 0xfff9, 0x0124},
+ {0, 0x0086, 0x0127},
+ {0, 0xfff8, 0x0124},
+ {0, 0xfffd, 0x0124},
+ {0, 0x0037, 0x0127},
+ {0, 0xfff8, 0x0124},
+ {0, 0xfffd, 0x0124},
+ {0, 0x0000, 0x0127},
+ {0, 0xfff8, 0x0124},
+ {0, 0xfffd, 0x0124},
+ {0, 0x00dc, 0x0127},
+ {0, 0xfff8, 0x0124},
+ {0, 0xfffd, 0x0124},
+ {0, 0xfffa, 0x0124},
+ {0, 0xfff9, 0x0124},
+ {0, 0x0086, 0x0127},
+ {0, 0xfff8, 0x0124},
+ {0, 0xfffd, 0x0124},
+ {0, 0x0038, 0x0127},
+ {0, 0xfff8, 0x0124},
+ {0, 0xfffd, 0x0124},
+ {0, 0x0000, 0x0127},
+ {0, 0xfff8, 0x0124},
+ {0, 0xfffd, 0x0124},
+ {0, 0x0000, 0x0127},
+ {0, 0xfff8, 0x0124},
+ {0, 0xfffd, 0x0124},
+ {0, 0xfffa, 0x0124},
+ {0, 0xfff9, 0x0124},
+ {0, 0x0086, 0x0127},
+ {0, 0xfff8, 0x0124},
+ {0, 0xfffd, 0x0124},
+ {0, 0x0037, 0x0127},
+ {0, 0xfff8, 0x0124},
+ {0, 0xfffd, 0x0124},
+ {0, 0x0001, 0x0127},
+ {0, 0xfff8, 0x0124},
+ {0, 0xfffd, 0x0124},
+ {0, 0x0032, 0x0127},
+ {0, 0xfff8, 0x0124},
+ {0, 0xfffd, 0x0124},
+ {0, 0xfffa, 0x0124},
+ {0, 0xfff9, 0x0124},
+ {0, 0x0086, 0x0127},
+ {0, 0xfff8, 0x0124},
+ {0, 0xfffd, 0x0124},
+ {0, 0x0038, 0x0127},
+ {0, 0xfff8, 0x0124},
+ {0, 0xfffd, 0x0124},
+ {0, 0x0001, 0x0127},
+ {0, 0xfff8, 0x0124},
+ {0, 0xfffd, 0x0124},
+ {0, 0x0020, 0x0127},
+ {0, 0xfff8, 0x0124},
+ {0, 0xfffd, 0x0124},
+ {0, 0xfffa, 0x0124},
+ {0, 0xfff9, 0x0124},
+ {0, 0x0086, 0x0127},
+ {0, 0xfff8, 0x0124},
+ {0, 0xfffd, 0x0124},
+ {0, 0x0037, 0x0127},
+ {0, 0xfff8, 0x0124},
+ {0, 0xfffd, 0x0124},
+ {0, 0x0001, 0x0127},
+ {0, 0xfff8, 0x0124},
+ {0, 0xfffd, 0x0124},
+ {0, 0x0040, 0x0127},
+ {0, 0xfff8, 0x0124},
+ {0, 0xfffd, 0x0124},
+ {0, 0xfffa, 0x0124},
+ {0, 0xfff9, 0x0124},
+ {0, 0x0086, 0x0127},
+ {0, 0xfff8, 0x0124},
+ {0, 0xfffd, 0x0124},
+ {0, 0x0038, 0x0127},
+ {0, 0xfff8, 0x0124},
+ {0, 0xfffd, 0x0124},
+ {0, 0x0000, 0x0127},
+ {0, 0xfff8, 0x0124},
+ {0, 0xfffd, 0x0124},
+ {0, 0x0040, 0x0127},
+ {0, 0xfff8, 0x0124},
+ {0, 0xfffd, 0x0124},
+ {0, 0xfffa, 0x0124},
+ {0, 0xfff9, 0x0124},
+ {0, 0x0086, 0x0127},
+ {0, 0xfff8, 0x0124},
+ {0, 0xfffd, 0x0124},
+ {0, 0x0037, 0x0127},
+ {0, 0xfff8, 0x0124},
+ {0, 0xfffd, 0x0124},
+ {0, 0x0000, 0x0127},
+ {0, 0xfff8, 0x0124},
+ {0, 0xfffd, 0x0124},
+ {0, 0x0030, 0x0127},
+ {0, 0xfff8, 0x0124},
+ {0, 0xfffd, 0x0124},
+ {0, 0xfffa, 0x0124},
+ {0, 0xfff9, 0x0124},
+ {0, 0x0086, 0x0127},
+ {0, 0xfff8, 0x0124},
+ {0, 0xfffd, 0x0124},
+ {0, 0x0038, 0x0127},
+ {0, 0xfff8, 0x0124},
+ {0, 0xfffd, 0x0124},
+ {0, 0x0008, 0x0127},
+ {0, 0xfff8, 0x0124},
+ {0, 0xfffd, 0x0124},
+ {0, 0x0000, 0x0127},
+ {0, 0xfff8, 0x0124},
+ {0, 0xfffd, 0x0124},
+ {0, 0xfffa, 0x0124},
+ {0, 0x0003, 0x0111},
+};
+
+/* TESTME the old ibmcam driver repeats certain commands to Model1 cameras; we
+ do the same for now (testing needed to see if this is really necessary) */
+static const int cit_model1_ntries = 5;
+static const int cit_model1_ntries2 = 2;
+
+static int cit_write_reg(struct gspca_dev *gspca_dev, u16 value, u16 index)
+{
+ struct usb_device *udev = gspca_dev->dev;
+ int err;
+
+ err = usb_control_msg(udev, usb_sndctrlpipe(udev, 0), 0x00,
+ USB_DIR_OUT | USB_TYPE_VENDOR | USB_RECIP_ENDPOINT,
+ value, index, NULL, 0, 1000);
+ if (err < 0)
+ err("Failed to write a register (index 0x%04X,"
+ " value 0x%02X, error %d)", index, value, err);
+
+ return 0;
+}
+
+static int cit_read_reg(struct gspca_dev *gspca_dev, u16 index)
+{
+ struct usb_device *udev = gspca_dev->dev;
+ __u8 *buf = gspca_dev->usb_buf;
+ int res;
+
+ res = usb_control_msg(udev, usb_rcvctrlpipe(udev, 0), 0x01,
+ USB_DIR_IN | USB_TYPE_VENDOR | USB_RECIP_ENDPOINT,
+ 0x00, index, buf, 8, 1000);
+ if (res < 0) {
+ err("Failed to read a register (index 0x%04X, error %d)",
+ index, res);
+ return res;
+ }
+
+ PDEBUG(D_PROBE,
+ "Register %04x value: %02x %02x %02x %02x %02x %02x %02x %02x",
+ index,
+ buf[0], buf[1], buf[2], buf[3], buf[4], buf[5], buf[6], buf[7]);
+
+ return 0;
+}
+
+/*
+ * cit_send_FF_04_02()
+ *
+ * This procedure sends a magic 3-command prefix to the camera.
+ * The purpose of this prefix is not known.
+ *
+ * History:
+ * 1/2/00 Created.
+ */
+static void cit_send_FF_04_02(struct gspca_dev *gspca_dev)
+{
+ cit_write_reg(gspca_dev, 0x00FF, 0x0127);
+ cit_write_reg(gspca_dev, 0x0004, 0x0124);
+ cit_write_reg(gspca_dev, 0x0002, 0x0124);
+}
+
+static void cit_send_00_04_06(struct gspca_dev *gspca_dev)
+{
+ cit_write_reg(gspca_dev, 0x0000, 0x0127);
+ cit_write_reg(gspca_dev, 0x0004, 0x0124);
+ cit_write_reg(gspca_dev, 0x0006, 0x0124);
+}
+
+static void cit_send_x_00(struct gspca_dev *gspca_dev, unsigned short x)
+{
+ cit_write_reg(gspca_dev, x, 0x0127);
+ cit_write_reg(gspca_dev, 0x0000, 0x0124);
+}
+
+static void cit_send_x_00_05(struct gspca_dev *gspca_dev, unsigned short x)
+{
+ cit_send_x_00(gspca_dev, x);
+ cit_write_reg(gspca_dev, 0x0005, 0x0124);
+}
+
+static void cit_send_x_00_05_02(struct gspca_dev *gspca_dev, unsigned short x)
+{
+ cit_write_reg(gspca_dev, x, 0x0127);
+ cit_write_reg(gspca_dev, 0x0000, 0x0124);
+ cit_write_reg(gspca_dev, 0x0005, 0x0124);
+ cit_write_reg(gspca_dev, 0x0002, 0x0124);
+}
+
+static void cit_send_x_01_00_05(struct gspca_dev *gspca_dev, u16 x)
+{
+ cit_write_reg(gspca_dev, x, 0x0127);
+ cit_write_reg(gspca_dev, 0x0001, 0x0124);
+ cit_write_reg(gspca_dev, 0x0000, 0x0124);
+ cit_write_reg(gspca_dev, 0x0005, 0x0124);
+}
+
+static void cit_send_x_00_05_02_01(struct gspca_dev *gspca_dev, u16 x)
+{
+ cit_write_reg(gspca_dev, x, 0x0127);
+ cit_write_reg(gspca_dev, 0x0000, 0x0124);
+ cit_write_reg(gspca_dev, 0x0005, 0x0124);
+ cit_write_reg(gspca_dev, 0x0002, 0x0124);
+ cit_write_reg(gspca_dev, 0x0001, 0x0124);
+}
+
+static void cit_send_x_00_05_02_08_01(struct gspca_dev *gspca_dev, u16 x)
+{
+ cit_write_reg(gspca_dev, x, 0x0127);
+ cit_write_reg(gspca_dev, 0x0000, 0x0124);
+ cit_write_reg(gspca_dev, 0x0005, 0x0124);
+ cit_write_reg(gspca_dev, 0x0002, 0x0124);
+ cit_write_reg(gspca_dev, 0x0008, 0x0124);
+ cit_write_reg(gspca_dev, 0x0001, 0x0124);
+}
+
+static void cit_Packet_Format1(struct gspca_dev *gspca_dev, u16 fkey, u16 val)
+{
+ cit_send_x_01_00_05(gspca_dev, 0x0088);
+ cit_send_x_00_05(gspca_dev, fkey);
+ cit_send_x_00_05_02_08_01(gspca_dev, val);
+ cit_send_x_00_05(gspca_dev, 0x0088);
+ cit_send_x_00_05_02_01(gspca_dev, fkey);
+ cit_send_x_00_05(gspca_dev, 0x0089);
+ cit_send_x_00(gspca_dev, fkey);
+ cit_send_00_04_06(gspca_dev);
+ cit_read_reg(gspca_dev, 0x0126);
+ cit_send_FF_04_02(gspca_dev);
+}
+
+static void cit_PacketFormat2(struct gspca_dev *gspca_dev, u16 fkey, u16 val)
+{
+ cit_send_x_01_00_05(gspca_dev, 0x0088);
+ cit_send_x_00_05(gspca_dev, fkey);
+ cit_send_x_00_05_02(gspca_dev, val);
+}
+
+static void cit_model2_Packet2(struct gspca_dev *gspca_dev)
+{
+ cit_write_reg(gspca_dev, 0x00ff, 0x012d);
+ cit_write_reg(gspca_dev, 0xfea3, 0x0124);
+}
+
+static void cit_model2_Packet1(struct gspca_dev *gspca_dev, u16 v1, u16 v2)
+{
+ cit_write_reg(gspca_dev, 0x00aa, 0x012d);
+ cit_write_reg(gspca_dev, 0x00ff, 0x012e);
+ cit_write_reg(gspca_dev, v1, 0x012f);
+ cit_write_reg(gspca_dev, 0x00ff, 0x0130);
+ cit_write_reg(gspca_dev, 0xc719, 0x0124);
+ cit_write_reg(gspca_dev, v2, 0x0127);
+
+ cit_model2_Packet2(gspca_dev);
+}
+
+/*
+ * cit_model3_Packet1()
+ *
+ * 00_0078_012d
+ * 00_0097_012f
+ * 00_d141_0124
+ * 00_0096_0127
+ * 00_fea8_0124
+*/
+static void cit_model3_Packet1(struct gspca_dev *gspca_dev, u16 v1, u16 v2)
+{
+ cit_write_reg(gspca_dev, 0x0078, 0x012d);
+ cit_write_reg(gspca_dev, v1, 0x012f);
+ cit_write_reg(gspca_dev, 0xd141, 0x0124);
+ cit_write_reg(gspca_dev, v2, 0x0127);
+ cit_write_reg(gspca_dev, 0xfea8, 0x0124);
+}
+
+static void cit_model4_Packet1(struct gspca_dev *gspca_dev, u16 v1, u16 v2)
+{
+ cit_write_reg(gspca_dev, 0x00aa, 0x012d);
+ cit_write_reg(gspca_dev, v1, 0x012f);
+ cit_write_reg(gspca_dev, 0xd141, 0x0124);
+ cit_write_reg(gspca_dev, v2, 0x0127);
+ cit_write_reg(gspca_dev, 0xfea8, 0x0124);
+}
+
+static void cit_model4_BrightnessPacket(struct gspca_dev *gspca_dev, u16 val)
+{
+ cit_write_reg(gspca_dev, 0x00aa, 0x012d);
+ cit_write_reg(gspca_dev, 0x0026, 0x012f);
+ cit_write_reg(gspca_dev, 0xd141, 0x0124);
+ cit_write_reg(gspca_dev, val, 0x0127);
+ cit_write_reg(gspca_dev, 0x00aa, 0x0130);
+ cit_write_reg(gspca_dev, 0x82a8, 0x0124);
+ cit_write_reg(gspca_dev, 0x0038, 0x012d);
+ cit_write_reg(gspca_dev, 0x0004, 0x012f);
+ cit_write_reg(gspca_dev, 0xd145, 0x0124);
+ cit_write_reg(gspca_dev, 0xfffa, 0x0124);
+}
+
+/* this function is called at probe time */
+static int sd_config(struct gspca_dev *gspca_dev,
+ const struct usb_device_id *id)
+{
+ struct sd *sd = (struct sd *) gspca_dev;
+ struct cam *cam;
+
+ sd->model = id->driver_info;
+ if (sd->model == CIT_MODEL3 && ibm_netcam_pro)
+ sd->model = CIT_IBM_NETCAM_PRO;
+
+ cam = &gspca_dev->cam;
+ switch (sd->model) {
+ case CIT_MODEL0:
+ cam->cam_mode = model0_mode;
+ cam->nmodes = ARRAY_SIZE(model0_mode);
+ cam->reverse_alts = 1;
+ gspca_dev->ctrl_dis = ~((1 << SD_CONTRAST) | (1 << SD_HFLIP));
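+ /* Editorial note (assumption based on gspca framework conventions,
+ not part of the original patch): ctrl_dis is a bitmask of disabled
+ controls indexed by their position in sd_ctrls[], so the inverted
+ mask above leaves only contrast and hflip enabled for this model. */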
+ sd->sof_len = 4;
+ break;
+ case CIT_MODEL1:
+ cam->cam_mode = cif_yuv_mode;
+ cam->nmodes = ARRAY_SIZE(cif_yuv_mode);
+ cam->reverse_alts = 1;
+ gspca_dev->ctrl_dis = (1 << SD_HUE) | (1 << SD_HFLIP);
+ sd->sof_len = 4;
+ break;
+ case CIT_MODEL2:
+ cam->cam_mode = model2_mode + 1; /* no 160x120 */
+ cam->nmodes = 3;
+ gspca_dev->ctrl_dis = (1 << SD_CONTRAST) |
+ (1 << SD_SHARPNESS) |
+ (1 << SD_HFLIP);
+ break;
+ case CIT_MODEL3:
+ cam->cam_mode = vga_yuv_mode;
+ cam->nmodes = ARRAY_SIZE(vga_yuv_mode);
+ gspca_dev->ctrl_dis = (1 << SD_HUE) |
+ (1 << SD_LIGHTING) |
+ (1 << SD_HFLIP);
+ sd->stop_on_control_change = 1;
+ sd->sof_len = 4;
+ break;
+ case CIT_MODEL4:
+ cam->cam_mode = model2_mode;
+ cam->nmodes = ARRAY_SIZE(model2_mode);
+ gspca_dev->ctrl_dis = (1 << SD_CONTRAST) |
+ (1 << SD_SHARPNESS) |
+ (1 << SD_LIGHTING) |
+ (1 << SD_HFLIP);
+ break;
+ case CIT_IBM_NETCAM_PRO:
+ cam->cam_mode = vga_yuv_mode;
+ cam->nmodes = 2; /* no 640 x 480 */
+ cam->input_flags = V4L2_IN_ST_VFLIP;
+ gspca_dev->ctrl_dis = ~(1 << SD_CONTRAST);
+ sd->stop_on_control_change = 1;
+ sd->sof_len = 4;
+ break;
+ }
+
+ sd->brightness = BRIGHTNESS_DEFAULT;
+ sd->contrast = CONTRAST_DEFAULT;
+ sd->hue = HUE_DEFAULT;
+ sd->sharpness = SHARPNESS_DEFAULT;
+ sd->lighting = LIGHTING_DEFAULT;
+ sd->hflip = HFLIP_DEFAULT;
+
+ return 0;
+}
+
+static int cit_init_model0(struct gspca_dev *gspca_dev)
+{
+ cit_write_reg(gspca_dev, 0x0000, 0x0100); /* turn on led */
+ cit_write_reg(gspca_dev, 0x0001, 0x0112); /* turn on autogain ? */
+ cit_write_reg(gspca_dev, 0x0000, 0x0400);
+ cit_write_reg(gspca_dev, 0x0001, 0x0400);
+ cit_write_reg(gspca_dev, 0x0000, 0x0420);
+ cit_write_reg(gspca_dev, 0x0001, 0x0420);
+ cit_write_reg(gspca_dev, 0x000d, 0x0409);
+ cit_write_reg(gspca_dev, 0x0002, 0x040a);
+ cit_write_reg(gspca_dev, 0x0018, 0x0405);
+ cit_write_reg(gspca_dev, 0x0008, 0x0435);
+ cit_write_reg(gspca_dev, 0x0026, 0x040b);
+ cit_write_reg(gspca_dev, 0x0007, 0x0437);
+ cit_write_reg(gspca_dev, 0x0015, 0x042f);
+ cit_write_reg(gspca_dev, 0x002b, 0x0439);
+ cit_write_reg(gspca_dev, 0x0026, 0x043a);
+ cit_write_reg(gspca_dev, 0x0008, 0x0438);
+ cit_write_reg(gspca_dev, 0x001e, 0x042b);
+ cit_write_reg(gspca_dev, 0x0041, 0x042c);
+
+ return 0;
+}
+
+static int cit_init_ibm_netcam_pro(struct gspca_dev *gspca_dev)
+{
+ cit_read_reg(gspca_dev, 0x128);
+ cit_write_reg(gspca_dev, 0x0003, 0x0133);
+ cit_write_reg(gspca_dev, 0x0000, 0x0117);
+ cit_write_reg(gspca_dev, 0x0008, 0x0123);
+ cit_write_reg(gspca_dev, 0x0000, 0x0100);
+ cit_read_reg(gspca_dev, 0x0116);
+ cit_write_reg(gspca_dev, 0x0060, 0x0116);
+ cit_write_reg(gspca_dev, 0x0002, 0x0112);
+ cit_write_reg(gspca_dev, 0x0000, 0x0133);
+ cit_write_reg(gspca_dev, 0x0000, 0x0123);
+ cit_write_reg(gspca_dev, 0x0001, 0x0117);
+ cit_write_reg(gspca_dev, 0x0040, 0x0108);
+ cit_write_reg(gspca_dev, 0x0019, 0x012c);
+ cit_write_reg(gspca_dev, 0x0060, 0x0116);
+ cit_write_reg(gspca_dev, 0x0002, 0x0115);
+ cit_write_reg(gspca_dev, 0x000b, 0x0115);
+
+ cit_write_reg(gspca_dev, 0x0078, 0x012d);
+ cit_write_reg(gspca_dev, 0x0001, 0x012f);
+ cit_write_reg(gspca_dev, 0xd141, 0x0124);
+ cit_write_reg(gspca_dev, 0x0079, 0x012d);
+ cit_write_reg(gspca_dev, 0x00ff, 0x0130);
+ cit_write_reg(gspca_dev, 0xcd41, 0x0124);
+ cit_write_reg(gspca_dev, 0xfffa, 0x0124);
+ cit_read_reg(gspca_dev, 0x0126);
+
+ cit_model3_Packet1(gspca_dev, 0x0000, 0x0000);
+ cit_model3_Packet1(gspca_dev, 0x0000, 0x0001);
+ cit_model3_Packet1(gspca_dev, 0x000b, 0x0000);
+ cit_model3_Packet1(gspca_dev, 0x000c, 0x0008);
+ cit_model3_Packet1(gspca_dev, 0x000d, 0x003a);
+ cit_model3_Packet1(gspca_dev, 0x000e, 0x0060);
+ cit_model3_Packet1(gspca_dev, 0x000f, 0x0060);
+ cit_model3_Packet1(gspca_dev, 0x0010, 0x0008);
+ cit_model3_Packet1(gspca_dev, 0x0011, 0x0004);
+ cit_model3_Packet1(gspca_dev, 0x0012, 0x0028);
+ cit_model3_Packet1(gspca_dev, 0x0013, 0x0002);
+ cit_model3_Packet1(gspca_dev, 0x0014, 0x0000);
+ cit_model3_Packet1(gspca_dev, 0x0015, 0x00fb);
+ cit_model3_Packet1(gspca_dev, 0x0016, 0x0002);
+ cit_model3_Packet1(gspca_dev, 0x0017, 0x0037);
+ cit_model3_Packet1(gspca_dev, 0x0018, 0x0036);
+ cit_model3_Packet1(gspca_dev, 0x001e, 0x0000);
+ cit_model3_Packet1(gspca_dev, 0x001f, 0x0008);
+ cit_model3_Packet1(gspca_dev, 0x0020, 0x00c1);
+ cit_model3_Packet1(gspca_dev, 0x0021, 0x0034);
+ cit_model3_Packet1(gspca_dev, 0x0022, 0x0034);
+ cit_model3_Packet1(gspca_dev, 0x0025, 0x0002);
+ cit_model3_Packet1(gspca_dev, 0x0028, 0x0022);
+ cit_model3_Packet1(gspca_dev, 0x0029, 0x000a);
+ cit_model3_Packet1(gspca_dev, 0x002b, 0x0000);
+ cit_model3_Packet1(gspca_dev, 0x002c, 0x0000);
+ cit_model3_Packet1(gspca_dev, 0x002d, 0x00ff);
+ cit_model3_Packet1(gspca_dev, 0x002e, 0x00ff);
+ cit_model3_Packet1(gspca_dev, 0x002f, 0x00ff);
+ cit_model3_Packet1(gspca_dev, 0x0030, 0x00ff);
+ cit_model3_Packet1(gspca_dev, 0x0031, 0x00ff);
+ cit_model3_Packet1(gspca_dev, 0x0032, 0x0007);
+ cit_model3_Packet1(gspca_dev, 0x0033, 0x0005);
+ cit_model3_Packet1(gspca_dev, 0x0037, 0x0040);
+ cit_model3_Packet1(gspca_dev, 0x0039, 0x0000);
+ cit_model3_Packet1(gspca_dev, 0x003a, 0x0000);
+ cit_model3_Packet1(gspca_dev, 0x003b, 0x0001);
+ cit_model3_Packet1(gspca_dev, 0x003c, 0x0000);
+ cit_model3_Packet1(gspca_dev, 0x0040, 0x000c);
+ cit_model3_Packet1(gspca_dev, 0x0041, 0x00fb);
+ cit_model3_Packet1(gspca_dev, 0x0042, 0x0002);
+ cit_model3_Packet1(gspca_dev, 0x0043, 0x0000);
+ cit_model3_Packet1(gspca_dev, 0x0045, 0x0000);
+ cit_model3_Packet1(gspca_dev, 0x0046, 0x0000);
+ cit_model3_Packet1(gspca_dev, 0x0047, 0x0000);
+ cit_model3_Packet1(gspca_dev, 0x0048, 0x0000);
+ cit_model3_Packet1(gspca_dev, 0x0049, 0x0000);
+ cit_model3_Packet1(gspca_dev, 0x004a, 0x00ff);
+ cit_model3_Packet1(gspca_dev, 0x004b, 0x00ff);
+ cit_model3_Packet1(gspca_dev, 0x004c, 0x00ff);
+ cit_model3_Packet1(gspca_dev, 0x004f, 0x0000);
+ cit_model3_Packet1(gspca_dev, 0x0050, 0x0000);
+ cit_model3_Packet1(gspca_dev, 0x0051, 0x0002);
+ cit_model3_Packet1(gspca_dev, 0x0055, 0x0000);
+ cit_model3_Packet1(gspca_dev, 0x0056, 0x0000);
+ cit_model3_Packet1(gspca_dev, 0x0057, 0x0000);
+ cit_model3_Packet1(gspca_dev, 0x0058, 0x0002);
+ cit_model3_Packet1(gspca_dev, 0x0059, 0x0000);
+ cit_model3_Packet1(gspca_dev, 0x005c, 0x0016);
+ cit_model3_Packet1(gspca_dev, 0x005d, 0x0022);
+ cit_model3_Packet1(gspca_dev, 0x005e, 0x003c);
+ cit_model3_Packet1(gspca_dev, 0x005f, 0x0050);
+ cit_model3_Packet1(gspca_dev, 0x0060, 0x0044);
+ cit_model3_Packet1(gspca_dev, 0x0061, 0x0005);
+ cit_model3_Packet1(gspca_dev, 0x006a, 0x007e);
+ cit_model3_Packet1(gspca_dev, 0x006f, 0x0000);
+ cit_model3_Packet1(gspca_dev, 0x0072, 0x001b);
+ cit_model3_Packet1(gspca_dev, 0x0073, 0x0005);
+ cit_model3_Packet1(gspca_dev, 0x0074, 0x000a);
+ cit_model3_Packet1(gspca_dev, 0x0075, 0x001b);
+ cit_model3_Packet1(gspca_dev, 0x0076, 0x002a);
+ cit_model3_Packet1(gspca_dev, 0x0077, 0x003c);
+ cit_model3_Packet1(gspca_dev, 0x0078, 0x0050);
+ cit_model3_Packet1(gspca_dev, 0x007b, 0x0000);
+ cit_model3_Packet1(gspca_dev, 0x007c, 0x0011);
+ cit_model3_Packet1(gspca_dev, 0x007d, 0x0024);
+ cit_model3_Packet1(gspca_dev, 0x007e, 0x0043);
+ cit_model3_Packet1(gspca_dev, 0x007f, 0x005a);
+ cit_model3_Packet1(gspca_dev, 0x0084, 0x0020);
+ cit_model3_Packet1(gspca_dev, 0x0085, 0x0033);
+ cit_model3_Packet1(gspca_dev, 0x0086, 0x000a);
+ cit_model3_Packet1(gspca_dev, 0x0087, 0x0030);
+ cit_model3_Packet1(gspca_dev, 0x0088, 0x0070);
+ cit_model3_Packet1(gspca_dev, 0x008b, 0x0008);
+ cit_model3_Packet1(gspca_dev, 0x008f, 0x0000);
+ cit_model3_Packet1(gspca_dev, 0x0090, 0x0006);
+ cit_model3_Packet1(gspca_dev, 0x0091, 0x0028);
+ cit_model3_Packet1(gspca_dev, 0x0092, 0x005a);
+ cit_model3_Packet1(gspca_dev, 0x0093, 0x0082);
+ cit_model3_Packet1(gspca_dev, 0x0096, 0x0014);
+ cit_model3_Packet1(gspca_dev, 0x0097, 0x0020);
+ cit_model3_Packet1(gspca_dev, 0x0098, 0x0000);
+ cit_model3_Packet1(gspca_dev, 0x00b0, 0x0046);
+ cit_model3_Packet1(gspca_dev, 0x00b1, 0x0000);
+ cit_model3_Packet1(gspca_dev, 0x00b2, 0x0000);
+ cit_model3_Packet1(gspca_dev, 0x00b3, 0x0004);
+ cit_model3_Packet1(gspca_dev, 0x00b4, 0x0007);
+ cit_model3_Packet1(gspca_dev, 0x00b6, 0x0002);
+ cit_model3_Packet1(gspca_dev, 0x00b7, 0x0004);
+ cit_model3_Packet1(gspca_dev, 0x00bb, 0x0000);
+ cit_model3_Packet1(gspca_dev, 0x00bc, 0x0001);
+ cit_model3_Packet1(gspca_dev, 0x00bd, 0x0000);
+ cit_model3_Packet1(gspca_dev, 0x00bf, 0x0000);
+ cit_model3_Packet1(gspca_dev, 0x00c0, 0x00c8);
+ cit_model3_Packet1(gspca_dev, 0x00c1, 0x0014);
+ cit_model3_Packet1(gspca_dev, 0x00c2, 0x0001);
+ cit_model3_Packet1(gspca_dev, 0x00c3, 0x0000);
+ cit_model3_Packet1(gspca_dev, 0x00c4, 0x0004);
+ cit_model3_Packet1(gspca_dev, 0x00cb, 0x00bf);
+ cit_model3_Packet1(gspca_dev, 0x00cc, 0x00bf);
+ cit_model3_Packet1(gspca_dev, 0x00cd, 0x00bf);
+ cit_model3_Packet1(gspca_dev, 0x00ce, 0x0000);
+ cit_model3_Packet1(gspca_dev, 0x00cf, 0x0020);
+ cit_model3_Packet1(gspca_dev, 0x00d0, 0x0040);
+ cit_model3_Packet1(gspca_dev, 0x00d1, 0x00bf);
+ cit_model3_Packet1(gspca_dev, 0x00d1, 0x00bf);
+ cit_model3_Packet1(gspca_dev, 0x00d2, 0x00bf);
+ cit_model3_Packet1(gspca_dev, 0x00d3, 0x00bf);
+ cit_model3_Packet1(gspca_dev, 0x00ea, 0x0008);
+ cit_model3_Packet1(gspca_dev, 0x00eb, 0x0000);
+ cit_model3_Packet1(gspca_dev, 0x00ec, 0x00e8);
+ cit_model3_Packet1(gspca_dev, 0x00ed, 0x0001);
+ cit_model3_Packet1(gspca_dev, 0x00ef, 0x0022);
+ cit_model3_Packet1(gspca_dev, 0x00f0, 0x0000);
+ cit_model3_Packet1(gspca_dev, 0x00f2, 0x0028);
+ cit_model3_Packet1(gspca_dev, 0x00f4, 0x0002);
+ cit_model3_Packet1(gspca_dev, 0x00f5, 0x0000);
+ cit_model3_Packet1(gspca_dev, 0x00fa, 0x0000);
+ cit_model3_Packet1(gspca_dev, 0x00fb, 0x0001);
+ cit_model3_Packet1(gspca_dev, 0x00fc, 0x0000);
+ cit_model3_Packet1(gspca_dev, 0x00fd, 0x0000);
+ cit_model3_Packet1(gspca_dev, 0x00fe, 0x0000);
+ cit_model3_Packet1(gspca_dev, 0x00ff, 0x0000);
+
+ cit_model3_Packet1(gspca_dev, 0x00be, 0x0003);
+ cit_model3_Packet1(gspca_dev, 0x00c8, 0x0000);
+ cit_model3_Packet1(gspca_dev, 0x00c9, 0x0020);
+ cit_model3_Packet1(gspca_dev, 0x00ca, 0x0040);
+ cit_model3_Packet1(gspca_dev, 0x0053, 0x0001);
+ cit_model3_Packet1(gspca_dev, 0x0082, 0x000e);
+ cit_model3_Packet1(gspca_dev, 0x0083, 0x0020);
+ cit_model3_Packet1(gspca_dev, 0x0034, 0x003c);
+ cit_model3_Packet1(gspca_dev, 0x006e, 0x0055);
+ cit_model3_Packet1(gspca_dev, 0x0062, 0x0005);
+ cit_model3_Packet1(gspca_dev, 0x0063, 0x0008);
+ cit_model3_Packet1(gspca_dev, 0x0066, 0x000a);
+ cit_model3_Packet1(gspca_dev, 0x0067, 0x0006);
+ cit_model3_Packet1(gspca_dev, 0x006b, 0x0010);
+ cit_model3_Packet1(gspca_dev, 0x005a, 0x0001);
+ cit_model3_Packet1(gspca_dev, 0x005b, 0x000a);
+ cit_model3_Packet1(gspca_dev, 0x0023, 0x0006);
+ cit_model3_Packet1(gspca_dev, 0x0026, 0x0004);
+ cit_model3_Packet1(gspca_dev, 0x0036, 0x0069);
+ cit_model3_Packet1(gspca_dev, 0x0038, 0x0064);
+ cit_model3_Packet1(gspca_dev, 0x003d, 0x0003);
+ cit_model3_Packet1(gspca_dev, 0x003e, 0x0001);
+ cit_model3_Packet1(gspca_dev, 0x00b8, 0x0014);
+ cit_model3_Packet1(gspca_dev, 0x00b9, 0x0014);
+ cit_model3_Packet1(gspca_dev, 0x00e6, 0x0004);
+ cit_model3_Packet1(gspca_dev, 0x00e8, 0x0001);
+
+ return 0;
+}
+
+/* this function is called at probe and resume time */
+static int sd_init(struct gspca_dev *gspca_dev)
+{
+ struct sd *sd = (struct sd *) gspca_dev;
+
+ switch (sd->model) {
+ case CIT_MODEL0:
+ cit_init_model0(gspca_dev);
+ sd_stop0(gspca_dev);
+ break;
+ case CIT_MODEL1:
+ case CIT_MODEL2:
+ case CIT_MODEL3:
+ case CIT_MODEL4:
+ break; /* All is done in sd_start */
+ case CIT_IBM_NETCAM_PRO:
+ cit_init_ibm_netcam_pro(gspca_dev);
+ sd_stop0(gspca_dev);
+ break;
+ }
+ return 0;
+}
+
+static int cit_set_brightness(struct gspca_dev *gspca_dev)
+{
+ struct sd *sd = (struct sd *) gspca_dev;
+ int i;
+
+ switch (sd->model) {
+ case CIT_MODEL0:
+ case CIT_IBM_NETCAM_PRO:
+ /* No (known) brightness control for these */
+ break;
+ case CIT_MODEL1:
+ /* Model 1: Brightness range 0 - 63 */
+ cit_Packet_Format1(gspca_dev, 0x0031, sd->brightness);
+ cit_Packet_Format1(gspca_dev, 0x0032, sd->brightness);
+ cit_Packet_Format1(gspca_dev, 0x0033, sd->brightness);
+ break;
+ case CIT_MODEL2:
+ /* Model 2: Brightness range 0x60 - 0xee */
+ /* Scale 0 - 63 to 0x60 - 0xee */
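+ /* (editorial worked example, not in the original patch:
+ 0xee - 0x60 = 142 and 142 / 63 ~= 2.254, hence * 2254 / 1000;
+ brightness 63 maps to 0x60 + 142 = 0xee) */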
+ i = 0x60 + sd->brightness * 2254 / 1000;
+ cit_model2_Packet1(gspca_dev, 0x001a, i);
+ break;
+ case CIT_MODEL3:
+ /* Model 3: Brightness range 'i' in [0x0C..0x3F] */
+ i = sd->brightness;
+ if (i < 0x0c)
+ i = 0x0c;
+ cit_model3_Packet1(gspca_dev, 0x0036, i);
+ break;
+ case CIT_MODEL4:
+ /* Model 4: Brightness range 'i' in [0x04..0xb4] */
+ /* Scale 0 - 63 to 0x04 - 0xb4 */
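+ /* (editorial worked example, not in the original patch:
+ 0xb4 - 0x04 = 176 and 176 / 63 ~= 2.794, hence * 2794 / 1000;
+ brightness 63 maps to 0x04 + 176 = 0xb4) */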
+ i = 0x04 + sd->brightness * 2794 / 1000;
+ cit_model4_BrightnessPacket(gspca_dev, i);
+ break;
+ }
+
+ return 0;
+}
+
+static int cit_set_contrast(struct gspca_dev *gspca_dev)
+{
+ struct sd *sd = (struct sd *) gspca_dev;
+
+ switch (sd->model) {
+ case CIT_MODEL0: {
+ int i;
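+ /* Editorial note, not in the original patch: the / 1333 divisors
+ below map the 0-20 contrast range onto each register's range,
+ e.g. 20 * 1000 / 1333 = 15, the top of the first 0-15 gain range. */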
+ /* gain 0-15, 0-20 -> 0-15 */
+ i = sd->contrast * 1000 / 1333;
+ cit_write_reg(gspca_dev, i, 0x0422);
+ /* gain 0-31, may not be lower than 0x0422, 0-20 -> 0-31 */
+ i = sd->contrast * 2000 / 1333;
+ cit_write_reg(gspca_dev, i, 0x0423);
+ /* gain 0-127, may not be lower than 0x0423, 0-20 -> 0-63 */
+ i = sd->contrast * 4000 / 1333;
+ cit_write_reg(gspca_dev, i, 0x0424);
+ /* gain 0-127, may not be lower than 0x0424, 0-20 -> 0-127 */
+ i = sd->contrast * 8000 / 1333;
+ cit_write_reg(gspca_dev, i, 0x0425);
+ break;
+ }
+ case CIT_MODEL2:
+ case CIT_MODEL4:
+ /* These models do not have this control. */
+ break;
+ case CIT_MODEL1:
+ {
+ /* Scale 0 - 20 to 15 - 0 */
+ int i, new_contrast = (20 - sd->contrast) * 1000 / 1333;
+ for (i = 0; i < cit_model1_ntries; i++) {
+ cit_Packet_Format1(gspca_dev, 0x0014, new_contrast);
+ cit_send_FF_04_02(gspca_dev);
+ }
+ break;
+ }
+ case CIT_MODEL3:
+ { /* Preset hardware values */
+ static const struct {
+ unsigned short cv1;
+ unsigned short cv2;
+ unsigned short cv3;
+ } cv[7] = {
+ { 0x05, 0x05, 0x0f }, /* Minimum */
+ { 0x04, 0x04, 0x16 },
+ { 0x02, 0x03, 0x16 },
+ { 0x02, 0x08, 0x16 },
+ { 0x01, 0x0c, 0x16 },
+ { 0x01, 0x0e, 0x16 },
+ { 0x01, 0x10, 0x16 } /* Maximum */
+ };
+ int i = sd->contrast / 3;
+ cit_model3_Packet1(gspca_dev, 0x0067, cv[i].cv1);
+ cit_model3_Packet1(gspca_dev, 0x005b, cv[i].cv2);
+ cit_model3_Packet1(gspca_dev, 0x005c, cv[i].cv3);
+ break;
+ }
+ case CIT_IBM_NETCAM_PRO:
+ cit_model3_Packet1(gspca_dev, 0x005b, sd->contrast + 1);
+ break;
+ }
+ return 0;
+}
+
+static int cit_set_hue(struct gspca_dev *gspca_dev)
+{
+ struct sd *sd = (struct sd *) gspca_dev;
+
+ switch (sd->model) {
+ case CIT_MODEL0:
+ case CIT_MODEL1:
+ case CIT_IBM_NETCAM_PRO:
+ /* No hue control for these models */
+ break;
+ case CIT_MODEL2:
+ cit_model2_Packet1(gspca_dev, 0x0024, sd->hue);
+ /* cit_model2_Packet1(gspca_dev, 0x0020, sat); */
+ break;
+ case CIT_MODEL3: {
+ /* Model 3: Hue range 'i' in [0x05..0x37] */
+ /* TESTME according to the ibmcam driver this does not work */
+ if (0) {
+ /* Scale 0 - 127 to 0x05 - 0x37 */
+ int i = 0x05 + sd->hue * 1000 / 2540;
+ cit_model3_Packet1(gspca_dev, 0x007e, i);
+ }
+ break;
+ }
+ case CIT_MODEL4:
+ /* HDG: taken from ibmcam; setting the color gains does not
+ * really belong here.
+ *
+ * I am not sure the r/g/b_gain variables exactly control the gain
+ * of those channels. Most likely they subtly change some
+ * very internal image processing settings in the camera.
+ * In any case, here is what they do, and feel free to tweak:
+ *
+ * r_gain: seriously affects red gain
+ * g_gain: seriously affects green gain
+ * b_gain: seriously affects blue gain
+ * hue: changes average color from violet (0) to red (0xFF)
+ */
+ cit_write_reg(gspca_dev, 0x00aa, 0x012d);
+ cit_write_reg(gspca_dev, 0x001e, 0x012f);
+ cit_write_reg(gspca_dev, 0xd141, 0x0124);
+ cit_write_reg(gspca_dev, 160, 0x0127); /* Green gain */
+ cit_write_reg(gspca_dev, 160, 0x012e); /* Red gain */
+ cit_write_reg(gspca_dev, 160, 0x0130); /* Blue gain */
+ cit_write_reg(gspca_dev, 0x8a28, 0x0124);
+ cit_write_reg(gspca_dev, sd->hue, 0x012d); /* Hue */
+ cit_write_reg(gspca_dev, 0xf545, 0x0124);
+ break;
+ }
+ return 0;
+}
+
+static int cit_set_sharpness(struct gspca_dev *gspca_dev)
+{
+ struct sd *sd = (struct sd *) gspca_dev;
+
+ switch (sd->model) {
+ case CIT_MODEL0:
+ case CIT_MODEL2:
+ case CIT_MODEL4:
+ case CIT_IBM_NETCAM_PRO:
+ /* These models do not have this control */
+ break;
+ case CIT_MODEL1: {
+ int i;
+ const unsigned short sa[] = {
+ 0x11, 0x13, 0x16, 0x18, 0x1a, 0x8, 0x0a };
+
+ for (i = 0; i < cit_model1_ntries; i++)
+ cit_PacketFormat2(gspca_dev, 0x0013, sa[sd->sharpness]);
+ break;
+ }
+ case CIT_MODEL3:
+ { /*
+ * "Use a table of magic numbers.
+ * This setting doesn't really change much.
+ * But that's how Windows does it."
+ */
+ static const struct {
+ unsigned short sv1;
+ unsigned short sv2;
+ unsigned short sv3;
+ unsigned short sv4;
+ } sv[7] = {
+ { 0x00, 0x00, 0x05, 0x14 }, /* Smoothest */
+ { 0x01, 0x04, 0x05, 0x14 },
+ { 0x02, 0x04, 0x05, 0x14 },
+ { 0x03, 0x04, 0x05, 0x14 },
+ { 0x03, 0x05, 0x05, 0x14 },
+ { 0x03, 0x06, 0x05, 0x14 },
+ { 0x03, 0x07, 0x05, 0x14 } /* Sharpest */
+ };
+ cit_model3_Packet1(gspca_dev, 0x0060, sv[sd->sharpness].sv1);
+ cit_model3_Packet1(gspca_dev, 0x0061, sv[sd->sharpness].sv2);
+ cit_model3_Packet1(gspca_dev, 0x0062, sv[sd->sharpness].sv3);
+ cit_model3_Packet1(gspca_dev, 0x0063, sv[sd->sharpness].sv4);
+ break;
+ }
+ }
+ return 0;
+}
+
+/*
+ * cit_set_lighting()
+ *
+ * Camera model 1:
+ * We have 3 levels of lighting conditions: 0=Bright, 1=Medium, 2=Low.
+ *
+ * Camera model 2:
+ * We have 16 levels of lighting, 0 for bright light and up to 15 for
+ * low light. But values above 5 or so are useless because the camera is
+ * not really capable of producing anything worth viewing in such light.
+ * This setting may be altered only in certain camera state.
+ *
+ * Low lighting forces slower FPS.
+ *
+ * History:
+ * 1/5/00 Created.
+ * 2/20/00 Added support for Model 2 cameras.
+ */
+static void cit_set_lighting(struct gspca_dev *gspca_dev)
+{
+ struct sd *sd = (struct sd *) gspca_dev;
+
+ switch (sd->model) {
+ case CIT_MODEL0:
+ case CIT_MODEL2:
+ case CIT_MODEL3:
+ case CIT_MODEL4:
+ case CIT_IBM_NETCAM_PRO:
+ break;
+ case CIT_MODEL1: {
+ int i;
+ for (i = 0; i < cit_model1_ntries; i++)
+ cit_Packet_Format1(gspca_dev, 0x0027, sd->lighting);
+ break;
+ }
+ }
+}
+
+static void cit_set_hflip(struct gspca_dev *gspca_dev)
+{
+ struct sd *sd = (struct sd *) gspca_dev;
+
+ switch (sd->model) {
+ case CIT_MODEL0:
+ if (sd->hflip)
+ cit_write_reg(gspca_dev, 0x0020, 0x0115);
+ else
+ cit_write_reg(gspca_dev, 0x0040, 0x0115);
+ break;
+ case CIT_MODEL1:
+ case CIT_MODEL2:
+ case CIT_MODEL3:
+ case CIT_MODEL4:
+ case CIT_IBM_NETCAM_PRO:
+ break;
+ }
+}
+
+static int cit_restart_stream(struct gspca_dev *gspca_dev)
+{
+ struct sd *sd = (struct sd *) gspca_dev;
+
+ switch (sd->model) {
+ case CIT_MODEL0:
+ case CIT_MODEL1:
+ case CIT_MODEL3:
+ case CIT_IBM_NETCAM_PRO:
+ cit_write_reg(gspca_dev, 0x0001, 0x0114);
+ /* Fall through */
+ case CIT_MODEL2:
+ case CIT_MODEL4:
+ cit_write_reg(gspca_dev, 0x00c0, 0x010c); /* Go! */
+ usb_clear_halt(gspca_dev->dev, gspca_dev->urb[0]->pipe);
+ /* This happens repeatedly while streaming with the ibm netcam
+ pro and the ibmcam driver did it for model3 after changing
+ settings, but it does not seem to have any effect. */
+ /* cit_write_reg(gspca_dev, 0x0001, 0x0113); */
+ break;
+ }
+
+ sd->sof_read = 0;
+
+ return 0;
+}
+
+static int cit_get_packet_size(struct gspca_dev *gspca_dev)
+{
+ struct usb_host_interface *alt;
+ struct usb_interface *intf;
+
+ intf = usb_ifnum_to_if(gspca_dev->dev, gspca_dev->iface);
+ alt = usb_altnum_to_altsetting(intf, gspca_dev->alt);
+ if (!alt) {
+ err("Couldn't get altsetting");
+ return -EIO;
+ }
+
+ return le16_to_cpu(alt->endpoint[0].desc.wMaxPacketSize);
+}
+
+/* Calculate the clockdiv giving us max fps given the available bandwidth */
+static int cit_get_clock_div(struct gspca_dev *gspca_dev)
+{
+ int clock_div = 7; /* 0=30 1=25 2=20 3=15 4=12 5=7.5 6=6 7=3fps ?? */
+ int fps[8] = { 30, 25, 20, 15, 12, 8, 6, 3 };
+ int packet_size;
+
+ packet_size = cit_get_packet_size(gspca_dev);
+ if (packet_size < 0)
+ return packet_size;
+
+ while (clock_div > 3 &&
+ 1000 * packet_size >
+ gspca_dev->width * gspca_dev->height *
+ fps[clock_div - 1] * 3 / 2)
+ clock_div--;
+
+ PDEBUG(D_PROBE,
+ "PacketSize: %d, res: %dx%d -> using clockdiv: %d (%d fps)",
+ packet_size, gspca_dev->width, gspca_dev->height, clock_div,
+ fps[clock_div]);
+
+ return clock_div;
+}
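+
+/*
+ * Editorial worked example (hypothetical numbers, not part of the original
+ * patch): with a 1023 byte isochronous packet size and a 320x240 mode, one
+ * frame is 320 * 240 * 3 / 2 = 115200 bytes. The loop above stops at
+ * clock_div = 5, since 1000 * 1023 = 1023000 exceeds 115200 * fps[5] = 921600
+ * but not 115200 * fps[4] = 1382400, so the bus sustains roughly 8 fps here.
+ */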
+
+static int cit_start_model0(struct gspca_dev *gspca_dev)
+{
+ const unsigned short compression = 0; /* 0=none, 7=best frame rate */
+ int clock_div;
+
+ clock_div = cit_get_clock_div(gspca_dev);
+ if (clock_div < 0)
+ return clock_div;
+
+ cit_write_reg(gspca_dev, 0x0000, 0x0100); /* turn on led */
+ cit_write_reg(gspca_dev, 0x0003, 0x0438);
+ cit_write_reg(gspca_dev, 0x001e, 0x042b);
+ cit_write_reg(gspca_dev, 0x0041, 0x042c);
+ cit_write_reg(gspca_dev, 0x0008, 0x0436);
+ cit_write_reg(gspca_dev, 0x0024, 0x0403);
+ cit_write_reg(gspca_dev, 0x002c, 0x0404);
+ cit_write_reg(gspca_dev, 0x0002, 0x0426);
+ cit_write_reg(gspca_dev, 0x0014, 0x0427);
+
+ switch (gspca_dev->width) {
+ case 160: /* 160x120 */
+ cit_write_reg(gspca_dev, 0x0004, 0x010b);
+ cit_write_reg(gspca_dev, 0x0001, 0x010a);
+ cit_write_reg(gspca_dev, 0x0010, 0x0102);
+ cit_write_reg(gspca_dev, 0x00a0, 0x0103);
+ cit_write_reg(gspca_dev, 0x0000, 0x0104);
+ cit_write_reg(gspca_dev, 0x0078, 0x0105);
+ break;
+
+ case 176: /* 176x144 */
+ cit_write_reg(gspca_dev, 0x0006, 0x010b);
+ cit_write_reg(gspca_dev, 0x0000, 0x010a);
+ cit_write_reg(gspca_dev, 0x0005, 0x0102);
+ cit_write_reg(gspca_dev, 0x00b0, 0x0103);
+ cit_write_reg(gspca_dev, 0x0000, 0x0104);
+ cit_write_reg(gspca_dev, 0x0090, 0x0105);
+ break;
+
+ case 320: /* 320x240 */
+ cit_write_reg(gspca_dev, 0x0008, 0x010b);
+ cit_write_reg(gspca_dev, 0x0004, 0x010a);
+ cit_write_reg(gspca_dev, 0x0005, 0x0102);
+ cit_write_reg(gspca_dev, 0x00a0, 0x0103);
+ cit_write_reg(gspca_dev, 0x0010, 0x0104);
+ cit_write_reg(gspca_dev, 0x0078, 0x0105);
+ break;
+ }
+
+ cit_write_reg(gspca_dev, compression, 0x0109);
+ cit_write_reg(gspca_dev, clock_div, 0x0111);
+
+ return 0;
+}
+
+static int cit_start_model1(struct gspca_dev *gspca_dev)
+{
+ struct sd *sd = (struct sd *) gspca_dev;
+ int i, clock_div;
+
+ clock_div = cit_get_clock_div(gspca_dev);
+ if (clock_div < 0)
+ return clock_div;
+
+ cit_read_reg(gspca_dev, 0x0128);
+ cit_read_reg(gspca_dev, 0x0100);
+ cit_write_reg(gspca_dev, 0x01, 0x0100); /* LED On */
+ cit_read_reg(gspca_dev, 0x0100);
+ cit_write_reg(gspca_dev, 0x81, 0x0100); /* LED Off */
+ cit_read_reg(gspca_dev, 0x0100);
+ cit_write_reg(gspca_dev, 0x01, 0x0100); /* LED On */
+ cit_write_reg(gspca_dev, 0x01, 0x0108);
+
+ cit_write_reg(gspca_dev, 0x03, 0x0112);
+ cit_read_reg(gspca_dev, 0x0115);
+ cit_write_reg(gspca_dev, 0x06, 0x0115);
+ cit_read_reg(gspca_dev, 0x0116);
+ cit_write_reg(gspca_dev, 0x44, 0x0116);
+ cit_read_reg(gspca_dev, 0x0116);
+ cit_write_reg(gspca_dev, 0x40, 0x0116);
+ cit_read_reg(gspca_dev, 0x0115);
+ cit_write_reg(gspca_dev, 0x0e, 0x0115);
+ cit_write_reg(gspca_dev, 0x19, 0x012c);
+
+ cit_Packet_Format1(gspca_dev, 0x00, 0x1e);
+ cit_Packet_Format1(gspca_dev, 0x39, 0x0d);
+ cit_Packet_Format1(gspca_dev, 0x39, 0x09);
+ cit_Packet_Format1(gspca_dev, 0x3b, 0x00);
+ cit_Packet_Format1(gspca_dev, 0x28, 0x22);
+ cit_Packet_Format1(gspca_dev, 0x27, 0x00);
+ cit_Packet_Format1(gspca_dev, 0x2b, 0x1f);
+ cit_Packet_Format1(gspca_dev, 0x39, 0x08);
+
+ for (i = 0; i < cit_model1_ntries; i++)
+ cit_Packet_Format1(gspca_dev, 0x2c, 0x00);
+
+ for (i = 0; i < cit_model1_ntries; i++)
+ cit_Packet_Format1(gspca_dev, 0x30, 0x14);
+
+ cit_PacketFormat2(gspca_dev, 0x39, 0x02);
+ cit_PacketFormat2(gspca_dev, 0x01, 0xe1);
+ cit_PacketFormat2(gspca_dev, 0x02, 0xcd);
+ cit_PacketFormat2(gspca_dev, 0x03, 0xcd);
+ cit_PacketFormat2(gspca_dev, 0x04, 0xfa);
+ cit_PacketFormat2(gspca_dev, 0x3f, 0xff);
+ cit_PacketFormat2(gspca_dev, 0x39, 0x00);
+
+ cit_PacketFormat2(gspca_dev, 0x39, 0x02);
+ cit_PacketFormat2(gspca_dev, 0x0a, 0x37);
+ cit_PacketFormat2(gspca_dev, 0x0b, 0xb8);
+ cit_PacketFormat2(gspca_dev, 0x0c, 0xf3);
+ cit_PacketFormat2(gspca_dev, 0x0d, 0xe3);
+ cit_PacketFormat2(gspca_dev, 0x0e, 0x0d);
+ cit_PacketFormat2(gspca_dev, 0x0f, 0xf2);
+ cit_PacketFormat2(gspca_dev, 0x10, 0xd5);
+ cit_PacketFormat2(gspca_dev, 0x11, 0xba);
+ cit_PacketFormat2(gspca_dev, 0x12, 0x53);
+ cit_PacketFormat2(gspca_dev, 0x3f, 0xff);
+ cit_PacketFormat2(gspca_dev, 0x39, 0x00);
+
+ cit_PacketFormat2(gspca_dev, 0x39, 0x02);
+ cit_PacketFormat2(gspca_dev, 0x16, 0x00);
+ cit_PacketFormat2(gspca_dev, 0x17, 0x28);
+ cit_PacketFormat2(gspca_dev, 0x18, 0x7d);
+ cit_PacketFormat2(gspca_dev, 0x19, 0xbe);
+ cit_PacketFormat2(gspca_dev, 0x3f, 0xff);
+ cit_PacketFormat2(gspca_dev, 0x39, 0x00);
+
+ for (i = 0; i < cit_model1_ntries; i++)
+ cit_Packet_Format1(gspca_dev, 0x00, 0x18);
+ for (i = 0; i < cit_model1_ntries; i++)
+ cit_Packet_Format1(gspca_dev, 0x13, 0x18);
+ for (i = 0; i < cit_model1_ntries; i++)
+ cit_Packet_Format1(gspca_dev, 0x14, 0x06);
+
+ /* TESTME These are handled through controls.
+ KEEP until someone can test that leaving this out is ok */
+ if (0) {
+ /* This is default brightness */
+ for (i = 0; i < cit_model1_ntries; i++)
+ cit_Packet_Format1(gspca_dev, 0x31, 0x37);
+ for (i = 0; i < cit_model1_ntries; i++)
+ cit_Packet_Format1(gspca_dev, 0x32, 0x46);
+ for (i = 0; i < cit_model1_ntries; i++)
+ cit_Packet_Format1(gspca_dev, 0x33, 0x55);
+ }
+
+ cit_Packet_Format1(gspca_dev, 0x2e, 0x04);
+ for (i = 0; i < cit_model1_ntries; i++)
+ cit_Packet_Format1(gspca_dev, 0x2d, 0x04);
+ for (i = 0; i < cit_model1_ntries; i++)
+ cit_Packet_Format1(gspca_dev, 0x29, 0x80);
+ cit_Packet_Format1(gspca_dev, 0x2c, 0x01);
+ cit_Packet_Format1(gspca_dev, 0x30, 0x17);
+ cit_Packet_Format1(gspca_dev, 0x39, 0x08);
+ for (i = 0; i < cit_model1_ntries; i++)
+ cit_Packet_Format1(gspca_dev, 0x34, 0x00);
+
+ cit_write_reg(gspca_dev, 0x00, 0x0101);
+ cit_write_reg(gspca_dev, 0x00, 0x010a);
+
+ switch (gspca_dev->width) {
+ case 128: /* 128x96 */
+ cit_write_reg(gspca_dev, 0x80, 0x0103);
+ cit_write_reg(gspca_dev, 0x60, 0x0105);
+ cit_write_reg(gspca_dev, 0x0c, 0x010b);
+ cit_write_reg(gspca_dev, 0x04, 0x011b); /* Same everywhere */
+ cit_write_reg(gspca_dev, 0x0b, 0x011d);
+ cit_write_reg(gspca_dev, 0x00, 0x011e); /* Same everywhere */
+ cit_write_reg(gspca_dev, 0x00, 0x0129);
+ break;
+ case 176: /* 176x144 */
+ cit_write_reg(gspca_dev, 0xb0, 0x0103);
+ cit_write_reg(gspca_dev, 0x8f, 0x0105);
+ cit_write_reg(gspca_dev, 0x06, 0x010b);
+ cit_write_reg(gspca_dev, 0x04, 0x011b); /* Same everywhere */
+ cit_write_reg(gspca_dev, 0x0d, 0x011d);
+ cit_write_reg(gspca_dev, 0x00, 0x011e); /* Same everywhere */
+ cit_write_reg(gspca_dev, 0x03, 0x0129);
+ break;
+ case 352: /* 352x288 */
+ cit_write_reg(gspca_dev, 0xb0, 0x0103);
+ cit_write_reg(gspca_dev, 0x90, 0x0105);
+ cit_write_reg(gspca_dev, 0x02, 0x010b);
+ cit_write_reg(gspca_dev, 0x04, 0x011b); /* Same everywhere */
+ cit_write_reg(gspca_dev, 0x05, 0x011d);
+ cit_write_reg(gspca_dev, 0x00, 0x011e); /* Same everywhere */
+ cit_write_reg(gspca_dev, 0x00, 0x0129);
+ break;
+ }
+
+ cit_write_reg(gspca_dev, 0xff, 0x012b);
+
+ /* TESTME These are handled through controls.
+ KEEP until someone can test that leaving this out is ok */
+ if (0) {
+ /* This is another brightness - don't know why */
+ for (i = 0; i < cit_model1_ntries; i++)
+ cit_Packet_Format1(gspca_dev, 0x31, 0xc3);
+ for (i = 0; i < cit_model1_ntries; i++)
+ cit_Packet_Format1(gspca_dev, 0x32, 0xd2);
+ for (i = 0; i < cit_model1_ntries; i++)
+ cit_Packet_Format1(gspca_dev, 0x33, 0xe1);
+
+ /* Default contrast */
+ for (i = 0; i < cit_model1_ntries; i++)
+ cit_Packet_Format1(gspca_dev, 0x14, 0x0a);
+
+ /* Default sharpness */
+ for (i = 0; i < cit_model1_ntries2; i++)
+ cit_PacketFormat2(gspca_dev, 0x13, 0x1a);
+
+ /* Default lighting conditions */
+ cit_Packet_Format1(gspca_dev, 0x0027, sd->lighting);
+ }
+
+ /* Assorted init */
+ switch (gspca_dev->width) {
+ case 128: /* 128x96 */
+ cit_Packet_Format1(gspca_dev, 0x2b, 0x1e);
+ cit_write_reg(gspca_dev, 0xc9, 0x0119); /* Same everywhere */
+ cit_write_reg(gspca_dev, 0x80, 0x0109); /* Same everywhere */
+ cit_write_reg(gspca_dev, 0x36, 0x0102);
+ cit_write_reg(gspca_dev, 0x1a, 0x0104);
+ cit_write_reg(gspca_dev, 0x04, 0x011a); /* Same everywhere */
+ cit_write_reg(gspca_dev, 0x2b, 0x011c);
+ cit_write_reg(gspca_dev, 0x23, 0x012a); /* Same everywhere */
+ break;
+ case 176: /* 176x144 */
+ cit_Packet_Format1(gspca_dev, 0x2b, 0x1e);
+ cit_write_reg(gspca_dev, 0xc9, 0x0119); /* Same everywhere */
+ cit_write_reg(gspca_dev, 0x80, 0x0109); /* Same everywhere */
+ cit_write_reg(gspca_dev, 0x04, 0x0102);
+ cit_write_reg(gspca_dev, 0x02, 0x0104);
+ cit_write_reg(gspca_dev, 0x04, 0x011a); /* Same everywhere */
+ cit_write_reg(gspca_dev, 0x2b, 0x011c);
+ cit_write_reg(gspca_dev, 0x23, 0x012a); /* Same everywhere */
+ break;
+ case 352: /* 352x288 */
+ cit_Packet_Format1(gspca_dev, 0x2b, 0x1f);
+ cit_write_reg(gspca_dev, 0xc9, 0x0119); /* Same everywhere */
+ cit_write_reg(gspca_dev, 0x80, 0x0109); /* Same everywhere */
+ cit_write_reg(gspca_dev, 0x08, 0x0102);
+ cit_write_reg(gspca_dev, 0x01, 0x0104);
+ cit_write_reg(gspca_dev, 0x04, 0x011a); /* Same everywhere */
+ cit_write_reg(gspca_dev, 0x2f, 0x011c);
+ cit_write_reg(gspca_dev, 0x23, 0x012a); /* Same everywhere */
+ break;
+ }
+
+ cit_write_reg(gspca_dev, 0x01, 0x0100); /* LED On */
+ cit_write_reg(gspca_dev, clock_div, 0x0111);
+
+ return 0;
+}
+
+static int cit_start_model2(struct gspca_dev *gspca_dev)
+{
+ struct sd *sd = (struct sd *) gspca_dev;
+ int clock_div = 0;
+
+ cit_write_reg(gspca_dev, 0x0000, 0x0100); /* LED on */
+ cit_read_reg(gspca_dev, 0x0116);
+ cit_write_reg(gspca_dev, 0x0060, 0x0116);
+ cit_write_reg(gspca_dev, 0x0002, 0x0112);
+ cit_write_reg(gspca_dev, 0x00bc, 0x012c);
+ cit_write_reg(gspca_dev, 0x0008, 0x012b);
+ cit_write_reg(gspca_dev, 0x0000, 0x0108);
+ cit_write_reg(gspca_dev, 0x0001, 0x0133);
+ cit_write_reg(gspca_dev, 0x0001, 0x0102);
+ switch (gspca_dev->width) {
+ case 176: /* 176x144 */
+ cit_write_reg(gspca_dev, 0x002c, 0x0103); /* All except 320x240 */
+ cit_write_reg(gspca_dev, 0x0000, 0x0104); /* Same */
+ cit_write_reg(gspca_dev, 0x0024, 0x0105); /* 176x144, 352x288 */
+ cit_write_reg(gspca_dev, 0x00b9, 0x010a); /* Unique to this mode */
+ cit_write_reg(gspca_dev, 0x0038, 0x0119); /* Unique to this mode */
+ /* TESTME HDG: this does not seem right
+ (it is 2 for all other resolutions) */
+ sd->sof_len = 10;
+ break;
+ case 320: /* 320x240 */
+ cit_write_reg(gspca_dev, 0x0028, 0x0103); /* Unique to this mode */
+ cit_write_reg(gspca_dev, 0x0000, 0x0104); /* Same */
+ cit_write_reg(gspca_dev, 0x001e, 0x0105); /* 320x240, 352x240 */
+ cit_write_reg(gspca_dev, 0x0039, 0x010a); /* All except 176x144 */
+ cit_write_reg(gspca_dev, 0x0070, 0x0119); /* All except 176x144 */
+ sd->sof_len = 2;
+ break;
+ /* case VIDEOSIZE_352x240: */
+ cit_write_reg(gspca_dev, 0x002c, 0x0103); /* All except 320x240 */
+ cit_write_reg(gspca_dev, 0x0000, 0x0104); /* Same */
+ cit_write_reg(gspca_dev, 0x001e, 0x0105); /* 320x240, 352x240 */
+ cit_write_reg(gspca_dev, 0x0039, 0x010a); /* All except 176x144 */
+ cit_write_reg(gspca_dev, 0x0070, 0x0119); /* All except 176x144 */
+ sd->sof_len = 2;
+ break;
+ case 352: /* 352x288 */
+ cit_write_reg(gspca_dev, 0x002c, 0x0103); /* All except 320x240 */
+ cit_write_reg(gspca_dev, 0x0000, 0x0104); /* Same */
+ cit_write_reg(gspca_dev, 0x0024, 0x0105); /* 176x144, 352x288 */
+ cit_write_reg(gspca_dev, 0x0039, 0x010a); /* All except 176x144 */
+ cit_write_reg(gspca_dev, 0x0070, 0x0119); /* All except 176x144 */
+ sd->sof_len = 2;
+ break;
+ }
+
+ cit_write_reg(gspca_dev, 0x0000, 0x0100); /* LED on */
+
+ switch (gspca_dev->width) {
+ case 176: /* 176x144 */
+ cit_write_reg(gspca_dev, 0x0050, 0x0111);
+ cit_write_reg(gspca_dev, 0x00d0, 0x0111);
+ break;
+ case 320: /* 320x240 */
+ case 352: /* 352x288 */
+ cit_write_reg(gspca_dev, 0x0040, 0x0111);
+ cit_write_reg(gspca_dev, 0x00c0, 0x0111);
+ break;
+ }
+ cit_write_reg(gspca_dev, 0x009b, 0x010f);
+ cit_write_reg(gspca_dev, 0x00bb, 0x010f);
+
+ /*
+ * Hardware settings, may affect CMOS sensor; not user controls!
+ * -------------------------------------------------------------
+ * 0x0004: no effect
+ * 0x0006: hardware effect
+ * 0x0008: no effect
+ * 0x000a: stops video stream, probably important h/w setting
+ * 0x000c: changes color in hardware manner (not user setting)
+ * 0x0012: changes number of colors (does not affect speed)
+ * 0x002a: no effect
+ * 0x002c: hardware setting (related to scan lines)
+ * 0x002e: stops video stream, probably important h/w setting
+ */
+ cit_model2_Packet1(gspca_dev, 0x000a, 0x005c);
+ cit_model2_Packet1(gspca_dev, 0x0004, 0x0000);
+ cit_model2_Packet1(gspca_dev, 0x0006, 0x00fb);
+ cit_model2_Packet1(gspca_dev, 0x0008, 0x0000);
+ cit_model2_Packet1(gspca_dev, 0x000c, 0x0009);
+ cit_model2_Packet1(gspca_dev, 0x0012, 0x000a);
+ cit_model2_Packet1(gspca_dev, 0x002a, 0x0000);
+ cit_model2_Packet1(gspca_dev, 0x002c, 0x0000);
+ cit_model2_Packet1(gspca_dev, 0x002e, 0x0008);
+
+ /*
+ * Function 0x0030 pops up all over the place. Apparently
+ * it is a hardware control register, with every bit assigned to
+ * do something.
+ */
+ cit_model2_Packet1(gspca_dev, 0x0030, 0x0000);
+
+ /*
+ * Magic control of CMOS sensor. Only lower values like
+ * 0-3 work, and picture shifts left or right. Don't change.
+ */
+ switch (gspca_dev->width) {
+ case 176: /* 176x144 */
+ cit_model2_Packet1(gspca_dev, 0x0014, 0x0002);
+ cit_model2_Packet1(gspca_dev, 0x0016, 0x0002); /* Horizontal shift */
+ cit_model2_Packet1(gspca_dev, 0x0018, 0x004a); /* Another hardware setting */
+ clock_div = 6;
+ break;
+ case 320: /* 320x240 */
+ cit_model2_Packet1(gspca_dev, 0x0014, 0x0009);
+ cit_model2_Packet1(gspca_dev, 0x0016, 0x0005); /* Horizontal shift */
+ cit_model2_Packet1(gspca_dev, 0x0018, 0x0044); /* Another hardware setting */
+ clock_div = 8;
+ break;
+ /* case VIDEOSIZE_352x240: */
+ /* This mode doesn't work as Windows programs it; changed to work */
+ cit_model2_Packet1(gspca_dev, 0x0014, 0x0009); /* Windows sets this to 8 */
+ cit_model2_Packet1(gspca_dev, 0x0016, 0x0003); /* Horizontal shift */
+ cit_model2_Packet1(gspca_dev, 0x0018, 0x0044); /* Windows sets this to 0x0045 */
+ clock_div = 10;
+ break;
+ case 352: /* 352x288 */
+ cit_model2_Packet1(gspca_dev, 0x0014, 0x0003);
+ cit_model2_Packet1(gspca_dev, 0x0016, 0x0002); /* Horizontal shift */
+ cit_model2_Packet1(gspca_dev, 0x0018, 0x004a); /* Another hardware setting */
+ clock_div = 16;
+ break;
+ }
+
+ /* TESTME These are handled through controls.
+ KEEP until someone can test that leaving this out is ok */
+ if (0)
+ cit_model2_Packet1(gspca_dev, 0x001a, 0x005a);
+
+ /*
+ * We have our own frame rate setting varying from 0 (slowest) to 6
+ * (fastest). The camera model 2 allows frame rate in range [0..0x1F]
+ * where 0 is also the slowest setting. However, for all practical
+ * reasons high settings make no sense because USB is not fast enough
+ * to support high FPS. Be aware that the picture datastream will be
+ * severely disrupted if you ask for a frame rate faster than allowed
+ * for the video size - see below:
+ *
+ * Allowable ranges (obtained experimentally on OHCI, K6-3, 450 MHz):
+ * -----------------------------------------------------------------
+ * 176x144: [6..31]
+ * 320x240: [8..31]
+ * 352x240: [10..31]
+ * 352x288: [16..31] I have to raise lower threshold for stability...
+ *
+ * As usual, slower FPS provides better sensitivity.
+ */
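+ /* Note: the per-resolution clock_div values chosen in the switch above
+ (6, 8, 10 and 16) are the lower bounds of these measured ranges. */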
+ cit_model2_Packet1(gspca_dev, 0x001c, clock_div);
+
+ /*
+ * This setting does not visibly affect pictures; left it here
+ * because it was present in Windows USB data stream. This function
+ * does not allow arbitrary values and apparently is a bit mask, to
+ * be activated only at appropriate time. Don't change it randomly!
+ */
+ switch (gspca_dev->width) {
+ case 176: /* 176x144 */
+ cit_model2_Packet1(gspca_dev, 0x0026, 0x00c2);
+ break;
+ case 320: /* 320x240 */
+ cit_model2_Packet1(gspca_dev, 0x0026, 0x0044);
+ break;
+ /* case VIDEOSIZE_352x240: */
+ cit_model2_Packet1(gspca_dev, 0x0026, 0x0046);
+ break;
+ case 352: /* 352x288 */
+ cit_model2_Packet1(gspca_dev, 0x0026, 0x0048);
+ break;
+ }
+
+ /* FIXME this cannot be changed while streaming, so we
+ should report a grabbed flag for this control. */
+ cit_model2_Packet1(gspca_dev, 0x0028, sd->lighting);
+ /* color balance rg2 */
+ cit_model2_Packet1(gspca_dev, 0x001e, 0x002f);
+ /* saturation */
+ cit_model2_Packet1(gspca_dev, 0x0020, 0x0034);
+ /* color balance yb */
+ cit_model2_Packet1(gspca_dev, 0x0022, 0x00a0);
+
+ /* Hardware control command */
+ cit_model2_Packet1(gspca_dev, 0x0030, 0x0004);
+
+ return 0;
+}
+
+static int cit_start_model3(struct gspca_dev *gspca_dev)
+{
+ const unsigned short compression = 0; /* 0=none, 7=best frame rate */
+ int i, clock_div = 0;
+
+ /* HDG not in ibmcam driver, added to see if it helps with
+ auto-detecting between model3 and ibm netcamera pro */
+ cit_read_reg(gspca_dev, 0x128);
+
+ cit_write_reg(gspca_dev, 0x0000, 0x0100);
+ cit_read_reg(gspca_dev, 0x0116);
+ cit_write_reg(gspca_dev, 0x0060, 0x0116);
+ cit_write_reg(gspca_dev, 0x0002, 0x0112);
+ cit_write_reg(gspca_dev, 0x0000, 0x0123);
+ cit_write_reg(gspca_dev, 0x0001, 0x0117);
+ cit_write_reg(gspca_dev, 0x0040, 0x0108);
+ cit_write_reg(gspca_dev, 0x0019, 0x012c);
+ cit_write_reg(gspca_dev, 0x0060, 0x0116);
+ cit_write_reg(gspca_dev, 0x0002, 0x0115);
+ cit_write_reg(gspca_dev, 0x0003, 0x0115);
+ cit_read_reg(gspca_dev, 0x0115);
+ cit_write_reg(gspca_dev, 0x000b, 0x0115);
+
+ /* TESTME HDG not in ibmcam driver, added to see if it helps with
+ auto-detecting between model3 and ibm netcamera pro */
+ if (0) {
+ cit_write_reg(gspca_dev, 0x0078, 0x012d);
+ cit_write_reg(gspca_dev, 0x0001, 0x012f);
+ cit_write_reg(gspca_dev, 0xd141, 0x0124);
+ cit_write_reg(gspca_dev, 0x0079, 0x012d);
+ cit_write_reg(gspca_dev, 0x00ff, 0x0130);
+ cit_write_reg(gspca_dev, 0xcd41, 0x0124);
+ cit_write_reg(gspca_dev, 0xfffa, 0x0124);
+ cit_read_reg(gspca_dev, 0x0126);
+ }
+
+ cit_model3_Packet1(gspca_dev, 0x000a, 0x0040);
+ cit_model3_Packet1(gspca_dev, 0x000b, 0x00f6);
+ cit_model3_Packet1(gspca_dev, 0x000c, 0x0002);
+ cit_model3_Packet1(gspca_dev, 0x000d, 0x0020);
+ cit_model3_Packet1(gspca_dev, 0x000e, 0x0033);
+ cit_model3_Packet1(gspca_dev, 0x000f, 0x0007);
+ cit_model3_Packet1(gspca_dev, 0x0010, 0x0000);
+ cit_model3_Packet1(gspca_dev, 0x0011, 0x0070);
+ cit_model3_Packet1(gspca_dev, 0x0012, 0x0030);
+ cit_model3_Packet1(gspca_dev, 0x0013, 0x0000);
+ cit_model3_Packet1(gspca_dev, 0x0014, 0x0001);
+ cit_model3_Packet1(gspca_dev, 0x0015, 0x0001);
+ cit_model3_Packet1(gspca_dev, 0x0016, 0x0001);
+ cit_model3_Packet1(gspca_dev, 0x0017, 0x0001);
+ cit_model3_Packet1(gspca_dev, 0x0018, 0x0000);
+ cit_model3_Packet1(gspca_dev, 0x001e, 0x00c3);
+ cit_model3_Packet1(gspca_dev, 0x0020, 0x0000);
+ cit_model3_Packet1(gspca_dev, 0x0028, 0x0010);
+ cit_model3_Packet1(gspca_dev, 0x0029, 0x0054);
+ cit_model3_Packet1(gspca_dev, 0x002a, 0x0013);
+ cit_model3_Packet1(gspca_dev, 0x002b, 0x0007);
+ cit_model3_Packet1(gspca_dev, 0x002d, 0x0028);
+ cit_model3_Packet1(gspca_dev, 0x002e, 0x0000);
+ cit_model3_Packet1(gspca_dev, 0x0031, 0x0000);
+ cit_model3_Packet1(gspca_dev, 0x0032, 0x0000);
+ cit_model3_Packet1(gspca_dev, 0x0033, 0x0000);
+ cit_model3_Packet1(gspca_dev, 0x0034, 0x0000);
+ cit_model3_Packet1(gspca_dev, 0x0035, 0x0038);
+ cit_model3_Packet1(gspca_dev, 0x003a, 0x0001);
+ cit_model3_Packet1(gspca_dev, 0x003c, 0x001e);
+ cit_model3_Packet1(gspca_dev, 0x003f, 0x000a);
+ cit_model3_Packet1(gspca_dev, 0x0041, 0x0000);
+ cit_model3_Packet1(gspca_dev, 0x0046, 0x003f);
+ cit_model3_Packet1(gspca_dev, 0x0047, 0x0000);
+ cit_model3_Packet1(gspca_dev, 0x0050, 0x0005);
+ cit_model3_Packet1(gspca_dev, 0x0052, 0x001a);
+ cit_model3_Packet1(gspca_dev, 0x0053, 0x0003);
+ cit_model3_Packet1(gspca_dev, 0x005a, 0x006b);
+ cit_model3_Packet1(gspca_dev, 0x005d, 0x001e);
+ cit_model3_Packet1(gspca_dev, 0x005e, 0x0030);
+ cit_model3_Packet1(gspca_dev, 0x005f, 0x0041);
+ cit_model3_Packet1(gspca_dev, 0x0064, 0x0008);
+ cit_model3_Packet1(gspca_dev, 0x0065, 0x0015);
+ cit_model3_Packet1(gspca_dev, 0x0068, 0x000f);
+ cit_model3_Packet1(gspca_dev, 0x0079, 0x0000);
+ cit_model3_Packet1(gspca_dev, 0x007a, 0x0000);
+ cit_model3_Packet1(gspca_dev, 0x007c, 0x003f);
+ cit_model3_Packet1(gspca_dev, 0x0082, 0x000f);
+ cit_model3_Packet1(gspca_dev, 0x0085, 0x0000);
+ cit_model3_Packet1(gspca_dev, 0x0099, 0x0000);
+ cit_model3_Packet1(gspca_dev, 0x009b, 0x0023);
+ cit_model3_Packet1(gspca_dev, 0x009c, 0x0022);
+ cit_model3_Packet1(gspca_dev, 0x009d, 0x0096);
+ cit_model3_Packet1(gspca_dev, 0x009e, 0x0096);
+ cit_model3_Packet1(gspca_dev, 0x009f, 0x000a);
+
+ switch (gspca_dev->width) {
+ case 160:
+ cit_write_reg(gspca_dev, 0x0000, 0x0101); /* Same on 160x120, 320x240 */
+ cit_write_reg(gspca_dev, 0x00a0, 0x0103); /* Same on 160x120, 320x240 */
+ cit_write_reg(gspca_dev, 0x0078, 0x0105); /* Same on 160x120, 320x240 */
+ cit_write_reg(gspca_dev, 0x0000, 0x010a); /* Same */
+ cit_write_reg(gspca_dev, 0x0024, 0x010b); /* Differs everywhere */
+ cit_write_reg(gspca_dev, 0x00a9, 0x0119);
+ cit_write_reg(gspca_dev, 0x0016, 0x011b);
+ cit_write_reg(gspca_dev, 0x0002, 0x011d); /* Same on 160x120, 320x240 */
+ cit_write_reg(gspca_dev, 0x0003, 0x011e); /* Same on 160x120, 640x480 */
+ cit_write_reg(gspca_dev, 0x0000, 0x0129); /* Same */
+ cit_write_reg(gspca_dev, 0x00fc, 0x012b); /* Same */
+ cit_write_reg(gspca_dev, 0x0018, 0x0102);
+ cit_write_reg(gspca_dev, 0x0004, 0x0104);
+ cit_write_reg(gspca_dev, 0x0004, 0x011a);
+ cit_write_reg(gspca_dev, 0x0028, 0x011c);
+ cit_write_reg(gspca_dev, 0x0022, 0x012a); /* Same */
+ cit_write_reg(gspca_dev, 0x0000, 0x0118);
+ cit_write_reg(gspca_dev, 0x0000, 0x0132);
+ cit_model3_Packet1(gspca_dev, 0x0021, 0x0001); /* Same */
+ cit_write_reg(gspca_dev, compression, 0x0109);
+ clock_div = 3;
+ break;
+ case 320:
+ cit_write_reg(gspca_dev, 0x0000, 0x0101); /* Same on 160x120, 320x240 */
+ cit_write_reg(gspca_dev, 0x00a0, 0x0103); /* Same on 160x120, 320x240 */
+ cit_write_reg(gspca_dev, 0x0078, 0x0105); /* Same on 160x120, 320x240 */
+ cit_write_reg(gspca_dev, 0x0000, 0x010a); /* Same */
+ cit_write_reg(gspca_dev, 0x0028, 0x010b); /* Differs everywhere */
+ cit_write_reg(gspca_dev, 0x0002, 0x011d); /* Same */
+ cit_write_reg(gspca_dev, 0x0000, 0x011e);
+ cit_write_reg(gspca_dev, 0x0000, 0x0129); /* Same */
+ cit_write_reg(gspca_dev, 0x00fc, 0x012b); /* Same */
+ /* 4 commands from 160x120 skipped */
+ cit_write_reg(gspca_dev, 0x0022, 0x012a); /* Same */
+ cit_model3_Packet1(gspca_dev, 0x0021, 0x0001); /* Same */
+ cit_write_reg(gspca_dev, compression, 0x0109);
+ cit_write_reg(gspca_dev, 0x00d9, 0x0119);
+ cit_write_reg(gspca_dev, 0x0006, 0x011b);
+ cit_write_reg(gspca_dev, 0x0021, 0x0102); /* Same on 320x240, 640x480 */
+ cit_write_reg(gspca_dev, 0x0010, 0x0104);
+ cit_write_reg(gspca_dev, 0x0004, 0x011a);
+ cit_write_reg(gspca_dev, 0x003f, 0x011c);
+ cit_write_reg(gspca_dev, 0x001c, 0x0118);
+ cit_write_reg(gspca_dev, 0x0000, 0x0132);
+ clock_div = 5;
+ break;
+ case 640:
+ cit_write_reg(gspca_dev, 0x00f0, 0x0105);
+ cit_write_reg(gspca_dev, 0x0000, 0x010a); /* Same */
+ cit_write_reg(gspca_dev, 0x0038, 0x010b); /* Differs everywhere */
+ cit_write_reg(gspca_dev, 0x00d9, 0x0119); /* Same on 320x240, 640x480 */
+ cit_write_reg(gspca_dev, 0x0006, 0x011b); /* Same on 320x240, 640x480 */
+ cit_write_reg(gspca_dev, 0x0004, 0x011d); /* NC */
+ cit_write_reg(gspca_dev, 0x0003, 0x011e); /* Same on 160x120, 640x480 */
+ cit_write_reg(gspca_dev, 0x0000, 0x0129); /* Same */
+ cit_write_reg(gspca_dev, 0x00fc, 0x012b); /* Same */
+ cit_write_reg(gspca_dev, 0x0021, 0x0102); /* Same on 320x240, 640x480 */
+ cit_write_reg(gspca_dev, 0x0016, 0x0104); /* NC */
+ cit_write_reg(gspca_dev, 0x0004, 0x011a); /* Same on 320x240, 640x480 */
+ cit_write_reg(gspca_dev, 0x003f, 0x011c); /* Same on 320x240, 640x480 */
+ cit_write_reg(gspca_dev, 0x0022, 0x012a); /* Same */
+ cit_write_reg(gspca_dev, 0x001c, 0x0118); /* Same on 320x240, 640x480 */
+ cit_model3_Packet1(gspca_dev, 0x0021, 0x0001); /* Same */
+ cit_write_reg(gspca_dev, compression, 0x0109);
+ cit_write_reg(gspca_dev, 0x0040, 0x0101);
+ cit_write_reg(gspca_dev, 0x0040, 0x0103);
+ cit_write_reg(gspca_dev, 0x0000, 0x0132); /* Same on 320x240, 640x480 */
+ clock_div = 7;
+ break;
+ }
+
+ cit_model3_Packet1(gspca_dev, 0x007e, 0x000e); /* Hue */
+ cit_model3_Packet1(gspca_dev, 0x0036, 0x0011); /* Brightness */
+ cit_model3_Packet1(gspca_dev, 0x0060, 0x0002); /* Sharpness */
+ cit_model3_Packet1(gspca_dev, 0x0061, 0x0004); /* Sharpness */
+ cit_model3_Packet1(gspca_dev, 0x0062, 0x0005); /* Sharpness */
+ cit_model3_Packet1(gspca_dev, 0x0063, 0x0014); /* Sharpness */
+ cit_model3_Packet1(gspca_dev, 0x0096, 0x00a0); /* Red sharpness */
+ cit_model3_Packet1(gspca_dev, 0x0097, 0x0096); /* Blue sharpness */
+ cit_model3_Packet1(gspca_dev, 0x0067, 0x0001); /* Contrast */
+ cit_model3_Packet1(gspca_dev, 0x005b, 0x000c); /* Contrast */
+ cit_model3_Packet1(gspca_dev, 0x005c, 0x0016); /* Contrast */
+ cit_model3_Packet1(gspca_dev, 0x0098, 0x000b);
+ cit_model3_Packet1(gspca_dev, 0x002c, 0x0003); /* Was 1, broke 640x480 */
+ cit_model3_Packet1(gspca_dev, 0x002f, 0x002a);
+ cit_model3_Packet1(gspca_dev, 0x0030, 0x0029);
+ cit_model3_Packet1(gspca_dev, 0x0037, 0x0002);
+ cit_model3_Packet1(gspca_dev, 0x0038, 0x0059);
+ cit_model3_Packet1(gspca_dev, 0x003d, 0x002e);
+ cit_model3_Packet1(gspca_dev, 0x003e, 0x0028);
+ cit_model3_Packet1(gspca_dev, 0x0078, 0x0005);
+ cit_model3_Packet1(gspca_dev, 0x007b, 0x0011);
+ cit_model3_Packet1(gspca_dev, 0x007d, 0x004b);
+ cit_model3_Packet1(gspca_dev, 0x007f, 0x0022);
+ cit_model3_Packet1(gspca_dev, 0x0080, 0x000c);
+ cit_model3_Packet1(gspca_dev, 0x0081, 0x000b);
+ cit_model3_Packet1(gspca_dev, 0x0083, 0x00fd);
+ cit_model3_Packet1(gspca_dev, 0x0086, 0x000b);
+ cit_model3_Packet1(gspca_dev, 0x0087, 0x000b);
+ cit_model3_Packet1(gspca_dev, 0x007e, 0x000e);
+ cit_model3_Packet1(gspca_dev, 0x0096, 0x00a0); /* Red sharpness */
+ cit_model3_Packet1(gspca_dev, 0x0097, 0x0096); /* Blue sharpness */
+ cit_model3_Packet1(gspca_dev, 0x0098, 0x000b);
+
+ /* FIXME we should probably use cit_get_clock_div() here (in
+ combination with isoc negotiation using the programmable isoc size)
+ like with the IBM netcam pro. */
+ cit_write_reg(gspca_dev, clock_div, 0x0111); /* Clock Divider */
+
+ switch (gspca_dev->width) {
+ case 160:
+ cit_model3_Packet1(gspca_dev, 0x001f, 0x0000); /* Same */
+ cit_model3_Packet1(gspca_dev, 0x0039, 0x001f); /* Same */
+ cit_model3_Packet1(gspca_dev, 0x003b, 0x003c); /* Same */
+ cit_model3_Packet1(gspca_dev, 0x0040, 0x000a);
+ cit_model3_Packet1(gspca_dev, 0x0051, 0x000a);
+ break;
+ case 320:
+ cit_model3_Packet1(gspca_dev, 0x001f, 0x0000); /* Same */
+ cit_model3_Packet1(gspca_dev, 0x0039, 0x001f); /* Same */
+ cit_model3_Packet1(gspca_dev, 0x003b, 0x003c); /* Same */
+ cit_model3_Packet1(gspca_dev, 0x0040, 0x0008);
+ cit_model3_Packet1(gspca_dev, 0x0051, 0x000b);
+ break;
+ case 640:
+ cit_model3_Packet1(gspca_dev, 0x001f, 0x0002); /* !Same */
+ cit_model3_Packet1(gspca_dev, 0x0039, 0x003e); /* !Same */
+ cit_model3_Packet1(gspca_dev, 0x0040, 0x0008);
+ cit_model3_Packet1(gspca_dev, 0x0051, 0x000a);
+ break;
+ }
+
+/* if (sd->input_index) { */
+ if (rca_input) {
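+ /* Each rca_initdata entry is { read-flag, value, register }: when the
+ read-flag is set the register is read, otherwise value is written. */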
+ for (i = 0; i < ARRAY_SIZE(rca_initdata); i++) {
+ if (rca_initdata[i][0])
+ cit_read_reg(gspca_dev, rca_initdata[i][2]);
+ else
+ cit_write_reg(gspca_dev, rca_initdata[i][1],
+ rca_initdata[i][2]);
+ }
+ }
+
+ return 0;
+}
+
+static int cit_start_model4(struct gspca_dev *gspca_dev)
+{
+ struct sd *sd = (struct sd *) gspca_dev;
+
+ cit_write_reg(gspca_dev, 0x0000, 0x0100);
+ cit_write_reg(gspca_dev, 0x00c0, 0x0111);
+ cit_write_reg(gspca_dev, 0x00bc, 0x012c);
+ cit_write_reg(gspca_dev, 0x0080, 0x012b);
+ cit_write_reg(gspca_dev, 0x0000, 0x0108);
+ cit_write_reg(gspca_dev, 0x0001, 0x0133);
+ cit_write_reg(gspca_dev, 0x009b, 0x010f);
+ cit_write_reg(gspca_dev, 0x00bb, 0x010f);
+ cit_model4_Packet1(gspca_dev, 0x0038, 0x0000);
+ cit_model4_Packet1(gspca_dev, 0x000a, 0x005c);
+
+ cit_write_reg(gspca_dev, 0x00aa, 0x012d);
+ cit_write_reg(gspca_dev, 0x0004, 0x012f);
+ cit_write_reg(gspca_dev, 0xd141, 0x0124);
+ cit_write_reg(gspca_dev, 0x0000, 0x0127);
+ cit_write_reg(gspca_dev, 0x00fb, 0x012e);
+ cit_write_reg(gspca_dev, 0x0000, 0x0130);
+ cit_write_reg(gspca_dev, 0x8a28, 0x0124);
+ cit_write_reg(gspca_dev, 0x00aa, 0x012f);
+ cit_write_reg(gspca_dev, 0xd055, 0x0124);
+ cit_write_reg(gspca_dev, 0x000c, 0x0127);
+ cit_write_reg(gspca_dev, 0x0009, 0x012e);
+ cit_write_reg(gspca_dev, 0xaa28, 0x0124);
+
+ cit_write_reg(gspca_dev, 0x00aa, 0x012d);
+ cit_write_reg(gspca_dev, 0x0012, 0x012f);
+ cit_write_reg(gspca_dev, 0xd141, 0x0124);
+ cit_write_reg(gspca_dev, 0x0008, 0x0127);
+ cit_write_reg(gspca_dev, 0x00aa, 0x0130);
+ cit_write_reg(gspca_dev, 0x82a8, 0x0124);
+ cit_write_reg(gspca_dev, 0x002a, 0x012d);
+ cit_write_reg(gspca_dev, 0x0000, 0x012f);
+ cit_write_reg(gspca_dev, 0xd145, 0x0124);
+ cit_write_reg(gspca_dev, 0xfffa, 0x0124);
+ cit_model4_Packet1(gspca_dev, 0x0034, 0x0000);
+
+ switch (gspca_dev->width) {
+ case 128: /* 128x96 */
+ cit_write_reg(gspca_dev, 0x0070, 0x0119);
+ cit_write_reg(gspca_dev, 0x00d0, 0x0111);
+ cit_write_reg(gspca_dev, 0x0039, 0x010a);
+ cit_write_reg(gspca_dev, 0x0001, 0x0102);
+ cit_write_reg(gspca_dev, 0x0028, 0x0103);
+ cit_write_reg(gspca_dev, 0x0000, 0x0104);
+ cit_write_reg(gspca_dev, 0x001e, 0x0105);
+ cit_write_reg(gspca_dev, 0x00aa, 0x012d);
+ cit_write_reg(gspca_dev, 0x0016, 0x012f);
+ cit_write_reg(gspca_dev, 0xd141, 0x0124);
+ cit_write_reg(gspca_dev, 0x000a, 0x0127);
+ cit_write_reg(gspca_dev, 0x00aa, 0x0130);
+ cit_write_reg(gspca_dev, 0x82a8, 0x0124);
+ cit_write_reg(gspca_dev, 0x0014, 0x012d);
+ cit_write_reg(gspca_dev, 0x0008, 0x012f);
+ cit_write_reg(gspca_dev, 0xd145, 0x0124);
+ cit_write_reg(gspca_dev, 0x00aa, 0x012e);
+ cit_write_reg(gspca_dev, 0x001a, 0x0130);
+ cit_write_reg(gspca_dev, 0x8a0a, 0x0124);
+ cit_write_reg(gspca_dev, 0x005a, 0x012d);
+ cit_write_reg(gspca_dev, 0x9545, 0x0124);
+ cit_write_reg(gspca_dev, 0x00aa, 0x0127);
+ cit_write_reg(gspca_dev, 0x0018, 0x012e);
+ cit_write_reg(gspca_dev, 0x0043, 0x0130);
+ cit_write_reg(gspca_dev, 0x8a28, 0x0124);
+ cit_write_reg(gspca_dev, 0x00aa, 0x012f);
+ cit_write_reg(gspca_dev, 0xd055, 0x0124);
+ cit_write_reg(gspca_dev, 0x001c, 0x0127);
+ cit_write_reg(gspca_dev, 0x00eb, 0x012e);
+ cit_write_reg(gspca_dev, 0xaa28, 0x0124);
+ cit_write_reg(gspca_dev, 0x00aa, 0x012d);
+ cit_write_reg(gspca_dev, 0x0032, 0x012f);
+ cit_write_reg(gspca_dev, 0xd141, 0x0124);
+ cit_write_reg(gspca_dev, 0x0000, 0x0127);
+ cit_write_reg(gspca_dev, 0x00aa, 0x0130);
+ cit_write_reg(gspca_dev, 0x82a8, 0x0124);
+ cit_write_reg(gspca_dev, 0x0036, 0x012d);
+ cit_write_reg(gspca_dev, 0x0008, 0x012f);
+ cit_write_reg(gspca_dev, 0xd145, 0x0124);
+ cit_write_reg(gspca_dev, 0xfffa, 0x0124);
+ cit_write_reg(gspca_dev, 0x00aa, 0x012d);
+ cit_write_reg(gspca_dev, 0x001e, 0x012f);
+ cit_write_reg(gspca_dev, 0xd141, 0x0124);
+ cit_write_reg(gspca_dev, 0x0017, 0x0127);
+ cit_write_reg(gspca_dev, 0x0013, 0x012e);
+ cit_write_reg(gspca_dev, 0x0031, 0x0130);
+ cit_write_reg(gspca_dev, 0x8a28, 0x0124);
+ cit_write_reg(gspca_dev, 0x0017, 0x012d);
+ cit_write_reg(gspca_dev, 0x0078, 0x012f);
+ cit_write_reg(gspca_dev, 0xd145, 0x0124);
+ cit_write_reg(gspca_dev, 0x0000, 0x0127);
+ cit_write_reg(gspca_dev, 0xfea8, 0x0124);
+ sd->sof_len = 2;
+ break;
+ case 160: /* 160x120 */
+ cit_write_reg(gspca_dev, 0x0038, 0x0119);
+ cit_write_reg(gspca_dev, 0x00d0, 0x0111);
+ cit_write_reg(gspca_dev, 0x00b9, 0x010a);
+ cit_write_reg(gspca_dev, 0x0001, 0x0102);
+ cit_write_reg(gspca_dev, 0x0028, 0x0103);
+ cit_write_reg(gspca_dev, 0x0000, 0x0104);
+ cit_write_reg(gspca_dev, 0x001e, 0x0105);
+ cit_write_reg(gspca_dev, 0x00aa, 0x012d);
+ cit_write_reg(gspca_dev, 0x0016, 0x012f);
+ cit_write_reg(gspca_dev, 0xd141, 0x0124);
+ cit_write_reg(gspca_dev, 0x000b, 0x0127);
+ cit_write_reg(gspca_dev, 0x00aa, 0x0130);
+ cit_write_reg(gspca_dev, 0x82a8, 0x0124);
+ cit_write_reg(gspca_dev, 0x0014, 0x012d);
+ cit_write_reg(gspca_dev, 0x0008, 0x012f);
+ cit_write_reg(gspca_dev, 0xd145, 0x0124);
+ cit_write_reg(gspca_dev, 0x00aa, 0x012e);
+ cit_write_reg(gspca_dev, 0x001a, 0x0130);
+ cit_write_reg(gspca_dev, 0x8a0a, 0x0124);
+ cit_write_reg(gspca_dev, 0x005a, 0x012d);
+ cit_write_reg(gspca_dev, 0x9545, 0x0124);
+ cit_write_reg(gspca_dev, 0x00aa, 0x0127);
+ cit_write_reg(gspca_dev, 0x0018, 0x012e);
+ cit_write_reg(gspca_dev, 0x0043, 0x0130);
+ cit_write_reg(gspca_dev, 0x8a28, 0x0124);
+ cit_write_reg(gspca_dev, 0x00aa, 0x012f);
+ cit_write_reg(gspca_dev, 0xd055, 0x0124);
+ cit_write_reg(gspca_dev, 0x001c, 0x0127);
+ cit_write_reg(gspca_dev, 0x00c7, 0x012e);
+ cit_write_reg(gspca_dev, 0xaa28, 0x0124);
+ cit_write_reg(gspca_dev, 0x00aa, 0x012d);
+ cit_write_reg(gspca_dev, 0x0032, 0x012f);
+ cit_write_reg(gspca_dev, 0xd141, 0x0124);
+ cit_write_reg(gspca_dev, 0x0025, 0x0127);
+ cit_write_reg(gspca_dev, 0x00aa, 0x0130);
+ cit_write_reg(gspca_dev, 0x82a8, 0x0124);
+ cit_write_reg(gspca_dev, 0x0036, 0x012d);
+ cit_write_reg(gspca_dev, 0x0008, 0x012f);
+ cit_write_reg(gspca_dev, 0xd145, 0x0124);
+ cit_write_reg(gspca_dev, 0xfffa, 0x0124);
+ cit_write_reg(gspca_dev, 0x00aa, 0x012d);
+ cit_write_reg(gspca_dev, 0x001e, 0x012f);
+ cit_write_reg(gspca_dev, 0xd141, 0x0124);
+ cit_write_reg(gspca_dev, 0x0048, 0x0127);
+ cit_write_reg(gspca_dev, 0x0035, 0x012e);
+ cit_write_reg(gspca_dev, 0x00d0, 0x0130);
+ cit_write_reg(gspca_dev, 0x8a28, 0x0124);
+ cit_write_reg(gspca_dev, 0x0048, 0x012d);
+ cit_write_reg(gspca_dev, 0x0090, 0x012f);
+ cit_write_reg(gspca_dev, 0xd145, 0x0124);
+ cit_write_reg(gspca_dev, 0x0001, 0x0127);
+ cit_write_reg(gspca_dev, 0xfea8, 0x0124);
+ sd->sof_len = 2;
+ break;
+ case 176: /* 176x144 */
+ cit_write_reg(gspca_dev, 0x0038, 0x0119);
+ cit_write_reg(gspca_dev, 0x00d0, 0x0111);
+ cit_write_reg(gspca_dev, 0x00b9, 0x010a);
+ cit_write_reg(gspca_dev, 0x0001, 0x0102);
+ cit_write_reg(gspca_dev, 0x002c, 0x0103);
+ cit_write_reg(gspca_dev, 0x0000, 0x0104);
+ cit_write_reg(gspca_dev, 0x0024, 0x0105);
+ cit_write_reg(gspca_dev, 0x00aa, 0x012d);
+ cit_write_reg(gspca_dev, 0x0016, 0x012f);
+ cit_write_reg(gspca_dev, 0xd141, 0x0124);
+ cit_write_reg(gspca_dev, 0x0007, 0x0127);
+ cit_write_reg(gspca_dev, 0x00aa, 0x0130);
+ cit_write_reg(gspca_dev, 0x82a8, 0x0124);
+ cit_write_reg(gspca_dev, 0x0014, 0x012d);
+ cit_write_reg(gspca_dev, 0x0001, 0x012f);
+ cit_write_reg(gspca_dev, 0xd145, 0x0124);
+ cit_write_reg(gspca_dev, 0x00aa, 0x012e);
+ cit_write_reg(gspca_dev, 0x001a, 0x0130);
+ cit_write_reg(gspca_dev, 0x8a0a, 0x0124);
+ cit_write_reg(gspca_dev, 0x005e, 0x012d);
+ cit_write_reg(gspca_dev, 0x9545, 0x0124);
+ cit_write_reg(gspca_dev, 0x00aa, 0x0127);
+ cit_write_reg(gspca_dev, 0x0018, 0x012e);
+ cit_write_reg(gspca_dev, 0x0049, 0x0130);
+ cit_write_reg(gspca_dev, 0x8a28, 0x0124);
+ cit_write_reg(gspca_dev, 0x00aa, 0x012f);
+ cit_write_reg(gspca_dev, 0xd055, 0x0124);
+ cit_write_reg(gspca_dev, 0x001c, 0x0127);
+ cit_write_reg(gspca_dev, 0x00c7, 0x012e);
+ cit_write_reg(gspca_dev, 0xaa28, 0x0124);
+ cit_write_reg(gspca_dev, 0x00aa, 0x012d);
+ cit_write_reg(gspca_dev, 0x0032, 0x012f);
+ cit_write_reg(gspca_dev, 0xd141, 0x0124);
+ cit_write_reg(gspca_dev, 0x0028, 0x0127);
+ cit_write_reg(gspca_dev, 0x00aa, 0x0130);
+ cit_write_reg(gspca_dev, 0x82a8, 0x0124);
+ cit_write_reg(gspca_dev, 0x0036, 0x012d);
+ cit_write_reg(gspca_dev, 0x0008, 0x012f);
+ cit_write_reg(gspca_dev, 0xd145, 0x0124);
+ cit_write_reg(gspca_dev, 0xfffa, 0x0124);
+ cit_write_reg(gspca_dev, 0x00aa, 0x012d);
+ cit_write_reg(gspca_dev, 0x001e, 0x012f);
+ cit_write_reg(gspca_dev, 0xd141, 0x0124);
+ cit_write_reg(gspca_dev, 0x0010, 0x0127);
+ cit_write_reg(gspca_dev, 0x0013, 0x012e);
+ cit_write_reg(gspca_dev, 0x002a, 0x0130);
+ cit_write_reg(gspca_dev, 0x8a28, 0x0124);
+ cit_write_reg(gspca_dev, 0x0010, 0x012d);
+ cit_write_reg(gspca_dev, 0x006d, 0x012f);
+ cit_write_reg(gspca_dev, 0xd145, 0x0124);
+ cit_write_reg(gspca_dev, 0x0001, 0x0127);
+ cit_write_reg(gspca_dev, 0xfea8, 0x0124);
+ /* TESTME HDG: this does not seem right
+ (it is 2 for all other resolutions) */
+ sd->sof_len = 10;
+ break;
+ case 320: /* 320x240 */
+ cit_write_reg(gspca_dev, 0x0070, 0x0119);
+ cit_write_reg(gspca_dev, 0x00d0, 0x0111);
+ cit_write_reg(gspca_dev, 0x0039, 0x010a);
+ cit_write_reg(gspca_dev, 0x0001, 0x0102);
+ cit_write_reg(gspca_dev, 0x0028, 0x0103);
+ cit_write_reg(gspca_dev, 0x0000, 0x0104);
+ cit_write_reg(gspca_dev, 0x001e, 0x0105);
+ cit_write_reg(gspca_dev, 0x00aa, 0x012d);
+ cit_write_reg(gspca_dev, 0x0016, 0x012f);
+ cit_write_reg(gspca_dev, 0xd141, 0x0124);
+ cit_write_reg(gspca_dev, 0x000a, 0x0127);
+ cit_write_reg(gspca_dev, 0x00aa, 0x0130);
+ cit_write_reg(gspca_dev, 0x82a8, 0x0124);
+ cit_write_reg(gspca_dev, 0x0014, 0x012d);
+ cit_write_reg(gspca_dev, 0x0008, 0x012f);
+ cit_write_reg(gspca_dev, 0xd145, 0x0124);
+ cit_write_reg(gspca_dev, 0x00aa, 0x012e);
+ cit_write_reg(gspca_dev, 0x001a, 0x0130);
+ cit_write_reg(gspca_dev, 0x8a0a, 0x0124);
+ cit_write_reg(gspca_dev, 0x005a, 0x012d);
+ cit_write_reg(gspca_dev, 0x9545, 0x0124);
+ cit_write_reg(gspca_dev, 0x00aa, 0x0127);
+ cit_write_reg(gspca_dev, 0x0018, 0x012e);
+ cit_write_reg(gspca_dev, 0x0043, 0x0130);
+ cit_write_reg(gspca_dev, 0x8a28, 0x0124);
+ cit_write_reg(gspca_dev, 0x00aa, 0x012f);
+ cit_write_reg(gspca_dev, 0xd055, 0x0124);
+ cit_write_reg(gspca_dev, 0x001c, 0x0127);
+ cit_write_reg(gspca_dev, 0x00eb, 0x012e);
+ cit_write_reg(gspca_dev, 0xaa28, 0x0124);
+ cit_write_reg(gspca_dev, 0x00aa, 0x012d);
+ cit_write_reg(gspca_dev, 0x0032, 0x012f);
+ cit_write_reg(gspca_dev, 0xd141, 0x0124);
+ cit_write_reg(gspca_dev, 0x0000, 0x0127);
+ cit_write_reg(gspca_dev, 0x00aa, 0x0130);
+ cit_write_reg(gspca_dev, 0x82a8, 0x0124);
+ cit_write_reg(gspca_dev, 0x0036, 0x012d);
+ cit_write_reg(gspca_dev, 0x0008, 0x012f);
+ cit_write_reg(gspca_dev, 0xd145, 0x0124);
+ cit_write_reg(gspca_dev, 0xfffa, 0x0124);
+ cit_write_reg(gspca_dev, 0x00aa, 0x012d);
+ cit_write_reg(gspca_dev, 0x001e, 0x012f);
+ cit_write_reg(gspca_dev, 0xd141, 0x0124);
+ cit_write_reg(gspca_dev, 0x0017, 0x0127);
+ cit_write_reg(gspca_dev, 0x0013, 0x012e);
+ cit_write_reg(gspca_dev, 0x0031, 0x0130);
+ cit_write_reg(gspca_dev, 0x8a28, 0x0124);
+ cit_write_reg(gspca_dev, 0x0017, 0x012d);
+ cit_write_reg(gspca_dev, 0x0078, 0x012f);
+ cit_write_reg(gspca_dev, 0xd145, 0x0124);
+ cit_write_reg(gspca_dev, 0x0000, 0x0127);
+ cit_write_reg(gspca_dev, 0xfea8, 0x0124);
+ sd->sof_len = 2;
+ break;
+ case 352: /* 352x288 */
+ cit_write_reg(gspca_dev, 0x0070, 0x0119);
+ cit_write_reg(gspca_dev, 0x00c0, 0x0111);
+ cit_write_reg(gspca_dev, 0x0039, 0x010a);
+ cit_write_reg(gspca_dev, 0x0001, 0x0102);
+ cit_write_reg(gspca_dev, 0x002c, 0x0103);
+ cit_write_reg(gspca_dev, 0x0000, 0x0104);
+ cit_write_reg(gspca_dev, 0x0024, 0x0105);
+ cit_write_reg(gspca_dev, 0x00aa, 0x012d);
+ cit_write_reg(gspca_dev, 0x0016, 0x012f);
+ cit_write_reg(gspca_dev, 0xd141, 0x0124);
+ cit_write_reg(gspca_dev, 0x0006, 0x0127);
+ cit_write_reg(gspca_dev, 0x00aa, 0x0130);
+ cit_write_reg(gspca_dev, 0x82a8, 0x0124);
+ cit_write_reg(gspca_dev, 0x0014, 0x012d);
+ cit_write_reg(gspca_dev, 0x0002, 0x012f);
+ cit_write_reg(gspca_dev, 0xd145, 0x0124);
+ cit_write_reg(gspca_dev, 0x00aa, 0x012e);
+ cit_write_reg(gspca_dev, 0x001a, 0x0130);
+ cit_write_reg(gspca_dev, 0x8a0a, 0x0124);
+ cit_write_reg(gspca_dev, 0x005e, 0x012d);
+ cit_write_reg(gspca_dev, 0x9545, 0x0124);
+ cit_write_reg(gspca_dev, 0x00aa, 0x0127);
+ cit_write_reg(gspca_dev, 0x0018, 0x012e);
+ cit_write_reg(gspca_dev, 0x0049, 0x0130);
+ cit_write_reg(gspca_dev, 0x8a28, 0x0124);
+ cit_write_reg(gspca_dev, 0x00aa, 0x012f);
+ cit_write_reg(gspca_dev, 0xd055, 0x0124);
+ cit_write_reg(gspca_dev, 0x001c, 0x0127);
+ cit_write_reg(gspca_dev, 0x00cf, 0x012e);
+ cit_write_reg(gspca_dev, 0xaa28, 0x0124);
+ cit_write_reg(gspca_dev, 0x00aa, 0x012d);
+ cit_write_reg(gspca_dev, 0x0032, 0x012f);
+ cit_write_reg(gspca_dev, 0xd141, 0x0124);
+ cit_write_reg(gspca_dev, 0x0000, 0x0127);
+ cit_write_reg(gspca_dev, 0x00aa, 0x0130);
+ cit_write_reg(gspca_dev, 0x82a8, 0x0124);
+ cit_write_reg(gspca_dev, 0x0036, 0x012d);
+ cit_write_reg(gspca_dev, 0x0008, 0x012f);
+ cit_write_reg(gspca_dev, 0xd145, 0x0124);
+ cit_write_reg(gspca_dev, 0xfffa, 0x0124);
+ cit_write_reg(gspca_dev, 0x00aa, 0x012d);
+ cit_write_reg(gspca_dev, 0x001e, 0x012f);
+ cit_write_reg(gspca_dev, 0xd141, 0x0124);
+ cit_write_reg(gspca_dev, 0x0010, 0x0127);
+ cit_write_reg(gspca_dev, 0x0013, 0x012e);
+ cit_write_reg(gspca_dev, 0x0025, 0x0130);
+ cit_write_reg(gspca_dev, 0x8a28, 0x0124);
+ cit_write_reg(gspca_dev, 0x0010, 0x012d);
+ cit_write_reg(gspca_dev, 0x0048, 0x012f);
+ cit_write_reg(gspca_dev, 0xd145, 0x0124);
+ cit_write_reg(gspca_dev, 0x0000, 0x0127);
+ cit_write_reg(gspca_dev, 0xfea8, 0x0124);
+ sd->sof_len = 2;
+ break;
+ }
+
+ cit_model4_Packet1(gspca_dev, 0x0038, 0x0004);
+
+ return 0;
+}
+
+static int cit_start_ibm_netcam_pro(struct gspca_dev *gspca_dev)
+{
+ const unsigned short compression = 0; /* 0=none, 7=best frame rate */
+ int i, clock_div;
+
+ clock_div = cit_get_clock_div(gspca_dev);
+ if (clock_div < 0)
+ return clock_div;
+
+ cit_write_reg(gspca_dev, 0x0003, 0x0133);
+ cit_write_reg(gspca_dev, 0x0000, 0x0117);
+ cit_write_reg(gspca_dev, 0x0008, 0x0123);
+ cit_write_reg(gspca_dev, 0x0000, 0x0100);
+ cit_write_reg(gspca_dev, 0x0060, 0x0116);
+ /* cit_write_reg(gspca_dev, 0x0002, 0x0112); see sd_stop0 */
+ cit_write_reg(gspca_dev, 0x0000, 0x0133);
+ cit_write_reg(gspca_dev, 0x0000, 0x0123);
+ cit_write_reg(gspca_dev, 0x0001, 0x0117);
+ cit_write_reg(gspca_dev, 0x0040, 0x0108);
+ cit_write_reg(gspca_dev, 0x0019, 0x012c);
+ cit_write_reg(gspca_dev, 0x0060, 0x0116);
+ /* cit_write_reg(gspca_dev, 0x000b, 0x0115); see sd_stop0 */
+
+ cit_model3_Packet1(gspca_dev, 0x0049, 0x0000);
+
+ cit_write_reg(gspca_dev, 0x0000, 0x0101); /* Same on 160x120, 320x240 */
+ cit_write_reg(gspca_dev, 0x003a, 0x0102); /* Hstart */
+ cit_write_reg(gspca_dev, 0x00a0, 0x0103); /* Same on 160x120, 320x240 */
+ cit_write_reg(gspca_dev, 0x0078, 0x0105); /* Same on 160x120, 320x240 */
+ cit_write_reg(gspca_dev, 0x0000, 0x010a); /* Same */
+ cit_write_reg(gspca_dev, 0x0002, 0x011d); /* Same on 160x120, 320x240 */
+ cit_write_reg(gspca_dev, 0x0000, 0x0129); /* Same */
+ cit_write_reg(gspca_dev, 0x00fc, 0x012b); /* Same */
+ cit_write_reg(gspca_dev, 0x0022, 0x012a); /* Same */
+
+ switch (gspca_dev->width) {
+ case 160: /* 160x120 */
+ cit_write_reg(gspca_dev, 0x0024, 0x010b);
+ cit_write_reg(gspca_dev, 0x0089, 0x0119);
+ cit_write_reg(gspca_dev, 0x000a, 0x011b);
+ cit_write_reg(gspca_dev, 0x0003, 0x011e);
+ cit_write_reg(gspca_dev, 0x0007, 0x0104);
+ cit_write_reg(gspca_dev, 0x0009, 0x011a);
+ cit_write_reg(gspca_dev, 0x008b, 0x011c);
+ cit_write_reg(gspca_dev, 0x0008, 0x0118);
+ cit_write_reg(gspca_dev, 0x0000, 0x0132);
+ break;
+ case 320: /* 320x240 */
+ cit_write_reg(gspca_dev, 0x0028, 0x010b);
+ cit_write_reg(gspca_dev, 0x00d9, 0x0119);
+ cit_write_reg(gspca_dev, 0x0006, 0x011b);
+ cit_write_reg(gspca_dev, 0x0000, 0x011e);
+ cit_write_reg(gspca_dev, 0x000e, 0x0104);
+ cit_write_reg(gspca_dev, 0x0004, 0x011a);
+ cit_write_reg(gspca_dev, 0x003f, 0x011c);
+ cit_write_reg(gspca_dev, 0x000c, 0x0118);
+ cit_write_reg(gspca_dev, 0x0000, 0x0132);
+ break;
+ }
+
+ cit_model3_Packet1(gspca_dev, 0x0019, 0x0031);
+ cit_model3_Packet1(gspca_dev, 0x001a, 0x0003);
+ cit_model3_Packet1(gspca_dev, 0x001b, 0x0038);
+ cit_model3_Packet1(gspca_dev, 0x001c, 0x0000);
+ cit_model3_Packet1(gspca_dev, 0x0024, 0x0001);
+ cit_model3_Packet1(gspca_dev, 0x0027, 0x0001);
+ cit_model3_Packet1(gspca_dev, 0x002a, 0x0004);
+ cit_model3_Packet1(gspca_dev, 0x0035, 0x000b);
+ cit_model3_Packet1(gspca_dev, 0x003f, 0x0001);
+ cit_model3_Packet1(gspca_dev, 0x0044, 0x0000);
+ cit_model3_Packet1(gspca_dev, 0x0054, 0x0000);
+ cit_model3_Packet1(gspca_dev, 0x00c4, 0x0000);
+ cit_model3_Packet1(gspca_dev, 0x00e7, 0x0001);
+ cit_model3_Packet1(gspca_dev, 0x00e9, 0x0001);
+ cit_model3_Packet1(gspca_dev, 0x00ee, 0x0000);
+ cit_model3_Packet1(gspca_dev, 0x00f3, 0x00c0);
+
+ cit_write_reg(gspca_dev, compression, 0x0109);
+ cit_write_reg(gspca_dev, clock_div, 0x0111);
+
+/* if (sd->input_index) { */
+ if (rca_input) {
+ for (i = 0; i < ARRAY_SIZE(rca_initdata); i++) {
+ if (rca_initdata[i][0])
+ cit_read_reg(gspca_dev, rca_initdata[i][2]);
+ else
+ cit_write_reg(gspca_dev, rca_initdata[i][1],
+ rca_initdata[i][2]);
+ }
+ }
+
+ return 0;
+}
+
+/* -- start the camera -- */
+static int sd_start(struct gspca_dev *gspca_dev)
+{
+ struct sd *sd = (struct sd *) gspca_dev;
+ int packet_size;
+
+ packet_size = cit_get_packet_size(gspca_dev);
+ if (packet_size < 0)
+ return packet_size;
+
+ switch (sd->model) {
+ case CIT_MODEL0:
+ cit_start_model0(gspca_dev);
+ break;
+ case CIT_MODEL1:
+ cit_start_model1(gspca_dev);
+ break;
+ case CIT_MODEL2:
+ cit_start_model2(gspca_dev);
+ break;
+ case CIT_MODEL3:
+ cit_start_model3(gspca_dev);
+ break;
+ case CIT_MODEL4:
+ cit_start_model4(gspca_dev);
+ break;
+ case CIT_IBM_NETCAM_PRO:
+ cit_start_ibm_netcam_pro(gspca_dev);
+ break;
+ }
+
+ cit_set_brightness(gspca_dev);
+ cit_set_contrast(gspca_dev);
+ cit_set_hue(gspca_dev);
+ cit_set_sharpness(gspca_dev);
+ cit_set_lighting(gspca_dev);
+ cit_set_hflip(gspca_dev);
+
+ /* Program max isoc packet size */
+ cit_write_reg(gspca_dev, packet_size >> 8, 0x0106);
+ cit_write_reg(gspca_dev, packet_size & 0xff, 0x0107);
+
+ cit_restart_stream(gspca_dev);
+
+ return 0;
+}
+
+static int sd_isoc_nego(struct gspca_dev *gspca_dev)
+{
+ int ret, packet_size;
+ struct usb_host_interface *alt;
+
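+ /* Isoc bandwidth negotiation: shrink the requested max packet size by
+ 100 bytes per attempt so the core can retry with less bandwidth, and
+ give up once it would drop below 300 bytes. */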
+ alt = &gspca_dev->dev->config->intf_cache[0]->altsetting[1];
+ packet_size = le16_to_cpu(alt->endpoint[0].desc.wMaxPacketSize);
+ packet_size -= 100;
+ if (packet_size < 300)
+ return -EIO;
+ alt->endpoint[0].desc.wMaxPacketSize = cpu_to_le16(packet_size);
+
+ ret = usb_set_interface(gspca_dev->dev, gspca_dev->iface, 1);
+ if (ret < 0)
+ err("set alt 1 err %d", ret);
+
+ return ret;
+}
+
+static void sd_stopN(struct gspca_dev *gspca_dev)
+{
+ cit_write_reg(gspca_dev, 0x0000, 0x010c);
+}
+
+static void sd_stop0(struct gspca_dev *gspca_dev)
+{
+ struct sd *sd = (struct sd *) gspca_dev;
+ struct usb_host_interface *alt;
+
+ /* We cannot use gspca_dev->present here, as that is not set when
+ sd_init gets called, and we get called from sd_init */
+ if (!gspca_dev->dev)
+ return;
+
+ alt = &gspca_dev->dev->config->intf_cache[0]->altsetting[1];
+
+ switch (sd->model) {
+ case CIT_MODEL0:
+ /* HDG windows does this, but it causes the cam's autogain to
+ restart from a gain of 0, which does not look good when
+ changing resolutions. */
+ /* cit_write_reg(gspca_dev, 0x0000, 0x0112); */
+ cit_write_reg(gspca_dev, 0x00c0, 0x0100); /* LED Off */
+ break;
+ case CIT_MODEL1:
+ cit_send_FF_04_02(gspca_dev);
+ cit_read_reg(gspca_dev, 0x0100);
+ cit_write_reg(gspca_dev, 0x81, 0x0100); /* LED Off */
+ break;
+ case CIT_MODEL2:
+ case CIT_MODEL4:
+ cit_model2_Packet1(gspca_dev, 0x0030, 0x0004);
+
+ cit_write_reg(gspca_dev, 0x0080, 0x0100); /* LED Off */
+ cit_write_reg(gspca_dev, 0x0020, 0x0111);
+ cit_write_reg(gspca_dev, 0x00a0, 0x0111);
+
+ cit_model2_Packet1(gspca_dev, 0x0030, 0x0002);
+
+ cit_write_reg(gspca_dev, 0x0020, 0x0111);
+ cit_write_reg(gspca_dev, 0x0000, 0x0112);
+ break;
+ case CIT_MODEL3:
+ cit_write_reg(gspca_dev, 0x0006, 0x012c);
+ cit_model3_Packet1(gspca_dev, 0x0046, 0x0000);
+ cit_read_reg(gspca_dev, 0x0116);
+ cit_write_reg(gspca_dev, 0x0064, 0x0116);
+ cit_read_reg(gspca_dev, 0x0115);
+ cit_write_reg(gspca_dev, 0x0003, 0x0115);
+ cit_write_reg(gspca_dev, 0x0008, 0x0123);
+ cit_write_reg(gspca_dev, 0x0000, 0x0117);
+ cit_write_reg(gspca_dev, 0x0000, 0x0112);
+ cit_write_reg(gspca_dev, 0x0080, 0x0100);
+ break;
+ case CIT_IBM_NETCAM_PRO:
+ cit_model3_Packet1(gspca_dev, 0x0049, 0x00ff);
+ cit_write_reg(gspca_dev, 0x0006, 0x012c);
+ cit_write_reg(gspca_dev, 0x0000, 0x0116);
+ /* HDG windows does this, but I cannot get the camera
+ to restart with this without redoing the entire init
+ sequence, which makes switching modes really slow */
+ /* cit_write_reg(gspca_dev, 0x0006, 0x0115); */
+ cit_write_reg(gspca_dev, 0x0008, 0x0123);
+ cit_write_reg(gspca_dev, 0x0000, 0x0117);
+ cit_write_reg(gspca_dev, 0x0003, 0x0133);
+ cit_write_reg(gspca_dev, 0x0000, 0x0111);
+ /* HDG windows does this, but I get a green picture when
+ restarting the stream after this */
+ /* cit_write_reg(gspca_dev, 0x0000, 0x0112); */
+ cit_write_reg(gspca_dev, 0x00c0, 0x0100);
+
+ /* Start isoc bandwidth "negotiation" at max isoc bandwidth at the
+ next stream start */
+ alt->endpoint[0].desc.wMaxPacketSize = cpu_to_le16(1022);
+ break;
+ }
+}
+
+static u8 *cit_find_sof(struct gspca_dev *gspca_dev, u8 *data, int len)
+{
+ struct sd *sd = (struct sd *) gspca_dev;
+ u8 byte3 = 0, byte4 = 0;
+ int i;
+
+ switch (sd->model) {
+ case CIT_MODEL0:
+ case CIT_MODEL1:
+ case CIT_MODEL3:
+ case CIT_IBM_NETCAM_PRO:
+ switch (gspca_dev->width) {
+ case 160: /* 160x120 */
+ byte3 = 0x02;
+ byte4 = 0x0a;
+ break;
+ case 176: /* 176x144 */
+ byte3 = 0x02;
+ byte4 = 0x0e;
+ break;
+ case 320: /* 320x240 */
+ byte3 = 0x02;
+ byte4 = 0x08;
+ break;
+ case 352: /* 352x288 */
+ byte3 = 0x02;
+ byte4 = 0x00;
+ break;
+ case 640:
+ byte3 = 0x03;
+ byte4 = 0x08;
+ break;
+ }
+
+ /* These have a different byte3 */
+ if (sd->model <= CIT_MODEL1)
+ byte3 = 0x00;
+
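+ /* Scan for the 4-byte SOF signature 00 ff <byte3> <byte4>; sof_read
+ tracks how many signature bytes matched so far, so a signature split
+ across USB packets is still detected. */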
+ for (i = 0; i < len; i++) {
+ /* For this model the SOF always starts at offset 0
+ so no need to search the entire frame */
+ if (sd->model == CIT_MODEL0 && sd->sof_read != i)
+ break;
+
+ switch (sd->sof_read) {
+ case 0:
+ if (data[i] == 0x00)
+ sd->sof_read++;
+ break;
+ case 1:
+ if (data[i] == 0xff)
+ sd->sof_read++;
+ else if (data[i] == 0x00)
+ sd->sof_read = 1;
+ else
+ sd->sof_read = 0;
+ break;
+ case 2:
+ if (data[i] == byte3)
+ sd->sof_read++;
+ else if (data[i] == 0x00)
+ sd->sof_read = 1;
+ else
+ sd->sof_read = 0;
+ break;
+ case 3:
+ if (data[i] == byte4) {
+ sd->sof_read = 0;
+ return data + i + (sd->sof_len - 3);
+ }
+ if (byte3 == 0x00 && data[i] == 0xff)
+ sd->sof_read = 2;
+ else if (data[i] == 0x00)
+ sd->sof_read = 1;
+ else
+ sd->sof_read = 0;
+ break;
+ }
+ }
+ break;
+ case CIT_MODEL2:
+ case CIT_MODEL4:
+ /* TESTME we need to find a longer sof signature to avoid
+ false positives */
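+ /* Models 2 and 4 apparently only mark the start of frame with a
+ 2-byte 00 ff sequence. */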
+ for (i = 0; i < len; i++) {
+ switch (sd->sof_read) {
+ case 0:
+ if (data[i] == 0x00)
+ sd->sof_read++;
+ break;
+ case 1:
+ sd->sof_read = 0;
+ if (data[i] == 0xff) {
+ if (i >= 4)
+ PDEBUG(D_FRAM,
+ "header found at offset: %d: %02x %02x 00 %02x %02x %02x\n",
+ i - 1,
+ data[i - 4],
+ data[i - 3],
+ data[i],
+ data[i + 1],
+ data[i + 2]);
+ else
+ PDEBUG(D_FRAM,
+ "header found at offset: %d: 00 %02x %02x %02x\n",
+ i - 1,
+ data[i],
+ data[i + 1],
+ data[i + 2]);
+ return data + i + (sd->sof_len - 1);
+ }
+ break;
+ }
+ }
+ break;
+ }
+ return NULL;
+}
+
+static void sd_pkt_scan(struct gspca_dev *gspca_dev,
+ u8 *data, int len)
+{
+ struct sd *sd = (struct sd *) gspca_dev;
+ unsigned char *sof;
+
+ sof = cit_find_sof(gspca_dev, data, len);
+ if (sof) {
+ int n;
+
+ /* finish decoding current frame */
+ n = sof - data;
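+ /* sof points just past the SOF marker, so the marker bytes
+ (sd->sof_len) must be dropped from the tail of the previous frame */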
+ if (n > sd->sof_len)
+ n -= sd->sof_len;
+ else
+ n = 0;
+ gspca_frame_add(gspca_dev, LAST_PACKET,
+ data, n);
+ gspca_frame_add(gspca_dev, FIRST_PACKET, NULL, 0);
+ len -= sof - data;
+ data = sof;
+ }
+
+ gspca_frame_add(gspca_dev, INTER_PACKET, data, len);
+}
+
+static int sd_setbrightness(struct gspca_dev *gspca_dev, __s32 val)
+{
+ struct sd *sd = (struct sd *) gspca_dev;
+
+ sd->brightness = val;
+ if (gspca_dev->streaming) {
+ if (sd->stop_on_control_change)
+ sd_stopN(gspca_dev);
+ cit_set_brightness(gspca_dev);
+ if (sd->stop_on_control_change)
+ cit_restart_stream(gspca_dev);
+ }
+
+ return 0;
+}
+
+static int sd_getbrightness(struct gspca_dev *gspca_dev, __s32 *val)
+{
+ struct sd *sd = (struct sd *) gspca_dev;
+
+ *val = sd->brightness;
+
+ return 0;
+}
+
+static int sd_setcontrast(struct gspca_dev *gspca_dev, __s32 val)
+{
+ struct sd *sd = (struct sd *) gspca_dev;
+
+ sd->contrast = val;
+ if (gspca_dev->streaming) {
+ if (sd->stop_on_control_change)
+ sd_stopN(gspca_dev);
+ cit_set_contrast(gspca_dev);
+ if (sd->stop_on_control_change)
+ cit_restart_stream(gspca_dev);
+ }
+
+ return 0;
+}
+
+static int sd_getcontrast(struct gspca_dev *gspca_dev, __s32 *val)
+{
+ struct sd *sd = (struct sd *) gspca_dev;
+
+ *val = sd->contrast;
+
+ return 0;
+}
+
+static int sd_sethue(struct gspca_dev *gspca_dev, __s32 val)
+{
+ struct sd *sd = (struct sd *) gspca_dev;
+
+ sd->hue = val;
+ if (gspca_dev->streaming) {
+ if (sd->stop_on_control_change)
+ sd_stopN(gspca_dev);
+ cit_set_hue(gspca_dev);
+ if (sd->stop_on_control_change)
+ cit_restart_stream(gspca_dev);
+ }
+ return 0;
+}
+
+static int sd_gethue(struct gspca_dev *gspca_dev, __s32 *val)
+{
+ struct sd *sd = (struct sd *) gspca_dev;
+
+ *val = sd->hue;
+
+ return 0;
+}
+
+static int sd_setsharpness(struct gspca_dev *gspca_dev, __s32 val)
+{
+ struct sd *sd = (struct sd *) gspca_dev;
+
+ sd->sharpness = val;
+ if (gspca_dev->streaming) {
+ if (sd->stop_on_control_change)
+ sd_stopN(gspca_dev);
+ cit_set_sharpness(gspca_dev);
+ if (sd->stop_on_control_change)
+ cit_restart_stream(gspca_dev);
+ }
+ return 0;
+}
+
+static int sd_getsharpness(struct gspca_dev *gspca_dev, __s32 *val)
+{
+ struct sd *sd = (struct sd *) gspca_dev;
+
+ *val = sd->sharpness;
+
+ return 0;
+}
+
+static int sd_setlighting(struct gspca_dev *gspca_dev, __s32 val)
+{
+ struct sd *sd = (struct sd *) gspca_dev;
+
+ sd->lighting = val;
+ if (gspca_dev->streaming) {
+ if (sd->stop_on_control_change)
+ sd_stopN(gspca_dev);
+ cit_set_lighting(gspca_dev);
+ if (sd->stop_on_control_change)
+ cit_restart_stream(gspca_dev);
+ }
+ return 0;
+}
+
+static int sd_getlighting(struct gspca_dev *gspca_dev, __s32 *val)
+{
+ struct sd *sd = (struct sd *) gspca_dev;
+
+ *val = sd->lighting;
+
+ return 0;
+}
+
+static int sd_sethflip(struct gspca_dev *gspca_dev, __s32 val)
+{
+ struct sd *sd = (struct sd *) gspca_dev;
+
+ sd->hflip = val;
+ if (gspca_dev->streaming) {
+ if (sd->stop_on_control_change)
+ sd_stopN(gspca_dev);
+ cit_set_hflip(gspca_dev);
+ if (sd->stop_on_control_change)
+ cit_restart_stream(gspca_dev);
+ }
+ return 0;
+}
+
+static int sd_gethflip(struct gspca_dev *gspca_dev, __s32 *val)
+{
+ struct sd *sd = (struct sd *) gspca_dev;
+
+ *val = sd->hflip;
+
+ return 0;
+}
+
+
+/* sub-driver description */
+static const struct sd_desc sd_desc = {
+ .name = MODULE_NAME,
+ .ctrls = sd_ctrls,
+ .nctrls = ARRAY_SIZE(sd_ctrls),
+ .config = sd_config,
+ .init = sd_init,
+ .start = sd_start,
+ .stopN = sd_stopN,
+ .stop0 = sd_stop0,
+ .pkt_scan = sd_pkt_scan,
+};
+
+static const struct sd_desc sd_desc_isoc_nego = {
+ .name = MODULE_NAME,
+ .ctrls = sd_ctrls,
+ .nctrls = ARRAY_SIZE(sd_ctrls),
+ .config = sd_config,
+ .init = sd_init,
+ .start = sd_start,
+ .isoc_nego = sd_isoc_nego,
+ .stopN = sd_stopN,
+ .stop0 = sd_stop0,
+ .pkt_scan = sd_pkt_scan,
+};
+
+/* -- module initialisation -- */
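+/* Several models share VID:PID 0545:8080 and are told apart by the
+ bcdDevice revision, hence USB_DEVICE_VER with identical lo/hi bounds */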
+static const __devinitdata struct usb_device_id device_table[] = {
+ { USB_DEVICE_VER(0x0545, 0x8080, 0x0001, 0x0001), .driver_info = CIT_MODEL0 },
+ { USB_DEVICE_VER(0x0545, 0x8080, 0x0002, 0x0002), .driver_info = CIT_MODEL1 },
+ { USB_DEVICE_VER(0x0545, 0x8080, 0x030a, 0x030a), .driver_info = CIT_MODEL2 },
+ { USB_DEVICE_VER(0x0545, 0x8080, 0x0301, 0x0301), .driver_info = CIT_MODEL3 },
+ { USB_DEVICE_VER(0x0545, 0x8002, 0x030a, 0x030a), .driver_info = CIT_MODEL4 },
+ { USB_DEVICE_VER(0x0545, 0x800c, 0x030a, 0x030a), .driver_info = CIT_MODEL2 },
+ { USB_DEVICE_VER(0x0545, 0x800d, 0x030a, 0x030a), .driver_info = CIT_MODEL4 },
+ {}
+};
+MODULE_DEVICE_TABLE(usb, device_table);
+
+/* -- device connect -- */
+static int sd_probe(struct usb_interface *intf,
+ const struct usb_device_id *id)
+{
+ const struct sd_desc *desc = &sd_desc;
+
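+ /* These cameras expose more than one USB interface; bind only to the
+ interface that presumably carries the video endpoints. */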
+ switch (id->driver_info) {
+ case CIT_MODEL0:
+ case CIT_MODEL1:
+ if (intf->cur_altsetting->desc.bInterfaceNumber != 2)
+ return -ENODEV;
+ break;
+ case CIT_MODEL2:
+ case CIT_MODEL4:
+ if (intf->cur_altsetting->desc.bInterfaceNumber != 0)
+ return -ENODEV;
+ break;
+ case CIT_MODEL3:
+ if (intf->cur_altsetting->desc.bInterfaceNumber != 0)
+ return -ENODEV;
+ /* FIXME this likely applies to all model3 cams and probably
+ to other models too. */
+ if (ibm_netcam_pro)
+ desc = &sd_desc_isoc_nego;
+ break;
+ }
+
+ return gspca_dev_probe2(intf, id, desc, sizeof(struct sd), THIS_MODULE);
+}
+
+static struct usb_driver sd_driver = {
+ .name = MODULE_NAME,
+ .id_table = device_table,
+ .probe = sd_probe,
+ .disconnect = gspca_disconnect,
+#ifdef CONFIG_PM
+ .suspend = gspca_suspend,
+ .resume = gspca_resume,
+#endif
+};
+
+/* -- module insert / remove -- */
+static int __init sd_mod_init(void)
+{
+ return usb_register(&sd_driver);
+}
+static void __exit sd_mod_exit(void)
+{
+ usb_deregister(&sd_driver);
+}
+
+module_init(sd_mod_init);
+module_exit(sd_mod_exit);
diff --git a/drivers/media/video/gspca/zc3xx.c b/drivers/media/video/gspca/zc3xx.c
index 0666038a51b0..c7e1970ca284 100644
--- a/drivers/media/video/gspca/zc3xx.c
+++ b/drivers/media/video/gspca/zc3xx.c
@@ -21,9 +21,7 @@
#define MODULE_NAME "zc3xx"
-#ifdef CONFIG_INPUT
#include <linux/input.h>
-#endif
#include "gspca.h"
#include "jpeg.h"
@@ -2953,7 +2951,7 @@ static const struct usb_action mc501cb_Initial[] = {
{}
};
-static const struct usb_action mc501cb_InitialScale[] = { /* 320x240 */
+static const struct usb_action mc501cb_InitialScale[] = { /* 320x240 */
{0xa0, 0x01, ZC3XX_R000_SYSTEMCONTROL}, /* 00,00,01,cc */
{0xa0, 0x10, ZC3XX_R002_CLOCKSELECT}, /* 00,02,10,cc */
{0xa0, 0x01, ZC3XX_R010_CMOSSENSORSELECT}, /* 00,10,01,cc */
@@ -3731,7 +3729,6 @@ static const struct usb_action pas106b_InitialScale[] = { /* 176x144 */
{0xaa, 0x0d, 0x0000},
{0xaa, 0x0e, 0x0002},
{0xaa, 0x14, 0x0081},
-
/* Other registers */
{0xa0, 0x37, ZC3XX_R101_SENSORCORRECTION},
/* Frame retreiving */
@@ -3785,7 +3782,6 @@ static const struct usb_action pas106b_InitialScale[] = { /* 176x144 */
{0xa0, 0x05, ZC3XX_R185_WINYWIDTH},
{0xa0, 0x14, ZC3XX_R186_WINYCENTER},
{0xa0, 0x00, ZC3XX_R180_AUTOCORRECTENABLE},
-
/* Auto exposure and white balance */
{0xa0, 0x00, ZC3XX_R190_EXPOSURELIMITHIGH},
{0xa0, 0x03, ZC3XX_R191_EXPOSURELIMITMID},
@@ -3849,7 +3845,6 @@ static const struct usb_action pas106b_Initial[] = { /* 352x288 */
{0xaa, 0x0d, 0x0000},
{0xaa, 0x0e, 0x0002},
{0xaa, 0x14, 0x0081},
-
/* Other registers */
{0xa0, 0x37, ZC3XX_R101_SENSORCORRECTION},
/* Frame retreiving */
@@ -5698,7 +5693,7 @@ static u8 reg_r_i(struct gspca_dev *gspca_dev,
index, gspca_dev->usb_buf, 1,
500);
if (ret < 0) {
- PDEBUG(D_ERR, "reg_r_i err %d", ret);
+ err("reg_r_i err %d", ret);
gspca_dev->usb_err = ret;
return 0;
}
@@ -5730,7 +5725,7 @@ static void reg_w_i(struct gspca_dev *gspca_dev,
value, index, NULL, 0,
500);
if (ret < 0) {
- PDEBUG(D_ERR, "reg_w_i err %d", ret);
+ err("reg_w_i err %d", ret);
gspca_dev->usb_err = ret;
}
}
@@ -6309,8 +6304,7 @@ static int vga_3wr_probe(struct gspca_dev *gspca_dev)
if (chipset_revision_sensor[i].revision == retword) {
sd->chip_revision = retword;
send_unknown(gspca_dev, SENSOR_PB0330);
- return chipset_revision_sensor[i]
- .internal_sensor_id;
+ return chipset_revision_sensor[i].internal_sensor_id;
}
}
@@ -6503,8 +6497,7 @@ static int sd_init(struct gspca_dev *gspca_dev)
PDEBUG(D_PROBE, "Sensor Tas5130 (VF0250)");
break;
default:
- PDEBUG(D_PROBE,
- "Unknown sensor - set to TAS5130C");
+ warn("Unknown sensor - set to TAS5130C");
sd->sensor = SENSOR_TAS5130C;
}
break;
@@ -6610,7 +6603,7 @@ static int sd_init(struct gspca_dev *gspca_dev)
sd->sensor = SENSOR_OV7620; /* same sensor (?) */
break;
default:
- PDEBUG(D_ERR|D_PROBE, "Unknown sensor %04x", sensor);
+ err("Unknown sensor %04x", sensor);
return -EINVAL;
}
}
@@ -6790,7 +6783,7 @@ static int sd_start(struct gspca_dev *gspca_dev)
/* fall thru */
case SENSOR_PAS202B:
case SENSOR_PO2030:
-/* reg_w(gspca_dev, 0x40, ZC3XX_R117_GGAIN); * (from win traces) */
+/* reg_w(gspca_dev, 0x40, ZC3XX_R117_GGAIN); in win traces */
reg_r(gspca_dev, 0x0180);
break;
case SENSOR_OV7620:
@@ -6798,7 +6791,7 @@ static int sd_start(struct gspca_dev *gspca_dev)
reg_w(gspca_dev, 0x15, 0x01ae);
i2c_read(gspca_dev, 0x13); /*fixme: returns 0xa3 */
i2c_write(gspca_dev, 0x13, 0xa3, 0x00);
- /*fixme: returned value to send? */
+ /*fixme: returned value to send? */
reg_w(gspca_dev, 0x40, 0x0117);
reg_r(gspca_dev, 0x0180);
break;
@@ -6841,7 +6834,7 @@ static void sd_pkt_scan(struct gspca_dev *gspca_dev,
/* remove the webcam's header:
* ff d8 ff fe 00 0e 00 00 ss ss 00 01 ww ww hh hh pp pp
* - 'ss ss' is the frame sequence number (BE)
- * - 'ww ww' and 'hh hh' are the window dimensions (BE)
+ * - 'ww ww' and 'hh hh' are the window dimensions (BE)
* - 'pp pp' is the packet sequence number (BE)
*/
data += 18;
@@ -7007,7 +7000,7 @@ static int sd_get_jcomp(struct gspca_dev *gspca_dev,
return 0;
}
-#ifdef CONFIG_INPUT
+#if defined(CONFIG_INPUT) || defined(CONFIG_INPUT_MODULE)
static int sd_int_pkt_scan(struct gspca_dev *gspca_dev,
u8 *data, /* interrupt packet data */
int len) /* interrput packet length */
@@ -7035,7 +7028,7 @@ static const struct sd_desc sd_desc = {
.querymenu = sd_querymenu,
.get_jcomp = sd_get_jcomp,
.set_jcomp = sd_set_jcomp,
-#ifdef CONFIG_INPUT
+#if defined(CONFIG_INPUT) || defined(CONFIG_INPUT_MODULE)
.int_pkt_scan = sd_int_pkt_scan,
#endif
};
@@ -7120,18 +7113,12 @@ static struct usb_driver sd_driver = {
static int __init sd_mod_init(void)
{
- int ret;
- ret = usb_register(&sd_driver);
- if (ret < 0)
- return ret;
- PDEBUG(D_PROBE, "registered");
- return 0;
+ return usb_register(&sd_driver);
}
static void __exit sd_mod_exit(void)
{
usb_deregister(&sd_driver);
- PDEBUG(D_PROBE, "deregistered");
}
module_init(sd_mod_init);
diff --git a/drivers/media/video/hdpvr/hdpvr-control.c b/drivers/media/video/hdpvr/hdpvr-control.c
index 5a6b78b8d25d..068df4ba3f51 100644
--- a/drivers/media/video/hdpvr/hdpvr-control.c
+++ b/drivers/media/video/hdpvr/hdpvr-control.c
@@ -29,8 +29,6 @@ int hdpvr_config_call(struct hdpvr_device *dev, uint value, u8 valbuf)
int ret;
char request_type = 0x38, snd_request = 0x01;
- msleep(10);
-
mutex_lock(&dev->usbc_mutex);
dev->usbc_buf[0] = valbuf;
ret = usb_control_msg(dev->udev,
@@ -170,8 +168,7 @@ int hdpvr_set_audio(struct hdpvr_device *dev, u8 input,
if (ret == 2)
ret = 0;
} else
- ret = hdpvr_config_call(dev, CTRL_AUDIO_INPUT_VALUE,
- dev->options.audio_input+1);
+ ret = hdpvr_config_call(dev, CTRL_AUDIO_INPUT_VALUE, input);
error:
return ret;
}
diff --git a/drivers/media/video/hdpvr/hdpvr-core.c b/drivers/media/video/hdpvr/hdpvr-core.c
index 0cae5b82e1a2..b70d6afc9fec 100644
--- a/drivers/media/video/hdpvr/hdpvr-core.c
+++ b/drivers/media/video/hdpvr/hdpvr-core.c
@@ -60,6 +60,7 @@ static struct usb_device_id hdpvr_table[] = {
{ USB_DEVICE(HD_PVR_VENDOR_ID, HD_PVR_PRODUCT_ID1) },
{ USB_DEVICE(HD_PVR_VENDOR_ID, HD_PVR_PRODUCT_ID2) },
{ USB_DEVICE(HD_PVR_VENDOR_ID, HD_PVR_PRODUCT_ID3) },
+ { USB_DEVICE(HD_PVR_VENDOR_ID, HD_PVR_PRODUCT_ID4) },
{ } /* Terminating entry */
};
MODULE_DEVICE_TABLE(usb, hdpvr_table);
@@ -152,19 +153,26 @@ static int device_authorization(struct hdpvr_device *dev)
ret, print_buf);
}
#endif
- if (dev->usbc_buf[1] == HDPVR_FIRMWARE_VERSION) {
+
+ v4l2_info(&dev->v4l2_dev, "firmware version 0x%x dated %s\n",
+ dev->usbc_buf[1], &dev->usbc_buf[2]);
+
+ switch (dev->usbc_buf[1]) {
+ case HDPVR_FIRMWARE_VERSION:
dev->flags &= ~HDPVR_FLAG_AC3_CAP;
- } else if (dev->usbc_buf[1] == HDPVR_FIRMWARE_VERSION_AC3) {
- dev->flags |= HDPVR_FLAG_AC3_CAP;
- } else if (dev->usbc_buf[1] > HDPVR_FIRMWARE_VERSION_AC3) {
- v4l2_info(&dev->v4l2_dev, "untested firmware version 0x%x, "
- "the driver might not work\n", dev->usbc_buf[1]);
+ break;
+ case HDPVR_FIRMWARE_VERSION_AC3:
+ case HDPVR_FIRMWARE_VERSION_0X12:
+ case HDPVR_FIRMWARE_VERSION_0X15:
dev->flags |= HDPVR_FLAG_AC3_CAP;
- } else {
- v4l2_err(&dev->v4l2_dev, "unknown firmware version 0x%x\n",
- dev->usbc_buf[1]);
- ret = -EINVAL;
- goto unlock;
+ break;
+ default:
+ v4l2_info(&dev->v4l2_dev, "untested firmware, the driver might"
+ " not work.\n");
+ if (dev->usbc_buf[1] >= HDPVR_FIRMWARE_VERSION_AC3)
+ dev->flags |= HDPVR_FLAG_AC3_CAP;
+ else
+ dev->flags &= ~HDPVR_FLAG_AC3_CAP;
}
response = dev->usbc_buf+38;
@@ -319,8 +327,12 @@ static int hdpvr_probe(struct usb_interface *interface,
if (default_video_input < HDPVR_VIDEO_INPUTS)
dev->options.video_input = default_video_input;
- if (default_audio_input < HDPVR_AUDIO_INPUTS)
+ if (default_audio_input < HDPVR_AUDIO_INPUTS) {
dev->options.audio_input = default_audio_input;
+ if (default_audio_input == HDPVR_SPDIF)
+ dev->options.audio_codec =
+ V4L2_MPEG_AUDIO_ENCODING_AC3;
+ }
dev->udev = usb_get_dev(interface_to_usbdev(interface));
diff --git a/drivers/media/video/hdpvr/hdpvr-i2c.c b/drivers/media/video/hdpvr/hdpvr-i2c.c
index 463b81bef6e2..409de11096d4 100644
--- a/drivers/media/video/hdpvr/hdpvr-i2c.c
+++ b/drivers/media/video/hdpvr/hdpvr-i2c.c
@@ -127,7 +127,6 @@ int hdpvr_register_i2c_adapter(struct hdpvr_device *dev)
strlcpy(i2c_adap->name, "Hauppauge HD PVR I2C",
sizeof(i2c_adap->name));
i2c_adap->algo = &hdpvr_algo;
- i2c_adap->class = I2C_CLASS_TV_ANALOG;
i2c_adap->owner = THIS_MODULE;
i2c_adap->dev.parent = &dev->udev->dev;
diff --git a/drivers/media/video/hdpvr/hdpvr-video.c b/drivers/media/video/hdpvr/hdpvr-video.c
index 4863a21b1f24..d38fe1043e47 100644
--- a/drivers/media/video/hdpvr/hdpvr-video.c
+++ b/drivers/media/video/hdpvr/hdpvr-video.c
@@ -26,7 +26,7 @@
#include <media/v4l2-ioctl.h>
#include "hdpvr.h"
-#define BULK_URB_TIMEOUT 1250 /* 1.25 seconds */
+#define BULK_URB_TIMEOUT 90 /* 0.09 seconds */
#define print_buffer_status() { \
v4l2_dbg(MSG_BUFFER, hdpvr_debug, &dev->v4l2_dev, \
@@ -157,6 +157,7 @@ int hdpvr_alloc_buffers(struct hdpvr_device *dev, uint count)
mem, dev->bulk_in_size,
hdpvr_read_bulk_callback, buf);
+ buf->urb->transfer_flags |= URB_NO_TRANSFER_DMA_MAP;
buf->status = BUFSTAT_AVAILABLE;
list_add_tail(&buf->buff_list, &dev->free_buff_list);
}
@@ -337,8 +338,6 @@ static int hdpvr_stop_streaming(struct hdpvr_device *dev)
dev->bulk_in_endpointAddr),
buf, dev->bulk_in_size, &actual_length,
BULK_URB_TIMEOUT)) {
- /* wait */
- msleep(5);
v4l2_dbg(MSG_BUFFER, hdpvr_debug, &dev->v4l2_dev,
"%2d: got %d bytes\n", c, actual_length);
}
diff --git a/drivers/media/video/hdpvr/hdpvr.h b/drivers/media/video/hdpvr/hdpvr.h
index b0f046df3cd8..5efc963f9164 100644
--- a/drivers/media/video/hdpvr/hdpvr.h
+++ b/drivers/media/video/hdpvr/hdpvr.h
@@ -30,14 +30,17 @@
#define HD_PVR_PRODUCT_ID 0x4900
#define HD_PVR_PRODUCT_ID1 0x4901
#define HD_PVR_PRODUCT_ID2 0x4902
+#define HD_PVR_PRODUCT_ID4 0x4903
#define HD_PVR_PRODUCT_ID3 0x4982
#define UNSET (-1U)
#define NUM_BUFFERS 64
-#define HDPVR_FIRMWARE_VERSION 0x8
-#define HDPVR_FIRMWARE_VERSION_AC3 0xd
+#define HDPVR_FIRMWARE_VERSION 0x08
+#define HDPVR_FIRMWARE_VERSION_AC3 0x0d
+#define HDPVR_FIRMWARE_VERSION_0X12 0x12
+#define HDPVR_FIRMWARE_VERSION_0X15 0x15
/* #define HDPVR_DEBUG */
diff --git a/drivers/media/video/hexium_gemini.c b/drivers/media/video/hexium_gemini.c
index ad2c232baa6d..7ae96367b3ab 100644
--- a/drivers/media/video/hexium_gemini.c
+++ b/drivers/media/video/hexium_gemini.c
@@ -367,7 +367,6 @@ static int hexium_attach(struct saa7146_dev *dev, struct saa7146_pci_extension_d
saa7146_write(dev, MC1, (MASK_08 | MASK_24 | MASK_10 | MASK_26));
hexium->i2c_adapter = (struct i2c_adapter) {
- .class = I2C_CLASS_TV_ANALOG,
.name = "hexium gemini",
};
saa7146_i2c_adapter_prepare(dev, &hexium->i2c_adapter, SAA7146_I2C_BUS_BIT_RATE_480);
diff --git a/drivers/media/video/hexium_orion.c b/drivers/media/video/hexium_orion.c
index 938a1f8f880a..b72d0f0b8310 100644
--- a/drivers/media/video/hexium_orion.c
+++ b/drivers/media/video/hexium_orion.c
@@ -230,7 +230,6 @@ static int hexium_probe(struct saa7146_dev *dev)
saa7146_write(dev, MC2, (MASK_09 | MASK_25 | MASK_10 | MASK_26));
hexium->i2c_adapter = (struct i2c_adapter) {
- .class = I2C_CLASS_TV_ANALOG,
.name = "hexium orion",
};
saa7146_i2c_adapter_prepare(dev, &hexium->i2c_adapter, SAA7146_I2C_BUS_BIT_RATE_480);
diff --git a/drivers/media/video/imx074.c b/drivers/media/video/imx074.c
new file mode 100644
index 000000000000..27b5dfdfbb93
--- /dev/null
+++ b/drivers/media/video/imx074.c
@@ -0,0 +1,506 @@
+/*
+ * Driver for IMX074 CMOS Image Sensor from Sony
+ *
+ * Copyright (C) 2010, Guennadi Liakhovetski <g.liakhovetski@gmx.de>
+ *
+ * Partially inspired by the IMX074 driver from the Android / MSM tree
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/delay.h>
+#include <linux/i2c.h>
+#include <linux/slab.h>
+#include <linux/videodev2.h>
+
+#include <media/soc_camera.h>
+#include <media/soc_mediabus.h>
+#include <media/v4l2-subdev.h>
+#include <media/v4l2-chip-ident.h>
+
+/* IMX074 registers */
+
+#define MODE_SELECT 0x0100
+#define IMAGE_ORIENTATION 0x0101
+#define GROUPED_PARAMETER_HOLD 0x0104
+
+/* Integration Time */
+#define COARSE_INTEGRATION_TIME_HI 0x0202
+#define COARSE_INTEGRATION_TIME_LO 0x0203
+/* Gain */
+#define ANALOGUE_GAIN_CODE_GLOBAL_HI 0x0204
+#define ANALOGUE_GAIN_CODE_GLOBAL_LO 0x0205
+
+/* PLL registers */
+#define PRE_PLL_CLK_DIV 0x0305
+#define PLL_MULTIPLIER 0x0307
+#define PLSTATIM 0x302b
+#define VNDMY_ABLMGSHLMT 0x300a
+#define Y_OPBADDR_START_DI 0x3014
+/* mode setting */
+#define FRAME_LENGTH_LINES_HI 0x0340
+#define FRAME_LENGTH_LINES_LO 0x0341
+#define LINE_LENGTH_PCK_HI 0x0342
+#define LINE_LENGTH_PCK_LO 0x0343
+#define YADDR_START 0x0347
+#define YADDR_END 0x034b
+#define X_OUTPUT_SIZE_MSB 0x034c
+#define X_OUTPUT_SIZE_LSB 0x034d
+#define Y_OUTPUT_SIZE_MSB 0x034e
+#define Y_OUTPUT_SIZE_LSB 0x034f
+#define X_EVEN_INC 0x0381
+#define X_ODD_INC 0x0383
+#define Y_EVEN_INC 0x0385
+#define Y_ODD_INC 0x0387
+
+#define HMODEADD 0x3001
+#define VMODEADD 0x3016
+#define VAPPLINE_START 0x3069
+#define VAPPLINE_END 0x306b
+#define SHUTTER 0x3086
+#define HADDAVE 0x30e8
+#define LANESEL 0x3301
+
+/* IMX074 supported geometry */
+#define IMX074_WIDTH 1052
+#define IMX074_HEIGHT 780
+
+/* IMX074 has only one fixed colorspace per pixelcode */
+struct imx074_datafmt {
+ enum v4l2_mbus_pixelcode code;
+ enum v4l2_colorspace colorspace;
+};
+
+struct imx074 {
+ struct v4l2_subdev subdev;
+ const struct imx074_datafmt *fmt;
+};
+
+static const struct imx074_datafmt imx074_colour_fmts[] = {
+ {V4L2_MBUS_FMT_SBGGR8_1X8, V4L2_COLORSPACE_SRGB},
+};
+
+static struct imx074 *to_imx074(const struct i2c_client *client)
+{
+ return container_of(i2c_get_clientdata(client), struct imx074, subdev);
+}
+
+/* Find a data format by a pixel code in an array */
+static const struct imx074_datafmt *imx074_find_datafmt(enum v4l2_mbus_pixelcode code)
+{
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(imx074_colour_fmts); i++)
+ if (imx074_colour_fmts[i].code == code)
+ return imx074_colour_fmts + i;
+
+ return NULL;
+}
+
+static int reg_write(struct i2c_client *client, const u16 addr, const u8 data)
+{
+ struct i2c_adapter *adap = client->adapter;
+ struct i2c_msg msg;
+ unsigned char tx[3];
+ int ret;
+
+ msg.addr = client->addr;
+ msg.buf = tx;
+ msg.len = 3;
+ msg.flags = 0;
+
+ tx[0] = addr >> 8;
+ tx[1] = addr & 0xff;
+ tx[2] = data;
+
+ ret = i2c_transfer(adap, &msg, 1);
+
+ mdelay(2);
+
+ return ret == 1 ? 0 : -EIO;
+}
+
+static int reg_read(struct i2c_client *client, const u16 addr)
+{
+ u8 buf[2] = {addr >> 8, addr & 0xff};
+ int ret;
+ struct i2c_msg msgs[] = {
+ {
+ .addr = client->addr,
+ .flags = 0,
+ .len = 2,
+ .buf = buf,
+ }, {
+ .addr = client->addr,
+ .flags = I2C_M_RD,
+ .len = 2,
+ .buf = buf,
+ },
+ };
+
+ ret = i2c_transfer(client->adapter, msgs, ARRAY_SIZE(msgs));
+ if (ret < 0) {
+ dev_warn(&client->dev, "Reading register %x from %x failed\n",
+ addr, client->addr);
+ return ret;
+ }
+
+ return buf[0] & 0xff; /* no sign-extension */
+}
+
+static int imx074_try_fmt(struct v4l2_subdev *sd,
+ struct v4l2_mbus_framefmt *mf)
+{
+ const struct imx074_datafmt *fmt = imx074_find_datafmt(mf->code);
+
+ dev_dbg(sd->v4l2_dev->dev, "%s(%u)\n", __func__, mf->code);
+
+ if (!fmt) {
+ mf->code = imx074_colour_fmts[0].code;
+ mf->colorspace = imx074_colour_fmts[0].colorspace;
+ }
+
+ mf->width = IMX074_WIDTH;
+ mf->height = IMX074_HEIGHT;
+ mf->field = V4L2_FIELD_NONE;
+
+ return 0;
+}
+
+static int imx074_s_fmt(struct v4l2_subdev *sd,
+ struct v4l2_mbus_framefmt *mf)
+{
+ struct i2c_client *client = v4l2_get_subdevdata(sd);
+ struct imx074 *priv = to_imx074(client);
+
+ dev_dbg(sd->v4l2_dev->dev, "%s(%u)\n", __func__, mf->code);
+
+ /* MIPI CSI could have changed the format, double-check */
+ if (!imx074_find_datafmt(mf->code))
+ return -EINVAL;
+
+ imx074_try_fmt(sd, mf);
+
+ priv->fmt = imx074_find_datafmt(mf->code);
+
+ return 0;
+}
+
+static int imx074_g_fmt(struct v4l2_subdev *sd,
+ struct v4l2_mbus_framefmt *mf)
+{
+ struct i2c_client *client = v4l2_get_subdevdata(sd);
+ struct imx074 *priv = to_imx074(client);
+
+ const struct imx074_datafmt *fmt = priv->fmt;
+
+ mf->code = fmt->code;
+ mf->colorspace = fmt->colorspace;
+ mf->width = IMX074_WIDTH;
+ mf->height = IMX074_HEIGHT;
+ mf->field = V4L2_FIELD_NONE;
+
+ return 0;
+}
+
+static int imx074_g_crop(struct v4l2_subdev *sd, struct v4l2_crop *a)
+{
+ struct v4l2_rect *rect = &a->c;
+
+ a->type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
+ rect->top = 0;
+ rect->left = 0;
+ rect->width = IMX074_WIDTH;
+ rect->height = IMX074_HEIGHT;
+
+ return 0;
+}
+
+static int imx074_cropcap(struct v4l2_subdev *sd, struct v4l2_cropcap *a)
+{
+ a->bounds.left = 0;
+ a->bounds.top = 0;
+ a->bounds.width = IMX074_WIDTH;
+ a->bounds.height = IMX074_HEIGHT;
+ a->defrect = a->bounds;
+ a->type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
+ a->pixelaspect.numerator = 1;
+ a->pixelaspect.denominator = 1;
+
+ return 0;
+}
+
+static int imx074_enum_fmt(struct v4l2_subdev *sd, unsigned int index,
+ enum v4l2_mbus_pixelcode *code)
+{
+ if ((unsigned int)index >= ARRAY_SIZE(imx074_colour_fmts))
+ return -EINVAL;
+
+ *code = imx074_colour_fmts[index].code;
+ return 0;
+}
+
+static int imx074_s_stream(struct v4l2_subdev *sd, int enable)
+{
+ struct i2c_client *client = v4l2_get_subdevdata(sd);
+
+ /* MODE_SELECT: stream or standby */
+ return reg_write(client, MODE_SELECT, !!enable);
+}
+
+static int imx074_g_chip_ident(struct v4l2_subdev *sd,
+ struct v4l2_dbg_chip_ident *id)
+{
+ struct i2c_client *client = v4l2_get_subdevdata(sd);
+
+ if (id->match.type != V4L2_CHIP_MATCH_I2C_ADDR)
+ return -EINVAL;
+
+ if (id->match.addr != client->addr)
+ return -ENODEV;
+
+ id->ident = V4L2_IDENT_IMX074;
+ id->revision = 0;
+
+ return 0;
+}
+
+static struct v4l2_subdev_video_ops imx074_subdev_video_ops = {
+ .s_stream = imx074_s_stream,
+ .s_mbus_fmt = imx074_s_fmt,
+ .g_mbus_fmt = imx074_g_fmt,
+ .try_mbus_fmt = imx074_try_fmt,
+ .enum_mbus_fmt = imx074_enum_fmt,
+ .g_crop = imx074_g_crop,
+ .cropcap = imx074_cropcap,
+};
+
+static struct v4l2_subdev_core_ops imx074_subdev_core_ops = {
+ .g_chip_ident = imx074_g_chip_ident,
+};
+
+static struct v4l2_subdev_ops imx074_subdev_ops = {
+ .core = &imx074_subdev_core_ops,
+ .video = &imx074_subdev_video_ops,
+};
+
+/*
+ * We have to provide soc-camera operations, but we don't have anything to say
+ * there. The MIPI CSI2 driver will provide .query_bus_param and .set_bus_param
+ */
+static unsigned long imx074_query_bus_param(struct soc_camera_device *icd)
+{
+ return 0;
+}
+
+static int imx074_set_bus_param(struct soc_camera_device *icd,
+ unsigned long flags)
+{
+ return -1;
+}
+
+static struct soc_camera_ops imx074_ops = {
+ .query_bus_param = imx074_query_bus_param,
+ .set_bus_param = imx074_set_bus_param,
+};
+
+static int imx074_video_probe(struct soc_camera_device *icd,
+ struct i2c_client *client)
+{
+ int ret;
+ u16 id;
+
+ /* Read sensor Model ID */
+ ret = reg_read(client, 0);
+ if (ret < 0)
+ return ret;
+
+ id = ret << 8;
+
+ ret = reg_read(client, 1);
+ if (ret < 0)
+ return ret;
+
+ id |= ret;
+
+ dev_info(&client->dev, "Chip ID 0x%04x detected\n", id);
+
+ if (id != 0x74)
+ return -ENODEV;
+
+ /* PLL Setting EXTCLK=24MHz, 22.5 times */
+ reg_write(client, PLL_MULTIPLIER, 0x2D);
+ reg_write(client, PRE_PLL_CLK_DIV, 0x02);
+ reg_write(client, PLSTATIM, 0x4B);
+
+ /* 2-lane mode */
+ reg_write(client, 0x3024, 0x00);
+
+ reg_write(client, IMAGE_ORIENTATION, 0x00);
+
+ /* select RAW mode:
+ * 0x08+0x08 = top 8 bits
+ * 0x0a+0x08 = compressed 8-bits
+ * 0x0a+0x0a = 10 bits
+ */
+ reg_write(client, 0x0112, 0x08);
+ reg_write(client, 0x0113, 0x08);
+
+ /* Base setting for High frame mode */
+ reg_write(client, VNDMY_ABLMGSHLMT, 0x80);
+ reg_write(client, Y_OPBADDR_START_DI, 0x08);
+ reg_write(client, 0x3015, 0x37);
+ reg_write(client, 0x301C, 0x01);
+ reg_write(client, 0x302C, 0x05);
+ reg_write(client, 0x3031, 0x26);
+ reg_write(client, 0x3041, 0x60);
+ reg_write(client, 0x3051, 0x24);
+ reg_write(client, 0x3053, 0x34);
+ reg_write(client, 0x3057, 0xC0);
+ reg_write(client, 0x305C, 0x09);
+ reg_write(client, 0x305D, 0x07);
+ reg_write(client, 0x3060, 0x30);
+ reg_write(client, 0x3065, 0x00);
+ reg_write(client, 0x30AA, 0x08);
+ reg_write(client, 0x30AB, 0x1C);
+ reg_write(client, 0x30B0, 0x32);
+ reg_write(client, 0x30B2, 0x83);
+ reg_write(client, 0x30D3, 0x04);
+ reg_write(client, 0x3106, 0x78);
+ reg_write(client, 0x310C, 0x82);
+ reg_write(client, 0x3304, 0x05);
+ reg_write(client, 0x3305, 0x04);
+ reg_write(client, 0x3306, 0x11);
+ reg_write(client, 0x3307, 0x02);
+ reg_write(client, 0x3308, 0x0C);
+ reg_write(client, 0x3309, 0x06);
+ reg_write(client, 0x330A, 0x08);
+ reg_write(client, 0x330B, 0x04);
+ reg_write(client, 0x330C, 0x08);
+ reg_write(client, 0x330D, 0x06);
+ reg_write(client, 0x330E, 0x01);
+ reg_write(client, 0x3381, 0x00);
+
+ /* V : 1/2V-addition (1,3), H : 1/2H-averaging (1,3) -> Full HD */
+ /* 1608 = 1560 + 48 (black lines) */
+ reg_write(client, FRAME_LENGTH_LINES_HI, 0x06);
+ reg_write(client, FRAME_LENGTH_LINES_LO, 0x48);
+ reg_write(client, YADDR_START, 0x00);
+ reg_write(client, YADDR_END, 0x2F);
+ /* 0x838 == 2104 */
+ reg_write(client, X_OUTPUT_SIZE_MSB, 0x08);
+ reg_write(client, X_OUTPUT_SIZE_LSB, 0x38);
+ /* 0x618 == 1560 */
+ reg_write(client, Y_OUTPUT_SIZE_MSB, 0x06);
+ reg_write(client, Y_OUTPUT_SIZE_LSB, 0x18);
+ reg_write(client, X_EVEN_INC, 0x01);
+ reg_write(client, X_ODD_INC, 0x03);
+ reg_write(client, Y_EVEN_INC, 0x01);
+ reg_write(client, Y_ODD_INC, 0x03);
+ reg_write(client, HMODEADD, 0x00);
+ reg_write(client, VMODEADD, 0x16);
+ reg_write(client, VAPPLINE_START, 0x24);
+ reg_write(client, VAPPLINE_END, 0x53);
+ reg_write(client, SHUTTER, 0x00);
+ reg_write(client, HADDAVE, 0x80);
+
+ reg_write(client, LANESEL, 0x00);
+
+ reg_write(client, GROUPED_PARAMETER_HOLD, 0x00); /* off */
+
+ return 0;
+}
+
+static int imx074_probe(struct i2c_client *client,
+ const struct i2c_device_id *did)
+{
+ struct imx074 *priv;
+ struct soc_camera_device *icd = client->dev.platform_data;
+ struct i2c_adapter *adapter = to_i2c_adapter(client->dev.parent);
+ struct soc_camera_link *icl;
+ int ret;
+
+ if (!icd) {
+ dev_err(&client->dev, "IMX074: missing soc-camera data!\n");
+ return -EINVAL;
+ }
+
+ icl = to_soc_camera_link(icd);
+ if (!icl) {
+ dev_err(&client->dev, "IMX074: missing platform data!\n");
+ return -EINVAL;
+ }
+
+ if (!i2c_check_functionality(adapter, I2C_FUNC_SMBUS_BYTE_DATA)) {
+ dev_warn(&adapter->dev,
+ "I2C-Adapter doesn't support I2C_FUNC_SMBUS_BYTE\n");
+ return -EIO;
+ }
+
+ priv = kzalloc(sizeof(struct imx074), GFP_KERNEL);
+ if (!priv)
+ return -ENOMEM;
+
+ v4l2_i2c_subdev_init(&priv->subdev, client, &imx074_subdev_ops);
+
+ icd->ops = &imx074_ops;
+ priv->fmt = &imx074_colour_fmts[0];
+
+ ret = imx074_video_probe(icd, client);
+ if (ret < 0) {
+ icd->ops = NULL;
+ kfree(priv);
+ return ret;
+ }
+
+ return ret;
+}
+
+static int imx074_remove(struct i2c_client *client)
+{
+ struct imx074 *priv = to_imx074(client);
+ struct soc_camera_device *icd = client->dev.platform_data;
+ struct soc_camera_link *icl = to_soc_camera_link(icd);
+
+ icd->ops = NULL;
+ if (icl->free_bus)
+ icl->free_bus(icl);
+ client->driver = NULL;
+ kfree(priv);
+
+ return 0;
+}
+
+static const struct i2c_device_id imx074_id[] = {
+ { "imx074", 0 },
+ { }
+};
+MODULE_DEVICE_TABLE(i2c, imx074_id);
+
+static struct i2c_driver imx074_i2c_driver = {
+ .driver = {
+ .name = "imx074",
+ },
+ .probe = imx074_probe,
+ .remove = imx074_remove,
+ .id_table = imx074_id,
+};
+
+static int __init imx074_mod_init(void)
+{
+ return i2c_add_driver(&imx074_i2c_driver);
+}
+
+static void __exit imx074_mod_exit(void)
+{
+ i2c_del_driver(&imx074_i2c_driver);
+}
+
+module_init(imx074_mod_init);
+module_exit(imx074_mod_exit);
+
+MODULE_DESCRIPTION("Sony IMX074 Camera driver");
+MODULE_AUTHOR("Guennadi Liakhovetski <g.liakhovetski@gmx.de>");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/media/video/indycam.c b/drivers/media/video/indycam.c
index 3d6940163b12..e5ed4db32e7b 100644
--- a/drivers/media/video/indycam.c
+++ b/drivers/media/video/indycam.c
@@ -24,7 +24,6 @@
#include <linux/i2c.h>
#include <media/v4l2-device.h>
#include <media/v4l2-chip-ident.h>
-#include <media/v4l2-i2c-drv.h>
#include "indycam.h"
@@ -378,9 +377,25 @@ static const struct i2c_device_id indycam_id[] = {
};
MODULE_DEVICE_TABLE(i2c, indycam_id);
-static struct v4l2_i2c_driver_data v4l2_i2c_data = {
- .name = "indycam",
- .probe = indycam_probe,
- .remove = indycam_remove,
- .id_table = indycam_id,
+static struct i2c_driver indycam_driver = {
+ .driver = {
+ .owner = THIS_MODULE,
+ .name = "indycam",
+ },
+ .probe = indycam_probe,
+ .remove = indycam_remove,
+ .id_table = indycam_id,
};
+
+static __init int init_indycam(void)
+{
+ return i2c_add_driver(&indycam_driver);
+}
+
+static __exit void exit_indycam(void)
+{
+ i2c_del_driver(&indycam_driver);
+}
+
+module_init(init_indycam);
+module_exit(exit_indycam);
diff --git a/drivers/media/video/ir-kbd-i2c.c b/drivers/media/video/ir-kbd-i2c.c
index 27ae8bbfb477..ce4a75375909 100644
--- a/drivers/media/video/ir-kbd-i2c.c
+++ b/drivers/media/video/ir-kbd-i2c.c
@@ -44,7 +44,6 @@
#include <linux/errno.h>
#include <linux/slab.h>
#include <linux/i2c.h>
-#include <linux/i2c-id.h>
#include <linux/workqueue.h>
#include <media/ir-core.h>
@@ -146,26 +145,6 @@ static int get_key_pixelview(struct IR_i2c *ir, u32 *ir_key, u32 *ir_raw)
return 1;
}
-static int get_key_pv951(struct IR_i2c *ir, u32 *ir_key, u32 *ir_raw)
-{
- unsigned char b;
-
- /* poll IR chip */
- if (1 != i2c_master_recv(ir->c, &b, 1)) {
- dprintk(1,"read error\n");
- return -EIO;
- }
-
- /* ignore 0xaa */
- if (b==0xaa)
- return 0;
- dprintk(2,"key %02x\n", b);
-
- *ir_key = b;
- *ir_raw = b;
- return 1;
-}
-
static int get_key_fusionhdtv(struct IR_i2c *ir, u32 *ir_key, u32 *ir_raw)
{
unsigned char buf[4];
@@ -279,15 +258,9 @@ static void ir_key_poll(struct IR_i2c *ir)
static void ir_work(struct work_struct *work)
{
struct IR_i2c *ir = container_of(work, struct IR_i2c, work.work);
- int polling_interval = 100;
-
- /* MSI TV@nywhere Plus requires more frequent polling
- otherwise it will miss some keypresses */
- if (ir->c->adapter->id == I2C_HW_SAA7134 && ir->c->addr == 0x30)
- polling_interval = 50;
ir_key_poll(ir);
- schedule_delayed_work(&ir->work, msecs_to_jiffies(polling_interval));
+ schedule_delayed_work(&ir->work, msecs_to_jiffies(ir->polling_interval));
}
/* ----------------------------------------------------------------------- */
@@ -312,6 +285,7 @@ static int ir_probe(struct i2c_client *client, const struct i2c_device_id *id)
ir->c = client;
ir->input = input_dev;
+ ir->polling_interval = DEFAULT_POLLING_INTERVAL;
i2c_set_clientdata(client, ir);
switch(addr) {
@@ -321,12 +295,6 @@ static int ir_probe(struct i2c_client *client, const struct i2c_device_id *id)
ir_type = IR_TYPE_OTHER;
ir_codes = RC_MAP_EMPTY;
break;
- case 0x4b:
- name = "PV951";
- ir->get_key = get_key_pv951;
- ir_type = IR_TYPE_OTHER;
- ir_codes = RC_MAP_PV951;
- break;
case 0x18:
case 0x1f:
case 0x1a:
@@ -351,27 +319,6 @@ static int ir_probe(struct i2c_client *client, const struct i2c_device_id *id)
ir_type = IR_TYPE_RC5;
ir_codes = RC_MAP_FUSIONHDTV_MCE;
break;
- case 0x0b:
- case 0x47:
- case 0x71:
- if (adap->id == I2C_HW_B_CX2388x ||
- adap->id == I2C_HW_B_CX2341X) {
- /* Handled by cx88-input */
- name = adap->id == I2C_HW_B_CX2341X ? "CX2341x remote"
- : "CX2388x remote";
- ir_type = IR_TYPE_RC5;
- ir->get_key = get_key_haup_xvr;
- if (hauppauge == 1) {
- ir_codes = RC_MAP_HAUPPAUGE_NEW;
- } else {
- ir_codes = RC_MAP_RC5_TV;
- }
- } else {
- /* Handled by saa7134-input */
- name = "SAA713x remote";
- ir_type = IR_TYPE_OTHER;
- }
- break;
case 0x40:
name = "AVerMedia Cardbus remote";
ir->get_key = get_key_avermedia_cardbus;
@@ -390,6 +337,9 @@ static int ir_probe(struct i2c_client *client, const struct i2c_device_id *id)
if (init_data->type)
ir_type = init_data->type;
+ if (init_data->polling_interval)
+ ir->polling_interval = init_data->polling_interval;
+
switch (init_data->internal_get_key_func) {
case IR_KBD_GET_KEY_CUSTOM:
/* The bridge driver provided us its own function */
@@ -398,9 +348,6 @@ static int ir_probe(struct i2c_client *client, const struct i2c_device_id *id)
case IR_KBD_GET_KEY_PIXELVIEW:
ir->get_key = get_key_pixelview;
break;
- case IR_KBD_GET_KEY_PV951:
- ir->get_key = get_key_pv951;
- break;
case IR_KBD_GET_KEY_HAUP:
ir->get_key = get_key_haup;
break;
diff --git a/drivers/media/video/ivtv/ivtv-driver.h b/drivers/media/video/ivtv/ivtv-driver.h
index 75803141481e..04bacdbd10bb 100644
--- a/drivers/media/video/ivtv/ivtv-driver.h
+++ b/drivers/media/video/ivtv/ivtv-driver.h
@@ -811,15 +811,23 @@ static inline int ivtv_raw_vbi(const struct ivtv *itv)
/* Call the specified callback for all subdevs matching hw (if 0, then
match them all). Ignore any errors. */
#define ivtv_call_hw(itv, hw, o, f, args...) \
- __v4l2_device_call_subdevs(&(itv)->v4l2_dev, !(hw) || (sd->grp_id & (hw)), o, f , ##args)
+ do { \
+ struct v4l2_subdev *__sd; \
+ __v4l2_device_call_subdevs_p(&(itv)->v4l2_dev, __sd, \
+ !(hw) || (__sd->grp_id & (hw)), o, f , ##args); \
+ } while (0)
#define ivtv_call_all(itv, o, f, args...) ivtv_call_hw(itv, 0, o, f , ##args)
/* Call the specified callback for all subdevs matching hw (if 0, then
match them all). If the callback returns an error other than 0 or
-ENOIOCTLCMD, then return with that error code. */
-#define ivtv_call_hw_err(itv, hw, o, f, args...) \
- __v4l2_device_call_subdevs_until_err(&(itv)->v4l2_dev, !(hw) || (sd->grp_id & (hw)), o, f , ##args)
+#define ivtv_call_hw_err(itv, hw, o, f, args...) \
+({ \
+ struct v4l2_subdev *__sd; \
+ __v4l2_device_call_subdevs_until_err_p(&(itv)->v4l2_dev, __sd, \
+ !(hw) || (__sd->grp_id & (hw)), o, f , ##args); \
+})
#define ivtv_call_all_err(itv, o, f, args...) ivtv_call_hw_err(itv, 0, o, f , ##args)
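As a usage note (illustrative only; the particular subdev ops shown are just plausible examples, not something mandated by this change), call sites keep the same shape because the iterator variable is now declared inside the macros rather than expected in the caller's scope:

	/* broadcast to all subdevs, ignoring errors */
	ivtv_call_all(itv, core, s_power, 1);

	/* stop at the first error other than 0 or -ENOIOCTLCMD */
	err = ivtv_call_all_err(itv, video, s_stream, 0);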
diff --git a/drivers/media/video/ivtv/ivtv-i2c.c b/drivers/media/video/ivtv/ivtv-i2c.c
index a74fa099c565..665191c9b407 100644
--- a/drivers/media/video/ivtv/ivtv-i2c.c
+++ b/drivers/media/video/ivtv/ivtv-i2c.c
@@ -121,31 +121,6 @@ static const u8 hw_addrs[] = {
};
/* This array should match the IVTV_HW_ defines */
-static const char *hw_modules[] = {
- "cx25840",
- "saa7115",
- "saa7127",
- "msp3400",
- "tuner",
- "wm8775",
- "cs53l32a",
- NULL,
- "saa7115",
- "upd64031a",
- "upd64083",
- "saa717x",
- "wm8739",
- "vp27smpx",
- "m52790",
- NULL,
- NULL, /* IVTV_HW_I2C_IR_RX_AVER */
- NULL, /* IVTV_HW_I2C_IR_RX_HAUP_EXT */
- NULL, /* IVTV_HW_I2C_IR_RX_HAUP_INT */
- NULL, /* IVTV_HW_Z8F0811_IR_TX_HAUP */
- NULL, /* IVTV_HW_Z8F0811_IR_RX_HAUP */
-};
-
-/* This array should match the IVTV_HW_ defines */
static const char * const hw_devicenames[] = {
"cx25840",
"saa7115",
@@ -257,7 +232,6 @@ int ivtv_i2c_register(struct ivtv *itv, unsigned idx)
{
struct v4l2_subdev *sd;
struct i2c_adapter *adap = &itv->i2c_adap;
- const char *mod = hw_modules[idx];
const char *type = hw_devicenames[idx];
u32 hw = 1 << idx;
@@ -265,19 +239,16 @@ int ivtv_i2c_register(struct ivtv *itv, unsigned idx)
return -1;
if (hw == IVTV_HW_TUNER) {
/* special tuner handling */
- sd = v4l2_i2c_new_subdev(&itv->v4l2_dev,
- adap, mod, type,
- 0, itv->card_i2c->radio);
+ sd = v4l2_i2c_new_subdev(&itv->v4l2_dev, adap, type, 0,
+ itv->card_i2c->radio);
if (sd)
sd->grp_id = 1 << idx;
- sd = v4l2_i2c_new_subdev(&itv->v4l2_dev,
- adap, mod, type,
- 0, itv->card_i2c->demod);
+ sd = v4l2_i2c_new_subdev(&itv->v4l2_dev, adap, type, 0,
+ itv->card_i2c->demod);
if (sd)
sd->grp_id = 1 << idx;
- sd = v4l2_i2c_new_subdev(&itv->v4l2_dev,
- adap, mod, type,
- 0, itv->card_i2c->tv);
+ sd = v4l2_i2c_new_subdev(&itv->v4l2_dev, adap, type, 0,
+ itv->card_i2c->tv);
if (sd)
sd->grp_id = 1 << idx;
return sd ? 0 : -1;
@@ -293,16 +264,16 @@ int ivtv_i2c_register(struct ivtv *itv, unsigned idx)
/* It's an I2C device other than an analog tuner or IR chip */
if (hw == IVTV_HW_UPD64031A || hw == IVTV_HW_UPD6408X) {
sd = v4l2_i2c_new_subdev(&itv->v4l2_dev,
- adap, mod, type, 0, I2C_ADDRS(hw_addrs[idx]));
+ adap, type, 0, I2C_ADDRS(hw_addrs[idx]));
} else if (hw == IVTV_HW_CX25840) {
struct cx25840_platform_data pdata;
pdata.pvr150_workaround = itv->pvr150_workaround;
sd = v4l2_i2c_new_subdev_cfg(&itv->v4l2_dev,
- adap, mod, type, 0, &pdata, hw_addrs[idx], NULL);
+ adap, type, 0, &pdata, hw_addrs[idx], NULL);
} else {
sd = v4l2_i2c_new_subdev(&itv->v4l2_dev,
- adap, mod, type, hw_addrs[idx], NULL);
+ adap, type, hw_addrs[idx], NULL);
}
if (sd)
sd->grp_id = 1 << idx;
@@ -706,8 +677,7 @@ int init_ivtv_i2c(struct ivtv *itv)
/* Sanity checks for the I2C hardware arrays. They must be the
* same size.
*/
- if (ARRAY_SIZE(hw_devicenames) != ARRAY_SIZE(hw_addrs) ||
- ARRAY_SIZE(hw_devicenames) != ARRAY_SIZE(hw_modules)) {
+ if (ARRAY_SIZE(hw_devicenames) != ARRAY_SIZE(hw_addrs)) {
IVTV_ERR("Mismatched I2C hardware arrays\n");
return -ENODEV;
}
diff --git a/drivers/media/video/ivtv/ivtv-ioctl.c b/drivers/media/video/ivtv/ivtv-ioctl.c
index 4eed9123683e..b686da5e4326 100644
--- a/drivers/media/video/ivtv/ivtv-ioctl.c
+++ b/drivers/media/video/ivtv/ivtv-ioctl.c
@@ -37,7 +37,6 @@
#include <media/v4l2-chip-ident.h>
#include <media/v4l2-event.h>
#include <linux/dvb/audio.h>
-#include <linux/i2c-id.h>
u16 ivtv_service2vbi(int type)
{
diff --git a/drivers/media/video/ks0127.c b/drivers/media/video/ks0127.c
index 94734828053b..afa91182b448 100644
--- a/drivers/media/video/ks0127.c
+++ b/drivers/media/video/ks0127.c
@@ -43,7 +43,6 @@
#include <linux/slab.h>
#include <media/v4l2-device.h>
#include <media/v4l2-chip-ident.h>
-#include <media/v4l2-i2c-drv.h>
#include "ks0127.h"
MODULE_DESCRIPTION("KS0127 video decoder driver");
@@ -712,9 +711,25 @@ static const struct i2c_device_id ks0127_id[] = {
};
MODULE_DEVICE_TABLE(i2c, ks0127_id);
-static struct v4l2_i2c_driver_data v4l2_i2c_data = {
- .name = "ks0127",
- .probe = ks0127_probe,
- .remove = ks0127_remove,
- .id_table = ks0127_id,
+static struct i2c_driver ks0127_driver = {
+ .driver = {
+ .owner = THIS_MODULE,
+ .name = "ks0127",
+ },
+ .probe = ks0127_probe,
+ .remove = ks0127_remove,
+ .id_table = ks0127_id,
};
+
+static __init int init_ks0127(void)
+{
+ return i2c_add_driver(&ks0127_driver);
+}
+
+static __exit void exit_ks0127(void)
+{
+ i2c_del_driver(&ks0127_driver);
+}
+
+module_init(init_ks0127);
+module_exit(exit_ks0127);
diff --git a/drivers/media/video/m52790.c b/drivers/media/video/m52790.c
index 4491d018eba6..5e1c9a81984c 100644
--- a/drivers/media/video/m52790.c
+++ b/drivers/media/video/m52790.c
@@ -26,12 +26,10 @@
#include <linux/ioctl.h>
#include <asm/uaccess.h>
#include <linux/i2c.h>
-#include <linux/i2c-id.h>
#include <linux/videodev2.h>
#include <media/m52790.h>
#include <media/v4l2-device.h>
#include <media/v4l2-chip-ident.h>
-#include <media/v4l2-i2c-drv.h>
MODULE_DESCRIPTION("i2c device driver for m52790 A/V switch");
MODULE_AUTHOR("Hans Verkuil");
@@ -205,9 +203,25 @@ static const struct i2c_device_id m52790_id[] = {
};
MODULE_DEVICE_TABLE(i2c, m52790_id);
-static struct v4l2_i2c_driver_data v4l2_i2c_data = {
- .name = "m52790",
- .probe = m52790_probe,
- .remove = m52790_remove,
- .id_table = m52790_id,
+static struct i2c_driver m52790_driver = {
+ .driver = {
+ .owner = THIS_MODULE,
+ .name = "m52790",
+ },
+ .probe = m52790_probe,
+ .remove = m52790_remove,
+ .id_table = m52790_id,
};
+
+static __init int init_m52790(void)
+{
+ return i2c_add_driver(&m52790_driver);
+}
+
+static __exit void exit_m52790(void)
+{
+ i2c_del_driver(&m52790_driver);
+}
+
+module_init(init_m52790);
+module_exit(exit_m52790);
diff --git a/drivers/media/video/mem2mem_testdev.c b/drivers/media/video/mem2mem_testdev.c
index a7210d981388..3b19f5b25a72 100644
--- a/drivers/media/video/mem2mem_testdev.c
+++ b/drivers/media/video/mem2mem_testdev.c
@@ -848,7 +848,7 @@ static void queue_init(void *priv, struct videobuf_queue *vq,
videobuf_queue_vmalloc_init(vq, &m2mtest_qops, ctx->dev->v4l2_dev.dev,
&ctx->dev->irqlock, type, V4L2_FIELD_NONE,
- sizeof(struct m2mtest_buffer), priv);
+ sizeof(struct m2mtest_buffer), priv, NULL);
}
diff --git a/drivers/media/video/msp3400-driver.c b/drivers/media/video/msp3400-driver.c
index 0e412131da7c..b1763ac93ab3 100644
--- a/drivers/media/video/msp3400-driver.c
+++ b/drivers/media/video/msp3400-driver.c
@@ -56,7 +56,6 @@
#include <linux/videodev2.h>
#include <media/v4l2-device.h>
#include <media/v4l2-ioctl.h>
-#include <media/v4l2-i2c-drv.h>
#include <media/msp3400.h>
#include <media/tvaudio.h>
#include "msp3400-driver.h"
@@ -382,7 +381,12 @@ static int msp_s_ctrl(struct v4l2_ctrl *ctrl)
void msp_update_volume(struct msp_state *state)
{
- v4l2_ctrl_s_ctrl(state->volume, v4l2_ctrl_g_ctrl(state->volume));
+ /* Force an update of the volume/mute cluster */
+ v4l2_ctrl_lock(state->volume);
+ state->volume->val = state->volume->cur.val;
+ state->muted->val = state->muted->cur.val;
+ msp_s_ctrl(state->volume);
+ v4l2_ctrl_unlock(state->volume);
}
/* --- v4l2 ioctls --- */
@@ -843,15 +847,31 @@ static const struct i2c_device_id msp_id[] = {
};
MODULE_DEVICE_TABLE(i2c, msp_id);
-static struct v4l2_i2c_driver_data v4l2_i2c_data = {
- .name = "msp3400",
- .probe = msp_probe,
- .remove = msp_remove,
- .suspend = msp_suspend,
- .resume = msp_resume,
- .id_table = msp_id,
+static struct i2c_driver msp_driver = {
+ .driver = {
+ .owner = THIS_MODULE,
+ .name = "msp3400",
+ },
+ .probe = msp_probe,
+ .remove = msp_remove,
+ .suspend = msp_suspend,
+ .resume = msp_resume,
+ .id_table = msp_id,
};
+static __init int init_msp(void)
+{
+ return i2c_add_driver(&msp_driver);
+}
+
+static __exit void exit_msp(void)
+{
+ i2c_del_driver(&msp_driver);
+}
+
+module_init(init_msp);
+module_exit(exit_msp);
+
/*
* Overrides for Emacs so that we follow Linus's tabbing style.
* ---------------------------------------------------------------------------
diff --git a/drivers/media/video/mt9m001.c b/drivers/media/video/mt9m001.c
index 79f096ddcf5d..fcb4cd941853 100644
--- a/drivers/media/video/mt9m001.c
+++ b/drivers/media/video/mt9m001.c
@@ -157,7 +157,7 @@ static int mt9m001_init(struct i2c_client *client)
static int mt9m001_s_stream(struct v4l2_subdev *sd, int enable)
{
- struct i2c_client *client = sd->priv;
+ struct i2c_client *client = v4l2_get_subdevdata(sd);
/* Switch to master "normal" mode or stop sensor readout */
if (reg_write(client, MT9M001_OUTPUT_CONTROL, enable ? 2 : 0) < 0)
@@ -206,7 +206,7 @@ static unsigned long mt9m001_query_bus_param(struct soc_camera_device *icd)
static int mt9m001_s_crop(struct v4l2_subdev *sd, struct v4l2_crop *a)
{
- struct i2c_client *client = sd->priv;
+ struct i2c_client *client = v4l2_get_subdevdata(sd);
struct mt9m001 *mt9m001 = to_mt9m001(client);
struct v4l2_rect rect = a->c;
struct soc_camera_device *icd = client->dev.platform_data;
@@ -271,7 +271,7 @@ static int mt9m001_s_crop(struct v4l2_subdev *sd, struct v4l2_crop *a)
static int mt9m001_g_crop(struct v4l2_subdev *sd, struct v4l2_crop *a)
{
- struct i2c_client *client = sd->priv;
+ struct i2c_client *client = v4l2_get_subdevdata(sd);
struct mt9m001 *mt9m001 = to_mt9m001(client);
a->c = mt9m001->rect;
@@ -297,7 +297,7 @@ static int mt9m001_cropcap(struct v4l2_subdev *sd, struct v4l2_cropcap *a)
static int mt9m001_g_fmt(struct v4l2_subdev *sd,
struct v4l2_mbus_framefmt *mf)
{
- struct i2c_client *client = sd->priv;
+ struct i2c_client *client = v4l2_get_subdevdata(sd);
struct mt9m001 *mt9m001 = to_mt9m001(client);
mf->width = mt9m001->rect.width;
@@ -312,7 +312,7 @@ static int mt9m001_g_fmt(struct v4l2_subdev *sd,
static int mt9m001_s_fmt(struct v4l2_subdev *sd,
struct v4l2_mbus_framefmt *mf)
{
- struct i2c_client *client = sd->priv;
+ struct i2c_client *client = v4l2_get_subdevdata(sd);
struct mt9m001 *mt9m001 = to_mt9m001(client);
struct v4l2_crop a = {
.c = {
@@ -340,7 +340,7 @@ static int mt9m001_s_fmt(struct v4l2_subdev *sd,
static int mt9m001_try_fmt(struct v4l2_subdev *sd,
struct v4l2_mbus_framefmt *mf)
{
- struct i2c_client *client = sd->priv;
+ struct i2c_client *client = v4l2_get_subdevdata(sd);
struct mt9m001 *mt9m001 = to_mt9m001(client);
const struct mt9m001_datafmt *fmt;
@@ -367,7 +367,7 @@ static int mt9m001_try_fmt(struct v4l2_subdev *sd,
static int mt9m001_g_chip_ident(struct v4l2_subdev *sd,
struct v4l2_dbg_chip_ident *id)
{
- struct i2c_client *client = sd->priv;
+ struct i2c_client *client = v4l2_get_subdevdata(sd);
struct mt9m001 *mt9m001 = to_mt9m001(client);
if (id->match.type != V4L2_CHIP_MATCH_I2C_ADDR)
@@ -386,7 +386,7 @@ static int mt9m001_g_chip_ident(struct v4l2_subdev *sd,
static int mt9m001_g_register(struct v4l2_subdev *sd,
struct v4l2_dbg_register *reg)
{
- struct i2c_client *client = sd->priv;
+ struct i2c_client *client = v4l2_get_subdevdata(sd);
if (reg->match.type != V4L2_CHIP_MATCH_I2C_ADDR || reg->reg > 0xff)
return -EINVAL;
@@ -406,7 +406,7 @@ static int mt9m001_g_register(struct v4l2_subdev *sd,
static int mt9m001_s_register(struct v4l2_subdev *sd,
struct v4l2_dbg_register *reg)
{
- struct i2c_client *client = sd->priv;
+ struct i2c_client *client = v4l2_get_subdevdata(sd);
if (reg->match.type != V4L2_CHIP_MATCH_I2C_ADDR || reg->reg > 0xff)
return -EINVAL;
@@ -468,7 +468,7 @@ static struct soc_camera_ops mt9m001_ops = {
static int mt9m001_g_ctrl(struct v4l2_subdev *sd, struct v4l2_control *ctrl)
{
- struct i2c_client *client = sd->priv;
+ struct i2c_client *client = v4l2_get_subdevdata(sd);
struct mt9m001 *mt9m001 = to_mt9m001(client);
int data;
@@ -494,7 +494,7 @@ static int mt9m001_g_ctrl(struct v4l2_subdev *sd, struct v4l2_control *ctrl)
static int mt9m001_s_ctrl(struct v4l2_subdev *sd, struct v4l2_control *ctrl)
{
- struct i2c_client *client = sd->priv;
+ struct i2c_client *client = v4l2_get_subdevdata(sd);
struct mt9m001 *mt9m001 = to_mt9m001(client);
struct soc_camera_device *icd = client->dev.platform_data;
const struct v4l2_queryctrl *qctrl;
@@ -683,7 +683,7 @@ static void mt9m001_video_remove(struct soc_camera_device *icd)
static int mt9m001_g_skip_top_lines(struct v4l2_subdev *sd, u32 *lines)
{
- struct i2c_client *client = sd->priv;
+ struct i2c_client *client = v4l2_get_subdevdata(sd);
struct mt9m001 *mt9m001 = to_mt9m001(client);
*lines = mt9m001->y_skip_top;
@@ -704,7 +704,7 @@ static struct v4l2_subdev_core_ops mt9m001_subdev_core_ops = {
static int mt9m001_enum_fmt(struct v4l2_subdev *sd, unsigned int index,
enum v4l2_mbus_pixelcode *code)
{
- struct i2c_client *client = sd->priv;
+ struct i2c_client *client = v4l2_get_subdevdata(sd);
struct mt9m001 *mt9m001 = to_mt9m001(client);
if (index >= mt9m001->num_fmts)
diff --git a/drivers/media/video/mt9m111.c b/drivers/media/video/mt9m111.c
index c71af4e0e517..525a16e73285 100644
--- a/drivers/media/video/mt9m111.c
+++ b/drivers/media/video/mt9m111.c
@@ -100,14 +100,14 @@
#define MT9M111_OUTFMT_BYPASS_IFP (1 << 10)
#define MT9M111_OUTFMT_INV_PIX_CLOCK (1 << 9)
#define MT9M111_OUTFMT_RGB (1 << 8)
-#define MT9M111_OUTFMT_RGB565 (0x0 << 6)
-#define MT9M111_OUTFMT_RGB555 (0x1 << 6)
-#define MT9M111_OUTFMT_RGB444x (0x2 << 6)
-#define MT9M111_OUTFMT_RGBx444 (0x3 << 6)
-#define MT9M111_OUTFMT_TST_RAMP_OFF (0x0 << 4)
-#define MT9M111_OUTFMT_TST_RAMP_COL (0x1 << 4)
-#define MT9M111_OUTFMT_TST_RAMP_ROW (0x2 << 4)
-#define MT9M111_OUTFMT_TST_RAMP_FRAME (0x3 << 4)
+#define MT9M111_OUTFMT_RGB565 (0 << 6)
+#define MT9M111_OUTFMT_RGB555 (1 << 6)
+#define MT9M111_OUTFMT_RGB444x (2 << 6)
+#define MT9M111_OUTFMT_RGBx444 (3 << 6)
+#define MT9M111_OUTFMT_TST_RAMP_OFF (0 << 4)
+#define MT9M111_OUTFMT_TST_RAMP_COL (1 << 4)
+#define MT9M111_OUTFMT_TST_RAMP_ROW (2 << 4)
+#define MT9M111_OUTFMT_TST_RAMP_FRAME (3 << 4)
#define MT9M111_OUTFMT_SHIFT_3_UP (1 << 3)
#define MT9M111_OUTFMT_AVG_CHROMA (1 << 2)
#define MT9M111_OUTFMT_SWAP_YCbCr_C_Y (1 << 1)
@@ -124,7 +124,7 @@
#define reg_clear(reg, val) mt9m111_reg_clear(client, MT9M111_##reg, (val))
#define MT9M111_MIN_DARK_ROWS 8
-#define MT9M111_MIN_DARK_COLS 24
+#define MT9M111_MIN_DARK_COLS 26
#define MT9M111_MAX_HEIGHT 1024
#define MT9M111_MAX_WIDTH 1280
@@ -440,7 +440,7 @@ static int mt9m111_make_rect(struct i2c_client *client,
static int mt9m111_s_crop(struct v4l2_subdev *sd, struct v4l2_crop *a)
{
struct v4l2_rect rect = a->c;
- struct i2c_client *client = sd->priv;
+ struct i2c_client *client = v4l2_get_subdevdata(sd);
struct mt9m111 *mt9m111 = to_mt9m111(client);
int ret;
@@ -458,7 +458,7 @@ static int mt9m111_s_crop(struct v4l2_subdev *sd, struct v4l2_crop *a)
static int mt9m111_g_crop(struct v4l2_subdev *sd, struct v4l2_crop *a)
{
- struct i2c_client *client = sd->priv;
+ struct i2c_client *client = v4l2_get_subdevdata(sd);
struct mt9m111 *mt9m111 = to_mt9m111(client);
a->c = mt9m111->rect;
@@ -486,7 +486,7 @@ static int mt9m111_cropcap(struct v4l2_subdev *sd, struct v4l2_cropcap *a)
static int mt9m111_g_fmt(struct v4l2_subdev *sd,
struct v4l2_mbus_framefmt *mf)
{
- struct i2c_client *client = sd->priv;
+ struct i2c_client *client = v4l2_get_subdevdata(sd);
struct mt9m111 *mt9m111 = to_mt9m111(client);
mf->width = mt9m111->rect.width;
@@ -549,7 +549,7 @@ static int mt9m111_set_pixfmt(struct i2c_client *client,
static int mt9m111_s_fmt(struct v4l2_subdev *sd,
struct v4l2_mbus_framefmt *mf)
{
- struct i2c_client *client = sd->priv;
+ struct i2c_client *client = v4l2_get_subdevdata(sd);
const struct mt9m111_datafmt *fmt;
struct mt9m111 *mt9m111 = to_mt9m111(client);
struct v4l2_rect rect = {
@@ -584,7 +584,7 @@ static int mt9m111_s_fmt(struct v4l2_subdev *sd,
static int mt9m111_try_fmt(struct v4l2_subdev *sd,
struct v4l2_mbus_framefmt *mf)
{
- struct i2c_client *client = sd->priv;
+ struct i2c_client *client = v4l2_get_subdevdata(sd);
struct mt9m111 *mt9m111 = to_mt9m111(client);
const struct mt9m111_datafmt *fmt;
bool bayer = mf->code == V4L2_MBUS_FMT_SBGGR8_1X8 ||
@@ -624,7 +624,7 @@ static int mt9m111_try_fmt(struct v4l2_subdev *sd,
static int mt9m111_g_chip_ident(struct v4l2_subdev *sd,
struct v4l2_dbg_chip_ident *id)
{
- struct i2c_client *client = sd->priv;
+ struct i2c_client *client = v4l2_get_subdevdata(sd);
struct mt9m111 *mt9m111 = to_mt9m111(client);
if (id->match.type != V4L2_CHIP_MATCH_I2C_ADDR)
@@ -643,7 +643,7 @@ static int mt9m111_g_chip_ident(struct v4l2_subdev *sd,
static int mt9m111_g_register(struct v4l2_subdev *sd,
struct v4l2_dbg_register *reg)
{
- struct i2c_client *client = sd->priv;
+ struct i2c_client *client = v4l2_get_subdevdata(sd);
int val;
if (reg->match.type != V4L2_CHIP_MATCH_I2C_ADDR || reg->reg > 0x2ff)
@@ -664,7 +664,7 @@ static int mt9m111_g_register(struct v4l2_subdev *sd,
static int mt9m111_s_register(struct v4l2_subdev *sd,
struct v4l2_dbg_register *reg)
{
- struct i2c_client *client = sd->priv;
+ struct i2c_client *client = v4l2_get_subdevdata(sd);
if (reg->match.type != V4L2_CHIP_MATCH_I2C_ADDR || reg->reg > 0x2ff)
return -EINVAL;
@@ -812,7 +812,7 @@ static int mt9m111_set_autowhitebalance(struct i2c_client *client, int on)
static int mt9m111_g_ctrl(struct v4l2_subdev *sd, struct v4l2_control *ctrl)
{
- struct i2c_client *client = sd->priv;
+ struct i2c_client *client = v4l2_get_subdevdata(sd);
struct mt9m111 *mt9m111 = to_mt9m111(client);
int data;
@@ -855,7 +855,7 @@ static int mt9m111_g_ctrl(struct v4l2_subdev *sd, struct v4l2_control *ctrl)
static int mt9m111_s_ctrl(struct v4l2_subdev *sd, struct v4l2_control *ctrl)
{
- struct i2c_client *client = sd->priv;
+ struct i2c_client *client = v4l2_get_subdevdata(sd);
struct mt9m111 *mt9m111 = to_mt9m111(client);
const struct v4l2_queryctrl *qctrl;
int ret;
diff --git a/drivers/media/video/mt9t031.c b/drivers/media/video/mt9t031.c
index a9a28b214235..9bd44a816ea1 100644
--- a/drivers/media/video/mt9t031.c
+++ b/drivers/media/video/mt9t031.c
@@ -163,7 +163,7 @@ static int mt9t031_disable(struct i2c_client *client)
static int mt9t031_s_stream(struct v4l2_subdev *sd, int enable)
{
- struct i2c_client *client = sd->priv;
+ struct i2c_client *client = v4l2_get_subdevdata(sd);
int ret;
if (enable)
@@ -393,7 +393,7 @@ static int mt9t031_set_params(struct i2c_client *client,
static int mt9t031_s_crop(struct v4l2_subdev *sd, struct v4l2_crop *a)
{
struct v4l2_rect rect = a->c;
- struct i2c_client *client = sd->priv;
+ struct i2c_client *client = v4l2_get_subdevdata(sd);
struct mt9t031 *mt9t031 = to_mt9t031(client);
rect.width = ALIGN(rect.width, 2);
@@ -410,7 +410,7 @@ static int mt9t031_s_crop(struct v4l2_subdev *sd, struct v4l2_crop *a)
static int mt9t031_g_crop(struct v4l2_subdev *sd, struct v4l2_crop *a)
{
- struct i2c_client *client = sd->priv;
+ struct i2c_client *client = v4l2_get_subdevdata(sd);
struct mt9t031 *mt9t031 = to_mt9t031(client);
a->c = mt9t031->rect;
@@ -436,7 +436,7 @@ static int mt9t031_cropcap(struct v4l2_subdev *sd, struct v4l2_cropcap *a)
static int mt9t031_g_fmt(struct v4l2_subdev *sd,
struct v4l2_mbus_framefmt *mf)
{
- struct i2c_client *client = sd->priv;
+ struct i2c_client *client = v4l2_get_subdevdata(sd);
struct mt9t031 *mt9t031 = to_mt9t031(client);
mf->width = mt9t031->rect.width / mt9t031->xskip;
@@ -451,7 +451,7 @@ static int mt9t031_g_fmt(struct v4l2_subdev *sd,
static int mt9t031_s_fmt(struct v4l2_subdev *sd,
struct v4l2_mbus_framefmt *mf)
{
- struct i2c_client *client = sd->priv;
+ struct i2c_client *client = v4l2_get_subdevdata(sd);
struct mt9t031 *mt9t031 = to_mt9t031(client);
u16 xskip, yskip;
struct v4l2_rect rect = mt9t031->rect;
@@ -490,7 +490,7 @@ static int mt9t031_try_fmt(struct v4l2_subdev *sd,
static int mt9t031_g_chip_ident(struct v4l2_subdev *sd,
struct v4l2_dbg_chip_ident *id)
{
- struct i2c_client *client = sd->priv;
+ struct i2c_client *client = v4l2_get_subdevdata(sd);
struct mt9t031 *mt9t031 = to_mt9t031(client);
if (id->match.type != V4L2_CHIP_MATCH_I2C_ADDR)
@@ -509,7 +509,7 @@ static int mt9t031_g_chip_ident(struct v4l2_subdev *sd,
static int mt9t031_g_register(struct v4l2_subdev *sd,
struct v4l2_dbg_register *reg)
{
- struct i2c_client *client = sd->priv;
+ struct i2c_client *client = v4l2_get_subdevdata(sd);
if (reg->match.type != V4L2_CHIP_MATCH_I2C_ADDR || reg->reg > 0xff)
return -EINVAL;
@@ -528,7 +528,7 @@ static int mt9t031_g_register(struct v4l2_subdev *sd,
static int mt9t031_s_register(struct v4l2_subdev *sd,
struct v4l2_dbg_register *reg)
{
- struct i2c_client *client = sd->priv;
+ struct i2c_client *client = v4l2_get_subdevdata(sd);
if (reg->match.type != V4L2_CHIP_MATCH_I2C_ADDR || reg->reg > 0xff)
return -EINVAL;
@@ -545,7 +545,7 @@ static int mt9t031_s_register(struct v4l2_subdev *sd,
static int mt9t031_g_ctrl(struct v4l2_subdev *sd, struct v4l2_control *ctrl)
{
- struct i2c_client *client = sd->priv;
+ struct i2c_client *client = v4l2_get_subdevdata(sd);
struct mt9t031 *mt9t031 = to_mt9t031(client);
int data;
@@ -577,7 +577,7 @@ static int mt9t031_g_ctrl(struct v4l2_subdev *sd, struct v4l2_control *ctrl)
static int mt9t031_s_ctrl(struct v4l2_subdev *sd, struct v4l2_control *ctrl)
{
- struct i2c_client *client = sd->priv;
+ struct i2c_client *client = v4l2_get_subdevdata(sd);
struct mt9t031 *mt9t031 = to_mt9t031(client);
const struct v4l2_queryctrl *qctrl;
int data;
@@ -703,7 +703,7 @@ static int mt9t031_runtime_resume(struct device *dev)
struct soc_camera_device *icd = container_of(vdev->parent,
struct soc_camera_device, dev);
struct v4l2_subdev *sd = soc_camera_to_subdev(icd);
- struct i2c_client *client = sd->priv;
+ struct i2c_client *client = v4l2_get_subdevdata(sd);
struct mt9t031 *mt9t031 = to_mt9t031(client);
int ret;
@@ -780,7 +780,7 @@ static int mt9t031_video_probe(struct i2c_client *client)
static int mt9t031_g_skip_top_lines(struct v4l2_subdev *sd, u32 *lines)
{
- struct i2c_client *client = sd->priv;
+ struct i2c_client *client = v4l2_get_subdevdata(sd);
struct mt9t031 *mt9t031 = to_mt9t031(client);
*lines = mt9t031->y_skip_top;
diff --git a/drivers/media/video/mt9t112.c b/drivers/media/video/mt9t112.c
index 8ec47e42d4d0..bffa9ee10968 100644
--- a/drivers/media/video/mt9t112.c
+++ b/drivers/media/video/mt9t112.c
@@ -804,7 +804,7 @@ static struct soc_camera_ops mt9t112_ops = {
static int mt9t112_g_chip_ident(struct v4l2_subdev *sd,
struct v4l2_dbg_chip_ident *id)
{
- struct i2c_client *client = sd->priv;
+ struct i2c_client *client = v4l2_get_subdevdata(sd);
struct mt9t112_priv *priv = to_mt9t112(client);
id->ident = priv->model;
@@ -817,7 +817,7 @@ static int mt9t112_g_chip_ident(struct v4l2_subdev *sd,
static int mt9t112_g_register(struct v4l2_subdev *sd,
struct v4l2_dbg_register *reg)
{
- struct i2c_client *client = sd->priv;
+ struct i2c_client *client = v4l2_get_subdevdata(sd);
int ret;
reg->size = 2;
@@ -831,7 +831,7 @@ static int mt9t112_g_register(struct v4l2_subdev *sd,
static int mt9t112_s_register(struct v4l2_subdev *sd,
struct v4l2_dbg_register *reg)
{
- struct i2c_client *client = sd->priv;
+ struct i2c_client *client = v4l2_get_subdevdata(sd);
int ret;
mt9t112_reg_write(ret, client, reg->reg, reg->val);
@@ -858,7 +858,7 @@ static struct v4l2_subdev_core_ops mt9t112_subdev_core_ops = {
************************************************************************/
static int mt9t112_s_stream(struct v4l2_subdev *sd, int enable)
{
- struct i2c_client *client = sd->priv;
+ struct i2c_client *client = v4l2_get_subdevdata(sd);
struct mt9t112_priv *priv = to_mt9t112(client);
int ret = 0;
@@ -968,7 +968,7 @@ static int mt9t112_g_crop(struct v4l2_subdev *sd, struct v4l2_crop *a)
static int mt9t112_s_crop(struct v4l2_subdev *sd, struct v4l2_crop *a)
{
- struct i2c_client *client = sd->priv;
+ struct i2c_client *client = v4l2_get_subdevdata(sd);
struct v4l2_rect *rect = &a->c;
return mt9t112_set_params(client, rect->width, rect->height,
@@ -978,7 +978,7 @@ static int mt9t112_s_crop(struct v4l2_subdev *sd, struct v4l2_crop *a)
static int mt9t112_g_fmt(struct v4l2_subdev *sd,
struct v4l2_mbus_framefmt *mf)
{
- struct i2c_client *client = sd->priv;
+ struct i2c_client *client = v4l2_get_subdevdata(sd);
struct mt9t112_priv *priv = to_mt9t112(client);
if (!priv->format) {
@@ -1000,7 +1000,7 @@ static int mt9t112_g_fmt(struct v4l2_subdev *sd,
static int mt9t112_s_fmt(struct v4l2_subdev *sd,
struct v4l2_mbus_framefmt *mf)
{
- struct i2c_client *client = sd->priv;
+ struct i2c_client *client = v4l2_get_subdevdata(sd);
/* TODO: set colorspace */
return mt9t112_set_params(client, mf->width, mf->height, mf->code);
diff --git a/drivers/media/video/mt9v011.c b/drivers/media/video/mt9v011.c
index f5e778d5ca9f..209ff97261a9 100644
--- a/drivers/media/video/mt9v011.c
+++ b/drivers/media/video/mt9v011.c
@@ -11,9 +11,8 @@
#include <linux/delay.h>
#include <asm/div64.h>
#include <media/v4l2-device.h>
-#include "mt9v011.h"
-#include <media/v4l2-i2c-drv.h>
#include <media/v4l2-chip-ident.h>
+#include "mt9v011.h"
MODULE_DESCRIPTION("Micron mt9v011 sensor driver");
MODULE_AUTHOR("Mauro Carvalho Chehab <mchehab@redhat.com>");
@@ -624,9 +623,25 @@ static const struct i2c_device_id mt9v011_id[] = {
};
MODULE_DEVICE_TABLE(i2c, mt9v011_id);
-static struct v4l2_i2c_driver_data v4l2_i2c_data = {
- .name = "mt9v011",
- .probe = mt9v011_probe,
- .remove = mt9v011_remove,
- .id_table = mt9v011_id,
+static struct i2c_driver mt9v011_driver = {
+ .driver = {
+ .owner = THIS_MODULE,
+ .name = "mt9v011",
+ },
+ .probe = mt9v011_probe,
+ .remove = mt9v011_remove,
+ .id_table = mt9v011_id,
};
+
+static __init int init_mt9v011(void)
+{
+ return i2c_add_driver(&mt9v011_driver);
+}
+
+static __exit void exit_mt9v011(void)
+{
+ i2c_del_driver(&mt9v011_driver);
+}
+
+module_init(init_mt9v011);
+module_exit(exit_mt9v011);
diff --git a/drivers/media/video/mt9v022.c b/drivers/media/video/mt9v022.c
index b48473c7896b..b96171cc79f9 100644
--- a/drivers/media/video/mt9v022.c
+++ b/drivers/media/video/mt9v022.c
@@ -184,7 +184,7 @@ static int mt9v022_init(struct i2c_client *client)
static int mt9v022_s_stream(struct v4l2_subdev *sd, int enable)
{
- struct i2c_client *client = sd->priv;
+ struct i2c_client *client = v4l2_get_subdevdata(sd);
struct mt9v022 *mt9v022 = to_mt9v022(client);
if (enable)
@@ -273,7 +273,7 @@ static unsigned long mt9v022_query_bus_param(struct soc_camera_device *icd)
static int mt9v022_s_crop(struct v4l2_subdev *sd, struct v4l2_crop *a)
{
- struct i2c_client *client = sd->priv;
+ struct i2c_client *client = v4l2_get_subdevdata(sd);
struct mt9v022 *mt9v022 = to_mt9v022(client);
struct v4l2_rect rect = a->c;
int ret;
@@ -334,7 +334,7 @@ static int mt9v022_s_crop(struct v4l2_subdev *sd, struct v4l2_crop *a)
static int mt9v022_g_crop(struct v4l2_subdev *sd, struct v4l2_crop *a)
{
- struct i2c_client *client = sd->priv;
+ struct i2c_client *client = v4l2_get_subdevdata(sd);
struct mt9v022 *mt9v022 = to_mt9v022(client);
a->c = mt9v022->rect;
@@ -360,7 +360,7 @@ static int mt9v022_cropcap(struct v4l2_subdev *sd, struct v4l2_cropcap *a)
static int mt9v022_g_fmt(struct v4l2_subdev *sd,
struct v4l2_mbus_framefmt *mf)
{
- struct i2c_client *client = sd->priv;
+ struct i2c_client *client = v4l2_get_subdevdata(sd);
struct mt9v022 *mt9v022 = to_mt9v022(client);
mf->width = mt9v022->rect.width;
@@ -375,7 +375,7 @@ static int mt9v022_g_fmt(struct v4l2_subdev *sd,
static int mt9v022_s_fmt(struct v4l2_subdev *sd,
struct v4l2_mbus_framefmt *mf)
{
- struct i2c_client *client = sd->priv;
+ struct i2c_client *client = v4l2_get_subdevdata(sd);
struct mt9v022 *mt9v022 = to_mt9v022(client);
struct v4l2_crop a = {
.c = {
@@ -422,7 +422,7 @@ static int mt9v022_s_fmt(struct v4l2_subdev *sd,
static int mt9v022_try_fmt(struct v4l2_subdev *sd,
struct v4l2_mbus_framefmt *mf)
{
- struct i2c_client *client = sd->priv;
+ struct i2c_client *client = v4l2_get_subdevdata(sd);
struct mt9v022 *mt9v022 = to_mt9v022(client);
const struct mt9v022_datafmt *fmt;
int align = mf->code == V4L2_MBUS_FMT_SBGGR8_1X8 ||
@@ -448,7 +448,7 @@ static int mt9v022_try_fmt(struct v4l2_subdev *sd,
static int mt9v022_g_chip_ident(struct v4l2_subdev *sd,
struct v4l2_dbg_chip_ident *id)
{
- struct i2c_client *client = sd->priv;
+ struct i2c_client *client = v4l2_get_subdevdata(sd);
struct mt9v022 *mt9v022 = to_mt9v022(client);
if (id->match.type != V4L2_CHIP_MATCH_I2C_ADDR)
@@ -467,7 +467,7 @@ static int mt9v022_g_chip_ident(struct v4l2_subdev *sd,
static int mt9v022_g_register(struct v4l2_subdev *sd,
struct v4l2_dbg_register *reg)
{
- struct i2c_client *client = sd->priv;
+ struct i2c_client *client = v4l2_get_subdevdata(sd);
if (reg->match.type != V4L2_CHIP_MATCH_I2C_ADDR || reg->reg > 0xff)
return -EINVAL;
@@ -487,7 +487,7 @@ static int mt9v022_g_register(struct v4l2_subdev *sd,
static int mt9v022_s_register(struct v4l2_subdev *sd,
struct v4l2_dbg_register *reg)
{
- struct i2c_client *client = sd->priv;
+ struct i2c_client *client = v4l2_get_subdevdata(sd);
if (reg->match.type != V4L2_CHIP_MATCH_I2C_ADDR || reg->reg > 0xff)
return -EINVAL;
@@ -565,7 +565,7 @@ static struct soc_camera_ops mt9v022_ops = {
static int mt9v022_g_ctrl(struct v4l2_subdev *sd, struct v4l2_control *ctrl)
{
- struct i2c_client *client = sd->priv;
+ struct i2c_client *client = v4l2_get_subdevdata(sd);
const struct v4l2_queryctrl *qctrl;
unsigned long range;
int data;
@@ -622,7 +622,7 @@ static int mt9v022_g_ctrl(struct v4l2_subdev *sd, struct v4l2_control *ctrl)
static int mt9v022_s_ctrl(struct v4l2_subdev *sd, struct v4l2_control *ctrl)
{
int data;
- struct i2c_client *client = sd->priv;
+ struct i2c_client *client = v4l2_get_subdevdata(sd);
const struct v4l2_queryctrl *qctrl;
qctrl = soc_camera_find_qctrl(&mt9v022_ops, ctrl->id);
@@ -817,7 +817,7 @@ static void mt9v022_video_remove(struct soc_camera_device *icd)
static int mt9v022_g_skip_top_lines(struct v4l2_subdev *sd, u32 *lines)
{
- struct i2c_client *client = sd->priv;
+ struct i2c_client *client = v4l2_get_subdevdata(sd);
struct mt9v022 *mt9v022 = to_mt9v022(client);
*lines = mt9v022->y_skip_top;
@@ -838,7 +838,7 @@ static struct v4l2_subdev_core_ops mt9v022_subdev_core_ops = {
static int mt9v022_enum_fmt(struct v4l2_subdev *sd, unsigned int index,
enum v4l2_mbus_pixelcode *code)
{
- struct i2c_client *client = sd->priv;
+ struct i2c_client *client = v4l2_get_subdevdata(sd);
struct mt9v022 *mt9v022 = to_mt9v022(client);
if (index >= mt9v022->num_fmts)
diff --git a/drivers/media/video/mx1_camera.c b/drivers/media/video/mx1_camera.c
index 5c17f9ec3d7c..5e486a88ad7c 100644
--- a/drivers/media/video/mx1_camera.c
+++ b/drivers/media/video/mx1_camera.c
@@ -161,7 +161,7 @@ static void free_buffer(struct videobuf_queue *vq, struct mx1_buffer *buf)
* This waits until this buffer is out of danger, i.e., until it is no
* longer in STATE_QUEUED or STATE_ACTIVE
*/
- videobuf_waiton(vb, 0, 0);
+ videobuf_waiton(vq, vb, 0, 0);
videobuf_dma_contig_free(vq, vb);
vb->state = VIDEOBUF_NEEDS_INIT;
@@ -385,7 +385,7 @@ static void mx1_camera_init_videobuf(struct videobuf_queue *q,
&pcdev->lock,
V4L2_BUF_TYPE_VIDEO_CAPTURE,
V4L2_FIELD_NONE,
- sizeof(struct mx1_buffer), icd);
+ sizeof(struct mx1_buffer), icd, NULL);
}
static int mclk_get_divisor(struct mx1_camera_dev *pcdev)
@@ -638,7 +638,7 @@ static int mx1_camera_try_fmt(struct soc_camera_device *icd,
return 0;
}
-static int mx1_camera_reqbufs(struct soc_camera_file *icf,
+static int mx1_camera_reqbufs(struct soc_camera_device *icd,
struct v4l2_requestbuffers *p)
{
int i;
@@ -650,7 +650,7 @@ static int mx1_camera_reqbufs(struct soc_camera_file *icf,
* it hadn't triggered
*/
for (i = 0; i < p->count; i++) {
- struct mx1_buffer *buf = container_of(icf->vb_vidq.bufs[i],
+ struct mx1_buffer *buf = container_of(icd->vb_vidq.bufs[i],
struct mx1_buffer, vb);
buf->inwork = 0;
INIT_LIST_HEAD(&buf->vb.queue);
@@ -661,10 +661,10 @@ static int mx1_camera_reqbufs(struct soc_camera_file *icf,
static unsigned int mx1_camera_poll(struct file *file, poll_table *pt)
{
- struct soc_camera_file *icf = file->private_data;
+ struct soc_camera_device *icd = file->private_data;
struct mx1_buffer *buf;
- buf = list_entry(icf->vb_vidq.stream.next, struct mx1_buffer,
+ buf = list_entry(icd->vb_vidq.stream.next, struct mx1_buffer,
vb.stream);
poll_wait(file, &buf->vb.done, pt);
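The mx1 changes above are the first of several identical adaptations in this patch to two videobuf interface changes: videobuf_waiton() now also takes the owning queue, and the videobuf_queue_*_init() helpers gained a trailing mutex argument (ext_lock), passed as NULL by drivers, like these, that do their own locking. A minimal sketch of the updated call shapes, with example_* names invented for illustration:

#include <linux/device.h>
#include <linux/spinlock.h>
#include <media/videobuf-dma-contig.h>

static void example_free_buffer(struct videobuf_queue *vq,
				struct videobuf_buffer *vb)
{
	/* wait until the buffer leaves VIDEOBUF_QUEUED/VIDEOBUF_ACTIVE */
	videobuf_waiton(vq, vb, 0, 0);
	videobuf_dma_contig_free(vq, vb);
	vb->state = VIDEOBUF_NEEDS_INIT;
}

static void example_init_videobuf(struct videobuf_queue *q,
				  const struct videobuf_queue_ops *ops,
				  struct device *dev, spinlock_t *lock,
				  void *priv)
{
	videobuf_queue_dma_contig_init(q, ops, dev, lock,
				       V4L2_BUF_TYPE_VIDEO_CAPTURE,
				       V4L2_FIELD_NONE,
				       sizeof(struct videobuf_buffer),
				       priv, NULL /* ext_lock */);
}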
diff --git a/drivers/media/video/mx2_camera.c b/drivers/media/video/mx2_camera.c
index b6ea67221d1d..072bd2d1cfad 100644
--- a/drivers/media/video/mx2_camera.c
+++ b/drivers/media/video/mx2_camera.c
@@ -31,6 +31,7 @@
#include <media/v4l2-common.h>
#include <media/v4l2-dev.h>
+#include <media/videobuf-core.h>
#include <media/videobuf-dma-contig.h>
#include <media/soc_camera.h>
#include <media/soc_mediabus.h>
@@ -461,9 +462,9 @@ static void free_buffer(struct videobuf_queue *vq, struct mx2_buffer *buf)
/*
* This waits until this buffer is out of danger, i.e., until it is no
- * longer in STATE_QUEUED or STATE_ACTIVE
+ * longer in state VIDEOBUF_QUEUED or VIDEOBUF_ACTIVE
*/
- videobuf_waiton(vb, 0, 0);
+ videobuf_waiton(vq, vb, 0, 0);
videobuf_dma_contig_free(vq, vb);
dev_dbg(&icd->dev, "%s freed\n", __func__);
@@ -640,15 +641,27 @@ static void mx2_videobuf_release(struct videobuf_queue *vq,
* Terminate only queued but inactive buffers. Active buffers are
* released when they become inactive after videobuf_waiton().
*
- * FIXME: implement forced termination of active buffers, so that the
- * user won't get stuck in an uninterruptible state. This requires a
- * specific handling for each of the three DMA types that this driver
- * supports.
+ * FIXME: implement forced termination of active buffers for mx27 and
+ * mx27 eMMA, so that the user won't get stuck in an uninterruptible
+ * state. This requires specific handling for each of these DMA
+ * types.
*/
spin_lock_irqsave(&pcdev->lock, flags);
if (vb->state == VIDEOBUF_QUEUED) {
list_del(&vb->queue);
vb->state = VIDEOBUF_ERROR;
+ } else if (cpu_is_mx25() && vb->state == VIDEOBUF_ACTIVE) {
+ if (pcdev->fb1_active == buf) {
+ pcdev->csicr1 &= ~CSICR1_FB1_DMA_INTEN;
+ writel(0, pcdev->base_csi + CSIDMASA_FB1);
+ pcdev->fb1_active = NULL;
+ } else if (pcdev->fb2_active == buf) {
+ pcdev->csicr1 &= ~CSICR1_FB2_DMA_INTEN;
+ writel(0, pcdev->base_csi + CSIDMASA_FB2);
+ pcdev->fb2_active = NULL;
+ }
+ writel(pcdev->csicr1, pcdev->base_csi + CSICR1);
+ vb->state = VIDEOBUF_ERROR;
}
spin_unlock_irqrestore(&pcdev->lock, flags);
@@ -670,7 +683,7 @@ static void mx2_camera_init_videobuf(struct videobuf_queue *q,
videobuf_queue_dma_contig_init(q, &mx2_videobuf_ops, pcdev->dev,
&pcdev->lock, V4L2_BUF_TYPE_VIDEO_CAPTURE,
- V4L2_FIELD_NONE, sizeof(struct mx2_buffer), icd);
+ V4L2_FIELD_NONE, sizeof(struct mx2_buffer), icd, NULL);
}
#define MX2_BUS_FLAGS (SOCAM_DATAWIDTH_8 | \
@@ -716,8 +729,11 @@ static void mx27_camera_emma_buf_init(struct soc_camera_device *icd,
/*
* We only use the EMMA engine to get rid of the broken
* DMA Engine. No color space consversion at the moment.
- * We adjust incoming and outgoing pixelformat to rgb16
- * and adjust the bytesperline accordingly.
+ * We set the incoming and outgoing pixelformat to a
+ * 16 bit wide format and adjust the bytesperline
+ * accordingly. With this configuration the input data
+ * will not be changed by the eMMA and can be any
+ * 16 bit pixelformat.
*/
writel(PRP_CNTL_CH1EN |
PRP_CNTL_CSIEN |
@@ -888,8 +904,6 @@ static int mx2_camera_set_crop(struct soc_camera_device *icd,
static int mx2_camera_set_fmt(struct soc_camera_device *icd,
struct v4l2_format *f)
{
- struct soc_camera_host *ici = to_soc_camera_host(icd->dev.parent);
- struct mx2_camera_dev *pcdev = ici->priv;
struct v4l2_subdev *sd = soc_camera_to_subdev(icd);
const struct soc_camera_format_xlate *xlate;
struct v4l2_pix_format *pix = &f->fmt.pix;
@@ -903,10 +917,6 @@ static int mx2_camera_set_fmt(struct soc_camera_device *icd,
return -EINVAL;
}
- /* eMMA can only do RGB565 */
- if (mx27_camera_emma(pcdev) && pix->pixelformat != V4L2_PIX_FMT_RGB565)
- return -EINVAL;
-
mf.width = pix->width;
mf.height = pix->height;
mf.field = pix->field;
@@ -932,8 +942,6 @@ static int mx2_camera_set_fmt(struct soc_camera_device *icd,
static int mx2_camera_try_fmt(struct soc_camera_device *icd,
struct v4l2_format *f)
{
- struct soc_camera_host *ici = to_soc_camera_host(icd->dev.parent);
- struct mx2_camera_dev *pcdev = ici->priv;
struct v4l2_subdev *sd = soc_camera_to_subdev(icd);
const struct soc_camera_format_xlate *xlate;
struct v4l2_pix_format *pix = &f->fmt.pix;
@@ -950,10 +958,6 @@ static int mx2_camera_try_fmt(struct soc_camera_device *icd,
/* FIXME: implement MX27 limits */
- /* eMMA can only do RGB565 */
- if (mx27_camera_emma(pcdev) && pixfmt != V4L2_PIX_FMT_RGB565)
- return -EINVAL;
-
/* limit to MX25 hardware capabilities */
if (cpu_is_mx25()) {
if (xlate->host_fmt->bits_per_sample <= 8)
@@ -1017,13 +1021,13 @@ static int mx2_camera_querycap(struct soc_camera_host *ici,
return 0;
}
-static int mx2_camera_reqbufs(struct soc_camera_file *icf,
+static int mx2_camera_reqbufs(struct soc_camera_device *icd,
struct v4l2_requestbuffers *p)
{
int i;
for (i = 0; i < p->count; i++) {
- struct mx2_buffer *buf = container_of(icf->vb_vidq.bufs[i],
+ struct mx2_buffer *buf = container_of(icd->vb_vidq.bufs[i],
struct mx2_buffer, vb);
INIT_LIST_HEAD(&buf->vb.queue);
}
@@ -1144,9 +1148,9 @@ err_out:
static unsigned int mx2_camera_poll(struct file *file, poll_table *pt)
{
- struct soc_camera_file *icf = file->private_data;
+ struct soc_camera_device *icd = file->private_data;
- return videobuf_poll_stream(file, &icf->vb_vidq, pt);
+ return videobuf_poll_stream(file, &icd->vb_vidq, pt);
}
static struct soc_camera_host_ops mx2_soc_camera_host_ops = {
@@ -1426,6 +1430,9 @@ static int __devinit mx2_camera_probe(struct platform_device *pdev)
if (err)
goto exit_free_emma;
+ dev_info(&pdev->dev, "MX2 Camera (CSI) driver probed, clock frequency: %ld\n",
+ clk_get_rate(pcdev->clk_csi));
+
return 0;
exit_free_emma:
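The mx2 hunks also show the other recurring conversion in this series: the soc-camera host callbacks now receive a struct soc_camera_device instead of the old struct soc_camera_file, and the videobuf queue is reached through icd->vb_vidq. A hedged sketch of the resulting reqbufs/poll shape (the example_* names are illustrative only):

#include <linux/poll.h>
#include <media/soc_camera.h>
#include <media/videobuf-core.h>

static int example_reqbufs(struct soc_camera_device *icd,
			   struct v4l2_requestbuffers *p)
{
	int i;

	/* the per-open videobuf queue now hangs off the camera device */
	for (i = 0; i < p->count; i++)
		INIT_LIST_HEAD(&icd->vb_vidq.bufs[i]->queue);

	return 0;
}

static unsigned int example_poll(struct file *file, poll_table *pt)
{
	struct soc_camera_device *icd = file->private_data;

	return videobuf_poll_stream(file, &icd->vb_vidq, pt);
}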
diff --git a/drivers/media/video/mx3_camera.c b/drivers/media/video/mx3_camera.c
index a9be14c23912..aa871c2936b3 100644
--- a/drivers/media/video/mx3_camera.c
+++ b/drivers/media/video/mx3_camera.c
@@ -27,6 +27,7 @@
#include <mach/ipu.h>
#include <mach/mx3_camera.h>
+#include <mach/dma.h>
#define MX3_CAM_DRV_NAME "mx3-camera"
@@ -185,7 +186,7 @@ static void free_buffer(struct videobuf_queue *vq, struct mx3_camera_buffer *buf
* This waits until this buffer is out of danger, i.e., until it is no
* longer in STATE_QUEUED or STATE_ACTIVE
*/
- videobuf_waiton(vb, 0, 0);
+ videobuf_waiton(vq, vb, 0, 0);
if (txd) {
ichan = to_idmac_chan(txd->chan);
async_tx_ack(txd);
@@ -441,7 +442,8 @@ static void mx3_camera_init_videobuf(struct videobuf_queue *q,
&mx3_cam->lock,
V4L2_BUF_TYPE_VIDEO_CAPTURE,
V4L2_FIELD_NONE,
- sizeof(struct mx3_camera_buffer), icd);
+ sizeof(struct mx3_camera_buffer), icd,
+ NULL);
}
/* First part of ipu_csi_init_interface() */
@@ -637,6 +639,9 @@ static bool chan_filter(struct dma_chan *chan, void *arg)
struct dma_chan_request *rq = arg;
struct mx3_camera_pdata *pdata;
+ if (!imx_dma_is_ipu(chan))
+ return false;
+
if (!rq)
return false;
@@ -976,7 +981,7 @@ static int mx3_camera_try_fmt(struct soc_camera_device *icd,
return ret;
}
-static int mx3_camera_reqbufs(struct soc_camera_file *icf,
+static int mx3_camera_reqbufs(struct soc_camera_device *icd,
struct v4l2_requestbuffers *p)
{
return 0;
@@ -984,9 +989,9 @@ static int mx3_camera_reqbufs(struct soc_camera_file *icf,
static unsigned int mx3_camera_poll(struct file *file, poll_table *pt)
{
- struct soc_camera_file *icf = file->private_data;
+ struct soc_camera_device *icd = file->private_data;
- return videobuf_poll_stream(file, &icf->vb_vidq, pt);
+ return videobuf_poll_stream(file, &icd->vb_vidq, pt);
}
static int mx3_camera_querycap(struct soc_camera_host *ici,
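The chan_filter() change above tightens the dmaengine channel selection by rejecting non-IPU channels up front. A sketch of how such a filter plugs into dma_request_channel(); the example_* names and the mask setup are the generic dmaengine pattern, not code from this patch:

#include <linux/dmaengine.h>
#include <mach/dma.h>

static bool example_filter(struct dma_chan *chan, void *arg)
{
	/* only IPU IDMAC channels can serve the CSI capture path */
	if (!imx_dma_is_ipu(chan))
		return false;

	/* a real filter would also match platform data passed in arg */
	return arg != NULL;
}

static struct dma_chan *example_request_channel(void *arg)
{
	dma_cap_mask_t mask;

	dma_cap_zero(mask);
	dma_cap_set(DMA_SLAVE, mask);

	return dma_request_channel(mask, example_filter, arg);
}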
diff --git a/drivers/media/video/mxb.c b/drivers/media/video/mxb.c
index b1dbcf1d2bcb..4e8fd965f151 100644
--- a/drivers/media/video/mxb.c
+++ b/drivers/media/video/mxb.c
@@ -32,7 +32,6 @@
#include "tea6415c.h"
#include "tea6420.h"
-#define I2C_SAA5246A 0x11
#define I2C_SAA7111A 0x24
#define I2C_TDA9840 0x42
#define I2C_TEA6415C 0x43
@@ -186,21 +185,17 @@ static int mxb_probe(struct saa7146_dev *dev)
}
mxb->saa7111a = v4l2_i2c_new_subdev(&dev->v4l2_dev, &mxb->i2c_adapter,
- "saa7115", "saa7111", I2C_SAA7111A, NULL);
+ "saa7111", I2C_SAA7111A, NULL);
mxb->tea6420_1 = v4l2_i2c_new_subdev(&dev->v4l2_dev, &mxb->i2c_adapter,
- "tea6420", "tea6420", I2C_TEA6420_1, NULL);
+ "tea6420", I2C_TEA6420_1, NULL);
mxb->tea6420_2 = v4l2_i2c_new_subdev(&dev->v4l2_dev, &mxb->i2c_adapter,
- "tea6420", "tea6420", I2C_TEA6420_2, NULL);
+ "tea6420", I2C_TEA6420_2, NULL);
mxb->tea6415c = v4l2_i2c_new_subdev(&dev->v4l2_dev, &mxb->i2c_adapter,
- "tea6415c", "tea6415c", I2C_TEA6415C, NULL);
+ "tea6415c", I2C_TEA6415C, NULL);
mxb->tda9840 = v4l2_i2c_new_subdev(&dev->v4l2_dev, &mxb->i2c_adapter,
- "tda9840", "tda9840", I2C_TDA9840, NULL);
+ "tda9840", I2C_TDA9840, NULL);
mxb->tuner = v4l2_i2c_new_subdev(&dev->v4l2_dev, &mxb->i2c_adapter,
- "tuner", "tuner", I2C_TUNER, NULL);
- if (v4l2_i2c_new_subdev(&dev->v4l2_dev, &mxb->i2c_adapter,
- "saa5246a", "saa5246a", I2C_SAA5246A, NULL)) {
- printk(KERN_INFO "mxb: found teletext decoder\n");
- }
+ "tuner", I2C_TUNER, NULL);
/* check if all devices are present */
if (!mxb->tea6420_1 || !mxb->tea6420_2 || !mxb->tea6415c ||
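For reference, a minimal sketch of the slimmed-down v4l2_i2c_new_subdev() call used above, which drops the separate module name and relies on I2C module aliases for autoloading; the decoder name and address below are only an example:

#include <linux/i2c.h>
#include <media/v4l2-common.h>
#include <media/v4l2-device.h>

static int example_attach_decoder(struct v4l2_device *v4l2_dev,
				  struct i2c_adapter *adapter)
{
	struct v4l2_subdev *sd;

	/* one name only: the I2C device name, no explicit module name */
	sd = v4l2_i2c_new_subdev(v4l2_dev, adapter, "tda9840", 0x42, NULL);

	return sd ? 0 : -ENODEV;
}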
diff --git a/drivers/media/video/omap/omap_vout.c b/drivers/media/video/omap/omap_vout.c
index 4ed51b1552e1..15f8793e325b 100644
--- a/drivers/media/video/omap/omap_vout.c
+++ b/drivers/media/video/omap/omap_vout.c
@@ -1341,7 +1341,7 @@ static int omap_vout_open(struct file *file)
videobuf_queue_dma_contig_init(q, &video_vbq_ops, q->dev,
&vout->vbq_lock, vout->type, V4L2_FIELD_NONE,
- sizeof(struct videobuf_buffer), vout);
+ sizeof(struct videobuf_buffer), vout, NULL);
v4l2_dbg(1, debug, &vout->vid_dev->v4l2_dev, "Exiting %s\n", __func__);
return 0;
diff --git a/drivers/media/video/omap1_camera.c b/drivers/media/video/omap1_camera.c
new file mode 100644
index 000000000000..cbfd07f2d9da
--- /dev/null
+++ b/drivers/media/video/omap1_camera.c
@@ -0,0 +1,1702 @@
+/*
+ * V4L2 SoC Camera driver for OMAP1 Camera Interface
+ *
+ * Copyright (C) 2010, Janusz Krzysztofik <jkrzyszt@tis.icnet.pl>
+ *
+ * Based on V4L2 Driver for i.MXL/i.MXL camera (CSI) host
+ * Copyright (C) 2008, Paulius Zaleckas <paulius.zaleckas@teltonika.lt>
+ * Copyright (C) 2009, Darius Augulis <augulis.darius@gmail.com>
+ *
+ * Based on PXA SoC camera driver
+ * Copyright (C) 2006, Sascha Hauer, Pengutronix
+ * Copyright (C) 2008, Guennadi Liakhovetski <kernel@pengutronix.de>
+ *
+ * Hardware specific bits initially based on former work by Matt Callow
+ * drivers/media/video/omap/omap1510cam.c
+ * Copyright (C) 2006 Matt Callow
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+
+#include <linux/clk.h>
+#include <linux/dma-mapping.h>
+#include <linux/interrupt.h>
+#include <linux/platform_device.h>
+#include <linux/slab.h>
+#include <linux/version.h>
+
+#include <media/omap1_camera.h>
+#include <media/soc_camera.h>
+#include <media/soc_mediabus.h>
+#include <media/videobuf-dma-contig.h>
+#include <media/videobuf-dma-sg.h>
+
+#include <plat/dma.h>
+
+
+#define DRIVER_NAME "omap1-camera"
+#define VERSION_CODE KERNEL_VERSION(0, 0, 1)
+
+
+/*
+ * ---------------------------------------------------------------------------
+ * OMAP1 Camera Interface registers
+ * ---------------------------------------------------------------------------
+ */
+
+#define REG_CTRLCLOCK 0x00
+#define REG_IT_STATUS 0x04
+#define REG_MODE 0x08
+#define REG_STATUS 0x0C
+#define REG_CAMDATA 0x10
+#define REG_GPIO 0x14
+#define REG_PEAK_COUNTER 0x18
+
+/* CTRLCLOCK bit shifts */
+#define LCLK_EN BIT(7)
+#define DPLL_EN BIT(6)
+#define MCLK_EN BIT(5)
+#define CAMEXCLK_EN BIT(4)
+#define POLCLK BIT(3)
+#define FOSCMOD_SHIFT 0
+#define FOSCMOD_MASK (0x7 << FOSCMOD_SHIFT)
+#define FOSCMOD_12MHz 0x0
+#define FOSCMOD_6MHz 0x2
+#define FOSCMOD_9_6MHz 0x4
+#define FOSCMOD_24MHz 0x5
+#define FOSCMOD_8MHz 0x6
+
+/* IT_STATUS bit shifts */
+#define DATA_TRANSFER BIT(5)
+#define FIFO_FULL BIT(4)
+#define H_DOWN BIT(3)
+#define H_UP BIT(2)
+#define V_DOWN BIT(1)
+#define V_UP BIT(0)
+
+/* MODE bit shifts */
+#define RAZ_FIFO BIT(18)
+#define EN_FIFO_FULL BIT(17)
+#define EN_NIRQ BIT(16)
+#define THRESHOLD_SHIFT 9
+#define THRESHOLD_MASK (0x7f << THRESHOLD_SHIFT)
+#define DMA BIT(8)
+#define EN_H_DOWN BIT(7)
+#define EN_H_UP BIT(6)
+#define EN_V_DOWN BIT(5)
+#define EN_V_UP BIT(4)
+#define ORDERCAMD BIT(3)
+
+#define IRQ_MASK (EN_V_UP | EN_V_DOWN | EN_H_UP | EN_H_DOWN | \
+ EN_NIRQ | EN_FIFO_FULL)
+
+/* STATUS bit shifts */
+#define HSTATUS BIT(1)
+#define VSTATUS BIT(0)
+
+/* GPIO bit shifts */
+#define CAM_RST BIT(0)
+
+/* end of OMAP1 Camera Interface registers */
+
+
+#define SOCAM_BUS_FLAGS (SOCAM_MASTER | \
+ SOCAM_HSYNC_ACTIVE_HIGH | SOCAM_VSYNC_ACTIVE_HIGH | \
+ SOCAM_PCLK_SAMPLE_RISING | SOCAM_PCLK_SAMPLE_FALLING | \
+ SOCAM_DATA_ACTIVE_HIGH | SOCAM_DATAWIDTH_8)
+
+
+#define FIFO_SIZE ((THRESHOLD_MASK >> THRESHOLD_SHIFT) + 1)
+#define FIFO_SHIFT __fls(FIFO_SIZE)
+
+#define DMA_BURST_SHIFT (1 + OMAP_DMA_DATA_BURST_4)
+#define DMA_BURST_SIZE (1 << DMA_BURST_SHIFT)
+
+#define DMA_ELEMENT_SHIFT OMAP_DMA_DATA_TYPE_S32
+#define DMA_ELEMENT_SIZE (1 << DMA_ELEMENT_SHIFT)
+
+#define DMA_FRAME_SHIFT_CONTIG (FIFO_SHIFT - 1)
+#define DMA_FRAME_SHIFT_SG DMA_BURST_SHIFT
+
+#define DMA_FRAME_SHIFT(x) ((x) == OMAP1_CAM_DMA_CONTIG ? \
+ DMA_FRAME_SHIFT_CONTIG : \
+ DMA_FRAME_SHIFT_SG)
+#define DMA_FRAME_SIZE(x) (1 << DMA_FRAME_SHIFT(x))
+#define DMA_SYNC OMAP_DMA_SYNC_FRAME
+#define THRESHOLD_LEVEL DMA_FRAME_SIZE
+
+
+#define MAX_VIDEO_MEM 4 /* arbitrary video memory limit in MB */
+
+
+/*
+ * Structures
+ */
+
+/* buffer for one video frame */
+struct omap1_cam_buf {
+ struct videobuf_buffer vb;
+ enum v4l2_mbus_pixelcode code;
+ int inwork;
+ struct scatterlist *sgbuf;
+ int sgcount;
+ int bytes_left;
+ enum videobuf_state result;
+};
+
+struct omap1_cam_dev {
+ struct soc_camera_host soc_host;
+ struct soc_camera_device *icd;
+ struct clk *clk;
+
+ unsigned int irq;
+ void __iomem *base;
+
+ int dma_ch;
+
+ struct omap1_cam_platform_data *pdata;
+ struct resource *res;
+ unsigned long pflags;
+ unsigned long camexclk;
+
+ struct list_head capture;
+
+ /* lock used to protect videobuf */
+ spinlock_t lock;
+
+ /* Pointers to DMA buffers */
+ struct omap1_cam_buf *active;
+ struct omap1_cam_buf *ready;
+
+ enum omap1_cam_vb_mode vb_mode;
+ int (*mmap_mapper)(struct videobuf_queue *q,
+ struct videobuf_buffer *buf,
+ struct vm_area_struct *vma);
+
+ u32 reg_cache[0];
+};
+
+
+static void cam_write(struct omap1_cam_dev *pcdev, u16 reg, u32 val)
+{
+ pcdev->reg_cache[reg / sizeof(u32)] = val;
+ __raw_writel(val, pcdev->base + reg);
+}
+
+static u32 cam_read(struct omap1_cam_dev *pcdev, u16 reg, bool from_cache)
+{
+ return !from_cache ? __raw_readl(pcdev->base + reg) :
+ pcdev->reg_cache[reg / sizeof(u32)];
+}
+
+#define CAM_READ(pcdev, reg) \
+ cam_read(pcdev, REG_##reg, false)
+#define CAM_WRITE(pcdev, reg, val) \
+ cam_write(pcdev, REG_##reg, val)
+#define CAM_READ_CACHE(pcdev, reg) \
+ cam_read(pcdev, REG_##reg, true)
+
+/*
+ * Videobuf operations
+ */
+static int omap1_videobuf_setup(struct videobuf_queue *vq, unsigned int *count,
+ unsigned int *size)
+{
+ struct soc_camera_device *icd = vq->priv_data;
+ int bytes_per_line = soc_mbus_bytes_per_line(icd->user_width,
+ icd->current_fmt->host_fmt);
+ struct soc_camera_host *ici = to_soc_camera_host(icd->dev.parent);
+ struct omap1_cam_dev *pcdev = ici->priv;
+
+ if (bytes_per_line < 0)
+ return bytes_per_line;
+
+ *size = bytes_per_line * icd->user_height;
+
+ if (!*count || *count < OMAP1_CAMERA_MIN_BUF_COUNT(pcdev->vb_mode))
+ *count = OMAP1_CAMERA_MIN_BUF_COUNT(pcdev->vb_mode);
+
+ if (*size * *count > MAX_VIDEO_MEM * 1024 * 1024)
+ *count = (MAX_VIDEO_MEM * 1024 * 1024) / *size;
+
+ dev_dbg(icd->dev.parent,
+ "%s: count=%d, size=%d\n", __func__, *count, *size);
+
+ return 0;
+}
+
+static void free_buffer(struct videobuf_queue *vq, struct omap1_cam_buf *buf,
+ enum omap1_cam_vb_mode vb_mode)
+{
+ struct videobuf_buffer *vb = &buf->vb;
+
+ BUG_ON(in_interrupt());
+
+ videobuf_waiton(vq, vb, 0, 0);
+
+ if (vb_mode == OMAP1_CAM_DMA_CONTIG) {
+ videobuf_dma_contig_free(vq, vb);
+ } else {
+ struct soc_camera_device *icd = vq->priv_data;
+ struct device *dev = icd->dev.parent;
+ struct videobuf_dmabuf *dma = videobuf_to_dma(vb);
+
+ videobuf_dma_unmap(dev, dma);
+ videobuf_dma_free(dma);
+ }
+
+ vb->state = VIDEOBUF_NEEDS_INIT;
+}
+
+static int omap1_videobuf_prepare(struct videobuf_queue *vq,
+ struct videobuf_buffer *vb, enum v4l2_field field)
+{
+ struct soc_camera_device *icd = vq->priv_data;
+ struct omap1_cam_buf *buf = container_of(vb, struct omap1_cam_buf, vb);
+ int bytes_per_line = soc_mbus_bytes_per_line(icd->user_width,
+ icd->current_fmt->host_fmt);
+ struct soc_camera_host *ici = to_soc_camera_host(icd->dev.parent);
+ struct omap1_cam_dev *pcdev = ici->priv;
+ int ret;
+
+ if (bytes_per_line < 0)
+ return bytes_per_line;
+
+ WARN_ON(!list_empty(&vb->queue));
+
+ BUG_ON(NULL == icd->current_fmt);
+
+ buf->inwork = 1;
+
+ if (buf->code != icd->current_fmt->code || vb->field != field ||
+ vb->width != icd->user_width ||
+ vb->height != icd->user_height) {
+ buf->code = icd->current_fmt->code;
+ vb->width = icd->user_width;
+ vb->height = icd->user_height;
+ vb->field = field;
+ vb->state = VIDEOBUF_NEEDS_INIT;
+ }
+
+ vb->size = bytes_per_line * vb->height;
+
+ if (vb->baddr && vb->bsize < vb->size) {
+ ret = -EINVAL;
+ goto out;
+ }
+
+ if (vb->state == VIDEOBUF_NEEDS_INIT) {
+ ret = videobuf_iolock(vq, vb, NULL);
+ if (ret)
+ goto fail;
+
+ vb->state = VIDEOBUF_PREPARED;
+ }
+ buf->inwork = 0;
+
+ return 0;
+fail:
+ free_buffer(vq, buf, pcdev->vb_mode);
+out:
+ buf->inwork = 0;
+ return ret;
+}
+
+static void set_dma_dest_params(int dma_ch, struct omap1_cam_buf *buf,
+ enum omap1_cam_vb_mode vb_mode)
+{
+ dma_addr_t dma_addr;
+ unsigned int block_size;
+
+ if (vb_mode == OMAP1_CAM_DMA_CONTIG) {
+ dma_addr = videobuf_to_dma_contig(&buf->vb);
+ block_size = buf->vb.size;
+ } else {
+ if (WARN_ON(!buf->sgbuf)) {
+ buf->result = VIDEOBUF_ERROR;
+ return;
+ }
+ dma_addr = sg_dma_address(buf->sgbuf);
+ if (WARN_ON(!dma_addr)) {
+ buf->sgbuf = NULL;
+ buf->result = VIDEOBUF_ERROR;
+ return;
+ }
+ block_size = sg_dma_len(buf->sgbuf);
+ if (WARN_ON(!block_size)) {
+ buf->sgbuf = NULL;
+ buf->result = VIDEOBUF_ERROR;
+ return;
+ }
+ if (unlikely(buf->bytes_left < block_size))
+ block_size = buf->bytes_left;
+ if (WARN_ON(dma_addr & (DMA_FRAME_SIZE(vb_mode) *
+ DMA_ELEMENT_SIZE - 1))) {
+ dma_addr = ALIGN(dma_addr, DMA_FRAME_SIZE(vb_mode) *
+ DMA_ELEMENT_SIZE);
+ block_size &= ~(DMA_FRAME_SIZE(vb_mode) *
+ DMA_ELEMENT_SIZE - 1);
+ }
+ buf->bytes_left -= block_size;
+ buf->sgcount++;
+ }
+
+ omap_set_dma_dest_params(dma_ch,
+ OMAP_DMA_PORT_EMIFF, OMAP_DMA_AMODE_POST_INC, dma_addr, 0, 0);
+ omap_set_dma_transfer_params(dma_ch,
+ OMAP_DMA_DATA_TYPE_S32, DMA_FRAME_SIZE(vb_mode),
+ block_size >> (DMA_FRAME_SHIFT(vb_mode) + DMA_ELEMENT_SHIFT),
+ DMA_SYNC, 0, 0);
+}
+
+static struct omap1_cam_buf *prepare_next_vb(struct omap1_cam_dev *pcdev)
+{
+ struct omap1_cam_buf *buf;
+
+ /*
+ * If there is already a buffer pointed to by pcdev->ready,
+ * (re)use it, otherwise try to fetch and configure a new one.
+ */
+ buf = pcdev->ready;
+ if (!buf) {
+ if (list_empty(&pcdev->capture))
+ return buf;
+ buf = list_entry(pcdev->capture.next,
+ struct omap1_cam_buf, vb.queue);
+ buf->vb.state = VIDEOBUF_ACTIVE;
+ pcdev->ready = buf;
+ list_del_init(&buf->vb.queue);
+ }
+
+ if (pcdev->vb_mode == OMAP1_CAM_DMA_CONTIG) {
+ /*
+ * In CONTIG mode, we can safely enter next buffer parameters
+ * into the DMA programming register set after the DMA
+ * has already been activated on the previous buffer
+ */
+ set_dma_dest_params(pcdev->dma_ch, buf, pcdev->vb_mode);
+ } else {
+ /*
+ * In SG mode, the above is not safe since there are probably
+ * a bunch of sgbufs from previous sglist still pending.
+ * Instead, mark the sglist fresh for the upcoming
+ * try_next_sgbuf().
+ */
+ buf->sgbuf = NULL;
+ }
+
+ return buf;
+}
+
+static struct scatterlist *try_next_sgbuf(int dma_ch, struct omap1_cam_buf *buf)
+{
+ struct scatterlist *sgbuf;
+
+ if (likely(buf->sgbuf)) {
+ /* current sglist is active */
+ if (unlikely(!buf->bytes_left)) {
+ /* indicate sglist complete */
+ sgbuf = NULL;
+ } else {
+ /* process next sgbuf */
+ sgbuf = sg_next(buf->sgbuf);
+ if (WARN_ON(!sgbuf)) {
+ buf->result = VIDEOBUF_ERROR;
+ } else if (WARN_ON(!sg_dma_len(sgbuf))) {
+ sgbuf = NULL;
+ buf->result = VIDEOBUF_ERROR;
+ }
+ }
+ buf->sgbuf = sgbuf;
+ } else {
+ /* sglist is fresh, initialize it before using */
+ struct videobuf_dmabuf *dma = videobuf_to_dma(&buf->vb);
+
+ sgbuf = dma->sglist;
+ if (!(WARN_ON(!sgbuf))) {
+ buf->sgbuf = sgbuf;
+ buf->sgcount = 0;
+ buf->bytes_left = buf->vb.size;
+ buf->result = VIDEOBUF_DONE;
+ }
+ }
+ if (sgbuf)
+ /*
+ * Put our next sgbuf parameters (address, size)
+ * into the DMA programming register set.
+ */
+ set_dma_dest_params(dma_ch, buf, OMAP1_CAM_DMA_SG);
+
+ return sgbuf;
+}
+
+static void start_capture(struct omap1_cam_dev *pcdev)
+{
+ struct omap1_cam_buf *buf = pcdev->active;
+ u32 ctrlclock = CAM_READ_CACHE(pcdev, CTRLCLOCK);
+ u32 mode = CAM_READ_CACHE(pcdev, MODE) & ~EN_V_DOWN;
+
+ if (WARN_ON(!buf))
+ return;
+
+ /*
+ * Enable start of frame interrupt, which we will use for activating
+ * our end of frame watchdog when capture actually starts.
+ */
+ mode |= EN_V_UP;
+
+ if (unlikely(ctrlclock & LCLK_EN))
+ /* stop pixel clock before FIFO reset */
+ CAM_WRITE(pcdev, CTRLCLOCK, ctrlclock & ~LCLK_EN);
+ /* reset FIFO */
+ CAM_WRITE(pcdev, MODE, mode | RAZ_FIFO);
+
+ omap_start_dma(pcdev->dma_ch);
+
+ if (pcdev->vb_mode == OMAP1_CAM_DMA_SG) {
+ /*
+ * In SG mode, it's a good moment for fetching next sgbuf
+ * from the current sglist and, if available, already putting
+ * its parameters into the DMA programming register set.
+ */
+ try_next_sgbuf(pcdev->dma_ch, buf);
+ }
+
+ /* (re)enable pixel clock */
+ CAM_WRITE(pcdev, CTRLCLOCK, ctrlclock | LCLK_EN);
+ /* release FIFO reset */
+ CAM_WRITE(pcdev, MODE, mode);
+}
+
+static void suspend_capture(struct omap1_cam_dev *pcdev)
+{
+ u32 ctrlclock = CAM_READ_CACHE(pcdev, CTRLCLOCK);
+
+ CAM_WRITE(pcdev, CTRLCLOCK, ctrlclock & ~LCLK_EN);
+ omap_stop_dma(pcdev->dma_ch);
+}
+
+static void disable_capture(struct omap1_cam_dev *pcdev)
+{
+ u32 mode = CAM_READ_CACHE(pcdev, MODE);
+
+ CAM_WRITE(pcdev, MODE, mode & ~(IRQ_MASK | DMA));
+}
+
+static void omap1_videobuf_queue(struct videobuf_queue *vq,
+ struct videobuf_buffer *vb)
+{
+ struct soc_camera_device *icd = vq->priv_data;
+ struct soc_camera_host *ici = to_soc_camera_host(icd->dev.parent);
+ struct omap1_cam_dev *pcdev = ici->priv;
+ struct omap1_cam_buf *buf;
+ u32 mode;
+
+ list_add_tail(&vb->queue, &pcdev->capture);
+ vb->state = VIDEOBUF_QUEUED;
+
+ if (pcdev->active) {
+ /*
+ * Capture in progress, so don't touch pcdev->ready even if
+ * empty. Since the transfer of the DMA programming register set
+ * content to the DMA working register set is done automatically
+ * by the DMA hardware, this can pretty well happen while we
+ * are keeping the lock here. Leave fetching it from the queue
+ * to be done when the next DMA interrupt occurs instead.
+ */
+ return;
+ }
+
+ WARN_ON(pcdev->ready);
+
+ buf = prepare_next_vb(pcdev);
+ if (WARN_ON(!buf))
+ return;
+
+ pcdev->active = buf;
+ pcdev->ready = NULL;
+
+ dev_dbg(icd->dev.parent,
+ "%s: capture not active, setup FIFO, start DMA\n", __func__);
+ mode = CAM_READ_CACHE(pcdev, MODE) & ~THRESHOLD_MASK;
+ mode |= THRESHOLD_LEVEL(pcdev->vb_mode) << THRESHOLD_SHIFT;
+ CAM_WRITE(pcdev, MODE, mode | EN_FIFO_FULL | DMA);
+
+ if (pcdev->vb_mode == OMAP1_CAM_DMA_SG) {
+ /*
+ * In SG mode, the above prepare_next_vb() didn't actually
+ * put anything into the DMA programming register set,
+ * so we have to do it now, before activating DMA.
+ */
+ try_next_sgbuf(pcdev->dma_ch, buf);
+ }
+
+ start_capture(pcdev);
+}
+
+static void omap1_videobuf_release(struct videobuf_queue *vq,
+ struct videobuf_buffer *vb)
+{
+ struct omap1_cam_buf *buf =
+ container_of(vb, struct omap1_cam_buf, vb);
+ struct soc_camera_device *icd = vq->priv_data;
+ struct device *dev = icd->dev.parent;
+ struct soc_camera_host *ici = to_soc_camera_host(icd->dev.parent);
+ struct omap1_cam_dev *pcdev = ici->priv;
+
+ switch (vb->state) {
+ case VIDEOBUF_DONE:
+ dev_dbg(dev, "%s (done)\n", __func__);
+ break;
+ case VIDEOBUF_ACTIVE:
+ dev_dbg(dev, "%s (active)\n", __func__);
+ break;
+ case VIDEOBUF_QUEUED:
+ dev_dbg(dev, "%s (queued)\n", __func__);
+ break;
+ case VIDEOBUF_PREPARED:
+ dev_dbg(dev, "%s (prepared)\n", __func__);
+ break;
+ default:
+ dev_dbg(dev, "%s (unknown %d)\n", __func__, vb->state);
+ break;
+ }
+
+ free_buffer(vq, buf, pcdev->vb_mode);
+}
+
+static void videobuf_done(struct omap1_cam_dev *pcdev,
+ enum videobuf_state result)
+{
+ struct omap1_cam_buf *buf = pcdev->active;
+ struct videobuf_buffer *vb;
+ struct device *dev = pcdev->icd->dev.parent;
+
+ if (WARN_ON(!buf)) {
+ suspend_capture(pcdev);
+ disable_capture(pcdev);
+ return;
+ }
+
+ if (result == VIDEOBUF_ERROR)
+ suspend_capture(pcdev);
+
+ vb = &buf->vb;
+ if (waitqueue_active(&vb->done)) {
+ if (!pcdev->ready && result != VIDEOBUF_ERROR) {
+ /*
+ * No next buffer has been entered into the DMA
+ * programming register set on time (could be done only
+ * while the previous DMA interrupt was processed, not
+ * later), so the last DMA block, be it a whole buffer
+ * if in CONTIG or its last sgbuf if in SG mode, is
+ * about to be reused by the just autoreinitialized DMA
+ * engine, and overwritten with next frame data. Best we
+ * can do is stopping the capture as soon as possible,
+ * hopefully before the next frame start.
+ */
+ suspend_capture(pcdev);
+ }
+ vb->state = result;
+ do_gettimeofday(&vb->ts);
+ if (result != VIDEOBUF_ERROR)
+ vb->field_count++;
+ wake_up(&vb->done);
+
+ /* shift in next buffer */
+ buf = pcdev->ready;
+ pcdev->active = buf;
+ pcdev->ready = NULL;
+
+ if (!buf) {
+ /*
+ * No next buffer was ready on time (see above), so
+ * indicate error condition to force capture restart or
+ * stop, depending on next buffer already queued or not.
+ */
+ result = VIDEOBUF_ERROR;
+ prepare_next_vb(pcdev);
+
+ buf = pcdev->ready;
+ pcdev->active = buf;
+ pcdev->ready = NULL;
+ }
+ } else if (pcdev->ready) {
+ /*
+ * In both CONTIG and SG mode, the DMA engine has possibly
+ * been already autoreinitialized with the preprogrammed
+ * pcdev->ready buffer. We can either accept this fact
+ * and just swap the buffers, or provoke an error condition
+ * and restart capture. The former seems less intrusive.
+ */
+ dev_dbg(dev, "%s: nobody waiting on videobuf, swap with next\n",
+ __func__);
+ pcdev->active = pcdev->ready;
+
+ if (pcdev->vb_mode == OMAP1_CAM_DMA_SG) {
+ /*
+ * In SG mode, we have to make sure that the buffer we
+ * are putting back into the pcdev->ready is marked
+ * fresh.
+ */
+ buf->sgbuf = NULL;
+ }
+ pcdev->ready = buf;
+
+ buf = pcdev->active;
+ } else {
+ /*
+ * No next buffer has been entered into
+ * the DMA programming register set on time.
+ */
+ if (pcdev->vb_mode == OMAP1_CAM_DMA_CONTIG) {
+ /*
+ * In CONTIG mode, the DMA engine has already been
+ * reinitialized with the current buffer. Best we can do
+ * is not touching it.
+ */
+ dev_dbg(dev,
+ "%s: nobody waiting on videobuf, reuse it\n",
+ __func__);
+ } else {
+ /*
+ * In SG mode, the DMA engine has just been
+ * autoreinitialized with the last sgbuf from the
+ * current list. Restart capture in order to transfer
+ * next frame start into the first sgbuf, not the last
+ * one.
+ */
+ if (result != VIDEOBUF_ERROR) {
+ suspend_capture(pcdev);
+ result = VIDEOBUF_ERROR;
+ }
+ }
+ }
+
+ if (!buf) {
+ dev_dbg(dev, "%s: no more videobufs, stop capture\n", __func__);
+ disable_capture(pcdev);
+ return;
+ }
+
+ if (pcdev->vb_mode == OMAP1_CAM_DMA_CONTIG) {
+ /*
+ * In CONTIG mode, the current buffer parameters had already
+ * been entered into the DMA programming register set while the
+ * buffer was fetched with prepare_next_vb(), they may have also
+ * been transferred into the runtime set and already be active if
+ * the DMA is still running.
+ */
+ } else {
+ /* In SG mode, extra steps are required */
+ if (result == VIDEOBUF_ERROR)
+ /* make sure we (re)use sglist from start on error */
+ buf->sgbuf = NULL;
+
+ /*
+ * In any case, enter the next sgbuf parameters into the DMA
+ * programming register set. They will be used either during
+ * nearest DMA autoreinitialization or, in case of an error,
+ * on DMA startup below.
+ */
+ try_next_sgbuf(pcdev->dma_ch, buf);
+ }
+
+ if (result == VIDEOBUF_ERROR) {
+ dev_dbg(dev, "%s: videobuf error; reset FIFO, restart DMA\n",
+ __func__);
+ start_capture(pcdev);
+ /*
+ * In SG mode, the above also resulted in the next sgbuf
+ * parameters being entered into the DMA programming register
+ * set, making them ready for next DMA autoreinitialization.
+ */
+ }
+
+ /*
+ * Finally, try fetching next buffer.
+ * In CONTIG mode, it will also enter it into the DMA programming
+ * register set, making it ready for next DMA autoreinitialization.
+ */
+ prepare_next_vb(pcdev);
+}
+
+static void dma_isr(int channel, unsigned short status, void *data)
+{
+ struct omap1_cam_dev *pcdev = data;
+ struct omap1_cam_buf *buf = pcdev->active;
+ unsigned long flags;
+
+ spin_lock_irqsave(&pcdev->lock, flags);
+
+ if (WARN_ON(!buf)) {
+ suspend_capture(pcdev);
+ disable_capture(pcdev);
+ goto out;
+ }
+
+ if (pcdev->vb_mode == OMAP1_CAM_DMA_CONTIG) {
+ /*
+ * In CONTIG mode, assume we have just managed to collect the
+ * whole frame, hopefully before our end of frame watchdog is
+ * triggered. Then, all we have to do is disable the watchdog
+ * for this frame, and calling videobuf_done() with success
+ * indicated.
+ */
+ CAM_WRITE(pcdev, MODE,
+ CAM_READ_CACHE(pcdev, MODE) & ~EN_V_DOWN);
+ videobuf_done(pcdev, VIDEOBUF_DONE);
+ } else {
+ /*
+ * In SG mode, we have to process every sgbuf from the current
+ * sglist, one after another.
+ */
+ if (buf->sgbuf) {
+ /*
+ * Current sglist not completed yet, try fetching next
+ * sgbuf, hopefully putting it into the DMA programming
+ * register set, making it ready for next DMA
+ * autoreinitialization.
+ */
+ try_next_sgbuf(pcdev->dma_ch, buf);
+ if (buf->sgbuf)
+ goto out;
+
+ /*
+ * No more sgbufs left in the current sglist. This
+ * doesn't mean that the whole videobuffer is already
+ * complete, but only that the last sgbuf from the
+ * current sglist is about to be filled. It will be
+ * ready on next DMA interrupt, signalled with the
+ * buf->sgbuf set back to NULL.
+ */
+ if (buf->result != VIDEOBUF_ERROR) {
+ /*
+ * Video frame collected without errors so far,
+ * we can prepare for collecting a next one
+ * as soon as DMA gets autoreinitialized
+ * after the current (last) sgbuf is completed.
+ */
+ buf = prepare_next_vb(pcdev);
+ if (!buf)
+ goto out;
+
+ try_next_sgbuf(pcdev->dma_ch, buf);
+ goto out;
+ }
+ }
+ /* end of videobuf */
+ videobuf_done(pcdev, buf->result);
+ }
+
+out:
+ spin_unlock_irqrestore(&pcdev->lock, flags);
+}
+
+static irqreturn_t cam_isr(int irq, void *data)
+{
+ struct omap1_cam_dev *pcdev = data;
+ struct device *dev = pcdev->icd->dev.parent;
+ struct omap1_cam_buf *buf = pcdev->active;
+ u32 it_status;
+ unsigned long flags;
+
+ it_status = CAM_READ(pcdev, IT_STATUS);
+ if (!it_status)
+ return IRQ_NONE;
+
+ spin_lock_irqsave(&pcdev->lock, flags);
+
+ if (WARN_ON(!buf)) {
+ dev_warn(dev, "%s: unhandled camera interrupt, status == "
+ "%#x\n", __func__, it_status);
+ suspend_capture(pcdev);
+ disable_capture(pcdev);
+ goto out;
+ }
+
+ if (unlikely(it_status & FIFO_FULL)) {
+ dev_warn(dev, "%s: FIFO overflow\n", __func__);
+
+ } else if (it_status & V_DOWN) {
+ /* end of video frame watchdog */
+ if (pcdev->vb_mode == OMAP1_CAM_DMA_CONTIG) {
+ /*
+ * In CONTIG mode, the watchdog is disabled with
+ * successful DMA end of block interrupt, and reenabled
+ * on next frame start. If we get here, there is nothing
+ * to check, we must be out of sync.
+ */
+ } else {
+ if (buf->sgcount == 2) {
+ /*
+ * If exactly 2 sgbufs from the next sglist have
+ * been programmed into the DMA engine (the
+ * first one already transferred into the DMA
+ * runtime register set, the second one still
+ * in the programming set), then we are in sync.
+ */
+ goto out;
+ }
+ }
+ dev_notice(dev, "%s: unexpected end of video frame\n",
+ __func__);
+
+ } else if (it_status & V_UP) {
+ u32 mode;
+
+ if (pcdev->vb_mode == OMAP1_CAM_DMA_CONTIG) {
+ /*
+ * In CONTIG mode, we need this interrupt every frame
+ * in order to reenable our end of frame watchdog.
+ */
+ mode = CAM_READ_CACHE(pcdev, MODE);
+ } else {
+ /*
+ * In SG mode, the below enabled end of frame watchdog
+ * is kept on permanently, so we can turn this one shot
+ * setup off.
+ */
+ mode = CAM_READ_CACHE(pcdev, MODE) & ~EN_V_UP;
+ }
+
+ if (!(mode & EN_V_DOWN)) {
+ /* (re)enable end of frame watchdog interrupt */
+ mode |= EN_V_DOWN;
+ }
+ CAM_WRITE(pcdev, MODE, mode);
+ goto out;
+
+ } else {
+ dev_warn(dev, "%s: unhandled camera interrupt, status == %#x\n",
+ __func__, it_status);
+ goto out;
+ }
+
+ videobuf_done(pcdev, VIDEOBUF_ERROR);
+out:
+ spin_unlock_irqrestore(&pcdev->lock, flags);
+ return IRQ_HANDLED;
+}
+
+static struct videobuf_queue_ops omap1_videobuf_ops = {
+ .buf_setup = omap1_videobuf_setup,
+ .buf_prepare = omap1_videobuf_prepare,
+ .buf_queue = omap1_videobuf_queue,
+ .buf_release = omap1_videobuf_release,
+};
+
+
+/*
+ * SOC Camera host operations
+ */
+
+static void sensor_reset(struct omap1_cam_dev *pcdev, bool reset)
+{
+ /* apply/release camera sensor reset if requested by platform data */
+ if (pcdev->pflags & OMAP1_CAMERA_RST_HIGH)
+ CAM_WRITE(pcdev, GPIO, reset);
+ else if (pcdev->pflags & OMAP1_CAMERA_RST_LOW)
+ CAM_WRITE(pcdev, GPIO, !reset);
+}
+
+/*
+ * The following two functions absolutely depend on the fact, that
+ * there can be only one camera on OMAP1 camera sensor interface
+ */
+static int omap1_cam_add_device(struct soc_camera_device *icd)
+{
+ struct soc_camera_host *ici = to_soc_camera_host(icd->dev.parent);
+ struct omap1_cam_dev *pcdev = ici->priv;
+ u32 ctrlclock;
+
+ if (pcdev->icd)
+ return -EBUSY;
+
+ clk_enable(pcdev->clk);
+
+ /* setup sensor clock */
+ ctrlclock = CAM_READ(pcdev, CTRLCLOCK);
+ ctrlclock &= ~(CAMEXCLK_EN | MCLK_EN | DPLL_EN);
+ CAM_WRITE(pcdev, CTRLCLOCK, ctrlclock);
+
+ ctrlclock &= ~FOSCMOD_MASK;
+ switch (pcdev->camexclk) {
+ case 6000000:
+ ctrlclock |= CAMEXCLK_EN | FOSCMOD_6MHz;
+ break;
+ case 8000000:
+ ctrlclock |= CAMEXCLK_EN | FOSCMOD_8MHz | DPLL_EN;
+ break;
+ case 9600000:
+ ctrlclock |= CAMEXCLK_EN | FOSCMOD_9_6MHz | DPLL_EN;
+ break;
+ case 12000000:
+ ctrlclock |= CAMEXCLK_EN | FOSCMOD_12MHz;
+ break;
+ case 24000000:
+ ctrlclock |= CAMEXCLK_EN | FOSCMOD_24MHz | DPLL_EN;
+ default:
+ break;
+ }
+ CAM_WRITE(pcdev, CTRLCLOCK, ctrlclock & ~DPLL_EN);
+
+ /* enable internal clock */
+ ctrlclock |= MCLK_EN;
+ CAM_WRITE(pcdev, CTRLCLOCK, ctrlclock);
+
+ sensor_reset(pcdev, false);
+
+ pcdev->icd = icd;
+
+ dev_dbg(icd->dev.parent, "OMAP1 Camera driver attached to camera %d\n",
+ icd->devnum);
+ return 0;
+}
+
+static void omap1_cam_remove_device(struct soc_camera_device *icd)
+{
+ struct soc_camera_host *ici = to_soc_camera_host(icd->dev.parent);
+ struct omap1_cam_dev *pcdev = ici->priv;
+ u32 ctrlclock;
+
+ BUG_ON(icd != pcdev->icd);
+
+ suspend_capture(pcdev);
+ disable_capture(pcdev);
+
+ sensor_reset(pcdev, true);
+
+ /* disable and release system clocks */
+ ctrlclock = CAM_READ_CACHE(pcdev, CTRLCLOCK);
+ ctrlclock &= ~(MCLK_EN | DPLL_EN | CAMEXCLK_EN);
+ CAM_WRITE(pcdev, CTRLCLOCK, ctrlclock);
+
+ ctrlclock = (ctrlclock & ~FOSCMOD_MASK) | FOSCMOD_12MHz;
+ CAM_WRITE(pcdev, CTRLCLOCK, ctrlclock);
+ CAM_WRITE(pcdev, CTRLCLOCK, ctrlclock | MCLK_EN);
+
+ CAM_WRITE(pcdev, CTRLCLOCK, ctrlclock & ~MCLK_EN);
+
+ clk_disable(pcdev->clk);
+
+ pcdev->icd = NULL;
+
+ dev_dbg(icd->dev.parent,
+ "OMAP1 Camera driver detached from camera %d\n", icd->devnum);
+}
+
+/* Duplicate standard formats based on host capability of byte swapping */
+static const struct soc_mbus_pixelfmt omap1_cam_formats[] = {
+ [V4L2_MBUS_FMT_UYVY8_2X8] = {
+ .fourcc = V4L2_PIX_FMT_YUYV,
+ .name = "YUYV",
+ .bits_per_sample = 8,
+ .packing = SOC_MBUS_PACKING_2X8_PADHI,
+ .order = SOC_MBUS_ORDER_BE,
+ },
+ [V4L2_MBUS_FMT_VYUY8_2X8] = {
+ .fourcc = V4L2_PIX_FMT_YVYU,
+ .name = "YVYU",
+ .bits_per_sample = 8,
+ .packing = SOC_MBUS_PACKING_2X8_PADHI,
+ .order = SOC_MBUS_ORDER_BE,
+ },
+ [V4L2_MBUS_FMT_YUYV8_2X8] = {
+ .fourcc = V4L2_PIX_FMT_UYVY,
+ .name = "UYVY",
+ .bits_per_sample = 8,
+ .packing = SOC_MBUS_PACKING_2X8_PADHI,
+ .order = SOC_MBUS_ORDER_BE,
+ },
+ [V4L2_MBUS_FMT_YVYU8_2X8] = {
+ .fourcc = V4L2_PIX_FMT_VYUY,
+ .name = "VYUY",
+ .bits_per_sample = 8,
+ .packing = SOC_MBUS_PACKING_2X8_PADHI,
+ .order = SOC_MBUS_ORDER_BE,
+ },
+ [V4L2_MBUS_FMT_RGB555_2X8_PADHI_BE] = {
+ .fourcc = V4L2_PIX_FMT_RGB555,
+ .name = "RGB555",
+ .bits_per_sample = 8,
+ .packing = SOC_MBUS_PACKING_2X8_PADHI,
+ .order = SOC_MBUS_ORDER_BE,
+ },
+ [V4L2_MBUS_FMT_RGB555_2X8_PADHI_LE] = {
+ .fourcc = V4L2_PIX_FMT_RGB555X,
+ .name = "RGB555X",
+ .bits_per_sample = 8,
+ .packing = SOC_MBUS_PACKING_2X8_PADHI,
+ .order = SOC_MBUS_ORDER_BE,
+ },
+ [V4L2_MBUS_FMT_RGB565_2X8_BE] = {
+ .fourcc = V4L2_PIX_FMT_RGB565,
+ .name = "RGB565",
+ .bits_per_sample = 8,
+ .packing = SOC_MBUS_PACKING_2X8_PADHI,
+ .order = SOC_MBUS_ORDER_BE,
+ },
+ [V4L2_MBUS_FMT_RGB565_2X8_LE] = {
+ .fourcc = V4L2_PIX_FMT_RGB565X,
+ .name = "RGB565X",
+ .bits_per_sample = 8,
+ .packing = SOC_MBUS_PACKING_2X8_PADHI,
+ .order = SOC_MBUS_ORDER_BE,
+ },
+};
+
+static int omap1_cam_get_formats(struct soc_camera_device *icd,
+ unsigned int idx, struct soc_camera_format_xlate *xlate)
+{
+ struct v4l2_subdev *sd = soc_camera_to_subdev(icd);
+ struct device *dev = icd->dev.parent;
+ int formats = 0, ret;
+ enum v4l2_mbus_pixelcode code;
+ const struct soc_mbus_pixelfmt *fmt;
+
+ ret = v4l2_subdev_call(sd, video, enum_mbus_fmt, idx, &code);
+ if (ret < 0)
+ /* No more formats */
+ return 0;
+
+ fmt = soc_mbus_get_fmtdesc(code);
+ if (!fmt) {
+ dev_err(dev, "%s: invalid format code #%d: %d\n", __func__,
+ idx, code);
+ return 0;
+ }
+
+ /* Check support for the requested bits-per-sample */
+ if (fmt->bits_per_sample != 8)
+ return 0;
+
+ switch (code) {
+ case V4L2_MBUS_FMT_YUYV8_2X8:
+ case V4L2_MBUS_FMT_YVYU8_2X8:
+ case V4L2_MBUS_FMT_UYVY8_2X8:
+ case V4L2_MBUS_FMT_VYUY8_2X8:
+ case V4L2_MBUS_FMT_RGB555_2X8_PADHI_BE:
+ case V4L2_MBUS_FMT_RGB555_2X8_PADHI_LE:
+ case V4L2_MBUS_FMT_RGB565_2X8_BE:
+ case V4L2_MBUS_FMT_RGB565_2X8_LE:
+ formats++;
+ if (xlate) {
+ xlate->host_fmt = &omap1_cam_formats[code];
+ xlate->code = code;
+ xlate++;
+ dev_dbg(dev, "%s: providing format %s "
+ "as byte swapped code #%d\n", __func__,
+ omap1_cam_formats[code].name, code);
+ }
+ default:
+ if (xlate)
+ dev_dbg(dev, "%s: providing format %s "
+ "in pass-through mode\n", __func__,
+ fmt->name);
+ }
+ formats++;
+ if (xlate) {
+ xlate->host_fmt = fmt;
+ xlate->code = code;
+ xlate++;
+ }
+
+ return formats;
+}
+
+static bool is_dma_aligned(s32 bytes_per_line, unsigned int height,
+ enum omap1_cam_vb_mode vb_mode)
+{
+ int size = bytes_per_line * height;
+
+ return IS_ALIGNED(bytes_per_line, DMA_ELEMENT_SIZE) &&
+ IS_ALIGNED(size, DMA_FRAME_SIZE(vb_mode) * DMA_ELEMENT_SIZE);
+}
+
+static int dma_align(int *width, int *height,
+ const struct soc_mbus_pixelfmt *fmt,
+ enum omap1_cam_vb_mode vb_mode, bool enlarge)
+{
+ s32 bytes_per_line = soc_mbus_bytes_per_line(*width, fmt);
+
+ if (bytes_per_line < 0)
+ return bytes_per_line;
+
+ if (!is_dma_aligned(bytes_per_line, *height, vb_mode)) {
+ unsigned int pxalign = __fls(bytes_per_line / *width);
+ unsigned int salign = DMA_FRAME_SHIFT(vb_mode) +
+ DMA_ELEMENT_SHIFT - pxalign;
+ unsigned int incr = enlarge << salign;
+
+ v4l_bound_align_image(width, 1, *width + incr, 0,
+ height, 1, *height + incr, 0, salign);
+ return 0;
+ }
+ return 1;
+}
+
+#define subdev_call_with_sense(pcdev, dev, icd, sd, function, args...) \
+({ \
+ struct soc_camera_sense sense = { \
+ .master_clock = pcdev->camexclk, \
+ .pixel_clock_max = 0, \
+ }; \
+ int __ret; \
+ \
+ if (pcdev->pdata) \
+ sense.pixel_clock_max = pcdev->pdata->lclk_khz_max * 1000; \
+ icd->sense = &sense; \
+ __ret = v4l2_subdev_call(sd, video, function, ##args); \
+ icd->sense = NULL; \
+ \
+ if (sense.flags & SOCAM_SENSE_PCLK_CHANGED) { \
+ if (sense.pixel_clock > sense.pixel_clock_max) { \
+ dev_err(dev, "%s: pixel clock %lu " \
+ "set by the camera too high!\n", \
+ __func__, sense.pixel_clock); \
+ __ret = -EINVAL; \
+ } \
+ } \
+ __ret; \
+})
+
+static int set_mbus_format(struct omap1_cam_dev *pcdev, struct device *dev,
+ struct soc_camera_device *icd, struct v4l2_subdev *sd,
+ struct v4l2_mbus_framefmt *mf,
+ const struct soc_camera_format_xlate *xlate)
+{
+ s32 bytes_per_line;
+ int ret = subdev_call_with_sense(pcdev, dev, icd, sd, s_mbus_fmt, mf);
+
+ if (ret < 0) {
+ dev_err(dev, "%s: s_mbus_fmt failed\n", __func__);
+ return ret;
+ }
+
+ if (mf->code != xlate->code) {
+ dev_err(dev, "%s: unexpected pixel code change\n", __func__);
+ return -EINVAL;
+ }
+
+ bytes_per_line = soc_mbus_bytes_per_line(mf->width, xlate->host_fmt);
+ if (bytes_per_line < 0) {
+ dev_err(dev, "%s: soc_mbus_bytes_per_line() failed\n",
+ __func__);
+ return bytes_per_line;
+ }
+
+ if (!is_dma_aligned(bytes_per_line, mf->height, pcdev->vb_mode)) {
+ dev_err(dev, "%s: resulting geometry %ux%u not DMA aligned\n",
+ __func__, mf->width, mf->height);
+ return -EINVAL;
+ }
+ return 0;
+}
+
+static int omap1_cam_set_crop(struct soc_camera_device *icd,
+ struct v4l2_crop *crop)
+{
+ struct v4l2_rect *rect = &crop->c;
+ const struct soc_camera_format_xlate *xlate = icd->current_fmt;
+ struct v4l2_subdev *sd = soc_camera_to_subdev(icd);
+ struct soc_camera_host *ici = to_soc_camera_host(icd->dev.parent);
+ struct omap1_cam_dev *pcdev = ici->priv;
+ struct device *dev = icd->dev.parent;
+ struct v4l2_mbus_framefmt mf;
+ int ret;
+
+ ret = subdev_call_with_sense(pcdev, dev, icd, sd, s_crop, crop);
+ if (ret < 0) {
+ dev_warn(dev, "%s: failed to crop to %ux%u@%u:%u\n", __func__,
+ rect->width, rect->height, rect->left, rect->top);
+ return ret;
+ }
+
+ ret = v4l2_subdev_call(sd, video, g_mbus_fmt, &mf);
+ if (ret < 0) {
+ dev_warn(dev, "%s: failed to fetch current format\n", __func__);
+ return ret;
+ }
+
+ ret = dma_align(&mf.width, &mf.height, xlate->host_fmt, pcdev->vb_mode,
+ false);
+ if (ret < 0) {
+ dev_err(dev, "%s: failed to align %ux%u %s with DMA\n",
+ __func__, mf.width, mf.height,
+ xlate->host_fmt->name);
+ return ret;
+ }
+
+ if (!ret) {
+ /* sensor returned geometry not DMA aligned, trying to fix */
+ ret = set_mbus_format(pcdev, dev, icd, sd, &mf, xlate);
+ if (ret < 0) {
+ dev_err(dev, "%s: failed to set format\n", __func__);
+ return ret;
+ }
+ }
+
+ icd->user_width = mf.width;
+ icd->user_height = mf.height;
+
+ return 0;
+}
+
+static int omap1_cam_set_fmt(struct soc_camera_device *icd,
+ struct v4l2_format *f)
+{
+ struct v4l2_subdev *sd = soc_camera_to_subdev(icd);
+ const struct soc_camera_format_xlate *xlate;
+ struct device *dev = icd->dev.parent;
+ struct soc_camera_host *ici = to_soc_camera_host(icd->dev.parent);
+ struct omap1_cam_dev *pcdev = ici->priv;
+ struct v4l2_pix_format *pix = &f->fmt.pix;
+ struct v4l2_mbus_framefmt mf;
+ int ret;
+
+ xlate = soc_camera_xlate_by_fourcc(icd, pix->pixelformat);
+ if (!xlate) {
+ dev_warn(dev, "%s: format %#x not found\n", __func__,
+ pix->pixelformat);
+ return -EINVAL;
+ }
+
+ mf.width = pix->width;
+ mf.height = pix->height;
+ mf.field = pix->field;
+ mf.colorspace = pix->colorspace;
+ mf.code = xlate->code;
+
+ ret = dma_align(&mf.width, &mf.height, xlate->host_fmt, pcdev->vb_mode,
+ true);
+ if (ret < 0) {
+ dev_err(dev, "%s: failed to align %ux%u %s with DMA\n",
+ __func__, pix->width, pix->height,
+ xlate->host_fmt->name);
+ return ret;
+ }
+
+ ret = set_mbus_format(pcdev, dev, icd, sd, &mf, xlate);
+ if (ret < 0) {
+ dev_err(dev, "%s: failed to set format\n", __func__);
+ return ret;
+ }
+
+ pix->width = mf.width;
+ pix->height = mf.height;
+ pix->field = mf.field;
+ pix->colorspace = mf.colorspace;
+ icd->current_fmt = xlate;
+
+ return 0;
+}
+
+static int omap1_cam_try_fmt(struct soc_camera_device *icd,
+ struct v4l2_format *f)
+{
+ struct v4l2_subdev *sd = soc_camera_to_subdev(icd);
+ const struct soc_camera_format_xlate *xlate;
+ struct v4l2_pix_format *pix = &f->fmt.pix;
+ struct v4l2_mbus_framefmt mf;
+ int ret;
+ /* TODO: limit to OMAP1 hardware capabilities */
+
+ xlate = soc_camera_xlate_by_fourcc(icd, pix->pixelformat);
+ if (!xlate) {
+ dev_warn(icd->dev.parent, "Format %#x not found\n",
+ pix->pixelformat);
+ return -EINVAL;
+ }
+
+ mf.width = pix->width;
+ mf.height = pix->height;
+ mf.field = pix->field;
+ mf.colorspace = pix->colorspace;
+ mf.code = xlate->code;
+
+ /* limit to sensor capabilities */
+ ret = v4l2_subdev_call(sd, video, try_mbus_fmt, &mf);
+ if (ret < 0)
+ return ret;
+
+ pix->width = mf.width;
+ pix->height = mf.height;
+ pix->field = mf.field;
+ pix->colorspace = mf.colorspace;
+
+ return 0;
+}
+
+static bool sg_mode;
+
+/*
+ * Local mmap_mapper wrapper,
+ * used for detecting videobuf-dma-contig buffer allocation failures
+ * and switching to videobuf-dma-sg automatically for future attempts.
+ */
+static int omap1_cam_mmap_mapper(struct videobuf_queue *q,
+ struct videobuf_buffer *buf,
+ struct vm_area_struct *vma)
+{
+ struct soc_camera_device *icd = q->priv_data;
+ struct soc_camera_host *ici = to_soc_camera_host(icd->dev.parent);
+ struct omap1_cam_dev *pcdev = ici->priv;
+ int ret;
+
+ ret = pcdev->mmap_mapper(q, buf, vma);
+
+ if (ret == -ENOMEM)
+ sg_mode = true;
+
+ return ret;
+}
+
+static void omap1_cam_init_videobuf(struct videobuf_queue *q,
+ struct soc_camera_device *icd)
+{
+ struct soc_camera_host *ici = to_soc_camera_host(icd->dev.parent);
+ struct omap1_cam_dev *pcdev = ici->priv;
+
+ if (!sg_mode)
+ videobuf_queue_dma_contig_init(q, &omap1_videobuf_ops,
+ icd->dev.parent, &pcdev->lock,
+ V4L2_BUF_TYPE_VIDEO_CAPTURE, V4L2_FIELD_NONE,
+ sizeof(struct omap1_cam_buf), icd, NULL);
+ else
+ videobuf_queue_sg_init(q, &omap1_videobuf_ops,
+ icd->dev.parent, &pcdev->lock,
+ V4L2_BUF_TYPE_VIDEO_CAPTURE, V4L2_FIELD_NONE,
+ sizeof(struct omap1_cam_buf), icd, NULL);
+
+ /* use videobuf mode (auto)selected with the module parameter */
+ pcdev->vb_mode = sg_mode ? OMAP1_CAM_DMA_SG : OMAP1_CAM_DMA_CONTIG;
+
+ /*
+ * Ensure we substitute the videobuf-dma-contig version of the
+ * mmap_mapper() callback with our own wrapper, used for switching
+ * automatically to videobuf-dma-sg on buffer allocation failure.
+ */
+ if (!sg_mode && q->int_ops->mmap_mapper != omap1_cam_mmap_mapper) {
+ pcdev->mmap_mapper = q->int_ops->mmap_mapper;
+ q->int_ops->mmap_mapper = omap1_cam_mmap_mapper;
+ }
+}
+
+static int omap1_cam_reqbufs(struct soc_camera_device *icd,
+ struct v4l2_requestbuffers *p)
+{
+ int i;
+
+ /*
+ * This is for locking debugging only. I removed spinlocks and now I
+ * check whether .prepare is ever called on a linked buffer, or whether
+ * a dma IRQ can occur for an in-work or unlinked buffer. Until now
+ * it hadn't triggered
+ */
+ for (i = 0; i < p->count; i++) {
+ struct omap1_cam_buf *buf = container_of(icd->vb_vidq.bufs[i],
+ struct omap1_cam_buf, vb);
+ buf->inwork = 0;
+ INIT_LIST_HEAD(&buf->vb.queue);
+ }
+
+ return 0;
+}
+
+static int omap1_cam_querycap(struct soc_camera_host *ici,
+ struct v4l2_capability *cap)
+{
+ /* cap->name is set by the friendly caller:-> */
+ strlcpy(cap->card, "OMAP1 Camera", sizeof(cap->card));
+ cap->version = VERSION_CODE;
+ cap->capabilities = V4L2_CAP_VIDEO_CAPTURE | V4L2_CAP_STREAMING;
+
+ return 0;
+}
+
+static int omap1_cam_set_bus_param(struct soc_camera_device *icd,
+ __u32 pixfmt)
+{
+ struct soc_camera_host *ici = to_soc_camera_host(icd->dev.parent);
+ struct omap1_cam_dev *pcdev = ici->priv;
+ struct device *dev = icd->dev.parent;
+ const struct soc_camera_format_xlate *xlate;
+ const struct soc_mbus_pixelfmt *fmt;
+ unsigned long camera_flags, common_flags;
+ u32 ctrlclock, mode;
+ int ret;
+
+ camera_flags = icd->ops->query_bus_param(icd);
+
+ common_flags = soc_camera_bus_param_compatible(camera_flags,
+ SOCAM_BUS_FLAGS);
+ if (!common_flags)
+ return -EINVAL;
+
+ /* Make choices, possibly based on platform configuration */
+ if ((common_flags & SOCAM_PCLK_SAMPLE_RISING) &&
+ (common_flags & SOCAM_PCLK_SAMPLE_FALLING)) {
+ if (!pcdev->pdata ||
+ pcdev->pdata->flags & OMAP1_CAMERA_LCLK_RISING)
+ common_flags &= ~SOCAM_PCLK_SAMPLE_FALLING;
+ else
+ common_flags &= ~SOCAM_PCLK_SAMPLE_RISING;
+ }
+
+ ret = icd->ops->set_bus_param(icd, common_flags);
+ if (ret < 0)
+ return ret;
+
+ ctrlclock = CAM_READ_CACHE(pcdev, CTRLCLOCK);
+ if (ctrlclock & LCLK_EN)
+ CAM_WRITE(pcdev, CTRLCLOCK, ctrlclock & ~LCLK_EN);
+
+ if (common_flags & SOCAM_PCLK_SAMPLE_RISING) {
+ dev_dbg(dev, "CTRLCLOCK_REG |= POLCLK\n");
+ ctrlclock |= POLCLK;
+ } else {
+ dev_dbg(dev, "CTRLCLOCK_REG &= ~POLCLK\n");
+ ctrlclock &= ~POLCLK;
+ }
+ CAM_WRITE(pcdev, CTRLCLOCK, ctrlclock & ~LCLK_EN);
+
+ if (ctrlclock & LCLK_EN)
+ CAM_WRITE(pcdev, CTRLCLOCK, ctrlclock);
+
+ /* select bus endianness */
+ xlate = soc_camera_xlate_by_fourcc(icd, pixfmt);
+ fmt = xlate->host_fmt;
+
+ mode = CAM_READ(pcdev, MODE) & ~(RAZ_FIFO | IRQ_MASK | DMA);
+ if (fmt->order == SOC_MBUS_ORDER_LE) {
+ dev_dbg(dev, "MODE_REG &= ~ORDERCAMD\n");
+ CAM_WRITE(pcdev, MODE, mode & ~ORDERCAMD);
+ } else {
+ dev_dbg(dev, "MODE_REG |= ORDERCAMD\n");
+ CAM_WRITE(pcdev, MODE, mode | ORDERCAMD);
+ }
+
+ return 0;
+}
+
+static unsigned int omap1_cam_poll(struct file *file, poll_table *pt)
+{
+ struct soc_camera_device *icd = file->private_data;
+ struct omap1_cam_buf *buf;
+
+ buf = list_entry(icd->vb_vidq.stream.next, struct omap1_cam_buf,
+ vb.stream);
+
+ poll_wait(file, &buf->vb.done, pt);
+
+ if (buf->vb.state == VIDEOBUF_DONE ||
+ buf->vb.state == VIDEOBUF_ERROR)
+ return POLLIN | POLLRDNORM;
+
+ return 0;
+}
+
+static struct soc_camera_host_ops omap1_host_ops = {
+ .owner = THIS_MODULE,
+ .add = omap1_cam_add_device,
+ .remove = omap1_cam_remove_device,
+ .get_formats = omap1_cam_get_formats,
+ .set_crop = omap1_cam_set_crop,
+ .set_fmt = omap1_cam_set_fmt,
+ .try_fmt = omap1_cam_try_fmt,
+ .init_videobuf = omap1_cam_init_videobuf,
+ .reqbufs = omap1_cam_reqbufs,
+ .querycap = omap1_cam_querycap,
+ .set_bus_param = omap1_cam_set_bus_param,
+ .poll = omap1_cam_poll,
+};
+
+static int __init omap1_cam_probe(struct platform_device *pdev)
+{
+ struct omap1_cam_dev *pcdev;
+ struct resource *res;
+ struct clk *clk;
+ void __iomem *base;
+ unsigned int irq;
+ int err = 0;
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ irq = platform_get_irq(pdev, 0);
+ if (!res || (int)irq <= 0) {
+ err = -ENODEV;
+ goto exit;
+ }
+
+ clk = clk_get(&pdev->dev, "armper_ck");
+ if (IS_ERR(clk)) {
+ err = PTR_ERR(clk);
+ goto exit;
+ }
+
+ pcdev = kzalloc(sizeof(*pcdev) + resource_size(res), GFP_KERNEL);
+ if (!pcdev) {
+ dev_err(&pdev->dev, "Could not allocate pcdev\n");
+ err = -ENOMEM;
+ goto exit_put_clk;
+ }
+
+ pcdev->res = res;
+ pcdev->clk = clk;
+
+ pcdev->pdata = pdev->dev.platform_data;
+ pcdev->pflags = pcdev->pdata->flags;
+
+ if (pcdev->pdata)
+ pcdev->camexclk = pcdev->pdata->camexclk_khz * 1000;
+
+ switch (pcdev->camexclk) {
+ case 6000000:
+ case 8000000:
+ case 9600000:
+ case 12000000:
+ case 24000000:
+ break;
+ default:
+ dev_warn(&pdev->dev,
+ "Incorrect sensor clock frequency %ld kHz, "
+ "should be one of 0, 6, 8, 9.6, 12 or 24 MHz, "
+ "please correct your platform data\n",
+ pcdev->pdata->camexclk_khz);
+ pcdev->camexclk = 0;
+ case 0:
+ dev_info(&pdev->dev,
+ "Not providing sensor clock\n");
+ }
+
+ INIT_LIST_HEAD(&pcdev->capture);
+ spin_lock_init(&pcdev->lock);
+
+ /*
+ * Request the region.
+ */
+ if (!request_mem_region(res->start, resource_size(res), DRIVER_NAME)) {
+ err = -EBUSY;
+ goto exit_kfree;
+ }
+
+ base = ioremap(res->start, resource_size(res));
+ if (!base) {
+ err = -ENOMEM;
+ goto exit_release;
+ }
+ pcdev->irq = irq;
+ pcdev->base = base;
+
+ sensor_reset(pcdev, true);
+
+ err = omap_request_dma(OMAP_DMA_CAMERA_IF_RX, DRIVER_NAME,
+ dma_isr, (void *)pcdev, &pcdev->dma_ch);
+ if (err < 0) {
+ dev_err(&pdev->dev, "Can't request DMA for OMAP1 Camera\n");
+ err = -EBUSY;
+ goto exit_iounmap;
+ }
+ dev_dbg(&pdev->dev, "got DMA channel %d\n", pcdev->dma_ch);
+
+ /* preconfigure DMA */
+ omap_set_dma_src_params(pcdev->dma_ch, OMAP_DMA_PORT_TIPB,
+ OMAP_DMA_AMODE_CONSTANT, res->start + REG_CAMDATA,
+ 0, 0);
+ omap_set_dma_dest_burst_mode(pcdev->dma_ch, OMAP_DMA_DATA_BURST_4);
+ /* setup DMA autoinitialization */
+ omap_dma_link_lch(pcdev->dma_ch, pcdev->dma_ch);
+
+ err = request_irq(pcdev->irq, cam_isr, 0, DRIVER_NAME, pcdev);
+ if (err) {
+ dev_err(&pdev->dev, "Camera interrupt register failed\n");
+ goto exit_free_dma;
+ }
+
+ pcdev->soc_host.drv_name = DRIVER_NAME;
+ pcdev->soc_host.ops = &omap1_host_ops;
+ pcdev->soc_host.priv = pcdev;
+ pcdev->soc_host.v4l2_dev.dev = &pdev->dev;
+ pcdev->soc_host.nr = pdev->id;
+
+ err = soc_camera_host_register(&pcdev->soc_host);
+ if (err)
+ goto exit_free_irq;
+
+ dev_info(&pdev->dev, "OMAP1 Camera Interface driver loaded\n");
+
+ return 0;
+
+exit_free_irq:
+ free_irq(pcdev->irq, pcdev);
+exit_free_dma:
+ omap_free_dma(pcdev->dma_ch);
+exit_iounmap:
+ iounmap(base);
+exit_release:
+ release_mem_region(res->start, resource_size(res));
+exit_kfree:
+ kfree(pcdev);
+exit_put_clk:
+ clk_put(clk);
+exit:
+ return err;
+}
+
+static int __exit omap1_cam_remove(struct platform_device *pdev)
+{
+ struct soc_camera_host *soc_host = to_soc_camera_host(&pdev->dev);
+ struct omap1_cam_dev *pcdev = container_of(soc_host,
+ struct omap1_cam_dev, soc_host);
+ struct resource *res;
+
+ free_irq(pcdev->irq, pcdev);
+
+ omap_free_dma(pcdev->dma_ch);
+
+ soc_camera_host_unregister(soc_host);
+
+ iounmap(pcdev->base);
+
+ res = pcdev->res;
+ release_mem_region(res->start, resource_size(res));
+
+ kfree(pcdev);
+
+ clk_put(pcdev->clk);
+
+ dev_info(&pdev->dev, "OMAP1 Camera Interface driver unloaded\n");
+
+ return 0;
+}
+
+static struct platform_driver omap1_cam_driver = {
+ .driver = {
+ .name = DRIVER_NAME,
+ },
+ .probe = omap1_cam_probe,
+ .remove = __exit_p(omap1_cam_remove),
+};
+
+static int __init omap1_cam_init(void)
+{
+ return platform_driver_register(&omap1_cam_driver);
+}
+module_init(omap1_cam_init);
+
+static void __exit omap1_cam_exit(void)
+{
+ platform_driver_unregister(&omap1_cam_driver);
+}
+module_exit(omap1_cam_exit);
+
+module_param(sg_mode, bool, 0644);
+MODULE_PARM_DESC(sg_mode, "videobuf mode, 0: dma-contig (default), 1: dma-sg");
+
+MODULE_DESCRIPTION("OMAP1 Camera Interface driver");
+MODULE_AUTHOR("Janusz Krzysztofik <jkrzyszt@tis.icnet.pl>");
+MODULE_LICENSE("GPL v2");
+MODULE_ALIAS("platform:" DRIVER_NAME);
diff --git a/drivers/media/video/omap24xxcam.c b/drivers/media/video/omap24xxcam.c
index 926a5aa6f7f8..378b094aff16 100644
--- a/drivers/media/video/omap24xxcam.c
+++ b/drivers/media/video/omap24xxcam.c
@@ -420,7 +420,7 @@ static void omap24xxcam_vbq_release(struct videobuf_queue *vbq,
struct videobuf_dmabuf *dma = videobuf_to_dma(vb);
/* wait for buffer, especially to get out of the sgdma queue */
- videobuf_waiton(vb, 0, 0);
+ videobuf_waiton(vbq, vb, 0, 0);
if (vb->memory == V4L2_MEMORY_MMAP) {
dma_unmap_sg(vbq->dev, dma->sglist, dma->sglen,
dma->direction);
@@ -1491,7 +1491,7 @@ static int omap24xxcam_open(struct file *file)
videobuf_queue_sg_init(&fh->vbq, &omap24xxcam_vbq_ops, NULL,
&fh->vbq_lock, V4L2_BUF_TYPE_VIDEO_CAPTURE,
V4L2_FIELD_NONE,
- sizeof(struct videobuf_buffer), fh);
+ sizeof(struct videobuf_buffer), fh, NULL);
return 0;
diff --git a/drivers/media/video/ov6650.c b/drivers/media/video/ov6650.c
new file mode 100644
index 000000000000..cf93de988068
--- /dev/null
+++ b/drivers/media/video/ov6650.c
@@ -0,0 +1,1221 @@
+/*
+ * V4L2 SoC Camera driver for OmniVision OV6650 Camera Sensor
+ *
+ * Copyright (C) 2010 Janusz Krzysztofik <jkrzyszt@tis.icnet.pl>
+ *
+ * Based on OmniVision OV96xx Camera Driver
+ * Copyright (C) 2009 Marek Vasut <marek.vasut@gmail.com>
+ *
+ * Based on ov772x camera driver:
+ * Copyright (C) 2008 Renesas Solutions Corp.
+ * Kuninori Morimoto <morimoto.kuninori@renesas.com>
+ *
+ * Based on ov7670 and soc_camera_platform driver,
+ * Copyright 2006-7 Jonathan Corbet <corbet@lwn.net>
+ * Copyright (C) 2008 Magnus Damm
+ * Copyright (C) 2008, Guennadi Liakhovetski <kernel@pengutronix.de>
+ *
+ * Hardware specific bits initially based on former work by Matt Callow
+ * drivers/media/video/omap/sensor_ov6650.c
+ * Copyright (C) 2006 Matt Callow
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/bitops.h>
+#include <linux/delay.h>
+#include <linux/i2c.h>
+#include <linux/slab.h>
+
+#include <media/soc_camera.h>
+#include <media/v4l2-chip-ident.h>
+
+
+/* Register definitions */
+#define REG_GAIN 0x00 /* range 00 - 3F */
+#define REG_BLUE 0x01
+#define REG_RED 0x02
+#define REG_SAT 0x03 /* [7:4] saturation [0:3] reserved */
+#define REG_HUE 0x04 /* [7:6] rsrvd [5] hue en [4:0] hue */
+
+#define REG_BRT 0x06
+
+#define REG_PIDH 0x0a
+#define REG_PIDL 0x0b
+
+#define REG_AECH 0x10
+#define REG_CLKRC 0x11 /* Data Format and Internal Clock */
+ /* [7:6] Input system clock (MHz)*/
+ /* 00=8, 01=12, 10=16, 11=24 */
+ /* [5:0]: Internal Clock Pre-Scaler */
+#define REG_COMA 0x12 /* [7] Reset */
+#define REG_COMB 0x13
+#define REG_COMC 0x14
+#define REG_COMD 0x15
+#define REG_COML 0x16
+#define REG_HSTRT 0x17
+#define REG_HSTOP 0x18
+#define REG_VSTRT 0x19
+#define REG_VSTOP 0x1a
+#define REG_PSHFT 0x1b
+#define REG_MIDH 0x1c
+#define REG_MIDL 0x1d
+#define REG_HSYNS 0x1e
+#define REG_HSYNE 0x1f
+#define REG_COME 0x20
+#define REG_YOFF 0x21
+#define REG_UOFF 0x22
+#define REG_VOFF 0x23
+#define REG_AEW 0x24
+#define REG_AEB 0x25
+#define REG_COMF 0x26
+#define REG_COMG 0x27
+#define REG_COMH 0x28
+#define REG_COMI 0x29
+
+#define REG_FRARL 0x2b
+#define REG_COMJ 0x2c
+#define REG_COMK 0x2d
+#define REG_AVGY 0x2e
+#define REG_REF0 0x2f
+#define REG_REF1 0x30
+#define REG_REF2 0x31
+#define REG_FRAJH 0x32
+#define REG_FRAJL 0x33
+#define REG_FACT 0x34
+#define REG_L1AEC 0x35
+#define REG_AVGU 0x36
+#define REG_AVGV 0x37
+
+#define REG_SPCB 0x60
+#define REG_SPCC 0x61
+#define REG_GAM1 0x62
+#define REG_GAM2 0x63
+#define REG_GAM3 0x64
+#define REG_SPCD 0x65
+
+#define REG_SPCE 0x68
+#define REG_ADCL 0x69
+
+#define REG_RMCO 0x6c
+#define REG_GMCO 0x6d
+#define REG_BMCO 0x6e
+
+
+/* Register bits, values, etc. */
+#define OV6650_PIDH 0x66 /* high byte of product ID number */
+#define OV6650_PIDL 0x50 /* low byte of product ID number */
+#define OV6650_MIDH 0x7F /* high byte of mfg ID */
+#define OV6650_MIDL 0xA2 /* low byte of mfg ID */
+
+#define DEF_GAIN 0x00
+#define DEF_BLUE 0x80
+#define DEF_RED 0x80
+
+#define SAT_SHIFT 4
+#define SAT_MASK (0xf << SAT_SHIFT)
+#define SET_SAT(x) (((x) << SAT_SHIFT) & SAT_MASK)
+
+#define HUE_EN BIT(5)
+#define HUE_MASK 0x1f
+#define DEF_HUE 0x10
+#define SET_HUE(x) (HUE_EN | ((x) & HUE_MASK))
+
+#define DEF_AECH 0x4D
+
+#define CLKRC_6MHz 0x00
+#define CLKRC_12MHz 0x40
+#define CLKRC_16MHz 0x80
+#define CLKRC_24MHz 0xc0
+#define CLKRC_DIV_MASK 0x3f
+#define GET_CLKRC_DIV(x) (((x) & CLKRC_DIV_MASK) + 1)
+
+#define COMA_RESET BIT(7)
+#define COMA_QCIF BIT(5)
+#define COMA_RAW_RGB BIT(4)
+#define COMA_RGB BIT(3)
+#define COMA_BW BIT(2)
+#define COMA_WORD_SWAP BIT(1)
+#define COMA_BYTE_SWAP BIT(0)
+#define DEF_COMA 0x00
+
+#define COMB_FLIP_V BIT(7)
+#define COMB_FLIP_H BIT(5)
+#define COMB_BAND_FILTER BIT(4)
+#define COMB_AWB BIT(2)
+#define COMB_AGC BIT(1)
+#define COMB_AEC BIT(0)
+#define DEF_COMB 0x5f
+
+#define COML_ONE_CHANNEL BIT(7)
+
+#define DEF_HSTRT 0x24
+#define DEF_HSTOP 0xd4
+#define DEF_VSTRT 0x04
+#define DEF_VSTOP 0x94
+
+#define COMF_HREF_LOW BIT(4)
+
+#define COMJ_PCLK_RISING BIT(4)
+#define COMJ_VSYNC_HIGH BIT(0)
+
+/* supported resolutions */
+#define W_QCIF (DEF_HSTOP - DEF_HSTRT)
+#define W_CIF (W_QCIF << 1)
+#define H_QCIF (DEF_VSTOP - DEF_VSTRT)
+#define H_CIF (H_QCIF << 1)
+
+#define FRAME_RATE_MAX 30
+
+
+struct ov6650_reg {
+ u8 reg;
+ u8 val;
+};
+
+struct ov6650 {
+ struct v4l2_subdev subdev;
+
+ int gain;
+ int blue;
+ int red;
+ int saturation;
+ int hue;
+ int brightness;
+ int exposure;
+ int gamma;
+ int aec;
+ bool vflip;
+ bool hflip;
+ bool awb;
+ bool agc;
+ bool half_scale; /* scale down output by 2 */
+ struct v4l2_rect rect; /* sensor cropping window */
+ unsigned long pclk_limit; /* from host */
+ unsigned long pclk_max; /* from resolution and format */
+ struct v4l2_fract tpf; /* as requested with s_parm */
+ enum v4l2_mbus_pixelcode code;
+ enum v4l2_colorspace colorspace;
+};
+
+
+static enum v4l2_mbus_pixelcode ov6650_codes[] = {
+ V4L2_MBUS_FMT_YUYV8_2X8,
+ V4L2_MBUS_FMT_UYVY8_2X8,
+ V4L2_MBUS_FMT_YVYU8_2X8,
+ V4L2_MBUS_FMT_VYUY8_2X8,
+ V4L2_MBUS_FMT_SBGGR8_1X8,
+ V4L2_MBUS_FMT_GREY8_1X8,
+};
+
+static const struct v4l2_queryctrl ov6650_controls[] = {
+ {
+ .id = V4L2_CID_AUTOGAIN,
+ .type = V4L2_CTRL_TYPE_BOOLEAN,
+ .name = "AGC",
+ .minimum = 0,
+ .maximum = 1,
+ .step = 1,
+ .default_value = 1,
+ },
+ {
+ .id = V4L2_CID_GAIN,
+ .type = V4L2_CTRL_TYPE_INTEGER,
+ .name = "Gain",
+ .minimum = 0,
+ .maximum = 0x3f,
+ .step = 1,
+ .default_value = DEF_GAIN,
+ },
+ {
+ .id = V4L2_CID_AUTO_WHITE_BALANCE,
+ .type = V4L2_CTRL_TYPE_BOOLEAN,
+ .name = "AWB",
+ .minimum = 0,
+ .maximum = 1,
+ .step = 1,
+ .default_value = 1,
+ },
+ {
+ .id = V4L2_CID_BLUE_BALANCE,
+ .type = V4L2_CTRL_TYPE_INTEGER,
+ .name = "Blue",
+ .minimum = 0,
+ .maximum = 0xff,
+ .step = 1,
+ .default_value = DEF_BLUE,
+ },
+ {
+ .id = V4L2_CID_RED_BALANCE,
+ .type = V4L2_CTRL_TYPE_INTEGER,
+ .name = "Red",
+ .minimum = 0,
+ .maximum = 0xff,
+ .step = 1,
+ .default_value = DEF_RED,
+ },
+ {
+ .id = V4L2_CID_SATURATION,
+ .type = V4L2_CTRL_TYPE_INTEGER,
+ .name = "Saturation",
+ .minimum = 0,
+ .maximum = 0xf,
+ .step = 1,
+ .default_value = 0x8,
+ },
+ {
+ .id = V4L2_CID_HUE,
+ .type = V4L2_CTRL_TYPE_INTEGER,
+ .name = "Hue",
+ .minimum = 0,
+ .maximum = HUE_MASK,
+ .step = 1,
+ .default_value = DEF_HUE,
+ },
+ {
+ .id = V4L2_CID_BRIGHTNESS,
+ .type = V4L2_CTRL_TYPE_INTEGER,
+ .name = "Brightness",
+ .minimum = 0,
+ .maximum = 0xff,
+ .step = 1,
+ .default_value = 0x80,
+ },
+ {
+ .id = V4L2_CID_EXPOSURE_AUTO,
+ .type = V4L2_CTRL_TYPE_INTEGER,
+ .name = "AEC",
+ .minimum = 0,
+ .maximum = 3,
+ .step = 1,
+ .default_value = 0,
+ },
+ {
+ .id = V4L2_CID_EXPOSURE,
+ .type = V4L2_CTRL_TYPE_INTEGER,
+ .name = "Exposure",
+ .minimum = 0,
+ .maximum = 0xff,
+ .step = 1,
+ .default_value = DEF_AECH,
+ },
+ {
+ .id = V4L2_CID_GAMMA,
+ .type = V4L2_CTRL_TYPE_INTEGER,
+ .name = "Gamma",
+ .minimum = 0,
+ .maximum = 0xff,
+ .step = 1,
+ .default_value = 0x12,
+ },
+ {
+ .id = V4L2_CID_VFLIP,
+ .type = V4L2_CTRL_TYPE_BOOLEAN,
+ .name = "Flip Vertically",
+ .minimum = 0,
+ .maximum = 1,
+ .step = 1,
+ .default_value = 0,
+ },
+ {
+ .id = V4L2_CID_HFLIP,
+ .type = V4L2_CTRL_TYPE_BOOLEAN,
+ .name = "Flip Horizontally",
+ .minimum = 0,
+ .maximum = 1,
+ .step = 1,
+ .default_value = 0,
+ },
+};
+
+/* read a register */
+static int ov6650_reg_read(struct i2c_client *client, u8 reg, u8 *val)
+{
+ int ret;
+ u8 data = reg;
+ struct i2c_msg msg = {
+ .addr = client->addr,
+ .flags = 0,
+ .len = 1,
+ .buf = &data,
+ };
+
+ ret = i2c_transfer(client->adapter, &msg, 1);
+ if (ret < 0)
+ goto err;
+
+ msg.flags = I2C_M_RD;
+ ret = i2c_transfer(client->adapter, &msg, 1);
+ if (ret < 0)
+ goto err;
+
+ *val = data;
+ return 0;
+
+err:
+ dev_err(&client->dev, "Failed reading register 0x%02x!\n", reg);
+ return ret;
+}
+
+/* write a register */
+static int ov6650_reg_write(struct i2c_client *client, u8 reg, u8 val)
+{
+ int ret;
+ unsigned char data[2] = { reg, val };
+ struct i2c_msg msg = {
+ .addr = client->addr,
+ .flags = 0,
+ .len = 2,
+ .buf = data,
+ };
+
+ ret = i2c_transfer(client->adapter, &msg, 1);
+ udelay(100);
+
+ if (ret < 0) {
+ dev_err(&client->dev, "Failed writing register 0x%02x!\n", reg);
+ return ret;
+ }
+ return 0;
+}
+
+
+/* Read a register, alter its bits, write it back */
+static int ov6650_reg_rmw(struct i2c_client *client, u8 reg, u8 set, u8 mask)
+{
+ u8 val;
+ int ret;
+
+ ret = ov6650_reg_read(client, reg, &val);
+ if (ret) {
+ dev_err(&client->dev,
+ "[Read]-Modify-Write of register 0x%02x failed!\n",
+ reg);
+ return ret;
+ }
+
+ val &= ~mask;
+ val |= set;
+
+ ret = ov6650_reg_write(client, reg, val);
+ if (ret)
+ dev_err(&client->dev,
+ "Read-Modify-[Write] of register 0x%02x failed!\n",
+ reg);
+
+ return ret;
+}
+
+static struct ov6650 *to_ov6650(const struct i2c_client *client)
+{
+ return container_of(i2c_get_clientdata(client), struct ov6650, subdev);
+}
+
+/* Start/Stop streaming from the device */
+static int ov6650_s_stream(struct v4l2_subdev *sd, int enable)
+{
+ return 0;
+}
+
+/* Alter bus settings on camera side */
+static int ov6650_set_bus_param(struct soc_camera_device *icd,
+ unsigned long flags)
+{
+ struct soc_camera_link *icl = to_soc_camera_link(icd);
+ struct i2c_client *client = to_i2c_client(to_soc_camera_control(icd));
+ int ret;
+
+ flags = soc_camera_apply_sensor_flags(icl, flags);
+
+ if (flags & SOCAM_PCLK_SAMPLE_RISING)
+ ret = ov6650_reg_rmw(client, REG_COMJ, COMJ_PCLK_RISING, 0);
+ else
+ ret = ov6650_reg_rmw(client, REG_COMJ, 0, COMJ_PCLK_RISING);
+ if (ret)
+ return ret;
+
+ if (flags & SOCAM_HSYNC_ACTIVE_LOW)
+ ret = ov6650_reg_rmw(client, REG_COMF, COMF_HREF_LOW, 0);
+ else
+ ret = ov6650_reg_rmw(client, REG_COMF, 0, COMF_HREF_LOW);
+ if (ret)
+ return ret;
+
+ if (flags & SOCAM_VSYNC_ACTIVE_HIGH)
+ ret = ov6650_reg_rmw(client, REG_COMJ, COMJ_VSYNC_HIGH, 0);
+ else
+ ret = ov6650_reg_rmw(client, REG_COMJ, 0, COMJ_VSYNC_HIGH);
+
+ return ret;
+}
+
+/* Request bus settings on camera side */
+static unsigned long ov6650_query_bus_param(struct soc_camera_device *icd)
+{
+ struct soc_camera_link *icl = to_soc_camera_link(icd);
+
+ unsigned long flags = SOCAM_MASTER |
+ SOCAM_PCLK_SAMPLE_RISING | SOCAM_PCLK_SAMPLE_FALLING |
+ SOCAM_HSYNC_ACTIVE_HIGH | SOCAM_HSYNC_ACTIVE_LOW |
+ SOCAM_VSYNC_ACTIVE_HIGH | SOCAM_VSYNC_ACTIVE_LOW |
+ SOCAM_DATA_ACTIVE_HIGH | SOCAM_DATAWIDTH_8;
+
+ return soc_camera_apply_sensor_flags(icl, flags);
+}
+
+/* Get status of additional camera capabilities */
+static int ov6650_g_ctrl(struct v4l2_subdev *sd, struct v4l2_control *ctrl)
+{
+ struct i2c_client *client = v4l2_get_subdevdata(sd);
+ struct ov6650 *priv = to_ov6650(client);
+ uint8_t reg;
+ int ret = 0;
+
+ switch (ctrl->id) {
+ case V4L2_CID_AUTOGAIN:
+ ctrl->value = priv->agc;
+ break;
+ case V4L2_CID_GAIN:
+ if (priv->agc) {
+ ret = ov6650_reg_read(client, REG_GAIN, &reg);
+ ctrl->value = reg;
+ } else {
+ ctrl->value = priv->gain;
+ }
+ break;
+ case V4L2_CID_AUTO_WHITE_BALANCE:
+ ctrl->value = priv->awb;
+ break;
+ case V4L2_CID_BLUE_BALANCE:
+ if (priv->awb) {
+ ret = ov6650_reg_read(client, REG_BLUE, &reg);
+ ctrl->value = reg;
+ } else {
+ ctrl->value = priv->blue;
+ }
+ break;
+ case V4L2_CID_RED_BALANCE:
+ if (priv->awb) {
+ ret = ov6650_reg_read(client, REG_RED, &reg);
+ ctrl->value = reg;
+ } else {
+ ctrl->value = priv->red;
+ }
+ break;
+ case V4L2_CID_SATURATION:
+ ctrl->value = priv->saturation;
+ break;
+ case V4L2_CID_HUE:
+ ctrl->value = priv->hue;
+ break;
+ case V4L2_CID_BRIGHTNESS:
+ ctrl->value = priv->brightness;
+ break;
+ case V4L2_CID_EXPOSURE_AUTO:
+ ctrl->value = priv->aec;
+ break;
+ case V4L2_CID_EXPOSURE:
+ if (priv->aec) {
+ ret = ov6650_reg_read(client, REG_AECH, &reg);
+ ctrl->value = reg;
+ } else {
+ ctrl->value = priv->exposure;
+ }
+ break;
+ case V4L2_CID_GAMMA:
+ ctrl->value = priv->gamma;
+ break;
+ case V4L2_CID_VFLIP:
+ ctrl->value = priv->vflip;
+ break;
+ case V4L2_CID_HFLIP:
+ ctrl->value = priv->hflip;
+ break;
+ }
+ return ret;
+}
+
+/* Set status of additional camera capabilities */
+static int ov6650_s_ctrl(struct v4l2_subdev *sd, struct v4l2_control *ctrl)
+{
+ struct i2c_client *client = v4l2_get_subdevdata(sd);
+ struct ov6650 *priv = to_ov6650(client);
+ int ret = 0;
+
+ switch (ctrl->id) {
+ case V4L2_CID_AUTOGAIN:
+ ret = ov6650_reg_rmw(client, REG_COMB,
+ ctrl->value ? COMB_AGC : 0, COMB_AGC);
+ if (!ret)
+ priv->agc = ctrl->value;
+ break;
+ case V4L2_CID_GAIN:
+ ret = ov6650_reg_write(client, REG_GAIN, ctrl->value);
+ if (!ret)
+ priv->gain = ctrl->value;
+ break;
+ case V4L2_CID_AUTO_WHITE_BALANCE:
+ ret = ov6650_reg_rmw(client, REG_COMB,
+ ctrl->value ? COMB_AWB : 0, COMB_AWB);
+ if (!ret)
+ priv->awb = ctrl->value;
+ break;
+ case V4L2_CID_BLUE_BALANCE:
+ ret = ov6650_reg_write(client, REG_BLUE, ctrl->value);
+ if (!ret)
+ priv->blue = ctrl->value;
+ break;
+ case V4L2_CID_RED_BALANCE:
+ ret = ov6650_reg_write(client, REG_RED, ctrl->value);
+ if (!ret)
+ priv->red = ctrl->value;
+ break;
+ case V4L2_CID_SATURATION:
+ ret = ov6650_reg_rmw(client, REG_SAT, SET_SAT(ctrl->value),
+ SAT_MASK);
+ if (!ret)
+ priv->saturation = ctrl->value;
+ break;
+ case V4L2_CID_HUE:
+ ret = ov6650_reg_rmw(client, REG_HUE, SET_HUE(ctrl->value),
+ HUE_MASK);
+ if (!ret)
+ priv->hue = ctrl->value;
+ break;
+ case V4L2_CID_BRIGHTNESS:
+ ret = ov6650_reg_write(client, REG_BRT, ctrl->value);
+ if (!ret)
+ priv->brightness = ctrl->value;
+ break;
+ case V4L2_CID_EXPOSURE_AUTO:
+ switch (ctrl->value) {
+ case V4L2_EXPOSURE_AUTO:
+ ret = ov6650_reg_rmw(client, REG_COMB, COMB_AEC, 0);
+ break;
+ default:
+ ret = ov6650_reg_rmw(client, REG_COMB, 0, COMB_AEC);
+ break;
+ }
+ if (!ret)
+ priv->aec = ctrl->value;
+ break;
+ case V4L2_CID_EXPOSURE:
+ ret = ov6650_reg_write(client, REG_AECH, ctrl->value);
+ if (!ret)
+ priv->exposure = ctrl->value;
+ break;
+ case V4L2_CID_GAMMA:
+ ret = ov6650_reg_write(client, REG_GAM1, ctrl->value);
+ if (!ret)
+ priv->gamma = ctrl->value;
+ break;
+ case V4L2_CID_VFLIP:
+ ret = ov6650_reg_rmw(client, REG_COMB,
+ ctrl->value ? COMB_FLIP_V : 0, COMB_FLIP_V);
+ if (!ret)
+ priv->vflip = ctrl->value;
+ break;
+ case V4L2_CID_HFLIP:
+ ret = ov6650_reg_rmw(client, REG_COMB,
+ ctrl->value ? COMB_FLIP_H : 0, COMB_FLIP_H);
+ if (!ret)
+ priv->hflip = ctrl->value;
+ break;
+ }
+
+ return ret;
+}
+
+/* Get chip identification */
+static int ov6650_g_chip_ident(struct v4l2_subdev *sd,
+ struct v4l2_dbg_chip_ident *id)
+{
+ id->ident = V4L2_IDENT_OV6650;
+ id->revision = 0;
+
+ return 0;
+}
+
+#ifdef CONFIG_VIDEO_ADV_DEBUG
+static int ov6650_get_register(struct v4l2_subdev *sd,
+ struct v4l2_dbg_register *reg)
+{
+ struct i2c_client *client = v4l2_get_subdevdata(sd);
+ int ret;
+ u8 val;
+
+ if (reg->reg & ~0xff)
+ return -EINVAL;
+
+ reg->size = 1;
+
+ ret = ov6650_reg_read(client, reg->reg, &val);
+ if (!ret)
+ reg->val = (__u64)val;
+
+ return ret;
+}
+
+static int ov6650_set_register(struct v4l2_subdev *sd,
+ struct v4l2_dbg_register *reg)
+{
+ struct i2c_client *client = v4l2_get_subdevdata(sd);
+
+ if (reg->reg & ~0xff || reg->val & ~0xff)
+ return -EINVAL;
+
+ return ov6650_reg_write(client, reg->reg, reg->val);
+}
+#endif
+
+static int ov6650_g_crop(struct v4l2_subdev *sd, struct v4l2_crop *a)
+{
+ struct i2c_client *client = v4l2_get_subdevdata(sd);
+ struct ov6650 *priv = to_ov6650(client);
+
+ a->type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
+ a->c = priv->rect;
+
+ return 0;
+}
+
+static int ov6650_s_crop(struct v4l2_subdev *sd, struct v4l2_crop *a)
+{
+ struct i2c_client *client = v4l2_get_subdevdata(sd);
+ struct ov6650 *priv = to_ov6650(client);
+ struct v4l2_rect *rect = &a->c;
+ int ret;
+
+ if (a->type != V4L2_BUF_TYPE_VIDEO_CAPTURE)
+ return -EINVAL;
+
+ rect->left = ALIGN(rect->left, 2);
+ rect->width = ALIGN(rect->width, 2);
+ rect->top = ALIGN(rect->top, 2);
+ rect->height = ALIGN(rect->height, 2);
+ soc_camera_limit_side(&rect->left, &rect->width,
+ DEF_HSTRT << 1, 2, W_CIF);
+ soc_camera_limit_side(&rect->top, &rect->height,
+ DEF_VSTRT << 1, 2, H_CIF);
+
+ ret = ov6650_reg_write(client, REG_HSTRT, rect->left >> 1);
+ if (!ret) {
+ priv->rect.left = rect->left;
+ ret = ov6650_reg_write(client, REG_HSTOP,
+ (rect->left + rect->width) >> 1);
+ }
+ if (!ret) {
+ priv->rect.width = rect->width;
+ ret = ov6650_reg_write(client, REG_VSTRT, rect->top >> 1);
+ }
+ if (!ret) {
+ priv->rect.top = rect->top;
+ ret = ov6650_reg_write(client, REG_VSTOP,
+ (rect->top + rect->height) >> 1);
+ }
+ if (!ret)
+ priv->rect.height = rect->height;
+
+ return ret;
+}
+
+static int ov6650_cropcap(struct v4l2_subdev *sd, struct v4l2_cropcap *a)
+{
+ if (a->type != V4L2_BUF_TYPE_VIDEO_CAPTURE)
+ return -EINVAL;
+
+ a->bounds.left = DEF_HSTRT << 1;
+ a->bounds.top = DEF_VSTRT << 1;
+ a->bounds.width = W_CIF;
+ a->bounds.height = H_CIF;
+ a->defrect = a->bounds;
+ a->pixelaspect.numerator = 1;
+ a->pixelaspect.denominator = 1;
+
+ return 0;
+}
+
+static int ov6650_g_fmt(struct v4l2_subdev *sd,
+ struct v4l2_mbus_framefmt *mf)
+{
+ struct i2c_client *client = v4l2_get_subdevdata(sd);
+ struct ov6650 *priv = to_ov6650(client);
+
+ mf->width = priv->rect.width >> priv->half_scale;
+ mf->height = priv->rect.height >> priv->half_scale;
+ mf->code = priv->code;
+ mf->colorspace = priv->colorspace;
+ mf->field = V4L2_FIELD_NONE;
+
+ return 0;
+}
+
+static bool is_unscaled_ok(int width, int height, struct v4l2_rect *rect)
+{
+ return width > rect->width >> 1 || height > rect->height >> 1;
+}
+
+static u8 to_clkrc(struct v4l2_fract *timeperframe,
+ unsigned long pclk_limit, unsigned long pclk_max)
+{
+ unsigned long pclk;
+
+ if (timeperframe->numerator && timeperframe->denominator)
+ pclk = pclk_max * timeperframe->denominator /
+ (FRAME_RATE_MAX * timeperframe->numerator);
+ else
+ pclk = pclk_max;
+
+ if (pclk_limit && pclk_limit < pclk)
+ pclk = pclk_limit;
+
+ return (pclk_max - 1) / pclk;
+}
+
+/* set the format we will capture in */
+static int ov6650_s_fmt(struct v4l2_subdev *sd, struct v4l2_mbus_framefmt *mf)
+{
+ struct i2c_client *client = v4l2_get_subdevdata(sd);
+ struct soc_camera_device *icd = client->dev.platform_data;
+ struct soc_camera_sense *sense = icd->sense;
+ struct ov6650 *priv = to_ov6650(client);
+ bool half_scale = !is_unscaled_ok(mf->width, mf->height, &priv->rect);
+ struct v4l2_crop a = {
+ .type = V4L2_BUF_TYPE_VIDEO_CAPTURE,
+ .c = {
+ .left = priv->rect.left + (priv->rect.width >> 1) -
+ (mf->width >> (1 - half_scale)),
+ .top = priv->rect.top + (priv->rect.height >> 1) -
+ (mf->height >> (1 - half_scale)),
+ .width = mf->width << half_scale,
+ .height = mf->height << half_scale,
+ },
+ };
+ enum v4l2_mbus_pixelcode code = mf->code;
+ unsigned long mclk, pclk;
+ u8 coma_set = 0, coma_mask = 0, coml_set, coml_mask, clkrc;
+ int ret;
+
+ /* select color matrix configuration for given color encoding */
+ switch (code) {
+ case V4L2_MBUS_FMT_GREY8_1X8:
+ dev_dbg(&client->dev, "pixel format GREY8_1X8\n");
+ coma_mask |= COMA_RGB | COMA_WORD_SWAP | COMA_BYTE_SWAP;
+ coma_set |= COMA_BW;
+ break;
+ case V4L2_MBUS_FMT_YUYV8_2X8:
+ dev_dbg(&client->dev, "pixel format YUYV8_2X8_LE\n");
+ coma_mask |= COMA_RGB | COMA_BW | COMA_BYTE_SWAP;
+ coma_set |= COMA_WORD_SWAP;
+ break;
+ case V4L2_MBUS_FMT_YVYU8_2X8:
+ dev_dbg(&client->dev, "pixel format YVYU8_2X8_LE (untested)\n");
+ coma_mask |= COMA_RGB | COMA_BW | COMA_WORD_SWAP |
+ COMA_BYTE_SWAP;
+ break;
+ case V4L2_MBUS_FMT_UYVY8_2X8:
+ dev_dbg(&client->dev, "pixel format YUYV8_2X8_BE\n");
+ if (half_scale) {
+ coma_mask |= COMA_RGB | COMA_BW | COMA_WORD_SWAP;
+ coma_set |= COMA_BYTE_SWAP;
+ } else {
+ coma_mask |= COMA_RGB | COMA_BW;
+ coma_set |= COMA_BYTE_SWAP | COMA_WORD_SWAP;
+ }
+ break;
+ case V4L2_MBUS_FMT_VYUY8_2X8:
+ dev_dbg(&client->dev, "pixel format YVYU8_2X8_BE (untested)\n");
+ if (half_scale) {
+ coma_mask |= COMA_RGB | COMA_BW;
+ coma_set |= COMA_BYTE_SWAP | COMA_WORD_SWAP;
+ } else {
+ coma_mask |= COMA_RGB | COMA_BW | COMA_WORD_SWAP;
+ coma_set |= COMA_BYTE_SWAP;
+ }
+ break;
+ case V4L2_MBUS_FMT_SBGGR8_1X8:
+ dev_dbg(&client->dev, "pixel format SBGGR8_1X8 (untested)\n");
+ coma_mask |= COMA_BW | COMA_BYTE_SWAP | COMA_WORD_SWAP;
+ coma_set |= COMA_RAW_RGB | COMA_RGB;
+ break;
+ default:
+ dev_err(&client->dev, "Pixel format not handled: 0x%x\n", code);
+ return -EINVAL;
+ }
+ priv->code = code;
+
+ if (code == V4L2_MBUS_FMT_GREY8_1X8 ||
+ code == V4L2_MBUS_FMT_SBGGR8_1X8) {
+ coml_mask = COML_ONE_CHANNEL;
+ coml_set = 0;
+ priv->pclk_max = 4000000;
+ } else {
+ coml_mask = 0;
+ coml_set = COML_ONE_CHANNEL;
+ priv->pclk_max = 8000000;
+ }
+
+ if (code == V4L2_MBUS_FMT_SBGGR8_1X8)
+ priv->colorspace = V4L2_COLORSPACE_SRGB;
+ else if (code != 0)
+ priv->colorspace = V4L2_COLORSPACE_JPEG;
+
+ if (half_scale) {
+ dev_dbg(&client->dev, "max resolution: QCIF\n");
+ coma_set |= COMA_QCIF;
+ priv->pclk_max /= 2;
+ } else {
+ dev_dbg(&client->dev, "max resolution: CIF\n");
+ coma_mask |= COMA_QCIF;
+ }
+ priv->half_scale = half_scale;
+
+ if (sense) {
+ if (sense->master_clock == 8000000) {
+ dev_dbg(&client->dev, "8MHz input clock\n");
+ clkrc = CLKRC_6MHz;
+ } else if (sense->master_clock == 12000000) {
+ dev_dbg(&client->dev, "12MHz input clock\n");
+ clkrc = CLKRC_12MHz;
+ } else if (sense->master_clock == 16000000) {
+ dev_dbg(&client->dev, "16MHz input clock\n");
+ clkrc = CLKRC_16MHz;
+ } else if (sense->master_clock == 24000000) {
+ dev_dbg(&client->dev, "24MHz input clock\n");
+ clkrc = CLKRC_24MHz;
+ } else {
+ dev_err(&client->dev,
+ "unspported input clock, check platform data\n");
+ return -EINVAL;
+ }
+ mclk = sense->master_clock;
+ priv->pclk_limit = sense->pixel_clock_max;
+ } else {
+ clkrc = CLKRC_24MHz;
+ mclk = 24000000;
+ priv->pclk_limit = 0;
+ dev_dbg(&client->dev, "using default 24MHz input clock\n");
+ }
+
+ clkrc |= to_clkrc(&priv->tpf, priv->pclk_limit, priv->pclk_max);
+
+ pclk = priv->pclk_max / GET_CLKRC_DIV(clkrc);
+ dev_dbg(&client->dev, "pixel clock divider: %ld.%ld\n",
+ mclk / pclk, 10 * (mclk % pclk) / pclk);
+
+ ret = ov6650_s_crop(sd, &a);
+ if (!ret)
+ ret = ov6650_reg_rmw(client, REG_COMA, coma_set, coma_mask);
+ if (!ret)
+ ret = ov6650_reg_write(client, REG_CLKRC, clkrc);
+ if (!ret)
+ ret = ov6650_reg_rmw(client, REG_COML, coml_set, coml_mask);
+
+ if (!ret) {
+ mf->colorspace = priv->colorspace;
+ mf->width = priv->rect.width >> half_scale;
+ mf->height = priv->rect.height >> half_scale;
+ }
+
+ return ret;
+}
+
+static int ov6650_try_fmt(struct v4l2_subdev *sd,
+ struct v4l2_mbus_framefmt *mf)
+{
+ struct i2c_client *client = v4l2_get_subdevdata(sd);
+ struct ov6650 *priv = to_ov6650(client);
+
+ if (is_unscaled_ok(mf->width, mf->height, &priv->rect))
+ v4l_bound_align_image(&mf->width, 2, W_CIF, 1,
+ &mf->height, 2, H_CIF, 1, 0);
+
+ mf->field = V4L2_FIELD_NONE;
+
+ switch (mf->code) {
+ case V4L2_MBUS_FMT_Y10_1X10:
+ mf->code = V4L2_MBUS_FMT_GREY8_1X8;
+ case V4L2_MBUS_FMT_GREY8_1X8:
+ case V4L2_MBUS_FMT_YVYU8_2X8:
+ case V4L2_MBUS_FMT_YUYV8_2X8:
+ case V4L2_MBUS_FMT_VYUY8_2X8:
+ case V4L2_MBUS_FMT_UYVY8_2X8:
+ mf->colorspace = V4L2_COLORSPACE_JPEG;
+ break;
+ default:
+ mf->code = V4L2_MBUS_FMT_SBGGR8_1X8;
+ case V4L2_MBUS_FMT_SBGGR8_1X8:
+ mf->colorspace = V4L2_COLORSPACE_SRGB;
+ break;
+ }
+
+ return 0;
+}
+
+static int ov6650_enum_fmt(struct v4l2_subdev *sd, unsigned int index,
+ enum v4l2_mbus_pixelcode *code)
+{
+ if (index >= ARRAY_SIZE(ov6650_codes))
+ return -EINVAL;
+
+ *code = ov6650_codes[index];
+ return 0;
+}
+
+static int ov6650_g_parm(struct v4l2_subdev *sd, struct v4l2_streamparm *parms)
+{
+ struct i2c_client *client = v4l2_get_subdevdata(sd);
+ struct ov6650 *priv = to_ov6650(client);
+ struct v4l2_captureparm *cp = &parms->parm.capture;
+
+ if (parms->type != V4L2_BUF_TYPE_VIDEO_CAPTURE)
+ return -EINVAL;
+
+ memset(cp, 0, sizeof(*cp));
+ cp->capability = V4L2_CAP_TIMEPERFRAME;
+ cp->timeperframe.numerator = GET_CLKRC_DIV(to_clkrc(&priv->tpf,
+ priv->pclk_limit, priv->pclk_max));
+ cp->timeperframe.denominator = FRAME_RATE_MAX;
+
+ dev_dbg(&client->dev, "Frame interval: %u/%u s\n",
+ cp->timeperframe.numerator, cp->timeperframe.denominator);
+
+ return 0;
+}
+
+static int ov6650_s_parm(struct v4l2_subdev *sd, struct v4l2_streamparm *parms)
+{
+ struct i2c_client *client = v4l2_get_subdevdata(sd);
+ struct ov6650 *priv = to_ov6650(client);
+ struct v4l2_captureparm *cp = &parms->parm.capture;
+ struct v4l2_fract *tpf = &cp->timeperframe;
+ int div, ret;
+ u8 clkrc;
+
+ if (parms->type != V4L2_BUF_TYPE_VIDEO_CAPTURE)
+ return -EINVAL;
+
+ if (cp->extendedmode != 0)
+ return -EINVAL;
+
+ if (tpf->numerator == 0 || tpf->denominator == 0)
+ div = 1; /* Reset to full rate */
+ else
+ div = (tpf->numerator * FRAME_RATE_MAX) / tpf->denominator;
+
+ if (div == 0)
+ div = 1;
+ else if (div > GET_CLKRC_DIV(CLKRC_DIV_MASK))
+ div = GET_CLKRC_DIV(CLKRC_DIV_MASK);
+
+ /*
+ * Keep result to be used as tpf limit
+ * for subsequent clock divider calculations
+ */
+ priv->tpf.numerator = div;
+ priv->tpf.denominator = FRAME_RATE_MAX;
+
+ clkrc = to_clkrc(&priv->tpf, priv->pclk_limit, priv->pclk_max);
+
+ ret = ov6650_reg_rmw(client, REG_CLKRC, clkrc, CLKRC_DIV_MASK);
+ if (!ret) {
+ tpf->numerator = GET_CLKRC_DIV(clkrc);
+ tpf->denominator = FRAME_RATE_MAX;
+ }
+
+ return ret;
+}
+
+/* Soft reset the camera. This has nothing to do with the RESET pin! */
+static int ov6650_reset(struct i2c_client *client)
+{
+ int ret;
+
+ dev_dbg(&client->dev, "reset\n");
+
+ ret = ov6650_reg_rmw(client, REG_COMA, COMA_RESET, 0);
+ if (ret)
+ dev_err(&client->dev,
+ "An error occured while entering soft reset!\n");
+
+ return ret;
+}
+
+/* program default register values */
+static int ov6650_prog_dflt(struct i2c_client *client)
+{
+ int ret;
+
+ dev_dbg(&client->dev, "initializing\n");
+
+ ret = ov6650_reg_write(client, REG_COMA, 0); /* ~COMA_RESET */
+ if (!ret)
+ ret = ov6650_reg_rmw(client, REG_COMB, 0, COMB_BAND_FILTER);
+
+ return ret;
+}
+
+static int ov6650_video_probe(struct soc_camera_device *icd,
+ struct i2c_client *client)
+{
+ u8 pidh, pidl, midh, midl;
+ int ret = 0;
+
+ /*
+ * check and show product ID and manufacturer ID
+ */
+ ret = ov6650_reg_read(client, REG_PIDH, &pidh);
+ if (!ret)
+ ret = ov6650_reg_read(client, REG_PIDL, &pidl);
+ if (!ret)
+ ret = ov6650_reg_read(client, REG_MIDH, &midh);
+ if (!ret)
+ ret = ov6650_reg_read(client, REG_MIDL, &midl);
+
+ if (ret)
+ return ret;
+
+ if ((pidh != OV6650_PIDH) || (pidl != OV6650_PIDL)) {
+ dev_err(&client->dev, "Product ID error 0x%02x:0x%02x\n",
+ pidh, pidl);
+ return -ENODEV;
+ }
+
+ dev_info(&client->dev,
+ "ov6650 Product ID 0x%02x:0x%02x Manufacturer ID 0x%02x:0x%02x\n",
+ pidh, pidl, midh, midl);
+
+ ret = ov6650_reset(client);
+ if (!ret)
+ ret = ov6650_prog_dflt(client);
+
+ return ret;
+}
+
+static struct soc_camera_ops ov6650_ops = {
+ .set_bus_param = ov6650_set_bus_param,
+ .query_bus_param = ov6650_query_bus_param,
+ .controls = ov6650_controls,
+ .num_controls = ARRAY_SIZE(ov6650_controls),
+};
+
+static struct v4l2_subdev_core_ops ov6650_core_ops = {
+ .g_ctrl = ov6650_g_ctrl,
+ .s_ctrl = ov6650_s_ctrl,
+ .g_chip_ident = ov6650_g_chip_ident,
+#ifdef CONFIG_VIDEO_ADV_DEBUG
+ .g_register = ov6650_get_register,
+ .s_register = ov6650_set_register,
+#endif
+};
+
+static struct v4l2_subdev_video_ops ov6650_video_ops = {
+ .s_stream = ov6650_s_stream,
+ .g_mbus_fmt = ov6650_g_fmt,
+ .s_mbus_fmt = ov6650_s_fmt,
+ .try_mbus_fmt = ov6650_try_fmt,
+ .enum_mbus_fmt = ov6650_enum_fmt,
+ .cropcap = ov6650_cropcap,
+ .g_crop = ov6650_g_crop,
+ .s_crop = ov6650_s_crop,
+ .g_parm = ov6650_g_parm,
+ .s_parm = ov6650_s_parm,
+};
+
+static struct v4l2_subdev_ops ov6650_subdev_ops = {
+ .core = &ov6650_core_ops,
+ .video = &ov6650_video_ops,
+};
+
+/*
+ * i2c_driver function
+ */
+static int ov6650_probe(struct i2c_client *client,
+ const struct i2c_device_id *did)
+{
+ struct ov6650 *priv;
+ struct soc_camera_device *icd = client->dev.platform_data;
+ struct soc_camera_link *icl;
+ int ret;
+
+ if (!icd) {
+ dev_err(&client->dev, "Missing soc-camera data!\n");
+ return -EINVAL;
+ }
+
+ icl = to_soc_camera_link(icd);
+ if (!icl) {
+ dev_err(&client->dev, "Missing platform_data for driver\n");
+ return -EINVAL;
+ }
+
+ priv = kzalloc(sizeof(*priv), GFP_KERNEL);
+ if (!priv) {
+ dev_err(&client->dev,
+ "Failed to allocate memory for private data!\n");
+ return -ENOMEM;
+ }
+
+ v4l2_i2c_subdev_init(&priv->subdev, client, &ov6650_subdev_ops);
+
+ icd->ops = &ov6650_ops;
+
+ priv->rect.left = DEF_HSTRT << 1;
+ priv->rect.top = DEF_VSTRT << 1;
+ priv->rect.width = W_CIF;
+ priv->rect.height = H_CIF;
+ priv->half_scale = false;
+ priv->code = V4L2_MBUS_FMT_YUYV8_2X8;
+ priv->colorspace = V4L2_COLORSPACE_JPEG;
+
+ ret = ov6650_video_probe(icd, client);
+
+ if (ret) {
+ icd->ops = NULL;
+ kfree(priv);
+ }
+
+ return ret;
+}
+
+static int ov6650_remove(struct i2c_client *client)
+{
+ struct ov6650 *priv = to_ov6650(client);
+
+ kfree(priv);
+ return 0;
+}
+
+static const struct i2c_device_id ov6650_id[] = {
+ { "ov6650", 0 },
+ { }
+};
+MODULE_DEVICE_TABLE(i2c, ov6650_id);
+
+static struct i2c_driver ov6650_i2c_driver = {
+ .driver = {
+ .name = "ov6650",
+ },
+ .probe = ov6650_probe,
+ .remove = ov6650_remove,
+ .id_table = ov6650_id,
+};
+
+static int __init ov6650_module_init(void)
+{
+ return i2c_add_driver(&ov6650_i2c_driver);
+}
+
+static void __exit ov6650_module_exit(void)
+{
+ i2c_del_driver(&ov6650_i2c_driver);
+}
+
+module_init(ov6650_module_init);
+module_exit(ov6650_module_exit);
+
+MODULE_DESCRIPTION("SoC Camera driver for OmniVision OV6650");
+MODULE_AUTHOR("Janusz Krzysztofik <jkrzyszt@tis.icnet.pl>");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/media/video/ov7670.c b/drivers/media/video/ov7670.c
index 91c886ab15c6..c881a64b41fd 100644
--- a/drivers/media/video/ov7670.c
+++ b/drivers/media/video/ov7670.c
@@ -18,8 +18,9 @@
#include <linux/videodev2.h>
#include <media/v4l2-device.h>
#include <media/v4l2-chip-ident.h>
-#include <media/v4l2-i2c-drv.h>
+#include <media/v4l2-mediabus.h>
+#include "ov7670.h"
MODULE_AUTHOR("Jonathan Corbet <corbet@lwn.net>");
MODULE_DESCRIPTION("A low-level driver for OmniVision ov7670 sensors");
@@ -43,11 +44,6 @@ MODULE_PARM_DESC(debug, "Debug level (0-1)");
#define QCIF_HEIGHT 144
/*
- * Our nominal (default) frame rate.
- */
-#define OV7670_FRAME_RATE 30
-
-/*
* The 7670 sits on i2c with ID 0x42
*/
#define OV7670_I2C_ADDR 0x42
@@ -198,7 +194,11 @@ struct ov7670_info {
struct ov7670_format_struct *fmt; /* Current format */
unsigned char sat; /* Saturation value */
int hue; /* Hue value */
+ int min_width; /* Filter out smaller sizes */
+ int min_height; /* Filter out smaller sizes */
+ int clock_speed; /* External clock speed (MHz) */
u8 clkrc; /* Clock divider value */
+ bool use_smbus; /* Use smbus I/O instead of I2C */
};
static inline struct ov7670_info *to_state(struct v4l2_subdev *sd)
@@ -415,8 +415,7 @@ static struct regval_list ov7670_fmt_raw[] = {
* ov7670 is not really an SMBUS device, though, so the communication
* is not always entirely reliable.
*/
-#ifdef CONFIG_OLPC_XO_1
-static int ov7670_read(struct v4l2_subdev *sd, unsigned char reg,
+static int ov7670_read_smbus(struct v4l2_subdev *sd, unsigned char reg,
unsigned char *value)
{
struct i2c_client *client = v4l2_get_subdevdata(sd);
@@ -431,7 +430,7 @@ static int ov7670_read(struct v4l2_subdev *sd, unsigned char reg,
}
-static int ov7670_write(struct v4l2_subdev *sd, unsigned char reg,
+static int ov7670_write_smbus(struct v4l2_subdev *sd, unsigned char reg,
unsigned char value)
{
struct i2c_client *client = v4l2_get_subdevdata(sd);
@@ -442,11 +441,10 @@ static int ov7670_write(struct v4l2_subdev *sd, unsigned char reg,
return ret;
}
-#else /* ! CONFIG_OLPC_XO_1 */
/*
* On most platforms, we'd rather do straight i2c I/O.
*/
-static int ov7670_read(struct v4l2_subdev *sd, unsigned char reg,
+static int ov7670_read_i2c(struct v4l2_subdev *sd, unsigned char reg,
unsigned char *value)
{
struct i2c_client *client = v4l2_get_subdevdata(sd);
@@ -479,7 +477,7 @@ static int ov7670_read(struct v4l2_subdev *sd, unsigned char reg,
}
-static int ov7670_write(struct v4l2_subdev *sd, unsigned char reg,
+static int ov7670_write_i2c(struct v4l2_subdev *sd, unsigned char reg,
unsigned char value)
{
struct i2c_client *client = v4l2_get_subdevdata(sd);
@@ -498,8 +496,26 @@ static int ov7670_write(struct v4l2_subdev *sd, unsigned char reg,
msleep(5); /* Wait for reset to run */
return ret;
}
-#endif /* CONFIG_OLPC_XO_1 */
+static int ov7670_read(struct v4l2_subdev *sd, unsigned char reg,
+ unsigned char *value)
+{
+ struct ov7670_info *info = to_state(sd);
+ if (info->use_smbus)
+ return ov7670_read_smbus(sd, reg, value);
+ else
+ return ov7670_read_i2c(sd, reg, value);
+}
+
+static int ov7670_write(struct v4l2_subdev *sd, unsigned char reg,
+ unsigned char value)
+{
+ struct ov7670_info *info = to_state(sd);
+ if (info->use_smbus)
+ return ov7670_write_smbus(sd, reg, value);
+ else
+ return ov7670_write_i2c(sd, reg, value);
+}
/*
* Write a list of register settings; ff/ff stops the process.
@@ -572,42 +588,37 @@ static int ov7670_detect(struct v4l2_subdev *sd)
/*
* Store information about the video data format. The color matrix
* is deeply tied into the format, so keep the relevant values here.
- * The magic matrix nubmers come from OmniVision.
+ * The magic matrix numbers come from OmniVision.
*/
static struct ov7670_format_struct {
- __u8 *desc;
- __u32 pixelformat;
+ enum v4l2_mbus_pixelcode mbus_code;
+ enum v4l2_colorspace colorspace;
struct regval_list *regs;
int cmatrix[CMATRIX_LEN];
- int bpp; /* Bytes per pixel */
} ov7670_formats[] = {
{
- .desc = "YUYV 4:2:2",
- .pixelformat = V4L2_PIX_FMT_YUYV,
+ .mbus_code = V4L2_MBUS_FMT_YUYV8_2X8,
+ .colorspace = V4L2_COLORSPACE_JPEG,
.regs = ov7670_fmt_yuv422,
.cmatrix = { 128, -128, 0, -34, -94, 128 },
- .bpp = 2,
},
{
- .desc = "RGB 444",
- .pixelformat = V4L2_PIX_FMT_RGB444,
+ .mbus_code = V4L2_MBUS_FMT_RGB444_2X8_PADHI_LE,
+ .colorspace = V4L2_COLORSPACE_SRGB,
.regs = ov7670_fmt_rgb444,
.cmatrix = { 179, -179, 0, -61, -176, 228 },
- .bpp = 2,
},
{
- .desc = "RGB 565",
- .pixelformat = V4L2_PIX_FMT_RGB565,
+ .mbus_code = V4L2_MBUS_FMT_RGB565_2X8_LE,
+ .colorspace = V4L2_COLORSPACE_SRGB,
.regs = ov7670_fmt_rgb565,
.cmatrix = { 179, -179, 0, -61, -176, 228 },
- .bpp = 2,
},
{
- .desc = "Raw RGB Bayer",
- .pixelformat = V4L2_PIX_FMT_SBGGR8,
+ .mbus_code = V4L2_MBUS_FMT_SBGGR8_1X8,
+ .colorspace = V4L2_COLORSPACE_SRGB,
.regs = ov7670_fmt_raw,
.cmatrix = { 0, 0, 0, 0, 0, 0 },
- .bpp = 1
},
};
#define N_OV7670_FMTS ARRAY_SIZE(ov7670_formats)
@@ -680,10 +691,10 @@ static struct ov7670_win_size {
.width = QVGA_WIDTH,
.height = QVGA_HEIGHT,
.com7_bit = COM7_FMT_QVGA,
- .hstart = 164, /* Empirically determined */
- .hstop = 20,
- .vstart = 14,
- .vstop = 494,
+ .hstart = 168, /* Empirically determined */
+ .hstop = 24,
+ .vstart = 12,
+ .vstop = 492,
.regs = NULL,
},
/* QCIF */
@@ -734,51 +745,45 @@ static int ov7670_set_hw(struct v4l2_subdev *sd, int hstart, int hstop,
}
-static int ov7670_enum_fmt(struct v4l2_subdev *sd, struct v4l2_fmtdesc *fmt)
+static int ov7670_enum_mbus_fmt(struct v4l2_subdev *sd, unsigned index,
+ enum v4l2_mbus_pixelcode *code)
{
- struct ov7670_format_struct *ofmt;
-
- if (fmt->index >= N_OV7670_FMTS)
+ if (index >= N_OV7670_FMTS)
return -EINVAL;
- ofmt = ov7670_formats + fmt->index;
- fmt->flags = 0;
- strcpy(fmt->description, ofmt->desc);
- fmt->pixelformat = ofmt->pixelformat;
+ *code = ov7670_formats[index].mbus_code;
return 0;
}
-
static int ov7670_try_fmt_internal(struct v4l2_subdev *sd,
- struct v4l2_format *fmt,
+ struct v4l2_mbus_framefmt *fmt,
struct ov7670_format_struct **ret_fmt,
struct ov7670_win_size **ret_wsize)
{
int index;
struct ov7670_win_size *wsize;
- struct v4l2_pix_format *pix = &fmt->fmt.pix;
for (index = 0; index < N_OV7670_FMTS; index++)
- if (ov7670_formats[index].pixelformat == pix->pixelformat)
+ if (ov7670_formats[index].mbus_code == fmt->code)
break;
if (index >= N_OV7670_FMTS) {
/* default to first format */
index = 0;
- pix->pixelformat = ov7670_formats[0].pixelformat;
+ fmt->code = ov7670_formats[0].mbus_code;
}
if (ret_fmt != NULL)
*ret_fmt = ov7670_formats + index;
/*
* Fields: the OV devices claim to be progressive.
*/
- pix->field = V4L2_FIELD_NONE;
+ fmt->field = V4L2_FIELD_NONE;
/*
* Round requested image size down to the nearest
* we support, but not below the smallest.
*/
for (wsize = ov7670_win_sizes; wsize < ov7670_win_sizes + N_WIN_SIZES;
wsize++)
- if (pix->width >= wsize->width && pix->height >= wsize->height)
+ if (fmt->width >= wsize->width && fmt->height >= wsize->height)
break;
if (wsize >= ov7670_win_sizes + N_WIN_SIZES)
wsize--; /* Take the smallest one */
@@ -787,14 +792,14 @@ static int ov7670_try_fmt_internal(struct v4l2_subdev *sd,
/*
* Note the size we'll actually handle.
*/
- pix->width = wsize->width;
- pix->height = wsize->height;
- pix->bytesperline = pix->width*ov7670_formats[index].bpp;
- pix->sizeimage = pix->height*pix->bytesperline;
+ fmt->width = wsize->width;
+ fmt->height = wsize->height;
+ fmt->colorspace = ov7670_formats[index].colorspace;
return 0;
}
-static int ov7670_try_fmt(struct v4l2_subdev *sd, struct v4l2_format *fmt)
+static int ov7670_try_mbus_fmt(struct v4l2_subdev *sd,
+ struct v4l2_mbus_framefmt *fmt)
{
return ov7670_try_fmt_internal(sd, fmt, NULL, NULL);
}
@@ -802,15 +807,17 @@ static int ov7670_try_fmt(struct v4l2_subdev *sd, struct v4l2_format *fmt)
/*
* Set a format.
*/
-static int ov7670_s_fmt(struct v4l2_subdev *sd, struct v4l2_format *fmt)
+static int ov7670_s_mbus_fmt(struct v4l2_subdev *sd,
+ struct v4l2_mbus_framefmt *fmt)
{
- int ret;
struct ov7670_format_struct *ovfmt;
struct ov7670_win_size *wsize;
struct ov7670_info *info = to_state(sd);
unsigned char com7;
+ int ret;
ret = ov7670_try_fmt_internal(sd, fmt, &ovfmt, &wsize);
+
if (ret)
return ret;
/*
@@ -845,7 +852,7 @@ static int ov7670_s_fmt(struct v4l2_subdev *sd, struct v4l2_format *fmt)
*/
if (ret == 0)
ret = ov7670_write(sd, REG_CLKRC, info->clkrc);
- return ret;
+ return 0;
}
/*
@@ -863,7 +870,7 @@ static int ov7670_g_parm(struct v4l2_subdev *sd, struct v4l2_streamparm *parms)
memset(cp, 0, sizeof(struct v4l2_captureparm));
cp->capability = V4L2_CAP_TIMEPERFRAME;
cp->timeperframe.numerator = 1;
- cp->timeperframe.denominator = OV7670_FRAME_RATE;
+ cp->timeperframe.denominator = info->clock_speed;
if ((info->clkrc & CLK_EXT) == 0 && (info->clkrc & CLK_SCALE) > 1)
cp->timeperframe.denominator /= (info->clkrc & CLK_SCALE);
return 0;
@@ -884,26 +891,72 @@ static int ov7670_s_parm(struct v4l2_subdev *sd, struct v4l2_streamparm *parms)
if (tpf->numerator == 0 || tpf->denominator == 0)
div = 1; /* Reset to full rate */
else
- div = (tpf->numerator*OV7670_FRAME_RATE)/tpf->denominator;
+ div = (tpf->numerator * info->clock_speed) / tpf->denominator;
if (div == 0)
div = 1;
else if (div > CLK_SCALE)
div = CLK_SCALE;
info->clkrc = (info->clkrc & 0x80) | div;
tpf->numerator = 1;
- tpf->denominator = OV7670_FRAME_RATE/div;
+ tpf->denominator = info->clock_speed / div;
return ov7670_write(sd, REG_CLKRC, info->clkrc);
}
-
/*
- * Code for dealing with controls.
+ * Frame intervals. Since frame rates are controlled with the clock
+ * divider, we can only do 30/n for integer n values. So no continuous
+ * or stepwise options. Here we just pick a handful of logical values.
*/
+static int ov7670_frame_rates[] = { 30, 15, 10, 5, 1 };
+
+static int ov7670_enum_frameintervals(struct v4l2_subdev *sd,
+ struct v4l2_frmivalenum *interval)
+{
+ if (interval->index >= ARRAY_SIZE(ov7670_frame_rates))
+ return -EINVAL;
+ interval->type = V4L2_FRMIVAL_TYPE_DISCRETE;
+ interval->discrete.numerator = 1;
+ interval->discrete.denominator = ov7670_frame_rates[interval->index];
+ return 0;
+}
+
+/*
+ * Frame size enumeration
+ */
+static int ov7670_enum_framesizes(struct v4l2_subdev *sd,
+ struct v4l2_frmsizeenum *fsize)
+{
+ struct ov7670_info *info = to_state(sd);
+ int i;
+ int num_valid = -1;
+ __u32 index = fsize->index;
+ /*
+ * If a minimum width/height was requested, filter out the capture
+ * windows that fall outside that.
+ */
+ for (i = 0; i < N_WIN_SIZES; i++) {
+ struct ov7670_win_size *win = &ov7670_win_sizes[i];
+ if (info->min_width && win->width < info->min_width)
+ continue;
+ if (info->min_height && win->height < info->min_height)
+ continue;
+ if (index == ++num_valid) {
+ fsize->type = V4L2_FRMSIZE_TYPE_DISCRETE;
+ fsize->discrete.width = win->width;
+ fsize->discrete.height = win->height;
+ return 0;
+ }
+ }
+ return -EINVAL;
+}
+/*
+ * Code for dealing with controls.
+ */
static int ov7670_store_cmatrix(struct v4l2_subdev *sd,
int matrix[CMATRIX_LEN])
@@ -1396,6 +1449,47 @@ static int ov7670_g_chip_ident(struct v4l2_subdev *sd,
return v4l2_chip_ident_i2c_client(client, chip, V4L2_IDENT_OV7670, 0);
}
+static int ov7670_s_config(struct v4l2_subdev *sd, int dumb, void *data)
+{
+ struct i2c_client *client = v4l2_get_subdevdata(sd);
+ struct ov7670_config *config = data;
+ struct ov7670_info *info = to_state(sd);
+ int ret;
+
+ info->clock_speed = 30; /* default: a guess */
+
+ /*
+ * Must apply configuration before initializing device, because it
+ * selects I/O method.
+ */
+ if (config) {
+ info->min_width = config->min_width;
+ info->min_height = config->min_height;
+ info->use_smbus = config->use_smbus;
+
+ if (config->clock_speed)
+ info->clock_speed = config->clock_speed;
+ }
+
+ /* Make sure it's an ov7670 */
+ ret = ov7670_detect(sd);
+ if (ret) {
+ v4l_dbg(1, debug, client,
+ "chip found @ 0x%x (%s) is not an ov7670 chip.\n",
+ client->addr << 1, client->adapter->name);
+ kfree(info);
+ return ret;
+ }
+ v4l_info(client, "chip found @ 0x%02x (%s)\n",
+ client->addr << 1, client->adapter->name);
+
+ info->fmt = &ov7670_formats[0];
+ info->sat = 128; /* Review this */
+ info->clkrc = info->clock_speed / 30;
+
+ return 0;
+}
+
#ifdef CONFIG_VIDEO_ADV_DEBUG
static int ov7670_g_register(struct v4l2_subdev *sd, struct v4l2_dbg_register *reg)
{
@@ -1434,6 +1528,7 @@ static const struct v4l2_subdev_core_ops ov7670_core_ops = {
.s_ctrl = ov7670_s_ctrl,
.queryctrl = ov7670_queryctrl,
.reset = ov7670_reset,
+ .s_config = ov7670_s_config,
.init = ov7670_init,
#ifdef CONFIG_VIDEO_ADV_DEBUG
.g_register = ov7670_g_register,
@@ -1442,11 +1537,13 @@ static const struct v4l2_subdev_core_ops ov7670_core_ops = {
};
static const struct v4l2_subdev_video_ops ov7670_video_ops = {
- .enum_fmt = ov7670_enum_fmt,
- .try_fmt = ov7670_try_fmt,
- .s_fmt = ov7670_s_fmt,
+ .enum_mbus_fmt = ov7670_enum_mbus_fmt,
+ .try_mbus_fmt = ov7670_try_mbus_fmt,
+ .s_mbus_fmt = ov7670_s_mbus_fmt,
.s_parm = ov7670_s_parm,
.g_parm = ov7670_g_parm,
+ .enum_frameintervals = ov7670_enum_frameintervals,
+ .enum_framesizes = ov7670_enum_framesizes,
};
static const struct v4l2_subdev_ops ov7670_ops = {
@@ -1461,7 +1558,6 @@ static int ov7670_probe(struct i2c_client *client,
{
struct v4l2_subdev *sd;
struct ov7670_info *info;
- int ret;
info = kzalloc(sizeof(struct ov7670_info), GFP_KERNEL);
if (info == NULL)
@@ -1469,22 +1565,6 @@ static int ov7670_probe(struct i2c_client *client,
sd = &info->sd;
v4l2_i2c_subdev_init(sd, client, &ov7670_ops);
- /* Make sure it's an ov7670 */
- ret = ov7670_detect(sd);
- if (ret) {
- v4l_dbg(1, debug, client,
- "chip found @ 0x%x (%s) is not an ov7670 chip.\n",
- client->addr << 1, client->adapter->name);
- kfree(info);
- return ret;
- }
- v4l_info(client, "chip found @ 0x%02x (%s)\n",
- client->addr << 1, client->adapter->name);
-
- info->fmt = &ov7670_formats[0];
- info->sat = 128; /* Review this */
- info->clkrc = 1; /* 30fps */
-
return 0;
}
@@ -1504,9 +1584,25 @@ static const struct i2c_device_id ov7670_id[] = {
};
MODULE_DEVICE_TABLE(i2c, ov7670_id);
-static struct v4l2_i2c_driver_data v4l2_i2c_data = {
- .name = "ov7670",
- .probe = ov7670_probe,
- .remove = ov7670_remove,
- .id_table = ov7670_id,
+static struct i2c_driver ov7670_driver = {
+ .driver = {
+ .owner = THIS_MODULE,
+ .name = "ov7670",
+ },
+ .probe = ov7670_probe,
+ .remove = ov7670_remove,
+ .id_table = ov7670_id,
};
+
+static __init int init_ov7670(void)
+{
+ return i2c_add_driver(&ov7670_driver);
+}
+
+static __exit void exit_ov7670(void)
+{
+ i2c_del_driver(&ov7670_driver);
+}
+
+module_init(init_ov7670);
+module_exit(exit_ov7670);
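With the reworked g_parm/s_parm above, frame rates are derived from info->clock_speed (default 30, overridable through ov7670_config) rather than the removed OV7670_FRAME_RATE constant, so only rates of clock_speed/n are reachable, as the new comment notes. A small sketch of the divider rounding, assuming the driver's CLK_SCALE mask value is 0x3f:

#include <stdio.h>

#define CLK_SCALE 0x3f  /* assumed value of the ov7670 CLKRC scale mask */

/* mirrors the divider math in the patched ov7670_s_parm() */
static void pick_rate(int num, int den, int clock_speed)
{
        int div = (num == 0 || den == 0) ? 1 : (num * clock_speed) / den;

        if (div == 0)
                div = 1;
        else if (div > CLK_SCALE)
                div = CLK_SCALE;

        printf("requested %d/%d s -> div %d -> 1/%d s\n",
               num, den, div, clock_speed / div);
}

int main(void)
{
        pick_rate(1, 10, 30);   /* div 3 -> exactly 1/10 s */
        pick_rate(1, 7, 30);    /* div 4 -> 1/7 s (30/4 truncated) */
        return 0;
}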
diff --git a/drivers/media/video/ov7670.h b/drivers/media/video/ov7670.h
new file mode 100644
index 000000000000..b133bc123031
--- /dev/null
+++ b/drivers/media/video/ov7670.h
@@ -0,0 +1,20 @@
+/*
+ * A V4L2 driver for OmniVision OV7670 cameras.
+ *
+ * Copyright 2010 One Laptop Per Child
+ *
+ * This file may be distributed under the terms of the GNU General
+ * Public License, version 2.
+ */
+
+#ifndef __OV7670_H
+#define __OV7670_H
+
+struct ov7670_config {
+ int min_width; /* Filter out smaller sizes */
+ int min_height; /* Filter out smaller sizes */
+ int clock_speed; /* External clock speed (MHz) */
+ bool use_smbus; /* Use smbus I/O instead of I2C */
+};
+
+#endif
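The new header carries the platform data consumed by ov7670_s_config() in the patch above. A hypothetical board file might fill it like this (the values are illustrative, not taken from any real board):

#include "ov7670.h"

/* hypothetical: accept only VGA and larger, 24 MHz external clock, plain I2C I/O */
static struct ov7670_config example_ov7670_cfg = {
        .min_width      = 640,
        .min_height     = 480,
        .clock_speed    = 24,
        .use_smbus      = false,
};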
diff --git a/drivers/media/video/ov772x.c b/drivers/media/video/ov772x.c
index 25eb5d637eea..a84b770352f9 100644
--- a/drivers/media/video/ov772x.c
+++ b/drivers/media/video/ov772x.c
@@ -599,7 +599,7 @@ static int ov772x_reset(struct i2c_client *client)
static int ov772x_s_stream(struct v4l2_subdev *sd, int enable)
{
- struct i2c_client *client = sd->priv;
+ struct i2c_client *client = v4l2_get_subdevdata(sd);
struct ov772x_priv *priv = to_ov772x(client);
if (!enable) {
@@ -645,7 +645,7 @@ static unsigned long ov772x_query_bus_param(struct soc_camera_device *icd)
static int ov772x_g_ctrl(struct v4l2_subdev *sd, struct v4l2_control *ctrl)
{
- struct i2c_client *client = sd->priv;
+ struct i2c_client *client = v4l2_get_subdevdata(sd);
struct ov772x_priv *priv = to_ov772x(client);
switch (ctrl->id) {
@@ -664,7 +664,7 @@ static int ov772x_g_ctrl(struct v4l2_subdev *sd, struct v4l2_control *ctrl)
static int ov772x_s_ctrl(struct v4l2_subdev *sd, struct v4l2_control *ctrl)
{
- struct i2c_client *client = sd->priv;
+ struct i2c_client *client = v4l2_get_subdevdata(sd);
struct ov772x_priv *priv = to_ov772x(client);
int ret = 0;
u8 val;
@@ -715,7 +715,7 @@ static int ov772x_s_ctrl(struct v4l2_subdev *sd, struct v4l2_control *ctrl)
static int ov772x_g_chip_ident(struct v4l2_subdev *sd,
struct v4l2_dbg_chip_ident *id)
{
- struct i2c_client *client = sd->priv;
+ struct i2c_client *client = v4l2_get_subdevdata(sd);
struct ov772x_priv *priv = to_ov772x(client);
id->ident = priv->model;
@@ -728,7 +728,7 @@ static int ov772x_g_chip_ident(struct v4l2_subdev *sd,
static int ov772x_g_register(struct v4l2_subdev *sd,
struct v4l2_dbg_register *reg)
{
- struct i2c_client *client = sd->priv;
+ struct i2c_client *client = v4l2_get_subdevdata(sd);
int ret;
reg->size = 1;
@@ -747,7 +747,7 @@ static int ov772x_g_register(struct v4l2_subdev *sd,
static int ov772x_s_register(struct v4l2_subdev *sd,
struct v4l2_dbg_register *reg)
{
- struct i2c_client *client = sd->priv;
+ struct i2c_client *client = v4l2_get_subdevdata(sd);
if (reg->reg > 0xff ||
reg->val > 0xff)
@@ -954,7 +954,7 @@ static int ov772x_cropcap(struct v4l2_subdev *sd, struct v4l2_cropcap *a)
static int ov772x_g_fmt(struct v4l2_subdev *sd,
struct v4l2_mbus_framefmt *mf)
{
- struct i2c_client *client = sd->priv;
+ struct i2c_client *client = v4l2_get_subdevdata(sd);
struct ov772x_priv *priv = to_ov772x(client);
if (!priv->win || !priv->cfmt) {
@@ -977,7 +977,7 @@ static int ov772x_g_fmt(struct v4l2_subdev *sd,
static int ov772x_s_fmt(struct v4l2_subdev *sd,
struct v4l2_mbus_framefmt *mf)
{
- struct i2c_client *client = sd->priv;
+ struct i2c_client *client = v4l2_get_subdevdata(sd);
struct ov772x_priv *priv = to_ov772x(client);
int ret = ov772x_set_params(client, &mf->width, &mf->height,
mf->code);
@@ -991,7 +991,7 @@ static int ov772x_s_fmt(struct v4l2_subdev *sd,
static int ov772x_try_fmt(struct v4l2_subdev *sd,
struct v4l2_mbus_framefmt *mf)
{
- struct i2c_client *client = sd->priv;
+ struct i2c_client *client = v4l2_get_subdevdata(sd);
struct ov772x_priv *priv = to_ov772x(client);
const struct ov772x_win_size *win;
int i;
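Every hunk in this ov772x diff (and in the ov9640 diff that follows) swaps the removed sd->priv field for v4l2_get_subdevdata(); the pointer recovered is the same i2c_client the driver handed to v4l2_i2c_subdev_init() at probe time, which stores it in the subdev. A two-line sketch of that round trip, using the names from these drivers:

/* at probe: v4l2_i2c_subdev_init() stashes the client in the subdev */
v4l2_i2c_subdev_init(&priv->subdev, client, &ov772x_subdev_ops);

/* in any subdev op: recover it without a driver-private field */
struct i2c_client *client = v4l2_get_subdevdata(sd);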
diff --git a/drivers/media/video/ov9640.c b/drivers/media/video/ov9640.c
index 40cdfab74ccc..99e9e1d3c83b 100644
--- a/drivers/media/video/ov9640.c
+++ b/drivers/media/video/ov9640.c
@@ -308,7 +308,7 @@ static unsigned long ov9640_query_bus_param(struct soc_camera_device *icd)
/* Get status of additional camera capabilities */
static int ov9640_g_ctrl(struct v4l2_subdev *sd, struct v4l2_control *ctrl)
{
- struct i2c_client *client = sd->priv;
+ struct i2c_client *client = v4l2_get_subdevdata(sd);
struct ov9640_priv *priv = container_of(i2c_get_clientdata(client),
struct ov9640_priv, subdev);
@@ -326,7 +326,7 @@ static int ov9640_g_ctrl(struct v4l2_subdev *sd, struct v4l2_control *ctrl)
/* Set status of additional camera capabilities */
static int ov9640_s_ctrl(struct v4l2_subdev *sd, struct v4l2_control *ctrl)
{
- struct i2c_client *client = sd->priv;
+ struct i2c_client *client = v4l2_get_subdevdata(sd);
struct ov9640_priv *priv = container_of(i2c_get_clientdata(client),
struct ov9640_priv, subdev);
@@ -360,7 +360,7 @@ static int ov9640_s_ctrl(struct v4l2_subdev *sd, struct v4l2_control *ctrl)
static int ov9640_g_chip_ident(struct v4l2_subdev *sd,
struct v4l2_dbg_chip_ident *id)
{
- struct i2c_client *client = sd->priv;
+ struct i2c_client *client = v4l2_get_subdevdata(sd);
struct ov9640_priv *priv = container_of(i2c_get_clientdata(client),
struct ov9640_priv, subdev);
@@ -374,7 +374,7 @@ static int ov9640_g_chip_ident(struct v4l2_subdev *sd,
static int ov9640_get_register(struct v4l2_subdev *sd,
struct v4l2_dbg_register *reg)
{
- struct i2c_client *client = sd->priv;
+ struct i2c_client *client = v4l2_get_subdevdata(sd);
int ret;
u8 val;
@@ -395,7 +395,7 @@ static int ov9640_get_register(struct v4l2_subdev *sd,
static int ov9640_set_register(struct v4l2_subdev *sd,
struct v4l2_dbg_register *reg)
{
- struct i2c_client *client = sd->priv;
+ struct i2c_client *client = v4l2_get_subdevdata(sd);
if (reg->reg & ~0xff || reg->val & ~0xff)
return -EINVAL;
@@ -558,7 +558,7 @@ static int ov9640_prog_dflt(struct i2c_client *client)
static int ov9640_s_fmt(struct v4l2_subdev *sd,
struct v4l2_mbus_framefmt *mf)
{
- struct i2c_client *client = sd->priv;
+ struct i2c_client *client = v4l2_get_subdevdata(sd);
struct ov9640_reg_alt alts = {0};
enum v4l2_colorspace cspace;
enum v4l2_mbus_pixelcode code = mf->code;
diff --git a/drivers/media/video/pvrusb2/pvrusb2-hdw.c b/drivers/media/video/pvrusb2/pvrusb2-hdw.c
index 70ea578d6266..66ad516bdfd9 100644
--- a/drivers/media/video/pvrusb2/pvrusb2-hdw.c
+++ b/drivers/media/video/pvrusb2/pvrusb2-hdw.c
@@ -2082,29 +2082,20 @@ static int pvr2_hdw_load_subdev(struct pvr2_hdw *hdw,
return -EINVAL;
}
- /* Note how the 2nd and 3rd arguments are the same for
- * v4l2_i2c_new_subdev(). Why?
- * Well the 2nd argument is the module name to load, while the 3rd
- * argument is documented in the framework as being the "chipid" -
- * and every other place where I can find examples of this, the
- * "chipid" appears to just be the module name again. So here we
- * just do the same thing. */
if (i2ccnt == 1) {
pvr2_trace(PVR2_TRACE_INIT,
"Module ID %u:"
" Setting up with specified i2c address 0x%x",
mid, i2caddr[0]);
sd = v4l2_i2c_new_subdev(&hdw->v4l2_dev, &hdw->i2c_adap,
- fname, fname,
- i2caddr[0], NULL);
+ fname, i2caddr[0], NULL);
} else {
pvr2_trace(PVR2_TRACE_INIT,
"Module ID %u:"
" Setting up with address probe list",
mid);
sd = v4l2_i2c_new_subdev(&hdw->v4l2_dev, &hdw->i2c_adap,
- fname, fname,
- 0, i2caddr);
+ fname, 0, i2caddr);
}
if (!sd) {
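The deleted comment had documented that v4l2_i2c_new_subdev() took the module name twice, once as the name and once as the "chipid"; the API dropped the redundant argument, so the two call sites above simply lose one fname. Old versus new call shape, as seen in the hunk:

/* before: module name passed twice */
sd = v4l2_i2c_new_subdev(&hdw->v4l2_dev, &hdw->i2c_adap,
                         fname, fname, i2caddr[0], NULL);

/* after: the redundant "chipid" argument is gone */
sd = v4l2_i2c_new_subdev(&hdw->v4l2_dev, &hdw->i2c_adap,
                         fname, i2caddr[0], NULL);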
diff --git a/drivers/media/video/pwc/Kconfig b/drivers/media/video/pwc/Kconfig
index 11980db22d31..8da42e4f1ba0 100644
--- a/drivers/media/video/pwc/Kconfig
+++ b/drivers/media/video/pwc/Kconfig
@@ -1,6 +1,6 @@
config USB_PWC
tristate "USB Philips Cameras"
- depends on VIDEO_V4L1
+ depends on VIDEO_V4L2
---help---
Say Y or M here if you want to use one of these Philips & OEM
webcams:
diff --git a/drivers/media/video/pwc/pwc-ctrl.c b/drivers/media/video/pwc/pwc-ctrl.c
index f7f7e04cf485..6b8fbddc0747 100644
--- a/drivers/media/video/pwc/pwc-ctrl.c
+++ b/drivers/media/video/pwc/pwc-ctrl.c
@@ -261,7 +261,7 @@ static int set_video_mode_Nala(struct pwc_device *pdev, int size, int frames)
PWC_DEBUG_MODULE("Failed to send video command... %d\n", ret);
return ret;
}
- if (pEntry->compressed && pdev->vpalette != VIDEO_PALETTE_RAW)
+ if (pEntry->compressed && pdev->pixfmt == V4L2_PIX_FMT_YUV420)
pwc_dec1_init(pdev->type, pdev->release, buf, pdev->decompress_data);
pdev->cmd_len = 3;
@@ -321,7 +321,7 @@ static int set_video_mode_Timon(struct pwc_device *pdev, int size, int frames, i
if (ret < 0)
return ret;
- if (pChoose->bandlength > 0 && pdev->vpalette != VIDEO_PALETTE_RAW)
+ if (pChoose->bandlength > 0 && pdev->pixfmt == V4L2_PIX_FMT_YUV420)
pwc_dec23_init(pdev, pdev->type, buf);
pdev->cmd_len = 13;
@@ -356,7 +356,7 @@ static int set_video_mode_Kiara(struct pwc_device *pdev, int size, int frames, i
fps = (frames / 5) - 1;
/* special case: VGA @ 5 fps and snapshot is raw bayer mode */
- if (size == PSZ_VGA && frames == 5 && snapshot && pdev->vpalette == VIDEO_PALETTE_RAW)
+ if (size == PSZ_VGA && frames == 5 && snapshot && pdev->pixfmt != V4L2_PIX_FMT_YUV420)
{
/* Only available in case the raw palette is selected or
we have the decompressor available. This mode is
@@ -394,7 +394,7 @@ static int set_video_mode_Kiara(struct pwc_device *pdev, int size, int frames, i
if (ret < 0)
return ret;
- if (pChoose->bandlength > 0 && pdev->vpalette != VIDEO_PALETTE_RAW)
+ if (pChoose->bandlength > 0 && pdev->pixfmt == V4L2_PIX_FMT_YUV420)
pwc_dec23_init(pdev, pdev->type, buf);
pdev->cmd_len = 12;
@@ -429,7 +429,7 @@ int pwc_set_video_mode(struct pwc_device *pdev, int width, int height, int frame
{
int ret, size;
- PWC_DEBUG_FLOW("set_video_mode(%dx%d @ %d, palette %d).\n", width, height, frames, pdev->vpalette);
+ PWC_DEBUG_FLOW("set_video_mode(%dx%d @ %d, pixfmt %08x).\n", width, height, frames, pdev->pixfmt);
size = pwc_decode_size(pdev, width, height);
if (size < 0) {
PWC_DEBUG_MODULE("Could not find suitable size.\n");
@@ -519,13 +519,13 @@ static void pwc_set_image_buffer_size(struct pwc_device *pdev)
{
int i, factor = 0;
- /* for PALETTE_YUV420P */
- switch(pdev->vpalette)
- {
- case VIDEO_PALETTE_YUV420P:
+ /* for V4L2_PIX_FMT_YUV420 */
+ switch (pdev->pixfmt) {
+ case V4L2_PIX_FMT_YUV420:
factor = 6;
break;
- case VIDEO_PALETTE_RAW:
+ case V4L2_PIX_FMT_PWC1:
+ case V4L2_PIX_FMT_PWC2:
factor = 6; /* can be uncompressed YUV420P */
break;
}
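Across the pwc hunks, the V4L1 palette field (vpalette with VIDEO_PALETTE_YUV420P/RAW) is replaced by the V4L2 pixel format kept in pdev->pixfmt; since the pwc-v4l.c change further below restricts set_fmt to YUV420, PWC1 and PWC2, "anything other than YUV420" is equivalent to "one of the two proprietary raw formats". A hypothetical helper capturing the test the patch open-codes in each place:

#include <linux/videodev2.h>

/* hypothetical helper; the patch instead repeats the inverse YUV420 check inline */
static inline bool pwc_raw_mode(u32 pixfmt)
{
        return pixfmt == V4L2_PIX_FMT_PWC1 || pixfmt == V4L2_PIX_FMT_PWC2;
}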
diff --git a/drivers/media/video/pwc/pwc-if.c b/drivers/media/video/pwc/pwc-if.c
index aea7e224cef6..f3dc89da4c4e 100644
--- a/drivers/media/video/pwc/pwc-if.c
+++ b/drivers/media/video/pwc/pwc-if.c
@@ -62,7 +62,6 @@
#include <linux/module.h>
#include <linux/poll.h>
#include <linux/slab.h>
-#include <linux/smp_lock.h>
#ifdef CONFIG_USB_PWC_INPUT_EVDEV
#include <linux/usb/input.h>
#endif
@@ -163,7 +162,7 @@ static const struct v4l2_file_operations pwc_fops = {
.read = pwc_video_read,
.poll = pwc_video_poll,
.mmap = pwc_video_mmap,
- .ioctl = pwc_video_ioctl,
+ .unlocked_ioctl = pwc_video_ioctl,
};
static struct video_device pwc_template = {
.name = "Philips Webcam", /* Filled in later */
@@ -1247,8 +1246,8 @@ static int pwc_video_close(struct file *file)
PWC_DEBUG_OPEN(">> video_close called(vdev = 0x%p).\n", vdev);
- lock_kernel();
pdev = video_get_drvdata(vdev);
+ mutex_lock(&pdev->modlock);
if (pdev->vopen == 0)
PWC_DEBUG_MODULE("video_close() called on closed device?\n");
@@ -1286,7 +1285,7 @@ static int pwc_video_close(struct file *file)
if (device_hint[hint].pdev == pdev)
device_hint[hint].pdev = NULL;
}
- unlock_kernel();
+ mutex_unlock(&pdev->modlock);
return 0;
}
@@ -1365,7 +1364,7 @@ static ssize_t pwc_video_read(struct file *file, char __user *buf,
}
PWC_DEBUG_READ("Copying data to user space.\n");
- if (pdev->vpalette == VIDEO_PALETTE_RAW)
+ if (pdev->pixfmt != V4L2_PIX_FMT_YUV420)
bytes_to_read = pdev->frame_size + sizeof(struct pwc_raw_frame);
else
bytes_to_read = pdev->view.size;
@@ -1800,13 +1799,6 @@ static int usb_pwc_probe(struct usb_interface *intf, const struct usb_device_id
}
pdev->vdev->release = video_device_release;
- rc = video_register_device(pdev->vdev, VFL_TYPE_GRABBER, video_nr);
- if (rc < 0) {
- PWC_ERROR("Failed to register as video device (%d).\n", rc);
- goto err_video_release;
- }
-
- PWC_INFO("Registered as %s.\n", video_device_node_name(pdev->vdev));
/* occupy slot */
if (hint < MAX_DEV_HINTS)
@@ -1814,14 +1806,22 @@ static int usb_pwc_probe(struct usb_interface *intf, const struct usb_device_id
PWC_DEBUG_PROBE("probe() function returning struct at 0x%p.\n", pdev);
usb_set_intfdata(intf, pdev);
- rc = pwc_create_sysfs_files(pdev->vdev);
- if (rc)
- goto err_video_unreg;
/* Set the leds off */
pwc_set_leds(pdev, 0, 0);
pwc_camera_power(pdev, 0);
+ rc = video_register_device(pdev->vdev, VFL_TYPE_GRABBER, video_nr);
+ if (rc < 0) {
+ PWC_ERROR("Failed to register as video device (%d).\n", rc);
+ goto err_video_release;
+ }
+ rc = pwc_create_sysfs_files(pdev->vdev);
+ if (rc)
+ goto err_video_unreg;
+
+ PWC_INFO("Registered as %s.\n", video_device_node_name(pdev->vdev));
+
#ifdef CONFIG_USB_PWC_INPUT_EVDEV
/* register webcam snapshot button input device */
pdev->button_dev = input_allocate_device();
@@ -1871,8 +1871,8 @@ static void usb_pwc_disconnect(struct usb_interface *intf)
struct pwc_device *pdev;
int hint;
- lock_kernel();
pdev = usb_get_intfdata (intf);
+ mutex_lock(&pdev->modlock);
usb_set_intfdata (intf, NULL);
if (pdev == NULL) {
PWC_ERROR("pwc_disconnect() Called without private pointer.\n");
@@ -1897,9 +1897,7 @@ static void usb_pwc_disconnect(struct usb_interface *intf)
wake_up_interruptible(&pdev->frameq);
/* Wait until device is closed */
if (pdev->vopen) {
- mutex_lock(&pdev->modlock);
pdev->unplugged = 1;
- mutex_unlock(&pdev->modlock);
pwc_iso_stop(pdev);
} else {
/* Device is closed, so we can safely unregister it */
@@ -1913,7 +1911,7 @@ disconnect_out:
device_hint[hint].pdev = NULL;
}
- unlock_kernel();
+ mutex_unlock(&pdev->modlock);
}
diff --git a/drivers/media/video/pwc/pwc-misc.c b/drivers/media/video/pwc/pwc-misc.c
index 589c687439da..6af5bb538358 100644
--- a/drivers/media/video/pwc/pwc-misc.c
+++ b/drivers/media/video/pwc/pwc-misc.c
@@ -47,7 +47,7 @@ int pwc_decode_size(struct pwc_device *pdev, int width, int height)
you don't have the decompressor loaded or use RAW mode,
the maximum viewable size is smaller.
*/
- if (pdev->vpalette == VIDEO_PALETTE_RAW)
+ if (pdev->pixfmt != V4L2_PIX_FMT_YUV420)
{
if (width > pdev->abs_max.x || height > pdev->abs_max.y)
{
@@ -123,7 +123,7 @@ void pwc_construct(struct pwc_device *pdev)
pdev->frame_header_size = 0;
pdev->frame_trailer_size = 0;
}
- pdev->vpalette = VIDEO_PALETTE_YUV420P; /* default */
+ pdev->pixfmt = V4L2_PIX_FMT_YUV420; /* default */
pdev->view_min.size = pdev->view_min.x * pdev->view_min.y;
pdev->view_max.size = pdev->view_max.x * pdev->view_max.y;
/* length of image, in YUV format; always allocate enough memory. */
diff --git a/drivers/media/video/pwc/pwc-uncompress.c b/drivers/media/video/pwc/pwc-uncompress.c
index 5d82028ef942..3b73f295f032 100644
--- a/drivers/media/video/pwc/pwc-uncompress.c
+++ b/drivers/media/video/pwc/pwc-uncompress.c
@@ -54,7 +54,7 @@ int pwc_decompress(struct pwc_device *pdev)
yuv = fbuf->data + pdev->frame_header_size; /* Skip header */
/* Raw format; that's easy... */
- if (pdev->vpalette == VIDEO_PALETTE_RAW)
+ if (pdev->pixfmt != V4L2_PIX_FMT_YUV420)
{
struct pwc_raw_frame *raw_frame = image;
raw_frame->type = cpu_to_le16(pdev->type);
diff --git a/drivers/media/video/pwc/pwc-v4l.c b/drivers/media/video/pwc/pwc-v4l.c
index 62d89b3113a4..7061a03f5cf1 100644
--- a/drivers/media/video/pwc/pwc-v4l.c
+++ b/drivers/media/video/pwc/pwc-v4l.c
@@ -216,7 +216,7 @@ static void pwc_vidioc_fill_fmt(const struct pwc_device *pdev, struct v4l2_forma
f->fmt.pix.width = pdev->view.x;
f->fmt.pix.height = pdev->view.y;
f->fmt.pix.field = V4L2_FIELD_NONE;
- if (pdev->vpalette == VIDEO_PALETTE_YUV420P) {
+ if (pdev->pixfmt == V4L2_PIX_FMT_YUV420) {
f->fmt.pix.pixelformat = V4L2_PIX_FMT_YUV420;
f->fmt.pix.bytesperline = (f->fmt.pix.width * 3)/2;
f->fmt.pix.sizeimage = f->fmt.pix.height * f->fmt.pix.bytesperline;
@@ -304,10 +304,10 @@ static int pwc_vidioc_set_fmt(struct pwc_device *pdev, struct v4l2_format *f)
fps = pdev->vframes;
}
- if (pixelformat == V4L2_PIX_FMT_YUV420)
- pdev->vpalette = VIDEO_PALETTE_YUV420P;
- else
- pdev->vpalette = VIDEO_PALETTE_RAW;
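+ /* Only YUV420 and the device-specific raw PWC1/PWC2 formats are
+ * accepted; pdev->pixfmt is updated below once the mode change has
+ * succeeded. */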
+ if (pixelformat != V4L2_PIX_FMT_YUV420 &&
+ pixelformat != V4L2_PIX_FMT_PWC1 &&
+ pixelformat != V4L2_PIX_FMT_PWC2)
+ return -EINVAL;
PWC_DEBUG_IOCTL("Try to change format to: width=%d height=%d fps=%d "
"compression=%d snapshot=%d format=%c%c%c%c\n",
@@ -330,6 +330,8 @@ static int pwc_vidioc_set_fmt(struct pwc_device *pdev, struct v4l2_format *f)
if (ret)
return ret;
+ pdev->pixfmt = pixelformat;
+
pwc_vidioc_fill_fmt(pdev, f);
return 0;
@@ -357,152 +359,7 @@ long pwc_video_do_ioctl(struct file *file, unsigned int cmd, void *arg)
switch (cmd) {
- /* Query cabapilities */
- case VIDIOCGCAP:
- {
- struct video_capability *caps = arg;
-
- strcpy(caps->name, vdev->name);
- caps->type = VID_TYPE_CAPTURE;
- caps->channels = 1;
- caps->audios = 1;
- caps->minwidth = pdev->view_min.x;
- caps->minheight = pdev->view_min.y;
- caps->maxwidth = pdev->view_max.x;
- caps->maxheight = pdev->view_max.y;
- break;
- }
-
- /* Channel functions (simulate 1 channel) */
- case VIDIOCGCHAN:
- {
- struct video_channel *v = arg;
-
- if (v->channel != 0)
- return -EINVAL;
- v->flags = 0;
- v->tuners = 0;
- v->type = VIDEO_TYPE_CAMERA;
- strcpy(v->name, "Webcam");
- return 0;
- }
-
- case VIDIOCSCHAN:
- {
- /* The spec says the argument is an integer, but
- the bttv driver uses a video_channel arg, which
- makes sense becasue it also has the norm flag.
- */
- struct video_channel *v = arg;
- if (v->channel != 0)
- return -EINVAL;
- return 0;
- }
-
-
- /* Picture functions; contrast etc. */
- case VIDIOCGPICT:
- {
- struct video_picture *p = arg;
- int val;
-
- val = pwc_get_brightness(pdev);
- if (val >= 0)
- p->brightness = (val<<9);
- else
- p->brightness = 0xffff;
- val = pwc_get_contrast(pdev);
- if (val >= 0)
- p->contrast = (val<<10);
- else
- p->contrast = 0xffff;
- /* Gamma, Whiteness, what's the difference? :) */
- val = pwc_get_gamma(pdev);
- if (val >= 0)
- p->whiteness = (val<<11);
- else
- p->whiteness = 0xffff;
- if (pwc_get_saturation(pdev, &val)<0)
- p->colour = 0xffff;
- else
- p->colour = 32768 + val * 327;
- p->depth = 24;
- p->palette = pdev->vpalette;
- p->hue = 0xFFFF; /* N/A */
- break;
- }
-
- case VIDIOCSPICT:
- {
- struct video_picture *p = arg;
- /*
- * FIXME: Suppose we are mid read
- ANSWER: No problem: the firmware of the camera
- can handle brightness/contrast/etc
- changes at _any_ time, and the palette
- is used exactly once in the uncompress
- routine.
- */
- pwc_set_brightness(pdev, p->brightness);
- pwc_set_contrast(pdev, p->contrast);
- pwc_set_gamma(pdev, p->whiteness);
- pwc_set_saturation(pdev, (p->colour-32768)/327);
- if (p->palette && p->palette != pdev->vpalette) {
- switch (p->palette) {
- case VIDEO_PALETTE_YUV420P:
- case VIDEO_PALETTE_RAW:
- pdev->vpalette = p->palette;
- return pwc_try_video_mode(pdev, pdev->image.x, pdev->image.y, pdev->vframes, pdev->vcompression, pdev->vsnapshot);
- break;
- default:
- return -EINVAL;
- break;
- }
- }
- break;
- }
-
- /* Window/size parameters */
- case VIDIOCGWIN:
- {
- struct video_window *vw = arg;
-
- vw->x = 0;
- vw->y = 0;
- vw->width = pdev->view.x;
- vw->height = pdev->view.y;
- vw->chromakey = 0;
- vw->flags = (pdev->vframes << PWC_FPS_SHIFT) |
- (pdev->vsnapshot ? PWC_FPS_SNAPSHOT : 0);
- break;
- }
-
- case VIDIOCSWIN:
- {
- struct video_window *vw = arg;
- int fps, snapshot, ret;
-
- fps = (vw->flags & PWC_FPS_FRMASK) >> PWC_FPS_SHIFT;
- snapshot = vw->flags & PWC_FPS_SNAPSHOT;
- if (fps == 0)
- fps = pdev->vframes;
- if (pdev->view.x == vw->width && pdev->view.y && fps == pdev->vframes && snapshot == pdev->vsnapshot)
- return 0;
- ret = pwc_try_video_mode(pdev, vw->width, vw->height, fps, pdev->vcompression, snapshot);
- if (ret)
- return ret;
- break;
- }
-
- /* We don't have overlay support (yet) */
- case VIDIOCGFBUF:
- {
- struct video_buffer *vb = arg;
-
- memset(vb,0,sizeof(*vb));
- break;
- }
-
+#ifdef CONFIG_VIDEO_V4L1_COMPAT
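+ /* Of the old V4L1 ioctls only VIDIOCGMBUF is kept, and only when V4L1
+ * compatibility support is configured in. */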
/* mmap() functions */
case VIDIOCGMBUF:
{
@@ -517,164 +374,7 @@ long pwc_video_do_ioctl(struct file *file, unsigned int cmd, void *arg)
vm->offsets[i] = i * pdev->len_per_image;
break;
}
-
- case VIDIOCMCAPTURE:
- {
- /* Start capture into a given image buffer (called 'frame' in video_mmap structure) */
- struct video_mmap *vm = arg;
-
- PWC_DEBUG_READ("VIDIOCMCAPTURE: %dx%d, frame %d, format %d\n", vm->width, vm->height, vm->frame, vm->format);
- if (vm->frame < 0 || vm->frame >= pwc_mbufs)
- return -EINVAL;
-
- /* xawtv is nasty. It probes the available palettes
- by setting a very small image size and trying
- various palettes... The driver doesn't support
- such small images, so I'm working around it.
- */
- if (vm->format)
- {
- switch (vm->format)
- {
- case VIDEO_PALETTE_YUV420P:
- case VIDEO_PALETTE_RAW:
- break;
- default:
- return -EINVAL;
- break;
- }
- }
-
- if ((vm->width != pdev->view.x || vm->height != pdev->view.y) &&
- (vm->width >= pdev->view_min.x && vm->height >= pdev->view_min.y)) {
- int ret;
-
- PWC_DEBUG_OPEN("VIDIOCMCAPTURE: changing size to please xawtv :-(.\n");
- ret = pwc_try_video_mode(pdev, vm->width, vm->height, pdev->vframes, pdev->vcompression, pdev->vsnapshot);
- if (ret)
- return ret;
- } /* ... size mismatch */
-
- /* FIXME: should we lock here? */
- if (pdev->image_used[vm->frame])
- return -EBUSY; /* buffer wasn't available. Bummer */
- pdev->image_used[vm->frame] = 1;
-
- /* Okay, we're done here. In the SYNC call we wait until a
- frame comes available, then expand image into the given
- buffer.
- In contrast to the CPiA cam the Philips cams deliver a
- constant stream, almost like a grabber card. Also,
- we have separate buffers for the rawdata and the image,
- meaning we can nearly always expand into the requested buffer.
- */
- PWC_DEBUG_READ("VIDIOCMCAPTURE done.\n");
- break;
- }
-
- case VIDIOCSYNC:
- {
- /* The doc says: "Whenever a buffer is used it should
- call VIDIOCSYNC to free this frame up and continue."
-
- The only odd thing about this whole procedure is
- that MCAPTURE flags the buffer as "in use", and
- SYNC immediately unmarks it, while it isn't
- after SYNC that you know that the buffer actually
- got filled! So you better not start a CAPTURE in
- the same frame immediately (use double buffering).
- This is not a problem for this cam, since it has
- extra intermediate buffers, but a hardware
- grabber card will then overwrite the buffer
- you're working on.
- */
- int *mbuf = arg;
- int ret;
-
- PWC_DEBUG_READ("VIDIOCSYNC called (%d).\n", *mbuf);
-
- /* bounds check */
- if (*mbuf < 0 || *mbuf >= pwc_mbufs)
- return -EINVAL;
- /* check if this buffer was requested anyway */
- if (pdev->image_used[*mbuf] == 0)
- return -EINVAL;
-
- /* Add ourselves to the frame wait-queue.
-
- FIXME: needs auditing for safety.
- QUESTION: In what respect? I think that using the
- frameq is safe now.
- */
- add_wait_queue(&pdev->frameq, &wait);
- while (pdev->full_frames == NULL) {
- /* Check for unplugged/etc. here */
- if (pdev->error_status) {
- remove_wait_queue(&pdev->frameq, &wait);
- set_current_state(TASK_RUNNING);
- return -pdev->error_status;
- }
-
- if (signal_pending(current)) {
- remove_wait_queue(&pdev->frameq, &wait);
- set_current_state(TASK_RUNNING);
- return -ERESTARTSYS;
- }
- schedule();
- set_current_state(TASK_INTERRUPTIBLE);
- }
- remove_wait_queue(&pdev->frameq, &wait);
- set_current_state(TASK_RUNNING);
-
- /* The frame is ready. Expand in the image buffer
- requested by the user. I don't care if you
- mmap() 5 buffers and request data in this order:
- buffer 4 2 3 0 1 2 3 0 4 3 1 . . .
- Grabber hardware may not be so forgiving.
- */
- PWC_DEBUG_READ("VIDIOCSYNC: frame ready.\n");
- pdev->fill_image = *mbuf; /* tell in which buffer we want the image to be expanded */
- /* Decompress, etc */
- ret = pwc_handle_frame(pdev);
- pdev->image_used[*mbuf] = 0;
- if (ret)
- return -EFAULT;
- break;
- }
-
- case VIDIOCGAUDIO:
- {
- struct video_audio *v = arg;
-
- strcpy(v->name, "Microphone");
- v->audio = -1; /* unknown audio minor */
- v->flags = 0;
- v->mode = VIDEO_SOUND_MONO;
- v->volume = 0;
- v->bass = 0;
- v->treble = 0;
- v->balance = 0x8000;
- v->step = 1;
- break;
- }
-
- case VIDIOCSAUDIO:
- {
- /* Dummy: nothing can be set */
- break;
- }
-
- case VIDIOCGUNIT:
- {
- struct video_unit *vu = arg;
-
- vu->video = pdev->vdev->minor & 0x3F;
- vu->audio = -1; /* not known yet */
- vu->vbi = -1;
- vu->radio = -1;
- vu->teletext = -1;
- break;
- }
+#endif
/* V4L2 Layer */
case VIDIOC_QUERYCAP:
@@ -1081,7 +781,7 @@ long pwc_video_do_ioctl(struct file *file, unsigned int cmd, void *arg)
buf->type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
buf->index = index;
buf->m.offset = index * pdev->len_per_image;
- if (pdev->vpalette == VIDEO_PALETTE_RAW)
+ if (pdev->pixfmt != V4L2_PIX_FMT_YUV420)
buf->bytesused = pdev->frame_size + sizeof(struct pwc_raw_frame);
else
buf->bytesused = pdev->view.size;
@@ -1158,7 +858,7 @@ long pwc_video_do_ioctl(struct file *file, unsigned int cmd, void *arg)
PWC_DEBUG_IOCTL("VIDIOC_DQBUF: after pwc_handle_frame\n");
buf->index = pdev->fill_image;
- if (pdev->vpalette == VIDEO_PALETTE_RAW)
+ if (pdev->pixfmt != V4L2_PIX_FMT_YUV420)
buf->bytesused = pdev->frame_size + sizeof(struct pwc_raw_frame);
else
buf->bytesused = pdev->view.size;
diff --git a/drivers/media/video/pwc/pwc.h b/drivers/media/video/pwc/pwc.h
index f1b206632957..36a9c83b5f5d 100644
--- a/drivers/media/video/pwc/pwc.h
+++ b/drivers/media/video/pwc/pwc.h
@@ -34,7 +34,7 @@
#include <linux/mm.h>
#include <linux/slab.h>
#include <asm/errno.h>
-#include <linux/videodev.h>
+#include <linux/videodev2.h>
#include <media/v4l2-common.h>
#include <media/v4l2-ioctl.h>
#ifdef CONFIG_USB_PWC_INPUT_EVDEV
@@ -49,7 +49,7 @@
#define PWC_MINOR 0
#define PWC_EXTRAMINOR 12
#define PWC_VERSION_CODE KERNEL_VERSION(PWC_MAJOR,PWC_MINOR,PWC_EXTRAMINOR)
-#define PWC_VERSION "10.0.13"
+#define PWC_VERSION "10.0.14"
#define PWC_NAME "pwc"
#define PFX PWC_NAME ": "
@@ -180,7 +180,7 @@ struct pwc_device
int vcinterface; /* video control interface */
int valternate; /* alternate interface needed */
int vframes, vsize; /* frames-per-second & size (see PSZ_*) */
- int vpalette; /* palette: 420P, RAW or RGBBAYER */
+ int pixfmt; /* pixelformat: V4L2_PIX_FMT_YUV420 or raw: _PWC1, _PWC2 */
int vframe_count; /* received frames */
int vframes_dumped; /* counter for dumped frames */
int vframes_error; /* frames received in error */
diff --git a/drivers/media/video/pxa_camera.c b/drivers/media/video/pxa_camera.c
index 9de7d59916bd..c143ed0a5270 100644
--- a/drivers/media/video/pxa_camera.c
+++ b/drivers/media/video/pxa_camera.c
@@ -275,7 +275,7 @@ static void free_buffer(struct videobuf_queue *vq, struct pxa_buffer *buf)
* This waits until this buffer is out of danger, i.e., until it is no
* longer in STATE_QUEUED or STATE_ACTIVE
*/
- videobuf_waiton(&buf->vb, 0, 0);
+ videobuf_waiton(vq, &buf->vb, 0, 0);
videobuf_dma_unmap(vq->dev, dma);
videobuf_dma_free(dma);
@@ -852,7 +852,7 @@ static void pxa_camera_init_videobuf(struct videobuf_queue *q,
*/
videobuf_queue_sg_init(q, &pxa_videobuf_ops, NULL, &pcdev->lock,
V4L2_BUF_TYPE_VIDEO_CAPTURE, V4L2_FIELD_NONE,
- sizeof(struct pxa_buffer), icd);
+ sizeof(struct pxa_buffer), icd, NULL);
}
static u32 mclk_get_divisor(struct platform_device *pdev,
@@ -1539,7 +1539,7 @@ static int pxa_camera_try_fmt(struct soc_camera_device *icd,
return ret;
}
-static int pxa_camera_reqbufs(struct soc_camera_file *icf,
+static int pxa_camera_reqbufs(struct soc_camera_device *icd,
struct v4l2_requestbuffers *p)
{
int i;
@@ -1551,7 +1551,7 @@ static int pxa_camera_reqbufs(struct soc_camera_file *icf,
* it hadn't triggered
*/
for (i = 0; i < p->count; i++) {
- struct pxa_buffer *buf = container_of(icf->vb_vidq.bufs[i],
+ struct pxa_buffer *buf = container_of(icd->vb_vidq.bufs[i],
struct pxa_buffer, vb);
buf->inwork = 0;
INIT_LIST_HEAD(&buf->vb.queue);
@@ -1562,10 +1562,10 @@ static int pxa_camera_reqbufs(struct soc_camera_file *icf,
static unsigned int pxa_camera_poll(struct file *file, poll_table *pt)
{
- struct soc_camera_file *icf = file->private_data;
+ struct soc_camera_device *icd = file->private_data;
struct pxa_buffer *buf;
- buf = list_entry(icf->vb_vidq.stream.next, struct pxa_buffer,
+ buf = list_entry(icd->vb_vidq.stream.next, struct pxa_buffer,
vb.stream);
poll_wait(file, &buf->vb.done, pt);
diff --git a/drivers/media/video/rj54n1cb0c.c b/drivers/media/video/rj54n1cb0c.c
index ce78fff23425..d2fa2d43ff19 100644
--- a/drivers/media/video/rj54n1cb0c.c
+++ b/drivers/media/video/rj54n1cb0c.c
@@ -493,7 +493,7 @@ static int rj54n1_enum_fmt(struct v4l2_subdev *sd, unsigned int index,
static int rj54n1_s_stream(struct v4l2_subdev *sd, int enable)
{
- struct i2c_client *client = sd->priv;
+ struct i2c_client *client = v4l2_get_subdevdata(sd);
/* Switch between preview and still shot modes */
return reg_set(client, RJ54N1_STILL_CONTROL, (!enable) << 7, 0x80);
@@ -503,7 +503,7 @@ static int rj54n1_set_bus_param(struct soc_camera_device *icd,
unsigned long flags)
{
struct v4l2_subdev *sd = soc_camera_to_subdev(icd);
- struct i2c_client *client = sd->priv;
+ struct i2c_client *client = v4l2_get_subdevdata(sd);
/* Figures 2.5-1 to 2.5-3 - default falling pixclk edge */
if (flags & SOCAM_PCLK_SAMPLE_RISING)
@@ -560,7 +560,7 @@ static int rj54n1_sensor_scale(struct v4l2_subdev *sd, s32 *in_w, s32 *in_h,
static int rj54n1_s_crop(struct v4l2_subdev *sd, struct v4l2_crop *a)
{
- struct i2c_client *client = sd->priv;
+ struct i2c_client *client = v4l2_get_subdevdata(sd);
struct rj54n1 *rj54n1 = to_rj54n1(client);
struct v4l2_rect *rect = &a->c;
int dummy = 0, output_w, output_h,
@@ -595,7 +595,7 @@ static int rj54n1_s_crop(struct v4l2_subdev *sd, struct v4l2_crop *a)
static int rj54n1_g_crop(struct v4l2_subdev *sd, struct v4l2_crop *a)
{
- struct i2c_client *client = sd->priv;
+ struct i2c_client *client = v4l2_get_subdevdata(sd);
struct rj54n1 *rj54n1 = to_rj54n1(client);
a->c = rj54n1->rect;
@@ -621,7 +621,7 @@ static int rj54n1_cropcap(struct v4l2_subdev *sd, struct v4l2_cropcap *a)
static int rj54n1_g_fmt(struct v4l2_subdev *sd,
struct v4l2_mbus_framefmt *mf)
{
- struct i2c_client *client = sd->priv;
+ struct i2c_client *client = v4l2_get_subdevdata(sd);
struct rj54n1 *rj54n1 = to_rj54n1(client);
mf->code = rj54n1->fmt->code;
@@ -641,7 +641,7 @@ static int rj54n1_g_fmt(struct v4l2_subdev *sd,
static int rj54n1_sensor_scale(struct v4l2_subdev *sd, s32 *in_w, s32 *in_h,
s32 *out_w, s32 *out_h)
{
- struct i2c_client *client = sd->priv;
+ struct i2c_client *client = v4l2_get_subdevdata(sd);
struct rj54n1 *rj54n1 = to_rj54n1(client);
unsigned int skip, resize, input_w = *in_w, input_h = *in_h,
output_w = *out_w, output_h = *out_h;
@@ -983,7 +983,7 @@ static int rj54n1_reg_init(struct i2c_client *client)
static int rj54n1_try_fmt(struct v4l2_subdev *sd,
struct v4l2_mbus_framefmt *mf)
{
- struct i2c_client *client = sd->priv;
+ struct i2c_client *client = v4l2_get_subdevdata(sd);
struct rj54n1 *rj54n1 = to_rj54n1(client);
const struct rj54n1_datafmt *fmt;
int align = mf->code == V4L2_MBUS_FMT_SBGGR10_1X10 ||
@@ -1014,7 +1014,7 @@ static int rj54n1_try_fmt(struct v4l2_subdev *sd,
static int rj54n1_s_fmt(struct v4l2_subdev *sd,
struct v4l2_mbus_framefmt *mf)
{
- struct i2c_client *client = sd->priv;
+ struct i2c_client *client = v4l2_get_subdevdata(sd);
struct rj54n1 *rj54n1 = to_rj54n1(client);
const struct rj54n1_datafmt *fmt;
int output_w, output_h, max_w, max_h,
@@ -1145,7 +1145,7 @@ static int rj54n1_s_fmt(struct v4l2_subdev *sd,
static int rj54n1_g_chip_ident(struct v4l2_subdev *sd,
struct v4l2_dbg_chip_ident *id)
{
- struct i2c_client *client = sd->priv;
+ struct i2c_client *client = v4l2_get_subdevdata(sd);
if (id->match.type != V4L2_CHIP_MATCH_I2C_ADDR)
return -EINVAL;
@@ -1163,7 +1163,7 @@ static int rj54n1_g_chip_ident(struct v4l2_subdev *sd,
static int rj54n1_g_register(struct v4l2_subdev *sd,
struct v4l2_dbg_register *reg)
{
- struct i2c_client *client = sd->priv;
+ struct i2c_client *client = v4l2_get_subdevdata(sd);
if (reg->match.type != V4L2_CHIP_MATCH_I2C_ADDR ||
reg->reg < 0x400 || reg->reg > 0x1fff)
@@ -1185,7 +1185,7 @@ static int rj54n1_g_register(struct v4l2_subdev *sd,
static int rj54n1_s_register(struct v4l2_subdev *sd,
struct v4l2_dbg_register *reg)
{
- struct i2c_client *client = sd->priv;
+ struct i2c_client *client = v4l2_get_subdevdata(sd);
if (reg->match.type != V4L2_CHIP_MATCH_I2C_ADDR ||
reg->reg < 0x400 || reg->reg > 0x1fff)
@@ -1248,7 +1248,7 @@ static struct soc_camera_ops rj54n1_ops = {
static int rj54n1_g_ctrl(struct v4l2_subdev *sd, struct v4l2_control *ctrl)
{
- struct i2c_client *client = sd->priv;
+ struct i2c_client *client = v4l2_get_subdevdata(sd);
struct rj54n1 *rj54n1 = to_rj54n1(client);
int data;
@@ -1283,7 +1283,7 @@ static int rj54n1_g_ctrl(struct v4l2_subdev *sd, struct v4l2_control *ctrl)
static int rj54n1_s_ctrl(struct v4l2_subdev *sd, struct v4l2_control *ctrl)
{
int data;
- struct i2c_client *client = sd->priv;
+ struct i2c_client *client = v4l2_get_subdevdata(sd);
struct rj54n1 *rj54n1 = to_rj54n1(client);
const struct v4l2_queryctrl *qctrl;
diff --git a/drivers/media/video/s2255drv.c b/drivers/media/video/s2255drv.c
index 8ec7c9a45a17..a845753665c1 100644
--- a/drivers/media/video/s2255drv.c
+++ b/drivers/media/video/s2255drv.c
@@ -49,7 +49,6 @@
#include <linux/videodev2.h>
#include <linux/version.h>
#include <linux/mm.h>
-#include <linux/smp_lock.h>
#include <media/videobuf-vmalloc.h>
#include <media/v4l2-common.h>
#include <media/v4l2-device.h>
@@ -600,7 +599,7 @@ static int s2255_got_frame(struct s2255_channel *channel, int jpgsize)
dprintk(2, "%s: [buf/i] [%p/%d]\n", __func__, buf, buf->vb.i);
unlock:
spin_unlock_irqrestore(&dev->slock, flags);
- return 0;
+ return rc;
}
static const struct s2255_fmt *format_by_fourcc(int fourcc)
@@ -1817,7 +1816,7 @@ static int s2255_open(struct file *file)
NULL, &dev->slock,
fh->type,
V4L2_FIELD_INTERLACED,
- sizeof(struct s2255_buffer), fh);
+ sizeof(struct s2255_buffer), fh, NULL);
return 0;
}
diff --git a/drivers/media/video/s5p-fimc/Makefile b/drivers/media/video/s5p-fimc/Makefile
index 0d9d54132ecc..7ea1b1403b1e 100644
--- a/drivers/media/video/s5p-fimc/Makefile
+++ b/drivers/media/video/s5p-fimc/Makefile
@@ -1,3 +1,3 @@
obj-$(CONFIG_VIDEO_SAMSUNG_S5P_FIMC) := s5p-fimc.o
-s5p-fimc-y := fimc-core.o fimc-reg.o
+s5p-fimc-y := fimc-core.o fimc-reg.o fimc-capture.o
diff --git a/drivers/media/video/s5p-fimc/fimc-capture.c b/drivers/media/video/s5p-fimc/fimc-capture.c
new file mode 100644
index 000000000000..1b93207c89e8
--- /dev/null
+++ b/drivers/media/video/s5p-fimc/fimc-capture.c
@@ -0,0 +1,819 @@
+/*
+ * Samsung S5P SoC series camera interface (camera capture) driver
+ *
+ * Copyright (c) 2010 Samsung Electronics Co., Ltd
+ * Author: Sylwester Nawrocki, <s.nawrocki@samsung.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/version.h>
+#include <linux/types.h>
+#include <linux/errno.h>
+#include <linux/bug.h>
+#include <linux/interrupt.h>
+#include <linux/device.h>
+#include <linux/platform_device.h>
+#include <linux/list.h>
+#include <linux/slab.h>
+#include <linux/clk.h>
+#include <linux/i2c.h>
+
+#include <linux/videodev2.h>
+#include <media/v4l2-device.h>
+#include <media/v4l2-ioctl.h>
+#include <media/v4l2-mem2mem.h>
+#include <media/videobuf-core.h>
+#include <media/videobuf-dma-contig.h>
+
+#include "fimc-core.h"
+
+static struct v4l2_subdev *fimc_subdev_register(struct fimc_dev *fimc,
+ struct s3c_fimc_isp_info *isp_info)
+{
+ struct i2c_adapter *i2c_adap;
+ struct fimc_vid_cap *vid_cap = &fimc->vid_cap;
+ struct v4l2_subdev *sd = NULL;
+
+ i2c_adap = i2c_get_adapter(isp_info->i2c_bus_num);
+ if (!i2c_adap)
+ return NULL;
+
+ sd = v4l2_i2c_new_subdev_board(&vid_cap->v4l2_dev, i2c_adap,
+ isp_info->board_info, NULL);
+ if (!sd) {
+ v4l2_err(&vid_cap->v4l2_dev, "failed to acquire subdev\n");
+ return NULL;
+ }
+
+ v4l2_info(&vid_cap->v4l2_dev, "subdevice %s registered successfully\n",
+ isp_info->board_info->type);
+
+ return sd;
+}
+
+static void fimc_subdev_unregister(struct fimc_dev *fimc)
+{
+ struct fimc_vid_cap *vid_cap = &fimc->vid_cap;
+ struct i2c_client *client;
+
+ if (vid_cap->input_index < 0)
+ return; /* Subdevice already released or not registered. */
+
+ if (vid_cap->sd) {
+ v4l2_device_unregister_subdev(vid_cap->sd);
+ client = v4l2_get_subdevdata(vid_cap->sd);
+ i2c_unregister_device(client);
+ i2c_put_adapter(client->adapter);
+ vid_cap->sd = NULL;
+ }
+
+ vid_cap->input_index = -1;
+}
+
+/**
+ * fimc_subdev_attach - attach v4l2_subdev to camera host interface
+ *
+ * @fimc: FIMC device information
+ * @index: index to the array of available subdevices,
+ * -1 for a full array search or a non-negative value
+ * to select a specific subdevice
+ */
+static int fimc_subdev_attach(struct fimc_dev *fimc, int index)
+{
+ struct fimc_vid_cap *vid_cap = &fimc->vid_cap;
+ struct s3c_platform_fimc *pdata = fimc->pdata;
+ struct s3c_fimc_isp_info *isp_info;
+ struct v4l2_subdev *sd;
+ int i;
+
+ for (i = 0; i < FIMC_MAX_CAMIF_CLIENTS; ++i) {
+ isp_info = pdata->isp_info[i];
+
+ if (!isp_info || (index >= 0 && i != index))
+ continue;
+
+ sd = fimc_subdev_register(fimc, isp_info);
+ if (sd) {
+ vid_cap->sd = sd;
+ vid_cap->input_index = i;
+
+ return 0;
+ }
+ }
+
+ vid_cap->input_index = -1;
+ vid_cap->sd = NULL;
+ v4l2_err(&vid_cap->v4l2_dev, "fimc%d: sensor attach failed\n",
+ fimc->id);
+ return -ENODEV;
+}
+
+static int fimc_isp_subdev_init(struct fimc_dev *fimc, int index)
+{
+ struct s3c_fimc_isp_info *isp_info;
+ int ret;
+
+ ret = fimc_subdev_attach(fimc, index);
+ if (ret)
+ return ret;
+
+ isp_info = fimc->pdata->isp_info[fimc->vid_cap.input_index];
+ ret = fimc_hw_set_camera_polarity(fimc, isp_info);
+ if (!ret) {
+ ret = v4l2_subdev_call(fimc->vid_cap.sd, core,
+ s_power, 1);
+ if (!ret)
+ return ret;
+ }
+
+ fimc_subdev_unregister(fimc);
+ err("ISP initialization failed: %d", ret);
+ return ret;
+}
+
+/*
+ * At least one buffer on the pending_buf_q queue is required.
+ * Locking: The caller holds fimc->slock spinlock.
+ */
+int fimc_vid_cap_buf_queue(struct fimc_dev *fimc,
+ struct fimc_vid_buffer *fimc_vb)
+{
+ struct fimc_vid_cap *cap = &fimc->vid_cap;
+ struct fimc_ctx *ctx = cap->ctx;
+ int ret = 0;
+
+ BUG_ON(!fimc || !fimc_vb);
+
+ ret = fimc_prepare_addr(ctx, fimc_vb, &ctx->d_frame,
+ &fimc_vb->paddr);
+ if (ret)
+ return ret;
+
+ if (test_bit(ST_CAPT_STREAM, &fimc->state)) {
+ fimc_pending_queue_add(cap, fimc_vb);
+ } else {
+ /* Set up the buffer directly for processing. */
+ int buf_id = (cap->reqbufs_count == 1) ? -1 : cap->buf_index;
+ fimc_hw_set_output_addr(fimc, &fimc_vb->paddr, buf_id);
+
+ fimc_vb->index = cap->buf_index;
+ active_queue_add(cap, fimc_vb);
+
+ if (++cap->buf_index >= FIMC_MAX_OUT_BUFS)
+ cap->buf_index = 0;
+ }
+ return ret;
+}
+
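+/*
+ * Deactivate the capture DMA and signal the interrupt handler via
+ * ST_CAPT_SHUT, then stop the sensor stream and clear the capture
+ * state bits.
+ */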
+static int fimc_stop_capture(struct fimc_dev *fimc)
+{
+ unsigned long flags;
+ struct fimc_vid_cap *cap;
+ int ret;
+
+ cap = &fimc->vid_cap;
+
+ if (!fimc_capture_active(fimc))
+ return 0;
+
+ spin_lock_irqsave(&fimc->slock, flags);
+ set_bit(ST_CAPT_SHUT, &fimc->state);
+ fimc_deactivate_capture(fimc);
+ spin_unlock_irqrestore(&fimc->slock, flags);
+
+ wait_event_timeout(fimc->irq_queue,
+ test_bit(ST_CAPT_SHUT, &fimc->state),
+ FIMC_SHUTDOWN_TIMEOUT);
+
+ ret = v4l2_subdev_call(cap->sd, video, s_stream, 0);
+ if (ret)
+ v4l2_err(&fimc->vid_cap.v4l2_dev, "s_stream(0) failed\n");
+
+ spin_lock_irqsave(&fimc->slock, flags);
+ fimc->state &= ~(1 << ST_CAPT_RUN | 1 << ST_CAPT_PEND |
+ 1 << ST_CAPT_STREAM);
+
+ fimc->vid_cap.active_buf_cnt = 0;
+ spin_unlock_irqrestore(&fimc->slock, flags);
+
+ dbg("state: 0x%lx", fimc->state);
+ return 0;
+}
+
+static int fimc_capture_open(struct file *file)
+{
+ struct fimc_dev *fimc = video_drvdata(file);
+ int ret = 0;
+
+ dbg("pid: %d, state: 0x%lx", task_pid_nr(current), fimc->state);
+
+ /* Return if the corresponding video mem2mem node is already opened. */
+ if (fimc_m2m_active(fimc))
+ return -EBUSY;
+
+ if (mutex_lock_interruptible(&fimc->lock))
+ return -ERESTARTSYS;
+
+ if (++fimc->vid_cap.refcnt == 1) {
+ ret = fimc_isp_subdev_init(fimc, -1);
+ if (ret) {
+ fimc->vid_cap.refcnt--;
+ ret = -EIO;
+ }
+ }
+
+ file->private_data = fimc->vid_cap.ctx;
+
+ mutex_unlock(&fimc->lock);
+ return ret;
+}
+
+static int fimc_capture_close(struct file *file)
+{
+ struct fimc_dev *fimc = video_drvdata(file);
+
+ if (mutex_lock_interruptible(&fimc->lock))
+ return -ERESTARTSYS;
+
+ dbg("pid: %d, state: 0x%lx", task_pid_nr(current), fimc->state);
+
+ if (--fimc->vid_cap.refcnt == 0) {
+ fimc_stop_capture(fimc);
+
+ videobuf_stop(&fimc->vid_cap.vbq);
+ videobuf_mmap_free(&fimc->vid_cap.vbq);
+
+ v4l2_err(&fimc->vid_cap.v4l2_dev, "releasing ISP\n");
+ v4l2_subdev_call(fimc->vid_cap.sd, core, s_power, 0);
+ fimc_subdev_unregister(fimc);
+ }
+
+ mutex_unlock(&fimc->lock);
+ return 0;
+}
+
+static unsigned int fimc_capture_poll(struct file *file,
+ struct poll_table_struct *wait)
+{
+ struct fimc_ctx *ctx = file->private_data;
+ struct fimc_dev *fimc = ctx->fimc_dev;
+ struct fimc_vid_cap *cap = &fimc->vid_cap;
+ int ret;
+
+ if (mutex_lock_interruptible(&fimc->lock))
+ return POLLERR;
+
+ ret = videobuf_poll_stream(file, &cap->vbq, wait);
+ mutex_unlock(&fimc->lock);
+
+ return ret;
+}
+
+static int fimc_capture_mmap(struct file *file, struct vm_area_struct *vma)
+{
+ struct fimc_ctx *ctx = file->private_data;
+ struct fimc_dev *fimc = ctx->fimc_dev;
+ struct fimc_vid_cap *cap = &fimc->vid_cap;
+ int ret;
+
+ if (mutex_lock_interruptible(&fimc->lock))
+ return -ERESTARTSYS;
+
+ ret = videobuf_mmap_mapper(&cap->vbq, vma);
+ mutex_unlock(&fimc->lock);
+
+ return ret;
+}
+
+/* video device file operations */
+static const struct v4l2_file_operations fimc_capture_fops = {
+ .owner = THIS_MODULE,
+ .open = fimc_capture_open,
+ .release = fimc_capture_close,
+ .poll = fimc_capture_poll,
+ .unlocked_ioctl = video_ioctl2,
+ .mmap = fimc_capture_mmap,
+};
+
+static int fimc_vidioc_querycap_capture(struct file *file, void *priv,
+ struct v4l2_capability *cap)
+{
+ struct fimc_ctx *ctx = file->private_data;
+ struct fimc_dev *fimc = ctx->fimc_dev;
+
+ strncpy(cap->driver, fimc->pdev->name, sizeof(cap->driver) - 1);
+ strncpy(cap->card, fimc->pdev->name, sizeof(cap->card) - 1);
+ cap->bus_info[0] = 0;
+ cap->version = KERNEL_VERSION(1, 0, 0);
+ cap->capabilities = V4L2_CAP_STREAMING | V4L2_CAP_VIDEO_CAPTURE;
+
+ return 0;
+}
+
+/* Synchronize formats of the camera interface input and attached sensor. */
+static int sync_capture_fmt(struct fimc_ctx *ctx)
+{
+ struct fimc_frame *frame = &ctx->s_frame;
+ struct fimc_dev *fimc = ctx->fimc_dev;
+ struct v4l2_mbus_framefmt *fmt = &fimc->vid_cap.fmt;
+ int ret;
+
+ fmt->width = ctx->d_frame.o_width;
+ fmt->height = ctx->d_frame.o_height;
+
+ ret = v4l2_subdev_call(fimc->vid_cap.sd, video, s_mbus_fmt, fmt);
+ if (ret == -ENOIOCTLCMD) {
+ err("s_mbus_fmt failed");
+ return ret;
+ }
+ dbg("w: %d, h: %d, code= %d", fmt->width, fmt->height, fmt->code);
+
+ frame->fmt = find_mbus_format(fmt, FMT_FLAGS_CAM);
+ if (!frame->fmt) {
+ err("fimc source format not found\n");
+ return -EINVAL;
+ }
+
+ frame->f_width = fmt->width;
+ frame->f_height = fmt->height;
+ frame->width = fmt->width;
+ frame->height = fmt->height;
+ frame->o_width = fmt->width;
+ frame->o_height = fmt->height;
+ frame->offs_h = 0;
+ frame->offs_v = 0;
+
+ return 0;
+}
+
+static int fimc_cap_s_fmt(struct file *file, void *priv,
+ struct v4l2_format *f)
+{
+ struct fimc_ctx *ctx = priv;
+ struct fimc_dev *fimc = ctx->fimc_dev;
+ struct fimc_frame *frame;
+ struct v4l2_pix_format *pix;
+ int ret;
+
+ if (f->type != V4L2_BUF_TYPE_VIDEO_CAPTURE)
+ return -EINVAL;
+
+ ret = fimc_vidioc_try_fmt(file, priv, f);
+ if (ret)
+ return ret;
+
+ if (mutex_lock_interruptible(&fimc->lock))
+ return -ERESTARTSYS;
+
+ if (fimc_capture_active(fimc)) {
+ ret = -EBUSY;
+ goto sf_unlock;
+ }
+
+ frame = &ctx->d_frame;
+
+ pix = &f->fmt.pix;
+ frame->fmt = find_format(f, FMT_FLAGS_M2M | FMT_FLAGS_CAM);
+ if (!frame->fmt) {
+ err("fimc target format not found\n");
+ ret = -EINVAL;
+ goto sf_unlock;
+ }
+
+ /* Output DMA frame pixel size and offsets. */
+ frame->f_width = pix->bytesperline * 8 / frame->fmt->depth;
+ frame->f_height = pix->height;
+ frame->width = pix->width;
+ frame->height = pix->height;
+ frame->o_width = pix->width;
+ frame->o_height = pix->height;
+ frame->size = (pix->width * pix->height * frame->fmt->depth) >> 3;
+ frame->offs_h = 0;
+ frame->offs_v = 0;
+
+ ret = sync_capture_fmt(ctx);
+
+ ctx->state |= (FIMC_PARAMS | FIMC_DST_FMT);
+
+sf_unlock:
+ mutex_unlock(&fimc->lock);
+ return ret;
+}
+
+static int fimc_cap_enum_input(struct file *file, void *priv,
+ struct v4l2_input *i)
+{
+ struct fimc_ctx *ctx = priv;
+ struct s3c_platform_fimc *pldata = ctx->fimc_dev->pdata;
+ struct s3c_fimc_isp_info *isp_info;
+
+ if (i->index >= FIMC_MAX_CAMIF_CLIENTS)
+ return -EINVAL;
+
+ isp_info = pldata->isp_info[i->index];
+ if (isp_info == NULL)
+ return -EINVAL;
+
+ i->type = V4L2_INPUT_TYPE_CAMERA;
+ strncpy(i->name, isp_info->board_info->type, 32);
+ return 0;
+}
+
+static int fimc_cap_s_input(struct file *file, void *priv,
+ unsigned int i)
+{
+ struct fimc_ctx *ctx = priv;
+ struct fimc_dev *fimc = ctx->fimc_dev;
+ struct s3c_platform_fimc *pdata = fimc->pdata;
+ int ret;
+
+ if (fimc_capture_active(ctx->fimc_dev))
+ return -EBUSY;
+
+ if (mutex_lock_interruptible(&fimc->lock))
+ return -ERESTARTSYS;
+
+ if (i >= FIMC_MAX_CAMIF_CLIENTS || !pdata->isp_info[i]) {
+ ret = -EINVAL;
+ goto si_unlock;
+ }
+
+ if (fimc->vid_cap.sd) {
+ ret = v4l2_subdev_call(fimc->vid_cap.sd, core, s_power, 0);
+ if (ret)
+ err("s_power failed: %d", ret);
+ }
+
+ /* Release the attached sensor subdevice. */
+ fimc_subdev_unregister(fimc);
+
+ ret = fimc_isp_subdev_init(fimc, i);
+
+si_unlock:
+ mutex_unlock(&fimc->lock);
+ return ret;
+}
+
+static int fimc_cap_g_input(struct file *file, void *priv,
+ unsigned int *i)
+{
+ struct fimc_ctx *ctx = priv;
+ struct fimc_vid_cap *cap = &ctx->fimc_dev->vid_cap;
+
+ *i = cap->input_index;
+ return 0;
+}
+
+static int fimc_cap_streamon(struct file *file, void *priv,
+ enum v4l2_buf_type type)
+{
+ struct s3c_fimc_isp_info *isp_info;
+ struct fimc_ctx *ctx = priv;
+ struct fimc_dev *fimc = ctx->fimc_dev;
+ int ret = -EBUSY;
+
+ if (mutex_lock_interruptible(&fimc->lock))
+ return -ERESTARTSYS;
+
+ if (fimc_capture_active(fimc) || !fimc->vid_cap.sd)
+ goto s_unlock;
+
+ if (!(ctx->state & FIMC_DST_FMT)) {
+ v4l2_err(&fimc->vid_cap.v4l2_dev, "Format is not set\n");
+ ret = -EINVAL;
+ goto s_unlock;
+ }
+
+ ret = v4l2_subdev_call(fimc->vid_cap.sd, video, s_stream, 1);
+ if (ret && ret != -ENOIOCTLCMD)
+ goto s_unlock;
+
+ ret = fimc_prepare_config(ctx, ctx->state);
+ if (ret)
+ goto s_unlock;
+
+ isp_info = fimc->pdata->isp_info[fimc->vid_cap.input_index];
+ fimc_hw_set_camera_type(fimc, isp_info);
+ fimc_hw_set_camera_source(fimc, isp_info);
+ fimc_hw_set_camera_offset(fimc, &ctx->s_frame);
+
+ if (ctx->state & FIMC_PARAMS) {
+ ret = fimc_set_scaler_info(ctx);
+ if (ret) {
+ err("Scaler setup error");
+ goto s_unlock;
+ }
+ fimc_hw_set_input_path(ctx);
+ fimc_hw_set_scaler(ctx);
+ fimc_hw_set_target_format(ctx);
+ fimc_hw_set_rotation(ctx);
+ fimc_hw_set_effect(ctx);
+ }
+
+ fimc_hw_set_output_path(ctx);
+ fimc_hw_set_out_dma(ctx);
+
+ INIT_LIST_HEAD(&fimc->vid_cap.pending_buf_q);
+ INIT_LIST_HEAD(&fimc->vid_cap.active_buf_q);
+ fimc->vid_cap.active_buf_cnt = 0;
+ fimc->vid_cap.frame_count = 0;
+
+ set_bit(ST_CAPT_PEND, &fimc->state);
+ ret = videobuf_streamon(&fimc->vid_cap.vbq);
+
+s_unlock:
+ mutex_unlock(&fimc->lock);
+ return ret;
+}
+
+static int fimc_cap_streamoff(struct file *file, void *priv,
+ enum v4l2_buf_type type)
+{
+ struct fimc_ctx *ctx = priv;
+ struct fimc_dev *fimc = ctx->fimc_dev;
+ struct fimc_vid_cap *cap = &fimc->vid_cap;
+ unsigned long flags;
+ int ret;
+
+ spin_lock_irqsave(&fimc->slock, flags);
+ if (!fimc_capture_running(fimc) && !fimc_capture_pending(fimc)) {
+ spin_unlock_irqrestore(&fimc->slock, flags);
+ dbg("state: 0x%lx", fimc->state);
+ return -EINVAL;
+ }
+ spin_unlock_irqrestore(&fimc->slock, flags);
+
+ if (mutex_lock_interruptible(&fimc->lock))
+ return -ERESTARTSYS;
+
+ fimc_stop_capture(fimc);
+ ret = videobuf_streamoff(&cap->vbq);
+ mutex_unlock(&fimc->lock);
+ return ret;
+}
+
+static int fimc_cap_reqbufs(struct file *file, void *priv,
+ struct v4l2_requestbuffers *reqbufs)
+{
+ struct fimc_ctx *ctx = priv;
+ struct fimc_dev *fimc = ctx->fimc_dev;
+ struct fimc_vid_cap *cap = &fimc->vid_cap;
+ int ret;
+
+ if (fimc_capture_active(ctx->fimc_dev))
+ return -EBUSY;
+
+ if (mutex_lock_interruptible(&fimc->lock))
+ return -ERESTARTSYS;
+
+ ret = videobuf_reqbufs(&cap->vbq, reqbufs);
+ if (!ret)
+ cap->reqbufs_count = reqbufs->count;
+
+ mutex_unlock(&fimc->lock);
+ return ret;
+}
+
+static int fimc_cap_querybuf(struct file *file, void *priv,
+ struct v4l2_buffer *buf)
+{
+ struct fimc_ctx *ctx = priv;
+ struct fimc_vid_cap *cap = &ctx->fimc_dev->vid_cap;
+
+ if (fimc_capture_active(ctx->fimc_dev))
+ return -EBUSY;
+
+ return videobuf_querybuf(&cap->vbq, buf);
+}
+
+static int fimc_cap_qbuf(struct file *file, void *priv,
+ struct v4l2_buffer *buf)
+{
+ struct fimc_ctx *ctx = priv;
+ struct fimc_dev *fimc = ctx->fimc_dev;
+ struct fimc_vid_cap *cap = &fimc->vid_cap;
+ int ret;
+
+ if (mutex_lock_interruptible(&fimc->lock))
+ return -ERESTARTSYS;
+
+ ret = videobuf_qbuf(&cap->vbq, buf);
+
+ mutex_unlock(&fimc->lock);
+ return ret;
+}
+
+static int fimc_cap_dqbuf(struct file *file, void *priv,
+ struct v4l2_buffer *buf)
+{
+ struct fimc_ctx *ctx = priv;
+ int ret;
+
+ if (mutex_lock_interruptible(&ctx->fimc_dev->lock))
+ return -ERESTARTSYS;
+
+ ret = videobuf_dqbuf(&ctx->fimc_dev->vid_cap.vbq, buf,
+ file->f_flags & O_NONBLOCK);
+
+ mutex_unlock(&ctx->fimc_dev->lock);
+ return ret;
+}
+
+static int fimc_cap_s_ctrl(struct file *file, void *priv,
+ struct v4l2_control *ctrl)
+{
+ struct fimc_ctx *ctx = priv;
+ int ret = -EINVAL;
+
+ if (mutex_lock_interruptible(&ctx->fimc_dev->lock))
+ return -ERESTARTSYS;
+
+ /* Allow any controls but 90/270 rotation while streaming */
+ if (!fimc_capture_active(ctx->fimc_dev) ||
+ ctrl->id != V4L2_CID_ROTATE ||
+ (ctrl->value != 90 && ctrl->value != 270)) {
+ ret = check_ctrl_val(ctx, ctrl);
+ if (!ret) {
+ ret = fimc_s_ctrl(ctx, ctrl);
+ if (!ret)
+ ctx->state |= FIMC_PARAMS;
+ }
+ }
+ if (ret == -EINVAL)
+ ret = v4l2_subdev_call(ctx->fimc_dev->vid_cap.sd,
+ core, s_ctrl, ctrl);
+
+ mutex_unlock(&ctx->fimc_dev->lock);
+ return ret;
+}
+
+static int fimc_cap_s_crop(struct file *file, void *fh,
+ struct v4l2_crop *cr)
+{
+ struct fimc_frame *f;
+ struct fimc_ctx *ctx = file->private_data;
+ struct fimc_dev *fimc = ctx->fimc_dev;
+ int ret = -EINVAL;
+
+ if (fimc_capture_active(fimc))
+ return -EBUSY;
+
+ ret = fimc_try_crop(ctx, cr);
+ if (ret)
+ return ret;
+
+ if (mutex_lock_interruptible(&fimc->lock))
+ return -ERESTARTSYS;
+
+ if (!(ctx->state & FIMC_DST_FMT)) {
+ v4l2_err(&fimc->vid_cap.v4l2_dev,
+ "Capture color format not set\n");
+ goto sc_unlock;
+ }
+
+ f = &ctx->s_frame;
+ /* Check for the pixel scaling ratio when cropping input image. */
+ ret = fimc_check_scaler_ratio(&cr->c, &ctx->d_frame);
+ if (ret) {
+ v4l2_err(&fimc->vid_cap.v4l2_dev, "Out of the scaler range");
+ } else {
+ ret = 0;
+ f->offs_h = cr->c.left;
+ f->offs_v = cr->c.top;
+ f->width = cr->c.width;
+ f->height = cr->c.height;
+ }
+
+sc_unlock:
+ mutex_unlock(&fimc->lock);
+ return ret;
+}
+
+
+static const struct v4l2_ioctl_ops fimc_capture_ioctl_ops = {
+ .vidioc_querycap = fimc_vidioc_querycap_capture,
+
+ .vidioc_enum_fmt_vid_cap = fimc_vidioc_enum_fmt,
+ .vidioc_try_fmt_vid_cap = fimc_vidioc_try_fmt,
+ .vidioc_s_fmt_vid_cap = fimc_cap_s_fmt,
+ .vidioc_g_fmt_vid_cap = fimc_vidioc_g_fmt,
+
+ .vidioc_reqbufs = fimc_cap_reqbufs,
+ .vidioc_querybuf = fimc_cap_querybuf,
+
+ .vidioc_qbuf = fimc_cap_qbuf,
+ .vidioc_dqbuf = fimc_cap_dqbuf,
+
+ .vidioc_streamon = fimc_cap_streamon,
+ .vidioc_streamoff = fimc_cap_streamoff,
+
+ .vidioc_queryctrl = fimc_vidioc_queryctrl,
+ .vidioc_g_ctrl = fimc_vidioc_g_ctrl,
+ .vidioc_s_ctrl = fimc_cap_s_ctrl,
+
+ .vidioc_g_crop = fimc_vidioc_g_crop,
+ .vidioc_s_crop = fimc_cap_s_crop,
+ .vidioc_cropcap = fimc_vidioc_cropcap,
+
+ .vidioc_enum_input = fimc_cap_enum_input,
+ .vidioc_s_input = fimc_cap_s_input,
+ .vidioc_g_input = fimc_cap_g_input,
+};
+
+int fimc_register_capture_device(struct fimc_dev *fimc)
+{
+ struct v4l2_device *v4l2_dev = &fimc->vid_cap.v4l2_dev;
+ struct video_device *vfd;
+ struct fimc_vid_cap *vid_cap;
+ struct fimc_ctx *ctx;
+ struct v4l2_format f;
+ int ret;
+
+ ctx = kzalloc(sizeof *ctx, GFP_KERNEL);
+ if (!ctx)
+ return -ENOMEM;
+
+ ctx->fimc_dev = fimc;
+ ctx->in_path = FIMC_CAMERA;
+ ctx->out_path = FIMC_DMA;
+ ctx->state = FIMC_CTX_CAP;
+
+ f.fmt.pix.pixelformat = V4L2_PIX_FMT_RGB24;
+ ctx->d_frame.fmt = find_format(&f, FMT_FLAGS_M2M);
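+ /* RGB24 serves as the initial destination format until userspace
+ * sets one with S_FMT. */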
+
+ if (!v4l2_dev->name[0])
+ snprintf(v4l2_dev->name, sizeof(v4l2_dev->name),
+ "%s.capture", dev_name(&fimc->pdev->dev));
+
+ ret = v4l2_device_register(NULL, v4l2_dev);
+ if (ret)
+ goto err_info;
+
+ vfd = video_device_alloc();
+ if (!vfd) {
+ v4l2_err(v4l2_dev, "Failed to allocate video device\n");
+ goto err_v4l2_reg;
+ }
+
+ snprintf(vfd->name, sizeof(vfd->name), "%s:cap",
+ dev_name(&fimc->pdev->dev));
+
+ vfd->fops = &fimc_capture_fops;
+ vfd->ioctl_ops = &fimc_capture_ioctl_ops;
+ vfd->minor = -1;
+ vfd->release = video_device_release;
+ video_set_drvdata(vfd, fimc);
+
+ vid_cap = &fimc->vid_cap;
+ vid_cap->vfd = vfd;
+ vid_cap->active_buf_cnt = 0;
+ vid_cap->reqbufs_count = 0;
+ vid_cap->refcnt = 0;
+ /* The default color format for the image sensor. */
+ vid_cap->fmt.code = V4L2_MBUS_FMT_YUYV8_2X8;
+
+ INIT_LIST_HEAD(&vid_cap->pending_buf_q);
+ INIT_LIST_HEAD(&vid_cap->active_buf_q);
+ spin_lock_init(&ctx->slock);
+ vid_cap->ctx = ctx;
+
+ videobuf_queue_dma_contig_init(&vid_cap->vbq, &fimc_qops,
+ vid_cap->v4l2_dev.dev, &fimc->irqlock,
+ V4L2_BUF_TYPE_VIDEO_CAPTURE, V4L2_FIELD_NONE,
+ sizeof(struct fimc_vid_buffer), (void *)ctx);
+
+ ret = video_register_device(vfd, VFL_TYPE_GRABBER, -1);
+ if (ret) {
+ v4l2_err(v4l2_dev, "Failed to register video device\n");
+ goto err_vd_reg;
+ }
+
+ v4l2_info(v4l2_dev,
+ "FIMC capture driver registered as /dev/video%d\n",
+ vfd->num);
+
+ return 0;
+
+err_vd_reg:
+ video_device_release(vfd);
+err_v4l2_reg:
+ v4l2_device_unregister(v4l2_dev);
+err_info:
+ dev_err(&fimc->pdev->dev, "failed to install\n");
+ return ret;
+}
+
+void fimc_unregister_capture_device(struct fimc_dev *fimc)
+{
+ struct fimc_vid_cap *capture = &fimc->vid_cap;
+
+ if (capture->vfd)
+ video_unregister_device(capture->vfd);
+
+ kfree(capture->ctx);
+}
diff --git a/drivers/media/video/s5p-fimc/fimc-core.c b/drivers/media/video/s5p-fimc/fimc-core.c
index 6961c55baf9b..2e7c547894b6 100644
--- a/drivers/media/video/s5p-fimc/fimc-core.c
+++ b/drivers/media/video/s5p-fimc/fimc-core.c
@@ -1,7 +1,7 @@
/*
* S5P camera interface (video postprocessor) driver
*
- * Copyright (c) 2010 Samsung Electronics
+ * Copyright (c) 2010 Samsung Electronics Co., Ltd
*
* Sylwester Nawrocki, <s.nawrocki@samsung.com>
*
@@ -38,86 +38,103 @@ static struct fimc_fmt fimc_formats[] = {
.depth = 16,
.color = S5P_FIMC_RGB565,
.buff_cnt = 1,
- .planes_cnt = 1
+ .planes_cnt = 1,
+ .mbus_code = V4L2_MBUS_FMT_RGB565_2X8_BE,
+ .flags = FMT_FLAGS_M2M,
}, {
.name = "BGR666",
.fourcc = V4L2_PIX_FMT_BGR666,
.depth = 32,
.color = S5P_FIMC_RGB666,
.buff_cnt = 1,
- .planes_cnt = 1
+ .planes_cnt = 1,
+ .flags = FMT_FLAGS_M2M,
}, {
.name = "XRGB-8-8-8-8, 24 bpp",
.fourcc = V4L2_PIX_FMT_RGB24,
.depth = 32,
.color = S5P_FIMC_RGB888,
.buff_cnt = 1,
- .planes_cnt = 1
+ .planes_cnt = 1,
+ .flags = FMT_FLAGS_M2M,
}, {
.name = "YUV 4:2:2 packed, YCbYCr",
.fourcc = V4L2_PIX_FMT_YUYV,
.depth = 16,
.color = S5P_FIMC_YCBYCR422,
.buff_cnt = 1,
- .planes_cnt = 1
- }, {
+ .planes_cnt = 1,
+ .mbus_code = V4L2_MBUS_FMT_YUYV8_2X8,
+ .flags = FMT_FLAGS_M2M | FMT_FLAGS_CAM,
+ }, {
.name = "YUV 4:2:2 packed, CbYCrY",
.fourcc = V4L2_PIX_FMT_UYVY,
.depth = 16,
.color = S5P_FIMC_CBYCRY422,
.buff_cnt = 1,
- .planes_cnt = 1
+ .planes_cnt = 1,
+ .mbus_code = V4L2_MBUS_FMT_UYVY8_2X8,
+ .flags = FMT_FLAGS_M2M | FMT_FLAGS_CAM,
}, {
.name = "YUV 4:2:2 packed, CrYCbY",
.fourcc = V4L2_PIX_FMT_VYUY,
.depth = 16,
.color = S5P_FIMC_CRYCBY422,
.buff_cnt = 1,
- .planes_cnt = 1
+ .planes_cnt = 1,
+ .mbus_code = V4L2_MBUS_FMT_VYUY8_2X8,
+ .flags = FMT_FLAGS_M2M | FMT_FLAGS_CAM,
}, {
.name = "YUV 4:2:2 packed, YCrYCb",
.fourcc = V4L2_PIX_FMT_YVYU,
.depth = 16,
.color = S5P_FIMC_YCRYCB422,
.buff_cnt = 1,
- .planes_cnt = 1
+ .planes_cnt = 1,
+ .mbus_code = V4L2_MBUS_FMT_YVYU8_2X8,
+ .flags = FMT_FLAGS_M2M | FMT_FLAGS_CAM,
}, {
.name = "YUV 4:2:2 planar, Y/Cb/Cr",
.fourcc = V4L2_PIX_FMT_YUV422P,
.depth = 12,
.color = S5P_FIMC_YCBCR422,
.buff_cnt = 1,
- .planes_cnt = 3
+ .planes_cnt = 3,
+ .flags = FMT_FLAGS_M2M,
}, {
.name = "YUV 4:2:2 planar, Y/CbCr",
.fourcc = V4L2_PIX_FMT_NV16,
.depth = 16,
.color = S5P_FIMC_YCBCR422,
.buff_cnt = 1,
- .planes_cnt = 2
+ .planes_cnt = 2,
+ .flags = FMT_FLAGS_M2M,
}, {
.name = "YUV 4:2:2 planar, Y/CrCb",
.fourcc = V4L2_PIX_FMT_NV61,
.depth = 16,
.color = S5P_FIMC_RGB565,
.buff_cnt = 1,
- .planes_cnt = 2
+ .planes_cnt = 2,
+ .flags = FMT_FLAGS_M2M,
}, {
.name = "YUV 4:2:0 planar, YCbCr",
.fourcc = V4L2_PIX_FMT_YUV420,
.depth = 12,
.color = S5P_FIMC_YCBCR420,
.buff_cnt = 1,
- .planes_cnt = 3
+ .planes_cnt = 3,
+ .flags = FMT_FLAGS_M2M,
}, {
.name = "YUV 4:2:0 planar, Y/CbCr",
.fourcc = V4L2_PIX_FMT_NV12,
.depth = 12,
.color = S5P_FIMC_YCBCR420,
.buff_cnt = 1,
- .planes_cnt = 2
- }
- };
+ .planes_cnt = 2,
+ .flags = FMT_FLAGS_M2M,
+ },
+};
static struct v4l2_queryctrl fimc_ctrls[] = {
{
@@ -127,16 +144,14 @@ static struct v4l2_queryctrl fimc_ctrls[] = {
.minimum = 0,
.maximum = 1,
.default_value = 0,
- },
- {
+ }, {
.id = V4L2_CID_VFLIP,
.type = V4L2_CTRL_TYPE_BOOLEAN,
.name = "Vertical flip",
.minimum = 0,
.maximum = 1,
.default_value = 0,
- },
- {
+ }, {
.id = V4L2_CID_ROTATE,
.type = V4L2_CTRL_TYPE_INTEGER,
.name = "Rotation (CCW)",
@@ -158,7 +173,7 @@ static struct v4l2_queryctrl *get_ctrl(int id)
return NULL;
}
-static int fimc_check_scaler_ratio(struct v4l2_rect *r, struct fimc_frame *f)
+int fimc_check_scaler_ratio(struct v4l2_rect *r, struct fimc_frame *f)
{
if (r->width > f->width) {
if (f->width > (r->width * SCALER_MAX_HRATIO))
@@ -181,32 +196,27 @@ static int fimc_check_scaler_ratio(struct v4l2_rect *r, struct fimc_frame *f)
static int fimc_get_scaler_factor(u32 src, u32 tar, u32 *ratio, u32 *shift)
{
- if (src >= tar * 64) {
+ u32 sh = 6;
+
+ if (src >= 64 * tar)
return -EINVAL;
- } else if (src >= tar * 32) {
- *ratio = 32;
- *shift = 5;
- } else if (src >= tar * 16) {
- *ratio = 16;
- *shift = 4;
- } else if (src >= tar * 8) {
- *ratio = 8;
- *shift = 3;
- } else if (src >= tar * 4) {
- *ratio = 4;
- *shift = 2;
- } else if (src >= tar * 2) {
- *ratio = 2;
- *shift = 1;
- } else {
- *ratio = 1;
- *shift = 0;
+
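+ /* Pick the largest power-of-two pre-scaler ratio (32 down to 1) with
+ * src >= tar * ratio; shift is log2(ratio), e.g. src = 1280, tar = 160
+ * yields ratio = 8 and shift = 3. */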
+ while (sh--) {
+ u32 tmp = 1 << sh;
+ if (src >= tar * tmp) {
+ *shift = sh, *ratio = tmp;
+ return 0;
+ }
}
+ *shift = 0, *ratio = 1;
+
+ dbg("s: %d, t: %d, shift: %d, ratio: %d",
+ src, tar, *shift, *ratio);
return 0;
}
-static int fimc_set_scaler_info(struct fimc_ctx *ctx)
+int fimc_set_scaler_info(struct fimc_ctx *ctx)
{
struct fimc_scaler *sc = &ctx->scaler;
struct fimc_frame *s_frame = &ctx->s_frame;
@@ -214,8 +224,13 @@ static int fimc_set_scaler_info(struct fimc_ctx *ctx)
int tx, ty, sx, sy;
int ret;
- tx = d_frame->width;
- ty = d_frame->height;
+ if (ctx->rotation == 90 || ctx->rotation == 270) {
+ ty = d_frame->width;
+ tx = d_frame->height;
+ } else {
+ tx = d_frame->width;
+ ty = d_frame->height;
+ }
if (tx <= 0 || ty <= 0) {
v4l2_err(&ctx->fimc_dev->m2m.v4l2_dev,
"invalid target size: %d x %d", tx, ty);
@@ -261,12 +276,57 @@ static int fimc_set_scaler_info(struct fimc_ctx *ctx)
return 0;
}
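+/*
+ * Capture path interrupt handling: complete the buffer that has just been
+ * filled, acknowledge a pending shutdown request, then program the next
+ * pending buffer (if any) into the output DMA.
+ */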
+static void fimc_capture_handler(struct fimc_dev *fimc)
+{
+ struct fimc_vid_cap *cap = &fimc->vid_cap;
+ struct fimc_vid_buffer *v_buf = NULL;
+
+ if (!list_empty(&cap->active_buf_q)) {
+ v_buf = active_queue_pop(cap);
+ fimc_buf_finish(fimc, v_buf);
+ }
+
+ if (test_and_clear_bit(ST_CAPT_SHUT, &fimc->state)) {
+ wake_up(&fimc->irq_queue);
+ return;
+ }
+
+ if (!list_empty(&cap->pending_buf_q)) {
+
+ v_buf = pending_queue_pop(cap);
+ fimc_hw_set_output_addr(fimc, &v_buf->paddr, cap->buf_index);
+ v_buf->index = cap->buf_index;
+
+ dbg("hw ptr: %d, sw ptr: %d",
+ fimc_hw_get_frame_index(fimc), cap->buf_index);
+
+ spin_lock(&fimc->irqlock);
+ v_buf->vb.state = VIDEOBUF_ACTIVE;
+ spin_unlock(&fimc->irqlock);
+
+ /* Move the buffer to the capture active queue */
+ active_queue_add(cap, v_buf);
+
+ dbg("next frame: %d, done frame: %d",
+ fimc_hw_get_frame_index(fimc), v_buf->index);
+
+ if (++cap->buf_index >= FIMC_MAX_OUT_BUFS)
+ cap->buf_index = 0;
+
+ } else if (test_and_clear_bit(ST_CAPT_STREAM, &fimc->state) &&
+ cap->active_buf_cnt <= 1) {
+ fimc_deactivate_capture(fimc);
+ }
+
+ dbg("frame: %d, active_buf_cnt= %d",
+ fimc_hw_get_frame_index(fimc), cap->active_buf_cnt);
+}
static irqreturn_t fimc_isr(int irq, void *priv)
{
struct fimc_vid_buffer *src_buf, *dst_buf;
- struct fimc_dev *fimc = (struct fimc_dev *)priv;
struct fimc_ctx *ctx;
+ struct fimc_dev *fimc = priv;
BUG_ON(!fimc);
fimc_hw_clear_irq(fimc);
@@ -281,12 +341,22 @@ static irqreturn_t fimc_isr(int irq, void *priv)
dst_buf = v4l2_m2m_dst_buf_remove(ctx->m2m_ctx);
if (src_buf && dst_buf) {
spin_lock(&fimc->irqlock);
- src_buf->vb.state = dst_buf->vb.state = VIDEOBUF_DONE;
+ src_buf->vb.state = dst_buf->vb.state = VIDEOBUF_DONE;
wake_up(&src_buf->vb.done);
wake_up(&dst_buf->vb.done);
spin_unlock(&fimc->irqlock);
v4l2_m2m_job_finish(fimc->m2m.m2m_dev, ctx->m2m_ctx);
}
+ goto isr_unlock;
+
+ }
+
+ if (test_bit(ST_CAPT_RUN, &fimc->state))
+ fimc_capture_handler(fimc);
+
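+ /* First interrupt after capture was requested: mark it as running and
+ * wake up anyone waiting for the state change. */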
+ if (test_and_clear_bit(ST_CAPT_PEND, &fimc->state)) {
+ set_bit(ST_CAPT_RUN, &fimc->state);
+ wake_up(&fimc->irq_queue);
}
isr_unlock:
@@ -295,20 +365,13 @@ isr_unlock:
}
/* The color format (planes_cnt, buff_cnt) must be already configured. */
-static int fimc_prepare_addr(struct fimc_ctx *ctx,
- struct fimc_vid_buffer *buf, enum v4l2_buf_type type)
+int fimc_prepare_addr(struct fimc_ctx *ctx, struct fimc_vid_buffer *buf,
+ struct fimc_frame *frame, struct fimc_addr *paddr)
{
- struct fimc_frame *frame;
- struct fimc_addr *paddr;
- u32 pix_size;
int ret = 0;
+ u32 pix_size;
- frame = ctx_m2m_get_frame(ctx, type);
- if (IS_ERR(frame))
- return PTR_ERR(frame);
- paddr = &frame->paddr;
-
- if (!buf)
+ if (buf == NULL || frame == NULL)
return -EINVAL;
pix_size = frame->width * frame->height;
@@ -344,8 +407,8 @@ static int fimc_prepare_addr(struct fimc_ctx *ctx,
}
}
- dbg("PHYS_ADDR: type= %d, y= 0x%X cb= 0x%X cr= 0x%X ret= %d",
- type, paddr->y, paddr->cb, paddr->cr, ret);
+ dbg("PHYS_ADDR: y= 0x%X cb= 0x%X cr= 0x%X ret= %d",
+ paddr->y, paddr->cb, paddr->cr, ret);
return ret;
}
@@ -433,7 +496,7 @@ static void fimc_prepare_dma_offset(struct fimc_ctx *ctx, struct fimc_frame *f)
*
* Return: 0 if dimensions are valid or non zero otherwise.
*/
-static int fimc_prepare_config(struct fimc_ctx *ctx, u32 flags)
+int fimc_prepare_config(struct fimc_ctx *ctx, u32 flags)
{
struct fimc_frame *s_frame, *d_frame;
struct fimc_vid_buffer *buf = NULL;
@@ -443,12 +506,6 @@ static int fimc_prepare_config(struct fimc_ctx *ctx, u32 flags)
d_frame = &ctx->d_frame;
if (flags & FIMC_PARAMS) {
- if ((ctx->out_path == FIMC_DMA) &&
- (ctx->rotation == 90 || ctx->rotation == 270)) {
- swap(d_frame->f_width, d_frame->f_height);
- swap(d_frame->width, d_frame->height);
- }
-
/* Prepare the DMA offset ratios for scaler. */
fimc_prepare_dma_offset(ctx, &ctx->s_frame);
fimc_prepare_dma_offset(ctx, &ctx->d_frame);
@@ -466,16 +523,14 @@ static int fimc_prepare_config(struct fimc_ctx *ctx, u32 flags)
if (flags & FIMC_SRC_ADDR) {
buf = v4l2_m2m_next_src_buf(ctx->m2m_ctx);
- ret = fimc_prepare_addr(ctx, buf,
- V4L2_BUF_TYPE_VIDEO_OUTPUT);
+ ret = fimc_prepare_addr(ctx, buf, s_frame, &s_frame->paddr);
if (ret)
return ret;
}
if (flags & FIMC_DST_ADDR) {
buf = v4l2_m2m_next_dst_buf(ctx->m2m_ctx);
- ret = fimc_prepare_addr(ctx, buf,
- V4L2_BUF_TYPE_VIDEO_CAPTURE);
+ ret = fimc_prepare_addr(ctx, buf, d_frame, &d_frame->paddr);
}
return ret;
@@ -499,12 +554,14 @@ static void fimc_dma_run(void *priv)
ctx->state |= (FIMC_SRC_ADDR | FIMC_DST_ADDR);
ret = fimc_prepare_config(ctx, ctx->state);
if (ret) {
- err("general configuration error");
+ err("Wrong parameters");
goto dma_unlock;
}
-
- if (fimc->m2m.ctx != ctx)
+ /* Reconfigure hardware if the context has changed. */
+ if (fimc->m2m.ctx != ctx) {
ctx->state |= FIMC_PARAMS;
+ fimc->m2m.ctx = ctx;
+ }
fimc_hw_set_input_addr(fimc, &ctx->s_frame.paddr);
@@ -512,10 +569,9 @@ static void fimc_dma_run(void *priv)
fimc_hw_set_input_path(ctx);
fimc_hw_set_in_dma(ctx);
if (fimc_set_scaler_info(ctx)) {
- err("scaler configuration error");
+ err("Scaler setup error");
goto dma_unlock;
}
- fimc_hw_set_prescaler(ctx);
fimc_hw_set_scaler(ctx);
fimc_hw_set_target_format(ctx);
fimc_hw_set_rotation(ctx);
@@ -524,19 +580,15 @@ static void fimc_dma_run(void *priv)
fimc_hw_set_output_path(ctx);
if (ctx->state & (FIMC_DST_ADDR | FIMC_PARAMS))
- fimc_hw_set_output_addr(fimc, &ctx->d_frame.paddr);
+ fimc_hw_set_output_addr(fimc, &ctx->d_frame.paddr, -1);
if (ctx->state & FIMC_PARAMS)
fimc_hw_set_out_dma(ctx);
- if (ctx->scaler.enabled)
- fimc_hw_start_scaler(fimc);
- fimc_hw_en_capture(ctx);
+ fimc_activate_capture(ctx);
- ctx->state = 0;
- fimc_hw_start_in_dma(fimc);
-
- fimc->m2m.ctx = ctx;
+ ctx->state &= (FIMC_CTX_M2M | FIMC_CTX_CAP);
+ fimc_hw_activate_input_dma(fimc, true);
dma_unlock:
spin_unlock_irqrestore(&ctx->slock, flags);
@@ -560,7 +612,7 @@ static int fimc_buf_setup(struct videobuf_queue *vq, unsigned int *count,
struct fimc_ctx *ctx = vq->priv_data;
struct fimc_frame *frame;
- frame = ctx_m2m_get_frame(ctx, vq->type);
+ frame = ctx_get_frame(ctx, vq->type);
if (IS_ERR(frame))
return PTR_ERR(frame);
@@ -578,7 +630,7 @@ static int fimc_buf_prepare(struct videobuf_queue *vq,
struct fimc_frame *frame;
int ret;
- frame = ctx_m2m_get_frame(ctx, vq->type);
+ frame = ctx_get_frame(ctx, vq->type);
if (IS_ERR(frame))
return PTR_ERR(frame);
@@ -618,10 +670,31 @@ static void fimc_buf_queue(struct videobuf_queue *vq,
struct videobuf_buffer *vb)
{
struct fimc_ctx *ctx = vq->priv_data;
- v4l2_m2m_buf_queue(ctx->m2m_ctx, vq, vb);
+ struct fimc_dev *fimc = ctx->fimc_dev;
+ struct fimc_vid_cap *cap = &fimc->vid_cap;
+ unsigned long flags;
+
+ dbg("ctx: %p, ctx->state: 0x%x", ctx, ctx->state);
+
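+ /* A single buf_queue op serves both paths: mem2mem buffers go through
+ * the m2m framework, capture buffers are queued here and capture is
+ * activated once enough of them have been collected. */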
+ if ((ctx->state & FIMC_CTX_M2M) && ctx->m2m_ctx) {
+ v4l2_m2m_buf_queue(ctx->m2m_ctx, vq, vb);
+ } else if (ctx->state & FIMC_CTX_CAP) {
+ spin_lock_irqsave(&fimc->slock, flags);
+ fimc_vid_cap_buf_queue(fimc, (struct fimc_vid_buffer *)vb);
+
+ dbg("fimc->cap.active_buf_cnt: %d",
+ fimc->vid_cap.active_buf_cnt);
+
+ if (cap->active_buf_cnt >= cap->reqbufs_count ||
+ cap->active_buf_cnt >= FIMC_MAX_OUT_BUFS) {
+ if (!test_and_set_bit(ST_CAPT_STREAM, &fimc->state))
+ fimc_activate_capture(ctx);
+ }
+ spin_unlock_irqrestore(&fimc->slock, flags);
+ }
}
-static struct videobuf_queue_ops fimc_qops = {
+struct videobuf_queue_ops fimc_qops = {
.buf_setup = fimc_buf_setup,
.buf_prepare = fimc_buf_prepare,
.buf_queue = fimc_buf_queue,
@@ -644,7 +717,7 @@ static int fimc_m2m_querycap(struct file *file, void *priv,
return 0;
}
-static int fimc_m2m_enum_fmt(struct file *file, void *priv,
+int fimc_vidioc_enum_fmt(struct file *file, void *priv,
struct v4l2_fmtdesc *f)
{
struct fimc_fmt *fmt;
@@ -655,189 +728,210 @@ static int fimc_m2m_enum_fmt(struct file *file, void *priv,
fmt = &fimc_formats[f->index];
strncpy(f->description, fmt->name, sizeof(f->description) - 1);
f->pixelformat = fmt->fourcc;
+
return 0;
}
-static int fimc_m2m_g_fmt(struct file *file, void *priv, struct v4l2_format *f)
+int fimc_vidioc_g_fmt(struct file *file, void *priv, struct v4l2_format *f)
{
struct fimc_ctx *ctx = priv;
+ struct fimc_dev *fimc = ctx->fimc_dev;
struct fimc_frame *frame;
- frame = ctx_m2m_get_frame(ctx, f->type);
+ frame = ctx_get_frame(ctx, f->type);
if (IS_ERR(frame))
return PTR_ERR(frame);
+ if (mutex_lock_interruptible(&fimc->lock))
+ return -ERESTARTSYS;
+
f->fmt.pix.width = frame->width;
f->fmt.pix.height = frame->height;
f->fmt.pix.field = V4L2_FIELD_NONE;
f->fmt.pix.pixelformat = frame->fmt->fourcc;
+ mutex_unlock(&fimc->lock);
return 0;
}
-static struct fimc_fmt *find_format(struct v4l2_format *f)
+struct fimc_fmt *find_format(struct v4l2_format *f, unsigned int mask)
{
struct fimc_fmt *fmt;
unsigned int i;
for (i = 0; i < ARRAY_SIZE(fimc_formats); ++i) {
fmt = &fimc_formats[i];
- if (fmt->fourcc == f->fmt.pix.pixelformat)
+ if (fmt->fourcc == f->fmt.pix.pixelformat &&
+ (fmt->flags & mask))
break;
}
- if (i == ARRAY_SIZE(fimc_formats))
- return NULL;
- return fmt;
+ return (i == ARRAY_SIZE(fimc_formats)) ? NULL : fmt;
}
-static int fimc_m2m_try_fmt(struct file *file, void *priv,
- struct v4l2_format *f)
+struct fimc_fmt *find_mbus_format(struct v4l2_mbus_framefmt *f,
+ unsigned int mask)
{
struct fimc_fmt *fmt;
- u32 max_width, max_height, mod_x, mod_y;
+ unsigned int i;
+
+ for (i = 0; i < ARRAY_SIZE(fimc_formats); ++i) {
+ fmt = &fimc_formats[i];
+ if (fmt->mbus_code == f->code && (fmt->flags & mask))
+ break;
+ }
+
+ return (i == ARRAY_SIZE(fimc_formats)) ? NULL : fmt;
+}
+
+
+int fimc_vidioc_try_fmt(struct file *file, void *priv, struct v4l2_format *f)
+{
struct fimc_ctx *ctx = priv;
struct fimc_dev *fimc = ctx->fimc_dev;
- struct v4l2_pix_format *pix = &f->fmt.pix;
struct samsung_fimc_variant *variant = fimc->variant;
+ struct v4l2_pix_format *pix = &f->fmt.pix;
+ struct fimc_fmt *fmt;
+ u32 max_width, mod_x, mod_y, mask;
+ int ret = -EINVAL, is_output = 0;
- fmt = find_format(f);
- if (!fmt) {
- v4l2_err(&fimc->m2m.v4l2_dev,
- "Fourcc format (0x%X) invalid.\n", pix->pixelformat);
+ if (f->type == V4L2_BUF_TYPE_VIDEO_OUTPUT) {
+ if (ctx->state & FIMC_CTX_CAP)
+ return -EINVAL;
+ is_output = 1;
+ } else if (f->type != V4L2_BUF_TYPE_VIDEO_CAPTURE) {
return -EINVAL;
}
+ dbg("w: %d, h: %d, bpl: %d",
+ pix->width, pix->height, pix->bytesperline);
+
+ if (mutex_lock_interruptible(&fimc->lock))
+ return -ERESTARTSYS;
+
+ mask = is_output ? FMT_FLAGS_M2M : FMT_FLAGS_M2M | FMT_FLAGS_CAM;
+ fmt = find_format(f, mask);
+ if (!fmt) {
+ v4l2_err(&fimc->m2m.v4l2_dev, "Fourcc format (0x%X) invalid.\n",
+ pix->pixelformat);
+ goto tf_out;
+ }
+
if (pix->field == V4L2_FIELD_ANY)
pix->field = V4L2_FIELD_NONE;
else if (V4L2_FIELD_NONE != pix->field)
- return -EINVAL;
+ goto tf_out;
- if (f->type == V4L2_BUF_TYPE_VIDEO_OUTPUT) {
- max_width = variant->scaler_dis_w;
- max_height = variant->scaler_dis_w;
- mod_x = variant->min_inp_pixsize;
- mod_y = variant->min_inp_pixsize;
- } else if (f->type == V4L2_BUF_TYPE_VIDEO_CAPTURE) {
- max_width = variant->out_rot_dis_w;
- max_height = variant->out_rot_dis_w;
- mod_x = variant->min_out_pixsize;
- mod_y = variant->min_out_pixsize;
+ if (is_output) {
+ max_width = variant->pix_limit->scaler_dis_w;
+ mod_x = ffs(variant->min_inp_pixsize) - 1;
} else {
- err("Wrong stream type (%d)", f->type);
- return -EINVAL;
+ max_width = variant->pix_limit->out_rot_dis_w;
+ mod_x = ffs(variant->min_out_pixsize) - 1;
}
- dbg("max_w= %d, max_h= %d", max_width, max_height);
-
- if (pix->height > max_height)
- pix->height = max_height;
- if (pix->width > max_width)
- pix->width = max_width;
-
if (tiled_fmt(fmt)) {
- mod_x = 64; /* 64x32 tile */
- mod_y = 32;
+ mod_x = 6; /* 64 x 32 pixels tile */
+ mod_y = 5;
+ } else {
+ if (fimc->id == 1 && fimc->variant->pix_hoff)
+ mod_y = fimc_fmt_is_rgb(fmt->color) ? 0 : 1;
+ else
+ mod_y = mod_x;
}
- dbg("mod_x= 0x%X, mod_y= 0x%X", mod_x, mod_y);
+ dbg("mod_x: %d, mod_y: %d, max_w: %d", mod_x, mod_y, max_width);
- pix->width = (pix->width == 0) ? mod_x : ALIGN(pix->width, mod_x);
- pix->height = (pix->height == 0) ? mod_y : ALIGN(pix->height, mod_y);
+ v4l_bound_align_image(&pix->width, 16, max_width, mod_x,
+ &pix->height, 8, variant->pix_limit->scaler_dis_w, mod_y, 0);
if (pix->bytesperline == 0 ||
- pix->bytesperline * 8 / fmt->depth > pix->width)
+ (pix->bytesperline * 8 / fmt->depth) > pix->width)
pix->bytesperline = (pix->width * fmt->depth) >> 3;
if (pix->sizeimage == 0)
pix->sizeimage = pix->height * pix->bytesperline;
- dbg("pix->bytesperline= %d, fmt->depth= %d",
- pix->bytesperline, fmt->depth);
+ dbg("w: %d, h: %d, bpl: %d, depth: %d",
+ pix->width, pix->height, pix->bytesperline, fmt->depth);
- return 0;
-}
+ ret = 0;
+tf_out:
+ mutex_unlock(&fimc->lock);
+ return ret;
+}
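The width/height alignment arguments handed to v4l_bound_align_image() above are log2 values, which is why the pixel-size minima from the variant data are converted with ffs(). A minimal sketch of that convention (values are illustrative only):

        u32 min_out_pixsize = 16;
        u32 mod_x = ffs(min_out_pixsize) - 1;   /* ffs(16) == 5, so mod_x == 4 */
        /* v4l_bound_align_image() then clamps the width to multiples of 2^4 = 16;
         * the 64 x 32 pixel tiled formats use mod_x = 6 and mod_y = 5 directly. */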
static int fimc_m2m_s_fmt(struct file *file, void *priv, struct v4l2_format *f)
{
struct fimc_ctx *ctx = priv;
- struct v4l2_device *v4l2_dev = &ctx->fimc_dev->m2m.v4l2_dev;
- struct videobuf_queue *src_vq, *dst_vq;
+ struct fimc_dev *fimc = ctx->fimc_dev;
+ struct v4l2_device *v4l2_dev = &fimc->m2m.v4l2_dev;
+ struct videobuf_queue *vq;
struct fimc_frame *frame;
struct v4l2_pix_format *pix;
unsigned long flags;
int ret = 0;
- BUG_ON(!ctx);
-
- ret = fimc_m2m_try_fmt(file, priv, f);
+ ret = fimc_vidioc_try_fmt(file, priv, f);
if (ret)
return ret;
- mutex_lock(&ctx->fimc_dev->lock);
+ if (mutex_lock_interruptible(&fimc->lock))
+ return -ERESTARTSYS;
- src_vq = v4l2_m2m_get_src_vq(ctx->m2m_ctx);
- dst_vq = v4l2_m2m_get_dst_vq(ctx->m2m_ctx);
+ vq = v4l2_m2m_get_vq(ctx->m2m_ctx, f->type);
+ mutex_lock(&vq->vb_lock);
- mutex_lock(&src_vq->vb_lock);
- mutex_lock(&dst_vq->vb_lock);
+ if (videobuf_queue_is_busy(vq)) {
+ v4l2_err(v4l2_dev, "%s: queue (%d) busy\n", __func__, f->type);
+ ret = -EBUSY;
+ goto sf_out;
+ }
+ spin_lock_irqsave(&ctx->slock, flags);
if (f->type == V4L2_BUF_TYPE_VIDEO_OUTPUT) {
- if (videobuf_queue_is_busy(src_vq)) {
- v4l2_err(v4l2_dev, "%s queue busy\n", __func__);
- ret = -EBUSY;
- goto s_fmt_out;
- }
frame = &ctx->s_frame;
- spin_lock_irqsave(&ctx->slock, flags);
ctx->state |= FIMC_SRC_FMT;
- spin_unlock_irqrestore(&ctx->slock, flags);
-
} else if (f->type == V4L2_BUF_TYPE_VIDEO_CAPTURE) {
- if (videobuf_queue_is_busy(dst_vq)) {
- v4l2_err(v4l2_dev, "%s queue busy\n", __func__);
- ret = -EBUSY;
- goto s_fmt_out;
- }
frame = &ctx->d_frame;
- spin_lock_irqsave(&ctx->slock, flags);
ctx->state |= FIMC_DST_FMT;
- spin_unlock_irqrestore(&ctx->slock, flags);
} else {
+ spin_unlock_irqrestore(&ctx->slock, flags);
v4l2_err(&ctx->fimc_dev->m2m.v4l2_dev,
"Wrong buffer/video queue type (%d)\n", f->type);
ret = -EINVAL;
- goto s_fmt_out;
+ goto sf_out;
}
+ spin_unlock_irqrestore(&ctx->slock, flags);
pix = &f->fmt.pix;
- frame->fmt = find_format(f);
+ frame->fmt = find_format(f, FMT_FLAGS_M2M);
if (!frame->fmt) {
ret = -EINVAL;
- goto s_fmt_out;
+ goto sf_out;
}
- frame->f_width = pix->bytesperline * 8 / frame->fmt->depth;
- frame->f_height = pix->sizeimage/pix->bytesperline;
- frame->width = pix->width;
- frame->height = pix->height;
- frame->o_width = pix->width;
+ frame->f_width = pix->bytesperline * 8 / frame->fmt->depth;
+ frame->f_height = pix->height;
+ frame->width = pix->width;
+ frame->height = pix->height;
+ frame->o_width = pix->width;
frame->o_height = pix->height;
- frame->offs_h = 0;
- frame->offs_v = 0;
- frame->size = (pix->width * pix->height * frame->fmt->depth) >> 3;
- src_vq->field = dst_vq->field = pix->field;
+ frame->offs_h = 0;
+ frame->offs_v = 0;
+ frame->size = (pix->width * pix->height * frame->fmt->depth) >> 3;
+ vq->field = pix->field;
+
spin_lock_irqsave(&ctx->slock, flags);
ctx->state |= FIMC_PARAMS;
spin_unlock_irqrestore(&ctx->slock, flags);
- dbg("f_width= %d, f_height= %d", frame->f_width, frame->f_height);
+ dbg("f_w: %d, f_h: %d", frame->f_width, frame->f_height);
-s_fmt_out:
- mutex_unlock(&dst_vq->vb_lock);
- mutex_unlock(&src_vq->vb_lock);
- mutex_unlock(&ctx->fimc_dev->lock);
+sf_out:
+ mutex_unlock(&vq->vb_lock);
+ mutex_unlock(&fimc->lock);
return ret;
}
@@ -884,21 +978,33 @@ static int fimc_m2m_streamoff(struct file *file, void *priv,
return v4l2_m2m_streamoff(file, ctx->m2m_ctx, type);
}
-int fimc_m2m_queryctrl(struct file *file, void *priv,
+int fimc_vidioc_queryctrl(struct file *file, void *priv,
struct v4l2_queryctrl *qc)
{
+ struct fimc_ctx *ctx = priv;
struct v4l2_queryctrl *c;
+
c = get_ctrl(qc->id);
- if (!c)
- return -EINVAL;
- *qc = *c;
- return 0;
+ if (c) {
+ *qc = *c;
+ return 0;
+ }
+
+ if (ctx->state & FIMC_CTX_CAP)
+ return v4l2_subdev_call(ctx->fimc_dev->vid_cap.sd,
+ core, queryctrl, qc);
+ return -EINVAL;
}
-int fimc_m2m_g_ctrl(struct file *file, void *priv,
+int fimc_vidioc_g_ctrl(struct file *file, void *priv,
struct v4l2_control *ctrl)
{
struct fimc_ctx *ctx = priv;
+ struct fimc_dev *fimc = ctx->fimc_dev;
+ int ret = 0;
+
+ if (mutex_lock_interruptible(&fimc->lock))
+ return -ERESTARTSYS;
switch (ctrl->id) {
case V4L2_CID_HFLIP:
@@ -911,15 +1017,22 @@ int fimc_m2m_g_ctrl(struct file *file, void *priv,
ctrl->value = ctx->rotation;
break;
default:
- v4l2_err(&ctx->fimc_dev->m2m.v4l2_dev, "Invalid control\n");
- return -EINVAL;
+ if (ctx->state & FIMC_CTX_CAP) {
+ ret = v4l2_subdev_call(fimc->vid_cap.sd, core,
+ g_ctrl, ctrl);
+ } else {
+ v4l2_err(&fimc->m2m.v4l2_dev,
+ "Invalid control\n");
+ ret = -EINVAL;
+ }
}
dbg("ctrl->value= %d", ctrl->value);
- return 0;
+
+ mutex_unlock(&fimc->lock);
+ return ret;
}
-static int check_ctrl_val(struct fimc_ctx *ctx,
- struct v4l2_control *ctrl)
+int check_ctrl_val(struct fimc_ctx *ctx, struct v4l2_control *ctrl)
{
struct v4l2_queryctrl *c;
c = get_ctrl(ctrl->id);
@@ -936,22 +1049,23 @@ static int check_ctrl_val(struct fimc_ctx *ctx,
return 0;
}
-int fimc_m2m_s_ctrl(struct file *file, void *priv,
- struct v4l2_control *ctrl)
+int fimc_s_ctrl(struct fimc_ctx *ctx, struct v4l2_control *ctrl)
{
- struct fimc_ctx *ctx = priv;
struct samsung_fimc_variant *variant = ctx->fimc_dev->variant;
+ struct fimc_dev *fimc = ctx->fimc_dev;
unsigned long flags;
- int ret = 0;
- ret = check_ctrl_val(ctx, ctrl);
- if (ret)
- return ret;
+ if (ctx->rotation != 0 &&
+ (ctrl->id == V4L2_CID_HFLIP || ctrl->id == V4L2_CID_VFLIP)) {
+ v4l2_err(&fimc->m2m.v4l2_dev,
+ "Simultaneous flip and rotation is not supported\n");
+ return -EINVAL;
+ }
+
+ spin_lock_irqsave(&ctx->slock, flags);
switch (ctrl->id) {
case V4L2_CID_HFLIP:
- if (ctx->rotation != 0)
- return 0;
if (ctrl->value)
ctx->flip |= FLIP_X_AXIS;
else
@@ -959,8 +1073,6 @@ int fimc_m2m_s_ctrl(struct file *file, void *priv,
break;
case V4L2_CID_VFLIP:
- if (ctx->rotation != 0)
- return 0;
if (ctrl->value)
ctx->flip |= FLIP_Y_AXIS;
else
@@ -968,77 +1080,95 @@ int fimc_m2m_s_ctrl(struct file *file, void *priv,
break;
case V4L2_CID_ROTATE:
- if (ctrl->value == 90 || ctrl->value == 270) {
- if (ctx->out_path == FIMC_LCDFIFO &&
- !variant->has_inp_rot) {
- return -EINVAL;
- } else if (ctx->in_path == FIMC_DMA &&
- !variant->has_out_rot) {
- return -EINVAL;
- }
+ /* Check for the output rotator availability */
+ if ((ctrl->value == 90 || ctrl->value == 270) &&
+ (ctx->in_path == FIMC_DMA && !variant->has_out_rot)) {
+ spin_unlock_irqrestore(&ctx->slock, flags);
+ return -EINVAL;
+ } else {
+ ctx->rotation = ctrl->value;
}
- ctx->rotation = ctrl->value;
- if (ctrl->value == 180)
- ctx->flip = FLIP_XY_AXIS;
break;
default:
- v4l2_err(&ctx->fimc_dev->m2m.v4l2_dev, "Invalid control\n");
+ spin_unlock_irqrestore(&ctx->slock, flags);
+ v4l2_err(&fimc->m2m.v4l2_dev, "Invalid control\n");
return -EINVAL;
}
- spin_lock_irqsave(&ctx->slock, flags);
ctx->state |= FIMC_PARAMS;
spin_unlock_irqrestore(&ctx->slock, flags);
+
return 0;
}
+static int fimc_m2m_s_ctrl(struct file *file, void *priv,
+ struct v4l2_control *ctrl)
+{
+ struct fimc_ctx *ctx = priv;
+ int ret = 0;
+
+ ret = check_ctrl_val(ctx, ctrl);
+ if (ret)
+ return ret;
+
+ ret = fimc_s_ctrl(ctx, ctrl);
+ return ret;
+}
-static int fimc_m2m_cropcap(struct file *file, void *fh,
- struct v4l2_cropcap *cr)
+int fimc_vidioc_cropcap(struct file *file, void *fh,
+ struct v4l2_cropcap *cr)
{
struct fimc_frame *frame;
struct fimc_ctx *ctx = fh;
+ struct fimc_dev *fimc = ctx->fimc_dev;
- frame = ctx_m2m_get_frame(ctx, cr->type);
+ frame = ctx_get_frame(ctx, cr->type);
if (IS_ERR(frame))
return PTR_ERR(frame);
- cr->bounds.left = 0;
- cr->bounds.top = 0;
- cr->bounds.width = frame->f_width;
- cr->bounds.height = frame->f_height;
- cr->defrect.left = frame->offs_h;
- cr->defrect.top = frame->offs_v;
- cr->defrect.width = frame->o_width;
- cr->defrect.height = frame->o_height;
+ if (mutex_lock_interruptible(&fimc->lock))
+ return -ERESTARTSYS;
+
+ cr->bounds.left = 0;
+ cr->bounds.top = 0;
+ cr->bounds.width = frame->f_width;
+ cr->bounds.height = frame->f_height;
+ cr->defrect = cr->bounds;
+
+ mutex_unlock(&fimc->lock);
return 0;
}
-static int fimc_m2m_g_crop(struct file *file, void *fh, struct v4l2_crop *cr)
+int fimc_vidioc_g_crop(struct file *file, void *fh, struct v4l2_crop *cr)
{
struct fimc_frame *frame;
struct fimc_ctx *ctx = file->private_data;
+ struct fimc_dev *fimc = ctx->fimc_dev;
- frame = ctx_m2m_get_frame(ctx, cr->type);
+ frame = ctx_get_frame(ctx, cr->type);
if (IS_ERR(frame))
return PTR_ERR(frame);
+ if (mutex_lock_interruptible(&fimc->lock))
+ return -ERESTARTSYS;
+
cr->c.left = frame->offs_h;
cr->c.top = frame->offs_v;
cr->c.width = frame->width;
cr->c.height = frame->height;
+ mutex_unlock(&fimc->lock);
return 0;
}
-static int fimc_m2m_s_crop(struct file *file, void *fh, struct v4l2_crop *cr)
+int fimc_try_crop(struct fimc_ctx *ctx, struct v4l2_crop *cr)
{
- struct fimc_ctx *ctx = file->private_data;
struct fimc_dev *fimc = ctx->fimc_dev;
- unsigned long flags;
struct fimc_frame *f;
- u32 min_size;
- int ret = 0;
+ u32 min_size, halign;
+
+ f = (cr->type == V4L2_BUF_TYPE_VIDEO_OUTPUT) ?
+ &ctx->s_frame : &ctx->d_frame;
if (cr->c.top < 0 || cr->c.left < 0) {
v4l2_err(&fimc->m2m.v4l2_dev,
@@ -1046,66 +1176,98 @@ static int fimc_m2m_s_crop(struct file *file, void *fh, struct v4l2_crop *cr)
return -EINVAL;
}
- if (cr->c.width <= 0 || cr->c.height <= 0) {
- v4l2_err(&fimc->m2m.v4l2_dev,
- "crop width and height must be greater than 0\n");
- return -EINVAL;
- }
-
- f = ctx_m2m_get_frame(ctx, cr->type);
+ f = ctx_get_frame(ctx, cr->type);
if (IS_ERR(f))
return PTR_ERR(f);
- /* Adjust to required pixel boundary. */
- min_size = (cr->type == V4L2_BUF_TYPE_VIDEO_OUTPUT) ?
- fimc->variant->min_inp_pixsize : fimc->variant->min_out_pixsize;
-
- cr->c.width = round_down(cr->c.width, min_size);
- cr->c.height = round_down(cr->c.height, min_size);
- cr->c.left = round_down(cr->c.left + 1, min_size);
- cr->c.top = round_down(cr->c.top + 1, min_size);
+ min_size = (cr->type == V4L2_BUF_TYPE_VIDEO_OUTPUT)
+ ? fimc->variant->min_inp_pixsize
+ : fimc->variant->min_out_pixsize;
- if ((cr->c.left + cr->c.width > f->o_width)
- || (cr->c.top + cr->c.height > f->o_height)) {
- v4l2_err(&fimc->m2m.v4l2_dev, "Error in S_CROP params\n");
- return -EINVAL;
+ if (ctx->state & FIMC_CTX_M2M) {
+ if (fimc->id == 1 && fimc->variant->pix_hoff)
+ halign = fimc_fmt_is_rgb(f->fmt->color) ? 0 : 1;
+ else
+ halign = ffs(min_size) - 1;
+ /* There are stricter alignment requirements at the camera interface. */
+ } else {
+ min_size = 16;
+ halign = 4;
}
+ v4l_bound_align_image(&cr->c.width, min_size, f->o_width,
+ ffs(min_size) - 1,
+ &cr->c.height, min_size, f->o_height,
+ halign, 64/(ALIGN(f->fmt->depth, 8)));
+
+ /* adjust left/top if cropping rectangle is out of bounds */
+ if (cr->c.left + cr->c.width > f->o_width)
+ cr->c.left = f->o_width - cr->c.width;
+ if (cr->c.top + cr->c.height > f->o_height)
+ cr->c.top = f->o_height - cr->c.height;
+
+ cr->c.left = round_down(cr->c.left, min_size);
+ cr->c.top = round_down(cr->c.top,
+ ctx->state & FIMC_CTX_M2M ? 8 : 16);
+
+ dbg("l:%d, t:%d, w:%d, h:%d, f_w: %d, f_h: %d",
+ cr->c.left, cr->c.top, cr->c.width, cr->c.height,
+ f->f_width, f->f_height);
+
+ return 0;
+}
+
+
+static int fimc_m2m_s_crop(struct file *file, void *fh, struct v4l2_crop *cr)
+{
+ struct fimc_ctx *ctx = file->private_data;
+ struct fimc_dev *fimc = ctx->fimc_dev;
+ unsigned long flags;
+ struct fimc_frame *f;
+ int ret;
+
+ ret = fimc_try_crop(ctx, cr);
+ if (ret)
+ return ret;
+
+ f = (cr->type == V4L2_BUF_TYPE_VIDEO_OUTPUT) ?
+ &ctx->s_frame : &ctx->d_frame;
+
spin_lock_irqsave(&ctx->slock, flags);
- if ((ctx->state & FIMC_SRC_FMT) && (ctx->state & FIMC_DST_FMT)) {
- /* Check for the pixel scaling ratio when cropping input img. */
+ if ((ctx->state & FIMC_SRC_FMT) && (ctx->state & FIMC_DST_FMT)) {
+ /* Check to see if scaling ratio is within supported range */
if (cr->type == V4L2_BUF_TYPE_VIDEO_OUTPUT)
ret = fimc_check_scaler_ratio(&cr->c, &ctx->d_frame);
- else if (cr->type == V4L2_BUF_TYPE_VIDEO_CAPTURE)
+ else
ret = fimc_check_scaler_ratio(&cr->c, &ctx->s_frame);
-
if (ret) {
spin_unlock_irqrestore(&ctx->slock, flags);
- v4l2_err(&fimc->m2m.v4l2_dev, "Out of scaler range");
+ v4l2_err(&fimc->m2m.v4l2_dev, "Out of scaler range");
return -EINVAL;
}
}
ctx->state |= FIMC_PARAMS;
- spin_unlock_irqrestore(&ctx->slock, flags);
f->offs_h = cr->c.left;
f->offs_v = cr->c.top;
- f->width = cr->c.width;
+ f->width = cr->c.width;
f->height = cr->c.height;
+
+ spin_unlock_irqrestore(&ctx->slock, flags);
return 0;
}
static const struct v4l2_ioctl_ops fimc_m2m_ioctl_ops = {
.vidioc_querycap = fimc_m2m_querycap,
- .vidioc_enum_fmt_vid_cap = fimc_m2m_enum_fmt,
- .vidioc_enum_fmt_vid_out = fimc_m2m_enum_fmt,
+ .vidioc_enum_fmt_vid_cap = fimc_vidioc_enum_fmt,
+ .vidioc_enum_fmt_vid_out = fimc_vidioc_enum_fmt,
- .vidioc_g_fmt_vid_cap = fimc_m2m_g_fmt,
- .vidioc_g_fmt_vid_out = fimc_m2m_g_fmt,
+ .vidioc_g_fmt_vid_cap = fimc_vidioc_g_fmt,
+ .vidioc_g_fmt_vid_out = fimc_vidioc_g_fmt,
- .vidioc_try_fmt_vid_cap = fimc_m2m_try_fmt,
- .vidioc_try_fmt_vid_out = fimc_m2m_try_fmt,
+ .vidioc_try_fmt_vid_cap = fimc_vidioc_try_fmt,
+ .vidioc_try_fmt_vid_out = fimc_vidioc_try_fmt,
.vidioc_s_fmt_vid_cap = fimc_m2m_s_fmt,
.vidioc_s_fmt_vid_out = fimc_m2m_s_fmt,
@@ -1119,13 +1281,13 @@ static const struct v4l2_ioctl_ops fimc_m2m_ioctl_ops = {
.vidioc_streamon = fimc_m2m_streamon,
.vidioc_streamoff = fimc_m2m_streamoff,
- .vidioc_queryctrl = fimc_m2m_queryctrl,
- .vidioc_g_ctrl = fimc_m2m_g_ctrl,
+ .vidioc_queryctrl = fimc_vidioc_queryctrl,
+ .vidioc_g_ctrl = fimc_vidioc_g_ctrl,
.vidioc_s_ctrl = fimc_m2m_s_ctrl,
- .vidioc_g_crop = fimc_m2m_g_crop,
+ .vidioc_g_crop = fimc_vidioc_g_crop,
.vidioc_s_crop = fimc_m2m_s_crop,
- .vidioc_cropcap = fimc_m2m_cropcap
+ .vidioc_cropcap = fimc_vidioc_cropcap
};
@@ -1136,9 +1298,9 @@ static void queue_init(void *priv, struct videobuf_queue *vq,
struct fimc_dev *fimc = ctx->fimc_dev;
videobuf_queue_dma_contig_init(vq, &fimc_qops,
- fimc->m2m.v4l2_dev.dev,
+ &fimc->pdev->dev,
&fimc->irqlock, type, V4L2_FIELD_NONE,
- sizeof(struct fimc_vid_buffer), priv);
+ sizeof(struct fimc_vid_buffer), priv, NULL);
}
static int fimc_m2m_open(struct file *file)
@@ -1147,25 +1309,38 @@ static int fimc_m2m_open(struct file *file)
struct fimc_ctx *ctx = NULL;
int err = 0;
- mutex_lock(&fimc->lock);
+ if (mutex_lock_interruptible(&fimc->lock))
+ return -ERESTARTSYS;
+
+ dbg("pid: %d, state: 0x%lx, refcnt: %d",
+ task_pid_nr(current), fimc->state, fimc->vid_cap.refcnt);
+
+ /*
+ * Return if the corresponding video capture node
+ * is already opened.
+ */
+ if (fimc->vid_cap.refcnt > 0) {
+ err = -EBUSY;
+ goto err_unlock;
+ }
+
fimc->m2m.refcnt++;
set_bit(ST_OUTDMA_RUN, &fimc->state);
- mutex_unlock(&fimc->lock);
-
ctx = kzalloc(sizeof *ctx, GFP_KERNEL);
- if (!ctx)
- return -ENOMEM;
+ if (!ctx) {
+ err = -ENOMEM;
+ goto err_unlock;
+ }
file->private_data = ctx;
ctx->fimc_dev = fimc;
- /* default format */
+ /* Default color format */
ctx->s_frame.fmt = &fimc_formats[0];
ctx->d_frame.fmt = &fimc_formats[0];
- /* per user process device context initialization */
- ctx->state = 0;
+ /* Setup the device context for mem2mem mode. */
+ ctx->state = FIMC_CTX_M2M;
ctx->flags = 0;
- ctx->effect.type = S5P_FIMC_EFFECT_ORIGINAL;
ctx->in_path = FIMC_DMA;
ctx->out_path = FIMC_DMA;
spin_lock_init(&ctx->slock);
@@ -1175,6 +1350,9 @@ static int fimc_m2m_open(struct file *file)
err = PTR_ERR(ctx->m2m_ctx);
kfree(ctx);
}
+
+err_unlock:
+ mutex_unlock(&fimc->lock);
return err;
}
@@ -1183,11 +1361,16 @@ static int fimc_m2m_release(struct file *file)
struct fimc_ctx *ctx = file->private_data;
struct fimc_dev *fimc = ctx->fimc_dev;
+ mutex_lock(&fimc->lock);
+
+ dbg("pid: %d, state: 0x%lx, refcnt= %d",
+ task_pid_nr(current), fimc->state, fimc->m2m.refcnt);
+
v4l2_m2m_ctx_release(ctx->m2m_ctx);
kfree(ctx);
- mutex_lock(&fimc->lock);
if (--fimc->m2m.refcnt <= 0)
clear_bit(ST_OUTDMA_RUN, &fimc->state);
+
mutex_unlock(&fimc->lock);
return 0;
}
@@ -1196,6 +1379,7 @@ static unsigned int fimc_m2m_poll(struct file *file,
struct poll_table_struct *wait)
{
struct fimc_ctx *ctx = file->private_data;
+
return v4l2_m2m_poll(file, ctx->m2m_ctx, wait);
}
@@ -1203,6 +1387,7 @@ static unsigned int fimc_m2m_poll(struct file *file,
static int fimc_m2m_mmap(struct file *file, struct vm_area_struct *vma)
{
struct fimc_ctx *ctx = file->private_data;
+
return v4l2_m2m_mmap(file, ctx->m2m_ctx, vma);
}
@@ -1241,7 +1426,7 @@ static int fimc_register_m2m_device(struct fimc_dev *fimc)
ret = v4l2_device_register(&pdev->dev, v4l2_dev);
if (ret)
- return ret;;
+ goto err_m2m_r1;
vfd = video_device_alloc();
if (!vfd) {
@@ -1293,7 +1478,7 @@ static void fimc_unregister_m2m_device(struct fimc_dev *fimc)
if (fimc) {
v4l2_m2m_release(fimc->m2m.m2m_dev);
video_unregister_device(fimc->m2m.vfd);
- video_device_release(fimc->m2m.vfd);
+
v4l2_device_unregister(&fimc->m2m.v4l2_dev);
}
}
@@ -1337,7 +1522,7 @@ static int fimc_probe(struct platform_device *pdev)
drv_data = (struct samsung_fimc_driverdata *)
platform_get_device_id(pdev)->driver_data;
- if (pdev->id >= drv_data->devs_cnt) {
+ if (pdev->id >= drv_data->num_entities) {
dev_err(&pdev->dev, "Invalid platform device id: %d\n",
pdev->id);
return -EINVAL;
@@ -1350,9 +1535,11 @@ static int fimc_probe(struct platform_device *pdev)
fimc->id = pdev->id;
fimc->variant = drv_data->variant[fimc->id];
fimc->pdev = pdev;
+ fimc->pdata = pdev->dev.platform_data;
fimc->state = ST_IDLE;
spin_lock_init(&fimc->irqlock);
+ init_waitqueue_head(&fimc->irq_queue);
spin_lock_init(&fimc->slock);
mutex_init(&fimc->lock);
@@ -1382,6 +1569,7 @@ static int fimc_probe(struct platform_device *pdev)
ret = fimc_clk_get(fimc);
if (ret)
goto err_regs_unmap;
+ clk_set_rate(fimc->clock[0], drv_data->lclk_frequency);
res = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
if (!res) {
@@ -1399,25 +1587,38 @@ static int fimc_probe(struct platform_device *pdev)
goto err_clk;
}
- fimc->work_queue = create_workqueue(dev_name(&fimc->pdev->dev));
- if (!fimc->work_queue) {
- ret = -ENOMEM;
- goto err_irq;
- }
-
ret = fimc_register_m2m_device(fimc);
if (ret)
- goto err_wq;
+ goto err_irq;
+
+ /* At least one camera sensor is required to register capture node */
+ if (fimc->pdata) {
+ int i;
+ for (i = 0; i < FIMC_MAX_CAMIF_CLIENTS; ++i)
+ if (fimc->pdata->isp_info[i])
+ break;
+
+ if (i < FIMC_MAX_CAMIF_CLIENTS) {
+ ret = fimc_register_capture_device(fimc);
+ if (ret)
+ goto err_m2m;
+ }
+ }
- fimc_hw_en_lastirq(fimc, true);
+ /*
+ * Exclude the additional output DMA address registers by masking
+ * them out on HW revisions that provide extended capabilities.
+ */
+ if (fimc->variant->out_buf_count > 4)
+ fimc_hw_set_dma_seq(fimc, 0xF);
dev_dbg(&pdev->dev, "%s(): fimc-%d registered successfully\n",
__func__, fimc->id);
return 0;
-err_wq:
- destroy_workqueue(fimc->work_queue);
+err_m2m:
+ fimc_unregister_m2m_device(fimc);
err_irq:
free_irq(fimc->irq, fimc);
err_clk:
@@ -1429,7 +1630,7 @@ err_req_region:
kfree(fimc->regs_res);
err_info:
kfree(fimc);
- dev_err(&pdev->dev, "failed to install\n");
+
return ret;
}
@@ -1438,91 +1639,151 @@ static int __devexit fimc_remove(struct platform_device *pdev)
struct fimc_dev *fimc =
(struct fimc_dev *)platform_get_drvdata(pdev);
- v4l2_info(&fimc->m2m.v4l2_dev, "Removing %s\n", pdev->name);
-
free_irq(fimc->irq, fimc);
-
fimc_hw_reset(fimc);
fimc_unregister_m2m_device(fimc);
+ fimc_unregister_capture_device(fimc);
+
fimc_clk_release(fimc);
iounmap(fimc->regs);
release_resource(fimc->regs_res);
kfree(fimc->regs_res);
kfree(fimc);
+
+ dev_info(&pdev->dev, "%s driver unloaded\n", pdev->name);
return 0;
}
-static struct samsung_fimc_variant fimc01_variant_s5p = {
- .has_inp_rot = 1,
- .has_out_rot = 1,
+/* Image pixel limits, similar across several FIMC HW revisions. */
+static struct fimc_pix_limit s5p_pix_limit[3] = {
+ [0] = {
+ .scaler_en_w = 3264,
+ .scaler_dis_w = 8192,
+ .in_rot_en_h = 1920,
+ .in_rot_dis_w = 8192,
+ .out_rot_en_w = 1920,
+ .out_rot_dis_w = 4224,
+ },
+ [1] = {
+ .scaler_en_w = 4224,
+ .scaler_dis_w = 8192,
+ .in_rot_en_h = 1920,
+ .in_rot_dis_w = 8192,
+ .out_rot_en_w = 1920,
+ .out_rot_dis_w = 4224,
+ },
+ [2] = {
+ .scaler_en_w = 1920,
+ .scaler_dis_w = 8192,
+ .in_rot_en_h = 1280,
+ .in_rot_dis_w = 8192,
+ .out_rot_en_w = 1280,
+ .out_rot_dis_w = 1920,
+ },
+};
+
+static struct samsung_fimc_variant fimc0_variant_s5p = {
+ .has_inp_rot = 1,
+ .has_out_rot = 1,
.min_inp_pixsize = 16,
.min_out_pixsize = 16,
-
- .scaler_en_w = 3264,
- .scaler_dis_w = 8192,
- .in_rot_en_h = 1920,
- .in_rot_dis_w = 8192,
- .out_rot_en_w = 1920,
- .out_rot_dis_w = 4224,
+ .hor_offs_align = 8,
+ .out_buf_count = 4,
+ .pix_limit = &s5p_pix_limit[0],
};
static struct samsung_fimc_variant fimc2_variant_s5p = {
.min_inp_pixsize = 16,
.min_out_pixsize = 16,
+ .hor_offs_align = 8,
+ .out_buf_count = 4,
+ .pix_limit = &s5p_pix_limit[1],
+};
- .scaler_en_w = 4224,
- .scaler_dis_w = 8192,
- .in_rot_en_h = 1920,
- .in_rot_dis_w = 8192,
- .out_rot_en_w = 1920,
- .out_rot_dis_w = 4224,
+static struct samsung_fimc_variant fimc0_variant_s5pv210 = {
+ .pix_hoff = 1,
+ .has_inp_rot = 1,
+ .has_out_rot = 1,
+ .min_inp_pixsize = 16,
+ .min_out_pixsize = 16,
+ .hor_offs_align = 8,
+ .out_buf_count = 4,
+ .pix_limit = &s5p_pix_limit[1],
};
-static struct samsung_fimc_variant fimc01_variant_s5pv210 = {
- .pix_hoff = 1,
- .has_inp_rot = 1,
- .has_out_rot = 1,
+static struct samsung_fimc_variant fimc1_variant_s5pv210 = {
+ .pix_hoff = 1,
+ .has_inp_rot = 1,
+ .has_out_rot = 1,
.min_inp_pixsize = 16,
- .min_out_pixsize = 32,
-
- .scaler_en_w = 4224,
- .scaler_dis_w = 8192,
- .in_rot_en_h = 1920,
- .in_rot_dis_w = 8192,
- .out_rot_en_w = 1920,
- .out_rot_dis_w = 4224,
+ .min_out_pixsize = 16,
+ .hor_offs_align = 1,
+ .out_buf_count = 4,
+ .pix_limit = &s5p_pix_limit[2],
};
static struct samsung_fimc_variant fimc2_variant_s5pv210 = {
.pix_hoff = 1,
.min_inp_pixsize = 16,
- .min_out_pixsize = 32,
-
- .scaler_en_w = 1920,
- .scaler_dis_w = 8192,
- .in_rot_en_h = 1280,
- .in_rot_dis_w = 8192,
- .out_rot_en_w = 1280,
- .out_rot_dis_w = 1920,
+ .min_out_pixsize = 16,
+ .hor_offs_align = 8,
+ .out_buf_count = 4,
+ .pix_limit = &s5p_pix_limit[2],
+};
+
+static struct samsung_fimc_variant fimc0_variant_s5pv310 = {
+ .pix_hoff = 1,
+ .has_inp_rot = 1,
+ .has_out_rot = 1,
+ .min_inp_pixsize = 16,
+ .min_out_pixsize = 16,
+ .hor_offs_align = 1,
+ .out_buf_count = 32,
+ .pix_limit = &s5p_pix_limit[1],
+};
+
+static struct samsung_fimc_variant fimc2_variant_s5pv310 = {
+ .pix_hoff = 1,
+ .min_inp_pixsize = 16,
+ .min_out_pixsize = 16,
+ .hor_offs_align = 1,
+ .out_buf_count = 32,
+ .pix_limit = &s5p_pix_limit[2],
};
+/* S5PC100 */
static struct samsung_fimc_driverdata fimc_drvdata_s5p = {
.variant = {
- [0] = &fimc01_variant_s5p,
- [1] = &fimc01_variant_s5p,
+ [0] = &fimc0_variant_s5p,
+ [1] = &fimc0_variant_s5p,
[2] = &fimc2_variant_s5p,
},
- .devs_cnt = 3
+ .num_entities = 3,
+ .lclk_frequency = 133000000UL,
};
+/* S5PV210, S5PC110 */
static struct samsung_fimc_driverdata fimc_drvdata_s5pv210 = {
.variant = {
- [0] = &fimc01_variant_s5pv210,
- [1] = &fimc01_variant_s5pv210,
+ [0] = &fimc0_variant_s5pv210,
+ [1] = &fimc1_variant_s5pv210,
[2] = &fimc2_variant_s5pv210,
},
- .devs_cnt = 3
+ .num_entities = 3,
+ .lclk_frequency = 166000000UL,
+};
+
+/* S5PV310, S5PC210 */
+static struct samsung_fimc_driverdata fimc_drvdata_s5pv310 = {
+ .variant = {
+ [0] = &fimc0_variant_s5pv310,
+ [1] = &fimc0_variant_s5pv310,
+ [2] = &fimc0_variant_s5pv310,
+ [3] = &fimc2_variant_s5pv310,
+ },
+ .num_entities = 4,
+ .lclk_frequency = 166000000UL,
};
static struct platform_device_id fimc_driver_ids[] = {
@@ -1532,6 +1793,9 @@ static struct platform_device_id fimc_driver_ids[] = {
}, {
.name = "s5pv210-fimc",
.driver_data = (unsigned long)&fimc_drvdata_s5pv210,
+ }, {
+ .name = "s5pv310-fimc",
+ .driver_data = (unsigned long)&fimc_drvdata_s5pv310,
},
{},
};
@@ -1547,20 +1811,12 @@ static struct platform_driver fimc_driver = {
}
};
-static char banner[] __initdata = KERN_INFO
- "S5PC Camera Interface V4L2 Driver, (c) 2010 Samsung Electronics\n";
-
static int __init fimc_init(void)
{
- u32 ret;
- printk(banner);
-
- ret = platform_driver_register(&fimc_driver);
- if (ret) {
- printk(KERN_ERR "FIMC platform driver register failed\n");
- return -1;
- }
- return 0;
+ int ret = platform_driver_register(&fimc_driver);
+ if (ret)
+ err("platform_driver_register failed: %d\n", ret);
+ return ret;
}
static void __exit fimc_exit(void)
@@ -1571,6 +1827,6 @@ static void __exit fimc_exit(void)
module_init(fimc_init);
module_exit(fimc_exit);
-MODULE_AUTHOR("Sylwester Nawrocki, s.nawrocki@samsung.com");
-MODULE_DESCRIPTION("S3C/S5P FIMC (video postprocessor) driver");
+MODULE_AUTHOR("Sylwester Nawrocki <s.nawrocki@samsung.com>");
+MODULE_DESCRIPTION("S5P FIMC camera host interface/video postprocessor driver");
MODULE_LICENSE("GPL");
diff --git a/drivers/media/video/s5p-fimc/fimc-core.h b/drivers/media/video/s5p-fimc/fimc-core.h
index 6b3e0cd73cdd..3e1078516560 100644
--- a/drivers/media/video/s5p-fimc/fimc-core.h
+++ b/drivers/media/video/s5p-fimc/fimc-core.h
@@ -11,10 +11,14 @@
#ifndef FIMC_CORE_H_
#define FIMC_CORE_H_
+/*#define DEBUG*/
+
#include <linux/types.h>
#include <media/videobuf-core.h>
#include <media/v4l2-device.h>
#include <media/v4l2-mem2mem.h>
+#include <media/v4l2-mediabus.h>
+#include <media/s3c_fimc.h>
#include <linux/videodev2.h>
#include "regs-fimc.h"
@@ -28,47 +32,72 @@
#define dbg(fmt, args...)
#endif
+/* Time to wait for next frame VSYNC interrupt while stopping operation. */
+#define FIMC_SHUTDOWN_TIMEOUT ((100*HZ)/1000)
#define NUM_FIMC_CLOCKS 2
#define MODULE_NAME "s5p-fimc"
-#define FIMC_MAX_DEVS 3
+#define FIMC_MAX_DEVS 4
#define FIMC_MAX_OUT_BUFS 4
#define SCALER_MAX_HRATIO 64
#define SCALER_MAX_VRATIO 64
+#define DMA_MIN_SIZE 8
-enum {
+/* FIMC device state flags */
+enum fimc_dev_flags {
+ /* for m2m node */
ST_IDLE,
ST_OUTDMA_RUN,
ST_M2M_PEND,
+ /* for capture node */
+ ST_CAPT_PEND,
+ ST_CAPT_RUN,
+ ST_CAPT_STREAM,
+ ST_CAPT_SHUT,
};
#define fimc_m2m_active(dev) test_bit(ST_OUTDMA_RUN, &(dev)->state)
#define fimc_m2m_pending(dev) test_bit(ST_M2M_PEND, &(dev)->state)
+#define fimc_capture_running(dev) test_bit(ST_CAPT_RUN, &(dev)->state)
+#define fimc_capture_pending(dev) test_bit(ST_CAPT_PEND, &(dev)->state)
+
+#define fimc_capture_active(dev) \
+ (test_bit(ST_CAPT_RUN, &(dev)->state) || \
+ test_bit(ST_CAPT_PEND, &(dev)->state))
+
+#define fimc_capture_streaming(dev) \
+ test_bit(ST_CAPT_STREAM, &(dev)->state)
+
+#define fimc_buf_finish(dev, vid_buf) do { \
+ spin_lock(&(dev)->irqlock); \
+ (vid_buf)->vb.state = VIDEOBUF_DONE; \
+ spin_unlock(&(dev)->irqlock); \
+ wake_up(&(vid_buf)->vb.done); \
+} while (0)
+
enum fimc_datapath {
- FIMC_ITU_CAM_A,
- FIMC_ITU_CAM_B,
- FIMC_MIPI_CAM,
+ FIMC_CAMERA,
FIMC_DMA,
FIMC_LCDFIFO,
FIMC_WRITEBACK
};
enum fimc_color_fmt {
- S5P_FIMC_RGB565,
+ S5P_FIMC_RGB565 = 0x10,
S5P_FIMC_RGB666,
S5P_FIMC_RGB888,
- S5P_FIMC_YCBCR420,
+ S5P_FIMC_RGB30_LOCAL,
+ S5P_FIMC_YCBCR420 = 0x20,
S5P_FIMC_YCBCR422,
S5P_FIMC_YCBYCR422,
S5P_FIMC_YCRYCB422,
S5P_FIMC_CBYCRY422,
S5P_FIMC_CRYCBY422,
- S5P_FIMC_RGB30_LOCAL,
S5P_FIMC_YCBCR444_LOCAL,
- S5P_FIMC_MAX_COLOR = S5P_FIMC_YCBCR444_LOCAL,
- S5P_FIMC_COLOR_MASK = 0x0F,
};
+#define fimc_fmt_is_rgb(x) ((x) & 0x10)
+
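The renumbered enum encodes the colour family in the value itself: RGB formats start at 0x10 and YCbCr formats at 0x20, so a single bit test is enough. A small illustration (not part of the patch):

        /* S5P_FIMC_RGB565 == 0x10, S5P_FIMC_YCBCR420 == 0x20 */
        if (fimc_fmt_is_rgb(S5P_FIMC_RGB565))
                ; /* taken: bit 4 marks the RGB family */
        if (fimc_fmt_is_rgb(S5P_FIMC_YCBCR420))
                ; /* not taken */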
/* Y/Cb/Cr components order at DMA output for 1 plane YCbCr 4:2:2 formats. */
#define S5P_FIMC_OUT_CRYCBY S5P_CIOCTRL_ORDER422_CRYCBY
#define S5P_FIMC_OUT_CBYCRY S5P_CIOCTRL_ORDER422_YCRYCB
@@ -93,11 +122,13 @@ enum fimc_color_fmt {
#define S5P_FIMC_EFFECT_SIKHOUETTE S5P_CIIMGEFF_FIN_SILHOUETTE
/* The hardware context state. */
-#define FIMC_PARAMS (1 << 0)
-#define FIMC_SRC_ADDR (1 << 1)
-#define FIMC_DST_ADDR (1 << 2)
-#define FIMC_SRC_FMT (1 << 3)
-#define FIMC_DST_FMT (1 << 4)
+#define FIMC_PARAMS (1 << 0)
+#define FIMC_SRC_ADDR (1 << 1)
+#define FIMC_DST_ADDR (1 << 2)
+#define FIMC_SRC_FMT (1 << 3)
+#define FIMC_DST_FMT (1 << 4)
+#define FIMC_CTX_M2M (1 << 5)
+#define FIMC_CTX_CAP (1 << 6)
/* Image conversion flags */
#define FIMC_IN_DMA_ACCESS_TILED (1 << 0)
@@ -106,7 +137,9 @@ enum fimc_color_fmt {
#define FIMC_OUT_DMA_ACCESS_LINEAR (0 << 1)
#define FIMC_SCAN_MODE_PROGRESSIVE (0 << 2)
#define FIMC_SCAN_MODE_INTERLACED (1 << 2)
-/* YCbCr data dynamic range for RGB-YUV color conversion. Y/Cb/Cr: (0 ~ 255) */
+/*
+ * YCbCr data dynamic range for RGB-YUV color conversion.
+ * Y/Cb/Cr: (0 ~ 255)
+ */
#define FIMC_COLOR_RANGE_WIDE (0 << 3)
/* Y (16 ~ 235), Cb/Cr (16 ~ 240) */
#define FIMC_COLOR_RANGE_NARROW (1 << 3)
@@ -118,20 +151,25 @@ enum fimc_color_fmt {
/**
* struct fimc_fmt - the driver's internal color format data
+ * @mbus_code: Media Bus pixel code, -1 if not applicable
* @name: format description
- * @fourcc: the fourcc code for this format
+ * @fourcc: the fourcc code for this format, 0 if not applicable
* @color: the corresponding fimc_color_fmt
- * @depth: number of bits per pixel
+ * @depth: driver's private 'number of bits per pixel'
* @buff_cnt: number of physically non-contiguous data planes
* @planes_cnt: number of physically contiguous data planes
*/
struct fimc_fmt {
+ enum v4l2_mbus_pixelcode mbus_code;
char *name;
u32 fourcc;
u32 color;
- u32 depth;
u16 buff_cnt;
u16 planes_cnt;
+ u16 depth;
+ u16 flags;
+#define FMT_FLAGS_CAM (1 << 0)
+#define FMT_FLAGS_M2M (1 << 1)
};
/**
@@ -167,37 +205,37 @@ struct fimc_effect {
/**
* struct fimc_scaler - the configuration data for FIMC internal scaler
*
- * @enabled: the flag set when the scaler is used
+ * @scaleup_h: flag indicating scaling up horizontally
+ * @scaleup_v: flag indicating scaling up vertically
+ * @copy_mode: flag indicating transparent DMA transfer (no scaling
+ * and color format conversion)
+ * @enabled: flag indicating if the scaler is used
* @hfactor: horizontal shift factor
* @vfactor: vertical shift factor
* @pre_hratio: horizontal ratio of the prescaler
* @pre_vratio: vertical ratio of the prescaler
* @pre_dst_width: the prescaler's destination width
* @pre_dst_height: the prescaler's destination height
- * @scaleup_h: flag indicating scaling up horizontally
- * @scaleup_v: flag indicating scaling up vertically
* @main_hratio: the main scaler's horizontal ratio
* @main_vratio: the main scaler's vertical ratio
- * @real_width: source width - offset
- * @real_height: source height - offset
- * @copy_mode: flag set if one-to-one mode is used, i.e. no scaling
- * and color format conversion
+ * @real_width: source pixel (width - offset)
+ * @real_height: source pixel (height - offset)
*/
struct fimc_scaler {
- u32 enabled;
+ unsigned int scaleup_h:1;
+ unsigned int scaleup_v:1;
+ unsigned int copy_mode:1;
+ unsigned int enabled:1;
u32 hfactor;
u32 vfactor;
u32 pre_hratio;
u32 pre_vratio;
u32 pre_dst_width;
u32 pre_dst_height;
- u32 scaleup_h;
- u32 scaleup_v;
u32 main_hratio;
u32 main_vratio;
u32 real_width;
u32 real_height;
- u32 copy_mode;
};
/**
@@ -215,15 +253,18 @@ struct fimc_addr {
/**
* struct fimc_vid_buffer - the driver's video buffer
- * @vb: v4l videobuf buffer
+ * @vb: v4l videobuf buffer
+ * @paddr: precalculated physical address set
+ * @index: buffer index for the output DMA engine
*/
struct fimc_vid_buffer {
struct videobuf_buffer vb;
+ struct fimc_addr paddr;
+ int index;
};
/**
- * struct fimc_frame - input/output frame format properties
- *
+ * struct fimc_frame - source/target frame properties
* @f_width: image full width (virtual screen size)
* @f_height: image full height (virtual screen size)
* @o_width: original image width as set by S_FMT
@@ -270,67 +311,119 @@ struct fimc_m2m_device {
};
/**
+ * struct fimc_vid_cap - camera capture device information
+ * @ctx: hardware context data
+ * @vfd: video device node for camera capture mode
+ * @v4l2_dev: v4l2_device struct to manage subdevs
+ * @sd: pointer to camera sensor subdevice currently in use
+ * @fmt: Media Bus format configured at selected image sensor
+ * @pending_buf_q: the pending buffer queue head
+ * @active_buf_q: the queue head of buffers scheduled in hardware
+ * @vbq: the capture video buffer queue
+ * @active_buf_cnt: number of video buffers scheduled in hardware
+ * @buf_index: index for managing the output DMA buffers
+ * @frame_count: the frame counter for statistics
+ * @reqbufs_count: the number of buffers requested in REQBUFS ioctl
+ * @input_index: input (camera sensor) index
+ * @refcnt: driver's private reference counter
+ */
+struct fimc_vid_cap {
+ struct fimc_ctx *ctx;
+ struct video_device *vfd;
+ struct v4l2_device v4l2_dev;
+ struct v4l2_subdev *sd;
+ struct v4l2_mbus_framefmt fmt;
+ struct list_head pending_buf_q;
+ struct list_head active_buf_q;
+ struct videobuf_queue vbq;
+ int active_buf_cnt;
+ int buf_index;
+ unsigned int frame_count;
+ unsigned int reqbufs_count;
+ int input_index;
+ int refcnt;
+};
+
+/**
+ * struct fimc_pix_limit - image pixel size limits in various IP configurations
+ *
+ * @scaler_en_w: max input pixel width when the scaler is enabled
+ * @scaler_dis_w: max input pixel width when the scaler is disabled
+ * @in_rot_en_h: max input width with the input rotator on
+ * @in_rot_dis_w: max input width with the input rotator off
+ * @out_rot_en_w: max output width with the output rotator on
+ * @out_rot_dis_w: max output width with the output rotator off
+ */
+struct fimc_pix_limit {
+ u16 scaler_en_w;
+ u16 scaler_dis_w;
+ u16 in_rot_en_h;
+ u16 in_rot_dis_w;
+ u16 out_rot_en_w;
+ u16 out_rot_dis_w;
+};
+
+/**
* struct samsung_fimc_variant - camera interface variant information
*
* @pix_hoff: indicate whether horizontal offset is in pixels or in bytes
* @has_inp_rot: set if has input rotator
* @has_out_rot: set if has output rotator
+ * @pix_limit: pixel size constraints for the scaler
* @min_inp_pixsize: minimum input pixel size
* @min_out_pixsize: minimum output pixel size
- * @scaler_en_w: maximum input pixel width when the scaler is enabled
- * @scaler_dis_w: maximum input pixel width when the scaler is disabled
- * @in_rot_en_h: maximum input width when the input rotator is used
- * @in_rot_dis_w: maximum input width when the input rotator is used
- * @out_rot_en_w: maximum output width for the output rotator enabled
- * @out_rot_dis_w: maximum output width for the output rotator enabled
+ * @hor_offs_align: horizontal pixel offset alignment
+ * @out_buf_count: the number of buffers in output DMA sequence
*/
struct samsung_fimc_variant {
unsigned int pix_hoff:1;
unsigned int has_inp_rot:1;
unsigned int has_out_rot:1;
-
+ struct fimc_pix_limit *pix_limit;
u16 min_inp_pixsize;
u16 min_out_pixsize;
- u16 scaler_en_w;
- u16 scaler_dis_w;
- u16 in_rot_en_h;
- u16 in_rot_dis_w;
- u16 out_rot_en_w;
- u16 out_rot_dis_w;
+ u16 hor_offs_align;
+ u16 out_buf_count;
};
/**
- * struct samsung_fimc_driverdata - per-device type driver data for init time.
+ * struct samsung_fimc_driverdata - per device type driver data for init time.
*
* @variant: the variant information for this driver.
* @num_entities: number of fimc sub-devices available in SoC
+ * @lclk_frequency: fimc bus clock frequency
*/
struct samsung_fimc_driverdata {
struct samsung_fimc_variant *variant[FIMC_MAX_DEVS];
- int devs_cnt;
+ unsigned long lclk_frequency;
+ int num_entities;
};
struct fimc_ctx;
/**
- * struct fimc_subdev - abstraction for a FIMC entity
+ * struct fimc_dev - abstraction for FIMC entity
*
* @slock: the spinlock protecting this data structure
* @lock: the mutex protecting this data structure
* @pdev: pointer to the FIMC platform device
+ * @pdata: pointer to the device platform data
* @id: FIMC device index (0..2)
* @clock[]: the clocks required for FIMC operation
* @regs: the mapped hardware registers
* @regs_res: the resource claimed for IO registers
* @irq: interrupt number of the FIMC subdevice
- * @irqlock: spinlock protecting videbuffer queue
+ * @irqlock: spinlock protecting videobuffer queue
+ * @irq_queue: interrupt handler waitqueue
* @m2m: memory-to-memory V4L2 device information
- * @state: the FIMC device state flags
+ * @vid_cap: camera capture device information
+ * @state: flags used to synchronize m2m and capture mode operation
*/
struct fimc_dev {
spinlock_t slock;
struct mutex lock;
struct platform_device *pdev;
+ struct s3c_platform_fimc *pdata;
struct samsung_fimc_variant *variant;
int id;
struct clk *clock[NUM_FIMC_CLOCKS];
@@ -338,8 +431,9 @@ struct fimc_dev {
struct resource *regs_res;
int irq;
spinlock_t irqlock;
- struct workqueue_struct *work_queue;
+ wait_queue_head_t irq_queue;
struct fimc_m2m_device m2m;
+ struct fimc_vid_cap vid_cap;
unsigned long state;
};
@@ -359,7 +453,7 @@ struct fimc_dev {
* @effect: image effect
* @rotation: image clockwise rotation in degrees
* @flip: image flip mode
- * @flags: an additional flags for image conversion
+ * @flags: additional flags for image conversion
* @state: flags to keep track of user configuration
* @fimc_dev: the FIMC device this context applies to
* @m2m_ctx: memory-to-memory device context
@@ -384,6 +478,7 @@ struct fimc_ctx {
struct v4l2_m2m_ctx *m2m_ctx;
};
+extern struct videobuf_queue_ops fimc_qops;
static inline int tiled_fmt(struct fimc_fmt *fmt)
{
@@ -397,18 +492,24 @@ static inline void fimc_hw_clear_irq(struct fimc_dev *dev)
writel(cfg, dev->regs + S5P_CIGCTRL);
}
-static inline void fimc_hw_start_scaler(struct fimc_dev *dev)
+static inline void fimc_hw_enable_scaler(struct fimc_dev *dev, bool on)
{
u32 cfg = readl(dev->regs + S5P_CISCCTRL);
- cfg |= S5P_CISCCTRL_SCALERSTART;
+ if (on)
+ cfg |= S5P_CISCCTRL_SCALERSTART;
+ else
+ cfg &= ~S5P_CISCCTRL_SCALERSTART;
writel(cfg, dev->regs + S5P_CISCCTRL);
}
-static inline void fimc_hw_stop_scaler(struct fimc_dev *dev)
+static inline void fimc_hw_activate_input_dma(struct fimc_dev *dev, bool on)
{
- u32 cfg = readl(dev->regs + S5P_CISCCTRL);
- cfg &= ~S5P_CISCCTRL_SCALERSTART;
- writel(cfg, dev->regs + S5P_CISCCTRL);
+ u32 cfg = readl(dev->regs + S5P_MSCTRL);
+ if (on)
+ cfg |= S5P_MSCTRL_ENVID;
+ else
+ cfg &= ~S5P_MSCTRL_ENVID;
+ writel(cfg, dev->regs + S5P_MSCTRL);
}
static inline void fimc_hw_dis_capture(struct fimc_dev *dev)
@@ -418,27 +519,30 @@ static inline void fimc_hw_dis_capture(struct fimc_dev *dev)
writel(cfg, dev->regs + S5P_CIIMGCPT);
}
-static inline void fimc_hw_start_in_dma(struct fimc_dev *dev)
-{
- u32 cfg = readl(dev->regs + S5P_MSCTRL);
- cfg |= S5P_MSCTRL_ENVID;
- writel(cfg, dev->regs + S5P_MSCTRL);
-}
-
-static inline void fimc_hw_stop_in_dma(struct fimc_dev *dev)
+/**
+ * fimc_hw_set_dma_seq - configure output DMA buffer sequence
+ * @mask: each bit corresponds to one of the 32 output buffer address
+ * register sets; set a bit to 1 to include the buffer in the sequence,
+ * 0 to exclude it
+ *
+ * This function masks the output DMA ring buffers, i.e. it selects which
+ * of the output buffer address registers will be used by the DMA engine.
+ */
+static inline void fimc_hw_set_dma_seq(struct fimc_dev *dev, u32 mask)
{
- u32 cfg = readl(dev->regs + S5P_MSCTRL);
- cfg &= ~S5P_MSCTRL_ENVID;
- writel(cfg, dev->regs + S5P_MSCTRL);
+ writel(mask, dev->regs + S5P_CIFCNTSEQ);
}
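A usage sketch for the helper above; the probe path later calls it with 0xF when the hardware revision provides more than four output buffer registers, and the mask values here are illustrative only:

        fimc_hw_set_dma_seq(fimc, 0xF);                 /* use ring slots 0..3 */
        fimc_hw_set_dma_seq(fimc, BIT(0) | BIT(2));     /* skip slots 1 and 3 */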
-static inline struct fimc_frame *ctx_m2m_get_frame(struct fimc_ctx *ctx,
- enum v4l2_buf_type type)
+static inline struct fimc_frame *ctx_get_frame(struct fimc_ctx *ctx,
+ enum v4l2_buf_type type)
{
struct fimc_frame *frame;
if (V4L2_BUF_TYPE_VIDEO_OUTPUT == type) {
- frame = &ctx->s_frame;
+ if (ctx->state & FIMC_CTX_M2M)
+ frame = &ctx->s_frame;
+ else
+ return ERR_PTR(-EINVAL);
} else if (V4L2_BUF_TYPE_VIDEO_CAPTURE == type) {
frame = &ctx->d_frame;
} else {
@@ -450,22 +554,137 @@ static inline struct fimc_frame *ctx_m2m_get_frame(struct fimc_ctx *ctx,
return frame;
}
+static inline u32 fimc_hw_get_frame_index(struct fimc_dev *dev)
+{
+ u32 reg = readl(dev->regs + S5P_CISTATUS);
+ return (reg & S5P_CISTATUS_FRAMECNT_MASK) >>
+ S5P_CISTATUS_FRAMECNT_SHIFT;
+}
+
/* -----------------------------------------------------*/
/* fimc-reg.c */
-void fimc_hw_reset(struct fimc_dev *dev);
+void fimc_hw_reset(struct fimc_dev *fimc);
void fimc_hw_set_rotation(struct fimc_ctx *ctx);
void fimc_hw_set_target_format(struct fimc_ctx *ctx);
void fimc_hw_set_out_dma(struct fimc_ctx *ctx);
-void fimc_hw_en_lastirq(struct fimc_dev *dev, int enable);
-void fimc_hw_en_irq(struct fimc_dev *dev, int enable);
-void fimc_hw_set_prescaler(struct fimc_ctx *ctx);
+void fimc_hw_en_lastirq(struct fimc_dev *fimc, int enable);
+void fimc_hw_en_irq(struct fimc_dev *fimc, int enable);
void fimc_hw_set_scaler(struct fimc_ctx *ctx);
void fimc_hw_en_capture(struct fimc_ctx *ctx);
void fimc_hw_set_effect(struct fimc_ctx *ctx);
void fimc_hw_set_in_dma(struct fimc_ctx *ctx);
void fimc_hw_set_input_path(struct fimc_ctx *ctx);
void fimc_hw_set_output_path(struct fimc_ctx *ctx);
-void fimc_hw_set_input_addr(struct fimc_dev *dev, struct fimc_addr *paddr);
-void fimc_hw_set_output_addr(struct fimc_dev *dev, struct fimc_addr *paddr);
+void fimc_hw_set_input_addr(struct fimc_dev *fimc, struct fimc_addr *paddr);
+void fimc_hw_set_output_addr(struct fimc_dev *fimc, struct fimc_addr *paddr,
+ int index);
+int fimc_hw_set_camera_source(struct fimc_dev *fimc,
+ struct s3c_fimc_isp_info *cam);
+int fimc_hw_set_camera_offset(struct fimc_dev *fimc, struct fimc_frame *f);
+int fimc_hw_set_camera_polarity(struct fimc_dev *fimc,
+ struct s3c_fimc_isp_info *cam);
+int fimc_hw_set_camera_type(struct fimc_dev *fimc,
+ struct s3c_fimc_isp_info *cam);
+
+/* -----------------------------------------------------*/
+/* fimc-core.c */
+int fimc_vidioc_enum_fmt(struct file *file, void *priv,
+ struct v4l2_fmtdesc *f);
+int fimc_vidioc_g_fmt(struct file *file, void *priv,
+ struct v4l2_format *f);
+int fimc_vidioc_try_fmt(struct file *file, void *priv,
+ struct v4l2_format *f);
+int fimc_vidioc_g_crop(struct file *file, void *fh,
+ struct v4l2_crop *cr);
+int fimc_vidioc_cropcap(struct file *file, void *fh,
+ struct v4l2_cropcap *cr);
+int fimc_vidioc_queryctrl(struct file *file, void *priv,
+ struct v4l2_queryctrl *qc);
+int fimc_vidioc_g_ctrl(struct file *file, void *priv,
+ struct v4l2_control *ctrl);
+
+int fimc_try_crop(struct fimc_ctx *ctx, struct v4l2_crop *cr);
+int check_ctrl_val(struct fimc_ctx *ctx, struct v4l2_control *ctrl);
+int fimc_s_ctrl(struct fimc_ctx *ctx, struct v4l2_control *ctrl);
+
+struct fimc_fmt *find_format(struct v4l2_format *f, unsigned int mask);
+struct fimc_fmt *find_mbus_format(struct v4l2_mbus_framefmt *f,
+ unsigned int mask);
+
+int fimc_check_scaler_ratio(struct v4l2_rect *r, struct fimc_frame *f);
+int fimc_set_scaler_info(struct fimc_ctx *ctx);
+int fimc_prepare_config(struct fimc_ctx *ctx, u32 flags);
+int fimc_prepare_addr(struct fimc_ctx *ctx, struct fimc_vid_buffer *buf,
+ struct fimc_frame *frame, struct fimc_addr *paddr);
+
+/* -----------------------------------------------------*/
+/* fimc-capture.c */
+int fimc_register_capture_device(struct fimc_dev *fimc);
+void fimc_unregister_capture_device(struct fimc_dev *fimc);
+int fimc_sensor_sd_init(struct fimc_dev *fimc, int index);
+int fimc_vid_cap_buf_queue(struct fimc_dev *fimc,
+ struct fimc_vid_buffer *fimc_vb);
+
+/* Locking: the caller holds fimc->slock */
+static inline void fimc_activate_capture(struct fimc_ctx *ctx)
+{
+ fimc_hw_enable_scaler(ctx->fimc_dev, ctx->scaler.enabled);
+ fimc_hw_en_capture(ctx);
+}
+
+static inline void fimc_deactivate_capture(struct fimc_dev *fimc)
+{
+ fimc_hw_en_lastirq(fimc, true);
+ fimc_hw_dis_capture(fimc);
+ fimc_hw_enable_scaler(fimc, false);
+ fimc_hw_en_lastirq(fimc, false);
+}
+
+/*
+ * Add video buffer to the active buffers queue.
+ * The caller holds irqlock spinlock.
+ */
+static inline void active_queue_add(struct fimc_vid_cap *vid_cap,
+ struct fimc_vid_buffer *buf)
+{
+ buf->vb.state = VIDEOBUF_ACTIVE;
+ list_add_tail(&buf->vb.queue, &vid_cap->active_buf_q);
+ vid_cap->active_buf_cnt++;
+}
+
+/*
+ * Pop a video buffer from the capture active buffers queue
+ * Locking: needs to be called with dev->slock held.
+ */
+static inline struct fimc_vid_buffer *
+active_queue_pop(struct fimc_vid_cap *vid_cap)
+{
+ struct fimc_vid_buffer *buf;
+ buf = list_entry(vid_cap->active_buf_q.next,
+ struct fimc_vid_buffer, vb.queue);
+ list_del(&buf->vb.queue);
+ vid_cap->active_buf_cnt--;
+ return buf;
+}
+
+/* Add video buffer to the capture pending buffers queue */
+static inline void fimc_pending_queue_add(struct fimc_vid_cap *vid_cap,
+ struct fimc_vid_buffer *buf)
+{
+ buf->vb.state = VIDEOBUF_QUEUED;
+ list_add_tail(&buf->vb.queue, &vid_cap->pending_buf_q);
+}
+
+/* Remove a video buffer from the head of the capture pending buffers queue */
+static inline struct fimc_vid_buffer *
+pending_queue_pop(struct fimc_vid_cap *vid_cap)
+{
+ struct fimc_vid_buffer *buf;
+ buf = list_entry(vid_cap->pending_buf_q.next,
+ struct fimc_vid_buffer, vb.queue);
+ list_del(&buf->vb.queue);
+ return buf;
+}
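The queue helpers above are meant to be used together from the capture interrupt path; a minimal sketch of how a handler might rotate buffers with them (the function name and control flow are assumptions for illustration, the real handler lives in fimc-capture.c):

        /* Called with fimc->slock held; fimc_buf_finish() takes irqlock itself. */
        static void example_rotate_capture_buffers(struct fimc_dev *fimc)
        {
                struct fimc_vid_cap *cap = &fimc->vid_cap;
                struct fimc_vid_buffer *buf;

                if (!list_empty(&cap->active_buf_q)) {
                        buf = active_queue_pop(cap);
                        fimc_buf_finish(fimc, buf);     /* hand the completed frame to userspace */
                }
                if (!list_empty(&cap->pending_buf_q)) {
                        buf = pending_queue_pop(cap);
                        fimc_hw_set_output_addr(fimc, &buf->paddr, buf->index);
                        active_queue_add(cap, buf);     /* buffer is now owned by the DMA engine */
                }
        }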
+
#endif /* FIMC_CORE_H_ */
diff --git a/drivers/media/video/s5p-fimc/fimc-reg.c b/drivers/media/video/s5p-fimc/fimc-reg.c
index 5570f1ce0c9c..511631a2e5c3 100644
--- a/drivers/media/video/s5p-fimc/fimc-reg.c
+++ b/drivers/media/video/s5p-fimc/fimc-reg.c
@@ -13,6 +13,7 @@
#include <linux/io.h>
#include <linux/delay.h>
#include <mach/map.h>
+#include <media/s3c_fimc.h>
#include "fimc-core.h"
@@ -29,49 +30,11 @@ void fimc_hw_reset(struct fimc_dev *dev)
cfg = readl(dev->regs + S5P_CIGCTRL);
cfg |= (S5P_CIGCTRL_SWRST | S5P_CIGCTRL_IRQ_LEVEL);
writel(cfg, dev->regs + S5P_CIGCTRL);
- msleep(1);
+ udelay(1000);
cfg = readl(dev->regs + S5P_CIGCTRL);
cfg &= ~S5P_CIGCTRL_SWRST;
writel(cfg, dev->regs + S5P_CIGCTRL);
-
-}
-
-void fimc_hw_set_rotation(struct fimc_ctx *ctx)
-{
- u32 cfg, flip;
- struct fimc_dev *dev = ctx->fimc_dev;
-
- cfg = readl(dev->regs + S5P_CITRGFMT);
- cfg &= ~(S5P_CITRGFMT_INROT90 | S5P_CITRGFMT_OUTROT90);
-
- flip = readl(dev->regs + S5P_MSCTRL);
- flip &= ~S5P_MSCTRL_FLIP_MASK;
-
- /*
- * The input and output rotator cannot work simultaneously.
- * Use the output rotator in output DMA mode or the input rotator
- * in direct fifo output mode.
- */
- if (ctx->rotation == 90 || ctx->rotation == 270) {
- if (ctx->out_path == FIMC_LCDFIFO) {
- cfg |= S5P_CITRGFMT_INROT90;
- if (ctx->rotation == 270)
- flip |= S5P_MSCTRL_FLIP_180;
- } else {
- cfg |= S5P_CITRGFMT_OUTROT90;
- if (ctx->rotation == 270)
- cfg |= S5P_CITRGFMT_FLIP_180;
- }
- } else if (ctx->rotation == 180) {
- if (ctx->out_path == FIMC_LCDFIFO)
- flip |= S5P_MSCTRL_FLIP_180;
- else
- cfg |= S5P_CITRGFMT_FLIP_180;
- }
- if (ctx->rotation == 180 || ctx->rotation == 270)
- writel(flip, dev->regs + S5P_MSCTRL);
- writel(cfg, dev->regs + S5P_CITRGFMT);
}
static u32 fimc_hw_get_in_flip(u32 ctx_flip)
@@ -114,6 +77,46 @@ static u32 fimc_hw_get_target_flip(u32 ctx_flip)
return flip;
}
+void fimc_hw_set_rotation(struct fimc_ctx *ctx)
+{
+ u32 cfg, flip;
+ struct fimc_dev *dev = ctx->fimc_dev;
+
+ cfg = readl(dev->regs + S5P_CITRGFMT);
+ cfg &= ~(S5P_CITRGFMT_INROT90 | S5P_CITRGFMT_OUTROT90 |
+ S5P_CITRGFMT_FLIP_180);
+
+ flip = readl(dev->regs + S5P_MSCTRL);
+ flip &= ~S5P_MSCTRL_FLIP_MASK;
+
+ /*
+ * The input and output rotator cannot work simultaneously.
+ * Use the output rotator in output DMA mode or the input rotator
+ * in direct fifo output mode.
+ */
+ if (ctx->rotation == 90 || ctx->rotation == 270) {
+ if (ctx->out_path == FIMC_LCDFIFO) {
+ cfg |= S5P_CITRGFMT_INROT90;
+ if (ctx->rotation == 270)
+ flip |= S5P_MSCTRL_FLIP_180;
+ } else {
+ cfg |= S5P_CITRGFMT_OUTROT90;
+ if (ctx->rotation == 270)
+ cfg |= S5P_CITRGFMT_FLIP_180;
+ }
+ } else if (ctx->rotation == 180) {
+ if (ctx->out_path == FIMC_LCDFIFO)
+ flip |= S5P_MSCTRL_FLIP_180;
+ else
+ cfg |= S5P_CITRGFMT_FLIP_180;
+ }
+ if (ctx->rotation == 180 || ctx->rotation == 270)
+ writel(flip, dev->regs + S5P_MSCTRL);
+
+ cfg |= fimc_hw_get_target_flip(ctx->flip);
+ writel(cfg, dev->regs + S5P_CITRGFMT);
+}
+
void fimc_hw_set_target_format(struct fimc_ctx *ctx)
{
u32 cfg;
@@ -149,13 +152,15 @@ void fimc_hw_set_target_format(struct fimc_ctx *ctx)
break;
}
- cfg |= S5P_CITRGFMT_HSIZE(frame->width);
- cfg |= S5P_CITRGFMT_VSIZE(frame->height);
+ if (ctx->rotation == 90 || ctx->rotation == 270) {
+ cfg |= S5P_CITRGFMT_HSIZE(frame->height);
+ cfg |= S5P_CITRGFMT_VSIZE(frame->width);
+ } else {
- if (ctx->rotation == 0) {
- cfg &= ~S5P_CITRGFMT_FLIP_MASK;
- cfg |= fimc_hw_get_target_flip(ctx->flip);
+ cfg |= S5P_CITRGFMT_HSIZE(frame->width);
+ cfg |= S5P_CITRGFMT_VSIZE(frame->height);
}
+
writel(cfg, dev->regs + S5P_CITRGFMT);
cfg = readl(dev->regs + S5P_CITAREA) & ~S5P_CITAREA_MASK;
@@ -167,16 +172,20 @@ static void fimc_hw_set_out_dma_size(struct fimc_ctx *ctx)
{
struct fimc_dev *dev = ctx->fimc_dev;
struct fimc_frame *frame = &ctx->d_frame;
- u32 cfg = 0;
+ u32 cfg;
- if (ctx->rotation == 90 || ctx->rotation == 270) {
- cfg |= S5P_ORIG_SIZE_HOR(frame->f_height);
- cfg |= S5P_ORIG_SIZE_VER(frame->f_width);
- } else {
- cfg |= S5P_ORIG_SIZE_HOR(frame->f_width);
- cfg |= S5P_ORIG_SIZE_VER(frame->f_height);
- }
+ cfg = S5P_ORIG_SIZE_HOR(frame->f_width);
+ cfg |= S5P_ORIG_SIZE_VER(frame->f_height);
writel(cfg, dev->regs + S5P_ORGOSIZE);
+
+ /* Select color space conversion equation (HD/SD size). */
+ cfg = readl(dev->regs + S5P_CIGCTRL);
+ if (frame->f_width >= 1280) /* HD */
+ cfg |= S5P_CIGCTRL_CSC_ITU601_709;
+ else /* SD */
+ cfg &= ~S5P_CIGCTRL_CSC_ITU601_709;
+ writel(cfg, dev->regs + S5P_CIGCTRL);
+
}
void fimc_hw_set_out_dma(struct fimc_ctx *ctx)
@@ -232,36 +241,28 @@ static void fimc_hw_en_autoload(struct fimc_dev *dev, int enable)
void fimc_hw_en_lastirq(struct fimc_dev *dev, int enable)
{
- unsigned long flags;
- u32 cfg;
-
- spin_lock_irqsave(&dev->slock, flags);
-
- cfg = readl(dev->regs + S5P_CIOCTRL);
+ u32 cfg = readl(dev->regs + S5P_CIOCTRL);
if (enable)
cfg |= S5P_CIOCTRL_LASTIRQ_ENABLE;
else
cfg &= ~S5P_CIOCTRL_LASTIRQ_ENABLE;
writel(cfg, dev->regs + S5P_CIOCTRL);
-
- spin_unlock_irqrestore(&dev->slock, flags);
}
-void fimc_hw_set_prescaler(struct fimc_ctx *ctx)
+static void fimc_hw_set_prescaler(struct fimc_ctx *ctx)
{
struct fimc_dev *dev = ctx->fimc_dev;
struct fimc_scaler *sc = &ctx->scaler;
- u32 cfg = 0, shfactor;
+ u32 cfg, shfactor;
shfactor = 10 - (sc->hfactor + sc->vfactor);
- cfg |= S5P_CISCPRERATIO_SHFACTOR(shfactor);
+ cfg = S5P_CISCPRERATIO_SHFACTOR(shfactor);
cfg |= S5P_CISCPRERATIO_HOR(sc->pre_hratio);
cfg |= S5P_CISCPRERATIO_VER(sc->pre_vratio);
writel(cfg, dev->regs + S5P_CISCPRERATIO);
- cfg = 0;
- cfg |= S5P_CISCPREDST_WIDTH(sc->pre_dst_width);
+ cfg = S5P_CISCPREDST_WIDTH(sc->pre_dst_width);
cfg |= S5P_CISCPREDST_HEIGHT(sc->pre_dst_height);
writel(cfg, dev->regs + S5P_CISCPREDST);
}
@@ -274,6 +275,8 @@ void fimc_hw_set_scaler(struct fimc_ctx *ctx)
struct fimc_frame *dst_frame = &ctx->d_frame;
u32 cfg = 0;
+ fimc_hw_set_prescaler(ctx);
+
if (!(ctx->flags & FIMC_COLOR_RANGE_NARROW))
cfg |= (S5P_CISCCTRL_CSCR2Y_WIDE | S5P_CISCCTRL_CSCY2R_WIDE);
@@ -325,14 +328,18 @@ void fimc_hw_set_scaler(struct fimc_ctx *ctx)
void fimc_hw_en_capture(struct fimc_ctx *ctx)
{
struct fimc_dev *dev = ctx->fimc_dev;
- u32 cfg;
- cfg = readl(dev->regs + S5P_CIIMGCPT);
- /* One shot mode for output DMA or freerun for FIFO. */
- if (ctx->out_path == FIMC_DMA)
- cfg |= S5P_CIIMGCPT_CPT_FREN_ENABLE;
- else
- cfg &= ~S5P_CIIMGCPT_CPT_FREN_ENABLE;
+ u32 cfg = readl(dev->regs + S5P_CIIMGCPT);
+
+ if (ctx->out_path == FIMC_DMA) {
+ /* one shot mode */
+ cfg |= S5P_CIIMGCPT_CPT_FREN_ENABLE | S5P_CIIMGCPT_IMGCPTEN;
+ } else {
+ /* Continuous frame capture mode (freerun). */
+ cfg &= ~(S5P_CIIMGCPT_CPT_FREN_ENABLE |
+ S5P_CIIMGCPT_CPT_FRMOD_CNT);
+ cfg |= S5P_CIIMGCPT_IMGCPTEN;
+ }
if (ctx->scaler.enabled)
cfg |= S5P_CIIMGCPT_IMGCPTEN_SC;
@@ -364,7 +371,7 @@ static void fimc_hw_set_in_dma_size(struct fimc_ctx *ctx)
u32 cfg_r = 0;
if (FIMC_LCDFIFO == ctx->out_path)
- cfg_r |= S5P_CIREAL_ISIZE_AUTOLOAD_EN;
+ cfg_r |= S5P_CIREAL_ISIZE_AUTOLOAD_EN;
cfg_o |= S5P_ORIG_SIZE_HOR(frame->f_width);
cfg_o |= S5P_ORIG_SIZE_VER(frame->f_height);
@@ -380,27 +387,25 @@ void fimc_hw_set_in_dma(struct fimc_ctx *ctx)
struct fimc_dev *dev = ctx->fimc_dev;
struct fimc_frame *frame = &ctx->s_frame;
struct fimc_dma_offset *offset = &frame->dma_offset;
- u32 cfg = 0;
+ u32 cfg;
/* Set the pixel offsets. */
- cfg |= S5P_CIO_OFFS_HOR(offset->y_h);
+ cfg = S5P_CIO_OFFS_HOR(offset->y_h);
cfg |= S5P_CIO_OFFS_VER(offset->y_v);
writel(cfg, dev->regs + S5P_CIIYOFF);
- cfg = 0;
- cfg |= S5P_CIO_OFFS_HOR(offset->cb_h);
+ cfg = S5P_CIO_OFFS_HOR(offset->cb_h);
cfg |= S5P_CIO_OFFS_VER(offset->cb_v);
writel(cfg, dev->regs + S5P_CIICBOFF);
- cfg = 0;
- cfg |= S5P_CIO_OFFS_HOR(offset->cr_h);
+ cfg = S5P_CIO_OFFS_HOR(offset->cr_h);
cfg |= S5P_CIO_OFFS_VER(offset->cr_v);
writel(cfg, dev->regs + S5P_CIICROFF);
/* Input original and real size. */
fimc_hw_set_in_dma_size(ctx);
- /* Autoload is used currently only in FIFO mode. */
+ /* Use DMA autoload only in FIFO mode. */
fimc_hw_en_autoload(dev, ctx->out_path == FIMC_LCDFIFO);
/* Set the input DMA to process single frame only. */
@@ -501,27 +506,163 @@ void fimc_hw_set_output_path(struct fimc_ctx *ctx)
void fimc_hw_set_input_addr(struct fimc_dev *dev, struct fimc_addr *paddr)
{
- u32 cfg = 0;
-
- cfg = readl(dev->regs + S5P_CIREAL_ISIZE);
+ u32 cfg = readl(dev->regs + S5P_CIREAL_ISIZE);
cfg |= S5P_CIREAL_ISIZE_ADDR_CH_DIS;
writel(cfg, dev->regs + S5P_CIREAL_ISIZE);
- writel(paddr->y, dev->regs + S5P_CIIYSA0);
- writel(paddr->cb, dev->regs + S5P_CIICBSA0);
- writel(paddr->cr, dev->regs + S5P_CIICRSA0);
+ writel(paddr->y, dev->regs + S5P_CIIYSA(0));
+ writel(paddr->cb, dev->regs + S5P_CIICBSA(0));
+ writel(paddr->cr, dev->regs + S5P_CIICRSA(0));
cfg &= ~S5P_CIREAL_ISIZE_ADDR_CH_DIS;
writel(cfg, dev->regs + S5P_CIREAL_ISIZE);
}
-void fimc_hw_set_output_addr(struct fimc_dev *dev, struct fimc_addr *paddr)
+void fimc_hw_set_output_addr(struct fimc_dev *dev,
+ struct fimc_addr *paddr, int index)
{
- int i;
- /* Set all the output register sets to point to single video buffer. */
- for (i = 0; i < FIMC_MAX_OUT_BUFS; i++) {
+ int i = (index == -1) ? 0 : index;
+ do {
writel(paddr->y, dev->regs + S5P_CIOYSA(i));
writel(paddr->cb, dev->regs + S5P_CIOCBSA(i));
writel(paddr->cr, dev->regs + S5P_CIOCRSA(i));
+ dbg("dst_buf[%d]: 0x%X, cb: 0x%X, cr: 0x%X",
+ i, paddr->y, paddr->cb, paddr->cr);
+ } while (index == -1 && ++i < FIMC_MAX_OUT_BUFS);
+}
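
For illustration only (not part of this patch), the new index argument selects which output DMA register set gets programmed: a non-negative value updates just that slot, while -1 preserves the old behaviour of pointing every slot at the same buffer. A hypothetical caller might look like this:

	/* sketch only -- the caller context is assumed, not taken from the patch */
	fimc_hw_set_output_addr(dev, &paddr, -1);	/* all FIMC_MAX_OUT_BUFS slots */
	fimc_hw_set_output_addr(dev, &paddr, 2);	/* only buffer slot 2 */
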
+
+int fimc_hw_set_camera_polarity(struct fimc_dev *fimc,
+ struct s3c_fimc_isp_info *cam)
+{
+ u32 cfg = readl(fimc->regs + S5P_CIGCTRL);
+
+ cfg &= ~(S5P_CIGCTRL_INVPOLPCLK | S5P_CIGCTRL_INVPOLVSYNC |
+ S5P_CIGCTRL_INVPOLHREF | S5P_CIGCTRL_INVPOLHSYNC);
+
+ if (cam->flags & FIMC_CLK_INV_PCLK)
+ cfg |= S5P_CIGCTRL_INVPOLPCLK;
+
+ if (cam->flags & FIMC_CLK_INV_VSYNC)
+ cfg |= S5P_CIGCTRL_INVPOLVSYNC;
+
+ if (cam->flags & FIMC_CLK_INV_HREF)
+ cfg |= S5P_CIGCTRL_INVPOLHREF;
+
+ if (cam->flags & FIMC_CLK_INV_HSYNC)
+ cfg |= S5P_CIGCTRL_INVPOLHSYNC;
+
+ writel(cfg, fimc->regs + S5P_CIGCTRL);
+
+ return 0;
+}
+
+int fimc_hw_set_camera_source(struct fimc_dev *fimc,
+ struct s3c_fimc_isp_info *cam)
+{
+ struct fimc_frame *f = &fimc->vid_cap.ctx->s_frame;
+ u32 cfg = 0;
+
+ if (cam->bus_type == FIMC_ITU_601 || cam->bus_type == FIMC_ITU_656) {
+
+ switch (fimc->vid_cap.fmt.code) {
+ case V4L2_MBUS_FMT_YUYV8_2X8:
+ cfg = S5P_CISRCFMT_ORDER422_YCBYCR;
+ break;
+ case V4L2_MBUS_FMT_YVYU8_2X8:
+ cfg = S5P_CISRCFMT_ORDER422_YCRYCB;
+ break;
+ case V4L2_MBUS_FMT_VYUY8_2X8:
+ cfg = S5P_CISRCFMT_ORDER422_CRYCBY;
+ break;
+ case V4L2_MBUS_FMT_UYVY8_2X8:
+ cfg = S5P_CISRCFMT_ORDER422_CBYCRY;
+ break;
+ default:
+ err("camera image format not supported: %d",
+ fimc->vid_cap.fmt.code);
+ return -EINVAL;
+ }
+
+ if (cam->bus_type == FIMC_ITU_601) {
+ if (cam->bus_width == 8) {
+ cfg |= S5P_CISRCFMT_ITU601_8BIT;
+ } else if (cam->bus_width == 16) {
+ cfg |= S5P_CISRCFMT_ITU601_16BIT;
+ } else {
+ err("invalid bus width: %d", cam->bus_width);
+ return -EINVAL;
+ }
+ } /* else defaults to ITU-R BT.656 8-bit */
}
+
+ cfg |= S5P_CISRCFMT_HSIZE(f->o_width) | S5P_CISRCFMT_VSIZE(f->o_height);
+ writel(cfg, fimc->regs + S5P_CISRCFMT);
+ return 0;
+}
+
+
+int fimc_hw_set_camera_offset(struct fimc_dev *fimc, struct fimc_frame *f)
+{
+ u32 hoff2, voff2;
+
+ u32 cfg = readl(fimc->regs + S5P_CIWDOFST);
+
+ cfg &= ~(S5P_CIWDOFST_HOROFF_MASK | S5P_CIWDOFST_VEROFF_MASK);
+ cfg |= S5P_CIWDOFST_OFF_EN |
+ S5P_CIWDOFST_HOROFF(f->offs_h) |
+ S5P_CIWDOFST_VEROFF(f->offs_v);
+
+ writel(cfg, fimc->regs + S5P_CIWDOFST);
+
+ /* See CIWDOFSTn register description in the datasheet for details. */
+ hoff2 = f->o_width - f->width - f->offs_h;
+ voff2 = f->o_height - f->height - f->offs_v;
+ cfg = S5P_CIWDOFST2_HOROFF(hoff2) | S5P_CIWDOFST2_VEROFF(voff2);
+
+ writel(cfg, fimc->regs + S5P_CIWDOFST2);
+ return 0;
+}
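
A quick worked example of the offset arithmetic above (numbers invented for illustration): for a 1280x960 source frame with a 640x480 crop window at offset (320, 240), hoff2 = 1280 - 640 - 320 = 320 and voff2 = 960 - 480 - 240 = 240. CIWDOFST therefore carries the left/top margins of the crop window and CIWDOFST2 the right/bottom margins.
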
+
+int fimc_hw_set_camera_type(struct fimc_dev *fimc,
+ struct s3c_fimc_isp_info *cam)
+{
+ u32 cfg, tmp;
+ struct fimc_vid_cap *vid_cap = &fimc->vid_cap;
+
+ cfg = readl(fimc->regs + S5P_CIGCTRL);
+
+ /* Select ITU B interface, disable Writeback path and test pattern. */
+ cfg &= ~(S5P_CIGCTRL_TESTPAT_MASK | S5P_CIGCTRL_SELCAM_ITU_A |
+ S5P_CIGCTRL_SELCAM_MIPI | S5P_CIGCTRL_CAMIF_SELWB |
+ S5P_CIGCTRL_SELCAM_MIPI_A);
+
+ if (cam->bus_type == FIMC_MIPI_CSI2) {
+ cfg |= S5P_CIGCTRL_SELCAM_MIPI;
+
+ if (cam->mux_id == 0)
+ cfg |= S5P_CIGCTRL_SELCAM_MIPI_A;
+
+ /* TODO: add remaining supported formats. */
+ if (vid_cap->fmt.code == V4L2_MBUS_FMT_VYUY8_2X8) {
+ tmp = S5P_CSIIMGFMT_YCBCR422_8BIT;
+ } else {
+ err("camera image format not supported: %d",
+ vid_cap->fmt.code);
+ return -EINVAL;
+ }
+ writel(tmp | (0x1 << 8), fimc->regs + S5P_CSIIMGFMT);
+
+ } else if (cam->bus_type == FIMC_ITU_601 ||
+ cam->bus_type == FIMC_ITU_656) {
+ if (cam->mux_id == 0) /* ITU-A, ITU-B: 0, 1 */
+ cfg |= S5P_CIGCTRL_SELCAM_ITU_A;
+ } else if (cam->bus_type == FIMC_LCD_WB) {
+ cfg |= S5P_CIGCTRL_CAMIF_SELWB;
+ } else {
+ err("invalid camera bus type selected\n");
+ return -EINVAL;
+ }
+ writel(cfg, fimc->regs + S5P_CIGCTRL);
+
+ return 0;
}
diff --git a/drivers/media/video/s5p-fimc/regs-fimc.h b/drivers/media/video/s5p-fimc/regs-fimc.h
index a3cfe824db00..a57daedb5b5c 100644
--- a/drivers/media/video/s5p-fimc/regs-fimc.h
+++ b/drivers/media/video/s5p-fimc/regs-fimc.h
@@ -11,10 +11,6 @@
#ifndef REGS_FIMC_H_
#define REGS_FIMC_H_
-#define S5P_CIOYSA(__x) (0x18 + (__x) * 4)
-#define S5P_CIOCBSA(__x) (0x28 + (__x) * 4)
-#define S5P_CIOCRSA(__x) (0x38 + (__x) * 4)
-
/* Input source format */
#define S5P_CISRCFMT 0x00
#define S5P_CISRCFMT_ITU601_8BIT (1 << 31)
@@ -28,22 +24,21 @@
/* Window offset */
#define S5P_CIWDOFST 0x04
-#define S5P_CIWDOFST_WINOFSEN (1 << 31)
+#define S5P_CIWDOFST_OFF_EN (1 << 31)
#define S5P_CIWDOFST_CLROVFIY (1 << 30)
#define S5P_CIWDOFST_CLROVRLB (1 << 29)
-#define S5P_CIWDOFST_WINHOROFST_MASK (0x7ff << 16)
+#define S5P_CIWDOFST_HOROFF_MASK (0x7ff << 16)
#define S5P_CIWDOFST_CLROVFICB (1 << 15)
#define S5P_CIWDOFST_CLROVFICR (1 << 14)
-#define S5P_CIWDOFST_WINHOROFST(x) ((x) << 16)
-#define S5P_CIWDOFST_WINVEROFST(x) ((x) << 0)
-#define S5P_CIWDOFST_WINVEROFST_MASK (0xfff << 0)
+#define S5P_CIWDOFST_HOROFF(x) ((x) << 16)
+#define S5P_CIWDOFST_VEROFF(x) ((x) << 0)
+#define S5P_CIWDOFST_VEROFF_MASK (0xfff << 0)
/* Global control */
#define S5P_CIGCTRL 0x08
#define S5P_CIGCTRL_SWRST (1 << 31)
#define S5P_CIGCTRL_CAMRST_A (1 << 30)
#define S5P_CIGCTRL_SELCAM_ITU_A (1 << 29)
-#define S5P_CIGCTRL_SELCAM_ITU_MASK (1 << 29)
#define S5P_CIGCTRL_TESTPAT_NORMAL (0 << 27)
#define S5P_CIGCTRL_TESTPAT_COLOR_BAR (1 << 27)
#define S5P_CIGCTRL_TESTPAT_HOR_INC (2 << 27)
@@ -61,6 +56,8 @@
#define S5P_CIGCTRL_SHDW_DISABLE (1 << 12)
#define S5P_CIGCTRL_SELCAM_MIPI_A (1 << 7)
#define S5P_CIGCTRL_CAMIF_SELWB (1 << 6)
+/* 0 - ITU601; 1 - ITU709 */
+#define S5P_CIGCTRL_CSC_ITU601_709 (1 << 5)
#define S5P_CIGCTRL_INVPOLHSYNC (1 << 4)
#define S5P_CIGCTRL_SELCAM_MIPI (1 << 3)
#define S5P_CIGCTRL_INTERLACE (1 << 0)
@@ -72,23 +69,10 @@
#define S5P_CIWDOFST2_HOROFF(x) ((x) << 16)
#define S5P_CIWDOFST2_VEROFF(x) ((x) << 0)
-/* Output DMA Y plane start address */
-#define S5P_CIOYSA1 0x18
-#define S5P_CIOYSA2 0x1c
-#define S5P_CIOYSA3 0x20
-#define S5P_CIOYSA4 0x24
-
-/* Output DMA Cb plane start address */
-#define S5P_CIOCBSA1 0x28
-#define S5P_CIOCBSA2 0x2c
-#define S5P_CIOCBSA3 0x30
-#define S5P_CIOCBSA4 0x34
-
-/* Output DMA Cr plane start address */
-#define S5P_CIOCRSA1 0x38
-#define S5P_CIOCRSA2 0x3c
-#define S5P_CIOCRSA3 0x40
-#define S5P_CIOCRSA4 0x44
+/* Output DMA Y/Cb/Cr plane start addresses */
+#define S5P_CIOYSA(n) (0x18 + (n) * 4)
+#define S5P_CIOCBSA(n) (0x28 + (n) * 4)
+#define S5P_CIOCRSA(n) (0x38 + (n) * 4)
/* Target image format */
#define S5P_CITRGFMT 0x48
@@ -168,6 +152,8 @@
#define S5P_CISTATUS_OVFICB (1 << 30)
#define S5P_CISTATUS_OVFICR (1 << 29)
#define S5P_CISTATUS_VSYNC (1 << 28)
+#define S5P_CISTATUS_FRAMECNT_MASK (3 << 26)
+#define S5P_CISTATUS_FRAMECNT_SHIFT 26
#define S5P_CISTATUS_WINOFF_EN (1 << 25)
#define S5P_CISTATUS_IMGCPT_EN (1 << 22)
#define S5P_CISTATUS_IMGCPT_SCEN (1 << 21)
@@ -206,10 +192,10 @@
#define S5P_CIIMGEFF_PAT_CB(x) ((x) << 13)
#define S5P_CIIMGEFF_PAT_CR(x) ((x) << 0)
-/* Input DMA Y/Cb/Cr plane start address 0 */
-#define S5P_CIIYSA0 0xd4
-#define S5P_CIICBSA0 0xd8
-#define S5P_CIICRSA0 0xdc
+/* Input DMA Y/Cb/Cr plane start address 0/1 */
+#define S5P_CIIYSA(n) (0xd4 + (n) * 0x70)
+#define S5P_CIICBSA(n) (0xd8 + (n) * 0x70)
+#define S5P_CIICRSA(n) (0xdc + (n) * 0x70)
/* Real input DMA image size */
#define S5P_CIREAL_ISIZE 0xf8
@@ -250,11 +236,6 @@
#define S5P_MSCTRL_ENVID (1 << 0)
#define S5P_MSCTRL_FRAME_COUNT(x) ((x) << 24)
-/* Input DMA Y/Cb/Cr plane start address 1 */
-#define S5P_CIIYSA1 0x144
-#define S5P_CIICBSA1 0x148
-#define S5P_CIICRSA1 0x14c
-
/* Output DMA Y/Cb/Cr offset */
#define S5P_CIOYOFF 0x168
#define S5P_CIOCBOFF 0x16c
@@ -289,5 +270,16 @@
/* MIPI CSI image format */
#define S5P_CSIIMGFMT 0x194
+#define S5P_CSIIMGFMT_YCBCR422_8BIT 0x1e
+#define S5P_CSIIMGFMT_RAW8 0x2a
+#define S5P_CSIIMGFMT_RAW10 0x2b
+#define S5P_CSIIMGFMT_RAW12 0x2c
+#define S5P_CSIIMGFMT_USER1 0x30
+#define S5P_CSIIMGFMT_USER2 0x31
+#define S5P_CSIIMGFMT_USER3 0x32
+#define S5P_CSIIMGFMT_USER4 0x33
+
+/* Output frame buffer sequence mask */
+#define S5P_CIFCNTSEQ 0x1FC
#endif /* REGS_FIMC_H_ */
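
The parameterised macros can be checked against the fixed offsets they replace: S5P_CIOYSA(0) = 0x18 + 0 * 4 = 0x18 and S5P_CIOYSA(3) = 0x24 reproduce the removed S5P_CIOYSA1..S5P_CIOYSA4 values, while on the input side S5P_CIIYSA(0) = 0xd4 matches the old S5P_CIIYSA0 and S5P_CIIYSA(1) = 0xd4 + 0x70 = 0x144 matches the old S5P_CIIYSA1, so the 0x70 stride spans the gap between the two input address banks.
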
diff --git a/drivers/media/video/saa5246a.c b/drivers/media/video/saa5246a.c
deleted file mode 100644
index 6b3b09ef8978..000000000000
--- a/drivers/media/video/saa5246a.c
+++ /dev/null
@@ -1,1123 +0,0 @@
-/*
- * Driver for the SAA5246A or SAA5281 Teletext (=Videotext) decoder chips from
- * Philips.
- *
- * Only capturing of Teletext pages is tested. The videotext chips also have a
- * TV output but my hardware doesn't use it. For this reason this driver does
- * not support changing any TV display settings.
- *
- * Copyright (C) 2004 Michael Geng <linux@MichaelGeng.de>
- *
- * Derived from
- *
- * saa5249 driver
- * Copyright (C) 1998 Richard Guenther
- * <richard.guenther@student.uni-tuebingen.de>
- *
- * with changes by
- * Alan Cox <alan@lxorguk.ukuu.org.uk>
- *
- * and
- *
- * vtx.c
- * Copyright (C) 1994-97 Martin Buck <martin-2.buck@student.uni-ulm.de>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307,
- * USA.
- */
-
-#include <linux/module.h>
-#include <linux/kernel.h>
-#include <linux/mm.h>
-#include <linux/init.h>
-#include <linux/i2c.h>
-#include <linux/slab.h>
-#include <linux/mutex.h>
-#include <linux/videotext.h>
-#include <linux/videodev2.h>
-#include <media/v4l2-device.h>
-#include <media/v4l2-chip-ident.h>
-#include <media/v4l2-ioctl.h>
-#include <media/v4l2-i2c-drv.h>
-
-MODULE_AUTHOR("Michael Geng <linux@MichaelGeng.de>");
-MODULE_DESCRIPTION("Philips SAA5246A, SAA5281 Teletext decoder driver");
-MODULE_LICENSE("GPL");
-
-#define MAJOR_VERSION 1 /* driver major version number */
-#define MINOR_VERSION 8 /* driver minor version number */
-
-/* Number of DAUs = number of pages that can be searched at the same time. */
-#define NUM_DAUS 4
-
-#define NUM_ROWS_PER_PAGE 40
-
-/* first column is 0 (not 1) */
-#define POS_TIME_START 32
-#define POS_TIME_END 39
-
-#define POS_HEADER_START 7
-#define POS_HEADER_END 31
-
-/* Returns 'true' if the part of the videotext page described with req contains
- (at least parts of) the time field */
-#define REQ_CONTAINS_TIME(p_req) \
- ((p_req)->start <= POS_TIME_END && \
- (p_req)->end >= POS_TIME_START)
-
-/* Returns 'true' if the part of the videotext page described with req contains
- (at least parts of) the page header */
-#define REQ_CONTAINS_HEADER(p_req) \
- ((p_req)->start <= POS_HEADER_END && \
- (p_req)->end >= POS_HEADER_START)
-
-/*****************************************************************************/
-/* Mode register numbers of the SAA5246A */
-/*****************************************************************************/
-#define SAA5246A_REGISTER_R0 0
-#define SAA5246A_REGISTER_R1 1
-#define SAA5246A_REGISTER_R2 2
-#define SAA5246A_REGISTER_R3 3
-#define SAA5246A_REGISTER_R4 4
-#define SAA5246A_REGISTER_R5 5
-#define SAA5246A_REGISTER_R6 6
-#define SAA5246A_REGISTER_R7 7
-#define SAA5246A_REGISTER_R8 8
-#define SAA5246A_REGISTER_R9 9
-#define SAA5246A_REGISTER_R10 10
-#define SAA5246A_REGISTER_R11 11
-#define SAA5246A_REGISTER_R11B 11
-
-/* SAA5246A mode registers often autoincrement to the next register.
- Therefore we use variable argument lists. The following macro indicates
- the end of a command list. */
-#define COMMAND_END (-1)
-
-/*****************************************************************************/
-/* Contents of the mode registers of the SAA5246A */
-/*****************************************************************************/
-/* Register R0 (Advanced Control) */
-#define R0_SELECT_R11 0x00
-#define R0_SELECT_R11B 0x01
-
-#define R0_PLL_TIME_CONSTANT_LONG 0x00
-#define R0_PLL_TIME_CONSTANT_SHORT 0x02
-
-#define R0_ENABLE_nODD_EVEN_OUTPUT 0x00
-#define R0_DISABLE_nODD_EVEN_OUTPUT 0x04
-
-#define R0_ENABLE_HDR_POLL 0x00
-#define R0_DISABLE_HDR_POLL 0x10
-
-#define R0_DO_NOT_FORCE_nODD_EVEN_LOW_IF_PICTURE_DISPLAYED 0x00
-#define R0_FORCE_nODD_EVEN_LOW_IF_PICTURE_DISPLAYED 0x20
-
-#define R0_NO_FREE_RUN_PLL 0x00
-#define R0_FREE_RUN_PLL 0x40
-
-#define R0_NO_AUTOMATIC_FASTEXT_PROMPT 0x00
-#define R0_AUTOMATIC_FASTEXT_PROMPT 0x80
-
-/* Register R1 (Mode) */
-#define R1_INTERLACED_312_AND_HALF_312_AND_HALF_LINES 0x00
-#define R1_NON_INTERLACED_312_313_LINES 0x01
-#define R1_NON_INTERLACED_312_312_LINES 0x02
-#define R1_FFB_LEADING_EDGE_IN_FIRST_BROAD_PULSE 0x03
-#define R1_FFB_LEADING_EDGE_IN_SECOND_BROAD_PULSE 0x07
-
-#define R1_DEW 0x00
-#define R1_FULL_FIELD 0x08
-
-#define R1_EXTENDED_PACKET_DISABLE 0x00
-#define R1_EXTENDED_PACKET_ENABLE 0x10
-
-#define R1_DAUS_ALL_ON 0x00
-#define R1_DAUS_ALL_OFF 0x20
-
-#define R1_7_BITS_PLUS_PARITY 0x00
-#define R1_8_BITS_NO_PARITY 0x40
-
-#define R1_VCS_TO_SCS 0x00
-#define R1_NO_VCS_TO_SCS 0x80
-
-/* Register R2 (Page request address) */
-#define R2_IN_R3_SELECT_PAGE_HUNDREDS 0x00
-#define R2_IN_R3_SELECT_PAGE_TENS 0x01
-#define R2_IN_R3_SELECT_PAGE_UNITS 0x02
-#define R2_IN_R3_SELECT_HOURS_TENS 0x03
-#define R2_IN_R3_SELECT_HOURS_UNITS 0x04
-#define R2_IN_R3_SELECT_MINUTES_TENS 0x05
-#define R2_IN_R3_SELECT_MINUTES_UNITS 0x06
-
-#define R2_DAU_0 0x00
-#define R2_DAU_1 0x10
-#define R2_DAU_2 0x20
-#define R2_DAU_3 0x30
-
-#define R2_BANK_0 0x00
-#define R2_BANK 1 0x40
-
-#define R2_HAMMING_CHECK_ON 0x80
-#define R2_HAMMING_CHECK_OFF 0x00
-
-/* Register R3 (Page request data) */
-#define R3_PAGE_HUNDREDS_0 0x00
-#define R3_PAGE_HUNDREDS_1 0x01
-#define R3_PAGE_HUNDREDS_2 0x02
-#define R3_PAGE_HUNDREDS_3 0x03
-#define R3_PAGE_HUNDREDS_4 0x04
-#define R3_PAGE_HUNDREDS_5 0x05
-#define R3_PAGE_HUNDREDS_6 0x06
-#define R3_PAGE_HUNDREDS_7 0x07
-
-#define R3_HOLD_PAGE 0x00
-#define R3_UPDATE_PAGE 0x08
-
-#define R3_PAGE_HUNDREDS_DO_NOT_CARE 0x00
-#define R3_PAGE_HUNDREDS_DO_CARE 0x10
-
-#define R3_PAGE_TENS_DO_NOT_CARE 0x00
-#define R3_PAGE_TENS_DO_CARE 0x10
-
-#define R3_PAGE_UNITS_DO_NOT_CARE 0x00
-#define R3_PAGE_UNITS_DO_CARE 0x10
-
-#define R3_HOURS_TENS_DO_NOT_CARE 0x00
-#define R3_HOURS_TENS_DO_CARE 0x10
-
-#define R3_HOURS_UNITS_DO_NOT_CARE 0x00
-#define R3_HOURS_UNITS_DO_CARE 0x10
-
-#define R3_MINUTES_TENS_DO_NOT_CARE 0x00
-#define R3_MINUTES_TENS_DO_CARE 0x10
-
-#define R3_MINUTES_UNITS_DO_NOT_CARE 0x00
-#define R3_MINUTES_UNITS_DO_CARE 0x10
-
-/* Register R4 (Display chapter) */
-#define R4_DISPLAY_PAGE_0 0x00
-#define R4_DISPLAY_PAGE_1 0x01
-#define R4_DISPLAY_PAGE_2 0x02
-#define R4_DISPLAY_PAGE_3 0x03
-#define R4_DISPLAY_PAGE_4 0x04
-#define R4_DISPLAY_PAGE_5 0x05
-#define R4_DISPLAY_PAGE_6 0x06
-#define R4_DISPLAY_PAGE_7 0x07
-
-/* Register R5 (Normal display control) */
-#define R5_PICTURE_INSIDE_BOXING_OFF 0x00
-#define R5_PICTURE_INSIDE_BOXING_ON 0x01
-
-#define R5_PICTURE_OUTSIDE_BOXING_OFF 0x00
-#define R5_PICTURE_OUTSIDE_BOXING_ON 0x02
-
-#define R5_TEXT_INSIDE_BOXING_OFF 0x00
-#define R5_TEXT_INSIDE_BOXING_ON 0x04
-
-#define R5_TEXT_OUTSIDE_BOXING_OFF 0x00
-#define R5_TEXT_OUTSIDE_BOXING_ON 0x08
-
-#define R5_CONTRAST_REDUCTION_INSIDE_BOXING_OFF 0x00
-#define R5_CONTRAST_REDUCTION_INSIDE_BOXING_ON 0x10
-
-#define R5_CONTRAST_REDUCTION_OUTSIDE_BOXING_OFF 0x00
-#define R5_CONTRAST_REDUCTION_OUTSIDE_BOXING_ON 0x20
-
-#define R5_BACKGROUND_COLOR_INSIDE_BOXING_OFF 0x00
-#define R5_BACKGROUND_COLOR_INSIDE_BOXING_ON 0x40
-
-#define R5_BACKGROUND_COLOR_OUTSIDE_BOXING_OFF 0x00
-#define R5_BACKGROUND_COLOR_OUTSIDE_BOXING_ON 0x80
-
-/* Register R6 (Newsflash display) */
-#define R6_NEWSFLASH_PICTURE_INSIDE_BOXING_OFF 0x00
-#define R6_NEWSFLASH_PICTURE_INSIDE_BOXING_ON 0x01
-
-#define R6_NEWSFLASH_PICTURE_OUTSIDE_BOXING_OFF 0x00
-#define R6_NEWSFLASH_PICTURE_OUTSIDE_BOXING_ON 0x02
-
-#define R6_NEWSFLASH_TEXT_INSIDE_BOXING_OFF 0x00
-#define R6_NEWSFLASH_TEXT_INSIDE_BOXING_ON 0x04
-
-#define R6_NEWSFLASH_TEXT_OUTSIDE_BOXING_OFF 0x00
-#define R6_NEWSFLASH_TEXT_OUTSIDE_BOXING_ON 0x08
-
-#define R6_NEWSFLASH_CONTRAST_REDUCTION_INSIDE_BOXING_OFF 0x00
-#define R6_NEWSFLASH_CONTRAST_REDUCTION_INSIDE_BOXING_ON 0x10
-
-#define R6_NEWSFLASH_CONTRAST_REDUCTION_OUTSIDE_BOXING_OFF 0x00
-#define R6_NEWSFLASH_CONTRAST_REDUCTION_OUTSIDE_BOXING_ON 0x20
-
-#define R6_NEWSFLASH_BACKGROUND_COLOR_INSIDE_BOXING_OFF 0x00
-#define R6_NEWSFLASH_BACKGROUND_COLOR_INSIDE_BOXING_ON 0x40
-
-#define R6_NEWSFLASH_BACKGROUND_COLOR_OUTSIDE_BOXING_OFF 0x00
-#define R6_NEWSFLASH_BACKGROUND_COLOR_OUTSIDE_BOXING_ON 0x80
-
-/* Register R7 (Display mode) */
-#define R7_BOX_OFF_ROW_0 0x00
-#define R7_BOX_ON_ROW_0 0x01
-
-#define R7_BOX_OFF_ROW_1_TO_23 0x00
-#define R7_BOX_ON_ROW_1_TO_23 0x02
-
-#define R7_BOX_OFF_ROW_24 0x00
-#define R7_BOX_ON_ROW_24 0x04
-
-#define R7_SINGLE_HEIGHT 0x00
-#define R7_DOUBLE_HEIGHT 0x08
-
-#define R7_TOP_HALF 0x00
-#define R7_BOTTOM_HALF 0x10
-
-#define R7_REVEAL_OFF 0x00
-#define R7_REVEAL_ON 0x20
-
-#define R7_CURSER_OFF 0x00
-#define R7_CURSER_ON 0x40
-
-#define R7_STATUS_BOTTOM 0x00
-#define R7_STATUS_TOP 0x80
-
-/* Register R8 (Active chapter) */
-#define R8_ACTIVE_CHAPTER_0 0x00
-#define R8_ACTIVE_CHAPTER_1 0x01
-#define R8_ACTIVE_CHAPTER_2 0x02
-#define R8_ACTIVE_CHAPTER_3 0x03
-#define R8_ACTIVE_CHAPTER_4 0x04
-#define R8_ACTIVE_CHAPTER_5 0x05
-#define R8_ACTIVE_CHAPTER_6 0x06
-#define R8_ACTIVE_CHAPTER_7 0x07
-
-#define R8_CLEAR_MEMORY 0x08
-#define R8_DO_NOT_CLEAR_MEMORY 0x00
-
-/* Register R9 (Curser row) */
-#define R9_CURSER_ROW_0 0x00
-#define R9_CURSER_ROW_1 0x01
-#define R9_CURSER_ROW_2 0x02
-#define R9_CURSER_ROW_25 0x19
-
-/* Register R10 (Curser column) */
-#define R10_CURSER_COLUMN_0 0x00
-#define R10_CURSER_COLUMN_6 0x06
-#define R10_CURSER_COLUMN_8 0x08
-
-/*****************************************************************************/
-/* Row 25 control data in column 0 to 9 */
-/*****************************************************************************/
-#define ROW25_COLUMN0_PAGE_UNITS 0x0F
-
-#define ROW25_COLUMN1_PAGE_TENS 0x0F
-
-#define ROW25_COLUMN2_MINUTES_UNITS 0x0F
-
-#define ROW25_COLUMN3_MINUTES_TENS 0x07
-#define ROW25_COLUMN3_DELETE_PAGE 0x08
-
-#define ROW25_COLUMN4_HOUR_UNITS 0x0F
-
-#define ROW25_COLUMN5_HOUR_TENS 0x03
-#define ROW25_COLUMN5_INSERT_HEADLINE 0x04
-#define ROW25_COLUMN5_INSERT_SUBTITLE 0x08
-
-#define ROW25_COLUMN6_SUPPRESS_HEADER 0x01
-#define ROW25_COLUMN6_UPDATE_PAGE 0x02
-#define ROW25_COLUMN6_INTERRUPTED_SEQUENCE 0x04
-#define ROW25_COLUMN6_SUPPRESS_DISPLAY 0x08
-
-#define ROW25_COLUMN7_SERIAL_MODE 0x01
-#define ROW25_COLUMN7_CHARACTER_SET 0x0E
-
-#define ROW25_COLUMN8_PAGE_HUNDREDS 0x07
-#define ROW25_COLUMN8_PAGE_NOT_FOUND 0x10
-
-#define ROW25_COLUMN9_PAGE_BEING_LOOKED_FOR 0x20
-
-#define ROW25_COLUMN0_TO_7_HAMMING_ERROR 0x10
-
-/*****************************************************************************/
-/* Helper macros for extracting page, hour and minute digits */
-/*****************************************************************************/
-/* BYTE_POS 0 is at row 0, column 0,
- BYTE_POS 1 is at row 0, column 1,
- BYTE_POS 40 is at row 1, column 0, (with NUM_ROWS_PER_PAGE = 40)
- BYTE_POS 41 is at row 1, column 1, (with NUM_ROWS_PER_PAGE = 40),
- ... */
-#define ROW(BYTE_POS) (BYTE_POS / NUM_ROWS_PER_PAGE)
-#define COLUMN(BYTE_POS) (BYTE_POS % NUM_ROWS_PER_PAGE)
-
-/*****************************************************************************/
-/* Helper macros for extracting page, hour and minute digits */
-/*****************************************************************************/
-/* Macros for extracting hundreds, tens and units of a page number which
- must be in the range 0 ... 0x799.
- Note that page is coded in hexadecimal, i.e. 0x123 means page 123.
- page 0x.. means page 8.. */
-#define HUNDREDS_OF_PAGE(page) (((page) / 0x100) & 0x7)
-#define TENS_OF_PAGE(page) (((page) / 0x10) & 0xF)
-#define UNITS_OF_PAGE(page) ((page) & 0xF)
-
-/* Macros for extracting tens and units of a hour information which
- must be in the range 0 ... 0x24.
- Note that hour is coded in hexadecimal, i.e. 0x12 means 12 hours */
-#define TENS_OF_HOUR(hour) ((hour) / 0x10)
-#define UNITS_OF_HOUR(hour) ((hour) & 0xF)
-
-/* Macros for extracting tens and units of a minute information which
- must be in the range 0 ... 0x59.
- Note that minute is coded in hexadecimal, i.e. 0x12 means 12 minutes */
-#define TENS_OF_MINUTE(minute) ((minute) / 0x10)
-#define UNITS_OF_MINUTE(minute) ((minute) & 0xF)
-
-#define HOUR_MAX 0x23
-#define MINUTE_MAX 0x59
-#define PAGE_MAX 0x8FF
-
-
-struct saa5246a_device
-{
- struct v4l2_subdev sd;
- struct video_device *vdev;
- u8 pgbuf[NUM_DAUS][VTX_VIRTUALSIZE];
- int is_searching[NUM_DAUS];
- unsigned long in_use;
- struct mutex lock;
-};
-
-static inline struct saa5246a_device *to_dev(struct v4l2_subdev *sd)
-{
- return container_of(sd, struct saa5246a_device, sd);
-}
-
-static struct video_device saa_template; /* Declared near bottom */
-
-/*
- * I2C interfaces
- */
-
-static int i2c_sendbuf(struct saa5246a_device *t, int reg, int count, u8 *data)
-{
- struct i2c_client *client = v4l2_get_subdevdata(&t->sd);
- char buf[64];
-
- buf[0] = reg;
- memcpy(buf+1, data, count);
-
- if (i2c_master_send(client, buf, count + 1) == count + 1)
- return 0;
- return -1;
-}
-
-static int i2c_senddata(struct saa5246a_device *t, ...)
-{
- unsigned char buf[64];
- int v;
- int ct = 0;
- va_list argp;
- va_start(argp, t);
-
- while ((v = va_arg(argp, int)) != -1)
- buf[ct++] = v;
-
- va_end(argp);
- return i2c_sendbuf(t, buf[0], ct-1, buf+1);
-}
-
-/* Get count number of bytes from I²C-device at address adr, store them in buf.
- * Start & stop handshaking is done by this routine, ack will be sent after the
- * last byte to inhibit further sending of data. If uaccess is 'true', data is
- * written to user-space with put_user. Returns -1 if I²C-device didn't send
- * acknowledge, 0 otherwise
- */
-static int i2c_getdata(struct saa5246a_device *t, int count, u8 *buf)
-{
- struct i2c_client *client = v4l2_get_subdevdata(&t->sd);
-
- if (i2c_master_recv(client, buf, count) != count)
- return -1;
- return 0;
-}
-
-/* When a page is found then the not FOUND bit in one of the status registers
- * of the SAA5264A chip is cleared. Unfortunately this bit is not set
- * automatically when a new page is requested. Instead this function must be
- * called after a page has been requested.
- *
- * Return value: 0 if successful
- */
-static int saa5246a_clear_found_bit(struct saa5246a_device *t,
- unsigned char dau_no)
-{
- unsigned char row_25_column_8;
-
- if (i2c_senddata(t, SAA5246A_REGISTER_R8,
-
- dau_no |
- R8_DO_NOT_CLEAR_MEMORY,
-
- R9_CURSER_ROW_25,
-
- R10_CURSER_COLUMN_8,
-
- COMMAND_END) ||
- i2c_getdata(t, 1, &row_25_column_8))
- {
- return -EIO;
- }
- row_25_column_8 |= ROW25_COLUMN8_PAGE_NOT_FOUND;
- if (i2c_senddata(t, SAA5246A_REGISTER_R8,
-
- dau_no |
- R8_DO_NOT_CLEAR_MEMORY,
-
- R9_CURSER_ROW_25,
-
- R10_CURSER_COLUMN_8,
-
- row_25_column_8,
-
- COMMAND_END))
- {
- return -EIO;
- }
-
- return 0;
-}
-
-/* Requests one videotext page as described in req. The fields of req are
- * checked and an error is returned if something is invalid.
- *
- * Return value: 0 if successful
- */
-static int saa5246a_request_page(struct saa5246a_device *t,
- vtx_pagereq_t *req)
-{
- if (req->pagemask < 0 || req->pagemask >= PGMASK_MAX)
- return -EINVAL;
- if (req->pagemask & PGMASK_PAGE)
- if (req->page < 0 || req->page > PAGE_MAX)
- return -EINVAL;
- if (req->pagemask & PGMASK_HOUR)
- if (req->hour < 0 || req->hour > HOUR_MAX)
- return -EINVAL;
- if (req->pagemask & PGMASK_MINUTE)
- if (req->minute < 0 || req->minute > MINUTE_MAX)
- return -EINVAL;
- if (req->pgbuf < 0 || req->pgbuf >= NUM_DAUS)
- return -EINVAL;
-
- if (i2c_senddata(t, SAA5246A_REGISTER_R2,
-
- R2_IN_R3_SELECT_PAGE_HUNDREDS |
- req->pgbuf << 4 |
- R2_BANK_0 |
- R2_HAMMING_CHECK_OFF,
-
- HUNDREDS_OF_PAGE(req->page) |
- R3_HOLD_PAGE |
- (req->pagemask & PG_HUND ?
- R3_PAGE_HUNDREDS_DO_CARE :
- R3_PAGE_HUNDREDS_DO_NOT_CARE),
-
- TENS_OF_PAGE(req->page) |
- (req->pagemask & PG_TEN ?
- R3_PAGE_TENS_DO_CARE :
- R3_PAGE_TENS_DO_NOT_CARE),
-
- UNITS_OF_PAGE(req->page) |
- (req->pagemask & PG_UNIT ?
- R3_PAGE_UNITS_DO_CARE :
- R3_PAGE_UNITS_DO_NOT_CARE),
-
- TENS_OF_HOUR(req->hour) |
- (req->pagemask & HR_TEN ?
- R3_HOURS_TENS_DO_CARE :
- R3_HOURS_TENS_DO_NOT_CARE),
-
- UNITS_OF_HOUR(req->hour) |
- (req->pagemask & HR_UNIT ?
- R3_HOURS_UNITS_DO_CARE :
- R3_HOURS_UNITS_DO_NOT_CARE),
-
- TENS_OF_MINUTE(req->minute) |
- (req->pagemask & MIN_TEN ?
- R3_MINUTES_TENS_DO_CARE :
- R3_MINUTES_TENS_DO_NOT_CARE),
-
- UNITS_OF_MINUTE(req->minute) |
- (req->pagemask & MIN_UNIT ?
- R3_MINUTES_UNITS_DO_CARE :
- R3_MINUTES_UNITS_DO_NOT_CARE),
-
- COMMAND_END) || i2c_senddata(t, SAA5246A_REGISTER_R2,
-
- R2_IN_R3_SELECT_PAGE_HUNDREDS |
- req->pgbuf << 4 |
- R2_BANK_0 |
- R2_HAMMING_CHECK_OFF,
-
- HUNDREDS_OF_PAGE(req->page) |
- R3_UPDATE_PAGE |
- (req->pagemask & PG_HUND ?
- R3_PAGE_HUNDREDS_DO_CARE :
- R3_PAGE_HUNDREDS_DO_NOT_CARE),
-
- COMMAND_END))
- {
- return -EIO;
- }
-
- t->is_searching[req->pgbuf] = true;
- return 0;
-}
-
-/* This routine decodes the page number from the infobits contained in line 25.
- *
- * Parameters:
- * infobits: must be bits 0 to 9 of column 25
- *
- * Return value: page number coded in hexadecimal, i. e. page 123 is coded 0x123
- */
-static inline int saa5246a_extract_pagenum_from_infobits(
- unsigned char infobits[10])
-{
- int page_hundreds, page_tens, page_units;
-
- page_units = infobits[0] & ROW25_COLUMN0_PAGE_UNITS;
- page_tens = infobits[1] & ROW25_COLUMN1_PAGE_TENS;
- page_hundreds = infobits[8] & ROW25_COLUMN8_PAGE_HUNDREDS;
-
- /* page 0x.. means page 8.. */
- if (page_hundreds == 0)
- page_hundreds = 8;
-
- return((page_hundreds << 8) | (page_tens << 4) | page_units);
-}
-
-/* Decodes the hour from the infobits contained in line 25.
- *
- * Parameters:
- * infobits: must be bits 0 to 9 of column 25
- *
- * Return: hour coded in hexadecimal, i. e. 12h is coded 0x12
- */
-static inline int saa5246a_extract_hour_from_infobits(
- unsigned char infobits[10])
-{
- int hour_tens, hour_units;
-
- hour_units = infobits[4] & ROW25_COLUMN4_HOUR_UNITS;
- hour_tens = infobits[5] & ROW25_COLUMN5_HOUR_TENS;
-
- return((hour_tens << 4) | hour_units);
-}
-
-/* Decodes the minutes from the infobits contained in line 25.
- *
- * Parameters:
- * infobits: must be bits 0 to 9 of column 25
- *
- * Return: minutes coded in hexadecimal, i. e. 10min is coded 0x10
- */
-static inline int saa5246a_extract_minutes_from_infobits(
- unsigned char infobits[10])
-{
- int minutes_tens, minutes_units;
-
- minutes_units = infobits[2] & ROW25_COLUMN2_MINUTES_UNITS;
- minutes_tens = infobits[3] & ROW25_COLUMN3_MINUTES_TENS;
-
- return((minutes_tens << 4) | minutes_units);
-}
-
-/* Reads the status bits contained in the first 10 columns of the first line
- * and extracts the information into info.
- *
- * Return value: 0 if successful
- */
-static inline int saa5246a_get_status(struct saa5246a_device *t,
- vtx_pageinfo_t *info, unsigned char dau_no)
-{
- unsigned char infobits[10];
- int column;
-
- if (dau_no >= NUM_DAUS)
- return -EINVAL;
-
- if (i2c_senddata(t, SAA5246A_REGISTER_R8,
-
- dau_no |
- R8_DO_NOT_CLEAR_MEMORY,
-
- R9_CURSER_ROW_25,
-
- R10_CURSER_COLUMN_0,
-
- COMMAND_END) ||
- i2c_getdata(t, 10, infobits))
- {
- return -EIO;
- }
-
- info->pagenum = saa5246a_extract_pagenum_from_infobits(infobits);
- info->hour = saa5246a_extract_hour_from_infobits(infobits);
- info->minute = saa5246a_extract_minutes_from_infobits(infobits);
- info->charset = ((infobits[7] & ROW25_COLUMN7_CHARACTER_SET) >> 1);
- info->delete = !!(infobits[3] & ROW25_COLUMN3_DELETE_PAGE);
- info->headline = !!(infobits[5] & ROW25_COLUMN5_INSERT_HEADLINE);
- info->subtitle = !!(infobits[5] & ROW25_COLUMN5_INSERT_SUBTITLE);
- info->supp_header = !!(infobits[6] & ROW25_COLUMN6_SUPPRESS_HEADER);
- info->update = !!(infobits[6] & ROW25_COLUMN6_UPDATE_PAGE);
- info->inter_seq = !!(infobits[6] & ROW25_COLUMN6_INTERRUPTED_SEQUENCE);
- info->dis_disp = !!(infobits[6] & ROW25_COLUMN6_SUPPRESS_DISPLAY);
- info->serial = !!(infobits[7] & ROW25_COLUMN7_SERIAL_MODE);
- info->notfound = !!(infobits[8] & ROW25_COLUMN8_PAGE_NOT_FOUND);
- info->pblf = !!(infobits[9] & ROW25_COLUMN9_PAGE_BEING_LOOKED_FOR);
- info->hamming = 0;
- for (column = 0; column <= 7; column++) {
- if (infobits[column] & ROW25_COLUMN0_TO_7_HAMMING_ERROR) {
- info->hamming = 1;
- break;
- }
- }
- if (!info->hamming && !info->notfound)
- t->is_searching[dau_no] = false;
- return 0;
-}
-
-/* Reads 1 videotext page buffer of the SAA5246A.
- *
- * req is used both as input and as output. It contains information which part
- * must be read. The videotext page is copied into req->buffer.
- *
- * Return value: 0 if successful
- */
-static inline int saa5246a_get_page(struct saa5246a_device *t,
- vtx_pagereq_t *req)
-{
- int start, end, size;
- char *buf;
- int err;
-
- if (req->pgbuf < 0 || req->pgbuf >= NUM_DAUS ||
- req->start < 0 || req->start > req->end || req->end >= VTX_PAGESIZE)
- return -EINVAL;
-
- buf = kmalloc(VTX_PAGESIZE, GFP_KERNEL);
- if (!buf)
- return -ENOMEM;
-
- /* Read "normal" part of page */
- err = -EIO;
-
- end = min(req->end, VTX_PAGESIZE - 1);
- if (i2c_senddata(t, SAA5246A_REGISTER_R8,
- req->pgbuf | R8_DO_NOT_CLEAR_MEMORY,
- ROW(req->start), COLUMN(req->start), COMMAND_END))
- goto out;
- if (i2c_getdata(t, end - req->start + 1, buf))
- goto out;
- err = -EFAULT;
- if (copy_to_user(req->buffer, buf, end - req->start + 1))
- goto out;
-
- /* Always get the time from buffer 4, since this stupid SAA5246A only
- * updates the currently displayed buffer...
- */
- if (REQ_CONTAINS_TIME(req)) {
- start = max(req->start, POS_TIME_START);
- end = min(req->end, POS_TIME_END);
- size = end - start + 1;
- err = -EINVAL;
- if (size < 0)
- goto out;
- err = -EIO;
- if (i2c_senddata(t, SAA5246A_REGISTER_R8,
- R8_ACTIVE_CHAPTER_4 | R8_DO_NOT_CLEAR_MEMORY,
- R9_CURSER_ROW_0, start, COMMAND_END))
- goto out;
- if (i2c_getdata(t, size, buf))
- goto out;
- err = -EFAULT;
- if (copy_to_user(req->buffer + start - req->start, buf, size))
- goto out;
- }
- /* Insert the header from buffer 4 only, if acquisition circuit is still searching for a page */
- if (REQ_CONTAINS_HEADER(req) && t->is_searching[req->pgbuf]) {
- start = max(req->start, POS_HEADER_START);
- end = min(req->end, POS_HEADER_END);
- size = end - start + 1;
- err = -EINVAL;
- if (size < 0)
- goto out;
- err = -EIO;
- if (i2c_senddata(t, SAA5246A_REGISTER_R8,
- R8_ACTIVE_CHAPTER_4 | R8_DO_NOT_CLEAR_MEMORY,
- R9_CURSER_ROW_0, start, COMMAND_END))
- goto out;
- if (i2c_getdata(t, end - start + 1, buf))
- goto out;
- err = -EFAULT;
- if (copy_to_user(req->buffer + start - req->start, buf, size))
- goto out;
- }
- err = 0;
-out:
- kfree(buf);
- return err;
-}
-
-/* Stops the acquisition circuit given in dau_no. The page buffer associated
- * with this acquisition circuit will no more be updated. The other daus are
- * not affected.
- *
- * Return value: 0 if successful
- */
-static inline int saa5246a_stop_dau(struct saa5246a_device *t,
- unsigned char dau_no)
-{
- if (dau_no >= NUM_DAUS)
- return -EINVAL;
- if (i2c_senddata(t, SAA5246A_REGISTER_R2,
-
- R2_IN_R3_SELECT_PAGE_HUNDREDS |
- dau_no << 4 |
- R2_BANK_0 |
- R2_HAMMING_CHECK_OFF,
-
- R3_PAGE_HUNDREDS_0 |
- R3_HOLD_PAGE |
- R3_PAGE_HUNDREDS_DO_NOT_CARE,
-
- COMMAND_END))
- {
- return -EIO;
- }
- t->is_searching[dau_no] = false;
- return 0;
-}
-
-/* Handles ioctls defined in videotext.h
- *
- * Returns 0 if successful
- */
-static long do_saa5246a_ioctl(struct file *file, unsigned int cmd, void *arg)
-{
- struct saa5246a_device *t = video_drvdata(file);
-
- switch(cmd)
- {
- case VTXIOCGETINFO:
- {
- vtx_info_t *info = arg;
-
- info->version_major = MAJOR_VERSION;
- info->version_minor = MINOR_VERSION;
- info->numpages = NUM_DAUS;
- return 0;
- }
-
- case VTXIOCCLRPAGE:
- {
- vtx_pagereq_t *req = arg;
-
- if (req->pgbuf < 0 || req->pgbuf >= NUM_DAUS)
- return -EINVAL;
- memset(t->pgbuf[req->pgbuf], ' ', sizeof(t->pgbuf[0]));
- return 0;
- }
-
- case VTXIOCCLRFOUND:
- {
- vtx_pagereq_t *req = arg;
-
- if (req->pgbuf < 0 || req->pgbuf >= NUM_DAUS)
- return -EINVAL;
- return(saa5246a_clear_found_bit(t, req->pgbuf));
- }
-
- case VTXIOCPAGEREQ:
- {
- vtx_pagereq_t *req = arg;
-
- return(saa5246a_request_page(t, req));
- }
-
- case VTXIOCGETSTAT:
- {
- vtx_pagereq_t *req = arg;
- vtx_pageinfo_t info;
- int rval;
-
- if ((rval = saa5246a_get_status(t, &info, req->pgbuf)))
- return rval;
- if(copy_to_user(req->buffer, &info,
- sizeof(vtx_pageinfo_t)))
- return -EFAULT;
- return 0;
- }
-
- case VTXIOCGETPAGE:
- {
- vtx_pagereq_t *req = arg;
-
- return(saa5246a_get_page(t, req));
- }
-
- case VTXIOCSTOPDAU:
- {
- vtx_pagereq_t *req = arg;
-
- return(saa5246a_stop_dau(t, req->pgbuf));
- }
-
- case VTXIOCPUTPAGE:
- case VTXIOCSETDISP:
- case VTXIOCPUTSTAT:
- return 0;
-
- case VTXIOCCLRCACHE:
- {
- return 0;
- }
-
- case VTXIOCSETVIRT:
- {
- /* I do not know what "virtual mode" means */
- return 0;
- }
- }
- return -EINVAL;
-}
-
-/*
- * Translates old vtx IOCTLs to new ones
- *
- * This keeps new kernel versions compatible with old userspace programs.
- */
-static inline unsigned int vtx_fix_command(unsigned int cmd)
-{
- switch (cmd) {
- case VTXIOCGETINFO_OLD:
- cmd = VTXIOCGETINFO;
- break;
- case VTXIOCCLRPAGE_OLD:
- cmd = VTXIOCCLRPAGE;
- break;
- case VTXIOCCLRFOUND_OLD:
- cmd = VTXIOCCLRFOUND;
- break;
- case VTXIOCPAGEREQ_OLD:
- cmd = VTXIOCPAGEREQ;
- break;
- case VTXIOCGETSTAT_OLD:
- cmd = VTXIOCGETSTAT;
- break;
- case VTXIOCGETPAGE_OLD:
- cmd = VTXIOCGETPAGE;
- break;
- case VTXIOCSTOPDAU_OLD:
- cmd = VTXIOCSTOPDAU;
- break;
- case VTXIOCPUTPAGE_OLD:
- cmd = VTXIOCPUTPAGE;
- break;
- case VTXIOCSETDISP_OLD:
- cmd = VTXIOCSETDISP;
- break;
- case VTXIOCPUTSTAT_OLD:
- cmd = VTXIOCPUTSTAT;
- break;
- case VTXIOCCLRCACHE_OLD:
- cmd = VTXIOCCLRCACHE;
- break;
- case VTXIOCSETVIRT_OLD:
- cmd = VTXIOCSETVIRT;
- break;
- }
- return cmd;
-}
-
-/*
- * Handle the locking
- */
-static long saa5246a_ioctl(struct file *file,
- unsigned int cmd, unsigned long arg)
-{
- struct saa5246a_device *t = video_drvdata(file);
- long err;
-
- cmd = vtx_fix_command(cmd);
- mutex_lock(&t->lock);
- err = video_usercopy(file, cmd, arg, do_saa5246a_ioctl);
- mutex_unlock(&t->lock);
- return err;
-}
-
-static int saa5246a_open(struct file *file)
-{
- struct saa5246a_device *t = video_drvdata(file);
-
- if (test_and_set_bit(0, &t->in_use))
- return -EBUSY;
-
- if (i2c_senddata(t, SAA5246A_REGISTER_R0,
- R0_SELECT_R11 |
- R0_PLL_TIME_CONSTANT_LONG |
- R0_ENABLE_nODD_EVEN_OUTPUT |
- R0_ENABLE_HDR_POLL |
- R0_DO_NOT_FORCE_nODD_EVEN_LOW_IF_PICTURE_DISPLAYED |
- R0_NO_FREE_RUN_PLL |
- R0_NO_AUTOMATIC_FASTEXT_PROMPT,
-
- R1_NON_INTERLACED_312_312_LINES |
- R1_DEW |
- R1_EXTENDED_PACKET_DISABLE |
- R1_DAUS_ALL_ON |
- R1_8_BITS_NO_PARITY |
- R1_VCS_TO_SCS,
-
- COMMAND_END) ||
- i2c_senddata(t, SAA5246A_REGISTER_R4,
-
- /* We do not care much for the TV display but nevertheless we
- * need the currently displayed page later because only on that
- * page the time is updated. */
- R4_DISPLAY_PAGE_4,
-
- COMMAND_END))
- {
- clear_bit(0, &t->in_use);
- return -EIO;
- }
- return 0;
-}
-
-static int saa5246a_release(struct file *file)
-{
- struct saa5246a_device *t = video_drvdata(file);
-
- /* Stop all acquisition circuits. */
- i2c_senddata(t, SAA5246A_REGISTER_R1,
-
- R1_INTERLACED_312_AND_HALF_312_AND_HALF_LINES |
- R1_DEW |
- R1_EXTENDED_PACKET_DISABLE |
- R1_DAUS_ALL_OFF |
- R1_8_BITS_NO_PARITY |
- R1_VCS_TO_SCS,
-
- COMMAND_END);
- clear_bit(0, &t->in_use);
- return 0;
-}
-
-static const struct v4l2_file_operations saa_fops = {
- .owner = THIS_MODULE,
- .open = saa5246a_open,
- .release = saa5246a_release,
- .ioctl = saa5246a_ioctl,
-};
-
-static struct video_device saa_template =
-{
- .name = "saa5246a",
- .fops = &saa_fops,
- .release = video_device_release,
-};
-
-static int saa5246a_g_chip_ident(struct v4l2_subdev *sd, struct v4l2_dbg_chip_ident *chip)
-{
- struct i2c_client *client = v4l2_get_subdevdata(sd);
-
- return v4l2_chip_ident_i2c_client(client, chip, V4L2_IDENT_SAA5246A, 0);
-}
-
-static const struct v4l2_subdev_core_ops saa5246a_core_ops = {
- .g_chip_ident = saa5246a_g_chip_ident,
-};
-
-static const struct v4l2_subdev_ops saa5246a_ops = {
- .core = &saa5246a_core_ops,
-};
-
-
-static int saa5246a_probe(struct i2c_client *client,
- const struct i2c_device_id *id)
-{
- int pgbuf;
- int err;
- struct saa5246a_device *t;
- struct v4l2_subdev *sd;
-
- v4l_info(client, "chip found @ 0x%x (%s)\n",
- client->addr << 1, client->adapter->name);
- v4l_info(client, "VideoText version %d.%d\n",
- MAJOR_VERSION, MINOR_VERSION);
- t = kzalloc(sizeof(*t), GFP_KERNEL);
- if (t == NULL)
- return -ENOMEM;
- sd = &t->sd;
- v4l2_i2c_subdev_init(sd, client, &saa5246a_ops);
- mutex_init(&t->lock);
-
- /* Now create a video4linux device */
- t->vdev = video_device_alloc();
- if (t->vdev == NULL) {
- kfree(t);
- return -ENOMEM;
- }
- memcpy(t->vdev, &saa_template, sizeof(*t->vdev));
-
- for (pgbuf = 0; pgbuf < NUM_DAUS; pgbuf++) {
- memset(t->pgbuf[pgbuf], ' ', sizeof(t->pgbuf[0]));
- t->is_searching[pgbuf] = false;
- }
- video_set_drvdata(t->vdev, t);
-
- /* Register it */
- err = video_register_device(t->vdev, VFL_TYPE_VTX, -1);
- if (err < 0) {
- video_device_release(t->vdev);
- kfree(t);
- return err;
- }
- return 0;
-}
-
-static int saa5246a_remove(struct i2c_client *client)
-{
- struct v4l2_subdev *sd = i2c_get_clientdata(client);
- struct saa5246a_device *t = to_dev(sd);
-
- video_unregister_device(t->vdev);
- v4l2_device_unregister_subdev(sd);
- kfree(t);
- return 0;
-}
-
-static const struct i2c_device_id saa5246a_id[] = {
- { "saa5246a", 0 },
- { }
-};
-MODULE_DEVICE_TABLE(i2c, saa5246a_id);
-
-static struct v4l2_i2c_driver_data v4l2_i2c_data = {
- .name = "saa5246a",
- .probe = saa5246a_probe,
- .remove = saa5246a_remove,
- .id_table = saa5246a_id,
-};
diff --git a/drivers/media/video/saa5249.c b/drivers/media/video/saa5249.c
deleted file mode 100644
index 31ff27df4cbf..000000000000
--- a/drivers/media/video/saa5249.c
+++ /dev/null
@@ -1,650 +0,0 @@
-/*
- * Modified in order to keep it compatible both with new and old videotext IOCTLs by
- * Michael Geng <linux@MichaelGeng.de>
- *
- * Cleaned up to use existing videodev interface and allow the idea
- * of multiple teletext decoders on the video4linux iface. Changed i2c
- * to cover addressing clashes on device busses. It's also rebuilt so
- * you can add arbitary multiple teletext devices to Linux video4linux
- * now (well 32 anyway).
- *
- * Alan Cox <alan@lxorguk.ukuu.org.uk>
- *
- * The original driver was heavily modified to match the i2c interface
- * It was truncated to use the WinTV boards, too.
- *
- * Copyright (c) 1998 Richard Guenther <richard.guenther@student.uni-tuebingen.de>
- *
- * Derived From
- *
- * vtx.c:
- * This is a loadable character-device-driver for videotext-interfaces
- * (aka teletext). Please check the Makefile/README for a list of supported
- * interfaces.
- *
- * Copyright (c) 1994-97 Martin Buck <martin-2.buck@student.uni-ulm.de>
- *
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307,
- * USA.
- */
-
-#include <linux/module.h>
-#include <linux/kernel.h>
-#include <linux/mm.h>
-#include <linux/init.h>
-#include <linux/i2c.h>
-#include <linux/mutex.h>
-#include <linux/delay.h>
-#include <linux/videotext.h>
-#include <linux/videodev2.h>
-#include <linux/slab.h>
-#include <media/v4l2-device.h>
-#include <media/v4l2-chip-ident.h>
-#include <media/v4l2-ioctl.h>
-#include <media/v4l2-i2c-drv.h>
-
-MODULE_AUTHOR("Michael Geng <linux@MichaelGeng.de>");
-MODULE_DESCRIPTION("Philips SAA5249 Teletext decoder driver");
-MODULE_LICENSE("GPL");
-
-
-#define VTX_VER_MAJ 1
-#define VTX_VER_MIN 8
-
-
-#define NUM_DAUS 4
-#define NUM_BUFS 8
-
-static const int disp_modes[8][3] =
-{
- { 0x46, 0x03, 0x03 }, /* DISPOFF */
- { 0x46, 0xcc, 0xcc }, /* DISPNORM */
- { 0x44, 0x0f, 0x0f }, /* DISPTRANS */
- { 0x46, 0xcc, 0x46 }, /* DISPINS */
- { 0x44, 0x03, 0x03 }, /* DISPOFF, interlaced */
- { 0x44, 0xcc, 0xcc }, /* DISPNORM, interlaced */
- { 0x44, 0x0f, 0x0f }, /* DISPTRANS, interlaced */
- { 0x44, 0xcc, 0x46 } /* DISPINS, interlaced */
-};
-
-
-
-#define PAGE_WAIT msecs_to_jiffies(300) /* Time between requesting page and */
- /* checking status bits */
-#define PGBUF_EXPIRE msecs_to_jiffies(15000) /* Time to wait before retransmitting */
- /* page regardless of infobits */
-typedef struct {
- u8 pgbuf[VTX_VIRTUALSIZE]; /* Page-buffer */
- u8 laststat[10]; /* Last value of infobits for DAU */
- u8 sregs[7]; /* Page-request registers */
- unsigned long expire; /* Time when page will be expired */
- unsigned clrfound : 1; /* VTXIOCCLRFOUND has been called */
- unsigned stopped : 1; /* VTXIOCSTOPDAU has been called */
-} vdau_t;
-
-struct saa5249_device
-{
- struct v4l2_subdev sd;
- struct video_device *vdev;
- vdau_t vdau[NUM_DAUS]; /* Data for virtual DAUs (the 5249 only has one */
- /* real DAU, so we have to simulate some more) */
- int vtx_use_count;
- int is_searching[NUM_DAUS];
- int disp_mode;
- int virtual_mode;
- unsigned long in_use;
- struct mutex lock;
-};
-
-static inline struct saa5249_device *to_dev(struct v4l2_subdev *sd)
-{
- return container_of(sd, struct saa5249_device, sd);
-}
-
-
-#define CCTWR 34 /* IC write/read-address of vtx-chip */
-#define CCTRD 35
-#define NOACK_REPEAT 10 /* Retry access this many times on failure */
-#define CLEAR_DELAY msecs_to_jiffies(50) /* Time required to clear a page */
-#define READY_TIMEOUT msecs_to_jiffies(30) /* Time to wait for ready signal of I2C-bus interface */
-#define INIT_DELAY 500 /* Time in usec to wait at initialization of CEA interface */
-#define START_DELAY 10 /* Time in usec to wait before starting write-cycle (CEA) */
-
-#define VTX_DEV_MINOR 0
-
-static struct video_device saa_template; /* Declared near bottom */
-
-/*
- * Wait the given number of jiffies (10ms). This calls the scheduler, so the actual
- * delay may be longer.
- */
-
-static void jdelay(unsigned long delay)
-{
- sigset_t oldblocked = current->blocked;
-
- spin_lock_irq(&current->sighand->siglock);
- sigfillset(&current->blocked);
- recalc_sigpending();
- spin_unlock_irq(&current->sighand->siglock);
- msleep_interruptible(jiffies_to_msecs(delay));
-
- spin_lock_irq(&current->sighand->siglock);
- current->blocked = oldblocked;
- recalc_sigpending();
- spin_unlock_irq(&current->sighand->siglock);
-}
-
-
-/*
- * I2C interfaces
- */
-
-static int i2c_sendbuf(struct saa5249_device *t, int reg, int count, u8 *data)
-{
- struct i2c_client *client = v4l2_get_subdevdata(&t->sd);
- char buf[64];
-
- buf[0] = reg;
- memcpy(buf+1, data, count);
-
- if (i2c_master_send(client, buf, count + 1) == count + 1)
- return 0;
- return -1;
-}
-
-static int i2c_senddata(struct saa5249_device *t, ...)
-{
- unsigned char buf[64];
- int v;
- int ct = 0;
- va_list argp;
- va_start(argp,t);
-
- while ((v = va_arg(argp, int)) != -1)
- buf[ct++] = v;
-
- va_end(argp);
- return i2c_sendbuf(t, buf[0], ct-1, buf+1);
-}
-
-/* Get count number of bytes from I²C-device at address adr, store them in buf. Start & stop
- * handshaking is done by this routine, ack will be sent after the last byte to inhibit further
- * sending of data. If uaccess is 'true', data is written to user-space with put_user.
- * Returns -1 if I²C-device didn't send acknowledge, 0 otherwise
- */
-
-static int i2c_getdata(struct saa5249_device *t, int count, u8 *buf)
-{
- struct i2c_client *client = v4l2_get_subdevdata(&t->sd);
-
- if (i2c_master_recv(client, buf, count) != count)
- return -1;
- return 0;
-}
-
-
-/*
- * Standard character-device-driver functions
- */
-
-static long do_saa5249_ioctl(struct file *file, unsigned int cmd, void *arg)
-{
- static int virtual_mode = false;
- struct saa5249_device *t = video_drvdata(file);
-
- switch (cmd) {
- case VTXIOCGETINFO:
- {
- vtx_info_t *info = arg;
- info->version_major = VTX_VER_MAJ;
- info->version_minor = VTX_VER_MIN;
- info->numpages = NUM_DAUS;
- /*info->cct_type = CCT_TYPE;*/
- return 0;
- }
-
- case VTXIOCCLRPAGE:
- {
- vtx_pagereq_t *req = arg;
-
- if (req->pgbuf < 0 || req->pgbuf >= NUM_DAUS)
- return -EINVAL;
- memset(t->vdau[req->pgbuf].pgbuf, ' ', sizeof(t->vdau[0].pgbuf));
- t->vdau[req->pgbuf].clrfound = true;
- return 0;
- }
-
- case VTXIOCCLRFOUND:
- {
- vtx_pagereq_t *req = arg;
-
- if (req->pgbuf < 0 || req->pgbuf >= NUM_DAUS)
- return -EINVAL;
- t->vdau[req->pgbuf].clrfound = true;
- return 0;
- }
-
- case VTXIOCPAGEREQ:
- {
- vtx_pagereq_t *req = arg;
- if (!(req->pagemask & PGMASK_PAGE))
- req->page = 0;
- if (!(req->pagemask & PGMASK_HOUR))
- req->hour = 0;
- if (!(req->pagemask & PGMASK_MINUTE))
- req->minute = 0;
- if (req->page < 0 || req->page > 0x8ff) /* 7FF ?? */
- return -EINVAL;
- req->page &= 0x7ff;
- if (req->hour < 0 || req->hour > 0x3f || req->minute < 0 || req->minute > 0x7f ||
- req->pagemask < 0 || req->pagemask >= PGMASK_MAX || req->pgbuf < 0 || req->pgbuf >= NUM_DAUS)
- return -EINVAL;
- t->vdau[req->pgbuf].sregs[0] = (req->pagemask & PG_HUND ? 0x10 : 0) | (req->page / 0x100);
- t->vdau[req->pgbuf].sregs[1] = (req->pagemask & PG_TEN ? 0x10 : 0) | ((req->page / 0x10) & 0xf);
- t->vdau[req->pgbuf].sregs[2] = (req->pagemask & PG_UNIT ? 0x10 : 0) | (req->page & 0xf);
- t->vdau[req->pgbuf].sregs[3] = (req->pagemask & HR_TEN ? 0x10 : 0) | (req->hour / 0x10);
- t->vdau[req->pgbuf].sregs[4] = (req->pagemask & HR_UNIT ? 0x10 : 0) | (req->hour & 0xf);
- t->vdau[req->pgbuf].sregs[5] = (req->pagemask & MIN_TEN ? 0x10 : 0) | (req->minute / 0x10);
- t->vdau[req->pgbuf].sregs[6] = (req->pagemask & MIN_UNIT ? 0x10 : 0) | (req->minute & 0xf);
- t->vdau[req->pgbuf].stopped = false;
- t->vdau[req->pgbuf].clrfound = true;
- t->is_searching[req->pgbuf] = true;
- return 0;
- }
-
- case VTXIOCGETSTAT:
- {
- vtx_pagereq_t *req = arg;
- u8 infobits[10];
- vtx_pageinfo_t info;
- int a;
-
- if (req->pgbuf < 0 || req->pgbuf >= NUM_DAUS)
- return -EINVAL;
- if (!t->vdau[req->pgbuf].stopped) {
- if (i2c_senddata(t, 2, 0, -1) ||
- i2c_sendbuf(t, 3, sizeof(t->vdau[0].sregs), t->vdau[req->pgbuf].sregs) ||
- i2c_senddata(t, 8, 0, 25, 0, ' ', ' ', ' ', ' ', ' ', ' ', ' ', ' ', ' ', -1) ||
- i2c_senddata(t, 2, 0, t->vdau[req->pgbuf].sregs[0] | 8, -1) ||
- i2c_senddata(t, 8, 0, 25, 0, -1))
- return -EIO;
- jdelay(PAGE_WAIT);
- if (i2c_getdata(t, 10, infobits))
- return -EIO;
-
- if (!(infobits[8] & 0x10) && !(infobits[7] & 0xf0) && /* check FOUND-bit */
- (memcmp(infobits, t->vdau[req->pgbuf].laststat, sizeof(infobits)) ||
- time_after_eq(jiffies, t->vdau[req->pgbuf].expire)))
- { /* check if new page arrived */
- if (i2c_senddata(t, 8, 0, 0, 0, -1) ||
- i2c_getdata(t, VTX_PAGESIZE, t->vdau[req->pgbuf].pgbuf))
- return -EIO;
- t->vdau[req->pgbuf].expire = jiffies + PGBUF_EXPIRE;
- memset(t->vdau[req->pgbuf].pgbuf + VTX_PAGESIZE, ' ', VTX_VIRTUALSIZE - VTX_PAGESIZE);
- if (t->virtual_mode) {
- /* Packet X/24 */
- if (i2c_senddata(t, 8, 0, 0x20, 0, -1) ||
- i2c_getdata(t, 40, t->vdau[req->pgbuf].pgbuf + VTX_PAGESIZE + 20 * 40))
- return -EIO;
- /* Packet X/27/0 */
- if (i2c_senddata(t, 8, 0, 0x21, 0, -1) ||
- i2c_getdata(t, 40, t->vdau[req->pgbuf].pgbuf + VTX_PAGESIZE + 16 * 40))
- return -EIO;
- /* Packet 8/30/0...8/30/15
- * FIXME: AFAIK, the 5249 does hamming-decoding for some bytes in packet 8/30,
- * so we should undo this here.
- */
- if (i2c_senddata(t, 8, 0, 0x22, 0, -1) ||
- i2c_getdata(t, 40, t->vdau[req->pgbuf].pgbuf + VTX_PAGESIZE + 23 * 40))
- return -EIO;
- }
- t->vdau[req->pgbuf].clrfound = false;
- memcpy(t->vdau[req->pgbuf].laststat, infobits, sizeof(infobits));
- } else {
- memcpy(infobits, t->vdau[req->pgbuf].laststat, sizeof(infobits));
- }
- } else {
- memcpy(infobits, t->vdau[req->pgbuf].laststat, sizeof(infobits));
- }
-
- info.pagenum = ((infobits[8] << 8) & 0x700) | ((infobits[1] << 4) & 0xf0) | (infobits[0] & 0x0f);
- if (info.pagenum < 0x100)
- info.pagenum += 0x800;
- info.hour = ((infobits[5] << 4) & 0x30) | (infobits[4] & 0x0f);
- info.minute = ((infobits[3] << 4) & 0x70) | (infobits[2] & 0x0f);
- info.charset = ((infobits[7] >> 1) & 7);
- info.delete = !!(infobits[3] & 8);
- info.headline = !!(infobits[5] & 4);
- info.subtitle = !!(infobits[5] & 8);
- info.supp_header = !!(infobits[6] & 1);
- info.update = !!(infobits[6] & 2);
- info.inter_seq = !!(infobits[6] & 4);
- info.dis_disp = !!(infobits[6] & 8);
- info.serial = !!(infobits[7] & 1);
- info.notfound = !!(infobits[8] & 0x10);
- info.pblf = !!(infobits[9] & 0x20);
- info.hamming = 0;
- for (a = 0; a <= 7; a++) {
- if (infobits[a] & 0xf0) {
- info.hamming = 1;
- break;
- }
- }
- if (t->vdau[req->pgbuf].clrfound)
- info.notfound = 1;
- if (copy_to_user(req->buffer, &info, sizeof(vtx_pageinfo_t)))
- return -EFAULT;
- if (!info.hamming && !info.notfound)
- t->is_searching[req->pgbuf] = false;
- return 0;
- }
-
- case VTXIOCGETPAGE:
- {
- vtx_pagereq_t *req = arg;
- int start, end;
-
- if (req->pgbuf < 0 || req->pgbuf >= NUM_DAUS || req->start < 0 ||
- req->start > req->end || req->end >= (virtual_mode ? VTX_VIRTUALSIZE : VTX_PAGESIZE))
- return -EINVAL;
- if (copy_to_user(req->buffer, &t->vdau[req->pgbuf].pgbuf[req->start], req->end - req->start + 1))
- return -EFAULT;
-
- /*
- * Always read the time directly from SAA5249
- */
-
- if (req->start <= 39 && req->end >= 32) {
- int len;
- char buf[16];
- start = max(req->start, 32);
- end = min(req->end, 39);
- len = end - start + 1;
- if (i2c_senddata(t, 8, 0, 0, start, -1) ||
- i2c_getdata(t, len, buf))
- return -EIO;
- if (copy_to_user(req->buffer + start - req->start, buf, len))
- return -EFAULT;
- }
- /* Insert the current header if DAU is still searching for a page */
- if (req->start <= 31 && req->end >= 7 && t->is_searching[req->pgbuf]) {
- char buf[32];
- int len;
-
- start = max(req->start, 7);
- end = min(req->end, 31);
- len = end - start + 1;
- if (i2c_senddata(t, 8, 0, 0, start, -1) ||
- i2c_getdata(t, len, buf))
- return -EIO;
- if (copy_to_user(req->buffer + start - req->start, buf, len))
- return -EFAULT;
- }
- return 0;
- }
-
- case VTXIOCSTOPDAU:
- {
- vtx_pagereq_t *req = arg;
-
- if (req->pgbuf < 0 || req->pgbuf >= NUM_DAUS)
- return -EINVAL;
- t->vdau[req->pgbuf].stopped = true;
- t->is_searching[req->pgbuf] = false;
- return 0;
- }
-
- case VTXIOCPUTPAGE:
- case VTXIOCSETDISP:
- case VTXIOCPUTSTAT:
- return 0;
-
- case VTXIOCCLRCACHE:
- {
- if (i2c_senddata(t, 0, NUM_DAUS, 0, 8, -1) || i2c_senddata(t, 11,
- ' ', ' ', ' ', ' ', ' ', ' ',
- ' ', ' ', ' ', ' ', ' ', ' ',
- ' ', ' ', ' ', ' ', ' ', ' ',
- ' ', ' ', ' ', ' ', ' ', ' ',
- -1))
- return -EIO;
- if (i2c_senddata(t, 3, 0x20, -1))
- return -EIO;
- jdelay(10 * CLEAR_DELAY); /* I have no idea how long we have to wait here */
- return 0;
- }
-
- case VTXIOCSETVIRT:
- {
- /* The SAA5249 has virtual-row reception turned on always */
- t->virtual_mode = (int)(long)arg;
- return 0;
- }
- }
- return -EINVAL;
-}
-
-/*
- * Translates old vtx IOCTLs to new ones
- *
- * This keeps new kernel versions compatible with old userspace programs.
- */
-static inline unsigned int vtx_fix_command(unsigned int cmd)
-{
- switch (cmd) {
- case VTXIOCGETINFO_OLD:
- cmd = VTXIOCGETINFO;
- break;
- case VTXIOCCLRPAGE_OLD:
- cmd = VTXIOCCLRPAGE;
- break;
- case VTXIOCCLRFOUND_OLD:
- cmd = VTXIOCCLRFOUND;
- break;
- case VTXIOCPAGEREQ_OLD:
- cmd = VTXIOCPAGEREQ;
- break;
- case VTXIOCGETSTAT_OLD:
- cmd = VTXIOCGETSTAT;
- break;
- case VTXIOCGETPAGE_OLD:
- cmd = VTXIOCGETPAGE;
- break;
- case VTXIOCSTOPDAU_OLD:
- cmd = VTXIOCSTOPDAU;
- break;
- case VTXIOCPUTPAGE_OLD:
- cmd = VTXIOCPUTPAGE;
- break;
- case VTXIOCSETDISP_OLD:
- cmd = VTXIOCSETDISP;
- break;
- case VTXIOCPUTSTAT_OLD:
- cmd = VTXIOCPUTSTAT;
- break;
- case VTXIOCCLRCACHE_OLD:
- cmd = VTXIOCCLRCACHE;
- break;
- case VTXIOCSETVIRT_OLD:
- cmd = VTXIOCSETVIRT;
- break;
- }
- return cmd;
-}
-
-/*
- * Handle the locking
- */
-
-static long saa5249_ioctl(struct file *file,
- unsigned int cmd, unsigned long arg)
-{
- struct saa5249_device *t = video_drvdata(file);
- long err;
-
- cmd = vtx_fix_command(cmd);
- mutex_lock(&t->lock);
- err = video_usercopy(file, cmd, arg, do_saa5249_ioctl);
- mutex_unlock(&t->lock);
- return err;
-}
-
-static int saa5249_open(struct file *file)
-{
- struct saa5249_device *t = video_drvdata(file);
- int pgbuf;
-
- if (test_and_set_bit(0, &t->in_use))
- return -EBUSY;
-
- if (i2c_senddata(t, 0, 0, -1) || /* Select R11 */
- /* Turn off parity checks (we do this ourselves) */
- i2c_senddata(t, 1, disp_modes[t->disp_mode][0], 0, -1) ||
- /* Display TV-picture, no virtual rows */
- i2c_senddata(t, 4, NUM_DAUS, disp_modes[t->disp_mode][1], disp_modes[t->disp_mode][2], 7, -1))
- /* Set display to page 4 */
- {
- clear_bit(0, &t->in_use);
- return -EIO;
- }
-
- for (pgbuf = 0; pgbuf < NUM_DAUS; pgbuf++) {
- memset(t->vdau[pgbuf].pgbuf, ' ', sizeof(t->vdau[0].pgbuf));
- memset(t->vdau[pgbuf].sregs, 0, sizeof(t->vdau[0].sregs));
- memset(t->vdau[pgbuf].laststat, 0, sizeof(t->vdau[0].laststat));
- t->vdau[pgbuf].expire = 0;
- t->vdau[pgbuf].clrfound = true;
- t->vdau[pgbuf].stopped = true;
- t->is_searching[pgbuf] = false;
- }
- t->virtual_mode = false;
- return 0;
-}
-
-
-
-static int saa5249_release(struct file *file)
-{
- struct saa5249_device *t = video_drvdata(file);
-
- i2c_senddata(t, 1, 0x20, -1); /* Turn off CCT */
- i2c_senddata(t, 5, 3, 3, -1); /* Turn off TV-display */
- clear_bit(0, &t->in_use);
- return 0;
-}
-
-static const struct v4l2_file_operations saa_fops = {
- .owner = THIS_MODULE,
- .open = saa5249_open,
- .release = saa5249_release,
- .ioctl = saa5249_ioctl,
-};
-
-static struct video_device saa_template =
-{
- .name = "saa5249",
- .fops = &saa_fops,
- .release = video_device_release,
-};
-
-static int saa5249_g_chip_ident(struct v4l2_subdev *sd, struct v4l2_dbg_chip_ident *chip)
-{
- struct i2c_client *client = v4l2_get_subdevdata(sd);
-
- return v4l2_chip_ident_i2c_client(client, chip, V4L2_IDENT_SAA5249, 0);
-}
-
-static const struct v4l2_subdev_core_ops saa5249_core_ops = {
- .g_chip_ident = saa5249_g_chip_ident,
-};
-
-static const struct v4l2_subdev_ops saa5249_ops = {
- .core = &saa5249_core_ops,
-};
-
-static int saa5249_probe(struct i2c_client *client,
- const struct i2c_device_id *id)
-{
- int pgbuf;
- int err;
- struct saa5249_device *t;
- struct v4l2_subdev *sd;
-
- v4l_info(client, "chip found @ 0x%x (%s)\n",
- client->addr << 1, client->adapter->name);
- v4l_info(client, "VideoText version %d.%d\n",
- VTX_VER_MAJ, VTX_VER_MIN);
- t = kzalloc(sizeof(*t), GFP_KERNEL);
- if (t == NULL)
- return -ENOMEM;
- sd = &t->sd;
- v4l2_i2c_subdev_init(sd, client, &saa5249_ops);
- mutex_init(&t->lock);
-
- /* Now create a video4linux device */
- t->vdev = video_device_alloc();
- if (t->vdev == NULL) {
- kfree(t);
- kfree(client);
- return -ENOMEM;
- }
- memcpy(t->vdev, &saa_template, sizeof(*t->vdev));
-
- for (pgbuf = 0; pgbuf < NUM_DAUS; pgbuf++) {
- memset(t->vdau[pgbuf].pgbuf, ' ', sizeof(t->vdau[0].pgbuf));
- memset(t->vdau[pgbuf].sregs, 0, sizeof(t->vdau[0].sregs));
- memset(t->vdau[pgbuf].laststat, 0, sizeof(t->vdau[0].laststat));
- t->vdau[pgbuf].expire = 0;
- t->vdau[pgbuf].clrfound = true;
- t->vdau[pgbuf].stopped = true;
- t->is_searching[pgbuf] = false;
- }
- video_set_drvdata(t->vdev, t);
-
- /* Register it */
- err = video_register_device(t->vdev, VFL_TYPE_VTX, -1);
- if (err < 0) {
- video_device_release(t->vdev);
- kfree(t);
- return err;
- }
- return 0;
-}
-
-static int saa5249_remove(struct i2c_client *client)
-{
- struct v4l2_subdev *sd = i2c_get_clientdata(client);
- struct saa5249_device *t = to_dev(sd);
-
- video_unregister_device(t->vdev);
- v4l2_device_unregister_subdev(sd);
- kfree(t);
- return 0;
-}
-
-static const struct i2c_device_id saa5249_id[] = {
- { "saa5249", 0 },
- { }
-};
-MODULE_DEVICE_TABLE(i2c, saa5249_id);
-
-static struct v4l2_i2c_driver_data v4l2_i2c_data = {
- .name = "saa5249",
- .probe = saa5249_probe,
- .remove = saa5249_remove,
- .id_table = saa5249_id,
-};
diff --git a/drivers/media/video/saa6588.c b/drivers/media/video/saa6588.c
index c3e96f070973..984c0feb2a4e 100644
--- a/drivers/media/video/saa6588.c
+++ b/drivers/media/video/saa6588.c
@@ -34,7 +34,6 @@
#include <media/rds.h>
#include <media/v4l2-device.h>
#include <media/v4l2-chip-ident.h>
-#include <media/v4l2-i2c-drv.h>
/* insmod options */
@@ -430,7 +429,7 @@ static int saa6588_g_tuner(struct v4l2_subdev *sd, struct v4l2_tuner *vt)
{
struct saa6588 *s = to_saa6588(sd);
- vt->capability |= V4L2_TUNER_CAP_RDS;
+ vt->capability |= V4L2_TUNER_CAP_RDS | V4L2_TUNER_CAP_RDS_BLOCK_IO;
if (s->sync)
vt->rxsubchans |= V4L2_TUNER_SUB_RDS;
return 0;
@@ -530,9 +529,25 @@ static const struct i2c_device_id saa6588_id[] = {
};
MODULE_DEVICE_TABLE(i2c, saa6588_id);
-static struct v4l2_i2c_driver_data v4l2_i2c_data = {
- .name = "saa6588",
- .probe = saa6588_probe,
- .remove = saa6588_remove,
- .id_table = saa6588_id,
+static struct i2c_driver saa6588_driver = {
+ .driver = {
+ .owner = THIS_MODULE,
+ .name = "saa6588",
+ },
+ .probe = saa6588_probe,
+ .remove = saa6588_remove,
+ .id_table = saa6588_id,
};
+
+static __init int init_saa6588(void)
+{
+ return i2c_add_driver(&saa6588_driver);
+}
+
+static __exit void exit_saa6588(void)
+{
+ i2c_del_driver(&saa6588_driver);
+}
+
+module_init(init_saa6588);
+module_exit(exit_saa6588);
diff --git a/drivers/media/video/saa7110.c b/drivers/media/video/saa7110.c
index 3bca744e43af..7913f93979b8 100644
--- a/drivers/media/video/saa7110.c
+++ b/drivers/media/video/saa7110.c
@@ -36,7 +36,6 @@
#include <linux/videodev2.h>
#include <media/v4l2-device.h>
#include <media/v4l2-chip-ident.h>
-#include <media/v4l2-i2c-drv.h>
MODULE_DESCRIPTION("Philips SAA7110 video decoder driver");
MODULE_AUTHOR("Pauline Middelink");
@@ -505,9 +504,25 @@ static const struct i2c_device_id saa7110_id[] = {
};
MODULE_DEVICE_TABLE(i2c, saa7110_id);
-static struct v4l2_i2c_driver_data v4l2_i2c_data = {
- .name = "saa7110",
- .probe = saa7110_probe,
- .remove = saa7110_remove,
- .id_table = saa7110_id,
+static struct i2c_driver saa7110_driver = {
+ .driver = {
+ .owner = THIS_MODULE,
+ .name = "saa7110",
+ },
+ .probe = saa7110_probe,
+ .remove = saa7110_remove,
+ .id_table = saa7110_id,
};
+
+static __init int init_saa7110(void)
+{
+ return i2c_add_driver(&saa7110_driver);
+}
+
+static __exit void exit_saa7110(void)
+{
+ i2c_del_driver(&saa7110_driver);
+}
+
+module_init(init_saa7110);
+module_exit(exit_saa7110);
diff --git a/drivers/media/video/saa7115.c b/drivers/media/video/saa7115.c
index ee963f4d01bc..301c62b88cad 100644
--- a/drivers/media/video/saa7115.c
+++ b/drivers/media/video/saa7115.c
@@ -47,7 +47,6 @@
#include <media/v4l2-device.h>
#include <media/v4l2-ctrls.h>
#include <media/v4l2-chip-ident.h>
-#include <media/v4l2-i2c-drv.h>
#include <media/saa7115.h>
#include <asm/div64.h>
@@ -1676,7 +1675,7 @@ static int saa711x_remove(struct i2c_client *client)
return 0;
}
-static const struct i2c_device_id saa7115_id[] = {
+static const struct i2c_device_id saa711x_id[] = {
{ "saa7115_auto", 1 }, /* autodetect */
{ "saa7111", 0 },
{ "saa7113", 0 },
@@ -1685,11 +1684,27 @@ static const struct i2c_device_id saa7115_id[] = {
{ "saa7118", 0 },
{ }
};
-MODULE_DEVICE_TABLE(i2c, saa7115_id);
-
-static struct v4l2_i2c_driver_data v4l2_i2c_data = {
- .name = "saa7115",
- .probe = saa711x_probe,
- .remove = saa711x_remove,
- .id_table = saa7115_id,
+MODULE_DEVICE_TABLE(i2c, saa711x_id);
+
+static struct i2c_driver saa711x_driver = {
+ .driver = {
+ .owner = THIS_MODULE,
+ .name = "saa7115",
+ },
+ .probe = saa711x_probe,
+ .remove = saa711x_remove,
+ .id_table = saa711x_id,
};
+
+static __init int init_saa711x(void)
+{
+ return i2c_add_driver(&saa711x_driver);
+}
+
+static __exit void exit_saa711x(void)
+{
+ i2c_del_driver(&saa711x_driver);
+}
+
+module_init(init_saa711x);
+module_exit(exit_saa711x);
diff --git a/drivers/media/video/saa7127.c b/drivers/media/video/saa7127.c
index 79fffcf39ba8..ad964616c9d2 100644
--- a/drivers/media/video/saa7127.c
+++ b/drivers/media/video/saa7127.c
@@ -55,7 +55,6 @@
#include <linux/videodev2.h>
#include <media/v4l2-device.h>
#include <media/v4l2-chip-ident.h>
-#include <media/v4l2-i2c-drv.h>
#include <media/saa7127.h>
static int debug;
@@ -843,9 +842,25 @@ static struct i2c_device_id saa7127_id[] = {
};
MODULE_DEVICE_TABLE(i2c, saa7127_id);
-static struct v4l2_i2c_driver_data v4l2_i2c_data = {
- .name = "saa7127",
- .probe = saa7127_probe,
- .remove = saa7127_remove,
- .id_table = saa7127_id,
+static struct i2c_driver saa7127_driver = {
+ .driver = {
+ .owner = THIS_MODULE,
+ .name = "saa7127",
+ },
+ .probe = saa7127_probe,
+ .remove = saa7127_remove,
+ .id_table = saa7127_id,
};
+
+static __init int init_saa7127(void)
+{
+ return i2c_add_driver(&saa7127_driver);
+}
+
+static __exit void exit_saa7127(void)
+{
+ i2c_del_driver(&saa7127_driver);
+}
+
+module_init(init_saa7127);
+module_exit(exit_saa7127);
diff --git a/drivers/media/video/saa7134/Kconfig b/drivers/media/video/saa7134/Kconfig
index fda005e01670..3fe71be41a1f 100644
--- a/drivers/media/video/saa7134/Kconfig
+++ b/drivers/media/video/saa7134/Kconfig
@@ -1,8 +1,7 @@
config VIDEO_SAA7134
tristate "Philips SAA7134 support"
- depends on VIDEO_DEV && PCI && I2C && INPUT
+ depends on VIDEO_DEV && PCI && I2C
select VIDEOBUF_DMA_SG
- depends on VIDEO_IR
select VIDEO_TUNER
select VIDEO_TVEEPROM
select CRC32
@@ -25,6 +24,14 @@ config VIDEO_SAA7134_ALSA
To compile this driver as a module, choose M here: the
module will be called saa7134-alsa.
+config VIDEO_SAA7134_RC
+ bool "Philips SAA7134 Remote Controller support"
+ depends on VIDEO_IR
+ depends on VIDEO_SAA7134
+ default y
+ ---help---
+ Enables Remote Controller support on saa7134 driver.
+
config VIDEO_SAA7134_DVB
tristate "DVB/ATSC Support for saa7134 based TV cards"
depends on VIDEO_SAA7134 && DVB_CORE
diff --git a/drivers/media/video/saa7134/Makefile b/drivers/media/video/saa7134/Makefile
index 604158a8c235..8a5ff4d3cf15 100644
--- a/drivers/media/video/saa7134/Makefile
+++ b/drivers/media/video/saa7134/Makefile
@@ -1,7 +1,8 @@
-saa7134-objs := saa7134-cards.o saa7134-core.o saa7134-i2c.o \
- saa7134-ts.o saa7134-tvaudio.o saa7134-vbi.o \
- saa7134-video.o saa7134-input.o
+saa7134-y := saa7134-cards.o saa7134-core.o saa7134-i2c.o
+saa7134-y += saa7134-ts.o saa7134-tvaudio.o saa7134-vbi.o
+saa7134-y += saa7134-video.o
+saa7134-$(CONFIG_VIDEO_SAA7134_RC) += saa7134-input.o
obj-$(CONFIG_VIDEO_SAA7134) += saa6752hs.o saa7134.o saa7134-empress.o
diff --git a/drivers/media/video/saa7134/saa6752hs.c b/drivers/media/video/saa7134/saa6752hs.c
index 40fd31ca7716..f9f29cc93a8a 100644
--- a/drivers/media/video/saa7134/saa6752hs.c
+++ b/drivers/media/video/saa7134/saa6752hs.c
@@ -36,7 +36,6 @@
#include <media/v4l2-device.h>
#include <media/v4l2-common.h>
#include <media/v4l2-chip-ident.h>
-#include <media/v4l2-i2c-drv.h>
#include <linux/init.h>
#include <linux/crc32.h>
@@ -992,13 +991,29 @@ static const struct i2c_device_id saa6752hs_id[] = {
};
MODULE_DEVICE_TABLE(i2c, saa6752hs_id);
-static struct v4l2_i2c_driver_data v4l2_i2c_data = {
- .name = "saa6752hs",
- .probe = saa6752hs_probe,
- .remove = saa6752hs_remove,
- .id_table = saa6752hs_id,
+static struct i2c_driver saa6752hs_driver = {
+ .driver = {
+ .owner = THIS_MODULE,
+ .name = "saa6752hs",
+ },
+ .probe = saa6752hs_probe,
+ .remove = saa6752hs_remove,
+ .id_table = saa6752hs_id,
};
+static __init int init_saa6752hs(void)
+{
+ return i2c_add_driver(&saa6752hs_driver);
+}
+
+static __exit void exit_saa6752hs(void)
+{
+ i2c_del_driver(&saa6752hs_driver);
+}
+
+module_init(init_saa6752hs);
+module_exit(exit_saa6752hs);
+
/*
* Overrides for Emacs so that we follow Linus's tabbing style.
* ---------------------------------------------------------------------------
diff --git a/drivers/media/video/saa7134/saa7134-cards.c b/drivers/media/video/saa7134/saa7134-cards.c
index bb8d83d8ddaf..1d4d0a49ea52 100644
--- a/drivers/media/video/saa7134/saa7134-cards.c
+++ b/drivers/media/video/saa7134/saa7134-cards.c
@@ -6661,6 +6661,18 @@ struct pci_device_id saa7134_pci_tbl[] = {
.subdevice = 0x2804,
.driver_data = SAA7134_BOARD_TECHNOTREND_BUDGET_T3000,
}, {
+ .vendor = PCI_VENDOR_ID_PHILIPS,
+ .device = PCI_DEVICE_ID_PHILIPS_SAA7133,
+ .subvendor = 0x5ace, /* Beholder Intl. Ltd. */
+ .subdevice = 0x7190,
+ .driver_data = SAA7134_BOARD_BEHOLD_H7,
+ }, {
+ .vendor = PCI_VENDOR_ID_PHILIPS,
+ .device = PCI_DEVICE_ID_PHILIPS_SAA7133,
+ .subvendor = 0x5ace, /* Beholder Intl. Ltd. */
+ .subdevice = 0x7090,
+ .driver_data = SAA7134_BOARD_BEHOLD_A7,
+ }, {
/* --- boards without eeprom + subsystem ID --- */
.vendor = PCI_VENDOR_ID_PHILIPS,
.device = PCI_DEVICE_ID_PHILIPS_SAA7134,
@@ -6698,18 +6710,6 @@ struct pci_device_id saa7134_pci_tbl[] = {
.subvendor = PCI_ANY_ID,
.subdevice = PCI_ANY_ID,
.driver_data = SAA7134_BOARD_UNKNOWN,
- }, {
- .vendor = PCI_VENDOR_ID_PHILIPS,
- .device = PCI_DEVICE_ID_PHILIPS_SAA7133,
- .subvendor = 0x5ace, /* Beholder Intl. Ltd. */
- .subdevice = 0x7190,
- .driver_data = SAA7134_BOARD_BEHOLD_H7,
- }, {
- .vendor = PCI_VENDOR_ID_PHILIPS,
- .device = PCI_DEVICE_ID_PHILIPS_SAA7133,
- .subvendor = 0x5ace, /* Beholder Intl. Ltd. */
- .subdevice = 0x7090,
- .driver_data = SAA7134_BOARD_BEHOLD_A7,
},{
/* --- end of list --- */
}
@@ -7551,22 +7551,22 @@ int saa7134_board_init2(struct saa7134_dev *dev)
so we do not need to probe for a radio tuner device. */
if (dev->radio_type != UNSET)
v4l2_i2c_new_subdev(&dev->v4l2_dev,
- &dev->i2c_adap, "tuner", "tuner",
+ &dev->i2c_adap, "tuner",
dev->radio_addr, NULL);
if (has_demod)
v4l2_i2c_new_subdev(&dev->v4l2_dev,
- &dev->i2c_adap, "tuner", "tuner",
+ &dev->i2c_adap, "tuner",
0, v4l2_i2c_tuner_addrs(ADDRS_DEMOD));
if (dev->tuner_addr == ADDR_UNSET) {
enum v4l2_i2c_tuner_type type =
has_demod ? ADDRS_TV_WITH_DEMOD : ADDRS_TV;
v4l2_i2c_new_subdev(&dev->v4l2_dev,
- &dev->i2c_adap, "tuner", "tuner",
+ &dev->i2c_adap, "tuner",
0, v4l2_i2c_tuner_addrs(type));
} else {
v4l2_i2c_new_subdev(&dev->v4l2_dev,
- &dev->i2c_adap, "tuner", "tuner",
+ &dev->i2c_adap, "tuner",
dev->tuner_addr, NULL);
}
}
diff --git a/drivers/media/video/saa7134/saa7134-core.c b/drivers/media/video/saa7134/saa7134-core.c
index 40bc635e8a3f..756a27812260 100644
--- a/drivers/media/video/saa7134/saa7134-core.c
+++ b/drivers/media/video/saa7134/saa7134-core.c
@@ -255,7 +255,7 @@ void saa7134_dma_free(struct videobuf_queue *q,struct saa7134_buf *buf)
struct videobuf_dmabuf *dma=videobuf_to_dma(&buf->vb);
BUG_ON(in_interrupt());
- videobuf_waiton(&buf->vb,0,0);
+ videobuf_waiton(q, &buf->vb, 0, 0);
videobuf_dma_unmap(q->dev, dma);
videobuf_dma_free(dma);
buf->vb.state = VIDEOBUF_NEEDS_INIT;
@@ -991,7 +991,7 @@ static int __devinit saa7134_initdev(struct pci_dev *pci_dev,
if (card_is_empress(dev)) {
struct v4l2_subdev *sd =
v4l2_i2c_new_subdev(&dev->v4l2_dev, &dev->i2c_adap,
- "saa6752hs", "saa6752hs",
+ "saa6752hs",
saa7134_boards[dev->board].empress_addr, NULL);
if (sd)
@@ -1002,7 +1002,7 @@ static int __devinit saa7134_initdev(struct pci_dev *pci_dev,
struct v4l2_subdev *sd;
sd = v4l2_i2c_new_subdev(&dev->v4l2_dev,
- &dev->i2c_adap, "saa6588", "saa6588",
+ &dev->i2c_adap, "saa6588",
0, I2C_ADDRS(saa7134_boards[dev->board].rds_addr));
if (sd) {
printk(KERN_INFO "%s: found RDS decoder\n", dev->name);
diff --git a/drivers/media/video/saa7134/saa7134-dvb.c b/drivers/media/video/saa7134/saa7134-dvb.c
index f26fe7661a1d..beb95e21d109 100644
--- a/drivers/media/video/saa7134/saa7134-dvb.c
+++ b/drivers/media/video/saa7134/saa7134-dvb.c
@@ -1111,7 +1111,7 @@ static int dvb_init(struct saa7134_dev *dev)
V4L2_BUF_TYPE_VIDEO_CAPTURE,
V4L2_FIELD_ALTERNATE,
sizeof(struct saa7134_buf),
- dev);
+ dev, NULL);
switch (dev->board) {
case SAA7134_BOARD_PINNACLE_300I_DVBT_PAL:
diff --git a/drivers/media/video/saa7134/saa7134-empress.c b/drivers/media/video/saa7134/saa7134-empress.c
index e763f9fd0133..b890aafe7d64 100644
--- a/drivers/media/video/saa7134/saa7134-empress.c
+++ b/drivers/media/video/saa7134/saa7134-empress.c
@@ -21,7 +21,6 @@
#include <linux/list.h>
#include <linux/module.h>
#include <linux/kernel.h>
-#include <linux/smp_lock.h>
#include <linux/delay.h>
#include "saa7134-reg.h"
@@ -542,7 +541,7 @@ static int empress_init(struct saa7134_dev *dev)
V4L2_BUF_TYPE_VIDEO_CAPTURE,
V4L2_FIELD_ALTERNATE,
sizeof(struct saa7134_buf),
- dev);
+ dev, NULL);
empress_signal_update(&dev->empress_workqueue);
return 0;
diff --git a/drivers/media/video/saa7134/saa7134-i2c.c b/drivers/media/video/saa7134/saa7134-i2c.c
index da41b6b1e64a..2d3f6d265bbf 100644
--- a/drivers/media/video/saa7134/saa7134-i2c.c
+++ b/drivers/media/video/saa7134/saa7134-i2c.c
@@ -328,7 +328,6 @@ static struct i2c_algorithm saa7134_algo = {
static struct i2c_adapter saa7134_adap_template = {
.owner = THIS_MODULE,
.name = "saa7134",
- .id = I2C_HW_SAA7134,
.algo = &saa7134_algo,
};
diff --git a/drivers/media/video/saa7134/saa7134-input.c b/drivers/media/video/saa7134/saa7134-input.c
index 0b336ca6d55b..46d31dfca7a3 100644
--- a/drivers/media/video/saa7134/saa7134-input.c
+++ b/drivers/media/video/saa7134/saa7134-input.c
@@ -429,7 +429,7 @@ static void saa7134_input_timer(unsigned long data)
mod_timer(&ir->timer, jiffies + msecs_to_jiffies(ir->polling));
}
-void ir_raw_decode_timer_end(unsigned long data)
+static void ir_raw_decode_timer_end(unsigned long data)
{
struct saa7134_dev *dev = (struct saa7134_dev *)data;
struct card_ir *ir = dev->remote;
@@ -550,7 +550,7 @@ static void saa7134_ir_close(void *priv)
}
-int saa7134_ir_change_protocol(void *priv, u64 ir_type)
+static int saa7134_ir_change_protocol(void *priv, u64 ir_type)
{
struct saa7134_dev *dev = priv;
struct card_ir *ir = dev->remote;
@@ -772,8 +772,10 @@ int saa7134_input_init1(struct saa7134_dev *dev)
case SAA7134_BOARD_ASUSTeK_P7131_HYBRID_LNA:
case SAA7134_BOARD_ASUSTeK_P7131_ANALOG:
ir_codes = RC_MAP_ASUS_PC39;
- mask_keydown = 0x0040000;
- rc5_gpio = 1;
+ mask_keydown = 0x0040000; /* Enable GPIO18 line on both edges */
+ mask_keyup = 0x0040000;
+ mask_keycode = 0xffff;
+ raw_decode = 1;
break;
case SAA7134_BOARD_ENCORE_ENLTV:
case SAA7134_BOARD_ENCORE_ENLTV_FM:
@@ -959,6 +961,11 @@ void saa7134_probe_i2c_ir(struct saa7134_dev *dev)
dev->init_data.name = "MSI TV@nywhere Plus";
dev->init_data.get_key = get_key_msi_tvanywhere_plus;
dev->init_data.ir_codes = RC_MAP_MSI_TVANYWHERE_PLUS;
+ /*
+ * MSI TV@nywhere Plus requires more frequent polling,
+ * otherwise it will miss some keypresses
+ */
+ dev->init_data.polling_interval = 50;
info.addr = 0x30;
/* MSI TV@nywhere Plus controller doesn't seem to
respond to probes unless we read something from
diff --git a/drivers/media/video/saa7134/saa7134-video.c b/drivers/media/video/saa7134/saa7134-video.c
index 45f0ac8f3c0f..f0b1573137f4 100644
--- a/drivers/media/video/saa7134/saa7134-video.c
+++ b/drivers/media/video/saa7134/saa7134-video.c
@@ -1366,13 +1366,13 @@ static int video_open(struct file *file)
V4L2_BUF_TYPE_VIDEO_CAPTURE,
V4L2_FIELD_INTERLACED,
sizeof(struct saa7134_buf),
- fh);
+ fh, NULL);
videobuf_queue_sg_init(&fh->vbi, &saa7134_vbi_qops,
&dev->pci->dev, &dev->slock,
V4L2_BUF_TYPE_VBI_CAPTURE,
V4L2_FIELD_SEQ_TB,
sizeof(struct saa7134_buf),
- fh);
+ fh, NULL);
saa7134_pgtable_alloc(dev->pci,&fh->pt_cap);
saa7134_pgtable_alloc(dev->pci,&fh->pt_vbi);
@@ -1825,7 +1825,7 @@ static int saa7134_querycap(struct file *file, void *priv,
if ((tuner_type == TUNER_ABSENT) || (tuner_type == UNSET))
cap->capabilities &= ~V4L2_CAP_TUNER;
- return 0;
+ return 0;
}
int saa7134_s_std_internal(struct saa7134_dev *dev, struct saa7134_fh *fh, v4l2_std_id *id)
@@ -1871,9 +1871,12 @@ int saa7134_s_std_internal(struct saa7134_dev *dev, struct saa7134_fh *fh, v4l2_
else
fixup = V4L2_STD_SECAM;
}
- for (i = 0; i < TVNORMS; i++)
+ for (i = 0; i < TVNORMS; i++) {
if (fixup == tvnorms[i].id)
break;
+ }
+ if (i == TVNORMS)
+ return -EINVAL;
}
*id = tvnorms[i].id;
@@ -1997,9 +2000,12 @@ static int saa7134_g_tuner(struct file *file, void *priv,
if (0 != t->index)
return -EINVAL;
memset(t, 0, sizeof(*t));
- for (n = 0; n < SAA7134_INPUT_MAX; n++)
+ for (n = 0; n < SAA7134_INPUT_MAX; n++) {
if (card_in(dev, n).tv)
break;
+ }
+ if (n == SAA7134_INPUT_MAX)
+ return -EINVAL;
if (NULL != card_in(dev, n).name) {
strcpy(t->name, "Television");
t->type = V4L2_TUNER_ANALOG_TV;
diff --git a/drivers/media/video/saa7134/saa7134.h b/drivers/media/video/saa7134/saa7134.h
index c040a1808542..d3b6a196e5dc 100644
--- a/drivers/media/video/saa7134/saa7134.h
+++ b/drivers/media/video/saa7134/saa7134.h
@@ -810,16 +810,18 @@ void saa7134_irq_oss_done(struct saa7134_dev *dev, unsigned long status);
/* ----------------------------------------------------------- */
/* saa7134-input.c */
+#if defined(CONFIG_VIDEO_SAA7134_RC)
int saa7134_input_init1(struct saa7134_dev *dev);
void saa7134_input_fini(struct saa7134_dev *dev);
void saa7134_input_irq(struct saa7134_dev *dev);
void saa7134_probe_i2c_ir(struct saa7134_dev *dev);
int saa7134_ir_start(struct saa7134_dev *dev);
void saa7134_ir_stop(struct saa7134_dev *dev);
-
-
-/*
- * Local variables:
- * c-basic-offset: 8
- * End:
- */
+#else
+#define saa7134_input_init1(dev) (0)
+#define saa7134_input_fini(dev) (0)
+#define saa7134_input_irq(dev) (0)
+#define saa7134_probe_i2c_ir(dev) (0)
+#define saa7134_ir_start(dev) (0)
+#define saa7134_ir_stop(dev) (0)
+#endif
diff --git a/drivers/media/video/saa7164/Makefile b/drivers/media/video/saa7164/Makefile
index 4b329fd42add..6303a8e60eac 100644
--- a/drivers/media/video/saa7164/Makefile
+++ b/drivers/media/video/saa7164/Makefile
@@ -1,6 +1,6 @@
saa7164-objs := saa7164-cards.o saa7164-core.o saa7164-i2c.o saa7164-dvb.o \
saa7164-fw.o saa7164-bus.o saa7164-cmd.o saa7164-api.o \
- saa7164-buffer.o
+ saa7164-buffer.o saa7164-encoder.o saa7164-vbi.o
obj-$(CONFIG_VIDEO_SAA7164) += saa7164.o
diff --git a/drivers/media/video/saa7164/saa7164-api.c b/drivers/media/video/saa7164/saa7164-api.c
index 3f1262b00cc0..ad3bc4154176 100644
--- a/drivers/media/video/saa7164/saa7164-api.c
+++ b/drivers/media/video/saa7164/saa7164-api.c
@@ -1,7 +1,7 @@
/*
* Driver for the NXP SAA7164 PCIe bridge
*
- * Copyright (c) 2009 Steven Toth <stoth@kernellabs.com>
+ * Copyright (c) 2010 Steven Toth <stoth@kernellabs.com>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
@@ -24,14 +24,750 @@
#include "saa7164.h"
-int saa7164_api_transition_port(struct saa7164_tsport *port, u8 mode)
+int saa7164_api_get_load_info(struct saa7164_dev *dev, struct tmFwInfoStruct *i)
{
int ret;
+ if (!(saa_debug & DBGLVL_CPU))
+ return 0;
+
+ dprintk(DBGLVL_API, "%s()\n", __func__);
+
+ i->deviceinst = 0;
+ i->devicespec = 0;
+ i->mode = 0;
+ i->status = 0;
+
+ ret = saa7164_cmd_send(dev, 0, GET_CUR,
+ GET_FW_STATUS_CONTROL, sizeof(struct tmFwInfoStruct), i);
+ if (ret != SAA_OK) {
+ printk(KERN_ERR "%s() error, ret = 0x%x\n", __func__, ret);
+ }
+
+ printk(KERN_INFO "saa7164[%d]-CPU: %d percent", dev->nr, i->CPULoad);
+
+ return ret;
+}
+
+int saa7164_api_collect_debug(struct saa7164_dev *dev)
+{
+ struct tmComResDebugGetData d;
+ u8 more = 255;
+ int ret;
+
+ dprintk(DBGLVL_API, "%s()\n", __func__);
+
+ while (more--) {
+
+ memset(&d, 0, sizeof(d));
+
+ ret = saa7164_cmd_send(dev, 0, GET_CUR,
+ GET_DEBUG_DATA_CONTROL, sizeof(d), &d);
+ if (ret != SAA_OK) {
+ printk(KERN_ERR "%s() error, ret = 0x%x\n", __func__, ret);
+ }
+
+ if (d.dwResult != SAA_OK)
+ break;
+
+ printk(KERN_INFO "saa7164[%d]-FWMSG: %s", dev->nr, d.ucDebugData);
+ }
+
+ return 0;
+}
+
+int saa7164_api_set_debug(struct saa7164_dev *dev, u8 level)
+{
+ struct tmComResDebugSetLevel lvl;
+ int ret;
+
+ dprintk(DBGLVL_API, "%s(level=%d)\n", __func__, level);
+
+ /* Retrieve current state */
+ ret = saa7164_cmd_send(dev, 0, GET_CUR,
+ SET_DEBUG_LEVEL_CONTROL, sizeof(lvl), &lvl);
+ if (ret != SAA_OK) {
+ printk(KERN_ERR "%s() error, ret = 0x%x\n", __func__, ret);
+ }
+ dprintk(DBGLVL_API, "%s() Was %d\n", __func__, lvl.dwDebugLevel);
+
+ lvl.dwDebugLevel = level;
+
+ /* set new state */
+ ret = saa7164_cmd_send(dev, 0, SET_CUR,
+ SET_DEBUG_LEVEL_CONTROL, sizeof(lvl), &lvl);
+ if (ret != SAA_OK) {
+ printk(KERN_ERR "%s() error, ret = 0x%x\n", __func__, ret);
+ }
+
+ return ret;
+}
+
+int saa7164_api_set_vbi_format(struct saa7164_port *port)
+{
+ struct saa7164_dev *dev = port->dev;
+ struct tmComResProbeCommit fmt, rsp;
+ int ret;
+
+ dprintk(DBGLVL_API, "%s(nr=%d, unitid=0x%x)\n", __func__,
+ port->nr, port->hwcfg.unitid);
+
+ fmt.bmHint = 0;
+ fmt.bFormatIndex = 1;
+ fmt.bFrameIndex = 1;
+
+ /* Probe, see if it can support this format */
+ ret = saa7164_cmd_send(port->dev, port->hwcfg.unitid,
+ SET_CUR, SAA_PROBE_CONTROL, sizeof(fmt), &fmt);
+ if (ret != SAA_OK)
+ printk(KERN_ERR "%s() set error, ret = 0x%x\n", __func__, ret);
+
+ /* See if the format change was successful */
+ ret = saa7164_cmd_send(port->dev, port->hwcfg.unitid,
+ GET_CUR, SAA_PROBE_CONTROL, sizeof(rsp), &rsp);
+ if (ret != SAA_OK) {
+ printk(KERN_ERR "%s() get error, ret = 0x%x\n", __func__, ret);
+ } else {
+ /* Compare requested vs received, should be same */
+ if (memcmp(&fmt, &rsp, sizeof(rsp)) == 0) {
+ dprintk(DBGLVL_API, "SET/PROBE Verified\n");
+
+ /* Ask the device to select the negotiated format */
+ ret = saa7164_cmd_send(port->dev, port->hwcfg.unitid,
+ SET_CUR, SAA_COMMIT_CONTROL, sizeof(fmt), &fmt);
+ if (ret != SAA_OK)
+ printk(KERN_ERR "%s() commit error, ret = 0x%x\n",
+ __func__, ret);
+
+ ret = saa7164_cmd_send(port->dev, port->hwcfg.unitid,
+ GET_CUR, SAA_COMMIT_CONTROL, sizeof(rsp), &rsp);
+ if (ret != SAA_OK)
+ printk(KERN_ERR "%s() GET commit error, ret = 0x%x\n",
+ __func__, ret);
+
+ if (memcmp(&fmt, &rsp, sizeof(rsp)) != 0) {
+ printk(KERN_ERR "%s() memcmp error, ret = 0x%x\n",
+ __func__, ret);
+ } else
+ dprintk(DBGLVL_API, "SET/COMMIT Verified\n");
+
+ dprintk(DBGLVL_API, "rsp.bmHint = 0x%x\n", rsp.bmHint);
+ dprintk(DBGLVL_API, "rsp.bFormatIndex = 0x%x\n", rsp.bFormatIndex);
+ dprintk(DBGLVL_API, "rsp.bFrameIndex = 0x%x\n", rsp.bFrameIndex);
+ } else
+ printk(KERN_ERR "%s() compare failed\n", __func__);
+ }
+
+ if (ret == SAA_OK)
+ dprintk(DBGLVL_API, "%s(nr=%d) Success\n", __func__, port->nr);
+
+ return ret;
+}
+
+int saa7164_api_set_gop_size(struct saa7164_port *port)
+{
+ struct saa7164_dev *dev = port->dev;
+ struct tmComResEncVideoGopStructure gs;
+ int ret;
+
+ dprintk(DBGLVL_ENC, "%s()\n", __func__);
+
+ gs.ucRefFrameDist = port->encoder_params.refdist;
+ gs.ucGOPSize = port->encoder_params.gop_size;
+ ret = saa7164_cmd_send(port->dev, port->hwcfg.sourceid, SET_CUR,
+ EU_VIDEO_GOP_STRUCTURE_CONTROL,
+ sizeof(gs), &gs);
+ if (ret != SAA_OK)
+ printk(KERN_ERR "%s() error, ret = 0x%x\n", __func__, ret);
+
+ return ret;
+}
+
+int saa7164_api_set_encoder(struct saa7164_port *port)
+{
+ struct saa7164_dev *dev = port->dev;
+ struct tmComResEncVideoBitRate vb;
+ struct tmComResEncAudioBitRate ab;
+ int ret;
+
+ dprintk(DBGLVL_ENC, "%s() unitid=0x%x\n", __func__,
+ port->hwcfg.sourceid);
+
+ if (port->encoder_params.stream_type == V4L2_MPEG_STREAM_TYPE_MPEG2_PS)
+ port->encoder_profile = EU_PROFILE_PS_DVD;
+ else
+ port->encoder_profile = EU_PROFILE_TS_HQ;
+
+ ret = saa7164_cmd_send(port->dev, port->hwcfg.sourceid, SET_CUR,
+ EU_PROFILE_CONTROL, sizeof(u8), &port->encoder_profile);
+ if (ret != SAA_OK)
+ printk(KERN_ERR "%s() error, ret = 0x%x\n", __func__, ret);
+
+ /* Resolution */
+ ret = saa7164_cmd_send(port->dev, port->hwcfg.sourceid, SET_CUR,
+ EU_PROFILE_CONTROL, sizeof(u8), &port->encoder_profile);
+ if (ret != SAA_OK)
+ printk(KERN_ERR "%s() error, ret = 0x%x\n", __func__, ret);
+
+ /* Establish video bitrates */
+ if (port->encoder_params.bitrate_mode == V4L2_MPEG_VIDEO_BITRATE_MODE_CBR)
+ vb.ucVideoBitRateMode = EU_VIDEO_BIT_RATE_MODE_CONSTANT;
+ else
+ vb.ucVideoBitRateMode = EU_VIDEO_BIT_RATE_MODE_VARIABLE_PEAK;
+ vb.dwVideoBitRate = port->encoder_params.bitrate;
+ vb.dwVideoBitRatePeak = port->encoder_params.bitrate_peak;
+ ret = saa7164_cmd_send(port->dev, port->hwcfg.sourceid, SET_CUR,
+ EU_VIDEO_BIT_RATE_CONTROL, sizeof(struct tmComResEncVideoBitRate), &vb);
+ if (ret != SAA_OK)
+ printk(KERN_ERR "%s() error, ret = 0x%x\n", __func__, ret);
+
+ /* Establish audio bitrates */
+ ab.ucAudioBitRateMode = 0;
+ ab.dwAudioBitRate = 384000;
+ ab.dwAudioBitRatePeak = ab.dwAudioBitRate;
+ ret = saa7164_cmd_send(port->dev, port->hwcfg.sourceid, SET_CUR,
+ EU_AUDIO_BIT_RATE_CONTROL, sizeof(struct tmComResEncAudioBitRate), &ab);
+ if (ret != SAA_OK)
+ printk(KERN_ERR "%s() error, ret = 0x%x\n", __func__, ret);
+
+ saa7164_api_set_aspect_ratio(port);
+ saa7164_api_set_gop_size(port);
+
+ return ret;
+}
+
+int saa7164_api_get_encoder(struct saa7164_port *port)
+{
+ struct saa7164_dev *dev = port->dev;
+ struct tmComResEncVideoBitRate v;
+ struct tmComResEncAudioBitRate a;
+ struct tmComResEncVideoInputAspectRatio ar;
+ int ret;
+
+ dprintk(DBGLVL_ENC, "%s() unitid=0x%x\n", __func__, port->hwcfg.sourceid);
+
+ port->encoder_profile = 0;
+ port->video_format = 0;
+ port->video_resolution = 0;
+ port->audio_format = 0;
+
+ ret = saa7164_cmd_send(port->dev, port->hwcfg.sourceid, GET_CUR,
+ EU_PROFILE_CONTROL, sizeof(u8), &port->encoder_profile);
+ if (ret != SAA_OK)
+ printk(KERN_ERR "%s() error, ret = 0x%x\n", __func__, ret);
+
+ ret = saa7164_cmd_send(port->dev, port->hwcfg.sourceid, GET_CUR,
+ EU_VIDEO_RESOLUTION_CONTROL, sizeof(u8), &port->video_resolution);
+ if (ret != SAA_OK)
+ printk(KERN_ERR "%s() error, ret = 0x%x\n", __func__, ret);
+
+ ret = saa7164_cmd_send(port->dev, port->hwcfg.sourceid, GET_CUR,
+ EU_VIDEO_FORMAT_CONTROL, sizeof(u8), &port->video_format);
+ if (ret != SAA_OK)
+ printk(KERN_ERR "%s() error, ret = 0x%x\n", __func__, ret);
+
+ ret = saa7164_cmd_send(port->dev, port->hwcfg.sourceid, GET_CUR,
+ EU_VIDEO_BIT_RATE_CONTROL, sizeof(v), &v);
+ if (ret != SAA_OK)
+ printk(KERN_ERR "%s() error, ret = 0x%x\n", __func__, ret);
+
+ ret = saa7164_cmd_send(port->dev, port->hwcfg.sourceid, GET_CUR,
+ EU_AUDIO_FORMAT_CONTROL, sizeof(u8), &port->audio_format);
+ if (ret != SAA_OK)
+ printk(KERN_ERR "%s() error, ret = 0x%x\n", __func__, ret);
+
+ ret = saa7164_cmd_send(port->dev, port->hwcfg.sourceid, GET_CUR,
+ EU_AUDIO_BIT_RATE_CONTROL, sizeof(a), &a);
+ if (ret != SAA_OK)
+ printk(KERN_ERR "%s() error, ret = 0x%x\n", __func__, ret);
+
+ /* Aspect Ratio */
+ ar.width = 0;
+ ar.height = 0;
+ ret = saa7164_cmd_send(port->dev, port->hwcfg.sourceid, GET_CUR,
+ EU_VIDEO_INPUT_ASPECT_CONTROL,
+ sizeof(struct tmComResEncVideoInputAspectRatio), &ar);
+ if (ret != SAA_OK)
+ printk(KERN_ERR "%s() error, ret = 0x%x\n", __func__, ret);
+
+ dprintk(DBGLVL_ENC, "encoder_profile = %d\n", port->encoder_profile);
+ dprintk(DBGLVL_ENC, "video_format = %d\n", port->video_format);
+ dprintk(DBGLVL_ENC, "audio_format = %d\n", port->audio_format);
+ dprintk(DBGLVL_ENC, "video_resolution= %d\n", port->video_resolution);
+ dprintk(DBGLVL_ENC, "v.ucVideoBitRateMode = %d\n", v.ucVideoBitRateMode);
+ dprintk(DBGLVL_ENC, "v.dwVideoBitRate = %d\n", v.dwVideoBitRate);
+ dprintk(DBGLVL_ENC, "v.dwVideoBitRatePeak = %d\n", v.dwVideoBitRatePeak);
+ dprintk(DBGLVL_ENC, "a.ucVideoBitRateMode = %d\n", a.ucAudioBitRateMode);
+ dprintk(DBGLVL_ENC, "a.dwVideoBitRate = %d\n", a.dwAudioBitRate);
+ dprintk(DBGLVL_ENC, "a.dwVideoBitRatePeak = %d\n", a.dwAudioBitRatePeak);
+ dprintk(DBGLVL_ENC, "aspect.width / height = %d:%d\n", ar.width, ar.height);
+
+ return ret;
+}
+
+int saa7164_api_set_aspect_ratio(struct saa7164_port *port)
+{
+ struct saa7164_dev *dev = port->dev;
+ struct tmComResEncVideoInputAspectRatio ar;
+ int ret;
+
+ dprintk(DBGLVL_ENC, "%s(%d)\n", __func__,
+ port->encoder_params.ctl_aspect);
+
+ switch (port->encoder_params.ctl_aspect) {
+ case V4L2_MPEG_VIDEO_ASPECT_1x1:
+ ar.width = 1;
+ ar.height = 1;
+ break;
+ case V4L2_MPEG_VIDEO_ASPECT_4x3:
+ ar.width = 4;
+ ar.height = 3;
+ break;
+ case V4L2_MPEG_VIDEO_ASPECT_16x9:
+ ar.width = 16;
+ ar.height = 9;
+ break;
+ case V4L2_MPEG_VIDEO_ASPECT_221x100:
+ ar.width = 221;
+ ar.height = 100;
+ break;
+ default:
+ BUG();
+ }
+
+ dprintk(DBGLVL_ENC, "%s(%d) now %d:%d\n", __func__,
+ port->encoder_params.ctl_aspect,
+ ar.width, ar.height);
+
+ /* Aspect Ratio */
+ ret = saa7164_cmd_send(port->dev, port->hwcfg.sourceid, SET_CUR,
+ EU_VIDEO_INPUT_ASPECT_CONTROL,
+ sizeof(struct tmComResEncVideoInputAspectRatio), &ar);
+ if (ret != SAA_OK)
+ printk(KERN_ERR "%s() error, ret = 0x%x\n", __func__, ret);
+
+ return ret;
+}
+
+int saa7164_api_set_usercontrol(struct saa7164_port *port, u8 ctl)
+{
+ struct saa7164_dev *dev = port->dev;
+ int ret;
+ u16 val;
+
+ if (ctl == PU_BRIGHTNESS_CONTROL)
+ val = port->ctl_brightness;
+ else
+ if (ctl == PU_CONTRAST_CONTROL)
+ val = port->ctl_contrast;
+ else
+ if (ctl == PU_HUE_CONTROL)
+ val = port->ctl_hue;
+ else
+ if (ctl == PU_SATURATION_CONTROL)
+ val = port->ctl_saturation;
+ else
+ if (ctl == PU_SHARPNESS_CONTROL)
+ val = port->ctl_sharpness;
+ else
+ return -EINVAL;
+
+ dprintk(DBGLVL_ENC, "%s() unitid=0x%x ctl=%d, val=%d\n",
+ __func__, port->encunit.vsourceid, ctl, val);
+
+ ret = saa7164_cmd_send(port->dev, port->encunit.vsourceid, SET_CUR,
+ ctl, sizeof(u16), &val);
+ if (ret != SAA_OK)
+ printk(KERN_ERR "%s() error, ret = 0x%x\n", __func__, ret);
+
+ return ret;
+}
+
+int saa7164_api_get_usercontrol(struct saa7164_port *port, u8 ctl)
+{
+ struct saa7164_dev *dev = port->dev;
+ int ret;
+ u16 val;
+
+ ret = saa7164_cmd_send(port->dev, port->encunit.vsourceid, GET_CUR,
+ ctl, sizeof(u16), &val);
+ if (ret != SAA_OK) {
+ printk(KERN_ERR "%s() error, ret = 0x%x\n", __func__, ret);
+ return ret;
+ }
+
+ dprintk(DBGLVL_ENC, "%s() ctl=%d, val=%d\n",
+ __func__, ctl, val);
+
+ if (ctl == PU_BRIGHTNESS_CONTROL)
+ port->ctl_brightness = val;
+ else
+ if (ctl == PU_CONTRAST_CONTROL)
+ port->ctl_contrast = val;
+ else
+ if (ctl == PU_HUE_CONTROL)
+ port->ctl_hue = val;
+ else
+ if (ctl == PU_SATURATION_CONTROL)
+ port->ctl_saturation = val;
+ else
+ if (ctl == PU_SHARPNESS_CONTROL)
+ port->ctl_sharpness = val;
+
+ return ret;
+}
+
+int saa7164_api_set_videomux(struct saa7164_port *port)
+{
+ struct saa7164_dev *dev = port->dev;
+ u8 inputs[] = { 1, 2, 2, 2, 5, 5, 5 };
+ int ret;
+
+ dprintk(DBGLVL_ENC, "%s() v_mux=%d a_mux=%d\n",
+ __func__, port->mux_input, inputs[port->mux_input - 1]);
+
+ /* Audio Mute */
+ ret = saa7164_api_audio_mute(port, 1);
+ if (ret != SAA_OK)
+ printk(KERN_ERR "%s() error, ret = 0x%x\n", __func__, ret);
+
+ /* Video Mux */
+ ret = saa7164_cmd_send(port->dev, port->vidproc.sourceid, SET_CUR,
+ SU_INPUT_SELECT_CONTROL, sizeof(u8), &port->mux_input);
+ if (ret != SAA_OK)
+ printk(KERN_ERR "%s() error, ret = 0x%x\n", __func__, ret);
+
+ /* Audio Mux */
+ ret = saa7164_cmd_send(port->dev, port->audfeat.sourceid, SET_CUR,
+ SU_INPUT_SELECT_CONTROL, sizeof(u8), &inputs[port->mux_input - 1]);
+ if (ret != SAA_OK)
+ printk(KERN_ERR "%s() error, ret = 0x%x\n", __func__, ret);
+
+ /* Audio UnMute */
+ ret = saa7164_api_audio_mute(port, 0);
+ if (ret != SAA_OK)
+ printk(KERN_ERR "%s() error, ret = 0x%x\n", __func__, ret);
+
+ return ret;
+}
+
+int saa7164_api_audio_mute(struct saa7164_port *port, int mute)
+{
+ struct saa7164_dev *dev = port->dev;
+ u8 v = mute;
+ int ret;
+
+ dprintk(DBGLVL_API, "%s(%d)\n", __func__, mute);
+
+ ret = saa7164_cmd_send(port->dev, port->audfeat.unitid, SET_CUR,
+ MUTE_CONTROL, sizeof(u8), &v);
+ if (ret != SAA_OK)
+ printk(KERN_ERR "%s() error, ret = 0x%x\n", __func__, ret);
+
+ return ret;
+}
+
+/* 0 = silence, 0xff = full */
+int saa7164_api_set_audio_volume(struct saa7164_port *port, s8 level)
+{
+ struct saa7164_dev *dev = port->dev;
+ s16 v, min, max;
+ int ret;
+
+ dprintk(DBGLVL_API, "%s(%d)\n", __func__, level);
+
+ /* Obtain the min/max ranges */
+ ret = saa7164_cmd_send(port->dev, port->audfeat.unitid, GET_MIN,
+ VOLUME_CONTROL, sizeof(u16), &min);
+ if (ret != SAA_OK)
+ printk(KERN_ERR "%s() error, ret = 0x%x\n", __func__, ret);
+
+ ret = saa7164_cmd_send(port->dev, port->audfeat.unitid, GET_MAX,
+ VOLUME_CONTROL, sizeof(u16), &max);
+ if (ret != SAA_OK)
+ printk(KERN_ERR "%s() error, ret = 0x%x\n", __func__, ret);
+
+ ret = saa7164_cmd_send(port->dev, port->audfeat.unitid, GET_CUR,
+ (0x01 << 8) | VOLUME_CONTROL, sizeof(u16), &v);
+ if (ret != SAA_OK)
+ printk(KERN_ERR "%s() error, ret = 0x%x\n", __func__, ret);
+
+ dprintk(DBGLVL_API, "%s(%d) min=%d max=%d cur=%d\n", __func__, level, min, max, v);
+
+ v = level;
+ if (v < min)
+ v = min;
+ if (v > max)
+ v = max;
+
+ /* Left */
+ ret = saa7164_cmd_send(port->dev, port->audfeat.unitid, SET_CUR,
+ (0x01 << 8) | VOLUME_CONTROL, sizeof(s16), &v);
+ if (ret != SAA_OK)
+ printk(KERN_ERR "%s() error, ret = 0x%x\n", __func__, ret);
+
+ /* Right */
+ ret = saa7164_cmd_send(port->dev, port->audfeat.unitid, SET_CUR,
+ (0x02 << 8) | VOLUME_CONTROL, sizeof(s16), &v);
+ if (ret != SAA_OK)
+ printk(KERN_ERR "%s() error, ret = 0x%x\n", __func__, ret);
+
+ ret = saa7164_cmd_send(port->dev, port->audfeat.unitid, GET_CUR,
+ (0x01 << 8) | VOLUME_CONTROL, sizeof(u16), &v);
+ if (ret != SAA_OK)
+ printk(KERN_ERR "%s() error, ret = 0x%x\n", __func__, ret);
+
+ dprintk(DBGLVL_API, "%s(%d) min=%d max=%d cur=%d\n", __func__, level, min, max, v);
+
+ return ret;
+}
+
+int saa7164_api_set_audio_std(struct saa7164_port *port)
+{
+ struct saa7164_dev *dev = port->dev;
+ struct tmComResAudioDefaults lvl;
+ struct tmComResTunerStandard tvaudio;
+ int ret;
+
+ dprintk(DBGLVL_API, "%s()\n", __func__);
+
+ /* Establish default levels */
+ lvl.ucDecoderLevel = TMHW_LEV_ADJ_DECLEV_DEFAULT;
+ lvl.ucDecoderFM_Level = TMHW_LEV_ADJ_DECLEV_DEFAULT;
+ lvl.ucMonoLevel = TMHW_LEV_ADJ_MONOLEV_DEFAULT;
+ lvl.ucNICAM_Level = TMHW_LEV_ADJ_NICLEV_DEFAULT;
+ lvl.ucSAP_Level = TMHW_LEV_ADJ_SAPLEV_DEFAULT;
+ lvl.ucADC_Level = TMHW_LEV_ADJ_ADCLEV_DEFAULT;
+ ret = saa7164_cmd_send(port->dev, port->audfeat.unitid, SET_CUR,
+ AUDIO_DEFAULT_CONTROL, sizeof(struct tmComResAudioDefaults), &lvl);
+ if (ret != SAA_OK)
+ printk(KERN_ERR "%s() error, ret = 0x%x\n", __func__, ret);
+
+ /* Manually select the appropriate TV audio standard */
+ if (port->encodernorm.id & V4L2_STD_NTSC) {
+ tvaudio.std = TU_STANDARD_NTSC_M;
+ tvaudio.country = 1;
+ } else {
+ tvaudio.std = TU_STANDARD_PAL_I;
+ tvaudio.country = 44;
+ }
+
+ ret = saa7164_cmd_send(port->dev, port->tunerunit.unitid, SET_CUR,
+ TU_STANDARD_CONTROL, sizeof(tvaudio), &tvaudio);
+ if (ret != SAA_OK)
+ printk(KERN_ERR "%s() TU_STANDARD_CONTROL error, ret = 0x%x\n", __func__, ret);
+ return ret;
+}
+
+int saa7164_api_set_audio_detection(struct saa7164_port *port, int autodetect)
+{
+ struct saa7164_dev *dev = port->dev;
+ struct tmComResTunerStandardAuto p;
+ int ret;
+
+ dprintk(DBGLVL_API, "%s(%d)\n", __func__, autodetect);
+
+ /* Enable or disable TV audio autodetect (the autodetect mode is buggy) */
+ if (autodetect)
+ p.mode = TU_STANDARD_AUTO;
+ else
+ p.mode = TU_STANDARD_MANUAL;
+ ret = saa7164_cmd_send(port->dev, port->tunerunit.unitid, SET_CUR,
+ TU_STANDARD_AUTO_CONTROL, sizeof(p), &p);
+ if (ret != SAA_OK)
+ printk(KERN_ERR "%s() TU_STANDARD_AUTO_CONTROL error, ret = 0x%x\n", __func__, ret);
+
+ return ret;
+}
+
+int saa7164_api_get_videomux(struct saa7164_port *port)
+{
+ struct saa7164_dev *dev = port->dev;
+ int ret;
+
+ ret = saa7164_cmd_send(port->dev, port->vidproc.sourceid, GET_CUR,
+ SU_INPUT_SELECT_CONTROL, sizeof(u8), &port->mux_input);
+ if (ret != SAA_OK)
+ printk(KERN_ERR "%s() error, ret = 0x%x\n", __func__, ret);
+
+ dprintk(DBGLVL_ENC, "%s() v_mux=%d\n",
+ __func__, port->mux_input);
+
+ return ret;
+}
+
+int saa7164_api_set_dif(struct saa7164_port *port, u8 reg, u8 val)
+{
+ struct saa7164_dev *dev = port->dev;
+
+ u16 len = 0;
+ u8 buf[256];
+ int ret;
+ u8 mas;
+
+ dprintk(DBGLVL_API, "%s(nr=%d type=%d val=%x)\n", __func__,
+ port->nr, port->type, val);
+
+ if (port->nr == 0)
+ mas = 0xd0;
+ else
+ mas = 0xe0;
+
+ memset(buf, 0, sizeof(buf));
+
+ buf[0x00] = 0x04;
+ buf[0x01] = 0x00;
+ buf[0x02] = 0x00;
+ buf[0x03] = 0x00;
+
+ buf[0x04] = 0x04;
+ buf[0x05] = 0x00;
+ buf[0x06] = 0x00;
+ buf[0x07] = 0x00;
+
+ buf[0x08] = reg;
+ buf[0x09] = 0x26;
+ buf[0x0a] = mas;
+ buf[0x0b] = 0xb0;
+
+ buf[0x0c] = val;
+ buf[0x0d] = 0x00;
+ buf[0x0e] = 0x00;
+ buf[0x0f] = 0x00;
+
+ ret = saa7164_cmd_send(dev, port->ifunit.unitid, GET_LEN,
+ EXU_REGISTER_ACCESS_CONTROL, sizeof(len), &len);
+ if (ret != SAA_OK) {
+ printk(KERN_ERR "%s() error, ret(1) = 0x%x\n", __func__, ret);
+ return -EIO;
+ }
+
+ ret = saa7164_cmd_send(dev, port->ifunit.unitid, SET_CUR,
+ EXU_REGISTER_ACCESS_CONTROL, len, &buf);
+ if (ret != SAA_OK)
+ printk(KERN_ERR "%s() error, ret(2) = 0x%x\n", __func__, ret);
+
+ //saa7164_dumphex16(dev, buf, 16);
+
+ return ret == SAA_OK ? 0 : -EIO;
+}
+
+/* Disable the IF block AGC controls */
+int saa7164_api_configure_dif(struct saa7164_port *port, u32 std)
+{
+ struct saa7164_dev *dev = port->dev;
+ int ret = 0;
+ u8 agc_disable;
+
+ dprintk(DBGLVL_API, "%s(nr=%d, 0x%x)\n", __func__, port->nr, std);
+
+ if (std & V4L2_STD_NTSC) {
+ dprintk(DBGLVL_API, " NTSC\n");
+ saa7164_api_set_dif(port, 0x00, 0x01); /* Video Standard */
+ agc_disable = 0;
+ } else if (std & V4L2_STD_PAL_I) {
+ dprintk(DBGLVL_API, " PAL-I\n");
+ saa7164_api_set_dif(port, 0x00, 0x08); /* Video Standard */
+ agc_disable = 0;
+ } else if (std & V4L2_STD_PAL_M) {
+ dprintk(DBGLVL_API, " PAL-M\n");
+ saa7164_api_set_dif(port, 0x00, 0x01); /* Video Standard */
+ agc_disable = 0;
+ } else if (std & V4L2_STD_PAL_N) {
+ dprintk(DBGLVL_API, " PAL-N\n");
+ saa7164_api_set_dif(port, 0x00, 0x01); /* Video Standard */
+ agc_disable = 0;
+ } else if (std & V4L2_STD_PAL_Nc) {
+ dprintk(DBGLVL_API, " PAL-Nc\n");
+ saa7164_api_set_dif(port, 0x00, 0x01); /* Video Standard */
+ agc_disable = 0;
+ } else if (std & V4L2_STD_PAL_B) {
+ dprintk(DBGLVL_API, " PAL-B\n");
+ saa7164_api_set_dif(port, 0x00, 0x02); /* Video Standard */
+ agc_disable = 0;
+ } else if (std & V4L2_STD_PAL_DK) {
+ dprintk(DBGLVL_API, " PAL-DK\n");
+ saa7164_api_set_dif(port, 0x00, 0x10); /* Video Standard */
+ agc_disable = 0;
+ } else if (std & V4L2_STD_SECAM_L) {
+ dprintk(DBGLVL_API, " SECAM-L\n");
+ saa7164_api_set_dif(port, 0x00, 0x20); /* Video Standard */
+ agc_disable = 0;
+ } else {
+ /* Unknown standard, assume DTV */
+ dprintk(DBGLVL_API, " Unknown (assuming DTV)\n");
+ saa7164_api_set_dif(port, 0x00, 0x80); /* Undefined Video Standard */
+ agc_disable = 1;
+ }
+
+ saa7164_api_set_dif(port, 0x48, 0xa0); /* AGC Functions 1 */
+ saa7164_api_set_dif(port, 0xc0, agc_disable); /* AGC Output Disable */
+ saa7164_api_set_dif(port, 0x7c, 0x04); /* CVBS EQ */
+ saa7164_api_set_dif(port, 0x04, 0x01); /* Active */
+ msleep(100);
+ saa7164_api_set_dif(port, 0x04, 0x00); /* Active (again) */
+ msleep(100);
+
+ return ret;
+}
+
+/* Ensure the DIF is in the correct state for the operating mode
+ * (analog / dtv). We only configure the DIF through the analog encoder,
+ * so when we're in digital mode we need to find the appropriate encoder
+ * and use it to configure the DIF.
+ */
+int saa7164_api_initialize_dif(struct saa7164_port *port)
+{
+ struct saa7164_dev *dev = port->dev;
+ struct saa7164_port *p = 0;
+ int ret = -EINVAL;
+ u32 std = 0;
+
+ dprintk(DBGLVL_API, "%s(nr=%d type=%d)\n", __func__,
+ port->nr, port->type);
+
+ if (port->type == SAA7164_MPEG_ENCODER) {
+ /* Pick any analog standard to init the DIF.
+ * We'll come back during encoder init
+ * and set the correct standard if required.
+ */
+ std = V4L2_STD_NTSC;
+ } else
+ if (port->type == SAA7164_MPEG_DVB) {
+ if (port->nr == SAA7164_PORT_TS1)
+ p = &dev->ports[SAA7164_PORT_ENC1];
+ else
+ p = &dev->ports[SAA7164_PORT_ENC2];
+ } else
+ if (port->type == SAA7164_MPEG_VBI) {
+ std = V4L2_STD_NTSC;
+ if (port->nr == SAA7164_PORT_VBI1)
+ p = &dev->ports[SAA7164_PORT_ENC1];
+ else
+ p = &dev->ports[SAA7164_PORT_ENC2];
+ } else
+ BUG();
+
+ if (p)
+ ret = saa7164_api_configure_dif(p, std);
+
+ return ret;
+}
+
+int saa7164_api_transition_port(struct saa7164_port *port, u8 mode)
+{
+ struct saa7164_dev *dev = port->dev;
+
+ int ret;
+
+ dprintk(DBGLVL_API, "%s(nr=%d unitid=0x%x,%d)\n",
+ __func__, port->nr, port->hwcfg.unitid, mode);
+
ret = saa7164_cmd_send(port->dev, port->hwcfg.unitid, SET_CUR,
SAA_STATE_CONTROL, sizeof(mode), &mode);
if (ret != SAA_OK)
- printk(KERN_ERR "%s() error, ret = 0x%x\n", __func__, ret);
+ printk(KERN_ERR "%s(portnr %d unitid 0x%x) error, ret = 0x%x\n",
+ __func__, port->nr, port->hwcfg.unitid, ret);
return ret;
}
@@ -61,10 +797,45 @@ int saa7164_api_read_eeprom(struct saa7164_dev *dev, u8 *buf, int buflen)
&reg[0], 128, buf);
}
+int saa7164_api_configure_port_vbi(struct saa7164_dev *dev,
+ struct saa7164_port *port)
+{
+ struct tmComResVBIFormatDescrHeader *fmt = &port->vbi_fmt_ntsc;
+
+ dprintk(DBGLVL_API, " bFormatIndex = 0x%x\n", fmt->bFormatIndex);
+ dprintk(DBGLVL_API, " VideoStandard = 0x%x\n", fmt->VideoStandard);
+ dprintk(DBGLVL_API, " StartLine = %d\n", fmt->StartLine);
+ dprintk(DBGLVL_API, " EndLine = %d\n", fmt->EndLine);
+ dprintk(DBGLVL_API, " FieldRate = %d\n", fmt->FieldRate);
+ dprintk(DBGLVL_API, " bNumLines = %d\n", fmt->bNumLines);
+
+ /* Cache the hardware configuration in the port */
+
+ port->bufcounter = port->hwcfg.BARLocation;
+ port->pitch = port->hwcfg.BARLocation + (2 * sizeof(u32));
+ port->bufsize = port->hwcfg.BARLocation + (3 * sizeof(u32));
+ port->bufoffset = port->hwcfg.BARLocation + (4 * sizeof(u32));
+ port->bufptr32l = port->hwcfg.BARLocation +
+ (4 * sizeof(u32)) +
+ (sizeof(u32) * port->hwcfg.buffercount) + sizeof(u32);
+ port->bufptr32h = port->hwcfg.BARLocation +
+ (4 * sizeof(u32)) +
+ (sizeof(u32) * port->hwcfg.buffercount);
+ port->bufptr64 = port->hwcfg.BARLocation +
+ (4 * sizeof(u32)) +
+ (sizeof(u32) * port->hwcfg.buffercount);
+ dprintk(DBGLVL_API, " = port->hwcfg.BARLocation = 0x%x\n",
+ port->hwcfg.BARLocation);
+
+ dprintk(DBGLVL_API, " = VS_FORMAT_VBI (becomes dev->en[%d])\n",
+ port->nr);
+
+ return 0;
+}
int saa7164_api_configure_port_mpeg2ts(struct saa7164_dev *dev,
- struct saa7164_tsport *port,
- tmComResTSFormatDescrHeader_t *tsfmt)
+ struct saa7164_port *port,
+ struct tmComResTSFormatDescrHeader *tsfmt)
{
dprintk(DBGLVL_API, " bFormatIndex = 0x%x\n", tsfmt->bFormatIndex);
dprintk(DBGLVL_API, " bDataOffset = 0x%x\n", tsfmt->bDataOffset);
@@ -96,27 +867,68 @@ int saa7164_api_configure_port_mpeg2ts(struct saa7164_dev *dev,
return 0;
}
+int saa7164_api_configure_port_mpeg2ps(struct saa7164_dev *dev,
+ struct saa7164_port *port,
+ struct tmComResPSFormatDescrHeader *fmt)
+{
+ dprintk(DBGLVL_API, " bFormatIndex = 0x%x\n", fmt->bFormatIndex);
+ dprintk(DBGLVL_API, " wPacketLength= 0x%x\n", fmt->wPacketLength);
+ dprintk(DBGLVL_API, " wPackLength= 0x%x\n", fmt->wPackLength);
+ dprintk(DBGLVL_API, " bPackDataType= 0x%x\n", fmt->bPackDataType);
+
+ /* Cache the hardware configuration in the port */
+ /* TODO: CHECK THIS in the port config */
+ port->bufcounter = port->hwcfg.BARLocation;
+ port->pitch = port->hwcfg.BARLocation + (2 * sizeof(u32));
+ port->bufsize = port->hwcfg.BARLocation + (3 * sizeof(u32));
+ port->bufoffset = port->hwcfg.BARLocation + (4 * sizeof(u32));
+ port->bufptr32l = port->hwcfg.BARLocation +
+ (4 * sizeof(u32)) +
+ (sizeof(u32) * port->hwcfg.buffercount) + sizeof(u32);
+ port->bufptr32h = port->hwcfg.BARLocation +
+ (4 * sizeof(u32)) +
+ (sizeof(u32) * port->hwcfg.buffercount);
+ port->bufptr64 = port->hwcfg.BARLocation +
+ (4 * sizeof(u32)) +
+ (sizeof(u32) * port->hwcfg.buffercount);
+ dprintk(DBGLVL_API, " = port->hwcfg.BARLocation = 0x%x\n",
+ port->hwcfg.BARLocation);
+
+ dprintk(DBGLVL_API, " = VS_FORMAT_MPEGPS (becomes dev->enc[%d])\n",
+ port->nr);
+
+ return 0;
+}
+
int saa7164_api_dump_subdevs(struct saa7164_dev *dev, u8 *buf, int len)
{
- struct saa7164_tsport *port = 0;
+ struct saa7164_port *tsport = 0;
+ struct saa7164_port *encport = 0;
+ struct saa7164_port *vbiport = 0;
u32 idx, next_offset;
int i;
- tmComResDescrHeader_t *hdr, *t;
- tmComResExtDevDescrHeader_t *exthdr;
- tmComResPathDescrHeader_t *pathhdr;
- tmComResAntTermDescrHeader_t *anttermhdr;
- tmComResTunerDescrHeader_t *tunerunithdr;
- tmComResDMATermDescrHeader_t *vcoutputtermhdr;
- tmComResTSFormatDescrHeader_t *tsfmt;
+ struct tmComResDescrHeader *hdr, *t;
+ struct tmComResExtDevDescrHeader *exthdr;
+ struct tmComResPathDescrHeader *pathhdr;
+ struct tmComResAntTermDescrHeader *anttermhdr;
+ struct tmComResTunerDescrHeader *tunerunithdr;
+ struct tmComResDMATermDescrHeader *vcoutputtermhdr;
+ struct tmComResTSFormatDescrHeader *tsfmt;
+ struct tmComResPSFormatDescrHeader *psfmt;
+ struct tmComResSelDescrHeader *psel;
+ struct tmComResProcDescrHeader *pdh;
+ struct tmComResAFeatureDescrHeader *afd;
+ struct tmComResEncoderDescrHeader *edh;
+ struct tmComResVBIFormatDescrHeader *vbifmt;
u32 currpath = 0;
dprintk(DBGLVL_API,
- "%s(?,?,%d) sizeof(tmComResDescrHeader_t) = %d bytes\n",
- __func__, len, (u32)sizeof(tmComResDescrHeader_t));
+ "%s(?,?,%d) sizeof(struct tmComResDescrHeader) = %d bytes\n",
+ __func__, len, (u32)sizeof(struct tmComResDescrHeader));
- for (idx = 0; idx < (len - sizeof(tmComResDescrHeader_t)); ) {
+ for (idx = 0; idx < (len - sizeof(struct tmComResDescrHeader));) {
- hdr = (tmComResDescrHeader_t *)(buf + idx);
+ hdr = (struct tmComResDescrHeader *)(buf + idx);
if (hdr->type != CS_INTERFACE)
return SAA_ERR_NOT_SUPPORTED;
@@ -128,7 +940,7 @@ int saa7164_api_dump_subdevs(struct saa7164_dev *dev, u8 *buf, int len)
break;
case VC_TUNER_PATH:
dprintk(DBGLVL_API, " VC_TUNER_PATH\n");
- pathhdr = (tmComResPathDescrHeader_t *)(buf + idx);
+ pathhdr = (struct tmComResPathDescrHeader *)(buf + idx);
dprintk(DBGLVL_API, " pathid = 0x%x\n",
pathhdr->pathid);
currpath = pathhdr->pathid;
@@ -136,7 +948,7 @@ int saa7164_api_dump_subdevs(struct saa7164_dev *dev, u8 *buf, int len)
case VC_INPUT_TERMINAL:
dprintk(DBGLVL_API, " VC_INPUT_TERMINAL\n");
anttermhdr =
- (tmComResAntTermDescrHeader_t *)(buf + idx);
+ (struct tmComResAntTermDescrHeader *)(buf + idx);
dprintk(DBGLVL_API, " terminalid = 0x%x\n",
anttermhdr->terminalid);
dprintk(DBGLVL_API, " terminaltype = 0x%x\n",
@@ -179,7 +991,7 @@ int saa7164_api_dump_subdevs(struct saa7164_dev *dev, u8 *buf, int len)
case VC_OUTPUT_TERMINAL:
dprintk(DBGLVL_API, " VC_OUTPUT_TERMINAL\n");
vcoutputtermhdr =
- (tmComResDMATermDescrHeader_t *)(buf + idx);
+ (struct tmComResDMATermDescrHeader *)(buf + idx);
dprintk(DBGLVL_API, " unitid = 0x%x\n",
vcoutputtermhdr->unitid);
dprintk(DBGLVL_API, " terminaltype = 0x%x\n",
@@ -233,32 +1045,49 @@ int saa7164_api_dump_subdevs(struct saa7164_dev *dev, u8 *buf, int len)
dprintk(DBGLVL_API, " numformats = 0x%x\n",
vcoutputtermhdr->numformats);
- t = (tmComResDescrHeader_t *)
- ((tmComResDMATermDescrHeader_t *)(buf + idx));
+ t = (struct tmComResDescrHeader *)
+ ((struct tmComResDMATermDescrHeader *)(buf + idx));
next_offset = idx + (vcoutputtermhdr->len);
for (i = 0; i < vcoutputtermhdr->numformats; i++) {
- t = (tmComResDescrHeader_t *)
+ t = (struct tmComResDescrHeader *)
(buf + next_offset);
switch (t->subtype) {
case VS_FORMAT_MPEG2TS:
tsfmt =
- (tmComResTSFormatDescrHeader_t *)t;
+ (struct tmComResTSFormatDescrHeader *)t;
if (currpath == 1)
- port = &dev->ts1;
+ tsport = &dev->ports[SAA7164_PORT_TS1];
else
- port = &dev->ts2;
- memcpy(&port->hwcfg, vcoutputtermhdr,
+ tsport = &dev->ports[SAA7164_PORT_TS2];
+ memcpy(&tsport->hwcfg, vcoutputtermhdr,
sizeof(*vcoutputtermhdr));
saa7164_api_configure_port_mpeg2ts(dev,
- port, tsfmt);
+ tsport, tsfmt);
break;
case VS_FORMAT_MPEG2PS:
- dprintk(DBGLVL_API,
- " = VS_FORMAT_MPEG2PS\n");
+ psfmt =
+ (struct tmComResPSFormatDescrHeader *)t;
+ if (currpath == 1)
+ encport = &dev->ports[SAA7164_PORT_ENC1];
+ else
+ encport = &dev->ports[SAA7164_PORT_ENC2];
+ memcpy(&encport->hwcfg, vcoutputtermhdr,
+ sizeof(*vcoutputtermhdr));
+ saa7164_api_configure_port_mpeg2ps(dev,
+ encport, psfmt);
break;
case VS_FORMAT_VBI:
- dprintk(DBGLVL_API,
- " = VS_FORMAT_VBI\n");
+ vbifmt =
+ (struct tmComResVBIFormatDescrHeader *)t;
+ if (currpath == 1)
+ vbiport = &dev->ports[SAA7164_PORT_VBI1];
+ else
+ vbiport = &dev->ports[SAA7164_PORT_VBI2];
+ memcpy(&vbiport->hwcfg, vcoutputtermhdr,
+ sizeof(*vcoutputtermhdr));
+ memcpy(&vbiport->vbi_fmt_ntsc, vbifmt, sizeof(*vbifmt));
+ saa7164_api_configure_port_vbi(dev,
+ vbiport);
break;
case VS_FORMAT_RDS:
dprintk(DBGLVL_API,
@@ -284,7 +1113,7 @@ int saa7164_api_dump_subdevs(struct saa7164_dev *dev, u8 *buf, int len)
case TUNER_UNIT:
dprintk(DBGLVL_API, " TUNER_UNIT\n");
tunerunithdr =
- (tmComResTunerDescrHeader_t *)(buf + idx);
+ (struct tmComResTunerDescrHeader *)(buf + idx);
dprintk(DBGLVL_API, " unitid = 0x%x\n",
tunerunithdr->unitid);
dprintk(DBGLVL_API, " sourceid = 0x%x\n",
@@ -297,22 +1126,84 @@ int saa7164_api_dump_subdevs(struct saa7164_dev *dev, u8 *buf, int len)
tunerunithdr->controlsize);
dprintk(DBGLVL_API, " controls = 0x%x\n",
tunerunithdr->controls);
+
+ if (tunerunithdr->unitid == tunerunithdr->iunit) {
+ if (currpath == 1)
+ encport = &dev->ports[SAA7164_PORT_ENC1];
+ else
+ encport = &dev->ports[SAA7164_PORT_ENC2];
+ memcpy(&encport->tunerunit, tunerunithdr,
+ sizeof(struct tmComResTunerDescrHeader));
+ dprintk(DBGLVL_API, " (becomes dev->enc[%d] tuner)\n", encport->nr);
+ }
break;
case VC_SELECTOR_UNIT:
+ psel = (struct tmComResSelDescrHeader *)(buf + idx);
dprintk(DBGLVL_API, " VC_SELECTOR_UNIT\n");
+ dprintk(DBGLVL_API, " unitid = 0x%x\n",
+ psel->unitid);
+ dprintk(DBGLVL_API, " nrinpins = 0x%x\n",
+ psel->nrinpins);
+ dprintk(DBGLVL_API, " sourceid = 0x%x\n",
+ psel->sourceid);
break;
case VC_PROCESSING_UNIT:
+ pdh = (struct tmComResProcDescrHeader *)(buf + idx);
dprintk(DBGLVL_API, " VC_PROCESSING_UNIT\n");
+ dprintk(DBGLVL_API, " unitid = 0x%x\n",
+ pdh->unitid);
+ dprintk(DBGLVL_API, " sourceid = 0x%x\n",
+ pdh->sourceid);
+ dprintk(DBGLVL_API, " controlsize = 0x%x\n",
+ pdh->controlsize);
+ if (pdh->controlsize == 0x04) {
+ if (currpath == 1)
+ encport = &dev->ports[SAA7164_PORT_ENC1];
+ else
+ encport = &dev->ports[SAA7164_PORT_ENC2];
+ memcpy(&encport->vidproc, pdh,
+ sizeof(struct tmComResProcDescrHeader));
+ dprintk(DBGLVL_API, " (becomes dev->enc[%d])\n", encport->nr);
+ }
break;
case FEATURE_UNIT:
+ afd = (struct tmComResAFeatureDescrHeader *)(buf + idx);
dprintk(DBGLVL_API, " FEATURE_UNIT\n");
+ dprintk(DBGLVL_API, " unitid = 0x%x\n",
+ afd->unitid);
+ dprintk(DBGLVL_API, " sourceid = 0x%x\n",
+ afd->sourceid);
+ dprintk(DBGLVL_API, " controlsize = 0x%x\n",
+ afd->controlsize);
+ if (currpath == 1)
+ encport = &dev->ports[SAA7164_PORT_ENC1];
+ else
+ encport = &dev->ports[SAA7164_PORT_ENC2];
+ memcpy(&encport->audfeat, afd,
+ sizeof(struct tmComResAFeatureDescrHeader));
+ dprintk(DBGLVL_API, " (becomes dev->enc[%d])\n", encport->nr);
break;
case ENCODER_UNIT:
+ edh = (struct tmComResEncoderDescrHeader *)(buf + idx);
dprintk(DBGLVL_API, " ENCODER_UNIT\n");
+ dprintk(DBGLVL_API, " subtype = 0x%x\n", edh->subtype);
+ dprintk(DBGLVL_API, " unitid = 0x%x\n", edh->unitid);
+ dprintk(DBGLVL_API, " vsourceid = 0x%x\n", edh->vsourceid);
+ dprintk(DBGLVL_API, " asourceid = 0x%x\n", edh->asourceid);
+ dprintk(DBGLVL_API, " iunit = 0x%x\n", edh->iunit);
+ if (edh->iunit == edh->unitid) {
+ if (currpath == 1)
+ encport = &dev->ports[SAA7164_PORT_ENC1];
+ else
+ encport = &dev->ports[SAA7164_PORT_ENC2];
+ memcpy(&encport->encunit, edh,
+ sizeof(struct tmComResEncoderDescrHeader));
+ dprintk(DBGLVL_API, " (becomes dev->enc[%d])\n", encport->nr);
+ }
break;
case EXTENSION_UNIT:
dprintk(DBGLVL_API, " EXTENSION_UNIT\n");
- exthdr = (tmComResExtDevDescrHeader_t *)(buf + idx);
+ exthdr = (struct tmComResExtDevDescrHeader *)(buf + idx);
dprintk(DBGLVL_API, " unitid = 0x%x\n",
exthdr->unitid);
dprintk(DBGLVL_API, " deviceid = 0x%x\n",
@@ -364,6 +1255,15 @@ int saa7164_api_dump_subdevs(struct saa7164_dev *dev, u8 *buf, int len)
exthdr->numgpiogroups);
dprintk(DBGLVL_API, " controlsize = 0x%x\n",
exthdr->controlsize);
+ if (exthdr->devicetype & 0x80) {
+ if (currpath == 1)
+ encport = &dev->ports[SAA7164_PORT_ENC1];
+ else
+ encport = &dev->ports[SAA7164_PORT_ENC2];
+ memcpy(&encport->ifunit, exthdr,
+ sizeof(struct tmComResExtDevDescrHeader));
+ dprintk(DBGLVL_API, " (becomes dev->enc[%d])\n", encport->nr);
+ }
break;
case PVC_INFRARED_UNIT:
dprintk(DBGLVL_API, " PVC_INFRARED_UNIT\n");
@@ -560,12 +1460,11 @@ int saa7164_api_i2c_write(struct saa7164_i2c *bus, u8 addr, u32 datalen,
return ret == SAA_OK ? 0 : -EIO;
}
-
int saa7164_api_modify_gpio(struct saa7164_dev *dev, u8 unitid,
u8 pin, u8 state)
{
int ret;
- tmComResGPIO_t t;
+ struct tmComResGPIO t;
dprintk(DBGLVL_API, "%s(0x%x, %d, %d)\n",
__func__, unitid, pin, state);
@@ -597,5 +1496,3 @@ int saa7164_api_clear_gpiobit(struct saa7164_dev *dev, u8 unitid,
return saa7164_api_modify_gpio(dev, unitid, pin, 0);
}
-
-
diff --git a/drivers/media/video/saa7164/saa7164-buffer.c b/drivers/media/video/saa7164/saa7164-buffer.c
index ddd25d32723d..7230912acc7d 100644
--- a/drivers/media/video/saa7164/saa7164-buffer.c
+++ b/drivers/media/video/saa7164/saa7164-buffer.c
@@ -1,7 +1,7 @@
/*
* Driver for the NXP SAA7164 PCIe bridge
*
- * Copyright (c) 2009 Steven Toth <stoth@kernellabs.com>
+ * Copyright (c) 2010 Steven Toth <stoth@kernellabs.com>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
@@ -66,12 +66,33 @@
| etc
*/
+void saa7164_buffer_display(struct saa7164_buffer *buf)
+{
+ struct saa7164_dev *dev = buf->port->dev;
+ int i;
+
+ dprintk(DBGLVL_BUF, "%s() buffer @ 0x%p nr=%d\n",
+ __func__, buf, buf->idx);
+ dprintk(DBGLVL_BUF, " pci_cpu @ 0x%p dma @ 0x%08llx len = 0x%x\n",
+ buf->cpu, (long long)buf->dma, buf->pci_size);
+ dprintk(DBGLVL_BUF, " pt_cpu @ 0x%p pt_dma @ 0x%08llx len = 0x%x\n",
+ buf->pt_cpu, (long long)buf->pt_dma, buf->pt_size);
+
+ /* Format the Page Table Entries to point into the data buffer */
+ for (i = 0 ; i < SAA7164_PT_ENTRIES; i++) {
+
+ dprintk(DBGLVL_BUF, " pt[%02d] = 0x%p -> 0x%llx\n",
+ i, buf->pt_cpu, (u64)*(buf->pt_cpu));
+
+ }
+}
/* Allocate a new buffer structure and associated PCI space in bytes.
* len must be a multiple of sizeof(u64)
*/
-struct saa7164_buffer *saa7164_buffer_alloc(struct saa7164_tsport *port,
+struct saa7164_buffer *saa7164_buffer_alloc(struct saa7164_port *port,
u32 len)
{
+ struct tmHWStreamParameters *params = &port->hw_streamingparams;
struct saa7164_buffer *buf = 0;
struct saa7164_dev *dev = port->dev;
int i;
@@ -87,8 +108,12 @@ struct saa7164_buffer *saa7164_buffer_alloc(struct saa7164_tsport *port,
goto ret;
}
+ buf->idx = -1;
buf->port = port;
buf->flags = SAA7164_BUFFER_FREE;
+ buf->pos = 0;
+ buf->actual_size = params->pitch * params->numberoflines;
+ buf->crc = 0;
/* TODO: arg len is being ignored */
buf->pci_size = SAA7164_PT_ENTRIES * 0x1000;
buf->pt_size = (SAA7164_PT_ENTRIES * sizeof(u64)) + 0x1000;
@@ -105,19 +130,23 @@ struct saa7164_buffer *saa7164_buffer_alloc(struct saa7164_tsport *port,
goto fail2;
/* init the buffers to a known pattern, easier during debugging */
- memset(buf->cpu, 0xff, buf->pci_size);
- memset(buf->pt_cpu, 0xff, buf->pt_size);
+ memset_io(buf->cpu, 0xff, buf->pci_size);
+ buf->crc = crc32(0, buf->cpu, buf->actual_size);
+ memset_io(buf->pt_cpu, 0xff, buf->pt_size);
- dprintk(DBGLVL_BUF, "%s() allocated buffer @ 0x%p\n", __func__, buf);
+ dprintk(DBGLVL_BUF, "%s() allocated buffer @ 0x%p (%d pageptrs)\n",
+ __func__, buf, params->numpagetables);
dprintk(DBGLVL_BUF, " pci_cpu @ 0x%p dma @ 0x%08lx len = 0x%x\n",
buf->cpu, (long)buf->dma, buf->pci_size);
dprintk(DBGLVL_BUF, " pt_cpu @ 0x%p pt_dma @ 0x%08lx len = 0x%x\n",
buf->pt_cpu, (long)buf->pt_dma, buf->pt_size);
/* Format the Page Table Entries to point into the data buffer */
- for (i = 0 ; i < SAA7164_PT_ENTRIES; i++) {
+ for (i = 0 ; i < params->numpagetables; i++) {
*(buf->pt_cpu + i) = buf->dma + (i * 0x1000); /* TODO */
+ dprintk(DBGLVL_BUF, " pt[%02d] = 0x%p -> 0x%llx\n",
+ i, buf->pt_cpu, (u64)*(buf->pt_cpu));
}
@@ -133,26 +162,163 @@ ret:
return buf;
}
-int saa7164_buffer_dealloc(struct saa7164_tsport *port,
- struct saa7164_buffer *buf)
+int saa7164_buffer_dealloc(struct saa7164_buffer *buf)
{
struct saa7164_dev *dev;
- if (!buf || !port)
+ if (!buf || !buf->port)
return SAA_ERR_BAD_PARAMETER;
- dev = port->dev;
+ dev = buf->port->dev;
- dprintk(DBGLVL_BUF, "%s() deallocating buffer @ 0x%p\n", __func__, buf);
+ dprintk(DBGLVL_BUF, "%s() deallocating buffer @ 0x%p\n",
+ __func__, buf);
if (buf->flags != SAA7164_BUFFER_FREE)
log_warn(" freeing a non-free buffer\n");
- pci_free_consistent(port->dev->pci, buf->pci_size, buf->cpu, buf->dma);
- pci_free_consistent(port->dev->pci, buf->pt_size, buf->pt_cpu,
- buf->pt_dma);
+ pci_free_consistent(dev->pci, buf->pci_size, buf->cpu, buf->dma);
+ pci_free_consistent(dev->pci, buf->pt_size, buf->pt_cpu, buf->pt_dma);
kfree(buf);
return SAA_OK;
}
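+/* Reset the h/w write offset register for buffer index i back to zero */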
+int saa7164_buffer_zero_offsets(struct saa7164_port *port, int i)
+{
+ struct saa7164_dev *dev = port->dev;
+
+ if ((i < 0) || (i >= port->hwcfg.buffercount))
+ return -EINVAL;
+
+ dprintk(DBGLVL_BUF, "%s(idx = %d)\n", __func__, i);
+
+ saa7164_writel(port->bufoffset + (sizeof(u32) * i), 0);
+
+ return 0;
+}
+
+/* Write a buffer into the hardware */
+int saa7164_buffer_activate(struct saa7164_buffer *buf, int i)
+{
+ struct saa7164_port *port = buf->port;
+ struct saa7164_dev *dev = port->dev;
+
+ if ((i < 0) || (i >= port->hwcfg.buffercount))
+ return -EINVAL;
+
+ dprintk(DBGLVL_BUF, "%s(idx = %d)\n", __func__, i);
+
+ buf->idx = i; /* Note of which buffer list index position we occupy */
+ buf->flags = SAA7164_BUFFER_BUSY;
+ buf->pos = 0;
+
+ /* TODO: Review this in light of 32v64 assignments */
+ saa7164_writel(port->bufoffset + (sizeof(u32) * i), 0);
+ saa7164_writel(port->bufptr32h + ((sizeof(u32) * 2) * i), buf->pt_dma);
+ saa7164_writel(port->bufptr32l + ((sizeof(u32) * 2) * i), 0);
+
+ dprintk(DBGLVL_BUF, " buf[%d] offset 0x%llx (0x%x) "
+ "buf 0x%llx/%llx (0x%x/%x) nr=%d\n",
+ buf->idx,
+ (u64)port->bufoffset + (i * sizeof(u32)),
+ saa7164_readl(port->bufoffset + (sizeof(u32) * i)),
+ (u64)port->bufptr32h + ((sizeof(u32) * 2) * i),
+ (u64)port->bufptr32l + ((sizeof(u32) * 2) * i),
+ saa7164_readl(port->bufptr32h + ((sizeof(u32) * i) * 2)),
+ saa7164_readl(port->bufptr32l + ((sizeof(u32) * i) * 2)),
+ buf->idx);
+
+ return 0;
+}
+
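+/* Program the port DMA registers (buffer counter, pitch, size) and activate
+ * every buffer currently queued on the port's dmaqueue in the hardware.
+ */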
+int saa7164_buffer_cfg_port(struct saa7164_port *port)
+{
+ struct tmHWStreamParameters *params = &port->hw_streamingparams;
+ struct saa7164_dev *dev = port->dev;
+ struct saa7164_buffer *buf;
+ struct list_head *c, *n;
+ int i = 0;
+
+ dprintk(DBGLVL_BUF, "%s(port=%d)\n", __func__, port->nr);
+
+ saa7164_writel(port->bufcounter, 0);
+ saa7164_writel(port->pitch, params->pitch);
+ saa7164_writel(port->bufsize, params->pitch * params->numberoflines);
+
+ dprintk(DBGLVL_BUF, " configured:\n");
+ dprintk(DBGLVL_BUF, " lmmio 0x%p\n", dev->lmmio);
+ dprintk(DBGLVL_BUF, " bufcounter 0x%x = 0x%x\n", port->bufcounter,
+ saa7164_readl(port->bufcounter));
+
+ dprintk(DBGLVL_BUF, " pitch 0x%x = %d\n", port->pitch,
+ saa7164_readl(port->pitch));
+
+ dprintk(DBGLVL_BUF, " bufsize 0x%x = %d\n", port->bufsize,
+ saa7164_readl(port->bufsize));
+
+ dprintk(DBGLVL_BUF, " buffercount = %d\n", port->hwcfg.buffercount);
+ dprintk(DBGLVL_BUF, " bufoffset = 0x%x\n", port->bufoffset);
+ dprintk(DBGLVL_BUF, " bufptr32h = 0x%x\n", port->bufptr32h);
+ dprintk(DBGLVL_BUF, " bufptr32l = 0x%x\n", port->bufptr32l);
+
+ /* Poke the buffers and offsets into PCI space */
+ mutex_lock(&port->dmaqueue_lock);
+ list_for_each_safe(c, n, &port->dmaqueue.list) {
+ buf = list_entry(c, struct saa7164_buffer, list);
+
+ if (buf->flags != SAA7164_BUFFER_FREE)
+ BUG();
+
+ /* Place the buffer in the h/w queue */
+ saa7164_buffer_activate(buf, i);
+
+ /* Don't exceed the device maximum # bufs */
+ if (i++ > port->hwcfg.buffercount)
+ BUG();
+
+ }
+ mutex_unlock(&port->dmaqueue_lock);
+
+ return 0;
+}
+
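+/* Allocate a plain kernel buffer for userland reads; the deferred work
+ * handlers copy completed DMA buffers into these and wake any readers.
+ */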
+struct saa7164_user_buffer *saa7164_buffer_alloc_user(struct saa7164_dev *dev, u32 len)
+{
+ struct saa7164_user_buffer *buf;
+
+ buf = kzalloc(sizeof(struct saa7164_user_buffer), GFP_KERNEL);
+ if (!buf)
+ return NULL;
+
+ buf->data = kzalloc(len, GFP_KERNEL);
+
+ if (!buf->data) {
+ kfree(buf);
+ return NULL;
+ }
+
+ buf->actual_size = len;
+ buf->pos = 0;
+ buf->crc = 0;
+
+ dprintk(DBGLVL_BUF, "%s() allocated user buffer @ 0x%p\n",
+ __func__, buf);
+
+ return buf;
+}
+
+void saa7164_buffer_dealloc_user(struct saa7164_user_buffer *buf)
+{
+ if (!buf)
+ return;
+
+ kfree(buf->data);
+ buf->data = NULL;
+
+ kfree(buf);
+}
+
diff --git a/drivers/media/video/saa7164/saa7164-bus.c b/drivers/media/video/saa7164/saa7164-bus.c
index 83a04640a25a..30d5283da41e 100644
--- a/drivers/media/video/saa7164/saa7164-bus.c
+++ b/drivers/media/video/saa7164/saa7164-bus.c
@@ -1,7 +1,7 @@
/*
* Driver for the NXP SAA7164 PCIe bridge
*
- * Copyright (c) 2009 Steven Toth <stoth@kernellabs.com>
+ * Copyright (c) 2010 Steven Toth <stoth@kernellabs.com>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
@@ -26,7 +26,7 @@
*/
int saa7164_bus_setup(struct saa7164_dev *dev)
{
- tmComResBusInfo_t *b = &dev->bus;
+ struct tmComResBusInfo *b = &dev->bus;
mutex_init(&b->lock);
@@ -43,24 +43,18 @@ int saa7164_bus_setup(struct saa7164_dev *dev)
b->m_dwSizeGetRing = SAA_DEVICE_BUFFERBLOCKSIZE;
- b->m_pdwSetWritePos = (u32 *)((u8 *)(dev->bmmio +
- ((u32)dev->intfdesc.BARLocation) + (2 * sizeof(u64))));
+ b->m_dwSetWritePos = ((u32)dev->intfdesc.BARLocation) + (2 * sizeof(u64));
+ b->m_dwSetReadPos = b->m_dwSetWritePos + (1 * sizeof(u32));
- b->m_pdwSetReadPos = (u32 *)((u8 *)b->m_pdwSetWritePos +
- 1 * sizeof(u32));
-
- b->m_pdwGetWritePos = (u32 *)((u8 *)b->m_pdwSetWritePos +
- 2 * sizeof(u32));
-
- b->m_pdwGetReadPos = (u32 *)((u8 *)b->m_pdwSetWritePos +
- 3 * sizeof(u32));
+ b->m_dwGetWritePos = b->m_dwSetWritePos + (2 * sizeof(u32));
+ b->m_dwGetReadPos = b->m_dwSetWritePos + (3 * sizeof(u32));
return 0;
}
void saa7164_bus_dump(struct saa7164_dev *dev)
{
- tmComResBusInfo_t *b = &dev->bus;
+ struct tmComResBusInfo *b = &dev->bus;
dprintk(DBGLVL_BUS, "Dumping the bus structure:\n");
dprintk(DBGLVL_BUS, " .type = %d\n", b->Type);
@@ -71,20 +65,47 @@ void saa7164_bus_dump(struct saa7164_dev *dev)
dprintk(DBGLVL_BUS, " .m_pdwGetRing = 0x%p\n", b->m_pdwGetRing);
dprintk(DBGLVL_BUS, " .m_dwSizeGetRing = 0x%x\n", b->m_dwSizeGetRing);
- dprintk(DBGLVL_BUS, " .m_pdwSetWritePos = 0x%p (0x%08x)\n",
- b->m_pdwSetWritePos, *b->m_pdwSetWritePos);
+ dprintk(DBGLVL_BUS, " .m_dwSetReadPos = 0x%x (0x%08x)\n",
+ b->m_dwSetReadPos, saa7164_readl(b->m_dwSetReadPos));
+
+ dprintk(DBGLVL_BUS, " .m_dwSetWritePos = 0x%x (0x%08x)\n",
+ b->m_dwSetWritePos, saa7164_readl(b->m_dwSetWritePos));
+
+ dprintk(DBGLVL_BUS, " .m_dwGetReadPos = 0x%x (0x%08x)\n",
+ b->m_dwGetReadPos, saa7164_readl(b->m_dwGetReadPos));
+
+ dprintk(DBGLVL_BUS, " .m_dwGetWritePos = 0x%x (0x%08x)\n",
+ b->m_dwGetWritePos, saa7164_readl(b->m_dwGetWritePos));
+
+}
+
+/* Intentionally throw a BUG() if the state of the message bus looks corrupt */
+void saa7164_bus_verify(struct saa7164_dev *dev)
+{
+ struct tmComResBusInfo *b = &dev->bus;
+ int bug = 0;
- dprintk(DBGLVL_BUS, " .m_pdwSetReadPos = 0x%p (0x%08x)\n",
- b->m_pdwSetReadPos, *b->m_pdwSetReadPos);
+ if (saa7164_readl(b->m_dwSetReadPos) > b->m_dwSizeSetRing)
+ bug++;
- dprintk(DBGLVL_BUS, " .m_pdwGetWritePos = 0x%p (0x%08x)\n",
- b->m_pdwGetWritePos, *b->m_pdwGetWritePos);
+ if (saa7164_readl(b->m_dwSetWritePos) > b->m_dwSizeSetRing)
+ bug++;
- dprintk(DBGLVL_BUS, " .m_pdwGetReadPos = 0x%p (0x%08x)\n",
- b->m_pdwGetReadPos, *b->m_pdwGetReadPos);
+ if (saa7164_readl(b->m_dwGetReadPos) > b->m_dwSizeGetRing)
+ bug++;
+
+ if (saa7164_readl(b->m_dwGetWritePos) > b->m_dwSizeGetRing)
+ bug++;
+
+ if (bug) {
+ saa_debug = 0xffff; /* Ensure we get the bus dump */
+ saa7164_bus_dump(dev);
+ saa_debug = 1024; /* Drop back to a sane debug level */
+ BUG();
+ }
}
-void saa7164_bus_dumpmsg(struct saa7164_dev *dev, tmComResInfo_t* m, void *buf)
+void saa7164_bus_dumpmsg(struct saa7164_dev *dev, struct tmComResInfo* m, void *buf)
{
dprintk(DBGLVL_BUS, "Dumping msg structure:\n");
dprintk(DBGLVL_BUS, " .id = %d\n", m->id);
@@ -100,7 +121,7 @@ void saa7164_bus_dumpmsg(struct saa7164_dev *dev, tmComResInfo_t* m, void *buf)
/*
* Places a command or a response on the bus. The implementation does not
* know if it is a command or a response it just places the data on the
- * bus depending on the bus information given in the tmComResBusInfo_t
+ * bus depending on the bus information given in the struct tmComResBusInfo
* structure. If the command or response does not fit into the bus ring
* buffer it will be refused.
*
@@ -108,10 +129,10 @@ void saa7164_bus_dumpmsg(struct saa7164_dev *dev, tmComResInfo_t* m, void *buf)
* SAA_OK The function executed successfully.
* < 0 One or more members are not initialized.
*/
-int saa7164_bus_set(struct saa7164_dev *dev, tmComResInfo_t* msg, void *buf)
+int saa7164_bus_set(struct saa7164_dev *dev, struct tmComResInfo* msg, void *buf)
{
- tmComResBusInfo_t *bus = &dev->bus;
- u32 bytes_to_write, read_distance, timeout, curr_srp, curr_swp;
+ struct tmComResBusInfo *bus = &dev->bus;
+ u32 bytes_to_write, free_write_space, timeout, curr_srp, curr_swp;
u32 new_swp, space_rem;
int ret = SAA_ERR_BAD_PARAMETER;
@@ -122,6 +143,8 @@ int saa7164_bus_set(struct saa7164_dev *dev, tmComResInfo_t* msg, void *buf)
dprintk(DBGLVL_BUS, "%s()\n", __func__);
+ saa7164_bus_verify(dev);
+
msg->size = cpu_to_le16(msg->size);
msg->command = cpu_to_le16(msg->command);
msg->controlselector = cpu_to_le16(msg->controlselector);
@@ -141,30 +164,30 @@ int saa7164_bus_set(struct saa7164_dev *dev, tmComResInfo_t* msg, void *buf)
mutex_lock(&bus->lock);
bytes_to_write = sizeof(*msg) + msg->size;
- read_distance = 0;
+ free_write_space = 0;
timeout = SAA_BUS_TIMEOUT;
- curr_srp = le32_to_cpu(*bus->m_pdwSetReadPos);
- curr_swp = le32_to_cpu(*bus->m_pdwSetWritePos);
+ curr_srp = le32_to_cpu(saa7164_readl(bus->m_dwSetReadPos));
+ curr_swp = le32_to_cpu(saa7164_readl(bus->m_dwSetWritePos));
/* Deal with ring wrapping issues */
if (curr_srp > curr_swp)
- /* The ring has not wrapped yet */
- read_distance = curr_srp - curr_swp;
- else
/* Deal with the wrapped ring */
- read_distance = (curr_srp + bus->m_dwSizeSetRing) - curr_swp;
+ free_write_space = curr_srp - curr_swp;
+ else
+ /* The ring has not wrapped yet */
+ free_write_space = (curr_srp + bus->m_dwSizeSetRing) - curr_swp;
dprintk(DBGLVL_BUS, "%s() bytes_to_write = %d\n", __func__,
bytes_to_write);
- dprintk(DBGLVL_BUS, "%s() read_distance = %d\n", __func__,
- read_distance);
+ dprintk(DBGLVL_BUS, "%s() free_write_space = %d\n", __func__,
+ free_write_space);
dprintk(DBGLVL_BUS, "%s() curr_srp = %x\n", __func__, curr_srp);
dprintk(DBGLVL_BUS, "%s() curr_swp = %x\n", __func__, curr_swp);
/* Process the msg and write the content onto the bus */
- while (bytes_to_write >= read_distance) {
+ while (bytes_to_write >= free_write_space) {
if (timeout-- == 0) {
printk(KERN_ERR "%s() bus timeout\n", __func__);
@@ -177,15 +200,15 @@ int saa7164_bus_set(struct saa7164_dev *dev, tmComResInfo_t* msg, void *buf)
mdelay(1);
/* Check the space usage again */
- curr_srp = le32_to_cpu(*bus->m_pdwSetReadPos);
+ curr_srp = le32_to_cpu(saa7164_readl(bus->m_dwSetReadPos));
/* Deal with ring wrapping issues */
if (curr_srp > curr_swp)
- /* Read didn't wrap around the buffer */
- read_distance = curr_srp - curr_swp;
- else
/* Deal with the wrapped ring */
- read_distance = (curr_srp + bus->m_dwSizeSetRing) -
+ free_write_space = curr_srp - curr_swp;
+ else
+ /* Read didn't wrap around the buffer */
+ free_write_space = (curr_srp + bus->m_dwSizeSetRing) -
curr_swp;
}
@@ -257,37 +280,37 @@ int saa7164_bus_set(struct saa7164_dev *dev, tmComResInfo_t* msg, void *buf)
dprintk(DBGLVL_BUS, "%s() new_swp = %x\n", __func__, new_swp);
- /* TODO: Convert all of the direct PCI writes into
- * saa7164_writel/b calls for consistency.
- */
-
/* Update the bus write position */
- *bus->m_pdwSetWritePos = cpu_to_le32(new_swp);
+ saa7164_writel(bus->m_dwSetWritePos, cpu_to_le32(new_swp));
ret = SAA_OK;
out:
+ saa7164_bus_dump(dev);
mutex_unlock(&bus->lock);
+ saa7164_bus_verify(dev);
return ret;
}
/*
* Receive a command or a response from the bus. The implementation does not
* know if it is a command or a response it simply dequeues the data,
- * depending on the bus information given in the tmComResBusInfo_t structure.
+ * depending on the bus information given in the struct tmComResBusInfo structure.
*
* Return Value:
* 0 The function executed successfully.
* < 0 One or more members are not initialized.
*/
-int saa7164_bus_get(struct saa7164_dev *dev, tmComResInfo_t* msg, void *buf,
+int saa7164_bus_get(struct saa7164_dev *dev, struct tmComResInfo* msg, void *buf,
int peekonly)
{
- tmComResBusInfo_t *bus = &dev->bus;
+ struct tmComResBusInfo *bus = &dev->bus;
u32 bytes_to_read, write_distance, curr_grp, curr_gwp,
new_grp, buf_size, space_rem;
- tmComResInfo_t msg_tmp;
+ struct tmComResInfo msg_tmp;
int ret = SAA_ERR_BAD_PARAMETER;
+ saa7164_bus_verify(dev);
+
if (msg == 0)
return ret;
@@ -309,11 +332,10 @@ int saa7164_bus_get(struct saa7164_dev *dev, tmComResInfo_t* msg, void *buf,
/* Peek the bus to see if a msg exists, if it's not what we're expecting
* then return cleanly else read the message from the bus.
*/
- curr_gwp = le32_to_cpu(*bus->m_pdwGetWritePos);
- curr_grp = le32_to_cpu(*bus->m_pdwGetReadPos);
+ curr_gwp = le32_to_cpu(saa7164_readl(bus->m_dwGetWritePos));
+ curr_grp = le32_to_cpu(saa7164_readl(bus->m_dwGetReadPos));
if (curr_gwp == curr_grp) {
- dprintk(DBGLVL_BUS, "%s() No message on the bus\n", __func__);
ret = SAA_ERR_EMPTY;
goto out;
}
@@ -434,7 +456,7 @@ int saa7164_bus_get(struct saa7164_dev *dev, tmComResInfo_t* msg, void *buf,
}
/* Update the read positions, adjusting the ring */
- *bus->m_pdwGetReadPos = cpu_to_le32(new_grp);
+ saa7164_writel(bus->m_dwGetReadPos, cpu_to_le32(new_grp));
peekout:
msg->size = le16_to_cpu(msg->size);
@@ -443,6 +465,7 @@ peekout:
ret = SAA_OK;
out:
mutex_unlock(&bus->lock);
+ saa7164_bus_verify(dev);
return ret;
}
diff --git a/drivers/media/video/saa7164/saa7164-cards.c b/drivers/media/video/saa7164/saa7164-cards.c
index a3c299405f46..4cb634e952a6 100644
--- a/drivers/media/video/saa7164/saa7164-cards.c
+++ b/drivers/media/video/saa7164/saa7164-cards.c
@@ -1,7 +1,7 @@
/*
* Driver for the NXP SAA7164 PCIe bridge
*
- * Copyright (c) 2009 Steven Toth <stoth@kernellabs.com>
+ * Copyright (c) 2010 Steven Toth <stoth@kernellabs.com>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
@@ -55,6 +55,10 @@ struct saa7164_board saa7164_boards[] = {
.name = "Hauppauge WinTV-HVR2200",
.porta = SAA7164_MPEG_DVB,
.portb = SAA7164_MPEG_DVB,
+ .portc = SAA7164_MPEG_ENCODER,
+ .portd = SAA7164_MPEG_ENCODER,
+ .porte = SAA7164_MPEG_VBI,
+ .portf = SAA7164_MPEG_VBI,
.chiprev = SAA7164_CHIP_REV3,
.unit = {{
.id = 0x1d,
@@ -97,6 +101,10 @@ struct saa7164_board saa7164_boards[] = {
.name = "Hauppauge WinTV-HVR2200",
.porta = SAA7164_MPEG_DVB,
.portb = SAA7164_MPEG_DVB,
+ .portc = SAA7164_MPEG_ENCODER,
+ .portd = SAA7164_MPEG_ENCODER,
+ .porte = SAA7164_MPEG_VBI,
+ .portf = SAA7164_MPEG_VBI,
.chiprev = SAA7164_CHIP_REV2,
.unit = {{
.id = 0x06,
@@ -139,6 +147,10 @@ struct saa7164_board saa7164_boards[] = {
.name = "Hauppauge WinTV-HVR2200",
.porta = SAA7164_MPEG_DVB,
.portb = SAA7164_MPEG_DVB,
+ .portc = SAA7164_MPEG_ENCODER,
+ .portd = SAA7164_MPEG_ENCODER,
+ .porte = SAA7164_MPEG_VBI,
+ .portf = SAA7164_MPEG_VBI,
.chiprev = SAA7164_CHIP_REV2,
.unit = {{
.id = 0x1d,
@@ -195,6 +207,12 @@ struct saa7164_board saa7164_boards[] = {
.name = "Hauppauge WinTV-HVR2250",
.porta = SAA7164_MPEG_DVB,
.portb = SAA7164_MPEG_DVB,
+ .portc = SAA7164_MPEG_ENCODER,
+ .portd = SAA7164_MPEG_ENCODER,
+ .porte = SAA7164_MPEG_VBI,
+ .portf = SAA7164_MPEG_VBI,
.chiprev = SAA7164_CHIP_REV3,
.unit = {{
.id = 0x22,
@@ -251,6 +269,12 @@ struct saa7164_board saa7164_boards[] = {
.name = "Hauppauge WinTV-HVR2250",
.porta = SAA7164_MPEG_DVB,
.portb = SAA7164_MPEG_DVB,
+ .portc = SAA7164_MPEG_ENCODER,
+ .portd = SAA7164_MPEG_ENCODER,
+ .porte = SAA7164_MPEG_VBI,
+ .portf = SAA7164_MPEG_VBI,
.chiprev = SAA7164_CHIP_REV3,
.unit = {{
.id = 0x28,
@@ -307,6 +331,10 @@ struct saa7164_board saa7164_boards[] = {
.name = "Hauppauge WinTV-HVR2250",
.porta = SAA7164_MPEG_DVB,
.portb = SAA7164_MPEG_DVB,
+ .portc = SAA7164_MPEG_ENCODER,
+ .portd = SAA7164_MPEG_ENCODER,
+ .porte = SAA7164_MPEG_VBI,
+ .portf = SAA7164_MPEG_VBI,
.chiprev = SAA7164_CHIP_REV3,
.unit = {{
.id = 0x26,
@@ -437,8 +465,6 @@ void saa7164_card_list(struct saa7164_dev *dev)
void saa7164_gpio_setup(struct saa7164_dev *dev)
{
-
-
switch (dev->board) {
case SAA7164_BOARD_HAUPPAUGE_HVR2200:
case SAA7164_BOARD_HAUPPAUGE_HVR2200_2:
@@ -462,7 +488,6 @@ void saa7164_gpio_setup(struct saa7164_dev *dev)
saa7164_api_set_gpiobit(dev, PCIEBRIDGE_UNITID, 3);
break;
}
-
}
static void hauppauge_eeprom(struct saa7164_dev *dev, u8 *eeprom_data)
diff --git a/drivers/media/video/saa7164/saa7164-cmd.c b/drivers/media/video/saa7164/saa7164-cmd.c
index 9c1d3ac43869..301a9e302f45 100644
--- a/drivers/media/video/saa7164/saa7164-cmd.c
+++ b/drivers/media/video/saa7164/saa7164-cmd.c
@@ -1,7 +1,7 @@
/*
* Driver for the NXP SAA7164 PCIe bridge
*
- * Copyright (c) 2009 Steven Toth <stoth@kernellabs.com>
+ * Copyright (c) 2010 Steven Toth <stoth@kernellabs.com>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
@@ -82,16 +82,17 @@ u32 saa7164_cmd_timeout_get(struct saa7164_dev *dev, u8 seqno)
* -bus/c running buffer. */
int saa7164_irq_dequeue(struct saa7164_dev *dev)
{
- int ret = SAA_OK;
+ int ret = SAA_OK, i = 0;
u32 timeout;
wait_queue_head_t *q = 0;
+ u8 tmp[512];
dprintk(DBGLVL_CMD, "%s()\n", __func__);
/* While any outstand message on the bus exists... */
do {
/* Peek the msg bus */
- tmComResInfo_t tRsp = { 0, 0, 0, 0, 0, 0 };
+ struct tmComResInfo tRsp = { 0, 0, 0, 0, 0, 0 };
ret = saa7164_bus_get(dev, &tRsp, NULL, 1);
if (ret != SAA_OK)
break;
@@ -109,8 +110,22 @@ int saa7164_irq_dequeue(struct saa7164_dev *dev)
printk(KERN_ERR
"%s() found timed out command on the bus\n",
__func__);
+
+ /* Clean the bus */
+ ret = saa7164_bus_get(dev, &tRsp, &tmp, 0);
+ printk(KERN_ERR "%s() ret = %x\n", __func__, ret);
+ if (ret == SAA_ERR_EMPTY)
+ /* Someone else already fetched the response */
+ return SAA_OK;
+
+ if (ret != SAA_OK)
+ return ret;
}
- } while (0);
+
+ /* It's unlikely to have more than 4 or 5 pending messages,
+ * ensure we exit at some point regardless.
+ */
+ } while (i++ < 32);
return ret;
}
@@ -128,7 +143,7 @@ int saa7164_cmd_dequeue(struct saa7164_dev *dev)
while (loop) {
- tmComResInfo_t tRsp = { 0, 0, 0, 0, 0, 0 };
+ struct tmComResInfo tRsp = { 0, 0, 0, 0, 0, 0 };
ret = saa7164_bus_get(dev, &tRsp, NULL, 1);
if (ret == SAA_ERR_EMPTY)
return SAA_OK;
@@ -171,9 +186,9 @@ int saa7164_cmd_dequeue(struct saa7164_dev *dev)
return SAA_OK;
}
-int saa7164_cmd_set(struct saa7164_dev *dev, tmComResInfo_t* msg, void *buf)
+int saa7164_cmd_set(struct saa7164_dev *dev, struct tmComResInfo* msg, void *buf)
{
- tmComResBusInfo_t *bus = &dev->bus;
+ struct tmComResBusInfo *bus = &dev->bus;
u8 cmd_sent;
u16 size, idx;
u32 cmds;
@@ -324,11 +339,11 @@ void saa7164_cmd_signal(struct saa7164_dev *dev, u8 seqno)
mutex_unlock(&dev->lock);
}
-int saa7164_cmd_send(struct saa7164_dev *dev, u8 id, tmComResCmd_t command,
+int saa7164_cmd_send(struct saa7164_dev *dev, u8 id, enum tmComResCmd command,
u16 controlselector, u16 size, void *buf)
{
- tmComResInfo_t command_t, *pcommand_t;
- tmComResInfo_t response_t, *presponse_t;
+ struct tmComResInfo command_t, *pcommand_t;
+ struct tmComResInfo response_t, *presponse_t;
u8 errdata[256];
u16 resp_dsize;
u16 data_recd;
diff --git a/drivers/media/video/saa7164/saa7164-core.c b/drivers/media/video/saa7164/saa7164-core.c
index e6aa0fbd1e91..e1bac5051460 100644
--- a/drivers/media/video/saa7164/saa7164-core.c
+++ b/drivers/media/video/saa7164/saa7164-core.c
@@ -1,7 +1,7 @@
/*
* Driver for the NXP SAA7164 PCIe bridge
*
- * Copyright (c) 2009 Steven Toth <stoth@kernellabs.com>
+ * Copyright (c) 2010 Steven Toth <stoth@kernellabs.com>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
@@ -30,6 +30,9 @@
#include <linux/delay.h>
#include <asm/div64.h>
+#ifdef CONFIG_PROC_FS
+#include <linux/proc_fs.h>
+#endif
#include "saa7164.h"
MODULE_DESCRIPTION("Driver for NXP SAA7164 based TV cards");
@@ -49,14 +52,38 @@ unsigned int saa_debug;
module_param_named(debug, saa_debug, int, 0644);
MODULE_PARM_DESC(debug, "enable debug messages");
+unsigned int fw_debug;
+module_param(fw_debug, int, 0644);
+MODULE_PARM_DESC(fw_debug, "Firware debug level def:2");
+
+unsigned int encoder_buffers = SAA7164_MAX_ENCODER_BUFFERS;
+module_param(encoder_buffers, int, 0644);
+MODULE_PARM_DESC(encoder_buffers, "Total buffers in read queue 16-512 def:64");
+
+unsigned int vbi_buffers = SAA7164_MAX_VBI_BUFFERS;
+module_param(vbi_buffers, int, 0644);
+MODULE_PARM_DESC(vbi_buffers, "Total buffers in read queue 16-512 def:64");
+
unsigned int waitsecs = 10;
module_param(waitsecs, int, 0644);
-MODULE_PARM_DESC(debug, "timeout on firmware messages");
+MODULE_PARM_DESC(waitsecs, "timeout on firmware messages");
static unsigned int card[] = {[0 ... (SAA7164_MAXBOARDS - 1)] = UNSET };
module_param_array(card, int, NULL, 0444);
MODULE_PARM_DESC(card, "card type");
+unsigned int print_histogram = 64;
+module_param(print_histogram, int, 0644);
+MODULE_PARM_DESC(print_histogram, "print histogram values once");
+
+unsigned int crc_checking = 1;
+module_param(crc_checking, int, 0644);
+MODULE_PARM_DESC(crc_checking, "enable crc sanity checking on buffers");
+
+unsigned int guard_checking = 1;
+module_param(guard_checking, int, 0644);
+MODULE_PARM_DESC(guard_checking, "enable dma sanity checking for buffer overruns");
+
static unsigned int saa7164_devcount;
static DEFINE_MUTEX(devlist);
@@ -64,6 +91,444 @@ LIST_HEAD(saa7164_devlist);
#define INT_SIZE 16
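+/* Hex dump a buffer to the kernel log, skipping 16 byte rows that are
+ * entirely 0xff (the driver's known-empty fill pattern).
+ */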
+void saa7164_dumphex16FF(struct saa7164_dev *dev, u8 *buf, int len)
+{
+ int i;
+ u8 tmp[16];
+ memset(&tmp[0], 0xff, sizeof(tmp));
+
+ printk(KERN_INFO "--------------------> "
+ "00 01 02 03 04 05 06 07 08 09 0a 0b 0c 0d 0e 0f\n");
+
+ for (i = 0; i < len; i += 16) {
+ if (memcmp(&tmp, buf + i, sizeof(tmp)) != 0) {
+ printk(KERN_INFO " [0x%08x] "
+ "%02x %02x %02x %02x %02x %02x %02x %02x "
+ "%02x %02x %02x %02x %02x %02x %02x %02x\n", i,
+ *(buf+i+0), *(buf+i+1), *(buf+i+2), *(buf+i+3),
+ *(buf+i+4), *(buf+i+5), *(buf+i+6), *(buf+i+7),
+ *(buf+i+8), *(buf+i+9), *(buf+i+10), *(buf+i+11),
+ *(buf+i+12), *(buf+i+13), *(buf+i+14), *(buf+i+15));
+ }
+ }
+}
+
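+/* Sanity check an MPEG program stream buffer: every 2048 byte sector is
+ * expected to begin with the pack start code 00 00 01 BA.
+ */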
+static void saa7164_pack_verifier(struct saa7164_buffer *buf)
+{
+ u8 *p = (u8 *)buf->cpu;
+ int i;
+
+ for (i = 0; i < buf->actual_size; i += 2048) {
+
+ if ((*(p + i + 0) != 0x00) || (*(p + i + 1) != 0x00) ||
+ (*(p + i + 2) != 0x01) || (*(p + i + 3) != 0xBA)) {
+ printk(KERN_ERR "No pack at 0x%x\n", i);
+// saa7164_dumphex16FF(buf->port->dev, (p + i), 32);
+ }
+ }
+}
+
+#define FIXED_VIDEO_PID 0xf1
+#define FIXED_AUDIO_PID 0xf2
+
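+/* Walk the buffer in 188 byte TS packet strides, checking the 0x47 sync
+ * byte and the 4-bit continuity counters on the fixed video and audio PIDs.
+ */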
+static void saa7164_ts_verifier(struct saa7164_buffer *buf)
+{
+ struct saa7164_port *port = buf->port;
+ u32 i;
+ u8 cc, a;
+ u16 pid;
+ u8 __iomem *bufcpu = (u8 *)buf->cpu;
+
+ port->sync_errors = 0;
+ port->v_cc_errors = 0;
+ port->a_cc_errors = 0;
+
+ for (i = 0; i < buf->actual_size; i += 188) {
+ if (*(bufcpu + i) != 0x47)
+ port->sync_errors++;
+
+ /* TODO: Query pid lower 8 bits, ignoring upper bits intentionally */
+ pid = ((*(bufcpu + i + 1) & 0x1f) << 8) | *(bufcpu + i + 2);
+ cc = *(bufcpu + i + 3) & 0x0f;
+
+ if (pid == FIXED_VIDEO_PID) {
+ a = ((port->last_v_cc + 1) & 0x0f);
+ if (a != cc) {
+ printk(KERN_ERR "video cc last = %x current = %x i = %d\n",
+ port->last_v_cc, cc, i);
+ port->v_cc_errors++;
+ }
+
+ port->last_v_cc = cc;
+ } else
+ if (pid == FIXED_AUDIO_PID) {
+ a = ((port->last_a_cc + 1) & 0x0f);
+ if (a != cc) {
+ printk(KERN_ERR "audio cc last = %x current = %x i = %d\n",
+ port->last_a_cc, cc, i);
+ port->a_cc_errors++;
+ }
+
+ port->last_a_cc = cc;
+ }
+
+ }
+
+ /* Only report errors if we've been through this function at least
+ * once already and the cached cc values are primed. First time through
+ * always generates errors.
+ */
+ if (port->v_cc_errors && (port->done_first_interrupt > 1))
+ printk(KERN_ERR "video pid cc, %d errors\n", port->v_cc_errors);
+
+ if (port->a_cc_errors && (port->done_first_interrupt > 1))
+ printk(KERN_ERR "audio pid cc, %d errors\n", port->a_cc_errors);
+
+ if (port->sync_errors && (port->done_first_interrupt > 1))
+ printk(KERN_ERR "sync_errors = %d\n", port->sync_errors);
+
+ if (port->done_first_interrupt == 1)
+ port->done_first_interrupt++;
+}
+
+static void saa7164_histogram_reset(struct saa7164_histogram *hg, char *name)
+{
+ int i;
+
+ memset(hg, 0, sizeof(struct saa7164_histogram));
+ strcpy(hg->name, name);
+
+ /* First 30ms x 1ms */
+ for (i = 0; i < 30; i++) {
+ hg->counter1[0 + i].val = i;
+ }
+
+ /* 30 - 200ms x 10ms */
+ for (i = 0; i < 18; i++) {
+ hg->counter1[30 + i].val = 30 + (i * 10);
+ }
+
+ /* 200 - 3000ms x 200ms */
+ for (i = 0; i < 15; i++) {
+ hg->counter1[48 + i].val = 200 + (i * 200);
+ }
+
+ /* Catch all massive value (2secs) */
+ hg->counter1[55].val = 2000;
+
+ /* Catch all massive value (4secs) */
+ hg->counter1[56].val = 4000;
+
+ /* Catch all massive value (8secs) */
+ hg->counter1[57].val = 8000;
+
+ /* Catch all massive value (15secs) */
+ hg->counter1[58].val = 15000;
+
+ /* Catch all massive value (30secs) */
+ hg->counter1[59].val = 30000;
+
+ /* Catch all massive value (60secs) */
+ hg->counter1[60].val = 60000;
+
+ /* Catch all massive value (5mins) */
+ hg->counter1[61].val = 300000;
+
+ /* Catch all massive value (15mins) */
+ hg->counter1[62].val = 900000;
+
+ /* Catch all massive values (1hr) */
+ hg->counter1[63].val = 3600000;
+}
+
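+/* Account 'val' against the first bucket whose threshold is >= val; the
+ * bucket thresholds were primed by saa7164_histogram_reset().
+ */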
+void saa7164_histogram_update(struct saa7164_histogram *hg, u32 val)
+{
+ int i;
+ for (i = 0; i < 64; i++) {
+ if (val <= hg->counter1[i].val) {
+ hg->counter1[i].count++;
+ hg->counter1[i].update_time = jiffies;
+ break;
+ }
+ }
+}
+
+static void saa7164_histogram_print(struct saa7164_port *port,
+ struct saa7164_histogram *hg)
+{
+ u32 entries = 0;
+ int i;
+
+ printk(KERN_ERR "Histogram named %s (ms, count, last_update_jiffy)\n", hg->name);
+ for (i = 0; i < 64; i++) {
+ if (hg->counter1[i].count == 0)
+ continue;
+
+ printk(KERN_ERR " %4d %12d %Ld\n",
+ hg->counter1[i].val,
+ hg->counter1[i].count,
+ hg->counter1[i].update_time);
+
+ entries++;
+ }
+ printk(KERN_ERR "Total: %d\n", entries);
+}
+
+static void saa7164_work_enchandler_helper(struct saa7164_port *port, int bufnr)
+{
+ struct saa7164_dev *dev = port->dev;
+ struct saa7164_buffer *buf = 0;
+ struct saa7164_user_buffer *ubuf = 0;
+ struct list_head *c, *n;
+ int i = 0;
+ u8 __iomem *p;
+
+ mutex_lock(&port->dmaqueue_lock);
+ list_for_each_safe(c, n, &port->dmaqueue.list) {
+
+ buf = list_entry(c, struct saa7164_buffer, list);
+ if (i++ > port->hwcfg.buffercount) {
+ printk(KERN_ERR "%s() illegal i count %d\n",
+ __func__, i);
+ break;
+ }
+
+ if (buf->idx == bufnr) {
+
+ /* Found the buffer, deal with it */
+ dprintk(DBGLVL_IRQ, "%s() bufnr: %d\n", __func__, bufnr);
+
+ if (crc_checking) {
+ /* Throw a new checksum on the dma buffer */
+ buf->crc = crc32(0, buf->cpu, buf->actual_size);
+ }
+
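+ /* The DMA buffer is refilled with 0xff once consumed (below); any
+ * non-0xff bytes just past actual_size mean the hardware wrote
+ * beyond its expected payload.
+ */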
+ if (guard_checking) {
+ p = (u8 *)buf->cpu;
+ if ((*(p + buf->actual_size + 0) != 0xff) ||
+ (*(p + buf->actual_size + 1) != 0xff) ||
+ (*(p + buf->actual_size + 2) != 0xff) ||
+ (*(p + buf->actual_size + 3) != 0xff) ||
+ (*(p + buf->actual_size + 0x10) != 0xff) ||
+ (*(p + buf->actual_size + 0x11) != 0xff) ||
+ (*(p + buf->actual_size + 0x12) != 0xff) ||
+ (*(p + buf->actual_size + 0x13) != 0xff)) {
+ printk(KERN_ERR "%s() buf %p guard buffer breach\n",
+ __func__, buf);
+// saa7164_dumphex16FF(dev, (p + buf->actual_size) - 32 , 64);
+ }
+ }
+
+ if ((port->nr != SAA7164_PORT_VBI1) && (port->nr != SAA7164_PORT_VBI2)) {
+ /* Validate the incoming buffer content */
+ if (port->encoder_params.stream_type == V4L2_MPEG_STREAM_TYPE_MPEG2_TS)
+ saa7164_ts_verifier(buf);
+ else if (port->encoder_params.stream_type == V4L2_MPEG_STREAM_TYPE_MPEG2_PS)
+ saa7164_pack_verifier(buf);
+ }
+
+ /* find a free user buffer and clone to it */
+ if (!list_empty(&port->list_buf_free.list)) {
+
+ /* Pull the first buffer from the free list */
+ ubuf = list_first_entry(&port->list_buf_free.list,
+ struct saa7164_user_buffer, list);
+
+ if (buf->actual_size <= ubuf->actual_size) {
+
+ memcpy_fromio(ubuf->data, buf->cpu,
+ ubuf->actual_size);
+
+ if (crc_checking) {
+ /* Throw a new checksum on the read buffer */
+ ubuf->crc = crc32(0, ubuf->data, ubuf->actual_size);
+ }
+
+ /* Reset the read position and requeue on the used list */
+ ubuf->pos = 0;
+
+ list_move_tail(&ubuf->list,
+ &port->list_buf_used.list);
+
+ /* Flag any userland waiters */
+ wake_up_interruptible(&port->wait_read);
+
+ } else {
+ printk(KERN_ERR "buf %p bufsize fails match\n", buf);
+ }
+
+ } else
+ printk(KERN_ERR "encirq no free buffers, increase param encoder_buffers\n");
+
+ /* Ensure offset into buffer remains 0, fill buffer
+ * with known bad data. We check for this data at a later point
+ * in time. */
+ saa7164_buffer_zero_offsets(port, bufnr);
+ memset_io(buf->cpu, 0xff, buf->pci_size);
+ if (crc_checking) {
+ /* Throw yet another new checksum on the dma buffer */
+ buf->crc = crc32(0, buf->cpu, buf->actual_size);
+ }
+
+ break;
+ }
+ }
+ mutex_unlock(&port->dmaqueue_lock);
+}
+
+static void saa7164_work_enchandler(struct work_struct *w)
+{
+ struct saa7164_port *port =
+ container_of(w, struct saa7164_port, workenc);
+ struct saa7164_dev *dev = port->dev;
+
+ u32 wp, mcb, rp, cnt = 0;
+
+ port->last_svc_msecs_diff = port->last_svc_msecs;
+ port->last_svc_msecs = jiffies_to_msecs(jiffies);
+
+ port->last_svc_msecs_diff = port->last_svc_msecs -
+ port->last_svc_msecs_diff;
+
+ saa7164_histogram_update(&port->svc_interval,
+ port->last_svc_msecs_diff);
+
+ port->last_irq_svc_msecs_diff = port->last_svc_msecs -
+ port->last_irq_msecs;
+
+ saa7164_histogram_update(&port->irq_svc_interval,
+ port->last_irq_svc_msecs_diff);
+
+ dprintk(DBGLVL_IRQ,
+ "%s() %Ldms elapsed irq->deferred %Ldms wp: %d rp: %d\n",
+ __func__,
+ port->last_svc_msecs_diff,
+ port->last_irq_svc_msecs_diff,
+ port->last_svc_wp,
+ port->last_svc_rp
+ );
+
+ /* Current write position */
+ wp = saa7164_readl(port->bufcounter);
+ if (wp > (port->hwcfg.buffercount - 1)) {
+ printk(KERN_ERR "%s() illegal buf count %d\n", __func__, wp);
+ return;
+ }
+
+ /* Most current complete buffer */
+ if (wp == 0)
+ mcb = (port->hwcfg.buffercount - 1);
+ else
+ mcb = wp - 1;
+
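+ /* Service every buffer from the one after the last handled index up
+ * to the most recently completed buffer (mcb), wrapping around the ring.
+ */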
+ while (1) {
+ if (port->done_first_interrupt == 0) {
+ port->done_first_interrupt++;
+ rp = mcb;
+ } else
+ rp = (port->last_svc_rp + 1) % 8;
+
+ if ((rp < 0) || (rp > (port->hwcfg.buffercount - 1))) {
+ printk(KERN_ERR "%s() illegal rp count %d\n", __func__, rp);
+ break;
+ }
+
+ saa7164_work_enchandler_helper(port, rp);
+ port->last_svc_rp = rp;
+ cnt++;
+
+ if (rp == mcb)
+ break;
+ }
+
+ /* TODO: Convert this into a /proc/saa7164 style readable file */
+ if (print_histogram == port->nr) {
+ saa7164_histogram_print(port, &port->irq_interval);
+ saa7164_histogram_print(port, &port->svc_interval);
+ saa7164_histogram_print(port, &port->irq_svc_interval);
+ saa7164_histogram_print(port, &port->read_interval);
+ saa7164_histogram_print(port, &port->poll_interval);
+ /* TODO: fix this to preserve any previous state */
+ print_histogram = 64 + port->nr;
+ }
+}
+
+static void saa7164_work_vbihandler(struct work_struct *w)
+{
+ struct saa7164_port *port =
+ container_of(w, struct saa7164_port, workenc);
+ struct saa7164_dev *dev = port->dev;
+
+ u32 wp, mcb, rp, cnt = 0;
+
+ port->last_svc_msecs_diff = port->last_svc_msecs;
+ port->last_svc_msecs = jiffies_to_msecs(jiffies);
+ port->last_svc_msecs_diff = port->last_svc_msecs -
+ port->last_svc_msecs_diff;
+
+ saa7164_histogram_update(&port->svc_interval,
+ port->last_svc_msecs_diff);
+
+ port->last_irq_svc_msecs_diff = port->last_svc_msecs -
+ port->last_irq_msecs;
+
+ saa7164_histogram_update(&port->irq_svc_interval,
+ port->last_irq_svc_msecs_diff);
+
+ dprintk(DBGLVL_IRQ,
+ "%s() %Ldms elapsed irq->deferred %Ldms wp: %d rp: %d\n",
+ __func__,
+ port->last_svc_msecs_diff,
+ port->last_irq_svc_msecs_diff,
+ port->last_svc_wp,
+ port->last_svc_rp
+ );
+
+ /* Current write position */
+ wp = saa7164_readl(port->bufcounter);
+ if (wp > (port->hwcfg.buffercount - 1)) {
+ printk(KERN_ERR "%s() illegal buf count %d\n", __func__, wp);
+ return;
+ }
+
+ /* Most current complete buffer */
+ if (wp == 0)
+ mcb = (port->hwcfg.buffercount - 1);
+ else
+ mcb = wp - 1;
+
+ while (1) {
+ if (port->done_first_interrupt == 0) {
+ port->done_first_interrupt++;
+ rp = mcb;
+ } else
+ rp = (port->last_svc_rp + 1) % 8;
+
+ if ((rp < 0) || (rp > (port->hwcfg.buffercount - 1))) {
+ printk(KERN_ERR "%s() illegal rp count %d\n", __func__, rp);
+ break;
+ }
+
+ saa7164_work_enchandler_helper(port, rp);
+ port->last_svc_rp = rp;
+ cnt++;
+
+ if (rp == mcb)
+ break;
+ }
+
+ /* TODO: Convert this into a /proc/saa7164 style readable file */
+ if (print_histogram == port->nr) {
+ saa7164_histogram_print(port, &port->irq_interval);
+ saa7164_histogram_print(port, &port->svc_interval);
+ saa7164_histogram_print(port, &port->irq_svc_interval);
+ saa7164_histogram_print(port, &port->read_interval);
+ saa7164_histogram_print(port, &port->poll_interval);
+ /* TODO: fix this to preserve any previous state */
+ print_histogram = 64 + port->nr;
+ }
+}
+
static void saa7164_work_cmdhandler(struct work_struct *w)
{
struct saa7164_dev *dev = container_of(w, struct saa7164_dev, workcmd);
@@ -74,7 +539,7 @@ static void saa7164_work_cmdhandler(struct work_struct *w)
static void saa7164_buffer_deliver(struct saa7164_buffer *buf)
{
- struct saa7164_tsport *port = buf->port;
+ struct saa7164_port *port = buf->port;
/* Feed the transport payload into the kernel demux */
dvb_dmx_swfilter_packets(&port->dvb.demux, (u8 *)buf->cpu,
@@ -82,7 +547,56 @@ static void saa7164_buffer_deliver(struct saa7164_buffer *buf)
}
-static irqreturn_t saa7164_irq_ts(struct saa7164_tsport *port)
+static irqreturn_t saa7164_irq_vbi(struct saa7164_port *port)
+{
+ struct saa7164_dev *dev = port->dev;
+
+ /* Store old time */
+ port->last_irq_msecs_diff = port->last_irq_msecs;
+
+ /* Collect new stats */
+ port->last_irq_msecs = jiffies_to_msecs(jiffies);
+
+ /* Calculate stats */
+ port->last_irq_msecs_diff = port->last_irq_msecs -
+ port->last_irq_msecs_diff;
+
+ saa7164_histogram_update(&port->irq_interval,
+ port->last_irq_msecs_diff);
+
+ dprintk(DBGLVL_IRQ, "%s() %Ldms elapsed\n", __func__,
+ port->last_irq_msecs_diff);
+
+ /* Schedule the deferred vbi work handler */
+ schedule_work(&port->workenc);
+ return 0;
+}
+
+static irqreturn_t saa7164_irq_encoder(struct saa7164_port *port)
+{
+ struct saa7164_dev *dev = port->dev;
+
+ /* Store old time */
+ port->last_irq_msecs_diff = port->last_irq_msecs;
+
+ /* Collect new stats */
+ port->last_irq_msecs = jiffies_to_msecs(jiffies);
+
+ /* Calculate stats */
+ port->last_irq_msecs_diff = port->last_irq_msecs -
+ port->last_irq_msecs_diff;
+
+ saa7164_histogram_update(&port->irq_interval,
+ port->last_irq_msecs_diff);
+
+ dprintk(DBGLVL_IRQ, "%s() %Ldms elapsed\n", __func__,
+ port->last_irq_msecs_diff);
+
+ schedule_work(&port->workenc);
+ return 0;
+}
+
+static irqreturn_t saa7164_irq_ts(struct saa7164_port *port)
{
struct saa7164_dev *dev = port->dev;
struct saa7164_buffer *buf;
@@ -96,7 +610,7 @@ static irqreturn_t saa7164_irq_ts(struct saa7164_tsport *port)
/* Find the previous buffer to the current write point */
if (wp == 0)
- rp = 7;
+ rp = (port->hwcfg.buffercount - 1);
else
rp = wp - 1;
@@ -107,7 +621,7 @@ static irqreturn_t saa7164_irq_ts(struct saa7164_tsport *port)
if (i++ > port->hwcfg.buffercount)
BUG();
- if (buf->nr == rp) {
+ if (buf->idx == rp) {
/* Found the buffer, deal with it */
dprintk(DBGLVL_IRQ, "%s() wp: %d processing: %d\n",
__func__, wp, rp);
@@ -123,6 +637,13 @@ static irqreturn_t saa7164_irq_ts(struct saa7164_tsport *port)
static irqreturn_t saa7164_irq(int irq, void *dev_id)
{
struct saa7164_dev *dev = dev_id;
+ struct saa7164_port *porta = &dev->ports[SAA7164_PORT_TS1];
+ struct saa7164_port *portb = &dev->ports[SAA7164_PORT_TS2];
+ struct saa7164_port *portc = &dev->ports[SAA7164_PORT_ENC1];
+ struct saa7164_port *portd = &dev->ports[SAA7164_PORT_ENC2];
+ struct saa7164_port *porte = &dev->ports[SAA7164_PORT_VBI1];
+ struct saa7164_port *portf = &dev->ports[SAA7164_PORT_VBI2];
+
u32 intid, intstat[INT_SIZE/4];
int i, handled = 0, bit;
@@ -168,17 +689,35 @@ static irqreturn_t saa7164_irq(int irq, void *dev_id)
if (intid == dev->intfdesc.bInterruptId) {
/* A response to an cmd/api call */
schedule_work(&dev->workcmd);
- } else if (intid ==
- dev->ts1.hwcfg.interruptid) {
+ } else if (intid == porta->hwcfg.interruptid) {
/* Transport path 1 */
- saa7164_irq_ts(&dev->ts1);
+ saa7164_irq_ts(porta);
- } else if (intid ==
- dev->ts2.hwcfg.interruptid) {
+ } else if (intid == portb->hwcfg.interruptid) {
/* Transport path 2 */
- saa7164_irq_ts(&dev->ts2);
+ saa7164_irq_ts(portb);
+
+ } else if (intid == portc->hwcfg.interruptid) {
+
+ /* Encoder path 1 */
+ saa7164_irq_encoder(portc);
+
+ } else if (intid == portd->hwcfg.interruptid) {
+
+ /* Encoder path 2 */
+ saa7164_irq_encoder(portd);
+
+ } else if (intid == porte->hwcfg.interruptid) {
+
+ /* VBI path 1 */
+ saa7164_irq_vbi(porte);
+
+ } else if (intid == portf->hwcfg.interruptid) {
+
+ /* VBI path 2 */
+ saa7164_irq_vbi(portf);
} else {
/* Find the function */
@@ -286,8 +825,8 @@ void saa7164_dumpregs(struct saa7164_dev *dev, u32 addr)
static void saa7164_dump_hwdesc(struct saa7164_dev *dev)
{
- dprintk(1, "@0x%p hwdesc sizeof(tmComResHWDescr_t) = %d bytes\n",
- &dev->hwdesc, (u32)sizeof(tmComResHWDescr_t));
+ dprintk(1, "@0x%p hwdesc sizeof(struct tmComResHWDescr) = %d bytes\n",
+ &dev->hwdesc, (u32)sizeof(struct tmComResHWDescr));
dprintk(1, " .bLength = 0x%x\n", dev->hwdesc.bLength);
dprintk(1, " .bDescriptorType = 0x%x\n", dev->hwdesc.bDescriptorType);
@@ -317,8 +856,8 @@ static void saa7164_dump_hwdesc(struct saa7164_dev *dev)
static void saa7164_dump_intfdesc(struct saa7164_dev *dev)
{
dprintk(1, "@0x%p intfdesc "
- "sizeof(tmComResInterfaceDescr_t) = %d bytes\n",
- &dev->intfdesc, (u32)sizeof(tmComResInterfaceDescr_t));
+ "sizeof(struct tmComResInterfaceDescr) = %d bytes\n",
+ &dev->intfdesc, (u32)sizeof(struct tmComResInterfaceDescr));
dprintk(1, " .bLength = 0x%x\n", dev->intfdesc.bLength);
dprintk(1, " .bDescriptorType = 0x%x\n", dev->intfdesc.bDescriptorType);
@@ -338,8 +877,8 @@ static void saa7164_dump_intfdesc(struct saa7164_dev *dev)
static void saa7164_dump_busdesc(struct saa7164_dev *dev)
{
- dprintk(1, "@0x%p busdesc sizeof(tmComResBusDescr_t) = %d bytes\n",
- &dev->busdesc, (u32)sizeof(tmComResBusDescr_t));
+ dprintk(1, "@0x%p busdesc sizeof(struct tmComResBusDescr) = %d bytes\n",
+ &dev->busdesc, (u32)sizeof(struct tmComResBusDescr));
dprintk(1, " .CommandRing = 0x%016Lx\n", dev->busdesc.CommandRing);
dprintk(1, " .ResponseRing = 0x%016Lx\n", dev->busdesc.ResponseRing);
@@ -356,23 +895,23 @@ static void saa7164_dump_busdesc(struct saa7164_dev *dev)
*/
static void saa7164_get_descriptors(struct saa7164_dev *dev)
{
- memcpy(&dev->hwdesc, dev->bmmio, sizeof(tmComResHWDescr_t));
- memcpy(&dev->intfdesc, dev->bmmio + sizeof(tmComResHWDescr_t),
- sizeof(tmComResInterfaceDescr_t));
- memcpy(&dev->busdesc, dev->bmmio + dev->intfdesc.BARLocation,
- sizeof(tmComResBusDescr_t));
-
- if (dev->hwdesc.bLength != sizeof(tmComResHWDescr_t)) {
- printk(KERN_ERR "Structure tmComResHWDescr_t is mangled\n");
+ memcpy_fromio(&dev->hwdesc, dev->bmmio, sizeof(struct tmComResHWDescr));
+ memcpy_fromio(&dev->intfdesc, dev->bmmio + sizeof(struct tmComResHWDescr),
+ sizeof(struct tmComResInterfaceDescr));
+ memcpy_fromio(&dev->busdesc, dev->bmmio + dev->intfdesc.BARLocation,
+ sizeof(struct tmComResBusDescr));
+
+ if (dev->hwdesc.bLength != sizeof(struct tmComResHWDescr)) {
+ printk(KERN_ERR "Structure struct tmComResHWDescr is mangled\n");
printk(KERN_ERR "Need %x got %d\n", dev->hwdesc.bLength,
- (u32)sizeof(tmComResHWDescr_t));
+ (u32)sizeof(struct tmComResHWDescr));
} else
saa7164_dump_hwdesc(dev);
- if (dev->intfdesc.bLength != sizeof(tmComResInterfaceDescr_t)) {
- printk(KERN_ERR "struct tmComResInterfaceDescr_t is mangled\n");
+ if (dev->intfdesc.bLength != sizeof(struct tmComResInterfaceDescr)) {
+ printk(KERN_ERR "struct struct tmComResInterfaceDescr is mangled\n");
printk(KERN_ERR "Need %x got %d\n", dev->intfdesc.bLength,
- (u32)sizeof(tmComResInterfaceDescr_t));
+ (u32)sizeof(struct tmComResInterfaceDescr));
} else
saa7164_dump_intfdesc(dev);
@@ -402,6 +941,58 @@ static int get_resources(struct saa7164_dev *dev)
return -EBUSY;
}
+static int saa7164_port_init(struct saa7164_dev *dev, int portnr)
+{
+ struct saa7164_port *port = 0;
+
+ if ((portnr < 0) || (portnr >= SAA7164_MAX_PORTS))
+ BUG();
+
+ port = &dev->ports[portnr];
+
+ port->dev = dev;
+ port->nr = portnr;
+
+ if ((portnr == SAA7164_PORT_TS1) || (portnr == SAA7164_PORT_TS2))
+ port->type = SAA7164_MPEG_DVB;
+ else if ((portnr == SAA7164_PORT_ENC1) || (portnr == SAA7164_PORT_ENC2)) {
+ port->type = SAA7164_MPEG_ENCODER;
+
+ /* We need a deferred interrupt handler for cmd handling */
+ INIT_WORK(&port->workenc, saa7164_work_enchandler);
+ } else if ((portnr == SAA7164_PORT_VBI1) || (portnr == SAA7164_PORT_VBI2)) {
+ port->type = SAA7164_MPEG_VBI;
+
+ /* We need a deferred interrupt handler for cmd handling */
+ INIT_WORK(&port->workenc, saa7164_work_vbihandler);
+ } else
+ BUG();
+
+ /* Init all the critical resources */
+ mutex_init(&port->dvb.lock);
+ INIT_LIST_HEAD(&port->dmaqueue.list);
+ mutex_init(&port->dmaqueue_lock);
+
+ INIT_LIST_HEAD(&port->list_buf_used.list);
+ INIT_LIST_HEAD(&port->list_buf_free.list);
+ init_waitqueue_head(&port->wait_read);
+
+
+ saa7164_histogram_reset(&port->irq_interval, "irq intervals");
+ saa7164_histogram_reset(&port->svc_interval, "deferred intervals");
+ saa7164_histogram_reset(&port->irq_svc_interval,
+ "irq to deferred intervals");
+ saa7164_histogram_reset(&port->read_interval,
+ "encoder/vbi read() intervals");
+ saa7164_histogram_reset(&port->poll_interval,
+ "encoder/vbi poll() intervals");
+
+ return 0;
+}
+
static int saa7164_dev_setup(struct saa7164_dev *dev)
{
int i;
@@ -443,23 +1034,13 @@ static int saa7164_dev_setup(struct saa7164_dev *dev)
dev->i2c_bus[2].dev = dev;
dev->i2c_bus[2].nr = 2;
- /* Transport port A Defaults / setup */
- dev->ts1.dev = dev;
- dev->ts1.nr = 0;
- mutex_init(&dev->ts1.dvb.lock);
- INIT_LIST_HEAD(&dev->ts1.dmaqueue.list);
- INIT_LIST_HEAD(&dev->ts1.dummy_dmaqueue.list);
- mutex_init(&dev->ts1.dmaqueue_lock);
- mutex_init(&dev->ts1.dummy_dmaqueue_lock);
-
- /* Transport port B Defaults / setup */
- dev->ts2.dev = dev;
- dev->ts2.nr = 1;
- mutex_init(&dev->ts2.dvb.lock);
- INIT_LIST_HEAD(&dev->ts2.dmaqueue.list);
- INIT_LIST_HEAD(&dev->ts2.dummy_dmaqueue.list);
- mutex_init(&dev->ts2.dmaqueue_lock);
- mutex_init(&dev->ts2.dummy_dmaqueue_lock);
+ /* Transport + Encoder ports 1, 2, 3, 4 - Defaults / setup */
+ saa7164_port_init(dev, SAA7164_PORT_TS1);
+ saa7164_port_init(dev, SAA7164_PORT_TS2);
+ saa7164_port_init(dev, SAA7164_PORT_ENC1);
+ saa7164_port_init(dev, SAA7164_PORT_ENC2);
+ saa7164_port_init(dev, SAA7164_PORT_VBI1);
+ saa7164_port_init(dev, SAA7164_PORT_VBI2);
if (get_resources(dev) < 0) {
printk(KERN_ERR "CORE %s No more PCIe resources for "
@@ -516,6 +1097,132 @@ static void saa7164_dev_unregister(struct saa7164_dev *dev)
return;
}
+#ifdef CONFIG_PROC_FS
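+/* Dump the cmd/response bus ring positions and contents for each device */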
+static int saa7164_proc_show(struct seq_file *m, void *v)
+{
+ struct saa7164_dev *dev;
+ struct tmComResBusInfo *b;
+ struct list_head *list;
+ int i, c;
+
+ if (saa7164_devcount == 0)
+ return 0;
+
+ list_for_each(list, &saa7164_devlist) {
+ dev = list_entry(list, struct saa7164_dev, devlist);
+ seq_printf(m, "%s = %p\n", dev->name, dev);
+
+ /* Lock the bus from any other access */
+ b = &dev->bus;
+ mutex_lock(&b->lock);
+
+ seq_printf(m, " .m_pdwSetWritePos = 0x%x (0x%08x)\n",
+ b->m_dwSetReadPos, saa7164_readl(b->m_dwSetReadPos));
+
+ seq_printf(m, " .m_pdwSetReadPos = 0x%x (0x%08x)\n",
+ b->m_dwSetWritePos, saa7164_readl(b->m_dwSetWritePos));
+
+ seq_printf(m, " .m_pdwGetWritePos = 0x%x (0x%08x)\n",
+ b->m_dwGetReadPos, saa7164_readl(b->m_dwGetReadPos));
+
+ seq_printf(m, " .m_pdwGetReadPos = 0x%x (0x%08x)\n",
+ b->m_dwGetWritePos, saa7164_readl(b->m_dwGetWritePos));
+ c = 0;
+ seq_printf(m, "\n Set Ring:\n");
+ seq_printf(m, "\n addr 00 01 02 03 04 05 06 07 08 09 0a 0b 0c 0d 0e 0f\n");
+ for (i = 0; i < b->m_dwSizeSetRing; i++) {
+ if (c == 0)
+ seq_printf(m, " %04x:", i);
+
+ seq_printf(m, " %02x", *(b->m_pdwSetRing + i));
+
+ if (++c == 16) {
+ seq_printf(m, "\n");
+ c = 0;
+ }
+ }
+
+ c = 0;
+ seq_printf(m, "\n Get Ring:\n");
+ seq_printf(m, "\n addr 00 01 02 03 04 05 06 07 08 09 0a 0b 0c 0d 0e 0f\n");
+ for (i = 0; i < b->m_dwSizeGetRing; i++) {
+ if (c == 0)
+ seq_printf(m, " %04x:", i);
+
+ seq_printf(m, " %02x", *(b->m_pdwGetRing + i));
+
+ if (++c == 16) {
+ seq_printf(m, "\n");
+ c = 0;
+ }
+ }
+
+ mutex_unlock(&b->lock);
+
+ }
+
+ return 0;
+}
+
+static int saa7164_proc_open(struct inode *inode, struct file *filp)
+{
+ return single_open(filp, saa7164_proc_show, NULL);
+}
+
+static const struct file_operations saa7164_proc_fops = {
+ .open = saa7164_proc_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = single_release,
+};
+
+static int saa7164_proc_create(void)
+{
+ struct proc_dir_entry *pe;
+
+ pe = proc_create("saa7164", S_IRUGO, NULL, &saa7164_proc_fops);
+ if (!pe)
+ return -ENOMEM;
+
+ return 0;
+}
+#endif
+
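+/* Optional debug thread (enabled via fw_debug): collects firmware debug
+ * messages every 100ms and samples the firmware CPU load once per second.
+ */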
+static int saa7164_thread_function(void *data)
+{
+ struct saa7164_dev *dev = data;
+ struct tmFwInfoStruct fwinfo;
+ u64 last_poll_time = 0;
+
+ dprintk(DBGLVL_THR, "thread started\n");
+
+ set_freezable();
+
+ while (1) {
+ msleep_interruptible(100);
+ if (kthread_should_stop())
+ break;
+ try_to_freeze();
+
+ dprintk(DBGLVL_THR, "thread running\n");
+
+ /* Dump the firmware debug message to console */
+ /* Polling this costs us 1-2% of the arm CPU */
+ /* convert this into a response to interrupt 0x7a */
+ saa7164_api_collect_debug(dev);
+
+ /* Monitor CPU load every 1 second */
+ if ((last_poll_time + 1000 /* ms */) < jiffies_to_msecs(jiffies)) {
+ saa7164_api_get_load_info(dev, &fwinfo);
+ last_poll_time = jiffies_to_msecs(jiffies);
+ }
+
+ }
+
+ dprintk(DBGLVL_THR, "thread exiting\n");
+ return 0;
+}
+
static int __devinit saa7164_initdev(struct pci_dev *pci_dev,
const struct pci_device_id *pci_id)
{
@@ -622,7 +1329,6 @@ static int __devinit saa7164_initdev(struct pci_dev *pci_dev,
saa7164_gpio_setup(dev);
saa7164_card_setup(dev);
-
/* Parse the dynamic device configuration, find various
* media endpoints (MPEG, WMV, PS, TS) and cache their
* configuration details into the driver, so we can
@@ -633,7 +1339,7 @@ static int __devinit saa7164_initdev(struct pci_dev *pci_dev,
/* Begin to create the video sub-systems and register funcs */
if (saa7164_boards[dev->board].porta == SAA7164_MPEG_DVB) {
- if (saa7164_dvb_register(&dev->ts1) < 0) {
+ if (saa7164_dvb_register(&dev->ports[SAA7164_PORT_TS1]) < 0) {
printk(KERN_ERR "%s() Failed to register "
"dvb adapters on porta\n",
__func__);
@@ -641,13 +1347,50 @@ static int __devinit saa7164_initdev(struct pci_dev *pci_dev,
}
if (saa7164_boards[dev->board].portb == SAA7164_MPEG_DVB) {
- if (saa7164_dvb_register(&dev->ts2) < 0) {
+ if (saa7164_dvb_register(&dev->ports[SAA7164_PORT_TS2]) < 0) {
printk(KERN_ERR"%s() Failed to register "
"dvb adapters on portb\n",
__func__);
}
}
+ if (saa7164_boards[dev->board].portc == SAA7164_MPEG_ENCODER) {
+ if (saa7164_encoder_register(&dev->ports[SAA7164_PORT_ENC1]) < 0) {
+ printk(KERN_ERR"%s() Failed to register "
+ "mpeg encoder\n", __func__);
+ }
+ }
+
+ if (saa7164_boards[dev->board].portd == SAA7164_MPEG_ENCODER) {
+ if (saa7164_encoder_register(&dev->ports[SAA7164_PORT_ENC2]) < 0) {
+ printk(KERN_ERR"%s() Failed to register "
+ "mpeg encoder\n", __func__);
+ }
+ }
+
+ if (saa7164_boards[dev->board].porte == SAA7164_MPEG_VBI) {
+ if (saa7164_vbi_register(&dev->ports[SAA7164_PORT_VBI1]) < 0) {
+ printk(KERN_ERR"%s() Failed to register "
+ "vbi device\n", __func__);
+ }
+ }
+
+ if (saa7164_boards[dev->board].portf == SAA7164_MPEG_VBI) {
+ if (saa7164_vbi_register(&dev->ports[SAA7164_PORT_VBI2]) < 0) {
+ printk(KERN_ERR"%s() Failed to register "
+ "vbi device\n", __func__);
+ }
+ }
+ saa7164_api_set_debug(dev, fw_debug);
+
+ if (fw_debug) {
+ dev->kthread = kthread_run(saa7164_thread_function, dev,
+ "saa7164 debug");
+ if (IS_ERR(dev->kthread)) {
+ dev->kthread = NULL;
+ printk(KERN_ERR "%s() Failed to create "
+ "debug kernel thread\n", __func__);
+ }
+ }
+
} /* != BOARD_UNKNOWN */
else
printk(KERN_ERR "%s() Unsupported board detected, "
@@ -675,13 +1418,49 @@ static void __devexit saa7164_finidev(struct pci_dev *pci_dev)
{
struct saa7164_dev *dev = pci_get_drvdata(pci_dev);
+ if (dev->board != SAA7164_BOARD_UNKNOWN) {
+ if (fw_debug && dev->kthread) {
+ kthread_stop(dev->kthread);
+ dev->kthread = NULL;
+ }
+ if (dev->firmwareloaded)
+ saa7164_api_set_debug(dev, 0x00);
+ }
+
+ saa7164_histogram_print(&dev->ports[SAA7164_PORT_ENC1],
+ &dev->ports[SAA7164_PORT_ENC1].irq_interval);
+ saa7164_histogram_print(&dev->ports[SAA7164_PORT_ENC1],
+ &dev->ports[SAA7164_PORT_ENC1].svc_interval);
+ saa7164_histogram_print(&dev->ports[SAA7164_PORT_ENC1],
+ &dev->ports[SAA7164_PORT_ENC1].irq_svc_interval);
+ saa7164_histogram_print(&dev->ports[SAA7164_PORT_ENC1],
+ &dev->ports[SAA7164_PORT_ENC1].read_interval);
+ saa7164_histogram_print(&dev->ports[SAA7164_PORT_ENC1],
+ &dev->ports[SAA7164_PORT_ENC1].poll_interval);
+ saa7164_histogram_print(&dev->ports[SAA7164_PORT_VBI1],
+ &dev->ports[SAA7164_PORT_VBI1].read_interval);
+ saa7164_histogram_print(&dev->ports[SAA7164_PORT_VBI2],
+ &dev->ports[SAA7164_PORT_VBI2].poll_interval);
+
saa7164_shutdown(dev);
if (saa7164_boards[dev->board].porta == SAA7164_MPEG_DVB)
- saa7164_dvb_unregister(&dev->ts1);
+ saa7164_dvb_unregister(&dev->ports[SAA7164_PORT_TS1]);
if (saa7164_boards[dev->board].portb == SAA7164_MPEG_DVB)
- saa7164_dvb_unregister(&dev->ts2);
+ saa7164_dvb_unregister(&dev->ports[SAA7164_PORT_TS2]);
+
+ if (saa7164_boards[dev->board].portc == SAA7164_MPEG_ENCODER)
+ saa7164_encoder_unregister(&dev->ports[SAA7164_PORT_ENC1]);
+
+ if (saa7164_boards[dev->board].portd == SAA7164_MPEG_ENCODER)
+ saa7164_encoder_unregister(&dev->ports[SAA7164_PORT_ENC2]);
+
+ if (saa7164_boards[dev->board].porte == SAA7164_MPEG_VBI)
+ saa7164_vbi_unregister(&dev->ports[SAA7164_PORT_VBI1]);
+
+ if (saa7164_boards[dev->board].portf == SAA7164_MPEG_VBI)
+ saa7164_vbi_unregister(&dev->ports[SAA7164_PORT_VBI2]);
saa7164_i2c_unregister(&dev->i2c_bus[0]);
saa7164_i2c_unregister(&dev->i2c_bus[1]);
@@ -727,11 +1506,18 @@ static struct pci_driver saa7164_pci_driver = {
static int __init saa7164_init(void)
{
printk(KERN_INFO "saa7164 driver loaded\n");
+
+#ifdef CONFIG_PROC_FS
+ saa7164_proc_create();
+#endif
return pci_register_driver(&saa7164_pci_driver);
}
static void __exit saa7164_fini(void)
{
+#ifdef CONFIG_PROC_FS
+ remove_proc_entry("saa7164", NULL);
+#endif
pci_unregister_driver(&saa7164_pci_driver);
}
diff --git a/drivers/media/video/saa7164/saa7164-dvb.c b/drivers/media/video/saa7164/saa7164-dvb.c
index cf099c59b38e..b305a01b3bde 100644
--- a/drivers/media/video/saa7164/saa7164-dvb.c
+++ b/drivers/media/video/saa7164/saa7164-dvb.c
@@ -1,7 +1,7 @@
/*
* Driver for the NXP SAA7164 PCIe bridge
*
- * Copyright (c) 2009 Steven Toth <stoth@kernellabs.com>
+ * Copyright (c) 2010 Steven Toth <stoth@kernellabs.com>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
@@ -82,7 +82,7 @@ static struct s5h1411_config hauppauge_s5h1411_config = {
.mpeg_timing = S5H1411_MPEGTIMING_CONTINOUS_NONINVERTING_CLOCK,
};
-static int saa7164_dvb_stop_tsport(struct saa7164_tsport *port)
+static int saa7164_dvb_stop_port(struct saa7164_port *port)
{
struct saa7164_dev *dev = port->dev;
int ret;
@@ -100,7 +100,7 @@ static int saa7164_dvb_stop_tsport(struct saa7164_tsport *port)
return ret;
}
-static int saa7164_dvb_acquire_tsport(struct saa7164_tsport *port)
+static int saa7164_dvb_acquire_port(struct saa7164_port *port)
{
struct saa7164_dev *dev = port->dev;
int ret;
@@ -118,7 +118,7 @@ static int saa7164_dvb_acquire_tsport(struct saa7164_tsport *port)
return ret;
}
-static int saa7164_dvb_pause_tsport(struct saa7164_tsport *port)
+static int saa7164_dvb_pause_port(struct saa7164_port *port)
{
struct saa7164_dev *dev = port->dev;
int ret;
@@ -140,90 +140,38 @@ static int saa7164_dvb_pause_tsport(struct saa7164_tsport *port)
* the part through AVStream / KS Windows stages, forwards or backwards.
* States are: stopped, acquired (h/w), paused, started.
*/
-static int saa7164_dvb_stop_streaming(struct saa7164_tsport *port)
+static int saa7164_dvb_stop_streaming(struct saa7164_port *port)
{
struct saa7164_dev *dev = port->dev;
- int ret;
-
- dprintk(DBGLVL_DVB, "%s(port=%d)\n", __func__, port->nr);
-
- ret = saa7164_dvb_pause_tsport(port);
- ret = saa7164_dvb_acquire_tsport(port);
- ret = saa7164_dvb_stop_tsport(port);
-
- return ret;
-}
-
-static int saa7164_dvb_cfg_tsport(struct saa7164_tsport *port)
-{
- tmHWStreamParameters_t *params = &port->hw_streamingparams;
- struct saa7164_dev *dev = port->dev;
struct saa7164_buffer *buf;
- struct list_head *c, *n;
- int i = 0;
+ struct list_head *p, *q;
+ int ret;
dprintk(DBGLVL_DVB, "%s(port=%d)\n", __func__, port->nr);
- saa7164_writel(port->pitch, params->pitch);
- saa7164_writel(port->bufsize, params->pitch * params->numberoflines);
+ ret = saa7164_dvb_pause_port(port);
+ ret = saa7164_dvb_acquire_port(port);
+ ret = saa7164_dvb_stop_port(port);
- dprintk(DBGLVL_DVB, " configured:\n");
- dprintk(DBGLVL_DVB, " lmmio 0x%p\n", dev->lmmio);
- dprintk(DBGLVL_DVB, " bufcounter 0x%x = 0x%x\n", port->bufcounter,
- saa7164_readl(port->bufcounter));
-
- dprintk(DBGLVL_DVB, " pitch 0x%x = %d\n", port->pitch,
- saa7164_readl(port->pitch));
-
- dprintk(DBGLVL_DVB, " bufsize 0x%x = %d\n", port->bufsize,
- saa7164_readl(port->bufsize));
-
- dprintk(DBGLVL_DVB, " buffercount = %d\n", port->hwcfg.buffercount);
- dprintk(DBGLVL_DVB, " bufoffset = 0x%x\n", port->bufoffset);
- dprintk(DBGLVL_DVB, " bufptr32h = 0x%x\n", port->bufptr32h);
- dprintk(DBGLVL_DVB, " bufptr32l = 0x%x\n", port->bufptr32l);
-
- /* Poke the buffers and offsets into PCI space */
+ /* Mark the hardware buffers as free */
mutex_lock(&port->dmaqueue_lock);
- list_for_each_safe(c, n, &port->dmaqueue.list) {
- buf = list_entry(c, struct saa7164_buffer, list);
-
- /* TODO: Review this in light of 32v64 assignments */
- saa7164_writel(port->bufoffset + (sizeof(u32) * i), 0);
- saa7164_writel(port->bufptr32h + ((sizeof(u32) * 2) * i),
- buf->pt_dma);
- saa7164_writel(port->bufptr32l + ((sizeof(u32) * 2) * i), 0);
-
- dprintk(DBGLVL_DVB,
- " buf[%d] offset 0x%llx (0x%x) "
- "buf 0x%llx/%llx (0x%x/%x)\n",
- i,
- (u64)port->bufoffset + (i * sizeof(u32)),
- saa7164_readl(port->bufoffset + (sizeof(u32) * i)),
- (u64)port->bufptr32h + ((sizeof(u32) * 2) * i),
- (u64)port->bufptr32l + ((sizeof(u32) * 2) * i),
- saa7164_readl(port->bufptr32h + ((sizeof(u32) * i)
- * 2)),
- saa7164_readl(port->bufptr32l + ((sizeof(u32) * i)
- * 2)));
-
- if (i++ > port->hwcfg.buffercount)
- BUG();
-
+ list_for_each_safe(p, q, &port->dmaqueue.list) {
+ buf = list_entry(p, struct saa7164_buffer, list);
+ buf->flags = SAA7164_BUFFER_FREE;
}
mutex_unlock(&port->dmaqueue_lock);
- return 0;
+ return ret;
}
-static int saa7164_dvb_start_tsport(struct saa7164_tsport *port)
+static int saa7164_dvb_start_port(struct saa7164_port *port)
{
struct saa7164_dev *dev = port->dev;
int ret = 0, result;
dprintk(DBGLVL_DVB, "%s(port=%d)\n", __func__, port->nr);
- saa7164_dvb_cfg_tsport(port);
+ saa7164_buffer_cfg_port(port);
/* Acquire the hardware */
result = saa7164_api_transition_port(port, SAA_DMASTATE_ACQUIRE);
@@ -284,7 +232,7 @@ out:
static int saa7164_dvb_start_feed(struct dvb_demux_feed *feed)
{
struct dvb_demux *demux = feed->demux;
- struct saa7164_tsport *port = (struct saa7164_tsport *) demux->priv;
+ struct saa7164_port *port = (struct saa7164_port *) demux->priv;
struct saa7164_dvb *dvb = &port->dvb;
struct saa7164_dev *dev = port->dev;
int ret = 0;
@@ -298,7 +246,7 @@ static int saa7164_dvb_start_feed(struct dvb_demux_feed *feed)
mutex_lock(&dvb->lock);
if (dvb->feeding++ == 0) {
/* Start transport */
- ret = saa7164_dvb_start_tsport(port);
+ ret = saa7164_dvb_start_port(port);
}
mutex_unlock(&dvb->lock);
dprintk(DBGLVL_DVB, "%s(port=%d) now feeding = %d\n",
@@ -311,7 +259,7 @@ static int saa7164_dvb_start_feed(struct dvb_demux_feed *feed)
static int saa7164_dvb_stop_feed(struct dvb_demux_feed *feed)
{
struct dvb_demux *demux = feed->demux;
- struct saa7164_tsport *port = (struct saa7164_tsport *) demux->priv;
+ struct saa7164_port *port = (struct saa7164_port *) demux->priv;
struct saa7164_dvb *dvb = &port->dvb;
struct saa7164_dev *dev = port->dev;
int ret = 0;
@@ -332,7 +280,7 @@ static int saa7164_dvb_stop_feed(struct dvb_demux_feed *feed)
return ret;
}
-static int dvb_register(struct saa7164_tsport *port)
+static int dvb_register(struct saa7164_port *port)
{
struct saa7164_dvb *dvb = &port->dvb;
struct saa7164_dev *dev = port->dev;
@@ -341,6 +289,9 @@ static int dvb_register(struct saa7164_tsport *port)
dprintk(DBGLVL_DVB, "%s(port=%d)\n", __func__, port->nr);
+ if (port->type != SAA7164_MPEG_DVB)
+ BUG();
+
/* Sanity check that the PCI configuration space is active */
if (port->hwcfg.BARLocation == 0) {
result = -ENOMEM;
@@ -378,7 +329,6 @@ static int dvb_register(struct saa7164_tsport *port)
DRIVER_NAME, result);
goto fail_adapter;
}
- buf->nr = i;
mutex_lock(&port->dmaqueue_lock);
list_add_tail(&buf->list, &port->dmaqueue.list);
@@ -473,7 +423,7 @@ fail_adapter:
return result;
}
-int saa7164_dvb_unregister(struct saa7164_tsport *port)
+int saa7164_dvb_unregister(struct saa7164_port *port)
{
struct saa7164_dvb *dvb = &port->dvb;
struct saa7164_dev *dev = port->dev;
@@ -482,12 +432,15 @@ int saa7164_dvb_unregister(struct saa7164_tsport *port)
dprintk(DBGLVL_DVB, "%s()\n", __func__);
+ if (port->type != SAA7164_MPEG_DVB)
+ BUG();
+
/* Remove any allocated buffers */
mutex_lock(&port->dmaqueue_lock);
list_for_each_safe(c, n, &port->dmaqueue.list) {
b = list_entry(c, struct saa7164_buffer, list);
list_del(c);
- saa7164_buffer_dealloc(port, b);
+ saa7164_buffer_dealloc(b);
}
mutex_unlock(&port->dmaqueue_lock);
@@ -508,7 +461,7 @@ int saa7164_dvb_unregister(struct saa7164_tsport *port)
/* All the DVB attach calls go here, this function gets modified
* for each new card.
*/
-int saa7164_dvb_register(struct saa7164_tsport *port)
+int saa7164_dvb_register(struct saa7164_port *port)
{
struct saa7164_dev *dev = port->dev;
struct saa7164_dvb *dvb = &port->dvb;
@@ -588,8 +541,6 @@ int saa7164_dvb_register(struct saa7164_tsport *port)
return -1;
}
- /* Put the analog decoder in standby to keep it quiet */
-
/* register everything */
ret = dvb_register(port);
if (ret < 0) {
diff --git a/drivers/media/video/saa7164/saa7164-encoder.c b/drivers/media/video/saa7164/saa7164-encoder.c
new file mode 100644
index 000000000000..cbb53d0ee979
--- /dev/null
+++ b/drivers/media/video/saa7164/saa7164-encoder.c
@@ -0,0 +1,1503 @@
+/*
+ * Driver for the NXP SAA7164 PCIe bridge
+ *
+ * Copyright (c) 2010 Steven Toth <stoth@kernellabs.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ *
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ */
+
+#include "saa7164.h"
+
+#define ENCODER_MAX_BITRATE 6500000
+#define ENCODER_MIN_BITRATE 1000000
+#define ENCODER_DEF_BITRATE 5000000
+
+static struct saa7164_tvnorm saa7164_tvnorms[] = {
+ {
+ .name = "NTSC-M",
+ .id = V4L2_STD_NTSC_M,
+ }, {
+ .name = "NTSC-JP",
+ .id = V4L2_STD_NTSC_M_JP,
+ }
+};
+
+static const u32 saa7164_v4l2_ctrls[] = {
+ V4L2_CID_BRIGHTNESS,
+ V4L2_CID_CONTRAST,
+ V4L2_CID_SATURATION,
+ V4L2_CID_HUE,
+ V4L2_CID_AUDIO_VOLUME,
+ V4L2_CID_SHARPNESS,
+ V4L2_CID_MPEG_STREAM_TYPE,
+ V4L2_CID_MPEG_VIDEO_ASPECT,
+ V4L2_CID_MPEG_VIDEO_B_FRAMES,
+ V4L2_CID_MPEG_VIDEO_GOP_SIZE,
+ V4L2_CID_MPEG_AUDIO_MUTE,
+ V4L2_CID_MPEG_VIDEO_BITRATE_MODE,
+ V4L2_CID_MPEG_VIDEO_BITRATE,
+ V4L2_CID_MPEG_VIDEO_BITRATE_PEAK,
+ 0
+};
+
+/* Take the encoder configuration from the port struct and
+ * flush it to the hardware.
+ */
+static void saa7164_encoder_configure(struct saa7164_port *port)
+{
+ struct saa7164_dev *dev = port->dev;
+ dprintk(DBGLVL_ENC, "%s()\n", __func__);
+
+ port->encoder_params.width = port->width;
+ port->encoder_params.height = port->height;
+ port->encoder_params.is_50hz =
+ (port->encodernorm.id & V4L2_STD_625_50) != 0;
+
+ /* Set up the DIF (enable it) for analog mode by default */
+ saa7164_api_initialize_dif(port);
+
+ /* Configure the correct video standard */
+ saa7164_api_configure_dif(port, port->encodernorm.id);
+
+ /* Ensure the audio decoder is correctly configured */
+ saa7164_api_set_audio_std(port);
+}
+
+static int saa7164_encoder_buffers_dealloc(struct saa7164_port *port)
+{
+ struct list_head *c, *n, *p, *q, *l, *v;
+ struct saa7164_dev *dev = port->dev;
+ struct saa7164_buffer *buf;
+ struct saa7164_user_buffer *ubuf;
+
+ /* Remove any allocated buffers */
+ mutex_lock(&port->dmaqueue_lock);
+
+ dprintk(DBGLVL_ENC, "%s(port=%d) dmaqueue\n", __func__, port->nr);
+ list_for_each_safe(c, n, &port->dmaqueue.list) {
+ buf = list_entry(c, struct saa7164_buffer, list);
+ list_del(c);
+ saa7164_buffer_dealloc(buf);
+ }
+
+ dprintk(DBGLVL_ENC, "%s(port=%d) used\n", __func__, port->nr);
+ list_for_each_safe(p, q, &port->list_buf_used.list) {
+ ubuf = list_entry(p, struct saa7164_user_buffer, list);
+ list_del(p);
+ saa7164_buffer_dealloc_user(ubuf);
+ }
+
+ dprintk(DBGLVL_ENC, "%s(port=%d) free\n", __func__, port->nr);
+ list_for_each_safe(l, v, &port->list_buf_free.list) {
+ ubuf = list_entry(l, struct saa7164_user_buffer, list);
+ list_del(l);
+ saa7164_buffer_dealloc_user(ubuf);
+ }
+
+ mutex_unlock(&port->dmaqueue_lock);
+ dprintk(DBGLVL_ENC, "%s(port=%d) done\n", __func__, port->nr);
+
+ return 0;
+}
+
+/* Dynamic buffer switch at encoder start time */
+static int saa7164_encoder_buffers_alloc(struct saa7164_port *port)
+{
+ struct saa7164_dev *dev = port->dev;
+ struct saa7164_buffer *buf;
+ struct saa7164_user_buffer *ubuf;
+ struct tmHWStreamParameters *params = &port->hw_streamingparams;
+ int result = -ENODEV, i;
+ int len = 0;
+
+ dprintk(DBGLVL_ENC, "%s()\n", __func__);
+
+ if (port->encoder_params.stream_type == V4L2_MPEG_STREAM_TYPE_MPEG2_PS) {
+ dprintk(DBGLVL_ENC, "%s() type=V4L2_MPEG_STREAM_TYPE_MPEG2_PS\n", __func__);
+ params->samplesperline = 128;
+ params->numberoflines = 256;
+ params->pitch = 128;
+ params->numpagetables = 2 +
+ ((SAA7164_PS_NUMBER_OF_LINES * 128) / PAGE_SIZE);
+ } else
+ if (port->encoder_params.stream_type == V4L2_MPEG_STREAM_TYPE_MPEG2_TS) {
+ dprintk(DBGLVL_ENC, "%s() type=V4L2_MPEG_STREAM_TYPE_MPEG2_TS\n", __func__);
+ params->samplesperline = 188;
+ params->numberoflines = 312;
+ params->pitch = 188;
+ params->numpagetables = 2 +
+ ((SAA7164_TS_NUMBER_OF_LINES * 188) / PAGE_SIZE);
+ } else
+ BUG();
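+
+ /* Note (editorial): the 188-byte TS line size matches the MPEG-2
+ * transport packet size (one packet per line); the 128-byte PS line
+ * size is simply the granularity this driver uses for program
+ * stream payloads. */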
+
+ /* Init and establish defaults */
+ params->bitspersample = 8;
+ params->linethreshold = 0;
+ params->pagetablelistvirt = 0;
+ params->pagetablelistphys = 0;
+ params->numpagetableentries = port->hwcfg.buffercount;
+
+ /* Allocate the PCI resources, buffers (hard) */
+ for (i = 0; i < port->hwcfg.buffercount; i++) {
+ buf = saa7164_buffer_alloc(port,
+ params->numberoflines *
+ params->pitch);
+
+ if (!buf) {
+ printk(KERN_ERR "%s() failed "
+ "(errno = %d), unable to allocate buffer\n",
+ __func__, result);
+ result = -ENOMEM;
+ goto failed;
+ } else {
+
+ mutex_lock(&port->dmaqueue_lock);
+ list_add_tail(&buf->list, &port->dmaqueue.list);
+ mutex_unlock(&port->dmaqueue_lock);
+
+ }
+ }
+
+ /* Allocate some kernel buffers for copying
+ * to userspace.
+ */
+ len = params->numberoflines * params->pitch;
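+
+ /* encoder_buffers is a driver-wide tunable defined elsewhere in this
+ * driver (presumably a module parameter); clamp it to a sane range
+ * before allocating user buffers. */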
+
+ if (encoder_buffers < 16)
+ encoder_buffers = 16;
+ if (encoder_buffers > 512)
+ encoder_buffers = 512;
+
+ for (i = 0; i < encoder_buffers; i++) {
+
+ ubuf = saa7164_buffer_alloc_user(dev, len);
+ if (ubuf) {
+ mutex_lock(&port->dmaqueue_lock);
+ list_add_tail(&ubuf->list, &port->list_buf_free.list);
+ mutex_unlock(&port->dmaqueue_lock);
+ }
+
+ }
+
+ result = 0;
+
+failed:
+ return result;
+}
+
+static int saa7164_encoder_initialize(struct saa7164_port *port)
+{
+ saa7164_encoder_configure(port);
+ return 0;
+}
+
+/* -- V4L2 --------------------------------------------------------- */
+static int vidioc_s_std(struct file *file, void *priv, v4l2_std_id *id)
+{
+ struct saa7164_encoder_fh *fh = file->private_data;
+ struct saa7164_port *port = fh->port;
+ struct saa7164_dev *dev = port->dev;
+ unsigned int i;
+
+ dprintk(DBGLVL_ENC, "%s(id=0x%x)\n", __func__, (u32)*id);
+
+ for (i = 0; i < ARRAY_SIZE(saa7164_tvnorms); i++) {
+ if (*id & saa7164_tvnorms[i].id)
+ break;
+ }
+ if (i == ARRAY_SIZE(saa7164_tvnorms))
+ return -EINVAL;
+
+ port->encodernorm = saa7164_tvnorms[i];
+
+ /* Update the audio decoder while it is not running in
+ * auto-detect mode.
+ */
+ saa7164_api_set_audio_std(port);
+
+ dprintk(DBGLVL_ENC, "%s(id=0x%x) OK\n", __func__, (u32)*id);
+
+ return 0;
+}
+
+static int vidioc_enum_input(struct file *file, void *priv,
+ struct v4l2_input *i)
+{
+ int n;
+
+ char *inputs[] = { "tuner", "composite", "svideo", "aux",
+ "composite 2", "svideo 2", "aux 2" };
+
+ if (i->index >= 7)
+ return -EINVAL;
+
+ strcpy(i->name, inputs[i->index]);
+
+ if (i->index == 0)
+ i->type = V4L2_INPUT_TYPE_TUNER;
+ else
+ i->type = V4L2_INPUT_TYPE_CAMERA;
+
+ for (n = 0; n < ARRAY_SIZE(saa7164_tvnorms); n++)
+ i->std |= saa7164_tvnorms[n].id;
+
+ return 0;
+}
+
+static int vidioc_g_input(struct file *file, void *priv, unsigned int *i)
+{
+ struct saa7164_encoder_fh *fh = file->private_data;
+ struct saa7164_port *port = fh->port;
+ struct saa7164_dev *dev = port->dev;
+
+ if (saa7164_api_get_videomux(port) != SAA_OK)
+ return -EIO;
+
+ *i = (port->mux_input - 1);
+
+ dprintk(DBGLVL_ENC, "%s() input=%d\n", __func__, *i);
+
+ return 0;
+}
+
+static int vidioc_s_input(struct file *file, void *priv, unsigned int i)
+{
+ struct saa7164_encoder_fh *fh = file->private_data;
+ struct saa7164_port *port = fh->port;
+ struct saa7164_dev *dev = port->dev;
+
+ dprintk(DBGLVL_ENC, "%s() input=%d\n", __func__, i);
+
+ if (i >= 7)
+ return -EINVAL;
+
+ port->mux_input = i + 1;
+
+ if (saa7164_api_set_videomux(port) != SAA_OK)
+ return -EIO;
+
+ return 0;
+}
+
+static int vidioc_g_tuner(struct file *file, void *priv,
+ struct v4l2_tuner *t)
+{
+ struct saa7164_encoder_fh *fh = file->private_data;
+ struct saa7164_port *port = fh->port;
+ struct saa7164_dev *dev = port->dev;
+
+ if (0 != t->index)
+ return -EINVAL;
+
+ strcpy(t->name, "tuner");
+ t->type = V4L2_TUNER_ANALOG_TV;
+ t->capability = V4L2_TUNER_CAP_NORM | V4L2_TUNER_CAP_STEREO;
+
+ dprintk(DBGLVL_ENC, "VIDIOC_G_TUNER: tuner type %d\n", t->type);
+
+ return 0;
+}
+
+static int vidioc_s_tuner(struct file *file, void *priv,
+ struct v4l2_tuner *t)
+{
+ /* Update the A/V core */
+ return 0;
+}
+
+static int vidioc_g_frequency(struct file *file, void *priv,
+ struct v4l2_frequency *f)
+{
+ struct saa7164_encoder_fh *fh = file->private_data;
+ struct saa7164_port *port = fh->port;
+
+ f->type = V4L2_TUNER_ANALOG_TV;
+ f->frequency = port->freq;
+
+ return 0;
+}
+
+static int vidioc_s_frequency(struct file *file, void *priv,
+ struct v4l2_frequency *f)
+{
+ struct saa7164_encoder_fh *fh = file->private_data;
+ struct saa7164_port *port = fh->port;
+ struct saa7164_dev *dev = port->dev;
+ struct saa7164_port *tsport;
+ struct dvb_frontend *fe;
+
+ /* TODO: Pull this for the std */
+ struct analog_parameters params = {
+ .mode = V4L2_TUNER_ANALOG_TV,
+ .audmode = V4L2_TUNER_MODE_STEREO,
+ .std = port->encodernorm.id,
+ .frequency = f->frequency
+ };
+
+ /* Stop the encoder */
+ dprintk(DBGLVL_ENC, "%s() frequency=%d tuner=%d\n", __func__,
+ f->frequency, f->tuner);
+
+ if (f->tuner != 0)
+ return -EINVAL;
+
+ if (f->type != V4L2_TUNER_ANALOG_TV)
+ return -EINVAL;
+
+ port->freq = f->frequency;
+
+ /* Update the hardware */
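+ /* The analog tuner is owned by the sibling digital (TS) port, so map
+ * this encoder port to its TS partner to reach the dvb_frontend
+ * tuner ops. */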
+ if (port->nr == SAA7164_PORT_ENC1)
+ tsport = &dev->ports[SAA7164_PORT_TS1];
+ else
+ if (port->nr == SAA7164_PORT_ENC2)
+ tsport = &dev->ports[SAA7164_PORT_TS2];
+ else
+ BUG();
+
+ fe = tsport->dvb.frontend;
+
+ if (fe && fe->ops.tuner_ops.set_analog_params)
+ fe->ops.tuner_ops.set_analog_params(fe, &params);
+ else
+ printk(KERN_ERR "%s() No analog tuner, aborting\n", __func__);
+
+ saa7164_encoder_initialize(port);
+
+ return 0;
+}
+
+static int vidioc_g_ctrl(struct file *file, void *priv,
+ struct v4l2_control *ctl)
+{
+ struct saa7164_encoder_fh *fh = file->private_data;
+ struct saa7164_port *port = fh->port;
+ struct saa7164_dev *dev = port->dev;
+
+ dprintk(DBGLVL_ENC, "%s(id=%d, value=%d)\n", __func__,
+ ctl->id, ctl->value);
+
+ switch (ctl->id) {
+ case V4L2_CID_BRIGHTNESS:
+ ctl->value = port->ctl_brightness;
+ break;
+ case V4L2_CID_CONTRAST:
+ ctl->value = port->ctl_contrast;
+ break;
+ case V4L2_CID_SATURATION:
+ ctl->value = port->ctl_saturation;
+ break;
+ case V4L2_CID_HUE:
+ ctl->value = port->ctl_hue;
+ break;
+ case V4L2_CID_SHARPNESS:
+ ctl->value = port->ctl_sharpness;
+ break;
+ case V4L2_CID_AUDIO_VOLUME:
+ ctl->value = port->ctl_volume;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int vidioc_s_ctrl(struct file *file, void *priv,
+ struct v4l2_control *ctl)
+{
+ struct saa7164_encoder_fh *fh = file->private_data;
+ struct saa7164_port *port = fh->port;
+ struct saa7164_dev *dev = port->dev;
+ int ret = 0;
+
+ dprintk(DBGLVL_ENC, "%s(id=%d, value=%d)\n", __func__,
+ ctl->id, ctl->value);
+
+ switch (ctl->id) {
+ case V4L2_CID_BRIGHTNESS:
+ if ((ctl->value >= 0) && (ctl->value <= 255)) {
+ port->ctl_brightness = ctl->value;
+ saa7164_api_set_usercontrol(port,
+ PU_BRIGHTNESS_CONTROL);
+ } else
+ ret = -EINVAL;
+ break;
+ case V4L2_CID_CONTRAST:
+ if ((ctl->value >= 0) && (ctl->value <= 255)) {
+ port->ctl_contrast = ctl->value;
+ saa7164_api_set_usercontrol(port, PU_CONTRAST_CONTROL);
+ } else
+ ret = -EINVAL;
+ break;
+ case V4L2_CID_SATURATION:
+ if ((ctl->value >= 0) && (ctl->value <= 255)) {
+ port->ctl_saturation = ctl->value;
+ saa7164_api_set_usercontrol(port,
+ PU_SATURATION_CONTROL);
+ } else
+ ret = -EINVAL;
+ break;
+ case V4L2_CID_HUE:
+ if ((ctl->value >= 0) && (ctl->value <= 255)) {
+ port->ctl_hue = ctl->value;
+ saa7164_api_set_usercontrol(port, PU_HUE_CONTROL);
+ } else
+ ret = -EINVAL;
+ break;
+ case V4L2_CID_SHARPNESS:
+ if ((ctl->value >= 0) && (ctl->value <= 255)) {
+ port->ctl_sharpness = ctl->value;
+ saa7164_api_set_usercontrol(port, PU_SHARPNESS_CONTROL);
+ } else
+ ret = -EINVAL;
+ break;
+ case V4L2_CID_AUDIO_VOLUME:
+ if ((ctl->value >= -83) && (ctl->value <= 24)) {
+ port->ctl_volume = ctl->value;
+ saa7164_api_set_audio_volume(port, port->ctl_volume);
+ } else
+ ret = -EINVAL;
+ break;
+ default:
+ ret = -EINVAL;
+ }
+
+ return ret;
+}
+
+static int saa7164_get_ctrl(struct saa7164_port *port,
+ struct v4l2_ext_control *ctrl)
+{
+ struct saa7164_encoder_params *params = &port->encoder_params;
+
+ switch (ctrl->id) {
+ case V4L2_CID_MPEG_VIDEO_BITRATE:
+ ctrl->value = params->bitrate;
+ break;
+ case V4L2_CID_MPEG_STREAM_TYPE:
+ ctrl->value = params->stream_type;
+ break;
+ case V4L2_CID_MPEG_AUDIO_MUTE:
+ ctrl->value = params->ctl_mute;
+ break;
+ case V4L2_CID_MPEG_VIDEO_ASPECT:
+ ctrl->value = params->ctl_aspect;
+ break;
+ case V4L2_CID_MPEG_VIDEO_BITRATE_MODE:
+ ctrl->value = params->bitrate_mode;
+ break;
+ case V4L2_CID_MPEG_VIDEO_B_FRAMES:
+ ctrl->value = params->refdist;
+ break;
+ case V4L2_CID_MPEG_VIDEO_BITRATE_PEAK:
+ ctrl->value = params->bitrate_peak;
+ break;
+ case V4L2_CID_MPEG_VIDEO_GOP_SIZE:
+ ctrl->value = params->gop_size;
+ break;
+ default:
+ return -EINVAL;
+ }
+ return 0;
+}
+
+static int vidioc_g_ext_ctrls(struct file *file, void *priv,
+ struct v4l2_ext_controls *ctrls)
+{
+ struct saa7164_encoder_fh *fh = file->private_data;
+ struct saa7164_port *port = fh->port;
+ int i, err = 0;
+
+ if (ctrls->ctrl_class == V4L2_CTRL_CLASS_MPEG) {
+ for (i = 0; i < ctrls->count; i++) {
+ struct v4l2_ext_control *ctrl = ctrls->controls + i;
+
+ err = saa7164_get_ctrl(port, ctrl);
+ if (err) {
+ ctrls->error_idx = i;
+ break;
+ }
+ }
+ return err;
+
+ }
+
+ return -EINVAL;
+}
+
+static int saa7164_try_ctrl(struct v4l2_ext_control *ctrl, int ac3)
+{
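+ /* Note: the ac3 argument is currently unused; callers pass 0. */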
+ int ret = -EINVAL;
+
+ switch (ctrl->id) {
+ case V4L2_CID_MPEG_VIDEO_BITRATE:
+ if ((ctrl->value >= ENCODER_MIN_BITRATE) &&
+ (ctrl->value <= ENCODER_MAX_BITRATE))
+ ret = 0;
+ break;
+ case V4L2_CID_MPEG_STREAM_TYPE:
+ if ((ctrl->value == V4L2_MPEG_STREAM_TYPE_MPEG2_PS) ||
+ (ctrl->value == V4L2_MPEG_STREAM_TYPE_MPEG2_TS))
+ ret = 0;
+ break;
+ case V4L2_CID_MPEG_AUDIO_MUTE:
+ if ((ctrl->value >= 0) &&
+ (ctrl->value <= 1))
+ ret = 0;
+ break;
+ case V4L2_CID_MPEG_VIDEO_ASPECT:
+ if ((ctrl->value >= V4L2_MPEG_VIDEO_ASPECT_1x1) &&
+ (ctrl->value <= V4L2_MPEG_VIDEO_ASPECT_221x100))
+ ret = 0;
+ break;
+ case V4L2_CID_MPEG_VIDEO_GOP_SIZE:
+ if ((ctrl->value >= 0) &&
+ (ctrl->value <= 255))
+ ret = 0;
+ break;
+ case V4L2_CID_MPEG_VIDEO_BITRATE_MODE:
+ if ((ctrl->value == V4L2_MPEG_VIDEO_BITRATE_MODE_VBR) ||
+ (ctrl->value == V4L2_MPEG_VIDEO_BITRATE_MODE_CBR))
+ ret = 0;
+ break;
+ case V4L2_CID_MPEG_VIDEO_B_FRAMES:
+ if ((ctrl->value >= 1) &&
+ (ctrl->value <= 3))
+ ret = 0;
+ break;
+ case V4L2_CID_MPEG_VIDEO_BITRATE_PEAK:
+ if ((ctrl->value >= ENCODER_MIN_BITRATE) &&
+ (ctrl->value <= ENCODER_MAX_BITRATE))
+ ret = 0;
+ break;
+ default:
+ ret = -EINVAL;
+ }
+
+ return ret;
+}
+
+static int vidioc_try_ext_ctrls(struct file *file, void *priv,
+ struct v4l2_ext_controls *ctrls)
+{
+ int i, err = 0;
+
+ if (ctrls->ctrl_class == V4L2_CTRL_CLASS_MPEG) {
+ for (i = 0; i < ctrls->count; i++) {
+ struct v4l2_ext_control *ctrl = ctrls->controls + i;
+
+ err = saa7164_try_ctrl(ctrl, 0);
+ if (err) {
+ ctrls->error_idx = i;
+ break;
+ }
+ }
+ return err;
+ }
+
+ return -EINVAL;
+}
+
+static int saa7164_set_ctrl(struct saa7164_port *port,
+ struct v4l2_ext_control *ctrl)
+{
+ struct saa7164_encoder_params *params = &port->encoder_params;
+ int ret = 0;
+
+ switch (ctrl->id) {
+ case V4L2_CID_MPEG_VIDEO_BITRATE:
+ params->bitrate = ctrl->value;
+ break;
+ case V4L2_CID_MPEG_STREAM_TYPE:
+ params->stream_type = ctrl->value;
+ break;
+ case V4L2_CID_MPEG_AUDIO_MUTE:
+ params->ctl_mute = ctrl->value;
+ ret = saa7164_api_audio_mute(port, params->ctl_mute);
+ if (ret != SAA_OK) {
+ printk(KERN_ERR "%s() error, ret = 0x%x\n", __func__,
+ ret);
+ ret = -EIO;
+ }
+ break;
+ case V4L2_CID_MPEG_VIDEO_ASPECT:
+ params->ctl_aspect = ctrl->value;
+ ret = saa7164_api_set_aspect_ratio(port);
+ if (ret != SAA_OK) {
+ printk(KERN_ERR "%s() error, ret = 0x%x\n", __func__,
+ ret);
+ ret = -EIO;
+ }
+ break;
+ case V4L2_CID_MPEG_VIDEO_BITRATE_MODE:
+ params->bitrate_mode = ctrl->value;
+ break;
+ case V4L2_CID_MPEG_VIDEO_B_FRAMES:
+ params->refdist = ctrl->value;
+ break;
+ case V4L2_CID_MPEG_VIDEO_BITRATE_PEAK:
+ params->bitrate_peak = ctrl->value;
+ break;
+ case V4L2_CID_MPEG_VIDEO_GOP_SIZE:
+ params->gop_size = ctrl->value;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ /* TODO: Update the hardware */
+
+ return ret;
+}
+
+static int vidioc_s_ext_ctrls(struct file *file, void *priv,
+ struct v4l2_ext_controls *ctrls)
+{
+ struct saa7164_encoder_fh *fh = file->private_data;
+ struct saa7164_port *port = fh->port;
+ int i, err = 0;
+
+ if (ctrls->ctrl_class == V4L2_CTRL_CLASS_MPEG) {
+ for (i = 0; i < ctrls->count; i++) {
+ struct v4l2_ext_control *ctrl = ctrls->controls + i;
+
+ err = saa7164_try_ctrl(ctrl, 0);
+ if (err) {
+ ctrls->error_idx = i;
+ break;
+ }
+ err = saa7164_set_ctrl(port, ctrl);
+ if (err) {
+ ctrls->error_idx = i;
+ break;
+ }
+ }
+ return err;
+
+ }
+
+ return -EINVAL;
+}
+
+static int vidioc_querycap(struct file *file, void *priv,
+ struct v4l2_capability *cap)
+{
+ struct saa7164_encoder_fh *fh = file->private_data;
+ struct saa7164_port *port = fh->port;
+ struct saa7164_dev *dev = port->dev;
+
+ strcpy(cap->driver, dev->name);
+ strlcpy(cap->card, saa7164_boards[dev->board].name,
+ sizeof(cap->card));
+ sprintf(cap->bus_info, "PCI:%s", pci_name(dev->pci));
+
+ cap->capabilities =
+ V4L2_CAP_VIDEO_CAPTURE |
+ V4L2_CAP_READWRITE |
+ 0;
+
+ cap->capabilities |= V4L2_CAP_TUNER;
+ cap->version = 0;
+
+ return 0;
+}
+
+static int vidioc_enum_fmt_vid_cap(struct file *file, void *priv,
+ struct v4l2_fmtdesc *f)
+{
+ if (f->index != 0)
+ return -EINVAL;
+
+ strlcpy(f->description, "MPEG", sizeof(f->description));
+ f->pixelformat = V4L2_PIX_FMT_MPEG;
+
+ return 0;
+}
+
+static int vidioc_g_fmt_vid_cap(struct file *file, void *priv,
+ struct v4l2_format *f)
+{
+ struct saa7164_encoder_fh *fh = file->private_data;
+ struct saa7164_port *port = fh->port;
+ struct saa7164_dev *dev = port->dev;
+
+ f->fmt.pix.pixelformat = V4L2_PIX_FMT_MPEG;
+ f->fmt.pix.bytesperline = 0;
+ f->fmt.pix.sizeimage =
+ port->ts_packet_size * port->ts_packet_count;
+ f->fmt.pix.colorspace = 0;
+ f->fmt.pix.width = port->width;
+ f->fmt.pix.height = port->height;
+
+ dprintk(DBGLVL_ENC, "VIDIOC_G_FMT: w: %d, h: %d\n",
+ port->width, port->height);
+
+ return 0;
+}
+
+static int vidioc_try_fmt_vid_cap(struct file *file, void *priv,
+ struct v4l2_format *f)
+{
+ struct saa7164_encoder_fh *fh = file->private_data;
+ struct saa7164_port *port = fh->port;
+ struct saa7164_dev *dev = port->dev;
+
+ f->fmt.pix.pixelformat = V4L2_PIX_FMT_MPEG;
+ f->fmt.pix.bytesperline = 0;
+ f->fmt.pix.sizeimage =
+ port->ts_packet_size * port->ts_packet_count;
+ f->fmt.pix.colorspace = 0;
+ dprintk(DBGLVL_ENC, "VIDIOC_TRY_FMT: w: %d, h: %d\n",
+ port->width, port->height);
+ return 0;
+}
+
+static int vidioc_s_fmt_vid_cap(struct file *file, void *priv,
+ struct v4l2_format *f)
+{
+ struct saa7164_encoder_fh *fh = file->private_data;
+ struct saa7164_port *port = fh->port;
+ struct saa7164_dev *dev = port->dev;
+
+ f->fmt.pix.pixelformat = V4L2_PIX_FMT_MPEG;
+ f->fmt.pix.bytesperline = 0;
+ f->fmt.pix.sizeimage =
+ port->ts_packet_size * port->ts_packet_count;
+ f->fmt.pix.colorspace = 0;
+
+ dprintk(DBGLVL_ENC, "VIDIOC_S_FMT: w: %d, h: %d, f: %d\n",
+ f->fmt.pix.width, f->fmt.pix.height, f->fmt.pix.field);
+
+ return 0;
+}
+
+static int vidioc_log_status(struct file *file, void *priv)
+{
+ return 0;
+}
+
+static int fill_queryctrl(struct saa7164_encoder_params *params,
+ struct v4l2_queryctrl *c)
+{
+ switch (c->id) {
+ case V4L2_CID_BRIGHTNESS:
+ return v4l2_ctrl_query_fill(c, 0x0, 0xff, 1, 127);
+ case V4L2_CID_CONTRAST:
+ return v4l2_ctrl_query_fill(c, 0x0, 0xff, 1, 66);
+ case V4L2_CID_SATURATION:
+ return v4l2_ctrl_query_fill(c, 0x0, 0xff, 1, 62);
+ case V4L2_CID_HUE:
+ return v4l2_ctrl_query_fill(c, 0x0, 0xff, 1, 128);
+ case V4L2_CID_SHARPNESS:
+ return v4l2_ctrl_query_fill(c, 0x0, 0x0f, 1, 8);
+ case V4L2_CID_MPEG_AUDIO_MUTE:
+ return v4l2_ctrl_query_fill(c, 0x0, 0x01, 1, 0);
+ case V4L2_CID_AUDIO_VOLUME:
+ return v4l2_ctrl_query_fill(c, -83, 24, 1, 20);
+ case V4L2_CID_MPEG_VIDEO_BITRATE:
+ return v4l2_ctrl_query_fill(c,
+ ENCODER_MIN_BITRATE, ENCODER_MAX_BITRATE,
+ 100000, ENCODER_DEF_BITRATE);
+ case V4L2_CID_MPEG_STREAM_TYPE:
+ return v4l2_ctrl_query_fill(c,
+ V4L2_MPEG_STREAM_TYPE_MPEG2_PS,
+ V4L2_MPEG_STREAM_TYPE_MPEG2_TS,
+ 1, V4L2_MPEG_STREAM_TYPE_MPEG2_PS);
+ case V4L2_CID_MPEG_VIDEO_ASPECT:
+ return v4l2_ctrl_query_fill(c,
+ V4L2_MPEG_VIDEO_ASPECT_1x1,
+ V4L2_MPEG_VIDEO_ASPECT_221x100,
+ 1, V4L2_MPEG_VIDEO_ASPECT_4x3);
+ case V4L2_CID_MPEG_VIDEO_GOP_SIZE:
+ return v4l2_ctrl_query_fill(c, 1, 255, 1, 15);
+ case V4L2_CID_MPEG_VIDEO_BITRATE_MODE:
+ return v4l2_ctrl_query_fill(c,
+ V4L2_MPEG_VIDEO_BITRATE_MODE_VBR, V4L2_MPEG_VIDEO_BITRATE_MODE_CBR,
+ 1, V4L2_MPEG_VIDEO_BITRATE_MODE_VBR);
+ case V4L2_CID_MPEG_VIDEO_B_FRAMES:
+ return v4l2_ctrl_query_fill(c,
+ 1, 3, 1, 1);
+ case V4L2_CID_MPEG_VIDEO_BITRATE_PEAK:
+ return v4l2_ctrl_query_fill(c,
+ ENCODER_MIN_BITRATE, ENCODER_MAX_BITRATE,
+ 100000, ENCODER_DEF_BITRATE);
+ default:
+ return -EINVAL;
+ }
+}
+
+static int vidioc_queryctrl(struct file *file, void *priv,
+ struct v4l2_queryctrl *c)
+{
+ struct saa7164_encoder_fh *fh = priv;
+ struct saa7164_port *port = fh->port;
+ int i, next;
+ u32 id = c->id;
+
+ memset(c, 0, sizeof(*c));
+
+ next = !!(id & V4L2_CTRL_FLAG_NEXT_CTRL);
+ c->id = id & ~V4L2_CTRL_FLAG_NEXT_CTRL;
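+
+ /* The lookup below relies on saa7164_v4l2_ctrls[] being sorted in
+ * ascending control-ID order (V4L2_CTRL_FLAG_NEXT_CTRL handling). */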
+
+ for (i = 0; i < ARRAY_SIZE(saa7164_v4l2_ctrls); i++) {
+ if (next) {
+ if (c->id < saa7164_v4l2_ctrls[i])
+ c->id = saa7164_v4l2_ctrls[i];
+ else
+ continue;
+ }
+
+ if (c->id == saa7164_v4l2_ctrls[i])
+ return fill_queryctrl(&port->encoder_params, c);
+
+ if (c->id < saa7164_v4l2_ctrls[i])
+ break;
+ }
+
+ return -EINVAL;
+}
+
+static int saa7164_encoder_stop_port(struct saa7164_port *port)
+{
+ struct saa7164_dev *dev = port->dev;
+ int ret;
+
+ ret = saa7164_api_transition_port(port, SAA_DMASTATE_STOP);
+ if ((ret != SAA_OK) && (ret != SAA_ERR_ALREADY_STOPPED)) {
+ printk(KERN_ERR "%s() stop transition failed, ret = 0x%x\n",
+ __func__, ret);
+ ret = -EIO;
+ } else {
+ dprintk(DBGLVL_ENC, "%s() Stopped\n", __func__);
+ ret = 0;
+ }
+
+ return ret;
+}
+
+static int saa7164_encoder_acquire_port(struct saa7164_port *port)
+{
+ struct saa7164_dev *dev = port->dev;
+ int ret;
+
+ ret = saa7164_api_transition_port(port, SAA_DMASTATE_ACQUIRE);
+ if ((ret != SAA_OK) && (ret != SAA_ERR_ALREADY_STOPPED)) {
+ printk(KERN_ERR "%s() acquire transition failed, ret = 0x%x\n",
+ __func__, ret);
+ ret = -EIO;
+ } else {
+ dprintk(DBGLVL_ENC, "%s() Acquired\n", __func__);
+ ret = 0;
+ }
+
+ return ret;
+}
+
+static int saa7164_encoder_pause_port(struct saa7164_port *port)
+{
+ struct saa7164_dev *dev = port->dev;
+ int ret;
+
+ ret = saa7164_api_transition_port(port, SAA_DMASTATE_PAUSE);
+ if ((ret != SAA_OK) && (ret != SAA_ERR_ALREADY_STOPPED)) {
+ printk(KERN_ERR "%s() pause transition failed, ret = 0x%x\n",
+ __func__, ret);
+ ret = -EIO;
+ } else {
+ dprintk(DBGLVL_ENC, "%s() Paused\n", __func__);
+ ret = 0;
+ }
+
+ return ret;
+}
+
+/* Firmware is very windows centric, meaning you have to transition
+ * the part through AVStream / KS Windows stages, forwards or backwards.
+ * States are: stopped, acquired (h/w), paused, started.
+ * We have to leave here with all of the soft buffers on the free list,
+ * else the cfg_post() func won't have soft buffers to correctly configure.
+ */
+static int saa7164_encoder_stop_streaming(struct saa7164_port *port)
+{
+ struct saa7164_dev *dev = port->dev;
+ struct saa7164_buffer *buf;
+ struct saa7164_user_buffer *ubuf;
+ struct list_head *c, *n;
+ int ret;
+
+ dprintk(DBGLVL_ENC, "%s(port=%d)\n", __func__, port->nr);
+
+ ret = saa7164_encoder_pause_port(port);
+ ret = saa7164_encoder_acquire_port(port);
+ ret = saa7164_encoder_stop_port(port);
+
+ dprintk(DBGLVL_ENC, "%s(port=%d) Hardware stopped\n", __func__,
+ port->nr);
+
+ /* Reset the state of any allocated buffer resources */
+ mutex_lock(&port->dmaqueue_lock);
+
+ /* Reset the hard and soft buffer state */
+ list_for_each_safe(c, n, &port->dmaqueue.list) {
+ buf = list_entry(c, struct saa7164_buffer, list);
+ buf->flags = SAA7164_BUFFER_FREE;
+ buf->pos = 0;
+ }
+
+ list_for_each_safe(c, n, &port->list_buf_used.list) {
+ ubuf = list_entry(c, struct saa7164_user_buffer, list);
+ ubuf->pos = 0;
+ list_move_tail(&ubuf->list, &port->list_buf_free.list);
+ }
+
+ mutex_unlock(&port->dmaqueue_lock);
+
+ /* Free any allocated resources */
+ saa7164_encoder_buffers_dealloc(port);
+
+ dprintk(DBGLVL_ENC, "%s(port=%d) Released\n", __func__, port->nr);
+
+ return ret;
+}
+
+static int saa7164_encoder_start_streaming(struct saa7164_port *port)
+{
+ struct saa7164_dev *dev = port->dev;
+ int result, ret = 0;
+
+ dprintk(DBGLVL_ENC, "%s(port=%d)\n", __func__, port->nr);
+
+ port->done_first_interrupt = 0;
+
+ /* allocate all of the PCIe DMA buffer resources on the fly,
+ * allowing switching between TS and PS payloads without
+ * requiring a complete driver reload.
+ */
+ saa7164_encoder_buffers_alloc(port);
+
+ /* Configure the encoder with any cache values */
+ saa7164_api_set_encoder(port);
+ saa7164_api_get_encoder(port);
+
+ /* Place the empty buffers on the hardware */
+ saa7164_buffer_cfg_port(port);
+
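+ /* Walk the firmware state machine forward one step at a time:
+ * ACQUIRE -> PAUSE -> RUN. Any failure below forces a STOP. */
+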
+ /* Acquire the hardware */
+ result = saa7164_api_transition_port(port, SAA_DMASTATE_ACQUIRE);
+ if ((result != SAA_OK) && (result != SAA_ERR_ALREADY_STOPPED)) {
+ printk(KERN_ERR "%s() acquire transition failed, res = 0x%x\n",
+ __func__, result);
+
+ /* Stop the hardware, regardless */
+ result = saa7164_api_transition_port(port, SAA_DMASTATE_STOP);
+ if ((result != SAA_OK) && (result != SAA_ERR_ALREADY_STOPPED)) {
+ printk(KERN_ERR "%s() acquire/forced stop transition "
+ "failed, res = 0x%x\n", __func__, result);
+ }
+ ret = -EIO;
+ goto out;
+ } else
+ dprintk(DBGLVL_ENC, "%s() Acquired\n", __func__);
+
+ /* Pause the hardware */
+ result = saa7164_api_transition_port(port, SAA_DMASTATE_PAUSE);
+ if ((result != SAA_OK) && (result != SAA_ERR_ALREADY_STOPPED)) {
+ printk(KERN_ERR "%s() pause transition failed, res = 0x%x\n",
+ __func__, result);
+
+ /* Stop the hardware, regardless */
+ result = saa7164_api_transition_port(port, SAA_DMASTATE_STOP);
+ if ((result != SAA_OK) && (result != SAA_ERR_ALREADY_STOPPED)) {
+ printk(KERN_ERR "%s() pause/forced stop transition "
+ "failed, res = 0x%x\n", __func__, result);
+ }
+
+ ret = -EIO;
+ goto out;
+ } else
+ dprintk(DBGLVL_ENC, "%s() Paused\n", __func__);
+
+ /* Start the hardware */
+ result = saa7164_api_transition_port(port, SAA_DMASTATE_RUN);
+ if ((result != SAA_OK) && (result != SAA_ERR_ALREADY_STOPPED)) {
+ printk(KERN_ERR "%s() run transition failed, result = 0x%x\n",
+ __func__, result);
+
+ /* Stop the hardware, regardless */
+ result = saa7164_api_transition_port(port, SAA_DMASTATE_STOP);
+ if ((result != SAA_OK) && (result != SAA_ERR_ALREADY_STOPPED)) {
+ printk(KERN_ERR "%s() run/forced stop transition "
+ "failed, res = 0x%x\n", __func__, result);
+ }
+
+ ret = -EIO;
+ } else
+ dprintk(DBGLVL_ENC, "%s() Running\n", __func__);
+
+out:
+ return ret;
+}
+
+static int fops_open(struct file *file)
+{
+ struct saa7164_dev *dev;
+ struct saa7164_port *port;
+ struct saa7164_encoder_fh *fh;
+
+ port = (struct saa7164_port *)video_get_drvdata(video_devdata(file));
+ if (!port)
+ return -ENODEV;
+
+ dev = port->dev;
+
+ dprintk(DBGLVL_ENC, "%s()\n", __func__);
+
+ /* allocate + initialize per filehandle data */
+ fh = kzalloc(sizeof(*fh), GFP_KERNEL);
+ if (NULL == fh)
+ return -ENOMEM;
+
+ file->private_data = fh;
+ fh->port = port;
+
+ return 0;
+}
+
+static int fops_release(struct file *file)
+{
+ struct saa7164_encoder_fh *fh = file->private_data;
+ struct saa7164_port *port = fh->port;
+ struct saa7164_dev *dev = port->dev;
+
+ dprintk(DBGLVL_ENC, "%s()\n", __func__);
+
+ /* Shut device down on last close */
+ if (atomic_cmpxchg(&fh->v4l_reading, 1, 0) == 1) {
+ if (atomic_dec_return(&port->v4l_reader_count) == 0) {
+ /* stop mpeg capture then cancel buffers */
+ saa7164_encoder_stop_streaming(port);
+ }
+ }
+
+ file->private_data = NULL;
+ kfree(fh);
+
+ return 0;
+}
+
+struct saa7164_user_buffer *saa7164_enc_next_buf(struct saa7164_port *port)
+{
+ struct saa7164_user_buffer *ubuf = NULL;
+ struct saa7164_dev *dev = port->dev;
+ u32 crc;
+
+ mutex_lock(&port->dmaqueue_lock);
+ if (!list_empty(&port->list_buf_used.list)) {
+ ubuf = list_first_entry(&port->list_buf_used.list,
+ struct saa7164_user_buffer, list);
+
+ if (crc_checking) {
+ crc = crc32(0, ubuf->data, ubuf->actual_size);
+ if (crc != ubuf->crc) {
+ printk(KERN_ERR "%s() ubuf %p crc became invalid, was 0x%x became 0x%x\n", __func__,
+ ubuf, ubuf->crc, crc);
+ }
+ }
+
+ }
+ mutex_unlock(&port->dmaqueue_lock);
+
+ dprintk(DBGLVL_ENC, "%s() returns %p\n", __func__, ubuf);
+
+ return ubuf;
+}
+
+static ssize_t fops_read(struct file *file, char __user *buffer,
+ size_t count, loff_t *pos)
+{
+ struct saa7164_encoder_fh *fh = file->private_data;
+ struct saa7164_port *port = fh->port;
+ struct saa7164_user_buffer *ubuf = NULL;
+ struct saa7164_dev *dev = port->dev;
+ int ret = 0;
+ int rem, cnt;
+ u8 *p;
+
+ port->last_read_msecs_diff = port->last_read_msecs;
+ port->last_read_msecs = jiffies_to_msecs(jiffies);
+ port->last_read_msecs_diff = port->last_read_msecs -
+ port->last_read_msecs_diff;
+
+ saa7164_histogram_update(&port->read_interval,
+ port->last_read_msecs_diff);
+
+ if (*pos) {
+ printk(KERN_ERR "%s() ESPIPE\n", __func__);
+ return -ESPIPE;
+ }
+
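+ /* The first reader initializes the encoder and starts streaming */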
+ if (atomic_cmpxchg(&fh->v4l_reading, 0, 1) == 0) {
+ if (atomic_inc_return(&port->v4l_reader_count) == 1) {
+
+ if (saa7164_encoder_initialize(port) < 0) {
+ printk(KERN_ERR "%s() EINVAL\n", __func__);
+ return -EINVAL;
+ }
+
+ saa7164_encoder_start_streaming(port);
+ msleep(200);
+ }
+ }
+
+ /* blocking wait for buffer */
+ if ((file->f_flags & O_NONBLOCK) == 0) {
+ if (wait_event_interruptible(port->wait_read,
+ saa7164_enc_next_buf(port))) {
+ printk(KERN_ERR "%s() ERESTARTSYS\n", __func__);
+ return -ERESTARTSYS;
+ }
+ }
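+
+ /* O_NONBLOCK callers skip the wait above; with no buffer ready they
+ * fall through and return -EAGAIN below. */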
+
+ /* Pull the first buffer from the used list */
+ ubuf = saa7164_enc_next_buf(port);
+
+ while ((count > 0) && ubuf) {
+
+ /* set remaining bytes to copy */
+ rem = ubuf->actual_size - ubuf->pos;
+ cnt = rem > count ? count : rem;
+
+ p = ubuf->data + ubuf->pos;
+
+ dprintk(DBGLVL_ENC,
+ "%s() count=%d cnt=%d rem=%d buf=%p buf->pos=%d\n",
+ __func__, (int)count, cnt, rem, ubuf, ubuf->pos);
+
+ if (copy_to_user(buffer, p, cnt)) {
+ printk(KERN_ERR "%s() copy_to_user failed\n", __func__);
+ if (!ret) {
+ printk(KERN_ERR "%s() EFAULT\n", __func__);
+ ret = -EFAULT;
+ }
+ goto err;
+ }
+
+ ubuf->pos += cnt;
+ count -= cnt;
+ buffer += cnt;
+ ret += cnt;
+
+ if (ubuf->pos > ubuf->actual_size) {
+ printk(KERN_ERR "read() pos > actual, huh?\n");
+ }
+
+ if (ubuf->pos == ubuf->actual_size) {
+
+ /* finished with current buffer, take next buffer */
+
+ /* Requeue the buffer on the free list */
+ ubuf->pos = 0;
+
+ mutex_lock(&port->dmaqueue_lock);
+ list_move_tail(&ubuf->list, &port->list_buf_free.list);
+ mutex_unlock(&port->dmaqueue_lock);
+
+ /* Dequeue next */
+ if ((file->f_flags & O_NONBLOCK) == 0) {
+ if (wait_event_interruptible(port->wait_read,
+ saa7164_enc_next_buf(port))) {
+ break;
+ }
+ }
+ ubuf = saa7164_enc_next_buf(port);
+ }
+ }
+err:
+ if (!ret && !ubuf) {
+ ret = -EAGAIN;
+ }
+
+ return ret;
+}
+
+static unsigned int fops_poll(struct file *file, poll_table *wait)
+{
+ struct saa7164_encoder_fh *fh = (struct saa7164_encoder_fh *)file->private_data;
+ struct saa7164_port *port = fh->port;
+ struct saa7164_user_buffer *ubuf;
+ unsigned int mask = 0;
+
+ port->last_poll_msecs_diff = port->last_poll_msecs;
+ port->last_poll_msecs = jiffies_to_msecs(jiffies);
+ port->last_poll_msecs_diff = port->last_poll_msecs -
+ port->last_poll_msecs_diff;
+
+ saa7164_histogram_update(&port->poll_interval,
+ port->last_poll_msecs_diff);
+
+ if (!video_is_registered(port->v4l_device)) {
+ return -EIO;
+ }
+
+ if (atomic_cmpxchg(&fh->v4l_reading, 0, 1) == 0) {
+ if (atomic_inc_return(&port->v4l_reader_count) == 1) {
+ if (saa7164_encoder_initialize(port) < 0)
+ return -EINVAL;
+ saa7164_encoder_start_streaming(port);
+ msleep(200);
+ }
+ }
+
+ /* blocking wait for buffer */
+ if ((file->f_flags & O_NONBLOCK) == 0) {
+ if (wait_event_interruptible(port->wait_read,
+ saa7164_enc_next_buf(port))) {
+ return -ERESTARTSYS;
+ }
+ }
+
+ /* Pull the first buffer from the used list */
+ ubuf = list_first_entry(&port->list_buf_used.list,
+ struct saa7164_user_buffer, list);
+
+ if (ubuf)
+ mask |= POLLIN | POLLRDNORM;
+
+ return mask;
+}
+
+static const struct v4l2_file_operations mpeg_fops = {
+ .owner = THIS_MODULE,
+ .open = fops_open,
+ .release = fops_release,
+ .read = fops_read,
+ .poll = fops_poll,
+ .unlocked_ioctl = video_ioctl2,
+};
+
+int saa7164_g_chip_ident(struct file *file, void *fh,
+ struct v4l2_dbg_chip_ident *chip)
+{
+ struct saa7164_port *port = ((struct saa7164_encoder_fh *)fh)->port;
+ struct saa7164_dev *dev = port->dev;
+ dprintk(DBGLVL_ENC, "%s()\n", __func__);
+
+ return 0;
+}
+
+int saa7164_g_register(struct file *file, void *fh,
+ struct v4l2_dbg_register *reg)
+{
+ struct saa7164_port *port = ((struct saa7164_encoder_fh *)fh)->port;
+ struct saa7164_dev *dev = port->dev;
+ dprintk(DBGLVL_ENC, "%s()\n", __func__);
+
+ if (!capable(CAP_SYS_ADMIN))
+ return -EPERM;
+
+ return 0;
+}
+
+int saa7164_s_register(struct file *file, void *fh,
+ struct v4l2_dbg_register *reg)
+{
+ struct saa7164_port *port = ((struct saa7164_encoder_fh *)fh)->port;
+ struct saa7164_dev *dev = port->dev;
+ dprintk(DBGLVL_ENC, "%s()\n", __func__);
+
+ if (!capable(CAP_SYS_ADMIN))
+ return -EPERM;
+
+ return 0;
+}
+
+static const struct v4l2_ioctl_ops mpeg_ioctl_ops = {
+ .vidioc_s_std = vidioc_s_std,
+ .vidioc_enum_input = vidioc_enum_input,
+ .vidioc_g_input = vidioc_g_input,
+ .vidioc_s_input = vidioc_s_input,
+ .vidioc_g_tuner = vidioc_g_tuner,
+ .vidioc_s_tuner = vidioc_s_tuner,
+ .vidioc_g_frequency = vidioc_g_frequency,
+ .vidioc_s_frequency = vidioc_s_frequency,
+ .vidioc_s_ctrl = vidioc_s_ctrl,
+ .vidioc_g_ctrl = vidioc_g_ctrl,
+ .vidioc_querycap = vidioc_querycap,
+ .vidioc_enum_fmt_vid_cap = vidioc_enum_fmt_vid_cap,
+ .vidioc_g_fmt_vid_cap = vidioc_g_fmt_vid_cap,
+ .vidioc_try_fmt_vid_cap = vidioc_try_fmt_vid_cap,
+ .vidioc_s_fmt_vid_cap = vidioc_s_fmt_vid_cap,
+ .vidioc_g_ext_ctrls = vidioc_g_ext_ctrls,
+ .vidioc_s_ext_ctrls = vidioc_s_ext_ctrls,
+ .vidioc_try_ext_ctrls = vidioc_try_ext_ctrls,
+ .vidioc_log_status = vidioc_log_status,
+ .vidioc_queryctrl = vidioc_queryctrl,
+ .vidioc_g_chip_ident = saa7164_g_chip_ident,
+#ifdef CONFIG_VIDEO_ADV_DEBUG
+ .vidioc_g_register = saa7164_g_register,
+ .vidioc_s_register = saa7164_s_register,
+#endif
+};
+
+static struct video_device saa7164_mpeg_template = {
+ .name = "saa7164",
+ .fops = &mpeg_fops,
+ .ioctl_ops = &mpeg_ioctl_ops,
+ .minor = -1,
+ .tvnorms = SAA7164_NORMS,
+ .current_norm = V4L2_STD_NTSC_M,
+};
+
+static struct video_device *saa7164_encoder_alloc(
+ struct saa7164_port *port,
+ struct pci_dev *pci,
+ struct video_device *template,
+ char *type)
+{
+ struct video_device *vfd;
+ struct saa7164_dev *dev = port->dev;
+
+ dprintk(DBGLVL_ENC, "%s()\n", __func__);
+
+ vfd = video_device_alloc();
+ if (NULL == vfd)
+ return NULL;
+
+ *vfd = *template;
+ snprintf(vfd->name, sizeof(vfd->name), "%s %s (%s)", dev->name,
+ type, saa7164_boards[dev->board].name);
+
+ vfd->parent = &pci->dev;
+ vfd->release = video_device_release;
+ return vfd;
+}
+
+int saa7164_encoder_register(struct saa7164_port *port)
+{
+ struct saa7164_dev *dev = port->dev;
+ int result = -ENODEV;
+
+ dprintk(DBGLVL_ENC, "%s()\n", __func__);
+
+ if (port->type != SAA7164_MPEG_ENCODER)
+ BUG();
+
+ /* Sanity check that the PCI configuration space is active */
+ if (port->hwcfg.BARLocation == 0) {
+ printk(KERN_ERR "%s() failed "
+ "(errno = %d), NO PCI configuration\n",
+ __func__, result);
+ result = -ENOMEM;
+ goto failed;
+ }
+
+ /* Establish encoder defaults here */
+ /* Set default TV standard */
+ port->encodernorm = saa7164_tvnorms[0];
+ port->width = 720;
+ port->mux_input = 1; /* Composite */
+ port->video_format = EU_VIDEO_FORMAT_MPEG_2;
+ port->audio_format = 0;
+ port->video_resolution = 0;
+ port->ctl_brightness = 127;
+ port->ctl_contrast = 66;
+ port->ctl_hue = 128;
+ port->ctl_saturation = 62;
+ port->ctl_sharpness = 8;
+ port->encoder_params.bitrate = ENCODER_DEF_BITRATE;
+ port->encoder_params.bitrate_peak = ENCODER_DEF_BITRATE;
+ port->encoder_params.bitrate_mode = V4L2_MPEG_VIDEO_BITRATE_MODE_CBR;
+ port->encoder_params.stream_type = V4L2_MPEG_STREAM_TYPE_MPEG2_PS;
+ port->encoder_params.ctl_mute = 0;
+ port->encoder_params.ctl_aspect = V4L2_MPEG_VIDEO_ASPECT_4x3;
+ port->encoder_params.refdist = 1;
+ port->encoder_params.gop_size = SAA7164_ENCODER_DEFAULT_GOP_SIZE;
+
+ if (port->encodernorm.id & V4L2_STD_525_60)
+ port->height = 480;
+ else
+ port->height = 576;
+
+ /* Allocate and register the video device node */
+ port->v4l_device = saa7164_encoder_alloc(port,
+ dev->pci, &saa7164_mpeg_template, "mpeg");
+
+ if (port->v4l_device == NULL) {
+ printk(KERN_INFO "%s: can't allocate mpeg device\n",
+ dev->name);
+ result = -ENOMEM;
+ goto failed;
+ }
+
+ video_set_drvdata(port->v4l_device, port);
+ result = video_register_device(port->v4l_device,
+ VFL_TYPE_GRABBER, -1);
+ if (result < 0) {
+ printk(KERN_INFO "%s: can't register mpeg device\n",
+ dev->name);
+ /* TODO: We're going to leak here if we don't dealloc
+ * the buffers above. The unreg function can't deal with it.
+ */
+ goto failed;
+ }
+
+ printk(KERN_INFO "%s: registered device video%d [mpeg]\n",
+ dev->name, port->v4l_device->num);
+
+ /* Configure the hardware defaults */
+ saa7164_api_set_videomux(port);
+ saa7164_api_set_usercontrol(port, PU_BRIGHTNESS_CONTROL);
+ saa7164_api_set_usercontrol(port, PU_CONTRAST_CONTROL);
+ saa7164_api_set_usercontrol(port, PU_HUE_CONTROL);
+ saa7164_api_set_usercontrol(port, PU_SATURATION_CONTROL);
+ saa7164_api_set_usercontrol(port, PU_SHARPNESS_CONTROL);
+ saa7164_api_audio_mute(port, 0);
+ saa7164_api_set_audio_volume(port, 20);
+ saa7164_api_set_aspect_ratio(port);
+
+ /* Disable audio standard detection, it's buggy */
+ saa7164_api_set_audio_detection(port, 0);
+
+ saa7164_api_set_encoder(port);
+ saa7164_api_get_encoder(port);
+
+ result = 0;
+failed:
+ return result;
+}
+
+void saa7164_encoder_unregister(struct saa7164_port *port)
+{
+ struct saa7164_dev *dev = port->dev;
+
+ dprintk(DBGLVL_ENC, "%s(port=%d)\n", __func__, port->nr);
+
+ if (port->type != SAA7164_MPEG_ENCODER)
+ BUG();
+
+ if (port->v4l_device) {
+ if (port->v4l_device->minor != -1)
+ video_unregister_device(port->v4l_device);
+ else
+ video_device_release(port->v4l_device);
+
+ port->v4l_device = NULL;
+ }
+
+ dprintk(DBGLVL_ENC, "%s(port=%d) done\n", __func__, port->nr);
+}
+
diff --git a/drivers/media/video/saa7164/saa7164-fw.c b/drivers/media/video/saa7164/saa7164-fw.c
index 270245d275ab..484533c32bb1 100644
--- a/drivers/media/video/saa7164/saa7164-fw.c
+++ b/drivers/media/video/saa7164/saa7164-fw.c
@@ -1,7 +1,7 @@
/*
* Driver for the NXP SAA7164 PCIe bridge
*
- * Copyright (c) 2009 Steven Toth <stoth@kernellabs.com>
+ * Copyright (c) 2010 Steven Toth <stoth@kernellabs.com>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
@@ -24,11 +24,11 @@
#include "saa7164.h"
-#define SAA7164_REV2_FIRMWARE "v4l-saa7164-1.0.2.fw"
-#define SAA7164_REV2_FIRMWARE_SIZE 3978608
+#define SAA7164_REV2_FIRMWARE "NXP7164-2010-03-10.1.fw"
+#define SAA7164_REV2_FIRMWARE_SIZE 4019072
-#define SAA7164_REV3_FIRMWARE "v4l-saa7164-1.0.3.fw"
-#define SAA7164_REV3_FIRMWARE_SIZE 3978608
+#define SAA7164_REV3_FIRMWARE "NXP7164-2010-03-10.1.fw"
+#define SAA7164_REV3_FIRMWARE_SIZE 4019072
struct fw_header {
u32 firmwaresize;
@@ -604,6 +604,7 @@ int saa7164_downloadfirmware(struct saa7164_dev *dev)
}
}
+ dev->firmwareloaded = 1;
ret = 0;
out:
diff --git a/drivers/media/video/saa7164/saa7164-i2c.c b/drivers/media/video/saa7164/saa7164-i2c.c
index e1ae9b01bf0f..b5167d33650a 100644
--- a/drivers/media/video/saa7164/saa7164-i2c.c
+++ b/drivers/media/video/saa7164/saa7164-i2c.c
@@ -1,7 +1,7 @@
/*
* Driver for the NXP SAA7164 PCIe bridge
*
- * Copyright (c) 2009 Steven Toth <stoth@kernellabs.com>
+ * Copyright (c) 2010 Steven Toth <stoth@kernellabs.com>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
diff --git a/drivers/media/video/saa7164/saa7164-reg.h b/drivers/media/video/saa7164/saa7164-reg.h
index 06be4c13d5b1..2bbf81583d33 100644
--- a/drivers/media/video/saa7164/saa7164-reg.h
+++ b/drivers/media/video/saa7164/saa7164-reg.h
@@ -1,7 +1,7 @@
/*
* Driver for the NXP SAA7164 PCIe bridge
*
- * Copyright (c) 2009 Steven Toth <stoth@kernellabs.com>
+ * Copyright (c) 2010 Steven Toth <stoth@kernellabs.com>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
@@ -60,6 +60,7 @@
#define GET_STRING_CONTROL 0x03
#define GET_LANGUAGE_CONTROL 0x05
#define SET_POWER_CONTROL 0x07
+#define GET_FW_STATUS_CONTROL 0x08
#define GET_FW_VERSION_CONTROL 0x09
#define SET_DEBUG_LEVEL_CONTROL 0x0B
#define GET_DEBUG_DATA_CONTROL 0x0C
@@ -156,11 +157,63 @@
#define EXU_INTERRUPT_CONTROL 0x03
/* State Transition and args */
+#define SAA_PROBE_CONTROL 0x01
+#define SAA_COMMIT_CONTROL 0x02
#define SAA_STATE_CONTROL 0x03
#define SAA_DMASTATE_STOP 0x00
#define SAA_DMASTATE_ACQUIRE 0x01
#define SAA_DMASTATE_PAUSE 0x02
#define SAA_DMASTATE_RUN 0x03
-/* Hardware registers */
-
+/* A/V Mux Input Selector */
+#define SU_INPUT_SELECT_CONTROL 0x01
+
+/* Encoder Profiles */
+#define EU_PROFILE_PS_DVD 0x06
+#define EU_PROFILE_TS_HQ 0x09
+#define EU_VIDEO_FORMAT_MPEG_2 0x02
+
+/* Tuner */
+#define TU_AUDIO_MODE_CONTROL 0x17
+
+/* Video Formats */
+#define TU_STANDARD_CONTROL 0x00
+#define TU_STANDARD_AUTO_CONTROL 0x01
+#define TU_STANDARD_NONE 0x00
+#define TU_STANDARD_NTSC_M 0x01
+#define TU_STANDARD_PAL_I 0x08
+#define TU_STANDARD_MANUAL 0x00
+#define TU_STANDARD_AUTO 0x01
+
+/* Video Controls */
+#define PU_BRIGHTNESS_CONTROL 0x02
+#define PU_CONTRAST_CONTROL 0x03
+#define PU_HUE_CONTROL 0x06
+#define PU_SATURATION_CONTROL 0x07
+#define PU_SHARPNESS_CONTROL 0x08
+
+/* Audio Controls */
+#define MUTE_CONTROL 0x01
+#define VOLUME_CONTROL 0x02
+#define AUDIO_DEFAULT_CONTROL 0x0D
+
+/* Default Volume Levels */
+#define TMHW_LEV_ADJ_DECLEV_DEFAULT 0x00
+#define TMHW_LEV_ADJ_MONOLEV_DEFAULT 0x00
+#define TMHW_LEV_ADJ_NICLEV_DEFAULT 0x00
+#define TMHW_LEV_ADJ_SAPLEV_DEFAULT 0x00
+#define TMHW_LEV_ADJ_ADCLEV_DEFAULT 0x00
+
+/* Encoder Related Commands */
+#define EU_PROFILE_CONTROL 0x00
+#define EU_VIDEO_FORMAT_CONTROL 0x01
+#define EU_VIDEO_BIT_RATE_CONTROL 0x02
+#define EU_VIDEO_RESOLUTION_CONTROL 0x03
+#define EU_VIDEO_GOP_STRUCTURE_CONTROL 0x04
+#define EU_VIDEO_INPUT_ASPECT_CONTROL 0x0A
+#define EU_AUDIO_FORMAT_CONTROL 0x0C
+#define EU_AUDIO_BIT_RATE_CONTROL 0x0D
+
+/* Firmware Debugging */
+#define SET_DEBUG_LEVEL_CONTROL 0x0B
+#define GET_DEBUG_DATA_CONTROL 0x0C
diff --git a/drivers/media/video/saa7164/saa7164-types.h b/drivers/media/video/saa7164/saa7164-types.h
index 99093f23aae5..df1d2997fa6c 100644
--- a/drivers/media/video/saa7164/saa7164-types.h
+++ b/drivers/media/video/saa7164/saa7164-types.h
@@ -1,7 +1,7 @@
/*
* Driver for the NXP SAA7164 PCIe bridge
*
- * Copyright (c) 2009 Steven Toth <stoth@kernellabs.com>
+ * Copyright (c) 2010 Steven Toth <stoth@kernellabs.com>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
@@ -24,7 +24,7 @@
/* Some structures are passed directly to/from the firmware and
* have strict alignment requirements. This is one of them.
*/
-typedef struct {
+struct tmComResHWDescr {
u8 bLength;
u8 bDescriptorType;
u8 bDescriptorSubtype;
@@ -37,14 +37,14 @@ typedef struct {
u32 dwHostMemoryRegionSize;
u32 dwHostHibernatMemRegion;
u32 dwHostHibernatMemRegionSize;
-} __attribute__((packed)) tmComResHWDescr_t;
+} __attribute__((packed));
/* This is DWORD aligned on windows but I can't find the right
* gcc syntax to match the binary data from the device.
* I've manually padded with Reserved[3] bytes to match the hardware,
* but this could break if GCC decides to pack in a different way.
*/
-typedef struct {
+struct tmComResInterfaceDescr {
u8 bLength;
u8 bDescriptorType;
u8 bDescriptorSubtype;
@@ -56,52 +56,52 @@ typedef struct {
u8 bDebugInterruptId;
u8 BARLocation;
u8 Reserved[3];
-} tmComResInterfaceDescr_t;
+};
-typedef struct {
+struct tmComResBusDescr {
u64 CommandRing;
u64 ResponseRing;
u32 CommandWrite;
u32 CommandRead;
u32 ResponseWrite;
u32 ResponseRead;
-} tmComResBusDescr_t;
+};
-typedef enum {
+enum tmBusType {
NONE = 0,
TYPE_BUS_PCI = 1,
TYPE_BUS_PCIe = 2,
TYPE_BUS_USB = 3,
TYPE_BUS_I2C = 4
-} tmBusType_t;
+};
-typedef struct {
- tmBusType_t Type;
+struct tmComResBusInfo {
+ enum tmBusType Type;
u16 m_wMaxReqSize;
u8 *m_pdwSetRing;
u32 m_dwSizeSetRing;
u8 *m_pdwGetRing;
u32 m_dwSizeGetRing;
- u32 *m_pdwSetWritePos;
- u32 *m_pdwSetReadPos;
- u32 *m_pdwGetWritePos;
- u32 *m_pdwGetReadPos;
+ u32 m_dwSetWritePos;
+ u32 m_dwSetReadPos;
+ u32 m_dwGetWritePos;
+ u32 m_dwGetReadPos;
/* All access is protected */
struct mutex lock;
-} tmComResBusInfo_t;
+};
-typedef struct {
+struct tmComResInfo {
u8 id;
u8 flags;
u16 size;
u32 command;
u16 controlselector;
u8 seqno;
-} __attribute__((packed)) tmComResInfo_t;
+} __attribute__((packed));
-typedef enum {
+enum tmComResCmd {
SET_CUR = 0x01,
GET_CUR = 0x81,
GET_MIN = 0x82,
@@ -110,7 +110,7 @@ typedef enum {
GET_LEN = 0x85,
GET_INFO = 0x86,
GET_DEF = 0x87
-} tmComResCmd_t;
+};
struct cmd {
u8 seqno;
@@ -121,20 +121,20 @@ struct cmd {
wait_queue_head_t wait;
};
-typedef struct {
+struct tmDescriptor {
u32 pathid;
u32 size;
void *descriptor;
-} tmDescriptor_t;
+};
-typedef struct {
+struct tmComResDescrHeader {
u8 len;
u8 type;
u8 subtype;
u8 unitid;
-} __attribute__((packed)) tmComResDescrHeader_t;
+} __attribute__((packed));
-typedef struct {
+struct tmComResExtDevDescrHeader {
u8 len;
u8 type;
u8 subtype;
@@ -144,22 +144,22 @@ typedef struct {
u32 numgpiopins;
u8 numgpiogroups;
u8 controlsize;
-} __attribute__((packed)) tmComResExtDevDescrHeader_t;
+} __attribute__((packed));
-typedef struct {
+struct tmComResGPIO {
u32 pin;
u8 state;
-} __attribute__((packed)) tmComResGPIO_t;
+} __attribute__((packed));
-typedef struct {
+struct tmComResPathDescrHeader {
u8 len;
u8 type;
u8 subtype;
u8 pathid;
-} __attribute__((packed)) tmComResPathDescrHeader_t;
+} __attribute__((packed));
/* terminaltype */
-typedef enum {
+enum tmComResTermType {
ITT_ANTENNA = 0x0203,
LINE_CONNECTOR = 0x0603,
SPDIF_CONNECTOR = 0x0605,
@@ -167,9 +167,9 @@ typedef enum {
SVIDEO_CONNECTOR = 0x0402,
COMPONENT_CONNECTOR = 0x0403,
STANDARD_DMA = 0xF101
-} tmComResTermType_t;
+};
-typedef struct {
+struct tmComResAntTermDescrHeader {
u8 len;
u8 type;
u8 subtype;
@@ -178,9 +178,9 @@ typedef struct {
u8 assocterminal;
u8 iterminal;
u8 controlsize;
-} __attribute__((packed)) tmComResAntTermDescrHeader_t;
+} __attribute__((packed));
-typedef struct {
+struct tmComResTunerDescrHeader {
u8 len;
u8 type;
u8 subtype;
@@ -190,9 +190,9 @@ typedef struct {
u32 tuningstandards;
u8 controlsize;
u32 controls;
-} __attribute__((packed)) tmComResTunerDescrHeader_t;
+} __attribute__((packed));
-typedef enum {
+enum tmBufferFlag {
/* the buffer does not contain any valid data */
TM_BUFFER_FLAG_EMPTY,
@@ -201,23 +201,23 @@ typedef enum {
/* the buffer is the dummy buffer - TODO??? */
TM_BUFFER_FLAG_DUMMY_BUFFER
-} tmBufferFlag_t;
+};
-typedef struct {
+struct tmBuffer {
u64 *pagetablevirt;
u64 pagetablephys;
u16 offset;
u8 *context;
u64 timestamp;
- tmBufferFlag_t BufferFlag_t;
+ enum tmBufferFlag BufferFlag;
u32 lostbuffers;
u32 validbuffers;
u64 *dummypagevirt;
u64 dummypagephys;
u64 *addressvirt;
-} tmBuffer_t;
+};
-typedef struct {
+struct tmHWStreamParameters {
u32 bitspersample;
u32 samplesperline;
u32 numberoflines;
@@ -227,15 +227,15 @@ typedef struct {
u64 *pagetablelistphys;
u32 numpagetables;
u32 numpagetableentries;
-} tmHWStreamParameters_t;
+};
-typedef struct {
- tmHWStreamParameters_t HWStreamParameters_t;
+struct tmStreamParameters {
+ struct tmHWStreamParameters HWStreamParameters;
u64 qwDummyPageTablePhys;
u64 *pDummyPageTableVirt;
-} tmStreamParameters_t;
+};
-typedef struct {
+struct tmComResDMATermDescrHeader {
u8 len;
u8 type;
u8 subtyle;
@@ -251,7 +251,7 @@ typedef struct {
u8 metadatasize;
u8 numformats;
u8 controlsize;
-} __attribute__((packed)) tmComResDMATermDescrHeader_t;
+} __attribute__((packed));
/*
*
@@ -274,7 +274,7 @@ typedef struct {
* Data is to be ignored by the application.
*
*/
-typedef struct {
+struct tmComResTSFormatDescrHeader {
u8 len;
u8 type;
u8 subtype;
@@ -283,5 +283,160 @@ typedef struct {
u8 bPacketLength;
u8 bStrideLength;
u8 guidStrideFormat[16];
-} __attribute__((packed)) tmComResTSFormatDescrHeader_t;
+} __attribute__((packed));
+
+/* Encoder related structures */
+
+/* A/V Mux Selector */
+struct tmComResSelDescrHeader {
+ u8 len;
+ u8 type;
+ u8 subtype;
+ u8 unitid;
+ u8 nrinpins;
+ u8 sourceid;
+} __attribute__((packed));
+
+/* A/V Audio processor definitions */
+struct tmComResProcDescrHeader {
+ u8 len;
+ u8 type;
+ u8 subtype;
+ u8 unitid;
+ u8 sourceid;
+ u16 wreserved;
+ u8 controlsize;
+} __attribute__((packed));
+
+/* Video bitrate control message */
+#define EU_VIDEO_BIT_RATE_MODE_CONSTANT (0)
+#define EU_VIDEO_BIT_RATE_MODE_VARIABLE_AVERAGE (1)
+#define EU_VIDEO_BIT_RATE_MODE_VARIABLE_PEAK (2)
+struct tmComResEncVideoBitRate {
+ u8 ucVideoBitRateMode;
+ u32 dwVideoBitRate;
+ u32 dwVideoBitRatePeak;
+} __attribute__((packed));
+
+/* Video Encoder Aspect Ratio message */
+struct tmComResEncVideoInputAspectRatio {
+ u8 width;
+ u8 height;
+} __attribute__((packed));
+
+/* Video Encoder GOP IBP message */
+/* 1. IPPPPPPPPPPPPPP */
+/* 2. IBPBPBPBPBPBPBP */
+/* 3. IBBPBBPBBPBBP */
+#define SAA7164_ENCODER_DEFAULT_GOP_DIST (1)
+#define SAA7164_ENCODER_DEFAULT_GOP_SIZE (15)
+struct tmComResEncVideoGopStructure {
+ u8 ucGOPSize; /* GOP Size 12, 15 */
+ u8 ucRefFrameDist; /* Reference Frame Distance */
+} __attribute__((packed));
+
+/* Encoder processor definition */
+struct tmComResEncoderDescrHeader {
+ u8 len;
+ u8 type;
+ u8 subtype;
+ u8 unitid;
+ u8 vsourceid;
+ u8 asourceid;
+ u8 iunit;
+ u32 dwmControlCap;
+ u32 dwmProfileCap;
+ u32 dwmVidFormatCap;
+ u8 bmVidBitrateCap;
+ u16 wmVidResolutionsCap;
+ u16 wmVidFrmRateCap;
+ u32 dwmAudFormatCap;
+ u8 bmAudBitrateCap;
+} __attribute__((packed));
+
+/* Audio processor definition */
+struct tmComResAFeatureDescrHeader {
+ u8 len;
+ u8 type;
+ u8 subtype;
+ u8 unitid;
+ u8 sourceid;
+ u8 controlsize;
+} __attribute__((packed));
+
+/* Audio control messages */
+struct tmComResAudioDefaults {
+ u8 ucDecoderLevel;
+ u8 ucDecoderFM_Level;
+ u8 ucMonoLevel;
+ u8 ucNICAM_Level;
+ u8 ucSAP_Level;
+ u8 ucADC_Level;
+} __attribute__((packed));
+
+/* Audio bitrate control message */
+struct tmComResEncAudioBitRate {
+ u8 ucAudioBitRateMode;
+ u32 dwAudioBitRate;
+ u32 dwAudioBitRatePeak;
+} __attribute__((packed));
+
+/* Tuner / AV Decoder messages */
+struct tmComResTunerStandard {
+ u8 std;
+ u32 country;
+} __attribute__((packed));
+
+struct tmComResTunerStandardAuto {
+ u8 mode;
+} __attribute__((packed));
+
+/* EEPROM definition for PS stream types */
+struct tmComResPSFormatDescrHeader {
+ u8 len;
+ u8 type;
+ u8 subtype;
+ u8 bFormatIndex;
+ u16 wPacketLength;
+ u16 wPackLength;
+ u8 bPackDataType;
+} __attribute__((packed));
+
+/* VBI control structure */
+struct tmComResVBIFormatDescrHeader {
+ u8 len;
+ u8 type;
+ u8 subtype; /* VS_FORMAT_VBI */
+ u8 bFormatIndex;
+ u32 VideoStandard; /* See KS_AnalogVideoStandard, NTSC = 1 */
+ u8 StartLine; /* NTSC Start = 10 */
+ u8 EndLine; /* NTSC = 21 */
+ u8 FieldRate; /* 60 for NTSC */
+ u8 bNumLines; /* Unused - scheduled for removal */
+} __attribute__((packed));
+
+struct tmComResProbeCommit {
+ u16 bmHint;
+ u8 bFormatIndex;
+ u8 bFrameIndex;
+} __attribute__((packed));
+
+struct tmComResDebugSetLevel {
+ u32 dwDebugLevel;
+} __attribute__((packed));
+
+struct tmComResDebugGetData {
+ u32 dwResult;
+ u8 ucDebugData[256];
+} __attribute__((packed));
+struct tmFwInfoStruct {
+ u32 status;
+ u32 mode;
+ u32 devicespec;
+ u32 deviceinst;
+ u32 CPULoad;
+ u32 RemainHeap;
+ u32 CPUClock;
+ u32 RAMSpeed;
+} __attribute__((packed));
diff --git a/drivers/media/video/saa7164/saa7164-vbi.c b/drivers/media/video/saa7164/saa7164-vbi.c
new file mode 100644
index 000000000000..323c7cdca37b
--- /dev/null
+++ b/drivers/media/video/saa7164/saa7164-vbi.c
@@ -0,0 +1,1375 @@
+/*
+ * Driver for the NXP SAA7164 PCIe bridge
+ *
+ * Copyright (c) 2010 Steven Toth <stoth@kernellabs.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ *
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ */
+
+#include "saa7164.h"
+
+static struct saa7164_tvnorm saa7164_tvnorms[] = {
+ {
+ .name = "NTSC-M",
+ .id = V4L2_STD_NTSC_M,
+ }, {
+ .name = "NTSC-JP",
+ .id = V4L2_STD_NTSC_M_JP,
+ }
+};
+
+static const u32 saa7164_v4l2_ctrls[] = {
+ 0
+};
+
+/* Take the encoder configuration from the port struct and
+ * flush it to the hardware.
+ */
+static void saa7164_vbi_configure(struct saa7164_port *port)
+{
+ struct saa7164_dev *dev = port->dev;
+ dprintk(DBGLVL_VBI, "%s()\n", __func__);
+
+ port->vbi_params.width = port->width;
+ port->vbi_params.height = port->height;
+ port->vbi_params.is_50hz =
+ (port->encodernorm.id & V4L2_STD_625_50) != 0;
+
+ /* Set up the DIF (enable it) for analog mode by default */
+ saa7164_api_initialize_dif(port);
+
+// /* Configure the correct video standard */
+// saa7164_api_configure_dif(port, port->encodernorm.id);
+
+// /* Ensure the audio decoder is correctly configured */
+// saa7164_api_set_audio_std(port);
+ dprintk(DBGLVL_VBI, "%s() ends\n", __func__);
+}
+
+static int saa7164_vbi_buffers_dealloc(struct saa7164_port *port)
+{
+ struct list_head *c, *n, *p, *q, *l, *v;
+ struct saa7164_dev *dev = port->dev;
+ struct saa7164_buffer *buf;
+ struct saa7164_user_buffer *ubuf;
+
+ /* Remove any allocated buffers */
+ mutex_lock(&port->dmaqueue_lock);
+
+ dprintk(DBGLVL_VBI, "%s(port=%d) dmaqueue\n", __func__, port->nr);
+ list_for_each_safe(c, n, &port->dmaqueue.list) {
+ buf = list_entry(c, struct saa7164_buffer, list);
+ list_del(c);
+ saa7164_buffer_dealloc(buf);
+ }
+
+ dprintk(DBGLVL_VBI, "%s(port=%d) used\n", __func__, port->nr);
+ list_for_each_safe(p, q, &port->list_buf_used.list) {
+ ubuf = list_entry(p, struct saa7164_user_buffer, list);
+ list_del(p);
+ saa7164_buffer_dealloc_user(ubuf);
+ }
+
+ dprintk(DBGLVL_VBI, "%s(port=%d) free\n", __func__, port->nr);
+ list_for_each_safe(l, v, &port->list_buf_free.list) {
+ ubuf = list_entry(l, struct saa7164_user_buffer, list);
+ list_del(l);
+ saa7164_buffer_dealloc_user(ubuf);
+ }
+
+ mutex_unlock(&port->dmaqueue_lock);
+ dprintk(DBGLVL_VBI, "%s(port=%d) done\n", __func__, port->nr);
+
+ return 0;
+}
+
+/* Dynamic buffer switch at vbi start time */
+static int saa7164_vbi_buffers_alloc(struct saa7164_port *port)
+{
+ struct saa7164_dev *dev = port->dev;
+ struct saa7164_buffer *buf;
+ struct saa7164_user_buffer *ubuf;
+ struct tmHWStreamParameters *params = &port->hw_streamingparams;
+ int result = -ENODEV, i;
+ int len = 0;
+
+ dprintk(DBGLVL_VBI, "%s()\n", __func__);
+
+ /* TODO: NTSC SPECIFIC */
+ /* Init and establish defaults */
+ params->samplesperline = 1440;
+ params->numberoflines = 18;
+ params->pitch = 1440;
+ params->numpagetables = 2 +
+ ((params->numberoflines * params->pitch) / PAGE_SIZE);
+ params->bitspersample = 8;
+ params->linethreshold = 0;
+ params->pagetablelistvirt = 0;
+ params->pagetablelistphys = 0;
+ params->numpagetableentries = port->hwcfg.buffercount;
+
+ /* Allocate the PCI resources, buffers (hard) */
+ for (i = 0; i < port->hwcfg.buffercount; i++) {
+ buf = saa7164_buffer_alloc(port,
+ params->numberoflines *
+ params->pitch);
+
+ if (!buf) {
+ printk(KERN_ERR "%s() failed "
+ "(errno = %d), unable to allocate buffer\n",
+ __func__, result);
+ result = -ENOMEM;
+ goto failed;
+ } else {
+
+ mutex_lock(&port->dmaqueue_lock);
+ list_add_tail(&buf->list, &port->dmaqueue.list);
+ mutex_unlock(&port->dmaqueue_lock);
+
+ }
+ }
+
+ /* Allocate some kernel buffers for copying
+ * to userspace.
+ */
+ len = params->numberoflines * params->pitch;
+
+ if (vbi_buffers < 16)
+ vbi_buffers = 16;
+ if (vbi_buffers > 512)
+ vbi_buffers = 512;
+
+ for (i = 0; i < vbi_buffers; i++) {
+
+ ubuf = saa7164_buffer_alloc_user(dev, len);
+ if (ubuf) {
+ mutex_lock(&port->dmaqueue_lock);
+ list_add_tail(&ubuf->list, &port->list_buf_free.list);
+ mutex_unlock(&port->dmaqueue_lock);
+ }
+
+ }
+
+ result = 0;
+
+failed:
+ return result;
+}
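A worked example of the page-table sizing above, using the NTSC values that actually take effect (18 lines, 1440-byte pitch) and assuming the usual 4096-byte PAGE_SIZE:

numpagetables = 2 + ((numberoflines * pitch) / PAGE_SIZE)
              = 2 + ((18 * 1440) / 4096)
              = 2 + (25920 / 4096)
              = 2 + 6
              = 8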
+
+
+static int saa7164_vbi_initialize(struct saa7164_port *port)
+{
+ saa7164_vbi_configure(port);
+ return 0;
+}
+
+/* -- V4L2 --------------------------------------------------------- */
+static int vidioc_s_std(struct file *file, void *priv, v4l2_std_id *id)
+{
+ struct saa7164_vbi_fh *fh = file->private_data;
+ struct saa7164_port *port = fh->port;
+ struct saa7164_dev *dev = port->dev;
+ unsigned int i;
+
+ dprintk(DBGLVL_VBI, "%s(id=0x%x)\n", __func__, (u32)*id);
+
+ for (i = 0; i < ARRAY_SIZE(saa7164_tvnorms); i++) {
+ if (*id & saa7164_tvnorms[i].id)
+ break;
+ }
+ if (i == ARRAY_SIZE(saa7164_tvnorms))
+ return -EINVAL;
+
+ port->encodernorm = saa7164_tvnorms[i];
+
+ /* Update the audio decoder while it is not running in
+ * auto-detect mode.
+ */
+ saa7164_api_set_audio_std(port);
+
+ dprintk(DBGLVL_VBI, "%s(id=0x%x) OK\n", __func__, (u32)*id);
+
+ return 0;
+}
+
+static int vidioc_enum_input(struct file *file, void *priv,
+ struct v4l2_input *i)
+{
+ int n;
+
+ char *inputs[] = { "tuner", "composite", "svideo", "aux",
+ "composite 2", "svideo 2", "aux 2" };
+
+ if (i->index >= 7)
+ return -EINVAL;
+
+ strcpy(i->name, inputs[i->index]);
+
+ if (i->index == 0)
+ i->type = V4L2_INPUT_TYPE_TUNER;
+ else
+ i->type = V4L2_INPUT_TYPE_CAMERA;
+
+ for (n = 0; n < ARRAY_SIZE(saa7164_tvnorms); n++)
+ i->std |= saa7164_tvnorms[n].id;
+
+ return 0;
+}
+
+static int vidioc_g_input(struct file *file, void *priv, unsigned int *i)
+{
+ struct saa7164_vbi_fh *fh = file->private_data;
+ struct saa7164_port *port = fh->port;
+ struct saa7164_dev *dev = port->dev;
+
+ if (saa7164_api_get_videomux(port) != SAA_OK)
+ return -EIO;
+
+ *i = (port->mux_input - 1);
+
+ dprintk(DBGLVL_VBI, "%s() input=%d\n", __func__, *i);
+
+ return 0;
+}
+
+static int vidioc_s_input(struct file *file, void *priv, unsigned int i)
+{
+ struct saa7164_vbi_fh *fh = file->private_data;
+ struct saa7164_port *port = fh->port;
+ struct saa7164_dev *dev = port->dev;
+
+ dprintk(DBGLVL_VBI, "%s() input=%d\n", __func__, i);
+
+ if (i >= 7)
+ return -EINVAL;
+
+ port->mux_input = i + 1;
+
+ if (saa7164_api_set_videomux(port) != SAA_OK)
+ return -EIO;
+
+ return 0;
+}
+
+static int vidioc_g_tuner(struct file *file, void *priv,
+ struct v4l2_tuner *t)
+{
+ struct saa7164_vbi_fh *fh = file->private_data;
+ struct saa7164_port *port = fh->port;
+ struct saa7164_dev *dev = port->dev;
+
+ if (0 != t->index)
+ return -EINVAL;
+
+ strcpy(t->name, "tuner");
+ t->type = V4L2_TUNER_ANALOG_TV;
+ t->capability = V4L2_TUNER_CAP_NORM | V4L2_TUNER_CAP_STEREO;
+
+ dprintk(DBGLVL_VBI, "VIDIOC_G_TUNER: tuner type %d\n", t->type);
+
+ return 0;
+}
+
+static int vidioc_s_tuner(struct file *file, void *priv,
+ struct v4l2_tuner *t)
+{
+ /* Update the A/V core */
+ return 0;
+}
+
+static int vidioc_g_frequency(struct file *file, void *priv,
+ struct v4l2_frequency *f)
+{
+ struct saa7164_vbi_fh *fh = file->private_data;
+ struct saa7164_port *port = fh->port;
+
+ f->type = V4L2_TUNER_ANALOG_TV;
+ f->frequency = port->freq;
+
+ return 0;
+}
+
+static int vidioc_s_frequency(struct file *file, void *priv,
+ struct v4l2_frequency *f)
+{
+ struct saa7164_vbi_fh *fh = file->private_data;
+ struct saa7164_port *port = fh->port;
+ struct saa7164_dev *dev = port->dev;
+ struct saa7164_port *tsport;
+ struct dvb_frontend *fe;
+
+ /* TODO: Pull this for the std */
+ struct analog_parameters params = {
+ .mode = V4L2_TUNER_ANALOG_TV,
+ .audmode = V4L2_TUNER_MODE_STEREO,
+ .std = port->encodernorm.id,
+ .frequency = f->frequency
+ };
+
+ /* Stop the encoder */
+ dprintk(DBGLVL_VBI, "%s() frequency=%d tuner=%d\n", __func__,
+ f->frequency, f->tuner);
+
+ if (f->tuner != 0)
+ return -EINVAL;
+
+ if (f->type != V4L2_TUNER_ANALOG_TV)
+ return -EINVAL;
+
+ port->freq = f->frequency;
+
+ /* Update the hardware */
+ if (port->nr == SAA7164_PORT_VBI1)
+ tsport = &dev->ports[SAA7164_PORT_TS1];
+ else
+ if (port->nr == SAA7164_PORT_VBI2)
+ tsport = &dev->ports[SAA7164_PORT_TS2];
+ else
+ BUG();
+
+ fe = tsport->dvb.frontend;
+
+ if (fe && fe->ops.tuner_ops.set_analog_params)
+ fe->ops.tuner_ops.set_analog_params(fe, &params);
+ else
+ printk(KERN_ERR "%s() No analog tuner, aborting\n", __func__);
+
+ saa7164_vbi_initialize(port);
+
+ return 0;
+}
+
+static int vidioc_g_ctrl(struct file *file, void *priv,
+ struct v4l2_control *ctl)
+{
+ struct saa7164_vbi_fh *fh = file->private_data;
+ struct saa7164_port *port = fh->port;
+ struct saa7164_dev *dev = port->dev;
+
+ dprintk(DBGLVL_VBI, "%s(id=%d, value=%d)\n", __func__,
+ ctl->id, ctl->value);
+
+ switch (ctl->id) {
+ case V4L2_CID_BRIGHTNESS:
+ ctl->value = port->ctl_brightness;
+ break;
+ case V4L2_CID_CONTRAST:
+ ctl->value = port->ctl_contrast;
+ break;
+ case V4L2_CID_SATURATION:
+ ctl->value = port->ctl_saturation;
+ break;
+ case V4L2_CID_HUE:
+ ctl->value = port->ctl_hue;
+ break;
+ case V4L2_CID_SHARPNESS:
+ ctl->value = port->ctl_sharpness;
+ break;
+ case V4L2_CID_AUDIO_VOLUME:
+ ctl->value = port->ctl_volume;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int vidioc_s_ctrl(struct file *file, void *priv,
+ struct v4l2_control *ctl)
+{
+ struct saa7164_vbi_fh *fh = file->private_data;
+ struct saa7164_port *port = fh->port;
+ struct saa7164_dev *dev = port->dev;
+ int ret = 0;
+
+ dprintk(DBGLVL_VBI, "%s(id=%d, value=%d)\n", __func__,
+ ctl->id, ctl->value);
+
+ switch (ctl->id) {
+ case V4L2_CID_BRIGHTNESS:
+ if ((ctl->value >= 0) && (ctl->value <= 255)) {
+ port->ctl_brightness = ctl->value;
+ saa7164_api_set_usercontrol(port,
+ PU_BRIGHTNESS_CONTROL);
+ } else
+ ret = -EINVAL;
+ break;
+ case V4L2_CID_CONTRAST:
+ if ((ctl->value >= 0) && (ctl->value <= 255)) {
+ port->ctl_contrast = ctl->value;
+ saa7164_api_set_usercontrol(port, PU_CONTRAST_CONTROL);
+ } else
+ ret = -EINVAL;
+ break;
+ case V4L2_CID_SATURATION:
+ if ((ctl->value >= 0) && (ctl->value <= 255)) {
+ port->ctl_saturation = ctl->value;
+ saa7164_api_set_usercontrol(port,
+ PU_SATURATION_CONTROL);
+ } else
+ ret = -EINVAL;
+ break;
+ case V4L2_CID_HUE:
+ if ((ctl->value >= 0) && (ctl->value <= 255)) {
+ port->ctl_hue = ctl->value;
+ saa7164_api_set_usercontrol(port, PU_HUE_CONTROL);
+ } else
+ ret = -EINVAL;
+ break;
+ case V4L2_CID_SHARPNESS:
+ if ((ctl->value >= 0) && (ctl->value <= 255)) {
+ port->ctl_sharpness = ctl->value;
+ saa7164_api_set_usercontrol(port, PU_SHARPNESS_CONTROL);
+ } else
+ ret = -EINVAL;
+ break;
+ case V4L2_CID_AUDIO_VOLUME:
+ if ((ctl->value >= -83) && (ctl->value <= 24)) {
+ port->ctl_volume = ctl->value;
+ saa7164_api_set_audio_volume(port, port->ctl_volume);
+ } else
+ ret = -EINVAL;
+ break;
+ default:
+ ret = -EINVAL;
+ }
+
+ return ret;
+}
+
+static int saa7164_get_ctrl(struct saa7164_port *port,
+ struct v4l2_ext_control *ctrl)
+{
+ struct saa7164_vbi_params *params = &port->vbi_params;
+
+ switch (ctrl->id) {
+ case V4L2_CID_MPEG_STREAM_TYPE:
+ ctrl->value = params->stream_type;
+ break;
+ case V4L2_CID_MPEG_AUDIO_MUTE:
+ ctrl->value = params->ctl_mute;
+ break;
+ case V4L2_CID_MPEG_VIDEO_ASPECT:
+ ctrl->value = params->ctl_aspect;
+ break;
+ case V4L2_CID_MPEG_VIDEO_B_FRAMES:
+ ctrl->value = params->refdist;
+ break;
+ case V4L2_CID_MPEG_VIDEO_GOP_SIZE:
+ ctrl->value = params->gop_size;
+ break;
+ default:
+ return -EINVAL;
+ }
+ return 0;
+}
+
+static int vidioc_g_ext_ctrls(struct file *file, void *priv,
+ struct v4l2_ext_controls *ctrls)
+{
+ struct saa7164_vbi_fh *fh = file->private_data;
+ struct saa7164_port *port = fh->port;
+ int i, err = 0;
+
+ if (ctrls->ctrl_class == V4L2_CTRL_CLASS_MPEG) {
+ for (i = 0; i < ctrls->count; i++) {
+ struct v4l2_ext_control *ctrl = ctrls->controls + i;
+
+ err = saa7164_get_ctrl(port, ctrl);
+ if (err) {
+ ctrls->error_idx = i;
+ break;
+ }
+ }
+ return err;
+
+ }
+
+ return -EINVAL;
+}
+
+static int saa7164_try_ctrl(struct v4l2_ext_control *ctrl, int ac3)
+{
+ int ret = -EINVAL;
+
+ switch (ctrl->id) {
+ case V4L2_CID_MPEG_STREAM_TYPE:
+ if ((ctrl->value == V4L2_MPEG_STREAM_TYPE_MPEG2_PS) ||
+ (ctrl->value == V4L2_MPEG_STREAM_TYPE_MPEG2_TS))
+ ret = 0;
+ break;
+ case V4L2_CID_MPEG_AUDIO_MUTE:
+ if ((ctrl->value >= 0) &&
+ (ctrl->value <= 1))
+ ret = 0;
+ break;
+ case V4L2_CID_MPEG_VIDEO_ASPECT:
+ if ((ctrl->value >= V4L2_MPEG_VIDEO_ASPECT_1x1) &&
+ (ctrl->value <= V4L2_MPEG_VIDEO_ASPECT_221x100))
+ ret = 0;
+ break;
+ case V4L2_CID_MPEG_VIDEO_GOP_SIZE:
+ if ((ctrl->value >= 0) &&
+ (ctrl->value <= 255))
+ ret = 0;
+ break;
+ case V4L2_CID_MPEG_VIDEO_B_FRAMES:
+ if ((ctrl->value >= 1) &&
+ (ctrl->value <= 3))
+ ret = 0;
+ break;
+ default:
+ ret = -EINVAL;
+ }
+
+ return ret;
+}
+
+static int vidioc_try_ext_ctrls(struct file *file, void *priv,
+ struct v4l2_ext_controls *ctrls)
+{
+ int i, err = 0;
+
+ if (ctrls->ctrl_class == V4L2_CTRL_CLASS_MPEG) {
+ for (i = 0; i < ctrls->count; i++) {
+ struct v4l2_ext_control *ctrl = ctrls->controls + i;
+
+ err = saa7164_try_ctrl(ctrl, 0);
+ if (err) {
+ ctrls->error_idx = i;
+ break;
+ }
+ }
+ return err;
+ }
+
+ return -EINVAL;
+}
+
+static int saa7164_set_ctrl(struct saa7164_port *port,
+ struct v4l2_ext_control *ctrl)
+{
+ struct saa7164_vbi_params *params = &port->vbi_params;
+ int ret = 0;
+
+ switch (ctrl->id) {
+ case V4L2_CID_MPEG_STREAM_TYPE:
+ params->stream_type = ctrl->value;
+ break;
+ case V4L2_CID_MPEG_AUDIO_MUTE:
+ params->ctl_mute = ctrl->value;
+ ret = saa7164_api_audio_mute(port, params->ctl_mute);
+ if (ret != SAA_OK) {
+ printk(KERN_ERR "%s() error, ret = 0x%x\n", __func__,
+ ret);
+ ret = -EIO;
+ }
+ break;
+ case V4L2_CID_MPEG_VIDEO_ASPECT:
+ params->ctl_aspect = ctrl->value;
+ ret = saa7164_api_set_aspect_ratio(port);
+ if (ret != SAA_OK) {
+ printk(KERN_ERR "%s() error, ret = 0x%x\n", __func__,
+ ret);
+ ret = -EIO;
+ }
+ break;
+ case V4L2_CID_MPEG_VIDEO_B_FRAMES:
+ params->refdist = ctrl->value;
+ break;
+ case V4L2_CID_MPEG_VIDEO_GOP_SIZE:
+ params->gop_size = ctrl->value;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ /* TODO: Update the hardware */
+
+ return ret;
+}
+
+static int vidioc_s_ext_ctrls(struct file *file, void *priv,
+ struct v4l2_ext_controls *ctrls)
+{
+ struct saa7164_vbi_fh *fh = file->private_data;
+ struct saa7164_port *port = fh->port;
+ int i, err = 0;
+
+ if (ctrls->ctrl_class == V4L2_CTRL_CLASS_MPEG) {
+ for (i = 0; i < ctrls->count; i++) {
+ struct v4l2_ext_control *ctrl = ctrls->controls + i;
+
+ err = saa7164_try_ctrl(ctrl, 0);
+ if (err) {
+ ctrls->error_idx = i;
+ break;
+ }
+ err = saa7164_set_ctrl(port, ctrl);
+ if (err) {
+ ctrls->error_idx = i;
+ break;
+ }
+ }
+ return err;
+
+ }
+
+ return -EINVAL;
+}
+
+static int vidioc_querycap(struct file *file, void *priv,
+ struct v4l2_capability *cap)
+{
+ struct saa7164_vbi_fh *fh = file->private_data;
+ struct saa7164_port *port = fh->port;
+ struct saa7164_dev *dev = port->dev;
+
+ strcpy(cap->driver, dev->name);
+ strlcpy(cap->card, saa7164_boards[dev->board].name,
+ sizeof(cap->card));
+ sprintf(cap->bus_info, "PCI:%s", pci_name(dev->pci));
+
+ cap->capabilities =
+ V4L2_CAP_VBI_CAPTURE |
+ V4L2_CAP_READWRITE |
+ 0;
+
+ cap->capabilities |= V4L2_CAP_TUNER;
+ cap->version = 0;
+
+ return 0;
+}
+
+static int vidioc_enum_fmt_vid_cap(struct file *file, void *priv,
+ struct v4l2_fmtdesc *f)
+{
+ if (f->index != 0)
+ return -EINVAL;
+
+ strlcpy(f->description, "VBI", sizeof(f->description));
+ f->pixelformat = V4L2_PIX_FMT_MPEG;
+
+ return 0;
+}
+
+static int vidioc_g_fmt_vid_cap(struct file *file, void *priv,
+ struct v4l2_format *f)
+{
+ struct saa7164_vbi_fh *fh = file->private_data;
+ struct saa7164_port *port = fh->port;
+ struct saa7164_dev *dev = port->dev;
+
+ f->fmt.pix.pixelformat = V4L2_PIX_FMT_MPEG;
+ f->fmt.pix.bytesperline = 0;
+ f->fmt.pix.sizeimage =
+ port->ts_packet_size * port->ts_packet_count;
+ f->fmt.pix.colorspace = 0;
+ f->fmt.pix.width = port->width;
+ f->fmt.pix.height = port->height;
+
+ dprintk(DBGLVL_VBI, "VIDIOC_G_FMT: w: %d, h: %d\n",
+ port->width, port->height);
+
+ return 0;
+}
+
+static int vidioc_try_fmt_vid_cap(struct file *file, void *priv,
+ struct v4l2_format *f)
+{
+ struct saa7164_vbi_fh *fh = file->private_data;
+ struct saa7164_port *port = fh->port;
+ struct saa7164_dev *dev = port->dev;
+
+ f->fmt.pix.pixelformat = V4L2_PIX_FMT_MPEG;
+ f->fmt.pix.bytesperline = 0;
+ f->fmt.pix.sizeimage =
+ port->ts_packet_size * port->ts_packet_count;
+ f->fmt.pix.colorspace = 0;
+ dprintk(DBGLVL_VBI, "VIDIOC_TRY_FMT: w: %d, h: %d\n",
+ port->width, port->height);
+ return 0;
+}
+
+static int vidioc_s_fmt_vid_cap(struct file *file, void *priv,
+ struct v4l2_format *f)
+{
+ struct saa7164_vbi_fh *fh = file->private_data;
+ struct saa7164_port *port = fh->port;
+ struct saa7164_dev *dev = port->dev;
+
+ f->fmt.pix.pixelformat = V4L2_PIX_FMT_MPEG;
+ f->fmt.pix.bytesperline = 0;
+ f->fmt.pix.sizeimage =
+ port->ts_packet_size * port->ts_packet_count;
+ f->fmt.pix.colorspace = 0;
+
+ dprintk(DBGLVL_VBI, "VIDIOC_S_FMT: w: %d, h: %d, f: %d\n",
+ f->fmt.pix.width, f->fmt.pix.height, f->fmt.pix.field);
+
+ return 0;
+}
+
+static int vidioc_log_status(struct file *file, void *priv)
+{
+ return 0;
+}
+
+static int fill_queryctrl(struct saa7164_vbi_params *params,
+ struct v4l2_queryctrl *c)
+{
+ switch (c->id) {
+ case V4L2_CID_BRIGHTNESS:
+ return v4l2_ctrl_query_fill(c, 0x0, 0xff, 1, 127);
+ case V4L2_CID_CONTRAST:
+ return v4l2_ctrl_query_fill(c, 0x0, 0xff, 1, 66);
+ case V4L2_CID_SATURATION:
+ return v4l2_ctrl_query_fill(c, 0x0, 0xff, 1, 62);
+ case V4L2_CID_HUE:
+ return v4l2_ctrl_query_fill(c, 0x0, 0xff, 1, 128);
+ case V4L2_CID_SHARPNESS:
+ return v4l2_ctrl_query_fill(c, 0x0, 0x0f, 1, 8);
+ case V4L2_CID_MPEG_AUDIO_MUTE:
+ return v4l2_ctrl_query_fill(c, 0x0, 0x01, 1, 0);
+ case V4L2_CID_AUDIO_VOLUME:
+ return v4l2_ctrl_query_fill(c, -83, 24, 1, 20);
+ case V4L2_CID_MPEG_STREAM_TYPE:
+ return v4l2_ctrl_query_fill(c,
+ V4L2_MPEG_STREAM_TYPE_MPEG2_PS,
+ V4L2_MPEG_STREAM_TYPE_MPEG2_TS,
+ 1, V4L2_MPEG_STREAM_TYPE_MPEG2_PS);
+ case V4L2_CID_MPEG_VIDEO_ASPECT:
+ return v4l2_ctrl_query_fill(c,
+ V4L2_MPEG_VIDEO_ASPECT_1x1,
+ V4L2_MPEG_VIDEO_ASPECT_221x100,
+ 1, V4L2_MPEG_VIDEO_ASPECT_4x3);
+ case V4L2_CID_MPEG_VIDEO_GOP_SIZE:
+ return v4l2_ctrl_query_fill(c, 1, 255, 1, 15);
+ case V4L2_CID_MPEG_VIDEO_B_FRAMES:
+ return v4l2_ctrl_query_fill(c,
+ 1, 3, 1, 1);
+ default:
+ return -EINVAL;
+ }
+}
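For reference, v4l2_ctrl_query_fill() takes (ctrl, minimum, maximum, step, default), so the brightness case above is equivalent to filling:

/* V4L2_CID_BRIGHTNESS, as filled above */
c->minimum       = 0;
c->maximum       = 255;
c->step          = 1;
c->default_value = 127;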
+
+static int vidioc_queryctrl(struct file *file, void *priv,
+ struct v4l2_queryctrl *c)
+{
+ struct saa7164_vbi_fh *fh = priv;
+ struct saa7164_port *port = fh->port;
+ int i, next;
+ u32 id = c->id;
+
+ memset(c, 0, sizeof(*c));
+
+ next = !!(id & V4L2_CTRL_FLAG_NEXT_CTRL);
+ c->id = id & ~V4L2_CTRL_FLAG_NEXT_CTRL;
+
+ for (i = 0; i < ARRAY_SIZE(saa7164_v4l2_ctrls); i++) {
+ if (next) {
+ if (c->id < saa7164_v4l2_ctrls[i])
+ c->id = saa7164_v4l2_ctrls[i];
+ else
+ continue;
+ }
+
+ if (c->id == saa7164_v4l2_ctrls[i])
+ return fill_queryctrl(&port->vbi_params, c);
+
+ if (c->id < saa7164_v4l2_ctrls[i])
+ break;
+ }
+
+ return -EINVAL;
+}
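A minimal user-space sketch of how the two handlers above are exercised via the standard VIDIOC_QUERYCTRL / V4L2_CTRL_FLAG_NEXT_CTRL enumeration loop; the /dev/vbi0 node name is an assumption.

#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <linux/videodev2.h>

int main(void)
{
        struct v4l2_queryctrl qc;
        int fd = open("/dev/vbi0", O_RDONLY);

        if (fd < 0)
                return 1;

        memset(&qc, 0, sizeof(qc));
        qc.id = V4L2_CTRL_FLAG_NEXT_CTRL;
        while (ioctl(fd, VIDIOC_QUERYCTRL, &qc) == 0) {
                printf("ctrl 0x%08x: %s\n", qc.id, qc.name);
                qc.id |= V4L2_CTRL_FLAG_NEXT_CTRL;
        }

        close(fd);
        return 0;
}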
+
+static int saa7164_vbi_stop_port(struct saa7164_port *port)
+{
+ struct saa7164_dev *dev = port->dev;
+ int ret;
+
+ ret = saa7164_api_transition_port(port, SAA_DMASTATE_STOP);
+ if ((ret != SAA_OK) && (ret != SAA_ERR_ALREADY_STOPPED)) {
+ printk(KERN_ERR "%s() stop transition failed, ret = 0x%x\n",
+ __func__, ret);
+ ret = -EIO;
+ } else {
+ dprintk(DBGLVL_VBI, "%s() Stopped\n", __func__);
+ ret = 0;
+ }
+
+ return ret;
+}
+
+static int saa7164_vbi_acquire_port(struct saa7164_port *port)
+{
+ struct saa7164_dev *dev = port->dev;
+ int ret;
+
+ ret = saa7164_api_transition_port(port, SAA_DMASTATE_ACQUIRE);
+ if ((ret != SAA_OK) && (ret != SAA_ERR_ALREADY_STOPPED)) {
+ printk(KERN_ERR "%s() acquire transition failed, ret = 0x%x\n",
+ __func__, ret);
+ ret = -EIO;
+ } else {
+ dprintk(DBGLVL_VBI, "%s() Acquired\n", __func__);
+ ret = 0;
+ }
+
+ return ret;
+}
+
+static int saa7164_vbi_pause_port(struct saa7164_port *port)
+{
+ struct saa7164_dev *dev = port->dev;
+ int ret;
+
+ ret = saa7164_api_transition_port(port, SAA_DMASTATE_PAUSE);
+ if ((ret != SAA_OK) && (ret != SAA_ERR_ALREADY_STOPPED)) {
+ printk(KERN_ERR "%s() pause transition failed, ret = 0x%x\n",
+ __func__, ret);
+ ret = -EIO;
+ } else {
+ dprintk(DBGLVL_VBI, "%s() Paused\n", __func__);
+ ret = 0;
+ }
+
+ return ret;
+}
+
+/* Firmware is very Windows centric, meaning you have to transition
+ * the part through AVStream / KS Windows stages, forwards or backwards.
+ * States are: stopped, acquired (h/w), paused, started.
+ * We have to leave here with all of the soft buffers on the free list,
+ * else the cfg_post() func won't have soft buffers to correctly configure.
+ */
+static int saa7164_vbi_stop_streaming(struct saa7164_port *port)
+{
+ struct saa7164_dev *dev = port->dev;
+ struct saa7164_buffer *buf;
+ struct saa7164_user_buffer *ubuf;
+ struct list_head *c, *n;
+ int ret;
+
+ dprintk(DBGLVL_VBI, "%s(port=%d)\n", __func__, port->nr);
+
+ ret = saa7164_vbi_pause_port(port);
+ ret = saa7164_vbi_acquire_port(port);
+ ret = saa7164_vbi_stop_port(port);
+
+ dprintk(DBGLVL_VBI, "%s(port=%d) Hardware stopped\n", __func__,
+ port->nr);
+
+ /* Reset the state of any allocated buffer resources */
+ mutex_lock(&port->dmaqueue_lock);
+
+ /* Reset the hard and soft buffer state */
+ list_for_each_safe(c, n, &port->dmaqueue.list) {
+ buf = list_entry(c, struct saa7164_buffer, list);
+ buf->flags = SAA7164_BUFFER_FREE;
+ buf->pos = 0;
+ }
+
+ list_for_each_safe(c, n, &port->list_buf_used.list) {
+ ubuf = list_entry(c, struct saa7164_user_buffer, list);
+ ubuf->pos = 0;
+ list_move_tail(&ubuf->list, &port->list_buf_free.list);
+ }
+
+ mutex_unlock(&port->dmaqueue_lock);
+
+ /* Free any allocated resources */
+ saa7164_vbi_buffers_dealloc(port);
+
+ dprintk(DBGLVL_VBI, "%s(port=%d) Released\n", __func__, port->nr);
+
+ return ret;
+}
+
+static int saa7164_vbi_start_streaming(struct saa7164_port *port)
+{
+ struct saa7164_dev *dev = port->dev;
+ int result, ret = 0;
+
+ dprintk(DBGLVL_VBI, "%s(port=%d)\n", __func__, port->nr);
+
+ port->done_first_interrupt = 0;
+
+ /* allocate all of the PCIe DMA buffer resources on the fly,
+ * allowing switching between TS and PS payloads without
+ * requiring a complete driver reload.
+ */
+ saa7164_vbi_buffers_alloc(port);
+
+ /* Configure the encoder with any cache values */
+// saa7164_api_set_encoder(port);
+// saa7164_api_get_encoder(port);
+
+ /* Place the empty buffers on the hardware */
+ saa7164_buffer_cfg_port(port);
+
+ /* Negotiate format */
+ if (saa7164_api_set_vbi_format(port) != SAA_OK) {
+ printk(KERN_ERR "%s() No supported VBI format\n", __func__);
+ ret = -EIO;
+ goto out;
+ }
+
+ /* Acquire the hardware */
+ result = saa7164_api_transition_port(port, SAA_DMASTATE_ACQUIRE);
+ if ((result != SAA_OK) && (result != SAA_ERR_ALREADY_STOPPED)) {
+ printk(KERN_ERR "%s() acquire transition failed, res = 0x%x\n",
+ __func__, result);
+
+ ret = -EIO;
+ goto out;
+ } else
+ dprintk(DBGLVL_VBI, "%s() Acquired\n", __func__);
+
+ /* Pause the hardware */
+ result = saa7164_api_transition_port(port, SAA_DMASTATE_PAUSE);
+ if ((result != SAA_OK) && (result != SAA_ERR_ALREADY_STOPPED)) {
+ printk(KERN_ERR "%s() pause transition failed, res = 0x%x\n",
+ __func__, result);
+
+ /* Stop the hardware, regardless */
+ result = saa7164_vbi_stop_port(port);
+ if ((result != SAA_OK) && (result != SAA_ERR_ALREADY_STOPPED)) {
+ printk(KERN_ERR "%s() pause/forced stop transition "
+ "failed, res = 0x%x\n", __func__, result);
+ }
+
+ ret = -EIO;
+ goto out;
+ } else
+ dprintk(DBGLVL_VBI, "%s() Paused\n", __func__);
+
+ /* Start the hardware */
+ result = saa7164_api_transition_port(port, SAA_DMASTATE_RUN);
+ if ((result != SAA_OK) && (result != SAA_ERR_ALREADY_STOPPED)) {
+ printk(KERN_ERR "%s() run transition failed, result = 0x%x\n",
+ __func__, result);
+
+ /* Stop the hardware, regardless */
+ result = saa7164_vbi_acquire_port(port);
+ result = saa7164_vbi_stop_port(port);
+ if ((result != SAA_OK) && (result != SAA_ERR_ALREADY_STOPPED)) {
+ printk(KERN_ERR "%s() run/forced stop transition "
+ "failed, res = 0x%x\n", __func__, result);
+ }
+
+ ret = -EIO;
+ } else
+ dprintk(DBGLVL_VBI, "%s() Running\n", __func__);
+
+out:
+ return ret;
+}
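Condensed view of the two paths above (an illustrative summary, not new behaviour):

/*
 *   start: buffers_alloc -> buffer_cfg_port -> set_vbi_format ->
 *          ACQUIRE -> PAUSE -> RUN
 *   stop:  PAUSE -> ACQUIRE -> STOP -> requeue used buffers on the
 *          free list -> buffers_dealloc
 *
 * Any forward step that fails forces the port back towards the
 * stopped state before returning -EIO.
 */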
+
+int saa7164_vbi_fmt(struct file *file, void *priv, struct v4l2_format *f)
+{
+ /* ntsc */
+ f->fmt.vbi.samples_per_line = 1440;
+ f->fmt.vbi.sampling_rate = 27000000;
+ f->fmt.vbi.sample_format = V4L2_PIX_FMT_GREY;
+ f->fmt.vbi.offset = 0;
+ f->fmt.vbi.flags = 0;
+ f->fmt.vbi.start[0] = 10;
+ f->fmt.vbi.count[0] = 18;
+ f->fmt.vbi.start[1] = 263 + 10 + 1;
+ f->fmt.vbi.count[1] = 18;
+ return 0;
+}
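Worked out for NTSC, assuming the usual V4L2 meaning of start[]/count[] (first captured line and number of lines per field):

field 0: lines 10 .. 10 + 18 - 1 = 27
field 1: lines 274 .. 274 + 18 - 1 = 291   (274 = 263 + 10 + 1)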
+
+static int fops_open(struct file *file)
+{
+ struct saa7164_dev *dev;
+ struct saa7164_port *port;
+ struct saa7164_vbi_fh *fh;
+
+ port = (struct saa7164_port *)video_get_drvdata(video_devdata(file));
+ if (!port)
+ return -ENODEV;
+
+ dev = port->dev;
+
+ dprintk(DBGLVL_VBI, "%s()\n", __func__);
+
+ /* allocate + initialize per filehandle data */
+ fh = kzalloc(sizeof(*fh), GFP_KERNEL);
+ if (NULL == fh)
+ return -ENOMEM;
+
+ file->private_data = fh;
+ fh->port = port;
+
+ return 0;
+}
+
+static int fops_release(struct file *file)
+{
+ struct saa7164_vbi_fh *fh = file->private_data;
+ struct saa7164_port *port = fh->port;
+ struct saa7164_dev *dev = port->dev;
+
+ dprintk(DBGLVL_VBI, "%s()\n", __func__);
+
+ /* Shut device down on last close */
+ if (atomic_cmpxchg(&fh->v4l_reading, 1, 0) == 1) {
+ if (atomic_dec_return(&port->v4l_reader_count) == 0) {
+ /* stop vbi capture then cancel buffers */
+ saa7164_vbi_stop_streaming(port);
+ }
+ }
+
+ file->private_data = NULL;
+ kfree(fh);
+
+ return 0;
+}
+
+struct saa7164_user_buffer *saa7164_vbi_next_buf(struct saa7164_port *port)
+{
+ struct saa7164_user_buffer *ubuf = NULL;
+ struct saa7164_dev *dev = port->dev;
+ u32 crc;
+
+ mutex_lock(&port->dmaqueue_lock);
+ if (!list_empty(&port->list_buf_used.list)) {
+ ubuf = list_first_entry(&port->list_buf_used.list,
+ struct saa7164_user_buffer, list);
+
+ if (crc_checking) {
+ crc = crc32(0, ubuf->data, ubuf->actual_size);
+ if (crc != ubuf->crc) {
+ printk(KERN_ERR "%s() ubuf %p crc became invalid, was 0x%x became 0x%x\n", __func__,
+ ubuf, ubuf->crc, crc);
+ }
+ }
+
+ }
+ mutex_unlock(&port->dmaqueue_lock);
+
+ dprintk(DBGLVL_VBI, "%s() returns %p\n", __func__, ubuf);
+
+ return ubuf;
+}
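The CRC comparison above only works if the same checksum was stamped when the buffer was filled; a sketch of the assumed producer side (the actual stamping lives in the buffer/IRQ path, which is not part of this hunk):

if (crc_checking)
        ubuf->crc = crc32(0, ubuf->data, ubuf->actual_size);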
+
+static ssize_t fops_read(struct file *file, char __user *buffer,
+ size_t count, loff_t *pos)
+{
+ struct saa7164_vbi_fh *fh = file->private_data;
+ struct saa7164_port *port = fh->port;
+ struct saa7164_user_buffer *ubuf = NULL;
+ struct saa7164_dev *dev = port->dev;
+ int ret = 0;
+ int rem, cnt;
+ u8 *p;
+
+ port->last_read_msecs_diff = port->last_read_msecs;
+ port->last_read_msecs = jiffies_to_msecs(jiffies);
+ port->last_read_msecs_diff = port->last_read_msecs -
+ port->last_read_msecs_diff;
+
+ saa7164_histogram_update(&port->read_interval,
+ port->last_read_msecs_diff);
+
+ if (*pos) {
+ printk(KERN_ERR "%s() ESPIPE\n", __func__);
+ return -ESPIPE;
+ }
+
+ if (atomic_cmpxchg(&fh->v4l_reading, 0, 1) == 0) {
+ if (atomic_inc_return(&port->v4l_reader_count) == 1) {
+
+ if (saa7164_vbi_initialize(port) < 0) {
+ printk(KERN_ERR "%s() EINVAL\n", __func__);
+ return -EINVAL;
+ }
+
+ saa7164_vbi_start_streaming(port);
+ msleep(200);
+ }
+ }
+
+ /* blocking wait for buffer */
+ if ((file->f_flags & O_NONBLOCK) == 0) {
+ if (wait_event_interruptible(port->wait_read,
+ saa7164_vbi_next_buf(port))) {
+ printk(KERN_ERR "%s() ERESTARTSYS\n", __func__);
+ return -ERESTARTSYS;
+ }
+ }
+
+ /* Pull the first buffer from the used list */
+ ubuf = saa7164_vbi_next_buf(port);
+
+ while ((count > 0) && ubuf) {
+
+ /* set remaining bytes to copy */
+ rem = ubuf->actual_size - ubuf->pos;
+ cnt = rem > count ? count : rem;
+
+ p = ubuf->data + ubuf->pos;
+
+ dprintk(DBGLVL_VBI,
+ "%s() count=%d cnt=%d rem=%d buf=%p buf->pos=%d\n",
+ __func__, (int)count, cnt, rem, ubuf, ubuf->pos);
+
+ if (copy_to_user(buffer, p, cnt)) {
+ printk(KERN_ERR "%s() copy_to_user failed\n", __func__);
+ if (!ret) {
+ printk(KERN_ERR "%s() EFAULT\n", __func__);
+ ret = -EFAULT;
+ }
+ goto err;
+ }
+
+ ubuf->pos += cnt;
+ count -= cnt;
+ buffer += cnt;
+ ret += cnt;
+
+ if (ubuf->pos > ubuf->actual_size) {
+ printk(KERN_ERR "read() pos > actual, huh?\n");
+ }
+
+ if (ubuf->pos == ubuf->actual_size) {
+
+ /* finished with current buffer, take next buffer */
+
+ /* Requeue the buffer on the free list */
+ ubuf->pos = 0;
+
+ mutex_lock(&port->dmaqueue_lock);
+ list_move_tail(&ubuf->list, &port->list_buf_free.list);
+ mutex_unlock(&port->dmaqueue_lock);
+
+ /* Dequeue next */
+ if ((file->f_flags & O_NONBLOCK) == 0) {
+ if (wait_event_interruptible(port->wait_read,
+ saa7164_vbi_next_buf(port))) {
+ break;
+ }
+ }
+ ubuf = saa7164_vbi_next_buf(port);
+ }
+ }
+err:
+ if (!ret && !ubuf) {
+ printk(KERN_ERR "%s() EAGAIN\n", __func__);
+ ret = -EAGAIN;
+ }
+
+ return ret;
+}
+
+static unsigned int fops_poll(struct file *file, poll_table *wait)
+{
+ struct saa7164_vbi_fh *fh = (struct saa7164_vbi_fh *)file->private_data;
+ struct saa7164_port *port = fh->port;
+ struct saa7164_user_buffer *ubuf;
+ unsigned int mask = 0;
+
+ port->last_poll_msecs_diff = port->last_poll_msecs;
+ port->last_poll_msecs = jiffies_to_msecs(jiffies);
+ port->last_poll_msecs_diff = port->last_poll_msecs -
+ port->last_poll_msecs_diff;
+
+ saa7164_histogram_update(&port->poll_interval,
+ port->last_poll_msecs_diff);
+
+ if (!video_is_registered(port->v4l_device)) {
+ return -EIO;
+ }
+
+ if (atomic_cmpxchg(&fh->v4l_reading, 0, 1) == 0) {
+ if (atomic_inc_return(&port->v4l_reader_count) == 1) {
+ if (saa7164_vbi_initialize(port) < 0)
+ return -EINVAL;
+ saa7164_vbi_start_streaming(port);
+ msleep(200);
+ }
+ }
+
+ /* blocking wait for buffer */
+ if ((file->f_flags & O_NONBLOCK) == 0) {
+ if (wait_event_interruptible(port->wait_read,
+ saa7164_vbi_next_buf(port))) {
+ return -ERESTARTSYS;
+ }
+ }
+
+ /* Report readable if a buffer is waiting on the used list */
+ ubuf = saa7164_vbi_next_buf(port);
+
+
+ if (ubuf)
+ mask |= POLLIN | POLLRDNORM;
+
+ return mask;
+}
+static const struct v4l2_file_operations vbi_fops = {
+ .owner = THIS_MODULE,
+ .open = fops_open,
+ .release = fops_release,
+ .read = fops_read,
+ .poll = fops_poll,
+ .unlocked_ioctl = video_ioctl2,
+};
+
+static const struct v4l2_ioctl_ops vbi_ioctl_ops = {
+ .vidioc_s_std = vidioc_s_std,
+ .vidioc_enum_input = vidioc_enum_input,
+ .vidioc_g_input = vidioc_g_input,
+ .vidioc_s_input = vidioc_s_input,
+ .vidioc_g_tuner = vidioc_g_tuner,
+ .vidioc_s_tuner = vidioc_s_tuner,
+ .vidioc_g_frequency = vidioc_g_frequency,
+ .vidioc_s_frequency = vidioc_s_frequency,
+ .vidioc_s_ctrl = vidioc_s_ctrl,
+ .vidioc_g_ctrl = vidioc_g_ctrl,
+ .vidioc_querycap = vidioc_querycap,
+ .vidioc_enum_fmt_vid_cap = vidioc_enum_fmt_vid_cap,
+ .vidioc_g_fmt_vid_cap = vidioc_g_fmt_vid_cap,
+ .vidioc_try_fmt_vid_cap = vidioc_try_fmt_vid_cap,
+ .vidioc_s_fmt_vid_cap = vidioc_s_fmt_vid_cap,
+ .vidioc_g_ext_ctrls = vidioc_g_ext_ctrls,
+ .vidioc_s_ext_ctrls = vidioc_s_ext_ctrls,
+ .vidioc_try_ext_ctrls = vidioc_try_ext_ctrls,
+ .vidioc_log_status = vidioc_log_status,
+ .vidioc_queryctrl = vidioc_queryctrl,
+// .vidioc_g_chip_ident = saa7164_g_chip_ident,
+#ifdef CONFIG_VIDEO_ADV_DEBUG
+// .vidioc_g_register = saa7164_g_register,
+// .vidioc_s_register = saa7164_s_register,
+#endif
+ .vidioc_g_fmt_vbi_cap = saa7164_vbi_fmt,
+ .vidioc_try_fmt_vbi_cap = saa7164_vbi_fmt,
+ .vidioc_s_fmt_vbi_cap = saa7164_vbi_fmt,
+};
+
+static struct video_device saa7164_vbi_template = {
+ .name = "saa7164",
+ .fops = &vbi_fops,
+ .ioctl_ops = &vbi_ioctl_ops,
+ .minor = -1,
+ .tvnorms = SAA7164_NORMS,
+ .current_norm = V4L2_STD_NTSC_M,
+};
+
+static struct video_device *saa7164_vbi_alloc(
+ struct saa7164_port *port,
+ struct pci_dev *pci,
+ struct video_device *template,
+ char *type)
+{
+ struct video_device *vfd;
+ struct saa7164_dev *dev = port->dev;
+
+ dprintk(DBGLVL_VBI, "%s()\n", __func__);
+
+ vfd = video_device_alloc();
+ if (NULL == vfd)
+ return NULL;
+
+ *vfd = *template;
+ snprintf(vfd->name, sizeof(vfd->name), "%s %s (%s)", dev->name,
+ type, saa7164_boards[dev->board].name);
+
+ vfd->parent = &pci->dev;
+ vfd->release = video_device_release;
+ return vfd;
+}
+
+int saa7164_vbi_register(struct saa7164_port *port)
+{
+ struct saa7164_dev *dev = port->dev;
+ int result = -ENODEV;
+
+ dprintk(DBGLVL_VBI, "%s()\n", __func__);
+
+ if (port->type != SAA7164_MPEG_VBI)
+ BUG();
+
+ /* Sanity check that the PCI configuration space is active */
+ if (port->hwcfg.BARLocation == 0) {
+ printk(KERN_ERR "%s() failed "
+ "(errno = %d), NO PCI configuration\n",
+ __func__, result);
+ result = -ENOMEM;
+ goto failed;
+ }
+
+ /* Establish VBI defaults here */
+
+ /* Allocate and register the video device node */
+ port->v4l_device = saa7164_vbi_alloc(port,
+ dev->pci, &saa7164_vbi_template, "vbi");
+
+ if (port->v4l_device == NULL) {
+ printk(KERN_INFO "%s: can't allocate vbi device\n",
+ dev->name);
+ result = -ENOMEM;
+ goto failed;
+ }
+
+ video_set_drvdata(port->v4l_device, port);
+ result = video_register_device(port->v4l_device,
+ VFL_TYPE_VBI, -1);
+ if (result < 0) {
+ printk(KERN_INFO "%s: can't register vbi device\n",
+ dev->name);
+ /* TODO: We're going to leak here if we don't dealloc
+ * the buffers above. The unreg function can't deal with it.
+ */
+ goto failed;
+ }
+
+ printk(KERN_INFO "%s: registered device vbi%d [vbi]\n",
+ dev->name, port->v4l_device->num);
+
+ /* Configure the hardware defaults */
+
+ result = 0;
+failed:
+ return result;
+}
+
+void saa7164_vbi_unregister(struct saa7164_port *port)
+{
+ struct saa7164_dev *dev = port->dev;
+
+ dprintk(DBGLVL_VBI, "%s(port=%d)\n", __func__, port->nr);
+
+ if (port->type != SAA7164_MPEG_VBI)
+ BUG();
+
+ if (port->v4l_device) {
+ if (port->v4l_device->minor != -1)
+ video_unregister_device(port->v4l_device);
+ else
+ video_device_release(port->v4l_device);
+
+ port->v4l_device = NULL;
+ }
+
+}
diff --git a/drivers/media/video/saa7164/saa7164.h b/drivers/media/video/saa7164/saa7164.h
index 42660b546f0e..041ae8e20f68 100644
--- a/drivers/media/video/saa7164/saa7164.h
+++ b/drivers/media/video/saa7164/saa7164.h
@@ -1,7 +1,7 @@
/*
* Driver for the NXP SAA7164 PCIe bridge
*
- * Copyright (c) 2009 Steven Toth <stoth@kernellabs.com>
+ * Copyright (c) 2010 Steven Toth <stoth@kernellabs.com>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
@@ -48,18 +48,28 @@
#include <linux/i2c.h>
#include <linux/i2c-algo-bit.h>
#include <linux/kdev_t.h>
+#include <linux/version.h>
+#include <linux/mutex.h>
+#include <linux/crc32.h>
+#include <linux/kthread.h>
+#include <linux/freezer.h>
#include <media/tuner.h>
#include <media/tveeprom.h>
#include <media/videobuf-dma-sg.h>
#include <media/videobuf-dvb.h>
+#include <dvb_demux.h>
+#include <dvb_frontend.h>
+#include <dvb_net.h>
+#include <dvbdev.h>
+#include <dmxdev.h>
+#include <media/v4l2-common.h>
+#include <media/v4l2-ioctl.h>
+#include <media/v4l2-chip-ident.h>
#include "saa7164-reg.h"
#include "saa7164-types.h"
-#include <linux/version.h>
-#include <linux/mutex.h>
-
#define SAA7164_MAXBOARDS 8
#define UNSET (-1U)
@@ -76,7 +86,19 @@
#define SAA7164_MAX_UNITS 8
#define SAA7164_TS_NUMBER_OF_LINES 312
+#define SAA7164_PS_NUMBER_OF_LINES 256
#define SAA7164_PT_ENTRIES 16 /* (312 * 188) / 4096 */
+#define SAA7164_MAX_ENCODER_BUFFERS 64 /* max 5secs of latency at 6Mbps */
+#define SAA7164_MAX_VBI_BUFFERS 64
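The "5secs of latency at 6Mbps" figure roughly checks out if each hardware buffer holds SAA7164_TS_NUMBER_OF_LINES * 188 bytes, as the SAA7164_PT_ENTRIES comment suggests (an assumption):

64 buffers * 312 lines * 188 bytes = 3,753,984 bytes
6 Mbps = 750,000 bytes/s
3,753,984 / 750,000 ≈ 5.0 seconds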
+
+/* Port related defines */
+#define SAA7164_PORT_TS1 (0)
+#define SAA7164_PORT_TS2 (SAA7164_PORT_TS1 + 1)
+#define SAA7164_PORT_ENC1 (SAA7164_PORT_TS2 + 1)
+#define SAA7164_PORT_ENC2 (SAA7164_PORT_ENC1 + 1)
+#define SAA7164_PORT_VBI1 (SAA7164_PORT_ENC2 + 1)
+#define SAA7164_PORT_VBI2 (SAA7164_PORT_VBI1 + 1)
+#define SAA7164_MAX_PORTS (SAA7164_PORT_VBI2 + 1)
#define DBGLVL_FW 4
#define DBGLVL_DVB 8
@@ -86,10 +108,18 @@
#define DBGLVL_BUS 128
#define DBGLVL_IRQ 256
#define DBGLVL_BUF 512
+#define DBGLVL_ENC 1024
+#define DBGLVL_VBI 2048
+#define DBGLVL_THR 4096
+#define DBGLVL_CPU 8192
+
+#define SAA7164_NORMS (V4L2_STD_NTSC_M | V4L2_STD_NTSC_M_JP | V4L2_STD_NTSC_443)
enum port_t {
SAA7164_MPEG_UNDEFINED = 0,
SAA7164_MPEG_DVB,
+ SAA7164_MPEG_ENCODER,
+ SAA7164_MPEG_VBI,
};
enum saa7164_i2c_bus_nr {
@@ -134,7 +164,8 @@ struct saa7164_unit {
struct saa7164_board {
char *name;
- enum port_t porta, portb;
+ enum port_t porta, portb, portc,
+ portd, porte, portf;
enum {
SAA7164_CHIP_UNDEFINED = 0,
SAA7164_CHIP_REV2,
@@ -149,6 +180,42 @@ struct saa7164_subid {
u32 card;
};
+struct saa7164_encoder_fh {
+ struct saa7164_port *port;
+// u32 freq;
+// u32 tuner_type;
+ atomic_t v4l_reading;
+};
+
+struct saa7164_vbi_fh {
+ struct saa7164_port *port;
+// u32 freq;
+// u32 tuner_type;
+ atomic_t v4l_reading;
+};
+
+struct saa7164_histogram_bucket {
+ u32 val;
+ u32 count;
+ u64 update_time;
+};
+
+struct saa7164_histogram {
+ char name[32];
+ struct saa7164_histogram_bucket counter1[64];
+};
+
+struct saa7164_user_buffer {
+ struct list_head list;
+
+ /* Attributes */
+ u8 *data;
+ u32 pos;
+ u32 actual_size;
+
+ u32 crc;
+};
+
struct saa7164_fw_status {
/* RISC Core details */
@@ -191,14 +258,60 @@ struct saa7164_i2c {
u32 i2c_rc;
};
-struct saa7164_tsport;
+struct saa7164_ctrl {
+ struct v4l2_queryctrl v;
+};
+
+struct saa7164_tvnorm {
+ char *name;
+ v4l2_std_id id;
+// u32 cxiformat;
+// u32 cxoformat;
+};
+
+struct saa7164_encoder_params {
+ struct saa7164_tvnorm encodernorm;
+ u32 height;
+ u32 width;
+ u32 is_50hz;
+ u32 bitrate; /* bps */
+ u32 bitrate_peak; /* bps */
+ u32 bitrate_mode;
+ u32 stream_type; /* V4L2_MPEG_STREAM_TYPE_MPEG2_TS */
+
+ u32 audio_sampling_freq;
+ u32 ctl_mute;
+ u32 ctl_aspect;
+ u32 refdist;
+ u32 gop_size;
+};
+
+struct saa7164_vbi_params {
+ struct saa7164_tvnorm encodernorm;
+ u32 height;
+ u32 width;
+ u32 is_50hz;
+ u32 bitrate; /* bps */
+ u32 bitrate_peak; /* bps */
+ u32 bitrate_mode;
+ u32 stream_type; /* V4L2_MPEG_STREAM_TYPE_MPEG2_TS */
+
+ u32 audio_sampling_freq;
+ u32 ctl_mute;
+ u32 ctl_aspect;
+ u32 refdist;
+ u32 gop_size;
+};
+
+struct saa7164_port;
struct saa7164_buffer {
struct list_head list;
- u32 nr;
+ /* Note which h/w buffer list index position we occupy */
+ int idx;
- struct saa7164_tsport *port;
+ struct saa7164_port *port;
/* Hardware Specific */
/* PCI Memory allocations */
@@ -206,28 +319,33 @@ struct saa7164_buffer {
/* A block of page align PCI memory */
u32 pci_size; /* PCI allocation size in bytes */
- u64 *cpu; /* Virtual address */
+ u64 __iomem *cpu; /* Virtual address */
dma_addr_t dma; /* Physical address */
+ u32 crc; /* Checksum for the entire buffer data */
/* A page table that splits the block into a number of entries */
u32 pt_size; /* PCI allocation size in bytes */
- u64 *pt_cpu; /* Virtual address */
+ u64 __iomem *pt_cpu; /* Virtual address */
dma_addr_t pt_dma; /* Physical address */
+
+ /* Encoder fops */
+ u32 pos;
+ u32 actual_size;
};
-struct saa7164_tsport {
+struct saa7164_port {
struct saa7164_dev *dev;
- int nr;
enum port_t type;
+ int nr;
- struct saa7164_dvb dvb;
+ /* --- Generic port attributes --- */
- /* HW related stream parameters */
- tmHWStreamParameters_t hw_streamingparams;
+ /* HW stream parameters */
+ struct tmHWStreamParameters hw_streamingparams;
/* DMA configuration values, seeded during initialization */
- tmComResDMATermDescrHeader_t hwcfg;
+ struct tmComResDMATermDescrHeader hwcfg;
/* hardware specific registers */
u32 bufcounter;
@@ -239,11 +357,76 @@ struct saa7164_tsport {
u64 bufptr64;
u32 numpte; /* Number of entries in array, only valid in head */
+
struct mutex dmaqueue_lock;
- struct mutex dummy_dmaqueue_lock;
struct saa7164_buffer dmaqueue;
- struct saa7164_buffer dummy_dmaqueue;
+ u64 last_irq_msecs, last_svc_msecs;
+ u64 last_irq_msecs_diff, last_svc_msecs_diff;
+ u32 last_svc_wp;
+ u32 last_svc_rp;
+ u64 last_irq_svc_msecs_diff;
+ u64 last_read_msecs, last_read_msecs_diff;
+ u64 last_poll_msecs, last_poll_msecs_diff;
+
+ struct saa7164_histogram irq_interval;
+ struct saa7164_histogram svc_interval;
+ struct saa7164_histogram irq_svc_interval;
+ struct saa7164_histogram read_interval;
+ struct saa7164_histogram poll_interval;
+
+ /* --- DVB Transport Specific --- */
+ struct saa7164_dvb dvb;
+
+ /* --- Encoder/V4L related attributes --- */
+ /* Encoder */
+ /* Defaults established in saa7164-encoder.c */
+ struct saa7164_tvnorm encodernorm;
+ u32 height;
+ u32 width;
+ u32 freq;
+ u32 ts_packet_size;
+ u32 ts_packet_count;
+ u8 mux_input;
+ u8 encoder_profile;
+ u8 video_format;
+ u8 audio_format;
+ u8 video_resolution;
+ u16 ctl_brightness;
+ u16 ctl_contrast;
+ u16 ctl_hue;
+ u16 ctl_saturation;
+ u16 ctl_sharpness;
+ s8 ctl_volume;
+
+ struct tmComResAFeatureDescrHeader audfeat;
+ struct tmComResEncoderDescrHeader encunit;
+ struct tmComResProcDescrHeader vidproc;
+ struct tmComResExtDevDescrHeader ifunit;
+ struct tmComResTunerDescrHeader tunerunit;
+
+ struct work_struct workenc;
+
+ /* V4L Encoder Video */
+ struct saa7164_encoder_params encoder_params;
+ struct video_device *v4l_device;
+ atomic_t v4l_reader_count;
+
+ struct saa7164_buffer list_buf_used;
+ struct saa7164_buffer list_buf_free;
+ wait_queue_head_t wait_read;
+
+ /* V4L VBI */
+ struct tmComResVBIFormatDescrHeader vbi_fmt_ntsc;
+ struct saa7164_vbi_params vbi_params;
+
+ /* Debug */
+ u32 sync_errors;
+ u32 v_cc_errors;
+ u32 a_cc_errors;
+ u8 last_v_cc;
+ u8 last_a_cc;
+ u32 done_first_interrupt;
};
struct saa7164_dev {
@@ -268,12 +451,13 @@ struct saa7164_dev {
/* firmware status */
struct saa7164_fw_status fw_status;
+ u32 firmwareloaded;
- tmComResHWDescr_t hwdesc;
- tmComResInterfaceDescr_t intfdesc;
- tmComResBusDescr_t busdesc;
+ struct tmComResHWDescr hwdesc;
+ struct tmComResInterfaceDescr intfdesc;
+ struct tmComResBusDescr busdesc;
- tmComResBusInfo_t bus;
+ struct tmComResBusInfo bus;
/* Interrupt status and ack registers */
u32 int_status;
@@ -286,15 +470,22 @@ struct saa7164_dev {
struct saa7164_i2c i2c_bus[3];
/* Transport related */
- struct saa7164_tsport ts1, ts2;
+ struct saa7164_port ports[SAA7164_MAX_PORTS];
/* Deferred command/api interrupts handling */
struct work_struct workcmd;
+ /* A kernel thread to monitor the firmware log, used
+ * only in debug mode.
+ */
+ struct task_struct *kthread;
+
};
extern struct list_head saa7164_devlist;
extern unsigned int waitsecs;
+extern unsigned int encoder_buffers;
+extern unsigned int vbi_buffers;
/* ----------------------------------------------------------- */
/* saa7164-core.c */
@@ -302,6 +493,7 @@ void saa7164_dumpregs(struct saa7164_dev *dev, u32 addr);
void saa7164_dumphex16(struct saa7164_dev *dev, u8 *buf, int len);
void saa7164_getfirmwarestatus(struct saa7164_dev *dev);
u32 saa7164_getcurrentfirmwareversion(struct saa7164_dev *dev);
+void saa7164_histogram_update(struct saa7164_histogram *hg, u32 val);
/* ----------------------------------------------------------- */
/* saa7164-fw.c */
@@ -318,14 +510,14 @@ extern void saa7164_call_i2c_clients(struct saa7164_i2c *bus,
/* saa7164-bus.c */
int saa7164_bus_setup(struct saa7164_dev *dev);
void saa7164_bus_dump(struct saa7164_dev *dev);
-int saa7164_bus_set(struct saa7164_dev *dev, tmComResInfo_t* msg, void *buf);
-int saa7164_bus_get(struct saa7164_dev *dev, tmComResInfo_t* msg,
+int saa7164_bus_set(struct saa7164_dev *dev, struct tmComResInfo* msg, void *buf);
+int saa7164_bus_get(struct saa7164_dev *dev, struct tmComResInfo* msg,
void *buf, int peekonly);
/* ----------------------------------------------------------- */
/* saa7164-cmd.c */
int saa7164_cmd_send(struct saa7164_dev *dev,
- u8 id, tmComResCmd_t command, u16 controlselector,
+ u8 id, enum tmComResCmd command, u16 controlselector,
u16 size, void *buf);
void saa7164_cmd_signal(struct saa7164_dev *dev, u8 seqno);
int saa7164_irq_dequeue(struct saa7164_dev *dev);
@@ -343,7 +535,24 @@ int saa7164_api_dif_write(struct saa7164_i2c *bus, u8 addr,
int saa7164_api_read_eeprom(struct saa7164_dev *dev, u8 *buf, int buflen);
int saa7164_api_set_gpiobit(struct saa7164_dev *dev, u8 unitid, u8 pin);
int saa7164_api_clear_gpiobit(struct saa7164_dev *dev, u8 unitid, u8 pin);
-int saa7164_api_transition_port(struct saa7164_tsport *port, u8 mode);
+int saa7164_api_transition_port(struct saa7164_port *port, u8 mode);
+int saa7164_api_initialize_dif(struct saa7164_port *port);
+int saa7164_api_configure_dif(struct saa7164_port *port, u32 std);
+int saa7164_api_set_encoder(struct saa7164_port *port);
+int saa7164_api_get_encoder(struct saa7164_port *port);
+int saa7164_api_set_aspect_ratio(struct saa7164_port *port);
+int saa7164_api_set_usercontrol(struct saa7164_port *port, u8 ctl);
+int saa7164_api_get_usercontrol(struct saa7164_port *port, u8 ctl);
+int saa7164_api_set_videomux(struct saa7164_port *port);
+int saa7164_api_audio_mute(struct saa7164_port *port, int mute);
+int saa7164_api_set_audio_volume(struct saa7164_port *port, s8 level);
+int saa7164_api_set_audio_std(struct saa7164_port *port);
+int saa7164_api_set_audio_detection(struct saa7164_port *port, int autodetect);
+int saa7164_api_get_videomux(struct saa7164_port *port);
+int saa7164_api_set_vbi_format(struct saa7164_port *port);
+int saa7164_api_set_debug(struct saa7164_dev *dev, u8 level);
+int saa7164_api_collect_debug(struct saa7164_dev *dev);
+int saa7164_api_get_load_info(struct saa7164_dev *dev, struct tmFwInfoStruct *i);
/* ----------------------------------------------------------- */
/* saa7164-cards.c */
@@ -363,18 +572,36 @@ extern char *saa7164_unitid_name(struct saa7164_dev *dev, u8 unitid);
/* ----------------------------------------------------------- */
/* saa7164-dvb.c */
-extern int saa7164_dvb_register(struct saa7164_tsport *port);
-extern int saa7164_dvb_unregister(struct saa7164_tsport *port);
+extern int saa7164_dvb_register(struct saa7164_port *port);
+extern int saa7164_dvb_unregister(struct saa7164_port *port);
/* ----------------------------------------------------------- */
/* saa7164-buffer.c */
-extern struct saa7164_buffer *saa7164_buffer_alloc(struct saa7164_tsport *port,
- u32 len);
-extern int saa7164_buffer_dealloc(struct saa7164_tsport *port,
- struct saa7164_buffer *buf);
+extern struct saa7164_buffer *saa7164_buffer_alloc(
+ struct saa7164_port *port, u32 len);
+extern int saa7164_buffer_dealloc(struct saa7164_buffer *buf);
+extern void saa7164_buffer_display(struct saa7164_buffer *buf);
+extern int saa7164_buffer_activate(struct saa7164_buffer *buf, int i);
+extern int saa7164_buffer_cfg_port(struct saa7164_port *port);
+extern struct saa7164_user_buffer *saa7164_buffer_alloc_user(
+ struct saa7164_dev *dev, u32 len);
+extern void saa7164_buffer_dealloc_user(struct saa7164_user_buffer *buf);
+extern int saa7164_buffer_zero_offsets(struct saa7164_port *port, int i);
+
+/* ----------------------------------------------------------- */
+/* saa7164-encoder.c */
+int saa7164_encoder_register(struct saa7164_port *port);
+void saa7164_encoder_unregister(struct saa7164_port *port);
+
+/* ----------------------------------------------------------- */
+/* saa7164-vbi.c */
+int saa7164_vbi_register(struct saa7164_port *port);
+void saa7164_vbi_unregister(struct saa7164_port *port);
/* ----------------------------------------------------------- */
+extern unsigned int crc_checking;
+
extern unsigned int saa_debug;
#define dprintk(level, fmt, arg...)\
do { if (saa_debug & level)\
@@ -394,7 +621,6 @@ extern unsigned int saa_debug;
#define saa7164_readl(reg) readl(dev->lmmio + ((reg) >> 2))
#define saa7164_writel(reg, value) writel((value), dev->lmmio + ((reg) >> 2))
-
#define saa7164_readb(reg) readl(dev->bmmio + (reg))
#define saa7164_writeb(reg, value) writel((value), dev->bmmio + (reg))
diff --git a/drivers/media/video/saa717x.c b/drivers/media/video/saa717x.c
index 45f8bfc1342e..b6172c2c517e 100644
--- a/drivers/media/video/saa717x.c
+++ b/drivers/media/video/saa717x.c
@@ -39,7 +39,6 @@
#include <linux/i2c.h>
#include <media/v4l2-device.h>
#include <media/v4l2-ctrls.h>
-#include <media/v4l2-i2c-drv.h>
MODULE_DESCRIPTION("Philips SAA717x audio/video decoder driver");
MODULE_AUTHOR("K. Ohta, T. Adachi, Hans Verkuil");
@@ -1366,9 +1365,25 @@ static const struct i2c_device_id saa717x_id[] = {
};
MODULE_DEVICE_TABLE(i2c, saa717x_id);
-static struct v4l2_i2c_driver_data v4l2_i2c_data = {
- .name = "saa717x",
- .probe = saa717x_probe,
- .remove = saa717x_remove,
- .id_table = saa717x_id,
+static struct i2c_driver saa717x_driver = {
+ .driver = {
+ .owner = THIS_MODULE,
+ .name = "saa717x",
+ },
+ .probe = saa717x_probe,
+ .remove = saa717x_remove,
+ .id_table = saa717x_id,
};
+
+static __init int init_saa717x(void)
+{
+ return i2c_add_driver(&saa717x_driver);
+}
+
+static __exit void exit_saa717x(void)
+{
+ i2c_del_driver(&saa717x_driver);
+}
+
+module_init(init_saa717x);
+module_exit(exit_saa717x);
diff --git a/drivers/media/video/saa7185.c b/drivers/media/video/saa7185.c
index 77db20392910..96f56c2f11f3 100644
--- a/drivers/media/video/saa7185.c
+++ b/drivers/media/video/saa7185.c
@@ -30,11 +30,9 @@
#include <linux/ioctl.h>
#include <asm/uaccess.h>
#include <linux/i2c.h>
-#include <linux/i2c-id.h>
#include <linux/videodev2.h>
#include <media/v4l2-device.h>
#include <media/v4l2-chip-ident.h>
-#include <media/v4l2-i2c-drv.h>
MODULE_DESCRIPTION("Philips SAA7185 video encoder driver");
MODULE_AUTHOR("Dave Perks");
@@ -366,9 +364,25 @@ static const struct i2c_device_id saa7185_id[] = {
};
MODULE_DEVICE_TABLE(i2c, saa7185_id);
-static struct v4l2_i2c_driver_data v4l2_i2c_data = {
- .name = "saa7185",
- .probe = saa7185_probe,
- .remove = saa7185_remove,
- .id_table = saa7185_id,
+static struct i2c_driver saa7185_driver = {
+ .driver = {
+ .owner = THIS_MODULE,
+ .name = "saa7185",
+ },
+ .probe = saa7185_probe,
+ .remove = saa7185_remove,
+ .id_table = saa7185_id,
};
+
+static __init int init_saa7185(void)
+{
+ return i2c_add_driver(&saa7185_driver);
+}
+
+static __exit void exit_saa7185(void)
+{
+ i2c_del_driver(&saa7185_driver);
+}
+
+module_init(init_saa7185);
+module_exit(exit_saa7185);
diff --git a/drivers/media/video/saa7191.c b/drivers/media/video/saa7191.c
index a2513772196b..211fa25a1239 100644
--- a/drivers/media/video/saa7191.c
+++ b/drivers/media/video/saa7191.c
@@ -23,7 +23,6 @@
#include <linux/i2c.h>
#include <media/v4l2-device.h>
#include <media/v4l2-chip-ident.h>
-#include <media/v4l2-i2c-drv.h>
#include "saa7191.h"
@@ -647,9 +646,25 @@ static const struct i2c_device_id saa7191_id[] = {
};
MODULE_DEVICE_TABLE(i2c, saa7191_id);
-static struct v4l2_i2c_driver_data v4l2_i2c_data = {
- .name = "saa7191",
- .probe = saa7191_probe,
- .remove = saa7191_remove,
- .id_table = saa7191_id,
+static struct i2c_driver saa7191_driver = {
+ .driver = {
+ .owner = THIS_MODULE,
+ .name = "saa7191",
+ },
+ .probe = saa7191_probe,
+ .remove = saa7191_remove,
+ .id_table = saa7191_id,
};
+
+static __init int init_saa7191(void)
+{
+ return i2c_add_driver(&saa7191_driver);
+}
+
+static __exit void exit_saa7191(void)
+{
+ i2c_del_driver(&saa7191_driver);
+}
+
+module_init(init_saa7191);
+module_exit(exit_saa7191);
diff --git a/drivers/media/video/se401.c b/drivers/media/video/se401.c
index 41d0166c0f95..41360d7c3e96 100644
--- a/drivers/media/video/se401.c
+++ b/drivers/media/video/se401.c
@@ -31,7 +31,6 @@ static const char version[] = "0.24";
#include <linux/init.h>
#include <linux/vmalloc.h>
#include <linux/slab.h>
-#include <linux/smp_lock.h>
#include <linux/pagemap.h>
#include <linux/usb.h>
#include "se401.h"
@@ -951,9 +950,9 @@ static int se401_open(struct file *file)
struct usb_se401 *se401 = (struct usb_se401 *)dev;
int err = 0;
- lock_kernel();
+ mutex_lock(&se401->lock);
if (se401->user) {
- unlock_kernel();
+ mutex_unlock(&se401->lock);
return -EBUSY;
}
se401->fbuf = rvmalloc(se401->maxframesize * SE401_NUMFRAMES);
@@ -962,7 +961,7 @@ static int se401_open(struct file *file)
else
err = -ENOMEM;
se401->user = !err;
- unlock_kernel();
+ mutex_unlock(&se401->lock);
return err;
}
diff --git a/drivers/media/video/sh_mobile_ceu_camera.c b/drivers/media/video/sh_mobile_ceu_camera.c
index 2b24bd0de3ad..5c209afb0ac8 100644
--- a/drivers/media/video/sh_mobile_ceu_camera.c
+++ b/drivers/media/video/sh_mobile_ceu_camera.c
@@ -245,7 +245,7 @@ static void free_buffer(struct videobuf_queue *vq,
if (in_interrupt())
BUG();
- videobuf_waiton(&buf->vb, 0, 0);
+ videobuf_waiton(vq, &buf->vb, 0, 0);
videobuf_dma_contig_free(vq, &buf->vb);
dev_dbg(dev, "%s freed\n", __func__);
buf->vb.state = VIDEOBUF_NEEDS_INIT;
@@ -1726,7 +1726,7 @@ static int sh_mobile_ceu_try_fmt(struct soc_camera_device *icd,
return ret;
}
-static int sh_mobile_ceu_reqbufs(struct soc_camera_file *icf,
+static int sh_mobile_ceu_reqbufs(struct soc_camera_device *icd,
struct v4l2_requestbuffers *p)
{
int i;
@@ -1740,7 +1740,7 @@ static int sh_mobile_ceu_reqbufs(struct soc_camera_file *icf,
for (i = 0; i < p->count; i++) {
struct sh_mobile_ceu_buffer *buf;
- buf = container_of(icf->vb_vidq.bufs[i],
+ buf = container_of(icd->vb_vidq.bufs[i],
struct sh_mobile_ceu_buffer, vb);
INIT_LIST_HEAD(&buf->vb.queue);
}
@@ -1750,10 +1750,10 @@ static int sh_mobile_ceu_reqbufs(struct soc_camera_file *icf,
static unsigned int sh_mobile_ceu_poll(struct file *file, poll_table *pt)
{
- struct soc_camera_file *icf = file->private_data;
+ struct soc_camera_device *icd = file->private_data;
struct sh_mobile_ceu_buffer *buf;
- buf = list_entry(icf->vb_vidq.stream.next,
+ buf = list_entry(icd->vb_vidq.stream.next,
struct sh_mobile_ceu_buffer, vb.stream);
poll_wait(file, &buf->vb.done, pt);
@@ -1786,23 +1786,7 @@ static void sh_mobile_ceu_init_videobuf(struct videobuf_queue *q,
V4L2_BUF_TYPE_VIDEO_CAPTURE,
pcdev->field,
sizeof(struct sh_mobile_ceu_buffer),
- icd);
-}
-
-static int sh_mobile_ceu_get_parm(struct soc_camera_device *icd,
- struct v4l2_streamparm *parm)
-{
- struct v4l2_subdev *sd = soc_camera_to_subdev(icd);
-
- return v4l2_subdev_call(sd, video, g_parm, parm);
-}
-
-static int sh_mobile_ceu_set_parm(struct soc_camera_device *icd,
- struct v4l2_streamparm *parm)
-{
- struct v4l2_subdev *sd = soc_camera_to_subdev(icd);
-
- return v4l2_subdev_call(sd, video, s_parm, parm);
+ icd, NULL);
}
static int sh_mobile_ceu_get_ctrl(struct soc_camera_device *icd,
@@ -1866,8 +1850,6 @@ static struct soc_camera_host_ops sh_mobile_ceu_host_ops = {
.try_fmt = sh_mobile_ceu_try_fmt,
.set_ctrl = sh_mobile_ceu_set_ctrl,
.get_ctrl = sh_mobile_ceu_get_ctrl,
- .set_parm = sh_mobile_ceu_set_parm,
- .get_parm = sh_mobile_ceu_get_parm,
.reqbufs = sh_mobile_ceu_reqbufs,
.poll = sh_mobile_ceu_poll,
.querycap = sh_mobile_ceu_querycap,
diff --git a/drivers/media/video/sh_vou.c b/drivers/media/video/sh_vou.c
index d394187eb701..4e5a8cf76ded 100644
--- a/drivers/media/video/sh_vou.c
+++ b/drivers/media/video/sh_vou.c
@@ -230,7 +230,7 @@ static void free_buffer(struct videobuf_queue *vq, struct videobuf_buffer *vb)
BUG_ON(in_interrupt());
/* Wait until this buffer is no longer in STATE_QUEUED or STATE_ACTIVE */
- videobuf_waiton(vb, 0, 0);
+ videobuf_waiton(vq, vb, 0, 0);
videobuf_dma_contig_free(vq, vb);
vb->state = VIDEOBUF_NEEDS_INIT;
}
@@ -1189,7 +1189,8 @@ static int sh_vou_open(struct file *file)
vou_dev->v4l2_dev.dev, &vou_dev->lock,
V4L2_BUF_TYPE_VIDEO_OUTPUT,
V4L2_FIELD_NONE,
- sizeof(struct videobuf_buffer), vdev);
+ sizeof(struct videobuf_buffer), vdev,
+ NULL);
return 0;
}
@@ -1405,7 +1406,7 @@ static int __devinit sh_vou_probe(struct platform_device *pdev)
goto ereset;
subdev = v4l2_i2c_new_subdev_board(&vou_dev->v4l2_dev, i2c_adap,
- vou_pdata->module_name, vou_pdata->board_info, NULL);
+ vou_pdata->board_info, NULL);
if (!subdev) {
ret = -ENOMEM;
goto ei2cnd;
diff --git a/drivers/media/video/sn9c102/sn9c102_devtable.h b/drivers/media/video/sn9c102/sn9c102_devtable.h
index b6643ca7656a..ccfa59c54552 100644
--- a/drivers/media/video/sn9c102/sn9c102_devtable.h
+++ b/drivers/media/video/sn9c102/sn9c102_devtable.h
@@ -116,10 +116,14 @@ static const struct usb_device_id sn9c102_id_table[] = {
{ SN9C102_USB_DEVICE(0x0c45, 0x60fe, BRIDGE_SN9C105), },
/* SN9C120 */
{ SN9C102_USB_DEVICE(0x0458, 0x7025, BRIDGE_SN9C120), },
+#if !defined CONFIG_USB_GSPCA_SONIXJ && !defined CONFIG_USB_GSPCA_SONIXJ_MODULE
{ SN9C102_USB_DEVICE(0x0c45, 0x6102, BRIDGE_SN9C120), },
+#endif
{ SN9C102_USB_DEVICE(0x0c45, 0x6108, BRIDGE_SN9C120), },
{ SN9C102_USB_DEVICE(0x0c45, 0x610f, BRIDGE_SN9C120), },
+#if !defined CONFIG_USB_GSPCA_SONIXJ && !defined CONFIG_USB_GSPCA_SONIXJ_MODULE
{ SN9C102_USB_DEVICE(0x0c45, 0x6130, BRIDGE_SN9C120), },
+#endif
/* { SN9C102_USB_DEVICE(0x0c45, 0x6138, BRIDGE_SN9C120), }, MO8000 */
#if !defined CONFIG_USB_GSPCA_SONIXJ && !defined CONFIG_USB_GSPCA_SONIXJ_MODULE
{ SN9C102_USB_DEVICE(0x0c45, 0x613a, BRIDGE_SN9C120), },
diff --git a/drivers/media/video/soc_camera.c b/drivers/media/video/soc_camera.c
index a499cacec1f3..335120c2021b 100644
--- a/drivers/media/video/soc_camera.c
+++ b/drivers/media/video/soc_camera.c
@@ -92,8 +92,7 @@ EXPORT_SYMBOL(soc_camera_apply_sensor_flags);
static int soc_camera_try_fmt_vid_cap(struct file *file, void *priv,
struct v4l2_format *f)
{
- struct soc_camera_file *icf = file->private_data;
- struct soc_camera_device *icd = icf->icd;
+ struct soc_camera_device *icd = file->private_data;
struct soc_camera_host *ici = to_soc_camera_host(icd->dev.parent);
WARN_ON(priv != file->private_data);
@@ -105,8 +104,7 @@ static int soc_camera_try_fmt_vid_cap(struct file *file, void *priv,
static int soc_camera_enum_input(struct file *file, void *priv,
struct v4l2_input *inp)
{
- struct soc_camera_file *icf = file->private_data;
- struct soc_camera_device *icd = icf->icd;
+ struct soc_camera_device *icd = file->private_data;
int ret = 0;
if (inp->index != 0)
@@ -141,8 +139,7 @@ static int soc_camera_s_input(struct file *file, void *priv, unsigned int i)
static int soc_camera_s_std(struct file *file, void *priv, v4l2_std_id *a)
{
- struct soc_camera_file *icf = file->private_data;
- struct soc_camera_device *icd = icf->icd;
+ struct soc_camera_device *icd = file->private_data;
struct v4l2_subdev *sd = soc_camera_to_subdev(icd);
return v4l2_subdev_call(sd, core, s_std, *a);
@@ -152,47 +149,59 @@ static int soc_camera_reqbufs(struct file *file, void *priv,
struct v4l2_requestbuffers *p)
{
int ret;
- struct soc_camera_file *icf = file->private_data;
- struct soc_camera_device *icd = icf->icd;
+ struct soc_camera_device *icd = file->private_data;
struct soc_camera_host *ici = to_soc_camera_host(icd->dev.parent);
WARN_ON(priv != file->private_data);
- ret = videobuf_reqbufs(&icf->vb_vidq, p);
+ if (icd->streamer && icd->streamer != file)
+ return -EBUSY;
+
+ ret = videobuf_reqbufs(&icd->vb_vidq, p);
if (ret < 0)
return ret;
- return ici->ops->reqbufs(icf, p);
+ ret = ici->ops->reqbufs(icd, p);
+ if (!ret && !icd->streamer)
+ icd->streamer = file;
+
+ return ret;
}
static int soc_camera_querybuf(struct file *file, void *priv,
struct v4l2_buffer *p)
{
- struct soc_camera_file *icf = file->private_data;
+ struct soc_camera_device *icd = file->private_data;
WARN_ON(priv != file->private_data);
- return videobuf_querybuf(&icf->vb_vidq, p);
+ return videobuf_querybuf(&icd->vb_vidq, p);
}
static int soc_camera_qbuf(struct file *file, void *priv,
struct v4l2_buffer *p)
{
- struct soc_camera_file *icf = file->private_data;
+ struct soc_camera_device *icd = file->private_data;
WARN_ON(priv != file->private_data);
- return videobuf_qbuf(&icf->vb_vidq, p);
+ if (icd->streamer != file)
+ return -EBUSY;
+
+ return videobuf_qbuf(&icd->vb_vidq, p);
}
static int soc_camera_dqbuf(struct file *file, void *priv,
struct v4l2_buffer *p)
{
- struct soc_camera_file *icf = file->private_data;
+ struct soc_camera_device *icd = file->private_data;
WARN_ON(priv != file->private_data);
- return videobuf_dqbuf(&icf->vb_vidq, p, file->f_flags & O_NONBLOCK);
+ if (icd->streamer != file)
+ return -EBUSY;
+
+ return videobuf_dqbuf(&icd->vb_vidq, p, file->f_flags & O_NONBLOCK);
}
/* Always entered with .video_lock held */
@@ -280,10 +289,9 @@ static void soc_camera_free_user_formats(struct soc_camera_device *icd)
((x) >> 24) & 0xff
/* Called with .vb_lock held, or from the first open(2), see comment there */
-static int soc_camera_set_fmt(struct soc_camera_file *icf,
+static int soc_camera_set_fmt(struct soc_camera_device *icd,
struct v4l2_format *f)
{
- struct soc_camera_device *icd = icf->icd;
struct soc_camera_host *ici = to_soc_camera_host(icd->dev.parent);
struct v4l2_pix_format *pix = &f->fmt.pix;
int ret;
@@ -309,7 +317,7 @@ static int soc_camera_set_fmt(struct soc_camera_file *icf,
icd->user_width = pix->width;
icd->user_height = pix->height;
icd->colorspace = pix->colorspace;
- icf->vb_vidq.field =
+ icd->vb_vidq.field =
icd->field = pix->field;
if (f->type != V4L2_BUF_TYPE_VIDEO_CAPTURE)
@@ -331,7 +339,6 @@ static int soc_camera_open(struct file *file)
dev);
struct soc_camera_link *icl = to_soc_camera_link(icd);
struct soc_camera_host *ici;
- struct soc_camera_file *icf;
int ret;
if (!icd->ops)
@@ -340,14 +347,9 @@ static int soc_camera_open(struct file *file)
ici = to_soc_camera_host(icd->dev.parent);
- icf = vmalloc(sizeof(*icf));
- if (!icf)
- return -ENOMEM;
-
if (!try_module_get(ici->ops->owner)) {
dev_err(&icd->dev, "Couldn't lock capture bus driver.\n");
- ret = -EINVAL;
- goto emgi;
+ return -EINVAL;
}
/*
@@ -356,7 +358,6 @@ static int soc_camera_open(struct file *file)
*/
mutex_lock(&icd->video_lock);
- icf->icd = icd;
icd->use_count++;
/* Now we really have to activate the camera */
@@ -401,15 +402,15 @@ static int soc_camera_open(struct file *file)
* apart from someone else calling open() simultaneously, but
* .video_lock is protecting us against it.
*/
- ret = soc_camera_set_fmt(icf, &f);
+ ret = soc_camera_set_fmt(icd, &f);
if (ret < 0)
goto esfmt;
}
- file->private_data = icf;
+ file->private_data = icd;
dev_dbg(&icd->dev, "camera device open\n");
- ici->ops->init_videobuf(&icf->vb_vidq, icd);
+ ici->ops->init_videobuf(&icd->vb_vidq, icd);
mutex_unlock(&icd->video_lock);
@@ -430,15 +431,13 @@ epower:
icd->use_count--;
mutex_unlock(&icd->video_lock);
module_put(ici->ops->owner);
-emgi:
- vfree(icf);
+
return ret;
}
static int soc_camera_close(struct file *file)
{
- struct soc_camera_file *icf = file->private_data;
- struct soc_camera_device *icd = icf->icd;
+ struct soc_camera_device *icd = file->private_data;
struct soc_camera_host *ici = to_soc_camera_host(icd->dev.parent);
mutex_lock(&icd->video_lock);
@@ -455,12 +454,13 @@ static int soc_camera_close(struct file *file)
icl->power(icd->pdev, 0);
}
+ if (icd->streamer == file)
+ icd->streamer = NULL;
+
mutex_unlock(&icd->video_lock);
module_put(ici->ops->owner);
- vfree(icf);
-
dev_dbg(&icd->dev, "camera device close\n");
return 0;
@@ -469,8 +469,7 @@ static int soc_camera_close(struct file *file)
static ssize_t soc_camera_read(struct file *file, char __user *buf,
size_t count, loff_t *ppos)
{
- struct soc_camera_file *icf = file->private_data;
- struct soc_camera_device *icd = icf->icd;
+ struct soc_camera_device *icd = file->private_data;
int err = -EINVAL;
dev_err(&icd->dev, "camera device read not implemented\n");
@@ -480,13 +479,15 @@ static ssize_t soc_camera_read(struct file *file, char __user *buf,
static int soc_camera_mmap(struct file *file, struct vm_area_struct *vma)
{
- struct soc_camera_file *icf = file->private_data;
- struct soc_camera_device *icd = icf->icd;
+ struct soc_camera_device *icd = file->private_data;
int err;
dev_dbg(&icd->dev, "mmap called, vma=0x%08lx\n", (unsigned long)vma);
- err = videobuf_mmap_mapper(&icf->vb_vidq, vma);
+ if (icd->streamer != file)
+ return -EBUSY;
+
+ err = videobuf_mmap_mapper(&icd->vb_vidq, vma);
dev_dbg(&icd->dev, "vma start=0x%08lx, size=%ld, ret=%d\n",
(unsigned long)vma->vm_start,
@@ -498,11 +499,13 @@ static int soc_camera_mmap(struct file *file, struct vm_area_struct *vma)
static unsigned int soc_camera_poll(struct file *file, poll_table *pt)
{
- struct soc_camera_file *icf = file->private_data;
- struct soc_camera_device *icd = icf->icd;
+ struct soc_camera_device *icd = file->private_data;
struct soc_camera_host *ici = to_soc_camera_host(icd->dev.parent);
- if (list_empty(&icf->vb_vidq.stream)) {
+ if (icd->streamer != file)
+ return -EBUSY;
+
+ if (list_empty(&icd->vb_vidq.stream)) {
dev_err(&icd->dev, "Trying to poll with no queued buffers!\n");
return POLLERR;
}
@@ -523,24 +526,29 @@ static struct v4l2_file_operations soc_camera_fops = {
static int soc_camera_s_fmt_vid_cap(struct file *file, void *priv,
struct v4l2_format *f)
{
- struct soc_camera_file *icf = file->private_data;
- struct soc_camera_device *icd = icf->icd;
+ struct soc_camera_device *icd = file->private_data;
int ret;
WARN_ON(priv != file->private_data);
- mutex_lock(&icf->vb_vidq.vb_lock);
+ if (icd->streamer && icd->streamer != file)
+ return -EBUSY;
+
+ mutex_lock(&icd->vb_vidq.vb_lock);
- if (icf->vb_vidq.bufs[0]) {
+ if (icd->vb_vidq.bufs[0]) {
dev_err(&icd->dev, "S_FMT denied: queue initialised\n");
ret = -EBUSY;
goto unlock;
}
- ret = soc_camera_set_fmt(icf, f);
+ ret = soc_camera_set_fmt(icd, f);
+
+ if (!ret && !icd->streamer)
+ icd->streamer = file;
unlock:
- mutex_unlock(&icf->vb_vidq.vb_lock);
+ mutex_unlock(&icd->vb_vidq.vb_lock);
return ret;
}
@@ -548,8 +556,7 @@ unlock:
static int soc_camera_enum_fmt_vid_cap(struct file *file, void *priv,
struct v4l2_fmtdesc *f)
{
- struct soc_camera_file *icf = file->private_data;
- struct soc_camera_device *icd = icf->icd;
+ struct soc_camera_device *icd = file->private_data;
const struct soc_mbus_pixelfmt *format;
WARN_ON(priv != file->private_data);
@@ -568,15 +575,14 @@ static int soc_camera_enum_fmt_vid_cap(struct file *file, void *priv,
static int soc_camera_g_fmt_vid_cap(struct file *file, void *priv,
struct v4l2_format *f)
{
- struct soc_camera_file *icf = file->private_data;
- struct soc_camera_device *icd = icf->icd;
+ struct soc_camera_device *icd = file->private_data;
struct v4l2_pix_format *pix = &f->fmt.pix;
WARN_ON(priv != file->private_data);
pix->width = icd->user_width;
pix->height = icd->user_height;
- pix->field = icf->vb_vidq.field;
+ pix->field = icd->vb_vidq.field;
pix->pixelformat = icd->current_fmt->host_fmt->fourcc;
pix->bytesperline = soc_mbus_bytes_per_line(pix->width,
icd->current_fmt->host_fmt);
@@ -592,8 +598,7 @@ static int soc_camera_g_fmt_vid_cap(struct file *file, void *priv,
static int soc_camera_querycap(struct file *file, void *priv,
struct v4l2_capability *cap)
{
- struct soc_camera_file *icf = file->private_data;
- struct soc_camera_device *icd = icf->icd;
+ struct soc_camera_device *icd = file->private_data;
struct soc_camera_host *ici = to_soc_camera_host(icd->dev.parent);
WARN_ON(priv != file->private_data);
@@ -605,8 +610,7 @@ static int soc_camera_querycap(struct file *file, void *priv,
static int soc_camera_streamon(struct file *file, void *priv,
enum v4l2_buf_type i)
{
- struct soc_camera_file *icf = file->private_data;
- struct soc_camera_device *icd = icf->icd;
+ struct soc_camera_device *icd = file->private_data;
struct v4l2_subdev *sd = soc_camera_to_subdev(icd);
int ret;
@@ -615,12 +619,15 @@ static int soc_camera_streamon(struct file *file, void *priv,
if (i != V4L2_BUF_TYPE_VIDEO_CAPTURE)
return -EINVAL;
+ if (icd->streamer != file)
+ return -EBUSY;
+
mutex_lock(&icd->video_lock);
v4l2_subdev_call(sd, video, s_stream, 1);
/* This calls buf_queue from host driver's videobuf_queue_ops */
- ret = videobuf_streamon(&icf->vb_vidq);
+ ret = videobuf_streamon(&icd->vb_vidq);
mutex_unlock(&icd->video_lock);
@@ -630,8 +637,7 @@ static int soc_camera_streamon(struct file *file, void *priv,
static int soc_camera_streamoff(struct file *file, void *priv,
enum v4l2_buf_type i)
{
- struct soc_camera_file *icf = file->private_data;
- struct soc_camera_device *icd = icf->icd;
+ struct soc_camera_device *icd = file->private_data;
struct v4l2_subdev *sd = soc_camera_to_subdev(icd);
WARN_ON(priv != file->private_data);
@@ -639,13 +645,16 @@ static int soc_camera_streamoff(struct file *file, void *priv,
if (i != V4L2_BUF_TYPE_VIDEO_CAPTURE)
return -EINVAL;
+ if (icd->streamer != file)
+ return -EBUSY;
+
mutex_lock(&icd->video_lock);
/*
* This calls buf_release from host driver's videobuf_queue_ops for all
* remaining buffers. When the last buffer is freed, stop capture
*/
- videobuf_streamoff(&icf->vb_vidq);
+ videobuf_streamoff(&icd->vb_vidq);
v4l2_subdev_call(sd, video, s_stream, 0);
@@ -657,8 +666,7 @@ static int soc_camera_streamoff(struct file *file, void *priv,
static int soc_camera_queryctrl(struct file *file, void *priv,
struct v4l2_queryctrl *qc)
{
- struct soc_camera_file *icf = file->private_data;
- struct soc_camera_device *icd = icf->icd;
+ struct soc_camera_device *icd = file->private_data;
struct soc_camera_host *ici = to_soc_camera_host(icd->dev.parent);
int i;
@@ -689,8 +697,7 @@ static int soc_camera_queryctrl(struct file *file, void *priv,
static int soc_camera_g_ctrl(struct file *file, void *priv,
struct v4l2_control *ctrl)
{
- struct soc_camera_file *icf = file->private_data;
- struct soc_camera_device *icd = icf->icd;
+ struct soc_camera_device *icd = file->private_data;
struct soc_camera_host *ici = to_soc_camera_host(icd->dev.parent);
struct v4l2_subdev *sd = soc_camera_to_subdev(icd);
int ret;
@@ -709,8 +716,7 @@ static int soc_camera_g_ctrl(struct file *file, void *priv,
static int soc_camera_s_ctrl(struct file *file, void *priv,
struct v4l2_control *ctrl)
{
- struct soc_camera_file *icf = file->private_data;
- struct soc_camera_device *icd = icf->icd;
+ struct soc_camera_device *icd = file->private_data;
struct soc_camera_host *ici = to_soc_camera_host(icd->dev.parent);
struct v4l2_subdev *sd = soc_camera_to_subdev(icd);
int ret;
@@ -729,8 +735,7 @@ static int soc_camera_s_ctrl(struct file *file, void *priv,
static int soc_camera_cropcap(struct file *file, void *fh,
struct v4l2_cropcap *a)
{
- struct soc_camera_file *icf = file->private_data;
- struct soc_camera_device *icd = icf->icd;
+ struct soc_camera_device *icd = file->private_data;
struct soc_camera_host *ici = to_soc_camera_host(icd->dev.parent);
return ici->ops->cropcap(icd, a);
@@ -739,14 +744,13 @@ static int soc_camera_cropcap(struct file *file, void *fh,
static int soc_camera_g_crop(struct file *file, void *fh,
struct v4l2_crop *a)
{
- struct soc_camera_file *icf = file->private_data;
- struct soc_camera_device *icd = icf->icd;
+ struct soc_camera_device *icd = file->private_data;
struct soc_camera_host *ici = to_soc_camera_host(icd->dev.parent);
int ret;
- mutex_lock(&icf->vb_vidq.vb_lock);
+ mutex_lock(&icd->vb_vidq.vb_lock);
ret = ici->ops->get_crop(icd, a);
- mutex_unlock(&icf->vb_vidq.vb_lock);
+ mutex_unlock(&icd->vb_vidq.vb_lock);
return ret;
}
@@ -759,8 +763,7 @@ static int soc_camera_g_crop(struct file *file, void *fh,
static int soc_camera_s_crop(struct file *file, void *fh,
struct v4l2_crop *a)
{
- struct soc_camera_file *icf = file->private_data;
- struct soc_camera_device *icd = icf->icd;
+ struct soc_camera_device *icd = file->private_data;
struct soc_camera_host *ici = to_soc_camera_host(icd->dev.parent);
struct v4l2_rect *rect = &a->c;
struct v4l2_crop current_crop;
@@ -773,7 +776,7 @@ static int soc_camera_s_crop(struct file *file, void *fh,
rect->width, rect->height, rect->left, rect->top);
/* Cropping is allowed during a running capture, guard consistency */
- mutex_lock(&icf->vb_vidq.vb_lock);
+ mutex_lock(&icd->vb_vidq.vb_lock);
/* If get_crop fails, we'll let host and/or client drivers decide */
ret = ici->ops->get_crop(icd, &current_crop);
@@ -782,7 +785,7 @@ static int soc_camera_s_crop(struct file *file, void *fh,
if (ret < 0) {
dev_err(&icd->dev,
"S_CROP denied: getting current crop failed\n");
- } else if (icf->vb_vidq.bufs[0] &&
+ } else if (icd->vb_vidq.bufs[0] &&
(a->c.width != current_crop.c.width ||
a->c.height != current_crop.c.height)) {
dev_err(&icd->dev,
@@ -792,7 +795,7 @@ static int soc_camera_s_crop(struct file *file, void *fh,
ret = ici->ops->set_crop(icd, a);
}
- mutex_unlock(&icf->vb_vidq.vb_lock);
+ mutex_unlock(&icd->vb_vidq.vb_lock);
return ret;
}
@@ -800,8 +803,7 @@ static int soc_camera_s_crop(struct file *file, void *fh,
static int soc_camera_g_parm(struct file *file, void *fh,
struct v4l2_streamparm *a)
{
- struct soc_camera_file *icf = file->private_data;
- struct soc_camera_device *icd = icf->icd;
+ struct soc_camera_device *icd = file->private_data;
struct soc_camera_host *ici = to_soc_camera_host(icd->dev.parent);
if (ici->ops->get_parm)
@@ -813,8 +815,7 @@ static int soc_camera_g_parm(struct file *file, void *fh,
static int soc_camera_s_parm(struct file *file, void *fh,
struct v4l2_streamparm *a)
{
- struct soc_camera_file *icf = file->private_data;
- struct soc_camera_device *icd = icf->icd;
+ struct soc_camera_device *icd = file->private_data;
struct soc_camera_host *ici = to_soc_camera_host(icd->dev.parent);
if (ici->ops->set_parm)
@@ -826,8 +827,7 @@ static int soc_camera_s_parm(struct file *file, void *fh,
static int soc_camera_g_chip_ident(struct file *file, void *fh,
struct v4l2_dbg_chip_ident *id)
{
- struct soc_camera_file *icf = file->private_data;
- struct soc_camera_device *icd = icf->icd;
+ struct soc_camera_device *icd = file->private_data;
struct v4l2_subdev *sd = soc_camera_to_subdev(icd);
return v4l2_subdev_call(sd, core, g_chip_ident, id);
@@ -837,8 +837,7 @@ static int soc_camera_g_chip_ident(struct file *file, void *fh,
static int soc_camera_g_register(struct file *file, void *fh,
struct v4l2_dbg_register *reg)
{
- struct soc_camera_file *icf = file->private_data;
- struct soc_camera_device *icd = icf->icd;
+ struct soc_camera_device *icd = file->private_data;
struct v4l2_subdev *sd = soc_camera_to_subdev(icd);
return v4l2_subdev_call(sd, core, g_register, reg);
@@ -847,8 +846,7 @@ static int soc_camera_g_register(struct file *file, void *fh,
static int soc_camera_s_register(struct file *file, void *fh,
struct v4l2_dbg_register *reg)
{
- struct soc_camera_file *icf = file->private_data;
- struct soc_camera_device *icd = icf->icd;
+ struct soc_camera_device *icd = file->private_data;
struct v4l2_subdev *sd = soc_camera_to_subdev(icd);
return v4l2_subdev_call(sd, core, s_register, reg);
@@ -898,11 +896,11 @@ static int soc_camera_init_i2c(struct soc_camera_device *icd,
icl->board_info->platform_data = icd;
subdev = v4l2_i2c_new_subdev_board(&ici->v4l2_dev, adap,
- icl->module_name, icl->board_info, NULL);
+ icl->board_info, NULL);
if (!subdev)
goto ei2cnd;
- client = subdev->priv;
+ client = v4l2_get_subdevdata(subdev);
/* Use to_i2c_client(dev) to recover the i2c client */
dev_set_drvdata(&icd->dev, &client->dev);
@@ -1148,6 +1146,20 @@ static int default_s_crop(struct soc_camera_device *icd, struct v4l2_crop *a)
return v4l2_subdev_call(sd, video, s_crop, a);
}
+static int default_g_parm(struct soc_camera_device *icd,
+ struct v4l2_streamparm *parm)
+{
+ struct v4l2_subdev *sd = soc_camera_to_subdev(icd);
+ return v4l2_subdev_call(sd, video, g_parm, parm);
+}
+
+static int default_s_parm(struct soc_camera_device *icd,
+ struct v4l2_streamparm *parm)
+{
+ struct v4l2_subdev *sd = soc_camera_to_subdev(icd);
+ return v4l2_subdev_call(sd, video, s_parm, parm);
+}
+
static void soc_camera_device_init(struct device *dev, void *pdata)
{
dev->platform_data = pdata;
@@ -1179,6 +1191,10 @@ int soc_camera_host_register(struct soc_camera_host *ici)
ici->ops->get_crop = default_g_crop;
if (!ici->ops->cropcap)
ici->ops->cropcap = default_cropcap;
+ if (!ici->ops->set_parm)
+ ici->ops->set_parm = default_s_parm;
+ if (!ici->ops->get_parm)
+ ici->ops->get_parm = default_g_parm;
mutex_lock(&list_lock);
list_for_each_entry(ix, &hosts, list) {
diff --git a/drivers/media/video/sr030pc30.c b/drivers/media/video/sr030pc30.c
new file mode 100644
index 000000000000..c9dc67aba980
--- /dev/null
+++ b/drivers/media/video/sr030pc30.c
@@ -0,0 +1,894 @@
+/*
+ * Driver for SiliconFile SR030PC30 VGA (1/10-Inch) Image Sensor with ISP
+ *
+ * Copyright (C) 2010 Samsung Electronics Co., Ltd
+ * Author: Sylwester Nawrocki, s.nawrocki@samsung.com
+ *
+ * Based on original driver authored by Dongsoo Nathaniel Kim
+ * and HeungJun Kim <riverful.kim@samsung.com>.
+ *
+ * Based on mt9v011 Micron Digital Image Sensor driver
+ * Copyright (c) 2009 Mauro Carvalho Chehab (mchehab@redhat.com)
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#include <linux/i2c.h>
+#include <linux/delay.h>
+#include <linux/slab.h>
+#include <media/v4l2-device.h>
+#include <media/v4l2-subdev.h>
+#include <media/v4l2-mediabus.h>
+#include <media/sr030pc30.h>
+
+static int debug;
+module_param(debug, int, 0644);
+
+#define MODULE_NAME "SR030PC30"
+
+/*
+ * Register offsets within a page
+ * b15..b8 - page id, b7..b0 - register address
+ */
+#define POWER_CTRL_REG 0x0001
+#define PAGEMODE_REG 0x03
+#define DEVICE_ID_REG 0x0004
+#define NOON010PC30_ID 0x86
+#define SR030PC30_ID 0x8C
+#define VDO_CTL1_REG 0x0010
+#define SUBSAMPL_NONE_VGA 0
+#define SUBSAMPL_QVGA 0x10
+#define SUBSAMPL_QQVGA 0x20
+#define VDO_CTL2_REG 0x0011
+#define SYNC_CTL_REG 0x0012
+#define WIN_ROWH_REG 0x0020
+#define WIN_ROWL_REG 0x0021
+#define WIN_COLH_REG 0x0022
+#define WIN_COLL_REG 0x0023
+#define WIN_HEIGHTH_REG 0x0024
+#define WIN_HEIGHTL_REG 0x0025
+#define WIN_WIDTHH_REG 0x0026
+#define WIN_WIDTHL_REG 0x0027
+#define HBLANKH_REG 0x0040
+#define HBLANKL_REG 0x0041
+#define VSYNCH_REG 0x0042
+#define VSYNCL_REG 0x0043
+/* page 10 */
+#define ISP_CTL_REG(n) (0x1010 + (n))
+#define YOFS_REG 0x1040
+#define DARK_YOFS_REG 0x1041
+#define AG_ABRTH_REG 0x1050
+#define SAT_CTL_REG 0x1060
+#define BSAT_REG 0x1061
+#define RSAT_REG 0x1062
+#define AG_SAT_TH_REG 0x1063
+/* page 11 */
+#define ZLPF_CTRL_REG 0x1110
+#define ZLPF_CTRL2_REG 0x1112
+#define ZLPF_AGH_THR_REG 0x1121
+#define ZLPF_THR_REG 0x1160
+#define ZLPF_DYN_THR_REG 0x1160
+/* page 12 */
+#define YCLPF_CTL1_REG 0x1240
+#define YCLPF_CTL2_REG 0x1241
+#define YCLPF_THR_REG 0x1250
+#define BLPF_CTL_REG 0x1270
+#define BLPF_THR1_REG 0x1274
+#define BLPF_THR2_REG 0x1275
+/* page 14 - Lens Shading Compensation */
+#define LENS_CTRL_REG 0x1410
+#define LENS_XCEN_REG 0x1420
+#define LENS_YCEN_REG 0x1421
+#define LENS_R_COMP_REG 0x1422
+#define LENS_G_COMP_REG 0x1423
+#define LENS_B_COMP_REG 0x1424
+/* page 15 - Color correction */
+#define CMC_CTL_REG 0x1510
+#define CMC_OFSGH_REG 0x1514
+#define CMC_OFSGL_REG 0x1516
+#define CMC_SIGN_REG 0x1517
+/* Color correction coefficients */
+#define CMC_COEF_REG(n) (0x1530 + (n))
+/* Color correction offset coefficients */
+#define CMC_OFS_REG(n) (0x1540 + (n))
+/* page 16 - Gamma correction */
+#define GMA_CTL_REG 0x1610
+/* Gamma correction coefficients 0.14 */
+#define GMA_COEF_REG(n) (0x1630 + (n))
+/* page 20 - Auto Exposure */
+#define AE_CTL1_REG 0x2010
+#define AE_CTL2_REG 0x2011
+#define AE_FRM_CTL_REG 0x2020
+#define AE_FINE_CTL_REG(n) (0x2028 + (n))
+#define EXP_TIMEH_REG 0x2083
+#define EXP_TIMEM_REG 0x2084
+#define EXP_TIMEL_REG 0x2085
+#define EXP_MMINH_REG 0x2086
+#define EXP_MMINL_REG 0x2087
+#define EXP_MMAXH_REG 0x2088
+#define EXP_MMAXM_REG 0x2089
+#define EXP_MMAXL_REG 0x208A
+/* page 22 - Auto White Balance */
+#define AWB_CTL1_REG 0x2210
+#define AWB_ENABLE 0x80
+#define AWB_CTL2_REG 0x2211
+#define MWB_ENABLE 0x01
+/* RGB gain control (manual WB) when AWB_CTL1[7]=0 */
+#define AWB_RGAIN_REG 0x2280
+#define AWB_GGAIN_REG 0x2281
+#define AWB_BGAIN_REG 0x2282
+#define AWB_RMAX_REG 0x2283
+#define AWB_RMIN_REG 0x2284
+#define AWB_BMAX_REG 0x2285
+#define AWB_BMIN_REG 0x2286
+/* R, B gain range in bright light conditions */
+#define AWB_RMAXB_REG 0x2287
+#define AWB_RMINB_REG 0x2288
+#define AWB_BMAXB_REG 0x2289
+#define AWB_BMINB_REG 0x228A
+/* manual white balance, when AWB_CTL2[0]=1 */
+#define MWB_RGAIN_REG 0x22B2
+#define MWB_BGAIN_REG 0x22B3
+/* the token to mark an array end */
+#define REG_TERM 0xFFFF
+
+/* Minimum and maximum exposure time in ms */
+#define EXPOS_MIN_MS 1
+#define EXPOS_MAX_MS 125
+
+struct sr030pc30_info {
+ struct v4l2_subdev sd;
+ const struct sr030pc30_platform_data *pdata;
+ const struct sr030pc30_format *curr_fmt;
+ const struct sr030pc30_frmsize *curr_win;
+ unsigned int auto_wb:1;
+ unsigned int auto_exp:1;
+ unsigned int hflip:1;
+ unsigned int vflip:1;
+ unsigned int sleep:1;
+ unsigned int exposure;
+ u8 blue_balance;
+ u8 red_balance;
+ u8 i2c_reg_page;
+};
+
+struct sr030pc30_format {
+ enum v4l2_mbus_pixelcode code;
+ enum v4l2_colorspace colorspace;
+ u16 ispctl1_reg;
+};
+
+struct sr030pc30_frmsize {
+ u16 width;
+ u16 height;
+ int vid_ctl1;
+};
+
+struct i2c_regval {
+ u16 addr;
+ u16 val;
+};
+
+static const struct v4l2_queryctrl sr030pc30_ctrl[] = {
+ {
+ .id = V4L2_CID_AUTO_WHITE_BALANCE,
+ .type = V4L2_CTRL_TYPE_BOOLEAN,
+ .name = "Auto White Balance",
+ .minimum = 0,
+ .maximum = 1,
+ .step = 1,
+ .default_value = 1,
+ }, {
+ .id = V4L2_CID_RED_BALANCE,
+ .type = V4L2_CTRL_TYPE_INTEGER,
+ .name = "Red Balance",
+ .minimum = 0,
+ .maximum = 127,
+ .step = 1,
+ .default_value = 64,
+ .flags = 0,
+ }, {
+ .id = V4L2_CID_BLUE_BALANCE,
+ .type = V4L2_CTRL_TYPE_INTEGER,
+ .name = "Blue Balance",
+ .minimum = 0,
+ .maximum = 127,
+ .step = 1,
+ .default_value = 64,
+ }, {
+ .id = V4L2_CID_EXPOSURE_AUTO,
+ .type = V4L2_CTRL_TYPE_INTEGER,
+ .name = "Auto Exposure",
+ .minimum = 0,
+ .maximum = 1,
+ .step = 1,
+ .default_value = 1,
+ }, {
+ .id = V4L2_CID_EXPOSURE,
+ .type = V4L2_CTRL_TYPE_INTEGER,
+ .name = "Exposure",
+ .minimum = EXPOS_MIN_MS,
+ .maximum = EXPOS_MAX_MS,
+ .step = 1,
+ .default_value = 1,
+ }, {
+ }
+};
+
+/* supported resolutions */
+static const struct sr030pc30_frmsize sr030pc30_sizes[] = {
+ {
+ .width = 640,
+ .height = 480,
+ .vid_ctl1 = SUBSAMPL_NONE_VGA,
+ }, {
+ .width = 320,
+ .height = 240,
+ .vid_ctl1 = SUBSAMPL_QVGA,
+ }, {
+ .width = 160,
+ .height = 120,
+ .vid_ctl1 = SUBSAMPL_QQVGA,
+ },
+};
+
+/* supported pixel formats */
+static const struct sr030pc30_format sr030pc30_formats[] = {
+ {
+ .code = V4L2_MBUS_FMT_YUYV8_2X8,
+ .colorspace = V4L2_COLORSPACE_JPEG,
+ .ispctl1_reg = 0x03,
+ }, {
+ .code = V4L2_MBUS_FMT_YVYU8_2X8,
+ .colorspace = V4L2_COLORSPACE_JPEG,
+ .ispctl1_reg = 0x02,
+ }, {
+ .code = V4L2_MBUS_FMT_VYUY8_2X8,
+ .colorspace = V4L2_COLORSPACE_JPEG,
+ .ispctl1_reg = 0,
+ }, {
+ .code = V4L2_MBUS_FMT_UYVY8_2X8,
+ .colorspace = V4L2_COLORSPACE_JPEG,
+ .ispctl1_reg = 0x01,
+ }, {
+ .code = V4L2_MBUS_FMT_RGB565_2X8_BE,
+ .colorspace = V4L2_COLORSPACE_JPEG,
+ .ispctl1_reg = 0x40,
+ },
+};
+
+static const struct i2c_regval sr030pc30_base_regs[] = {
+ /* Window size and position within pixel matrix */
+ { WIN_ROWH_REG, 0x00 }, { WIN_ROWL_REG, 0x06 },
+ { WIN_COLH_REG, 0x00 }, { WIN_COLL_REG, 0x06 },
+ { WIN_HEIGHTH_REG, 0x01 }, { WIN_HEIGHTL_REG, 0xE0 },
+ { WIN_WIDTHH_REG, 0x02 }, { WIN_WIDTHL_REG, 0x80 },
+ { HBLANKH_REG, 0x01 }, { HBLANKL_REG, 0x50 },
+ { VSYNCH_REG, 0x00 }, { VSYNCL_REG, 0x14 },
+ { SYNC_CTL_REG, 0 },
+ /* Color correction and saturation */
+ { ISP_CTL_REG(0), 0x30 }, { YOFS_REG, 0x80 },
+ { DARK_YOFS_REG, 0x04 }, { AG_ABRTH_REG, 0x78 },
+ { SAT_CTL_REG, 0x1F }, { BSAT_REG, 0x90 },
+ { AG_SAT_TH_REG, 0xF0 }, { 0x1064, 0x80 },
+ { CMC_CTL_REG, 0x03 }, { CMC_OFSGH_REG, 0x3C },
+ { CMC_OFSGL_REG, 0x2C }, { CMC_SIGN_REG, 0x2F },
+ { CMC_COEF_REG(0), 0xCB }, { CMC_OFS_REG(0), 0x87 },
+ { CMC_COEF_REG(1), 0x61 }, { CMC_OFS_REG(1), 0x18 },
+ { CMC_COEF_REG(2), 0x16 }, { CMC_OFS_REG(2), 0x91 },
+ { CMC_COEF_REG(3), 0x23 }, { CMC_OFS_REG(3), 0x94 },
+ { CMC_COEF_REG(4), 0xCE }, { CMC_OFS_REG(4), 0x9f },
+ { CMC_COEF_REG(5), 0x2B }, { CMC_OFS_REG(5), 0x33 },
+ { CMC_COEF_REG(6), 0x01 }, { CMC_OFS_REG(6), 0x00 },
+ { CMC_COEF_REG(7), 0x34 }, { CMC_OFS_REG(7), 0x94 },
+ { CMC_COEF_REG(8), 0x75 }, { CMC_OFS_REG(8), 0x14 },
+ /* Gamma correction coefficients */
+ { GMA_CTL_REG, 0x03 }, { GMA_COEF_REG(0), 0x00 },
+ { GMA_COEF_REG(1), 0x19 }, { GMA_COEF_REG(2), 0x26 },
+ { GMA_COEF_REG(3), 0x3B }, { GMA_COEF_REG(4), 0x5D },
+ { GMA_COEF_REG(5), 0x79 }, { GMA_COEF_REG(6), 0x8E },
+ { GMA_COEF_REG(7), 0x9F }, { GMA_COEF_REG(8), 0xAF },
+ { GMA_COEF_REG(9), 0xBD }, { GMA_COEF_REG(10), 0xCA },
+ { GMA_COEF_REG(11), 0xDD }, { GMA_COEF_REG(12), 0xEC },
+ { GMA_COEF_REG(13), 0xF7 }, { GMA_COEF_REG(14), 0xFF },
+ /* Noise reduction, Z-LPF, YC-LPF and BLPF filters setup */
+ { ZLPF_CTRL_REG, 0x99 }, { ZLPF_CTRL2_REG, 0x0E },
+ { ZLPF_AGH_THR_REG, 0x29 }, { ZLPF_THR_REG, 0x0F },
+ { ZLPF_DYN_THR_REG, 0x63 }, { YCLPF_CTL1_REG, 0x23 },
+ { YCLPF_CTL2_REG, 0x3B }, { YCLPF_THR_REG, 0x05 },
+ { BLPF_CTL_REG, 0x1D }, { BLPF_THR1_REG, 0x05 },
+ { BLPF_THR2_REG, 0x04 },
+ /* Automatic white balance */
+ { AWB_CTL1_REG, 0xFB }, { AWB_CTL2_REG, 0x26 },
+ { AWB_RMAX_REG, 0x54 }, { AWB_RMIN_REG, 0x2B },
+ { AWB_BMAX_REG, 0x57 }, { AWB_BMIN_REG, 0x29 },
+ { AWB_RMAXB_REG, 0x50 }, { AWB_RMINB_REG, 0x43 },
+ { AWB_BMAXB_REG, 0x30 }, { AWB_BMINB_REG, 0x22 },
+ /* Auto exposure */
+ { AE_CTL1_REG, 0x8C }, { AE_CTL2_REG, 0x04 },
+ { AE_FRM_CTL_REG, 0x01 }, { AE_FINE_CTL_REG(0), 0x3F },
+ { AE_FINE_CTL_REG(1), 0xA3 }, { AE_FINE_CTL_REG(3), 0x34 },
+ /* Lens shading compensation */
+ { LENS_CTRL_REG, 0x01 }, { LENS_XCEN_REG, 0x80 },
+ { LENS_YCEN_REG, 0x70 }, { LENS_R_COMP_REG, 0x53 },
+ { LENS_G_COMP_REG, 0x40 }, { LENS_B_COMP_REG, 0x3e },
+ { REG_TERM, 0 },
+};
+
+static inline struct sr030pc30_info *to_sr030pc30(struct v4l2_subdev *sd)
+{
+ return container_of(sd, struct sr030pc30_info, sd);
+}
+
+static inline int set_i2c_page(struct sr030pc30_info *info,
+ struct i2c_client *client, unsigned int reg)
+{
+ int ret = 0;
+ u32 page = reg >> 8 & 0xFF;
+
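+ /* Switch the register page only when it changes; PAGEMODE_REG itself (offset 0x03) presumably needs no page switch */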
+ if (info->i2c_reg_page != page && (reg & 0xFF) != 0x03) {
+ ret = i2c_smbus_write_byte_data(client, PAGEMODE_REG, page);
+ if (!ret)
+ info->i2c_reg_page = page;
+ }
+ return ret;
+}
+
+static int cam_i2c_read(struct v4l2_subdev *sd, u32 reg_addr)
+{
+ struct i2c_client *client = v4l2_get_subdevdata(sd);
+ struct sr030pc30_info *info = to_sr030pc30(sd);
+
+ int ret = set_i2c_page(info, client, reg_addr);
+ if (!ret)
+ ret = i2c_smbus_read_byte_data(client, reg_addr & 0xFF);
+ return ret;
+}
+
+static int cam_i2c_write(struct v4l2_subdev *sd, u32 reg_addr, u32 val)
+{
+ struct i2c_client *client = v4l2_get_subdevdata(sd);
+ struct sr030pc30_info *info = to_sr030pc30(sd);
+
+ int ret = set_i2c_page(info, client, reg_addr);
+ if (!ret)
+ ret = i2c_smbus_write_byte_data(
+ client, reg_addr & 0xFF, val);
+ return ret;
+}
+
+static inline int sr030pc30_bulk_write_reg(struct v4l2_subdev *sd,
+ const struct i2c_regval *msg)
+{
+ while (msg->addr != REG_TERM) {
+ int ret = cam_i2c_write(sd, msg->addr, msg->val);
+ if (ret)
+ return ret;
+ msg++;
+ }
+ return 0;
+}
+
+/* Device reset and sleep mode control */
+static int sr030pc30_pwr_ctrl(struct v4l2_subdev *sd,
+ bool reset, bool sleep)
+{
+ struct sr030pc30_info *info = to_sr030pc30(sd);
+ u8 reg = sleep ? 0xF1 : 0xF0;
+ int ret = 0;
+
+ if (reset)
+ ret = cam_i2c_write(sd, POWER_CTRL_REG, reg | 0x02);
+ if (!ret) {
+ ret = cam_i2c_write(sd, POWER_CTRL_REG, reg);
+ if (!ret) {
+ info->sleep = sleep;
+ if (reset)
+ info->i2c_reg_page = -1;
+ }
+ }
+ return ret;
+}
+
+static inline int sr030pc30_enable_autoexposure(struct v4l2_subdev *sd, int on)
+{
+ struct sr030pc30_info *info = to_sr030pc30(sd);
+ /* auto anti-flicker is also enabled here */
+ int ret = cam_i2c_write(sd, AE_CTL1_REG, on ? 0xDC : 0x0C);
+ if (!ret)
+ info->auto_exp = on;
+ return ret;
+}
+
+static int sr030pc30_set_exposure(struct v4l2_subdev *sd, int value)
+{
+ struct sr030pc30_info *info = to_sr030pc30(sd);
+
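+ /* Convert the exposure time in ms into units of 8 master clock cycles (assuming pdata->clk_rate is in Hz) */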
+ unsigned long expos = value * info->pdata->clk_rate / (8 * 1000);
+
+ int ret = cam_i2c_write(sd, EXP_TIMEH_REG, expos >> 16 & 0xFF);
+ if (!ret)
+ ret = cam_i2c_write(sd, EXP_TIMEM_REG, expos >> 8 & 0xFF);
+ if (!ret)
+ ret = cam_i2c_write(sd, EXP_TIMEL_REG, expos & 0xFF);
+ if (!ret) { /* Turn off AE */
+ info->exposure = value;
+ ret = sr030pc30_enable_autoexposure(sd, 0);
+ }
+ return ret;
+}
+
+/* Automatic white balance control */
+static int sr030pc30_enable_autowhitebalance(struct v4l2_subdev *sd, int on)
+{
+ struct sr030pc30_info *info = to_sr030pc30(sd);
+
+ int ret = cam_i2c_write(sd, AWB_CTL2_REG, on ? 0x2E : 0x2F);
+ if (!ret)
+ ret = cam_i2c_write(sd, AWB_CTL1_REG, on ? 0xFB : 0x7B);
+ if (!ret)
+ info->auto_wb = on;
+
+ return ret;
+}
+
+static int sr030pc30_set_flip(struct v4l2_subdev *sd)
+{
+ struct sr030pc30_info *info = to_sr030pc30(sd);
+
+ s32 reg = cam_i2c_read(sd, VDO_CTL2_REG);
+ if (reg < 0)
+ return reg;
+
+ reg &= 0x7C;
+ if (info->hflip)
+ reg |= 0x01;
+ if (info->vflip)
+ reg |= 0x02;
+ return cam_i2c_write(sd, VDO_CTL2_REG, reg | 0x80);
+}
+
+/* Configure resolution, color format and image flip */
+static int sr030pc30_set_params(struct v4l2_subdev *sd)
+{
+ struct sr030pc30_info *info = to_sr030pc30(sd);
+ int ret;
+
+ if (!info->curr_win)
+ return -EINVAL;
+
+ /* Configure the resolution through subsampling */
+ ret = cam_i2c_write(sd, VDO_CTL1_REG,
+ info->curr_win->vid_ctl1);
+
+ if (!ret && info->curr_fmt)
+ ret = cam_i2c_write(sd, ISP_CTL_REG(0),
+ info->curr_fmt->ispctl1_reg);
+ if (!ret)
+ ret = sr030pc30_set_flip(sd);
+
+ return ret;
+}
+
+/* Find nearest matching image pixel size. */
+static int sr030pc30_try_frame_size(struct v4l2_mbus_framefmt *mf)
+{
+ unsigned int min_err = ~0;
+ int i = ARRAY_SIZE(sr030pc30_sizes);
+ const struct sr030pc30_frmsize *fsize = &sr030pc30_sizes[0],
+ *match = NULL;
+ while (i--) {
+ int err = abs(fsize->width - mf->width)
+ + abs(fsize->height - mf->height);
+ if (err < min_err) {
+ min_err = err;
+ match = fsize;
+ }
+ fsize++;
+ }
+ if (match) {
+ mf->width = match->width;
+ mf->height = match->height;
+ return 0;
+ }
+ return -EINVAL;
+}
+
+static int sr030pc30_queryctrl(struct v4l2_subdev *sd,
+ struct v4l2_queryctrl *qc)
+{
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(sr030pc30_ctrl); i++)
+ if (qc->id == sr030pc30_ctrl[i].id) {
+ *qc = sr030pc30_ctrl[i];
+ v4l2_dbg(1, debug, sd, "%s id: %d\n",
+ __func__, qc->id);
+ return 0;
+ }
+
+ return -EINVAL;
+}
+
+static inline int sr030pc30_set_bluebalance(struct v4l2_subdev *sd, int value)
+{
+ int ret = cam_i2c_write(sd, MWB_BGAIN_REG, value);
+ if (!ret)
+ to_sr030pc30(sd)->blue_balance = value;
+ return ret;
+}
+
+static inline int sr030pc30_set_redbalance(struct v4l2_subdev *sd, int value)
+{
+ int ret = cam_i2c_write(sd, MWB_RGAIN_REG, value);
+ if (!ret)
+ to_sr030pc30(sd)->red_balance = value;
+ return ret;
+}
+
+static int sr030pc30_s_ctrl(struct v4l2_subdev *sd,
+ struct v4l2_control *ctrl)
+{
+ int i, ret = 0;
+
+ for (i = 0; i < ARRAY_SIZE(sr030pc30_ctrl); i++)
+ if (ctrl->id == sr030pc30_ctrl[i].id)
+ break;
+
+ if (i == ARRAY_SIZE(sr030pc30_ctrl))
+ return -EINVAL;
+
+ if (ctrl->value < sr030pc30_ctrl[i].minimum ||
+ ctrl->value > sr030pc30_ctrl[i].maximum)
+ return -ERANGE;
+
+ v4l2_dbg(1, debug, sd, "%s: ctrl_id: %d, value: %d\n",
+ __func__, ctrl->id, ctrl->value);
+
+ switch (ctrl->id) {
+ case V4L2_CID_AUTO_WHITE_BALANCE:
+ sr030pc30_enable_autowhitebalance(sd, ctrl->value);
+ break;
+ case V4L2_CID_BLUE_BALANCE:
+ ret = sr030pc30_set_bluebalance(sd, ctrl->value);
+ break;
+ case V4L2_CID_RED_BALANCE:
+ ret = sr030pc30_set_redbalance(sd, ctrl->value);
+ break;
+ case V4L2_CID_EXPOSURE_AUTO:
+ sr030pc30_enable_autoexposure(sd,
+ ctrl->value == V4L2_EXPOSURE_AUTO);
+ break;
+ case V4L2_CID_EXPOSURE:
+ ret = sr030pc30_set_exposure(sd, ctrl->value);
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ return ret;
+}
+
+static int sr030pc30_g_ctrl(struct v4l2_subdev *sd,
+ struct v4l2_control *ctrl)
+{
+ struct sr030pc30_info *info = to_sr030pc30(sd);
+
+ v4l2_dbg(1, debug, sd, "%s: id: %d\n", __func__, ctrl->id);
+
+ switch (ctrl->id) {
+ case V4L2_CID_AUTO_WHITE_BALANCE:
+ ctrl->value = info->auto_wb;
+ break;
+ case V4L2_CID_BLUE_BALANCE:
+ ctrl->value = info->blue_balance;
+ break;
+ case V4L2_CID_RED_BALANCE:
+ ctrl->value = info->red_balance;
+ break;
+ case V4L2_CID_EXPOSURE_AUTO:
+ ctrl->value = info->auto_exp;
+ break;
+ case V4L2_CID_EXPOSURE:
+ ctrl->value = info->exposure;
+ break;
+ default:
+ return -EINVAL;
+ }
+ return 0;
+}
+
+static int sr030pc30_enum_fmt(struct v4l2_subdev *sd, unsigned int index,
+ enum v4l2_mbus_pixelcode *code)
+{
+ if (!code || index >= ARRAY_SIZE(sr030pc30_formats))
+ return -EINVAL;
+
+ *code = sr030pc30_formats[index].code;
+ return 0;
+}
+
+static int sr030pc30_g_fmt(struct v4l2_subdev *sd,
+ struct v4l2_mbus_framefmt *mf)
+{
+ struct sr030pc30_info *info = to_sr030pc30(sd);
+ int ret;
+
+ if (!mf)
+ return -EINVAL;
+
+ if (!info->curr_win || !info->curr_fmt) {
+ ret = sr030pc30_set_params(sd);
+ if (ret)
+ return ret;
+ }
+
+ mf->width = info->curr_win->width;
+ mf->height = info->curr_win->height;
+ mf->code = info->curr_fmt->code;
+ mf->colorspace = info->curr_fmt->colorspace;
+ mf->field = V4L2_FIELD_NONE;
+
+ return 0;
+}
+
+/* Return nearest media bus frame format. */
+static const struct sr030pc30_format *try_fmt(struct v4l2_subdev *sd,
+ struct v4l2_mbus_framefmt *mf)
+{
+ int i = ARRAY_SIZE(sr030pc30_formats);
+
+ sr030pc30_try_frame_size(mf);
+
+ while (i--)
+ if (mf->code == sr030pc30_formats[i].code)
+ break;
+
+ if (i < 0)
+ i = 0; /* no matching code found, fall back to the first supported format */
+
+ mf->code = sr030pc30_formats[i].code;
+
+ return &sr030pc30_formats[i];
+}
+
+/* Return nearest media bus frame format. */
+static int sr030pc30_try_fmt(struct v4l2_subdev *sd,
+ struct v4l2_mbus_framefmt *mf)
+{
+ if (!sd || !mf)
+ return -EINVAL;
+
+ try_fmt(sd, mf);
+ return 0;
+}
+
+static int sr030pc30_s_fmt(struct v4l2_subdev *sd,
+ struct v4l2_mbus_framefmt *mf)
+{
+ struct sr030pc30_info *info = to_sr030pc30(sd);
+
+ if (!sd || !mf)
+ return -EINVAL;
+
+ info->curr_fmt = try_fmt(sd, mf);
+
+ return sr030pc30_set_params(sd);
+}
+
+static int sr030pc30_base_config(struct v4l2_subdev *sd)
+{
+ struct sr030pc30_info *info = to_sr030pc30(sd);
+ int ret;
+ unsigned long expmin, expmax;
+
+ ret = sr030pc30_bulk_write_reg(sd, sr030pc30_base_regs);
+ if (!ret) {
+ info->curr_fmt = &sr030pc30_formats[0];
+ info->curr_win = &sr030pc30_sizes[0];
+ ret = sr030pc30_set_params(sd);
+ }
+ if (!ret)
+ ret = sr030pc30_pwr_ctrl(sd, false, false);
+
+ if (!ret && !info->pdata)
+ return ret;
+
+ expmin = EXPOS_MIN_MS * info->pdata->clk_rate / (8 * 1000);
+ expmax = EXPOS_MAX_MS * info->pdata->clk_rate / (8 * 1000);
+
+ v4l2_dbg(1, debug, sd, "%s: expmin= %lx, expmax= %lx", __func__,
+ expmin, expmax);
+
+ /* Setting up manual exposure time range */
+ ret = cam_i2c_write(sd, EXP_MMINH_REG, expmin >> 8 & 0xFF);
+ if (!ret)
+ ret = cam_i2c_write(sd, EXP_MMINL_REG, expmin & 0xFF);
+ if (!ret)
+ ret = cam_i2c_write(sd, EXP_MMAXH_REG, expmax >> 16 & 0xFF);
+ if (!ret)
+ ret = cam_i2c_write(sd, EXP_MMAXM_REG, expmax >> 8 & 0xFF);
+ if (!ret)
+ ret = cam_i2c_write(sd, EXP_MMAXL_REG, expmax & 0xFF);
+
+ return ret;
+}
+
+static int sr030pc30_s_config(struct v4l2_subdev *sd,
+ int irq, void *platform_data)
+{
+ struct sr030pc30_info *info = to_sr030pc30(sd);
+
+ info->pdata = platform_data;
+ return 0;
+}
+
+static int sr030pc30_s_stream(struct v4l2_subdev *sd, int enable)
+{
+ return 0;
+}
+
+static int sr030pc30_s_power(struct v4l2_subdev *sd, int on)
+{
+ struct i2c_client *client = v4l2_get_subdevdata(sd);
+ struct sr030pc30_info *info = to_sr030pc30(sd);
+ const struct sr030pc30_platform_data *pdata = info->pdata;
+ int ret = 0; /* avoid returning an uninitialised value when powering off without a set_power callback */
+
+ if (WARN(pdata == NULL, "No platform data!"))
+ return -ENOMEM;
+
+ /*
+ * Put sensor into power sleep mode before switching off
+ * power and disabling MCLK.
+ */
+ if (!on)
+ sr030pc30_pwr_ctrl(sd, false, true);
+
+ /* set_power controls sensor's power and clock */
+ if (pdata->set_power) {
+ ret = pdata->set_power(&client->dev, on);
+ if (ret)
+ return ret;
+ }
+
+ if (on) {
+ ret = sr030pc30_base_config(sd);
+ } else {
+ info->curr_win = NULL;
+ info->curr_fmt = NULL;
+ }
+
+ return ret;
+}
+
+static const struct v4l2_subdev_core_ops sr030pc30_core_ops = {
+ .s_config = sr030pc30_s_config,
+ .s_power = sr030pc30_s_power,
+ .queryctrl = sr030pc30_queryctrl,
+ .s_ctrl = sr030pc30_s_ctrl,
+ .g_ctrl = sr030pc30_g_ctrl,
+};
+
+static const struct v4l2_subdev_video_ops sr030pc30_video_ops = {
+ .s_stream = sr030pc30_s_stream,
+ .g_mbus_fmt = sr030pc30_g_fmt,
+ .s_mbus_fmt = sr030pc30_s_fmt,
+ .try_mbus_fmt = sr030pc30_try_fmt,
+ .enum_mbus_fmt = sr030pc30_enum_fmt,
+};
+
+static const struct v4l2_subdev_ops sr030pc30_ops = {
+ .core = &sr030pc30_core_ops,
+ .video = &sr030pc30_video_ops,
+};
+
+/*
+ * Detect sensor type. Return 0 if SR030PC30 was detected
+ * or -ENODEV otherwise.
+ */
+static int sr030pc30_detect(struct i2c_client *client)
+{
+ const struct sr030pc30_platform_data *pdata
+ = client->dev.platform_data;
+ int ret;
+
+ /* Enable sensor's power and clock */
+ if (pdata->set_power) {
+ ret = pdata->set_power(&client->dev, 1);
+ if (ret)
+ return ret;
+ }
+
+ ret = i2c_smbus_read_byte_data(client, DEVICE_ID_REG);
+
+ if (pdata->set_power)
+ pdata->set_power(&client->dev, 0);
+
+ if (ret < 0) {
+ dev_err(&client->dev, "%s: I2C read failed\n", __func__);
+ return ret;
+ }
+
+ return ret == SR030PC30_ID ? 0 : -ENODEV;
+}
+
+
+static int sr030pc30_probe(struct i2c_client *client,
+ const struct i2c_device_id *id)
+{
+ struct sr030pc30_info *info;
+ struct v4l2_subdev *sd;
+ const struct sr030pc30_platform_data *pdata
+ = client->dev.platform_data;
+ int ret;
+
+ if (!pdata) {
+ dev_err(&client->dev, "No platform data!");
+ return -EIO;
+ }
+
+ ret = sr030pc30_detect(client);
+ if (ret)
+ return ret;
+
+ info = kzalloc(sizeof(*info), GFP_KERNEL);
+ if (!info)
+ return -ENOMEM;
+
+ sd = &info->sd;
+ strcpy(sd->name, MODULE_NAME);
+ info->pdata = client->dev.platform_data;
+
+ v4l2_i2c_subdev_init(sd, client, &sr030pc30_ops);
+
+ info->i2c_reg_page = -1;
+ info->hflip = 1;
+ info->auto_exp = 1;
+ info->exposure = 30;
+
+ return 0;
+}
+
+static int sr030pc30_remove(struct i2c_client *client)
+{
+ struct v4l2_subdev *sd = i2c_get_clientdata(client);
+ struct sr030pc30_info *info = to_sr030pc30(sd);
+
+ v4l2_device_unregister_subdev(sd);
+ kfree(info);
+ return 0;
+}
+
+static const struct i2c_device_id sr030pc30_id[] = {
+ { MODULE_NAME, 0 },
+ { },
+};
+MODULE_DEVICE_TABLE(i2c, sr030pc30_id);
+
+
+static struct i2c_driver sr030pc30_i2c_driver = {
+ .driver = {
+ .name = MODULE_NAME
+ },
+ .probe = sr030pc30_probe,
+ .remove = sr030pc30_remove,
+ .id_table = sr030pc30_id,
+};
+
+static int __init sr030pc30_init(void)
+{
+ return i2c_add_driver(&sr030pc30_i2c_driver);
+}
+
+static void __exit sr030pc30_exit(void)
+{
+ i2c_del_driver(&sr030pc30_i2c_driver);
+}
+
+module_init(sr030pc30_init);
+module_exit(sr030pc30_exit);
+
+MODULE_DESCRIPTION("Siliconfile SR030PC30 camera driver");
+MODULE_AUTHOR("Sylwester Nawrocki <s.nawrocki@samsung.com>");
+MODULE_LICENSE("GPL");
diff --git a/drivers/media/video/stk-webcam.c b/drivers/media/video/stk-webcam.c
index f07a0f6b71c4..b5afe5f841ce 100644
--- a/drivers/media/video/stk-webcam.c
+++ b/drivers/media/video/stk-webcam.c
@@ -27,7 +27,6 @@
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/slab.h>
-#include <linux/smp_lock.h>
#include <linux/usb.h>
#include <linux/mm.h>
@@ -673,14 +672,11 @@ static int v4l_stk_open(struct file *fp)
vdev = video_devdata(fp);
dev = vdev_to_camera(vdev);
- lock_kernel();
if (dev == NULL || !is_present(dev)) {
- unlock_kernel();
return -ENXIO;
}
fp->private_data = dev;
usb_autopm_get_interface(dev->interface);
- unlock_kernel();
return 0;
}
diff --git a/drivers/media/video/tda7432.c b/drivers/media/video/tda7432.c
index 80f1cee23fa5..3941f954daf4 100644
--- a/drivers/media/video/tda7432.c
+++ b/drivers/media/video/tda7432.c
@@ -36,7 +36,6 @@
#include <media/v4l2-device.h>
#include <media/v4l2-ioctl.h>
#include <media/i2c-addr.h>
-#include <media/v4l2-i2c-drv.h>
#ifndef VIDEO_AUDIO_BALANCE
# define VIDEO_AUDIO_BALANCE 32
@@ -472,9 +471,25 @@ static const struct i2c_device_id tda7432_id[] = {
};
MODULE_DEVICE_TABLE(i2c, tda7432_id);
-static struct v4l2_i2c_driver_data v4l2_i2c_data = {
- .name = "tda7432",
- .probe = tda7432_probe,
- .remove = tda7432_remove,
- .id_table = tda7432_id,
+static struct i2c_driver tda7432_driver = {
+ .driver = {
+ .owner = THIS_MODULE,
+ .name = "tda7432",
+ },
+ .probe = tda7432_probe,
+ .remove = tda7432_remove,
+ .id_table = tda7432_id,
};
+
+static __init int init_tda7432(void)
+{
+ return i2c_add_driver(&tda7432_driver);
+}
+
+static __exit void exit_tda7432(void)
+{
+ i2c_del_driver(&tda7432_driver);
+}
+
+module_init(init_tda7432);
+module_exit(exit_tda7432);
diff --git a/drivers/media/video/tda9840.c b/drivers/media/video/tda9840.c
index 92d22d8931c1..5d4cf3b3d435 100644
--- a/drivers/media/video/tda9840.c
+++ b/drivers/media/video/tda9840.c
@@ -32,7 +32,6 @@
#include <linux/i2c.h>
#include <media/v4l2-device.h>
#include <media/v4l2-chip-ident.h>
-#include <media/v4l2-i2c-drv.h>
MODULE_AUTHOR("Michael Hunold <michael@mihu.de>");
MODULE_DESCRIPTION("tda9840 driver");
@@ -199,9 +198,25 @@ static const struct i2c_device_id tda9840_id[] = {
};
MODULE_DEVICE_TABLE(i2c, tda9840_id);
-static struct v4l2_i2c_driver_data v4l2_i2c_data = {
- .name = "tda9840",
- .probe = tda9840_probe,
- .remove = tda9840_remove,
- .id_table = tda9840_id,
+static struct i2c_driver tda9840_driver = {
+ .driver = {
+ .owner = THIS_MODULE,
+ .name = "tda9840",
+ },
+ .probe = tda9840_probe,
+ .remove = tda9840_remove,
+ .id_table = tda9840_id,
};
+
+static __init int init_tda9840(void)
+{
+ return i2c_add_driver(&tda9840_driver);
+}
+
+static __exit void exit_tda9840(void)
+{
+ i2c_del_driver(&tda9840_driver);
+}
+
+module_init(init_tda9840);
+module_exit(exit_tda9840);
diff --git a/drivers/media/video/tda9875.c b/drivers/media/video/tda9875.c
index 24e2b7d2ae58..35b6ff5db319 100644
--- a/drivers/media/video/tda9875.c
+++ b/drivers/media/video/tda9875.c
@@ -28,7 +28,6 @@
#include <linux/i2c.h>
#include <linux/videodev2.h>
#include <media/v4l2-device.h>
-#include <media/v4l2-i2c-drv.h>
#include <media/i2c-addr.h>
static int debug; /* insmod parameter */
@@ -388,9 +387,25 @@ static const struct i2c_device_id tda9875_id[] = {
};
MODULE_DEVICE_TABLE(i2c, tda9875_id);
-static struct v4l2_i2c_driver_data v4l2_i2c_data = {
- .name = "tda9875",
- .probe = tda9875_probe,
- .remove = tda9875_remove,
- .id_table = tda9875_id,
+static struct i2c_driver tda9875_driver = {
+ .driver = {
+ .owner = THIS_MODULE,
+ .name = "tda9875",
+ },
+ .probe = tda9875_probe,
+ .remove = tda9875_remove,
+ .id_table = tda9875_id,
};
+
+static __init int init_tda9875(void)
+{
+ return i2c_add_driver(&tda9875_driver);
+}
+
+static __exit void exit_tda9875(void)
+{
+ i2c_del_driver(&tda9875_driver);
+}
+
+module_init(init_tda9875);
+module_exit(exit_tda9875);
diff --git a/drivers/media/video/tea6415c.c b/drivers/media/video/tea6415c.c
index 3021a1e6b7bb..3e99cea8e4dc 100644
--- a/drivers/media/video/tea6415c.c
+++ b/drivers/media/video/tea6415c.c
@@ -34,7 +34,6 @@
#include <linux/i2c.h>
#include <media/v4l2-device.h>
#include <media/v4l2-chip-ident.h>
-#include <media/v4l2-i2c-drv.h>
#include "tea6415c.h"
MODULE_AUTHOR("Michael Hunold <michael@mihu.de>");
@@ -175,9 +174,25 @@ static const struct i2c_device_id tea6415c_id[] = {
};
MODULE_DEVICE_TABLE(i2c, tea6415c_id);
-static struct v4l2_i2c_driver_data v4l2_i2c_data = {
- .name = "tea6415c",
- .probe = tea6415c_probe,
- .remove = tea6415c_remove,
- .id_table = tea6415c_id,
+static struct i2c_driver tea6415c_driver = {
+ .driver = {
+ .owner = THIS_MODULE,
+ .name = "tea6415c",
+ },
+ .probe = tea6415c_probe,
+ .remove = tea6415c_remove,
+ .id_table = tea6415c_id,
};
+
+static __init int init_tea6415c(void)
+{
+ return i2c_add_driver(&tea6415c_driver);
+}
+
+static __exit void exit_tea6415c(void)
+{
+ i2c_del_driver(&tea6415c_driver);
+}
+
+module_init(init_tea6415c);
+module_exit(exit_tea6415c);
diff --git a/drivers/media/video/tea6420.c b/drivers/media/video/tea6420.c
index 49dafc5e1e2f..5ea840401f21 100644
--- a/drivers/media/video/tea6420.c
+++ b/drivers/media/video/tea6420.c
@@ -34,7 +34,6 @@
#include <linux/i2c.h>
#include <media/v4l2-device.h>
#include <media/v4l2-chip-ident.h>
-#include <media/v4l2-i2c-drv.h>
#include "tea6420.h"
MODULE_AUTHOR("Michael Hunold <michael@mihu.de>");
@@ -157,9 +156,25 @@ static const struct i2c_device_id tea6420_id[] = {
};
MODULE_DEVICE_TABLE(i2c, tea6420_id);
-static struct v4l2_i2c_driver_data v4l2_i2c_data = {
- .name = "tea6420",
- .probe = tea6420_probe,
- .remove = tea6420_remove,
- .id_table = tea6420_id,
+static struct i2c_driver tea6420_driver = {
+ .driver = {
+ .owner = THIS_MODULE,
+ .name = "tea6420",
+ },
+ .probe = tea6420_probe,
+ .remove = tea6420_remove,
+ .id_table = tea6420_id,
};
+
+static __init int init_tea6420(void)
+{
+ return i2c_add_driver(&tea6420_driver);
+}
+
+static __exit void exit_tea6420(void)
+{
+ i2c_del_driver(&tea6420_driver);
+}
+
+module_init(init_tea6420);
+module_exit(exit_tea6420);
diff --git a/drivers/media/video/tlg2300/pd-main.c b/drivers/media/video/tlg2300/pd-main.c
index 4555f4a5f4c8..c91424c0c135 100644
--- a/drivers/media/video/tlg2300/pd-main.c
+++ b/drivers/media/video/tlg2300/pd-main.c
@@ -36,7 +36,6 @@
#include <linux/string.h>
#include <linux/types.h>
#include <linux/firmware.h>
-#include <linux/smp_lock.h>
#include "vendorcmds.h"
#include "pd-common.h"
@@ -485,15 +484,11 @@ static void poseidon_disconnect(struct usb_interface *interface)
/*unregister v4l2 device */
v4l2_device_unregister(&pd->v4l2_dev);
- lock_kernel();
- {
- pd_dvb_usb_device_exit(pd);
- poseidon_fm_exit(pd);
+ pd_dvb_usb_device_exit(pd);
+ poseidon_fm_exit(pd);
- poseidon_audio_free(pd);
- pd_video_exit(pd);
- }
- unlock_kernel();
+ poseidon_audio_free(pd);
+ pd_video_exit(pd);
usb_set_intfdata(interface, NULL);
kref_put(&pd->kref, poseidon_delete);
diff --git a/drivers/media/video/tlg2300/pd-video.c b/drivers/media/video/tlg2300/pd-video.c
index d0cc012f7ae6..a1ffe18640fe 100644
--- a/drivers/media/video/tlg2300/pd-video.c
+++ b/drivers/media/video/tlg2300/pd-video.c
@@ -1434,7 +1434,7 @@ static int pd_video_open(struct file *file)
V4L2_BUF_TYPE_VIDEO_CAPTURE,
V4L2_FIELD_INTERLACED, /* video is interlaced */
sizeof(struct videobuf_buffer), /* it's enough */
- front);
+ front, NULL);
} else if (vfd->vfl_type == VFL_TYPE_VBI
&& !(pd->state & POSEIDON_STATE_VBI)) {
front = kzalloc(sizeof(struct front_face), GFP_KERNEL);
@@ -1451,7 +1451,7 @@ static int pd_video_open(struct file *file)
V4L2_BUF_TYPE_VBI_CAPTURE,
V4L2_FIELD_NONE, /* vbi is NONE mode */
sizeof(struct videobuf_buffer),
- front);
+ front, NULL);
} else {
/* maybe add FM support here */
log("other ");
diff --git a/drivers/media/video/tlv320aic23b.c b/drivers/media/video/tlv320aic23b.c
index 9ddb32bc7af0..dfc4dd7c5097 100644
--- a/drivers/media/video/tlv320aic23b.c
+++ b/drivers/media/video/tlv320aic23b.c
@@ -29,10 +29,8 @@
#include <linux/ioctl.h>
#include <asm/uaccess.h>
#include <linux/i2c.h>
-#include <linux/i2c-id.h>
#include <linux/videodev2.h>
#include <media/v4l2-device.h>
-#include <media/v4l2-i2c-drv.h>
MODULE_DESCRIPTION("tlv320aic23b driver");
MODULE_AUTHOR("Scott Alfter, Ulf Eklund, Hans Verkuil");
@@ -199,9 +197,25 @@ static const struct i2c_device_id tlv320aic23b_id[] = {
};
MODULE_DEVICE_TABLE(i2c, tlv320aic23b_id);
-static struct v4l2_i2c_driver_data v4l2_i2c_data = {
- .name = "tlv320aic23b",
- .probe = tlv320aic23b_probe,
- .remove = tlv320aic23b_remove,
- .id_table = tlv320aic23b_id,
+static struct i2c_driver tlv320aic23b_driver = {
+ .driver = {
+ .owner = THIS_MODULE,
+ .name = "tlv320aic23b",
+ },
+ .probe = tlv320aic23b_probe,
+ .remove = tlv320aic23b_remove,
+ .id_table = tlv320aic23b_id,
};
+
+static __init int init_tlv320aic23b(void)
+{
+ return i2c_add_driver(&tlv320aic23b_driver);
+}
+
+static __exit void exit_tlv320aic23b(void)
+{
+ i2c_del_driver(&tlv320aic23b_driver);
+}
+
+module_init(init_tlv320aic23b);
+module_exit(exit_tlv320aic23b);
diff --git a/drivers/media/video/tuner-core.c b/drivers/media/video/tuner-core.c
index c4dab6cfd948..1cec1224913f 100644
--- a/drivers/media/video/tuner-core.c
+++ b/drivers/media/video/tuner-core.c
@@ -20,7 +20,6 @@
#include <media/tuner-types.h>
#include <media/v4l2-device.h>
#include <media/v4l2-ioctl.h>
-#include <media/v4l2-i2c-drv.h>
#include "mt20xx.h"
#include "tda8290.h"
#include "tea5761.h"
@@ -428,6 +427,7 @@ static void set_type(struct i2c_client *c, unsigned int type,
{
struct tda18271_config cfg = {
.config = t->config,
+ .small_i2c = TDA18271_03_BYTE_CHUNK_INIT,
};
if (!dvb_attach(tda18271_attach, &t->fe, t->i2c->addr,
@@ -1053,12 +1053,6 @@ static int tuner_probe(struct i2c_client *client,
printk(KERN_CONT "%02x ", buffer[i]);
printk("\n");
}
- /* HACK: This test was added to avoid tuner to probe tda9840 and
- tea6415c on the MXB card */
- if (client->adapter->id == I2C_HW_SAA7146 && client->addr < 0x4a) {
- kfree(t);
- return -ENODEV;
- }
/* autodetection code based on the i2c addr */
if (!no_autodetect) {
@@ -1176,16 +1170,32 @@ static const struct i2c_device_id tuner_id[] = {
};
MODULE_DEVICE_TABLE(i2c, tuner_id);
-static struct v4l2_i2c_driver_data v4l2_i2c_data = {
- .name = "tuner",
- .probe = tuner_probe,
- .remove = tuner_remove,
- .command = tuner_command,
- .suspend = tuner_suspend,
- .resume = tuner_resume,
- .id_table = tuner_id,
+static struct i2c_driver tuner_driver = {
+ .driver = {
+ .owner = THIS_MODULE,
+ .name = "tuner",
+ },
+ .probe = tuner_probe,
+ .remove = tuner_remove,
+ .command = tuner_command,
+ .suspend = tuner_suspend,
+ .resume = tuner_resume,
+ .id_table = tuner_id,
};
+static __init int init_tuner(void)
+{
+ return i2c_add_driver(&tuner_driver);
+}
+
+static __exit void exit_tuner(void)
+{
+ i2c_del_driver(&tuner_driver);
+}
+
+module_init(init_tuner);
+module_exit(exit_tuner);
+
/*
* Overrides for Emacs so that we follow Linus's tabbing style.
* ---------------------------------------------------------------------------
diff --git a/drivers/media/video/tvaudio.c b/drivers/media/video/tvaudio.c
index 800fc1b111ef..a25e2b5e1944 100644
--- a/drivers/media/video/tvaudio.c
+++ b/drivers/media/video/tvaudio.c
@@ -35,7 +35,6 @@
#include <media/tvaudio.h>
#include <media/v4l2-device.h>
#include <media/v4l2-chip-ident.h>
-#include <media/v4l2-i2c-drv.h>
#include <media/i2c-addr.h>
@@ -1227,18 +1226,6 @@ static int tea6320_initialize(struct CHIPSTATE * chip)
static int tda8425_shift10(int val) { return (val >> 10) | 0xc0; }
static int tda8425_shift12(int val) { return (val >> 12) | 0xf0; }
-static int tda8425_initialize(struct CHIPSTATE *chip)
-{
- struct CHIPDESC *desc = chip->desc;
- struct i2c_client *c = v4l2_get_subdevdata(&chip->sd);
- int inputmap[4] = { /* tuner */ TDA8425_S1_CH2, /* radio */ TDA8425_S1_CH1,
- /* extern */ TDA8425_S1_CH1, /* intern */ TDA8425_S1_OFF};
-
- if (c->adapter->id == I2C_HW_B_RIVA)
- memcpy(desc->inputmap, inputmap, sizeof(inputmap));
- return 0;
-}
-
static void tda8425_setmode(struct CHIPSTATE *chip, int mode)
{
int s1 = chip->shadow.bytes[TDA8425_S1+1] & 0xe1;
@@ -1574,7 +1561,6 @@ static struct CHIPDESC chiplist[] = {
.treblereg = TDA8425_TR,
/* callbacks */
- .initialize = tda8425_initialize,
.volfunc = tda8425_shift10,
.bassfunc = tda8425_shift12,
.treblefunc = tda8425_shift12,
@@ -2079,9 +2065,25 @@ static const struct i2c_device_id tvaudio_id[] = {
};
MODULE_DEVICE_TABLE(i2c, tvaudio_id);
-static struct v4l2_i2c_driver_data v4l2_i2c_data = {
- .name = "tvaudio",
- .probe = tvaudio_probe,
- .remove = tvaudio_remove,
- .id_table = tvaudio_id,
+static struct i2c_driver tvaudio_driver = {
+ .driver = {
+ .owner = THIS_MODULE,
+ .name = "tvaudio",
+ },
+ .probe = tvaudio_probe,
+ .remove = tvaudio_remove,
+ .id_table = tvaudio_id,
};
+
+static __init int init_tvaudio(void)
+{
+ return i2c_add_driver(&tvaudio_driver);
+}
+
+static __exit void exit_tvaudio(void)
+{
+ i2c_del_driver(&tvaudio_driver);
+}
+
+module_init(init_tvaudio);
+module_exit(exit_tvaudio);
diff --git a/drivers/media/video/tvp514x.c b/drivers/media/video/tvp514x.c
index 71c73fa0d68c..45bcf0358a1d 100644
--- a/drivers/media/video/tvp514x.c
+++ b/drivers/media/video/tvp514x.c
@@ -35,6 +35,7 @@
#include <media/v4l2-device.h>
#include <media/v4l2-common.h>
+#include <media/v4l2-mediabus.h>
#include <media/v4l2-chip-ident.h>
#include <media/tvp514x.h>
@@ -929,69 +930,51 @@ tvp514x_s_ctrl(struct v4l2_subdev *sd, struct v4l2_control *ctrl)
}
/**
- * tvp514x_enum_fmt_cap() - V4L2 decoder interface handler for enum_fmt
+ * tvp514x_enum_mbus_fmt() - V4L2 decoder interface handler for enum_mbus_fmt
* @sd: pointer to standard V4L2 sub-device structure
- * @fmt: standard V4L2 VIDIOC_ENUM_FMT ioctl structure
+ * @index: index of pixelcode to retrieve
+ * @code: receives the pixelcode
*
- * Implement the VIDIOC_ENUM_FMT ioctl to enumerate supported formats
+ * Enumerates supported mediabus formats
*/
static int
-tvp514x_enum_fmt_cap(struct v4l2_subdev *sd, struct v4l2_fmtdesc *fmt)
+tvp514x_enum_mbus_fmt(struct v4l2_subdev *sd, unsigned index,
+ enum v4l2_mbus_pixelcode *code)
{
- if (fmt == NULL || fmt->index)
+ if (index)
return -EINVAL;
- if (fmt->type != V4L2_BUF_TYPE_VIDEO_CAPTURE)
- /* only capture is supported */
- return -EINVAL;
-
- /* only one format */
- fmt->flags = 0;
- strlcpy(fmt->description, "8-bit UYVY 4:2:2 Format",
- sizeof(fmt->description));
- fmt->pixelformat = V4L2_PIX_FMT_UYVY;
+ *code = V4L2_MBUS_FMT_YUYV10_2X10;
return 0;
}
/**
- * tvp514x_fmt_cap() - V4L2 decoder interface handler for try/s/g_fmt
+ * tvp514x_mbus_fmt() - V4L2 decoder interface handler for try/s/g_mbus_fmt
* @sd: pointer to standard V4L2 sub-device structure
- * @f: pointer to standard V4L2 VIDIOC_TRY_FMT ioctl structure
+ * @f: pointer to the mediabus format structure
*
- * Implement the VIDIOC_TRY/S/G_FMT ioctl for the CAPTURE buffer type. This
- * ioctl is used to negotiate the image capture size and pixel format.
+ * Negotiates the image capture size and mediabus format.
*/
static int
-tvp514x_fmt_cap(struct v4l2_subdev *sd, struct v4l2_format *f)
+tvp514x_mbus_fmt(struct v4l2_subdev *sd, struct v4l2_mbus_framefmt *f)
{
struct tvp514x_decoder *decoder = to_decoder(sd);
- struct v4l2_pix_format *pix;
enum tvp514x_std current_std;
if (f == NULL)
return -EINVAL;
- if (f->type != V4L2_BUF_TYPE_VIDEO_CAPTURE)
- return -EINVAL;
-
- pix = &f->fmt.pix;
-
/* Calculate height and width based on current standard */
current_std = decoder->current_std;
- pix->pixelformat = V4L2_PIX_FMT_UYVY;
- pix->width = decoder->std_list[current_std].width;
- pix->height = decoder->std_list[current_std].height;
- pix->field = V4L2_FIELD_INTERLACED;
- pix->bytesperline = pix->width * 2;
- pix->sizeimage = pix->bytesperline * pix->height;
- pix->colorspace = V4L2_COLORSPACE_SMPTE170M;
- pix->priv = 0;
-
- v4l2_dbg(1, debug, sd, "FMT: bytesperline - %d"
- "Width - %d, Height - %d\n",
- pix->bytesperline,
- pix->width, pix->height);
+ f->code = V4L2_MBUS_FMT_YUYV10_2X10;
+ f->width = decoder->std_list[current_std].width;
+ f->height = decoder->std_list[current_std].height;
+ f->field = V4L2_FIELD_INTERLACED;
+ f->colorspace = V4L2_COLORSPACE_SMPTE170M;
+
+ v4l2_dbg(1, debug, sd, "MBUS_FMT: Width - %d, Height - %d\n",
+ f->width, f->height);
return 0;
}
@@ -1131,10 +1114,10 @@ static const struct v4l2_subdev_core_ops tvp514x_core_ops = {
static const struct v4l2_subdev_video_ops tvp514x_video_ops = {
.s_routing = tvp514x_s_routing,
.querystd = tvp514x_querystd,
- .enum_fmt = tvp514x_enum_fmt_cap,
- .g_fmt = tvp514x_fmt_cap,
- .try_fmt = tvp514x_fmt_cap,
- .s_fmt = tvp514x_fmt_cap,
+ .enum_mbus_fmt = tvp514x_enum_mbus_fmt,
+ .g_mbus_fmt = tvp514x_mbus_fmt,
+ .try_mbus_fmt = tvp514x_mbus_fmt,
+ .s_mbus_fmt = tvp514x_mbus_fmt,
.g_parm = tvp514x_g_parm,
.s_parm = tvp514x_s_parm,
.s_stream = tvp514x_s_stream,
diff --git a/drivers/media/video/tvp5150.c b/drivers/media/video/tvp5150.c
index 1654f65cca7c..58927664d3ea 100644
--- a/drivers/media/video/tvp5150.c
+++ b/drivers/media/video/tvp5150.c
@@ -11,7 +11,6 @@
#include <linux/delay.h>
#include <media/v4l2-device.h>
#include <media/tvp5150.h>
-#include <media/v4l2-i2c-drv.h>
#include <media/v4l2-chip-ident.h>
#include "tvp5150_reg.h"
@@ -277,7 +276,7 @@ static int tvp5150_log_status(struct v4l2_subdev *sd)
static inline void tvp5150_selmux(struct v4l2_subdev *sd)
{
- int opmode=0;
+ int opmode = 0;
struct tvp5150 *decoder = to_tvp5150(sd);
int input = 0;
unsigned char val;
@@ -290,12 +289,10 @@ static inline void tvp5150_selmux(struct v4l2_subdev *sd)
input |= 2;
/* fall through */
case TVP5150_COMPOSITE0:
- opmode=0x30; /* TV Mode */
break;
case TVP5150_SVIDEO:
default:
input |= 1;
- opmode=0; /* Auto Mode */
break;
}
@@ -1111,9 +1108,25 @@ static const struct i2c_device_id tvp5150_id[] = {
};
MODULE_DEVICE_TABLE(i2c, tvp5150_id);
-static struct v4l2_i2c_driver_data v4l2_i2c_data = {
- .name = "tvp5150",
- .probe = tvp5150_probe,
- .remove = tvp5150_remove,
- .id_table = tvp5150_id,
+static struct i2c_driver tvp5150_driver = {
+ .driver = {
+ .owner = THIS_MODULE,
+ .name = "tvp5150",
+ },
+ .probe = tvp5150_probe,
+ .remove = tvp5150_remove,
+ .id_table = tvp5150_id,
};
+
+static __init int init_tvp5150(void)
+{
+ return i2c_add_driver(&tvp5150_driver);
+}
+
+static __exit void exit_tvp5150(void)
+{
+ i2c_del_driver(&tvp5150_driver);
+}
+
+module_init(init_tvp5150);
+module_exit(exit_tvp5150);
diff --git a/drivers/media/video/tvp7002.c b/drivers/media/video/tvp7002.c
index 48f5c76ab521..e63b40f5a706 100644
--- a/drivers/media/video/tvp7002.c
+++ b/drivers/media/video/tvp7002.c
@@ -330,19 +330,6 @@ static const struct i2c_reg_value tvp7002_parms_720P50[] = {
{ TVP7002_EOR, 0xff, TVP7002_RESERVED }
};
-/* Struct list for available formats */
-static const struct v4l2_fmtdesc tvp7002_fmt_list[] = {
- {
- .index = 0,
- .type = V4L2_BUF_TYPE_VIDEO_CAPTURE,
- .flags = 0,
- .description = "8-bit UYVY 4:2:2 Format",
- .pixelformat = V4L2_PIX_FMT_UYVY,
- },
-};
-
-#define NUM_FORMATS ARRAY_SIZE(tvp7002_fmt_list)
-
/* Preset definition for handling device operation */
struct tvp7002_preset_definition {
u32 preset;
@@ -439,7 +426,6 @@ struct tvp7002 {
int ver;
int streaming;
- struct v4l2_pix_format pix;
const struct tvp7002_preset_definition *current_preset;
u8 gain;
};
@@ -695,81 +681,33 @@ static int tvp7002_queryctrl(struct v4l2_subdev *sd, struct v4l2_queryctrl *qc)
}
/*
- * tvp7002_try_fmt_cap() - V4L2 decoder interface handler for try_fmt
+ * tvp7002_mbus_fmt() - V4L2 decoder interface handler for try/s/g_mbus_fmt
* @sd: pointer to standard V4L2 sub-device structure
- * @f: pointer to standard V4L2 VIDIOC_TRY_FMT ioctl structure
+ * @f: pointer to mediabus format structure
*
- * Implement the VIDIOC_TRY_FMT ioctl for the CAPTURE buffer type. This
- * ioctl is used to negotiate the image capture size and pixel format
- * without actually making it take effect.
+ * Negotiate the image capture size and mediabus format.
+ * There is only one possible format, so this single function works for
+ * get, set and try.
*/
-static int tvp7002_try_fmt_cap(struct v4l2_subdev *sd, struct v4l2_format *f)
+static int tvp7002_mbus_fmt(struct v4l2_subdev *sd, struct v4l2_mbus_framefmt *f)
{
struct tvp7002 *device = to_tvp7002(sd);
struct v4l2_dv_enum_preset e_preset;
- struct v4l2_pix_format *pix;
- int error = 0;
-
- pix = &f->fmt.pix;
+ int error;
/* Calculate height and width based on current standard */
error = v4l_fill_dv_preset_info(device->current_preset->preset, &e_preset);
if (error)
- return -EINVAL;
-
- pix->width = e_preset.width;
- pix->height = e_preset.height;
- pix->pixelformat = V4L2_PIX_FMT_UYVY;
- pix->field = device->current_preset->scanmode;
- pix->bytesperline = pix->width * 2;
- pix->sizeimage = pix->bytesperline * pix->height;
- pix->colorspace = device->current_preset->color_space;
- pix->priv = 0;
-
- v4l2_dbg(1, debug, sd, "Try FMT: pixelformat - %s, bytesperline - %d"
- "Width - %d, Height - %d", "8-bit UYVY 4:2:2 Format",
- pix->bytesperline, pix->width, pix->height);
- return error;
-}
-
-/*
- * tvp7002_s_fmt() - V4L2 decoder interface handler for s_fmt
- * @sd: pointer to standard V4L2 sub-device structure
- * @f: pointer to standard V4L2 VIDIOC_S_FMT ioctl structure
- *
- * If the requested format is supported, configures the HW to use that
- * format, returns error code if format not supported or HW can't be
- * correctly configured.
- */
-static int tvp7002_s_fmt(struct v4l2_subdev *sd, struct v4l2_format *f)
-{
- struct tvp7002 *decoder = to_tvp7002(sd);
- int rval;
-
- rval = tvp7002_try_fmt_cap(sd, f);
- if (!rval)
- decoder->pix = f->fmt.pix;
- return rval;
-}
-
-/*
- * tvp7002_g_fmt() - V4L2 decoder interface handler for tvp7002_g_fmt
- * @sd: pointer to standard V4L2 sub-device structure
- * @f: pointer to standard V4L2 v4l2_format structure
- *
- * Returns the decoder's current pixel format in the v4l2_format
- * parameter.
- */
-static int tvp7002_g_fmt(struct v4l2_subdev *sd, struct v4l2_format *f)
-{
- struct tvp7002 *decoder = to_tvp7002(sd);
+ return error;
- f->fmt.pix = decoder->pix;
+ f->width = e_preset.width;
+ f->height = e_preset.height;
+ f->code = V4L2_MBUS_FMT_YUYV10_1X20;
+ f->field = device->current_preset->scanmode;
+ f->colorspace = device->current_preset->color_space;
- v4l2_dbg(1, debug, sd, "Current FMT: bytesperline - %d"
- "Width - %d, Height - %d",
- decoder->pix.bytesperline,
- decoder->pix.width, decoder->pix.height);
+ v4l2_dbg(1, debug, sd, "MBUS_FMT: Width - %d, Height - %d",
+ f->width, f->height);
return 0;
}
@@ -894,21 +832,21 @@ static int tvp7002_s_register(struct v4l2_subdev *sd,
#endif
/*
- * tvp7002_enum_fmt() - Enum supported formats
+ * tvp7002_enum_mbus_fmt() - Enum supported mediabus formats
* @sd: pointer to standard V4L2 sub-device structure
- * @fmtdesc: pointer to format struct
+ * @index: format index
+ * @code: pointer to mediabus format
*
- * Enumerate supported formats.
+ * Enumerate supported mediabus formats.
*/
-static int tvp7002_enum_fmt(struct v4l2_subdev *sd,
- struct v4l2_fmtdesc *fmtdesc)
+static int tvp7002_enum_mbus_fmt(struct v4l2_subdev *sd, unsigned index,
+ enum v4l2_mbus_pixelcode *code)
{
/* Check requested format index is within range */
- if (fmtdesc->index < 0 || fmtdesc->index >= NUM_FORMATS)
+ if (index)
return -EINVAL;
- *fmtdesc = tvp7002_fmt_list[fmtdesc->index];
-
+ *code = V4L2_MBUS_FMT_YUYV10_1X20;
return 0;
}
@@ -1027,9 +965,10 @@ static const struct v4l2_subdev_video_ops tvp7002_video_ops = {
.s_dv_preset = tvp7002_s_dv_preset,
.query_dv_preset = tvp7002_query_dv_preset,
.s_stream = tvp7002_s_stream,
- .g_fmt = tvp7002_g_fmt,
- .s_fmt = tvp7002_s_fmt,
- .enum_fmt = tvp7002_enum_fmt,
+ .g_mbus_fmt = tvp7002_mbus_fmt,
+ .try_mbus_fmt = tvp7002_mbus_fmt,
+ .s_mbus_fmt = tvp7002_mbus_fmt,
+ .enum_mbus_fmt = tvp7002_enum_mbus_fmt,
};
/* V4L2 top level operation handlers */
@@ -1040,17 +979,6 @@ static const struct v4l2_subdev_ops tvp7002_ops = {
static struct tvp7002 tvp7002_dev = {
.streaming = 0,
-
- .pix = {
- .width = 1280,
- .height = 720,
- .pixelformat = V4L2_PIX_FMT_UYVY,
- .field = V4L2_FIELD_NONE,
- .bytesperline = 1280 * 2,
- .sizeimage = 1280 * 2 * 720,
- .colorspace = V4L2_COLORSPACE_REC709,
- },
-
.current_preset = tvp7002_presets,
.gain = 0,
};
diff --git a/drivers/media/video/tw9910.c b/drivers/media/video/tw9910.c
index a727962781a3..0347bbe36459 100644
--- a/drivers/media/video/tw9910.c
+++ b/drivers/media/video/tw9910.c
@@ -469,7 +469,7 @@ tw9910_select_norm(struct soc_camera_device *icd, u32 width, u32 height)
*/
static int tw9910_s_stream(struct v4l2_subdev *sd, int enable)
{
- struct i2c_client *client = sd->priv;
+ struct i2c_client *client = v4l2_get_subdevdata(sd);
struct tw9910_priv *priv = to_tw9910(client);
u8 val;
int ret;
@@ -511,7 +511,7 @@ static int tw9910_set_bus_param(struct soc_camera_device *icd,
unsigned long flags)
{
struct v4l2_subdev *sd = soc_camera_to_subdev(icd);
- struct i2c_client *client = sd->priv;
+ struct i2c_client *client = v4l2_get_subdevdata(sd);
u8 val = VSSL_VVALID | HSSL_DVALID;
/*
@@ -565,7 +565,7 @@ static int tw9910_enum_input(struct soc_camera_device *icd,
static int tw9910_g_chip_ident(struct v4l2_subdev *sd,
struct v4l2_dbg_chip_ident *id)
{
- struct i2c_client *client = sd->priv;
+ struct i2c_client *client = v4l2_get_subdevdata(sd);
struct tw9910_priv *priv = to_tw9910(client);
id->ident = V4L2_IDENT_TW9910;
@@ -578,7 +578,7 @@ static int tw9910_g_chip_ident(struct v4l2_subdev *sd,
static int tw9910_g_register(struct v4l2_subdev *sd,
struct v4l2_dbg_register *reg)
{
- struct i2c_client *client = sd->priv;
+ struct i2c_client *client = v4l2_get_subdevdata(sd);
int ret;
if (reg->reg > 0xff)
@@ -600,7 +600,7 @@ static int tw9910_g_register(struct v4l2_subdev *sd,
static int tw9910_s_register(struct v4l2_subdev *sd,
struct v4l2_dbg_register *reg)
{
- struct i2c_client *client = sd->priv;
+ struct i2c_client *client = v4l2_get_subdevdata(sd);
if (reg->reg > 0xff ||
reg->val > 0xff)
@@ -613,7 +613,7 @@ static int tw9910_s_register(struct v4l2_subdev *sd,
static int tw9910_s_crop(struct v4l2_subdev *sd, struct v4l2_crop *a)
{
struct v4l2_rect *rect = &a->c;
- struct i2c_client *client = sd->priv;
+ struct i2c_client *client = v4l2_get_subdevdata(sd);
struct tw9910_priv *priv = to_tw9910(client);
struct soc_camera_device *icd = client->dev.platform_data;
int ret = -EINVAL;
@@ -701,7 +701,7 @@ tw9910_set_fmt_error:
static int tw9910_g_crop(struct v4l2_subdev *sd, struct v4l2_crop *a)
{
- struct i2c_client *client = sd->priv;
+ struct i2c_client *client = v4l2_get_subdevdata(sd);
struct tw9910_priv *priv = to_tw9910(client);
if (!priv->scale) {
@@ -748,7 +748,7 @@ static int tw9910_cropcap(struct v4l2_subdev *sd, struct v4l2_cropcap *a)
static int tw9910_g_fmt(struct v4l2_subdev *sd,
struct v4l2_mbus_framefmt *mf)
{
- struct i2c_client *client = sd->priv;
+ struct i2c_client *client = v4l2_get_subdevdata(sd);
struct tw9910_priv *priv = to_tw9910(client);
if (!priv->scale) {
@@ -778,7 +778,7 @@ static int tw9910_g_fmt(struct v4l2_subdev *sd,
static int tw9910_s_fmt(struct v4l2_subdev *sd,
struct v4l2_mbus_framefmt *mf)
{
- struct i2c_client *client = sd->priv;
+ struct i2c_client *client = v4l2_get_subdevdata(sd);
struct tw9910_priv *priv = to_tw9910(client);
/* See tw9910_s_crop() - no proper cropping support */
struct v4l2_crop a = {
@@ -813,7 +813,7 @@ static int tw9910_s_fmt(struct v4l2_subdev *sd,
static int tw9910_try_fmt(struct v4l2_subdev *sd,
struct v4l2_mbus_framefmt *mf)
{
- struct i2c_client *client = sd->priv;
+ struct i2c_client *client = v4l2_get_subdevdata(sd);
struct soc_camera_device *icd = client->dev.platform_data;
const struct tw9910_scale_ctrl *scale;
diff --git a/drivers/media/video/upd64031a.c b/drivers/media/video/upd64031a.c
index 36c0c461d8be..f8138c75be8b 100644
--- a/drivers/media/video/upd64031a.c
+++ b/drivers/media/video/upd64031a.c
@@ -28,7 +28,6 @@
#include <linux/slab.h>
#include <media/v4l2-device.h>
#include <media/v4l2-chip-ident.h>
-#include <media/v4l2-i2c-drv.h>
#include <media/upd64031a.h>
/* --------------------- read registers functions define -------------------- */
@@ -262,9 +261,25 @@ static const struct i2c_device_id upd64031a_id[] = {
};
MODULE_DEVICE_TABLE(i2c, upd64031a_id);
-static struct v4l2_i2c_driver_data v4l2_i2c_data = {
- .name = "upd64031a",
- .probe = upd64031a_probe,
- .remove = upd64031a_remove,
- .id_table = upd64031a_id,
+static struct i2c_driver upd64031a_driver = {
+ .driver = {
+ .owner = THIS_MODULE,
+ .name = "upd64031a",
+ },
+ .probe = upd64031a_probe,
+ .remove = upd64031a_remove,
+ .id_table = upd64031a_id,
};
+
+static __init int init_upd64031a(void)
+{
+ return i2c_add_driver(&upd64031a_driver);
+}
+
+static __exit void exit_upd64031a(void)
+{
+ i2c_del_driver(&upd64031a_driver);
+}
+
+module_init(init_upd64031a);
+module_exit(exit_upd64031a);
diff --git a/drivers/media/video/upd64083.c b/drivers/media/video/upd64083.c
index c5af93b30a2b..28e0e6b6ca84 100644
--- a/drivers/media/video/upd64083.c
+++ b/drivers/media/video/upd64083.c
@@ -28,7 +28,6 @@
#include <linux/slab.h>
#include <media/v4l2-device.h>
#include <media/v4l2-chip-ident.h>
-#include <media/v4l2-i2c-drv.h>
#include <media/upd64083.h>
MODULE_DESCRIPTION("uPD64083 driver");
@@ -234,9 +233,25 @@ static const struct i2c_device_id upd64083_id[] = {
};
MODULE_DEVICE_TABLE(i2c, upd64083_id);
-static struct v4l2_i2c_driver_data v4l2_i2c_data = {
- .name = "upd64083",
- .probe = upd64083_probe,
- .remove = upd64083_remove,
- .id_table = upd64083_id,
+static struct i2c_driver upd64083_driver = {
+ .driver = {
+ .owner = THIS_MODULE,
+ .name = "upd64083",
+ },
+ .probe = upd64083_probe,
+ .remove = upd64083_remove,
+ .id_table = upd64083_id,
};
+
+static __init int init_upd64083(void)
+{
+ return i2c_add_driver(&upd64083_driver);
+}
+
+static __exit void exit_upd64083(void)
+{
+ i2c_del_driver(&upd64083_driver);
+}
+
+module_init(init_upd64083);
+module_exit(exit_upd64083);
diff --git a/drivers/media/video/usbvideo/Kconfig b/drivers/media/video/usbvideo/Kconfig
index d6e16959f78b..dfa7fc68a657 100644
--- a/drivers/media/video/usbvideo/Kconfig
+++ b/drivers/media/video/usbvideo/Kconfig
@@ -12,10 +12,13 @@ config USB_VICAM
module will be called vicam.
config USB_IBMCAM
- tristate "USB IBM (Xirlink) C-it Camera support"
+ tristate "USB IBM (Xirlink) C-it Camera support (DEPRECATED)"
depends on VIDEO_V4L1
select VIDEO_USBVIDEO
---help---
+ This driver is DEPRECATED; please use the gspca xirlink_cit module
+ instead.
+
Say Y here if you want to connect a IBM "C-It" camera, also known as
"Xirlink PC Camera" to your computer's USB port.
@@ -27,10 +30,13 @@ config USB_IBMCAM
<file:Documentation/video4linux/ibmcam.txt> to learn more.
config USB_KONICAWC
- tristate "USB Konica Webcam support"
+ tristate "USB Konica Webcam support (DEPRECATED)"
depends on VIDEO_V4L1
select VIDEO_USBVIDEO
---help---
+ This driver is DEPRECATED (and known to crash); please use the
+ gspca konica module instead.
+
Say Y here if you want support for webcams based on a Konica
chipset. This is known to work with the Intel YC76 webcam.
diff --git a/drivers/media/video/usbvideo/vicam.c b/drivers/media/video/usbvideo/vicam.c
index 5d6fd01f918a..dc17cce2fbb6 100644
--- a/drivers/media/video/usbvideo/vicam.c
+++ b/drivers/media/video/usbvideo/vicam.c
@@ -43,7 +43,6 @@
#include <linux/vmalloc.h>
#include <linux/mm.h>
#include <linux/slab.h>
-#include <linux/smp_lock.h>
#include <linux/mutex.h>
#include <linux/firmware.h>
#include <linux/ihex.h>
@@ -483,29 +482,28 @@ vicam_open(struct file *file)
return -EINVAL;
}
- /* the videodev_lock held above us protects us from
- * simultaneous opens...for now. we probably shouldn't
- * rely on this fact forever.
+ /* cam_lock/open_count protects us from simultaneous opens
+ * ... for now. we probably shouldn't rely on this fact forever.
*/
- lock_kernel();
+ mutex_lock(&cam->cam_lock);
if (cam->open_count > 0) {
printk(KERN_INFO
"vicam_open called on already opened camera");
- unlock_kernel();
+ mutex_unlock(&cam->cam_lock);
return -EBUSY;
}
cam->raw_image = kmalloc(VICAM_MAX_READ_SIZE, GFP_KERNEL);
if (!cam->raw_image) {
- unlock_kernel();
+ mutex_unlock(&cam->cam_lock);
return -ENOMEM;
}
cam->framebuf = rvmalloc(VICAM_MAX_FRAME_SIZE * VICAM_FRAMES);
if (!cam->framebuf) {
kfree(cam->raw_image);
- unlock_kernel();
+ mutex_unlock(&cam->cam_lock);
return -ENOMEM;
}
@@ -513,10 +511,17 @@ vicam_open(struct file *file)
if (!cam->cntrlbuf) {
kfree(cam->raw_image);
rvfree(cam->framebuf, VICAM_MAX_FRAME_SIZE * VICAM_FRAMES);
- unlock_kernel();
+ mutex_unlock(&cam->cam_lock);
return -ENOMEM;
}
+ cam->needsDummyRead = 1;
+ cam->open_count++;
+
+ file->private_data = cam;
+ mutex_unlock(&cam->cam_lock);
+
+
// First upload firmware, then turn the camera on
if (!cam->is_initialized) {
@@ -527,12 +532,6 @@ vicam_open(struct file *file)
set_camera_power(cam, 1);
- cam->needsDummyRead = 1;
- cam->open_count++;
-
- file->private_data = cam;
- unlock_kernel();
-
return 0;
}
diff --git a/drivers/media/video/usbvision/usbvision-i2c.c b/drivers/media/video/usbvision/usbvision-i2c.c
index 42ba28785750..81dd53bb5267 100644
--- a/drivers/media/video/usbvision/usbvision-i2c.c
+++ b/drivers/media/video/usbvision/usbvision-i2c.c
@@ -211,6 +211,9 @@ int usbvision_i2c_register(struct usb_usbvision *usbvision)
0x42 >> 1, 0x40 >> 1, /* SAA7114, SAA7115 and SAA7118 */
I2C_CLIENT_END };
+ if (usbvision->registered_i2c)
+ return 0;
+
memcpy(&usbvision->i2c_adap, &i2c_adap_template,
sizeof(struct i2c_adapter));
@@ -248,7 +251,7 @@ int usbvision_i2c_register(struct usb_usbvision *usbvision)
hit-and-miss. */
mdelay(10);
v4l2_i2c_new_subdev(&usbvision->v4l2_dev,
- &usbvision->i2c_adap, "saa7115",
+ &usbvision->i2c_adap,
"saa7115_auto", 0, saa711x_addrs);
break;
}
@@ -258,16 +261,18 @@ int usbvision_i2c_register(struct usb_usbvision *usbvision)
struct tuner_setup tun_setup;
sd = v4l2_i2c_new_subdev(&usbvision->v4l2_dev,
- &usbvision->i2c_adap, "tuner",
+ &usbvision->i2c_adap,
"tuner", 0, v4l2_i2c_tuner_addrs(ADDRS_DEMOD));
/* depending on whether we found a demod or not, select
the tuner type. */
type = sd ? ADDRS_TV_WITH_DEMOD : ADDRS_TV;
sd = v4l2_i2c_new_subdev(&usbvision->v4l2_dev,
- &usbvision->i2c_adap, "tuner",
+ &usbvision->i2c_adap,
"tuner", 0, v4l2_i2c_tuner_addrs(type));
+ if (sd == NULL)
+ return -ENODEV;
if (usbvision->tuner_type != -1) {
tun_setup.mode_mask = T_ANALOG_TV | T_RADIO;
tun_setup.type = usbvision->tuner_type;
@@ -275,14 +280,18 @@ int usbvision_i2c_register(struct usb_usbvision *usbvision)
call_all(usbvision, tuner, s_type_addr, &tun_setup);
}
}
+ usbvision->registered_i2c = 1;
return 0;
}
int usbvision_i2c_unregister(struct usb_usbvision *usbvision)
{
+ if (!usbvision->registered_i2c)
+ return 0;
i2c_del_adapter(&(usbvision->i2c_adap));
+ usbvision->registered_i2c = 0;
PDEBUG(DBG_I2C,"i2c bus for %s unregistered", usbvision->i2c_adap.name);
diff --git a/drivers/media/video/usbvision/usbvision-video.c b/drivers/media/video/usbvision/usbvision-video.c
index c2690df33438..011c0c386995 100644
--- a/drivers/media/video/usbvision/usbvision-video.c
+++ b/drivers/media/video/usbvision/usbvision-video.c
@@ -50,7 +50,6 @@
#include <linux/list.h>
#include <linux/timer.h>
#include <linux/slab.h>
-#include <linux/smp_lock.h>
#include <linux/mm.h>
#include <linux/highmem.h>
#include <linux/vmalloc.h>
@@ -357,7 +356,7 @@ static int usbvision_v4l2_open(struct file *file)
PDEBUG(DBG_IO, "open");
- lock_kernel();
+ mutex_lock(&usbvision->lock);
usbvision_reset_powerOffTimer(usbvision);
if (usbvision->user)
@@ -379,7 +378,6 @@ static int usbvision_v4l2_open(struct file *file)
/* If so far no errors then we shall start the camera */
if (!errCode) {
- mutex_lock(&usbvision->lock);
if (usbvision->power == 0) {
usbvision_power_on(usbvision);
usbvision_i2c_register(usbvision);
@@ -408,14 +406,13 @@ static int usbvision_v4l2_open(struct file *file)
usbvision->initialized = 0;
}
}
- mutex_unlock(&usbvision->lock);
}
/* prepare queues */
usbvision_empty_framequeues(usbvision);
PDEBUG(DBG_IO, "success");
- unlock_kernel();
+ mutex_unlock(&usbvision->lock);
return errCode;
}
@@ -1645,8 +1642,8 @@ static int __devinit usbvision_probe(struct usb_interface *intf,
usbvision->usb_bandwidth = 0;
usbvision->user = 0;
usbvision->streaming = Stream_Off;
- usbvision_register_video(usbvision);
usbvision_configure_video(usbvision);
+ usbvision_register_video(usbvision);
mutex_unlock(&usbvision->lock);
usbvision_create_sysfs(usbvision->vdev);
diff --git a/drivers/media/video/usbvision/usbvision.h b/drivers/media/video/usbvision/usbvision.h
index d1b3cc0cd87f..cc4e96c8cd6c 100644
--- a/drivers/media/video/usbvision/usbvision.h
+++ b/drivers/media/video/usbvision/usbvision.h
@@ -363,6 +363,7 @@ struct usb_usbvision {
/* i2c Declaration Section*/
struct i2c_adapter i2c_adap;
+ int registered_i2c;
struct urb *ctrlUrb;
unsigned char ctrlUrbBuffer[8];
diff --git a/drivers/media/video/uvc/uvc_ctrl.c b/drivers/media/video/uvc/uvc_ctrl.c
index a350fad0db43..f169f7736677 100644
--- a/drivers/media/video/uvc/uvc_ctrl.c
+++ b/drivers/media/video/uvc/uvc_ctrl.c
@@ -1,8 +1,8 @@
/*
* uvc_ctrl.c -- USB Video Class driver - Controls
*
- * Copyright (C) 2005-2009
- * Laurent Pinchart (laurent.pinchart@skynet.be)
+ * Copyright (C) 2005-2010
+ * Laurent Pinchart (laurent.pinchart@ideasonboard.com)
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
@@ -643,7 +643,7 @@ static struct uvc_control_mapping uvc_ctrl_mappings[] = {
static inline __u8 *uvc_ctrl_data(struct uvc_control *ctrl, int id)
{
- return ctrl->uvc_data + id * ctrl->info->size;
+ return ctrl->uvc_data + id * ctrl->info.size;
}
static inline int uvc_test_bit(const __u8 *data, int bit)
@@ -727,7 +727,8 @@ static const __u8 uvc_camera_guid[16] = UVC_GUID_UVC_CAMERA;
static const __u8 uvc_media_transport_input_guid[16] =
UVC_GUID_UVC_MEDIA_TRANSPORT_INPUT;
-static int uvc_entity_match_guid(struct uvc_entity *entity, __u8 guid[16])
+static int uvc_entity_match_guid(const struct uvc_entity *entity,
+ const __u8 guid[16])
{
switch (UVC_ENTITY_TYPE(entity)) {
case UVC_ITT_CAMERA:
@@ -765,10 +766,10 @@ static void __uvc_find_control(struct uvc_entity *entity, __u32 v4l2_id,
for (i = 0; i < entity->ncontrols; ++i) {
ctrl = &entity->controls[i];
- if (ctrl->info == NULL)
+ if (!ctrl->initialized)
continue;
- list_for_each_entry(map, &ctrl->info->mappings, list) {
+ list_for_each_entry(map, &ctrl->info.mappings, list) {
if ((map->id == v4l2_id) && !next) {
*control = ctrl;
*mapping = map;
@@ -815,36 +816,36 @@ static int uvc_ctrl_populate_cache(struct uvc_video_chain *chain,
{
int ret;
- if (ctrl->info->flags & UVC_CONTROL_GET_DEF) {
+ if (ctrl->info.flags & UVC_CONTROL_GET_DEF) {
ret = uvc_query_ctrl(chain->dev, UVC_GET_DEF, ctrl->entity->id,
- chain->dev->intfnum, ctrl->info->selector,
+ chain->dev->intfnum, ctrl->info.selector,
uvc_ctrl_data(ctrl, UVC_CTRL_DATA_DEF),
- ctrl->info->size);
+ ctrl->info.size);
if (ret < 0)
return ret;
}
- if (ctrl->info->flags & UVC_CONTROL_GET_MIN) {
+ if (ctrl->info.flags & UVC_CONTROL_GET_MIN) {
ret = uvc_query_ctrl(chain->dev, UVC_GET_MIN, ctrl->entity->id,
- chain->dev->intfnum, ctrl->info->selector,
+ chain->dev->intfnum, ctrl->info.selector,
uvc_ctrl_data(ctrl, UVC_CTRL_DATA_MIN),
- ctrl->info->size);
+ ctrl->info.size);
if (ret < 0)
return ret;
}
- if (ctrl->info->flags & UVC_CONTROL_GET_MAX) {
+ if (ctrl->info.flags & UVC_CONTROL_GET_MAX) {
ret = uvc_query_ctrl(chain->dev, UVC_GET_MAX, ctrl->entity->id,
- chain->dev->intfnum, ctrl->info->selector,
+ chain->dev->intfnum, ctrl->info.selector,
uvc_ctrl_data(ctrl, UVC_CTRL_DATA_MAX),
- ctrl->info->size);
+ ctrl->info.size);
if (ret < 0)
return ret;
}
- if (ctrl->info->flags & UVC_CONTROL_GET_RES) {
+ if (ctrl->info.flags & UVC_CONTROL_GET_RES) {
ret = uvc_query_ctrl(chain->dev, UVC_GET_RES, ctrl->entity->id,
- chain->dev->intfnum, ctrl->info->selector,
+ chain->dev->intfnum, ctrl->info.selector,
uvc_ctrl_data(ctrl, UVC_CTRL_DATA_RES),
- ctrl->info->size);
+ ctrl->info.size);
if (ret < 0)
return ret;
}
@@ -862,9 +863,15 @@ int uvc_query_v4l2_ctrl(struct uvc_video_chain *chain,
unsigned int i;
int ret;
+ ret = mutex_lock_interruptible(&chain->ctrl_mutex);
+ if (ret < 0)
+ return -ERESTARTSYS;
+
ctrl = uvc_find_control(chain, v4l2_ctrl->id, &mapping);
- if (ctrl == NULL)
- return -EINVAL;
+ if (ctrl == NULL) {
+ ret = -EINVAL;
+ goto done;
+ }
memset(v4l2_ctrl, 0, sizeof *v4l2_ctrl);
v4l2_ctrl->id = mapping->id;
@@ -872,18 +879,18 @@ int uvc_query_v4l2_ctrl(struct uvc_video_chain *chain,
strlcpy(v4l2_ctrl->name, mapping->name, sizeof v4l2_ctrl->name);
v4l2_ctrl->flags = 0;
- if (!(ctrl->info->flags & UVC_CONTROL_GET_CUR))
+ if (!(ctrl->info.flags & UVC_CONTROL_GET_CUR))
v4l2_ctrl->flags |= V4L2_CTRL_FLAG_WRITE_ONLY;
- if (!(ctrl->info->flags & UVC_CONTROL_SET_CUR))
+ if (!(ctrl->info.flags & UVC_CONTROL_SET_CUR))
v4l2_ctrl->flags |= V4L2_CTRL_FLAG_READ_ONLY;
if (!ctrl->cached) {
ret = uvc_ctrl_populate_cache(chain, ctrl);
if (ret < 0)
- return ret;
+ goto done;
}
- if (ctrl->info->flags & UVC_CONTROL_GET_DEF) {
+ if (ctrl->info.flags & UVC_CONTROL_GET_DEF) {
v4l2_ctrl->default_value = mapping->get(mapping, UVC_GET_DEF,
uvc_ctrl_data(ctrl, UVC_CTRL_DATA_DEF));
}
@@ -902,37 +909,39 @@ int uvc_query_v4l2_ctrl(struct uvc_video_chain *chain,
}
}
- return 0;
+ goto done;
case V4L2_CTRL_TYPE_BOOLEAN:
v4l2_ctrl->minimum = 0;
v4l2_ctrl->maximum = 1;
v4l2_ctrl->step = 1;
- return 0;
+ goto done;
case V4L2_CTRL_TYPE_BUTTON:
v4l2_ctrl->minimum = 0;
v4l2_ctrl->maximum = 0;
v4l2_ctrl->step = 0;
- return 0;
+ goto done;
default:
break;
}
- if (ctrl->info->flags & UVC_CONTROL_GET_MIN)
+ if (ctrl->info.flags & UVC_CONTROL_GET_MIN)
v4l2_ctrl->minimum = mapping->get(mapping, UVC_GET_MIN,
uvc_ctrl_data(ctrl, UVC_CTRL_DATA_MIN));
- if (ctrl->info->flags & UVC_CONTROL_GET_MAX)
+ if (ctrl->info.flags & UVC_CONTROL_GET_MAX)
v4l2_ctrl->maximum = mapping->get(mapping, UVC_GET_MAX,
uvc_ctrl_data(ctrl, UVC_CTRL_DATA_MAX));
- if (ctrl->info->flags & UVC_CONTROL_GET_RES)
+ if (ctrl->info.flags & UVC_CONTROL_GET_RES)
v4l2_ctrl->step = mapping->get(mapping, UVC_GET_RES,
uvc_ctrl_data(ctrl, UVC_CTRL_DATA_RES));
- return 0;
+done:
+ mutex_unlock(&chain->ctrl_mutex);
+ return ret;
}
@@ -977,14 +986,14 @@ static int uvc_ctrl_commit_entity(struct uvc_device *dev,
for (i = 0; i < entity->ncontrols; ++i) {
ctrl = &entity->controls[i];
- if (ctrl->info == NULL)
+ if (!ctrl->initialized)
continue;
/* Reset the loaded flag for auto-update controls that were
* marked as loaded in uvc_ctrl_get/uvc_ctrl_set to prevent
* uvc_ctrl_get from using the cached value.
*/
- if (ctrl->info->flags & UVC_CONTROL_AUTO_UPDATE)
+ if (ctrl->info.flags & UVC_CONTROL_AUTO_UPDATE)
ctrl->loaded = 0;
if (!ctrl->dirty)
@@ -992,16 +1001,16 @@ static int uvc_ctrl_commit_entity(struct uvc_device *dev,
if (!rollback)
ret = uvc_query_ctrl(dev, UVC_SET_CUR, ctrl->entity->id,
- dev->intfnum, ctrl->info->selector,
+ dev->intfnum, ctrl->info.selector,
uvc_ctrl_data(ctrl, UVC_CTRL_DATA_CURRENT),
- ctrl->info->size);
+ ctrl->info.size);
else
ret = 0;
if (rollback || ret < 0)
memcpy(uvc_ctrl_data(ctrl, UVC_CTRL_DATA_CURRENT),
uvc_ctrl_data(ctrl, UVC_CTRL_DATA_BACKUP),
- ctrl->info->size);
+ ctrl->info.size);
ctrl->dirty = 0;
@@ -1039,14 +1048,14 @@ int uvc_ctrl_get(struct uvc_video_chain *chain,
int ret;
ctrl = uvc_find_control(chain, xctrl->id, &mapping);
- if (ctrl == NULL || (ctrl->info->flags & UVC_CONTROL_GET_CUR) == 0)
+ if (ctrl == NULL || (ctrl->info.flags & UVC_CONTROL_GET_CUR) == 0)
return -EINVAL;
if (!ctrl->loaded) {
ret = uvc_query_ctrl(chain->dev, UVC_GET_CUR, ctrl->entity->id,
- chain->dev->intfnum, ctrl->info->selector,
+ chain->dev->intfnum, ctrl->info.selector,
uvc_ctrl_data(ctrl, UVC_CTRL_DATA_CURRENT),
- ctrl->info->size);
+ ctrl->info.size);
if (ret < 0)
return ret;
@@ -1081,7 +1090,7 @@ int uvc_ctrl_set(struct uvc_video_chain *chain,
int ret;
ctrl = uvc_find_control(chain, xctrl->id, &mapping);
- if (ctrl == NULL || (ctrl->info->flags & UVC_CONTROL_SET_CUR) == 0)
+ if (ctrl == NULL || (ctrl->info.flags & UVC_CONTROL_SET_CUR) == 0)
return -EINVAL;
/* Clamp out of range values. */
@@ -1127,16 +1136,16 @@ int uvc_ctrl_set(struct uvc_video_chain *chain,
* needs to be loaded from the device to perform the read-modify-write
* operation.
*/
- if (!ctrl->loaded && (ctrl->info->size * 8) != mapping->size) {
- if ((ctrl->info->flags & UVC_CONTROL_GET_CUR) == 0) {
+ if (!ctrl->loaded && (ctrl->info.size * 8) != mapping->size) {
+ if ((ctrl->info.flags & UVC_CONTROL_GET_CUR) == 0) {
memset(uvc_ctrl_data(ctrl, UVC_CTRL_DATA_CURRENT),
- 0, ctrl->info->size);
+ 0, ctrl->info.size);
} else {
ret = uvc_query_ctrl(chain->dev, UVC_GET_CUR,
ctrl->entity->id, chain->dev->intfnum,
- ctrl->info->selector,
+ ctrl->info.selector,
uvc_ctrl_data(ctrl, UVC_CTRL_DATA_CURRENT),
- ctrl->info->size);
+ ctrl->info.size);
if (ret < 0)
return ret;
}
@@ -1148,7 +1157,7 @@ int uvc_ctrl_set(struct uvc_video_chain *chain,
if (!ctrl->dirty) {
memcpy(uvc_ctrl_data(ctrl, UVC_CTRL_DATA_BACKUP),
uvc_ctrl_data(ctrl, UVC_CTRL_DATA_CURRENT),
- ctrl->info->size);
+ ctrl->info.size);
}
mapping->set(mapping, value,
@@ -1163,12 +1172,138 @@ int uvc_ctrl_set(struct uvc_video_chain *chain,
* Dynamic controls
*/
+static void uvc_ctrl_fixup_xu_info(struct uvc_device *dev,
+ const struct uvc_control *ctrl, struct uvc_control_info *info)
+{
+ struct uvc_ctrl_fixup {
+ struct usb_device_id id;
+ u8 entity;
+ u8 selector;
+ u8 flags;
+ };
+
+ static const struct uvc_ctrl_fixup fixups[] = {
+ { { USB_DEVICE(0x046d, 0x08c2) }, 9, 1,
+ UVC_CONTROL_GET_MIN | UVC_CONTROL_GET_MAX |
+ UVC_CONTROL_GET_DEF | UVC_CONTROL_SET_CUR |
+ UVC_CONTROL_AUTO_UPDATE },
+ { { USB_DEVICE(0x046d, 0x08cc) }, 9, 1,
+ UVC_CONTROL_GET_MIN | UVC_CONTROL_GET_MAX |
+ UVC_CONTROL_GET_DEF | UVC_CONTROL_SET_CUR |
+ UVC_CONTROL_AUTO_UPDATE },
+ { { USB_DEVICE(0x046d, 0x0994) }, 9, 1,
+ UVC_CONTROL_GET_MIN | UVC_CONTROL_GET_MAX |
+ UVC_CONTROL_GET_DEF | UVC_CONTROL_SET_CUR |
+ UVC_CONTROL_AUTO_UPDATE },
+ };
+
+ unsigned int i;
+
+ for (i = 0; i < ARRAY_SIZE(fixups); ++i) {
+ if (!usb_match_one_id(dev->intf, &fixups[i].id))
+ continue;
+
+ if (fixups[i].entity == ctrl->entity->id &&
+ fixups[i].selector == info->selector) {
+ info->flags = fixups[i].flags;
+ return;
+ }
+ }
+}
+
+/*
+ * Query control information (size and flags) for XU controls.
+ */
+static int uvc_ctrl_fill_xu_info(struct uvc_device *dev,
+ const struct uvc_control *ctrl, struct uvc_control_info *info)
+{
+ u8 *data;
+ int ret;
+
+ data = kmalloc(2, GFP_KERNEL);
+ if (data == NULL)
+ return -ENOMEM;
+
+ memcpy(info->entity, ctrl->entity->extension.guidExtensionCode,
+ sizeof(info->entity));
+ info->index = ctrl->index;
+ info->selector = ctrl->index + 1;
+
+ /* Query and verify the control length (GET_LEN) */
+ ret = uvc_query_ctrl(dev, UVC_GET_LEN, ctrl->entity->id, dev->intfnum,
+ info->selector, data, 2);
+ if (ret < 0) {
+ uvc_trace(UVC_TRACE_CONTROL,
+ "GET_LEN failed on control %pUl/%u (%d).\n",
+ info->entity, info->selector, ret);
+ goto done;
+ }
+
+ info->size = le16_to_cpup((__le16 *)data);
+
+ /* Query the control information (GET_INFO) */
+ ret = uvc_query_ctrl(dev, UVC_GET_INFO, ctrl->entity->id, dev->intfnum,
+ info->selector, data, 1);
+ if (ret < 0) {
+ uvc_trace(UVC_TRACE_CONTROL,
+ "GET_INFO failed on control %pUl/%u (%d).\n",
+ info->entity, info->selector, ret);
+ goto done;
+ }
+
+ info->flags = UVC_CONTROL_GET_MIN | UVC_CONTROL_GET_MAX
+ | UVC_CONTROL_GET_RES | UVC_CONTROL_GET_DEF
+ | (data[0] & UVC_CONTROL_CAP_GET ? UVC_CONTROL_GET_CUR : 0)
+ | (data[0] & UVC_CONTROL_CAP_SET ? UVC_CONTROL_SET_CUR : 0)
+ | (data[0] & UVC_CONTROL_CAP_AUTOUPDATE ?
+ UVC_CONTROL_AUTO_UPDATE : 0);
+
+ uvc_ctrl_fixup_xu_info(dev, ctrl, info);
+
+ uvc_trace(UVC_TRACE_CONTROL, "XU control %pUl/%u queried: len %u, "
+ "flags { get %u set %u auto %u }.\n",
+ info->entity, info->selector, info->size,
+ (info->flags & UVC_CONTROL_GET_CUR) ? 1 : 0,
+ (info->flags & UVC_CONTROL_SET_CUR) ? 1 : 0,
+ (info->flags & UVC_CONTROL_AUTO_UPDATE) ? 1 : 0);
+
+done:
+ kfree(data);
+ return ret;
+}
+
+static int uvc_ctrl_add_info(struct uvc_device *dev, struct uvc_control *ctrl,
+ const struct uvc_control_info *info);
+
+static int uvc_ctrl_init_xu_ctrl(struct uvc_device *dev,
+ struct uvc_control *ctrl)
+{
+ struct uvc_control_info info;
+ int ret;
+
+ if (ctrl->initialized)
+ return 0;
+
+ ret = uvc_ctrl_fill_xu_info(dev, ctrl, &info);
+ if (ret < 0)
+ return ret;
+
+ ret = uvc_ctrl_add_info(dev, ctrl, &info);
+ if (ret < 0)
+ uvc_trace(UVC_TRACE_CONTROL, "Failed to initialize control "
+ "%pUl/%u on device %s entity %u\n", info.entity,
+ info.selector, dev->udev->devpath, ctrl->entity->id);
+
+ return ret;
+}
+
int uvc_xu_ctrl_query(struct uvc_video_chain *chain,
struct uvc_xu_control *xctrl, int set)
{
struct uvc_entity *entity;
struct uvc_control *ctrl = NULL;
unsigned int i, found = 0;
+ int restore = 0;
__u8 *data;
int ret;
@@ -1185,13 +1320,10 @@ int uvc_xu_ctrl_query(struct uvc_video_chain *chain,
return -EINVAL;
}
- /* Find the control. */
+ /* Find the control and perform delayed initialization if needed. */
for (i = 0; i < entity->ncontrols; ++i) {
ctrl = &entity->controls[i];
- if (ctrl->info == NULL)
- continue;
-
- if (ctrl->info->selector == xctrl->selector) {
+ if (ctrl->index == xctrl->selector - 1) {
found = 1;
break;
}
@@ -1203,40 +1335,48 @@ int uvc_xu_ctrl_query(struct uvc_video_chain *chain,
return -EINVAL;
}
- /* Validate control data size. */
- if (ctrl->info->size != xctrl->size)
- return -EINVAL;
-
- if ((set && !(ctrl->info->flags & UVC_CONTROL_SET_CUR)) ||
- (!set && !(ctrl->info->flags & UVC_CONTROL_GET_CUR)))
- return -EINVAL;
-
if (mutex_lock_interruptible(&chain->ctrl_mutex))
return -ERESTARTSYS;
+ ret = uvc_ctrl_init_xu_ctrl(chain->dev, ctrl);
+ if (ret < 0) {
+ ret = -ENOENT;
+ goto done;
+ }
+
+ /* Validate control data size. */
+ if (ctrl->info.size != xctrl->size) {
+ ret = -EINVAL;
+ goto done;
+ }
+
+ if ((set && !(ctrl->info.flags & UVC_CONTROL_SET_CUR)) ||
+ (!set && !(ctrl->info.flags & UVC_CONTROL_GET_CUR))) {
+ ret = -EINVAL;
+ goto done;
+ }
+
memcpy(uvc_ctrl_data(ctrl, UVC_CTRL_DATA_BACKUP),
uvc_ctrl_data(ctrl, UVC_CTRL_DATA_CURRENT),
- xctrl->size);
+ ctrl->info.size);
data = uvc_ctrl_data(ctrl, UVC_CTRL_DATA_CURRENT);
+ restore = set;
if (set && copy_from_user(data, xctrl->data, xctrl->size)) {
ret = -EFAULT;
- goto out;
+ goto done;
}
ret = uvc_query_ctrl(chain->dev, set ? UVC_SET_CUR : UVC_GET_CUR,
xctrl->unit, chain->dev->intfnum, xctrl->selector,
data, xctrl->size);
if (ret < 0)
- goto out;
+ goto done;
- if (!set && copy_to_user(xctrl->data, data, xctrl->size)) {
+ if (!set && copy_to_user(xctrl->data, data, xctrl->size))
ret = -EFAULT;
- goto out;
- }
-
-out:
- if (ret)
+done:
+ if (ret && restore)
memcpy(uvc_ctrl_data(ctrl, UVC_CTRL_DATA_CURRENT),
uvc_ctrl_data(ctrl, UVC_CTRL_DATA_BACKUP),
xctrl->size);
@@ -1271,13 +1411,13 @@ int uvc_ctrl_resume_device(struct uvc_device *dev)
for (i = 0; i < entity->ncontrols; ++i) {
ctrl = &entity->controls[i];
- if (ctrl->info == NULL || !ctrl->modified ||
- (ctrl->info->flags & UVC_CONTROL_RESTORE) == 0)
+ if (!ctrl->initialized || !ctrl->modified ||
+ (ctrl->info.flags & UVC_CONTROL_RESTORE) == 0)
continue;
printk(KERN_INFO "restoring control %pUl/%u/%u\n",
- ctrl->info->entity, ctrl->info->index,
- ctrl->info->selector);
+ ctrl->info.entity, ctrl->info.index,
+ ctrl->info.selector);
ctrl->dirty = 1;
}
@@ -1293,201 +1433,150 @@ int uvc_ctrl_resume_device(struct uvc_device *dev)
* Control and mapping handling
*/
-static int uvc_ctrl_add_ctrl(struct uvc_device *dev,
- struct uvc_control_info *info)
+/*
+ * Add control information to a given control.
+ */
+static int uvc_ctrl_add_info(struct uvc_device *dev, struct uvc_control *ctrl,
+ const struct uvc_control_info *info)
{
- struct uvc_entity *entity;
- struct uvc_control *ctrl = NULL;
- int ret = 0, found = 0;
- unsigned int i;
- u8 *uvc_info;
- u8 *uvc_data;
-
- list_for_each_entry(entity, &dev->entities, list) {
- if (!uvc_entity_match_guid(entity, info->entity))
- continue;
-
- for (i = 0; i < entity->ncontrols; ++i) {
- ctrl = &entity->controls[i];
- if (ctrl->index == info->index) {
- found = 1;
- break;
- }
- }
-
- if (found)
- break;
- }
-
- if (!found)
- return 0;
-
- uvc_data = kmalloc(info->size * UVC_CTRL_DATA_LAST + 1, GFP_KERNEL);
- if (uvc_data == NULL)
- return -ENOMEM;
-
- uvc_info = uvc_data + info->size * UVC_CTRL_DATA_LAST;
-
- if (UVC_ENTITY_TYPE(entity) == UVC_VC_EXTENSION_UNIT) {
- /* Check if the device control information and length match
- * the user supplied information.
- */
- ret = uvc_query_ctrl(dev, UVC_GET_LEN, ctrl->entity->id,
- dev->intfnum, info->selector, uvc_data, 2);
- if (ret < 0) {
- uvc_trace(UVC_TRACE_CONTROL,
- "GET_LEN failed on control %pUl/%u (%d).\n",
- info->entity, info->selector, ret);
- goto done;
- }
-
- if (info->size != le16_to_cpu(*(__le16 *)uvc_data)) {
- uvc_trace(UVC_TRACE_CONTROL, "Control %pUl/%u size "
- "doesn't match user supplied value.\n",
- info->entity, info->selector);
- ret = -EINVAL;
- goto done;
- }
+ int ret = 0;
- ret = uvc_query_ctrl(dev, UVC_GET_INFO, ctrl->entity->id,
- dev->intfnum, info->selector, uvc_info, 1);
- if (ret < 0) {
- uvc_trace(UVC_TRACE_CONTROL,
- "GET_INFO failed on control %pUl/%u (%d).\n",
- info->entity, info->selector, ret);
- goto done;
- }
+ memcpy(&ctrl->info, info, sizeof(*info));
+ INIT_LIST_HEAD(&ctrl->info.mappings);
- if (((info->flags & UVC_CONTROL_GET_CUR) &&
- !(*uvc_info & UVC_CONTROL_CAP_GET)) ||
- ((info->flags & UVC_CONTROL_SET_CUR) &&
- !(*uvc_info & UVC_CONTROL_CAP_SET))) {
- uvc_trace(UVC_TRACE_CONTROL, "Control %pUl/%u flags "
- "don't match supported operations.\n",
- info->entity, info->selector);
- ret = -EINVAL;
- goto done;
- }
+ /* Allocate an array to save control values (cur, def, max, etc.) */
+ ctrl->uvc_data = kzalloc(ctrl->info.size * UVC_CTRL_DATA_LAST + 1,
+ GFP_KERNEL);
+ if (ctrl->uvc_data == NULL) {
+ ret = -ENOMEM;
+ goto done;
}
- ctrl->info = info;
- ctrl->uvc_data = uvc_data;
- ctrl->uvc_info = uvc_info;
+ ctrl->initialized = 1;
uvc_trace(UVC_TRACE_CONTROL, "Added control %pUl/%u to device %s "
- "entity %u\n", ctrl->info->entity, ctrl->info->selector,
- dev->udev->devpath, entity->id);
+ "entity %u\n", ctrl->info.entity, ctrl->info.selector,
+ dev->udev->devpath, ctrl->entity->id);
done:
if (ret < 0)
- kfree(uvc_data);
-
+ kfree(ctrl->uvc_data);
return ret;
}
/*
- * Add an item to the UVC control information list, and instantiate a control
- * structure for each device that supports the control.
+ * Add a control mapping to a given control.
*/
-int uvc_ctrl_add_info(struct uvc_control_info *info)
+static int __uvc_ctrl_add_mapping(struct uvc_device *dev,
+ struct uvc_control *ctrl, const struct uvc_control_mapping *mapping)
{
- struct uvc_control_info *ctrl;
- struct uvc_device *dev;
- int ret = 0;
-
- /* Find matching controls by walking the devices, entities and
- * controls list.
- */
- mutex_lock(&uvc_driver.ctrl_mutex);
+ struct uvc_control_mapping *map;
+ unsigned int size;
- /* First check if the list contains a control matching the new one.
- * Bail out if it does.
+ /* Most mappings come from static kernel data and need to be duplicated.
+ * Mappings that come from userspace will be unnecessarily duplicated,
+ * this could be optimized.
*/
- list_for_each_entry(ctrl, &uvc_driver.controls, list) {
- if (memcmp(ctrl->entity, info->entity, 16))
- continue;
+ map = kmemdup(mapping, sizeof(*mapping), GFP_KERNEL);
+ if (map == NULL)
+ return -ENOMEM;
- if (ctrl->selector == info->selector) {
- uvc_trace(UVC_TRACE_CONTROL,
- "Control %pUl/%u is already defined.\n",
- info->entity, info->selector);
- ret = -EEXIST;
- goto end;
- }
- if (ctrl->index == info->index) {
- uvc_trace(UVC_TRACE_CONTROL,
- "Control %pUl/%u would overwrite index %d.\n",
- info->entity, info->selector, info->index);
- ret = -EEXIST;
- goto end;
- }
+ size = sizeof(*mapping->menu_info) * mapping->menu_count;
+ map->menu_info = kmemdup(mapping->menu_info, size, GFP_KERNEL);
+ if (map->menu_info == NULL) {
+ kfree(map);
+ return -ENOMEM;
}
- list_for_each_entry(dev, &uvc_driver.devices, list)
- uvc_ctrl_add_ctrl(dev, info);
+ if (map->get == NULL)
+ map->get = uvc_get_le_value;
+ if (map->set == NULL)
+ map->set = uvc_set_le_value;
- INIT_LIST_HEAD(&info->mappings);
- list_add_tail(&info->list, &uvc_driver.controls);
-end:
- mutex_unlock(&uvc_driver.ctrl_mutex);
- return ret;
+ map->ctrl = &ctrl->info;
+ list_add_tail(&map->list, &ctrl->info.mappings);
+ uvc_trace(UVC_TRACE_CONTROL,
+ "Adding mapping '%s' to control %pUl/%u.\n",
+ map->name, ctrl->info.entity, ctrl->info.selector);
+
+ return 0;
}
-int uvc_ctrl_add_mapping(struct uvc_control_mapping *mapping)
+int uvc_ctrl_add_mapping(struct uvc_video_chain *chain,
+ const struct uvc_control_mapping *mapping)
{
- struct uvc_control_info *info;
+ struct uvc_device *dev = chain->dev;
struct uvc_control_mapping *map;
- int ret = -EINVAL;
-
- if (mapping->get == NULL)
- mapping->get = uvc_get_le_value;
- if (mapping->set == NULL)
- mapping->set = uvc_set_le_value;
+ struct uvc_entity *entity;
+ struct uvc_control *ctrl;
+ int found = 0;
+ int ret;
if (mapping->id & ~V4L2_CTRL_ID_MASK) {
- uvc_trace(UVC_TRACE_CONTROL, "Can't add mapping '%s' with "
- "invalid control id 0x%08x\n", mapping->name,
+ uvc_trace(UVC_TRACE_CONTROL, "Can't add mapping '%s', control "
+ "id 0x%08x is invalid.\n", mapping->name,
mapping->id);
return -EINVAL;
}
- mutex_lock(&uvc_driver.ctrl_mutex);
- list_for_each_entry(info, &uvc_driver.controls, list) {
- if (memcmp(info->entity, mapping->entity, 16) ||
- info->selector != mapping->selector)
- continue;
+ /* Search for the matching (GUID/CS) control in the given device */
+ list_for_each_entry(entity, &dev->entities, list) {
+ unsigned int i;
- if (info->size * 8 < mapping->size + mapping->offset) {
- uvc_trace(UVC_TRACE_CONTROL,
- "Mapping '%s' would overflow control %pUl/%u\n",
- mapping->name, info->entity, info->selector);
- ret = -EOVERFLOW;
- goto end;
- }
+ if (UVC_ENTITY_TYPE(entity) != UVC_VC_EXTENSION_UNIT ||
+ !uvc_entity_match_guid(entity, mapping->entity))
+ continue;
- /* Check if the list contains a mapping matching the new one.
- * Bail out if it does.
- */
- list_for_each_entry(map, &info->mappings, list) {
- if (map->id == mapping->id) {
- uvc_trace(UVC_TRACE_CONTROL, "Mapping '%s' is "
- "already defined.\n", mapping->name);
- ret = -EEXIST;
- goto end;
+ for (i = 0; i < entity->ncontrols; ++i) {
+ ctrl = &entity->controls[i];
+ if (ctrl->index == mapping->selector - 1) {
+ found = 1;
+ break;
}
}
- mapping->ctrl = info;
- list_add_tail(&mapping->list, &info->mappings);
- uvc_trace(UVC_TRACE_CONTROL,
- "Adding mapping %s to control %pUl/%u.\n",
- mapping->name, info->entity, info->selector);
+ if (found)
+ break;
+ }
+ if (!found)
+ return -ENOENT;
- ret = 0;
- break;
+ if (mutex_lock_interruptible(&chain->ctrl_mutex))
+ return -ERESTARTSYS;
+
+ /* Perform delayed initialization of XU controls */
+ ret = uvc_ctrl_init_xu_ctrl(dev, ctrl);
+ if (ret < 0) {
+ ret = -ENOENT;
+ goto done;
}
-end:
- mutex_unlock(&uvc_driver.ctrl_mutex);
+
+ list_for_each_entry(map, &ctrl->info.mappings, list) {
+ if (mapping->id == map->id) {
+ uvc_trace(UVC_TRACE_CONTROL, "Can't add mapping '%s', "
+ "control id 0x%08x already exists.\n",
+ mapping->name, mapping->id);
+ ret = -EEXIST;
+ goto done;
+ }
+ }
+
+ /* Prevent excess memory consumption */
+ if (atomic_inc_return(&dev->nmappings) > UVC_MAX_CONTROL_MAPPINGS) {
+ atomic_dec(&dev->nmappings);
+ uvc_trace(UVC_TRACE_CONTROL, "Can't add mapping '%s', maximum "
+ "mappings count (%u) exceeded.\n", mapping->name,
+ UVC_MAX_CONTROL_MAPPINGS);
+ ret = -ENOMEM;
+ goto done;
+ }
+
+ ret = __uvc_ctrl_add_mapping(dev, ctrl, mapping);
+ if (ret < 0)
+ atomic_dec(&dev->nmappings);
+
+done:
+ mutex_unlock(&chain->ctrl_mutex);
return ret;
}
@@ -1496,29 +1585,49 @@ end:
* are currently the ones that crash the camera or unconditionally return an
* error when queried.
*/
-static void
-uvc_ctrl_prune_entity(struct uvc_device *dev, struct uvc_entity *entity)
+static void uvc_ctrl_prune_entity(struct uvc_device *dev,
+ struct uvc_entity *entity)
{
- static const struct {
+ struct uvc_ctrl_blacklist {
struct usb_device_id id;
u8 index;
- } blacklist[] = {
+ };
+
+ static const struct uvc_ctrl_blacklist processing_blacklist[] = {
{ { USB_DEVICE(0x13d3, 0x509b) }, 9 }, /* Gain */
{ { USB_DEVICE(0x1c4f, 0x3000) }, 6 }, /* WB Temperature */
{ { USB_DEVICE(0x5986, 0x0241) }, 2 }, /* Hue */
};
+ static const struct uvc_ctrl_blacklist camera_blacklist[] = {
+ { { USB_DEVICE(0x06f8, 0x3005) }, 9 }, /* Zoom, Absolute */
+ };
- u8 *controls;
+ const struct uvc_ctrl_blacklist *blacklist;
unsigned int size;
+ unsigned int count;
unsigned int i;
+ u8 *controls;
- if (UVC_ENTITY_TYPE(entity) != UVC_VC_PROCESSING_UNIT)
- return;
+ switch (UVC_ENTITY_TYPE(entity)) {
+ case UVC_VC_PROCESSING_UNIT:
+ blacklist = processing_blacklist;
+ count = ARRAY_SIZE(processing_blacklist);
+ controls = entity->processing.bmControls;
+ size = entity->processing.bControlSize;
+ break;
- controls = entity->processing.bmControls;
- size = entity->processing.bControlSize;
+ case UVC_ITT_CAMERA:
+ blacklist = camera_blacklist;
+ count = ARRAY_SIZE(camera_blacklist);
+ controls = entity->camera.bmControls;
+ size = entity->camera.bControlSize;
+ break;
- for (i = 0; i < ARRAY_SIZE(blacklist); ++i) {
+ default:
+ return;
+ }
+
+ for (i = 0; i < count; ++i) {
if (!usb_match_one_id(dev->intf, &blacklist[i].id))
continue;
@@ -1534,17 +1643,54 @@ uvc_ctrl_prune_entity(struct uvc_device *dev, struct uvc_entity *entity)
}
/*
+ * Add control information and hardcoded stock control mappings to the given
+ * device.
+ */
+static void uvc_ctrl_init_ctrl(struct uvc_device *dev, struct uvc_control *ctrl)
+{
+ const struct uvc_control_info *info = uvc_ctrls;
+ const struct uvc_control_info *iend = info + ARRAY_SIZE(uvc_ctrls);
+ const struct uvc_control_mapping *mapping = uvc_ctrl_mappings;
+ const struct uvc_control_mapping *mend =
+ mapping + ARRAY_SIZE(uvc_ctrl_mappings);
+
+ /* XU controls initialization requires querying the device for control
+ * information. As some buggy UVC devices will crash when queried
+ * repeatedly in a tight loop, delay XU controls initialization until
+ * first use.
+ */
+ if (UVC_ENTITY_TYPE(ctrl->entity) == UVC_VC_EXTENSION_UNIT)
+ return;
+
+ for (; info < iend; ++info) {
+ if (uvc_entity_match_guid(ctrl->entity, info->entity) &&
+ ctrl->index == info->index) {
+ uvc_ctrl_add_info(dev, ctrl, info);
+ break;
+ }
+ }
+
+ if (!ctrl->initialized)
+ return;
+
+ for (; mapping < mend; ++mapping) {
+ if (uvc_entity_match_guid(ctrl->entity, mapping->entity) &&
+ ctrl->info.selector == mapping->selector)
+ __uvc_ctrl_add_mapping(dev, ctrl, mapping);
+ }
+}
+
+/*
* Initialize device controls.
*/
int uvc_ctrl_init_device(struct uvc_device *dev)
{
- struct uvc_control_info *info;
- struct uvc_control *ctrl;
struct uvc_entity *entity;
unsigned int i;
/* Walk the entities list and instantiate controls */
list_for_each_entry(entity, &dev->entities, list) {
+ struct uvc_control *ctrl;
unsigned int bControlSize = 0, ncontrols = 0;
__u8 *bmControls = NULL;
@@ -1559,20 +1705,22 @@ int uvc_ctrl_init_device(struct uvc_device *dev)
bControlSize = entity->camera.bControlSize;
}
+ /* Remove bogus/blacklisted controls */
uvc_ctrl_prune_entity(dev, entity);
+ /* Count supported controls and allocate the controls array */
for (i = 0; i < bControlSize; ++i)
ncontrols += hweight8(bmControls[i]);
-
if (ncontrols == 0)
continue;
- entity->controls = kzalloc(ncontrols*sizeof *ctrl, GFP_KERNEL);
+ entity->controls = kzalloc(ncontrols * sizeof(*ctrl),
+ GFP_KERNEL);
if (entity->controls == NULL)
return -ENOMEM;
-
entity->ncontrols = ncontrols;
+ /* Initialize all supported controls */
ctrl = entity->controls;
for (i = 0; i < bControlSize * 8; ++i) {
if (uvc_test_bit(bmControls, i) == 0)
@@ -1580,81 +1728,47 @@ int uvc_ctrl_init_device(struct uvc_device *dev)
ctrl->entity = entity;
ctrl->index = i;
+
+ uvc_ctrl_init_ctrl(dev, ctrl);
ctrl++;
}
}
- /* Walk the controls info list and associate them with the device
- * controls, then add the device to the global device list. This has
- * to be done while holding the controls lock, to make sure
- * uvc_ctrl_add_info() will not get called in-between.
- */
- mutex_lock(&uvc_driver.ctrl_mutex);
- list_for_each_entry(info, &uvc_driver.controls, list)
- uvc_ctrl_add_ctrl(dev, info);
-
- list_add_tail(&dev->list, &uvc_driver.devices);
- mutex_unlock(&uvc_driver.ctrl_mutex);
-
return 0;
}
/*
* Cleanup device controls.
*/
-void uvc_ctrl_cleanup_device(struct uvc_device *dev)
+static void uvc_ctrl_cleanup_mappings(struct uvc_device *dev,
+ struct uvc_control *ctrl)
{
- struct uvc_entity *entity;
- unsigned int i;
+ struct uvc_control_mapping *mapping, *nm;
- /* Remove the device from the global devices list */
- mutex_lock(&uvc_driver.ctrl_mutex);
- if (dev->list.next != NULL)
- list_del(&dev->list);
- mutex_unlock(&uvc_driver.ctrl_mutex);
-
- list_for_each_entry(entity, &dev->entities, list) {
- for (i = 0; i < entity->ncontrols; ++i)
- kfree(entity->controls[i].uvc_data);
-
- kfree(entity->controls);
+ list_for_each_entry_safe(mapping, nm, &ctrl->info.mappings, list) {
+ list_del(&mapping->list);
+ kfree(mapping->menu_info);
+ kfree(mapping);
}
}
-void uvc_ctrl_cleanup(void)
+void uvc_ctrl_cleanup_device(struct uvc_device *dev)
{
- struct uvc_control_info *info;
- struct uvc_control_info *ni;
- struct uvc_control_mapping *mapping;
- struct uvc_control_mapping *nm;
+ struct uvc_entity *entity;
+ unsigned int i;
- list_for_each_entry_safe(info, ni, &uvc_driver.controls, list) {
- if (!(info->flags & UVC_CONTROL_EXTENSION))
- continue;
+ /* Free controls and control mappings for all entities. */
+ list_for_each_entry(entity, &dev->entities, list) {
+ for (i = 0; i < entity->ncontrols; ++i) {
+ struct uvc_control *ctrl = &entity->controls[i];
- list_for_each_entry_safe(mapping, nm, &info->mappings, list) {
- list_del(&mapping->list);
- kfree(mapping->menu_info);
- kfree(mapping);
+ if (!ctrl->initialized)
+ continue;
+
+ uvc_ctrl_cleanup_mappings(dev, ctrl);
+ kfree(ctrl->uvc_data);
}
- list_del(&info->list);
- kfree(info);
+ kfree(entity->controls);
}
}
-
-void uvc_ctrl_init(void)
-{
- struct uvc_control_info *ctrl = uvc_ctrls;
- struct uvc_control_info *cend = ctrl + ARRAY_SIZE(uvc_ctrls);
- struct uvc_control_mapping *mapping = uvc_ctrl_mappings;
- struct uvc_control_mapping *mend =
- mapping + ARRAY_SIZE(uvc_ctrl_mappings);
-
- for (; ctrl < cend; ++ctrl)
- uvc_ctrl_add_info(ctrl);
-
- for (; mapping < mend; ++mapping)
- uvc_ctrl_add_mapping(mapping);
-}
-
diff --git a/drivers/media/video/uvc/uvc_driver.c b/drivers/media/video/uvc/uvc_driver.c
index 2ac85d8984f0..a1e9dfb52f69 100644
--- a/drivers/media/video/uvc/uvc_driver.c
+++ b/drivers/media/video/uvc/uvc_driver.c
@@ -1,8 +1,8 @@
/*
* uvc_driver.c -- USB Video Class driver
*
- * Copyright (C) 2005-2009
- * Laurent Pinchart (laurent.pinchart@skynet.be)
+ * Copyright (C) 2005-2010
+ * Laurent Pinchart (laurent.pinchart@ideasonboard.com)
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
@@ -38,11 +38,9 @@
#include "uvcvideo.h"
-#define DRIVER_AUTHOR "Laurent Pinchart <laurent.pinchart@skynet.be>"
+#define DRIVER_AUTHOR "Laurent Pinchart " \
+ "<laurent.pinchart@ideasonboard.com>"
#define DRIVER_DESC "USB Video Class driver"
-#ifndef DRIVER_VERSION
-#define DRIVER_VERSION "v0.1.0"
-#endif
unsigned int uvc_clock_param = CLOCK_MONOTONIC;
unsigned int uvc_no_drop_param;
@@ -1762,6 +1760,7 @@ static int uvc_probe(struct usb_interface *intf,
INIT_LIST_HEAD(&dev->streams);
atomic_set(&dev->nstreams, 0);
atomic_set(&dev->users, 0);
+ atomic_set(&dev->nmappings, 0);
dev->udev = usb_get_dev(udev);
dev->intf = usb_get_intf(intf);
@@ -1820,6 +1819,7 @@ static int uvc_probe(struct usb_interface *intf,
}
uvc_trace(UVC_TRACE_PROBE, "UVC device initialized.\n");
+ usb_enable_autosuspend(udev);
return 0;
error:
@@ -2287,12 +2287,6 @@ static int __init uvc_init(void)
{
int result;
- INIT_LIST_HEAD(&uvc_driver.devices);
- INIT_LIST_HEAD(&uvc_driver.controls);
- mutex_init(&uvc_driver.ctrl_mutex);
-
- uvc_ctrl_init();
-
result = usb_register(&uvc_driver.driver);
if (result == 0)
printk(KERN_INFO DRIVER_DESC " (" DRIVER_VERSION ")\n");
@@ -2302,7 +2296,6 @@ static int __init uvc_init(void)
static void __exit uvc_cleanup(void)
{
usb_deregister(&uvc_driver.driver);
- uvc_ctrl_cleanup();
}
module_init(uvc_init);
diff --git a/drivers/media/video/uvc/uvc_isight.c b/drivers/media/video/uvc/uvc_isight.c
index a9285b570dbe..74bbe8f18f3e 100644
--- a/drivers/media/video/uvc/uvc_isight.c
+++ b/drivers/media/video/uvc/uvc_isight.c
@@ -4,7 +4,7 @@
* Copyright (C) 2006-2007
* Ivan N. Zlatev <contact@i-nz.net>
* Copyright (C) 2008-2009
- * Laurent Pinchart <laurent.pinchart@skynet.be>
+ * Laurent Pinchart <laurent.pinchart@ideasonboard.com>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
diff --git a/drivers/media/video/uvc/uvc_queue.c b/drivers/media/video/uvc/uvc_queue.c
index e9928a415086..ed6d5449741c 100644
--- a/drivers/media/video/uvc/uvc_queue.c
+++ b/drivers/media/video/uvc/uvc_queue.c
@@ -1,8 +1,8 @@
/*
* uvc_queue.c -- USB Video Class driver - Buffers management
*
- * Copyright (C) 2005-2009
- * Laurent Pinchart (laurent.pinchart@skynet.be)
+ * Copyright (C) 2005-2010
+ * Laurent Pinchart (laurent.pinchart@ideasonboard.com)
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
@@ -135,7 +135,6 @@ int uvc_alloc_buffers(struct uvc_video_queue *queue, unsigned int nbuffers,
queue->buffer[i].buf.m.offset = i * bufsize;
queue->buffer[i].buf.length = buflength;
queue->buffer[i].buf.type = queue->type;
- queue->buffer[i].buf.sequence = 0;
queue->buffer[i].buf.field = V4L2_FIELD_NONE;
queue->buffer[i].buf.memory = V4L2_MEMORY_MMAP;
queue->buffer[i].buf.flags = 0;
@@ -410,8 +409,7 @@ done:
* state can be properly initialized before buffers are accessed from the
* interrupt handler.
*
- * Enabling the video queue initializes parameters (such as sequence number,
- * sync pattern, ...). If the queue is already enabled, return -EBUSY.
+ * Enabling the video queue returns -EBUSY if the queue is already enabled.
*
* Disabling the video queue cancels the queue and removes all buffers from
* the main queue.
@@ -430,7 +428,6 @@ int uvc_queue_enable(struct uvc_video_queue *queue, int enable)
ret = -EBUSY;
goto done;
}
- queue->sequence = 0;
queue->flags |= UVC_QUEUE_STREAMING;
queue->buf_used = 0;
} else {
@@ -510,8 +507,6 @@ struct uvc_buffer *uvc_queue_next_buffer(struct uvc_video_queue *queue,
nextbuf = NULL;
spin_unlock_irqrestore(&queue->irqlock, flags);
- buf->buf.sequence = queue->sequence++;
-
wake_up(&buf->wait);
return nextbuf;
}
diff --git a/drivers/media/video/uvc/uvc_status.c b/drivers/media/video/uvc/uvc_status.c
index 85019bdacdf7..b7492775e6ae 100644
--- a/drivers/media/video/uvc/uvc_status.c
+++ b/drivers/media/video/uvc/uvc_status.c
@@ -1,8 +1,8 @@
/*
* uvc_status.c -- USB Video Class driver - Status endpoint
*
- * Copyright (C) 2007-2009
- * Laurent Pinchart (laurent.pinchart@skynet.be)
+ * Copyright (C) 2005-2009
+ * Laurent Pinchart (laurent.pinchart@ideasonboard.com)
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
diff --git a/drivers/media/video/uvc/uvc_v4l2.c b/drivers/media/video/uvc/uvc_v4l2.c
index 86db32697b80..6d15de9b5204 100644
--- a/drivers/media/video/uvc/uvc_v4l2.c
+++ b/drivers/media/video/uvc/uvc_v4l2.c
@@ -1,8 +1,8 @@
/*
* uvc_v4l2.c -- USB Video Class driver - V4L2 API
*
- * Copyright (C) 2005-2009
- * Laurent Pinchart (laurent.pinchart@skynet.be)
+ * Copyright (C) 2005-2010
+ * Laurent Pinchart (laurent.pinchart@ideasonboard.com)
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
@@ -31,7 +31,8 @@
/* ------------------------------------------------------------------------
* UVC ioctls
*/
-static int uvc_ioctl_ctrl_map(struct uvc_xu_control_mapping *xmap, int old)
+static int uvc_ioctl_ctrl_map(struct uvc_video_chain *chain,
+ struct uvc_xu_control_mapping *xmap, int old)
{
struct uvc_control_mapping *map;
unsigned int size;
@@ -58,6 +59,8 @@ static int uvc_ioctl_ctrl_map(struct uvc_xu_control_mapping *xmap, int old)
case V4L2_CTRL_TYPE_MENU:
if (old) {
+ uvc_trace(UVC_TRACE_CONTROL, "V4L2_CTRL_TYPE_MENU not "
+ "supported for UVCIOC_CTRL_MAP_OLD.\n");
ret = -EINVAL;
goto done;
}
@@ -78,17 +81,17 @@ static int uvc_ioctl_ctrl_map(struct uvc_xu_control_mapping *xmap, int old)
break;
default:
+ uvc_trace(UVC_TRACE_CONTROL, "Unsupported V4L2 control type "
+ "%u.\n", xmap->v4l2_type);
ret = -EINVAL;
goto done;
}
- ret = uvc_ctrl_add_mapping(map);
+ ret = uvc_ctrl_add_mapping(chain, map);
done:
- if (ret < 0) {
- kfree(map->menu_info);
- kfree(map);
- }
+ kfree(map->menu_info);
+ kfree(map);
return ret;
}
@@ -1021,42 +1024,13 @@ static long uvc_v4l2_do_ioctl(struct file *file, unsigned int cmd, void *arg)
/* Dynamic controls. */
case UVCIOC_CTRL_ADD:
- {
- struct uvc_xu_control_info *xinfo = arg;
- struct uvc_control_info *info;
-
- if (!capable(CAP_SYS_ADMIN))
- return -EPERM;
-
- if (xinfo->size == 0)
- return -EINVAL;
-
- info = kzalloc(sizeof *info, GFP_KERNEL);
- if (info == NULL)
- return -ENOMEM;
-
- memcpy(info->entity, xinfo->entity, sizeof info->entity);
- info->index = xinfo->index;
- info->selector = xinfo->selector;
- info->size = xinfo->size;
- info->flags = xinfo->flags;
-
- info->flags |= UVC_CONTROL_GET_MIN | UVC_CONTROL_GET_MAX |
- UVC_CONTROL_GET_RES | UVC_CONTROL_GET_DEF |
- UVC_CONTROL_EXTENSION;
-
- ret = uvc_ctrl_add_info(info);
- if (ret < 0)
- kfree(info);
- break;
- }
+ /* Legacy ioctl, kept for API compatibility reasons */
+ return -EEXIST;
case UVCIOC_CTRL_MAP_OLD:
case UVCIOC_CTRL_MAP:
- if (!capable(CAP_SYS_ADMIN))
- return -EPERM;
-
- return uvc_ioctl_ctrl_map(arg, cmd == UVCIOC_CTRL_MAP_OLD);
+ return uvc_ioctl_ctrl_map(chain, arg,
+ cmd == UVCIOC_CTRL_MAP_OLD);
case UVCIOC_CTRL_GET:
return uvc_xu_ctrl_query(chain, arg, 0);
diff --git a/drivers/media/video/uvc/uvc_video.c b/drivers/media/video/uvc/uvc_video.c
index e27cf0d3b6d9..5555f0102838 100644
--- a/drivers/media/video/uvc/uvc_video.c
+++ b/drivers/media/video/uvc/uvc_video.c
@@ -1,8 +1,8 @@
/*
* uvc_video.c -- USB Video Class driver - Video handling
*
- * Copyright (C) 2005-2009
- * Laurent Pinchart (laurent.pinchart@skynet.be)
+ * Copyright (C) 2005-2010
+ * Laurent Pinchart (laurent.pinchart@ideasonboard.com)
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
@@ -45,6 +45,30 @@ static int __uvc_query_ctrl(struct uvc_device *dev, __u8 query, __u8 unit,
unit << 8 | intfnum, data, size, timeout);
}
+static const char *uvc_query_name(__u8 query)
+{
+ switch (query) {
+ case UVC_SET_CUR:
+ return "SET_CUR";
+ case UVC_GET_CUR:
+ return "GET_CUR";
+ case UVC_GET_MIN:
+ return "GET_MIN";
+ case UVC_GET_MAX:
+ return "GET_MAX";
+ case UVC_GET_RES:
+ return "GET_RES";
+ case UVC_GET_LEN:
+ return "GET_LEN";
+ case UVC_GET_INFO:
+ return "GET_INFO";
+ case UVC_GET_DEF:
+ return "GET_DEF";
+ default:
+ return "<invalid>";
+ }
+}
+
int uvc_query_ctrl(struct uvc_device *dev, __u8 query, __u8 unit,
__u8 intfnum, __u8 cs, void *data, __u16 size)
{
@@ -53,9 +77,9 @@ int uvc_query_ctrl(struct uvc_device *dev, __u8 query, __u8 unit,
ret = __uvc_query_ctrl(dev, query, unit, intfnum, cs, data, size,
UVC_CTRL_CONTROL_TIMEOUT);
if (ret != size) {
- uvc_printk(KERN_ERR, "Failed to query (%u) UVC control %u "
- "(unit %u) : %d (exp. %u).\n", query, cs, unit, ret,
- size);
+ uvc_printk(KERN_ERR, "Failed to query (%s) UVC control %u on "
+ "unit %u: %d (exp. %u).\n", uvc_query_name(query), cs,
+ unit, ret, size);
return -EIO;
}
@@ -114,6 +138,15 @@ static void uvc_fixup_video_ctrl(struct uvc_streaming *stream,
bandwidth /= 8;
bandwidth += 12;
+ /* The bandwidth estimate is too low for many cameras. Don't use
+ * maximum packet sizes lower than 1024 bytes to try and work
+ * around the problem. According to measurements done on two
+ * different camera models, the value is high enough to get most
+ * resolutions working while not preventing two simultaneous
+ * VGA streams at 15 fps.
+ */
+ bandwidth = max_t(u32, bandwidth, 1024);
+
ctrl->dwMaxPayloadTransferSize = bandwidth;
}
}
@@ -394,6 +427,12 @@ static int uvc_video_decode_start(struct uvc_streaming *stream,
fid = data[1] & UVC_STREAM_FID;
+ /* Increase the sequence number regardless of any buffer states, so
+ * that discontinuous sequence numbers always indicate lost frames.
+ */
+ if (stream->last_fid != fid)
+ stream->sequence++;
+
/* Store the payload FID bit and return immediately when the buffer is
* NULL.
*/
@@ -427,6 +466,7 @@ static int uvc_video_decode_start(struct uvc_streaming *stream,
else
ktime_get_real_ts(&ts);
+ buf->buf.sequence = stream->sequence;
buf->buf.timestamp.tv_sec = ts.tv_sec;
buf->buf.timestamp.tv_usec = ts.tv_nsec / NSEC_PER_USEC;
@@ -688,6 +728,7 @@ static void uvc_video_encode_bulk(struct urb *urb, struct uvc_streaming *stream,
if (buf->buf.bytesused == stream->queue.buf_used) {
stream->queue.buf_used = 0;
buf->state = UVC_BUF_STATE_READY;
+ buf->buf.sequence = ++stream->sequence;
uvc_queue_next_buffer(&stream->queue, buf);
stream->last_fid ^= UVC_STREAM_FID;
}
@@ -946,6 +987,7 @@ static int uvc_init_video(struct uvc_streaming *stream, gfp_t gfp_flags)
unsigned int i;
int ret;
+ stream->sequence = -1;
stream->last_fid = -1;
stream->bulk.header_size = 0;
stream->bulk.skip_payload = 0;
diff --git a/drivers/media/video/uvc/uvcvideo.h b/drivers/media/video/uvc/uvcvideo.h
index 892e0e51916c..d97cf6d6a4f9 100644
--- a/drivers/media/video/uvc/uvcvideo.h
+++ b/drivers/media/video/uvc/uvcvideo.h
@@ -27,8 +27,6 @@
#define UVC_CONTROL_RESTORE (1 << 6)
/* Control can be updated by the camera. */
#define UVC_CONTROL_AUTO_UPDATE (1 << 7)
-/* Control is an extension unit control. */
-#define UVC_CONTROL_EXTENSION (1 << 8)
#define UVC_CONTROL_GET_RANGE (UVC_CONTROL_GET_CUR | UVC_CONTROL_GET_MIN | \
UVC_CONTROL_GET_MAX | UVC_CONTROL_GET_RES | \
@@ -159,7 +157,8 @@ struct uvc_xu_control {
* Driver specific constants.
*/
-#define DRIVER_VERSION_NUMBER KERNEL_VERSION(0, 1, 0)
+#define DRIVER_VERSION_NUMBER KERNEL_VERSION(1, 0, 0)
+#define DRIVER_VERSION "v1.0.0"
/* Number of isochronous URBs. */
#define UVC_URBS 5
@@ -173,6 +172,9 @@ struct uvc_xu_control {
#define UVC_CTRL_CONTROL_TIMEOUT 300
#define UVC_CTRL_STREAMING_TIMEOUT 5000
+/* Maximum allowed number of control mappings per device */
+#define UVC_MAX_CONTROL_MAPPINGS 1024
+
/* Devices quirks */
#define UVC_QUIRK_STATUS_INTERVAL 0x00000001
#define UVC_QUIRK_PROBE_MINMAX 0x00000002
@@ -198,11 +200,10 @@ struct uvc_device;
* structures to maximize cache efficiency.
*/
struct uvc_control_info {
- struct list_head list;
struct list_head mappings;
__u8 entity[16];
- __u8 index;
+ __u8 index; /* Bit index in bmControls */
__u8 selector;
__u16 size;
@@ -235,17 +236,17 @@ struct uvc_control_mapping {
struct uvc_control {
struct uvc_entity *entity;
- struct uvc_control_info *info;
+ struct uvc_control_info info;
__u8 index; /* Used to match the uvc_control entry with a
uvc_control_info. */
- __u8 dirty : 1,
- loaded : 1,
- modified : 1,
- cached : 1;
+ __u8 dirty:1,
+ loaded:1,
+ modified:1,
+ cached:1,
+ initialized:1;
__u8 *uvc_data;
- __u8 *uvc_info;
};
struct uvc_format_desc {
@@ -392,7 +393,6 @@ struct uvc_video_queue {
void *mem;
unsigned int flags;
- __u32 sequence;
unsigned int count;
unsigned int buf_size;
@@ -413,7 +413,7 @@ struct uvc_video_chain {
struct uvc_entity *processing; /* Processing unit */
struct uvc_entity *selector; /* Selector unit */
- struct mutex ctrl_mutex;
+ struct mutex ctrl_mutex; /* Protects ctrl.info */
};
struct uvc_streaming {
@@ -458,6 +458,7 @@ struct uvc_streaming {
dma_addr_t urb_dma[UVC_URBS];
unsigned int urb_size;
+ __u32 sequence;
__u8 last_fid;
};
@@ -474,8 +475,8 @@ struct uvc_device {
char name[32];
enum uvc_device_state state;
- struct list_head list;
atomic_t users;
+ atomic_t nmappings;
/* Video control interface */
__u16 uvc_version;
@@ -509,11 +510,6 @@ struct uvc_fh {
struct uvc_driver {
struct usb_driver driver;
-
- struct list_head devices; /* struct uvc_device list */
- struct list_head controls; /* struct uvc_control_info list */
- struct mutex ctrl_mutex; /* protects controls and devices
- lists */
};
/* ------------------------------------------------------------------------
@@ -615,13 +611,11 @@ extern struct uvc_control *uvc_find_control(struct uvc_video_chain *chain,
extern int uvc_query_v4l2_ctrl(struct uvc_video_chain *chain,
struct v4l2_queryctrl *v4l2_ctrl);
-extern int uvc_ctrl_add_info(struct uvc_control_info *info);
-extern int uvc_ctrl_add_mapping(struct uvc_control_mapping *mapping);
+extern int uvc_ctrl_add_mapping(struct uvc_video_chain *chain,
+ const struct uvc_control_mapping *mapping);
extern int uvc_ctrl_init_device(struct uvc_device *dev);
extern void uvc_ctrl_cleanup_device(struct uvc_device *dev);
extern int uvc_ctrl_resume_device(struct uvc_device *dev);
-extern void uvc_ctrl_init(void);
-extern void uvc_ctrl_cleanup(void);
extern int uvc_ctrl_begin(struct uvc_video_chain *chain);
extern int __uvc_ctrl_commit(struct uvc_video_chain *chain, int rollback);
diff --git a/drivers/media/video/v4l1-compat.c b/drivers/media/video/v4l1-compat.c
index 0c2105ca611e..d4ac751036a2 100644
--- a/drivers/media/video/v4l1-compat.c
+++ b/drivers/media/video/v4l1-compat.c
@@ -645,9 +645,16 @@ static noinline long v4l1_compat_get_picture(
goto done;
}
- pict->depth = ((fmt->fmt.pix.bytesperline << 3)
- + (fmt->fmt.pix.width - 1))
- / fmt->fmt.pix.width;
+ if (fmt->fmt.pix.width) {
+ pict->depth = ((fmt->fmt.pix.bytesperline << 3)
+ + (fmt->fmt.pix.width - 1))
+ / fmt->fmt.pix.width;
+ } else {
+ err = -EINVAL;
+ goto done;
+ }
+
pict->palette = pixelformat_to_palette(
fmt->fmt.pix.pixelformat);
done:
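The hunk above guards what is otherwise a ceiling division: depth = (bytesperline * 8 + width - 1) / width. A minimal standalone sketch of the same computation follows; the helper name is hypothetical and not part of the patch.

#include <linux/errno.h>

/* Illustrative sketch of the depth calculation guarded above. */
static int v4l1_pict_depth(unsigned int bytesperline, unsigned int width)
{
	if (width == 0)
		return -EINVAL;
	/* e.g. bytesperline = 640, width = 320: (5120 + 319) / 320 = 16 bpp */
	return ((bytesperline << 3) + (width - 1)) / width;
}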
diff --git a/drivers/media/video/v4l2-common.c b/drivers/media/video/v4l2-common.c
index 8ee1179be926..b5eb1f3950b1 100644
--- a/drivers/media/video/v4l2-common.c
+++ b/drivers/media/video/v4l2-common.c
@@ -368,16 +368,15 @@ EXPORT_SYMBOL_GPL(v4l2_i2c_subdev_init);
/* Load an i2c sub-device. */
struct v4l2_subdev *v4l2_i2c_new_subdev_board(struct v4l2_device *v4l2_dev,
- struct i2c_adapter *adapter, const char *module_name,
- struct i2c_board_info *info, const unsigned short *probe_addrs)
+ struct i2c_adapter *adapter, struct i2c_board_info *info,
+ const unsigned short *probe_addrs)
{
struct v4l2_subdev *sd = NULL;
struct i2c_client *client;
BUG_ON(!v4l2_dev);
- if (module_name)
- request_module(module_name);
+ request_module(I2C_MODULE_PREFIX "%s", info->type);
/* Create the i2c client */
if (info->addr == 0 && probe_addrs)
@@ -430,8 +429,7 @@ error:
EXPORT_SYMBOL_GPL(v4l2_i2c_new_subdev_board);
struct v4l2_subdev *v4l2_i2c_new_subdev_cfg(struct v4l2_device *v4l2_dev,
- struct i2c_adapter *adapter,
- const char *module_name, const char *client_type,
+ struct i2c_adapter *adapter, const char *client_type,
int irq, void *platform_data,
u8 addr, const unsigned short *probe_addrs)
{
@@ -445,8 +443,7 @@ struct v4l2_subdev *v4l2_i2c_new_subdev_cfg(struct v4l2_device *v4l2_dev,
info.irq = irq;
info.platform_data = platform_data;
- return v4l2_i2c_new_subdev_board(v4l2_dev, adapter, module_name,
- &info, probe_addrs);
+ return v4l2_i2c_new_subdev_board(v4l2_dev, adapter, &info, probe_addrs);
}
EXPORT_SYMBOL_GPL(v4l2_i2c_new_subdev_cfg);
@@ -676,3 +673,28 @@ int v4l_fill_dv_preset_info(u32 preset, struct v4l2_dv_enum_preset *info)
return 0;
}
EXPORT_SYMBOL_GPL(v4l_fill_dv_preset_info);
+
+const struct v4l2_frmsize_discrete *v4l2_find_nearest_format(
+ const struct v4l2_discrete_probe *probe,
+ s32 width, s32 height)
+{
+ int i;
+ u32 error, min_error = UINT_MAX;
+ const struct v4l2_frmsize_discrete *size, *best = NULL;
+
+ if (!probe)
+ return best;
+
+ for (i = 0, size = probe->sizes; i < probe->num_sizes; i++, size++) {
+ error = abs(size->width - width) + abs(size->height - height);
+ if (error < min_error) {
+ min_error = error;
+ best = size;
+ }
+ if (!error)
+ break;
+ }
+
+ return best;
+}
+EXPORT_SYMBOL_GPL(v4l2_find_nearest_format);
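The new v4l2_find_nearest_format() helper scans a driver-supplied table of discrete frame sizes and returns the entry that minimizes |Δwidth| + |Δheight|, stopping early on an exact match. A hedged usage sketch follows; the table contents and helper names are made up for illustration and are not part of the patch.

#include <media/v4l2-common.h>

/* Hypothetical caller of the new helper. */
static const struct v4l2_frmsize_discrete demo_sizes[] = {
	{  320,  240 },
	{  640,  480 },
	{ 1280,  720 },
};

static const struct v4l2_discrete_probe demo_probe = {
	.sizes		= demo_sizes,
	.num_sizes	= ARRAY_SIZE(demo_sizes),
};

static void demo_pick_size(void)
{
	/* A 700x500 request picks 640x480: error 60 + 20 = 80, versus
	 * 640 for 320x240 and 800 for 1280x720. */
	const struct v4l2_frmsize_discrete *best =
		v4l2_find_nearest_format(&demo_probe, 700, 500);

	pr_info("nearest size: %ux%u\n", best->width, best->height);
}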
diff --git a/drivers/media/video/v4l2-compat-ioctl32.c b/drivers/media/video/v4l2-compat-ioctl32.c
index 86294ed35c9b..e30e8dfb6205 100644
--- a/drivers/media/video/v4l2-compat-ioctl32.c
+++ b/drivers/media/video/v4l2-compat-ioctl32.c
@@ -18,7 +18,6 @@
#include <linux/videodev.h>
#include <linux/videodev2.h>
#include <linux/module.h>
-#include <linux/smp_lock.h>
#include <media/v4l2-ioctl.h>
#ifdef CONFIG_COMPAT
diff --git a/drivers/media/video/v4l2-ctrls.c b/drivers/media/video/v4l2-ctrls.c
index ea8d32cd425d..9d2502cd03ff 100644
--- a/drivers/media/video/v4l2-ctrls.c
+++ b/drivers/media/video/v4l2-ctrls.c
@@ -305,6 +305,8 @@ const char *v4l2_ctrl_get_name(u32 id)
case V4L2_CID_ROTATE: return "Rotate";
case V4L2_CID_BG_COLOR: return "Background Color";
case V4L2_CID_CHROMA_GAIN: return "Chroma Gain";
+ case V4L2_CID_ILLUMINATORS_1: return "Illuminator 1";
+ case V4L2_CID_ILLUMINATORS_2: return "Illuminator 2";
/* MPEG controls */
/* Keep the order of the 'case's the same as in videodev2.h! */
@@ -419,6 +421,8 @@ void v4l2_ctrl_fill(u32 id, const char **name, enum v4l2_ctrl_type *type,
case V4L2_CID_AUDIO_LIMITER_ENABLED:
case V4L2_CID_AUDIO_COMPRESSION_ENABLED:
case V4L2_CID_PILOT_TONE_ENABLED:
+ case V4L2_CID_ILLUMINATORS_1:
+ case V4L2_CID_ILLUMINATORS_2:
*type = V4L2_CTRL_TYPE_BOOLEAN;
*min = 0;
*max = *step = 1;
diff --git a/drivers/media/video/v4l2-dev.c b/drivers/media/video/v4l2-dev.c
index cb77197d480e..03f7f4670e9b 100644
--- a/drivers/media/video/v4l2-dev.c
+++ b/drivers/media/video/v4l2-dev.c
@@ -25,7 +25,6 @@
#include <linux/init.h>
#include <linux/kmod.h>
#include <linux/slab.h>
-#include <linux/smp_lock.h>
#include <asm/uaccess.h>
#include <asm/system.h>
@@ -81,7 +80,7 @@ static inline unsigned long *devnode_bits(int vfl_type)
/* Any types not assigned to fixed minor ranges must be mapped to
one single bitmap for the purposes of finding a free node number
since all those unassigned types use the same minor range. */
- int idx = (vfl_type > VFL_TYPE_VTX) ? VFL_TYPE_MAX - 1 : vfl_type;
+ int idx = (vfl_type > VFL_TYPE_RADIO) ? VFL_TYPE_MAX - 1 : vfl_type;
return devnode_nums[idx];
}
@@ -187,79 +186,92 @@ static ssize_t v4l2_read(struct file *filp, char __user *buf,
size_t sz, loff_t *off)
{
struct video_device *vdev = video_devdata(filp);
+ int ret = -EIO;
if (!vdev->fops->read)
return -EINVAL;
- if (!video_is_registered(vdev))
- return -EIO;
- return vdev->fops->read(filp, buf, sz, off);
+ if (vdev->lock)
+ mutex_lock(vdev->lock);
+ if (video_is_registered(vdev))
+ ret = vdev->fops->read(filp, buf, sz, off);
+ if (vdev->lock)
+ mutex_unlock(vdev->lock);
+ return ret;
}
static ssize_t v4l2_write(struct file *filp, const char __user *buf,
size_t sz, loff_t *off)
{
struct video_device *vdev = video_devdata(filp);
+ int ret = -EIO;
if (!vdev->fops->write)
return -EINVAL;
- if (!video_is_registered(vdev))
- return -EIO;
- return vdev->fops->write(filp, buf, sz, off);
+ if (vdev->lock)
+ mutex_lock(vdev->lock);
+ if (video_is_registered(vdev))
+ ret = vdev->fops->write(filp, buf, sz, off);
+ if (vdev->lock)
+ mutex_unlock(vdev->lock);
+ return ret;
}
static unsigned int v4l2_poll(struct file *filp, struct poll_table_struct *poll)
{
struct video_device *vdev = video_devdata(filp);
+ int ret = DEFAULT_POLLMASK;
- if (!vdev->fops->poll || !video_is_registered(vdev))
- return DEFAULT_POLLMASK;
- return vdev->fops->poll(filp, poll);
+ if (!vdev->fops->poll)
+ return ret;
+ if (vdev->lock)
+ mutex_lock(vdev->lock);
+ if (video_is_registered(vdev))
+ ret = vdev->fops->poll(filp, poll);
+ if (vdev->lock)
+ mutex_unlock(vdev->lock);
+ return ret;
}
static long v4l2_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
{
struct video_device *vdev = video_devdata(filp);
- int ret;
+ int ret = -ENODEV;
- /* Allow ioctl to continue even if the device was unregistered.
- Things like dequeueing buffers might still be useful. */
if (vdev->fops->unlocked_ioctl) {
- ret = vdev->fops->unlocked_ioctl(filp, cmd, arg);
+ if (vdev->lock)
+ mutex_lock(vdev->lock);
+ if (video_is_registered(vdev))
+ ret = vdev->fops->unlocked_ioctl(filp, cmd, arg);
+ if (vdev->lock)
+ mutex_unlock(vdev->lock);
} else if (vdev->fops->ioctl) {
/* TODO: convert all drivers to unlocked_ioctl */
- lock_kernel();
- ret = vdev->fops->ioctl(filp, cmd, arg);
- unlock_kernel();
+ static DEFINE_MUTEX(v4l2_ioctl_mutex);
+
+ mutex_lock(&v4l2_ioctl_mutex);
+ if (video_is_registered(vdev))
+ ret = vdev->fops->ioctl(filp, cmd, arg);
+ mutex_unlock(&v4l2_ioctl_mutex);
} else
ret = -ENOTTY;
return ret;
}
-#ifdef CONFIG_MMU
-#define v4l2_get_unmapped_area NULL
-#else
-static unsigned long v4l2_get_unmapped_area(struct file *filp,
- unsigned long addr, unsigned long len, unsigned long pgoff,
- unsigned long flags)
-{
- struct video_device *vdev = video_devdata(filp);
-
- if (!vdev->fops->get_unmapped_area)
- return -ENOSYS;
- if (!video_is_registered(vdev))
- return -ENODEV;
- return vdev->fops->get_unmapped_area(filp, addr, len, pgoff, flags);
-}
-#endif
-
static int v4l2_mmap(struct file *filp, struct vm_area_struct *vm)
{
struct video_device *vdev = video_devdata(filp);
+ int ret = -ENODEV;
- if (!vdev->fops->mmap || !video_is_registered(vdev))
- return -ENODEV;
- return vdev->fops->mmap(filp, vm);
+ if (!vdev->fops->mmap)
+ return ret;
+ if (vdev->lock)
+ mutex_lock(vdev->lock);
+ if (video_is_registered(vdev))
+ ret = vdev->fops->mmap(filp, vm);
+ if (vdev->lock)
+ mutex_unlock(vdev->lock);
+ return ret;
}
/* Override for the open function */
@@ -271,17 +283,24 @@ static int v4l2_open(struct inode *inode, struct file *filp)
/* Check if the video device is available */
mutex_lock(&videodev_lock);
vdev = video_devdata(filp);
- /* return ENODEV if the video device has been removed
- already or if it is not registered anymore. */
- if (vdev == NULL || !video_is_registered(vdev)) {
+ /* return ENODEV if the video device has already been removed. */
+ if (vdev == NULL) {
mutex_unlock(&videodev_lock);
return -ENODEV;
}
/* and increase the device refcount */
video_get(vdev);
mutex_unlock(&videodev_lock);
- if (vdev->fops->open)
- ret = vdev->fops->open(filp);
+ if (vdev->fops->open) {
+ if (vdev->lock)
+ mutex_lock(vdev->lock);
+ if (video_is_registered(vdev))
+ ret = vdev->fops->open(filp);
+ else
+ ret = -ENODEV;
+ if (vdev->lock)
+ mutex_unlock(vdev->lock);
+ }
/* decrease the refcount in case of an error */
if (ret)
@@ -295,8 +314,13 @@ static int v4l2_release(struct inode *inode, struct file *filp)
struct video_device *vdev = video_devdata(filp);
int ret = 0;
- if (vdev->fops->release)
+ if (vdev->fops->release) {
+ if (vdev->lock)
+ mutex_lock(vdev->lock);
vdev->fops->release(filp);
+ if (vdev->lock)
+ mutex_unlock(vdev->lock);
+ }
/* decrease the refcount unconditionally since the release()
return value is ignored. */
@@ -309,7 +333,6 @@ static const struct file_operations v4l2_fops = {
.read = v4l2_read,
.write = v4l2_write,
.open = v4l2_open,
- .get_unmapped_area = v4l2_get_unmapped_area,
.mmap = v4l2_mmap,
.unlocked_ioctl = v4l2_ioctl,
#ifdef CONFIG_COMPAT
@@ -377,8 +400,6 @@ static int get_index(struct video_device *vdev)
*
* %VFL_TYPE_GRABBER - A frame grabber
*
- * %VFL_TYPE_VTX - A teletext device
- *
* %VFL_TYPE_VBI - Vertical blank data (undecoded)
*
* %VFL_TYPE_RADIO - A radio card
@@ -411,9 +432,6 @@ static int __video_register_device(struct video_device *vdev, int type, int nr,
case VFL_TYPE_GRABBER:
name_base = "video";
break;
- case VFL_TYPE_VTX:
- name_base = "vtx";
- break;
case VFL_TYPE_VBI:
name_base = "vbi";
break;
@@ -451,10 +469,6 @@ static int __video_register_device(struct video_device *vdev, int type, int nr,
minor_offset = 64;
minor_cnt = 64;
break;
- case VFL_TYPE_VTX:
- minor_offset = 192;
- minor_cnt = 32;
- break;
case VFL_TYPE_VBI:
minor_offset = 224;
minor_cnt = 32;
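The v4l2-dev.c changes above replace the BKL (and the bare registration checks) with an optional per-device mutex: each file-operation wrapper takes vdev->lock when the driver provides one, re-checks video_is_registered() under the lock, and only then calls into the driver. A hedged sketch of that pattern in isolation; the wrapper name and callback signature are invented for illustration.

#include <linux/mutex.h>
#include <media/v4l2-dev.h>

/* Sketch of the serialization pattern used by the wrappers above. */
static long demo_locked_fop(struct video_device *vdev,
			    long (*fop)(struct video_device *vdev))
{
	long ret = -ENODEV;

	if (vdev->lock)
		mutex_lock(vdev->lock);
	if (video_is_registered(vdev))
		ret = fop(vdev);
	if (vdev->lock)
		mutex_unlock(vdev->lock);
	return ret;
}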
diff --git a/drivers/media/video/v4l2-event.c b/drivers/media/video/v4l2-event.c
index de74ce07b5e2..69fd343d4774 100644
--- a/drivers/media/video/v4l2-event.c
+++ b/drivers/media/video/v4l2-event.c
@@ -134,15 +134,22 @@ int v4l2_event_dequeue(struct v4l2_fh *fh, struct v4l2_event *event,
if (nonblocking)
return __v4l2_event_dequeue(fh, event);
+ /* Release the vdev lock while waiting */
+ if (fh->vdev->lock)
+ mutex_unlock(fh->vdev->lock);
+
do {
ret = wait_event_interruptible(events->wait,
events->navailable != 0);
if (ret < 0)
- return ret;
+ break;
ret = __v4l2_event_dequeue(fh, event);
} while (ret == -ENOENT);
+ if (fh->vdev->lock)
+ mutex_lock(fh->vdev->lock);
+
return ret;
}
EXPORT_SYMBOL_GPL(v4l2_event_dequeue);
diff --git a/drivers/media/video/v4l2-mem2mem.c b/drivers/media/video/v4l2-mem2mem.c
index f45f9405ea39..ac832a28e18e 100644
--- a/drivers/media/video/v4l2-mem2mem.c
+++ b/drivers/media/video/v4l2-mem2mem.c
@@ -421,8 +421,8 @@ unsigned int v4l2_m2m_poll(struct file *file, struct v4l2_m2m_ctx *m2m_ctx,
src_q = v4l2_m2m_get_src_vq(m2m_ctx);
dst_q = v4l2_m2m_get_dst_vq(m2m_ctx);
- mutex_lock(&src_q->vb_lock);
- mutex_lock(&dst_q->vb_lock);
+ videobuf_queue_lock(src_q);
+ videobuf_queue_lock(dst_q);
if (src_q->streaming && !list_empty(&src_q->stream))
src_vb = list_first_entry(&src_q->stream,
@@ -450,8 +450,8 @@ unsigned int v4l2_m2m_poll(struct file *file, struct v4l2_m2m_ctx *m2m_ctx,
}
end:
- mutex_unlock(&dst_q->vb_lock);
- mutex_unlock(&src_q->vb_lock);
+ videobuf_queue_unlock(dst_q);
+ videobuf_queue_unlock(src_q);
return rc;
}
EXPORT_SYMBOL_GPL(v4l2_m2m_poll);
diff --git a/drivers/media/video/via-camera.c b/drivers/media/video/via-camera.c
new file mode 100644
index 000000000000..9eda7cc03121
--- /dev/null
+++ b/drivers/media/video/via-camera.c
@@ -0,0 +1,1474 @@
+/*
+ * Driver for the VIA Chrome integrated camera controller.
+ *
+ * Copyright 2009,2010 Jonathan Corbet <corbet@lwn.net>
+ * Distributable under the terms of the GNU General Public License, version 2
+ *
+ * This work was supported by the One Laptop Per Child project
+ */
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/device.h>
+#include <linux/list.h>
+#include <linux/pci.h>
+#include <linux/gpio.h>
+#include <linux/interrupt.h>
+#include <linux/platform_device.h>
+#include <linux/videodev2.h>
+#include <media/v4l2-device.h>
+#include <media/v4l2-ioctl.h>
+#include <media/v4l2-chip-ident.h>
+#include <media/videobuf-dma-sg.h>
+#include <linux/delay.h>
+#include <linux/dma-mapping.h>
+#include <linux/pm_qos_params.h>
+#include <linux/via-core.h>
+#include <linux/via-gpio.h>
+#include <linux/via_i2c.h>
+
+#include "via-camera.h"
+
+MODULE_AUTHOR("Jonathan Corbet <corbet@lwn.net>");
+MODULE_DESCRIPTION("VIA framebuffer-based camera controller driver");
+MODULE_LICENSE("GPL");
+
+static int flip_image;
+module_param(flip_image, bool, 0444);
+MODULE_PARM_DESC(flip_image,
+ "If set, the sensor will be instructed to flip the image "
+ "vertically.");
+
+#ifdef CONFIG_OLPC_XO_1_5
+static int override_serial;
+module_param(override_serial, bool, 0444);
+MODULE_PARM_DESC(override_serial,
+ "The camera driver will normally refuse to load if "
+ "the XO 1.5 serial port is enabled. Set this option "
+ "to force the issue.");
+#endif
+
+/*
+ * Basic window sizes.
+ */
+#define VGA_WIDTH 640
+#define VGA_HEIGHT 480
+#define QCIF_WIDTH 176
+#define QCIF_HEIGHT 144
+
+/*
+ * The structure describing our camera.
+ */
+enum viacam_opstate { S_IDLE = 0, S_RUNNING = 1 };
+
+struct via_camera {
+ struct v4l2_device v4l2_dev;
+ struct video_device vdev;
+ struct v4l2_subdev *sensor;
+ struct platform_device *platdev;
+ struct viafb_dev *viadev;
+ struct mutex lock;
+ enum viacam_opstate opstate;
+ unsigned long flags;
+ struct pm_qos_request_list qos_request;
+ /*
+ * GPIO info for power/reset management
+ */
+ int power_gpio;
+ int reset_gpio;
+ /*
+ * I/O memory stuff.
+ */
+ void __iomem *mmio; /* Where the registers live */
+ void __iomem *fbmem; /* Frame buffer memory */
+ u32 fb_offset; /* Reserved memory offset (FB) */
+ /*
+ * Capture buffers and related. The controller supports
+ * up to three, so that's what we have here. These buffers
+ * live in frame buffer memory, so we don't call them "DMA".
+ */
+ unsigned int cb_offsets[3]; /* offsets into fb mem */
+ u8 *cb_addrs[3]; /* Kernel-space addresses */
+ int n_cap_bufs; /* How many are we using? */
+ int next_buf;
+ struct videobuf_queue vb_queue;
+ struct list_head buffer_queue; /* prot. by reg_lock */
+ /*
+ * User tracking.
+ */
+ int users;
+ struct file *owner;
+ /*
+ * Video format information. sensor_format is kept in a form
+ * that we can use to pass to the sensor. We always run the
+ * sensor in VGA resolution, though, and let the controller
+ * downscale things if need be. So we keep the "real"
+ * dimensions separately.
+ */
+ struct v4l2_pix_format sensor_format;
+ struct v4l2_pix_format user_format;
+ enum v4l2_mbus_pixelcode mbus_code;
+};
+
+/*
+ * Yes, this is a hack, but there's only going to be one of these
+ * on any system we know of.
+ */
+static struct via_camera *via_cam_info;
+
+/*
+ * Flag values, manipulated with bitops
+ */
+#define CF_DMA_ACTIVE 0 /* A frame is incoming */
+#define CF_CONFIG_NEEDED 1 /* Must configure hardware */
+
+
+/*
+ * Nasty ugly v4l2 boilerplate.
+ */
+#define sensor_call(cam, optype, func, args...) \
+ v4l2_subdev_call(cam->sensor, optype, func, ##args)
+
+/*
+ * Debugging and related.
+ */
+#define cam_err(cam, fmt, arg...) \
+ dev_err(&(cam)->platdev->dev, fmt, ##arg);
+#define cam_warn(cam, fmt, arg...) \
+ dev_warn(&(cam)->platdev->dev, fmt, ##arg);
+#define cam_dbg(cam, fmt, arg...) \
+ dev_dbg(&(cam)->platdev->dev, fmt, ##arg);
+
+/*
+ * Format handling. This is ripped almost directly from Hans's changes
+ * to cafe_ccic.c. It's a little unfortunate; until this change, we
+ * didn't need to know anything about the format except its byte depth;
+ * now this information must be managed at this level too.
+ */
+static struct via_format {
+ __u8 *desc;
+ __u32 pixelformat;
+ int bpp; /* Bytes per pixel */
+ enum v4l2_mbus_pixelcode mbus_code;
+} via_formats[] = {
+ {
+ .desc = "YUYV 4:2:2",
+ .pixelformat = V4L2_PIX_FMT_YUYV,
+ .mbus_code = V4L2_MBUS_FMT_YUYV8_2X8,
+ .bpp = 2,
+ },
+ {
+ .desc = "RGB 565",
+ .pixelformat = V4L2_PIX_FMT_RGB565,
+ .mbus_code = V4L2_MBUS_FMT_RGB565_2X8_LE,
+ .bpp = 2,
+ },
+ /* RGB444 and Bayer should be doable, but have never been
+ tested with this driver. */
+};
+#define N_VIA_FMTS ARRAY_SIZE(via_formats)
+
+static struct via_format *via_find_format(u32 pixelformat)
+{
+ unsigned i;
+
+ for (i = 0; i < N_VIA_FMTS; i++)
+ if (via_formats[i].pixelformat == pixelformat)
+ return via_formats + i;
+ /* Not found? Then return the first format. */
+ return via_formats;
+}
+
+
+/*--------------------------------------------------------------------------*/
+/*
+ * Sensor power/reset management. This piece is OLPC-specific for
+ * sure; other configurations will have things connected differently.
+ */
+static int via_sensor_power_setup(struct via_camera *cam)
+{
+ int ret;
+
+ cam->power_gpio = viafb_gpio_lookup("VGPIO3");
+ cam->reset_gpio = viafb_gpio_lookup("VGPIO2");
+ if (cam->power_gpio < 0 || cam->reset_gpio < 0) {
+ dev_err(&cam->platdev->dev, "Unable to find GPIO lines\n");
+ return -EINVAL;
+ }
+ ret = gpio_request(cam->power_gpio, "viafb-camera");
+ if (ret) {
+ dev_err(&cam->platdev->dev, "Unable to request power GPIO\n");
+ return ret;
+ }
+ ret = gpio_request(cam->reset_gpio, "viafb-camera");
+ if (ret) {
+ dev_err(&cam->platdev->dev, "Unable to request reset GPIO\n");
+ gpio_free(cam->power_gpio);
+ return ret;
+ }
+ gpio_direction_output(cam->power_gpio, 0);
+ gpio_direction_output(cam->reset_gpio, 0);
+ return 0;
+}
+
+/*
+ * Power up the sensor and perform the reset dance.
+ */
+static void via_sensor_power_up(struct via_camera *cam)
+{
+ gpio_set_value(cam->power_gpio, 1);
+ gpio_set_value(cam->reset_gpio, 0);
+ msleep(20); /* Probably excessive */
+ gpio_set_value(cam->reset_gpio, 1);
+ msleep(20);
+}
+
+static void via_sensor_power_down(struct via_camera *cam)
+{
+ gpio_set_value(cam->power_gpio, 0);
+ gpio_set_value(cam->reset_gpio, 0);
+}
+
+
+static void via_sensor_power_release(struct via_camera *cam)
+{
+ via_sensor_power_down(cam);
+ gpio_free(cam->power_gpio);
+ gpio_free(cam->reset_gpio);
+}
+
+/* --------------------------------------------------------------------------*/
+/* Sensor ops */
+
+/*
+ * Manage the ov7670 "flip" bit, which needs special help.
+ */
+static int viacam_set_flip(struct via_camera *cam)
+{
+ struct v4l2_control ctrl;
+
+ memset(&ctrl, 0, sizeof(ctrl));
+ ctrl.id = V4L2_CID_VFLIP;
+ ctrl.value = flip_image;
+ return sensor_call(cam, core, s_ctrl, &ctrl);
+}
+
+/*
+ * Configure the sensor. It's up to the caller to ensure
+ * that the camera is in the correct operating state.
+ */
+static int viacam_configure_sensor(struct via_camera *cam)
+{
+ struct v4l2_mbus_framefmt mbus_fmt;
+ int ret;
+
+ v4l2_fill_mbus_format(&mbus_fmt, &cam->sensor_format, cam->mbus_code);
+ ret = sensor_call(cam, core, init, 0);
+ if (ret == 0)
+ ret = sensor_call(cam, video, s_mbus_fmt, &mbus_fmt);
+ /*
+ * OV7670 does weird things if flip is set *before* format...
+ */
+ if (ret == 0)
+ ret = viacam_set_flip(cam);
+ return ret;
+}
+
+
+
+/* --------------------------------------------------------------------------*/
+/*
+ * Some simple register accessors; they assume that the lock is held.
+ *
+ * Should we want to support the second capture engine, we could
+ * hide the register difference by adding 0x1000 to registers in the
+ * 0x300-350 range.
+ */
+static inline void viacam_write_reg(struct via_camera *cam,
+ int reg, int value)
+{
+ iowrite32(value, cam->mmio + reg);
+}
+
+static inline int viacam_read_reg(struct via_camera *cam, int reg)
+{
+ return ioread32(cam->mmio + reg);
+}
+
+static inline void viacam_write_reg_mask(struct via_camera *cam,
+ int reg, int value, int mask)
+{
+ int tmp = viacam_read_reg(cam, reg);
+
+ tmp = (tmp & ~mask) | (value & mask);
+ viacam_write_reg(cam, reg, tmp);
+}
+
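The comment above notes that the second capture engine could be supported by offsetting its registers: the 0x300-0x350 window would move up by 0x1000. A purely hypothetical sketch of what such an accessor might look like (not part of the driver):

/* Hypothetical two-engine register write; engine 1 registers in the
 * 0x300-0x350 window sit 0x1000 higher, per the comment above. */
static inline void viacam_write_engine_reg(struct via_camera *cam,
					   int engine, int reg, int value)
{
	if (engine == 1 && reg >= 0x300 && reg < 0x350)
		reg += 0x1000;
	iowrite32(value, cam->mmio + reg);
}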
+
+/* --------------------------------------------------------------------------*/
+/* Interrupt management and handling */
+
+static irqreturn_t viacam_quick_irq(int irq, void *data)
+{
+ struct via_camera *cam = data;
+ irqreturn_t ret = IRQ_NONE;
+ int icv;
+
+ /*
+ * All we do here is to clear the interrupts and tell
+ * the handler thread to wake up.
+ */
+ spin_lock(&cam->viadev->reg_lock);
+ icv = viacam_read_reg(cam, VCR_INTCTRL);
+ if (icv & VCR_IC_EAV) {
+ icv |= VCR_IC_EAV|VCR_IC_EVBI|VCR_IC_FFULL;
+ viacam_write_reg(cam, VCR_INTCTRL, icv);
+ ret = IRQ_WAKE_THREAD;
+ }
+ spin_unlock(&cam->viadev->reg_lock);
+ return ret;
+}
+
+/*
+ * Find the next videobuf buffer which has somebody waiting on it.
+ */
+static struct videobuf_buffer *viacam_next_buffer(struct via_camera *cam)
+{
+ unsigned long flags;
+ struct videobuf_buffer *buf = NULL;
+
+ spin_lock_irqsave(&cam->viadev->reg_lock, flags);
+ if (cam->opstate != S_RUNNING)
+ goto out;
+ if (list_empty(&cam->buffer_queue))
+ goto out;
+ buf = list_entry(cam->buffer_queue.next, struct videobuf_buffer, queue);
+ if (!waitqueue_active(&buf->done)) { /* Nobody waiting */
+ buf = NULL;
+ goto out;
+ }
+ list_del(&buf->queue);
+ buf->state = VIDEOBUF_ACTIVE;
+out:
+ spin_unlock_irqrestore(&cam->viadev->reg_lock, flags);
+ return buf;
+}
+
+/*
+ * The threaded IRQ handler.
+ */
+static irqreturn_t viacam_irq(int irq, void *data)
+{
+ int bufn;
+ struct videobuf_buffer *vb;
+ struct via_camera *cam = data;
+ struct videobuf_dmabuf *vdma;
+
+ /*
+ * If there is no place to put the data frame, don't bother
+ * with anything else.
+ */
+ vb = viacam_next_buffer(cam);
+ if (vb == NULL)
+ goto done;
+ /*
+ * Figure out which buffer we just completed.
+ */
+ bufn = (viacam_read_reg(cam, VCR_INTCTRL) & VCR_IC_ACTBUF) >> 3;
+ bufn -= 1;
+ if (bufn < 0)
+ bufn = cam->n_cap_bufs - 1;
+ /*
+ * Copy over the data and let any waiters know.
+ */
+ vdma = videobuf_to_dma(vb);
+ viafb_dma_copy_out_sg(cam->cb_offsets[bufn], vdma->sglist, vdma->sglen);
+ vb->state = VIDEOBUF_DONE;
+ vb->size = cam->user_format.sizeimage;
+ wake_up(&vb->done);
+done:
+ return IRQ_HANDLED;
+}
+
+
+/*
+ * These functions must mess around with the general interrupt
+ * control register, which is relevant to much more than just the
+ * camera. Nothing else uses interrupts, though, as of this writing.
+ * Should that situation change, we'll have to improve support at
+ * the via-core level.
+ */
+static void viacam_int_enable(struct via_camera *cam)
+{
+ viacam_write_reg(cam, VCR_INTCTRL,
+ VCR_IC_INTEN|VCR_IC_EAV|VCR_IC_EVBI|VCR_IC_FFULL);
+ viafb_irq_enable(VDE_I_C0AVEN);
+}
+
+static void viacam_int_disable(struct via_camera *cam)
+{
+ viafb_irq_disable(VDE_I_C0AVEN);
+ viacam_write_reg(cam, VCR_INTCTRL, 0);
+}
+
+
+
+/* --------------------------------------------------------------------------*/
+/* Controller operations */
+
+/*
+ * Set up our capture buffers in framebuffer memory.
+ */
+static int viacam_ctlr_cbufs(struct via_camera *cam)
+{
+ int nbuf = cam->viadev->camera_fbmem_size/cam->sensor_format.sizeimage;
+ int i;
+ unsigned int offset;
+
+ /*
+ * See how many buffers we can work with.
+ */
+ if (nbuf >= 3) {
+ cam->n_cap_bufs = 3;
+ viacam_write_reg_mask(cam, VCR_CAPINTC, VCR_CI_3BUFS,
+ VCR_CI_3BUFS);
+ } else if (nbuf == 2) {
+ cam->n_cap_bufs = 2;
+ viacam_write_reg_mask(cam, VCR_CAPINTC, 0, VCR_CI_3BUFS);
+ } else {
+ cam_warn(cam, "Insufficient frame buffer memory\n");
+ return -ENOMEM;
+ }
+ /*
+ * Set them up.
+ */
+ offset = cam->fb_offset;
+ for (i = 0; i < cam->n_cap_bufs; i++) {
+ cam->cb_offsets[i] = offset;
+ cam->cb_addrs[i] = cam->fbmem + offset;
+ viacam_write_reg(cam, VCR_VBUF1 + i*4, offset & VCR_VBUF_MASK);
+ offset += cam->sensor_format.sizeimage;
+ }
+ return 0;
+}
+
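As a worked example of the buffer-count logic above, assume the default VGA YUYV sensor format (sizeimage = 640 * 480 * 2 = 614,400 bytes) and a hypothetical 2 MB framebuffer reservation:

/* Worked example for viacam_ctlr_cbufs(): 2 MB of reserved framebuffer
 * memory divided by a 614400-byte VGA YUYV frame yields 3, so the
 * controller runs in triple-buffer mode; below ~1.2 MB it falls back
 * to 2 buffers, and below that the setup fails with -ENOMEM. */
static int demo_cap_buf_count(void)
{
	unsigned int sizeimage = 640 * 480 * 2;		/* 614400 */
	unsigned int fbmem_size = 2 * 1024 * 1024;	/* 2097152 */

	return fbmem_size / sizeimage;			/* 3 */
}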
+/*
+ * Set the scaling register for downscaling the image.
+ *
+ * This register works like this... Vertical scaling is enabled
+ * by bit 26; if that bit is set, downscaling is controlled by the
+ * value in bits 16:25. Those bits are divided by 1024 to get
+ * the scaling factor; setting just bit 25 thus cuts the height
+ * in half.
+ *
+ * Horizontal scaling works about the same, but it's enabled by
+ * bit 11, with bits 0:10 giving the numerator of a fraction
+ * (over 2048) for the scaling value.
+ *
+ * This function is naive in that, if the user departs from
+ * the 3x4 VGA scaling factor, the image will distort. We
+ * could work around that if it really seemed important.
+ */
+static void viacam_set_scale(struct via_camera *cam)
+{
+ unsigned int avscale;
+ int sf;
+
+ if (cam->user_format.width == VGA_WIDTH)
+ avscale = 0;
+ else {
+ sf = (cam->user_format.width*2048)/VGA_WIDTH;
+ avscale = VCR_AVS_HEN | sf;
+ }
+ if (cam->user_format.height < VGA_HEIGHT) {
+ sf = (1024*cam->user_format.height)/VGA_HEIGHT;
+ avscale |= VCR_AVS_VEN | (sf << 16);
+ }
+ viacam_write_reg(cam, VCR_AVSCALE, avscale);
+}
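A worked example of the register layout described above, for a 320x240 user format (half of VGA in each dimension); the helper name is illustrative only:

/* Worked example of the avscale value computed by viacam_set_scale()
 * for a 320x240 user format. */
static unsigned int demo_avscale_320x240(void)
{
	int hsf = (320 * 2048) / VGA_WIDTH;	/* 1024: half width */
	int vsf = (1024 * 240) / VGA_HEIGHT;	/* 512: half height */

	return VCR_AVS_HEN | hsf | VCR_AVS_VEN | (vsf << 16);
}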
+
+
+/*
+ * Configure image-related information into the capture engine.
+ */
+static void viacam_ctlr_image(struct via_camera *cam)
+{
+ int cicreg;
+
+ /*
+ * Disable clock before messing with stuff - from the via
+ * sample driver.
+ */
+ viacam_write_reg(cam, VCR_CAPINTC, ~(VCR_CI_ENABLE|VCR_CI_CLKEN));
+ /*
+ * Set up the controller for VGA resolution, modulo magic
+ * offsets from the via sample driver.
+ */
+ viacam_write_reg(cam, VCR_HORRANGE, 0x06200120);
+ viacam_write_reg(cam, VCR_VERTRANGE, 0x01de0000);
+ viacam_set_scale(cam);
+ /*
+ * Image size info.
+ */
+ viacam_write_reg(cam, VCR_MAXDATA,
+ (cam->sensor_format.height << 16) |
+ (cam->sensor_format.bytesperline >> 3));
+ viacam_write_reg(cam, VCR_MAXVBI, 0);
+ viacam_write_reg(cam, VCR_VSTRIDE,
+ cam->user_format.bytesperline & VCR_VS_STRIDE);
+ /*
+ * Set up the capture interface control register,
+ * everything but the "go" bit.
+ *
+ * The FIFO threshold is a bit of a magic number; 8 is what
+ * VIA's sample code uses.
+ */
+ cicreg = VCR_CI_CLKEN |
+ 0x08000000 | /* FIFO threshold */
+ VCR_CI_FLDINV | /* OLPC-specific? */
+ VCR_CI_VREFINV | /* OLPC-specific? */
+ VCR_CI_DIBOTH | /* Capture both fields */
+ VCR_CI_CCIR601_8;
+ if (cam->n_cap_bufs == 3)
+ cicreg |= VCR_CI_3BUFS;
+ /*
+ * YUV formats need different byte swapping than RGB.
+ */
+ if (cam->user_format.pixelformat == V4L2_PIX_FMT_YUYV)
+ cicreg |= VCR_CI_YUYV;
+ else
+ cicreg |= VCR_CI_UYVY;
+ viacam_write_reg(cam, VCR_CAPINTC, cicreg);
+}
+
+
+static int viacam_config_controller(struct via_camera *cam)
+{
+ int ret;
+ unsigned long flags;
+
+ spin_lock_irqsave(&cam->viadev->reg_lock, flags);
+ ret = viacam_ctlr_cbufs(cam);
+ if (!ret)
+ viacam_ctlr_image(cam);
+ spin_unlock_irqrestore(&cam->viadev->reg_lock, flags);
+ clear_bit(CF_CONFIG_NEEDED, &cam->flags);
+ return ret;
+}
+
+/*
+ * Make it start grabbing data.
+ */
+static void viacam_start_engine(struct via_camera *cam)
+{
+ spin_lock_irq(&cam->viadev->reg_lock);
+ cam->next_buf = 0;
+ viacam_write_reg_mask(cam, VCR_CAPINTC, VCR_CI_ENABLE, VCR_CI_ENABLE);
+ viacam_int_enable(cam);
+ (void) viacam_read_reg(cam, VCR_CAPINTC); /* Force post */
+ cam->opstate = S_RUNNING;
+ spin_unlock_irq(&cam->viadev->reg_lock);
+}
+
+
+static void viacam_stop_engine(struct via_camera *cam)
+{
+ spin_lock_irq(&cam->viadev->reg_lock);
+ viacam_int_disable(cam);
+ viacam_write_reg_mask(cam, VCR_CAPINTC, 0, VCR_CI_ENABLE);
+ (void) viacam_read_reg(cam, VCR_CAPINTC); /* Force post */
+ cam->opstate = S_IDLE;
+ spin_unlock_irq(&cam->viadev->reg_lock);
+}
+
+
+/* --------------------------------------------------------------------------*/
+/* Videobuf callback ops */
+
+/*
+ * buffer_setup. The purpose of this one would appear to be to tell
+ * videobuf how big a single image is. It's also evidently up to us
+ * to put some sort of limit on the maximum number of buffers allowed.
+ */
+static int viacam_vb_buf_setup(struct videobuf_queue *q,
+ unsigned int *count, unsigned int *size)
+{
+ struct via_camera *cam = q->priv_data;
+
+ *size = cam->user_format.sizeimage;
+ if (*count == 0 || *count > 6) /* Arbitrary number */
+ *count = 6;
+ return 0;
+}
+
+/*
+ * Prepare a buffer.
+ */
+static int viacam_vb_buf_prepare(struct videobuf_queue *q,
+ struct videobuf_buffer *vb, enum v4l2_field field)
+{
+ struct via_camera *cam = q->priv_data;
+
+ vb->size = cam->user_format.sizeimage;
+ vb->width = cam->user_format.width; /* bytesperline???? */
+ vb->height = cam->user_format.height;
+ vb->field = field;
+ if (vb->state == VIDEOBUF_NEEDS_INIT) {
+ int ret = videobuf_iolock(q, vb, NULL);
+ if (ret)
+ return ret;
+ }
+ vb->state = VIDEOBUF_PREPARED;
+ return 0;
+}
+
+/*
+ * We've got a buffer to put data into.
+ *
+ * FIXME: check for a running engine and valid buffers?
+ */
+static void viacam_vb_buf_queue(struct videobuf_queue *q,
+ struct videobuf_buffer *vb)
+{
+ struct via_camera *cam = q->priv_data;
+
+ /*
+ * Note that videobuf holds the lock when it calls
+ * us, so we need not (indeed, cannot) take it here.
+ */
+ vb->state = VIDEOBUF_QUEUED;
+ list_add_tail(&vb->queue, &cam->buffer_queue);
+}
+
+/*
+ * Free a buffer.
+ */
+static void viacam_vb_buf_release(struct videobuf_queue *q,
+ struct videobuf_buffer *vb)
+{
+ struct via_camera *cam = q->priv_data;
+
+ videobuf_dma_unmap(&cam->platdev->dev, videobuf_to_dma(vb));
+ videobuf_dma_free(videobuf_to_dma(vb));
+ vb->state = VIDEOBUF_NEEDS_INIT;
+}
+
+static const struct videobuf_queue_ops viacam_vb_ops = {
+ .buf_setup = viacam_vb_buf_setup,
+ .buf_prepare = viacam_vb_buf_prepare,
+ .buf_queue = viacam_vb_buf_queue,
+ .buf_release = viacam_vb_buf_release,
+};
+
+/* --------------------------------------------------------------------------*/
+/* File operations */
+
+static int viacam_open(struct file *filp)
+{
+ struct via_camera *cam = video_drvdata(filp);
+
+ filp->private_data = cam;
+ /*
+ * Note the new user. If this is the first one, we'll also
+ * need to power up the sensor.
+ */
+ mutex_lock(&cam->lock);
+ if (cam->users == 0) {
+ int ret = viafb_request_dma();
+
+ if (ret) {
+ mutex_unlock(&cam->lock);
+ return ret;
+ }
+ via_sensor_power_up(cam);
+ set_bit(CF_CONFIG_NEEDED, &cam->flags);
+ /*
+ * Hook into videobuf. Evidently this cannot fail.
+ */
+ videobuf_queue_sg_init(&cam->vb_queue, &viacam_vb_ops,
+ &cam->platdev->dev, &cam->viadev->reg_lock,
+ V4L2_BUF_TYPE_VIDEO_CAPTURE, V4L2_FIELD_NONE,
+ sizeof(struct videobuf_buffer), cam, NULL);
+ }
+ (cam->users)++;
+ mutex_unlock(&cam->lock);
+ return 0;
+}
+
+static int viacam_release(struct file *filp)
+{
+ struct via_camera *cam = video_drvdata(filp);
+
+ mutex_lock(&cam->lock);
+ (cam->users)--;
+ /*
+ * If the "owner" is closing, shut down any ongoing
+ * operations.
+ */
+ if (filp == cam->owner) {
+ videobuf_stop(&cam->vb_queue);
+ /*
+ * We don't hold the spinlock here, but, if release()
+ * is being called by the owner, nobody else will
+ * be changing the state. And an extra stop would
+ * not hurt anyway.
+ */
+ if (cam->opstate != S_IDLE)
+ viacam_stop_engine(cam);
+ cam->owner = NULL;
+ }
+ /*
+ * Last one out needs to turn out the lights.
+ */
+ if (cam->users == 0) {
+ videobuf_mmap_free(&cam->vb_queue);
+ via_sensor_power_down(cam);
+ viafb_release_dma();
+ }
+ mutex_unlock(&cam->lock);
+ return 0;
+}
+
+/*
+ * Read a frame from the device.
+ */
+static ssize_t viacam_read(struct file *filp, char __user *buffer,
+ size_t len, loff_t *pos)
+{
+ struct via_camera *cam = video_drvdata(filp);
+ int ret;
+
+ mutex_lock(&cam->lock);
+ /*
+ * Enforce the V4L2 "only one owner gets to read data" rule.
+ */
+ if (cam->owner && cam->owner != filp) {
+ ret = -EBUSY;
+ goto out_unlock;
+ }
+ cam->owner = filp;
+ /*
+ * Do we need to configure the hardware?
+ */
+ if (test_bit(CF_CONFIG_NEEDED, &cam->flags)) {
+ ret = viacam_configure_sensor(cam);
+ if (!ret)
+ ret = viacam_config_controller(cam);
+ if (ret)
+ goto out_unlock;
+ }
+ /*
+ * Fire up the capture engine, then have videobuf do
+ * the heavy lifting. Someday it would be good to avoid
+ * stopping and restarting the engine each time.
+ */
+ INIT_LIST_HEAD(&cam->buffer_queue);
+ viacam_start_engine(cam);
+ ret = videobuf_read_stream(&cam->vb_queue, buffer, len, pos, 0,
+ filp->f_flags & O_NONBLOCK);
+ viacam_stop_engine(cam);
+ /* videobuf_stop() ?? */
+
+out_unlock:
+ mutex_unlock(&cam->lock);
+ return ret;
+}
+
+
+static unsigned int viacam_poll(struct file *filp, struct poll_table_struct *pt)
+{
+ struct via_camera *cam = video_drvdata(filp);
+
+ return videobuf_poll_stream(filp, &cam->vb_queue, pt);
+}
+
+
+static int viacam_mmap(struct file *filp, struct vm_area_struct *vma)
+{
+ struct via_camera *cam = video_drvdata(filp);
+
+ return videobuf_mmap_mapper(&cam->vb_queue, vma);
+}
+
+
+
+static const struct v4l2_file_operations viacam_fops = {
+ .owner = THIS_MODULE,
+ .open = viacam_open,
+ .release = viacam_release,
+ .read = viacam_read,
+ .poll = viacam_poll,
+ .mmap = viacam_mmap,
+ .unlocked_ioctl = video_ioctl2,
+};
+
+/*----------------------------------------------------------------------------*/
+/*
+ * The long list of v4l2 ioctl ops
+ */
+
+static int viacam_g_chip_ident(struct file *file, void *priv,
+ struct v4l2_dbg_chip_ident *ident)
+{
+ struct via_camera *cam = priv;
+
+ ident->ident = V4L2_IDENT_NONE;
+ ident->revision = 0;
+ if (v4l2_chip_match_host(&ident->match)) {
+ ident->ident = V4L2_IDENT_VIA_VX855;
+ return 0;
+ }
+ return sensor_call(cam, core, g_chip_ident, ident);
+}
+
+/*
+ * Control ops are passed through to the sensor.
+ */
+static int viacam_queryctrl(struct file *filp, void *priv,
+ struct v4l2_queryctrl *qc)
+{
+ struct via_camera *cam = priv;
+ int ret;
+
+ mutex_lock(&cam->lock);
+ ret = sensor_call(cam, core, queryctrl, qc);
+ mutex_unlock(&cam->lock);
+ return ret;
+}
+
+
+static int viacam_g_ctrl(struct file *filp, void *priv,
+ struct v4l2_control *ctrl)
+{
+ struct via_camera *cam = priv;
+ int ret;
+
+ mutex_lock(&cam->lock);
+ ret = sensor_call(cam, core, g_ctrl, ctrl);
+ mutex_unlock(&cam->lock);
+ return ret;
+}
+
+
+static int viacam_s_ctrl(struct file *filp, void *priv,
+ struct v4l2_control *ctrl)
+{
+ struct via_camera *cam = priv;
+ int ret;
+
+ mutex_lock(&cam->lock);
+ ret = sensor_call(cam, core, s_ctrl, ctrl);
+ mutex_unlock(&cam->lock);
+ return ret;
+}
+
+/*
+ * Only one input.
+ */
+static int viacam_enum_input(struct file *filp, void *priv,
+ struct v4l2_input *input)
+{
+ if (input->index != 0)
+ return -EINVAL;
+
+ input->type = V4L2_INPUT_TYPE_CAMERA;
+ input->std = V4L2_STD_ALL; /* Not sure what should go here */
+ strcpy(input->name, "Camera");
+ return 0;
+}
+
+static int viacam_g_input(struct file *filp, void *priv, unsigned int *i)
+{
+ *i = 0;
+ return 0;
+}
+
+static int viacam_s_input(struct file *filp, void *priv, unsigned int i)
+{
+ if (i != 0)
+ return -EINVAL;
+ return 0;
+}
+
+static int viacam_s_std(struct file *filp, void *priv, v4l2_std_id *std)
+{
+ return 0;
+}
+
+/*
+ * Video format stuff. Here is our default format until
+ * user space messes with things.
+ */
+static const struct v4l2_pix_format viacam_def_pix_format = {
+ .width = VGA_WIDTH,
+ .height = VGA_HEIGHT,
+ .pixelformat = V4L2_PIX_FMT_YUYV,
+ .field = V4L2_FIELD_NONE,
+ .bytesperline = VGA_WIDTH * 2,
+ .sizeimage = VGA_WIDTH * VGA_HEIGHT * 2,
+};
+
+static const enum v4l2_mbus_pixelcode via_def_mbus_code = V4L2_MBUS_FMT_YUYV8_2X8;
+
+static int viacam_enum_fmt_vid_cap(struct file *filp, void *priv,
+ struct v4l2_fmtdesc *fmt)
+{
+ if (fmt->index >= N_VIA_FMTS)
+ return -EINVAL;
+ strlcpy(fmt->description, via_formats[fmt->index].desc,
+ sizeof(fmt->description));
+ fmt->pixelformat = via_formats[fmt->index].pixelformat;
+ return 0;
+}
+
+/*
+ * Figure out proper image dimensions, but always force the
+ * sensor to VGA.
+ */
+static void viacam_fmt_pre(struct v4l2_pix_format *userfmt,
+ struct v4l2_pix_format *sensorfmt)
+{
+ *sensorfmt = *userfmt;
+ if (userfmt->width < QCIF_WIDTH || userfmt->height < QCIF_HEIGHT) {
+ userfmt->width = QCIF_WIDTH;
+ userfmt->height = QCIF_HEIGHT;
+ }
+ if (userfmt->width > VGA_WIDTH || userfmt->height > VGA_HEIGHT) {
+ userfmt->width = VGA_WIDTH;
+ userfmt->height = VGA_HEIGHT;
+ }
+ sensorfmt->width = VGA_WIDTH;
+ sensorfmt->height = VGA_HEIGHT;
+}
+
+static void viacam_fmt_post(struct v4l2_pix_format *userfmt,
+ struct v4l2_pix_format *sensorfmt)
+{
+ struct via_format *f = via_find_format(userfmt->pixelformat);
+
+ sensorfmt->bytesperline = sensorfmt->width * f->bpp;
+ sensorfmt->sizeimage = sensorfmt->height * sensorfmt->bytesperline;
+ userfmt->pixelformat = sensorfmt->pixelformat;
+ userfmt->field = sensorfmt->field;
+ userfmt->bytesperline = 2 * userfmt->width;
+ userfmt->sizeimage = userfmt->bytesperline * userfmt->height;
+}
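As a worked example of the pre/post pair above, a 320x240 YUYV request keeps the user dimensions (they fall inside the QCIF-VGA clamp) while the sensor is forced to 640x480; the post step then fills in the derived fields. Sketch only, with a hypothetical caller:

/* Hypothetical caller showing what viacam_fmt_pre()/viacam_fmt_post()
 * produce for a 320x240 YUYV (2 bytes/pixel) request. */
static void demo_fmt_negotiation(void)
{
	struct v4l2_pix_format user = {
		.width		= 320,
		.height		= 240,
		.pixelformat	= V4L2_PIX_FMT_YUYV,
	};
	struct v4l2_pix_format sensor;

	viacam_fmt_pre(&user, &sensor);		/* sensor forced to 640x480 */
	viacam_fmt_post(&user, &sensor);
	/* user:   bytesperline = 640,  sizeimage = 153600 */
	/* sensor: bytesperline = 1280, sizeimage = 614400 */
}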
+
+
+/*
+ * The real work of figuring out a workable format.
+ */
+static int viacam_do_try_fmt(struct via_camera *cam,
+ struct v4l2_pix_format *upix, struct v4l2_pix_format *spix)
+{
+ int ret;
+ struct v4l2_mbus_framefmt mbus_fmt;
+ struct via_format *f = via_find_format(upix->pixelformat);
+
+ upix->pixelformat = f->pixelformat;
+ viacam_fmt_pre(upix, spix);
+ v4l2_fill_mbus_format(&mbus_fmt, upix, f->mbus_code);
+ ret = sensor_call(cam, video, try_mbus_fmt, &mbus_fmt);
+ v4l2_fill_pix_format(spix, &mbus_fmt);
+ viacam_fmt_post(upix, spix);
+ return ret;
+}
+
+
+
+static int viacam_try_fmt_vid_cap(struct file *filp, void *priv,
+ struct v4l2_format *fmt)
+{
+ struct via_camera *cam = priv;
+ struct v4l2_format sfmt;
+ int ret;
+
+ mutex_lock(&cam->lock);
+ ret = viacam_do_try_fmt(cam, &fmt->fmt.pix, &sfmt.fmt.pix);
+ mutex_unlock(&cam->lock);
+ return ret;
+}
+
+
+static int viacam_g_fmt_vid_cap(struct file *filp, void *priv,
+ struct v4l2_format *fmt)
+{
+ struct via_camera *cam = priv;
+
+ mutex_lock(&cam->lock);
+ fmt->fmt.pix = cam->user_format;
+ mutex_unlock(&cam->lock);
+ return 0;
+}
+
+static int viacam_s_fmt_vid_cap(struct file *filp, void *priv,
+ struct v4l2_format *fmt)
+{
+ struct via_camera *cam = priv;
+ int ret;
+ struct v4l2_format sfmt;
+ struct via_format *f = via_find_format(fmt->fmt.pix.pixelformat);
+
+ /*
+ * Camera must be idle or we can't mess with the
+ * video setup.
+ */
+ mutex_lock(&cam->lock);
+ if (cam->opstate != S_IDLE) {
+ ret = -EBUSY;
+ goto out;
+ }
+ /*
+ * Let the sensor code look over and tweak the
+ * requested formatting.
+ */
+ ret = viacam_do_try_fmt(cam, &fmt->fmt.pix, &sfmt.fmt.pix);
+ if (ret)
+ goto out;
+ /*
+ * OK, let's commit to the new format.
+ */
+ cam->user_format = fmt->fmt.pix;
+ cam->sensor_format = sfmt.fmt.pix;
+ cam->mbus_code = f->mbus_code;
+ ret = viacam_configure_sensor(cam);
+ if (!ret)
+ ret = viacam_config_controller(cam);
+out:
+ mutex_unlock(&cam->lock);
+ return ret;
+}
+
+static int viacam_querycap(struct file *filp, void *priv,
+ struct v4l2_capability *cap)
+{
+ strcpy(cap->driver, "via-camera");
+ strcpy(cap->card, "via-camera");
+ cap->version = 1;
+ cap->capabilities = V4L2_CAP_VIDEO_CAPTURE |
+ V4L2_CAP_READWRITE | V4L2_CAP_STREAMING;
+ return 0;
+}
+
+/*
+ * Streaming operations - pure videobuf stuff.
+ */
+static int viacam_reqbufs(struct file *filp, void *priv,
+ struct v4l2_requestbuffers *rb)
+{
+ struct via_camera *cam = priv;
+
+ return videobuf_reqbufs(&cam->vb_queue, rb);
+}
+
+static int viacam_querybuf(struct file *filp, void *priv,
+ struct v4l2_buffer *buf)
+{
+ struct via_camera *cam = priv;
+
+ return videobuf_querybuf(&cam->vb_queue, buf);
+}
+
+static int viacam_qbuf(struct file *filp, void *priv, struct v4l2_buffer *buf)
+{
+ struct via_camera *cam = priv;
+
+ return videobuf_qbuf(&cam->vb_queue, buf);
+}
+
+static int viacam_dqbuf(struct file *filp, void *priv, struct v4l2_buffer *buf)
+{
+ struct via_camera *cam = priv;
+
+ return videobuf_dqbuf(&cam->vb_queue, buf, filp->f_flags & O_NONBLOCK);
+}
+
+static int viacam_streamon(struct file *filp, void *priv, enum v4l2_buf_type t)
+{
+ struct via_camera *cam = priv;
+ int ret = 0;
+
+ if (t != V4L2_BUF_TYPE_VIDEO_CAPTURE)
+ return -EINVAL;
+
+ mutex_lock(&cam->lock);
+ if (cam->opstate != S_IDLE) {
+ ret = -EBUSY;
+ goto out;
+ }
+ /*
+ * Enforce the V4L2 "only one owner gets to read data" rule.
+ */
+ if (cam->owner && cam->owner != filp) {
+ ret = -EBUSY;
+ goto out;
+ }
+ cam->owner = filp;
+ /*
+ * Configure things if need be.
+ */
+ if (test_bit(CF_CONFIG_NEEDED, &cam->flags)) {
+ ret = viacam_configure_sensor(cam);
+ if (ret)
+ goto out;
+ ret = viacam_config_controller(cam);
+ if (ret)
+ goto out;
+ }
+ /*
+ * If the CPU goes into C3, the DMA transfer gets corrupted and
+ * users start filing unsightly bug reports. Put in a "latency"
+ * requirement which will keep the CPU out of the deeper sleep
+ * states.
+ */
+ pm_qos_add_request(&cam->qos_request, PM_QOS_CPU_DMA_LATENCY, 50);
+ /*
+ * Fire things up.
+ */
+ INIT_LIST_HEAD(&cam->buffer_queue);
+ ret = videobuf_streamon(&cam->vb_queue);
+ if (!ret)
+ viacam_start_engine(cam);
+out:
+ mutex_unlock(&cam->lock);
+ return ret;
+}
+
+static int viacam_streamoff(struct file *filp, void *priv, enum v4l2_buf_type t)
+{
+ struct via_camera *cam = priv;
+ int ret;
+
+ if (t != V4L2_BUF_TYPE_VIDEO_CAPTURE)
+ return -EINVAL;
+ mutex_lock(&cam->lock);
+ if (cam->opstate != S_RUNNING) {
+ ret = -EINVAL;
+ goto out;
+ }
+ pm_qos_remove_request(&cam->qos_request);
+ viacam_stop_engine(cam);
+ /*
+ * Videobuf will recycle all of the outstanding buffers, but
+ * we should be sure we don't retain any references to
+ * any of them.
+ */
+ ret = videobuf_streamoff(&cam->vb_queue);
+ INIT_LIST_HEAD(&cam->buffer_queue);
+out:
+ mutex_unlock(&cam->lock);
+ return ret;
+}
+
+#ifdef CONFIG_VIDEO_V4L1_COMPAT
+static int viacam_vidiocgmbuf(struct file *filp, void *priv,
+ struct video_mbuf *mbuf)
+{
+ struct via_camera *cam = priv;
+
+ return videobuf_cgmbuf(&cam->vb_queue, mbuf, 6);
+}
+#endif
+
+/* G/S_PARM */
+
+static int viacam_g_parm(struct file *filp, void *priv,
+ struct v4l2_streamparm *parm)
+{
+ struct via_camera *cam = priv;
+ int ret;
+
+ mutex_lock(&cam->lock);
+ ret = sensor_call(cam, video, g_parm, parm);
+ mutex_unlock(&cam->lock);
+ parm->parm.capture.readbuffers = cam->n_cap_bufs;
+ return ret;
+}
+
+static int viacam_s_parm(struct file *filp, void *priv,
+ struct v4l2_streamparm *parm)
+{
+ struct via_camera *cam = priv;
+ int ret;
+
+ mutex_lock(&cam->lock);
+ ret = sensor_call(cam, video, s_parm, parm);
+ mutex_unlock(&cam->lock);
+ parm->parm.capture.readbuffers = cam->n_cap_bufs;
+ return ret;
+}
+
+static int viacam_enum_framesizes(struct file *filp, void *priv,
+ struct v4l2_frmsizeenum *sizes)
+{
+ if (sizes->index != 0)
+ return -EINVAL;
+ sizes->type = V4L2_FRMSIZE_TYPE_CONTINUOUS;
+ sizes->stepwise.min_width = QCIF_WIDTH;
+ sizes->stepwise.min_height = QCIF_HEIGHT;
+ sizes->stepwise.max_width = VGA_WIDTH;
+ sizes->stepwise.max_height = VGA_HEIGHT;
+ sizes->stepwise.step_width = sizes->stepwise.step_height = 1;
+ return 0;
+}
+
+static int viacam_enum_frameintervals(struct file *filp, void *priv,
+ struct v4l2_frmivalenum *interval)
+{
+ struct via_camera *cam = priv;
+ int ret;
+
+ mutex_lock(&cam->lock);
+ ret = sensor_call(cam, video, enum_frameintervals, interval);
+ mutex_unlock(&cam->lock);
+ return ret;
+}
+
+
+
+static const struct v4l2_ioctl_ops viacam_ioctl_ops = {
+ .vidioc_g_chip_ident = viacam_g_chip_ident,
+ .vidioc_queryctrl = viacam_queryctrl,
+ .vidioc_g_ctrl = viacam_g_ctrl,
+ .vidioc_s_ctrl = viacam_s_ctrl,
+ .vidioc_enum_input = viacam_enum_input,
+ .vidioc_g_input = viacam_g_input,
+ .vidioc_s_input = viacam_s_input,
+ .vidioc_s_std = viacam_s_std,
+ .vidioc_enum_fmt_vid_cap = viacam_enum_fmt_vid_cap,
+ .vidioc_try_fmt_vid_cap = viacam_try_fmt_vid_cap,
+ .vidioc_g_fmt_vid_cap = viacam_g_fmt_vid_cap,
+ .vidioc_s_fmt_vid_cap = viacam_s_fmt_vid_cap,
+ .vidioc_querycap = viacam_querycap,
+ .vidioc_reqbufs = viacam_reqbufs,
+ .vidioc_querybuf = viacam_querybuf,
+ .vidioc_qbuf = viacam_qbuf,
+ .vidioc_dqbuf = viacam_dqbuf,
+ .vidioc_streamon = viacam_streamon,
+ .vidioc_streamoff = viacam_streamoff,
+ .vidioc_g_parm = viacam_g_parm,
+ .vidioc_s_parm = viacam_s_parm,
+ .vidioc_enum_framesizes = viacam_enum_framesizes,
+ .vidioc_enum_frameintervals = viacam_enum_frameintervals,
+#ifdef CONFIG_VIDEO_V4L1_COMPAT
+ .vidiocgmbuf = viacam_vidiocgmbuf,
+#endif
+};
+
+/*----------------------------------------------------------------------------*/
+
+/*
+ * Power management.
+ */
+
+/*
+ * Setup stuff.
+ */
+
+static struct video_device viacam_v4l_template = {
+ .name = "via-camera",
+ .minor = -1,
+ .tvnorms = V4L2_STD_NTSC_M,
+ .current_norm = V4L2_STD_NTSC_M,
+ .fops = &viacam_fops,
+ .ioctl_ops = &viacam_ioctl_ops,
+ .release = video_device_release_empty, /* Check this */
+};
+
+
+static __devinit int viacam_probe(struct platform_device *pdev)
+{
+ int ret;
+ struct i2c_adapter *sensor_adapter;
+ struct viafb_dev *viadev = pdev->dev.platform_data;
+
+ /*
+ * Note that there are actually two capture channels on
+ * the device. We only deal with one for now. That
+ * is encoded here; nothing else assumes it's dealing with
+ * a unique capture device.
+ */
+ struct via_camera *cam;
+
+ /*
+ * Ensure that frame buffer memory has been set aside for
+ * this purpose. As an arbitrary limit, refuse to work
+ * with less than two frames of VGA 16-bit data.
+ *
+ * If we ever support the second port, we'll need to set
+ * aside more memory.
+ */
+ if (viadev->camera_fbmem_size < (VGA_HEIGHT*VGA_WIDTH*4)) {
+ printk(KERN_ERR "viacam: insufficient FB memory reserved\n");
+ return -ENOMEM;
+ }
+ if (viadev->engine_mmio == NULL) {
+ printk(KERN_ERR "viacam: No I/O memory, so no pictures\n");
+ return -ENOMEM;
+ }
+ /*
+ * Basic structure initialization.
+ */
+ cam = kzalloc(sizeof(struct via_camera), GFP_KERNEL);
+ if (cam == NULL)
+ return -ENOMEM;
+ via_cam_info = cam;
+ cam->platdev = pdev;
+ cam->viadev = viadev;
+ cam->users = 0;
+ cam->owner = NULL;
+ cam->opstate = S_IDLE;
+ cam->user_format = cam->sensor_format = viacam_def_pix_format;
+ mutex_init(&cam->lock);
+ INIT_LIST_HEAD(&cam->buffer_queue);
+ cam->mmio = viadev->engine_mmio;
+ cam->fbmem = viadev->fbmem;
+ cam->fb_offset = viadev->camera_fbmem_offset;
+ cam->flags = 1 << CF_CONFIG_NEEDED;
+ cam->mbus_code = via_def_mbus_code;
+ /*
+ * Tell V4L that we exist.
+ */
+ ret = v4l2_device_register(&pdev->dev, &cam->v4l2_dev);
+ if (ret) {
+ dev_err(&pdev->dev, "Unable to register v4l2 device\n");
+ return ret;
+ }
+ /*
+ * Convince the system that we can do DMA.
+ */
+ pdev->dev.dma_mask = &viadev->pdev->dma_mask;
+ dma_set_mask(&pdev->dev, 0xffffffff);
+ /*
+ * Fire up the capture port. The write to 0x78 looks purely
+ * OLPCish; any system will need to tweak 0x1e.
+ */
+ via_write_reg_mask(VIASR, 0x78, 0, 0x80);
+ via_write_reg_mask(VIASR, 0x1e, 0xc0, 0xc0);
+ /*
+ * Get the sensor powered up.
+ */
+ ret = via_sensor_power_setup(cam);
+ if (ret)
+ goto out_unregister;
+ via_sensor_power_up(cam);
+
+ /*
+ * See if we can't find it on the bus. The VIA_PORT_31 assumption
+ * is OLPC-specific. 0x42 assumption is ov7670-specific.
+ */
+ sensor_adapter = viafb_find_i2c_adapter(VIA_PORT_31);
+ cam->sensor = v4l2_i2c_new_subdev(&cam->v4l2_dev, sensor_adapter,
+ "ov7670", 0x42 >> 1, NULL);
+ if (cam->sensor == NULL) {
+ dev_err(&pdev->dev, "Unable to find the sensor!\n");
+ ret = -ENODEV;
+ goto out_power_down;
+ }
+ /*
+ * Get the IRQ.
+ */
+ viacam_int_disable(cam);
+ ret = request_threaded_irq(viadev->pdev->irq, viacam_quick_irq,
+ viacam_irq, IRQF_SHARED, "via-camera", cam);
+ if (ret)
+ goto out_power_down;
+ /*
+ * Tell V4l2 that we exist.
+ */
+ cam->vdev = viacam_v4l_template;
+ cam->vdev.v4l2_dev = &cam->v4l2_dev;
+ ret = video_register_device(&cam->vdev, VFL_TYPE_GRABBER, -1);
+ if (ret)
+ goto out_irq;
+ video_set_drvdata(&cam->vdev, cam);
+
+ /* Power the sensor down until somebody opens the device */
+ via_sensor_power_down(cam);
+ return 0;
+
+out_irq:
+ free_irq(viadev->pdev->irq, cam);
+out_power_down:
+ via_sensor_power_release(cam);
+out_unregister:
+ v4l2_device_unregister(&cam->v4l2_dev);
+ return ret;
+}
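
viacam_quick_irq/viacam_irq above follow the usual split that request_threaded_irq() enables: a small hard-IRQ handler that only acknowledges the hardware, and a threaded handler that does the real work in process context. A sketch of that pattern under assumed names (demo_* is not part of this driver):

#include <linux/interrupt.h>
#include <linux/io.h>

struct demo_dev {
	void __iomem *regs;	/* hypothetical interrupt status register */
};

static irqreturn_t demo_quick_irq(int irq, void *data)
{
	struct demo_dev *dev = data;
	u32 status = readl(dev->regs);

	/* Hard-IRQ context; the line is shared, so report IRQ_NONE when
	 * this device did not raise the interrupt. */
	if (!(status & 0x1))
		return IRQ_NONE;
	writel(status, dev->regs);	/* ack */
	return IRQ_WAKE_THREAD;		/* hand the real work to the thread */
}

static irqreturn_t demo_irq_thread(int irq, void *data)
{
	/* Process context: may sleep, take mutexes, complete buffers, etc. */
	return IRQ_HANDLED;
}

/* Registered as in the probe routine above:
 * request_threaded_irq(irq, demo_quick_irq, demo_irq_thread,
 *			IRQF_SHARED, "demo", dev);
 */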
+
+static __devexit int viacam_remove(struct platform_device *pdev)
+{
+ struct via_camera *cam = via_cam_info;
+ struct viafb_dev *viadev = pdev->dev.platform_data;
+
+ video_unregister_device(&cam->vdev);
+ v4l2_device_unregister(&cam->v4l2_dev);
+ free_irq(viadev->pdev->irq, cam);
+ via_sensor_power_release(cam);
+ via_cam_info = NULL;
+ return 0;
+}
+
+
+static struct platform_driver viacam_driver = {
+ .driver = {
+ .name = "viafb-camera",
+ },
+ .probe = viacam_probe,
+ .remove = viacam_remove,
+};
+
+
+#ifdef CONFIG_OLPC_XO_1_5
+/*
+ * The OLPC folks put the serial port on the same pin as
+ * the camera. They also get grumpy if we break the
+ * serial port and keep them from using it. So we have
+ * to check the serial enable bit and not step on it.
+ */
+#define VIACAM_SERIAL_DEVFN 0x88
+#define VIACAM_SERIAL_CREG 0x46
+#define VIACAM_SERIAL_BIT 0x40
+
+static __devinit int viacam_check_serial_port(void)
+{
+ struct pci_bus *pbus = pci_find_bus(0, 0);
+ u8 cbyte;
+
+ pci_bus_read_config_byte(pbus, VIACAM_SERIAL_DEVFN,
+ VIACAM_SERIAL_CREG, &cbyte);
+ if ((cbyte & VIACAM_SERIAL_BIT) == 0)
+ return 0; /* Not enabled */
+ if (override_serial == 0) {
+ printk(KERN_NOTICE "Via camera: serial port is enabled, " \
+ "refusing to load.\n");
+ printk(KERN_NOTICE "Specify override_serial=1 to force " \
+ "module loading.\n");
+ return -EBUSY;
+ }
+ printk(KERN_NOTICE "Via camera: overriding serial port\n");
+ pci_bus_write_config_byte(pbus, VIACAM_SERIAL_DEVFN,
+ VIACAM_SERIAL_CREG, cbyte & ~VIACAM_SERIAL_BIT);
+ return 0;
+}
+#endif
+
+
+
+
+static int viacam_init(void)
+{
+#ifdef CONFIG_OLPC_XO_1_5
+ if (viacam_check_serial_port())
+ return -EBUSY;
+#endif
+ return platform_driver_register(&viacam_driver);
+}
+module_init(viacam_init);
+
+static void viacam_exit(void)
+{
+ platform_driver_unregister(&viacam_driver);
+}
+module_exit(viacam_exit);
diff --git a/drivers/media/video/via-camera.h b/drivers/media/video/via-camera.h
new file mode 100644
index 000000000000..b12a4b3d616f
--- /dev/null
+++ b/drivers/media/video/via-camera.h
@@ -0,0 +1,93 @@
+/*
+ * VIA Camera register definitions.
+ */
+#define VCR_INTCTRL 0x300 /* Capture interrupt control */
+#define VCR_IC_EAV 0x0001 /* End of active video status */
+#define VCR_IC_EVBI 0x0002 /* End of VBI status */
+#define VCR_IC_FBOTFLD 0x0004 /* "flipping" Bottom field is active */
+#define VCR_IC_ACTBUF 0x0018 /* Active video buffer */
+#define VCR_IC_VSYNC 0x0020 /* 0 = VB, 1 = active video */
+#define VCR_IC_BOTFLD 0x0040 /* Bottom field is active */
+#define VCR_IC_FFULL 0x0080 /* FIFO full */
+#define VCR_IC_INTEN 0x0100 /* End of active video int. enable */
+#define VCR_IC_VBIINT 0x0200 /* End of VBI int enable */
+#define VCR_IC_VBIBUF 0x0400 /* Current VBI buffer */
+
+#define VCR_TSC 0x308 /* Transport stream control */
+#define VCR_TSC_ENABLE 0x000001 /* Transport stream input enable */
+#define VCR_TSC_DROPERR 0x000002 /* Drop error packets */
+#define VCR_TSC_METHOD 0x00000c /* DMA method (non-functional) */
+#define VCR_TSC_COUNT 0x07fff0 /* KByte or packet count */
+#define VCR_TSC_CBMODE 0x080000 /* Change buffer by byte count */
+#define VCR_TSC_PSSIG 0x100000 /* Packet starting signal disable */
+#define VCR_TSC_BE 0x200000 /* MSB first (serial mode) */
+#define VCR_TSC_SERIAL 0x400000 /* Serial input (0 = parallel) */
+
+#define VCR_CAPINTC 0x310 /* Capture interface control */
+#define VCR_CI_ENABLE 0x00000001 /* Capture enable */
+#define VCR_CI_BSS 0x00000002 /* WTF "bit stream selection" */
+#define VCR_CI_3BUFS 0x00000004 /* 1 = 3 buffers, 0 = 2 buffers */
+#define VCR_CI_VIPEN 0x00000008 /* VIP enable */
+#define VCR_CI_CCIR601_8 0 /* CCIR601 input stream, 8 bit */
+#define VCR_CI_CCIR656_8 0x00000010 /* ... CCIR656, 8 bit */
+#define VCR_CI_CCIR601_16 0x00000020 /* ... CCIR601, 16 bit */
+#define VCR_CI_CCIR656_16 0x00000030 /* ... CCIR656, 16 bit */
+#define VCR_CI_HDMODE 0x00000040 /* CCIR656-16 hdr decode mode; 1=16b */
+#define VCR_CI_BSWAP 0x00000080 /* Swap bytes (16-bit) */
+#define VCR_CI_YUYV 0 /* Byte order 0123 */
+#define VCR_CI_UYVY 0x00000100 /* Byte order 1032 */
+#define VCR_CI_YVYU 0x00000200 /* Byte order 0321 */
+#define VCR_CI_VYUY 0x00000300 /* Byte order 3012 */
+#define VCR_CI_VIPTYPE 0x00000400 /* VIP type */
+#define VCR_CI_IFSEN 0x00000800 /* Input field signal enable */
+#define VCR_CI_DIODD 0 /* De-interlace odd, 30fps */
+#define VCR_CI_DIEVEN 0x00001000 /* ...even field, 30fps */
+#define VCR_CI_DIBOTH 0x00002000 /* ...both fields, 60fps */
+#define VCR_CI_DIBOTH30 0x00003000 /* ...both fields, 30fps interlace */
+#define VCR_CI_CONVTYPE 0x00004000 /* 4:2:2 to 4:4:4; 1 = interpolate */
+#define VCR_CI_CFC 0x00008000 /* Capture flipping control */
+#define VCR_CI_FILTER 0x00070000 /* Horiz filter mode select
+ 000 = none
+ 001 = 2 tap
+ 010 = 3 tap
+ 011 = 4 tap
+ 100 = 5 tap */
+#define VCR_CI_CLKINV 0x00080000 /* Input CLK inverted */
+#define VCR_CI_VREFINV 0x00100000 /* VREF inverted */
+#define VCR_CI_HREFINV 0x00200000 /* HREF inverted */
+#define VCR_CI_FLDINV 0x00400000 /* Field inverted */
+#define VCR_CI_CLKPIN 0x00800000 /* Capture clock pin */
+#define VCR_CI_THRESH 0x0f000000 /* Capture fifo threshold */
+#define VCR_CI_HRLE 0x10000000 /* Positive edge of HREF */
+#define VCR_CI_VRLE 0x20000000 /* Positive edge of VREF */
+#define VCR_CI_OFLDINV 0x40000000 /* Field output inverted */
+#define VCR_CI_CLKEN 0x80000000 /* Capture clock enable */
+
+#define VCR_HORRANGE 0x314 /* Active video horizontal range */
+#define VCR_VERTRANGE 0x318 /* Active video vertical range */
+#define VCR_AVSCALE 0x31c /* Active video scaling control */
+#define VCR_AVS_HEN 0x00000800 /* Horizontal scale enable */
+#define VCR_AVS_VEN 0x04000000 /* Vertical enable */
+#define VCR_VBIHOR 0x320 /* VBI Data horizontal range */
+#define VCR_VBIVERT 0x324 /* VBI data vertical range */
+#define VCR_VBIBUF1 0x328 /* First VBI buffer */
+#define VCR_VBISTRIDE 0x32c /* VBI stride */
+#define VCR_ANCDATACNT 0x330 /* Ancillary data count setting */
+#define VCR_MAXDATA 0x334 /* Active data count of active video */
+#define VCR_MAXVBI 0x338 /* Maximum data count of VBI */
+#define VCR_CAPDATA 0x33c /* Capture data count */
+#define VCR_VBUF1 0x340 /* First video buffer */
+#define VCR_VBUF2 0x344 /* Second video buffer */
+#define VCR_VBUF3 0x348 /* Third video buffer */
+#define VCR_VBUF_MASK 0x1ffffff0 /* Bits 28:4 */
+#define VCR_VBIBUF2 0x34c /* Second VBI buffer */
+#define VCR_VSTRIDE 0x350 /* Stride of video + coring control */
+#define VCR_VS_STRIDE_SHIFT 4
+#define VCR_VS_STRIDE 0x00001ff0 /* Stride (8-byte units) */
+#define VCR_VS_CCD 0x007f0000 /* Coring compare data */
+#define VCR_VS_COREEN 0x00800000 /* Coring enable */
+#define VCR_TS0ERR 0x354 /* TS buffer 0 error indicator */
+#define VCR_TS1ERR 0x358 /* TS buffer 1 error indicator */
+#define VCR_TS2ERR 0x35c /* TS buffer 2 error indicator */
+
+/* Add 0x1000 for the second capture engine registers */
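
The VCR_CI_* values above are single bits and small fields meant to be ORed into one VCR_CAPINTC write. A sketch of composing such a value; the particular combination is illustrative only, a real setup path would derive it from the negotiated capture format:

static void demo_program_capture(struct via_camera *cam)
{
	/* Illustrative only: 8-bit CCIR656 input, UYVY byte order, both
	 * fields at 60 fps, FIFO threshold 8, clock and capture enabled. */
	u32 val = VCR_CI_ENABLE | VCR_CI_CCIR656_8 | VCR_CI_UYVY |
		  VCR_CI_DIBOTH | (8 << 24) /* VCR_CI_THRESH field */ |
		  VCR_CI_CLKEN;

	writel(val, cam->mmio + VCR_CAPINTC);
}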
diff --git a/drivers/media/video/videobuf-core.c b/drivers/media/video/videobuf-core.c
index ce1595bef629..8979f91fa8e5 100644
--- a/drivers/media/video/videobuf-core.c
+++ b/drivers/media/video/videobuf-core.c
@@ -73,25 +73,46 @@ struct videobuf_buffer *videobuf_alloc_vb(struct videobuf_queue *q)
}
EXPORT_SYMBOL_GPL(videobuf_alloc_vb);
-#define WAITON_CONDITION (vb->state != VIDEOBUF_ACTIVE &&\
- vb->state != VIDEOBUF_QUEUED)
-int videobuf_waiton(struct videobuf_buffer *vb, int non_blocking, int intr)
+static int is_state_active_or_queued(struct videobuf_queue *q, struct videobuf_buffer *vb)
{
+ unsigned long flags;
+ bool rc;
+
+ spin_lock_irqsave(q->irqlock, flags);
+ rc = vb->state != VIDEOBUF_ACTIVE && vb->state != VIDEOBUF_QUEUED;
+ spin_unlock_irqrestore(q->irqlock, flags);
+ return rc;
+};
+
+int videobuf_waiton(struct videobuf_queue *q, struct videobuf_buffer *vb,
+ int non_blocking, int intr)
+{
+ bool is_ext_locked;
+ int ret = 0;
+
MAGIC_CHECK(vb->magic, MAGIC_BUFFER);
if (non_blocking) {
- if (WAITON_CONDITION)
+ if (is_state_active_or_queued(q, vb))
return 0;
- else
- return -EAGAIN;
+ return -EAGAIN;
}
+ is_ext_locked = q->ext_lock && mutex_is_locked(q->ext_lock);
+
+ /* Release vdev lock to prevent this wait from blocking outside access to
+ the device. */
+ if (is_ext_locked)
+ mutex_unlock(q->ext_lock);
if (intr)
- return wait_event_interruptible(vb->done, WAITON_CONDITION);
+ ret = wait_event_interruptible(vb->done, is_state_active_or_queued(q, vb));
else
- wait_event(vb->done, WAITON_CONDITION);
+ wait_event(vb->done, is_state_active_or_queued(q, vb));
+ /* Relock */
+ if (is_ext_locked)
+ mutex_lock(q->ext_lock);
- return 0;
+ return ret;
}
EXPORT_SYMBOL_GPL(videobuf_waiton);
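
The rework above does two things: the buffer-state test now happens under q->irqlock (despite its name, the helper returns true once the buffer is no longer active or queued, i.e. when the wait should end), and the queue's external serialization mutex is dropped for the duration of the sleep, so a blocked DQBUF no longer locks every other ioctl out of the device. Reduced to its essentials, the drop/wait/retake pattern looks like this sketch:

#include <linux/mutex.h>
#include <linux/wait.h>

/* Sketch only: drop a possibly-held serialization mutex around a long
 * sleep, then restore the caller's locking state afterwards. */
static int wait_with_lock_dropped(struct mutex *ext_lock,
				  wait_queue_head_t *wq, bool *done)
{
	bool was_locked = ext_lock && mutex_is_locked(ext_lock);
	int ret;

	if (was_locked)
		mutex_unlock(ext_lock);		/* let other ioctls proceed */
	ret = wait_event_interruptible(*wq, *done);
	if (was_locked)
		mutex_lock(ext_lock);		/* re-acquire before returning */
	return ret;
}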
@@ -125,11 +146,13 @@ void videobuf_queue_core_init(struct videobuf_queue *q,
enum v4l2_field field,
unsigned int msize,
void *priv,
- struct videobuf_qtype_ops *int_ops)
+ struct videobuf_qtype_ops *int_ops,
+ struct mutex *ext_lock)
{
BUG_ON(!q);
memset(q, 0, sizeof(*q));
q->irqlock = irqlock;
+ q->ext_lock = ext_lock;
q->dev = dev;
q->type = type;
q->field = field;
@@ -350,9 +373,9 @@ static void videobuf_status(struct videobuf_queue *q, struct v4l2_buffer *b,
int videobuf_mmap_free(struct videobuf_queue *q)
{
int ret;
- mutex_lock(&q->vb_lock);
+ videobuf_queue_lock(q);
ret = __videobuf_free(q);
- mutex_unlock(&q->vb_lock);
+ videobuf_queue_unlock(q);
return ret;
}
EXPORT_SYMBOL_GPL(videobuf_mmap_free);
@@ -407,9 +430,9 @@ int videobuf_mmap_setup(struct videobuf_queue *q,
enum v4l2_memory memory)
{
int ret;
- mutex_lock(&q->vb_lock);
+ videobuf_queue_lock(q);
ret = __videobuf_mmap_setup(q, bcount, bsize, memory);
- mutex_unlock(&q->vb_lock);
+ videobuf_queue_unlock(q);
return ret;
}
EXPORT_SYMBOL_GPL(videobuf_mmap_setup);
@@ -432,7 +455,7 @@ int videobuf_reqbufs(struct videobuf_queue *q,
return -EINVAL;
}
- mutex_lock(&q->vb_lock);
+ videobuf_queue_lock(q);
if (req->type != q->type) {
dprintk(1, "reqbufs: queue type invalid\n");
retval = -EINVAL;
@@ -469,7 +492,7 @@ int videobuf_reqbufs(struct videobuf_queue *q,
retval = 0;
done:
- mutex_unlock(&q->vb_lock);
+ videobuf_queue_unlock(q);
return retval;
}
EXPORT_SYMBOL_GPL(videobuf_reqbufs);
@@ -478,7 +501,7 @@ int videobuf_querybuf(struct videobuf_queue *q, struct v4l2_buffer *b)
{
int ret = -EINVAL;
- mutex_lock(&q->vb_lock);
+ videobuf_queue_lock(q);
if (unlikely(b->type != q->type)) {
dprintk(1, "querybuf: Wrong type.\n");
goto done;
@@ -496,7 +519,7 @@ int videobuf_querybuf(struct videobuf_queue *q, struct v4l2_buffer *b)
ret = 0;
done:
- mutex_unlock(&q->vb_lock);
+ videobuf_queue_unlock(q);
return ret;
}
EXPORT_SYMBOL_GPL(videobuf_querybuf);
@@ -513,7 +536,7 @@ int videobuf_qbuf(struct videobuf_queue *q, struct v4l2_buffer *b)
if (b->memory == V4L2_MEMORY_MMAP)
down_read(&current->mm->mmap_sem);
- mutex_lock(&q->vb_lock);
+ videobuf_queue_lock(q);
retval = -EBUSY;
if (q->reading) {
dprintk(1, "qbuf: Reading running...\n");
@@ -605,7 +628,7 @@ int videobuf_qbuf(struct videobuf_queue *q, struct v4l2_buffer *b)
wake_up_interruptible_sync(&q->wait);
done:
- mutex_unlock(&q->vb_lock);
+ videobuf_queue_unlock(q);
if (b->memory == V4L2_MEMORY_MMAP)
up_read(&current->mm->mmap_sem);
@@ -635,14 +658,14 @@ checks:
dprintk(2, "next_buffer: waiting on buffer\n");
/* Drop lock to avoid deadlock with qbuf */
- mutex_unlock(&q->vb_lock);
+ videobuf_queue_unlock(q);
/* Checking list_empty and streaming is safe without
* locks because we goto checks to validate while
* holding locks before proceeding */
retval = wait_event_interruptible(q->wait,
!list_empty(&q->stream) || !q->streaming);
- mutex_lock(&q->vb_lock);
+ videobuf_queue_lock(q);
if (retval)
goto done;
@@ -669,7 +692,7 @@ static int stream_next_buffer(struct videobuf_queue *q,
goto done;
buf = list_entry(q->stream.next, struct videobuf_buffer, stream);
- retval = videobuf_waiton(buf, nonblocking, 1);
+ retval = videobuf_waiton(q, buf, nonblocking, 1);
if (retval < 0)
goto done;
@@ -687,7 +710,7 @@ int videobuf_dqbuf(struct videobuf_queue *q,
MAGIC_CHECK(q->int_ops->magic, MAGIC_QTYPE_OPS);
memset(b, 0, sizeof(*b));
- mutex_lock(&q->vb_lock);
+ videobuf_queue_lock(q);
retval = stream_next_buffer(q, &buf, nonblocking);
if (retval < 0) {
@@ -713,7 +736,7 @@ int videobuf_dqbuf(struct videobuf_queue *q,
buf->state = VIDEOBUF_IDLE;
b->flags &= ~V4L2_BUF_FLAG_DONE;
done:
- mutex_unlock(&q->vb_lock);
+ videobuf_queue_unlock(q);
return retval;
}
EXPORT_SYMBOL_GPL(videobuf_dqbuf);
@@ -724,7 +747,7 @@ int videobuf_streamon(struct videobuf_queue *q)
unsigned long flags = 0;
int retval;
- mutex_lock(&q->vb_lock);
+ videobuf_queue_lock(q);
retval = -EBUSY;
if (q->reading)
goto done;
@@ -740,7 +763,7 @@ int videobuf_streamon(struct videobuf_queue *q)
wake_up_interruptible_sync(&q->wait);
done:
- mutex_unlock(&q->vb_lock);
+ videobuf_queue_unlock(q);
return retval;
}
EXPORT_SYMBOL_GPL(videobuf_streamon);
@@ -760,9 +783,9 @@ int videobuf_streamoff(struct videobuf_queue *q)
{
int retval;
- mutex_lock(&q->vb_lock);
+ videobuf_queue_lock(q);
retval = __videobuf_streamoff(q);
- mutex_unlock(&q->vb_lock);
+ videobuf_queue_unlock(q);
return retval;
}
@@ -797,7 +820,7 @@ static ssize_t videobuf_read_zerocopy(struct videobuf_queue *q,
spin_lock_irqsave(q->irqlock, flags);
q->ops->buf_queue(q, q->read_buf);
spin_unlock_irqrestore(q->irqlock, flags);
- retval = videobuf_waiton(q->read_buf, 0, 0);
+ retval = videobuf_waiton(q, q->read_buf, 0, 0);
if (0 == retval) {
CALL(q, sync, q, q->read_buf);
if (VIDEOBUF_ERROR == q->read_buf->state)
@@ -868,7 +891,7 @@ ssize_t videobuf_read_one(struct videobuf_queue *q,
MAGIC_CHECK(q->int_ops->magic, MAGIC_QTYPE_OPS);
- mutex_lock(&q->vb_lock);
+ videobuf_queue_lock(q);
q->ops->buf_setup(q, &nbufs, &size);
@@ -909,7 +932,7 @@ ssize_t videobuf_read_one(struct videobuf_queue *q,
}
/* wait until capture is done */
- retval = videobuf_waiton(q->read_buf, nonblocking, 1);
+ retval = videobuf_waiton(q, q->read_buf, nonblocking, 1);
if (0 != retval)
goto done;
@@ -938,7 +961,7 @@ ssize_t videobuf_read_one(struct videobuf_queue *q,
}
done:
- mutex_unlock(&q->vb_lock);
+ videobuf_queue_unlock(q);
return retval;
}
EXPORT_SYMBOL_GPL(videobuf_read_one);
@@ -999,9 +1022,9 @@ int videobuf_read_start(struct videobuf_queue *q)
{
int rc;
- mutex_lock(&q->vb_lock);
+ videobuf_queue_lock(q);
rc = __videobuf_read_start(q);
- mutex_unlock(&q->vb_lock);
+ videobuf_queue_unlock(q);
return rc;
}
@@ -1009,15 +1032,15 @@ EXPORT_SYMBOL_GPL(videobuf_read_start);
void videobuf_read_stop(struct videobuf_queue *q)
{
- mutex_lock(&q->vb_lock);
+ videobuf_queue_lock(q);
__videobuf_read_stop(q);
- mutex_unlock(&q->vb_lock);
+ videobuf_queue_unlock(q);
}
EXPORT_SYMBOL_GPL(videobuf_read_stop);
void videobuf_stop(struct videobuf_queue *q)
{
- mutex_lock(&q->vb_lock);
+ videobuf_queue_lock(q);
if (q->streaming)
__videobuf_streamoff(q);
@@ -1025,7 +1048,7 @@ void videobuf_stop(struct videobuf_queue *q)
if (q->reading)
__videobuf_read_stop(q);
- mutex_unlock(&q->vb_lock);
+ videobuf_queue_unlock(q);
}
EXPORT_SYMBOL_GPL(videobuf_stop);
@@ -1039,7 +1062,7 @@ ssize_t videobuf_read_stream(struct videobuf_queue *q,
MAGIC_CHECK(q->int_ops->magic, MAGIC_QTYPE_OPS);
dprintk(2, "%s\n", __func__);
- mutex_lock(&q->vb_lock);
+ videobuf_queue_lock(q);
retval = -EBUSY;
if (q->streaming)
goto done;
@@ -1059,7 +1082,7 @@ ssize_t videobuf_read_stream(struct videobuf_queue *q,
list_del(&q->read_buf->stream);
q->read_off = 0;
}
- rc = videobuf_waiton(q->read_buf, nonblocking, 1);
+ rc = videobuf_waiton(q, q->read_buf, nonblocking, 1);
if (rc < 0) {
if (0 == retval)
retval = rc;
@@ -1097,7 +1120,7 @@ ssize_t videobuf_read_stream(struct videobuf_queue *q,
}
done:
- mutex_unlock(&q->vb_lock);
+ videobuf_queue_unlock(q);
return retval;
}
EXPORT_SYMBOL_GPL(videobuf_read_stream);
@@ -1109,7 +1132,7 @@ unsigned int videobuf_poll_stream(struct file *file,
struct videobuf_buffer *buf = NULL;
unsigned int rc = 0;
- mutex_lock(&q->vb_lock);
+ videobuf_queue_lock(q);
if (q->streaming) {
if (!list_empty(&q->stream))
buf = list_entry(q->stream.next,
@@ -1147,7 +1170,7 @@ unsigned int videobuf_poll_stream(struct file *file,
}
}
}
- mutex_unlock(&q->vb_lock);
+ videobuf_queue_unlock(q);
return rc;
}
EXPORT_SYMBOL_GPL(videobuf_poll_stream);
@@ -1164,7 +1187,7 @@ int videobuf_mmap_mapper(struct videobuf_queue *q, struct vm_area_struct *vma)
return -EINVAL;
}
- mutex_lock(&q->vb_lock);
+ videobuf_queue_lock(q);
for (i = 0; i < VIDEO_MAX_FRAME; i++) {
struct videobuf_buffer *buf = q->bufs[i];
@@ -1174,7 +1197,7 @@ int videobuf_mmap_mapper(struct videobuf_queue *q, struct vm_area_struct *vma)
break;
}
}
- mutex_unlock(&q->vb_lock);
+ videobuf_queue_unlock(q);
return rc;
}
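
Every direct mutex_lock(&q->vb_lock) call site above becomes videobuf_queue_lock(q). The wrappers themselves live in videobuf-core.h rather than in this hunk; presumably they only take vb_lock when no external lock was passed to videobuf_queue_core_init(), since with ext_lock set the ioctl path is already serialized by that mutex. A sketch:

static inline void videobuf_queue_lock(struct videobuf_queue *q)
{
	if (!q->ext_lock)
		mutex_lock(&q->vb_lock);
}

static inline void videobuf_queue_unlock(struct videobuf_queue *q)
{
	if (!q->ext_lock)
		mutex_unlock(&q->vb_lock);
}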
diff --git a/drivers/media/video/videobuf-dma-contig.c b/drivers/media/video/videobuf-dma-contig.c
index 6ff9e4bac3ea..c9691115f2d2 100644
--- a/drivers/media/video/videobuf-dma-contig.c
+++ b/drivers/media/video/videobuf-dma-contig.c
@@ -28,7 +28,6 @@ struct videobuf_dma_contig_memory {
void *vaddr;
dma_addr_t dma_handle;
unsigned long size;
- int is_userptr;
};
#define MAGIC_DC_MEM 0x0733ac61
@@ -63,7 +62,7 @@ static void videobuf_vm_close(struct vm_area_struct *vma)
struct videobuf_dma_contig_memory *mem;
dev_dbg(q->dev, "munmap %p q=%p\n", map, q);
- mutex_lock(&q->vb_lock);
+ videobuf_queue_lock(q);
/* We need first to cancel streams, before unmapping */
if (q->streaming)
@@ -103,7 +102,7 @@ static void videobuf_vm_close(struct vm_area_struct *vma)
kfree(map);
- mutex_unlock(&q->vb_lock);
+ videobuf_queue_unlock(q);
}
}
@@ -120,7 +119,6 @@ static const struct vm_operations_struct videobuf_vm_ops = {
*/
static void videobuf_dma_contig_user_put(struct videobuf_dma_contig_memory *mem)
{
- mem->is_userptr = 0;
mem->dma_handle = 0;
mem->size = 0;
}
@@ -147,7 +145,6 @@ static int videobuf_dma_contig_user_get(struct videobuf_dma_contig_memory *mem,
offset = vb->baddr & ~PAGE_MASK;
mem->size = PAGE_ALIGN(vb->size + offset);
- mem->is_userptr = 0;
ret = -EINVAL;
down_read(&mm->mmap_sem);
@@ -181,9 +178,6 @@ static int videobuf_dma_contig_user_get(struct videobuf_dma_contig_memory *mem,
pages_done++;
}
- if (!ret)
- mem->is_userptr = 1;
-
out_up:
up_read(&current->mm->mmap_sem);
@@ -349,10 +343,11 @@ void videobuf_queue_dma_contig_init(struct videobuf_queue *q,
enum v4l2_buf_type type,
enum v4l2_field field,
unsigned int msize,
- void *priv)
+ void *priv,
+ struct mutex *ext_lock)
{
videobuf_queue_core_init(q, ops, dev, irqlock, type, field, msize,
- priv, &qops);
+ priv, &qops, ext_lock);
}
EXPORT_SYMBOL_GPL(videobuf_queue_dma_contig_init);
diff --git a/drivers/media/video/videobuf-dma-sg.c b/drivers/media/video/videobuf-dma-sg.c
index 2ad0bc252b0e..20f227ee2b3e 100644
--- a/drivers/media/video/videobuf-dma-sg.c
+++ b/drivers/media/video/videobuf-dma-sg.c
@@ -116,8 +116,8 @@ static struct scatterlist *videobuf_pages_to_sg(struct page **pages,
goto nopage;
if (PageHighMem(pages[i]))
goto highmem;
- sg_set_page(&sglist[i], pages[i], min(PAGE_SIZE, size), 0);
- size -= min(PAGE_SIZE, size);
+ sg_set_page(&sglist[i], pages[i], min_t(size_t, PAGE_SIZE, size), 0);
+ size -= min_t(size_t, PAGE_SIZE, size);
}
return sglist;
@@ -358,7 +358,7 @@ static void videobuf_vm_close(struct vm_area_struct *vma)
map->count--;
if (0 == map->count) {
dprintk(1, "munmap %p q=%p\n", map, q);
- mutex_lock(&q->vb_lock);
+ videobuf_queue_lock(q);
for (i = 0; i < VIDEO_MAX_FRAME; i++) {
if (NULL == q->bufs[i])
continue;
@@ -374,7 +374,7 @@ static void videobuf_vm_close(struct vm_area_struct *vma)
q->bufs[i]->baddr = 0;
q->ops->buf_release(q, q->bufs[i]);
}
- mutex_unlock(&q->vb_lock);
+ videobuf_queue_unlock(q);
kfree(map);
}
return;
@@ -654,10 +654,11 @@ void videobuf_queue_sg_init(struct videobuf_queue *q,
enum v4l2_buf_type type,
enum v4l2_field field,
unsigned int msize,
- void *priv)
+ void *priv,
+ struct mutex *ext_lock)
{
videobuf_queue_core_init(q, ops, dev, irqlock, type, field, msize,
- priv, &sg_ops);
+ priv, &sg_ops, ext_lock);
}
EXPORT_SYMBOL_GPL(videobuf_queue_sg_init);
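
The min() to min_t() change earlier in this file is a type fix rather than a behavioural one: the kernel's min() macro rejects arguments of different types, and PAGE_SIZE is an unsigned long while the remaining byte count there is a size_t, which are distinct types on some architectures. min_t() casts both sides to the named type before comparing:

static size_t demo_chunk(size_t remaining)
{
	/* min(PAGE_SIZE, remaining) trips min()'s strict type check wherever
	 * size_t is not unsigned long; min_t() names the comparison type. */
	return min_t(size_t, PAGE_SIZE, remaining);
}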
diff --git a/drivers/media/video/videobuf-dvb.c b/drivers/media/video/videobuf-dvb.c
index 3f76398968b8..3de7c7e4402d 100644
--- a/drivers/media/video/videobuf-dvb.c
+++ b/drivers/media/video/videobuf-dvb.c
@@ -57,7 +57,7 @@ static int videobuf_dvb_thread(void *data)
buf = list_entry(dvb->dvbq.stream.next,
struct videobuf_buffer, stream);
list_del(&buf->stream);
- err = videobuf_waiton(buf,0,1);
+ err = videobuf_waiton(&dvb->dvbq, buf, 0, 1);
/* no more feeds left or stop_feed() asked us to quit */
if (0 == dvb->nfeeds)
diff --git a/drivers/media/video/videobuf-vmalloc.c b/drivers/media/video/videobuf-vmalloc.c
index e7fe31d54f07..df142580e44c 100644
--- a/drivers/media/video/videobuf-vmalloc.c
+++ b/drivers/media/video/videobuf-vmalloc.c
@@ -75,7 +75,7 @@ static void videobuf_vm_close(struct vm_area_struct *vma)
struct videobuf_vmalloc_memory *mem;
dprintk(1, "munmap %p q=%p\n", map, q);
- mutex_lock(&q->vb_lock);
+ videobuf_queue_lock(q);
/* We need first to cancel streams, before unmapping */
if (q->streaming)
@@ -114,7 +114,7 @@ static void videobuf_vm_close(struct vm_area_struct *vma)
kfree(map);
- mutex_unlock(&q->vb_lock);
+ videobuf_queue_unlock(q);
}
return;
@@ -304,10 +304,11 @@ void videobuf_queue_vmalloc_init(struct videobuf_queue *q,
enum v4l2_buf_type type,
enum v4l2_field field,
unsigned int msize,
- void *priv)
+ void *priv,
+ struct mutex *ext_lock)
{
videobuf_queue_core_init(q, ops, dev, irqlock, type, field, msize,
- priv, &qops);
+ priv, &qops, ext_lock);
}
EXPORT_SYMBOL_GPL(videobuf_queue_vmalloc_init);
diff --git a/drivers/media/video/vino.c b/drivers/media/video/vino.c
index 3eb15f72ac09..7e7eec48f8b1 100644
--- a/drivers/media/video/vino.c
+++ b/drivers/media/video/vino.c
@@ -4334,10 +4334,10 @@ static int __init vino_module_init(void)
vino_drvdata->decoder =
v4l2_i2c_new_subdev(&vino_drvdata->v4l2_dev, &vino_i2c_adapter,
- "saa7191", "saa7191", 0, I2C_ADDRS(0x45));
+ "saa7191", 0, I2C_ADDRS(0x45));
vino_drvdata->camera =
v4l2_i2c_new_subdev(&vino_drvdata->v4l2_dev, &vino_i2c_adapter,
- "indycam", "indycam", 0, I2C_ADDRS(0x2b));
+ "indycam", 0, I2C_ADDRS(0x2b));
dprintk("init complete!\n");
diff --git a/drivers/media/video/vivi.c b/drivers/media/video/vivi.c
index e17b6fee046b..9797e5a69265 100644
--- a/drivers/media/video/vivi.c
+++ b/drivers/media/video/vivi.c
@@ -820,14 +820,11 @@ static int vidioc_s_fmt_vid_cap(struct file *file, void *priv,
struct v4l2_format *f)
{
struct vivi_dev *dev = video_drvdata(file);
- struct videobuf_queue *q = &dev->vb_vidq;
int ret = vidioc_try_fmt_vid_cap(file, priv, f);
if (ret < 0)
return ret;
- mutex_lock(&q->vb_lock);
-
if (vivi_is_generating(dev)) {
dprintk(dev, 1, "%s device busy\n", __func__);
ret = -EBUSY;
@@ -840,7 +837,6 @@ static int vidioc_s_fmt_vid_cap(struct file *file, void *priv,
dev->vb_vidq.field = f->fmt.pix.field;
ret = 0;
out:
- mutex_unlock(&q->vb_lock);
return ret;
}
@@ -1086,7 +1082,7 @@ static const struct v4l2_file_operations vivi_fops = {
.release = vivi_close,
.read = vivi_read,
.poll = vivi_poll,
- .ioctl = video_ioctl2, /* V4L2 ioctl handler */
+ .unlocked_ioctl = video_ioctl2, /* V4L2 ioctl handler */
.mmap = vivi_mmap,
};
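
Switching vivi to .unlocked_ioctl works together with the vfd->lock assignment further down: once a mutex is set in the video_device, the V4L2 core takes it around each ioctl call, so the driver no longer needs the BKL or its own vb_lock juggling in the ioctl handlers. The registration pattern, condensed into a sketch with illustrative names:

#include <linux/module.h>
#include <media/v4l2-dev.h>
#include <media/v4l2-ioctl.h>

struct demo_dev {
	struct video_device vdev;
	struct mutex mutex;
};

static const struct v4l2_file_operations demo_fops = {
	.owner		= THIS_MODULE,
	.unlocked_ioctl	= video_ioctl2,	/* core serializes on vdev.lock */
};

static int demo_register(struct demo_dev *dev)
{
	mutex_init(&dev->mutex);
	dev->vdev.fops = &demo_fops;
	dev->vdev.lock = &dev->mutex;	/* one mutex per device instance */
	return video_register_device(&dev->vdev, VFL_TYPE_GRABBER, -1);
}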
@@ -1173,19 +1169,19 @@ static int __init vivi_create_instance(int inst)
dev->saturation = 127;
dev->hue = 0;
+ /* initialize locks */
+ spin_lock_init(&dev->slock);
+ mutex_init(&dev->mutex);
+
videobuf_queue_vmalloc_init(&dev->vb_vidq, &vivi_video_qops,
NULL, &dev->slock, V4L2_BUF_TYPE_VIDEO_CAPTURE,
V4L2_FIELD_INTERLACED,
- sizeof(struct vivi_buffer), dev);
+ sizeof(struct vivi_buffer), dev, &dev->mutex);
/* init video dma queues */
INIT_LIST_HEAD(&dev->vidq.active);
init_waitqueue_head(&dev->vidq.wq);
- /* initialize locks */
- spin_lock_init(&dev->slock);
- mutex_init(&dev->mutex);
-
ret = -ENOMEM;
vfd = video_device_alloc();
if (!vfd)
@@ -1194,6 +1190,7 @@ static int __init vivi_create_instance(int inst)
*vfd = vivi_template;
vfd->debug = debug;
vfd->v4l2_dev = &dev->v4l2_dev;
+ vfd->lock = &dev->mutex;
ret = video_register_device(vfd, VFL_TYPE_GRABBER, video_nr);
if (ret < 0)
diff --git a/drivers/media/video/vp27smpx.c b/drivers/media/video/vp27smpx.c
index ca8303bd2401..c15efb6e7771 100644
--- a/drivers/media/video/vp27smpx.c
+++ b/drivers/media/video/vp27smpx.c
@@ -27,11 +27,9 @@
#include <linux/ioctl.h>
#include <asm/uaccess.h>
#include <linux/i2c.h>
-#include <linux/i2c-id.h>
#include <linux/videodev2.h>
#include <media/v4l2-device.h>
#include <media/v4l2-chip-ident.h>
-#include <media/v4l2-i2c-drv.h>
MODULE_DESCRIPTION("vp27smpx driver");
MODULE_AUTHOR("Hans Verkuil");
@@ -200,9 +198,25 @@ static const struct i2c_device_id vp27smpx_id[] = {
};
MODULE_DEVICE_TABLE(i2c, vp27smpx_id);
-static struct v4l2_i2c_driver_data v4l2_i2c_data = {
- .name = "vp27smpx",
- .probe = vp27smpx_probe,
- .remove = vp27smpx_remove,
- .id_table = vp27smpx_id,
+static struct i2c_driver vp27smpx_driver = {
+ .driver = {
+ .owner = THIS_MODULE,
+ .name = "vp27smpx",
+ },
+ .probe = vp27smpx_probe,
+ .remove = vp27smpx_remove,
+ .id_table = vp27smpx_id,
};
+
+static __init int init_vp27smpx(void)
+{
+ return i2c_add_driver(&vp27smpx_driver);
+}
+
+static __exit void exit_vp27smpx(void)
+{
+ i2c_del_driver(&vp27smpx_driver);
+}
+
+module_init(init_vp27smpx);
+module_exit(exit_vp27smpx);
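
With media/v4l2-i2c-drv.h gone, each converted subdevice driver now carries a plain struct i2c_driver plus its own init/exit stubs; the same pattern repeats below for vpx3220, wm8739 and wm8775. On later kernels the remaining boilerplate collapses into module_i2c_driver(); a sketch of that form with illustrative names (the macro did not exist yet when this patch was written):

#include <linux/i2c.h>
#include <linux/module.h>

static int demo_probe(struct i2c_client *client,
		      const struct i2c_device_id *id)
{
	return 0;
}

static int demo_remove(struct i2c_client *client)
{
	return 0;
}

static const struct i2c_device_id demo_id[] = {
	{ "demo-subdev", 0 },
	{ }
};
MODULE_DEVICE_TABLE(i2c, demo_id);

static struct i2c_driver demo_driver = {
	.driver = {
		.name = "demo-subdev",
	},
	.probe    = demo_probe,
	.remove   = demo_remove,
	.id_table = demo_id,
};
module_i2c_driver(demo_driver);	/* expands to the module_init/module_exit pair */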
diff --git a/drivers/media/video/vpx3220.c b/drivers/media/video/vpx3220.c
index 77ebcea7c3da..91a01b3cdf8c 100644
--- a/drivers/media/video/vpx3220.c
+++ b/drivers/media/video/vpx3220.c
@@ -28,7 +28,6 @@
#include <linux/videodev2.h>
#include <media/v4l2-device.h>
#include <media/v4l2-chip-ident.h>
-#include <media/v4l2-i2c-drv.h>
MODULE_DESCRIPTION("vpx3220a/vpx3216b/vpx3214c video decoder driver");
MODULE_AUTHOR("Laurent Pinchart");
@@ -614,9 +613,25 @@ static const struct i2c_device_id vpx3220_id[] = {
};
MODULE_DEVICE_TABLE(i2c, vpx3220_id);
-static struct v4l2_i2c_driver_data v4l2_i2c_data = {
- .name = "vpx3220",
- .probe = vpx3220_probe,
- .remove = vpx3220_remove,
- .id_table = vpx3220_id,
+static struct i2c_driver vpx3220_driver = {
+ .driver = {
+ .owner = THIS_MODULE,
+ .name = "vpx3220",
+ },
+ .probe = vpx3220_probe,
+ .remove = vpx3220_remove,
+ .id_table = vpx3220_id,
};
+
+static __init int init_vpx3220(void)
+{
+ return i2c_add_driver(&vpx3220_driver);
+}
+
+static __exit void exit_vpx3220(void)
+{
+ i2c_del_driver(&vpx3220_driver);
+}
+
+module_init(init_vpx3220);
+module_exit(exit_vpx3220);
diff --git a/drivers/media/video/wm8739.c b/drivers/media/video/wm8739.c
index d5965543ecab..a22f765e968a 100644
--- a/drivers/media/video/wm8739.c
+++ b/drivers/media/video/wm8739.c
@@ -30,7 +30,6 @@
#include <linux/videodev2.h>
#include <media/v4l2-device.h>
#include <media/v4l2-chip-ident.h>
-#include <media/v4l2-i2c-drv.h>
#include <media/v4l2-ctrls.h>
MODULE_DESCRIPTION("wm8739 driver");
@@ -282,9 +281,25 @@ static const struct i2c_device_id wm8739_id[] = {
};
MODULE_DEVICE_TABLE(i2c, wm8739_id);
-static struct v4l2_i2c_driver_data v4l2_i2c_data = {
- .name = "wm8739",
- .probe = wm8739_probe,
- .remove = wm8739_remove,
- .id_table = wm8739_id,
+static struct i2c_driver wm8739_driver = {
+ .driver = {
+ .owner = THIS_MODULE,
+ .name = "wm8739",
+ },
+ .probe = wm8739_probe,
+ .remove = wm8739_remove,
+ .id_table = wm8739_id,
};
+
+static __init int init_wm8739(void)
+{
+ return i2c_add_driver(&wm8739_driver);
+}
+
+static __exit void exit_wm8739(void)
+{
+ i2c_del_driver(&wm8739_driver);
+}
+
+module_init(init_wm8739);
+module_exit(exit_wm8739);
diff --git a/drivers/media/video/wm8775.c b/drivers/media/video/wm8775.c
index 23bad3fd6dc5..135525649086 100644
--- a/drivers/media/video/wm8775.c
+++ b/drivers/media/video/wm8775.c
@@ -31,12 +31,11 @@
#include <linux/ioctl.h>
#include <asm/uaccess.h>
#include <linux/i2c.h>
-#include <linux/i2c-id.h>
#include <linux/videodev2.h>
#include <media/v4l2-device.h>
#include <media/v4l2-chip-ident.h>
#include <media/v4l2-ctrls.h>
-#include <media/v4l2-i2c-drv.h>
+#include <media/wm8775.h>
MODULE_DESCRIPTION("wm8775 driver");
MODULE_AUTHOR("Ulf Eklund, Hans Verkuil");
@@ -52,10 +51,16 @@ enum {
TOT_REGS
};
+#define ALC_HOLD 0x85 /* R17: use zero cross detection, ALC hold time 42.6 ms */
+#define ALC_EN 0x100 /* R17: ALC enable */
+
struct wm8775_state {
struct v4l2_subdev sd;
struct v4l2_ctrl_handler hdl;
struct v4l2_ctrl *mute;
+ struct v4l2_ctrl *vol;
+ struct v4l2_ctrl *bal;
+ struct v4l2_ctrl *loud;
u8 input; /* Last selected input (0-0xf) */
};
@@ -87,6 +92,30 @@ static int wm8775_write(struct v4l2_subdev *sd, int reg, u16 val)
return -1;
}
+static void wm8775_set_audio(struct v4l2_subdev *sd, int quietly)
+{
+ struct wm8775_state *state = to_state(sd);
+ u8 vol_l, vol_r;
+ int muted = 0 != state->mute->val;
+ u16 volume = (u16)state->vol->val;
+ u16 balance = (u16)state->bal->val;
+
+ /* normalize ( 65535 to 0 -> 255 to 0 (+24dB to -103dB) ) */
+ vol_l = (min(65536 - balance, 32768) * volume) >> 23;
+ vol_r = (min(balance, (u16)32768) * volume) >> 23;
+
+ /* Mute */
+ if (muted || quietly)
+ wm8775_write(sd, R21, 0x0c0 | state->input);
+
+ wm8775_write(sd, R14, vol_l | 0x100); /* 0x100= Left channel ADC zero cross enable */
+ wm8775_write(sd, R15, vol_r | 0x100); /* 0x100= Right channel ADC zero cross enable */
+
+ /* Un-mute */
+ if (!muted)
+ wm8775_write(sd, R21, state->input);
+}
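
The >> 23 folds the 16-bit volume control and the 0..32768 balance weight down to the chip's 8-bit gain registers. Worked through with the control defaults set in wm8775_probe() below (volume 0xCF00, balance centred at 32768):

/*
 * vol_l = (min(65536 - 32768, 32768) * 0xCF00) >> 23
 *       = (32768 * 52992) >> 23
 *       =  52992 >> 8
 *       =  207  = 0xCF
 *
 * 0xCF is 48 half-dB steps below the 0xFF maximum of +24 dB, i.e. 0 dB of
 * ADC gain, which matches the "0dB" note on the volume control default and
 * is consistent with the removed fixed value 0x1d4 (0xd4 = 212, five steps
 * higher) having been described as "+2.5dB".
 */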
+
static int wm8775_s_routing(struct v4l2_subdev *sd,
u32 input, u32 output, u32 config)
{
@@ -104,25 +133,26 @@ static int wm8775_s_routing(struct v4l2_subdev *sd,
state->input = input;
if (!v4l2_ctrl_g_ctrl(state->mute))
return 0;
- wm8775_write(sd, R21, 0x0c0);
- wm8775_write(sd, R14, 0x1d4);
- wm8775_write(sd, R15, 0x1d4);
- wm8775_write(sd, R21, 0x100 + state->input);
+ if (!v4l2_ctrl_g_ctrl(state->vol))
+ return 0;
+ if (!v4l2_ctrl_g_ctrl(state->bal))
+ return 0;
+ wm8775_set_audio(sd, 1);
return 0;
}
static int wm8775_s_ctrl(struct v4l2_ctrl *ctrl)
{
struct v4l2_subdev *sd = to_sd(ctrl);
- struct wm8775_state *state = to_state(sd);
switch (ctrl->id) {
case V4L2_CID_AUDIO_MUTE:
- wm8775_write(sd, R21, 0x0c0);
- wm8775_write(sd, R14, 0x1d4);
- wm8775_write(sd, R15, 0x1d4);
- if (!ctrl->val)
- wm8775_write(sd, R21, 0x100 + state->input);
+ case V4L2_CID_AUDIO_VOLUME:
+ case V4L2_CID_AUDIO_BALANCE:
+ wm8775_set_audio(sd, 0);
+ return 0;
+ case V4L2_CID_AUDIO_LOUDNESS:
+ wm8775_write(sd, R17, (ctrl->val ? ALC_EN : 0) | ALC_HOLD);
return 0;
}
return -EINVAL;
@@ -146,16 +176,7 @@ static int wm8775_log_status(struct v4l2_subdev *sd)
static int wm8775_s_frequency(struct v4l2_subdev *sd, struct v4l2_frequency *freq)
{
- struct wm8775_state *state = to_state(sd);
-
- /* If I remove this, then it can happen that I have no
- sound the first time I tune from static to a valid channel.
- It's difficult to reproduce and is almost certainly related
- to the zero cross detect circuit. */
- wm8775_write(sd, R21, 0x0c0);
- wm8775_write(sd, R14, 0x1d4);
- wm8775_write(sd, R15, 0x1d4);
- wm8775_write(sd, R21, 0x100 + state->input);
+ wm8775_set_audio(sd, 0);
return 0;
}
@@ -205,6 +226,7 @@ static int wm8775_probe(struct i2c_client *client,
{
struct wm8775_state *state;
struct v4l2_subdev *sd;
+ int err;
/* Check if the adapter supports the needed features */
if (!i2c_check_functionality(client->adapter, I2C_FUNC_SMBUS_BYTE_DATA))
@@ -218,15 +240,21 @@ static int wm8775_probe(struct i2c_client *client,
return -ENOMEM;
sd = &state->sd;
v4l2_i2c_subdev_init(sd, client, &wm8775_ops);
+ sd->grp_id = WM8775_GID; /* subdev group id */
state->input = 2;
- v4l2_ctrl_handler_init(&state->hdl, 1);
+ v4l2_ctrl_handler_init(&state->hdl, 4);
state->mute = v4l2_ctrl_new_std(&state->hdl, &wm8775_ctrl_ops,
V4L2_CID_AUDIO_MUTE, 0, 1, 1, 0);
+ state->vol = v4l2_ctrl_new_std(&state->hdl, &wm8775_ctrl_ops,
+ V4L2_CID_AUDIO_VOLUME, 0, 65535, (65535+99)/100, 0xCF00); /* 0dB*/
+ state->bal = v4l2_ctrl_new_std(&state->hdl, &wm8775_ctrl_ops,
+ V4L2_CID_AUDIO_BALANCE, 0, 65535, (65535+99)/100, 32768);
+ state->loud = v4l2_ctrl_new_std(&state->hdl, &wm8775_ctrl_ops,
+ V4L2_CID_AUDIO_LOUDNESS, 0, 1, 1, 1);
sd->ctrl_handler = &state->hdl;
- if (state->hdl.error) {
- int err = state->hdl.error;
-
+ err = state->hdl.error;
+ if (err) {
v4l2_ctrl_handler_free(&state->hdl);
kfree(state);
return err;
@@ -238,29 +266,25 @@ static int wm8775_probe(struct i2c_client *client,
wm8775_write(sd, R23, 0x000);
/* Disable zero cross detect timeout */
wm8775_write(sd, R7, 0x000);
- /* Left justified, 24-bit mode */
- wm8775_write(sd, R11, 0x021);
+ /* HPF enable, I2S mode, 24-bit */
+ wm8775_write(sd, R11, 0x022);
/* Master mode, clock ratio 256fs */
wm8775_write(sd, R12, 0x102);
/* Powered up */
wm8775_write(sd, R13, 0x000);
- /* ADC gain +2.5dB, enable zero cross */
- wm8775_write(sd, R14, 0x1d4);
- /* ADC gain +2.5dB, enable zero cross */
- wm8775_write(sd, R15, 0x1d4);
- /* ALC Stereo, ALC target level -1dB FS max gain +8dB */
- wm8775_write(sd, R16, 0x1bf);
- /* Enable gain control, use zero cross detection,
- ALC hold time 42.6 ms */
- wm8775_write(sd, R17, 0x185);
+ /* ALC stereo, ALC target level -5dB FS, ALC max gain +8dB */
+ wm8775_write(sd, R16, 0x1bb);
+ /* Set ALC mode and hold time */
+ wm8775_write(sd, R17, (state->loud->val ? ALC_EN : 0) | ALC_HOLD);
/* ALC gain ramp up delay 34 s, ALC gain ramp down delay 33 ms */
wm8775_write(sd, R18, 0x0a2);
/* Enable noise gate, threshold -72dBfs */
wm8775_write(sd, R19, 0x005);
- /* Transient window 4ms, lower PGA gain limit -1dB */
- wm8775_write(sd, R20, 0x07a);
- /* LRBOTH = 1, use input 2. */
- wm8775_write(sd, R21, 0x102);
+ /* Transient window 4ms, ALC min gain -5dB */
+ wm8775_write(sd, R20, 0x0fb);
+
+ wm8775_set_audio(sd, 1); /* set volume/mute/mux */
+
return 0;
}
@@ -281,9 +305,25 @@ static const struct i2c_device_id wm8775_id[] = {
};
MODULE_DEVICE_TABLE(i2c, wm8775_id);
-static struct v4l2_i2c_driver_data v4l2_i2c_data = {
- .name = "wm8775",
- .probe = wm8775_probe,
- .remove = wm8775_remove,
- .id_table = wm8775_id,
+static struct i2c_driver wm8775_driver = {
+ .driver = {
+ .owner = THIS_MODULE,
+ .name = "wm8775",
+ },
+ .probe = wm8775_probe,
+ .remove = wm8775_remove,
+ .id_table = wm8775_id,
};
+
+static __init int init_wm8775(void)
+{
+ return i2c_add_driver(&wm8775_driver);
+}
+
+static __exit void exit_wm8775(void)
+{
+ i2c_del_driver(&wm8775_driver);
+}
+
+module_init(init_wm8775);
+module_exit(exit_wm8775);
diff --git a/drivers/media/video/zoran/zoran.h b/drivers/media/video/zoran/zoran.h
index 307e847fe1cd..27f05551183f 100644
--- a/drivers/media/video/zoran/zoran.h
+++ b/drivers/media/video/zoran/zoran.h
@@ -341,10 +341,8 @@ struct card_info {
enum card_type type;
char name[32];
const char *i2c_decoder; /* i2c decoder device */
- const char *mod_decoder; /* i2c decoder module */
const unsigned short *addrs_decoder;
const char *i2c_encoder; /* i2c encoder device */
- const char *mod_encoder; /* i2c encoder module */
const unsigned short *addrs_encoder;
u16 video_vfe, video_codec; /* videocodec types */
u16 audio_chip; /* audio type */
@@ -390,6 +388,7 @@ struct zoran {
struct videocodec *vfe; /* video front end */
struct mutex resource_lock; /* prevent evil stuff */
+ struct mutex other_lock; /* please merge with above */
u8 initialized; /* flag if zoran has been correctly initialized */
int user; /* number of current users */
diff --git a/drivers/media/video/zoran/zoran_card.c b/drivers/media/video/zoran/zoran_card.c
index bfcd3aef50f9..e520abf9f4c3 100644
--- a/drivers/media/video/zoran/zoran_card.c
+++ b/drivers/media/video/zoran/zoran_card.c
@@ -379,7 +379,6 @@ static struct card_info zoran_cards[NUM_CARDS] __devinitdata = {
.type = DC10_old,
.name = "DC10(old)",
.i2c_decoder = "vpx3220a",
- .mod_decoder = "vpx3220",
.addrs_decoder = vpx3220_addrs,
.video_codec = CODEC_TYPE_ZR36050,
.video_vfe = CODEC_TYPE_ZR36016,
@@ -409,10 +408,8 @@ static struct card_info zoran_cards[NUM_CARDS] __devinitdata = {
.type = DC10_new,
.name = "DC10(new)",
.i2c_decoder = "saa7110",
- .mod_decoder = "saa7110",
.addrs_decoder = saa7110_addrs,
.i2c_encoder = "adv7175",
- .mod_encoder = "adv7175",
.addrs_encoder = adv717x_addrs,
.video_codec = CODEC_TYPE_ZR36060,
@@ -440,10 +437,8 @@ static struct card_info zoran_cards[NUM_CARDS] __devinitdata = {
.type = DC10plus,
.name = "DC10plus",
.i2c_decoder = "saa7110",
- .mod_decoder = "saa7110",
.addrs_decoder = saa7110_addrs,
.i2c_encoder = "adv7175",
- .mod_encoder = "adv7175",
.addrs_encoder = adv717x_addrs,
.video_codec = CODEC_TYPE_ZR36060,
@@ -472,10 +467,8 @@ static struct card_info zoran_cards[NUM_CARDS] __devinitdata = {
.type = DC30,
.name = "DC30",
.i2c_decoder = "vpx3220a",
- .mod_decoder = "vpx3220",
.addrs_decoder = vpx3220_addrs,
.i2c_encoder = "adv7175",
- .mod_encoder = "adv7175",
.addrs_encoder = adv717x_addrs,
.video_codec = CODEC_TYPE_ZR36050,
.video_vfe = CODEC_TYPE_ZR36016,
@@ -505,10 +498,8 @@ static struct card_info zoran_cards[NUM_CARDS] __devinitdata = {
.type = DC30plus,
.name = "DC30plus",
.i2c_decoder = "vpx3220a",
- .mod_decoder = "vpx3220",
.addrs_decoder = vpx3220_addrs,
.i2c_encoder = "adv7175",
- .mod_encoder = "adv7175",
.addrs_encoder = adv717x_addrs,
.video_codec = CODEC_TYPE_ZR36050,
.video_vfe = CODEC_TYPE_ZR36016,
@@ -538,10 +529,8 @@ static struct card_info zoran_cards[NUM_CARDS] __devinitdata = {
.type = LML33,
.name = "LML33",
.i2c_decoder = "bt819a",
- .mod_decoder = "bt819",
.addrs_decoder = bt819_addrs,
.i2c_encoder = "bt856",
- .mod_encoder = "bt856",
.addrs_encoder = bt856_addrs,
.video_codec = CODEC_TYPE_ZR36060,
@@ -569,10 +558,8 @@ static struct card_info zoran_cards[NUM_CARDS] __devinitdata = {
.type = LML33R10,
.name = "LML33R10",
.i2c_decoder = "saa7114",
- .mod_decoder = "saa7115",
.addrs_decoder = saa7114_addrs,
.i2c_encoder = "adv7170",
- .mod_encoder = "adv7170",
.addrs_encoder = adv717x_addrs,
.video_codec = CODEC_TYPE_ZR36060,
@@ -600,10 +587,8 @@ static struct card_info zoran_cards[NUM_CARDS] __devinitdata = {
.type = BUZ,
.name = "Buz",
.i2c_decoder = "saa7111",
- .mod_decoder = "saa7115",
.addrs_decoder = saa7111_addrs,
.i2c_encoder = "saa7185",
- .mod_encoder = "saa7185",
.addrs_encoder = saa7185_addrs,
.video_codec = CODEC_TYPE_ZR36060,
@@ -633,10 +618,8 @@ static struct card_info zoran_cards[NUM_CARDS] __devinitdata = {
/* AverMedia chose not to brand the 6-Eyes. Thus it
can't be autodetected, and requires card=x. */
.i2c_decoder = "ks0127",
- .mod_decoder = "ks0127",
.addrs_decoder = ks0127_addrs,
.i2c_encoder = "bt866",
- .mod_encoder = "bt866",
.addrs_encoder = bt866_addrs,
.video_codec = CODEC_TYPE_ZR36060,
@@ -1244,6 +1227,7 @@ static int __devinit zoran_probe(struct pci_dev *pdev,
snprintf(ZR_DEVNAME(zr), sizeof(ZR_DEVNAME(zr)), "MJPEG[%u]", zr->id);
spin_lock_init(&zr->spinlock);
mutex_init(&zr->resource_lock);
+ mutex_init(&zr->other_lock);
if (pci_enable_device(pdev))
goto zr_unreg;
pci_read_config_byte(zr->pci_dev, PCI_CLASS_REVISION, &zr->revision);
@@ -1359,13 +1343,12 @@ static int __devinit zoran_probe(struct pci_dev *pdev,
}
zr->decoder = v4l2_i2c_new_subdev(&zr->v4l2_dev,
- &zr->i2c_adapter, zr->card.mod_decoder, zr->card.i2c_decoder,
+ &zr->i2c_adapter, zr->card.i2c_decoder,
0, zr->card.addrs_decoder);
- if (zr->card.mod_encoder)
+ if (zr->card.i2c_encoder)
zr->encoder = v4l2_i2c_new_subdev(&zr->v4l2_dev,
- &zr->i2c_adapter,
- zr->card.mod_encoder, zr->card.i2c_encoder,
+ &zr->i2c_adapter, zr->card.i2c_encoder,
0, zr->card.addrs_encoder);
dprintk(2,
diff --git a/drivers/media/video/zoran/zoran_device.c b/drivers/media/video/zoran/zoran_device.c
index 6f846abee3e4..b02007e42150 100644
--- a/drivers/media/video/zoran/zoran_device.c
+++ b/drivers/media/video/zoran/zoran_device.c
@@ -1470,8 +1470,7 @@ zoran_irq (int irq,
(zr->codec_mode == BUZ_MODE_MOTION_DECOMPRESS ||
zr->codec_mode == BUZ_MODE_MOTION_COMPRESS)) {
if (zr36067_debug > 1 && (!zr->frame_num || zr->JPEG_error)) {
- char sc[] = "0000";
- char sv[5];
+ char sv[BUZ_NUM_STAT_COM + 1];
int i;
printk(KERN_INFO
@@ -1481,12 +1480,9 @@ zoran_irq (int irq,
zr->jpg_settings.field_per_buff,
zr->JPEG_missed);
- strcpy(sv, sc);
- for (i = 0; i < 4; i++) {
- if (le32_to_cpu(zr->stat_com[i]) & 1)
- sv[i] = '1';
- }
- sv[4] = 0;
+ for (i = 0; i < BUZ_NUM_STAT_COM; i++)
+ sv[i] = le32_to_cpu(zr->stat_com[i]) & 1 ? '1' : '0';
+ sv[BUZ_NUM_STAT_COM] = 0;
printk(KERN_INFO
"%s: stat_com=%s queue_state=%ld/%ld/%ld/%ld\n",
ZR_DEVNAME(zr), sv,
diff --git a/drivers/media/video/zoran/zoran_driver.c b/drivers/media/video/zoran/zoran_driver.c
index 3c471a4e3e4a..67a52e844ae6 100644
--- a/drivers/media/video/zoran/zoran_driver.c
+++ b/drivers/media/video/zoran/zoran_driver.c
@@ -49,7 +49,6 @@
#include <linux/module.h>
#include <linux/delay.h>
#include <linux/slab.h>
-#include <linux/smp_lock.h>
#include <linux/pci.h>
#include <linux/vmalloc.h>
#include <linux/wait.h>
@@ -913,7 +912,7 @@ static int zoran_open(struct file *file)
dprintk(2, KERN_INFO "%s: %s(%s, pid=[%d]), users(-)=%d\n",
ZR_DEVNAME(zr), __func__, current->comm, task_pid_nr(current), zr->user + 1);
- lock_kernel();
+ mutex_lock(&zr->other_lock);
if (zr->user >= 2048) {
dprintk(1, KERN_ERR "%s: too many users (%d) on device\n",
@@ -963,14 +962,14 @@ static int zoran_open(struct file *file)
file->private_data = fh;
fh->zr = zr;
zoran_open_init_session(fh);
- unlock_kernel();
+ mutex_unlock(&zr->other_lock);
return 0;
fail_fh:
kfree(fh);
fail_unlock:
- unlock_kernel();
+ mutex_unlock(&zr->other_lock);
dprintk(2, KERN_INFO "%s: open failed (%d), users(-)=%d\n",
ZR_DEVNAME(zr), res, zr->user);
@@ -989,7 +988,7 @@ zoran_close(struct file *file)
/* kernel locks (fs/device.c), so don't do that ourselves
* (prevents deadlocks) */
- /*mutex_lock(&zr->resource_lock);*/
+ mutex_lock(&zr->other_lock);
zoran_close_end_session(fh);
@@ -1023,6 +1022,7 @@ zoran_close(struct file *file)
encoder_call(zr, video, s_routing, 2, 0, 0);
}
}
+ mutex_unlock(&zr->other_lock);
file->private_data = NULL;
kfree(fh->overlay_mask);
@@ -3322,7 +3322,7 @@ zoran_mmap (struct file *file,
mmap_unlock_and_return:
mutex_unlock(&zr->resource_lock);
- return 0;
+ return res;
}
static const struct v4l2_ioctl_ops zoran_ioctl_ops = {
@@ -3370,11 +3370,26 @@ static const struct v4l2_ioctl_ops zoran_ioctl_ops = {
#endif
};
+/* please use zr->resource_lock consistently and kill this wrapper */
+static long zoran_ioctl(struct file *file, unsigned int cmd,
+ unsigned long arg)
+{
+ struct zoran_fh *fh = file->private_data;
+ struct zoran *zr = fh->zr;
+ int ret;
+
+ mutex_lock(&zr->other_lock);
+ ret = video_ioctl2(file, cmd, arg);
+ mutex_unlock(&zr->other_lock);
+
+ return ret;
+}
+
static const struct v4l2_file_operations zoran_fops = {
.owner = THIS_MODULE,
.open = zoran_open,
.release = zoran_close,
- .ioctl = video_ioctl2,
+ .unlocked_ioctl = zoran_ioctl,
.read = zoran_read,
.write = zoran_write,
.mmap = zoran_mmap,
diff --git a/drivers/media/video/zr364xx.c b/drivers/media/video/zr364xx.c
index a82b5bd18d26..7dfb01e9930e 100644
--- a/drivers/media/video/zr364xx.c
+++ b/drivers/media/video/zr364xx.c
@@ -572,7 +572,7 @@ static int zr364xx_got_frame(struct zr364xx_camera *cam, int jpgsize)
DBG("wakeup [buf/i] [%p/%d]\n", buf, buf->vb.i);
unlock:
spin_unlock_irqrestore(&cam->slock, flags);
- return 0;
+ return rc;
}
/* this function moves the usb stream read pipe data
@@ -1304,7 +1304,7 @@ static int zr364xx_open(struct file *file)
NULL, &cam->slock,
cam->type,
V4L2_FIELD_NONE,
- sizeof(struct zr364xx_buffer), cam);
+ sizeof(struct zr364xx_buffer), cam, NULL);
/* Added some delay here, since opening/closing the camera quickly,
* like Ekiga does during its startup, can crash the webcam
diff --git a/drivers/message/fusion/mptfc.c b/drivers/message/fusion/mptfc.c
index e15220ff52fc..d784c36707c0 100644
--- a/drivers/message/fusion/mptfc.c
+++ b/drivers/message/fusion/mptfc.c
@@ -97,8 +97,7 @@ static u8 mptfcInternalCtx = MPT_MAX_PROTOCOL_DRIVERS;
static int mptfc_target_alloc(struct scsi_target *starget);
static int mptfc_slave_alloc(struct scsi_device *sdev);
-static int mptfc_qcmd(struct scsi_cmnd *SCpnt,
- void (*done)(struct scsi_cmnd *));
+static int mptfc_qcmd(struct Scsi_Host *shost, struct scsi_cmnd *SCpnt);
static void mptfc_target_destroy(struct scsi_target *starget);
static void mptfc_set_rport_loss_tmo(struct fc_rport *rport, uint32_t timeout);
static void __devexit mptfc_remove(struct pci_dev *pdev);
@@ -650,7 +649,7 @@ mptfc_slave_alloc(struct scsi_device *sdev)
}
static int
-mptfc_qcmd(struct scsi_cmnd *SCpnt, void (*done)(struct scsi_cmnd *))
+mptfc_qcmd_lck(struct scsi_cmnd *SCpnt, void (*done)(struct scsi_cmnd *))
{
struct mptfc_rport_info *ri;
struct fc_rport *rport = starget_to_rport(scsi_target(SCpnt->device));
@@ -681,6 +680,8 @@ mptfc_qcmd(struct scsi_cmnd *SCpnt, void (*done)(struct scsi_cmnd *))
return mptscsih_qcmd(SCpnt,done);
}
+static DEF_SCSI_QCMD(mptfc_qcmd)
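
mptfc_qcmd() keeps its old host_lock-held body under the _lck suffix and gets its public name back from DEF_SCSI_QCMD; mptsas, mptspi and i2o_scsi receive the same treatment below. The macro is the SCSI midlayer's host-lock push-down shim from include/scsi/scsi_host.h, which at this point expanded to roughly:

#define DEF_SCSI_QCMD(func_name) \
	int func_name(struct Scsi_Host *shost, struct scsi_cmnd *cmd)	\
	{								\
		unsigned long irq_flags;				\
		int rc;							\
		spin_lock_irqsave(shost->host_lock, irq_flags);		\
		scsi_cmd_get_serial(shost, cmd);			\
		rc = func_name##_lck(cmd, cmd->scsi_done);		\
		spin_unlock_irqrestore(shost->host_lock, irq_flags);	\
		return rc;						\
	}

It preserves the legacy locked calling convention per driver while the midlayer itself stops taking host_lock, so drivers can drop the shim individually once their queuecommand paths have been audited for unlocked operation.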
+
/*
* mptfc_display_port_link_speed - displaying link speed
* @ioc: Pointer to MPT_ADAPTER structure
diff --git a/drivers/message/fusion/mptsas.c b/drivers/message/fusion/mptsas.c
index 83a5115f0251..d48c2c6058e1 100644
--- a/drivers/message/fusion/mptsas.c
+++ b/drivers/message/fusion/mptsas.c
@@ -1889,7 +1889,7 @@ mptsas_slave_alloc(struct scsi_device *sdev)
}
static int
-mptsas_qcmd(struct scsi_cmnd *SCpnt, void (*done)(struct scsi_cmnd *))
+mptsas_qcmd_lck(struct scsi_cmnd *SCpnt, void (*done)(struct scsi_cmnd *))
{
MPT_SCSI_HOST *hd;
MPT_ADAPTER *ioc;
@@ -1913,6 +1913,8 @@ mptsas_qcmd(struct scsi_cmnd *SCpnt, void (*done)(struct scsi_cmnd *))
return mptscsih_qcmd(SCpnt,done);
}
+static DEF_SCSI_QCMD(mptsas_qcmd)
+
/**
* mptsas_mptsas_eh_timed_out - resets the scsi_cmnd timeout
* if the device under question is currently in the
diff --git a/drivers/message/fusion/mptspi.c b/drivers/message/fusion/mptspi.c
index 0e2803155ae2..6d9568d2ec59 100644
--- a/drivers/message/fusion/mptspi.c
+++ b/drivers/message/fusion/mptspi.c
@@ -780,7 +780,7 @@ static int mptspi_slave_configure(struct scsi_device *sdev)
}
static int
-mptspi_qcmd(struct scsi_cmnd *SCpnt, void (*done)(struct scsi_cmnd *))
+mptspi_qcmd_lck(struct scsi_cmnd *SCpnt, void (*done)(struct scsi_cmnd *))
{
struct _MPT_SCSI_HOST *hd = shost_priv(SCpnt->device->host);
VirtDevice *vdevice = SCpnt->device->hostdata;
@@ -805,6 +805,8 @@ mptspi_qcmd(struct scsi_cmnd *SCpnt, void (*done)(struct scsi_cmnd *))
return mptscsih_qcmd(SCpnt,done);
}
+static DEF_SCSI_QCMD(mptspi_qcmd)
+
static void mptspi_slave_destroy(struct scsi_device *sdev)
{
struct scsi_target *starget = scsi_target(sdev);
diff --git a/drivers/message/i2o/i2o_scsi.c b/drivers/message/i2o/i2o_scsi.c
index ea6b2197da8a..97bdf82ec905 100644
--- a/drivers/message/i2o/i2o_scsi.c
+++ b/drivers/message/i2o/i2o_scsi.c
@@ -506,7 +506,7 @@ static struct i2o_driver i2o_scsi_driver = {
* Locks: takes the controller lock on error path only
*/
-static int i2o_scsi_queuecommand(struct scsi_cmnd *SCpnt,
+static int i2o_scsi_queuecommand_lck(struct scsi_cmnd *SCpnt,
void (*done) (struct scsi_cmnd *))
{
struct i2o_controller *c;
@@ -688,7 +688,9 @@ static int i2o_scsi_queuecommand(struct scsi_cmnd *SCpnt,
exit:
return rc;
-};
+}
+
+static DEF_SCSI_QCMD(i2o_scsi_queuecommand)
/**
* i2o_scsi_abort - abort a running command
diff --git a/drivers/mfd/88pm860x-core.c b/drivers/mfd/88pm860x-core.c
index 07933f3f7e4c..20895e7a99c9 100644
--- a/drivers/mfd/88pm860x-core.c
+++ b/drivers/mfd/88pm860x-core.c
@@ -158,6 +158,43 @@ static struct mfd_cell onkey_devs[] = {
},
};
+static struct resource codec_resources[] = {
+ {
+ /* Headset microphone insertion or removal */
+ .name = "micin",
+ .start = PM8607_IRQ_MICIN,
+ .end = PM8607_IRQ_MICIN,
+ .flags = IORESOURCE_IRQ,
+ }, {
+ /* Hook-switch press or release */
+ .name = "hook",
+ .start = PM8607_IRQ_HOOK,
+ .end = PM8607_IRQ_HOOK,
+ .flags = IORESOURCE_IRQ,
+ }, {
+ /* Headset insertion or removal */
+ .name = "headset",
+ .start = PM8607_IRQ_HEADSET,
+ .end = PM8607_IRQ_HEADSET,
+ .flags = IORESOURCE_IRQ,
+ }, {
+ /* Audio short */
+ .name = "audio-short",
+ .start = PM8607_IRQ_AUDIO_SHORT,
+ .end = PM8607_IRQ_AUDIO_SHORT,
+ .flags = IORESOURCE_IRQ,
+ },
+};
+
+static struct mfd_cell codec_devs[] = {
+ {
+ .name = "88pm860x-codec",
+ .num_resources = ARRAY_SIZE(codec_resources),
+ .resources = &codec_resources[0],
+ .id = -1,
+ },
+};
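
Each mfd_cell becomes a platform device, and the named IRQ entries in codec_resources[] become that device's resources, so the 88pm860x codec driver can look its interrupts up by name instead of by index. A sketch of the consuming side under assumed names (this is not the actual codec driver):

#include <linux/platform_device.h>
#include <linux/interrupt.h>

static irqreturn_t demo_micin_handler(int irq, void *data)
{
	return IRQ_HANDLED;
}

static int demo_codec_probe(struct platform_device *pdev)
{
	struct resource *res;

	/* Fetch the "micin" IRQ the MFD core attached to this cell. */
	res = platform_get_resource_byname(pdev, IORESOURCE_IRQ, "micin");
	if (!res)
		return -ENODEV;

	return request_threaded_irq(res->start, NULL, demo_micin_handler,
				    IRQF_ONESHOT, "micin", pdev);
}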
+
static struct resource regulator_resources[] = {
PM8607_REG_RESOURCE(BUCK1, BUCK1),
PM8607_REG_RESOURCE(BUCK2, BUCK2),
@@ -608,10 +645,13 @@ static void __devinit device_8607_init(struct pm860x_chip *chip,
dev_err(chip->dev, "Failed to read CHIP ID: %d\n", ret);
goto out;
}
- if ((ret & PM8607_VERSION_MASK) == PM8607_VERSION)
+ switch (ret & PM8607_VERSION_MASK) {
+ case 0x40:
+ case 0x50:
dev_info(chip->dev, "Marvell 88PM8607 (ID: %02x) detected\n",
ret);
- else {
+ break;
+ default:
dev_err(chip->dev, "Failed to detect Marvell 88PM8607. "
"Chip ID: %02x\n", ret);
goto out;
@@ -687,6 +727,13 @@ static void __devinit device_8607_init(struct pm860x_chip *chip,
goto out_dev;
}
+ ret = mfd_add_devices(chip->dev, 0, &codec_devs[0],
+ ARRAY_SIZE(codec_devs),
+ &codec_resources[0], 0);
+ if (ret < 0) {
+ dev_err(chip->dev, "Failed to add codec subdev\n");
+ goto out_dev;
+ }
return;
out_dev:
mfd_remove_devices(chip->dev);
diff --git a/drivers/mfd/Kconfig b/drivers/mfd/Kconfig
index db51ea1c6082..3a1493b8b5e5 100644
--- a/drivers/mfd/Kconfig
+++ b/drivers/mfd/Kconfig
@@ -75,7 +75,7 @@ config MFD_DAVINCI_VOICECODEC
config MFD_DM355EVM_MSP
bool "DaVinci DM355 EVM microcontroller"
- depends on I2C && MACH_DAVINCI_DM355_EVM
+ depends on I2C=y && MACH_DAVINCI_DM355_EVM
help
This driver supports the MSP430 microcontroller used on these
boards. MSP430 firmware manages resets and power sequencing,
@@ -294,14 +294,15 @@ config MFD_MAX8925
to use the functionality of the device.
config MFD_MAX8998
- bool "Maxim Semiconductor MAX8998 PMIC Support"
- depends on I2C=y
+ bool "Maxim Semiconductor MAX8998/National LP3974 PMIC Support"
+ depends on I2C=y && GENERIC_HARDIRQS
select MFD_CORE
help
- Say yes here to support for Maxim Semiconductor MAX8998. This is
- a Power Management IC. This driver provies common support for
- accessing the device, additional drivers must be enabled in order
- to use the functionality of the device.
+ Say yes here to add support for Maxim Semiconductor MAX8998 and
+ National Semiconductor LP3974. This is a Power Management IC.
+ This driver provides common support for accessing the device,
+ additional drivers must be enabled in order to use the functionality
+ of the device.
config MFD_WM8400
tristate "Support Wolfson Microelectronics WM8400"
@@ -314,14 +315,30 @@ config MFD_WM8400
the functionality of the device.
config MFD_WM831X
- bool "Support Wolfson Microelectronics WM831x/2x PMICs"
+ bool
+ depends on GENERIC_HARDIRQS
+
+config MFD_WM831X_I2C
+ bool "Support Wolfson Microelectronics WM831x/2x PMICs with I2C"
select MFD_CORE
+ select MFD_WM831X
depends on I2C=y && GENERIC_HARDIRQS
help
- Support for the Wolfson Microelecronics WM831x and WM832x PMICs.
- This driver provides common support for accessing the device,
- additional drivers must be enabled in order to use the
- functionality of the device.
+ Support for the Wolfson Microelectronics WM831x and WM832x PMICs
+ when controlled using I2C. This driver provides common support
+ for accessing the device, additional drivers must be enabled in
+ order to use the functionality of the device.
+
+config MFD_WM831X_SPI
+ bool "Support Wolfson Microelectronics WM831x/2x PMICs with SPI"
+ select MFD_CORE
+ select MFD_WM831X
+ depends on SPI_MASTER && GENERIC_HARDIRQS
+ help
+ Support for the Wolfson Microelectronics WM831x and WM832x PMICs
+ when controlled using SPI. This driver provides common support
+ for accessing the device, additional drivers must be enabled in
+ order to use the functionality of the device.
config MFD_WM8350
bool
@@ -408,11 +425,16 @@ config MFD_PCF50633
so that function-specific drivers can bind to them.
config MFD_MC13783
- tristate "Support Freescale MC13783"
+ tristate
+
+config MFD_MC13XXX
+ tristate "Support Freescale MC13783 and MC13892"
depends on SPI_MASTER
select MFD_CORE
+ select MFD_MC13783
help
- Support for the Freescale (Atlas) MC13783 PMIC and audio CODEC.
+ Support for the Freescale (Atlas) PMIC and audio CODECs
+ MC13783 and MC13892.
This driver provides common support for accessing the device,
additional drivers must be enabled in order to use the
functionality of the device.
@@ -433,7 +455,7 @@ config PCF50633_GPIO
config ABX500_CORE
bool "ST-Ericsson ABX500 Mixed Signal Circuit register functions"
- default y if ARCH_U300
+ default y if ARCH_U300 || ARCH_U8500
help
Say yes here if you have the ABX500 Mixed Signal IC family
chips. This core driver expose register access functions.
@@ -444,6 +466,7 @@ config ABX500_CORE
config AB3100_CORE
bool "ST-Ericsson AB3100 Mixed Signal Circuit core functions"
depends on I2C=y && ABX500_CORE
+ select MFD_CORE
default y if ARCH_U300
help
Select this to enable the AB3100 Mixed Signal IC core
@@ -473,14 +496,33 @@ config EZX_PCAP
config AB8500_CORE
bool "ST-Ericsson AB8500 Mixed Signal Power Management chip"
- depends on SPI=y && GENERIC_HARDIRQS
+ depends on GENERIC_HARDIRQS && ABX500_CORE && SPI_MASTER && ARCH_U8500
select MFD_CORE
help
Select this option to enable access to AB8500 power management
- chip. This connects to U8500 on the SSP/SPI bus and exports
- read/write functions for the devices to get access to this chip.
+ chip. This connects to U8500 either on the SSP/SPI bus
+ or the I2C bus via PRCMU. It also adds the irq_chip
+ parts for handling the Mixed Signal chip events.
This chip embeds various other multimedia funtionalities as well.
+config AB8500_I2C_CORE
+ bool "AB8500 register access via PRCMU I2C"
+ depends on AB8500_CORE && UX500_SOC_DB8500
+ default y
+ help
+ This enables register access to the AB8500 chip via PRCMU I2C.
+ The AB8500 chip can be accessed via SPI or I2C. On DB8500 hardware
+ the I2C bus is connected to the Power Reset
+ and Management Unit, PRCMU.
+
+config AB8500_DEBUG
+ bool "Enable debug info via debugfs"
+ depends on AB8500_CORE && DEBUG_FS
+ default y if DEBUG_FS
+ help
+ Select this option if you want debug information using the debug
+ filesystem, debugfs.
+
config AB3550_CORE
bool "ST-Ericsson AB3550 Mixed Signal Circuit core functions"
select MFD_CORE
@@ -542,8 +584,8 @@ config MFD_JZ4740_ADC
This driver is necessary for jz4740-battery and jz4740-hwmon driver.
config MFD_TPS6586X
- tristate "TPS6586x Power Management chips"
- depends on I2C && GPIOLIB
+ bool "TPS6586x Power Management chips"
+ depends on I2C=y && GPIOLIB && GENERIC_HARDIRQS
select MFD_CORE
help
If you say yes here you get support for the TPS6586X series of
@@ -555,6 +597,15 @@ config MFD_TPS6586X
This driver can also be built as a module. If so, the module
will be called tps6586x.
+config MFD_VX855
+ tristate "Support for VIA VX855/VX875 integrated south bridge"
+ depends on PCI
+ select MFD_CORE
+ help
+ Say yes here to enable support for various functions of the
+ VIA VX855/VX875 south bridge. You will need to enable the vx855_spi
+ and/or vx855_gpio drivers for this to do anything useful.
+
endif # MFD_SUPPORT
menu "Multimedia Capabilities Port drivers"
diff --git a/drivers/mfd/Makefile b/drivers/mfd/Makefile
index feaeeaeeddb7..f54b3659abbb 100644
--- a/drivers/mfd/Makefile
+++ b/drivers/mfd/Makefile
@@ -24,6 +24,8 @@ obj-$(CONFIG_MFD_TC6393XB) += tc6393xb.o tmio_core.o
obj-$(CONFIG_MFD_WM8400) += wm8400-core.o
wm831x-objs := wm831x-core.o wm831x-irq.o wm831x-otp.o
obj-$(CONFIG_MFD_WM831X) += wm831x.o
+obj-$(CONFIG_MFD_WM831X_I2C) += wm831x-i2c.o
+obj-$(CONFIG_MFD_WM831X_SPI) += wm831x-spi.o
wm8350-objs := wm8350-core.o wm8350-regmap.o wm8350-gpio.o
wm8350-objs += wm8350-irq.o
obj-$(CONFIG_MFD_WM8350) += wm8350.o
@@ -39,7 +41,7 @@ obj-$(CONFIG_TWL4030_POWER) += twl4030-power.o
obj-$(CONFIG_TWL4030_CODEC) += twl4030-codec.o
obj-$(CONFIG_TWL6030_PWM) += twl6030-pwm.o
-obj-$(CONFIG_MFD_MC13783) += mc13783-core.o
+obj-$(CONFIG_MFD_MC13XXX) += mc13xxx-core.o
obj-$(CONFIG_MFD_CORE) += mfd-core.o
@@ -58,7 +60,7 @@ obj-$(CONFIG_UCB1400_CORE) += ucb1400_core.o
obj-$(CONFIG_PMIC_DA903X) += da903x.o
max8925-objs := max8925-core.o max8925-i2c.o
obj-$(CONFIG_MFD_MAX8925) += max8925.o
-obj-$(CONFIG_MFD_MAX8998) += max8998.o
+obj-$(CONFIG_MFD_MAX8998) += max8998.o max8998-irq.o
pcf50633-objs := pcf50633-core.o pcf50633-irq.o
obj-$(CONFIG_MFD_PCF50633) += pcf50633.o
@@ -69,6 +71,8 @@ obj-$(CONFIG_AB3100_CORE) += ab3100-core.o
obj-$(CONFIG_AB3100_OTP) += ab3100-otp.o
obj-$(CONFIG_AB3550_CORE) += ab3550-core.o
obj-$(CONFIG_AB8500_CORE) += ab8500-core.o ab8500-spi.o
+obj-$(CONFIG_AB8500_I2C_CORE) += ab8500-i2c.o
+obj-$(CONFIG_AB8500_DEBUG) += ab8500-debugfs.o
obj-$(CONFIG_MFD_TIMBERDALE) += timberdale.o
obj-$(CONFIG_PMIC_ADP5520) += adp5520.o
obj-$(CONFIG_LPC_SCH) += lpc_sch.o
@@ -76,3 +80,4 @@ obj-$(CONFIG_MFD_RDC321X) += rdc321x-southbridge.o
obj-$(CONFIG_MFD_JANZ_CMODIO) += janz-cmodio.o
obj-$(CONFIG_MFD_JZ4740_ADC) += jz4740-adc.o
obj-$(CONFIG_MFD_TPS6586X) += tps6586x.o
+obj-$(CONFIG_MFD_VX855) += vx855.o
diff --git a/drivers/mfd/ab3100-core.c b/drivers/mfd/ab3100-core.c
index b048ecc56db9..4193af5f2743 100644
--- a/drivers/mfd/ab3100-core.c
+++ b/drivers/mfd/ab3100-core.c
@@ -19,6 +19,7 @@
#include <linux/debugfs.h>
#include <linux/seq_file.h>
#include <linux/uaccess.h>
+#include <linux/mfd/core.h>
#include <linux/mfd/abx500.h>
/* These are the only registers inside AB3100 used in this main file */
@@ -146,7 +147,7 @@ static int ab3100_set_test_register_interruptible(struct ab3100 *ab3100,
}
static int ab3100_get_register_interruptible(struct ab3100 *ab3100,
- u8 reg, u8 *regval)
+ u8 reg, u8 *regval)
{
int err;
@@ -202,7 +203,7 @@ static int ab3100_get_register_interruptible(struct ab3100 *ab3100,
}
static int get_register_interruptible(struct device *dev, u8 bank, u8 reg,
- u8 *value)
+ u8 *value)
{
struct ab3100 *ab3100 = dev_get_drvdata(dev->parent);
@@ -666,7 +667,7 @@ struct ab3100_init_setting {
u8 setting;
};
-static const struct ab3100_init_setting __initconst
+static const struct ab3100_init_setting __devinitconst
ab3100_init_settings[] = {
{
.abreg = AB3100_MCA,
@@ -713,7 +714,7 @@ ab3100_init_settings[] = {
},
};
-static int __init ab3100_setup(struct ab3100 *ab3100)
+static int __devinit ab3100_setup(struct ab3100 *ab3100)
{
int err = 0;
int i;
@@ -743,52 +744,64 @@ static int __init ab3100_setup(struct ab3100 *ab3100)
return err;
}
-/*
- * Here we define all the platform devices that appear
- * as children of the AB3100. These are regular platform
- * devices with the IORESOURCE_IO .start and .end set
- * to correspond to the internal AB3100 register range
- * mapping to the corresponding subdevice.
- */
-
-#define AB3100_DEVICE(devname, devid) \
-static struct platform_device ab3100_##devname##_device = { \
- .name = devid, \
- .id = -1, \
-}
-
-/* This lists all the subdevices */
-AB3100_DEVICE(dac, "ab3100-dac");
-AB3100_DEVICE(leds, "ab3100-leds");
-AB3100_DEVICE(power, "ab3100-power");
-AB3100_DEVICE(regulators, "ab3100-regulators");
-AB3100_DEVICE(sim, "ab3100-sim");
-AB3100_DEVICE(uart, "ab3100-uart");
-AB3100_DEVICE(rtc, "ab3100-rtc");
-AB3100_DEVICE(charger, "ab3100-charger");
-AB3100_DEVICE(boost, "ab3100-boost");
-AB3100_DEVICE(adc, "ab3100-adc");
-AB3100_DEVICE(fuelgauge, "ab3100-fuelgauge");
-AB3100_DEVICE(vibrator, "ab3100-vibrator");
-AB3100_DEVICE(otp, "ab3100-otp");
-AB3100_DEVICE(codec, "ab3100-codec");
-
-static struct platform_device *
-ab3100_platform_devs[] = {
- &ab3100_dac_device,
- &ab3100_leds_device,
- &ab3100_power_device,
- &ab3100_regulators_device,
- &ab3100_sim_device,
- &ab3100_uart_device,
- &ab3100_rtc_device,
- &ab3100_charger_device,
- &ab3100_boost_device,
- &ab3100_adc_device,
- &ab3100_fuelgauge_device,
- &ab3100_vibrator_device,
- &ab3100_otp_device,
- &ab3100_codec_device,
+/* The subdevices of the AB3100 */
+static struct mfd_cell ab3100_devs[] = {
+ {
+ .name = "ab3100-dac",
+ .id = -1,
+ },
+ {
+ .name = "ab3100-leds",
+ .id = -1,
+ },
+ {
+ .name = "ab3100-power",
+ .id = -1,
+ },
+ {
+ .name = "ab3100-regulators",
+ .id = -1,
+ },
+ {
+ .name = "ab3100-sim",
+ .id = -1,
+ },
+ {
+ .name = "ab3100-uart",
+ .id = -1,
+ },
+ {
+ .name = "ab3100-rtc",
+ .id = -1,
+ },
+ {
+ .name = "ab3100-charger",
+ .id = -1,
+ },
+ {
+ .name = "ab3100-boost",
+ .id = -1,
+ },
+ {
+ .name = "ab3100-adc",
+ .id = -1,
+ },
+ {
+ .name = "ab3100-fuelgauge",
+ .id = -1,
+ },
+ {
+ .name = "ab3100-vibrator",
+ .id = -1,
+ },
+ {
+ .name = "ab3100-otp",
+ .id = -1,
+ },
+ {
+ .name = "ab3100-codec",
+ .id = -1,
+ },
};
struct ab_family_id {
@@ -796,7 +809,7 @@ struct ab_family_id {
char *name;
};
-static const struct ab_family_id ids[] __initdata = {
+static const struct ab_family_id ids[] __devinitdata = {
/* AB3100 */
{
.id = 0xc0,
@@ -850,8 +863,8 @@ static const struct ab_family_id ids[] __initdata = {
},
};
-static int __init ab3100_probe(struct i2c_client *client,
- const struct i2c_device_id *id)
+static int __devinit ab3100_probe(struct i2c_client *client,
+ const struct i2c_device_id *id)
{
struct ab3100 *ab3100;
struct ab3100_platform_data *ab3100_plf_data =
@@ -935,18 +948,14 @@ static int __init ab3100_probe(struct i2c_client *client,
if (err)
goto exit_no_ops;
- /* Set parent and a pointer back to the container in device data */
- for (i = 0; i < ARRAY_SIZE(ab3100_platform_devs); i++) {
- ab3100_platform_devs[i]->dev.parent =
- &client->dev;
- ab3100_platform_devs[i]->dev.platform_data =
- ab3100_plf_data;
- platform_set_drvdata(ab3100_platform_devs[i], ab3100);
+ /* Set up and register the platform devices. */
+ for (i = 0; i < ARRAY_SIZE(ab3100_devs); i++) {
+ ab3100_devs[i].platform_data = ab3100_plf_data;
+ ab3100_devs[i].data_size = sizeof(struct ab3100_platform_data);
}
- /* Register the platform devices */
- platform_add_devices(ab3100_platform_devs,
- ARRAY_SIZE(ab3100_platform_devs));
+ err = mfd_add_devices(&client->dev, 0, ab3100_devs,
+ ARRAY_SIZE(ab3100_devs), NULL, 0);
ab3100_setup_debugfs(ab3100);
@@ -962,14 +971,12 @@ static int __init ab3100_probe(struct i2c_client *client,
return err;
}
-static int __exit ab3100_remove(struct i2c_client *client)
+static int __devexit ab3100_remove(struct i2c_client *client)
{
struct ab3100 *ab3100 = i2c_get_clientdata(client);
- int i;
/* Unregister subdevices */
- for (i = 0; i < ARRAY_SIZE(ab3100_platform_devs); i++)
- platform_device_unregister(ab3100_platform_devs[i]);
+ mfd_remove_devices(&client->dev);
ab3100_remove_debugfs();
i2c_unregister_device(ab3100->testreg_client);
@@ -996,7 +1003,7 @@ static struct i2c_driver ab3100_driver = {
},
.id_table = ab3100_id,
.probe = ab3100_probe,
- .remove = __exit_p(ab3100_remove),
+ .remove = __devexit_p(ab3100_remove),
};
static int __init ab3100_i2c_init(void)
diff --git a/drivers/mfd/ab8500-core.c b/drivers/mfd/ab8500-core.c
index defa786dee34..dbe1c93c1af3 100644
--- a/drivers/mfd/ab8500-core.c
+++ b/drivers/mfd/ab8500-core.c
@@ -4,6 +4,7 @@
* License Terms: GNU General Public License v2
* Author: Srinidhi Kasagar <srinidhi.kasagar@stericsson.com>
* Author: Rabin Vincent <rabin.vincent@stericsson.com>
+ * Changes: Mattias Wallin <mattias.wallin@stericsson.com>
*/
#include <linux/kernel.h>
@@ -15,6 +16,7 @@
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/mfd/core.h>
+#include <linux/mfd/abx500.h>
#include <linux/mfd/ab8500.h>
#include <linux/regulator/ab8500.h>
@@ -22,71 +24,71 @@
* Interrupt register offsets
* Bank : 0x0E
*/
-#define AB8500_IT_SOURCE1_REG 0x0E00
-#define AB8500_IT_SOURCE2_REG 0x0E01
-#define AB8500_IT_SOURCE3_REG 0x0E02
-#define AB8500_IT_SOURCE4_REG 0x0E03
-#define AB8500_IT_SOURCE5_REG 0x0E04
-#define AB8500_IT_SOURCE6_REG 0x0E05
-#define AB8500_IT_SOURCE7_REG 0x0E06
-#define AB8500_IT_SOURCE8_REG 0x0E07
-#define AB8500_IT_SOURCE19_REG 0x0E12
-#define AB8500_IT_SOURCE20_REG 0x0E13
-#define AB8500_IT_SOURCE21_REG 0x0E14
-#define AB8500_IT_SOURCE22_REG 0x0E15
-#define AB8500_IT_SOURCE23_REG 0x0E16
-#define AB8500_IT_SOURCE24_REG 0x0E17
+#define AB8500_IT_SOURCE1_REG 0x00
+#define AB8500_IT_SOURCE2_REG 0x01
+#define AB8500_IT_SOURCE3_REG 0x02
+#define AB8500_IT_SOURCE4_REG 0x03
+#define AB8500_IT_SOURCE5_REG 0x04
+#define AB8500_IT_SOURCE6_REG 0x05
+#define AB8500_IT_SOURCE7_REG 0x06
+#define AB8500_IT_SOURCE8_REG 0x07
+#define AB8500_IT_SOURCE19_REG 0x12
+#define AB8500_IT_SOURCE20_REG 0x13
+#define AB8500_IT_SOURCE21_REG 0x14
+#define AB8500_IT_SOURCE22_REG 0x15
+#define AB8500_IT_SOURCE23_REG 0x16
+#define AB8500_IT_SOURCE24_REG 0x17
/*
* latch registers
*/
-#define AB8500_IT_LATCH1_REG 0x0E20
-#define AB8500_IT_LATCH2_REG 0x0E21
-#define AB8500_IT_LATCH3_REG 0x0E22
-#define AB8500_IT_LATCH4_REG 0x0E23
-#define AB8500_IT_LATCH5_REG 0x0E24
-#define AB8500_IT_LATCH6_REG 0x0E25
-#define AB8500_IT_LATCH7_REG 0x0E26
-#define AB8500_IT_LATCH8_REG 0x0E27
-#define AB8500_IT_LATCH9_REG 0x0E28
-#define AB8500_IT_LATCH10_REG 0x0E29
-#define AB8500_IT_LATCH19_REG 0x0E32
-#define AB8500_IT_LATCH20_REG 0x0E33
-#define AB8500_IT_LATCH21_REG 0x0E34
-#define AB8500_IT_LATCH22_REG 0x0E35
-#define AB8500_IT_LATCH23_REG 0x0E36
-#define AB8500_IT_LATCH24_REG 0x0E37
+#define AB8500_IT_LATCH1_REG 0x20
+#define AB8500_IT_LATCH2_REG 0x21
+#define AB8500_IT_LATCH3_REG 0x22
+#define AB8500_IT_LATCH4_REG 0x23
+#define AB8500_IT_LATCH5_REG 0x24
+#define AB8500_IT_LATCH6_REG 0x25
+#define AB8500_IT_LATCH7_REG 0x26
+#define AB8500_IT_LATCH8_REG 0x27
+#define AB8500_IT_LATCH9_REG 0x28
+#define AB8500_IT_LATCH10_REG 0x29
+#define AB8500_IT_LATCH19_REG 0x32
+#define AB8500_IT_LATCH20_REG 0x33
+#define AB8500_IT_LATCH21_REG 0x34
+#define AB8500_IT_LATCH22_REG 0x35
+#define AB8500_IT_LATCH23_REG 0x36
+#define AB8500_IT_LATCH24_REG 0x37
/*
* mask registers
*/
-#define AB8500_IT_MASK1_REG 0x0E40
-#define AB8500_IT_MASK2_REG 0x0E41
-#define AB8500_IT_MASK3_REG 0x0E42
-#define AB8500_IT_MASK4_REG 0x0E43
-#define AB8500_IT_MASK5_REG 0x0E44
-#define AB8500_IT_MASK6_REG 0x0E45
-#define AB8500_IT_MASK7_REG 0x0E46
-#define AB8500_IT_MASK8_REG 0x0E47
-#define AB8500_IT_MASK9_REG 0x0E48
-#define AB8500_IT_MASK10_REG 0x0E49
-#define AB8500_IT_MASK11_REG 0x0E4A
-#define AB8500_IT_MASK12_REG 0x0E4B
-#define AB8500_IT_MASK13_REG 0x0E4C
-#define AB8500_IT_MASK14_REG 0x0E4D
-#define AB8500_IT_MASK15_REG 0x0E4E
-#define AB8500_IT_MASK16_REG 0x0E4F
-#define AB8500_IT_MASK17_REG 0x0E50
-#define AB8500_IT_MASK18_REG 0x0E51
-#define AB8500_IT_MASK19_REG 0x0E52
-#define AB8500_IT_MASK20_REG 0x0E53
-#define AB8500_IT_MASK21_REG 0x0E54
-#define AB8500_IT_MASK22_REG 0x0E55
-#define AB8500_IT_MASK23_REG 0x0E56
-#define AB8500_IT_MASK24_REG 0x0E57
-
-#define AB8500_REV_REG 0x1080
+#define AB8500_IT_MASK1_REG 0x40
+#define AB8500_IT_MASK2_REG 0x41
+#define AB8500_IT_MASK3_REG 0x42
+#define AB8500_IT_MASK4_REG 0x43
+#define AB8500_IT_MASK5_REG 0x44
+#define AB8500_IT_MASK6_REG 0x45
+#define AB8500_IT_MASK7_REG 0x46
+#define AB8500_IT_MASK8_REG 0x47
+#define AB8500_IT_MASK9_REG 0x48
+#define AB8500_IT_MASK10_REG 0x49
+#define AB8500_IT_MASK11_REG 0x4A
+#define AB8500_IT_MASK12_REG 0x4B
+#define AB8500_IT_MASK13_REG 0x4C
+#define AB8500_IT_MASK14_REG 0x4D
+#define AB8500_IT_MASK15_REG 0x4E
+#define AB8500_IT_MASK16_REG 0x4F
+#define AB8500_IT_MASK17_REG 0x50
+#define AB8500_IT_MASK18_REG 0x51
+#define AB8500_IT_MASK19_REG 0x52
+#define AB8500_IT_MASK20_REG 0x53
+#define AB8500_IT_MASK21_REG 0x54
+#define AB8500_IT_MASK22_REG 0x55
+#define AB8500_IT_MASK23_REG 0x56
+#define AB8500_IT_MASK24_REG 0x57
+
+#define AB8500_REV_REG 0x80
/*
* Map interrupt numbers to the LATCH and MASK register offsets, Interrupt
@@ -99,96 +101,132 @@ static const int ab8500_irq_regoffset[AB8500_NUM_IRQ_REGS] = {
0, 1, 2, 3, 4, 6, 7, 8, 9, 18, 19, 20, 21,
};
-static int __ab8500_write(struct ab8500 *ab8500, u16 addr, u8 data)
+static int ab8500_get_chip_id(struct device *dev)
+{
+ struct ab8500 *ab8500 = dev_get_drvdata(dev->parent);
+ return (int)ab8500->chip_id;
+}
+
+static int set_register_interruptible(struct ab8500 *ab8500, u8 bank,
+ u8 reg, u8 data)
{
int ret;
+ /*
+ * Put the u8 bank and u8 register together into a u16.
+ * The bank goes in the upper 8 bits and the register in the lower 8 bits.
+ */
+ u16 addr = ((u16)bank) << 8 | reg;
dev_vdbg(ab8500->dev, "wr: addr %#x <= %#x\n", addr, data);
+ ret = mutex_lock_interruptible(&ab8500->lock);
+ if (ret)
+ return ret;
+
ret = ab8500->write(ab8500, addr, data);
if (ret < 0)
dev_err(ab8500->dev, "failed to write reg %#x: %d\n",
addr, ret);
+ mutex_unlock(&ab8500->lock);
return ret;
}
-/**
- * ab8500_write() - write an AB8500 register
- * @ab8500: device to write to
- * @addr: address of the register
- * @data: value to write
- */
-int ab8500_write(struct ab8500 *ab8500, u16 addr, u8 data)
+static int ab8500_set_register(struct device *dev, u8 bank,
+ u8 reg, u8 value)
{
- int ret;
-
- mutex_lock(&ab8500->lock);
- ret = __ab8500_write(ab8500, addr, data);
- mutex_unlock(&ab8500->lock);
+ struct ab8500 *ab8500 = dev_get_drvdata(dev->parent);
- return ret;
+ return set_register_interruptible(ab8500, bank, reg, value);
}
-EXPORT_SYMBOL_GPL(ab8500_write);
-static int __ab8500_read(struct ab8500 *ab8500, u16 addr)
+static int get_register_interruptible(struct ab8500 *ab8500, u8 bank,
+ u8 reg, u8 *value)
{
int ret;
+ /* put the u8 bank and u8 reg together into a u16.
+ * bank in the upper 8 bits and reg in the lower 8 bits */
+ u16 addr = ((u16)bank) << 8 | reg;
+
+ ret = mutex_lock_interruptible(&ab8500->lock);
+ if (ret)
+ return ret;
ret = ab8500->read(ab8500, addr);
if (ret < 0)
dev_err(ab8500->dev, "failed to read reg %#x: %d\n",
addr, ret);
+ else
+ *value = ret;
+ mutex_unlock(&ab8500->lock);
dev_vdbg(ab8500->dev, "rd: addr %#x => data %#x\n", addr, ret);
return ret;
}
-/**
- * ab8500_read() - read an AB8500 register
- * @ab8500: device to read from
- * @addr: address of the register
- */
-int ab8500_read(struct ab8500 *ab8500, u16 addr)
+static int ab8500_get_register(struct device *dev, u8 bank,
+ u8 reg, u8 *value)
{
- int ret;
-
- mutex_lock(&ab8500->lock);
- ret = __ab8500_read(ab8500, addr);
- mutex_unlock(&ab8500->lock);
+ struct ab8500 *ab8500 = dev_get_drvdata(dev->parent);
- return ret;
+ return get_register_interruptible(ab8500, bank, reg, value);
}
-EXPORT_SYMBOL_GPL(ab8500_read);
-
-/**
- * ab8500_set_bits() - set a bitfield in an AB8500 register
- * @ab8500: device to read from
- * @addr: address of the register
- * @mask: mask of the bitfield to modify
- * @data: value to set to the bitfield
- */
-int ab8500_set_bits(struct ab8500 *ab8500, u16 addr, u8 mask, u8 data)
+
+static int mask_and_set_register_interruptible(struct ab8500 *ab8500, u8 bank,
+ u8 reg, u8 bitmask, u8 bitvalues)
{
int ret;
+ u8 data;
+ /* put the u8 bank and u8 reg together into a u16.
+ * bank in the upper 8 bits and reg in the lower 8 bits */
+ u16 addr = ((u16)bank) << 8 | reg;
- mutex_lock(&ab8500->lock);
+ ret = mutex_lock_interruptible(&ab8500->lock);
+ if (ret)
+ return ret;
- ret = __ab8500_read(ab8500, addr);
- if (ret < 0)
+ ret = ab8500->read(ab8500, addr);
+ if (ret < 0) {
+ dev_err(ab8500->dev, "failed to read reg %#x: %d\n",
+ addr, ret);
goto out;
+ }
- ret &= ~mask;
- ret |= data;
+ data = (u8)ret;
+ data = (~bitmask & data) | (bitmask & bitvalues);
- ret = __ab8500_write(ab8500, addr, ret);
+ ret = ab8500->write(ab8500, addr, data);
+ if (ret < 0)
+ dev_err(ab8500->dev, "failed to write reg %#x: %d\n",
+ addr, ret);
+ dev_vdbg(ab8500->dev, "mask: addr %#x => data %#x\n", addr, data);
out:
mutex_unlock(&ab8500->lock);
return ret;
}
-EXPORT_SYMBOL_GPL(ab8500_set_bits);
+
+static int ab8500_mask_and_set_register(struct device *dev,
+ u8 bank, u8 reg, u8 bitmask, u8 bitvalues)
+{
+ struct ab8500 *ab8500 = dev_get_drvdata(dev->parent);
+
+ return mask_and_set_register_interruptible(ab8500, bank, reg,
+ bitmask, bitvalues);
+
+}
+
+static struct abx500_ops ab8500_ops = {
+ .get_chip_id = ab8500_get_chip_id,
+ .get_register = ab8500_get_register,
+ .set_register = ab8500_set_register,
+ .get_register_page = NULL,
+ .set_register_page = NULL,
+ .mask_and_set_register = ab8500_mask_and_set_register,
+ .event_registers_startup_state_get = NULL,
+ .startup_irq_enabled = NULL,
+};
static void ab8500_irq_lock(unsigned int irq)
{
@@ -213,7 +251,7 @@ static void ab8500_irq_sync_unlock(unsigned int irq)
ab8500->oldmask[i] = new;
reg = AB8500_IT_MASK1_REG + ab8500_irq_regoffset[i];
- ab8500_write(ab8500, reg, new);
+ set_register_interruptible(ab8500, AB8500_INTERRUPT, reg, new);
}
mutex_unlock(&ab8500->irq_lock);
@@ -257,9 +295,11 @@ static irqreturn_t ab8500_irq(int irq, void *dev)
for (i = 0; i < AB8500_NUM_IRQ_REGS; i++) {
int regoffset = ab8500_irq_regoffset[i];
int status;
+ u8 value;
- status = ab8500_read(ab8500, AB8500_IT_LATCH1_REG + regoffset);
- if (status <= 0)
+ status = get_register_interruptible(ab8500, AB8500_INTERRUPT,
+ AB8500_IT_LATCH1_REG + regoffset, &value);
+ if (status < 0 || value == 0)
continue;
do {
@@ -267,8 +307,8 @@ static irqreturn_t ab8500_irq(int irq, void *dev)
int line = i * 8 + bit;
handle_nested_irq(ab8500->irq_base + line);
- status &= ~(1 << bit);
- } while (status);
+ value &= ~(1 << bit);
+ } while (value);
}
return IRQ_HANDLED;
@@ -338,7 +378,27 @@ static struct resource ab8500_rtc_resources[] = {
},
};
+static struct resource ab8500_poweronkey_db_resources[] = {
+ {
+ .name = "ONKEY_DBF",
+ .start = AB8500_INT_PON_KEY1DB_F,
+ .end = AB8500_INT_PON_KEY1DB_F,
+ .flags = IORESOURCE_IRQ,
+ },
+ {
+ .name = "ONKEY_DBR",
+ .start = AB8500_INT_PON_KEY1DB_R,
+ .end = AB8500_INT_PON_KEY1DB_R,
+ .flags = IORESOURCE_IRQ,
+ },
+};
+
static struct mfd_cell ab8500_devs[] = {
+#ifdef CONFIG_DEBUG_FS
+ {
+ .name = "ab8500-debug",
+ },
+#endif
{
.name = "ab8500-gpadc",
.num_resources = ARRAY_SIZE(ab8500_gpadc_resources),
@@ -349,11 +409,27 @@ static struct mfd_cell ab8500_devs[] = {
.num_resources = ARRAY_SIZE(ab8500_rtc_resources),
.resources = ab8500_rtc_resources,
},
+ {
+ .name = "ab8500-pwm",
+ .id = 1,
+ },
+ {
+ .name = "ab8500-pwm",
+ .id = 2,
+ },
+ {
+ .name = "ab8500-pwm",
+ .id = 3,
+ },
{ .name = "ab8500-charger", },
{ .name = "ab8500-audio", },
{ .name = "ab8500-usb", },
- { .name = "ab8500-pwm", },
{ .name = "ab8500-regulator", },
+ {
+ .name = "ab8500-poweron-key",
+ .num_resources = ARRAY_SIZE(ab8500_poweronkey_db_resources),
+ .resources = ab8500_poweronkey_db_resources,
+ },
};
int __devinit ab8500_init(struct ab8500 *ab8500)
@@ -361,6 +437,7 @@ int __devinit ab8500_init(struct ab8500 *ab8500)
struct ab8500_platform_data *plat = dev_get_platdata(ab8500->dev);
int ret;
int i;
+ u8 value;
if (plat)
ab8500->irq_base = plat->irq_base;
@@ -368,7 +445,8 @@ int __devinit ab8500_init(struct ab8500 *ab8500)
mutex_init(&ab8500->lock);
mutex_init(&ab8500->irq_lock);
- ret = ab8500_read(ab8500, AB8500_REV_REG);
+ ret = get_register_interruptible(ab8500, AB8500_MISC,
+ AB8500_REV_REG, &value);
if (ret < 0)
return ret;
@@ -377,28 +455,37 @@ int __devinit ab8500_init(struct ab8500 *ab8500)
* 0x10 - Cut 1.0
* 0x11 - Cut 1.1
*/
- if (ret == 0x0 || ret == 0x10 || ret == 0x11) {
- ab8500->revision = ret;
- dev_info(ab8500->dev, "detected chip, revision: %#x\n", ret);
+ if (value == 0x0 || value == 0x10 || value == 0x11) {
+ ab8500->revision = value;
+ dev_info(ab8500->dev, "detected chip, revision: %#x\n", value);
} else {
- dev_err(ab8500->dev, "unknown chip, revision: %#x\n", ret);
+ dev_err(ab8500->dev, "unknown chip, revision: %#x\n", value);
return -EINVAL;
}
+ ab8500->chip_id = value;
if (plat && plat->init)
plat->init(ab8500);
/* Clear and mask all interrupts */
for (i = 0; i < 10; i++) {
- ab8500_read(ab8500, AB8500_IT_LATCH1_REG + i);
- ab8500_write(ab8500, AB8500_IT_MASK1_REG + i, 0xff);
+ get_register_interruptible(ab8500, AB8500_INTERRUPT,
+ AB8500_IT_LATCH1_REG + i, &value);
+ set_register_interruptible(ab8500, AB8500_INTERRUPT,
+ AB8500_IT_MASK1_REG + i, 0xff);
}
for (i = 18; i < 24; i++) {
- ab8500_read(ab8500, AB8500_IT_LATCH1_REG + i);
- ab8500_write(ab8500, AB8500_IT_MASK1_REG + i, 0xff);
+ get_register_interruptible(ab8500, AB8500_INTERRUPT,
+ AB8500_IT_LATCH1_REG + i, &value);
+ set_register_interruptible(ab8500, AB8500_INTERRUPT,
+ AB8500_IT_MASK1_REG + i, 0xff);
}
+ ret = abx500_register_ops(ab8500->dev, &ab8500_ops);
+ if (ret)
+ return ret;
+
for (i = 0; i < AB8500_NUM_IRQ_REGS; i++)
ab8500->mask[i] = ab8500->oldmask[i] = 0xff;
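The accessor rework above routes every AB8500 register access through a (bank, register) pair that is packed into the 16-bit address the underlying transport (SPI, or PRCMU I2C) expects: bank in the upper byte, register offset in the lower byte. A minimal sketch of that convention, with illustrative helper names that are not part of this patch (the patch open-codes the packing in each accessor):

#include <linux/types.h>

/* Illustrative helpers only -- not the driver's API. */
static inline u16 ab8500_pack_addr(u8 bank, u8 reg)
{
	/* bank in the upper 8 bits, register offset in the lower 8 bits */
	return ((u16)bank << 8) | reg;
}

static inline u8 ab8500_addr_bank(u16 addr)
{
	return addr >> 8;	/* e.g. 0x0E for the interrupt register bank */
}

static inline u8 ab8500_addr_reg(u16 addr)
{
	return addr & 0xff;
}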
diff --git a/drivers/mfd/ab8500-debugfs.c b/drivers/mfd/ab8500-debugfs.c
new file mode 100644
index 000000000000..8d1e05a39815
--- /dev/null
+++ b/drivers/mfd/ab8500-debugfs.c
@@ -0,0 +1,652 @@
+/*
+ * Copyright (C) ST-Ericsson SA 2010
+ *
+ * Author: Mattias Wallin <mattias.wallin@stericsson.com> for ST-Ericsson.
+ * License Terms: GNU General Public License v2
+ */
+
+#include <linux/seq_file.h>
+#include <linux/uaccess.h>
+#include <linux/fs.h>
+#include <linux/debugfs.h>
+#include <linux/platform_device.h>
+
+#include <linux/mfd/abx500.h>
+#include <linux/mfd/ab8500.h>
+
+static u32 debug_bank;
+static u32 debug_address;
+
+/**
+ * struct ab8500_reg_range
+ * @first: the first address of the range
+ * @last: the last address of the range
+ * @perm: access permissions for the range
+ */
+struct ab8500_reg_range {
+ u8 first;
+ u8 last;
+ u8 perm;
+};
+
+/**
+ * struct ab8500_i2c_ranges
+ * @num_ranges: the number of ranges in the list
+ * @bankid: bank identifier
+ * @range: the list of register ranges
+ */
+struct ab8500_i2c_ranges {
+ u8 num_ranges;
+ u8 bankid;
+ const struct ab8500_reg_range *range;
+};
+
+#define AB8500_NAME_STRING "ab8500"
+#define AB8500_NUM_BANKS 22
+
+#define AB8500_REV_REG 0x80
+
+static struct ab8500_i2c_ranges debug_ranges[AB8500_NUM_BANKS] = {
+ [0x0] = {
+ .num_ranges = 0,
+ .range = 0,
+ },
+ [AB8500_SYS_CTRL1_BLOCK] = {
+ .num_ranges = 3,
+ .range = (struct ab8500_reg_range[]) {
+ {
+ .first = 0x00,
+ .last = 0x02,
+ },
+ {
+ .first = 0x42,
+ .last = 0x42,
+ },
+ {
+ .first = 0x80,
+ .last = 0x81,
+ },
+ },
+ },
+ [AB8500_SYS_CTRL2_BLOCK] = {
+ .num_ranges = 4,
+ .range = (struct ab8500_reg_range[]) {
+ {
+ .first = 0x00,
+ .last = 0x0D,
+ },
+ {
+ .first = 0x0F,
+ .last = 0x17,
+ },
+ {
+ .first = 0x30,
+ .last = 0x30,
+ },
+ {
+ .first = 0x32,
+ .last = 0x33,
+ },
+ },
+ },
+ [AB8500_REGU_CTRL1] = {
+ .num_ranges = 3,
+ .range = (struct ab8500_reg_range[]) {
+ {
+ .first = 0x00,
+ .last = 0x00,
+ },
+ {
+ .first = 0x03,
+ .last = 0x10,
+ },
+ {
+ .first = 0x80,
+ .last = 0x84,
+ },
+ },
+ },
+ [AB8500_REGU_CTRL2] = {
+ .num_ranges = 5,
+ .range = (struct ab8500_reg_range[]) {
+ {
+ .first = 0x00,
+ .last = 0x15,
+ },
+ {
+ .first = 0x17,
+ .last = 0x19,
+ },
+ {
+ .first = 0x1B,
+ .last = 0x1D,
+ },
+ {
+ .first = 0x1F,
+ .last = 0x22,
+ },
+ {
+ .first = 0x40,
+ .last = 0x44,
+ },
+ /* 0x80-0x8B are SIM registers and should
+ * not be accessed from here */
+ },
+ },
+ [AB8500_USB] = {
+ .num_ranges = 2,
+ .range = (struct ab8500_reg_range[]) {
+ {
+ .first = 0x80,
+ .last = 0x83,
+ },
+ {
+ .first = 0x87,
+ .last = 0x8A,
+ },
+ },
+ },
+ [AB8500_TVOUT] = {
+ .num_ranges = 9,
+ .range = (struct ab8500_reg_range[]) {
+ {
+ .first = 0x00,
+ .last = 0x12,
+ },
+ {
+ .first = 0x15,
+ .last = 0x17,
+ },
+ {
+ .first = 0x19,
+ .last = 0x21,
+ },
+ {
+ .first = 0x27,
+ .last = 0x2C,
+ },
+ {
+ .first = 0x41,
+ .last = 0x41,
+ },
+ {
+ .first = 0x45,
+ .last = 0x5B,
+ },
+ {
+ .first = 0x5D,
+ .last = 0x5D,
+ },
+ {
+ .first = 0x69,
+ .last = 0x69,
+ },
+ {
+ .first = 0x80,
+ .last = 0x81,
+ },
+ },
+ },
+ [AB8500_DBI] = {
+ .num_ranges = 0,
+ .range = 0,
+ },
+ [AB8500_ECI_AV_ACC] = {
+ .num_ranges = 1,
+ .range = (struct ab8500_reg_range[]) {
+ {
+ .first = 0x80,
+ .last = 0x82,
+ },
+ },
+ },
+ [0x9] = {
+ .num_ranges = 0,
+ .range = 0,
+ },
+ [AB8500_GPADC] = {
+ .num_ranges = 1,
+ .range = (struct ab8500_reg_range[]) {
+ {
+ .first = 0x00,
+ .last = 0x08,
+ },
+ },
+ },
+ [AB8500_CHARGER] = {
+ .num_ranges = 8,
+ .range = (struct ab8500_reg_range[]) {
+ {
+ .first = 0x00,
+ .last = 0x03,
+ },
+ {
+ .first = 0x05,
+ .last = 0x05,
+ },
+ {
+ .first = 0x40,
+ .last = 0x40,
+ },
+ {
+ .first = 0x42,
+ .last = 0x42,
+ },
+ {
+ .first = 0x44,
+ .last = 0x44,
+ },
+ {
+ .first = 0x50,
+ .last = 0x55,
+ },
+ {
+ .first = 0x80,
+ .last = 0x82,
+ },
+ {
+ .first = 0xC0,
+ .last = 0xC2,
+ },
+ },
+ },
+ [AB8500_GAS_GAUGE] = {
+ .num_ranges = 3,
+ .range = (struct ab8500_reg_range[]) {
+ {
+ .first = 0x00,
+ .last = 0x00,
+ },
+ {
+ .first = 0x07,
+ .last = 0x0A,
+ },
+ {
+ .first = 0x10,
+ .last = 0x14,
+ },
+ },
+ },
+ [AB8500_AUDIO] = {
+ .num_ranges = 1,
+ .range = (struct ab8500_reg_range[]) {
+ {
+ .first = 0x00,
+ .last = 0x6F,
+ },
+ },
+ },
+ [AB8500_INTERRUPT] = {
+ .num_ranges = 0,
+ .range = 0,
+ },
+ [AB8500_RTC] = {
+ .num_ranges = 1,
+ .range = (struct ab8500_reg_range[]) {
+ {
+ .first = 0x00,
+ .last = 0x0F,
+ },
+ },
+ },
+ [AB8500_MISC] = {
+ .num_ranges = 8,
+ .range = (struct ab8500_reg_range[]) {
+ {
+ .first = 0x00,
+ .last = 0x05,
+ },
+ {
+ .first = 0x10,
+ .last = 0x15,
+ },
+ {
+ .first = 0x20,
+ .last = 0x25,
+ },
+ {
+ .first = 0x30,
+ .last = 0x35,
+ },
+ {
+ .first = 0x40,
+ .last = 0x45,
+ },
+ {
+ .first = 0x50,
+ .last = 0x50,
+ },
+ {
+ .first = 0x60,
+ .last = 0x67,
+ },
+ {
+ .first = 0x80,
+ .last = 0x80,
+ },
+ },
+ },
+ [0x11] = {
+ .num_ranges = 0,
+ .range = 0,
+ },
+ [0x12] = {
+ .num_ranges = 0,
+ .range = 0,
+ },
+ [0x13] = {
+ .num_ranges = 0,
+ .range = 0,
+ },
+ [0x14] = {
+ .num_ranges = 0,
+ .range = 0,
+ },
+ [AB8500_OTP_EMUL] = {
+ .num_ranges = 1,
+ .range = (struct ab8500_reg_range[]) {
+ {
+ .first = 0x01,
+ .last = 0x0F,
+ },
+ },
+ },
+};
+
+static int ab8500_registers_print(struct seq_file *s, void *p)
+{
+ struct device *dev = s->private;
+ unsigned int i;
+ u32 bank = debug_bank;
+
+ seq_printf(s, AB8500_NAME_STRING " register values:\n");
+
+ seq_printf(s, " bank %u:\n", bank);
+ for (i = 0; i < debug_ranges[bank].num_ranges; i++) {
+ u32 reg;
+
+ for (reg = debug_ranges[bank].range[i].first;
+ reg <= debug_ranges[bank].range[i].last;
+ reg++) {
+ u8 value;
+ int err;
+
+ err = abx500_get_register_interruptible(dev,
+ (u8)bank, (u8)reg, &value);
+ if (err < 0) {
+ dev_err(dev, "ab->read fail %d\n", err);
+ return err;
+ }
+
+ err = seq_printf(s, " [%u/0x%02X]: 0x%02X\n", bank,
+ reg, value);
+ if (err < 0) {
+ dev_err(dev, "seq_printf overflow\n");
+ /* Error is not returned here since
+ * the output is wanted in any case */
+ return 0;
+ }
+ }
+ }
+ return 0;
+}
+
+static int ab8500_registers_open(struct inode *inode, struct file *file)
+{
+ return single_open(file, ab8500_registers_print, inode->i_private);
+}
+
+static const struct file_operations ab8500_registers_fops = {
+ .open = ab8500_registers_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = single_release,
+ .owner = THIS_MODULE,
+};
+
+static int ab8500_bank_print(struct seq_file *s, void *p)
+{
+ return seq_printf(s, "%d\n", debug_bank);
+}
+
+static int ab8500_bank_open(struct inode *inode, struct file *file)
+{
+ return single_open(file, ab8500_bank_print, inode->i_private);
+}
+
+static ssize_t ab8500_bank_write(struct file *file,
+ const char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ struct device *dev = ((struct seq_file *)(file->private_data))->private;
+ char buf[32];
+ int buf_size;
+ unsigned long user_bank;
+ int err;
+
+ /* Get userspace string and assure termination */
+ buf_size = min(count, (sizeof(buf) - 1));
+ if (copy_from_user(buf, user_buf, buf_size))
+ return -EFAULT;
+ buf[buf_size] = 0;
+
+ err = strict_strtoul(buf, 0, &user_bank);
+ if (err)
+ return -EINVAL;
+
+ if (user_bank >= AB8500_NUM_BANKS) {
+ dev_err(dev, "debugfs error input > number of banks\n");
+ return -EINVAL;
+ }
+
+ debug_bank = user_bank;
+
+ return buf_size;
+}
+
+static int ab8500_address_print(struct seq_file *s, void *p)
+{
+ return seq_printf(s, "0x%02X\n", debug_address);
+}
+
+static int ab8500_address_open(struct inode *inode, struct file *file)
+{
+ return single_open(file, ab8500_address_print, inode->i_private);
+}
+
+static ssize_t ab8500_address_write(struct file *file,
+ const char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ struct device *dev = ((struct seq_file *)(file->private_data))->private;
+ char buf[32];
+ int buf_size;
+ unsigned long user_address;
+ int err;
+
+ /* Get userspace string and assure termination */
+ buf_size = min(count, (sizeof(buf) - 1));
+ if (copy_from_user(buf, user_buf, buf_size))
+ return -EFAULT;
+ buf[buf_size] = 0;
+
+ err = strict_strtoul(buf, 0, &user_address);
+ if (err)
+ return -EINVAL;
+ if (user_address > 0xff) {
+ dev_err(dev, "debugfs error input > 0xff\n");
+ return -EINVAL;
+ }
+ debug_address = user_address;
+ return buf_size;
+}
+
+static int ab8500_val_print(struct seq_file *s, void *p)
+{
+ struct device *dev = s->private;
+ int ret;
+ u8 regvalue;
+
+ ret = abx500_get_register_interruptible(dev,
+ (u8)debug_bank, (u8)debug_address, &regvalue);
+ if (ret < 0) {
+ dev_err(dev, "abx500_get_reg fail %d, %d\n",
+ ret, __LINE__);
+ return -EINVAL;
+ }
+ seq_printf(s, "0x%02X\n", regvalue);
+
+ return 0;
+}
+
+static int ab8500_val_open(struct inode *inode, struct file *file)
+{
+ return single_open(file, ab8500_val_print, inode->i_private);
+}
+
+static ssize_t ab8500_val_write(struct file *file,
+ const char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ struct device *dev = ((struct seq_file *)(file->private_data))->private;
+ char buf[32];
+ int buf_size;
+ unsigned long user_val;
+ int err;
+
+ /* Get userspace string and assure termination */
+ buf_size = min(count, (sizeof(buf)-1));
+ if (copy_from_user(buf, user_buf, buf_size))
+ return -EFAULT;
+ buf[buf_size] = 0;
+
+ err = strict_strtoul(buf, 0, &user_val);
+ if (err)
+ return -EINVAL;
+ if (user_val > 0xff) {
+ dev_err(dev, "debugfs error input > 0xff\n");
+ return -EINVAL;
+ }
+ err = abx500_set_register_interruptible(dev,
+ (u8)debug_bank, debug_address, (u8)user_val);
+ if (err < 0) {
+ printk(KERN_ERR "abx500_set_reg failed %d, %d", err, __LINE__);
+ return -EINVAL;
+ }
+
+ return buf_size;
+}
+
+static const struct file_operations ab8500_bank_fops = {
+ .open = ab8500_bank_open,
+ .write = ab8500_bank_write,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = single_release,
+ .owner = THIS_MODULE,
+};
+
+static const struct file_operations ab8500_address_fops = {
+ .open = ab8500_address_open,
+ .write = ab8500_address_write,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = single_release,
+ .owner = THIS_MODULE,
+};
+
+static const struct file_operations ab8500_val_fops = {
+ .open = ab8500_val_open,
+ .write = ab8500_val_write,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = single_release,
+ .owner = THIS_MODULE,
+};
+
+static struct dentry *ab8500_dir;
+static struct dentry *ab8500_reg_file;
+static struct dentry *ab8500_bank_file;
+static struct dentry *ab8500_address_file;
+static struct dentry *ab8500_val_file;
+
+static int __devinit ab8500_debug_probe(struct platform_device *plf)
+{
+ debug_bank = AB8500_MISC;
+ debug_address = AB8500_REV_REG & 0x00FF;
+
+ ab8500_dir = debugfs_create_dir(AB8500_NAME_STRING, NULL);
+ if (!ab8500_dir)
+ goto exit_no_debugfs;
+
+ ab8500_reg_file = debugfs_create_file("all-bank-registers",
+ S_IRUGO, ab8500_dir, &plf->dev, &ab8500_registers_fops);
+ if (!ab8500_reg_file)
+ goto exit_destroy_dir;
+
+ ab8500_bank_file = debugfs_create_file("register-bank",
+ (S_IRUGO | S_IWUGO), ab8500_dir, &plf->dev, &ab8500_bank_fops);
+ if (!ab8500_bank_file)
+ goto exit_destroy_reg;
+
+ ab8500_address_file = debugfs_create_file("register-address",
+ (S_IRUGO | S_IWUGO), ab8500_dir, &plf->dev,
+ &ab8500_address_fops);
+ if (!ab8500_address_file)
+ goto exit_destroy_bank;
+
+ ab8500_val_file = debugfs_create_file("register-value",
+ (S_IRUGO | S_IWUGO), ab8500_dir, &plf->dev, &ab8500_val_fops);
+ if (!ab8500_val_file)
+ goto exit_destroy_address;
+
+ return 0;
+
+exit_destroy_address:
+ debugfs_remove(ab8500_address_file);
+exit_destroy_bank:
+ debugfs_remove(ab8500_bank_file);
+exit_destroy_reg:
+ debugfs_remove(ab8500_reg_file);
+exit_destroy_dir:
+ debugfs_remove(ab8500_dir);
+exit_no_debugfs:
+ dev_err(&plf->dev, "failed to create debugfs entries.\n");
+ return -ENOMEM;
+}
+
+static int __devexit ab8500_debug_remove(struct platform_device *plf)
+{
+ debugfs_remove(ab8500_val_file);
+ debugfs_remove(ab8500_address_file);
+ debugfs_remove(ab8500_bank_file);
+ debugfs_remove(ab8500_reg_file);
+ debugfs_remove(ab8500_dir);
+
+ return 0;
+}
+
+static struct platform_driver ab8500_debug_driver = {
+ .driver = {
+ .name = "ab8500-debug",
+ .owner = THIS_MODULE,
+ },
+ .probe = ab8500_debug_probe,
+ .remove = __devexit_p(ab8500_debug_remove)
+};
+
+static int __init ab8500_debug_init(void)
+{
+ return platform_driver_register(&ab8500_debug_driver);
+}
+
+static void __exit ab8500_debug_exit(void)
+{
+ platform_driver_unregister(&ab8500_debug_driver);
+}
+subsys_initcall(ab8500_debug_init);
+module_exit(ab8500_debug_exit);
+
+MODULE_AUTHOR("Mattias WALLIN <mattias.wallin@stericsson.com");
+MODULE_DESCRIPTION("AB8500 DEBUG");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/mfd/ab8500-i2c.c b/drivers/mfd/ab8500-i2c.c
new file mode 100644
index 000000000000..6820327adf4a
--- /dev/null
+++ b/drivers/mfd/ab8500-i2c.c
@@ -0,0 +1,105 @@
+/*
+ * Copyright (C) ST-Ericsson SA 2010
+ * Author: Mattias Wallin <mattias.wallin@stericsson.com> for ST-Ericsson.
+ * License Terms: GNU General Public License v2
+ * This file was based on drivers/mfd/ab8500-spi.c
+ */
+
+#include <linux/kernel.h>
+#include <linux/slab.h>
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/mfd/ab8500.h>
+
+#include <mach/prcmu.h>
+
+static int ab8500_i2c_write(struct ab8500 *ab8500, u16 addr, u8 data)
+{
+ int ret;
+
+ ret = prcmu_abb_write((u8)(addr >> 8), (u8)(addr & 0xFF), &data, 1);
+ if (ret < 0)
+ dev_err(ab8500->dev, "prcmu i2c error %d\n", ret);
+ return ret;
+}
+
+static int ab8500_i2c_read(struct ab8500 *ab8500, u16 addr)
+{
+ int ret;
+ u8 data;
+
+ ret = prcmu_abb_read((u8)(addr >> 8), (u8)(addr & 0xFF), &data, 1);
+ if (ret < 0) {
+ dev_err(ab8500->dev, "prcmu i2c error %d\n", ret);
+ return ret;
+ }
+ return (int)data;
+}
+
+static int __devinit ab8500_i2c_probe(struct platform_device *plf)
+{
+ struct ab8500 *ab8500;
+ struct resource *resource;
+ int ret;
+
+ ab8500 = kzalloc(sizeof *ab8500, GFP_KERNEL);
+ if (!ab8500)
+ return -ENOMEM;
+
+ ab8500->dev = &plf->dev;
+
+ resource = platform_get_resource(plf, IORESOURCE_IRQ, 0);
+ if (!resource) {
+ kfree(ab8500);
+ return -ENODEV;
+ }
+
+ ab8500->irq = resource->start;
+
+ ab8500->read = ab8500_i2c_read;
+ ab8500->write = ab8500_i2c_write;
+
+ platform_set_drvdata(plf, ab8500);
+
+ ret = ab8500_init(ab8500);
+ if (ret)
+ kfree(ab8500);
+
+ return ret;
+}
+
+static int __devexit ab8500_i2c_remove(struct platform_device *plf)
+{
+ struct ab8500 *ab8500 = platform_get_drvdata(plf);
+
+ ab8500_exit(ab8500);
+ kfree(ab8500);
+
+ return 0;
+}
+
+static struct platform_driver ab8500_i2c_driver = {
+ .driver = {
+ .name = "ab8500-i2c",
+ .owner = THIS_MODULE,
+ },
+ .probe = ab8500_i2c_probe,
+ .remove = __devexit_p(ab8500_i2c_remove)
+};
+
+static int __init ab8500_i2c_init(void)
+{
+ return platform_driver_register(&ab8500_i2c_driver);
+}
+
+static void __exit ab8500_i2c_exit(void)
+{
+ platform_driver_unregister(&ab8500_i2c_driver);
+}
+subsys_initcall(ab8500_i2c_init);
+module_exit(ab8500_i2c_exit);
+
+MODULE_AUTHOR("Mattias WALLIN <mattias.wallin@stericsson.com");
+MODULE_DESCRIPTION("AB8500 Core access via PRCMU I2C");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/mfd/ab8500-spi.c b/drivers/mfd/ab8500-spi.c
index 01b6d584442c..b1653421edb5 100644
--- a/drivers/mfd/ab8500-spi.c
+++ b/drivers/mfd/ab8500-spi.c
@@ -119,7 +119,7 @@ static int __devexit ab8500_spi_remove(struct spi_device *spi)
static struct spi_driver ab8500_spi_driver = {
.driver = {
- .name = "ab8500",
+ .name = "ab8500-spi",
.owner = THIS_MODULE,
},
.probe = ab8500_spi_probe,
diff --git a/drivers/mfd/da903x.c b/drivers/mfd/da903x.c
index c07aece900fb..2fadbaeb1cb1 100644
--- a/drivers/mfd/da903x.c
+++ b/drivers/mfd/da903x.c
@@ -470,13 +470,19 @@ static int __devinit da903x_add_subdevs(struct da903x_chip *chip,
subdev = &pdata->subdevs[i];
pdev = platform_device_alloc(subdev->name, subdev->id);
+ if (!pdev) {
+ ret = -ENOMEM;
+ goto failed;
+ }
pdev->dev.parent = chip->dev;
pdev->dev.platform_data = subdev->platform_data;
ret = platform_device_add(pdev);
- if (ret)
+ if (ret) {
+ platform_device_put(pdev);
goto failed;
+ }
}
return 0;
diff --git a/drivers/mfd/ezx-pcap.c b/drivers/mfd/ezx-pcap.c
index 134c69aa4790..c2b698d69a93 100644
--- a/drivers/mfd/ezx-pcap.c
+++ b/drivers/mfd/ezx-pcap.c
@@ -384,12 +384,20 @@ static int __devinit pcap_add_subdev(struct pcap_chip *pcap,
struct pcap_subdev *subdev)
{
struct platform_device *pdev;
+ int ret;
pdev = platform_device_alloc(subdev->name, subdev->id);
+ if (!pdev)
+ return -ENOMEM;
+
pdev->dev.parent = &pcap->spi->dev;
pdev->dev.platform_data = subdev->platform_data;
- return platform_device_add(pdev);
+ ret = platform_device_add(pdev);
+ if (ret)
+ platform_device_put(pdev);
+
+ return ret;
}
static int __devexit ezx_pcap_remove(struct spi_device *spi)
@@ -457,6 +465,7 @@ static int __devinit ezx_pcap_probe(struct spi_device *spi)
pcap->irq_base = pdata->irq_base;
pcap->workqueue = create_singlethread_workqueue("pcapd");
if (!pcap->workqueue) {
+ ret = -ENOMEM;
dev_err(&spi->dev, "cant create pcap thread\n");
goto free_pcap;
}
diff --git a/drivers/mfd/htc-pasic3.c b/drivers/mfd/htc-pasic3.c
index f04300e05fd6..7bc752272dc1 100644
--- a/drivers/mfd/htc-pasic3.c
+++ b/drivers/mfd/htc-pasic3.c
@@ -138,13 +138,6 @@ static int __init pasic3_probe(struct platform_device *pdev)
irq = r->start;
}
- r = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
- if (r) {
- ds1wm_resources[1].flags = IORESOURCE_IRQ | (r->flags &
- (IORESOURCE_IRQ_HIGHEDGE | IORESOURCE_IRQ_LOWEDGE));
- irq = r->start;
- }
-
r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
if (!r)
return -ENXIO;
diff --git a/drivers/mfd/jz4740-adc.c b/drivers/mfd/jz4740-adc.c
index 3ad492cb6c41..9dd1b33f2275 100644
--- a/drivers/mfd/jz4740-adc.c
+++ b/drivers/mfd/jz4740-adc.c
@@ -153,7 +153,7 @@ static inline void jz4740_adc_set_enabled(struct jz4740_adc *adc, int engine,
if (enabled)
val |= BIT(engine);
else
- val &= BIT(engine);
+ val &= ~BIT(engine);
writeb(val, adc->base + JZ_REG_ADC_ENABLE);
spin_unlock_irqrestore(&adc->lock, flags);
diff --git a/drivers/mfd/max8925-core.c b/drivers/mfd/max8925-core.c
index 428377a5a6f5..44695f5a1800 100644
--- a/drivers/mfd/max8925-core.c
+++ b/drivers/mfd/max8925-core.c
@@ -93,8 +93,13 @@ static struct mfd_cell rtc_devs[] = {
static struct resource onkey_resources[] = {
{
.name = "max8925-onkey",
- .start = MAX8925_IRQ_GPM_SW_3SEC,
- .end = MAX8925_IRQ_GPM_SW_3SEC,
+ .start = MAX8925_IRQ_GPM_SW_R,
+ .end = MAX8925_IRQ_GPM_SW_R,
+ .flags = IORESOURCE_IRQ,
+ }, {
+ .name = "max8925-onkey",
+ .start = MAX8925_IRQ_GPM_SW_F,
+ .end = MAX8925_IRQ_GPM_SW_F,
.flags = IORESOURCE_IRQ,
},
};
@@ -102,7 +107,7 @@ static struct resource onkey_resources[] = {
static struct mfd_cell onkey_devs[] = {
{
.name = "max8925-onkey",
- .num_resources = 1,
+ .num_resources = 2,
.resources = &onkey_resources[0],
.id = -1,
},
diff --git a/drivers/mfd/max8998-irq.c b/drivers/mfd/max8998-irq.c
new file mode 100644
index 000000000000..45bfe77b639b
--- /dev/null
+++ b/drivers/mfd/max8998-irq.c
@@ -0,0 +1,258 @@
+/*
+ * Interrupt controller support for MAX8998
+ *
+ * Copyright (C) 2010 Samsung Electronics Co.Ltd
+ * Author: Joonyoung Shim <jy0922.shim@samsung.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ *
+ */
+
+#include <linux/device.h>
+#include <linux/interrupt.h>
+#include <linux/irq.h>
+#include <linux/mfd/max8998-private.h>
+
+struct max8998_irq_data {
+ int reg;
+ int mask;
+};
+
+static struct max8998_irq_data max8998_irqs[] = {
+ [MAX8998_IRQ_DCINF] = {
+ .reg = 1,
+ .mask = MAX8998_IRQ_DCINF_MASK,
+ },
+ [MAX8998_IRQ_DCINR] = {
+ .reg = 1,
+ .mask = MAX8998_IRQ_DCINR_MASK,
+ },
+ [MAX8998_IRQ_JIGF] = {
+ .reg = 1,
+ .mask = MAX8998_IRQ_JIGF_MASK,
+ },
+ [MAX8998_IRQ_JIGR] = {
+ .reg = 1,
+ .mask = MAX8998_IRQ_JIGR_MASK,
+ },
+ [MAX8998_IRQ_PWRONF] = {
+ .reg = 1,
+ .mask = MAX8998_IRQ_PWRONF_MASK,
+ },
+ [MAX8998_IRQ_PWRONR] = {
+ .reg = 1,
+ .mask = MAX8998_IRQ_PWRONR_MASK,
+ },
+ [MAX8998_IRQ_WTSREVNT] = {
+ .reg = 2,
+ .mask = MAX8998_IRQ_WTSREVNT_MASK,
+ },
+ [MAX8998_IRQ_SMPLEVNT] = {
+ .reg = 2,
+ .mask = MAX8998_IRQ_SMPLEVNT_MASK,
+ },
+ [MAX8998_IRQ_ALARM1] = {
+ .reg = 2,
+ .mask = MAX8998_IRQ_ALARM1_MASK,
+ },
+ [MAX8998_IRQ_ALARM0] = {
+ .reg = 2,
+ .mask = MAX8998_IRQ_ALARM0_MASK,
+ },
+ [MAX8998_IRQ_ONKEY1S] = {
+ .reg = 3,
+ .mask = MAX8998_IRQ_ONKEY1S_MASK,
+ },
+ [MAX8998_IRQ_TOPOFFR] = {
+ .reg = 3,
+ .mask = MAX8998_IRQ_TOPOFFR_MASK,
+ },
+ [MAX8998_IRQ_DCINOVPR] = {
+ .reg = 3,
+ .mask = MAX8998_IRQ_DCINOVPR_MASK,
+ },
+ [MAX8998_IRQ_CHGRSTF] = {
+ .reg = 3,
+ .mask = MAX8998_IRQ_CHGRSTF_MASK,
+ },
+ [MAX8998_IRQ_DONER] = {
+ .reg = 3,
+ .mask = MAX8998_IRQ_DONER_MASK,
+ },
+ [MAX8998_IRQ_CHGFAULT] = {
+ .reg = 3,
+ .mask = MAX8998_IRQ_CHGFAULT_MASK,
+ },
+ [MAX8998_IRQ_LOBAT1] = {
+ .reg = 4,
+ .mask = MAX8998_IRQ_LOBAT1_MASK,
+ },
+ [MAX8998_IRQ_LOBAT2] = {
+ .reg = 4,
+ .mask = MAX8998_IRQ_LOBAT2_MASK,
+ },
+};
+
+static inline struct max8998_irq_data *
+irq_to_max8998_irq(struct max8998_dev *max8998, int irq)
+{
+ return &max8998_irqs[irq - max8998->irq_base];
+}
+
+static void max8998_irq_lock(unsigned int irq)
+{
+ struct max8998_dev *max8998 = get_irq_chip_data(irq);
+
+ mutex_lock(&max8998->irqlock);
+}
+
+static void max8998_irq_sync_unlock(unsigned int irq)
+{
+ struct max8998_dev *max8998 = get_irq_chip_data(irq);
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(max8998->irq_masks_cur); i++) {
+ /*
+ * If there's been a change in the mask, write it back
+ * to the hardware.
+ */
+ if (max8998->irq_masks_cur[i] != max8998->irq_masks_cache[i]) {
+ max8998->irq_masks_cache[i] = max8998->irq_masks_cur[i];
+ max8998_write_reg(max8998->i2c, MAX8998_REG_IRQM1 + i,
+ max8998->irq_masks_cur[i]);
+ }
+ }
+
+ mutex_unlock(&max8998->irqlock);
+}
+
+static void max8998_irq_unmask(unsigned int irq)
+{
+ struct max8998_dev *max8998 = get_irq_chip_data(irq);
+ struct max8998_irq_data *irq_data = irq_to_max8998_irq(max8998, irq);
+
+ max8998->irq_masks_cur[irq_data->reg - 1] &= ~irq_data->mask;
+}
+
+static void max8998_irq_mask(unsigned int irq)
+{
+ struct max8998_dev *max8998 = get_irq_chip_data(irq);
+ struct max8998_irq_data *irq_data = irq_to_max8998_irq(max8998, irq);
+
+ max8998->irq_masks_cur[irq_data->reg - 1] |= irq_data->mask;
+}
+
+static struct irq_chip max8998_irq_chip = {
+ .name = "max8998",
+ .bus_lock = max8998_irq_lock,
+ .bus_sync_unlock = max8998_irq_sync_unlock,
+ .mask = max8998_irq_mask,
+ .unmask = max8998_irq_unmask,
+};
+
+static irqreturn_t max8998_irq_thread(int irq, void *data)
+{
+ struct max8998_dev *max8998 = data;
+ u8 irq_reg[MAX8998_NUM_IRQ_REGS];
+ int ret;
+ int i;
+
+ ret = max8998_bulk_read(max8998->i2c, MAX8998_REG_IRQ1,
+ MAX8998_NUM_IRQ_REGS, irq_reg);
+ if (ret < 0) {
+ dev_err(max8998->dev, "Failed to read interrupt register: %d\n",
+ ret);
+ return IRQ_NONE;
+ }
+
+ /* Apply masking */
+ for (i = 0; i < MAX8998_NUM_IRQ_REGS; i++)
+ irq_reg[i] &= ~max8998->irq_masks_cur[i];
+
+ /* Report */
+ for (i = 0; i < MAX8998_IRQ_NR; i++) {
+ if (irq_reg[max8998_irqs[i].reg - 1] & max8998_irqs[i].mask)
+ handle_nested_irq(max8998->irq_base + i);
+ }
+
+ return IRQ_HANDLED;
+}
+
+int max8998_irq_init(struct max8998_dev *max8998)
+{
+ int i;
+ int cur_irq;
+ int ret;
+
+ if (!max8998->irq) {
+ dev_warn(max8998->dev,
+ "No interrupt specified, no interrupts\n");
+ max8998->irq_base = 0;
+ return 0;
+ }
+
+ if (!max8998->irq_base) {
+ dev_err(max8998->dev,
+ "No interrupt base specified, no interrupts\n");
+ return 0;
+ }
+
+ mutex_init(&max8998->irqlock);
+
+ /* Mask the individual interrupt sources */
+ for (i = 0; i < MAX8998_NUM_IRQ_REGS; i++) {
+ max8998->irq_masks_cur[i] = 0xff;
+ max8998->irq_masks_cache[i] = 0xff;
+ max8998_write_reg(max8998->i2c, MAX8998_REG_IRQM1 + i, 0xff);
+ }
+
+ max8998_write_reg(max8998->i2c, MAX8998_REG_STATUSM1, 0xff);
+ max8998_write_reg(max8998->i2c, MAX8998_REG_STATUSM2, 0xff);
+
+ /* register with genirq */
+ for (i = 0; i < MAX8998_IRQ_NR; i++) {
+ cur_irq = i + max8998->irq_base;
+ set_irq_chip_data(cur_irq, max8998);
+ set_irq_chip_and_handler(cur_irq, &max8998_irq_chip,
+ handle_edge_irq);
+ set_irq_nested_thread(cur_irq, 1);
+#ifdef CONFIG_ARM
+ set_irq_flags(cur_irq, IRQF_VALID);
+#else
+ set_irq_noprobe(cur_irq);
+#endif
+ }
+
+ ret = request_threaded_irq(max8998->irq, NULL, max8998_irq_thread,
+ IRQF_TRIGGER_FALLING | IRQF_ONESHOT,
+ "max8998-irq", max8998);
+ if (ret) {
+ dev_err(max8998->dev, "Failed to request IRQ %d: %d\n",
+ max8998->irq, ret);
+ return ret;
+ }
+
+ if (!max8998->ono)
+ return 0;
+
+ ret = request_threaded_irq(max8998->ono, NULL, max8998_irq_thread,
+ IRQF_TRIGGER_FALLING | IRQF_TRIGGER_RISING |
+ IRQF_ONESHOT, "max8998-ono", max8998);
+ if (ret)
+ dev_err(max8998->dev, "Failed to request IRQ %d: %d\n",
+ max8998->ono, ret);
+
+ return 0;
+}
+
+void max8998_irq_exit(struct max8998_dev *max8998)
+{
+ if (max8998->ono)
+ free_irq(max8998->ono, max8998);
+
+ if (max8998->irq)
+ free_irq(max8998->irq, max8998);
+}
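With the interrupt controller above in place, MAX8998 sub-drivers can claim individual PMIC events as ordinary nested, threaded IRQs at irq_base + MAX8998_IRQ_*. A hypothetical consumer, trimmed of error handling and not part of this patch, might look like the following (only MAX8998_IRQ_PWRONR and the max8998_dev fields come from the patch; the other names are made up):

#include <linux/interrupt.h>
#include <linux/mfd/max8998-private.h>

static irqreturn_t example_pwron_handler(int irq, void *data)
{
	/* Runs in thread context: the parent IRQ is dispatched via handle_nested_irq(). */
	return IRQ_HANDLED;
}

static int example_hook_pwron(struct max8998_dev *max8998)
{
	int irq = max8998->irq_base + MAX8998_IRQ_PWRONR;

	/* Nested IRQ: no primary handler, only a thread function. */
	return request_threaded_irq(irq, NULL, example_pwron_handler, 0,
				    "example-pwron", max8998);
}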
diff --git a/drivers/mfd/max8998.c b/drivers/mfd/max8998.c
index 73e6f5c4efc9..bb9977bebe78 100644
--- a/drivers/mfd/max8998.c
+++ b/drivers/mfd/max8998.c
@@ -1,5 +1,5 @@
/*
- * max8698.c - mfd core driver for the Maxim 8998
+ * max8998.c - mfd core driver for the Maxim 8998
*
* Copyright (C) 2009-2010 Samsung Electronics
* Kyungmin Park <kyungmin.park@samsung.com>
@@ -30,19 +30,23 @@
#include <linux/mfd/max8998.h>
#include <linux/mfd/max8998-private.h>
+#define RTC_I2C_ADDR (0x0c >> 1)
+
static struct mfd_cell max8998_devs[] = {
{
.name = "max8998-pmic",
- }
+ }, {
+ .name = "max8998-rtc",
+ },
};
-static int max8998_i2c_device_read(struct max8998_dev *max8998, u8 reg, u8 *dest)
+int max8998_read_reg(struct i2c_client *i2c, u8 reg, u8 *dest)
{
- struct i2c_client *client = max8998->i2c_client;
+ struct max8998_dev *max8998 = i2c_get_clientdata(i2c);
int ret;
mutex_lock(&max8998->iolock);
- ret = i2c_smbus_read_byte_data(client, reg);
+ ret = i2c_smbus_read_byte_data(i2c, reg);
mutex_unlock(&max8998->iolock);
if (ret < 0)
return ret;
@@ -51,40 +55,71 @@ static int max8998_i2c_device_read(struct max8998_dev *max8998, u8 reg, u8 *dest
*dest = ret;
return 0;
}
+EXPORT_SYMBOL(max8998_read_reg);
-static int max8998_i2c_device_write(struct max8998_dev *max8998, u8 reg, u8 value)
+int max8998_bulk_read(struct i2c_client *i2c, u8 reg, int count, u8 *buf)
{
- struct i2c_client *client = max8998->i2c_client;
+ struct max8998_dev *max8998 = i2c_get_clientdata(i2c);
+ int ret;
+
+ mutex_lock(&max8998->iolock);
+ ret = i2c_smbus_read_i2c_block_data(i2c, reg, count, buf);
+ mutex_unlock(&max8998->iolock);
+ if (ret < 0)
+ return ret;
+
+ return 0;
+}
+EXPORT_SYMBOL(max8998_bulk_read);
+
+int max8998_write_reg(struct i2c_client *i2c, u8 reg, u8 value)
+{
+ struct max8998_dev *max8998 = i2c_get_clientdata(i2c);
int ret;
mutex_lock(&max8998->iolock);
- ret = i2c_smbus_write_byte_data(client, reg, value);
+ ret = i2c_smbus_write_byte_data(i2c, reg, value);
mutex_unlock(&max8998->iolock);
return ret;
}
+EXPORT_SYMBOL(max8998_write_reg);
-static int max8998_i2c_device_update(struct max8998_dev *max8998, u8 reg,
- u8 val, u8 mask)
+int max8998_bulk_write(struct i2c_client *i2c, u8 reg, int count, u8 *buf)
{
- struct i2c_client *client = max8998->i2c_client;
+ struct max8998_dev *max8998 = i2c_get_clientdata(i2c);
+ int ret;
+
+ mutex_lock(&max8998->iolock);
+ ret = i2c_smbus_write_i2c_block_data(i2c, reg, count, buf);
+ mutex_unlock(&max8998->iolock);
+ if (ret < 0)
+ return ret;
+
+ return 0;
+}
+EXPORT_SYMBOL(max8998_bulk_write);
+
+int max8998_update_reg(struct i2c_client *i2c, u8 reg, u8 val, u8 mask)
+{
+ struct max8998_dev *max8998 = i2c_get_clientdata(i2c);
int ret;
mutex_lock(&max8998->iolock);
- ret = i2c_smbus_read_byte_data(client, reg);
+ ret = i2c_smbus_read_byte_data(i2c, reg);
if (ret >= 0) {
u8 old_val = ret & 0xff;
u8 new_val = (val & mask) | (old_val & (~mask));
- ret = i2c_smbus_write_byte_data(client, reg, new_val);
- if (ret >= 0)
- ret = 0;
+ ret = i2c_smbus_write_byte_data(i2c, reg, new_val);
}
mutex_unlock(&max8998->iolock);
return ret;
}
+EXPORT_SYMBOL(max8998_update_reg);
static int max8998_i2c_probe(struct i2c_client *i2c,
const struct i2c_device_id *id)
{
+ struct max8998_platform_data *pdata = i2c->dev.platform_data;
struct max8998_dev *max8998;
int ret = 0;
@@ -94,12 +129,20 @@ static int max8998_i2c_probe(struct i2c_client *i2c,
i2c_set_clientdata(i2c, max8998);
max8998->dev = &i2c->dev;
- max8998->i2c_client = i2c;
- max8998->dev_read = max8998_i2c_device_read;
- max8998->dev_write = max8998_i2c_device_write;
- max8998->dev_update = max8998_i2c_device_update;
+ max8998->i2c = i2c;
+ max8998->irq = i2c->irq;
+ max8998->type = id->driver_data;
+ if (pdata) {
+ max8998->ono = pdata->ono;
+ max8998->irq_base = pdata->irq_base;
+ }
mutex_init(&max8998->iolock);
+ max8998->rtc = i2c_new_dummy(i2c->adapter, RTC_I2C_ADDR);
+ i2c_set_clientdata(max8998->rtc, max8998);
+
+ max8998_irq_init(max8998);
+
ret = mfd_add_devices(max8998->dev, -1,
max8998_devs, ARRAY_SIZE(max8998_devs),
NULL, 0);
@@ -110,6 +153,8 @@ static int max8998_i2c_probe(struct i2c_client *i2c,
err:
mfd_remove_devices(max8998->dev);
+ max8998_irq_exit(max8998);
+ i2c_unregister_device(max8998->rtc);
kfree(max8998);
return ret;
}
@@ -119,14 +164,17 @@ static int max8998_i2c_remove(struct i2c_client *i2c)
struct max8998_dev *max8998 = i2c_get_clientdata(i2c);
mfd_remove_devices(max8998->dev);
+ max8998_irq_exit(max8998);
+ i2c_unregister_device(max8998->rtc);
kfree(max8998);
return 0;
}
static const struct i2c_device_id max8998_i2c_id[] = {
- { "max8998", 0 },
- { }
+ { "max8998", TYPE_MAX8998 },
+ { "lp3974", TYPE_LP3974},
+ { }
};
MODULE_DEVICE_TABLE(i2c, max8998_i2c_id);
diff --git a/drivers/mfd/mc13783-core.c b/drivers/mfd/mc13783-core.c
deleted file mode 100644
index 6df34989c1f6..000000000000
--- a/drivers/mfd/mc13783-core.c
+++ /dev/null
@@ -1,752 +0,0 @@
-/*
- * Copyright 2009 Pengutronix
- * Uwe Kleine-Koenig <u.kleine-koenig@pengutronix.de>
- *
- * loosely based on an earlier driver that has
- * Copyright 2009 Pengutronix, Sascha Hauer <s.hauer@pengutronix.de>
- *
- * This program is free software; you can redistribute it and/or modify it under
- * the terms of the GNU General Public License version 2 as published by the
- * Free Software Foundation.
- */
-#include <linux/slab.h>
-#include <linux/module.h>
-#include <linux/platform_device.h>
-#include <linux/mutex.h>
-#include <linux/interrupt.h>
-#include <linux/spi/spi.h>
-#include <linux/mfd/core.h>
-#include <linux/mfd/mc13783.h>
-
-struct mc13783 {
- struct spi_device *spidev;
- struct mutex lock;
- int irq;
- int flags;
-
- irq_handler_t irqhandler[MC13783_NUM_IRQ];
- void *irqdata[MC13783_NUM_IRQ];
-
- /* XXX these should go as platformdata to the regulator subdevice */
- struct mc13783_regulator_init_data *regulators;
- int num_regulators;
-};
-
-#define MC13783_REG_REVISION 7
-#define MC13783_REG_ADC_0 43
-#define MC13783_REG_ADC_1 44
-#define MC13783_REG_ADC_2 45
-
-#define MC13783_IRQSTAT0 0
-#define MC13783_IRQSTAT0_ADCDONEI (1 << 0)
-#define MC13783_IRQSTAT0_ADCBISDONEI (1 << 1)
-#define MC13783_IRQSTAT0_TSI (1 << 2)
-#define MC13783_IRQSTAT0_WHIGHI (1 << 3)
-#define MC13783_IRQSTAT0_WLOWI (1 << 4)
-#define MC13783_IRQSTAT0_CHGDETI (1 << 6)
-#define MC13783_IRQSTAT0_CHGOVI (1 << 7)
-#define MC13783_IRQSTAT0_CHGREVI (1 << 8)
-#define MC13783_IRQSTAT0_CHGSHORTI (1 << 9)
-#define MC13783_IRQSTAT0_CCCVI (1 << 10)
-#define MC13783_IRQSTAT0_CHGCURRI (1 << 11)
-#define MC13783_IRQSTAT0_BPONI (1 << 12)
-#define MC13783_IRQSTAT0_LOBATLI (1 << 13)
-#define MC13783_IRQSTAT0_LOBATHI (1 << 14)
-#define MC13783_IRQSTAT0_UDPI (1 << 15)
-#define MC13783_IRQSTAT0_USBI (1 << 16)
-#define MC13783_IRQSTAT0_IDI (1 << 19)
-#define MC13783_IRQSTAT0_SE1I (1 << 21)
-#define MC13783_IRQSTAT0_CKDETI (1 << 22)
-#define MC13783_IRQSTAT0_UDMI (1 << 23)
-
-#define MC13783_IRQMASK0 1
-#define MC13783_IRQMASK0_ADCDONEM MC13783_IRQSTAT0_ADCDONEI
-#define MC13783_IRQMASK0_ADCBISDONEM MC13783_IRQSTAT0_ADCBISDONEI
-#define MC13783_IRQMASK0_TSM MC13783_IRQSTAT0_TSI
-#define MC13783_IRQMASK0_WHIGHM MC13783_IRQSTAT0_WHIGHI
-#define MC13783_IRQMASK0_WLOWM MC13783_IRQSTAT0_WLOWI
-#define MC13783_IRQMASK0_CHGDETM MC13783_IRQSTAT0_CHGDETI
-#define MC13783_IRQMASK0_CHGOVM MC13783_IRQSTAT0_CHGOVI
-#define MC13783_IRQMASK0_CHGREVM MC13783_IRQSTAT0_CHGREVI
-#define MC13783_IRQMASK0_CHGSHORTM MC13783_IRQSTAT0_CHGSHORTI
-#define MC13783_IRQMASK0_CCCVM MC13783_IRQSTAT0_CCCVI
-#define MC13783_IRQMASK0_CHGCURRM MC13783_IRQSTAT0_CHGCURRI
-#define MC13783_IRQMASK0_BPONM MC13783_IRQSTAT0_BPONI
-#define MC13783_IRQMASK0_LOBATLM MC13783_IRQSTAT0_LOBATLI
-#define MC13783_IRQMASK0_LOBATHM MC13783_IRQSTAT0_LOBATHI
-#define MC13783_IRQMASK0_UDPM MC13783_IRQSTAT0_UDPI
-#define MC13783_IRQMASK0_USBM MC13783_IRQSTAT0_USBI
-#define MC13783_IRQMASK0_IDM MC13783_IRQSTAT0_IDI
-#define MC13783_IRQMASK0_SE1M MC13783_IRQSTAT0_SE1I
-#define MC13783_IRQMASK0_CKDETM MC13783_IRQSTAT0_CKDETI
-#define MC13783_IRQMASK0_UDMM MC13783_IRQSTAT0_UDMI
-
-#define MC13783_IRQSTAT1 3
-#define MC13783_IRQSTAT1_1HZI (1 << 0)
-#define MC13783_IRQSTAT1_TODAI (1 << 1)
-#define MC13783_IRQSTAT1_ONOFD1I (1 << 3)
-#define MC13783_IRQSTAT1_ONOFD2I (1 << 4)
-#define MC13783_IRQSTAT1_ONOFD3I (1 << 5)
-#define MC13783_IRQSTAT1_SYSRSTI (1 << 6)
-#define MC13783_IRQSTAT1_RTCRSTI (1 << 7)
-#define MC13783_IRQSTAT1_PCI (1 << 8)
-#define MC13783_IRQSTAT1_WARMI (1 << 9)
-#define MC13783_IRQSTAT1_MEMHLDI (1 << 10)
-#define MC13783_IRQSTAT1_PWRRDYI (1 << 11)
-#define MC13783_IRQSTAT1_THWARNLI (1 << 12)
-#define MC13783_IRQSTAT1_THWARNHI (1 << 13)
-#define MC13783_IRQSTAT1_CLKI (1 << 14)
-#define MC13783_IRQSTAT1_SEMAFI (1 << 15)
-#define MC13783_IRQSTAT1_MC2BI (1 << 17)
-#define MC13783_IRQSTAT1_HSDETI (1 << 18)
-#define MC13783_IRQSTAT1_HSLI (1 << 19)
-#define MC13783_IRQSTAT1_ALSPTHI (1 << 20)
-#define MC13783_IRQSTAT1_AHSSHORTI (1 << 21)
-
-#define MC13783_IRQMASK1 4
-#define MC13783_IRQMASK1_1HZM MC13783_IRQSTAT1_1HZI
-#define MC13783_IRQMASK1_TODAM MC13783_IRQSTAT1_TODAI
-#define MC13783_IRQMASK1_ONOFD1M MC13783_IRQSTAT1_ONOFD1I
-#define MC13783_IRQMASK1_ONOFD2M MC13783_IRQSTAT1_ONOFD2I
-#define MC13783_IRQMASK1_ONOFD3M MC13783_IRQSTAT1_ONOFD3I
-#define MC13783_IRQMASK1_SYSRSTM MC13783_IRQSTAT1_SYSRSTI
-#define MC13783_IRQMASK1_RTCRSTM MC13783_IRQSTAT1_RTCRSTI
-#define MC13783_IRQMASK1_PCM MC13783_IRQSTAT1_PCI
-#define MC13783_IRQMASK1_WARMM MC13783_IRQSTAT1_WARMI
-#define MC13783_IRQMASK1_MEMHLDM MC13783_IRQSTAT1_MEMHLDI
-#define MC13783_IRQMASK1_PWRRDYM MC13783_IRQSTAT1_PWRRDYI
-#define MC13783_IRQMASK1_THWARNLM MC13783_IRQSTAT1_THWARNLI
-#define MC13783_IRQMASK1_THWARNHM MC13783_IRQSTAT1_THWARNHI
-#define MC13783_IRQMASK1_CLKM MC13783_IRQSTAT1_CLKI
-#define MC13783_IRQMASK1_SEMAFM MC13783_IRQSTAT1_SEMAFI
-#define MC13783_IRQMASK1_MC2BM MC13783_IRQSTAT1_MC2BI
-#define MC13783_IRQMASK1_HSDETM MC13783_IRQSTAT1_HSDETI
-#define MC13783_IRQMASK1_HSLM MC13783_IRQSTAT1_HSLI
-#define MC13783_IRQMASK1_ALSPTHM MC13783_IRQSTAT1_ALSPTHI
-#define MC13783_IRQMASK1_AHSSHORTM MC13783_IRQSTAT1_AHSSHORTI
-
-#define MC13783_ADC1 44
-#define MC13783_ADC1_ADEN (1 << 0)
-#define MC13783_ADC1_RAND (1 << 1)
-#define MC13783_ADC1_ADSEL (1 << 3)
-#define MC13783_ADC1_ASC (1 << 20)
-#define MC13783_ADC1_ADTRIGIGN (1 << 21)
-
-#define MC13783_NUMREGS 0x3f
-
-void mc13783_lock(struct mc13783 *mc13783)
-{
- if (!mutex_trylock(&mc13783->lock)) {
- dev_dbg(&mc13783->spidev->dev, "wait for %s from %pf\n",
- __func__, __builtin_return_address(0));
-
- mutex_lock(&mc13783->lock);
- }
- dev_dbg(&mc13783->spidev->dev, "%s from %pf\n",
- __func__, __builtin_return_address(0));
-}
-EXPORT_SYMBOL(mc13783_lock);
-
-void mc13783_unlock(struct mc13783 *mc13783)
-{
- dev_dbg(&mc13783->spidev->dev, "%s from %pf\n",
- __func__, __builtin_return_address(0));
- mutex_unlock(&mc13783->lock);
-}
-EXPORT_SYMBOL(mc13783_unlock);
-
-#define MC13783_REGOFFSET_SHIFT 25
-int mc13783_reg_read(struct mc13783 *mc13783, unsigned int offset, u32 *val)
-{
- struct spi_transfer t;
- struct spi_message m;
- int ret;
-
- BUG_ON(!mutex_is_locked(&mc13783->lock));
-
- if (offset > MC13783_NUMREGS)
- return -EINVAL;
-
- *val = offset << MC13783_REGOFFSET_SHIFT;
-
- memset(&t, 0, sizeof(t));
-
- t.tx_buf = val;
- t.rx_buf = val;
- t.len = sizeof(u32);
-
- spi_message_init(&m);
- spi_message_add_tail(&t, &m);
-
- ret = spi_sync(mc13783->spidev, &m);
-
- /* error in message.status implies error return from spi_sync */
- BUG_ON(!ret && m.status);
-
- if (ret)
- return ret;
-
- *val &= 0xffffff;
-
- dev_vdbg(&mc13783->spidev->dev, "[0x%02x] -> 0x%06x\n", offset, *val);
-
- return 0;
-}
-EXPORT_SYMBOL(mc13783_reg_read);
-
-int mc13783_reg_write(struct mc13783 *mc13783, unsigned int offset, u32 val)
-{
- u32 buf;
- struct spi_transfer t;
- struct spi_message m;
- int ret;
-
- BUG_ON(!mutex_is_locked(&mc13783->lock));
-
- dev_vdbg(&mc13783->spidev->dev, "[0x%02x] <- 0x%06x\n", offset, val);
-
- if (offset > MC13783_NUMREGS || val > 0xffffff)
- return -EINVAL;
-
- buf = 1 << 31 | offset << MC13783_REGOFFSET_SHIFT | val;
-
- memset(&t, 0, sizeof(t));
-
- t.tx_buf = &buf;
- t.rx_buf = &buf;
- t.len = sizeof(u32);
-
- spi_message_init(&m);
- spi_message_add_tail(&t, &m);
-
- ret = spi_sync(mc13783->spidev, &m);
-
- BUG_ON(!ret && m.status);
-
- if (ret)
- return ret;
-
- return 0;
-}
-EXPORT_SYMBOL(mc13783_reg_write);
-
-int mc13783_reg_rmw(struct mc13783 *mc13783, unsigned int offset,
- u32 mask, u32 val)
-{
- int ret;
- u32 valread;
-
- BUG_ON(val & ~mask);
-
- ret = mc13783_reg_read(mc13783, offset, &valread);
- if (ret)
- return ret;
-
- valread = (valread & ~mask) | val;
-
- return mc13783_reg_write(mc13783, offset, valread);
-}
-EXPORT_SYMBOL(mc13783_reg_rmw);
-
-int mc13783_get_flags(struct mc13783 *mc13783)
-{
- return mc13783->flags;
-}
-EXPORT_SYMBOL(mc13783_get_flags);
-
-int mc13783_irq_mask(struct mc13783 *mc13783, int irq)
-{
- int ret;
- unsigned int offmask = irq < 24 ? MC13783_IRQMASK0 : MC13783_IRQMASK1;
- u32 irqbit = 1 << (irq < 24 ? irq : irq - 24);
- u32 mask;
-
- if (irq < 0 || irq >= MC13783_NUM_IRQ)
- return -EINVAL;
-
- ret = mc13783_reg_read(mc13783, offmask, &mask);
- if (ret)
- return ret;
-
- if (mask & irqbit)
- /* already masked */
- return 0;
-
- return mc13783_reg_write(mc13783, offmask, mask | irqbit);
-}
-EXPORT_SYMBOL(mc13783_irq_mask);
-
-int mc13783_irq_unmask(struct mc13783 *mc13783, int irq)
-{
- int ret;
- unsigned int offmask = irq < 24 ? MC13783_IRQMASK0 : MC13783_IRQMASK1;
- u32 irqbit = 1 << (irq < 24 ? irq : irq - 24);
- u32 mask;
-
- if (irq < 0 || irq >= MC13783_NUM_IRQ)
- return -EINVAL;
-
- ret = mc13783_reg_read(mc13783, offmask, &mask);
- if (ret)
- return ret;
-
- if (!(mask & irqbit))
- /* already unmasked */
- return 0;
-
- return mc13783_reg_write(mc13783, offmask, mask & ~irqbit);
-}
-EXPORT_SYMBOL(mc13783_irq_unmask);
-
-int mc13783_irq_status(struct mc13783 *mc13783, int irq,
- int *enabled, int *pending)
-{
- int ret;
- unsigned int offmask = irq < 24 ? MC13783_IRQMASK0 : MC13783_IRQMASK1;
- unsigned int offstat = irq < 24 ? MC13783_IRQSTAT0 : MC13783_IRQSTAT1;
- u32 irqbit = 1 << (irq < 24 ? irq : irq - 24);
-
- if (irq < 0 || irq >= MC13783_NUM_IRQ)
- return -EINVAL;
-
- if (enabled) {
- u32 mask;
-
- ret = mc13783_reg_read(mc13783, offmask, &mask);
- if (ret)
- return ret;
-
- *enabled = mask & irqbit;
- }
-
- if (pending) {
- u32 stat;
-
- ret = mc13783_reg_read(mc13783, offstat, &stat);
- if (ret)
- return ret;
-
- *pending = stat & irqbit;
- }
-
- return 0;
-}
-EXPORT_SYMBOL(mc13783_irq_status);
-
-int mc13783_irq_ack(struct mc13783 *mc13783, int irq)
-{
- unsigned int offstat = irq < 24 ? MC13783_IRQSTAT0 : MC13783_IRQSTAT1;
- unsigned int val = 1 << (irq < 24 ? irq : irq - 24);
-
- BUG_ON(irq < 0 || irq >= MC13783_NUM_IRQ);
-
- return mc13783_reg_write(mc13783, offstat, val);
-}
-EXPORT_SYMBOL(mc13783_irq_ack);
-
-int mc13783_irq_request_nounmask(struct mc13783 *mc13783, int irq,
- irq_handler_t handler, const char *name, void *dev)
-{
- BUG_ON(!mutex_is_locked(&mc13783->lock));
- BUG_ON(!handler);
-
- if (irq < 0 || irq >= MC13783_NUM_IRQ)
- return -EINVAL;
-
- if (mc13783->irqhandler[irq])
- return -EBUSY;
-
- mc13783->irqhandler[irq] = handler;
- mc13783->irqdata[irq] = dev;
-
- return 0;
-}
-EXPORT_SYMBOL(mc13783_irq_request_nounmask);
-
-int mc13783_irq_request(struct mc13783 *mc13783, int irq,
- irq_handler_t handler, const char *name, void *dev)
-{
- int ret;
-
- ret = mc13783_irq_request_nounmask(mc13783, irq, handler, name, dev);
- if (ret)
- return ret;
-
- ret = mc13783_irq_unmask(mc13783, irq);
- if (ret) {
- mc13783->irqhandler[irq] = NULL;
- mc13783->irqdata[irq] = NULL;
- return ret;
- }
-
- return 0;
-}
-EXPORT_SYMBOL(mc13783_irq_request);
-
-int mc13783_irq_free(struct mc13783 *mc13783, int irq, void *dev)
-{
- int ret;
- BUG_ON(!mutex_is_locked(&mc13783->lock));
-
- if (irq < 0 || irq >= MC13783_NUM_IRQ || !mc13783->irqhandler[irq] ||
- mc13783->irqdata[irq] != dev)
- return -EINVAL;
-
- ret = mc13783_irq_mask(mc13783, irq);
- if (ret)
- return ret;
-
- mc13783->irqhandler[irq] = NULL;
- mc13783->irqdata[irq] = NULL;
-
- return 0;
-}
-EXPORT_SYMBOL(mc13783_irq_free);
-
-static inline irqreturn_t mc13783_irqhandler(struct mc13783 *mc13783, int irq)
-{
- return mc13783->irqhandler[irq](irq, mc13783->irqdata[irq]);
-}
-
-/*
- * returns: number of handled irqs or negative error
- * locking: holds mc13783->lock
- */
-static int mc13783_irq_handle(struct mc13783 *mc13783,
- unsigned int offstat, unsigned int offmask, int baseirq)
-{
- u32 stat, mask;
- int ret = mc13783_reg_read(mc13783, offstat, &stat);
- int num_handled = 0;
-
- if (ret)
- return ret;
-
- ret = mc13783_reg_read(mc13783, offmask, &mask);
- if (ret)
- return ret;
-
- while (stat & ~mask) {
- int irq = __ffs(stat & ~mask);
-
- stat &= ~(1 << irq);
-
- if (likely(mc13783->irqhandler[baseirq + irq])) {
- irqreturn_t handled;
-
- handled = mc13783_irqhandler(mc13783, baseirq + irq);
- if (handled == IRQ_HANDLED)
- num_handled++;
- } else {
- dev_err(&mc13783->spidev->dev,
- "BUG: irq %u but no handler\n",
- baseirq + irq);
-
- mask |= 1 << irq;
-
- ret = mc13783_reg_write(mc13783, offmask, mask);
- }
- }
-
- return num_handled;
-}
-
-static irqreturn_t mc13783_irq_thread(int irq, void *data)
-{
- struct mc13783 *mc13783 = data;
- irqreturn_t ret;
- int handled = 0;
-
- mc13783_lock(mc13783);
-
- ret = mc13783_irq_handle(mc13783, MC13783_IRQSTAT0,
- MC13783_IRQMASK0, MC13783_IRQ_ADCDONE);
- if (ret > 0)
- handled = 1;
-
- ret = mc13783_irq_handle(mc13783, MC13783_IRQSTAT1,
- MC13783_IRQMASK1, MC13783_IRQ_1HZ);
- if (ret > 0)
- handled = 1;
-
- mc13783_unlock(mc13783);
-
- return IRQ_RETVAL(handled);
-}
-
-#define MC13783_ADC1_CHAN0_SHIFT 5
-#define MC13783_ADC1_CHAN1_SHIFT 8
-
-struct mc13783_adcdone_data {
- struct mc13783 *mc13783;
- struct completion done;
-};
-
-static irqreturn_t mc13783_handler_adcdone(int irq, void *data)
-{
- struct mc13783_adcdone_data *adcdone_data = data;
-
- mc13783_irq_ack(adcdone_data->mc13783, irq);
-
- complete_all(&adcdone_data->done);
-
- return IRQ_HANDLED;
-}
-
-#define MC13783_ADC_WORKING (1 << 16)
-
-int mc13783_adc_do_conversion(struct mc13783 *mc13783, unsigned int mode,
- unsigned int channel, unsigned int *sample)
-{
- u32 adc0, adc1, old_adc0;
- int i, ret;
- struct mc13783_adcdone_data adcdone_data = {
- .mc13783 = mc13783,
- };
- init_completion(&adcdone_data.done);
-
- dev_dbg(&mc13783->spidev->dev, "%s\n", __func__);
-
- mc13783_lock(mc13783);
-
- if (mc13783->flags & MC13783_ADC_WORKING) {
- ret = -EBUSY;
- goto out;
- }
-
- mc13783->flags |= MC13783_ADC_WORKING;
-
- mc13783_reg_read(mc13783, MC13783_ADC0, &old_adc0);
-
- adc0 = MC13783_ADC0_ADINC1 | MC13783_ADC0_ADINC2;
- adc1 = MC13783_ADC1_ADEN | MC13783_ADC1_ADTRIGIGN | MC13783_ADC1_ASC;
-
- if (channel > 7)
- adc1 |= MC13783_ADC1_ADSEL;
-
- switch (mode) {
- case MC13783_ADC_MODE_TS:
- adc0 |= MC13783_ADC0_ADREFEN | MC13783_ADC0_TSMOD0 |
- MC13783_ADC0_TSMOD1;
- adc1 |= 4 << MC13783_ADC1_CHAN1_SHIFT;
- break;
-
- case MC13783_ADC_MODE_SINGLE_CHAN:
- adc0 |= old_adc0 & MC13783_ADC0_TSMOD_MASK;
- adc1 |= (channel & 0x7) << MC13783_ADC1_CHAN0_SHIFT;
- adc1 |= MC13783_ADC1_RAND;
- break;
-
- case MC13783_ADC_MODE_MULT_CHAN:
- adc0 |= old_adc0 & MC13783_ADC0_TSMOD_MASK;
- adc1 |= 4 << MC13783_ADC1_CHAN1_SHIFT;
- break;
-
- default:
- mc13783_unlock(mc13783);
- return -EINVAL;
- }
-
- dev_dbg(&mc13783->spidev->dev, "%s: request irq\n", __func__);
- mc13783_irq_request(mc13783, MC13783_IRQ_ADCDONE,
- mc13783_handler_adcdone, __func__, &adcdone_data);
- mc13783_irq_ack(mc13783, MC13783_IRQ_ADCDONE);
-
- mc13783_reg_write(mc13783, MC13783_REG_ADC_0, adc0);
- mc13783_reg_write(mc13783, MC13783_REG_ADC_1, adc1);
-
- mc13783_unlock(mc13783);
-
- ret = wait_for_completion_interruptible_timeout(&adcdone_data.done, HZ);
-
- if (!ret)
- ret = -ETIMEDOUT;
-
- mc13783_lock(mc13783);
-
- mc13783_irq_free(mc13783, MC13783_IRQ_ADCDONE, &adcdone_data);
-
- if (ret > 0)
- for (i = 0; i < 4; ++i) {
- ret = mc13783_reg_read(mc13783,
- MC13783_REG_ADC_2, &sample[i]);
- if (ret)
- break;
- }
-
- if (mode == MC13783_ADC_MODE_TS)
- /* restore TSMOD */
- mc13783_reg_write(mc13783, MC13783_REG_ADC_0, old_adc0);
-
- mc13783->flags &= ~MC13783_ADC_WORKING;
-out:
- mc13783_unlock(mc13783);
-
- return ret;
-}
-EXPORT_SYMBOL_GPL(mc13783_adc_do_conversion);
-
-static int mc13783_add_subdevice_pdata(struct mc13783 *mc13783,
- const char *name, void *pdata, size_t pdata_size)
-{
- struct mfd_cell cell = {
- .name = name,
- .platform_data = pdata,
- .data_size = pdata_size,
- };
-
- return mfd_add_devices(&mc13783->spidev->dev, -1, &cell, 1, NULL, 0);
-}
-
-static int mc13783_add_subdevice(struct mc13783 *mc13783, const char *name)
-{
- return mc13783_add_subdevice_pdata(mc13783, name, NULL, 0);
-}
-
-static int mc13783_check_revision(struct mc13783 *mc13783)
-{
- u32 rev_id, rev1, rev2, finid, icid;
-
- mc13783_reg_read(mc13783, MC13783_REG_REVISION, &rev_id);
-
- rev1 = (rev_id & 0x018) >> 3;
- rev2 = (rev_id & 0x007);
- icid = (rev_id & 0x01C0) >> 6;
- finid = (rev_id & 0x01E00) >> 9;
-
- /* Ver 0.2 is actually 3.2a. Report as 3.2 */
- if ((rev1 == 0) && (rev2 == 2))
- rev1 = 3;
-
- if (rev1 == 0 || icid != 2) {
- dev_err(&mc13783->spidev->dev, "No MC13783 detected.\n");
- return -ENODEV;
- }
-
- dev_info(&mc13783->spidev->dev,
- "MC13783 Rev %d.%d FinVer %x detected\n",
- rev1, rev2, finid);
-
- return 0;
-}
-
-static int mc13783_probe(struct spi_device *spi)
-{
- struct mc13783 *mc13783;
- struct mc13783_platform_data *pdata = dev_get_platdata(&spi->dev);
- int ret;
-
- mc13783 = kzalloc(sizeof(*mc13783), GFP_KERNEL);
- if (!mc13783)
- return -ENOMEM;
-
- dev_set_drvdata(&spi->dev, mc13783);
- spi->mode = SPI_MODE_0 | SPI_CS_HIGH;
- spi->bits_per_word = 32;
- spi_setup(spi);
-
- mc13783->spidev = spi;
-
- mutex_init(&mc13783->lock);
- mc13783_lock(mc13783);
-
- ret = mc13783_check_revision(mc13783);
- if (ret)
- goto err_revision;
-
- /* mask all irqs */
- ret = mc13783_reg_write(mc13783, MC13783_IRQMASK0, 0x00ffffff);
- if (ret)
- goto err_mask;
-
- ret = mc13783_reg_write(mc13783, MC13783_IRQMASK1, 0x00ffffff);
- if (ret)
- goto err_mask;
-
- ret = request_threaded_irq(spi->irq, NULL, mc13783_irq_thread,
- IRQF_ONESHOT | IRQF_TRIGGER_HIGH, "mc13783", mc13783);
-
- if (ret) {
-err_mask:
-err_revision:
- mutex_unlock(&mc13783->lock);
- dev_set_drvdata(&spi->dev, NULL);
- kfree(mc13783);
- return ret;
- }
-
- /* This should go away (BEGIN) */
- if (pdata) {
- mc13783->flags = pdata->flags;
- mc13783->regulators = pdata->regulators;
- mc13783->num_regulators = pdata->num_regulators;
- }
- /* This should go away (END) */
-
- mc13783_unlock(mc13783);
-
- if (pdata->flags & MC13783_USE_ADC)
- mc13783_add_subdevice(mc13783, "mc13783-adc");
-
- if (pdata->flags & MC13783_USE_CODEC)
- mc13783_add_subdevice(mc13783, "mc13783-codec");
-
- if (pdata->flags & MC13783_USE_REGULATOR) {
- struct mc13783_regulator_platform_data regulator_pdata = {
- .num_regulators = pdata->num_regulators,
- .regulators = pdata->regulators,
- };
-
- mc13783_add_subdevice_pdata(mc13783, "mc13783-regulator",
- &regulator_pdata, sizeof(regulator_pdata));
- }
-
- if (pdata->flags & MC13783_USE_RTC)
- mc13783_add_subdevice(mc13783, "mc13783-rtc");
-
- if (pdata->flags & MC13783_USE_TOUCHSCREEN)
- mc13783_add_subdevice(mc13783, "mc13783-ts");
-
- if (pdata->flags & MC13783_USE_LED)
- mc13783_add_subdevice_pdata(mc13783, "mc13783-led",
- pdata->leds, sizeof(*pdata->leds));
-
- return 0;
-}
-
-static int __devexit mc13783_remove(struct spi_device *spi)
-{
- struct mc13783 *mc13783 = dev_get_drvdata(&spi->dev);
-
- free_irq(mc13783->spidev->irq, mc13783);
-
- mfd_remove_devices(&spi->dev);
-
- return 0;
-}
-
-static struct spi_driver mc13783_driver = {
- .driver = {
- .name = "mc13783",
- .bus = &spi_bus_type,
- .owner = THIS_MODULE,
- },
- .probe = mc13783_probe,
- .remove = __devexit_p(mc13783_remove),
-};
-
-static int __init mc13783_init(void)
-{
- return spi_register_driver(&mc13783_driver);
-}
-subsys_initcall(mc13783_init);
-
-static void __exit mc13783_exit(void)
-{
- spi_unregister_driver(&mc13783_driver);
-}
-module_exit(mc13783_exit);
-
-MODULE_DESCRIPTION("Core driver for Freescale MC13783 PMIC");
-MODULE_AUTHOR("Uwe Kleine-Koenig <u.kleine-koenig@pengutronix.de>");
-MODULE_LICENSE("GPL v2");
diff --git a/drivers/mfd/mc13xxx-core.c b/drivers/mfd/mc13xxx-core.c
new file mode 100644
index 000000000000..a2ac2ed6d64c
--- /dev/null
+++ b/drivers/mfd/mc13xxx-core.c
@@ -0,0 +1,840 @@
+/*
+ * Copyright 2009-2010 Pengutronix
+ * Uwe Kleine-Koenig <u.kleine-koenig@pengutronix.de>
+ *
+ * loosely based on an earlier driver that has
+ * Copyright 2009 Pengutronix, Sascha Hauer <s.hauer@pengutronix.de>
+ *
+ * This program is free software; you can redistribute it and/or modify it under
+ * the terms of the GNU General Public License version 2 as published by the
+ * Free Software Foundation.
+ */
+
+#include <linux/slab.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/mutex.h>
+#include <linux/interrupt.h>
+#include <linux/spi/spi.h>
+#include <linux/mfd/core.h>
+#include <linux/mfd/mc13xxx.h>
+
+struct mc13xxx {
+ struct spi_device *spidev;
+ struct mutex lock;
+ int irq;
+
+ irq_handler_t irqhandler[MC13XXX_NUM_IRQ];
+ void *irqdata[MC13XXX_NUM_IRQ];
+};
+
+struct mc13783 {
+ struct mc13xxx mc13xxx;
+
+ int adcflags;
+};
+
+struct mc13xxx *mc13783_to_mc13xxx(struct mc13783 *mc13783)
+{
+ return &mc13783->mc13xxx;
+}
+EXPORT_SYMBOL(mc13783_to_mc13xxx);
+
+#define MC13XXX_IRQSTAT0 0
+#define MC13XXX_IRQSTAT0_ADCDONEI (1 << 0)
+#define MC13XXX_IRQSTAT0_ADCBISDONEI (1 << 1)
+#define MC13XXX_IRQSTAT0_TSI (1 << 2)
+#define MC13783_IRQSTAT0_WHIGHI (1 << 3)
+#define MC13783_IRQSTAT0_WLOWI (1 << 4)
+#define MC13XXX_IRQSTAT0_CHGDETI (1 << 6)
+#define MC13783_IRQSTAT0_CHGOVI (1 << 7)
+#define MC13XXX_IRQSTAT0_CHGREVI (1 << 8)
+#define MC13XXX_IRQSTAT0_CHGSHORTI (1 << 9)
+#define MC13XXX_IRQSTAT0_CCCVI (1 << 10)
+#define MC13XXX_IRQSTAT0_CHGCURRI (1 << 11)
+#define MC13XXX_IRQSTAT0_BPONI (1 << 12)
+#define MC13XXX_IRQSTAT0_LOBATLI (1 << 13)
+#define MC13XXX_IRQSTAT0_LOBATHI (1 << 14)
+#define MC13783_IRQSTAT0_UDPI (1 << 15)
+#define MC13783_IRQSTAT0_USBI (1 << 16)
+#define MC13783_IRQSTAT0_IDI (1 << 19)
+#define MC13783_IRQSTAT0_SE1I (1 << 21)
+#define MC13783_IRQSTAT0_CKDETI (1 << 22)
+#define MC13783_IRQSTAT0_UDMI (1 << 23)
+
+#define MC13XXX_IRQMASK0 1
+#define MC13XXX_IRQMASK0_ADCDONEM MC13XXX_IRQSTAT0_ADCDONEI
+#define MC13XXX_IRQMASK0_ADCBISDONEM MC13XXX_IRQSTAT0_ADCBISDONEI
+#define MC13XXX_IRQMASK0_TSM MC13XXX_IRQSTAT0_TSI
+#define MC13783_IRQMASK0_WHIGHM MC13783_IRQSTAT0_WHIGHI
+#define MC13783_IRQMASK0_WLOWM MC13783_IRQSTAT0_WLOWI
+#define MC13XXX_IRQMASK0_CHGDETM MC13XXX_IRQSTAT0_CHGDETI
+#define MC13783_IRQMASK0_CHGOVM MC13783_IRQSTAT0_CHGOVI
+#define MC13XXX_IRQMASK0_CHGREVM MC13XXX_IRQSTAT0_CHGREVI
+#define MC13XXX_IRQMASK0_CHGSHORTM MC13XXX_IRQSTAT0_CHGSHORTI
+#define MC13XXX_IRQMASK0_CCCVM MC13XXX_IRQSTAT0_CCCVI
+#define MC13XXX_IRQMASK0_CHGCURRM MC13XXX_IRQSTAT0_CHGCURRI
+#define MC13XXX_IRQMASK0_BPONM MC13XXX_IRQSTAT0_BPONI
+#define MC13XXX_IRQMASK0_LOBATLM MC13XXX_IRQSTAT0_LOBATLI
+#define MC13XXX_IRQMASK0_LOBATHM MC13XXX_IRQSTAT0_LOBATHI
+#define MC13783_IRQMASK0_UDPM MC13783_IRQSTAT0_UDPI
+#define MC13783_IRQMASK0_USBM MC13783_IRQSTAT0_USBI
+#define MC13783_IRQMASK0_IDM MC13783_IRQSTAT0_IDI
+#define MC13783_IRQMASK0_SE1M MC13783_IRQSTAT0_SE1I
+#define MC13783_IRQMASK0_CKDETM MC13783_IRQSTAT0_CKDETI
+#define MC13783_IRQMASK0_UDMM MC13783_IRQSTAT0_UDMI
+
+#define MC13XXX_IRQSTAT1 3
+#define MC13XXX_IRQSTAT1_1HZI (1 << 0)
+#define MC13XXX_IRQSTAT1_TODAI (1 << 1)
+#define MC13783_IRQSTAT1_ONOFD1I (1 << 3)
+#define MC13783_IRQSTAT1_ONOFD2I (1 << 4)
+#define MC13783_IRQSTAT1_ONOFD3I (1 << 5)
+#define MC13XXX_IRQSTAT1_SYSRSTI (1 << 6)
+#define MC13XXX_IRQSTAT1_RTCRSTI (1 << 7)
+#define MC13XXX_IRQSTAT1_PCI (1 << 8)
+#define MC13XXX_IRQSTAT1_WARMI (1 << 9)
+#define MC13XXX_IRQSTAT1_MEMHLDI (1 << 10)
+#define MC13783_IRQSTAT1_PWRRDYI (1 << 11)
+#define MC13XXX_IRQSTAT1_THWARNLI (1 << 12)
+#define MC13XXX_IRQSTAT1_THWARNHI (1 << 13)
+#define MC13XXX_IRQSTAT1_CLKI (1 << 14)
+#define MC13783_IRQSTAT1_SEMAFI (1 << 15)
+#define MC13783_IRQSTAT1_MC2BI (1 << 17)
+#define MC13783_IRQSTAT1_HSDETI (1 << 18)
+#define MC13783_IRQSTAT1_HSLI (1 << 19)
+#define MC13783_IRQSTAT1_ALSPTHI (1 << 20)
+#define MC13783_IRQSTAT1_AHSSHORTI (1 << 21)
+
+#define MC13XXX_IRQMASK1 4
+#define MC13XXX_IRQMASK1_1HZM MC13XXX_IRQSTAT1_1HZI
+#define MC13XXX_IRQMASK1_TODAM MC13XXX_IRQSTAT1_TODAI
+#define MC13783_IRQMASK1_ONOFD1M MC13783_IRQSTAT1_ONOFD1I
+#define MC13783_IRQMASK1_ONOFD2M MC13783_IRQSTAT1_ONOFD2I
+#define MC13783_IRQMASK1_ONOFD3M MC13783_IRQSTAT1_ONOFD3I
+#define MC13XXX_IRQMASK1_SYSRSTM MC13XXX_IRQSTAT1_SYSRSTI
+#define MC13XXX_IRQMASK1_RTCRSTM MC13XXX_IRQSTAT1_RTCRSTI
+#define MC13XXX_IRQMASK1_PCM MC13XXX_IRQSTAT1_PCI
+#define MC13XXX_IRQMASK1_WARMM MC13XXX_IRQSTAT1_WARMI
+#define MC13XXX_IRQMASK1_MEMHLDM MC13XXX_IRQSTAT1_MEMHLDI
+#define MC13783_IRQMASK1_PWRRDYM MC13783_IRQSTAT1_PWRRDYI
+#define MC13XXX_IRQMASK1_THWARNLM MC13XXX_IRQSTAT1_THWARNLI
+#define MC13XXX_IRQMASK1_THWARNHM MC13XXX_IRQSTAT1_THWARNHI
+#define MC13XXX_IRQMASK1_CLKM MC13XXX_IRQSTAT1_CLKI
+#define MC13783_IRQMASK1_SEMAFM MC13783_IRQSTAT1_SEMAFI
+#define MC13783_IRQMASK1_MC2BM MC13783_IRQSTAT1_MC2BI
+#define MC13783_IRQMASK1_HSDETM MC13783_IRQSTAT1_HSDETI
+#define MC13783_IRQMASK1_HSLM MC13783_IRQSTAT1_HSLI
+#define MC13783_IRQMASK1_ALSPTHM MC13783_IRQSTAT1_ALSPTHI
+#define MC13783_IRQMASK1_AHSSHORTM MC13783_IRQSTAT1_AHSSHORTI
+
+#define MC13XXX_REVISION 7
+#define MC13XXX_REVISION_REVMETAL (0x07 << 0)
+#define MC13XXX_REVISION_REVFULL (0x03 << 3)
+#define MC13XXX_REVISION_ICID (0x07 << 6)
+#define MC13XXX_REVISION_FIN (0x03 << 9)
+#define MC13XXX_REVISION_FAB (0x03 << 11)
+#define MC13XXX_REVISION_ICIDCODE (0x3f << 13)
+
+#define MC13783_ADC1 44
+#define MC13783_ADC1_ADEN (1 << 0)
+#define MC13783_ADC1_RAND (1 << 1)
+#define MC13783_ADC1_ADSEL (1 << 3)
+#define MC13783_ADC1_ASC (1 << 20)
+#define MC13783_ADC1_ADTRIGIGN (1 << 21)
+
+#define MC13783_ADC2 45
+
+#define MC13XXX_NUMREGS 0x3f
+
+void mc13xxx_lock(struct mc13xxx *mc13xxx)
+{
+ if (!mutex_trylock(&mc13xxx->lock)) {
+ dev_dbg(&mc13xxx->spidev->dev, "wait for %s from %pf\n",
+ __func__, __builtin_return_address(0));
+
+ mutex_lock(&mc13xxx->lock);
+ }
+ dev_dbg(&mc13xxx->spidev->dev, "%s from %pf\n",
+ __func__, __builtin_return_address(0));
+}
+EXPORT_SYMBOL(mc13xxx_lock);
+
+void mc13xxx_unlock(struct mc13xxx *mc13xxx)
+{
+ dev_dbg(&mc13xxx->spidev->dev, "%s from %pf\n",
+ __func__, __builtin_return_address(0));
+ mutex_unlock(&mc13xxx->lock);
+}
+EXPORT_SYMBOL(mc13xxx_unlock);
+
+#define MC13XXX_REGOFFSET_SHIFT 25
+int mc13xxx_reg_read(struct mc13xxx *mc13xxx, unsigned int offset, u32 *val)
+{
+ struct spi_transfer t;
+ struct spi_message m;
+ int ret;
+
+ BUG_ON(!mutex_is_locked(&mc13xxx->lock));
+
+ if (offset > MC13XXX_NUMREGS)
+ return -EINVAL;
+
+ *val = offset << MC13XXX_REGOFFSET_SHIFT;
+
+ memset(&t, 0, sizeof(t));
+
+ t.tx_buf = val;
+ t.rx_buf = val;
+ t.len = sizeof(u32);
+
+ spi_message_init(&m);
+ spi_message_add_tail(&t, &m);
+
+ ret = spi_sync(mc13xxx->spidev, &m);
+
+ /* error in message.status implies error return from spi_sync */
+ BUG_ON(!ret && m.status);
+
+ if (ret)
+ return ret;
+
+ *val &= 0xffffff;
+
+ dev_vdbg(&mc13xxx->spidev->dev, "[0x%02x] -> 0x%06x\n", offset, *val);
+
+ return 0;
+}
+EXPORT_SYMBOL(mc13xxx_reg_read);
+
+int mc13xxx_reg_write(struct mc13xxx *mc13xxx, unsigned int offset, u32 val)
+{
+ u32 buf;
+ struct spi_transfer t;
+ struct spi_message m;
+ int ret;
+
+ BUG_ON(!mutex_is_locked(&mc13xxx->lock));
+
+ dev_vdbg(&mc13xxx->spidev->dev, "[0x%02x] <- 0x%06x\n", offset, val);
+
+ if (offset > MC13XXX_NUMREGS || val > 0xffffff)
+ return -EINVAL;
+
+ buf = 1 << 31 | offset << MC13XXX_REGOFFSET_SHIFT | val;
+
+ memset(&t, 0, sizeof(t));
+
+ t.tx_buf = &buf;
+ t.rx_buf = &buf;
+ t.len = sizeof(u32);
+
+ spi_message_init(&m);
+ spi_message_add_tail(&t, &m);
+
+ ret = spi_sync(mc13xxx->spidev, &m);
+
+ BUG_ON(!ret && m.status);
+
+ if (ret)
+ return ret;
+
+ return 0;
+}
+EXPORT_SYMBOL(mc13xxx_reg_write);
+
+int mc13xxx_reg_rmw(struct mc13xxx *mc13xxx, unsigned int offset,
+ u32 mask, u32 val)
+{
+ int ret;
+ u32 valread;
+
+ BUG_ON(val & ~mask);
+
+ ret = mc13xxx_reg_read(mc13xxx, offset, &valread);
+ if (ret)
+ return ret;
+
+ valread = (valread & ~mask) | val;
+
+ return mc13xxx_reg_write(mc13xxx, offset, valread);
+}
+EXPORT_SYMBOL(mc13xxx_reg_rmw);
+
+int mc13xxx_irq_mask(struct mc13xxx *mc13xxx, int irq)
+{
+ int ret;
+ unsigned int offmask = irq < 24 ? MC13XXX_IRQMASK0 : MC13XXX_IRQMASK1;
+ u32 irqbit = 1 << (irq < 24 ? irq : irq - 24);
+ u32 mask;
+
+ if (irq < 0 || irq >= MC13XXX_NUM_IRQ)
+ return -EINVAL;
+
+ ret = mc13xxx_reg_read(mc13xxx, offmask, &mask);
+ if (ret)
+ return ret;
+
+ if (mask & irqbit)
+ /* already masked */
+ return 0;
+
+ return mc13xxx_reg_write(mc13xxx, offmask, mask | irqbit);
+}
+EXPORT_SYMBOL(mc13xxx_irq_mask);
+
+int mc13xxx_irq_unmask(struct mc13xxx *mc13xxx, int irq)
+{
+ int ret;
+ unsigned int offmask = irq < 24 ? MC13XXX_IRQMASK0 : MC13XXX_IRQMASK1;
+ u32 irqbit = 1 << (irq < 24 ? irq : irq - 24);
+ u32 mask;
+
+ if (irq < 0 || irq >= MC13XXX_NUM_IRQ)
+ return -EINVAL;
+
+ ret = mc13xxx_reg_read(mc13xxx, offmask, &mask);
+ if (ret)
+ return ret;
+
+ if (!(mask & irqbit))
+ /* already unmasked */
+ return 0;
+
+ return mc13xxx_reg_write(mc13xxx, offmask, mask & ~irqbit);
+}
+EXPORT_SYMBOL(mc13xxx_irq_unmask);
+
+int mc13xxx_irq_status(struct mc13xxx *mc13xxx, int irq,
+ int *enabled, int *pending)
+{
+ int ret;
+ unsigned int offmask = irq < 24 ? MC13XXX_IRQMASK0 : MC13XXX_IRQMASK1;
+ unsigned int offstat = irq < 24 ? MC13XXX_IRQSTAT0 : MC13XXX_IRQSTAT1;
+ u32 irqbit = 1 << (irq < 24 ? irq : irq - 24);
+
+ if (irq < 0 || irq >= MC13XXX_NUM_IRQ)
+ return -EINVAL;
+
+ if (enabled) {
+ u32 mask;
+
+ ret = mc13xxx_reg_read(mc13xxx, offmask, &mask);
+ if (ret)
+ return ret;
+
+ *enabled = mask & irqbit;
+ }
+
+ if (pending) {
+ u32 stat;
+
+ ret = mc13xxx_reg_read(mc13xxx, offstat, &stat);
+ if (ret)
+ return ret;
+
+ *pending = stat & irqbit;
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL(mc13xxx_irq_status);
+
+int mc13xxx_irq_ack(struct mc13xxx *mc13xxx, int irq)
+{
+ unsigned int offstat = irq < 24 ? MC13XXX_IRQSTAT0 : MC13XXX_IRQSTAT1;
+ unsigned int val = 1 << (irq < 24 ? irq : irq - 24);
+
+ BUG_ON(irq < 0 || irq >= MC13XXX_NUM_IRQ);
+
+ return mc13xxx_reg_write(mc13xxx, offstat, val);
+}
+EXPORT_SYMBOL(mc13xxx_irq_ack);
+
+int mc13xxx_irq_request_nounmask(struct mc13xxx *mc13xxx, int irq,
+ irq_handler_t handler, const char *name, void *dev)
+{
+ BUG_ON(!mutex_is_locked(&mc13xxx->lock));
+ BUG_ON(!handler);
+
+ if (irq < 0 || irq >= MC13XXX_NUM_IRQ)
+ return -EINVAL;
+
+ if (mc13xxx->irqhandler[irq])
+ return -EBUSY;
+
+ mc13xxx->irqhandler[irq] = handler;
+ mc13xxx->irqdata[irq] = dev;
+
+ return 0;
+}
+EXPORT_SYMBOL(mc13xxx_irq_request_nounmask);
+
+int mc13xxx_irq_request(struct mc13xxx *mc13xxx, int irq,
+ irq_handler_t handler, const char *name, void *dev)
+{
+ int ret;
+
+ ret = mc13xxx_irq_request_nounmask(mc13xxx, irq, handler, name, dev);
+ if (ret)
+ return ret;
+
+ ret = mc13xxx_irq_unmask(mc13xxx, irq);
+ if (ret) {
+ mc13xxx->irqhandler[irq] = NULL;
+ mc13xxx->irqdata[irq] = NULL;
+ return ret;
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL(mc13xxx_irq_request);
+
+int mc13xxx_irq_free(struct mc13xxx *mc13xxx, int irq, void *dev)
+{
+ int ret;
+ BUG_ON(!mutex_is_locked(&mc13xxx->lock));
+
+ if (irq < 0 || irq >= MC13XXX_NUM_IRQ || !mc13xxx->irqhandler[irq] ||
+ mc13xxx->irqdata[irq] != dev)
+ return -EINVAL;
+
+ ret = mc13xxx_irq_mask(mc13xxx, irq);
+ if (ret)
+ return ret;
+
+ mc13xxx->irqhandler[irq] = NULL;
+ mc13xxx->irqdata[irq] = NULL;
+
+ return 0;
+}
+EXPORT_SYMBOL(mc13xxx_irq_free);
+
+static inline irqreturn_t mc13xxx_irqhandler(struct mc13xxx *mc13xxx, int irq)
+{
+ return mc13xxx->irqhandler[irq](irq, mc13xxx->irqdata[irq]);
+}
+
+/*
+ * returns: number of handled irqs or negative error
+ * locking: holds mc13xxx->lock
+ */
+static int mc13xxx_irq_handle(struct mc13xxx *mc13xxx,
+ unsigned int offstat, unsigned int offmask, int baseirq)
+{
+ u32 stat, mask;
+ int ret = mc13xxx_reg_read(mc13xxx, offstat, &stat);
+ int num_handled = 0;
+
+ if (ret)
+ return ret;
+
+ ret = mc13xxx_reg_read(mc13xxx, offmask, &mask);
+ if (ret)
+ return ret;
+
+ while (stat & ~mask) {
+ int irq = __ffs(stat & ~mask);
+
+ stat &= ~(1 << irq);
+
+ if (likely(mc13xxx->irqhandler[baseirq + irq])) {
+ irqreturn_t handled;
+
+ handled = mc13xxx_irqhandler(mc13xxx, baseirq + irq);
+ if (handled == IRQ_HANDLED)
+ num_handled++;
+ } else {
+ dev_err(&mc13xxx->spidev->dev,
+ "BUG: irq %u but no handler\n",
+ baseirq + irq);
+
+ mask |= 1 << irq;
+
+ ret = mc13xxx_reg_write(mc13xxx, offmask, mask);
+ }
+ }
+
+ return num_handled;
+}
+
+static irqreturn_t mc13xxx_irq_thread(int irq, void *data)
+{
+ struct mc13xxx *mc13xxx = data;
+ irqreturn_t ret;
+ int handled = 0;
+
+ mc13xxx_lock(mc13xxx);
+
+ ret = mc13xxx_irq_handle(mc13xxx, MC13XXX_IRQSTAT0,
+ MC13XXX_IRQMASK0, 0);
+ if (ret > 0)
+ handled = 1;
+
+ ret = mc13xxx_irq_handle(mc13xxx, MC13XXX_IRQSTAT1,
+ MC13XXX_IRQMASK1, 24);
+ if (ret > 0)
+ handled = 1;
+
+ mc13xxx_unlock(mc13xxx);
+
+ return IRQ_RETVAL(handled);
+}
+
+enum mc13xxx_id {
+ MC13XXX_ID_MC13783,
+ MC13XXX_ID_MC13892,
+ MC13XXX_ID_INVALID,
+};
+
+const char *mc13xxx_chipname[] = {
+ [MC13XXX_ID_MC13783] = "mc13783",
+ [MC13XXX_ID_MC13892] = "mc13892",
+};
+
+#define maskval(reg, mask) (((reg) & (mask)) >> __ffs(mask))
+static int mc13xxx_identify(struct mc13xxx *mc13xxx, enum mc13xxx_id *id)
+{
+ u32 icid;
+ u32 revision;
+ const char *name;
+ int ret;
+
+ ret = mc13xxx_reg_read(mc13xxx, 46, &icid);
+ if (ret)
+ return ret;
+
+ icid = (icid >> 6) & 0x7;
+
+ switch (icid) {
+ case 2:
+ *id = MC13XXX_ID_MC13783;
+ name = "mc13783";
+ break;
+ case 7:
+ *id = MC13XXX_ID_MC13892;
+ name = "mc13892";
+ break;
+ default:
+ *id = MC13XXX_ID_INVALID;
+ break;
+ }
+
+ if (*id == MC13XXX_ID_MC13783 || *id == MC13XXX_ID_MC13892) {
+ ret = mc13xxx_reg_read(mc13xxx, MC13XXX_REVISION, &revision);
+ if (ret)
+ return ret;
+
+ dev_info(&mc13xxx->spidev->dev, "%s: rev: %d.%d, "
+ "fin: %d, fab: %d, icid: %d/%d\n",
+ mc13xxx_chipname[*id],
+ maskval(revision, MC13XXX_REVISION_REVFULL),
+ maskval(revision, MC13XXX_REVISION_REVMETAL),
+ maskval(revision, MC13XXX_REVISION_FIN),
+ maskval(revision, MC13XXX_REVISION_FAB),
+ maskval(revision, MC13XXX_REVISION_ICID),
+ maskval(revision, MC13XXX_REVISION_ICIDCODE));
+ }
+
+ if (*id != MC13XXX_ID_INVALID) {
+ const struct spi_device_id *devid =
+ spi_get_device_id(mc13xxx->spidev);
+ if (!devid || devid->driver_data != *id)
+ dev_warn(&mc13xxx->spidev->dev, "device id doesn't "
+ "match auto detection!\n");
+ }
+
+ return 0;
+}
+
+static const char *mc13xxx_get_chipname(struct mc13xxx *mc13xxx)
+{
+ const struct spi_device_id *devid =
+ spi_get_device_id(mc13xxx->spidev);
+
+ if (!devid)
+ return NULL;
+
+ return mc13xxx_chipname[devid->driver_data];
+}
+
+#include <linux/mfd/mc13783.h>
+
+int mc13xxx_get_flags(struct mc13xxx *mc13xxx)
+{
+ struct mc13xxx_platform_data *pdata =
+ dev_get_platdata(&mc13xxx->spidev->dev);
+
+ return pdata->flags;
+}
+EXPORT_SYMBOL(mc13xxx_get_flags);
+
+#define MC13783_ADC1_CHAN0_SHIFT 5
+#define MC13783_ADC1_CHAN1_SHIFT 8
+
+struct mc13xxx_adcdone_data {
+ struct mc13xxx *mc13xxx;
+ struct completion done;
+};
+
+static irqreturn_t mc13783_handler_adcdone(int irq, void *data)
+{
+ struct mc13xxx_adcdone_data *adcdone_data = data;
+
+ mc13xxx_irq_ack(adcdone_data->mc13xxx, irq);
+
+ complete_all(&adcdone_data->done);
+
+ return IRQ_HANDLED;
+}
+
+#define MC13783_ADC_WORKING (1 << 0)
+
+int mc13783_adc_do_conversion(struct mc13783 *mc13783, unsigned int mode,
+ unsigned int channel, unsigned int *sample)
+{
+ struct mc13xxx *mc13xxx = &mc13783->mc13xxx;
+ u32 adc0, adc1, old_adc0;
+ int i, ret;
+ struct mc13xxx_adcdone_data adcdone_data = {
+ .mc13xxx = mc13xxx,
+ };
+ init_completion(&adcdone_data.done);
+
+ dev_dbg(&mc13xxx->spidev->dev, "%s\n", __func__);
+
+ mc13xxx_lock(mc13xxx);
+
+ if (mc13783->adcflags & MC13783_ADC_WORKING) {
+ ret = -EBUSY;
+ goto out;
+ }
+
+ mc13783->adcflags |= MC13783_ADC_WORKING;
+
+ mc13xxx_reg_read(mc13xxx, MC13783_ADC0, &old_adc0);
+
+ adc0 = MC13783_ADC0_ADINC1 | MC13783_ADC0_ADINC2;
+ adc1 = MC13783_ADC1_ADEN | MC13783_ADC1_ADTRIGIGN | MC13783_ADC1_ASC;
+
+ if (channel > 7)
+ adc1 |= MC13783_ADC1_ADSEL;
+
+ switch (mode) {
+ case MC13783_ADC_MODE_TS:
+ adc0 |= MC13783_ADC0_ADREFEN | MC13783_ADC0_TSMOD0 |
+ MC13783_ADC0_TSMOD1;
+ adc1 |= 4 << MC13783_ADC1_CHAN1_SHIFT;
+ break;
+
+ case MC13783_ADC_MODE_SINGLE_CHAN:
+ adc0 |= old_adc0 & MC13783_ADC0_TSMOD_MASK;
+ adc1 |= (channel & 0x7) << MC13783_ADC1_CHAN0_SHIFT;
+ adc1 |= MC13783_ADC1_RAND;
+ break;
+
+ case MC13783_ADC_MODE_MULT_CHAN:
+ adc0 |= old_adc0 & MC13783_ADC0_TSMOD_MASK;
+ adc1 |= 4 << MC13783_ADC1_CHAN1_SHIFT;
+ break;
+
+ default:
+ mc13783_unlock(mc13783);
+ return -EINVAL;
+ }
+
+ dev_dbg(&mc13783->mc13xxx.spidev->dev, "%s: request irq\n", __func__);
+ mc13xxx_irq_request(mc13xxx, MC13783_IRQ_ADCDONE,
+ mc13783_handler_adcdone, __func__, &adcdone_data);
+ mc13xxx_irq_ack(mc13xxx, MC13783_IRQ_ADCDONE);
+
+ mc13xxx_reg_write(mc13xxx, MC13783_ADC0, adc0);
+ mc13xxx_reg_write(mc13xxx, MC13783_ADC1, adc1);
+
+ mc13xxx_unlock(mc13xxx);
+
+ ret = wait_for_completion_interruptible_timeout(&adcdone_data.done, HZ);
+
+ if (!ret)
+ ret = -ETIMEDOUT;
+
+ mc13xxx_lock(mc13xxx);
+
+ mc13xxx_irq_free(mc13xxx, MC13783_IRQ_ADCDONE, &adcdone_data);
+
+ if (ret > 0)
+ for (i = 0; i < 4; ++i) {
+ ret = mc13xxx_reg_read(mc13xxx,
+ MC13783_ADC2, &sample[i]);
+ if (ret)
+ break;
+ }
+
+ if (mode == MC13783_ADC_MODE_TS)
+ /* restore TSMOD */
+ mc13xxx_reg_write(mc13xxx, MC13783_ADC0, old_adc0);
+
+ mc13783->adcflags &= ~MC13783_ADC_WORKING;
+out:
+ mc13xxx_unlock(mc13xxx);
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(mc13783_adc_do_conversion);
+
+static int mc13xxx_add_subdevice_pdata(struct mc13xxx *mc13xxx,
+ const char *format, void *pdata, size_t pdata_size)
+{
+ char buf[30];
+ const char *name = mc13xxx_get_chipname(mc13xxx);
+
+ struct mfd_cell cell = {
+ .platform_data = pdata,
+ .data_size = pdata_size,
+ };
+
+ /* there is no asnprintf in the kernel :-( */
+ if (snprintf(buf, sizeof(buf), format, name) > sizeof(buf))
+ return -E2BIG;
+
+ cell.name = kmemdup(buf, strlen(buf) + 1, GFP_KERNEL);
+ if (!cell.name)
+ return -ENOMEM;
+
+ return mfd_add_devices(&mc13xxx->spidev->dev, -1, &cell, 1, NULL, 0);
+}
+
+static int mc13xxx_add_subdevice(struct mc13xxx *mc13xxx, const char *format)
+{
+ return mc13xxx_add_subdevice_pdata(mc13xxx, format, NULL, 0);
+}
+
+static int mc13xxx_probe(struct spi_device *spi)
+{
+ struct mc13xxx *mc13xxx;
+ struct mc13xxx_platform_data *pdata = dev_get_platdata(&spi->dev);
+ enum mc13xxx_id id;
+ int ret;
+
+ mc13xxx = kzalloc(sizeof(*mc13xxx), GFP_KERNEL);
+ if (!mc13xxx)
+ return -ENOMEM;
+
+ dev_set_drvdata(&spi->dev, mc13xxx);
+ spi->mode = SPI_MODE_0 | SPI_CS_HIGH;
+ spi->bits_per_word = 32;
+ spi_setup(spi);
+
+ mc13xxx->spidev = spi;
+
+ mutex_init(&mc13xxx->lock);
+ mc13xxx_lock(mc13xxx);
+
+ ret = mc13xxx_identify(mc13xxx, &id);
+ if (ret || id == MC13XXX_ID_INVALID)
+ goto err_revision;
+
+ /* mask all irqs */
+ ret = mc13xxx_reg_write(mc13xxx, MC13XXX_IRQMASK0, 0x00ffffff);
+ if (ret)
+ goto err_mask;
+
+ ret = mc13xxx_reg_write(mc13xxx, MC13XXX_IRQMASK1, 0x00ffffff);
+ if (ret)
+ goto err_mask;
+
+ ret = request_threaded_irq(spi->irq, NULL, mc13xxx_irq_thread,
+ IRQF_ONESHOT | IRQF_TRIGGER_HIGH, "mc13xxx", mc13xxx);
+
+ if (ret) {
+err_mask:
+err_revision:
+ mutex_unlock(&mc13xxx->lock);
+ dev_set_drvdata(&spi->dev, NULL);
+ kfree(mc13xxx);
+ return ret;
+ }
+
+ mc13xxx_unlock(mc13xxx);
+
+ if (pdata->flags & MC13XXX_USE_ADC)
+ mc13xxx_add_subdevice(mc13xxx, "%s-adc");
+
+ if (pdata->flags & MC13XXX_USE_CODEC)
+ mc13xxx_add_subdevice(mc13xxx, "%s-codec");
+
+ if (pdata->flags & MC13XXX_USE_REGULATOR) {
+ struct mc13xxx_regulator_platform_data regulator_pdata = {
+ .num_regulators = pdata->num_regulators,
+ .regulators = pdata->regulators,
+ };
+
+ mc13xxx_add_subdevice_pdata(mc13xxx, "%s-regulator",
+ &regulator_pdata, sizeof(regulator_pdata));
+ }
+
+ if (pdata->flags & MC13XXX_USE_RTC)
+ mc13xxx_add_subdevice(mc13xxx, "%s-rtc");
+
+ if (pdata->flags & MC13XXX_USE_TOUCHSCREEN)
+ mc13xxx_add_subdevice(mc13xxx, "%s-ts");
+
+ if (pdata->flags & MC13XXX_USE_LED) {
+ mc13xxx_add_subdevice_pdata(mc13xxx, "%s-led",
+ pdata->leds, sizeof(*pdata->leds));
+ }
+
+ return 0;
+}
+
+static int __devexit mc13xxx_remove(struct spi_device *spi)
+{
+ struct mc13xxx *mc13xxx = dev_get_drvdata(&spi->dev);
+
+ free_irq(mc13xxx->spidev->irq, mc13xxx);
+
+ mfd_remove_devices(&spi->dev);
+
+ kfree(mc13xxx);
+
+ return 0;
+}
+
+static const struct spi_device_id mc13xxx_device_id[] = {
+ {
+ .name = "mc13783",
+ .driver_data = MC13XXX_ID_MC13783,
+ }, {
+ .name = "mc13892",
+ .driver_data = MC13XXX_ID_MC13892,
+ }, {
+ /* sentinel */
+ }
+};
+
+static struct spi_driver mc13xxx_driver = {
+ .id_table = mc13xxx_device_id,
+ .driver = {
+ .name = "mc13xxx",
+ .bus = &spi_bus_type,
+ .owner = THIS_MODULE,
+ },
+ .probe = mc13xxx_probe,
+ .remove = __devexit_p(mc13xxx_remove),
+};
+
+static int __init mc13xxx_init(void)
+{
+ return spi_register_driver(&mc13xxx_driver);
+}
+subsys_initcall(mc13xxx_init);
+
+static void __exit mc13xxx_exit(void)
+{
+ spi_unregister_driver(&mc13xxx_driver);
+}
+module_exit(mc13xxx_exit);
+
+MODULE_DESCRIPTION("Core driver for Freescale MC13XXX PMIC");
+MODULE_AUTHOR("Uwe Kleine-Koenig <u.kleine-koenig@pengutronix.de>");
+MODULE_LICENSE("GPL v2");
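
For orientation only (an illustrative sketch, not part of the diff): an MFD subdevice can consume the helpers exported by mc13xxx-core.c above roughly as below. The subdevice name, register offset and IRQ index are placeholders; the declarations are assumed to come from <linux/mfd/mc13xxx.h>, and the core handle is reached through the parent device's drvdata because the probe above stores it with dev_set_drvdata() on the SPI device before registering the cells.

#include <linux/interrupt.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/mfd/mc13xxx.h>

/* hypothetical register offset and IRQ index, purely for illustration */
#define EXAMPLE_REG	7
#define EXAMPLE_IRQ	1

static irqreturn_t example_irq_handler(int irq, void *data)
{
	struct mc13xxx *mc13xxx = data;

	/* handlers run from mc13xxx_irq_thread() with the core lock held
	 * and are expected to ack the source themselves */
	mc13xxx_irq_ack(mc13xxx, irq);

	return IRQ_HANDLED;
}

static int example_probe(struct platform_device *pdev)
{
	/* the core keeps its handle in the parent SPI device's drvdata */
	struct mc13xxx *mc13xxx = dev_get_drvdata(pdev->dev.parent);
	u32 val;
	int ret;

	/* every register/IRQ accessor asserts that the core lock is held */
	mc13xxx_lock(mc13xxx);

	ret = mc13xxx_reg_read(mc13xxx, EXAMPLE_REG, &val);
	if (!ret)
		ret = mc13xxx_irq_request(mc13xxx, EXAMPLE_IRQ,
				example_irq_handler, "example", mc13xxx);

	mc13xxx_unlock(mc13xxx);

	return ret;
}
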
diff --git a/drivers/mfd/mfd-core.c b/drivers/mfd/mfd-core.c
index 1823a57b7d8f..ec99f681e773 100644
--- a/drivers/mfd/mfd-core.c
+++ b/drivers/mfd/mfd-core.c
@@ -38,10 +38,12 @@ static int mfd_add_device(struct device *parent, int id,
pdev->dev.parent = parent;
platform_set_drvdata(pdev, cell->driver_data);
- ret = platform_device_add_data(pdev,
- cell->platform_data, cell->data_size);
- if (ret)
- goto fail_res;
+ if (cell->data_size) {
+ ret = platform_device_add_data(pdev,
+ cell->platform_data, cell->data_size);
+ if (ret)
+ goto fail_res;
+ }
for (r = 0; r < cell->num_resources; r++) {
res[r].name = cell->resources[r].name;
@@ -65,9 +67,11 @@ static int mfd_add_device(struct device *parent, int id,
res[r].end = cell->resources[r].end;
}
- ret = acpi_check_resource_conflict(res);
- if (ret)
- goto fail_res;
+ if (!cell->ignore_resource_conflicts) {
+ ret = acpi_check_resource_conflict(res);
+ if (ret)
+ goto fail_res;
+ }
}
ret = platform_device_add_resources(pdev, res, cell->num_resources);
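
For reference, the two hunks above make a cell without platform data legal (data_size == 0 now skips platform_device_add_data()) and let a cell bypass the ACPI resource-conflict check. A hypothetical cell using both might look like the following sketch; the cell name and helper are illustrative, the ignore_resource_conflicts field is assumed to be a simple boolean flag, and the call mirrors the mfd_add_devices() usage in mc13xxx-core.c above.

#include <linux/device.h>
#include <linux/mfd/core.h>

static int example_register_cell(struct device *parent)
{
	struct mfd_cell cell = {
		.name = "example-subdev",
		/* no .platform_data/.data_size: platform_device_add_data() is skipped */
		.ignore_resource_conflicts = true,
	};

	/* same call pattern as mc13xxx_add_subdevice_pdata() above */
	return mfd_add_devices(parent, -1, &cell, 1, NULL, 0);
}
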
diff --git a/drivers/mfd/pcf50633-core.c b/drivers/mfd/pcf50633-core.c
index 23e585527285..501ce13b693e 100644
--- a/drivers/mfd/pcf50633-core.c
+++ b/drivers/mfd/pcf50633-core.c
@@ -25,13 +25,6 @@
#include <linux/mfd/pcf50633/core.h>
-int pcf50633_irq_init(struct pcf50633 *pcf, int irq);
-void pcf50633_irq_free(struct pcf50633 *pcf);
-#ifdef CONFIG_PM
-int pcf50633_irq_suspend(struct pcf50633 *pcf);
-int pcf50633_irq_resume(struct pcf50633 *pcf);
-#endif
-
static int __pcf50633_read(struct pcf50633 *pcf, u8 reg, int num, u8 *data)
{
int ret;
@@ -346,12 +339,14 @@ static int __devexit pcf50633_remove(struct i2c_client *client)
struct pcf50633 *pcf = i2c_get_clientdata(client);
int i;
+ sysfs_remove_group(&client->dev.kobj, &pcf_attr_group);
pcf50633_irq_free(pcf);
platform_device_unregister(pcf->input_pdev);
platform_device_unregister(pcf->rtc_pdev);
platform_device_unregister(pcf->mbc_pdev);
platform_device_unregister(pcf->adc_pdev);
+ platform_device_unregister(pcf->bl_pdev);
for (i = 0; i < PCF50633_NUM_REGULATORS; i++)
platform_device_unregister(pcf->regulator_pdev[i]);
diff --git a/drivers/mfd/sh_mobile_sdhi.c b/drivers/mfd/sh_mobile_sdhi.c
index cd164595f08a..f1714f93af9d 100644
--- a/drivers/mfd/sh_mobile_sdhi.c
+++ b/drivers/mfd/sh_mobile_sdhi.c
@@ -65,7 +65,18 @@ static void sh_mobile_sdhi_set_pwr(struct platform_device *tmio, int state)
p->set_pwr(pdev, state);
}
-static int __init sh_mobile_sdhi_probe(struct platform_device *pdev)
+static int sh_mobile_sdhi_get_cd(struct platform_device *tmio)
+{
+ struct platform_device *pdev = to_platform_device(tmio->dev.parent);
+ struct sh_mobile_sdhi_info *p = pdev->dev.platform_data;
+
+ if (p && p->get_cd)
+ return p->get_cd(pdev);
+ else
+ return -ENOSYS;
+}
+
+static int __devinit sh_mobile_sdhi_probe(struct platform_device *pdev)
{
struct sh_mobile_sdhi *priv;
struct tmio_mmc_data *mmc_data;
@@ -106,12 +117,20 @@ static int __init sh_mobile_sdhi_probe(struct platform_device *pdev)
mmc_data->hclk = clk_get_rate(priv->clk);
mmc_data->set_pwr = sh_mobile_sdhi_set_pwr;
+ mmc_data->get_cd = sh_mobile_sdhi_get_cd;
mmc_data->capabilities = MMC_CAP_MMC_HIGHSPEED;
if (p) {
mmc_data->flags = p->tmio_flags;
mmc_data->ocr_mask = p->tmio_ocr_mask;
+ mmc_data->capabilities |= p->tmio_caps;
}
+ /*
+ * All SDHI blocks support 2-byte and larger block sizes in 4-bit
+ * bus width mode.
+ */
+ mmc_data->flags |= TMIO_MMC_BLKSZ_2BYTES;
+
if (p && p->dma_slave_tx >= 0 && p->dma_slave_rx >= 0) {
priv->param_tx.slave_id = p->dma_slave_tx;
priv->param_rx.slave_id = p->dma_slave_rx;
diff --git a/drivers/mfd/stmpe.c b/drivers/mfd/stmpe.c
index 0754c5e91995..b11487f1e1cb 100644
--- a/drivers/mfd/stmpe.c
+++ b/drivers/mfd/stmpe.c
@@ -873,6 +873,28 @@ static int __devinit stmpe_devices_init(struct stmpe *stmpe)
return ret;
}
+#ifdef CONFIG_PM
+static int stmpe_suspend(struct device *dev)
+{
+ struct i2c_client *i2c = to_i2c_client(dev);
+
+ if (device_may_wakeup(&i2c->dev))
+ enable_irq_wake(i2c->irq);
+
+ return 0;
+}
+
+static int stmpe_resume(struct device *dev)
+{
+ struct i2c_client *i2c = to_i2c_client(dev);
+
+ if (device_may_wakeup(&i2c->dev))
+ disable_irq_wake(i2c->irq);
+
+ return 0;
+}
+#endif
+
static int __devinit stmpe_probe(struct i2c_client *i2c,
const struct i2c_device_id *id)
{
@@ -960,9 +982,19 @@ static const struct i2c_device_id stmpe_id[] = {
};
MODULE_DEVICE_TABLE(i2c, stmpe_id);
+#ifdef CONFIG_PM
+static const struct dev_pm_ops stmpe_dev_pm_ops = {
+ .suspend = stmpe_suspend,
+ .resume = stmpe_resume,
+};
+#endif
+
static struct i2c_driver stmpe_driver = {
.driver.name = "stmpe",
.driver.owner = THIS_MODULE,
+#ifdef CONFIG_PM
+ .driver.pm = &stmpe_dev_pm_ops,
+#endif
.probe = stmpe_probe,
.remove = __devexit_p(stmpe_remove),
.id_table = stmpe_id,
diff --git a/drivers/mfd/tc6393xb.c b/drivers/mfd/tc6393xb.c
index ef6c42c8917a..1ea80d8ad915 100644
--- a/drivers/mfd/tc6393xb.c
+++ b/drivers/mfd/tc6393xb.c
@@ -155,7 +155,7 @@ static struct resource __devinitdata tc6393xb_nand_resources[] = {
},
};
-static struct resource __devinitdata tc6393xb_mmc_resources[] = {
+static struct resource tc6393xb_mmc_resources[] = {
{
.start = 0x800,
.end = 0x9ff,
diff --git a/drivers/mfd/timberdale.c b/drivers/mfd/timberdale.c
index ac5995026c88..727f62c15a60 100644
--- a/drivers/mfd/timberdale.c
+++ b/drivers/mfd/timberdale.c
@@ -43,6 +43,8 @@
#include <linux/timb_dma.h>
+#include <linux/ks8842.h>
+
#include "timberdale.h"
#define DRIVER_NAME "timberdale"
@@ -161,6 +163,12 @@ static const __devinitconst struct resource timberdale_spi_resources[] = {
},
};
+static __devinitdata struct ks8842_platform_data
+ timberdale_ks8842_platform_data = {
+ .rx_dma_channel = DMA_ETH_RX,
+ .tx_dma_channel = DMA_ETH_TX
+};
+
static const __devinitconst struct resource timberdale_eth_resources[] = {
{
.start = ETHOFFSET,
@@ -389,6 +397,8 @@ static __devinitdata struct mfd_cell timberdale_cells_bar0_cfg0[] = {
.name = "ks8842",
.num_resources = ARRAY_SIZE(timberdale_eth_resources),
.resources = timberdale_eth_resources,
+ .platform_data = &timberdale_ks8842_platform_data,
+ .data_size = sizeof(timberdale_ks8842_platform_data)
},
};
@@ -447,6 +457,8 @@ static __devinitdata struct mfd_cell timberdale_cells_bar0_cfg1[] = {
.name = "ks8842",
.num_resources = ARRAY_SIZE(timberdale_eth_resources),
.resources = timberdale_eth_resources,
+ .platform_data = &timberdale_ks8842_platform_data,
+ .data_size = sizeof(timberdale_ks8842_platform_data)
},
};
@@ -538,6 +550,8 @@ static __devinitdata struct mfd_cell timberdale_cells_bar0_cfg3[] = {
.name = "ks8842",
.num_resources = ARRAY_SIZE(timberdale_eth_resources),
.resources = timberdale_eth_resources,
+ .platform_data = &timberdale_ks8842_platform_data,
+ .data_size = sizeof(timberdale_ks8842_platform_data)
},
};
diff --git a/drivers/mfd/tps6507x.c b/drivers/mfd/tps6507x.c
index fc0197649281..33ba7723c967 100644
--- a/drivers/mfd/tps6507x.c
+++ b/drivers/mfd/tps6507x.c
@@ -68,7 +68,7 @@ static int tps6507x_i2c_write_device(struct tps6507x_dev *tps6507x, char reg,
u8 msg[TPS6507X_MAX_REGISTER + 1];
int ret;
- if (bytes > (TPS6507X_MAX_REGISTER + 1))
+ if (bytes > TPS6507X_MAX_REGISTER)
return -EINVAL;
msg[0] = reg;
diff --git a/drivers/mfd/tps6586x.c b/drivers/mfd/tps6586x.c
index 4cde31e6a252..b4931ab34929 100644
--- a/drivers/mfd/tps6586x.c
+++ b/drivers/mfd/tps6586x.c
@@ -15,6 +15,8 @@
* published by the Free Software Foundation.
*/
+#include <linux/interrupt.h>
+#include <linux/irq.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/mutex.h>
@@ -29,9 +31,64 @@
#define TPS6586X_GPIOSET1 0x5d
#define TPS6586X_GPIOSET2 0x5e
+/* interrupt control registers */
+#define TPS6586X_INT_ACK1 0xb5
+#define TPS6586X_INT_ACK2 0xb6
+#define TPS6586X_INT_ACK3 0xb7
+#define TPS6586X_INT_ACK4 0xb8
+
+/* interrupt mask registers */
+#define TPS6586X_INT_MASK1 0xb0
+#define TPS6586X_INT_MASK2 0xb1
+#define TPS6586X_INT_MASK3 0xb2
+#define TPS6586X_INT_MASK4 0xb3
+#define TPS6586X_INT_MASK5 0xb4
+
/* device id */
#define TPS6586X_VERSIONCRC 0xcd
#define TPS658621A_VERSIONCRC 0x15
+#define TPS658621C_VERSIONCRC 0x2c
+
+struct tps6586x_irq_data {
+ u8 mask_reg;
+ u8 mask_mask;
+};
+
+#define TPS6586X_IRQ(_reg, _mask) \
+ { \
+ .mask_reg = (_reg) - TPS6586X_INT_MASK1, \
+ .mask_mask = (_mask), \
+ }
+
+static const struct tps6586x_irq_data tps6586x_irqs[] = {
+ [TPS6586X_INT_PLDO_0] = TPS6586X_IRQ(TPS6586X_INT_MASK1, 1 << 0),
+ [TPS6586X_INT_PLDO_1] = TPS6586X_IRQ(TPS6586X_INT_MASK1, 1 << 1),
+ [TPS6586X_INT_PLDO_2] = TPS6586X_IRQ(TPS6586X_INT_MASK1, 1 << 2),
+ [TPS6586X_INT_PLDO_3] = TPS6586X_IRQ(TPS6586X_INT_MASK1, 1 << 3),
+ [TPS6586X_INT_PLDO_4] = TPS6586X_IRQ(TPS6586X_INT_MASK1, 1 << 4),
+ [TPS6586X_INT_PLDO_5] = TPS6586X_IRQ(TPS6586X_INT_MASK1, 1 << 5),
+ [TPS6586X_INT_PLDO_6] = TPS6586X_IRQ(TPS6586X_INT_MASK1, 1 << 6),
+ [TPS6586X_INT_PLDO_7] = TPS6586X_IRQ(TPS6586X_INT_MASK1, 1 << 7),
+ [TPS6586X_INT_COMP_DET] = TPS6586X_IRQ(TPS6586X_INT_MASK4, 1 << 0),
+ [TPS6586X_INT_ADC] = TPS6586X_IRQ(TPS6586X_INT_MASK2, 1 << 1),
+ [TPS6586X_INT_PLDO_8] = TPS6586X_IRQ(TPS6586X_INT_MASK2, 1 << 2),
+ [TPS6586X_INT_PLDO_9] = TPS6586X_IRQ(TPS6586X_INT_MASK2, 1 << 3),
+ [TPS6586X_INT_PSM_0] = TPS6586X_IRQ(TPS6586X_INT_MASK2, 1 << 4),
+ [TPS6586X_INT_PSM_1] = TPS6586X_IRQ(TPS6586X_INT_MASK2, 1 << 5),
+ [TPS6586X_INT_PSM_2] = TPS6586X_IRQ(TPS6586X_INT_MASK2, 1 << 6),
+ [TPS6586X_INT_PSM_3] = TPS6586X_IRQ(TPS6586X_INT_MASK2, 1 << 7),
+ [TPS6586X_INT_RTC_ALM1] = TPS6586X_IRQ(TPS6586X_INT_MASK5, 1 << 4),
+ [TPS6586X_INT_ACUSB_OVP] = TPS6586X_IRQ(TPS6586X_INT_MASK5, 0x03),
+ [TPS6586X_INT_USB_DET] = TPS6586X_IRQ(TPS6586X_INT_MASK5, 1 << 2),
+ [TPS6586X_INT_AC_DET] = TPS6586X_IRQ(TPS6586X_INT_MASK5, 1 << 3),
+ [TPS6586X_INT_BAT_DET] = TPS6586X_IRQ(TPS6586X_INT_MASK3, 1 << 0),
+ [TPS6586X_INT_CHG_STAT] = TPS6586X_IRQ(TPS6586X_INT_MASK4, 0xfc),
+ [TPS6586X_INT_CHG_TEMP] = TPS6586X_IRQ(TPS6586X_INT_MASK3, 0x06),
+ [TPS6586X_INT_PP] = TPS6586X_IRQ(TPS6586X_INT_MASK3, 0xf0),
+ [TPS6586X_INT_RESUME] = TPS6586X_IRQ(TPS6586X_INT_MASK5, 1 << 5),
+ [TPS6586X_INT_LOW_SYS] = TPS6586X_IRQ(TPS6586X_INT_MASK5, 1 << 6),
+ [TPS6586X_INT_RTC_ALM2] = TPS6586X_IRQ(TPS6586X_INT_MASK4, 1 << 1),
+};
struct tps6586x {
struct mutex lock;
@@ -39,6 +96,12 @@ struct tps6586x {
struct i2c_client *client;
struct gpio_chip gpio;
+ struct irq_chip irq_chip;
+ struct mutex irq_lock;
+ int irq_base;
+ u32 irq_en;
+ u8 mask_cache[5];
+ u8 mask_reg[5];
};
static inline int __tps6586x_read(struct i2c_client *client,
@@ -262,6 +325,129 @@ static int tps6586x_remove_subdevs(struct tps6586x *tps6586x)
return device_for_each_child(tps6586x->dev, NULL, __remove_subdev);
}
+static void tps6586x_irq_lock(unsigned int irq)
+{
+ struct tps6586x *tps6586x = get_irq_chip_data(irq);
+
+ mutex_lock(&tps6586x->irq_lock);
+}
+
+static void tps6586x_irq_enable(unsigned int irq)
+{
+ struct tps6586x *tps6586x = get_irq_chip_data(irq);
+ unsigned int __irq = irq - tps6586x->irq_base;
+ const struct tps6586x_irq_data *data = &tps6586x_irqs[__irq];
+
+ tps6586x->mask_reg[data->mask_reg] &= ~data->mask_mask;
+ tps6586x->irq_en |= (1 << __irq);
+}
+
+static void tps6586x_irq_disable(unsigned int irq)
+{
+ struct tps6586x *tps6586x = get_irq_chip_data(irq);
+
+ unsigned int __irq = irq - tps6586x->irq_base;
+ const struct tps6586x_irq_data *data = &tps6586x_irqs[__irq];
+
+ tps6586x->mask_reg[data->mask_reg] |= data->mask_mask;
+ tps6586x->irq_en &= ~(1 << __irq);
+}
+
+static void tps6586x_irq_sync_unlock(unsigned int irq)
+{
+ struct tps6586x *tps6586x = get_irq_chip_data(irq);
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(tps6586x->mask_reg); i++) {
+ if (tps6586x->mask_reg[i] != tps6586x->mask_cache[i]) {
+ if (!WARN_ON(tps6586x_write(tps6586x->dev,
+ TPS6586X_INT_MASK1 + i,
+ tps6586x->mask_reg[i])))
+ tps6586x->mask_cache[i] = tps6586x->mask_reg[i];
+ }
+ }
+
+ mutex_unlock(&tps6586x->irq_lock);
+}
+
+static irqreturn_t tps6586x_irq(int irq, void *data)
+{
+ struct tps6586x *tps6586x = data;
+ u32 acks;
+ int ret = 0;
+
+ ret = tps6586x_reads(tps6586x->dev, TPS6586X_INT_ACK1,
+ sizeof(acks), (uint8_t *)&acks);
+
+ if (ret < 0) {
+ dev_err(tps6586x->dev, "failed to read interrupt status\n");
+ return IRQ_NONE;
+ }
+
+ acks = le32_to_cpu(acks);
+
+ while (acks) {
+ int i = __ffs(acks);
+
+ if (tps6586x->irq_en & (1 << i))
+ handle_nested_irq(tps6586x->irq_base + i);
+
+ acks &= ~(1 << i);
+ }
+
+ return IRQ_HANDLED;
+}
+
+static int __devinit tps6586x_irq_init(struct tps6586x *tps6586x, int irq,
+ int irq_base)
+{
+ int i, ret;
+ u8 tmp[4];
+
+ if (!irq_base) {
+ dev_warn(tps6586x->dev, "No interrupt support, IRQ base not set\n");
+ return -EINVAL;
+ }
+
+ mutex_init(&tps6586x->irq_lock);
+ for (i = 0; i < 5; i++) {
+ tps6586x->mask_cache[i] = 0xff;
+ tps6586x->mask_reg[i] = 0xff;
+ tps6586x_write(tps6586x->dev, TPS6586X_INT_MASK1 + i, 0xff);
+ }
+
+ tps6586x_reads(tps6586x->dev, TPS6586X_INT_ACK1, sizeof(tmp), tmp);
+
+ tps6586x->irq_base = irq_base;
+
+ tps6586x->irq_chip.name = "tps6586x";
+ tps6586x->irq_chip.enable = tps6586x_irq_enable;
+ tps6586x->irq_chip.disable = tps6586x_irq_disable;
+ tps6586x->irq_chip.bus_lock = tps6586x_irq_lock;
+ tps6586x->irq_chip.bus_sync_unlock = tps6586x_irq_sync_unlock;
+
+ for (i = 0; i < ARRAY_SIZE(tps6586x_irqs); i++) {
+ int __irq = i + tps6586x->irq_base;
+ set_irq_chip_data(__irq, tps6586x);
+ set_irq_chip_and_handler(__irq, &tps6586x->irq_chip,
+ handle_simple_irq);
+ set_irq_nested_thread(__irq, 1);
+#ifdef CONFIG_ARM
+ set_irq_flags(__irq, IRQF_VALID);
+#endif
+ }
+
+ ret = request_threaded_irq(irq, NULL, tps6586x_irq, IRQF_ONESHOT,
+ "tps6586x", tps6586x);
+
+ if (!ret) {
+ device_init_wakeup(tps6586x->dev, 1);
+ enable_irq_wake(irq);
+ }
+
+ return ret;
+}
+
static int __devinit tps6586x_add_subdevs(struct tps6586x *tps6586x,
struct tps6586x_platform_data *pdata)
{
@@ -273,13 +459,19 @@ static int __devinit tps6586x_add_subdevs(struct tps6586x *tps6586x,
subdev = &pdata->subdevs[i];
pdev = platform_device_alloc(subdev->name, subdev->id);
+ if (!pdev) {
+ ret = -ENOMEM;
+ goto failed;
+ }
pdev->dev.parent = tps6586x->dev;
pdev->dev.platform_data = subdev->platform_data;
ret = platform_device_add(pdev);
- if (ret)
+ if (ret) {
+ platform_device_put(pdev);
goto failed;
+ }
}
return 0;
@@ -306,7 +498,8 @@ static int __devinit tps6586x_i2c_probe(struct i2c_client *client,
return -EIO;
}
- if (ret != TPS658621A_VERSIONCRC) {
+ if ((ret != TPS658621A_VERSIONCRC) &&
+ (ret != TPS658621C_VERSIONCRC)) {
dev_err(&client->dev, "Unsupported chip ID: %x\n", ret);
return -ENODEV;
}
@@ -321,6 +514,15 @@ static int __devinit tps6586x_i2c_probe(struct i2c_client *client,
mutex_init(&tps6586x->lock);
+ if (client->irq) {
+ ret = tps6586x_irq_init(tps6586x, client->irq,
+ pdata->irq_base);
+ if (ret) {
+ dev_err(&client->dev, "IRQ init failed: %d\n", ret);
+ goto err_irq_init;
+ }
+ }
+
ret = tps6586x_add_subdevs(tps6586x, pdata);
if (ret) {
dev_err(&client->dev, "add devices failed: %d\n", ret);
@@ -332,12 +534,31 @@ static int __devinit tps6586x_i2c_probe(struct i2c_client *client,
return 0;
err_add_devs:
+ if (client->irq)
+ free_irq(client->irq, tps6586x);
+err_irq_init:
kfree(tps6586x);
return ret;
}
static int __devexit tps6586x_i2c_remove(struct i2c_client *client)
{
+ struct tps6586x *tps6586x = i2c_get_clientdata(client);
+ struct tps6586x_platform_data *pdata = client->dev.platform_data;
+ int ret;
+
+ if (client->irq)
+ free_irq(client->irq, tps6586x);
+
+ if (pdata->gpio_base) {
+ ret = gpiochip_remove(&tps6586x->gpio);
+ if (ret)
+ dev_err(&client->dev, "Can't remove gpio chip: %d\n",
+ ret);
+ }
+
+ tps6586x_remove_subdevs(tps6586x);
+ kfree(tps6586x);
return 0;
}
diff --git a/drivers/mfd/twl-core.c b/drivers/mfd/twl-core.c
index 720e099e506d..35275ba7096f 100644
--- a/drivers/mfd/twl-core.c
+++ b/drivers/mfd/twl-core.c
@@ -115,6 +115,12 @@
#define twl_has_codec() false
#endif
+#if defined(CONFIG_CHARGER_TWL4030) || defined(CONFIG_CHARGER_TWL4030_MODULE)
+#define twl_has_bci() true
+#else
+#define twl_has_bci() false
+#endif
+
/* Triton Core internal information (BEGIN) */
/* Last - for index max*/
@@ -202,12 +208,6 @@
/* Few power values */
#define R_CFG_BOOT 0x05
-#define R_PROTECT_KEY 0x0E
-
-/* access control values for R_PROTECT_KEY */
-#define KEY_UNLOCK1 0xce
-#define KEY_UNLOCK2 0xec
-#define KEY_LOCK 0x00
/* some fields in R_CFG_BOOT */
#define HFCLK_FREQ_19p2_MHZ (1 << 0)
@@ -255,7 +255,7 @@ struct twl_mapping {
unsigned char sid; /* Slave ID */
unsigned char base; /* base address */
};
-struct twl_mapping *twl_map;
+static struct twl_mapping *twl_map;
static struct twl_mapping twl4030_map[TWL4030_MODULE_LAST + 1] = {
/*
@@ -698,17 +698,17 @@ add_children(struct twl4030_platform_data *pdata, unsigned long features)
if (twl_has_codec() && pdata->codec && twl_class_is_4030()) {
sub_chip_id = twl_map[TWL_MODULE_AUDIO_VOICE].sid;
- child = add_child(sub_chip_id, "twl4030_codec",
+ child = add_child(sub_chip_id, "twl4030-audio",
pdata->codec, sizeof(*pdata->codec),
false, 0, 0);
if (IS_ERR(child))
return PTR_ERR(child);
}
- /* Phoenix*/
+ /* Phoenix codec driver is probed directly atm */
if (twl_has_codec() && pdata->codec && twl_class_is_6030()) {
sub_chip_id = twl_map[TWL_MODULE_AUDIO_VOICE].sid;
- child = add_child(sub_chip_id, "twl6040_codec",
+ child = add_child(sub_chip_id, "twl6040-codec",
pdata->codec, sizeof(*pdata->codec),
false, 0, 0);
if (IS_ERR(child))
@@ -832,6 +832,17 @@ add_children(struct twl4030_platform_data *pdata, unsigned long features)
return PTR_ERR(child);
}
+ if (twl_has_bci() && pdata->bci &&
+ !(features & (TPS_SUBSET | TWL5031))) {
+ child = add_child(3, "twl4030_bci",
+ pdata->bci, sizeof(*pdata->bci), false,
+ /* irq0 = CHG_PRES, irq1 = BCI */
+ pdata->irq_base + BCI_PRES_INTR_OFFSET,
+ pdata->irq_base + BCI_INTR_OFFSET);
+ if (IS_ERR(child))
+ return PTR_ERR(child);
+ }
+
return 0;
}
@@ -846,8 +857,8 @@ static inline int __init protect_pm_master(void)
{
int e = 0;
- e = twl_i2c_write_u8(TWL_MODULE_PM_MASTER, KEY_LOCK,
- R_PROTECT_KEY);
+ e = twl_i2c_write_u8(TWL4030_MODULE_PM_MASTER, 0,
+ TWL4030_PM_MASTER_PROTECT_KEY);
return e;
}
@@ -855,10 +866,13 @@ static inline int __init unprotect_pm_master(void)
{
int e = 0;
- e |= twl_i2c_write_u8(TWL_MODULE_PM_MASTER, KEY_UNLOCK1,
- R_PROTECT_KEY);
- e |= twl_i2c_write_u8(TWL_MODULE_PM_MASTER, KEY_UNLOCK2,
- R_PROTECT_KEY);
+ e |= twl_i2c_write_u8(TWL4030_MODULE_PM_MASTER,
+ TWL4030_PM_MASTER_KEY_CFG1,
+ TWL4030_PM_MASTER_PROTECT_KEY);
+ e |= twl_i2c_write_u8(TWL4030_MODULE_PM_MASTER,
+ TWL4030_PM_MASTER_KEY_CFG2,
+ TWL4030_PM_MASTER_PROTECT_KEY);
+
return e;
}
diff --git a/drivers/mfd/twl-core.h b/drivers/mfd/twl-core.h
new file mode 100644
index 000000000000..8c50a556e986
--- /dev/null
+++ b/drivers/mfd/twl-core.h
@@ -0,0 +1,10 @@
+#ifndef __TWL_CORE_H__
+#define __TWL_CORE_H__
+
+extern int twl6030_init_irq(int irq_num, unsigned irq_base, unsigned irq_end);
+extern int twl6030_exit_irq(void);
+extern int twl4030_init_irq(int irq_num, unsigned irq_base, unsigned irq_end);
+extern int twl4030_exit_irq(void);
+extern int twl4030_init_chip_irq(const char *chip);
+
+#endif /* __TWL_CORE_H__ */
diff --git a/drivers/mfd/twl4030-codec.c b/drivers/mfd/twl4030-codec.c
index add6f67d8032..9a4b196d6deb 100644
--- a/drivers/mfd/twl4030-codec.c
+++ b/drivers/mfd/twl4030-codec.c
@@ -207,14 +207,14 @@ static int __devinit twl4030_codec_probe(struct platform_device *pdev)
if (pdata->audio) {
cell = &codec->cells[childs];
- cell->name = "twl4030_codec_audio";
+ cell->name = "twl4030-codec";
cell->platform_data = pdata->audio;
cell->data_size = sizeof(*pdata->audio);
childs++;
}
if (pdata->vibra) {
cell = &codec->cells[childs];
- cell->name = "twl4030_codec_vibra";
+ cell->name = "twl4030-vibra";
cell->platform_data = pdata->vibra;
cell->data_size = sizeof(*pdata->vibra);
childs++;
@@ -249,14 +249,14 @@ static int __devexit twl4030_codec_remove(struct platform_device *pdev)
return 0;
}
-MODULE_ALIAS("platform:twl4030_codec");
+MODULE_ALIAS("platform:twl4030-audio");
static struct platform_driver twl4030_codec_driver = {
.probe = twl4030_codec_probe,
.remove = __devexit_p(twl4030_codec_remove),
.driver = {
.owner = THIS_MODULE,
- .name = "twl4030_codec",
+ .name = "twl4030-audio",
},
};
diff --git a/drivers/mfd/twl4030-irq.c b/drivers/mfd/twl4030-irq.c
index b9fda7018cef..5d3a1478004b 100644
--- a/drivers/mfd/twl4030-irq.c
+++ b/drivers/mfd/twl4030-irq.c
@@ -35,6 +35,7 @@
#include <linux/i2c/twl.h>
+#include "twl-core.h"
/*
* TWL4030 IRQ handling has two stages in hardware, and thus in software.
@@ -144,6 +145,7 @@ static const struct sih sih_modules_twl4030[6] = {
.name = "bci",
.module = TWL4030_MODULE_INTERRUPTS,
.control_offset = TWL4030_INTERRUPTS_BCISIHCTRL,
+ .set_cor = true,
.bits = 12,
.bytes_ixr = 2,
.edr_offset = TWL4030_INTERRUPTS_BCIEDR1,
@@ -408,7 +410,7 @@ static int twl4030_init_sih_modules(unsigned line)
* set Clear-On-Read (COR) bit.
*
* NOTE that sometimes COR polarity is documented as being
- * inverted: for MADC and BCI, COR=1 means "clear on write".
+ * inverted: for MADC, COR=1 means "clear on write".
* And for PWR_INT it's not documented...
*/
if (sih->set_cor) {
diff --git a/drivers/mfd/twl4030-power.c b/drivers/mfd/twl4030-power.c
index 7efa8789a3a2..16422de0823a 100644
--- a/drivers/mfd/twl4030-power.c
+++ b/drivers/mfd/twl4030-power.c
@@ -63,10 +63,6 @@ static u8 twl4030_start_script_address = 0x2b;
#define R_MEMORY_ADDRESS PHY_TO_OFF_PM_MASTER(0x59)
#define R_MEMORY_DATA PHY_TO_OFF_PM_MASTER(0x5a)
-#define R_PROTECT_KEY 0x0E
-#define R_KEY_1 0xC0
-#define R_KEY_2 0x0C
-
/* resource configuration registers
<RESOURCE>_DEV_GRP at address 'n+0'
<RESOURCE>_TYPE at address 'n+1'
@@ -465,15 +461,17 @@ int twl4030_remove_script(u8 flags)
{
int err = 0;
- err = twl_i2c_write_u8(TWL4030_MODULE_PM_MASTER, R_KEY_1,
- R_PROTECT_KEY);
+ err = twl_i2c_write_u8(TWL4030_MODULE_PM_MASTER,
+ TWL4030_PM_MASTER_KEY_CFG1,
+ TWL4030_PM_MASTER_PROTECT_KEY);
if (err) {
pr_err("twl4030: unable to unlock PROTECT_KEY\n");
return err;
}
- err = twl_i2c_write_u8(TWL4030_MODULE_PM_MASTER, R_KEY_2,
- R_PROTECT_KEY);
+ err = twl_i2c_write_u8(TWL4030_MODULE_PM_MASTER,
+ TWL4030_PM_MASTER_KEY_CFG2,
+ TWL4030_PM_MASTER_PROTECT_KEY);
if (err) {
pr_err("twl4030: unable to unlock PROTECT_KEY\n");
return err;
@@ -504,7 +502,8 @@ int twl4030_remove_script(u8 flags)
return err;
}
- err = twl_i2c_write_u8(TWL4030_MODULE_PM_MASTER, 0, R_PROTECT_KEY);
+ err = twl_i2c_write_u8(TWL4030_MODULE_PM_MASTER, 0,
+ TWL4030_PM_MASTER_PROTECT_KEY);
if (err)
pr_err("TWL4030 Unable to relock registers\n");
@@ -518,13 +517,15 @@ void __init twl4030_power_init(struct twl4030_power_data *twl4030_scripts)
struct twl4030_resconfig *resconfig;
u8 address = twl4030_start_script_address;
- err = twl_i2c_write_u8(TWL4030_MODULE_PM_MASTER, R_KEY_1,
- R_PROTECT_KEY);
+ err = twl_i2c_write_u8(TWL4030_MODULE_PM_MASTER,
+ TWL4030_PM_MASTER_KEY_CFG1,
+ TWL4030_PM_MASTER_PROTECT_KEY);
if (err)
goto unlock;
- err = twl_i2c_write_u8(TWL4030_MODULE_PM_MASTER, R_KEY_2,
- R_PROTECT_KEY);
+ err = twl_i2c_write_u8(TWL4030_MODULE_PM_MASTER,
+ TWL4030_PM_MASTER_KEY_CFG2,
+ TWL4030_PM_MASTER_PROTECT_KEY);
if (err)
goto unlock;
@@ -546,7 +547,8 @@ void __init twl4030_power_init(struct twl4030_power_data *twl4030_scripts)
}
}
- err = twl_i2c_write_u8(TWL4030_MODULE_PM_MASTER, 0, R_PROTECT_KEY);
+ err = twl_i2c_write_u8(TWL4030_MODULE_PM_MASTER, 0,
+ TWL4030_PM_MASTER_PROTECT_KEY);
if (err)
pr_err("TWL4030 Unable to relock registers\n");
return;
diff --git a/drivers/mfd/twl6030-irq.c b/drivers/mfd/twl6030-irq.c
index 10bf228ad626..aaedb11d9d2c 100644
--- a/drivers/mfd/twl6030-irq.c
+++ b/drivers/mfd/twl6030-irq.c
@@ -36,6 +36,9 @@
#include <linux/irq.h>
#include <linux/kthread.h>
#include <linux/i2c/twl.h>
+#include <linux/platform_device.h>
+
+#include "twl-core.h"
/*
* TWL6030 (unlike its predecessors, which had two level interrupt handling)
@@ -223,6 +226,78 @@ int twl6030_interrupt_mask(u8 bit_mask, u8 offset)
}
EXPORT_SYMBOL(twl6030_interrupt_mask);
+int twl6030_mmc_card_detect_config(void)
+{
+ int ret;
+ u8 reg_val = 0;
+
+ /* Unmasking the Card detect Interrupt line for MMC1 from Phoenix */
+ twl6030_interrupt_unmask(TWL6030_MMCDETECT_INT_MASK,
+ REG_INT_MSK_LINE_B);
+ twl6030_interrupt_unmask(TWL6030_MMCDETECT_INT_MASK,
+ REG_INT_MSK_STS_B);
+ /*
+ * Initially configure MMC_CTRL for receiving interrupts and
+ * card status on TWL6030 for MMC1
+ */
+ ret = twl_i2c_read_u8(TWL6030_MODULE_ID0, &reg_val, TWL6030_MMCCTRL);
+ if (ret < 0) {
+ pr_err("twl6030: Failed to read MMCCTRL, error %d\n", ret);
+ return ret;
+ }
+ reg_val &= ~VMMC_AUTO_OFF;
+ reg_val |= SW_FC;
+ ret = twl_i2c_write_u8(TWL6030_MODULE_ID0, reg_val, TWL6030_MMCCTRL);
+ if (ret < 0) {
+ pr_err("twl6030: Failed to write MMCCTRL, error %d\n", ret);
+ return ret;
+ }
+
+ /* Configuring PullUp-PullDown register */
+ ret = twl_i2c_read_u8(TWL6030_MODULE_ID0, &reg_val,
+ TWL6030_CFG_INPUT_PUPD3);
+ if (ret < 0) {
+ pr_err("twl6030: Failed to read CFG_INPUT_PUPD3, error %d\n",
+ ret);
+ return ret;
+ }
+ reg_val &= ~(MMC_PU | MMC_PD);
+ ret = twl_i2c_write_u8(TWL6030_MODULE_ID0, reg_val,
+ TWL6030_CFG_INPUT_PUPD3);
+ if (ret < 0) {
+ pr_err("twl6030: Failed to write CFG_INPUT_PUPD3, error %d\n",
+ ret);
+ return ret;
+ }
+ return 0;
+}
+EXPORT_SYMBOL(twl6030_mmc_card_detect_config);
+
+int twl6030_mmc_card_detect(struct device *dev, int slot)
+{
+ int ret = -EIO;
+ u8 read_reg = 0;
+ struct platform_device *pdev = to_platform_device(dev);
+
+ if (pdev->id) {
+ /* TWL6030 provides card-detect support for
+ * the MMC1 controller only.
+ */
+ pr_err("Unkown MMC controller %d in %s\n", pdev->id, __func__);
+ return ret;
+ }
+ /*
+ * BIT0 of MMC_CTRL on TWL6030 provides card status for MMC1
+ * 0 - card not present, 1 - card present
+ */
+ ret = twl_i2c_read_u8(TWL6030_MODULE_ID0, &read_reg,
+ TWL6030_MMCCTRL);
+ if (ret >= 0)
+ ret = read_reg & STS_MMC;
+ return ret;
+}
+EXPORT_SYMBOL(twl6030_mmc_card_detect);
+
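/*
 * Editorial sketch (illustrative only, not part of this patch): a
 * hypothetical board-level hook showing how the two exported helpers
 * above fit together. The helper name and the slot argument are
 * assumptions; only twl6030_mmc_card_detect_config() and
 * twl6030_mmc_card_detect() come from this patch, and the device passed
 * in must be the MMC1 controller (pdev->id == 0).
 */
static int board_mmc1_card_detect_init(struct device *mmc1_dev)
{
	int err;

	/* one-time unmask + MMC_CTRL / pull configuration on the TWL6030 */
	err = twl6030_mmc_card_detect_config();
	if (err < 0)
		return err;

	/* the MMC host can then poll presence through the same helper:
	 * 1 = card present, 0 = card not present, negative = error */
	return twl6030_mmc_card_detect(mmc1_dev, 0);
}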
int twl6030_init_irq(int irq_num, unsigned irq_base, unsigned irq_end)
{
diff --git a/drivers/mfd/vx855.c b/drivers/mfd/vx855.c
new file mode 100644
index 000000000000..ebb059765edd
--- /dev/null
+++ b/drivers/mfd/vx855.c
@@ -0,0 +1,147 @@
+/*
+ * Linux multi-function-device driver (MFD) for the integrated peripherals
+ * of the VIA VX855 chipset
+ *
+ * Copyright (C) 2009 VIA Technologies, Inc.
+ * Copyright (C) 2010 One Laptop per Child
+ * Author: Harald Welte <HaraldWelte@viatech.com>
+ * All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation; either version 2 of
+ * the License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston,
+ * MA 02111-1307 USA
+ *
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/device.h>
+#include <linux/platform_device.h>
+#include <linux/pci.h>
+#include <linux/mfd/core.h>
+
+/* offset into pci config space indicating the 16bit register containing
+ * the power management IO space base */
+#define VX855_CFG_PMIO_OFFSET 0x88
+
+/* ACPI I/O Space registers */
+#define VX855_PMIO_ACPI 0x00
+#define VX855_PMIO_ACPI_LEN 0x0b
+
+/* Processor Power Management */
+#define VX855_PMIO_PPM 0x10
+#define VX855_PMIO_PPM_LEN 0x08
+
+/* General Purpose Power Management */
+#define VX855_PMIO_GPPM 0x20
+#define VX855_PMIO_R_GPI 0x48
+#define VX855_PMIO_R_GPO 0x4c
+#define VX855_PMIO_GPPM_LEN 0x33
+
+#define VSPIC_MMIO_SIZE 0x1000
+
+static struct resource vx855_gpio_resources[] = {
+ {
+ .flags = IORESOURCE_IO,
+ },
+ {
+ .flags = IORESOURCE_IO,
+ },
+};
+
+static struct mfd_cell vx855_cells[] = {
+ {
+ .name = "vx855_gpio",
+ .num_resources = ARRAY_SIZE(vx855_gpio_resources),
+ .resources = vx855_gpio_resources,
+
+ /* we must ignore resource conflicts, for reasons outlined in
+ * the vx855_gpio driver */
+ .ignore_resource_conflicts = true,
+ },
+};
+
+static __devinit int vx855_probe(struct pci_dev *pdev,
+ const struct pci_device_id *id)
+{
+ int ret;
+ u16 gpio_io_offset;
+
+ ret = pci_enable_device(pdev);
+ if (ret)
+ return -ENODEV;
+
+ pci_read_config_word(pdev, VX855_CFG_PMIO_OFFSET, &gpio_io_offset);
+ if (!gpio_io_offset) {
+ dev_warn(&pdev->dev,
+ "BIOS did not assign PMIO base offset?!?\n");
+ ret = -ENODEV;
+ goto out;
+ }
+
+ /* mask out the lowest seven bits, as they are always zero, but
+ * hardware returns them as 0x01 */
+ gpio_io_offset &= 0xff80;
+
+ /* As the region identified here includes many non-GPIO things, we
+ * only work with the specific registers that concern us. */
+ vx855_gpio_resources[0].start = gpio_io_offset + VX855_PMIO_R_GPI;
+ vx855_gpio_resources[0].end = vx855_gpio_resources[0].start + 3;
+ vx855_gpio_resources[1].start = gpio_io_offset + VX855_PMIO_R_GPO;
+ vx855_gpio_resources[1].end = vx855_gpio_resources[1].start + 3;
+
+ ret = mfd_add_devices(&pdev->dev, -1, vx855_cells, ARRAY_SIZE(vx855_cells),
+ NULL, 0);
+
+ /* we always return -ENODEV here in order to enable other
+ * drivers like old, not-yet-platform_device ported i2c-viapro */
+ return -ENODEV;
+out:
+ pci_disable_device(pdev);
+ return ret;
+}
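/*
 * Editorial note (illustrative only, not part of this patch): working the
 * 0xff80 mask in vx855_probe() through a hypothetical BIOS-programmed
 * value of 0x0401 gives a PMIO base of 0x0400, so the two GPIO resources
 * come out as
 *
 *	GPI: 0x0400 + VX855_PMIO_R_GPI = 0x0448 .. 0x044b
 *	GPO: 0x0400 + VX855_PMIO_R_GPO = 0x044c .. 0x044f
 */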
+
+static void vx855_remove(struct pci_dev *pdev)
+{
+ mfd_remove_devices(&pdev->dev);
+ pci_disable_device(pdev);
+}
+
+static struct pci_device_id vx855_pci_tbl[] = {
+ { PCI_DEVICE(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_VX855) },
+ { 0, }
+};
+
+static struct pci_driver vx855_pci_driver = {
+ .name = "vx855",
+ .id_table = vx855_pci_tbl,
+ .probe = vx855_probe,
+ .remove = __devexit_p(vx855_remove),
+};
+
+static int vx855_init(void)
+{
+ return pci_register_driver(&vx855_pci_driver);
+}
+module_init(vx855_init);
+
+static void vx855_exit(void)
+{
+ pci_unregister_driver(&vx855_pci_driver);
+}
+module_exit(vx855_exit);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Harald Welte <HaraldWelte@viatech.com>");
+MODULE_DESCRIPTION("Driver for the VIA VX855 chipset");
diff --git a/drivers/mfd/wm831x-core.c b/drivers/mfd/wm831x-core.c
index 1e7aaaf6cc6f..7d2563fc15c6 100644
--- a/drivers/mfd/wm831x-core.c
+++ b/drivers/mfd/wm831x-core.c
@@ -14,7 +14,6 @@
#include <linux/kernel.h>
#include <linux/module.h>
-#include <linux/i2c.h>
#include <linux/bcd.h>
#include <linux/delay.h>
#include <linux/mfd/core.h>
@@ -90,14 +89,6 @@ int wm831x_isinkv_values[WM831X_ISINK_MAX_ISEL + 1] = {
};
EXPORT_SYMBOL_GPL(wm831x_isinkv_values);
-enum wm831x_parent {
- WM8310 = 0x8310,
- WM8311 = 0x8311,
- WM8312 = 0x8312,
- WM8320 = 0x8320,
- WM8321 = 0x8321,
-};
-
static int wm831x_reg_locked(struct wm831x *wm831x, unsigned short reg)
{
if (!wm831x->locked)
@@ -1446,7 +1437,7 @@ static struct mfd_cell backlight_devs[] = {
/*
* Instantiate the generic non-control parts of the device.
*/
-static int wm831x_device_init(struct wm831x *wm831x, unsigned long id, int irq)
+int wm831x_device_init(struct wm831x *wm831x, unsigned long id, int irq)
{
struct wm831x_pdata *pdata = wm831x->dev->platform_data;
int rev;
@@ -1540,6 +1531,12 @@ static int wm831x_device_init(struct wm831x *wm831x, unsigned long id, int irq)
dev_info(wm831x->dev, "WM8321 revision %c\n", 'A' + rev);
break;
+ case WM8325:
+ parent = WM8325;
+ wm831x->num_gpio = 12;
+ dev_info(wm831x->dev, "WM8325 revision %c\n", 'A' + rev);
+ break;
+
default:
dev_err(wm831x->dev, "Unknown WM831x device %04x\n", ret);
ret = -EINVAL;
@@ -1620,6 +1617,12 @@ static int wm831x_device_init(struct wm831x *wm831x, unsigned long id, int irq)
NULL, 0);
break;
+ case WM8325:
+ ret = mfd_add_devices(wm831x->dev, -1,
+ wm8320_devs, ARRAY_SIZE(wm8320_devs),
+ NULL, 0);
+ break;
+
default:
/* If this happens the bus probe function is buggy */
BUG();
@@ -1660,7 +1663,7 @@ err:
return ret;
}
-static void wm831x_device_exit(struct wm831x *wm831x)
+void wm831x_device_exit(struct wm831x *wm831x)
{
wm831x_otp_exit(wm831x);
mfd_remove_devices(wm831x->dev);
@@ -1670,7 +1673,7 @@ static void wm831x_device_exit(struct wm831x *wm831x)
kfree(wm831x);
}
-static int wm831x_device_suspend(struct wm831x *wm831x)
+int wm831x_device_suspend(struct wm831x *wm831x)
{
int reg, mask;
@@ -1706,125 +1709,6 @@ static int wm831x_device_suspend(struct wm831x *wm831x)
return 0;
}
-static int wm831x_i2c_read_device(struct wm831x *wm831x, unsigned short reg,
- int bytes, void *dest)
-{
- struct i2c_client *i2c = wm831x->control_data;
- int ret;
- u16 r = cpu_to_be16(reg);
-
- ret = i2c_master_send(i2c, (unsigned char *)&r, 2);
- if (ret < 0)
- return ret;
- if (ret != 2)
- return -EIO;
-
- ret = i2c_master_recv(i2c, dest, bytes);
- if (ret < 0)
- return ret;
- if (ret != bytes)
- return -EIO;
- return 0;
-}
-
-/* Currently we allocate the write buffer on the stack; this is OK for
- * small writes - if we need to do large writes this will need to be
- * revised.
- */
-static int wm831x_i2c_write_device(struct wm831x *wm831x, unsigned short reg,
- int bytes, void *src)
-{
- struct i2c_client *i2c = wm831x->control_data;
- unsigned char msg[bytes + 2];
- int ret;
-
- reg = cpu_to_be16(reg);
- memcpy(&msg[0], &reg, 2);
- memcpy(&msg[2], src, bytes);
-
- ret = i2c_master_send(i2c, msg, bytes + 2);
- if (ret < 0)
- return ret;
- if (ret < bytes + 2)
- return -EIO;
-
- return 0;
-}
-
-static int wm831x_i2c_probe(struct i2c_client *i2c,
- const struct i2c_device_id *id)
-{
- struct wm831x *wm831x;
-
- wm831x = kzalloc(sizeof(struct wm831x), GFP_KERNEL);
- if (wm831x == NULL)
- return -ENOMEM;
-
- i2c_set_clientdata(i2c, wm831x);
- wm831x->dev = &i2c->dev;
- wm831x->control_data = i2c;
- wm831x->read_dev = wm831x_i2c_read_device;
- wm831x->write_dev = wm831x_i2c_write_device;
-
- return wm831x_device_init(wm831x, id->driver_data, i2c->irq);
-}
-
-static int wm831x_i2c_remove(struct i2c_client *i2c)
-{
- struct wm831x *wm831x = i2c_get_clientdata(i2c);
-
- wm831x_device_exit(wm831x);
-
- return 0;
-}
-
-static int wm831x_i2c_suspend(struct i2c_client *i2c, pm_message_t mesg)
-{
- struct wm831x *wm831x = i2c_get_clientdata(i2c);
-
- return wm831x_device_suspend(wm831x);
-}
-
-static const struct i2c_device_id wm831x_i2c_id[] = {
- { "wm8310", WM8310 },
- { "wm8311", WM8311 },
- { "wm8312", WM8312 },
- { "wm8320", WM8320 },
- { "wm8321", WM8321 },
- { }
-};
-MODULE_DEVICE_TABLE(i2c, wm831x_i2c_id);
-
-
-static struct i2c_driver wm831x_i2c_driver = {
- .driver = {
- .name = "wm831x",
- .owner = THIS_MODULE,
- },
- .probe = wm831x_i2c_probe,
- .remove = wm831x_i2c_remove,
- .suspend = wm831x_i2c_suspend,
- .id_table = wm831x_i2c_id,
-};
-
-static int __init wm831x_i2c_init(void)
-{
- int ret;
-
- ret = i2c_add_driver(&wm831x_i2c_driver);
- if (ret != 0)
- pr_err("Failed to register wm831x I2C driver: %d\n", ret);
-
- return ret;
-}
-subsys_initcall(wm831x_i2c_init);
-
-static void __exit wm831x_i2c_exit(void)
-{
- i2c_del_driver(&wm831x_i2c_driver);
-}
-module_exit(wm831x_i2c_exit);
-
-MODULE_DESCRIPTION("I2C support for the WM831X AudioPlus PMIC");
+MODULE_DESCRIPTION("Core support for the WM831X AudioPlus PMIC");
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Mark Brown");
diff --git a/drivers/mfd/wm831x-i2c.c b/drivers/mfd/wm831x-i2c.c
new file mode 100644
index 000000000000..156b19859e81
--- /dev/null
+++ b/drivers/mfd/wm831x-i2c.c
@@ -0,0 +1,143 @@
+/*
+ * wm831x-i2c.c -- I2C access for Wolfson WM831x PMICs
+ *
+ * Copyright 2009,2010 Wolfson Microelectronics PLC.
+ *
+ * Author: Mark Brown <broonie@opensource.wolfsonmicro.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ *
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/i2c.h>
+#include <linux/delay.h>
+#include <linux/mfd/core.h>
+#include <linux/slab.h>
+
+#include <linux/mfd/wm831x/core.h>
+#include <linux/mfd/wm831x/pdata.h>
+
+static int wm831x_i2c_read_device(struct wm831x *wm831x, unsigned short reg,
+ int bytes, void *dest)
+{
+ struct i2c_client *i2c = wm831x->control_data;
+ int ret;
+ u16 r = cpu_to_be16(reg);
+
+ ret = i2c_master_send(i2c, (unsigned char *)&r, 2);
+ if (ret < 0)
+ return ret;
+ if (ret != 2)
+ return -EIO;
+
+ ret = i2c_master_recv(i2c, dest, bytes);
+ if (ret < 0)
+ return ret;
+ if (ret != bytes)
+ return -EIO;
+ return 0;
+}
+
+/* Currently we allocate the write buffer on the stack; this is OK for
+ * small writes - if we need to do large writes this will need to be
+ * revised.
+ */
+static int wm831x_i2c_write_device(struct wm831x *wm831x, unsigned short reg,
+ int bytes, void *src)
+{
+ struct i2c_client *i2c = wm831x->control_data;
+ unsigned char msg[bytes + 2];
+ int ret;
+
+ reg = cpu_to_be16(reg);
+ memcpy(&msg[0], &reg, 2);
+ memcpy(&msg[2], src, bytes);
+
+ ret = i2c_master_send(i2c, msg, bytes + 2);
+ if (ret < 0)
+ return ret;
+ if (ret < bytes + 2)
+ return -EIO;
+
+ return 0;
+}
+
+static int wm831x_i2c_probe(struct i2c_client *i2c,
+ const struct i2c_device_id *id)
+{
+ struct wm831x *wm831x;
+
+ wm831x = kzalloc(sizeof(struct wm831x), GFP_KERNEL);
+ if (wm831x == NULL)
+ return -ENOMEM;
+
+ i2c_set_clientdata(i2c, wm831x);
+ wm831x->dev = &i2c->dev;
+ wm831x->control_data = i2c;
+ wm831x->read_dev = wm831x_i2c_read_device;
+ wm831x->write_dev = wm831x_i2c_write_device;
+
+ return wm831x_device_init(wm831x, id->driver_data, i2c->irq);
+}
+
+static int wm831x_i2c_remove(struct i2c_client *i2c)
+{
+ struct wm831x *wm831x = i2c_get_clientdata(i2c);
+
+ wm831x_device_exit(wm831x);
+
+ return 0;
+}
+
+static int wm831x_i2c_suspend(struct i2c_client *i2c, pm_message_t mesg)
+{
+ struct wm831x *wm831x = i2c_get_clientdata(i2c);
+
+ return wm831x_device_suspend(wm831x);
+}
+
+static const struct i2c_device_id wm831x_i2c_id[] = {
+ { "wm8310", WM8310 },
+ { "wm8311", WM8311 },
+ { "wm8312", WM8312 },
+ { "wm8320", WM8320 },
+ { "wm8321", WM8321 },
+ { "wm8325", WM8325 },
+ { }
+};
+MODULE_DEVICE_TABLE(i2c, wm831x_i2c_id);
+
+
+static struct i2c_driver wm831x_i2c_driver = {
+ .driver = {
+ .name = "wm831x",
+ .owner = THIS_MODULE,
+ },
+ .probe = wm831x_i2c_probe,
+ .remove = wm831x_i2c_remove,
+ .suspend = wm831x_i2c_suspend,
+ .id_table = wm831x_i2c_id,
+};
+
+static int __init wm831x_i2c_init(void)
+{
+ int ret;
+
+ ret = i2c_add_driver(&wm831x_i2c_driver);
+ if (ret != 0)
+ pr_err("Failed to register wm831x I2C driver: %d\n", ret);
+
+ return ret;
+}
+subsys_initcall(wm831x_i2c_init);
+
+static void __exit wm831x_i2c_exit(void)
+{
+ i2c_del_driver(&wm831x_i2c_driver);
+}
+module_exit(wm831x_i2c_exit);
diff --git a/drivers/mfd/wm831x-spi.c b/drivers/mfd/wm831x-spi.c
new file mode 100644
index 000000000000..2789b151b0f9
--- /dev/null
+++ b/drivers/mfd/wm831x-spi.c
@@ -0,0 +1,232 @@
+/*
+ * wm831x-spi.c -- SPI access for Wolfson WM831x PMICs
+ *
+ * Copyright 2009,2010 Wolfson Microelectronics PLC.
+ *
+ * Author: Mark Brown <broonie@opensource.wolfsonmicro.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ *
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/spi/spi.h>
+
+#include <linux/mfd/wm831x/core.h>
+
+static int wm831x_spi_read_device(struct wm831x *wm831x, unsigned short reg,
+ int bytes, void *dest)
+{
+ u16 tx_val;
+ u16 *d = dest;
+ int r, ret;
+
+ /* Go register at a time */
+ for (r = reg; r < reg + (bytes / 2); r++) {
+ tx_val = r | 0x8000;
+
+ ret = spi_write_then_read(wm831x->control_data,
+ (u8 *)&tx_val, 2, (u8 *)d, 2);
+ if (ret != 0)
+ return ret;
+
+ *d = be16_to_cpu(*d);
+
+ d++;
+ }
+
+ return 0;
+}
+
+static int wm831x_spi_write_device(struct wm831x *wm831x, unsigned short reg,
+ int bytes, void *src)
+{
+ struct spi_device *spi = wm831x->control_data;
+ u16 *s = src;
+ u16 data[2];
+ int ret, r;
+
+ /* Go register at a time */
+ for (r = reg; r < reg + (bytes / 2); r++) {
+ data[0] = r;
+ data[1] = *s++;
+
+ ret = spi_write(spi, (char *)&data, sizeof(data));
+ if (ret != 0)
+ return ret;
+ }
+
+ return 0;
+}
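/*
 * Editorial note (illustrative only, not part of this patch): with the
 * framing used by wm831x_spi_read_device() above, reading the 16-bit
 * register at a hypothetical address 0x0001 clocks out
 * 0x0001 | 0x8000 = 0x8001 (bit 15 marks a read) and clocks in two bytes,
 * which arrive MSB first and are fixed up with be16_to_cpu(). A write,
 * as in wm831x_spi_write_device(), clocks out the register address word
 * followed by the data word, one register per transfer.
 */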
+
+static int __devinit wm831x_spi_probe(struct spi_device *spi)
+{
+ struct wm831x *wm831x;
+ enum wm831x_parent type;
+
+ /* Currently SPI support for ID tables is unmerged, we're faking it */
+ if (strcmp(spi->modalias, "wm8310") == 0)
+ type = WM8310;
+ else if (strcmp(spi->modalias, "wm8311") == 0)
+ type = WM8311;
+ else if (strcmp(spi->modalias, "wm8312") == 0)
+ type = WM8312;
+ else if (strcmp(spi->modalias, "wm8320") == 0)
+ type = WM8320;
+ else if (strcmp(spi->modalias, "wm8321") == 0)
+ type = WM8321;
+ else if (strcmp(spi->modalias, "wm8325") == 0)
+ type = WM8325;
+ else {
+ dev_err(&spi->dev, "Unknown device type\n");
+ return -EINVAL;
+ }
+
+ wm831x = kzalloc(sizeof(struct wm831x), GFP_KERNEL);
+ if (wm831x == NULL)
+ return -ENOMEM;
+
+ spi->bits_per_word = 16;
+ spi->mode = SPI_MODE_0;
+
+ dev_set_drvdata(&spi->dev, wm831x);
+ wm831x->dev = &spi->dev;
+ wm831x->control_data = spi;
+ wm831x->read_dev = wm831x_spi_read_device;
+ wm831x->write_dev = wm831x_spi_write_device;
+
+ return wm831x_device_init(wm831x, type, spi->irq);
+}
+
+static int __devexit wm831x_spi_remove(struct spi_device *spi)
+{
+ struct wm831x *wm831x = dev_get_drvdata(&spi->dev);
+
+ wm831x_device_exit(wm831x);
+
+ return 0;
+}
+
+static int wm831x_spi_suspend(struct spi_device *spi, pm_message_t m)
+{
+ struct wm831x *wm831x = dev_get_drvdata(&spi->dev);
+
+ return wm831x_device_suspend(wm831x);
+}
+
+static struct spi_driver wm8310_spi_driver = {
+ .driver = {
+ .name = "wm8310",
+ .bus = &spi_bus_type,
+ .owner = THIS_MODULE,
+ },
+ .probe = wm831x_spi_probe,
+ .remove = __devexit_p(wm831x_spi_remove),
+ .suspend = wm831x_spi_suspend,
+};
+
+static struct spi_driver wm8311_spi_driver = {
+ .driver = {
+ .name = "wm8311",
+ .bus = &spi_bus_type,
+ .owner = THIS_MODULE,
+ },
+ .probe = wm831x_spi_probe,
+ .remove = __devexit_p(wm831x_spi_remove),
+ .suspend = wm831x_spi_suspend,
+};
+
+static struct spi_driver wm8312_spi_driver = {
+ .driver = {
+ .name = "wm8312",
+ .bus = &spi_bus_type,
+ .owner = THIS_MODULE,
+ },
+ .probe = wm831x_spi_probe,
+ .remove = __devexit_p(wm831x_spi_remove),
+ .suspend = wm831x_spi_suspend,
+};
+
+static struct spi_driver wm8320_spi_driver = {
+ .driver = {
+ .name = "wm8320",
+ .bus = &spi_bus_type,
+ .owner = THIS_MODULE,
+ },
+ .probe = wm831x_spi_probe,
+ .remove = __devexit_p(wm831x_spi_remove),
+ .suspend = wm831x_spi_suspend,
+};
+
+static struct spi_driver wm8321_spi_driver = {
+ .driver = {
+ .name = "wm8321",
+ .bus = &spi_bus_type,
+ .owner = THIS_MODULE,
+ },
+ .probe = wm831x_spi_probe,
+ .remove = __devexit_p(wm831x_spi_remove),
+ .suspend = wm831x_spi_suspend,
+};
+
+static struct spi_driver wm8325_spi_driver = {
+ .driver = {
+ .name = "wm8325",
+ .bus = &spi_bus_type,
+ .owner = THIS_MODULE,
+ },
+ .probe = wm831x_spi_probe,
+ .remove = __devexit_p(wm831x_spi_remove),
+ .suspend = wm831x_spi_suspend,
+};
+
+static int __init wm831x_spi_init(void)
+{
+ int ret;
+
+ ret = spi_register_driver(&wm8310_spi_driver);
+ if (ret != 0)
+ pr_err("Failed to register WM8310 SPI driver: %d\n", ret);
+
+ ret = spi_register_driver(&wm8311_spi_driver);
+ if (ret != 0)
+ pr_err("Failed to register WM8311 SPI driver: %d\n", ret);
+
+ ret = spi_register_driver(&wm8312_spi_driver);
+ if (ret != 0)
+ pr_err("Failed to register WM8312 SPI driver: %d\n", ret);
+
+ ret = spi_register_driver(&wm8320_spi_driver);
+ if (ret != 0)
+ pr_err("Failed to register WM8320 SPI driver: %d\n", ret);
+
+ ret = spi_register_driver(&wm8321_spi_driver);
+ if (ret != 0)
+ pr_err("Failed to register WM8321 SPI driver: %d\n", ret);
+
+ ret = spi_register_driver(&wm8325_spi_driver);
+ if (ret != 0)
+ pr_err("Failed to register WM8325 SPI driver: %d\n", ret);
+
+ return 0;
+}
+subsys_initcall(wm831x_spi_init);
+
+static void __exit wm831x_spi_exit(void)
+{
+ spi_unregister_driver(&wm8325_spi_driver);
+ spi_unregister_driver(&wm8321_spi_driver);
+ spi_unregister_driver(&wm8320_spi_driver);
+ spi_unregister_driver(&wm8312_spi_driver);
+ spi_unregister_driver(&wm8311_spi_driver);
+ spi_unregister_driver(&wm8310_spi_driver);
+}
+module_exit(wm831x_spi_exit);
+
+MODULE_DESCRIPTION("SPI support for WM831x/2x AudioPlus PMICs");
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Mark Brown");
diff --git a/drivers/misc/Kconfig b/drivers/misc/Kconfig
index 1f69743b12ec..4d073f1e4502 100644
--- a/drivers/misc/Kconfig
+++ b/drivers/misc/Kconfig
@@ -4,7 +4,6 @@
menuconfig MISC_DEVICES
bool "Misc devices"
- default y
---help---
Say Y here to get to see options for device drivers from various
different categories. This option alone does not add any kernel code.
@@ -24,7 +23,8 @@ config AD525X_DPOT
AD5260, AD5262, AD5263, AD5290, AD5291, AD5292, AD5293,
AD7376, AD8400, AD8402, AD8403, ADN2850, AD5241, AD5242,
AD5243, AD5245, AD5246, AD5247, AD5248, AD5280, AD5282,
- ADN2860, AD5273, AD5171, AD5170, AD5172, AD5173
+ ADN2860, AD5273, AD5171, AD5170, AD5172, AD5173, AD5270,
+ AD5271, AD5272, AD5274
digital potentiometer chips.
See Documentation/misc-devices/ad525x_dpot.txt for the
@@ -62,6 +62,15 @@ config ATMEL_PWM
purposes including software controlled power-efficient backlights
on LCD displays, motor control, and waveform generation.
+config AB8500_PWM
+ bool "AB8500 PWM support"
+ depends on AB8500_CORE
+ select HAVE_PWM
+ help
+ This driver exports functions to enable/disable/config/free Pulse
+ Width Modulation in the Analog Baseband Chip AB8500.
+ It is used by the LED and backlight drivers to control the intensity.
+
config ATMEL_TCLIB
bool "Atmel AT32/AT91 Timer/Counter Library"
depends on (AVR32 || ARCH_AT91)
@@ -284,6 +293,16 @@ config SGI_GRU_DEBUG
This option enables addition debugging code for the SGI GRU driver. If
you are unsure, say N.
+config APDS9802ALS
+ tristate "Medfield Avago APDS9802 ALS Sensor module"
+ depends on I2C
+ help
+ If you say yes here you get support for the ALS APDS9802 ambient
+ light sensor.
+
+ This driver can also be built as a module. If so, the module
+ will be called apds9802als.
+
config ISL29003
tristate "Intersil ISL29003 ambient light sensor"
depends on I2C && SYSFS
@@ -294,6 +313,16 @@ config ISL29003
This driver can also be built as a module. If so, the module
will be called isl29003.
+config ISL29020
+ tristate "Intersil ISL29020 ambient light sensor"
+ depends on I2C
+ help
+ If you say yes here you get support for the Intersil ISL29020
+ ambient light sensor.
+
+ This driver can also be built as a module. If so, the module
+ will be called isl29020.
+
config SENSORS_TSL2550
tristate "Taos TSL2550 ambient light sensor"
depends on I2C && SYSFS
@@ -314,6 +343,27 @@ config SENSORS_BH1780
This driver can also be built as a module. If so, the module
will be called bh1780gli.
+config SENSORS_BH1770
+ tristate "BH1770GLC / SFH7770 combined ALS - Proximity sensor"
+ depends on I2C
+ ---help---
+ Say Y here if you want to build a driver for BH1770GLC (ROHM) or
+ SFH7770 (Osram) combined ambient light and proximity sensor chip.
+
+ To compile this driver as a module, choose M here: the
+ module will be called bh1770glc. If unsure, say N here.
+
+config SENSORS_APDS990X
+ tristate "APDS990X combined als and proximity sensors"
+ depends on I2C
+ default n
+ ---help---
+ Say Y here if you want to build a driver for Avago APDS990x
+ combined ambient light and proximity sensor chip.
+
+ To compile this driver as a module, choose M here: the
+ module will be called apds990x. If unsure, say N here.
+
config HMC6352
tristate "Honeywell HMC6352 compass"
depends on I2C
@@ -406,5 +456,6 @@ source "drivers/misc/c2port/Kconfig"
source "drivers/misc/eeprom/Kconfig"
source "drivers/misc/cb710/Kconfig"
source "drivers/misc/iwmc3200top/Kconfig"
+source "drivers/misc/ti-st/Kconfig"
endif # MISC_DEVICES
diff --git a/drivers/misc/Makefile b/drivers/misc/Makefile
index 9f2986b4da2f..98009cc20cb9 100644
--- a/drivers/misc/Makefile
+++ b/drivers/misc/Makefile
@@ -16,6 +16,8 @@ obj-$(CONFIG_TIFM_CORE) += tifm_core.o
obj-$(CONFIG_TIFM_7XX1) += tifm_7xx1.o
obj-$(CONFIG_PHANTOM) += phantom.o
obj-$(CONFIG_SENSORS_BH1780) += bh1780gli.o
+obj-$(CONFIG_SENSORS_BH1770) += bh1770glc.o
+obj-$(CONFIG_SENSORS_APDS990X) += apds990x.o
obj-$(CONFIG_SGI_IOC4) += ioc4.o
obj-$(CONFIG_ENCLOSURE_SERVICES) += enclosure.o
obj-$(CONFIG_KGDB_TESTS) += kgdbts.o
@@ -23,7 +25,9 @@ obj-$(CONFIG_SGI_XP) += sgi-xp/
obj-$(CONFIG_SGI_GRU) += sgi-gru/
obj-$(CONFIG_CS5535_MFGPT) += cs5535-mfgpt.o
obj-$(CONFIG_HP_ILO) += hpilo.o
+obj-$(CONFIG_APDS9802ALS) += apds9802als.o
obj-$(CONFIG_ISL29003) += isl29003.o
+obj-$(CONFIG_ISL29020) += isl29020.o
obj-$(CONFIG_SENSORS_TSL2550) += tsl2550.o
obj-$(CONFIG_EP93XX_PWM) += ep93xx_pwm.o
obj-$(CONFIG_DS1682) += ds1682.o
@@ -36,3 +40,5 @@ obj-y += cb710/
obj-$(CONFIG_VMWARE_BALLOON) += vmw_balloon.o
obj-$(CONFIG_ARM_CHARLCD) += arm-charlcd.o
obj-$(CONFIG_PCH_PHUB) += pch_phub.o
+obj-y += ti-st/
+obj-$(CONFIG_AB8500_PWM) += ab8500-pwm.o
diff --git a/drivers/misc/ab8500-pwm.c b/drivers/misc/ab8500-pwm.c
new file mode 100644
index 000000000000..54e3d05b63cc
--- /dev/null
+++ b/drivers/misc/ab8500-pwm.c
@@ -0,0 +1,168 @@
+/*
+ * Copyright (C) ST-Ericsson SA 2010
+ *
+ * Author: Arun R Murthy <arun.murthy@stericsson.com>
+ * License terms: GNU General Public License (GPL) version 2
+ */
+#include <linux/err.h>
+#include <linux/platform_device.h>
+#include <linux/slab.h>
+#include <linux/pwm.h>
+#include <linux/mfd/ab8500.h>
+#include <linux/mfd/abx500.h>
+
+/*
+ * PWM Out generators
+ * Bank: 0x10
+ */
+#define AB8500_PWM_OUT_CTRL1_REG 0x60
+#define AB8500_PWM_OUT_CTRL2_REG 0x61
+#define AB8500_PWM_OUT_CTRL7_REG 0x66
+
+/* backlight driver constants */
+#define ENABLE_PWM 1
+#define DISABLE_PWM 0
+
+struct pwm_device {
+ struct device *dev;
+ struct list_head node;
+ const char *label;
+ unsigned int pwm_id;
+};
+
+static LIST_HEAD(pwm_list);
+
+int pwm_config(struct pwm_device *pwm, int duty_ns, int period_ns)
+{
+ int ret = 0;
+ unsigned int higher_val, lower_val;
+ u8 reg;
+
+ /*
+ * get the lower 8 bits that are to be written to
+ * AB8500_PWM_OUT_CTRL1_REG[0:7]
+ */
+ lower_val = duty_ns & 0x00FF;
+ /*
+ * get bits [8:9] that are to be written to
+ * AB8500_PWM_OUT_CTRL2_REG[0:1]
+ */
+ higher_val = ((duty_ns & 0x0300) >> 8);
+
+ reg = AB8500_PWM_OUT_CTRL1_REG + ((pwm->pwm_id - 1) * 2);
+
+ ret = abx500_set_register_interruptible(pwm->dev, AB8500_MISC,
+ reg, (u8)lower_val);
+ if (ret < 0)
+ return ret;
+ ret = abx500_set_register_interruptible(pwm->dev, AB8500_MISC,
+ (reg + 1), (u8)higher_val);
+
+ return ret;
+}
+EXPORT_SYMBOL(pwm_config);
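/*
 * Editorial note (illustrative only, not part of this patch): pwm_config()
 * above splits a 10-bit duty value across two registers. For pwm_id 1 and
 * a hypothetical duty_ns of 0x2AB:
 *
 *	lower_val  = 0x2AB & 0x00FF        = 0xAB -> AB8500_PWM_OUT_CTRL1_REG
 *	higher_val = (0x2AB & 0x0300) >> 8 = 0x02 -> AB8500_PWM_OUT_CTRL2_REG
 *
 * PWM ids 2 and 3 use the same layout at the next register pairs
 * (0x62/0x63 and 0x64/0x65), which is what the "(pwm->pwm_id - 1) * 2"
 * offset selects.
 */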
+
+int pwm_enable(struct pwm_device *pwm)
+{
+ int ret;
+
+ ret = abx500_mask_and_set_register_interruptible(pwm->dev,
+ AB8500_MISC, AB8500_PWM_OUT_CTRL7_REG,
+ 1 << (pwm->pwm_id-1), ENABLE_PWM);
+ if (ret < 0)
+ dev_err(pwm->dev, "%s: Failed to enable PWM, Error %d\n",
+ pwm->label, ret);
+ return ret;
+}
+EXPORT_SYMBOL(pwm_enable);
+
+void pwm_disable(struct pwm_device *pwm)
+{
+ int ret;
+
+ ret = abx500_mask_and_set_register_interruptible(pwm->dev,
+ AB8500_MISC, AB8500_PWM_OUT_CTRL7_REG,
+ 1 << (pwm->pwm_id-1), DISABLE_PWM);
+ if (ret < 0)
+ dev_err(pwm->dev, "%s: Failed to disable PWM, Error %d\n",
+ pwm->label, ret);
+ return;
+}
+EXPORT_SYMBOL(pwm_disable);
+
+struct pwm_device *pwm_request(int pwm_id, const char *label)
+{
+ struct pwm_device *pwm;
+
+ list_for_each_entry(pwm, &pwm_list, node) {
+ if (pwm->pwm_id == pwm_id) {
+ pwm->label = label;
+ pwm->pwm_id = pwm_id;
+ return pwm;
+ }
+ }
+
+ return ERR_PTR(-ENOENT);
+}
+EXPORT_SYMBOL(pwm_request);
+
+void pwm_free(struct pwm_device *pwm)
+{
+ pwm_disable(pwm);
+}
+EXPORT_SYMBOL(pwm_free);
+
+static int __devinit ab8500_pwm_probe(struct platform_device *pdev)
+{
+ struct pwm_device *pwm;
+ /*
+ * Nothing to be done in probe; it only obtains the device
+ * that is needed for ab8500 register reads and writes
+ */
+ pwm = kzalloc(sizeof(struct pwm_device), GFP_KERNEL);
+ if (pwm == NULL) {
+ dev_err(&pdev->dev, "failed to allocate memory\n");
+ return -ENOMEM;
+ }
+ pwm->dev = &pdev->dev;
+ pwm->pwm_id = pdev->id;
+ list_add_tail(&pwm->node, &pwm_list);
+ platform_set_drvdata(pdev, pwm);
+ dev_dbg(pwm->dev, "pwm probe successful\n");
+ return 0;
+}
+
+static int __devexit ab8500_pwm_remove(struct platform_device *pdev)
+{
+ struct pwm_device *pwm = platform_get_drvdata(pdev);
+ list_del(&pwm->node);
+ dev_dbg(&pdev->dev, "pwm driver removed\n");
+ kfree(pwm);
+ return 0;
+}
+
+static struct platform_driver ab8500_pwm_driver = {
+ .driver = {
+ .name = "ab8500-pwm",
+ .owner = THIS_MODULE,
+ },
+ .probe = ab8500_pwm_probe,
+ .remove = __devexit_p(ab8500_pwm_remove),
+};
+
+static int __init ab8500_pwm_init(void)
+{
+ return platform_driver_register(&ab8500_pwm_driver);
+}
+
+static void __exit ab8500_pwm_exit(void)
+{
+ platform_driver_unregister(&ab8500_pwm_driver);
+}
+
+subsys_initcall(ab8500_pwm_init);
+module_exit(ab8500_pwm_exit);
+MODULE_AUTHOR("Arun MURTHY <arun.murthy@stericsson.com>");
+MODULE_DESCRIPTION("AB8500 Pulse Width Modulation Driver");
+MODULE_ALIAS("AB8500 PWM driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/misc/ad525x_dpot-i2c.c b/drivers/misc/ad525x_dpot-i2c.c
index 374352af7979..4ff73c215746 100644
--- a/drivers/misc/ad525x_dpot-i2c.c
+++ b/drivers/misc/ad525x_dpot-i2c.c
@@ -102,6 +102,8 @@ static const struct i2c_device_id ad_dpot_id[] = {
{"ad5170", AD5170_ID},
{"ad5172", AD5172_ID},
{"ad5173", AD5173_ID},
+ {"ad5272", AD5272_ID},
+ {"ad5274", AD5274_ID},
{}
};
MODULE_DEVICE_TABLE(i2c, ad_dpot_id);
diff --git a/drivers/misc/ad525x_dpot-spi.c b/drivers/misc/ad525x_dpot-spi.c
index b8c6df9c8437..7f9a55afe05d 100644
--- a/drivers/misc/ad525x_dpot-spi.c
+++ b/drivers/misc/ad525x_dpot-spi.c
@@ -38,6 +38,8 @@ static const struct ad_dpot_id ad_dpot_spi_devlist[] = {
{.name = "ad8402", .devid = AD8402_ID},
{.name = "ad8403", .devid = AD8403_ID},
{.name = "adn2850", .devid = ADN2850_ID},
+ {.name = "ad5270", .devid = AD5270_ID},
+ {.name = "ad5271", .devid = AD5271_ID},
{}
};
@@ -53,13 +55,13 @@ static int write8(void *client, u8 val)
static int write16(void *client, u8 reg, u8 val)
{
u8 data[2] = {reg, val};
- return spi_write(client, data, 1);
+ return spi_write(client, data, 2);
}
static int write24(void *client, u8 reg, u16 val)
{
u8 data[3] = {reg, val >> 8, val};
- return spi_write(client, data, 1);
+ return spi_write(client, data, 3);
}
static int read8(void *client)
diff --git a/drivers/misc/ad525x_dpot.c b/drivers/misc/ad525x_dpot.c
index 5e6fa8449e8b..7cb911028d09 100644
--- a/drivers/misc/ad525x_dpot.c
+++ b/drivers/misc/ad525x_dpot.c
@@ -29,9 +29,9 @@
* AD5262 2 256 20, 50, 200
* AD5263 4 256 20, 50, 200
* AD5290 1 256 10, 50, 100
- * AD5291 1 256 20
- * AD5292 1 1024 20
- * AD5293 1 1024 20
+ * AD5291 1 256 20, 50, 100 (20-TP)
+ * AD5292 1 1024 20, 50, 100 (20-TP)
+ * AD5293 1 1024 20, 50, 100
* AD7376 1 128 10, 50, 100, 1M
* AD8400 1 256 1, 10, 50, 100
* AD8402 2 256 1, 10, 50, 100
@@ -52,6 +52,10 @@
* AD5170 1 256 2.5, 10, 50, 100 (OTP)
* AD5172 2 256 2.5, 10, 50, 100 (OTP)
* AD5173 2 256 2.5, 10, 50, 100 (OTP)
+ * AD5270 1 1024 20, 50, 100 (50-TP)
+ * AD5271 1 256 20, 50, 100 (50-TP)
+ * AD5272 1 1024 20, 50, 100 (50-TP)
+ * AD5274 1 256 20, 50, 100 (50-TP)
*
* See Documentation/misc-devices/ad525x_dpot.txt for more info.
*
@@ -126,18 +130,38 @@ static inline int dpot_write_r8d16(struct dpot_data *dpot, u8 reg, u16 val)
static s32 dpot_read_spi(struct dpot_data *dpot, u8 reg)
{
unsigned ctrl = 0;
+ int value;
if (!(reg & (DPOT_ADDR_EEPROM | DPOT_ADDR_CMD))) {
if (dpot->feat & F_RDACS_WONLY)
return dpot->rdac_cache[reg & DPOT_RDAC_MASK];
-
if (dpot->uid == DPOT_UID(AD5291_ID) ||
dpot->uid == DPOT_UID(AD5292_ID) ||
- dpot->uid == DPOT_UID(AD5293_ID))
- return dpot_read_r8d8(dpot,
+ dpot->uid == DPOT_UID(AD5293_ID)) {
+
+ value = dpot_read_r8d8(dpot,
DPOT_AD5291_READ_RDAC << 2);
+ if (dpot->uid == DPOT_UID(AD5291_ID))
+ value = value >> 2;
+
+ return value;
+ } else if (dpot->uid == DPOT_UID(AD5270_ID) ||
+ dpot->uid == DPOT_UID(AD5271_ID)) {
+
+ value = dpot_read_r8d8(dpot,
+ DPOT_AD5270_1_2_4_READ_RDAC << 2);
+
+ if (value < 0)
+ return value;
+
+ if (dpot->uid == DPOT_UID(AD5271_ID))
+ value = value >> 2;
+
+ return value;
+ }
+
ctrl = DPOT_SPI_READ_RDAC;
} else if (reg & DPOT_ADDR_EEPROM) {
ctrl = DPOT_SPI_READ_EEPROM;
@@ -153,6 +177,7 @@ static s32 dpot_read_spi(struct dpot_data *dpot, u8 reg)
static s32 dpot_read_i2c(struct dpot_data *dpot, u8 reg)
{
+ int value;
unsigned ctrl = 0;
switch (dpot->uid) {
case DPOT_UID(AD5246_ID):
@@ -166,7 +191,7 @@ static s32 dpot_read_i2c(struct dpot_data *dpot, u8 reg)
case DPOT_UID(AD5280_ID):
case DPOT_UID(AD5282_ID):
ctrl = ((reg & DPOT_RDAC_MASK) == DPOT_RDAC0) ?
- 0 : DPOT_AD5291_RDAC_AB;
+ 0 : DPOT_AD5282_RDAC_AB;
return dpot_read_r8d8(dpot, ctrl);
case DPOT_UID(AD5170_ID):
case DPOT_UID(AD5171_ID):
@@ -175,8 +200,27 @@ static s32 dpot_read_i2c(struct dpot_data *dpot, u8 reg)
case DPOT_UID(AD5172_ID):
case DPOT_UID(AD5173_ID):
ctrl = ((reg & DPOT_RDAC_MASK) == DPOT_RDAC0) ?
- 0 : DPOT_AD5272_3_A0;
+ 0 : DPOT_AD5172_3_A0;
return dpot_read_r8d8(dpot, ctrl);
+ case DPOT_UID(AD5272_ID):
+ case DPOT_UID(AD5274_ID):
+ dpot_write_r8d8(dpot,
+ (DPOT_AD5270_1_2_4_READ_RDAC << 2), 0);
+
+ value = dpot_read_r8d16(dpot,
+ DPOT_AD5270_1_2_4_RDAC << 2);
+
+ if (value < 0)
+ return value;
+ /*
+ * AD5272/AD5274 return the high byte first, however the
+ * underlying smbus expects the low byte first.
+ */
+ value = swab16(value);
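/*
 * Editorial note (illustrative only, not part of this patch): a
 * hypothetical RDAC value of 0x0123 comes back from the part as the
 * bytes 0x01, 0x23; i2c_smbus_read_word_data() assembles them LSB-first
 * into 0x2301, and swab16() restores 0x0123.
 */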
+
+ if (dpot->uid == DPOT_UID(AD5274_ID))
+ value = value >> 2;
+ return value;
default:
if ((reg & DPOT_REG_TOL) || (dpot->max_pos > 256))
return dpot_read_r8d16(dpot, (reg & 0xF8) |
@@ -198,7 +242,7 @@ static s32 dpot_write_spi(struct dpot_data *dpot, u8 reg, u16 value)
{
unsigned val = 0;
- if (!(reg & (DPOT_ADDR_EEPROM | DPOT_ADDR_CMD))) {
+ if (!(reg & (DPOT_ADDR_EEPROM | DPOT_ADDR_CMD | DPOT_ADDR_OTP))) {
if (dpot->feat & F_RDACS_WONLY)
dpot->rdac_cache[reg & DPOT_RDAC_MASK] = value;
@@ -219,11 +263,30 @@ static s32 dpot_write_spi(struct dpot_data *dpot, u8 reg, u16 value)
} else {
if (dpot->uid == DPOT_UID(AD5291_ID) ||
dpot->uid == DPOT_UID(AD5292_ID) ||
- dpot->uid == DPOT_UID(AD5293_ID))
+ dpot->uid == DPOT_UID(AD5293_ID)) {
+
+ dpot_write_r8d8(dpot, DPOT_AD5291_CTRLREG << 2,
+ DPOT_AD5291_UNLOCK_CMD);
+
+ if (dpot->uid == DPOT_UID(AD5291_ID))
+ value = value << 2;
+
return dpot_write_r8d8(dpot,
(DPOT_AD5291_RDAC << 2) |
(value >> 8), value & 0xFF);
+ } else if (dpot->uid == DPOT_UID(AD5270_ID) ||
+ dpot->uid == DPOT_UID(AD5271_ID)) {
+ dpot_write_r8d8(dpot,
+ DPOT_AD5270_1_2_4_CTRLREG << 2,
+ DPOT_AD5270_1_2_4_UNLOCK_CMD);
+
+ if (dpot->uid == DPOT_UID(AD5271_ID))
+ value = value << 2;
+ return dpot_write_r8d8(dpot,
+ (DPOT_AD5270_1_2_4_RDAC << 2) |
+ (value >> 8), value & 0xFF);
+ }
val = DPOT_SPI_RDAC | (reg & DPOT_RDAC_MASK);
}
} else if (reg & DPOT_ADDR_EEPROM) {
@@ -243,6 +306,16 @@ static s32 dpot_write_spi(struct dpot_data *dpot, u8 reg, u16 value)
val = DPOT_SPI_INC_ALL;
break;
}
+ } else if (reg & DPOT_ADDR_OTP) {
+ if (dpot->uid == DPOT_UID(AD5291_ID) ||
+ dpot->uid == DPOT_UID(AD5292_ID)) {
+ return dpot_write_r8d8(dpot,
+ DPOT_AD5291_STORE_XTPM << 2, 0);
+ } else if (dpot->uid == DPOT_UID(AD5270_ID) ||
+ dpot->uid == DPOT_UID(AD5271_ID)) {
+ return dpot_write_r8d8(dpot,
+ DPOT_AD5270_1_2_4_STORE_XTPM << 2, 0);
+ }
} else
BUG();
@@ -273,7 +346,7 @@ static s32 dpot_write_i2c(struct dpot_data *dpot, u8 reg, u16 value)
case DPOT_UID(AD5280_ID):
case DPOT_UID(AD5282_ID):
ctrl = ((reg & DPOT_RDAC_MASK) == DPOT_RDAC0) ?
- 0 : DPOT_AD5291_RDAC_AB;
+ 0 : DPOT_AD5282_RDAC_AB;
return dpot_write_r8d8(dpot, ctrl, value);
break;
case DPOT_UID(AD5171_ID):
@@ -289,12 +362,12 @@ static s32 dpot_write_i2c(struct dpot_data *dpot, u8 reg, u16 value)
case DPOT_UID(AD5172_ID):
case DPOT_UID(AD5173_ID):
ctrl = ((reg & DPOT_RDAC_MASK) == DPOT_RDAC0) ?
- 0 : DPOT_AD5272_3_A0;
+ 0 : DPOT_AD5172_3_A0;
if (reg & DPOT_ADDR_OTP) {
tmp = dpot_read_r8d16(dpot, ctrl);
if (tmp >> 14) /* Ready to Program? */
return -EFAULT;
- ctrl |= DPOT_AD5270_2_3_FUSE;
+ ctrl |= DPOT_AD5170_2_3_FUSE;
}
return dpot_write_r8d8(dpot, ctrl, value);
break;
@@ -303,10 +376,25 @@ static s32 dpot_write_i2c(struct dpot_data *dpot, u8 reg, u16 value)
tmp = dpot_read_r8d16(dpot, tmp);
if (tmp >> 14) /* Ready to Program? */
return -EFAULT;
- ctrl = DPOT_AD5270_2_3_FUSE;
+ ctrl = DPOT_AD5170_2_3_FUSE;
}
return dpot_write_r8d8(dpot, ctrl, value);
break;
+ case DPOT_UID(AD5272_ID):
+ case DPOT_UID(AD5274_ID):
+ dpot_write_r8d8(dpot, DPOT_AD5270_1_2_4_CTRLREG << 2,
+ DPOT_AD5270_1_2_4_UNLOCK_CMD);
+
+ if (reg & DPOT_ADDR_OTP)
+ return dpot_write_r8d8(dpot,
+ DPOT_AD5270_1_2_4_STORE_XTPM << 2, 0);
+
+ if (dpot->uid == DPOT_UID(AD5274_ID))
+ value = value << 2;
+
+ return dpot_write_r8d8(dpot, (DPOT_AD5270_1_2_4_RDAC << 2) |
+ (value >> 8), value & 0xFF);
+ break;
default:
if (reg & DPOT_ADDR_CMD)
return dpot_write_d8(dpot, reg);
@@ -320,7 +408,6 @@ static s32 dpot_write_i2c(struct dpot_data *dpot, u8 reg, u16 value)
}
}
-
static s32 dpot_write(struct dpot_data *dpot, u8 reg, u16 value)
{
if (dpot->feat & F_SPI)
diff --git a/drivers/misc/ad525x_dpot.h b/drivers/misc/ad525x_dpot.h
index 78b89fd2e2fd..a662f5987b68 100644
--- a/drivers/misc/ad525x_dpot.h
+++ b/drivers/misc/ad525x_dpot.h
@@ -47,9 +47,9 @@ enum dpot_devid {
AD5258_ID = DPOT_CONF(F_RDACS_RW_TOL, BRDAC0, 6, 0), /* I2C */
AD5259_ID = DPOT_CONF(F_RDACS_RW_TOL, BRDAC0, 8, 1),
AD5251_ID = DPOT_CONF(F_RDACS_RW_TOL | F_CMD_INC,
- BRDAC0 | BRDAC3, 6, 2),
+ BRDAC1 | BRDAC3, 6, 2),
AD5252_ID = DPOT_CONF(F_RDACS_RW_TOL | F_CMD_INC,
- BRDAC0 | BRDAC3, 8, 3),
+ BRDAC1 | BRDAC3, 8, 3),
AD5253_ID = DPOT_CONF(F_RDACS_RW_TOL | F_CMD_INC,
BRDAC0 | BRDAC1 | BRDAC2 | BRDAC3, 6, 4),
AD5254_ID = DPOT_CONF(F_RDACS_RW_TOL | F_CMD_INC,
@@ -93,8 +93,10 @@ enum dpot_devid {
BRDAC0 | BRDAC1 | BRDAC2 | BRDAC3, 8, 23),
AD5290_ID = DPOT_CONF(F_RDACS_WONLY | F_AD_APPDATA | F_SPI_8BIT,
BRDAC0, 8, 24),
- AD5291_ID = DPOT_CONF(F_RDACS_RW | F_SPI_16BIT, BRDAC0, 8, 25),
- AD5292_ID = DPOT_CONF(F_RDACS_RW | F_SPI_16BIT, BRDAC0, 10, 26),
+ AD5291_ID = DPOT_CONF(F_RDACS_RW | F_SPI_16BIT | F_CMD_OTP,
+ BRDAC0, 8, 25),
+ AD5292_ID = DPOT_CONF(F_RDACS_RW | F_SPI_16BIT | F_CMD_OTP,
+ BRDAC0, 10, 26),
AD5293_ID = DPOT_CONF(F_RDACS_RW | F_SPI_16BIT, BRDAC0, 10, 27),
AD7376_ID = DPOT_CONF(F_RDACS_WONLY | F_AD_APPDATA | F_SPI_8BIT,
BRDAC0, 7, 28),
@@ -122,6 +124,12 @@ enum dpot_devid {
AD5170_ID = DPOT_CONF(F_RDACS_RW | F_CMD_OTP, BRDAC0, 8, 45),
AD5172_ID = DPOT_CONF(F_RDACS_RW | F_CMD_OTP, BRDAC0 | BRDAC1, 8, 46),
AD5173_ID = DPOT_CONF(F_RDACS_RW | F_CMD_OTP, BRDAC0 | BRDAC1, 8, 47),
+ AD5270_ID = DPOT_CONF(F_RDACS_RW | F_CMD_OTP | F_SPI_16BIT,
+ BRDAC0, 10, 48),
+ AD5271_ID = DPOT_CONF(F_RDACS_RW | F_CMD_OTP | F_SPI_16BIT,
+ BRDAC0, 8, 49),
+ AD5272_ID = DPOT_CONF(F_RDACS_RW | F_CMD_OTP, BRDAC0, 10, 50),
+ AD5274_ID = DPOT_CONF(F_RDACS_RW | F_CMD_OTP, BRDAC0, 8, 51),
};
#define DPOT_RDAC0 0
@@ -165,15 +173,24 @@ enum dpot_devid {
/* AD5291/2/3 use special commands */
#define DPOT_AD5291_RDAC 0x01
#define DPOT_AD5291_READ_RDAC 0x02
+#define DPOT_AD5291_STORE_XTPM 0x03
+#define DPOT_AD5291_CTRLREG 0x06
+#define DPOT_AD5291_UNLOCK_CMD 0x03
-/* AD524x use special commands */
-#define DPOT_AD5291_RDAC_AB 0x80
+/* AD5270/1/2/4 use special commands */
+#define DPOT_AD5270_1_2_4_RDAC 0x01
+#define DPOT_AD5270_1_2_4_READ_RDAC 0x02
+#define DPOT_AD5270_1_2_4_STORE_XTPM 0x03
+#define DPOT_AD5270_1_2_4_CTRLREG 0x07
+#define DPOT_AD5270_1_2_4_UNLOCK_CMD 0x03
+
+#define DPOT_AD5282_RDAC_AB 0x80
#define DPOT_AD5273_FUSE 0x80
-#define DPOT_AD5270_2_3_FUSE 0x20
-#define DPOT_AD5270_2_3_OW 0x08
-#define DPOT_AD5272_3_A0 0x08
-#define DPOT_AD5270_2FUSE 0x80
+#define DPOT_AD5170_2_3_FUSE 0x20
+#define DPOT_AD5170_2_3_OW 0x08
+#define DPOT_AD5172_3_A0 0x08
+#define DPOT_AD5170_2FUSE 0x80
struct dpot_data;
diff --git a/drivers/misc/apds9802als.c b/drivers/misc/apds9802als.c
new file mode 100644
index 000000000000..644d4cd071cc
--- /dev/null
+++ b/drivers/misc/apds9802als.c
@@ -0,0 +1,346 @@
+/*
+ * apds9802als.c - apds9802 ALS Driver
+ *
+ * Copyright (C) 2009 Intel Corp
+ *
+ * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; version 2 of the License.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write to the Free Software Foundation, Inc.,
+ * 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA.
+ * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+ *
+ */
+
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/slab.h>
+#include <linux/i2c.h>
+#include <linux/err.h>
+#include <linux/delay.h>
+#include <linux/mutex.h>
+#include <linux/sysfs.h>
+#include <linux/pm_runtime.h>
+
+#define ALS_MIN_RANGE_VAL 1
+#define ALS_MAX_RANGE_VAL 2
+#define POWER_STA_ENABLE 1
+#define POWER_STA_DISABLE 0
+
+#define DRIVER_NAME "apds9802als"
+
+struct als_data {
+ struct mutex mutex;
+};
+
+static ssize_t als_sensing_range_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct i2c_client *client = to_i2c_client(dev);
+ int val;
+
+ val = i2c_smbus_read_byte_data(client, 0x81);
+ if (val < 0)
+ return val;
+ if (val & 1)
+ return sprintf(buf, "4095\n");
+ else
+ return sprintf(buf, "65535\n");
+}
+
+static int als_wait_for_data_ready(struct device *dev)
+{
+ struct i2c_client *client = to_i2c_client(dev);
+ int ret;
+ int retry = 10;
+
+ do {
+ msleep(30);
+ ret = i2c_smbus_read_byte_data(client, 0x86);
+ } while (!(ret & 0x80) && retry--);
+
+ if (retry < 0) {
+ dev_warn(dev, "timeout waiting for data ready\n");
+ return -ETIMEDOUT;
+ }
+
+ return 0;
+}
+
+static ssize_t als_lux0_input_data_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct i2c_client *client = to_i2c_client(dev);
+ struct als_data *data = i2c_get_clientdata(client);
+ int ret_val;
+ int temp;
+
+ /* Protect against parallel reads */
+ pm_runtime_get_sync(dev);
+ mutex_lock(&data->mutex);
+
+ /* clear EOC interrupt status */
+ i2c_smbus_write_byte(client, 0x40);
+ /* start measurement */
+ temp = i2c_smbus_read_byte_data(client, 0x81);
+ i2c_smbus_write_byte_data(client, 0x81, temp | 0x08);
+
+ ret_val = als_wait_for_data_ready(dev);
+ if (ret_val < 0)
+ goto failed;
+
+ temp = i2c_smbus_read_byte_data(client, 0x8C); /* LSB data */
+ if (temp < 0) {
+ ret_val = temp;
+ goto failed;
+ }
+ ret_val = i2c_smbus_read_byte_data(client, 0x8D); /* MSB data */
+ if (ret_val < 0)
+ goto failed;
+
+ mutex_unlock(&data->mutex);
+ pm_runtime_put_sync(dev);
+
+ temp = (ret_val << 8) | temp;
+ return sprintf(buf, "%d\n", temp);
+failed:
+ mutex_unlock(&data->mutex);
+ pm_runtime_put_sync(dev);
+ return ret_val;
+}
+
+static ssize_t als_sensing_range_store(struct device *dev,
+ struct device_attribute *attr, const char *buf, size_t count)
+{
+ struct i2c_client *client = to_i2c_client(dev);
+ struct als_data *data = i2c_get_clientdata(client);
+ int ret_val;
+ unsigned long val;
+
+ if (strict_strtoul(buf, 10, &val))
+ return -EINVAL;
+
+ if (val < 4096)
+ val = 1;
+ else if (val < 65536)
+ val = 2;
+ else
+ return -ERANGE;
+
+ pm_runtime_get_sync(dev);
+
+ /* Make sure nobody else reads/modifies/writes 0x81 while we
+ are active */
+ mutex_lock(&data->mutex);
+
+ ret_val = i2c_smbus_read_byte_data(client, 0x81);
+ if (ret_val < 0)
+ goto fail;
+
+ /* Reset the bits before setting them */
+ ret_val = ret_val & 0xFA;
+
+ if (val == 1) /* Setting detection range up to 4k LUX */
+ ret_val = (ret_val | 0x01);
+ else /* Setting detection range up to 64k LUX*/
+ ret_val = (ret_val | 0x00);
+
+ ret_val = i2c_smbus_write_byte_data(client, 0x81, ret_val);
+
+ if (ret_val >= 0) {
+ /* All OK */
+ mutex_unlock(&data->mutex);
+ pm_runtime_put_sync(dev);
+ return count;
+ }
+fail:
+ mutex_unlock(&data->mutex);
+ pm_runtime_put_sync(dev);
+ return ret_val;
+}
+
+static int als_set_power_state(struct i2c_client *client, bool on_off)
+{
+ int ret_val;
+ struct als_data *data = i2c_get_clientdata(client);
+
+ mutex_lock(&data->mutex);
+ ret_val = i2c_smbus_read_byte_data(client, 0x80);
+ if (ret_val < 0)
+ goto fail;
+ if (on_off)
+ ret_val = ret_val | 0x01;
+ else
+ ret_val = ret_val & 0xFE;
+ ret_val = i2c_smbus_write_byte_data(client, 0x80, ret_val);
+fail:
+ mutex_unlock(&data->mutex);
+ return ret_val;
+}
+
+static DEVICE_ATTR(lux0_sensor_range, S_IRUGO | S_IWUSR,
+ als_sensing_range_show, als_sensing_range_store);
+static DEVICE_ATTR(lux0_input, S_IRUGO, als_lux0_input_data_show, NULL);
+
+static struct attribute *mid_att_als[] = {
+ &dev_attr_lux0_sensor_range.attr,
+ &dev_attr_lux0_input.attr,
+ NULL
+};
+
+static struct attribute_group m_als_gr = {
+ .name = "apds9802als",
+ .attrs = mid_att_als
+};
+
+static int als_set_default_config(struct i2c_client *client)
+{
+ int ret_val;
+ /* Write the command and then switch on */
+ ret_val = i2c_smbus_write_byte_data(client, 0x80, 0x01);
+ if (ret_val < 0) {
+ dev_err(&client->dev, "failed default switch on write\n");
+ return ret_val;
+ }
+ /* detection range: 1~64K Lux, manual measurement */
+ ret_val = i2c_smbus_write_byte_data(client, 0x81, 0x08);
+ if (ret_val < 0)
+ dev_err(&client->dev, "failed default LUX on write\n");
+
+ /* We always get 0 for the 1st measurement after system power on,
+ * so make sure it is finished before the user asks for data.
+ */
+ als_wait_for_data_ready(&client->dev);
+
+ return ret_val;
+}
+
+static int apds9802als_probe(struct i2c_client *client,
+ const struct i2c_device_id *id)
+{
+ int res;
+ struct als_data *data;
+
+ data = kzalloc(sizeof(struct als_data), GFP_KERNEL);
+ if (data == NULL) {
+ dev_err(&client->dev, "Memory allocation failed\n");
+ return -ENOMEM;
+ }
+ i2c_set_clientdata(client, data);
+ res = sysfs_create_group(&client->dev.kobj, &m_als_gr);
+ if (res) {
+ dev_err(&client->dev, "device create file failed\n");
+ goto als_error1;
+ }
+ dev_info(&client->dev, "ALS chip found\n");
+ als_set_default_config(client);
+ mutex_init(&data->mutex);
+
+ pm_runtime_enable(&client->dev);
+ pm_runtime_get(&client->dev);
+ pm_runtime_put(&client->dev);
+
+ return res;
+als_error1:
+ kfree(data);
+ return res;
+}
+
+static int apds9802als_remove(struct i2c_client *client)
+{
+ struct als_data *data = i2c_get_clientdata(client);
+
+ als_set_power_state(client, false);
+ sysfs_remove_group(&client->dev.kobj, &m_als_gr);
+ kfree(data);
+ return 0;
+}
+
+#ifdef CONFIG_PM
+static int apds9802als_suspend(struct i2c_client *client, pm_message_t mesg)
+{
+ als_set_power_state(client, false);
+ return 0;
+}
+
+static int apds9802als_resume(struct i2c_client *client)
+{
+ als_set_default_config(client);
+
+ pm_runtime_get(&client->dev);
+ pm_runtime_put(&client->dev);
+ return 0;
+}
+
+static int apds9802als_runtime_suspend(struct device *dev)
+{
+ struct i2c_client *client = to_i2c_client(dev);
+
+ als_set_power_state(client, false);
+ return 0;
+}
+
+static int apds9802als_runtime_resume(struct device *dev)
+{
+ struct i2c_client *client = to_i2c_client(dev);
+
+ als_set_power_state(client, true);
+ return 0;
+}
+
+static const struct dev_pm_ops apds9802als_pm_ops = {
+ .runtime_suspend = apds9802als_runtime_suspend,
+ .runtime_resume = apds9802als_runtime_resume,
+};
+
+#define APDS9802ALS_PM_OPS (&apds9802als_pm_ops)
+
+#else /* CONFIG_PM */
+#define apds9802als_suspend NULL
+#define apds9802als_resume NULL
+#define APDS9802ALS_PM_OPS NULL
+#endif /* CONFIG_PM */
+
+static struct i2c_device_id apds9802als_id[] = {
+ { DRIVER_NAME, 0 },
+ { }
+};
+
+MODULE_DEVICE_TABLE(i2c, apds9802als_id);
+
+static struct i2c_driver apds9802als_driver = {
+ .driver = {
+ .name = DRIVER_NAME,
+ .pm = APDS9802ALS_PM_OPS,
+ },
+ .probe = apds9802als_probe,
+ .remove = apds9802als_remove,
+ .suspend = apds9802als_suspend,
+ .resume = apds9802als_resume,
+ .id_table = apds9802als_id,
+};
+
+static int __init sensor_apds9802als_init(void)
+{
+ return i2c_add_driver(&apds9802als_driver);
+}
+
+static void __exit sensor_apds9802als_exit(void)
+{
+ i2c_del_driver(&apds9802als_driver);
+}
+module_init(sensor_apds9802als_init);
+module_exit(sensor_apds9802als_exit);
+
+MODULE_AUTHOR("Anantha Narayanan <Anantha.Narayanan@intel.com");
+MODULE_DESCRIPTION("Avago apds9802als ALS Driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/misc/apds990x.c b/drivers/misc/apds990x.c
new file mode 100644
index 000000000000..200311fea369
--- /dev/null
+++ b/drivers/misc/apds990x.c
@@ -0,0 +1,1295 @@
+/*
+ * This file is part of the APDS990x sensor driver.
+ * Chip is combined proximity and ambient light sensor.
+ *
+ * Copyright (C) 2010 Nokia Corporation and/or its subsidiary(-ies).
+ *
+ * Contact: Samu Onkalo <samu.p.onkalo@nokia.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
+ * 02110-1301 USA
+ *
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/i2c.h>
+#include <linux/interrupt.h>
+#include <linux/mutex.h>
+#include <linux/regulator/consumer.h>
+#include <linux/pm_runtime.h>
+#include <linux/delay.h>
+#include <linux/wait.h>
+#include <linux/slab.h>
+#include <linux/i2c/apds990x.h>
+
+/* Register map */
+#define APDS990X_ENABLE 0x00 /* Enable of states and interrupts */
+#define APDS990X_ATIME 0x01 /* ALS ADC time */
+#define APDS990X_PTIME 0x02 /* Proximity ADC time */
+#define APDS990X_WTIME 0x03 /* Wait time */
+#define APDS990X_AILTL 0x04 /* ALS interrupt low threshold low byte */
+#define APDS990X_AILTH 0x05 /* ALS interrupt low threshold hi byte */
+#define APDS990X_AIHTL 0x06 /* ALS interrupt hi threshold low byte */
+#define APDS990X_AIHTH 0x07 /* ALS interrupt hi threshold hi byte */
+#define APDS990X_PILTL 0x08 /* Proximity interrupt low threshold low byte */
+#define APDS990X_PILTH 0x09 /* Proximity interrupt low threshold hi byte */
+#define APDS990X_PIHTL 0x0a /* Proximity interrupt hi threshold low byte */
+#define APDS990X_PIHTH 0x0b /* Proximity interrupt hi threshold hi byte */
+#define APDS990X_PERS 0x0c /* Interrupt persistence filters */
+#define APDS990X_CONFIG 0x0d /* Configuration */
+#define APDS990X_PPCOUNT 0x0e /* Proximity pulse count */
+#define APDS990X_CONTROL 0x0f /* Gain control register */
+#define APDS990X_REV 0x11 /* Revision Number */
+#define APDS990X_ID 0x12 /* Device ID */
+#define APDS990X_STATUS 0x13 /* Device status */
+#define APDS990X_CDATAL 0x14 /* Clear ADC low data register */
+#define APDS990X_CDATAH 0x15 /* Clear ADC high data register */
+#define APDS990X_IRDATAL 0x16 /* IR ADC low data register */
+#define APDS990X_IRDATAH 0x17 /* IR ADC high data register */
+#define APDS990X_PDATAL 0x18 /* Proximity ADC low data register */
+#define APDS990X_PDATAH 0x19 /* Proximity ADC high data register */
+
+/* Control */
+#define APDS990X_MAX_AGAIN 3
+
+/* Enable register */
+#define APDS990X_EN_PIEN (0x1 << 5)
+#define APDS990X_EN_AIEN (0x1 << 4)
+#define APDS990X_EN_WEN (0x1 << 3)
+#define APDS990X_EN_PEN (0x1 << 2)
+#define APDS990X_EN_AEN (0x1 << 1)
+#define APDS990X_EN_PON (0x1 << 0)
+#define APDS990X_EN_DISABLE_ALL 0
+
+/* Status register */
+#define APDS990X_ST_PINT (0x1 << 5)
+#define APDS990X_ST_AINT (0x1 << 4)
+
+/* I2C access types */
+#define APDS990x_CMD_TYPE_MASK (0x03 << 5)
+#define APDS990x_CMD_TYPE_RB (0x00 << 5) /* Repeated byte */
+#define APDS990x_CMD_TYPE_INC (0x01 << 5) /* Auto increment */
+#define APDS990x_CMD_TYPE_SPE (0x03 << 5) /* Special function */
+
+#define APDS990x_ADDR_SHIFT 0
+#define APDS990x_CMD 0x80
+
+/* Interrupt ack commands */
+#define APDS990X_INT_ACK_ALS 0x6
+#define APDS990X_INT_ACK_PS 0x5
+#define APDS990X_INT_ACK_BOTH 0x7
+
+/* ptime */
+#define APDS990X_PTIME_DEFAULT 0xff /* Recommended conversion time 2.7ms*/
+
+/* wtime */
+#define APDS990X_WTIME_DEFAULT 0xee /* ~50ms wait time */
+
+#define APDS990X_TIME_TO_ADC 1024 /* One timetick as ADC count value */
+
+/* Persistence */
+#define APDS990X_APERS_SHIFT 0
+#define APDS990X_PPERS_SHIFT 4
+
+/* Supported IDs */
+#define APDS990X_ID_0 0x0
+#define APDS990X_ID_4 0x4
+#define APDS990X_ID_29 0x29
+
+/* pgain and pdiode settings */
+#define APDS_PGAIN_1X 0x0
+#define APDS_PDIODE_IR 0x2
+
+#define APDS990X_LUX_OUTPUT_SCALE 10
+
+/* Reverse chip factors for threshold calculation */
+struct reverse_factors {
+ u32 afactor;
+ int cf1;
+ int irf1;
+ int cf2;
+ int irf2;
+};
+
+struct apds990x_chip {
+ struct apds990x_platform_data *pdata;
+ struct i2c_client *client;
+ struct mutex mutex; /* avoid parallel access */
+ struct regulator_bulk_data regs[2];
+ wait_queue_head_t wait;
+
+ int prox_en;
+ bool prox_continuous_mode;
+ bool lux_wait_fresh_res;
+
+ /* Chip parameters */
+ struct apds990x_chip_factors cf;
+ struct reverse_factors rcf;
+ u16 atime; /* als integration time */
+ u16 arate; /* als reporting rate */
+ u16 a_max_result; /* Max possible ADC value with current atime */
+ u8 again_meas; /* Gain used in last measurement */
+ u8 again_next; /* Next calculated gain */
+ u8 pgain;
+ u8 pdiode;
+ u8 pdrive;
+ u8 lux_persistence;
+ u8 prox_persistence;
+
+ u32 lux_raw;
+ u32 lux;
+ u16 lux_clear;
+ u16 lux_ir;
+ u16 lux_calib;
+ u32 lux_thres_hi;
+ u32 lux_thres_lo;
+
+ u32 prox_thres;
+ u16 prox_data;
+ u16 prox_calib;
+
+ char chipname[10];
+ u8 revision;
+};
+
+#define APDS_CALIB_SCALER 8192
+#define APDS_LUX_NEUTRAL_CALIB_VALUE (1 * APDS_CALIB_SCALER)
+#define APDS_PROX_NEUTRAL_CALIB_VALUE (1 * APDS_CALIB_SCALER)
+
+#define APDS_PROX_DEF_THRES 600
+#define APDS_PROX_HYSTERESIS 50
+#define APDS_LUX_DEF_THRES_HI 101
+#define APDS_LUX_DEF_THRES_LO 100
+#define APDS_DEFAULT_PROX_PERS 1
+
+#define APDS_TIMEOUT 2000
+#define APDS_STARTUP_DELAY 25000 /* us */
+#define APDS_RANGE 65535
+#define APDS_PROX_RANGE 1023
+#define APDS_LUX_GAIN_LO_LIMIT 100
+#define APDS_LUX_GAIN_LO_LIMIT_STRICT 25
+
+#define TIMESTEP 87 /* 2.7ms is about 87 / 32 */
+#define TIME_STEP_SCALER 32
+
+#define APDS_LUX_AVERAGING_TIME 50 /* tolerates 50/60Hz ripple */
+#define APDS_LUX_DEFAULT_RATE 200
+
+static const u8 again[] = {1, 8, 16, 120}; /* ALS gain steps */
+static const u8 ir_currents[] = {100, 50, 25, 12}; /* IRled currents in mA */
+
+/* The following two tables must match, i.e. a 10 Hz rate maps to a persistence value of 1 */
+static const u16 arates_hz[] = {10, 5, 2, 1};
+static const u8 apersis[] = {1, 2, 4, 5};
+
+/* Regulators */
+static const char reg_vcc[] = "Vdd";
+static const char reg_vled[] = "Vled";
+
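+/*
+ * Register accesses go through the chip's command byte: bit 7 selects
+ * command mode (APDS990x_CMD) and bits 6:5 the transaction type. As an
+ * illustration, a repeated-byte read of APDS990X_STATUS (0x13) uses the
+ * command byte 0x80 | 0x13 = 0x93, and an auto-increment word read of
+ * APDS990X_CDATAL (0x14) uses 0x80 | 0x20 | 0x14 = 0xb4.
+ */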
+static int apds990x_read_byte(struct apds990x_chip *chip, u8 reg, u8 *data)
+{
+ struct i2c_client *client = chip->client;
+ s32 ret;
+
+ reg &= ~APDS990x_CMD_TYPE_MASK;
+ reg |= APDS990x_CMD | APDS990x_CMD_TYPE_RB;
+
+ ret = i2c_smbus_read_byte_data(client, reg);
+ *data = ret;
+ return (int)ret;
+}
+
+static int apds990x_read_word(struct apds990x_chip *chip, u8 reg, u16 *data)
+{
+ struct i2c_client *client = chip->client;
+ s32 ret;
+
+ reg &= ~APDS990x_CMD_TYPE_MASK;
+ reg |= APDS990x_CMD | APDS990x_CMD_TYPE_INC;
+
+ ret = i2c_smbus_read_word_data(client, reg);
+ *data = ret;
+ return (int)ret;
+}
+
+static int apds990x_write_byte(struct apds990x_chip *chip, u8 reg, u8 data)
+{
+ struct i2c_client *client = chip->client;
+ s32 ret;
+
+ reg &= ~APDS990x_CMD_TYPE_MASK;
+ reg |= APDS990x_CMD | APDS990x_CMD_TYPE_RB;
+
+ ret = i2c_smbus_write_byte_data(client, reg, data);
+ return (int)ret;
+}
+
+static int apds990x_write_word(struct apds990x_chip *chip, u8 reg, u16 data)
+{
+ struct i2c_client *client = chip->client;
+ s32 ret;
+
+ reg &= ~APDS990x_CMD_TYPE_MASK;
+ reg |= APDS990x_CMD | APDS990x_CMD_TYPE_INC;
+
+ ret = i2c_smbus_write_word_data(client, reg, data);
+ return (int)ret;
+}
+
+static int apds990x_mode_on(struct apds990x_chip *chip)
+{
+ /* ALS is mandatory, proximity optional */
+ u8 reg = APDS990X_EN_AIEN | APDS990X_EN_PON | APDS990X_EN_AEN |
+ APDS990X_EN_WEN;
+
+ if (chip->prox_en)
+ reg |= APDS990X_EN_PIEN | APDS990X_EN_PEN;
+
+ return apds990x_write_byte(chip, APDS990X_ENABLE, reg);
+}
+
+static u16 apds990x_lux_to_threshold(struct apds990x_chip *chip, u32 lux)
+{
+ u32 thres;
+ u32 cpl;
+ u32 ir;
+
+ if (lux == 0)
+ return 0;
+ else if (lux == APDS_RANGE)
+ return APDS_RANGE;
+
+ /*
+ * The reported lux value is a combination of the IR and CLEAR channel
+ * values. However, the interrupt threshold applies only to the clear
+ * channel. This function approximates the HW threshold value needed for
+ * a given lux value under the current lighting conditions.
+ * The IR level relative to visible light varies heavily depending on
+ * the light source.
+ *
+ * Calculate threshold value for the next measurement period.
+ * Math: threshold = lux * cpl where
+ * cpl = atime * again / (glass_attenuation * device_factor)
+ * (count-per-lux)
+ *
+ * First remove calibration. Division by four is to avoid overflow
+ */
+ lux = lux * (APDS_CALIB_SCALER / 4) / (chip->lux_calib / 4);
+
+ /* Multiplication by 64 is to increase accuracy */
+ cpl = ((u32)chip->atime * (u32)again[chip->again_next] *
+ APDS_PARAM_SCALE * 64) / (chip->cf.ga * chip->cf.df);
+
+ thres = lux * cpl / 64;
+ /*
+ * Convert IR light from the latest result to match with
+ * new gain step. This helps to adapt with the current
+ * source of light.
+ */
+ ir = (u32)chip->lux_ir * (u32)again[chip->again_next] /
+ (u32)again[chip->again_meas];
+
+ /*
+ * Compensate count with IR light impact
+ * IAC1 > IAC2 (see apds990x_get_lux for formulas)
+ */
+ if (chip->lux_clear * APDS_PARAM_SCALE >=
+ chip->rcf.afactor * chip->lux_ir)
+ thres = (chip->rcf.cf1 * thres + chip->rcf.irf1 * ir) /
+ APDS_PARAM_SCALE;
+ else
+ thres = (chip->rcf.cf2 * thres + chip->rcf.irf2 * ir) /
+ APDS_PARAM_SCALE;
+
+ if (thres >= chip->a_max_result)
+ thres = chip->a_max_result - 1;
+ return thres;
+}
+
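+/*
+ * Worked example (illustrative): for the default integration time of
+ * APDS_LUX_AVERAGING_TIME (50 ms), reg_value = 256 - (50 * 32) / 87 = 238
+ * and a_max_result = (256 - 238) * 1024 = 18432 counts.
+ */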
+static inline int apds990x_set_atime(struct apds990x_chip *chip, u32 time_ms)
+{
+ u8 reg_value;
+
+ chip->atime = time_ms;
+ /* Formula is specified in the data sheet */
+ reg_value = 256 - ((time_ms * TIME_STEP_SCALER) / TIMESTEP);
+ /* Calculate max ADC value for given integration time */
+ chip->a_max_result = (u16)(256 - reg_value) * APDS990X_TIME_TO_ADC;
+ return apds990x_write_byte(chip, APDS990X_ATIME, reg_value);
+}
+
+/* Called always with mutex locked */
+static int apds990x_refresh_pthres(struct apds990x_chip *chip, int data)
+{
+ int ret, lo, hi;
+
+ /* If the chip is not in use, don't try to access it */
+ if (pm_runtime_suspended(&chip->client->dev))
+ return 0;
+
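+ /*
+ * Pick the interrupt window from the latest result: below the threshold
+ * only an upward crossing triggers; above it the low limit gets
+ * hysteresis so noise does not retrigger, and in continuous mode the
+ * high limit stays at the threshold so each measurement above it keeps
+ * reporting.
+ */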
+ if (data < chip->prox_thres) {
+ lo = 0;
+ hi = chip->prox_thres;
+ } else {
+ lo = chip->prox_thres - APDS_PROX_HYSTERESIS;
+ if (chip->prox_continuous_mode)
+ hi = chip->prox_thres;
+ else
+ hi = APDS_RANGE;
+ }
+
+ ret = apds990x_write_word(chip, APDS990X_PILTL, lo);
+ ret |= apds990x_write_word(chip, APDS990X_PIHTL, hi);
+ return ret;
+}
+
+/* Called always with mutex locked */
+static int apds990x_refresh_athres(struct apds990x_chip *chip)
+{
+ int ret;
+ /* If the chip is not in use, don't try to access it */
+ if (pm_runtime_suspended(&chip->client->dev))
+ return 0;
+
+ ret = apds990x_write_word(chip, APDS990X_AILTL,
+ apds990x_lux_to_threshold(chip, chip->lux_thres_lo));
+ ret |= apds990x_write_word(chip, APDS990X_AIHTL,
+ apds990x_lux_to_threshold(chip, chip->lux_thres_hi));
+
+ return ret;
+}
+
+/* Called always with mutex locked */
+static void apds990x_force_a_refresh(struct apds990x_chip *chip)
+{
+ /* This will force ALS interrupt after the next measurement. */
+ apds990x_write_word(chip, APDS990X_AILTL, APDS_LUX_DEF_THRES_LO);
+ apds990x_write_word(chip, APDS990X_AIHTL, APDS_LUX_DEF_THRES_HI);
+}
+
+/* Called always with mutex locked */
+static void apds990x_force_p_refresh(struct apds990x_chip *chip)
+{
+ /* This will force proximity interrupt after the next measurement. */
+ apds990x_write_word(chip, APDS990X_PILTL, APDS_PROX_DEF_THRES - 1);
+ apds990x_write_word(chip, APDS990X_PIHTL, APDS_PROX_DEF_THRES);
+}
+
+/* Called always with mutex locked */
+static int apds990x_calc_again(struct apds990x_chip *chip)
+{
+ int curr_again = chip->again_meas;
+ int next_again = chip->again_meas;
+ int ret = 0;
+
+ /* Calculate suitable als gain */
+ if (chip->lux_clear == chip->a_max_result)
+ next_again -= 2; /* ALS saturated. Decrease gain by 2 steps */
+ else if (chip->lux_clear > chip->a_max_result / 2)
+ next_again--;
+ else if (chip->lux_clear < APDS_LUX_GAIN_LO_LIMIT_STRICT)
+ next_again += 2; /* Too dark. Increase gain by 2 steps */
+ else if (chip->lux_clear < APDS_LUX_GAIN_LO_LIMIT)
+ next_again++;
+
+ /* Limit gain to available range */
+ if (next_again < 0)
+ next_again = 0;
+ else if (next_again > APDS990X_MAX_AGAIN)
+ next_again = APDS990X_MAX_AGAIN;
+
+ /* Check whether we can trust the measured result */
+ if (chip->lux_clear == chip->a_max_result)
+ /* Result can be totally garbage due to saturation */
+ ret = -ERANGE;
+ else if (next_again != curr_again &&
+ chip->lux_clear < APDS_LUX_GAIN_LO_LIMIT_STRICT)
+ /*
+ * Gain is changed and measurement result is very small.
+ * Result can be totally garbage due to underflow
+ */
+ ret = -ERANGE;
+
+ chip->again_next = next_again;
+ apds990x_write_byte(chip, APDS990X_CONTROL,
+ (chip->pdrive << 6) |
+ (chip->pdiode << 4) |
+ (chip->pgain << 2) |
+ (chip->again_next << 0));
+
+ /*
+ * An error means a bad result -> re-measurement is needed. The forced
+ * refresh uses the fastest possible persistence setting to get a result
+ * as soon as possible.
+ */
+ if (ret < 0)
+ apds990x_force_a_refresh(chip);
+ else
+ apds990x_refresh_athres(chip);
+
+ return ret;
+}
+
+/* Called always with mutex locked */
+static int apds990x_get_lux(struct apds990x_chip *chip, int clear, int ir)
+{
+ int iac, iac1, iac2; /* IR adjusted counts */
+ u32 lpc; /* Lux per count */
+
+ /* Formulas:
+ * iac1 = CF1 * CLEAR_CH - IRF1 * IR_CH
+ * iac2 = CF2 * CLEAR_CH - IRF2 * IR_CH
+ */
+ iac1 = (chip->cf.cf1 * clear - chip->cf.irf1 * ir) / APDS_PARAM_SCALE;
+ iac2 = (chip->cf.cf2 * clear - chip->cf.irf2 * ir) / APDS_PARAM_SCALE;
+
+ iac = max(iac1, iac2);
+ iac = max(iac, 0);
+
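+ /*
+ * chip->cf.ga carries the APDS_PARAM_SCALE factor (e.g. the default
+ * 1966 is 0.48 * APDS_PARAM_SCALE), so the final division in the
+ * return statement below removes that scaling from the result again.
+ */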
+ lpc = APDS990X_LUX_OUTPUT_SCALE * (chip->cf.df * chip->cf.ga) /
+ (u32)(again[chip->again_meas] * (u32)chip->atime);
+
+ return (iac * lpc) / APDS_PARAM_SCALE;
+}
+
+static int apds990x_ack_int(struct apds990x_chip *chip, u8 mode)
+{
+ struct i2c_client *client = chip->client;
+ s32 ret;
+ u8 reg = APDS990x_CMD | APDS990x_CMD_TYPE_SPE;
+
+ switch (mode & (APDS990X_ST_AINT | APDS990X_ST_PINT)) {
+ case APDS990X_ST_AINT:
+ reg |= APDS990X_INT_ACK_ALS;
+ break;
+ case APDS990X_ST_PINT:
+ reg |= APDS990X_INT_ACK_PS;
+ break;
+ default:
+ reg |= APDS990X_INT_ACK_BOTH;
+ break;
+ }
+
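+ /* Issuing the special function command acknowledges the selected interrupt(s) */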
+ ret = i2c_smbus_read_byte_data(client, reg);
+ return (int)ret;
+}
+
+static irqreturn_t apds990x_irq(int irq, void *data)
+{
+ struct apds990x_chip *chip = data;
+ u8 status;
+
+ apds990x_read_byte(chip, APDS990X_STATUS, &status);
+ apds990x_ack_int(chip, status);
+
+ mutex_lock(&chip->mutex);
+ if (!pm_runtime_suspended(&chip->client->dev)) {
+ if (status & APDS990X_ST_AINT) {
+ apds990x_read_word(chip, APDS990X_CDATAL,
+ &chip->lux_clear);
+ apds990x_read_word(chip, APDS990X_IRDATAL,
+ &chip->lux_ir);
+ /* Store used gain for calculations */
+ chip->again_meas = chip->again_next;
+
+ chip->lux_raw = apds990x_get_lux(chip,
+ chip->lux_clear,
+ chip->lux_ir);
+
+ if (apds990x_calc_again(chip) == 0) {
+ /* Result is valid */
+ chip->lux = chip->lux_raw;
+ chip->lux_wait_fresh_res = false;
+ wake_up(&chip->wait);
+ sysfs_notify(&chip->client->dev.kobj,
+ NULL, "lux0_input");
+ }
+ }
+
+ if ((status & APDS990X_ST_PINT) && chip->prox_en) {
+ u16 clr_ch;
+
+ apds990x_read_word(chip, APDS990X_CDATAL, &clr_ch);
+ /*
+ * If the ALS channel is saturated at minimum gain,
+ * proximity gives false positive values.
+ * Just ignore them.
+ */
+ if (chip->again_meas == 0 &&
+ clr_ch == chip->a_max_result)
+ chip->prox_data = 0;
+ else
+ apds990x_read_word(chip,
+ APDS990X_PDATAL,
+ &chip->prox_data);
+
+ apds990x_refresh_pthres(chip, chip->prox_data);
+ if (chip->prox_data < chip->prox_thres)
+ chip->prox_data = 0;
+ else if (!chip->prox_continuous_mode)
+ chip->prox_data = APDS_PROX_RANGE;
+ sysfs_notify(&chip->client->dev.kobj,
+ NULL, "prox0_raw");
+ }
+ }
+ mutex_unlock(&chip->mutex);
+ return IRQ_HANDLED;
+}
+
+static int apds990x_configure(struct apds990x_chip *chip)
+{
+ /* It is recommended to use disabled mode during these operations */
+ apds990x_write_byte(chip, APDS990X_ENABLE, APDS990X_EN_DISABLE_ALL);
+
+ /* Conversion and wait times for the different state machine states */
+ apds990x_write_byte(chip, APDS990X_PTIME, APDS990X_PTIME_DEFAULT);
+ apds990x_write_byte(chip, APDS990X_WTIME, APDS990X_WTIME_DEFAULT);
+ apds990x_set_atime(chip, APDS_LUX_AVERAGING_TIME);
+
+ apds990x_write_byte(chip, APDS990X_CONFIG, 0);
+
+ /* Persistence levels */
+ apds990x_write_byte(chip, APDS990X_PERS,
+ (chip->lux_persistence << APDS990X_APERS_SHIFT) |
+ (chip->prox_persistence << APDS990X_PPERS_SHIFT));
+
+ apds990x_write_byte(chip, APDS990X_PPCOUNT, chip->pdata->ppcount);
+
+ /* Start with relatively small gain */
+ chip->again_meas = 1;
+ chip->again_next = 1;
+ apds990x_write_byte(chip, APDS990X_CONTROL,
+ (chip->pdrive << 6) |
+ (chip->pdiode << 4) |
+ (chip->pgain << 2) |
+ (chip->again_next << 0));
+ return 0;
+}
+
+static int apds990x_detect(struct apds990x_chip *chip)
+{
+ struct i2c_client *client = chip->client;
+ int ret;
+ u8 id;
+
+ ret = apds990x_read_byte(chip, APDS990X_ID, &id);
+ if (ret < 0) {
+ dev_err(&client->dev, "ID read failed\n");
+ return ret;
+ }
+
+ ret = apds990x_read_byte(chip, APDS990X_REV, &chip->revision);
+ if (ret < 0) {
+ dev_err(&client->dev, "REV read failed\n");
+ return ret;
+ }
+
+ switch (id) {
+ case APDS990X_ID_0:
+ case APDS990X_ID_4:
+ case APDS990X_ID_29:
+ snprintf(chip->chipname, sizeof(chip->chipname), "APDS-990x");
+ break;
+ default:
+ ret = -ENODEV;
+ break;
+ }
+ return ret;
+}
+
+static int apds990x_chip_on(struct apds990x_chip *chip)
+{
+ int err = regulator_bulk_enable(ARRAY_SIZE(chip->regs),
+ chip->regs);
+ if (err < 0)
+ return err;
+
+ usleep_range(APDS_STARTUP_DELAY, 2 * APDS_STARTUP_DELAY);
+
+ /* Refresh all configuration in case the regulators were off */
+ chip->prox_data = 0;
+ apds990x_configure(chip);
+ apds990x_mode_on(chip);
+ return 0;
+}
+
+static int apds990x_chip_off(struct apds990x_chip *chip)
+{
+ apds990x_write_byte(chip, APDS990X_ENABLE, APDS990X_EN_DISABLE_ALL);
+ regulator_bulk_disable(ARRAY_SIZE(chip->regs), chip->regs);
+ return 0;
+}
+
+static ssize_t apds990x_lux_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct apds990x_chip *chip = dev_get_drvdata(dev);
+ ssize_t ret;
+ u32 result;
+ long timeout;
+
+ if (pm_runtime_suspended(dev))
+ return -EIO;
+
+ timeout = wait_event_interruptible_timeout(chip->wait,
+ !chip->lux_wait_fresh_res,
+ msecs_to_jiffies(APDS_TIMEOUT));
+ if (!timeout)
+ return -EIO;
+
+ mutex_lock(&chip->mutex);
+ result = (chip->lux * chip->lux_calib) / APDS_CALIB_SCALER;
+ if (result > (APDS_RANGE * APDS990X_LUX_OUTPUT_SCALE))
+ result = APDS_RANGE * APDS990X_LUX_OUTPUT_SCALE;
+
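+ /* lux is kept scaled by APDS990X_LUX_OUTPUT_SCALE; print with one decimal */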
+ ret = sprintf(buf, "%d.%d\n",
+ result / APDS990X_LUX_OUTPUT_SCALE,
+ result % APDS990X_LUX_OUTPUT_SCALE);
+ mutex_unlock(&chip->mutex);
+ return ret;
+}
+
+static DEVICE_ATTR(lux0_input, S_IRUGO, apds990x_lux_show, NULL);
+
+static ssize_t apds990x_lux_range_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ return sprintf(buf, "%u\n", APDS_RANGE);
+}
+
+static DEVICE_ATTR(lux0_sensor_range, S_IRUGO, apds990x_lux_range_show, NULL);
+
+static ssize_t apds990x_lux_calib_format_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ return sprintf(buf, "%u\n", APDS_CALIB_SCALER);
+}
+
+static DEVICE_ATTR(lux0_calibscale_default, S_IRUGO,
+ apds990x_lux_calib_format_show, NULL);
+
+static ssize_t apds990x_lux_calib_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct apds990x_chip *chip = dev_get_drvdata(dev);
+
+ return sprintf(buf, "%u\n", chip->lux_calib);
+}
+
+static ssize_t apds990x_lux_calib_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t len)
+{
+ struct apds990x_chip *chip = dev_get_drvdata(dev);
+ unsigned long value;
+
+ if (strict_strtoul(buf, 0, &value))
+ return -EINVAL;
+
+ if (value > APDS_RANGE)
+ return -EINVAL;
+
+ chip->lux_calib = value;
+
+ return len;
+}
+
+static DEVICE_ATTR(lux0_calibscale, S_IRUGO | S_IWUSR, apds990x_lux_calib_show,
+ apds990x_lux_calib_store);
+
+static ssize_t apds990x_rate_avail(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ int i;
+ int pos = 0;
+ for (i = 0; i < ARRAY_SIZE(arates_hz); i++)
+ pos += sprintf(buf + pos, "%d ", arates_hz[i]);
+ sprintf(buf + pos - 1, "\n");
+ return pos;
+}
+
+static ssize_t apds990x_rate_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct apds990x_chip *chip = dev_get_drvdata(dev);
+ return sprintf(buf, "%d\n", chip->arate);
+}
+
+static int apds990x_set_arate(struct apds990x_chip *chip, int rate)
+{
+ int i;
+
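+ /* Round down to the nearest supported rate; rates below 1 Hz are rejected */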
+ for (i = 0; i < ARRAY_SIZE(arates_hz); i++)
+ if (rate >= arates_hz[i])
+ break;
+
+ if (i == ARRAY_SIZE(arates_hz))
+ return -EINVAL;
+
+ /* Pick up corresponding persistence value */
+ chip->lux_persistence = apersis[i];
+ chip->arate = arates_hz[i];
+
+ /* If the chip is not in use, don't try to access it */
+ if (pm_runtime_suspended(&chip->client->dev))
+ return 0;
+
+ /* Persistence levels */
+ return apds990x_write_byte(chip, APDS990X_PERS,
+ (chip->lux_persistence << APDS990X_APERS_SHIFT) |
+ (chip->prox_persistence << APDS990X_PPERS_SHIFT));
+}
+
+static ssize_t apds990x_rate_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t len)
+{
+ struct apds990x_chip *chip = dev_get_drvdata(dev);
+ unsigned long value;
+ int ret;
+
+ if (strict_strtoul(buf, 0, &value))
+ return -EINVAL;
+
+ mutex_lock(&chip->mutex);
+ ret = apds990x_set_arate(chip, value);
+ mutex_unlock(&chip->mutex);
+
+ if (ret < 0)
+ return ret;
+ return len;
+}
+
+static DEVICE_ATTR(lux0_rate_avail, S_IRUGO, apds990x_rate_avail, NULL);
+
+static DEVICE_ATTR(lux0_rate, S_IRUGO | S_IWUSR, apds990x_rate_show,
+ apds990x_rate_store);
+
+static ssize_t apds990x_prox_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ ssize_t ret;
+ struct apds990x_chip *chip = dev_get_drvdata(dev);
+ if (pm_runtime_suspended(dev) || !chip->prox_en)
+ return -EIO;
+
+ mutex_lock(&chip->mutex);
+ ret = sprintf(buf, "%d\n", chip->prox_data);
+ mutex_unlock(&chip->mutex);
+ return ret;
+}
+
+static DEVICE_ATTR(prox0_raw, S_IRUGO, apds990x_prox_show, NULL);
+
+static ssize_t apds990x_prox_range_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ return sprintf(buf, "%u\n", APDS_PROX_RANGE);
+}
+
+static DEVICE_ATTR(prox0_sensor_range, S_IRUGO, apds990x_prox_range_show, NULL);
+
+static ssize_t apds990x_prox_enable_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct apds990x_chip *chip = dev_get_drvdata(dev);
+ return sprintf(buf, "%d\n", chip->prox_en);
+}
+
+static ssize_t apds990x_prox_enable_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t len)
+{
+ struct apds990x_chip *chip = dev_get_drvdata(dev);
+ unsigned long value;
+
+ if (strict_strtoul(buf, 0, &value))
+ return -EINVAL;
+
+ mutex_lock(&chip->mutex);
+
+ if (!chip->prox_en)
+ chip->prox_data = 0;
+
+ if (value)
+ chip->prox_en++;
+ else if (chip->prox_en > 0)
+ chip->prox_en--;
+
+ if (!pm_runtime_suspended(dev))
+ apds990x_mode_on(chip);
+ mutex_unlock(&chip->mutex);
+ return len;
+}
+
+static DEVICE_ATTR(prox0_raw_en, S_IRUGO | S_IWUSR, apds990x_prox_enable_show,
+ apds990x_prox_enable_store);
+
+static const char reporting_modes[][9] = {"trigger", "periodic"};
+
+static ssize_t apds990x_prox_reporting_mode_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct apds990x_chip *chip = dev_get_drvdata(dev);
+ return sprintf(buf, "%s\n",
+ reporting_modes[!!chip->prox_continuous_mode]);
+}
+
+static ssize_t apds990x_prox_reporting_mode_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t len)
+{
+ struct apds990x_chip *chip = dev_get_drvdata(dev);
+
+ if (sysfs_streq(buf, reporting_modes[0]))
+ chip->prox_continuous_mode = 0;
+ else if (sysfs_streq(buf, reporting_modes[1]))
+ chip->prox_continuous_mode = 1;
+ else
+ return -EINVAL;
+ return len;
+}
+
+static DEVICE_ATTR(prox0_reporting_mode, S_IRUGO | S_IWUSR,
+ apds990x_prox_reporting_mode_show,
+ apds990x_prox_reporting_mode_store);
+
+static ssize_t apds990x_prox_reporting_avail_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ return sprintf(buf, "%s %s\n", reporting_modes[0], reporting_modes[1]);
+}
+
+static DEVICE_ATTR(prox0_reporting_mode_avail, S_IRUGO,
+ apds990x_prox_reporting_avail_show, NULL);
+
+
+static ssize_t apds990x_lux_thresh_above_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct apds990x_chip *chip = dev_get_drvdata(dev);
+ return sprintf(buf, "%d\n", chip->lux_thres_hi);
+}
+
+static ssize_t apds990x_lux_thresh_below_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct apds990x_chip *chip = dev_get_drvdata(dev);
+ return sprintf(buf, "%d\n", chip->lux_thres_lo);
+}
+
+static ssize_t apds990x_set_lux_thresh(struct apds990x_chip *chip, u32 *target,
+ const char *buf)
+{
+ int ret = 0;
+ unsigned long thresh;
+
+ if (strict_strtoul(buf, 0, &thresh))
+ return -EINVAL;
+
+ if (thresh > APDS_RANGE)
+ return -EINVAL;
+
+ mutex_lock(&chip->mutex);
+ *target = thresh;
+ /*
+ * Don't update values in HW if we are still waiting for
+ * first interrupt to come after device handle open call.
+ */
+ if (!chip->lux_wait_fresh_res)
+ apds990x_refresh_athres(chip);
+ mutex_unlock(&chip->mutex);
+ return ret;
+}
+
+static ssize_t apds990x_lux_thresh_above_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t len)
+{
+ struct apds990x_chip *chip = dev_get_drvdata(dev);
+ int ret = apds990x_set_lux_thresh(chip, &chip->lux_thres_hi, buf);
+ if (ret < 0)
+ return ret;
+ return len;
+}
+
+static ssize_t apds990x_lux_thresh_below_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t len)
+{
+ struct apds990x_chip *chip = dev_get_drvdata(dev);
+ int ret = apds990x_set_lux_thresh(chip, &chip->lux_thres_lo, buf);
+ if (ret < 0)
+ return ret;
+ return len;
+}
+
+static DEVICE_ATTR(lux0_thresh_above_value, S_IRUGO | S_IWUSR,
+ apds990x_lux_thresh_above_show,
+ apds990x_lux_thresh_above_store);
+
+static DEVICE_ATTR(lux0_thresh_below_value, S_IRUGO | S_IWUSR,
+ apds990x_lux_thresh_below_show,
+ apds990x_lux_thresh_below_store);
+
+static ssize_t apds990x_prox_threshold_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct apds990x_chip *chip = dev_get_drvdata(dev);
+ return sprintf(buf, "%d\n", chip->prox_thres);
+}
+
+static ssize_t apds990x_prox_threshold_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t len)
+{
+ struct apds990x_chip *chip = dev_get_drvdata(dev);
+ unsigned long value;
+
+ if (strict_strtoul(buf, 0, &value))
+ return -EINVAL;
+
+ if ((value > APDS_RANGE) || (value == 0) ||
+ (value < APDS_PROX_HYSTERESIS))
+ return -EINVAL;
+
+ mutex_lock(&chip->mutex);
+ chip->prox_thres = value;
+
+ apds990x_force_p_refresh(chip);
+ mutex_unlock(&chip->mutex);
+ return len;
+}
+
+static DEVICE_ATTR(prox0_thresh_above_value, S_IRUGO | S_IWUSR,
+ apds990x_prox_threshold_show,
+ apds990x_prox_threshold_store);
+
+static ssize_t apds990x_power_state_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ return sprintf(buf, "%d\n", !pm_runtime_suspended(dev));
+}
+
+static ssize_t apds990x_power_state_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t len)
+{
+ struct apds990x_chip *chip = dev_get_drvdata(dev);
+ unsigned long value;
+
+ if (strict_strtoul(buf, 0, &value))
+ return -EINVAL;
+ if (value) {
+ pm_runtime_get_sync(dev);
+ mutex_lock(&chip->mutex);
+ chip->lux_wait_fresh_res = true;
+ apds990x_force_a_refresh(chip);
+ apds990x_force_p_refresh(chip);
+ mutex_unlock(&chip->mutex);
+ } else {
+ if (!pm_runtime_suspended(dev))
+ pm_runtime_put(dev);
+ }
+ return len;
+}
+
+static DEVICE_ATTR(power_state, S_IRUGO | S_IWUSR,
+ apds990x_power_state_show,
+ apds990x_power_state_store);
+
+static ssize_t apds990x_chip_id_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct apds990x_chip *chip = dev_get_drvdata(dev);
+ return sprintf(buf, "%s %d\n", chip->chipname, chip->revision);
+}
+
+static DEVICE_ATTR(chip_id, S_IRUGO, apds990x_chip_id_show, NULL);
+
+static struct attribute *sysfs_attrs_ctrl[] = {
+ &dev_attr_lux0_calibscale.attr,
+ &dev_attr_lux0_calibscale_default.attr,
+ &dev_attr_lux0_input.attr,
+ &dev_attr_lux0_sensor_range.attr,
+ &dev_attr_lux0_rate.attr,
+ &dev_attr_lux0_rate_avail.attr,
+ &dev_attr_lux0_thresh_above_value.attr,
+ &dev_attr_lux0_thresh_below_value.attr,
+ &dev_attr_prox0_raw_en.attr,
+ &dev_attr_prox0_raw.attr,
+ &dev_attr_prox0_sensor_range.attr,
+ &dev_attr_prox0_thresh_above_value.attr,
+ &dev_attr_prox0_reporting_mode.attr,
+ &dev_attr_prox0_reporting_mode_avail.attr,
+ &dev_attr_chip_id.attr,
+ &dev_attr_power_state.attr,
+ NULL
+};
+
+static struct attribute_group apds990x_attribute_group[] = {
+ {.attrs = sysfs_attrs_ctrl },
+};
+
+static int __devinit apds990x_probe(struct i2c_client *client,
+ const struct i2c_device_id *id)
+{
+ struct apds990x_chip *chip;
+ int err;
+
+ chip = kzalloc(sizeof *chip, GFP_KERNEL);
+ if (!chip)
+ return -ENOMEM;
+
+ i2c_set_clientdata(client, chip);
+ chip->client = client;
+
+ init_waitqueue_head(&chip->wait);
+ mutex_init(&chip->mutex);
+ chip->pdata = client->dev.platform_data;
+
+ if (chip->pdata == NULL) {
+ dev_err(&client->dev, "platform data is mandatory\n");
+ err = -EINVAL;
+ goto fail1;
+ }
+
+ if (chip->pdata->cf.ga == 0) {
+ /* set uncovered sensor default parameters */
+ chip->cf.ga = 1966; /* 0.48 * APDS_PARAM_SCALE */
+ chip->cf.cf1 = 4096; /* 1.00 * APDS_PARAM_SCALE */
+ chip->cf.irf1 = 9134; /* 2.23 * APDS_PARAM_SCALE */
+ chip->cf.cf2 = 2867; /* 0.70 * APDS_PARAM_SCALE */
+ chip->cf.irf2 = 5816; /* 1.42 * APDS_PARAM_SCALE */
+ chip->cf.df = 52;
+ } else {
+ chip->cf = chip->pdata->cf;
+ }
+
+ /* precalculate inverse chip factors for threshold control */
+ chip->rcf.afactor =
+ (chip->cf.irf1 - chip->cf.irf2) * APDS_PARAM_SCALE /
+ (chip->cf.cf1 - chip->cf.cf2);
+ chip->rcf.cf1 = APDS_PARAM_SCALE * APDS_PARAM_SCALE /
+ chip->cf.cf1;
+ chip->rcf.irf1 = chip->cf.irf1 * APDS_PARAM_SCALE /
+ chip->cf.cf1;
+ chip->rcf.cf2 = APDS_PARAM_SCALE * APDS_PARAM_SCALE /
+ chip->cf.cf2;
+ chip->rcf.irf2 = chip->cf.irf2 * APDS_PARAM_SCALE /
+ chip->cf.cf2;
+
+ /* Set something to start with */
+ chip->lux_thres_hi = APDS_LUX_DEF_THRES_HI;
+ chip->lux_thres_lo = APDS_LUX_DEF_THRES_LO;
+ chip->lux_calib = APDS_LUX_NEUTRAL_CALIB_VALUE;
+
+ chip->prox_thres = APDS_PROX_DEF_THRES;
+ chip->pdrive = chip->pdata->pdrive;
+ chip->pdiode = APDS_PDIODE_IR;
+ chip->pgain = APDS_PGAIN_1X;
+ chip->prox_calib = APDS_PROX_NEUTRAL_CALIB_VALUE;
+ chip->prox_persistence = APDS_DEFAULT_PROX_PERS;
+ chip->prox_continuous_mode = false;
+
+ chip->regs[0].supply = reg_vcc;
+ chip->regs[1].supply = reg_vled;
+
+ err = regulator_bulk_get(&client->dev,
+ ARRAY_SIZE(chip->regs), chip->regs);
+ if (err < 0) {
+ dev_err(&client->dev, "Cannot get regulators\n");
+ goto fail1;
+ }
+
+ err = regulator_bulk_enable(ARRAY_SIZE(chip->regs), chip->regs);
+ if (err < 0) {
+ dev_err(&client->dev, "Cannot enable regulators\n");
+ goto fail2;
+ }
+
+ usleep_range(APDS_STARTUP_DELAY, 2 * APDS_STARTUP_DELAY);
+
+ err = apds990x_detect(chip);
+ if (err < 0) {
+ dev_err(&client->dev, "APDS990X not found\n");
+ goto fail3;
+ }
+
+ pm_runtime_set_active(&client->dev);
+
+ apds990x_configure(chip);
+ apds990x_set_arate(chip, APDS_LUX_DEFAULT_RATE);
+ apds990x_mode_on(chip);
+
+ pm_runtime_enable(&client->dev);
+
+ if (chip->pdata->setup_resources) {
+ err = chip->pdata->setup_resources();
+ if (err) {
+ err = -EINVAL;
+ goto fail3;
+ }
+ }
+
+ err = sysfs_create_group(&chip->client->dev.kobj,
+ apds990x_attribute_group);
+ if (err < 0) {
+ dev_err(&chip->client->dev, "Sysfs registration failed\n");
+ goto fail4;
+ }
+
+ err = request_threaded_irq(client->irq, NULL,
+ apds990x_irq,
+ IRQF_TRIGGER_FALLING | IRQF_TRIGGER_LOW |
+ IRQF_ONESHOT,
+ "apds990x", chip);
+ if (err) {
+ dev_err(&client->dev, "could not get IRQ %d\n",
+ client->irq);
+ goto fail5;
+ }
+ return err;
+fail5:
+ sysfs_remove_group(&chip->client->dev.kobj,
+ &apds990x_attribute_group[0]);
+fail4:
+ if (chip->pdata && chip->pdata->release_resources)
+ chip->pdata->release_resources();
+fail3:
+ regulator_bulk_disable(ARRAY_SIZE(chip->regs), chip->regs);
+fail2:
+ regulator_bulk_free(ARRAY_SIZE(chip->regs), chip->regs);
+fail1:
+ kfree(chip);
+ return err;
+}
+
+static int __devexit apds990x_remove(struct i2c_client *client)
+{
+ struct apds990x_chip *chip = i2c_get_clientdata(client);
+
+ free_irq(client->irq, chip);
+ sysfs_remove_group(&chip->client->dev.kobj,
+ apds990x_attribute_group);
+
+ if (chip->pdata && chip->pdata->release_resources)
+ chip->pdata->release_resources();
+
+ if (!pm_runtime_suspended(&client->dev))
+ apds990x_chip_off(chip);
+
+ pm_runtime_disable(&client->dev);
+ pm_runtime_set_suspended(&client->dev);
+
+ regulator_bulk_free(ARRAY_SIZE(chip->regs), chip->regs);
+
+ kfree(chip);
+ return 0;
+}
+
+#ifdef CONFIG_PM
+static int apds990x_suspend(struct device *dev)
+{
+ struct i2c_client *client = container_of(dev, struct i2c_client, dev);
+ struct apds990x_chip *chip = i2c_get_clientdata(client);
+
+ apds990x_chip_off(chip);
+ return 0;
+}
+
+static int apds990x_resume(struct device *dev)
+{
+ struct i2c_client *client = container_of(dev, struct i2c_client, dev);
+ struct apds990x_chip *chip = i2c_get_clientdata(client);
+
+ /*
+ * If we were enabled at suspend time, it is expected
+ * everything works nice and smoothly. Chip_on is enough
+ */
+ apds990x_chip_on(chip);
+
+ return 0;
+}
+#else
+#define apds990x_suspend NULL
+#define apds990x_resume NULL
+#define apds990x_shutdown NULL
+#endif
+
+#ifdef CONFIG_PM_RUNTIME
+static int apds990x_runtime_suspend(struct device *dev)
+{
+ struct i2c_client *client = container_of(dev, struct i2c_client, dev);
+ struct apds990x_chip *chip = i2c_get_clientdata(client);
+
+ apds990x_chip_off(chip);
+ return 0;
+}
+
+static int apds990x_runtime_resume(struct device *dev)
+{
+ struct i2c_client *client = container_of(dev, struct i2c_client, dev);
+ struct apds990x_chip *chip = i2c_get_clientdata(client);
+
+ apds990x_chip_on(chip);
+ return 0;
+}
+
+#endif
+
+static const struct i2c_device_id apds990x_id[] = {
+ {"apds990x", 0 },
+ {}
+};
+
+MODULE_DEVICE_TABLE(i2c, apds990x_id);
+
+static const struct dev_pm_ops apds990x_pm_ops = {
+ SET_SYSTEM_SLEEP_PM_OPS(apds990x_suspend, apds990x_resume)
+ SET_RUNTIME_PM_OPS(apds990x_runtime_suspend,
+ apds990x_runtime_resume,
+ NULL)
+};
+
+static struct i2c_driver apds990x_driver = {
+ .driver = {
+ .name = "apds990x",
+ .owner = THIS_MODULE,
+ .pm = &apds990x_pm_ops,
+ },
+ .probe = apds990x_probe,
+ .remove = __devexit_p(apds990x_remove),
+ .id_table = apds990x_id,
+};
+
+static int __init apds990x_init(void)
+{
+ return i2c_add_driver(&apds990x_driver);
+}
+
+static void __exit apds990x_exit(void)
+{
+ i2c_del_driver(&apds990x_driver);
+}
+
+MODULE_DESCRIPTION("APDS990X combined ALS and proximity sensor");
+MODULE_AUTHOR("Samu Onkalo, Nokia Corporation");
+MODULE_LICENSE("GPL v2");
+
+module_init(apds990x_init);
+module_exit(apds990x_exit);
diff --git a/drivers/misc/bh1770glc.c b/drivers/misc/bh1770glc.c
new file mode 100644
index 000000000000..d79a972f2c79
--- /dev/null
+++ b/drivers/misc/bh1770glc.c
@@ -0,0 +1,1417 @@
+/*
+ * This file is part of the ROHM BH1770GLC / OSRAM SFH7770 sensor driver.
+ * The chip is a combined proximity and ambient light sensor.
+ *
+ * Copyright (C) 2010 Nokia Corporation and/or its subsidiary(-ies).
+ *
+ * Contact: Samu Onkalo <samu.p.onkalo@nokia.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
+ * 02110-1301 USA
+ *
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/i2c.h>
+#include <linux/interrupt.h>
+#include <linux/mutex.h>
+#include <linux/i2c/bh1770glc.h>
+#include <linux/regulator/consumer.h>
+#include <linux/pm_runtime.h>
+#include <linux/workqueue.h>
+#include <linux/delay.h>
+#include <linux/wait.h>
+#include <linux/slab.h>
+
+#define BH1770_ALS_CONTROL 0x80 /* ALS operation mode control */
+#define BH1770_PS_CONTROL 0x81 /* PS operation mode control */
+#define BH1770_I_LED 0x82 /* active LED and LED1, LED2 current */
+#define BH1770_I_LED3 0x83 /* LED3 current setting */
+#define BH1770_ALS_PS_MEAS 0x84 /* Forced mode trigger */
+#define BH1770_PS_MEAS_RATE 0x85 /* PS meas. rate at stand alone mode */
+#define BH1770_ALS_MEAS_RATE 0x86 /* ALS meas. rate at stand alone mode */
+#define BH1770_PART_ID 0x8a /* Part number and revision ID */
+#define BH1770_MANUFACT_ID 0x8b /* Manufacturer ID */
+#define BH1770_ALS_DATA_0 0x8c /* ALS DATA low byte */
+#define BH1770_ALS_DATA_1 0x8d /* ALS DATA high byte */
+#define BH1770_ALS_PS_STATUS 0x8e /* Measurement data and int status */
+#define BH1770_PS_DATA_LED1 0x8f /* PS data from LED1 */
+#define BH1770_PS_DATA_LED2 0x90 /* PS data from LED2 */
+#define BH1770_PS_DATA_LED3 0x91 /* PS data from LED3 */
+#define BH1770_INTERRUPT 0x92 /* Interrupt setting */
+#define BH1770_PS_TH_LED1 0x93 /* PS interrupt threshold for LED1 */
+#define BH1770_PS_TH_LED2 0x94 /* PS interrupt threshold for LED2 */
+#define BH1770_PS_TH_LED3 0x95 /* PS interrupt threshold for LED3 */
+#define BH1770_ALS_TH_UP_0 0x96 /* ALS upper threshold low byte */
+#define BH1770_ALS_TH_UP_1 0x97 /* ALS upper threshold high byte */
+#define BH1770_ALS_TH_LOW_0 0x98 /* ALS lower threshold low byte */
+#define BH1770_ALS_TH_LOW_1 0x99 /* ALS lower threshold high byte */
+
+/* MANUFACT_ID */
+#define BH1770_MANUFACT_ROHM 0x01
+#define BH1770_MANUFACT_OSRAM 0x03
+
+/* PART_ID */
+#define BH1770_PART 0x90
+#define BH1770_PART_MASK 0xf0
+#define BH1770_REV_MASK 0x0f
+#define BH1770_REV_SHIFT 0
+#define BH1770_REV_0 0x00
+#define BH1770_REV_1 0x01
+
+/* Operating modes for both */
+#define BH1770_STANDBY 0x00
+#define BH1770_FORCED 0x02
+#define BH1770_STANDALONE 0x03
+#define BH1770_SWRESET (0x01 << 2)
+
+#define BH1770_PS_TRIG_MEAS (1 << 0)
+#define BH1770_ALS_TRIG_MEAS (1 << 1)
+
+/* Interrupt control */
+#define BH1770_INT_OUTPUT_MODE (1 << 3) /* 0 = latched */
+#define BH1770_INT_POLARITY (1 << 2) /* 1 = active high */
+#define BH1770_INT_ALS_ENA (1 << 1)
+#define BH1770_INT_PS_ENA (1 << 0)
+
+/* Interrupt status */
+#define BH1770_INT_LED1_DATA (1 << 0)
+#define BH1770_INT_LED1_INT (1 << 1)
+#define BH1770_INT_LED2_DATA (1 << 2)
+#define BH1770_INT_LED2_INT (1 << 3)
+#define BH1770_INT_LED3_DATA (1 << 4)
+#define BH1770_INT_LED3_INT (1 << 5)
+#define BH1770_INT_LEDS_INT ((1 << 1) | (1 << 3) | (1 << 5))
+#define BH1770_INT_ALS_DATA (1 << 6)
+#define BH1770_INT_ALS_INT (1 << 7)
+
+/* Led channels */
+#define BH1770_LED1 0x00
+
+#define BH1770_DISABLE 0
+#define BH1770_ENABLE 1
+#define BH1770_PROX_CHANNELS 1
+
+#define BH1770_LUX_DEFAULT_RATE 1 /* Index to lux rate table */
+#define BH1770_PROX_DEFAULT_RATE 1 /* Direct HW value =~ 50Hz */
+#define BH1770_PROX_DEF_RATE_THRESH 6 /* Direct HW value =~ 5 Hz */
+#define BH1770_STARTUP_DELAY 50
+#define BH1770_RESET_TIME 10
+#define BH1770_TIMEOUT 2100 /* Timeout in 2.1 seconds */
+
+#define BH1770_LUX_RANGE 65535
+#define BH1770_PROX_RANGE 255
+#define BH1770_COEF_SCALER 1024
+#define BH1770_CALIB_SCALER 8192
+#define BH1770_LUX_NEUTRAL_CALIB_VALUE (1 * BH1770_CALIB_SCALER)
+#define BH1770_LUX_DEF_THRES 1000
+#define BH1770_PROX_DEF_THRES 70
+#define BH1770_PROX_DEF_ABS_THRES 100
+#define BH1770_DEFAULT_PERSISTENCE 10
+#define BH1770_PROX_MAX_PERSISTENCE 50
+#define BH1770_LUX_GA_SCALE 16384
+#define BH1770_LUX_CF_SCALE 2048 /* CF ChipFactor */
+#define BH1770_NEUTRAL_CF BH1770_LUX_CF_SCALE
+#define BH1770_LUX_CORR_SCALE 4096
+
+#define PROX_ABOVE_THRESHOLD 1
+#define PROX_BELOW_THRESHOLD 0
+
+#define PROX_IGNORE_LUX_LIMIT 500
+
+struct bh1770_chip {
+ struct bh1770_platform_data *pdata;
+ char chipname[10];
+ u8 revision;
+ struct i2c_client *client;
+ struct regulator_bulk_data regs[2];
+ struct mutex mutex; /* avoid parallel access */
+ wait_queue_head_t wait;
+
+ bool int_mode_prox;
+ bool int_mode_lux;
+ struct delayed_work prox_work;
+ u32 lux_cf; /* Chip specific factor */
+ u32 lux_ga;
+ u32 lux_calib;
+ int lux_rate_index;
+ u32 lux_corr;
+ u16 lux_data_raw;
+ u16 lux_threshold_hi;
+ u16 lux_threshold_lo;
+ u16 lux_thres_hi_onchip;
+ u16 lux_thres_lo_onchip;
+ bool lux_wait_result;
+
+ int prox_enable_count;
+ u16 prox_coef;
+ u16 prox_const;
+ int prox_rate;
+ int prox_rate_threshold;
+ u8 prox_persistence;
+ u8 prox_persistence_counter;
+ u8 prox_data;
+ u8 prox_threshold;
+ u8 prox_threshold_hw;
+ bool prox_force_update;
+ u8 prox_abs_thres;
+ u8 prox_led;
+};
+
+static const char reg_vcc[] = "Vcc";
+static const char reg_vleds[] = "Vleds";
+
+/*
+ * Supported stand-alone rates in ms from the chip data sheet
+ * {10, 20, 30, 40, 70, 100, 200, 500, 1000, 2000};
+ */
+static const s16 prox_rates_hz[] = {100, 50, 33, 25, 14, 10, 5, 2};
+static const s16 prox_rates_ms[] = {10, 20, 30, 40, 70, 100, 200, 500};
+
+/* Supported IR-led currents in mA */
+static const u8 prox_curr_ma[] = {5, 10, 20, 50, 100, 150, 200};
+
+/*
+ * Supported stand alone rates in ms from chip data sheet
+ * {100, 200, 500, 1000, 2000};
+ */
+static const s16 lux_rates_hz[] = {10, 5, 2, 1, 0};
+
+/*
+ * Interrupt control functions are called with chip->mutex held,
+ * except during module probe / remove.
+ */
+static inline int bh1770_lux_interrupt_control(struct bh1770_chip *chip,
+ int lux)
+{
+ chip->int_mode_lux = lux;
+ /* Set interrupt modes, interrupt active low, latched */
+ return i2c_smbus_write_byte_data(chip->client,
+ BH1770_INTERRUPT,
+ (lux << 1) | chip->int_mode_prox);
+}
+
+static inline int bh1770_prox_interrupt_control(struct bh1770_chip *chip,
+ int ps)
+{
+ chip->int_mode_prox = ps;
+ return i2c_smbus_write_byte_data(chip->client,
+ BH1770_INTERRUPT,
+ (chip->int_mode_lux << 1) | (ps << 0));
+}
+
+/* chip->mutex is always kept here */
+static int bh1770_lux_rate(struct bh1770_chip *chip, int rate_index)
+{
+ /* sysfs may call this when the chip is powered off */
+ if (pm_runtime_suspended(&chip->client->dev))
+ return 0;
+
+ /* Proper proximity response needs fastest lux rate (100ms) */
+ if (chip->prox_enable_count)
+ rate_index = 0;
+
+ return i2c_smbus_write_byte_data(chip->client,
+ BH1770_ALS_MEAS_RATE,
+ rate_index);
+}
+
+static int bh1770_prox_rate(struct bh1770_chip *chip, int mode)
+{
+ int rate;
+
+ rate = (mode == PROX_ABOVE_THRESHOLD) ?
+ chip->prox_rate_threshold : chip->prox_rate;
+
+ return i2c_smbus_write_byte_data(chip->client,
+ BH1770_PS_MEAS_RATE,
+ rate);
+}
+
+/* The infrared LED is controlled by the chip during proximity scanning */
+static inline int bh1770_led_cfg(struct bh1770_chip *chip)
+{
+ /* LED cfg, current for leds 1 and 2 */
+ return i2c_smbus_write_byte_data(chip->client,
+ BH1770_I_LED,
+ (BH1770_LED1 << 6) |
+ (BH1770_LED_5mA << 3) |
+ chip->prox_led);
+}
+
+/*
+ * The following two functions convert raw PS values from the HW to
+ * normalized values. The purpose is to compensate for differences between
+ * sensor versions and variants so that results mean about the same
+ * across versions.
+ */
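+/*
+ * Worked example (illustrative): with the OSRAM SFH7770 coefficients set
+ * in bh1770_detect() (prox_coef = 819, prox_const = 40), a raw reading of
+ * 100 becomes ((100 + 40) * 819) / 1024 = 111, and mapping 111 back gives
+ * (111 * 1024) / 819 = 138, minus prox_const = 98; the round trip is only
+ * approximate because of integer truncation.
+ */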
+static inline u8 bh1770_psraw_to_adjusted(struct bh1770_chip *chip, u8 psraw)
+{
+ u16 adjusted;
+ adjusted = (u16)(((u32)(psraw + chip->prox_const) * chip->prox_coef) /
+ BH1770_COEF_SCALER);
+ if (adjusted > BH1770_PROX_RANGE)
+ adjusted = BH1770_PROX_RANGE;
+ return adjusted;
+}
+
+static inline u8 bh1770_psadjusted_to_raw(struct bh1770_chip *chip, u8 ps)
+{
+ u16 raw;
+
+ raw = (((u32)ps * BH1770_COEF_SCALER) / chip->prox_coef);
+ if (raw > chip->prox_const)
+ raw = raw - chip->prox_const;
+ else
+ raw = 0;
+ return raw;
+}
+
+/*
+ * The following lux conversion functions normalize raw lux values from
+ * the HW. The purpose is to compensate for differences between sensor
+ * versions and variants so that results mean about the same across
+ * versions. Chip->mutex is kept when this is called.
+ */
+static int bh1770_prox_set_threshold(struct bh1770_chip *chip)
+{
+ u8 tmp = 0;
+
+ /* sysfs may call this when the chip is powered off */
+ if (pm_runtime_suspended(&chip->client->dev))
+ return 0;
+
+ tmp = bh1770_psadjusted_to_raw(chip, chip->prox_threshold);
+ chip->prox_threshold_hw = tmp;
+
+ return i2c_smbus_write_byte_data(chip->client, BH1770_PS_TH_LED1,
+ tmp);
+}
+
+static inline u16 bh1770_lux_raw_to_adjusted(struct bh1770_chip *chip, u16 raw)
+{
+ u32 lux;
+ lux = ((u32)raw * chip->lux_corr) / BH1770_LUX_CORR_SCALE;
+ return min(lux, (u32)BH1770_LUX_RANGE);
+}
+
+static inline u16 bh1770_lux_adjusted_to_raw(struct bh1770_chip *chip,
+ u16 adjusted)
+{
+ return (u32)adjusted * BH1770_LUX_CORR_SCALE / chip->lux_corr;
+}
+
+/* chip->mutex is kept when this is called */
+static int bh1770_lux_update_thresholds(struct bh1770_chip *chip,
+ u16 threshold_hi, u16 threshold_lo)
+{
+ u8 data[4];
+ int ret;
+
+ /* sysfs may call this when the chip is powered off */
+ if (pm_runtime_suspended(&chip->client->dev))
+ return 0;
+
+ /*
+ * Compensate threshold values with the correction factors if not
+ * set to minimum or maximum.
+ * Min & max values disable interrupts.
+ */
+ if (threshold_hi != BH1770_LUX_RANGE && threshold_hi != 0)
+ threshold_hi = bh1770_lux_adjusted_to_raw(chip, threshold_hi);
+
+ if (threshold_lo != BH1770_LUX_RANGE && threshold_lo != 0)
+ threshold_lo = bh1770_lux_adjusted_to_raw(chip, threshold_lo);
+
+ if (chip->lux_thres_hi_onchip == threshold_hi &&
+ chip->lux_thres_lo_onchip == threshold_lo)
+ return 0;
+
+ chip->lux_thres_hi_onchip = threshold_hi;
+ chip->lux_thres_lo_onchip = threshold_lo;
+
+ data[0] = threshold_hi;
+ data[1] = threshold_hi >> 8;
+ data[2] = threshold_lo;
+ data[3] = threshold_lo >> 8;
+
+ ret = i2c_smbus_write_i2c_block_data(chip->client,
+ BH1770_ALS_TH_UP_0,
+ ARRAY_SIZE(data),
+ data);
+ return ret;
+}
+
+static int bh1770_lux_get_result(struct bh1770_chip *chip)
+{
+ u16 data;
+ int ret;
+
+ ret = i2c_smbus_read_byte_data(chip->client, BH1770_ALS_DATA_0);
+ if (ret < 0)
+ return ret;
+
+ data = ret & 0xff;
+ ret = i2c_smbus_read_byte_data(chip->client, BH1770_ALS_DATA_1);
+ if (ret < 0)
+ return ret;
+
+ chip->lux_data_raw = data | ((ret & 0xff) << 8);
+
+ return 0;
+}
+
+/* Calculate correction value which contains chip and device specific parts */
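+/*
+ * With everything at its neutral value (lux_ga = BH1770_LUX_GA_SCALE,
+ * lux_cf = BH1770_NEUTRAL_CF, lux_calib = BH1770_LUX_NEUTRAL_CALIB_VALUE)
+ * each step below multiplies by one, so the result stays at
+ * BH1770_LUX_CORR_SCALE and raw lux values pass through unchanged.
+ */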
+static u32 bh1770_get_corr_value(struct bh1770_chip *chip)
+{
+ u32 tmp;
+ /* Impact of glass attenuation correction */
+ tmp = (BH1770_LUX_CORR_SCALE * chip->lux_ga) / BH1770_LUX_GA_SCALE;
+ /* Impact of chip factor correction */
+ tmp = (tmp * chip->lux_cf) / BH1770_LUX_CF_SCALE;
+ /* Impact of Device specific calibration correction */
+ tmp = (tmp * chip->lux_calib) / BH1770_CALIB_SCALER;
+ return tmp;
+}
+
+static int bh1770_lux_read_result(struct bh1770_chip *chip)
+{
+ bh1770_lux_get_result(chip);
+ return bh1770_lux_raw_to_adjusted(chip, chip->lux_data_raw);
+}
+
+/*
+ * Chip on / off functions are called while keeping mutex except probe
+ * or remove phase
+ */
+static int bh1770_chip_on(struct bh1770_chip *chip)
+{
+ int ret = regulator_bulk_enable(ARRAY_SIZE(chip->regs),
+ chip->regs);
+ if (ret < 0)
+ return ret;
+
+ usleep_range(BH1770_STARTUP_DELAY, BH1770_STARTUP_DELAY * 2);
+
+ /* Reset the chip */
+ i2c_smbus_write_byte_data(chip->client, BH1770_ALS_CONTROL,
+ BH1770_SWRESET);
+ usleep_range(BH1770_RESET_TIME, BH1770_RESET_TIME * 2);
+
+ /*
+ * ALS is always started since proximity needs ALS results
+ * for reliability estimation.
+ * Assume darkness until the first ALS measurement is ready.
+ */
+ chip->lux_data_raw = 0;
+ chip->prox_data = 0;
+ ret = i2c_smbus_write_byte_data(chip->client,
+ BH1770_ALS_CONTROL, BH1770_STANDALONE);
+
+ /* Assume reset defaults */
+ chip->lux_thres_hi_onchip = BH1770_LUX_RANGE;
+ chip->lux_thres_lo_onchip = 0;
+
+ return ret;
+}
+
+static void bh1770_chip_off(struct bh1770_chip *chip)
+{
+ i2c_smbus_write_byte_data(chip->client,
+ BH1770_INTERRUPT, BH1770_DISABLE);
+ i2c_smbus_write_byte_data(chip->client,
+ BH1770_ALS_CONTROL, BH1770_STANDBY);
+ i2c_smbus_write_byte_data(chip->client,
+ BH1770_PS_CONTROL, BH1770_STANDBY);
+ regulator_bulk_disable(ARRAY_SIZE(chip->regs), chip->regs);
+}
+
+/* chip->mutex is kept when this is called */
+static int bh1770_prox_mode_control(struct bh1770_chip *chip)
+{
+ if (chip->prox_enable_count) {
+ chip->prox_force_update = true; /* Force immediate update */
+
+ bh1770_lux_rate(chip, chip->lux_rate_index);
+ bh1770_prox_set_threshold(chip);
+ bh1770_led_cfg(chip);
+ bh1770_prox_rate(chip, PROX_BELOW_THRESHOLD);
+ bh1770_prox_interrupt_control(chip, BH1770_ENABLE);
+ i2c_smbus_write_byte_data(chip->client,
+ BH1770_PS_CONTROL, BH1770_STANDALONE);
+ } else {
+ chip->prox_data = 0;
+ bh1770_lux_rate(chip, chip->lux_rate_index);
+ bh1770_prox_interrupt_control(chip, BH1770_DISABLE);
+ i2c_smbus_write_byte_data(chip->client,
+ BH1770_PS_CONTROL, BH1770_STANDBY);
+ }
+ return 0;
+}
+
+/* chip->mutex is kept when this is called */
+static int bh1770_prox_read_result(struct bh1770_chip *chip)
+{
+ int ret;
+ bool above;
+ u8 mode;
+
+ ret = i2c_smbus_read_byte_data(chip->client, BH1770_PS_DATA_LED1);
+ if (ret < 0)
+ goto out;
+
+ if (ret > chip->prox_threshold_hw)
+ above = true;
+ else
+ above = false;
+
+ /*
+ * When the ALS level goes above the limit, the proximity result may
+ * be a false positive, so ignore it. With real proximity there is a
+ * shadow causing low ALS levels.
+ */
+ if (chip->lux_data_raw > PROX_IGNORE_LUX_LIMIT)
+ ret = 0;
+
+ chip->prox_data = bh1770_psraw_to_adjusted(chip, ret);
+
+ /* Strong proximity level or force mode requires immediate response */
+ if (chip->prox_data >= chip->prox_abs_thres ||
+ chip->prox_force_update)
+ chip->prox_persistence_counter = chip->prox_persistence;
+
+ chip->prox_force_update = false;
+
+ /* Persistence filtering to reduce false proximity events */
+ if (likely(above)) {
+ if (chip->prox_persistence_counter < chip->prox_persistence) {
+ chip->prox_persistence_counter++;
+ ret = -ENODATA;
+ } else {
+ mode = PROX_ABOVE_THRESHOLD;
+ ret = 0;
+ }
+ } else {
+ chip->prox_persistence_counter = 0;
+ mode = PROX_BELOW_THRESHOLD;
+ chip->prox_data = 0;
+ ret = 0;
+ }
+
+ /* Set proximity detection rate based on above or below value */
+ if (ret == 0) {
+ bh1770_prox_rate(chip, mode);
+ sysfs_notify(&chip->client->dev.kobj, NULL, "prox0_raw");
+ }
+out:
+ return ret;
+}
+
+static int bh1770_detect(struct bh1770_chip *chip)
+{
+ struct i2c_client *client = chip->client;
+ s32 ret;
+ u8 manu, part;
+
+ ret = i2c_smbus_read_byte_data(client, BH1770_MANUFACT_ID);
+ if (ret < 0)
+ goto error;
+ manu = (u8)ret;
+
+ ret = i2c_smbus_read_byte_data(client, BH1770_PART_ID);
+ if (ret < 0)
+ goto error;
+ part = (u8)ret;
+
+ chip->revision = (part & BH1770_REV_MASK) >> BH1770_REV_SHIFT;
+ chip->prox_coef = BH1770_COEF_SCALER;
+ chip->prox_const = 0;
+ chip->lux_cf = BH1770_NEUTRAL_CF;
+
+ if ((manu == BH1770_MANUFACT_ROHM) &&
+ ((part & BH1770_PART_MASK) == BH1770_PART)) {
+ snprintf(chip->chipname, sizeof(chip->chipname), "BH1770GLC");
+ return 0;
+ }
+
+ if ((manu == BH1770_MANUFACT_OSRAM) &&
+ ((part & BH1770_PART_MASK) == BH1770_PART)) {
+ snprintf(chip->chipname, sizeof(chip->chipname), "SFH7770");
+ /* Values selected by comparing different versions */
+ chip->prox_coef = 819; /* 0.8 * BH1770_COEF_SCALER */
+ chip->prox_const = 40;
+ return 0;
+ }
+
+ ret = -ENODEV;
+error:
+ dev_dbg(&client->dev, "BH1770 or SFH7770 not found\n");
+
+ return ret;
+}
+
+/*
+ * This work is re-scheduled at every proximity interrupt.
+ * If this work is running, it means that there hasn't been any
+ * proximity interrupt in time. The situation is handled as no-proximity.
+ * It would be nice to have a low-threshold interrupt, or an interrupt
+ * when measurement and hi-threshold are both 0, but neither of those exists.
+ * This is a workaround for the missing HW feature.
+ */
+
+static void bh1770_prox_work(struct work_struct *work)
+{
+ struct bh1770_chip *chip =
+ container_of(work, struct bh1770_chip, prox_work.work);
+
+ mutex_lock(&chip->mutex);
+ bh1770_prox_read_result(chip);
+ mutex_unlock(&chip->mutex);
+}
+
+/* This is threaded irq handler */
+static irqreturn_t bh1770_irq(int irq, void *data)
+{
+ struct bh1770_chip *chip = data;
+ int status;
+ int rate = 0;
+
+ mutex_lock(&chip->mutex);
+ status = i2c_smbus_read_byte_data(chip->client, BH1770_ALS_PS_STATUS);
+
+ /* Acknowledge interrupt by reading this register */
+ i2c_smbus_read_byte_data(chip->client, BH1770_INTERRUPT);
+
+ /*
+ * Check if there is fresh data available for als.
+ * If this is the very first data, update thresholds after that.
+ */
+ if (status & BH1770_INT_ALS_DATA) {
+ bh1770_lux_get_result(chip);
+ if (unlikely(chip->lux_wait_result)) {
+ chip->lux_wait_result = false;
+ wake_up(&chip->wait);
+ bh1770_lux_update_thresholds(chip,
+ chip->lux_threshold_hi,
+ chip->lux_threshold_lo);
+ }
+ }
+
+ /* Disable interrupt logic to guarantee acknowledgement */
+ i2c_smbus_write_byte_data(chip->client, BH1770_INTERRUPT,
+ (0 << 1) | (0 << 0));
+
+ if ((status & BH1770_INT_ALS_INT))
+ sysfs_notify(&chip->client->dev.kobj, NULL, "lux0_input");
+
+ if (chip->int_mode_prox && (status & BH1770_INT_LEDS_INT)) {
+ rate = prox_rates_ms[chip->prox_rate_threshold];
+ bh1770_prox_read_result(chip);
+ }
+
+ /* Re-enable interrupt logic */
+ i2c_smbus_write_byte_data(chip->client, BH1770_INTERRUPT,
+ (chip->int_mode_lux << 1) |
+ (chip->int_mode_prox << 0));
+ mutex_unlock(&chip->mutex);
+
+ /*
+ * Can't cancel work while keeping mutex since the work uses the
+ * same mutex.
+ */
+ if (rate) {
+ /*
+ * Simulate missing no-proximity interrupt 50ms after the
+ * next expected interrupt time.
+ */
+ cancel_delayed_work_sync(&chip->prox_work);
+ schedule_delayed_work(&chip->prox_work,
+ msecs_to_jiffies(rate + 50));
+ }
+ return IRQ_HANDLED;
+}
+
+static ssize_t bh1770_power_state_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct bh1770_chip *chip = dev_get_drvdata(dev);
+ unsigned long value;
+ ssize_t ret;
+
+ if (strict_strtoul(buf, 0, &value))
+ return -EINVAL;
+
+ mutex_lock(&chip->mutex);
+ if (value) {
+ pm_runtime_get_sync(dev);
+
+ ret = bh1770_lux_rate(chip, chip->lux_rate_index);
+ if (ret < 0) {
+ pm_runtime_put(dev);
+ goto leave;
+ }
+
+ ret = bh1770_lux_interrupt_control(chip, BH1770_ENABLE);
+ if (ret < 0) {
+ pm_runtime_put(dev);
+ goto leave;
+ }
+
+ /* This causes interrupt after the next measurement cycle */
+ bh1770_lux_update_thresholds(chip, BH1770_LUX_DEF_THRES,
+ BH1770_LUX_DEF_THRES);
+ /* Inform that we are waiting for a result from ALS */
+ chip->lux_wait_result = true;
+ bh1770_prox_mode_control(chip);
+ } else if (!pm_runtime_suspended(dev)) {
+ pm_runtime_put(dev);
+ }
+ ret = count;
+leave:
+ mutex_unlock(&chip->mutex);
+ return ret;
+}
+
+static ssize_t bh1770_power_state_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ return sprintf(buf, "%d\n", !pm_runtime_suspended(dev));
+}
+
+static ssize_t bh1770_lux_result_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct bh1770_chip *chip = dev_get_drvdata(dev);
+ ssize_t ret;
+ long timeout;
+
+ if (pm_runtime_suspended(dev))
+ return -EIO; /* Chip is not enabled at all */
+
+ timeout = wait_event_interruptible_timeout(chip->wait,
+ !chip->lux_wait_result,
+ msecs_to_jiffies(BH1770_TIMEOUT));
+ if (!timeout)
+ return -EIO;
+
+ mutex_lock(&chip->mutex);
+ ret = sprintf(buf, "%d\n", bh1770_lux_read_result(chip));
+ mutex_unlock(&chip->mutex);
+
+ return ret;
+}
+
+static ssize_t bh1770_lux_range_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ return sprintf(buf, "%d\n", BH1770_LUX_RANGE);
+}
+
+static ssize_t bh1770_prox_enable_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct bh1770_chip *chip = dev_get_drvdata(dev);
+ unsigned long value;
+
+ if (strict_strtoul(buf, 0, &value))
+ return -EINVAL;
+
+ mutex_lock(&chip->mutex);
+ /* Assume no proximity. Sensor will tell real state soon */
+ if (!chip->prox_enable_count)
+ chip->prox_data = 0;
+
+ if (value)
+ chip->prox_enable_count++;
+ else if (chip->prox_enable_count > 0)
+ chip->prox_enable_count--;
+ else
+ goto leave;
+
+ /* Run control only when chip is powered on */
+ if (!pm_runtime_suspended(dev))
+ bh1770_prox_mode_control(chip);
+leave:
+ mutex_unlock(&chip->mutex);
+ return count;
+}
+
+static ssize_t bh1770_prox_enable_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct bh1770_chip *chip = dev_get_drvdata(dev);
+ ssize_t len;
+
+ mutex_lock(&chip->mutex);
+ len = sprintf(buf, "%d\n", chip->prox_enable_count);
+ mutex_unlock(&chip->mutex);
+ return len;
+}
+
+static ssize_t bh1770_prox_result_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct bh1770_chip *chip = dev_get_drvdata(dev);
+ ssize_t ret;
+
+ mutex_lock(&chip->mutex);
+ if (chip->prox_enable_count && !pm_runtime_suspended(dev))
+ ret = sprintf(buf, "%d\n", chip->prox_data);
+ else
+ ret = -EIO;
+ mutex_unlock(&chip->mutex);
+ return ret;
+}
+
+static ssize_t bh1770_prox_range_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ return sprintf(buf, "%d\n", BH1770_PROX_RANGE);
+}
+
+static ssize_t bh1770_get_prox_rate_avail(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ int i;
+ int pos = 0;
+ for (i = 0; i < ARRAY_SIZE(prox_rates_hz); i++)
+ pos += sprintf(buf + pos, "%d ", prox_rates_hz[i]);
+ sprintf(buf + pos - 1, "\n");
+ return pos;
+}
+
+static ssize_t bh1770_get_prox_rate_above(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct bh1770_chip *chip = dev_get_drvdata(dev);
+ return sprintf(buf, "%d\n", prox_rates_hz[chip->prox_rate_threshold]);
+}
+
+static ssize_t bh1770_get_prox_rate_below(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct bh1770_chip *chip = dev_get_drvdata(dev);
+ return sprintf(buf, "%d\n", prox_rates_hz[chip->prox_rate]);
+}
+
+static int bh1770_prox_rate_validate(int rate)
+{
+ int i;
+
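+ /* Round down to the nearest supported rate, clamping to the slowest one */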
+ for (i = 0; i < ARRAY_SIZE(prox_rates_hz) - 1; i++)
+ if (rate >= prox_rates_hz[i])
+ break;
+ return i;
+}
+
+static ssize_t bh1770_set_prox_rate_above(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct bh1770_chip *chip = dev_get_drvdata(dev);
+ unsigned long value;
+
+ if (strict_strtoul(buf, 0, &value))
+ return -EINVAL;
+
+ mutex_lock(&chip->mutex);
+ chip->prox_rate_threshold = bh1770_prox_rate_validate(value);
+ mutex_unlock(&chip->mutex);
+ return count;
+}
+
+static ssize_t bh1770_set_prox_rate_below(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct bh1770_chip *chip = dev_get_drvdata(dev);
+ unsigned long value;
+
+ if (strict_strtoul(buf, 0, &value))
+ return -EINVAL;
+
+ mutex_lock(&chip->mutex);
+ chip->prox_rate = bh1770_prox_rate_validate(value);
+ mutex_unlock(&chip->mutex);
+ return count;
+}
+
+static ssize_t bh1770_get_prox_thres(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct bh1770_chip *chip = dev_get_drvdata(dev);
+ return sprintf(buf, "%d\n", chip->prox_threshold);
+}
+
+static ssize_t bh1770_set_prox_thres(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct bh1770_chip *chip = dev_get_drvdata(dev);
+ unsigned long value;
+ int ret;
+
+ if (strict_strtoul(buf, 0, &value))
+ return -EINVAL;
+ if (value > BH1770_PROX_RANGE)
+ return -EINVAL;
+
+ mutex_lock(&chip->mutex);
+ chip->prox_threshold = value;
+ ret = bh1770_prox_set_threshold(chip);
+ mutex_unlock(&chip->mutex);
+ if (ret < 0)
+ return ret;
+ return count;
+}
+
+static ssize_t bh1770_prox_persistence_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct bh1770_chip *chip = dev_get_drvdata(dev);
+
+ return sprintf(buf, "%u\n", chip->prox_persistence);
+}
+
+static ssize_t bh1770_prox_persistence_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t len)
+{
+ struct bh1770_chip *chip = dev_get_drvdata(dev);
+ unsigned long value;
+
+ if (strict_strtoul(buf, 0, &value))
+ return -EINVAL;
+
+ if (value > BH1770_PROX_MAX_PERSISTENCE)
+ return -EINVAL;
+
+ chip->prox_persistence = value;
+
+ return len;
+}
+
+static ssize_t bh1770_prox_abs_thres_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct bh1770_chip *chip = dev_get_drvdata(dev);
+ return sprintf(buf, "%u\n", chip->prox_abs_thres);
+}
+
+static ssize_t bh1770_prox_abs_thres_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t len)
+{
+ struct bh1770_chip *chip = dev_get_drvdata(dev);
+ unsigned long value;
+
+ if (strict_strtoul(buf, 0, &value))
+ return -EINVAL;
+
+ if (value > BH1770_PROX_RANGE)
+ return -EINVAL;
+
+ chip->prox_abs_thres = value;
+
+ return len;
+}
+
+static ssize_t bh1770_chip_id_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct bh1770_chip *chip = dev_get_drvdata(dev);
+ return sprintf(buf, "%s rev %d\n", chip->chipname, chip->revision);
+}
+
+static ssize_t bh1770_lux_calib_default_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ return sprintf(buf, "%u\n", BH1770_CALIB_SCALER);
+}
+
+static ssize_t bh1770_lux_calib_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct bh1770_chip *chip = dev_get_drvdata(dev);
+ ssize_t len;
+
+ mutex_lock(&chip->mutex);
+ len = sprintf(buf, "%u\n", chip->lux_calib);
+ mutex_unlock(&chip->mutex);
+ return len;
+}
+
+static ssize_t bh1770_lux_calib_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t len)
+{
+ struct bh1770_chip *chip = dev_get_drvdata(dev);
+ unsigned long value;
+ u32 old_calib;
+ u32 new_corr;
+
+ if (strict_strtoul(buf, 0, &value))
+ return -EINVAL;
+
+ mutex_lock(&chip->mutex);
+ old_calib = chip->lux_calib;
+ chip->lux_calib = value;
+ new_corr = bh1770_get_corr_value(chip);
+ if (new_corr == 0) {
+ chip->lux_calib = old_calib;
+ mutex_unlock(&chip->mutex);
+ return -EINVAL;
+ }
+ chip->lux_corr = new_corr;
+ /* Refresh thresholds on HW after changing correction value */
+ bh1770_lux_update_thresholds(chip, chip->lux_threshold_hi,
+ chip->lux_threshold_lo);
+
+ mutex_unlock(&chip->mutex);
+
+ return len;
+}
+
+static ssize_t bh1770_get_lux_rate_avail(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ int i;
+ int pos = 0;
+ for (i = 0; i < ARRAY_SIZE(lux_rates_hz); i++)
+ pos += sprintf(buf + pos, "%d ", lux_rates_hz[i]);
+ sprintf(buf + pos - 1, "\n");
+ return pos;
+}
+
+static ssize_t bh1770_get_lux_rate(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct bh1770_chip *chip = dev_get_drvdata(dev);
+ return sprintf(buf, "%d\n", lux_rates_hz[chip->lux_rate_index]);
+}
+
+static ssize_t bh1770_set_lux_rate(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct bh1770_chip *chip = dev_get_drvdata(dev);
+ unsigned long rate_hz;
+ int ret, i;
+
+ if (strict_strtoul(buf, 0, &rate_hz))
+ return -EINVAL;
+
+ for (i = 0; i < ARRAY_SIZE(lux_rates_hz) - 1; i++)
+ if (rate_hz >= lux_rates_hz[i])
+ break;
+
+ mutex_lock(&chip->mutex);
+ chip->lux_rate_index = i;
+ ret = bh1770_lux_rate(chip, i);
+ mutex_unlock(&chip->mutex);
+
+ if (ret < 0)
+ return ret;
+
+ return count;
+}
+
+static ssize_t bh1770_get_lux_thresh_above(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct bh1770_chip *chip = dev_get_drvdata(dev);
+ return sprintf(buf, "%d\n", chip->lux_threshold_hi);
+}
+
+static ssize_t bh1770_get_lux_thresh_below(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct bh1770_chip *chip = dev_get_drvdata(dev);
+ return sprintf(buf, "%d\n", chip->lux_threshold_lo);
+}
+
+static ssize_t bh1770_set_lux_thresh(struct bh1770_chip *chip, u16 *target,
+ const char *buf)
+{
+ int ret = 0;
+ unsigned long thresh;
+
+ if (strict_strtoul(buf, 0, &thresh))
+ return -EINVAL;
+
+ if (thresh > BH1770_LUX_RANGE)
+ return -EINVAL;
+
+ mutex_lock(&chip->mutex);
+ *target = thresh;
+ /*
+ * Don't update values in HW if we are still waiting for the
+ * first interrupt to arrive after the device handle open call.
+ */
+ if (!chip->lux_wait_result)
+ ret = bh1770_lux_update_thresholds(chip,
+ chip->lux_threshold_hi,
+ chip->lux_threshold_lo);
+ mutex_unlock(&chip->mutex);
+ return ret;
+
+}
+
+static ssize_t bh1770_set_lux_thresh_above(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t len)
+{
+ struct bh1770_chip *chip = dev_get_drvdata(dev);
+ int ret = bh1770_set_lux_thresh(chip, &chip->lux_threshold_hi, buf);
+ if (ret < 0)
+ return ret;
+ return len;
+}
+
+static ssize_t bh1770_set_lux_thresh_below(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t len)
+{
+ struct bh1770_chip *chip = dev_get_drvdata(dev);
+ int ret = bh1770_set_lux_thresh(chip, &chip->lux_threshold_lo, buf);
+ if (ret < 0)
+ return ret;
+ return len;
+}
+
+static DEVICE_ATTR(prox0_raw_en, S_IRUGO | S_IWUSR, bh1770_prox_enable_show,
+ bh1770_prox_enable_store);
+static DEVICE_ATTR(prox0_thresh_above1_value, S_IRUGO | S_IWUSR,
+ bh1770_prox_abs_thres_show,
+ bh1770_prox_abs_thres_store);
+static DEVICE_ATTR(prox0_thresh_above0_value, S_IRUGO | S_IWUSR,
+ bh1770_get_prox_thres,
+ bh1770_set_prox_thres);
+static DEVICE_ATTR(prox0_raw, S_IRUGO, bh1770_prox_result_show, NULL);
+static DEVICE_ATTR(prox0_sensor_range, S_IRUGO, bh1770_prox_range_show, NULL);
+static DEVICE_ATTR(prox0_thresh_above_count, S_IRUGO | S_IWUSR,
+ bh1770_prox_persistence_show,
+ bh1770_prox_persistence_store);
+static DEVICE_ATTR(prox0_rate_above, S_IRUGO | S_IWUSR,
+ bh1770_get_prox_rate_above,
+ bh1770_set_prox_rate_above);
+static DEVICE_ATTR(prox0_rate_below, S_IRUGO | S_IWUSR,
+ bh1770_get_prox_rate_below,
+ bh1770_set_prox_rate_below);
+static DEVICE_ATTR(prox0_rate_avail, S_IRUGO, bh1770_get_prox_rate_avail, NULL);
+
+static DEVICE_ATTR(lux0_calibscale, S_IRUGO | S_IWUSR, bh1770_lux_calib_show,
+ bh1770_lux_calib_store);
+static DEVICE_ATTR(lux0_calibscale_default, S_IRUGO,
+ bh1770_lux_calib_default_show,
+ NULL);
+static DEVICE_ATTR(lux0_input, S_IRUGO, bh1770_lux_result_show, NULL);
+static DEVICE_ATTR(lux0_sensor_range, S_IRUGO, bh1770_lux_range_show, NULL);
+static DEVICE_ATTR(lux0_rate, S_IRUGO | S_IWUSR, bh1770_get_lux_rate,
+ bh1770_set_lux_rate);
+static DEVICE_ATTR(lux0_rate_avail, S_IRUGO, bh1770_get_lux_rate_avail, NULL);
+static DEVICE_ATTR(lux0_thresh_above_value, S_IRUGO | S_IWUSR,
+ bh1770_get_lux_thresh_above,
+ bh1770_set_lux_thresh_above);
+static DEVICE_ATTR(lux0_thresh_below_value, S_IRUGO | S_IWUSR,
+ bh1770_get_lux_thresh_below,
+ bh1770_set_lux_thresh_below);
+static DEVICE_ATTR(chip_id, S_IRUGO, bh1770_chip_id_show, NULL);
+static DEVICE_ATTR(power_state, S_IRUGO | S_IWUSR, bh1770_power_state_show,
+ bh1770_power_state_store);
+
+
+static struct attribute *sysfs_attrs[] = {
+ &dev_attr_lux0_calibscale.attr,
+ &dev_attr_lux0_calibscale_default.attr,
+ &dev_attr_lux0_input.attr,
+ &dev_attr_lux0_sensor_range.attr,
+ &dev_attr_lux0_rate.attr,
+ &dev_attr_lux0_rate_avail.attr,
+ &dev_attr_lux0_thresh_above_value.attr,
+ &dev_attr_lux0_thresh_below_value.attr,
+ &dev_attr_prox0_raw.attr,
+ &dev_attr_prox0_sensor_range.attr,
+ &dev_attr_prox0_raw_en.attr,
+ &dev_attr_prox0_thresh_above_count.attr,
+ &dev_attr_prox0_rate_above.attr,
+ &dev_attr_prox0_rate_below.attr,
+ &dev_attr_prox0_rate_avail.attr,
+ &dev_attr_prox0_thresh_above0_value.attr,
+ &dev_attr_prox0_thresh_above1_value.attr,
+ &dev_attr_chip_id.attr,
+ &dev_attr_power_state.attr,
+ NULL
+};
+
+static struct attribute_group bh1770_attribute_group = {
+ .attrs = sysfs_attrs
+};
+
+static int __devinit bh1770_probe(struct i2c_client *client,
+ const struct i2c_device_id *id)
+{
+ struct bh1770_chip *chip;
+ int err;
+
+ chip = kzalloc(sizeof *chip, GFP_KERNEL);
+ if (!chip)
+ return -ENOMEM;
+
+ i2c_set_clientdata(client, chip);
+ chip->client = client;
+
+ mutex_init(&chip->mutex);
+ init_waitqueue_head(&chip->wait);
+ INIT_DELAYED_WORK(&chip->prox_work, bh1770_prox_work);
+
+ if (client->dev.platform_data == NULL) {
+ dev_err(&client->dev, "platform data is mandatory\n");
+ err = -EINVAL;
+ goto fail1;
+ }
+
+ chip->pdata = client->dev.platform_data;
+ chip->lux_calib = BH1770_LUX_NEUTRAL_CALIB_VALUE;
+ chip->lux_rate_index = BH1770_LUX_DEFAULT_RATE;
+ chip->lux_threshold_lo = BH1770_LUX_DEF_THRES;
+ chip->lux_threshold_hi = BH1770_LUX_DEF_THRES;
+
+ if (chip->pdata->glass_attenuation == 0)
+ chip->lux_ga = BH1770_NEUTRAL_GA;
+ else
+ chip->lux_ga = chip->pdata->glass_attenuation;
+
+ chip->prox_threshold = BH1770_PROX_DEF_THRES;
+ chip->prox_led = chip->pdata->led_def_curr;
+ chip->prox_abs_thres = BH1770_PROX_DEF_ABS_THRES;
+ chip->prox_persistence = BH1770_DEFAULT_PERSISTENCE;
+ chip->prox_rate_threshold = BH1770_PROX_DEF_RATE_THRESH;
+ chip->prox_rate = BH1770_PROX_DEFAULT_RATE;
+ chip->prox_data = 0;
+
+ chip->regs[0].supply = reg_vcc;
+ chip->regs[1].supply = reg_vleds;
+
+ err = regulator_bulk_get(&client->dev,
+ ARRAY_SIZE(chip->regs), chip->regs);
+ if (err < 0) {
+ dev_err(&client->dev, "Cannot get regulators\n");
+ goto fail1;
+ }
+
+ err = regulator_bulk_enable(ARRAY_SIZE(chip->regs),
+ chip->regs);
+ if (err < 0) {
+ dev_err(&client->dev, "Cannot enable regulators\n");
+ goto fail2;
+ }
+
+ usleep_range(BH1770_STARTUP_DELAY, BH1770_STARTUP_DELAY * 2);
+ err = bh1770_detect(chip);
+ if (err < 0)
+ goto fail3;
+
+ /* Start chip */
+ bh1770_chip_on(chip);
+ pm_runtime_set_active(&client->dev);
+ pm_runtime_enable(&client->dev);
+
+ chip->lux_corr = bh1770_get_corr_value(chip);
+ if (chip->lux_corr == 0) {
+ dev_err(&client->dev, "Improper correction values\n");
+ err = -EINVAL;
+ goto fail3;
+ }
+
+ if (chip->pdata->setup_resources) {
+ err = chip->pdata->setup_resources();
+ if (err) {
+ err = -EINVAL;
+ goto fail3;
+ }
+ }
+
+ err = sysfs_create_group(&chip->client->dev.kobj,
+ &bh1770_attribute_group);
+ if (err < 0) {
+ dev_err(&chip->client->dev, "Sysfs registration failed\n");
+ goto fail4;
+ }
+
+ /*
+ * The chip needs a level-triggered interrupt to work. However,
+ * level triggering doesn't always work correctly with power
+ * management, so select both edge and level triggering.
+ */
+ err = request_threaded_irq(client->irq, NULL,
+ bh1770_irq,
+ IRQF_TRIGGER_FALLING | IRQF_ONESHOT |
+ IRQF_TRIGGER_LOW,
+ "bh1770", chip);
+ if (err) {
+ dev_err(&client->dev, "could not get IRQ %d\n",
+ client->irq);
+ goto fail5;
+ }
+ regulator_bulk_disable(ARRAY_SIZE(chip->regs), chip->regs);
+ return err;
+fail5:
+ sysfs_remove_group(&chip->client->dev.kobj,
+ &bh1770_attribute_group);
+fail4:
+ if (chip->pdata->release_resources)
+ chip->pdata->release_resources();
+fail3:
+ regulator_bulk_disable(ARRAY_SIZE(chip->regs), chip->regs);
+fail2:
+ regulator_bulk_free(ARRAY_SIZE(chip->regs), chip->regs);
+fail1:
+ kfree(chip);
+ return err;
+}
+
+static int __devexit bh1770_remove(struct i2c_client *client)
+{
+ struct bh1770_chip *chip = i2c_get_clientdata(client);
+
+ free_irq(client->irq, chip);
+
+ sysfs_remove_group(&chip->client->dev.kobj,
+ &bh1770_attribute_group);
+
+ if (chip->pdata->release_resources)
+ chip->pdata->release_resources();
+
+ cancel_delayed_work_sync(&chip->prox_work);
+
+ if (!pm_runtime_suspended(&client->dev))
+ bh1770_chip_off(chip);
+
+ pm_runtime_disable(&client->dev);
+ pm_runtime_set_suspended(&client->dev);
+
+ regulator_bulk_free(ARRAY_SIZE(chip->regs), chip->regs);
+ kfree(chip);
+ return 0;
+}
+
+#ifdef CONFIG_PM
+static int bh1770_suspend(struct device *dev)
+{
+ struct i2c_client *client = container_of(dev, struct i2c_client, dev);
+ struct bh1770_chip *chip = i2c_get_clientdata(client);
+
+ bh1770_chip_off(chip);
+
+ return 0;
+}
+
+static int bh1770_resume(struct device *dev)
+{
+ struct i2c_client *client = container_of(dev, struct i2c_client, dev);
+ struct bh1770_chip *chip = i2c_get_clientdata(client);
+ int ret = 0;
+
+ bh1770_chip_on(chip);
+
+ if (!pm_runtime_suspended(dev)) {
+ /*
+ * If we were enabled at suspend time, it is expected that
+ * everything resumes nicely and smoothly.
+ */
+ ret = bh1770_lux_rate(chip, chip->lux_rate_index);
+ ret |= bh1770_lux_interrupt_control(chip, BH1770_ENABLE);
+
+ /* This causes an interrupt after the next measurement cycle */
+ bh1770_lux_update_thresholds(chip, BH1770_LUX_DEF_THRES,
+ BH1770_LUX_DEF_THRES);
+ /* Inform that we are waiting for a result from ALS */
+ chip->lux_wait_result = true;
+ bh1770_prox_mode_control(chip);
+ }
+ return ret;
+}
+
+#else
+#define bh1770_suspend NULL
+#define bh1770_shutdown NULL
+#define bh1770_resume NULL
+#endif
+
+#ifdef CONFIG_PM_RUNTIME
+static int bh1770_runtime_suspend(struct device *dev)
+{
+ struct i2c_client *client = container_of(dev, struct i2c_client, dev);
+ struct bh1770_chip *chip = i2c_get_clientdata(client);
+
+ bh1770_chip_off(chip);
+
+ return 0;
+}
+
+static int bh1770_runtime_resume(struct device *dev)
+{
+ struct i2c_client *client = container_of(dev, struct i2c_client, dev);
+ struct bh1770_chip *chip = i2c_get_clientdata(client);
+
+ bh1770_chip_on(chip);
+
+ return 0;
+}
+#endif
+
+static const struct i2c_device_id bh1770_id[] = {
+ {"bh1770glc", 0 },
+ {"sfh7770", 0 },
+ {}
+};
+
+MODULE_DEVICE_TABLE(i2c, bh1770_id);
+
+static const struct dev_pm_ops bh1770_pm_ops = {
+ SET_SYSTEM_SLEEP_PM_OPS(bh1770_suspend, bh1770_resume)
+ SET_RUNTIME_PM_OPS(bh1770_runtime_suspend, bh1770_runtime_resume, NULL)
+};
+
+static struct i2c_driver bh1770_driver = {
+ .driver = {
+ .name = "bh1770glc",
+ .owner = THIS_MODULE,
+ .pm = &bh1770_pm_ops,
+ },
+ .probe = bh1770_probe,
+ .remove = __devexit_p(bh1770_remove),
+ .id_table = bh1770_id,
+};
+
+static int __init bh1770_init(void)
+{
+ return i2c_add_driver(&bh1770_driver);
+}
+
+static void __exit bh1770_exit(void)
+{
+ i2c_del_driver(&bh1770_driver);
+}
+
+MODULE_DESCRIPTION("BH1770GLC / SFH7770 combined ALS and proximity sensor");
+MODULE_AUTHOR("Samu Onkalo, Nokia Corporation");
+MODULE_LICENSE("GPL v2");
+
+module_init(bh1770_init);
+module_exit(bh1770_exit);
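The bh1770 driver added above exposes all of its controls through the sysfs attributes registered in bh1770_attribute_group (prox0_*, lux0_*, chip_id, power_state). As a hedged illustration of how those files might be consumed from userspace, here is a minimal C sketch; only the attribute name comes from the patch, while the sysfs directory path is an assumed example that depends on the I2C bus and slave address the device is probed on.

/*
 * Minimal sketch: read the ALS value exported via the lux0_input attribute
 * bound above. The device path is an assumption (example: bus 3, addr 0x38).
 */
#include <stdio.h>

int main(void)
{
	const char *path = "/sys/bus/i2c/devices/3-0038/lux0_input";
	char buf[32];
	FILE *f = fopen(path, "r");

	if (!f) {
		perror("fopen");
		return 1;
	}
	if (fgets(buf, sizeof(buf), f))
		printf("lux0_input: %s", buf);
	fclose(f);
	return 0;
}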
diff --git a/drivers/misc/ibmasm/ibmasmfs.c b/drivers/misc/ibmasm/ibmasmfs.c
index af2497ae5fe3..d2d5d23416dd 100644
--- a/drivers/misc/ibmasm/ibmasmfs.c
+++ b/drivers/misc/ibmasm/ibmasmfs.c
@@ -91,11 +91,10 @@ static void ibmasmfs_create_files (struct super_block *sb, struct dentry *root);
static int ibmasmfs_fill_super (struct super_block *sb, void *data, int silent);
-static int ibmasmfs_get_super(struct file_system_type *fst,
- int flags, const char *name, void *data,
- struct vfsmount *mnt)
+static struct dentry *ibmasmfs_mount(struct file_system_type *fst,
+ int flags, const char *name, void *data)
{
- return get_sb_single(fst, flags, data, ibmasmfs_fill_super, mnt);
+ return mount_single(fst, flags, data, ibmasmfs_fill_super);
}
static const struct super_operations ibmasmfs_s_ops = {
@@ -108,7 +107,7 @@ static const struct file_operations *ibmasmfs_dir_ops = &simple_dir_operations;
static struct file_system_type ibmasmfs_type = {
.owner = THIS_MODULE,
.name = "ibmasmfs",
- .get_sb = ibmasmfs_get_super,
+ .mount = ibmasmfs_mount,
.kill_sb = kill_litter_super,
};
@@ -146,6 +145,7 @@ static struct inode *ibmasmfs_make_inode(struct super_block *sb, int mode)
struct inode *ret = new_inode(sb);
if (ret) {
+ ret->i_ino = get_next_ino();
ret->i_mode = mode;
ret->i_atime = ret->i_mtime = ret->i_ctime = CURRENT_TIME;
}
diff --git a/drivers/misc/isl29020.c b/drivers/misc/isl29020.c
new file mode 100644
index 000000000000..307aada5fffe
--- /dev/null
+++ b/drivers/misc/isl29020.c
@@ -0,0 +1,248 @@
+/*
+ * isl29020.c - Intersil ALS Driver
+ *
+ * Copyright (C) 2008 Intel Corp
+ *
+ * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; version 2 of the License.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write to the Free Software Foundation, Inc.,
+ * 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA.
+ * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+ *
+ * Data sheet at: http://www.intersil.com/data/fn/fn6505.pdf
+ */
+
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/slab.h>
+#include <linux/i2c.h>
+#include <linux/err.h>
+#include <linux/delay.h>
+#include <linux/sysfs.h>
+#include <linux/pm_runtime.h>
+
+static DEFINE_MUTEX(mutex);
+
+static ssize_t als_sensing_range_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct i2c_client *client = to_i2c_client(dev);
+ int val;
+
+ val = i2c_smbus_read_byte_data(client, 0x00);
+
+ if (val < 0)
+ return val;
+ return sprintf(buf, "%d000\n", 1 << (2 * (val & 3)));
+
+}
+
+static ssize_t als_lux_input_data_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct i2c_client *client = to_i2c_client(dev);
+ int ret_val, val;
+ unsigned long int lux;
+ int temp;
+
+ pm_runtime_get_sync(dev);
+ msleep(100);
+
+ mutex_lock(&mutex);
+ temp = i2c_smbus_read_byte_data(client, 0x02); /* MSB data */
+ if (temp < 0) {
+ pm_runtime_put_sync(dev);
+ mutex_unlock(&mutex);
+ return temp;
+ }
+
+ ret_val = i2c_smbus_read_byte_data(client, 0x01); /* LSB data */
+ mutex_unlock(&mutex);
+
+ if (ret_val < 0) {
+ pm_runtime_put_sync(dev);
+ return ret_val;
+ }
+
+ ret_val |= temp << 8;
+ val = i2c_smbus_read_byte_data(client, 0x00);
+ pm_runtime_put_sync(dev);
+ if (val < 0)
+ return val;
+ lux = ((((1 << (2 * (val & 3))))*1000) * ret_val) / 65536;
+ return sprintf(buf, "%ld\n", lux);
+}
+
+static ssize_t als_sensing_range_store(struct device *dev,
+ struct device_attribute *attr, const char *buf, size_t count)
+{
+ struct i2c_client *client = to_i2c_client(dev);
+ int ret_val;
+ unsigned long val;
+
+ if (strict_strtoul(buf, 10, &val))
+ return -EINVAL;
+ if (val < 1 || val > 64000)
+ return -EINVAL;
+
+ /* Pick the smallest sensor range that will meet our requirements */
+ if (val <= 1000)
+ val = 1;
+ else if (val <= 4000)
+ val = 2;
+ else if (val <= 16000)
+ val = 3;
+ else
+ val = 4;
+
+ ret_val = i2c_smbus_read_byte_data(client, 0x00);
+ if (ret_val < 0)
+ return ret_val;
+
+ ret_val &= 0xFC; /* reset the range bits before setting them */
+ ret_val |= val - 1;
+ ret_val = i2c_smbus_write_byte_data(client, 0x00, ret_val);
+
+ if (ret_val < 0)
+ return ret_val;
+ return count;
+}
+
+static void als_set_power_state(struct i2c_client *client, int enable)
+{
+ int ret_val;
+
+ ret_val = i2c_smbus_read_byte_data(client, 0x00);
+ if (ret_val < 0)
+ return;
+
+ if (enable)
+ ret_val |= 0x80;
+ else
+ ret_val &= 0x7F;
+
+ i2c_smbus_write_byte_data(client, 0x00, ret_val);
+}
+
+static DEVICE_ATTR(lux0_sensor_range, S_IRUGO | S_IWUSR,
+ als_sensing_range_show, als_sensing_range_store);
+static DEVICE_ATTR(lux0_input, S_IRUGO, als_lux_input_data_show, NULL);
+
+static struct attribute *mid_att_als[] = {
+ &dev_attr_lux0_sensor_range.attr,
+ &dev_attr_lux0_input.attr,
+ NULL
+};
+
+static struct attribute_group m_als_gr = {
+ .name = "isl29020",
+ .attrs = mid_att_als
+};
+
+static int als_set_default_config(struct i2c_client *client)
+{
+ int retval;
+
+ retval = i2c_smbus_write_byte_data(client, 0x00, 0xc0);
+ if (retval < 0) {
+ dev_err(&client->dev, "default write failed.");
+ return retval;
+ }
+ return 0;
+}
+
+static int isl29020_probe(struct i2c_client *client,
+ const struct i2c_device_id *id)
+{
+ int res;
+
+ res = als_set_default_config(client);
+ if (res < 0)
+ return res;
+
+ res = sysfs_create_group(&client->dev.kobj, &m_als_gr);
+ if (res) {
+ dev_err(&client->dev, "isl29020: device create file failed\n");
+ return res;
+ }
+ dev_info(&client->dev, "%s isl29020: ALS chip found\n", client->name);
+ als_set_power_state(client, 0);
+ pm_runtime_enable(&client->dev);
+ return res;
+}
+
+static int isl29020_remove(struct i2c_client *client)
+{
+ sysfs_remove_group(&client->dev.kobj, &m_als_gr);
+ return 0;
+}
+
+static struct i2c_device_id isl29020_id[] = {
+ { "isl29020", 0 },
+ { }
+};
+
+MODULE_DEVICE_TABLE(i2c, isl29020_id);
+
+#ifdef CONFIG_PM
+
+static int isl29020_runtime_suspend(struct device *dev)
+{
+ struct i2c_client *client = to_i2c_client(dev);
+ als_set_power_state(client, 0);
+ return 0;
+}
+
+static int isl29020_runtime_resume(struct device *dev)
+{
+ struct i2c_client *client = to_i2c_client(dev);
+ als_set_power_state(client, 1);
+ return 0;
+}
+
+static const struct dev_pm_ops isl29020_pm_ops = {
+ .runtime_suspend = isl29020_runtime_suspend,
+ .runtime_resume = isl29020_runtime_resume,
+};
+
+#define ISL29020_PM_OPS (&isl29020_pm_ops)
+#else /* CONFIG_PM */
+#define ISL29020_PM_OPS NULL
+#endif /* CONFIG_PM */
+
+static struct i2c_driver isl29020_driver = {
+ .driver = {
+ .name = "isl29020",
+ .pm = ISL29020_PM_OPS,
+ },
+ .probe = isl29020_probe,
+ .remove = isl29020_remove,
+ .id_table = isl29020_id,
+};
+
+static int __init sensor_isl29020_init(void)
+{
+ return i2c_add_driver(&isl29020_driver);
+}
+
+static void __exit sensor_isl29020_exit(void)
+{
+ i2c_del_driver(&isl29020_driver);
+}
+
+module_init(sensor_isl29020_init);
+module_exit(sensor_isl29020_exit);
+
+MODULE_AUTHOR("Kalhan Trisal <kalhan.trisal@intel.com>");
+MODULE_DESCRIPTION("Intersil isl29020 ALS Driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/misc/kgdbts.c b/drivers/misc/kgdbts.c
index 72450237a0f4..59c118c19a91 100644
--- a/drivers/misc/kgdbts.c
+++ b/drivers/misc/kgdbts.c
@@ -1044,12 +1044,6 @@ static int __init init_kgdbts(void)
return configure_kgdbts();
}
-static void cleanup_kgdbts(void)
-{
- if (configured == 1)
- kgdb_unregister_io_module(&kgdbts_io_ops);
-}
-
static int kgdbts_get_char(void)
{
int val = 0;
@@ -1081,10 +1075,8 @@ static int param_set_kgdbts_var(const char *kmessage, struct kernel_param *kp)
return 0;
}
- if (kgdb_connected) {
- printk(KERN_ERR
- "kgdbts: Cannot reconfigure while KGDB is connected.\n");
-
+ if (configured == 1) {
+ printk(KERN_ERR "kgdbts: ERROR: Already configured and running.\n");
return -EBUSY;
}
@@ -1093,9 +1085,6 @@ static int param_set_kgdbts_var(const char *kmessage, struct kernel_param *kp)
if (config[len - 1] == '\n')
config[len - 1] = '\0';
- if (configured == 1)
- cleanup_kgdbts();
-
/* Go and configure with the new params. */
return configure_kgdbts();
}
@@ -1123,7 +1112,6 @@ static struct kgdb_io kgdbts_io_ops = {
};
module_init(init_kgdbts);
-module_exit(cleanup_kgdbts);
module_param_call(kgdbts, param_set_kgdbts_var, param_get_string, &kps, 0644);
MODULE_PARM_DESC(kgdbts, "<A|V1|V2>[F#|S#][N#]");
MODULE_DESCRIPTION("KGDB Test Suite");
diff --git a/drivers/misc/lkdtm.c b/drivers/misc/lkdtm.c
index 343b5d8ea697..81d7fa4ec0db 100644
--- a/drivers/misc/lkdtm.c
+++ b/drivers/misc/lkdtm.c
@@ -52,32 +52,32 @@
#define REC_NUM_DEFAULT 10
enum cname {
- INVALID,
- INT_HARDWARE_ENTRY,
- INT_HW_IRQ_EN,
- INT_TASKLET_ENTRY,
- FS_DEVRW,
- MEM_SWAPOUT,
- TIMERADD,
- SCSI_DISPATCH_CMD,
- IDE_CORE_CP,
- DIRECT,
+ CN_INVALID,
+ CN_INT_HARDWARE_ENTRY,
+ CN_INT_HW_IRQ_EN,
+ CN_INT_TASKLET_ENTRY,
+ CN_FS_DEVRW,
+ CN_MEM_SWAPOUT,
+ CN_TIMERADD,
+ CN_SCSI_DISPATCH_CMD,
+ CN_IDE_CORE_CP,
+ CN_DIRECT,
};
enum ctype {
- NONE,
- PANIC,
- BUG,
- EXCEPTION,
- LOOP,
- OVERFLOW,
- CORRUPT_STACK,
- UNALIGNED_LOAD_STORE_WRITE,
- OVERWRITE_ALLOCATION,
- WRITE_AFTER_FREE,
- SOFTLOCKUP,
- HARDLOCKUP,
- HUNG_TASK,
+ CT_NONE,
+ CT_PANIC,
+ CT_BUG,
+ CT_EXCEPTION,
+ CT_LOOP,
+ CT_OVERFLOW,
+ CT_CORRUPT_STACK,
+ CT_UNALIGNED_LOAD_STORE_WRITE,
+ CT_OVERWRITE_ALLOCATION,
+ CT_WRITE_AFTER_FREE,
+ CT_SOFTLOCKUP,
+ CT_HARDLOCKUP,
+ CT_HUNG_TASK,
};
static char* cp_name[] = {
@@ -117,8 +117,8 @@ static char* cpoint_type;
static int cpoint_count = DEFAULT_COUNT;
static int recur_count = REC_NUM_DEFAULT;
-static enum cname cpoint = INVALID;
-static enum ctype cptype = NONE;
+static enum cname cpoint = CN_INVALID;
+static enum ctype cptype = CT_NONE;
static int count = DEFAULT_COUNT;
module_param(recur_count, int, 0644);
@@ -207,12 +207,12 @@ static enum ctype parse_cp_type(const char *what, size_t count)
return i + 1;
}
- return NONE;
+ return CT_NONE;
}
static const char *cp_type_to_str(enum ctype type)
{
- if (type == NONE || type < 0 || type > ARRAY_SIZE(cp_type))
+ if (type == CT_NONE || type < 0 || type > ARRAY_SIZE(cp_type))
return "None";
return cp_type[type - 1];
@@ -220,7 +220,7 @@ static const char *cp_type_to_str(enum ctype type)
static const char *cp_name_to_str(enum cname name)
{
- if (name == INVALID || name < 0 || name > ARRAY_SIZE(cp_name))
+ if (name == CN_INVALID || name < 0 || name > ARRAY_SIZE(cp_name))
return "INVALID";
return cp_name[name - 1];
@@ -245,7 +245,7 @@ static int lkdtm_parse_commandline(void)
return -EINVAL;
cptype = parse_cp_type(cpoint_type, strlen(cpoint_type));
- if (cptype == NONE)
+ if (cptype == CT_NONE)
return -EINVAL;
for (i = 0; i < ARRAY_SIZE(cp_name); i++) {
@@ -274,30 +274,30 @@ static int recursive_loop(int a)
static void lkdtm_do_action(enum ctype which)
{
switch (which) {
- case PANIC:
+ case CT_PANIC:
panic("dumptest");
break;
- case BUG:
+ case CT_BUG:
BUG();
break;
- case EXCEPTION:
+ case CT_EXCEPTION:
*((int *) 0) = 0;
break;
- case LOOP:
+ case CT_LOOP:
for (;;)
;
break;
- case OVERFLOW:
+ case CT_OVERFLOW:
(void) recursive_loop(0);
break;
- case CORRUPT_STACK: {
+ case CT_CORRUPT_STACK: {
volatile u32 data[8];
volatile u32 *p = data;
p[12] = 0x12345678;
break;
}
- case UNALIGNED_LOAD_STORE_WRITE: {
+ case CT_UNALIGNED_LOAD_STORE_WRITE: {
static u8 data[5] __attribute__((aligned(4))) = {1, 2,
3, 4, 5};
u32 *p;
@@ -309,7 +309,7 @@ static void lkdtm_do_action(enum ctype which)
*p = val;
break;
}
- case OVERWRITE_ALLOCATION: {
+ case CT_OVERWRITE_ALLOCATION: {
size_t len = 1020;
u32 *data = kmalloc(len, GFP_KERNEL);
@@ -317,7 +317,7 @@ static void lkdtm_do_action(enum ctype which)
kfree(data);
break;
}
- case WRITE_AFTER_FREE: {
+ case CT_WRITE_AFTER_FREE: {
size_t len = 1024;
u32 *data = kmalloc(len, GFP_KERNEL);
@@ -326,21 +326,21 @@ static void lkdtm_do_action(enum ctype which)
memset(data, 0x78, len);
break;
}
- case SOFTLOCKUP:
+ case CT_SOFTLOCKUP:
preempt_disable();
for (;;)
cpu_relax();
break;
- case HARDLOCKUP:
+ case CT_HARDLOCKUP:
local_irq_disable();
for (;;)
cpu_relax();
break;
- case HUNG_TASK:
+ case CT_HUNG_TASK:
set_current_state(TASK_UNINTERRUPTIBLE);
schedule();
break;
- case NONE:
+ case CT_NONE:
default:
break;
}
@@ -363,43 +363,43 @@ static int lkdtm_register_cpoint(enum cname which)
{
int ret;
- cpoint = INVALID;
+ cpoint = CN_INVALID;
if (lkdtm.entry != NULL)
unregister_jprobe(&lkdtm);
switch (which) {
- case DIRECT:
+ case CN_DIRECT:
lkdtm_do_action(cptype);
return 0;
- case INT_HARDWARE_ENTRY:
+ case CN_INT_HARDWARE_ENTRY:
lkdtm.kp.symbol_name = "do_IRQ";
lkdtm.entry = (kprobe_opcode_t*) jp_do_irq;
break;
- case INT_HW_IRQ_EN:
+ case CN_INT_HW_IRQ_EN:
lkdtm.kp.symbol_name = "handle_IRQ_event";
lkdtm.entry = (kprobe_opcode_t*) jp_handle_irq_event;
break;
- case INT_TASKLET_ENTRY:
+ case CN_INT_TASKLET_ENTRY:
lkdtm.kp.symbol_name = "tasklet_action";
lkdtm.entry = (kprobe_opcode_t*) jp_tasklet_action;
break;
- case FS_DEVRW:
+ case CN_FS_DEVRW:
lkdtm.kp.symbol_name = "ll_rw_block";
lkdtm.entry = (kprobe_opcode_t*) jp_ll_rw_block;
break;
- case MEM_SWAPOUT:
+ case CN_MEM_SWAPOUT:
lkdtm.kp.symbol_name = "shrink_inactive_list";
lkdtm.entry = (kprobe_opcode_t*) jp_shrink_inactive_list;
break;
- case TIMERADD:
+ case CN_TIMERADD:
lkdtm.kp.symbol_name = "hrtimer_start";
lkdtm.entry = (kprobe_opcode_t*) jp_hrtimer_start;
break;
- case SCSI_DISPATCH_CMD:
+ case CN_SCSI_DISPATCH_CMD:
lkdtm.kp.symbol_name = "scsi_dispatch_cmd";
lkdtm.entry = (kprobe_opcode_t*) jp_scsi_dispatch_cmd;
break;
- case IDE_CORE_CP:
+ case CN_IDE_CORE_CP:
#ifdef CONFIG_IDE
lkdtm.kp.symbol_name = "generic_ide_ioctl";
lkdtm.entry = (kprobe_opcode_t*) jp_generic_ide_ioctl;
@@ -416,7 +416,7 @@ static int lkdtm_register_cpoint(enum cname which)
cpoint = which;
if ((ret = register_jprobe(&lkdtm)) < 0) {
printk(KERN_INFO "lkdtm: Couldn't register jprobe\n");
- cpoint = INVALID;
+ cpoint = CN_INVALID;
}
return ret;
@@ -445,7 +445,7 @@ static ssize_t do_register_entry(enum cname which, struct file *f,
cptype = parse_cp_type(buf, count);
free_page((unsigned long) buf);
- if (cptype == NONE)
+ if (cptype == CT_NONE)
return -EINVAL;
err = lkdtm_register_cpoint(which);
@@ -487,49 +487,49 @@ static int lkdtm_debugfs_open(struct inode *inode, struct file *file)
static ssize_t int_hardware_entry(struct file *f, const char __user *buf,
size_t count, loff_t *off)
{
- return do_register_entry(INT_HARDWARE_ENTRY, f, buf, count, off);
+ return do_register_entry(CN_INT_HARDWARE_ENTRY, f, buf, count, off);
}
static ssize_t int_hw_irq_en(struct file *f, const char __user *buf,
size_t count, loff_t *off)
{
- return do_register_entry(INT_HW_IRQ_EN, f, buf, count, off);
+ return do_register_entry(CN_INT_HW_IRQ_EN, f, buf, count, off);
}
static ssize_t int_tasklet_entry(struct file *f, const char __user *buf,
size_t count, loff_t *off)
{
- return do_register_entry(INT_TASKLET_ENTRY, f, buf, count, off);
+ return do_register_entry(CN_INT_TASKLET_ENTRY, f, buf, count, off);
}
static ssize_t fs_devrw_entry(struct file *f, const char __user *buf,
size_t count, loff_t *off)
{
- return do_register_entry(FS_DEVRW, f, buf, count, off);
+ return do_register_entry(CN_FS_DEVRW, f, buf, count, off);
}
static ssize_t mem_swapout_entry(struct file *f, const char __user *buf,
size_t count, loff_t *off)
{
- return do_register_entry(MEM_SWAPOUT, f, buf, count, off);
+ return do_register_entry(CN_MEM_SWAPOUT, f, buf, count, off);
}
static ssize_t timeradd_entry(struct file *f, const char __user *buf,
size_t count, loff_t *off)
{
- return do_register_entry(TIMERADD, f, buf, count, off);
+ return do_register_entry(CN_TIMERADD, f, buf, count, off);
}
static ssize_t scsi_dispatch_cmd_entry(struct file *f,
const char __user *buf, size_t count, loff_t *off)
{
- return do_register_entry(SCSI_DISPATCH_CMD, f, buf, count, off);
+ return do_register_entry(CN_SCSI_DISPATCH_CMD, f, buf, count, off);
}
static ssize_t ide_core_cp_entry(struct file *f, const char __user *buf,
size_t count, loff_t *off)
{
- return do_register_entry(IDE_CORE_CP, f, buf, count, off);
+ return do_register_entry(CN_IDE_CORE_CP, f, buf, count, off);
}
/* Special entry to just crash directly. Available without KPROBEs */
@@ -557,7 +557,7 @@ static ssize_t direct_entry(struct file *f, const char __user *user_buf,
type = parse_cp_type(buf, count);
free_page((unsigned long) buf);
- if (type == NONE)
+ if (type == CT_NONE)
return -EINVAL;
printk(KERN_INFO "lkdtm: Performing direct entry %s\n",
@@ -649,7 +649,7 @@ static int __init lkdtm_module_init(void)
goto out_err;
}
- if (cpoint != INVALID && cptype != NONE) {
+ if (cpoint != CN_INVALID && cptype != CT_NONE) {
ret = lkdtm_register_cpoint(cpoint);
if (ret < 0) {
printk(KERN_INFO "lkdtm: Invalid crash point %d\n",
diff --git a/drivers/misc/phantom.c b/drivers/misc/phantom.c
index 4197a3cb26ba..b05db55c8c8e 100644
--- a/drivers/misc/phantom.c
+++ b/drivers/misc/phantom.c
@@ -343,8 +343,10 @@ static int __devinit phantom_probe(struct pci_dev *pdev,
int retval;
retval = pci_enable_device(pdev);
- if (retval)
+ if (retval) {
+ dev_err(&pdev->dev, "pci_enable_device failed!\n");
goto err;
+ }
minor = phantom_get_free();
if (minor == PHANTOM_MAX_MINORS) {
@@ -356,8 +358,10 @@ static int __devinit phantom_probe(struct pci_dev *pdev,
phantom_devices[minor] = 1;
retval = pci_request_regions(pdev, "phantom");
- if (retval)
+ if (retval) {
+ dev_err(&pdev->dev, "pci_request_regions failed!\n");
goto err_null;
+ }
retval = -ENOMEM;
pht = kzalloc(sizeof(*pht), GFP_KERNEL);
diff --git a/drivers/misc/sgi-xp/xpc_partition.c b/drivers/misc/sgi-xp/xpc_partition.c
index d551f09ccb79..6956f7e7d439 100644
--- a/drivers/misc/sgi-xp/xpc_partition.c
+++ b/drivers/misc/sgi-xp/xpc_partition.c
@@ -439,18 +439,23 @@ xpc_discovery(void)
* nodes that can comprise an access protection grouping. The access
* protection is in regards to memory, IOI and IPI.
*/
- max_regions = 64;
region_size = xp_region_size;
- switch (region_size) {
- case 128:
- max_regions *= 2;
- case 64:
- max_regions *= 2;
- case 32:
- max_regions *= 2;
- region_size = 16;
- DBUG_ON(!is_shub2());
+ if (is_uv())
+ max_regions = 256;
+ else {
+ max_regions = 64;
+
+ switch (region_size) {
+ case 128:
+ max_regions *= 2;
+ case 64:
+ max_regions *= 2;
+ case 32:
+ max_regions *= 2;
+ region_size = 16;
+ DBUG_ON(!is_shub2());
+ }
}
for (region = 0; region < max_regions; region++) {
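The reworked block in xpc_discovery() above keeps the old fall-through switch for non-UV hardware: each matching case doubles max_regions and execution continues into the cases below it, with region_size clamped to 16 at the end of that chain. A short sketch of just that arithmetic, detached from the real xp_region_size plumbing:

#include <stdio.h>

/* Mirrors the fall-through switch used for the non-UV path above. */
static int max_regions_for(int region_size)
{
	int max_regions = 64;

	switch (region_size) {
	case 128:
		max_regions *= 2;	/* fall through */
	case 64:
		max_regions *= 2;	/* fall through */
	case 32:
		max_regions *= 2;	/* region_size would be clamped to 16 here */
	}
	return max_regions;
}

int main(void)
{
	const int sizes[] = { 16, 32, 64, 128 };
	unsigned int i;

	for (i = 0; i < sizeof(sizes) / sizeof(sizes[0]); i++)
		printf("region_size %3d -> max_regions %d\n",
		       sizes[i], max_regions_for(sizes[i]));
	return 0;	/* 64, 128, 256 and 512 respectively */
}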
diff --git a/drivers/misc/sgi-xp/xpc_uv.c b/drivers/misc/sgi-xp/xpc_uv.c
index 1f59ee2226ca..17bbacb1b4b1 100644
--- a/drivers/misc/sgi-xp/xpc_uv.c
+++ b/drivers/misc/sgi-xp/xpc_uv.c
@@ -417,6 +417,7 @@ xpc_process_activate_IRQ_rcvd_uv(void)
static void
xpc_handle_activate_mq_msg_uv(struct xpc_partition *part,
struct xpc_activate_mq_msghdr_uv *msg_hdr,
+ int part_setup,
int *wakeup_hb_checker)
{
unsigned long irq_flags;
@@ -481,6 +482,9 @@ xpc_handle_activate_mq_msg_uv(struct xpc_partition *part,
case XPC_ACTIVATE_MQ_MSG_CHCTL_CLOSEREQUEST_UV: {
struct xpc_activate_mq_msg_chctl_closerequest_uv *msg;
+ if (!part_setup)
+ break;
+
msg = container_of(msg_hdr, struct
xpc_activate_mq_msg_chctl_closerequest_uv,
hdr);
@@ -497,6 +501,9 @@ xpc_handle_activate_mq_msg_uv(struct xpc_partition *part,
case XPC_ACTIVATE_MQ_MSG_CHCTL_CLOSEREPLY_UV: {
struct xpc_activate_mq_msg_chctl_closereply_uv *msg;
+ if (!part_setup)
+ break;
+
msg = container_of(msg_hdr, struct
xpc_activate_mq_msg_chctl_closereply_uv,
hdr);
@@ -511,6 +518,9 @@ xpc_handle_activate_mq_msg_uv(struct xpc_partition *part,
case XPC_ACTIVATE_MQ_MSG_CHCTL_OPENREQUEST_UV: {
struct xpc_activate_mq_msg_chctl_openrequest_uv *msg;
+ if (!part_setup)
+ break;
+
msg = container_of(msg_hdr, struct
xpc_activate_mq_msg_chctl_openrequest_uv,
hdr);
@@ -528,6 +538,9 @@ xpc_handle_activate_mq_msg_uv(struct xpc_partition *part,
case XPC_ACTIVATE_MQ_MSG_CHCTL_OPENREPLY_UV: {
struct xpc_activate_mq_msg_chctl_openreply_uv *msg;
+ if (!part_setup)
+ break;
+
msg = container_of(msg_hdr, struct
xpc_activate_mq_msg_chctl_openreply_uv, hdr);
args = &part->remote_openclose_args[msg->ch_number];
@@ -545,6 +558,9 @@ xpc_handle_activate_mq_msg_uv(struct xpc_partition *part,
case XPC_ACTIVATE_MQ_MSG_CHCTL_OPENCOMPLETE_UV: {
struct xpc_activate_mq_msg_chctl_opencomplete_uv *msg;
+ if (!part_setup)
+ break;
+
msg = container_of(msg_hdr, struct
xpc_activate_mq_msg_chctl_opencomplete_uv, hdr);
spin_lock_irqsave(&part->chctl_lock, irq_flags);
@@ -621,6 +637,7 @@ xpc_handle_activate_IRQ_uv(int irq, void *dev_id)
part_referenced = xpc_part_ref(part);
xpc_handle_activate_mq_msg_uv(part, msg_hdr,
+ part_referenced,
&wakeup_hb_checker);
if (part_referenced)
xpc_part_deref(part);
diff --git a/drivers/misc/ti-st/Kconfig b/drivers/misc/ti-st/Kconfig
new file mode 100644
index 000000000000..2c8c3f39710d
--- /dev/null
+++ b/drivers/misc/ti-st/Kconfig
@@ -0,0 +1,17 @@
+#
+# TI's shared transport line discipline and the protocol
+# drivers (BT, FM and GPS)
+#
+menu "Texas Instruments shared transport line discipline"
+config TI_ST
+ tristate "Shared transport core driver"
+ depends on RFKILL
+ select FW_LOADER
+ help
+ This enables the shared transport core driver for TI
+ BT / FM and GPS combo chips. It allows protocol drivers
+ to register themselves with the core and send data; the
+ responses are returned to the relevant protocol driver
+ based on their packet types.
+
+endmenu
diff --git a/drivers/misc/ti-st/Makefile b/drivers/misc/ti-st/Makefile
new file mode 100644
index 000000000000..78d7ebb14749
--- /dev/null
+++ b/drivers/misc/ti-st/Makefile
@@ -0,0 +1,6 @@
+#
+# Makefile for TI's shared transport line discipline
+# and its protocol drivers (BT, FM, GPS)
+#
+obj-$(CONFIG_TI_ST) += st_drv.o
+st_drv-objs := st_core.o st_kim.o st_ll.o
diff --git a/drivers/staging/ti-st/st_core.c b/drivers/misc/ti-st/st_core.c
index b85d8bfdf600..f9aad06d1ae5 100644
--- a/drivers/staging/ti-st/st_core.c
+++ b/drivers/misc/ti-st/st_core.c
@@ -1,7 +1,8 @@
/*
* Shared Transport Line discipline driver Core
* This hooks up ST KIM driver and ST LL driver
- * Copyright (C) 2009 Texas Instruments
+ * Copyright (C) 2009-2010 Texas Instruments
+ * Author: Pavan Savoy <pavan_savoy@ti.com>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
@@ -28,25 +29,8 @@
#include <net/bluetooth/bluetooth.h>
#include <net/bluetooth/hci_core.h>
#include <net/bluetooth/hci.h>
-#include "fm.h"
-/*
- * packet formats for fm and gps
- * #include "gps.h"
- */
-#include "st_core.h"
-#include "st_kim.h"
-#include "st_ll.h"
-#include "st.h"
+#include <linux/ti_wilink_st.h>
-/* strings to be used for rfkill entries and by
- * ST Core to be used for sysfs debug entry
- */
-#define PROTO_ENTRY(type, name) name
-const unsigned char *protocol_strngs[] = {
- PROTO_ENTRY(ST_BT, "Bluetooth"),
- PROTO_ENTRY(ST_FM, "FM"),
- PROTO_ENTRY(ST_GPS, "GPS"),
-};
/* function pointer pointing to either,
* st_kim_recv during registration to receive fw download responses
* st_int_recv after registration to receive proto stack responses
@@ -151,7 +135,7 @@ void st_reg_complete(struct st_data_s *st_gdata, char err)
static inline int st_check_data_len(struct st_data_s *st_gdata,
int protoid, int len)
{
- register int room = skb_tailroom(st_gdata->rx_skb);
+ int room = skb_tailroom(st_gdata->rx_skb);
pr_debug("len %d room %d", len, room);
@@ -194,7 +178,7 @@ static inline int st_check_data_len(struct st_data_s *st_gdata,
static inline void st_wakeup_ack(struct st_data_s *st_gdata,
unsigned char cmd)
{
- register struct sk_buff *waiting_skb;
+ struct sk_buff *waiting_skb;
unsigned long flags = 0;
spin_lock_irqsave(&st_gdata->lock, flags);
@@ -223,13 +207,13 @@ static inline void st_wakeup_ack(struct st_data_s *st_gdata,
void st_int_recv(void *disc_data,
const unsigned char *data, long count)
{
- register char *ptr;
+ char *ptr;
struct hci_event_hdr *eh;
struct hci_acl_hdr *ah;
struct hci_sco_hdr *sh;
struct fm_event_hdr *fm;
struct gps_event_hdr *gps;
- register int len = 0, type = 0, dlen = 0;
+ int len = 0, type = 0, dlen = 0;
static enum proto_type protoid = ST_MAX;
struct st_data_s *st_gdata = (struct st_data_s *)disc_data;
@@ -685,9 +669,8 @@ long st_register(struct st_proto_s *new_proto)
default:
pr_err("%d protocol not supported",
new_proto->type);
- err = -EPROTONOSUPPORT;
- /* something wrong */
- break;
+ spin_unlock_irqrestore(&st_gdata->lock, flags);
+ return -EPROTONOSUPPORT;
}
st_gdata->list[new_proto->type] = new_proto;
st_gdata->protos_registered++;
@@ -926,34 +909,27 @@ static void st_tty_flush_buffer(struct tty_struct *tty)
return;
}
+static struct tty_ldisc_ops st_ldisc_ops = {
+ .magic = TTY_LDISC_MAGIC,
+ .name = "n_st",
+ .open = st_tty_open,
+ .close = st_tty_close,
+ .receive_buf = st_tty_receive,
+ .write_wakeup = st_tty_wakeup,
+ .flush_buffer = st_tty_flush_buffer,
+ .owner = THIS_MODULE
+};
+
/********************************************************************/
int st_core_init(struct st_data_s **core_data)
{
struct st_data_s *st_gdata;
long err;
- static struct tty_ldisc_ops *st_ldisc_ops;
- /* populate and register to TTY line discipline */
- st_ldisc_ops = kzalloc(sizeof(*st_ldisc_ops), GFP_KERNEL);
- if (!st_ldisc_ops) {
- pr_err("no mem to allocate");
- return -ENOMEM;
- }
-
- st_ldisc_ops->magic = TTY_LDISC_MAGIC;
- st_ldisc_ops->name = "n_st"; /*"n_hci"; */
- st_ldisc_ops->open = st_tty_open;
- st_ldisc_ops->close = st_tty_close;
- st_ldisc_ops->receive_buf = st_tty_receive;
- st_ldisc_ops->write_wakeup = st_tty_wakeup;
- st_ldisc_ops->flush_buffer = st_tty_flush_buffer;
- st_ldisc_ops->owner = THIS_MODULE;
-
- err = tty_register_ldisc(N_TI_WL, st_ldisc_ops);
+ err = tty_register_ldisc(N_TI_WL, &st_ldisc_ops);
if (err) {
pr_err("error registering %d line discipline %ld",
N_TI_WL, err);
- kfree(st_ldisc_ops);
return err;
}
pr_debug("registered n_shared line discipline");
@@ -964,7 +940,6 @@ int st_core_init(struct st_data_s **core_data)
err = tty_unregister_ldisc(N_TI_WL);
if (err)
pr_err("unable to un-register ldisc %ld", err);
- kfree(st_ldisc_ops);
err = -ENOMEM;
return err;
}
@@ -978,22 +953,6 @@ int st_core_init(struct st_data_s **core_data)
/* Locking used in st_int_enqueue() to avoid multiple execution */
spin_lock_init(&st_gdata->lock);
- /* ldisc_ops ref to be only used in __exit of module */
- st_gdata->ldisc_ops = st_ldisc_ops;
-
-#if 0
- err = st_kim_init();
- if (err) {
- pr_err("error during kim initialization(%ld)", err);
- kfree(st_gdata);
- err = tty_unregister_ldisc(N_TI_WL);
- if (err)
- pr_err("unable to un-register ldisc");
- kfree(st_ldisc_ops);
- return -1;
- }
-#endif
-
err = st_ll_init(st_gdata);
if (err) {
pr_err("error during st_ll initialization(%ld)", err);
@@ -1001,7 +960,6 @@ int st_core_init(struct st_data_s **core_data)
err = tty_unregister_ldisc(N_TI_WL);
if (err)
pr_err("unable to un-register ldisc");
- kfree(st_ldisc_ops);
return -1;
}
*core_data = st_gdata;
@@ -1015,11 +973,7 @@ void st_core_exit(struct st_data_s *st_gdata)
err = st_ll_deinit(st_gdata);
if (err)
pr_err("error during deinit of ST LL %ld", err);
-#if 0
- err = st_kim_deinit();
- if (err)
- pr_err("error during deinit of ST KIM %ld", err);
-#endif
+
if (st_gdata != NULL) {
/* Free ST Tx Qs and skbs */
skb_queue_purge(&st_gdata->txq);
@@ -1030,7 +984,6 @@ void st_core_exit(struct st_data_s *st_gdata)
err = tty_unregister_ldisc(N_TI_WL);
if (err)
pr_err("unable to un-register ldisc %ld", err);
- kfree(st_gdata->ldisc_ops);
/* free the global data pointer */
kfree(st_gdata);
}
diff --git a/drivers/staging/ti-st/st_kim.c b/drivers/misc/ti-st/st_kim.c
index 9e99463f76e8..73b6c8b0e869 100644
--- a/drivers/staging/ti-st/st_kim.c
+++ b/drivers/misc/ti-st/st_kim.c
@@ -2,7 +2,8 @@
* Shared Transport Line discipline driver Core
* Init Manager module responsible for GPIO control
* and firmware download
- * Copyright (C) 2009 Texas Instruments
+ * Copyright (C) 2009-2010 Texas Instruments
+ * Author: Pavan Savoy <pavan_savoy@ti.com>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
@@ -28,15 +29,16 @@
#include <linux/gpio.h>
#include <linux/debugfs.h>
#include <linux/seq_file.h>
-
#include <linux/sched.h>
+#include <linux/rfkill.h>
-#include "st_kim.h"
/* understand BT events for fw response */
#include <net/bluetooth/bluetooth.h>
#include <net/bluetooth/hci_core.h>
#include <net/bluetooth/hci.h>
+#include <linux/ti_wilink_st.h>
+
static int kim_probe(struct platform_device *pdev);
static int kim_remove(struct platform_device *pdev);
@@ -73,7 +75,7 @@ const unsigned char *protocol_names[] = {
};
#define MAX_ST_DEVICES 3 /* Imagine 1 on each UART for now */
-struct platform_device *st_kim_devices[MAX_ST_DEVICES];
+static struct platform_device *st_kim_devices[MAX_ST_DEVICES];
/**********************************************************************/
/* internal functions */
@@ -155,17 +157,18 @@ static inline int kim_check_data_len(struct kim_data_s *kim_gdata, int len)
void kim_int_recv(struct kim_data_s *kim_gdata,
const unsigned char *data, long count)
{
- register char *ptr;
+ const unsigned char *ptr;
struct hci_event_hdr *eh;
- register int len = 0, type = 0;
+ int len = 0, type = 0;
pr_debug("%s", __func__);
/* Decode received bytes here */
- ptr = (char *)data;
+ ptr = data;
if (unlikely(ptr == NULL)) {
pr_err(" received null from TTY ");
return;
}
+
while (count) {
if (kim_gdata->rx_count) {
len = min_t(unsigned int, kim_gdata->rx_count, count);
@@ -229,7 +232,7 @@ void kim_int_recv(struct kim_data_s *kim_gdata,
static long read_local_version(struct kim_data_s *kim_gdata, char *bts_scr_name)
{
unsigned short version = 0, chip = 0, min_ver = 0, maj_ver = 0;
- char read_ver_cmd[] = { 0x01, 0x01, 0x10, 0x00 };
+ const char read_ver_cmd[] = { 0x01, 0x01, 0x10, 0x00 };
pr_debug("%s", __func__);
@@ -276,8 +279,8 @@ static long download_firmware(struct kim_data_s *kim_gdata)
{
long err = 0;
long len = 0;
- register unsigned char *ptr = NULL;
- register unsigned char *action_ptr = NULL;
+ unsigned char *ptr = NULL;
+ unsigned char *action_ptr = NULL;
unsigned char bts_scr_name[30] = { 0 }; /* 30 char long bts scr name? */
err = read_local_version(kim_gdata, bts_scr_name);
@@ -638,7 +641,14 @@ static int kim_probe(struct platform_device *pdev)
long *gpios = pdev->dev.platform_data;
struct kim_data_s *kim_gdata;
- st_kim_devices[pdev->id] = pdev;
+ if ((pdev->id != -1) && (pdev->id < MAX_ST_DEVICES)) {
+ /* multiple devices could exist */
+ st_kim_devices[pdev->id] = pdev;
+ } else {
+ /* the platform is sure about the existence of one device */
+ st_kim_devices[0] = pdev;
+ }
+
kim_gdata = kzalloc(sizeof(struct kim_data_s), GFP_ATOMIC);
if (!kim_gdata) {
pr_err("no mem to allocate");
diff --git a/drivers/staging/ti-st/st_ll.c b/drivers/misc/ti-st/st_ll.c
index 7a1fb6de830d..2bda8dea15b0 100644
--- a/drivers/staging/ti-st/st_ll.c
+++ b/drivers/misc/ti-st/st_ll.c
@@ -1,7 +1,8 @@
/*
* Shared Transport driver
* HCI-LL module responsible for TI proprietary HCI_LL protocol
- * Copyright (C) 2009 Texas Instruments
+ * Copyright (C) 2009-2010 Texas Instruments
+ * Author: Pavan Savoy <pavan_savoy@ti.com>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
@@ -19,7 +20,9 @@
*/
#define pr_fmt(fmt) "(stll) :" fmt
-#include "st_ll.h"
+#include <linux/skbuff.h>
+#include <linux/module.h>
+#include <linux/ti_wilink_st.h>
/**********************************************************************/
/* internal functions */
diff --git a/drivers/mmc/Makefile b/drivers/mmc/Makefile
index 9979f5e9765b..12eef393e216 100644
--- a/drivers/mmc/Makefile
+++ b/drivers/mmc/Makefile
@@ -2,9 +2,7 @@
# Makefile for the kernel mmc device drivers.
#
-ifeq ($(CONFIG_MMC_DEBUG),y)
- EXTRA_CFLAGS += -DDEBUG
-endif
+subdir-ccflags-$(CONFIG_MMC_DEBUG) := -DDEBUG
obj-$(CONFIG_MMC) += core/
obj-$(CONFIG_MMC) += card/
diff --git a/drivers/mmc/card/Kconfig b/drivers/mmc/card/Kconfig
index 3f2a912659af..57e4416b9ef0 100644
--- a/drivers/mmc/card/Kconfig
+++ b/drivers/mmc/card/Kconfig
@@ -14,6 +14,23 @@ config MMC_BLOCK
mount the filesystem. Almost everyone wishing MMC support
should say Y or M here.
+config MMC_BLOCK_MINORS
+ int "Number of minors per block device"
+ range 4 256
+ default 8
+ help
+ Number of minors per block device. One is needed for every
+ partition on the disk (plus one for the whole disk).
+
+ The total number of MMC minors available is 256, so the
+ number of supported block devices will be limited to 256
+ divided by this number.
+
+ The default is 8, to stay backwards compatible with the
+ previous hard-wired device numbering.
+
+ If unsure, say 8 here.
+
config MMC_BLOCK_BOUNCE
bool "Use bounce buffer for simple hosts"
depends on MMC_BLOCK
diff --git a/drivers/mmc/card/Makefile b/drivers/mmc/card/Makefile
index 0d407514f67d..c73b406a06cd 100644
--- a/drivers/mmc/card/Makefile
+++ b/drivers/mmc/card/Makefile
@@ -2,10 +2,6 @@
# Makefile for MMC/SD card drivers
#
-ifeq ($(CONFIG_MMC_DEBUG),y)
- EXTRA_CFLAGS += -DDEBUG
-endif
-
obj-$(CONFIG_MMC_BLOCK) += mmc_block.o
mmc_block-objs := block.o queue.o
obj-$(CONFIG_MMC_TEST) += mmc_test.o
diff --git a/drivers/mmc/card/block.c b/drivers/mmc/card/block.c
index 00073b7c0368..217f82037fc1 100644
--- a/drivers/mmc/card/block.c
+++ b/drivers/mmc/card/block.c
@@ -43,15 +43,27 @@
#include "queue.h"
MODULE_ALIAS("mmc:block");
+#ifdef MODULE_PARAM_PREFIX
+#undef MODULE_PARAM_PREFIX
+#endif
+#define MODULE_PARAM_PREFIX "mmcblk."
+
+static DEFINE_MUTEX(block_mutex);
/*
- * max 8 partitions per card
+ * The defaults come from config options but can be overridden by module
+ * or bootarg options.
*/
-#define MMC_SHIFT 3
-#define MMC_NUM_MINORS (256 >> MMC_SHIFT)
+static int perdev_minors = CONFIG_MMC_BLOCK_MINORS;
-static DEFINE_MUTEX(block_mutex);
-static DECLARE_BITMAP(dev_use, MMC_NUM_MINORS);
+/*
+ * We've only got one major, so number of mmcblk devices is
+ * limited to 256 / number of minors per device.
+ */
+static int max_devices;
+
+/* 256 minors, so at most 256 separate devices */
+static DECLARE_BITMAP(dev_use, 256);
/*
* There is one mmc_blk_data per slot.
@@ -67,6 +79,9 @@ struct mmc_blk_data {
static DEFINE_MUTEX(open_lock);
+module_param(perdev_minors, int, 0444);
+MODULE_PARM_DESC(perdev_minors, "Number of minors to allocate per device");
+
static struct mmc_blk_data *mmc_blk_get(struct gendisk *disk)
{
struct mmc_blk_data *md;
@@ -88,10 +103,10 @@ static void mmc_blk_put(struct mmc_blk_data *md)
md->usage--;
if (md->usage == 0) {
int devmaj = MAJOR(disk_devt(md->disk));
- int devidx = MINOR(disk_devt(md->disk)) >> MMC_SHIFT;
+ int devidx = MINOR(disk_devt(md->disk)) / perdev_minors;
if (!devmaj)
- devidx = md->disk->first_minor >> MMC_SHIFT;
+ devidx = md->disk->first_minor / perdev_minors;
blk_cleanup_queue(md->queue.queue);
@@ -373,7 +388,6 @@ static int mmc_blk_issue_rw_rq(struct mmc_queue *mq, struct request *req)
readcmd = MMC_READ_SINGLE_BLOCK;
writecmd = MMC_WRITE_BLOCK;
}
-
if (rq_data_dir(req) == READ) {
brq.cmd.opcode = readcmd;
brq.data.flags |= MMC_DATA_READ;
@@ -567,8 +581,8 @@ static struct mmc_blk_data *mmc_blk_alloc(struct mmc_card *card)
struct mmc_blk_data *md;
int devidx, ret;
- devidx = find_first_zero_bit(dev_use, MMC_NUM_MINORS);
- if (devidx >= MMC_NUM_MINORS)
+ devidx = find_first_zero_bit(dev_use, max_devices);
+ if (devidx >= max_devices)
return ERR_PTR(-ENOSPC);
__set_bit(devidx, dev_use);
@@ -585,7 +599,7 @@ static struct mmc_blk_data *mmc_blk_alloc(struct mmc_card *card)
*/
md->read_only = mmc_blk_readonly(card);
- md->disk = alloc_disk(1 << MMC_SHIFT);
+ md->disk = alloc_disk(perdev_minors);
if (md->disk == NULL) {
ret = -ENOMEM;
goto err_kfree;
@@ -602,7 +616,7 @@ static struct mmc_blk_data *mmc_blk_alloc(struct mmc_card *card)
md->queue.data = md;
md->disk->major = MMC_BLOCK_MAJOR;
- md->disk->first_minor = devidx << MMC_SHIFT;
+ md->disk->first_minor = devidx * perdev_minors;
md->disk->fops = &mmc_bdops;
md->disk->private_data = md;
md->disk->queue = md->queue.queue;
@@ -620,7 +634,8 @@ static struct mmc_blk_data *mmc_blk_alloc(struct mmc_card *card)
* messages to tell when the card is present.
*/
- sprintf(md->disk->disk_name, "mmcblk%d", devidx);
+ snprintf(md->disk->disk_name, sizeof(md->disk->disk_name),
+ "mmcblk%d", devidx);
blk_queue_logical_block_size(md->queue.queue, 512);
@@ -651,23 +666,15 @@ static struct mmc_blk_data *mmc_blk_alloc(struct mmc_card *card)
static int
mmc_blk_set_blksize(struct mmc_blk_data *md, struct mmc_card *card)
{
- struct mmc_command cmd;
int err;
- /* Block-addressed cards ignore MMC_SET_BLOCKLEN. */
- if (mmc_card_blockaddr(card))
- return 0;
-
mmc_claim_host(card->host);
- cmd.opcode = MMC_SET_BLOCKLEN;
- cmd.arg = 512;
- cmd.flags = MMC_RSP_SPI_R1 | MMC_RSP_R1 | MMC_CMD_AC;
- err = mmc_wait_for_cmd(card->host, &cmd, 5);
+ err = mmc_set_blocklen(card, 512);
mmc_release_host(card->host);
if (err) {
- printk(KERN_ERR "%s: unable to set block size to %d: %d\n",
- md->disk->disk_name, cmd.arg, err);
+ printk(KERN_ERR "%s: unable to set block size to 512: %d\n",
+ md->disk->disk_name, err);
return -EINVAL;
}
@@ -678,7 +685,6 @@ static int mmc_blk_probe(struct mmc_card *card)
{
struct mmc_blk_data *md;
int err;
-
char cap_str[10];
/*
@@ -768,6 +774,11 @@ static int __init mmc_blk_init(void)
{
int res;
+ if (perdev_minors != CONFIG_MMC_BLOCK_MINORS)
+ pr_info("mmcblk: using %d minors per device\n", perdev_minors);
+
+ max_devices = 256 / perdev_minors;
+
res = register_blkdev(MMC_BLOCK_MAJOR, "mmc");
if (res)
goto out;
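With MMC_SHIFT removed, the mmcblk minor layout above is derived from perdev_minors at runtime: first_minor = devidx * perdev_minors, devidx = minor / perdev_minors and max_devices = 256 / perdev_minors. A tiny sketch of that mapping with the default of 8 minors per device; the numbers are illustrative only.

#include <stdio.h>

/* Illustrates the devidx <-> minor mapping introduced above. */
int main(void)
{
	const int perdev_minors = 8;			/* CONFIG_MMC_BLOCK_MINORS default */
	const int max_devices = 256 / perdev_minors;	/* at most 32 mmcblk devices */
	int devidx;

	for (devidx = 0; devidx < 3; devidx++) {
		int first_minor = devidx * perdev_minors;

		printf("mmcblk%d: minors %d-%d, devidx recovered as %d\n",
		       devidx, first_minor, first_minor + perdev_minors - 1,
		       first_minor / perdev_minors);
	}
	printf("max_devices = %d\n", max_devices);
	return 0;
}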
diff --git a/drivers/mmc/card/mmc_test.c b/drivers/mmc/card/mmc_test.c
index 5dd8576b5c18..21adc27f4132 100644
--- a/drivers/mmc/card/mmc_test.c
+++ b/drivers/mmc/card/mmc_test.c
@@ -17,6 +17,11 @@
#include <linux/scatterlist.h>
#include <linux/swap.h> /* For nr_free_buffer_pages() */
+#include <linux/list.h>
+
+#include <linux/debugfs.h>
+#include <linux/uaccess.h>
+#include <linux/seq_file.h>
#define RESULT_OK 0
#define RESULT_FAIL 1
@@ -56,7 +61,9 @@ struct mmc_test_mem {
* struct mmc_test_area - information for performance tests.
* @max_sz: test area size (in bytes)
* @dev_addr: address on card at which to do performance tests
- * @max_segs: maximum segments in scatterlist @sg
+ * @max_tfr: maximum transfer size allowed by driver (in bytes)
+ * @max_segs: maximum segments allowed by driver in scatterlist @sg
+ * @max_seg_sz: maximum segment size allowed by driver
* @blocks: number of (512 byte) blocks currently mapped by @sg
* @sg_len: length of currently mapped scatterlist @sg
* @mem: allocated memory
@@ -65,7 +72,9 @@ struct mmc_test_mem {
struct mmc_test_area {
unsigned long max_sz;
unsigned int dev_addr;
+ unsigned int max_tfr;
unsigned int max_segs;
+ unsigned int max_seg_sz;
unsigned int blocks;
unsigned int sg_len;
struct mmc_test_mem *mem;
@@ -73,12 +82,57 @@ struct mmc_test_area {
};
/**
+ * struct mmc_test_transfer_result - transfer results for performance tests.
+ * @link: double-linked list
+ * @count: number of groups of sectors to check
+ * @sectors: number of sectors to check in one group
+ * @ts: time values of transfer
+ * @rate: calculated transfer rate
+ */
+struct mmc_test_transfer_result {
+ struct list_head link;
+ unsigned int count;
+ unsigned int sectors;
+ struct timespec ts;
+ unsigned int rate;
+};
+
+/**
+ * struct mmc_test_general_result - results for tests.
+ * @link: double-linked list
+ * @card: card under test
+ * @testcase: number of test case
+ * @result: result of test run
+ * @tr_lst: transfer measurements if any as mmc_test_transfer_result
+ */
+struct mmc_test_general_result {
+ struct list_head link;
+ struct mmc_card *card;
+ int testcase;
+ int result;
+ struct list_head tr_lst;
+};
+
+/**
+ * struct mmc_test_dbgfs_file - debugfs related file.
+ * @link: double-linked list
+ * @card: card under test
+ * @file: file created under debugfs
+ */
+struct mmc_test_dbgfs_file {
+ struct list_head link;
+ struct mmc_card *card;
+ struct dentry *file;
+};
+
+/**
* struct mmc_test_card - test information.
* @card: card under test
* @scratch: transfer buffer
* @buffer: transfer buffer
* @highmem: buffer for highmem tests
* @area: information for performance tests
+ * @gr: pointer to results of current testcase
*/
struct mmc_test_card {
struct mmc_card *card;
@@ -88,7 +142,8 @@ struct mmc_test_card {
#ifdef CONFIG_HIGHMEM
struct page *highmem;
#endif
- struct mmc_test_area area;
+ struct mmc_test_area area;
+ struct mmc_test_general_result *gr;
};
/*******************************************************************/
@@ -100,17 +155,7 @@ struct mmc_test_card {
*/
static int mmc_test_set_blksize(struct mmc_test_card *test, unsigned size)
{
- struct mmc_command cmd;
- int ret;
-
- cmd.opcode = MMC_SET_BLOCKLEN;
- cmd.arg = size;
- cmd.flags = MMC_RSP_R1 | MMC_CMD_AC;
- ret = mmc_wait_for_cmd(test->card->host, &cmd, 0);
- if (ret)
- return ret;
-
- return 0;
+ return mmc_set_blocklen(test->card, size);
}
/*
@@ -245,27 +290,38 @@ static void mmc_test_free_mem(struct mmc_test_mem *mem)
/*
* Allocate a lot of memory, preferably max_sz but at least min_sz. In case
- * there isn't much memory do not exceed 1/16th total lowmem pages.
+ * there isn't much memory do not exceed 1/16th total lowmem pages. Also do
+ * not exceed a maximum number of segments and try not to make segments much
+ * bigger than maximum segment size.
*/
static struct mmc_test_mem *mmc_test_alloc_mem(unsigned long min_sz,
- unsigned long max_sz)
+ unsigned long max_sz,
+ unsigned int max_segs,
+ unsigned int max_seg_sz)
{
unsigned long max_page_cnt = DIV_ROUND_UP(max_sz, PAGE_SIZE);
unsigned long min_page_cnt = DIV_ROUND_UP(min_sz, PAGE_SIZE);
+ unsigned long max_seg_page_cnt = DIV_ROUND_UP(max_seg_sz, PAGE_SIZE);
unsigned long page_cnt = 0;
unsigned long limit = nr_free_buffer_pages() >> 4;
struct mmc_test_mem *mem;
if (max_page_cnt > limit)
max_page_cnt = limit;
- if (max_page_cnt < min_page_cnt)
- max_page_cnt = min_page_cnt;
+ if (min_page_cnt > max_page_cnt)
+ min_page_cnt = max_page_cnt;
+
+ if (max_seg_page_cnt > max_page_cnt)
+ max_seg_page_cnt = max_page_cnt;
+
+ if (max_segs > max_page_cnt)
+ max_segs = max_page_cnt;
mem = kzalloc(sizeof(struct mmc_test_mem), GFP_KERNEL);
if (!mem)
return NULL;
- mem->arr = kzalloc(sizeof(struct mmc_test_pages) * max_page_cnt,
+ mem->arr = kzalloc(sizeof(struct mmc_test_pages) * max_segs,
GFP_KERNEL);
if (!mem->arr)
goto out_free;
@@ -276,7 +332,7 @@ static struct mmc_test_mem *mmc_test_alloc_mem(unsigned long min_sz,
gfp_t flags = GFP_KERNEL | GFP_DMA | __GFP_NOWARN |
__GFP_NORETRY;
- order = get_order(max_page_cnt << PAGE_SHIFT);
+ order = get_order(max_seg_page_cnt << PAGE_SHIFT);
while (1) {
page = alloc_pages(flags, order);
if (page || !order)
@@ -295,6 +351,11 @@ static struct mmc_test_mem *mmc_test_alloc_mem(unsigned long min_sz,
break;
max_page_cnt -= 1UL << order;
page_cnt += 1UL << order;
+ if (mem->cnt >= max_segs) {
+ if (page_cnt < min_page_cnt)
+ goto out_free;
+ break;
+ }
}
return mem;
@@ -310,7 +371,8 @@ out_free:
*/
static int mmc_test_map_sg(struct mmc_test_mem *mem, unsigned long sz,
struct scatterlist *sglist, int repeat,
- unsigned int max_segs, unsigned int *sg_len)
+ unsigned int max_segs, unsigned int max_seg_sz,
+ unsigned int *sg_len)
{
struct scatterlist *sg = NULL;
unsigned int i;
@@ -322,8 +384,10 @@ static int mmc_test_map_sg(struct mmc_test_mem *mem, unsigned long sz,
for (i = 0; i < mem->cnt; i++) {
unsigned long len = PAGE_SIZE << mem->arr[i].order;
- if (sz < len)
+ if (len > sz)
len = sz;
+ if (len > max_seg_sz)
+ len = max_seg_sz;
if (sg)
sg = sg_next(sg);
else
@@ -355,6 +419,7 @@ static int mmc_test_map_sg_max_scatter(struct mmc_test_mem *mem,
unsigned long sz,
struct scatterlist *sglist,
unsigned int max_segs,
+ unsigned int max_seg_sz,
unsigned int *sg_len)
{
struct scatterlist *sg = NULL;
@@ -365,7 +430,7 @@ static int mmc_test_map_sg_max_scatter(struct mmc_test_mem *mem,
sg_init_table(sglist, max_segs);
*sg_len = 0;
- while (sz && i) {
+ while (sz) {
base = page_address(mem->arr[--i].page);
cnt = 1 << mem->arr[i].order;
while (sz && cnt) {
@@ -374,7 +439,9 @@ static int mmc_test_map_sg_max_scatter(struct mmc_test_mem *mem,
continue;
last_addr = addr;
len = PAGE_SIZE;
- if (sz < len)
+ if (len > max_seg_sz)
+ len = max_seg_sz;
+ if (len > sz)
len = sz;
if (sg)
sg = sg_next(sg);
@@ -386,6 +453,8 @@ static int mmc_test_map_sg_max_scatter(struct mmc_test_mem *mem,
sz -= len;
*sg_len += 1;
}
+ if (i == 0)
+ i = mem->cnt;
}
if (sg)
@@ -421,6 +490,30 @@ static unsigned int mmc_test_rate(uint64_t bytes, struct timespec *ts)
}
/*
+ * Save transfer results for future usage
+ */
+static void mmc_test_save_transfer_result(struct mmc_test_card *test,
+ unsigned int count, unsigned int sectors, struct timespec ts,
+ unsigned int rate)
+{
+ struct mmc_test_transfer_result *tr;
+
+ if (!test->gr)
+ return;
+
+ tr = kmalloc(sizeof(struct mmc_test_transfer_result), GFP_KERNEL);
+ if (!tr)
+ return;
+
+ tr->count = count;
+ tr->sectors = sectors;
+ tr->ts = ts;
+ tr->rate = rate;
+
+ list_add_tail(&tr->link, &test->gr->tr_lst);
+}
+
+/*
* Print the transfer rate.
*/
static void mmc_test_print_rate(struct mmc_test_card *test, uint64_t bytes,
@@ -436,8 +529,10 @@ static void mmc_test_print_rate(struct mmc_test_card *test, uint64_t bytes,
printk(KERN_INFO "%s: Transfer of %u sectors (%u%s KiB) took %lu.%09lu "
"seconds (%u kB/s, %u KiB/s)\n",
mmc_hostname(test->card->host), sectors, sectors >> 1,
- (sectors == 1 ? ".5" : ""), (unsigned long)ts.tv_sec,
+ (sectors & 1 ? ".5" : ""), (unsigned long)ts.tv_sec,
(unsigned long)ts.tv_nsec, rate / 1000, rate / 1024);
+
+ mmc_test_save_transfer_result(test, 1, sectors, ts, rate);
}
/*
@@ -458,9 +553,11 @@ static void mmc_test_print_avg_rate(struct mmc_test_card *test, uint64_t bytes,
printk(KERN_INFO "%s: Transfer of %u x %u sectors (%u x %u%s KiB) took "
"%lu.%09lu seconds (%u kB/s, %u KiB/s)\n",
mmc_hostname(test->card->host), count, sectors, count,
- sectors >> 1, (sectors == 1 ? ".5" : ""),
+ sectors >> 1, (sectors & 1 ? ".5" : ""),
(unsigned long)ts.tv_sec, (unsigned long)ts.tv_nsec,
rate / 1000, rate / 1024);
+
+ mmc_test_save_transfer_result(test, count, sectors, ts, rate);
}
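
The two printouts above also change the fractional-KiB display from "sectors == 1" to "sectors & 1": with 512-byte sectors, any odd sector count leaves half a KiB to show, not just a single-sector transfer. A minimal standalone illustration of that formatting (an editorial sketch, not part of the patch):

#include <stdio.h>

int main(void)
{
	unsigned int sectors;

	/* 512-byte sectors: an odd count leaves half a KiB to display */
	for (sectors = 1; sectors <= 4; sectors++)
		printf("%u sectors = %u%s KiB\n",
		       sectors, sectors >> 1, (sectors & 1) ? ".5" : "");
	return 0;	/* prints 0.5, 1, 1.5 and 2 KiB */
}
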
/*
@@ -1215,16 +1312,22 @@ static int mmc_test_area_map(struct mmc_test_card *test, unsigned long sz,
int max_scatter)
{
struct mmc_test_area *t = &test->area;
+ int err;
t->blocks = sz >> 9;
if (max_scatter) {
- return mmc_test_map_sg_max_scatter(t->mem, sz, t->sg,
- t->max_segs, &t->sg_len);
- } else {
- return mmc_test_map_sg(t->mem, sz, t->sg, 1, t->max_segs,
+ err = mmc_test_map_sg_max_scatter(t->mem, sz, t->sg,
+ t->max_segs, t->max_seg_sz,
&t->sg_len);
+ } else {
+ err = mmc_test_map_sg(t->mem, sz, t->sg, 1, t->max_segs,
+ t->max_seg_sz, &t->sg_len);
}
+ if (err)
+ printk(KERN_INFO "%s: Failed to map sg list\n",
+ mmc_hostname(test->card->host));
+ return err;
}
/*
@@ -1249,6 +1352,22 @@ static int mmc_test_area_io(struct mmc_test_card *test, unsigned long sz,
struct timespec ts1, ts2;
int ret;
+ /*
+ * In the case of a maximally scattered transfer, the maximum transfer
+ * size is further limited by using PAGE_SIZE segments.
+ */
+ if (max_scatter) {
+ struct mmc_test_area *t = &test->area;
+ unsigned long max_tfr;
+
+ if (t->max_seg_sz >= PAGE_SIZE)
+ max_tfr = t->max_segs * PAGE_SIZE;
+ else
+ max_tfr = t->max_segs * t->max_seg_sz;
+ if (sz > max_tfr)
+ sz = max_tfr;
+ }
+
ret = mmc_test_area_map(test, sz, max_scatter);
if (ret)
return ret;
@@ -1274,7 +1393,7 @@ static int mmc_test_area_io(struct mmc_test_card *test, unsigned long sz,
*/
static int mmc_test_area_fill(struct mmc_test_card *test)
{
- return mmc_test_area_io(test, test->area.max_sz, test->area.dev_addr,
+ return mmc_test_area_io(test, test->area.max_tfr, test->area.dev_addr,
1, 0, 0);
}
@@ -1328,16 +1447,29 @@ static int mmc_test_area_init(struct mmc_test_card *test, int erase, int fill)
t->max_sz = TEST_AREA_MAX_SIZE;
else
t->max_sz = (unsigned long)test->card->pref_erase << 9;
+
+ t->max_segs = test->card->host->max_segs;
+ t->max_seg_sz = test->card->host->max_seg_size;
+
+ t->max_tfr = t->max_sz;
+ if (t->max_tfr >> 9 > test->card->host->max_blk_count)
+ t->max_tfr = test->card->host->max_blk_count << 9;
+ if (t->max_tfr > test->card->host->max_req_size)
+ t->max_tfr = test->card->host->max_req_size;
+ if (t->max_tfr / t->max_seg_sz > t->max_segs)
+ t->max_tfr = t->max_segs * t->max_seg_sz;
+
/*
- * Try to allocate enough memory for the whole area. Less is OK
+ * Try to allocate enough memory for a max. sized transfer. Less is OK
* because the same memory can be mapped into the scatterlist more than
- * once.
+ * once. Also, take into account the limits imposed on scatterlist
+ * segments by the host driver.
*/
- t->mem = mmc_test_alloc_mem(min_sz, t->max_sz);
+ t->mem = mmc_test_alloc_mem(min_sz, t->max_tfr, t->max_segs,
+ t->max_seg_sz);
if (!t->mem)
return -ENOMEM;
- t->max_segs = DIV_ROUND_UP(t->max_sz, PAGE_SIZE);
t->sg = kmalloc(sizeof(struct scatterlist) * t->max_segs, GFP_KERNEL);
if (!t->sg) {
ret = -ENOMEM;
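
The max_tfr computation in the hunk above caps the test transfer size by the host's block count, request size, and scatterlist segment limits. A standalone sketch of that derivation (the numeric limits are made-up examples, not values from any particular host):

#include <stdio.h>

int main(void)
{
	unsigned long max_sz = 4 * 1024 * 1024;	/* test area size (e.g. pref_erase << 9) */
	unsigned int max_blk_count = 65535;	/* like host->max_blk_count */
	unsigned int max_req_size = 524288;	/* like host->max_req_size */
	unsigned int max_segs = 128;		/* like host->max_segs */
	unsigned int max_seg_sz = 65536;	/* like host->max_seg_size */
	unsigned long max_tfr = max_sz;

	/* same clamping order as mmc_test_area_init() above */
	if (max_tfr >> 9 > max_blk_count)
		max_tfr = (unsigned long)max_blk_count << 9;
	if (max_tfr > max_req_size)
		max_tfr = max_req_size;
	if (max_tfr / max_seg_sz > max_segs)
		max_tfr = (unsigned long)max_segs * max_seg_sz;

	printf("max_tfr = %lu bytes\n", max_tfr);	/* 524288 with these limits */
	return 0;
}
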
@@ -1401,7 +1533,7 @@ static int mmc_test_area_prepare_fill(struct mmc_test_card *test)
static int mmc_test_best_performance(struct mmc_test_card *test, int write,
int max_scatter)
{
- return mmc_test_area_io(test, test->area.max_sz, test->area.dev_addr,
+ return mmc_test_area_io(test, test->area.max_tfr, test->area.dev_addr,
write, max_scatter, 1);
}
@@ -1446,12 +1578,13 @@ static int mmc_test_profile_read_perf(struct mmc_test_card *test)
unsigned int dev_addr;
int ret;
- for (sz = 512; sz < test->area.max_sz; sz <<= 1) {
+ for (sz = 512; sz < test->area.max_tfr; sz <<= 1) {
dev_addr = test->area.dev_addr + (sz >> 9);
ret = mmc_test_area_io(test, sz, dev_addr, 0, 0, 1);
if (ret)
return ret;
}
+ sz = test->area.max_tfr;
dev_addr = test->area.dev_addr;
return mmc_test_area_io(test, sz, dev_addr, 0, 0, 1);
}
@@ -1468,7 +1601,7 @@ static int mmc_test_profile_write_perf(struct mmc_test_card *test)
ret = mmc_test_area_erase(test);
if (ret)
return ret;
- for (sz = 512; sz < test->area.max_sz; sz <<= 1) {
+ for (sz = 512; sz < test->area.max_tfr; sz <<= 1) {
dev_addr = test->area.dev_addr + (sz >> 9);
ret = mmc_test_area_io(test, sz, dev_addr, 1, 0, 1);
if (ret)
@@ -1477,6 +1610,7 @@ static int mmc_test_profile_write_perf(struct mmc_test_card *test)
ret = mmc_test_area_erase(test);
if (ret)
return ret;
+ sz = test->area.max_tfr;
dev_addr = test->area.dev_addr;
return mmc_test_area_io(test, sz, dev_addr, 1, 0, 1);
}
@@ -1516,29 +1650,63 @@ static int mmc_test_profile_trim_perf(struct mmc_test_card *test)
return 0;
}
+static int mmc_test_seq_read_perf(struct mmc_test_card *test, unsigned long sz)
+{
+ unsigned int dev_addr, i, cnt;
+ struct timespec ts1, ts2;
+ int ret;
+
+ cnt = test->area.max_sz / sz;
+ dev_addr = test->area.dev_addr;
+ getnstimeofday(&ts1);
+ for (i = 0; i < cnt; i++) {
+ ret = mmc_test_area_io(test, sz, dev_addr, 0, 0, 0);
+ if (ret)
+ return ret;
+ dev_addr += (sz >> 9);
+ }
+ getnstimeofday(&ts2);
+ mmc_test_print_avg_rate(test, sz, cnt, &ts1, &ts2);
+ return 0;
+}
+
/*
* Consecutive read performance by transfer size.
*/
static int mmc_test_profile_seq_read_perf(struct mmc_test_card *test)
{
unsigned long sz;
+ int ret;
+
+ for (sz = 512; sz < test->area.max_tfr; sz <<= 1) {
+ ret = mmc_test_seq_read_perf(test, sz);
+ if (ret)
+ return ret;
+ }
+ sz = test->area.max_tfr;
+ return mmc_test_seq_read_perf(test, sz);
+}
+
+static int mmc_test_seq_write_perf(struct mmc_test_card *test, unsigned long sz)
+{
unsigned int dev_addr, i, cnt;
struct timespec ts1, ts2;
int ret;
- for (sz = 512; sz <= test->area.max_sz; sz <<= 1) {
- cnt = test->area.max_sz / sz;
- dev_addr = test->area.dev_addr;
- getnstimeofday(&ts1);
- for (i = 0; i < cnt; i++) {
- ret = mmc_test_area_io(test, sz, dev_addr, 0, 0, 0);
- if (ret)
- return ret;
- dev_addr += (sz >> 9);
- }
- getnstimeofday(&ts2);
- mmc_test_print_avg_rate(test, sz, cnt, &ts1, &ts2);
+ ret = mmc_test_area_erase(test);
+ if (ret)
+ return ret;
+ cnt = test->area.max_sz / sz;
+ dev_addr = test->area.dev_addr;
+ getnstimeofday(&ts1);
+ for (i = 0; i < cnt; i++) {
+ ret = mmc_test_area_io(test, sz, dev_addr, 1, 0, 0);
+ if (ret)
+ return ret;
+ dev_addr += (sz >> 9);
}
+ getnstimeofday(&ts2);
+ mmc_test_print_avg_rate(test, sz, cnt, &ts1, &ts2);
return 0;
}
@@ -1548,27 +1716,15 @@ static int mmc_test_profile_seq_read_perf(struct mmc_test_card *test)
static int mmc_test_profile_seq_write_perf(struct mmc_test_card *test)
{
unsigned long sz;
- unsigned int dev_addr, i, cnt;
- struct timespec ts1, ts2;
int ret;
- for (sz = 512; sz <= test->area.max_sz; sz <<= 1) {
- ret = mmc_test_area_erase(test);
+ for (sz = 512; sz < test->area.max_tfr; sz <<= 1) {
+ ret = mmc_test_seq_write_perf(test, sz);
if (ret)
return ret;
- cnt = test->area.max_sz / sz;
- dev_addr = test->area.dev_addr;
- getnstimeofday(&ts1);
- for (i = 0; i < cnt; i++) {
- ret = mmc_test_area_io(test, sz, dev_addr, 1, 0, 0);
- if (ret)
- return ret;
- dev_addr += (sz >> 9);
- }
- getnstimeofday(&ts2);
- mmc_test_print_avg_rate(test, sz, cnt, &ts1, &ts2);
}
- return 0;
+ sz = test->area.max_tfr;
+ return mmc_test_seq_write_perf(test, sz);
}
/*
@@ -1853,6 +2009,8 @@ static const struct mmc_test_case mmc_test_cases[] = {
static DEFINE_MUTEX(mmc_test_lock);
+static LIST_HEAD(mmc_test_result);
+
static void mmc_test_run(struct mmc_test_card *test, int testcase)
{
int i, ret;
@@ -1863,6 +2021,8 @@ static void mmc_test_run(struct mmc_test_card *test, int testcase)
mmc_claim_host(test->card->host);
for (i = 0;i < ARRAY_SIZE(mmc_test_cases);i++) {
+ struct mmc_test_general_result *gr;
+
if (testcase && ((i + 1) != testcase))
continue;
@@ -1881,6 +2041,25 @@ static void mmc_test_run(struct mmc_test_card *test, int testcase)
}
}
+ gr = kzalloc(sizeof(struct mmc_test_general_result),
+ GFP_KERNEL);
+ if (gr) {
+ INIT_LIST_HEAD(&gr->tr_lst);
+
+ /* Assign the data we already know */
+ gr->card = test->card;
+ gr->testcase = i;
+
+ /* Append the container to the global result list */
+ list_add_tail(&gr->link, &mmc_test_result);
+
+ /*
+ * Save the pointer to the created container in our private
+ * structure.
+ */
+ test->gr = gr;
+ }
+
ret = mmc_test_cases[i].run(test);
switch (ret) {
case RESULT_OK:
@@ -1906,6 +2085,10 @@ static void mmc_test_run(struct mmc_test_card *test, int testcase)
mmc_hostname(test->card->host), ret);
}
+ /* Save the result */
+ if (gr)
+ gr->result = ret;
+
if (mmc_test_cases[i].cleanup) {
ret = mmc_test_cases[i].cleanup(test);
if (ret) {
@@ -1923,30 +2106,95 @@ static void mmc_test_run(struct mmc_test_card *test, int testcase)
mmc_hostname(test->card->host));
}
-static ssize_t mmc_test_show(struct device *dev,
- struct device_attribute *attr, char *buf)
+static void mmc_test_free_result(struct mmc_card *card)
{
+ struct mmc_test_general_result *gr, *grs;
+
mutex_lock(&mmc_test_lock);
+
+ list_for_each_entry_safe(gr, grs, &mmc_test_result, link) {
+ struct mmc_test_transfer_result *tr, *trs;
+
+ if (card && gr->card != card)
+ continue;
+
+ list_for_each_entry_safe(tr, trs, &gr->tr_lst, link) {
+ list_del(&tr->link);
+ kfree(tr);
+ }
+
+ list_del(&gr->link);
+ kfree(gr);
+ }
+
+ mutex_unlock(&mmc_test_lock);
+}
+
+static LIST_HEAD(mmc_test_file_test);
+
+static int mtf_test_show(struct seq_file *sf, void *data)
+{
+ struct mmc_card *card = (struct mmc_card *)sf->private;
+ struct mmc_test_general_result *gr;
+
+ mutex_lock(&mmc_test_lock);
+
+ list_for_each_entry(gr, &mmc_test_result, link) {
+ struct mmc_test_transfer_result *tr;
+
+ if (gr->card != card)
+ continue;
+
+ seq_printf(sf, "Test %d: %d\n", gr->testcase + 1, gr->result);
+
+ list_for_each_entry(tr, &gr->tr_lst, link) {
+ seq_printf(sf, "%u %d %lu.%09lu %u\n",
+ tr->count, tr->sectors,
+ (unsigned long)tr->ts.tv_sec,
+ (unsigned long)tr->ts.tv_nsec,
+ tr->rate);
+ }
+ }
+
mutex_unlock(&mmc_test_lock);
return 0;
}
-static ssize_t mmc_test_store(struct device *dev,
- struct device_attribute *attr, const char *buf, size_t count)
+static int mtf_test_open(struct inode *inode, struct file *file)
{
- struct mmc_card *card;
+ return single_open(file, mtf_test_show, inode->i_private);
+}
+
+static ssize_t mtf_test_write(struct file *file, const char __user *buf,
+ size_t count, loff_t *pos)
+{
+ struct seq_file *sf = (struct seq_file *)file->private_data;
+ struct mmc_card *card = (struct mmc_card *)sf->private;
struct mmc_test_card *test;
- int testcase;
+ char lbuf[12];
+ long testcase;
- card = container_of(dev, struct mmc_card, dev);
+ if (count >= sizeof(lbuf))
+ return -EINVAL;
- testcase = simple_strtol(buf, NULL, 10);
+ if (copy_from_user(lbuf, buf, count))
+ return -EFAULT;
+ lbuf[count] = '\0';
+
+ if (strict_strtol(lbuf, 10, &testcase))
+ return -EINVAL;
test = kzalloc(sizeof(struct mmc_test_card), GFP_KERNEL);
if (!test)
return -ENOMEM;
+ /*
+ * Remove all test results associated with the given card so that only
+ * data from the last run remains.
+ */
+ mmc_test_free_result(card);
+
test->card = card;
test->buffer = kzalloc(BUFFER_SIZE, GFP_KERNEL);
@@ -1973,16 +2221,78 @@ static ssize_t mmc_test_store(struct device *dev,
return count;
}
-static DEVICE_ATTR(test, S_IWUSR | S_IRUGO, mmc_test_show, mmc_test_store);
+static const struct file_operations mmc_test_fops_test = {
+ .open = mtf_test_open,
+ .read = seq_read,
+ .write = mtf_test_write,
+ .llseek = seq_lseek,
+ .release = single_release,
+};
+
+static void mmc_test_free_file_test(struct mmc_card *card)
+{
+ struct mmc_test_dbgfs_file *df, *dfs;
+
+ mutex_lock(&mmc_test_lock);
+
+ list_for_each_entry_safe(df, dfs, &mmc_test_file_test, link) {
+ if (card && df->card != card)
+ continue;
+ debugfs_remove(df->file);
+ list_del(&df->link);
+ kfree(df);
+ }
+
+ mutex_unlock(&mmc_test_lock);
+}
+
+static int mmc_test_register_file_test(struct mmc_card *card)
+{
+ struct dentry *file = NULL;
+ struct mmc_test_dbgfs_file *df;
+ int ret = 0;
+
+ mutex_lock(&mmc_test_lock);
+
+ if (card->debugfs_root)
+ file = debugfs_create_file("test", S_IWUSR | S_IRUGO,
+ card->debugfs_root, card, &mmc_test_fops_test);
+
+ if (IS_ERR_OR_NULL(file)) {
+ dev_err(&card->dev,
+ "Can't create file. Perhaps debugfs is disabled.\n");
+ ret = -ENODEV;
+ goto err;
+ }
+
+ df = kmalloc(sizeof(struct mmc_test_dbgfs_file), GFP_KERNEL);
+ if (!df) {
+ debugfs_remove(file);
+ dev_err(&card->dev,
+ "Can't allocate memory for internal usage.\n");
+ ret = -ENOMEM;
+ goto err;
+ }
+
+ df->card = card;
+ df->file = file;
+
+ list_add(&df->link, &mmc_test_file_test);
+
+err:
+ mutex_unlock(&mmc_test_lock);
+
+ return ret;
+}
static int mmc_test_probe(struct mmc_card *card)
{
int ret;
- if ((card->type != MMC_TYPE_MMC) && (card->type != MMC_TYPE_SD))
+ if (!mmc_card_mmc(card) && !mmc_card_sd(card))
return -ENODEV;
- ret = device_create_file(&card->dev, &dev_attr_test);
+ ret = mmc_test_register_file_test(card);
if (ret)
return ret;
@@ -1993,7 +2303,8 @@ static int mmc_test_probe(struct mmc_card *card)
static void mmc_test_remove(struct mmc_card *card)
{
- device_remove_file(&card->dev, &dev_attr_test);
+ mmc_test_free_result(card);
+ mmc_test_free_file_test(card);
}
static struct mmc_driver mmc_driver = {
@@ -2011,6 +2322,10 @@ static int __init mmc_test_init(void)
static void __exit mmc_test_exit(void)
{
+ /* Clear stale data if a card is still plugged in */
+ mmc_test_free_result(NULL);
+ mmc_test_free_file_test(NULL);
+
mmc_unregister_driver(&mmc_driver);
}
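
With these mmc_test changes, the sysfs "test" attribute is replaced by a per-card debugfs file: writing a test number runs that test synchronously (writing 0 runs every test), and reading back yields one "Test <n>: <result>" line per saved run followed by "<count> <sectors> <seconds>.<nanoseconds> <rate>" records for any saved transfers. A hedged userspace sketch of driving it follows; the debugfs path is an assumption that depends on the host index and card address:

#include <stdio.h>
#include <stdlib.h>

int main(void)
{
	/* assumed location, e.g. <debugfs>/mmc0/mmc0:0001/test */
	const char *path = "/sys/kernel/debug/mmc0/mmc0:0001/test";
	FILE *f;
	char line[128];

	/* writing a test case number runs that test; 0 would run them all */
	f = fopen(path, "w");
	if (!f) {
		perror("fopen for write");
		return EXIT_FAILURE;
	}
	fputs("1\n", f);
	fclose(f);

	/* reading back dumps the results saved by the last run */
	f = fopen(path, "r");
	if (!f) {
		perror("fopen for read");
		return EXIT_FAILURE;
	}
	while (fgets(line, sizeof(line), f))
		fputs(line, stdout);
	fclose(f);

	return EXIT_SUCCESS;
}
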
diff --git a/drivers/mmc/card/queue.c b/drivers/mmc/card/queue.c
index 9c0b42bfe089..4e42d030e097 100644
--- a/drivers/mmc/card/queue.c
+++ b/drivers/mmc/card/queue.c
@@ -146,7 +146,7 @@ int mmc_init_queue(struct mmc_queue *mq, struct mmc_card *card, spinlock_t *lock
}
#ifdef CONFIG_MMC_BLOCK_BOUNCE
- if (host->max_hw_segs == 1) {
+ if (host->max_segs == 1) {
unsigned int bouncesz;
bouncesz = MMC_QUEUE_BOUNCESZ;
@@ -196,21 +196,23 @@ int mmc_init_queue(struct mmc_queue *mq, struct mmc_card *card, spinlock_t *lock
blk_queue_bounce_limit(mq->queue, limit);
blk_queue_max_hw_sectors(mq->queue,
min(host->max_blk_count, host->max_req_size / 512));
- blk_queue_max_segments(mq->queue, host->max_hw_segs);
+ blk_queue_max_segments(mq->queue, host->max_segs);
blk_queue_max_segment_size(mq->queue, host->max_seg_size);
mq->sg = kmalloc(sizeof(struct scatterlist) *
- host->max_phys_segs, GFP_KERNEL);
+ host->max_segs, GFP_KERNEL);
if (!mq->sg) {
ret = -ENOMEM;
goto cleanup_queue;
}
- sg_init_table(mq->sg, host->max_phys_segs);
+ sg_init_table(mq->sg, host->max_segs);
}
- init_MUTEX(&mq->thread_sem);
+ sema_init(&mq->thread_sem, 1);
+
+ mq->thread = kthread_run(mmc_queue_thread, mq, "mmcqd/%d",
+ host->index);
- mq->thread = kthread_run(mmc_queue_thread, mq, "mmcqd");
if (IS_ERR(mq->thread)) {
ret = PTR_ERR(mq->thread);
goto free_bounce_sg;
diff --git a/drivers/mmc/core/Makefile b/drivers/mmc/core/Makefile
index 889e5f898f6f..86b479119332 100644
--- a/drivers/mmc/core/Makefile
+++ b/drivers/mmc/core/Makefile
@@ -2,10 +2,6 @@
# Makefile for the kernel mmc core.
#
-ifeq ($(CONFIG_MMC_DEBUG),y)
- EXTRA_CFLAGS += -DDEBUG
-endif
-
obj-$(CONFIG_MMC) += mmc_core.o
mmc_core-y := core.o bus.o host.o \
mmc.o mmc_ops.o sd.o sd_ops.o \
diff --git a/drivers/mmc/core/bus.c b/drivers/mmc/core/bus.c
index 7cd9749dc21d..af8dc6a2a317 100644
--- a/drivers/mmc/core/bus.c
+++ b/drivers/mmc/core/bus.c
@@ -14,6 +14,7 @@
#include <linux/device.h>
#include <linux/err.h>
#include <linux/slab.h>
+#include <linux/pm_runtime.h>
#include <linux/mmc/card.h>
#include <linux/mmc/host.h>
@@ -22,13 +23,12 @@
#include "sdio_cis.h"
#include "bus.h"
-#define dev_to_mmc_card(d) container_of(d, struct mmc_card, dev)
#define to_mmc_driver(d) container_of(d, struct mmc_driver, drv)
static ssize_t mmc_type_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
- struct mmc_card *card = dev_to_mmc_card(dev);
+ struct mmc_card *card = mmc_dev_to_card(dev);
switch (card->type) {
case MMC_TYPE_MMC:
@@ -62,7 +62,7 @@ static int mmc_bus_match(struct device *dev, struct device_driver *drv)
static int
mmc_bus_uevent(struct device *dev, struct kobj_uevent_env *env)
{
- struct mmc_card *card = dev_to_mmc_card(dev);
+ struct mmc_card *card = mmc_dev_to_card(dev);
const char *type;
int retval = 0;
@@ -105,7 +105,7 @@ mmc_bus_uevent(struct device *dev, struct kobj_uevent_env *env)
static int mmc_bus_probe(struct device *dev)
{
struct mmc_driver *drv = to_mmc_driver(dev->driver);
- struct mmc_card *card = dev_to_mmc_card(dev);
+ struct mmc_card *card = mmc_dev_to_card(dev);
return drv->probe(card);
}
@@ -113,7 +113,7 @@ static int mmc_bus_probe(struct device *dev)
static int mmc_bus_remove(struct device *dev)
{
struct mmc_driver *drv = to_mmc_driver(dev->driver);
- struct mmc_card *card = dev_to_mmc_card(dev);
+ struct mmc_card *card = mmc_dev_to_card(dev);
drv->remove(card);
@@ -123,7 +123,7 @@ static int mmc_bus_remove(struct device *dev)
static int mmc_bus_suspend(struct device *dev, pm_message_t state)
{
struct mmc_driver *drv = to_mmc_driver(dev->driver);
- struct mmc_card *card = dev_to_mmc_card(dev);
+ struct mmc_card *card = mmc_dev_to_card(dev);
int ret = 0;
if (dev->driver && drv->suspend)
@@ -134,7 +134,7 @@ static int mmc_bus_suspend(struct device *dev, pm_message_t state)
static int mmc_bus_resume(struct device *dev)
{
struct mmc_driver *drv = to_mmc_driver(dev->driver);
- struct mmc_card *card = dev_to_mmc_card(dev);
+ struct mmc_card *card = mmc_dev_to_card(dev);
int ret = 0;
if (dev->driver && drv->resume)
@@ -142,6 +142,41 @@ static int mmc_bus_resume(struct device *dev)
return ret;
}
+#ifdef CONFIG_PM_RUNTIME
+
+static int mmc_runtime_suspend(struct device *dev)
+{
+ struct mmc_card *card = mmc_dev_to_card(dev);
+
+ return mmc_power_save_host(card->host);
+}
+
+static int mmc_runtime_resume(struct device *dev)
+{
+ struct mmc_card *card = mmc_dev_to_card(dev);
+
+ return mmc_power_restore_host(card->host);
+}
+
+static int mmc_runtime_idle(struct device *dev)
+{
+ return pm_runtime_suspend(dev);
+}
+
+static const struct dev_pm_ops mmc_bus_pm_ops = {
+ .runtime_suspend = mmc_runtime_suspend,
+ .runtime_resume = mmc_runtime_resume,
+ .runtime_idle = mmc_runtime_idle,
+};
+
+#define MMC_PM_OPS_PTR (&mmc_bus_pm_ops)
+
+#else /* !CONFIG_PM_RUNTIME */
+
+#define MMC_PM_OPS_PTR NULL
+
+#endif /* !CONFIG_PM_RUNTIME */
+
static struct bus_type mmc_bus_type = {
.name = "mmc",
.dev_attrs = mmc_dev_attrs,
@@ -151,6 +186,7 @@ static struct bus_type mmc_bus_type = {
.remove = mmc_bus_remove,
.suspend = mmc_bus_suspend,
.resume = mmc_bus_resume,
+ .pm = MMC_PM_OPS_PTR,
};
int mmc_register_bus(void)
@@ -189,7 +225,7 @@ EXPORT_SYMBOL(mmc_unregister_driver);
static void mmc_release_card(struct device *dev)
{
- struct mmc_card *card = dev_to_mmc_card(dev);
+ struct mmc_card *card = mmc_dev_to_card(dev);
sdio_free_common_cis(card);
@@ -254,14 +290,16 @@ int mmc_add_card(struct mmc_card *card)
}
if (mmc_host_is_spi(card->host)) {
- printk(KERN_INFO "%s: new %s%s card on SPI\n",
+ printk(KERN_INFO "%s: new %s%s%s card on SPI\n",
mmc_hostname(card->host),
mmc_card_highspeed(card) ? "high speed " : "",
+ mmc_card_ddr_mode(card) ? "DDR " : "",
type);
} else {
- printk(KERN_INFO "%s: new %s%s card at address %04x\n",
+ printk(KERN_INFO "%s: new %s%s%s card at address %04x\n",
mmc_hostname(card->host),
mmc_card_highspeed(card) ? "high speed " : "",
+ mmc_card_ddr_mode(card) ? "DDR " : "",
type, card->rca);
}
diff --git a/drivers/mmc/core/bus.h b/drivers/mmc/core/bus.h
index 18178766ab46..00a19710b6b4 100644
--- a/drivers/mmc/core/bus.h
+++ b/drivers/mmc/core/bus.h
@@ -14,7 +14,7 @@
#define MMC_DEV_ATTR(name, fmt, args...) \
static ssize_t mmc_##name##_show (struct device *dev, struct device_attribute *attr, char *buf) \
{ \
- struct mmc_card *card = container_of(dev, struct mmc_card, dev); \
+ struct mmc_card *card = mmc_dev_to_card(dev); \
return sprintf(buf, fmt, args); \
} \
static DEVICE_ATTR(name, S_IRUGO, mmc_##name##_show, NULL)
diff --git a/drivers/mmc/core/core.c b/drivers/mmc/core/core.c
index 09eee6df0653..31ae07a36576 100644
--- a/drivers/mmc/core/core.c
+++ b/drivers/mmc/core/core.c
@@ -58,6 +58,7 @@ int mmc_assume_removable;
#else
int mmc_assume_removable = 1;
#endif
+EXPORT_SYMBOL(mmc_assume_removable);
module_param_named(removable, mmc_assume_removable, bool, 0644);
MODULE_PARM_DESC(
removable,
@@ -650,14 +651,24 @@ void mmc_set_bus_mode(struct mmc_host *host, unsigned int mode)
}
/*
- * Change data bus width of a host.
+ * Change data bus width and DDR mode of a host.
*/
-void mmc_set_bus_width(struct mmc_host *host, unsigned int width)
+void mmc_set_bus_width_ddr(struct mmc_host *host, unsigned int width,
+ unsigned int ddr)
{
host->ios.bus_width = width;
+ host->ios.ddr = ddr;
mmc_set_ios(host);
}
+/*
+ * Change data bus width of a host.
+ */
+void mmc_set_bus_width(struct mmc_host *host, unsigned int width)
+{
+ mmc_set_bus_width_ddr(host, width, MMC_SDR_MODE);
+}
+
/**
* mmc_vdd_to_ocrbitnum - Convert a voltage to the OCR bit number
* @vdd: voltage (mV)
@@ -771,8 +782,9 @@ EXPORT_SYMBOL(mmc_regulator_get_ocrmask);
/**
* mmc_regulator_set_ocr - set regulator to match host->ios voltage
- * @vdd_bit: zero for power off, else a bit number (host->ios.vdd)
+ * @mmc: the host to regulate
* @supply: regulator to use
+ * @vdd_bit: zero for power off, else a bit number (host->ios.vdd)
*
* Returns zero on success, else negative errno.
*
@@ -780,15 +792,12 @@ EXPORT_SYMBOL(mmc_regulator_get_ocrmask);
* a particular supply voltage. This would normally be called from the
* set_ios() method.
*/
-int mmc_regulator_set_ocr(struct regulator *supply, unsigned short vdd_bit)
+int mmc_regulator_set_ocr(struct mmc_host *mmc,
+ struct regulator *supply,
+ unsigned short vdd_bit)
{
int result = 0;
int min_uV, max_uV;
- int enabled;
-
- enabled = regulator_is_enabled(supply);
- if (enabled < 0)
- return enabled;
if (vdd_bit) {
int tmp;
@@ -819,17 +828,25 @@ int mmc_regulator_set_ocr(struct regulator *supply, unsigned short vdd_bit)
else
result = 0;
- if (result == 0 && !enabled)
+ if (result == 0 && !mmc->regulator_enabled) {
result = regulator_enable(supply);
- } else if (enabled) {
+ if (!result)
+ mmc->regulator_enabled = true;
+ }
+ } else if (mmc->regulator_enabled) {
result = regulator_disable(supply);
+ if (result == 0)
+ mmc->regulator_enabled = false;
}
+ if (result)
+ dev_err(mmc_dev(mmc),
+ "could not set regulator OCR (%d)\n", result);
return result;
}
EXPORT_SYMBOL(mmc_regulator_set_ocr);
-#endif
+#endif /* CONFIG_REGULATOR */
/*
* Mask off any voltages we don't support and select
@@ -907,12 +924,7 @@ static void mmc_power_up(struct mmc_host *host)
*/
mmc_delay(10);
- if (host->f_min > 400000) {
- pr_warning("%s: Minimum clock frequency too high for "
- "identification mode\n", mmc_hostname(host));
- host->ios.clock = host->f_min;
- } else
- host->ios.clock = 400000;
+ host->ios.clock = host->f_init;
host->ios.power_mode = MMC_POWER_ON;
mmc_set_ios(host);
@@ -1397,6 +1409,21 @@ int mmc_erase_group_aligned(struct mmc_card *card, unsigned int from,
}
EXPORT_SYMBOL(mmc_erase_group_aligned);
+int mmc_set_blocklen(struct mmc_card *card, unsigned int blocklen)
+{
+ struct mmc_command cmd;
+
+ if (mmc_card_blockaddr(card) || mmc_card_ddr_mode(card))
+ return 0;
+
+ memset(&cmd, 0, sizeof(struct mmc_command));
+ cmd.opcode = MMC_SET_BLOCKLEN;
+ cmd.arg = blocklen;
+ cmd.flags = MMC_RSP_SPI_R1 | MMC_RSP_R1 | MMC_CMD_AC;
+ return mmc_wait_for_cmd(card->host, &cmd, 5);
+}
+EXPORT_SYMBOL(mmc_set_blocklen);
+
void mmc_rescan(struct work_struct *work)
{
struct mmc_host *host =
@@ -1404,6 +1431,8 @@ void mmc_rescan(struct work_struct *work)
u32 ocr;
int err;
unsigned long flags;
+ int i;
+ const unsigned freqs[] = { 400000, 300000, 200000, 100000 };
spin_lock_irqsave(&host->lock, flags);
@@ -1443,55 +1472,71 @@ void mmc_rescan(struct work_struct *work)
if (host->ops->get_cd && host->ops->get_cd(host) == 0)
goto out;
- mmc_claim_host(host);
+ for (i = 0; i < ARRAY_SIZE(freqs); i++) {
+ mmc_claim_host(host);
- mmc_power_up(host);
- sdio_reset(host);
- mmc_go_idle(host);
+ if (freqs[i] >= host->f_min)
+ host->f_init = freqs[i];
+ else if (!i || freqs[i-1] > host->f_min)
+ host->f_init = host->f_min;
+ else {
+ mmc_release_host(host);
+ goto out;
+ }
+#ifdef CONFIG_MMC_DEBUG
+ pr_info("%s: %s: trying to init card at %u Hz\n",
+ mmc_hostname(host), __func__, host->f_init);
+#endif
+ mmc_power_up(host);
+ sdio_reset(host);
+ mmc_go_idle(host);
- mmc_send_if_cond(host, host->ocr_avail);
+ mmc_send_if_cond(host, host->ocr_avail);
- /*
- * First we search for SDIO...
- */
- err = mmc_send_io_op_cond(host, 0, &ocr);
- if (!err) {
- if (mmc_attach_sdio(host, ocr)) {
- mmc_claim_host(host);
- /* try SDMEM (but not MMC) even if SDIO is broken */
- if (mmc_send_app_op_cond(host, 0, &ocr))
- goto out_fail;
+ /*
+ * First we search for SDIO...
+ */
+ err = mmc_send_io_op_cond(host, 0, &ocr);
+ if (!err) {
+ if (mmc_attach_sdio(host, ocr)) {
+ mmc_claim_host(host);
+ /*
+ * Try SDMEM (but not MMC) even if SDIO
+ * is broken.
+ */
+ if (mmc_send_app_op_cond(host, 0, &ocr))
+ goto out_fail;
+
+ if (mmc_attach_sd(host, ocr))
+ mmc_power_off(host);
+ }
+ goto out;
+ }
+ /*
+ * ...then normal SD...
+ */
+ err = mmc_send_app_op_cond(host, 0, &ocr);
+ if (!err) {
if (mmc_attach_sd(host, ocr))
mmc_power_off(host);
+ goto out;
}
- goto out;
- }
- /*
- * ...then normal SD...
- */
- err = mmc_send_app_op_cond(host, 0, &ocr);
- if (!err) {
- if (mmc_attach_sd(host, ocr))
- mmc_power_off(host);
- goto out;
- }
-
- /*
- * ...and finally MMC.
- */
- err = mmc_send_op_cond(host, 0, &ocr);
- if (!err) {
- if (mmc_attach_mmc(host, ocr))
- mmc_power_off(host);
- goto out;
- }
+ /*
+ * ...and finally MMC.
+ */
+ err = mmc_send_op_cond(host, 0, &ocr);
+ if (!err) {
+ if (mmc_attach_mmc(host, ocr))
+ mmc_power_off(host);
+ goto out;
+ }
out_fail:
- mmc_release_host(host);
- mmc_power_off(host);
-
+ mmc_release_host(host);
+ mmc_power_off(host);
+ }
out:
if (host->caps & MMC_CAP_NEEDS_POLL)
mmc_schedule_delayed_work(&host->detect, HZ);
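
The reworked mmc_rescan() above retries card initialization at progressively lower clocks (400, 300, 200, 100 kHz), clamping to the host's f_min once before giving up; each lower frequency is only attempted if the previous attempt failed. A standalone sketch of just that frequency-selection logic (an illustration with arbitrary f_min values, not kernel code):

#include <stdio.h>

static const unsigned int freqs[] = { 400000, 300000, 200000, 100000 };

static void print_init_attempts(unsigned int f_min)
{
	unsigned int f_init;
	int i;

	printf("f_min=%u:", f_min);
	for (i = 0; i < (int)(sizeof(freqs) / sizeof(freqs[0])); i++) {
		if (freqs[i] >= f_min)
			f_init = freqs[i];
		else if (!i || freqs[i - 1] > f_min)
			f_init = f_min;	/* clamp to f_min exactly once */
		else
			break;		/* slower attempts would be pointless */
		printf(" %u", f_init);
	}
	printf("\n");
}

int main(void)
{
	print_init_attempts(100000);	/* -> 400000 300000 200000 100000 */
	print_init_attempts(250000);	/* -> 400000 300000 250000 */
	print_init_attempts(500000);	/* -> 500000 */
	return 0;
}
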
@@ -1514,7 +1559,7 @@ void mmc_stop_host(struct mmc_host *host)
if (host->caps & MMC_CAP_DISABLE)
cancel_delayed_work(&host->disable);
- cancel_delayed_work(&host->detect);
+ cancel_delayed_work_sync(&host->detect);
mmc_flush_scheduled_work();
/* clear pm flags now and let card drivers set them as needed */
@@ -1538,37 +1583,45 @@ void mmc_stop_host(struct mmc_host *host)
mmc_power_off(host);
}
-void mmc_power_save_host(struct mmc_host *host)
+int mmc_power_save_host(struct mmc_host *host)
{
+ int ret = 0;
+
mmc_bus_get(host);
if (!host->bus_ops || host->bus_dead || !host->bus_ops->power_restore) {
mmc_bus_put(host);
- return;
+ return -EINVAL;
}
if (host->bus_ops->power_save)
- host->bus_ops->power_save(host);
+ ret = host->bus_ops->power_save(host);
mmc_bus_put(host);
mmc_power_off(host);
+
+ return ret;
}
EXPORT_SYMBOL(mmc_power_save_host);
-void mmc_power_restore_host(struct mmc_host *host)
+int mmc_power_restore_host(struct mmc_host *host)
{
+ int ret;
+
mmc_bus_get(host);
if (!host->bus_ops || host->bus_dead || !host->bus_ops->power_restore) {
mmc_bus_put(host);
- return;
+ return -EINVAL;
}
mmc_power_up(host);
- host->bus_ops->power_restore(host);
+ ret = host->bus_ops->power_restore(host);
mmc_bus_put(host);
+
+ return ret;
}
EXPORT_SYMBOL(mmc_power_restore_host);
diff --git a/drivers/mmc/core/core.h b/drivers/mmc/core/core.h
index 9d9eef50e5d1..77240cd11bcf 100644
--- a/drivers/mmc/core/core.h
+++ b/drivers/mmc/core/core.h
@@ -22,8 +22,8 @@ struct mmc_bus_ops {
void (*detect)(struct mmc_host *);
int (*suspend)(struct mmc_host *);
int (*resume)(struct mmc_host *);
- void (*power_save)(struct mmc_host *);
- void (*power_restore)(struct mmc_host *);
+ int (*power_save)(struct mmc_host *);
+ int (*power_restore)(struct mmc_host *);
};
void mmc_attach_bus(struct mmc_host *host, const struct mmc_bus_ops *ops);
@@ -35,6 +35,8 @@ void mmc_set_chip_select(struct mmc_host *host, int mode);
void mmc_set_clock(struct mmc_host *host, unsigned int hz);
void mmc_set_bus_mode(struct mmc_host *host, unsigned int mode);
void mmc_set_bus_width(struct mmc_host *host, unsigned int width);
+void mmc_set_bus_width_ddr(struct mmc_host *host, unsigned int width,
+ unsigned int ddr);
u32 mmc_select_voltage(struct mmc_host *host, u32 ocr);
void mmc_set_timing(struct mmc_host *host, unsigned int timing);
@@ -58,7 +60,6 @@ int mmc_attach_sdio(struct mmc_host *host, u32 ocr);
/* Module parameters */
extern int use_spi_crc;
-extern int mmc_assume_removable;
/* Debugfs information for hosts and cards */
void mmc_add_host_debugfs(struct mmc_host *host);
diff --git a/drivers/mmc/core/debugfs.c b/drivers/mmc/core/debugfs.c
index 46bc6d7551a3..eed1405fd742 100644
--- a/drivers/mmc/core/debugfs.c
+++ b/drivers/mmc/core/debugfs.c
@@ -134,6 +134,33 @@ static const struct file_operations mmc_ios_fops = {
.release = single_release,
};
+static int mmc_clock_opt_get(void *data, u64 *val)
+{
+ struct mmc_host *host = data;
+
+ *val = host->ios.clock;
+
+ return 0;
+}
+
+static int mmc_clock_opt_set(void *data, u64 val)
+{
+ struct mmc_host *host = data;
+
+ /* We need this check because the input value is u64 */
+ if (val > host->f_max)
+ return -EINVAL;
+
+ mmc_claim_host(host);
+ mmc_set_clock(host, (unsigned int) val);
+ mmc_release_host(host);
+
+ return 0;
+}
+
+DEFINE_SIMPLE_ATTRIBUTE(mmc_clock_fops, mmc_clock_opt_get, mmc_clock_opt_set,
+ "%llu\n");
+
void mmc_add_host_debugfs(struct mmc_host *host)
{
struct dentry *root;
@@ -150,11 +177,15 @@ void mmc_add_host_debugfs(struct mmc_host *host)
host->debugfs_root = root;
if (!debugfs_create_file("ios", S_IRUSR, root, host, &mmc_ios_fops))
- goto err_ios;
+ goto err_node;
+
+ if (!debugfs_create_file("clock", S_IRUSR | S_IWUSR, root, host,
+ &mmc_clock_fops))
+ goto err_node;
return;
-err_ios:
+err_node:
debugfs_remove_recursive(root);
host->debugfs_root = NULL;
err_root:
diff --git a/drivers/mmc/core/host.c b/drivers/mmc/core/host.c
index d80cfdc8edd2..10b8af27e03a 100644
--- a/drivers/mmc/core/host.c
+++ b/drivers/mmc/core/host.c
@@ -94,8 +94,7 @@ struct mmc_host *mmc_alloc_host(int extra, struct device *dev)
* By default, hosts do not support SGIO or large requests.
* They have to set these according to their abilities.
*/
- host->max_hw_segs = 1;
- host->max_phys_segs = 1;
+ host->max_segs = 1;
host->max_seg_size = PAGE_CACHE_SIZE;
host->max_req_size = PAGE_CACHE_SIZE;
diff --git a/drivers/mmc/core/mmc.c b/drivers/mmc/core/mmc.c
index 6909a54c39be..77f93c3b8808 100644
--- a/drivers/mmc/core/mmc.c
+++ b/drivers/mmc/core/mmc.c
@@ -258,6 +258,21 @@ static int mmc_read_ext_csd(struct mmc_card *card)
}
switch (ext_csd[EXT_CSD_CARD_TYPE] & EXT_CSD_CARD_TYPE_MASK) {
+ case EXT_CSD_CARD_TYPE_DDR_52 | EXT_CSD_CARD_TYPE_52 |
+ EXT_CSD_CARD_TYPE_26:
+ card->ext_csd.hs_max_dtr = 52000000;
+ card->ext_csd.card_type = EXT_CSD_CARD_TYPE_DDR_52;
+ break;
+ case EXT_CSD_CARD_TYPE_DDR_1_2V | EXT_CSD_CARD_TYPE_52 |
+ EXT_CSD_CARD_TYPE_26:
+ card->ext_csd.hs_max_dtr = 52000000;
+ card->ext_csd.card_type = EXT_CSD_CARD_TYPE_DDR_1_2V;
+ break;
+ case EXT_CSD_CARD_TYPE_DDR_1_8V | EXT_CSD_CARD_TYPE_52 |
+ EXT_CSD_CARD_TYPE_26:
+ card->ext_csd.hs_max_dtr = 52000000;
+ card->ext_csd.card_type = EXT_CSD_CARD_TYPE_DDR_1_8V;
+ break;
case EXT_CSD_CARD_TYPE_52 | EXT_CSD_CARD_TYPE_26:
card->ext_csd.hs_max_dtr = 52000000;
break;
@@ -360,7 +375,7 @@ static int mmc_init_card(struct mmc_host *host, u32 ocr,
struct mmc_card *oldcard)
{
struct mmc_card *card;
- int err;
+ int err, ddr = 0;
u32 cid[4];
unsigned int max_dtr;
@@ -503,17 +518,35 @@ static int mmc_init_card(struct mmc_host *host, u32 ocr,
mmc_set_clock(host, max_dtr);
/*
- * Activate wide bus (if supported).
+ * Indicate DDR mode (if supported).
+ */
+ if (mmc_card_highspeed(card)) {
+ if ((card->ext_csd.card_type & EXT_CSD_CARD_TYPE_DDR_1_8V)
+ && (host->caps & (MMC_CAP_1_8V_DDR)))
+ ddr = MMC_1_8V_DDR_MODE;
+ else if ((card->ext_csd.card_type & EXT_CSD_CARD_TYPE_DDR_1_2V)
+ && (host->caps & (MMC_CAP_1_2V_DDR)))
+ ddr = MMC_1_2V_DDR_MODE;
+ }
+
+ /*
+ * Activate wide bus and DDR (if supported).
*/
if ((card->csd.mmca_vsn >= CSD_SPEC_VER_4) &&
(host->caps & (MMC_CAP_4_BIT_DATA | MMC_CAP_8_BIT_DATA))) {
unsigned ext_csd_bit, bus_width;
if (host->caps & MMC_CAP_8_BIT_DATA) {
- ext_csd_bit = EXT_CSD_BUS_WIDTH_8;
+ if (ddr)
+ ext_csd_bit = EXT_CSD_DDR_BUS_WIDTH_8;
+ else
+ ext_csd_bit = EXT_CSD_BUS_WIDTH_8;
bus_width = MMC_BUS_WIDTH_8;
} else {
- ext_csd_bit = EXT_CSD_BUS_WIDTH_4;
+ if (ddr)
+ ext_csd_bit = EXT_CSD_DDR_BUS_WIDTH_4;
+ else
+ ext_csd_bit = EXT_CSD_BUS_WIDTH_4;
bus_width = MMC_BUS_WIDTH_4;
}
@@ -524,12 +557,17 @@ static int mmc_init_card(struct mmc_host *host, u32 ocr,
goto free_card;
if (err) {
- printk(KERN_WARNING "%s: switch to bus width %d "
+ printk(KERN_WARNING "%s: switch to bus width %d ddr %d "
"failed\n", mmc_hostname(card->host),
- 1 << bus_width);
+ 1 << bus_width, ddr);
err = 0;
} else {
- mmc_set_bus_width(card->host, bus_width);
+ if (ddr)
+ mmc_card_set_ddr_mode(card);
+ else
+ ddr = MMC_SDR_MODE;
+
+ mmc_set_bus_width_ddr(card->host, bus_width, ddr);
}
}
@@ -623,12 +661,16 @@ static int mmc_resume(struct mmc_host *host)
return err;
}
-static void mmc_power_restore(struct mmc_host *host)
+static int mmc_power_restore(struct mmc_host *host)
{
+ int ret;
+
host->card->state &= ~MMC_STATE_HIGHSPEED;
mmc_claim_host(host);
- mmc_init_card(host, host->ocr, host->card);
+ ret = mmc_init_card(host, host->ocr, host->card);
mmc_release_host(host);
+
+ return ret;
}
static int mmc_sleep(struct mmc_host *host)
@@ -685,7 +727,7 @@ static void mmc_attach_bus_ops(struct mmc_host *host)
{
const struct mmc_bus_ops *bus_ops;
- if (host->caps & MMC_CAP_NONREMOVABLE || !mmc_assume_removable)
+ if (!mmc_card_is_removable(host))
bus_ops = &mmc_ops_unsafe;
else
bus_ops = &mmc_ops;
diff --git a/drivers/mmc/core/sd.c b/drivers/mmc/core/sd.c
index 0f5241085557..49da4dffd28e 100644
--- a/drivers/mmc/core/sd.c
+++ b/drivers/mmc/core/sd.c
@@ -722,12 +722,16 @@ static int mmc_sd_resume(struct mmc_host *host)
return err;
}
-static void mmc_sd_power_restore(struct mmc_host *host)
+static int mmc_sd_power_restore(struct mmc_host *host)
{
+ int ret;
+
host->card->state &= ~MMC_STATE_HIGHSPEED;
mmc_claim_host(host);
- mmc_sd_init_card(host, host->ocr, host->card);
+ ret = mmc_sd_init_card(host, host->ocr, host->card);
mmc_release_host(host);
+
+ return ret;
}
static const struct mmc_bus_ops mmc_sd_ops = {
@@ -750,7 +754,7 @@ static void mmc_sd_attach_bus_ops(struct mmc_host *host)
{
const struct mmc_bus_ops *bus_ops;
- if (host->caps & MMC_CAP_NONREMOVABLE || !mmc_assume_removable)
+ if (!mmc_card_is_removable(host))
bus_ops = &mmc_sd_ops_unsafe;
else
bus_ops = &mmc_sd_ops;
diff --git a/drivers/mmc/core/sdio.c b/drivers/mmc/core/sdio.c
index f332c52968b7..efef5f94ac42 100644
--- a/drivers/mmc/core/sdio.c
+++ b/drivers/mmc/core/sdio.c
@@ -10,6 +10,7 @@
*/
#include <linux/err.h>
+#include <linux/pm_runtime.h>
#include <linux/mmc/host.h>
#include <linux/mmc/card.h>
@@ -456,7 +457,6 @@ static int mmc_sdio_init_card(struct mmc_host *host, u32 ocr,
return -ENOENT;
card = oldcard;
- return 0;
}
if (card->type == MMC_TYPE_SD_COMBO) {
@@ -546,6 +546,13 @@ static void mmc_sdio_detect(struct mmc_host *host)
BUG_ON(!host);
BUG_ON(!host->card);
+ /* Make sure card is powered before detecting it */
+ if (host->caps & MMC_CAP_POWER_OFF_CARD) {
+ err = pm_runtime_get_sync(&host->card->dev);
+ if (err < 0)
+ goto out;
+ }
+
mmc_claim_host(host);
/*
@@ -555,6 +562,21 @@ static void mmc_sdio_detect(struct mmc_host *host)
mmc_release_host(host);
+ /*
+ * Tell PM core it's OK to power off the card now.
+ *
+ * The _sync variant is used in order to ensure that the card
+ * is left powered off in case an error occurred, and the card
+ * is going to be removed.
+ *
+ * Since there is no specific reason to believe a new user
+ * is about to show up at this point, the _sync variant is
+ * desirable anyway.
+ */
+ if (host->caps & MMC_CAP_POWER_OFF_CARD)
+ pm_runtime_put_sync(&host->card->dev);
+
+out:
if (err) {
mmc_sdio_remove(host);
@@ -614,14 +636,6 @@ static int mmc_sdio_resume(struct mmc_host *host)
mmc_claim_host(host);
err = mmc_sdio_init_card(host, host->ocr, host->card,
(host->pm_flags & MMC_PM_KEEP_POWER));
- if (!err) {
- /* We may have switched to 1-bit mode during suspend. */
- err = sdio_enable_4bit_bus(host->card);
- if (err > 0) {
- mmc_set_bus_width(host, MMC_BUS_WIDTH_4);
- err = 0;
- }
- }
if (!err && host->sdio_irqs)
mmc_signal_sdio_irq(host);
mmc_release_host(host);
@@ -647,11 +661,29 @@ static int mmc_sdio_resume(struct mmc_host *host)
return err;
}
+static int mmc_sdio_power_restore(struct mmc_host *host)
+{
+ int ret;
+
+ BUG_ON(!host);
+ BUG_ON(!host->card);
+
+ mmc_claim_host(host);
+ ret = mmc_sdio_init_card(host, host->ocr, host->card,
+ (host->pm_flags & MMC_PM_KEEP_POWER));
+ if (!ret && host->sdio_irqs)
+ mmc_signal_sdio_irq(host);
+ mmc_release_host(host);
+
+ return ret;
+}
+
static const struct mmc_bus_ops mmc_sdio_ops = {
.remove = mmc_sdio_remove,
.detect = mmc_sdio_detect,
.suspend = mmc_sdio_suspend,
.resume = mmc_sdio_resume,
+ .power_restore = mmc_sdio_power_restore,
};
@@ -699,6 +731,23 @@ int mmc_attach_sdio(struct mmc_host *host, u32 ocr)
card = host->card;
/*
+ * Enable runtime PM only if supported by host+card+board
+ */
+ if (host->caps & MMC_CAP_POWER_OFF_CARD) {
+ /*
+ * Let runtime PM core know our card is active
+ */
+ err = pm_runtime_set_active(&card->dev);
+ if (err)
+ goto remove;
+
+ /*
+ * Enable runtime PM for this card
+ */
+ pm_runtime_enable(&card->dev);
+ }
+
+ /*
* The number of functions on the card is encoded inside
* the ocr.
*/
@@ -712,6 +761,12 @@ int mmc_attach_sdio(struct mmc_host *host, u32 ocr)
err = sdio_init_func(host->card, i + 1);
if (err)
goto remove;
+
+ /*
+ * Enable Runtime PM for this func (if supported)
+ */
+ if (host->caps & MMC_CAP_POWER_OFF_CARD)
+ pm_runtime_enable(&card->sdio_func[i]->dev);
}
mmc_release_host(host);
diff --git a/drivers/mmc/core/sdio_bus.c b/drivers/mmc/core/sdio_bus.c
index 4a890dcb95ab..203da443e339 100644
--- a/drivers/mmc/core/sdio_bus.c
+++ b/drivers/mmc/core/sdio_bus.c
@@ -14,8 +14,10 @@
#include <linux/device.h>
#include <linux/err.h>
#include <linux/slab.h>
+#include <linux/pm_runtime.h>
#include <linux/mmc/card.h>
+#include <linux/mmc/host.h>
#include <linux/mmc/sdio_func.h>
#include "sdio_cis.h"
@@ -125,21 +127,51 @@ static int sdio_bus_probe(struct device *dev)
if (!id)
return -ENODEV;
+ /* Unbound SDIO functions are always suspended.
+ * During probe, the function is set active and the usage count
+ * is incremented. If the driver supports runtime PM,
+ * it should call pm_runtime_put_noidle() in its probe routine and
+ * pm_runtime_get_noresume() in its remove routine.
+ */
+ if (func->card->host->caps & MMC_CAP_POWER_OFF_CARD) {
+ ret = pm_runtime_get_sync(dev);
+ if (ret < 0)
+ goto out;
+ }
+
/* Set the default block size so the driver is sure it's something
* sensible. */
sdio_claim_host(func);
ret = sdio_set_block_size(func, 0);
sdio_release_host(func);
if (ret)
- return ret;
+ goto disable_runtimepm;
+
+ ret = drv->probe(func, id);
+ if (ret)
+ goto disable_runtimepm;
- return drv->probe(func, id);
+ return 0;
+
+disable_runtimepm:
+ if (func->card->host->caps & MMC_CAP_POWER_OFF_CARD)
+ pm_runtime_put_noidle(dev);
+out:
+ return ret;
}
static int sdio_bus_remove(struct device *dev)
{
struct sdio_driver *drv = to_sdio_driver(dev->driver);
struct sdio_func *func = dev_to_sdio_func(dev);
+ int ret = 0;
+
+ /* Make sure card is powered before invoking ->remove() */
+ if (func->card->host->caps & MMC_CAP_POWER_OFF_CARD) {
+ ret = pm_runtime_get_sync(dev);
+ if (ret < 0)
+ goto out;
+ }
drv->remove(func);
@@ -151,9 +183,68 @@ static int sdio_bus_remove(struct device *dev)
sdio_release_host(func);
}
+ /* First, undo the increment made directly above */
+ if (func->card->host->caps & MMC_CAP_POWER_OFF_CARD)
+ pm_runtime_put_noidle(dev);
+
+ /* Then undo the runtime PM settings in sdio_bus_probe() */
+ if (func->card->host->caps & MMC_CAP_POWER_OFF_CARD)
+ pm_runtime_put_noidle(dev);
+
+out:
+ return ret;
+}
+
+#ifdef CONFIG_PM_RUNTIME
+
+static int sdio_bus_pm_prepare(struct device *dev)
+{
+ struct sdio_func *func = dev_to_sdio_func(dev);
+
+ /*
+ * Resume an SDIO device which was suspended at run time at this
+ * point, in order to allow standard SDIO suspend/resume paths
+ * to keep working as usual.
+ *
+ * Ultimately, the SDIO driver itself will decide (in its
+ * suspend handler, or lack thereof) whether the card should be
+ * removed or kept, and if kept, at what power state.
+ *
+ * At this point, the PM core has increased our use count, so it's
+ * safe to directly resume the device. After system is resumed
+ * again, PM core will drop back its runtime PM use count, and if
+ * needed device will be suspended again.
+ *
+ * The end result is guaranteed to be a power state that is
+ * coherent with the device's runtime PM use count.
+ *
+ * The return value of pm_runtime_resume is deliberately unchecked
+ * since there is little point in failing system suspend if a
+ * device can't be resumed.
+ */
+ if (func->card->host->caps & MMC_CAP_POWER_OFF_CARD)
+ pm_runtime_resume(dev);
+
return 0;
}
+static const struct dev_pm_ops sdio_bus_pm_ops = {
+ SET_RUNTIME_PM_OPS(
+ pm_generic_runtime_suspend,
+ pm_generic_runtime_resume,
+ pm_generic_runtime_idle
+ )
+ .prepare = sdio_bus_pm_prepare,
+};
+
+#define SDIO_PM_OPS_PTR (&sdio_bus_pm_ops)
+
+#else /* !CONFIG_PM_RUNTIME */
+
+#define SDIO_PM_OPS_PTR NULL
+
+#endif /* !CONFIG_PM_RUNTIME */
+
static struct bus_type sdio_bus_type = {
.name = "sdio",
.dev_attrs = sdio_dev_attrs,
@@ -161,6 +252,7 @@ static struct bus_type sdio_bus_type = {
.uevent = sdio_bus_uevent,
.probe = sdio_bus_probe,
.remove = sdio_bus_remove,
+ .pm = SDIO_PM_OPS_PTR,
};
int sdio_register_bus(void)
diff --git a/drivers/mmc/host/Kconfig b/drivers/mmc/host/Kconfig
index 68d12794cfd9..d618e8673996 100644
--- a/drivers/mmc/host/Kconfig
+++ b/drivers/mmc/host/Kconfig
@@ -130,6 +130,16 @@ config MMC_SDHCI_CNS3XXX
If unsure, say N.
+config MMC_SDHCI_ESDHC_IMX
+ bool "SDHCI platform support for the Freescale eSDHC i.MX controller"
+ depends on MMC_SDHCI_PLTFM && (ARCH_MX25 || ARCH_MX35 || ARCH_MX5)
+ select MMC_SDHCI_IO_ACCESSORS
+ help
+ This selects the Freescale eSDHC controller support on the platform
+ bus, found on platforms like mx35/51.
+
+ If unsure, say N.
+
config MMC_SDHCI_S3C
tristate "SDHCI support on Samsung S3C SoC"
depends on MMC_SDHCI && PLAT_SAMSUNG
@@ -145,6 +155,18 @@ config MMC_SDHCI_S3C
If unsure, say N.
+config MMC_SDHCI_PXA
+ tristate "Marvell PXA168/PXA910/MMP2 SD Host Controller support"
+ depends on ARCH_PXA || ARCH_MMP
+ select MMC_SDHCI
+ select MMC_SDHCI_IO_ACCESSORS
+ help
+ This selects the Marvell(R) PXA168/PXA910/MMP2 SD Host Controller.
+ If you have a PXA168/PXA910/MMP2 platform with SD Host Controller
+ and a card slot, say Y or M here.
+
+ If unsure, say N.
+
config MMC_SDHCI_SPEAR
tristate "SDHCI support on ST SPEAr platform"
depends on MMC_SDHCI && PLAT_SPEAR
@@ -237,7 +259,7 @@ endchoice
config MMC_ATMELMCI_DMA
bool "Atmel MCI DMA support (EXPERIMENTAL)"
- depends on MMC_ATMELMCI && AVR32 && DMA_ENGINE && EXPERIMENTAL
+ depends on MMC_ATMELMCI && (AVR32 || ARCH_AT91SAM9G45) && DMA_ENGINE && EXPERIMENTAL
help
Say Y here to have the Atmel MCI driver use a DMA engine to
do data transfers and thus increase the throughput and
@@ -395,6 +417,7 @@ config MMC_TMIO
config MMC_CB710
tristate "ENE CB710 MMC/SD Interface support"
depends on PCI
+ select MISC_DEVICES
select CB710_CORE
help
This option enables support for MMC/SD part of ENE CB710/720 Flash
@@ -451,3 +474,17 @@ config MMC_JZ4740
SoCs.
If you have a board based on such a SoC and with a SD/MMC slot,
say Y or M here.
+
+config MMC_USHC
+ tristate "USB SD Host Controller (USHC) support"
+ depends on USB
+ help
+ This selects support for USB SD Host Controllers based on
+ the Cypress Astoria chip with firmware compliant with CSR's
+ USB SD Host Controller specification (CS-118793-SP).
+
+ CSR boards with this device include: USB<>SDIO (M1985v2),
+ and Ultrasira.
+
+ Note: These controllers only support SDIO cards and do not
+ support MMC or SD memory cards.
diff --git a/drivers/mmc/host/Makefile b/drivers/mmc/host/Makefile
index 840bcb52d82f..7b645ff43b30 100644
--- a/drivers/mmc/host/Makefile
+++ b/drivers/mmc/host/Makefile
@@ -2,16 +2,13 @@
# Makefile for MMC/SD host controller drivers
#
-ifeq ($(CONFIG_MMC_DEBUG),y)
- EXTRA_CFLAGS += -DDEBUG
-endif
-
obj-$(CONFIG_MMC_ARMMMCI) += mmci.o
obj-$(CONFIG_MMC_PXA) += pxamci.o
obj-$(CONFIG_MMC_IMX) += imxmmc.o
obj-$(CONFIG_MMC_MXC) += mxcmmc.o
obj-$(CONFIG_MMC_SDHCI) += sdhci.o
obj-$(CONFIG_MMC_SDHCI_PCI) += sdhci-pci.o
+obj-$(CONFIG_MMC_SDHCI_PXA) += sdhci-pxa.o
obj-$(CONFIG_MMC_SDHCI_S3C) += sdhci-s3c.o
obj-$(CONFIG_MMC_SDHCI_SPEAR) += sdhci-spear.o
obj-$(CONFIG_MMC_WBSD) += wbsd.o
@@ -36,10 +33,12 @@ obj-$(CONFIG_MMC_VIA_SDMMC) += via-sdmmc.o
obj-$(CONFIG_SDH_BFIN) += bfin_sdh.o
obj-$(CONFIG_MMC_SH_MMCIF) += sh_mmcif.o
obj-$(CONFIG_MMC_JZ4740) += jz4740_mmc.o
+obj-$(CONFIG_MMC_USHC) += ushc.o
obj-$(CONFIG_MMC_SDHCI_PLTFM) += sdhci-platform.o
sdhci-platform-y := sdhci-pltfm.o
sdhci-platform-$(CONFIG_MMC_SDHCI_CNS3XXX) += sdhci-cns3xxx.o
+sdhci-platform-$(CONFIG_MMC_SDHCI_ESDHC_IMX) += sdhci-esdhc-imx.o
obj-$(CONFIG_MMC_SDHCI_OF) += sdhci-of.o
sdhci-of-y := sdhci-of-core.o
diff --git a/drivers/mmc/host/at91_mci.c b/drivers/mmc/host/at91_mci.c
index 87226cd202a5..591ab540b407 100644
--- a/drivers/mmc/host/at91_mci.c
+++ b/drivers/mmc/host/at91_mci.c
@@ -928,7 +928,7 @@ static int __init at91_mci_probe(struct platform_device *pdev)
if (!res)
return -ENXIO;
- if (!request_mem_region(res->start, res->end - res->start + 1, DRIVER_NAME))
+ if (!request_mem_region(res->start, resource_size(res), DRIVER_NAME))
return -EBUSY;
mmc = mmc_alloc_host(sizeof(struct at91mci_host), &pdev->dev);
@@ -947,8 +947,7 @@ static int __init at91_mci_probe(struct platform_device *pdev)
mmc->max_blk_size = MCI_MAXBLKSIZE;
mmc->max_blk_count = MCI_BLKATONCE;
mmc->max_req_size = MCI_BUFSIZE;
- mmc->max_phys_segs = MCI_BLKATONCE;
- mmc->max_hw_segs = MCI_BLKATONCE;
+ mmc->max_segs = MCI_BLKATONCE;
mmc->max_seg_size = MCI_BUFSIZE;
host = mmc_priv(mmc);
@@ -1017,7 +1016,7 @@ static int __init at91_mci_probe(struct platform_device *pdev)
/*
* Map I/O region
*/
- host->baseaddr = ioremap(res->start, res->end - res->start + 1);
+ host->baseaddr = ioremap(res->start, resource_size(res));
if (!host->baseaddr) {
ret = -ENOMEM;
goto fail1;
@@ -1093,7 +1092,7 @@ fail4b:
fail5:
mmc_free_host(mmc);
fail6:
- release_mem_region(res->start, res->end - res->start + 1);
+ release_mem_region(res->start, resource_size(res));
dev_err(&pdev->dev, "probe failed, err %d\n", ret);
return ret;
}
@@ -1138,7 +1137,7 @@ static int __exit at91_mci_remove(struct platform_device *pdev)
iounmap(host->baseaddr);
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- release_mem_region(res->start, res->end - res->start + 1);
+ release_mem_region(res->start, resource_size(res));
mmc_free_host(mmc);
platform_set_drvdata(pdev, NULL);
diff --git a/drivers/mmc/host/atmel-mci.c b/drivers/mmc/host/atmel-mci.c
index 95ef864ad8f9..301351a5d838 100644
--- a/drivers/mmc/host/atmel-mci.c
+++ b/drivers/mmc/host/atmel-mci.c
@@ -1618,8 +1618,7 @@ static int __init atmci_init_slot(struct atmel_mci *host,
if (slot_data->bus_width >= 4)
mmc->caps |= MMC_CAP_4_BIT_DATA;
- mmc->max_hw_segs = 64;
- mmc->max_phys_segs = 64;
+ mmc->max_segs = 64;
mmc->max_req_size = 32768 * 512;
mmc->max_blk_size = 32768;
mmc->max_blk_count = 512;
@@ -1777,7 +1776,7 @@ static int __init atmci_probe(struct platform_device *pdev)
}
ret = -ENOMEM;
- host->regs = ioremap(regs->start, regs->end - regs->start + 1);
+ host->regs = ioremap(regs->start, resource_size(regs));
if (!host->regs)
goto err_ioremap;
diff --git a/drivers/mmc/host/au1xmmc.c b/drivers/mmc/host/au1xmmc.c
index c8da5d30a861..41e5a60493ad 100644
--- a/drivers/mmc/host/au1xmmc.c
+++ b/drivers/mmc/host/au1xmmc.c
@@ -964,7 +964,7 @@ static int __devinit au1xmmc_probe(struct platform_device *pdev)
goto out1;
}
- host->ioarea = request_mem_region(r->start, r->end - r->start + 1,
+ host->ioarea = request_mem_region(r->start, resource_size(r),
pdev->name);
if (!host->ioarea) {
dev_err(&pdev->dev, "mmio already in use\n");
@@ -998,7 +998,7 @@ static int __devinit au1xmmc_probe(struct platform_device *pdev)
mmc->f_max = 24000000;
mmc->max_seg_size = AU1XMMC_DESCRIPTOR_SIZE;
- mmc->max_phys_segs = AU1XMMC_DESCRIPTOR_COUNT;
+ mmc->max_segs = AU1XMMC_DESCRIPTOR_COUNT;
mmc->max_blk_size = 2048;
mmc->max_blk_count = 512;
diff --git a/drivers/mmc/host/bfin_sdh.c b/drivers/mmc/host/bfin_sdh.c
index 4b0e677d7295..bac7d62866b7 100644
--- a/drivers/mmc/host/bfin_sdh.c
+++ b/drivers/mmc/host/bfin_sdh.c
@@ -469,7 +469,7 @@ static int __devinit sdh_probe(struct platform_device *pdev)
}
mmc->ops = &sdh_ops;
- mmc->max_phys_segs = 32;
+ mmc->max_segs = 32;
mmc->max_seg_size = 1 << 16;
mmc->max_blk_size = 1 << 11;
mmc->max_blk_count = 1 << 11;
diff --git a/drivers/mmc/host/cb710-mmc.c b/drivers/mmc/host/cb710-mmc.c
index ca3bdc831900..66b4ce587f4b 100644
--- a/drivers/mmc/host/cb710-mmc.c
+++ b/drivers/mmc/host/cb710-mmc.c
@@ -25,7 +25,7 @@ static const u8 cb710_src_freq_mhz[16] = {
50, 55, 60, 65, 70, 75, 80, 85
};
-static void cb710_mmc_set_clock(struct mmc_host *mmc, int hz)
+static void cb710_mmc_select_clock_divider(struct mmc_host *mmc, int hz)
{
struct cb710_slot *slot = cb710_mmc_to_slot(mmc);
struct pci_dev *pdev = cb710_slot_to_chip(slot)->pdev;
@@ -33,8 +33,11 @@ static void cb710_mmc_set_clock(struct mmc_host *mmc, int hz)
u32 divider_idx;
int src_hz;
- /* this is magic, unverifiable for me, unless I get
- * MMC card with cables connected to bus signals */
+ /* on CB710 in HP nx9500:
+ * src_freq_idx == 0
+ * indexes 1-7 work as written in the table
+ * indexes 0,8-15 give no clock output
+ */
pci_read_config_dword(pdev, 0x48, &src_freq_idx);
src_freq_idx = (src_freq_idx >> 16) & 0xF;
src_hz = cb710_src_freq_mhz[src_freq_idx] * 1000000;
@@ -46,13 +49,15 @@ static void cb710_mmc_set_clock(struct mmc_host *mmc, int hz)
if (src_freq_idx)
divider_idx |= 0x8;
+ else if (divider_idx == 0)
+ divider_idx = 1;
cb710_pci_update_config_reg(pdev, 0x40, ~0xF0000000, divider_idx << 28);
dev_dbg(cb710_slot_dev(slot),
- "clock set to %d Hz, wanted %d Hz; flag = %d\n",
+ "clock set to %d Hz, wanted %d Hz; src_freq_idx = %d, divider_idx = %d|%d\n",
src_hz >> cb710_clock_divider_log2[divider_idx & 7],
- hz, (divider_idx & 8) != 0);
+ hz, src_freq_idx, divider_idx & 7, divider_idx & 8);
}
static void __cb710_mmc_enable_irq(struct cb710_slot *slot,
@@ -95,16 +100,8 @@ static void cb710_mmc_reset_events(struct cb710_slot *slot)
cb710_write_port_8(slot, CB710_MMC_STATUS2_PORT, 0xFF);
}
-static int cb710_mmc_is_card_inserted(struct cb710_slot *slot)
-{
- return cb710_read_port_8(slot, CB710_MMC_STATUS3_PORT)
- & CB710_MMC_S3_CARD_DETECTED;
-}
-
static void cb710_mmc_enable_4bit_data(struct cb710_slot *slot, int enable)
{
- dev_dbg(cb710_slot_dev(slot), "configuring %d-data-line%s mode\n",
- enable ? 4 : 1, enable ? "s" : "");
if (enable)
cb710_modify_port_8(slot, CB710_MMC_CONFIG1_PORT,
CB710_MMC_C1_4BIT_DATA_BUS, 0);
@@ -494,13 +491,8 @@ static void cb710_mmc_request(struct mmc_host *mmc, struct mmc_request *mrq)
reader->mrq = mrq;
cb710_mmc_enable_irq(slot, CB710_MMC_IE_TEST_MASK, 0);
- if (cb710_mmc_is_card_inserted(slot)) {
- if (!cb710_mmc_command(mmc, mrq->cmd) && mrq->stop)
- cb710_mmc_command(mmc, mrq->stop);
- mdelay(1);
- } else {
- mrq->cmd->error = -ENOMEDIUM;
- }
+ if (!cb710_mmc_command(mmc, mrq->cmd) && mrq->stop)
+ cb710_mmc_command(mmc, mrq->stop);
tasklet_schedule(&reader->finish_req_tasklet);
}
@@ -512,7 +504,7 @@ static int cb710_mmc_powerup(struct cb710_slot *slot)
#endif
int err;
- /* a lot of magic; see comment in cb710_mmc_set_clock() */
+ /* a lot of magic for now */
dev_dbg(cb710_slot_dev(slot), "bus powerup\n");
cb710_dump_regs(chip, CB710_DUMP_REGS_MMC);
err = cb710_wait_while_busy(slot, CB710_MMC_S2_BUSY_20);
@@ -572,13 +564,7 @@ static void cb710_mmc_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
struct cb710_mmc_reader *reader = mmc_priv(mmc);
int err;
- cb710_mmc_set_clock(mmc, ios->clock);
-
- if (!cb710_mmc_is_card_inserted(slot)) {
- dev_dbg(cb710_slot_dev(slot),
- "no card inserted - ignoring bus powerup request\n");
- ios->power_mode = MMC_POWER_OFF;
- }
+ cb710_mmc_select_clock_divider(mmc, ios->clock);
if (ios->power_mode != reader->last_power_mode)
switch (ios->power_mode) {
@@ -619,6 +605,14 @@ static int cb710_mmc_get_ro(struct mmc_host *mmc)
& CB710_MMC_S3_WRITE_PROTECTED;
}
+static int cb710_mmc_get_cd(struct mmc_host *mmc)
+{
+ struct cb710_slot *slot = cb710_mmc_to_slot(mmc);
+
+ return cb710_read_port_8(slot, CB710_MMC_STATUS3_PORT)
+ & CB710_MMC_S3_CARD_DETECTED;
+}
+
static int cb710_mmc_irq_handler(struct cb710_slot *slot)
{
struct mmc_host *mmc = cb710_slot_to_mmc(slot);
@@ -664,7 +658,8 @@ static void cb710_mmc_finish_request_tasklet(unsigned long data)
static const struct mmc_host_ops cb710_mmc_host = {
.request = cb710_mmc_request,
.set_ios = cb710_mmc_set_ios,
- .get_ro = cb710_mmc_get_ro
+ .get_ro = cb710_mmc_get_ro,
+ .get_cd = cb710_mmc_get_cd,
};
#ifdef CONFIG_PM
@@ -746,6 +741,7 @@ static int __devinit cb710_mmc_init(struct platform_device *pdev)
err_free_mmc:
dev_dbg(cb710_slot_dev(slot), "mmc_add_host() failed: %d\n", err);
+ cb710_set_irq_handler(slot, NULL);
mmc_free_host(mmc);
return err;
}
diff --git a/drivers/mmc/host/davinci_mmc.c b/drivers/mmc/host/davinci_mmc.c
index 33d9f1b00862..e15547cf701f 100644
--- a/drivers/mmc/host/davinci_mmc.c
+++ b/drivers/mmc/host/davinci_mmc.c
@@ -138,7 +138,7 @@
/*
* One scatterlist dma "segment" is at most MAX_CCNT rw_threshold units,
* and we handle up to MAX_NR_SG segments. MMC_BLOCK_BOUNCE kicks in only
- * for drivers with max_hw_segs == 1, making the segments bigger (64KB)
+ * for drivers with max_segs == 1, making the segments bigger (64KB)
* than the page or two that's otherwise typical. nr_sg (passed from
* platform data) == 16 gives at least the same throughput boost, using
* EDMA transfer linkage instead of spending CPU time copying pages.
@@ -1239,8 +1239,7 @@ static int __init davinci_mmcsd_probe(struct platform_device *pdev)
* Each hw_seg uses one EDMA parameter RAM slot, always one
* channel and then usually some linked slots.
*/
- mmc->max_hw_segs = 1 + host->n_link;
- mmc->max_phys_segs = mmc->max_hw_segs;
+ mmc->max_segs = 1 + host->n_link;
/* EDMA limit per hw segment (one or two MBytes) */
mmc->max_seg_size = MAX_CCNT * rw_threshold;
@@ -1250,8 +1249,7 @@ static int __init davinci_mmcsd_probe(struct platform_device *pdev)
mmc->max_blk_count = 65535; /* NBLK is 16 bits */
mmc->max_req_size = mmc->max_blk_size * mmc->max_blk_count;
- dev_dbg(mmc_dev(host->mmc), "max_phys_segs=%d\n", mmc->max_phys_segs);
- dev_dbg(mmc_dev(host->mmc), "max_hw_segs=%d\n", mmc->max_hw_segs);
+ dev_dbg(mmc_dev(host->mmc), "max_segs=%d\n", mmc->max_segs);
dev_dbg(mmc_dev(host->mmc), "max_blk_size=%d\n", mmc->max_blk_size);
dev_dbg(mmc_dev(host->mmc), "max_req_size=%d\n", mmc->max_req_size);
dev_dbg(mmc_dev(host->mmc), "max_seg_size=%d\n", mmc->max_seg_size);
diff --git a/drivers/mmc/host/imxmmc.c b/drivers/mmc/host/imxmmc.c
index 5a950b16d9e6..881f7ba545ae 100644
--- a/drivers/mmc/host/imxmmc.c
+++ b/drivers/mmc/host/imxmmc.c
@@ -966,8 +966,7 @@ static int __init imxmci_probe(struct platform_device *pdev)
mmc->caps = MMC_CAP_4_BIT_DATA;
/* MMC core transfer sizes tunable parameters */
- mmc->max_hw_segs = 64;
- mmc->max_phys_segs = 64;
+ mmc->max_segs = 64;
mmc->max_seg_size = 64*512; /* default PAGE_CACHE_SIZE */
mmc->max_req_size = 64*512; /* default PAGE_CACHE_SIZE */
mmc->max_blk_size = 2048;
diff --git a/drivers/mmc/host/jz4740_mmc.c b/drivers/mmc/host/jz4740_mmc.c
index ad4f9870e3ca..b3a0ab0e4c2b 100644
--- a/drivers/mmc/host/jz4740_mmc.c
+++ b/drivers/mmc/host/jz4740_mmc.c
@@ -876,8 +876,7 @@ static int __devinit jz4740_mmc_probe(struct platform_device* pdev)
mmc->max_blk_count = (1 << 15) - 1;
mmc->max_req_size = mmc->max_blk_size * mmc->max_blk_count;
- mmc->max_phys_segs = 128;
- mmc->max_hw_segs = 128;
+ mmc->max_segs = 128;
mmc->max_seg_size = mmc->max_req_size;
host->mmc = mmc;
diff --git a/drivers/mmc/host/mmc_spi.c b/drivers/mmc/host/mmc_spi.c
index 62a35822003e..fd877f633dd2 100644
--- a/drivers/mmc/host/mmc_spi.c
+++ b/drivers/mmc/host/mmc_spi.c
@@ -1055,6 +1055,8 @@ static void mmc_spi_request(struct mmc_host *mmc, struct mmc_request *mrq)
{
struct mmc_spi_host *host = mmc_priv(mmc);
int status = -EINVAL;
+ int crc_retry = 5;
+ struct mmc_command stop;
#ifdef DEBUG
/* MMC core and layered drivers *MUST* issue SPI-aware commands */
@@ -1087,10 +1089,29 @@ static void mmc_spi_request(struct mmc_host *mmc, struct mmc_request *mrq)
/* request exclusive bus access */
spi_bus_lock(host->spi->master);
+crc_recover:
/* issue command; then optionally data and stop */
status = mmc_spi_command_send(host, mrq, mrq->cmd, mrq->data != NULL);
if (status == 0 && mrq->data) {
mmc_spi_data_do(host, mrq->cmd, mrq->data, mrq->data->blksz);
+
+ /*
+ * The SPI bus is not always reliable for large data transfers.
+ * If the SD device reports an occasional CRC error during a
+ * data read/write over SPI, the transfer can often be recovered
+ * by repeating the last SD command. The retry count is set to 5
+ * to ensure the driver passes stress tests.
+ */
+ if (mrq->data->error == -EILSEQ && crc_retry) {
+ stop.opcode = MMC_STOP_TRANSMISSION;
+ stop.arg = 0;
+ stop.flags = MMC_RSP_SPI_R1B | MMC_RSP_R1B | MMC_CMD_AC;
+ status = mmc_spi_command_send(host, mrq, &stop, 0);
+ crc_retry--;
+ mrq->data->error = 0;
+ goto crc_recover;
+ }
+
if (mrq->stop)
status = mmc_spi_command_send(host, mrq, mrq->stop, 0);
else
@@ -1345,8 +1366,7 @@ static int mmc_spi_probe(struct spi_device *spi)
mmc->ops = &mmc_spi_ops;
mmc->max_blk_size = MMC_SPI_BLOCKSIZE;
- mmc->max_hw_segs = MMC_SPI_BLOCKSATONCE;
- mmc->max_phys_segs = MMC_SPI_BLOCKSATONCE;
+ mmc->max_segs = MMC_SPI_BLOCKSATONCE;
mmc->max_req_size = MMC_SPI_BLOCKSATONCE * MMC_SPI_BLOCKSIZE;
mmc->max_blk_count = MMC_SPI_BLOCKSATONCE;
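The recovery path added to mmc_spi_request() above is, at its core, a bounded retry loop: when the data phase reports a CRC error (-EILSEQ), the host issues STOP_TRANSMISSION, clears the error and replays the whole request, giving up after five attempts. A minimal self-contained sketch of that control flow, assuming nothing about the mmc_spi internals; struct xfer, send_cmd() and do_data() are illustrative stand-ins only:

#include <errno.h>

struct xfer {
        int data_error;         /* a real data phase would set this to -EILSEQ on a CRC error */
};

static int send_cmd(struct xfer *x, int opcode)
{
        (void)x; (void)opcode;  /* stand-in: a real implementation talks to the card */
        return 0;
}

static void do_data(struct xfer *x)
{
        x->data_error = 0;      /* stand-in: a real data phase may report -EILSEQ */
}

static int submit_with_crc_retry(struct xfer *x, int cmd, int stop)
{
        int crc_retry = 5;      /* same bound as the patch above */
        int status;

retry:
        status = send_cmd(x, cmd);
        if (status == 0) {
                do_data(x);
                if (x->data_error == -EILSEQ && crc_retry) {
                        crc_retry--;
                        send_cmd(x, stop);      /* CMD12 resets the card's data state */
                        x->data_error = 0;
                        goto retry;
                }
        }
        return status;
}

int main(void)
{
        struct xfer x = { 0 };

        /* 18 = READ_MULTIPLE_BLOCK, 12 = STOP_TRANSMISSION, used only as example opcodes */
        return submit_with_crc_retry(&x, 18, 12);
}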
diff --git a/drivers/mmc/host/mmci.c b/drivers/mmc/host/mmci.c
index f2e02d7d9f3d..87b4fc6c98c2 100644
--- a/drivers/mmc/host/mmci.c
+++ b/drivers/mmc/host/mmci.c
@@ -523,19 +523,27 @@ static void mmci_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
struct mmci_host *host = mmc_priv(mmc);
u32 pwr = 0;
unsigned long flags;
+ int ret;
switch (ios->power_mode) {
case MMC_POWER_OFF:
- if(host->vcc &&
- regulator_is_enabled(host->vcc))
- regulator_disable(host->vcc);
+ if (host->vcc)
+ ret = mmc_regulator_set_ocr(mmc, host->vcc, 0);
break;
case MMC_POWER_UP:
-#ifdef CONFIG_REGULATOR
- if (host->vcc)
- /* This implicitly enables the regulator */
- mmc_regulator_set_ocr(host->vcc, ios->vdd);
-#endif
+ if (host->vcc) {
+ ret = mmc_regulator_set_ocr(mmc, host->vcc, ios->vdd);
+ if (ret) {
+ dev_err(mmc_dev(mmc), "unable to set OCR\n");
+ /*
+ * The .set_ios() function in the mmc_host_ops
+ * struct returns void, and failing to set the
+ * power should be rare, so we print an error
+ * and return here.
+ */
+ return;
+ }
+ }
if (host->plat->vdd_handler)
pwr |= host->plat->vdd_handler(mmc_dev(mmc), ios->vdd,
ios->power_mode);
@@ -734,8 +742,7 @@ static int __devinit mmci_probe(struct amba_device *dev, struct amba_id *id)
/*
* We can do SGIO
*/
- mmc->max_hw_segs = 16;
- mmc->max_phys_segs = NR_SG;
+ mmc->max_segs = NR_SG;
/*
* Since only a certain number of bits are valid in the data length
@@ -870,8 +877,8 @@ static int __devexit mmci_remove(struct amba_device *dev)
clk_disable(host->clk);
clk_put(host->clk);
- if (regulator_is_enabled(host->vcc))
- regulator_disable(host->vcc);
+ if (host->vcc)
+ mmc_regulator_set_ocr(mmc, host->vcc, 0);
regulator_put(host->vcc);
mmc_free_host(mmc);
diff --git a/drivers/mmc/host/msm_sdcc.c b/drivers/mmc/host/msm_sdcc.c
index ff7752348b11..1290d14c5839 100644
--- a/drivers/mmc/host/msm_sdcc.c
+++ b/drivers/mmc/host/msm_sdcc.c
@@ -1164,8 +1164,7 @@ msmsdcc_probe(struct platform_device *pdev)
mmc->caps |= MMC_CAP_SDIO_IRQ;
mmc->caps |= MMC_CAP_MMC_HIGHSPEED | MMC_CAP_SD_HIGHSPEED;
- mmc->max_phys_segs = NR_SG;
- mmc->max_hw_segs = NR_SG;
+ mmc->max_segs = NR_SG;
mmc->max_blk_size = 4096; /* MCI_DATA_CTL BLOCKSIZE up to 4096 */
mmc->max_blk_count = 65536;
diff --git a/drivers/mmc/host/mvsdio.c b/drivers/mmc/host/mvsdio.c
index 366eefa77c5a..a5bf60e01af4 100644
--- a/drivers/mmc/host/mvsdio.c
+++ b/drivers/mmc/host/mvsdio.c
@@ -742,8 +742,7 @@ static int __init mvsd_probe(struct platform_device *pdev)
mmc->max_blk_size = 2048;
mmc->max_blk_count = 65535;
- mmc->max_hw_segs = 1;
- mmc->max_phys_segs = 1;
+ mmc->max_segs = 1;
mmc->max_seg_size = mmc->max_blk_size * mmc->max_blk_count;
mmc->max_req_size = mmc->max_blk_size * mmc->max_blk_count;
diff --git a/drivers/mmc/host/mxcmmc.c b/drivers/mmc/host/mxcmmc.c
index 350f78e86245..bdd2cbb87cba 100644
--- a/drivers/mmc/host/mxcmmc.c
+++ b/drivers/mmc/host/mxcmmc.c
@@ -790,8 +790,7 @@ static int mxcmci_probe(struct platform_device *pdev)
mmc->caps = MMC_CAP_4_BIT_DATA | MMC_CAP_SDIO_IRQ;
/* MMC core transfer sizes tunable parameters */
- mmc->max_hw_segs = 64;
- mmc->max_phys_segs = 64;
+ mmc->max_segs = 64;
mmc->max_blk_size = 2048;
mmc->max_blk_count = 65535;
mmc->max_req_size = mmc->max_blk_size * mmc->max_blk_count;
diff --git a/drivers/mmc/host/omap.c b/drivers/mmc/host/omap.c
index d98ddcfac5e5..0c7e37f496ef 100644
--- a/drivers/mmc/host/omap.c
+++ b/drivers/mmc/host/omap.c
@@ -1335,8 +1335,7 @@ static int __init mmc_omap_new_slot(struct mmc_omap_host *host, int id)
* NOTE max_seg_size assumption that small blocks aren't
* normally used (except e.g. for reading SD registers).
*/
- mmc->max_phys_segs = 32;
- mmc->max_hw_segs = 32;
+ mmc->max_segs = 32;
mmc->max_blk_size = 2048; /* BLEN is 11 bits (+1) */
mmc->max_blk_count = 2048; /* NBLK is 11 bits (+1) */
mmc->max_req_size = mmc->max_blk_size * mmc->max_blk_count;
diff --git a/drivers/mmc/host/omap_hsmmc.c b/drivers/mmc/host/omap_hsmmc.c
index 4526d2791f29..5d46021cbb57 100644
--- a/drivers/mmc/host/omap_hsmmc.c
+++ b/drivers/mmc/host/omap_hsmmc.c
@@ -250,9 +250,9 @@ static int omap_hsmmc_1_set_power(struct device *dev, int slot, int power_on,
mmc_slot(host).before_set_reg(dev, slot, power_on, vdd);
if (power_on)
- ret = mmc_regulator_set_ocr(host->vcc, vdd);
+ ret = mmc_regulator_set_ocr(host->mmc, host->vcc, vdd);
else
- ret = mmc_regulator_set_ocr(host->vcc, 0);
+ ret = mmc_regulator_set_ocr(host->mmc, host->vcc, 0);
if (mmc_slot(host).after_set_reg)
mmc_slot(host).after_set_reg(dev, slot, power_on, vdd);
@@ -291,18 +291,23 @@ static int omap_hsmmc_23_set_power(struct device *dev, int slot, int power_on,
* chips/cards need an interface voltage rail too.
*/
if (power_on) {
- ret = mmc_regulator_set_ocr(host->vcc, vdd);
+ ret = mmc_regulator_set_ocr(host->mmc, host->vcc, vdd);
/* Enable interface voltage rail, if needed */
if (ret == 0 && host->vcc_aux) {
ret = regulator_enable(host->vcc_aux);
if (ret < 0)
- ret = mmc_regulator_set_ocr(host->vcc, 0);
+ ret = mmc_regulator_set_ocr(host->mmc,
+ host->vcc, 0);
}
} else {
+ /* Shut down the rail */
if (host->vcc_aux)
ret = regulator_disable(host->vcc_aux);
- if (ret == 0)
- ret = mmc_regulator_set_ocr(host->vcc, 0);
+ if (!ret) {
+ /* Then proceed to shut down the local regulator */
+ ret = mmc_regulator_set_ocr(host->mmc,
+ host->vcc, 0);
+ }
}
if (mmc_slot(host).after_set_reg)
@@ -343,9 +348,9 @@ static int omap_hsmmc_23_set_sleep(struct device *dev, int slot, int sleep,
if (cardsleep) {
/* VCC can be turned off if card is asleep */
if (sleep)
- err = mmc_regulator_set_ocr(host->vcc, 0);
+ err = mmc_regulator_set_ocr(host->mmc, host->vcc, 0);
else
- err = mmc_regulator_set_ocr(host->vcc, vdd);
+ err = mmc_regulator_set_ocr(host->mmc, host->vcc, vdd);
} else
err = regulator_set_mode(host->vcc, mode);
if (err)
@@ -364,6 +369,7 @@ static int omap_hsmmc_reg_get(struct omap_hsmmc_host *host)
{
struct regulator *reg;
int ret = 0;
+ int ocr_value = 0;
switch (host->id) {
case OMAP_MMC1_DEVID:
@@ -396,6 +402,17 @@ static int omap_hsmmc_reg_get(struct omap_hsmmc_host *host)
}
} else {
host->vcc = reg;
+ ocr_value = mmc_regulator_get_ocrmask(reg);
+ if (!mmc_slot(host).ocr_mask) {
+ mmc_slot(host).ocr_mask = ocr_value;
+ } else {
+ if (!(mmc_slot(host).ocr_mask & ocr_value)) {
+ pr_err("MMC%d ocrmask %x is not supported\n",
+ host->id, mmc_slot(host).ocr_mask);
+ mmc_slot(host).ocr_mask = 0;
+ return -EINVAL;
+ }
+ }
mmc_slot(host).ocr_mask = mmc_regulator_get_ocrmask(reg);
/* Allow an aux regulator */
@@ -466,8 +483,6 @@ static int omap_hsmmc_gpio_init(struct omap_mmc_platform_data *pdata)
int ret;
if (gpio_is_valid(pdata->slots[0].switch_pin)) {
- pdata->suspend = omap_hsmmc_suspend_cdirq;
- pdata->resume = omap_hsmmc_resume_cdirq;
if (pdata->slots[0].cover)
pdata->slots[0].get_cover_state =
omap_hsmmc_get_cover_state;
@@ -982,6 +997,17 @@ static inline void omap_hsmmc_reset_controller_fsm(struct omap_hsmmc_host *host,
OMAP_HSMMC_WRITE(host->base, SYSCTL,
OMAP_HSMMC_READ(host->base, SYSCTL) | bit);
+ /*
+ * OMAP4 ES2 and later have updated reset logic:
+ * monitor a 0->1 transition first.
+ */
+ if (mmc_slot(host).features & HSMMC_HAS_UPDATED_RESET) {
+ while ((!(OMAP_HSMMC_READ(host->base, SYSCTL) & bit))
+ && (i++ < limit))
+ cpu_relax();
+ }
+ i = 0;
+
while ((OMAP_HSMMC_READ(host->base, SYSCTL) & bit) &&
(i++ < limit))
cpu_relax();
@@ -2003,6 +2029,8 @@ static int __init omap_hsmmc_probe(struct platform_device *pdev)
if (res == NULL || irq < 0)
return -ENXIO;
+ res->start += pdata->reg_offset;
+ res->end += pdata->reg_offset;
res = request_mem_region(res->start, res->end - res->start + 1,
pdev->name);
if (res == NULL)
@@ -2105,8 +2133,7 @@ static int __init omap_hsmmc_probe(struct platform_device *pdev)
/* Since we do only SG emulation, we can have as many segs
* as we want. */
- mmc->max_phys_segs = 1024;
- mmc->max_hw_segs = 1024;
+ mmc->max_segs = 1024;
mmc->max_blk_size = 512; /* Block Length at max can be 1024 */
mmc->max_blk_count = 0xFFFF; /* No. of Blocks is 16 bits */
@@ -2116,23 +2143,9 @@ static int __init omap_hsmmc_probe(struct platform_device *pdev)
mmc->caps |= MMC_CAP_MMC_HIGHSPEED | MMC_CAP_SD_HIGHSPEED |
MMC_CAP_WAIT_WHILE_BUSY | MMC_CAP_ERASE;
- switch (mmc_slot(host).wires) {
- case 8:
- mmc->caps |= MMC_CAP_8_BIT_DATA;
- /* Fall through */
- case 4:
+ mmc->caps |= mmc_slot(host).caps;
+ if (mmc->caps & MMC_CAP_8_BIT_DATA)
mmc->caps |= MMC_CAP_4_BIT_DATA;
- break;
- case 1:
- /* Nothing to crib here */
- case 0:
- /* Assuming nothing was given by board, Core use's 1-Bit */
- break;
- default:
- /* Completely unexpected.. Core goes with 1-Bit Width */
- dev_crit(mmc_dev(host->mmc), "Invalid width %d\n used!"
- "using 1 instead\n", mmc_slot(host).wires);
- }
if (mmc_slot(host).nonremovable)
mmc->caps |= MMC_CAP_NONREMOVABLE;
@@ -2203,6 +2216,8 @@ static int __init omap_hsmmc_probe(struct platform_device *pdev)
"Unable to grab MMC CD IRQ\n");
goto err_irq_cd;
}
+ pdata->suspend = omap_hsmmc_suspend_cdirq;
+ pdata->resume = omap_hsmmc_resume_cdirq;
}
omap_hsmmc_disable_irq(host);
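The regulator hookup added to omap_hsmmc_reg_get() above compares the board-supplied OCR mask with the mask reported by the regulator and fails the probe when the two share no voltage range. A standalone sketch of just that decision, under the assumption that both masks are plain VDD bitmaps; resolve_ocr_mask() and the sample values are illustrative, not driver API:

#include <stdio.h>

/* Returns the mask to use, or 0 when board and regulator cannot agree. */
static unsigned int resolve_ocr_mask(unsigned int board_mask,
                                     unsigned int regulator_mask)
{
        if (!board_mask)
                return regulator_mask;  /* board gave no constraint: take the regulator's mask */
        if (!(board_mask & regulator_mask))
                return 0;               /* disjoint ranges: reject, as the probe above does */
        return board_mask;              /* keep the board's (narrower) constraint */
}

int main(void)
{
        /* illustrative VDD bitmaps: one bit per supported voltage range */
        printf("mask = %#x\n", resolve_ocr_mask(0x00100000, 0x00ff8000));
        printf("mask = %#x\n", resolve_ocr_mask(0x00000000, 0x00ff8000));
        printf("mask = %#x\n", resolve_ocr_mask(0x00000080, 0x00ff8000));
        return 0;
}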
diff --git a/drivers/mmc/host/pxamci.c b/drivers/mmc/host/pxamci.c
index 0a4e43f37140..7257738fd7da 100644
--- a/drivers/mmc/host/pxamci.c
+++ b/drivers/mmc/host/pxamci.c
@@ -99,14 +99,25 @@ static inline void pxamci_init_ocr(struct pxamci_host *host)
}
}
-static inline void pxamci_set_power(struct pxamci_host *host, unsigned int vdd)
+static inline int pxamci_set_power(struct pxamci_host *host,
+ unsigned char power_mode,
+ unsigned int vdd)
{
int on;
-#ifdef CONFIG_REGULATOR
- if (host->vcc)
- mmc_regulator_set_ocr(host->vcc, vdd);
-#endif
+ if (host->vcc) {
+ int ret;
+
+ if (power_mode == MMC_POWER_UP) {
+ ret = mmc_regulator_set_ocr(host->mmc, host->vcc, vdd);
+ if (ret)
+ return ret;
+ } else if (power_mode == MMC_POWER_OFF) {
+ ret = mmc_regulator_set_ocr(host->mmc, host->vcc, 0);
+ if (ret)
+ return ret;
+ }
+ }
if (!host->vcc && host->pdata &&
gpio_is_valid(host->pdata->gpio_power)) {
on = ((1 << vdd) & host->pdata->ocr_mask);
@@ -115,6 +126,8 @@ static inline void pxamci_set_power(struct pxamci_host *host, unsigned int vdd)
}
if (!host->vcc && host->pdata && host->pdata->setpower)
host->pdata->setpower(mmc_dev(host->mmc), vdd);
+
+ return 0;
}
static void pxamci_stop_clock(struct pxamci_host *host)
@@ -490,9 +503,21 @@ static void pxamci_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
}
if (host->power_mode != ios->power_mode) {
+ int ret;
+
host->power_mode = ios->power_mode;
- pxamci_set_power(host, ios->vdd);
+ ret = pxamci_set_power(host, ios->power_mode, ios->vdd);
+ if (ret) {
+ dev_err(mmc_dev(mmc), "unable to set power\n");
+ /*
+ * The .set_ios() function in the mmc_host_ops
+ * struct returns void, and failing to set the
+ * power should be rare, so we print an error and
+ * return here.
+ */
+ return;
+ }
if (ios->power_mode == MMC_POWER_ON)
host->cmdat |= CMDAT_INIT;
@@ -503,8 +528,8 @@ static void pxamci_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
else
host->cmdat &= ~CMDAT_SD_4DAT;
- pr_debug("PXAMCI: clkrt = %x cmdat = %x\n",
- host->clkrt, host->cmdat);
+ dev_dbg(mmc_dev(mmc), "PXAMCI: clkrt = %x cmdat = %x\n",
+ host->clkrt, host->cmdat);
}
static void pxamci_enable_sdio_irq(struct mmc_host *host, int enable)
@@ -576,7 +601,7 @@ static int pxamci_probe(struct platform_device *pdev)
* We can do SG-DMA, but we don't because we never know how much
* data we successfully wrote to the card.
*/
- mmc->max_phys_segs = NR_SG;
+ mmc->max_segs = NR_SG;
/*
* Our hardware DMA can handle a maximum of one page per SG entry.
diff --git a/drivers/mmc/host/s3cmci.c b/drivers/mmc/host/s3cmci.c
index 976330de379e..1ccd4b256cee 100644
--- a/drivers/mmc/host/s3cmci.c
+++ b/drivers/mmc/host/s3cmci.c
@@ -1736,8 +1736,7 @@ static int __devinit s3cmci_probe(struct platform_device *pdev)
mmc->max_req_size = 4095 * 512;
mmc->max_seg_size = mmc->max_req_size;
- mmc->max_phys_segs = 128;
- mmc->max_hw_segs = 128;
+ mmc->max_segs = 128;
dbg(host, dbg_debug,
"probe: mode:%s mapped mci_base:%p irq:%u irq_cd:%u dma:%u.\n",
diff --git a/drivers/mmc/host/sdhci-cns3xxx.c b/drivers/mmc/host/sdhci-cns3xxx.c
index b7050b380d5f..9ebd1d7759dc 100644
--- a/drivers/mmc/host/sdhci-cns3xxx.c
+++ b/drivers/mmc/host/sdhci-cns3xxx.c
@@ -15,7 +15,7 @@
#include <linux/delay.h>
#include <linux/device.h>
#include <linux/mmc/host.h>
-#include <linux/sdhci-pltfm.h>
+#include <linux/mmc/sdhci-pltfm.h>
#include <mach/cns3xxx.h>
#include "sdhci.h"
#include "sdhci-pltfm.h"
diff --git a/drivers/mmc/host/sdhci-esdhc-imx.c b/drivers/mmc/host/sdhci-esdhc-imx.c
new file mode 100644
index 000000000000..9b82910b9dbb
--- /dev/null
+++ b/drivers/mmc/host/sdhci-esdhc-imx.c
@@ -0,0 +1,149 @@
+/*
+ * Freescale eSDHC i.MX controller driver for the platform bus.
+ *
+ * derived from the OF-version.
+ *
+ * Copyright (c) 2010 Pengutronix e.K.
+ * Author: Wolfram Sang <w.sang@pengutronix.de>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License.
+ */
+
+#include <linux/io.h>
+#include <linux/delay.h>
+#include <linux/err.h>
+#include <linux/clk.h>
+#include <linux/mmc/host.h>
+#include <linux/mmc/sdhci-pltfm.h>
+#include <mach/hardware.h>
+#include "sdhci.h"
+#include "sdhci-pltfm.h"
+#include "sdhci-esdhc.h"
+
+static inline void esdhc_clrset_le(struct sdhci_host *host, u32 mask, u32 val, int reg)
+{
+ void __iomem *base = host->ioaddr + (reg & ~0x3);
+ u32 shift = (reg & 0x3) * 8;
+
+ writel(((readl(base) & ~(mask << shift)) | (val << shift)), base);
+}
+
+static u16 esdhc_readw_le(struct sdhci_host *host, int reg)
+{
+ if (unlikely(reg == SDHCI_HOST_VERSION))
+ reg ^= 2;
+
+ return readw(host->ioaddr + reg);
+}
+
+static void esdhc_writew_le(struct sdhci_host *host, u16 val, int reg)
+{
+ struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
+
+ switch (reg) {
+ case SDHCI_TRANSFER_MODE:
+ /*
+ * Postpone this write; it must be issued together with
+ * the command write that follows below.
+ */
+ pltfm_host->scratchpad = val;
+ return;
+ case SDHCI_COMMAND:
+ writel(val << 16 | pltfm_host->scratchpad,
+ host->ioaddr + SDHCI_TRANSFER_MODE);
+ return;
+ case SDHCI_BLOCK_SIZE:
+ val &= ~SDHCI_MAKE_BLKSZ(0x7, 0);
+ break;
+ }
+ esdhc_clrset_le(host, 0xffff, val, reg);
+}
+
+static void esdhc_writeb_le(struct sdhci_host *host, u8 val, int reg)
+{
+ u32 new_val;
+
+ switch (reg) {
+ case SDHCI_POWER_CONTROL:
+ /*
+ * FSL put some DMA bits here.
+ * If your board has a regulator, its handling code belongs here.
+ */
+ return;
+ case SDHCI_HOST_CONTROL:
+ /* FSL messed up here, so we can just keep those two */
+ new_val = val & (SDHCI_CTRL_LED | SDHCI_CTRL_4BITBUS);
+ /* ensure the endianness */
+ new_val |= ESDHC_HOST_CONTROL_LE;
+ /* DMA mode bits are shifted */
+ new_val |= (val & SDHCI_CTRL_DMA_MASK) << 5;
+
+ esdhc_clrset_le(host, 0xffff, new_val, reg);
+ return;
+ }
+ esdhc_clrset_le(host, 0xff, val, reg);
+}
+
+static unsigned int esdhc_pltfm_get_max_clock(struct sdhci_host *host)
+{
+ struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
+
+ return clk_get_rate(pltfm_host->clk);
+}
+
+static unsigned int esdhc_pltfm_get_min_clock(struct sdhci_host *host)
+{
+ struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
+
+ return clk_get_rate(pltfm_host->clk) / 256 / 16;
+}
+
+static int esdhc_pltfm_init(struct sdhci_host *host, struct sdhci_pltfm_data *pdata)
+{
+ struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
+ struct clk *clk;
+
+ clk = clk_get(mmc_dev(host->mmc), NULL);
+ if (IS_ERR(clk)) {
+ dev_err(mmc_dev(host->mmc), "clk err\n");
+ return PTR_ERR(clk);
+ }
+ clk_enable(clk);
+ pltfm_host->clk = clk;
+
+ if (cpu_is_mx35() || cpu_is_mx51())
+ host->quirks |= SDHCI_QUIRK_BROKEN_TIMEOUT_VAL;
+
+ /* Fix errata ENGcm07207 which is present on i.MX25 and i.MX35 */
+ if (cpu_is_mx25() || cpu_is_mx35())
+ host->quirks |= SDHCI_QUIRK_NO_MULTIBLOCK;
+
+ return 0;
+}
+
+static void esdhc_pltfm_exit(struct sdhci_host *host)
+{
+ struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
+
+ clk_disable(pltfm_host->clk);
+ clk_put(pltfm_host->clk);
+}
+
+static struct sdhci_ops sdhci_esdhc_ops = {
+ .read_w = esdhc_readw_le,
+ .write_w = esdhc_writew_le,
+ .write_b = esdhc_writeb_le,
+ .set_clock = esdhc_set_clock,
+ .get_max_clock = esdhc_pltfm_get_max_clock,
+ .get_min_clock = esdhc_pltfm_get_min_clock,
+};
+
+struct sdhci_pltfm_data sdhci_esdhc_imx_pdata = {
+ .quirks = ESDHC_DEFAULT_QUIRKS | SDHCI_QUIRK_BROKEN_ADMA,
+ /* ADMA has issues. Might be fixable */
+ .ops = &sdhci_esdhc_ops,
+ .init = esdhc_pltfm_init,
+ .exit = esdhc_pltfm_exit,
+};
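esdhc_writew_le() above has to cope with a controller that wants the transfer-mode and command halves written as a single 32-bit access: the 16-bit transfer-mode value is parked in pltfm_host->scratchpad and only emitted together with the command. A self-contained sketch of that deferred-write idea; struct dev and the register offsets are invented for illustration and are not the eSDHC layout:

#include <stdint.h>
#include <stdio.h>

#define REG_XFER_MODE 0x0c      /* low half of a 32-bit word (illustrative offset) */
#define REG_COMMAND   0x0e      /* high half of the same word (illustrative offset) */

struct dev {
        uint32_t regs[4];       /* fake register file, 16 bytes */
        uint16_t scratch;       /* postponed transfer-mode value */
};

static void write_w(struct dev *d, uint16_t val, int reg)
{
        if (reg == REG_XFER_MODE) {
                d->scratch = val;       /* defer until the command is written */
                return;
        }
        if (reg == REG_COMMAND) {
                /* one combined 32-bit write: command in the high half, mode in the low half */
                d->regs[REG_XFER_MODE / 4] = (uint32_t)val << 16 | d->scratch;
                return;
        }
        /* other 16-bit registers would fall back to a read-modify-write of the word */
}

int main(void)
{
        struct dev d = { { 0 }, 0 };

        write_w(&d, 0x0013, REG_XFER_MODE);     /* example values only */
        write_w(&d, 0x113a, REG_COMMAND);
        printf("word at 0x0c = 0x%08x\n", (unsigned int)d.regs[REG_XFER_MODE / 4]);
        return 0;
}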
diff --git a/drivers/mmc/host/sdhci-esdhc.h b/drivers/mmc/host/sdhci-esdhc.h
new file mode 100644
index 000000000000..afaf1bc4913a
--- /dev/null
+++ b/drivers/mmc/host/sdhci-esdhc.h
@@ -0,0 +1,83 @@
+/*
+ * Freescale eSDHC controller driver generics for OF and pltfm.
+ *
+ * Copyright (c) 2007 Freescale Semiconductor, Inc.
+ * Copyright (c) 2009 MontaVista Software, Inc.
+ * Copyright (c) 2010 Pengutronix e.K.
+ * Author: Wolfram Sang <w.sang@pengutronix.de>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License.
+ */
+
+#ifndef _DRIVERS_MMC_SDHCI_ESDHC_H
+#define _DRIVERS_MMC_SDHCI_ESDHC_H
+
+/*
+ * Ops and quirks for the Freescale eSDHC controller.
+ */
+
+#define ESDHC_DEFAULT_QUIRKS (SDHCI_QUIRK_FORCE_BLK_SZ_2048 | \
+ SDHCI_QUIRK_BROKEN_CARD_DETECTION | \
+ SDHCI_QUIRK_NO_BUSY_IRQ | \
+ SDHCI_QUIRK_NONSTANDARD_CLOCK | \
+ SDHCI_QUIRK_DATA_TIMEOUT_USES_SDCLK | \
+ SDHCI_QUIRK_PIO_NEEDS_DELAY | \
+ SDHCI_QUIRK_RESTORE_IRQS_AFTER_RESET | \
+ SDHCI_QUIRK_NO_CARD_NO_RESET)
+
+#define ESDHC_SYSTEM_CONTROL 0x2c
+#define ESDHC_CLOCK_MASK 0x0000fff0
+#define ESDHC_PREDIV_SHIFT 8
+#define ESDHC_DIVIDER_SHIFT 4
+#define ESDHC_CLOCK_PEREN 0x00000004
+#define ESDHC_CLOCK_HCKEN 0x00000002
+#define ESDHC_CLOCK_IPGEN 0x00000001
+
+/* pltfm-specific */
+#define ESDHC_HOST_CONTROL_LE 0x20
+
+/* OF-specific */
+#define ESDHC_DMA_SYSCTL 0x40c
+#define ESDHC_DMA_SNOOP 0x00000040
+
+#define ESDHC_HOST_CONTROL_RES 0x05
+
+static inline void esdhc_set_clock(struct sdhci_host *host, unsigned int clock)
+{
+ int pre_div = 2;
+ int div = 1;
+ u32 temp;
+
+ temp = sdhci_readl(host, ESDHC_SYSTEM_CONTROL);
+ temp &= ~(ESDHC_CLOCK_IPGEN | ESDHC_CLOCK_HCKEN | ESDHC_CLOCK_PEREN
+ | ESDHC_CLOCK_MASK);
+ sdhci_writel(host, temp, ESDHC_SYSTEM_CONTROL);
+
+ if (clock == 0)
+ goto out;
+
+ while (host->max_clk / pre_div / 16 > clock && pre_div < 256)
+ pre_div *= 2;
+
+ while (host->max_clk / pre_div / div > clock && div < 16)
+ div++;
+
+ dev_dbg(mmc_dev(host->mmc), "desired SD clock: %d, actual: %d\n",
+ clock, host->max_clk / pre_div / div);
+
+ pre_div >>= 1;
+ div--;
+
+ temp = sdhci_readl(host, ESDHC_SYSTEM_CONTROL);
+ temp |= (ESDHC_CLOCK_IPGEN | ESDHC_CLOCK_HCKEN | ESDHC_CLOCK_PEREN
+ | (div << ESDHC_DIVIDER_SHIFT)
+ | (pre_div << ESDHC_PREDIV_SHIFT));
+ sdhci_writel(host, temp, ESDHC_SYSTEM_CONTROL);
+ mdelay(100);
+out:
+ host->clock = clock;
+}
+
+#endif /* _DRIVERS_MMC_SDHCI_ESDHC_H */
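esdhc_set_clock() above searches for a power-of-two prescaler (2..256) and a linear divider (1..16) so that max_clk / pre_div / div does not exceed the requested rate, then programs pre_div >> 1 and div - 1 into ESDHC_SYSTEM_CONTROL. A standalone sketch of just the arithmetic, with no register access; the clock figures in main() are only an example:

#include <stdio.h>

static unsigned int esdhc_pick_dividers(unsigned int max_clk, unsigned int clock,
                                        int *pre_div_out, int *div_out)
{
        int pre_div = 2;
        int div = 1;

        /* pre_div: power of two up to 256; div: linear divider up to 16 */
        while (max_clk / pre_div / 16 > clock && pre_div < 256)
                pre_div *= 2;

        while (max_clk / pre_div / div > clock && div < 16)
                div++;

        *pre_div_out = pre_div;
        *div_out = div;
        return max_clk / pre_div / div;         /* resulting SD clock */
}

int main(void)
{
        int pre_div, div;
        unsigned int actual;

        /* e.g. 96 MHz base clock, 400 kHz target during card init */
        actual = esdhc_pick_dividers(96000000, 400000, &pre_div, &div);
        printf("pre_div=%d div=%d actual=%u Hz\n", pre_div, div, actual);
        return 0;
}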
diff --git a/drivers/mmc/host/sdhci-of-esdhc.c b/drivers/mmc/host/sdhci-of-esdhc.c
index c8623de13af3..fcd0e1fcba44 100644
--- a/drivers/mmc/host/sdhci-of-esdhc.c
+++ b/drivers/mmc/host/sdhci-of-esdhc.c
@@ -18,23 +18,7 @@
#include <linux/mmc/host.h>
#include "sdhci-of.h"
#include "sdhci.h"
-
-/*
- * Ops and quirks for the Freescale eSDHC controller.
- */
-
-#define ESDHC_DMA_SYSCTL 0x40c
-#define ESDHC_DMA_SNOOP 0x00000040
-
-#define ESDHC_SYSTEM_CONTROL 0x2c
-#define ESDHC_CLOCK_MASK 0x0000fff0
-#define ESDHC_PREDIV_SHIFT 8
-#define ESDHC_DIVIDER_SHIFT 4
-#define ESDHC_CLOCK_PEREN 0x00000004
-#define ESDHC_CLOCK_HCKEN 0x00000002
-#define ESDHC_CLOCK_IPGEN 0x00000001
-
-#define ESDHC_HOST_CONTROL_RES 0x05
+#include "sdhci-esdhc.h"
static u16 esdhc_readw(struct sdhci_host *host, int reg)
{
@@ -68,51 +52,20 @@ static void esdhc_writeb(struct sdhci_host *host, u8 val, int reg)
sdhci_be32bs_writeb(host, val, reg);
}
-static void esdhc_set_clock(struct sdhci_host *host, unsigned int clock)
-{
- int pre_div = 2;
- int div = 1;
-
- clrbits32(host->ioaddr + ESDHC_SYSTEM_CONTROL, ESDHC_CLOCK_IPGEN |
- ESDHC_CLOCK_HCKEN | ESDHC_CLOCK_PEREN | ESDHC_CLOCK_MASK);
-
- if (clock == 0)
- goto out;
-
- while (host->max_clk / pre_div / 16 > clock && pre_div < 256)
- pre_div *= 2;
-
- while (host->max_clk / pre_div / div > clock && div < 16)
- div++;
-
- dev_dbg(mmc_dev(host->mmc), "desired SD clock: %d, actual: %d\n",
- clock, host->max_clk / pre_div / div);
-
- pre_div >>= 1;
- div--;
-
- setbits32(host->ioaddr + ESDHC_SYSTEM_CONTROL, ESDHC_CLOCK_IPGEN |
- ESDHC_CLOCK_HCKEN | ESDHC_CLOCK_PEREN |
- div << ESDHC_DIVIDER_SHIFT | pre_div << ESDHC_PREDIV_SHIFT);
- mdelay(100);
-out:
- host->clock = clock;
-}
-
-static int esdhc_enable_dma(struct sdhci_host *host)
+static int esdhc_of_enable_dma(struct sdhci_host *host)
{
setbits32(host->ioaddr + ESDHC_DMA_SYSCTL, ESDHC_DMA_SNOOP);
return 0;
}
-static unsigned int esdhc_get_max_clock(struct sdhci_host *host)
+static unsigned int esdhc_of_get_max_clock(struct sdhci_host *host)
{
struct sdhci_of_host *of_host = sdhci_priv(host);
return of_host->clock;
}
-static unsigned int esdhc_get_min_clock(struct sdhci_host *host)
+static unsigned int esdhc_of_get_min_clock(struct sdhci_host *host)
{
struct sdhci_of_host *of_host = sdhci_priv(host);
@@ -120,14 +73,7 @@ static unsigned int esdhc_get_min_clock(struct sdhci_host *host)
}
struct sdhci_of_data sdhci_esdhc = {
- .quirks = SDHCI_QUIRK_FORCE_BLK_SZ_2048 |
- SDHCI_QUIRK_BROKEN_CARD_DETECTION |
- SDHCI_QUIRK_NO_BUSY_IRQ |
- SDHCI_QUIRK_NONSTANDARD_CLOCK |
- SDHCI_QUIRK_DATA_TIMEOUT_USES_SDCLK |
- SDHCI_QUIRK_PIO_NEEDS_DELAY |
- SDHCI_QUIRK_RESTORE_IRQS_AFTER_RESET |
- SDHCI_QUIRK_NO_CARD_NO_RESET,
+ .quirks = ESDHC_DEFAULT_QUIRKS,
.ops = {
.read_l = sdhci_be32bs_readl,
.read_w = esdhc_readw,
@@ -136,8 +82,8 @@ struct sdhci_of_data sdhci_esdhc = {
.write_w = esdhc_writew,
.write_b = esdhc_writeb,
.set_clock = esdhc_set_clock,
- .enable_dma = esdhc_enable_dma,
- .get_max_clock = esdhc_get_max_clock,
- .get_min_clock = esdhc_get_min_clock,
+ .enable_dma = esdhc_of_enable_dma,
+ .get_max_clock = esdhc_of_get_max_clock,
+ .get_min_clock = esdhc_of_get_min_clock,
},
};
diff --git a/drivers/mmc/host/sdhci-pci.c b/drivers/mmc/host/sdhci-pci.c
index e8aa99deae9a..3d9c2460d437 100644
--- a/drivers/mmc/host/sdhci-pci.c
+++ b/drivers/mmc/host/sdhci-pci.c
@@ -145,6 +145,37 @@ static const struct sdhci_pci_fixes sdhci_cafe = {
SDHCI_QUIRK_BROKEN_TIMEOUT_VAL,
};
+/*
+ * ADMA operation is disabled for the Moorestown platform due to
+ * hardware bugs.
+ */
+static int mrst_hc_probe(struct sdhci_pci_chip *chip)
+{
+ /*
+ * The number of slots is fixed to 1 here for MRST, as SDIO3/5 are
+ * never used and have hardware bugs.
+ */
+ chip->num_slots = 1;
+ return 0;
+}
+
+static const struct sdhci_pci_fixes sdhci_intel_mrst_hc0 = {
+ .quirks = SDHCI_QUIRK_BROKEN_ADMA | SDHCI_QUIRK_NO_HISPD_BIT,
+};
+
+static const struct sdhci_pci_fixes sdhci_intel_mrst_hc1_hc2 = {
+ .quirks = SDHCI_QUIRK_BROKEN_ADMA | SDHCI_QUIRK_NO_HISPD_BIT,
+ .probe = mrst_hc_probe,
+};
+
+static const struct sdhci_pci_fixes sdhci_intel_mfd_sd = {
+ .quirks = SDHCI_QUIRK_NO_ENDATTR_IN_NOPDESC,
+};
+
+static const struct sdhci_pci_fixes sdhci_intel_mfd_emmc_sdio = {
+ .quirks = SDHCI_QUIRK_NO_ENDATTR_IN_NOPDESC,
+};
+
static int jmicron_pmos(struct sdhci_pci_chip *chip, int on)
{
u8 scratch;
@@ -494,6 +525,70 @@ static const struct pci_device_id pci_ids[] __devinitdata = {
.driver_data = (kernel_ulong_t)&sdhci_via,
},
+ {
+ .vendor = PCI_VENDOR_ID_INTEL,
+ .device = PCI_DEVICE_ID_INTEL_MRST_SD0,
+ .subvendor = PCI_ANY_ID,
+ .subdevice = PCI_ANY_ID,
+ .driver_data = (kernel_ulong_t)&sdhci_intel_mrst_hc0,
+ },
+
+ {
+ .vendor = PCI_VENDOR_ID_INTEL,
+ .device = PCI_DEVICE_ID_INTEL_MRST_SD1,
+ .subvendor = PCI_ANY_ID,
+ .subdevice = PCI_ANY_ID,
+ .driver_data = (kernel_ulong_t)&sdhci_intel_mrst_hc1_hc2,
+ },
+
+ {
+ .vendor = PCI_VENDOR_ID_INTEL,
+ .device = PCI_DEVICE_ID_INTEL_MRST_SD2,
+ .subvendor = PCI_ANY_ID,
+ .subdevice = PCI_ANY_ID,
+ .driver_data = (kernel_ulong_t)&sdhci_intel_mrst_hc1_hc2,
+ },
+
+ {
+ .vendor = PCI_VENDOR_ID_INTEL,
+ .device = PCI_DEVICE_ID_INTEL_MFD_SD,
+ .subvendor = PCI_ANY_ID,
+ .subdevice = PCI_ANY_ID,
+ .driver_data = (kernel_ulong_t)&sdhci_intel_mfd_sd,
+ },
+
+ {
+ .vendor = PCI_VENDOR_ID_INTEL,
+ .device = PCI_DEVICE_ID_INTEL_MFD_SDIO1,
+ .subvendor = PCI_ANY_ID,
+ .subdevice = PCI_ANY_ID,
+ .driver_data = (kernel_ulong_t)&sdhci_intel_mfd_emmc_sdio,
+ },
+
+ {
+ .vendor = PCI_VENDOR_ID_INTEL,
+ .device = PCI_DEVICE_ID_INTEL_MFD_SDIO2,
+ .subvendor = PCI_ANY_ID,
+ .subdevice = PCI_ANY_ID,
+ .driver_data = (kernel_ulong_t)&sdhci_intel_mfd_emmc_sdio,
+ },
+
+ {
+ .vendor = PCI_VENDOR_ID_INTEL,
+ .device = PCI_DEVICE_ID_INTEL_MFD_EMMC0,
+ .subvendor = PCI_ANY_ID,
+ .subdevice = PCI_ANY_ID,
+ .driver_data = (kernel_ulong_t)&sdhci_intel_mfd_emmc_sdio,
+ },
+
+ {
+ .vendor = PCI_VENDOR_ID_INTEL,
+ .device = PCI_DEVICE_ID_INTEL_MFD_EMMC1,
+ .subvendor = PCI_ANY_ID,
+ .subdevice = PCI_ANY_ID,
+ .driver_data = (kernel_ulong_t)&sdhci_intel_mfd_emmc_sdio,
+ },
+
{ /* Generic SD host controller */
PCI_DEVICE_CLASS((PCI_CLASS_SYSTEM_SDHCI << 8), 0xFFFF00)
},
@@ -550,6 +645,7 @@ static int sdhci_pci_suspend (struct pci_dev *pdev, pm_message_t state)
{
struct sdhci_pci_chip *chip;
struct sdhci_pci_slot *slot;
+ mmc_pm_flag_t slot_pm_flags;
mmc_pm_flag_t pm_flags = 0;
int i, ret;
@@ -570,7 +666,11 @@ static int sdhci_pci_suspend (struct pci_dev *pdev, pm_message_t state)
return ret;
}
- pm_flags |= slot->host->mmc->pm_flags;
+ slot_pm_flags = slot->host->mmc->pm_flags;
+ if (slot_pm_flags & MMC_PM_WAKE_SDIO_IRQ)
+ sdhci_enable_irq_wakeups(slot->host);
+
+ pm_flags |= slot_pm_flags;
}
if (chip->fixes && chip->fixes->suspend) {
@@ -584,8 +684,10 @@ static int sdhci_pci_suspend (struct pci_dev *pdev, pm_message_t state)
pci_save_state(pdev);
if (pm_flags & MMC_PM_KEEP_POWER) {
- if (pm_flags & MMC_PM_WAKE_SDIO_IRQ)
+ if (pm_flags & MMC_PM_WAKE_SDIO_IRQ) {
+ pci_pme_active(pdev, true);
pci_enable_wake(pdev, PCI_D3hot, 1);
+ }
pci_set_power_state(pdev, PCI_D3hot);
} else {
pci_enable_wake(pdev, pci_choose_state(pdev, state), 0);
@@ -818,6 +920,8 @@ static int __devinit sdhci_pci_probe(struct pci_dev *pdev,
goto free;
}
+ slots = chip->num_slots; /* Quirk may have changed this */
+
for (i = 0;i < slots;i++) {
slot = sdhci_pci_probe_slot(pdev, chip, first_bar + i);
if (IS_ERR(slot)) {
diff --git a/drivers/mmc/host/sdhci-pltfm.c b/drivers/mmc/host/sdhci-pltfm.c
index e045e3c61dde..0502f89f662b 100644
--- a/drivers/mmc/host/sdhci-pltfm.c
+++ b/drivers/mmc/host/sdhci-pltfm.c
@@ -30,7 +30,7 @@
#include <linux/mmc/host.h>
#include <linux/io.h>
-#include <linux/sdhci-pltfm.h>
+#include <linux/mmc/sdhci-pltfm.h>
#include "sdhci.h"
#include "sdhci-pltfm.h"
@@ -52,14 +52,17 @@ static struct sdhci_ops sdhci_pltfm_ops = {
static int __devinit sdhci_pltfm_probe(struct platform_device *pdev)
{
- struct sdhci_pltfm_data *pdata = pdev->dev.platform_data;
const struct platform_device_id *platid = platform_get_device_id(pdev);
+ struct sdhci_pltfm_data *pdata;
struct sdhci_host *host;
+ struct sdhci_pltfm_host *pltfm_host;
struct resource *iomem;
int ret;
- if (!pdata && platid && platid->driver_data)
+ if (platid && platid->driver_data)
pdata = (void *)platid->driver_data;
+ else
+ pdata = pdev->dev.platform_data;
iomem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
if (!iomem) {
@@ -71,16 +74,19 @@ static int __devinit sdhci_pltfm_probe(struct platform_device *pdev)
dev_err(&pdev->dev, "Invalid iomem size. You may "
"experience problems.\n");
- if (pdev->dev.parent)
- host = sdhci_alloc_host(pdev->dev.parent, 0);
+ /* Some PCI-based MFDs need the parent here */
+ if (pdev->dev.parent != &platform_bus)
+ host = sdhci_alloc_host(pdev->dev.parent, sizeof(*pltfm_host));
else
- host = sdhci_alloc_host(&pdev->dev, 0);
+ host = sdhci_alloc_host(&pdev->dev, sizeof(*pltfm_host));
if (IS_ERR(host)) {
ret = PTR_ERR(host);
goto err;
}
+ pltfm_host = sdhci_priv(host);
+
host->hw_name = "platform";
if (pdata && pdata->ops)
host->ops = pdata->ops;
@@ -105,7 +111,7 @@ static int __devinit sdhci_pltfm_probe(struct platform_device *pdev)
}
if (pdata && pdata->init) {
- ret = pdata->init(host);
+ ret = pdata->init(host, pdata);
if (ret)
goto err_plat_init;
}
@@ -161,10 +167,32 @@ static const struct platform_device_id sdhci_pltfm_ids[] = {
#ifdef CONFIG_MMC_SDHCI_CNS3XXX
{ "sdhci-cns3xxx", (kernel_ulong_t)&sdhci_cns3xxx_pdata },
#endif
+#ifdef CONFIG_MMC_SDHCI_ESDHC_IMX
+ { "sdhci-esdhc-imx", (kernel_ulong_t)&sdhci_esdhc_imx_pdata },
+#endif
{ },
};
MODULE_DEVICE_TABLE(platform, sdhci_pltfm_ids);
+#ifdef CONFIG_PM
+static int sdhci_pltfm_suspend(struct platform_device *dev, pm_message_t state)
+{
+ struct sdhci_host *host = platform_get_drvdata(dev);
+
+ return sdhci_suspend_host(host, state);
+}
+
+static int sdhci_pltfm_resume(struct platform_device *dev)
+{
+ struct sdhci_host *host = platform_get_drvdata(dev);
+
+ return sdhci_resume_host(host);
+}
+#else
+#define sdhci_pltfm_suspend NULL
+#define sdhci_pltfm_resume NULL
+#endif /* CONFIG_PM */
+
static struct platform_driver sdhci_pltfm_driver = {
.driver = {
.name = "sdhci",
@@ -173,6 +201,8 @@ static struct platform_driver sdhci_pltfm_driver = {
.probe = sdhci_pltfm_probe,
.remove = __devexit_p(sdhci_pltfm_remove),
.id_table = sdhci_pltfm_ids,
+ .suspend = sdhci_pltfm_suspend,
+ .resume = sdhci_pltfm_resume,
};
/*****************************************************************************\
diff --git a/drivers/mmc/host/sdhci-pltfm.h b/drivers/mmc/host/sdhci-pltfm.h
index 900f32902f73..c1bfe48af56a 100644
--- a/drivers/mmc/host/sdhci-pltfm.h
+++ b/drivers/mmc/host/sdhci-pltfm.h
@@ -11,8 +11,16 @@
#ifndef _DRIVERS_MMC_SDHCI_PLTFM_H
#define _DRIVERS_MMC_SDHCI_PLTFM_H
-#include <linux/sdhci-pltfm.h>
+#include <linux/clk.h>
+#include <linux/types.h>
+#include <linux/mmc/sdhci-pltfm.h>
+
+struct sdhci_pltfm_host {
+ struct clk *clk;
+ u32 scratchpad; /* to handle quirks across io-accessor calls */
+};
extern struct sdhci_pltfm_data sdhci_cns3xxx_pdata;
+extern struct sdhci_pltfm_data sdhci_esdhc_imx_pdata;
#endif /* _DRIVERS_MMC_SDHCI_PLTFM_H */
diff --git a/drivers/mmc/host/sdhci-pxa.c b/drivers/mmc/host/sdhci-pxa.c
new file mode 100644
index 000000000000..5a61208cbc66
--- /dev/null
+++ b/drivers/mmc/host/sdhci-pxa.c
@@ -0,0 +1,257 @@
+/* linux/drivers/mmc/host/sdhci-pxa.c
+ *
+ * Copyright (C) 2010 Marvell International Ltd.
+ * Zhangfei Gao <zhangfei.gao@marvell.com>
+ * Kevin Wang <dwang4@marvell.com>
+ * Mingwei Wang <mwwang@marvell.com>
+ * Philip Rakity <prakity@marvell.com>
+ * Mark Brown <markb@marvell.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+/*
+ * SDHCI support for MMP2/PXA910/PXA168.
+ *
+ * Refer to sdhci-s3c.c.
+ */
+
+#include <linux/delay.h>
+#include <linux/platform_device.h>
+#include <linux/mmc/host.h>
+#include <linux/clk.h>
+#include <linux/io.h>
+#include <linux/err.h>
+#include <plat/sdhci.h>
+#include "sdhci.h"
+
+#define DRIVER_NAME "sdhci-pxa"
+
+#define SD_FIFO_PARAM 0x104
+#define DIS_PAD_SD_CLK_GATE 0x400
+
+struct sdhci_pxa {
+ struct sdhci_host *host;
+ struct sdhci_pxa_platdata *pdata;
+ struct clk *clk;
+ struct resource *res;
+
+ u8 clk_enable;
+};
+
+/*****************************************************************************\
+ * *
+ * SDHCI core callbacks *
+ * *
+\*****************************************************************************/
+static void set_clock(struct sdhci_host *host, unsigned int clock)
+{
+ struct sdhci_pxa *pxa = sdhci_priv(host);
+ u32 tmp = 0;
+
+ if (clock == 0) {
+ if (pxa->clk_enable) {
+ clk_disable(pxa->clk);
+ pxa->clk_enable = 0;
+ }
+ } else {
+ if (0 == pxa->clk_enable) {
+ if (pxa->pdata->flags & PXA_FLAG_DISABLE_CLOCK_GATING) {
+ tmp = readl(host->ioaddr + SD_FIFO_PARAM);
+ tmp |= DIS_PAD_SD_CLK_GATE;
+ writel(tmp, host->ioaddr + SD_FIFO_PARAM);
+ }
+ clk_enable(pxa->clk);
+ pxa->clk_enable = 1;
+ }
+ }
+}
+
+static struct sdhci_ops sdhci_pxa_ops = {
+ .set_clock = set_clock,
+};
+
+/*****************************************************************************\
+ * *
+ * Device probing/removal *
+ * *
+\*****************************************************************************/
+
+static int __devinit sdhci_pxa_probe(struct platform_device *pdev)
+{
+ struct sdhci_pxa_platdata *pdata = pdev->dev.platform_data;
+ struct device *dev = &pdev->dev;
+ struct sdhci_host *host = NULL;
+ struct resource *iomem = NULL;
+ struct sdhci_pxa *pxa = NULL;
+ int ret, irq;
+
+ irq = platform_get_irq(pdev, 0);
+ if (irq < 0) {
+ dev_err(dev, "no irq specified\n");
+ return irq;
+ }
+
+ iomem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (!iomem) {
+ dev_err(dev, "no memory specified\n");
+ return -ENOENT;
+ }
+
+ host = sdhci_alloc_host(&pdev->dev, sizeof(struct sdhci_pxa));
+ if (IS_ERR(host)) {
+ dev_err(dev, "failed to alloc host\n");
+ return PTR_ERR(host);
+ }
+
+ pxa = sdhci_priv(host);
+ pxa->host = host;
+ pxa->pdata = pdata;
+ pxa->clk_enable = 0;
+
+ pxa->clk = clk_get(dev, "PXA-SDHCLK");
+ if (IS_ERR(pxa->clk)) {
+ dev_err(dev, "failed to get io clock\n");
+ ret = PTR_ERR(pxa->clk);
+ goto out;
+ }
+
+ pxa->res = request_mem_region(iomem->start, resource_size(iomem),
+ mmc_hostname(host->mmc));
+ if (!pxa->res) {
+ dev_err(&pdev->dev, "cannot request region\n");
+ ret = -EBUSY;
+ goto out;
+ }
+
+ host->ioaddr = ioremap(iomem->start, resource_size(iomem));
+ if (!host->ioaddr) {
+ dev_err(&pdev->dev, "failed to remap registers\n");
+ ret = -ENOMEM;
+ goto out;
+ }
+
+ host->hw_name = "MMC";
+ host->ops = &sdhci_pxa_ops;
+ host->irq = irq;
+ host->quirks = SDHCI_QUIRK_BROKEN_ADMA | SDHCI_QUIRK_BROKEN_TIMEOUT_VAL;
+
+ if (pdata->quirks)
+ host->quirks |= pdata->quirks;
+
+ /* If slot design supports 8 bit data, indicate this to MMC. */
+ if (pdata->flags & PXA_FLAG_SD_8_BIT_CAPABLE_SLOT)
+ host->mmc->caps |= MMC_CAP_8_BIT_DATA;
+
+ ret = sdhci_add_host(host);
+ if (ret) {
+ dev_err(&pdev->dev, "failed to add host\n");
+ goto out;
+ }
+
+ if (pxa->pdata->max_speed)
+ host->mmc->f_max = pxa->pdata->max_speed;
+
+ platform_set_drvdata(pdev, host);
+
+ return 0;
+out:
+ if (host) {
+ clk_put(pxa->clk);
+ if (host->ioaddr)
+ iounmap(host->ioaddr);
+ if (pxa->res)
+ release_mem_region(pxa->res->start,
+ resource_size(pxa->res));
+ sdhci_free_host(host);
+ }
+
+ return ret;
+}
+
+static int __devexit sdhci_pxa_remove(struct platform_device *pdev)
+{
+ struct sdhci_host *host = platform_get_drvdata(pdev);
+ struct sdhci_pxa *pxa = sdhci_priv(host);
+ int dead = 0;
+ u32 scratch;
+
+ if (host) {
+ scratch = readl(host->ioaddr + SDHCI_INT_STATUS);
+ if (scratch == (u32)-1)
+ dead = 1;
+
+ sdhci_remove_host(host, dead);
+
+ if (host->ioaddr)
+ iounmap(host->ioaddr);
+ if (pxa->res)
+ release_mem_region(pxa->res->start,
+ resource_size(pxa->res));
+ if (pxa->clk_enable) {
+ clk_disable(pxa->clk);
+ pxa->clk_enable = 0;
+ }
+ clk_put(pxa->clk);
+
+ sdhci_free_host(host);
+ platform_set_drvdata(pdev, NULL);
+ }
+
+ return 0;
+}
+
+#ifdef CONFIG_PM
+static int sdhci_pxa_suspend(struct platform_device *dev, pm_message_t state)
+{
+ struct sdhci_host *host = platform_get_drvdata(dev);
+
+ return sdhci_suspend_host(host, state);
+}
+
+static int sdhci_pxa_resume(struct platform_device *dev)
+{
+ struct sdhci_host *host = platform_get_drvdata(dev);
+
+ return sdhci_resume_host(host);
+}
+#else
+#define sdhci_pxa_suspend NULL
+#define sdhci_pxa_resume NULL
+#endif
+
+static struct platform_driver sdhci_pxa_driver = {
+ .probe = sdhci_pxa_probe,
+ .remove = __devexit_p(sdhci_pxa_remove),
+ .suspend = sdhci_pxa_suspend,
+ .resume = sdhci_pxa_resume,
+ .driver = {
+ .name = DRIVER_NAME,
+ .owner = THIS_MODULE,
+ },
+};
+
+/*****************************************************************************\
+ * *
+ * Driver init/exit *
+ * *
+\*****************************************************************************/
+
+static int __init sdhci_pxa_init(void)
+{
+ return platform_driver_register(&sdhci_pxa_driver);
+}
+
+static void __exit sdhci_pxa_exit(void)
+{
+ platform_driver_unregister(&sdhci_pxa_driver);
+}
+
+module_init(sdhci_pxa_init);
+module_exit(sdhci_pxa_exit);
+
+MODULE_DESCRIPTION("SDH controller driver for PXA168/PXA910/MMP2");
+MODULE_AUTHOR("Zhangfei Gao <zhangfei.gao@marvell.com>");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/mmc/host/sdhci.c b/drivers/mmc/host/sdhci.c
index 401527d273b5..a25db426c910 100644
--- a/drivers/mmc/host/sdhci.c
+++ b/drivers/mmc/host/sdhci.c
@@ -47,7 +47,8 @@ static void sdhci_finish_command(struct sdhci_host *);
static void sdhci_dumpregs(struct sdhci_host *host)
{
- printk(KERN_DEBUG DRIVER_NAME ": ============== REGISTER DUMP ==============\n");
+ printk(KERN_DEBUG DRIVER_NAME ": =========== REGISTER DUMP (%s)===========\n",
+ mmc_hostname(host->mmc));
printk(KERN_DEBUG DRIVER_NAME ": Sys addr: 0x%08x | Version: 0x%08x\n",
sdhci_readl(host, SDHCI_DMA_ADDRESS),
@@ -1001,13 +1002,28 @@ static void sdhci_set_clock(struct sdhci_host *host, unsigned int clock)
if (clock == 0)
goto out;
- for (div = 1;div < 256;div *= 2) {
- if ((host->max_clk / div) <= clock)
- break;
+ if (host->version >= SDHCI_SPEC_300) {
+ /* Version 3.00 divisors must be a multiple of 2. */
+ if (host->max_clk <= clock)
+ div = 1;
+ else {
+ for (div = 2; div < SDHCI_MAX_DIV_SPEC_300; div += 2) {
+ if ((host->max_clk / div) <= clock)
+ break;
+ }
+ }
+ } else {
+ /* Version 2.00 divisors must be a power of 2. */
+ for (div = 1; div < SDHCI_MAX_DIV_SPEC_200; div *= 2) {
+ if ((host->max_clk / div) <= clock)
+ break;
+ }
}
div >>= 1;
- clk = div << SDHCI_DIVIDER_SHIFT;
+ clk = (div & SDHCI_DIV_MASK) << SDHCI_DIVIDER_SHIFT;
+ clk |= ((div & SDHCI_DIV_HI_MASK) >> SDHCI_DIV_MASK_LEN)
+ << SDHCI_DIVIDER_HI_SHIFT;
clk |= SDHCI_CLOCK_INT_EN;
sdhci_writew(host, clk, SDHCI_CLOCK_CONTROL);
@@ -1034,11 +1050,9 @@ out:
static void sdhci_set_power(struct sdhci_host *host, unsigned short power)
{
- u8 pwr;
+ u8 pwr = 0;
- if (power == (unsigned short)-1)
- pwr = 0;
- else {
+ if (power != (unsigned short)-1) {
switch (1 << power) {
case MMC_VDD_165_195:
pwr = SDHCI_POWER_180;
@@ -1168,20 +1182,38 @@ static void sdhci_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
else
sdhci_set_power(host, ios->vdd);
- ctrl = sdhci_readb(host, SDHCI_HOST_CONTROL);
+ if (host->ops->platform_send_init_74_clocks)
+ host->ops->platform_send_init_74_clocks(host, ios->power_mode);
- if (ios->bus_width == MMC_BUS_WIDTH_8)
- ctrl |= SDHCI_CTRL_8BITBUS;
- else
- ctrl &= ~SDHCI_CTRL_8BITBUS;
+ /*
+ * If your platform has 8-bit width support but is not a v3 controller,
+ * or if it requires special setup code, you should implement that in
+ * platform_8bit_width().
+ */
+ if (host->ops->platform_8bit_width)
+ host->ops->platform_8bit_width(host, ios->bus_width);
+ else {
+ ctrl = sdhci_readb(host, SDHCI_HOST_CONTROL);
+ if (ios->bus_width == MMC_BUS_WIDTH_8) {
+ ctrl &= ~SDHCI_CTRL_4BITBUS;
+ if (host->version >= SDHCI_SPEC_300)
+ ctrl |= SDHCI_CTRL_8BITBUS;
+ } else {
+ if (host->version >= SDHCI_SPEC_300)
+ ctrl &= ~SDHCI_CTRL_8BITBUS;
+ if (ios->bus_width == MMC_BUS_WIDTH_4)
+ ctrl |= SDHCI_CTRL_4BITBUS;
+ else
+ ctrl &= ~SDHCI_CTRL_4BITBUS;
+ }
+ sdhci_writeb(host, ctrl, SDHCI_HOST_CONTROL);
+ }
- if (ios->bus_width == MMC_BUS_WIDTH_4)
- ctrl |= SDHCI_CTRL_4BITBUS;
- else
- ctrl &= ~SDHCI_CTRL_4BITBUS;
+ ctrl = sdhci_readb(host, SDHCI_HOST_CONTROL);
- if (ios->timing == MMC_TIMING_SD_HS &&
- !(host->quirks & SDHCI_QUIRK_NO_HISPD_BIT))
+ if ((ios->timing == MMC_TIMING_SD_HS ||
+ ios->timing == MMC_TIMING_MMC_HS)
+ && !(host->quirks & SDHCI_QUIRK_NO_HISPD_BIT))
ctrl |= SDHCI_CTRL_HISPD;
else
ctrl &= ~SDHCI_CTRL_HISPD;
@@ -1205,22 +1237,25 @@ static int sdhci_get_ro(struct mmc_host *mmc)
{
struct sdhci_host *host;
unsigned long flags;
- int present;
+ int is_readonly;
host = mmc_priv(mmc);
spin_lock_irqsave(&host->lock, flags);
if (host->flags & SDHCI_DEVICE_DEAD)
- present = 0;
+ is_readonly = 0;
+ else if (host->ops->get_ro)
+ is_readonly = host->ops->get_ro(host);
else
- present = sdhci_readl(host, SDHCI_PRESENT_STATE);
+ is_readonly = !(sdhci_readl(host, SDHCI_PRESENT_STATE)
+ & SDHCI_WRITE_PROTECT);
spin_unlock_irqrestore(&host->lock, flags);
- if (host->quirks & SDHCI_QUIRK_INVERTED_WRITE_PROTECT)
- return !!(present & SDHCI_WRITE_PROTECT);
- return !(present & SDHCI_WRITE_PROTECT);
+ /* This quirk needs to be replaced by a callback function later */
+ return host->quirks & SDHCI_QUIRK_INVERTED_WRITE_PROTECT ?
+ !is_readonly : is_readonly;
}
static void sdhci_enable_sdio_irq(struct mmc_host *mmc, int enable)
@@ -1427,7 +1462,7 @@ static void sdhci_cmd_irq(struct sdhci_host *host, u32 intmask)
sdhci_finish_command(host);
}
-#ifdef DEBUG
+#ifdef CONFIG_MMC_DEBUG
static void sdhci_show_adma_error(struct sdhci_host *host)
{
const char *name = mmc_hostname(host->mmc);
@@ -1660,6 +1695,16 @@ int sdhci_resume_host(struct sdhci_host *host)
EXPORT_SYMBOL_GPL(sdhci_resume_host);
+void sdhci_enable_irq_wakeups(struct sdhci_host *host)
+{
+ u8 val;
+ val = sdhci_readb(host, SDHCI_WAKE_UP_CONTROL);
+ val |= SDHCI_WAKE_ON_INT;
+ sdhci_writeb(host, val, SDHCI_WAKE_UP_CONTROL);
+}
+
+EXPORT_SYMBOL_GPL(sdhci_enable_irq_wakeups);
+
#endif /* CONFIG_PM */
/*****************************************************************************\
@@ -1708,7 +1753,7 @@ int sdhci_add_host(struct sdhci_host *host)
host->version = sdhci_readw(host, SDHCI_HOST_VERSION);
host->version = (host->version & SDHCI_SPEC_VER_MASK)
>> SDHCI_SPEC_VER_SHIFT;
- if (host->version > SDHCI_SPEC_200) {
+ if (host->version > SDHCI_SPEC_300) {
printk(KERN_ERR "%s: Unknown controller version (%d). "
"You may experience problems.\n", mmc_hostname(mmc),
host->version);
@@ -1779,8 +1824,13 @@ int sdhci_add_host(struct sdhci_host *host)
mmc_dev(host->mmc)->dma_mask = &host->dma_mask;
}
- host->max_clk =
- (caps & SDHCI_CLOCK_BASE_MASK) >> SDHCI_CLOCK_BASE_SHIFT;
+ if (host->version >= SDHCI_SPEC_300)
+ host->max_clk = (caps & SDHCI_CLOCK_V3_BASE_MASK)
+ >> SDHCI_CLOCK_BASE_SHIFT;
+ else
+ host->max_clk = (caps & SDHCI_CLOCK_BASE_MASK)
+ >> SDHCI_CLOCK_BASE_SHIFT;
+
host->max_clk *= 1000000;
if (host->max_clk == 0 || host->quirks &
SDHCI_QUIRK_CAP_CLOCK_BASE_BROKEN) {
@@ -1815,18 +1865,29 @@ int sdhci_add_host(struct sdhci_host *host)
mmc->ops = &sdhci_ops;
if (host->ops->get_min_clock)
mmc->f_min = host->ops->get_min_clock(host);
+ else if (host->version >= SDHCI_SPEC_300)
+ mmc->f_min = host->max_clk / SDHCI_MAX_DIV_SPEC_300;
else
- mmc->f_min = host->max_clk / 256;
+ mmc->f_min = host->max_clk / SDHCI_MAX_DIV_SPEC_200;
+
mmc->f_max = host->max_clk;
mmc->caps |= MMC_CAP_SDIO_IRQ;
+ /*
+ * A controller may support 8-bit width, but the board itself
+ * might not have the pins brought out. Boards that support
+ * 8-bit width must set "mmc->caps |= MMC_CAP_8_BIT_DATA;" in
+ * their platform code before calling sdhci_add_host(), and we
+ * won't assume 8-bit width for hosts without that CAP.
+ */
if (!(host->quirks & SDHCI_QUIRK_FORCE_1_BIT_DATA))
mmc->caps |= MMC_CAP_4_BIT_DATA;
if (caps & SDHCI_CAN_DO_HISPD)
- mmc->caps |= MMC_CAP_SD_HIGHSPEED;
+ mmc->caps |= MMC_CAP_SD_HIGHSPEED | MMC_CAP_MMC_HIGHSPEED;
- if (host->quirks & SDHCI_QUIRK_BROKEN_CARD_DETECTION)
+ if ((host->quirks & SDHCI_QUIRK_BROKEN_CARD_DETECTION) &&
+ mmc_card_is_removable(mmc))
mmc->caps |= MMC_CAP_NEEDS_POLL;
mmc->ocr_avail = 0;
@@ -1850,12 +1911,11 @@ int sdhci_add_host(struct sdhci_host *host)
* can do scatter/gather or not.
*/
if (host->flags & SDHCI_USE_ADMA)
- mmc->max_hw_segs = 128;
+ mmc->max_segs = 128;
else if (host->flags & SDHCI_USE_SDMA)
- mmc->max_hw_segs = 1;
+ mmc->max_segs = 1;
else /* PIO */
- mmc->max_hw_segs = 128;
- mmc->max_phys_segs = 128;
+ mmc->max_segs = 128;
/*
* Maximum number of sectors in one transfer. Limited by DMA boundary
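The sdhci_set_clock() change above distinguishes spec 2.00 hosts, which only accept a power-of-two divisor up to 256, from spec 3.00 hosts, which accept any even divisor up to 2046; the chosen value is then halved and split across the low and high divider fields of SDHCI_CLOCK_CONTROL. A self-contained sketch of the divisor selection alone; sdhci_pick_div() is an illustrative name, not a kernel symbol:

#include <stdio.h>

#define MAX_DIV_SPEC_200 256
#define MAX_DIV_SPEC_300 2046

static int sdhci_pick_div(unsigned int max_clk, unsigned int clock, int spec_300)
{
        int div;

        if (spec_300) {
                /* v3.00: divisor 1 ("no division") or any even value up to 2046 */
                if (max_clk <= clock)
                        return 1;
                for (div = 2; div < MAX_DIV_SPEC_300; div += 2)
                        if (max_clk / div <= clock)
                                break;
        } else {
                /* v2.00: power-of-two divisor up to 256 */
                for (div = 1; div < MAX_DIV_SPEC_200; div *= 2)
                        if (max_clk / div <= clock)
                                break;
        }
        return div;
}

int main(void)
{
        /* 100 MHz base clock, 400 kHz target: v2 picks 256, v3 picks 250 */
        printf("spec 2.00: /%d, spec 3.00: /%d\n",
               sdhci_pick_div(100000000, 400000, 0),
               sdhci_pick_div(100000000, 400000, 1));
        return 0;
}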
diff --git a/drivers/mmc/host/sdhci.h b/drivers/mmc/host/sdhci.h
index d316bc79b636..e42d7f00c060 100644
--- a/drivers/mmc/host/sdhci.h
+++ b/drivers/mmc/host/sdhci.h
@@ -1,6 +1,8 @@
/*
* linux/drivers/mmc/host/sdhci.h - Secure Digital Host Controller Interface driver
*
+ * Header file for Host Controller registers and I/O accessors.
+ *
* Copyright (C) 2005-2008 Pierre Ossman, All Rights Reserved.
*
* This program is free software; you can redistribute it and/or modify
@@ -8,14 +10,16 @@
* the Free Software Foundation; either version 2 of the License, or (at
* your option) any later version.
*/
-#ifndef __SDHCI_H
-#define __SDHCI_H
+#ifndef __SDHCI_HW_H
+#define __SDHCI_HW_H
#include <linux/scatterlist.h>
#include <linux/compiler.h>
#include <linux/types.h>
#include <linux/io.h>
+#include <linux/mmc/sdhci.h>
+
/*
* Controller registers
*/
@@ -72,7 +76,7 @@
#define SDHCI_CTRL_ADMA1 0x08
#define SDHCI_CTRL_ADMA32 0x10
#define SDHCI_CTRL_ADMA64 0x18
-#define SDHCI_CTRL_8BITBUS 0x20
+#define SDHCI_CTRL_8BITBUS 0x20
#define SDHCI_POWER_CONTROL 0x29
#define SDHCI_POWER_ON 0x01
@@ -83,9 +87,16 @@
#define SDHCI_BLOCK_GAP_CONTROL 0x2A
#define SDHCI_WAKE_UP_CONTROL 0x2B
+#define SDHCI_WAKE_ON_INT 0x01
+#define SDHCI_WAKE_ON_INSERT 0x02
+#define SDHCI_WAKE_ON_REMOVE 0x04
#define SDHCI_CLOCK_CONTROL 0x2C
#define SDHCI_DIVIDER_SHIFT 8
+#define SDHCI_DIVIDER_HI_SHIFT 6
+#define SDHCI_DIV_MASK 0xFF
+#define SDHCI_DIV_MASK_LEN 8
+#define SDHCI_DIV_HI_MASK 0x300
#define SDHCI_CLOCK_CARD_EN 0x0004
#define SDHCI_CLOCK_INT_STABLE 0x0002
#define SDHCI_CLOCK_INT_EN 0x0001
@@ -140,9 +151,11 @@
#define SDHCI_TIMEOUT_CLK_SHIFT 0
#define SDHCI_TIMEOUT_CLK_UNIT 0x00000080
#define SDHCI_CLOCK_BASE_MASK 0x00003F00
+#define SDHCI_CLOCK_V3_BASE_MASK 0x0000FF00
#define SDHCI_CLOCK_BASE_SHIFT 8
#define SDHCI_MAX_BLOCK_MASK 0x00030000
#define SDHCI_MAX_BLOCK_SHIFT 16
+#define SDHCI_CAN_DO_8BIT 0x00040000
#define SDHCI_CAN_DO_ADMA2 0x00080000
#define SDHCI_CAN_DO_ADMA1 0x00100000
#define SDHCI_CAN_DO_HISPD 0x00200000
@@ -178,134 +191,14 @@
#define SDHCI_SPEC_VER_SHIFT 0
#define SDHCI_SPEC_100 0
#define SDHCI_SPEC_200 1
+#define SDHCI_SPEC_300 2
-struct sdhci_ops;
-
-struct sdhci_host {
- /* Data set by hardware interface driver */
- const char *hw_name; /* Hardware bus name */
-
- unsigned int quirks; /* Deviations from spec. */
-
-/* Controller doesn't honor resets unless we touch the clock register */
-#define SDHCI_QUIRK_CLOCK_BEFORE_RESET (1<<0)
-/* Controller has bad caps bits, but really supports DMA */
-#define SDHCI_QUIRK_FORCE_DMA (1<<1)
-/* Controller doesn't like to be reset when there is no card inserted. */
-#define SDHCI_QUIRK_NO_CARD_NO_RESET (1<<2)
-/* Controller doesn't like clearing the power reg before a change */
-#define SDHCI_QUIRK_SINGLE_POWER_WRITE (1<<3)
-/* Controller has flaky internal state so reset it on each ios change */
-#define SDHCI_QUIRK_RESET_CMD_DATA_ON_IOS (1<<4)
-/* Controller has an unusable DMA engine */
-#define SDHCI_QUIRK_BROKEN_DMA (1<<5)
-/* Controller has an unusable ADMA engine */
-#define SDHCI_QUIRK_BROKEN_ADMA (1<<6)
-/* Controller can only DMA from 32-bit aligned addresses */
-#define SDHCI_QUIRK_32BIT_DMA_ADDR (1<<7)
-/* Controller can only DMA chunk sizes that are a multiple of 32 bits */
-#define SDHCI_QUIRK_32BIT_DMA_SIZE (1<<8)
-/* Controller can only ADMA chunks that are a multiple of 32 bits */
-#define SDHCI_QUIRK_32BIT_ADMA_SIZE (1<<9)
-/* Controller needs to be reset after each request to stay stable */
-#define SDHCI_QUIRK_RESET_AFTER_REQUEST (1<<10)
-/* Controller needs voltage and power writes to happen separately */
-#define SDHCI_QUIRK_NO_SIMULT_VDD_AND_POWER (1<<11)
-/* Controller provides an incorrect timeout value for transfers */
-#define SDHCI_QUIRK_BROKEN_TIMEOUT_VAL (1<<12)
-/* Controller has an issue with buffer bits for small transfers */
-#define SDHCI_QUIRK_BROKEN_SMALL_PIO (1<<13)
-/* Controller does not provide transfer-complete interrupt when not busy */
-#define SDHCI_QUIRK_NO_BUSY_IRQ (1<<14)
-/* Controller has unreliable card detection */
-#define SDHCI_QUIRK_BROKEN_CARD_DETECTION (1<<15)
-/* Controller reports inverted write-protect state */
-#define SDHCI_QUIRK_INVERTED_WRITE_PROTECT (1<<16)
-/* Controller has nonstandard clock management */
-#define SDHCI_QUIRK_NONSTANDARD_CLOCK (1<<17)
-/* Controller does not like fast PIO transfers */
-#define SDHCI_QUIRK_PIO_NEEDS_DELAY (1<<18)
-/* Controller losing signal/interrupt enable states after reset */
-#define SDHCI_QUIRK_RESTORE_IRQS_AFTER_RESET (1<<19)
-/* Controller has to be forced to use block size of 2048 bytes */
-#define SDHCI_QUIRK_FORCE_BLK_SZ_2048 (1<<20)
-/* Controller cannot do multi-block transfers */
-#define SDHCI_QUIRK_NO_MULTIBLOCK (1<<21)
-/* Controller can only handle 1-bit data transfers */
-#define SDHCI_QUIRK_FORCE_1_BIT_DATA (1<<22)
-/* Controller needs 10ms delay between applying power and clock */
-#define SDHCI_QUIRK_DELAY_AFTER_POWER (1<<23)
-/* Controller uses SDCLK instead of TMCLK for data timeouts */
-#define SDHCI_QUIRK_DATA_TIMEOUT_USES_SDCLK (1<<24)
-/* Controller reports wrong base clock capability */
-#define SDHCI_QUIRK_CAP_CLOCK_BASE_BROKEN (1<<25)
-/* Controller cannot support End Attribute in NOP ADMA descriptor */
-#define SDHCI_QUIRK_NO_ENDATTR_IN_NOPDESC (1<<26)
-/* Controller is missing device caps. Use caps provided by host */
-#define SDHCI_QUIRK_MISSING_CAPS (1<<27)
-/* Controller uses Auto CMD12 command to stop the transfer */
-#define SDHCI_QUIRK_MULTIBLOCK_READ_ACMD12 (1<<28)
-/* Controller doesn't have HISPD bit field in HI-SPEED SD card */
-#define SDHCI_QUIRK_NO_HISPD_BIT (1<<29)
-
- int irq; /* Device IRQ */
- void __iomem * ioaddr; /* Mapped address */
-
- const struct sdhci_ops *ops; /* Low level hw interface */
-
- struct regulator *vmmc; /* Power regulator */
-
- /* Internal data */
- struct mmc_host *mmc; /* MMC structure */
- u64 dma_mask; /* custom DMA mask */
-
-#if defined(CONFIG_LEDS_CLASS) || defined(CONFIG_LEDS_CLASS_MODULE)
- struct led_classdev led; /* LED control */
- char led_name[32];
-#endif
-
- spinlock_t lock; /* Mutex */
-
- int flags; /* Host attributes */
-#define SDHCI_USE_SDMA (1<<0) /* Host is SDMA capable */
-#define SDHCI_USE_ADMA (1<<1) /* Host is ADMA capable */
-#define SDHCI_REQ_USE_DMA (1<<2) /* Use DMA for this req. */
-#define SDHCI_DEVICE_DEAD (1<<3) /* Device unresponsive */
-
- unsigned int version; /* SDHCI spec. version */
-
- unsigned int max_clk; /* Max possible freq (MHz) */
- unsigned int timeout_clk; /* Timeout freq (KHz) */
-
- unsigned int clock; /* Current clock (MHz) */
- u8 pwr; /* Current voltage */
-
- struct mmc_request *mrq; /* Current request */
- struct mmc_command *cmd; /* Current command */
- struct mmc_data *data; /* Current data request */
- unsigned int data_early:1; /* Data finished before cmd */
-
- struct sg_mapping_iter sg_miter; /* SG state for PIO */
- unsigned int blocks; /* remaining PIO blocks */
-
- int sg_count; /* Mapped sg entries */
-
- u8 *adma_desc; /* ADMA descriptor table */
- u8 *align_buffer; /* Bounce buffer */
-
- dma_addr_t adma_addr; /* Mapped ADMA descr. table */
- dma_addr_t align_addr; /* Mapped bounce buffer */
-
- struct tasklet_struct card_tasklet; /* Tasklet structures */
- struct tasklet_struct finish_tasklet;
-
- struct timer_list timer; /* Timer for timeouts */
-
- unsigned int caps; /* Alternative capabilities */
-
- unsigned long private[0] ____cacheline_aligned;
-};
+/*
+ * End of controller registers.
+ */
+#define SDHCI_MAX_DIV_SPEC_200 256
+#define SDHCI_MAX_DIV_SPEC_300 2046
struct sdhci_ops {
#ifdef CONFIG_MMC_SDHCI_IO_ACCESSORS
@@ -323,6 +216,11 @@ struct sdhci_ops {
unsigned int (*get_max_clock)(struct sdhci_host *host);
unsigned int (*get_min_clock)(struct sdhci_host *host);
unsigned int (*get_timeout_clock)(struct sdhci_host *host);
+ int (*platform_8bit_width)(struct sdhci_host *host,
+ int width);
+ void (*platform_send_init_74_clocks)(struct sdhci_host *host,
+ u8 power_mode);
+ unsigned int (*get_ro)(struct sdhci_host *host);
};
#ifdef CONFIG_MMC_SDHCI_IO_ACCESSORS
@@ -425,6 +323,7 @@ extern void sdhci_remove_host(struct sdhci_host *host, int dead);
#ifdef CONFIG_PM
extern int sdhci_suspend_host(struct sdhci_host *host, pm_message_t state);
extern int sdhci_resume_host(struct sdhci_host *host);
+extern void sdhci_enable_irq_wakeups(struct sdhci_host *host);
#endif
-#endif /* __SDHCI_H */
+#endif /* __SDHCI_HW_H */
diff --git a/drivers/mmc/host/sh_mmcif.c b/drivers/mmc/host/sh_mmcif.c
index 5d3f824bb5a3..ddd09840520b 100644
--- a/drivers/mmc/host/sh_mmcif.c
+++ b/drivers/mmc/host/sh_mmcif.c
@@ -710,9 +710,21 @@ static void sh_mmcif_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
host->bus_width = ios->bus_width;
}
+static int sh_mmcif_get_cd(struct mmc_host *mmc)
+{
+ struct sh_mmcif_host *host = mmc_priv(mmc);
+ struct sh_mmcif_plat_data *p = host->pd->dev.platform_data;
+
+ if (!p->get_cd)
+ return -ENOSYS;
+ else
+ return p->get_cd(host->pd);
+}
+
static struct mmc_host_ops sh_mmcif_ops = {
.request = sh_mmcif_request,
.set_ios = sh_mmcif_set_ios,
+ .get_cd = sh_mmcif_get_cd,
};
static void sh_mmcif_detect(struct mmc_host *mmc)
@@ -846,8 +858,7 @@ static int __devinit sh_mmcif_probe(struct platform_device *pdev)
mmc->caps = MMC_CAP_MMC_HIGHSPEED;
if (pd->caps)
mmc->caps |= pd->caps;
- mmc->max_phys_segs = 128;
- mmc->max_hw_segs = 128;
+ mmc->max_segs = 128;
mmc->max_blk_size = 512;
mmc->max_blk_count = 65535;
mmc->max_req_size = mmc->max_blk_size * mmc->max_blk_count;
diff --git a/drivers/mmc/host/tifm_sd.c b/drivers/mmc/host/tifm_sd.c
index cec99958b652..457c26ea09de 100644
--- a/drivers/mmc/host/tifm_sd.c
+++ b/drivers/mmc/host/tifm_sd.c
@@ -978,11 +978,10 @@ static int tifm_sd_probe(struct tifm_dev *sock)
mmc->f_max = 24000000;
mmc->max_blk_count = 2048;
- mmc->max_hw_segs = mmc->max_blk_count;
+ mmc->max_segs = mmc->max_blk_count;
mmc->max_blk_size = min(TIFM_MMCSD_MAX_BLOCK_SIZE, PAGE_SIZE);
mmc->max_seg_size = mmc->max_blk_count * mmc->max_blk_size;
mmc->max_req_size = mmc->max_seg_size;
- mmc->max_phys_segs = mmc->max_hw_segs;
sock->card_event = tifm_sd_card_event;
sock->data_event = tifm_sd_data_event;
diff --git a/drivers/mmc/host/tmio_mmc.c b/drivers/mmc/host/tmio_mmc.c
index 69d98e3bf6ab..e7765a89593e 100644
--- a/drivers/mmc/host/tmio_mmc.c
+++ b/drivers/mmc/host/tmio_mmc.c
@@ -658,14 +658,21 @@ static void tmio_mmc_release_dma(struct tmio_mmc_host *host)
static int tmio_mmc_start_data(struct tmio_mmc_host *host,
struct mmc_data *data)
{
+ struct mfd_cell *cell = host->pdev->dev.platform_data;
+ struct tmio_mmc_data *pdata = cell->driver_data;
+
pr_debug("setup data transfer: blocksize %08x nr_blocks %d\n",
data->blksz, data->blocks);
- /* Hardware cannot perform 1 and 2 byte requests in 4 bit mode */
- if (data->blksz < 4 && host->mmc->ios.bus_width == MMC_BUS_WIDTH_4) {
- pr_err("%s: %d byte block unsupported in 4 bit mode\n",
- mmc_hostname(host->mmc), data->blksz);
- return -EINVAL;
+ /* Some hardware cannot perform 2 byte requests in 4 bit mode */
+ if (host->mmc->ios.bus_width == MMC_BUS_WIDTH_4) {
+ int blksz_2bytes = pdata->flags & TMIO_MMC_BLKSZ_2BYTES;
+
+ if (data->blksz < 2 || (data->blksz < 4 && !blksz_2bytes)) {
+ pr_err("%s: %d byte block unsupported in 4 bit mode\n",
+ mmc_hostname(host->mmc), data->blksz);
+ return -EINVAL;
+ }
}
tmio_mmc_init_sg(host, data);
@@ -756,10 +763,23 @@ static int tmio_mmc_get_ro(struct mmc_host *mmc)
(sd_ctrl_read32(host, CTL_STATUS) & TMIO_STAT_WRPROTECT)) ? 0 : 1;
}
+static int tmio_mmc_get_cd(struct mmc_host *mmc)
+{
+ struct tmio_mmc_host *host = mmc_priv(mmc);
+ struct mfd_cell *cell = host->pdev->dev.platform_data;
+ struct tmio_mmc_data *pdata = cell->driver_data;
+
+ if (!pdata->get_cd)
+ return -ENOSYS;
+ else
+ return pdata->get_cd(host->pdev);
+}
+
static const struct mmc_host_ops tmio_mmc_ops = {
.request = tmio_mmc_request,
.set_ios = tmio_mmc_set_ios,
.get_ro = tmio_mmc_get_ro,
+ .get_cd = tmio_mmc_get_cd,
};
#ifdef CONFIG_PM
diff --git a/drivers/mmc/host/ushc.c b/drivers/mmc/host/ushc.c
new file mode 100644
index 000000000000..f8f65df9b017
--- /dev/null
+++ b/drivers/mmc/host/ushc.c
@@ -0,0 +1,580 @@
+/*
+ * USB SD Host Controller (USHC) controller driver.
+ *
+ * Copyright (C) 2010 Cambridge Silicon Radio Ltd.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or (at
+ * your option) any later version.
+ *
+ * Notes:
+ * - Only version 2 devices are supported.
+ * - Version 2 devices only support SDIO cards/devices (R2 response is
+ * unsupported).
+ *
+ * References:
+ * [USHC] USB SD Host Controller specification (CS-118793-SP)
+ */
+#include <linux/module.h>
+#include <linux/usb.h>
+#include <linux/kernel.h>
+#include <linux/slab.h>
+#include <linux/dma-mapping.h>
+#include <linux/mmc/host.h>
+
+enum ushc_request {
+ USHC_GET_CAPS = 0x00,
+ USHC_HOST_CTRL = 0x01,
+ USHC_PWR_CTRL = 0x02,
+ USHC_CLK_FREQ = 0x03,
+ USHC_EXEC_CMD = 0x04,
+ USHC_READ_RESP = 0x05,
+ USHC_RESET = 0x06,
+};
+
+enum ushc_request_type {
+ USHC_GET_CAPS_TYPE = USB_DIR_IN | USB_TYPE_VENDOR | USB_RECIP_DEVICE,
+ USHC_HOST_CTRL_TYPE = USB_DIR_OUT | USB_TYPE_VENDOR | USB_RECIP_DEVICE,
+ USHC_PWR_CTRL_TYPE = USB_DIR_OUT | USB_TYPE_VENDOR | USB_RECIP_DEVICE,
+ USHC_CLK_FREQ_TYPE = USB_DIR_OUT | USB_TYPE_VENDOR | USB_RECIP_DEVICE,
+ USHC_EXEC_CMD_TYPE = USB_DIR_OUT | USB_TYPE_VENDOR | USB_RECIP_DEVICE,
+ USHC_READ_RESP_TYPE = USB_DIR_IN | USB_TYPE_VENDOR | USB_RECIP_DEVICE,
+ USHC_RESET_TYPE = USB_DIR_OUT | USB_TYPE_VENDOR | USB_RECIP_DEVICE,
+};
+
+#define USHC_GET_CAPS_VERSION_MASK 0xff
+#define USHC_GET_CAPS_3V3 (1 << 8)
+#define USHC_GET_CAPS_3V0 (1 << 9)
+#define USHC_GET_CAPS_1V8 (1 << 10)
+#define USHC_GET_CAPS_HIGH_SPD (1 << 16)
+
+#define USHC_HOST_CTRL_4BIT (1 << 1)
+#define USHC_HOST_CTRL_HIGH_SPD (1 << 0)
+
+#define USHC_PWR_CTRL_OFF 0x00
+#define USHC_PWR_CTRL_3V3 0x01
+#define USHC_PWR_CTRL_3V0 0x02
+#define USHC_PWR_CTRL_1V8 0x03
+
+#define USHC_READ_RESP_BUSY (1 << 4)
+#define USHC_READ_RESP_ERR_TIMEOUT (1 << 3)
+#define USHC_READ_RESP_ERR_CRC (1 << 2)
+#define USHC_READ_RESP_ERR_DAT (1 << 1)
+#define USHC_READ_RESP_ERR_CMD (1 << 0)
+#define USHC_READ_RESP_ERR_MASK 0x0f
+
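+/*
+ * The CBW is sent to the device on the bulk OUT endpoint to start a
+ * command; the device then returns a CSW carrying the command status
+ * and response on the bulk IN endpoint.
+ */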
+struct ushc_cbw {
+ __u8 signature;
+ __u8 cmd_idx;
+ __le16 block_size;
+ __le32 arg;
+} __attribute__((packed));
+
+#define USHC_CBW_SIGNATURE 'C'
+
+struct ushc_csw {
+ __u8 signature;
+ __u8 status;
+ __le32 response;
+} __attribute__((packed));
+
+#define USHC_CSW_SIGNATURE 'S'
+
+struct ushc_int_data {
+ u8 status;
+ u8 reserved[3];
+};
+
+#define USHC_INT_STATUS_SDIO_INT (1 << 1)
+#define USHC_INT_STATUS_CARD_PRESENT (1 << 0)
+
+
+struct ushc_data {
+ struct usb_device *usb_dev;
+ struct mmc_host *mmc;
+
+ struct urb *int_urb;
+ struct ushc_int_data *int_data;
+
+ struct urb *cbw_urb;
+ struct ushc_cbw *cbw;
+
+ struct urb *data_urb;
+
+ struct urb *csw_urb;
+ struct ushc_csw *csw;
+
+ spinlock_t lock;
+ struct mmc_request *current_req;
+ u32 caps;
+ u16 host_ctrl;
+ unsigned long flags;
+ u8 last_status;
+ int clock_freq;
+};
+
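+/* Bit numbers for the ushc_data.flags word (used with set_bit()/test_bit()). */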
+#define DISCONNECTED 0
+#define INT_EN 1
+#define IGNORE_NEXT_INT 2
+
+static void data_callback(struct urb *urb);
+
+static int ushc_hw_reset(struct ushc_data *ushc)
+{
+ return usb_control_msg(ushc->usb_dev, usb_sndctrlpipe(ushc->usb_dev, 0),
+ USHC_RESET, USHC_RESET_TYPE,
+ 0, 0, NULL, 0, 100);
+}
+
+static int ushc_hw_get_caps(struct ushc_data *ushc)
+{
+ int ret;
+ int version;
+
+ ret = usb_control_msg(ushc->usb_dev, usb_rcvctrlpipe(ushc->usb_dev, 0),
+ USHC_GET_CAPS, USHC_GET_CAPS_TYPE,
+ 0, 0, &ushc->caps, sizeof(ushc->caps), 100);
+ if (ret < 0)
+ return ret;
+
+ ushc->caps = le32_to_cpu(ushc->caps);
+
+ version = ushc->caps & USHC_GET_CAPS_VERSION_MASK;
+ if (version != 0x02) {
+ dev_err(&ushc->usb_dev->dev, "controller version %d is not supported\n", version);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int ushc_hw_set_host_ctrl(struct ushc_data *ushc, u16 mask, u16 val)
+{
+ u16 host_ctrl;
+ int ret;
+
+ host_ctrl = (ushc->host_ctrl & ~mask) | val;
+ ret = usb_control_msg(ushc->usb_dev, usb_sndctrlpipe(ushc->usb_dev, 0),
+ USHC_HOST_CTRL, USHC_HOST_CTRL_TYPE,
+ host_ctrl, 0, NULL, 0, 100);
+ if (ret < 0)
+ return ret;
+ ushc->host_ctrl = host_ctrl;
+ return 0;
+}
+
+static void int_callback(struct urb *urb)
+{
+ struct ushc_data *ushc = urb->context;
+ u8 status, last_status;
+
+ if (urb->status < 0)
+ return;
+
+ status = ushc->int_data->status;
+ last_status = ushc->last_status;
+ ushc->last_status = status;
+
+ /*
+ * Ignore the card interrupt status on interrupt transfers that
+ * were submitted while card interrupts were disabled.
+ *
+ * This avoids occasional spurious interrupts when enabling
+ * interrupts immediately after clearing the source on the card.
+ */
+
+ if (!test_and_clear_bit(IGNORE_NEXT_INT, &ushc->flags)
+ && test_bit(INT_EN, &ushc->flags)
+ && status & USHC_INT_STATUS_SDIO_INT) {
+ mmc_signal_sdio_irq(ushc->mmc);
+ }
+
+ if ((status ^ last_status) & USHC_INT_STATUS_CARD_PRESENT)
+ mmc_detect_change(ushc->mmc, msecs_to_jiffies(100));
+
+ if (!test_bit(INT_EN, &ushc->flags))
+ set_bit(IGNORE_NEXT_INT, &ushc->flags);
+ usb_submit_urb(ushc->int_urb, GFP_ATOMIC);
+}
+
+static void cbw_callback(struct urb *urb)
+{
+ struct ushc_data *ushc = urb->context;
+
+ if (urb->status != 0) {
+ usb_unlink_urb(ushc->data_urb);
+ usb_unlink_urb(ushc->csw_urb);
+ }
+}
+
+static void data_callback(struct urb *urb)
+{
+ struct ushc_data *ushc = urb->context;
+
+ if (urb->status != 0)
+ usb_unlink_urb(ushc->csw_urb);
+}
+
+static void csw_callback(struct urb *urb)
+{
+ struct ushc_data *ushc = urb->context;
+ struct mmc_request *req = ushc->current_req;
+ int status;
+
+ status = ushc->csw->status;
+
+ if (urb->status != 0) {
+ req->cmd->error = urb->status;
+ } else if (status & USHC_READ_RESP_ERR_CMD) {
+ if (status & USHC_READ_RESP_ERR_CRC)
+ req->cmd->error = -EIO;
+ else
+ req->cmd->error = -ETIMEDOUT;
+ }
+ if (req->data) {
+ if (status & USHC_READ_RESP_ERR_DAT) {
+ if (status & USHC_READ_RESP_ERR_CRC)
+ req->data->error = -EIO;
+ else
+ req->data->error = -ETIMEDOUT;
+ req->data->bytes_xfered = 0;
+ } else {
+ req->data->bytes_xfered = req->data->blksz * req->data->blocks;
+ }
+ }
+
+ req->cmd->resp[0] = le32_to_cpu(ushc->csw->response);
+
+ mmc_request_done(ushc->mmc, req);
+}
+
+static void ushc_request(struct mmc_host *mmc, struct mmc_request *req)
+{
+ struct ushc_data *ushc = mmc_priv(mmc);
+ int ret;
+ unsigned long flags;
+
+ spin_lock_irqsave(&ushc->lock, flags);
+
+ if (test_bit(DISCONNECTED, &ushc->flags)) {
+ ret = -ENODEV;
+ goto out;
+ }
+
+ /* Version 2 firmware doesn't support the R2 response format. */
+ if (req->cmd->flags & MMC_RSP_136) {
+ ret = -EINVAL;
+ goto out;
+ }
+
+ /*
+ * The Astoria's data FIFOs don't work with clock speeds < 5 MHz, so
+ * limit commands with data to 6 MHz or more.
+ */
+ if (req->data && ushc->clock_freq < 6000000) {
+ ret = -EINVAL;
+ goto out;
+ }
+
+ ushc->current_req = req;
+
+ /* Start cmd with CBW. */
+ ushc->cbw->cmd_idx = req->cmd->opcode;
+ if (req->data)
+ ushc->cbw->block_size = cpu_to_le16(req->data->blksz);
+ else
+ ushc->cbw->block_size = 0;
+ ushc->cbw->arg = cpu_to_le32(req->cmd->arg);
+
+ ret = usb_submit_urb(ushc->cbw_urb, GFP_ATOMIC);
+ if (ret < 0)
+ goto out;
+
+ /* Submit data (if any). */
+ if (req->data) {
+ struct mmc_data *data = req->data;
+ int pipe;
+
+ if (data->flags & MMC_DATA_READ)
+ pipe = usb_rcvbulkpipe(ushc->usb_dev, 6);
+ else
+ pipe = usb_sndbulkpipe(ushc->usb_dev, 2);
+
+ usb_fill_bulk_urb(ushc->data_urb, ushc->usb_dev, pipe,
+ sg_virt(data->sg), data->sg->length,
+ data_callback, ushc);
+ ret = usb_submit_urb(ushc->data_urb, GFP_ATOMIC);
+ if (ret < 0)
+ goto out;
+ }
+
+ /* Submit CSW. */
+ ret = usb_submit_urb(ushc->csw_urb, GFP_ATOMIC);
+ if (ret < 0)
+ goto out;
+
+out:
+ spin_unlock_irqrestore(&ushc->lock, flags);
+ if (ret < 0) {
+ usb_unlink_urb(ushc->cbw_urb);
+ usb_unlink_urb(ushc->data_urb);
+ req->cmd->error = ret;
+ mmc_request_done(mmc, req);
+ }
+}
+
+static int ushc_set_power(struct ushc_data *ushc, unsigned char power_mode)
+{
+ u16 voltage;
+
+ switch (power_mode) {
+ case MMC_POWER_OFF:
+ voltage = USHC_PWR_CTRL_OFF;
+ break;
+ case MMC_POWER_UP:
+ case MMC_POWER_ON:
+ voltage = USHC_PWR_CTRL_3V3;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ return usb_control_msg(ushc->usb_dev, usb_sndctrlpipe(ushc->usb_dev, 0),
+ USHC_PWR_CTRL, USHC_PWR_CTRL_TYPE,
+ voltage, 0, NULL, 0, 100);
+}
+
+static int ushc_set_bus_width(struct ushc_data *ushc, int bus_width)
+{
+ return ushc_hw_set_host_ctrl(ushc, USHC_HOST_CTRL_4BIT,
+ bus_width == 4 ? USHC_HOST_CTRL_4BIT : 0);
+}
+
+static int ushc_set_bus_freq(struct ushc_data *ushc, int clk, bool enable_hs)
+{
+ int ret;
+
+ /* Hardware can't detect interrupts while the clock is off. */
+ if (clk == 0)
+ clk = 400000;
+
+ ret = ushc_hw_set_host_ctrl(ushc, USHC_HOST_CTRL_HIGH_SPD,
+ enable_hs ? USHC_HOST_CTRL_HIGH_SPD : 0);
+ if (ret < 0)
+ return ret;
+
+ ret = usb_control_msg(ushc->usb_dev, usb_sndctrlpipe(ushc->usb_dev, 0),
+ USHC_CLK_FREQ, USHC_CLK_FREQ_TYPE,
+ clk & 0xffff, (clk >> 16) & 0xffff, NULL, 0, 100);
+ if (ret < 0)
+ return ret;
+
+ ushc->clock_freq = clk;
+ return 0;
+}
+
+static void ushc_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
+{
+ struct ushc_data *ushc = mmc_priv(mmc);
+
+ ushc_set_power(ushc, ios->power_mode);
+ ushc_set_bus_width(ushc, 1 << ios->bus_width);
+ ushc_set_bus_freq(ushc, ios->clock, ios->timing == MMC_TIMING_SD_HS);
+}
+
+static int ushc_get_cd(struct mmc_host *mmc)
+{
+ struct ushc_data *ushc = mmc_priv(mmc);
+
+ return !!(ushc->last_status & USHC_INT_STATUS_CARD_PRESENT);
+}
+
+static void ushc_enable_sdio_irq(struct mmc_host *mmc, int enable)
+{
+ struct ushc_data *ushc = mmc_priv(mmc);
+
+ if (enable)
+ set_bit(INT_EN, &ushc->flags);
+ else
+ clear_bit(INT_EN, &ushc->flags);
+}
+
+static void ushc_clean_up(struct ushc_data *ushc)
+{
+ usb_free_urb(ushc->int_urb);
+ usb_free_urb(ushc->csw_urb);
+ usb_free_urb(ushc->data_urb);
+ usb_free_urb(ushc->cbw_urb);
+
+ kfree(ushc->int_data);
+ kfree(ushc->cbw);
+ kfree(ushc->csw);
+
+ mmc_free_host(ushc->mmc);
+}
+
+static const struct mmc_host_ops ushc_ops = {
+ .request = ushc_request,
+ .set_ios = ushc_set_ios,
+ .get_cd = ushc_get_cd,
+ .enable_sdio_irq = ushc_enable_sdio_irq,
+};
+
+static int ushc_probe(struct usb_interface *intf, const struct usb_device_id *id)
+{
+ struct usb_device *usb_dev = interface_to_usbdev(intf);
+ struct mmc_host *mmc;
+ struct ushc_data *ushc;
+ int ret;
+
+ mmc = mmc_alloc_host(sizeof(struct ushc_data), &intf->dev);
+ if (mmc == NULL)
+ return -ENOMEM;
+ ushc = mmc_priv(mmc);
+ usb_set_intfdata(intf, ushc);
+
+ ushc->usb_dev = usb_dev;
+ ushc->mmc = mmc;
+
+ spin_lock_init(&ushc->lock);
+
+ ret = ushc_hw_reset(ushc);
+ if (ret < 0)
+ goto err;
+
+ /* Read capabilities. */
+ ret = ushc_hw_get_caps(ushc);
+ if (ret < 0)
+ goto err;
+
+ mmc->ops = &ushc_ops;
+
+ mmc->f_min = 400000;
+ mmc->f_max = 50000000;
+ mmc->ocr_avail = MMC_VDD_32_33 | MMC_VDD_33_34;
+ mmc->caps = MMC_CAP_4_BIT_DATA | MMC_CAP_SDIO_IRQ;
+ mmc->caps |= (ushc->caps & USHC_GET_CAPS_HIGH_SPD) ? MMC_CAP_SD_HIGHSPEED : 0;
+
+ mmc->max_seg_size = 512*511;
+ mmc->max_segs = 1;
+ mmc->max_req_size = 512*511;
+ mmc->max_blk_size = 512;
+ mmc->max_blk_count = 511;
+
+ ushc->int_urb = usb_alloc_urb(0, GFP_KERNEL);
+ if (ushc->int_urb == NULL) {
+ ret = -ENOMEM;
+ goto err;
+ }
+ ushc->int_data = kzalloc(sizeof(struct ushc_int_data), GFP_KERNEL);
+ if (ushc->int_data == NULL) {
+ ret = -ENOMEM;
+ goto err;
+ }
+ usb_fill_int_urb(ushc->int_urb, ushc->usb_dev,
+ usb_rcvintpipe(usb_dev,
+ intf->cur_altsetting->endpoint[0].desc.bEndpointAddress),
+ ushc->int_data, sizeof(struct ushc_int_data),
+ int_callback, ushc,
+ intf->cur_altsetting->endpoint[0].desc.bInterval);
+
+ ushc->cbw_urb = usb_alloc_urb(0, GFP_KERNEL);
+ if (ushc->cbw_urb == NULL) {
+ ret = -ENOMEM;
+ goto err;
+ }
+ ushc->cbw = kzalloc(sizeof(struct ushc_cbw), GFP_KERNEL);
+ if (ushc->cbw == NULL) {
+ ret = -ENOMEM;
+ goto err;
+ }
+ ushc->cbw->signature = USHC_CBW_SIGNATURE;
+
+ usb_fill_bulk_urb(ushc->cbw_urb, ushc->usb_dev, usb_sndbulkpipe(usb_dev, 2),
+ ushc->cbw, sizeof(struct ushc_cbw),
+ cbw_callback, ushc);
+
+ ushc->data_urb = usb_alloc_urb(0, GFP_KERNEL);
+ if (ushc->data_urb == NULL) {
+ ret = -ENOMEM;
+ goto err;
+ }
+
+ ushc->csw_urb = usb_alloc_urb(0, GFP_KERNEL);
+ if (ushc->csw_urb == NULL) {
+ ret = -ENOMEM;
+ goto err;
+ }
+ ushc->csw = kzalloc(sizeof(struct ushc_csw), GFP_KERNEL);
+ if (ushc->csw == NULL) {
+ ret = -ENOMEM;
+ goto err;
+ }
+ usb_fill_bulk_urb(ushc->csw_urb, ushc->usb_dev, usb_rcvbulkpipe(usb_dev, 6),
+ ushc->csw, sizeof(struct ushc_csw),
+ csw_callback, ushc);
+
+ ret = mmc_add_host(ushc->mmc);
+ if (ret)
+ goto err;
+
+ ret = usb_submit_urb(ushc->int_urb, GFP_KERNEL);
+ if (ret < 0) {
+ mmc_remove_host(ushc->mmc);
+ goto err;
+ }
+
+ return 0;
+
+err:
+ ushc_clean_up(ushc);
+ return ret;
+}
+
+static void ushc_disconnect(struct usb_interface *intf)
+{
+ struct ushc_data *ushc = usb_get_intfdata(intf);
+
+ spin_lock_irq(&ushc->lock);
+ set_bit(DISCONNECTED, &ushc->flags);
+ spin_unlock_irq(&ushc->lock);
+
+ usb_kill_urb(ushc->int_urb);
+ usb_kill_urb(ushc->cbw_urb);
+ usb_kill_urb(ushc->data_urb);
+ usb_kill_urb(ushc->csw_urb);
+
+ mmc_remove_host(ushc->mmc);
+
+ ushc_clean_up(ushc);
+}
+
+static struct usb_device_id ushc_id_table[] = {
+ /* CSR USB SD Host Controller */
+ { USB_DEVICE(0x0a12, 0x5d10) },
+ { },
+};
+MODULE_DEVICE_TABLE(usb, ushc_id_table);
+
+static struct usb_driver ushc_driver = {
+ .name = "ushc",
+ .id_table = ushc_id_table,
+ .probe = ushc_probe,
+ .disconnect = ushc_disconnect,
+};
+
+static int __init ushc_init(void)
+{
+ return usb_register(&ushc_driver);
+}
+module_init(ushc_init);
+
+static void __exit ushc_exit(void)
+{
+ usb_deregister(&ushc_driver);
+}
+module_exit(ushc_exit);
+
+MODULE_DESCRIPTION("USB SD Host Controller driver");
+MODULE_AUTHOR("David Vrabel <david.vrabel@csr.com>");
+MODULE_LICENSE("GPL");
diff --git a/drivers/mmc/host/via-sdmmc.c b/drivers/mmc/host/via-sdmmc.c
index 19f2d72dbca5..9ed84ddb4780 100644
--- a/drivers/mmc/host/via-sdmmc.c
+++ b/drivers/mmc/host/via-sdmmc.c
@@ -1050,8 +1050,7 @@ static void via_init_mmc_host(struct via_crdr_mmc_host *host)
mmc->ops = &via_sdc_ops;
/*Hardware cannot do scatter lists*/
- mmc->max_hw_segs = 1;
- mmc->max_phys_segs = 1;
+ mmc->max_segs = 1;
mmc->max_blk_size = VIA_CRDR_MAX_BLOCK_LENGTH;
mmc->max_blk_count = VIA_CRDR_MAX_BLOCK_COUNT;
diff --git a/drivers/mmc/host/wbsd.c b/drivers/mmc/host/wbsd.c
index 0012f5d13d28..7fca0a386ba0 100644
--- a/drivers/mmc/host/wbsd.c
+++ b/drivers/mmc/host/wbsd.c
@@ -1235,8 +1235,7 @@ static int __devinit wbsd_alloc_mmc(struct device *dev)
* Maximum number of segments. Worst case is one sector per segment
* so this will be 64kB/512.
*/
- mmc->max_hw_segs = 128;
- mmc->max_phys_segs = 128;
+ mmc->max_segs = 128;
/*
* Maximum request size. Also limited by 64KiB buffer.
diff --git a/drivers/mtd/chips/cfi_cmdset_0001.c b/drivers/mtd/chips/cfi_cmdset_0001.c
index 9e2b7e9e0ad9..ad9268b44416 100644
--- a/drivers/mtd/chips/cfi_cmdset_0001.c
+++ b/drivers/mtd/chips/cfi_cmdset_0001.c
@@ -1496,7 +1496,7 @@ static int __xipram do_write_oneword(struct map_info *map, struct flchip *chip,
switch (mode) {
case FL_WRITING:
- write_cmd = (cfi->cfiq->P_ID != 0x0200) ? CMD(0x40) : CMD(0x41);
+ write_cmd = (cfi->cfiq->P_ID != P_ID_INTEL_PERFORMANCE) ? CMD(0x40) : CMD(0x41);
break;
case FL_OTP_WRITE:
write_cmd = CMD(0xc0);
@@ -1661,7 +1661,7 @@ static int __xipram do_write_buffer(struct map_info *map, struct flchip *chip,
cmd_adr = adr & ~(wbufsize-1);
/* Let's determine this according to the interleave only once */
- write_cmd = (cfi->cfiq->P_ID != 0x0200) ? CMD(0xe8) : CMD(0xe9);
+ write_cmd = (cfi->cfiq->P_ID != P_ID_INTEL_PERFORMANCE) ? CMD(0xe8) : CMD(0xe9);
mutex_lock(&chip->mutex);
ret = get_chip(map, chip, cmd_adr, FL_WRITING);
diff --git a/drivers/mtd/chips/cfi_cmdset_0002.c b/drivers/mtd/chips/cfi_cmdset_0002.c
index ba29d2f0ffd7..3b8e32d87977 100644
--- a/drivers/mtd/chips/cfi_cmdset_0002.c
+++ b/drivers/mtd/chips/cfi_cmdset_0002.c
@@ -291,6 +291,23 @@ static void fixup_sst39vf_rev_b(struct mtd_info *mtd, void *param)
cfi->addr_unlock1 = 0x555;
cfi->addr_unlock2 = 0x2AA;
+
+ cfi->sector_erase_cmd = CMD(0x50);
+}
+
+static void fixup_sst38vf640x_sectorsize(struct mtd_info *mtd, void *param)
+{
+ struct map_info *map = mtd->priv;
+ struct cfi_private *cfi = map->fldrv_priv;
+
+ fixup_sst39vf_rev_b(mtd, param);
+
+ /*
+ * CFI reports 1024 sectors (0x03ff+1) of 64KBytes (0x0100*256) where
+ * it should report a size of 8KBytes (0x0020*256).
+ */
+ cfi->cfiq->EraseRegionInfo[0] = 0x002003ff;
+ pr_warning("%s: Bad 38VF640x CFI data; adjusting sector size from 64 to 8KiB\n", mtd->name);
}
static void fixup_s29gl064n_sectors(struct mtd_info *mtd, void *param)
@@ -317,14 +334,14 @@ static void fixup_s29gl032n_sectors(struct mtd_info *mtd, void *param)
/* Used to fix CFI-Tables of chips without Extended Query Tables */
static struct cfi_fixup cfi_nopri_fixup_table[] = {
- { CFI_MFR_SST, 0x234A, fixup_sst39vf, NULL, }, // SST39VF1602
- { CFI_MFR_SST, 0x234B, fixup_sst39vf, NULL, }, // SST39VF1601
- { CFI_MFR_SST, 0x235A, fixup_sst39vf, NULL, }, // SST39VF3202
- { CFI_MFR_SST, 0x235B, fixup_sst39vf, NULL, }, // SST39VF3201
- { CFI_MFR_SST, 0x235C, fixup_sst39vf_rev_b, NULL, }, // SST39VF3202B
- { CFI_MFR_SST, 0x235D, fixup_sst39vf_rev_b, NULL, }, // SST39VF3201B
- { CFI_MFR_SST, 0x236C, fixup_sst39vf_rev_b, NULL, }, // SST39VF6402B
- { CFI_MFR_SST, 0x236D, fixup_sst39vf_rev_b, NULL, }, // SST39VF6401B
+ { CFI_MFR_SST, 0x234A, fixup_sst39vf, NULL, }, /* SST39VF1602 */
+ { CFI_MFR_SST, 0x234B, fixup_sst39vf, NULL, }, /* SST39VF1601 */
+ { CFI_MFR_SST, 0x235A, fixup_sst39vf, NULL, }, /* SST39VF3202 */
+ { CFI_MFR_SST, 0x235B, fixup_sst39vf, NULL, }, /* SST39VF3201 */
+ { CFI_MFR_SST, 0x235C, fixup_sst39vf_rev_b, NULL, }, /* SST39VF3202B */
+ { CFI_MFR_SST, 0x235D, fixup_sst39vf_rev_b, NULL, }, /* SST39VF3201B */
+ { CFI_MFR_SST, 0x236C, fixup_sst39vf_rev_b, NULL, }, /* SST39VF6402B */
+ { CFI_MFR_SST, 0x236D, fixup_sst39vf_rev_b, NULL, }, /* SST39VF6401B */
{ 0, 0, NULL, NULL }
};
@@ -344,6 +361,10 @@ static struct cfi_fixup cfi_fixup_table[] = {
{ CFI_MFR_AMD, 0x1301, fixup_s29gl064n_sectors, NULL, },
{ CFI_MFR_AMD, 0x1a00, fixup_s29gl032n_sectors, NULL, },
{ CFI_MFR_AMD, 0x1a01, fixup_s29gl032n_sectors, NULL, },
+ { CFI_MFR_SST, 0x536A, fixup_sst38vf640x_sectorsize, NULL, }, /* SST38VF6402 */
+ { CFI_MFR_SST, 0x536B, fixup_sst38vf640x_sectorsize, NULL, }, /* SST38VF6401 */
+ { CFI_MFR_SST, 0x536C, fixup_sst38vf640x_sectorsize, NULL, }, /* SST38VF6404 */
+ { CFI_MFR_SST, 0x536D, fixup_sst38vf640x_sectorsize, NULL, }, /* SST38VF6403 */
#if !FORCE_WORD_WRITE
{ CFI_MFR_ANY, CFI_ID_ANY, fixup_use_write_buffers, NULL, },
#endif
@@ -374,6 +395,13 @@ static void cfi_fixup_major_minor(struct cfi_private *cfi,
if (cfi->mfr == CFI_MFR_SAMSUNG && cfi->id == 0x257e &&
extp->MajorVersion == '0')
extp->MajorVersion = '1';
+ /*
+ * SST 38VF640x chips report major=0xFF / minor=0xFF.
+ */
+ if (cfi->mfr == CFI_MFR_SST && (cfi->id >> 4) == 0x0536) {
+ extp->MajorVersion = '1';
+ extp->MinorVersion = '0';
+ }
}
struct mtd_info *cfi_cmdset_0002(struct map_info *map, int primary)
@@ -545,15 +573,6 @@ static struct mtd_info *cfi_amdstd_setup(struct mtd_info *mtd)
printk(KERN_WARNING "Sum of regions (%lx) != total size of set of interleaved chips (%lx)\n", offset, devsize);
goto setup_err;
}
-#if 0
- // debug
- for (i=0; i<mtd->numeraseregions;i++){
- printk("%d: offset=0x%x,size=0x%x,blocks=%d\n",
- i,mtd->eraseregions[i].offset,
- mtd->eraseregions[i].erasesize,
- mtd->eraseregions[i].numblocks);
- }
-#endif
__module_get(THIS_MODULE);
register_reboot_notifier(&mtd->reboot_notifier);
@@ -674,7 +693,7 @@ static int get_chip(struct map_info *map, struct flchip *chip, unsigned long adr
* there was an error (so leave the erase
* routine to recover from it) or we trying to
* use the erase-in-progress sector. */
- map_write(map, CMD(0x30), chip->in_progress_block_addr);
+ map_write(map, cfi->sector_erase_cmd, chip->in_progress_block_addr);
chip->state = FL_ERASING;
chip->oldstate = FL_READY;
printk(KERN_ERR "MTD %s(): chip not ready after erase suspend\n", __func__);
@@ -727,7 +746,7 @@ static void put_chip(struct map_info *map, struct flchip *chip, unsigned long ad
switch(chip->oldstate) {
case FL_ERASING:
chip->state = chip->oldstate;
- map_write(map, CMD(0x30), chip->in_progress_block_addr);
+ map_write(map, cfi->sector_erase_cmd, chip->in_progress_block_addr);
chip->oldstate = FL_READY;
chip->state = FL_ERASING;
break;
@@ -870,7 +889,7 @@ static void __xipram xip_udelay(struct map_info *map, struct flchip *chip,
local_irq_disable();
/* Resume the write or erase operation */
- map_write(map, CMD(0x30), adr);
+ map_write(map, cfi->sector_erase_cmd, adr);
chip->state = oldstate;
start = xip_currtime();
} else if (usec >= 1000000/HZ) {
@@ -1025,9 +1044,6 @@ static inline int do_read_secsi_onechip(struct map_info *map, struct flchip *chi
mutex_lock(&chip->mutex);
if (chip->state != FL_READY){
-#if 0
- printk(KERN_DEBUG "Waiting for chip to read, status = %d\n", chip->state);
-#endif
set_current_state(TASK_UNINTERRUPTIBLE);
add_wait_queue(&chip->wq, &wait);
@@ -1035,10 +1051,6 @@ static inline int do_read_secsi_onechip(struct map_info *map, struct flchip *chi
schedule();
remove_wait_queue(&chip->wq, &wait);
-#if 0
- if(signal_pending(current))
- return -EINTR;
-#endif
timeo = jiffies + HZ;
goto retry;
@@ -1246,9 +1258,6 @@ static int cfi_amdstd_write_words(struct mtd_info *mtd, loff_t to, size_t len,
mutex_lock(&cfi->chips[chipnum].mutex);
if (cfi->chips[chipnum].state != FL_READY) {
-#if 0
- printk(KERN_DEBUG "Waiting for chip to write, status = %d\n", cfi->chips[chipnum].state);
-#endif
set_current_state(TASK_UNINTERRUPTIBLE);
add_wait_queue(&cfi->chips[chipnum].wq, &wait);
@@ -1256,10 +1265,6 @@ static int cfi_amdstd_write_words(struct mtd_info *mtd, loff_t to, size_t len,
schedule();
remove_wait_queue(&cfi->chips[chipnum].wq, &wait);
-#if 0
- if(signal_pending(current))
- return -EINTR;
-#endif
goto retry;
}
@@ -1324,9 +1329,6 @@ static int cfi_amdstd_write_words(struct mtd_info *mtd, loff_t to, size_t len,
mutex_lock(&cfi->chips[chipnum].mutex);
if (cfi->chips[chipnum].state != FL_READY) {
-#if 0
- printk(KERN_DEBUG "Waiting for chip to write, status = %d\n", cfi->chips[chipnum].state);
-#endif
set_current_state(TASK_UNINTERRUPTIBLE);
add_wait_queue(&cfi->chips[chipnum].wq, &wait);
@@ -1334,10 +1336,6 @@ static int cfi_amdstd_write_words(struct mtd_info *mtd, loff_t to, size_t len,
schedule();
remove_wait_queue(&cfi->chips[chipnum].wq, &wait);
-#if 0
- if(signal_pending(current))
- return -EINTR;
-#endif
goto retry1;
}
@@ -1396,7 +1394,6 @@ static int __xipram do_write_buffer(struct map_info *map, struct flchip *chip,
cfi_send_gen_cmd(0xAA, cfi->addr_unlock1, chip->start, map, cfi, cfi->device_type, NULL);
cfi_send_gen_cmd(0x55, cfi->addr_unlock2, chip->start, map, cfi, cfi->device_type, NULL);
- //cfi_send_gen_cmd(0xA0, cfi->addr_unlock1, chip->start, map, cfi, cfi->device_type, NULL);
/* Write Buffer Load */
map_write(map, CMD(0x25), cmd_adr);
@@ -1675,7 +1672,7 @@ static int __xipram do_erase_oneblock(struct map_info *map, struct flchip *chip,
cfi_send_gen_cmd(0x80, cfi->addr_unlock1, chip->start, map, cfi, cfi->device_type, NULL);
cfi_send_gen_cmd(0xAA, cfi->addr_unlock1, chip->start, map, cfi, cfi->device_type, NULL);
cfi_send_gen_cmd(0x55, cfi->addr_unlock2, chip->start, map, cfi, cfi->device_type, NULL);
- map_write(map, CMD(0x30), adr);
+ map_write(map, cfi->sector_erase_cmd, adr);
chip->state = FL_ERASING;
chip->erase_suspended = 0;
diff --git a/drivers/mtd/chips/cfi_probe.c b/drivers/mtd/chips/cfi_probe.c
index 8f5b96aa87a0..d25535279404 100644
--- a/drivers/mtd/chips/cfi_probe.c
+++ b/drivers/mtd/chips/cfi_probe.c
@@ -177,6 +177,8 @@ static int __xipram cfi_chip_setup(struct map_info *map,
cfi->cfi_mode = CFI_MODE_CFI;
+ cfi->sector_erase_cmd = CMD(0x30);
+
/* Read the CFI info structure */
xip_disable_qry(base, map, cfi);
for (i=0; i<(sizeof(struct cfi_ident) + num_erase_regions * 4); i++)
diff --git a/drivers/mtd/chips/cfi_util.c b/drivers/mtd/chips/cfi_util.c
index e503b2ca894d..360525c637d2 100644
--- a/drivers/mtd/chips/cfi_util.c
+++ b/drivers/mtd/chips/cfi_util.c
@@ -77,6 +77,13 @@ int __xipram cfi_qry_mode_on(uint32_t base, struct map_info *map,
cfi_send_gen_cmd(0x98, 0x5555, base, map, cfi, cfi->device_type, NULL);
if (cfi_qry_present(map, base, cfi))
return 1;
+ /* SST 39VF640xB */
+ cfi_send_gen_cmd(0xF0, 0, base, map, cfi, cfi->device_type, NULL);
+ cfi_send_gen_cmd(0xAA, 0x555, base, map, cfi, cfi->device_type, NULL);
+ cfi_send_gen_cmd(0x55, 0x2AA, base, map, cfi, cfi->device_type, NULL);
+ cfi_send_gen_cmd(0x98, 0x555, base, map, cfi, cfi->device_type, NULL);
+ if (cfi_qry_present(map, base, cfi))
+ return 1;
/* QRY not found */
return 0;
}
diff --git a/drivers/mtd/devices/block2mtd.c b/drivers/mtd/devices/block2mtd.c
index 93651865ddbe..2cf0cc6a4189 100644
--- a/drivers/mtd/devices/block2mtd.c
+++ b/drivers/mtd/devices/block2mtd.c
@@ -91,7 +91,6 @@ static int block2mtd_erase(struct mtd_info *mtd, struct erase_info *instr)
} else
instr->state = MTD_ERASE_DONE;
- instr->state = MTD_ERASE_DONE;
mtd_erase_callback(instr);
return err;
}
diff --git a/drivers/mtd/devices/m25p80.c b/drivers/mtd/devices/m25p80.c
index 6f512b5c117b..bf5a002209bd 100644
--- a/drivers/mtd/devices/m25p80.c
+++ b/drivers/mtd/devices/m25p80.c
@@ -661,11 +661,14 @@ static const struct spi_device_id m25p_ids[] = {
{ "s25sl008a", INFO(0x010213, 0, 64 * 1024, 16, 0) },
{ "s25sl016a", INFO(0x010214, 0, 64 * 1024, 32, 0) },
{ "s25sl032a", INFO(0x010215, 0, 64 * 1024, 64, 0) },
+ { "s25sl032p", INFO(0x010215, 0x4d00, 64 * 1024, 64, SECT_4K) },
{ "s25sl064a", INFO(0x010216, 0, 64 * 1024, 128, 0) },
{ "s25sl12800", INFO(0x012018, 0x0300, 256 * 1024, 64, 0) },
{ "s25sl12801", INFO(0x012018, 0x0301, 64 * 1024, 256, 0) },
{ "s25fl129p0", INFO(0x012018, 0x4d00, 256 * 1024, 64, 0) },
{ "s25fl129p1", INFO(0x012018, 0x4d01, 64 * 1024, 256, 0) },
+ { "s25fl016k", INFO(0xef4015, 0, 64 * 1024, 32, SECT_4K) },
+ { "s25fl064k", INFO(0xef4017, 0, 64 * 1024, 128, SECT_4K) },
/* SST -- large erase sizes are "overlays", "sectors" are 4K */
{ "sst25vf040b", INFO(0xbf258d, 0, 64 * 1024, 8, SECT_4K) },
@@ -714,6 +717,7 @@ static const struct spi_device_id m25p_ids[] = {
{ "w25x32", INFO(0xef3016, 0, 64 * 1024, 64, SECT_4K) },
{ "w25q32", INFO(0xef4016, 0, 64 * 1024, 64, SECT_4K) },
{ "w25x64", INFO(0xef3017, 0, 64 * 1024, 128, SECT_4K) },
+ { "w25q64", INFO(0xef4017, 0, 64 * 1024, 128, SECT_4K) },
/* Catalyst / On Semiconductor -- non-JEDEC */
{ "cat25c11", CAT25_INFO( 16, 8, 16, 1) },
@@ -924,6 +928,13 @@ static int __devinit m25p_probe(struct spi_device *spi)
nr_parts = data->nr_parts;
}
+#ifdef CONFIG_MTD_OF_PARTS
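+ /* Fall back to partitions described in the device tree, if any */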
+ if (nr_parts <= 0 && spi->dev.of_node) {
+ nr_parts = of_mtd_parse_partitions(&spi->dev,
+ spi->dev.of_node, &parts);
+ }
+#endif
+
if (nr_parts > 0) {
for (i = 0; i < nr_parts; i++) {
DEBUG(MTD_DEBUG_LEVEL2, "partitions[%d] = "
diff --git a/drivers/mtd/devices/phram.c b/drivers/mtd/devices/phram.c
index 1696bbecaa7e..52393282eaf1 100644
--- a/drivers/mtd/devices/phram.c
+++ b/drivers/mtd/devices/phram.c
@@ -15,7 +15,7 @@
* phram=swap,64Mi,128Mi phram=test,900Mi,1Mi
*/
-#define pr_fmt(fmt) "phram: " fmt
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <asm/io.h>
#include <linux/init.h>
diff --git a/drivers/mtd/maps/Kconfig b/drivers/mtd/maps/Kconfig
index 962212628f6e..a0dd7bba9481 100644
--- a/drivers/mtd/maps/Kconfig
+++ b/drivers/mtd/maps/Kconfig
@@ -251,6 +251,15 @@ config MTD_NETtel
help
Support for flash chips on NETtel/SecureEdge/SnapGear boards.
+config MTD_BCM963XX
+ tristate "Map driver for Broadcom BCM963xx boards"
+ depends on BCM63XX
+ select MTD_MAP_BANK_WIDTH_2
+ select MTD_CFI_I1
+ help
+ Support for parsing CFE image tag and creating MTD partitions on
+ Broadcom BCM63xx boards.
+
config MTD_DILNETPC
tristate "CFI Flash device mapped on DIL/Net PC"
depends on X86 && MTD_CONCAT && MTD_PARTITIONS && MTD_CFI_INTELEXT && BROKEN
diff --git a/drivers/mtd/maps/Makefile b/drivers/mtd/maps/Makefile
index f216bb573713..c7869c7a6b18 100644
--- a/drivers/mtd/maps/Makefile
+++ b/drivers/mtd/maps/Makefile
@@ -58,3 +58,4 @@ obj-$(CONFIG_MTD_BFIN_ASYNC) += bfin-async-flash.o
obj-$(CONFIG_MTD_RBTX4939) += rbtx4939-flash.o
obj-$(CONFIG_MTD_VMU) += vmu-flash.o
obj-$(CONFIG_MTD_GPIO_ADDR) += gpio-addr-flash.o
+obj-$(CONFIG_MTD_BCM963XX) += bcm963xx-flash.o
diff --git a/drivers/mtd/maps/bcm963xx-flash.c b/drivers/mtd/maps/bcm963xx-flash.c
new file mode 100644
index 000000000000..d175c120ee84
--- /dev/null
+++ b/drivers/mtd/maps/bcm963xx-flash.c
@@ -0,0 +1,271 @@
+/*
+ * Copyright © 2006-2008 Florian Fainelli <florian@openwrt.org>
+ * Mike Albon <malbon@openwrt.org>
+ * Copyright © 2009-2010 Daniel Dickinson <openwrt@cshore.neomailbox.net>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/slab.h>
+#include <linux/mtd/map.h>
+#include <linux/mtd/mtd.h>
+#include <linux/mtd/partitions.h>
+#include <linux/vmalloc.h>
+#include <linux/platform_device.h>
+#include <linux/io.h>
+
+#include <asm/mach-bcm63xx/bcm963xx_tag.h>
+
+#define BCM63XX_BUSWIDTH 2 /* Buswidth */
+#define BCM63XX_EXTENDED_SIZE 0xBFC00000 /* Extended flash address */
+
+#define PFX KBUILD_MODNAME ": "
+
+static struct mtd_partition *parsed_parts;
+
+static struct mtd_info *bcm963xx_mtd_info;
+
+static struct map_info bcm963xx_map = {
+ .name = "bcm963xx",
+ .bankwidth = BCM63XX_BUSWIDTH,
+};
+
+static int parse_cfe_partitions(struct mtd_info *master,
+ struct mtd_partition **pparts)
+{
+ /* CFE, NVRAM and global Linux are always present */
+ int nrparts = 3, curpart = 0;
+ struct bcm_tag *buf;
+ struct mtd_partition *parts;
+ int ret;
+ size_t retlen;
+ unsigned int rootfsaddr, kerneladdr, spareaddr;
+ unsigned int rootfslen, kernellen, sparelen, totallen;
+ int namelen = 0;
+ int i;
+ char *boardid;
+ char *tagversion;
+
+ /* Allocate memory for buffer */
+ buf = vmalloc(sizeof(struct bcm_tag));
+ if (!buf)
+ return -ENOMEM;
+
+ /* Get the tag */
+ ret = master->read(master, master->erasesize, sizeof(struct bcm_tag),
+ &retlen, (void *)buf);
+ if (retlen != sizeof(struct bcm_tag)) {
+ vfree(buf);
+ return -EIO;
+ }
+
+ sscanf(buf->kernel_address, "%u", &kerneladdr);
+ sscanf(buf->kernel_length, "%u", &kernellen);
+ sscanf(buf->total_length, "%u", &totallen);
+ tagversion = &(buf->tag_version[0]);
+ boardid = &(buf->board_id[0]);
+
+ printk(KERN_INFO PFX "CFE boot tag found with version %s "
+ "and board type %s\n", tagversion, boardid);
+
+ kerneladdr = kerneladdr - BCM63XX_EXTENDED_SIZE;
+ rootfsaddr = kerneladdr + kernellen;
+ spareaddr = roundup(totallen, master->erasesize) + master->erasesize;
+ sparelen = master->size - spareaddr - master->erasesize;
+ rootfslen = spareaddr - rootfsaddr;
+
+ /* Determine number of partitions */
+ namelen = 8;
+ if (rootfslen > 0) {
+ nrparts++;
+ namelen += 6;
+ }
+ if (kernellen > 0) {
+ nrparts++;
+ namelen += 6;
+ }
+
+ /* Ask kernel for more memory */
+ parts = kzalloc(sizeof(*parts) * nrparts + 10 * nrparts, GFP_KERNEL);
+ if (!parts) {
+ vfree(buf);
+ return -ENOMEM;
+ }
+
+ /* Start building partition list */
+ parts[curpart].name = "CFE";
+ parts[curpart].offset = 0;
+ parts[curpart].size = master->erasesize;
+ curpart++;
+
+ if (kernellen > 0) {
+ parts[curpart].name = "kernel";
+ parts[curpart].offset = kerneladdr;
+ parts[curpart].size = kernellen;
+ curpart++;
+ }
+
+ if (rootfslen > 0) {
+ parts[curpart].name = "rootfs";
+ parts[curpart].offset = rootfsaddr;
+ parts[curpart].size = rootfslen;
+ if (sparelen > 0)
+ parts[curpart].size += sparelen;
+ curpart++;
+ }
+
+ parts[curpart].name = "nvram";
+ parts[curpart].offset = master->size - master->erasesize;
+ parts[curpart].size = master->erasesize;
+
+ /* Global partition "linux" to make firmware upgrades easy */
+ curpart++;
+ parts[curpart].name = "linux";
+ parts[curpart].offset = parts[0].size;
+ parts[curpart].size = master->size - parts[0].size - parts[3].size;
+
+ for (i = 0; i < nrparts; i++)
+ printk(KERN_INFO PFX "Partition %d is %s offset %lx and "
+ "length %lx\n", i, parts[i].name,
+ (long unsigned int)(parts[i].offset),
+ (long unsigned int)(parts[i].size));
+
+ printk(KERN_INFO PFX "Spare partition is %x offset and length %x\n",
+ spareaddr, sparelen);
+ *pparts = parts;
+ vfree(buf);
+
+ return nrparts;
+}
+
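+/*
+ * Look for the "CFE1CFE1" signature that the CFE bootloader leaves at a
+ * fixed offset in flash; a match means the CFE tag parser can be used.
+ */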
+static int bcm963xx_detect_cfe(struct mtd_info *master)
+{
+ int idoffset = 0x4e0;
+ static char idstring[8] = "CFE1CFE1";
+ char buf[9];
+ int ret;
+ size_t retlen;
+
+ ret = master->read(master, idoffset, 8, &retlen, (void *)buf);
+ buf[retlen] = 0;
+ printk(KERN_INFO PFX "Read Signature value of %s\n", buf);
+
+ return strncmp(idstring, buf, 8);
+}
+
+static int bcm963xx_probe(struct platform_device *pdev)
+{
+ int err = 0;
+ int parsed_nr_parts = 0;
+ char *part_type;
+ struct resource *r;
+
+ r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (!r) {
+ dev_err(&pdev->dev, "no resource supplied\n");
+ return -ENODEV;
+ }
+
+ bcm963xx_map.phys = r->start;
+ bcm963xx_map.size = resource_size(r);
+ bcm963xx_map.virt = ioremap(r->start, resource_size(r));
+ if (!bcm963xx_map.virt) {
+ dev_err(&pdev->dev, "failed to ioremap\n");
+ return -EIO;
+ }
+
+ dev_info(&pdev->dev, "0x%08lx at 0x%08x\n",
+ bcm963xx_map.size, bcm963xx_map.phys);
+
+ simple_map_init(&bcm963xx_map);
+
+ bcm963xx_mtd_info = do_map_probe("cfi_probe", &bcm963xx_map);
+ if (!bcm963xx_mtd_info) {
+ dev_err(&pdev->dev, "failed to probe using CFI\n");
+ err = -EIO;
+ goto err_probe;
+ }
+
+ bcm963xx_mtd_info->owner = THIS_MODULE;
+
+ /* Bootloaders are mutually exclusive: only CFE-based boards are supported */
+ if (bcm963xx_detect_cfe(bcm963xx_mtd_info) == 0) {
+ dev_info(&pdev->dev, "CFE bootloader detected\n");
+ if (parsed_nr_parts == 0) {
+ int ret = parse_cfe_partitions(bcm963xx_mtd_info,
+ &parsed_parts);
+ if (ret > 0) {
+ part_type = "CFE";
+ parsed_nr_parts = ret;
+ }
+ }
+ } else {
+ dev_info(&pdev->dev, "unsupported bootloader\n");
+ err = -ENODEV;
+ goto err_probe;
+ }
+
+ return add_mtd_partitions(bcm963xx_mtd_info, parsed_parts,
+ parsed_nr_parts);
+
+err_probe:
+ iounmap(bcm963xx_map.virt);
+ return err;
+}
+
+static int bcm963xx_remove(struct platform_device *pdev)
+{
+ if (bcm963xx_mtd_info) {
+ del_mtd_partitions(bcm963xx_mtd_info);
+ map_destroy(bcm963xx_mtd_info);
+ }
+
+ if (bcm963xx_map.virt) {
+ iounmap(bcm963xx_map.virt);
+ bcm963xx_map.virt = 0;
+ }
+
+ return 0;
+}
+
+static struct platform_driver bcm63xx_mtd_dev = {
+ .probe = bcm963xx_probe,
+ .remove = bcm963xx_remove,
+ .driver = {
+ .name = "bcm963xx-flash",
+ .owner = THIS_MODULE,
+ },
+};
+
+static int __init bcm963xx_mtd_init(void)
+{
+ return platform_driver_register(&bcm63xx_mtd_dev);
+}
+
+static void __exit bcm963xx_mtd_exit(void)
+{
+ platform_driver_unregister(&bcm63xx_mtd_dev);
+}
+
+module_init(bcm963xx_mtd_init);
+module_exit(bcm963xx_mtd_exit);
+
+MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("Broadcom BCM63xx MTD driver for CFE and RedBoot");
+MODULE_AUTHOR("Daniel Dickinson <openwrt@cshore.neomailbox.net>");
+MODULE_AUTHOR("Florian Fainelli <florian@openwrt.org>");
+MODULE_AUTHOR("Mike Albon <malbon@openwrt.org>");
diff --git a/drivers/mtd/maps/gpio-addr-flash.c b/drivers/mtd/maps/gpio-addr-flash.c
index 32e89d773b4e..af5707a80205 100644
--- a/drivers/mtd/maps/gpio-addr-flash.c
+++ b/drivers/mtd/maps/gpio-addr-flash.c
@@ -208,10 +208,14 @@ static int __devinit gpio_flash_probe(struct platform_device *pdev)
if (!state)
return -ENOMEM;
+ /*
+ * We cast start/end to known types in the boards file, so cast
+ * away their pointer types here to the known types (gpios->xxx).
+ */
state->gpio_count = gpios->end;
- state->gpio_addrs = (void *)gpios->start;
+ state->gpio_addrs = (void *)(unsigned long)gpios->start;
state->gpio_values = (void *)(state + 1);
- state->win_size = memory->end - memory->start + 1;
+ state->win_size = resource_size(memory);
memset(state->gpio_values, 0xff, arr_size);
state->map.name = DRIVER_NAME;
@@ -221,7 +225,7 @@ static int __devinit gpio_flash_probe(struct platform_device *pdev)
state->map.copy_to = gf_copy_to;
state->map.bankwidth = pdata->width;
state->map.size = state->win_size * (1 << state->gpio_count);
- state->map.virt = (void __iomem *)memory->start;
+ state->map.virt = ioremap_nocache(memory->start, state->map.size);
state->map.phys = NO_XIP;
state->map.map_priv_1 = (unsigned long)state;
diff --git a/drivers/mtd/maps/pcmciamtd.c b/drivers/mtd/maps/pcmciamtd.c
index 57a1acfe22c4..917022948399 100644
--- a/drivers/mtd/maps/pcmciamtd.c
+++ b/drivers/mtd/maps/pcmciamtd.c
@@ -640,10 +640,6 @@ static int pcmciamtd_config(struct pcmcia_device *link)
}
dev_info(&dev->p_dev->dev, "mtd%d: %s\n", mtd->index, mtd->name);
return 0;
-
- dev_err(&dev->p_dev->dev, "CS Error, exiting\n");
- pcmciamtd_release(link);
- return -ENODEV;
}
diff --git a/drivers/mtd/maps/physmap_of.c b/drivers/mtd/maps/physmap_of.c
index fe63f6bd663c..9861814aa027 100644
--- a/drivers/mtd/maps/physmap_of.c
+++ b/drivers/mtd/maps/physmap_of.c
@@ -50,7 +50,7 @@ static int parse_obsolete_partitions(struct platform_device *dev,
{
int i, plen, nr_parts;
const struct {
- u32 offset, len;
+ __be32 offset, len;
} *part;
const char *names;
@@ -69,9 +69,9 @@ static int parse_obsolete_partitions(struct platform_device *dev,
names = of_get_property(dp, "partition-names", &plen);
for (i = 0; i < nr_parts; i++) {
- info->parts[i].offset = part->offset;
- info->parts[i].size = part->len & ~1;
- if (part->len & 1) /* bit 0 set signifies read only partition */
+ info->parts[i].offset = be32_to_cpu(part->offset);
+ info->parts[i].size = be32_to_cpu(part->len) & ~1;
+ if (be32_to_cpu(part->len) & 1) /* bit 0 set signifies read only partition */
info->parts[i].mask_flags = MTD_WRITEABLE;
if (names && (plen > 0)) {
@@ -226,11 +226,11 @@ static int __devinit of_flash_probe(struct platform_device *dev,
struct resource res;
struct of_flash *info;
const char *probe_type = match->data;
- const u32 *width;
+ const __be32 *width;
int err;
int i;
int count;
- const u32 *p;
+ const __be32 *p;
int reg_tuple_size;
struct mtd_info **mtd_list = NULL;
resource_size_t res_size;
@@ -267,9 +267,11 @@ static int __devinit of_flash_probe(struct platform_device *dev,
for (i = 0; i < count; i++) {
err = -ENXIO;
if (of_address_to_resource(dp, i, &res)) {
- dev_err(&dev->dev, "Can't get IO address from device"
- " tree\n");
- goto err_out;
+ /*
+ * Continue with next register tuple if this
+ * one is not mappable
+ */
+ continue;
}
dev_dbg(&dev->dev, "of_flash device: %.8llx-%.8llx\n",
@@ -294,7 +296,7 @@ static int __devinit of_flash_probe(struct platform_device *dev,
info->list[i].map.name = dev_name(&dev->dev);
info->list[i].map.phys = res.start;
info->list[i].map.size = res_size;
- info->list[i].map.bankwidth = *width;
+ info->list[i].map.bankwidth = be32_to_cpup(width);
err = -ENOMEM;
info->list[i].map.virt = ioremap(info->list[i].map.phys,
diff --git a/drivers/mtd/mtd_blkdevs.c b/drivers/mtd/mtd_blkdevs.c
index 50ab431b24eb..cb20c67995d8 100644
--- a/drivers/mtd/mtd_blkdevs.c
+++ b/drivers/mtd/mtd_blkdevs.c
@@ -37,7 +37,6 @@
#include "mtdcore.h"
-static DEFINE_MUTEX(mtd_blkdevs_mutex);
static LIST_HEAD(blktrans_majors);
static DEFINE_MUTEX(blktrans_ref_mutex);
@@ -133,6 +132,10 @@ static int mtd_blktrans_thread(void *arg)
if (!req && !(req = blk_fetch_request(rq))) {
set_current_state(TASK_INTERRUPTIBLE);
+
+ if (kthread_should_stop())
+ set_current_state(TASK_RUNNING);
+
spin_unlock_irq(rq->queue_lock);
schedule();
spin_lock_irq(rq->queue_lock);
@@ -176,54 +179,53 @@ static void mtd_blktrans_request(struct request_queue *rq)
static int blktrans_open(struct block_device *bdev, fmode_t mode)
{
struct mtd_blktrans_dev *dev = blktrans_dev_get(bdev->bd_disk);
- int ret;
+ int ret = 0;
if (!dev)
return -ERESTARTSYS; /* FIXME: busy loop! -arnd*/
- mutex_lock(&mtd_blkdevs_mutex);
mutex_lock(&dev->lock);
- if (!dev->mtd) {
- ret = -ENXIO;
+ if (dev->open++)
goto unlock;
- }
- ret = !dev->open++ && dev->tr->open ? dev->tr->open(dev) : 0;
+ kref_get(&dev->ref);
+ __module_get(dev->tr->owner);
+
+ if (dev->mtd) {
+ ret = dev->tr->open ? dev->tr->open(dev) : 0;
+ __get_mtd_device(dev->mtd);
+ }
- /* Take another reference on the device so it won't go away till
- last release */
- if (!ret)
- kref_get(&dev->ref);
unlock:
mutex_unlock(&dev->lock);
blktrans_dev_put(dev);
- mutex_unlock(&mtd_blkdevs_mutex);
return ret;
}
static int blktrans_release(struct gendisk *disk, fmode_t mode)
{
struct mtd_blktrans_dev *dev = blktrans_dev_get(disk);
- int ret = -ENXIO;
+ int ret = 0;
if (!dev)
return ret;
- mutex_lock(&mtd_blkdevs_mutex);
mutex_lock(&dev->lock);
- /* Release one reference, we sure its not the last one here*/
- kref_put(&dev->ref, blktrans_dev_release);
-
- if (!dev->mtd)
+ if (--dev->open)
goto unlock;
- ret = !--dev->open && dev->tr->release ? dev->tr->release(dev) : 0;
+ kref_put(&dev->ref, blktrans_dev_release);
+ module_put(dev->tr->owner);
+
+ if (dev->mtd) {
+ ret = dev->tr->release ? dev->tr->release(dev) : 0;
+ __put_mtd_device(dev->mtd);
+ }
unlock:
mutex_unlock(&dev->lock);
blktrans_dev_put(dev);
- mutex_unlock(&mtd_blkdevs_mutex);
return ret;
}
@@ -256,7 +258,6 @@ static int blktrans_ioctl(struct block_device *bdev, fmode_t mode,
if (!dev)
return ret;
- mutex_lock(&mtd_blkdevs_mutex);
mutex_lock(&dev->lock);
if (!dev->mtd)
@@ -271,7 +272,6 @@ static int blktrans_ioctl(struct block_device *bdev, fmode_t mode,
}
unlock:
mutex_unlock(&dev->lock);
- mutex_unlock(&mtd_blkdevs_mutex);
blktrans_dev_put(dev);
return ret;
}
@@ -385,9 +385,6 @@ int add_mtd_blktrans_dev(struct mtd_blktrans_dev *new)
gd->queue = new->rq;
- __get_mtd_device(new->mtd);
- __module_get(tr->owner);
-
/* Create processing thread */
/* TODO: workqueue ? */
new->thread = kthread_run(mtd_blktrans_thread, new,
@@ -410,8 +407,6 @@ int add_mtd_blktrans_dev(struct mtd_blktrans_dev *new)
}
return 0;
error4:
- module_put(tr->owner);
- __put_mtd_device(new->mtd);
blk_cleanup_queue(new->rq);
error3:
put_disk(new->disk);
@@ -448,17 +443,15 @@ int del_mtd_blktrans_dev(struct mtd_blktrans_dev *old)
blk_start_queue(old->rq);
spin_unlock_irqrestore(&old->queue_lock, flags);
- /* Ask trans driver for release to the mtd device */
+ /*
+ * If the device is currently open, tell trans driver to close it,
+ * then put mtd device, and don't touch it again.
+ */
mutex_lock(&old->lock);
- if (old->open && old->tr->release) {
- old->tr->release(old);
- old->open = 0;
+ if (old->open) {
+ if (old->tr->release)
+ old->tr->release(old);
+ __put_mtd_device(old->mtd);
}
- __put_mtd_device(old->mtd);
- module_put(old->tr->owner);
-
- /* At that point, we don't touch the mtd anymore */
old->mtd = NULL;
mutex_unlock(&old->lock);
@@ -508,13 +501,16 @@ int register_mtd_blktrans(struct mtd_blktrans_ops *tr)
mutex_lock(&mtd_table_mutex);
ret = register_blkdev(tr->major, tr->name);
- if (ret) {
+ if (ret < 0) {
printk(KERN_WARNING "Unable to register %s block device on major %d: %d\n",
tr->name, tr->major, ret);
mutex_unlock(&mtd_table_mutex);
return ret;
}
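+ /*
+ * When tr->major is zero, register_blkdev() returns the major
+ * number it allocated dynamically; record it for later use.
+ */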
+ if (ret)
+ tr->major = ret;
+
tr->blkshift = ffs(tr->blksize) - 1;
INIT_LIST_HEAD(&tr->devs);
diff --git a/drivers/mtd/mtdchar.c b/drivers/mtd/mtdchar.c
index 5ef45487b65f..4759d827e8c7 100644
--- a/drivers/mtd/mtdchar.c
+++ b/drivers/mtd/mtdchar.c
@@ -30,8 +30,9 @@
#include <linux/backing-dev.h>
#include <linux/compat.h>
#include <linux/mount.h>
-
+#include <linux/blkpg.h>
#include <linux/mtd/mtd.h>
+#include <linux/mtd/partitions.h>
#include <linux/mtd/map.h>
#include <asm/uaccess.h>
@@ -478,6 +479,78 @@ static int mtd_do_readoob(struct mtd_info *mtd, uint64_t start,
return ret;
}
+/*
+ * Copies (and truncates, if necessary) data from the larger struct,
+ * nand_ecclayout, to the smaller, deprecated layout struct,
+ * nand_ecclayout_user. This is necessary only to support the deprecated
+ * API ioctl ECCGETLAYOUT while allowing all new functionality to use
+ * nand_ecclayout flexibly (i.e. the struct may change size in new
+ * releases without requiring major rewrites).
+ */
+static int shrink_ecclayout(const struct nand_ecclayout *from,
+ struct nand_ecclayout_user *to)
+{
+ int i;
+
+ if (!from || !to)
+ return -EINVAL;
+
+ memset(to, 0, sizeof(*to));
+
+ to->eccbytes = min((int)from->eccbytes, MTD_MAX_ECCPOS_ENTRIES);
+ for (i = 0; i < to->eccbytes; i++)
+ to->eccpos[i] = from->eccpos[i];
+
+ for (i = 0; i < MTD_MAX_OOBFREE_ENTRIES; i++) {
+ if (from->oobfree[i].length == 0 &&
+ from->oobfree[i].offset == 0)
+ break;
+ to->oobavail += from->oobfree[i].length;
+ to->oobfree[i] = from->oobfree[i];
+ }
+
+ return 0;
+}
+
+#ifdef CONFIG_MTD_PARTITIONS
+static int mtd_blkpg_ioctl(struct mtd_info *mtd,
+ struct blkpg_ioctl_arg __user *arg)
+{
+ struct blkpg_ioctl_arg a;
+ struct blkpg_partition p;
+
+ if (!capable(CAP_SYS_ADMIN))
+ return -EPERM;
+
+ /* Only master mtd device must be used to control partitions */
+ if (!mtd_is_master(mtd))
+ return -EINVAL;
+
+ if (copy_from_user(&a, arg, sizeof(struct blkpg_ioctl_arg)))
+ return -EFAULT;
+
+ if (copy_from_user(&p, a.data, sizeof(struct blkpg_partition)))
+ return -EFAULT;
+
+ switch (a.op) {
+ case BLKPG_ADD_PARTITION:
+
+ return mtd_add_partition(mtd, p.devname, p.start, p.length);
+
+ case BLKPG_DEL_PARTITION:
+
+ if (p.pno < 0)
+ return -EINVAL;
+
+ return mtd_del_partition(mtd, p.pno);
+
+ default:
+ return -EINVAL;
+ }
+}
+#endif
+
+
static int mtd_ioctl(struct file *file, u_int cmd, u_long arg)
{
struct mtd_file_info *mfi = file->private_data;
@@ -514,6 +587,9 @@ static int mtd_ioctl(struct file *file, u_int cmd, u_long arg)
if (get_user(ur_idx, &(ur->regionindex)))
return -EFAULT;
+ if (ur_idx >= mtd->numeraseregions)
+ return -EINVAL;
+
kr = &(mtd->eraseregions[ur_idx]);
if (put_user(kr->offset, &(ur->offset))
@@ -813,14 +889,23 @@ static int mtd_ioctl(struct file *file, u_int cmd, u_long arg)
}
#endif
+ /* This ioctl is being deprecated - it truncates the ecc layout */
case ECCGETLAYOUT:
{
+ struct nand_ecclayout_user *usrlay;
+
if (!mtd->ecclayout)
return -EOPNOTSUPP;
- if (copy_to_user(argp, mtd->ecclayout,
- sizeof(struct nand_ecclayout)))
- return -EFAULT;
+ usrlay = kmalloc(sizeof(*usrlay), GFP_KERNEL);
+ if (!usrlay)
+ return -ENOMEM;
+
+ shrink_ecclayout(mtd->ecclayout, usrlay);
+
+ if (copy_to_user(argp, usrlay, sizeof(*usrlay)))
+ ret = -EFAULT;
+ kfree(usrlay);
break;
}
@@ -856,6 +941,22 @@ static int mtd_ioctl(struct file *file, u_int cmd, u_long arg)
break;
}
+#ifdef CONFIG_MTD_PARTITIONS
+ case BLKPG:
+ {
+ ret = mtd_blkpg_ioctl(mtd,
+ (struct blkpg_ioctl_arg __user *)arg);
+ break;
+ }
+
+ case BLKRRPART:
+ {
+ /* No reread partition feature. Just return ok */
+ ret = 0;
+ break;
+ }
+#endif
+
default:
ret = -ENOTTY;
}
@@ -1030,17 +1131,15 @@ static const struct file_operations mtd_fops = {
#endif
};
-static int mtd_inodefs_get_sb(struct file_system_type *fs_type, int flags,
- const char *dev_name, void *data,
- struct vfsmount *mnt)
+static struct dentry *mtd_inodefs_mount(struct file_system_type *fs_type,
+ int flags, const char *dev_name, void *data)
{
- return get_sb_pseudo(fs_type, "mtd_inode:", NULL, MTD_INODE_FS_MAGIC,
- mnt);
+ return mount_pseudo(fs_type, "mtd_inode:", NULL, MTD_INODE_FS_MAGIC);
}
static struct file_system_type mtd_inodefs_type = {
.name = "mtd_inodefs",
- .get_sb = mtd_inodefs_get_sb,
+ .mount = mtd_inodefs_mount,
.kill_sb = kill_anon_super,
};
diff --git a/drivers/mtd/mtdpart.c b/drivers/mtd/mtdpart.c
index dc6558568876..79e3689f1e16 100644
--- a/drivers/mtd/mtdpart.c
+++ b/drivers/mtd/mtdpart.c
@@ -29,9 +29,11 @@
#include <linux/kmod.h>
#include <linux/mtd/mtd.h>
#include <linux/mtd/partitions.h>
+#include <linux/err.h>
/* Our partition linked list */
static LIST_HEAD(mtd_partitions);
+static DEFINE_MUTEX(mtd_partitions_mutex);
/* Our partition node structure */
struct mtd_part {
@@ -326,6 +328,12 @@ static int part_block_markbad(struct mtd_info *mtd, loff_t ofs)
return res;
}
+static inline void free_partition(struct mtd_part *p)
+{
+ kfree(p->mtd.name);
+ kfree(p);
+}
+
/*
* This function unregisters and destroy all slave MTD objects which are
* attached to the given master MTD object.
@@ -334,33 +342,42 @@ static int part_block_markbad(struct mtd_info *mtd, loff_t ofs)
int del_mtd_partitions(struct mtd_info *master)
{
struct mtd_part *slave, *next;
+ int ret, err = 0;
+ mutex_lock(&mtd_partitions_mutex);
list_for_each_entry_safe(slave, next, &mtd_partitions, list)
if (slave->master == master) {
+ ret = del_mtd_device(&slave->mtd);
+ if (ret < 0) {
+ err = ret;
+ continue;
+ }
list_del(&slave->list);
- del_mtd_device(&slave->mtd);
- kfree(slave);
+ free_partition(slave);
}
+ mutex_unlock(&mtd_partitions_mutex);
- return 0;
+ return err;
}
EXPORT_SYMBOL(del_mtd_partitions);
-static struct mtd_part *add_one_partition(struct mtd_info *master,
- const struct mtd_partition *part, int partno,
- uint64_t cur_offset)
+static struct mtd_part *allocate_partition(struct mtd_info *master,
+ const struct mtd_partition *part, int partno,
+ uint64_t cur_offset)
{
struct mtd_part *slave;
+ char *name;
/* allocate the partition structure */
slave = kzalloc(sizeof(*slave), GFP_KERNEL);
- if (!slave) {
+ name = kstrdup(part->name, GFP_KERNEL);
+ if (!name || !slave) {
printk(KERN_ERR"memory allocation error while creating partitions for \"%s\"\n",
- master->name);
- del_mtd_partitions(master);
- return NULL;
+ master->name);
+ kfree(name);
+ kfree(slave);
+ return ERR_PTR(-ENOMEM);
}
- list_add(&slave->list, &mtd_partitions);
/* set up the MTD object for this partition */
slave->mtd.type = master->type;
@@ -371,7 +388,7 @@ static struct mtd_part *add_one_partition(struct mtd_info *master,
slave->mtd.oobavail = master->oobavail;
slave->mtd.subpage_sft = master->subpage_sft;
- slave->mtd.name = part->name;
+ slave->mtd.name = name;
slave->mtd.owner = master->owner;
slave->mtd.backing_dev_info = master->backing_dev_info;
@@ -518,12 +535,89 @@ static struct mtd_part *add_one_partition(struct mtd_info *master,
}
out_register:
- /* register our partition */
- add_mtd_device(&slave->mtd);
-
return slave;
}
+int mtd_add_partition(struct mtd_info *master, char *name,
+ long long offset, long long length)
+{
+ struct mtd_partition part;
+ struct mtd_part *p, *new;
+ uint64_t start, end;
+ int ret = 0;
+
+ /* the direct offset is expected */
+ if (offset == MTDPART_OFS_APPEND ||
+ offset == MTDPART_OFS_NXTBLK)
+ return -EINVAL;
+
+ if (length == MTDPART_SIZ_FULL)
+ length = master->size - offset;
+
+ if (length <= 0)
+ return -EINVAL;
+
+ part.name = name;
+ part.size = length;
+ part.offset = offset;
+ part.mask_flags = 0;
+ part.ecclayout = NULL;
+
+ new = allocate_partition(master, &part, -1, offset);
+ if (IS_ERR(new))
+ return PTR_ERR(new);
+
+ start = offset;
+ end = offset + length;
+
+ mutex_lock(&mtd_partitions_mutex);
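+ /* Reject the new partition if it overlaps an existing one on this master */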
+ list_for_each_entry(p, &mtd_partitions, list)
+ if (p->master == master) {
+ if ((start >= p->offset) &&
+ (start < (p->offset + p->mtd.size)))
+ goto err_inv;
+
+ if ((end >= p->offset) &&
+ (end < (p->offset + p->mtd.size)))
+ goto err_inv;
+ }
+
+ list_add(&new->list, &mtd_partitions);
+ mutex_unlock(&mtd_partitions_mutex);
+
+ add_mtd_device(&new->mtd);
+
+ return ret;
+err_inv:
+ mutex_unlock(&mtd_partitions_mutex);
+ free_partition(new);
+ return -EINVAL;
+}
+EXPORT_SYMBOL_GPL(mtd_add_partition);
+
+int mtd_del_partition(struct mtd_info *master, int partno)
+{
+ struct mtd_part *slave, *next;
+ int ret = -EINVAL;
+
+ mutex_lock(&mtd_partitions_mutex);
+ list_for_each_entry_safe(slave, next, &mtd_partitions, list)
+ if ((slave->master == master) &&
+ (slave->mtd.index == partno)) {
+ ret = del_mtd_device(&slave->mtd);
+ if (ret < 0)
+ break;
+
+ list_del(&slave->list);
+ free_partition(slave);
+ break;
+ }
+ mutex_unlock(&mtd_partitions_mutex);
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(mtd_del_partition);
+
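/*
 * A minimal usage sketch for the two exported helpers above (hypothetical
 * caller, not taken from this patch); the master mtd_info is assumed to have
 * been obtained elsewhere, e.g. via get_mtd_device():
 *
 *	err = mtd_add_partition(master, "scratch", 0x100000, 0x40000);
 *	...
 *	err = mtd_del_partition(master, partno);
 *
 * where partno is the slave's mtd.index. mtd_add_partition() rejects
 * MTDPART_OFS_APPEND/MTDPART_OFS_NXTBLK offsets and ranges overlapping an
 * existing partition with -EINVAL; mtd_del_partition() looks the slave up by
 * its mtd.index under mtd_partitions_mutex.
 */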
/*
* This function, given a master MTD object and a partition table, creates
* and registers slave MTD objects which are bound to the master according to
@@ -544,9 +638,16 @@ int add_mtd_partitions(struct mtd_info *master,
printk(KERN_NOTICE "Creating %d MTD partitions on \"%s\":\n", nbparts, master->name);
for (i = 0; i < nbparts; i++) {
- slave = add_one_partition(master, parts + i, i, cur_offset);
- if (!slave)
- return -ENOMEM;
+ slave = allocate_partition(master, parts + i, i, cur_offset);
+ if (IS_ERR(slave))
+ return PTR_ERR(slave);
+
+ mutex_lock(&mtd_partitions_mutex);
+ list_add(&slave->list, &mtd_partitions);
+ mutex_unlock(&mtd_partitions_mutex);
+
+ add_mtd_device(&slave->mtd);
+
cur_offset = slave->offset + slave->mtd.size;
}
@@ -618,3 +719,20 @@ int parse_mtd_partitions(struct mtd_info *master, const char **types,
return ret;
}
EXPORT_SYMBOL_GPL(parse_mtd_partitions);
+
+int mtd_is_master(struct mtd_info *mtd)
+{
+ struct mtd_part *part;
+ int nopart = 0;
+
+ mutex_lock(&mtd_partitions_mutex);
+ list_for_each_entry(part, &mtd_partitions, list)
+ if (&part->mtd == mtd) {
+ nopart = 1;
+ break;
+ }
+ mutex_unlock(&mtd_partitions_mutex);
+
+ return nopart;
+}
+EXPORT_SYMBOL_GPL(mtd_is_master);
diff --git a/drivers/mtd/mtdsuper.c b/drivers/mtd/mtdsuper.c
index 38e2ab07e7a3..16b02a1fc100 100644
--- a/drivers/mtd/mtdsuper.c
+++ b/drivers/mtd/mtdsuper.c
@@ -54,11 +54,10 @@ static int get_sb_mtd_set(struct super_block *sb, void *_mtd)
/*
* get a superblock on an MTD-backed filesystem
*/
-static int get_sb_mtd_aux(struct file_system_type *fs_type, int flags,
+static struct dentry *mount_mtd_aux(struct file_system_type *fs_type, int flags,
const char *dev_name, void *data,
struct mtd_info *mtd,
- int (*fill_super)(struct super_block *, void *, int),
- struct vfsmount *mnt)
+ int (*fill_super)(struct super_block *, void *, int))
{
struct super_block *sb;
int ret;
@@ -79,57 +78,49 @@ static int get_sb_mtd_aux(struct file_system_type *fs_type, int flags,
ret = fill_super(sb, data, flags & MS_SILENT ? 1 : 0);
if (ret < 0) {
deactivate_locked_super(sb);
- return ret;
+ return ERR_PTR(ret);
}
/* go */
sb->s_flags |= MS_ACTIVE;
- simple_set_mnt(mnt, sb);
-
- return 0;
+ return dget(sb->s_root);
/* new mountpoint for an already mounted superblock */
already_mounted:
DEBUG(1, "MTDSB: Device %d (\"%s\") is already mounted\n",
mtd->index, mtd->name);
- simple_set_mnt(mnt, sb);
- ret = 0;
- goto out_put;
+ put_mtd_device(mtd);
+ return dget(sb->s_root);
out_error:
- ret = PTR_ERR(sb);
-out_put:
put_mtd_device(mtd);
- return ret;
+ return ERR_CAST(sb);
}
/*
* get a superblock on an MTD-backed filesystem by MTD device number
*/
-static int get_sb_mtd_nr(struct file_system_type *fs_type, int flags,
+static struct dentry *mount_mtd_nr(struct file_system_type *fs_type, int flags,
const char *dev_name, void *data, int mtdnr,
- int (*fill_super)(struct super_block *, void *, int),
- struct vfsmount *mnt)
+ int (*fill_super)(struct super_block *, void *, int))
{
struct mtd_info *mtd;
mtd = get_mtd_device(NULL, mtdnr);
if (IS_ERR(mtd)) {
DEBUG(0, "MTDSB: Device #%u doesn't appear to exist\n", mtdnr);
- return PTR_ERR(mtd);
+ return ERR_CAST(mtd);
}
- return get_sb_mtd_aux(fs_type, flags, dev_name, data, mtd, fill_super,
- mnt);
+ return mount_mtd_aux(fs_type, flags, dev_name, data, mtd, fill_super);
}
/*
* set up an MTD-based superblock
*/
-int get_sb_mtd(struct file_system_type *fs_type, int flags,
+struct dentry *mount_mtd(struct file_system_type *fs_type, int flags,
const char *dev_name, void *data,
- int (*fill_super)(struct super_block *, void *, int),
- struct vfsmount *mnt)
+ int (*fill_super)(struct super_block *, void *, int))
{
#ifdef CONFIG_BLOCK
struct block_device *bdev;
@@ -138,7 +129,7 @@ int get_sb_mtd(struct file_system_type *fs_type, int flags,
int mtdnr;
if (!dev_name)
- return -EINVAL;
+ return ERR_PTR(-EINVAL);
DEBUG(2, "MTDSB: dev_name \"%s\"\n", dev_name);
@@ -156,10 +147,10 @@ int get_sb_mtd(struct file_system_type *fs_type, int flags,
mtd = get_mtd_device_nm(dev_name + 4);
if (!IS_ERR(mtd))
- return get_sb_mtd_aux(
+ return mount_mtd_aux(
fs_type, flags,
dev_name, data, mtd,
- fill_super, mnt);
+ fill_super);
printk(KERN_NOTICE "MTD:"
" MTD device with name \"%s\" not found.\n",
@@ -174,9 +165,9 @@ int get_sb_mtd(struct file_system_type *fs_type, int flags,
/* It was a valid number */
DEBUG(1, "MTDSB: mtd%%d, mtdnr %d\n",
mtdnr);
- return get_sb_mtd_nr(fs_type, flags,
+ return mount_mtd_nr(fs_type, flags,
dev_name, data,
- mtdnr, fill_super, mnt);
+ mtdnr, fill_super);
}
}
}
@@ -189,7 +180,7 @@ int get_sb_mtd(struct file_system_type *fs_type, int flags,
if (IS_ERR(bdev)) {
ret = PTR_ERR(bdev);
DEBUG(1, "MTDSB: lookup_bdev() returned %d\n", ret);
- return ret;
+ return ERR_PTR(ret);
}
DEBUG(1, "MTDSB: lookup_bdev() returned 0\n");
@@ -202,8 +193,7 @@ int get_sb_mtd(struct file_system_type *fs_type, int flags,
if (major != MTD_BLOCK_MAJOR)
goto not_an_MTD_device;
- return get_sb_mtd_nr(fs_type, flags, dev_name, data, mtdnr, fill_super,
- mnt);
+ return mount_mtd_nr(fs_type, flags, dev_name, data, mtdnr, fill_super);
not_an_MTD_device:
#endif /* CONFIG_BLOCK */
@@ -212,10 +202,10 @@ not_an_MTD_device:
printk(KERN_NOTICE
"MTD: Attempt to mount non-MTD device \"%s\"\n",
dev_name);
- return -EINVAL;
+ return ERR_PTR(-EINVAL);
}
-EXPORT_SYMBOL_GPL(get_sb_mtd);
+EXPORT_SYMBOL_GPL(mount_mtd);
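/*
 * A minimal sketch of how an MTD-backed filesystem converts to the new
 * interface (hypothetical examplefs names, not taken from this patch):
 *
 *	static struct dentry *examplefs_mount(struct file_system_type *fs_type,
 *			int flags, const char *dev_name, void *data)
 *	{
 *		return mount_mtd(fs_type, flags, dev_name, data,
 *				 examplefs_fill_super);
 *	}
 *
 * The returned dentry (or ERR_PTR) is handed straight back to the VFS,
 * replacing the old get_sb_mtd()/vfsmount-filling convention.
 */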
/*
* destroy an MTD-based superblock
diff --git a/drivers/mtd/nand/Kconfig b/drivers/mtd/nand/Kconfig
index 8b4b67c8a391..8229802b4346 100644
--- a/drivers/mtd/nand/Kconfig
+++ b/drivers/mtd/nand/Kconfig
@@ -400,13 +400,6 @@ config MTD_NAND_PXA3xx
This enables the driver for the NAND flash device found on
PXA3xx processors
-config MTD_NAND_PXA3xx_BUILTIN
- bool "Use builtin definitions for some NAND chips (deprecated)"
- depends on MTD_NAND_PXA3xx
- help
- This enables builtin definitions for some NAND chips. This
- is deprecated in favor of platform specific data.
-
config MTD_NAND_CM_X270
tristate "Support for NAND Flash on CM-X270 modules"
depends on MACH_ARMCORE
@@ -458,6 +451,7 @@ config MTD_NAND_ORION
config MTD_NAND_FSL_ELBC
tristate "NAND support for Freescale eLBC controllers"
depends on PPC_OF
+ select FSL_LBC
help
Various Freescale chips, including the 8313, include a NAND Flash
Controller Module with built-in hardware ECC capabilities.
@@ -531,4 +525,11 @@ config MTD_NAND_JZ4740
help
Enables support for NAND Flash on JZ4740 SoC based boards.
+config MTD_NAND_FSMC
+ tristate "Support for NAND on ST Micros FSMC"
+ depends on PLAT_SPEAR || PLAT_NOMADIK || MACH_U300
+ help
+ Enables support for NAND Flash chips on the ST Microelectronics
+ Flexible Static Memory Controller (FSMC)
+
endif # MTD_NAND
diff --git a/drivers/mtd/nand/Makefile b/drivers/mtd/nand/Makefile
index ac83dcdac5d6..8ad6faec72cb 100644
--- a/drivers/mtd/nand/Makefile
+++ b/drivers/mtd/nand/Makefile
@@ -19,6 +19,7 @@ obj-$(CONFIG_MTD_NAND_PPCHAMELEONEVB) += ppchameleonevb.o
obj-$(CONFIG_MTD_NAND_S3C2410) += s3c2410.o
obj-$(CONFIG_MTD_NAND_DAVINCI) += davinci_nand.o
obj-$(CONFIG_MTD_NAND_DISKONCHIP) += diskonchip.o
+obj-$(CONFIG_MTD_NAND_FSMC) += fsmc_nand.o
obj-$(CONFIG_MTD_NAND_H1900) += h1910.o
obj-$(CONFIG_MTD_NAND_RTC_FROM4) += rtc_from4.o
obj-$(CONFIG_MTD_NAND_SHARPSL) += sharpsl.o
diff --git a/drivers/mtd/nand/bf5xx_nand.c b/drivers/mtd/nand/bf5xx_nand.c
index 6fbeefa3a766..79947bea4d57 100644
--- a/drivers/mtd/nand/bf5xx_nand.c
+++ b/drivers/mtd/nand/bf5xx_nand.c
@@ -110,15 +110,6 @@ static const unsigned short bfin_nfc_pin_req[] =
0};
#ifdef CONFIG_MTD_NAND_BF5XX_BOOTROM_ECC
-static uint8_t bbt_pattern[] = { 0xff };
-
-static struct nand_bbt_descr bootrom_bbt = {
- .options = 0,
- .offs = 63,
- .len = 1,
- .pattern = bbt_pattern,
-};
-
static struct nand_ecclayout bootrom_ecclayout = {
.eccbytes = 24,
.eccpos = {
@@ -809,7 +800,6 @@ static int __devinit bf5xx_nand_probe(struct platform_device *pdev)
/* setup hardware ECC data struct */
if (hardware_ecc) {
#ifdef CONFIG_MTD_NAND_BF5XX_BOOTROM_ECC
- chip->badblock_pattern = &bootrom_bbt;
chip->ecc.layout = &bootrom_ecclayout;
#endif
chip->read_buf = bf5xx_nand_dma_read_buf;
@@ -830,6 +820,10 @@ static int __devinit bf5xx_nand_probe(struct platform_device *pdev)
goto out_err_nand_scan;
}
+#ifdef CONFIG_MTD_NAND_BF5XX_BOOTROM_ECC
+ chip->badblockpos = 63;
+#endif
+
/* add NAND partition */
bf5xx_nand_add_partition(info);
diff --git a/drivers/mtd/nand/davinci_nand.c b/drivers/mtd/nand/davinci_nand.c
index 2ac7367afe77..a90fde3ede28 100644
--- a/drivers/mtd/nand/davinci_nand.c
+++ b/drivers/mtd/nand/davinci_nand.c
@@ -35,6 +35,7 @@
#include <linux/slab.h>
#include <mach/nand.h>
+#include <mach/aemif.h>
#include <asm/mach-types.h>
@@ -74,6 +75,8 @@ struct davinci_nand_info {
uint32_t mask_cle;
uint32_t core_chipsel;
+
+ struct davinci_aemif_timing *timing;
};
static DEFINE_SPINLOCK(davinci_nand_lock);
@@ -313,7 +316,7 @@ static int nand_davinci_correct_4bit(struct mtd_info *mtd,
u32 syndrome[4];
u32 ecc_state;
unsigned num_errors, corrected;
- unsigned long timeo = jiffies + msecs_to_jiffies(100);
+ unsigned long timeo;
/* All bytes 0xff? It's an erased page; ignore its ECC. */
for (i = 0; i < 10; i++) {
@@ -369,9 +372,11 @@ compare:
* after setting the 4BITECC_ADD_CALC_START bit. So if you immediately
* begin trying to poll for the state, you may fall right out of your
* loop without any of the correction calculations having taken place.
- * The recommendation from the hardware team is to wait till ECC_STATE
- * reads less than 4, which means ECC HW has entered correction state.
+ * The recommendation from the hardware team is to initially delay as
+ * long as ECC_STATE reads less than 4. After that, ECC HW has entered
+ * correction state.
*/
+ timeo = jiffies + usecs_to_jiffies(100);
do {
ecc_state = (davinci_nand_readl(info,
NANDFSR_OFFSET) >> 8) & 0x0f;
@@ -478,36 +483,6 @@ static int nand_davinci_dev_ready(struct mtd_info *mtd)
return davinci_nand_readl(info, NANDFSR_OFFSET) & BIT(0);
}
-static void __init nand_dm6446evm_flash_init(struct davinci_nand_info *info)
-{
- uint32_t regval, a1cr;
-
- /*
- * NAND FLASH timings @ PLL1 == 459 MHz
- * - AEMIF.CLK freq = PLL1/6 = 459/6 = 76.5 MHz
- * - AEMIF.CLK period = 1/76.5 MHz = 13.1 ns
- */
- regval = 0
- | (0 << 31) /* selectStrobe */
- | (0 << 30) /* extWait (never with NAND) */
- | (1 << 26) /* writeSetup 10 ns */
- | (3 << 20) /* writeStrobe 40 ns */
- | (1 << 17) /* writeHold 10 ns */
- | (0 << 13) /* readSetup 10 ns */
- | (3 << 7) /* readStrobe 60 ns */
- | (0 << 4) /* readHold 10 ns */
- | (3 << 2) /* turnAround ?? ns */
- | (0 << 0) /* asyncSize 8-bit bus */
- ;
- a1cr = davinci_nand_readl(info, A1CR_OFFSET);
- if (a1cr != regval) {
- dev_dbg(info->dev, "Warning: NAND config: Set A1CR " \
- "reg to 0x%08x, was 0x%08x, should be done by " \
- "bootloader.\n", regval, a1cr);
- davinci_nand_writel(info, A1CR_OFFSET, regval);
- }
-}
-
/*----------------------------------------------------------------------*/
/* An ECC layout for using 4-bit ECC with small-page flash, storing
@@ -611,6 +586,7 @@ static int __init nand_davinci_probe(struct platform_device *pdev)
info->chip.options = pdata->options;
info->chip.bbt_td = pdata->bbt_td;
info->chip.bbt_md = pdata->bbt_md;
+ info->timing = pdata->timing;
info->ioaddr = (uint32_t __force) vaddr;
@@ -688,15 +664,25 @@ static int __init nand_davinci_probe(struct platform_device *pdev)
goto err_clk_enable;
}
- /* EMIF timings should normally be set by the boot loader,
- * especially after boot-from-NAND. The *only* reason to
- * have this special casing for the DM6446 EVM is to work
- * with boot-from-NOR ... with CS0 manually re-jumpered
- * (after startup) so it addresses the NAND flash, not NOR.
- * Even for dev boards, that's unusually rude...
+ /*
+ * Setup Async configuration register in case we did not boot from
+ * NAND and so bootloader did not bother to set it up.
*/
- if (machine_is_davinci_evm())
- nand_dm6446evm_flash_init(info);
+ val = davinci_nand_readl(info, A1CR_OFFSET + info->core_chipsel * 4);
+
+ /* Extended Wait is not valid and Select Strobe mode is not used */
+ val &= ~(ACR_ASIZE_MASK | ACR_EW_MASK | ACR_SS_MASK);
+ if (info->chip.options & NAND_BUSWIDTH_16)
+ val |= 0x1;
+
+ davinci_nand_writel(info, A1CR_OFFSET + info->core_chipsel * 4, val);
+
+ ret = davinci_aemif_setup_timing(info->timing, info->base,
+ info->core_chipsel);
+ if (ret < 0) {
+ dev_dbg(&pdev->dev, "NAND timing values setup fail\n");
+ goto err_timing;
+ }
spin_lock_irq(&davinci_nand_lock);
@@ -749,6 +735,9 @@ static int __init nand_davinci_probe(struct platform_device *pdev)
* breaks userspace ioctl interface with mtd-utils. Once we
* resolve this issue, NAND_ECC_HW_OOB_FIRST mode can be used
* for the 4KiB page chips.
+ *
+ * TODO: Note that nand_ecclayout has now been expanded and can
+ * hold plenty of OOB entries.
*/
dev_warn(&pdev->dev, "no 4-bit ECC support yet "
"for 4KiB-page NAND\n");
@@ -809,6 +798,7 @@ syndrome_done:
return 0;
err_scan:
+err_timing:
clk_disable(info->clk);
err_clk_enable:
diff --git a/drivers/mtd/nand/denali.c b/drivers/mtd/nand/denali.c
index 532fe07cf886..8c8d3c86c0e8 100644
--- a/drivers/mtd/nand/denali.c
+++ b/drivers/mtd/nand/denali.c
@@ -1292,6 +1292,7 @@ static void denali_cmdfunc(struct mtd_info *mtd, unsigned int cmd, int col,
read_status(denali);
break;
case NAND_CMD_READID:
+ case NAND_CMD_PARAM:
reset_buf(denali);
/*sometimes ManufactureId read from register is not right
* e.g. some of Micron MT29F32G08QAA MLC NAND chips
diff --git a/drivers/mtd/nand/fsl_elbc_nand.c b/drivers/mtd/nand/fsl_elbc_nand.c
index 80de0bff6c3a..c141b07b25d1 100644
--- a/drivers/mtd/nand/fsl_elbc_nand.c
+++ b/drivers/mtd/nand/fsl_elbc_nand.c
@@ -1,9 +1,11 @@
/* Freescale Enhanced Local Bus Controller NAND driver
*
- * Copyright (c) 2006-2007 Freescale Semiconductor
+ * Copyright © 2006-2007, 2010 Freescale Semiconductor
*
* Authors: Nick Spence <nick.spence@freescale.com>,
* Scott Wood <scottwood@freescale.com>
+ * Jack Lan <jack.lan@freescale.com>
+ * Roy Zang <tie-fei.zang@freescale.com>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
@@ -27,6 +29,7 @@
#include <linux/string.h>
#include <linux/ioport.h>
#include <linux/of_platform.h>
+#include <linux/platform_device.h>
#include <linux/slab.h>
#include <linux/interrupt.h>
@@ -42,14 +45,12 @@
#define ERR_BYTE 0xFF /* Value returned for read bytes when read failed */
#define FCM_TIMEOUT_MSECS 500 /* Maximum number of mSecs to wait for FCM */
-struct fsl_elbc_ctrl;
-
/* mtd information per set */
struct fsl_elbc_mtd {
struct mtd_info mtd;
struct nand_chip chip;
- struct fsl_elbc_ctrl *ctrl;
+ struct fsl_lbc_ctrl *ctrl;
struct device *dev;
int bank; /* Chip select bank number */
@@ -58,18 +59,12 @@ struct fsl_elbc_mtd {
unsigned int fmr; /* FCM Flash Mode Register value */
};
-/* overview of the fsl elbc controller */
+/* Freescale eLBC FCM controller information */
-struct fsl_elbc_ctrl {
+struct fsl_elbc_fcm_ctrl {
struct nand_hw_control controller;
struct fsl_elbc_mtd *chips[MAX_BANKS];
- /* device info */
- struct device *dev;
- struct fsl_lbc_regs __iomem *regs;
- int irq;
- wait_queue_head_t irq_wait;
- unsigned int irq_status; /* status read from LTESR by irq handler */
u8 __iomem *addr; /* Address of assigned FCM buffer */
unsigned int page; /* Last page written to / read from */
unsigned int read_bytes; /* Number of bytes read during command */
@@ -79,6 +74,7 @@ struct fsl_elbc_ctrl {
unsigned int mdr; /* UPM/FCM Data Register value */
unsigned int use_mdr; /* Non zero if the MDR is to be set */
unsigned int oob; /* Non zero if operating on OOB data */
+ unsigned int counter; /* counter for the initializations */
char *oob_poi; /* Place to write ECC after read back */
};
@@ -164,11 +160,12 @@ static void set_addr(struct mtd_info *mtd, int column, int page_addr, int oob)
{
struct nand_chip *chip = mtd->priv;
struct fsl_elbc_mtd *priv = chip->priv;
- struct fsl_elbc_ctrl *ctrl = priv->ctrl;
+ struct fsl_lbc_ctrl *ctrl = priv->ctrl;
struct fsl_lbc_regs __iomem *lbc = ctrl->regs;
+ struct fsl_elbc_fcm_ctrl *elbc_fcm_ctrl = ctrl->nand;
int buf_num;
- ctrl->page = page_addr;
+ elbc_fcm_ctrl->page = page_addr;
out_be32(&lbc->fbar,
page_addr >> (chip->phys_erase_shift - chip->page_shift));
@@ -185,16 +182,18 @@ static void set_addr(struct mtd_info *mtd, int column, int page_addr, int oob)
buf_num = page_addr & 7;
}
- ctrl->addr = priv->vbase + buf_num * 1024;
- ctrl->index = column;
+ elbc_fcm_ctrl->addr = priv->vbase + buf_num * 1024;
+ elbc_fcm_ctrl->index = column;
/* for OOB data point to the second half of the buffer */
if (oob)
- ctrl->index += priv->page_size ? 2048 : 512;
+ elbc_fcm_ctrl->index += priv->page_size ? 2048 : 512;
- dev_vdbg(ctrl->dev, "set_addr: bank=%d, ctrl->addr=0x%p (0x%p), "
+ dev_vdbg(priv->dev, "set_addr: bank=%d, "
+ "elbc_fcm_ctrl->addr=0x%p (0x%p), "
"index %x, pes %d ps %d\n",
- buf_num, ctrl->addr, priv->vbase, ctrl->index,
+ buf_num, elbc_fcm_ctrl->addr, priv->vbase,
+ elbc_fcm_ctrl->index,
chip->phys_erase_shift, chip->page_shift);
}
@@ -205,18 +204,19 @@ static int fsl_elbc_run_command(struct mtd_info *mtd)
{
struct nand_chip *chip = mtd->priv;
struct fsl_elbc_mtd *priv = chip->priv;
- struct fsl_elbc_ctrl *ctrl = priv->ctrl;
+ struct fsl_lbc_ctrl *ctrl = priv->ctrl;
+ struct fsl_elbc_fcm_ctrl *elbc_fcm_ctrl = ctrl->nand;
struct fsl_lbc_regs __iomem *lbc = ctrl->regs;
/* Setup the FMR[OP] to execute without write protection */
out_be32(&lbc->fmr, priv->fmr | 3);
- if (ctrl->use_mdr)
- out_be32(&lbc->mdr, ctrl->mdr);
+ if (elbc_fcm_ctrl->use_mdr)
+ out_be32(&lbc->mdr, elbc_fcm_ctrl->mdr);
- dev_vdbg(ctrl->dev,
+ dev_vdbg(priv->dev,
"fsl_elbc_run_command: fmr=%08x fir=%08x fcr=%08x\n",
in_be32(&lbc->fmr), in_be32(&lbc->fir), in_be32(&lbc->fcr));
- dev_vdbg(ctrl->dev,
+ dev_vdbg(priv->dev,
"fsl_elbc_run_command: fbar=%08x fpar=%08x "
"fbcr=%08x bank=%d\n",
in_be32(&lbc->fbar), in_be32(&lbc->fpar),
@@ -229,19 +229,18 @@ static int fsl_elbc_run_command(struct mtd_info *mtd)
/* wait for FCM complete flag or timeout */
wait_event_timeout(ctrl->irq_wait, ctrl->irq_status,
FCM_TIMEOUT_MSECS * HZ/1000);
- ctrl->status = ctrl->irq_status;
-
+ elbc_fcm_ctrl->status = ctrl->irq_status;
/* store mdr value in case it was needed */
- if (ctrl->use_mdr)
- ctrl->mdr = in_be32(&lbc->mdr);
+ if (elbc_fcm_ctrl->use_mdr)
+ elbc_fcm_ctrl->mdr = in_be32(&lbc->mdr);
- ctrl->use_mdr = 0;
+ elbc_fcm_ctrl->use_mdr = 0;
- if (ctrl->status != LTESR_CC) {
- dev_info(ctrl->dev,
+ if (elbc_fcm_ctrl->status != LTESR_CC) {
+ dev_info(priv->dev,
"command failed: fir %x fcr %x status %x mdr %x\n",
in_be32(&lbc->fir), in_be32(&lbc->fcr),
- ctrl->status, ctrl->mdr);
+ elbc_fcm_ctrl->status, elbc_fcm_ctrl->mdr);
return -EIO;
}
@@ -251,7 +250,7 @@ static int fsl_elbc_run_command(struct mtd_info *mtd)
static void fsl_elbc_do_read(struct nand_chip *chip, int oob)
{
struct fsl_elbc_mtd *priv = chip->priv;
- struct fsl_elbc_ctrl *ctrl = priv->ctrl;
+ struct fsl_lbc_ctrl *ctrl = priv->ctrl;
struct fsl_lbc_regs __iomem *lbc = ctrl->regs;
if (priv->page_size) {
@@ -284,15 +283,16 @@ static void fsl_elbc_cmdfunc(struct mtd_info *mtd, unsigned int command,
{
struct nand_chip *chip = mtd->priv;
struct fsl_elbc_mtd *priv = chip->priv;
- struct fsl_elbc_ctrl *ctrl = priv->ctrl;
+ struct fsl_lbc_ctrl *ctrl = priv->ctrl;
+ struct fsl_elbc_fcm_ctrl *elbc_fcm_ctrl = ctrl->nand;
struct fsl_lbc_regs __iomem *lbc = ctrl->regs;
- ctrl->use_mdr = 0;
+ elbc_fcm_ctrl->use_mdr = 0;
/* clear the read buffer */
- ctrl->read_bytes = 0;
+ elbc_fcm_ctrl->read_bytes = 0;
if (command != NAND_CMD_PAGEPROG)
- ctrl->index = 0;
+ elbc_fcm_ctrl->index = 0;
switch (command) {
/* READ0 and READ1 read the entire buffer to use hardware ECC. */
@@ -301,7 +301,7 @@ static void fsl_elbc_cmdfunc(struct mtd_info *mtd, unsigned int command,
/* fall-through */
case NAND_CMD_READ0:
- dev_dbg(ctrl->dev,
+ dev_dbg(priv->dev,
"fsl_elbc_cmdfunc: NAND_CMD_READ0, page_addr:"
" 0x%x, column: 0x%x.\n", page_addr, column);
@@ -309,8 +309,8 @@ static void fsl_elbc_cmdfunc(struct mtd_info *mtd, unsigned int command,
out_be32(&lbc->fbcr, 0); /* read entire page to enable ECC */
set_addr(mtd, 0, page_addr, 0);
- ctrl->read_bytes = mtd->writesize + mtd->oobsize;
- ctrl->index += column;
+ elbc_fcm_ctrl->read_bytes = mtd->writesize + mtd->oobsize;
+ elbc_fcm_ctrl->index += column;
fsl_elbc_do_read(chip, 0);
fsl_elbc_run_command(mtd);
@@ -318,14 +318,14 @@ static void fsl_elbc_cmdfunc(struct mtd_info *mtd, unsigned int command,
/* READOOB reads only the OOB because no ECC is performed. */
case NAND_CMD_READOOB:
- dev_vdbg(ctrl->dev,
+ dev_vdbg(priv->dev,
"fsl_elbc_cmdfunc: NAND_CMD_READOOB, page_addr:"
" 0x%x, column: 0x%x.\n", page_addr, column);
out_be32(&lbc->fbcr, mtd->oobsize - column);
set_addr(mtd, column, page_addr, 1);
- ctrl->read_bytes = mtd->writesize + mtd->oobsize;
+ elbc_fcm_ctrl->read_bytes = mtd->writesize + mtd->oobsize;
fsl_elbc_do_read(chip, 1);
fsl_elbc_run_command(mtd);
@@ -333,7 +333,7 @@ static void fsl_elbc_cmdfunc(struct mtd_info *mtd, unsigned int command,
/* READID must read all 5 possible bytes while CEB is active */
case NAND_CMD_READID:
- dev_vdbg(ctrl->dev, "fsl_elbc_cmdfunc: NAND_CMD_READID.\n");
+ dev_vdbg(priv->dev, "fsl_elbc_cmdfunc: NAND_CMD_READID.\n");
out_be32(&lbc->fir, (FIR_OP_CM0 << FIR_OP0_SHIFT) |
(FIR_OP_UA << FIR_OP1_SHIFT) |
@@ -341,9 +341,9 @@ static void fsl_elbc_cmdfunc(struct mtd_info *mtd, unsigned int command,
out_be32(&lbc->fcr, NAND_CMD_READID << FCR_CMD0_SHIFT);
/* 5 bytes for manuf, device and exts */
out_be32(&lbc->fbcr, 5);
- ctrl->read_bytes = 5;
- ctrl->use_mdr = 1;
- ctrl->mdr = 0;
+ elbc_fcm_ctrl->read_bytes = 5;
+ elbc_fcm_ctrl->use_mdr = 1;
+ elbc_fcm_ctrl->mdr = 0;
set_addr(mtd, 0, 0, 0);
fsl_elbc_run_command(mtd);
@@ -351,7 +351,7 @@ static void fsl_elbc_cmdfunc(struct mtd_info *mtd, unsigned int command,
/* ERASE1 stores the block and page address */
case NAND_CMD_ERASE1:
- dev_vdbg(ctrl->dev,
+ dev_vdbg(priv->dev,
"fsl_elbc_cmdfunc: NAND_CMD_ERASE1, "
"page_addr: 0x%x.\n", page_addr);
set_addr(mtd, 0, page_addr, 0);
@@ -359,7 +359,7 @@ static void fsl_elbc_cmdfunc(struct mtd_info *mtd, unsigned int command,
/* ERASE2 uses the block and page address from ERASE1 */
case NAND_CMD_ERASE2:
- dev_vdbg(ctrl->dev, "fsl_elbc_cmdfunc: NAND_CMD_ERASE2.\n");
+ dev_vdbg(priv->dev, "fsl_elbc_cmdfunc: NAND_CMD_ERASE2.\n");
out_be32(&lbc->fir,
(FIR_OP_CM0 << FIR_OP0_SHIFT) |
@@ -374,8 +374,8 @@ static void fsl_elbc_cmdfunc(struct mtd_info *mtd, unsigned int command,
(NAND_CMD_ERASE2 << FCR_CMD2_SHIFT));
out_be32(&lbc->fbcr, 0);
- ctrl->read_bytes = 0;
- ctrl->use_mdr = 1;
+ elbc_fcm_ctrl->read_bytes = 0;
+ elbc_fcm_ctrl->use_mdr = 1;
fsl_elbc_run_command(mtd);
return;
@@ -383,14 +383,12 @@ static void fsl_elbc_cmdfunc(struct mtd_info *mtd, unsigned int command,
/* SEQIN sets up the addr buffer and all registers except the length */
case NAND_CMD_SEQIN: {
__be32 fcr;
- dev_vdbg(ctrl->dev,
- "fsl_elbc_cmdfunc: NAND_CMD_SEQIN/PAGE_PROG, "
+ dev_vdbg(priv->dev,
+ "fsl_elbc_cmdfunc: NAND_CMD_SEQIN/PAGE_PROG, "
"page_addr: 0x%x, column: 0x%x.\n",
page_addr, column);
- ctrl->column = column;
- ctrl->oob = 0;
- ctrl->use_mdr = 1;
+ elbc_fcm_ctrl->use_mdr = 1;
fcr = (NAND_CMD_STATUS << FCR_CMD1_SHIFT) |
(NAND_CMD_SEQIN << FCR_CMD2_SHIFT) |
@@ -420,7 +418,7 @@ static void fsl_elbc_cmdfunc(struct mtd_info *mtd, unsigned int command,
/* OOB area --> READOOB */
column -= mtd->writesize;
fcr |= NAND_CMD_READOOB << FCR_CMD0_SHIFT;
- ctrl->oob = 1;
+ elbc_fcm_ctrl->oob = 1;
} else {
WARN_ON(column != 0);
/* First 256 bytes --> READ0 */
@@ -429,24 +427,24 @@ static void fsl_elbc_cmdfunc(struct mtd_info *mtd, unsigned int command,
}
out_be32(&lbc->fcr, fcr);
- set_addr(mtd, column, page_addr, ctrl->oob);
+ set_addr(mtd, column, page_addr, elbc_fcm_ctrl->oob);
return;
}
/* PAGEPROG reuses all of the setup from SEQIN and adds the length */
case NAND_CMD_PAGEPROG: {
int full_page;
- dev_vdbg(ctrl->dev,
+ dev_vdbg(priv->dev,
"fsl_elbc_cmdfunc: NAND_CMD_PAGEPROG "
- "writing %d bytes.\n", ctrl->index);
+ "writing %d bytes.\n", elbc_fcm_ctrl->index);
/* if the write did not start at 0 or is not a full page
* then set the exact length, otherwise use a full page
* write so the HW generates the ECC.
*/
- if (ctrl->oob || ctrl->column != 0 ||
- ctrl->index != mtd->writesize + mtd->oobsize) {
- out_be32(&lbc->fbcr, ctrl->index);
+ if (elbc_fcm_ctrl->oob || elbc_fcm_ctrl->column != 0 ||
+ elbc_fcm_ctrl->index != mtd->writesize + mtd->oobsize) {
+ out_be32(&lbc->fbcr, elbc_fcm_ctrl->index);
full_page = 0;
} else {
out_be32(&lbc->fbcr, 0);
@@ -458,21 +456,21 @@ static void fsl_elbc_cmdfunc(struct mtd_info *mtd, unsigned int command,
/* Read back the page in order to fill in the ECC for the
* caller. Is this really needed?
*/
- if (full_page && ctrl->oob_poi) {
+ if (full_page && elbc_fcm_ctrl->oob_poi) {
out_be32(&lbc->fbcr, 3);
set_addr(mtd, 6, page_addr, 1);
- ctrl->read_bytes = mtd->writesize + 9;
+ elbc_fcm_ctrl->read_bytes = mtd->writesize + 9;
fsl_elbc_do_read(chip, 1);
fsl_elbc_run_command(mtd);
- memcpy_fromio(ctrl->oob_poi + 6,
- &ctrl->addr[ctrl->index], 3);
- ctrl->index += 3;
+ memcpy_fromio(elbc_fcm_ctrl->oob_poi + 6,
+ &elbc_fcm_ctrl->addr[elbc_fcm_ctrl->index], 3);
+ elbc_fcm_ctrl->index += 3;
}
- ctrl->oob_poi = NULL;
+ elbc_fcm_ctrl->oob_poi = NULL;
return;
}
@@ -485,26 +483,26 @@ static void fsl_elbc_cmdfunc(struct mtd_info *mtd, unsigned int command,
out_be32(&lbc->fcr, NAND_CMD_STATUS << FCR_CMD0_SHIFT);
out_be32(&lbc->fbcr, 1);
set_addr(mtd, 0, 0, 0);
- ctrl->read_bytes = 1;
+ elbc_fcm_ctrl->read_bytes = 1;
fsl_elbc_run_command(mtd);
/* The chip always seems to report that it is
* write-protected, even when it is not.
*/
- setbits8(ctrl->addr, NAND_STATUS_WP);
+ setbits8(elbc_fcm_ctrl->addr, NAND_STATUS_WP);
return;
/* RESET without waiting for the ready line */
case NAND_CMD_RESET:
- dev_dbg(ctrl->dev, "fsl_elbc_cmdfunc: NAND_CMD_RESET.\n");
+ dev_dbg(priv->dev, "fsl_elbc_cmdfunc: NAND_CMD_RESET.\n");
out_be32(&lbc->fir, FIR_OP_CM0 << FIR_OP0_SHIFT);
out_be32(&lbc->fcr, NAND_CMD_RESET << FCR_CMD0_SHIFT);
fsl_elbc_run_command(mtd);
return;
default:
- dev_err(ctrl->dev,
+ dev_err(priv->dev,
"fsl_elbc_cmdfunc: error, unsupported command 0x%x.\n",
command);
}
@@ -524,24 +522,24 @@ static void fsl_elbc_write_buf(struct mtd_info *mtd, const u8 *buf, int len)
{
struct nand_chip *chip = mtd->priv;
struct fsl_elbc_mtd *priv = chip->priv;
- struct fsl_elbc_ctrl *ctrl = priv->ctrl;
+ struct fsl_elbc_fcm_ctrl *elbc_fcm_ctrl = priv->ctrl->nand;
unsigned int bufsize = mtd->writesize + mtd->oobsize;
if (len <= 0) {
- dev_err(ctrl->dev, "write_buf of %d bytes", len);
- ctrl->status = 0;
+ dev_err(priv->dev, "write_buf of %d bytes", len);
+ elbc_fcm_ctrl->status = 0;
return;
}
- if ((unsigned int)len > bufsize - ctrl->index) {
- dev_err(ctrl->dev,
+ if ((unsigned int)len > bufsize - elbc_fcm_ctrl->index) {
+ dev_err(priv->dev,
"write_buf beyond end of buffer "
"(%d requested, %u available)\n",
- len, bufsize - ctrl->index);
- len = bufsize - ctrl->index;
+ len, bufsize - elbc_fcm_ctrl->index);
+ len = bufsize - elbc_fcm_ctrl->index;
}
- memcpy_toio(&ctrl->addr[ctrl->index], buf, len);
+ memcpy_toio(&elbc_fcm_ctrl->addr[elbc_fcm_ctrl->index], buf, len);
/*
* This is workaround for the weird elbc hangs during nand write,
* Scott Wood says: "...perhaps difference in how long it takes a
@@ -549,9 +547,9 @@ static void fsl_elbc_write_buf(struct mtd_info *mtd, const u8 *buf, int len)
* is causing problems, and sync isn't helping for some reason."
* Reading back the last byte helps though.
*/
- in_8(&ctrl->addr[ctrl->index] + len - 1);
+ in_8(&elbc_fcm_ctrl->addr[elbc_fcm_ctrl->index] + len - 1);
- ctrl->index += len;
+ elbc_fcm_ctrl->index += len;
}
/*
@@ -562,13 +560,13 @@ static u8 fsl_elbc_read_byte(struct mtd_info *mtd)
{
struct nand_chip *chip = mtd->priv;
struct fsl_elbc_mtd *priv = chip->priv;
- struct fsl_elbc_ctrl *ctrl = priv->ctrl;
+ struct fsl_elbc_fcm_ctrl *elbc_fcm_ctrl = priv->ctrl->nand;
/* If there are still bytes in the FCM, then use the next byte. */
- if (ctrl->index < ctrl->read_bytes)
- return in_8(&ctrl->addr[ctrl->index++]);
+ if (elbc_fcm_ctrl->index < elbc_fcm_ctrl->read_bytes)
+ return in_8(&elbc_fcm_ctrl->addr[elbc_fcm_ctrl->index++]);
- dev_err(ctrl->dev, "read_byte beyond end of buffer\n");
+ dev_err(priv->dev, "read_byte beyond end of buffer\n");
return ERR_BYTE;
}
@@ -579,18 +577,19 @@ static void fsl_elbc_read_buf(struct mtd_info *mtd, u8 *buf, int len)
{
struct nand_chip *chip = mtd->priv;
struct fsl_elbc_mtd *priv = chip->priv;
- struct fsl_elbc_ctrl *ctrl = priv->ctrl;
+ struct fsl_elbc_fcm_ctrl *elbc_fcm_ctrl = priv->ctrl->nand;
int avail;
if (len < 0)
return;
- avail = min((unsigned int)len, ctrl->read_bytes - ctrl->index);
- memcpy_fromio(buf, &ctrl->addr[ctrl->index], avail);
- ctrl->index += avail;
+ avail = min((unsigned int)len,
+ elbc_fcm_ctrl->read_bytes - elbc_fcm_ctrl->index);
+ memcpy_fromio(buf, &elbc_fcm_ctrl->addr[elbc_fcm_ctrl->index], avail);
+ elbc_fcm_ctrl->index += avail;
if (len > avail)
- dev_err(ctrl->dev,
+ dev_err(priv->dev,
"read_buf beyond end of buffer "
"(%d requested, %d available)\n",
len, avail);
@@ -603,30 +602,32 @@ static int fsl_elbc_verify_buf(struct mtd_info *mtd, const u_char *buf, int len)
{
struct nand_chip *chip = mtd->priv;
struct fsl_elbc_mtd *priv = chip->priv;
- struct fsl_elbc_ctrl *ctrl = priv->ctrl;
+ struct fsl_elbc_fcm_ctrl *elbc_fcm_ctrl = priv->ctrl->nand;
int i;
if (len < 0) {
- dev_err(ctrl->dev, "write_buf of %d bytes", len);
+ dev_err(priv->dev, "write_buf of %d bytes", len);
return -EINVAL;
}
- if ((unsigned int)len > ctrl->read_bytes - ctrl->index) {
- dev_err(ctrl->dev,
- "verify_buf beyond end of buffer "
- "(%d requested, %u available)\n",
- len, ctrl->read_bytes - ctrl->index);
+ if ((unsigned int)len >
+ elbc_fcm_ctrl->read_bytes - elbc_fcm_ctrl->index) {
+ dev_err(priv->dev,
+ "verify_buf beyond end of buffer "
+ "(%d requested, %u available)\n",
+ len, elbc_fcm_ctrl->read_bytes - elbc_fcm_ctrl->index);
- ctrl->index = ctrl->read_bytes;
+ elbc_fcm_ctrl->index = elbc_fcm_ctrl->read_bytes;
return -EINVAL;
}
for (i = 0; i < len; i++)
- if (in_8(&ctrl->addr[ctrl->index + i]) != buf[i])
+ if (in_8(&elbc_fcm_ctrl->addr[elbc_fcm_ctrl->index + i])
+ != buf[i])
break;
- ctrl->index += len;
- return i == len && ctrl->status == LTESR_CC ? 0 : -EIO;
+ elbc_fcm_ctrl->index += len;
+ return i == len && elbc_fcm_ctrl->status == LTESR_CC ? 0 : -EIO;
}
/* This function is called after Program and Erase Operations to
@@ -635,22 +636,22 @@ static int fsl_elbc_verify_buf(struct mtd_info *mtd, const u_char *buf, int len)
static int fsl_elbc_wait(struct mtd_info *mtd, struct nand_chip *chip)
{
struct fsl_elbc_mtd *priv = chip->priv;
- struct fsl_elbc_ctrl *ctrl = priv->ctrl;
+ struct fsl_elbc_fcm_ctrl *elbc_fcm_ctrl = priv->ctrl->nand;
- if (ctrl->status != LTESR_CC)
+ if (elbc_fcm_ctrl->status != LTESR_CC)
return NAND_STATUS_FAIL;
/* The chip always seems to report that it is
* write-protected, even when it is not.
*/
- return (ctrl->mdr & 0xff) | NAND_STATUS_WP;
+ return (elbc_fcm_ctrl->mdr & 0xff) | NAND_STATUS_WP;
}
static int fsl_elbc_chip_init_tail(struct mtd_info *mtd)
{
struct nand_chip *chip = mtd->priv;
struct fsl_elbc_mtd *priv = chip->priv;
- struct fsl_elbc_ctrl *ctrl = priv->ctrl;
+ struct fsl_lbc_ctrl *ctrl = priv->ctrl;
struct fsl_lbc_regs __iomem *lbc = ctrl->regs;
unsigned int al;
@@ -665,41 +666,41 @@ static int fsl_elbc_chip_init_tail(struct mtd_info *mtd)
priv->fmr |= (12 << FMR_CWTO_SHIFT) | /* Timeout > 12 ms */
(al << FMR_AL_SHIFT);
- dev_dbg(ctrl->dev, "fsl_elbc_init: nand->numchips = %d\n",
+ dev_dbg(priv->dev, "fsl_elbc_init: nand->numchips = %d\n",
chip->numchips);
- dev_dbg(ctrl->dev, "fsl_elbc_init: nand->chipsize = %lld\n",
+ dev_dbg(priv->dev, "fsl_elbc_init: nand->chipsize = %lld\n",
chip->chipsize);
- dev_dbg(ctrl->dev, "fsl_elbc_init: nand->pagemask = %8x\n",
+ dev_dbg(priv->dev, "fsl_elbc_init: nand->pagemask = %8x\n",
chip->pagemask);
- dev_dbg(ctrl->dev, "fsl_elbc_init: nand->chip_delay = %d\n",
+ dev_dbg(priv->dev, "fsl_elbc_init: nand->chip_delay = %d\n",
chip->chip_delay);
- dev_dbg(ctrl->dev, "fsl_elbc_init: nand->badblockpos = %d\n",
+ dev_dbg(priv->dev, "fsl_elbc_init: nand->badblockpos = %d\n",
chip->badblockpos);
- dev_dbg(ctrl->dev, "fsl_elbc_init: nand->chip_shift = %d\n",
+ dev_dbg(priv->dev, "fsl_elbc_init: nand->chip_shift = %d\n",
chip->chip_shift);
- dev_dbg(ctrl->dev, "fsl_elbc_init: nand->page_shift = %d\n",
+ dev_dbg(priv->dev, "fsl_elbc_init: nand->page_shift = %d\n",
chip->page_shift);
- dev_dbg(ctrl->dev, "fsl_elbc_init: nand->phys_erase_shift = %d\n",
+ dev_dbg(priv->dev, "fsl_elbc_init: nand->phys_erase_shift = %d\n",
chip->phys_erase_shift);
- dev_dbg(ctrl->dev, "fsl_elbc_init: nand->ecclayout = %p\n",
+ dev_dbg(priv->dev, "fsl_elbc_init: nand->ecclayout = %p\n",
chip->ecclayout);
- dev_dbg(ctrl->dev, "fsl_elbc_init: nand->ecc.mode = %d\n",
+ dev_dbg(priv->dev, "fsl_elbc_init: nand->ecc.mode = %d\n",
chip->ecc.mode);
- dev_dbg(ctrl->dev, "fsl_elbc_init: nand->ecc.steps = %d\n",
+ dev_dbg(priv->dev, "fsl_elbc_init: nand->ecc.steps = %d\n",
chip->ecc.steps);
- dev_dbg(ctrl->dev, "fsl_elbc_init: nand->ecc.bytes = %d\n",
+ dev_dbg(priv->dev, "fsl_elbc_init: nand->ecc.bytes = %d\n",
chip->ecc.bytes);
- dev_dbg(ctrl->dev, "fsl_elbc_init: nand->ecc.total = %d\n",
+ dev_dbg(priv->dev, "fsl_elbc_init: nand->ecc.total = %d\n",
chip->ecc.total);
- dev_dbg(ctrl->dev, "fsl_elbc_init: nand->ecc.layout = %p\n",
+ dev_dbg(priv->dev, "fsl_elbc_init: nand->ecc.layout = %p\n",
chip->ecc.layout);
- dev_dbg(ctrl->dev, "fsl_elbc_init: mtd->flags = %08x\n", mtd->flags);
- dev_dbg(ctrl->dev, "fsl_elbc_init: mtd->size = %lld\n", mtd->size);
- dev_dbg(ctrl->dev, "fsl_elbc_init: mtd->erasesize = %d\n",
+ dev_dbg(priv->dev, "fsl_elbc_init: mtd->flags = %08x\n", mtd->flags);
+ dev_dbg(priv->dev, "fsl_elbc_init: mtd->size = %lld\n", mtd->size);
+ dev_dbg(priv->dev, "fsl_elbc_init: mtd->erasesize = %d\n",
mtd->erasesize);
- dev_dbg(ctrl->dev, "fsl_elbc_init: mtd->writesize = %d\n",
+ dev_dbg(priv->dev, "fsl_elbc_init: mtd->writesize = %d\n",
mtd->writesize);
- dev_dbg(ctrl->dev, "fsl_elbc_init: mtd->oobsize = %d\n",
+ dev_dbg(priv->dev, "fsl_elbc_init: mtd->oobsize = %d\n",
mtd->oobsize);
/* adjust Option Register and ECC to match Flash page size */
@@ -719,7 +720,7 @@ static int fsl_elbc_chip_init_tail(struct mtd_info *mtd)
chip->badblock_pattern = &largepage_memorybased;
}
} else {
- dev_err(ctrl->dev,
+ dev_err(priv->dev,
"fsl_elbc_init: page size %d is not supported\n",
mtd->writesize);
return -1;
@@ -750,18 +751,19 @@ static void fsl_elbc_write_page(struct mtd_info *mtd,
const uint8_t *buf)
{
struct fsl_elbc_mtd *priv = chip->priv;
- struct fsl_elbc_ctrl *ctrl = priv->ctrl;
+ struct fsl_elbc_fcm_ctrl *elbc_fcm_ctrl = priv->ctrl->nand;
fsl_elbc_write_buf(mtd, buf, mtd->writesize);
fsl_elbc_write_buf(mtd, chip->oob_poi, mtd->oobsize);
- ctrl->oob_poi = chip->oob_poi;
+ elbc_fcm_ctrl->oob_poi = chip->oob_poi;
}
static int fsl_elbc_chip_init(struct fsl_elbc_mtd *priv)
{
- struct fsl_elbc_ctrl *ctrl = priv->ctrl;
+ struct fsl_lbc_ctrl *ctrl = priv->ctrl;
struct fsl_lbc_regs __iomem *lbc = ctrl->regs;
+ struct fsl_elbc_fcm_ctrl *elbc_fcm_ctrl = ctrl->nand;
struct nand_chip *chip = &priv->chip;
dev_dbg(priv->dev, "eLBC Set Information for bank %d\n", priv->bank);
@@ -790,7 +792,7 @@ static int fsl_elbc_chip_init(struct fsl_elbc_mtd *priv)
chip->options = NAND_NO_READRDY | NAND_NO_AUTOINCR |
NAND_USE_FLASH_BBT;
- chip->controller = &ctrl->controller;
+ chip->controller = &elbc_fcm_ctrl->controller;
chip->priv = priv;
chip->ecc.read_page = fsl_elbc_read_page;
@@ -815,8 +817,7 @@ static int fsl_elbc_chip_init(struct fsl_elbc_mtd *priv)
static int fsl_elbc_chip_remove(struct fsl_elbc_mtd *priv)
{
- struct fsl_elbc_ctrl *ctrl = priv->ctrl;
-
+ struct fsl_elbc_fcm_ctrl *elbc_fcm_ctrl = priv->ctrl->nand;
nand_release(&priv->mtd);
kfree(priv->mtd.name);
@@ -824,18 +825,21 @@ static int fsl_elbc_chip_remove(struct fsl_elbc_mtd *priv)
if (priv->vbase)
iounmap(priv->vbase);
- ctrl->chips[priv->bank] = NULL;
+ elbc_fcm_ctrl->chips[priv->bank] = NULL;
kfree(priv);
-
+ kfree(elbc_fcm_ctrl);
return 0;
}
-static int __devinit fsl_elbc_chip_probe(struct fsl_elbc_ctrl *ctrl,
- struct device_node *node)
+static DEFINE_MUTEX(fsl_elbc_nand_mutex);
+
+static int __devinit fsl_elbc_nand_probe(struct platform_device *pdev)
{
- struct fsl_lbc_regs __iomem *lbc = ctrl->regs;
+ struct fsl_lbc_regs __iomem *lbc;
struct fsl_elbc_mtd *priv;
struct resource res;
+ struct fsl_elbc_fcm_ctrl *elbc_fcm_ctrl;
+
#ifdef CONFIG_MTD_PARTITIONS
static const char *part_probe_types[]
= { "cmdlinepart", "RedBoot", NULL };
@@ -843,11 +847,18 @@ static int __devinit fsl_elbc_chip_probe(struct fsl_elbc_ctrl *ctrl,
#endif
int ret;
int bank;
+ struct device *dev;
+ struct device_node *node = pdev->dev.of_node;
+
+ if (!fsl_lbc_ctrl_dev || !fsl_lbc_ctrl_dev->regs)
+ return -ENODEV;
+ lbc = fsl_lbc_ctrl_dev->regs;
+ dev = fsl_lbc_ctrl_dev->dev;
/* get, allocate and map the memory resource */
ret = of_address_to_resource(node, 0, &res);
if (ret) {
- dev_err(ctrl->dev, "failed to get resource\n");
+ dev_err(dev, "failed to get resource\n");
return ret;
}
@@ -857,11 +868,11 @@ static int __devinit fsl_elbc_chip_probe(struct fsl_elbc_ctrl *ctrl,
(in_be32(&lbc->bank[bank].br) & BR_MSEL) == BR_MS_FCM &&
(in_be32(&lbc->bank[bank].br) &
in_be32(&lbc->bank[bank].or) & BR_BA)
- == res.start)
+ == fsl_lbc_addr(res.start))
break;
if (bank >= MAX_BANKS) {
- dev_err(ctrl->dev, "address did not match any chip selects\n");
+ dev_err(dev, "address did not match any chip selects\n");
return -ENODEV;
}
@@ -869,14 +880,33 @@ static int __devinit fsl_elbc_chip_probe(struct fsl_elbc_ctrl *ctrl,
if (!priv)
return -ENOMEM;
- ctrl->chips[bank] = priv;
+ mutex_lock(&fsl_elbc_nand_mutex);
+ if (!fsl_lbc_ctrl_dev->nand) {
+ elbc_fcm_ctrl = kzalloc(sizeof(*elbc_fcm_ctrl), GFP_KERNEL);
+ if (!elbc_fcm_ctrl) {
+ dev_err(dev, "failed to allocate memory\n");
+ mutex_unlock(&fsl_elbc_nand_mutex);
+ ret = -ENOMEM;
+ goto err;
+ }
+ elbc_fcm_ctrl->counter++;
+
+ spin_lock_init(&elbc_fcm_ctrl->controller.lock);
+ init_waitqueue_head(&elbc_fcm_ctrl->controller.wq);
+ fsl_lbc_ctrl_dev->nand = elbc_fcm_ctrl;
+ } else {
+ elbc_fcm_ctrl = fsl_lbc_ctrl_dev->nand;
+ }
+ mutex_unlock(&fsl_elbc_nand_mutex);
+
+ elbc_fcm_ctrl->chips[bank] = priv;
priv->bank = bank;
- priv->ctrl = ctrl;
- priv->dev = ctrl->dev;
+ priv->ctrl = fsl_lbc_ctrl_dev;
+ priv->dev = dev;
priv->vbase = ioremap(res.start, resource_size(&res));
if (!priv->vbase) {
- dev_err(ctrl->dev, "failed to map chip region\n");
+ dev_err(dev, "failed to map chip region\n");
ret = -ENOMEM;
goto err;
}
@@ -933,171 +963,53 @@ err:
return ret;
}
-static int __devinit fsl_elbc_ctrl_init(struct fsl_elbc_ctrl *ctrl)
+static int fsl_elbc_nand_remove(struct platform_device *pdev)
{
- struct fsl_lbc_regs __iomem *lbc = ctrl->regs;
-
- /*
- * NAND transactions can tie up the bus for a long time, so set the
- * bus timeout to max by clearing LBCR[BMT] (highest base counter
- * value) and setting LBCR[BMTPS] to the highest prescaler value.
- */
- clrsetbits_be32(&lbc->lbcr, LBCR_BMT, 15);
-
- /* clear event registers */
- setbits32(&lbc->ltesr, LTESR_NAND_MASK);
- out_be32(&lbc->lteatr, 0);
-
- /* Enable interrupts for any detected events */
- out_be32(&lbc->lteir, LTESR_NAND_MASK);
-
- ctrl->read_bytes = 0;
- ctrl->index = 0;
- ctrl->addr = NULL;
-
- return 0;
-}
-
-static int fsl_elbc_ctrl_remove(struct platform_device *ofdev)
-{
- struct fsl_elbc_ctrl *ctrl = dev_get_drvdata(&ofdev->dev);
int i;
-
+ struct fsl_elbc_fcm_ctrl *elbc_fcm_ctrl = fsl_lbc_ctrl_dev->nand;
for (i = 0; i < MAX_BANKS; i++)
- if (ctrl->chips[i])
- fsl_elbc_chip_remove(ctrl->chips[i]);
-
- if (ctrl->irq)
- free_irq(ctrl->irq, ctrl);
-
- if (ctrl->regs)
- iounmap(ctrl->regs);
-
- dev_set_drvdata(&ofdev->dev, NULL);
- kfree(ctrl);
- return 0;
-}
-
-/* NOTE: This interrupt is also used to report other localbus events,
- * such as transaction errors on other chipselects. If we want to
- * capture those, we'll need to move the IRQ code into a shared
- * LBC driver.
- */
-
-static irqreturn_t fsl_elbc_ctrl_irq(int irqno, void *data)
-{
- struct fsl_elbc_ctrl *ctrl = data;
- struct fsl_lbc_regs __iomem *lbc = ctrl->regs;
- __be32 status = in_be32(&lbc->ltesr) & LTESR_NAND_MASK;
-
- if (status) {
- out_be32(&lbc->ltesr, status);
- out_be32(&lbc->lteatr, 0);
-
- ctrl->irq_status = status;
- smp_wmb();
- wake_up(&ctrl->irq_wait);
-
- return IRQ_HANDLED;
+ if (elbc_fcm_ctrl->chips[i])
+ fsl_elbc_chip_remove(elbc_fcm_ctrl->chips[i]);
+
+ mutex_lock(&fsl_elbc_nand_mutex);
+ elbc_fcm_ctrl->counter--;
+ if (!elbc_fcm_ctrl->counter) {
+ fsl_lbc_ctrl_dev->nand = NULL;
+ kfree(elbc_fcm_ctrl);
}
-
- return IRQ_NONE;
-}
-
-/* fsl_elbc_ctrl_probe
- *
- * called by device layer when it finds a device matching
- * one our driver can handled. This code allocates all of
- * the resources needed for the controller only. The
- * resources for the NAND banks themselves are allocated
- * in the chip probe function.
-*/
-
-static int __devinit fsl_elbc_ctrl_probe(struct platform_device *ofdev,
- const struct of_device_id *match)
-{
- struct device_node *child;
- struct fsl_elbc_ctrl *ctrl;
- int ret;
-
- ctrl = kzalloc(sizeof(*ctrl), GFP_KERNEL);
- if (!ctrl)
- return -ENOMEM;
-
- dev_set_drvdata(&ofdev->dev, ctrl);
-
- spin_lock_init(&ctrl->controller.lock);
- init_waitqueue_head(&ctrl->controller.wq);
- init_waitqueue_head(&ctrl->irq_wait);
-
- ctrl->regs = of_iomap(ofdev->dev.of_node, 0);
- if (!ctrl->regs) {
- dev_err(&ofdev->dev, "failed to get memory region\n");
- ret = -ENODEV;
- goto err;
- }
-
- ctrl->irq = of_irq_to_resource(ofdev->dev.of_node, 0, NULL);
- if (ctrl->irq == NO_IRQ) {
- dev_err(&ofdev->dev, "failed to get irq resource\n");
- ret = -ENODEV;
- goto err;
- }
-
- ctrl->dev = &ofdev->dev;
-
- ret = fsl_elbc_ctrl_init(ctrl);
- if (ret < 0)
- goto err;
-
- ret = request_irq(ctrl->irq, fsl_elbc_ctrl_irq, 0, "fsl-elbc", ctrl);
- if (ret != 0) {
- dev_err(&ofdev->dev, "failed to install irq (%d)\n",
- ctrl->irq);
- ret = ctrl->irq;
- goto err;
- }
-
- for_each_child_of_node(ofdev->dev.of_node, child)
- if (of_device_is_compatible(child, "fsl,elbc-fcm-nand"))
- fsl_elbc_chip_probe(ctrl, child);
+ mutex_unlock(&fsl_elbc_nand_mutex);
return 0;
-err:
- fsl_elbc_ctrl_remove(ofdev);
- return ret;
}
-static const struct of_device_id fsl_elbc_match[] = {
- {
- .compatible = "fsl,elbc",
- },
+static const struct of_device_id fsl_elbc_nand_match[] = {
+ { .compatible = "fsl,elbc-fcm-nand", },
{}
};
-static struct of_platform_driver fsl_elbc_ctrl_driver = {
+static struct platform_driver fsl_elbc_nand_driver = {
.driver = {
- .name = "fsl-elbc",
+ .name = "fsl,elbc-fcm-nand",
.owner = THIS_MODULE,
- .of_match_table = fsl_elbc_match,
+ .of_match_table = fsl_elbc_nand_match,
},
- .probe = fsl_elbc_ctrl_probe,
- .remove = fsl_elbc_ctrl_remove,
+ .probe = fsl_elbc_nand_probe,
+ .remove = fsl_elbc_nand_remove,
};
-static int __init fsl_elbc_init(void)
+static int __init fsl_elbc_nand_init(void)
{
- return of_register_platform_driver(&fsl_elbc_ctrl_driver);
+ return platform_driver_register(&fsl_elbc_nand_driver);
}
-static void __exit fsl_elbc_exit(void)
+static void __exit fsl_elbc_nand_exit(void)
{
- of_unregister_platform_driver(&fsl_elbc_ctrl_driver);
+ platform_driver_unregister(&fsl_elbc_nand_driver);
}
-module_init(fsl_elbc_init);
-module_exit(fsl_elbc_exit);
+module_init(fsl_elbc_nand_init);
+module_exit(fsl_elbc_nand_exit);
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Freescale");
diff --git a/drivers/mtd/nand/fsl_upm.c b/drivers/mtd/nand/fsl_upm.c
index 4eff8b25e5af..efdcca94ce55 100644
--- a/drivers/mtd/nand/fsl_upm.c
+++ b/drivers/mtd/nand/fsl_upm.c
@@ -186,7 +186,7 @@ static int __devinit fun_chip_init(struct fsl_upm_nand *fun,
if (!flash_np)
return -ENODEV;
- fun->mtd.name = kasprintf(GFP_KERNEL, "%x.%s", io_res->start,
+ fun->mtd.name = kasprintf(GFP_KERNEL, "0x%llx.%s", (u64)io_res->start,
flash_np->name);
if (!fun->mtd.name) {
ret = -ENOMEM;
@@ -222,7 +222,7 @@ static int __devinit fun_probe(struct platform_device *ofdev,
{
struct fsl_upm_nand *fun;
struct resource io_res;
- const uint32_t *prop;
+ const __be32 *prop;
int rnb_gpio;
int ret;
int size;
@@ -270,7 +270,7 @@ static int __devinit fun_probe(struct platform_device *ofdev,
goto err1;
}
for (i = 0; i < fun->mchip_count; i++)
- fun->mchip_offsets[i] = prop[i];
+ fun->mchip_offsets[i] = be32_to_cpu(prop[i]);
} else {
fun->mchip_count = 1;
}
@@ -295,13 +295,13 @@ static int __devinit fun_probe(struct platform_device *ofdev,
prop = of_get_property(ofdev->dev.of_node, "chip-delay", NULL);
if (prop)
- fun->chip_delay = *prop;
+ fun->chip_delay = be32_to_cpup(prop);
else
fun->chip_delay = 50;
prop = of_get_property(ofdev->dev.of_node, "fsl,upm-wait-flags", &size);
if (prop && size == sizeof(uint32_t))
- fun->wait_flags = *prop;
+ fun->wait_flags = be32_to_cpup(prop);
else
fun->wait_flags = FSL_UPM_WAIT_RUN_PATTERN |
FSL_UPM_WAIT_WRITE_BYTE;
diff --git a/drivers/mtd/nand/fsmc_nand.c b/drivers/mtd/nand/fsmc_nand.c
new file mode 100644
index 000000000000..02edfba25b0c
--- /dev/null
+++ b/drivers/mtd/nand/fsmc_nand.c
@@ -0,0 +1,866 @@
+/*
+ * drivers/mtd/nand/fsmc_nand.c
+ *
+ * ST Microelectronics
+ * Flexible Static Memory Controller (FSMC)
+ * Driver for NAND portions
+ *
+ * Copyright © 2010 ST Microelectronics
+ * Vipin Kumar <vipin.kumar@st.com>
+ * Ashish Priyadarshi
+ *
+ * Based on drivers/mtd/nand/nomadik_nand.c
+ *
+ * This file is licensed under the terms of the GNU General Public
+ * License version 2. This program is licensed "as is" without any
+ * warranty of any kind, whether express or implied.
+ */
+
+#include <linux/clk.h>
+#include <linux/err.h>
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/resource.h>
+#include <linux/sched.h>
+#include <linux/types.h>
+#include <linux/mtd/mtd.h>
+#include <linux/mtd/nand.h>
+#include <linux/mtd/nand_ecc.h>
+#include <linux/platform_device.h>
+#include <linux/mtd/partitions.h>
+#include <linux/io.h>
+#include <linux/slab.h>
+#include <linux/mtd/fsmc.h>
+#include <mtd/mtd-abi.h>
+
+static struct nand_ecclayout fsmc_ecc1_layout = {
+ .eccbytes = 24,
+ .eccpos = {2, 3, 4, 18, 19, 20, 34, 35, 36, 50, 51, 52,
+ 66, 67, 68, 82, 83, 84, 98, 99, 100, 114, 115, 116},
+ .oobfree = {
+ {.offset = 8, .length = 8},
+ {.offset = 24, .length = 8},
+ {.offset = 40, .length = 8},
+ {.offset = 56, .length = 8},
+ {.offset = 72, .length = 8},
+ {.offset = 88, .length = 8},
+ {.offset = 104, .length = 8},
+ {.offset = 120, .length = 8}
+ }
+};
+
+static struct nand_ecclayout fsmc_ecc4_lp_layout = {
+ .eccbytes = 104,
+ .eccpos = { 2, 3, 4, 5, 6, 7, 8,
+ 9, 10, 11, 12, 13, 14,
+ 18, 19, 20, 21, 22, 23, 24,
+ 25, 26, 27, 28, 29, 30,
+ 34, 35, 36, 37, 38, 39, 40,
+ 41, 42, 43, 44, 45, 46,
+ 50, 51, 52, 53, 54, 55, 56,
+ 57, 58, 59, 60, 61, 62,
+ 66, 67, 68, 69, 70, 71, 72,
+ 73, 74, 75, 76, 77, 78,
+ 82, 83, 84, 85, 86, 87, 88,
+ 89, 90, 91, 92, 93, 94,
+ 98, 99, 100, 101, 102, 103, 104,
+ 105, 106, 107, 108, 109, 110,
+ 114, 115, 116, 117, 118, 119, 120,
+ 121, 122, 123, 124, 125, 126
+ },
+ .oobfree = {
+ {.offset = 15, .length = 3},
+ {.offset = 31, .length = 3},
+ {.offset = 47, .length = 3},
+ {.offset = 63, .length = 3},
+ {.offset = 79, .length = 3},
+ {.offset = 95, .length = 3},
+ {.offset = 111, .length = 3},
+ {.offset = 127, .length = 1}
+ }
+};
+
+/*
+ * ECC placement definitions in oobfree type format.
+ * There are 13 bytes of ecc for every 512 byte block and it has to be read
+ * consecutively and immediately after the 512 byte data block for hardware to
+ * generate the error bit offsets in 512 byte data.
+ * Managing the ecc bytes in the following way makes it easier for software to
+ * read ecc bytes consecutive to data bytes. This way is similar to
+ * oobfree structure maintained already in generic nand driver
+ */
+static struct fsmc_eccplace fsmc_ecc4_lp_place = {
+ .eccplace = {
+ {.offset = 2, .length = 13},
+ {.offset = 18, .length = 13},
+ {.offset = 34, .length = 13},
+ {.offset = 50, .length = 13},
+ {.offset = 66, .length = 13},
+ {.offset = 82, .length = 13},
+ {.offset = 98, .length = 13},
+ {.offset = 114, .length = 13}
+ }
+};
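/*
 * Reading aid (assumed interpretation): each eccplace entry above gives the
 * OOB offset of the 13 ECC bytes belonging to one 512-byte step of
 * fsmc_read_page_hwecc(); a 2KiB page uses the first four entries, a 4KiB
 * page all eight.
 */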
+
+static struct nand_ecclayout fsmc_ecc4_sp_layout = {
+ .eccbytes = 13,
+ .eccpos = { 0, 1, 2, 3, 6, 7, 8,
+ 9, 10, 11, 12, 13, 14
+ },
+ .oobfree = {
+ {.offset = 15, .length = 1},
+ }
+};
+
+static struct fsmc_eccplace fsmc_ecc4_sp_place = {
+ .eccplace = {
+ {.offset = 0, .length = 4},
+ {.offset = 6, .length = 9}
+ }
+};
+
+/*
+ * Default partition tables to be used if the partition information is not
+ * provided through platform data
+ */
+#define PARTITION(n, off, sz) {.name = n, .offset = off, .size = sz}
+
+/*
+ * Default partition layout for small page (= 512 bytes) devices
+ * Size for "Root file system" is updated in the driver based on actual device size
+ */
+static struct mtd_partition partition_info_16KB_blk[] = {
+ PARTITION("X-loader", 0, 4 * 0x4000),
+ PARTITION("U-Boot", 0x10000, 20 * 0x4000),
+ PARTITION("Kernel", 0x60000, 256 * 0x4000),
+ PARTITION("Root File System", 0x460000, 0),
+};
+
+/*
+ * Default partition layout for large page (> 512 bytes) devices
+ * Size for "Root file system" is updated in the driver based on actual device size
+ */
+static struct mtd_partition partition_info_128KB_blk[] = {
+ PARTITION("X-loader", 0, 4 * 0x20000),
+ PARTITION("U-Boot", 0x80000, 12 * 0x20000),
+ PARTITION("Kernel", 0x200000, 48 * 0x20000),
+ PARTITION("Root File System", 0x800000, 0),
+};
+
+#ifdef CONFIG_MTD_CMDLINE_PARTS
+const char *part_probes[] = { "cmdlinepart", NULL };
+#endif
+
+/**
+ * struct fsmc_nand_data - structure for FSMC NAND device state
+ *
+ * @mtd: MTD info for a NAND flash.
+ * @nand: Chip related info for a NAND flash.
+ * @partitions: Partition info for a NAND Flash.
+ * @nr_partitions: Total number of partitions of a NAND flash.
+ *
+ * @ecc_place: ECC placing locations in oobfree type format.
+ * @bank: Bank number for probed device.
+ * @clk: Clock structure for FSMC.
+ *
+ * @data_va: NAND port for Data.
+ * @cmd_va: NAND port for Command.
+ * @addr_va: NAND port for Address.
+ * @regs_va: FSMC regs base address.
+ */
+struct fsmc_nand_data {
+ struct mtd_info mtd;
+ struct nand_chip nand;
+ struct mtd_partition *partitions;
+ unsigned int nr_partitions;
+
+ struct fsmc_eccplace *ecc_place;
+ unsigned int bank;
+ struct clk *clk;
+
+ struct resource *resregs;
+ struct resource *rescmd;
+ struct resource *resaddr;
+ struct resource *resdata;
+
+ void __iomem *data_va;
+ void __iomem *cmd_va;
+ void __iomem *addr_va;
+ void __iomem *regs_va;
+
+ void (*select_chip)(uint32_t bank, uint32_t busw);
+};
+
+/* Assert CS signal based on chipnr */
+static void fsmc_select_chip(struct mtd_info *mtd, int chipnr)
+{
+ struct nand_chip *chip = mtd->priv;
+ struct fsmc_nand_data *host;
+
+ host = container_of(mtd, struct fsmc_nand_data, mtd);
+
+ switch (chipnr) {
+ case -1:
+ chip->cmd_ctrl(mtd, NAND_CMD_NONE, 0 | NAND_CTRL_CHANGE);
+ break;
+ case 0:
+ case 1:
+ case 2:
+ case 3:
+ if (host->select_chip)
+ host->select_chip(chipnr,
+ chip->options & NAND_BUSWIDTH_16);
+ break;
+
+ default:
+ BUG();
+ }
+}
+
+/*
+ * fsmc_cmd_ctrl - For facilitating hardware access
+ * This routine allows hardware-specific access to the control lines (ALE, CLE)
+ */
+static void fsmc_cmd_ctrl(struct mtd_info *mtd, int cmd, unsigned int ctrl)
+{
+ struct nand_chip *this = mtd->priv;
+ struct fsmc_nand_data *host = container_of(mtd,
+ struct fsmc_nand_data, mtd);
+ struct fsmc_regs *regs = host->regs_va;
+ unsigned int bank = host->bank;
+
+ if (ctrl & NAND_CTRL_CHANGE) {
+ if (ctrl & NAND_CLE) {
+ this->IO_ADDR_R = (void __iomem *)host->cmd_va;
+ this->IO_ADDR_W = (void __iomem *)host->cmd_va;
+ } else if (ctrl & NAND_ALE) {
+ this->IO_ADDR_R = (void __iomem *)host->addr_va;
+ this->IO_ADDR_W = (void __iomem *)host->addr_va;
+ } else {
+ this->IO_ADDR_R = (void __iomem *)host->data_va;
+ this->IO_ADDR_W = (void __iomem *)host->data_va;
+ }
+
+ if (ctrl & NAND_NCE) {
+ writel(readl(&regs->bank_regs[bank].pc) | FSMC_ENABLE,
+ &regs->bank_regs[bank].pc);
+ } else {
+ writel(readl(&regs->bank_regs[bank].pc) & ~FSMC_ENABLE,
+ &regs->bank_regs[bank].pc);
+ }
+ }
+
+ mb();
+
+ if (cmd != NAND_CMD_NONE)
+ writeb(cmd, this->IO_ADDR_W);
+}
+
+/*
+ * fsmc_nand_setup - FSMC (Flexible Static Memory Controller) init routine
+ *
+ * This routine initializes timing parameters related to NAND memory access in
+ * FSMC registers
+ */
+static void __init fsmc_nand_setup(struct fsmc_regs *regs, uint32_t bank,
+ uint32_t busw)
+{
+ uint32_t value = FSMC_DEVTYPE_NAND | FSMC_ENABLE | FSMC_WAITON;
+
+ if (busw)
+ writel(value | FSMC_DEVWID_16, &regs->bank_regs[bank].pc);
+ else
+ writel(value | FSMC_DEVWID_8, &regs->bank_regs[bank].pc);
+
+ writel(readl(&regs->bank_regs[bank].pc) | FSMC_TCLR_1 | FSMC_TAR_1,
+ &regs->bank_regs[bank].pc);
+ writel(FSMC_THIZ_1 | FSMC_THOLD_4 | FSMC_TWAIT_6 | FSMC_TSET_0,
+ &regs->bank_regs[bank].comm);
+ writel(FSMC_THIZ_1 | FSMC_THOLD_4 | FSMC_TWAIT_6 | FSMC_TSET_0,
+ &regs->bank_regs[bank].attrib);
+}
+
+/*
+ * fsmc_enable_hwecc - Enables Hardware ECC through FSMC registers
+ */
+static void fsmc_enable_hwecc(struct mtd_info *mtd, int mode)
+{
+ struct fsmc_nand_data *host = container_of(mtd,
+ struct fsmc_nand_data, mtd);
+ struct fsmc_regs *regs = host->regs_va;
+ uint32_t bank = host->bank;
+
+ writel(readl(&regs->bank_regs[bank].pc) & ~FSMC_ECCPLEN_256,
+ &regs->bank_regs[bank].pc);
+ writel(readl(&regs->bank_regs[bank].pc) & ~FSMC_ECCEN,
+ &regs->bank_regs[bank].pc);
+ writel(readl(&regs->bank_regs[bank].pc) | FSMC_ECCEN,
+ &regs->bank_regs[bank].pc);
+}
+
+/*
+ * fsmc_read_hwecc_ecc4 - Hardware ECC calculator for ecc4 option supported by
+ * FSMC. ECC is 13 bytes for 512 bytes of data (supports error correction up to
+ * a max of 8 bits)
+ */
+static int fsmc_read_hwecc_ecc4(struct mtd_info *mtd, const uint8_t *data,
+ uint8_t *ecc)
+{
+ struct fsmc_nand_data *host = container_of(mtd,
+ struct fsmc_nand_data, mtd);
+ struct fsmc_regs *regs = host->regs_va;
+ uint32_t bank = host->bank;
+ uint32_t ecc_tmp;
+ unsigned long deadline = jiffies + FSMC_BUSY_WAIT_TIMEOUT;
+
+ do {
+ if (readl(&regs->bank_regs[bank].sts) & FSMC_CODE_RDY)
+ break;
+ else
+ cond_resched();
+ } while (!time_after_eq(jiffies, deadline));
+
+ ecc_tmp = readl(&regs->bank_regs[bank].ecc1);
+ ecc[0] = (uint8_t) (ecc_tmp >> 0);
+ ecc[1] = (uint8_t) (ecc_tmp >> 8);
+ ecc[2] = (uint8_t) (ecc_tmp >> 16);
+ ecc[3] = (uint8_t) (ecc_tmp >> 24);
+
+ ecc_tmp = readl(&regs->bank_regs[bank].ecc2);
+ ecc[4] = (uint8_t) (ecc_tmp >> 0);
+ ecc[5] = (uint8_t) (ecc_tmp >> 8);
+ ecc[6] = (uint8_t) (ecc_tmp >> 16);
+ ecc[7] = (uint8_t) (ecc_tmp >> 24);
+
+ ecc_tmp = readl(&regs->bank_regs[bank].ecc3);
+ ecc[8] = (uint8_t) (ecc_tmp >> 0);
+ ecc[9] = (uint8_t) (ecc_tmp >> 8);
+ ecc[10] = (uint8_t) (ecc_tmp >> 16);
+ ecc[11] = (uint8_t) (ecc_tmp >> 24);
+
+ ecc_tmp = readl(&regs->bank_regs[bank].sts);
+ ecc[12] = (uint8_t) (ecc_tmp >> 16);
+
+ return 0;
+}
+
+/*
+ * fsmc_read_hwecc_ecc1 - Hardware ECC calculator for ecc1 option supported by
+ * FSMC. ECC is 3 bytes for 512 bytes of data (supports error correction up to
+ * a max of 1 bit)
+ */
+static int fsmc_read_hwecc_ecc1(struct mtd_info *mtd, const uint8_t *data,
+ uint8_t *ecc)
+{
+ struct fsmc_nand_data *host = container_of(mtd,
+ struct fsmc_nand_data, mtd);
+ struct fsmc_regs *regs = host->regs_va;
+ uint32_t bank = host->bank;
+ uint32_t ecc_tmp;
+
+ ecc_tmp = readl(&regs->bank_regs[bank].ecc1);
+ ecc[0] = (uint8_t) (ecc_tmp >> 0);
+ ecc[1] = (uint8_t) (ecc_tmp >> 8);
+ ecc[2] = (uint8_t) (ecc_tmp >> 16);
+
+ return 0;
+}
+
+/*
+ * fsmc_read_page_hwecc
+ * @mtd: mtd info structure
+ * @chip: nand chip info structure
+ * @buf: buffer to store read data
+ * @page: page number to read
+ *
+ * This routine is needed for FSMC version 8, as reading from the NAND chip has
+ * to be performed in a strict sequence as follows:
+ * data(512 bytes) -> ecc(13 bytes)
+ * After this read, the FSMC hardware generates and reports error data bits (up
+ * to a max of 8 bits)
+ */
+static int fsmc_read_page_hwecc(struct mtd_info *mtd, struct nand_chip *chip,
+ uint8_t *buf, int page)
+{
+ struct fsmc_nand_data *host = container_of(mtd,
+ struct fsmc_nand_data, mtd);
+ struct fsmc_eccplace *ecc_place = host->ecc_place;
+ int i, j, s, stat, eccsize = chip->ecc.size;
+ int eccbytes = chip->ecc.bytes;
+ int eccsteps = chip->ecc.steps;
+ uint8_t *p = buf;
+ uint8_t *ecc_calc = chip->buffers->ecccalc;
+ uint8_t *ecc_code = chip->buffers->ecccode;
+ int off, len, group = 0;
+ /*
+ * ecc_oob is intentionally taken as uint16_t. On 16-bit devices, we
+ * end up reading 14 bytes (7 words) from oob. The local array
+ * maintains word alignment
+ */
+ uint16_t ecc_oob[7];
+ uint8_t *oob = (uint8_t *)&ecc_oob[0];
+
+ for (i = 0, s = 0; s < eccsteps; s++, i += eccbytes, p += eccsize) {
+
+ chip->cmdfunc(mtd, NAND_CMD_READ0, s * eccsize, page);
+ chip->ecc.hwctl(mtd, NAND_ECC_READ);
+ chip->read_buf(mtd, p, eccsize);
+
+ for (j = 0; j < eccbytes;) {
+ off = ecc_place->eccplace[group].offset;
+ len = ecc_place->eccplace[group].length;
+ group++;
+
+ /*
+ * length is intentionally rounded up to a multiple of 2
+ * so that at least 13 bytes are read even on 16-bit NAND
+ * devices
+ */
+ len = roundup(len, 2);
+ chip->cmdfunc(mtd, NAND_CMD_READOOB, off, page);
+ chip->read_buf(mtd, oob + j, len);
+ j += len;
+ }
+
+ memcpy(&ecc_code[i], oob, 13);
+ chip->ecc.calculate(mtd, p, &ecc_calc[i]);
+
+ stat = chip->ecc.correct(mtd, p, &ecc_code[i], &ecc_calc[i]);
+ if (stat < 0)
+ mtd->ecc_stats.failed++;
+ else
+ mtd->ecc_stats.corrected += stat;
+ }
+
+ return 0;
+}
+
+/*
+ * fsmc_correct_data
+ * @mtd: mtd info structure
+ * @dat: buffer of read data
+ * @read_ecc: ecc read from device spare area
+ * @calc_ecc: ecc calculated from read data
+ *
+ * calc_ecc is 104 bits of information containing a maximum of 8 error
+ * offsets of 13 bits each within 512 bytes of read data.
+ */
+static int fsmc_correct_data(struct mtd_info *mtd, uint8_t *dat,
+ uint8_t *read_ecc, uint8_t *calc_ecc)
+{
+ struct fsmc_nand_data *host = container_of(mtd,
+ struct fsmc_nand_data, mtd);
+ struct fsmc_regs *regs = host->regs_va;
+ unsigned int bank = host->bank;
+ uint16_t err_idx[8];
+ uint64_t ecc_data[2];
+ uint32_t num_err, i;
+
+ /* The calculated ecc is actually the correction index in data */
+ memcpy(ecc_data, calc_ecc, 13);
+
+ /*
+ * ------------------- calc_ecc[] bit wise -----------|--13 bits--|
+ * |---idx[7]--|--.....-----|---idx[2]--||---idx[1]--||---idx[0]--|
+ *
+ * calc_ecc is 104 bits of information containing a maximum of 8 error
+ * offsets of 13 bits each. calc_ecc is copied into a uint64_t array
+ * and the error offset indexes are populated in the err_idx array
+ */
+ for (i = 0; i < 8; i++) {
+ if (i == 4) {
+ err_idx[4] = ((ecc_data[1] & 0x1) << 12) | ecc_data[0];
+ ecc_data[1] >>= 1;
+ continue;
+ }
+ err_idx[i] = (ecc_data[i/4] & 0x1FFF);
+ ecc_data[i/4] >>= 13;
+ }
+
+ num_err = (readl(&regs->bank_regs[bank].sts) >> 10) & 0xF;
+
+ if (num_err == 0xF)
+ return -EBADMSG;
+
+ i = 0;
+ while (num_err--) {
+ change_bit(0, (unsigned long *)&err_idx[i]);
+ change_bit(1, (unsigned long *)&err_idx[i]);
+
+ if (err_idx[i] <= 512 * 8) {
+ change_bit(err_idx[i], (unsigned long *)dat);
+ i++;
+ }
+ }
+ return i;
+}
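The bit unpacking above can be exercised outside the kernel. The sketch below is not part of the patch: it assumes the 13 syndrome bytes form a little-endian bit stream (which is what the memcpy into a uint64_t array yields on a little-endian CPU), and the helper name and test data are made up for illustration.

#include <stdint.h>
#include <stdio.h>

/* Return the i-th 13-bit field of a little-endian bit stream. */
static uint16_t get_err_idx(const uint8_t *ecc, unsigned int i)
{
        unsigned int bit = i * 13;      /* absolute start bit of field i */
        unsigned int byte = bit / 8;    /* first byte containing it */
        unsigned int shift = bit % 8;   /* offset inside that byte */
        uint32_t window = ecc[byte] | (ecc[byte + 1] << 8) |
                          ((uint32_t)ecc[byte + 2] << 16);

        return (window >> shift) & 0x1FFF;
}

int main(void)
{
        /* 13 syndrome bytes, zero padded so the 24-bit window never
           reads past the end of the array */
        uint8_t ecc[16] = { 0 };
        unsigned int i;

        /* place the made-up offset 0x0ABC into field 0 as a self test */
        ecc[0] = 0xBC;
        ecc[1] = 0x0A;

        for (i = 0; i < 8; i++)
                printf("err_idx[%u] = 0x%04x\n", i,
                       (unsigned int)get_err_idx(ecc, i));
        return 0;
}

Each extracted field is one candidate error bit offset; fsmc_correct_data() above only flips those that fall inside the 512-byte data area.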
+
+/*
+ * fsmc_nand_probe - Probe function
+ * @pdev: platform device structure
+ */
+static int __init fsmc_nand_probe(struct platform_device *pdev)
+{
+ struct fsmc_nand_platform_data *pdata = dev_get_platdata(&pdev->dev);
+ struct fsmc_nand_data *host;
+ struct mtd_info *mtd;
+ struct nand_chip *nand;
+ struct fsmc_regs *regs;
+ struct resource *res;
+ int nr_parts, ret = 0;
+
+ if (!pdata) {
+ dev_err(&pdev->dev, "platform data is NULL\n");
+ return -EINVAL;
+ }
+
+ /* Allocate memory for the device structure (and zero it) */
+ host = kzalloc(sizeof(*host), GFP_KERNEL);
+ if (!host) {
+ dev_err(&pdev->dev, "failed to allocate device structure\n");
+ return -ENOMEM;
+ }
+
+ res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "nand_data");
+ if (!res) {
+ ret = -EIO;
+ goto err_probe1;
+ }
+
+ host->resdata = request_mem_region(res->start, resource_size(res),
+ pdev->name);
+ if (!host->resdata) {
+ ret = -EIO;
+ goto err_probe1;
+ }
+
+ host->data_va = ioremap(res->start, resource_size(res));
+ if (!host->data_va) {
+ ret = -EIO;
+ goto err_probe1;
+ }
+
+ host->resaddr = request_mem_region(res->start + PLAT_NAND_ALE,
+ resource_size(res), pdev->name);
+ if (!host->resaddr) {
+ ret = -EIO;
+ goto err_probe1;
+ }
+
+ host->addr_va = ioremap(res->start + PLAT_NAND_ALE, resource_size(res));
+ if (!host->addr_va) {
+ ret = -EIO;
+ goto err_probe1;
+ }
+
+ host->rescmd = request_mem_region(res->start + PLAT_NAND_CLE,
+ resource_size(res), pdev->name);
+ if (!host->rescmd) {
+ ret = -EIO;
+ goto err_probe1;
+ }
+
+ host->cmd_va = ioremap(res->start + PLAT_NAND_CLE, resource_size(res));
+ if (!host->cmd_va) {
+ ret = -EIO;
+ goto err_probe1;
+ }
+
+ res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "fsmc_regs");
+ if (!res) {
+ ret = -EIO;
+ goto err_probe1;
+ }
+
+ host->resregs = request_mem_region(res->start, resource_size(res),
+ pdev->name);
+ if (!host->resregs) {
+ ret = -EIO;
+ goto err_probe1;
+ }
+
+ host->regs_va = ioremap(res->start, resource_size(res));
+ if (!host->regs_va) {
+ ret = -EIO;
+ goto err_probe1;
+ }
+
+ host->clk = clk_get(&pdev->dev, NULL);
+ if (IS_ERR(host->clk)) {
+ dev_err(&pdev->dev, "failed to fetch block clock\n");
+ ret = PTR_ERR(host->clk);
+ host->clk = NULL;
+ goto err_probe1;
+ }
+
+ ret = clk_enable(host->clk);
+ if (ret)
+ goto err_probe1;
+
+ host->bank = pdata->bank;
+ host->select_chip = pdata->select_bank;
+ regs = host->regs_va;
+
+ /* Link all private pointers */
+ mtd = &host->mtd;
+ nand = &host->nand;
+ mtd->priv = nand;
+ nand->priv = host;
+
+ host->mtd.owner = THIS_MODULE;
+ nand->IO_ADDR_R = host->data_va;
+ nand->IO_ADDR_W = host->data_va;
+ nand->cmd_ctrl = fsmc_cmd_ctrl;
+ nand->chip_delay = 30;
+
+ nand->ecc.mode = NAND_ECC_HW;
+ nand->ecc.hwctl = fsmc_enable_hwecc;
+ nand->ecc.size = 512;
+ nand->options = pdata->options;
+ nand->select_chip = fsmc_select_chip;
+
+ if (pdata->width == FSMC_NAND_BW16)
+ nand->options |= NAND_BUSWIDTH_16;
+
+ fsmc_nand_setup(regs, host->bank, nand->options & NAND_BUSWIDTH_16);
+
+ if (get_fsmc_version(host->regs_va) == FSMC_VER8) {
+ nand->ecc.read_page = fsmc_read_page_hwecc;
+ nand->ecc.calculate = fsmc_read_hwecc_ecc4;
+ nand->ecc.correct = fsmc_correct_data;
+ nand->ecc.bytes = 13;
+ } else {
+ nand->ecc.calculate = fsmc_read_hwecc_ecc1;
+ nand->ecc.correct = nand_correct_data;
+ nand->ecc.bytes = 3;
+ }
+
+ /*
+ * Scan to find the existence of the device
+ */
+ if (nand_scan_ident(&host->mtd, 1, NULL)) {
+ ret = -ENXIO;
+ dev_err(&pdev->dev, "No NAND Device found!\n");
+ goto err_probe;
+ }
+
+ if (get_fsmc_version(host->regs_va) == FSMC_VER8) {
+ if (host->mtd.writesize == 512) {
+ nand->ecc.layout = &fsmc_ecc4_sp_layout;
+ host->ecc_place = &fsmc_ecc4_sp_place;
+ } else {
+ nand->ecc.layout = &fsmc_ecc4_lp_layout;
+ host->ecc_place = &fsmc_ecc4_lp_place;
+ }
+ } else {
+ nand->ecc.layout = &fsmc_ecc1_layout;
+ }
+
+ /* Second stage of scan to fill MTD data-structures */
+ if (nand_scan_tail(&host->mtd)) {
+ ret = -ENXIO;
+ goto err_probe;
+ }
+
+ /*
+ * The partition information is accessed in the following order of precedence:
+ *
+ * command line through Bootloader,
+ * platform data,
+ * default partition information present in driver.
+ */
+#ifdef CONFIG_MTD_PARTITIONS
+#ifdef CONFIG_MTD_CMDLINE_PARTS
+ /*
+ * Check if partition info passed via command line
+ */
+ host->mtd.name = "nand";
+ nr_parts = parse_mtd_partitions(&host->mtd, part_probes,
+ &host->partitions, 0);
+ if (nr_parts > 0) {
+ host->nr_partitions = nr_parts;
+ } else {
+#endif
+ /*
+ * Check if partition info passed via platform data
+ */
+ if (pdata->partitions) {
+ host->partitions = pdata->partitions;
+ host->nr_partitions = pdata->nr_partitions;
+ } else {
+ struct mtd_partition *partition;
+ int i;
+
+ /* Select the default partitions info */
+ switch (host->mtd.size) {
+ case 0x01000000:
+ case 0x02000000:
+ case 0x04000000:
+ host->partitions = partition_info_16KB_blk;
+ host->nr_partitions =
+ sizeof(partition_info_16KB_blk) /
+ sizeof(struct mtd_partition);
+ break;
+ case 0x08000000:
+ case 0x10000000:
+ case 0x20000000:
+ case 0x40000000:
+ host->partitions = partition_info_128KB_blk;
+ host->nr_partitions =
+ sizeof(partition_info_128KB_blk) /
+ sizeof(struct mtd_partition);
+ break;
+ default:
+ ret = -ENXIO;
+ pr_err("Unsupported NAND size\n");
+ goto err_probe;
+ }
+
+ partition = host->partitions;
+ for (i = 0; i < host->nr_partitions; i++, partition++) {
+ if (partition->size == 0) {
+ partition->size = host->mtd.size -
+ partition->offset;
+ break;
+ }
+ }
+ }
+#ifdef CONFIG_MTD_CMDLINE_PARTS
+ }
+#endif
+
+ if (host->partitions) {
+ ret = add_mtd_partitions(&host->mtd, host->partitions,
+ host->nr_partitions);
+ if (ret)
+ goto err_probe;
+ }
+#else
+ dev_info(&pdev->dev, "Registering %s as whole device\n", mtd->name);
+ if (!add_mtd_device(mtd)) {
+ ret = -ENXIO;
+ goto err_probe;
+ }
+#endif
+
+ platform_set_drvdata(pdev, host);
+ dev_info(&pdev->dev, "FSMC NAND driver registration successful\n");
+ return 0;
+
+err_probe:
+ clk_disable(host->clk);
+err_probe1:
+ if (host->clk)
+ clk_put(host->clk);
+ if (host->regs_va)
+ iounmap(host->regs_va);
+ if (host->resregs)
+ release_mem_region(host->resregs->start,
+ resource_size(host->resregs));
+ if (host->cmd_va)
+ iounmap(host->cmd_va);
+ if (host->rescmd)
+ release_mem_region(host->rescmd->start,
+ resource_size(host->rescmd));
+ if (host->addr_va)
+ iounmap(host->addr_va);
+ if (host->resaddr)
+ release_mem_region(host->resaddr->start,
+ resource_size(host->resaddr));
+ if (host->data_va)
+ iounmap(host->data_va);
+ if (host->resdata)
+ release_mem_region(host->resdata->start,
+ resource_size(host->resdata));
+
+ kfree(host);
+ return ret;
+}
+
+/*
+ * Clean up routine
+ */
+static int fsmc_nand_remove(struct platform_device *pdev)
+{
+ struct fsmc_nand_data *host = platform_get_drvdata(pdev);
+
+ platform_set_drvdata(pdev, NULL);
+
+ if (host) {
+#ifdef CONFIG_MTD_PARTITIONS
+ del_mtd_partitions(&host->mtd);
+#else
+ del_mtd_device(&host->mtd);
+#endif
+ clk_disable(host->clk);
+ clk_put(host->clk);
+
+ iounmap(host->regs_va);
+ release_mem_region(host->resregs->start,
+ resource_size(host->resregs));
+ iounmap(host->cmd_va);
+ release_mem_region(host->rescmd->start,
+ resource_size(host->rescmd));
+ iounmap(host->addr_va);
+ release_mem_region(host->resaddr->start,
+ resource_size(host->resaddr));
+ iounmap(host->data_va);
+ release_mem_region(host->resdata->start,
+ resource_size(host->resdata));
+
+ kfree(host);
+ }
+ return 0;
+}
+
+#ifdef CONFIG_PM
+static int fsmc_nand_suspend(struct device *dev)
+{
+ struct fsmc_nand_data *host = dev_get_drvdata(dev);
+ if (host)
+ clk_disable(host->clk);
+ return 0;
+}
+
+static int fsmc_nand_resume(struct device *dev)
+{
+ struct fsmc_nand_data *host = dev_get_drvdata(dev);
+ if (host)
+ clk_enable(host->clk);
+ return 0;
+}
+
+static const struct dev_pm_ops fsmc_nand_pm_ops = {
+ .suspend = fsmc_nand_suspend,
+ .resume = fsmc_nand_resume,
+};
+#endif
+
+static struct platform_driver fsmc_nand_driver = {
+ .remove = fsmc_nand_remove,
+ .driver = {
+ .owner = THIS_MODULE,
+ .name = "fsmc-nand",
+#ifdef CONFIG_PM
+ .pm = &fsmc_nand_pm_ops,
+#endif
+ },
+};
+
+static int __init fsmc_nand_init(void)
+{
+ return platform_driver_probe(&fsmc_nand_driver,
+ fsmc_nand_probe);
+}
+module_init(fsmc_nand_init);
+
+static void __exit fsmc_nand_exit(void)
+{
+ platform_driver_unregister(&fsmc_nand_driver);
+}
+module_exit(fsmc_nand_exit);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Vipin Kumar <vipin.kumar@st.com>, Ashish Priyadarshi");
+MODULE_DESCRIPTION("NAND driver for SPEAr Platforms");
diff --git a/drivers/mtd/nand/mpc5121_nfc.c b/drivers/mtd/nand/mpc5121_nfc.c
index df0c1da4ff49..469e649c911c 100644
--- a/drivers/mtd/nand/mpc5121_nfc.c
+++ b/drivers/mtd/nand/mpc5121_nfc.c
@@ -568,6 +568,7 @@ static int mpc5121_nfc_read_hw_config(struct mtd_info *mtd)
uint rcw_width;
uint rcwh;
uint romloc, ps;
+ int ret = 0;
rmnode = of_find_compatible_node(NULL, NULL, "fsl,mpc5121-reset");
if (!rmnode) {
@@ -579,7 +580,8 @@ static int mpc5121_nfc_read_hw_config(struct mtd_info *mtd)
rm = of_iomap(rmnode, 0);
if (!rm) {
dev_err(prv->dev, "Error mapping reset module node!\n");
- return -EBUSY;
+ ret = -EBUSY;
+ goto out;
}
rcwh = in_be32(&rm->rcwhr);
@@ -628,8 +630,9 @@ static int mpc5121_nfc_read_hw_config(struct mtd_info *mtd)
rcw_width * 8, rcw_pagesize,
rcw_sparesize);
iounmap(rm);
+out:
of_node_put(rmnode);
- return 0;
+ return ret;
}
/* Free driver resources */
@@ -660,7 +663,7 @@ static int __devinit mpc5121_nfc_probe(struct platform_device *op,
#endif
struct nand_chip *chip;
unsigned long regs_paddr, regs_size;
- const uint *chips_no;
+ const __be32 *chips_no;
int resettime = 0;
int retval = 0;
int rev, len;
@@ -803,7 +806,7 @@ static int __devinit mpc5121_nfc_probe(struct platform_device *op,
}
/* Detect NAND chips */
- if (nand_scan(mtd, *chips_no)) {
+ if (nand_scan(mtd, be32_to_cpup(chips_no))) {
dev_err(dev, "NAND Flash not found !\n");
devm_free_irq(dev, prv->irq, mtd);
retval = -ENXIO;
diff --git a/drivers/mtd/nand/nand_base.c b/drivers/mtd/nand/nand_base.c
index d551ddd9537a..1f75a1b1f7c3 100644
--- a/drivers/mtd/nand/nand_base.c
+++ b/drivers/mtd/nand/nand_base.c
@@ -45,7 +45,7 @@
#include <linux/interrupt.h>
#include <linux/bitops.h>
#include <linux/leds.h>
-#include <asm/io.h>
+#include <linux/io.h>
#ifdef CONFIG_MTD_PARTITIONS
#include <linux/mtd/partitions.h>
@@ -59,7 +59,7 @@ static struct nand_ecclayout nand_oob_8 = {
{.offset = 3,
.length = 2},
{.offset = 6,
- .length = 2}}
+ .length = 2} }
};
static struct nand_ecclayout nand_oob_16 = {
@@ -67,7 +67,7 @@ static struct nand_ecclayout nand_oob_16 = {
.eccpos = {0, 1, 2, 3, 6, 7},
.oobfree = {
{.offset = 8,
- . length = 8}}
+ . length = 8} }
};
static struct nand_ecclayout nand_oob_64 = {
@@ -78,7 +78,7 @@ static struct nand_ecclayout nand_oob_64 = {
56, 57, 58, 59, 60, 61, 62, 63},
.oobfree = {
{.offset = 2,
- .length = 38}}
+ .length = 38} }
};
static struct nand_ecclayout nand_oob_128 = {
@@ -92,7 +92,7 @@ static struct nand_ecclayout nand_oob_128 = {
120, 121, 122, 123, 124, 125, 126, 127},
.oobfree = {
{.offset = 2,
- .length = 78}}
+ .length = 78} }
};
static int nand_get_device(struct nand_chip *chip, struct mtd_info *mtd,
@@ -612,7 +612,8 @@ static void nand_command(struct mtd_info *mtd, unsigned int command,
NAND_CTRL_CLE | NAND_CTRL_CHANGE);
chip->cmd_ctrl(mtd,
NAND_CMD_NONE, NAND_NCE | NAND_CTRL_CHANGE);
- while (!(chip->read_byte(mtd) & NAND_STATUS_READY)) ;
+ while (!(chip->read_byte(mtd) & NAND_STATUS_READY))
+ ;
return;
/* This applies to read commands */
@@ -718,7 +719,8 @@ static void nand_command_lp(struct mtd_info *mtd, unsigned int command,
NAND_NCE | NAND_CLE | NAND_CTRL_CHANGE);
chip->cmd_ctrl(mtd, NAND_CMD_NONE,
NAND_NCE | NAND_CTRL_CHANGE);
- while (!(chip->read_byte(mtd) & NAND_STATUS_READY)) ;
+ while (!(chip->read_byte(mtd) & NAND_STATUS_READY))
+ ;
return;
case NAND_CMD_RNDOUT:
@@ -784,7 +786,7 @@ nand_get_device(struct nand_chip *chip, struct mtd_info *mtd, int new_state)
spinlock_t *lock = &chip->controller->lock;
wait_queue_head_t *wq = &chip->controller->wq;
DECLARE_WAITQUEUE(wait, current);
- retry:
+retry:
spin_lock(lock);
/* Hardware controller shared among independent devices */
@@ -834,7 +836,7 @@ static void panic_nand_wait(struct mtd_info *mtd, struct nand_chip *chip,
break;
}
mdelay(1);
- }
+ }
}
/**
@@ -980,6 +982,7 @@ out:
return ret;
}
+EXPORT_SYMBOL(nand_unlock);
/**
* nand_lock - [REPLACEABLE] locks all blocks present in the device
@@ -1049,6 +1052,7 @@ out:
return ret;
}
+EXPORT_SYMBOL(nand_lock);
/**
* nand_read_page_raw - [Intern] read raw page data without ecc
@@ -1076,8 +1080,9 @@ static int nand_read_page_raw(struct mtd_info *mtd, struct nand_chip *chip,
*
* We need a special oob layout and handling even when OOB isn't used.
*/
-static int nand_read_page_raw_syndrome(struct mtd_info *mtd, struct nand_chip *chip,
- uint8_t *buf, int page)
+static int nand_read_page_raw_syndrome(struct mtd_info *mtd,
+ struct nand_chip *chip,
+ uint8_t *buf, int page)
{
int eccsize = chip->ecc.size;
int eccbytes = chip->ecc.bytes;
@@ -1158,7 +1163,8 @@ static int nand_read_page_swecc(struct mtd_info *mtd, struct nand_chip *chip,
* @readlen: data length
* @bufpoi: buffer to store read data
*/
-static int nand_read_subpage(struct mtd_info *mtd, struct nand_chip *chip, uint32_t data_offs, uint32_t readlen, uint8_t *bufpoi)
+static int nand_read_subpage(struct mtd_info *mtd, struct nand_chip *chip,
+ uint32_t data_offs, uint32_t readlen, uint8_t *bufpoi)
{
int start_step, end_step, num_steps;
uint32_t *eccpos = chip->ecc.layout->eccpos;
@@ -1166,6 +1172,7 @@ static int nand_read_subpage(struct mtd_info *mtd, struct nand_chip *chip, uint3
int data_col_addr, i, gaps = 0;
int datafrag_len, eccfrag_len, aligned_len, aligned_pos;
int busw = (chip->options & NAND_BUSWIDTH_16) ? 2 : 1;
+ int index = 0;
/* Column address wihin the page aligned to ECC size (256bytes). */
start_step = data_offs / chip->ecc.size;
@@ -1204,26 +1211,30 @@ static int nand_read_subpage(struct mtd_info *mtd, struct nand_chip *chip, uint3
} else {
/* send the command to read the particular ecc bytes */
/* take care about buswidth alignment in read_buf */
- aligned_pos = eccpos[start_step * chip->ecc.bytes] & ~(busw - 1);
+ index = start_step * chip->ecc.bytes;
+
+ aligned_pos = eccpos[index] & ~(busw - 1);
aligned_len = eccfrag_len;
- if (eccpos[start_step * chip->ecc.bytes] & (busw - 1))
+ if (eccpos[index] & (busw - 1))
aligned_len++;
- if (eccpos[(start_step + num_steps) * chip->ecc.bytes] & (busw - 1))
+ if (eccpos[index + (num_steps * chip->ecc.bytes)] & (busw - 1))
aligned_len++;
- chip->cmdfunc(mtd, NAND_CMD_RNDOUT, mtd->writesize + aligned_pos, -1);
+ chip->cmdfunc(mtd, NAND_CMD_RNDOUT,
+ mtd->writesize + aligned_pos, -1);
chip->read_buf(mtd, &chip->oob_poi[aligned_pos], aligned_len);
}
for (i = 0; i < eccfrag_len; i++)
- chip->buffers->ecccode[i] = chip->oob_poi[eccpos[i + start_step * chip->ecc.bytes]];
+ chip->buffers->ecccode[i] = chip->oob_poi[eccpos[i + index]];
p = bufpoi + data_col_addr;
for (i = 0; i < eccfrag_len ; i += chip->ecc.bytes, p += chip->ecc.size) {
int stat;
- stat = chip->ecc.correct(mtd, p, &chip->buffers->ecccode[i], &chip->buffers->ecccalc[i]);
- if (stat == -1)
+ stat = chip->ecc.correct(mtd, p,
+ &chip->buffers->ecccode[i], &chip->buffers->ecccalc[i]);
+ if (stat < 0)
mtd->ecc_stats.failed++;
else
mtd->ecc_stats.corrected += stat;
@@ -1390,7 +1401,7 @@ static int nand_read_page_syndrome(struct mtd_info *mtd, struct nand_chip *chip,
static uint8_t *nand_transfer_oob(struct nand_chip *chip, uint8_t *oob,
struct mtd_oob_ops *ops, size_t len)
{
- switch(ops->mode) {
+ switch (ops->mode) {
case MTD_OOB_PLACE:
case MTD_OOB_RAW:
@@ -1402,7 +1413,7 @@ static uint8_t *nand_transfer_oob(struct nand_chip *chip, uint8_t *oob,
uint32_t boffs = 0, roffs = ops->ooboffs;
size_t bytes = 0;
- for(; free->length && len; free++, len -= bytes) {
+ for (; free->length && len; free++, len -= bytes) {
/* Read request not from offset 0 ? */
if (unlikely(roffs)) {
if (roffs >= free->length) {
@@ -1466,7 +1477,7 @@ static int nand_do_read_ops(struct mtd_info *mtd, loff_t from,
buf = ops->datbuf;
oob = ops->oobbuf;
- while(1) {
+ while (1) {
bytes = min(mtd->writesize - col, readlen);
aligned = (bytes == mtd->writesize);
@@ -1484,7 +1495,8 @@ static int nand_do_read_ops(struct mtd_info *mtd, loff_t from,
ret = chip->ecc.read_page_raw(mtd, chip,
bufpoi, page);
else if (!aligned && NAND_SUBPAGE_READ(chip) && !oob)
- ret = chip->ecc.read_subpage(mtd, chip, col, bytes, bufpoi);
+ ret = chip->ecc.read_subpage(mtd, chip,
+ col, bytes, bufpoi);
else
ret = chip->ecc.read_page(mtd, chip, bufpoi,
page);
@@ -1493,7 +1505,8 @@ static int nand_do_read_ops(struct mtd_info *mtd, loff_t from,
/* Transfer not aligned data */
if (!aligned) {
- if (!NAND_SUBPAGE_READ(chip) && !oob)
+ if (!NAND_SUBPAGE_READ(chip) && !oob &&
+ !(mtd->ecc_stats.failed - stats.failed))
chip->pagebuf = realpage;
memcpy(buf, chip->buffers->databuf + col, bytes);
}
@@ -1791,7 +1804,7 @@ static int nand_do_read_oob(struct mtd_info *mtd, loff_t from,
realpage = (int)(from >> chip->page_shift);
page = realpage & chip->pagemask;
- while(1) {
+ while (1) {
sndcmd = chip->ecc.read_oob(mtd, chip, page, sndcmd);
len = min(len, readlen);
@@ -1861,7 +1874,7 @@ static int nand_read_oob(struct mtd_info *mtd, loff_t from,
nand_get_device(chip, mtd, FL_READING);
- switch(ops->mode) {
+ switch (ops->mode) {
case MTD_OOB_PLACE:
case MTD_OOB_AUTO:
case MTD_OOB_RAW:
@@ -1876,7 +1889,7 @@ static int nand_read_oob(struct mtd_info *mtd, loff_t from,
else
ret = nand_do_read_ops(mtd, from, ops);
- out:
+out:
nand_release_device(mtd);
return ret;
}
@@ -1905,8 +1918,9 @@ static void nand_write_page_raw(struct mtd_info *mtd, struct nand_chip *chip,
*
* We need a special oob layout and handling even when ECC isn't checked.
*/
-static void nand_write_page_raw_syndrome(struct mtd_info *mtd, struct nand_chip *chip,
- const uint8_t *buf)
+static void nand_write_page_raw_syndrome(struct mtd_info *mtd,
+ struct nand_chip *chip,
+ const uint8_t *buf)
{
int eccsize = chip->ecc.size;
int eccbytes = chip->ecc.bytes;
@@ -2099,7 +2113,7 @@ static int nand_write_page(struct mtd_info *mtd, struct nand_chip *chip,
static uint8_t *nand_fill_oob(struct nand_chip *chip, uint8_t *oob, size_t len,
struct mtd_oob_ops *ops)
{
- switch(ops->mode) {
+ switch (ops->mode) {
case MTD_OOB_PLACE:
case MTD_OOB_RAW:
@@ -2111,7 +2125,7 @@ static uint8_t *nand_fill_oob(struct nand_chip *chip, uint8_t *oob, size_t len,
uint32_t boffs = 0, woffs = ops->ooboffs;
size_t bytes = 0;
- for(; free->length && len; free++, len -= bytes) {
+ for (; free->length && len; free++, len -= bytes) {
/* Write request not from offset 0 ? */
if (unlikely(woffs)) {
if (woffs >= free->length) {
@@ -2137,7 +2151,7 @@ static uint8_t *nand_fill_oob(struct nand_chip *chip, uint8_t *oob, size_t len,
return NULL;
}
-#define NOTALIGNED(x) (x & (chip->subpagesize - 1)) != 0
+#define NOTALIGNED(x) ((x & (chip->subpagesize - 1)) != 0)
/**
* nand_do_write_ops - [Internal] NAND write with ECC
@@ -2200,10 +2214,10 @@ static int nand_do_write_ops(struct mtd_info *mtd, loff_t to,
memset(chip->oob_poi, 0xff, mtd->oobsize);
/* Don't allow multipage oob writes with offset */
- if (ops->ooboffs && (ops->ooboffs + ops->ooblen > oobmaxlen))
+ if (oob && ops->ooboffs && (ops->ooboffs + ops->ooblen > oobmaxlen))
return -EINVAL;
- while(1) {
+ while (1) {
int bytes = mtd->writesize;
int cached = writelen > bytes && page != blockmask;
uint8_t *wbuf = buf;
@@ -2431,7 +2445,7 @@ static int nand_write_oob(struct mtd_info *mtd, loff_t to,
nand_get_device(chip, mtd, FL_WRITING);
- switch(ops->mode) {
+ switch (ops->mode) {
case MTD_OOB_PLACE:
case MTD_OOB_AUTO:
case MTD_OOB_RAW:
@@ -2446,7 +2460,7 @@ static int nand_write_oob(struct mtd_info *mtd, loff_t to,
else
ret = nand_do_write_ops(mtd, to, ops);
- out:
+out:
nand_release_device(mtd);
return ret;
}
@@ -2511,7 +2525,7 @@ int nand_erase_nand(struct mtd_info *mtd, struct erase_info *instr,
{
int page, status, pages_per_block, ret, chipnr;
struct nand_chip *chip = mtd->priv;
- loff_t rewrite_bbt[NAND_MAX_CHIPS]={0};
+ loff_t rewrite_bbt[NAND_MAX_CHIPS] = {0};
unsigned int bbt_masked_page = 0xffffffff;
loff_t len;
@@ -2632,7 +2646,7 @@ int nand_erase_nand(struct mtd_info *mtd, struct erase_info *instr,
}
instr->state = MTD_ERASE_DONE;
- erase_exit:
+erase_exit:
ret = instr->state == MTD_ERASE_DONE ? 0 : -EIO;
@@ -2706,7 +2720,8 @@ static int nand_block_markbad(struct mtd_info *mtd, loff_t ofs)
struct nand_chip *chip = mtd->priv;
int ret;
- if ((ret = nand_block_isbad(mtd, ofs))) {
+ ret = nand_block_isbad(mtd, ofs);
+ if (ret) {
/* If it was bad already, return success and do nothing. */
if (ret > 0)
return 0;
@@ -2787,15 +2802,115 @@ static void nand_set_defaults(struct nand_chip *chip, int busw)
}
/*
+ * sanitize ONFI strings so we can safely print them
+ */
+static void sanitize_string(uint8_t *s, size_t len)
+{
+ ssize_t i;
+
+ /* null terminate */
+ s[len - 1] = 0;
+
+ /* remove non printable chars */
+ for (i = 0; i < len - 1; i++) {
+ if (s[i] < ' ' || s[i] > 127)
+ s[i] = '?';
+ }
+
+ /* remove trailing spaces */
+ strim(s);
+}
+
+static u16 onfi_crc16(u16 crc, u8 const *p, size_t len)
+{
+ int i;
+ while (len--) {
+ crc ^= *p++ << 8;
+ for (i = 0; i < 8; i++)
+ crc = (crc << 1) ^ ((crc & 0x8000) ? 0x8005 : 0);
+ }
+
+ return crc;
+}
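The routine above is a plain MSB-first CRC-16 with polynomial 0x8005 and no final XOR, which is what the ONFI parameter page uses. A stand-alone check is sketched below; it is not part of the patch, the payload is made up, and it assumes the seed value 0x4F4E (ASCII "ON"), which is what ONFI_CRC_BASE is expected to expand to.

#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define CRC_SEED 0x4F4E   /* assumed value of ONFI_CRC_BASE */

static uint16_t crc16(uint16_t crc, const uint8_t *p, size_t len)
{
        int i;

        while (len--) {
                crc ^= *p++ << 8;
                for (i = 0; i < 8; i++)
                        crc = (crc << 1) ^ ((crc & 0x8000) ? 0x8005 : 0);
        }
        return crc;
}

int main(void)
{
        uint8_t buf[256];
        uint16_t crc;

        /* made-up payload standing in for the first 254 parameter bytes */
        memset(buf, 0xA5, 254);
        crc = crc16(CRC_SEED, buf, 254);

        /* appending the CRC MSB-first makes the running CRC come out 0 */
        buf[254] = crc >> 8;
        buf[255] = crc & 0xFF;
        printf("crc = 0x%04x, check = 0x%04x\n", crc,
               crc16(CRC_SEED, buf, 256));
        return 0;
}

The driver itself does the equivalent value comparison: it computes the CRC over the first 254 bytes of each parameter page copy and matches it against the little-endian crc field, as nand_flash_detect_onfi() below shows.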
+
+/*
+ * Check if the NAND chip is ONFI compliant, returns 1 if it is, 0 otherwise
+ */
+static int nand_flash_detect_onfi(struct mtd_info *mtd, struct nand_chip *chip,
+ int busw)
+{
+ struct nand_onfi_params *p = &chip->onfi_params;
+ int i;
+ int val;
+
+ /* try ONFI for unknown chip or LP */
+ chip->cmdfunc(mtd, NAND_CMD_READID, 0x20, -1);
+ if (chip->read_byte(mtd) != 'O' || chip->read_byte(mtd) != 'N' ||
+ chip->read_byte(mtd) != 'F' || chip->read_byte(mtd) != 'I')
+ return 0;
+
+ printk(KERN_INFO "ONFI flash detected\n");
+ chip->cmdfunc(mtd, NAND_CMD_PARAM, 0, -1);
+ for (i = 0; i < 3; i++) {
+ chip->read_buf(mtd, (uint8_t *)p, sizeof(*p));
+ if (onfi_crc16(ONFI_CRC_BASE, (uint8_t *)p, 254) ==
+ le16_to_cpu(p->crc)) {
+ printk(KERN_INFO "ONFI param page %d valid\n", i);
+ break;
+ }
+ }
+
+ if (i == 3)
+ return 0;
+
+ /* check version */
+ val = le16_to_cpu(p->revision);
+ if (val == 1 || val > (1 << 4)) {
+ printk(KERN_INFO "%s: unsupported ONFI version: %d\n",
+ __func__, val);
+ return 0;
+ }
+
+ if (val & (1 << 4))
+ chip->onfi_version = 22;
+ else if (val & (1 << 3))
+ chip->onfi_version = 21;
+ else if (val & (1 << 2))
+ chip->onfi_version = 20;
+ else
+ chip->onfi_version = 10;
+
+ sanitize_string(p->manufacturer, sizeof(p->manufacturer));
+ sanitize_string(p->model, sizeof(p->model));
+ if (!mtd->name)
+ mtd->name = p->model;
+ mtd->writesize = le32_to_cpu(p->byte_per_page);
+ mtd->erasesize = le32_to_cpu(p->pages_per_block) * mtd->writesize;
+ mtd->oobsize = le16_to_cpu(p->spare_bytes_per_page);
+ chip->chipsize = le32_to_cpu(p->blocks_per_lun) * mtd->erasesize;
+ busw = 0;
+ if (le16_to_cpu(p->features) & 1)
+ busw = NAND_BUSWIDTH_16;
+
+ chip->options &= ~NAND_CHIPOPTIONS_MSK;
+ chip->options |= (NAND_NO_READRDY |
+ NAND_NO_AUTOINCR) & NAND_CHIPOPTIONS_MSK;
+
+ return 1;
+}
+
+/*
* Get the flash and manufacturer id and lookup if the type is supported
*/
static struct nand_flash_dev *nand_get_flash_type(struct mtd_info *mtd,
struct nand_chip *chip,
- int busw, int *maf_id,
+ int busw,
+ int *maf_id, int *dev_id,
struct nand_flash_dev *type)
{
- int i, dev_id, maf_idx;
+ int i, maf_idx;
u8 id_data[8];
+ int ret;
/* Select the device */
chip->select_chip(mtd, 0);
@@ -2811,7 +2926,7 @@ static struct nand_flash_dev *nand_get_flash_type(struct mtd_info *mtd,
/* Read manufacturer and device IDs */
*maf_id = chip->read_byte(mtd);
- dev_id = chip->read_byte(mtd);
+ *dev_id = chip->read_byte(mtd);
/* Try again to make sure, as some systems the bus-hold or other
* interface concerns can cause random data which looks like a
@@ -2821,15 +2936,13 @@ static struct nand_flash_dev *nand_get_flash_type(struct mtd_info *mtd,
chip->cmdfunc(mtd, NAND_CMD_READID, 0x00, -1);
- /* Read entire ID string */
-
- for (i = 0; i < 8; i++)
+ for (i = 0; i < 2; i++)
id_data[i] = chip->read_byte(mtd);
- if (id_data[0] != *maf_id || id_data[1] != dev_id) {
+ if (id_data[0] != *maf_id || id_data[1] != *dev_id) {
printk(KERN_INFO "%s: second ID read did not match "
"%02x,%02x against %02x,%02x\n", __func__,
- *maf_id, dev_id, id_data[0], id_data[1]);
+ *maf_id, *dev_id, id_data[0], id_data[1]);
return ERR_PTR(-ENODEV);
}
@@ -2837,8 +2950,23 @@ static struct nand_flash_dev *nand_get_flash_type(struct mtd_info *mtd,
type = nand_flash_ids;
for (; type->name != NULL; type++)
- if (dev_id == type->id)
- break;
+ if (*dev_id == type->id)
+ break;
+
+ chip->onfi_version = 0;
+ if (!type->name || !type->pagesize) {
+ /* Check if the chip is ONFI compliant */
+ ret = nand_flash_detect_onfi(mtd, chip, busw);
+ if (ret)
+ goto ident_done;
+ }
+
+ chip->cmdfunc(mtd, NAND_CMD_READID, 0x00, -1);
+
+ /* Read entire ID string */
+
+ for (i = 0; i < 8; i++)
+ id_data[i] = chip->read_byte(mtd);
if (!type->name)
return ERR_PTR(-ENODEV);
@@ -2848,8 +2976,10 @@ static struct nand_flash_dev *nand_get_flash_type(struct mtd_info *mtd,
chip->chipsize = (uint64_t)type->chipsize << 20;
- /* Newer devices have all the information in additional id bytes */
- if (!type->pagesize) {
+ if (!type->pagesize && chip->init_size) {
+ /* set the pagesize, oobsize and erasesize by the driver */
+ busw = chip->init_size(mtd, chip, id_data);
+ } else if (!type->pagesize) {
int extid;
/* The 3rd id byte holds MLC / multichip data */
chip->cellinfo = id_data[2];
@@ -2859,7 +2989,7 @@ static struct nand_flash_dev *nand_get_flash_type(struct mtd_info *mtd,
/*
* Field definitions are in the following datasheets:
* Old style (4,5 byte ID): Samsung K9GAG08U0M (p.32)
- * New style (6 byte ID): Samsung K9GAG08U0D (p.40)
+ * New style (6 byte ID): Samsung K9GBG08U0M (p.40)
*
* Check for wraparound + Samsung ID + nonzero 6th byte
* to decide what to do.
@@ -2872,7 +3002,20 @@ static struct nand_flash_dev *nand_get_flash_type(struct mtd_info *mtd,
mtd->writesize = 2048 << (extid & 0x03);
extid >>= 2;
/* Calc oobsize */
- mtd->oobsize = (extid & 0x03) == 0x01 ? 128 : 218;
+ switch (extid & 0x03) {
+ case 1:
+ mtd->oobsize = 128;
+ break;
+ case 2:
+ mtd->oobsize = 218;
+ break;
+ case 3:
+ mtd->oobsize = 400;
+ break;
+ default:
+ mtd->oobsize = 436;
+ break;
+ }
extid >>= 2;
/* Calc blocksize */
mtd->erasesize = (128 * 1024) <<
@@ -2900,7 +3043,35 @@ static struct nand_flash_dev *nand_get_flash_type(struct mtd_info *mtd,
mtd->writesize = type->pagesize;
mtd->oobsize = mtd->writesize / 32;
busw = type->options & NAND_BUSWIDTH_16;
+
+ /*
+ * Check for Spansion/AMD ID + repeating 5th, 6th byte since
+ * some Spansion chips have erasesize that conflicts with size
+ * listed in nand_ids table
+ * Data sheet (5 byte ID): Spansion S30ML-P ORNAND (p.39)
+ */
+ if (*maf_id == NAND_MFR_AMD && id_data[4] != 0x00 &&
+ id_data[5] == 0x00 && id_data[6] == 0x00 &&
+ id_data[7] == 0x00 && mtd->writesize == 512) {
+ mtd->erasesize = 128 * 1024;
+ mtd->erasesize <<= ((id_data[3] & 0x03) << 1);
+ }
}
+ /* Get chip options, preserve non chip based options */
+ chip->options &= ~NAND_CHIPOPTIONS_MSK;
+ chip->options |= type->options & NAND_CHIPOPTIONS_MSK;
+
+ /* Check if chip is not a Samsung device. Do not clear the
+ * options for chips which do not have an extended id.
+ */
+ if (*maf_id != NAND_MFR_SAMSUNG && !type->pagesize)
+ chip->options &= ~NAND_SAMSUNG_LP_OPTIONS;
+ident_done:
+
+ /*
+ * Set chip as a default. Board drivers can override it, if necessary
+ */
+ chip->options |= NAND_NO_AUTOINCR;
/* Try to identify manufacturer */
for (maf_idx = 0; nand_manuf_ids[maf_idx].id != 0x0; maf_idx++) {
@@ -2915,7 +3086,7 @@ static struct nand_flash_dev *nand_get_flash_type(struct mtd_info *mtd,
if (busw != (chip->options & NAND_BUSWIDTH_16)) {
printk(KERN_INFO "NAND device: Manufacturer ID:"
" 0x%02x, Chip ID: 0x%02x (%s %s)\n", *maf_id,
- dev_id, nand_manuf_ids[maf_idx].name, mtd->name);
+ *dev_id, nand_manuf_ids[maf_idx].name, mtd->name);
printk(KERN_WARNING "NAND bus width %d instead %d bit\n",
(chip->options & NAND_BUSWIDTH_16) ? 16 : 8,
busw ? 16 : 8);
@@ -2931,8 +3102,10 @@ static struct nand_flash_dev *nand_get_flash_type(struct mtd_info *mtd,
ffs(mtd->erasesize) - 1;
if (chip->chipsize & 0xffffffff)
chip->chip_shift = ffs((unsigned)chip->chipsize) - 1;
- else
- chip->chip_shift = ffs((unsigned)(chip->chipsize >> 32)) + 32 - 1;
+ else {
+ chip->chip_shift = ffs((unsigned)(chip->chipsize >> 32));
+ chip->chip_shift += 32 - 1;
+ }
/* Set the bad block position */
if (mtd->writesize > 512 || (busw & NAND_BUSWIDTH_16))
@@ -2940,27 +3113,12 @@ static struct nand_flash_dev *nand_get_flash_type(struct mtd_info *mtd,
else
chip->badblockpos = NAND_SMALL_BADBLOCK_POS;
- /* Get chip options, preserve non chip based options */
- chip->options &= ~NAND_CHIPOPTIONS_MSK;
- chip->options |= type->options & NAND_CHIPOPTIONS_MSK;
-
- /*
- * Set chip as a default. Board drivers can override it, if necessary
- */
- chip->options |= NAND_NO_AUTOINCR;
-
- /* Check if chip is a not a samsung device. Do not clear the
- * options for chips which are not having an extended id.
- */
- if (*maf_id != NAND_MFR_SAMSUNG && !type->pagesize)
- chip->options &= ~NAND_SAMSUNG_LP_OPTIONS;
-
/*
* Bad block marker is stored in the last page of each block
* on Samsung and Hynix MLC devices; stored in first two pages
* of each block on Micron devices with 2KiB pages and on
- * SLC Samsung, Hynix, and AMD/Spansion. All others scan only
- * the first page.
+ * SLC Samsung, Hynix, Toshiba and AMD/Spansion. All others scan
+ * only the first page.
*/
if ((chip->cellinfo & NAND_CI_CELLTYPE_MSK) &&
(*maf_id == NAND_MFR_SAMSUNG ||
@@ -2969,6 +3127,7 @@ static struct nand_flash_dev *nand_get_flash_type(struct mtd_info *mtd,
else if ((!(chip->cellinfo & NAND_CI_CELLTYPE_MSK) &&
(*maf_id == NAND_MFR_SAMSUNG ||
*maf_id == NAND_MFR_HYNIX ||
+ *maf_id == NAND_MFR_TOSHIBA ||
*maf_id == NAND_MFR_AMD)) ||
(mtd->writesize == 2048 &&
*maf_id == NAND_MFR_MICRON))
@@ -2994,9 +3153,11 @@ static struct nand_flash_dev *nand_get_flash_type(struct mtd_info *mtd,
if (mtd->writesize > 512 && chip->cmdfunc == nand_command)
chip->cmdfunc = nand_command_lp;
+ /* TODO onfi flash name */
printk(KERN_INFO "NAND device: Manufacturer ID:"
- " 0x%02x, Chip ID: 0x%02x (%s %s)\n", *maf_id, dev_id,
- nand_manuf_ids[maf_idx].name, type->name);
+ " 0x%02x, Chip ID: 0x%02x (%s %s)\n", *maf_id, *dev_id,
+ nand_manuf_ids[maf_idx].name,
+ chip->onfi_version ? type->name : chip->onfi_params.model);
return type;
}
@@ -3015,7 +3176,7 @@ static struct nand_flash_dev *nand_get_flash_type(struct mtd_info *mtd,
int nand_scan_ident(struct mtd_info *mtd, int maxchips,
struct nand_flash_dev *table)
{
- int i, busw, nand_maf_id;
+ int i, busw, nand_maf_id, nand_dev_id;
struct nand_chip *chip = mtd->priv;
struct nand_flash_dev *type;
@@ -3025,7 +3186,8 @@ int nand_scan_ident(struct mtd_info *mtd, int maxchips,
nand_set_defaults(chip, busw);
/* Read the flash type */
- type = nand_get_flash_type(mtd, chip, busw, &nand_maf_id, table);
+ type = nand_get_flash_type(mtd, chip, busw,
+ &nand_maf_id, &nand_dev_id, table);
if (IS_ERR(type)) {
if (!(chip->options & NAND_SCAN_SILENT_NODEV))
@@ -3043,7 +3205,7 @@ int nand_scan_ident(struct mtd_info *mtd, int maxchips,
chip->cmdfunc(mtd, NAND_CMD_READID, 0x00, -1);
/* Read manufacturer and device IDs */
if (nand_maf_id != chip->read_byte(mtd) ||
- type->id != chip->read_byte(mtd))
+ nand_dev_id != chip->read_byte(mtd))
break;
}
if (i > 1)
@@ -3055,6 +3217,7 @@ int nand_scan_ident(struct mtd_info *mtd, int maxchips,
return 0;
}
+EXPORT_SYMBOL(nand_scan_ident);
/**
@@ -3219,7 +3382,7 @@ int nand_scan_tail(struct mtd_info *mtd)
* mode
*/
chip->ecc.steps = mtd->writesize / chip->ecc.size;
- if(chip->ecc.steps * chip->ecc.size != mtd->writesize) {
+ if (chip->ecc.steps * chip->ecc.size != mtd->writesize) {
printk(KERN_WARNING "Invalid ecc parameters\n");
BUG();
}
@@ -3231,7 +3394,7 @@ int nand_scan_tail(struct mtd_info *mtd)
*/
if (!(chip->options & NAND_NO_SUBPAGE_WRITE) &&
!(chip->cellinfo & NAND_CI_CELLTYPE_MSK)) {
- switch(chip->ecc.steps) {
+ switch (chip->ecc.steps) {
case 2:
mtd->subpage_sft = 1;
break;
@@ -3283,10 +3446,11 @@ int nand_scan_tail(struct mtd_info *mtd)
/* Build bad block table */
return chip->scan_bbt(mtd);
}
+EXPORT_SYMBOL(nand_scan_tail);
/* is_module_text_address() isn't exported, and it's mostly a pointless
- test if this is a module _anyway_ -- they'd have to try _really_ hard
- to call us from in-kernel code if the core NAND support is modular. */
+ * test if this is a module _anyway_ -- they'd have to try _really_ hard
+ * to call us from in-kernel code if the core NAND support is modular. */
#ifdef MODULE
#define caller_is_module() (1)
#else
@@ -3322,6 +3486,7 @@ int nand_scan(struct mtd_info *mtd, int maxchips)
ret = nand_scan_tail(mtd);
return ret;
}
+EXPORT_SYMBOL(nand_scan);
/**
* nand_release - [NAND Interface] Free resources held by the NAND device
@@ -3348,12 +3513,6 @@ void nand_release(struct mtd_info *mtd)
& NAND_BBT_DYNAMICSTRUCT)
kfree(chip->badblock_pattern);
}
-
-EXPORT_SYMBOL_GPL(nand_lock);
-EXPORT_SYMBOL_GPL(nand_unlock);
-EXPORT_SYMBOL_GPL(nand_scan);
-EXPORT_SYMBOL_GPL(nand_scan_ident);
-EXPORT_SYMBOL_GPL(nand_scan_tail);
EXPORT_SYMBOL_GPL(nand_release);
static int __init nand_base_init(void)
@@ -3371,5 +3530,6 @@ module_init(nand_base_init);
module_exit(nand_base_exit);
MODULE_LICENSE("GPL");
-MODULE_AUTHOR("Steven J. Hill <sjhill@realitydiluted.com>, Thomas Gleixner <tglx@linutronix.de>");
+MODULE_AUTHOR("Steven J. Hill <sjhill@realitydiluted.com>");
+MODULE_AUTHOR("Thomas Gleixner <tglx@linutronix.de>");
MODULE_DESCRIPTION("Generic NAND flash driver code");
diff --git a/drivers/mtd/nand/nand_bbt.c b/drivers/mtd/nand/nand_bbt.c
index 5fedf4a74f16..586b981f0e61 100644
--- a/drivers/mtd/nand/nand_bbt.c
+++ b/drivers/mtd/nand/nand_bbt.c
@@ -13,28 +13,37 @@
* Description:
*
* When nand_scan_bbt is called, then it tries to find the bad block table
- * depending on the options in the bbt descriptor(s). If a bbt is found
- * then the contents are read and the memory based bbt is created. If a
- * mirrored bbt is selected then the mirror is searched too and the
- * versions are compared. If the mirror has a greater version number
- * than the mirror bbt is used to build the memory based bbt.
+ * depending on the options in the BBT descriptor(s). If no flash based BBT
+ * (NAND_USE_FLASH_BBT) is specified then the device is scanned for factory
+ * marked good / bad blocks. This information is used to create a memory BBT.
+ * When a new bad block is discovered, the "factory" information is updated
+ * on the device.
+ * If a flash based BBT is specified then the function first tries to find the
+ * BBT on flash. If a BBT is found then the contents are read and the memory
+ * based BBT is created. If a mirrored BBT is selected then the mirror is
+ * searched too and the versions are compared. If the mirror has a greater
+ * version number then the mirror BBT is used to build the memory based BBT.
* If the tables are not versioned, then we "or" the bad block information.
- * If one of the bbt's is out of date or does not exist it is (re)created.
- * If no bbt exists at all then the device is scanned for factory marked
+ * If one of the BBTs is out of date or does not exist it is (re)created.
+ * If no BBT exists at all then the device is scanned for factory marked
* good / bad blocks and the bad block tables are created.
*
- * For manufacturer created bbts like the one found on M-SYS DOC devices
- * the bbt is searched and read but never created
+ * For manufacturer created BBTs like the one found on M-SYS DOC devices
+ * the BBT is searched and read but never created
*
- * The autogenerated bad block table is located in the last good blocks
+ * The auto generated bad block table is located in the last good blocks
* of the device. The table is mirrored, so it can be updated eventually.
- * The table is marked in the oob area with an ident pattern and a version
- * number which indicates which of both tables is more up to date.
+ * The table is marked in the OOB area with an ident pattern and a version
+ * number which indicates which of both tables is more up to date. If the NAND
+ * controller needs the complete OOB area for the ECC information then the
+ * option NAND_USE_FLASH_BBT_NO_OOB should be used: it moves the ident pattern
+ * and the version byte into the data area and the OOB area will remain
+ * untouched.
*
* The table uses 2 bits per block
- * 11b: block is good
- * 00b: block is factory marked bad
- * 01b, 10b: block is marked bad due to wear
+ * 11b: block is good
+ * 00b: block is factory marked bad
+ * 01b, 10b: block is marked bad due to wear
*
* The memory bad block table uses the following scheme:
* 00b: block is good
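The 2-bits-per-block packing described in this header comment can be decoded with a small helper. The function below is only an illustration, not code from the patch; the names are made up and it assumes block 0 occupies the least significant bit pair of byte 0.

#include <stdint.h>
#include <stdio.h>

/* four blocks per byte, two bits per block, block 0 in the LSBs */
static int bbt_block_code(const uint8_t *bbt, unsigned int block)
{
        return (bbt[block >> 2] >> ((block & 0x3) << 1)) & 0x3;
}

int main(void)
{
        /* made-up table: blocks 0..3 coded as 11b, 00b, 01b and 10b */
        uint8_t bbt[1] = { 0x93 };
        unsigned int i;

        for (i = 0; i < 4; i++)
                printf("block %u -> %d%db\n", i,
                       bbt_block_code(bbt, i) >> 1,
                       bbt_block_code(bbt, i) & 1);
        return 0;
}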
@@ -59,6 +68,16 @@
#include <linux/delay.h>
#include <linux/vmalloc.h>
+static int check_pattern_no_oob(uint8_t *buf, struct nand_bbt_descr *td)
+{
+ int ret;
+
+ ret = memcmp(buf, td->pattern, td->len);
+ if (!ret)
+ return ret;
+ return -1;
+}
+
/**
* check_pattern - [GENERIC] check if a pattern is in the buffer
* @buf: the buffer to search
@@ -77,6 +96,9 @@ static int check_pattern(uint8_t *buf, int len, int paglen, struct nand_bbt_desc
int i, end = 0;
uint8_t *p = buf;
+ if (td->options & NAND_BBT_NO_OOB)
+ return check_pattern_no_oob(buf, td);
+
end = paglen + td->offs;
if (td->options & NAND_BBT_SCANEMPTY) {
for (i = 0; i < end; i++) {
@@ -156,32 +178,63 @@ static int check_short_pattern(uint8_t *buf, struct nand_bbt_descr *td)
}
/**
+ * add_marker_len - compute the length of the marker in data area
+ * @td: BBT descriptor used for computation
+ *
+ * The length will be 0 if the marker is located in the OOB area.
+ */
+static u32 add_marker_len(struct nand_bbt_descr *td)
+{
+ u32 len;
+
+ if (!(td->options & NAND_BBT_NO_OOB))
+ return 0;
+
+ len = td->len;
+ if (td->options & NAND_BBT_VERSION)
+ len++;
+ return len;
+}
+
+/**
* read_bbt - [GENERIC] Read the bad block table starting from page
* @mtd: MTD device structure
* @buf: temporary buffer
* @page: the starting page
* @num: the number of bbt descriptors to read
- * @bits: number of bits per block
+ * @td: the bbt description table
* @offs: offset in the memory table
- * @reserved_block_code: Pattern to identify reserved blocks
*
* Read the bad block table starting from page.
*
*/
static int read_bbt(struct mtd_info *mtd, uint8_t *buf, int page, int num,
- int bits, int offs, int reserved_block_code)
+ struct nand_bbt_descr *td, int offs)
{
int res, i, j, act = 0;
struct nand_chip *this = mtd->priv;
size_t retlen, len, totlen;
loff_t from;
+ int bits = td->options & NAND_BBT_NRBITS_MSK;
uint8_t msk = (uint8_t) ((1 << bits) - 1);
+ u32 marker_len;
+ int reserved_block_code = td->reserved_block_code;
totlen = (num * bits) >> 3;
+ marker_len = add_marker_len(td);
from = ((loff_t) page) << this->page_shift;
while (totlen) {
len = min(totlen, (size_t) (1 << this->bbt_erase_shift));
+ if (marker_len) {
+ /*
+ * If the BBT marker is not in the OOB area, it
+ * will only be in the first page.
+ */
+ len -= marker_len;
+ from += marker_len;
+ marker_len = 0;
+ }
res = mtd->read(mtd, from, len, &retlen, buf);
if (res < 0) {
if (retlen != len) {
@@ -238,20 +291,21 @@ static int read_abs_bbt(struct mtd_info *mtd, uint8_t *buf, struct nand_bbt_desc
{
struct nand_chip *this = mtd->priv;
int res = 0, i;
- int bits;
- bits = td->options & NAND_BBT_NRBITS_MSK;
if (td->options & NAND_BBT_PERCHIP) {
int offs = 0;
for (i = 0; i < this->numchips; i++) {
if (chip == -1 || chip == i)
- res = read_bbt (mtd, buf, td->pages[i], this->chipsize >> this->bbt_erase_shift, bits, offs, td->reserved_block_code);
+ res = read_bbt(mtd, buf, td->pages[i],
+ this->chipsize >> this->bbt_erase_shift,
+ td, offs);
if (res)
return res;
offs += this->chipsize >> (this->bbt_erase_shift + 2);
}
} else {
- res = read_bbt (mtd, buf, td->pages[0], mtd->size >> this->bbt_erase_shift, bits, 0, td->reserved_block_code);
+ res = read_bbt(mtd, buf, td->pages[0],
+ mtd->size >> this->bbt_erase_shift, td, 0);
if (res)
return res;
}
@@ -259,9 +313,25 @@ static int read_abs_bbt(struct mtd_info *mtd, uint8_t *buf, struct nand_bbt_desc
}
/*
+ * BBT marker is in the first page, no OOB.
+ */
+static int scan_read_raw_data(struct mtd_info *mtd, uint8_t *buf, loff_t offs,
+ struct nand_bbt_descr *td)
+{
+ size_t retlen;
+ size_t len;
+
+ len = td->len;
+ if (td->options & NAND_BBT_VERSION)
+ len++;
+
+ return mtd->read(mtd, offs, len, &retlen, buf);
+}
+
+/*
* Scan read raw data from flash
*/
-static int scan_read_raw(struct mtd_info *mtd, uint8_t *buf, loff_t offs,
+static int scan_read_raw_oob(struct mtd_info *mtd, uint8_t *buf, loff_t offs,
size_t len)
{
struct mtd_oob_ops ops;
@@ -294,6 +364,15 @@ static int scan_read_raw(struct mtd_info *mtd, uint8_t *buf, loff_t offs,
return 0;
}
+static int scan_read_raw(struct mtd_info *mtd, uint8_t *buf, loff_t offs,
+ size_t len, struct nand_bbt_descr *td)
+{
+ if (td->options & NAND_BBT_NO_OOB)
+ return scan_read_raw_data(mtd, buf, offs, td);
+ else
+ return scan_read_raw_oob(mtd, buf, offs, len);
+}
+
/*
* Scan write data with oob to flash
*/
@@ -312,6 +391,15 @@ static int scan_write_bbt(struct mtd_info *mtd, loff_t offs, size_t len,
return mtd->write_oob(mtd, offs, &ops);
}
+static u32 bbt_get_ver_offs(struct mtd_info *mtd, struct nand_bbt_descr *td)
+{
+ u32 ver_offs = td->veroffs;
+
+ if (!(td->options & NAND_BBT_NO_OOB))
+ ver_offs += mtd->writesize;
+ return ver_offs;
+}
+
/**
* read_abs_bbts - [GENERIC] Read the bad block table(s) for all chips starting at a given page
* @mtd: MTD device structure
@@ -331,8 +419,8 @@ static int read_abs_bbts(struct mtd_info *mtd, uint8_t *buf,
/* Read the primary version, if available */
if (td->options & NAND_BBT_VERSION) {
scan_read_raw(mtd, buf, (loff_t)td->pages[0] << this->page_shift,
- mtd->writesize);
- td->version[0] = buf[mtd->writesize + td->veroffs];
+ mtd->writesize, td);
+ td->version[0] = buf[bbt_get_ver_offs(mtd, td)];
printk(KERN_DEBUG "Bad block table at page %d, version 0x%02X\n",
td->pages[0], td->version[0]);
}
@@ -340,8 +428,8 @@ static int read_abs_bbts(struct mtd_info *mtd, uint8_t *buf,
/* Read the mirror version, if available */
if (md && (md->options & NAND_BBT_VERSION)) {
scan_read_raw(mtd, buf, (loff_t)md->pages[0] << this->page_shift,
- mtd->writesize);
- md->version[0] = buf[mtd->writesize + md->veroffs];
+ mtd->writesize, td);
+ md->version[0] = buf[bbt_get_ver_offs(mtd, md)];
printk(KERN_DEBUG "Bad block table at page %d, version 0x%02X\n",
md->pages[0], md->version[0]);
}
@@ -357,7 +445,7 @@ static int scan_block_full(struct mtd_info *mtd, struct nand_bbt_descr *bd,
{
int ret, j;
- ret = scan_read_raw(mtd, buf, offs, readlen);
+ ret = scan_read_raw_oob(mtd, buf, offs, readlen);
if (ret)
return ret;
@@ -464,6 +552,8 @@ static int create_bbt(struct mtd_info *mtd, uint8_t *buf,
for (i = startblock; i < numblocks;) {
int ret;
+ BUG_ON(bd->options & NAND_BBT_NO_OOB);
+
if (bd->options & NAND_BBT_SCANALLPAGES)
ret = scan_block_full(mtd, bd, from, buf, readlen,
scanlen, len);
@@ -545,11 +635,12 @@ static int search_bbt(struct mtd_info *mtd, uint8_t *buf, struct nand_bbt_descr
loff_t offs = (loff_t)actblock << this->bbt_erase_shift;
/* Read first page */
- scan_read_raw(mtd, buf, offs, mtd->writesize);
+ scan_read_raw(mtd, buf, offs, mtd->writesize, td);
if (!check_pattern(buf, scanlen, mtd->writesize, td)) {
td->pages[i] = actblock << blocktopage;
if (td->options & NAND_BBT_VERSION) {
- td->version[i] = buf[mtd->writesize + td->veroffs];
+ offs = bbt_get_ver_offs(mtd, td);
+ td->version[i] = buf[offs];
}
break;
}
@@ -733,12 +824,26 @@ static int write_bbt(struct mtd_info *mtd, uint8_t *buf,
memset(&buf[offs], 0xff, (size_t) (numblocks >> sft));
ooboffs = len + (pageoffs * mtd->oobsize);
+ } else if (td->options & NAND_BBT_NO_OOB) {
+ ooboffs = 0;
+ offs = td->len;
+ /* the version byte */
+ if (td->options & NAND_BBT_VERSION)
+ offs++;
+ /* Calc length */
+ len = (size_t) (numblocks >> sft);
+ len += offs;
+ /* Make it page aligned ! */
+ len = ALIGN(len, mtd->writesize);
+ /* Preset the buffer with 0xff */
+ memset(buf, 0xff, len);
+ /* Pattern is located at the beginning of the first page */
+ memcpy(buf, td->pattern, td->len);
} else {
/* Calc length */
len = (size_t) (numblocks >> sft);
/* Make it page aligned ! */
- len = (len + (mtd->writesize - 1)) &
- ~(mtd->writesize - 1);
+ len = ALIGN(len, mtd->writesize);
/* Preset the buffer with 0xff */
memset(buf, 0xff, len +
(len >> this->page_shift)* mtd->oobsize);
@@ -772,7 +877,9 @@ static int write_bbt(struct mtd_info *mtd, uint8_t *buf,
if (res < 0)
goto outerr;
- res = scan_write_bbt(mtd, to, len, buf, &buf[len]);
+ res = scan_write_bbt(mtd, to, len, buf,
+ td->options & NAND_BBT_NO_OOB ? NULL :
+ &buf[len]);
if (res < 0)
goto outerr;
@@ -892,7 +999,8 @@ static int check_create(struct mtd_info *mtd, uint8_t *buf, struct nand_bbt_desc
continue;
/* Create the table in memory by scanning the chip(s) */
- create_bbt(mtd, buf, bd, chipsel);
+ if (!(this->options & NAND_CREATE_EMPTY_BBT))
+ create_bbt(mtd, buf, bd, chipsel);
td->version[i] = 1;
if (md)
@@ -983,6 +1091,49 @@ static void mark_bbt_region(struct mtd_info *mtd, struct nand_bbt_descr *td)
}
/**
+ * verify_bbt_descr - verify the bad block description
+ * @bd: the table to verify
+ *
+ * This function performs a few sanity checks on the bad block description
+ * table.
+ */
+static void verify_bbt_descr(struct mtd_info *mtd, struct nand_bbt_descr *bd)
+{
+ struct nand_chip *this = mtd->priv;
+ u32 pattern_len;
+ u32 bits;
+ u32 table_size;
+
+ if (!bd)
+ return;
+
+ pattern_len = bd->len;
+ bits = bd->options & NAND_BBT_NRBITS_MSK;
+
+ BUG_ON((this->options & NAND_USE_FLASH_BBT_NO_OOB) &&
+ !(this->options & NAND_USE_FLASH_BBT));
+ BUG_ON(!bits);
+
+ if (bd->options & NAND_BBT_VERSION)
+ pattern_len++;
+
+ if (bd->options & NAND_BBT_NO_OOB) {
+ BUG_ON(!(this->options & NAND_USE_FLASH_BBT));
+ BUG_ON(!(this->options & NAND_USE_FLASH_BBT_NO_OOB));
+ BUG_ON(bd->offs);
+ if (bd->options & NAND_BBT_VERSION)
+ BUG_ON(bd->veroffs != bd->len);
+ BUG_ON(bd->options & NAND_BBT_SAVECONTENT);
+ }
+
+ if (bd->options & NAND_BBT_PERCHIP)
+ table_size = this->chipsize >> this->bbt_erase_shift;
+ else
+ table_size = mtd->size >> this->bbt_erase_shift;
+ table_size >>= 3;
+ table_size *= bits;
+ if (bd->options & NAND_BBT_NO_OOB)
+ table_size += pattern_len;
+ BUG_ON(table_size > (1 << this->bbt_erase_shift));
+}
+
+/**
* nand_scan_bbt - [NAND Interface] scan, find, read and maybe create bad block table(s)
* @mtd: MTD device structure
* @bd: descriptor for the good/bad block search pattern
@@ -1023,6 +1174,8 @@ int nand_scan_bbt(struct mtd_info *mtd, struct nand_bbt_descr *bd)
}
return res;
}
+ verify_bbt_descr(mtd, td);
+ verify_bbt_descr(mtd, md);
/* Allocate a temporary buffer for one eraseblock incl. oob */
len = (1 << this->bbt_erase_shift);
@@ -1166,6 +1319,26 @@ static struct nand_bbt_descr bbt_mirror_descr = {
.pattern = mirror_pattern
};
+static struct nand_bbt_descr bbt_main_no_bbt_descr = {
+ .options = NAND_BBT_LASTBLOCK | NAND_BBT_CREATE | NAND_BBT_WRITE
+ | NAND_BBT_2BIT | NAND_BBT_VERSION | NAND_BBT_PERCHIP
+ | NAND_BBT_NO_OOB,
+ .len = 4,
+ .veroffs = 4,
+ .maxblocks = 4,
+ .pattern = bbt_pattern
+};
+
+static struct nand_bbt_descr bbt_mirror_no_bbt_descr = {
+ .options = NAND_BBT_LASTBLOCK | NAND_BBT_CREATE | NAND_BBT_WRITE
+ | NAND_BBT_2BIT | NAND_BBT_VERSION | NAND_BBT_PERCHIP
+ | NAND_BBT_NO_OOB,
+ .len = 4,
+ .veroffs = 4,
+ .maxblocks = 4,
+ .pattern = mirror_pattern
+};
+
#define BBT_SCAN_OPTIONS (NAND_BBT_SCANLASTPAGE | NAND_BBT_SCAN2NDPAGE | \
NAND_BBT_SCANBYTE1AND6)
/**
@@ -1236,8 +1409,13 @@ int nand_default_bbt(struct mtd_info *mtd)
if (this->options & NAND_USE_FLASH_BBT) {
/* Use the default pattern descriptors */
if (!this->bbt_td) {
- this->bbt_td = &bbt_main_descr;
- this->bbt_md = &bbt_mirror_descr;
+ if (this->options & NAND_USE_FLASH_BBT_NO_OOB) {
+ this->bbt_td = &bbt_main_no_bbt_descr;
+ this->bbt_md = &bbt_mirror_no_bbt_descr;
+ } else {
+ this->bbt_td = &bbt_main_descr;
+ this->bbt_md = &bbt_mirror_descr;
+ }
}
if (!this->badblock_pattern) {
this->badblock_pattern = (mtd->writesize > 512) ? &largepage_flashbased : &smallpage_flashbased;
diff --git a/drivers/mtd/nand/nand_ids.c b/drivers/mtd/nand/nand_ids.c
index c65f19074bc8..00cf1b0d6053 100644
--- a/drivers/mtd/nand/nand_ids.c
+++ b/drivers/mtd/nand/nand_ids.c
@@ -75,9 +75,13 @@ struct nand_flash_dev nand_flash_ids[] = {
/*512 Megabit */
{"NAND 64MiB 1,8V 8-bit", 0xA2, 0, 64, 0, LP_OPTIONS},
+ {"NAND 64MiB 1,8V 8-bit", 0xA0, 0, 64, 0, LP_OPTIONS},
{"NAND 64MiB 3,3V 8-bit", 0xF2, 0, 64, 0, LP_OPTIONS},
+ {"NAND 64MiB 3,3V 8-bit", 0xD0, 0, 64, 0, LP_OPTIONS},
{"NAND 64MiB 1,8V 16-bit", 0xB2, 0, 64, 0, LP_OPTIONS16},
+ {"NAND 64MiB 1,8V 16-bit", 0xB0, 0, 64, 0, LP_OPTIONS16},
{"NAND 64MiB 3,3V 16-bit", 0xC2, 0, 64, 0, LP_OPTIONS16},
+ {"NAND 64MiB 3,3V 16-bit", 0xC0, 0, 64, 0, LP_OPTIONS16},
/* 1 Gigabit */
{"NAND 128MiB 1,8V 8-bit", 0xA1, 0, 128, 0, LP_OPTIONS},
@@ -112,7 +116,34 @@ struct nand_flash_dev nand_flash_ids[] = {
{"NAND 2GiB 3,3V 16-bit", 0xC5, 0, 2048, 0, LP_OPTIONS16},
/* 32 Gigabit */
+ {"NAND 4GiB 1,8V 8-bit", 0xA7, 0, 4096, 0, LP_OPTIONS},
{"NAND 4GiB 3,3V 8-bit", 0xD7, 0, 4096, 0, LP_OPTIONS},
+ {"NAND 4GiB 1,8V 16-bit", 0xB7, 0, 4096, 0, LP_OPTIONS16},
+ {"NAND 4GiB 3,3V 16-bit", 0xC7, 0, 4096, 0, LP_OPTIONS16},
+
+ /* 64 Gigabit */
+ {"NAND 8GiB 1,8V 8-bit", 0xAE, 0, 8192, 0, LP_OPTIONS},
+ {"NAND 8GiB 3,3V 8-bit", 0xDE, 0, 8192, 0, LP_OPTIONS},
+ {"NAND 8GiB 1,8V 16-bit", 0xBE, 0, 8192, 0, LP_OPTIONS16},
+ {"NAND 8GiB 3,3V 16-bit", 0xCE, 0, 8192, 0, LP_OPTIONS16},
+
+ /* 128 Gigabit */
+ {"NAND 16GiB 1,8V 8-bit", 0x1A, 0, 16384, 0, LP_OPTIONS},
+ {"NAND 16GiB 3,3V 8-bit", 0x3A, 0, 16384, 0, LP_OPTIONS},
+ {"NAND 16GiB 1,8V 16-bit", 0x2A, 0, 16384, 0, LP_OPTIONS16},
+ {"NAND 16GiB 3,3V 16-bit", 0x4A, 0, 16384, 0, LP_OPTIONS16},
+
+ /* 256 Gigabit */
+ {"NAND 32GiB 1,8V 8-bit", 0x1C, 0, 32768, 0, LP_OPTIONS},
+ {"NAND 32GiB 3,3V 8-bit", 0x3C, 0, 32768, 0, LP_OPTIONS},
+ {"NAND 32GiB 1,8V 16-bit", 0x2C, 0, 32768, 0, LP_OPTIONS16},
+ {"NAND 32GiB 3,3V 16-bit", 0x4C, 0, 32768, 0, LP_OPTIONS16},
+
+ /* 512 Gigabit */
+ {"NAND 64GiB 1,8V 8-bit", 0x1E, 0, 65536, 0, LP_OPTIONS},
+ {"NAND 64GiB 3,3V 8-bit", 0x3E, 0, 65536, 0, LP_OPTIONS},
+ {"NAND 64GiB 1,8V 16-bit", 0x2E, 0, 65536, 0, LP_OPTIONS16},
+ {"NAND 64GiB 3,3V 16-bit", 0x4E, 0, 65536, 0, LP_OPTIONS16},
/*
* Renesas AND 1 Gigabit. Those chips do not support extended id and
diff --git a/drivers/mtd/nand/nandsim.c b/drivers/mtd/nand/nandsim.c
index c25648bb5793..a6a73aab1253 100644
--- a/drivers/mtd/nand/nandsim.c
+++ b/drivers/mtd/nand/nandsim.c
@@ -107,6 +107,7 @@ static char *gravepages = NULL;
static unsigned int rptwear = 0;
static unsigned int overridesize = 0;
static char *cache_file = NULL;
+static unsigned int bbt;
module_param(first_id_byte, uint, 0400);
module_param(second_id_byte, uint, 0400);
@@ -130,6 +131,7 @@ module_param(gravepages, charp, 0400);
module_param(rptwear, uint, 0400);
module_param(overridesize, uint, 0400);
module_param(cache_file, charp, 0400);
+module_param(bbt, uint, 0400);
MODULE_PARM_DESC(first_id_byte, "The first byte returned by NAND Flash 'read ID' command (manufacturer ID)");
MODULE_PARM_DESC(second_id_byte, "The second byte returned by NAND Flash 'read ID' command (chip ID)");
@@ -162,6 +164,7 @@ MODULE_PARM_DESC(overridesize, "Specifies the NAND Flash size overriding the I
"The size is specified in erase blocks and as the exponent of a power of two"
" e.g. 5 means a size of 32 erase blocks");
MODULE_PARM_DESC(cache_file, "File to use to cache nand pages instead of memory");
+MODULE_PARM_DESC(bbt, "0 OOB, 1 BBT with marker in OOB, 2 BBT with marker in data area");
/* The largest possible page size */
#define NS_LARGEST_PAGE_SIZE 4096
@@ -2264,6 +2267,18 @@ static int __init ns_init_module(void)
/* and 'badblocks' parameters to work */
chip->options |= NAND_SKIP_BBTSCAN;
+ switch (bbt) {
+ case 2:
+ chip->options |= NAND_USE_FLASH_BBT_NO_OOB;
+ case 1:
+ chip->options |= NAND_USE_FLASH_BBT;
+ case 0:
+ break;
+ default:
+ NS_ERR("bbt has to be 0..2\n");
+ retval = -EINVAL;
+ goto error;
+ }
/*
* Perform minimum nandsim structure initialization to handle
* the initial ID read command correctly
@@ -2321,10 +2336,10 @@ static int __init ns_init_module(void)
if ((retval = init_nandsim(nsmtd)) != 0)
goto err_exit;
- if ((retval = parse_badblocks(nand, nsmtd)) != 0)
+ if ((retval = nand_default_bbt(nsmtd)) != 0)
goto err_exit;
- if ((retval = nand_default_bbt(nsmtd)) != 0)
+ if ((retval = parse_badblocks(nand, nsmtd)) != 0)
goto err_exit;
/* Register NAND partitions */
diff --git a/drivers/mtd/nand/ndfc.c b/drivers/mtd/nand/ndfc.c
index 510554e6c115..c9ae0a5023b6 100644
--- a/drivers/mtd/nand/ndfc.c
+++ b/drivers/mtd/nand/ndfc.c
@@ -229,7 +229,7 @@ static int __devinit ndfc_probe(struct platform_device *ofdev,
const struct of_device_id *match)
{
struct ndfc_controller *ndfc = &ndfc_ctrl;
- const u32 *reg;
+ const __be32 *reg;
u32 ccr;
int err, len;
@@ -244,7 +244,7 @@ static int __devinit ndfc_probe(struct platform_device *ofdev,
dev_err(&ofdev->dev, "unable read reg property (%d)\n", len);
return -ENOENT;
}
- ndfc->chip_select = reg[0];
+ ndfc->chip_select = be32_to_cpu(reg[0]);
ndfc->ndfcbase = of_iomap(ofdev->dev.of_node, 0);
if (!ndfc->ndfcbase) {
@@ -257,7 +257,7 @@ static int __devinit ndfc_probe(struct platform_device *ofdev,
/* It is ok if ccr does not exist - just default to 0 */
reg = of_get_property(ofdev->dev.of_node, "ccr", NULL);
if (reg)
- ccr |= *reg;
+ ccr |= be32_to_cpup(reg);
out_be32(ndfc->ndfcbase + NDFC_CCR, ccr);
@@ -265,7 +265,7 @@ static int __devinit ndfc_probe(struct platform_device *ofdev,
reg = of_get_property(ofdev->dev.of_node, "bank-settings", NULL);
if (reg) {
int offset = NDFC_BCFG0 + (ndfc->chip_select << 2);
- out_be32(ndfc->ndfcbase + offset, *reg);
+ out_be32(ndfc->ndfcbase + offset, be32_to_cpup(reg));
}
err = ndfc_chip_init(ndfc, ofdev->dev.of_node);
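These ndfc changes are one instance of a general fix: flattened device-tree properties are big-endian, so they must be typed __be32 and converted before use. A minimal sketch of the pattern, using only the accessors seen in the patch (the node pointer np and the property name are placeholders):

const __be32 *prop;
u32 val = 0;

prop = of_get_property(np, "ccr", NULL);	/* raw big-endian cell */
if (prop)
	val = be32_to_cpup(prop);		/* convert to CPU endianness */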
diff --git a/drivers/mtd/nand/omap2.c b/drivers/mtd/nand/omap2.c
index 513e0a76a4a7..cd41c58b5bbd 100644
--- a/drivers/mtd/nand/omap2.c
+++ b/drivers/mtd/nand/omap2.c
@@ -111,11 +111,11 @@ static int use_dma = 1;
module_param(use_dma, bool, 0);
MODULE_PARM_DESC(use_dma, "enable/disable use of DMA");
#else
-const int use_dma;
+static const int use_dma;
#endif
#else
const int use_prefetch;
-const int use_dma;
+static const int use_dma;
#endif
struct omap_nand_info {
diff --git a/drivers/mtd/nand/pxa3xx_nand.c b/drivers/mtd/nand/pxa3xx_nand.c
index 4d01cda68844..17f8518cc5eb 100644
--- a/drivers/mtd/nand/pxa3xx_nand.c
+++ b/drivers/mtd/nand/pxa3xx_nand.c
@@ -117,7 +117,7 @@ struct pxa3xx_nand_info {
struct nand_chip nand_chip;
struct platform_device *pdev;
- const struct pxa3xx_nand_flash *flash_info;
+ struct pxa3xx_nand_cmdset *cmdset;
struct clk *clk;
void __iomem *mmio_base;
@@ -131,6 +131,7 @@ struct pxa3xx_nand_info {
int drcmr_cmd;
unsigned char *data_buff;
+ unsigned char *oob_buff;
dma_addr_t data_buff_phys;
size_t data_buff_size;
int data_dma_ch;
@@ -149,7 +150,8 @@ struct pxa3xx_nand_info {
int use_ecc; /* use HW ECC ? */
int use_dma; /* use DMA ? */
- size_t data_size; /* data size in FIFO */
+ unsigned int page_size; /* page size of attached chip */
+ unsigned int data_size; /* data size in FIFO */
int retcode;
struct completion cmd_complete;
@@ -158,6 +160,10 @@ struct pxa3xx_nand_info {
uint32_t ndcb1;
uint32_t ndcb2;
+ /* timing calculated from setting */
+ uint32_t ndtr0cs0;
+ uint32_t ndtr1cs0;
+
/* calculated from pxa3xx_nand_flash data */
size_t oob_size;
size_t read_id_bytes;
@@ -174,23 +180,7 @@ MODULE_PARM_DESC(use_dma, "enable DMA for data transfering to/from NAND HW");
* Default NAND flash controller configuration setup by the
* bootloader. This configuration is used only when pdata->keep_config is set
*/
-static struct pxa3xx_nand_timing default_timing;
-static struct pxa3xx_nand_flash default_flash;
-
-static struct pxa3xx_nand_cmdset smallpage_cmdset = {
- .read1 = 0x0000,
- .read2 = 0x0050,
- .program = 0x1080,
- .read_status = 0x0070,
- .read_id = 0x0090,
- .erase = 0xD060,
- .reset = 0x00FF,
- .lock = 0x002A,
- .unlock = 0x2423,
- .lock_status = 0x007A,
-};
-
-static struct pxa3xx_nand_cmdset largepage_cmdset = {
+static struct pxa3xx_nand_cmdset default_cmdset = {
.read1 = 0x3000,
.read2 = 0x0050,
.program = 0x1080,
@@ -203,142 +193,27 @@ static struct pxa3xx_nand_cmdset largepage_cmdset = {
.lock_status = 0x007A,
};
-#ifdef CONFIG_MTD_NAND_PXA3xx_BUILTIN
-static struct pxa3xx_nand_timing samsung512MbX16_timing = {
- .tCH = 10,
- .tCS = 0,
- .tWH = 20,
- .tWP = 40,
- .tRH = 30,
- .tRP = 40,
- .tR = 11123,
- .tWHR = 110,
- .tAR = 10,
-};
-
-static struct pxa3xx_nand_flash samsung512MbX16 = {
- .timing = &samsung512MbX16_timing,
- .cmdset = &smallpage_cmdset,
- .page_per_block = 32,
- .page_size = 512,
- .flash_width = 16,
- .dfc_width = 16,
- .num_blocks = 4096,
- .chip_id = 0x46ec,
-};
-
-static struct pxa3xx_nand_flash samsung2GbX8 = {
- .timing = &samsung512MbX16_timing,
- .cmdset = &smallpage_cmdset,
- .page_per_block = 64,
- .page_size = 2048,
- .flash_width = 8,
- .dfc_width = 8,
- .num_blocks = 2048,
- .chip_id = 0xdaec,
+static struct pxa3xx_nand_timing timing[] = {
+ { 40, 80, 60, 100, 80, 100, 90000, 400, 40, },
+ { 10, 0, 20, 40, 30, 40, 11123, 110, 10, },
+ { 10, 25, 15, 25, 15, 30, 25000, 60, 10, },
+ { 10, 35, 15, 25, 15, 25, 25000, 60, 10, },
};
-static struct pxa3xx_nand_flash samsung32GbX8 = {
- .timing = &samsung512MbX16_timing,
- .cmdset = &smallpage_cmdset,
- .page_per_block = 128,
- .page_size = 4096,
- .flash_width = 8,
- .dfc_width = 8,
- .num_blocks = 8192,
- .chip_id = 0xd7ec,
+static struct pxa3xx_nand_flash builtin_flash_types[] = {
+ { 0, 0, 2048, 8, 8, 0, &default_cmdset, &timing[0] },
+ { 0x46ec, 32, 512, 16, 16, 4096, &default_cmdset, &timing[1] },
+ { 0xdaec, 64, 2048, 8, 8, 2048, &default_cmdset, &timing[1] },
+ { 0xd7ec, 128, 4096, 8, 8, 8192, &default_cmdset, &timing[1] },
+ { 0xa12c, 64, 2048, 8, 8, 1024, &default_cmdset, &timing[2] },
+ { 0xb12c, 64, 2048, 16, 16, 1024, &default_cmdset, &timing[2] },
+ { 0xdc2c, 64, 2048, 8, 8, 4096, &default_cmdset, &timing[2] },
+ { 0xcc2c, 64, 2048, 16, 16, 4096, &default_cmdset, &timing[2] },
+ { 0xba20, 64, 2048, 16, 16, 2048, &default_cmdset, &timing[3] },
};
-static struct pxa3xx_nand_timing micron_timing = {
- .tCH = 10,
- .tCS = 25,
- .tWH = 15,
- .tWP = 25,
- .tRH = 15,
- .tRP = 30,
- .tR = 25000,
- .tWHR = 60,
- .tAR = 10,
-};
-
-static struct pxa3xx_nand_flash micron1GbX8 = {
- .timing = &micron_timing,
- .cmdset = &largepage_cmdset,
- .page_per_block = 64,
- .page_size = 2048,
- .flash_width = 8,
- .dfc_width = 8,
- .num_blocks = 1024,
- .chip_id = 0xa12c,
-};
-
-static struct pxa3xx_nand_flash micron1GbX16 = {
- .timing = &micron_timing,
- .cmdset = &largepage_cmdset,
- .page_per_block = 64,
- .page_size = 2048,
- .flash_width = 16,
- .dfc_width = 16,
- .num_blocks = 1024,
- .chip_id = 0xb12c,
-};
-
-static struct pxa3xx_nand_flash micron4GbX8 = {
- .timing = &micron_timing,
- .cmdset = &largepage_cmdset,
- .page_per_block = 64,
- .page_size = 2048,
- .flash_width = 8,
- .dfc_width = 8,
- .num_blocks = 4096,
- .chip_id = 0xdc2c,
-};
-
-static struct pxa3xx_nand_flash micron4GbX16 = {
- .timing = &micron_timing,
- .cmdset = &largepage_cmdset,
- .page_per_block = 64,
- .page_size = 2048,
- .flash_width = 16,
- .dfc_width = 16,
- .num_blocks = 4096,
- .chip_id = 0xcc2c,
-};
-
-static struct pxa3xx_nand_timing stm2GbX16_timing = {
- .tCH = 10,
- .tCS = 35,
- .tWH = 15,
- .tWP = 25,
- .tRH = 15,
- .tRP = 25,
- .tR = 25000,
- .tWHR = 60,
- .tAR = 10,
-};
-
-static struct pxa3xx_nand_flash stm2GbX16 = {
- .timing = &stm2GbX16_timing,
- .cmdset = &largepage_cmdset,
- .page_per_block = 64,
- .page_size = 2048,
- .flash_width = 16,
- .dfc_width = 16,
- .num_blocks = 2048,
- .chip_id = 0xba20,
-};
-
-static struct pxa3xx_nand_flash *builtin_flash_types[] = {
- &samsung512MbX16,
- &samsung2GbX8,
- &samsung32GbX8,
- &micron1GbX8,
- &micron1GbX16,
- &micron4GbX8,
- &micron4GbX16,
- &stm2GbX16,
-};
-#endif /* CONFIG_MTD_NAND_PXA3xx_BUILTIN */
+/* Define a default flash type setting that serves only for flash detection */
+#define DEFAULT_FLASH_TYPE (&builtin_flash_types[0])
#define NDTR0_tCH(c) (min((c), 7) << 19)
#define NDTR0_tCS(c) (min((c), 7) << 16)
@@ -351,23 +226,9 @@ static struct pxa3xx_nand_flash *builtin_flash_types[] = {
#define NDTR1_tWHR(c) (min((c), 15) << 4)
#define NDTR1_tAR(c) (min((c), 15) << 0)
-#define tCH_NDTR0(r) (((r) >> 19) & 0x7)
-#define tCS_NDTR0(r) (((r) >> 16) & 0x7)
-#define tWH_NDTR0(r) (((r) >> 11) & 0x7)
-#define tWP_NDTR0(r) (((r) >> 8) & 0x7)
-#define tRH_NDTR0(r) (((r) >> 3) & 0x7)
-#define tRP_NDTR0(r) (((r) >> 0) & 0x7)
-
-#define tR_NDTR1(r) (((r) >> 16) & 0xffff)
-#define tWHR_NDTR1(r) (((r) >> 4) & 0xf)
-#define tAR_NDTR1(r) (((r) >> 0) & 0xf)
-
/* convert nano-seconds to nand flash controller clock cycles */
#define ns2cycle(ns, clk) (int)((ns) * (clk / 1000000) / 1000)
-/* convert nand flash controller clock cycles to nano-seconds */
-#define cycle2ns(c, clk) ((((c) + 1) * 1000000 + clk / 500) / (clk / 1000))
-
static void pxa3xx_nand_set_timing(struct pxa3xx_nand_info *info,
const struct pxa3xx_nand_timing *t)
{
@@ -385,6 +246,8 @@ static void pxa3xx_nand_set_timing(struct pxa3xx_nand_info *info,
NDTR1_tWHR(ns2cycle(t->tWHR, nand_clk)) |
NDTR1_tAR(ns2cycle(t->tAR, nand_clk));
+ info->ndtr0cs0 = ndtr0;
+ info->ndtr1cs0 = ndtr1;
nand_writel(info, NDTR0CS0, ndtr0);
nand_writel(info, NDTR1CS0, ndtr1);
}
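ns2cycle() converts a nanosecond timing into controller clock cycles: with the clock rate in Hz, (ns * (clk / 1000000)) / 1000 is simply ns * MHz / 1000, truncated. The two computed register words are also cached in ndtr0cs0/ndtr1cs0 so that resume can rewrite them directly. A worked sketch with an assumed 156 MHz NAND clock:

#define ns2cycle(ns, clk) (int)((ns) * (clk / 1000000) / 1000)	/* as in the patch */

unsigned long nand_clk = 156000000;		/* Hz, example value only */
int tCH_cycles = ns2cycle(10, nand_clk);	/* 10 ns    -> 1 cycle    */
int tR_cycles  = ns2cycle(11123, nand_clk);	/* ~11.1 us -> 1735 cycles */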
@@ -408,23 +271,31 @@ static int wait_for_event(struct pxa3xx_nand_info *info, uint32_t event)
return -ETIMEDOUT;
}
-static int prepare_read_prog_cmd(struct pxa3xx_nand_info *info,
- uint16_t cmd, int column, int page_addr)
+static void pxa3xx_set_datasize(struct pxa3xx_nand_info *info)
{
- const struct pxa3xx_nand_flash *f = info->flash_info;
- const struct pxa3xx_nand_cmdset *cmdset = f->cmdset;
+ int oob_enable = info->reg_ndcr & NDCR_SPARE_EN;
- /* calculate data size */
- switch (f->page_size) {
+ info->data_size = info->page_size;
+ if (!oob_enable) {
+ info->oob_size = 0;
+ return;
+ }
+
+ switch (info->page_size) {
case 2048:
- info->data_size = (info->use_ecc) ? 2088 : 2112;
+ info->oob_size = (info->use_ecc) ? 40 : 64;
break;
case 512:
- info->data_size = (info->use_ecc) ? 520 : 528;
+ info->oob_size = (info->use_ecc) ? 8 : 16;
break;
- default:
- return -EINVAL;
}
+}
+
+static int prepare_read_prog_cmd(struct pxa3xx_nand_info *info,
+ uint16_t cmd, int column, int page_addr)
+{
+ const struct pxa3xx_nand_cmdset *cmdset = info->cmdset;
+ pxa3xx_set_datasize(info);
/* generate values for NDCBx registers */
info->ndcb0 = cmd | ((cmd & 0xff00) ? NDCB0_DBC : 0);
@@ -463,12 +334,13 @@ static int prepare_erase_cmd(struct pxa3xx_nand_info *info,
static int prepare_other_cmd(struct pxa3xx_nand_info *info, uint16_t cmd)
{
- const struct pxa3xx_nand_cmdset *cmdset = info->flash_info->cmdset;
+ const struct pxa3xx_nand_cmdset *cmdset = info->cmdset;
info->ndcb0 = cmd | ((cmd & 0xff00) ? NDCB0_DBC : 0);
info->ndcb1 = 0;
info->ndcb2 = 0;
+ info->oob_size = 0;
if (cmd == cmdset->read_id) {
info->ndcb0 |= NDCB0_CMD_TYPE(3);
info->data_size = 8;
@@ -537,6 +409,9 @@ static int handle_data_pio(struct pxa3xx_nand_info *info)
case STATE_PIO_WRITING:
__raw_writesl(info->mmio_base + NDDB, info->data_buff,
DIV_ROUND_UP(info->data_size, 4));
+ if (info->oob_size > 0)
+ __raw_writesl(info->mmio_base + NDDB, info->oob_buff,
+ DIV_ROUND_UP(info->oob_size, 4));
enable_int(info, NDSR_CS0_BBD | NDSR_CS0_CMDD);
@@ -549,6 +424,9 @@ static int handle_data_pio(struct pxa3xx_nand_info *info)
case STATE_PIO_READING:
__raw_readsl(info->mmio_base + NDDB, info->data_buff,
DIV_ROUND_UP(info->data_size, 4));
+ if (info->oob_size > 0)
+ __raw_readsl(info->mmio_base + NDDB, info->oob_buff,
+ DIV_ROUND_UP(info->oob_size, 4));
break;
default:
printk(KERN_ERR "%s: invalid state %d\n", __func__,
@@ -563,7 +441,7 @@ static int handle_data_pio(struct pxa3xx_nand_info *info)
static void start_data_dma(struct pxa3xx_nand_info *info, int dir_out)
{
struct pxa_dma_desc *desc = info->data_desc;
- int dma_len = ALIGN(info->data_size, 32);
+ int dma_len = ALIGN(info->data_size + info->oob_size, 32);
desc->ddadr = DDADR_STOP;
desc->dcmd = DCMD_ENDIRQEN | DCMD_WIDTH4 | DCMD_BURST32 | dma_len;
@@ -700,8 +578,7 @@ static void pxa3xx_nand_cmdfunc(struct mtd_info *mtd, unsigned command,
int column, int page_addr)
{
struct pxa3xx_nand_info *info = mtd->priv;
- const struct pxa3xx_nand_flash *flash_info = info->flash_info;
- const struct pxa3xx_nand_cmdset *cmdset = flash_info->cmdset;
+ const struct pxa3xx_nand_cmdset *cmdset = info->cmdset;
int ret;
info->use_dma = (use_dma) ? 1 : 0;
@@ -925,8 +802,7 @@ static int pxa3xx_nand_ecc_correct(struct mtd_info *mtd,
static int __readid(struct pxa3xx_nand_info *info, uint32_t *id)
{
- const struct pxa3xx_nand_flash *f = info->flash_info;
- const struct pxa3xx_nand_cmdset *cmdset = f->cmdset;
+ const struct pxa3xx_nand_cmdset *cmdset = info->cmdset;
uint32_t ndcr;
uint8_t id_buff[8];
@@ -968,7 +844,9 @@ static int pxa3xx_nand_config_flash(struct pxa3xx_nand_info *info,
return -EINVAL;
/* calculate flash information */
- info->oob_size = (f->page_size == 2048) ? 64 : 16;
+ info->cmdset = f->cmdset;
+ info->page_size = f->page_size;
+ info->oob_buff = info->data_buff + f->page_size;
info->read_id_bytes = (f->page_size == 2048) ? 4 : 2;
/* calculate addressing information */
@@ -992,49 +870,20 @@ static int pxa3xx_nand_config_flash(struct pxa3xx_nand_info *info,
info->reg_ndcr = ndcr;
pxa3xx_nand_set_timing(info, f->timing);
- info->flash_info = f;
return 0;
}
-static void pxa3xx_nand_detect_timing(struct pxa3xx_nand_info *info,
- struct pxa3xx_nand_timing *t)
-{
- unsigned long nand_clk = clk_get_rate(info->clk);
- uint32_t ndtr0 = nand_readl(info, NDTR0CS0);
- uint32_t ndtr1 = nand_readl(info, NDTR1CS0);
-
- t->tCH = cycle2ns(tCH_NDTR0(ndtr0), nand_clk);
- t->tCS = cycle2ns(tCS_NDTR0(ndtr0), nand_clk);
- t->tWH = cycle2ns(tWH_NDTR0(ndtr0), nand_clk);
- t->tWP = cycle2ns(tWP_NDTR0(ndtr0), nand_clk);
- t->tRH = cycle2ns(tRH_NDTR0(ndtr0), nand_clk);
- t->tRP = cycle2ns(tRP_NDTR0(ndtr0), nand_clk);
-
- t->tR = cycle2ns(tR_NDTR1(ndtr1), nand_clk);
- t->tWHR = cycle2ns(tWHR_NDTR1(ndtr1), nand_clk);
- t->tAR = cycle2ns(tAR_NDTR1(ndtr1), nand_clk);
-}
-
static int pxa3xx_nand_detect_config(struct pxa3xx_nand_info *info)
{
uint32_t ndcr = nand_readl(info, NDCR);
struct nand_flash_dev *type = NULL;
- uint32_t id = -1;
+ uint32_t id = -1, page_per_block, num_blocks;
int i;
- default_flash.page_per_block = ndcr & NDCR_PG_PER_BLK ? 64 : 32;
- default_flash.page_size = ndcr & NDCR_PAGE_SZ ? 2048 : 512;
- default_flash.flash_width = ndcr & NDCR_DWIDTH_M ? 16 : 8;
- default_flash.dfc_width = ndcr & NDCR_DWIDTH_C ? 16 : 8;
-
- if (default_flash.page_size == 2048)
- default_flash.cmdset = &largepage_cmdset;
- else
- default_flash.cmdset = &smallpage_cmdset;
-
+ page_per_block = ndcr & NDCR_PG_PER_BLK ? 64 : 32;
+ info->page_size = ndcr & NDCR_PAGE_SZ ? 2048 : 512;
/* set info fields needed to __readid */
- info->flash_info = &default_flash;
- info->read_id_bytes = (default_flash.page_size == 2048) ? 4 : 2;
+ info->read_id_bytes = (info->page_size == 2048) ? 4 : 2;
info->reg_ndcr = ndcr;
if (__readid(info, &id))
@@ -1053,21 +902,20 @@ static int pxa3xx_nand_detect_config(struct pxa3xx_nand_info *info)
return -ENODEV;
/* fill the missing flash information */
- i = __ffs(default_flash.page_per_block * default_flash.page_size);
- default_flash.num_blocks = type->chipsize << (20 - i);
-
- info->oob_size = (default_flash.page_size == 2048) ? 64 : 16;
+ i = __ffs(page_per_block * info->page_size);
+ num_blocks = type->chipsize << (20 - i);
/* calculate addressing information */
- info->col_addr_cycles = (default_flash.page_size == 2048) ? 2 : 1;
+ info->col_addr_cycles = (info->page_size == 2048) ? 2 : 1;
- if (default_flash.num_blocks * default_flash.page_per_block > 65536)
+ if (num_blocks * page_per_block > 65536)
info->row_addr_cycles = 3;
else
info->row_addr_cycles = 2;
- pxa3xx_nand_detect_timing(info, &default_timing);
- default_flash.timing = &default_timing;
+ info->ndtr0cs0 = nand_readl(info, NDTR0CS0);
+ info->ndtr1cs0 = nand_readl(info, NDTR1CS0);
+ info->cmdset = &default_cmdset;
return 0;
}
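The block-count arithmetic above recovers the number of erase blocks from the ID-table chip size (in MiB): num_blocks = chipsize << (20 - __ffs(pages_per_block * page_size)), i.e. the chip size in bytes divided by the erase-block size. A worked sketch with illustrative values:

unsigned int page_per_block = 64, page_size = 2048, chipsize_mib = 256;
int shift = __ffs(page_per_block * page_size);		/* 64 * 2048 = 128 KiB -> 17 */
unsigned int num_blocks = chipsize_mib << (20 - shift);	/* 256 MiB / 128 KiB = 2048 */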
@@ -1083,38 +931,29 @@ static int pxa3xx_nand_detect_flash(struct pxa3xx_nand_info *info,
if (pxa3xx_nand_detect_config(info) == 0)
return 0;
- for (i = 0; i<pdata->num_flash; ++i) {
- f = pdata->flash + i;
-
- if (pxa3xx_nand_config_flash(info, f))
- continue;
-
- if (__readid(info, &id))
- continue;
-
- if (id == f->chip_id)
- return 0;
- }
-
-#ifdef CONFIG_MTD_NAND_PXA3xx_BUILTIN
- for (i = 0; i < ARRAY_SIZE(builtin_flash_types); i++) {
-
- f = builtin_flash_types[i];
-
- if (pxa3xx_nand_config_flash(info, f))
- continue;
-
- if (__readid(info, &id))
- continue;
-
- if (id == f->chip_id)
+ /* we use default timing to detect id */
+ f = DEFAULT_FLASH_TYPE;
+ pxa3xx_nand_config_flash(info, f);
+ if (__readid(info, &id))
+ goto fail_detect;
+
+ for (i=0; i<ARRAY_SIZE(builtin_flash_types) + pdata->num_flash - 1; i++) {
+ /* we first choose the flash definition from platform */
+ if (i < pdata->num_flash)
+ f = pdata->flash + i;
+ else
+ f = &builtin_flash_types[i - pdata->num_flash + 1];
+ if (f->chip_id == id) {
+ dev_info(&info->pdev->dev, "detect chip id: 0x%x\n", id);
+ pxa3xx_nand_config_flash(info, f);
return 0;
+ }
}
-#endif
dev_warn(&info->pdev->dev,
"failed to detect configured nand flash; found %04x instead of\n",
id);
+fail_detect:
return -ENODEV;
}
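The rewritten detection loop tries the platform-supplied flash definitions first and only then the builtin table, skipping entry 0 (the default used purely to issue READ ID). A sketch of that selection as a hypothetical helper, reusing the field names from the patch:

static const struct pxa3xx_nand_flash *
pick_flash(struct pxa3xx_nand_platform_data *pdata, uint32_t id)
{
	int i, total = pdata->num_flash + ARRAY_SIZE(builtin_flash_types) - 1;

	for (i = 0; i < total; i++) {
		const struct pxa3xx_nand_flash *f;

		if (i < pdata->num_flash)
			f = pdata->flash + i;			/* platform entries first */
		else
			f = &builtin_flash_types[i - pdata->num_flash + 1];
		if (f->chip_id == id)
			return f;				/* matching chip found */
	}
	return NULL;						/* unknown chip id */
}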
@@ -1177,10 +1016,9 @@ static struct nand_ecclayout hw_largepage_ecclayout = {
static void pxa3xx_nand_init_mtd(struct mtd_info *mtd,
struct pxa3xx_nand_info *info)
{
- const struct pxa3xx_nand_flash *f = info->flash_info;
struct nand_chip *this = &info->nand_chip;
- this->options = (f->flash_width == 16) ? NAND_BUSWIDTH_16: 0;
+ this->options = (info->reg_ndcr & NDCR_DWIDTH_C) ? NAND_BUSWIDTH_16: 0;
this->waitfunc = pxa3xx_nand_waitfunc;
this->select_chip = pxa3xx_nand_select_chip;
@@ -1196,9 +1034,9 @@ static void pxa3xx_nand_init_mtd(struct mtd_info *mtd,
this->ecc.hwctl = pxa3xx_nand_ecc_hwctl;
this->ecc.calculate = pxa3xx_nand_ecc_calculate;
this->ecc.correct = pxa3xx_nand_ecc_correct;
- this->ecc.size = f->page_size;
+ this->ecc.size = info->page_size;
- if (f->page_size == 2048)
+ if (info->page_size == 2048)
this->ecc.layout = &hw_largepage_ecclayout;
else
this->ecc.layout = &hw_smallpage_ecclayout;
@@ -1411,9 +1249,11 @@ static int pxa3xx_nand_resume(struct platform_device *pdev)
struct mtd_info *mtd = (struct mtd_info *)platform_get_drvdata(pdev);
struct pxa3xx_nand_info *info = mtd->priv;
+ nand_writel(info, NDTR0CS0, info->ndtr0cs0);
+ nand_writel(info, NDTR1CS0, info->ndtr1cs0);
clk_enable(info->clk);
- return pxa3xx_nand_config_flash(info, info->flash_info);
+ return 0;
}
#else
#define pxa3xx_nand_suspend NULL
diff --git a/drivers/mtd/nand/r852.c b/drivers/mtd/nand/r852.c
index 5169ca6a66bc..d9d7efbc77cc 100644
--- a/drivers/mtd/nand/r852.c
+++ b/drivers/mtd/nand/r852.c
@@ -757,11 +757,6 @@ static irqreturn_t r852_irq(int irq, void *data)
spin_lock_irqsave(&dev->irqlock, flags);
- /* We can recieve shared interrupt while pci is suspended
- in that case reads will return 0xFFFFFFFF.... */
- if (dev->insuspend)
- goto out;
-
/* handle card detection interrupts first */
card_status = r852_read_reg(dev, R852_CARD_IRQ_STA);
r852_write_reg(dev, R852_CARD_IRQ_STA, card_status);
@@ -1035,7 +1030,6 @@ void r852_shutdown(struct pci_dev *pci_dev)
int r852_suspend(struct device *device)
{
struct r852_device *dev = pci_get_drvdata(to_pci_dev(device));
- unsigned long flags;
if (dev->ctlreg & R852_CTL_CARDENABLE)
return -EBUSY;
@@ -1047,43 +1041,22 @@ int r852_suspend(struct device *device)
r852_disable_irqs(dev);
r852_engine_disable(dev);
- spin_lock_irqsave(&dev->irqlock, flags);
- dev->insuspend = 1;
- spin_unlock_irqrestore(&dev->irqlock, flags);
-
- /* At that point, even if interrupt handler is running, it will quit */
- /* So wait for this to happen explictly */
- synchronize_irq(dev->irq);
-
/* If card was pulled off just during the suspend, which is very
unlikely, we will remove it on resume, it too late now
anyway... */
dev->card_unstable = 0;
-
- pci_save_state(to_pci_dev(device));
- return pci_prepare_to_sleep(to_pci_dev(device));
+ return 0;
}
int r852_resume(struct device *device)
{
struct r852_device *dev = pci_get_drvdata(to_pci_dev(device));
- unsigned long flags;
-
- /* Turn on the hardware */
- pci_back_from_sleep(to_pci_dev(device));
- pci_restore_state(to_pci_dev(device));
r852_disable_irqs(dev);
r852_card_update_present(dev);
r852_engine_disable(dev);
- /* Now its safe for IRQ to run */
- spin_lock_irqsave(&dev->irqlock, flags);
- dev->insuspend = 0;
- spin_unlock_irqrestore(&dev->irqlock, flags);
-
-
/* If card status changed, just do the work */
if (dev->card_detected != dev->card_registred) {
dbg("card was %s during low power state",
@@ -1121,7 +1094,6 @@ MODULE_DEVICE_TABLE(pci, r852_pci_id_tbl);
SIMPLE_DEV_PM_OPS(r852_pm_ops, r852_suspend, r852_resume);
-
static struct pci_driver r852_pci_driver = {
.name = DRV_NAME,
.id_table = r852_pci_id_tbl,
diff --git a/drivers/mtd/nand/r852.h b/drivers/mtd/nand/r852.h
index 8096cc280c73..e6a21d9d22c6 100644
--- a/drivers/mtd/nand/r852.h
+++ b/drivers/mtd/nand/r852.h
@@ -140,8 +140,6 @@ struct r852_device {
/* interrupt handling */
spinlock_t irqlock; /* IRQ protecting lock */
int irq; /* irq num */
- int insuspend; /* device is suspended */
-
/* misc */
void *tmp_buffer; /* temporary buffer */
uint8_t ctlreg; /* cached contents of control reg */
diff --git a/drivers/mtd/ofpart.c b/drivers/mtd/ofpart.c
index 8bf7dc6d1ce6..a996718fa6b0 100644
--- a/drivers/mtd/ofpart.c
+++ b/drivers/mtd/ofpart.c
@@ -44,7 +44,7 @@ int __devinit of_mtd_parse_partitions(struct device *dev,
pp = NULL;
i = 0;
while ((pp = of_get_next_child(node, pp))) {
- const u32 *reg;
+ const __be32 *reg;
int len;
reg = of_get_property(pp, "reg", &len);
@@ -53,8 +53,8 @@ int __devinit of_mtd_parse_partitions(struct device *dev,
continue;
}
- (*pparts)[i].offset = reg[0];
- (*pparts)[i].size = reg[1];
+ (*pparts)[i].offset = be32_to_cpu(reg[0]);
+ (*pparts)[i].size = be32_to_cpu(reg[1]);
partname = of_get_property(pp, "label", &len);
if (!partname)
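ofpart gets the same endianness treatment: each partition node's reg property is a pair of big-endian cells, <offset size>. A minimal sketch of reading one entry (the mtd_partition bookkeeping is omitted):

const __be32 *reg;
int len;

reg = of_get_property(pp, "reg", &len);		/* pp: partition child node */
if (reg && len >= 2 * sizeof(*reg)) {
	u64 offset = be32_to_cpu(reg[0]);	/* first cell: offset */
	u64 size   = be32_to_cpu(reg[1]);	/* second cell: size  */
	/* ... fill (*pparts)[i] with offset/size ... */
}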
diff --git a/drivers/mtd/onenand/Kconfig b/drivers/mtd/onenand/Kconfig
index 3f32289fdbb5..4dbd0f58eebf 100644
--- a/drivers/mtd/onenand/Kconfig
+++ b/drivers/mtd/onenand/Kconfig
@@ -32,10 +32,11 @@ config MTD_ONENAND_OMAP2
config MTD_ONENAND_SAMSUNG
tristate "OneNAND on Samsung SOC controller support"
- depends on ARCH_S3C64XX || ARCH_S5PC100 || ARCH_S5PV210
+ depends on ARCH_S3C64XX || ARCH_S5PC100 || ARCH_S5PV210 || ARCH_S5PV310
help
- Support for a OneNAND flash device connected to an Samsung SOC
- S3C64XX/S5PC1XX controller.
+ Support for a OneNAND flash device connected to a Samsung SOC.
+ S3C64XX/S5PC100 use the command mapping method.
+ S5PC110/S5PC210 use the generic OneNAND method.
config MTD_ONENAND_OTP
bool "OneNAND OTP Support"
diff --git a/drivers/mtd/onenand/onenand_base.c b/drivers/mtd/onenand/onenand_base.c
index a2bb520286f8..6b3a875647c9 100644
--- a/drivers/mtd/onenand/onenand_base.c
+++ b/drivers/mtd/onenand/onenand_base.c
@@ -3365,18 +3365,19 @@ static int onenand_lock_user_prot_reg(struct mtd_info *mtd, loff_t from,
static void onenand_check_features(struct mtd_info *mtd)
{
struct onenand_chip *this = mtd->priv;
- unsigned int density, process;
+ unsigned int density, process, numbufs;
/* Lock scheme depends on density and process */
density = onenand_get_density(this->device_id);
process = this->version_id >> ONENAND_VERSION_PROCESS_SHIFT;
+ numbufs = this->read_word(this->base + ONENAND_REG_NUM_BUFFERS) >> 8;
/* Lock scheme */
switch (density) {
case ONENAND_DEVICE_DENSITY_4Gb:
if (ONENAND_IS_DDP(this))
this->options |= ONENAND_HAS_2PLANE;
- else
+ else if (numbufs == 1)
this->options |= ONENAND_HAS_4KB_PAGE;
case ONENAND_DEVICE_DENSITY_2Gb:
@@ -4027,7 +4028,7 @@ int onenand_scan(struct mtd_info *mtd, int maxchips)
mtd->ecclayout = this->ecclayout;
/* Fill in remaining MTD driver data */
- mtd->type = MTD_NANDFLASH;
+ mtd->type = ONENAND_IS_MLC(this) ? MTD_MLCNANDFLASH : MTD_NANDFLASH;
mtd->flags = MTD_CAP_NANDFLASH;
mtd->erase = onenand_erase;
mtd->point = NULL;
diff --git a/drivers/mtd/onenand/samsung.c b/drivers/mtd/onenand/samsung.c
index a460f1b748c2..0de7a05e6de0 100644
--- a/drivers/mtd/onenand/samsung.c
+++ b/drivers/mtd/onenand/samsung.c
@@ -22,6 +22,7 @@
#include <linux/mtd/onenand.h>
#include <linux/mtd/partitions.h>
#include <linux/dma-mapping.h>
+#include <linux/interrupt.h>
#include <asm/mach/flash.h>
#include <plat/regs-onenand.h>
@@ -58,7 +59,7 @@ enum soc_type {
#define MAP_11 (0x3)
#define S3C64XX_CMD_MAP_SHIFT 24
-#define S5PC1XX_CMD_MAP_SHIFT 26
+#define S5PC100_CMD_MAP_SHIFT 26
#define S3C6400_FBA_SHIFT 10
#define S3C6400_FPA_SHIFT 4
@@ -81,6 +82,17 @@ enum soc_type {
#define S5PC110_DMA_TRANS_CMD 0x418
#define S5PC110_DMA_TRANS_STATUS 0x41C
#define S5PC110_DMA_TRANS_DIR 0x420
+#define S5PC110_INTC_DMA_CLR 0x1004
+#define S5PC110_INTC_ONENAND_CLR 0x1008
+#define S5PC110_INTC_DMA_MASK 0x1024
+#define S5PC110_INTC_ONENAND_MASK 0x1028
+#define S5PC110_INTC_DMA_PEND 0x1044
+#define S5PC110_INTC_ONENAND_PEND 0x1048
+#define S5PC110_INTC_DMA_STATUS 0x1064
+#define S5PC110_INTC_ONENAND_STATUS 0x1068
+
+#define S5PC110_INTC_DMA_TD (1 << 24)
+#define S5PC110_INTC_DMA_TE (1 << 16)
#define S5PC110_DMA_CFG_SINGLE (0x0 << 16)
#define S5PC110_DMA_CFG_4BURST (0x2 << 16)
@@ -134,6 +146,7 @@ struct s3c_onenand {
void __iomem *dma_addr;
struct resource *dma_res;
unsigned long phys_base;
+ struct completion complete;
#ifdef CONFIG_MTD_PARTITIONS
struct mtd_partition *parts;
#endif
@@ -191,7 +204,7 @@ static unsigned int s3c64xx_cmd_map(unsigned type, unsigned val)
static unsigned int s5pc1xx_cmd_map(unsigned type, unsigned val)
{
- return (type << S5PC1XX_CMD_MAP_SHIFT) | val;
+ return (type << S5PC100_CMD_MAP_SHIFT) | val;
}
static unsigned int s3c6400_mem_addr(int fba, int fpa, int fsa)
@@ -531,10 +544,13 @@ static int onenand_write_bufferram(struct mtd_info *mtd, int area,
return 0;
}
-static int s5pc110_dma_ops(void *dst, void *src, size_t count, int direction)
+static int (*s5pc110_dma_ops)(void *dst, void *src, size_t count, int direction);
+
+static int s5pc110_dma_poll(void *dst, void *src, size_t count, int direction)
{
void __iomem *base = onenand->dma_addr;
int status;
+ unsigned long timeout;
writel(src, base + S5PC110_DMA_SRC_ADDR);
writel(dst, base + S5PC110_DMA_DST_ADDR);
@@ -552,6 +568,13 @@ static int s5pc110_dma_ops(void *dst, void *src, size_t count, int direction)
writel(S5PC110_DMA_TRANS_CMD_TR, base + S5PC110_DMA_TRANS_CMD);
+ /*
+ * There are no exact timeout values in the spec.
+ * In practice it takes under 1 msec,
+ * so 20 msecs is more than enough.
+ */
+ timeout = jiffies + msecs_to_jiffies(20);
+
do {
status = readl(base + S5PC110_DMA_TRANS_STATUS);
if (status & S5PC110_DMA_TRANS_STATUS_TE) {
@@ -559,13 +582,68 @@ static int s5pc110_dma_ops(void *dst, void *src, size_t count, int direction)
base + S5PC110_DMA_TRANS_CMD);
return -EIO;
}
- } while (!(status & S5PC110_DMA_TRANS_STATUS_TD));
+ } while (!(status & S5PC110_DMA_TRANS_STATUS_TD) &&
+ time_before(jiffies, timeout));
writel(S5PC110_DMA_TRANS_CMD_TDC, base + S5PC110_DMA_TRANS_CMD);
return 0;
}
+static irqreturn_t s5pc110_onenand_irq(int irq, void *data)
+{
+ void __iomem *base = onenand->dma_addr;
+ int status, cmd = 0;
+
+ status = readl(base + S5PC110_INTC_DMA_STATUS);
+
+ if (likely(status & S5PC110_INTC_DMA_TD))
+ cmd = S5PC110_DMA_TRANS_CMD_TDC;
+
+ if (unlikely(status & S5PC110_INTC_DMA_TE))
+ cmd = S5PC110_DMA_TRANS_CMD_TEC;
+
+ writel(cmd, base + S5PC110_DMA_TRANS_CMD);
+ writel(status, base + S5PC110_INTC_DMA_CLR);
+
+ if (!onenand->complete.done)
+ complete(&onenand->complete);
+
+ return IRQ_HANDLED;
+}
+
+static int s5pc110_dma_irq(void *dst, void *src, size_t count, int direction)
+{
+ void __iomem *base = onenand->dma_addr;
+ int status;
+
+ status = readl(base + S5PC110_INTC_DMA_MASK);
+ if (status) {
+ status &= ~(S5PC110_INTC_DMA_TD | S5PC110_INTC_DMA_TE);
+ writel(status, base + S5PC110_INTC_DMA_MASK);
+ }
+
+ writel(src, base + S5PC110_DMA_SRC_ADDR);
+ writel(dst, base + S5PC110_DMA_DST_ADDR);
+
+ if (direction == S5PC110_DMA_DIR_READ) {
+ writel(S5PC110_DMA_SRC_CFG_READ, base + S5PC110_DMA_SRC_CFG);
+ writel(S5PC110_DMA_DST_CFG_READ, base + S5PC110_DMA_DST_CFG);
+ } else {
+ writel(S5PC110_DMA_SRC_CFG_WRITE, base + S5PC110_DMA_SRC_CFG);
+ writel(S5PC110_DMA_DST_CFG_WRITE, base + S5PC110_DMA_DST_CFG);
+ }
+
+ writel(count, base + S5PC110_DMA_TRANS_SIZE);
+ writel(direction, base + S5PC110_DMA_TRANS_DIR);
+
+ writel(S5PC110_DMA_TRANS_CMD_TR, base + S5PC110_DMA_TRANS_CMD);
+
+ wait_for_completion_timeout(&onenand->complete, msecs_to_jiffies(20));
+
+ return 0;
+}
+
static int s5pc110_read_bufferram(struct mtd_info *mtd, int area,
unsigned char *buffer, int offset, size_t count)
{
@@ -573,7 +651,8 @@ static int s5pc110_read_bufferram(struct mtd_info *mtd, int area,
void __iomem *p;
void *buf = (void *) buffer;
dma_addr_t dma_src, dma_dst;
- int err;
+ int err, page_dma = 0;
+ struct device *dev = &onenand->pdev->dev;
p = this->base + area;
if (ONENAND_CURRENT_BUFFERRAM(this)) {
@@ -597,21 +676,27 @@ static int s5pc110_read_bufferram(struct mtd_info *mtd, int area,
page = vmalloc_to_page(buf);
if (!page)
goto normal;
- buf = page_address(page) + ((size_t) buf & ~PAGE_MASK);
- }
- /* DMA routine */
- dma_src = onenand->phys_base + (p - this->base);
- dma_dst = dma_map_single(&onenand->pdev->dev,
- buf, count, DMA_FROM_DEVICE);
- if (dma_mapping_error(&onenand->pdev->dev, dma_dst)) {
- dev_err(&onenand->pdev->dev,
- "Couldn't map a %d byte buffer for DMA\n", count);
+ page_dma = 1;
+ /* DMA routine */
+ dma_src = onenand->phys_base + (p - this->base);
+ dma_dst = dma_map_page(dev, page, 0, count, DMA_FROM_DEVICE);
+ } else {
+ /* DMA routine */
+ dma_src = onenand->phys_base + (p - this->base);
+ dma_dst = dma_map_single(dev, buf, count, DMA_FROM_DEVICE);
+ }
+ if (dma_mapping_error(dev, dma_dst)) {
+ dev_err(dev, "Couldn't map a %d byte buffer for DMA\n", count);
goto normal;
}
err = s5pc110_dma_ops((void *) dma_dst, (void *) dma_src,
count, S5PC110_DMA_DIR_READ);
- dma_unmap_single(&onenand->pdev->dev, dma_dst, count, DMA_FROM_DEVICE);
+
+ if (page_dma)
+ dma_unmap_page(dev, dma_dst, count, DMA_FROM_DEVICE);
+ else
+ dma_unmap_single(dev, dma_dst, count, DMA_FROM_DEVICE);
if (!err)
return 0;
@@ -759,7 +844,6 @@ static void s3c_onenand_setup(struct mtd_info *mtd)
onenand->cmd_map = s5pc1xx_cmd_map;
} else if (onenand->type == TYPE_S5PC110) {
/* Use generic onenand functions */
- onenand->cmd_map = s5pc1xx_cmd_map;
this->read_bufferram = s5pc110_read_bufferram;
this->chip_probe = s5pc110_chip_probe;
return;
@@ -904,6 +988,20 @@ static int s3c_onenand_probe(struct platform_device *pdev)
}
onenand->phys_base = onenand->base_res->start;
+
+ s5pc110_dma_ops = s5pc110_dma_poll;
+ /* Interrupt support */
+ r = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
+ if (r) {
+ init_completion(&onenand->complete);
+ s5pc110_dma_ops = s5pc110_dma_irq;
+ err = request_irq(r->start, s5pc110_onenand_irq,
+ IRQF_SHARED, "onenand", &onenand);
+ if (err) {
+ dev_err(&pdev->dev, "failed to get irq\n");
+ goto scan_failed;
+ }
+ }
}
if (onenand_scan(mtd, 1)) {
@@ -1000,7 +1098,7 @@ static int s3c_pm_ops_suspend(struct device *dev)
struct onenand_chip *this = mtd->priv;
this->wait(mtd, FL_PM_SUSPENDED);
- return mtd->suspend(mtd);
+ return 0;
}
static int s3c_pm_ops_resume(struct device *dev)
@@ -1009,7 +1107,6 @@ static int s3c_pm_ops_resume(struct device *dev)
struct mtd_info *mtd = platform_get_drvdata(pdev);
struct onenand_chip *this = mtd->priv;
- mtd->resume(mtd);
this->unlock_all(mtd);
return 0;
}
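The samsung OneNAND driver now prefers interrupt-driven DMA when an IRQ resource is present, falling back to the polled path otherwise. A condensed sketch of that pattern, assuming the names used in the patch (the completion is initialised once at probe; the patch itself ignores the timeout return value, the sketch does not):

static irqreturn_t example_dma_irq(int irq, void *data)
{
	/* ack the DMA interrupt and clear the pending bits here */
	complete(&onenand->complete);			/* wake the waiter */
	return IRQ_HANDLED;
}

static int example_dma_transfer(void)
{
	/* program source, destination, size and kick the engine here */
	if (!wait_for_completion_timeout(&onenand->complete,
					 msecs_to_jiffies(20)))
		return -EIO;				/* stricter than the patch */
	return 0;
}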
diff --git a/drivers/mtd/sm_ftl.h b/drivers/mtd/sm_ftl.h
index e30e48e7f63d..43bb7300785b 100644
--- a/drivers/mtd/sm_ftl.h
+++ b/drivers/mtd/sm_ftl.h
@@ -20,7 +20,7 @@
struct ftl_zone {
- int initialized;
+ bool initialized;
int16_t *lba_to_phys_table; /* LBA to physical table */
struct kfifo free_sectors; /* queue of free sectors */
};
@@ -37,8 +37,8 @@ struct sm_ftl {
int zone_count; /* number of zones */
int max_lba; /* maximum lba in a zone */
int smallpagenand; /* 256 bytes/page nand */
- int readonly; /* is FS readonly */
- int unstable;
+ bool readonly; /* is FS readonly */
+ bool unstable;
int cis_block; /* CIS block location */
int cis_boffset; /* CIS offset in the block */
int cis_page_offset; /* CIS offset in the page */
@@ -49,7 +49,7 @@ struct sm_ftl {
int cache_zone; /* zone of cached block */
unsigned char *cache_data; /* cached block data */
long unsigned int cache_data_invalid_bitmap;
- int cache_clean;
+ bool cache_clean;
struct work_struct flush_work;
struct timer_list timer;
diff --git a/drivers/mtd/ubi/io.c b/drivers/mtd/ubi/io.c
index c2960ac9f39c..811775aa8ee8 100644
--- a/drivers/mtd/ubi/io.c
+++ b/drivers/mtd/ubi/io.c
@@ -482,10 +482,17 @@ static int nor_erase_prepare(struct ubi_device *ubi, int pnum)
uint32_t data = 0;
struct ubi_vid_hdr vid_hdr;
- addr = (loff_t)pnum * ubi->peb_size + ubi->vid_hdr_aloffset;
+ /*
+ * It is important to first invalidate the EC header, and then the VID
+ * header. Otherwise a power cut may lead to valid EC header and
+ * invalid VID header, in which case UBI will treat this PEB as
+ * corrupted and will try to preserve it, and print scary warnings (see
+ * the header comment in scan.c for more information).
+ */
+ addr = (loff_t)pnum * ubi->peb_size;
err = ubi->mtd->write(ubi->mtd, addr, 4, &written, (void *)&data);
if (!err) {
- addr -= ubi->vid_hdr_aloffset;
+ addr += ubi->vid_hdr_aloffset;
err = ubi->mtd->write(ubi->mtd, addr, 4, &written,
(void *)&data);
if (!err)
@@ -494,18 +501,24 @@ static int nor_erase_prepare(struct ubi_device *ubi, int pnum)
/*
* We failed to write to the media. This was observed with Spansion
- * S29GL512N NOR flash. Most probably the eraseblock erasure was
- * interrupted at a very inappropriate moment, so it became unwritable.
- * In this case we probably anyway have garbage in this PEB.
+ * S29GL512N NOR flash. Most probably the previous eraseblock erasure
+ * was interrupted at a very inappropriate moment, so it became
+ * unwritable. In this case we probably anyway have garbage in this
+ * PEB.
*/
err1 = ubi_io_read_vid_hdr(ubi, pnum, &vid_hdr, 0);
- if (err1 == UBI_IO_BAD_HDR_EBADMSG || err1 == UBI_IO_BAD_HDR)
- /*
- * The VID header is corrupted, so we can safely erase this
- * PEB and not afraid that it will be treated as a valid PEB in
- * case of an unclean reboot.
- */
- return 0;
+ if (err1 == UBI_IO_BAD_HDR_EBADMSG || err1 == UBI_IO_BAD_HDR) {
+ struct ubi_ec_hdr ec_hdr;
+
+ err1 = ubi_io_read_ec_hdr(ubi, pnum, &ec_hdr, 0);
+ if (err1 == UBI_IO_BAD_HDR_EBADMSG || err1 == UBI_IO_BAD_HDR)
+ /*
+ * Both VID and EC headers are corrupted, so we can
+ * safely erase this PEB and not be afraid that it will be
+ * treated as a valid PEB in case of an unclean reboot.
+ */
+ return 0;
+ }
/*
* The PEB contains a valid VID header, but we cannot invalidate it.
diff --git a/drivers/mtd/ubi/scan.c b/drivers/mtd/ubi/scan.c
index 3c631863bf40..79ca304fc4db 100644
--- a/drivers/mtd/ubi/scan.c
+++ b/drivers/mtd/ubi/scan.c
@@ -787,16 +787,15 @@ static int check_corruption(struct ubi_device *ubi, struct ubi_vid_hdr *vid_hdr,
* erased, so it became unstable and corrupted, and should be
* erased.
*/
- return 0;
+ err = 0;
+ goto out_unlock;
}
if (err)
- return err;
+ goto out_unlock;
- if (ubi_check_pattern(ubi->peb_buf1, 0xFF, ubi->leb_size)) {
- mutex_unlock(&ubi->buf_mutex);
- return 0;
- }
+ if (ubi_check_pattern(ubi->peb_buf1, 0xFF, ubi->leb_size))
+ goto out_unlock;
ubi_err("PEB %d contains corrupted VID header, and the data does not "
"contain all 0xFF, this may be a non-UBI PEB or a severe VID "
@@ -806,8 +805,11 @@ static int check_corruption(struct ubi_device *ubi, struct ubi_vid_hdr *vid_hdr,
pnum, ubi->leb_start, ubi->leb_size);
ubi_dbg_print_hex_dump(KERN_DEBUG, "", DUMP_PREFIX_OFFSET, 32, 1,
ubi->peb_buf1, ubi->leb_size, 1);
+ err = 1;
+
+out_unlock:
mutex_unlock(&ubi->buf_mutex);
- return 1;
+ return err;
}
/**
@@ -951,6 +953,10 @@ static int process_eb(struct ubi_device *ubi, struct ubi_scan_info *si,
* impossible to distinguish it from a PEB which just
* contains garbage because of a power cut during erase
* operation. So we just schedule this PEB for erasure.
+ *
+ * Besides, in case of NOR flash, we deliberately
+ * corrupt both headers because NOR flash erasure is
+ * slow and can start from the end.
*/
err = 0;
else
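check_corruption() now funnels every exit through one label so buf_mutex is always released, instead of returning with the lock still held on some paths. The shape of that fix, with hypothetical helpers standing in for the real reads:

static int example_check(struct ubi_device *ubi)
{
	int err;

	mutex_lock(&ubi->buf_mutex);
	err = read_peb_data(ubi);		/* hypothetical helper */
	if (err)
		goto out_unlock;		/* error: unlock and propagate */
	err = data_looks_corrupted(ubi);	/* hypothetical helper, 0 or 1 */
out_unlock:
	mutex_unlock(&ubi->buf_mutex);
	return err;
}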
diff --git a/drivers/net/3c59x.c b/drivers/net/3c59x.c
index e1da258bbfb7..0a92436f0538 100644
--- a/drivers/net/3c59x.c
+++ b/drivers/net/3c59x.c
@@ -699,7 +699,8 @@ DEFINE_WINDOW_IO(32)
#define DEVICE_PCI(dev) NULL
#endif
-#define VORTEX_PCI(vp) (((vp)->gendev) ? DEVICE_PCI((vp)->gendev) : NULL)
+#define VORTEX_PCI(vp) \
+ ((struct pci_dev *) (((vp)->gendev) ? DEVICE_PCI((vp)->gendev) : NULL))
#ifdef CONFIG_EISA
#define DEVICE_EISA(dev) (((dev)->bus == &eisa_bus_type) ? to_eisa_device((dev)) : NULL)
@@ -707,7 +708,8 @@ DEFINE_WINDOW_IO(32)
#define DEVICE_EISA(dev) NULL
#endif
-#define VORTEX_EISA(vp) (((vp)->gendev) ? DEVICE_EISA((vp)->gendev) : NULL)
+#define VORTEX_EISA(vp) \
+ ((struct eisa_device *) (((vp)->gendev) ? DEVICE_EISA((vp)->gendev) : NULL))
/* The action to take with a media selection timer tick.
Note that we deviate from the 3Com order by checking 10base2 before AUI.
diff --git a/drivers/net/8139cp.c b/drivers/net/8139cp.c
index ac422cd332ea..dd16e83933a2 100644
--- a/drivers/net/8139cp.c
+++ b/drivers/net/8139cp.c
@@ -490,13 +490,11 @@ static inline unsigned int cp_rx_csum_ok (u32 status)
{
unsigned int protocol = (status >> 16) & 0x3;
- if (likely((protocol == RxProtoTCP) && (!(status & TCPFail))))
+ if (((protocol == RxProtoTCP) && !(status & TCPFail)) ||
+ ((protocol == RxProtoUDP) && !(status & UDPFail)))
return 1;
- else if ((protocol == RxProtoUDP) && (!(status & UDPFail)))
- return 1;
- else if ((protocol == RxProtoIP) && (!(status & IPFail)))
- return 1;
- return 0;
+ else
+ return 0;
}
static int cp_rx_poll(struct napi_struct *napi, int budget)
diff --git a/drivers/net/Kconfig b/drivers/net/Kconfig
index 77c1fab7d774..4f1755bddf6b 100644
--- a/drivers/net/Kconfig
+++ b/drivers/net/Kconfig
@@ -883,14 +883,6 @@ config BFIN_RX_DESC_NUM
help
Set the number of buffer packets used in driver.
-config BFIN_MAC_RMII
- bool "RMII PHY Interface"
- depends on BFIN_MAC
- default y if BFIN527_EZKIT
- default n if BFIN537_STAMP
- help
- Use Reduced PHY MII Interface
-
config BFIN_MAC_USE_HWSTAMP
bool "Use IEEE 1588 hwstamp"
depends on BFIN_MAC && BF518
@@ -954,6 +946,8 @@ config NET_NETX
config TI_DAVINCI_EMAC
tristate "TI DaVinci EMAC Support"
depends on ARM && ( ARCH_DAVINCI || ARCH_OMAP3 )
+ select TI_DAVINCI_MDIO
+ select TI_DAVINCI_CPDMA
select PHYLIB
help
This driver supports TI's DaVinci Ethernet .
@@ -961,6 +955,25 @@ config TI_DAVINCI_EMAC
To compile this driver as a module, choose M here: the module
will be called davinci_emac_driver. This is recommended.
+config TI_DAVINCI_MDIO
+ tristate "TI DaVinci MDIO Support"
+ depends on ARM && ( ARCH_DAVINCI || ARCH_OMAP3 )
+ select PHYLIB
+ help
+ This driver supports TI's DaVinci MDIO module.
+
+ To compile this driver as a module, choose M here: the module
+ will be called davinci_mdio. This is recommended.
+
+config TI_DAVINCI_CPDMA
+ tristate "TI DaVinci CPDMA Support"
+ depends on ARM && ( ARCH_DAVINCI || ARCH_OMAP3 )
+ help
+ This driver supports TI's DaVinci CPDMA DMA engine.
+
+ To compile this driver as a module, choose M here: the module
+ will be called davinci_cpdma. This is recommended.
+
config DM9000
tristate "DM9000 support"
depends on ARM || BLACKFIN || MIPS
@@ -1028,7 +1041,7 @@ config SMC911X
tristate "SMSC LAN911[5678] support"
select CRC32
select MII
- depends on ARM || SUPERH
+ depends on ARM || SUPERH || MN10300
help
This is a driver for SMSC's LAN911x series of Ethernet chipsets
including the new LAN9115, LAN9116, LAN9117, and LAN9118.
@@ -1042,7 +1055,7 @@ config SMC911X
config SMSC911X
tristate "SMSC LAN911x/LAN921x families embedded ethernet support"
- depends on ARM || SUPERH || BLACKFIN || MIPS
+ depends on ARM || SUPERH || BLACKFIN || MIPS || MN10300
select CRC32
select MII
select PHYLIB
@@ -1054,6 +1067,14 @@ config SMSC911X
<file:Documentation/networking/net-modules.txt>. The module
will be called smsc911x.
+config SMSC911X_ARCH_HOOKS
+ def_bool n
+ depends on SMSC911X
+ help
+ If the arch enables this, it allows the arch to implement various
+ hooks for more comprehensive interrupt control and also to override
+ the source of the MAC address.
+
config NET_VENDOR_RACAL
bool "Racal-Interlan (Micom) NI cards"
depends on ISA
@@ -2520,11 +2541,12 @@ source "drivers/net/stmmac/Kconfig"
config PCH_GBE
tristate "PCH Gigabit Ethernet"
depends on PCI
+ select MII
---help---
- This is a gigabit ethernet driver for Topcliff PCH.
- Topcliff PCH is the platform controller hub that is used in Intel's
+ This is a gigabit ethernet driver for EG20T PCH.
+ EG20T PCH is the platform controller hub that is used in Intel's
general embedded platform.
- Topcliff PCH has Gigabit Ethernet interface.
+ EG20T PCH has Gigabit Ethernet interface.
Using this interface, it is able to access system devices connected
to Gigabit Ethernet.
This driver enables Gigabit Ethernet function.
@@ -2923,6 +2945,18 @@ source "drivers/s390/net/Kconfig"
source "drivers/net/caif/Kconfig"
+config TILE_NET
+ tristate "Tilera GBE/XGBE network driver support"
+ depends on TILE
+ default y
+ select CRC32
+ help
+ This is a standard Linux network device driver for the
+ on-chip Tilera Gigabit Ethernet and XAUI interfaces.
+
+ To compile this driver as a module, choose M here: the module
+ will be called tile_net.
+
config XEN_NETDEV_FRONTEND
tristate "Xen network device frontend driver"
depends on XEN
diff --git a/drivers/net/Makefile b/drivers/net/Makefile
index b8bf93d4a132..b90738d13994 100644
--- a/drivers/net/Makefile
+++ b/drivers/net/Makefile
@@ -7,6 +7,8 @@ obj-$(CONFIG_MDIO) += mdio.o
obj-$(CONFIG_PHYLIB) += phy/
obj-$(CONFIG_TI_DAVINCI_EMAC) += davinci_emac.o
+obj-$(CONFIG_TI_DAVINCI_MDIO) += davinci_mdio.o
+obj-$(CONFIG_TI_DAVINCI_CPDMA) += davinci_cpdma.o
obj-$(CONFIG_E1000) += e1000/
obj-$(CONFIG_E1000E) += e1000e/
@@ -299,3 +301,4 @@ obj-$(CONFIG_CAIF) += caif/
obj-$(CONFIG_OCTEON_MGMT_ETHERNET) += octeon/
obj-$(CONFIG_PCH_GBE) += pch_gbe/
+obj-$(CONFIG_TILE_NET) += tile/
diff --git a/drivers/net/atarilance.c b/drivers/net/atarilance.c
index 3134e5326231..8cb27cb7bca1 100644
--- a/drivers/net/atarilance.c
+++ b/drivers/net/atarilance.c
@@ -407,7 +407,7 @@ static noinline int __init addr_accessible(volatile void *regp, int wordflag,
int writeflag)
{
int ret;
- long flags;
+ unsigned long flags;
long *vbr, save_berr;
local_irq_save(flags);
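The atarilance fix is the usual interrupt-flags type correction: local_irq_save()/local_irq_restore() expect an unsigned long, not a plain long. Minimal usage sketch:

unsigned long flags;

local_irq_save(flags);		/* disable local IRQs, remember previous state */
/* ... probe the register window ... */
local_irq_restore(flags);	/* restore the saved state */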
diff --git a/drivers/net/atl1c/atl1c.h b/drivers/net/atl1c/atl1c.h
index ef4115b897bf..9ab58097fa2e 100644
--- a/drivers/net/atl1c/atl1c.h
+++ b/drivers/net/atl1c/atl1c.h
@@ -631,8 +631,6 @@ struct atl1c_adapter {
extern char atl1c_driver_name[];
extern char atl1c_driver_version[];
-extern int atl1c_up(struct atl1c_adapter *adapter);
-extern void atl1c_down(struct atl1c_adapter *adapter);
extern void atl1c_reinit_locked(struct atl1c_adapter *adapter);
extern s32 atl1c_reset_hw(struct atl1c_hw *hw);
extern void atl1c_set_ethtool_ops(struct net_device *netdev);
diff --git a/drivers/net/atl1c/atl1c_hw.c b/drivers/net/atl1c/atl1c_hw.c
index 919080b2c3a5..1bf672009948 100644
--- a/drivers/net/atl1c/atl1c_hw.c
+++ b/drivers/net/atl1c/atl1c_hw.c
@@ -82,7 +82,7 @@ static int atl1c_get_permanent_address(struct atl1c_hw *hw)
addr[0] = addr[1] = 0;
AT_READ_REG(hw, REG_OTP_CTRL, &otp_ctrl_data);
if (atl1c_check_eeprom_exist(hw)) {
- if (hw->nic_type == athr_l1c || hw->nic_type == athr_l2c_b) {
+ if (hw->nic_type == athr_l1c || hw->nic_type == athr_l2c) {
/* Enable OTP CLK */
if (!(otp_ctrl_data & OTP_CTRL_CLK_EN)) {
otp_ctrl_data |= OTP_CTRL_CLK_EN;
diff --git a/drivers/net/atl1c/atl1c_main.c b/drivers/net/atl1c/atl1c_main.c
index 99ffcf667d1f..09b099bfab2b 100644
--- a/drivers/net/atl1c/atl1c_main.c
+++ b/drivers/net/atl1c/atl1c_main.c
@@ -66,6 +66,8 @@ static void atl1c_set_aspm(struct atl1c_hw *hw, bool linkup);
static void atl1c_setup_mac_ctrl(struct atl1c_adapter *adapter);
static void atl1c_clean_rx_irq(struct atl1c_adapter *adapter, u8 que,
int *work_done, int work_to_do);
+static int atl1c_up(struct atl1c_adapter *adapter);
+static void atl1c_down(struct atl1c_adapter *adapter);
static const u16 atl1c_pay_load_size[] = {
128, 256, 512, 1024, 2048, 4096,
@@ -2309,7 +2311,7 @@ static int atl1c_request_irq(struct atl1c_adapter *adapter)
return err;
}
-int atl1c_up(struct atl1c_adapter *adapter)
+static int atl1c_up(struct atl1c_adapter *adapter)
{
struct net_device *netdev = adapter->netdev;
int num;
@@ -2351,7 +2353,7 @@ err_alloc_rx:
return err;
}
-void atl1c_down(struct atl1c_adapter *adapter)
+static void atl1c_down(struct atl1c_adapter *adapter)
{
struct net_device *netdev = adapter->netdev;
diff --git a/drivers/net/atlx/atl1.c b/drivers/net/atlx/atl1.c
index dbd27b8e66bd..53363108994e 100644
--- a/drivers/net/atlx/atl1.c
+++ b/drivers/net/atlx/atl1.c
@@ -91,6 +91,8 @@ MODULE_VERSION(ATLX_DRIVER_VERSION);
/* Temporary hack for merging atl1 and atl2 */
#include "atlx.c"
+static const struct ethtool_ops atl1_ethtool_ops;
+
/*
* This is the only thing that needs to be changed to adjust the
* maximum number of ports that the driver can manage.
@@ -353,7 +355,7 @@ static bool atl1_read_eeprom(struct atl1_hw *hw, u32 offset, u32 *p_value)
* hw - Struct containing variables accessed by shared code
* reg_addr - address of the PHY register to read
*/
-s32 atl1_read_phy_reg(struct atl1_hw *hw, u16 reg_addr, u16 *phy_data)
+static s32 atl1_read_phy_reg(struct atl1_hw *hw, u16 reg_addr, u16 *phy_data)
{
u32 val;
int i;
@@ -553,7 +555,7 @@ static s32 atl1_read_mac_addr(struct atl1_hw *hw)
* 1. calcu 32bit CRC for multicast address
* 2. reverse crc with MSB to LSB
*/
-u32 atl1_hash_mc_addr(struct atl1_hw *hw, u8 *mc_addr)
+static u32 atl1_hash_mc_addr(struct atl1_hw *hw, u8 *mc_addr)
{
u32 crc32, value = 0;
int i;
@@ -570,7 +572,7 @@ u32 atl1_hash_mc_addr(struct atl1_hw *hw, u8 *mc_addr)
* hw - Struct containing variables accessed by shared code
* hash_value - Multicast address hash value
*/
-void atl1_hash_set(struct atl1_hw *hw, u32 hash_value)
+static void atl1_hash_set(struct atl1_hw *hw, u32 hash_value)
{
u32 hash_bit, hash_reg;
u32 mta;
@@ -914,7 +916,7 @@ static s32 atl1_get_speed_and_duplex(struct atl1_hw *hw, u16 *speed, u16 *duplex
return 0;
}
-void atl1_set_mac_addr(struct atl1_hw *hw)
+static void atl1_set_mac_addr(struct atl1_hw *hw)
{
u32 value;
/*
@@ -3041,7 +3043,6 @@ static int __devinit atl1_probe(struct pci_dev *pdev,
atl1_pcie_patch(adapter);
/* assume we have no link for now */
netif_carrier_off(netdev);
- netif_stop_queue(netdev);
setup_timer(&adapter->phy_config_timer, atl1_phy_config,
(unsigned long)adapter);
@@ -3658,7 +3659,7 @@ static int atl1_nway_reset(struct net_device *netdev)
return 0;
}
-const struct ethtool_ops atl1_ethtool_ops = {
+static const struct ethtool_ops atl1_ethtool_ops = {
.get_settings = atl1_get_settings,
.set_settings = atl1_set_settings,
.get_drvinfo = atl1_get_drvinfo,
diff --git a/drivers/net/atlx/atl1.h b/drivers/net/atlx/atl1.h
index 9c0ddb273ac8..68de8cbfb3ec 100644
--- a/drivers/net/atlx/atl1.h
+++ b/drivers/net/atlx/atl1.h
@@ -56,16 +56,13 @@ struct atl1_adapter;
struct atl1_hw;
/* function prototypes needed by multiple files */
-u32 atl1_hash_mc_addr(struct atl1_hw *hw, u8 *mc_addr);
-void atl1_hash_set(struct atl1_hw *hw, u32 hash_value);
-s32 atl1_read_phy_reg(struct atl1_hw *hw, u16 reg_addr, u16 *phy_data);
-void atl1_set_mac_addr(struct atl1_hw *hw);
+static u32 atl1_hash_mc_addr(struct atl1_hw *hw, u8 *mc_addr);
+static void atl1_hash_set(struct atl1_hw *hw, u32 hash_value);
+static void atl1_set_mac_addr(struct atl1_hw *hw);
static int atl1_mii_ioctl(struct net_device *netdev, struct ifreq *ifr,
int cmd);
static u32 atl1_check_link(struct atl1_adapter *adapter);
-extern const struct ethtool_ops atl1_ethtool_ops;
-
/* hardware definitions specific to L1 */
/* Block IDLE Status Register */
diff --git a/drivers/net/atlx/atlx.c b/drivers/net/atlx/atlx.c
index f979ea2d6d3c..afb7f7dd1bb1 100644
--- a/drivers/net/atlx/atlx.c
+++ b/drivers/net/atlx/atlx.c
@@ -41,6 +41,10 @@
#include "atlx.h"
+static s32 atlx_read_phy_reg(struct atl1_hw *hw, u16 reg_addr, u16 *phy_data);
+static u32 atlx_hash_mc_addr(struct atl1_hw *hw, u8 *mc_addr);
+static void atlx_set_mac_addr(struct atl1_hw *hw);
+
static struct atlx_spi_flash_dev flash_table[] = {
/* MFR_NAME WRSR READ PRGM WREN WRDI RDSR RDID SEC_ERS CHIP_ERS */
{"Atmel", 0x00, 0x03, 0x02, 0x06, 0x04, 0x05, 0x15, 0x52, 0x62},
diff --git a/drivers/net/au1000_eth.c b/drivers/net/au1000_eth.c
index 43489f89c142..53eff9ba6e95 100644
--- a/drivers/net/au1000_eth.c
+++ b/drivers/net/au1000_eth.c
@@ -155,10 +155,10 @@ static void au1000_enable_mac(struct net_device *dev, int force_reset)
spin_lock_irqsave(&aup->lock, flags);
if (force_reset || (!aup->mac_enabled)) {
- writel(MAC_EN_CLOCK_ENABLE, &aup->enable);
+ writel(MAC_EN_CLOCK_ENABLE, aup->enable);
au_sync_delay(2);
writel((MAC_EN_RESET0 | MAC_EN_RESET1 | MAC_EN_RESET2
- | MAC_EN_CLOCK_ENABLE), &aup->enable);
+ | MAC_EN_CLOCK_ENABLE), aup->enable);
au_sync_delay(2);
aup->mac_enabled = 1;
@@ -503,9 +503,9 @@ static void au1000_reset_mac_unlocked(struct net_device *dev)
au1000_hard_stop(dev);
- writel(MAC_EN_CLOCK_ENABLE, &aup->enable);
+ writel(MAC_EN_CLOCK_ENABLE, aup->enable);
au_sync_delay(2);
- writel(0, &aup->enable);
+ writel(0, aup->enable);
au_sync_delay(2);
aup->tx_full = 0;
@@ -1119,7 +1119,7 @@ static int __devinit au1000_probe(struct platform_device *pdev)
/* set a random MAC now in case platform_data doesn't provide one */
random_ether_addr(dev->dev_addr);
- writel(0, &aup->enable);
+ writel(0, aup->enable);
aup->mac_enabled = 0;
pd = pdev->dev.platform_data;
diff --git a/drivers/net/benet/be_cmds.c b/drivers/net/benet/be_cmds.c
index 1e7f305ed00b..36eca1ce75d4 100644
--- a/drivers/net/benet/be_cmds.c
+++ b/drivers/net/benet/be_cmds.c
@@ -1471,42 +1471,6 @@ err:
return status;
}
-/* Uses sync mcc */
-int be_cmd_read_port_type(struct be_adapter *adapter, u32 port,
- u8 *connector)
-{
- struct be_mcc_wrb *wrb;
- struct be_cmd_req_port_type *req;
- int status;
-
- spin_lock_bh(&adapter->mcc_lock);
-
- wrb = wrb_from_mccq(adapter);
- if (!wrb) {
- status = -EBUSY;
- goto err;
- }
- req = embedded_payload(wrb);
-
- be_wrb_hdr_prepare(wrb, sizeof(struct be_cmd_resp_port_type), true, 0,
- OPCODE_COMMON_READ_TRANSRECV_DATA);
-
- be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
- OPCODE_COMMON_READ_TRANSRECV_DATA, sizeof(*req));
-
- req->port = cpu_to_le32(port);
- req->page_num = cpu_to_le32(TR_PAGE_A0);
- status = be_mcc_notify_wait(adapter);
- if (!status) {
- struct be_cmd_resp_port_type *resp = embedded_payload(wrb);
- *connector = resp->data.connector;
- }
-
-err:
- spin_unlock_bh(&adapter->mcc_lock);
- return status;
-}
-
int be_cmd_write_flashrom(struct be_adapter *adapter, struct be_dma_mem *cmd,
u32 flash_type, u32 flash_opcode, u32 buf_size)
{
diff --git a/drivers/net/benet/be_cmds.h b/drivers/net/benet/be_cmds.h
index c7f6cdfe1c73..8469ff061f30 100644
--- a/drivers/net/benet/be_cmds.h
+++ b/drivers/net/benet/be_cmds.h
@@ -1022,8 +1022,6 @@ extern int be_cmd_set_beacon_state(struct be_adapter *adapter,
u8 port_num, u8 beacon, u8 status, u8 state);
extern int be_cmd_get_beacon_state(struct be_adapter *adapter,
u8 port_num, u32 *state);
-extern int be_cmd_read_port_type(struct be_adapter *adapter, u32 port,
- u8 *connector);
extern int be_cmd_write_flashrom(struct be_adapter *adapter,
struct be_dma_mem *cmd, u32 flash_oper,
u32 flash_opcode, u32 buf_size);
diff --git a/drivers/net/benet/be_main.c b/drivers/net/benet/be_main.c
index 45b1f6635282..93354eee2cfd 100644
--- a/drivers/net/benet/be_main.c
+++ b/drivers/net/benet/be_main.c
@@ -849,20 +849,16 @@ static void be_rx_stats_update(struct be_rx_obj *rxo,
stats->rx_mcast_pkts++;
}
-static inline bool do_pkt_csum(struct be_eth_rx_compl *rxcp, bool cso)
+static inline bool csum_passed(struct be_eth_rx_compl *rxcp)
{
- u8 l4_cksm, ip_version, ipcksm, tcpf = 0, udpf = 0, ipv6_chk;
+ u8 l4_cksm, ipv6, ipcksm;
l4_cksm = AMAP_GET_BITS(struct amap_eth_rx_compl, l4_cksm, rxcp);
ipcksm = AMAP_GET_BITS(struct amap_eth_rx_compl, ipcksm, rxcp);
- ip_version = AMAP_GET_BITS(struct amap_eth_rx_compl, ip_version, rxcp);
- if (ip_version) {
- tcpf = AMAP_GET_BITS(struct amap_eth_rx_compl, tcpf, rxcp);
- udpf = AMAP_GET_BITS(struct amap_eth_rx_compl, udpf, rxcp);
- }
- ipv6_chk = (ip_version && (tcpf || udpf));
+ ipv6 = AMAP_GET_BITS(struct amap_eth_rx_compl, ip_version, rxcp);
- return ((l4_cksm && ipv6_chk && ipcksm) && cso) ? false : true;
+ /* Ignore ipcksm for ipv6 pkts */
+ return l4_cksm && (ipcksm || ipv6);
}
static struct be_rx_page_info *
@@ -1017,10 +1013,10 @@ static void be_rx_compl_process(struct be_adapter *adapter,
skb_fill_rx_data(adapter, rxo, skb, rxcp, num_rcvd);
- if (do_pkt_csum(rxcp, adapter->rx_csum))
- skb_checksum_none_assert(skb);
- else
+ if (likely(adapter->rx_csum && csum_passed(rxcp)))
skb->ip_summed = CHECKSUM_UNNECESSARY;
+ else
+ skb_checksum_none_assert(skb);
skb->truesize = skb->len + sizeof(struct sk_buff);
skb->protocol = eth_type_trans(skb, adapter->netdev);
@@ -1674,7 +1670,7 @@ static inline bool do_gro(struct be_adapter *adapter, struct be_rx_obj *rxo,
return (tcp_frame && !err) ? true : false;
}
-int be_poll_rx(struct napi_struct *napi, int budget)
+static int be_poll_rx(struct napi_struct *napi, int budget)
{
struct be_eq_obj *rx_eq = container_of(napi, struct be_eq_obj, napi);
struct be_rx_obj *rxo = container_of(rx_eq, struct be_rx_obj, rx_eq);
@@ -1806,6 +1802,20 @@ static void be_worker(struct work_struct *work)
struct be_rx_obj *rxo;
int i;
+ /* when interrupts are not yet enabled, just reap any pending
+ * mcc completions */
+ if (!netif_running(adapter->netdev)) {
+ int mcc_compl, status = 0;
+
+ mcc_compl = be_process_mcc(adapter, &status);
+
+ if (mcc_compl) {
+ struct be_mcc_obj *mcc_obj = &adapter->mcc_obj;
+ be_cq_notify(adapter, mcc_obj->cq.id, false, mcc_compl);
+ }
+ goto reschedule;
+ }
+
if (!adapter->stats_ioctl_sent)
be_cmd_get_stats(adapter, &adapter->stats_cmd);
@@ -1824,6 +1834,7 @@ static void be_worker(struct work_struct *work)
if (!adapter->ue_detected)
be_detect_dump_ue(adapter);
+reschedule:
schedule_delayed_work(&adapter->work, msecs_to_jiffies(1000));
}
@@ -2019,8 +2030,6 @@ static int be_close(struct net_device *netdev)
struct be_eq_obj *tx_eq = &adapter->tx_eq;
int vec, i;
- cancel_delayed_work_sync(&adapter->work);
-
be_async_mcc_disable(adapter);
netif_stop_queue(netdev);
@@ -2085,8 +2094,6 @@ static int be_open(struct net_device *netdev)
/* Now that interrupts are on we can process async mcc */
be_async_mcc_enable(adapter);
- schedule_delayed_work(&adapter->work, msecs_to_jiffies(100));
-
status = be_cmd_link_status_query(adapter, &link_up, &mac_speed,
&link_speed);
if (status)
@@ -2299,9 +2306,6 @@ static int be_clear(struct be_adapter *adapter)
#define FW_FILE_HDR_SIGN "ServerEngines Corp. "
-char flash_cookie[2][16] = {"*** SE FLAS",
- "H DIRECTORY *** "};
-
static bool be_flash_redboot(struct be_adapter *adapter,
const u8 *p, u32 img_start, int image_size,
int hdr_size)
@@ -2454,6 +2458,12 @@ int be_load_fw(struct be_adapter *adapter, u8 *func)
int status, i = 0, num_imgs = 0;
const u8 *p;
+ if (!netif_running(adapter->netdev)) {
+ dev_err(&adapter->pdev->dev,
+ "Firmware load not allowed (interface is down)\n");
+ return -EPERM;
+ }
+
strcpy(fw_file, func);
status = request_firmware(&fw, fw_file, &adapter->pdev->dev);
@@ -2559,7 +2569,6 @@ static void be_netdev_init(struct net_device *netdev)
netif_napi_add(netdev, &adapter->tx_eq.napi, be_poll_tx_mcc,
BE_NAPI_WEIGHT);
- netif_carrier_off(netdev);
netif_stop_queue(netdev);
}
@@ -2715,6 +2724,8 @@ static void __devexit be_remove(struct pci_dev *pdev)
if (!adapter)
return;
+ cancel_delayed_work_sync(&adapter->work);
+
unregister_netdev(adapter->netdev);
be_clear(adapter);
@@ -2868,8 +2879,10 @@ static int __devinit be_probe(struct pci_dev *pdev,
status = register_netdev(netdev);
if (status != 0)
goto unsetup;
+ netif_carrier_off(netdev);
dev_info(&pdev->dev, "%s port %d\n", nic_name(pdev), adapter->port_num);
+ schedule_delayed_work(&adapter->work, msecs_to_jiffies(100));
return 0;
unsetup:
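The benet changes give the adapter worker a probe-to-remove lifetime: it is scheduled once after register_netdev(), reschedules itself every second, only reaps pending MCC completions while the interface is down, and is cancelled in be_remove(). A sketch of that self-rescheduling shape, reusing the field names from the patch:

static void example_worker(struct work_struct *work)
{
	struct be_adapter *adapter =
		container_of(work, struct be_adapter, work.work);

	if (!netif_running(adapter->netdev))
		goto reschedule;	/* down: only reap MCC completions (omitted) */

	/* periodic stats collection and error detection go here */

reschedule:
	schedule_delayed_work(&adapter->work, msecs_to_jiffies(1000));
}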
diff --git a/drivers/net/bfin_mac.c b/drivers/net/bfin_mac.c
index f7233191162b..ce1e5e9d06f6 100644
--- a/drivers/net/bfin_mac.c
+++ b/drivers/net/bfin_mac.c
@@ -1,7 +1,7 @@
/*
* Blackfin On-Chip MAC Driver
*
- * Copyright 2004-2007 Analog Devices Inc.
+ * Copyright 2004-2010 Analog Devices Inc.
*
* Enter bugs at http://blackfin.uclinux.org/
*
@@ -23,7 +23,6 @@
#include <linux/device.h>
#include <linux/spinlock.h>
#include <linux/mii.h>
-#include <linux/phy.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/ethtool.h>
@@ -76,12 +75,6 @@ static struct net_dma_desc_tx *current_tx_ptr;
static struct net_dma_desc_tx *tx_desc;
static struct net_dma_desc_rx *rx_desc;
-#if defined(CONFIG_BFIN_MAC_RMII)
-static u16 pin_req[] = P_RMII0;
-#else
-static u16 pin_req[] = P_MII0;
-#endif
-
static void desc_list_free(void)
{
struct net_dma_desc_rx *r;
@@ -347,23 +340,23 @@ static void bfin_mac_adjust_link(struct net_device *dev)
}
if (phydev->speed != lp->old_speed) {
-#if defined(CONFIG_BFIN_MAC_RMII)
- u32 opmode = bfin_read_EMAC_OPMODE();
- switch (phydev->speed) {
- case 10:
- opmode |= RMII_10;
- break;
- case 100:
- opmode &= ~(RMII_10);
- break;
- default:
- printk(KERN_WARNING
- "%s: Ack! Speed (%d) is not 10/100!\n",
- DRV_NAME, phydev->speed);
- break;
+ if (phydev->interface == PHY_INTERFACE_MODE_RMII) {
+ u32 opmode = bfin_read_EMAC_OPMODE();
+ switch (phydev->speed) {
+ case 10:
+ opmode |= RMII_10;
+ break;
+ case 100:
+ opmode &= ~RMII_10;
+ break;
+ default:
+ printk(KERN_WARNING
+ "%s: Ack! Speed (%d) is not 10/100!\n",
+ DRV_NAME, phydev->speed);
+ break;
+ }
+ bfin_write_EMAC_OPMODE(opmode);
}
- bfin_write_EMAC_OPMODE(opmode);
-#endif
new_state = 1;
lp->old_speed = phydev->speed;
@@ -392,7 +385,7 @@ static void bfin_mac_adjust_link(struct net_device *dev)
/* MDC = 2.5 MHz */
#define MDC_CLK 2500000
-static int mii_probe(struct net_device *dev)
+static int mii_probe(struct net_device *dev, int phy_mode)
{
struct bfin_mac_local *lp = netdev_priv(dev);
struct phy_device *phydev = NULL;
@@ -411,8 +404,8 @@ static int mii_probe(struct net_device *dev)
sysctl = (sysctl & ~MDCDIV) | SET_MDCDIV(mdc_div);
bfin_write_EMAC_SYSCTL(sysctl);
- /* search for connect PHY device */
- for (i = 0; i < PHY_MAX_ADDR; i++) {
+ /* search for connected PHY device */
+ for (i = 0; i < PHY_MAX_ADDR; ++i) {
struct phy_device *const tmp_phydev = lp->mii_bus->phy_map[i];
if (!tmp_phydev)
@@ -429,13 +422,14 @@ static int mii_probe(struct net_device *dev)
return -ENODEV;
}
-#if defined(CONFIG_BFIN_MAC_RMII)
- phydev = phy_connect(dev, dev_name(&phydev->dev), &bfin_mac_adjust_link,
- 0, PHY_INTERFACE_MODE_RMII);
-#else
+ if (phy_mode != PHY_INTERFACE_MODE_RMII &&
+ phy_mode != PHY_INTERFACE_MODE_MII) {
+ printk(KERN_INFO "%s: Invalid phy interface mode\n", dev->name);
+ return -EINVAL;
+ }
+
phydev = phy_connect(dev, dev_name(&phydev->dev), &bfin_mac_adjust_link,
- 0, PHY_INTERFACE_MODE_MII);
-#endif
+ 0, phy_mode);
if (IS_ERR(phydev)) {
printk(KERN_ERR "%s: Could not attach to PHY\n", dev->name);
@@ -570,6 +564,8 @@ static const struct ethtool_ops bfin_mac_ethtool_ops = {
/**************************************************************************/
void setup_system_regs(struct net_device *dev)
{
+ struct bfin_mac_local *lp = netdev_priv(dev);
+ int i;
unsigned short sysctl;
/*
@@ -577,6 +573,15 @@ void setup_system_regs(struct net_device *dev)
* Configure checksum support and rcve frame word alignment
*/
sysctl = bfin_read_EMAC_SYSCTL();
+ /*
+ * check if interrupt is requested for any PHY,
+ * enable PHY interrupt only if needed
+ */
+ for (i = 0; i < PHY_MAX_ADDR; ++i)
+ if (lp->mii_bus->irq[i] != PHY_POLL)
+ break;
+ if (i < PHY_MAX_ADDR)
+ sysctl |= PHYIE;
sysctl |= RXDWA;
#if defined(BFIN_MAC_CSUM_OFFLOAD)
sysctl |= RXCKS;
@@ -1203,7 +1208,7 @@ static void bfin_mac_disable(void)
/*
* Enable Interrupts, Receive, and Transmit
*/
-static int bfin_mac_enable(void)
+static int bfin_mac_enable(struct phy_device *phydev)
{
int ret;
u32 opmode;
@@ -1233,12 +1238,13 @@ static int bfin_mac_enable(void)
opmode |= DRO | DC | PSF;
opmode |= RE;
-#if defined(CONFIG_BFIN_MAC_RMII)
- opmode |= RMII; /* For Now only 100MBit are supported */
+ if (phydev->interface == PHY_INTERFACE_MODE_RMII) {
+ opmode |= RMII; /* For Now only 100MBit are supported */
#if (defined(CONFIG_BF537) || defined(CONFIG_BF536)) && CONFIG_BF_REV_0_2
- opmode |= TE;
-#endif
+ opmode |= TE;
#endif
+ }
+
/* Turn on the EMAC rx */
bfin_write_EMAC_OPMODE(opmode);
@@ -1270,7 +1276,7 @@ static void bfin_mac_timeout(struct net_device *dev)
if (netif_queue_stopped(lp->ndev))
netif_wake_queue(lp->ndev);
- bfin_mac_enable();
+ bfin_mac_enable(lp->phydev);
/* We can accept TX packets again */
dev->trans_start = jiffies; /* prevent tx timeout */
@@ -1342,11 +1348,19 @@ static void bfin_mac_set_multicast_list(struct net_device *dev)
static int bfin_mac_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd)
{
+ struct bfin_mac_local *lp = netdev_priv(netdev);
+
+ if (!netif_running(netdev))
+ return -EINVAL;
+
switch (cmd) {
case SIOCSHWTSTAMP:
return bfin_mac_hwtstamp_ioctl(netdev, ifr, cmd);
default:
- return -EOPNOTSUPP;
+ if (lp->phydev)
+ return phy_mii_ioctl(lp->phydev, ifr, cmd);
+ else
+ return -EOPNOTSUPP;
}
}
@@ -1394,7 +1408,7 @@ static int bfin_mac_open(struct net_device *dev)
setup_mac_addr(dev->dev_addr);
bfin_mac_disable();
- ret = bfin_mac_enable();
+ ret = bfin_mac_enable(lp->phydev);
if (ret)
return ret;
pr_debug("hardware init finished\n");
@@ -1450,6 +1464,7 @@ static int __devinit bfin_mac_probe(struct platform_device *pdev)
struct net_device *ndev;
struct bfin_mac_local *lp;
struct platform_device *pd;
+ struct bfin_mii_bus_platform_data *mii_bus_data;
int rc;
ndev = alloc_etherdev(sizeof(struct bfin_mac_local));
@@ -1501,11 +1516,12 @@ static int __devinit bfin_mac_probe(struct platform_device *pdev)
if (!lp->mii_bus) {
dev_err(&pdev->dev, "Cannot get mii_bus!\n");
rc = -ENODEV;
- goto out_err_mii_bus_probe;
+ goto out_err_probe_mac;
}
lp->mii_bus->priv = ndev;
+ mii_bus_data = pd->dev.platform_data;
- rc = mii_probe(ndev);
+ rc = mii_probe(ndev, mii_bus_data->phy_mode);
if (rc) {
dev_err(&pdev->dev, "MII Probe failed!\n");
goto out_err_mii_probe;
@@ -1552,8 +1568,6 @@ out_err_request_irq:
out_err_mii_probe:
mdiobus_unregister(lp->mii_bus);
mdiobus_free(lp->mii_bus);
-out_err_mii_bus_probe:
- peripheral_free_list(pin_req);
out_err_probe_mac:
platform_set_drvdata(pdev, NULL);
free_netdev(ndev);
@@ -1576,8 +1590,6 @@ static int __devexit bfin_mac_remove(struct platform_device *pdev)
free_netdev(ndev);
- peripheral_free_list(pin_req);
-
return 0;
}
@@ -1623,12 +1635,21 @@ static int bfin_mac_resume(struct platform_device *pdev)
static int __devinit bfin_mii_bus_probe(struct platform_device *pdev)
{
struct mii_bus *miibus;
+ struct bfin_mii_bus_platform_data *mii_bus_pd;
+ const unsigned short *pin_req;
int rc, i;
+ mii_bus_pd = dev_get_platdata(&pdev->dev);
+ if (!mii_bus_pd) {
+ dev_err(&pdev->dev, "No peripherals in platform data!\n");
+ return -EINVAL;
+ }
+
/*
* We are setting up a network card,
* so set the GPIO pins to Ethernet mode
*/
+ pin_req = mii_bus_pd->mac_peripherals;
rc = peripheral_request_list(pin_req, DRV_NAME);
if (rc) {
dev_err(&pdev->dev, "Requesting peripherals failed!\n");
@@ -1645,13 +1666,30 @@ static int __devinit bfin_mii_bus_probe(struct platform_device *pdev)
miibus->parent = &pdev->dev;
miibus->name = "bfin_mii_bus";
+ miibus->phy_mask = mii_bus_pd->phy_mask;
+
snprintf(miibus->id, MII_BUS_ID_SIZE, "0");
miibus->irq = kmalloc(sizeof(int)*PHY_MAX_ADDR, GFP_KERNEL);
- if (miibus->irq == NULL)
- goto out_err_alloc;
- for (i = 0; i < PHY_MAX_ADDR; ++i)
+ if (!miibus->irq)
+ goto out_err_irq_alloc;
+
+ for (i = rc; i < PHY_MAX_ADDR; ++i)
miibus->irq[i] = PHY_POLL;
+ rc = clamp(mii_bus_pd->phydev_number, 0, PHY_MAX_ADDR);
+ if (rc != mii_bus_pd->phydev_number)
+ dev_err(&pdev->dev, "Invalid number (%i) of phydevs\n",
+ mii_bus_pd->phydev_number);
+ for (i = 0; i < rc; ++i) {
+ unsigned short phyaddr = mii_bus_pd->phydev_data[i].addr;
+ if (phyaddr < PHY_MAX_ADDR)
+ miibus->irq[phyaddr] = mii_bus_pd->phydev_data[i].irq;
+ else
+ dev_err(&pdev->dev,
+ "Invalid PHY address %i for phydev %i\n",
+ phyaddr, i);
+ }
+
rc = mdiobus_register(miibus);
if (rc) {
dev_err(&pdev->dev, "Cannot register MDIO bus!\n");
@@ -1663,6 +1701,7 @@ static int __devinit bfin_mii_bus_probe(struct platform_device *pdev)
out_err_mdiobus_register:
kfree(miibus->irq);
+out_err_irq_alloc:
mdiobus_free(miibus);
out_err_alloc:
peripheral_free_list(pin_req);
@@ -1673,11 +1712,15 @@ out_err_alloc:
static int __devexit bfin_mii_bus_remove(struct platform_device *pdev)
{
struct mii_bus *miibus = platform_get_drvdata(pdev);
+ struct bfin_mii_bus_platform_data *mii_bus_pd =
+ dev_get_platdata(&pdev->dev);
+
platform_set_drvdata(pdev, NULL);
mdiobus_unregister(miibus);
kfree(miibus->irq);
mdiobus_free(miibus);
- peripheral_free_list(pin_req);
+ peripheral_free_list(mii_bus_pd->mac_peripherals);
+
return 0;
}
diff --git a/drivers/net/bfin_mac.h b/drivers/net/bfin_mac.h
index 04e4050df18b..aed68bed2365 100644
--- a/drivers/net/bfin_mac.h
+++ b/drivers/net/bfin_mac.h
@@ -14,6 +14,8 @@
#include <linux/clocksource.h>
#include <linux/timecompare.h>
#include <linux/timer.h>
+#include <linux/etherdevice.h>
+#include <linux/bfin_mac.h>
#define BFIN_MAC_CSUM_OFFLOAD
diff --git a/drivers/net/bnx2x/bnx2x.h b/drivers/net/bnx2x/bnx2x.h
index 9571ecf48f35..863e73a85fbe 100644
--- a/drivers/net/bnx2x/bnx2x.h
+++ b/drivers/net/bnx2x/bnx2x.h
@@ -20,8 +20,8 @@
* (you will need to reboot afterwards) */
/* #define BNX2X_STOP_ON_ERROR */
-#define DRV_MODULE_VERSION "1.60.00-3"
-#define DRV_MODULE_RELDATE "2010/10/19"
+#define DRV_MODULE_VERSION "1.60.00-4"
+#define DRV_MODULE_RELDATE "2010/11/01"
#define BNX2X_BC_VER 0x040200
#define BNX2X_MULTI_QUEUE
@@ -1288,15 +1288,11 @@ struct bnx2x_func_init_params {
#define WAIT_RAMROD_POLL 0x01
#define WAIT_RAMROD_COMMON 0x02
-int bnx2x_wait_ramrod(struct bnx2x *bp, int state, int idx,
- int *state_p, int flags);
/* dmae */
void bnx2x_read_dmae(struct bnx2x *bp, u32 src_addr, u32 len32);
void bnx2x_write_dmae(struct bnx2x *bp, dma_addr_t dma_addr, u32 dst_addr,
u32 len32);
-void bnx2x_write_dmae_phys_len(struct bnx2x *bp, dma_addr_t phys_addr,
- u32 addr, u32 len);
void bnx2x_post_dmae(struct bnx2x *bp, struct dmae_command *dmae, int idx);
u32 bnx2x_dmae_opcode_add_comp(u32 opcode, u8 comp_type);
u32 bnx2x_dmae_opcode_clr_src_reset(u32 opcode);
@@ -1307,7 +1303,6 @@ int bnx2x_get_gpio(struct bnx2x *bp, int gpio_num, u8 port);
int bnx2x_set_gpio(struct bnx2x *bp, int gpio_num, u32 mode, u8 port);
int bnx2x_set_gpio_int(struct bnx2x *bp, int gpio_num, u32 mode, u8 port);
u32 bnx2x_fw_command(struct bnx2x *bp, u32 command, u32 param);
-void bnx2x_reg_wr_ind(struct bnx2x *bp, u32 addr, u32 val);
void bnx2x_calc_fc_adv(struct bnx2x *bp);
int bnx2x_sp_post(struct bnx2x *bp, int command, int cid,
diff --git a/drivers/net/bnx2x/bnx2x_cmn.c b/drivers/net/bnx2x/bnx2x_cmn.c
index bc5837514074..94d5f59d5a6f 100644
--- a/drivers/net/bnx2x/bnx2x_cmn.c
+++ b/drivers/net/bnx2x/bnx2x_cmn.c
@@ -25,6 +25,7 @@
#include "bnx2x_init.h"
+static int bnx2x_setup_irqs(struct bnx2x *bp);
/* free skb in the packet ring at pos idx
* return idx of last bd freed
@@ -1679,7 +1680,7 @@ static inline u32 bnx2x_xmit_type(struct bnx2x *bp, struct sk_buff *skb)
rc = XMIT_PLAIN;
else {
- if (skb->protocol == htons(ETH_P_IPV6)) {
+ if (vlan_get_protocol(skb) == htons(ETH_P_IPV6)) {
rc = XMIT_CSUM_V6;
if (ipv6_hdr(skb)->nexthdr == IPPROTO_TCP)
rc |= XMIT_CSUM_TCP;
@@ -2187,7 +2188,7 @@ int bnx2x_change_mac_addr(struct net_device *dev, void *p)
}
-int bnx2x_setup_irqs(struct bnx2x *bp)
+static int bnx2x_setup_irqs(struct bnx2x *bp)
{
int rc = 0;
if (bp->flags & USING_MSIX_FLAG) {
diff --git a/drivers/net/bnx2x/bnx2x_cmn.h b/drivers/net/bnx2x/bnx2x_cmn.h
index 5bfe0ab1d2d4..6b28739c5302 100644
--- a/drivers/net/bnx2x/bnx2x_cmn.h
+++ b/drivers/net/bnx2x/bnx2x_cmn.h
@@ -117,13 +117,6 @@ void bnx2x_setup_cnic_irq_info(struct bnx2x *bp);
void bnx2x_int_enable(struct bnx2x *bp);
/**
- * Disable HW interrupts.
- *
- * @param bp
- */
-void bnx2x_int_disable(struct bnx2x *bp);
-
-/**
* Disable interrupts. This function ensures that there are no
* ISRs or SP DPCs (sp_task) are running after it returns.
*
@@ -192,17 +185,6 @@ int bnx2x_setup_client(struct bnx2x *bp, struct bnx2x_fastpath *fp,
int is_leading);
/**
- * Bring down an eth client.
- *
- * @param bp
- * @param p
- *
- * @return int
- */
-int bnx2x_stop_fw_client(struct bnx2x *bp,
- struct bnx2x_client_ramrod_params *p);
-
-/**
* Set number of queues according to mode
*
* @param bp
@@ -250,34 +232,6 @@ int bnx2x_release_hw_lock(struct bnx2x *bp, u32 resource);
*/
void bnx2x_set_eth_mac(struct bnx2x *bp, int set);
-#ifdef BCM_CNIC
-/**
- * Set iSCSI MAC(s) at the next enties in the CAM after the ETH
- * MAC(s). The function will wait until the ramrod completion
- * returns.
- *
- * @param bp driver handle
- * @param set set or clear the CAM entry
- *
- * @return 0 if cussess, -ENODEV if ramrod doesn't return.
- */
-int bnx2x_set_iscsi_eth_mac_addr(struct bnx2x *bp, int set);
-#endif
-
-/**
- * Initialize status block in FW and HW
- *
- * @param bp driver handle
- * @param dma_addr_t mapping
- * @param int sb_id
- * @param int vfid
- * @param u8 vf_valid
- * @param int fw_sb_id
- * @param int igu_sb_id
- */
-void bnx2x_init_sb(struct bnx2x *bp, dma_addr_t mapping, int vfid,
- u8 vf_valid, int fw_sb_id, int igu_sb_id);
-
/**
* Set MAC filtering configurations.
*
@@ -326,7 +280,6 @@ void bnx2x_sp_event(struct bnx2x_fastpath *fp, union eth_rx_cqe *rr_cqe);
* @return int
*/
int bnx2x_func_start(struct bnx2x *bp);
-int bnx2x_func_stop(struct bnx2x *bp);
/**
* Prepare ILT configurations according to current driver
@@ -396,14 +349,6 @@ int bnx2x_enable_msix(struct bnx2x *bp);
int bnx2x_enable_msi(struct bnx2x *bp);
/**
- * Request IRQ vectors from OS.
- *
- * @param bp
- *
- * @return int
- */
-int bnx2x_setup_irqs(struct bnx2x *bp);
-/**
* NAPI callback
*
* @param napi
diff --git a/drivers/net/bnx2x/bnx2x_hsi.h b/drivers/net/bnx2x/bnx2x_hsi.h
index 18c8e23a0e82..4cfd4e9b5586 100644
--- a/drivers/net/bnx2x/bnx2x_hsi.h
+++ b/drivers/net/bnx2x/bnx2x_hsi.h
@@ -244,7 +244,14 @@ struct port_hw_cfg { /* port 0: 0x12c port 1: 0x2bc */
u16 xgxs_config_tx[4]; /* 0x1A0 */
- u32 Reserved1[57]; /* 0x1A8 */
+ u32 Reserved1[56]; /* 0x1A8 */
+ u32 default_cfg; /* 0x288 */
+ /* Enable BAM on KR */
+#define PORT_HW_CFG_ENABLE_BAM_ON_KR_MASK 0x00100000
+#define PORT_HW_CFG_ENABLE_BAM_ON_KR_SHIFT 20
+#define PORT_HW_CFG_ENABLE_BAM_ON_KR_DISABLED 0x00000000
+#define PORT_HW_CFG_ENABLE_BAM_ON_KR_ENABLED 0x00100000
+
u32 speed_capability_mask2; /* 0x28C */
#define PORT_HW_CFG_SPEED_CAPABILITY2_D3_MASK 0x0000FFFF
#define PORT_HW_CFG_SPEED_CAPABILITY2_D3_SHIFT 0
diff --git a/drivers/net/bnx2x/bnx2x_init_ops.h b/drivers/net/bnx2x/bnx2x_init_ops.h
index e65de784182c..a306b0e46b61 100644
--- a/drivers/net/bnx2x/bnx2x_init_ops.h
+++ b/drivers/net/bnx2x/bnx2x_init_ops.h
@@ -16,7 +16,9 @@
#define BNX2X_INIT_OPS_H
static int bnx2x_gunzip(struct bnx2x *bp, const u8 *zbuf, int len);
-
+static void bnx2x_reg_wr_ind(struct bnx2x *bp, u32 addr, u32 val);
+static void bnx2x_write_dmae_phys_len(struct bnx2x *bp, dma_addr_t phys_addr,
+ u32 addr, u32 len);
static void bnx2x_init_str_wr(struct bnx2x *bp, u32 addr, const u32 *data,
u32 len)
@@ -589,7 +591,7 @@ static int bnx2x_ilt_client_mem_op(struct bnx2x *bp, int cli_num, u8 memop)
return rc;
}
-int bnx2x_ilt_mem_op(struct bnx2x *bp, u8 memop)
+static int bnx2x_ilt_mem_op(struct bnx2x *bp, u8 memop)
{
int rc = bnx2x_ilt_client_mem_op(bp, ILT_CLIENT_CDU, memop);
if (!rc)
@@ -635,7 +637,7 @@ static void bnx2x_ilt_line_init_op(struct bnx2x *bp, struct bnx2x_ilt *ilt,
}
}
-void bnx2x_ilt_boundry_init_op(struct bnx2x *bp,
+static void bnx2x_ilt_boundry_init_op(struct bnx2x *bp,
struct ilt_client_info *ilt_cli,
u32 ilt_start, u8 initop)
{
@@ -688,8 +690,10 @@ void bnx2x_ilt_boundry_init_op(struct bnx2x *bp,
}
}
-void bnx2x_ilt_client_init_op_ilt(struct bnx2x *bp, struct bnx2x_ilt *ilt,
- struct ilt_client_info *ilt_cli, u8 initop)
+static void bnx2x_ilt_client_init_op_ilt(struct bnx2x *bp,
+ struct bnx2x_ilt *ilt,
+ struct ilt_client_info *ilt_cli,
+ u8 initop)
{
int i;
@@ -703,8 +707,8 @@ void bnx2x_ilt_client_init_op_ilt(struct bnx2x *bp, struct bnx2x_ilt *ilt,
bnx2x_ilt_boundry_init_op(bp, ilt_cli, ilt->start_line, initop);
}
-void bnx2x_ilt_client_init_op(struct bnx2x *bp,
- struct ilt_client_info *ilt_cli, u8 initop)
+static void bnx2x_ilt_client_init_op(struct bnx2x *bp,
+ struct ilt_client_info *ilt_cli, u8 initop)
{
struct bnx2x_ilt *ilt = BP_ILT(bp);
@@ -720,7 +724,7 @@ static void bnx2x_ilt_client_id_init_op(struct bnx2x *bp,
bnx2x_ilt_client_init_op(bp, ilt_cli, initop);
}
-void bnx2x_ilt_init_op(struct bnx2x *bp, u8 initop)
+static void bnx2x_ilt_init_op(struct bnx2x *bp, u8 initop)
{
bnx2x_ilt_client_id_init_op(bp, ILT_CLIENT_CDU, initop);
bnx2x_ilt_client_id_init_op(bp, ILT_CLIENT_QM, initop);
@@ -752,7 +756,7 @@ static void bnx2x_ilt_init_client_psz(struct bnx2x *bp, int cli_num,
* called during init common stage, ilt clients should be initialized
* prior to calling this function
*/
-void bnx2x_ilt_init_page_size(struct bnx2x *bp, u8 initop)
+static void bnx2x_ilt_init_page_size(struct bnx2x *bp, u8 initop)
{
bnx2x_ilt_init_client_psz(bp, ILT_CLIENT_CDU,
PXP2_REG_RQ_CDU_P_SIZE, initop);
@@ -772,8 +776,8 @@ void bnx2x_ilt_init_page_size(struct bnx2x *bp, u8 initop)
#define QM_INIT(cid_cnt) (cid_cnt > QM_INIT_MIN_CID_COUNT)
/* called during init port stage */
-void bnx2x_qm_init_cid_count(struct bnx2x *bp, int qm_cid_count,
- u8 initop)
+static void bnx2x_qm_init_cid_count(struct bnx2x *bp, int qm_cid_count,
+ u8 initop)
{
int port = BP_PORT(bp);
@@ -814,8 +818,8 @@ static void bnx2x_qm_set_ptr_table(struct bnx2x *bp, int qm_cid_count)
}
/* called during init common stage */
-void bnx2x_qm_init_ptr_table(struct bnx2x *bp, int qm_cid_count,
- u8 initop)
+static void bnx2x_qm_init_ptr_table(struct bnx2x *bp, int qm_cid_count,
+ u8 initop)
{
if (!QM_INIT(qm_cid_count))
return;
@@ -836,8 +840,8 @@ void bnx2x_qm_init_ptr_table(struct bnx2x *bp, int qm_cid_count,
****************************************************************************/
/* called during init func stage */
-void bnx2x_src_init_t2(struct bnx2x *bp, struct src_ent *t2,
- dma_addr_t t2_mapping, int src_cid_count)
+static void bnx2x_src_init_t2(struct bnx2x *bp, struct src_ent *t2,
+ dma_addr_t t2_mapping, int src_cid_count)
{
int i;
int port = BP_PORT(bp);
diff --git a/drivers/net/bnx2x/bnx2x_link.c b/drivers/net/bnx2x/bnx2x_link.c
index 3e99bf9c42b9..580919619252 100644
--- a/drivers/net/bnx2x/bnx2x_link.c
+++ b/drivers/net/bnx2x/bnx2x_link.c
@@ -181,6 +181,12 @@
(_bank + (_addr & 0xf)), \
_val)
+static u8 bnx2x_cl45_read(struct bnx2x *bp, struct bnx2x_phy *phy,
+ u8 devad, u16 reg, u16 *ret_val);
+
+static u8 bnx2x_cl45_write(struct bnx2x *bp, struct bnx2x_phy *phy,
+ u8 devad, u16 reg, u16 val);
+
static u32 bnx2x_bits_en(struct bnx2x *bp, u32 reg, u32 bits)
{
u32 val = REG_RD(bp, reg);
@@ -594,7 +600,7 @@ static u8 bnx2x_bmac2_enable(struct link_params *params,
return 0;
}
-u8 bnx2x_bmac_enable(struct link_params *params,
+static u8 bnx2x_bmac_enable(struct link_params *params,
struct link_vars *vars,
u8 is_lb)
{
@@ -604,7 +610,7 @@ u8 bnx2x_bmac_enable(struct link_params *params,
/* reset and unreset the BigMac */
REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_CLEAR,
(MISC_REGISTERS_RESET_REG_2_RST_BMAC0 << port));
- udelay(10);
+ msleep(1);
REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_SET,
(MISC_REGISTERS_RESET_REG_2_RST_BMAC0 << port));
@@ -2537,122 +2543,6 @@ static void bnx2x_set_xgxs_loopback(struct bnx2x_phy *phy,
}
}
-/*
- *------------------------------------------------------------------------
- * bnx2x_override_led_value -
- *
- * Override the led value of the requested led
- *
- *------------------------------------------------------------------------
- */
-u8 bnx2x_override_led_value(struct bnx2x *bp, u8 port,
- u32 led_idx, u32 value)
-{
- u32 reg_val;
-
- /* If port 0 then use EMAC0, else use EMAC1*/
- u32 emac_base = (port) ? GRCBASE_EMAC1 : GRCBASE_EMAC0;
-
- DP(NETIF_MSG_LINK,
- "bnx2x_override_led_value() port %x led_idx %d value %d\n",
- port, led_idx, value);
-
- switch (led_idx) {
- case 0: /* 10MB led */
- /* Read the current value of the LED register in
- the EMAC block */
- reg_val = REG_RD(bp, emac_base + EMAC_REG_EMAC_LED);
- /* Set the OVERRIDE bit to 1 */
- reg_val |= EMAC_LED_OVERRIDE;
- /* If value is 1, set the 10M_OVERRIDE bit,
- otherwise reset it.*/
- reg_val = (value == 1) ? (reg_val | EMAC_LED_10MB_OVERRIDE) :
- (reg_val & ~EMAC_LED_10MB_OVERRIDE);
- REG_WR(bp, emac_base + EMAC_REG_EMAC_LED, reg_val);
- break;
- case 1: /*100MB led */
- /*Read the current value of the LED register in
- the EMAC block */
- reg_val = REG_RD(bp, emac_base + EMAC_REG_EMAC_LED);
- /* Set the OVERRIDE bit to 1 */
- reg_val |= EMAC_LED_OVERRIDE;
- /* If value is 1, set the 100M_OVERRIDE bit,
- otherwise reset it.*/
- reg_val = (value == 1) ? (reg_val | EMAC_LED_100MB_OVERRIDE) :
- (reg_val & ~EMAC_LED_100MB_OVERRIDE);
- REG_WR(bp, emac_base + EMAC_REG_EMAC_LED, reg_val);
- break;
- case 2: /* 1000MB led */
- /* Read the current value of the LED register in the
- EMAC block */
- reg_val = REG_RD(bp, emac_base + EMAC_REG_EMAC_LED);
- /* Set the OVERRIDE bit to 1 */
- reg_val |= EMAC_LED_OVERRIDE;
- /* If value is 1, set the 1000M_OVERRIDE bit, otherwise
- reset it. */
- reg_val = (value == 1) ? (reg_val | EMAC_LED_1000MB_OVERRIDE) :
- (reg_val & ~EMAC_LED_1000MB_OVERRIDE);
- REG_WR(bp, emac_base + EMAC_REG_EMAC_LED, reg_val);
- break;
- case 3: /* 2500MB led */
- /* Read the current value of the LED register in the
- EMAC block*/
- reg_val = REG_RD(bp, emac_base + EMAC_REG_EMAC_LED);
- /* Set the OVERRIDE bit to 1 */
- reg_val |= EMAC_LED_OVERRIDE;
- /* If value is 1, set the 2500M_OVERRIDE bit, otherwise
- reset it.*/
- reg_val = (value == 1) ? (reg_val | EMAC_LED_2500MB_OVERRIDE) :
- (reg_val & ~EMAC_LED_2500MB_OVERRIDE);
- REG_WR(bp, emac_base + EMAC_REG_EMAC_LED, reg_val);
- break;
- case 4: /*10G led */
- if (port == 0) {
- REG_WR(bp, NIG_REG_LED_10G_P0,
- value);
- } else {
- REG_WR(bp, NIG_REG_LED_10G_P1,
- value);
- }
- break;
- case 5: /* TRAFFIC led */
- /* Find if the traffic control is via BMAC or EMAC */
- if (port == 0)
- reg_val = REG_RD(bp, NIG_REG_NIG_EMAC0_EN);
- else
- reg_val = REG_RD(bp, NIG_REG_NIG_EMAC1_EN);
-
- /* Override the traffic led in the EMAC:*/
- if (reg_val == 1) {
- /* Read the current value of the LED register in
- the EMAC block */
- reg_val = REG_RD(bp, emac_base +
- EMAC_REG_EMAC_LED);
- /* Set the TRAFFIC_OVERRIDE bit to 1 */
- reg_val |= EMAC_LED_OVERRIDE;
- /* If value is 1, set the TRAFFIC bit, otherwise
- reset it.*/
- reg_val = (value == 1) ? (reg_val | EMAC_LED_TRAFFIC) :
- (reg_val & ~EMAC_LED_TRAFFIC);
- REG_WR(bp, emac_base + EMAC_REG_EMAC_LED, reg_val);
- } else { /* Override the traffic led in the BMAC: */
- REG_WR(bp, NIG_REG_LED_CONTROL_OVERRIDE_TRAFFIC_P0
- + port*4, 1);
- REG_WR(bp, NIG_REG_LED_CONTROL_TRAFFIC_P0 + port*4,
- value);
- }
- break;
- default:
- DP(NETIF_MSG_LINK,
- "bnx2x_override_led_value() unknown led index %d "
- "(should be 0-5)\n", led_idx);
- return -EINVAL;
- }
-
- return 0;
-}
-
-
u8 bnx2x_set_led(struct link_params *params,
struct link_vars *vars, u8 mode, u32 speed)
{
@@ -3635,13 +3525,19 @@ static u8 bnx2x_8073_config_init(struct bnx2x_phy *phy,
DP(NETIF_MSG_LINK, "Before rom RX_ALARM(port1): 0x%x\n", tmp1);
/* Enable CL37 BAM */
- bnx2x_cl45_read(bp, phy,
- MDIO_AN_DEVAD,
- MDIO_AN_REG_8073_BAM, &val);
- bnx2x_cl45_write(bp, phy,
- MDIO_AN_DEVAD,
- MDIO_AN_REG_8073_BAM, val | 1);
+ if (REG_RD(bp, params->shmem_base +
+ offsetof(struct shmem_region, dev_info.
+ port_hw_config[params->port].default_cfg)) &
+ PORT_HW_CFG_ENABLE_BAM_ON_KR_ENABLED) {
+ bnx2x_cl45_read(bp, phy,
+ MDIO_AN_DEVAD,
+ MDIO_AN_REG_8073_BAM, &val);
+ bnx2x_cl45_write(bp, phy,
+ MDIO_AN_DEVAD,
+ MDIO_AN_REG_8073_BAM, val | 1);
+ DP(NETIF_MSG_LINK, "Enable CL37 BAM on KR\n");
+ }
if (params->loopback_mode == LOOPBACK_EXT) {
bnx2x_807x_force_10G(bp, phy);
DP(NETIF_MSG_LINK, "Forced speed 10G on 807X\n");
@@ -4099,9 +3995,9 @@ static u8 bnx2x_8727_read_sfp_module_eeprom(struct bnx2x_phy *phy,
return -EINVAL;
}
-u8 bnx2x_read_sfp_module_eeprom(struct bnx2x_phy *phy,
- struct link_params *params, u16 addr,
- u8 byte_cnt, u8 *o_buf)
+static u8 bnx2x_read_sfp_module_eeprom(struct bnx2x_phy *phy,
+ struct link_params *params, u16 addr,
+ u8 byte_cnt, u8 *o_buf)
{
if (phy->type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8726)
return bnx2x_8726_read_sfp_module_eeprom(phy, params, addr,
@@ -5412,7 +5308,7 @@ static u8 bnx2x_848xx_cmn_config_init(struct bnx2x_phy *phy,
{
struct bnx2x *bp = params->bp;
u16 autoneg_val, an_1000_val, an_10_100_val;
- bnx2x_wait_reset_complete(bp, phy);
+
bnx2x_bits_en(bp, NIG_REG_LATCH_BC_0 + params->port*4,
1 << NIG_LATCH_BC_ENABLE_MI_INT);
@@ -5541,6 +5437,7 @@ static u8 bnx2x_8481_config_init(struct bnx2x_phy *phy,
/* HW reset */
bnx2x_ext_phy_hw_reset(bp, params->port);
+ bnx2x_wait_reset_complete(bp, phy);
bnx2x_cl45_write(bp, phy, MDIO_PMA_DEVAD, MDIO_PMA_REG_CTRL, 1<<15);
return bnx2x_848xx_cmn_config_init(phy, params, vars);
@@ -5551,7 +5448,7 @@ static u8 bnx2x_848x3_config_init(struct bnx2x_phy *phy,
struct link_vars *vars)
{
struct bnx2x *bp = params->bp;
- u8 port = params->port, initialize = 1;
+ u8 port, initialize = 1;
u16 val;
u16 temp;
u32 actual_phy_selection;
@@ -5560,11 +5457,16 @@ static u8 bnx2x_848x3_config_init(struct bnx2x_phy *phy,
/* This is just for MDIO_CTL_REG_84823_MEDIA register. */
msleep(1);
+ if (CHIP_IS_E2(bp))
+ port = BP_PATH(bp);
+ else
+ port = params->port;
bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_3,
MISC_REGISTERS_GPIO_OUTPUT_HIGH,
port);
- msleep(200); /* 100 is not enough */
-
+ bnx2x_wait_reset_complete(bp, phy);
+ /* Wait for GPHY to come out of reset */
+ msleep(50);
/* BCM84823 requires that XGXS links up first @ 10G for normal
behavior */
temp = vars->line_speed;
@@ -5735,7 +5637,11 @@ static void bnx2x_848x3_link_reset(struct bnx2x_phy *phy,
struct link_params *params)
{
struct bnx2x *bp = params->bp;
- u8 port = params->port;
+ u8 port;
+ if (CHIP_IS_E2(bp))
+ port = BP_PATH(bp);
+ else
+ port = params->port;
bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_3,
MISC_REGISTERS_GPIO_OUTPUT_LOW,
port);
@@ -6819,13 +6725,6 @@ u8 bnx2x_phy_probe(struct link_params *params)
return 0;
}
-u32 bnx2x_supported_attr(struct link_params *params, u8 phy_idx)
-{
- if (phy_idx < params->num_phys)
- return params->phy[phy_idx].supported;
- return 0;
-}
-
static void set_phy_vars(struct link_params *params)
{
struct bnx2x *bp = params->bp;
@@ -7045,7 +6944,7 @@ u8 bnx2x_link_reset(struct link_params *params, struct link_vars *vars,
u8 reset_ext_phy)
{
struct bnx2x *bp = params->bp;
- u8 phy_index, port = params->port;
+ u8 phy_index, port = params->port, clear_latch_ind = 0;
DP(NETIF_MSG_LINK, "Resetting the link of port %d\n", port);
/* disable attentions */
vars->link_status = 0;
@@ -7083,9 +6982,18 @@ u8 bnx2x_link_reset(struct link_params *params, struct link_vars *vars,
params->phy[phy_index].link_reset(
&params->phy[phy_index],
params);
+ if (params->phy[phy_index].flags &
+ FLAGS_REARM_LATCH_SIGNAL)
+ clear_latch_ind = 1;
}
}
+ if (clear_latch_ind) {
+ /* Clear latching indication */
+ bnx2x_rearm_latch_signal(bp, port, 0);
+ bnx2x_bits_dis(bp, NIG_REG_LATCH_BC_0 + port*4,
+ 1 << NIG_LATCH_BC_ENABLE_MI_INT);
+ }
if (params->phy[INT_PHY].link_reset)
params->phy[INT_PHY].link_reset(
&params->phy[INT_PHY], params);
@@ -7116,6 +7024,7 @@ static u8 bnx2x_8073_common_init_phy(struct bnx2x *bp,
s8 port;
s8 port_of_path = 0;
+ bnx2x_ext_phy_hw_reset(bp, 0);
/* PART1 - Reset both phys */
for (port = PORT_MAX - 1; port >= PORT_0; port--) {
u32 shmem_base, shmem2_base;
@@ -7138,7 +7047,8 @@ static u8 bnx2x_8073_common_init_phy(struct bnx2x *bp,
return -EINVAL;
}
/* disable attentions */
- bnx2x_bits_dis(bp, NIG_REG_MASK_INTERRUPT_PORT0 + port*4,
+ bnx2x_bits_dis(bp, NIG_REG_MASK_INTERRUPT_PORT0 +
+ port_of_path*4,
(NIG_MASK_XGXS0_LINK_STATUS |
NIG_MASK_XGXS0_LINK10G |
NIG_MASK_SERDES0_LINK_STATUS |
@@ -7249,7 +7159,7 @@ static u8 bnx2x_8726_common_init_phy(struct bnx2x *bp,
(1<<(MISC_REGISTERS_GPIO_3 + MISC_REGISTERS_GPIO_PORT_SHIFT)));
REG_WR(bp, MISC_REG_GPIO_EVENT_EN, val);
- bnx2x_ext_phy_hw_reset(bp, 1);
+ bnx2x_ext_phy_hw_reset(bp, 0);
msleep(5);
for (port = 0; port < PORT_MAX; port++) {
u32 shmem_base, shmem2_base;
diff --git a/drivers/net/bnx2x/bnx2x_link.h b/drivers/net/bnx2x/bnx2x_link.h
index 58a4c7199276..171abf8097ee 100644
--- a/drivers/net/bnx2x/bnx2x_link.h
+++ b/drivers/net/bnx2x/bnx2x_link.h
@@ -279,12 +279,6 @@ u8 bnx2x_phy_read(struct link_params *params, u8 phy_addr,
u8 bnx2x_phy_write(struct link_params *params, u8 phy_addr,
u8 devad, u16 reg, u16 val);
-
-u8 bnx2x_cl45_read(struct bnx2x *bp, struct bnx2x_phy *phy,
- u8 devad, u16 reg, u16 *ret_val);
-
-u8 bnx2x_cl45_write(struct bnx2x *bp, struct bnx2x_phy *phy,
- u8 devad, u16 reg, u16 val);
/* Reads the link_status from the shmem,
and update the link vars accordingly */
void bnx2x_link_status_update(struct link_params *input,
@@ -304,8 +298,6 @@ u8 bnx2x_set_led(struct link_params *params, struct link_vars *vars,
#define LED_MODE_OPER 2
#define LED_MODE_FRONT_PANEL_OFF 3
-u8 bnx2x_override_led_value(struct bnx2x *bp, u8 port, u32 led_idx, u32 value);
-
/* bnx2x_handle_module_detect_int should be called upon module detection
interrupt */
void bnx2x_handle_module_detect_int(struct link_params *params);
@@ -325,19 +317,12 @@ void bnx2x_ext_phy_hw_reset(struct bnx2x *bp, u8 port);
/* Reset the external of SFX7101 */
void bnx2x_sfx7101_sp_sw_reset(struct bnx2x *bp, struct bnx2x_phy *phy);
-u8 bnx2x_read_sfp_module_eeprom(struct bnx2x_phy *phy,
- struct link_params *params, u16 addr,
- u8 byte_cnt, u8 *o_buf);
-
void bnx2x_hw_reset_phy(struct link_params *params);
/* Checks if HW lock is required for this phy/board type */
u8 bnx2x_hw_lock_required(struct bnx2x *bp, u32 shmem_base,
u32 shmem2_base);
-/* Returns the aggregative supported attributes of the phys on board */
-u32 bnx2x_supported_attr(struct link_params *params, u8 phy_idx);
-
/* Check swap bit and adjust PHY order */
u32 bnx2x_phy_selection(struct link_params *params);
diff --git a/drivers/net/bnx2x/bnx2x_main.c b/drivers/net/bnx2x/bnx2x_main.c
index ff99a2fc0426..9709b8569666 100644
--- a/drivers/net/bnx2x/bnx2x_main.c
+++ b/drivers/net/bnx2x/bnx2x_main.c
@@ -403,7 +403,7 @@ static inline void storm_memset_hc_disable(struct bnx2x *bp, u8 port,
/* used only at init
* locking is done by mcp
*/
-void bnx2x_reg_wr_ind(struct bnx2x *bp, u32 addr, u32 val)
+static void bnx2x_reg_wr_ind(struct bnx2x *bp, u32 addr, u32 val)
{
pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS, addr);
pci_write_config_dword(bp->pdev, PCICFG_GRC_DATA, val);
@@ -429,7 +429,8 @@ static u32 bnx2x_reg_rd_ind(struct bnx2x *bp, u32 addr)
#define DMAE_DP_DST_PCI "pci dst_addr [%x:%08x]"
#define DMAE_DP_DST_NONE "dst_addr [none]"
-void bnx2x_dp_dmae(struct bnx2x *bp, struct dmae_command *dmae, int msglvl)
+static void bnx2x_dp_dmae(struct bnx2x *bp, struct dmae_command *dmae,
+ int msglvl)
{
u32 src_type = dmae->opcode & DMAE_COMMAND_SRC;
@@ -551,8 +552,9 @@ u32 bnx2x_dmae_opcode(struct bnx2x *bp, u8 src_type, u8 dst_type,
return opcode;
}
-void bnx2x_prep_dmae_with_comp(struct bnx2x *bp, struct dmae_command *dmae,
- u8 src_type, u8 dst_type)
+static void bnx2x_prep_dmae_with_comp(struct bnx2x *bp,
+ struct dmae_command *dmae,
+ u8 src_type, u8 dst_type)
{
memset(dmae, 0, sizeof(struct dmae_command));
@@ -567,7 +569,8 @@ void bnx2x_prep_dmae_with_comp(struct bnx2x *bp, struct dmae_command *dmae,
}
/* issue a dmae command over the init-channel and wait for completion */
-int bnx2x_issue_dmae_with_comp(struct bnx2x *bp, struct dmae_command *dmae)
+static int bnx2x_issue_dmae_with_comp(struct bnx2x *bp,
+ struct dmae_command *dmae)
{
u32 *wb_comp = bnx2x_sp(bp, wb_comp);
int cnt = CHIP_REV_IS_SLOW(bp) ? (400000) : 40;
@@ -674,8 +677,8 @@ void bnx2x_read_dmae(struct bnx2x *bp, u32 src_addr, u32 len32)
bnx2x_issue_dmae_with_comp(bp, &dmae);
}
-void bnx2x_write_dmae_phys_len(struct bnx2x *bp, dma_addr_t phys_addr,
- u32 addr, u32 len)
+static void bnx2x_write_dmae_phys_len(struct bnx2x *bp, dma_addr_t phys_addr,
+ u32 addr, u32 len)
{
int dmae_wr_max = DMAE_LEN32_WR_MAX(bp);
int offset = 0;
@@ -1267,7 +1270,7 @@ static void bnx2x_igu_int_disable(struct bnx2x *bp)
BNX2X_ERR("BUG! proper val not read from IGU!\n");
}
-void bnx2x_int_disable(struct bnx2x *bp)
+static void bnx2x_int_disable(struct bnx2x *bp)
{
if (bp->common.int_block == INT_BLOCK_HC)
bnx2x_hc_int_disable(bp);
@@ -2236,7 +2239,7 @@ u32 bnx2x_fw_command(struct bnx2x *bp, u32 command, u32 param)
}
/* must be called under rtnl_lock */
-void bnx2x_rxq_set_mac_filters(struct bnx2x *bp, u16 cl_id, u32 filters)
+static void bnx2x_rxq_set_mac_filters(struct bnx2x *bp, u16 cl_id, u32 filters)
{
u32 mask = (1 << cl_id);
@@ -2303,7 +2306,7 @@ void bnx2x_rxq_set_mac_filters(struct bnx2x *bp, u16 cl_id, u32 filters)
bp->mac_filters.unmatched_unicast & ~mask;
}
-void bnx2x_func_init(struct bnx2x *bp, struct bnx2x_func_init_params *p)
+static void bnx2x_func_init(struct bnx2x *bp, struct bnx2x_func_init_params *p)
{
struct tstorm_eth_function_common_config tcfg = {0};
u16 rss_flgs;
@@ -2460,7 +2463,7 @@ static void bnx2x_pf_tx_cl_prep(struct bnx2x *bp,
txq_init->hc_rate = bp->tx_ticks ? (1000000 / bp->tx_ticks) : 0;
}
-void bnx2x_pf_init(struct bnx2x *bp)
+static void bnx2x_pf_init(struct bnx2x *bp)
{
struct bnx2x_func_init_params func_init = {0};
struct bnx2x_rss_params rss = {0};
@@ -3928,7 +3931,7 @@ void bnx2x_setup_ndsb_state_machine(struct hc_status_block_sm *hc_sm,
hc_sm->time_to_expire = 0xFFFFFFFF;
}
-void bnx2x_init_sb(struct bnx2x *bp, dma_addr_t mapping, int vfid,
+static void bnx2x_init_sb(struct bnx2x *bp, dma_addr_t mapping, int vfid,
u8 vf_valid, int fw_sb_id, int igu_sb_id)
{
int igu_seg_id;
@@ -6021,6 +6024,9 @@ alloc_mem_err:
/*
* Init service functions
*/
+static int bnx2x_wait_ramrod(struct bnx2x *bp, int state, int idx,
+ int *state_p, int flags);
+
int bnx2x_func_start(struct bnx2x *bp)
{
bnx2x_sp_post(bp, RAMROD_CMD_ID_COMMON_FUNCTION_START, 0, 0, 0, 1);
@@ -6030,7 +6036,7 @@ int bnx2x_func_start(struct bnx2x *bp)
WAIT_RAMROD_COMMON);
}
-int bnx2x_func_stop(struct bnx2x *bp)
+static int bnx2x_func_stop(struct bnx2x *bp)
{
bnx2x_sp_post(bp, RAMROD_CMD_ID_COMMON_FUNCTION_STOP, 0, 0, 0, 1);
@@ -6103,8 +6109,8 @@ static void bnx2x_set_mac_addr_gen(struct bnx2x *bp, int set, u8 *mac,
bnx2x_wait_ramrod(bp, 0, 0, &bp->set_mac_pending, ramrod_flags);
}
-int bnx2x_wait_ramrod(struct bnx2x *bp, int state, int idx,
- int *state_p, int flags)
+static int bnx2x_wait_ramrod(struct bnx2x *bp, int state, int idx,
+ int *state_p, int flags)
{
/* can take a while if any port is running */
int cnt = 5000;
@@ -6154,7 +6160,7 @@ int bnx2x_wait_ramrod(struct bnx2x *bp, int state, int idx,
return -EBUSY;
}
-u8 bnx2x_e1h_cam_offset(struct bnx2x *bp, u8 rel_offset)
+static u8 bnx2x_e1h_cam_offset(struct bnx2x *bp, u8 rel_offset)
{
if (CHIP_IS_E1H(bp))
return E1H_FUNC_MAX * rel_offset + BP_FUNC(bp);
@@ -6273,7 +6279,7 @@ static void bnx2x_invlidate_e1_mc_list(struct bnx2x *bp)
*
* @return 0 if success, -ENODEV if ramrod doesn't return.
*/
-int bnx2x_set_iscsi_eth_mac_addr(struct bnx2x *bp, int set)
+static int bnx2x_set_iscsi_eth_mac_addr(struct bnx2x *bp, int set)
{
u8 cam_offset = (CHIP_IS_E1(bp) ? ((BP_PORT(bp) ? 32 : 0) + 2) :
bnx2x_e1h_cam_offset(bp, CAM_ISCSI_ETH_LINE));
@@ -6383,11 +6389,11 @@ static inline void bnx2x_set_ctx_validation(struct eth_context *cxt, u32 cid)
ETH_CONNECTION_TYPE);
}
-int bnx2x_setup_fw_client(struct bnx2x *bp,
- struct bnx2x_client_init_params *params,
- u8 activate,
- struct client_init_ramrod_data *data,
- dma_addr_t data_mapping)
+static int bnx2x_setup_fw_client(struct bnx2x *bp,
+ struct bnx2x_client_init_params *params,
+ u8 activate,
+ struct client_init_ramrod_data *data,
+ dma_addr_t data_mapping)
{
u16 hc_usec;
int ramrod = RAMROD_CMD_ID_ETH_CLIENT_SETUP;
@@ -6633,7 +6639,8 @@ int bnx2x_setup_client(struct bnx2x *bp, struct bnx2x_fastpath *fp,
return rc;
}
-int bnx2x_stop_fw_client(struct bnx2x *bp, struct bnx2x_client_ramrod_params *p)
+static int bnx2x_stop_fw_client(struct bnx2x *bp,
+ struct bnx2x_client_ramrod_params *p)
{
int rc;
@@ -7440,7 +7447,7 @@ reset_task_exit:
* Init service functions
*/
-u32 bnx2x_get_pretend_reg(struct bnx2x *bp)
+static u32 bnx2x_get_pretend_reg(struct bnx2x *bp)
{
u32 base = PXP2_REG_PGL_PRETEND_FUNC_F0;
u32 stride = PXP2_REG_PGL_PRETEND_FUNC_F1 - base;
@@ -9057,7 +9064,7 @@ static int __devinit bnx2x_init_one(struct pci_dev *pdev,
default:
pr_err("Unknown board_type (%ld), aborting\n",
ent->driver_data);
- return ENODEV;
+ return -ENODEV;
}
cid_count += CNIC_CONTEXT_USE;
diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c
index beb3b7cecd52..71a169740d05 100644
--- a/drivers/net/bonding/bond_main.c
+++ b/drivers/net/bonding/bond_main.c
@@ -493,9 +493,9 @@ static void bond_vlan_rx_register(struct net_device *bond_dev,
struct slave *slave;
int i;
- write_lock(&bond->lock);
+ write_lock_bh(&bond->lock);
bond->vlgrp = grp;
- write_unlock(&bond->lock);
+ write_unlock_bh(&bond->lock);
bond_for_each_slave(bond, slave, i) {
struct net_device *slave_dev = slave->dev;
@@ -878,8 +878,10 @@ static void __bond_resend_igmp_join_requests(struct net_device *dev)
rcu_read_lock();
in_dev = __in_dev_get_rcu(dev);
if (in_dev) {
+ read_lock(&in_dev->mc_list_lock);
for (im = in_dev->mc_list; im; im = im->next)
ip_mc_rejoin_group(im);
+ read_unlock(&in_dev->mc_list_lock);
}
rcu_read_unlock();
diff --git a/drivers/net/caif/Kconfig b/drivers/net/caif/Kconfig
index 75bfc3a9d95f..09ed3f42d673 100644
--- a/drivers/net/caif/Kconfig
+++ b/drivers/net/caif/Kconfig
@@ -31,3 +31,10 @@ config CAIF_SPI_SYNC
Putting the next command and length in the start of the frame can
help to synchronize to the next transfer in case of over or under-runs.
This option also needs to be enabled on the modem.
+
+config CAIF_SHM
+ tristate "CAIF shared memory protocol driver"
+ depends on CAIF && U5500_MBOX
+ default n
+ ---help---
+ The CAIF shared memory protocol driver for the STE UX5500 platform.
diff --git a/drivers/net/caif/Makefile b/drivers/net/caif/Makefile
index 3a11d619452b..b38d987da67d 100644
--- a/drivers/net/caif/Makefile
+++ b/drivers/net/caif/Makefile
@@ -8,3 +8,7 @@ obj-$(CONFIG_CAIF_TTY) += caif_serial.o
# SPI slave physical interfaces module
cfspi_slave-objs := caif_spi.o caif_spi_slave.o
obj-$(CONFIG_CAIF_SPI_SLAVE) += cfspi_slave.o
+
+# Shared memory
+caif_shm-objs := caif_shmcore.o caif_shm_u5500.o
+obj-$(CONFIG_CAIF_SHM) += caif_shm.o
diff --git a/drivers/net/caif/caif_shm_u5500.c b/drivers/net/caif/caif_shm_u5500.c
new file mode 100644
index 000000000000..1cd90da86f13
--- /dev/null
+++ b/drivers/net/caif/caif_shm_u5500.c
@@ -0,0 +1,129 @@
+/*
+ * Copyright (C) ST-Ericsson AB 2010
+ * Contact: Sjur Brendeland / sjur.brandeland@stericsson.com
+ * Author: Amarnath Revanna / amarnath.bangalore.revanna@stericsson.com
+ * License terms: GNU General Public License (GPL) version 2
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ":" __func__ "():" fmt
+
+#include <linux/version.h>
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/netdevice.h>
+#include <mach/mbox.h>
+#include <net/caif/caif_shm.h>
+
+MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("CAIF Shared Memory protocol driver");
+
+#define MAX_SHM_INSTANCES 1
+
+enum {
+ MBX_ACC0,
+ MBX_ACC1,
+ MBX_DSP
+};
+
+static struct shmdev_layer shmdev_lyr[MAX_SHM_INSTANCES];
+
+static unsigned int shm_start;
+static unsigned int shm_size;
+
+module_param(shm_size, uint, 0440);
+MODULE_PARM_DESC(shm_size, "Total size of SHM shared memory");
+
+module_param(shm_start, uint, 0440);
+MODULE_PARM_DESC(shm_start, "Start of SHM shared memory");
+
+static int shmdev_send_msg(u32 dev_id, u32 mbx_msg)
+{
+ /* Always block until msg is written successfully */
+ mbox_send(shmdev_lyr[dev_id].hmbx, mbx_msg, true);
+ return 0;
+}
+
+static int shmdev_mbx_setup(void *pshmdrv_cb, struct shmdev_layer *pshm_dev,
+ void *pshm_drv)
+{
+ /*
+ * For UX5500, we have only 1 SHM instance which uses MBX0
+ * for communication with the peer modem
+ */
+ pshm_dev->hmbx = mbox_setup(MBX_ACC0, pshmdrv_cb, pshm_drv);
+
+ if (!pshm_dev->hmbx)
+ return -ENODEV;
+ else
+ return 0;
+}
+
+static int __init caif_shmdev_init(void)
+{
+ int i, result;
+
+ /* Loop is currently overkill, there is only one instance */
+ for (i = 0; i < MAX_SHM_INSTANCES; i++) {
+
+ shmdev_lyr[i].shm_base_addr = shm_start;
+ shmdev_lyr[i].shm_total_sz = shm_size;
+
+ if (((char *)shmdev_lyr[i].shm_base_addr == NULL)
+ || (shmdev_lyr[i].shm_total_sz <= 0)) {
+ pr_warn("ERROR,"
+ "Shared memory Address and/or Size incorrect"
+ ", Bailing out ...\n");
+ result = -EINVAL;
+ goto clean;
+ }
+
+ pr_info("SHM AREA (instance %d) STARTS"
+ " AT %p\n", i, (char *)shmdev_lyr[i].shm_base_addr);
+
+ shmdev_lyr[i].shm_id = i;
+ shmdev_lyr[i].pshmdev_mbxsend = shmdev_send_msg;
+ shmdev_lyr[i].pshmdev_mbxsetup = shmdev_mbx_setup;
+
+ /*
+ * Finally, CAIF core module is called with details in place:
+ * 1. SHM base address
+ * 2. SHM size
+ * 3. MBX handle
+ */
+ result = caif_shmcore_probe(&shmdev_lyr[i]);
+ if (result) {
+ pr_warn("ERROR[%d],"
+ "Could not probe SHM core (instance %d)"
+ " Bailing out ...\n", result, i);
+ goto clean;
+ }
+ }
+
+ return 0;
+
+clean:
+ /*
+ * For now, we assume that even if one instance of SHM fails, we bail
+ * out of the driver support completely. For this, we need to release
+ * any memory allocated and unregister any instance of SHM net device.
+ */
+ for (i = 0; i < MAX_SHM_INSTANCES; i++) {
+ if (shmdev_lyr[i].pshm_netdev)
+ unregister_netdev(shmdev_lyr[i].pshm_netdev);
+ }
+ return result;
+}
+
+static void __exit caif_shmdev_exit(void)
+{
+ int i;
+
+ for (i = 0; i < MAX_SHM_INSTANCES; i++) {
+ caif_shmcore_remove(shmdev_lyr[i].pshm_netdev);
+ kfree((void *)shmdev_lyr[i].shm_base_addr);
+ }
+
+}
+
+module_init(caif_shmdev_init);
+module_exit(caif_shmdev_exit);
diff --git a/drivers/net/caif/caif_shmcore.c b/drivers/net/caif/caif_shmcore.c
new file mode 100644
index 000000000000..19f9c0656667
--- /dev/null
+++ b/drivers/net/caif/caif_shmcore.c
@@ -0,0 +1,744 @@
+/*
+ * Copyright (C) ST-Ericsson AB 2010
+ * Contact: Sjur Brendeland / sjur.brandeland@stericsson.com
+ * Authors: Amarnath Revanna / amarnath.bangalore.revanna@stericsson.com,
+ * Daniel Martensson / daniel.martensson@stericsson.com
+ * License terms: GNU General Public License (GPL) version 2
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ":" __func__ "():" fmt
+
+#include <linux/spinlock.h>
+#include <linux/sched.h>
+#include <linux/list.h>
+#include <linux/netdevice.h>
+#include <linux/if_arp.h>
+
+#include <net/caif/caif_device.h>
+#include <net/caif/caif_shm.h>
+
+#define NR_TX_BUF 6
+#define NR_RX_BUF 6
+#define TX_BUF_SZ 0x2000
+#define RX_BUF_SZ 0x2000
+
+#define CAIF_NEEDED_HEADROOM 32
+
+#define CAIF_FLOW_ON 1
+#define CAIF_FLOW_OFF 0
+
+#define LOW_WATERMARK 3
+#define HIGH_WATERMARK 4
+
+/* Maximum number of CAIF buffers per shared memory buffer. */
+#define SHM_MAX_FRMS_PER_BUF 10
+
+/*
+ * Size in bytes of the descriptor area
+ * (With end of descriptor signalling)
+ */
+#define SHM_CAIF_DESC_SIZE ((SHM_MAX_FRMS_PER_BUF + 1) * \
+ sizeof(struct shm_pck_desc))
+
+/*
+ * Offset to the first CAIF frame within a shared memory buffer.
+ * Aligned on 32 bytes.
+ */
+#define SHM_CAIF_FRM_OFS (SHM_CAIF_DESC_SIZE + (SHM_CAIF_DESC_SIZE % 32))
+
+/* Number of bytes for CAIF shared memory header. */
+#define SHM_HDR_LEN 1
+
+/* Number of padding bytes for the complete CAIF frame. */
+#define SHM_FRM_PAD_LEN 4
+
+#define CAIF_MAX_MTU 4096
+
+#define SHM_SET_FULL(x) (((x+1) & 0x0F) << 0)
+#define SHM_GET_FULL(x) (((x >> 0) & 0x0F) - 1)
+
+#define SHM_SET_EMPTY(x) (((x+1) & 0x0F) << 4)
+#define SHM_GET_EMPTY(x) (((x >> 4) & 0x0F) - 1)
+
+#define SHM_FULL_MASK (0x0F << 0)
+#define SHM_EMPTY_MASK (0x0F << 4)
+
+struct shm_pck_desc {
+ /*
+ * Offset from start of shared memory area to start of
+ * shared memory CAIF frame.
+ */
+ u32 frm_ofs;
+ u32 frm_len;
+};
+
+struct buf_list {
+ unsigned char *desc_vptr;
+ u32 phy_addr;
+ u32 index;
+ u32 len;
+ u32 frames;
+ u32 frm_ofs;
+ struct list_head list;
+};
+
+struct shm_caif_frm {
+ /* Number of bytes of padding before the CAIF frame. */
+ u8 hdr_ofs;
+};
+
+struct shmdrv_layer {
+ /* caif_dev_common must always be first in the structure*/
+ struct caif_dev_common cfdev;
+
+ u32 shm_tx_addr;
+ u32 shm_rx_addr;
+ u32 shm_base_addr;
+ u32 tx_empty_available;
+ spinlock_t lock;
+
+ struct list_head tx_empty_list;
+ struct list_head tx_pend_list;
+ struct list_head tx_full_list;
+ struct list_head rx_empty_list;
+ struct list_head rx_pend_list;
+ struct list_head rx_full_list;
+
+ struct workqueue_struct *pshm_tx_workqueue;
+ struct workqueue_struct *pshm_rx_workqueue;
+
+ struct work_struct shm_tx_work;
+ struct work_struct shm_rx_work;
+
+ struct sk_buff_head sk_qhead;
+ struct shmdev_layer *pshm_dev;
+};
+
+static int shm_netdev_open(struct net_device *shm_netdev)
+{
+ netif_wake_queue(shm_netdev);
+ return 0;
+}
+
+static int shm_netdev_close(struct net_device *shm_netdev)
+{
+ netif_stop_queue(shm_netdev);
+ return 0;
+}
+
+int caif_shmdrv_rx_cb(u32 mbx_msg, void *priv)
+{
+ struct buf_list *pbuf;
+ struct shmdrv_layer *pshm_drv;
+ struct list_head *pos;
+ u32 avail_emptybuff = 0;
+ unsigned long flags = 0;
+
+ pshm_drv = (struct shmdrv_layer *)priv;
+
+ /* Check for received buffers. */
+ if (mbx_msg & SHM_FULL_MASK) {
+ int idx;
+
+ spin_lock_irqsave(&pshm_drv->lock, flags);
+
+ /* Check whether we have any outstanding buffers. */
+ if (list_empty(&pshm_drv->rx_empty_list)) {
+
+ /* Release spin lock. */
+ spin_unlock_irqrestore(&pshm_drv->lock, flags);
+
+ /* We print even in IRQ context... */
+ pr_warn("No empty Rx buffers to fill: "
+ "mbx_msg:%x\n", mbx_msg);
+
+ /* Bail out. */
+ goto err_sync;
+ }
+
+ pbuf =
+ list_entry(pshm_drv->rx_empty_list.next,
+ struct buf_list, list);
+ idx = pbuf->index;
+
+ /* Check buffer synchronization. */
+ if (idx != SHM_GET_FULL(mbx_msg)) {
+
+ /* We print even in IRQ context... */
+ pr_warn(
+ "phyif_shm_mbx_msg_cb: RX full out of sync:"
+ " idx:%d, msg:%x SHM_GET_FULL(mbx_msg):%x\n",
+ idx, mbx_msg, SHM_GET_FULL(mbx_msg));
+
+ spin_unlock_irqrestore(&pshm_drv->lock, flags);
+
+ /* Bail out. */
+ goto err_sync;
+ }
+
+ list_del_init(&pbuf->list);
+ list_add_tail(&pbuf->list, &pshm_drv->rx_full_list);
+
+ spin_unlock_irqrestore(&pshm_drv->lock, flags);
+
+ /* Schedule RX work queue. */
+ if (!work_pending(&pshm_drv->shm_rx_work))
+ queue_work(pshm_drv->pshm_rx_workqueue,
+ &pshm_drv->shm_rx_work);
+ }
+
+ /* Check for emptied buffers. */
+ if (mbx_msg & SHM_EMPTY_MASK) {
+ int idx;
+
+ spin_lock_irqsave(&pshm_drv->lock, flags);
+
+ /* Check whether we have any outstanding buffers. */
+ if (list_empty(&pshm_drv->tx_full_list)) {
+
+ /* We print even in IRQ context... */
+ pr_warn("No TX to empty: msg:%x\n", mbx_msg);
+
+ spin_unlock_irqrestore(&pshm_drv->lock, flags);
+
+ /* Bail out. */
+ goto err_sync;
+ }
+
+ pbuf =
+ list_entry(pshm_drv->tx_full_list.next,
+ struct buf_list, list);
+ idx = pbuf->index;
+
+ /* Check buffer synchronization. */
+ if (idx != SHM_GET_EMPTY(mbx_msg)) {
+
+ spin_unlock_irqrestore(&pshm_drv->lock, flags);
+
+ /* We print even in IRQ context... */
+ pr_warn("TX empty "
+ "out of sync:idx:%d, msg:%x\n", idx, mbx_msg);
+
+ /* Bail out. */
+ goto err_sync;
+ }
+ list_del_init(&pbuf->list);
+
+ /* Reset buffer parameters. */
+ pbuf->frames = 0;
+ pbuf->frm_ofs = SHM_CAIF_FRM_OFS;
+
+ list_add_tail(&pbuf->list, &pshm_drv->tx_empty_list);
+
+ /* Check the available no. of buffers in the empty list */
+ list_for_each(pos, &pshm_drv->tx_empty_list)
+ avail_emptybuff++;
+
+ /* Check whether we have to wake up the transmitter. */
+ if ((avail_emptybuff > HIGH_WATERMARK) &&
+ (!pshm_drv->tx_empty_available)) {
+ pshm_drv->tx_empty_available = 1;
+ pshm_drv->cfdev.flowctrl
+ (pshm_drv->pshm_dev->pshm_netdev,
+ CAIF_FLOW_ON);
+
+ spin_unlock_irqrestore(&pshm_drv->lock, flags);
+
+ /* Schedule the work queue if required. */
+ if (!work_pending(&pshm_drv->shm_tx_work))
+ queue_work(pshm_drv->pshm_tx_workqueue,
+ &pshm_drv->shm_tx_work);
+ } else
+ spin_unlock_irqrestore(&pshm_drv->lock, flags);
+ }
+
+ return 0;
+
+err_sync:
+ return -EIO;
+}
+
+static void shm_rx_work_func(struct work_struct *rx_work)
+{
+ struct shmdrv_layer *pshm_drv;
+ struct buf_list *pbuf;
+ unsigned long flags = 0;
+ struct sk_buff *skb;
+ char *p;
+ int ret;
+
+ pshm_drv = container_of(rx_work, struct shmdrv_layer, shm_rx_work);
+
+ while (1) {
+
+ struct shm_pck_desc *pck_desc;
+
+ spin_lock_irqsave(&pshm_drv->lock, flags);
+
+ /* Check for received buffers. */
+ if (list_empty(&pshm_drv->rx_full_list)) {
+ spin_unlock_irqrestore(&pshm_drv->lock, flags);
+ break;
+ }
+
+ pbuf =
+ list_entry(pshm_drv->rx_full_list.next, struct buf_list,
+ list);
+ list_del_init(&pbuf->list);
+
+ /* Retrieve pointer to start of the packet descriptor area. */
+ pck_desc = (struct shm_pck_desc *) pbuf->desc_vptr;
+
+ /*
+ * Check whether descriptor contains a CAIF shared memory
+ * frame.
+ */
+ while (pck_desc->frm_ofs) {
+ unsigned int frm_buf_ofs;
+ unsigned int frm_pck_ofs;
+ unsigned int frm_pck_len;
+ /*
+ * Check whether offset is within buffer limits
+ * (lower).
+ */
+ if (pck_desc->frm_ofs <
+ (pbuf->phy_addr - pshm_drv->shm_base_addr))
+ break;
+ /*
+ * Check whether offset is within buffer limits
+ * (higher).
+ */
+ if (pck_desc->frm_ofs >
+ ((pbuf->phy_addr - pshm_drv->shm_base_addr) +
+ pbuf->len))
+ break;
+
+ /* Calculate offset from start of buffer. */
+ frm_buf_ofs =
+ pck_desc->frm_ofs - (pbuf->phy_addr -
+ pshm_drv->shm_base_addr);
+
+ /*
+ * Calculate offset and length of CAIF packet while
+ * taking care of the shared memory header.
+ */
+ frm_pck_ofs =
+ frm_buf_ofs + SHM_HDR_LEN +
+ (*(pbuf->desc_vptr + frm_buf_ofs));
+ frm_pck_len =
+ (pck_desc->frm_len - SHM_HDR_LEN -
+ (*(pbuf->desc_vptr + frm_buf_ofs)));
+
+ /* Check whether CAIF packet is within buffer limits */
+ if ((frm_pck_ofs + pck_desc->frm_len) > pbuf->len)
+ break;
+
+ /* Get a suitable CAIF packet and copy in data. */
+ skb = netdev_alloc_skb(pshm_drv->pshm_dev->pshm_netdev,
+ frm_pck_len + 1);
+ BUG_ON(skb == NULL);
+
+ p = skb_put(skb, frm_pck_len);
+ memcpy(p, pbuf->desc_vptr + frm_pck_ofs, frm_pck_len);
+
+ skb->protocol = htons(ETH_P_CAIF);
+ skb_reset_mac_header(skb);
+ skb->dev = pshm_drv->pshm_dev->pshm_netdev;
+
+ /* Push received packet up the stack. */
+ ret = netif_rx_ni(skb);
+
+ if (!ret) {
+ pshm_drv->pshm_dev->pshm_netdev->stats.
+ rx_packets++;
+ pshm_drv->pshm_dev->pshm_netdev->stats.
+ rx_bytes += pck_desc->frm_len;
+ } else
+ ++pshm_drv->pshm_dev->pshm_netdev->stats.
+ rx_dropped;
+ /* Move to next packet descriptor. */
+ pck_desc++;
+ }
+
+ list_add_tail(&pbuf->list, &pshm_drv->rx_pend_list);
+
+ spin_unlock_irqrestore(&pshm_drv->lock, flags);
+
+ }
+
+ /* Schedule the work queue if required. */
+ if (!work_pending(&pshm_drv->shm_tx_work))
+ queue_work(pshm_drv->pshm_tx_workqueue, &pshm_drv->shm_tx_work);
+
+}
+
+static void shm_tx_work_func(struct work_struct *tx_work)
+{
+ u32 mbox_msg;
+ unsigned int frmlen, avail_emptybuff, append = 0;
+ unsigned long flags = 0;
+ struct buf_list *pbuf = NULL;
+ struct shmdrv_layer *pshm_drv;
+ struct shm_caif_frm *frm;
+ struct sk_buff *skb;
+ struct shm_pck_desc *pck_desc;
+ struct list_head *pos;
+
+ pshm_drv = container_of(tx_work, struct shmdrv_layer, shm_tx_work);
+
+ do {
+ /* Initialize mailbox message. */
+ mbox_msg = 0x00;
+ avail_emptybuff = 0;
+
+ spin_lock_irqsave(&pshm_drv->lock, flags);
+
+ /* Check for pending receive buffers. */
+ if (!list_empty(&pshm_drv->rx_pend_list)) {
+
+ pbuf = list_entry(pshm_drv->rx_pend_list.next,
+ struct buf_list, list);
+
+ list_del_init(&pbuf->list);
+ list_add_tail(&pbuf->list, &pshm_drv->rx_empty_list);
+ /*
+ * Value index is never changed,
+ * so read access should be safe.
+ */
+ mbox_msg |= SHM_SET_EMPTY(pbuf->index);
+ }
+
+ skb = skb_peek(&pshm_drv->sk_qhead);
+
+ if (skb == NULL)
+ goto send_msg;
+
+ /* Check the available no. of buffers in the empty list */
+ list_for_each(pos, &pshm_drv->tx_empty_list)
+ avail_emptybuff++;
+
+ if ((avail_emptybuff < LOW_WATERMARK) &&
+ pshm_drv->tx_empty_available) {
+ /* Update blocking condition. */
+ pshm_drv->tx_empty_available = 0;
+ pshm_drv->cfdev.flowctrl
+ (pshm_drv->pshm_dev->pshm_netdev,
+ CAIF_FLOW_OFF);
+ }
+ /*
+ * We simply return to the caller if we do not have space
+ * either in Tx pending list or Tx empty list. In this case,
+ * we hold the received skb in the skb list, waiting to
+ * be transmitted once Tx buffers become available
+ */
+ if (list_empty(&pshm_drv->tx_empty_list))
+ goto send_msg;
+
+ /* Get the first free Tx buffer. */
+ pbuf = list_entry(pshm_drv->tx_empty_list.next,
+ struct buf_list, list);
+ do {
+ if (append) {
+ skb = skb_peek(&pshm_drv->sk_qhead);
+ if (skb == NULL)
+ break;
+ }
+
+ frm = (struct shm_caif_frm *)
+ (pbuf->desc_vptr + pbuf->frm_ofs);
+
+ frm->hdr_ofs = 0;
+ frmlen = 0;
+ frmlen += SHM_HDR_LEN + frm->hdr_ofs + skb->len;
+
+ /* Add tail padding if needed. */
+ if (frmlen % SHM_FRM_PAD_LEN)
+ frmlen += SHM_FRM_PAD_LEN -
+ (frmlen % SHM_FRM_PAD_LEN);
+
+ /*
+ * Verify that packet, header and additional padding
+ * can fit within the buffer frame area.
+ */
+ if (frmlen >= (pbuf->len - pbuf->frm_ofs))
+ break;
+
+ if (!append) {
+ list_del_init(&pbuf->list);
+ append = 1;
+ }
+
+ skb = skb_dequeue(&pshm_drv->sk_qhead);
+ /* Copy in CAIF frame. */
+ skb_copy_bits(skb, 0, pbuf->desc_vptr +
+ pbuf->frm_ofs + SHM_HDR_LEN +
+ frm->hdr_ofs, skb->len);
+
+ pshm_drv->pshm_dev->pshm_netdev->stats.tx_packets++;
+ pshm_drv->pshm_dev->pshm_netdev->stats.tx_bytes +=
+ frmlen;
+ dev_kfree_skb(skb);
+
+ /* Fill in the shared memory packet descriptor area. */
+ pck_desc = (struct shm_pck_desc *) (pbuf->desc_vptr);
+ /* Forward to current frame. */
+ pck_desc += pbuf->frames;
+ pck_desc->frm_ofs = (pbuf->phy_addr -
+ pshm_drv->shm_base_addr) +
+ pbuf->frm_ofs;
+ pck_desc->frm_len = frmlen;
+ /* Terminate packet descriptor area. */
+ pck_desc++;
+ pck_desc->frm_ofs = 0;
+ /* Update buffer parameters. */
+ pbuf->frames++;
+ pbuf->frm_ofs += frmlen + (frmlen % 32);
+
+ } while (pbuf->frames < SHM_MAX_FRMS_PER_BUF);
+
+ /* Assign buffer as full. */
+ list_add_tail(&pbuf->list, &pshm_drv->tx_full_list);
+ append = 0;
+ mbox_msg |= SHM_SET_FULL(pbuf->index);
+send_msg:
+ spin_unlock_irqrestore(&pshm_drv->lock, flags);
+
+ if (mbox_msg)
+ pshm_drv->pshm_dev->pshmdev_mbxsend
+ (pshm_drv->pshm_dev->shm_id, mbox_msg);
+ } while (mbox_msg);
+}
+
+static int shm_netdev_tx(struct sk_buff *skb, struct net_device *shm_netdev)
+{
+ struct shmdrv_layer *pshm_drv;
+ unsigned long flags = 0;
+
+ pshm_drv = netdev_priv(shm_netdev);
+
+ spin_lock_irqsave(&pshm_drv->lock, flags);
+
+ skb_queue_tail(&pshm_drv->sk_qhead, skb);
+
+ spin_unlock_irqrestore(&pshm_drv->lock, flags);
+
+ /* Schedule Tx work queue for deferred processing of skbs. */
+ if (!work_pending(&pshm_drv->shm_tx_work))
+ queue_work(pshm_drv->pshm_tx_workqueue, &pshm_drv->shm_tx_work);
+
+ return 0;
+}
+
+static const struct net_device_ops netdev_ops = {
+ .ndo_open = shm_netdev_open,
+ .ndo_stop = shm_netdev_close,
+ .ndo_start_xmit = shm_netdev_tx,
+};
+
+static void shm_netdev_setup(struct net_device *pshm_netdev)
+{
+ struct shmdrv_layer *pshm_drv;
+ pshm_netdev->netdev_ops = &netdev_ops;
+
+ pshm_netdev->mtu = CAIF_MAX_MTU;
+ pshm_netdev->type = ARPHRD_CAIF;
+ pshm_netdev->hard_header_len = CAIF_NEEDED_HEADROOM;
+ pshm_netdev->tx_queue_len = 0;
+ pshm_netdev->destructor = free_netdev;
+
+ pshm_drv = netdev_priv(pshm_netdev);
+
+ /* Initialize structures in a clean state. */
+ memset(pshm_drv, 0, sizeof(struct shmdrv_layer));
+
+ pshm_drv->cfdev.link_select = CAIF_LINK_LOW_LATENCY;
+}
+
+int caif_shmcore_probe(struct shmdev_layer *pshm_dev)
+{
+ int result, j;
+ struct shmdrv_layer *pshm_drv = NULL;
+
+ pshm_dev->pshm_netdev = alloc_netdev(sizeof(struct shmdrv_layer),
+ "cfshm%d", shm_netdev_setup);
+ if (!pshm_dev->pshm_netdev)
+ return -ENOMEM;
+
+ pshm_drv = netdev_priv(pshm_dev->pshm_netdev);
+ pshm_drv->pshm_dev = pshm_dev;
+
+ /*
+ * Initialization starts with the verification of the
+ * availability of MBX driver by calling its setup function.
+ * MBX driver must be available by this time for proper
+ * functioning of SHM driver.
+ */
+ if ((pshm_dev->pshmdev_mbxsetup
+ (caif_shmdrv_rx_cb, pshm_dev, pshm_drv)) != 0) {
+ pr_warn("Could not config. SHM Mailbox,"
+ " Bailing out.....\n");
+ free_netdev(pshm_dev->pshm_netdev);
+ return -ENODEV;
+ }
+
+ skb_queue_head_init(&pshm_drv->sk_qhead);
+
+ pr_info("SHM DEVICE[%d] PROBED BY DRIVER, NEW SHM DRIVER"
+ " INSTANCE AT pshm_drv =0x%p\n",
+ pshm_drv->pshm_dev->shm_id, pshm_drv);
+
+ if (pshm_dev->shm_total_sz <
+ (NR_TX_BUF * TX_BUF_SZ + NR_RX_BUF * RX_BUF_SZ)) {
+
+ pr_warn("ERROR, Amount of available"
+ " Phys. SHM cannot accommodate current SHM "
+ "driver configuration, Bailing out ...\n");
+ free_netdev(pshm_dev->pshm_netdev);
+ return -ENOMEM;
+ }
+
+ pshm_drv->shm_base_addr = pshm_dev->shm_base_addr;
+ pshm_drv->shm_tx_addr = pshm_drv->shm_base_addr;
+
+ if (pshm_dev->shm_loopback)
+ pshm_drv->shm_rx_addr = pshm_drv->shm_tx_addr;
+ else
+ pshm_drv->shm_rx_addr = pshm_dev->shm_base_addr +
+ (NR_TX_BUF * TX_BUF_SZ);
+
+ INIT_LIST_HEAD(&pshm_drv->tx_empty_list);
+ INIT_LIST_HEAD(&pshm_drv->tx_pend_list);
+ INIT_LIST_HEAD(&pshm_drv->tx_full_list);
+
+ INIT_LIST_HEAD(&pshm_drv->rx_empty_list);
+ INIT_LIST_HEAD(&pshm_drv->rx_pend_list);
+ INIT_LIST_HEAD(&pshm_drv->rx_full_list);
+
+ INIT_WORK(&pshm_drv->shm_tx_work, shm_tx_work_func);
+ INIT_WORK(&pshm_drv->shm_rx_work, shm_rx_work_func);
+
+ pshm_drv->pshm_tx_workqueue =
+ create_singlethread_workqueue("shm_tx_work");
+ pshm_drv->pshm_rx_workqueue =
+ create_singlethread_workqueue("shm_rx_work");
+
+ for (j = 0; j < NR_TX_BUF; j++) {
+ struct buf_list *tx_buf =
+ kmalloc(sizeof(struct buf_list), GFP_KERNEL);
+
+ if (tx_buf == NULL) {
+ pr_warn("ERROR, Could not"
+ " allocate dynamic mem. for tx_buf,"
+ " Bailing out ...\n");
+ free_netdev(pshm_dev->pshm_netdev);
+ return -ENOMEM;
+ }
+ tx_buf->index = j;
+ tx_buf->phy_addr = pshm_drv->shm_tx_addr + (TX_BUF_SZ * j);
+ tx_buf->len = TX_BUF_SZ;
+ tx_buf->frames = 0;
+ tx_buf->frm_ofs = SHM_CAIF_FRM_OFS;
+
+ if (pshm_dev->shm_loopback)
+ tx_buf->desc_vptr = (char *)tx_buf->phy_addr;
+ else
+ tx_buf->desc_vptr =
+ ioremap(tx_buf->phy_addr, TX_BUF_SZ);
+
+ list_add_tail(&tx_buf->list, &pshm_drv->tx_empty_list);
+ }
+
+ for (j = 0; j < NR_RX_BUF; j++) {
+ struct buf_list *rx_buf =
+ kmalloc(sizeof(struct buf_list), GFP_KERNEL);
+
+ if (rx_buf == NULL) {
+ pr_warn("ERROR, Could not"
+ " allocate dynamic mem.for rx_buf,"
+ " Bailing out ...\n");
+ free_netdev(pshm_dev->pshm_netdev);
+ return -ENOMEM;
+ }
+ rx_buf->index = j;
+ rx_buf->phy_addr = pshm_drv->shm_rx_addr + (RX_BUF_SZ * j);
+ rx_buf->len = RX_BUF_SZ;
+
+ if (pshm_dev->shm_loopback)
+ rx_buf->desc_vptr = (char *)rx_buf->phy_addr;
+ else
+ rx_buf->desc_vptr =
+ ioremap(rx_buf->phy_addr, RX_BUF_SZ);
+ list_add_tail(&rx_buf->list, &pshm_drv->rx_empty_list);
+ }
+
+ pshm_drv->tx_empty_available = 1;
+ result = register_netdev(pshm_dev->pshm_netdev);
+ if (result)
+ pr_warn("ERROR[%d], SHM could not, "
+ "register with NW FRMWK Bailing out ...\n", result);
+
+ return result;
+}
+
+void caif_shmcore_remove(struct net_device *pshm_netdev)
+{
+ struct buf_list *pbuf;
+ struct shmdrv_layer *pshm_drv = NULL;
+
+ pshm_drv = netdev_priv(pshm_netdev);
+
+ while (!(list_empty(&pshm_drv->tx_pend_list))) {
+ pbuf =
+ list_entry(pshm_drv->tx_pend_list.next,
+ struct buf_list, list);
+
+ list_del(&pbuf->list);
+ kfree(pbuf);
+ }
+
+ while (!(list_empty(&pshm_drv->tx_full_list))) {
+ pbuf =
+ list_entry(pshm_drv->tx_full_list.next,
+ struct buf_list, list);
+ list_del(&pbuf->list);
+ kfree(pbuf);
+ }
+
+ while (!(list_empty(&pshm_drv->tx_empty_list))) {
+ pbuf =
+ list_entry(pshm_drv->tx_empty_list.next,
+ struct buf_list, list);
+ list_del(&pbuf->list);
+ kfree(pbuf);
+ }
+
+ while (!(list_empty(&pshm_drv->rx_full_list))) {
+ pbuf =
+ list_entry(pshm_drv->rx_full_list.next,
+ struct buf_list, list);
+ list_del(&pbuf->list);
+ kfree(pbuf);
+ }
+
+ while (!(list_empty(&pshm_drv->rx_pend_list))) {
+ pbuf =
+ list_entry(pshm_drv->rx_pend_list.next,
+ struct buf_list, list);
+ list_del(&pbuf->list);
+ kfree(pbuf);
+ }
+
+ while (!(list_empty(&pshm_drv->rx_empty_list))) {
+ pbuf =
+ list_entry(pshm_drv->rx_empty_list.next,
+ struct buf_list, list);
+ list_del(&pbuf->list);
+ kfree(pbuf);
+ }
+
+ /* Destroy work queues. */
+ destroy_workqueue(pshm_drv->pshm_tx_workqueue);
+ destroy_workqueue(pshm_drv->pshm_rx_workqueue);
+
+ unregister_netdev(pshm_netdev);
+}
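The transmit hook above (shm_netdev_tx) does no shared-memory work itself: it queues the skb and kicks a workqueue, and shm_tx_work_func later drains the queue into SHM buffers. Below is a minimal sketch of that deferral pattern, using hypothetical names (my_priv, my_tx_work, my_start_xmit) rather than the driver's own structures; it is an illustration of the technique, not the driver's actual code.

/* Sketch only: deferred transmit through a single-threaded workqueue. */
#include <linux/netdevice.h>
#include <linux/skbuff.h>
#include <linux/workqueue.h>

struct my_priv {
	struct sk_buff_head txq;	/* frames waiting to be copied out */
	struct workqueue_struct *wq;	/* e.g. create_singlethread_workqueue() */
	struct work_struct tx_work;
};

static void my_tx_work(struct work_struct *work)
{
	struct my_priv *priv = container_of(work, struct my_priv, tx_work);
	struct sk_buff *skb;

	/* Runs in process context: drain every queued frame. The real
	 * driver copies each frame into a shared-memory TX buffer here. */
	while ((skb = skb_dequeue(&priv->txq)) != NULL)
		dev_kfree_skb(skb);
}

static netdev_tx_t my_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct my_priv *priv = netdev_priv(dev);

	skb_queue_tail(&priv->txq, skb);	/* skb queues carry their own lock */

	/* Kick the worker only if it is not already pending. */
	if (!work_pending(&priv->tx_work))
		queue_work(priv->wq, &priv->tx_work);

	return NETDEV_TX_OK;
}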
diff --git a/drivers/net/caif/caif_spi.c b/drivers/net/caif/caif_spi.c
index 8427533fe313..20da1996d354 100644
--- a/drivers/net/caif/caif_spi.c
+++ b/drivers/net/caif/caif_spi.c
@@ -33,6 +33,9 @@ MODULE_LICENSE("GPL");
MODULE_AUTHOR("Daniel Martensson<daniel.martensson@stericsson.com>");
MODULE_DESCRIPTION("CAIF SPI driver");
+/* Returns the number of padding bytes for alignment. */
+#define PAD_POW2(x, pow) ((((x)&((pow)-1))==0) ? 0 : (((pow)-((x)&((pow)-1)))))
+
static int spi_loop;
module_param(spi_loop, bool, S_IRUGO);
MODULE_PARM_DESC(spi_loop, "SPI running in loopback mode.");
@@ -41,7 +44,10 @@ MODULE_PARM_DESC(spi_loop, "SPI running in loopback mode.");
module_param(spi_frm_align, int, S_IRUGO);
MODULE_PARM_DESC(spi_frm_align, "SPI frame alignment.");
-/* SPI padding options. */
+/*
+ * SPI padding options.
+ * Warning: must be a power of 2 (the & operation is used) and cannot be zero!
+ */
module_param(spi_up_head_align, int, S_IRUGO);
MODULE_PARM_DESC(spi_up_head_align, "SPI uplink head alignment.");
@@ -240,15 +246,13 @@ static ssize_t dbgfs_frame(struct file *file, char __user *user_buf,
static const struct file_operations dbgfs_state_fops = {
.open = dbgfs_open,
.read = dbgfs_state,
- .owner = THIS_MODULE,
- .llseek = default_llseek,
+ .owner = THIS_MODULE
};
static const struct file_operations dbgfs_frame_fops = {
.open = dbgfs_open,
.read = dbgfs_frame,
- .owner = THIS_MODULE,
- .llseek = default_llseek,
+ .owner = THIS_MODULE
};
static inline void dev_debugfs_add(struct cfspi *cfspi)
@@ -337,6 +341,9 @@ int cfspi_xmitfrm(struct cfspi *cfspi, u8 *buf, size_t len)
u8 *dst = buf;
caif_assert(buf);
+ if (cfspi->slave && !cfspi->slave_talked)
+ cfspi->slave_talked = true;
+
do {
struct sk_buff *skb;
struct caif_payload_info *info;
@@ -357,8 +364,8 @@ int cfspi_xmitfrm(struct cfspi *cfspi, u8 *buf, size_t len)
* Compute head offset i.e. number of bytes to add to
* get the start of the payload aligned.
*/
- if (spi_up_head_align) {
- spad = 1 + ((info->hdr_len + 1) & spi_up_head_align);
+ if (spi_up_head_align > 1) {
+ spad = 1 + PAD_POW2((info->hdr_len + 1), spi_up_head_align);
*dst = (u8)(spad - 1);
dst += spad;
}
@@ -373,7 +380,7 @@ int cfspi_xmitfrm(struct cfspi *cfspi, u8 *buf, size_t len)
* Compute tail offset i.e. number of bytes to add to
* get the complete CAIF frame aligned.
*/
- epad = (skb->len + spad) & spi_up_tail_align;
+ epad = PAD_POW2((skb->len + spad), spi_up_tail_align);
dst += epad;
dev_kfree_skb(skb);
@@ -417,14 +424,14 @@ int cfspi_xmitlen(struct cfspi *cfspi)
* Compute head offset i.e. number of bytes to add to
* get the start of the payload aligned.
*/
- if (spi_up_head_align)
- spad = 1 + ((info->hdr_len + 1) & spi_up_head_align);
+ if (spi_up_head_align > 1)
+ spad = 1 + PAD_POW2((info->hdr_len + 1), spi_up_head_align);
/*
* Compute tail offset i.e. number of bytes to add to
* get the complete CAIF frame aligned.
*/
- epad = (skb->len + spad) & spi_up_tail_align;
+ epad = PAD_POW2((skb->len + spad), spi_up_tail_align);
if ((skb->len + spad + epad + frm_len) <= CAIF_MAX_SPI_FRAME) {
skb_queue_tail(&cfspi->chead, skb);
@@ -433,6 +440,7 @@ int cfspi_xmitlen(struct cfspi *cfspi)
} else {
/* Put back packet. */
skb_queue_head(&cfspi->qhead, skb);
+ break;
}
} while (pkts <= CAIF_MAX_SPI_PKTS);
@@ -453,6 +461,15 @@ static void cfspi_ss_cb(bool assert, struct cfspi_ifc *ifc)
{
struct cfspi *cfspi = (struct cfspi *)ifc->priv;
+ /*
+ * The slave device is the master on the link. Interrupts before the
+ * slave has transmitted are considered spurious.
+ */
+ if (cfspi->slave && !cfspi->slave_talked) {
+ printk(KERN_WARNING "CFSPI: Spurious SS interrupt.\n");
+ return;
+ }
+
if (!in_interrupt())
spin_lock(&cfspi->lock);
if (assert) {
@@ -465,7 +482,8 @@ static void cfspi_ss_cb(bool assert, struct cfspi_ifc *ifc)
spin_unlock(&cfspi->lock);
/* Wake up the xfer thread. */
- wake_up_interruptible(&cfspi->wait);
+ if (assert)
+ wake_up_interruptible(&cfspi->wait);
}
static void cfspi_xfer_done_cb(struct cfspi_ifc *ifc)
@@ -523,7 +541,7 @@ int cfspi_rxfrm(struct cfspi *cfspi, u8 *buf, size_t len)
* Compute head offset i.e. number of bytes added to
* get the start of the payload aligned.
*/
- if (spi_down_head_align) {
+ if (spi_down_head_align > 1) {
spad = 1 + *src;
src += spad;
}
@@ -564,7 +582,7 @@ int cfspi_rxfrm(struct cfspi *cfspi, u8 *buf, size_t len)
* Compute tail offset i.e. number of bytes added to
* get the complete CAIF frame aligned.
*/
- epad = (pkt_len + spad) & spi_down_tail_align;
+ epad = PAD_POW2((pkt_len + spad), spi_down_tail_align);
src += epad;
} while ((src - buf) < len);
@@ -617,19 +635,28 @@ int cfspi_spi_probe(struct platform_device *pdev)
ndev = alloc_netdev(sizeof(struct cfspi),
"cfspi%d", cfspi_setup);
- if (!dev)
- return -ENODEV;
+ if (!ndev)
+ return -ENOMEM;
cfspi = netdev_priv(ndev);
netif_stop_queue(ndev);
cfspi->ndev = ndev;
cfspi->pdev = pdev;
- /* Set flow info */
+ /* Set flow info. */
cfspi->flow_off_sent = 0;
cfspi->qd_low_mark = LOW_WATER_MARK;
cfspi->qd_high_mark = HIGH_WATER_MARK;
+ /* Set slave info. */
+ if (!strncmp(cfspi_spi_driver.driver.name, "cfspi_sspi", 10)) {
+ cfspi->slave = true;
+ cfspi->slave_talked = false;
+ } else {
+ cfspi->slave = false;
+ cfspi->slave_talked = false;
+ }
+
/* Assign the SPI device. */
cfspi->dev = dev;
/* Assign the device ifc to this SPI interface. */
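The PAD_POW2 helper introduced in this file masks with (pow - 1), which is why the padding-option warnings insist on power-of-two, non-zero alignments. A stand-alone illustration of the padding it computes (plain user-space C, not part of the driver):

/* Stand-alone demonstration of the PAD_POW2 padding calculation. */
#include <stdio.h>

#define PAD_POW2(x, pow) \
	((((x) & ((pow) - 1)) == 0) ? 0 : ((pow) - ((x) & ((pow) - 1))))

int main(void)
{
	unsigned int len, align = 4;	/* alignment must be a power of two */

	for (len = 0; len < 8; len++)
		printf("len=%u pad=%u total=%u\n",
		       len, PAD_POW2(len, align), len + PAD_POW2(len, align));
	return 0;
}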
diff --git a/drivers/net/caif/caif_spi_slave.c b/drivers/net/caif/caif_spi_slave.c
index 2111dbfea6fe..1b9943a4edab 100644
--- a/drivers/net/caif/caif_spi_slave.c
+++ b/drivers/net/caif/caif_spi_slave.c
@@ -36,10 +36,15 @@ static inline int forward_to_spi_cmd(struct cfspi *cfspi)
#endif
int spi_frm_align = 2;
-int spi_up_head_align = 1;
-int spi_up_tail_align;
-int spi_down_head_align = 3;
-int spi_down_tail_align = 1;
+
+/*
+ * SPI padding options.
+ * Warning: must be a power of 2 (the & operation is used) and cannot be zero!
+ */
+int spi_up_head_align = 1 << 1;
+int spi_up_tail_align = 1 << 0;
+int spi_down_head_align = 1 << 2;
+int spi_down_tail_align = 1 << 1;
#ifdef CONFIG_DEBUG_FS
static inline void debugfs_store_prev(struct cfspi *cfspi)
diff --git a/drivers/net/can/Kconfig b/drivers/net/can/Kconfig
index 9d9e45394433..080574b0fff0 100644
--- a/drivers/net/can/Kconfig
+++ b/drivers/net/can/Kconfig
@@ -82,6 +82,14 @@ config CAN_FLEXCAN
---help---
Say Y here if you want to support for Freescale FlexCAN.
+config PCH_CAN
+ tristate "PCH CAN"
+ depends on CAN_DEV && PCI
+ ---help---
+ This driver is for the PCH CAN controller of Topcliff, which is an
+ IOH for x86 embedded processors. It provides access to the CAN bus.
+
source "drivers/net/can/mscan/Kconfig"
source "drivers/net/can/sja1000/Kconfig"
diff --git a/drivers/net/can/Makefile b/drivers/net/can/Makefile
index 00575373bbd0..90af15a4f106 100644
--- a/drivers/net/can/Makefile
+++ b/drivers/net/can/Makefile
@@ -17,5 +17,6 @@ obj-$(CONFIG_CAN_MCP251X) += mcp251x.o
obj-$(CONFIG_CAN_BFIN) += bfin_can.o
obj-$(CONFIG_CAN_JANZ_ICAN3) += janz-ican3.o
obj-$(CONFIG_CAN_FLEXCAN) += flexcan.o
+obj-$(CONFIG_PCH_CAN) += pch_can.o
ccflags-$(CONFIG_CAN_DEBUG_DEVICES) := -DDEBUG
diff --git a/drivers/net/can/at91_can.c b/drivers/net/can/at91_can.c
index 2d8bd86bc5e2..7ef83d06f7ed 100644
--- a/drivers/net/can/at91_can.c
+++ b/drivers/net/can/at91_can.c
@@ -1,8 +1,8 @@
/*
* at91_can.c - CAN network driver for AT91 SoC CAN controller
*
- * (C) 2007 by Hans J. Koch <hjk@linutronix.de>
- * (C) 2008, 2009 by Marc Kleine-Budde <kernel@pengutronix.de>
+ * (C) 2007 by Hans J. Koch <hjk@hansjkoch.de>
+ * (C) 2008, 2009, 2010 by Marc Kleine-Budde <kernel@pengutronix.de>
*
* This software may be distributed under the terms of the GNU General
* Public License ("GPL") version 2 as distributed in the 'COPYING'
@@ -40,7 +40,6 @@
#include <mach/board.h>
-#define DRV_NAME "at91_can"
#define AT91_NAPI_WEIGHT 12
/*
@@ -172,6 +171,7 @@ struct at91_priv {
};
static struct can_bittiming_const at91_bittiming_const = {
+ .name = KBUILD_MODNAME,
.tseg1_min = 4,
.tseg1_max = 16,
.tseg2_min = 2,
@@ -199,13 +199,13 @@ static inline int get_tx_echo_mb(const struct at91_priv *priv)
static inline u32 at91_read(const struct at91_priv *priv, enum at91_reg reg)
{
- return readl(priv->reg_base + reg);
+ return __raw_readl(priv->reg_base + reg);
}
static inline void at91_write(const struct at91_priv *priv, enum at91_reg reg,
u32 value)
{
- writel(value, priv->reg_base + reg);
+ __raw_writel(value, priv->reg_base + reg);
}
static inline void set_mb_mode_prio(const struct at91_priv *priv,
@@ -243,6 +243,12 @@ static void at91_setup_mailboxes(struct net_device *dev)
set_mb_mode(priv, i, AT91_MB_MODE_RX);
set_mb_mode(priv, AT91_MB_RX_LAST, AT91_MB_MODE_RX_OVRWR);
+ /* reset acceptance mask and id register */
+ for (i = AT91_MB_RX_FIRST; i <= AT91_MB_RX_LAST; i++) {
+ at91_write(priv, AT91_MAM(i), 0x0);
+ at91_write(priv, AT91_MID(i), AT91_MID_MIDE);
+ }
+
/* The last 4 mailboxes are used for transmitting. */
for (i = AT91_MB_TX_FIRST; i <= AT91_MB_TX_LAST; i++)
set_mb_mode_prio(priv, i, AT91_MB_MODE_TX, 0);
@@ -257,18 +263,30 @@ static int at91_set_bittiming(struct net_device *dev)
const struct can_bittiming *bt = &priv->can.bittiming;
u32 reg_br;
- reg_br = ((priv->can.ctrlmode & CAN_CTRLMODE_3_SAMPLES) << 24) |
- ((bt->brp - 1) << 16) | ((bt->sjw - 1) << 12) |
+ reg_br = ((priv->can.ctrlmode & CAN_CTRLMODE_3_SAMPLES) ? 1 << 24 : 0) |
+ ((bt->brp - 1) << 16) | ((bt->sjw - 1) << 12) |
((bt->prop_seg - 1) << 8) | ((bt->phase_seg1 - 1) << 4) |
((bt->phase_seg2 - 1) << 0);
- dev_info(dev->dev.parent, "writing AT91_BR: 0x%08x\n", reg_br);
+ netdev_info(dev, "writing AT91_BR: 0x%08x\n", reg_br);
at91_write(priv, AT91_BR, reg_br);
return 0;
}
+static int at91_get_berr_counter(const struct net_device *dev,
+ struct can_berr_counter *bec)
+{
+ const struct at91_priv *priv = netdev_priv(dev);
+ u32 reg_ecr = at91_read(priv, AT91_ECR);
+
+ bec->rxerr = reg_ecr & 0xff;
+ bec->txerr = reg_ecr >> 16;
+
+ return 0;
+}
+
static void at91_chip_start(struct net_device *dev)
{
struct at91_priv *priv = netdev_priv(dev);
@@ -281,6 +299,7 @@ static void at91_chip_start(struct net_device *dev)
reg_mr = at91_read(priv, AT91_MR);
at91_write(priv, AT91_MR, reg_mr & ~AT91_MR_CANEN);
+ at91_set_bittiming(dev);
at91_setup_mailboxes(dev);
at91_transceiver_switch(priv, 1);
@@ -350,8 +369,7 @@ static netdev_tx_t at91_start_xmit(struct sk_buff *skb, struct net_device *dev)
if (unlikely(!(at91_read(priv, AT91_MSR(mb)) & AT91_MSR_MRDY))) {
netif_stop_queue(dev);
- dev_err(dev->dev.parent,
- "BUG! TX buffer full when queue awake!\n");
+ netdev_err(dev, "BUG! TX buffer full when queue awake!\n");
return NETDEV_TX_BUSY;
}
@@ -435,7 +453,7 @@ static void at91_rx_overflow_err(struct net_device *dev)
struct sk_buff *skb;
struct can_frame *cf;
- dev_dbg(dev->dev.parent, "RX buffer overflow\n");
+ netdev_dbg(dev, "RX buffer overflow\n");
stats->rx_over_errors++;
stats->rx_errors++;
@@ -480,6 +498,9 @@ static void at91_read_mb(struct net_device *dev, unsigned int mb,
*(u32 *)(cf->data + 0) = at91_read(priv, AT91_MDL(mb));
*(u32 *)(cf->data + 4) = at91_read(priv, AT91_MDH(mb));
+ /* allow RX of extended frames */
+ at91_write(priv, AT91_MID(mb), AT91_MID_MIDE);
+
if (unlikely(mb == AT91_MB_RX_LAST && reg_msr & AT91_MSR_MMI))
at91_rx_overflow_err(dev);
}
@@ -565,8 +586,8 @@ static int at91_poll_rx(struct net_device *dev, int quota)
if (priv->rx_next > AT91_MB_RX_LOW_LAST &&
reg_sr & AT91_MB_RX_LOW_MASK)
- dev_info(dev->dev.parent,
- "order of incoming frames cannot be guaranteed\n");
+ netdev_info(dev,
+ "order of incoming frames cannot be guaranteed\n");
again:
for (mb = find_next_bit(addr, AT91_MB_RX_NUM, priv->rx_next);
@@ -604,7 +625,7 @@ static void at91_poll_err_frame(struct net_device *dev,
/* CRC error */
if (reg_sr & AT91_IRQ_CERR) {
- dev_dbg(dev->dev.parent, "CERR irq\n");
+ netdev_dbg(dev, "CERR irq\n");
dev->stats.rx_errors++;
priv->can.can_stats.bus_error++;
cf->can_id |= CAN_ERR_PROT | CAN_ERR_BUSERROR;
@@ -612,7 +633,7 @@ static void at91_poll_err_frame(struct net_device *dev,
/* Stuffing Error */
if (reg_sr & AT91_IRQ_SERR) {
- dev_dbg(dev->dev.parent, "SERR irq\n");
+ netdev_dbg(dev, "SERR irq\n");
dev->stats.rx_errors++;
priv->can.can_stats.bus_error++;
cf->can_id |= CAN_ERR_PROT | CAN_ERR_BUSERROR;
@@ -621,14 +642,14 @@ static void at91_poll_err_frame(struct net_device *dev,
/* Acknowledgement Error */
if (reg_sr & AT91_IRQ_AERR) {
- dev_dbg(dev->dev.parent, "AERR irq\n");
+ netdev_dbg(dev, "AERR irq\n");
dev->stats.tx_errors++;
cf->can_id |= CAN_ERR_ACK;
}
/* Form error */
if (reg_sr & AT91_IRQ_FERR) {
- dev_dbg(dev->dev.parent, "FERR irq\n");
+ netdev_dbg(dev, "FERR irq\n");
dev->stats.rx_errors++;
priv->can.can_stats.bus_error++;
cf->can_id |= CAN_ERR_PROT | CAN_ERR_BUSERROR;
@@ -637,7 +658,7 @@ static void at91_poll_err_frame(struct net_device *dev,
/* Bit Error */
if (reg_sr & AT91_IRQ_BERR) {
- dev_dbg(dev->dev.parent, "BERR irq\n");
+ netdev_dbg(dev, "BERR irq\n");
dev->stats.tx_errors++;
priv->can.can_stats.bus_error++;
cf->can_id |= CAN_ERR_PROT | CAN_ERR_BUSERROR;
@@ -755,12 +776,10 @@ static void at91_irq_err_state(struct net_device *dev,
struct can_frame *cf, enum can_state new_state)
{
struct at91_priv *priv = netdev_priv(dev);
- u32 reg_idr, reg_ier, reg_ecr;
- u8 tec, rec;
+ u32 reg_idr = 0, reg_ier = 0;
+ struct can_berr_counter bec;
- reg_ecr = at91_read(priv, AT91_ECR);
- rec = reg_ecr & 0xff;
- tec = reg_ecr >> 16;
+ at91_get_berr_counter(dev, &bec);
switch (priv->can.state) {
case CAN_STATE_ERROR_ACTIVE:
@@ -771,11 +790,11 @@ static void at91_irq_err_state(struct net_device *dev,
*/
if (new_state >= CAN_STATE_ERROR_WARNING &&
new_state <= CAN_STATE_BUS_OFF) {
- dev_dbg(dev->dev.parent, "Error Warning IRQ\n");
+ netdev_dbg(dev, "Error Warning IRQ\n");
priv->can.can_stats.error_warning++;
cf->can_id |= CAN_ERR_CRTL;
- cf->data[1] = (tec > rec) ?
+ cf->data[1] = (bec.txerr > bec.rxerr) ?
CAN_ERR_CRTL_TX_WARNING :
CAN_ERR_CRTL_RX_WARNING;
}
@@ -787,11 +806,11 @@ static void at91_irq_err_state(struct net_device *dev,
*/
if (new_state >= CAN_STATE_ERROR_PASSIVE &&
new_state <= CAN_STATE_BUS_OFF) {
- dev_dbg(dev->dev.parent, "Error Passive IRQ\n");
+ netdev_dbg(dev, "Error Passive IRQ\n");
priv->can.can_stats.error_passive++;
cf->can_id |= CAN_ERR_CRTL;
- cf->data[1] = (tec > rec) ?
+ cf->data[1] = (bec.txerr > bec.rxerr) ?
CAN_ERR_CRTL_TX_PASSIVE :
CAN_ERR_CRTL_RX_PASSIVE;
}
@@ -804,7 +823,7 @@ static void at91_irq_err_state(struct net_device *dev,
if (new_state <= CAN_STATE_ERROR_PASSIVE) {
cf->can_id |= CAN_ERR_RESTARTED;
- dev_dbg(dev->dev.parent, "restarted\n");
+ netdev_dbg(dev, "restarted\n");
priv->can.can_stats.restarts++;
netif_carrier_on(dev);
@@ -825,7 +844,7 @@ static void at91_irq_err_state(struct net_device *dev,
* circumstances. so just enable AT91_IRQ_ERRP, thus
* the "fallthrough"
*/
- dev_dbg(dev->dev.parent, "Error Active\n");
+ netdev_dbg(dev, "Error Active\n");
cf->can_id |= CAN_ERR_PROT;
cf->data[2] = CAN_ERR_PROT_ACTIVE;
case CAN_STATE_ERROR_WARNING: /* fallthrough */
@@ -843,7 +862,7 @@ static void at91_irq_err_state(struct net_device *dev,
cf->can_id |= CAN_ERR_BUSOFF;
- dev_dbg(dev->dev.parent, "bus-off\n");
+ netdev_dbg(dev, "bus-off\n");
netif_carrier_off(dev);
priv->can.can_stats.bus_off++;
@@ -881,7 +900,7 @@ static void at91_irq_err(struct net_device *dev)
else if (likely(reg_sr & AT91_IRQ_ERRA))
new_state = CAN_STATE_ERROR_ACTIVE;
else {
- dev_err(dev->dev.parent, "BUG! hardware in undefined state\n");
+ netdev_err(dev, "BUG! hardware in undefined state\n");
return;
}
@@ -1018,7 +1037,7 @@ static const struct net_device_ops at91_netdev_ops = {
.ndo_start_xmit = at91_start_xmit,
};
-static int __init at91_can_probe(struct platform_device *pdev)
+static int __devinit at91_can_probe(struct platform_device *pdev)
{
struct net_device *dev;
struct at91_priv *priv;
@@ -1067,8 +1086,8 @@ static int __init at91_can_probe(struct platform_device *pdev)
priv = netdev_priv(dev);
priv->can.clock.freq = clk_get_rate(clk);
priv->can.bittiming_const = &at91_bittiming_const;
- priv->can.do_set_bittiming = at91_set_bittiming;
priv->can.do_set_mode = at91_set_mode;
+ priv->can.do_get_berr_counter = at91_get_berr_counter;
priv->can.ctrlmode_supported = CAN_CTRLMODE_3_SAMPLES;
priv->reg_base = addr;
priv->dev = dev;
@@ -1092,7 +1111,7 @@ static int __init at91_can_probe(struct platform_device *pdev)
return 0;
exit_free:
- free_netdev(dev);
+ free_candev(dev);
exit_iounmap:
iounmap(addr);
exit_release:
@@ -1113,8 +1132,6 @@ static int __devexit at91_can_remove(struct platform_device *pdev)
platform_set_drvdata(pdev, NULL);
- free_netdev(dev);
-
iounmap(priv->reg_base);
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
@@ -1122,6 +1139,8 @@ static int __devexit at91_can_remove(struct platform_device *pdev)
clk_put(priv->clk);
+ free_candev(dev);
+
return 0;
}
@@ -1129,21 +1148,19 @@ static struct platform_driver at91_can_driver = {
.probe = at91_can_probe,
.remove = __devexit_p(at91_can_remove),
.driver = {
- .name = DRV_NAME,
+ .name = KBUILD_MODNAME,
.owner = THIS_MODULE,
},
};
static int __init at91_can_module_init(void)
{
- printk(KERN_INFO "%s netdevice driver\n", DRV_NAME);
return platform_driver_register(&at91_can_driver);
}
static void __exit at91_can_module_exit(void)
{
platform_driver_unregister(&at91_can_driver);
- printk(KERN_INFO "%s: driver removed\n", DRV_NAME);
}
module_init(at91_can_module_init);
@@ -1151,4 +1168,4 @@ module_exit(at91_can_module_exit);
MODULE_AUTHOR("Marc Kleine-Budde <mkl@pengutronix.de>");
MODULE_LICENSE("GPL v2");
-MODULE_DESCRIPTION(DRV_NAME " CAN netdevice driver");
+MODULE_DESCRIPTION(KBUILD_MODNAME " CAN netdevice driver");
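One at91_can fix above replaces the direct shift of the CAN_CTRLMODE_3_SAMPLES mask with a ternary when building AT91_BR: ctrlmode holds flag bits, so shifting the masked value left by 24 sets whichever bit the flag happens to occupy plus 24, not bit 24 itself. A quick stand-alone illustration, assuming the usual flag value of 0x04 (an assumption here, not taken from this patch):

/* Illustration only: why shifting a flag mask is wrong for building AT91_BR. */
#include <stdio.h>

#define CAN_CTRLMODE_3_SAMPLES 0x04	/* assumed flag value */

int main(void)
{
	unsigned int ctrlmode = CAN_CTRLMODE_3_SAMPLES;

	unsigned int wrong = (ctrlmode & CAN_CTRLMODE_3_SAMPLES) << 24;         /* 0x04000000: bit 26 */
	unsigned int right = (ctrlmode & CAN_CTRLMODE_3_SAMPLES) ? 1 << 24 : 0; /* 0x01000000: bit 24 */

	printf("wrong=0x%08x right=0x%08x\n", wrong, right);
	return 0;
}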
diff --git a/drivers/net/can/flexcan.c b/drivers/net/can/flexcan.c
index ef443a090ba7..d4990568baee 100644
--- a/drivers/net/can/flexcan.c
+++ b/drivers/net/can/flexcan.c
@@ -992,7 +992,6 @@ static int __devexit flexcan_remove(struct platform_device *pdev)
unregister_flexcandev(dev);
platform_set_drvdata(pdev, NULL);
- free_candev(dev);
iounmap(priv->base);
mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
@@ -1000,6 +999,8 @@ static int __devexit flexcan_remove(struct platform_device *pdev)
clk_put(priv->clk);
+ free_candev(dev);
+
return 0;
}
diff --git a/drivers/net/can/mcp251x.c b/drivers/net/can/mcp251x.c
index 6aadc3e32bd5..7ab534aee452 100644
--- a/drivers/net/can/mcp251x.c
+++ b/drivers/net/can/mcp251x.c
@@ -169,6 +169,7 @@
# define RXBSIDH_SHIFT 3
#define RXBSIDL(n) (((n) * 0x10) + 0x60 + RXBSIDL_OFF)
# define RXBSIDL_IDE 0x08
+# define RXBSIDL_SRR 0x10
# define RXBSIDL_EID 3
# define RXBSIDL_SHIFT 5
#define RXBEID8(n) (((n) * 0x10) + 0x60 + RXBEID8_OFF)
@@ -475,6 +476,8 @@ static void mcp251x_hw_rx(struct spi_device *spi, int buf_idx)
frame->can_id =
(buf[RXBSIDH_OFF] << RXBSIDH_SHIFT) |
(buf[RXBSIDL_OFF] >> RXBSIDL_SHIFT);
+ if (buf[RXBSIDL_OFF] & RXBSIDL_SRR)
+ frame->can_id |= CAN_RTR_FLAG;
}
/* Data length */
frame->can_dlc = get_can_dlc(buf[RXBDLC_OFF] & RXBDLC_LEN_MASK);
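The mcp251x change above sets CAN_RTR_FLAG from the SRR bit of RXBnSIDL, which marks a remote request for standard-ID frames (extended frames carry their RTR indication elsewhere). A tiny stand-alone sketch of decoding that status byte, using the bit values defined in the patch:

/* Stand-alone sketch: decoding a standard-frame remote request from RXBnSIDL. */
#include <stdio.h>

#define RXBSIDL_IDE 0x08	/* extended-ID indicator */
#define RXBSIDL_SRR 0x10	/* standard-frame remote request (valid when IDE = 0) */

static int is_std_remote_frame(unsigned char sidl)
{
	return !(sidl & RXBSIDL_IDE) && (sidl & RXBSIDL_SRR);
}

int main(void)
{
	printf("%d %d\n", is_std_remote_frame(0x10), is_std_remote_frame(0x18));
	return 0;
}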
diff --git a/drivers/net/can/pch_can.c b/drivers/net/can/pch_can.c
new file mode 100644
index 000000000000..672718261c68
--- /dev/null
+++ b/drivers/net/can/pch_can.c
@@ -0,0 +1,1463 @@
+/*
+ * Copyright (C) 1999 - 2010 Intel Corporation.
+ * Copyright (C) 2010 OKI SEMICONDUCTOR Co., LTD.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; version 2 of the License.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA.
+ */
+
+#include <linux/interrupt.h>
+#include <linux/delay.h>
+#include <linux/io.h>
+#include <linux/module.h>
+#include <linux/sched.h>
+#include <linux/pci.h>
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/types.h>
+#include <linux/errno.h>
+#include <linux/netdevice.h>
+#include <linux/skbuff.h>
+#include <linux/can.h>
+#include <linux/can/dev.h>
+#include <linux/can/error.h>
+
+#define MAX_MSG_OBJ 32
+#define MSG_OBJ_RX 0 /* The receive message object flag. */
+#define MSG_OBJ_TX 1 /* The transmit message object flag. */
+
+#define ENABLE 1 /* The enable flag */
+#define DISABLE 0 /* The disable flag */
+#define CAN_CTRL_INIT 0x0001 /* The INIT bit of CANCONT register. */
+#define CAN_CTRL_IE 0x0002 /* The IE bit of CAN control register */
+#define CAN_CTRL_IE_SIE_EIE 0x000e
+#define CAN_CTRL_CCE 0x0040
+#define CAN_CTRL_OPT 0x0080 /* The OPT bit of CANCONT register. */
+#define CAN_OPT_SILENT 0x0008 /* The Silent bit of CANOPT reg. */
+#define CAN_OPT_LBACK 0x0010 /* The LoopBack bit of CANOPT reg. */
+#define CAN_CMASK_RX_TX_SET 0x00f3
+#define CAN_CMASK_RX_TX_GET 0x0073
+#define CAN_CMASK_ALL 0xff
+#define CAN_CMASK_RDWR 0x80
+#define CAN_CMASK_ARB 0x20
+#define CAN_CMASK_CTRL 0x10
+#define CAN_CMASK_MASK 0x40
+#define CAN_CMASK_NEWDAT 0x04
+#define CAN_CMASK_CLRINTPND 0x08
+
+#define CAN_IF_MCONT_NEWDAT 0x8000
+#define CAN_IF_MCONT_INTPND 0x2000
+#define CAN_IF_MCONT_UMASK 0x1000
+#define CAN_IF_MCONT_TXIE 0x0800
+#define CAN_IF_MCONT_RXIE 0x0400
+#define CAN_IF_MCONT_RMTEN 0x0200
+#define CAN_IF_MCONT_TXRQXT 0x0100
+#define CAN_IF_MCONT_EOB 0x0080
+#define CAN_IF_MCONT_DLC 0x000f
+#define CAN_IF_MCONT_MSGLOST 0x4000
+#define CAN_MASK2_MDIR_MXTD 0xc000
+#define CAN_ID2_DIR 0x2000
+#define CAN_ID_MSGVAL 0x8000
+
+#define CAN_STATUS_INT 0x8000
+#define CAN_IF_CREQ_BUSY 0x8000
+#define CAN_ID2_XTD 0x4000
+
+#define CAN_REC 0x00007f00
+#define CAN_TEC 0x000000ff
+
+#define PCH_RX_OK 0x00000010
+#define PCH_TX_OK 0x00000008
+#define PCH_BUS_OFF 0x00000080
+#define PCH_EWARN 0x00000040
+#define PCH_EPASSIV 0x00000020
+#define PCH_LEC0 0x00000001
+#define PCH_LEC1 0x00000002
+#define PCH_LEC2 0x00000004
+#define PCH_LEC_ALL (PCH_LEC0 | PCH_LEC1 | PCH_LEC2)
+#define PCH_STUF_ERR PCH_LEC0
+#define PCH_FORM_ERR PCH_LEC1
+#define PCH_ACK_ERR (PCH_LEC0 | PCH_LEC1)
+#define PCH_BIT1_ERR PCH_LEC2
+#define PCH_BIT0_ERR (PCH_LEC0 | PCH_LEC2)
+#define PCH_CRC_ERR (PCH_LEC1 | PCH_LEC2)
+
+/* bit position of certain controller bits. */
+#define BIT_BITT_BRP 0
+#define BIT_BITT_SJW 6
+#define BIT_BITT_TSEG1 8
+#define BIT_BITT_TSEG2 12
+#define BIT_IF1_MCONT_RXIE 10
+#define BIT_IF2_MCONT_TXIE 11
+#define BIT_BRPE_BRPE 6
+#define BIT_ES_TXERRCNT 0
+#define BIT_ES_RXERRCNT 8
+#define MSK_BITT_BRP 0x3f
+#define MSK_BITT_SJW 0xc0
+#define MSK_BITT_TSEG1 0xf00
+#define MSK_BITT_TSEG2 0x7000
+#define MSK_BRPE_BRPE 0x3c0
+#define MSK_BRPE_GET 0x0f
+#define MSK_CTRL_IE_SIE_EIE 0x07
+#define MSK_MCONT_TXIE 0x08
+#define MSK_MCONT_RXIE 0x10
+#define PCH_CAN_NO_TX_BUFF 1
+#define COUNTER_LIMIT 10
+
+#define PCH_CAN_CLK 50000000 /* 50MHz */
+
+/* Define the number of message objects.
+ * PCH CAN communication is done via Message RAM,
+ * which consists of 32 message objects. */
+#define PCH_RX_OBJ_NUM 26 /* message objects 1..PCH_RX_OBJ_NUM are Rx */
+#define PCH_TX_OBJ_NUM 6 /* objects PCH_RX_OBJ_NUM+1..PCH_OBJ_NUM are Tx */
+#define PCH_OBJ_NUM (PCH_TX_OBJ_NUM + PCH_RX_OBJ_NUM)
+
+#define PCH_FIFO_THRESH 16
+
+enum pch_can_mode {
+ PCH_CAN_ENABLE,
+ PCH_CAN_DISABLE,
+ PCH_CAN_ALL,
+ PCH_CAN_NONE,
+ PCH_CAN_STOP,
+ PCH_CAN_RUN
+};
+
+struct pch_can_regs {
+ u32 cont;
+ u32 stat;
+ u32 errc;
+ u32 bitt;
+ u32 intr;
+ u32 opt;
+ u32 brpe;
+ u32 reserve1;
+ u32 if1_creq;
+ u32 if1_cmask;
+ u32 if1_mask1;
+ u32 if1_mask2;
+ u32 if1_id1;
+ u32 if1_id2;
+ u32 if1_mcont;
+ u32 if1_dataa1;
+ u32 if1_dataa2;
+ u32 if1_datab1;
+ u32 if1_datab2;
+ u32 reserve2;
+ u32 reserve3[12];
+ u32 if2_creq;
+ u32 if2_cmask;
+ u32 if2_mask1;
+ u32 if2_mask2;
+ u32 if2_id1;
+ u32 if2_id2;
+ u32 if2_mcont;
+ u32 if2_dataa1;
+ u32 if2_dataa2;
+ u32 if2_datab1;
+ u32 if2_datab2;
+ u32 reserve4;
+ u32 reserve5[20];
+ u32 treq1;
+ u32 treq2;
+ u32 reserve6[2];
+ u32 reserve7[56];
+ u32 reserve8[3];
+ u32 srst;
+};
+
+struct pch_can_priv {
+ struct can_priv can;
+ unsigned int can_num;
+ struct pci_dev *dev;
+ unsigned int tx_enable[MAX_MSG_OBJ];
+ unsigned int rx_enable[MAX_MSG_OBJ];
+ unsigned int rx_link[MAX_MSG_OBJ];
+ unsigned int int_enables;
+ unsigned int int_stat;
+ struct net_device *ndev;
+ spinlock_t msgif_reg_lock; /* Message Interface Registers Access Lock*/
+ unsigned int msg_obj[MAX_MSG_OBJ];
+ struct pch_can_regs __iomem *regs;
+ struct napi_struct napi;
+ unsigned int tx_obj; /* Point next Tx Obj index */
+ unsigned int use_msi;
+};
+
+static struct can_bittiming_const pch_can_bittiming_const = {
+ .name = KBUILD_MODNAME,
+ .tseg1_min = 1,
+ .tseg1_max = 16,
+ .tseg2_min = 1,
+ .tseg2_max = 8,
+ .sjw_max = 4,
+ .brp_min = 1,
+ .brp_max = 1024, /* 6bit + extended 4bit */
+ .brp_inc = 1,
+};
+
+static DEFINE_PCI_DEVICE_TABLE(pch_pci_tbl) = {
+ {PCI_VENDOR_ID_INTEL, 0x8818, PCI_ANY_ID, PCI_ANY_ID,},
+ {0,}
+};
+MODULE_DEVICE_TABLE(pci, pch_pci_tbl);
+
+static inline void pch_can_bit_set(void __iomem *addr, u32 mask)
+{
+ iowrite32(ioread32(addr) | mask, addr);
+}
+
+static inline void pch_can_bit_clear(void __iomem *addr, u32 mask)
+{
+ iowrite32(ioread32(addr) & ~mask, addr);
+}
+
+static void pch_can_set_run_mode(struct pch_can_priv *priv,
+ enum pch_can_mode mode)
+{
+ switch (mode) {
+ case PCH_CAN_RUN:
+ pch_can_bit_clear(&priv->regs->cont, CAN_CTRL_INIT);
+ break;
+
+ case PCH_CAN_STOP:
+ pch_can_bit_set(&priv->regs->cont, CAN_CTRL_INIT);
+ break;
+
+ default:
+ dev_err(&priv->ndev->dev, "%s -> Invalid Mode.\n", __func__);
+ break;
+ }
+}
+
+static void pch_can_set_optmode(struct pch_can_priv *priv)
+{
+ u32 reg_val = ioread32(&priv->regs->opt);
+
+ if (priv->can.ctrlmode & CAN_CTRLMODE_LISTENONLY)
+ reg_val |= CAN_OPT_SILENT;
+
+ if (priv->can.ctrlmode & CAN_CTRLMODE_LOOPBACK)
+ reg_val |= CAN_OPT_LBACK;
+
+ pch_can_bit_set(&priv->regs->cont, CAN_CTRL_OPT);
+ iowrite32(reg_val, &priv->regs->opt);
+}
+
+static void pch_can_set_int_custom(struct pch_can_priv *priv)
+{
+ /* Clearing the IE, SIE and EIE bits of Can control register. */
+ pch_can_bit_clear(&priv->regs->cont, CAN_CTRL_IE_SIE_EIE);
+
+ /* Appropriately setting them. */
+ pch_can_bit_set(&priv->regs->cont,
+ ((priv->int_enables & MSK_CTRL_IE_SIE_EIE) << 1));
+}
+
+/* This function retrieves the interrupts enabled for the CAN device. */
+static void pch_can_get_int_enables(struct pch_can_priv *priv, u32 *enables)
+{
+ /* Obtaining the status of IE, SIE and EIE interrupt bits. */
+ *enables = ((ioread32(&priv->regs->cont) & CAN_CTRL_IE_SIE_EIE) >> 1);
+}
+
+static void pch_can_set_int_enables(struct pch_can_priv *priv,
+ enum pch_can_mode interrupt_no)
+{
+ switch (interrupt_no) {
+ case PCH_CAN_ENABLE:
+ pch_can_bit_set(&priv->regs->cont, CAN_CTRL_IE);
+ break;
+
+ case PCH_CAN_DISABLE:
+ pch_can_bit_clear(&priv->regs->cont, CAN_CTRL_IE);
+ break;
+
+ case PCH_CAN_ALL:
+ pch_can_bit_set(&priv->regs->cont, CAN_CTRL_IE_SIE_EIE);
+ break;
+
+ case PCH_CAN_NONE:
+ pch_can_bit_clear(&priv->regs->cont, CAN_CTRL_IE_SIE_EIE);
+ break;
+
+ default:
+ dev_err(&priv->ndev->dev, "Invalid interrupt number.\n");
+ break;
+ }
+}
+
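+/*
+ * Kick a message-object transfer by writing its number to the given CREQ
+ * register, then poll the BUSY flag (for at most COUNTER_LIMIT microseconds)
+ * until the hardware has finished the transfer.
+ */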
+static void pch_can_check_if_busy(u32 __iomem *creq_addr, u32 num)
+{
+ u32 counter = COUNTER_LIMIT;
+ u32 ifx_creq;
+
+ iowrite32(num, creq_addr);
+ while (counter) {
+ ifx_creq = ioread32(creq_addr) & CAN_IF_CREQ_BUSY;
+ if (!ifx_creq)
+ break;
+ counter--;
+ udelay(1);
+ }
+ if (!counter)
+ pr_err("%s:IF1 BUSY Flag is set forever.\n", __func__);
+}
+
+static void pch_can_set_rx_enable(struct pch_can_priv *priv, u32 buff_num,
+ u32 set)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&priv->msgif_reg_lock, flags);
+ /* Reading the receive buffer data from RAM to Interface1 registers */
+ iowrite32(CAN_CMASK_RX_TX_GET, &priv->regs->if1_cmask);
+ pch_can_check_if_busy(&priv->regs->if1_creq, buff_num);
+
+ /* Setting the IF1MASK1 register to access MsgVal and RxIE bits */
+ iowrite32(CAN_CMASK_RDWR | CAN_CMASK_ARB | CAN_CMASK_CTRL,
+ &priv->regs->if1_cmask);
+
+ if (set == ENABLE) {
+ /* Setting the MsgVal and RxIE bits */
+ pch_can_bit_set(&priv->regs->if1_mcont, CAN_IF_MCONT_RXIE);
+ pch_can_bit_set(&priv->regs->if1_id2, CAN_ID_MSGVAL);
+
+ } else if (set == DISABLE) {
+ /* Resetting the MsgVal and RxIE bits */
+ pch_can_bit_clear(&priv->regs->if1_mcont, CAN_IF_MCONT_RXIE);
+ pch_can_bit_clear(&priv->regs->if1_id2, CAN_ID_MSGVAL);
+ }
+
+ pch_can_check_if_busy(&priv->regs->if1_creq, buff_num);
+ spin_unlock_irqrestore(&priv->msgif_reg_lock, flags);
+}
+
+static void pch_can_rx_enable_all(struct pch_can_priv *priv)
+{
+ int i;
+
+ /* Traverse the objects configured as receivers. */
+ for (i = 0; i < PCH_OBJ_NUM; i++) {
+ if (priv->msg_obj[i] == MSG_OBJ_RX)
+ pch_can_set_rx_enable(priv, i + 1, ENABLE);
+ }
+}
+
+static void pch_can_rx_disable_all(struct pch_can_priv *priv)
+{
+ int i;
+
+ /* Traverse the objects configured as receivers. */
+ for (i = 0; i < PCH_OBJ_NUM; i++) {
+ if (priv->msg_obj[i] == MSG_OBJ_RX)
+ pch_can_set_rx_enable(priv, i + 1, DISABLE);
+ }
+}
+
+static void pch_can_set_tx_enable(struct pch_can_priv *priv, u32 buff_num,
+ u32 set)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&priv->msgif_reg_lock, flags);
+ /* Reading the Msg buffer from Message RAM to Interface2 registers. */
+ iowrite32(CAN_CMASK_RX_TX_GET, &priv->regs->if2_cmask);
+ pch_can_check_if_busy(&priv->regs->if2_creq, buff_num);
+
+ /* Setting the IF2CMASK register to access the MsgVal and TxIE bits. */
+ iowrite32(CAN_CMASK_RDWR | CAN_CMASK_ARB | CAN_CMASK_CTRL,
+ &priv->regs->if2_cmask);
+
+ if (set == ENABLE) {
+ /* Setting the MsgVal and TxIE bits */
+ pch_can_bit_set(&priv->regs->if2_mcont, CAN_IF_MCONT_TXIE);
+ pch_can_bit_set(&priv->regs->if2_id2, CAN_ID_MSGVAL);
+ } else if (set == DISABLE) {
+ /* Resetting the MsgVal and TxIE bits. */
+ pch_can_bit_clear(&priv->regs->if2_mcont, CAN_IF_MCONT_TXIE);
+ pch_can_bit_clear(&priv->regs->if2_id2, CAN_ID_MSGVAL);
+ }
+
+ pch_can_check_if_busy(&priv->regs->if2_creq, buff_num);
+ spin_unlock_irqrestore(&priv->msgif_reg_lock, flags);
+}
+
+static void pch_can_tx_enable_all(struct pch_can_priv *priv)
+{
+ int i;
+
+ /* Traverse the objects configured as transmitters. */
+ for (i = 0; i < PCH_OBJ_NUM; i++) {
+ if (priv->msg_obj[i] == MSG_OBJ_TX)
+ pch_can_set_tx_enable(priv, i + 1, ENABLE);
+ }
+}
+
+static void pch_can_tx_disable_all(struct pch_can_priv *priv)
+{
+ int i;
+
+ /* Traverse the objects configured as transmitters. */
+ for (i = 0; i < PCH_OBJ_NUM; i++) {
+ if (priv->msg_obj[i] == MSG_OBJ_TX)
+ pch_can_set_tx_enable(priv, i + 1, DISABLE);
+ }
+}
+
+static void pch_can_get_rx_enable(struct pch_can_priv *priv, u32 buff_num,
+ u32 *enable)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&priv->msgif_reg_lock, flags);
+ iowrite32(CAN_CMASK_RX_TX_GET, &priv->regs->if1_cmask);
+ pch_can_check_if_busy(&priv->regs->if1_creq, buff_num);
+
+ if (((ioread32(&priv->regs->if1_id2)) & CAN_ID_MSGVAL) &&
+ ((ioread32(&priv->regs->if1_mcont)) &
+ CAN_IF_MCONT_RXIE))
+ *enable = ENABLE;
+ else
+ *enable = DISABLE;
+ spin_unlock_irqrestore(&priv->msgif_reg_lock, flags);
+}
+
+static void pch_can_get_tx_enable(struct pch_can_priv *priv, u32 buff_num,
+ u32 *enable)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&priv->msgif_reg_lock, flags);
+ iowrite32(CAN_CMASK_RX_TX_GET, &priv->regs->if2_cmask);
+ pch_can_check_if_busy(&priv->regs->if2_creq, buff_num);
+
+ if (((ioread32(&priv->regs->if2_id2)) & CAN_ID_MSGVAL) &&
+ ((ioread32(&priv->regs->if2_mcont)) &
+ CAN_IF_MCONT_TXIE)) {
+ *enable = ENABLE;
+ } else {
+ *enable = DISABLE;
+ }
+ spin_unlock_irqrestore(&priv->msgif_reg_lock, flags);
+}
+
+static int pch_can_int_pending(struct pch_can_priv *priv)
+{
+ return ioread32(&priv->regs->intr) & 0xffff;
+}
+
+static void pch_can_set_rx_buffer_link(struct pch_can_priv *priv,
+ u32 buffer_num, u32 set)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&priv->msgif_reg_lock, flags);
+ iowrite32(CAN_CMASK_RX_TX_GET, &priv->regs->if1_cmask);
+ pch_can_check_if_busy(&priv->regs->if1_creq, buffer_num);
+ iowrite32(CAN_CMASK_RDWR | CAN_CMASK_CTRL, &priv->regs->if1_cmask);
+ if (set == ENABLE)
+ pch_can_bit_clear(&priv->regs->if1_mcont, CAN_IF_MCONT_EOB);
+ else
+ pch_can_bit_set(&priv->regs->if1_mcont, CAN_IF_MCONT_EOB);
+
+ pch_can_check_if_busy(&priv->regs->if1_creq, buffer_num);
+ spin_unlock_irqrestore(&priv->msgif_reg_lock, flags);
+}
+
+static void pch_can_get_rx_buffer_link(struct pch_can_priv *priv,
+ u32 buffer_num, u32 *link)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&priv->msgif_reg_lock, flags);
+ iowrite32(CAN_CMASK_RX_TX_GET, &priv->regs->if1_cmask);
+ pch_can_check_if_busy(&priv->regs->if1_creq, buffer_num);
+
+ if (ioread32(&priv->regs->if1_mcont) & CAN_IF_MCONT_EOB)
+ *link = DISABLE;
+ else
+ *link = ENABLE;
+ spin_unlock_irqrestore(&priv->msgif_reg_lock, flags);
+}
+
+static void pch_can_clear_buffers(struct pch_can_priv *priv)
+{
+ int i;
+
+ for (i = 0; i < PCH_RX_OBJ_NUM; i++) {
+ iowrite32(CAN_CMASK_RX_TX_SET, &priv->regs->if1_cmask);
+ iowrite32(0xffff, &priv->regs->if1_mask1);
+ iowrite32(0xffff, &priv->regs->if1_mask2);
+ iowrite32(0x0, &priv->regs->if1_id1);
+ iowrite32(0x0, &priv->regs->if1_id2);
+ iowrite32(0x0, &priv->regs->if1_mcont);
+ iowrite32(0x0, &priv->regs->if1_dataa1);
+ iowrite32(0x0, &priv->regs->if1_dataa2);
+ iowrite32(0x0, &priv->regs->if1_datab1);
+ iowrite32(0x0, &priv->regs->if1_datab2);
+ iowrite32(CAN_CMASK_RDWR | CAN_CMASK_MASK |
+ CAN_CMASK_ARB | CAN_CMASK_CTRL,
+ &priv->regs->if1_cmask);
+ pch_can_check_if_busy(&priv->regs->if1_creq, i+1);
+ }
+
+ for (i = i; i < PCH_OBJ_NUM; i++) {
+ iowrite32(CAN_CMASK_RX_TX_SET, &priv->regs->if2_cmask);
+ iowrite32(0xffff, &priv->regs->if2_mask1);
+ iowrite32(0xffff, &priv->regs->if2_mask2);
+ iowrite32(0x0, &priv->regs->if2_id1);
+ iowrite32(0x0, &priv->regs->if2_id2);
+ iowrite32(0x0, &priv->regs->if2_mcont);
+ iowrite32(0x0, &priv->regs->if2_dataa1);
+ iowrite32(0x0, &priv->regs->if2_dataa2);
+ iowrite32(0x0, &priv->regs->if2_datab1);
+ iowrite32(0x0, &priv->regs->if2_datab2);
+ iowrite32(CAN_CMASK_RDWR | CAN_CMASK_MASK |
+ CAN_CMASK_ARB | CAN_CMASK_CTRL,
+ &priv->regs->if2_cmask);
+ pch_can_check_if_busy(&priv->regs->if2_creq, i+1);
+ }
+}
+
+static void pch_can_config_rx_tx_buffers(struct pch_can_priv *priv)
+{
+ int i;
+ unsigned long flags;
+
+ spin_lock_irqsave(&priv->msgif_reg_lock, flags);
+
+ for (i = 0; i < PCH_OBJ_NUM; i++) {
+ if (priv->msg_obj[i] == MSG_OBJ_RX) {
+ iowrite32(CAN_CMASK_RX_TX_GET,
+ &priv->regs->if1_cmask);
+ pch_can_check_if_busy(&priv->regs->if1_creq, i+1);
+
+ iowrite32(0x0, &priv->regs->if1_id1);
+ iowrite32(0x0, &priv->regs->if1_id2);
+
+ pch_can_bit_set(&priv->regs->if1_mcont,
+ CAN_IF_MCONT_UMASK);
+
+ /* FIFO mode: clear EoB for every Rx object except the last one. */
+ pch_can_bit_clear(&priv->regs->if1_mcont,
+ CAN_IF_MCONT_EOB);
+ /* In FIFO mode, the EoB bit of the last Rx object must be set. */
+ if (i == (PCH_RX_OBJ_NUM - 1))
+ pch_can_bit_set(&priv->regs->if1_mcont,
+ CAN_IF_MCONT_EOB);
+
+ iowrite32(0, &priv->regs->if1_mask1);
+ pch_can_bit_clear(&priv->regs->if1_mask2,
+ 0x1fff | CAN_MASK2_MDIR_MXTD);
+
+ /* Setting CMASK for writing */
+ iowrite32(CAN_CMASK_RDWR | CAN_CMASK_MASK |
+ CAN_CMASK_ARB | CAN_CMASK_CTRL,
+ &priv->regs->if1_cmask);
+
+ pch_can_check_if_busy(&priv->regs->if1_creq, i+1);
+ } else if (priv->msg_obj[i] == MSG_OBJ_TX) {
+ iowrite32(CAN_CMASK_RX_TX_GET,
+ &priv->regs->if2_cmask);
+ pch_can_check_if_busy(&priv->regs->if2_creq, i+1);
+
+ /* Resetting DIR bit for reception */
+ iowrite32(0x0, &priv->regs->if2_id1);
+ iowrite32(0x0, &priv->regs->if2_id2);
+ pch_can_bit_set(&priv->regs->if2_id2, CAN_ID2_DIR);
+
+ /* Setting EOB bit for transmitter */
+ iowrite32(CAN_IF_MCONT_EOB, &priv->regs->if2_mcont);
+
+ pch_can_bit_set(&priv->regs->if2_mcont,
+ CAN_IF_MCONT_UMASK);
+
+ iowrite32(0, &priv->regs->if2_mask1);
+ pch_can_bit_clear(&priv->regs->if2_mask2, 0x1fff);
+
+ /* Setting CMASK for writing */
+ iowrite32(CAN_CMASK_RDWR | CAN_CMASK_MASK |
+ CAN_CMASK_ARB | CAN_CMASK_CTRL,
+ &priv->regs->if2_cmask);
+
+ pch_can_check_if_busy(&priv->regs->if2_creq, i+1);
+ }
+ }
+ spin_unlock_irqrestore(&priv->msgif_reg_lock, flags);
+}
+
+static void pch_can_init(struct pch_can_priv *priv)
+{
+ /* Stopping the CAN device. */
+ pch_can_set_run_mode(priv, PCH_CAN_STOP);
+
+ /* Clearing all the message object buffers. */
+ pch_can_clear_buffers(priv);
+
+ /* Configure each message object as either an Rx or a Tx object. */
+ pch_can_config_rx_tx_buffers(priv);
+
+ /* Enabling the interrupts. */
+ pch_can_set_int_enables(priv, PCH_CAN_ALL);
+}
+
+static void pch_can_release(struct pch_can_priv *priv)
+{
+ /* Stopping the CAN device. */
+ pch_can_set_run_mode(priv, PCH_CAN_STOP);
+
+ /* Disabling the interrupts. */
+ pch_can_set_int_enables(priv, PCH_CAN_NONE);
+
+ /* Disabling all the receive objects. */
+ pch_can_rx_disable_all(priv);
+
+ /* Disabling all the transmit objects. */
+ pch_can_tx_disable_all(priv);
+}
+
+/* This function clears interrupt(s) from the CAN device. */
+static void pch_can_int_clr(struct pch_can_priv *priv, u32 mask)
+{
+ if (mask == CAN_STATUS_INT) {
+ ioread32(&priv->regs->stat);
+ return;
+ }
+
+ /* Clear interrupt for transmit object */
+ if (priv->msg_obj[mask - 1] == MSG_OBJ_TX) {
+ /* Setting CMASK for clearing interrupts for frame transmission. */
+ iowrite32(CAN_CMASK_RDWR | CAN_CMASK_CTRL | CAN_CMASK_ARB,
+ &priv->regs->if2_cmask);
+
+ /* Resetting the ID registers. */
+ pch_can_bit_set(&priv->regs->if2_id2,
+ CAN_ID2_DIR | (0x7ff << 2));
+ iowrite32(0x0, &priv->regs->if2_id1);
+
+ /* Clearing NewDat, TxRqst & IntPnd */
+ pch_can_bit_clear(&priv->regs->if2_mcont,
+ CAN_IF_MCONT_NEWDAT | CAN_IF_MCONT_INTPND |
+ CAN_IF_MCONT_TXRQXT);
+ pch_can_check_if_busy(&priv->regs->if2_creq, mask);
+ } else if (priv->msg_obj[mask - 1] == MSG_OBJ_RX) {
+ /* Setting CMASK for clearing the reception interrupts. */
+ iowrite32(CAN_CMASK_RDWR | CAN_CMASK_CTRL | CAN_CMASK_ARB,
+ &priv->regs->if1_cmask);
+
+ /* Clearing the Dir bit. */
+ pch_can_bit_clear(&priv->regs->if1_id2, CAN_ID2_DIR);
+
+ /* Clearing NewDat & IntPnd */
+ pch_can_bit_clear(&priv->regs->if1_mcont,
+ CAN_IF_MCONT_NEWDAT | CAN_IF_MCONT_INTPND);
+
+ pch_can_check_if_busy(&priv->regs->if1_creq, mask);
+ }
+}
+
+static int pch_can_get_buffer_status(struct pch_can_priv *priv)
+{
+ return (ioread32(&priv->regs->treq1) & 0xffff) |
+ ((ioread32(&priv->regs->treq2) & 0xffff) << 16);
+}
+
+static void pch_can_reset(struct pch_can_priv *priv)
+{
+ /* write to sw reset register */
+ iowrite32(1, &priv->regs->srst);
+ iowrite32(0, &priv->regs->srst);
+}
+
+static void pch_can_error(struct net_device *ndev, u32 status)
+{
+ struct sk_buff *skb;
+ struct pch_can_priv *priv = netdev_priv(ndev);
+ struct can_frame *cf;
+ u32 errc;
+ struct net_device_stats *stats = &(priv->ndev->stats);
+ enum can_state state = priv->can.state;
+
+ skb = alloc_can_err_skb(ndev, &cf);
+ if (!skb)
+ return;
+
+ if (status & PCH_BUS_OFF) {
+ pch_can_tx_disable_all(priv);
+ pch_can_rx_disable_all(priv);
+ state = CAN_STATE_BUS_OFF;
+ cf->can_id |= CAN_ERR_BUSOFF;
+ can_bus_off(ndev);
+ pch_can_set_run_mode(priv, PCH_CAN_RUN);
+ dev_err(&ndev->dev, "%s -> Bus Off occurres.\n", __func__);
+ }
+
+ /* Warning interrupt. */
+ if (status & PCH_EWARN) {
+ state = CAN_STATE_ERROR_WARNING;
+ priv->can.can_stats.error_warning++;
+ cf->can_id |= CAN_ERR_CRTL;
+ errc = ioread32(&priv->regs->errc);
+ if (((errc & CAN_REC) >> 8) > 96)
+ cf->data[1] |= CAN_ERR_CRTL_RX_WARNING;
+ if ((errc & CAN_TEC) > 96)
+ cf->data[1] |= CAN_ERR_CRTL_TX_WARNING;
+ dev_warn(&ndev->dev,
+ "%s -> Error Counter is more than 96.\n", __func__);
+ }
+ /* Error passive interrupt. */
+ if (status & PCH_EPASSIV) {
+ priv->can.can_stats.error_passive++;
+ state = CAN_STATE_ERROR_PASSIVE;
+ cf->can_id |= CAN_ERR_CRTL;
+ errc = ioread32(&priv->regs->errc);
+ if (((errc & CAN_REC) >> 8) > 127)
+ cf->data[1] |= CAN_ERR_CRTL_RX_PASSIVE;
+ if ((errc & CAN_TEC) > 127)
+ cf->data[1] |= CAN_ERR_CRTL_TX_PASSIVE;
+ dev_err(&ndev->dev,
+ "%s -> CAN controller is ERROR PASSIVE .\n", __func__);
+ }
+
+ if (status & PCH_LEC_ALL) {
+ priv->can.can_stats.bus_error++;
+ stats->rx_errors++;
+ switch (status & PCH_LEC_ALL) {
+ case PCH_STUF_ERR:
+ cf->data[2] |= CAN_ERR_PROT_STUFF;
+ break;
+ case PCH_FORM_ERR:
+ cf->data[2] |= CAN_ERR_PROT_FORM;
+ break;
+ case PCH_ACK_ERR:
+ cf->data[2] |= CAN_ERR_PROT_LOC_ACK |
+ CAN_ERR_PROT_LOC_ACK_DEL;
+ break;
+ case PCH_BIT1_ERR:
+ case PCH_BIT0_ERR:
+ cf->data[2] |= CAN_ERR_PROT_BIT;
+ break;
+ case PCH_CRC_ERR:
+ cf->data[2] |= CAN_ERR_PROT_LOC_CRC_SEQ |
+ CAN_ERR_PROT_LOC_CRC_DEL;
+ break;
+ default:
+ iowrite32(status | PCH_LEC_ALL, &priv->regs->stat);
+ break;
+ }
+
+ }
+
+ priv->can.state = state;
+ netif_rx(skb);
+
+ stats->rx_packets++;
+ stats->rx_bytes += cf->can_dlc;
+}
+
+static irqreturn_t pch_can_interrupt(int irq, void *dev_id)
+{
+ struct net_device *ndev = (struct net_device *)dev_id;
+ struct pch_can_priv *priv = netdev_priv(ndev);
+
+ pch_can_set_int_enables(priv, PCH_CAN_NONE);
+
+ napi_schedule(&priv->napi);
+
+ return IRQ_HANDLED;
+}
+
+static int pch_can_rx_normal(struct net_device *ndev, u32 int_stat)
+{
+ u32 reg;
+ canid_t id;
+ u32 ide;
+ u32 rtr;
+ int i, j, k;
+ int rcv_pkts = 0;
+ struct sk_buff *skb;
+ struct can_frame *cf;
+ struct pch_can_priv *priv = netdev_priv(ndev);
+ struct net_device_stats *stats = &(priv->ndev->stats);
+
+ /* Reading the message object from the Message RAM */
+ iowrite32(CAN_CMASK_RX_TX_GET, &priv->regs->if1_cmask);
+ pch_can_check_if_busy(&priv->regs->if1_creq, int_stat);
+
+ /* Reading the MCONT register. */
+ reg = ioread32(&priv->regs->if1_mcont);
+ reg &= 0xffff;
+
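+ /*
+ * Walk the receive FIFO starting at the interrupting message object and
+ * stop once an object with the End-of-Buffer (EOB) bit set is reached.
+ */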
+ for (k = int_stat; !(reg & CAN_IF_MCONT_EOB); k++) {
+ /* If MsgLost bit set. */
+ if (reg & CAN_IF_MCONT_MSGLOST) {
+ dev_err(&priv->ndev->dev, "Msg Obj is overwritten.\n");
+ pch_can_bit_clear(&priv->regs->if1_mcont,
+ CAN_IF_MCONT_MSGLOST);
+ iowrite32(CAN_CMASK_RDWR | CAN_CMASK_CTRL,
+ &priv->regs->if1_cmask);
+ pch_can_check_if_busy(&priv->regs->if1_creq, k);
+
+ skb = alloc_can_err_skb(ndev, &cf);
+ if (!skb)
+ return -ENOMEM;
+
+ priv->can.can_stats.error_passive++;
+ priv->can.state = CAN_STATE_ERROR_PASSIVE;
+ cf->can_id |= CAN_ERR_CRTL;
+ cf->data[1] |= CAN_ERR_CRTL_RX_OVERFLOW;
+ cf->data[2] |= CAN_ERR_PROT_OVERLOAD;
+ stats->rx_packets++;
+ stats->rx_bytes += cf->can_dlc;
+
+ netif_receive_skb(skb);
+ rcv_pkts++;
+ goto RX_NEXT;
+ }
+ if (!(reg & CAN_IF_MCONT_NEWDAT))
+ goto RX_NEXT;
+
+ skb = alloc_can_skb(priv->ndev, &cf);
+ if (!skb)
+ return -ENOMEM;
+
+ /* Get Received data */
+ ide = ((ioread32(&priv->regs->if1_id2)) & CAN_ID2_XTD) >> 14;
+ if (ide) {
+ id = (ioread32(&priv->regs->if1_id1) & 0xffff);
+ id |= (((ioread32(&priv->regs->if1_id2)) &
+ 0x1fff) << 16);
+ cf->can_id = (id & CAN_EFF_MASK) | CAN_EFF_FLAG;
+ } else {
+ id = (((ioread32(&priv->regs->if1_id2)) &
+ (CAN_SFF_MASK << 2)) >> 2);
+ cf->can_id = (id & CAN_SFF_MASK);
+ }
+
+ rtr = (ioread32(&priv->regs->if1_id2) & CAN_ID2_DIR);
+ if (rtr) {
+ cf->can_dlc = 0;
+ cf->can_id |= CAN_RTR_FLAG;
+ } else {
+ cf->can_dlc = ((ioread32(&priv->regs->if1_mcont)) &
+ 0x0f);
+ }
+
+ for (i = 0, j = 0; i < cf->can_dlc; j++) {
+ reg = ioread32(&priv->regs->if1_dataa1 + j*4);
+ cf->data[i++] = cpu_to_le32(reg & 0xff);
+ if (i == cf->can_dlc)
+ break;
+ cf->data[i++] = cpu_to_le32((reg >> 8) & 0xff);
+ }
+
+ netif_receive_skb(skb);
+ rcv_pkts++;
+ stats->rx_packets++;
+ stats->rx_bytes += cf->can_dlc;
+
+ if (k < PCH_FIFO_THRESH) {
+ iowrite32(CAN_CMASK_RDWR | CAN_CMASK_CTRL |
+ CAN_CMASK_ARB, &priv->regs->if1_cmask);
+
+ /* Clearing the Dir bit. */
+ pch_can_bit_clear(&priv->regs->if1_id2, CAN_ID2_DIR);
+
+ /* Clearing NewDat & IntPnd */
+ pch_can_bit_clear(&priv->regs->if1_mcont,
+ CAN_IF_MCONT_INTPND);
+ pch_can_check_if_busy(&priv->regs->if1_creq, k);
+ } else if (k > PCH_FIFO_THRESH) {
+ pch_can_int_clr(priv, k);
+ } else if (k == PCH_FIFO_THRESH) {
+ int cnt;
+ for (cnt = 0; cnt < PCH_FIFO_THRESH; cnt++)
+ pch_can_int_clr(priv, cnt+1);
+ }
+RX_NEXT:
+ /* Reading the message object from the Message RAM */
+ iowrite32(CAN_CMASK_RX_TX_GET, &priv->regs->if1_cmask);
+ pch_can_check_if_busy(&priv->regs->if1_creq, k + 1);
+ reg = ioread32(&priv->regs->if1_mcont);
+ }
+
+ return rcv_pkts;
+}
+static int pch_can_rx_poll(struct napi_struct *napi, int quota)
+{
+ struct net_device *ndev = napi->dev;
+ struct pch_can_priv *priv = netdev_priv(ndev);
+ struct net_device_stats *stats = &(priv->ndev->stats);
+ u32 dlc;
+ u32 int_stat;
+ int rcv_pkts = 0;
+ u32 reg_stat;
+ unsigned long flags;
+
+ int_stat = pch_can_int_pending(priv);
+ if (!int_stat)
+ return 0;
+
+INT_STAT:
+ if (int_stat == CAN_STATUS_INT) {
+ reg_stat = ioread32(&priv->regs->stat);
+ if (reg_stat & (PCH_BUS_OFF | PCH_LEC_ALL)) {
+ if ((reg_stat & PCH_LEC_ALL) != PCH_LEC_ALL)
+ pch_can_error(ndev, reg_stat);
+ }
+
+ if (reg_stat & PCH_TX_OK) {
+ spin_lock_irqsave(&priv->msgif_reg_lock, flags);
+ iowrite32(CAN_CMASK_RX_TX_GET, &priv->regs->if2_cmask);
+ pch_can_check_if_busy(&priv->regs->if2_creq,
+ ioread32(&priv->regs->intr));
+ spin_unlock_irqrestore(&priv->msgif_reg_lock, flags);
+ pch_can_bit_clear(&priv->regs->stat, PCH_TX_OK);
+ }
+
+ if (reg_stat & PCH_RX_OK)
+ pch_can_bit_clear(&priv->regs->stat, PCH_RX_OK);
+
+ int_stat = pch_can_int_pending(priv);
+ if (int_stat == CAN_STATUS_INT)
+ goto INT_STAT;
+ }
+
+MSG_OBJ:
+ if ((int_stat >= 1) && (int_stat <= PCH_RX_OBJ_NUM)) {
+ spin_lock_irqsave(&priv->msgif_reg_lock, flags);
+ rcv_pkts = pch_can_rx_normal(ndev, int_stat);
+ spin_unlock_irqrestore(&priv->msgif_reg_lock, flags);
+ if (rcv_pkts < 0)
+ return 0;
+ } else if ((int_stat > PCH_RX_OBJ_NUM) && (int_stat <= PCH_OBJ_NUM)) {
+ if (priv->msg_obj[int_stat - 1] == MSG_OBJ_TX) {
+ /* Handle transmission interrupt */
+ can_get_echo_skb(ndev, int_stat - PCH_RX_OBJ_NUM - 1);
+ spin_lock_irqsave(&priv->msgif_reg_lock, flags);
+ iowrite32(CAN_CMASK_RX_TX_GET | CAN_CMASK_CLRINTPND,
+ &priv->regs->if2_cmask);
+ dlc = ioread32(&priv->regs->if2_mcont) &
+ CAN_IF_MCONT_DLC;
+ pch_can_check_if_busy(&priv->regs->if2_creq, int_stat);
+ spin_unlock_irqrestore(&priv->msgif_reg_lock, flags);
+ if (dlc > 8)
+ dlc = 8;
+ stats->tx_bytes += dlc;
+ stats->tx_packets++;
+ }
+ }
+
+ int_stat = pch_can_int_pending(priv);
+ if (int_stat == CAN_STATUS_INT)
+ goto INT_STAT;
+ else if (int_stat >= 1 && int_stat <= 32)
+ goto MSG_OBJ;
+
+ napi_complete(napi);
+ pch_can_set_int_enables(priv, PCH_CAN_ALL);
+
+ return rcv_pkts;
+}
+
+static int pch_set_bittiming(struct net_device *ndev)
+{
+ struct pch_can_priv *priv = netdev_priv(ndev);
+ const struct can_bittiming *bt = &priv->can.bittiming;
+ u32 canbit;
+ u32 bepe;
+ u32 brp;
+
+ /* Setting the CCE bit for accessing the Can Timing register. */
+ pch_can_bit_set(&priv->regs->cont, CAN_CTRL_CCE);
+
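+ /*
+ * Bit-rate prescaler: the time quantum (in ns) divided by the CAN clock
+ * period (1e9 / PCH_CAN_CLK = 20 ns at 50 MHz), minus one.
+ */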
+ brp = (bt->tq) / (1000000000/PCH_CAN_CLK) - 1;
+ canbit = brp & MSK_BITT_BRP;
+ canbit |= (bt->sjw - 1) << BIT_BITT_SJW;
+ canbit |= (bt->phase_seg1 + bt->prop_seg - 1) << BIT_BITT_TSEG1;
+ canbit |= (bt->phase_seg2 - 1) << BIT_BITT_TSEG2;
+ bepe = (brp & MSK_BRPE_BRPE) >> BIT_BRPE_BRPE;
+ iowrite32(canbit, &priv->regs->bitt);
+ iowrite32(bepe, &priv->regs->brpe);
+ pch_can_bit_clear(&priv->regs->cont, CAN_CTRL_CCE);
+
+ return 0;
+}
+
+static void pch_can_start(struct net_device *ndev)
+{
+ struct pch_can_priv *priv = netdev_priv(ndev);
+
+ if (priv->can.state != CAN_STATE_STOPPED)
+ pch_can_reset(priv);
+
+ pch_set_bittiming(ndev);
+ pch_can_set_optmode(priv);
+
+ pch_can_tx_enable_all(priv);
+ pch_can_rx_enable_all(priv);
+
+ /* Setting the CAN to run mode. */
+ pch_can_set_run_mode(priv, PCH_CAN_RUN);
+
+ priv->can.state = CAN_STATE_ERROR_ACTIVE;
+
+ return;
+}
+
+static int pch_can_do_set_mode(struct net_device *ndev, enum can_mode mode)
+{
+ int ret = 0;
+
+ switch (mode) {
+ case CAN_MODE_START:
+ pch_can_start(ndev);
+ netif_wake_queue(ndev);
+ break;
+ default:
+ ret = -EOPNOTSUPP;
+ break;
+ }
+
+ return ret;
+}
+
+static int pch_can_open(struct net_device *ndev)
+{
+ struct pch_can_priv *priv = netdev_priv(ndev);
+ int retval;
+
+ retval = pci_enable_msi(priv->dev);
+ if (retval) {
+ dev_info(&ndev->dev, "PCH CAN opened without MSI\n");
+ priv->use_msi = 0;
+ } else {
+ dev_info(&ndev->dev, "PCH CAN opened with MSI\n");
+ priv->use_msi = 1;
+ }
+
+ /* Registering the interrupt. */
+ retval = request_irq(priv->dev->irq, pch_can_interrupt, IRQF_SHARED,
+ ndev->name, ndev);
+ if (retval) {
+ dev_err(&ndev->dev, "request_irq failed.\n");
+ goto req_irq_err;
+ }
+
+ /* Open common can device */
+ retval = open_candev(ndev);
+ if (retval) {
+ dev_err(ndev->dev.parent, "open_candev() failed %d\n", retval);
+ goto err_open_candev;
+ }
+
+ pch_can_init(priv);
+ pch_can_start(ndev);
+ napi_enable(&priv->napi);
+ netif_start_queue(ndev);
+
+ return 0;
+
+err_open_candev:
+ free_irq(priv->dev->irq, ndev);
+req_irq_err:
+ if (priv->use_msi)
+ pci_disable_msi(priv->dev);
+
+ pch_can_release(priv);
+
+ return retval;
+}
+
+static int pch_close(struct net_device *ndev)
+{
+ struct pch_can_priv *priv = netdev_priv(ndev);
+
+ netif_stop_queue(ndev);
+ napi_disable(&priv->napi);
+ pch_can_release(priv);
+ free_irq(priv->dev->irq, ndev);
+ if (priv->use_msi)
+ pci_disable_msi(priv->dev);
+ close_candev(ndev);
+ priv->can.state = CAN_STATE_STOPPED;
+ return 0;
+}
+
+static int pch_get_msg_obj_sts(struct net_device *ndev, u32 obj_id)
+{
+ u32 buffer_status = 0;
+ struct pch_can_priv *priv = netdev_priv(ndev);
+
+ /* Getting the message object status. */
+ buffer_status = (u32) pch_can_get_buffer_status(priv);
+
+ return buffer_status & obj_id;
+}
+
+
+static netdev_tx_t pch_xmit(struct sk_buff *skb, struct net_device *ndev)
+{
+ int i, j;
+ unsigned long flags;
+ struct pch_can_priv *priv = netdev_priv(ndev);
+ struct can_frame *cf = (struct can_frame *)skb->data;
+ int tx_buffer_avail = 0;
+
+ if (can_dropped_invalid_skb(ndev, skb))
+ return NETDEV_TX_OK;
+
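+ /*
+ * Tx message objects are used round-robin. Once the index runs past the
+ * last Tx object, wait for all pending transmissions to drain, then wrap
+ * back to the first Tx object.
+ */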
+ if (priv->tx_obj == (PCH_OBJ_NUM + 1)) { /* Point tail Obj */
+ while (pch_get_msg_obj_sts(ndev, (((1 << PCH_TX_OBJ_NUM)-1) <<
+ PCH_RX_OBJ_NUM)))
+ udelay(500);
+
+ priv->tx_obj = PCH_RX_OBJ_NUM + 1; /* Point head of Tx Obj ID */
+ tx_buffer_avail = priv->tx_obj; /* Point Tail of Tx Obj */
+ } else {
+ tx_buffer_avail = priv->tx_obj;
+ }
+ priv->tx_obj++;
+
+ /* Attaining the lock. */
+ spin_lock_irqsave(&priv->msgif_reg_lock, flags);
+
+ /* Reading the Msg Obj from the Msg RAM to the Interface register. */
+ iowrite32(CAN_CMASK_RX_TX_GET, &priv->regs->if2_cmask);
+ pch_can_check_if_busy(&priv->regs->if2_creq, tx_buffer_avail);
+
+ /* Setting the CMASK register. */
+ pch_can_bit_set(&priv->regs->if2_cmask, CAN_CMASK_ALL);
+
+ /* If ID extended is set. */
+ pch_can_bit_clear(&priv->regs->if2_id1, 0xffff);
+ pch_can_bit_clear(&priv->regs->if2_id2, 0x1fff | CAN_ID2_XTD);
+ if (cf->can_id & CAN_EFF_FLAG) {
+ pch_can_bit_set(&priv->regs->if2_id1, cf->can_id & 0xffff);
+ pch_can_bit_set(&priv->regs->if2_id2,
+ ((cf->can_id >> 16) & 0x1fff) | CAN_ID2_XTD);
+ } else {
+ pch_can_bit_set(&priv->regs->if2_id1, 0);
+ pch_can_bit_set(&priv->regs->if2_id2,
+ (cf->can_id & CAN_SFF_MASK) << 2);
+ }
+
+ /* If remote frame has to be transmitted.. */
+ if (cf->can_id & CAN_RTR_FLAG)
+ pch_can_bit_clear(&priv->regs->if2_id2, CAN_ID2_DIR);
+
+ for (i = 0, j = 0; i < cf->can_dlc; j++) {
+ iowrite32(le32_to_cpu(cf->data[i++]),
+ (&priv->regs->if2_dataa1) + j*4);
+ if (i == cf->can_dlc)
+ break;
+ iowrite32(le32_to_cpu(cf->data[i++] << 8),
+ (&priv->regs->if2_dataa1) + j*4);
+ }
+
+ can_put_echo_skb(skb, ndev, tx_buffer_avail - PCH_RX_OBJ_NUM - 1);
+
+ /* Updating the size of the data. */
+ pch_can_bit_clear(&priv->regs->if2_mcont, 0x0f);
+ pch_can_bit_set(&priv->regs->if2_mcont, cf->can_dlc);
+
+ /* Clearing IntPend, NewDat & TxRqst */
+ pch_can_bit_clear(&priv->regs->if2_mcont,
+ CAN_IF_MCONT_NEWDAT | CAN_IF_MCONT_INTPND |
+ CAN_IF_MCONT_TXRQXT);
+
+ /* Setting NewDat, TxRqst bits */
+ pch_can_bit_set(&priv->regs->if2_mcont,
+ CAN_IF_MCONT_NEWDAT | CAN_IF_MCONT_TXRQXT);
+
+ pch_can_check_if_busy(&priv->regs->if2_creq, tx_buffer_avail);
+
+ spin_unlock_irqrestore(&priv->msgif_reg_lock, flags);
+
+ return NETDEV_TX_OK;
+}
+
+static const struct net_device_ops pch_can_netdev_ops = {
+ .ndo_open = pch_can_open,
+ .ndo_stop = pch_close,
+ .ndo_start_xmit = pch_xmit,
+};
+
+static void __devexit pch_can_remove(struct pci_dev *pdev)
+{
+ struct net_device *ndev = pci_get_drvdata(pdev);
+ struct pch_can_priv *priv = netdev_priv(ndev);
+
+ unregister_candev(priv->ndev);
+ free_candev(priv->ndev);
+ pci_iounmap(pdev, priv->regs);
+ pci_release_regions(pdev);
+ pci_disable_device(pdev);
+ pci_set_drvdata(pdev, NULL);
+ pch_can_reset(priv);
+}
+
+#ifdef CONFIG_PM
+static int pch_can_suspend(struct pci_dev *pdev, pm_message_t state)
+{
+ int i; /* Counter variable. */
+ int retval; /* Return value. */
+ u32 buf_stat; /* Variable for reading the transmit buffer status. */
+ u32 counter = 0xFFFFFF;
+
+ struct net_device *dev = pci_get_drvdata(pdev);
+ struct pch_can_priv *priv = netdev_priv(dev);
+
+ /* Stop the CAN controller */
+ pch_can_set_run_mode(priv, PCH_CAN_STOP);
+
+ /* Indicate that we are about to enter suspend. */
+ priv->can.state = CAN_STATE_SLEEPING;
+
+ /* Waiting for all transmission to complete. */
+ while (counter) {
+ buf_stat = pch_can_get_buffer_status(priv);
+ if (!buf_stat)
+ break;
+ counter--;
+ udelay(1);
+ }
+ if (!counter)
+ dev_err(&pdev->dev, "%s -> Transmission time out.\n", __func__);
+
+ /* Save interrupt configuration and then disable them */
+ pch_can_get_int_enables(priv, &(priv->int_enables));
+ pch_can_set_int_enables(priv, PCH_CAN_DISABLE);
+
+ /* Save Tx buffer enable state */
+ for (i = 0; i < PCH_OBJ_NUM; i++) {
+ if (priv->msg_obj[i] == MSG_OBJ_TX)
+ pch_can_get_tx_enable(priv, i + 1,
+ &(priv->tx_enable[i]));
+ }
+
+ /* Disable all Transmit buffers */
+ pch_can_tx_disable_all(priv);
+
+ /* Save Rx buffer enable state */
+ for (i = 0; i < PCH_OBJ_NUM; i++) {
+ if (priv->msg_obj[i] == MSG_OBJ_RX) {
+ pch_can_get_rx_enable(priv, i + 1,
+ &(priv->rx_enable[i]));
+ pch_can_get_rx_buffer_link(priv, i + 1,
+ &(priv->rx_link[i]));
+ }
+ }
+
+ /* Disable all Receive buffers */
+ pch_can_rx_disable_all(priv);
+ retval = pci_save_state(pdev);
+ if (retval) {
+ dev_err(&pdev->dev, "pci_save_state failed.\n");
+ } else {
+ pci_enable_wake(pdev, PCI_D3hot, 0);
+ pci_disable_device(pdev);
+ pci_set_power_state(pdev, pci_choose_state(pdev, state));
+ }
+
+ return retval;
+}
+
+static int pch_can_resume(struct pci_dev *pdev)
+{
+ int i; /* Counter variable. */
+ int retval; /* Return variable. */
+ struct net_device *dev = pci_get_drvdata(pdev);
+ struct pch_can_priv *priv = netdev_priv(dev);
+
+ pci_set_power_state(pdev, PCI_D0);
+ pci_restore_state(pdev);
+ retval = pci_enable_device(pdev);
+ if (retval) {
+ dev_err(&pdev->dev, "pci_enable_device failed.\n");
+ return retval;
+ }
+
+ pci_enable_wake(pdev, PCI_D3hot, 0);
+
+ priv->can.state = CAN_STATE_ERROR_ACTIVE;
+
+ /* Disabling all interrupts. */
+ pch_can_set_int_enables(priv, PCH_CAN_DISABLE);
+
+ /* Setting the CAN device in Stop Mode. */
+ pch_can_set_run_mode(priv, PCH_CAN_STOP);
+
+ /* Configuring the transmit and receive buffers. */
+ pch_can_config_rx_tx_buffers(priv);
+
+ /* Restore the CAN state */
+ pch_set_bittiming(dev);
+
+ /* Listen/Active */
+ pch_can_set_optmode(priv);
+
+ /* Enabling the transmit buffer. */
+ for (i = 0; i < PCH_OBJ_NUM; i++) {
+ if (priv->msg_obj[i] == MSG_OBJ_TX) {
+ pch_can_set_tx_enable(priv, i + 1,
+ priv->tx_enable[i]);
+ }
+ }
+
+ /* Configuring the receive buffer and enabling them. */
+ for (i = 0; i < PCH_OBJ_NUM; i++) {
+ if (priv->msg_obj[i] == MSG_OBJ_RX) {
+ /* Restore buffer link */
+ pch_can_set_rx_buffer_link(priv, i + 1,
+ priv->rx_link[i]);
+
+ /* Restore buffer enables */
+ pch_can_set_rx_enable(priv, i + 1, priv->rx_enable[i]);
+ }
+ }
+
+ /* Enable CAN Interrupts */
+ pch_can_set_int_custom(priv);
+
+ /* Restore Run Mode */
+ pch_can_set_run_mode(priv, PCH_CAN_RUN);
+
+ return retval;
+}
+#else
+#define pch_can_suspend NULL
+#define pch_can_resume NULL
+#endif
+
+static int pch_can_get_berr_counter(const struct net_device *dev,
+ struct can_berr_counter *bec)
+{
+ struct pch_can_priv *priv = netdev_priv(dev);
+
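+ /* The ERRC register holds both counters: the transmit error count
+ * in its low bits and the receive error count above it, hence the
+ * shift by 8 below. */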
+ bec->txerr = ioread32(&priv->regs->errc) & CAN_TEC;
+ bec->rxerr = (ioread32(&priv->regs->errc) & CAN_REC) >> 8;
+
+ return 0;
+}
+
+static int __devinit pch_can_probe(struct pci_dev *pdev,
+ const struct pci_device_id *id)
+{
+ struct net_device *ndev;
+ struct pch_can_priv *priv;
+ int rc;
+ int index;
+ void __iomem *addr;
+
+ rc = pci_enable_device(pdev);
+ if (rc) {
+ dev_err(&pdev->dev, "Failed pci_enable_device %d\n", rc);
+ goto probe_exit_endev;
+ }
+
+ rc = pci_request_regions(pdev, KBUILD_MODNAME);
+ if (rc) {
+ dev_err(&pdev->dev, "Failed pci_request_regions %d\n", rc);
+ goto probe_exit_pcireq;
+ }
+
+ addr = pci_iomap(pdev, 1, 0);
+ if (!addr) {
+ rc = -EIO;
+ dev_err(&pdev->dev, "Failed pci_iomap\n");
+ goto probe_exit_ipmap;
+ }
+
+ ndev = alloc_candev(sizeof(struct pch_can_priv), PCH_TX_OBJ_NUM);
+ if (!ndev) {
+ rc = -ENOMEM;
+ dev_err(&pdev->dev, "Failed alloc_candev\n");
+ goto probe_exit_alloc_candev;
+ }
+
+ priv = netdev_priv(ndev);
+ priv->ndev = ndev;
+ priv->regs = addr;
+ priv->dev = pdev;
+ priv->can.bittiming_const = &pch_can_bittiming_const;
+ priv->can.do_set_mode = pch_can_do_set_mode;
+ priv->can.do_get_berr_counter = pch_can_get_berr_counter;
+ priv->can.ctrlmode_supported = CAN_CTRLMODE_LISTENONLY |
+ CAN_CTRLMODE_LOOPBACK;
+ priv->tx_obj = PCH_RX_OBJ_NUM + 1; /* Point head of Tx Obj */
+
+ ndev->irq = pdev->irq;
+ ndev->flags |= IFF_ECHO;
+
+ pci_set_drvdata(pdev, ndev);
+ SET_NETDEV_DEV(ndev, &pdev->dev);
+ ndev->netdev_ops = &pch_can_netdev_ops;
+
+ priv->can.clock.freq = PCH_CAN_CLK; /* Hz */
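+ /* Message objects 1..PCH_RX_OBJ_NUM are used for reception, the
+ * remaining objects up to PCH_OBJ_NUM for transmission. */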
+ for (index = 0; index < PCH_RX_OBJ_NUM;)
+ priv->msg_obj[index++] = MSG_OBJ_RX;
+
+ for (; index < PCH_OBJ_NUM;)
+ priv->msg_obj[index++] = MSG_OBJ_TX;
+
+ netif_napi_add(ndev, &priv->napi, pch_can_rx_poll, PCH_RX_OBJ_NUM);
+
+ rc = register_candev(ndev);
+ if (rc) {
+ dev_err(&pdev->dev, "Failed register_candev %d\n", rc);
+ goto probe_exit_reg_candev;
+ }
+
+ return 0;
+
+probe_exit_reg_candev:
+ free_candev(ndev);
+probe_exit_alloc_candev:
+ pci_iounmap(pdev, addr);
+probe_exit_ipmap:
+ pci_release_regions(pdev);
+probe_exit_pcireq:
+ pci_disable_device(pdev);
+probe_exit_endev:
+ return rc;
+}
+
+static struct pci_driver pch_can_pci_driver = {
+ .name = "pch_can",
+ .id_table = pch_pci_tbl,
+ .probe = pch_can_probe,
+ .remove = __devexit_p(pch_can_remove),
+ .suspend = pch_can_suspend,
+ .resume = pch_can_resume,
+};
+
+static int __init pch_can_pci_init(void)
+{
+ return pci_register_driver(&pch_can_pci_driver);
+}
+module_init(pch_can_pci_init);
+
+static void __exit pch_can_pci_exit(void)
+{
+ pci_unregister_driver(&pch_can_pci_driver);
+}
+module_exit(pch_can_pci_exit);
+
+MODULE_DESCRIPTION("Controller Area Network Driver");
+MODULE_LICENSE("GPL v2");
+MODULE_VERSION("0.94");
diff --git a/drivers/net/can/sja1000/Kconfig b/drivers/net/can/sja1000/Kconfig
index ae3505afd682..6fdc031daaae 100644
--- a/drivers/net/can/sja1000/Kconfig
+++ b/drivers/net/can/sja1000/Kconfig
@@ -58,4 +58,16 @@ config CAN_PLX_PCI
- esd CAN-PCIe/2000
- Marathon CAN-bus-PCI card (http://www.marathon.ru/)
- TEWS TECHNOLOGIES TPMC810 card (http://www.tews.com/)
+
+config CAN_TSCAN1
+ tristate "TS-CAN1 PC104 boards"
+ depends on ISA
+ help
+ This driver is for Technologic Systems' TSCAN-1 PC104 boards.
+ http://www.embeddedarm.com/products/board-detail.php?product=TS-CAN1
+ The driver supports multiple boards and automatically configures them:
+ PLD IO base addresses are read from jumpers JP1 and JP2,
+ IRQ numbers are read from jumpers JP4 and JP5,
+ SJA1000 IO base addresses are chosen heuristically (first that works).
+
endif
diff --git a/drivers/net/can/sja1000/Makefile b/drivers/net/can/sja1000/Makefile
index ce924553995d..2c591eb321c7 100644
--- a/drivers/net/can/sja1000/Makefile
+++ b/drivers/net/can/sja1000/Makefile
@@ -9,5 +9,6 @@ obj-$(CONFIG_CAN_SJA1000_OF_PLATFORM) += sja1000_of_platform.o
obj-$(CONFIG_CAN_EMS_PCI) += ems_pci.o
obj-$(CONFIG_CAN_KVASER_PCI) += kvaser_pci.o
obj-$(CONFIG_CAN_PLX_PCI) += plx_pci.o
+obj-$(CONFIG_CAN_TSCAN1) += tscan1.o
ccflags-$(CONFIG_CAN_DEBUG_DEVICES) := -DDEBUG
diff --git a/drivers/net/can/sja1000/tscan1.c b/drivers/net/can/sja1000/tscan1.c
new file mode 100644
index 000000000000..9756099a883a
--- /dev/null
+++ b/drivers/net/can/sja1000/tscan1.c
@@ -0,0 +1,216 @@
+/*
+ * tscan1.c: driver for Technologic Systems TS-CAN1 PC104 boards
+ *
+ * Copyright 2010 Andre B. Oliveira
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version 2
+ * of the License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+/*
+ * References:
+ * - Getting started with TS-CAN1, Technologic Systems, Jun 2009
+ * http://www.embeddedarm.com/documentation/ts-can1-manual.pdf
+ */
+
+#include <linux/init.h>
+#include <linux/io.h>
+#include <linux/ioport.h>
+#include <linux/isa.h>
+#include <linux/module.h>
+#include <linux/netdevice.h>
+#include "sja1000.h"
+
+MODULE_DESCRIPTION("Driver for Technologic Systems TS-CAN1 PC104 boards");
+MODULE_AUTHOR("Andre B. Oliveira <anbadeol@gmail.com>");
+MODULE_LICENSE("GPL");
+
+/* Maximum number of boards (one in each JP1:JP2 setting of IO address) */
+#define TSCAN1_MAXDEV 4
+
+/* PLD registers address offsets */
+#define TSCAN1_ID1 0
+#define TSCAN1_ID2 1
+#define TSCAN1_VERSION 2
+#define TSCAN1_LED 3
+#define TSCAN1_PAGE 4
+#define TSCAN1_MODE 5
+#define TSCAN1_JUMPERS 6
+
+/* PLD board identifier registers magic values */
+#define TSCAN1_ID1_VALUE 0xf6
+#define TSCAN1_ID2_VALUE 0xb9
+
+/* PLD mode register SJA1000 IO enable bit */
+#define TSCAN1_MODE_ENABLE 0x40
+
+/* PLD jumpers register bits */
+#define TSCAN1_JP4 0x10
+#define TSCAN1_JP5 0x20
+
+/* PLD IO base addresses start */
+#define TSCAN1_PLD_ADDRESS 0x150
+
+/* PLD register space size */
+#define TSCAN1_PLD_SIZE 8
+
+/* SJA1000 register space size */
+#define TSCAN1_SJA1000_SIZE 32
+
+/* SJA1000 crystal frequency (16MHz) */
+#define TSCAN1_SJA1000_XTAL 16000000
+
+/* SJA1000 IO base addresses */
+static const unsigned short tscan1_sja1000_addresses[] __devinitconst = {
+ 0x100, 0x120, 0x180, 0x1a0, 0x200, 0x240, 0x280, 0x320
+};
+
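+/*
+ * Note: the SJA1000 core is reached through ISA port IO, so priv->reg_base
+ * carries a raw port number cast to a pointer (assigned in tscan1_probe())
+ * rather than an ioremap()ed address, hence the casts around inb()/outb().
+ */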
+/* Read SJA1000 register */
+static u8 tscan1_read(const struct sja1000_priv *priv, int reg)
+{
+ return inb((unsigned long)priv->reg_base + reg);
+}
+
+/* Write SJA1000 register */
+static void tscan1_write(const struct sja1000_priv *priv, int reg, u8 val)
+{
+ outb(val, (unsigned long)priv->reg_base + reg);
+}
+
+/* Probe for a TS-CAN1 board with JP2:JP1 jumper setting ID */
+static int __devinit tscan1_probe(struct device *dev, unsigned id)
+{
+ struct net_device *netdev;
+ struct sja1000_priv *priv;
+ unsigned long pld_base, sja1000_base;
+ int irq, i;
+
+ pld_base = TSCAN1_PLD_ADDRESS + id * TSCAN1_PLD_SIZE;
+ if (!request_region(pld_base, TSCAN1_PLD_SIZE, dev_name(dev)))
+ return -EBUSY;
+
+ if (inb(pld_base + TSCAN1_ID1) != TSCAN1_ID1_VALUE ||
+ inb(pld_base + TSCAN1_ID2) != TSCAN1_ID2_VALUE) {
+ release_region(pld_base, TSCAN1_PLD_SIZE);
+ return -ENODEV;
+ }
+
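+ /* Decode the IRQ jumpers: JP4 alone selects IRQ 6, JP5 alone IRQ 7,
+ * both together IRQ 5, and neither means no IRQ is wired up. */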
+ switch (inb(pld_base + TSCAN1_JUMPERS) & (TSCAN1_JP4 | TSCAN1_JP5)) {
+ case TSCAN1_JP4:
+ irq = 6;
+ break;
+ case TSCAN1_JP5:
+ irq = 7;
+ break;
+ case TSCAN1_JP4 | TSCAN1_JP5:
+ irq = 5;
+ break;
+ default:
+ dev_err(dev, "invalid JP4:JP5 setting (no IRQ)\n");
+ release_region(pld_base, TSCAN1_PLD_SIZE);
+ return -EINVAL;
+ }
+
+ netdev = alloc_sja1000dev(0);
+ if (!netdev) {
+ release_region(pld_base, TSCAN1_PLD_SIZE);
+ return -ENOMEM;
+ }
+
+ dev_set_drvdata(dev, netdev);
+ SET_NETDEV_DEV(netdev, dev);
+
+ netdev->base_addr = pld_base;
+ netdev->irq = irq;
+
+ priv = netdev_priv(netdev);
+ priv->read_reg = tscan1_read;
+ priv->write_reg = tscan1_write;
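+ /* The SJA1000's CAN clock runs at half the 16 MHz crystal rate */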
+ priv->can.clock.freq = TSCAN1_SJA1000_XTAL / 2;
+ priv->cdr = CDR_CBP | CDR_CLK_OFF;
+ priv->ocr = OCR_TX0_PUSHPULL;
+
+ /* Select the first SJA1000 IO address that is free and that works */
+ for (i = 0; i < ARRAY_SIZE(tscan1_sja1000_addresses); i++) {
+ sja1000_base = tscan1_sja1000_addresses[i];
+ if (!request_region(sja1000_base, TSCAN1_SJA1000_SIZE,
+ dev_name(dev)))
+ continue;
+
+ /* Set SJA1000 IO base address and enable it */
+ outb(TSCAN1_MODE_ENABLE | i, pld_base + TSCAN1_MODE);
+
+ priv->reg_base = (void __iomem *)sja1000_base;
+ if (!register_sja1000dev(netdev)) {
+ /* SJA1000 probe succeeded; turn LED off and return */
+ outb(0, pld_base + TSCAN1_LED);
+ netdev_info(netdev, "TS-CAN1 at 0x%lx 0x%lx irq %d\n",
+ pld_base, sja1000_base, irq);
+ return 0;
+ }
+
+ /* SJA1000 probe failed; release and try next address */
+ outb(0, pld_base + TSCAN1_MODE);
+ release_region(sja1000_base, TSCAN1_SJA1000_SIZE);
+ }
+
+ dev_err(dev, "failed to assign SJA1000 IO address\n");
+ dev_set_drvdata(dev, NULL);
+ free_sja1000dev(netdev);
+ release_region(pld_base, TSCAN1_PLD_SIZE);
+ return -ENXIO;
+}
+
+static int __devexit tscan1_remove(struct device *dev, unsigned id /*unused*/)
+{
+ struct net_device *netdev;
+ struct sja1000_priv *priv;
+ unsigned long pld_base, sja1000_base;
+
+ netdev = dev_get_drvdata(dev);
+ unregister_sja1000dev(netdev);
+ dev_set_drvdata(dev, NULL);
+
+ priv = netdev_priv(netdev);
+ pld_base = netdev->base_addr;
+ sja1000_base = (unsigned long)priv->reg_base;
+
+ outb(0, pld_base + TSCAN1_MODE); /* disable SJA1000 IO space */
+
+ release_region(sja1000_base, TSCAN1_SJA1000_SIZE);
+ release_region(pld_base, TSCAN1_PLD_SIZE);
+
+ free_sja1000dev(netdev);
+
+ return 0;
+}
+
+static struct isa_driver tscan1_isa_driver = {
+ .probe = tscan1_probe,
+ .remove = __devexit_p(tscan1_remove),
+ .driver = {
+ .name = "tscan1",
+ },
+};
+
+static int __init tscan1_init(void)
+{
+ return isa_register_driver(&tscan1_isa_driver, TSCAN1_MAXDEV);
+}
+module_init(tscan1_init);
+
+static void __exit tscan1_exit(void)
+{
+ isa_unregister_driver(&tscan1_isa_driver);
+}
+module_exit(tscan1_exit);
diff --git a/drivers/net/cxgb3/cxgb3_main.c b/drivers/net/cxgb3/cxgb3_main.c
index a04ce6a5f637..046d846c652d 100644
--- a/drivers/net/cxgb3/cxgb3_main.c
+++ b/drivers/net/cxgb3/cxgb3_main.c
@@ -1266,11 +1266,13 @@ static int cxgb_up(struct adapter *adap)
}
if (!(adap->flags & QUEUES_BOUND)) {
- err = bind_qsets(adap);
- if (err) {
- CH_ERR(adap, "failed to bind qsets, err %d\n", err);
+ int ret = bind_qsets(adap);
+
+ if (ret < 0) {
+ CH_ERR(adap, "failed to bind qsets, err %d\n", ret);
t3_intr_disable(adap);
free_irq_resources(adap);
+ err = ret;
goto out;
}
adap->flags |= QUEUES_BOUND;
@@ -3299,7 +3301,6 @@ static int __devinit init_one(struct pci_dev *pdev,
pi->rx_offload = T3_RX_CSUM | T3_LRO;
pi->port_id = i;
netif_carrier_off(netdev);
- netif_tx_stop_all_queues(netdev);
netdev->irq = pdev->irq;
netdev->mem_start = mmio_start;
netdev->mem_end = mmio_start + mmio_len - 1;
diff --git a/drivers/net/cxgb3/sge.c b/drivers/net/cxgb3/sge.c
index 5d72bda54389..f9f6645b2e61 100644
--- a/drivers/net/cxgb3/sge.c
+++ b/drivers/net/cxgb3/sge.c
@@ -296,8 +296,10 @@ static void free_tx_desc(struct adapter *adapter, struct sge_txq *q,
if (d->skb) { /* an SGL is present */
if (need_unmap)
unmap_skb(d->skb, q, cidx, pdev);
- if (d->eop)
+ if (d->eop) {
kfree_skb(d->skb);
+ d->skb = NULL;
+ }
}
++d;
if (++cidx == q->size) {
diff --git a/drivers/net/cxgb4/cxgb4.h b/drivers/net/cxgb4/cxgb4.h
index eaa49e4119f1..3d4253d311eb 100644
--- a/drivers/net/cxgb4/cxgb4.h
+++ b/drivers/net/cxgb4/cxgb4.h
@@ -281,7 +281,6 @@ struct sge_rspq;
struct port_info {
struct adapter *adapter;
- struct vlan_group *vlan_grp;
u16 viid;
s16 xact_addr_filt; /* index of exact MAC address filter */
u16 rss_size; /* size of VI's RSS table slice */
diff --git a/drivers/net/cxgb4/cxgb4_main.c b/drivers/net/cxgb4/cxgb4_main.c
index 87054e0a5746..f50bc98310f8 100644
--- a/drivers/net/cxgb4/cxgb4_main.c
+++ b/drivers/net/cxgb4/cxgb4_main.c
@@ -403,7 +403,7 @@ static int link_start(struct net_device *dev)
* that step explicitly.
*/
ret = t4_set_rxmode(pi->adapter, mb, pi->viid, dev->mtu, -1, -1, -1,
- pi->vlan_grp != NULL, true);
+ !!(dev->features & NETIF_F_HW_VLAN_RX), true);
if (ret == 0) {
ret = t4_change_mac(pi->adapter, mb, pi->viid,
pi->xact_addr_filt, dev->dev_addr, true,
@@ -1881,7 +1881,24 @@ static int set_tso(struct net_device *dev, u32 value)
static int set_flags(struct net_device *dev, u32 flags)
{
- return ethtool_op_set_flags(dev, flags, ETH_FLAG_RXHASH);
+ int err;
+ unsigned long old_feat = dev->features;
+
+ err = ethtool_op_set_flags(dev, flags, ETH_FLAG_RXHASH |
+ ETH_FLAG_RXVLAN | ETH_FLAG_TXVLAN);
+ if (err)
+ return err;
+
+ if ((old_feat ^ dev->features) & NETIF_F_HW_VLAN_RX) {
+ const struct port_info *pi = netdev_priv(dev);
+
+ err = t4_set_rxmode(pi->adapter, pi->adapter->fn, pi->viid, -1,
+ -1, -1, -1, !!(flags & ETH_FLAG_RXVLAN),
+ true);
+ if (err)
+ dev->features = old_feat;
+ }
+ return err;
}
static int get_rss_table(struct net_device *dev, struct ethtool_rxfh_indir *p)
@@ -2842,15 +2859,6 @@ static int cxgb_set_mac_addr(struct net_device *dev, void *p)
return 0;
}
-static void vlan_rx_register(struct net_device *dev, struct vlan_group *grp)
-{
- struct port_info *pi = netdev_priv(dev);
-
- pi->vlan_grp = grp;
- t4_set_rxmode(pi->adapter, pi->adapter->fn, pi->viid, -1, -1, -1, -1,
- grp != NULL, true);
-}
-
#ifdef CONFIG_NET_POLL_CONTROLLER
static void cxgb_netpoll(struct net_device *dev)
{
@@ -2878,7 +2886,6 @@ static const struct net_device_ops cxgb4_netdev_ops = {
.ndo_validate_addr = eth_validate_addr,
.ndo_do_ioctl = cxgb_ioctl,
.ndo_change_mtu = cxgb_change_mtu,
- .ndo_vlan_rx_register = vlan_rx_register,
#ifdef CONFIG_NET_POLL_CONTROLLER
.ndo_poll_controller = cxgb_netpoll,
#endif
@@ -3658,7 +3665,6 @@ static int __devinit init_one(struct pci_dev *pdev,
pi->rx_offload = RX_CSO;
pi->port_id = i;
netif_carrier_off(netdev);
- netif_tx_stop_all_queues(netdev);
netdev->irq = pdev->irq;
netdev->features |= NETIF_F_SG | TSO_FLAGS;
diff --git a/drivers/net/cxgb4/sge.c b/drivers/net/cxgb4/sge.c
index 9967f3debce7..17022258ed68 100644
--- a/drivers/net/cxgb4/sge.c
+++ b/drivers/net/cxgb4/sge.c
@@ -1530,18 +1530,11 @@ static void do_gro(struct sge_eth_rxq *rxq, const struct pkt_gl *gl,
skb->rxhash = (__force u32)pkt->rsshdr.hash_val;
if (unlikely(pkt->vlan_ex)) {
- struct port_info *pi = netdev_priv(rxq->rspq.netdev);
- struct vlan_group *grp = pi->vlan_grp;
-
+ __vlan_hwaccel_put_tag(skb, ntohs(pkt->vlan));
rxq->stats.vlan_ex++;
- if (likely(grp)) {
- ret = vlan_gro_frags(&rxq->rspq.napi, grp,
- ntohs(pkt->vlan));
- goto stats;
- }
}
ret = napi_gro_frags(&rxq->rspq.napi);
-stats: if (ret == GRO_HELD)
+ if (ret == GRO_HELD)
rxq->stats.lro_pkts++;
else if (ret == GRO_MERGED || ret == GRO_MERGED_FREE)
rxq->stats.lro_merged++;
@@ -1608,16 +1601,10 @@ int t4_ethrx_handler(struct sge_rspq *q, const __be64 *rsp,
skb_checksum_none_assert(skb);
if (unlikely(pkt->vlan_ex)) {
- struct vlan_group *grp = pi->vlan_grp;
-
+ __vlan_hwaccel_put_tag(skb, ntohs(pkt->vlan));
rxq->stats.vlan_ex++;
- if (likely(grp))
- vlan_hwaccel_receive_skb(skb, grp, ntohs(pkt->vlan));
- else
- dev_kfree_skb_any(skb);
- } else
- netif_receive_skb(skb);
-
+ }
+ netif_receive_skb(skb);
return 0;
}
diff --git a/drivers/net/cxgb4vf/cxgb4vf_main.c b/drivers/net/cxgb4vf/cxgb4vf_main.c
index 555ecc5a2e93..d887a76cd39d 100644
--- a/drivers/net/cxgb4vf/cxgb4vf_main.c
+++ b/drivers/net/cxgb4vf/cxgb4vf_main.c
@@ -753,7 +753,9 @@ static int cxgb4vf_open(struct net_device *dev)
if (err)
return err;
set_bit(pi->port_id, &adapter->open_device_map);
- link_start(dev);
+ err = link_start(dev);
+ if (err)
+ return err;
netif_tx_start_all_queues(dev);
return 0;
}
@@ -814,40 +816,48 @@ static struct net_device_stats *cxgb4vf_get_stats(struct net_device *dev)
}
/*
- * Collect up to maxaddrs worth of a netdevice's unicast addresses into an
- * array of addrss pointers and return the number collected.
+ * Collect up to maxaddrs worth of a netdevice's unicast addresses, starting
+ * at a specified offset within the list, into an array of address pointers and
+ * return the number collected.
*/
-static inline int collect_netdev_uc_list_addrs(const struct net_device *dev,
- const u8 **addr,
- unsigned int maxaddrs)
+static inline unsigned int collect_netdev_uc_list_addrs(const struct net_device *dev,
+ const u8 **addr,
+ unsigned int offset,
+ unsigned int maxaddrs)
{
+ unsigned int index = 0;
unsigned int naddr = 0;
const struct netdev_hw_addr *ha;
- for_each_dev_addr(dev, ha) {
- addr[naddr++] = ha->addr;
- if (naddr >= maxaddrs)
- break;
- }
+ for_each_dev_addr(dev, ha)
+ if (index++ >= offset) {
+ addr[naddr++] = ha->addr;
+ if (naddr >= maxaddrs)
+ break;
+ }
return naddr;
}
/*
- * Collect up to maxaddrs worth of a netdevice's multicast addresses into an
- * array of addrss pointers and return the number collected.
+ * Collect up to maxaddrs worth of a netdevice's multicast addresses, starting
+ * at a specified offset within the list, into an array of address pointers and
+ * return the number collected.
*/
-static inline int collect_netdev_mc_list_addrs(const struct net_device *dev,
- const u8 **addr,
- unsigned int maxaddrs)
+static inline unsigned int collect_netdev_mc_list_addrs(const struct net_device *dev,
+ const u8 **addr,
+ unsigned int offset,
+ unsigned int maxaddrs)
{
+ unsigned int index = 0;
unsigned int naddr = 0;
const struct netdev_hw_addr *ha;
- netdev_for_each_mc_addr(ha, dev) {
- addr[naddr++] = ha->addr;
- if (naddr >= maxaddrs)
- break;
- }
+ netdev_for_each_mc_addr(ha, dev)
+ if (index++ >= offset) {
+ addr[naddr++] = ha->addr;
+ if (naddr >= maxaddrs)
+ break;
+ }
return naddr;
}
@@ -860,16 +870,20 @@ static int set_addr_filters(const struct net_device *dev, bool sleep)
u64 mhash = 0;
u64 uhash = 0;
bool free = true;
- u16 filt_idx[7];
+ unsigned int offset, naddr;
const u8 *addr[7];
- int ret, naddr = 0;
+ int ret;
const struct port_info *pi = netdev_priv(dev);
/* first do the secondary unicast addresses */
- naddr = collect_netdev_uc_list_addrs(dev, addr, ARRAY_SIZE(addr));
- if (naddr > 0) {
+ for (offset = 0; ; offset += naddr) {
+ naddr = collect_netdev_uc_list_addrs(dev, addr, offset,
+ ARRAY_SIZE(addr));
+ if (naddr == 0)
+ break;
+
ret = t4vf_alloc_mac_filt(pi->adapter, pi->viid, free,
- naddr, addr, filt_idx, &uhash, sleep);
+ naddr, addr, NULL, &uhash, sleep);
if (ret < 0)
return ret;
@@ -877,12 +891,17 @@ static int set_addr_filters(const struct net_device *dev, bool sleep)
}
/* next set up the multicast addresses */
- naddr = collect_netdev_mc_list_addrs(dev, addr, ARRAY_SIZE(addr));
- if (naddr > 0) {
+ for (offset = 0; ; offset += naddr) {
+ naddr = collect_netdev_mc_list_addrs(dev, addr, offset,
+ ARRAY_SIZE(addr));
+ if (naddr == 0)
+ break;
+
ret = t4vf_alloc_mac_filt(pi->adapter, pi->viid, free,
- naddr, addr, filt_idx, &mhash, sleep);
+ naddr, addr, NULL, &mhash, sleep);
if (ret < 0)
return ret;
+ free = false;
}
return t4vf_set_addr_hash(pi->adapter, pi->viid, uhash != 0,
@@ -1103,18 +1122,6 @@ static int cxgb4vf_set_mac_addr(struct net_device *dev, void *_addr)
return 0;
}
-/*
- * Return a TX Queue on which to send the specified skb.
- */
-static u16 cxgb4vf_select_queue(struct net_device *dev, struct sk_buff *skb)
-{
- /*
- * XXX For now just use the default hash but we probably want to
- * XXX look at other possibilities ...
- */
- return skb_tx_hash(dev, skb);
-}
-
#ifdef CONFIG_NET_POLL_CONTROLLER
/*
* Poll all of our receive queues. This is called outside of normal interrupt
@@ -2075,6 +2082,22 @@ static int adap_init0(struct adapter *adapter)
}
/*
+ * Some environments do not properly handle PCIE FLRs -- e.g. in Linux
+ * 2.6.31 and later we can't call pci_reset_function() in order to
+ * issue an FLR because of a self-deadlock on the device semaphore.
+ * Meanwhile, the OS infrastructure doesn't issue FLRs in all the
+ * cases where they're needed -- for instance, some versions of KVM
+ * fail to reset "Assigned Devices" when the VM reboots. Therefore we
+ * use the firmware based reset in order to reset any per function
+ * state.
+ */
+ err = t4vf_fw_reset(adapter);
+ if (err < 0) {
+ dev_err(adapter->pdev_dev, "FW reset failed: err=%d\n", err);
+ return err;
+ }
+
+ /*
* Grab basic operational parameters. These will predominantly have
* been set up by the Physical Function Driver or will be hard coded
* into the adapter. We just have to live with them ... Note that
@@ -2417,7 +2440,6 @@ static const struct net_device_ops cxgb4vf_netdev_ops = {
.ndo_get_stats = cxgb4vf_get_stats,
.ndo_set_rx_mode = cxgb4vf_set_rxmode,
.ndo_set_mac_address = cxgb4vf_set_mac_addr,
- .ndo_select_queue = cxgb4vf_select_queue,
.ndo_validate_addr = eth_validate_addr,
.ndo_do_ioctl = cxgb4vf_do_ioctl,
.ndo_change_mtu = cxgb4vf_change_mtu,
@@ -2600,7 +2622,6 @@ static int __devinit cxgb4vf_pci_probe(struct pci_dev *pdev,
pi->xact_addr_filt = -1;
pi->rx_offload = RX_CSO;
netif_carrier_off(netdev);
- netif_tx_stop_all_queues(netdev);
netdev->irq = pdev->irq;
netdev->features = (NETIF_F_SG | NETIF_F_TSO | NETIF_F_TSO6 |
@@ -2625,7 +2646,6 @@ static int __devinit cxgb4vf_pci_probe(struct pci_dev *pdev,
netdev->do_ioctl = cxgb4vf_do_ioctl;
netdev->change_mtu = cxgb4vf_change_mtu;
netdev->set_mac_address = cxgb4vf_set_mac_addr;
- netdev->select_queue = cxgb4vf_select_queue;
#ifdef CONFIG_NET_POLL_CONTROLLER
netdev->poll_controller = cxgb4vf_poll_controller;
#endif
@@ -2844,6 +2864,14 @@ static struct pci_device_id cxgb4vf_pci_tbl[] = {
CH_DEVICE(0x4800, 0), /* T440-dbg */
CH_DEVICE(0x4801, 0), /* T420-cr */
CH_DEVICE(0x4802, 0), /* T422-cr */
+ CH_DEVICE(0x4803, 0), /* T440-cr */
+ CH_DEVICE(0x4804, 0), /* T420-bch */
+ CH_DEVICE(0x4805, 0), /* T440-bch */
+ CH_DEVICE(0x4806, 0), /* T460-ch */
+ CH_DEVICE(0x4807, 0), /* T420-so */
+ CH_DEVICE(0x4808, 0), /* T420-cx */
+ CH_DEVICE(0x4809, 0), /* T420-bt */
+ CH_DEVICE(0x480a, 0), /* T404-bt */
{ 0, }
};
diff --git a/drivers/net/cxgb4vf/sge.c b/drivers/net/cxgb4vf/sge.c
index f10864ddafbe..ecf0770bf0ff 100644
--- a/drivers/net/cxgb4vf/sge.c
+++ b/drivers/net/cxgb4vf/sge.c
@@ -154,13 +154,14 @@ enum {
*/
RX_COPY_THRES = 256,
RX_PULL_LEN = 128,
-};
-/*
- * Can't define this in the above enum because PKTSHIFT isn't a constant in
- * the VF Driver ...
- */
-#define RX_PKT_PULL_LEN (RX_PULL_LEN + PKTSHIFT)
+ /*
+ * Main body length for sk_buffs used for RX Ethernet packets with
+ * fragments. Should be >= RX_PULL_LEN but possibly bigger to give
+ * pskb_may_pull() some room.
+ */
+ RX_SKB_LEN = 512,
+};
/*
* Software state per TX descriptor.
@@ -1355,6 +1356,67 @@ out_free:
}
/**
+ * t4vf_pktgl_to_skb - build an sk_buff from a packet gather list
+ * @gl: the gather list
+ * @skb_len: size of sk_buff main body if it carries fragments
+ * @pull_len: amount of data to move to the sk_buff's main body
+ *
+ * Builds an sk_buff from the given packet gather list. Returns the
+ * sk_buff or %NULL if sk_buff allocation failed.
+ */
+struct sk_buff *t4vf_pktgl_to_skb(const struct pkt_gl *gl,
+ unsigned int skb_len, unsigned int pull_len)
+{
+ struct sk_buff *skb;
+ struct skb_shared_info *ssi;
+
+ /*
+ * If the ingress packet is small enough, allocate an skb large enough
+ * for all of the data and copy it inline. Otherwise, allocate an skb
+ * with enough room to pull in the header and reference the rest of
+ * the data via the skb fragment list.
+ *
+ * Below we rely on RX_COPY_THRES being less than the smallest Rx
+ * buffer size, which is expected since buffers are at least
+ * PAGE_SIZEd. In this case packets up to RX_COPY_THRES have only one
+ * fragment.
+ */
+ if (gl->tot_len <= RX_COPY_THRES) {
+ /* small packets have only one fragment */
+ skb = alloc_skb(gl->tot_len, GFP_ATOMIC);
+ if (unlikely(!skb))
+ goto out;
+ __skb_put(skb, gl->tot_len);
+ skb_copy_to_linear_data(skb, gl->va, gl->tot_len);
+ } else {
+ skb = alloc_skb(skb_len, GFP_ATOMIC);
+ if (unlikely(!skb))
+ goto out;
+ __skb_put(skb, pull_len);
+ skb_copy_to_linear_data(skb, gl->va, pull_len);
+
+ ssi = skb_shinfo(skb);
+ ssi->frags[0].page = gl->frags[0].page;
+ ssi->frags[0].page_offset = gl->frags[0].page_offset + pull_len;
+ ssi->frags[0].size = gl->frags[0].size - pull_len;
+ if (gl->nfrags > 1)
+ memcpy(&ssi->frags[1], &gl->frags[1],
+ (gl->nfrags-1) * sizeof(skb_frag_t));
+ ssi->nr_frags = gl->nfrags;
+
+ skb->len = gl->tot_len;
+ skb->data_len = skb->len - pull_len;
+ skb->truesize += skb->data_len;
+
+ /* Get a reference for the last page, we don't own it */
+ get_page(gl->frags[gl->nfrags - 1].page);
+ }
+
+out:
+ return skb;
+}
+
+/**
* t4vf_pktgl_free - free a packet gather list
* @gl: the gather list
*
@@ -1463,10 +1525,8 @@ int t4vf_ethrx_handler(struct sge_rspq *rspq, const __be64 *rsp,
{
struct sk_buff *skb;
struct port_info *pi;
- struct skb_shared_info *ssi;
const struct cpl_rx_pkt *pkt = (void *)&rsp[1];
bool csum_ok = pkt->csum_calc && !pkt->err_vec;
- unsigned int len = be16_to_cpu(pkt->len);
struct sge_eth_rxq *rxq = container_of(rspq, struct sge_eth_rxq, rspq);
/*
@@ -1481,42 +1541,14 @@ int t4vf_ethrx_handler(struct sge_rspq *rspq, const __be64 *rsp,
}
/*
- * If the ingress packet is small enough, allocate an skb large enough
- * for all of the data and copy it inline. Otherwise, allocate an skb
- * with enough room to pull in the header and reference the rest of
- * the data via the skb fragment list.
+ * Convert the Packet Gather List into an skb.
*/
- if (len <= RX_COPY_THRES) {
- /* small packets have only one fragment */
- skb = alloc_skb(gl->frags[0].size, GFP_ATOMIC);
- if (!skb)
- goto nomem;
- __skb_put(skb, gl->frags[0].size);
- skb_copy_to_linear_data(skb, gl->va, gl->frags[0].size);
- } else {
- skb = alloc_skb(RX_PKT_PULL_LEN, GFP_ATOMIC);
- if (!skb)
- goto nomem;
- __skb_put(skb, RX_PKT_PULL_LEN);
- skb_copy_to_linear_data(skb, gl->va, RX_PKT_PULL_LEN);
-
- ssi = skb_shinfo(skb);
- ssi->frags[0].page = gl->frags[0].page;
- ssi->frags[0].page_offset = (gl->frags[0].page_offset +
- RX_PKT_PULL_LEN);
- ssi->frags[0].size = gl->frags[0].size - RX_PKT_PULL_LEN;
- if (gl->nfrags > 1)
- memcpy(&ssi->frags[1], &gl->frags[1],
- (gl->nfrags-1) * sizeof(skb_frag_t));
- ssi->nr_frags = gl->nfrags;
- skb->len = len + PKTSHIFT;
- skb->data_len = skb->len - RX_PKT_PULL_LEN;
- skb->truesize += skb->data_len;
-
- /* Get a reference for the last page, we don't own it */
- get_page(gl->frags[gl->nfrags - 1].page);
+ skb = t4vf_pktgl_to_skb(gl, RX_SKB_LEN, RX_PULL_LEN);
+ if (unlikely(!skb)) {
+ t4vf_pktgl_free(gl);
+ rxq->stats.rx_drops++;
+ return 0;
}
-
__skb_pull(skb, PKTSHIFT);
skb->protocol = eth_type_trans(skb, rspq->netdev);
skb_record_rx_queue(skb, rspq->idx);
@@ -1549,11 +1581,6 @@ int t4vf_ethrx_handler(struct sge_rspq *rspq, const __be64 *rsp,
netif_receive_skb(skb);
return 0;
-
-nomem:
- t4vf_pktgl_free(gl);
- rxq->stats.rx_drops++;
- return 0;
}
/**
@@ -1679,6 +1706,7 @@ int process_responses(struct sge_rspq *rspq, int budget)
}
len = RSPD_LEN(len);
}
+ gl.tot_len = len;
/*
* Gather packet fragments.
diff --git a/drivers/net/cxgb4vf/t4vf_common.h b/drivers/net/cxgb4vf/t4vf_common.h
index 873cb7d86c57..a65c80aed1f2 100644
--- a/drivers/net/cxgb4vf/t4vf_common.h
+++ b/drivers/net/cxgb4vf/t4vf_common.h
@@ -235,6 +235,7 @@ static inline int t4vf_wr_mbox_ns(struct adapter *adapter, const void *cmd,
int __devinit t4vf_wait_dev_ready(struct adapter *);
int __devinit t4vf_port_init(struct adapter *, int);
+int t4vf_fw_reset(struct adapter *);
int t4vf_query_params(struct adapter *, unsigned int, const u32 *, u32 *);
int t4vf_set_params(struct adapter *, unsigned int, const u32 *, const u32 *);
diff --git a/drivers/net/cxgb4vf/t4vf_hw.c b/drivers/net/cxgb4vf/t4vf_hw.c
index ea1c123f0cb4..19520afe1a12 100644
--- a/drivers/net/cxgb4vf/t4vf_hw.c
+++ b/drivers/net/cxgb4vf/t4vf_hw.c
@@ -326,6 +326,25 @@ int __devinit t4vf_port_init(struct adapter *adapter, int pidx)
}
/**
+ * t4vf_fw_reset - issue a reset to FW
+ * @adapter: the adapter
+ *
+ * Issues a reset command to FW. For a Physical Function this would
+ * result in the Firmware resetting all of its state. For a Virtual
+ * Function this just resets the state associated with the VF.
+ */
+int t4vf_fw_reset(struct adapter *adapter)
+{
+ struct fw_reset_cmd cmd;
+
+ memset(&cmd, 0, sizeof(cmd));
+ cmd.op_to_write = cpu_to_be32(FW_CMD_OP(FW_RESET_CMD) |
+ FW_CMD_WRITE);
+ cmd.retval_len16 = cpu_to_be32(FW_LEN16(cmd));
+ return t4vf_wr_mbox(adapter, &cmd, sizeof(cmd), NULL);
+}
+
+/**
* t4vf_query_params - query FW or device parameters
* @adapter: the adapter
* @nparams: the number of parameters
@@ -995,48 +1014,72 @@ int t4vf_alloc_mac_filt(struct adapter *adapter, unsigned int viid, bool free,
unsigned int naddr, const u8 **addr, u16 *idx,
u64 *hash, bool sleep_ok)
{
- int i, ret;
+ int offset, ret = 0;
+ unsigned nfilters = 0;
+ unsigned int rem = naddr;
struct fw_vi_mac_cmd cmd, rpl;
- struct fw_vi_mac_exact *p;
- size_t len16;
- if (naddr > ARRAY_SIZE(cmd.u.exact))
+ if (naddr > FW_CLS_TCAM_NUM_ENTRIES)
return -EINVAL;
- len16 = DIV_ROUND_UP(offsetof(struct fw_vi_mac_cmd,
- u.exact[naddr]), 16);
- memset(&cmd, 0, sizeof(cmd));
- cmd.op_to_viid = cpu_to_be32(FW_CMD_OP(FW_VI_MAC_CMD) |
- FW_CMD_REQUEST |
- FW_CMD_WRITE |
- (free ? FW_CMD_EXEC : 0) |
- FW_VI_MAC_CMD_VIID(viid));
- cmd.freemacs_to_len16 = cpu_to_be32(FW_VI_MAC_CMD_FREEMACS(free) |
- FW_CMD_LEN16(len16));
+ for (offset = 0; offset < naddr; /**/) {
+ unsigned int fw_naddr = (rem < ARRAY_SIZE(cmd.u.exact)
+ ? rem
+ : ARRAY_SIZE(cmd.u.exact));
+ size_t len16 = DIV_ROUND_UP(offsetof(struct fw_vi_mac_cmd,
+ u.exact[fw_naddr]), 16);
+ struct fw_vi_mac_exact *p;
+ int i;
+
+ memset(&cmd, 0, sizeof(cmd));
+ cmd.op_to_viid = cpu_to_be32(FW_CMD_OP(FW_VI_MAC_CMD) |
+ FW_CMD_REQUEST |
+ FW_CMD_WRITE |
+ (free ? FW_CMD_EXEC : 0) |
+ FW_VI_MAC_CMD_VIID(viid));
+ cmd.freemacs_to_len16 =
+ cpu_to_be32(FW_VI_MAC_CMD_FREEMACS(free) |
+ FW_CMD_LEN16(len16));
+
+ for (i = 0, p = cmd.u.exact; i < fw_naddr; i++, p++) {
+ p->valid_to_idx = cpu_to_be16(
+ FW_VI_MAC_CMD_VALID |
+ FW_VI_MAC_CMD_IDX(FW_VI_MAC_ADD_MAC));
+ memcpy(p->macaddr, addr[offset+i], sizeof(p->macaddr));
+ }
- for (i = 0, p = cmd.u.exact; i < naddr; i++, p++) {
- p->valid_to_idx =
- cpu_to_be16(FW_VI_MAC_CMD_VALID |
- FW_VI_MAC_CMD_IDX(FW_VI_MAC_ADD_MAC));
- memcpy(p->macaddr, addr[i], sizeof(p->macaddr));
- }
- ret = t4vf_wr_mbox_core(adapter, &cmd, sizeof(cmd), &rpl, sleep_ok);
- if (ret)
- return ret;
-
- for (i = 0, p = rpl.u.exact; i < naddr; i++, p++) {
- u16 index = FW_VI_MAC_CMD_IDX_GET(be16_to_cpu(p->valid_to_idx));
-
- if (idx)
- idx[i] = (index >= FW_CLS_TCAM_NUM_ENTRIES
- ? 0xffff
- : index);
- if (index < FW_CLS_TCAM_NUM_ENTRIES)
- ret++;
- else if (hash)
- *hash |= (1 << hash_mac_addr(addr[i]));
+ ret = t4vf_wr_mbox_core(adapter, &cmd, sizeof(cmd), &rpl,
+ sleep_ok);
+ if (ret && ret != -ENOMEM)
+ break;
+
+ for (i = 0, p = rpl.u.exact; i < fw_naddr; i++, p++) {
+ u16 index = FW_VI_MAC_CMD_IDX_GET(
+ be16_to_cpu(p->valid_to_idx));
+
+ if (idx)
+ idx[offset+i] =
+ (index >= FW_CLS_TCAM_NUM_ENTRIES
+ ? 0xffff
+ : index);
+ if (index < FW_CLS_TCAM_NUM_ENTRIES)
+ nfilters++;
+ else if (hash)
+ *hash |= (1ULL << hash_mac_addr(addr[offset+i]));
+ }
+
+ free = false;
+ offset += fw_naddr;
+ rem -= fw_naddr;
}
+
+ /*
+ * If there were no errors or we merely ran out of room in our MAC
+ * address arena, return the number of filters actually written.
+ */
+ if (ret == 0 || ret == -ENOMEM)
+ ret = nfilters;
return ret;
}
diff --git a/drivers/net/davinci_cpdma.c b/drivers/net/davinci_cpdma.c
new file mode 100644
index 000000000000..e92b2b6cd8c4
--- /dev/null
+++ b/drivers/net/davinci_cpdma.c
@@ -0,0 +1,965 @@
+/*
+ * Texas Instruments CPDMA Driver
+ *
+ * Copyright (C) 2010 Texas Instruments
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation version 2.
+ *
+ * This program is distributed "as is" WITHOUT ANY WARRANTY of any
+ * kind, whether express or implied; without even the implied warranty
+ * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+#include <linux/kernel.h>
+#include <linux/spinlock.h>
+#include <linux/device.h>
+#include <linux/slab.h>
+#include <linux/err.h>
+#include <linux/dma-mapping.h>
+#include <linux/io.h>
+
+#include "davinci_cpdma.h"
+
+/* DMA Registers */
+#define CPDMA_TXIDVER 0x00
+#define CPDMA_TXCONTROL 0x04
+#define CPDMA_TXTEARDOWN 0x08
+#define CPDMA_RXIDVER 0x10
+#define CPDMA_RXCONTROL 0x14
+#define CPDMA_SOFTRESET 0x1c
+#define CPDMA_RXTEARDOWN 0x18
+#define CPDMA_TXINTSTATRAW 0x80
+#define CPDMA_TXINTSTATMASKED 0x84
+#define CPDMA_TXINTMASKSET 0x88
+#define CPDMA_TXINTMASKCLEAR 0x8c
+#define CPDMA_MACINVECTOR 0x90
+#define CPDMA_MACEOIVECTOR 0x94
+#define CPDMA_RXINTSTATRAW 0xa0
+#define CPDMA_RXINTSTATMASKED 0xa4
+#define CPDMA_RXINTMASKSET 0xa8
+#define CPDMA_RXINTMASKCLEAR 0xac
+#define CPDMA_DMAINTSTATRAW 0xb0
+#define CPDMA_DMAINTSTATMASKED 0xb4
+#define CPDMA_DMAINTMASKSET 0xb8
+#define CPDMA_DMAINTMASKCLEAR 0xbc
+#define CPDMA_DMAINT_HOSTERR BIT(1)
+
+/* the following exist only if has_ext_regs is set */
+#define CPDMA_DMACONTROL 0x20
+#define CPDMA_DMASTATUS 0x24
+#define CPDMA_RXBUFFOFS 0x28
+#define CPDMA_EM_CONTROL 0x2c
+
+/* Descriptor mode bits */
+#define CPDMA_DESC_SOP BIT(31)
+#define CPDMA_DESC_EOP BIT(30)
+#define CPDMA_DESC_OWNER BIT(29)
+#define CPDMA_DESC_EOQ BIT(28)
+#define CPDMA_DESC_TD_COMPLETE BIT(27)
+#define CPDMA_DESC_PASS_CRC BIT(26)
+
+#define CPDMA_TEARDOWN_VALUE 0xfffffffc
+
+struct cpdma_desc {
+ /* hardware fields */
+ u32 hw_next;
+ u32 hw_buffer;
+ u32 hw_len;
+ u32 hw_mode;
+ /* software fields */
+ void *sw_token;
+ u32 sw_buffer;
+ u32 sw_len;
+};
+
+struct cpdma_desc_pool {
+ u32 phys;
+ void __iomem *iomap; /* ioremap map */
+ void *cpumap; /* dma_alloc map */
+ int desc_size, mem_size;
+ int num_desc, used_desc;
+ unsigned long *bitmap;
+ struct device *dev;
+ spinlock_t lock;
+};
+
+enum cpdma_state {
+ CPDMA_STATE_IDLE,
+ CPDMA_STATE_ACTIVE,
+ CPDMA_STATE_TEARDOWN,
+};
+
+const char *cpdma_state_str[] = { "idle", "active", "teardown" };
+
+struct cpdma_ctlr {
+ enum cpdma_state state;
+ struct cpdma_params params;
+ struct device *dev;
+ struct cpdma_desc_pool *pool;
+ spinlock_t lock;
+ struct cpdma_chan *channels[2 * CPDMA_MAX_CHANNELS];
+};
+
+struct cpdma_chan {
+ enum cpdma_state state;
+ struct cpdma_ctlr *ctlr;
+ int chan_num;
+ spinlock_t lock;
+ struct cpdma_desc __iomem *head, *tail;
+ int count;
+ void __iomem *hdp, *cp, *rxfree;
+ u32 mask;
+ cpdma_handler_fn handler;
+ enum dma_data_direction dir;
+ struct cpdma_chan_stats stats;
+ /* offsets into dmaregs */
+ int int_set, int_clear, td;
+};
+
+/* The following make access to common cpdma_ctlr params more readable */
+#define dmaregs params.dmaregs
+#define num_chan params.num_chan
+
+/* various accessors */
+#define dma_reg_read(ctlr, ofs) __raw_readl((ctlr)->dmaregs + (ofs))
+#define chan_read(chan, fld) __raw_readl((chan)->fld)
+#define desc_read(desc, fld) __raw_readl(&(desc)->fld)
+#define dma_reg_write(ctlr, ofs, v) __raw_writel(v, (ctlr)->dmaregs + (ofs))
+#define chan_write(chan, fld, v) __raw_writel(v, (chan)->fld)
+#define desc_write(desc, fld, v) __raw_writel((u32)(v), &(desc)->fld)
+
+/*
+ * Utility constructs for a cpdma descriptor pool. Some devices (e.g. davinci
+ * emac) have dedicated on-chip memory for these descriptors. Some other
+ * devices (e.g. cpsw switches) use plain old memory. Descriptor pools
+ * abstract out these details
+ */
+static struct cpdma_desc_pool *
+cpdma_desc_pool_create(struct device *dev, u32 phys, int size, int align)
+{
+ int bitmap_size;
+ struct cpdma_desc_pool *pool;
+
+ pool = kzalloc(sizeof(*pool), GFP_KERNEL);
+ if (!pool)
+ return NULL;
+
+ spin_lock_init(&pool->lock);
+
+ pool->dev = dev;
+ pool->mem_size = size;
+ pool->desc_size = ALIGN(sizeof(struct cpdma_desc), align);
+ pool->num_desc = size / pool->desc_size;
+
+ bitmap_size = (pool->num_desc / BITS_PER_LONG) * sizeof(long);
+ pool->bitmap = kzalloc(bitmap_size, GFP_KERNEL);
+ if (!pool->bitmap)
+ goto fail;
+
+ if (phys) {
+ pool->phys = phys;
+ pool->iomap = ioremap(phys, size);
+ } else {
+ pool->cpumap = dma_alloc_coherent(dev, size, &pool->phys,
+ GFP_KERNEL);
+ pool->iomap = (void __force __iomem *)pool->cpumap;
+ }
+
+ if (pool->iomap)
+ return pool;
+
+fail:
+ kfree(pool->bitmap);
+ kfree(pool);
+ return NULL;
+}
+
+static void cpdma_desc_pool_destroy(struct cpdma_desc_pool *pool)
+{
+ unsigned long flags;
+
+ if (!pool)
+ return;
+
+ spin_lock_irqsave(&pool->lock, flags);
+ WARN_ON(pool->used_desc);
+ kfree(pool->bitmap);
+ if (pool->cpumap) {
+ dma_free_coherent(pool->dev, pool->mem_size, pool->cpumap,
+ pool->phys);
+ } else {
+ iounmap(pool->iomap);
+ }
+ spin_unlock_irqrestore(&pool->lock, flags);
+ kfree(pool);
+}
+
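+/*
+ * Translate between a descriptor's position in the iomap and its DMA/bus
+ * address by offsetting against the base of the descriptor pool.
+ */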
+static inline dma_addr_t desc_phys(struct cpdma_desc_pool *pool,
+ struct cpdma_desc __iomem *desc)
+{
+ if (!desc)
+ return 0;
+ return pool->phys + (__force dma_addr_t)desc -
+ (__force dma_addr_t)pool->iomap;
+}
+
+static inline struct cpdma_desc __iomem *
+desc_from_phys(struct cpdma_desc_pool *pool, dma_addr_t dma)
+{
+ return dma ? pool->iomap + dma - pool->phys : NULL;
+}
+
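+/*
+ * Descriptor allocation is a first-fit search over the pool bitmap;
+ * num_desc consecutive bits are claimed per allocation and released
+ * again in cpdma_desc_free().
+ */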
+static struct cpdma_desc __iomem *
+cpdma_desc_alloc(struct cpdma_desc_pool *pool, int num_desc)
+{
+ unsigned long flags;
+ int index;
+ struct cpdma_desc __iomem *desc = NULL;
+
+ spin_lock_irqsave(&pool->lock, flags);
+
+ index = bitmap_find_next_zero_area(pool->bitmap, pool->num_desc, 0,
+ num_desc, 0);
+ if (index < pool->num_desc) {
+ bitmap_set(pool->bitmap, index, num_desc);
+ desc = pool->iomap + pool->desc_size * index;
+ pool->used_desc++;
+ }
+
+ spin_unlock_irqrestore(&pool->lock, flags);
+ return desc;
+}
+
+static void cpdma_desc_free(struct cpdma_desc_pool *pool,
+ struct cpdma_desc __iomem *desc, int num_desc)
+{
+ unsigned long flags, index;
+
+ index = ((unsigned long)desc - (unsigned long)pool->iomap) /
+ pool->desc_size;
+ spin_lock_irqsave(&pool->lock, flags);
+ bitmap_clear(pool->bitmap, index, num_desc);
+ pool->used_desc--;
+ spin_unlock_irqrestore(&pool->lock, flags);
+}
+
+struct cpdma_ctlr *cpdma_ctlr_create(struct cpdma_params *params)
+{
+ struct cpdma_ctlr *ctlr;
+
+ ctlr = kzalloc(sizeof(*ctlr), GFP_KERNEL);
+ if (!ctlr)
+ return NULL;
+
+ ctlr->state = CPDMA_STATE_IDLE;
+ ctlr->params = *params;
+ ctlr->dev = params->dev;
+ spin_lock_init(&ctlr->lock);
+
+ ctlr->pool = cpdma_desc_pool_create(ctlr->dev,
+ ctlr->params.desc_mem_phys,
+ ctlr->params.desc_mem_size,
+ ctlr->params.desc_align);
+ if (!ctlr->pool) {
+ kfree(ctlr);
+ return NULL;
+ }
+
+ if (WARN_ON(ctlr->num_chan > CPDMA_MAX_CHANNELS))
+ ctlr->num_chan = CPDMA_MAX_CHANNELS;
+ return ctlr;
+}
+
+int cpdma_ctlr_start(struct cpdma_ctlr *ctlr)
+{
+ unsigned long flags;
+ int i;
+
+ spin_lock_irqsave(&ctlr->lock, flags);
+ if (ctlr->state != CPDMA_STATE_IDLE) {
+ spin_unlock_irqrestore(&ctlr->lock, flags);
+ return -EBUSY;
+ }
+
+ if (ctlr->params.has_soft_reset) {
+ unsigned long timeout = jiffies + HZ/10;
+
+ dma_reg_write(ctlr, CPDMA_SOFTRESET, 1);
+ while (time_before(jiffies, timeout)) {
+ if (dma_reg_read(ctlr, CPDMA_SOFTRESET) == 0)
+ break;
+ }
+ WARN_ON(!time_before(jiffies, timeout));
+ }
+
+ for (i = 0; i < ctlr->num_chan; i++) {
+ __raw_writel(0, ctlr->params.txhdp + 4 * i);
+ __raw_writel(0, ctlr->params.rxhdp + 4 * i);
+ __raw_writel(0, ctlr->params.txcp + 4 * i);
+ __raw_writel(0, ctlr->params.rxcp + 4 * i);
+ }
+
+ dma_reg_write(ctlr, CPDMA_RXINTMASKCLEAR, 0xffffffff);
+ dma_reg_write(ctlr, CPDMA_TXINTMASKCLEAR, 0xffffffff);
+
+ dma_reg_write(ctlr, CPDMA_TXCONTROL, 1);
+ dma_reg_write(ctlr, CPDMA_RXCONTROL, 1);
+
+ ctlr->state = CPDMA_STATE_ACTIVE;
+
+ for (i = 0; i < ARRAY_SIZE(ctlr->channels); i++) {
+ if (ctlr->channels[i])
+ cpdma_chan_start(ctlr->channels[i]);
+ }
+ spin_unlock_irqrestore(&ctlr->lock, flags);
+ return 0;
+}
+
+int cpdma_ctlr_stop(struct cpdma_ctlr *ctlr)
+{
+ unsigned long flags;
+ int i;
+
+ spin_lock_irqsave(&ctlr->lock, flags);
+ if (ctlr->state != CPDMA_STATE_ACTIVE) {
+ spin_unlock_irqrestore(&ctlr->lock, flags);
+ return -EINVAL;
+ }
+
+ ctlr->state = CPDMA_STATE_TEARDOWN;
+
+ for (i = 0; i < ARRAY_SIZE(ctlr->channels); i++) {
+ if (ctlr->channels[i])
+ cpdma_chan_stop(ctlr->channels[i]);
+ }
+
+ dma_reg_write(ctlr, CPDMA_RXINTMASKCLEAR, 0xffffffff);
+ dma_reg_write(ctlr, CPDMA_TXINTMASKCLEAR, 0xffffffff);
+
+ dma_reg_write(ctlr, CPDMA_TXCONTROL, 0);
+ dma_reg_write(ctlr, CPDMA_RXCONTROL, 0);
+
+ ctlr->state = CPDMA_STATE_IDLE;
+
+ spin_unlock_irqrestore(&ctlr->lock, flags);
+ return 0;
+}
+
+int cpdma_ctlr_dump(struct cpdma_ctlr *ctlr)
+{
+ struct device *dev = ctlr->dev;
+ unsigned long flags;
+ int i;
+
+ spin_lock_irqsave(&ctlr->lock, flags);
+
+ dev_info(dev, "CPDMA: state: %s", cpdma_state_str[ctlr->state]);
+
+ dev_info(dev, "CPDMA: txidver: %x",
+ dma_reg_read(ctlr, CPDMA_TXIDVER));
+ dev_info(dev, "CPDMA: txcontrol: %x",
+ dma_reg_read(ctlr, CPDMA_TXCONTROL));
+ dev_info(dev, "CPDMA: txteardown: %x",
+ dma_reg_read(ctlr, CPDMA_TXTEARDOWN));
+ dev_info(dev, "CPDMA: rxidver: %x",
+ dma_reg_read(ctlr, CPDMA_RXIDVER));
+ dev_info(dev, "CPDMA: rxcontrol: %x",
+ dma_reg_read(ctlr, CPDMA_RXCONTROL));
+ dev_info(dev, "CPDMA: softreset: %x",
+ dma_reg_read(ctlr, CPDMA_SOFTRESET));
+ dev_info(dev, "CPDMA: rxteardown: %x",
+ dma_reg_read(ctlr, CPDMA_RXTEARDOWN));
+ dev_info(dev, "CPDMA: txintstatraw: %x",
+ dma_reg_read(ctlr, CPDMA_TXINTSTATRAW));
+ dev_info(dev, "CPDMA: txintstatmasked: %x",
+ dma_reg_read(ctlr, CPDMA_TXINTSTATMASKED));
+ dev_info(dev, "CPDMA: txintmaskset: %x",
+ dma_reg_read(ctlr, CPDMA_TXINTMASKSET));
+ dev_info(dev, "CPDMA: txintmaskclear: %x",
+ dma_reg_read(ctlr, CPDMA_TXINTMASKCLEAR));
+ dev_info(dev, "CPDMA: macinvector: %x",
+ dma_reg_read(ctlr, CPDMA_MACINVECTOR));
+ dev_info(dev, "CPDMA: maceoivector: %x",
+ dma_reg_read(ctlr, CPDMA_MACEOIVECTOR));
+ dev_info(dev, "CPDMA: rxintstatraw: %x",
+ dma_reg_read(ctlr, CPDMA_RXINTSTATRAW));
+ dev_info(dev, "CPDMA: rxintstatmasked: %x",
+ dma_reg_read(ctlr, CPDMA_RXINTSTATMASKED));
+ dev_info(dev, "CPDMA: rxintmaskset: %x",
+ dma_reg_read(ctlr, CPDMA_RXINTMASKSET));
+ dev_info(dev, "CPDMA: rxintmaskclear: %x",
+ dma_reg_read(ctlr, CPDMA_RXINTMASKCLEAR));
+ dev_info(dev, "CPDMA: dmaintstatraw: %x",
+ dma_reg_read(ctlr, CPDMA_DMAINTSTATRAW));
+ dev_info(dev, "CPDMA: dmaintstatmasked: %x",
+ dma_reg_read(ctlr, CPDMA_DMAINTSTATMASKED));
+ dev_info(dev, "CPDMA: dmaintmaskset: %x",
+ dma_reg_read(ctlr, CPDMA_DMAINTMASKSET));
+ dev_info(dev, "CPDMA: dmaintmaskclear: %x",
+ dma_reg_read(ctlr, CPDMA_DMAINTMASKCLEAR));
+
+ if (ctlr->params.has_ext_regs) {
+ dev_info(dev, "CPDMA: dmacontrol: %x",
+ dma_reg_read(ctlr, CPDMA_DMACONTROL));
+ dev_info(dev, "CPDMA: dmastatus: %x",
+ dma_reg_read(ctlr, CPDMA_DMASTATUS));
+ dev_info(dev, "CPDMA: rxbuffofs: %x",
+ dma_reg_read(ctlr, CPDMA_RXBUFFOFS));
+ }
+
+ for (i = 0; i < ARRAY_SIZE(ctlr->channels); i++)
+ if (ctlr->channels[i])
+ cpdma_chan_dump(ctlr->channels[i]);
+
+ spin_unlock_irqrestore(&ctlr->lock, flags);
+ return 0;
+}
+
+int cpdma_ctlr_destroy(struct cpdma_ctlr *ctlr)
+{
+ unsigned long flags;
+ int ret = 0, i;
+
+ if (!ctlr)
+ return -EINVAL;
+
+ spin_lock_irqsave(&ctlr->lock, flags);
+ if (ctlr->state != CPDMA_STATE_IDLE)
+ cpdma_ctlr_stop(ctlr);
+
+ for (i = 0; i < ARRAY_SIZE(ctlr->channels); i++) {
+ if (ctlr->channels[i])
+ cpdma_chan_destroy(ctlr->channels[i]);
+ }
+
+ cpdma_desc_pool_destroy(ctlr->pool);
+ spin_unlock_irqrestore(&ctlr->lock, flags);
+ kfree(ctlr);
+ return ret;
+}
+
+int cpdma_ctlr_int_ctrl(struct cpdma_ctlr *ctlr, bool enable)
+{
+ unsigned long flags;
+ int i, reg;
+
+ spin_lock_irqsave(&ctlr->lock, flags);
+ if (ctlr->state != CPDMA_STATE_ACTIVE) {
+ spin_unlock_irqrestore(&ctlr->lock, flags);
+ return -EINVAL;
+ }
+
+ reg = enable ? CPDMA_DMAINTMASKSET : CPDMA_DMAINTMASKCLEAR;
+ dma_reg_write(ctlr, reg, CPDMA_DMAINT_HOSTERR);
+
+ for (i = 0; i < ARRAY_SIZE(ctlr->channels); i++) {
+ if (ctlr->channels[i])
+ cpdma_chan_int_ctrl(ctlr->channels[i], enable);
+ }
+
+ spin_unlock_irqrestore(&ctlr->lock, flags);
+ return 0;
+}
+
+void cpdma_ctlr_eoi(struct cpdma_ctlr *ctlr)
+{
+ dma_reg_write(ctlr, CPDMA_MACEOIVECTOR, 0);
+}
+
+struct cpdma_chan *cpdma_chan_create(struct cpdma_ctlr *ctlr, int chan_num,
+ cpdma_handler_fn handler)
+{
+ struct cpdma_chan *chan;
+ int ret, offset = (chan_num % CPDMA_MAX_CHANNELS) * 4;
+ unsigned long flags;
+
+ if (__chan_linear(chan_num) >= ctlr->num_chan)
+ return NULL;
+
+ ret = -ENOMEM;
+ chan = kzalloc(sizeof(*chan), GFP_KERNEL);
+ if (!chan)
+ goto err_chan_alloc;
+
+ spin_lock_irqsave(&ctlr->lock, flags);
+ ret = -EBUSY;
+ if (ctlr->channels[chan_num])
+ goto err_chan_busy;
+
+ chan->ctlr = ctlr;
+ chan->state = CPDMA_STATE_IDLE;
+ chan->chan_num = chan_num;
+ chan->handler = handler;
+
+ if (is_rx_chan(chan)) {
+ chan->hdp = ctlr->params.rxhdp + offset;
+ chan->cp = ctlr->params.rxcp + offset;
+ chan->rxfree = ctlr->params.rxfree + offset;
+ chan->int_set = CPDMA_RXINTMASKSET;
+ chan->int_clear = CPDMA_RXINTMASKCLEAR;
+ chan->td = CPDMA_RXTEARDOWN;
+ chan->dir = DMA_FROM_DEVICE;
+ } else {
+ chan->hdp = ctlr->params.txhdp + offset;
+ chan->cp = ctlr->params.txcp + offset;
+ chan->int_set = CPDMA_TXINTMASKSET;
+ chan->int_clear = CPDMA_TXINTMASKCLEAR;
+ chan->td = CPDMA_TXTEARDOWN;
+ chan->dir = DMA_TO_DEVICE;
+ }
+ chan->mask = BIT(chan_linear(chan));
+
+ spin_lock_init(&chan->lock);
+
+ ctlr->channels[chan_num] = chan;
+ spin_unlock_irqrestore(&ctlr->lock, flags);
+ return chan;
+
+err_chan_busy:
+ spin_unlock_irqrestore(&ctlr->lock, flags);
+ kfree(chan);
+err_chan_alloc:
+ return ERR_PTR(ret);
+}
+
+int cpdma_chan_destroy(struct cpdma_chan *chan)
+{
+ struct cpdma_ctlr *ctlr;
+ unsigned long flags;
+
+ if (!chan)
+ return -EINVAL;
+
+ /* Dereference the channel only after the NULL check above */
+ ctlr = chan->ctlr;
+ spin_lock_irqsave(&ctlr->lock, flags);
+ if (chan->state != CPDMA_STATE_IDLE)
+ cpdma_chan_stop(chan);
+ ctlr->channels[chan->chan_num] = NULL;
+ spin_unlock_irqrestore(&ctlr->lock, flags);
+ kfree(chan);
+ return 0;
+}
+
+int cpdma_chan_get_stats(struct cpdma_chan *chan,
+ struct cpdma_chan_stats *stats)
+{
+ unsigned long flags;
+ if (!chan)
+ return -EINVAL;
+ spin_lock_irqsave(&chan->lock, flags);
+ memcpy(stats, &chan->stats, sizeof(*stats));
+ spin_unlock_irqrestore(&chan->lock, flags);
+ return 0;
+}
+
+int cpdma_chan_dump(struct cpdma_chan *chan)
+{
+ unsigned long flags;
+ struct device *dev = chan->ctlr->dev;
+
+ spin_lock_irqsave(&chan->lock, flags);
+
+ dev_info(dev, "channel %d (%s %d) state %s",
+ chan->chan_num, is_rx_chan(chan) ? "rx" : "tx",
+ chan_linear(chan), cpdma_state_str[chan->state]);
+ dev_info(dev, "\thdp: %x\n", chan_read(chan, hdp));
+ dev_info(dev, "\tcp: %x\n", chan_read(chan, cp));
+ if (chan->rxfree) {
+ dev_info(dev, "\trxfree: %x\n",
+ chan_read(chan, rxfree));
+ }
+
+ dev_info(dev, "\tstats head_enqueue: %d\n",
+ chan->stats.head_enqueue);
+ dev_info(dev, "\tstats tail_enqueue: %d\n",
+ chan->stats.tail_enqueue);
+ dev_info(dev, "\tstats pad_enqueue: %d\n",
+ chan->stats.pad_enqueue);
+ dev_info(dev, "\tstats misqueued: %d\n",
+ chan->stats.misqueued);
+ dev_info(dev, "\tstats desc_alloc_fail: %d\n",
+ chan->stats.desc_alloc_fail);
+ dev_info(dev, "\tstats pad_alloc_fail: %d\n",
+ chan->stats.pad_alloc_fail);
+ dev_info(dev, "\tstats runt_receive_buff: %d\n",
+ chan->stats.runt_receive_buff);
+ dev_info(dev, "\tstats runt_transmit_buff: %d\n",
+ chan->stats.runt_transmit_buff);
+ dev_info(dev, "\tstats empty_dequeue: %d\n",
+ chan->stats.empty_dequeue);
+ dev_info(dev, "\tstats busy_dequeue: %d\n",
+ chan->stats.busy_dequeue);
+ dev_info(dev, "\tstats good_dequeue: %d\n",
+ chan->stats.good_dequeue);
+ dev_info(dev, "\tstats requeue: %d\n",
+ chan->stats.requeue);
+ dev_info(dev, "\tstats teardown_dequeue: %d\n",
+ chan->stats.teardown_dequeue);
+
+ spin_unlock_irqrestore(&chan->lock, flags);
+ return 0;
+}
+
+static void __cpdma_chan_submit(struct cpdma_chan *chan,
+ struct cpdma_desc __iomem *desc)
+{
+ struct cpdma_ctlr *ctlr = chan->ctlr;
+ struct cpdma_desc __iomem *prev = chan->tail;
+ struct cpdma_desc_pool *pool = ctlr->pool;
+ dma_addr_t desc_dma;
+ u32 mode;
+
+ desc_dma = desc_phys(pool, desc);
+
+ /* simple case - idle channel */
+ if (!chan->head) {
+ chan->stats.head_enqueue++;
+ chan->head = desc;
+ chan->tail = desc;
+ if (chan->state == CPDMA_STATE_ACTIVE)
+ chan_write(chan, hdp, desc_dma);
+ return;
+ }
+
+ /* first chain the descriptor at the tail of the list */
+ desc_write(prev, hw_next, desc_dma);
+ chan->tail = desc;
+ chan->stats.tail_enqueue++;
+
+ /* next check if EOQ has been triggered already */
+ mode = desc_read(prev, hw_mode);
+ if (((mode & (CPDMA_DESC_EOQ | CPDMA_DESC_OWNER)) == CPDMA_DESC_EOQ) &&
+ (chan->state == CPDMA_STATE_ACTIVE)) {
+ desc_write(prev, hw_mode, mode & ~CPDMA_DESC_EOQ);
+ chan_write(chan, hdp, desc_dma);
+ chan->stats.misqueued++;
+ }
+}
+
+int cpdma_chan_submit(struct cpdma_chan *chan, void *token, void *data,
+ int len, gfp_t gfp_mask)
+{
+ struct cpdma_ctlr *ctlr = chan->ctlr;
+ struct cpdma_desc __iomem *desc;
+ dma_addr_t buffer;
+ unsigned long flags;
+ u32 mode;
+ int ret = 0;
+
+ spin_lock_irqsave(&chan->lock, flags);
+
+ if (chan->state == CPDMA_STATE_TEARDOWN) {
+ ret = -EINVAL;
+ goto unlock_ret;
+ }
+
+ desc = cpdma_desc_alloc(ctlr->pool, 1);
+ if (!desc) {
+ chan->stats.desc_alloc_fail++;
+ ret = -ENOMEM;
+ goto unlock_ret;
+ }
+
+ if (len < ctlr->params.min_packet_size) {
+ len = ctlr->params.min_packet_size;
+ chan->stats.runt_transmit_buff++;
+ }
+
+ buffer = dma_map_single(ctlr->dev, data, len, chan->dir);
+ mode = CPDMA_DESC_OWNER | CPDMA_DESC_SOP | CPDMA_DESC_EOP;
+
+ desc_write(desc, hw_next, 0);
+ desc_write(desc, hw_buffer, buffer);
+ desc_write(desc, hw_len, len);
+ desc_write(desc, hw_mode, mode | len);
+ desc_write(desc, sw_token, token);
+ desc_write(desc, sw_buffer, buffer);
+ desc_write(desc, sw_len, len);
+
+ __cpdma_chan_submit(chan, desc);
+
+ if (chan->state == CPDMA_STATE_ACTIVE && chan->rxfree)
+ chan_write(chan, rxfree, 1);
+
+ chan->count++;
+
+unlock_ret:
+ spin_unlock_irqrestore(&chan->lock, flags);
+ return ret;
+}
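+
+/*
+ * Typical call sequence for a client of this API (a sketch based only on
+ * the functions in this file, not on any particular user in this patch;
+ * my_tx_handler and params are placeholders):
+ *
+ * ctlr = cpdma_ctlr_create(&params);
+ * chan = cpdma_chan_create(ctlr, 0, my_tx_handler);
+ * cpdma_ctlr_start(ctlr);
+ * cpdma_chan_submit(chan, skb, skb->data, skb->len, GFP_KERNEL);
+ *
+ * Completed buffers are later reaped with cpdma_chan_process(), which
+ * invokes my_tx_handler() for each finished descriptor.
+ */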
+
+static void __cpdma_chan_free(struct cpdma_chan *chan,
+ struct cpdma_desc __iomem *desc,
+ int outlen, int status)
+{
+ struct cpdma_ctlr *ctlr = chan->ctlr;
+ struct cpdma_desc_pool *pool = ctlr->pool;
+ dma_addr_t buff_dma;
+ int origlen;
+ void *token;
+
+ token = (void *)desc_read(desc, sw_token);
+ buff_dma = desc_read(desc, sw_buffer);
+ origlen = desc_read(desc, sw_len);
+
+ dma_unmap_single(ctlr->dev, buff_dma, origlen, chan->dir);
+ cpdma_desc_free(pool, desc, 1);
+ (*chan->handler)(token, outlen, status);
+}
+
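+/*
+ * Complete the descriptor at the head of the channel if the hardware has
+ * finished with it: the low 11 bits of hw_mode carry the completed length,
+ * and a still-set OWNER bit means the descriptor is still owned by the
+ * hardware.
+ */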
+static int __cpdma_chan_process(struct cpdma_chan *chan)
+{
+ struct cpdma_ctlr *ctlr = chan->ctlr;
+ struct cpdma_desc __iomem *desc;
+ int status, outlen;
+ struct cpdma_desc_pool *pool = ctlr->pool;
+ dma_addr_t desc_dma;
+ unsigned long flags;
+
+ spin_lock_irqsave(&chan->lock, flags);
+
+ desc = chan->head;
+ if (!desc) {
+ chan->stats.empty_dequeue++;
+ status = -ENOENT;
+ goto unlock_ret;
+ }
+ desc_dma = desc_phys(pool, desc);
+
+ status = __raw_readl(&desc->hw_mode);
+ outlen = status & 0x7ff;
+ if (status & CPDMA_DESC_OWNER) {
+ chan->stats.busy_dequeue++;
+ status = -EBUSY;
+ goto unlock_ret;
+ }
+ status = status & (CPDMA_DESC_EOQ | CPDMA_DESC_TD_COMPLETE);
+
+ chan->head = desc_from_phys(pool, desc_read(desc, hw_next));
+ chan_write(chan, cp, desc_dma);
+ chan->count--;
+ chan->stats.good_dequeue++;
+
+ if (status & CPDMA_DESC_EOQ) {
+ chan->stats.requeue++;
+ chan_write(chan, hdp, desc_phys(pool, chan->head));
+ }
+
+ spin_unlock_irqrestore(&chan->lock, flags);
+
+ __cpdma_chan_free(chan, desc, outlen, status);
+ return status;
+
+unlock_ret:
+ spin_unlock_irqrestore(&chan->lock, flags);
+ return status;
+}
+
+int cpdma_chan_process(struct cpdma_chan *chan, int quota)
+{
+ int used = 0, ret = 0;
+
+ if (chan->state != CPDMA_STATE_ACTIVE)
+ return -EINVAL;
+
+ while (used < quota) {
+ ret = __cpdma_chan_process(chan);
+ if (ret < 0)
+ break;
+ used++;
+ }
+ return used;
+}
+
+int cpdma_chan_start(struct cpdma_chan *chan)
+{
+ struct cpdma_ctlr *ctlr = chan->ctlr;
+ struct cpdma_desc_pool *pool = ctlr->pool;
+ unsigned long flags;
+
+ spin_lock_irqsave(&chan->lock, flags);
+ if (chan->state != CPDMA_STATE_IDLE) {
+ spin_unlock_irqrestore(&chan->lock, flags);
+ return -EBUSY;
+ }
+ if (ctlr->state != CPDMA_STATE_ACTIVE) {
+ spin_unlock_irqrestore(&chan->lock, flags);
+ return -EINVAL;
+ }
+ dma_reg_write(ctlr, chan->int_set, chan->mask);
+ chan->state = CPDMA_STATE_ACTIVE;
+ if (chan->head) {
+ chan_write(chan, hdp, desc_phys(pool, chan->head));
+ if (chan->rxfree)
+ chan_write(chan, rxfree, chan->count);
+ }
+
+ spin_unlock_irqrestore(&chan->lock, flags);
+ return 0;
+}
+
+int cpdma_chan_stop(struct cpdma_chan *chan)
+{
+ struct cpdma_ctlr *ctlr = chan->ctlr;
+ struct cpdma_desc_pool *pool = ctlr->pool;
+ unsigned long flags;
+ int ret;
+ unsigned long timeout;
+
+ spin_lock_irqsave(&chan->lock, flags);
+ if (chan->state != CPDMA_STATE_ACTIVE) {
+ spin_unlock_irqrestore(&chan->lock, flags);
+ return -EINVAL;
+ }
+
+ chan->state = CPDMA_STATE_TEARDOWN;
+ dma_reg_write(ctlr, chan->int_clear, chan->mask);
+
+ /* trigger teardown */
+ dma_reg_write(ctlr, chan->td, chan->chan_num);
+
+ /* wait for teardown complete */
+ timeout = jiffies + HZ/10; /* 100 msec */
+ while (time_before(jiffies, timeout)) {
+ u32 cp = chan_read(chan, cp);
+ if ((cp & CPDMA_TEARDOWN_VALUE) == CPDMA_TEARDOWN_VALUE)
+ break;
+ cpu_relax();
+ }
+ WARN_ON(!time_before(jiffies, timeout));
+ chan_write(chan, cp, CPDMA_TEARDOWN_VALUE);
+
+ /* handle completed packets */
+ do {
+ ret = __cpdma_chan_process(chan);
+ if (ret < 0)
+ break;
+ } while ((ret & CPDMA_DESC_TD_COMPLETE) == 0);
+
+ /* remaining packets haven't been tx/rx'ed, clean them up */
+ while (chan->head) {
+ struct cpdma_desc __iomem *desc = chan->head;
+ dma_addr_t next_dma;
+
+ next_dma = desc_read(desc, hw_next);
+ chan->head = desc_from_phys(pool, next_dma);
+ chan->stats.teardown_dequeue++;
+
+ /* issue callback without locks held */
+ spin_unlock_irqrestore(&chan->lock, flags);
+ __cpdma_chan_free(chan, desc, 0, -ENOSYS);
+ spin_lock_irqsave(&chan->lock, flags);
+ }
+
+ chan->state = CPDMA_STATE_IDLE;
+ spin_unlock_irqrestore(&chan->lock, flags);
+ return 0;
+}
+
+int cpdma_chan_int_ctrl(struct cpdma_chan *chan, bool enable)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&chan->lock, flags);
+ if (chan->state != CPDMA_STATE_ACTIVE) {
+ spin_unlock_irqrestore(&chan->lock, flags);
+ return -EINVAL;
+ }
+
+ dma_reg_write(chan->ctlr, enable ? chan->int_set : chan->int_clear,
+ chan->mask);
+ spin_unlock_irqrestore(&chan->lock, flags);
+
+ return 0;
+}
+
+struct cpdma_control_info {
+ u32 reg;
+ u32 shift, mask;
+ int access;
+#define ACCESS_RO BIT(0)
+#define ACCESS_WO BIT(1)
+#define ACCESS_RW (ACCESS_RO | ACCESS_WO)
+};
+
+struct cpdma_control_info controls[] = {
+ [CPDMA_CMD_IDLE] = {CPDMA_DMACONTROL, 3, 1, ACCESS_WO},
+ [CPDMA_COPY_ERROR_FRAMES] = {CPDMA_DMACONTROL, 4, 1, ACCESS_RW},
+ [CPDMA_RX_OFF_LEN_UPDATE] = {CPDMA_DMACONTROL, 2, 1, ACCESS_RW},
+ [CPDMA_RX_OWNERSHIP_FLIP] = {CPDMA_DMACONTROL, 1, 1, ACCESS_RW},
+ [CPDMA_TX_PRIO_FIXED] = {CPDMA_DMACONTROL, 0, 1, ACCESS_RW},
+ [CPDMA_STAT_IDLE] = {CPDMA_DMASTATUS, 31, 1, ACCESS_RO},
+ [CPDMA_STAT_TX_ERR_CODE] = {CPDMA_DMASTATUS, 20, 0xf, ACCESS_RW},
+ [CPDMA_STAT_TX_ERR_CHAN] = {CPDMA_DMASTATUS, 16, 0x7, ACCESS_RW},
+ [CPDMA_STAT_RX_ERR_CODE] = {CPDMA_DMASTATUS, 12, 0xf, ACCESS_RW},
+ [CPDMA_STAT_RX_ERR_CHAN] = {CPDMA_DMASTATUS, 8, 0x7, ACCESS_RW},
+ [CPDMA_RX_BUFFER_OFFSET] = {CPDMA_RXBUFFOFS, 0, 0xffff, ACCESS_RW},
+};
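
The accessors below apply these shift/mask pairs; purely as an illustration (not taken from this patch), a client holding an active controller with has_ext_regs set could query one of the status fields like so:

	/* values are masked and non-negative; errors are negative errnos */
	int code = cpdma_control_get(ctlr, CPDMA_STAT_RX_ERR_CODE);

	if (code >= 0)
		pr_warn("cpdma: rx host error code %d\n", code);
	else
		pr_warn("cpdma: control read failed: %d\n", code);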
+
+int cpdma_control_get(struct cpdma_ctlr *ctlr, int control)
+{
+ unsigned long flags;
+ struct cpdma_control_info *info = &controls[control];
+ int ret;
+
+ spin_lock_irqsave(&ctlr->lock, flags);
+
+ ret = -ENOTSUPP;
+ if (!ctlr->params.has_ext_regs)
+ goto unlock_ret;
+
+ ret = -EINVAL;
+ if (ctlr->state != CPDMA_STATE_ACTIVE)
+ goto unlock_ret;
+
+ ret = -ENOENT;
+ if (control < 0 || control >= ARRAY_SIZE(controls))
+ goto unlock_ret;
+
+ ret = -EPERM;
+ if ((info->access & ACCESS_RO) != ACCESS_RO)
+ goto unlock_ret;
+
+ ret = (dma_reg_read(ctlr, info->reg) >> info->shift) & info->mask;
+
+unlock_ret:
+ spin_unlock_irqrestore(&ctlr->lock, flags);
+ return ret;
+}
+
+int cpdma_control_set(struct cpdma_ctlr *ctlr, int control, int value)
+{
+ unsigned long flags;
+ struct cpdma_control_info *info = &controls[control];
+ int ret;
+ u32 val;
+
+ spin_lock_irqsave(&ctlr->lock, flags);
+
+ ret = -ENOTSUPP;
+ if (!ctlr->params.has_ext_regs)
+ goto unlock_ret;
+
+ ret = -EINVAL;
+ if (ctlr->state != CPDMA_STATE_ACTIVE)
+ goto unlock_ret;
+
+ ret = -ENOENT;
+ if (control < 0 || control >= ARRAY_SIZE(controls))
+ goto unlock_ret;
+
+ ret = -EPERM;
+ if ((info->access & ACCESS_WO) != ACCESS_WO)
+ goto unlock_ret;
+
+ val = dma_reg_read(ctlr, info->reg);
+ val &= ~(info->mask << info->shift);
+ val |= (value & info->mask) << info->shift;
+ dma_reg_write(ctlr, info->reg, val);
+ ret = 0;
+
+unlock_ret:
+ spin_unlock_irqrestore(&ctlr->lock, flags);
+ return ret;
+}
diff --git a/drivers/net/davinci_cpdma.h b/drivers/net/davinci_cpdma.h
new file mode 100644
index 000000000000..868e50ebde45
--- /dev/null
+++ b/drivers/net/davinci_cpdma.h
@@ -0,0 +1,108 @@
+/*
+ * Texas Instruments CPDMA Driver
+ *
+ * Copyright (C) 2010 Texas Instruments
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation version 2.
+ *
+ * This program is distributed "as is" WITHOUT ANY WARRANTY of any
+ * kind, whether express or implied; without even the implied warranty
+ * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+#ifndef __DAVINCI_CPDMA_H__
+#define __DAVINCI_CPDMA_H__
+
+#define CPDMA_MAX_CHANNELS BITS_PER_LONG
+
+#define tx_chan_num(chan) (chan)
+#define rx_chan_num(chan) ((chan) + CPDMA_MAX_CHANNELS)
+#define is_rx_chan(chan) ((chan)->chan_num >= CPDMA_MAX_CHANNELS)
+#define is_tx_chan(chan) (!is_rx_chan(chan))
+#define __chan_linear(chan_num) ((chan_num) & (CPDMA_MAX_CHANNELS - 1))
+#define chan_linear(chan) __chan_linear((chan)->chan_num)
+
+struct cpdma_params {
+ struct device *dev;
+ void __iomem *dmaregs;
+ void __iomem *txhdp, *rxhdp, *txcp, *rxcp;
+ void __iomem *rxthresh, *rxfree;
+ int num_chan;
+ bool has_soft_reset;
+ int min_packet_size;
+ u32 desc_mem_phys;
+ int desc_mem_size;
+ int desc_align;
+
+ /*
+ * Some instances of embedded cpdma controllers have extra control and
+ * status registers. The following flag enables access to these
+ * "extended" registers.
+ */
+ bool has_ext_regs;
+};
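
The davinci_emac probe hunk later in this patch fills these fields from its own register map; a condensed, hypothetical sketch of client-side setup (dev, regs, desc_phys, desc_size and the offsets are placeholders):

	struct cpdma_params dma_params;

	memset(&dma_params, 0, sizeof(dma_params));
	dma_params.dev             = dev;          /* client's struct device */
	dma_params.dmaregs         = regs;         /* CPDMA register block */
	dma_params.txhdp           = regs + 0x600; /* placeholder offsets */
	dma_params.rxhdp           = regs + 0x620;
	dma_params.txcp            = regs + 0x640;
	dma_params.rxcp            = regs + 0x660;
	dma_params.num_chan        = 8;
	dma_params.min_packet_size = 60;
	dma_params.desc_mem_phys   = desc_phys;    /* descriptor RAM */
	dma_params.desc_mem_size   = desc_size;
	dma_params.desc_align      = 16;

	ctlr = cpdma_ctlr_create(&dma_params);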
+
+struct cpdma_chan_stats {
+ u32 head_enqueue;
+ u32 tail_enqueue;
+ u32 pad_enqueue;
+ u32 misqueued;
+ u32 desc_alloc_fail;
+ u32 pad_alloc_fail;
+ u32 runt_receive_buff;
+ u32 runt_transmit_buff;
+ u32 empty_dequeue;
+ u32 busy_dequeue;
+ u32 good_dequeue;
+ u32 requeue;
+ u32 teardown_dequeue;
+};
+
+struct cpdma_ctlr;
+struct cpdma_chan;
+
+typedef void (*cpdma_handler_fn)(void *token, int len, int status);
+
+struct cpdma_ctlr *cpdma_ctlr_create(struct cpdma_params *params);
+int cpdma_ctlr_destroy(struct cpdma_ctlr *ctlr);
+int cpdma_ctlr_start(struct cpdma_ctlr *ctlr);
+int cpdma_ctlr_stop(struct cpdma_ctlr *ctlr);
+int cpdma_ctlr_dump(struct cpdma_ctlr *ctlr);
+
+struct cpdma_chan *cpdma_chan_create(struct cpdma_ctlr *ctlr, int chan_num,
+ cpdma_handler_fn handler);
+int cpdma_chan_destroy(struct cpdma_chan *chan);
+int cpdma_chan_start(struct cpdma_chan *chan);
+int cpdma_chan_stop(struct cpdma_chan *chan);
+int cpdma_chan_dump(struct cpdma_chan *chan);
+
+int cpdma_chan_get_stats(struct cpdma_chan *chan,
+ struct cpdma_chan_stats *stats);
+int cpdma_chan_submit(struct cpdma_chan *chan, void *token, void *data,
+ int len, gfp_t gfp_mask);
+int cpdma_chan_process(struct cpdma_chan *chan, int quota);
+
+int cpdma_ctlr_int_ctrl(struct cpdma_ctlr *ctlr, bool enable);
+void cpdma_ctlr_eoi(struct cpdma_ctlr *ctlr);
+int cpdma_chan_int_ctrl(struct cpdma_chan *chan, bool enable);
+
+enum cpdma_control {
+ CPDMA_CMD_IDLE, /* write-only */
+ CPDMA_COPY_ERROR_FRAMES, /* read-write */
+ CPDMA_RX_OFF_LEN_UPDATE, /* read-write */
+ CPDMA_RX_OWNERSHIP_FLIP, /* read-write */
+ CPDMA_TX_PRIO_FIXED, /* read-write */
+ CPDMA_STAT_IDLE, /* read-only */
+ CPDMA_STAT_TX_ERR_CHAN, /* read-only */
+ CPDMA_STAT_TX_ERR_CODE, /* read-only */
+ CPDMA_STAT_RX_ERR_CHAN, /* read-only */
+ CPDMA_STAT_RX_ERR_CODE, /* read-only */
+ CPDMA_RX_BUFFER_OFFSET, /* read-write */
+};
+
+int cpdma_control_get(struct cpdma_ctlr *ctlr, int control);
+int cpdma_control_set(struct cpdma_ctlr *ctlr, int control, int value);
+
+#endif
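
Taken together, the intended call sequence for this API looks roughly like the sketch below, modeled on the davinci_emac conversion that follows; the handler names, skb, ret and the err label are hypothetical:

	/* handlers have the cpdma_handler_fn signature; a negative status
	 * means the packet was torn down rather than completed */
	txch = cpdma_chan_create(ctlr, tx_chan_num(0), example_tx_done);
	rxch = cpdma_chan_create(ctlr, rx_chan_num(0), example_rx_done);
	if (!txch || !rxch)
		goto err;			/* hypothetical error path */

	/* queue initial RX buffers, then start the controller */
	ret = cpdma_chan_submit(rxch, skb, skb->data, skb_tailroom(skb),
				GFP_KERNEL);
	cpdma_ctlr_start(ctlr);

	/* completions are drained via cpdma_chan_process(); on shutdown,
	 * stop and destroy in reverse order */
	cpdma_ctlr_stop(ctlr);
	cpdma_chan_destroy(txch);
	cpdma_chan_destroy(rxch);
	cpdma_ctlr_destroy(ctlr);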
diff --git a/drivers/net/davinci_emac.c b/drivers/net/davinci_emac.c
index 7fbd052ddb0a..2a628d17d178 100644
--- a/drivers/net/davinci_emac.c
+++ b/drivers/net/davinci_emac.c
@@ -63,6 +63,8 @@
#include <asm/irq.h>
#include <asm/page.h>
+#include "davinci_cpdma.h"
+
static int debug_level;
module_param(debug_level, int, 0);
MODULE_PARM_DESC(debug_level, "DaVinci EMAC debug level (NETIF_MSG bits)");
@@ -113,7 +115,7 @@ static const char emac_version_string[] = "TI DaVinci EMAC Linux v6.1";
#define EMAC_DEF_MAX_FRAME_SIZE (1500 + 14 + 4 + 4)
#define EMAC_DEF_TX_CH (0) /* Default 0th channel */
#define EMAC_DEF_RX_CH (0) /* Default 0th channel */
-#define EMAC_DEF_MDIO_TICK_MS (10) /* typically 1 tick=1 ms) */
+#define EMAC_DEF_RX_NUM_DESC (128)
#define EMAC_DEF_MAX_TX_CH (1) /* Max TX channels configured */
#define EMAC_DEF_MAX_RX_CH (1) /* Max RX channels configured */
#define EMAC_POLL_WEIGHT (64) /* Default NAPI poll weight */
@@ -125,7 +127,6 @@ static const char emac_version_string[] = "TI DaVinci EMAC Linux v6.1";
/* EMAC register related defines */
#define EMAC_ALL_MULTI_REG_VALUE (0xFFFFFFFF)
#define EMAC_NUM_MULTICAST_BITS (64)
-#define EMAC_TEARDOWN_VALUE (0xFFFFFFFC)
#define EMAC_TX_CONTROL_TX_ENABLE_VAL (0x1)
#define EMAC_RX_CONTROL_RX_ENABLE_VAL (0x1)
#define EMAC_MAC_HOST_ERR_INTMASK_VAL (0x2)
@@ -212,24 +213,10 @@ static const char emac_version_string[] = "TI DaVinci EMAC Linux v6.1";
#define EMAC_DEF_MAX_MULTICAST_ADDRESSES (64) /* Max mcast addr's */
/* EMAC Peripheral Device Register Memory Layout structure */
-#define EMAC_TXIDVER 0x0
-#define EMAC_TXCONTROL 0x4
-#define EMAC_TXTEARDOWN 0x8
-#define EMAC_RXIDVER 0x10
-#define EMAC_RXCONTROL 0x14
-#define EMAC_RXTEARDOWN 0x18
-#define EMAC_TXINTSTATRAW 0x80
-#define EMAC_TXINTSTATMASKED 0x84
-#define EMAC_TXINTMASKSET 0x88
-#define EMAC_TXINTMASKCLEAR 0x8C
#define EMAC_MACINVECTOR 0x90
#define EMAC_DM646X_MACEOIVECTOR 0x94
-#define EMAC_RXINTSTATRAW 0xA0
-#define EMAC_RXINTSTATMASKED 0xA4
-#define EMAC_RXINTMASKSET 0xA8
-#define EMAC_RXINTMASKCLEAR 0xAC
#define EMAC_MACINTSTATRAW 0xB0
#define EMAC_MACINTSTATMASKED 0xB4
#define EMAC_MACINTMASKSET 0xB8
@@ -256,12 +243,6 @@ static const char emac_version_string[] = "TI DaVinci EMAC Linux v6.1";
#define EMAC_MACADDRHI 0x504
#define EMAC_MACINDEX 0x508
-/* EMAC HDP and Completion registors */
-#define EMAC_TXHDP(ch) (0x600 + (ch * 4))
-#define EMAC_RXHDP(ch) (0x620 + (ch * 4))
-#define EMAC_TXCP(ch) (0x640 + (ch * 4))
-#define EMAC_RXCP(ch) (0x660 + (ch * 4))
-
/* EMAC statistics registers */
#define EMAC_RXGOODFRAMES 0x200
#define EMAC_RXBCASTFRAMES 0x204
@@ -303,25 +284,6 @@ static const char emac_version_string[] = "TI DaVinci EMAC Linux v6.1";
#define EMAC_DM644X_INTMIN_INTVL 0x1
#define EMAC_DM644X_INTMAX_INTVL (EMAC_DM644X_EWINTCNT_MASK)
-/* EMAC MDIO related */
-/* Mask & Control defines */
-#define MDIO_CONTROL_CLKDIV (0xFF)
-#define MDIO_CONTROL_ENABLE BIT(30)
-#define MDIO_USERACCESS_GO BIT(31)
-#define MDIO_USERACCESS_WRITE BIT(30)
-#define MDIO_USERACCESS_READ (0)
-#define MDIO_USERACCESS_REGADR (0x1F << 21)
-#define MDIO_USERACCESS_PHYADR (0x1F << 16)
-#define MDIO_USERACCESS_DATA (0xFFFF)
-#define MDIO_USERPHYSEL_LINKSEL BIT(7)
-#define MDIO_VER_MODID (0xFFFF << 16)
-#define MDIO_VER_REVMAJ (0xFF << 8)
-#define MDIO_VER_REVMIN (0xFF)
-
-#define MDIO_USERACCESS(inst) (0x80 + (inst * 8))
-#define MDIO_USERPHYSEL(inst) (0x84 + (inst * 8))
-#define MDIO_CONTROL (0x04)
-
/* EMAC DM646X control module registers */
#define EMAC_DM646X_CMINTCTRL 0x0C
#define EMAC_DM646X_CMRXINTEN 0x14
@@ -345,120 +307,6 @@ static const char emac_version_string[] = "TI DaVinci EMAC Linux v6.1";
/* EMAC Stats Clear Mask */
#define EMAC_STATS_CLR_MASK (0xFFFFFFFF)
-/** net_buf_obj: EMAC network bufferdata structure
- *
- * EMAC network buffer data structure
- */
-struct emac_netbufobj {
- void *buf_token;
- char *data_ptr;
- int length;
-};
-
-/** net_pkt_obj: EMAC network packet data structure
- *
- * EMAC network packet data structure - supports buffer list (for future)
- */
-struct emac_netpktobj {
- void *pkt_token; /* data token may hold tx/rx chan id */
- struct emac_netbufobj *buf_list; /* array of network buffer objects */
- int num_bufs;
- int pkt_length;
-};
-
-/** emac_tx_bd: EMAC TX Buffer descriptor data structure
- *
- * EMAC TX Buffer descriptor data structure
- */
-struct emac_tx_bd {
- int h_next;
- int buff_ptr;
- int off_b_len;
- int mode; /* SOP, EOP, ownership, EOQ, teardown,Qstarv, length */
- struct emac_tx_bd __iomem *next;
- void *buf_token;
-};
-
-/** emac_txch: EMAC TX Channel data structure
- *
- * EMAC TX Channel data structure
- */
-struct emac_txch {
- /* Config related */
- u32 num_bd;
- u32 service_max;
-
- /* CPPI specific */
- u32 alloc_size;
- void __iomem *bd_mem;
- struct emac_tx_bd __iomem *bd_pool_head;
- struct emac_tx_bd __iomem *active_queue_head;
- struct emac_tx_bd __iomem *active_queue_tail;
- struct emac_tx_bd __iomem *last_hw_bdprocessed;
- u32 queue_active;
- u32 teardown_pending;
- u32 *tx_complete;
-
- /** statistics */
- u32 proc_count; /* TX: # of times emac_tx_bdproc is called */
- u32 mis_queued_packets;
- u32 queue_reinit;
- u32 end_of_queue_add;
- u32 out_of_tx_bd;
- u32 no_active_pkts; /* IRQ when there were no packets to process */
- u32 active_queue_count;
-};
-
-/** emac_rx_bd: EMAC RX Buffer descriptor data structure
- *
- * EMAC RX Buffer descriptor data structure
- */
-struct emac_rx_bd {
- int h_next;
- int buff_ptr;
- int off_b_len;
- int mode;
- struct emac_rx_bd __iomem *next;
- void *data_ptr;
- void *buf_token;
-};
-
-/** emac_rxch: EMAC RX Channel data structure
- *
- * EMAC RX Channel data structure
- */
-struct emac_rxch {
- /* configuration info */
- u32 num_bd;
- u32 service_max;
- u32 buf_size;
- char mac_addr[6];
-
- /** CPPI specific */
- u32 alloc_size;
- void __iomem *bd_mem;
- struct emac_rx_bd __iomem *bd_pool_head;
- struct emac_rx_bd __iomem *active_queue_head;
- struct emac_rx_bd __iomem *active_queue_tail;
- u32 queue_active;
- u32 teardown_pending;
-
- /* packet and buffer objects */
- struct emac_netpktobj pkt_queue;
- struct emac_netbufobj buf_queue;
-
- /** statistics */
- u32 proc_count; /* number of times emac_rx_bdproc is called */
- u32 processed_bd;
- u32 recycled_bd;
- u32 out_of_rx_bd;
- u32 out_of_rx_buffers;
- u32 queue_reinit;
- u32 end_of_queue_add;
- u32 end_of_queue;
- u32 mis_queued_packets;
-};
-
/* emac_priv: EMAC private data structure
*
* EMAC adapter private data structure
@@ -469,17 +317,13 @@ struct emac_priv {
struct platform_device *pdev;
struct napi_struct napi;
char mac_addr[6];
- spinlock_t tx_lock;
- spinlock_t rx_lock;
void __iomem *remap_addr;
u32 emac_base_phys;
void __iomem *emac_base;
void __iomem *ctrl_base;
- void __iomem *emac_ctrl_ram;
- u32 ctrl_ram_size;
- u32 hw_ram_addr;
- struct emac_txch *txch[EMAC_DEF_MAX_TX_CH];
- struct emac_rxch *rxch[EMAC_DEF_MAX_RX_CH];
+ struct cpdma_ctlr *dma;
+ struct cpdma_chan *txchan;
+ struct cpdma_chan *rxchan;
u32 link; /* 1=link on, 0=link off */
u32 speed; /* 0=Auto Neg, 1=No PHY, 10,100, 1000 - mbps */
u32 duplex; /* Link duplex: 0=Half, 1=Full */
@@ -493,13 +337,7 @@ struct emac_priv {
u32 mac_hash2;
u32 multicast_hash_cnt[EMAC_NUM_MULTICAST_BITS];
u32 rx_addr_type;
- /* periodic timer required for MDIO polling */
- struct timer_list periodic_timer;
- u32 periodic_ticks;
- u32 timer_active;
- u32 phy_mask;
- /* mii_bus,phy members */
- struct mii_bus *mii_bus;
+ const char *phy_id;
struct phy_device *phydev;
spinlock_t lock;
/*platform specific members*/
@@ -510,19 +348,6 @@ struct emac_priv {
/* clock frequency for EMAC */
static struct clk *emac_clk;
static unsigned long emac_bus_frequency;
-static unsigned long mdio_max_freq;
-
-#define emac_virt_to_phys(addr, priv) \
- (((u32 __force)(addr) - (u32 __force)(priv->emac_ctrl_ram)) \
- + priv->hw_ram_addr)
-
-/* Cache macros - Packet buffers would be from skb pool which is cached */
-#define EMAC_VIRT_NOCACHE(addr) (addr)
-
-/* DM644x does not have BD's in cached memory - so no cache functions */
-#define BD_CACHE_INVALIDATE(addr, size)
-#define BD_CACHE_WRITEBACK(addr, size)
-#define BD_CACHE_WRITEBACK_INVALIDATE(addr, size)
/* EMAC TX Host Error description strings */
static char *emac_txhost_errcodes[16] = {
@@ -548,9 +373,6 @@ static char *emac_rxhost_errcodes[16] = {
#define emac_ctrl_read(reg) ioread32((priv->ctrl_base + (reg)))
#define emac_ctrl_write(reg, val) iowrite32(val, (priv->ctrl_base + (reg)))
-#define emac_mdio_read(reg) ioread32(bus->priv + (reg))
-#define emac_mdio_write(reg, val) iowrite32(val, (bus->priv + (reg)))
-
/**
* emac_dump_regs: Dump important EMAC registers to debug terminal
* @priv: The DaVinci EMAC private adapter structure
@@ -569,20 +391,6 @@ static void emac_dump_regs(struct emac_priv *priv)
emac_ctrl_read(EMAC_CTRL_EWCTL),
emac_ctrl_read(EMAC_CTRL_EWINTTCNT));
}
- dev_info(emac_dev, "EMAC: TXID: %08X %s, RXID: %08X %s\n",
- emac_read(EMAC_TXIDVER),
- ((emac_read(EMAC_TXCONTROL)) ? "enabled" : "disabled"),
- emac_read(EMAC_RXIDVER),
- ((emac_read(EMAC_RXCONTROL)) ? "enabled" : "disabled"));
- dev_info(emac_dev, "EMAC: TXIntRaw:%08X, TxIntMasked: %08X, "\
- "TxIntMasSet: %08X\n", emac_read(EMAC_TXINTSTATRAW),
- emac_read(EMAC_TXINTSTATMASKED), emac_read(EMAC_TXINTMASKSET));
- dev_info(emac_dev, "EMAC: RXIntRaw:%08X, RxIntMasked: %08X, "\
- "RxIntMasSet: %08X\n", emac_read(EMAC_RXINTSTATRAW),
- emac_read(EMAC_RXINTSTATMASKED), emac_read(EMAC_RXINTMASKSET));
- dev_info(emac_dev, "EMAC: MacIntRaw:%08X, MacIntMasked: %08X, "\
- "MacInVector=%08X\n", emac_read(EMAC_MACINTSTATRAW),
- emac_read(EMAC_MACINTSTATMASKED), emac_read(EMAC_MACINVECTOR));
dev_info(emac_dev, "EMAC: EmuControl:%08X, FifoControl: %08X\n",
emac_read(EMAC_EMCONTROL), emac_read(EMAC_FIFOCONTROL));
dev_info(emac_dev, "EMAC: MBPEnable:%08X, RXUnicastSet: %08X, "\
@@ -591,8 +399,6 @@ static void emac_dump_regs(struct emac_priv *priv)
dev_info(emac_dev, "EMAC: MacControl:%08X, MacStatus: %08X, "\
"MacConfig=%08X\n", emac_read(EMAC_MACCONTROL),
emac_read(EMAC_MACSTATUS), emac_read(EMAC_MACCONFIG));
- dev_info(emac_dev, "EMAC: TXHDP[0]:%08X, RXHDP[0]: %08X\n",
- emac_read(EMAC_TXHDP(0)), emac_read(EMAC_RXHDP(0)));
dev_info(emac_dev, "EMAC Statistics\n");
dev_info(emac_dev, "EMAC: rx_good_frames:%d\n",
emac_read(EMAC_RXGOODFRAMES));
@@ -654,11 +460,10 @@ static void emac_dump_regs(struct emac_priv *priv)
emac_read(EMAC_RXMOFOVERRUNS));
dev_info(emac_dev, "EMAC: rx_dma_overruns:%d\n",
emac_read(EMAC_RXDMAOVERRUNS));
+
+ cpdma_ctlr_dump(priv->dma);
}
-/*************************************************************************
- * EMAC MDIO/Phy Functionality
- *************************************************************************/
/**
* emac_get_drvinfo: Get EMAC driver information
* @ndev: The DaVinci EMAC network adapter
@@ -686,7 +491,7 @@ static int emac_get_settings(struct net_device *ndev,
struct ethtool_cmd *ecmd)
{
struct emac_priv *priv = netdev_priv(ndev);
- if (priv->phy_mask)
+ if (priv->phydev)
return phy_ethtool_gset(priv->phydev, ecmd);
else
return -EOPNOTSUPP;
@@ -704,7 +509,7 @@ static int emac_get_settings(struct net_device *ndev,
static int emac_set_settings(struct net_device *ndev, struct ethtool_cmd *ecmd)
{
struct emac_priv *priv = netdev_priv(ndev);
- if (priv->phy_mask)
+ if (priv->phydev)
return phy_ethtool_sset(priv->phydev, ecmd);
else
return -EOPNOTSUPP;
@@ -841,7 +646,7 @@ static void emac_update_phystatus(struct emac_priv *priv)
mac_control = emac_read(EMAC_MACCONTROL);
cur_duplex = (mac_control & EMAC_MACCONTROL_FULLDUPLEXEN) ?
DUPLEX_FULL : DUPLEX_HALF;
- if (priv->phy_mask)
+ if (priv->phydev)
new_duplex = priv->phydev->duplex;
else
new_duplex = DUPLEX_FULL;
@@ -1184,371 +989,68 @@ static irqreturn_t emac_irq(int irq, void *dev_id)
return IRQ_HANDLED;
}
-/** EMAC on-chip buffer descriptor memory
- *
- * WARNING: Please note that the on chip memory is used for both TX and RX
- * buffer descriptor queues and is equally divided between TX and RX desc's
- * If the number of TX or RX descriptors change this memory pointers need
- * to be adjusted. If external memory is allocated then these pointers can
- * pointer to the memory
- *
- */
-#define EMAC_TX_BD_MEM(priv) ((priv)->emac_ctrl_ram)
-#define EMAC_RX_BD_MEM(priv) ((priv)->emac_ctrl_ram + \
- (((priv)->ctrl_ram_size) >> 1))
-
-/**
- * emac_init_txch: TX channel initialization
- * @priv: The DaVinci EMAC private adapter structure
- * @ch: RX channel number
- *
- * Called during device init to setup a TX channel (allocate buffer desc
- * create free pool and keep ready for transmission
- *
- * Returns success(0) or mem alloc failures error code
- */
-static int emac_init_txch(struct emac_priv *priv, u32 ch)
-{
- struct device *emac_dev = &priv->ndev->dev;
- u32 cnt, bd_size;
- void __iomem *mem;
- struct emac_tx_bd __iomem *curr_bd;
- struct emac_txch *txch = NULL;
-
- txch = kzalloc(sizeof(struct emac_txch), GFP_KERNEL);
- if (NULL == txch) {
- dev_err(emac_dev, "DaVinci EMAC: TX Ch mem alloc failed");
- return -ENOMEM;
- }
- priv->txch[ch] = txch;
- txch->service_max = EMAC_DEF_TX_MAX_SERVICE;
- txch->active_queue_head = NULL;
- txch->active_queue_tail = NULL;
- txch->queue_active = 0;
- txch->teardown_pending = 0;
-
- /* allocate memory for TX CPPI channel on a 4 byte boundry */
- txch->tx_complete = kzalloc(txch->service_max * sizeof(u32),
- GFP_KERNEL);
- if (NULL == txch->tx_complete) {
- dev_err(emac_dev, "DaVinci EMAC: Tx service mem alloc failed");
- kfree(txch);
- return -ENOMEM;
- }
-
- /* allocate buffer descriptor pool align every BD on four word
- * boundry for future requirements */
- bd_size = (sizeof(struct emac_tx_bd) + 0xF) & ~0xF;
- txch->num_bd = (priv->ctrl_ram_size >> 1) / bd_size;
- txch->alloc_size = (((bd_size * txch->num_bd) + 0xF) & ~0xF);
-
- /* alloc TX BD memory */
- txch->bd_mem = EMAC_TX_BD_MEM(priv);
- __memzero((void __force *)txch->bd_mem, txch->alloc_size);
-
- /* initialize the BD linked list */
- mem = (void __force __iomem *)
- (((u32 __force) txch->bd_mem + 0xF) & ~0xF);
- txch->bd_pool_head = NULL;
- for (cnt = 0; cnt < txch->num_bd; cnt++) {
- curr_bd = mem + (cnt * bd_size);
- curr_bd->next = txch->bd_pool_head;
- txch->bd_pool_head = curr_bd;
- }
-
- /* reset statistics counters */
- txch->out_of_tx_bd = 0;
- txch->no_active_pkts = 0;
- txch->active_queue_count = 0;
-
- return 0;
-}
-
-/**
- * emac_cleanup_txch: Book-keep function to clean TX channel resources
- * @priv: The DaVinci EMAC private adapter structure
- * @ch: TX channel number
- *
- * Called to clean up TX channel resources
- *
- */
-static void emac_cleanup_txch(struct emac_priv *priv, u32 ch)
+static struct sk_buff *emac_rx_alloc(struct emac_priv *priv)
{
- struct emac_txch *txch = priv->txch[ch];
-
- if (txch) {
- if (txch->bd_mem)
- txch->bd_mem = NULL;
- kfree(txch->tx_complete);
- kfree(txch);
- priv->txch[ch] = NULL;
- }
+ struct sk_buff *skb = dev_alloc_skb(priv->rx_buf_size);
+ if (WARN_ON(!skb))
+ return NULL;
+ skb->dev = priv->ndev;
+ skb_reserve(skb, NET_IP_ALIGN);
+ return skb;
}
-/**
- * emac_net_tx_complete: TX packet completion function
- * @priv: The DaVinci EMAC private adapter structure
- * @net_data_tokens: packet token - skb pointer
- * @num_tokens: number of skb's to free
- * @ch: TX channel number
- *
- * Frees the skb once packet is transmitted
- *
- */
-static int emac_net_tx_complete(struct emac_priv *priv,
- void **net_data_tokens,
- int num_tokens, u32 ch)
+static void emac_rx_handler(void *token, int len, int status)
{
- struct net_device *ndev = priv->ndev;
- u32 cnt;
-
- if (unlikely(num_tokens && netif_queue_stopped(ndev)))
- netif_start_queue(ndev);
- for (cnt = 0; cnt < num_tokens; cnt++) {
- struct sk_buff *skb = (struct sk_buff *)net_data_tokens[cnt];
- if (skb == NULL)
- continue;
- ndev->stats.tx_packets++;
- ndev->stats.tx_bytes += skb->len;
+ struct sk_buff *skb = token;
+ struct net_device *ndev = skb->dev;
+ struct emac_priv *priv = netdev_priv(ndev);
+ struct device *emac_dev = &ndev->dev;
+ int ret;
+
+ /* free and bail if we are shutting down */
+ if (unlikely(!netif_running(ndev))) {
dev_kfree_skb_any(skb);
+ return;
}
- return 0;
-}
-
-/**
- * emac_txch_teardown: TX channel teardown
- * @priv: The DaVinci EMAC private adapter structure
- * @ch: TX channel number
- *
- * Called to teardown TX channel
- *
- */
-static void emac_txch_teardown(struct emac_priv *priv, u32 ch)
-{
- struct device *emac_dev = &priv->ndev->dev;
- u32 teardown_cnt = 0xFFFFFFF0; /* Some high value */
- struct emac_txch *txch = priv->txch[ch];
- struct emac_tx_bd __iomem *curr_bd;
-
- while ((emac_read(EMAC_TXCP(ch)) & EMAC_TEARDOWN_VALUE) !=
- EMAC_TEARDOWN_VALUE) {
- /* wait till tx teardown complete */
- cpu_relax(); /* TODO: check if this helps ... */
- --teardown_cnt;
- if (0 == teardown_cnt) {
- dev_err(emac_dev, "EMAC: TX teardown aborted\n");
- break;
- }
- }
- emac_write(EMAC_TXCP(ch), EMAC_TEARDOWN_VALUE);
-
- /* process sent packets and return skb's to upper layer */
- if (1 == txch->queue_active) {
- curr_bd = txch->active_queue_head;
- while (curr_bd != NULL) {
- dma_unmap_single(emac_dev, curr_bd->buff_ptr,
- curr_bd->off_b_len & EMAC_RX_BD_BUF_SIZE,
- DMA_TO_DEVICE);
-
- emac_net_tx_complete(priv, (void __force *)
- &curr_bd->buf_token, 1, ch);
- if (curr_bd != txch->active_queue_tail)
- curr_bd = curr_bd->next;
- else
- break;
- }
- txch->bd_pool_head = txch->active_queue_head;
- txch->active_queue_head =
- txch->active_queue_tail = NULL;
- }
-}
-/**
- * emac_stop_txch: Stop TX channel operation
- * @priv: The DaVinci EMAC private adapter structure
- * @ch: TX channel number
- *
- * Called to stop TX channel operation
- *
- */
-static void emac_stop_txch(struct emac_priv *priv, u32 ch)
-{
- struct emac_txch *txch = priv->txch[ch];
-
- if (txch) {
- txch->teardown_pending = 1;
- emac_write(EMAC_TXTEARDOWN, 0);
- emac_txch_teardown(priv, ch);
- txch->teardown_pending = 0;
- emac_write(EMAC_TXINTMASKCLEAR, BIT(ch));
+ /* recycle on receive error */
+ if (status < 0) {
+ ndev->stats.rx_errors++;
+ goto recycle;
}
-}
-/**
- * emac_tx_bdproc: TX buffer descriptor (packet) processing
- * @priv: The DaVinci EMAC private adapter structure
- * @ch: TX channel number to process buffer descriptors for
- * @budget: number of packets allowed to process
- * @pending: indication to caller that packets are pending to process
- *
- * Processes TX buffer descriptors after packets are transmitted - checks
- * ownership bit on the TX * descriptor and requeues it to free pool & frees
- * the SKB buffer. Only "budget" number of packets are processed and
- * indication of pending packets provided to the caller
- *
- * Returns number of packets processed
- */
-static int emac_tx_bdproc(struct emac_priv *priv, u32 ch, u32 budget)
-{
- struct device *emac_dev = &priv->ndev->dev;
- unsigned long flags;
- u32 frame_status;
- u32 pkts_processed = 0;
- u32 tx_complete_cnt = 0;
- struct emac_tx_bd __iomem *curr_bd;
- struct emac_txch *txch = priv->txch[ch];
- u32 *tx_complete_ptr = txch->tx_complete;
-
- if (unlikely(1 == txch->teardown_pending)) {
- if (netif_msg_tx_err(priv) && net_ratelimit()) {
- dev_err(emac_dev, "DaVinci EMAC:emac_tx_bdproc: "\
- "teardown pending\n");
- }
- return 0; /* dont handle any pkt completions */
- }
+ /* feed received packet up the stack */
+ skb_put(skb, len);
+ skb->protocol = eth_type_trans(skb, ndev);
+ netif_receive_skb(skb);
+ ndev->stats.rx_bytes += len;
+ ndev->stats.rx_packets++;
- ++txch->proc_count;
- spin_lock_irqsave(&priv->tx_lock, flags);
- curr_bd = txch->active_queue_head;
- if (NULL == curr_bd) {
- emac_write(EMAC_TXCP(ch),
- emac_virt_to_phys(txch->last_hw_bdprocessed, priv));
- txch->no_active_pkts++;
- spin_unlock_irqrestore(&priv->tx_lock, flags);
- return 0;
+ /* alloc a new packet for receive */
+ skb = emac_rx_alloc(priv);
+ if (!skb) {
+ if (netif_msg_rx_err(priv) && net_ratelimit())
+ dev_err(emac_dev, "failed rx buffer alloc\n");
+ return;
}
- BD_CACHE_INVALIDATE(curr_bd, EMAC_BD_LENGTH_FOR_CACHE);
- frame_status = curr_bd->mode;
- while ((curr_bd) &&
- ((frame_status & EMAC_CPPI_OWNERSHIP_BIT) == 0) &&
- (pkts_processed < budget)) {
- emac_write(EMAC_TXCP(ch), emac_virt_to_phys(curr_bd, priv));
- txch->active_queue_head = curr_bd->next;
- if (frame_status & EMAC_CPPI_EOQ_BIT) {
- if (curr_bd->next) { /* misqueued packet */
- emac_write(EMAC_TXHDP(ch), curr_bd->h_next);
- ++txch->mis_queued_packets;
- } else {
- txch->queue_active = 0; /* end of queue */
- }
- }
- dma_unmap_single(emac_dev, curr_bd->buff_ptr,
- curr_bd->off_b_len & EMAC_RX_BD_BUF_SIZE,
- DMA_TO_DEVICE);
-
- *tx_complete_ptr = (u32) curr_bd->buf_token;
- ++tx_complete_ptr;
- ++tx_complete_cnt;
- curr_bd->next = txch->bd_pool_head;
- txch->bd_pool_head = curr_bd;
- --txch->active_queue_count;
- pkts_processed++;
- txch->last_hw_bdprocessed = curr_bd;
- curr_bd = txch->active_queue_head;
- if (curr_bd) {
- BD_CACHE_INVALIDATE(curr_bd, EMAC_BD_LENGTH_FOR_CACHE);
- frame_status = curr_bd->mode;
- }
- } /* end of pkt processing loop */
-
- emac_net_tx_complete(priv,
- (void *)&txch->tx_complete[0],
- tx_complete_cnt, ch);
- spin_unlock_irqrestore(&priv->tx_lock, flags);
- return pkts_processed;
+recycle:
+ ret = cpdma_chan_submit(priv->rxchan, skb, skb->data,
+ skb_tailroom(skb), GFP_KERNEL);
+ if (WARN_ON(ret < 0))
+ dev_kfree_skb_any(skb);
}
-#define EMAC_ERR_TX_OUT_OF_BD -1
-
-/**
- * emac_send: EMAC Transmit function (internal)
- * @priv: The DaVinci EMAC private adapter structure
- * @pkt: packet pointer (contains skb ptr)
- * @ch: TX channel number
- *
- * Called by the transmit function to queue the packet in EMAC hardware queue
- *
- * Returns success(0) or error code (typically out of desc's)
- */
-static int emac_send(struct emac_priv *priv, struct emac_netpktobj *pkt, u32 ch)
+static void emac_tx_handler(void *token, int len, int status)
{
- unsigned long flags;
- struct emac_tx_bd __iomem *curr_bd;
- struct emac_txch *txch;
- struct emac_netbufobj *buf_list;
-
- txch = priv->txch[ch];
- buf_list = pkt->buf_list; /* get handle to the buffer array */
-
- /* check packet size and pad if short */
- if (pkt->pkt_length < EMAC_DEF_MIN_ETHPKTSIZE) {
- buf_list->length += (EMAC_DEF_MIN_ETHPKTSIZE - pkt->pkt_length);
- pkt->pkt_length = EMAC_DEF_MIN_ETHPKTSIZE;
- }
+ struct sk_buff *skb = token;
+ struct net_device *ndev = skb->dev;
- spin_lock_irqsave(&priv->tx_lock, flags);
- curr_bd = txch->bd_pool_head;
- if (curr_bd == NULL) {
- txch->out_of_tx_bd++;
- spin_unlock_irqrestore(&priv->tx_lock, flags);
- return EMAC_ERR_TX_OUT_OF_BD;
- }
-
- txch->bd_pool_head = curr_bd->next;
- curr_bd->buf_token = buf_list->buf_token;
- curr_bd->buff_ptr = dma_map_single(&priv->ndev->dev, buf_list->data_ptr,
- buf_list->length, DMA_TO_DEVICE);
- curr_bd->off_b_len = buf_list->length;
- curr_bd->h_next = 0;
- curr_bd->next = NULL;
- curr_bd->mode = (EMAC_CPPI_SOP_BIT | EMAC_CPPI_OWNERSHIP_BIT |
- EMAC_CPPI_EOP_BIT | pkt->pkt_length);
-
- /* flush the packet from cache if write back cache is present */
- BD_CACHE_WRITEBACK_INVALIDATE(curr_bd, EMAC_BD_LENGTH_FOR_CACHE);
-
- /* send the packet */
- if (txch->active_queue_head == NULL) {
- txch->active_queue_head = curr_bd;
- txch->active_queue_tail = curr_bd;
- if (1 != txch->queue_active) {
- emac_write(EMAC_TXHDP(ch),
- emac_virt_to_phys(curr_bd, priv));
- txch->queue_active = 1;
- }
- ++txch->queue_reinit;
- } else {
- register struct emac_tx_bd __iomem *tail_bd;
- register u32 frame_status;
-
- tail_bd = txch->active_queue_tail;
- tail_bd->next = curr_bd;
- txch->active_queue_tail = curr_bd;
- tail_bd = EMAC_VIRT_NOCACHE(tail_bd);
- tail_bd->h_next = (int)emac_virt_to_phys(curr_bd, priv);
- frame_status = tail_bd->mode;
- if (frame_status & EMAC_CPPI_EOQ_BIT) {
- emac_write(EMAC_TXHDP(ch),
- emac_virt_to_phys(curr_bd, priv));
- frame_status &= ~(EMAC_CPPI_EOQ_BIT);
- tail_bd->mode = frame_status;
- ++txch->end_of_queue_add;
- }
- }
- txch->active_queue_count++;
- spin_unlock_irqrestore(&priv->tx_lock, flags);
- return 0;
+ if (unlikely(netif_queue_stopped(ndev)))
+ netif_start_queue(ndev);
+ ndev->stats.tx_packets++;
+ ndev->stats.tx_bytes += len;
+ dev_kfree_skb_any(skb);
}
/**
@@ -1565,42 +1067,36 @@ static int emac_dev_xmit(struct sk_buff *skb, struct net_device *ndev)
{
struct device *emac_dev = &ndev->dev;
int ret_code;
- struct emac_netbufobj tx_buf; /* buffer obj-only single frame support */
- struct emac_netpktobj tx_packet; /* packet object */
struct emac_priv *priv = netdev_priv(ndev);
/* If no link, return */
if (unlikely(!priv->link)) {
if (netif_msg_tx_err(priv) && net_ratelimit())
dev_err(emac_dev, "DaVinci EMAC: No link to transmit");
- return NETDEV_TX_BUSY;
+ goto fail_tx;
+ }
+
+ ret_code = skb_padto(skb, EMAC_DEF_MIN_ETHPKTSIZE);
+ if (unlikely(ret_code < 0)) {
+ if (netif_msg_tx_err(priv) && net_ratelimit())
+ dev_err(emac_dev, "DaVinci EMAC: packet pad failed");
+ goto fail_tx;
}
- /* Build the buffer and packet objects - Since only single fragment is
- * supported, need not set length and token in both packet & object.
- * Doing so for completeness sake & to show that this needs to be done
- * in multifragment case
- */
- tx_packet.buf_list = &tx_buf;
- tx_packet.num_bufs = 1; /* only single fragment supported */
- tx_packet.pkt_length = skb->len;
- tx_packet.pkt_token = (void *)skb;
- tx_buf.length = skb->len;
- tx_buf.buf_token = (void *)skb;
- tx_buf.data_ptr = skb->data;
- ret_code = emac_send(priv, &tx_packet, EMAC_DEF_TX_CH);
+ ret_code = cpdma_chan_submit(priv->txchan, skb, skb->data, skb->len,
+ GFP_KERNEL);
if (unlikely(ret_code != 0)) {
- if (ret_code == EMAC_ERR_TX_OUT_OF_BD) {
- if (netif_msg_tx_err(priv) && net_ratelimit())
- dev_err(emac_dev, "DaVinci EMAC: xmit() fatal"\
- " err. Out of TX BD's");
- netif_stop_queue(priv->ndev);
- }
- ndev->stats.tx_dropped++;
- return NETDEV_TX_BUSY;
+ if (netif_msg_tx_err(priv) && net_ratelimit())
+ dev_err(emac_dev, "DaVinci EMAC: desc submit failed");
+ goto fail_tx;
}
return NETDEV_TX_OK;
+
+fail_tx:
+ ndev->stats.tx_dropped++;
+ netif_stop_queue(ndev);
+ return NETDEV_TX_BUSY;
}
/**
@@ -1621,218 +1117,16 @@ static void emac_dev_tx_timeout(struct net_device *ndev)
if (netif_msg_tx_err(priv))
dev_err(emac_dev, "DaVinci EMAC: xmit timeout, restarting TX");
+ emac_dump_regs(priv);
+
ndev->stats.tx_errors++;
emac_int_disable(priv);
- emac_stop_txch(priv, EMAC_DEF_TX_CH);
- emac_cleanup_txch(priv, EMAC_DEF_TX_CH);
- emac_init_txch(priv, EMAC_DEF_TX_CH);
- emac_write(EMAC_TXHDP(0), 0);
- emac_write(EMAC_TXINTMASKSET, BIT(EMAC_DEF_TX_CH));
+ cpdma_chan_stop(priv->txchan);
+ cpdma_chan_start(priv->txchan);
emac_int_enable(priv);
}
/**
- * emac_net_alloc_rx_buf: Allocate a skb for RX
- * @priv: The DaVinci EMAC private adapter structure
- * @buf_size: size of SKB data buffer to allocate
- * @data_token: data token returned (skb handle for storing in buffer desc)
- * @ch: RX channel number
- *
- * Called during RX channel setup - allocates skb buffer of required size
- * and provides the skb handle and allocated buffer data pointer to caller
- *
- * Returns skb data pointer or 0 on failure to alloc skb
- */
-static void *emac_net_alloc_rx_buf(struct emac_priv *priv, int buf_size,
- void **data_token, u32 ch)
-{
- struct net_device *ndev = priv->ndev;
- struct device *emac_dev = &ndev->dev;
- struct sk_buff *p_skb;
-
- p_skb = dev_alloc_skb(buf_size);
- if (unlikely(NULL == p_skb)) {
- if (netif_msg_rx_err(priv) && net_ratelimit())
- dev_err(emac_dev, "DaVinci EMAC: failed to alloc skb");
- return NULL;
- }
-
- /* set device pointer in skb and reserve space for extra bytes */
- p_skb->dev = ndev;
- skb_reserve(p_skb, NET_IP_ALIGN);
- *data_token = (void *) p_skb;
- return p_skb->data;
-}
-
-/**
- * emac_init_rxch: RX channel initialization
- * @priv: The DaVinci EMAC private adapter structure
- * @ch: RX channel number
- * @param: mac address for RX channel
- *
- * Called during device init to setup a RX channel (allocate buffers and
- * buffer descriptors, create queue and keep ready for reception
- *
- * Returns success(0) or mem alloc failures error code
- */
-static int emac_init_rxch(struct emac_priv *priv, u32 ch, char *param)
-{
- struct device *emac_dev = &priv->ndev->dev;
- u32 cnt, bd_size;
- void __iomem *mem;
- struct emac_rx_bd __iomem *curr_bd;
- struct emac_rxch *rxch = NULL;
-
- rxch = kzalloc(sizeof(struct emac_rxch), GFP_KERNEL);
- if (NULL == rxch) {
- dev_err(emac_dev, "DaVinci EMAC: RX Ch mem alloc failed");
- return -ENOMEM;
- }
- priv->rxch[ch] = rxch;
- rxch->buf_size = priv->rx_buf_size;
- rxch->service_max = EMAC_DEF_RX_MAX_SERVICE;
- rxch->queue_active = 0;
- rxch->teardown_pending = 0;
-
- /* save mac address */
- for (cnt = 0; cnt < 6; cnt++)
- rxch->mac_addr[cnt] = param[cnt];
-
- /* allocate buffer descriptor pool align every BD on four word
- * boundry for future requirements */
- bd_size = (sizeof(struct emac_rx_bd) + 0xF) & ~0xF;
- rxch->num_bd = (priv->ctrl_ram_size >> 1) / bd_size;
- rxch->alloc_size = (((bd_size * rxch->num_bd) + 0xF) & ~0xF);
- rxch->bd_mem = EMAC_RX_BD_MEM(priv);
- __memzero((void __force *)rxch->bd_mem, rxch->alloc_size);
- rxch->pkt_queue.buf_list = &rxch->buf_queue;
-
- /* allocate RX buffer and initialize the BD linked list */
- mem = (void __force __iomem *)
- (((u32 __force) rxch->bd_mem + 0xF) & ~0xF);
- rxch->active_queue_head = NULL;
- rxch->active_queue_tail = mem;
- for (cnt = 0; cnt < rxch->num_bd; cnt++) {
- curr_bd = mem + (cnt * bd_size);
- /* for future use the last parameter contains the BD ptr */
- curr_bd->data_ptr = emac_net_alloc_rx_buf(priv,
- rxch->buf_size,
- (void __force **)&curr_bd->buf_token,
- EMAC_DEF_RX_CH);
- if (curr_bd->data_ptr == NULL) {
- dev_err(emac_dev, "DaVinci EMAC: RX buf mem alloc " \
- "failed for ch %d\n", ch);
- kfree(rxch);
- return -ENOMEM;
- }
-
- /* populate the hardware descriptor */
- curr_bd->h_next = emac_virt_to_phys(rxch->active_queue_head,
- priv);
- curr_bd->buff_ptr = dma_map_single(emac_dev, curr_bd->data_ptr,
- rxch->buf_size, DMA_FROM_DEVICE);
- curr_bd->off_b_len = rxch->buf_size;
- curr_bd->mode = EMAC_CPPI_OWNERSHIP_BIT;
-
- /* write back to hardware memory */
- BD_CACHE_WRITEBACK_INVALIDATE((u32) curr_bd,
- EMAC_BD_LENGTH_FOR_CACHE);
- curr_bd->next = rxch->active_queue_head;
- rxch->active_queue_head = curr_bd;
- }
-
- /* At this point rxCppi->activeQueueHead points to the first
- RX BD ready to be given to RX HDP and rxch->active_queue_tail
- points to the last RX BD
- */
- return 0;
-}
-
-/**
- * emac_rxch_teardown: RX channel teardown
- * @priv: The DaVinci EMAC private adapter structure
- * @ch: RX channel number
- *
- * Called during device stop to teardown RX channel
- *
- */
-static void emac_rxch_teardown(struct emac_priv *priv, u32 ch)
-{
- struct device *emac_dev = &priv->ndev->dev;
- u32 teardown_cnt = 0xFFFFFFF0; /* Some high value */
-
- while ((emac_read(EMAC_RXCP(ch)) & EMAC_TEARDOWN_VALUE) !=
- EMAC_TEARDOWN_VALUE) {
- /* wait till tx teardown complete */
- cpu_relax(); /* TODO: check if this helps ... */
- --teardown_cnt;
- if (0 == teardown_cnt) {
- dev_err(emac_dev, "EMAC: RX teardown aborted\n");
- break;
- }
- }
- emac_write(EMAC_RXCP(ch), EMAC_TEARDOWN_VALUE);
-}
-
-/**
- * emac_stop_rxch: Stop RX channel operation
- * @priv: The DaVinci EMAC private adapter structure
- * @ch: RX channel number
- *
- * Called during device stop to stop RX channel operation
- *
- */
-static void emac_stop_rxch(struct emac_priv *priv, u32 ch)
-{
- struct emac_rxch *rxch = priv->rxch[ch];
-
- if (rxch) {
- rxch->teardown_pending = 1;
- emac_write(EMAC_RXTEARDOWN, ch);
- /* wait for teardown complete */
- emac_rxch_teardown(priv, ch);
- rxch->teardown_pending = 0;
- emac_write(EMAC_RXINTMASKCLEAR, BIT(ch));
- }
-}
-
-/**
- * emac_cleanup_rxch: Book-keep function to clean RX channel resources
- * @priv: The DaVinci EMAC private adapter structure
- * @ch: RX channel number
- *
- * Called during device stop to clean up RX channel resources
- *
- */
-static void emac_cleanup_rxch(struct emac_priv *priv, u32 ch)
-{
- struct emac_rxch *rxch = priv->rxch[ch];
- struct emac_rx_bd __iomem *curr_bd;
-
- if (rxch) {
- /* free the receive buffers previously allocated */
- curr_bd = rxch->active_queue_head;
- while (curr_bd) {
- if (curr_bd->buf_token) {
- dma_unmap_single(&priv->ndev->dev,
- curr_bd->buff_ptr,
- curr_bd->off_b_len
- & EMAC_RX_BD_BUF_SIZE,
- DMA_FROM_DEVICE);
-
- dev_kfree_skb_any((struct sk_buff *)\
- curr_bd->buf_token);
- }
- curr_bd = curr_bd->next;
- }
- if (rxch->bd_mem)
- rxch->bd_mem = NULL;
- kfree(rxch);
- priv->rxch[ch] = NULL;
- }
-}
-
-/**
* emac_set_type0addr: Set EMAC Type0 mac address
* @priv: The DaVinci EMAC private adapter structure
* @ch: RX channel number
@@ -1948,7 +1242,6 @@ static void emac_setmac(struct emac_priv *priv, u32 ch, char *mac_addr)
static int emac_dev_setmac_addr(struct net_device *ndev, void *addr)
{
struct emac_priv *priv = netdev_priv(ndev);
- struct emac_rxch *rxch = priv->rxch[EMAC_DEF_RX_CH];
struct device *emac_dev = &priv->ndev->dev;
struct sockaddr *sa = addr;
@@ -1959,11 +1252,10 @@ static int emac_dev_setmac_addr(struct net_device *ndev, void *addr)
memcpy(priv->mac_addr, sa->sa_data, ndev->addr_len);
memcpy(ndev->dev_addr, sa->sa_data, ndev->addr_len);
- /* If the interface is down - rxch is NULL. */
/* MAC address is configured only after the interface is enabled. */
if (netif_running(ndev)) {
- memcpy(rxch->mac_addr, sa->sa_data, ndev->addr_len);
- emac_setmac(priv, EMAC_DEF_RX_CH, rxch->mac_addr);
+ memcpy(priv->mac_addr, sa->sa_data, ndev->addr_len);
+ emac_setmac(priv, EMAC_DEF_RX_CH, priv->mac_addr);
}
if (netif_msg_drv(priv))
@@ -1974,194 +1266,6 @@ static int emac_dev_setmac_addr(struct net_device *ndev, void *addr)
}
/**
- * emac_addbd_to_rx_queue: Recycle RX buffer descriptor
- * @priv: The DaVinci EMAC private adapter structure
- * @ch: RX channel number to process buffer descriptors for
- * @curr_bd: current buffer descriptor
- * @buffer: buffer pointer for descriptor
- * @buf_token: buffer token (stores skb information)
- *
- * Prepares the recycled buffer descriptor and addes it to hardware
- * receive queue - if queue empty this descriptor becomes the head
- * else addes the descriptor to end of queue
- *
- */
-static void emac_addbd_to_rx_queue(struct emac_priv *priv, u32 ch,
- struct emac_rx_bd __iomem *curr_bd,
- char *buffer, void *buf_token)
-{
- struct emac_rxch *rxch = priv->rxch[ch];
-
- /* populate the hardware descriptor */
- curr_bd->h_next = 0;
- curr_bd->buff_ptr = dma_map_single(&priv->ndev->dev, buffer,
- rxch->buf_size, DMA_FROM_DEVICE);
- curr_bd->off_b_len = rxch->buf_size;
- curr_bd->mode = EMAC_CPPI_OWNERSHIP_BIT;
- curr_bd->next = NULL;
- curr_bd->data_ptr = buffer;
- curr_bd->buf_token = buf_token;
-
- /* write back */
- BD_CACHE_WRITEBACK_INVALIDATE(curr_bd, EMAC_BD_LENGTH_FOR_CACHE);
- if (rxch->active_queue_head == NULL) {
- rxch->active_queue_head = curr_bd;
- rxch->active_queue_tail = curr_bd;
- if (0 != rxch->queue_active) {
- emac_write(EMAC_RXHDP(ch),
- emac_virt_to_phys(rxch->active_queue_head, priv));
- rxch->queue_active = 1;
- }
- } else {
- struct emac_rx_bd __iomem *tail_bd;
- u32 frame_status;
-
- tail_bd = rxch->active_queue_tail;
- rxch->active_queue_tail = curr_bd;
- tail_bd->next = curr_bd;
- tail_bd = EMAC_VIRT_NOCACHE(tail_bd);
- tail_bd->h_next = emac_virt_to_phys(curr_bd, priv);
- frame_status = tail_bd->mode;
- if (frame_status & EMAC_CPPI_EOQ_BIT) {
- emac_write(EMAC_RXHDP(ch),
- emac_virt_to_phys(curr_bd, priv));
- frame_status &= ~(EMAC_CPPI_EOQ_BIT);
- tail_bd->mode = frame_status;
- ++rxch->end_of_queue_add;
- }
- }
- ++rxch->recycled_bd;
-}
-
-/**
- * emac_net_rx_cb: Prepares packet and sends to upper layer
- * @priv: The DaVinci EMAC private adapter structure
- * @net_pkt_list: Network packet list (received packets)
- *
- * Invalidates packet buffer memory and sends the received packet to upper
- * layer
- *
- * Returns success or appropriate error code (none as of now)
- */
-static int emac_net_rx_cb(struct emac_priv *priv,
- struct emac_netpktobj *net_pkt_list)
-{
- struct net_device *ndev = priv->ndev;
- struct sk_buff *p_skb = net_pkt_list->pkt_token;
- /* set length of packet */
- skb_put(p_skb, net_pkt_list->pkt_length);
- p_skb->protocol = eth_type_trans(p_skb, priv->ndev);
- netif_receive_skb(p_skb);
- ndev->stats.rx_bytes += net_pkt_list->pkt_length;
- ndev->stats.rx_packets++;
- return 0;
-}
-
-/**
- * emac_rx_bdproc: RX buffer descriptor (packet) processing
- * @priv: The DaVinci EMAC private adapter structure
- * @ch: RX channel number to process buffer descriptors for
- * @budget: number of packets allowed to process
- * @pending: indication to caller that packets are pending to process
- *
- * Processes RX buffer descriptors - checks ownership bit on the RX buffer
- * descriptor, sends the receive packet to upper layer, allocates a new SKB
- * and recycles the buffer descriptor (requeues it in hardware RX queue).
- * Only "budget" number of packets are processed and indication of pending
- * packets provided to the caller.
- *
- * Returns number of packets processed (and indication of pending packets)
- */
-static int emac_rx_bdproc(struct emac_priv *priv, u32 ch, u32 budget)
-{
- unsigned long flags;
- u32 frame_status;
- u32 pkts_processed = 0;
- char *new_buffer;
- struct emac_rx_bd __iomem *curr_bd;
- struct emac_rx_bd __iomem *last_bd;
- struct emac_netpktobj *curr_pkt, pkt_obj;
- struct emac_netbufobj buf_obj;
- struct emac_netbufobj *rx_buf_obj;
- void *new_buf_token;
- struct emac_rxch *rxch = priv->rxch[ch];
-
- if (unlikely(1 == rxch->teardown_pending))
- return 0;
- ++rxch->proc_count;
- spin_lock_irqsave(&priv->rx_lock, flags);
- pkt_obj.buf_list = &buf_obj;
- curr_pkt = &pkt_obj;
- curr_bd = rxch->active_queue_head;
- BD_CACHE_INVALIDATE(curr_bd, EMAC_BD_LENGTH_FOR_CACHE);
- frame_status = curr_bd->mode;
-
- while ((curr_bd) &&
- ((frame_status & EMAC_CPPI_OWNERSHIP_BIT) == 0) &&
- (pkts_processed < budget)) {
-
- new_buffer = emac_net_alloc_rx_buf(priv, rxch->buf_size,
- &new_buf_token, EMAC_DEF_RX_CH);
- if (unlikely(NULL == new_buffer)) {
- ++rxch->out_of_rx_buffers;
- goto end_emac_rx_bdproc;
- }
-
- /* populate received packet data structure */
- rx_buf_obj = &curr_pkt->buf_list[0];
- rx_buf_obj->data_ptr = (char *)curr_bd->data_ptr;
- rx_buf_obj->length = curr_bd->off_b_len & EMAC_RX_BD_BUF_SIZE;
- rx_buf_obj->buf_token = curr_bd->buf_token;
-
- dma_unmap_single(&priv->ndev->dev, curr_bd->buff_ptr,
- curr_bd->off_b_len & EMAC_RX_BD_BUF_SIZE,
- DMA_FROM_DEVICE);
-
- curr_pkt->pkt_token = curr_pkt->buf_list->buf_token;
- curr_pkt->num_bufs = 1;
- curr_pkt->pkt_length =
- (frame_status & EMAC_RX_BD_PKT_LENGTH_MASK);
- emac_write(EMAC_RXCP(ch), emac_virt_to_phys(curr_bd, priv));
- ++rxch->processed_bd;
- last_bd = curr_bd;
- curr_bd = last_bd->next;
- rxch->active_queue_head = curr_bd;
-
- /* check if end of RX queue ? */
- if (frame_status & EMAC_CPPI_EOQ_BIT) {
- if (curr_bd) {
- ++rxch->mis_queued_packets;
- emac_write(EMAC_RXHDP(ch),
- emac_virt_to_phys(curr_bd, priv));
- } else {
- ++rxch->end_of_queue;
- rxch->queue_active = 0;
- }
- }
-
- /* recycle BD */
- emac_addbd_to_rx_queue(priv, ch, last_bd, new_buffer,
- new_buf_token);
-
- /* return the packet to the user - BD ptr passed in
- * last parameter for potential *future* use */
- spin_unlock_irqrestore(&priv->rx_lock, flags);
- emac_net_rx_cb(priv, curr_pkt);
- spin_lock_irqsave(&priv->rx_lock, flags);
- curr_bd = rxch->active_queue_head;
- if (curr_bd) {
- BD_CACHE_INVALIDATE(curr_bd, EMAC_BD_LENGTH_FOR_CACHE);
- frame_status = curr_bd->mode;
- }
- ++pkts_processed;
- }
-
-end_emac_rx_bdproc:
- spin_unlock_irqrestore(&priv->rx_lock, flags);
- return pkts_processed;
-}
-
-/**
* emac_hw_enable: Enable EMAC hardware for packet transmission/reception
* @priv: The DaVinci EMAC private adapter structure
*
@@ -2172,7 +1276,7 @@ end_emac_rx_bdproc:
*/
static int emac_hw_enable(struct emac_priv *priv)
{
- u32 ch, val, mbp_enable, mac_control;
+ u32 val, mbp_enable, mac_control;
/* Soft reset */
emac_write(EMAC_SOFTRESET, 1);
@@ -2215,26 +1319,9 @@ static int emac_hw_enable(struct emac_priv *priv)
emac_write(EMAC_RXUNICASTCLEAR, EMAC_RX_UNICAST_CLEAR_ALL);
priv->rx_addr_type = (emac_read(EMAC_MACCONFIG) >> 8) & 0xFF;
- val = emac_read(EMAC_TXCONTROL);
- val |= EMAC_TX_CONTROL_TX_ENABLE_VAL;
- emac_write(EMAC_TXCONTROL, val);
- val = emac_read(EMAC_RXCONTROL);
- val |= EMAC_RX_CONTROL_RX_ENABLE_VAL;
- emac_write(EMAC_RXCONTROL, val);
emac_write(EMAC_MACINTMASKSET, EMAC_MAC_HOST_ERR_INTMASK_VAL);
- for (ch = 0; ch < EMAC_DEF_MAX_TX_CH; ch++) {
- emac_write(EMAC_TXHDP(ch), 0);
- emac_write(EMAC_TXINTMASKSET, BIT(ch));
- }
- for (ch = 0; ch < EMAC_DEF_MAX_RX_CH; ch++) {
- struct emac_rxch *rxch = priv->rxch[ch];
- emac_setmac(priv, ch, rxch->mac_addr);
- emac_write(EMAC_RXINTMASKSET, BIT(ch));
- rxch->queue_active = 1;
- emac_write(EMAC_RXHDP(ch),
- emac_virt_to_phys(rxch->active_queue_head, priv));
- }
+ emac_setmac(priv, EMAC_DEF_RX_CH, priv->mac_addr);
/* Enable MII */
val = emac_read(EMAC_MACCONTROL);
@@ -2279,8 +1366,8 @@ static int emac_poll(struct napi_struct *napi, int budget)
mask = EMAC_DM646X_MAC_IN_VECTOR_TX_INT_VEC;
if (status & mask) {
- num_tx_pkts = emac_tx_bdproc(priv, EMAC_DEF_TX_CH,
- EMAC_DEF_TX_MAX_SERVICE);
+ num_tx_pkts = cpdma_chan_process(priv->txchan,
+ EMAC_DEF_TX_MAX_SERVICE);
} /* TX processing */
mask = EMAC_DM644X_MAC_IN_VECTOR_RX_INT_VEC;
@@ -2289,7 +1376,7 @@ static int emac_poll(struct napi_struct *napi, int budget)
mask = EMAC_DM646X_MAC_IN_VECTOR_RX_INT_VEC;
if (status & mask) {
- num_rx_pkts = emac_rx_bdproc(priv, EMAC_DEF_RX_CH, budget);
+ num_rx_pkts = cpdma_chan_process(priv->rxchan, budget);
} /* RX processing */
mask = EMAC_DM644X_MAC_IN_VECTOR_HOST_INT;
@@ -2348,79 +1435,6 @@ void emac_poll_controller(struct net_device *ndev)
}
#endif
-/* PHY/MII bus related */
-
-/* Wait until mdio is ready for next command */
-#define MDIO_WAIT_FOR_USER_ACCESS\
- while ((emac_mdio_read((MDIO_USERACCESS(0))) &\
- MDIO_USERACCESS_GO) != 0)
-
-static int emac_mii_read(struct mii_bus *bus, int phy_id, int phy_reg)
-{
- unsigned int phy_data = 0;
- unsigned int phy_control;
-
- /* Wait until mdio is ready for next command */
- MDIO_WAIT_FOR_USER_ACCESS;
-
- phy_control = (MDIO_USERACCESS_GO |
- MDIO_USERACCESS_READ |
- ((phy_reg << 21) & MDIO_USERACCESS_REGADR) |
- ((phy_id << 16) & MDIO_USERACCESS_PHYADR) |
- (phy_data & MDIO_USERACCESS_DATA));
- emac_mdio_write(MDIO_USERACCESS(0), phy_control);
-
- /* Wait until mdio is ready for next command */
- MDIO_WAIT_FOR_USER_ACCESS;
-
- return emac_mdio_read(MDIO_USERACCESS(0)) & MDIO_USERACCESS_DATA;
-
-}
-
-static int emac_mii_write(struct mii_bus *bus, int phy_id,
- int phy_reg, u16 phy_data)
-{
-
- unsigned int control;
-
- /* until mdio is ready for next command */
- MDIO_WAIT_FOR_USER_ACCESS;
-
- control = (MDIO_USERACCESS_GO |
- MDIO_USERACCESS_WRITE |
- ((phy_reg << 21) & MDIO_USERACCESS_REGADR) |
- ((phy_id << 16) & MDIO_USERACCESS_PHYADR) |
- (phy_data & MDIO_USERACCESS_DATA));
- emac_mdio_write(MDIO_USERACCESS(0), control);
-
- return 0;
-}
-
-static int emac_mii_reset(struct mii_bus *bus)
-{
- unsigned int clk_div;
- int mdio_bus_freq = emac_bus_frequency;
-
- if (mdio_max_freq && mdio_bus_freq)
- clk_div = ((mdio_bus_freq / mdio_max_freq) - 1);
- else
- clk_div = 0xFF;
-
- clk_div &= MDIO_CONTROL_CLKDIV;
-
- /* Set enable and clock divider in MDIOControl */
- emac_mdio_write(MDIO_CONTROL, (clk_div | MDIO_CONTROL_ENABLE));
-
- return 0;
-
-}
-
-static int mii_irqs[PHY_MAX_ADDR] = { PHY_POLL, PHY_POLL };
-
-/* emac_driver: EMAC MII bus structure */
-
-static struct mii_bus *emac_mii;
-
static void emac_adjust_link(struct net_device *ndev)
{
struct emac_priv *priv = netdev_priv(ndev);
@@ -2485,6 +1499,11 @@ static int emac_devioctl(struct net_device *ndev, struct ifreq *ifrq, int cmd)
return -EOPNOTSUPP;
}
+static int match_first_device(struct device *dev, void *data)
+{
+ return 1;
+}
+
/**
* emac_dev_open: EMAC device open
* @ndev: The DaVinci EMAC network adapter
@@ -2498,10 +1517,9 @@ static int emac_devioctl(struct net_device *ndev, struct ifreq *ifrq, int cmd)
static int emac_dev_open(struct net_device *ndev)
{
struct device *emac_dev = &ndev->dev;
- u32 rc, cnt, ch;
- int phy_addr;
+ u32 cnt;
struct resource *res;
- int q, m;
+ int q, m, ret;
int i = 0;
int k = 0;
struct emac_priv *priv = netdev_priv(ndev);
@@ -2513,29 +1531,21 @@ static int emac_dev_open(struct net_device *ndev)
/* Configuration items */
priv->rx_buf_size = EMAC_DEF_MAX_FRAME_SIZE + NET_IP_ALIGN;
- /* Clear basic hardware */
- for (ch = 0; ch < EMAC_MAX_TXRX_CHANNELS; ch++) {
- emac_write(EMAC_TXHDP(ch), 0);
- emac_write(EMAC_RXHDP(ch), 0);
- emac_write(EMAC_RXHDP(ch), 0);
- emac_write(EMAC_RXINTMASKCLEAR, EMAC_INT_MASK_CLEAR);
- emac_write(EMAC_TXINTMASKCLEAR, EMAC_INT_MASK_CLEAR);
- }
priv->mac_hash1 = 0;
priv->mac_hash2 = 0;
emac_write(EMAC_MACHASH1, 0);
emac_write(EMAC_MACHASH2, 0);
- /* multi ch not supported - open 1 TX, 1RX ch by default */
- rc = emac_init_txch(priv, EMAC_DEF_TX_CH);
- if (0 != rc) {
- dev_err(emac_dev, "DaVinci EMAC: emac_init_txch() failed");
- return rc;
- }
- rc = emac_init_rxch(priv, EMAC_DEF_RX_CH, priv->mac_addr);
- if (0 != rc) {
- dev_err(emac_dev, "DaVinci EMAC: emac_init_rxch() failed");
- return rc;
+ for (i = 0; i < EMAC_DEF_RX_NUM_DESC; i++) {
+ struct sk_buff *skb = emac_rx_alloc(priv);
+
+ if (!skb)
+ break;
+
+ ret = cpdma_chan_submit(priv->rxchan, skb, skb->data,
+ skb_tailroom(skb), GFP_KERNEL);
+ if (WARN_ON(ret < 0))
+ break;
}
/* Request IRQ */
@@ -2560,28 +1570,28 @@ static int emac_dev_open(struct net_device *ndev)
emac_set_coalesce(ndev, &coal);
}
- /* find the first phy */
+ cpdma_ctlr_start(priv->dma);
+
priv->phydev = NULL;
- if (priv->phy_mask) {
- emac_mii_reset(priv->mii_bus);
- for (phy_addr = 0; phy_addr < PHY_MAX_ADDR; phy_addr++) {
- if (priv->mii_bus->phy_map[phy_addr]) {
- priv->phydev = priv->mii_bus->phy_map[phy_addr];
- break;
- }
- }
+ /* use the first phy on the bus if pdata did not give us a phy id */
+ if (!priv->phy_id) {
+ struct device *phy;
- if (!priv->phydev) {
- printk(KERN_ERR "%s: no PHY found\n", ndev->name);
- return -1;
- }
+ phy = bus_find_device(&mdio_bus_type, NULL, NULL,
+ match_first_device);
+ if (phy)
+ priv->phy_id = dev_name(phy);
+ }
- priv->phydev = phy_connect(ndev, dev_name(&priv->phydev->dev),
- &emac_adjust_link, 0, PHY_INTERFACE_MODE_MII);
+ if (priv->phy_id && *priv->phy_id) {
+ priv->phydev = phy_connect(ndev, priv->phy_id,
+ &emac_adjust_link, 0,
+ PHY_INTERFACE_MODE_MII);
if (IS_ERR(priv->phydev)) {
- printk(KERN_ERR "%s: Could not attach to PHY\n",
- ndev->name);
+ dev_err(emac_dev, "could not connect to phy %s\n",
+ priv->phy_id);
+ priv->phydev = NULL;
return PTR_ERR(priv->phydev);
}
@@ -2589,12 +1599,13 @@ static int emac_dev_open(struct net_device *ndev)
priv->speed = 0;
priv->duplex = ~0;
- printk(KERN_INFO "%s: attached PHY driver [%s] "
- "(mii_bus:phy_addr=%s, id=%x)\n", ndev->name,
+ dev_info(emac_dev, "attached PHY driver [%s] "
+ "(mii_bus:phy_addr=%s, id=%x)\n",
priv->phydev->drv->name, dev_name(&priv->phydev->dev),
priv->phydev->phy_id);
- } else{
+ } else {
/* No PHY , fix the link, speed and duplex settings */
+ dev_notice(emac_dev, "no phy, defaulting to 100/full\n");
priv->link = 1;
priv->speed = SPEED_100;
priv->duplex = DUPLEX_FULL;
@@ -2607,7 +1618,7 @@ static int emac_dev_open(struct net_device *ndev)
if (netif_msg_drv(priv))
dev_notice(emac_dev, "DaVinci EMAC: Opened %s\n", ndev->name);
- if (priv->phy_mask)
+ if (priv->phydev)
phy_start(priv->phydev);
return 0;
@@ -2648,10 +1659,7 @@ static int emac_dev_stop(struct net_device *ndev)
netif_carrier_off(ndev);
emac_int_disable(priv);
- emac_stop_txch(priv, EMAC_DEF_TX_CH);
- emac_stop_rxch(priv, EMAC_DEF_RX_CH);
- emac_cleanup_txch(priv, EMAC_DEF_TX_CH);
- emac_cleanup_rxch(priv, EMAC_DEF_RX_CH);
+ cpdma_ctlr_stop(priv->dma);
emac_write(EMAC_SOFTRESET, 1);
if (priv->phydev)
@@ -2756,9 +1764,10 @@ static int __devinit davinci_emac_probe(struct platform_device *pdev)
struct resource *res;
struct net_device *ndev;
struct emac_priv *priv;
- unsigned long size;
+ unsigned long size, hw_ram_addr;
struct emac_platform_data *pdata;
struct device *emac_dev;
+ struct cpdma_params dma_params;
/* obtain emac clock from kernel */
emac_clk = clk_get(&pdev->dev, NULL);
@@ -2782,8 +1791,6 @@ static int __devinit davinci_emac_probe(struct platform_device *pdev)
priv->ndev = ndev;
priv->msg_enable = netif_msg_init(debug_level, DAVINCI_EMAC_DEBUG);
- spin_lock_init(&priv->tx_lock);
- spin_lock_init(&priv->rx_lock);
spin_lock_init(&priv->lock);
pdata = pdev->dev.platform_data;
@@ -2794,7 +1801,7 @@ static int __devinit davinci_emac_probe(struct platform_device *pdev)
/* MAC addr and PHY mask , RMII enable info from platform_data */
memcpy(priv->mac_addr, pdata->mac_addr, 6);
- priv->phy_mask = pdata->phy_mask;
+ priv->phy_id = pdata->phy_id;
priv->rmii_en = pdata->rmii_en;
priv->version = pdata->version;
priv->int_enable = pdata->interrupt_enable;
@@ -2831,14 +1838,41 @@ static int __devinit davinci_emac_probe(struct platform_device *pdev)
ndev->base_addr = (unsigned long)priv->remap_addr;
priv->ctrl_base = priv->remap_addr + pdata->ctrl_mod_reg_offset;
- priv->ctrl_ram_size = pdata->ctrl_ram_size;
- priv->emac_ctrl_ram = priv->remap_addr + pdata->ctrl_ram_offset;
- if (pdata->hw_ram_addr)
- priv->hw_ram_addr = pdata->hw_ram_addr;
- else
- priv->hw_ram_addr = (u32 __force)res->start +
- pdata->ctrl_ram_offset;
+ hw_ram_addr = pdata->hw_ram_addr;
+ if (!hw_ram_addr)
+ hw_ram_addr = (u32 __force)res->start + pdata->ctrl_ram_offset;
+
+ memset(&dma_params, 0, sizeof(dma_params));
+ dma_params.dev = emac_dev;
+ dma_params.dmaregs = priv->emac_base;
+ dma_params.rxthresh = priv->emac_base + 0x120;
+ dma_params.rxfree = priv->emac_base + 0x140;
+ dma_params.txhdp = priv->emac_base + 0x600;
+ dma_params.rxhdp = priv->emac_base + 0x620;
+ dma_params.txcp = priv->emac_base + 0x640;
+ dma_params.rxcp = priv->emac_base + 0x660;
+ dma_params.num_chan = EMAC_MAX_TXRX_CHANNELS;
+ dma_params.min_packet_size = EMAC_DEF_MIN_ETHPKTSIZE;
+ dma_params.desc_mem_phys = hw_ram_addr;
+ dma_params.desc_mem_size = pdata->ctrl_ram_size;
+ dma_params.desc_align = 16;
+
+ priv->dma = cpdma_ctlr_create(&dma_params);
+ if (!priv->dma) {
+ dev_err(emac_dev, "DaVinci EMAC: Error initializing DMA\n");
+ rc = -ENOMEM;
+ goto no_dma;
+ }
+
+ priv->txchan = cpdma_chan_create(priv->dma, tx_chan_num(EMAC_DEF_TX_CH),
+ emac_tx_handler);
+ priv->rxchan = cpdma_chan_create(priv->dma, rx_chan_num(EMAC_DEF_RX_CH),
+ emac_rx_handler);
+ if (WARN_ON(!priv->txchan || !priv->rxchan)) {
+ rc = -ENOMEM;
+ goto no_irq_res;
+ }
res = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
if (!res) {
@@ -2871,32 +1905,6 @@ static int __devinit davinci_emac_probe(struct platform_device *pdev)
}
- /* MII/Phy intialisation, mdio bus registration */
- emac_mii = mdiobus_alloc();
- if (emac_mii == NULL) {
- dev_err(emac_dev, "DaVinci EMAC: Error allocating mii_bus\n");
- rc = -ENOMEM;
- goto mdio_alloc_err;
- }
-
- priv->mii_bus = emac_mii;
- emac_mii->name = "emac-mii",
- emac_mii->read = emac_mii_read,
- emac_mii->write = emac_mii_write,
- emac_mii->reset = emac_mii_reset,
- emac_mii->irq = mii_irqs,
- emac_mii->phy_mask = ~(priv->phy_mask);
- emac_mii->parent = &pdev->dev;
- emac_mii->priv = priv->remap_addr + pdata->mdio_reg_offset;
- snprintf(priv->mii_bus->id, MII_BUS_ID_SIZE, "%x", priv->pdev->id);
- mdio_max_freq = pdata->mdio_max_freq;
- emac_mii->reset(emac_mii);
-
- /* Register the MII bus */
- rc = mdiobus_register(emac_mii);
- if (rc)
- goto mdiobus_quit;
-
if (netif_msg_probe(priv)) {
dev_notice(emac_dev, "DaVinci EMAC Probe found device "\
"(regs: %p, irq: %d)\n",
@@ -2904,13 +1912,15 @@ static int __devinit davinci_emac_probe(struct platform_device *pdev)
}
return 0;
-mdiobus_quit:
- mdiobus_free(emac_mii);
-
netdev_reg_err:
-mdio_alloc_err:
clk_disable(emac_clk);
no_irq_res:
+ if (priv->txchan)
+ cpdma_chan_destroy(priv->txchan);
+ if (priv->rxchan)
+ cpdma_chan_destroy(priv->rxchan);
+ cpdma_ctlr_destroy(priv->dma);
+no_dma:
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
release_mem_region(res->start, res->end - res->start + 1);
iounmap(priv->remap_addr);
@@ -2938,8 +1948,12 @@ static int __devexit davinci_emac_remove(struct platform_device *pdev)
platform_set_drvdata(pdev, NULL);
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- mdiobus_unregister(priv->mii_bus);
- mdiobus_free(priv->mii_bus);
+
+ if (priv->txchan)
+ cpdma_chan_destroy(priv->txchan);
+ if (priv->rxchan)
+ cpdma_chan_destroy(priv->rxchan);
+ cpdma_ctlr_destroy(priv->dma);
release_mem_region(res->start, res->end - res->start + 1);
diff --git a/drivers/net/davinci_mdio.c b/drivers/net/davinci_mdio.c
new file mode 100644
index 000000000000..7615040df756
--- /dev/null
+++ b/drivers/net/davinci_mdio.c
@@ -0,0 +1,475 @@
+/*
+ * DaVinci MDIO Module driver
+ *
+ * Copyright (C) 2010 Texas Instruments.
+ *
+ * Shamelessly ripped out of davinci_emac.c, original copyrights follow:
+ *
+ * Copyright (C) 2009 Texas Instruments.
+ *
+ * ---------------------------------------------------------------------------
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ * ---------------------------------------------------------------------------
+ */
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/platform_device.h>
+#include <linux/delay.h>
+#include <linux/sched.h>
+#include <linux/slab.h>
+#include <linux/phy.h>
+#include <linux/clk.h>
+#include <linux/err.h>
+#include <linux/io.h>
+#include <linux/davinci_emac.h>
+
+/*
+ * This timeout definition is a worst-case ultra defensive measure against
+ * unexpected controller lock ups. Ideally, we should never ever hit this
+ * scenario in practice.
+ */
+#define MDIO_TIMEOUT 100 /* msecs */
+
+#define PHY_REG_MASK 0x1f
+#define PHY_ID_MASK 0x1f
+
+#define DEF_OUT_FREQ 2200000 /* 2.2 MHz */
+
+struct davinci_mdio_regs {
+ u32 version;
+ u32 control;
+#define CONTROL_IDLE BIT(31)
+#define CONTROL_ENABLE BIT(30)
+#define CONTROL_MAX_DIV (0xff)
+
+ u32 alive;
+ u32 link;
+ u32 linkintraw;
+ u32 linkintmasked;
+ u32 __reserved_0[2];
+ u32 userintraw;
+ u32 userintmasked;
+ u32 userintmaskset;
+ u32 userintmaskclr;
+ u32 __reserved_1[20];
+
+ struct {
+ u32 access;
+#define USERACCESS_GO BIT(31)
+#define USERACCESS_WRITE BIT(30)
+#define USERACCESS_ACK BIT(29)
+#define USERACCESS_READ (0)
+#define USERACCESS_DATA (0xffff)
+
+ u32 physel;
+ } user[0];
+};
+
+struct mdio_platform_data default_pdata = {
+ .bus_freq = DEF_OUT_FREQ,
+};
+
+struct davinci_mdio_data {
+ struct mdio_platform_data pdata;
+ struct davinci_mdio_regs __iomem *regs;
+ spinlock_t lock;
+ struct clk *clk;
+ struct device *dev;
+ struct mii_bus *bus;
+ bool suspended;
+ unsigned long access_time; /* jiffies */
+};
+
+static void __davinci_mdio_reset(struct davinci_mdio_data *data)
+{
+ u32 mdio_in, div, mdio_out_khz, access_time;
+
+ mdio_in = clk_get_rate(data->clk);
+ div = (mdio_in / data->pdata.bus_freq) - 1;
+ if (div > CONTROL_MAX_DIV)
+ div = CONTROL_MAX_DIV;
+
+ /* set enable and clock divider */
+ __raw_writel(div | CONTROL_ENABLE, &data->regs->control);
+
+ /*
+ * One mdio transaction consists of:
+ * 32 bits of preamble
+ * 32 bits of transferred data
+ * 24 bits of bus yield (not needed unless shared?)
+ */
+ mdio_out_khz = mdio_in / (1000 * (div + 1));
+ access_time = (88 * 1000) / mdio_out_khz;
+
+ /*
+ * In the worst case, we could be kicking off a user-access immediately
+ * after the mdio bus scan state-machine triggered its own read. If
+ * so, our request could get deferred by one access cycle. We
+ * defensively allow for 4 access cycles.
+ */
+ data->access_time = usecs_to_jiffies(access_time * 4);
+ if (!data->access_time)
+ data->access_time = 1;
+}
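A worked example of the timing above may help; the 27 MHz input clock is an assumption for illustration (the real rate comes from clk_get_rate()), while the 2.2 MHz bus frequency is the driver's DEF_OUT_FREQ default:

/*
 * Assuming mdio_in = 27 MHz and bus_freq = 2.2 MHz (illustrative):
 *   div               = 27000000 / 2200000 - 1        = 11
 *   mdio_out_khz      = 27000000 / (1000 * (11 + 1))  = 2250
 *   access_time       = (88 * 1000) / 2250            = 39 us per transaction
 *   data->access_time = usecs_to_jiffies(39 * 4)      = at least 1 jiffy
 */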
+
+static int davinci_mdio_reset(struct mii_bus *bus)
+{
+ struct davinci_mdio_data *data = bus->priv;
+ u32 phy_mask, ver;
+
+ __davinci_mdio_reset(data);
+
+ /* wait for scan logic to settle */
+ msleep(PHY_MAX_ADDR * data->access_time);
+
+ /* dump hardware version info */
+ ver = __raw_readl(&data->regs->version);
+ dev_info(data->dev, "davinci mdio revision %d.%d\n",
+ (ver >> 8) & 0xff, ver & 0xff);
+
+ /* get phy mask from the alive register */
+ phy_mask = __raw_readl(&data->regs->alive);
+ if (phy_mask) {
+ /* restrict mdio bus to live phys only */
+ dev_info(data->dev, "detected phy mask %x\n", ~phy_mask);
+ phy_mask = ~phy_mask;
+ } else {
+ /* desperately scan all phys */
+ dev_warn(data->dev, "no live phy, scanning all\n");
+ phy_mask = 0;
+ }
+ data->bus->phy_mask = phy_mask;
+
+ return 0;
+}
+
+/* wait until hardware is ready for another user access */
+static inline int wait_for_user_access(struct davinci_mdio_data *data)
+{
+ struct davinci_mdio_regs __iomem *regs = data->regs;
+ unsigned long timeout = jiffies + msecs_to_jiffies(MDIO_TIMEOUT);
+ u32 reg;
+
+ while (time_after(timeout, jiffies)) {
+ reg = __raw_readl(&regs->user[0].access);
+ if ((reg & USERACCESS_GO) == 0)
+ return 0;
+
+ reg = __raw_readl(&regs->control);
+ if ((reg & CONTROL_IDLE) == 0)
+ continue;
+
+ /*
+ * An emac soft_reset may have clobbered the mdio controller's
+ * state machine. We need to reset and retry the current
+ * operation
+ */
+ dev_warn(data->dev, "resetting idled controller\n");
+ __davinci_mdio_reset(data);
+ return -EAGAIN;
+ }
+ dev_err(data->dev, "timed out waiting for user access\n");
+ return -ETIMEDOUT;
+}
+
+/* wait until hardware state machine is idle */
+static inline int wait_for_idle(struct davinci_mdio_data *data)
+{
+ struct davinci_mdio_regs __iomem *regs = data->regs;
+ unsigned long timeout = jiffies + msecs_to_jiffies(MDIO_TIMEOUT);
+
+ while (time_after(timeout, jiffies)) {
+ if (__raw_readl(&regs->control) & CONTROL_IDLE)
+ return 0;
+ }
+ dev_err(data->dev, "timed out waiting for idle\n");
+ return -ETIMEDOUT;
+}
+
+static int davinci_mdio_read(struct mii_bus *bus, int phy_id, int phy_reg)
+{
+ struct davinci_mdio_data *data = bus->priv;
+ u32 reg;
+ int ret;
+
+ if (phy_reg & ~PHY_REG_MASK || phy_id & ~PHY_ID_MASK)
+ return -EINVAL;
+
+ spin_lock(&data->lock);
+
+ if (data->suspended) {
+ spin_unlock(&data->lock);
+ return -ENODEV;
+ }
+
+ reg = (USERACCESS_GO | USERACCESS_READ | (phy_reg << 21) |
+ (phy_id << 16));
+
+ while (1) {
+ ret = wait_for_user_access(data);
+ if (ret == -EAGAIN)
+ continue;
+ if (ret < 0)
+ break;
+
+ __raw_writel(reg, &data->regs->user[0].access);
+
+ ret = wait_for_user_access(data);
+ if (ret == -EAGAIN)
+ continue;
+ if (ret < 0)
+ break;
+
+ reg = __raw_readl(&data->regs->user[0].access);
+ ret = (reg & USERACCESS_ACK) ? (reg & USERACCESS_DATA) : -EIO;
+ break;
+ }
+
+ spin_unlock(&data->lock);
+
+ return ret;
+}
+
+static int davinci_mdio_write(struct mii_bus *bus, int phy_id,
+ int phy_reg, u16 phy_data)
+{
+ struct davinci_mdio_data *data = bus->priv;
+ u32 reg;
+ int ret;
+
+ if (phy_reg & ~PHY_REG_MASK || phy_id & ~PHY_ID_MASK)
+ return -EINVAL;
+
+ spin_lock(&data->lock);
+
+ if (data->suspended) {
+ spin_unlock(&data->lock);
+ return -ENODEV;
+ }
+
+ reg = (USERACCESS_GO | USERACCESS_WRITE | (phy_reg << 21) |
+ (phy_id << 16) | (phy_data & USERACCESS_DATA));
+
+ while (1) {
+ ret = wait_for_user_access(data);
+ if (ret == -EAGAIN)
+ continue;
+ if (ret < 0)
+ break;
+
+ __raw_writel(reg, &data->regs->user[0].access);
+
+ ret = wait_for_user_access(data);
+ if (ret == -EAGAIN)
+ continue;
+ break;
+ }
+
+ spin_unlock(&data->lock);
+
+ return ret;
+}
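Once registered, the bus is driven through the generic mdiobus helpers; a small, purely illustrative consumer (PHY address 1 is an assumption) might read the PHY identifier like this:

/* Illustrative only: read the ID registers of the PHY at address 1. */
static u32 example_read_phy_id(struct mii_bus *bus)
{
	int id1 = mdiobus_read(bus, 1, MII_PHYSID1);
	int id2 = mdiobus_read(bus, 1, MII_PHYSID2);

	if (id1 < 0 || id2 < 0)
		return 0;	/* read failed */
	return ((u32)id1 << 16) | (u32)id2;
}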
+
+static int __devinit davinci_mdio_probe(struct platform_device *pdev)
+{
+ struct mdio_platform_data *pdata = pdev->dev.platform_data;
+ struct device *dev = &pdev->dev;
+ struct davinci_mdio_data *data;
+ struct resource *res;
+ struct phy_device *phy;
+ int ret, addr;
+
+ data = kzalloc(sizeof(*data), GFP_KERNEL);
+ if (!data) {
+ dev_err(dev, "failed to alloc device data\n");
+ return -ENOMEM;
+ }
+
+ data->pdata = pdata ? (*pdata) : default_pdata;
+
+ data->bus = mdiobus_alloc();
+ if (!data->bus) {
+ dev_err(dev, "failed to alloc mii bus\n");
+ ret = -ENOMEM;
+ goto bail_out;
+ }
+
+ data->bus->name = dev_name(dev);
+ data->bus->read = davinci_mdio_read;
+ data->bus->write = davinci_mdio_write;
+ data->bus->reset = davinci_mdio_reset;
+ data->bus->parent = dev;
+ data->bus->priv = data;
+ snprintf(data->bus->id, MII_BUS_ID_SIZE, "%x", pdev->id);
+
+ data->clk = clk_get(dev, NULL);
+ if (IS_ERR(data->clk)) {
+ dev_err(dev, "failed to get device clock\n");
+ ret = PTR_ERR(data->clk);
+ data->clk = NULL;
+ goto bail_out;
+ }
+
+ clk_enable(data->clk);
+
+ dev_set_drvdata(dev, data);
+ data->dev = dev;
+ spin_lock_init(&data->lock);
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (!res) {
+ dev_err(dev, "could not find register map resource\n");
+ ret = -ENOENT;
+ goto bail_out;
+ }
+
+ res = devm_request_mem_region(dev, res->start, resource_size(res),
+ dev_name(dev));
+ if (!res) {
+ dev_err(dev, "could not allocate register map resource\n");
+ ret = -ENXIO;
+ goto bail_out;
+ }
+
+ data->regs = devm_ioremap_nocache(dev, res->start, resource_size(res));
+ if (!data->regs) {
+ dev_err(dev, "could not map mdio registers\n");
+ ret = -ENOMEM;
+ goto bail_out;
+ }
+
+ /* register the mii bus */
+ ret = mdiobus_register(data->bus);
+ if (ret)
+ goto bail_out;
+
+ /* scan and dump the bus */
+ for (addr = 0; addr < PHY_MAX_ADDR; addr++) {
+ phy = data->bus->phy_map[addr];
+ if (phy) {
+ dev_info(dev, "phy[%d]: device %s, driver %s\n",
+ phy->addr, dev_name(&phy->dev),
+ phy->drv ? phy->drv->name : "unknown");
+ }
+ }
+
+ return 0;
+
+bail_out:
+ if (data->bus)
+ mdiobus_free(data->bus);
+
+ if (data->clk) {
+ clk_disable(data->clk);
+ clk_put(data->clk);
+ }
+
+ kfree(data);
+
+ return ret;
+}
+
+static int __devexit davinci_mdio_remove(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct davinci_mdio_data *data = dev_get_drvdata(dev);
+
+ if (data->bus) {
+ mdiobus_unregister(data->bus);
+ mdiobus_free(data->bus);
+ }
+
+ if (data->clk) {
+ clk_disable(data->clk);
+ clk_put(data->clk);
+ }
+
+ dev_set_drvdata(dev, NULL);
+
+ kfree(data);
+
+ return 0;
+}
+
+static int davinci_mdio_suspend(struct device *dev)
+{
+ struct davinci_mdio_data *data = dev_get_drvdata(dev);
+ u32 ctrl;
+
+ spin_lock(&data->lock);
+
+ /* shutdown the scan state machine */
+ ctrl = __raw_readl(&data->regs->control);
+ ctrl &= ~CONTROL_ENABLE;
+ __raw_writel(ctrl, &data->regs->control);
+ wait_for_idle(data);
+
+ if (data->clk)
+ clk_disable(data->clk);
+
+ data->suspended = true;
+ spin_unlock(&data->lock);
+
+ return 0;
+}
+
+static int davinci_mdio_resume(struct device *dev)
+{
+ struct davinci_mdio_data *data = dev_get_drvdata(dev);
+ u32 ctrl;
+
+ spin_lock(&data->lock);
+ if (data->clk)
+ clk_enable(data->clk);
+
+ /* restart the scan state machine */
+ ctrl = __raw_readl(&data->regs->control);
+ ctrl |= CONTROL_ENABLE;
+ __raw_writel(ctrl, &data->regs->control);
+
+ data->suspended = false;
+ spin_unlock(&data->lock);
+
+ return 0;
+}
+
+static const struct dev_pm_ops davinci_mdio_pm_ops = {
+ .suspend = davinci_mdio_suspend,
+ .resume = davinci_mdio_resume,
+};
+
+static struct platform_driver davinci_mdio_driver = {
+ .driver = {
+ .name = "davinci_mdio",
+ .owner = THIS_MODULE,
+ .pm = &davinci_mdio_pm_ops,
+ },
+ .probe = davinci_mdio_probe,
+ .remove = __devexit_p(davinci_mdio_remove),
+};
+
+static int __init davinci_mdio_init(void)
+{
+ return platform_driver_register(&davinci_mdio_driver);
+}
+device_initcall(davinci_mdio_init);
+
+static void __exit davinci_mdio_exit(void)
+{
+ platform_driver_unregister(&davinci_mdio_driver);
+}
+module_exit(davinci_mdio_exit);
+
+MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("DaVinci MDIO driver");
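The driver binds by platform device name; a sketch of the board-side registration it expects follows, with the register base address and resource size as illustrative placeholders rather than values taken from this patch:

/* Hypothetical board support code. */
static struct mdio_platform_data board_mdio_pdata = {
	.bus_freq = 2200000,		/* 2.2 MHz, same as DEF_OUT_FREQ */
};

static struct resource board_mdio_res[] = {
	{
		.start = 0x01e24000,	/* illustrative MDIO register base */
		.end   = 0x01e24000 + 0x1000 - 1,
		.flags = IORESOURCE_MEM,
	},
};

static struct platform_device board_mdio_device = {
	.name          = "davinci_mdio",	/* must match the driver name */
	.id            = 0,
	.num_resources = ARRAY_SIZE(board_mdio_res),
	.resource      = board_mdio_res,
	.dev           = {
		.platform_data = &board_mdio_pdata,
	},
};

Calling platform_device_register(&board_mdio_device) from board init would then trigger davinci_mdio_probe().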
diff --git a/drivers/net/e1000/e1000_main.c b/drivers/net/e1000/e1000_main.c
index a117f2a0252e..4d62f7bfa036 100644
--- a/drivers/net/e1000/e1000_main.c
+++ b/drivers/net/e1000/e1000_main.c
@@ -31,7 +31,7 @@
char e1000_driver_name[] = "e1000";
static char e1000_driver_string[] = "Intel(R) PRO/1000 Network Driver";
-#define DRV_VERSION "7.3.21-k6-NAPI"
+#define DRV_VERSION "7.3.21-k8-NAPI"
const char e1000_driver_version[] = DRV_VERSION;
static const char e1000_copyright[] = "Copyright (c) 1999-2006 Intel Corporation.";
@@ -485,9 +485,6 @@ void e1000_down(struct e1000_adapter *adapter)
struct net_device *netdev = adapter->netdev;
u32 rctl, tctl;
- /* signal that we're down so the interrupt handler does not
- * reschedule our watchdog timer */
- set_bit(__E1000_DOWN, &adapter->flags);
/* disable receives in the hardware */
rctl = er32(RCTL);
@@ -508,6 +505,13 @@ void e1000_down(struct e1000_adapter *adapter)
e1000_irq_disable(adapter);
+ /*
+ * Setting DOWN must be after irq_disable to prevent
+ * a screaming interrupt. Setting DOWN also prevents
+ * timers and tasks from rescheduling.
+ */
+ set_bit(__E1000_DOWN, &adapter->flags);
+
del_timer_sync(&adapter->tx_fifo_stall_timer);
del_timer_sync(&adapter->watchdog_timer);
del_timer_sync(&adapter->phy_info_timer);
@@ -521,7 +525,7 @@ void e1000_down(struct e1000_adapter *adapter)
e1000_clean_all_rx_rings(adapter);
}
-void e1000_reinit_safe(struct e1000_adapter *adapter)
+static void e1000_reinit_safe(struct e1000_adapter *adapter)
{
while (test_and_set_bit(__E1000_RESETTING, &adapter->flags))
msleep(1);
diff --git a/drivers/net/e1000e/82571.c b/drivers/net/e1000e/82571.c
index ca663f19d7df..7236f1a53ba0 100644
--- a/drivers/net/e1000e/82571.c
+++ b/drivers/net/e1000e/82571.c
@@ -52,6 +52,10 @@
(ID_LED_DEF1_DEF2))
#define E1000_GCR_L1_ACT_WITHOUT_L0S_RX 0x08000000
+#define E1000_BASE1000T_STATUS 10
+#define E1000_IDLE_ERROR_COUNT_MASK 0xFF
+#define E1000_RECEIVE_ERROR_COUNTER 21
+#define E1000_RECEIVE_ERROR_MAX 0xFFFF
#define E1000_NVM_INIT_CTRL2_MNGM 0x6000 /* Manageability Operation Mode mask */
@@ -1243,6 +1247,39 @@ static s32 e1000_led_on_82574(struct e1000_hw *hw)
}
/**
+ * e1000_check_phy_82574 - check 82574 phy hung state
+ * @hw: pointer to the HW structure
+ *
+ * Returns whether phy is hung or not
+ **/
+bool e1000_check_phy_82574(struct e1000_hw *hw)
+{
+ u16 status_1kbt = 0;
+ u16 receive_errors = 0;
+ bool phy_hung = false;
+ s32 ret_val = 0;
+
+ /*
+ * Read the PHY Receive Error counter first; if it is at max (all F's),
+ * read the Base1000T status register. If both are at max, the PHY is hung.
+ */
+ ret_val = e1e_rphy(hw, E1000_RECEIVE_ERROR_COUNTER, &receive_errors);
+
+ if (ret_val)
+ goto out;
+ if (receive_errors == E1000_RECEIVE_ERROR_MAX) {
+ ret_val = e1e_rphy(hw, E1000_BASE1000T_STATUS, &status_1kbt);
+ if (ret_val)
+ goto out;
+ if ((status_1kbt & E1000_IDLE_ERROR_COUNT_MASK) ==
+ E1000_IDLE_ERROR_COUNT_MASK)
+ phy_hung = true;
+ }
+out:
+ return phy_hung;
+}
+
+/**
* e1000_setup_link_82571 - Setup flow control and link settings
* @hw: pointer to the HW structure
*
@@ -1859,6 +1896,7 @@ struct e1000_info e1000_82574_info = {
| FLAG_HAS_SMART_POWER_DOWN
| FLAG_HAS_AMT
| FLAG_HAS_CTRLEXT_ON_LOAD,
+ .flags2 = FLAG2_CHECK_PHY_HANG,
.pba = 36,
.max_hw_frame_size = DEFAULT_JUMBO,
.get_variants = e1000_get_variants_82571,
diff --git a/drivers/net/e1000e/e1000.h b/drivers/net/e1000e/e1000.h
index cee882dd67bf..fdc67fead4ea 100644
--- a/drivers/net/e1000e/e1000.h
+++ b/drivers/net/e1000e/e1000.h
@@ -397,6 +397,7 @@ struct e1000_adapter {
struct work_struct print_hang_task;
bool idle_check;
+ int phy_hang_count;
};
struct e1000_info {
@@ -454,6 +455,7 @@ struct e1000_info {
#define FLAG2_HAS_EEE (1 << 5)
#define FLAG2_DMA_BURST (1 << 6)
#define FLAG2_DISABLE_AIM (1 << 8)
+#define FLAG2_CHECK_PHY_HANG (1 << 9)
#define E1000_RX_DESC_PS(R, i) \
(&(((union e1000_rx_desc_packet_split *)((R).desc))[i]))
@@ -631,6 +633,7 @@ extern s32 e1000_get_phy_info_ife(struct e1000_hw *hw);
extern s32 e1000_check_polarity_ife(struct e1000_hw *hw);
extern s32 e1000_phy_force_speed_duplex_ife(struct e1000_hw *hw);
extern s32 e1000_check_polarity_igp(struct e1000_hw *hw);
+extern bool e1000_check_phy_82574(struct e1000_hw *hw);
static inline s32 e1000_phy_hw_reset(struct e1000_hw *hw)
{
diff --git a/drivers/net/e1000e/netdev.c b/drivers/net/e1000e/netdev.c
index ec8cf3f51423..c4ca1629f532 100644
--- a/drivers/net/e1000e/netdev.c
+++ b/drivers/net/e1000e/netdev.c
@@ -4098,6 +4098,25 @@ static void e1000e_enable_receives(struct e1000_adapter *adapter)
}
}
+static void e1000e_check_82574_phy_workaround(struct e1000_adapter *adapter)
+{
+ struct e1000_hw *hw = &adapter->hw;
+
+ /*
+ * With 82574 controllers, the PHY needs to be checked periodically
+ * for a hung state, and reset if two consecutive checks report a hang.
+ */
+ if (e1000_check_phy_82574(hw))
+ adapter->phy_hang_count++;
+ else
+ adapter->phy_hang_count = 0;
+
+ if (adapter->phy_hang_count > 1) {
+ adapter->phy_hang_count = 0;
+ schedule_work(&adapter->reset_task);
+ }
+}
+
/**
* e1000_watchdog - Timer Call-back
* @data: pointer to adapter cast into an unsigned long
@@ -4333,6 +4352,9 @@ link_up:
if (e1000e_get_laa_state_82571(hw))
e1000e_rar_set(hw, adapter->hw.mac.addr, 0);
+ if (adapter->flags2 & FLAG2_CHECK_PHY_HANG)
+ e1000e_check_82574_phy_workaround(adapter);
+
/* Reset the timer */
if (!test_bit(__E1000_DOWN, &adapter->state))
mod_timer(&adapter->watchdog_timer,
@@ -4860,8 +4882,11 @@ static void e1000_reset_task(struct work_struct *work)
struct e1000_adapter *adapter;
adapter = container_of(work, struct e1000_adapter, reset_task);
- e1000e_dump(adapter);
- e_err("Reset adapter\n");
+ if (!((adapter->flags & FLAG_RX_NEEDS_RESTART) &&
+ (adapter->flags & FLAG_RX_RESTART_NOW))) {
+ e1000e_dump(adapter);
+ e_err("Reset adapter\n");
+ }
e1000e_reinit_locked(adapter);
}
diff --git a/drivers/net/ehea/ehea.h b/drivers/net/ehea/ehea.h
index 1321cb6401cf..8e745e74828d 100644
--- a/drivers/net/ehea/ehea.h
+++ b/drivers/net/ehea/ehea.h
@@ -396,7 +396,9 @@ struct ehea_port_res {
int swqe_ll_count;
u32 swqe_id_counter;
u64 tx_packets;
+ u64 tx_bytes;
u64 rx_packets;
+ u64 rx_bytes;
u32 poll_counter;
struct net_lro_mgr lro_mgr;
struct net_lro_desc lro_desc[MAX_LRO_DESCRIPTORS];
diff --git a/drivers/net/ehea/ehea_main.c b/drivers/net/ehea/ehea_main.c
index bb7d306fb446..3d0af08483a1 100644
--- a/drivers/net/ehea/ehea_main.c
+++ b/drivers/net/ehea/ehea_main.c
@@ -330,7 +330,7 @@ static struct net_device_stats *ehea_get_stats(struct net_device *dev)
struct ehea_port *port = netdev_priv(dev);
struct net_device_stats *stats = &port->stats;
struct hcp_ehea_port_cb2 *cb2;
- u64 hret, rx_packets, tx_packets;
+ u64 hret, rx_packets, tx_packets, rx_bytes = 0, tx_bytes = 0;
int i;
memset(stats, 0, sizeof(*stats));
@@ -353,18 +353,22 @@ static struct net_device_stats *ehea_get_stats(struct net_device *dev)
ehea_dump(cb2, sizeof(*cb2), "net_device_stats");
rx_packets = 0;
- for (i = 0; i < port->num_def_qps; i++)
+ for (i = 0; i < port->num_def_qps; i++) {
rx_packets += port->port_res[i].rx_packets;
+ rx_bytes += port->port_res[i].rx_bytes;
+ }
tx_packets = 0;
- for (i = 0; i < port->num_def_qps + port->num_add_tx_qps; i++)
+ for (i = 0; i < port->num_def_qps + port->num_add_tx_qps; i++) {
tx_packets += port->port_res[i].tx_packets;
+ tx_bytes += port->port_res[i].tx_bytes;
+ }
stats->tx_packets = tx_packets;
stats->multicast = cb2->rxmcp;
stats->rx_errors = cb2->rxuerr;
- stats->rx_bytes = cb2->rxo;
- stats->tx_bytes = cb2->txo;
+ stats->rx_bytes = rx_bytes;
+ stats->tx_bytes = tx_bytes;
stats->rx_packets = rx_packets;
out_herr:
@@ -396,6 +400,7 @@ static void ehea_refill_rq1(struct ehea_port_res *pr, int index, int nr_of_wqes)
skb_arr_rq1[index] = netdev_alloc_skb(dev,
EHEA_L_PKT_SIZE);
if (!skb_arr_rq1[index]) {
+ ehea_info("Unable to allocate enough skb in the array\n");
pr->rq1_skba.os_skbs = fill_wqes - i;
break;
}
@@ -418,13 +423,20 @@ static void ehea_init_fill_rq1(struct ehea_port_res *pr, int nr_rq1a)
struct net_device *dev = pr->port->netdev;
int i;
- for (i = 0; i < pr->rq1_skba.len; i++) {
+ if (nr_rq1a > pr->rq1_skba.len) {
+ ehea_error("NR_RQ1A bigger than skb array len\n");
+ return;
+ }
+
+ for (i = 0; i < nr_rq1a; i++) {
skb_arr_rq1[i] = netdev_alloc_skb(dev, EHEA_L_PKT_SIZE);
- if (!skb_arr_rq1[i])
+ if (!skb_arr_rq1[i]) {
+ ehea_info("No enough memory to allocate skb array\n");
break;
+ }
}
/* Ring doorbell */
- ehea_update_rq1a(pr->qp, nr_rq1a);
+ ehea_update_rq1a(pr->qp, i);
}
static int ehea_refill_rq_def(struct ehea_port_res *pr,
@@ -703,6 +715,7 @@ static int ehea_proc_rwqes(struct net_device *dev,
int skb_arr_rq2_len = pr->rq2_skba.len;
int skb_arr_rq3_len = pr->rq3_skba.len;
int processed, processed_rq1, processed_rq2, processed_rq3;
+ u64 processed_bytes = 0;
int wqe_index, last_wqe_index, rq, port_reset;
processed = processed_rq1 = processed_rq2 = processed_rq3 = 0;
@@ -730,8 +743,10 @@ static int ehea_proc_rwqes(struct net_device *dev,
skb = netdev_alloc_skb(dev,
EHEA_L_PKT_SIZE);
- if (!skb)
+ if (!skb) {
+ ehea_info("Not enough memory to allocate skb\n");
break;
+ }
}
skb_copy_to_linear_data(skb, ((char *)cqe) + 64,
cqe->num_bytes_transfered - 4);
@@ -760,6 +775,7 @@ static int ehea_proc_rwqes(struct net_device *dev,
processed_rq3++;
}
+ processed_bytes += skb->len;
ehea_proc_skb(pr, cqe, skb);
} else {
pr->p_stats.poll_receive_errors++;
@@ -775,6 +791,7 @@ static int ehea_proc_rwqes(struct net_device *dev,
lro_flush_all(&pr->lro_mgr);
pr->rx_packets += processed;
+ pr->rx_bytes += processed_bytes;
ehea_refill_rq1(pr, last_wqe_index, processed_rq1);
ehea_refill_rq2(pr, processed_rq2);
@@ -1509,9 +1526,20 @@ static int ehea_init_port_res(struct ehea_port *port, struct ehea_port_res *pr,
enum ehea_eq_type eq_type = EHEA_EQ;
struct ehea_qp_init_attr *init_attr = NULL;
int ret = -EIO;
+ u64 tx_bytes, rx_bytes, tx_packets, rx_packets;
+
+ tx_bytes = pr->tx_bytes;
+ tx_packets = pr->tx_packets;
+ rx_bytes = pr->rx_bytes;
+ rx_packets = pr->rx_packets;
memset(pr, 0, sizeof(struct ehea_port_res));
+ pr->tx_bytes = tx_bytes;
+ pr->tx_packets = tx_packets;
+ pr->rx_bytes = rx_bytes;
+ pr->rx_packets = rx_packets;
+
pr->port = port;
spin_lock_init(&pr->xmit_lock);
spin_lock_init(&pr->netif_queue);
@@ -2249,6 +2277,14 @@ static int ehea_start_xmit(struct sk_buff *skb, struct net_device *dev)
memset(swqe, 0, SWQE_HEADER_SIZE);
atomic_dec(&pr->swqe_avail);
+ if (vlan_tx_tag_present(skb)) {
+ swqe->tx_control |= EHEA_SWQE_VLAN_INSERT;
+ swqe->vlan_tag = vlan_tx_tag_get(skb);
+ }
+
+ pr->tx_packets++;
+ pr->tx_bytes += skb->len;
+
if (skb->len <= SWQE3_MAX_IMM) {
u32 sig_iv = port->sig_comp_iv;
u32 swqe_num = pr->swqe_id_counter;
@@ -2279,11 +2315,6 @@ static int ehea_start_xmit(struct sk_buff *skb, struct net_device *dev)
}
pr->swqe_id_counter += 1;
- if (vlan_tx_tag_present(skb)) {
- swqe->tx_control |= EHEA_SWQE_VLAN_INSERT;
- swqe->vlan_tag = vlan_tx_tag_get(skb);
- }
-
if (netif_msg_tx_queued(port)) {
ehea_info("post swqe on QP %d", pr->qp->init_attr.qp_nr);
ehea_dump(swqe, 512, "swqe");
@@ -2295,7 +2326,6 @@ static int ehea_start_xmit(struct sk_buff *skb, struct net_device *dev)
}
ehea_post_swqe(pr->qp, swqe);
- pr->tx_packets++;
if (unlikely(atomic_read(&pr->swqe_avail) <= 1)) {
spin_lock_irqsave(&pr->netif_queue, flags);
diff --git a/drivers/net/gianfar.c b/drivers/net/gianfar.c
index 4c4cc80ec0a1..d1bec6269173 100644
--- a/drivers/net/gianfar.c
+++ b/drivers/net/gianfar.c
@@ -577,11 +577,10 @@ static int gfar_parse_group(struct device_node *np,
irq_of_parse_and_map(np, 1);
priv->gfargrp[priv->num_grps].interruptError =
irq_of_parse_and_map(np,2);
- if (priv->gfargrp[priv->num_grps].interruptTransmit < 0 ||
- priv->gfargrp[priv->num_grps].interruptReceive < 0 ||
- priv->gfargrp[priv->num_grps].interruptError < 0) {
+ if (priv->gfargrp[priv->num_grps].interruptTransmit == NO_IRQ ||
+ priv->gfargrp[priv->num_grps].interruptReceive == NO_IRQ ||
+ priv->gfargrp[priv->num_grps].interruptError == NO_IRQ)
return -EINVAL;
- }
}
priv->gfargrp[priv->num_grps].grp_id = priv->num_grps;
@@ -2511,7 +2510,7 @@ static int gfar_clean_tx_ring(struct gfar_priv_tx_q *tx_queue)
skb_recycle_check(skb, priv->rx_buffer_size +
RXBUF_ALIGNMENT)) {
gfar_align_skb(skb);
- __skb_queue_head(&priv->rx_recycle, skb);
+ skb_queue_head(&priv->rx_recycle, skb);
} else
dev_kfree_skb_any(skb);
@@ -2594,7 +2593,7 @@ struct sk_buff * gfar_new_skb(struct net_device *dev)
struct gfar_private *priv = netdev_priv(dev);
struct sk_buff *skb = NULL;
- skb = __skb_dequeue(&priv->rx_recycle);
+ skb = skb_dequeue(&priv->rx_recycle);
if (!skb)
skb = gfar_alloc_skb(dev);
@@ -2750,7 +2749,7 @@ int gfar_clean_rx_ring(struct gfar_priv_rx_q *rx_queue, int rx_work_limit)
if (unlikely(!newskb))
newskb = skb;
else if (skb)
- __skb_queue_head(&priv->rx_recycle, skb);
+ skb_queue_head(&priv->rx_recycle, skb);
} else {
/* Increment the number of packets */
rx_queue->stats.rx_packets++;
diff --git a/drivers/net/gianfar_ethtool.c b/drivers/net/gianfar_ethtool.c
index 5c566ebc54b8..3bc8e276ba4d 100644
--- a/drivers/net/gianfar_ethtool.c
+++ b/drivers/net/gianfar_ethtool.c
@@ -635,9 +635,10 @@ static int gfar_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
if (wol->wolopts & ~WAKE_MAGIC)
return -EINVAL;
+ device_set_wakeup_enable(&dev->dev, wol->wolopts & WAKE_MAGIC);
+
spin_lock_irqsave(&priv->bflock, flags);
- priv->wol_en = wol->wolopts & WAKE_MAGIC ? 1 : 0;
- device_set_wakeup_enable(&dev->dev, priv->wol_en);
+ priv->wol_en = !!device_may_wakeup(&dev->dev);
spin_unlock_irqrestore(&priv->bflock, flags);
return 0;
diff --git a/drivers/net/ibm_newemac/core.c b/drivers/net/ibm_newemac/core.c
index 385dc3204cb7..06bb9b799458 100644
--- a/drivers/net/ibm_newemac/core.c
+++ b/drivers/net/ibm_newemac/core.c
@@ -2871,7 +2871,6 @@ static int __devinit emac_probe(struct platform_device *ofdev,
SET_ETHTOOL_OPS(ndev, &emac_ethtool_ops);
netif_carrier_off(ndev);
- netif_stop_queue(ndev);
err = register_netdev(ndev);
if (err) {
diff --git a/drivers/net/igb/igb_main.c b/drivers/net/igb/igb_main.c
index 14db09e2fa8b..892d196f17ac 100644
--- a/drivers/net/igb/igb_main.c
+++ b/drivers/net/igb/igb_main.c
@@ -4107,7 +4107,6 @@ static inline int igb_maybe_stop_tx(struct igb_ring *tx_ring, int size)
netdev_tx_t igb_xmit_frame_ring_adv(struct sk_buff *skb,
struct igb_ring *tx_ring)
{
- struct igb_adapter *adapter = netdev_priv(tx_ring->netdev);
int tso = 0, count;
u32 tx_flags = 0;
u16 first;
diff --git a/drivers/net/igbvf/netdev.c b/drivers/net/igbvf/netdev.c
index ebfaa68ee630..28af019c97bb 100644
--- a/drivers/net/igbvf/netdev.c
+++ b/drivers/net/igbvf/netdev.c
@@ -2783,15 +2783,15 @@ static int __devinit igbvf_probe(struct pci_dev *pdev,
/* reset the hardware with the new settings */
igbvf_reset(adapter);
- /* tell the stack to leave us alone until igbvf_open() is called */
- netif_carrier_off(netdev);
- netif_stop_queue(netdev);
-
strcpy(netdev->name, "eth%d");
err = register_netdev(netdev);
if (err)
goto err_hw_init;
+ /* tell the stack to leave us alone until igbvf_open() is called */
+ netif_carrier_off(netdev);
+ netif_stop_queue(netdev);
+
igbvf_print_device_info(adapter);
igbvf_initialize_last_counter_stats(adapter);
diff --git a/drivers/net/ipg.c b/drivers/net/ipg.c
index dc0198092343..aa93655c3aa7 100644
--- a/drivers/net/ipg.c
+++ b/drivers/net/ipg.c
@@ -88,16 +88,14 @@ static const char *ipg_brand_name[] = {
"IC PLUS IP1000 1000/100/10 based NIC",
"Sundance Technology ST2021 based NIC",
"Tamarack Microelectronics TC9020/9021 based NIC",
- "Tamarack Microelectronics TC9020/9021 based NIC",
"D-Link NIC IP1000A"
};
static DEFINE_PCI_DEVICE_TABLE(ipg_pci_tbl) = {
{ PCI_VDEVICE(SUNDANCE, 0x1023), 0 },
{ PCI_VDEVICE(SUNDANCE, 0x2021), 1 },
- { PCI_VDEVICE(SUNDANCE, 0x1021), 2 },
- { PCI_VDEVICE(DLINK, 0x9021), 3 },
- { PCI_VDEVICE(DLINK, 0x4020), 4 },
+ { PCI_VDEVICE(DLINK, 0x9021), 2 },
+ { PCI_VDEVICE(DLINK, 0x4020), 3 },
{ 0, }
};
diff --git a/drivers/net/irda/sh_sir.c b/drivers/net/irda/sh_sir.c
index 00b38bccd6d0..52a7c86af663 100644
--- a/drivers/net/irda/sh_sir.c
+++ b/drivers/net/irda/sh_sir.c
@@ -258,7 +258,7 @@ static int sh_sir_set_baudrate(struct sh_sir_self *self, u32 baudrate)
/* Baud Rate Error Correction x 10000 */
u32 rate_err_array[] = {
- 0000, 0625, 1250, 1875,
+ 0, 625, 1250, 1875,
2500, 3125, 3750, 4375,
5000, 5625, 6250, 6875,
7500, 8125, 8750, 9375,
diff --git a/drivers/net/ixgb/ixgb_main.c b/drivers/net/ixgb/ixgb_main.c
index 666207a9c039..caa8192fff2a 100644
--- a/drivers/net/ixgb/ixgb_main.c
+++ b/drivers/net/ixgb/ixgb_main.c
@@ -533,6 +533,7 @@ ixgb_remove(struct pci_dev *pdev)
pci_release_regions(pdev);
free_netdev(netdev);
+ pci_disable_device(pdev);
}
/**
diff --git a/drivers/net/ixgbe/ixgbe_dcb.c b/drivers/net/ixgbe/ixgbe_dcb.c
index 8bb9ddb6dffe..0d44c6470ca3 100644
--- a/drivers/net/ixgbe/ixgbe_dcb.c
+++ b/drivers/net/ixgbe/ixgbe_dcb.c
@@ -43,9 +43,12 @@
* ixgbe_dcb_check_config().
*/
s32 ixgbe_dcb_calculate_tc_credits(struct ixgbe_dcb_config *dcb_config,
- u8 direction)
+ int max_frame, u8 direction)
{
struct tc_bw_alloc *p;
+ int min_credit;
+ int min_multiplier;
+ int min_percent = 100;
s32 ret_val = 0;
/* Initialization values default for Tx settings */
u32 credit_refill = 0;
@@ -59,6 +62,31 @@ s32 ixgbe_dcb_calculate_tc_credits(struct ixgbe_dcb_config *dcb_config,
goto out;
}
+ min_credit = ((max_frame / 2) + DCB_CREDIT_QUANTUM - 1) /
+ DCB_CREDIT_QUANTUM;
+
+ /* Find smallest link percentage */
+ for (i = 0; i < MAX_TRAFFIC_CLASS; i++) {
+ p = &dcb_config->tc_config[i].path[direction];
+ bw_percent = dcb_config->bw_percentage[direction][p->bwg_id];
+ link_percentage = p->bwg_percent;
+
+ link_percentage = (link_percentage * bw_percent) / 100;
+
+ if (link_percentage && link_percentage < min_percent)
+ min_percent = link_percentage;
+ }
+
+ /*
+ * The ratio between traffic classes will control the bandwidth
+ * percentages seen on the wire. To calculate this ratio we use
+ * a multiplier. The refill credits must also be large enough to
+ * cover the max frame size, so here we find the smallest
+ * multiplier that keeps every traffic class's refill credits
+ * above that minimum.
+ */
+ min_multiplier = (min_credit / min_percent) + 1;
+
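Worked numbers, purely illustrative (a 9216-byte jumbo frame and a smallest weighted link percentage of 10 are assumptions, not values from this patch), show how the pieces fit together:

/*
 * With max_frame = 9216 and DCB_CREDIT_QUANTUM = 64:
 *   min_credit     = ((9216 / 2) + 64 - 1) / 64      = 72 quanta (4608 bytes)
 *   min_percent    = 10 (smallest non-zero weighted percentage)
 *   min_multiplier = (72 / 10) + 1                   = 8
 * A TC weighted at 10% then gets:
 *   credit_refill  = min(10 * 8, MAX_CREDIT_REFILL)  = 80 quanta (5120 bytes)
 * which stays above min_credit, and credit_max is likewise clamped to at
 * least min_credit.
 */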
/* Find out the link percentage for each TC first */
for (i = 0; i < MAX_TRAFFIC_CLASS; i++) {
p = &dcb_config->tc_config[i].path[direction];
@@ -73,8 +101,9 @@ s32 ixgbe_dcb_calculate_tc_credits(struct ixgbe_dcb_config *dcb_config,
/* Save link_percentage for reference */
p->link_percent = (u8)link_percentage;
- /* Calculate credit refill and save it */
- credit_refill = link_percentage * MINIMUM_CREDIT_REFILL;
+ /* Calculate credit refill ratio using multiplier */
+ credit_refill = min(link_percentage * min_multiplier,
+ MAX_CREDIT_REFILL);
p->data_credits_refill = (u16)credit_refill;
/* Calculate maximum credit for the TC */
@@ -85,8 +114,8 @@ s32 ixgbe_dcb_calculate_tc_credits(struct ixgbe_dcb_config *dcb_config,
* of a TC is too small, the maximum credit may not be
* enough to send out a jumbo frame in data plane arbitration.
*/
- if (credit_max && (credit_max < MINIMUM_CREDIT_FOR_JUMBO))
- credit_max = MINIMUM_CREDIT_FOR_JUMBO;
+ if (credit_max && (credit_max < min_credit))
+ credit_max = min_credit;
if (direction == DCB_TX_CONFIG) {
/*
diff --git a/drivers/net/ixgbe/ixgbe_dcb.h b/drivers/net/ixgbe/ixgbe_dcb.h
index eb1059f09da0..0208a87b129e 100644
--- a/drivers/net/ixgbe/ixgbe_dcb.h
+++ b/drivers/net/ixgbe/ixgbe_dcb.h
@@ -150,15 +150,14 @@ struct ixgbe_dcb_config {
/* DCB driver APIs */
/* DCB credits calculation */
-s32 ixgbe_dcb_calculate_tc_credits(struct ixgbe_dcb_config *, u8);
+s32 ixgbe_dcb_calculate_tc_credits(struct ixgbe_dcb_config *, int, u8);
/* DCB hw initialization */
s32 ixgbe_dcb_hw_config(struct ixgbe_hw *, struct ixgbe_dcb_config *);
/* DCB definitions for credit calculation */
+#define DCB_CREDIT_QUANTUM 64 /* DCB Quantum */
#define MAX_CREDIT_REFILL 511 /* 0x1FF * 64B = 32704B */
-#define MINIMUM_CREDIT_REFILL 5 /* 5*64B = 320B */
-#define MINIMUM_CREDIT_FOR_JUMBO 145 /* 145= UpperBound((9*1024+54)/64B) for 9KB jumbo frame */
#define DCB_MAX_TSO_SIZE (32*1024) /* MAX TSO packet size supported in DCB mode */
#define MINIMUM_CREDIT_FOR_TSO (DCB_MAX_TSO_SIZE/64 + 1) /* 513 for 32KB TSO packet */
#define MAX_CREDIT 4095 /* Maximum credit supported: 256KB * 1204 / 64B */
diff --git a/drivers/net/ixgbe/ixgbe_dcb_82599.c b/drivers/net/ixgbe/ixgbe_dcb_82599.c
index 67c219f86c3a..05f224715073 100644
--- a/drivers/net/ixgbe/ixgbe_dcb_82599.c
+++ b/drivers/net/ixgbe/ixgbe_dcb_82599.c
@@ -397,6 +397,11 @@ static s32 ixgbe_dcb_config_82599(struct ixgbe_hw *hw)
reg &= ~IXGBE_RTTDCS_ARBDIS;
IXGBE_WRITE_REG(hw, IXGBE_RTTDCS, reg);
+ /* Enable Security TX Buffer IFG for DCB */
+ reg = IXGBE_READ_REG(hw, IXGBE_SECTXMINIFG);
+ reg |= IXGBE_SECTX_DCB;
+ IXGBE_WRITE_REG(hw, IXGBE_SECTXMINIFG, reg);
+
return 0;
}
diff --git a/drivers/net/ixgbe/ixgbe_dcb_82599.h b/drivers/net/ixgbe/ixgbe_dcb_82599.h
index 18d7fbf6c292..3841649fb954 100644
--- a/drivers/net/ixgbe/ixgbe_dcb_82599.h
+++ b/drivers/net/ixgbe/ixgbe_dcb_82599.h
@@ -95,6 +95,9 @@
#define IXGBE_TXPBTHRESH_DCB 0xA /* THRESH value for DCB mode */
+/* SECTXMINIFG DCB */
+#define IXGBE_SECTX_DCB 0x00001F00 /* DCB TX Buffer IFG */
+
/* DCB hardware-specific driver APIs */
diff --git a/drivers/net/ixgbe/ixgbe_main.c b/drivers/net/ixgbe/ixgbe_main.c
index f85631263af8..fbad4d819608 100644
--- a/drivers/net/ixgbe/ixgbe_main.c
+++ b/drivers/net/ixgbe/ixgbe_main.c
@@ -764,8 +764,9 @@ static bool ixgbe_clean_tx_irq(struct ixgbe_q_vector *q_vector,
#ifdef IXGBE_FCOE
/* adjust for FCoE Sequence Offload */
if ((adapter->flags & IXGBE_FLAG_FCOE_ENABLED)
- && (skb->protocol == htons(ETH_P_FCOE)) &&
- skb_is_gso(skb)) {
+ && skb_is_gso(skb)
+ && vlan_get_protocol(skb) ==
+ htons(ETH_P_FCOE)) {
hlen = skb_transport_offset(skb) +
sizeof(struct fc_frame_header) +
sizeof(struct fcoe_crc_eof);
@@ -3347,6 +3348,7 @@ static void ixgbe_napi_disable_all(struct ixgbe_adapter *adapter)
static void ixgbe_configure_dcb(struct ixgbe_adapter *adapter)
{
struct ixgbe_hw *hw = &adapter->hw;
+ int max_frame = adapter->netdev->mtu + ETH_HLEN + ETH_FCS_LEN;
u32 txdctl;
int i, j;
@@ -3359,8 +3361,15 @@ static void ixgbe_configure_dcb(struct ixgbe_adapter *adapter)
if (hw->mac.type == ixgbe_mac_82598EB)
netif_set_gso_max_size(adapter->netdev, 32768);
- ixgbe_dcb_calculate_tc_credits(&adapter->dcb_cfg, DCB_TX_CONFIG);
- ixgbe_dcb_calculate_tc_credits(&adapter->dcb_cfg, DCB_RX_CONFIG);
+#ifdef CONFIG_FCOE
+ if (adapter->netdev->features & NETIF_F_FCOE_MTU)
+ max_frame = max(max_frame, IXGBE_FCOE_JUMBO_FRAME_SIZE);
+#endif
+
+ ixgbe_dcb_calculate_tc_credits(&adapter->dcb_cfg, max_frame,
+ DCB_TX_CONFIG);
+ ixgbe_dcb_calculate_tc_credits(&adapter->dcb_cfg, max_frame,
+ DCB_RX_CONFIG);
/* reconfigure the hardware */
ixgbe_dcb_hw_config(&adapter->hw, &adapter->dcb_cfg);
@@ -5815,7 +5824,7 @@ static void ixgbe_watchdog_task(struct work_struct *work)
static int ixgbe_tso(struct ixgbe_adapter *adapter,
struct ixgbe_ring *tx_ring, struct sk_buff *skb,
- u32 tx_flags, u8 *hdr_len)
+ u32 tx_flags, u8 *hdr_len, __be16 protocol)
{
struct ixgbe_adv_tx_context_desc *context_desc;
unsigned int i;
@@ -5833,7 +5842,7 @@ static int ixgbe_tso(struct ixgbe_adapter *adapter,
l4len = tcp_hdrlen(skb);
*hdr_len += l4len;
- if (skb->protocol == htons(ETH_P_IP)) {
+ if (protocol == htons(ETH_P_IP)) {
struct iphdr *iph = ip_hdr(skb);
iph->tot_len = 0;
iph->check = 0;
@@ -5872,7 +5881,7 @@ static int ixgbe_tso(struct ixgbe_adapter *adapter,
type_tucmd_mlhl = (IXGBE_TXD_CMD_DEXT |
IXGBE_ADVTXD_DTYP_CTXT);
- if (skb->protocol == htons(ETH_P_IP))
+ if (protocol == htons(ETH_P_IP))
type_tucmd_mlhl |= IXGBE_ADVTXD_TUCMD_IPV4;
type_tucmd_mlhl |= IXGBE_ADVTXD_TUCMD_L4T_TCP;
context_desc->type_tucmd_mlhl = cpu_to_le32(type_tucmd_mlhl);
@@ -5898,16 +5907,10 @@ static int ixgbe_tso(struct ixgbe_adapter *adapter,
return false;
}
-static u32 ixgbe_psum(struct ixgbe_adapter *adapter, struct sk_buff *skb)
+static u32 ixgbe_psum(struct ixgbe_adapter *adapter, struct sk_buff *skb,
+ __be16 protocol)
{
u32 rtn = 0;
- __be16 protocol;
-
- if (skb->protocol == cpu_to_be16(ETH_P_8021Q))
- protocol = ((const struct vlan_ethhdr *)skb->data)->
- h_vlan_encapsulated_proto;
- else
- protocol = skb->protocol;
switch (protocol) {
case cpu_to_be16(ETH_P_IP):
@@ -5935,7 +5938,7 @@ static u32 ixgbe_psum(struct ixgbe_adapter *adapter, struct sk_buff *skb)
default:
if (unlikely(net_ratelimit()))
e_warn(probe, "partial checksum but proto=%x!\n",
- skb->protocol);
+ protocol);
break;
}
@@ -5944,7 +5947,8 @@ static u32 ixgbe_psum(struct ixgbe_adapter *adapter, struct sk_buff *skb)
static bool ixgbe_tx_csum(struct ixgbe_adapter *adapter,
struct ixgbe_ring *tx_ring,
- struct sk_buff *skb, u32 tx_flags)
+ struct sk_buff *skb, u32 tx_flags,
+ __be16 protocol)
{
struct ixgbe_adv_tx_context_desc *context_desc;
unsigned int i;
@@ -5973,7 +5977,7 @@ static bool ixgbe_tx_csum(struct ixgbe_adapter *adapter,
IXGBE_ADVTXD_DTYP_CTXT);
if (skb->ip_summed == CHECKSUM_PARTIAL)
- type_tucmd_mlhl |= ixgbe_psum(adapter, skb);
+ type_tucmd_mlhl |= ixgbe_psum(adapter, skb, protocol);
context_desc->type_tucmd_mlhl = cpu_to_le32(type_tucmd_mlhl);
/* use index zero for tx checksum offload */
@@ -6171,7 +6175,7 @@ static void ixgbe_tx_queue(struct ixgbe_adapter *adapter,
}
static void ixgbe_atr(struct ixgbe_adapter *adapter, struct sk_buff *skb,
- int queue, u32 tx_flags)
+ int queue, u32 tx_flags, __be16 protocol)
{
struct ixgbe_atr_input atr_input;
struct tcphdr *th;
@@ -6182,7 +6186,7 @@ static void ixgbe_atr(struct ixgbe_adapter *adapter, struct sk_buff *skb,
u8 l4type = 0;
/* Right now, we support IPv4 only */
- if (skb->protocol != htons(ETH_P_IP))
+ if (protocol != htons(ETH_P_IP))
return;
/* check if we're UDP or TCP */
if (iph->protocol == IPPROTO_TCP) {
@@ -6249,10 +6253,13 @@ static u16 ixgbe_select_queue(struct net_device *dev, struct sk_buff *skb)
{
struct ixgbe_adapter *adapter = netdev_priv(dev);
int txq = smp_processor_id();
-
#ifdef IXGBE_FCOE
- if ((skb->protocol == htons(ETH_P_FCOE)) ||
- (skb->protocol == htons(ETH_P_FIP))) {
+ __be16 protocol;
+
+ protocol = vlan_get_protocol(skb);
+
+ if ((protocol == htons(ETH_P_FCOE)) ||
+ (protocol == htons(ETH_P_FIP))) {
if (adapter->flags & IXGBE_FLAG_FCOE_ENABLED) {
txq &= (adapter->ring_feature[RING_F_FCOE].indices - 1);
txq += adapter->ring_feature[RING_F_FCOE].mask;
@@ -6295,6 +6302,9 @@ netdev_tx_t ixgbe_xmit_frame_ring(struct sk_buff *skb, struct net_device *netdev
int tso;
int count = 0;
unsigned int f;
+ __be16 protocol;
+
+ protocol = vlan_get_protocol(skb);
if (vlan_tx_tag_present(skb)) {
tx_flags |= vlan_tx_tag_get(skb);
@@ -6315,8 +6325,8 @@ netdev_tx_t ixgbe_xmit_frame_ring(struct sk_buff *skb, struct net_device *netdev
/* for FCoE with DCB, we force the priority to what
* was specified by the switch */
if (adapter->flags & IXGBE_FLAG_FCOE_ENABLED &&
- (skb->protocol == htons(ETH_P_FCOE) ||
- skb->protocol == htons(ETH_P_FIP))) {
+ (protocol == htons(ETH_P_FCOE) ||
+ protocol == htons(ETH_P_FIP))) {
#ifdef CONFIG_IXGBE_DCB
if (adapter->flags & IXGBE_FLAG_DCB_ENABLED) {
tx_flags &= ~(IXGBE_TX_FLAGS_VLAN_PRIO_MASK
@@ -6326,7 +6336,7 @@ netdev_tx_t ixgbe_xmit_frame_ring(struct sk_buff *skb, struct net_device *netdev
}
#endif
/* flag for FCoE offloads */
- if (skb->protocol == htons(ETH_P_FCOE))
+ if (protocol == htons(ETH_P_FCOE))
tx_flags |= IXGBE_TX_FLAGS_FCOE;
}
#endif
@@ -6360,9 +6370,10 @@ netdev_tx_t ixgbe_xmit_frame_ring(struct sk_buff *skb, struct net_device *netdev
tx_flags |= IXGBE_TX_FLAGS_FSO;
#endif /* IXGBE_FCOE */
} else {
- if (skb->protocol == htons(ETH_P_IP))
+ if (protocol == htons(ETH_P_IP))
tx_flags |= IXGBE_TX_FLAGS_IPV4;
- tso = ixgbe_tso(adapter, tx_ring, skb, tx_flags, &hdr_len);
+ tso = ixgbe_tso(adapter, tx_ring, skb, tx_flags, &hdr_len,
+ protocol);
if (tso < 0) {
dev_kfree_skb_any(skb);
return NETDEV_TX_OK;
@@ -6370,7 +6381,8 @@ netdev_tx_t ixgbe_xmit_frame_ring(struct sk_buff *skb, struct net_device *netdev
if (tso)
tx_flags |= IXGBE_TX_FLAGS_TSO;
- else if (ixgbe_tx_csum(adapter, tx_ring, skb, tx_flags) &&
+ else if (ixgbe_tx_csum(adapter, tx_ring, skb, tx_flags,
+ protocol) &&
(skb->ip_summed == CHECKSUM_PARTIAL))
tx_flags |= IXGBE_TX_FLAGS_CSUM;
}
@@ -6384,7 +6396,7 @@ netdev_tx_t ixgbe_xmit_frame_ring(struct sk_buff *skb, struct net_device *netdev
test_bit(__IXGBE_FDIR_INIT_DONE,
&tx_ring->reinit_state)) {
ixgbe_atr(adapter, skb, tx_ring->queue_index,
- tx_flags);
+ tx_flags, protocol);
tx_ring->atr_count = 0;
}
}
diff --git a/drivers/net/jme.c b/drivers/net/jme.c
index d7a975ee2add..c57d9a43ceca 100644
--- a/drivers/net/jme.c
+++ b/drivers/net/jme.c
@@ -1623,12 +1623,12 @@ err_out:
return rc;
}
-#ifdef CONFIG_PM
static void
jme_set_100m_half(struct jme_adapter *jme)
{
u32 bmcr, tmp;
+ jme_phy_on(jme);
bmcr = jme_mdio_read(jme->dev, jme->mii_if.phy_id, MII_BMCR);
tmp = bmcr & ~(BMCR_ANENABLE | BMCR_SPEED100 |
BMCR_SPEED1000 | BMCR_FULLDPLX);
@@ -1656,7 +1656,6 @@ jme_wait_link(struct jme_adapter *jme)
phylink = jme_linkstat_from_phy(jme);
}
}
-#endif
static inline void
jme_phy_off(struct jme_adapter *jme)
@@ -1664,6 +1663,21 @@ jme_phy_off(struct jme_adapter *jme)
jme_mdio_write(jme->dev, jme->mii_if.phy_id, MII_BMCR, BMCR_PDOWN);
}
+static void
+jme_powersave_phy(struct jme_adapter *jme)
+{
+ if (jme->reg_pmcs) {
+ jme_set_100m_half(jme);
+
+ if (jme->reg_pmcs & (PMCS_LFEN | PMCS_LREN))
+ jme_wait_link(jme);
+
+ jwrite32(jme, JME_PMCS, jme->reg_pmcs);
+ } else {
+ jme_phy_off(jme);
+ }
+}
+
static int
jme_close(struct net_device *netdev)
{
@@ -2941,11 +2955,7 @@ jme_init_one(struct pci_dev *pdev,
* Tell stack that we are not ready to work until open()
*/
netif_carrier_off(netdev);
- netif_stop_queue(netdev);
- /*
- * Register netdev
- */
rc = register_netdev(netdev);
if (rc) {
pr_err("Cannot register net device\n");
@@ -2991,6 +3001,16 @@ jme_remove_one(struct pci_dev *pdev)
}
+static void
+jme_shutdown(struct pci_dev *pdev)
+{
+ struct net_device *netdev = pci_get_drvdata(pdev);
+ struct jme_adapter *jme = netdev_priv(netdev);
+
+ jme_powersave_phy(jme);
+ pci_pme_active(pdev, true);
+}
+
#ifdef CONFIG_PM
static int
jme_suspend(struct pci_dev *pdev, pm_message_t state)
@@ -3028,19 +3048,9 @@ jme_suspend(struct pci_dev *pdev, pm_message_t state)
tasklet_hi_enable(&jme->rxempty_task);
pci_save_state(pdev);
- if (jme->reg_pmcs) {
- jme_set_100m_half(jme);
-
- if (jme->reg_pmcs & (PMCS_LFEN | PMCS_LREN))
- jme_wait_link(jme);
-
- jwrite32(jme, JME_PMCS, jme->reg_pmcs);
-
- pci_enable_wake(pdev, PCI_D3cold, true);
- } else {
- jme_phy_off(jme);
- }
- pci_set_power_state(pdev, PCI_D3cold);
+ jme_powersave_phy(jme);
+ pci_enable_wake(jme->pdev, PCI_D3hot, true);
+ pci_set_power_state(pdev, PCI_D3hot);
return 0;
}
@@ -3087,6 +3097,7 @@ static struct pci_driver jme_driver = {
.suspend = jme_suspend,
.resume = jme_resume,
#endif /* CONFIG_PM */
+ .shutdown = jme_shutdown,
};
static int __init
diff --git a/drivers/net/lib8390.c b/drivers/net/lib8390.c
index 316bb70775b1..e7030ceb178b 100644
--- a/drivers/net/lib8390.c
+++ b/drivers/net/lib8390.c
@@ -1077,7 +1077,6 @@ static void __NS8390_init(struct net_device *dev, int startp)
ei_outb_p(ei_local->rx_start_page, e8390_base + EN1_CURPAG);
ei_outb_p(E8390_NODMA+E8390_PAGE0+E8390_STOP, e8390_base+E8390_CMD);
- netif_start_queue(dev);
ei_local->tx1 = ei_local->tx2 = 0;
ei_local->txing = 0;
diff --git a/drivers/net/macb.c b/drivers/net/macb.c
index 4297f6e8c4bc..f69e73e2191e 100644
--- a/drivers/net/macb.c
+++ b/drivers/net/macb.c
@@ -515,14 +515,15 @@ static int macb_poll(struct napi_struct *napi, int budget)
(unsigned long)status, budget);
work_done = macb_rx(bp, budget);
- if (work_done < budget)
+ if (work_done < budget) {
napi_complete(napi);
- /*
- * We've done what we can to clean the buffers. Make sure we
- * get notified when new packets arrive.
- */
- macb_writel(bp, IER, MACB_RX_INT_FLAGS);
+ /*
+ * We've done what we can to clean the buffers. Make sure we
+ * get notified when new packets arrive.
+ */
+ macb_writel(bp, IER, MACB_RX_INT_FLAGS);
+ }
/* TODO: Handle errors */
@@ -550,12 +551,16 @@ static irqreturn_t macb_interrupt(int irq, void *dev_id)
}
if (status & MACB_RX_INT_FLAGS) {
+ /*
+ * There's no point taking any more interrupts
+ * until we have processed the buffers. The
+ * scheduling call may fail if the poll routine
+ * is already scheduled, so disable interrupts
+ * now.
+ */
+ macb_writel(bp, IDR, MACB_RX_INT_FLAGS);
+
if (napi_schedule_prep(&bp->napi)) {
- /*
- * There's no point taking any more interrupts
- * until we have processed the buffers
- */
- macb_writel(bp, IDR, MACB_RX_INT_FLAGS);
dev_dbg(&bp->pdev->dev,
"scheduling RX softirq\n");
__napi_schedule(&bp->napi);
diff --git a/drivers/net/mlx4/en_main.c b/drivers/net/mlx4/en_main.c
index 143906417048..f6e0d40cd876 100644
--- a/drivers/net/mlx4/en_main.c
+++ b/drivers/net/mlx4/en_main.c
@@ -124,6 +124,13 @@ static int mlx4_en_get_profile(struct mlx4_en_dev *mdev)
return 0;
}
+static void *mlx4_en_get_netdev(struct mlx4_dev *dev, void *ctx, u8 port)
+{
+ struct mlx4_en_dev *endev = ctx;
+
+ return endev->pndev[port];
+}
+
static void mlx4_en_event(struct mlx4_dev *dev, void *endev_ptr,
enum mlx4_dev_event event, int port)
{
@@ -282,9 +289,11 @@ err_free_res:
}
static struct mlx4_interface mlx4_en_interface = {
- .add = mlx4_en_add,
- .remove = mlx4_en_remove,
- .event = mlx4_en_event,
+ .add = mlx4_en_add,
+ .remove = mlx4_en_remove,
+ .event = mlx4_en_event,
+ .get_dev = mlx4_en_get_netdev,
+ .protocol = MLX4_PROTOCOL_EN,
};
static int __init mlx4_en_init(void)
diff --git a/drivers/net/mlx4/en_netdev.c b/drivers/net/mlx4/en_netdev.c
index 79478bd4211a..6d6806b361e3 100644
--- a/drivers/net/mlx4/en_netdev.c
+++ b/drivers/net/mlx4/en_netdev.c
@@ -69,6 +69,7 @@ static void mlx4_en_vlan_rx_add_vid(struct net_device *dev, unsigned short vid)
struct mlx4_en_priv *priv = netdev_priv(dev);
struct mlx4_en_dev *mdev = priv->mdev;
int err;
+ int idx;
if (!priv->vlgrp)
return;
@@ -83,7 +84,10 @@ static void mlx4_en_vlan_rx_add_vid(struct net_device *dev, unsigned short vid)
if (err)
en_err(priv, "Failed configuring VLAN filter\n");
}
+ if (mlx4_register_vlan(mdev->dev, priv->port, vid, &idx))
+ en_err(priv, "failed adding vlan %d\n", vid);
mutex_unlock(&mdev->state_lock);
+
}
static void mlx4_en_vlan_rx_kill_vid(struct net_device *dev, unsigned short vid)
@@ -91,6 +95,7 @@ static void mlx4_en_vlan_rx_kill_vid(struct net_device *dev, unsigned short vid)
struct mlx4_en_priv *priv = netdev_priv(dev);
struct mlx4_en_dev *mdev = priv->mdev;
int err;
+ int idx;
if (!priv->vlgrp)
return;
@@ -101,6 +106,11 @@ static void mlx4_en_vlan_rx_kill_vid(struct net_device *dev, unsigned short vid)
/* Remove VID from port VLAN filter */
mutex_lock(&mdev->state_lock);
+ if (!mlx4_find_cached_vlan(mdev->dev, priv->port, vid, &idx))
+ mlx4_unregister_vlan(mdev->dev, priv->port, idx);
+ else
+ en_err(priv, "could not find vid %d in cache\n", vid);
+
if (mdev->device_up && priv->port_up) {
err = mlx4_SET_VLAN_FLTR(mdev->dev, priv->port, priv->vlgrp);
if (err)
diff --git a/drivers/net/mlx4/en_port.c b/drivers/net/mlx4/en_port.c
index aa3ef2aee5bf..7f5a3221e0c1 100644
--- a/drivers/net/mlx4/en_port.c
+++ b/drivers/net/mlx4/en_port.c
@@ -127,8 +127,8 @@ int mlx4_SET_PORT_qpn_calc(struct mlx4_dev *dev, u8 port, u32 base_qpn,
memset(context, 0, sizeof *context);
context->base_qpn = cpu_to_be32(base_qpn);
- context->promisc = cpu_to_be32(promisc << SET_PORT_PROMISC_SHIFT | base_qpn);
- context->mcast = cpu_to_be32(1 << SET_PORT_PROMISC_SHIFT | base_qpn);
+ context->promisc = cpu_to_be32(promisc << SET_PORT_PROMISC_EN_SHIFT | base_qpn);
+ context->mcast = cpu_to_be32(1 << SET_PORT_PROMISC_MODE_SHIFT | base_qpn);
context->intra_no_vlan = 0;
context->no_vlan = MLX4_NO_VLAN_IDX;
context->intra_vlan_miss = 0;
diff --git a/drivers/net/mlx4/en_port.h b/drivers/net/mlx4/en_port.h
index f6511aa2b7df..092e814b1981 100644
--- a/drivers/net/mlx4/en_port.h
+++ b/drivers/net/mlx4/en_port.h
@@ -36,7 +36,8 @@
#define SET_PORT_GEN_ALL_VALID 0x7
-#define SET_PORT_PROMISC_SHIFT 31
+#define SET_PORT_PROMISC_EN_SHIFT 31
+#define SET_PORT_PROMISC_MODE_SHIFT 30
enum {
MLX4_CMD_SET_VLAN_FLTR = 0x47,
diff --git a/drivers/net/mlx4/fw.c b/drivers/net/mlx4/fw.c
index b716e1a1b298..7a7e18ba278a 100644
--- a/drivers/net/mlx4/fw.c
+++ b/drivers/net/mlx4/fw.c
@@ -98,7 +98,8 @@ static void dump_dev_cap_flags(struct mlx4_dev *dev, u32 flags)
[20] = "Address vector port checking support",
[21] = "UD multicast support",
[24] = "Demand paging support",
- [25] = "Router support"
+ [25] = "Router support",
+ [30] = "IBoE support"
};
int i;
@@ -288,6 +289,10 @@ int mlx4_QUERY_DEV_CAP(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap)
MLX4_GET(field, outbox, QUERY_DEV_CAP_LOG_BF_REG_SZ_OFFSET);
dev_cap->bf_reg_size = 1 << (field & 0x1f);
MLX4_GET(field, outbox, QUERY_DEV_CAP_LOG_MAX_BF_REGS_PER_PAGE_OFFSET);
+ if ((1 << (field & 0x3f)) > (PAGE_SIZE / dev_cap->bf_reg_size)) {
+ mlx4_warn(dev, "firmware bug: log2 # of blue flame regs is invalid (%d), forcing 3\n", field & 0x1f);
+ field = 3;
+ }
dev_cap->bf_regs_per_page = 1 << (field & 0x3f);
mlx4_dbg(dev, "BlueFlame available (reg size %d, regs/page %d)\n",
dev_cap->bf_reg_size, dev_cap->bf_regs_per_page);
diff --git a/drivers/net/mlx4/icm.c b/drivers/net/mlx4/icm.c
index b07e4dee80aa..02393fdf44c1 100644
--- a/drivers/net/mlx4/icm.c
+++ b/drivers/net/mlx4/icm.c
@@ -210,38 +210,12 @@ static int mlx4_MAP_ICM(struct mlx4_dev *dev, struct mlx4_icm *icm, u64 virt)
return mlx4_map_cmd(dev, MLX4_CMD_MAP_ICM, icm, virt);
}
-int mlx4_UNMAP_ICM(struct mlx4_dev *dev, u64 virt, u32 page_count)
+static int mlx4_UNMAP_ICM(struct mlx4_dev *dev, u64 virt, u32 page_count)
{
return mlx4_cmd(dev, virt, page_count, 0, MLX4_CMD_UNMAP_ICM,
MLX4_CMD_TIME_CLASS_B);
}
-int mlx4_MAP_ICM_page(struct mlx4_dev *dev, u64 dma_addr, u64 virt)
-{
- struct mlx4_cmd_mailbox *mailbox;
- __be64 *inbox;
- int err;
-
- mailbox = mlx4_alloc_cmd_mailbox(dev);
- if (IS_ERR(mailbox))
- return PTR_ERR(mailbox);
- inbox = mailbox->buf;
-
- inbox[0] = cpu_to_be64(virt);
- inbox[1] = cpu_to_be64(dma_addr);
-
- err = mlx4_cmd(dev, mailbox->dma, 1, 0, MLX4_CMD_MAP_ICM,
- MLX4_CMD_TIME_CLASS_B);
-
- mlx4_free_cmd_mailbox(dev, mailbox);
-
- if (!err)
- mlx4_dbg(dev, "Mapped page at %llx to %llx for ICM.\n",
- (unsigned long long) dma_addr, (unsigned long long) virt);
-
- return err;
-}
-
int mlx4_MAP_ICM_AUX(struct mlx4_dev *dev, struct mlx4_icm *icm)
{
return mlx4_map_cmd(dev, MLX4_CMD_MAP_ICM_AUX, icm, -1);
diff --git a/drivers/net/mlx4/icm.h b/drivers/net/mlx4/icm.h
index ab56a2f89b65..b10c07a1dc1a 100644
--- a/drivers/net/mlx4/icm.h
+++ b/drivers/net/mlx4/icm.h
@@ -128,8 +128,6 @@ static inline unsigned long mlx4_icm_size(struct mlx4_icm_iter *iter)
return sg_dma_len(&iter->chunk->mem[iter->page_idx]);
}
-int mlx4_UNMAP_ICM(struct mlx4_dev *dev, u64 virt, u32 page_count);
-int mlx4_MAP_ICM_page(struct mlx4_dev *dev, u64 dma_addr, u64 virt);
int mlx4_MAP_ICM_AUX(struct mlx4_dev *dev, struct mlx4_icm *icm);
int mlx4_UNMAP_ICM_AUX(struct mlx4_dev *dev);
diff --git a/drivers/net/mlx4/intf.c b/drivers/net/mlx4/intf.c
index 555067802751..73c94fcdfddf 100644
--- a/drivers/net/mlx4/intf.c
+++ b/drivers/net/mlx4/intf.c
@@ -161,3 +161,24 @@ void mlx4_unregister_device(struct mlx4_dev *dev)
mutex_unlock(&intf_mutex);
}
+
+void *mlx4_get_protocol_dev(struct mlx4_dev *dev, enum mlx4_protocol proto, int port)
+{
+ struct mlx4_priv *priv = mlx4_priv(dev);
+ struct mlx4_device_context *dev_ctx;
+ unsigned long flags;
+ void *result = NULL;
+
+ spin_lock_irqsave(&priv->ctx_lock, flags);
+
+ list_for_each_entry(dev_ctx, &priv->ctx_list, list)
+ if (dev_ctx->intf->protocol == proto && dev_ctx->intf->get_dev) {
+ result = dev_ctx->intf->get_dev(dev, dev_ctx->context, port);
+ break;
+ }
+
+ spin_unlock_irqrestore(&priv->ctx_lock, flags);
+
+ return result;
+}
+EXPORT_SYMBOL_GPL(mlx4_get_protocol_dev);
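The export above lets other mlx4 protocol drivers look up the net_device that mlx4_en registered for a port; a minimal, illustrative caller (the wrapper function itself is not part of this patch) looks like:

/* Illustrative only: resolve the Ethernet netdev behind an mlx4 port. */
static struct net_device *example_port_to_netdev(struct mlx4_dev *dev, int port)
{
	/* Returns NULL when no MLX4_PROTOCOL_EN interface is bound yet. */
	return mlx4_get_protocol_dev(dev, MLX4_PROTOCOL_EN, port);
}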
diff --git a/drivers/net/mlx4/main.c b/drivers/net/mlx4/main.c
index 569fa3df381f..782f11d8fa71 100644
--- a/drivers/net/mlx4/main.c
+++ b/drivers/net/mlx4/main.c
@@ -103,7 +103,7 @@ MODULE_PARM_DESC(use_prio, "Enable steering by VLAN priority on ETH ports "
static int log_mtts_per_seg = ilog2(MLX4_MTT_ENTRY_PER_SEG);
module_param_named(log_mtts_per_seg, log_mtts_per_seg, int, 0444);
-MODULE_PARM_DESC(log_mtts_per_seg, "Log2 number of MTT entries per segment (1-5)");
+MODULE_PARM_DESC(log_mtts_per_seg, "Log2 number of MTT entries per segment (1-7)");
int mlx4_check_port_params(struct mlx4_dev *dev,
enum mlx4_port_type *port_type)
@@ -1310,7 +1310,7 @@ static int __init mlx4_verify_params(void)
return -1;
}
- if ((log_mtts_per_seg < 1) || (log_mtts_per_seg > 5)) {
+ if ((log_mtts_per_seg < 1) || (log_mtts_per_seg > 7)) {
pr_warning("mlx4_core: bad log_mtts_per_seg: %d\n", log_mtts_per_seg);
return -1;
}
diff --git a/drivers/net/mlx4/mlx4_en.h b/drivers/net/mlx4/mlx4_en.h
index 1fc16ab7ad2f..dfed6a07c2d7 100644
--- a/drivers/net/mlx4/mlx4_en.h
+++ b/drivers/net/mlx4/mlx4_en.h
@@ -475,6 +475,7 @@ struct mlx4_en_priv {
char *mc_addrs;
int mc_addrs_cnt;
struct mlx4_en_stat_out_mbox hw_stats;
+ int vids[128];
};
diff --git a/drivers/net/mlx4/port.c b/drivers/net/mlx4/port.c
index 606aa58afdea..451339559bdc 100644
--- a/drivers/net/mlx4/port.c
+++ b/drivers/net/mlx4/port.c
@@ -111,6 +111,12 @@ int mlx4_register_mac(struct mlx4_dev *dev, u8 port, u64 mac, int *index)
goto out;
}
}
+
+ if (free < 0) {
+ err = -ENOMEM;
+ goto out;
+ }
+
mlx4_dbg(dev, "Free MAC index is %d\n", free);
if (table->total == table->max) {
@@ -182,6 +188,25 @@ static int mlx4_set_port_vlan_table(struct mlx4_dev *dev, u8 port,
return err;
}
+int mlx4_find_cached_vlan(struct mlx4_dev *dev, u8 port, u16 vid, int *idx)
+{
+ struct mlx4_vlan_table *table = &mlx4_priv(dev)->port[port].vlan_table;
+ int i;
+
+ for (i = 0; i < MLX4_MAX_VLAN_NUM; ++i) {
+ if (table->refs[i] &&
+ (vid == (MLX4_VLAN_MASK &
+ be32_to_cpu(table->entries[i])))) {
+ /* VLAN already registered, increase reference count */
+ *idx = i;
+ return 0;
+ }
+ }
+
+ return -ENOENT;
+}
+EXPORT_SYMBOL_GPL(mlx4_find_cached_vlan);
+
int mlx4_register_vlan(struct mlx4_dev *dev, u8 port, u16 vlan, int *index)
{
struct mlx4_vlan_table *table = &mlx4_priv(dev)->port[port].vlan_table;
@@ -205,6 +230,11 @@ int mlx4_register_vlan(struct mlx4_dev *dev, u8 port, u16 vlan, int *index)
}
}
+ if (free < 0) {
+ err = -ENOMEM;
+ goto out;
+ }
+
if (table->total == table->max) {
/* No free vlan entries */
err = -ENOSPC;
diff --git a/drivers/net/netxen/netxen_nic_ctx.c b/drivers/net/netxen/netxen_nic_ctx.c
index 12612127a087..f7d06cbc70ae 100644
--- a/drivers/net/netxen/netxen_nic_ctx.c
+++ b/drivers/net/netxen/netxen_nic_ctx.c
@@ -255,19 +255,6 @@ out_free_rq:
}
static void
-nx_fw_cmd_reset_ctx(struct netxen_adapter *adapter)
-{
-
- netxen_issue_cmd(adapter, adapter->ahw.pci_func, NXHAL_VERSION,
- adapter->ahw.pci_func, NX_DESTROY_CTX_RESET, 0,
- NX_CDRP_CMD_DESTROY_RX_CTX);
-
- netxen_issue_cmd(adapter, adapter->ahw.pci_func, NXHAL_VERSION,
- adapter->ahw.pci_func, NX_DESTROY_CTX_RESET, 0,
- NX_CDRP_CMD_DESTROY_TX_CTX);
-}
-
-static void
nx_fw_cmd_destroy_rx_ctx(struct netxen_adapter *adapter)
{
struct netxen_recv_context *recv_ctx = &adapter->recv_ctx;
@@ -698,8 +685,6 @@ int netxen_alloc_hw_resources(struct netxen_adapter *adapter)
if (!NX_IS_REVISION_P2(adapter->ahw.revision_id)) {
if (test_and_set_bit(__NX_FW_ATTACHED, &adapter->state))
goto done;
- if (reset_devices)
- nx_fw_cmd_reset_ctx(adapter);
err = nx_fw_cmd_create_rx_ctx(adapter);
if (err)
goto err_out_free;
diff --git a/drivers/net/netxen/netxen_nic_main.c b/drivers/net/netxen/netxen_nic_main.c
index 50820beac3aa..e1d30d7f2071 100644
--- a/drivers/net/netxen/netxen_nic_main.c
+++ b/drivers/net/netxen/netxen_nic_main.c
@@ -41,9 +41,6 @@
MODULE_DESCRIPTION("QLogic/NetXen (1/10) GbE Converged Ethernet Driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(NETXEN_NIC_LINUX_VERSIONID);
-MODULE_FIRMWARE(NX_P2_MN_ROMIMAGE_NAME);
-MODULE_FIRMWARE(NX_P3_CT_ROMIMAGE_NAME);
-MODULE_FIRMWARE(NX_P3_MN_ROMIMAGE_NAME);
MODULE_FIRMWARE(NX_UNIFIED_ROMIMAGE_NAME);
char netxen_nic_driver_name[] = "netxen_nic";
@@ -1240,7 +1237,6 @@ netxen_setup_netdev(struct netxen_adapter *adapter,
dev_warn(&pdev->dev, "failed to read mac addr\n");
netif_carrier_off(netdev);
- netif_stop_queue(netdev);
err = register_netdev(netdev);
if (err) {
@@ -1356,6 +1352,13 @@ netxen_nic_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
break;
}
+ if (reset_devices) {
+ if (adapter->portnum == 0) {
+ NXWR32(adapter, NX_CRB_DEV_REF_COUNT, 0);
+ adapter->need_fw_reset = 1;
+ }
+ }
+
err = netxen_start_firmware(adapter);
if (err)
goto err_out_decr_ref;
diff --git a/drivers/net/pch_gbe/pch_gbe_main.c b/drivers/net/pch_gbe/pch_gbe_main.c
index 472056b47440..03a1d280105f 100644
--- a/drivers/net/pch_gbe/pch_gbe_main.c
+++ b/drivers/net/pch_gbe/pch_gbe_main.c
@@ -1,6 +1,6 @@
/*
* Copyright (C) 1999 - 2010 Intel Corporation.
- * Copyright (C) 2010 OKI SEMICONDUCTOR Co., LTD.
+ * Copyright (C) 2010 OKI SEMICONDUCTOR CO., LTD.
*
* This code was derived from the Intel e1000e Linux driver.
*
@@ -2464,8 +2464,8 @@ static void __exit pch_gbe_exit_module(void)
module_init(pch_gbe_init_module);
module_exit(pch_gbe_exit_module);
-MODULE_DESCRIPTION("OKI semiconductor PCH Gigabit ethernet Driver");
-MODULE_AUTHOR("OKI semiconductor, <masa-korg@dsn.okisemi.com>");
+MODULE_DESCRIPTION("EG20T PCH Gigabit ethernet Driver");
+MODULE_AUTHOR("OKI SEMICONDUCTOR, <toshiharu-linux@dsn.okisemi.com>");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_VERSION);
MODULE_DEVICE_TABLE(pci, pch_gbe_pcidev_id);
diff --git a/drivers/net/pch_gbe/pch_gbe_param.c b/drivers/net/pch_gbe/pch_gbe_param.c
index 2510146fc560..ef0996a0eaaa 100644
--- a/drivers/net/pch_gbe/pch_gbe_param.c
+++ b/drivers/net/pch_gbe/pch_gbe_param.c
@@ -434,8 +434,8 @@ void pch_gbe_check_options(struct pch_gbe_adapter *adapter)
.err = "using default of "
__MODULE_STRING(PCH_GBE_DEFAULT_TXD),
.def = PCH_GBE_DEFAULT_TXD,
- .arg = { .r = { .min = PCH_GBE_MIN_TXD } },
- .arg = { .r = { .max = PCH_GBE_MAX_TXD } }
+ .arg = { .r = { .min = PCH_GBE_MIN_TXD,
+ .max = PCH_GBE_MAX_TXD } }
};
struct pch_gbe_tx_ring *tx_ring = adapter->tx_ring;
tx_ring->count = TxDescriptors;
@@ -450,8 +450,8 @@ void pch_gbe_check_options(struct pch_gbe_adapter *adapter)
.err = "using default of "
__MODULE_STRING(PCH_GBE_DEFAULT_RXD),
.def = PCH_GBE_DEFAULT_RXD,
- .arg = { .r = { .min = PCH_GBE_MIN_RXD } },
- .arg = { .r = { .max = PCH_GBE_MAX_RXD } }
+ .arg = { .r = { .min = PCH_GBE_MIN_RXD,
+ .max = PCH_GBE_MAX_RXD } }
};
struct pch_gbe_rx_ring *rx_ring = adapter->rx_ring;
rx_ring->count = RxDescriptors;
diff --git a/drivers/net/pcmcia/axnet_cs.c b/drivers/net/pcmcia/axnet_cs.c
index d2e166e29dda..8a4d19e5de06 100644
--- a/drivers/net/pcmcia/axnet_cs.c
+++ b/drivers/net/pcmcia/axnet_cs.c
@@ -111,13 +111,14 @@ static irqreturn_t ax_interrupt(int irq, void *dev_id);
typedef struct axnet_dev_t {
struct pcmcia_device *p_dev;
- caddr_t base;
- struct timer_list watchdog;
- int stale, fast_poll;
- u_short link_status;
- u_char duplex_flag;
- int phy_id;
- int flags;
+ caddr_t base;
+ struct timer_list watchdog;
+ int stale, fast_poll;
+ u_short link_status;
+ u_char duplex_flag;
+ int phy_id;
+ int flags;
+ int active_low;
} axnet_dev_t;
static inline axnet_dev_t *PRIV(struct net_device *dev)
@@ -322,6 +323,8 @@ static int axnet_config(struct pcmcia_device *link)
if (info->flags & IS_AX88790)
outb(0x10, dev->base_addr + AXNET_GPIO); /* select Internal PHY */
+ info->active_low = 0;
+
for (i = 0; i < 32; i++) {
j = mdio_read(dev->base_addr + AXNET_MII_EEP, i, 1);
j2 = mdio_read(dev->base_addr + AXNET_MII_EEP, i, 2);
@@ -329,15 +332,18 @@ static int axnet_config(struct pcmcia_device *link)
if ((j != 0) && (j != 0xffff)) break;
}
- /* Maybe PHY is in power down mode. (PPD_SET = 1)
- Bit 2 of CCSR is active low. */
if (i == 32) {
+ /* Maybe PHY is in power down mode. (PPD_SET = 1)
+ Bit 2 of CCSR is active low. */
pcmcia_write_config_byte(link, CISREG_CCSR, 0x04);
for (i = 0; i < 32; i++) {
j = mdio_read(dev->base_addr + AXNET_MII_EEP, i, 1);
j2 = mdio_read(dev->base_addr + AXNET_MII_EEP, i, 2);
if (j == j2) continue;
- if ((j != 0) && (j != 0xffff)) break;
+ if ((j != 0) && (j != 0xffff)) {
+ info->active_low = 1;
+ break;
+ }
}
}
@@ -383,8 +389,12 @@ static int axnet_suspend(struct pcmcia_device *link)
static int axnet_resume(struct pcmcia_device *link)
{
struct net_device *dev = link->priv;
+ axnet_dev_t *info = PRIV(dev);
if (link->open) {
+ if (info->active_low == 1)
+ pcmcia_write_config_byte(link, CISREG_CCSR, 0x04);
+
axnet_reset_8390(dev);
AX88190_init(dev, 1);
netif_device_attach(dev);
diff --git a/drivers/net/pcmcia/pcnet_cs.c b/drivers/net/pcmcia/pcnet_cs.c
index 03096c80103d..d05c44692f08 100644
--- a/drivers/net/pcmcia/pcnet_cs.c
+++ b/drivers/net/pcmcia/pcnet_cs.c
@@ -1536,6 +1536,7 @@ static struct pcmcia_device_id pcnet_ids[] = {
PCMCIA_DEVICE_PROD_ID12("COMPU-SHACK", "FASTline PCMCIA 10/100 Fast-Ethernet", 0xfa2e424d, 0x3953d9b9),
PCMCIA_DEVICE_PROD_ID12("CONTEC", "C-NET(PC)C-10L", 0x21cab552, 0xf6f90722),
PCMCIA_DEVICE_PROD_ID12("corega", "FEther PCC-TXF", 0x0a21501a, 0xa51564a2),
+ PCMCIA_DEVICE_PROD_ID12("corega", "Ether CF-TD", 0x0a21501a, 0x6589340a),
PCMCIA_DEVICE_PROD_ID12("corega K.K.", "corega EtherII PCC-T", 0x5261440f, 0xfa9d85bd),
PCMCIA_DEVICE_PROD_ID12("corega K.K.", "corega EtherII PCC-TD", 0x5261440f, 0xc49bd73d),
PCMCIA_DEVICE_PROD_ID12("Corega K.K.", "corega EtherII PCC-TD", 0xd4fdcbd8, 0xc49bd73d),
diff --git a/drivers/net/phy/marvell.c b/drivers/net/phy/marvell.c
index e2afdce0a437..e8b9c53c304b 100644
--- a/drivers/net/phy/marvell.c
+++ b/drivers/net/phy/marvell.c
@@ -30,11 +30,14 @@
#include <linux/ethtool.h>
#include <linux/phy.h>
#include <linux/marvell_phy.h>
+#include <linux/of.h>
#include <asm/io.h>
#include <asm/irq.h>
#include <asm/uaccess.h>
+#define MII_MARVELL_PHY_PAGE 22
+
#define MII_M1011_IEVENT 0x13
#define MII_M1011_IEVENT_CLEAR 0x0000
@@ -74,13 +77,12 @@
#define MII_88E1121_PHY_MSCR_TX_DELAY BIT(4)
#define MII_88E1121_PHY_MSCR_DELAY_MASK (~(0x3 << 4))
-#define MII_88EC048_PHY_MSCR1_REG 16
-#define MII_88EC048_PHY_MSCR1_PAD_ODD BIT(6)
+#define MII_88E1318S_PHY_MSCR1_REG 16
+#define MII_88E1318S_PHY_MSCR1_PAD_ODD BIT(6)
#define MII_88E1121_PHY_LED_CTRL 16
#define MII_88E1121_PHY_LED_PAGE 3
#define MII_88E1121_PHY_LED_DEF 0x0030
-#define MII_88E1121_PHY_PAGE 22
#define MII_M1011_PHY_STATUS 0x11
#define MII_M1011_PHY_STATUS_1000 0x8000
@@ -186,13 +188,94 @@ static int marvell_config_aneg(struct phy_device *phydev)
return 0;
}
+#ifdef CONFIG_OF_MDIO
+/*
+ * Set and/or override some configuration registers based on the
+ * marvell,reg-init property stored in the of_node for the phydev.
+ *
+ * marvell,reg-init = <reg-page reg mask value>,...;
+ *
+ * There may be one or more sets of <reg-page reg mask value>:
+ *
+ * reg-page: which register bank to use.
+ * reg: the register.
+ * mask: if non-zero, ANDed with existing register value.
+ * value: ORed with the masked value and written to the register.
+ *
+ */
+static int marvell_of_reg_init(struct phy_device *phydev)
+{
+ const __be32 *paddr;
+ int len, i, saved_page, current_page, page_changed, ret;
+
+ if (!phydev->dev.of_node)
+ return 0;
+
+ paddr = of_get_property(phydev->dev.of_node, "marvell,reg-init", &len);
+ if (!paddr || len < (4 * sizeof(*paddr)))
+ return 0;
+
+ saved_page = phy_read(phydev, MII_MARVELL_PHY_PAGE);
+ if (saved_page < 0)
+ return saved_page;
+ page_changed = 0;
+ current_page = saved_page;
+
+ ret = 0;
+ len /= sizeof(*paddr);
+ for (i = 0; i < len - 3; i += 4) {
+ u16 reg_page = be32_to_cpup(paddr + i);
+ u16 reg = be32_to_cpup(paddr + i + 1);
+ u16 mask = be32_to_cpup(paddr + i + 2);
+ u16 val_bits = be32_to_cpup(paddr + i + 3);
+ int val;
+
+ if (reg_page != current_page) {
+ current_page = reg_page;
+ page_changed = 1;
+ ret = phy_write(phydev, MII_MARVELL_PHY_PAGE, reg_page);
+ if (ret < 0)
+ goto err;
+ }
+
+ val = 0;
+ if (mask) {
+ val = phy_read(phydev, reg);
+ if (val < 0) {
+ ret = val;
+ goto err;
+ }
+ val &= mask;
+ }
+ val |= val_bits;
+
+ ret = phy_write(phydev, reg, val);
+ if (ret < 0)
+ goto err;
+
+ }
+err:
+ if (page_changed) {
+ i = phy_write(phydev, MII_MARVELL_PHY_PAGE, saved_page);
+ if (ret == 0)
+ ret = i;
+ }
+ return ret;
+}
+#else
+static int marvell_of_reg_init(struct phy_device *phydev)
+{
+ return 0;
+}
+#endif /* CONFIG_OF_MDIO */
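To make the <reg-page reg mask value> layout described in the comment above concrete, here is a hypothetical device-tree fragment; the node name, pages, registers, masks and values are all invented for illustration:

/*
 *	ethernet-phy@0 {
 *		reg = <0>;
 *		// first tuple: on page 2, write 0x1111 to register 0x10
 *		// (mask 0 means the old contents are ignored);
 *		// second tuple: on page 3, keep only bit 15 of register
 *		// 0x11 (mask 0x8000) and OR in 0x0002.
 *		marvell,reg-init = <2 0x10 0 0x1111>,
 *				   <3 0x11 0x8000 0x0002>;
 *	};
 */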
+
static int m88e1121_config_aneg(struct phy_device *phydev)
{
int err, oldpage, mscr;
- oldpage = phy_read(phydev, MII_88E1121_PHY_PAGE);
+ oldpage = phy_read(phydev, MII_MARVELL_PHY_PAGE);
- err = phy_write(phydev, MII_88E1121_PHY_PAGE,
+ err = phy_write(phydev, MII_MARVELL_PHY_PAGE,
MII_88E1121_PHY_MSCR_PAGE);
if (err < 0)
return err;
@@ -218,7 +301,7 @@ static int m88e1121_config_aneg(struct phy_device *phydev)
return err;
}
- phy_write(phydev, MII_88E1121_PHY_PAGE, oldpage);
+ phy_write(phydev, MII_MARVELL_PHY_PAGE, oldpage);
err = phy_write(phydev, MII_BMCR, BMCR_RESET);
if (err < 0)
@@ -229,36 +312,36 @@ static int m88e1121_config_aneg(struct phy_device *phydev)
if (err < 0)
return err;
- oldpage = phy_read(phydev, MII_88E1121_PHY_PAGE);
+ oldpage = phy_read(phydev, MII_MARVELL_PHY_PAGE);
- phy_write(phydev, MII_88E1121_PHY_PAGE, MII_88E1121_PHY_LED_PAGE);
+ phy_write(phydev, MII_MARVELL_PHY_PAGE, MII_88E1121_PHY_LED_PAGE);
phy_write(phydev, MII_88E1121_PHY_LED_CTRL, MII_88E1121_PHY_LED_DEF);
- phy_write(phydev, MII_88E1121_PHY_PAGE, oldpage);
+ phy_write(phydev, MII_MARVELL_PHY_PAGE, oldpage);
err = genphy_config_aneg(phydev);
return err;
}
-static int m88ec048_config_aneg(struct phy_device *phydev)
+static int m88e1318_config_aneg(struct phy_device *phydev)
{
int err, oldpage, mscr;
- oldpage = phy_read(phydev, MII_88E1121_PHY_PAGE);
+ oldpage = phy_read(phydev, MII_MARVELL_PHY_PAGE);
- err = phy_write(phydev, MII_88E1121_PHY_PAGE,
+ err = phy_write(phydev, MII_MARVELL_PHY_PAGE,
MII_88E1121_PHY_MSCR_PAGE);
if (err < 0)
return err;
- mscr = phy_read(phydev, MII_88EC048_PHY_MSCR1_REG);
- mscr |= MII_88EC048_PHY_MSCR1_PAD_ODD;
+ mscr = phy_read(phydev, MII_88E1318S_PHY_MSCR1_REG);
+ mscr |= MII_88E1318S_PHY_MSCR1_PAD_ODD;
- err = phy_write(phydev, MII_88E1121_PHY_MSCR_REG, mscr);
+ err = phy_write(phydev, MII_88E1318S_PHY_MSCR1_REG, mscr);
if (err < 0)
return err;
- err = phy_write(phydev, MII_88E1121_PHY_PAGE, oldpage);
+ err = phy_write(phydev, MII_MARVELL_PHY_PAGE, oldpage);
if (err < 0)
return err;
@@ -368,6 +451,9 @@ static int m88e1111_config_init(struct phy_device *phydev)
return err;
}
+ err = marvell_of_reg_init(phydev);
+ if (err < 0)
+ return err;
err = phy_write(phydev, MII_BMCR, BMCR_RESET);
if (err < 0)
@@ -398,7 +484,7 @@ static int m88e1118_config_init(struct phy_device *phydev)
int err;
/* Change address */
- err = phy_write(phydev, 0x16, 0x0002);
+ err = phy_write(phydev, MII_MARVELL_PHY_PAGE, 0x0002);
if (err < 0)
return err;
@@ -408,7 +494,7 @@ static int m88e1118_config_init(struct phy_device *phydev)
return err;
/* Change address */
- err = phy_write(phydev, 0x16, 0x0003);
+ err = phy_write(phydev, MII_MARVELL_PHY_PAGE, 0x0003);
if (err < 0)
return err;
@@ -420,8 +506,42 @@ static int m88e1118_config_init(struct phy_device *phydev)
if (err < 0)
return err;
+ err = marvell_of_reg_init(phydev);
+ if (err < 0)
+ return err;
+
/* Reset address */
- err = phy_write(phydev, 0x16, 0x0);
+ err = phy_write(phydev, MII_MARVELL_PHY_PAGE, 0x0);
+ if (err < 0)
+ return err;
+
+ err = phy_write(phydev, MII_BMCR, BMCR_RESET);
+ if (err < 0)
+ return err;
+
+ return 0;
+}
+
+static int m88e1149_config_init(struct phy_device *phydev)
+{
+ int err;
+
+ /* Change address */
+ err = phy_write(phydev, MII_MARVELL_PHY_PAGE, 0x0002);
+ if (err < 0)
+ return err;
+
+ /* Enable 1000 Mbit */
+ err = phy_write(phydev, 0x15, 0x1048);
+ if (err < 0)
+ return err;
+
+ err = marvell_of_reg_init(phydev);
+ if (err < 0)
+ return err;
+
+ /* Reset address */
+ err = phy_write(phydev, MII_MARVELL_PHY_PAGE, 0x0);
if (err < 0)
return err;
@@ -491,6 +611,10 @@ static int m88e1145_config_init(struct phy_device *phydev)
}
}
+ err = marvell_of_reg_init(phydev);
+ if (err < 0)
+ return err;
+
return 0;
}
@@ -659,12 +783,12 @@ static struct phy_driver marvell_drivers[] = {
.driver = { .owner = THIS_MODULE },
},
{
- .phy_id = MARVELL_PHY_ID_88EC048,
+ .phy_id = MARVELL_PHY_ID_88E1318S,
.phy_id_mask = MARVELL_PHY_ID_MASK,
- .name = "Marvell 88EC048",
+ .name = "Marvell 88E1318S",
.features = PHY_GBIT_FEATURES,
.flags = PHY_HAS_INTERRUPT,
- .config_aneg = &m88ec048_config_aneg,
+ .config_aneg = &m88e1318_config_aneg,
.read_status = &marvell_read_status,
.ack_interrupt = &marvell_ack_interrupt,
.config_intr = &marvell_config_intr,
@@ -685,6 +809,19 @@ static struct phy_driver marvell_drivers[] = {
.driver = { .owner = THIS_MODULE },
},
{
+ .phy_id = MARVELL_PHY_ID_88E1149R,
+ .phy_id_mask = MARVELL_PHY_ID_MASK,
+ .name = "Marvell 88E1149R",
+ .features = PHY_GBIT_FEATURES,
+ .flags = PHY_HAS_INTERRUPT,
+ .config_init = &m88e1149_config_init,
+ .config_aneg = &m88e1118_config_aneg,
+ .read_status = &genphy_read_status,
+ .ack_interrupt = &marvell_ack_interrupt,
+ .config_intr = &marvell_config_intr,
+ .driver = { .owner = THIS_MODULE },
+ },
+ {
.phy_id = MARVELL_PHY_ID_88E1240,
.phy_id_mask = MARVELL_PHY_ID_MASK,
.name = "Marvell 88E1240",
@@ -735,6 +872,7 @@ static struct mdio_device_id __maybe_unused marvell_tbl[] = {
{ 0x01410e10, 0xfffffff0 },
{ 0x01410cb0, 0xfffffff0 },
{ 0x01410cd0, 0xfffffff0 },
+ { 0x01410e50, 0xfffffff0 },
{ 0x01410e30, 0xfffffff0 },
{ 0x01410e90, 0xfffffff0 },
{ }
diff --git a/drivers/net/phy/phy.c b/drivers/net/phy/phy.c
index 1bb16cb79433..7670aac0e93f 100644
--- a/drivers/net/phy/phy.c
+++ b/drivers/net/phy/phy.c
@@ -65,7 +65,7 @@ EXPORT_SYMBOL(phy_print_status);
*
* Returns 0 on success or < 0 on error.
*/
-int phy_clear_interrupt(struct phy_device *phydev)
+static int phy_clear_interrupt(struct phy_device *phydev)
{
int err = 0;
@@ -82,7 +82,7 @@ int phy_clear_interrupt(struct phy_device *phydev)
*
* Returns 0 on success or < 0 on error.
*/
-int phy_config_interrupt(struct phy_device *phydev, u32 interrupts)
+static int phy_config_interrupt(struct phy_device *phydev, u32 interrupts)
{
int err = 0;
@@ -208,7 +208,7 @@ static inline int phy_find_valid(int idx, u32 features)
* duplexes. Drop down by one in this order: 1000/FULL,
* 1000/HALF, 100/FULL, 100/HALF, 10/FULL, 10/HALF.
*/
-void phy_sanitize_settings(struct phy_device *phydev)
+static void phy_sanitize_settings(struct phy_device *phydev)
{
u32 features = phydev->supported;
int idx;
@@ -223,7 +223,6 @@ void phy_sanitize_settings(struct phy_device *phydev)
phydev->speed = settings[idx].speed;
phydev->duplex = settings[idx].duplex;
}
-EXPORT_SYMBOL(phy_sanitize_settings);
/**
* phy_ethtool_sset - generic ethtool sset function, handles all the details
@@ -532,7 +531,7 @@ static irqreturn_t phy_interrupt(int irq, void *phy_dat)
* phy_enable_interrupts - Enable the interrupts from the PHY side
* @phydev: target phy_device struct
*/
-int phy_enable_interrupts(struct phy_device *phydev)
+static int phy_enable_interrupts(struct phy_device *phydev)
{
int err;
@@ -545,13 +544,12 @@ int phy_enable_interrupts(struct phy_device *phydev)
return err;
}
-EXPORT_SYMBOL(phy_enable_interrupts);
/**
* phy_disable_interrupts - Disable the PHY interrupts from the PHY side
* @phydev: target phy_device struct
*/
-int phy_disable_interrupts(struct phy_device *phydev)
+static int phy_disable_interrupts(struct phy_device *phydev)
{
int err;
@@ -574,7 +572,6 @@ phy_err:
return err;
}
-EXPORT_SYMBOL(phy_disable_interrupts);
/**
* phy_start_interrupts - request and enable interrupts for a PHY device
diff --git a/drivers/net/phy/phy_device.c b/drivers/net/phy/phy_device.c
index 16ddc77313cb..993c52c82aeb 100644
--- a/drivers/net/phy/phy_device.c
+++ b/drivers/net/phy/phy_device.c
@@ -57,6 +57,9 @@ extern void mdio_bus_exit(void);
static LIST_HEAD(phy_fixup_list);
static DEFINE_MUTEX(phy_fixup_lock);
+static int phy_attach_direct(struct net_device *dev, struct phy_device *phydev,
+ u32 flags, phy_interface_t interface);
+
/*
* Creates a new phy_fixup and adds it to the list
* @bus_id: A string which matches phydev->dev.bus_id (or PHY_ANY_ID)
@@ -146,7 +149,8 @@ int phy_scan_fixups(struct phy_device *phydev)
}
EXPORT_SYMBOL(phy_scan_fixups);
-struct phy_device* phy_device_create(struct mii_bus *bus, int addr, int phy_id)
+static struct phy_device* phy_device_create(struct mii_bus *bus,
+ int addr, int phy_id)
{
struct phy_device *dev;
@@ -193,7 +197,6 @@ struct phy_device* phy_device_create(struct mii_bus *bus, int addr, int phy_id)
return dev;
}
-EXPORT_SYMBOL(phy_device_create);
/**
* get_phy_id - reads the specified addr for its ID.
@@ -316,7 +319,7 @@ EXPORT_SYMBOL(phy_find_first);
* If you want to monitor your own link state, don't call
* this function.
*/
-void phy_prepare_link(struct phy_device *phydev,
+static void phy_prepare_link(struct phy_device *phydev,
void (*handler)(struct net_device *))
{
phydev->adjust_link = handler;
@@ -435,8 +438,8 @@ int phy_init_hw(struct phy_device *phydev)
* the attaching device, and given a callback for link status
* change. The phy_device is returned to the attaching driver.
*/
-int phy_attach_direct(struct net_device *dev, struct phy_device *phydev,
- u32 flags, phy_interface_t interface)
+static int phy_attach_direct(struct net_device *dev, struct phy_device *phydev,
+ u32 flags, phy_interface_t interface)
{
struct device *d = &phydev->dev;
@@ -473,7 +476,6 @@ int phy_attach_direct(struct net_device *dev, struct phy_device *phydev,
* (dev_flags and interface) */
return phy_init_hw(phydev);
}
-EXPORT_SYMBOL(phy_attach_direct);
/**
* phy_attach - attach a network device to a particular PHY device
@@ -540,7 +542,7 @@ EXPORT_SYMBOL(phy_detach);
* what is supported. Returns < 0 on error, 0 if the PHY's advertisement
* hasn't changed, and > 0 if it has changed.
*/
-int genphy_config_advert(struct phy_device *phydev)
+static int genphy_config_advert(struct phy_device *phydev)
{
u32 advertise;
int oldadv, adv;
@@ -605,7 +607,6 @@ int genphy_config_advert(struct phy_device *phydev)
return changed;
}
-EXPORT_SYMBOL(genphy_config_advert);
/**
* genphy_setup_forced - configures/forces speed/duplex from @phydev
@@ -615,7 +616,7 @@ EXPORT_SYMBOL(genphy_config_advert);
* to the values in phydev. Assumes that the values are valid.
* Please see phy_sanitize_settings().
*/
-int genphy_setup_forced(struct phy_device *phydev)
+static int genphy_setup_forced(struct phy_device *phydev)
{
int err;
int ctl = 0;
diff --git a/drivers/net/ppp_generic.c b/drivers/net/ppp_generic.c
index 09cf56d0416a..39659976a1ac 100644
--- a/drivers/net/ppp_generic.c
+++ b/drivers/net/ppp_generic.c
@@ -2584,16 +2584,16 @@ ppp_create_interface(struct net *net, int unit, int *retp)
*/
dev_net_set(dev, net);
- ret = -EEXIST;
mutex_lock(&pn->all_ppp_mutex);
if (unit < 0) {
unit = unit_get(&pn->units_idr, ppp);
if (unit < 0) {
- *retp = unit;
+ ret = unit;
goto out2;
}
} else {
+ ret = -EEXIST;
if (unit_find(&pn->units_idr, unit))
goto out2; /* unit already exists */
/*
@@ -2668,10 +2668,10 @@ static void ppp_shutdown_interface(struct ppp *ppp)
ppp->closing = 1;
ppp_unlock(ppp);
unregister_netdev(ppp->dev);
+ unit_put(&pn->units_idr, ppp->file.index);
} else
ppp_unlock(ppp);
- unit_put(&pn->units_idr, ppp->file.index);
ppp->file.dead = 1;
ppp->owner = NULL;
wake_up_interruptible(&ppp->file.rwait);
@@ -2859,8 +2859,7 @@ static void __exit ppp_cleanup(void)
* by holding all_ppp_mutex
*/
-/* associate pointer with specified number */
-static int unit_set(struct idr *p, void *ptr, int n)
+static int __unit_alloc(struct idr *p, void *ptr, int n)
{
int unit, err;
@@ -2871,10 +2870,24 @@ again:
}
err = idr_get_new_above(p, ptr, n, &unit);
- if (err == -EAGAIN)
- goto again;
+ if (err < 0) {
+ if (err == -EAGAIN)
+ goto again;
+ return err;
+ }
+
+ return unit;
+}
+
+/* associate pointer with specified number */
+static int unit_set(struct idr *p, void *ptr, int n)
+{
+ int unit;
- if (unit != n) {
+ unit = __unit_alloc(p, ptr, n);
+ if (unit < 0)
+ return unit;
+ else if (unit != n) {
idr_remove(p, unit);
return -EINVAL;
}
@@ -2885,19 +2898,7 @@ again:
/* get new free unit number and associate pointer with it */
static int unit_get(struct idr *p, void *ptr)
{
- int unit, err;
-
-again:
- if (!idr_pre_get(p, GFP_KERNEL)) {
- printk(KERN_ERR "PPP: No free memory for idr\n");
- return -ENOMEM;
- }
-
- err = idr_get_new_above(p, ptr, 0, &unit);
- if (err == -EAGAIN)
- goto again;
-
- return unit;
+ return __unit_alloc(p, ptr, 0);
}
/* put unit number back to a pool */
diff --git a/drivers/net/qlcnic/qlcnic.h b/drivers/net/qlcnic/qlcnic.h
index 26c37d3a5868..8ecc170c9b74 100644
--- a/drivers/net/qlcnic/qlcnic.h
+++ b/drivers/net/qlcnic/qlcnic.h
@@ -146,11 +146,13 @@
#define MAX_CMD_DESCRIPTORS 1024
#define MAX_RCV_DESCRIPTORS_1G 4096
#define MAX_RCV_DESCRIPTORS_10G 8192
+#define MAX_RCV_DESCRIPTORS_VF 2048
#define MAX_JUMBO_RCV_DESCRIPTORS_1G 512
#define MAX_JUMBO_RCV_DESCRIPTORS_10G 1024
#define DEFAULT_RCV_DESCRIPTORS_1G 2048
#define DEFAULT_RCV_DESCRIPTORS_10G 4096
+#define DEFAULT_RCV_DESCRIPTORS_VF 1024
#define MAX_RDS_RINGS 2
#define get_next_index(index, length) \
@@ -942,6 +944,7 @@ struct qlcnic_ipaddr {
#define QLCNIC_LOOPBACK_TEST 2
#define QLCNIC_FILTER_AGE 80
+#define QLCNIC_READD_AGE 20
#define QLCNIC_LB_MAX_FILTERS 64
struct qlcnic_filter {
@@ -970,6 +973,8 @@ struct qlcnic_adapter {
u16 num_txd;
u16 num_rxd;
u16 num_jumbo_rxd;
+ u16 max_rxd;
+ u16 max_jumbo_rxd;
u8 max_rds_rings;
u8 max_sds_rings;
@@ -1129,7 +1134,7 @@ struct qlcnic_eswitch {
#define MAX_RX_QUEUES 4
#define DEFAULT_MAC_LEARN 1
-#define IS_VALID_VLAN(vlan) (vlan >= MIN_VLAN_ID && vlan <= MAX_VLAN_ID)
+#define IS_VALID_VLAN(vlan) (vlan >= MIN_VLAN_ID && vlan < MAX_VLAN_ID)
#define IS_VALID_BW(bw) (bw >= MIN_BW && bw <= MAX_BW)
#define IS_VALID_TX_QUEUES(que) (que > 0 && que <= MAX_TX_QUEUES)
#define IS_VALID_RX_QUEUES(que) (que > 0 && que <= MAX_RX_QUEUES)
diff --git a/drivers/net/qlcnic/qlcnic_ethtool.c b/drivers/net/qlcnic/qlcnic_ethtool.c
index 25e93a53fca0..ec21d24015c4 100644
--- a/drivers/net/qlcnic/qlcnic_ethtool.c
+++ b/drivers/net/qlcnic/qlcnic_ethtool.c
@@ -437,14 +437,8 @@ qlcnic_get_ringparam(struct net_device *dev,
ring->rx_jumbo_pending = adapter->num_jumbo_rxd;
ring->tx_pending = adapter->num_txd;
- if (adapter->ahw.port_type == QLCNIC_GBE) {
- ring->rx_max_pending = MAX_RCV_DESCRIPTORS_1G;
- ring->rx_jumbo_max_pending = MAX_JUMBO_RCV_DESCRIPTORS_1G;
- } else {
- ring->rx_max_pending = MAX_RCV_DESCRIPTORS_10G;
- ring->rx_jumbo_max_pending = MAX_JUMBO_RCV_DESCRIPTORS_10G;
- }
-
+ ring->rx_max_pending = adapter->max_rxd;
+ ring->rx_jumbo_max_pending = adapter->max_jumbo_rxd;
ring->tx_max_pending = MAX_CMD_DESCRIPTORS;
ring->rx_mini_max_pending = 0;
@@ -472,24 +466,17 @@ qlcnic_set_ringparam(struct net_device *dev,
struct ethtool_ringparam *ring)
{
struct qlcnic_adapter *adapter = netdev_priv(dev);
- u16 max_rcv_desc = MAX_RCV_DESCRIPTORS_10G;
- u16 max_jumbo_desc = MAX_JUMBO_RCV_DESCRIPTORS_10G;
u16 num_rxd, num_jumbo_rxd, num_txd;
-
if (ring->rx_mini_pending)
return -EOPNOTSUPP;
- if (adapter->ahw.port_type == QLCNIC_GBE) {
- max_rcv_desc = MAX_RCV_DESCRIPTORS_1G;
- max_jumbo_desc = MAX_JUMBO_RCV_DESCRIPTORS_10G;
- }
-
num_rxd = qlcnic_validate_ringparam(ring->rx_pending,
- MIN_RCV_DESCRIPTORS, max_rcv_desc, "rx");
+ MIN_RCV_DESCRIPTORS, adapter->max_rxd, "rx");
num_jumbo_rxd = qlcnic_validate_ringparam(ring->rx_jumbo_pending,
- MIN_JUMBO_DESCRIPTORS, max_jumbo_desc, "rx jumbo");
+ MIN_JUMBO_DESCRIPTORS, adapter->max_jumbo_rxd,
+ "rx jumbo");
num_txd = qlcnic_validate_ringparam(ring->tx_pending,
MIN_CMD_DESCRIPTORS, MAX_CMD_DESCRIPTORS, "tx");
diff --git a/drivers/net/qlcnic/qlcnic_main.c b/drivers/net/qlcnic/qlcnic_main.c
index f047c7c48314..a3dcd04be22f 100644
--- a/drivers/net/qlcnic/qlcnic_main.c
+++ b/drivers/net/qlcnic/qlcnic_main.c
@@ -656,13 +656,23 @@ qlcnic_check_options(struct qlcnic_adapter *adapter)
dev_info(&pdev->dev, "firmware v%d.%d.%d\n",
fw_major, fw_minor, fw_build);
-
if (adapter->ahw.port_type == QLCNIC_XGBE) {
- adapter->num_rxd = DEFAULT_RCV_DESCRIPTORS_10G;
+ if (adapter->flags & QLCNIC_ESWITCH_ENABLED) {
+ adapter->num_rxd = DEFAULT_RCV_DESCRIPTORS_VF;
+ adapter->max_rxd = MAX_RCV_DESCRIPTORS_VF;
+ } else {
+ adapter->num_rxd = DEFAULT_RCV_DESCRIPTORS_10G;
+ adapter->max_rxd = MAX_RCV_DESCRIPTORS_10G;
+ }
+
adapter->num_jumbo_rxd = MAX_JUMBO_RCV_DESCRIPTORS_10G;
+ adapter->max_jumbo_rxd = MAX_JUMBO_RCV_DESCRIPTORS_10G;
+
} else if (adapter->ahw.port_type == QLCNIC_GBE) {
adapter->num_rxd = DEFAULT_RCV_DESCRIPTORS_1G;
adapter->num_jumbo_rxd = MAX_JUMBO_RCV_DESCRIPTORS_1G;
+ adapter->max_jumbo_rxd = MAX_JUMBO_RCV_DESCRIPTORS_1G;
+ adapter->max_rxd = MAX_RCV_DESCRIPTORS_1G;
}
adapter->msix_supported = !!use_msi_x;
@@ -1440,7 +1450,6 @@ qlcnic_setup_netdev(struct qlcnic_adapter *adapter,
netdev->irq = adapter->msix_entries[0].vector;
netif_carrier_off(netdev);
- netif_stop_queue(netdev);
err = register_netdev(netdev);
if (err) {
@@ -1860,6 +1869,11 @@ qlcnic_send_filter(struct qlcnic_adapter *adapter,
hlist_for_each_entry_safe(tmp_fil, tmp_hnode, n, head, fnode) {
if (!memcmp(tmp_fil->faddr, &src_addr, ETH_ALEN) &&
tmp_fil->vlan_id == vlan_id) {
+
+ if (jiffies >
+ (QLCNIC_READD_AGE * HZ + tmp_fil->ftime))
+ qlcnic_change_filter(adapter, src_addr, vlan_id,
+ tx_ring);
tmp_fil->ftime = jiffies;
return;
}
diff --git a/drivers/net/qlge/qlge.h b/drivers/net/qlge/qlge.h
index a478786840a6..22821398fc63 100644
--- a/drivers/net/qlge/qlge.h
+++ b/drivers/net/qlge/qlge.h
@@ -2226,7 +2226,6 @@ int ql_dump_risc_ram_area(struct ql_adapter *qdev, void *buf,
int ql_core_dump(struct ql_adapter *qdev,
struct ql_mpi_coredump *mpi_coredump);
int ql_mb_about_fw(struct ql_adapter *qdev);
-int ql_wol(struct ql_adapter *qdev);
int ql_mb_wol_set_magic(struct ql_adapter *qdev, u32 enable_wol);
int ql_mb_wol_mode(struct ql_adapter *qdev, u32 wol);
int ql_mb_set_led_cfg(struct ql_adapter *qdev, u32 led_config);
@@ -2243,16 +2242,13 @@ netdev_tx_t ql_lb_send(struct sk_buff *skb, struct net_device *ndev);
void ql_check_lb_frame(struct ql_adapter *, struct sk_buff *);
int ql_own_firmware(struct ql_adapter *qdev);
int ql_clean_lb_rx_ring(struct rx_ring *rx_ring, int budget);
-void qlge_set_multicast_list(struct net_device *ndev);
-#if 1
-#define QL_ALL_DUMP
-#define QL_REG_DUMP
-#define QL_DEV_DUMP
-#define QL_CB_DUMP
+/* #define QL_ALL_DUMP */
+/* #define QL_REG_DUMP */
+/* #define QL_DEV_DUMP */
+/* #define QL_CB_DUMP */
/* #define QL_IB_DUMP */
/* #define QL_OB_DUMP */
-#endif
#ifdef QL_REG_DUMP
extern void ql_dump_xgmac_control_regs(struct ql_adapter *qdev);
diff --git a/drivers/net/qlge/qlge_main.c b/drivers/net/qlge/qlge_main.c
index ba0053d8515e..528eaef5308f 100644
--- a/drivers/net/qlge/qlge_main.c
+++ b/drivers/net/qlge/qlge_main.c
@@ -62,15 +62,15 @@ static const u32 default_msg =
/* NETIF_MSG_PKTDATA | */
NETIF_MSG_HW | NETIF_MSG_WOL | 0;
-static int debug = 0x00007fff; /* defaults above */
-module_param(debug, int, 0);
+static int debug = -1; /* defaults above */
+module_param(debug, int, 0664);
MODULE_PARM_DESC(debug, "Debug level (0=none,...,16=all)");
#define MSIX_IRQ 0
#define MSI_IRQ 1
#define LEG_IRQ 2
static int qlge_irq_type = MSIX_IRQ;
-module_param(qlge_irq_type, int, MSIX_IRQ);
+module_param(qlge_irq_type, int, 0664);
MODULE_PARM_DESC(qlge_irq_type, "0 = MSI-X, 1 = MSI, 2 = Legacy.");
static int qlge_mpi_coredump;
@@ -94,6 +94,9 @@ static DEFINE_PCI_DEVICE_TABLE(qlge_pci_tbl) = {
MODULE_DEVICE_TABLE(pci, qlge_pci_tbl);
+static int ql_wol(struct ql_adapter *qdev);
+static void qlge_set_multicast_list(struct net_device *ndev);
+
/* This hardware semaphore causes exclusive access to
* resources shared between the NIC driver, MPI firmware,
* FCOE firmware and the FC driver.
@@ -2382,6 +2385,20 @@ static void qlge_vlan_rx_kill_vid(struct net_device *ndev, u16 vid)
}
+static void qlge_restore_vlan(struct ql_adapter *qdev)
+{
+ qlge_vlan_rx_register(qdev->ndev, qdev->vlgrp);
+
+ if (qdev->vlgrp) {
+ u16 vid;
+ for (vid = 0; vid < VLAN_N_VID; vid++) {
+ if (!vlan_group_get_device(qdev->vlgrp, vid))
+ continue;
+ qlge_vlan_rx_add_vid(qdev->ndev, vid);
+ }
+ }
+}
+
/* MSI-X Multiple Vector Interrupt Handler for inbound completions. */
static irqreturn_t qlge_msix_rx_isr(int irq, void *dev_id)
{
@@ -3842,7 +3859,7 @@ static void ql_display_dev_info(struct net_device *ndev)
"MAC address %pM\n", ndev->dev_addr);
}
-int ql_wol(struct ql_adapter *qdev)
+static int ql_wol(struct ql_adapter *qdev)
{
int status = 0;
u32 wol = MB_WOL_DISABLE;
@@ -3957,6 +3974,9 @@ static int ql_adapter_up(struct ql_adapter *qdev)
clear_bit(QL_PROMISCUOUS, &qdev->flags);
qlge_set_multicast_list(qdev->ndev);
+ /* Restore vlan setting. */
+ qlge_restore_vlan(qdev);
+
ql_enable_interrupts(qdev);
ql_enable_all_completion_interrupts(qdev);
netif_tx_start_all_queues(qdev->ndev);
@@ -4242,7 +4262,7 @@ static struct net_device_stats *qlge_get_stats(struct net_device
return &ndev->stats;
}
-void qlge_set_multicast_list(struct net_device *ndev)
+static void qlge_set_multicast_list(struct net_device *ndev)
{
struct ql_adapter *qdev = (struct ql_adapter *)netdev_priv(ndev);
struct netdev_hw_addr *ha;
diff --git a/drivers/net/qlge/qlge_mpi.c b/drivers/net/qlge/qlge_mpi.c
index f84e8570c7cb..0e7c7c7ee164 100644
--- a/drivers/net/qlge/qlge_mpi.c
+++ b/drivers/net/qlge/qlge_mpi.c
@@ -87,7 +87,7 @@ exit:
return status;
}
-int ql_soft_reset_mpi_risc(struct ql_adapter *qdev)
+static int ql_soft_reset_mpi_risc(struct ql_adapter *qdev)
{
int status;
status = ql_write_mpi_reg(qdev, 0x00001010, 1);
@@ -681,7 +681,7 @@ int ql_mb_get_fw_state(struct ql_adapter *qdev)
/* Send and ACK mailbox command to the firmware to
* let it continue with the change.
*/
-int ql_mb_idc_ack(struct ql_adapter *qdev)
+static int ql_mb_idc_ack(struct ql_adapter *qdev)
{
struct mbox_params mbc;
struct mbox_params *mbcp = &mbc;
@@ -744,7 +744,7 @@ int ql_mb_set_port_cfg(struct ql_adapter *qdev)
return status;
}
-int ql_mb_dump_ram(struct ql_adapter *qdev, u64 req_dma, u32 addr,
+static int ql_mb_dump_ram(struct ql_adapter *qdev, u64 req_dma, u32 addr,
u32 size)
{
int status = 0;
diff --git a/drivers/net/r8169.c b/drivers/net/r8169.c
index d88ce9fb1cbd..7d33ef4bcb4a 100644
--- a/drivers/net/r8169.c
+++ b/drivers/net/r8169.c
@@ -846,10 +846,10 @@ static int rtl8169_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
else
tp->features &= ~RTL_FEATURE_WOL;
__rtl8169_set_wol(tp, wol->wolopts);
- device_set_wakeup_enable(&tp->pci_dev->dev, wol->wolopts);
-
spin_unlock_irq(&tp->lock);
+ device_set_wakeup_enable(&tp->pci_dev->dev, wol->wolopts);
+
return 0;
}
@@ -2931,7 +2931,7 @@ static const struct rtl_cfg_info {
.hw_start = rtl_hw_start_8168,
.region = 2,
.align = 8,
- .intr_event = SYSErr | RxFIFOOver | LinkChg | RxOverflow |
+ .intr_event = SYSErr | LinkChg | RxOverflow |
TxErr | TxOK | RxOK | RxErr,
.napi_event = TxErr | TxOK | RxOK | RxOverflow,
.features = RTL_FEATURE_GMII | RTL_FEATURE_MSI,
@@ -4440,8 +4440,7 @@ static inline void rtl8169_rx_csum(struct sk_buff *skb, u32 opts1)
u32 status = opts1 & RxProtoMask;
if (((status == RxProtoTCP) && !(opts1 & TCPFail)) ||
- ((status == RxProtoUDP) && !(opts1 & UDPFail)) ||
- ((status == RxProtoIP) && !(opts1 & IPFail)))
+ ((status == RxProtoUDP) && !(opts1 & UDPFail)))
skb->ip_summed = CHECKSUM_UNNECESSARY;
else
skb_checksum_none_assert(skb);
@@ -4588,7 +4587,8 @@ static irqreturn_t rtl8169_interrupt(int irq, void *dev_instance)
}
/* Work around for rx fifo overflow */
- if (unlikely(status & RxFIFOOver)) {
+ if (unlikely(status & RxFIFOOver) &&
+ (tp->mac_version == RTL_GIGA_MAC_VER_11)) {
netif_stop_queue(dev);
rtl8169_tx_timeout(dev);
break;
diff --git a/drivers/net/sb1000.c b/drivers/net/sb1000.c
index a9ae505e1baf..66c2f1a01963 100644
--- a/drivers/net/sb1000.c
+++ b/drivers/net/sb1000.c
@@ -961,9 +961,9 @@ sb1000_open(struct net_device *dev)
lp->rx_error_count = 0;
lp->rx_error_dpc_count = 0;
lp->rx_session_id[0] = 0x50;
- lp->rx_session_id[0] = 0x48;
- lp->rx_session_id[0] = 0x44;
- lp->rx_session_id[0] = 0x42;
+ lp->rx_session_id[1] = 0x48;
+ lp->rx_session_id[2] = 0x44;
+ lp->rx_session_id[3] = 0x42;
lp->rx_frame_id[0] = 0;
lp->rx_frame_id[1] = 0;
lp->rx_frame_id[2] = 0;
diff --git a/drivers/net/sgiseeq.c b/drivers/net/sgiseeq.c
index 9265315baa0b..3a0cc63428ee 100644
--- a/drivers/net/sgiseeq.c
+++ b/drivers/net/sgiseeq.c
@@ -531,7 +531,7 @@ static int sgiseeq_open(struct net_device *dev)
if (request_irq(irq, sgiseeq_interrupt, 0, sgiseeqstr, dev)) {
printk(KERN_ERR "Seeq8003: Can't get irq %d\n", dev->irq);
- err = -EAGAIN;
+ return -EAGAIN;
}
err = init_seeq(dev, sp, sregs);
diff --git a/drivers/net/skge.c b/drivers/net/skge.c
index bfec2e0f5275..220e0398f1d5 100644
--- a/drivers/net/skge.c
+++ b/drivers/net/skge.c
@@ -3858,7 +3858,6 @@ static struct net_device *skge_devinit(struct skge_hw *hw, int port,
/* device is off until link detection */
netif_carrier_off(dev);
- netif_stop_queue(dev);
return dev;
}
diff --git a/drivers/net/slhc.c b/drivers/net/slhc.c
index ac279fad9d45..ab9e3b785b5b 100644
--- a/drivers/net/slhc.c
+++ b/drivers/net/slhc.c
@@ -688,18 +688,8 @@ slhc_toss(struct slcompress *comp)
return 0;
}
-
-/* VJ header compression */
-EXPORT_SYMBOL(slhc_init);
-EXPORT_SYMBOL(slhc_free);
-EXPORT_SYMBOL(slhc_remember);
-EXPORT_SYMBOL(slhc_compress);
-EXPORT_SYMBOL(slhc_uncompress);
-EXPORT_SYMBOL(slhc_toss);
-
#else /* CONFIG_INET */
-
int
slhc_toss(struct slcompress *comp)
{
@@ -738,6 +728,10 @@ slhc_init(int rslots, int tslots)
printk(KERN_DEBUG "Called IP function on non IP-system: slhc_init");
return NULL;
}
+
+#endif /* CONFIG_INET */
+
+/* VJ header compression */
EXPORT_SYMBOL(slhc_init);
EXPORT_SYMBOL(slhc_free);
EXPORT_SYMBOL(slhc_remember);
@@ -745,5 +739,4 @@ EXPORT_SYMBOL(slhc_compress);
EXPORT_SYMBOL(slhc_uncompress);
EXPORT_SYMBOL(slhc_toss);
-#endif /* CONFIG_INET */
MODULE_LICENSE("Dual BSD/GPL");
diff --git a/drivers/net/smsc911x.c b/drivers/net/smsc911x.c
index a8e5856ce882..64bfdae5956f 100644
--- a/drivers/net/smsc911x.c
+++ b/drivers/net/smsc911x.c
@@ -2075,7 +2075,7 @@ static int __devinit smsc911x_drv_probe(struct platform_device *pdev)
} else {
/* Try reading mac address from device. if EEPROM is present
* it will already have been set */
- smsc911x_read_mac_address(dev);
+ smsc_get_mac(dev);
if (is_valid_ether_addr(dev->dev_addr)) {
/* eeprom values are valid so use them */
@@ -2176,6 +2176,7 @@ static struct platform_driver smsc911x_driver = {
/* Entry point for loading the module */
static int __init smsc911x_init_module(void)
{
+ SMSC_INITIALIZE();
return platform_driver_register(&smsc911x_driver);
}
diff --git a/drivers/net/smsc911x.h b/drivers/net/smsc911x.h
index 016360c65ce2..50f712e99e96 100644
--- a/drivers/net/smsc911x.h
+++ b/drivers/net/smsc911x.h
@@ -22,7 +22,7 @@
#define __SMSC911X_H__
#define TX_FIFO_LOW_THRESHOLD ((u32)1600)
-#define SMSC911X_EEPROM_SIZE ((u32)7)
+#define SMSC911X_EEPROM_SIZE ((u32)128)
#define USE_DEBUG 0
/* This is the maximum number of packets to be received every
@@ -394,4 +394,15 @@
#define LPA_PAUSE_ALL (LPA_PAUSE_CAP | \
LPA_PAUSE_ASYM)
+/*
+ * Provide hooks to let the arch add to the initialisation procedure
+ * and to override the source of the MAC address.
+ */
+#define SMSC_INITIALIZE() do {} while (0)
+#define smsc_get_mac(dev) smsc911x_read_mac_address((dev))
+
+#ifdef CONFIG_SMSC911X_ARCH_HOOKS
+#include <asm/smsc911x.h>
+#endif
+
#endif /* __SMSC911X_H__ */
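For readers wondering what an architecture hook looks like in practice: a hypothetical <asm/smsc911x.h> that a platform selecting CONFIG_SMSC911X_ARCH_HOOKS might provide (the board-specific function names are invented):

/* Hypothetical <asm/smsc911x.h>; it is included after the defaults above,
 * so it must #undef them before overriding. */
#undef SMSC_INITIALIZE
#define SMSC_INITIALIZE()	my_board_smsc911x_init()

#undef smsc_get_mac
#define smsc_get_mac(dev)	my_board_smsc911x_get_mac(dev)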
diff --git a/drivers/net/stmmac/stmmac_main.c b/drivers/net/stmmac/stmmac_main.c
index 823b9e6431d5..06bc6034ce81 100644
--- a/drivers/net/stmmac/stmmac_main.c
+++ b/drivers/net/stmmac/stmmac_main.c
@@ -337,33 +337,19 @@ static int stmmac_init_phy(struct net_device *dev)
return 0;
}
-static inline void stmmac_mac_enable_rx(void __iomem *ioaddr)
+static inline void stmmac_enable_mac(void __iomem *ioaddr)
{
u32 value = readl(ioaddr + MAC_CTRL_REG);
- value |= MAC_RNABLE_RX;
- /* Set the RE (receive enable bit into the MAC CTRL register). */
- writel(value, ioaddr + MAC_CTRL_REG);
-}
-static inline void stmmac_mac_enable_tx(void __iomem *ioaddr)
-{
- u32 value = readl(ioaddr + MAC_CTRL_REG);
- value |= MAC_ENABLE_TX;
- /* Set the TE (transmit enable bit into the MAC CTRL register). */
+ value |= MAC_RNABLE_RX | MAC_ENABLE_TX;
writel(value, ioaddr + MAC_CTRL_REG);
}
-static inline void stmmac_mac_disable_rx(void __iomem *ioaddr)
+static inline void stmmac_disable_mac(void __iomem *ioaddr)
{
u32 value = readl(ioaddr + MAC_CTRL_REG);
- value &= ~MAC_RNABLE_RX;
- writel(value, ioaddr + MAC_CTRL_REG);
-}
-static inline void stmmac_mac_disable_tx(void __iomem *ioaddr)
-{
- u32 value = readl(ioaddr + MAC_CTRL_REG);
- value &= ~MAC_ENABLE_TX;
+ value &= ~(MAC_ENABLE_TX | MAC_RNABLE_RX);
writel(value, ioaddr + MAC_CTRL_REG);
}
@@ -857,8 +843,7 @@ static int stmmac_open(struct net_device *dev)
writel(0xffffffff, priv->ioaddr + MMC_LOW_INTR_MASK);
/* Enable the MAC Rx/Tx */
- stmmac_mac_enable_rx(priv->ioaddr);
- stmmac_mac_enable_tx(priv->ioaddr);
+ stmmac_enable_mac(priv->ioaddr);
/* Set the HW DMA mode and the COE */
stmmac_dma_operation_mode(priv);
@@ -928,9 +913,8 @@ static int stmmac_release(struct net_device *dev)
/* Release and free the Rx/Tx resources */
free_dma_desc_resources(priv);
- /* Disable the MAC core */
- stmmac_mac_disable_tx(priv->ioaddr);
- stmmac_mac_disable_rx(priv->ioaddr);
+ /* Disable the MAC Rx/Tx */
+ stmmac_disable_mac(priv->ioaddr);
netif_carrier_off(dev);
@@ -1787,8 +1771,7 @@ static int stmmac_dvr_remove(struct platform_device *pdev)
priv->hw->dma->stop_rx(priv->ioaddr);
priv->hw->dma->stop_tx(priv->ioaddr);
- stmmac_mac_disable_rx(priv->ioaddr);
- stmmac_mac_disable_tx(priv->ioaddr);
+ stmmac_disable_mac(priv->ioaddr);
netif_carrier_off(ndev);
@@ -1839,13 +1822,11 @@ static int stmmac_suspend(struct platform_device *pdev, pm_message_t state)
dis_ic);
priv->hw->desc->init_tx_desc(priv->dma_tx, priv->dma_tx_size);
- stmmac_mac_disable_tx(priv->ioaddr);
-
/* Enable Power down mode by programming the PMT regs */
if (device_can_wakeup(priv->device))
priv->hw->mac->pmt(priv->ioaddr, priv->wolopts);
else
- stmmac_mac_disable_rx(priv->ioaddr);
+ stmmac_disable_mac(priv->ioaddr);
} else {
priv->shutdown = 1;
/* Although this can appear slightly redundant it actually
@@ -1886,8 +1867,7 @@ static int stmmac_resume(struct platform_device *pdev)
netif_device_attach(dev);
/* Enable the MAC and DMA */
- stmmac_mac_enable_rx(priv->ioaddr);
- stmmac_mac_enable_tx(priv->ioaddr);
+ stmmac_enable_mac(priv->ioaddr);
priv->hw->dma->start_tx(priv->ioaddr);
priv->hw->dma->start_rx(priv->ioaddr);
diff --git a/drivers/net/tg3.c b/drivers/net/tg3.c
index 852e917778f8..30ccbb6d097a 100644
--- a/drivers/net/tg3.c
+++ b/drivers/net/tg3.c
@@ -9948,16 +9948,16 @@ static int tg3_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
!((tp->tg3_flags & TG3_FLAG_WOL_CAP) && device_can_wakeup(dp)))
return -EINVAL;
+ device_set_wakeup_enable(dp, wol->wolopts & WAKE_MAGIC);
+
spin_lock_bh(&tp->lock);
- if (wol->wolopts & WAKE_MAGIC) {
+ if (device_may_wakeup(dp))
tp->tg3_flags |= TG3_FLAG_WOL_ENABLE;
- device_set_wakeup_enable(dp, true);
- } else {
+ else
tp->tg3_flags &= ~TG3_FLAG_WOL_ENABLE;
- device_set_wakeup_enable(dp, false);
- }
spin_unlock_bh(&tp->lock);
+
return 0;
}
diff --git a/drivers/net/tile/Makefile b/drivers/net/tile/Makefile
new file mode 100644
index 000000000000..f634f142cab4
--- /dev/null
+++ b/drivers/net/tile/Makefile
@@ -0,0 +1,10 @@
+#
+# Makefile for the TILE on-chip networking support.
+#
+
+obj-$(CONFIG_TILE_NET) += tile_net.o
+ifdef CONFIG_TILEGX
+tile_net-objs := tilegx.o mpipe.o iorpc_mpipe.o dma_queue.o
+else
+tile_net-objs := tilepro.o
+endif
diff --git a/drivers/net/tile/tilepro.c b/drivers/net/tile/tilepro.c
new file mode 100644
index 000000000000..0e6bac5ec65b
--- /dev/null
+++ b/drivers/net/tile/tilepro.c
@@ -0,0 +1,2406 @@
+/*
+ * Copyright 2010 Tilera Corporation. All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation, version 2.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
+ * NON INFRINGEMENT. See the GNU General Public License for
+ * more details.
+ */
+
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/moduleparam.h>
+#include <linux/sched.h>
+#include <linux/kernel.h> /* printk() */
+#include <linux/slab.h> /* kmalloc() */
+#include <linux/errno.h> /* error codes */
+#include <linux/types.h> /* size_t */
+#include <linux/interrupt.h>
+#include <linux/in.h>
+#include <linux/netdevice.h> /* struct device, and other headers */
+#include <linux/etherdevice.h> /* eth_type_trans */
+#include <linux/skbuff.h>
+#include <linux/ioctl.h>
+#include <linux/cdev.h>
+#include <linux/hugetlb.h>
+#include <linux/in6.h>
+#include <linux/timer.h>
+#include <linux/io.h>
+#include <asm/checksum.h>
+#include <asm/homecache.h>
+
+#include <hv/drv_xgbe_intf.h>
+#include <hv/drv_xgbe_impl.h>
+#include <hv/hypervisor.h>
+#include <hv/netio_intf.h>
+
+/* For TSO */
+#include <linux/ip.h>
+#include <linux/tcp.h>
+
+
+/* There is no singlethread_cpu, so schedule work on the current cpu. */
+#define singlethread_cpu -1
+
+
+/*
+ * First, "tile_net_init_module()" initializes all four "devices" which
+ * can be used by linux.
+ *
+ * Then, "ifconfig DEVICE up" calls "tile_net_open()", which analyzes
+ * the network cpus, then uses "tile_net_open_aux()" to initialize
+ * LIPP/LEPP, and then uses "tile_net_open_inner()" to register all
+ * the tiles, provide buffers to LIPP, allow ingress to start, and
+ * turn on hypervisor interrupt handling (and NAPI) on all tiles.
+ *
+ * If registration fails due to the link being down, then "retry_work"
+ * is used to keep calling "tile_net_open_inner()" until it succeeds.
+ *
+ * If "ifconfig DEVICE down" is called, it uses "tile_net_stop()" to
+ * stop egress, drain the LIPP buffers, unregister all the tiles, stop
+ * LIPP/LEPP, and wipe the LEPP queue.
+ *
+ * We start out with the ingress interrupt enabled on each CPU. When
+ * this interrupt fires, we disable it, and call "napi_schedule()".
+ * This will cause "tile_net_poll()" to be called, which will pull
+ * packets from the netio queue, filtering them out, or passing them
+ * to "netif_receive_skb()". If our budget is exhausted, we will
+ * return, knowing we will be called again later. Otherwise, we
+ * reenable the ingress interrupt, and call "napi_complete()".
+ *
+ *
+ * NOTE: The use of "native_driver" ensures that EPP exists, and that
+ * "epp_sendv" is legal, and that "LIPP" is being used.
+ *
+ * NOTE: Failing to free completions for an arbitrarily long time
+ * (which is defined to be illegal) does in fact cause bizarre
+ * problems. The "egress_timer" helps prevent this from happening.
+ *
+ * NOTE: The egress code can be interrupted by the interrupt handler.
+ */
+
+
+/* HACK: Allow use of "jumbo" packets. */
+/* This should be 1500 if "jumbo" is not set in LIPP. */
+/* This should be at most 10226 (10240 - 14) if "jumbo" is set in LIPP. */
+/* ISSUE: This has not been thoroughly tested (except at 1500). */
+#define TILE_NET_MTU 1500
+
+/* HACK: Define to support GSO. */
+/* ISSUE: This may actually hurt performance of the TCP blaster. */
+/* #define TILE_NET_GSO */
+
+/* Define this to collapse "duplicate" acks. */
+/* #define IGNORE_DUP_ACKS */
+
+/* HACK: Define this to verify incoming packets. */
+/* #define TILE_NET_VERIFY_INGRESS */
+
+/* Use 3000 to enable the Linux Traffic Control (QoS) layer, else 0. */
+#define TILE_NET_TX_QUEUE_LEN 0
+
+/* Define to dump packets (prints out the whole packet on tx and rx). */
+/* #define TILE_NET_DUMP_PACKETS */
+
+/* Define to enable debug spew (all PDEBUG's are enabled). */
+/* #define TILE_NET_DEBUG */
+
+
+/* Define to activate paranoia checks. */
+/* #define TILE_NET_PARANOIA */
+
+/* Default transmit lockup timeout period, in jiffies. */
+#define TILE_NET_TIMEOUT (5 * HZ)
+
+/* Default retry interval for bringing up the NetIO interface, in jiffies. */
+#define TILE_NET_RETRY_INTERVAL (5 * HZ)
+
+/* Number of ports (xgbe0, xgbe1, gbe0, gbe1). */
+#define TILE_NET_DEVS 4
+
+
+
+/* Paranoia. */
+#if NET_IP_ALIGN != LIPP_PACKET_PADDING
+#error "NET_IP_ALIGN must match LIPP_PACKET_PADDING."
+#endif
+
+
+/* Debug print. */
+#ifdef TILE_NET_DEBUG
+#define PDEBUG(fmt, args...) net_printk(fmt, ## args)
+#else
+#define PDEBUG(fmt, args...)
+#endif
+
+
+MODULE_AUTHOR("Tilera");
+MODULE_LICENSE("GPL");
+
+
+#define IS_MULTICAST(mac_addr) \
+ (((u8 *)(mac_addr))[0] & 0x01)
+
+#define IS_BROADCAST(mac_addr) \
+ (((u16 *)(mac_addr))[0] == 0xffff)
+
+
+/*
+ * Queue of incoming packets for a specific cpu and device.
+ *
+ * Includes a pointer to the "system" data, and the actual "user" data.
+ */
+struct tile_netio_queue {
+ netio_queue_impl_t *__system_part;
+ netio_queue_user_impl_t __user_part;
+
+};
+
+
+/*
+ * Statistics counters for a specific cpu and device.
+ */
+struct tile_net_stats_t {
+ u32 rx_packets;
+ u32 rx_bytes;
+ u32 tx_packets;
+ u32 tx_bytes;
+};
+
+
+/*
+ * Info for a specific cpu and device.
+ *
+ * ISSUE: There is a "dev" pointer in "napi" as well.
+ */
+struct tile_net_cpu {
+ /* The NAPI struct. */
+ struct napi_struct napi;
+ /* Packet queue. */
+ struct tile_netio_queue queue;
+ /* Statistics. */
+ struct tile_net_stats_t stats;
+ /* ISSUE: Is this needed? */
+ bool napi_enabled;
+ /* True if this tile has successfully registered with the IPP. */
+ bool registered;
+ /* True if the link was down last time we tried to register. */
+ bool link_down;
+ /* True if "egress_timer" is scheduled. */
+ bool egress_timer_scheduled;
+ /* Number of small sk_buffs which must still be provided. */
+ unsigned int num_needed_small_buffers;
+ /* Number of large sk_buffs which must still be provided. */
+ unsigned int num_needed_large_buffers;
+ /* A timer for handling egress completions. */
+ struct timer_list egress_timer;
+};
+
+
+/*
+ * Info for a specific device.
+ */
+struct tile_net_priv {
+ /* Our network device. */
+ struct net_device *dev;
+ /* The actual egress queue. */
+ lepp_queue_t *epp_queue;
+ /* Protects "epp_queue->cmd_tail" and "epp_queue->comp_tail" */
+ spinlock_t cmd_lock;
+ /* Protects "epp_queue->comp_head". */
+ spinlock_t comp_lock;
+ /* The hypervisor handle for this interface. */
+ int hv_devhdl;
+ /* The intr bit mask that IDs this device. */
+ u32 intr_id;
+ /* True iff "tile_net_open_aux()" has succeeded. */
+ int partly_opened;
+ /* True iff "tile_net_open_inner()" has succeeded. */
+ int fully_opened;
+ /* Effective network cpus. */
+ struct cpumask network_cpus_map;
+ /* Number of network cpus. */
+ int network_cpus_count;
+ /* Credits per network cpu. */
+ int network_cpus_credits;
+ /* Network stats. */
+ struct net_device_stats stats;
+ /* For NetIO bringup retries. */
+ struct delayed_work retry_work;
+ /* Quick access to per cpu data. */
+ struct tile_net_cpu *cpu[NR_CPUS];
+};
+
+
+/*
+ * The actual devices (xgbe0, xgbe1, gbe0, gbe1).
+ */
+static struct net_device *tile_net_devs[TILE_NET_DEVS];
+
+/*
+ * The "tile_net_cpu" structures for each device.
+ */
+static DEFINE_PER_CPU(struct tile_net_cpu, hv_xgbe0);
+static DEFINE_PER_CPU(struct tile_net_cpu, hv_xgbe1);
+static DEFINE_PER_CPU(struct tile_net_cpu, hv_gbe0);
+static DEFINE_PER_CPU(struct tile_net_cpu, hv_gbe1);
+
+
+/*
+ * True if "network_cpus" was specified.
+ */
+static bool network_cpus_used;
+
+/*
+ * The actual cpus in "network_cpus".
+ */
+static struct cpumask network_cpus_map;
+
+
+
+#ifdef TILE_NET_DEBUG
+/*
+ * printk with extra stuff.
+ *
+ * We print the CPU we're running in brackets.
+ */
+static void net_printk(char *fmt, ...)
+{
+ int i;
+ int len;
+ va_list args;
+ static char buf[256];
+
+ len = sprintf(buf, "tile_net[%2.2d]: ", smp_processor_id());
+ va_start(args, fmt);
+ i = vscnprintf(buf + len, sizeof(buf) - len - 1, fmt, args);
+ va_end(args);
+ buf[255] = '\0';
+ pr_notice(buf);
+}
+#endif
+
+
+#ifdef TILE_NET_DUMP_PACKETS
+/*
+ * Dump a packet.
+ */
+static void dump_packet(unsigned char *data, unsigned long length, char *s)
+{
+ unsigned long i;
+ static unsigned int count;
+ char buf[128]; /* line buffer assembled by the sprintf() calls below */
+
+ pr_info("dump_packet(data %p, length 0x%lx s %s count 0x%x)\n",
+ data, length, s, count++);
+
+ pr_info("\n");
+
+ for (i = 0; i < length; i++) {
+ if ((i & 0xf) == 0)
+ sprintf(buf, "%8.8lx:", i);
+ sprintf(buf + strlen(buf), " %2.2x", data[i]);
+ if ((i & 0xf) == 0xf || i == length - 1)
+ pr_info("%s\n", buf);
+ }
+}
+#endif
+
+
+/*
+ * Provide support for the __netio_fastio1() swint
+ * (see <hv/drv_xgbe_intf.h> for how it is used).
+ *
+ * The fastio swint2 call may clobber all the caller-saved registers.
+ * It rarely clobbers memory, but we allow for the possibility in
+ * the signature just to be on the safe side.
+ *
+ * Also, gcc doesn't seem to allow an input operand to be
+ * clobbered, so we fake it with dummy outputs.
+ *
+ * This function can't be static because of the way it is declared
+ * in the netio header.
+ */
+inline int __netio_fastio1(u32 fastio_index, u32 arg0)
+{
+ long result, clobber_r1, clobber_r10;
+ asm volatile("swint2"
+ : "=R00" (result),
+ "=R01" (clobber_r1), "=R10" (clobber_r10)
+ : "R10" (fastio_index), "R01" (arg0)
+ : "memory", "r2", "r3", "r4",
+ "r5", "r6", "r7", "r8", "r9",
+ "r11", "r12", "r13", "r14",
+ "r15", "r16", "r17", "r18", "r19",
+ "r20", "r21", "r22", "r23", "r24",
+ "r25", "r26", "r27", "r28", "r29");
+ return result;
+}
+
+
+/*
+ * Provide a linux buffer to LIPP.
+ */
+static void tile_net_provide_linux_buffer(struct tile_net_cpu *info,
+ void *va, bool small)
+{
+ struct tile_netio_queue *queue = &info->queue;
+
+ /* Convert "va" and "small" to "linux_buffer_t". */
+ unsigned int buffer = ((unsigned int)(__pa(va) >> 7) << 1) + small;
+
+ __netio_fastio_free_buffer(queue->__user_part.__fastio_index, buffer);
+}
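The one-liner above packs a 128-byte-aligned buffer address and a small/large flag into a single 32-bit linux_buffer_t; a sketch of the round trip, mirroring the decode used later in tile_net_discard_packets():

/* Bit 0 carries the "small buffer" flag; bits 1..31 carry physical
 * address bits 7..37, which is why the VA must be 128-byte aligned. */
static inline unsigned int example_encode_buffer(void *va, bool small)
{
	return ((unsigned int)(__pa(va) >> 7) << 1) + small;
}

static inline void *example_decode_buffer(unsigned int buffer, bool *small)
{
	*small = buffer & 1;
	return __va((phys_addr_t)(buffer >> 1) << 7);
}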
+
+
+/*
+ * Provide a linux buffer for LIPP.
+ */
+static bool tile_net_provide_needed_buffer(struct tile_net_cpu *info,
+ bool small)
+{
+ /* ISSUE: What should we use here? */
+ unsigned int large_size = NET_IP_ALIGN + TILE_NET_MTU + 100;
+
+ /* Round up to avoid "false sharing" with the last cache line. */
+ unsigned int buffer_size =
+ (((small ? LIPP_SMALL_PACKET_SIZE : large_size) +
+ CHIP_L2_LINE_SIZE() - 1) & -CHIP_L2_LINE_SIZE());
+
+ /*
+ * ISSUE: Since CPAs are 38 bits, and we can only encode the
+ * high 31 bits in a "linux_buffer_t", the low 7 bits must be
+ * zero, and thus, we must align the actual "va" mod 128.
+ */
+ const unsigned long align = 128;
+
+ struct sk_buff *skb;
+ void *va;
+
+ struct sk_buff **skb_ptr;
+
+ /* Note that "dev_alloc_skb()" adds NET_SKB_PAD more bytes, */
+ /* and also "reserves" that many bytes. */
+ /* ISSUE: Can we "share" the NET_SKB_PAD bytes with "skb_ptr"? */
+ int len = sizeof(*skb_ptr) + align + buffer_size;
+
+ while (1) {
+
+ /* Allocate (or fail). */
+ skb = dev_alloc_skb(len);
+ if (skb == NULL)
+ return false;
+
+ /* Make room for a back-pointer to 'skb'. */
+ skb_reserve(skb, sizeof(*skb_ptr));
+
+ /* Make sure we are aligned. */
+ skb_reserve(skb, -(long)skb->data & (align - 1));
+
+ /* This address is given to IPP. */
+ va = skb->data;
+
+ if (small)
+ break;
+
+ /* ISSUE: This has never been observed! */
+ /* Large buffers must not span a huge page. */
+ if (((((long)va & ~HPAGE_MASK) + 1535) & HPAGE_MASK) == 0)
+ break;
+ pr_err("Leaking unaligned linux buffer at %p.\n", va);
+ }
+
+ /* Skip two bytes to satisfy LIPP assumptions. */
+ /* Note that this aligns IP on a 16 byte boundary. */
+ /* ISSUE: Do this when the packet arrives? */
+ skb_reserve(skb, NET_IP_ALIGN);
+
+ /* Save a back-pointer to 'skb'. */
+ skb_ptr = va - sizeof(*skb_ptr);
+ *skb_ptr = skb;
+
+ /* Invalidate the packet buffer. */
+ if (!hash_default)
+ __inv_buffer(skb->data, buffer_size);
+
+ /* Make sure "skb_ptr" has been flushed. */
+ __insn_mf();
+
+#ifdef TILE_NET_PARANOIA
+#if CHIP_HAS_CBOX_HOME_MAP()
+ if (hash_default) {
+ HV_PTE pte = *virt_to_pte(current->mm, (unsigned long)va);
+ if (hv_pte_get_mode(pte) != HV_PTE_MODE_CACHE_HASH_L3)
+ panic("Non-coherent ingress buffer!");
+ }
+#endif
+#endif
+
+ /* Provide the new buffer. */
+ tile_net_provide_linux_buffer(info, va, small);
+
+ return true;
+}
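Two alignment idioms are used in the allocation above; a short sketch spelling them out (helper name invented, align assumed to be a power of two):

/* Round a size up to a multiple of 'align' (as done for buffer_size). */
static inline unsigned long example_round_up(unsigned long size,
					     unsigned long align)
{
	return (size + align - 1) & -align;
}

/* The skb_reserve(skb, -(long)skb->data & (align - 1)) call pads the
 * data pointer forward to the next 'align' boundary: e.g. with
 * align = 128, a pointer ending in 0x48 is advanced by 0x38 to 0x80. */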
+
+
+/*
+ * Provide linux buffers for LIPP.
+ */
+static void tile_net_provide_needed_buffers(struct tile_net_cpu *info)
+{
+ while (info->num_needed_small_buffers != 0) {
+ if (!tile_net_provide_needed_buffer(info, true))
+ goto oops;
+ info->num_needed_small_buffers--;
+ }
+
+ while (info->num_needed_large_buffers != 0) {
+ if (!tile_net_provide_needed_buffer(info, false))
+ goto oops;
+ info->num_needed_large_buffers--;
+ }
+
+ return;
+
+oops:
+
+ /* Add a description to the page allocation failure dump. */
+ pr_notice("Could not provide a linux buffer to LIPP.\n");
+}
+
+
+/*
+ * Grab some LEPP completions, and store them in "comps", of size
+ * "comps_size", and return the number of completions which were
+ * stored, so the caller can free them.
+ *
+ * If "pending" is not NULL, it will be set to true if there might
+ * still be some pending completions caused by this tile, else false.
+ */
+static unsigned int tile_net_lepp_grab_comps(struct net_device *dev,
+ struct sk_buff *comps[],
+ unsigned int comps_size,
+ bool *pending)
+{
+ struct tile_net_priv *priv = netdev_priv(dev);
+
+ lepp_queue_t *eq = priv->epp_queue;
+
+ unsigned int n = 0;
+
+ unsigned int comp_head;
+ unsigned int comp_busy;
+ unsigned int comp_tail;
+
+ spin_lock(&priv->comp_lock);
+
+ comp_head = eq->comp_head;
+ comp_busy = eq->comp_busy;
+ comp_tail = eq->comp_tail;
+
+ while (comp_head != comp_busy && n < comps_size) {
+ comps[n++] = eq->comps[comp_head];
+ LEPP_QINC(comp_head);
+ }
+
+ if (pending != NULL)
+ *pending = (comp_head != comp_tail);
+
+ eq->comp_head = comp_head;
+
+ spin_unlock(&priv->comp_lock);
+
+ return n;
+}
+
+
+/*
+ * Make sure the egress timer is scheduled.
+ *
+ * Note that we use "schedule if not scheduled" logic instead of the more
+ * obvious "reschedule" logic, because "reschedule" is fairly expensive.
+ */
+static void tile_net_schedule_egress_timer(struct tile_net_cpu *info)
+{
+ if (!info->egress_timer_scheduled) {
+ mod_timer_pinned(&info->egress_timer, jiffies + 1);
+ info->egress_timer_scheduled = true;
+ }
+}
+
+
+/*
+ * The "function" for "info->egress_timer".
+ *
+ * This timer will reschedule itself as long as there are any pending
+ * completions expected (on behalf of any tile).
+ *
+ * ISSUE: Realistically, will the timer ever stop scheduling itself?
+ *
+ * ISSUE: This timer is almost never actually needed, so just use a global
+ * timer that can run on any tile.
+ *
+ * ISSUE: Maybe instead track number of expected completions, and free
+ * only that many, resetting to zero if "pending" is ever false.
+ */
+static void tile_net_handle_egress_timer(unsigned long arg)
+{
+ struct tile_net_cpu *info = (struct tile_net_cpu *)arg;
+ struct net_device *dev = info->napi.dev;
+
+ struct sk_buff *olds[32];
+ unsigned int wanted = 32;
+ unsigned int i, nolds = 0;
+ bool pending;
+
+ /* The timer is no longer scheduled. */
+ info->egress_timer_scheduled = false;
+
+ nolds = tile_net_lepp_grab_comps(dev, olds, wanted, &pending);
+
+ for (i = 0; i < nolds; i++)
+ kfree_skb(olds[i]);
+
+ /* Reschedule timer if needed. */
+ if (pending)
+ tile_net_schedule_egress_timer(info);
+}
+
+
+#ifdef IGNORE_DUP_ACKS
+
+/*
+ * Help detect "duplicate" ACKs. These are sequential packets (for a
+ * given flow) which are exactly 66 bytes long, sharing everything but
+ * ID=2@0x12, Hsum=2@0x18, Ack=4@0x2a, WinSize=2@0x30, Csum=2@0x32,
+ * Tstamps=10@0x38. The ID's are +1, the Hsum's are -1, the Ack's are
+ * +N, and the Tstamps are usually identical.
+ *
+ * NOTE: Apparently truly duplicate acks (with identical "ack" values),
+ * should not be collapsed, as they are used for some kind of flow control.
+ */
+static bool is_dup_ack(char *s1, char *s2, unsigned int len)
+{
+ int i;
+
+ unsigned long long ignorable = 0;
+
+ /* Identification. */
+ ignorable |= (1ULL << 0x12);
+ ignorable |= (1ULL << 0x13);
+
+ /* Header checksum. */
+ ignorable |= (1ULL << 0x18);
+ ignorable |= (1ULL << 0x19);
+
+ /* ACK. */
+ ignorable |= (1ULL << 0x2a);
+ ignorable |= (1ULL << 0x2b);
+ ignorable |= (1ULL << 0x2c);
+ ignorable |= (1ULL << 0x2d);
+
+ /* WinSize. */
+ ignorable |= (1ULL << 0x30);
+ ignorable |= (1ULL << 0x31);
+
+ /* Checksum. */
+ ignorable |= (1ULL << 0x32);
+ ignorable |= (1ULL << 0x33);
+
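+ /*
+ * The mask covers byte offsets 0 through 63; shifting it right once
+ * per byte keeps bit 0 aligned with the current offset, and bytes
+ * beyond offset 63 are always compared.
+ */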
+ for (i = 0; i < len; i++, ignorable >>= 1) {
+
+ if ((ignorable & 1) || (s1[i] == s2[i]))
+ continue;
+
+#ifdef TILE_NET_DEBUG
+ /* HACK: Mention non-timestamp diffs. */
+ if (i < 0x38 && i != 0x2f &&
+ net_ratelimit())
+ pr_info("Diff at 0x%x\n", i);
+#endif
+
+ return false;
+ }
+
+#ifdef TILE_NET_NO_SUPPRESS_DUP_ACKS
+ /* HACK: Do not suppress truly duplicate ACKs. */
+ /* ISSUE: Is this actually necessary or helpful? */
+ if (s1[0x2a] == s2[0x2a] &&
+ s1[0x2b] == s2[0x2b] &&
+ s1[0x2c] == s2[0x2c] &&
+ s1[0x2d] == s2[0x2d]) {
+ return false;
+ }
+#endif
+
+ return true;
+}
+
+#endif
+
+
+
+/*
+ * Like "tile_net_handle_packets()", but just discard packets.
+ */
+static void tile_net_discard_packets(struct net_device *dev)
+{
+ struct tile_net_priv *priv = netdev_priv(dev);
+ int my_cpu = smp_processor_id();
+ struct tile_net_cpu *info = priv->cpu[my_cpu];
+ struct tile_netio_queue *queue = &info->queue;
+ netio_queue_impl_t *qsp = queue->__system_part;
+ netio_queue_user_impl_t *qup = &queue->__user_part;
+
+ while (qup->__packet_receive_read !=
+ qsp->__packet_receive_queue.__packet_write) {
+
+ int index = qup->__packet_receive_read;
+
+ int index2_aux = index + sizeof(netio_pkt_t);
+ int index2 =
+ ((index2_aux ==
+ qsp->__packet_receive_queue.__last_packet_plus_one) ?
+ 0 : index2_aux);
+
+ netio_pkt_t *pkt = (netio_pkt_t *)
+ ((unsigned long) &qsp[1] + index);
+
+ /* Extract the "linux_buffer_t". */
+ unsigned int buffer = pkt->__packet.word;
+
+ /* Convert "linux_buffer_t" to "va". */
+ void *va = __va((phys_addr_t)(buffer >> 1) << 7);
+
+ /* Acquire the associated "skb". */
+ struct sk_buff **skb_ptr = va - sizeof(*skb_ptr);
+ struct sk_buff *skb = *skb_ptr;
+
+ kfree_skb(skb);
+
+ /* Consume this packet. */
+ qup->__packet_receive_read = index2;
+ }
+}
+
+
+/*
+ * Handle the next packet. Return true if "processed", false if "filtered".
+ */
+static bool tile_net_poll_aux(struct tile_net_cpu *info, int index)
+{
+ struct net_device *dev = info->napi.dev;
+
+ struct tile_netio_queue *queue = &info->queue;
+ netio_queue_impl_t *qsp = queue->__system_part;
+ netio_queue_user_impl_t *qup = &queue->__user_part;
+ struct tile_net_stats_t *stats = &info->stats;
+
+ int filter;
+
+ int index2_aux = index + sizeof(netio_pkt_t);
+ int index2 =
+ ((index2_aux ==
+ qsp->__packet_receive_queue.__last_packet_plus_one) ?
+ 0 : index2_aux);
+
+ netio_pkt_t *pkt = (netio_pkt_t *)((unsigned long) &qsp[1] + index);
+
+ netio_pkt_metadata_t *metadata = NETIO_PKT_METADATA(pkt);
+
+ /* Extract the packet size. */
+ unsigned long len =
+ (NETIO_PKT_CUSTOM_LENGTH(pkt) +
+ NET_IP_ALIGN - NETIO_PACKET_PADDING);
+
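+ /*
+ * Note the "linux_buffer_t" decoding below: bit 0 apparently
+ * distinguishes "small" from "large" buffers, and the remaining
+ * bits hold the physical address in 128-byte units, hence the
+ * ">> 1" followed by "<< 7".
+ */
+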
+ /* Extract the "linux_buffer_t". */
+ unsigned int buffer = pkt->__packet.word;
+
+ /* Extract "small" (vs "large"). */
+ bool small = ((buffer & 1) != 0);
+
+ /* Convert "linux_buffer_t" to "va". */
+ void *va = __va((phys_addr_t)(buffer >> 1) << 7);
+
+ /* Extract the packet data pointer. */
+ /* Compare to "NETIO_PKT_CUSTOM_DATA(pkt)". */
+ unsigned char *buf = va + NET_IP_ALIGN;
+
+#ifdef IGNORE_DUP_ACKS
+
+ static int other;
+ static int final;
+ static int keep;
+ static int skip;
+
+#endif
+
+ /* Invalidate the packet buffer. */
+ if (!hash_default)
+ __inv_buffer(buf, len);
+
+ /* ISSUE: Is this needed? */
+ dev->last_rx = jiffies;
+
+#ifdef TILE_NET_DUMP_PACKETS
+ dump_packet(buf, len, "rx");
+#endif /* TILE_NET_DUMP_PACKETS */
+
+#ifdef TILE_NET_VERIFY_INGRESS
+ if (!NETIO_PKT_L4_CSUM_CORRECT_M(metadata, pkt) &&
+ NETIO_PKT_L4_CSUM_CALCULATED_M(metadata, pkt)) {
+ /*
+ * FIXME: This complains about UDP packets
+ * with a "zero" checksum (bug 6624).
+ */
+#ifdef TILE_NET_PANIC_ON_BAD
+ dump_packet(buf, len, "rx");
+ panic("Bad L4 checksum.");
+#else
+ pr_warning("Bad L4 checksum on %d byte packet.\n", len);
+#endif
+ }
+ if (!NETIO_PKT_L3_CSUM_CORRECT_M(metadata, pkt) &&
+ NETIO_PKT_L3_CSUM_CALCULATED_M(metadata, pkt)) {
+ dump_packet(buf, len, "rx");
+ panic("Bad L3 checksum.");
+ }
+ switch (NETIO_PKT_STATUS_M(metadata, pkt)) {
+ case NETIO_PKT_STATUS_OVERSIZE:
+ if (len >= 64) {
+ dump_packet(buf, len, "rx");
+ panic("Unexpected OVERSIZE.");
+ }
+ break;
+ case NETIO_PKT_STATUS_BAD:
+#ifdef TILE_NET_PANIC_ON_BAD
+ dump_packet(buf, len, "rx");
+ panic("Unexpected BAD packet.");
+#else
+ pr_warning("Unexpected BAD %d byte packet.\n", len);
+#endif
+ }
+#endif
+
+ filter = 0;
+
+ if (!(dev->flags & IFF_UP)) {
+ /* Filter packets received before we're up. */
+ filter = 1;
+ } else if (!(dev->flags & IFF_PROMISC)) {
+ /*
+ * FIXME: Implement HW multicast filter.
+ */
+ if (!IS_MULTICAST(buf) && !IS_BROADCAST(buf)) {
+ /* Filter packets not for our address. */
+ const u8 *mine = dev->dev_addr;
+ filter = compare_ether_addr(mine, buf);
+ }
+ }
+
+#ifdef IGNORE_DUP_ACKS
+
+ if (len != 66) {
+ /* FIXME: Must check "is_tcp_ack(buf, len)" somehow. */
+
+ other++;
+
+ } else if (index2 ==
+ qsp->__packet_receive_queue.__packet_write) {
+
+ final++;
+
+ } else {
+
+ netio_pkt_t *pkt2 = (netio_pkt_t *)
+ ((unsigned long) &qsp[1] + index2);
+
+ netio_pkt_metadata_t *metadata2 =
+ NETIO_PKT_METADATA(pkt2);
+
+ /* Extract the packet size. */
+ unsigned long len2 =
+ (NETIO_PKT_CUSTOM_LENGTH(pkt2) +
+ NET_IP_ALIGN - NETIO_PACKET_PADDING);
+
+ if (len2 == 66 &&
+ NETIO_PKT_FLOW_HASH_M(metadata, pkt) ==
+ NETIO_PKT_FLOW_HASH_M(metadata2, pkt2)) {
+
+ /* Extract the "linux_buffer_t". */
+ unsigned int buffer2 = pkt2->__packet.word;
+
+ /* Convert "linux_buffer_t" to "va". */
+ void *va2 =
+ __va((phys_addr_t)(buffer2 >> 1) << 7);
+
+ /* Extract the packet data pointer. */
+ /* Compare to "NETIO_PKT_CUSTOM_DATA(pkt)". */
+ unsigned char *buf2 = va2 + NET_IP_ALIGN;
+
+ /* Invalidate the packet buffer. */
+ if (!hash_default)
+ __inv_buffer(buf2, len2);
+
+ if (is_dup_ack(buf, buf2, len)) {
+ skip++;
+ filter = 1;
+ } else {
+ keep++;
+ }
+ }
+ }
+
+ if (net_ratelimit())
+ pr_info("Other %d Final %d Keep %d Skip %d.\n",
+ other, final, keep, skip);
+
+#endif
+
+ if (filter) {
+
+ /* ISSUE: Update "drop" statistics? */
+
+ tile_net_provide_linux_buffer(info, va, small);
+
+ } else {
+
+ /* Acquire the associated "skb". */
+ struct sk_buff **skb_ptr = va - sizeof(*skb_ptr);
+ struct sk_buff *skb = *skb_ptr;
+
+ /* Paranoia. */
+ if (skb->data != buf)
+ panic("Corrupt linux buffer from LIPP! "
+ "VA=%p, skb=%p, skb->data=%p\n",
+ va, skb, skb->data);
+
+ /* Encode the actual packet length. */
+ skb_put(skb, len);
+
+ /* NOTE: This call also sets "skb->dev = dev". */
+ skb->protocol = eth_type_trans(skb, dev);
+
+ /* ISSUE: Discard corrupt packets? */
+ /* ISSUE: Discard packets with bad checksums? */
+
+ /* Avoid recomputing TCP/UDP checksums. */
+ if (NETIO_PKT_L4_CSUM_CORRECT_M(metadata, pkt))
+ skb->ip_summed = CHECKSUM_UNNECESSARY;
+
+ netif_receive_skb(skb);
+
+ stats->rx_packets++;
+ stats->rx_bytes += len;
+
+ if (small)
+ info->num_needed_small_buffers++;
+ else
+ info->num_needed_large_buffers++;
+ }
+
+ /* Return four credits after every fourth packet. */
+ if (--qup->__receive_credit_remaining == 0) {
+ u32 interval = qup->__receive_credit_interval;
+ qup->__receive_credit_remaining = interval;
+ __netio_fastio_return_credits(qup->__fastio_index, interval);
+ }
+
+ /* Consume this packet. */
+ qup->__packet_receive_read = index2;
+
+ return !filter;
+}
+
+
+/*
+ * Handle some packets for the given device on the current CPU.
+ *
+ * ISSUE: The "rotting packet" race condition occurs if a packet
+ * arrives after the queue appears to be empty, and before the
+ * hypervisor interrupt is re-enabled.
+ */
+static int tile_net_poll(struct napi_struct *napi, int budget)
+{
+ struct net_device *dev = napi->dev;
+ struct tile_net_priv *priv = netdev_priv(dev);
+ int my_cpu = smp_processor_id();
+ struct tile_net_cpu *info = priv->cpu[my_cpu];
+ struct tile_netio_queue *queue = &info->queue;
+ netio_queue_impl_t *qsp = queue->__system_part;
+ netio_queue_user_impl_t *qup = &queue->__user_part;
+
+ unsigned int work = 0;
+
+ while (1) {
+ int index = qup->__packet_receive_read;
+ if (index == qsp->__packet_receive_queue.__packet_write)
+ break;
+
+ if (tile_net_poll_aux(info, index)) {
+ if (++work >= budget)
+ goto done;
+ }
+ }
+
+ napi_complete(&info->napi);
+
+ /* Re-enable hypervisor interrupts. */
+ enable_percpu_irq(priv->intr_id);
+
+ /* HACK: Avoid the "rotting packet" problem. */
+ if (qup->__packet_receive_read !=
+ qsp->__packet_receive_queue.__packet_write)
+ napi_schedule(&info->napi);
+
+ /* ISSUE: Handle completions? */
+
+done:
+
+ tile_net_provide_needed_buffers(info);
+
+ return work;
+}
+
+
+/*
+ * Handle an ingress interrupt for the given device on the current cpu.
+ */
+static irqreturn_t tile_net_handle_ingress_interrupt(int irq, void *dev_ptr)
+{
+ struct net_device *dev = (struct net_device *)dev_ptr;
+ struct tile_net_priv *priv = netdev_priv(dev);
+ int my_cpu = smp_processor_id();
+ struct tile_net_cpu *info = priv->cpu[my_cpu];
+
+ /* Disable hypervisor interrupt. */
+ disable_percpu_irq(priv->intr_id);
+
+ napi_schedule(&info->napi);
+
+ return IRQ_HANDLED;
+}
+
+
+/*
+ * One time initialization per interface.
+ */
+static int tile_net_open_aux(struct net_device *dev)
+{
+ struct tile_net_priv *priv = netdev_priv(dev);
+
+ int ret;
+ int dummy;
+ unsigned int epp_lotar;
+
+ /*
+ * Find out where EPP memory should be homed.
+ */
+ ret = hv_dev_pread(priv->hv_devhdl, 0,
+ (HV_VirtAddr)&epp_lotar, sizeof(epp_lotar),
+ NETIO_EPP_SHM_OFF);
+ if (ret < 0) {
+ pr_err("could not read epp_shm_queue lotar.\n");
+ return -EIO;
+ }
+
+ /*
+ * Home the page on the EPP.
+ */
+ {
+ int epp_home = hv_lotar_to_cpu(epp_lotar);
+ struct page *page = virt_to_page(priv->epp_queue);
+ homecache_change_page_home(page, 0, epp_home);
+ }
+
+ /*
+ * Register the EPP shared memory queue.
+ */
+ {
+ netio_ipp_address_t ea = {
+ .va = 0,
+ .pa = __pa(priv->epp_queue),
+ .pte = hv_pte(0),
+ .size = PAGE_SIZE,
+ };
+ ea.pte = hv_pte_set_lotar(ea.pte, epp_lotar);
+ ea.pte = hv_pte_set_mode(ea.pte, HV_PTE_MODE_CACHE_TILE_L3);
+ ret = hv_dev_pwrite(priv->hv_devhdl, 0,
+ (HV_VirtAddr)&ea,
+ sizeof(ea),
+ NETIO_EPP_SHM_OFF);
+ if (ret < 0)
+ return -EIO;
+ }
+
+ /*
+ * Start LIPP/LEPP.
+ */
+ if (hv_dev_pwrite(priv->hv_devhdl, 0, (HV_VirtAddr)&dummy,
+ sizeof(dummy), NETIO_IPP_START_SHIM_OFF) < 0) {
+ pr_warning("Failed to start LIPP/LEPP.\n");
+ return -EIO;
+ }
+
+ return 0;
+}
+
+
+/*
+ * Register with hypervisor on each CPU.
+ *
+ * Strangely, this function does important things even if it "fails",
+ * which is especially common if the link is not up yet. Hopefully
+ * these things are all "harmless" if done twice!
+ */
+static void tile_net_register(void *dev_ptr)
+{
+ struct net_device *dev = (struct net_device *)dev_ptr;
+ struct tile_net_priv *priv = netdev_priv(dev);
+ int my_cpu = smp_processor_id();
+ struct tile_net_cpu *info;
+
+ struct tile_netio_queue *queue;
+
+ /* Only network cpus can receive packets. */
+ int queue_id =
+ cpumask_test_cpu(my_cpu, &priv->network_cpus_map) ? 0 : 255;
+
+ netio_input_config_t config = {
+ .flags = 0,
+ .num_receive_packets = priv->network_cpus_credits,
+ .queue_id = queue_id
+ };
+
+ int ret = 0;
+ netio_queue_impl_t *queuep;
+
+ PDEBUG("tile_net_register(queue_id %d)\n", queue_id);
+
+ if (!strcmp(dev->name, "xgbe0"))
+ info = &__get_cpu_var(hv_xgbe0);
+ else if (!strcmp(dev->name, "xgbe1"))
+ info = &__get_cpu_var(hv_xgbe1);
+ else if (!strcmp(dev->name, "gbe0"))
+ info = &__get_cpu_var(hv_gbe0);
+ else if (!strcmp(dev->name, "gbe1"))
+ info = &__get_cpu_var(hv_gbe1);
+ else
+ BUG();
+
+ /* Initialize the egress timer. */
+ init_timer(&info->egress_timer);
+ info->egress_timer.data = (long)info;
+ info->egress_timer.function = tile_net_handle_egress_timer;
+
+ priv->cpu[my_cpu] = info;
+
+ /*
+ * Register ourselves with the IPP.
+ */
+ ret = hv_dev_pwrite(priv->hv_devhdl, 0,
+ (HV_VirtAddr)&config,
+ sizeof(netio_input_config_t),
+ NETIO_IPP_INPUT_REGISTER_OFF);
+ PDEBUG("hv_dev_pwrite(NETIO_IPP_INPUT_REGISTER_OFF) returned %d\n",
+ ret);
+ if (ret < 0) {
+ printk(KERN_DEBUG "hv_dev_pwrite NETIO_IPP_INPUT_REGISTER_OFF"
+ " failure %d\n", ret);
+ info->link_down = (ret == NETIO_LINK_DOWN);
+ return;
+ }
+
+ /*
+ * Get the pointer to our queue's system part.
+ */
+
+ ret = hv_dev_pread(priv->hv_devhdl, 0,
+ (HV_VirtAddr)&queuep,
+ sizeof(netio_queue_impl_t *),
+ NETIO_IPP_INPUT_REGISTER_OFF);
+ PDEBUG("hv_dev_pread(NETIO_IPP_INPUT_REGISTER_OFF) returned %d\n",
+ ret);
+ PDEBUG("queuep %p\n", queuep);
+ if (ret <= 0) {
+ /* ISSUE: Shouldn't this be a fatal error? */
+ pr_err("hv_dev_pread NETIO_IPP_INPUT_REGISTER_OFF failure\n");
+ return;
+ }
+
+ queue = &info->queue;
+
+ queue->__system_part = queuep;
+
+ memset(&queue->__user_part, 0, sizeof(netio_queue_user_impl_t));
+
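+ /*
+ * These fields drive the credit batching in "tile_net_poll_aux()":
+ * the ingress path decrements "__receive_credit_remaining" once per
+ * packet, and returns "__receive_credit_interval" credits via
+ * "__netio_fastio_return_credits()" whenever it reaches zero.
+ */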
+ /* This is traditionally "config.num_receive_packets / 2". */
+ queue->__user_part.__receive_credit_interval = 4;
+ queue->__user_part.__receive_credit_remaining =
+ queue->__user_part.__receive_credit_interval;
+
+ /*
+ * Get a fastio index from the hypervisor.
+ * ISSUE: Shouldn't this check the result?
+ */
+ ret = hv_dev_pread(priv->hv_devhdl, 0,
+ (HV_VirtAddr)&queue->__user_part.__fastio_index,
+ sizeof(queue->__user_part.__fastio_index),
+ NETIO_IPP_GET_FASTIO_OFF);
+ PDEBUG("hv_dev_pread(NETIO_IPP_GET_FASTIO_OFF) returned %d\n", ret);
+
+ netif_napi_add(dev, &info->napi, tile_net_poll, 64);
+
+ /* Now we are registered. */
+ info->registered = true;
+}
+
+
+/*
+ * Unregister with hypervisor on each CPU.
+ */
+static void tile_net_unregister(void *dev_ptr)
+{
+ struct net_device *dev = (struct net_device *)dev_ptr;
+ struct tile_net_priv *priv = netdev_priv(dev);
+ int my_cpu = smp_processor_id();
+ struct tile_net_cpu *info = priv->cpu[my_cpu];
+
+ int ret = 0;
+ int dummy = 0;
+
+ /* Do nothing if never registered. */
+ if (info == NULL)
+ return;
+
+ /* Do nothing if already unregistered. */
+ if (!info->registered)
+ return;
+
+ /*
+ * Unregister ourselves with LIPP.
+ */
+ ret = hv_dev_pwrite(priv->hv_devhdl, 0, (HV_VirtAddr)&dummy,
+ sizeof(dummy), NETIO_IPP_INPUT_UNREGISTER_OFF);
+ PDEBUG("hv_dev_pwrite(NETIO_IPP_INPUT_UNREGISTER_OFF) returned %d\n",
+ ret);
+ if (ret < 0) {
+ /* FIXME: Just panic? */
+ pr_err("hv_dev_pwrite NETIO_IPP_INPUT_UNREGISTER_OFF"
+ " failure %d\n", ret);
+ }
+
+ /*
+ * Discard all packets still in our NetIO queue. Hopefully,
+ * once the unregister call is complete, there will be no
+ * packets still in flight on the IDN.
+ */
+ tile_net_discard_packets(dev);
+
+ /* Reset state. */
+ info->num_needed_small_buffers = 0;
+ info->num_needed_large_buffers = 0;
+
+ /* Cancel egress timer. */
+ del_timer(&info->egress_timer);
+ info->egress_timer_scheduled = false;
+
+ netif_napi_del(&info->napi);
+
+ /* Now we are unregistered. */
+ info->registered = false;
+}
+
+
+/*
+ * Helper function for "tile_net_stop()".
+ *
+ * Also used to handle registration failure in "tile_net_open_inner()",
+ * when "fully_opened" is known to be false, and the various extra
+ * steps in "tile_net_stop()" are not necessary. ISSUE: It might be
+ * simpler if we could just call "tile_net_stop()" anyway.
+ */
+static void tile_net_stop_aux(struct net_device *dev)
+{
+ struct tile_net_priv *priv = netdev_priv(dev);
+
+ int dummy = 0;
+
+ /* Unregister all tiles, so LIPP will stop delivering packets. */
+ on_each_cpu(tile_net_unregister, (void *)dev, 1);
+
+ /* Stop LIPP/LEPP. */
+ if (hv_dev_pwrite(priv->hv_devhdl, 0, (HV_VirtAddr)&dummy,
+ sizeof(dummy), NETIO_IPP_STOP_SHIM_OFF) < 0)
+ panic("Failed to stop LIPP/LEPP!\n");
+
+ priv->partly_opened = 0;
+}
+
+
+/*
+ * Disable ingress interrupts for the given device on the current cpu.
+ */
+static void tile_net_disable_intr(void *dev_ptr)
+{
+ struct net_device *dev = (struct net_device *)dev_ptr;
+ struct tile_net_priv *priv = netdev_priv(dev);
+ int my_cpu = smp_processor_id();
+ struct tile_net_cpu *info = priv->cpu[my_cpu];
+
+ /* Disable hypervisor interrupt. */
+ disable_percpu_irq(priv->intr_id);
+
+ /* Disable NAPI if needed. */
+ if (info != NULL && info->napi_enabled) {
+ napi_disable(&info->napi);
+ info->napi_enabled = false;
+ }
+}
+
+
+/*
+ * Enable ingress interrupts for the given device on the current cpu.
+ */
+static void tile_net_enable_intr(void *dev_ptr)
+{
+ struct net_device *dev = (struct net_device *)dev_ptr;
+ struct tile_net_priv *priv = netdev_priv(dev);
+ int my_cpu = smp_processor_id();
+ struct tile_net_cpu *info = priv->cpu[my_cpu];
+
+ /* Enable hypervisor interrupt. */
+ enable_percpu_irq(priv->intr_id);
+
+ /* Enable NAPI. */
+ napi_enable(&info->napi);
+ info->napi_enabled = true;
+}
+
+
+/*
+ * tile_net_open_inner does most of the work of bringing up the interface.
+ * It's called from tile_net_open(), and also from tile_net_retry_open().
+ * The return value is 0 if the interface was brought up, < 0 if
+ * tile_net_open() should return the return value as an error, and > 0 if
+ * tile_net_open() should return success and schedule a work item to
+ * periodically retry the bringup.
+ */
+static int tile_net_open_inner(struct net_device *dev)
+{
+ struct tile_net_priv *priv = netdev_priv(dev);
+ int my_cpu = smp_processor_id();
+ struct tile_net_cpu *info;
+ struct tile_netio_queue *queue;
+ unsigned int irq;
+ int i;
+
+ /*
+ * First try to register just on the local CPU, and handle any
+ * semi-expected "link down" failure specially. Note that we
+ * do NOT call "tile_net_stop_aux()", unlike below.
+ */
+ tile_net_register(dev);
+ info = priv->cpu[my_cpu];
+ if (!info->registered) {
+ if (info->link_down)
+ return 1;
+ return -EAGAIN;
+ }
+
+ /*
+ * Now register everywhere else. If any registration fails,
+ * even for "link down" (which might not be possible), we
+ * clean up using "tile_net_stop_aux()".
+ */
+ smp_call_function(tile_net_register, (void *)dev, 1);
+ for_each_online_cpu(i) {
+ if (!priv->cpu[i]->registered) {
+ tile_net_stop_aux(dev);
+ return -EAGAIN;
+ }
+ }
+
+ queue = &info->queue;
+
+ /*
+ * Set the device intr bit mask.
+ * The tile_net_register above sets per tile __intr_id.
+ */
+ priv->intr_id = queue->__system_part->__intr_id;
+ BUG_ON(!priv->intr_id);
+
+ /*
+ * Register the device interrupt handler.
+ * The __ffs() function returns the index into the interrupt handler
+ * table from the interrupt bit mask which should have one bit
+ * and one bit only set.
+ */
+ irq = __ffs(priv->intr_id);
+ tile_irq_activate(irq, TILE_IRQ_PERCPU);
+ BUG_ON(request_irq(irq, tile_net_handle_ingress_interrupt,
+ 0, dev->name, (void *)dev) != 0);
+
+ /* ISSUE: How could "priv->fully_opened" ever be "true" here? */
+
+ if (!priv->fully_opened) {
+
+ int dummy = 0;
+
+ /* Allocate initial buffers. */
+
+ int max_buffers =
+ priv->network_cpus_count * priv->network_cpus_credits;
+
+ info->num_needed_small_buffers =
+ min(LIPP_SMALL_BUFFERS, max_buffers);
+
+ info->num_needed_large_buffers =
+ min(LIPP_LARGE_BUFFERS, max_buffers);
+
+ tile_net_provide_needed_buffers(info);
+
+ if (info->num_needed_small_buffers != 0 ||
+ info->num_needed_large_buffers != 0)
+ panic("Insufficient memory for buffer stack!");
+
+ /* Start LIPP/LEPP and activate "ingress" at the shim. */
+ if (hv_dev_pwrite(priv->hv_devhdl, 0, (HV_VirtAddr)&dummy,
+ sizeof(dummy), NETIO_IPP_INPUT_INIT_OFF) < 0)
+ panic("Failed to activate the LIPP Shim!\n");
+
+ priv->fully_opened = 1;
+ }
+
+ /* On each tile, enable the hypervisor to trigger interrupts. */
+ /* ISSUE: Do this before starting LIPP/LEPP? */
+ on_each_cpu(tile_net_enable_intr, (void *)dev, 1);
+
+ /* Start our transmit queue. */
+ netif_start_queue(dev);
+
+ return 0;
+}
+
+
+/*
+ * Called periodically to retry bringing up the NetIO interface,
+ * if it doesn't come up cleanly during tile_net_open().
+ */
+static void tile_net_open_retry(struct work_struct *w)
+{
+ struct delayed_work *dw =
+ container_of(w, struct delayed_work, work);
+
+ struct tile_net_priv *priv =
+ container_of(dw, struct tile_net_priv, retry_work);
+
+ /*
+ * Try to bring the NetIO interface up. If it fails, reschedule
+ * ourselves to try again later; otherwise, tell Linux we now have
+ * a working link. ISSUE: What if the return value is negative?
+ */
+ if (tile_net_open_inner(priv->dev))
+ schedule_delayed_work_on(singlethread_cpu, &priv->retry_work,
+ TILE_NET_RETRY_INTERVAL);
+ else
+ netif_carrier_on(priv->dev);
+}
+
+
+/*
+ * Called when a network interface is made active.
+ *
+ * Returns 0 on success, negative value on failure.
+ *
+ * The open entry point is called when a network interface is made
+ * active by the system (IFF_UP). At this point all resources needed
+ * for transmit and receive operations are allocated, the interrupt
+ * handler is registered with the OS, the watchdog timer is started,
+ * and the stack is notified that the interface is ready.
+ *
+ * If the actual link is not available yet, then we tell Linux that
+ * we have no carrier, and we keep checking until the link comes up.
+ */
+static int tile_net_open(struct net_device *dev)
+{
+ int ret = 0;
+ struct tile_net_priv *priv = netdev_priv(dev);
+
+ /*
+ * We rely on priv->partly_opened to tell us if this is the
+ * first time this interface is being brought up. If it is
+ * set, the IPP was already initialized and should not be
+ * initialized again.
+ */
+ if (!priv->partly_opened) {
+
+ int count;
+ int credits;
+
+ /* Initialize LIPP/LEPP, and start the Shim. */
+ ret = tile_net_open_aux(dev);
+ if (ret < 0) {
+ pr_err("tile_net_open_aux failed: %d\n", ret);
+ return ret;
+ }
+
+ /* Analyze the network cpus. */
+
+ if (network_cpus_used)
+ cpumask_copy(&priv->network_cpus_map,
+ &network_cpus_map);
+ else
+ cpumask_copy(&priv->network_cpus_map, cpu_online_mask);
+
+
+ count = cpumask_weight(&priv->network_cpus_map);
+
+ /* Limit credits to available buffers, and apply min. */
+ credits = max(16, (LIPP_LARGE_BUFFERS / count) & ~1);
+
+ /* Apply "GBE" max limit. */
+ /* ISSUE: Use higher limit for XGBE? */
+ credits = min(NETIO_MAX_RECEIVE_PKTS, credits);
+
+ priv->network_cpus_count = count;
+ priv->network_cpus_credits = credits;
+
+#ifdef TILE_NET_DEBUG
+ pr_info("Using %d network cpus, with %d credits each\n",
+ priv->network_cpus_count, priv->network_cpus_credits);
+#endif
+
+ priv->partly_opened = 1;
+ }
+
+ /*
+ * Attempt to bring up the link.
+ */
+ ret = tile_net_open_inner(dev);
+ if (ret <= 0) {
+ if (ret == 0)
+ netif_carrier_on(dev);
+ return ret;
+ }
+
+ /*
+ * We were unable to bring up the NetIO interface, but we want to
+ * try again in a little bit. Tell Linux that we have no carrier
+ * so it doesn't try to use the interface before the link comes up
+ * and then remember to try again later.
+ */
+ netif_carrier_off(dev);
+ schedule_delayed_work_on(singlethread_cpu, &priv->retry_work,
+ TILE_NET_RETRY_INTERVAL);
+
+ return 0;
+}
+
+
+/*
+ * Disables a network interface.
+ *
+ * Returns 0; this is not allowed to fail.
+ *
+ * The close entry point is called when an interface is de-activated
+ * by the OS. The hardware is still under the driver's control, but
+ * needs to be disabled. A global MAC reset is issued to stop the
+ * hardware, and all transmit and receive resources are freed.
+ *
+ * ISSUE: Can this be called while "tile_net_poll()" is running?
+ */
+static int tile_net_stop(struct net_device *dev)
+{
+ struct tile_net_priv *priv = netdev_priv(dev);
+
+ bool pending = true;
+
+ PDEBUG("tile_net_stop()\n");
+
+ /* ISSUE: Only needed if not yet fully open. */
+ cancel_delayed_work_sync(&priv->retry_work);
+
+ /* Can't transmit any more. */
+ netif_stop_queue(dev);
+
+ /*
+ * Disable hypervisor interrupts on each tile.
+ */
+ on_each_cpu(tile_net_disable_intr, (void *)dev, 1);
+
+ /*
+ * Unregister the interrupt handler.
+ * The __ffs() function returns the index into the interrupt handler
+ * table from the interrupt bit mask which should have one bit
+ * and one bit only set.
+ */
+ if (priv->intr_id)
+ free_irq(__ffs(priv->intr_id), dev);
+
+ /*
+ * Drain all the LIPP buffers.
+ */
+
+ while (true) {
+ int buffer;
+
+ /* NOTE: This should never fail. */
+ if (hv_dev_pread(priv->hv_devhdl, 0, (HV_VirtAddr)&buffer,
+ sizeof(buffer), NETIO_IPP_DRAIN_OFF) < 0)
+ break;
+
+ /* Stop when done. */
+ if (buffer == 0)
+ break;
+
+ {
+ /* Convert "linux_buffer_t" to "va". */
+ void *va = __va((phys_addr_t)(buffer >> 1) << 7);
+
+ /* Acquire the associated "skb". */
+ struct sk_buff **skb_ptr = va - sizeof(*skb_ptr);
+ struct sk_buff *skb = *skb_ptr;
+
+ kfree_skb(skb);
+ }
+ }
+
+ /* Stop LIPP/LEPP. */
+ tile_net_stop_aux(dev);
+
+
+ priv->fully_opened = 0;
+
+
+ /*
+ * XXX: ISSUE: It appears that, in practice anyway, by the
+ * time we get here, there are no pending completions.
+ */
+ while (pending) {
+
+ struct sk_buff *olds[32];
+ unsigned int wanted = 32;
+ unsigned int i, nolds = 0;
+
+ nolds = tile_net_lepp_grab_comps(dev, olds,
+ wanted, &pending);
+
+ /* ISSUE: We have never actually seen this debug spew. */
+ if (nolds != 0)
+ pr_info("During tile_net_stop(), grabbed %d comps.\n",
+ nolds);
+
+ for (i = 0; i < nolds; i++)
+ kfree_skb(olds[i]);
+ }
+
+
+ /* Wipe the EPP queue. */
+ memset(priv->epp_queue, 0, sizeof(lepp_queue_t));
+
+ /* Evict the EPP queue. */
+ finv_buffer(priv->epp_queue, PAGE_SIZE);
+
+ return 0;
+}
+
+
+/*
+ * Prepare the "frags" info for the resulting LEPP command.
+ *
+ * If needed, flush the memory used by the frags.
+ */
+static unsigned int tile_net_tx_frags(lepp_frag_t *frags,
+ struct sk_buff *skb,
+ void *b_data, unsigned int b_len)
+{
+ unsigned int i, n = 0;
+
+ struct skb_shared_info *sh = skb_shinfo(skb);
+
+ phys_addr_t cpa;
+
+ if (b_len != 0) {
+
+ if (!hash_default)
+ finv_buffer_remote(b_data, b_len);
+
+ cpa = __pa(b_data);
+ frags[n].cpa_lo = cpa;
+ frags[n].cpa_hi = cpa >> 32;
+ frags[n].length = b_len;
+ frags[n].hash_for_home = hash_default;
+ n++;
+ }
+
+ for (i = 0; i < sh->nr_frags; i++) {
+
+ skb_frag_t *f = &sh->frags[i];
+ unsigned long pfn = page_to_pfn(f->page);
+
+ /* FIXME: Compute "hash_for_home" properly. */
+ /* ISSUE: The hypervisor checks CHIP_HAS_REV1_DMA_PACKETS(). */
+ int hash_for_home = hash_default;
+
+ /* FIXME: Hmmm. */
+ if (!hash_default) {
+ void *va = pfn_to_kaddr(pfn) + f->page_offset;
+ BUG_ON(PageHighMem(f->page));
+ finv_buffer_remote(va, f->size);
+ }
+
+ cpa = ((phys_addr_t)pfn << PAGE_SHIFT) + f->page_offset;
+ frags[n].cpa_lo = cpa;
+ frags[n].cpa_hi = cpa >> 32;
+ frags[n].length = f->size;
+ frags[n].hash_for_home = hash_for_home;
+ n++;
+ }
+
+ return n;
+}
+
+
+/*
+ * This function takes "skb", consisting of a header template and a
+ * payload, and hands it to LEPP, to emit as one or more segments,
+ * each consisting of a possibly modified header, plus a piece of the
+ * payload, via a process known as "tcp segmentation offload".
+ *
+ * Usually, "data" will contain the header template, of size "sh_len",
+ * and "sh->frags" will contain "skb->data_len" bytes of payload, and
+ * there will be "sh->gso_segs" segments.
+ *
+ * Sometimes, if "sendfile()" requires copying, we will be called with
+ * "data" containing the header and payload, with "frags" being empty.
+ *
+ * In theory, "sh->nr_frags" could be 3, but in practice, it seems
+ * that this will never actually happen.
+ *
+ * See "emulate_large_send_offload()" for some reference code, which
+ * does not handle checksumming.
+ *
+ * ISSUE: How do we make sure that high memory DMA does not migrate?
+ */
+static int tile_net_tx_tso(struct sk_buff *skb, struct net_device *dev)
+{
+ struct tile_net_priv *priv = netdev_priv(dev);
+ int my_cpu = smp_processor_id();
+ struct tile_net_cpu *info = priv->cpu[my_cpu];
+ struct tile_net_stats_t *stats = &info->stats;
+
+ struct skb_shared_info *sh = skb_shinfo(skb);
+
+ unsigned char *data = skb->data;
+
+ /* The ip header follows the ethernet header. */
+ struct iphdr *ih = ip_hdr(skb);
+ unsigned int ih_len = ih->ihl * 4;
+
+ /* Note that "nh == ih", by definition. */
+ unsigned char *nh = skb_network_header(skb);
+ unsigned int eh_len = nh - data;
+
+ /* The tcp header follows the ip header. */
+ struct tcphdr *th = (struct tcphdr *)(nh + ih_len);
+ unsigned int th_len = th->doff * 4;
+
+ /* The total number of header bytes. */
+ /* NOTE: This may be less than skb_headlen(skb). */
+ unsigned int sh_len = eh_len + ih_len + th_len;
+
+ /* The number of payload bytes at "skb->data + sh_len". */
+ /* This is non-zero for sendfile() without HIGHDMA. */
+ unsigned int b_len = skb_headlen(skb) - sh_len;
+
+ /* The total number of payload bytes. */
+ unsigned int d_len = b_len + skb->data_len;
+
+ /* The maximum payload size. */
+ unsigned int p_len = sh->gso_size;
+
+ /* The total number of segments. */
+ unsigned int num_segs = sh->gso_segs;
+
+ /* The temporary copy of the command. */
+ u32 cmd_body[(LEPP_MAX_CMD_SIZE + 3) / 4];
+ lepp_tso_cmd_t *cmd = (lepp_tso_cmd_t *)cmd_body;
+
+ /* Analyze the "frags". */
+ unsigned int num_frags =
+ tile_net_tx_frags(cmd->frags, skb, data + sh_len, b_len);
+
+ /* The size of the command, including frags and header. */
+ size_t cmd_size = LEPP_TSO_CMD_SIZE(num_frags, sh_len);
+
+ /* The command header. */
+ lepp_tso_cmd_t cmd_init = {
+ .tso = true,
+ .header_size = sh_len,
+ .ip_offset = eh_len,
+ .tcp_offset = eh_len + ih_len,
+ .payload_size = p_len,
+ .num_frags = num_frags,
+ };
+
+ unsigned long irqflags;
+
+ lepp_queue_t *eq = priv->epp_queue;
+
+ struct sk_buff *olds[4];
+ unsigned int wanted = 4;
+ unsigned int i, nolds = 0;
+
+ unsigned int cmd_head, cmd_tail, cmd_next;
+ unsigned int comp_tail;
+
+ unsigned int free_slots;
+
+
+ /* Paranoia. */
+ BUG_ON(skb->protocol != htons(ETH_P_IP));
+ BUG_ON(ih->protocol != IPPROTO_TCP);
+ BUG_ON(skb->ip_summed != CHECKSUM_PARTIAL);
+ BUG_ON(num_frags > LEPP_MAX_FRAGS);
+ /*--BUG_ON(num_segs != (d_len + (p_len - 1)) / p_len); */
+ BUG_ON(num_segs <= 1);
+
+
+ /* Finish preparing the command. */
+
+ /* Copy the command header. */
+ *cmd = cmd_init;
+
+ /* Copy the "header". */
+ memcpy(&cmd->frags[num_frags], data, sh_len);
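+
+ /*
+ * The header template thus lands immediately after the last frag
+ * descriptor, which is why the total command size above is
+ * "LEPP_TSO_CMD_SIZE(num_frags, sh_len)".
+ */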
+
+
+ /* Prefetch and wait, to minimize time spent holding the spinlock. */
+ prefetch_L1(&eq->comp_tail);
+ prefetch_L1(&eq->cmd_tail);
+ mb();
+
+
+ /* Enqueue the command. */
+
+ spin_lock_irqsave(&priv->cmd_lock, irqflags);
+
+ /*
+ * Handle completions if needed to make room.
+ * HACK: Spin until there is sufficient room.
+ */
+ free_slots = lepp_num_free_comp_slots(eq);
+ if (free_slots < 1) {
+spin:
+ nolds += tile_net_lepp_grab_comps(dev, olds + nolds,
+ wanted - nolds, NULL);
+ if (lepp_num_free_comp_slots(eq) < 1)
+ goto spin;
+ }
+
+ cmd_head = eq->cmd_head;
+ cmd_tail = eq->cmd_tail;
+
+ /* NOTE: The "gotos" below are untested. */
+
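+ /*
+ * "Full" here means that advancing "cmd_tail" would collide with the
+ * oldest unconsumed command at "cmd_head"; the second test wraps the
+ * tail back to zero once it would pass "LEPP_CMD_LIMIT". Either way
+ * we jump back to "spin", free more completions, and retry with a
+ * freshly read "cmd_head".
+ */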
+ /* Prepare to advance, detecting full queue. */
+ cmd_next = cmd_tail + cmd_size;
+ if (cmd_tail < cmd_head && cmd_next >= cmd_head)
+ goto spin;
+ if (cmd_next > LEPP_CMD_LIMIT) {
+ cmd_next = 0;
+ if (cmd_next == cmd_head)
+ goto spin;
+ }
+
+ /* Copy the command. */
+ memcpy(&eq->cmds[cmd_tail], cmd, cmd_size);
+
+ /* Advance. */
+ cmd_tail = cmd_next;
+
+ /* Record "skb" for eventual freeing. */
+ comp_tail = eq->comp_tail;
+ eq->comps[comp_tail] = skb;
+ LEPP_QINC(comp_tail);
+ eq->comp_tail = comp_tail;
+
+ /* Flush before allowing LEPP to handle the command. */
+ __insn_mf();
+
+ eq->cmd_tail = cmd_tail;
+
+ spin_unlock_irqrestore(&priv->cmd_lock, irqflags);
+
+ if (nolds == 0)
+ nolds = tile_net_lepp_grab_comps(dev, olds, wanted, NULL);
+
+ /* Handle completions. */
+ for (i = 0; i < nolds; i++)
+ kfree_skb(olds[i]);
+
+ /* Update stats. */
+ stats->tx_packets += num_segs;
+ stats->tx_bytes += (num_segs * sh_len) + d_len;
+
+ /* Make sure the egress timer is scheduled. */
+ tile_net_schedule_egress_timer(info);
+
+ return NETDEV_TX_OK;
+}
+
+
+/*
+ * Transmit a packet (called by the kernel via "hard_start_xmit" hook).
+ */
+static int tile_net_tx(struct sk_buff *skb, struct net_device *dev)
+{
+ struct tile_net_priv *priv = netdev_priv(dev);
+ int my_cpu = smp_processor_id();
+ struct tile_net_cpu *info = priv->cpu[my_cpu];
+ struct tile_net_stats_t *stats = &info->stats;
+
+ unsigned long irqflags;
+
+ struct skb_shared_info *sh = skb_shinfo(skb);
+
+ unsigned int len = skb->len;
+ unsigned char *data = skb->data;
+
+ unsigned int csum_start = skb->csum_start - skb_headroom(skb);
+
+ lepp_frag_t frags[LEPP_MAX_FRAGS];
+
+ unsigned int num_frags;
+
+ lepp_queue_t *eq = priv->epp_queue;
+
+ struct sk_buff *olds[4];
+ unsigned int wanted = 4;
+ unsigned int i, nolds = 0;
+
+ unsigned int cmd_size = sizeof(lepp_cmd_t);
+
+ unsigned int cmd_head, cmd_tail, cmd_next;
+ unsigned int comp_tail;
+
+ lepp_cmd_t cmds[LEPP_MAX_FRAGS];
+
+ unsigned int free_slots;
+
+
+ /*
+ * This is paranoia, since we think that if the link doesn't come
+ * up, telling Linux we have no carrier will keep it from trying
+ * to transmit. If it does, though, we can't execute this routine,
+ * since data structures we depend on aren't set up yet.
+ */
+ if (!info->registered)
+ return NETDEV_TX_BUSY;
+
+
+ /* Save the timestamp. */
+ dev->trans_start = jiffies;
+
+
+#ifdef TILE_NET_PARANOIA
+#if CHIP_HAS_CBOX_HOME_MAP()
+ if (hash_default) {
+ HV_PTE pte = *virt_to_pte(current->mm, (unsigned long)data);
+ if (hv_pte_get_mode(pte) != HV_PTE_MODE_CACHE_HASH_L3)
+ panic("Non-coherent egress buffer!");
+ }
+#endif
+#endif
+
+
+#ifdef TILE_NET_DUMP_PACKETS
+ /* ISSUE: Does not dump the "frags". */
+ dump_packet(data, skb_headlen(skb), "tx");
+#endif /* TILE_NET_DUMP_PACKETS */
+
+
+ if (sh->gso_size != 0)
+ return tile_net_tx_tso(skb, dev);
+
+
+ /* Prepare the commands. */
+
+ num_frags = tile_net_tx_frags(frags, skb, data, skb_headlen(skb));
+
+ for (i = 0; i < num_frags; i++) {
+
+ bool final = (i == num_frags - 1);
+
+ lepp_cmd_t cmd = {
+ .cpa_lo = frags[i].cpa_lo,
+ .cpa_hi = frags[i].cpa_hi,
+ .length = frags[i].length,
+ .hash_for_home = frags[i].hash_for_home,
+ .send_completion = final,
+ .end_of_packet = final
+ };
+
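+ /*
+ * For CHECKSUM_PARTIAL packets, ask LEPP to fill in the checksum:
+ * presumably it sums "count" bytes starting at "start_byte" and
+ * stores the result at "destination_byte", mirroring the kernel's
+ * "csum_start"/"csum_offset" convention.
+ */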
+ if (i == 0 && skb->ip_summed == CHECKSUM_PARTIAL) {
+ cmd.compute_checksum = 1;
+ cmd.checksum_data.bits.start_byte = csum_start;
+ cmd.checksum_data.bits.count = len - csum_start;
+ cmd.checksum_data.bits.destination_byte =
+ csum_start + skb->csum_offset;
+ }
+
+ cmds[i] = cmd;
+ }
+
+
+ /* Prefetch and wait, to minimize time spent holding the spinlock. */
+ prefetch_L1(&eq->comp_tail);
+ prefetch_L1(&eq->cmd_tail);
+ mb();
+
+
+ /* Enqueue the commands. */
+
+ spin_lock_irqsave(&priv->cmd_lock, irqflags);
+
+ /*
+ * Handle completions if needed to make room.
+ * HACK: Spin until there is sufficient room.
+ */
+ free_slots = lepp_num_free_comp_slots(eq);
+ if (free_slots < 1) {
+spin:
+ nolds += tile_net_lepp_grab_comps(dev, olds + nolds,
+ wanted - nolds, NULL);
+ if (lepp_num_free_comp_slots(eq) < 1)
+ goto spin;
+ }
+
+ cmd_head = eq->cmd_head;
+ cmd_tail = eq->cmd_tail;
+
+ /* NOTE: The "gotos" below are untested. */
+
+ /* Copy the commands, or fail. */
+ for (i = 0; i < num_frags; i++) {
+
+ /* Prepare to advance, detecting full queue. */
+ cmd_next = cmd_tail + cmd_size;
+ if (cmd_tail < cmd_head && cmd_next >= cmd_head)
+ goto spin;
+ if (cmd_next > LEPP_CMD_LIMIT) {
+ cmd_next = 0;
+ if (cmd_next == cmd_head)
+ goto spin;
+ }
+
+ /* Copy the command. */
+ *(lepp_cmd_t *)&eq->cmds[cmd_tail] = cmds[i];
+
+ /* Advance. */
+ cmd_tail = cmd_next;
+ }
+
+ /* Record "skb" for eventual freeing. */
+ comp_tail = eq->comp_tail;
+ eq->comps[comp_tail] = skb;
+ LEPP_QINC(comp_tail);
+ eq->comp_tail = comp_tail;
+
+ /* Flush before allowing LEPP to handle the command. */
+ __insn_mf();
+
+ eq->cmd_tail = cmd_tail;
+
+ spin_unlock_irqrestore(&priv->cmd_lock, irqflags);
+
+ if (nolds == 0)
+ nolds = tile_net_lepp_grab_comps(dev, olds, wanted, NULL);
+
+ /* Handle completions. */
+ for (i = 0; i < nolds; i++)
+ kfree_skb(olds[i]);
+
+ /* HACK: Track "expanded" size for short packets (e.g. 42 < 60). */
+ stats->tx_packets++;
+ stats->tx_bytes += ((len >= ETH_ZLEN) ? len : ETH_ZLEN);
+
+ /* Make sure the egress timer is scheduled. */
+ tile_net_schedule_egress_timer(info);
+
+ return NETDEV_TX_OK;
+}
+
+
+/*
+ * Deal with a transmit timeout.
+ */
+static void tile_net_tx_timeout(struct net_device *dev)
+{
+ PDEBUG("tile_net_tx_timeout()\n");
+ PDEBUG("Transmit timeout at %ld, latency %ld\n", jiffies,
+ jiffies - dev->trans_start);
+
+ /* XXX: ISSUE: This doesn't seem useful for us. */
+ netif_wake_queue(dev);
+}
+
+
+/*
+ * Ioctl commands.
+ */
+static int tile_net_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
+{
+ return -EOPNOTSUPP;
+}
+
+
+/*
+ * Get System Network Statistics.
+ *
+ * Returns the address of the device statistics structure.
+ */
+static struct net_device_stats *tile_net_get_stats(struct net_device *dev)
+{
+ struct tile_net_priv *priv = netdev_priv(dev);
+ u32 rx_packets = 0;
+ u32 tx_packets = 0;
+ u32 rx_bytes = 0;
+ u32 tx_bytes = 0;
+ int i;
+
+ for_each_online_cpu(i) {
+ if (priv->cpu[i]) {
+ rx_packets += priv->cpu[i]->stats.rx_packets;
+ rx_bytes += priv->cpu[i]->stats.rx_bytes;
+ tx_packets += priv->cpu[i]->stats.tx_packets;
+ tx_bytes += priv->cpu[i]->stats.tx_bytes;
+ }
+ }
+
+ priv->stats.rx_packets = rx_packets;
+ priv->stats.rx_bytes = rx_bytes;
+ priv->stats.tx_packets = tx_packets;
+ priv->stats.tx_bytes = tx_bytes;
+
+ return &priv->stats;
+}
+
+
+/*
+ * Change the "mtu".
+ *
+ * The "change_mtu" method is usually not needed.
+ * If you need it, it must be like this.
+ */
+static int tile_net_change_mtu(struct net_device *dev, int new_mtu)
+{
+ PDEBUG("tile_net_change_mtu()\n");
+
+ /* Check ranges. */
+ if ((new_mtu < 68) || (new_mtu > 1500))
+ return -EINVAL;
+
+ /* Accept the value. */
+ dev->mtu = new_mtu;
+
+ return 0;
+}
+
+
+/*
+ * Change the Ethernet Address of the NIC.
+ *
+ * The hypervisor driver does not support changing MAC address. However,
+ * the IPP does not do anything with the MAC address, so the address which
+ * gets used on outgoing packets, and which is accepted on incoming packets,
+ * is completely up to the NetIO program or kernel driver which is actually
+ * handling them.
+ *
+ * Returns 0 on success, negative on failure.
+ */
+static int tile_net_set_mac_address(struct net_device *dev, void *p)
+{
+ struct sockaddr *addr = p;
+
+ if (!is_valid_ether_addr(addr->sa_data))
+ return -EINVAL;
+
+ /* ISSUE: Note that "dev_addr" is now a pointer. */
+ memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
+
+ return 0;
+}
+
+
+/*
+ * Obtain the MAC address from the hypervisor.
+ * This must be done before opening the device.
+ */
+static int tile_net_get_mac(struct net_device *dev)
+{
+ struct tile_net_priv *priv = netdev_priv(dev);
+
+ char hv_dev_name[32];
+ int len;
+
+ __netio_getset_offset_t offset = { .word = NETIO_IPP_PARAM_OFF };
+
+ int ret;
+
+ /* For example, "xgbe0". */
+ strcpy(hv_dev_name, dev->name);
+ len = strlen(hv_dev_name);
+
+ /* For example, "xgbe/0". */
+ hv_dev_name[len] = hv_dev_name[len - 1];
+ hv_dev_name[len - 1] = '/';
+ len++;
+
+ /* For example, "xgbe/0/native_hash". */
+ strcpy(hv_dev_name + len, hash_default ? "/native_hash" : "/native");
+
+ /* Get the hypervisor handle for this device. */
+ priv->hv_devhdl = hv_dev_open((HV_VirtAddr)hv_dev_name, 0);
+ PDEBUG("hv_dev_open(%s) returned %d %p\n",
+ hv_dev_name, priv->hv_devhdl, &priv->hv_devhdl);
+ if (priv->hv_devhdl < 0) {
+ if (priv->hv_devhdl == HV_ENODEV)
+ printk(KERN_DEBUG "Ignoring unconfigured device %s\n",
+ hv_dev_name);
+ else
+ printk(KERN_DEBUG "hv_dev_open(%s) returned %d\n",
+ hv_dev_name, priv->hv_devhdl);
+ return -1;
+ }
+
+ /*
+ * Read the hardware address from the hypervisor.
+ * ISSUE: Note that "dev_addr" is now a pointer.
+ */
+ offset.bits.class = NETIO_PARAM;
+ offset.bits.addr = NETIO_PARAM_MAC;
+ ret = hv_dev_pread(priv->hv_devhdl, 0,
+ (HV_VirtAddr)dev->dev_addr, dev->addr_len,
+ offset.word);
+ PDEBUG("hv_dev_pread(NETIO_PARAM_MAC) returned %d\n", ret);
+ if (ret <= 0) {
+ printk(KERN_DEBUG "hv_dev_pread(NETIO_PARAM_MAC) %s failed\n",
+ dev->name);
+ /*
+ * Since the device is configured by the hypervisor but we
+ * can't get its MAC address, we are most likely running
+ * the simulator, so let's generate a random MAC address.
+ */
+ random_ether_addr(dev->dev_addr);
+ }
+
+ return 0;
+}
+
+
+static struct net_device_ops tile_net_ops = {
+ .ndo_open = tile_net_open,
+ .ndo_stop = tile_net_stop,
+ .ndo_start_xmit = tile_net_tx,
+ .ndo_do_ioctl = tile_net_ioctl,
+ .ndo_get_stats = tile_net_get_stats,
+ .ndo_change_mtu = tile_net_change_mtu,
+ .ndo_tx_timeout = tile_net_tx_timeout,
+ .ndo_set_mac_address = tile_net_set_mac_address
+};
+
+
+/*
+ * The setup function.
+ *
+ * This uses ether_setup() to assign various fields in dev, including
+ * setting IFF_BROADCAST and IFF_MULTICAST, then sets some extra fields.
+ */
+static void tile_net_setup(struct net_device *dev)
+{
+ PDEBUG("tile_net_setup()\n");
+
+ ether_setup(dev);
+
+ dev->netdev_ops = &tile_net_ops;
+
+ dev->watchdog_timeo = TILE_NET_TIMEOUT;
+
+ /* We want lockless xmit. */
+ dev->features |= NETIF_F_LLTX;
+
+ /* We support hardware tx checksums. */
+ dev->features |= NETIF_F_HW_CSUM;
+
+ /* We support scatter/gather. */
+ dev->features |= NETIF_F_SG;
+
+ /* We support TSO. */
+ dev->features |= NETIF_F_TSO;
+
+#ifdef TILE_NET_GSO
+ /* We support GSO. */
+ dev->features |= NETIF_F_GSO;
+#endif
+
+ if (hash_default)
+ dev->features |= NETIF_F_HIGHDMA;
+
+ /* ISSUE: We should support NETIF_F_UFO. */
+
+ dev->tx_queue_len = TILE_NET_TX_QUEUE_LEN;
+
+ dev->mtu = TILE_NET_MTU;
+}
+
+
+/*
+ * Allocate the device structure, register the device, and obtain the
+ * MAC address from the hypervisor.
+ */
+static struct net_device *tile_net_dev_init(const char *name)
+{
+ int ret;
+ struct net_device *dev;
+ struct tile_net_priv *priv;
+ struct page *page;
+
+ /*
+ * Allocate the device structure. This allocates "priv", calls
+ * tile_net_setup(), and saves "name". Normally, "name" is a
+ * template, instantiated by register_netdev(), but not for us.
+ */
+ dev = alloc_netdev(sizeof(*priv), name, tile_net_setup);
+ if (!dev) {
+ pr_err("alloc_netdev(%s) failed\n", name);
+ return NULL;
+ }
+
+ priv = netdev_priv(dev);
+
+ /* Initialize "priv". */
+
+ memset(priv, 0, sizeof(*priv));
+
+ /* Save "dev" for "tile_net_open_retry()". */
+ priv->dev = dev;
+
+ INIT_DELAYED_WORK(&priv->retry_work, tile_net_open_retry);
+
+ spin_lock_init(&priv->cmd_lock);
+ spin_lock_init(&priv->comp_lock);
+
+ /* Allocate "epp_queue". */
+ BUG_ON(get_order(sizeof(lepp_queue_t)) != 0);
+ page = alloc_pages(GFP_KERNEL | __GFP_ZERO, 0);
+ if (!page) {
+ free_netdev(dev);
+ return NULL;
+ }
+ priv->epp_queue = page_address(page);
+
+ /* Register the network device. */
+ ret = register_netdev(dev);
+ if (ret) {
+ pr_err("register_netdev %s failed %d\n", dev->name, ret);
+ free_page((unsigned long)priv->epp_queue);
+ free_netdev(dev);
+ return NULL;
+ }
+
+ /* Get the MAC address. */
+ ret = tile_net_get_mac(dev);
+ if (ret < 0) {
+ unregister_netdev(dev);
+ free_page((unsigned long)priv->epp_queue);
+ free_netdev(dev);
+ return NULL;
+ }
+
+ return dev;
+}
+
+
+/*
+ * Module cleanup.
+ */
+static void tile_net_cleanup(void)
+{
+ int i;
+
+ for (i = 0; i < TILE_NET_DEVS; i++) {
+ if (tile_net_devs[i]) {
+ struct net_device *dev = tile_net_devs[i];
+ struct tile_net_priv *priv = netdev_priv(dev);
+ unregister_netdev(dev);
+ finv_buffer(priv->epp_queue, PAGE_SIZE);
+ free_page((unsigned long)priv->epp_queue);
+ free_netdev(dev);
+ }
+ }
+}
+
+
+/*
+ * Module initialization.
+ */
+static int tile_net_init_module(void)
+{
+ pr_info("Tilera IPP Net Driver\n");
+
+ tile_net_devs[0] = tile_net_dev_init("xgbe0");
+ tile_net_devs[1] = tile_net_dev_init("xgbe1");
+ tile_net_devs[2] = tile_net_dev_init("gbe0");
+ tile_net_devs[3] = tile_net_dev_init("gbe1");
+
+ return 0;
+}
+
+
+#ifndef MODULE
+/*
+ * The "network_cpus" boot argument specifies the cpus that are dedicated
+ * to handle ingress packets.
+ *
+ * The parameter should be in the form "network_cpus=m-n[,x-y]", where
+ * m, n, x, y are integer numbers that represent the cpus that can be
+ * neither a dedicated cpu nor a dataplane cpu.
+ */
+static int __init network_cpus_setup(char *str)
+{
+ int rc = cpulist_parse_crop(str, &network_cpus_map);
+ if (rc != 0) {
+ pr_warning("network_cpus=%s: malformed cpu list\n",
+ str);
+ } else {
+
+ /* Remove dedicated cpus. */
+ cpumask_and(&network_cpus_map, &network_cpus_map,
+ cpu_possible_mask);
+
+
+ if (cpumask_empty(&network_cpus_map)) {
+ pr_warning("Ignoring network_cpus='%s'.\n",
+ str);
+ } else {
+ char buf[1024];
+ cpulist_scnprintf(buf, sizeof(buf), &network_cpus_map);
+ pr_info("Linux network CPUs: %s\n", buf);
+ network_cpus_used = true;
+ }
+ }
+
+ return 0;
+}
+__setup("network_cpus=", network_cpus_setup);
+#endif
+
+
+module_init(tile_net_init_module);
+module_exit(tile_net_cleanup);
diff --git a/drivers/net/tokenring/tms380tr.c b/drivers/net/tokenring/tms380tr.c
index 663b8860a531..793020347e54 100644
--- a/drivers/net/tokenring/tms380tr.c
+++ b/drivers/net/tokenring/tms380tr.c
@@ -1220,7 +1220,7 @@ void tms380tr_wait(unsigned long time)
tmp = schedule_timeout_interruptible(tmp);
} while(time_after(tmp, jiffies));
#else
- udelay(time);
+ mdelay(time / 1000);
#endif
}
diff --git a/drivers/net/tulip/de2104x.c b/drivers/net/tulip/de2104x.c
index 28e1ffb13db9..c78a50586c1d 100644
--- a/drivers/net/tulip/de2104x.c
+++ b/drivers/net/tulip/de2104x.c
@@ -2021,7 +2021,6 @@ static int __devinit de_init_one (struct pci_dev *pdev,
de->media_timer.data = (unsigned long) de;
netif_carrier_off(dev);
- netif_stop_queue(dev);
/* wake up device, assign resources */
rc = pci_enable_device(pdev);
diff --git a/drivers/net/typhoon.c b/drivers/net/typhoon.c
index 1cc67138adbf..5b83c3f35f47 100644
--- a/drivers/net/typhoon.c
+++ b/drivers/net/typhoon.c
@@ -24,10 +24,6 @@
3XP Processor. It has been tested on x86 and sparc64.
KNOWN ISSUES:
- *) The current firmware always strips the VLAN tag off, even if
- we tell it not to. You should filter VLANs at the switch
- as a workaround (good practice in any event) until we can
- get this fixed.
*) Cannot DMA Rx packets to a 2 byte aligned address. Also firmware
issue. Hopefully 3Com will fix it.
*) Waiting for a command response takes 8ms due to non-preemptable
@@ -280,8 +276,6 @@ struct typhoon {
struct pci_dev * pdev;
struct net_device * dev;
struct napi_struct napi;
- spinlock_t state_lock;
- struct vlan_group * vlgrp;
struct basic_ring rxHiRing;
struct basic_ring rxBuffRing;
struct rxbuff_ent rxbuffers[RXENT_ENTRIES];
@@ -695,44 +689,6 @@ out:
return err;
}
-static void
-typhoon_vlan_rx_register(struct net_device *dev, struct vlan_group *grp)
-{
- struct typhoon *tp = netdev_priv(dev);
- struct cmd_desc xp_cmd;
- int err;
-
- spin_lock_bh(&tp->state_lock);
- if(!tp->vlgrp != !grp) {
- /* We've either been turned on for the first time, or we've
- * been turned off. Update the 3XP.
- */
- if(grp)
- tp->offload |= TYPHOON_OFFLOAD_VLAN;
- else
- tp->offload &= ~TYPHOON_OFFLOAD_VLAN;
-
- /* If the interface is up, the runtime is running -- and we
- * must be up for the vlan core to call us.
- *
- * Do the command outside of the spin lock, as it is slow.
- */
- INIT_COMMAND_WITH_RESPONSE(&xp_cmd,
- TYPHOON_CMD_SET_OFFLOAD_TASKS);
- xp_cmd.parm2 = tp->offload;
- xp_cmd.parm3 = tp->offload;
- spin_unlock_bh(&tp->state_lock);
- err = typhoon_issue_command(tp, 1, &xp_cmd, 0, NULL);
- if(err < 0)
- netdev_err(tp->dev, "vlan offload error %d\n", -err);
- spin_lock_bh(&tp->state_lock);
- }
-
- /* now make the change visible */
- tp->vlgrp = grp;
- spin_unlock_bh(&tp->state_lock);
-}
-
static inline void
typhoon_tso_fill(struct sk_buff *skb, struct transmit_ring *txRing,
u32 ring_dma)
@@ -818,7 +774,7 @@ typhoon_start_tx(struct sk_buff *skb, struct net_device *dev)
first_txd->processFlags |=
TYPHOON_TX_PF_INSERT_VLAN | TYPHOON_TX_PF_VLAN_PRIORITY;
first_txd->processFlags |=
- cpu_to_le32(ntohs(vlan_tx_tag_get(skb)) <<
+ cpu_to_le32(htons(vlan_tx_tag_get(skb)) <<
TYPHOON_TX_PF_VLAN_TAG_SHIFT);
}
@@ -936,7 +892,7 @@ typhoon_set_rx_mode(struct net_device *dev)
filter |= TYPHOON_RX_FILTER_MCAST_HASH;
}
- INIT_COMMAND_NO_RESPONSE(&xp_cmd, TYPHOON_CMD_SET_RX_FILTER);
+ INIT_COMMAND_WITH_RESPONSE(&xp_cmd, TYPHOON_CMD_SET_RX_FILTER);
xp_cmd.parm1 = filter;
typhoon_issue_command(tp, 1, &xp_cmd, 0, NULL);
}
@@ -1198,6 +1154,20 @@ typhoon_get_rx_csum(struct net_device *dev)
return 1;
}
+static int
+typhoon_set_flags(struct net_device *dev, u32 data)
+{
+ /* There's no way to turn off the RX VLAN offloading and stripping
+ * on the current 3XP firmware -- it does not respect the offload
+ * settings -- so we only allow the user to toggle the TX processing.
+ */
+ if (!(data & ETH_FLAG_RXVLAN))
+ return -EINVAL;
+
+ return ethtool_op_set_flags(dev, data,
+ ETH_FLAG_RXVLAN | ETH_FLAG_TXVLAN);
+}
+
static void
typhoon_get_ringparam(struct net_device *dev, struct ethtool_ringparam *ering)
{
@@ -1224,6 +1194,8 @@ static const struct ethtool_ops typhoon_ethtool_ops = {
.set_sg = ethtool_op_set_sg,
.set_tso = ethtool_op_set_tso,
.get_ringparam = typhoon_get_ringparam,
+ .set_flags = typhoon_set_flags,
+ .get_flags = ethtool_op_get_flags,
};
static int
@@ -1309,9 +1281,9 @@ typhoon_init_interface(struct typhoon *tp)
tp->offload = TYPHOON_OFFLOAD_IP_CHKSUM | TYPHOON_OFFLOAD_TCP_CHKSUM;
tp->offload |= TYPHOON_OFFLOAD_UDP_CHKSUM | TSO_OFFLOAD_ON;
+ tp->offload |= TYPHOON_OFFLOAD_VLAN;
spin_lock_init(&tp->command_lock);
- spin_lock_init(&tp->state_lock);
/* Force the writes to the shared memory area out before continuing. */
wmb();
@@ -1328,7 +1300,7 @@ typhoon_init_rings(struct typhoon *tp)
tp->rxHiRing.lastWrite = 0;
tp->rxBuffRing.lastWrite = 0;
tp->cmdRing.lastWrite = 0;
- tp->cmdRing.lastWrite = 0;
+ tp->respRing.lastWrite = 0;
tp->txLoRing.lastRead = 0;
tp->txHiRing.lastRead = 0;
@@ -1762,13 +1734,10 @@ typhoon_rx(struct typhoon *tp, struct basic_ring *rxRing, volatile __le32 * read
} else
skb_checksum_none_assert(new_skb);
- spin_lock(&tp->state_lock);
- if(tp->vlgrp != NULL && rx->rxStatus & TYPHOON_RX_VLAN)
- vlan_hwaccel_receive_skb(new_skb, tp->vlgrp,
- ntohl(rx->vlanTag) & 0xffff);
- else
- netif_receive_skb(new_skb);
- spin_unlock(&tp->state_lock);
+ if (rx->rxStatus & TYPHOON_RX_VLAN)
+ __vlan_hwaccel_put_tag(new_skb,
+ ntohl(rx->vlanTag) & 0xffff);
+ netif_receive_skb(new_skb);
received++;
budget--;
@@ -1989,11 +1958,9 @@ typhoon_start_runtime(struct typhoon *tp)
goto error_out;
INIT_COMMAND_NO_RESPONSE(&xp_cmd, TYPHOON_CMD_SET_OFFLOAD_TASKS);
- spin_lock_bh(&tp->state_lock);
xp_cmd.parm2 = tp->offload;
xp_cmd.parm3 = tp->offload;
err = typhoon_issue_command(tp, 1, &xp_cmd, 0, NULL);
- spin_unlock_bh(&tp->state_lock);
if(err < 0)
goto error_out;
@@ -2231,13 +2198,9 @@ typhoon_suspend(struct pci_dev *pdev, pm_message_t state)
if(!netif_running(dev))
return 0;
- spin_lock_bh(&tp->state_lock);
- if(tp->vlgrp && tp->wol_events & TYPHOON_WAKE_MAGIC_PKT) {
- spin_unlock_bh(&tp->state_lock);
- netdev_err(dev, "cannot do WAKE_MAGIC with VLANS\n");
- return -EBUSY;
- }
- spin_unlock_bh(&tp->state_lock);
+ /* TYPHOON_OFFLOAD_VLAN is always on now, so this doesn't work */
+ if(tp->wol_events & TYPHOON_WAKE_MAGIC_PKT)
+ netdev_warn(dev, "cannot do WAKE_MAGIC with VLAN offloading\n");
netif_device_detach(dev);
@@ -2338,7 +2301,6 @@ static const struct net_device_ops typhoon_netdev_ops = {
.ndo_validate_addr = eth_validate_addr,
.ndo_set_mac_address = typhoon_set_mac_address,
.ndo_change_mtu = eth_change_mtu,
- .ndo_vlan_rx_register = typhoon_vlan_rx_register,
};
static int __devinit
diff --git a/drivers/net/ucc_geth.c b/drivers/net/ucc_geth.c
index a4c3f5708246..acbdab3d66ca 100644
--- a/drivers/net/ucc_geth.c
+++ b/drivers/net/ucc_geth.c
@@ -2050,12 +2050,16 @@ static void ucc_geth_stop(struct ucc_geth_private *ugeth)
ugeth_vdbg("%s: IN", __func__);
+ /*
+ * Tell the kernel the link is down.
+ * Must be done before disabling the controller
+ * or deadlock may happen.
+ */
+ phy_stop(phydev);
+
/* Disable the controller */
ugeth_disable(ugeth, COMM_DIR_RX_AND_TX);
- /* Tell the kernel the link is down */
- phy_stop(phydev);
-
/* Mask all interrupts */
out_be32(ugeth->uccf->p_uccm, 0x00000000);
@@ -2065,9 +2069,6 @@ static void ucc_geth_stop(struct ucc_geth_private *ugeth)
/* Disable Rx and Tx */
clrbits32(&ug_regs->maccfg1, MACCFG1_ENABLE_RX | MACCFG1_ENABLE_TX);
- phy_disconnect(ugeth->phydev);
- ugeth->phydev = NULL;
-
ucc_geth_memclean(ugeth);
}
@@ -3550,7 +3551,10 @@ static int ucc_geth_close(struct net_device *dev)
napi_disable(&ugeth->napi);
+ cancel_work_sync(&ugeth->timeout_work);
ucc_geth_stop(ugeth);
+ phy_disconnect(ugeth->phydev);
+ ugeth->phydev = NULL;
free_irq(ugeth->ug_info->uf_info.irq, ugeth->ndev);
@@ -3579,8 +3583,12 @@ static void ucc_geth_timeout_work(struct work_struct *work)
* Must reset MAC *and* PHY. This is done by reopening
* the device.
*/
- ucc_geth_close(dev);
- ucc_geth_open(dev);
+ netif_tx_stop_all_queues(dev);
+ ucc_geth_stop(ugeth);
+ ucc_geth_init_mac(ugeth);
+ /* Must start PHY here */
+ phy_start(ugeth->phydev);
+ netif_tx_start_all_queues(dev);
}
netif_tx_schedule_all(dev);
@@ -3594,7 +3602,6 @@ static void ucc_geth_timeout(struct net_device *dev)
{
struct ucc_geth_private *ugeth = netdev_priv(dev);
- netif_carrier_off(dev);
schedule_work(&ugeth->timeout_work);
}
diff --git a/drivers/net/ucc_geth.h b/drivers/net/ucc_geth.h
index 05a95586f3c5..055b87ab4f07 100644
--- a/drivers/net/ucc_geth.h
+++ b/drivers/net/ucc_geth.h
@@ -899,7 +899,8 @@ struct ucc_geth_hardware_statistics {
#define UCC_GETH_UTFS_INIT 512 /* Tx virtual FIFO size
*/
#define UCC_GETH_UTFET_INIT 256 /* 1/2 utfs */
-#define UCC_GETH_UTFTT_INIT 512
+#define UCC_GETH_UTFTT_INIT 256 /* 1/2 utfs
+ due to errata */
/* Gigabit Ethernet (1000 Mbps) */
#define UCC_GETH_URFS_GIGA_INIT 4096/*2048*/ /* Rx virtual
FIFO size */
diff --git a/drivers/net/usb/hso.c b/drivers/net/usb/hso.c
index b154a94de03e..62e9e8dc8190 100644
--- a/drivers/net/usb/hso.c
+++ b/drivers/net/usb/hso.c
@@ -2994,12 +2994,14 @@ static int hso_probe(struct usb_interface *interface,
case HSO_INTF_BULK:
/* It's a regular bulk interface */
- if (((port_spec & HSO_PORT_MASK) == HSO_PORT_NETWORK) &&
- !disable_net)
- hso_dev = hso_create_net_device(interface, port_spec);
- else
+ if ((port_spec & HSO_PORT_MASK) == HSO_PORT_NETWORK) {
+ if (!disable_net)
+ hso_dev =
+ hso_create_net_device(interface, port_spec);
+ } else {
hso_dev =
hso_create_bulk_serial_device(interface, port_spec);
+ }
if (!hso_dev)
goto exit;
break;
diff --git a/drivers/net/usb/usbnet.c b/drivers/net/usb/usbnet.c
index ca7fc9df1ccf..c04d49e31f81 100644
--- a/drivers/net/usb/usbnet.c
+++ b/drivers/net/usb/usbnet.c
@@ -45,6 +45,7 @@
#include <linux/usb/usbnet.h>
#include <linux/slab.h>
#include <linux/kernel.h>
+#include <linux/pm_runtime.h>
#define DRIVER_VERSION "22-Aug-2005"
@@ -1273,6 +1274,16 @@ usbnet_probe (struct usb_interface *udev, const struct usb_device_id *prod)
struct usb_device *xdev;
int status;
const char *name;
+ struct usb_driver *driver = to_usb_driver(udev->dev.driver);
+
+ /* usbnet already took usb runtime pm, so have to enable the feature
+ * for usb interface, otherwise usb_autopm_get_interface may return
+ * failure if USB_SUSPEND(RUNTIME_PM) is enabled.
+ */
+ if (!driver->supports_autosuspend) {
+ driver->supports_autosuspend = 1;
+ pm_runtime_enable(&udev->dev);
+ }
name = udev->dev.driver->name;
info = (struct driver_info *) prod->driver_info;
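The probe-time hunk above flips supports_autosuspend on the minidriver's usb_driver and calls pm_runtime_enable() on the interface, so that later autopm acquisitions do not fail outright when runtime PM (CONFIG_USB_SUSPEND at the time) is enabled. A sketch of the acquire/release pairing usbnet then relies on; example_tx_wakeup() is a hypothetical caller, while the usb_autopm_* helpers are the real USB core API:

static int example_tx_wakeup(struct usb_interface *intf)
{
	int ret;

	ret = usb_autopm_get_interface(intf);   /* resumes the device if it is suspended */
	if (ret < 0)
		return ret;                     /* fails unconditionally without pm_runtime_enable() */

	/* ... submit URBs, kick the tx queue ... */

	usb_autopm_put_interface(intf);         /* drop the usage count, allow autosuspend again */
	return 0;
}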
diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c
index bb6b67f6b0cc..b6d402806ae6 100644
--- a/drivers/net/virtio_net.c
+++ b/drivers/net/virtio_net.c
@@ -986,9 +986,15 @@ static int virtnet_probe(struct virtio_device *vdev)
goto unregister;
}
- vi->status = VIRTIO_NET_S_LINK_UP;
- virtnet_update_status(vi);
- netif_carrier_on(dev);
+ /* Assume link up if device can't report link status,
+ otherwise get link status from config. */
+ if (virtio_has_feature(vi->vdev, VIRTIO_NET_F_STATUS)) {
+ netif_carrier_off(dev);
+ virtnet_update_status(vi);
+ } else {
+ vi->status = VIRTIO_NET_S_LINK_UP;
+ netif_carrier_on(dev);
+ }
pr_debug("virtnet: registered device %s\n", dev->name);
return 0;
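The probe hunk above only assumes the link is up when the device does not advertise VIRTIO_NET_F_STATUS; otherwise the carrier starts off and virtnet_update_status() reads the real state from config space, so a device that comes up with the link down is no longer reported as up. A rough sketch of what that status read looks like in drivers of this vintage; the actual virtnet_update_status() is not shown in the hunk, and the field names are assumptions:

static void example_update_status(struct virtnet_info *vi)
{
	u16 status;

	/* read the status word from virtio config space */
	vi->vdev->config->get(vi->vdev,
			      offsetof(struct virtio_net_config, status),
			      &status, sizeof(status));

	if (status & VIRTIO_NET_S_LINK_UP) {
		netif_carrier_on(vi->dev);
		netif_wake_queue(vi->dev);
	} else {
		netif_carrier_off(vi->dev);
		netif_stop_queue(vi->dev);
	}
	vi->status = status;
}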
diff --git a/drivers/net/vmxnet3/upt1_defs.h b/drivers/net/vmxnet3/upt1_defs.h
index 37108fb226d3..969c751ee404 100644
--- a/drivers/net/vmxnet3/upt1_defs.h
+++ b/drivers/net/vmxnet3/upt1_defs.h
@@ -88,9 +88,9 @@ struct UPT1_RSSConf {
/* features */
enum {
- UPT1_F_RXCSUM = 0x0001, /* rx csum verification */
- UPT1_F_RSS = 0x0002,
- UPT1_F_RXVLAN = 0x0004, /* VLAN tag stripping */
- UPT1_F_LRO = 0x0008,
+ UPT1_F_RXCSUM = cpu_to_le64(0x0001), /* rx csum verification */
+ UPT1_F_RSS = cpu_to_le64(0x0002),
+ UPT1_F_RXVLAN = cpu_to_le64(0x0004), /* VLAN tag stripping */
+ UPT1_F_LRO = cpu_to_le64(0x0008),
};
#endif
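Declaring the UPT1_F_* constants (and, below, the VMXNET3_PM_WAKEUP_* constants) as already little-endian is what lets the later hunks drop the set_flag_le64()/reset_flag_le64() helpers and use plain |= and &= on the __le64 fields of the device-shared area. A small sketch of the before/after pattern, assuming a __le64 features field as in the vmxnet3 shared structures:

__le64 features = 0;

/* old style, via the removed helpers: swap to CPU order, OR, swap back */
features = cpu_to_le64(le64_to_cpu(features) | 0x0001ULL);

/* new style: the constant itself is stored byte-swapped, so a plain OR
 * keeps the whole expression in __le64 space and avoids two swaps */
features |= UPT1_F_RXCSUM;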
diff --git a/drivers/net/vmxnet3/vmxnet3_defs.h b/drivers/net/vmxnet3/vmxnet3_defs.h
index ca7727b940ad..4d84912c99ba 100644
--- a/drivers/net/vmxnet3/vmxnet3_defs.h
+++ b/drivers/net/vmxnet3/vmxnet3_defs.h
@@ -523,9 +523,9 @@ struct Vmxnet3_RxFilterConf {
#define VMXNET3_PM_MAX_PATTERN_SIZE 128
#define VMXNET3_PM_MAX_MASK_SIZE (VMXNET3_PM_MAX_PATTERN_SIZE / 8)
-#define VMXNET3_PM_WAKEUP_MAGIC 0x01 /* wake up on magic pkts */
-#define VMXNET3_PM_WAKEUP_FILTER 0x02 /* wake up on pkts matching
- * filters */
+#define VMXNET3_PM_WAKEUP_MAGIC cpu_to_le16(0x01) /* wake up on magic pkts */
+#define VMXNET3_PM_WAKEUP_FILTER cpu_to_le16(0x02) /* wake up on pkts matching
+ * filters */
struct Vmxnet3_PM_PktFilter {
diff --git a/drivers/net/vmxnet3/vmxnet3_drv.c b/drivers/net/vmxnet3/vmxnet3_drv.c
index 3f60e0e3097b..21314e06e6d7 100644
--- a/drivers/net/vmxnet3/vmxnet3_drv.c
+++ b/drivers/net/vmxnet3/vmxnet3_drv.c
@@ -873,7 +873,7 @@ vmxnet3_tq_xmit(struct sk_buff *skb, struct vmxnet3_tx_queue *tq,
count = VMXNET3_TXD_NEEDED(skb_headlen(skb)) +
skb_shinfo(skb)->nr_frags + 1;
- ctx.ipv4 = (skb->protocol == __constant_ntohs(ETH_P_IP));
+ ctx.ipv4 = (skb->protocol == cpu_to_be16(ETH_P_IP));
ctx.mss = skb_shinfo(skb)->gso_size;
if (ctx.mss) {
@@ -1563,8 +1563,7 @@ vmxnet3_vlan_rx_register(struct net_device *netdev, struct vlan_group *grp)
adapter->vlan_grp = grp;
/* update FEATURES to device */
- set_flag_le64(&devRead->misc.uptFeatures,
- UPT1_F_RXVLAN);
+ devRead->misc.uptFeatures |= UPT1_F_RXVLAN;
VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD,
VMXNET3_CMD_UPDATE_FEATURE);
/*
@@ -1587,7 +1586,7 @@ vmxnet3_vlan_rx_register(struct net_device *netdev, struct vlan_group *grp)
struct Vmxnet3_DSDevRead *devRead = &shared->devRead;
adapter->vlan_grp = NULL;
- if (le64_to_cpu(devRead->misc.uptFeatures) & UPT1_F_RXVLAN) {
+ if (devRead->misc.uptFeatures & UPT1_F_RXVLAN) {
int i;
for (i = 0; i < VMXNET3_VFT_SIZE; i++) {
@@ -1600,8 +1599,7 @@ vmxnet3_vlan_rx_register(struct net_device *netdev, struct vlan_group *grp)
VMXNET3_CMD_UPDATE_VLAN_FILTERS);
/* update FEATURES to device */
- reset_flag_le64(&devRead->misc.uptFeatures,
- UPT1_F_RXVLAN);
+ devRead->misc.uptFeatures &= ~UPT1_F_RXVLAN;
VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD,
VMXNET3_CMD_UPDATE_FEATURE);
}
@@ -1762,15 +1760,15 @@ vmxnet3_setup_driver_shared(struct vmxnet3_adapter *adapter)
/* set up feature flags */
if (adapter->rxcsum)
- set_flag_le64(&devRead->misc.uptFeatures, UPT1_F_RXCSUM);
+ devRead->misc.uptFeatures |= UPT1_F_RXCSUM;
if (adapter->lro) {
- set_flag_le64(&devRead->misc.uptFeatures, UPT1_F_LRO);
+ devRead->misc.uptFeatures |= UPT1_F_LRO;
devRead->misc.maxNumRxSG = cpu_to_le16(1 + MAX_SKB_FRAGS);
}
if ((adapter->netdev->features & NETIF_F_HW_VLAN_RX) &&
adapter->vlan_grp) {
- set_flag_le64(&devRead->misc.uptFeatures, UPT1_F_RXVLAN);
+ devRead->misc.uptFeatures |= UPT1_F_RXVLAN;
}
devRead->misc.mtu = cpu_to_le32(adapter->netdev->mtu);
@@ -2577,7 +2575,7 @@ vmxnet3_suspend(struct device *device)
memcpy(pmConf->filters[i].pattern, netdev->dev_addr, ETH_ALEN);
pmConf->filters[i].mask[0] = 0x3F; /* LSB ETH_ALEN bits */
- set_flag_le16(&pmConf->wakeUpEvents, VMXNET3_PM_WAKEUP_FILTER);
+ pmConf->wakeUpEvents |= VMXNET3_PM_WAKEUP_FILTER;
i++;
}
@@ -2619,13 +2617,13 @@ vmxnet3_suspend(struct device *device)
pmConf->filters[i].mask[5] = 0x03; /* IPv4 TIP */
in_dev_put(in_dev);
- set_flag_le16(&pmConf->wakeUpEvents, VMXNET3_PM_WAKEUP_FILTER);
+ pmConf->wakeUpEvents |= VMXNET3_PM_WAKEUP_FILTER;
i++;
}
skip_arp:
if (adapter->wol & WAKE_MAGIC)
- set_flag_le16(&pmConf->wakeUpEvents, VMXNET3_PM_WAKEUP_MAGIC);
+ pmConf->wakeUpEvents |= VMXNET3_PM_WAKEUP_MAGIC;
pmConf->numFilters = i;
@@ -2667,7 +2665,7 @@ vmxnet3_resume(struct device *device)
adapter->shared->devRead.pmConfDesc.confVer = cpu_to_le32(1);
adapter->shared->devRead.pmConfDesc.confLen = cpu_to_le32(sizeof(
*pmConf));
- adapter->shared->devRead.pmConfDesc.confPA = cpu_to_le32(virt_to_phys(
+ adapter->shared->devRead.pmConfDesc.confPA = cpu_to_le64(virt_to_phys(
pmConf));
netif_device_attach(netdev);
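Besides the switch to direct bit operations, the suspend/resume hunks fix confPA: it is a 64-bit little-endian physical-address field, and wrapping virt_to_phys() in cpu_to_le32() truncated the upper 32 bits on machines where the configuration block sits above 4 GB. A two-line sketch of the corrected conversion (pmConf as in the hunk):

/* confPA is a __le64 field; cpu_to_le32() would silently drop bits 32..63 */
u64 pa = virt_to_phys(pmConf);
adapter->shared->devRead.pmConfDesc.confPA = cpu_to_le64(pa);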
diff --git a/drivers/net/vmxnet3/vmxnet3_ethtool.c b/drivers/net/vmxnet3/vmxnet3_ethtool.c
index 7e4b5a89165a..b79070bcc92e 100644
--- a/drivers/net/vmxnet3/vmxnet3_ethtool.c
+++ b/drivers/net/vmxnet3/vmxnet3_ethtool.c
@@ -50,13 +50,11 @@ vmxnet3_set_rx_csum(struct net_device *netdev, u32 val)
adapter->rxcsum = val;
if (netif_running(netdev)) {
if (val)
- set_flag_le64(
- &adapter->shared->devRead.misc.uptFeatures,
- UPT1_F_RXCSUM);
+ adapter->shared->devRead.misc.uptFeatures |=
+ UPT1_F_RXCSUM;
else
- reset_flag_le64(
- &adapter->shared->devRead.misc.uptFeatures,
- UPT1_F_RXCSUM);
+ adapter->shared->devRead.misc.uptFeatures &=
+ ~UPT1_F_RXCSUM;
VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD,
VMXNET3_CMD_UPDATE_FEATURE);
@@ -292,10 +290,10 @@ vmxnet3_set_flags(struct net_device *netdev, u32 data)
/* update hardware LRO capability accordingly */
if (lro_requested)
adapter->shared->devRead.misc.uptFeatures |=
- cpu_to_le64(UPT1_F_LRO);
+ UPT1_F_LRO;
else
adapter->shared->devRead.misc.uptFeatures &=
- cpu_to_le64(~UPT1_F_LRO);
+ ~UPT1_F_LRO;
VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD,
VMXNET3_CMD_UPDATE_FEATURE);
}
diff --git a/drivers/net/vmxnet3/vmxnet3_int.h b/drivers/net/vmxnet3/vmxnet3_int.h
index c88ea5cbba0d..edf228843afc 100644
--- a/drivers/net/vmxnet3/vmxnet3_int.h
+++ b/drivers/net/vmxnet3/vmxnet3_int.h
@@ -301,8 +301,8 @@ struct vmxnet3_adapter {
struct net_device *netdev;
struct pci_dev *pdev;
- u8 *hw_addr0; /* for BAR 0 */
- u8 *hw_addr1; /* for BAR 1 */
+ u8 __iomem *hw_addr0; /* for BAR 0 */
+ u8 __iomem *hw_addr1; /* for BAR 1 */
/* feature control */
bool rxcsum;
@@ -330,14 +330,14 @@ struct vmxnet3_adapter {
};
#define VMXNET3_WRITE_BAR0_REG(adapter, reg, val) \
- writel(cpu_to_le32(val), (adapter)->hw_addr0 + (reg))
+ writel((val), (adapter)->hw_addr0 + (reg))
#define VMXNET3_READ_BAR0_REG(adapter, reg) \
- le32_to_cpu(readl((adapter)->hw_addr0 + (reg)))
+ readl((adapter)->hw_addr0 + (reg))
#define VMXNET3_WRITE_BAR1_REG(adapter, reg, val) \
- writel(cpu_to_le32(val), (adapter)->hw_addr1 + (reg))
+ writel((val), (adapter)->hw_addr1 + (reg))
#define VMXNET3_READ_BAR1_REG(adapter, reg) \
- le32_to_cpu(readl((adapter)->hw_addr1 + (reg)))
+ readl((adapter)->hw_addr1 + (reg))
#define VMXNET3_WAKE_QUEUE_THRESHOLD(tq) (5)
#define VMXNET3_RX_ALLOC_THRESHOLD(rq, ring_idx, adapter) \
@@ -353,21 +353,6 @@ struct vmxnet3_adapter {
#define VMXNET3_MAX_ETH_HDR_SIZE 22
#define VMXNET3_MAX_SKB_BUF_SIZE (3*1024)
-static inline void set_flag_le16(__le16 *data, u16 flag)
-{
- *data = cpu_to_le16(le16_to_cpu(*data) | flag);
-}
-
-static inline void set_flag_le64(__le64 *data, u64 flag)
-{
- *data = cpu_to_le64(le64_to_cpu(*data) | flag);
-}
-
-static inline void reset_flag_le64(__le64 *data, u64 flag)
-{
- *data = cpu_to_le64(le64_to_cpu(*data) & ~flag);
-}
-
int
vmxnet3_quiesce_dev(struct vmxnet3_adapter *adapter);
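Two independent cleanups land in this header: the BAR pointers gain __iomem so sparse can check MMIO accesses, and the register-access macros drop their explicit byte-swaps because writel()/readl() are already defined to perform little-endian accesses; keeping both swaps would have corrupted register values on big-endian hosts. A small sketch, with example_* as placeholder names:

static inline void example_write_reg(u8 __iomem *bar, u32 reg, u32 val)
{
	writel(val, bar + reg);   /* writel() stores little-endian on the bus */
}

static inline u32 example_read_reg(u8 __iomem *bar, u32 reg)
{
	return readl(bar + reg);  /* readl() returns the value in CPU byte order */
}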
diff --git a/drivers/net/vxge/vxge-config.c b/drivers/net/vxge/vxge-config.c
index 0e6db5935609..906a3ca3676b 100644
--- a/drivers/net/vxge/vxge-config.c
+++ b/drivers/net/vxge/vxge-config.c
@@ -20,6 +20,179 @@
#include "vxge-traffic.h"
#include "vxge-config.h"
+static enum vxge_hw_status
+__vxge_hw_fifo_create(
+ struct __vxge_hw_vpath_handle *vpath_handle,
+ struct vxge_hw_fifo_attr *attr);
+
+static enum vxge_hw_status
+__vxge_hw_fifo_abort(
+ struct __vxge_hw_fifo *fifoh);
+
+static enum vxge_hw_status
+__vxge_hw_fifo_reset(
+ struct __vxge_hw_fifo *ringh);
+
+static enum vxge_hw_status
+__vxge_hw_fifo_delete(
+ struct __vxge_hw_vpath_handle *vpath_handle);
+
+static struct __vxge_hw_blockpool_entry *
+__vxge_hw_blockpool_block_allocate(struct __vxge_hw_device *hldev,
+ u32 size);
+
+static void
+__vxge_hw_blockpool_block_free(struct __vxge_hw_device *hldev,
+ struct __vxge_hw_blockpool_entry *entry);
+
+static void vxge_hw_blockpool_block_add(struct __vxge_hw_device *devh,
+ void *block_addr,
+ u32 length,
+ struct pci_dev *dma_h,
+ struct pci_dev *acc_handle);
+
+static enum vxge_hw_status
+__vxge_hw_blockpool_create(struct __vxge_hw_device *hldev,
+ struct __vxge_hw_blockpool *blockpool,
+ u32 pool_size,
+ u32 pool_max);
+
+static void
+__vxge_hw_blockpool_destroy(struct __vxge_hw_blockpool *blockpool);
+
+static void *
+__vxge_hw_blockpool_malloc(struct __vxge_hw_device *hldev,
+ u32 size,
+ struct vxge_hw_mempool_dma *dma_object);
+
+static void
+__vxge_hw_blockpool_free(struct __vxge_hw_device *hldev,
+ void *memblock,
+ u32 size,
+ struct vxge_hw_mempool_dma *dma_object);
+
+
+static struct __vxge_hw_channel*
+__vxge_hw_channel_allocate(struct __vxge_hw_vpath_handle *vph,
+ enum __vxge_hw_channel_type type, u32 length,
+ u32 per_dtr_space, void *userdata);
+
+static void
+__vxge_hw_channel_free(
+ struct __vxge_hw_channel *channel);
+
+static enum vxge_hw_status
+__vxge_hw_channel_initialize(
+ struct __vxge_hw_channel *channel);
+
+static enum vxge_hw_status
+__vxge_hw_channel_reset(
+ struct __vxge_hw_channel *channel);
+
+static enum vxge_hw_status __vxge_hw_ring_delete(struct __vxge_hw_vpath_handle *vp);
+
+static enum vxge_hw_status
+__vxge_hw_device_fifo_config_check(struct vxge_hw_fifo_config *fifo_config);
+
+static enum vxge_hw_status
+__vxge_hw_device_config_check(struct vxge_hw_device_config *new_config);
+
+static void
+__vxge_hw_device_id_get(struct __vxge_hw_device *hldev);
+
+static void
+__vxge_hw_device_host_info_get(struct __vxge_hw_device *hldev);
+
+static enum vxge_hw_status
+__vxge_hw_vpath_card_info_get(
+ u32 vp_id,
+ struct vxge_hw_vpath_reg __iomem *vpath_reg,
+ struct vxge_hw_device_hw_info *hw_info);
+
+static enum vxge_hw_status
+__vxge_hw_device_initialize(struct __vxge_hw_device *hldev);
+
+static void
+__vxge_hw_device_pci_e_init(struct __vxge_hw_device *hldev);
+
+static enum vxge_hw_status
+__vxge_hw_device_reg_addr_get(struct __vxge_hw_device *hldev);
+
+static enum vxge_hw_status
+__vxge_hw_device_register_poll(
+ void __iomem *reg,
+ u64 mask, u32 max_millis);
+
+static inline enum vxge_hw_status
+__vxge_hw_pio_mem_write64(u64 val64, void __iomem *addr,
+ u64 mask, u32 max_millis)
+{
+ __vxge_hw_pio_mem_write32_lower((u32)vxge_bVALn(val64, 32, 32), addr);
+ wmb();
+
+ __vxge_hw_pio_mem_write32_upper((u32)vxge_bVALn(val64, 0, 32), addr);
+ wmb();
+
+ return __vxge_hw_device_register_poll(addr, mask, max_millis);
+}
+
+static struct vxge_hw_mempool*
+__vxge_hw_mempool_create(struct __vxge_hw_device *devh, u32 memblock_size,
+ u32 item_size, u32 private_size, u32 items_initial,
+ u32 items_max, struct vxge_hw_mempool_cbs *mp_callback,
+ void *userdata);
+static void __vxge_hw_mempool_destroy(struct vxge_hw_mempool *mempool);
+
+static enum vxge_hw_status
+__vxge_hw_vpath_stats_get(struct __vxge_hw_virtualpath *vpath,
+ struct vxge_hw_vpath_stats_hw_info *hw_stats);
+
+static enum vxge_hw_status
+vxge_hw_vpath_stats_enable(struct __vxge_hw_vpath_handle *vpath_handle);
+
+static enum vxge_hw_status
+__vxge_hw_legacy_swapper_set(struct vxge_hw_legacy_reg __iomem *legacy_reg);
+
+static u64
+__vxge_hw_vpath_pci_func_mode_get(u32 vp_id,
+ struct vxge_hw_vpath_reg __iomem *vpath_reg);
+
+static u32
+__vxge_hw_vpath_func_id_get(u32 vp_id, struct vxge_hw_vpmgmt_reg __iomem *vpmgmt_reg);
+
+static enum vxge_hw_status
+__vxge_hw_vpath_addr_get(u32 vp_id, struct vxge_hw_vpath_reg __iomem *vpath_reg,
+ u8 (macaddr)[ETH_ALEN], u8 (macaddr_mask)[ETH_ALEN]);
+
+static enum vxge_hw_status
+__vxge_hw_vpath_reset_check(struct __vxge_hw_virtualpath *vpath);
+
+
+static enum vxge_hw_status
+__vxge_hw_vpath_sw_reset(struct __vxge_hw_device *devh, u32 vp_id);
+
+static enum vxge_hw_status
+__vxge_hw_vpath_fw_ver_get(u32 vp_id, struct vxge_hw_vpath_reg __iomem *vpath_reg,
+ struct vxge_hw_device_hw_info *hw_info);
+
+static enum vxge_hw_status
+__vxge_hw_vpath_mac_configure(struct __vxge_hw_device *devh, u32 vp_id);
+
+static void
+__vxge_hw_vp_terminate(struct __vxge_hw_device *devh, u32 vp_id);
+
+static enum vxge_hw_status
+__vxge_hw_vpath_stats_access(struct __vxge_hw_virtualpath *vpath,
+ u32 operation, u32 offset, u64 *stat);
+
+static enum vxge_hw_status
+__vxge_hw_vpath_xmac_tx_stats_get(struct __vxge_hw_virtualpath *vpath,
+ struct vxge_hw_xmac_vpath_tx_stats *vpath_tx_stats);
+
+static enum vxge_hw_status
+__vxge_hw_vpath_xmac_rx_stats_get(struct __vxge_hw_virtualpath *vpath,
+ struct vxge_hw_xmac_vpath_rx_stats *vpath_rx_stats);
+
/*
* __vxge_hw_channel_allocate - Allocate memory for channel
* This function allocates required memory for the channel and various arrays
@@ -190,7 +363,7 @@ __vxge_hw_device_pci_e_init(struct __vxge_hw_device *hldev)
* Will poll certain register for specified amount of time.
* Will poll until masked bit is not cleared.
*/
-enum vxge_hw_status
+static enum vxge_hw_status
__vxge_hw_device_register_poll(void __iomem *reg, u64 mask, u32 max_millis)
{
u64 val64;
@@ -221,7 +394,7 @@ __vxge_hw_device_register_poll(void __iomem *reg, u64 mask, u32 max_millis)
* in progress
* This routine checks the vpath reset in progress register is turned zero
*/
-enum vxge_hw_status
+static enum vxge_hw_status
__vxge_hw_device_vpath_reset_in_prog_check(u64 __iomem *vpath_rst_in_prog)
{
enum vxge_hw_status status;
@@ -236,7 +409,7 @@ __vxge_hw_device_vpath_reset_in_prog_check(u64 __iomem *vpath_rst_in_prog)
* This routine sets the swapper and reads the toc pointer and returns the
* memory mapped address of the toc
*/
-struct vxge_hw_toc_reg __iomem *
+static struct vxge_hw_toc_reg __iomem *
__vxge_hw_device_toc_get(void __iomem *bar0)
{
u64 val64;
@@ -779,7 +952,7 @@ exit:
* vxge_hw_device_xmac_aggr_stats_get - Get the Statistics on aggregate port
* Get the Statistics on aggregate port
*/
-enum vxge_hw_status
+static enum vxge_hw_status
vxge_hw_device_xmac_aggr_stats_get(struct __vxge_hw_device *hldev, u32 port,
struct vxge_hw_xmac_aggr_stats *aggr_stats)
{
@@ -814,7 +987,7 @@ exit:
* vxge_hw_device_xmac_port_stats_get - Get the Statistics on a port
* Get the Statistics on port
*/
-enum vxge_hw_status
+static enum vxge_hw_status
vxge_hw_device_xmac_port_stats_get(struct __vxge_hw_device *hldev, u32 port,
struct vxge_hw_xmac_port_stats *port_stats)
{
@@ -952,20 +1125,6 @@ u32 vxge_hw_device_trace_level_get(struct __vxge_hw_device *hldev)
return 0;
#endif
}
-/*
- * vxge_hw_device_debug_mask_get - Get the debug mask
- * This routine returns the current debug mask set
- */
-u32 vxge_hw_device_debug_mask_get(struct __vxge_hw_device *hldev)
-{
-#if defined(VXGE_DEBUG_TRACE_MASK) || defined(VXGE_DEBUG_ERR_MASK)
- if (hldev == NULL)
- return 0;
- return hldev->debug_module_mask;
-#else
- return 0;
-#endif
-}
/*
* vxge_hw_getpause_data - Pause frame generation and reception.
@@ -1090,7 +1249,7 @@ __vxge_hw_ring_block_next_pointer_set(u8 *block, dma_addr_t dma_next)
* first block
* Returns the dma address of the first RxD block
*/
-u64 __vxge_hw_ring_first_block_address_get(struct __vxge_hw_ring *ring)
+static u64 __vxge_hw_ring_first_block_address_get(struct __vxge_hw_ring *ring)
{
struct vxge_hw_mempool_dma *dma_object;
@@ -1252,7 +1411,7 @@ exit:
* This function creates Ring and initializes it.
*
*/
-enum vxge_hw_status
+static enum vxge_hw_status
__vxge_hw_ring_create(struct __vxge_hw_vpath_handle *vp,
struct vxge_hw_ring_attr *attr)
{
@@ -1363,7 +1522,7 @@ exit:
* __vxge_hw_ring_abort - Returns the RxD
* This function terminates the RxDs of ring
*/
-enum vxge_hw_status __vxge_hw_ring_abort(struct __vxge_hw_ring *ring)
+static enum vxge_hw_status __vxge_hw_ring_abort(struct __vxge_hw_ring *ring)
{
void *rxdh;
struct __vxge_hw_channel *channel;
@@ -1392,7 +1551,7 @@ enum vxge_hw_status __vxge_hw_ring_abort(struct __vxge_hw_ring *ring)
* __vxge_hw_ring_reset - Resets the ring
* This function resets the ring during vpath reset operation
*/
-enum vxge_hw_status __vxge_hw_ring_reset(struct __vxge_hw_ring *ring)
+static enum vxge_hw_status __vxge_hw_ring_reset(struct __vxge_hw_ring *ring)
{
enum vxge_hw_status status = VXGE_HW_OK;
struct __vxge_hw_channel *channel;
@@ -1419,7 +1578,7 @@ exit:
* __vxge_hw_ring_delete - Removes the ring
* This function frees up the memory pool and removes the ring
*/
-enum vxge_hw_status __vxge_hw_ring_delete(struct __vxge_hw_vpath_handle *vp)
+static enum vxge_hw_status __vxge_hw_ring_delete(struct __vxge_hw_vpath_handle *vp)
{
struct __vxge_hw_ring *ring = vp->vpath->ringh;
@@ -1438,7 +1597,7 @@ enum vxge_hw_status __vxge_hw_ring_delete(struct __vxge_hw_vpath_handle *vp)
* __vxge_hw_mempool_grow
* Will resize mempool up to %num_allocate value.
*/
-enum vxge_hw_status
+static enum vxge_hw_status
__vxge_hw_mempool_grow(struct vxge_hw_mempool *mempool, u32 num_allocate,
u32 *num_allocated)
{
@@ -1527,7 +1686,7 @@ exit:
* with size enough to hold %items_initial number of items. Memory is
* DMA-able but client must map/unmap before interoperating with the device.
*/
-struct vxge_hw_mempool*
+static struct vxge_hw_mempool*
__vxge_hw_mempool_create(
struct __vxge_hw_device *devh,
u32 memblock_size,
@@ -1644,7 +1803,7 @@ exit:
/*
* vxge_hw_mempool_destroy
*/
-void __vxge_hw_mempool_destroy(struct vxge_hw_mempool *mempool)
+static void __vxge_hw_mempool_destroy(struct vxge_hw_mempool *mempool)
{
u32 i, j;
struct __vxge_hw_device *devh = mempool->devh;
@@ -1700,7 +1859,7 @@ __vxge_hw_device_fifo_config_check(struct vxge_hw_fifo_config *fifo_config)
* __vxge_hw_device_vpath_config_check - Check vpath configuration.
* Check the vpath configuration
*/
-enum vxge_hw_status
+static enum vxge_hw_status
__vxge_hw_device_vpath_config_check(struct vxge_hw_vp_config *vp_config)
{
enum vxge_hw_status status;
@@ -1922,7 +2081,7 @@ vxge_hw_device_config_default_get(struct vxge_hw_device_config *device_config)
* _hw_legacy_swapper_set - Set the swapper bits for the legacy section.
* Set the swapper bits appropriately for the legacy section.
*/
-enum vxge_hw_status
+static enum vxge_hw_status
__vxge_hw_legacy_swapper_set(struct vxge_hw_legacy_reg __iomem *legacy_reg)
{
u64 val64;
@@ -1977,7 +2136,7 @@ __vxge_hw_legacy_swapper_set(struct vxge_hw_legacy_reg __iomem *legacy_reg)
* __vxge_hw_vpath_swapper_set - Set the swapper bits for the vpath.
* Set the swapper bits appropriately for the vpath.
*/
-enum vxge_hw_status
+static enum vxge_hw_status
__vxge_hw_vpath_swapper_set(struct vxge_hw_vpath_reg __iomem *vpath_reg)
{
#ifndef __BIG_ENDIAN
@@ -1996,7 +2155,7 @@ __vxge_hw_vpath_swapper_set(struct vxge_hw_vpath_reg __iomem *vpath_reg)
* __vxge_hw_kdfc_swapper_set - Set the swapper bits for the kdfc.
* Set the swapper bits appropriately for the vpath.
*/
-enum vxge_hw_status
+static enum vxge_hw_status
__vxge_hw_kdfc_swapper_set(
struct vxge_hw_legacy_reg __iomem *legacy_reg,
struct vxge_hw_vpath_reg __iomem *vpath_reg)
@@ -2021,28 +2180,6 @@ __vxge_hw_kdfc_swapper_set(
}
/*
- * vxge_hw_mgmt_device_config - Retrieve device configuration.
- * Get device configuration. Permits to retrieve at run-time configuration
- * values that were used to initialize and configure the device.
- */
-enum vxge_hw_status
-vxge_hw_mgmt_device_config(struct __vxge_hw_device *hldev,
- struct vxge_hw_device_config *dev_config, int size)
-{
-
- if ((hldev == NULL) || (hldev->magic != VXGE_HW_DEVICE_MAGIC))
- return VXGE_HW_ERR_INVALID_DEVICE;
-
- if (size != sizeof(struct vxge_hw_device_config))
- return VXGE_HW_ERR_VERSION_CONFLICT;
-
- memcpy(dev_config, &hldev->config,
- sizeof(struct vxge_hw_device_config));
-
- return VXGE_HW_OK;
-}
-
-/*
* vxge_hw_mgmt_reg_read - Read Titan register.
*/
enum vxge_hw_status
@@ -2438,7 +2575,7 @@ exit:
* __vxge_hw_fifo_abort - Returns the TxD
* This function terminates the TxDs of fifo
*/
-enum vxge_hw_status __vxge_hw_fifo_abort(struct __vxge_hw_fifo *fifo)
+static enum vxge_hw_status __vxge_hw_fifo_abort(struct __vxge_hw_fifo *fifo)
{
void *txdlh;
@@ -2466,7 +2603,7 @@ enum vxge_hw_status __vxge_hw_fifo_abort(struct __vxge_hw_fifo *fifo)
* __vxge_hw_fifo_reset - Resets the fifo
* This function resets the fifo during vpath reset operation
*/
-enum vxge_hw_status __vxge_hw_fifo_reset(struct __vxge_hw_fifo *fifo)
+static enum vxge_hw_status __vxge_hw_fifo_reset(struct __vxge_hw_fifo *fifo)
{
enum vxge_hw_status status = VXGE_HW_OK;
@@ -2501,7 +2638,7 @@ enum vxge_hw_status __vxge_hw_fifo_delete(struct __vxge_hw_vpath_handle *vp)
* in pci config space.
* Read from the vpath pci config space.
*/
-enum vxge_hw_status
+static enum vxge_hw_status
__vxge_hw_vpath_pci_read(struct __vxge_hw_virtualpath *vpath,
u32 phy_func_0, u32 offset, u32 *val)
{
@@ -2542,7 +2679,7 @@ exit:
* __vxge_hw_vpath_func_id_get - Get the function id of the vpath.
* Returns the function number of the vpath.
*/
-u32
+static u32
__vxge_hw_vpath_func_id_get(u32 vp_id,
struct vxge_hw_vpmgmt_reg __iomem *vpmgmt_reg)
{
@@ -2573,7 +2710,7 @@ __vxge_hw_read_rts_ds(struct vxge_hw_vpath_reg __iomem *vpath_reg,
* __vxge_hw_vpath_card_info_get - Get the serial numbers,
* part number and product description.
*/
-enum vxge_hw_status
+static enum vxge_hw_status
__vxge_hw_vpath_card_info_get(
u32 vp_id,
struct vxge_hw_vpath_reg __iomem *vpath_reg,
@@ -2695,7 +2832,7 @@ __vxge_hw_vpath_card_info_get(
* __vxge_hw_vpath_fw_ver_get - Get the fw version
* Returns FW Version
*/
-enum vxge_hw_status
+static enum vxge_hw_status
__vxge_hw_vpath_fw_ver_get(
u32 vp_id,
struct vxge_hw_vpath_reg __iomem *vpath_reg,
@@ -2789,7 +2926,7 @@ exit:
* __vxge_hw_vpath_pci_func_mode_get - Get the pci mode
* Returns pci function mode
*/
-u64
+static u64
__vxge_hw_vpath_pci_func_mode_get(
u32 vp_id,
struct vxge_hw_vpath_reg __iomem *vpath_reg)
@@ -2995,7 +3132,7 @@ exit:
* __vxge_hw_vpath_addr_get - Get the hw address entry for this vpath
* from MAC address table.
*/
-enum vxge_hw_status
+static enum vxge_hw_status
__vxge_hw_vpath_addr_get(
u32 vp_id, struct vxge_hw_vpath_reg __iomem *vpath_reg,
u8 (macaddr)[ETH_ALEN], u8 (macaddr_mask)[ETH_ALEN])
@@ -3347,7 +3484,7 @@ __vxge_hw_vpath_mgmt_read(
* This routine checks the vpath_rst_in_prog register to see if
* adapter completed the reset process for the vpath
*/
-enum vxge_hw_status
+static enum vxge_hw_status
__vxge_hw_vpath_reset_check(struct __vxge_hw_virtualpath *vpath)
{
enum vxge_hw_status status;
@@ -3365,7 +3502,7 @@ __vxge_hw_vpath_reset_check(struct __vxge_hw_virtualpath *vpath)
* __vxge_hw_vpath_reset
* This routine resets the vpath on the device
*/
-enum vxge_hw_status
+static enum vxge_hw_status
__vxge_hw_vpath_reset(struct __vxge_hw_device *hldev, u32 vp_id)
{
u64 val64;
@@ -3383,7 +3520,7 @@ __vxge_hw_vpath_reset(struct __vxge_hw_device *hldev, u32 vp_id)
* __vxge_hw_vpath_sw_reset
* This routine resets the vpath structures
*/
-enum vxge_hw_status
+static enum vxge_hw_status
__vxge_hw_vpath_sw_reset(struct __vxge_hw_device *hldev, u32 vp_id)
{
enum vxge_hw_status status = VXGE_HW_OK;
@@ -3408,7 +3545,7 @@ exit:
* This routine configures the prc registers of virtual path using the config
* passed
*/
-void
+static void
__vxge_hw_vpath_prc_configure(struct __vxge_hw_device *hldev, u32 vp_id)
{
u64 val64;
@@ -3480,7 +3617,7 @@ __vxge_hw_vpath_prc_configure(struct __vxge_hw_device *hldev, u32 vp_id)
* This routine configures the kdfc registers of virtual path using the
* config passed
*/
-enum vxge_hw_status
+static enum vxge_hw_status
__vxge_hw_vpath_kdfc_configure(struct __vxge_hw_device *hldev, u32 vp_id)
{
u64 val64;
@@ -3553,7 +3690,7 @@ exit:
* __vxge_hw_vpath_mac_configure
* This routine configures the mac of virtual path using the config passed
*/
-enum vxge_hw_status
+static enum vxge_hw_status
__vxge_hw_vpath_mac_configure(struct __vxge_hw_device *hldev, u32 vp_id)
{
u64 val64;
@@ -3621,7 +3758,7 @@ __vxge_hw_vpath_mac_configure(struct __vxge_hw_device *hldev, u32 vp_id)
* This routine configures the tim registers of virtual path using the config
* passed
*/
-enum vxge_hw_status
+static enum vxge_hw_status
__vxge_hw_vpath_tim_configure(struct __vxge_hw_device *hldev, u32 vp_id)
{
u64 val64;
@@ -3897,7 +4034,7 @@ vxge_hw_vpath_tti_ci_set(struct __vxge_hw_device *hldev, u32 vp_id)
* This routine is the final phase of init which initializes the
* registers of the vpath using the configuration passed.
*/
-enum vxge_hw_status
+static enum vxge_hw_status
__vxge_hw_vpath_initialize(struct __vxge_hw_device *hldev, u32 vp_id)
{
u64 val64;
@@ -3966,7 +4103,7 @@ exit:
* This routine is the initial phase of init which resets the vpath and
* initializes the software support structures.
*/
-enum vxge_hw_status
+static enum vxge_hw_status
__vxge_hw_vp_initialize(struct __vxge_hw_device *hldev, u32 vp_id,
struct vxge_hw_vp_config *config)
{
@@ -4022,7 +4159,7 @@ exit:
* __vxge_hw_vp_terminate - Terminate Virtual Path structure
* This routine closes all channels it opened and freeup memory
*/
-void
+static void
__vxge_hw_vp_terminate(struct __vxge_hw_device *hldev, u32 vp_id)
{
struct __vxge_hw_virtualpath *vpath;
@@ -4384,7 +4521,7 @@ vxge_hw_vpath_enable(struct __vxge_hw_vpath_handle *vp)
* Enable the DMA vpath statistics. The function is to be called to re-enable
* the adapter to update stats into the host memory
*/
-enum vxge_hw_status
+static enum vxge_hw_status
vxge_hw_vpath_stats_enable(struct __vxge_hw_vpath_handle *vp)
{
enum vxge_hw_status status = VXGE_HW_OK;
@@ -4409,7 +4546,7 @@ exit:
* __vxge_hw_vpath_stats_access - Get the statistics from the given location
* and offset and perform an operation
*/
-enum vxge_hw_status
+static enum vxge_hw_status
__vxge_hw_vpath_stats_access(struct __vxge_hw_virtualpath *vpath,
u32 operation, u32 offset, u64 *stat)
{
@@ -4445,7 +4582,7 @@ vpath_stats_access_exit:
/*
* __vxge_hw_vpath_xmac_tx_stats_get - Get the TX Statistics of a vpath
*/
-enum vxge_hw_status
+static enum vxge_hw_status
__vxge_hw_vpath_xmac_tx_stats_get(
struct __vxge_hw_virtualpath *vpath,
struct vxge_hw_xmac_vpath_tx_stats *vpath_tx_stats)
@@ -4478,9 +4615,9 @@ exit:
/*
* __vxge_hw_vpath_xmac_rx_stats_get - Get the RX Statistics of a vpath
*/
-enum vxge_hw_status
+static enum vxge_hw_status
__vxge_hw_vpath_xmac_rx_stats_get(struct __vxge_hw_virtualpath *vpath,
- struct vxge_hw_xmac_vpath_rx_stats *vpath_rx_stats)
+ struct vxge_hw_xmac_vpath_rx_stats *vpath_rx_stats)
{
u64 *val64;
enum vxge_hw_status status = VXGE_HW_OK;
@@ -4509,9 +4646,9 @@ exit:
/*
* __vxge_hw_vpath_stats_get - Get the vpath hw statistics.
*/
-enum vxge_hw_status __vxge_hw_vpath_stats_get(
- struct __vxge_hw_virtualpath *vpath,
- struct vxge_hw_vpath_stats_hw_info *hw_stats)
+static enum vxge_hw_status
+__vxge_hw_vpath_stats_get(struct __vxge_hw_virtualpath *vpath,
+ struct vxge_hw_vpath_stats_hw_info *hw_stats)
{
u64 val64;
enum vxge_hw_status status = VXGE_HW_OK;
@@ -4643,6 +4780,32 @@ exit:
return status;
}
+
+static void vxge_os_dma_malloc_async(struct pci_dev *pdev, void *devh,
+ unsigned long size)
+{
+ gfp_t flags;
+ void *vaddr;
+
+ if (in_interrupt())
+ flags = GFP_ATOMIC | GFP_DMA;
+ else
+ flags = GFP_KERNEL | GFP_DMA;
+
+ vaddr = kmalloc((size), flags);
+
+ vxge_hw_blockpool_block_add(devh, vaddr, size, pdev, pdev);
+}
+
+static void vxge_os_dma_free(struct pci_dev *pdev, const void *vaddr,
+ struct pci_dev **p_dma_acch)
+{
+ unsigned long misaligned = *(unsigned long *)p_dma_acch;
+ u8 *tmp = (u8 *)vaddr;
+ tmp -= misaligned;
+ kfree((void *)tmp);
+}
+
/*
* __vxge_hw_blockpool_create - Create block pool
*/
@@ -4845,12 +5008,11 @@ void __vxge_hw_blockpool_blocks_remove(struct __vxge_hw_blockpool *blockpool)
* vxge_hw_blockpool_block_add - callback for vxge_os_dma_malloc_async
* Adds a block to block pool
*/
-void vxge_hw_blockpool_block_add(
- struct __vxge_hw_device *devh,
- void *block_addr,
- u32 length,
- struct pci_dev *dma_h,
- struct pci_dev *acc_handle)
+static void vxge_hw_blockpool_block_add(struct __vxge_hw_device *devh,
+ void *block_addr,
+ u32 length,
+ struct pci_dev *dma_h,
+ struct pci_dev *acc_handle)
{
struct __vxge_hw_blockpool *blockpool;
struct __vxge_hw_blockpool_entry *entry = NULL;
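Most of the vxge changes in this series follow a single pattern: helpers that are only called from within vxge-config.c (and, further down, vxge-main.c and vxge-traffic.c) become static, their prototypes move from the shared headers into the forward-declaration block added at the top of the .c file, and exports with no remaining callers (vxge_hw_mgmt_device_config(), vxge_hw_device_debug_mask_get(), the MSI-X clear and mask-all helpers, and so on) are deleted outright. A sketch of the pattern with hypothetical names:

/* forward declaration replaces the prototype that used to live in the header */
static enum vxge_hw_status __example_helper(struct __vxge_hw_device *hldev);

enum vxge_hw_status example_public_entry(struct __vxge_hw_device *hldev)
{
	return __example_helper(hldev);   /* only caller is in this file */
}

static enum vxge_hw_status __example_helper(struct __vxge_hw_device *hldev)
{
	return VXGE_HW_OK;
}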
diff --git a/drivers/net/vxge/vxge-config.h b/drivers/net/vxge/vxge-config.h
index 1a94343023cb..5c00861b6c2c 100644
--- a/drivers/net/vxge/vxge-config.h
+++ b/drivers/net/vxge/vxge-config.h
@@ -183,11 +183,6 @@ struct vxge_hw_device_version {
char version[VXGE_HW_FW_STRLEN];
};
-u64
-__vxge_hw_vpath_pci_func_mode_get(
- u32 vp_id,
- struct vxge_hw_vpath_reg __iomem *vpath_reg);
-
/**
* struct vxge_hw_fifo_config - Configuration of fifo.
* @enable: Is this fifo to be commissioned
@@ -1426,9 +1421,6 @@ struct vxge_hw_rth_hash_types {
u8 hash_type_ipv6ex_en;
};
-u32
-vxge_hw_device_debug_mask_get(struct __vxge_hw_device *devh);
-
void vxge_hw_device_debug_set(
struct __vxge_hw_device *devh,
enum vxge_debug_level level,
@@ -1440,9 +1432,6 @@ vxge_hw_device_error_level_get(struct __vxge_hw_device *devh);
u32
vxge_hw_device_trace_level_get(struct __vxge_hw_device *devh);
-u32
-vxge_hw_device_debug_mask_get(struct __vxge_hw_device *devh);
-
/**
* vxge_hw_ring_rxd_size_get - Get the size of ring descriptor.
* @buf_mode: Buffer mode (1, 3 or 5)
@@ -1817,60 +1806,10 @@ struct vxge_hw_vpath_attr {
struct vxge_hw_fifo_attr fifo_attr;
};
-enum vxge_hw_status
-__vxge_hw_blockpool_create(struct __vxge_hw_device *hldev,
- struct __vxge_hw_blockpool *blockpool,
- u32 pool_size,
- u32 pool_max);
-
-void
-__vxge_hw_blockpool_destroy(struct __vxge_hw_blockpool *blockpool);
-
-struct __vxge_hw_blockpool_entry *
-__vxge_hw_blockpool_block_allocate(struct __vxge_hw_device *hldev,
- u32 size);
-
-void
-__vxge_hw_blockpool_block_free(struct __vxge_hw_device *hldev,
- struct __vxge_hw_blockpool_entry *entry);
-
-void *
-__vxge_hw_blockpool_malloc(struct __vxge_hw_device *hldev,
- u32 size,
- struct vxge_hw_mempool_dma *dma_object);
-
-void
-__vxge_hw_blockpool_free(struct __vxge_hw_device *hldev,
- void *memblock,
- u32 size,
- struct vxge_hw_mempool_dma *dma_object);
-
-enum vxge_hw_status
-__vxge_hw_device_fifo_config_check(struct vxge_hw_fifo_config *fifo_config);
-
-enum vxge_hw_status
-__vxge_hw_device_config_check(struct vxge_hw_device_config *new_config);
-
-enum vxge_hw_status
-vxge_hw_mgmt_device_config(struct __vxge_hw_device *devh,
- struct vxge_hw_device_config *dev_config, int size);
-
enum vxge_hw_status __devinit vxge_hw_device_hw_info_get(
void __iomem *bar0,
struct vxge_hw_device_hw_info *hw_info);
-enum vxge_hw_status
-__vxge_hw_vpath_fw_ver_get(
- u32 vp_id,
- struct vxge_hw_vpath_reg __iomem *vpath_reg,
- struct vxge_hw_device_hw_info *hw_info);
-
-enum vxge_hw_status
-__vxge_hw_vpath_card_info_get(
- u32 vp_id,
- struct vxge_hw_vpath_reg __iomem *vpath_reg,
- struct vxge_hw_device_hw_info *hw_info);
-
enum vxge_hw_status __devinit vxge_hw_device_config_default_get(
struct vxge_hw_device_config *device_config);
@@ -1954,38 +1893,6 @@ out:
return vaddr;
}
-extern void vxge_hw_blockpool_block_add(
- struct __vxge_hw_device *devh,
- void *block_addr,
- u32 length,
- struct pci_dev *dma_h,
- struct pci_dev *acc_handle);
-
-static inline void vxge_os_dma_malloc_async(struct pci_dev *pdev, void *devh,
- unsigned long size)
-{
- gfp_t flags;
- void *vaddr;
-
- if (in_interrupt())
- flags = GFP_ATOMIC | GFP_DMA;
- else
- flags = GFP_KERNEL | GFP_DMA;
-
- vaddr = kmalloc((size), flags);
-
- vxge_hw_blockpool_block_add(devh, vaddr, size, pdev, pdev);
-}
-
-static inline void vxge_os_dma_free(struct pci_dev *pdev, const void *vaddr,
- struct pci_dev **p_dma_acch)
-{
- unsigned long misaligned = *(unsigned long *)p_dma_acch;
- u8 *tmp = (u8 *)vaddr;
- tmp -= misaligned;
- kfree((void *)tmp);
-}
-
/*
* __vxge_hw_mempool_item_priv - will return pointer on per item private space
*/
@@ -2010,40 +1917,6 @@ __vxge_hw_mempool_item_priv(
(*memblock_item_idx) * mempool->items_priv_size;
}
-enum vxge_hw_status
-__vxge_hw_mempool_grow(
- struct vxge_hw_mempool *mempool,
- u32 num_allocate,
- u32 *num_allocated);
-
-struct vxge_hw_mempool*
-__vxge_hw_mempool_create(
- struct __vxge_hw_device *devh,
- u32 memblock_size,
- u32 item_size,
- u32 private_size,
- u32 items_initial,
- u32 items_max,
- struct vxge_hw_mempool_cbs *mp_callback,
- void *userdata);
-
-struct __vxge_hw_channel*
-__vxge_hw_channel_allocate(struct __vxge_hw_vpath_handle *vph,
- enum __vxge_hw_channel_type type, u32 length,
- u32 per_dtr_space, void *userdata);
-
-void
-__vxge_hw_channel_free(
- struct __vxge_hw_channel *channel);
-
-enum vxge_hw_status
-__vxge_hw_channel_initialize(
- struct __vxge_hw_channel *channel);
-
-enum vxge_hw_status
-__vxge_hw_channel_reset(
- struct __vxge_hw_channel *channel);
-
/*
* __vxge_hw_fifo_txdl_priv - Return the max fragments allocated
* for the fifo.
@@ -2065,9 +1938,6 @@ enum vxge_hw_status vxge_hw_vpath_open(
struct vxge_hw_vpath_attr *attr,
struct __vxge_hw_vpath_handle **vpath_handle);
-enum vxge_hw_status
-__vxge_hw_device_vpath_reset_in_prog_check(u64 __iomem *vpath_rst_in_prog);
-
enum vxge_hw_status vxge_hw_vpath_close(
struct __vxge_hw_vpath_handle *vpath_handle);
@@ -2089,54 +1959,9 @@ enum vxge_hw_status vxge_hw_vpath_mtu_set(
struct __vxge_hw_vpath_handle *vpath_handle,
u32 new_mtu);
-enum vxge_hw_status vxge_hw_vpath_stats_enable(
- struct __vxge_hw_vpath_handle *vpath_handle);
-
-enum vxge_hw_status
-__vxge_hw_vpath_stats_access(
- struct __vxge_hw_virtualpath *vpath,
- u32 operation,
- u32 offset,
- u64 *stat);
-
-enum vxge_hw_status
-__vxge_hw_vpath_xmac_tx_stats_get(
- struct __vxge_hw_virtualpath *vpath,
- struct vxge_hw_xmac_vpath_tx_stats *vpath_tx_stats);
-
-enum vxge_hw_status
-__vxge_hw_vpath_xmac_rx_stats_get(
- struct __vxge_hw_virtualpath *vpath,
- struct vxge_hw_xmac_vpath_rx_stats *vpath_rx_stats);
-
-enum vxge_hw_status
-__vxge_hw_vpath_stats_get(
- struct __vxge_hw_virtualpath *vpath,
- struct vxge_hw_vpath_stats_hw_info *hw_stats);
-
void
vxge_hw_vpath_rx_doorbell_init(struct __vxge_hw_vpath_handle *vp);
-enum vxge_hw_status
-__vxge_hw_device_vpath_config_check(struct vxge_hw_vp_config *vp_config);
-
-void
-__vxge_hw_device_pci_e_init(struct __vxge_hw_device *hldev);
-
-enum vxge_hw_status
-__vxge_hw_legacy_swapper_set(struct vxge_hw_legacy_reg __iomem *legacy_reg);
-
-enum vxge_hw_status
-__vxge_hw_vpath_swapper_set(struct vxge_hw_vpath_reg __iomem *vpath_reg);
-
-enum vxge_hw_status
-__vxge_hw_kdfc_swapper_set(struct vxge_hw_legacy_reg __iomem *legacy_reg,
- struct vxge_hw_vpath_reg __iomem *vpath_reg);
-
-enum vxge_hw_status
-__vxge_hw_device_register_poll(
- void __iomem *reg,
- u64 mask, u32 max_millis);
#ifndef readq
static inline u64 readq(void __iomem *addr)
@@ -2168,62 +1993,12 @@ static inline void __vxge_hw_pio_mem_write32_lower(u32 val, void __iomem *addr)
writel(val, addr);
}
-static inline enum vxge_hw_status
-__vxge_hw_pio_mem_write64(u64 val64, void __iomem *addr,
- u64 mask, u32 max_millis)
-{
- enum vxge_hw_status status = VXGE_HW_OK;
-
- __vxge_hw_pio_mem_write32_lower((u32)vxge_bVALn(val64, 32, 32), addr);
- wmb();
- __vxge_hw_pio_mem_write32_upper((u32)vxge_bVALn(val64, 0, 32), addr);
- wmb();
-
- status = __vxge_hw_device_register_poll(addr, mask, max_millis);
- return status;
-}
-
-struct vxge_hw_toc_reg __iomem *
-__vxge_hw_device_toc_get(void __iomem *bar0);
-
-enum vxge_hw_status
-__vxge_hw_device_reg_addr_get(struct __vxge_hw_device *hldev);
-
-void
-__vxge_hw_device_id_get(struct __vxge_hw_device *hldev);
-
-void
-__vxge_hw_device_host_info_get(struct __vxge_hw_device *hldev);
-
enum vxge_hw_status
vxge_hw_device_flick_link_led(struct __vxge_hw_device *devh, u64 on_off);
enum vxge_hw_status
-__vxge_hw_device_initialize(struct __vxge_hw_device *hldev);
-
-enum vxge_hw_status
-__vxge_hw_vpath_pci_read(
- struct __vxge_hw_virtualpath *vpath,
- u32 phy_func_0,
- u32 offset,
- u32 *val);
-
-enum vxge_hw_status
-__vxge_hw_vpath_addr_get(
- u32 vp_id,
- struct vxge_hw_vpath_reg __iomem *vpath_reg,
- u8 (macaddr)[ETH_ALEN],
- u8 (macaddr_mask)[ETH_ALEN]);
-
-u32
-__vxge_hw_vpath_func_id_get(
- u32 vp_id, struct vxge_hw_vpmgmt_reg __iomem *vpmgmt_reg);
-
-enum vxge_hw_status
-__vxge_hw_vpath_reset_check(struct __vxge_hw_virtualpath *vpath);
-
-enum vxge_hw_status
vxge_hw_vpath_strip_fcs_check(struct __vxge_hw_device *hldev, u64 vpath_mask);
+
/**
* vxge_debug
* @level: level of debug verbosity.
diff --git a/drivers/net/vxge/vxge-ethtool.c b/drivers/net/vxge/vxge-ethtool.c
index 05679e306fdd..b67746eef923 100644
--- a/drivers/net/vxge/vxge-ethtool.c
+++ b/drivers/net/vxge/vxge-ethtool.c
@@ -1142,7 +1142,7 @@ static const struct ethtool_ops vxge_ethtool_ops = {
.get_ethtool_stats = vxge_get_ethtool_stats,
};
-void initialize_ethtool_ops(struct net_device *ndev)
+void vxge_initialize_ethtool_ops(struct net_device *ndev)
{
SET_ETHTOOL_OPS(ndev, &vxge_ethtool_ops);
}
diff --git a/drivers/net/vxge/vxge-main.c b/drivers/net/vxge/vxge-main.c
index a69542ecb68d..813829f3d024 100644
--- a/drivers/net/vxge/vxge-main.c
+++ b/drivers/net/vxge/vxge-main.c
@@ -82,6 +82,16 @@ module_param_array(bw_percentage, uint, NULL, 0);
static struct vxge_drv_config *driver_config;
+static enum vxge_hw_status vxge_add_mac_addr(struct vxgedev *vdev,
+ struct macInfo *mac);
+static enum vxge_hw_status vxge_del_mac_addr(struct vxgedev *vdev,
+ struct macInfo *mac);
+static int vxge_mac_list_add(struct vxge_vpath *vpath, struct macInfo *mac);
+static int vxge_mac_list_del(struct vxge_vpath *vpath, struct macInfo *mac);
+static enum vxge_hw_status vxge_restore_vpath_vid_table(struct vxge_vpath *vpath);
+static enum vxge_hw_status vxge_restore_vpath_mac_addr(struct vxge_vpath *vpath);
+static enum vxge_hw_status vxge_reset_all_vpaths(struct vxgedev *vdev);
+
static inline int is_vxge_card_up(struct vxgedev *vdev)
{
return test_bit(__VXGE_STATE_CARD_UP, &vdev->state);
@@ -138,7 +148,7 @@ static inline void VXGE_COMPLETE_ALL_RX(struct vxgedev *vdev)
* This function is called during interrupt context to notify link up state
* change.
*/
-void
+static void
vxge_callback_link_up(struct __vxge_hw_device *hldev)
{
struct net_device *dev = hldev->ndev;
@@ -162,7 +172,7 @@ vxge_callback_link_up(struct __vxge_hw_device *hldev)
* This function is called during interrupt context to notify link down state
* change.
*/
-void
+static void
vxge_callback_link_down(struct __vxge_hw_device *hldev)
{
struct net_device *dev = hldev->ndev;
@@ -354,7 +364,7 @@ static inline void vxge_post(int *dtr_cnt, void **first_dtr,
* If the interrupt is because of a received frame or if the receive ring
* contains fresh as yet un-processed frames, this function is called.
*/
-enum vxge_hw_status
+static enum vxge_hw_status
vxge_rx_1b_compl(struct __vxge_hw_ring *ringh, void *dtr,
u8 t_code, void *userdata)
{
@@ -531,7 +541,7 @@ vxge_rx_1b_compl(struct __vxge_hw_ring *ringh, void *dtr,
* freed and frees all skbs whose data have already DMA'ed into the NICs
* internal memory.
*/
-enum vxge_hw_status
+static enum vxge_hw_status
vxge_xmit_compl(struct __vxge_hw_fifo *fifo_hw, void *dtr,
enum vxge_hw_fifo_tcode t_code, void *userdata,
struct sk_buff ***skb_ptr, int nr_skb, int *more)
@@ -1246,7 +1256,7 @@ static int vxge_set_mac_addr(struct net_device *dev, void *p)
*
* Enables the interrupts for the vpath
*/
-void vxge_vpath_intr_enable(struct vxgedev *vdev, int vp_id)
+static void vxge_vpath_intr_enable(struct vxgedev *vdev, int vp_id)
{
struct vxge_vpath *vpath = &vdev->vpaths[vp_id];
int msix_id = 0;
@@ -1279,7 +1289,7 @@ void vxge_vpath_intr_enable(struct vxgedev *vdev, int vp_id)
*
* Disables the interrupts for the vpath
*/
-void vxge_vpath_intr_disable(struct vxgedev *vdev, int vp_id)
+static void vxge_vpath_intr_disable(struct vxgedev *vdev, int vp_id)
{
struct vxge_vpath *vpath = &vdev->vpaths[vp_id];
int msix_id;
@@ -1553,7 +1563,7 @@ out:
*
* driver may reset the chip on events of serr, eccerr, etc
*/
-int vxge_reset(struct vxgedev *vdev)
+static int vxge_reset(struct vxgedev *vdev)
{
return do_vxge_reset(vdev, VXGE_LL_FULL_RESET);
}
@@ -1724,7 +1734,7 @@ static enum vxge_hw_status vxge_rth_configure(struct vxgedev *vdev)
return status;
}
-int vxge_mac_list_add(struct vxge_vpath *vpath, struct macInfo *mac)
+static int vxge_mac_list_add(struct vxge_vpath *vpath, struct macInfo *mac)
{
struct vxge_mac_addrs *new_mac_entry;
u8 *mac_address = NULL;
@@ -1757,7 +1767,8 @@ int vxge_mac_list_add(struct vxge_vpath *vpath, struct macInfo *mac)
}
/* Add a mac address to DA table */
-enum vxge_hw_status vxge_add_mac_addr(struct vxgedev *vdev, struct macInfo *mac)
+static enum vxge_hw_status vxge_add_mac_addr(struct vxgedev *vdev,
+ struct macInfo *mac)
{
enum vxge_hw_status status = VXGE_HW_OK;
struct vxge_vpath *vpath;
@@ -1782,7 +1793,7 @@ enum vxge_hw_status vxge_add_mac_addr(struct vxgedev *vdev, struct macInfo *mac)
return status;
}
-int vxge_mac_list_del(struct vxge_vpath *vpath, struct macInfo *mac)
+static int vxge_mac_list_del(struct vxge_vpath *vpath, struct macInfo *mac)
{
struct list_head *entry, *next;
u64 del_mac = 0;
@@ -1807,7 +1818,8 @@ int vxge_mac_list_del(struct vxge_vpath *vpath, struct macInfo *mac)
return FALSE;
}
/* delete a mac address from DA table */
-enum vxge_hw_status vxge_del_mac_addr(struct vxgedev *vdev, struct macInfo *mac)
+static enum vxge_hw_status vxge_del_mac_addr(struct vxgedev *vdev,
+ struct macInfo *mac)
{
enum vxge_hw_status status = VXGE_HW_OK;
struct vxge_vpath *vpath;
@@ -1854,7 +1866,7 @@ static vxge_search_mac_addr_in_da_table(struct vxge_vpath *vpath,
}
/* Store all vlan ids from the list to the vid table */
-enum vxge_hw_status vxge_restore_vpath_vid_table(struct vxge_vpath *vpath)
+static enum vxge_hw_status vxge_restore_vpath_vid_table(struct vxge_vpath *vpath)
{
enum vxge_hw_status status = VXGE_HW_OK;
struct vxgedev *vdev = vpath->vdev;
@@ -1874,7 +1886,7 @@ enum vxge_hw_status vxge_restore_vpath_vid_table(struct vxge_vpath *vpath)
}
/* Store all mac addresses from the list to the DA table */
-enum vxge_hw_status vxge_restore_vpath_mac_addr(struct vxge_vpath *vpath)
+static enum vxge_hw_status vxge_restore_vpath_mac_addr(struct vxge_vpath *vpath)
{
enum vxge_hw_status status = VXGE_HW_OK;
struct macInfo mac_info;
@@ -1916,7 +1928,7 @@ enum vxge_hw_status vxge_restore_vpath_mac_addr(struct vxge_vpath *vpath)
}
/* reset vpaths */
-enum vxge_hw_status vxge_reset_all_vpaths(struct vxgedev *vdev)
+static enum vxge_hw_status vxge_reset_all_vpaths(struct vxgedev *vdev)
{
enum vxge_hw_status status = VXGE_HW_OK;
struct vxge_vpath *vpath;
@@ -1948,7 +1960,7 @@ enum vxge_hw_status vxge_reset_all_vpaths(struct vxgedev *vdev)
}
/* close vpaths */
-void vxge_close_vpaths(struct vxgedev *vdev, int index)
+static void vxge_close_vpaths(struct vxgedev *vdev, int index)
{
struct vxge_vpath *vpath;
int i;
@@ -1966,7 +1978,7 @@ void vxge_close_vpaths(struct vxgedev *vdev, int index)
}
/* open vpaths */
-int vxge_open_vpaths(struct vxgedev *vdev)
+static int vxge_open_vpaths(struct vxgedev *vdev)
{
struct vxge_hw_vpath_attr attr;
enum vxge_hw_status status;
@@ -2517,7 +2529,7 @@ static void vxge_poll_vp_lockup(unsigned long data)
* Return value: '0' on success and an appropriate (-)ve integer as
* defined in errno.h file on failure.
*/
-int
+static int
vxge_open(struct net_device *dev)
{
enum vxge_hw_status status;
@@ -2721,7 +2733,7 @@ out0:
}
/* Loop through the mac address list and delete all the entries */
-void vxge_free_mac_add_list(struct vxge_vpath *vpath)
+static void vxge_free_mac_add_list(struct vxge_vpath *vpath)
{
struct list_head *entry, *next;
@@ -2745,7 +2757,7 @@ static void vxge_napi_del_all(struct vxgedev *vdev)
}
}
-int do_vxge_close(struct net_device *dev, int do_io)
+static int do_vxge_close(struct net_device *dev, int do_io)
{
enum vxge_hw_status status;
struct vxgedev *vdev;
@@ -2856,7 +2868,7 @@ int do_vxge_close(struct net_device *dev, int do_io)
* Return value: '0' on success and an appropriate (-)ve integer as
* defined in errno.h file on failure.
*/
-int
+static int
vxge_close(struct net_device *dev)
{
do_vxge_close(dev, 1);
@@ -3113,10 +3125,10 @@ static const struct net_device_ops vxge_netdev_ops = {
#endif
};
-int __devinit vxge_device_register(struct __vxge_hw_device *hldev,
- struct vxge_config *config,
- int high_dma, int no_of_vpath,
- struct vxgedev **vdev_out)
+static int __devinit vxge_device_register(struct __vxge_hw_device *hldev,
+ struct vxge_config *config,
+ int high_dma, int no_of_vpath,
+ struct vxgedev **vdev_out)
{
struct net_device *ndev;
enum vxge_hw_status status = VXGE_HW_OK;
@@ -3164,7 +3176,7 @@ int __devinit vxge_device_register(struct __vxge_hw_device *hldev,
ndev->watchdog_timeo = VXGE_LL_WATCH_DOG_TIMEOUT;
- initialize_ethtool_ops(ndev);
+ vxge_initialize_ethtool_ops(ndev);
/* Allocate memory for vpath */
vdev->vpaths = kzalloc((sizeof(struct vxge_vpath)) *
@@ -3249,7 +3261,7 @@ _out0:
*
* This function will unregister and free network device
*/
-void
+static void
vxge_device_unregister(struct __vxge_hw_device *hldev)
{
struct vxgedev *vdev;
diff --git a/drivers/net/vxge/vxge-main.h b/drivers/net/vxge/vxge-main.h
index d4be07eaacd7..de64536cb7d0 100644
--- a/drivers/net/vxge/vxge-main.h
+++ b/drivers/net/vxge/vxge-main.h
@@ -396,64 +396,7 @@ struct vxge_tx_priv {
mod_timer(&timer, (jiffies + exp)); \
} while (0);
-int __devinit vxge_device_register(struct __vxge_hw_device *devh,
- struct vxge_config *config,
- int high_dma, int no_of_vpath,
- struct vxgedev **vdev);
-
-void vxge_device_unregister(struct __vxge_hw_device *devh);
-
-void vxge_vpath_intr_enable(struct vxgedev *vdev, int vp_id);
-
-void vxge_vpath_intr_disable(struct vxgedev *vdev, int vp_id);
-
-void vxge_callback_link_up(struct __vxge_hw_device *devh);
-
-void vxge_callback_link_down(struct __vxge_hw_device *devh);
-
-enum vxge_hw_status vxge_add_mac_addr(struct vxgedev *vdev,
- struct macInfo *mac);
-
-int vxge_mac_list_del(struct vxge_vpath *vpath, struct macInfo *mac);
-
-int vxge_reset(struct vxgedev *vdev);
-
-enum vxge_hw_status
-vxge_rx_1b_compl(struct __vxge_hw_ring *ringh, void *dtr,
- u8 t_code, void *userdata);
-
-enum vxge_hw_status
-vxge_xmit_compl(struct __vxge_hw_fifo *fifo_hw, void *dtr,
- enum vxge_hw_fifo_tcode t_code, void *userdata,
- struct sk_buff ***skb_ptr, int nr_skbs, int *more);
-
-int vxge_close(struct net_device *dev);
-
-int vxge_open(struct net_device *dev);
-
-void vxge_close_vpaths(struct vxgedev *vdev, int index);
-
-int vxge_open_vpaths(struct vxgedev *vdev);
-
-enum vxge_hw_status vxge_reset_all_vpaths(struct vxgedev *vdev);
-
-enum vxge_hw_status vxge_add_mac_addr(struct vxgedev *vdev,
- struct macInfo *mac);
-
-enum vxge_hw_status vxge_del_mac_addr(struct vxgedev *vdev,
- struct macInfo *mac);
-
-int vxge_mac_list_add(struct vxge_vpath *vpath,
- struct macInfo *mac);
-
-void vxge_free_mac_add_list(struct vxge_vpath *vpath);
-
-enum vxge_hw_status vxge_restore_vpath_mac_addr(struct vxge_vpath *vpath);
-
-enum vxge_hw_status vxge_restore_vpath_vid_table(struct vxge_vpath *vpath);
-
-int do_vxge_close(struct net_device *dev, int do_io);
-extern void initialize_ethtool_ops(struct net_device *ndev);
+extern void vxge_initialize_ethtool_ops(struct net_device *ndev);
/**
* #define VXGE_DEBUG_INIT: debug for initialization functions
* #define VXGE_DEBUG_TX : debug transmit related functions
diff --git a/drivers/net/vxge/vxge-traffic.c b/drivers/net/vxge/vxge-traffic.c
index cedf08f99cb3..4bdb611a6842 100644
--- a/drivers/net/vxge/vxge-traffic.c
+++ b/drivers/net/vxge/vxge-traffic.c
@@ -17,6 +17,13 @@
#include "vxge-config.h"
#include "vxge-main.h"
+static enum vxge_hw_status
+__vxge_hw_device_handle_error(struct __vxge_hw_device *hldev,
+ u32 vp_id, enum vxge_hw_event type);
+static enum vxge_hw_status
+__vxge_hw_vpath_alarm_process(struct __vxge_hw_virtualpath *vpath,
+ u32 skip_alarms);
+
/*
* vxge_hw_vpath_intr_enable - Enable vpath interrupts.
* @vp: Virtual Path handle.
@@ -513,7 +520,7 @@ exit:
* Link up indication handler. The function is invoked by HW when
* Titan indicates that the link is up for programmable amount of time.
*/
-enum vxge_hw_status
+static enum vxge_hw_status
__vxge_hw_device_handle_link_up_ind(struct __vxge_hw_device *hldev)
{
/*
@@ -538,7 +545,7 @@ exit:
* Link down indication handler. The function is invoked by HW when
* Titan indicates that the link is down.
*/
-enum vxge_hw_status
+static enum vxge_hw_status
__vxge_hw_device_handle_link_down_ind(struct __vxge_hw_device *hldev)
{
/*
@@ -564,7 +571,7 @@ exit:
*
* Handle error.
*/
-enum vxge_hw_status
+static enum vxge_hw_status
__vxge_hw_device_handle_error(
struct __vxge_hw_device *hldev,
u32 vp_id,
@@ -646,7 +653,7 @@ void vxge_hw_device_clear_tx_rx(struct __vxge_hw_device *hldev)
* it swaps the reserve and free arrays.
*
*/
-enum vxge_hw_status
+static enum vxge_hw_status
vxge_hw_channel_dtr_alloc(struct __vxge_hw_channel *channel, void **dtrh)
{
void **tmp_arr;
@@ -692,7 +699,8 @@ _alloc_after_swap:
* Posts a dtr to work array.
*
*/
-void vxge_hw_channel_dtr_post(struct __vxge_hw_channel *channel, void *dtrh)
+static void vxge_hw_channel_dtr_post(struct __vxge_hw_channel *channel,
+ void *dtrh)
{
vxge_assert(channel->work_arr[channel->post_index] == NULL);
@@ -1658,37 +1666,6 @@ exit:
}
/**
- * vxge_hw_vpath_vid_get_next - Get the next vid entry for this vpath
- * from vlan id table.
- * @vp: Vpath handle.
- * @vid: Buffer to return vlan id
- *
- * Returns the next vlan id in the list for this vpath.
- * see also: vxge_hw_vpath_vid_get
- *
- */
-enum vxge_hw_status
-vxge_hw_vpath_vid_get_next(struct __vxge_hw_vpath_handle *vp, u64 *vid)
-{
- u64 data;
- enum vxge_hw_status status = VXGE_HW_OK;
-
- if (vp == NULL) {
- status = VXGE_HW_ERR_INVALID_HANDLE;
- goto exit;
- }
-
- status = __vxge_hw_vpath_rts_table_get(vp,
- VXGE_HW_RTS_ACCESS_STEER_CTRL_ACTION_LIST_NEXT_ENTRY,
- VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_VID,
- 0, vid, &data);
-
- *vid = VXGE_HW_RTS_ACCESS_STEER_DATA0_GET_VLAN_ID(*vid);
-exit:
- return status;
-}
-
-/**
* vxge_hw_vpath_vid_delete - Delete the vlan id entry for this vpath
* to vlan id table.
* @vp: Vpath handle.
@@ -1898,9 +1875,9 @@ exit:
* Process vpath alarms.
*
*/
-enum vxge_hw_status __vxge_hw_vpath_alarm_process(
- struct __vxge_hw_virtualpath *vpath,
- u32 skip_alarms)
+static enum vxge_hw_status
+__vxge_hw_vpath_alarm_process(struct __vxge_hw_virtualpath *vpath,
+ u32 skip_alarms)
{
u64 val64;
u64 alarm_status;
@@ -2265,36 +2242,6 @@ vxge_hw_vpath_msix_mask(struct __vxge_hw_vpath_handle *vp, int msix_id)
}
/**
- * vxge_hw_vpath_msix_clear - Clear MSIX Vector.
- * @vp: Virtual Path handle.
- * @msix_id: MSI ID
- *
- * The function clears the msix interrupt for the given msix_id
- *
- * Returns: 0,
- * Otherwise, VXGE_HW_ERR_WRONG_IRQ if the msix index is out of range
- * status.
- * See also:
- */
-void
-vxge_hw_vpath_msix_clear(struct __vxge_hw_vpath_handle *vp, int msix_id)
-{
- struct __vxge_hw_device *hldev = vp->vpath->hldev;
- if (hldev->config.intr_mode ==
- VXGE_HW_INTR_MODE_MSIX_ONE_SHOT) {
- __vxge_hw_pio_mem_write32_upper(
- (u32)vxge_bVALn(vxge_mBIT(msix_id >> 2), 0, 32),
- &hldev->common_reg->
- clr_msix_one_shot_vec[msix_id%4]);
- } else {
- __vxge_hw_pio_mem_write32_upper(
- (u32)vxge_bVALn(vxge_mBIT(msix_id >> 2), 0, 32),
- &hldev->common_reg->
- clear_msix_mask_vect[msix_id%4]);
- }
-}
-
-/**
* vxge_hw_vpath_msix_unmask - Unmask the MSIX Vector.
* @vp: Virtual Path handle.
* @msix_id: MSI ID
@@ -2316,22 +2263,6 @@ vxge_hw_vpath_msix_unmask(struct __vxge_hw_vpath_handle *vp, int msix_id)
}
/**
- * vxge_hw_vpath_msix_mask_all - Mask all MSIX vectors for the vpath.
- * @vp: Virtual Path handle.
- *
- * The function masks all msix interrupt for the given vpath
- *
- */
-void
-vxge_hw_vpath_msix_mask_all(struct __vxge_hw_vpath_handle *vp)
-{
-
- __vxge_hw_pio_mem_write32_upper(
- (u32)vxge_bVALn(vxge_mBIT(vp->vpath->vp_id), 0, 32),
- &vp->vpath->hldev->common_reg->set_msix_mask_all_vect);
-}
-
-/**
* vxge_hw_vpath_inta_mask_tx_rx - Mask Tx and Rx interrupts.
* @vp: Virtual Path handle.
*
diff --git a/drivers/net/vxge/vxge-traffic.h b/drivers/net/vxge/vxge-traffic.h
index 6fa07d13798e..9890d4d596d0 100644
--- a/drivers/net/vxge/vxge-traffic.h
+++ b/drivers/net/vxge/vxge-traffic.h
@@ -1749,14 +1749,6 @@ vxge_hw_mrpcim_stats_access(
u64 *stat);
enum vxge_hw_status
-vxge_hw_device_xmac_aggr_stats_get(struct __vxge_hw_device *devh, u32 port,
- struct vxge_hw_xmac_aggr_stats *aggr_stats);
-
-enum vxge_hw_status
-vxge_hw_device_xmac_port_stats_get(struct __vxge_hw_device *devh, u32 port,
- struct vxge_hw_xmac_port_stats *port_stats);
-
-enum vxge_hw_status
vxge_hw_device_xmac_stats_get(struct __vxge_hw_device *devh,
struct vxge_hw_xmac_stats *xmac_stats);
@@ -2117,49 +2109,10 @@ struct __vxge_hw_ring_rxd_priv {
#endif
};
-/* ========================= RING PRIVATE API ============================= */
-u64
-__vxge_hw_ring_first_block_address_get(
- struct __vxge_hw_ring *ringh);
-
-enum vxge_hw_status
-__vxge_hw_ring_create(
- struct __vxge_hw_vpath_handle *vpath_handle,
- struct vxge_hw_ring_attr *attr);
-
-enum vxge_hw_status
-__vxge_hw_ring_abort(
- struct __vxge_hw_ring *ringh);
-
-enum vxge_hw_status
-__vxge_hw_ring_reset(
- struct __vxge_hw_ring *ringh);
-
-enum vxge_hw_status
-__vxge_hw_ring_delete(
- struct __vxge_hw_vpath_handle *vpath_handle);
-
/* ========================= FIFO PRIVATE API ============================= */
struct vxge_hw_fifo_attr;
-enum vxge_hw_status
-__vxge_hw_fifo_create(
- struct __vxge_hw_vpath_handle *vpath_handle,
- struct vxge_hw_fifo_attr *attr);
-
-enum vxge_hw_status
-__vxge_hw_fifo_abort(
- struct __vxge_hw_fifo *fifoh);
-
-enum vxge_hw_status
-__vxge_hw_fifo_reset(
- struct __vxge_hw_fifo *ringh);
-
-enum vxge_hw_status
-__vxge_hw_fifo_delete(
- struct __vxge_hw_vpath_handle *vpath_handle);
-
struct vxge_hw_mempool_cbs {
void (*item_func_alloc)(
struct vxge_hw_mempool *mempoolh,
@@ -2169,10 +2122,6 @@ struct vxge_hw_mempool_cbs {
u32 is_last);
};
-void
-__vxge_hw_mempool_destroy(
- struct vxge_hw_mempool *mempool);
-
#define VXGE_HW_VIRTUAL_PATH_HANDLE(vpath) \
((struct __vxge_hw_vpath_handle *)(vpath)->vpath_handles.next)
@@ -2195,61 +2144,10 @@ __vxge_hw_vpath_rts_table_set(
u64 data2);
enum vxge_hw_status
-__vxge_hw_vpath_reset(
- struct __vxge_hw_device *devh,
- u32 vp_id);
-
-enum vxge_hw_status
-__vxge_hw_vpath_sw_reset(
- struct __vxge_hw_device *devh,
- u32 vp_id);
-
-enum vxge_hw_status
__vxge_hw_vpath_enable(
struct __vxge_hw_device *devh,
u32 vp_id);
-void
-__vxge_hw_vpath_prc_configure(
- struct __vxge_hw_device *devh,
- u32 vp_id);
-
-enum vxge_hw_status
-__vxge_hw_vpath_kdfc_configure(
- struct __vxge_hw_device *devh,
- u32 vp_id);
-
-enum vxge_hw_status
-__vxge_hw_vpath_mac_configure(
- struct __vxge_hw_device *devh,
- u32 vp_id);
-
-enum vxge_hw_status
-__vxge_hw_vpath_tim_configure(
- struct __vxge_hw_device *devh,
- u32 vp_id);
-
-enum vxge_hw_status
-__vxge_hw_vpath_initialize(
- struct __vxge_hw_device *devh,
- u32 vp_id);
-
-enum vxge_hw_status
-__vxge_hw_vp_initialize(
- struct __vxge_hw_device *devh,
- u32 vp_id,
- struct vxge_hw_vp_config *config);
-
-void
-__vxge_hw_vp_terminate(
- struct __vxge_hw_device *devh,
- u32 vp_id);
-
-enum vxge_hw_status
-__vxge_hw_vpath_alarm_process(
- struct __vxge_hw_virtualpath *vpath,
- u32 skip_alarms);
-
void vxge_hw_device_intr_enable(
struct __vxge_hw_device *devh);
@@ -2321,11 +2219,6 @@ vxge_hw_vpath_vid_get(
u64 *vid);
enum vxge_hw_status
-vxge_hw_vpath_vid_get_next(
- struct __vxge_hw_vpath_handle *vpath_handle,
- u64 *vid);
-
-enum vxge_hw_status
vxge_hw_vpath_vid_delete(
struct __vxge_hw_vpath_handle *vpath_handle,
u64 vid);
@@ -2387,16 +2280,9 @@ vxge_hw_vpath_msix_mask(struct __vxge_hw_vpath_handle *vpath_handle,
void vxge_hw_device_flush_io(struct __vxge_hw_device *devh);
void
-vxge_hw_vpath_msix_clear(struct __vxge_hw_vpath_handle *vpath_handle,
- int msix_id);
-
-void
vxge_hw_vpath_msix_unmask(struct __vxge_hw_vpath_handle *vpath_handle,
int msix_id);
-void
-vxge_hw_vpath_msix_mask_all(struct __vxge_hw_vpath_handle *vpath_handle);
-
enum vxge_hw_status vxge_hw_vpath_intr_enable(
struct __vxge_hw_vpath_handle *vpath_handle);
@@ -2415,12 +2301,6 @@ vxge_hw_channel_msix_mask(struct __vxge_hw_channel *channelh, int msix_id);
void
vxge_hw_channel_msix_unmask(struct __vxge_hw_channel *channelh, int msix_id);
-enum vxge_hw_status
-vxge_hw_channel_dtr_alloc(struct __vxge_hw_channel *channel, void **dtrh);
-
-void
-vxge_hw_channel_dtr_post(struct __vxge_hw_channel *channel, void *dtrh);
-
void
vxge_hw_channel_dtr_try_complete(struct __vxge_hw_channel *channel,
void **dtrh);
@@ -2436,18 +2316,4 @@ vxge_hw_channel_dtr_count(struct __vxge_hw_channel *channel);
void
vxge_hw_vpath_tti_ci_set(struct __vxge_hw_device *hldev, u32 vp_id);
-/* ========================== PRIVATE API ================================= */
-
-enum vxge_hw_status
-__vxge_hw_device_handle_link_up_ind(struct __vxge_hw_device *hldev);
-
-enum vxge_hw_status
-__vxge_hw_device_handle_link_down_ind(struct __vxge_hw_device *hldev);
-
-enum vxge_hw_status
-__vxge_hw_device_handle_error(
- struct __vxge_hw_device *hldev,
- u32 vp_id,
- enum vxge_hw_event type);
-
#endif
diff --git a/drivers/net/wan/x25_asy.c b/drivers/net/wan/x25_asy.c
index d81ad8397885..24297b274cd4 100644
--- a/drivers/net/wan/x25_asy.c
+++ b/drivers/net/wan/x25_asy.c
@@ -498,7 +498,6 @@ norbuff:
static int x25_asy_close(struct net_device *dev)
{
struct x25_asy *sl = netdev_priv(dev);
- int err;
spin_lock(&sl->lock);
if (sl->tty)
@@ -507,10 +506,6 @@ static int x25_asy_close(struct net_device *dev)
netif_stop_queue(dev);
sl->rcount = 0;
sl->xleft = 0;
- err = lapb_unregister(dev);
- if (err != LAPB_OK)
- printk(KERN_ERR "x25_asy_close: lapb_unregister error -%d\n",
- err);
spin_unlock(&sl->lock);
return 0;
}
@@ -582,7 +577,7 @@ static int x25_asy_open_tty(struct tty_struct *tty)
if (err)
return err;
/* Done. We have linked the TTY line to a channel. */
- return sl->dev->base_addr;
+ return 0;
}
@@ -595,6 +590,7 @@ static int x25_asy_open_tty(struct tty_struct *tty)
static void x25_asy_close_tty(struct tty_struct *tty)
{
struct x25_asy *sl = tty->disc_data;
+ int err;
/* First make sure we're connected. */
if (!sl || sl->magic != X25_ASY_MAGIC)
@@ -605,6 +601,11 @@ static void x25_asy_close_tty(struct tty_struct *tty)
dev_close(sl->dev);
rtnl_unlock();
+ err = lapb_unregister(sl->dev);
+ if (err != LAPB_OK)
+ printk(KERN_ERR "x25_asy_close: lapb_unregister error -%d\n",
+ err);
+
tty->disc_data = NULL;
sl->tty = NULL;
x25_asy_free(sl);
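
The x25_asy hunks above move the lapb_unregister() call out of x25_asy_close(), where it ran inside spin_lock(&sl->lock), and into the tty-disconnect path x25_asy_close_tty(), where no spinlock is held; they also make x25_asy_open_tty() return 0 instead of the device base address. A rough userspace sketch of the reordering idea, using stub names that stand in for the kernel calls (they are not the real API), might look like:

/* Illustrative sketch only: non-sleeping work stays under the lock,
 * the potentially sleeping unregister happens after it is dropped. */
#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t dev_lock = PTHREAD_MUTEX_INITIALIZER;

static void stop_queues(void)      { printf("queues stopped\n"); }
static int  unregister_proto(void) { printf("protocol unregistered\n"); return 0; }

static void device_close(void)
{
	/* Fast, non-sleeping work only while the lock is held. */
	pthread_mutex_lock(&dev_lock);
	stop_queues();
	pthread_mutex_unlock(&dev_lock);
}

static void line_discipline_close(void)
{
	/* The unregister that may sleep runs here, with no lock held. */
	device_close();
	if (unregister_proto() != 0)
		fprintf(stderr, "unregister failed\n");
}

int main(void)
{
	line_discipline_close();
	return 0;
}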
diff --git a/drivers/net/wireless/ath/ath5k/attach.c b/drivers/net/wireless/ath/ath5k/attach.c
index cd0b14a0a93a..fbe8aca975d8 100644
--- a/drivers/net/wireless/ath/ath5k/attach.c
+++ b/drivers/net/wireless/ath/ath5k/attach.c
@@ -139,12 +139,12 @@ int ath5k_hw_attach(struct ath5k_softc *sc)
/* Fill the ath5k_hw struct with the needed functions */
ret = ath5k_hw_init_desc_functions(ah);
if (ret)
- goto err_free;
+ goto err;
/* Bring device out of sleep and reset its units */
ret = ath5k_hw_nic_wakeup(ah, 0, true);
if (ret)
- goto err_free;
+ goto err;
/* Get MAC, PHY and RADIO revisions */
ah->ah_mac_srev = srev;
@@ -234,7 +234,7 @@ int ath5k_hw_attach(struct ath5k_softc *sc)
} else {
ATH5K_ERR(sc, "Couldn't identify radio revision.\n");
ret = -ENODEV;
- goto err_free;
+ goto err;
}
}
@@ -244,7 +244,7 @@ int ath5k_hw_attach(struct ath5k_softc *sc)
(srev < AR5K_SREV_AR2425)) {
ATH5K_ERR(sc, "Device not yet supported.\n");
ret = -ENODEV;
- goto err_free;
+ goto err;
}
/*
@@ -252,7 +252,7 @@ int ath5k_hw_attach(struct ath5k_softc *sc)
*/
ret = ath5k_hw_post(ah);
if (ret)
- goto err_free;
+ goto err;
/* Enable pci core retry fix on Hainan (5213A) and later chips */
if (srev >= AR5K_SREV_AR5213A)
@@ -265,7 +265,7 @@ int ath5k_hw_attach(struct ath5k_softc *sc)
ret = ath5k_eeprom_init(ah);
if (ret) {
ATH5K_ERR(sc, "unable to init EEPROM\n");
- goto err_free;
+ goto err;
}
ee = &ah->ah_capabilities.cap_eeprom;
@@ -307,7 +307,7 @@ int ath5k_hw_attach(struct ath5k_softc *sc)
if (ret) {
ATH5K_ERR(sc, "unable to get device capabilities: 0x%04x\n",
sc->pdev->device);
- goto err_free;
+ goto err;
}
/* Crypto settings */
@@ -341,8 +341,7 @@ int ath5k_hw_attach(struct ath5k_softc *sc)
ath5k_hw_set_ledstate(ah, AR5K_LED_INIT);
return 0;
-err_free:
- kfree(ah);
+err:
return ret;
}
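
The ath5k_hw_attach() hunks above rename the error label from err_free to err and drop the kfree(ah) that used to sit under it, which suggests the ath5k_hw structure is owned and released by the caller, so freeing it inside the attach routine risked a double free. A minimal sketch of that ownership rule, with invented names, could be:

/* Sketch: the function that allocated the object is the one that frees it;
 * an initializer that fails must not free memory it did not allocate. */
#include <stdlib.h>
#include <stdio.h>

struct hw { int ready; };

static int hw_init(struct hw *hw)
{
	hw->ready = 0;
	/* pretend a probe step failed */
	return -1;
}

int main(void)
{
	struct hw *hw = malloc(sizeof(*hw));
	if (!hw)
		return 1;

	if (hw_init(hw) != 0) {
		/* Single point of release in the owner, so no double free. */
		free(hw);
		return 1;
	}
	free(hw);
	return 0;
}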
diff --git a/drivers/net/wireless/ath/ath5k/base.c b/drivers/net/wireless/ath/ath5k/base.c
index f1ae75d35d5d..8251946842e6 100644
--- a/drivers/net/wireless/ath/ath5k/base.c
+++ b/drivers/net/wireless/ath/ath5k/base.c
@@ -3580,6 +3580,7 @@ ath5k_pci_probe(struct pci_dev *pdev,
common->ah = sc->ah;
common->hw = hw;
common->cachelsz = csz << 2; /* convert to bytes */
+ spin_lock_init(&common->cc_lock);
/* Initialize device */
ret = ath5k_hw_attach(sc);
diff --git a/drivers/net/wireless/ath/ath9k/ar9002_hw.c b/drivers/net/wireless/ath/ath9k/ar9002_hw.c
index a0471f2e1c7a..48261b7252d0 100644
--- a/drivers/net/wireless/ath/ath9k/ar9002_hw.c
+++ b/drivers/net/wireless/ath/ath9k/ar9002_hw.c
@@ -410,6 +410,9 @@ static void ar9002_hw_configpcipowersave(struct ath_hw *ah,
val &= ~(AR_WA_BIT6 | AR_WA_BIT7);
}
+ if (AR_SREV_9280(ah))
+ val |= AR_WA_BIT22;
+
if (AR_SREV_9285E_20(ah))
val |= AR_WA_BIT23;
diff --git a/drivers/net/wireless/ath/ath9k/ar9003_2p2_initvals.h b/drivers/net/wireless/ath/ath9k/ar9003_2p2_initvals.h
index ec98ab50748a..a14a5e43cf56 100644
--- a/drivers/net/wireless/ath/ath9k/ar9003_2p2_initvals.h
+++ b/drivers/net/wireless/ath/ath9k/ar9003_2p2_initvals.h
@@ -34,6 +34,10 @@ static const u32 ar9300_2p2_radio_postamble[][5] = {
static const u32 ar9300Modes_lowest_ob_db_tx_gain_table_2p2[][5] = {
/* Addr 5G_HT20 5G_HT40 2G_HT40 2G_HT20 */
+ {0x0000a2dc, 0x0380c7fc, 0x0380c7fc, 0x00637800, 0x00637800},
+ {0x0000a2e0, 0x0000f800, 0x0000f800, 0x03838000, 0x03838000},
+ {0x0000a2e4, 0x03ff0000, 0x03ff0000, 0x03fc0000, 0x03fc0000},
+ {0x0000a2e8, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
{0x0000a410, 0x000050d9, 0x000050d9, 0x000050d9, 0x000050d9},
{0x0000a500, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
{0x0000a504, 0x06000003, 0x06000003, 0x04000002, 0x04000002},
@@ -99,6 +103,30 @@ static const u32 ar9300Modes_lowest_ob_db_tx_gain_table_2p2[][5] = {
{0x0000a5f4, 0x7782b08c, 0x7782b08c, 0x5d801eec, 0x5d801eec},
{0x0000a5f8, 0x7782b08c, 0x7782b08c, 0x5d801eec, 0x5d801eec},
{0x0000a5fc, 0x7782b08c, 0x7782b08c, 0x5d801eec, 0x5d801eec},
+ {0x0000a600, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+ {0x0000a604, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+ {0x0000a608, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+ {0x0000a60c, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+ {0x0000a610, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+ {0x0000a614, 0x01404000, 0x01404000, 0x01404000, 0x01404000},
+ {0x0000a618, 0x01404501, 0x01404501, 0x01404501, 0x01404501},
+ {0x0000a61c, 0x02008802, 0x02008802, 0x02008501, 0x02008501},
+ {0x0000a620, 0x0300cc03, 0x0300cc03, 0x0280ca03, 0x0280ca03},
+ {0x0000a624, 0x0300cc03, 0x0300cc03, 0x03010c04, 0x03010c04},
+ {0x0000a628, 0x0300cc03, 0x0300cc03, 0x04014c04, 0x04014c04},
+ {0x0000a62c, 0x03810c03, 0x03810c03, 0x04015005, 0x04015005},
+ {0x0000a630, 0x03810e04, 0x03810e04, 0x04015005, 0x04015005},
+ {0x0000a634, 0x03810e04, 0x03810e04, 0x04015005, 0x04015005},
+ {0x0000a638, 0x03810e04, 0x03810e04, 0x04015005, 0x04015005},
+ {0x0000a63c, 0x03810e04, 0x03810e04, 0x04015005, 0x04015005},
+ {0x0000b2dc, 0x0380c7fc, 0x0380c7fc, 0x00637800, 0x00637800},
+ {0x0000b2e0, 0x0000f800, 0x0000f800, 0x03838000, 0x03838000},
+ {0x0000b2e4, 0x03ff0000, 0x03ff0000, 0x03fc0000, 0x03fc0000},
+ {0x0000b2e8, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+ {0x0000c2dc, 0x0380c7fc, 0x0380c7fc, 0x00637800, 0x00637800},
+ {0x0000c2e0, 0x0000f800, 0x0000f800, 0x03838000, 0x03838000},
+ {0x0000c2e4, 0x03ff0000, 0x03ff0000, 0x03fc0000, 0x03fc0000},
+ {0x0000c2e8, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
{0x00016044, 0x012492d4, 0x012492d4, 0x012492d4, 0x012492d4},
{0x00016048, 0x62480001, 0x62480001, 0x62480001, 0x62480001},
{0x00016068, 0x6db6db6c, 0x6db6db6c, 0x6db6db6c, 0x6db6db6c},
@@ -118,7 +146,7 @@ static const u32 ar9300Modes_fast_clock_2p2[][3] = {
{0x00008014, 0x044c044c, 0x08980898},
{0x0000801c, 0x148ec02b, 0x148ec057},
{0x00008318, 0x000044c0, 0x00008980},
- {0x00009e00, 0x03721821, 0x03721821},
+ {0x00009e00, 0x0372131c, 0x0372131c},
{0x0000a230, 0x0000000b, 0x00000016},
{0x0000a254, 0x00000898, 0x00001130},
};
@@ -595,15 +623,16 @@ static const u32 ar9300_2p2_baseband_postamble[][5] = {
{0x0000982c, 0x05eea6d4, 0x05eea6d4, 0x05eea6d4, 0x05eea6d4},
{0x00009830, 0x0000059c, 0x0000059c, 0x0000119c, 0x0000119c},
{0x00009c00, 0x000000c4, 0x000000c4, 0x000000c4, 0x000000c4},
- {0x00009e00, 0x0372161e, 0x0372161e, 0x037216a0, 0x037216a0},
- {0x00009e04, 0x00802020, 0x00802020, 0x00802020, 0x00802020},
+ {0x00009e00, 0x0372111a, 0x0372111a, 0x037216a0, 0x037216a0},
+ {0x00009e04, 0x001c2020, 0x001c2020, 0x001c2020, 0x001c2020},
{0x00009e0c, 0x6c4000e2, 0x6d4000e2, 0x6d4000e2, 0x6c4000e2},
{0x00009e10, 0x7ec88d2e, 0x7ec88d2e, 0x7ec84d2e, 0x7ec84d2e},
- {0x00009e14, 0x31395d5e, 0x3139605e, 0x3139605e, 0x31395d5e},
+ {0x00009e14, 0x37b95d5e, 0x37b9605e, 0x3379605e, 0x33795d5e},
{0x00009e18, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
{0x00009e1c, 0x0001cf9c, 0x0001cf9c, 0x00021f9c, 0x00021f9c},
{0x00009e20, 0x000003b5, 0x000003b5, 0x000003ce, 0x000003ce},
{0x00009e2c, 0x0000001c, 0x0000001c, 0x00000021, 0x00000021},
+ {0x00009e3c, 0xcf946220, 0xcf946220, 0xcf946222, 0xcf946222},
{0x00009e44, 0x02321e27, 0x02321e27, 0x02291e27, 0x02291e27},
{0x00009e48, 0x5030201a, 0x5030201a, 0x50302012, 0x50302012},
{0x00009fc8, 0x0003f000, 0x0003f000, 0x0001a000, 0x0001a000},
@@ -624,16 +653,16 @@ static const u32 ar9300_2p2_baseband_postamble[][5] = {
{0x0000a28c, 0x00022222, 0x00022222, 0x00022222, 0x00022222},
{0x0000a2c4, 0x00158d18, 0x00158d18, 0x00158d18, 0x00158d18},
{0x0000a2d0, 0x00071981, 0x00071981, 0x00071981, 0x00071982},
- {0x0000a2d8, 0xf999a83a, 0xf999a83a, 0xf999a83a, 0xf999a83a},
+ {0x0000a2d8, 0x7999a83a, 0x7999a83a, 0x7999a83a, 0x7999a83a},
{0x0000a358, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
{0x0000a830, 0x0000019c, 0x0000019c, 0x0000019c, 0x0000019c},
- {0x0000ae04, 0x00800000, 0x00800000, 0x00800000, 0x00800000},
+ {0x0000ae04, 0x001c0000, 0x001c0000, 0x001c0000, 0x001c0000},
{0x0000ae18, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
{0x0000ae1c, 0x0000019c, 0x0000019c, 0x0000019c, 0x0000019c},
{0x0000ae20, 0x000001b5, 0x000001b5, 0x000001ce, 0x000001ce},
{0x0000b284, 0x00000000, 0x00000000, 0x00000150, 0x00000150},
{0x0000b830, 0x0000019c, 0x0000019c, 0x0000019c, 0x0000019c},
- {0x0000be04, 0x00800000, 0x00800000, 0x00800000, 0x00800000},
+ {0x0000be04, 0x001c0000, 0x001c0000, 0x001c0000, 0x001c0000},
{0x0000be18, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
{0x0000be1c, 0x0000019c, 0x0000019c, 0x0000019c, 0x0000019c},
{0x0000be20, 0x000001b5, 0x000001b5, 0x000001ce, 0x000001ce},
@@ -649,13 +678,13 @@ static const u32 ar9300_2p2_baseband_core[][2] = {
{0x00009814, 0x9280c00a},
{0x00009818, 0x00000000},
{0x0000981c, 0x00020028},
- {0x00009834, 0x5f3ca3de},
+ {0x00009834, 0x6400a290},
{0x00009838, 0x0108ecff},
{0x0000983c, 0x14750600},
{0x00009880, 0x201fff00},
{0x00009884, 0x00001042},
{0x000098a4, 0x00200400},
- {0x000098b0, 0x52440bbe},
+ {0x000098b0, 0x32840bbe},
{0x000098d0, 0x004b6a8e},
{0x000098d4, 0x00000820},
{0x000098dc, 0x00000000},
@@ -681,7 +710,6 @@ static const u32 ar9300_2p2_baseband_core[][2] = {
{0x00009e30, 0x06336f77},
{0x00009e34, 0x6af6532f},
{0x00009e38, 0x0cc80c00},
- {0x00009e3c, 0xcf946222},
{0x00009e40, 0x0d261820},
{0x00009e4c, 0x00001004},
{0x00009e50, 0x00ff03f1},
@@ -694,7 +722,7 @@ static const u32 ar9300_2p2_baseband_core[][2] = {
{0x0000a220, 0x00000000},
{0x0000a224, 0x00000000},
{0x0000a228, 0x10002310},
- {0x0000a22c, 0x01036a1e},
+ {0x0000a22c, 0x01036a27},
{0x0000a23c, 0x00000000},
{0x0000a244, 0x0c000000},
{0x0000a2a0, 0x00000001},
@@ -702,10 +730,6 @@ static const u32 ar9300_2p2_baseband_core[][2] = {
{0x0000a2c8, 0x00000000},
{0x0000a2cc, 0x18c43433},
{0x0000a2d4, 0x00000000},
- {0x0000a2dc, 0x00000000},
- {0x0000a2e0, 0x00000000},
- {0x0000a2e4, 0x00000000},
- {0x0000a2e8, 0x00000000},
{0x0000a2ec, 0x00000000},
{0x0000a2f0, 0x00000000},
{0x0000a2f4, 0x00000000},
@@ -753,33 +777,17 @@ static const u32 ar9300_2p2_baseband_core[][2] = {
{0x0000a430, 0x1ce739ce},
{0x0000a434, 0x00000000},
{0x0000a438, 0x00001801},
- {0x0000a43c, 0x00000000},
+ {0x0000a43c, 0x00100000},
{0x0000a440, 0x00000000},
{0x0000a444, 0x00000000},
{0x0000a448, 0x06000080},
{0x0000a44c, 0x00000001},
{0x0000a450, 0x00010000},
{0x0000a458, 0x00000000},
- {0x0000a600, 0x00000000},
- {0x0000a604, 0x00000000},
- {0x0000a608, 0x00000000},
- {0x0000a60c, 0x00000000},
- {0x0000a610, 0x00000000},
- {0x0000a614, 0x00000000},
- {0x0000a618, 0x00000000},
- {0x0000a61c, 0x00000000},
- {0x0000a620, 0x00000000},
- {0x0000a624, 0x00000000},
- {0x0000a628, 0x00000000},
- {0x0000a62c, 0x00000000},
- {0x0000a630, 0x00000000},
- {0x0000a634, 0x00000000},
- {0x0000a638, 0x00000000},
- {0x0000a63c, 0x00000000},
{0x0000a640, 0x00000000},
{0x0000a644, 0x3fad9d74},
{0x0000a648, 0x0048060a},
- {0x0000a64c, 0x00000637},
+ {0x0000a64c, 0x00003c37},
{0x0000a670, 0x03020100},
{0x0000a674, 0x09080504},
{0x0000a678, 0x0d0c0b0a},
@@ -802,10 +810,6 @@ static const u32 ar9300_2p2_baseband_core[][2] = {
{0x0000a8f4, 0x00000000},
{0x0000b2d0, 0x00000080},
{0x0000b2d4, 0x00000000},
- {0x0000b2dc, 0x00000000},
- {0x0000b2e0, 0x00000000},
- {0x0000b2e4, 0x00000000},
- {0x0000b2e8, 0x00000000},
{0x0000b2ec, 0x00000000},
{0x0000b2f0, 0x00000000},
{0x0000b2f4, 0x00000000},
@@ -820,10 +824,6 @@ static const u32 ar9300_2p2_baseband_core[][2] = {
{0x0000b8f4, 0x00000000},
{0x0000c2d0, 0x00000080},
{0x0000c2d4, 0x00000000},
- {0x0000c2dc, 0x00000000},
- {0x0000c2e0, 0x00000000},
- {0x0000c2e4, 0x00000000},
- {0x0000c2e8, 0x00000000},
{0x0000c2ec, 0x00000000},
{0x0000c2f0, 0x00000000},
{0x0000c2f4, 0x00000000},
@@ -835,6 +835,10 @@ static const u32 ar9300_2p2_baseband_core[][2] = {
static const u32 ar9300Modes_high_power_tx_gain_table_2p2[][5] = {
/* Addr 5G_HT20 5G_HT40 2G_HT40 2G_HT20 */
+ {0x0000a2dc, 0x0380c7fc, 0x0380c7fc, 0x00637800, 0x00637800},
+ {0x0000a2e0, 0x0000f800, 0x0000f800, 0x03838000, 0x03838000},
+ {0x0000a2e4, 0x03ff0000, 0x03ff0000, 0x03fc0000, 0x03fc0000},
+ {0x0000a2e8, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
{0x0000a410, 0x000050d8, 0x000050d8, 0x000050d9, 0x000050d9},
{0x0000a500, 0x00002220, 0x00002220, 0x00000000, 0x00000000},
{0x0000a504, 0x04002222, 0x04002222, 0x04000002, 0x04000002},
@@ -855,7 +859,7 @@ static const u32 ar9300Modes_high_power_tx_gain_table_2p2[][5] = {
{0x0000a540, 0x49005e72, 0x49005e72, 0x38001660, 0x38001660},
{0x0000a544, 0x4e005eb2, 0x4e005eb2, 0x3b001861, 0x3b001861},
{0x0000a548, 0x53005f12, 0x53005f12, 0x3e001a81, 0x3e001a81},
- {0x0000a54c, 0x59025eb5, 0x59025eb5, 0x42001a83, 0x42001a83},
+ {0x0000a54c, 0x59025eb2, 0x59025eb2, 0x42001a83, 0x42001a83},
{0x0000a550, 0x5e025f12, 0x5e025f12, 0x44001c84, 0x44001c84},
{0x0000a554, 0x61027f12, 0x61027f12, 0x48001ce3, 0x48001ce3},
{0x0000a558, 0x6702bf12, 0x6702bf12, 0x4c001ce5, 0x4c001ce5},
@@ -900,6 +904,30 @@ static const u32 ar9300Modes_high_power_tx_gain_table_2p2[][5] = {
{0x0000a5f4, 0x6f82bf16, 0x6f82bf16, 0x56801eec, 0x56801eec},
{0x0000a5f8, 0x6f82bf16, 0x6f82bf16, 0x56801eec, 0x56801eec},
{0x0000a5fc, 0x6f82bf16, 0x6f82bf16, 0x56801eec, 0x56801eec},
+ {0x0000a600, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+ {0x0000a604, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+ {0x0000a608, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+ {0x0000a60c, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+ {0x0000a610, 0x00804000, 0x00804000, 0x00000000, 0x00000000},
+ {0x0000a614, 0x00804201, 0x00804201, 0x01404000, 0x01404000},
+ {0x0000a618, 0x0280c802, 0x0280c802, 0x01404501, 0x01404501},
+ {0x0000a61c, 0x0280ca03, 0x0280ca03, 0x02008501, 0x02008501},
+ {0x0000a620, 0x04c15104, 0x04c15104, 0x0280ca03, 0x0280ca03},
+ {0x0000a624, 0x04c15305, 0x04c15305, 0x03010c04, 0x03010c04},
+ {0x0000a628, 0x04c15305, 0x04c15305, 0x04014c04, 0x04014c04},
+ {0x0000a62c, 0x04c15305, 0x04c15305, 0x04015005, 0x04015005},
+ {0x0000a630, 0x04c15305, 0x04c15305, 0x04015005, 0x04015005},
+ {0x0000a634, 0x04c15305, 0x04c15305, 0x04015005, 0x04015005},
+ {0x0000a638, 0x04c15305, 0x04c15305, 0x04015005, 0x04015005},
+ {0x0000a63c, 0x04c15305, 0x04c15305, 0x04015005, 0x04015005},
+ {0x0000b2dc, 0x0380c7fc, 0x0380c7fc, 0x00637800, 0x00637800},
+ {0x0000b2e0, 0x0000f800, 0x0000f800, 0x03838000, 0x03838000},
+ {0x0000b2e4, 0x03ff0000, 0x03ff0000, 0x03fc0000, 0x03fc0000},
+ {0x0000b2e8, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+ {0x0000c2dc, 0x0380c7fc, 0x0380c7fc, 0x00637800, 0x00637800},
+ {0x0000c2e0, 0x0000f800, 0x0000f800, 0x03838000, 0x03838000},
+ {0x0000c2e4, 0x03ff0000, 0x03ff0000, 0x03fc0000, 0x03fc0000},
+ {0x0000c2e8, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
{0x00016044, 0x056db2e6, 0x056db2e6, 0x056db2e6, 0x056db2e6},
{0x00016048, 0xae480001, 0xae480001, 0xae480001, 0xae480001},
{0x00016068, 0x6eb6db6c, 0x6eb6db6c, 0x6eb6db6c, 0x6eb6db6c},
@@ -913,6 +941,10 @@ static const u32 ar9300Modes_high_power_tx_gain_table_2p2[][5] = {
static const u32 ar9300Modes_high_ob_db_tx_gain_table_2p2[][5] = {
/* Addr 5G_HT20 5G_HT40 2G_HT40 2G_HT20 */
+ {0x0000a2dc, 0x01feee00, 0x01feee00, 0x00637800, 0x00637800},
+ {0x0000a2e0, 0x0000f000, 0x0000f000, 0x03838000, 0x03838000},
+ {0x0000a2e4, 0x01ff0000, 0x01ff0000, 0x03fc0000, 0x03fc0000},
+ {0x0000a2e8, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
{0x0000a410, 0x000050d8, 0x000050d8, 0x000050d9, 0x000050d9},
{0x0000a500, 0x00002220, 0x00002220, 0x00000000, 0x00000000},
{0x0000a504, 0x04002222, 0x04002222, 0x04000002, 0x04000002},
@@ -933,7 +965,7 @@ static const u32 ar9300Modes_high_ob_db_tx_gain_table_2p2[][5] = {
{0x0000a540, 0x49005e72, 0x49005e72, 0x38001660, 0x38001660},
{0x0000a544, 0x4e005eb2, 0x4e005eb2, 0x3b001861, 0x3b001861},
{0x0000a548, 0x53005f12, 0x53005f12, 0x3e001a81, 0x3e001a81},
- {0x0000a54c, 0x59025eb5, 0x59025eb5, 0x42001a83, 0x42001a83},
+ {0x0000a54c, 0x59025eb2, 0x59025eb2, 0x42001a83, 0x42001a83},
{0x0000a550, 0x5e025f12, 0x5e025f12, 0x44001c84, 0x44001c84},
{0x0000a554, 0x61027f12, 0x61027f12, 0x48001ce3, 0x48001ce3},
{0x0000a558, 0x6702bf12, 0x6702bf12, 0x4c001ce5, 0x4c001ce5},
@@ -978,6 +1010,30 @@ static const u32 ar9300Modes_high_ob_db_tx_gain_table_2p2[][5] = {
{0x0000a5f4, 0x6f82bf16, 0x6f82bf16, 0x56801eec, 0x56801eec},
{0x0000a5f8, 0x6f82bf16, 0x6f82bf16, 0x56801eec, 0x56801eec},
{0x0000a5fc, 0x6f82bf16, 0x6f82bf16, 0x56801eec, 0x56801eec},
+ {0x0000a600, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+ {0x0000a604, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+ {0x0000a608, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+ {0x0000a60c, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+ {0x0000a610, 0x00804000, 0x00804000, 0x00000000, 0x00000000},
+ {0x0000a614, 0x00804201, 0x00804201, 0x01404000, 0x01404000},
+ {0x0000a618, 0x0280c802, 0x0280c802, 0x01404501, 0x01404501},
+ {0x0000a61c, 0x0280ca03, 0x0280ca03, 0x02008501, 0x02008501},
+ {0x0000a620, 0x04c15104, 0x04c15104, 0x0280ca03, 0x0280ca03},
+ {0x0000a624, 0x04c15305, 0x04c15305, 0x03010c04, 0x03010c04},
+ {0x0000a628, 0x04c15305, 0x04c15305, 0x04014c04, 0x04014c04},
+ {0x0000a62c, 0x04c15305, 0x04c15305, 0x04015005, 0x04015005},
+ {0x0000a630, 0x04c15305, 0x04c15305, 0x04015005, 0x04015005},
+ {0x0000a634, 0x04c15305, 0x04c15305, 0x04015005, 0x04015005},
+ {0x0000a638, 0x04c15305, 0x04c15305, 0x04015005, 0x04015005},
+ {0x0000a63c, 0x04c15305, 0x04c15305, 0x04015005, 0x04015005},
+ {0x0000b2dc, 0x01feee00, 0x01feee00, 0x00637800, 0x00637800},
+ {0x0000b2e0, 0x0000f000, 0x0000f000, 0x03838000, 0x03838000},
+ {0x0000b2e4, 0x01ff0000, 0x01ff0000, 0x03fc0000, 0x03fc0000},
+ {0x0000b2e8, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+ {0x0000c2dc, 0x01feee00, 0x01feee00, 0x00637800, 0x00637800},
+ {0x0000c2e0, 0x0000f000, 0x0000f000, 0x03838000, 0x03838000},
+ {0x0000c2e4, 0x01ff0000, 0x01ff0000, 0x03fc0000, 0x03fc0000},
+ {0x0000c2e8, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
{0x00016044, 0x056db2e4, 0x056db2e4, 0x056db2e4, 0x056db2e4},
{0x00016048, 0x8e480001, 0x8e480001, 0x8e480001, 0x8e480001},
{0x00016068, 0x6db6db6c, 0x6db6db6c, 0x6db6db6c, 0x6db6db6c},
@@ -1151,14 +1207,14 @@ static const u32 ar9300Common_rx_gain_table_2p2[][2] = {
{0x0000b074, 0x00000000},
{0x0000b078, 0x00000000},
{0x0000b07c, 0x00000000},
- {0x0000b080, 0x32323232},
- {0x0000b084, 0x2f2f3232},
- {0x0000b088, 0x23282a2d},
- {0x0000b08c, 0x1c1e2123},
- {0x0000b090, 0x14171919},
- {0x0000b094, 0x0e0e1214},
- {0x0000b098, 0x03050707},
- {0x0000b09c, 0x00030303},
+ {0x0000b080, 0x2a2d2f32},
+ {0x0000b084, 0x21232328},
+ {0x0000b088, 0x19191c1e},
+ {0x0000b08c, 0x12141417},
+ {0x0000b090, 0x07070e0e},
+ {0x0000b094, 0x03030305},
+ {0x0000b098, 0x00000003},
+ {0x0000b09c, 0x00000000},
{0x0000b0a0, 0x00000000},
{0x0000b0a4, 0x00000000},
{0x0000b0a8, 0x00000000},
@@ -1251,6 +1307,10 @@ static const u32 ar9300Common_rx_gain_table_2p2[][2] = {
static const u32 ar9300Modes_low_ob_db_tx_gain_table_2p2[][5] = {
/* Addr 5G_HT20 5G_HT40 2G_HT40 2G_HT20 */
+ {0x0000a2dc, 0x0380c7fc, 0x0380c7fc, 0x00637800, 0x00637800},
+ {0x0000a2e0, 0x0000f800, 0x0000f800, 0x03838000, 0x03838000},
+ {0x0000a2e4, 0x03ff0000, 0x03ff0000, 0x03fc0000, 0x03fc0000},
+ {0x0000a2e8, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
{0x0000a410, 0x000050d9, 0x000050d9, 0x000050d9, 0x000050d9},
{0x0000a500, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
{0x0000a504, 0x06000003, 0x06000003, 0x04000002, 0x04000002},
@@ -1316,6 +1376,30 @@ static const u32 ar9300Modes_low_ob_db_tx_gain_table_2p2[][5] = {
{0x0000a5f4, 0x7782b08c, 0x7782b08c, 0x5d801eec, 0x5d801eec},
{0x0000a5f8, 0x7782b08c, 0x7782b08c, 0x5d801eec, 0x5d801eec},
{0x0000a5fc, 0x7782b08c, 0x7782b08c, 0x5d801eec, 0x5d801eec},
+ {0x0000a600, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+ {0x0000a604, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+ {0x0000a608, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+ {0x0000a60c, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+ {0x0000a610, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+ {0x0000a614, 0x01404000, 0x01404000, 0x01404000, 0x01404000},
+ {0x0000a618, 0x01404501, 0x01404501, 0x01404501, 0x01404501},
+ {0x0000a61c, 0x02008802, 0x02008802, 0x02008501, 0x02008501},
+ {0x0000a620, 0x0300cc03, 0x0300cc03, 0x0280ca03, 0x0280ca03},
+ {0x0000a624, 0x0300cc03, 0x0300cc03, 0x03010c04, 0x03010c04},
+ {0x0000a628, 0x0300cc03, 0x0300cc03, 0x04014c04, 0x04014c04},
+ {0x0000a62c, 0x03810c03, 0x03810c03, 0x04015005, 0x04015005},
+ {0x0000a630, 0x03810e04, 0x03810e04, 0x04015005, 0x04015005},
+ {0x0000a634, 0x03810e04, 0x03810e04, 0x04015005, 0x04015005},
+ {0x0000a638, 0x03810e04, 0x03810e04, 0x04015005, 0x04015005},
+ {0x0000a63c, 0x03810e04, 0x03810e04, 0x04015005, 0x04015005},
+ {0x0000b2dc, 0x0380c7fc, 0x0380c7fc, 0x00637800, 0x00637800},
+ {0x0000b2e0, 0x0000f800, 0x0000f800, 0x03838000, 0x03838000},
+ {0x0000b2e4, 0x03ff0000, 0x03ff0000, 0x03fc0000, 0x03fc0000},
+ {0x0000b2e8, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+ {0x0000c2dc, 0x0380c7fc, 0x0380c7fc, 0x00637800, 0x00637800},
+ {0x0000c2e0, 0x0000f800, 0x0000f800, 0x03838000, 0x03838000},
+ {0x0000c2e4, 0x03ff0000, 0x03ff0000, 0x03fc0000, 0x03fc0000},
+ {0x0000c2e8, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
{0x00016044, 0x012492d4, 0x012492d4, 0x012492d4, 0x012492d4},
{0x00016048, 0x66480001, 0x66480001, 0x66480001, 0x66480001},
{0x00016068, 0x6db6db6c, 0x6db6db6c, 0x6db6db6c, 0x6db6db6c},
@@ -1414,15 +1498,10 @@ static const u32 ar9300_2p2_mac_core[][2] = {
{0x00008144, 0xffffffff},
{0x00008168, 0x00000000},
{0x0000816c, 0x00000000},
- {0x00008170, 0x18486200},
- {0x00008174, 0x33332210},
- {0x00008178, 0x00000000},
- {0x0000817c, 0x00020000},
{0x000081c0, 0x00000000},
{0x000081c4, 0x33332210},
{0x000081c8, 0x00000000},
{0x000081cc, 0x00000000},
- {0x000081d4, 0x00000000},
{0x000081ec, 0x00000000},
{0x000081f0, 0x00000000},
{0x000081f4, 0x00000000},
diff --git a/drivers/net/wireless/ath/ath9k/ar9003_paprd.c b/drivers/net/wireless/ath/ath9k/ar9003_paprd.c
index 7c38229ba670..716db414c258 100644
--- a/drivers/net/wireless/ath/ath9k/ar9003_paprd.c
+++ b/drivers/net/wireless/ath/ath9k/ar9003_paprd.c
@@ -347,6 +347,10 @@ static bool create_pa_curve(u32 *data_L, u32 *data_U, u32 *pa_table, u16 *gain)
(((Y[6] - Y[3]) * 1 << scale_factor) +
(x_est[6] - x_est[3])) / (x_est[6] - x_est[3]);
+ /* prevent division by zero */
+ if (G_fxp == 0)
+ return false;
+
Y_intercept =
(G_fxp * (x_est[0] - x_est[3]) +
(1 << scale_factor)) / (1 << scale_factor) + Y[3];
@@ -356,14 +360,12 @@ static bool create_pa_curve(u32 *data_L, u32 *data_U, u32 *pa_table, u16 *gain)
for (i = 0; i <= 3; i++) {
y_est[i] = i * 32;
-
- /* prevent division by zero */
- if (G_fxp == 0)
- return false;
-
x_est[i] = ((y_est[i] * 1 << scale_factor) + G_fxp) / G_fxp;
}
+ if (y_est[max_index] == 0)
+ return false;
+
x_est_fxp1_nonlin =
x_est[max_index] - ((1 << scale_factor) * y_est[max_index] +
G_fxp) / G_fxp;
@@ -457,6 +459,8 @@ static bool create_pa_curve(u32 *data_L, u32 *data_U, u32 *pa_table, u16 *gain)
Q_scale_B = find_proper_scale(find_expn(abs(scale_B)), 10);
scale_B = scale_B / (1 << Q_scale_B);
+ if (scale_B == 0)
+ return false;
Q_beta = find_proper_scale(find_expn(abs(beta_raw)), 10);
Q_alpha = find_proper_scale(find_expn(abs(alpha_raw)), 10);
beta_raw = beta_raw / (1 << Q_beta);
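
The ar9003_paprd.c hunks above hoist the G_fxp == 0 check to before its first use as a divisor and add similar guards for y_est[max_index] and scale_B, bailing out of the curve fit as soon as a value that is later divided by turns out to be zero. A minimal self-contained sketch of the guard pattern (toy math, not the driver's PA-predistortion formulas):

/* Sketch: validate every divisor before it is used, and return failure
 * early rather than computing with a zero denominator. */
#include <stdbool.h>
#include <stdio.h>

static bool fit_line(int y0, int y1, int x0, int x1, int *slope, int *intercept)
{
	int dx = x1 - x0;

	if (dx == 0)          /* would divide by zero below */
		return false;

	*slope = (y1 - y0) / dx;

	if (*slope == 0)      /* the slope itself serves as a divisor later */
		return false;

	*intercept = y0 - *slope * x0;
	return true;
}

int main(void)
{
	int m, b;

	if (!fit_line(2, 10, 1, 5, &m, &b))
		return 1;
	printf("slope=%d intercept=%d\n", m, b);
	return 0;
}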
diff --git a/drivers/net/wireless/ath/ath9k/ath9k.h b/drivers/net/wireless/ath/ath9k/ath9k.h
index 973c919fdd27..170d44a35ccb 100644
--- a/drivers/net/wireless/ath/ath9k/ath9k.h
+++ b/drivers/net/wireless/ath/ath9k/ath9k.h
@@ -310,7 +310,7 @@ struct ath_rx {
u8 rxotherant;
u32 *rxlink;
unsigned int rxfilter;
- spinlock_t rxflushlock;
+ spinlock_t pcu_lock;
spinlock_t rxbuflock;
struct list_head rxbuf;
struct ath_descdma rxdma;
@@ -675,6 +675,7 @@ static inline void ath_read_cachesize(struct ath_common *common, int *csz)
}
extern struct ieee80211_ops ath9k_ops;
+extern struct pm_qos_request_list ath9k_pm_qos_req;
extern int modparam_nohwcrypt;
extern int led_blink;
diff --git a/drivers/net/wireless/ath/ath9k/beacon.c b/drivers/net/wireless/ath/ath9k/beacon.c
index 4ed010d4ef96..19891e7d49ae 100644
--- a/drivers/net/wireless/ath/ath9k/beacon.c
+++ b/drivers/net/wireless/ath/ath9k/beacon.c
@@ -370,7 +370,7 @@ void ath_beacon_tasklet(unsigned long data)
ath_print(common, ATH_DBG_BSTUCK,
"beacon is officially stuck\n");
sc->sc_flags |= SC_OP_TSF_RESET;
- ath_reset(sc, false);
+ ath_reset(sc, true);
}
return;
diff --git a/drivers/net/wireless/ath/ath9k/eeprom_9287.c b/drivers/net/wireless/ath/ath9k/eeprom_9287.c
index 966b9496a9dd..195406db3bd8 100644
--- a/drivers/net/wireless/ath/ath9k/eeprom_9287.c
+++ b/drivers/net/wireless/ath/ath9k/eeprom_9287.c
@@ -37,7 +37,7 @@ static bool ath9k_hw_ar9287_fill_eeprom(struct ath_hw *ah)
int addr, eep_start_loc;
eep_data = (u16 *)eep;
- if (ah->hw_version.devid == 0x7015)
+ if (AR9287_HTC_DEVID(ah))
eep_start_loc = AR9287_HTC_EEP_START_LOC;
else
eep_start_loc = AR9287_EEP_START_LOC;
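
The eeprom_9287.c hunk above replaces a literal devid == 0x7015 test with the AR9287_HTC_DEVID() macro (defined in the reg.h hunk further down to match 0x7015 and 0x1200), so new HTC device IDs only need to be added in one place. A small sketch of the pattern, with a stand-in struct:

/* Sketch: hide the list of matching device IDs behind one macro so call
 * sites stay unchanged when an ID is added. IDs mirror the reg.h hunk. */
#include <stdio.h>

struct hw_version { unsigned int devid; };

#define IS_HTC_VARIANT(hw) \
	((hw)->devid == 0x7015 || (hw)->devid == 0x1200)

int main(void)
{
	struct hw_version hw = { .devid = 0x1200 };

	if (IS_HTC_VARIANT(&hw))
		printf("use the HTC EEPROM start location\n");
	else
		printf("use the default EEPROM start location\n");
	return 0;
}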
diff --git a/drivers/net/wireless/ath/ath9k/hif_usb.c b/drivers/net/wireless/ath/ath9k/hif_usb.c
index 728d904c74d7..dfb6560dab92 100644
--- a/drivers/net/wireless/ath/ath9k/hif_usb.c
+++ b/drivers/net/wireless/ath/ath9k/hif_usb.c
@@ -35,8 +35,14 @@ static struct usb_device_id ath9k_hif_usb_ids[] = {
{ USB_DEVICE(0x07D1, 0x3A10) }, /* Dlink Wireless 150 */
{ USB_DEVICE(0x13D3, 0x3327) }, /* Azurewave */
{ USB_DEVICE(0x13D3, 0x3328) }, /* Azurewave */
+ { USB_DEVICE(0x13D3, 0x3346) }, /* IMC Networks */
+ { USB_DEVICE(0x13D3, 0x3348) }, /* Azurewave */
+ { USB_DEVICE(0x13D3, 0x3349) }, /* Azurewave */
+ { USB_DEVICE(0x13D3, 0x3350) }, /* Azurewave */
{ USB_DEVICE(0x04CA, 0x4605) }, /* Liteon */
{ USB_DEVICE(0x083A, 0xA704) }, /* SMC Networks */
+ { USB_DEVICE(0x040D, 0x3801) }, /* VIA */
+ { USB_DEVICE(0x1668, 0x1200) }, /* Verizon */
{ },
};
@@ -540,11 +546,11 @@ static void ath9k_hif_usb_reg_in_cb(struct urb *urb)
return;
}
- usb_fill_int_urb(urb, hif_dev->udev,
+ usb_fill_bulk_urb(urb, hif_dev->udev,
usb_rcvbulkpipe(hif_dev->udev,
USB_REG_IN_PIPE),
nskb->data, MAX_REG_IN_BUF_SIZE,
- ath9k_hif_usb_reg_in_cb, nskb, 1);
+ ath9k_hif_usb_reg_in_cb, nskb);
ret = usb_submit_urb(urb, GFP_ATOMIC);
if (ret) {
@@ -720,11 +726,11 @@ static int ath9k_hif_usb_alloc_reg_in_urb(struct hif_device_usb *hif_dev)
if (!skb)
goto err;
- usb_fill_int_urb(hif_dev->reg_in_urb, hif_dev->udev,
+ usb_fill_bulk_urb(hif_dev->reg_in_urb, hif_dev->udev,
usb_rcvbulkpipe(hif_dev->udev,
USB_REG_IN_PIPE),
skb->data, MAX_REG_IN_BUF_SIZE,
- ath9k_hif_usb_reg_in_cb, skb, 1);
+ ath9k_hif_usb_reg_in_cb, skb);
if (usb_submit_urb(hif_dev->reg_in_urb, GFP_KERNEL) != 0)
goto err;
@@ -801,10 +807,18 @@ static int ath9k_hif_usb_download_fw(struct hif_device_usb *hif_dev)
}
kfree(buf);
- if ((hif_dev->device_id == 0x7010) || (hif_dev->device_id == 0x7015))
+ switch (hif_dev->device_id) {
+ case 0x7010:
+ case 0x7015:
+ case 0x9018:
+ case 0xA704:
+ case 0x1200:
firm_offset = AR7010_FIRMWARE_TEXT;
- else
+ break;
+ default:
firm_offset = AR9271_FIRMWARE_TEXT;
+ break;
+ }
/*
* Issue FW download complete command to firmware.
@@ -837,14 +851,6 @@ static int ath9k_hif_usb_dev_init(struct hif_device_usb *hif_dev)
goto err_fw_req;
}
- /* Alloc URBs */
- ret = ath9k_hif_usb_alloc_urbs(hif_dev);
- if (ret) {
- dev_err(&hif_dev->udev->dev,
- "ath9k_htc: Unable to allocate URBs\n");
- goto err_urb;
- }
-
/* Download firmware */
ret = ath9k_hif_usb_download_fw(hif_dev);
if (ret) {
@@ -860,16 +866,22 @@ static int ath9k_hif_usb_dev_init(struct hif_device_usb *hif_dev)
*/
for (idx = 0; idx < alt->desc.bNumEndpoints; idx++) {
endp = &alt->endpoint[idx].desc;
- if (((endp->bEndpointAddress & USB_ENDPOINT_NUMBER_MASK)
- == 0x04) &&
- ((endp->bmAttributes & USB_ENDPOINT_XFERTYPE_MASK)
- == USB_ENDPOINT_XFER_INT)) {
+ if ((endp->bmAttributes & USB_ENDPOINT_XFERTYPE_MASK)
+ == USB_ENDPOINT_XFER_INT) {
endp->bmAttributes &= ~USB_ENDPOINT_XFERTYPE_MASK;
endp->bmAttributes |= USB_ENDPOINT_XFER_BULK;
endp->bInterval = 0;
}
}
+ /* Alloc URBs */
+ ret = ath9k_hif_usb_alloc_urbs(hif_dev);
+ if (ret) {
+ dev_err(&hif_dev->udev->dev,
+ "ath9k_htc: Unable to allocate URBs\n");
+ goto err_urb;
+ }
+
return 0;
err_fw_download:
@@ -923,6 +935,8 @@ static int ath9k_hif_usb_probe(struct usb_interface *interface,
case 0x7010:
case 0x7015:
case 0x9018:
+ case 0xA704:
+ case 0x1200:
if (le16_to_cpu(udev->descriptor.bcdDevice) == 0x0202)
hif_dev->fw_name = FIRMWARE_AR7010_1_1;
else
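
Among the hif_usb.c changes above, the firmware-offset selection grows from a two-way if/else into a switch over the known AR7010-class device IDs, the REG_IN pipe is filled as a bulk URB instead of an interrupt URB, and URB allocation is moved after the endpoint fixup. A compact sketch of the switch pattern, using placeholder offset constants rather than the driver's real addresses:

/* Sketch: one switch maps every AR7010-class device ID to its firmware
 * text offset; anything else falls through to the AR9271 default. */
#include <stdio.h>

#define FW_TEXT_AR7010 0x906000  /* placeholder address */
#define FW_TEXT_AR9271 0x903000  /* placeholder address */

static unsigned int firmware_offset(unsigned int device_id)
{
	switch (device_id) {
	case 0x7010:
	case 0x7015:
	case 0x9018:
	case 0xA704:
	case 0x1200:
		return FW_TEXT_AR7010;
	default:
		return FW_TEXT_AR9271;
	}
}

int main(void)
{
	printf("0x7015 -> %#x\n", firmware_offset(0x7015));
	printf("0x9271 -> %#x\n", firmware_offset(0x9271));
	return 0;
}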
diff --git a/drivers/net/wireless/ath/ath9k/htc_drv_init.c b/drivers/net/wireless/ath/ath9k/htc_drv_init.c
index 3d7b97f1b3ae..7c8a38d04561 100644
--- a/drivers/net/wireless/ath/ath9k/htc_drv_init.c
+++ b/drivers/net/wireless/ath/ath9k/htc_drv_init.c
@@ -249,6 +249,8 @@ static int ath9k_init_htc_services(struct ath9k_htc_priv *priv, u16 devid)
case 0x7010:
case 0x7015:
case 0x9018:
+ case 0xA704:
+ case 0x1200:
priv->htc->credits = 45;
break;
default:
diff --git a/drivers/net/wireless/ath/ath9k/htc_drv_txrx.c b/drivers/net/wireless/ath/ath9k/htc_drv_txrx.c
index 3d19b5bc937f..29d80ca78393 100644
--- a/drivers/net/wireless/ath/ath9k/htc_drv_txrx.c
+++ b/drivers/net/wireless/ath/ath9k/htc_drv_txrx.c
@@ -121,7 +121,7 @@ int ath9k_htc_tx_start(struct ath9k_htc_priv *priv, struct sk_buff *skb)
tx_hdr.data_type = ATH9K_HTC_NORMAL;
}
- if (ieee80211_is_data(fc)) {
+ if (ieee80211_is_data_qos(fc)) {
qc = ieee80211_get_qos_ctl(hdr);
tx_hdr.tidno = qc[0] & IEEE80211_QOS_CTL_TID_MASK;
}
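
The one-line htc_drv_txrx.c fix above narrows the TID lookup from ieee80211_is_data() to ieee80211_is_data_qos(): every QoS-data frame is a data frame, but only QoS-data frames actually carry a QoS-control (TID) field, so reading it from plain data frames would interpret payload bytes as a header. A toy sketch of the distinction; the frame-control constants follow the usual 802.11 layout as I recall it and are illustrative, not copied from mac80211:

/* Sketch: only read a TID when the frame really is QoS data. */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define FCTL_FTYPE      0x000c
#define FTYPE_DATA      0x0008
#define STYPE_QOS_DATA  0x0080

static bool is_data(uint16_t fc)
{
	return (fc & FCTL_FTYPE) == FTYPE_DATA;
}

static bool is_data_qos(uint16_t fc)
{
	return (fc & (FCTL_FTYPE | STYPE_QOS_DATA)) ==
	       (FTYPE_DATA | STYPE_QOS_DATA);
}

int main(void)
{
	uint16_t plain_data = FTYPE_DATA;
	uint16_t qos_data   = FTYPE_DATA | STYPE_QOS_DATA;
	unsigned int tid = 0;

	if (is_data_qos(qos_data))
		tid = 5 & 0x0f;   /* pretend this came from the QoS field */

	printf("plain data frame: data=%d qos=%d\n",
	       is_data(plain_data), is_data_qos(plain_data));
	printf("qos data frame:   data=%d qos=%d tid=%u\n",
	       is_data(qos_data), is_data_qos(qos_data), tid);
	return 0;
}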
diff --git a/drivers/net/wireless/ath/ath9k/hw.c b/drivers/net/wireless/ath/ath9k/hw.c
index cc13ee117823..6ebc68bca91f 100644
--- a/drivers/net/wireless/ath/ath9k/hw.c
+++ b/drivers/net/wireless/ath/ath9k/hw.c
@@ -484,6 +484,7 @@ static int ath9k_hw_post_init(struct ath_hw *ah)
ath_print(ath9k_hw_common(ah), ATH_DBG_FATAL,
"Failed allocating banks for "
"external radio\n");
+ ath9k_hw_rf_free_ext_banks(ah);
return ecode;
}
@@ -952,9 +953,12 @@ static void ath9k_hw_set_operating_mode(struct ath_hw *ah, int opmode)
REG_SET_BIT(ah, AR_CFG, AR_CFG_AP_ADHOC_INDICATION);
break;
case NL80211_IFTYPE_STATION:
- case NL80211_IFTYPE_MONITOR:
REG_WRITE(ah, AR_STA_ID1, val | AR_STA_ID1_KSRCH_MODE);
break;
+ default:
+ if (ah->is_monitoring)
+ REG_WRITE(ah, AR_STA_ID1, val | AR_STA_ID1_KSRCH_MODE);
+ break;
}
}
@@ -1634,7 +1638,6 @@ void ath9k_hw_beaconinit(struct ath_hw *ah, u32 next_beacon, u32 beacon_period)
switch (ah->opmode) {
case NL80211_IFTYPE_STATION:
- case NL80211_IFTYPE_MONITOR:
REG_WRITE(ah, AR_NEXT_TBTT_TIMER, TU_TO_USEC(next_beacon));
REG_WRITE(ah, AR_NEXT_DMA_BEACON_ALERT, 0xffff);
REG_WRITE(ah, AR_NEXT_SWBA, 0x7ffff);
@@ -1663,6 +1666,14 @@ void ath9k_hw_beaconinit(struct ath_hw *ah, u32 next_beacon, u32 beacon_period)
AR_TBTT_TIMER_EN | AR_DBA_TIMER_EN | AR_SWBA_TIMER_EN;
break;
default:
+ if (ah->is_monitoring) {
+ REG_WRITE(ah, AR_NEXT_TBTT_TIMER,
+ TU_TO_USEC(next_beacon));
+ REG_WRITE(ah, AR_NEXT_DMA_BEACON_ALERT, 0xffff);
+ REG_WRITE(ah, AR_NEXT_SWBA, 0x7ffff);
+ flags |= AR_TBTT_TIMER_EN;
+ break;
+ }
ath_print(ath9k_hw_common(ah), ATH_DBG_BEACON,
"%s: unsupported opmode: %d\n",
__func__, ah->opmode);
diff --git a/drivers/net/wireless/ath/ath9k/hw.h b/drivers/net/wireless/ath/ath9k/hw.h
index d032939768b0..d47d1b4b6002 100644
--- a/drivers/net/wireless/ath/ath9k/hw.h
+++ b/drivers/net/wireless/ath/ath9k/hw.h
@@ -622,6 +622,7 @@ struct ath_hw {
bool sw_mgmt_crypto;
bool is_pciexpress;
+ bool is_monitoring;
bool need_an_top2_fixup;
u16 tx_trig_level;
diff --git a/drivers/net/wireless/ath/ath9k/init.c b/drivers/net/wireless/ath/ath9k/init.c
index bc6c4df9712c..92bc5c5f4876 100644
--- a/drivers/net/wireless/ath/ath9k/init.c
+++ b/drivers/net/wireless/ath/ath9k/init.c
@@ -15,6 +15,7 @@
*/
#include <linux/slab.h>
+#include <linux/pm_qos_params.h>
#include "ath9k.h"
@@ -179,6 +180,8 @@ static const struct ath_ops ath9k_common_ops = {
.write = ath9k_iowrite32,
};
+struct pm_qos_request_list ath9k_pm_qos_req;
+
/**************************/
/* Initialization */
/**************************/
@@ -577,6 +580,7 @@ static int ath9k_init_softc(u16 devid, struct ath_softc *sc, u16 subsysid,
common->hw = sc->hw;
common->priv = sc;
common->debug_mask = ath9k_debug;
+ spin_lock_init(&common->cc_lock);
spin_lock_init(&sc->wiphy_lock);
spin_lock_init(&sc->sc_resetlock);
@@ -755,6 +759,9 @@ int ath9k_init_device(u16 devid, struct ath_softc *sc, u16 subsysid,
ath_init_leds(sc);
ath_start_rfkill_poll(sc);
+ pm_qos_add_request(&ath9k_pm_qos_req, PM_QOS_CPU_DMA_LATENCY,
+ PM_QOS_DEFAULT_VALUE);
+
return 0;
error_world:
@@ -823,6 +830,7 @@ void ath9k_deinit_device(struct ath_softc *sc)
}
ieee80211_unregister_hw(hw);
+ pm_qos_remove_request(&ath9k_pm_qos_req);
ath_rx_cleanup(sc);
ath_tx_cleanup(sc);
ath9k_deinit_softc(sc);
diff --git a/drivers/net/wireless/ath/ath9k/main.c b/drivers/net/wireless/ath/ath9k/main.c
index 3ff0e476c2b3..25d3ef4c338e 100644
--- a/drivers/net/wireless/ath/ath9k/main.c
+++ b/drivers/net/wireless/ath/ath9k/main.c
@@ -15,6 +15,7 @@
*/
#include <linux/nl80211.h>
+#include <linux/pm_qos_params.h>
#include "ath9k.h"
#include "btcoex.h"
@@ -93,11 +94,13 @@ void ath9k_ps_wakeup(struct ath_softc *sc)
{
struct ath_common *common = ath9k_hw_common(sc->sc_ah);
unsigned long flags;
+ enum ath9k_power_mode power_mode;
spin_lock_irqsave(&sc->sc_pm_lock, flags);
if (++sc->ps_usecount != 1)
goto unlock;
+ power_mode = sc->sc_ah->power_mode;
ath9k_hw_setpower(sc->sc_ah, ATH9K_PM_AWAKE);
/*
@@ -105,10 +108,12 @@ void ath9k_ps_wakeup(struct ath_softc *sc)
* useful data. Better clear them now so that they don't mess up
* survey data results.
*/
- spin_lock(&common->cc_lock);
- ath_hw_cycle_counters_update(common);
- memset(&common->cc_survey, 0, sizeof(common->cc_survey));
- spin_unlock(&common->cc_lock);
+ if (power_mode != ATH9K_PM_AWAKE) {
+ spin_lock(&common->cc_lock);
+ ath_hw_cycle_counters_update(common);
+ memset(&common->cc_survey, 0, sizeof(common->cc_survey));
+ spin_unlock(&common->cc_lock);
+ }
unlock:
spin_unlock_irqrestore(&sc->sc_pm_lock, flags);
@@ -182,6 +187,9 @@ static void ath_update_survey_stats(struct ath_softc *sc)
struct ath_cycle_counters *cc = &common->cc_survey;
unsigned int div = common->clockrate * 1000;
+ if (!ah->curchan)
+ return;
+
if (ah->power_mode == ATH9K_PM_AWAKE)
ath_hw_cycle_counters_update(common);
@@ -238,6 +246,9 @@ int ath_set_channel(struct ath_softc *sc, struct ieee80211_hw *hw,
*/
ath9k_hw_set_interrupts(ah, 0);
ath_drain_all_txq(sc, false);
+
+ spin_lock_bh(&sc->rx.pcu_lock);
+
stopped = ath_stoprecv(sc);
/* XXX: do not flush receive queue here. We don't want
@@ -265,6 +276,7 @@ int ath_set_channel(struct ath_softc *sc, struct ieee80211_hw *hw,
"reset status %d\n",
channel->center_freq, r);
spin_unlock_bh(&sc->sc_resetlock);
+ spin_unlock_bh(&sc->rx.pcu_lock);
goto ps_restore;
}
spin_unlock_bh(&sc->sc_resetlock);
@@ -273,9 +285,12 @@ int ath_set_channel(struct ath_softc *sc, struct ieee80211_hw *hw,
ath_print(common, ATH_DBG_FATAL,
"Unable to restart recv logic\n");
r = -EIO;
+ spin_unlock_bh(&sc->rx.pcu_lock);
goto ps_restore;
}
+ spin_unlock_bh(&sc->rx.pcu_lock);
+
ath_update_txpow(sc);
ath9k_hw_set_interrupts(ah, ah->imask);
@@ -577,7 +592,7 @@ void ath_hw_check(struct work_struct *work)
msleep(1);
}
- ath_reset(sc, false);
+ ath_reset(sc, true);
out:
ath9k_ps_restore(sc);
@@ -595,7 +610,7 @@ void ath9k_tasklet(unsigned long data)
ath9k_ps_wakeup(sc);
if (status & ATH9K_INT_FATAL) {
- ath_reset(sc, false);
+ ath_reset(sc, true);
ath9k_ps_restore(sc);
return;
}
@@ -610,7 +625,7 @@ void ath9k_tasklet(unsigned long data)
rxmask = (ATH9K_INT_RX | ATH9K_INT_RXEOL | ATH9K_INT_RXORN);
if (status & rxmask) {
- spin_lock_bh(&sc->rx.rxflushlock);
+ spin_lock_bh(&sc->rx.pcu_lock);
/* Check for high priority Rx first */
if ((ah->caps.hw_caps & ATH9K_HW_CAP_EDMA) &&
@@ -618,7 +633,7 @@ void ath9k_tasklet(unsigned long data)
ath_rx_tasklet(sc, 0, true);
ath_rx_tasklet(sc, 0, false);
- spin_unlock_bh(&sc->rx.rxflushlock);
+ spin_unlock_bh(&sc->rx.pcu_lock);
}
if (status & ATH9K_INT_TX) {
@@ -873,6 +888,7 @@ void ath_radio_enable(struct ath_softc *sc, struct ieee80211_hw *hw)
if (!ah->curchan)
ah->curchan = ath_get_curchannel(sc, sc->hw);
+ spin_lock_bh(&sc->rx.pcu_lock);
spin_lock_bh(&sc->sc_resetlock);
r = ath9k_hw_reset(ah, ah->curchan, ah->caldata, false);
if (r) {
@@ -887,8 +903,10 @@ void ath_radio_enable(struct ath_softc *sc, struct ieee80211_hw *hw)
if (ath_startrecv(sc) != 0) {
ath_print(common, ATH_DBG_FATAL,
"Unable to restart recv logic\n");
+ spin_unlock_bh(&sc->rx.pcu_lock);
return;
}
+ spin_unlock_bh(&sc->rx.pcu_lock);
if (sc->sc_flags & SC_OP_BEACONS)
ath_beacon_config(sc, NULL); /* restart beacons */
@@ -927,6 +945,9 @@ void ath_radio_disable(struct ath_softc *sc, struct ieee80211_hw *hw)
ath9k_hw_set_interrupts(ah, 0);
ath_drain_all_txq(sc, false); /* clear pending tx frames */
+
+ spin_lock_bh(&sc->rx.pcu_lock);
+
ath_stoprecv(sc); /* turn off frame recv */
ath_flushrecv(sc); /* flush recv queue */
@@ -944,6 +965,9 @@ void ath_radio_disable(struct ath_softc *sc, struct ieee80211_hw *hw)
spin_unlock_bh(&sc->sc_resetlock);
ath9k_hw_phy_disable(ah);
+
+ spin_unlock_bh(&sc->rx.pcu_lock);
+
ath9k_hw_configpcipowersave(ah, 1, 1);
ath9k_ps_restore(sc);
ath9k_setpower(sc, ATH9K_PM_FULL_SLEEP);
@@ -963,6 +987,9 @@ int ath_reset(struct ath_softc *sc, bool retry_tx)
ath9k_hw_set_interrupts(ah, 0);
ath_drain_all_txq(sc, retry_tx);
+
+ spin_lock_bh(&sc->rx.pcu_lock);
+
ath_stoprecv(sc);
ath_flushrecv(sc);
@@ -977,6 +1004,8 @@ int ath_reset(struct ath_softc *sc, bool retry_tx)
ath_print(common, ATH_DBG_FATAL,
"Unable to start recv logic\n");
+ spin_unlock_bh(&sc->rx.pcu_lock);
+
/*
* We may be doing a reset in response to a request
* that changes the channel so update any state that
@@ -1139,6 +1168,7 @@ static int ath9k_start(struct ieee80211_hw *hw)
* be followed by initialization of the appropriate bits
* and then setup of the interrupt mask.
*/
+ spin_lock_bh(&sc->rx.pcu_lock);
spin_lock_bh(&sc->sc_resetlock);
r = ath9k_hw_reset(ah, init_channel, ah->caldata, false);
if (r) {
@@ -1147,6 +1177,7 @@ static int ath9k_start(struct ieee80211_hw *hw)
"(freq %u MHz)\n", r,
curchan->center_freq);
spin_unlock_bh(&sc->sc_resetlock);
+ spin_unlock_bh(&sc->rx.pcu_lock);
goto mutex_unlock;
}
spin_unlock_bh(&sc->sc_resetlock);
@@ -1168,8 +1199,10 @@ static int ath9k_start(struct ieee80211_hw *hw)
ath_print(common, ATH_DBG_FATAL,
"Unable to start recv logic\n");
r = -EIO;
+ spin_unlock_bh(&sc->rx.pcu_lock);
goto mutex_unlock;
}
+ spin_unlock_bh(&sc->rx.pcu_lock);
/* Setup our intr mask. */
ah->imask = ATH9K_INT_TX | ATH9K_INT_RXEOL |
@@ -1189,6 +1222,7 @@ static int ath9k_start(struct ieee80211_hw *hw)
ah->imask |= ATH9K_INT_CST;
sc->sc_flags &= ~SC_OP_INVALID;
+ sc->sc_ah->is_monitoring = false;
/* Disable BMISS interrupt when we're not associated */
ah->imask &= ~(ATH9K_INT_SWBA | ATH9K_INT_BMISS);
@@ -1210,6 +1244,8 @@ static int ath9k_start(struct ieee80211_hw *hw)
ath9k_btcoex_timer_resume(sc);
}
+ pm_qos_update_request(&ath9k_pm_qos_req, 55);
+
mutex_unlock:
mutex_unlock(&sc->mutex);
@@ -1368,12 +1404,14 @@ static void ath9k_stop(struct ieee80211_hw *hw)
* before setting the invalid flag. */
ath9k_hw_set_interrupts(ah, 0);
+ spin_lock_bh(&sc->rx.pcu_lock);
if (!(sc->sc_flags & SC_OP_INVALID)) {
ath_drain_all_txq(sc, false);
ath_stoprecv(sc);
ath9k_hw_phy_disable(ah);
} else
sc->rx.rxlink = NULL;
+ spin_unlock_bh(&sc->rx.pcu_lock);
/* disable HAL and put h/w to sleep */
ath9k_hw_disable(ah);
@@ -1385,6 +1423,8 @@ static void ath9k_stop(struct ieee80211_hw *hw)
sc->sc_flags |= SC_OP_INVALID;
+ pm_qos_update_request(&ath9k_pm_qos_req, PM_QOS_DEFAULT_VALUE);
+
mutex_unlock(&sc->mutex);
ath_print(common, ATH_DBG_CONFIG, "Driver halt\n");
@@ -1463,8 +1503,7 @@ static int ath9k_add_interface(struct ieee80211_hw *hw,
ath9k_hw_set_interrupts(ah, ah->imask);
if (vif->type == NL80211_IFTYPE_AP ||
- vif->type == NL80211_IFTYPE_ADHOC ||
- vif->type == NL80211_IFTYPE_MONITOR) {
+ vif->type == NL80211_IFTYPE_ADHOC) {
sc->sc_flags |= SC_OP_ANI_RUN;
ath_start_ani(common);
}
@@ -1614,8 +1653,12 @@ static int ath9k_config(struct ieee80211_hw *hw, u32 changed)
if (changed & IEEE80211_CONF_CHANGE_MONITOR) {
if (conf->flags & IEEE80211_CONF_MONITOR) {
ath_print(common, ATH_DBG_CONFIG,
- "HW opmode set to Monitor mode\n");
- sc->sc_ah->opmode = NL80211_IFTYPE_MONITOR;
+ "Monitor mode is enabled\n");
+ sc->sc_ah->is_monitoring = true;
+ } else {
+ ath_print(common, ATH_DBG_CONFIG,
+ "Monitor mode is disabled\n");
+ sc->sc_ah->is_monitoring = false;
}
}
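
The main.c changes above serialize the receive stop/start paths behind the new rx.pcu_lock, switch several recovery paths to full resets (ath_reset(sc, true)), track monitor mode with is_monitoring, and, in ath9k_ps_wakeup(), remember the previous power mode so the cycle counters are discarded only when the chip really was asleep. A stand-alone sketch of that last idea, with invented names:

/* Sketch: only clear survey counters on a real sleep-to-awake transition;
 * counters gathered while the chip stayed awake remain valid. */
#include <stdio.h>
#include <string.h>

enum power_mode { PM_AWAKE, PM_SLEEP };

struct chip {
	enum power_mode power_mode;
	unsigned int cycle_counters[4];
};

static void wakeup(struct chip *c)
{
	enum power_mode prev = c->power_mode;

	c->power_mode = PM_AWAKE;

	if (prev != PM_AWAKE)
		memset(c->cycle_counters, 0, sizeof(c->cycle_counters));
}

int main(void)
{
	struct chip c = { .power_mode = PM_SLEEP,
			  .cycle_counters = { 1, 2, 3, 4 } };

	wakeup(&c);
	printf("after wake from sleep: %u\n", c.cycle_counters[0]);

	c.cycle_counters[0] = 42;
	wakeup(&c);             /* already awake: counters kept */
	printf("wake while awake:      %u\n", c.cycle_counters[0]);
	return 0;
}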
diff --git a/drivers/net/wireless/ath/ath9k/rc.c b/drivers/net/wireless/ath/ath9k/rc.c
index 0cee90cf8dc9..89978d71617f 100644
--- a/drivers/net/wireless/ath/ath9k/rc.c
+++ b/drivers/net/wireless/ath/ath9k/rc.c
@@ -527,7 +527,7 @@ static u8 ath_rc_setvalid_rates(struct ath_rate_priv *ath_rc_priv,
for (i = 0; i < rateset->rs_nrates; i++) {
for (j = 0; j < rate_table->rate_cnt; j++) {
u32 phy = rate_table->info[j].phy;
- u16 rate_flags = rate_table->info[i].rate_flags;
+ u16 rate_flags = rate_table->info[j].rate_flags;
u8 rate = rateset->rs_rates[i];
u8 dot11rate = rate_table->info[j].dot11rate;
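
The rc.c fix above is a one-character index correction: inside the nested match loops, the rate table is walked with j, so its rate_flags must be read with j as well; using i pulled flags from an unrelated entry. A tiny sketch of the mix-up:

/* Sketch: in a nested match loop, every field of the table entry must be
 * read with the table's own index (j), never the outer index (i). */
#include <stdio.h>

struct entry { int value; int flags; };

int main(void)
{
	int wanted[2] = { 20, 30 };
	struct entry table[3] = { {10, 1}, {20, 2}, {30, 4} };
	int i, j;

	for (i = 0; i < 2; i++) {
		for (j = 0; j < 3; j++) {
			if (table[j].value != wanted[i])
				continue;
			/* table[j].flags, not table[i].flags: using i here
			 * would read flags from an unrelated entry. */
			printf("match %d with flags %d\n",
			       wanted[i], table[j].flags);
		}
	}
	return 0;
}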
diff --git a/drivers/net/wireless/ath/ath9k/recv.c b/drivers/net/wireless/ath/ath9k/recv.c
index fe73fc50082a..1a62e351ec77 100644
--- a/drivers/net/wireless/ath/ath9k/recv.c
+++ b/drivers/net/wireless/ath/ath9k/recv.c
@@ -297,19 +297,17 @@ static void ath_edma_start_recv(struct ath_softc *sc)
ath_rx_addbuffer_edma(sc, ATH9K_RX_QUEUE_LP,
sc->rx.rx_edma[ATH9K_RX_QUEUE_LP].rx_fifo_hwsize);
- spin_unlock_bh(&sc->rx.rxbuflock);
-
ath_opmode_init(sc);
ath9k_hw_startpcureceive(sc->sc_ah, (sc->sc_flags & SC_OP_OFFCHANNEL));
+
+ spin_unlock_bh(&sc->rx.rxbuflock);
}
static void ath_edma_stop_recv(struct ath_softc *sc)
{
- spin_lock_bh(&sc->rx.rxbuflock);
ath_rx_remove_buffer(sc, ATH9K_RX_QUEUE_HP);
ath_rx_remove_buffer(sc, ATH9K_RX_QUEUE_LP);
- spin_unlock_bh(&sc->rx.rxbuflock);
}
int ath_rx_init(struct ath_softc *sc, int nbufs)
@@ -319,7 +317,7 @@ int ath_rx_init(struct ath_softc *sc, int nbufs)
struct ath_buf *bf;
int error = 0;
- spin_lock_init(&sc->rx.rxflushlock);
+ spin_lock_init(&sc->rx.pcu_lock);
sc->sc_flags &= ~SC_OP_RXFLUSH;
spin_lock_init(&sc->rx.rxbuflock);
@@ -443,7 +441,7 @@ u32 ath_calcrxfilter(struct ath_softc *sc)
*/
if (((sc->sc_ah->opmode != NL80211_IFTYPE_AP) &&
(sc->rx.rxfilter & FIF_PROMISC_IN_BSS)) ||
- (sc->sc_ah->opmode == NL80211_IFTYPE_MONITOR))
+ (sc->sc_ah->is_monitoring))
rfilt |= ATH9K_RX_FILTER_PROM;
if (sc->rx.rxfilter & FIF_CONTROL)
@@ -506,10 +504,11 @@ int ath_startrecv(struct ath_softc *sc)
ath9k_hw_rxena(ah);
start_recv:
- spin_unlock_bh(&sc->rx.rxbuflock);
ath_opmode_init(sc);
ath9k_hw_startpcureceive(ah, (sc->sc_flags & SC_OP_OFFCHANNEL));
+ spin_unlock_bh(&sc->rx.rxbuflock);
+
return 0;
}
@@ -518,7 +517,8 @@ bool ath_stoprecv(struct ath_softc *sc)
struct ath_hw *ah = sc->sc_ah;
bool stopped;
- ath9k_hw_stoppcurecv(ah);
+ spin_lock_bh(&sc->rx.rxbuflock);
+ ath9k_hw_abortpcurecv(ah);
ath9k_hw_setrxfilter(ah, 0);
stopped = ath9k_hw_stopdmarecv(ah);
@@ -526,19 +526,18 @@ bool ath_stoprecv(struct ath_softc *sc)
ath_edma_stop_recv(sc);
else
sc->rx.rxlink = NULL;
+ spin_unlock_bh(&sc->rx.rxbuflock);
return stopped;
}
void ath_flushrecv(struct ath_softc *sc)
{
- spin_lock_bh(&sc->rx.rxflushlock);
sc->sc_flags |= SC_OP_RXFLUSH;
if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_EDMA)
ath_rx_tasklet(sc, 1, true);
ath_rx_tasklet(sc, 1, false);
sc->sc_flags &= ~SC_OP_RXFLUSH;
- spin_unlock_bh(&sc->rx.rxflushlock);
}
static bool ath_beacon_dtim_pending_cab(struct sk_buff *skb)
@@ -898,7 +897,7 @@ static bool ath9k_rx_accept(struct ath_common *common,
* decryption and MIC failures. For monitor mode,
* we also ignore the CRC error.
*/
- if (ah->opmode == NL80211_IFTYPE_MONITOR) {
+ if (ah->is_monitoring) {
if (rx_stats->rs_status &
~(ATH9K_RXERR_DECRYPT | ATH9K_RXERR_MIC |
ATH9K_RXERR_CRC))
diff --git a/drivers/net/wireless/ath/ath9k/reg.h b/drivers/net/wireless/ath/ath9k/reg.h
index 42976b0a01c1..dddf579aacf1 100644
--- a/drivers/net/wireless/ath/ath9k/reg.h
+++ b/drivers/net/wireless/ath/ath9k/reg.h
@@ -703,6 +703,7 @@
#define AR_WA_RESET_EN (1 << 18) /* Sw Control to enable PCI-Reset to POR (bit 15) */
#define AR_WA_ANALOG_SHIFT (1 << 20)
#define AR_WA_POR_SHORT (1 << 21) /* PCI-E Phy reset control */
+#define AR_WA_BIT22 (1 << 22)
#define AR9285_WA_DEFAULT 0x004a050b
#define AR9280_WA_DEFAULT 0x0040073b
#define AR_WA_DEFAULT 0x0000073f
@@ -865,7 +866,13 @@
#define AR_DEVID_7010(_ah) \
(((_ah)->hw_version.devid == 0x7010) || \
((_ah)->hw_version.devid == 0x7015) || \
- ((_ah)->hw_version.devid == 0x9018))
+ ((_ah)->hw_version.devid == 0x9018) || \
+ ((_ah)->hw_version.devid == 0xA704) || \
+ ((_ah)->hw_version.devid == 0x1200))
+
+#define AR9287_HTC_DEVID(_ah) \
+ (((_ah)->hw_version.devid == 0x7015) || \
+ ((_ah)->hw_version.devid == 0x1200))
#define AR_RADIO_SREV_MAJOR 0xf0
#define AR_RAD5133_SREV_MAJOR 0xc0
diff --git a/drivers/net/wireless/ath/ath9k/xmit.c b/drivers/net/wireless/ath/ath9k/xmit.c
index d077186da870..f2ade2402ce2 100644
--- a/drivers/net/wireless/ath/ath9k/xmit.c
+++ b/drivers/net/wireless/ath/ath9k/xmit.c
@@ -673,6 +673,7 @@ static enum ATH_AGGR_STATUS ath_tx_form_aggr(struct ath_softc *sc,
u16 aggr_limit = 0, al = 0, bpad = 0,
al_delta, h_baw = tid->baw_size / 2;
enum ATH_AGGR_STATUS status = ATH_AGGR_DONE;
+ struct ieee80211_tx_info *tx_info;
bf_first = list_first_entry(&tid->buf_q, struct ath_buf, list);
@@ -699,6 +700,11 @@ static enum ATH_AGGR_STATUS ath_tx_form_aggr(struct ath_softc *sc,
break;
}
+ tx_info = IEEE80211_SKB_CB(bf->bf_mpdu);
+ if (nframes && ((tx_info->flags & IEEE80211_TX_CTL_RATE_CTRL_PROBE) ||
+ !(tx_info->control.rates[0].flags & IEEE80211_TX_RC_MCS)))
+ break;
+
/* do not exceed subframe limit */
if (nframes >= min((int)h_baw, ATH_AMPDU_SUBFRAME_DEFAULT)) {
status = ATH_AGGR_LIMITED;
@@ -1083,15 +1089,6 @@ void ath_draintxq(struct ath_softc *sc, struct ath_txq *txq, bool retry_tx)
txq->axq_tx_inprogress = false;
spin_unlock_bh(&txq->axq_lock);
- /* flush any pending frames if aggregation is enabled */
- if (sc->sc_flags & SC_OP_TXAGGR) {
- if (!retry_tx) {
- spin_lock_bh(&txq->axq_lock);
- ath_txq_drain_pending_buffers(sc, txq);
- spin_unlock_bh(&txq->axq_lock);
- }
- }
-
if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_EDMA) {
spin_lock_bh(&txq->axq_lock);
while (!list_empty(&txq->txq_fifo_pending)) {
@@ -1112,6 +1109,15 @@ void ath_draintxq(struct ath_softc *sc, struct ath_txq *txq, bool retry_tx)
}
spin_unlock_bh(&txq->axq_lock);
}
+
+ /* flush any pending frames if aggregation is enabled */
+ if (sc->sc_flags & SC_OP_TXAGGR) {
+ if (!retry_tx) {
+ spin_lock_bh(&txq->axq_lock);
+ ath_txq_drain_pending_buffers(sc, txq);
+ spin_unlock_bh(&txq->axq_lock);
+ }
+ }
}
void ath_drain_all_txq(struct ath_softc *sc, bool retry_tx)
@@ -2157,7 +2163,7 @@ static void ath_tx_complete_poll_work(struct work_struct *work)
ath_print(ath9k_hw_common(sc->sc_ah), ATH_DBG_RESET,
"tx hung, resetting the chip\n");
ath9k_ps_wakeup(sc);
- ath_reset(sc, false);
+ ath_reset(sc, true);
ath9k_ps_restore(sc);
}
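
The xmit.c hunks above add a cut-off in ath_tx_form_aggr(): once the aggregate already contains a frame, a rate-control probe frame or a frame using a legacy (non-MCS) rate ends the aggregate so it is sent separately; the drain path also flushes pending buffers after the FIFO queues. A toy sketch of the cut-off, with invented flag names and limits:

/* Sketch: stop extending a batch as soon as a frame is marked as a rate
 * probe or uses a legacy rate, so it goes out on its own. */
#include <stdbool.h>
#include <stdio.h>

#define F_RATE_PROBE 0x1
#define F_MCS_RATE   0x2

struct frame { int id; unsigned int flags; };

static int form_aggregate(const struct frame *q, int nq, int max_frames)
{
	int n = 0;

	for (int i = 0; i < nq && n < max_frames; i++) {
		/* Keep the first frame unconditionally; after that, a probe
		 * or legacy-rate frame terminates the aggregate. */
		if (n > 0 && ((q[i].flags & F_RATE_PROBE) ||
			      !(q[i].flags & F_MCS_RATE)))
			break;
		n++;
	}
	return n;
}

int main(void)
{
	struct frame q[4] = {
		{ 0, F_MCS_RATE },
		{ 1, F_MCS_RATE },
		{ 2, F_MCS_RATE | F_RATE_PROBE },
		{ 3, F_MCS_RATE },
	};

	printf("aggregated %d of 4 frames\n", form_aggregate(q, 4, 32));
	return 0;
}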
diff --git a/drivers/net/wireless/ath/carl9170/cmd.h b/drivers/net/wireless/ath/carl9170/cmd.h
index f78728c38294..568174c71b94 100644
--- a/drivers/net/wireless/ath/carl9170/cmd.h
+++ b/drivers/net/wireless/ath/carl9170/cmd.h
@@ -116,8 +116,9 @@ __regwrite_out : \
} while (0);
-#define carl9170_async_get_buf() \
+#define carl9170_async_regwrite_get_buf() \
do { \
+ __nreg = 0; \
__cmd = carl9170_cmd_buf(__carl, CARL9170_CMD_WREG_ASYNC, \
CARL9170_MAX_CMD_PAYLOAD_LEN); \
if (__cmd == NULL) { \
@@ -128,38 +129,42 @@ do { \
#define carl9170_async_regwrite_begin(carl) \
do { \
- int __nreg = 0, __err = 0; \
struct ar9170 *__carl = carl; \
struct carl9170_cmd *__cmd; \
- carl9170_async_get_buf(); \
+ unsigned int __nreg; \
+ int __err = 0; \
+ carl9170_async_regwrite_get_buf(); \
+
+#define carl9170_async_regwrite_flush() \
+do { \
+ if (__cmd == NULL || __nreg == 0) \
+ break; \
+ \
+ if (IS_ACCEPTING_CMD(__carl) && __nreg) { \
+ __cmd->hdr.len = 8 * __nreg; \
+ __err = __carl9170_exec_cmd(__carl, __cmd, true); \
+ __cmd = NULL; \
+ break; \
+ } \
+ goto __async_regwrite_out; \
+} while (0)
#define carl9170_async_regwrite(r, v) do { \
+ if (__cmd == NULL) \
+ carl9170_async_regwrite_get_buf(); \
__cmd->wreg.regs[__nreg].addr = cpu_to_le32(r); \
__cmd->wreg.regs[__nreg].val = cpu_to_le32(v); \
__nreg++; \
- if ((__nreg >= PAYLOAD_MAX/2)) { \
- if (IS_ACCEPTING_CMD(__carl)) { \
- __cmd->hdr.len = 8 * __nreg; \
- __err = __carl9170_exec_cmd(__carl, __cmd, true);\
- __cmd = NULL; \
- carl9170_async_get_buf(); \
- } else { \
- goto __async_regwrite_out; \
- } \
- __nreg = 0; \
- if (__err) \
- goto __async_regwrite_out; \
- } \
+ if ((__nreg >= PAYLOAD_MAX / 2)) \
+ carl9170_async_regwrite_flush(); \
} while (0)
-#define carl9170_async_regwrite_finish() \
+#define carl9170_async_regwrite_finish() do { \
__async_regwrite_out : \
- if (__err == 0 && __nreg) { \
- __cmd->hdr.len = 8 * __nreg; \
- if (IS_ACCEPTING_CMD(__carl)) \
- __err = __carl9170_exec_cmd(__carl, __cmd, true);\
- __nreg = 0; \
- }
+ if (__cmd != NULL && __err == 0) \
+ carl9170_async_regwrite_flush(); \
+ kfree(__cmd); \
+} while (0) \
#define carl9170_async_regwrite_result() \
__err; \
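
The carl9170 cmd.h rework above turns the async register-write macros into a batch-and-flush scheme: a buffer is obtained lazily, flushed through a dedicated carl9170_async_regwrite_flush() when it reaches PAYLOAD_MAX / 2 entries or when the caller finishes, and any leftover buffer is freed in the finish macro. A plain-function sketch of the same batching idea, using userspace stand-ins rather than the driver's command interface:

/* Sketch: batch writes into a fixed-size buffer, flush when it fills up
 * or when the caller finishes, and release the buffer at the end. */
#include <stdio.h>
#include <stdlib.h>

#define BATCH_MAX 4

struct batch {
	unsigned int addr[BATCH_MAX];
	unsigned int val[BATCH_MAX];
	int n;
};

static void flush(struct batch *b)
{
	for (int i = 0; i < b->n; i++)
		printf("write %#x <- %#x\n", b->addr[i], b->val[i]);
	b->n = 0;
}

static void regwrite(struct batch *b, unsigned int addr, unsigned int val)
{
	b->addr[b->n] = addr;
	b->val[b->n] = val;
	if (++b->n >= BATCH_MAX)
		flush(b);       /* same idea as flushing at PAYLOAD_MAX / 2 */
}

int main(void)
{
	struct batch *b = calloc(1, sizeof(*b));
	if (!b)
		return 1;

	for (unsigned int r = 0; r < 6; r++)
		regwrite(b, 0x1000 + 4 * r, r);

	flush(b);               /* finish: push out whatever is left */
	free(b);                /* and release the buffer, as _finish now does */
	return 0;
}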
diff --git a/drivers/net/wireless/ath/carl9170/main.c b/drivers/net/wireless/ath/carl9170/main.c
index 3cc99f3f7ab5..a314c2c2bfbe 100644
--- a/drivers/net/wireless/ath/carl9170/main.c
+++ b/drivers/net/wireless/ath/carl9170/main.c
@@ -639,15 +639,15 @@ init:
if (err)
goto unlock;
} else {
- err = carl9170_mod_virtual_mac(ar, vif_id, vif->addr);
rcu_read_unlock();
+ err = carl9170_mod_virtual_mac(ar, vif_id, vif->addr);
if (err)
goto unlock;
}
unlock:
- if (err && (vif_id != -1)) {
+ if (err && (vif_id >= 0)) {
vif_priv->active = false;
bitmap_release_region(&ar->vif_bitmap, vif_id, 0);
ar->vifs--;
diff --git a/drivers/net/wireless/ath/carl9170/usb.c b/drivers/net/wireless/ath/carl9170/usb.c
index c7f6193934ea..7504ed14c725 100644
--- a/drivers/net/wireless/ath/carl9170/usb.c
+++ b/drivers/net/wireless/ath/carl9170/usb.c
@@ -82,9 +82,11 @@ static struct usb_device_id carl9170_usb_ids[] = {
{ USB_DEVICE(0x07d1, 0x3c10) },
/* D-Link DWA 160 A2 */
{ USB_DEVICE(0x07d1, 0x3a09) },
+ /* D-Link DWA 130 D */
+ { USB_DEVICE(0x07d1, 0x3a0f) },
/* Netgear WNA1000 */
{ USB_DEVICE(0x0846, 0x9040) },
- /* Netgear WNDA3100 */
+ /* Netgear WNDA3100 (v1) */
{ USB_DEVICE(0x0846, 0x9010) },
/* Netgear WN111 v2 */
{ USB_DEVICE(0x0846, 0x9001), .driver_info = CARL9170_ONE_LED },
@@ -551,12 +553,12 @@ static int carl9170_usb_flush(struct ar9170 *ar)
usb_free_urb(urb);
}
- ret = usb_wait_anchor_empty_timeout(&ar->tx_cmd, HZ);
+ ret = usb_wait_anchor_empty_timeout(&ar->tx_cmd, 1000);
if (ret == 0)
err = -ETIMEDOUT;
/* lets wait a while until the tx - queues are dried out */
- ret = usb_wait_anchor_empty_timeout(&ar->tx_anch, HZ);
+ ret = usb_wait_anchor_empty_timeout(&ar->tx_anch, 1000);
if (ret == 0)
err = -ETIMEDOUT;
@@ -591,16 +593,23 @@ int __carl9170_exec_cmd(struct ar9170 *ar, struct carl9170_cmd *cmd,
const bool free_buf)
{
struct urb *urb;
+ int err = 0;
- if (!IS_INITIALIZED(ar))
- return -EPERM;
+ if (!IS_INITIALIZED(ar)) {
+ err = -EPERM;
+ goto err_free;
+ }
- if (WARN_ON(cmd->hdr.len > CARL9170_MAX_CMD_LEN - 4))
- return -EINVAL;
+ if (WARN_ON(cmd->hdr.len > CARL9170_MAX_CMD_LEN - 4)) {
+ err = -EINVAL;
+ goto err_free;
+ }
urb = usb_alloc_urb(0, GFP_ATOMIC);
- if (!urb)
- return -ENOMEM;
+ if (!urb) {
+ err = -ENOMEM;
+ goto err_free;
+ }
usb_fill_int_urb(urb, ar->udev, usb_sndintpipe(ar->udev,
AR9170_USB_EP_CMD), cmd, cmd->hdr.len + 4,
@@ -613,6 +622,12 @@ int __carl9170_exec_cmd(struct ar9170 *ar, struct carl9170_cmd *cmd,
usb_free_urb(urb);
return carl9170_usb_submit_cmd_urb(ar);
+
+err_free:
+ if (free_buf)
+ kfree(cmd);
+
+ return err;
}
int carl9170_exec_cmd(struct ar9170 *ar, const enum carl9170_cmd_oids cmd,
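
The carl9170 usb.c change above adds an err_free label to __carl9170_exec_cmd(): when the function was called with free_buf set, the early error returns used to leak the command buffer whose ownership had already been handed over; now every failure path releases it. A minimal sketch of that ownership rule, with stand-in names:

/* Sketch: when a function takes ownership of a buffer, every early-error
 * return has to release it, not just the success path. */
#include <stdbool.h>
#include <stdlib.h>

static int exec_cmd(void *cmd, size_t len, bool free_buf)
{
	int err = 0;

	if (len == 0) {               /* early validation failure */
		err = -1;
		goto out_free;
	}

	/* ... on success the buffer is handed to the transport here ... */

out_free:
	if (err && free_buf)          /* ownership was ours: release it */
		free(cmd);
	return err;
}

int main(void)
{
	void *cmd = calloc(1, 64);
	if (!cmd)
		return 1;

	/* Fails validation, but the buffer is still freed exactly once. */
	(void)exec_cmd(cmd, 0, true);
	return 0;
}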
diff --git a/drivers/net/wireless/b43/phy_n.c b/drivers/net/wireless/b43/phy_n.c
index dfec5496055e..e0f2d122e124 100644
--- a/drivers/net/wireless/b43/phy_n.c
+++ b/drivers/net/wireless/b43/phy_n.c
@@ -2964,7 +2964,7 @@ static int b43_nphy_rev2_cal_rx_iq(struct b43_wldev *dev,
(2 - i));
}
- for (j = 0; i < 4; j++) {
+ for (j = 0; j < 4; j++) {
if (j < 3) {
cur_lna = lna[j];
cur_hpf1 = hpf1[j];
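
The b43 phy_n.c fix above corrects a loop condition that tested the outer counter i instead of the inner counter j, so the inner loop either never terminated or never ran depending on i. A tiny sketch of the bug and the fix:

/* Sketch: the inner loop's condition must test the inner loop's counter. */
#include <stdio.h>

int main(void)
{
	int i = 0, j;

	/* Buggy form (do not use): for (j = 0; i < 4; j++) { ... }
	 * With i == 0 it never terminates; with i >= 4 it never runs. */

	for (j = 0; j < 4; j++)       /* corrected condition tests j */
		printf("iteration %d\n", j);

	(void)i;
	return 0;
}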
diff --git a/drivers/net/wireless/b43/sdio.c b/drivers/net/wireless/b43/sdio.c
index 45933cf8e8c2..09e2dfd7b175 100644
--- a/drivers/net/wireless/b43/sdio.c
+++ b/drivers/net/wireless/b43/sdio.c
@@ -163,6 +163,7 @@ static int b43_sdio_probe(struct sdio_func *func,
err_free_ssb:
kfree(sdio);
err_disable_func:
+ sdio_claim_host(func);
sdio_disable_func(func);
err_release_host:
sdio_release_host(func);
@@ -175,7 +176,9 @@ static void b43_sdio_remove(struct sdio_func *func)
struct b43_sdio *sdio = sdio_get_drvdata(func);
ssb_bus_unregister(&sdio->ssb);
+ sdio_claim_host(func);
sdio_disable_func(func);
+ sdio_release_host(func);
kfree(sdio);
sdio_set_drvdata(func, NULL);
}
diff --git a/drivers/net/wireless/ipw2x00/libipw_module.c b/drivers/net/wireless/ipw2x00/libipw_module.c
index 32dee2ce5d31..d5ef696298ee 100644
--- a/drivers/net/wireless/ipw2x00/libipw_module.c
+++ b/drivers/net/wireless/ipw2x00/libipw_module.c
@@ -54,6 +54,7 @@
#define DRV_DESCRIPTION "802.11 data/management/control stack"
#define DRV_NAME "libipw"
+#define DRV_PROCNAME "ieee80211"
#define DRV_VERSION LIBIPW_VERSION
#define DRV_COPYRIGHT "Copyright (C) 2004-2005 Intel Corporation <jketreno@linux.intel.com>"
@@ -293,16 +294,16 @@ static int __init libipw_init(void)
struct proc_dir_entry *e;
libipw_debug_level = debug;
- libipw_proc = proc_mkdir("ieee80211", init_net.proc_net);
+ libipw_proc = proc_mkdir(DRV_PROCNAME, init_net.proc_net);
if (libipw_proc == NULL) {
- LIBIPW_ERROR("Unable to create " DRV_NAME
+ LIBIPW_ERROR("Unable to create " DRV_PROCNAME
" proc directory\n");
return -EIO;
}
e = proc_create("debug_level", S_IRUGO | S_IWUSR, libipw_proc,
&debug_level_proc_fops);
if (!e) {
- remove_proc_entry(DRV_NAME, init_net.proc_net);
+ remove_proc_entry(DRV_PROCNAME, init_net.proc_net);
libipw_proc = NULL;
return -EIO;
}
@@ -319,7 +320,7 @@ static void __exit libipw_exit(void)
#ifdef CONFIG_LIBIPW_DEBUG
if (libipw_proc) {
remove_proc_entry("debug_level", libipw_proc);
- remove_proc_entry(DRV_NAME, init_net.proc_net);
+ remove_proc_entry(DRV_PROCNAME, init_net.proc_net);
libipw_proc = NULL;
}
#endif /* CONFIG_LIBIPW_DEBUG */
diff --git a/drivers/net/wireless/iwlwifi/iwl-agn-tx.c b/drivers/net/wireless/iwlwifi/iwl-agn-tx.c
index db57aea629d9..2b078a995729 100644
--- a/drivers/net/wireless/iwlwifi/iwl-agn-tx.c
+++ b/drivers/net/wireless/iwlwifi/iwl-agn-tx.c
@@ -1227,7 +1227,8 @@ static int iwlagn_tx_status_reply_compressed_ba(struct iwl_priv *priv,
struct ieee80211_tx_info *info;
if (unlikely(!agg->wait_for_ba)) {
- IWL_ERR(priv, "Received BA when not expected\n");
+ if (unlikely(ba_resp->bitmap))
+ IWL_ERR(priv, "Received BA when not expected\n");
return -EINVAL;
}
diff --git a/drivers/net/wireless/iwlwifi/iwl3945-base.c b/drivers/net/wireless/iwlwifi/iwl3945-base.c
index 8f8c4b73f8b9..7edf8c2fb8c7 100644
--- a/drivers/net/wireless/iwlwifi/iwl3945-base.c
+++ b/drivers/net/wireless/iwlwifi/iwl3945-base.c
@@ -4000,7 +4000,8 @@ static int iwl3945_pci_probe(struct pci_dev *pdev, const struct pci_device_id *e
* "the hard way", rather than using device's scan.
*/
if (iwl3945_mod_params.disable_hw_scan) {
- IWL_ERR(priv, "sw scan support is deprecated\n");
+ dev_printk(KERN_DEBUG, &(pdev->dev),
+ "sw scan support is deprecated\n");
iwl3945_hw_ops.hw_scan = NULL;
}
diff --git a/drivers/net/wireless/libertas/cfg.c b/drivers/net/wireless/libertas/cfg.c
index 5046a0005034..373930afc26b 100644
--- a/drivers/net/wireless/libertas/cfg.c
+++ b/drivers/net/wireless/libertas/cfg.c
@@ -700,8 +700,9 @@ static void lbs_scan_worker(struct work_struct *work)
if (priv->scan_channel < priv->scan_req->n_channels) {
cancel_delayed_work(&priv->scan_work);
- queue_delayed_work(priv->work_thread, &priv->scan_work,
- msecs_to_jiffies(300));
+ if (!priv->stopping)
+ queue_delayed_work(priv->work_thread, &priv->scan_work,
+ msecs_to_jiffies(300));
}
/* This is the final data we are about to send */
diff --git a/drivers/net/wireless/libertas/dev.h b/drivers/net/wireless/libertas/dev.h
index f062ed583901..cb14c38caf3a 100644
--- a/drivers/net/wireless/libertas/dev.h
+++ b/drivers/net/wireless/libertas/dev.h
@@ -36,6 +36,7 @@ struct lbs_private {
/* CFG80211 */
struct wireless_dev *wdev;
bool wiphy_registered;
+ bool stopping;
struct cfg80211_scan_request *scan_req;
u8 assoc_bss[ETH_ALEN];
u8 disassoc_reason;
diff --git a/drivers/net/wireless/libertas/if_sdio.c b/drivers/net/wireless/libertas/if_sdio.c
index 296fd00a5129..e5685dc317a8 100644
--- a/drivers/net/wireless/libertas/if_sdio.c
+++ b/drivers/net/wireless/libertas/if_sdio.c
@@ -684,18 +684,40 @@ static int if_sdio_prog_firmware(struct if_sdio_card *card)
lbs_deb_enter(LBS_DEB_SDIO);
+ /*
+ * Disable interrupts
+ */
+ sdio_claim_host(card->func);
+ sdio_writeb(card->func, 0x00, IF_SDIO_H_INT_MASK, &ret);
+ sdio_release_host(card->func);
+
sdio_claim_host(card->func);
scratch = if_sdio_read_scratch(card, &ret);
sdio_release_host(card->func);
+ lbs_deb_sdio("firmware status = %#x\n", scratch);
+ lbs_deb_sdio("scratch ret = %d\n", ret);
+
if (ret)
goto out;
- lbs_deb_sdio("firmware status = %#x\n", scratch);
+ /*
+ * The manual clearly describes that FEDC is the right code to use
+ * to detect firmware presence, but for SD8686 it is not that simple.
+ * Scratch is also used to store the RX packet length, so we lose
+ * the FEDC value early on. So we use a non-zero check in order
+ * to validate firmware presence.
+ * Additionally, the SD8686 in the Gumstix always has the high scratch
+ * bit set, even when the firmware is not loaded. So we have to
+ * exclude that from the test.
+ */
if (scratch == IF_SDIO_FIRMWARE_OK) {
lbs_deb_sdio("firmware already loaded\n");
goto success;
+ } else if ((card->model == MODEL_8686) && (scratch & 0x7fff)) {
+ lbs_deb_sdio("firmware may be running\n");
+ goto success;
}
ret = lbs_get_firmware(&card->func->dev, lbs_helper_name, lbs_fw_name,
@@ -709,10 +731,14 @@ static int if_sdio_prog_firmware(struct if_sdio_card *card)
if (ret)
goto out;
+ lbs_deb_sdio("Helper firmware loaded\n");
+
ret = if_sdio_prog_real(card, mainfw);
if (ret)
goto out;
+ lbs_deb_sdio("Firmware loaded\n");
+
success:
sdio_claim_host(card->func);
sdio_set_block_size(card->func, IF_SDIO_BLOCK_SIZE);
@@ -1042,8 +1068,6 @@ static int if_sdio_probe(struct sdio_func *func,
priv->exit_deep_sleep = if_sdio_exit_deep_sleep;
priv->reset_deep_sleep_wakeup = if_sdio_reset_deep_sleep_wakeup;
- priv->fw_ready = 1;
-
sdio_claim_host(func);
/*
@@ -1064,6 +1088,8 @@ static int if_sdio_probe(struct sdio_func *func,
if (ret)
goto reclaim;
+ priv->fw_ready = 1;
+
/*
* FUNC_INIT is required for SD8688 WLAN/BT multiple functions
*/
diff --git a/drivers/net/wireless/libertas/main.c b/drivers/net/wireless/libertas/main.c
index 47ce5a6ba120..46b88b118c99 100644
--- a/drivers/net/wireless/libertas/main.c
+++ b/drivers/net/wireless/libertas/main.c
@@ -104,6 +104,7 @@ static int lbs_dev_open(struct net_device *dev)
lbs_deb_enter(LBS_DEB_NET);
spin_lock_irq(&priv->driver_lock);
+ priv->stopping = false;
if (priv->connect_status == LBS_CONNECTED)
netif_carrier_on(dev);
@@ -131,10 +132,16 @@ static int lbs_eth_stop(struct net_device *dev)
lbs_deb_enter(LBS_DEB_NET);
spin_lock_irq(&priv->driver_lock);
+ priv->stopping = true;
netif_stop_queue(dev);
spin_unlock_irq(&priv->driver_lock);
schedule_work(&priv->mcast_work);
+ cancel_delayed_work_sync(&priv->scan_work);
+ if (priv->scan_req) {
+ cfg80211_scan_done(priv->scan_req, false);
+ priv->scan_req = NULL;
+ }
lbs_deb_leave(LBS_DEB_NET);
return 0;
diff --git a/drivers/net/wireless/orinoco/orinoco_usb.c b/drivers/net/wireless/orinoco/orinoco_usb.c
index a38a7bd25f19..b9aedf18a046 100644
--- a/drivers/net/wireless/orinoco/orinoco_usb.c
+++ b/drivers/net/wireless/orinoco/orinoco_usb.c
@@ -57,7 +57,6 @@
#include <linux/fcntl.h>
#include <linux/spinlock.h>
#include <linux/list.h>
-#include <linux/smp_lock.h>
#include <linux/usb.h>
#include <linux/timer.h>
diff --git a/drivers/net/wireless/rt2x00/Kconfig b/drivers/net/wireless/rt2x00/Kconfig
index eea1ef2f502b..4396d4b9bfb9 100644
--- a/drivers/net/wireless/rt2x00/Kconfig
+++ b/drivers/net/wireless/rt2x00/Kconfig
@@ -221,9 +221,6 @@ config RT2X00_LIB_LEDS
boolean
default y if (RT2X00_LIB=y && LEDS_CLASS=y) || (RT2X00_LIB=m && LEDS_CLASS!=n)
-comment "rt2x00 leds support disabled due to modularized LEDS_CLASS and built-in rt2x00"
- depends on RT2X00_LIB=y && LEDS_CLASS=m
-
config RT2X00_LIB_DEBUGFS
bool "Ralink debugfs support"
depends on RT2X00_LIB && MAC80211_DEBUGFS
diff --git a/drivers/net/wireless/wl1251/Makefile b/drivers/net/wireless/wl1251/Makefile
index 4fe246824db3..58b4f935a3f6 100644
--- a/drivers/net/wireless/wl1251/Makefile
+++ b/drivers/net/wireless/wl1251/Makefile
@@ -1,6 +1,8 @@
wl1251-objs = main.o event.o tx.o rx.o ps.o cmd.o \
acx.o boot.o init.o debugfs.o io.o
+wl1251_spi-objs += spi.o
+wl1251_sdio-objs += sdio.o
-obj-$(CONFIG_WL1251) += wl1251.o
-obj-$(CONFIG_WL1251_SPI) += spi.o
-obj-$(CONFIG_WL1251_SDIO) += sdio.o
+obj-$(CONFIG_WL1251) += wl1251.o
+obj-$(CONFIG_WL1251_SPI) += wl1251_spi.o
+obj-$(CONFIG_WL1251_SDIO) += wl1251_sdio.o
diff --git a/drivers/net/xen-netfront.c b/drivers/net/xen-netfront.c
index 630fb8664768..458bb57914a3 100644
--- a/drivers/net/xen-netfront.c
+++ b/drivers/net/xen-netfront.c
@@ -1610,6 +1610,8 @@ static void netback_changed(struct xenbus_device *dev,
switch (backend_state) {
case XenbusStateInitialising:
case XenbusStateInitialised:
+ case XenbusStateReconfiguring:
+ case XenbusStateReconfigured:
case XenbusStateConnected:
case XenbusStateUnknown:
case XenbusStateClosed:
diff --git a/drivers/net/xilinx_emaclite.c b/drivers/net/xilinx_emaclite.c
index f3f8be5a35fa..14f0955eca68 100644
--- a/drivers/net/xilinx_emaclite.c
+++ b/drivers/net/xilinx_emaclite.c
@@ -430,8 +430,8 @@ static u16 xemaclite_recv_data(struct net_local *drvdata, u8 *data)
}
/* Get the protocol type of the ethernet frame that arrived */
- proto_type = ((in_be32(addr + XEL_HEADER_OFFSET +
- XEL_RXBUFF_OFFSET) >> XEL_HEADER_SHIFT) &
+ proto_type = ((ntohl(in_be32(addr + XEL_HEADER_OFFSET +
+ XEL_RXBUFF_OFFSET)) >> XEL_HEADER_SHIFT) &
XEL_RPLR_LENGTH_MASK);
/* Check if received ethernet frame is a raw ethernet frame
@@ -439,9 +439,9 @@ static u16 xemaclite_recv_data(struct net_local *drvdata, u8 *data)
if (proto_type > (ETH_FRAME_LEN + ETH_FCS_LEN)) {
if (proto_type == ETH_P_IP) {
- length = ((in_be32(addr +
+ length = ((ntohl(in_be32(addr +
XEL_HEADER_IP_LENGTH_OFFSET +
- XEL_RXBUFF_OFFSET) >>
+ XEL_RXBUFF_OFFSET)) >>
XEL_HEADER_SHIFT) &
XEL_RPLR_LENGTH_MASK);
length += ETH_HLEN + ETH_FCS_LEN;
diff --git a/drivers/of/Kconfig b/drivers/of/Kconfig
index 6acbff389ab6..aa675ebd8eb3 100644
--- a/drivers/of/Kconfig
+++ b/drivers/of/Kconfig
@@ -4,7 +4,7 @@ config DTC
config OF
bool
-menu "Flattened Device Tree and Open Firmware support"
+menu "Device Tree and Open Firmware support"
depends on OF
config PROC_DEVICETREE
@@ -19,6 +19,9 @@ config OF_FLATTREE
bool
select DTC
+config OF_PROMTREE
+ bool
+
config OF_DYNAMIC
def_bool y
depends on PPC_OF
diff --git a/drivers/of/Makefile b/drivers/of/Makefile
index 0052c405463a..7888155bea08 100644
--- a/drivers/of/Makefile
+++ b/drivers/of/Makefile
@@ -1,5 +1,6 @@
obj-y = base.o
obj-$(CONFIG_OF_FLATTREE) += fdt.o
+obj-$(CONFIG_OF_PROMTREE) += pdt.o
obj-$(CONFIG_OF_ADDRESS) += address.o
obj-$(CONFIG_OF_IRQ) += irq.o
obj-$(CONFIG_OF_DEVICE) += device.o platform.o
diff --git a/drivers/of/address.c b/drivers/of/address.c
index fcadb726d4f9..3a1c7e70b192 100644
--- a/drivers/of/address.c
+++ b/drivers/of/address.c
@@ -163,7 +163,7 @@ static int of_bus_pci_translate(u32 *addr, u64 offset, int na)
const u32 *of_get_pci_address(struct device_node *dev, int bar_no, u64 *size,
unsigned int *flags)
{
- const u32 *prop;
+ const __be32 *prop;
unsigned int psize;
struct device_node *parent;
struct of_bus *bus;
diff --git a/drivers/of/base.c b/drivers/of/base.c
index aa805250de76..710b53bfac6d 100644
--- a/drivers/of/base.c
+++ b/drivers/of/base.c
@@ -33,7 +33,7 @@ DEFINE_RWLOCK(devtree_lock);
int of_n_addr_cells(struct device_node *np)
{
- const int *ip;
+ const __be32 *ip;
do {
if (np->parent)
@@ -49,7 +49,7 @@ EXPORT_SYMBOL(of_n_addr_cells);
int of_n_size_cells(struct device_node *np)
{
- const int *ip;
+ const __be32 *ip;
do {
if (np->parent)
diff --git a/drivers/of/device.c b/drivers/of/device.c
index 92de0eb74aea..45d86530799f 100644
--- a/drivers/of/device.c
+++ b/drivers/of/device.c
@@ -81,29 +81,10 @@ struct device_attribute of_platform_device_attrs[] = {
__ATTR_NULL
};
-/**
- * of_release_dev - free an of device structure when all users of it are finished.
- * @dev: device that's been disconnected
- *
- * Will be called only by the device core when all users of this of device are
- * done.
- */
-void of_release_dev(struct device *dev)
-{
- struct platform_device *ofdev;
-
- ofdev = to_platform_device(dev);
- of_node_put(ofdev->dev.of_node);
- kfree(ofdev);
-}
-EXPORT_SYMBOL(of_release_dev);
-
-int of_device_register(struct platform_device *ofdev)
+int of_device_add(struct platform_device *ofdev)
{
BUG_ON(ofdev->dev.of_node == NULL);
- device_initialize(&ofdev->dev);
-
/* name and id have to be set so that the platform bus doesn't get
* confused on matching */
ofdev->name = dev_name(&ofdev->dev);
@@ -117,6 +98,12 @@ int of_device_register(struct platform_device *ofdev)
return device_add(&ofdev->dev);
}
+
+int of_device_register(struct platform_device *pdev)
+{
+ device_initialize(&pdev->dev);
+ return of_device_add(pdev);
+}
EXPORT_SYMBOL(of_device_register);
void of_device_unregister(struct platform_device *ofdev)
diff --git a/drivers/of/fdt.c b/drivers/of/fdt.c
index 65da5aec7552..c1360e02f921 100644
--- a/drivers/of/fdt.c
+++ b/drivers/of/fdt.c
@@ -533,8 +533,6 @@ int __init early_init_dt_scan_chosen(unsigned long node, const char *uname,
strlcpy(cmd_line, CONFIG_CMDLINE, COMMAND_LINE_SIZE);
#endif /* CONFIG_CMDLINE */
- early_init_dt_scan_chosen_arch(node);
-
pr_debug("Command line is: %s\n", cmd_line);
/* break now */
diff --git a/drivers/of/irq.c b/drivers/of/irq.c
index 6e595e5a3977..75b0d3cb7676 100644
--- a/drivers/of/irq.c
+++ b/drivers/of/irq.c
@@ -24,6 +24,11 @@
#include <linux/of_irq.h>
#include <linux/string.h>
+/* For archs that don't support NO_IRQ (such as x86), provide a dummy value */
+#ifndef NO_IRQ
+#define NO_IRQ 0
+#endif
+
/**
* irq_of_parse_and_map - Parse and map an interrupt into linux virq space
* @device: Device node of the device whose interrupt is to be mapped
@@ -347,3 +352,37 @@ int of_irq_to_resource(struct device_node *dev, int index, struct resource *r)
return irq;
}
EXPORT_SYMBOL_GPL(of_irq_to_resource);
+
+/**
+ * of_irq_count - Count the number of IRQs a node uses
+ * @dev: pointer to device tree node
+ */
+int of_irq_count(struct device_node *dev)
+{
+ int nr = 0;
+
+ while (of_irq_to_resource(dev, nr, NULL) != NO_IRQ)
+ nr++;
+
+ return nr;
+}
+
+/**
+ * of_irq_to_resource_table - Fill in resource table with node's IRQ info
+ * @dev: pointer to device tree node
+ * @res: array of resources to fill in
+ * @nr_irqs: the number of IRQs (and upper bound for num of @res elements)
+ *
+ * Returns the size of the filled in table (up to @nr_irqs).
+ */
+int of_irq_to_resource_table(struct device_node *dev, struct resource *res,
+ int nr_irqs)
+{
+ int i;
+
+ for (i = 0; i < nr_irqs; i++, res++)
+ if (of_irq_to_resource(dev, i, res) == NO_IRQ)
+ break;
+
+ return i;
+}
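
A short usage sketch for the two helpers added above (hypothetical example_irq_resources(); assumes the matching declarations are available via <linux/of_irq.h>):

#include <linux/of_irq.h>
#include <linux/ioport.h>
#include <linux/slab.h>

/* hypothetical driver helper, not part of the patch */
static struct resource *example_irq_resources(struct device_node *np, int *count)
{
	struct resource *res;
	int nr = of_irq_count(np);

	*count = 0;
	if (!nr)
		return NULL;

	res = kcalloc(nr, sizeof(*res), GFP_KERNEL);
	if (!res)
		return NULL;

	*count = of_irq_to_resource_table(np, res, nr);
	return res;
}
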
diff --git a/drivers/of/of_i2c.c b/drivers/of/of_i2c.c
index 0a694debd226..c85d3c7421fc 100644
--- a/drivers/of/of_i2c.c
+++ b/drivers/of/of_i2c.c
@@ -12,6 +12,7 @@
*/
#include <linux/i2c.h>
+#include <linux/irq.h>
#include <linux/of.h>
#include <linux/of_i2c.h>
#include <linux/of_irq.h>
diff --git a/drivers/of/pdt.c b/drivers/of/pdt.c
new file mode 100644
index 000000000000..28295d0a50f6
--- /dev/null
+++ b/drivers/of/pdt.c
@@ -0,0 +1,276 @@
+/* pdt.c: OF PROM device tree support code.
+ *
+ * Paul Mackerras August 1996.
+ * Copyright (C) 1996-2005 Paul Mackerras.
+ *
+ * Adapted for 64bit PowerPC by Dave Engebretsen and Peter Bergner.
+ * {engebret|bergner}@us.ibm.com
+ *
+ * Adapted for sparc by David S. Miller davem@davemloft.net
+ * Adapted for multiple architectures by Andres Salomon <dilinger@queued.net>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/errno.h>
+#include <linux/mutex.h>
+#include <linux/slab.h>
+#include <linux/of.h>
+#include <linux/of_pdt.h>
+#include <asm/prom.h>
+
+static struct of_pdt_ops *of_pdt_prom_ops __initdata;
+
+void __initdata (*of_pdt_build_more)(struct device_node *dp,
+ struct device_node ***nextp);
+
+#if defined(CONFIG_SPARC)
+unsigned int of_pdt_unique_id __initdata;
+
+#define of_pdt_incr_unique_id(p) do { \
+ (p)->unique_id = of_pdt_unique_id++; \
+} while (0)
+
+static inline const char *of_pdt_node_name(struct device_node *dp)
+{
+ return dp->path_component_name;
+}
+
+#else
+
+static inline void of_pdt_incr_unique_id(void *p) { }
+static inline void irq_trans_init(struct device_node *dp) { }
+
+static inline const char *of_pdt_node_name(struct device_node *dp)
+{
+ return dp->name;
+}
+
+#endif /* !CONFIG_SPARC */
+
+static struct property * __init of_pdt_build_one_prop(phandle node, char *prev,
+ char *special_name,
+ void *special_val,
+ int special_len)
+{
+ static struct property *tmp = NULL;
+ struct property *p;
+ int err;
+
+ if (tmp) {
+ p = tmp;
+ memset(p, 0, sizeof(*p) + 32);
+ tmp = NULL;
+ } else {
+ p = prom_early_alloc(sizeof(struct property) + 32);
+ of_pdt_incr_unique_id(p);
+ }
+
+ p->name = (char *) (p + 1);
+ if (special_name) {
+ strcpy(p->name, special_name);
+ p->length = special_len;
+ p->value = prom_early_alloc(special_len);
+ memcpy(p->value, special_val, special_len);
+ } else {
+ err = of_pdt_prom_ops->nextprop(node, prev, p->name);
+ if (err) {
+ tmp = p;
+ return NULL;
+ }
+ p->length = of_pdt_prom_ops->getproplen(node, p->name);
+ if (p->length <= 0) {
+ p->length = 0;
+ } else {
+ int len;
+
+ p->value = prom_early_alloc(p->length + 1);
+ len = of_pdt_prom_ops->getproperty(node, p->name,
+ p->value, p->length);
+ if (len <= 0)
+ p->length = 0;
+ ((unsigned char *)p->value)[p->length] = '\0';
+ }
+ }
+ return p;
+}
+
+static struct property * __init of_pdt_build_prop_list(phandle node)
+{
+ struct property *head, *tail;
+
+ head = tail = of_pdt_build_one_prop(node, NULL,
+ ".node", &node, sizeof(node));
+
+ tail->next = of_pdt_build_one_prop(node, NULL, NULL, NULL, 0);
+ tail = tail->next;
+ while(tail) {
+ tail->next = of_pdt_build_one_prop(node, tail->name,
+ NULL, NULL, 0);
+ tail = tail->next;
+ }
+
+ return head;
+}
+
+static char * __init of_pdt_get_one_property(phandle node, const char *name)
+{
+ char *buf = "<NULL>";
+ int len;
+
+ len = of_pdt_prom_ops->getproplen(node, name);
+ if (len > 0) {
+ buf = prom_early_alloc(len);
+ len = of_pdt_prom_ops->getproperty(node, name, buf, len);
+ }
+
+ return buf;
+}
+
+static char * __init of_pdt_try_pkg2path(phandle node)
+{
+ char *res, *buf = NULL;
+ int len;
+
+ if (!of_pdt_prom_ops->pkg2path)
+ return NULL;
+
+ if (of_pdt_prom_ops->pkg2path(node, buf, 0, &len))
+ return NULL;
+ buf = prom_early_alloc(len + 1);
+ if (of_pdt_prom_ops->pkg2path(node, buf, len, &len)) {
+ pr_err("%s: package-to-path failed\n", __func__);
+ return NULL;
+ }
+
+ res = strrchr(buf, '/');
+ if (!res) {
+ pr_err("%s: couldn't find / in %s\n", __func__, buf);
+ return NULL;
+ }
+ return res+1;
+}
+
+/*
+ * When fetching the node's name, first try using package-to-path; if
+ * that fails (either because the arch hasn't supplied a PROM callback,
+ * or some other random failure), fall back to just looking at the node's
+ * 'name' property.
+ */
+static char * __init of_pdt_build_name(phandle node)
+{
+ char *buf;
+
+ buf = of_pdt_try_pkg2path(node);
+ if (!buf)
+ buf = of_pdt_get_one_property(node, "name");
+
+ return buf;
+}
+
+static struct device_node * __init of_pdt_create_node(phandle node,
+ struct device_node *parent)
+{
+ struct device_node *dp;
+
+ if (!node)
+ return NULL;
+
+ dp = prom_early_alloc(sizeof(*dp));
+ of_pdt_incr_unique_id(dp);
+ dp->parent = parent;
+
+ kref_init(&dp->kref);
+
+ dp->name = of_pdt_build_name(node);
+ dp->type = of_pdt_get_one_property(node, "device_type");
+ dp->phandle = node;
+
+ dp->properties = of_pdt_build_prop_list(node);
+
+ irq_trans_init(dp);
+
+ return dp;
+}
+
+static char * __init of_pdt_build_full_name(struct device_node *dp)
+{
+ int len, ourlen, plen;
+ char *n;
+
+ plen = strlen(dp->parent->full_name);
+ ourlen = strlen(of_pdt_node_name(dp));
+ len = ourlen + plen + 2;
+
+ n = prom_early_alloc(len);
+ strcpy(n, dp->parent->full_name);
+ if (!of_node_is_root(dp->parent)) {
+ strcpy(n + plen, "/");
+ plen++;
+ }
+ strcpy(n + plen, of_pdt_node_name(dp));
+
+ return n;
+}
+
+static struct device_node * __init of_pdt_build_tree(struct device_node *parent,
+ phandle node,
+ struct device_node ***nextp)
+{
+ struct device_node *ret = NULL, *prev_sibling = NULL;
+ struct device_node *dp;
+
+ while (1) {
+ dp = of_pdt_create_node(node, parent);
+ if (!dp)
+ break;
+
+ if (prev_sibling)
+ prev_sibling->sibling = dp;
+
+ if (!ret)
+ ret = dp;
+ prev_sibling = dp;
+
+ *(*nextp) = dp;
+ *nextp = &dp->allnext;
+
+#if defined(CONFIG_SPARC)
+ dp->path_component_name = build_path_component(dp);
+#endif
+ dp->full_name = of_pdt_build_full_name(dp);
+
+ dp->child = of_pdt_build_tree(dp,
+ of_pdt_prom_ops->getchild(node), nextp);
+
+ if (of_pdt_build_more)
+ of_pdt_build_more(dp, nextp);
+
+ node = of_pdt_prom_ops->getsibling(node);
+ }
+
+ return ret;
+}
+
+void __init of_pdt_build_devicetree(phandle root_node, struct of_pdt_ops *ops)
+{
+ struct device_node **nextp;
+
+ BUG_ON(!ops);
+ of_pdt_prom_ops = ops;
+
+ allnodes = of_pdt_create_node(root_node, NULL);
+#if defined(CONFIG_SPARC)
+ allnodes->path_component_name = "";
+#endif
+ allnodes->full_name = "/";
+
+ nextp = &allnodes->allnext;
+ allnodes->child = of_pdt_build_tree(allnodes,
+ of_pdt_prom_ops->getchild(allnodes->phandle), &nextp);
+}
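
The new pdt.c expects an architecture to hand it a set of PROM callbacks. A hedged sketch of the expected wiring, with hypothetical example_* wrappers around the platform's PROM calls (only the callbacks actually used above are filled in; pkg2path is optional):

#include <linux/init.h>
#include <linux/of.h>
#include <linux/of_pdt.h>

/* the example_* PROM wrappers are hypothetical */
static struct of_pdt_ops example_prom_ops __initdata = {
	.nextprop	= example_nextprop,
	.getproplen	= example_getproplen,
	.getproperty	= example_getproperty,
	.getchild	= example_getchild,
	.getsibling	= example_getsibling,
};

void __init example_prom_build_devicetree(phandle root)
{
	of_pdt_build_devicetree(root, &example_prom_ops);
}
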
diff --git a/drivers/of/platform.c b/drivers/of/platform.c
index bb72223c22ae..5b4a07f1220e 100644
--- a/drivers/of/platform.c
+++ b/drivers/of/platform.c
@@ -584,34 +584,33 @@ struct platform_device *of_device_alloc(struct device_node *np,
struct device *parent)
{
struct platform_device *dev;
- int rc, i, num_reg = 0, num_irq = 0;
+ int rc, i, num_reg = 0, num_irq;
struct resource *res, temp_res;
- /* First count how many resources are needed */
- while (of_address_to_resource(np, num_reg, &temp_res) == 0)
- num_reg++;
- while (of_irq_to_resource(np, num_irq, &temp_res) != NO_IRQ)
- num_irq++;
-
- /* Allocate memory for both the struct device and the resource table */
- dev = kzalloc(sizeof(*dev) + (sizeof(*res) * (num_reg + num_irq)),
- GFP_KERNEL);
+ dev = platform_device_alloc("", -1);
if (!dev)
return NULL;
- res = (struct resource *) &dev[1];
+
+ /* count the io and irq resources */
+ while (of_address_to_resource(np, num_reg, &temp_res) == 0)
+ num_reg++;
+ num_irq = of_irq_count(np);
/* Populate the resource table */
if (num_irq || num_reg) {
+ res = kzalloc(sizeof(*res) * (num_irq + num_reg), GFP_KERNEL);
+ if (!res) {
+ platform_device_put(dev);
+ return NULL;
+ }
+
dev->num_resources = num_reg + num_irq;
dev->resource = res;
for (i = 0; i < num_reg; i++, res++) {
rc = of_address_to_resource(np, i, res);
WARN_ON(rc);
}
- for (i = 0; i < num_irq; i++, res++) {
- rc = of_irq_to_resource(np, i, res);
- WARN_ON(rc == NO_IRQ);
- }
+ WARN_ON(of_irq_to_resource_table(np, res, num_irq) != num_irq);
}
dev->dev.of_node = of_node_get(np);
@@ -619,7 +618,6 @@ struct platform_device *of_device_alloc(struct device_node *np,
dev->dev.dma_mask = &dev->archdata.dma_mask;
#endif
dev->dev.parent = parent;
- dev->dev.release = of_release_dev;
if (bus_id)
dev_set_name(&dev->dev, "%s", bus_id);
@@ -657,8 +655,8 @@ struct platform_device *of_platform_device_create(struct device_node *np,
* to do such, possibly using a device notifier
*/
- if (of_device_register(dev) != 0) {
- of_device_free(dev);
+ if (of_device_add(dev) != 0) {
+ platform_device_put(dev);
return NULL;
}
diff --git a/drivers/oprofile/buffer_sync.c b/drivers/oprofile/buffer_sync.c
index b7e755f4178a..a3984f4ef192 100644
--- a/drivers/oprofile/buffer_sync.c
+++ b/drivers/oprofile/buffer_sync.c
@@ -190,7 +190,7 @@ void sync_stop(void)
profile_event_unregister(PROFILE_TASK_EXIT, &task_exit_nb);
task_handoff_unregister(&task_free_nb);
mutex_unlock(&buffer_mutex);
- flush_scheduled_work();
+ flush_cpu_work();
/* make sure we don't leak task structs */
process_task_mortuary();
diff --git a/drivers/oprofile/cpu_buffer.c b/drivers/oprofile/cpu_buffer.c
index f179ac2ea801..59f55441e075 100644
--- a/drivers/oprofile/cpu_buffer.c
+++ b/drivers/oprofile/cpu_buffer.c
@@ -111,14 +111,18 @@ void start_cpu_work(void)
void end_cpu_work(void)
{
- int i;
-
work_enabled = 0;
+}
+
+void flush_cpu_work(void)
+{
+ int i;
for_each_online_cpu(i) {
struct oprofile_cpu_buffer *b = &per_cpu(op_cpu_buffer, i);
- cancel_delayed_work(&b->work);
+ /* these works are per-cpu, no need for flush_sync */
+ flush_delayed_work(&b->work);
}
}
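
The point of splitting end_cpu_work()/flush_cpu_work() is ordering: first stop the per-CPU work from re-arming, then wait for instances already queued. A minimal sketch of the intended ordering (hypothetical combined caller, not part of the patch):

#include "cpu_buffer.h"		/* oprofile-internal declarations */

static void example_stop_profiling(void)
{
	end_cpu_work();		/* work_enabled = 0: handlers stop re-queueing */
	flush_cpu_work();	/* flush_delayed_work() on every online CPU */
}
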
diff --git a/drivers/oprofile/cpu_buffer.h b/drivers/oprofile/cpu_buffer.h
index 68ea16ab645f..e1d097e250ae 100644
--- a/drivers/oprofile/cpu_buffer.h
+++ b/drivers/oprofile/cpu_buffer.h
@@ -25,6 +25,7 @@ void free_cpu_buffers(void);
void start_cpu_work(void);
void end_cpu_work(void);
+void flush_cpu_work(void);
/* CPU buffer is composed of such entries (which are
* also used for context switch notes)
diff --git a/drivers/oprofile/oprofilefs.c b/drivers/oprofile/oprofilefs.c
index 95f711b251ad..e9ff6f7770be 100644
--- a/drivers/oprofile/oprofilefs.c
+++ b/drivers/oprofile/oprofilefs.c
@@ -28,6 +28,7 @@ static struct inode *oprofilefs_get_inode(struct super_block *sb, int mode)
struct inode *inode = new_inode(sb);
if (inode) {
+ inode->i_ino = get_next_ino();
inode->i_mode = mode;
inode->i_atime = inode->i_mtime = inode->i_ctime = CURRENT_TIME;
}
@@ -258,17 +259,17 @@ static int oprofilefs_fill_super(struct super_block *sb, void *data, int silent)
}
-static int oprofilefs_get_sb(struct file_system_type *fs_type,
- int flags, const char *dev_name, void *data, struct vfsmount *mnt)
+static struct dentry *oprofilefs_mount(struct file_system_type *fs_type,
+ int flags, const char *dev_name, void *data)
{
- return get_sb_single(fs_type, flags, data, oprofilefs_fill_super, mnt);
+ return mount_single(fs_type, flags, data, oprofilefs_fill_super);
}
static struct file_system_type oprofilefs_type = {
.owner = THIS_MODULE,
.name = "oprofilefs",
- .get_sb = oprofilefs_get_sb,
+ .mount = oprofilefs_mount,
.kill_sb = kill_litter_super,
};
diff --git a/drivers/oprofile/timer_int.c b/drivers/oprofile/timer_int.c
index dc0ae4d14dff..010725117dbb 100644
--- a/drivers/oprofile/timer_int.c
+++ b/drivers/oprofile/timer_int.c
@@ -21,6 +21,7 @@
#include "oprof.h"
static DEFINE_PER_CPU(struct hrtimer, oprofile_hrtimer);
+static int ctr_running;
static enum hrtimer_restart oprofile_hrtimer_notify(struct hrtimer *hrtimer)
{
@@ -33,6 +34,9 @@ static void __oprofile_hrtimer_start(void *unused)
{
struct hrtimer *hrtimer = &__get_cpu_var(oprofile_hrtimer);
+ if (!ctr_running)
+ return;
+
hrtimer_init(hrtimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
hrtimer->function = oprofile_hrtimer_notify;
@@ -42,7 +46,10 @@ static void __oprofile_hrtimer_start(void *unused)
static int oprofile_hrtimer_start(void)
{
+ get_online_cpus();
+ ctr_running = 1;
on_each_cpu(__oprofile_hrtimer_start, NULL, 1);
+ put_online_cpus();
return 0;
}
@@ -50,6 +57,9 @@ static void __oprofile_hrtimer_stop(int cpu)
{
struct hrtimer *hrtimer = &per_cpu(oprofile_hrtimer, cpu);
+ if (!ctr_running)
+ return;
+
hrtimer_cancel(hrtimer);
}
@@ -57,8 +67,11 @@ static void oprofile_hrtimer_stop(void)
{
int cpu;
+ get_online_cpus();
for_each_online_cpu(cpu)
__oprofile_hrtimer_stop(cpu);
+ ctr_running = 0;
+ put_online_cpus();
}
static int __cpuinit oprofile_cpu_notify(struct notifier_block *self,
diff --git a/drivers/parisc/dino.c b/drivers/parisc/dino.c
index c542c7bb7454..9383063d2b16 100644
--- a/drivers/parisc/dino.c
+++ b/drivers/parisc/dino.c
@@ -296,10 +296,9 @@ static struct pci_port_ops dino_port_ops = {
.outl = dino_out32
};
-static void dino_disable_irq(unsigned int irq)
+static void dino_mask_irq(unsigned int irq)
{
- struct irq_desc *desc = irq_to_desc(irq);
- struct dino_device *dino_dev = desc->chip_data;
+ struct dino_device *dino_dev = get_irq_chip_data(irq);
int local_irq = gsc_find_local_irq(irq, dino_dev->global_irq, DINO_LOCAL_IRQS);
DBG(KERN_WARNING "%s(0x%p, %d)\n", __func__, dino_dev, irq);
@@ -309,10 +308,9 @@ static void dino_disable_irq(unsigned int irq)
__raw_writel(dino_dev->imr, dino_dev->hba.base_addr+DINO_IMR);
}
-static void dino_enable_irq(unsigned int irq)
+static void dino_unmask_irq(unsigned int irq)
{
- struct irq_desc *desc = irq_to_desc(irq);
- struct dino_device *dino_dev = desc->chip_data;
+ struct dino_device *dino_dev = get_irq_chip_data(irq);
int local_irq = gsc_find_local_irq(irq, dino_dev->global_irq, DINO_LOCAL_IRQS);
u32 tmp;
@@ -347,20 +345,10 @@ static void dino_enable_irq(unsigned int irq)
}
}
-static unsigned int dino_startup_irq(unsigned int irq)
-{
- dino_enable_irq(irq);
- return 0;
-}
-
static struct irq_chip dino_interrupt_type = {
- .name = "GSC-PCI",
- .startup = dino_startup_irq,
- .shutdown = dino_disable_irq,
- .enable = dino_enable_irq,
- .disable = dino_disable_irq,
- .ack = no_ack_irq,
- .end = no_end_irq,
+ .name = "GSC-PCI",
+ .unmask = dino_unmask_irq,
+ .mask = dino_mask_irq,
};
@@ -391,7 +379,7 @@ ilr_again:
int irq = dino_dev->global_irq[local_irq];
DBG(KERN_DEBUG "%s(%d, %p) mask 0x%x\n",
__func__, irq, intr_dev, mask);
- __do_IRQ(irq);
+ generic_handle_irq(irq);
mask &= ~(1 << local_irq);
} while (mask);
diff --git a/drivers/parisc/eisa.c b/drivers/parisc/eisa.c
index 46f503fb7fc5..e860038b0b84 100644
--- a/drivers/parisc/eisa.c
+++ b/drivers/parisc/eisa.c
@@ -144,7 +144,7 @@ static unsigned int eisa_irq_level __read_mostly; /* default to edge triggered *
/* called by free irq */
-static void eisa_disable_irq(unsigned int irq)
+static void eisa_mask_irq(unsigned int irq)
{
unsigned long flags;
@@ -164,7 +164,7 @@ static void eisa_disable_irq(unsigned int irq)
}
/* called by request irq */
-static void eisa_enable_irq(unsigned int irq)
+static void eisa_unmask_irq(unsigned int irq)
{
unsigned long flags;
EISA_DBG("enable irq %d\n", irq);
@@ -182,20 +182,10 @@ static void eisa_enable_irq(unsigned int irq)
EISA_DBG("pic1 mask %02x\n", eisa_in8(0xa1));
}
-static unsigned int eisa_startup_irq(unsigned int irq)
-{
- eisa_enable_irq(irq);
- return 0;
-}
-
static struct irq_chip eisa_interrupt_type = {
- .name = "EISA",
- .startup = eisa_startup_irq,
- .shutdown = eisa_disable_irq,
- .enable = eisa_enable_irq,
- .disable = eisa_disable_irq,
- .ack = no_ack_irq,
- .end = no_end_irq,
+ .name = "EISA",
+ .unmask = eisa_unmask_irq,
+ .mask = eisa_mask_irq,
};
static irqreturn_t eisa_irq(int wax_irq, void *intr_dev)
@@ -233,7 +223,7 @@ static irqreturn_t eisa_irq(int wax_irq, void *intr_dev)
}
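
Both hunks above enforce the same SDIO locking rule; a minimal sketch of the required pairing (assuming func is a valid struct sdio_func *):

#include <linux/mmc/sdio_func.h>

/* hypothetical helper; sdio_disable_func() must run with the host claimed */
static void example_disable(struct sdio_func *func)
{
	sdio_claim_host(func);
	sdio_disable_func(func);
	sdio_release_host(func);
}
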
spin_unlock_irqrestore(&eisa_irq_lock, flags);
- __do_IRQ(irq);
+ generic_handle_irq(irq);
spin_lock_irqsave(&eisa_irq_lock, flags);
/* unmask */
@@ -346,10 +336,10 @@ static int __init eisa_probe(struct parisc_device *dev)
}
/* Reserve IRQ2 */
- irq_to_desc(2)->action = &irq2_action;
-
+ setup_irq(2, &irq2_action);
for (i = 0; i < 16; i++) {
- irq_to_desc(i)->chip = &eisa_interrupt_type;
+ set_irq_chip_and_handler(i, &eisa_interrupt_type,
+ handle_simple_irq);
}
EISA_bus = 1;
diff --git a/drivers/parisc/eisa_eeprom.c b/drivers/parisc/eisa_eeprom.c
index cce00ed81f37..af212c6a6158 100644
--- a/drivers/parisc/eisa_eeprom.c
+++ b/drivers/parisc/eisa_eeprom.c
@@ -24,7 +24,6 @@
#include <linux/kernel.h>
#include <linux/miscdevice.h>
#include <linux/slab.h>
-#include <linux/smp_lock.h>
#include <linux/fs.h>
#include <asm/io.h>
#include <asm/uaccess.h>
diff --git a/drivers/parisc/gsc.c b/drivers/parisc/gsc.c
index 20a1bce1a031..772b1939ac21 100644
--- a/drivers/parisc/gsc.c
+++ b/drivers/parisc/gsc.c
@@ -86,7 +86,7 @@ irqreturn_t gsc_asic_intr(int gsc_asic_irq, void *dev)
do {
int local_irq = __ffs(irr);
unsigned int irq = gsc_asic->global_irq[local_irq];
- __do_IRQ(irq);
+ generic_handle_irq(irq);
irr &= ~(1 << local_irq);
} while (irr);
@@ -105,10 +105,9 @@ int gsc_find_local_irq(unsigned int irq, int *global_irqs, int limit)
return NO_IRQ;
}
-static void gsc_asic_disable_irq(unsigned int irq)
+static void gsc_asic_mask_irq(unsigned int irq)
{
- struct irq_desc *desc = irq_to_desc(irq);
- struct gsc_asic *irq_dev = desc->chip_data;
+ struct gsc_asic *irq_dev = get_irq_chip_data(irq);
int local_irq = gsc_find_local_irq(irq, irq_dev->global_irq, 32);
u32 imr;
@@ -121,10 +120,9 @@ static void gsc_asic_disable_irq(unsigned int irq)
gsc_writel(imr, irq_dev->hpa + OFFSET_IMR);
}
-static void gsc_asic_enable_irq(unsigned int irq)
+static void gsc_asic_unmask_irq(unsigned int irq)
{
- struct irq_desc *desc = irq_to_desc(irq);
- struct gsc_asic *irq_dev = desc->chip_data;
+ struct gsc_asic *irq_dev = get_irq_chip_data(irq);
int local_irq = gsc_find_local_irq(irq, irq_dev->global_irq, 32);
u32 imr;
@@ -141,33 +139,22 @@ static void gsc_asic_enable_irq(unsigned int irq)
*/
}
-static unsigned int gsc_asic_startup_irq(unsigned int irq)
-{
- gsc_asic_enable_irq(irq);
- return 0;
-}
-
static struct irq_chip gsc_asic_interrupt_type = {
- .name = "GSC-ASIC",
- .startup = gsc_asic_startup_irq,
- .shutdown = gsc_asic_disable_irq,
- .enable = gsc_asic_enable_irq,
- .disable = gsc_asic_disable_irq,
- .ack = no_ack_irq,
- .end = no_end_irq,
+ .name = "GSC-ASIC",
+ .unmask = gsc_asic_unmask_irq,
+ .mask = gsc_asic_mask_irq,
};
int gsc_assign_irq(struct irq_chip *type, void *data)
{
static int irq = GSC_IRQ_BASE;
- struct irq_desc *desc;
if (irq > GSC_IRQ_MAX)
return NO_IRQ;
- desc = irq_to_desc(irq);
- desc->chip = type;
- desc->chip_data = data;
+ set_irq_chip_and_handler(irq, type, handle_simple_irq);
+ set_irq_chip_data(irq, data);
+
return irq++;
}
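
The parisc conversions above all follow one genirq pattern: mask/unmask callbacks that fetch per-IRQ chip data, registration through set_irq_chip_and_handler()/set_irq_chip_data(), and generic_handle_irq() in the cascade handler instead of __do_IRQ(). A condensed sketch with a hypothetical example_dev, using the pre-irq_data irq_chip callbacks this kernel still has:

#include <linux/irq.h>

struct example_dev;				/* hypothetical controller state */

static void example_mask_irq(unsigned int irq)
{
	struct example_dev *d = get_irq_chip_data(irq);

	/* ... clear this line's bit in d's interrupt mask register ... */
	(void)d;
}

static void example_unmask_irq(unsigned int irq)
{
	struct example_dev *d = get_irq_chip_data(irq);

	/* ... set this line's bit in d's interrupt mask register ... */
	(void)d;
}

static struct irq_chip example_chip = {
	.name	= "EXAMPLE",
	.mask	= example_mask_irq,
	.unmask	= example_unmask_irq,
};

static void example_register(unsigned int irq, struct example_dev *d)
{
	set_irq_chip_and_handler(irq, &example_chip, handle_simple_irq);
	set_irq_chip_data(irq, d);
}
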
diff --git a/drivers/parisc/iosapic.c b/drivers/parisc/iosapic.c
index c76836727cae..0327894bf235 100644
--- a/drivers/parisc/iosapic.c
+++ b/drivers/parisc/iosapic.c
@@ -615,17 +615,10 @@ iosapic_set_irt_data( struct vector_info *vi, u32 *dp0, u32 *dp1)
}
-static struct vector_info *iosapic_get_vector(unsigned int irq)
-{
- struct irq_desc *desc = irq_to_desc(irq);
-
- return desc->chip_data;
-}
-
-static void iosapic_disable_irq(unsigned int irq)
+static void iosapic_mask_irq(unsigned int irq)
{
unsigned long flags;
- struct vector_info *vi = iosapic_get_vector(irq);
+ struct vector_info *vi = get_irq_chip_data(irq);
u32 d0, d1;
spin_lock_irqsave(&iosapic_lock, flags);
@@ -635,9 +628,9 @@ static void iosapic_disable_irq(unsigned int irq)
spin_unlock_irqrestore(&iosapic_lock, flags);
}
-static void iosapic_enable_irq(unsigned int irq)
+static void iosapic_unmask_irq(unsigned int irq)
{
- struct vector_info *vi = iosapic_get_vector(irq);
+ struct vector_info *vi = get_irq_chip_data(irq);
u32 d0, d1;
/* data is initialized by fixup_irq */
@@ -678,34 +671,19 @@ printk("\n");
iosapic_eoi(vi->eoi_addr, vi->eoi_data);
}
-/*
- * PARISC only supports PCI devices below I/O SAPIC.
- * PCI only supports level triggered in order to share IRQ lines.
- * ergo I/O SAPIC must always issue EOI on parisc.
- *
- * i386/ia64 support ISA devices and have to deal with
- * edge-triggered interrupts too.
- */
-static void iosapic_end_irq(unsigned int irq)
+static void iosapic_eoi_irq(unsigned int irq)
{
- struct vector_info *vi = iosapic_get_vector(irq);
- DBG(KERN_DEBUG "end_irq(%d): eoi(%p, 0x%x)\n", irq,
- vi->eoi_addr, vi->eoi_data);
- iosapic_eoi(vi->eoi_addr, vi->eoi_data);
- cpu_end_irq(irq);
-}
+ struct vector_info *vi = get_irq_chip_data(irq);
-static unsigned int iosapic_startup_irq(unsigned int irq)
-{
- iosapic_enable_irq(irq);
- return 0;
+ iosapic_eoi(vi->eoi_addr, vi->eoi_data);
+ cpu_eoi_irq(irq);
}
#ifdef CONFIG_SMP
static int iosapic_set_affinity_irq(unsigned int irq,
const struct cpumask *dest)
{
- struct vector_info *vi = iosapic_get_vector(irq);
+ struct vector_info *vi = get_irq_chip_data(irq);
u32 d0, d1, dummy_d0;
unsigned long flags;
int dest_cpu;
@@ -730,13 +708,11 @@ static int iosapic_set_affinity_irq(unsigned int irq,
#endif
static struct irq_chip iosapic_interrupt_type = {
- .name = "IO-SAPIC-level",
- .startup = iosapic_startup_irq,
- .shutdown = iosapic_disable_irq,
- .enable = iosapic_enable_irq,
- .disable = iosapic_disable_irq,
- .ack = cpu_ack_irq,
- .end = iosapic_end_irq,
+ .name = "IO-SAPIC-level",
+ .unmask = iosapic_unmask_irq,
+ .mask = iosapic_mask_irq,
+ .ack = cpu_ack_irq,
+ .eoi = iosapic_eoi_irq,
#ifdef CONFIG_SMP
.set_affinity = iosapic_set_affinity_irq,
#endif
@@ -891,8 +867,8 @@ void *iosapic_register(unsigned long hpa)
isi->isi_version = iosapic_rd_version(isi);
isi->isi_num_vectors = IOSAPIC_IRDT_MAX_ENTRY(isi->isi_version) + 1;
- vip = isi->isi_vector = (struct vector_info *)
- kzalloc(sizeof(struct vector_info) * isi->isi_num_vectors, GFP_KERNEL);
+ vip = isi->isi_vector = kcalloc(isi->isi_num_vectors,
+ sizeof(struct vector_info), GFP_KERNEL);
if (vip == NULL) {
kfree(isi);
return NULL;
diff --git a/drivers/parisc/led.c b/drivers/parisc/led.c
index c5c14dd3734f..f2f501e5b6a0 100644
--- a/drivers/parisc/led.c
+++ b/drivers/parisc/led.c
@@ -64,6 +64,7 @@ static unsigned int led_diskio __read_mostly = 1;
static unsigned int led_lanrxtx __read_mostly = 1;
static char lcd_text[32] __read_mostly;
static char lcd_text_default[32] __read_mostly;
+static int lcd_no_led_support __read_mostly = 0; /* KittyHawk doesn't support LED on its LCD */
static struct workqueue_struct *led_wq;
@@ -115,7 +116,7 @@ lcd_info __attribute__((aligned(8))) __read_mostly =
.lcd_width = 16,
.lcd_cmd_reg_addr = KITTYHAWK_LCD_CMD,
.lcd_data_reg_addr = KITTYHAWK_LCD_DATA,
- .min_cmd_delay = 40,
+ .min_cmd_delay = 80,
.reset_cmd1 = 0x80,
.reset_cmd2 = 0xc0,
};
@@ -135,6 +136,9 @@ static int start_task(void)
/* Display the default text now */
if (led_type == LED_HASLCD) lcd_print( lcd_text_default );
+ /* KittyHawk has no LED support on its LCD */
+ if (lcd_no_led_support) return 0;
+
/* Create the work queue and queue the LED task */
led_wq = create_singlethread_workqueue("led_wq");
queue_delayed_work(led_wq, &led_task, 0);
@@ -248,9 +252,13 @@ static int __init led_create_procfs(void)
proc_pdc_root = proc_mkdir("pdc", 0);
if (!proc_pdc_root) return -1;
- ent = proc_create_data("led", S_IRUGO|S_IWUSR, proc_pdc_root,
- &led_proc_fops, (void *)LED_NOLCD); /* LED */
- if (!ent) return -1;
+
+ if (!lcd_no_led_support)
+ {
+ ent = proc_create_data("led", S_IRUGO|S_IWUSR, proc_pdc_root,
+ &led_proc_fops, (void *)LED_NOLCD); /* LED */
+ if (!ent) return -1;
+ }
if (led_type == LED_HASLCD)
{
@@ -346,8 +354,8 @@ static __inline__ int led_get_net_activity(void)
#ifndef CONFIG_NET
return 0;
#else
- static unsigned long rx_total_last, tx_total_last;
- unsigned long rx_total, tx_total;
+ static u64 rx_total_last, tx_total_last;
+ u64 rx_total, tx_total;
struct net_device *dev;
int retval;
@@ -356,7 +364,7 @@ static __inline__ int led_get_net_activity(void)
/* we are running as a workqueue task, so we can use an RCU lookup */
rcu_read_lock();
for_each_netdev_rcu(&init_net, dev) {
- const struct net_device_stats *stats;
+ const struct rtnl_link_stats64 *stats;
struct rtnl_link_stats64 temp;
struct in_device *in_dev = __in_dev_get_rcu(dev);
if (!in_dev || !in_dev->ifa_list)
@@ -692,6 +700,7 @@ int __init led_init(void)
case 0x58B: /* KittyHawk DC2 100 (K200) */
printk(KERN_INFO "%s: KittyHawk-Machine (hversion 0x%x) found, "
"LED detection skipped.\n", __FILE__, CPU_HVERSION);
+ lcd_no_led_support = 1;
goto found; /* use the preinitialized values of lcd_info */
}
diff --git a/drivers/parisc/superio.c b/drivers/parisc/superio.c
index f7806d81f1e0..28241532c0fd 100644
--- a/drivers/parisc/superio.c
+++ b/drivers/parisc/superio.c
@@ -139,7 +139,7 @@ superio_interrupt(int parent_irq, void *devp)
}
/* Call the appropriate device's interrupt */
- __do_IRQ(local_irq);
+ generic_handle_irq(local_irq);
/* set EOI - forces a new interrupt if a lower priority device
* still needs service.
@@ -286,7 +286,7 @@ superio_init(struct pci_dev *pcidev)
}
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_NS, PCI_DEVICE_ID_NS_87560_LIO, superio_init);
-static void superio_disable_irq(unsigned int irq)
+static void superio_mask_irq(unsigned int irq)
{
u8 r8;
@@ -303,7 +303,7 @@ static void superio_disable_irq(unsigned int irq)
outb (r8,IC_PIC1+1);
}
-static void superio_enable_irq(unsigned int irq)
+static void superio_unmask_irq(unsigned int irq)
{
u8 r8;
@@ -319,20 +319,10 @@ static void superio_enable_irq(unsigned int irq)
outb (r8,IC_PIC1+1);
}
-static unsigned int superio_startup_irq(unsigned int irq)
-{
- superio_enable_irq(irq);
- return 0;
-}
-
static struct irq_chip superio_interrupt_type = {
- .name = SUPERIO,
- .startup = superio_startup_irq,
- .shutdown = superio_disable_irq,
- .enable = superio_enable_irq,
- .disable = superio_disable_irq,
- .ack = no_ack_irq,
- .end = no_end_irq,
+ .name = SUPERIO,
+ .unmask = superio_unmask_irq,
+ .mask = superio_mask_irq,
};
#ifdef DEBUG_SUPERIO_INIT
@@ -363,9 +353,7 @@ int superio_fixup_irq(struct pci_dev *pcidev)
#endif
for (i = 0; i < 16; i++) {
- struct irq_desc *desc = irq_to_desc(i);
-
- desc->chip = &superio_interrupt_type;
+ set_irq_chip_and_handler(i, &superio_interrupt_type, handle_simple_irq);
}
/*
diff --git a/drivers/pci/Kconfig b/drivers/pci/Kconfig
index 34ef70d562b2..5b1630e4e9e3 100644
--- a/drivers/pci/Kconfig
+++ b/drivers/pci/Kconfig
@@ -40,6 +40,27 @@ config PCI_STUB
When in doubt, say N.
+config XEN_PCIDEV_FRONTEND
+ tristate "Xen PCI Frontend"
+ depends on PCI && X86 && XEN
+ select HOTPLUG
+ select PCI_XEN
+ default y
+ help
+ The PCI device frontend driver allows the kernel to import arbitrary
+ PCI devices from a PCI backend to support PCI driver domains.
+
+config XEN_PCIDEV_FE_DEBUG
+ bool "Xen PCI Frontend debugging"
+ depends on XEN_PCIDEV_FRONTEND && PCI_DEBUG
+ help
+ Say Y here if you want the Xen PCI frontend to produce a bunch of debug
+ messages to the system log. Select this if you are having a
+ problem with Xen PCI frontend support and want to see more of what is
+ going on.
+
+ When in doubt, say N.
+
config HT_IRQ
bool "Interrupts on hypertransport devices"
default y
diff --git a/drivers/pci/Makefile b/drivers/pci/Makefile
index dc1aa0922868..98e6fdf34d30 100644
--- a/drivers/pci/Makefile
+++ b/drivers/pci/Makefile
@@ -49,6 +49,7 @@ obj-$(CONFIG_MIPS) += setup-bus.o setup-irq.o
obj-$(CONFIG_X86_VISWS) += setup-irq.o
obj-$(CONFIG_MN10300) += setup-bus.o
obj-$(CONFIG_MICROBLAZE) += setup-bus.o
+obj-$(CONFIG_TILE) += setup-bus.o setup-irq.o
#
# ACPI Related PCI FW Functions
@@ -65,6 +66,6 @@ obj-$(CONFIG_PCI_SYSCALL) += syscall.o
obj-$(CONFIG_PCI_STUB) += pci-stub.o
-ifeq ($(CONFIG_PCI_DEBUG),y)
-EXTRA_CFLAGS += -DDEBUG
-endif
+obj-$(CONFIG_XEN_PCIDEV_FRONTEND) += xen-pcifront.o
+
+ccflags-$(CONFIG_PCI_DEBUG) := -DDEBUG
diff --git a/drivers/pci/bus.c b/drivers/pci/bus.c
index 7f0af0e9b826..003170ea2e39 100644
--- a/drivers/pci/bus.c
+++ b/drivers/pci/bus.c
@@ -64,6 +64,77 @@ void pci_bus_remove_resources(struct pci_bus *bus)
}
}
+static bool pci_bus_resource_better(struct resource *res1, bool pos1,
+ struct resource *res2, bool pos2)
+{
+ /* If exactly one is positive decode, always prefer that one */
+ if (pos1 != pos2)
+ return pos1 ? true : false;
+
+ /* Prefer the one that contains the highest address */
+ if (res1->end != res2->end)
+ return (res1->end > res2->end) ? true : false;
+
+ /* Otherwise, prefer the one with highest "center of gravity" */
+ if (res1->start != res2->start)
+ return (res1->start > res2->start) ? true : false;
+
+ /* Otherwise, choose one arbitrarily (but consistently) */
+ return (res1 > res2) ? true : false;
+}
+
+static bool pci_bus_resource_positive(struct pci_bus *bus, struct resource *res)
+{
+ struct pci_bus_resource *bus_res;
+
+ /*
+ * This relies on the fact that pci_bus.resource[] refers to P2P or
+ * CardBus bridge base/limit registers, which are always positively
+ * decoded. The pci_bus.resources list contains host bridge or
+ * subtractively decoded resources.
+ */
+ list_for_each_entry(bus_res, &bus->resources, list) {
+ if (bus_res->res == res)
+ return (bus_res->flags & PCI_SUBTRACTIVE_DECODE) ?
+ false : true;
+ }
+ return true;
+}
+
+/*
+ * Find the next-best bus resource after the cursor "res". If the cursor is
+ * NULL, return the best resource. "Best" means that we prefer positive
+ * decode regions over subtractive decode, then those at higher addresses.
+ */
+static struct resource *pci_bus_find_resource_prev(struct pci_bus *bus,
+ unsigned int type,
+ struct resource *res)
+{
+ bool res_pos, r_pos, prev_pos = false;
+ struct resource *r, *prev = NULL;
+ int i;
+
+ res_pos = pci_bus_resource_positive(bus, res);
+ pci_bus_for_each_resource(bus, r, i) {
+ if (!r)
+ continue;
+
+ if ((r->flags & IORESOURCE_TYPE_BITS) != type)
+ continue;
+
+ r_pos = pci_bus_resource_positive(bus, r);
+ if (!res || pci_bus_resource_better(res, res_pos, r, r_pos)) {
+ if (!prev || pci_bus_resource_better(r, r_pos,
+ prev, prev_pos)) {
+ prev = r;
+ prev_pos = r_pos;
+ }
+ }
+ }
+
+ return prev;
+}
+
/**
* pci_bus_alloc_resource - allocate a resource from a parent bus
* @bus: PCI bus
@@ -89,9 +160,10 @@ pci_bus_alloc_resource(struct pci_bus *bus, struct resource *res,
resource_size_t),
void *alignf_data)
{
- int i, ret = -ENOMEM;
+ int ret = -ENOMEM;
struct resource *r;
resource_size_t max = -1;
+ unsigned int type = res->flags & IORESOURCE_TYPE_BITS;
type_mask |= IORESOURCE_IO | IORESOURCE_MEM;
@@ -99,10 +171,9 @@ pci_bus_alloc_resource(struct pci_bus *bus, struct resource *res,
if (!(res->flags & IORESOURCE_MEM_64))
max = PCIBIOS_MAX_MEM_32;
- pci_bus_for_each_resource(bus, r, i) {
- if (!r)
- continue;
-
+ /* Look for space at highest addresses first */
+ r = pci_bus_find_resource_prev(bus, type, NULL);
+ for ( ; r; r = pci_bus_find_resource_prev(bus, type, r)) {
/* type_mask must match */
if ((res->flags ^ r->flags) & type_mask)
continue;
@@ -299,6 +370,7 @@ void pci_walk_bus(struct pci_bus *top, int (*cb)(struct pci_dev *, void *),
}
up_read(&pci_bus_sem);
}
+EXPORT_SYMBOL_GPL(pci_walk_bus);
EXPORT_SYMBOL(pci_bus_alloc_resource);
EXPORT_SYMBOL_GPL(pci_bus_add_device);
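
pci_walk_bus() gains an EXPORT_SYMBOL_GPL above, so modules can now traverse a bus hierarchy. A minimal usage sketch with a hypothetical callback:

#include <linux/pci.h>

/* hypothetical callback; returning non-zero stops the walk early */
static int example_visit(struct pci_dev *dev, void *data)
{
	dev_info(&dev->dev, "visited\n");
	return 0;
}

static void example_walk(struct pci_bus *bus)
{
	pci_walk_bus(bus, example_visit, NULL);
}
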
diff --git a/drivers/pci/hotplug/ibmphp_ebda.c b/drivers/pci/hotplug/ibmphp_ebda.c
index 5becbdee4027..2850e64dedae 100644
--- a/drivers/pci/hotplug/ibmphp_ebda.c
+++ b/drivers/pci/hotplug/ibmphp_ebda.c
@@ -276,6 +276,12 @@ int __init ibmphp_access_ebda (void)
for (;;) {
offset = next_offset;
+
+ /* Make sure what we read is still in the mapped section */
+ if (WARN(offset > (ebda_sz * 1024 - 4),
+ "ibmphp_ebda: next read is beyond ebda_sz\n"))
+ break;
+
next_offset = readw (io_mem + offset); /* offset of next blk */
offset += 2;
diff --git a/drivers/pci/hotplug/ibmphp_hpc.c b/drivers/pci/hotplug/ibmphp_hpc.c
index 1aaf3f32d3cd..f59ed30512b5 100644
--- a/drivers/pci/hotplug/ibmphp_hpc.c
+++ b/drivers/pci/hotplug/ibmphp_hpc.c
@@ -133,8 +133,8 @@ void __init ibmphp_hpc_initvars (void)
debug ("%s - Entry\n", __func__);
mutex_init(&sem_hpcaccess);
- init_MUTEX (&semOperations);
- init_MUTEX_LOCKED (&sem_exit);
+ sema_init(&semOperations, 1);
+ sema_init(&sem_exit, 0);
to_debug = 0;
debug ("%s - Exit\n", __func__);
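
The semaphore conversion above is mechanical; a minimal sketch of the equivalence (hypothetical example_* names):

#include <linux/semaphore.h>

static struct semaphore example_ops_sem;	/* was init_MUTEX() */
static struct semaphore example_exit_sem;	/* was init_MUTEX_LOCKED() */

static void example_init(void)
{
	sema_init(&example_ops_sem, 1);		/* starts free: first down() succeeds */
	sema_init(&example_exit_sem, 0);	/* starts held: down() blocks until up() */
}
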
diff --git a/drivers/pci/msi.c b/drivers/pci/msi.c
index 5fcf5aec680f..7c24dcef2989 100644
--- a/drivers/pci/msi.c
+++ b/drivers/pci/msi.c
@@ -35,7 +35,12 @@ int arch_msi_check_device(struct pci_dev *dev, int nvec, int type)
#endif
#ifndef arch_setup_msi_irqs
-int arch_setup_msi_irqs(struct pci_dev *dev, int nvec, int type)
+# define arch_setup_msi_irqs default_setup_msi_irqs
+# define HAVE_DEFAULT_MSI_SETUP_IRQS
+#endif
+
+#ifdef HAVE_DEFAULT_MSI_SETUP_IRQS
+int default_setup_msi_irqs(struct pci_dev *dev, int nvec, int type)
{
struct msi_desc *entry;
int ret;
@@ -60,7 +65,12 @@ int arch_setup_msi_irqs(struct pci_dev *dev, int nvec, int type)
#endif
#ifndef arch_teardown_msi_irqs
-void arch_teardown_msi_irqs(struct pci_dev *dev)
+# define arch_teardown_msi_irqs default_teardown_msi_irqs
+# define HAVE_DEFAULT_MSI_TEARDOWN_IRQS
+#endif
+
+#ifdef HAVE_DEFAULT_MSI_TEARDOWN_IRQS
+void default_teardown_msi_irqs(struct pci_dev *dev)
{
struct msi_desc *entry;
diff --git a/drivers/pci/msi.h b/drivers/pci/msi.h
index de27c1cb5a2b..feff3bee6fe5 100644
--- a/drivers/pci/msi.h
+++ b/drivers/pci/msi.h
@@ -22,8 +22,8 @@
#define is_64bit_address(control) (!!(control & PCI_MSI_FLAGS_64BIT))
#define is_mask_bit_support(control) (!!(control & PCI_MSI_FLAGS_MASKBIT))
-#define msix_table_offset_reg(base) (base + 0x04)
-#define msix_pba_offset_reg(base) (base + 0x08)
+#define msix_table_offset_reg(base) (base + PCI_MSIX_TABLE)
+#define msix_pba_offset_reg(base) (base + PCI_MSIX_PBA)
#define msix_table_size(control) ((control & PCI_MSIX_FLAGS_QSIZE)+1)
#define multi_msix_capable(control) msix_table_size((control))
diff --git a/drivers/pci/pci-sysfs.c b/drivers/pci/pci-sysfs.c
index b5a7d9bfcb24..63d5042f2079 100644
--- a/drivers/pci/pci-sysfs.c
+++ b/drivers/pci/pci-sysfs.c
@@ -705,17 +705,21 @@ void pci_remove_legacy_files(struct pci_bus *b)
#ifdef HAVE_PCI_MMAP
-int pci_mmap_fits(struct pci_dev *pdev, int resno, struct vm_area_struct *vma)
+int pci_mmap_fits(struct pci_dev *pdev, int resno, struct vm_area_struct *vma,
+ enum pci_mmap_api mmap_api)
{
- unsigned long nr, start, size;
+ unsigned long nr, start, size, pci_start;
+ if (pci_resource_len(pdev, resno) == 0)
+ return 0;
nr = (vma->vm_end - vma->vm_start) >> PAGE_SHIFT;
start = vma->vm_pgoff;
size = ((pci_resource_len(pdev, resno) - 1) >> PAGE_SHIFT) + 1;
- if (start < size && size - start >= nr)
+ pci_start = (mmap_api == PCI_MMAP_PROCFS) ?
+ pci_resource_start(pdev, resno) >> PAGE_SHIFT : 0;
+ if (start >= pci_start && start < pci_start + size &&
+ start + nr <= pci_start + size)
return 1;
- WARN(1, "process \"%s\" tried to map 0x%08lx-0x%08lx on %s BAR %d (size 0x%08lx)\n",
- current->comm, start, start+nr, pci_name(pdev), resno, size);
return 0;
}
@@ -745,8 +749,15 @@ pci_mmap_resource(struct kobject *kobj, struct bin_attribute *attr,
if (i >= PCI_ROM_RESOURCE)
return -ENODEV;
- if (!pci_mmap_fits(pdev, i, vma))
+ if (!pci_mmap_fits(pdev, i, vma, PCI_MMAP_SYSFS)) {
+ WARN(1, "process \"%s\" tried to map 0x%08lx bytes "
+ "at page 0x%08lx on %s BAR %d (start 0x%16Lx, size 0x%16Lx)\n",
+ current->comm, vma->vm_end-vma->vm_start, vma->vm_pgoff,
+ pci_name(pdev), i,
+ (u64)pci_resource_start(pdev, i),
+ (u64)pci_resource_len(pdev, i));
return -EINVAL;
+ }
/* pci_mmap_page_range() expects the same kind of entry as coming
* from /proc/bus/pci/ which is a "user visible" value. If this is
diff --git a/drivers/pci/pci.c b/drivers/pci/pci.c
index 7fa3cbd742c5..710c8a29be0d 100644
--- a/drivers/pci/pci.c
+++ b/drivers/pci/pci.c
@@ -38,6 +38,19 @@ EXPORT_SYMBOL(pci_pci_problems);
unsigned int pci_pm_d3_delay;
+static void pci_pme_list_scan(struct work_struct *work);
+
+static LIST_HEAD(pci_pme_list);
+static DEFINE_MUTEX(pci_pme_list_mutex);
+static DECLARE_DELAYED_WORK(pci_pme_work, pci_pme_list_scan);
+
+struct pci_pme_device {
+ struct list_head list;
+ struct pci_dev *dev;
+};
+
+#define PME_TIMEOUT 1000 /* How long between PME checks, in milliseconds */
+
static void pci_dev_d3_sleep(struct pci_dev *dev)
{
unsigned int delay = dev->d3_delay;
@@ -994,6 +1007,18 @@ static int __pci_enable_device_flags(struct pci_dev *dev,
int err;
int i, bars = 0;
+ /*
+ * Power state could be unknown at this point, either due to a fresh
+ * boot or a device removal call. So get the current power state
+ * so that things like MSI message writing will behave as expected
+ * (e.g. if the device really is in D0 at enable time).
+ */
+ if (dev->pm_cap) {
+ u16 pmcsr;
+ pci_read_config_word(dev, dev->pm_cap + PCI_PM_CTRL, &pmcsr);
+ dev->current_state = (pmcsr & PCI_PM_CTRL_STATE_MASK);
+ }
+
if (atomic_add_return(1, &dev->enable_cnt) > 1)
return 0; /* already enabled */
@@ -1331,6 +1356,32 @@ bool pci_pme_capable(struct pci_dev *dev, pci_power_t state)
return !!(dev->pme_support & (1 << state));
}
+static void pci_pme_list_scan(struct work_struct *work)
+{
+ struct pci_pme_device *pme_dev;
+
+ mutex_lock(&pci_pme_list_mutex);
+ if (!list_empty(&pci_pme_list)) {
+ list_for_each_entry(pme_dev, &pci_pme_list, list)
+ pci_pme_wakeup(pme_dev->dev, NULL);
+ schedule_delayed_work(&pci_pme_work, msecs_to_jiffies(PME_TIMEOUT));
+ }
+ mutex_unlock(&pci_pme_list_mutex);
+}
+
+/**
+ * pci_external_pme - is a device an external PCI PME source?
+ * @dev: PCI device to check
+ *
+ */
+
+static bool pci_external_pme(struct pci_dev *dev)
+{
+ if (pci_is_pcie(dev) || dev->bus->number == 0)
+ return false;
+ return true;
+}
+
/**
* pci_pme_active - enable or disable PCI device's PME# function
* @dev: PCI device to handle.
@@ -1354,6 +1405,44 @@ void pci_pme_active(struct pci_dev *dev, bool enable)
pci_write_config_word(dev, dev->pm_cap + PCI_PM_CTRL, pmcsr);
+ /* PCI (as opposed to PCIe) PME requires that the device have
+ its PME# line hooked up correctly. Not all hardware vendors
+ do this, so the PME never gets delivered and the device
+ remains asleep. The easiest way around this is to
+ periodically walk the list of suspended devices and check
+ whether any have their PME flag set. The assumption is that
+ we'll wake up often enough anyway that this won't be a huge
+ hit, and the power savings from the devices will still be a
+ win. */
+
+ if (pci_external_pme(dev)) {
+ struct pci_pme_device *pme_dev;
+ if (enable) {
+ pme_dev = kmalloc(sizeof(struct pci_pme_device),
+ GFP_KERNEL);
+ if (!pme_dev)
+ goto out;
+ pme_dev->dev = dev;
+ mutex_lock(&pci_pme_list_mutex);
+ list_add(&pme_dev->list, &pci_pme_list);
+ if (list_is_singular(&pci_pme_list))
+ schedule_delayed_work(&pci_pme_work,
+ msecs_to_jiffies(PME_TIMEOUT));
+ mutex_unlock(&pci_pme_list_mutex);
+ } else {
+ mutex_lock(&pci_pme_list_mutex);
+ list_for_each_entry(pme_dev, &pci_pme_list, list) {
+ if (pme_dev->dev == dev) {
+ list_del(&pme_dev->list);
+ kfree(pme_dev);
+ break;
+ }
+ }
+ mutex_unlock(&pci_pme_list_mutex);
+ }
+ }
+
+out:
dev_printk(KERN_DEBUG, &dev->dev, "PME# %s\n",
enable ? "enabled" : "disabled");
}
@@ -2689,7 +2778,7 @@ int pcie_get_readrq(struct pci_dev *dev)
ret = pci_read_config_word(dev, cap + PCI_EXP_DEVCTL, &ctl);
if (!ret)
- ret = 128 << ((ctl & PCI_EXP_DEVCTL_READRQ) >> 12);
+ ret = 128 << ((ctl & PCI_EXP_DEVCTL_READRQ) >> 12);
return ret;
}
diff --git a/drivers/pci/pci.h b/drivers/pci/pci.h
index 6beb11b617a9..7d33f6673868 100644
--- a/drivers/pci/pci.h
+++ b/drivers/pci/pci.h
@@ -22,8 +22,13 @@ extern void pci_remove_firmware_label_files(struct pci_dev *pdev);
#endif
extern void pci_cleanup_rom(struct pci_dev *dev);
#ifdef HAVE_PCI_MMAP
+enum pci_mmap_api {
+ PCI_MMAP_SYSFS, /* mmap on /sys/bus/pci/devices/<BDF>/resource<N> */
+ PCI_MMAP_PROCFS /* mmap on /proc/bus/pci/<BDF> */
+};
extern int pci_mmap_fits(struct pci_dev *pdev, int resno,
- struct vm_area_struct *vma);
+ struct vm_area_struct *vmai,
+ enum pci_mmap_api mmap_api);
#endif
int pci_probe_reset_function(struct pci_dev *dev);
@@ -63,11 +68,8 @@ struct pci_platform_pm_ops {
extern int pci_set_platform_pm(struct pci_platform_pm_ops *ops);
extern void pci_update_current_state(struct pci_dev *dev, pci_power_t state);
extern void pci_disable_enabled_device(struct pci_dev *dev);
-extern bool pci_check_pme_status(struct pci_dev *dev);
extern int pci_finish_runtime_suspend(struct pci_dev *dev);
-extern void pci_wakeup_event(struct pci_dev *dev);
extern int __pci_pme_wakeup(struct pci_dev *dev, void *ign);
-extern void pci_pme_wakeup_bus(struct pci_bus *bus);
extern void pci_pm_init(struct pci_dev *dev);
extern void platform_pci_wakeup_init(struct pci_dev *dev);
extern void pci_allocate_cap_save_buffers(struct pci_dev *dev);
diff --git a/drivers/pci/pcie/aer/aerdrv.c b/drivers/pci/pcie/aer/aerdrv.c
index f409948e1a9b..2b2b6508efde 100644
--- a/drivers/pci/pcie/aer/aerdrv.c
+++ b/drivers/pci/pcie/aer/aerdrv.c
@@ -416,7 +416,7 @@ static void aer_error_resume(struct pci_dev *dev)
*/
static int __init aer_service_init(void)
{
- if (!pci_aer_available())
+ if (!pci_aer_available() || aer_acpi_firmware_first())
return -ENXIO;
return pcie_port_service_register(&aerdriver);
}
diff --git a/drivers/pci/pcie/aer/aerdrv.h b/drivers/pci/pcie/aer/aerdrv.h
index 80c11d131499..9656e3060412 100644
--- a/drivers/pci/pcie/aer/aerdrv.h
+++ b/drivers/pci/pcie/aer/aerdrv.h
@@ -132,6 +132,7 @@ static inline int aer_osc_setup(struct pcie_device *pciedev)
#ifdef CONFIG_ACPI_APEI
extern int pcie_aer_get_firmware_first(struct pci_dev *pci_dev);
+extern bool aer_acpi_firmware_first(void);
#else
static inline int pcie_aer_get_firmware_first(struct pci_dev *pci_dev)
{
@@ -139,6 +140,8 @@ static inline int pcie_aer_get_firmware_first(struct pci_dev *pci_dev)
return pci_dev->__aer_firmware_first;
return 0;
}
+
+static inline bool aer_acpi_firmware_first(void) { return false; }
#endif
static inline void pcie_aer_force_firmware_first(struct pci_dev *pci_dev,
diff --git a/drivers/pci/pcie/aer/aerdrv_acpi.c b/drivers/pci/pcie/aer/aerdrv_acpi.c
index 2bb9b8972211..275bf158ffa7 100644
--- a/drivers/pci/pcie/aer/aerdrv_acpi.c
+++ b/drivers/pci/pcie/aer/aerdrv_acpi.c
@@ -93,4 +93,38 @@ int pcie_aer_get_firmware_first(struct pci_dev *dev)
aer_set_firmware_first(dev);
return dev->__aer_firmware_first;
}
+
+static bool aer_firmware_first;
+
+static int aer_hest_parse_aff(struct acpi_hest_header *hest_hdr, void *data)
+{
+ struct acpi_hest_aer_common *p;
+
+ if (aer_firmware_first)
+ return 0;
+
+ switch (hest_hdr->type) {
+ case ACPI_HEST_TYPE_AER_ROOT_PORT:
+ case ACPI_HEST_TYPE_AER_ENDPOINT:
+ case ACPI_HEST_TYPE_AER_BRIDGE:
+ p = (struct acpi_hest_aer_common *)(hest_hdr + 1);
+ aer_firmware_first = !!(p->flags & ACPI_HEST_FIRMWARE_FIRST);
+ default:
+ return 0;
+ }
+}
+
+/**
+ * aer_acpi_firmware_first - Check if APEI should control AER.
+ */
+bool aer_acpi_firmware_first(void)
+{
+ static bool parsed = false;
+
+ if (!parsed) {
+ apei_hest_parse(aer_hest_parse_aff, NULL);
+ parsed = true;
+ }
+ return aer_firmware_first;
+}
#endif
diff --git a/drivers/pci/pcie/aer/aerdrv_core.c b/drivers/pci/pcie/aer/aerdrv_core.c
index 29e268fadf14..43421fbe080a 100644
--- a/drivers/pci/pcie/aer/aerdrv_core.c
+++ b/drivers/pci/pcie/aer/aerdrv_core.c
@@ -754,7 +754,7 @@ void aer_isr(struct work_struct *work)
{
struct aer_rpc *rpc = container_of(work, struct aer_rpc, dpc_handler);
struct pcie_device *p_device = rpc->rpd;
- struct aer_err_source e_src;
+ struct aer_err_source uninitialized_var(e_src);
mutex_lock(&rpc->rpc_mutex);
while (get_e_source(rpc, &e_src))
diff --git a/drivers/pci/pcie/portdrv_acpi.c b/drivers/pci/pcie/portdrv_acpi.c
index b7c4cb1ccb23..5982b6a63b89 100644
--- a/drivers/pci/pcie/portdrv_acpi.c
+++ b/drivers/pci/pcie/portdrv_acpi.c
@@ -49,7 +49,7 @@ int pcie_port_acpi_setup(struct pci_dev *port, int *srv_mask)
| OSC_PCI_EXPRESS_PME_CONTROL;
if (pci_aer_available()) {
- if (pcie_aer_get_firmware_first(port))
+ if (aer_acpi_firmware_first())
dev_dbg(&port->dev, "PCIe errors handled by BIOS.\n");
else
flags |= OSC_PCI_EXPRESS_AER_CONTROL;
diff --git a/drivers/pci/probe.c b/drivers/pci/probe.c
index 12625d90f8b5..c84900da3c59 100644
--- a/drivers/pci/probe.c
+++ b/drivers/pci/probe.c
@@ -961,8 +961,8 @@ int pci_setup_device(struct pci_dev *dev)
dev->class = class;
class >>= 8;
- dev_dbg(&dev->dev, "found [%04x:%04x] class %06x header type %02x\n",
- dev->vendor, dev->device, class, dev->hdr_type);
+ dev_printk(KERN_DEBUG, &dev->dev, "[%04x:%04x] type %d class %#08x\n",
+ dev->vendor, dev->device, dev->hdr_type, class);
/* need to have dev->class ready */
dev->cfg_size = pci_cfg_space_size(dev);
diff --git a/drivers/pci/proc.c b/drivers/pci/proc.c
index 01f0306525a5..27911b55c2a5 100644
--- a/drivers/pci/proc.c
+++ b/drivers/pci/proc.c
@@ -10,7 +10,6 @@
#include <linux/module.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
-#include <linux/smp_lock.h>
#include <linux/capability.h>
#include <asm/uaccess.h>
#include <asm/byteorder.h>
@@ -212,8 +211,6 @@ static long proc_bus_pci_ioctl(struct file *file, unsigned int cmd,
#endif /* HAVE_PCI_MMAP */
int ret = 0;
- lock_kernel();
-
switch (cmd) {
case PCIIOC_CONTROLLER:
ret = pci_domain_nr(dev->bus);
@@ -242,7 +239,6 @@ static long proc_bus_pci_ioctl(struct file *file, unsigned int cmd,
break;
};
- unlock_kernel();
return ret;
}
@@ -260,7 +256,7 @@ static int proc_bus_pci_mmap(struct file *file, struct vm_area_struct *vma)
/* Make sure the caller is mapping a real resource for this device */
for (i = 0; i < PCI_ROM_RESOURCE; i++) {
- if (pci_mmap_fits(dev, i, vma))
+ if (pci_mmap_fits(dev, i, vma, PCI_MMAP_PROCFS))
break;
}
@@ -306,6 +302,7 @@ static const struct file_operations proc_bus_pci_operations = {
.read = proc_bus_pci_read,
.write = proc_bus_pci_write,
.unlocked_ioctl = proc_bus_pci_ioctl,
+ .compat_ioctl = proc_bus_pci_ioctl,
#ifdef HAVE_PCI_MMAP
.open = proc_bus_pci_open,
.release = proc_bus_pci_release,
diff --git a/drivers/pci/quirks.c b/drivers/pci/quirks.c
index cc96c7142dac..6f9350cabbd5 100644
--- a/drivers/pci/quirks.c
+++ b/drivers/pci/quirks.c
@@ -2136,6 +2136,24 @@ DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82865_HB,
DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82875_HB,
quirk_unhide_mch_dev6);
+#ifdef CONFIG_TILE
+/*
+ * The Tilera TILEmpower platform needs to set the link speed
+ * to 2.5GT(Giga-Transfers)/s (Gen 1). The default link speed
+ * setting is 5GT/s (Gen 2). 0x98 is the Link Control2 PCIe
+ * capability register of the PEX8624 PCIe switch. The switch
+ * supports link speed auto negotiation, but falsely sets
+ * the link speed to 5GT/s.
+ */
+static void __devinit quirk_tile_plx_gen1(struct pci_dev *dev)
+{
+ if (tile_plx_gen1) {
+ pci_write_config_dword(dev, 0x98, 0x1);
+ mdelay(50);
+ }
+}
+DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_PLX, 0x8624, quirk_tile_plx_gen1);
+#endif /* CONFIG_TILE */
#ifdef CONFIG_PCI_MSI
/* Some chipsets do not support MSI. We cannot easily rely on setting
@@ -2297,6 +2315,37 @@ DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_NVIDIA,
PCI_DEVICE_ID_NVIDIA_NVENET_15,
nvenet_msi_disable);
+/*
+ * Some versions of the MCP55 bridge from nvidia have a legacy irq routing
+ * config register. This register controls the routing of legacy interrupts
+ * from devices that route through the MCP55. If this register is misprogrammed,
+ * interrupts are only sent to the BSP, unlike conventional systems where the
+ * irq is broadcast to all online cpus. Not having this register set
+ * properly prevents kdump from booting up properly, so let's make sure that
+ * we have it set correctly.
+ * Note this is an undocumented register.
+ */
+static void __devinit nvbridge_check_legacy_irq_routing(struct pci_dev *dev)
+{
+ u32 cfg;
+
+ pci_read_config_dword(dev, 0x74, &cfg);
+
+ if (cfg & ((1 << 2) | (1 << 15))) {
+ printk(KERN_INFO "Rewriting irq routing register on MCP55\n");
+ cfg &= ~((1 << 2) | (1 << 15));
+ pci_write_config_dword(dev, 0x74, cfg);
+ }
+}
+
+DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_NVIDIA,
+ PCI_DEVICE_ID_NVIDIA_MCP55_BRIDGE_V0,
+ nvbridge_check_legacy_irq_routing);
+
+DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_NVIDIA,
+ PCI_DEVICE_ID_NVIDIA_MCP55_BRIDGE_V4,
+ nvbridge_check_legacy_irq_routing);
+
static int __devinit ht_check_msi_mapping(struct pci_dev *dev)
{
int pos, ttl = 48;
diff --git a/drivers/pci/setup-res.c b/drivers/pci/setup-res.c
index 2aaa13150de3..bc0e6eea0fff 100644
--- a/drivers/pci/setup-res.c
+++ b/drivers/pci/setup-res.c
@@ -85,7 +85,7 @@ void pci_update_resource(struct pci_dev *dev, int resno)
}
}
res->flags &= ~IORESOURCE_UNSET;
- dev_info(&dev->dev, "BAR %d: set to %pR (PCI address [%#llx-%#llx]\n",
+ dev_info(&dev->dev, "BAR %d: set to %pR (PCI address [%#llx-%#llx])\n",
resno, res, (unsigned long long)region.start,
(unsigned long long)region.end);
}
diff --git a/drivers/pci/xen-pcifront.c b/drivers/pci/xen-pcifront.c
new file mode 100644
index 000000000000..3a5a6fcc0ead
--- /dev/null
+++ b/drivers/pci/xen-pcifront.c
@@ -0,0 +1,1148 @@
+/*
+ * Xen PCI Frontend.
+ *
+ * Author: Ryan Wilson <hap9@epoch.ncsc.mil>
+ */
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/mm.h>
+#include <xen/xenbus.h>
+#include <xen/events.h>
+#include <xen/grant_table.h>
+#include <xen/page.h>
+#include <linux/spinlock.h>
+#include <linux/pci.h>
+#include <linux/msi.h>
+#include <xen/interface/io/pciif.h>
+#include <asm/xen/pci.h>
+#include <linux/interrupt.h>
+#include <asm/atomic.h>
+#include <linux/workqueue.h>
+#include <linux/bitops.h>
+#include <linux/time.h>
+
+#define INVALID_GRANT_REF (0)
+#define INVALID_EVTCHN (-1)
+
+struct pci_bus_entry {
+ struct list_head list;
+ struct pci_bus *bus;
+};
+
+#define _PDEVB_op_active (0)
+#define PDEVB_op_active (1 << (_PDEVB_op_active))
+
+struct pcifront_device {
+ struct xenbus_device *xdev;
+ struct list_head root_buses;
+
+ int evtchn;
+ int gnt_ref;
+
+ int irq;
+
+ /* Lock this when doing any operations in sh_info */
+ spinlock_t sh_info_lock;
+ struct xen_pci_sharedinfo *sh_info;
+ struct work_struct op_work;
+ unsigned long flags;
+
+};
+
+struct pcifront_sd {
+ int domain;
+ struct pcifront_device *pdev;
+};
+
+static inline struct pcifront_device *
+pcifront_get_pdev(struct pcifront_sd *sd)
+{
+ return sd->pdev;
+}
+
+static inline void pcifront_init_sd(struct pcifront_sd *sd,
+ unsigned int domain, unsigned int bus,
+ struct pcifront_device *pdev)
+{
+ sd->domain = domain;
+ sd->pdev = pdev;
+}
+
+static DEFINE_SPINLOCK(pcifront_dev_lock);
+static struct pcifront_device *pcifront_dev;
+
+static int verbose_request;
+module_param(verbose_request, int, 0644);
+
+static int errno_to_pcibios_err(int errno)
+{
+ switch (errno) {
+ case XEN_PCI_ERR_success:
+ return PCIBIOS_SUCCESSFUL;
+
+ case XEN_PCI_ERR_dev_not_found:
+ return PCIBIOS_DEVICE_NOT_FOUND;
+
+ case XEN_PCI_ERR_invalid_offset:
+ case XEN_PCI_ERR_op_failed:
+ return PCIBIOS_BAD_REGISTER_NUMBER;
+
+ case XEN_PCI_ERR_not_implemented:
+ return PCIBIOS_FUNC_NOT_SUPPORTED;
+
+ case XEN_PCI_ERR_access_denied:
+ return PCIBIOS_SET_FAILED;
+ }
+ return errno;
+}
+
+static inline void schedule_pcifront_aer_op(struct pcifront_device *pdev)
+{
+ if (test_bit(_XEN_PCIB_active, (unsigned long *)&pdev->sh_info->flags)
+ && !test_and_set_bit(_PDEVB_op_active, &pdev->flags)) {
+ dev_dbg(&pdev->xdev->dev, "schedule aer frontend job\n");
+ schedule_work(&pdev->op_work);
+ }
+}
+
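+/*
+ * Issue one PCI operation to the backend: copy the request into the shared
+ * info page, kick the event channel, then poll the irq until the backend
+ * clears the active flag or the two-second deadline expires.
+ */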
+static int do_pci_op(struct pcifront_device *pdev, struct xen_pci_op *op)
+{
+ int err = 0;
+ struct xen_pci_op *active_op = &pdev->sh_info->op;
+ unsigned long irq_flags;
+ evtchn_port_t port = pdev->evtchn;
+ unsigned irq = pdev->irq;
+ s64 ns, ns_timeout;
+ struct timeval tv;
+
+ spin_lock_irqsave(&pdev->sh_info_lock, irq_flags);
+
+ memcpy(active_op, op, sizeof(struct xen_pci_op));
+
+ /* Go */
+ wmb();
+ set_bit(_XEN_PCIF_active, (unsigned long *)&pdev->sh_info->flags);
+ notify_remote_via_evtchn(port);
+
+ /*
+ * We set a poll timeout of 3 seconds but give up on return after
+ * 2 seconds. It is better to time out too late rather than too early
+ * (in the latter case we end up continually re-executing poll() with a
+ * timeout in the past). 1s difference gives plenty of slack for error.
+ */
+ do_gettimeofday(&tv);
+ ns_timeout = timeval_to_ns(&tv) + 2 * (s64)NSEC_PER_SEC;
+
+ xen_clear_irq_pending(irq);
+
+ while (test_bit(_XEN_PCIF_active,
+ (unsigned long *)&pdev->sh_info->flags)) {
+ xen_poll_irq_timeout(irq, jiffies + 3*HZ);
+ xen_clear_irq_pending(irq);
+ do_gettimeofday(&tv);
+ ns = timeval_to_ns(&tv);
+ if (ns > ns_timeout) {
+ dev_err(&pdev->xdev->dev,
+ "pciback not responding!!!\n");
+ clear_bit(_XEN_PCIF_active,
+ (unsigned long *)&pdev->sh_info->flags);
+ err = XEN_PCI_ERR_dev_not_found;
+ goto out;
+ }
+ }
+
+ /*
+ * We might lose a backend service request since we reuse the same
+ * evtchn for the pci_conf backend response. So re-schedule the
+ * aer pcifront service.
+ */
+ if (test_bit(_XEN_PCIB_active,
+ (unsigned long *)&pdev->sh_info->flags)) {
+ dev_err(&pdev->xdev->dev,
+ "schedule aer pcifront service\n");
+ schedule_pcifront_aer_op(pdev);
+ }
+
+ memcpy(op, active_op, sizeof(struct xen_pci_op));
+
+ err = op->err;
+out:
+ spin_unlock_irqrestore(&pdev->sh_info_lock, irq_flags);
+ return err;
+}
+
+/* Access to this function is spinlocked in drivers/pci/access.c */
+static int pcifront_bus_read(struct pci_bus *bus, unsigned int devfn,
+ int where, int size, u32 *val)
+{
+ int err = 0;
+ struct xen_pci_op op = {
+ .cmd = XEN_PCI_OP_conf_read,
+ .domain = pci_domain_nr(bus),
+ .bus = bus->number,
+ .devfn = devfn,
+ .offset = where,
+ .size = size,
+ };
+ struct pcifront_sd *sd = bus->sysdata;
+ struct pcifront_device *pdev = pcifront_get_pdev(sd);
+
+ if (verbose_request)
+ dev_info(&pdev->xdev->dev,
+ "read dev=%04x:%02x:%02x.%01x - offset %x size %d\n",
+ pci_domain_nr(bus), bus->number, PCI_SLOT(devfn),
+ PCI_FUNC(devfn), where, size);
+
+ err = do_pci_op(pdev, &op);
+
+ if (likely(!err)) {
+ if (verbose_request)
+ dev_info(&pdev->xdev->dev, "read got back value %x\n",
+ op.value);
+
+ *val = op.value;
+ } else if (err == -ENODEV) {
+ /* No device here, pretend that it just returned 0 */
+ err = 0;
+ *val = 0;
+ }
+
+ return errno_to_pcibios_err(err);
+}
+
+/* Access to this function is spinlocked in drivers/pci/access.c */
+static int pcifront_bus_write(struct pci_bus *bus, unsigned int devfn,
+ int where, int size, u32 val)
+{
+ struct xen_pci_op op = {
+ .cmd = XEN_PCI_OP_conf_write,
+ .domain = pci_domain_nr(bus),
+ .bus = bus->number,
+ .devfn = devfn,
+ .offset = where,
+ .size = size,
+ .value = val,
+ };
+ struct pcifront_sd *sd = bus->sysdata;
+ struct pcifront_device *pdev = pcifront_get_pdev(sd);
+
+ if (verbose_request)
+ dev_info(&pdev->xdev->dev,
+ "write dev=%04x:%02x:%02x.%01x - "
+ "offset %x size %d val %x\n",
+ pci_domain_nr(bus), bus->number,
+ PCI_SLOT(devfn), PCI_FUNC(devfn), where, size, val);
+
+ return errno_to_pcibios_err(do_pci_op(pdev, &op));
+}
+
+struct pci_ops pcifront_bus_ops = {
+ .read = pcifront_bus_read,
+ .write = pcifront_bus_write,
+};
+
+#ifdef CONFIG_PCI_MSI
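+/*
+ * Ask the backend to enable MSI-X: the device's msi_list entries are copied
+ * into the request and, on success, the vector numbers assigned by the
+ * backend are copied back into the caller's array.
+ */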
+static int pci_frontend_enable_msix(struct pci_dev *dev,
+ int **vector, int nvec)
+{
+ int err;
+ int i;
+ struct xen_pci_op op = {
+ .cmd = XEN_PCI_OP_enable_msix,
+ .domain = pci_domain_nr(dev->bus),
+ .bus = dev->bus->number,
+ .devfn = dev->devfn,
+ .value = nvec,
+ };
+ struct pcifront_sd *sd = dev->bus->sysdata;
+ struct pcifront_device *pdev = pcifront_get_pdev(sd);
+ struct msi_desc *entry;
+
+ if (nvec > SH_INFO_MAX_VEC) {
+ dev_err(&dev->dev, "too much vector for pci frontend: %x."
+ " Increase SH_INFO_MAX_VEC.\n", nvec);
+ return -EINVAL;
+ }
+
+ i = 0;
+ list_for_each_entry(entry, &dev->msi_list, list) {
+ op.msix_entries[i].entry = entry->msi_attrib.entry_nr;
+ /* Vector is useless at this point. */
+ op.msix_entries[i].vector = -1;
+ i++;
+ }
+
+ err = do_pci_op(pdev, &op);
+
+ if (likely(!err)) {
+ if (likely(!op.value)) {
+ /* we get the result */
+ for (i = 0; i < nvec; i++)
+ *(*vector+i) = op.msix_entries[i].vector;
+ return 0;
+ } else {
+ printk(KERN_DEBUG "enable msix get value %x\n",
+ op.value);
+ return op.value;
+ }
+ } else {
+ dev_err(&dev->dev, "enable msix get err %x\n", err);
+ return err;
+ }
+}
+
+static void pci_frontend_disable_msix(struct pci_dev *dev)
+{
+ int err;
+ struct xen_pci_op op = {
+ .cmd = XEN_PCI_OP_disable_msix,
+ .domain = pci_domain_nr(dev->bus),
+ .bus = dev->bus->number,
+ .devfn = dev->devfn,
+ };
+ struct pcifront_sd *sd = dev->bus->sysdata;
+ struct pcifront_device *pdev = pcifront_get_pdev(sd);
+
+ err = do_pci_op(pdev, &op);
+
+ /* What should we do on error? */
+ if (err)
+ dev_err(&dev->dev, "pci_disable_msix get err %x\n", err);
+}
+
+static int pci_frontend_enable_msi(struct pci_dev *dev, int **vector)
+{
+ int err;
+ struct xen_pci_op op = {
+ .cmd = XEN_PCI_OP_enable_msi,
+ .domain = pci_domain_nr(dev->bus),
+ .bus = dev->bus->number,
+ .devfn = dev->devfn,
+ };
+ struct pcifront_sd *sd = dev->bus->sysdata;
+ struct pcifront_device *pdev = pcifront_get_pdev(sd);
+
+ err = do_pci_op(pdev, &op);
+ if (likely(!err)) {
+ *(*vector) = op.value;
+ } else {
+ dev_err(&dev->dev, "pci frontend enable msi failed for dev "
+ "%x:%x\n", op.bus, op.devfn);
+ err = -EINVAL;
+ }
+ return err;
+}
+
+static void pci_frontend_disable_msi(struct pci_dev *dev)
+{
+ int err;
+ struct xen_pci_op op = {
+ .cmd = XEN_PCI_OP_disable_msi,
+ .domain = pci_domain_nr(dev->bus),
+ .bus = dev->bus->number,
+ .devfn = dev->devfn,
+ };
+ struct pcifront_sd *sd = dev->bus->sysdata;
+ struct pcifront_device *pdev = pcifront_get_pdev(sd);
+
+ err = do_pci_op(pdev, &op);
+ if (err == XEN_PCI_ERR_dev_not_found) {
+ /* XXX No response from backend, what shall we do? */
+ printk(KERN_DEBUG "get no response from backend for disable MSI\n");
+ return;
+ }
+ if (err)
+ /* How can pciback notify us of a failure? */
+ printk(KERN_DEBUG "get fake response from backend\n");
+}
+
+static struct xen_pci_frontend_ops pci_frontend_ops = {
+ .enable_msi = pci_frontend_enable_msi,
+ .disable_msi = pci_frontend_disable_msi,
+ .enable_msix = pci_frontend_enable_msix,
+ .disable_msix = pci_frontend_disable_msix,
+};
+
+static void pci_frontend_registrar(int enable)
+{
+ if (enable)
+ xen_pci_frontend = &pci_frontend_ops;
+ else
+ xen_pci_frontend = NULL;
+};
+#else
+static inline void pci_frontend_registrar(int enable) { };
+#endif /* CONFIG_PCI_MSI */
+
+/* Claim resources for the PCI frontend as-is, backend won't allow changes */
+static int pcifront_claim_resource(struct pci_dev *dev, void *data)
+{
+ struct pcifront_device *pdev = data;
+ int i;
+ struct resource *r;
+
+ for (i = 0; i < PCI_NUM_RESOURCES; i++) {
+ r = &dev->resource[i];
+
+ if (!r->parent && r->start && r->flags) {
+ dev_info(&pdev->xdev->dev, "claiming resource %s/%d\n",
+ pci_name(dev), i);
+ if (pci_claim_resource(dev, i)) {
+ dev_err(&pdev->xdev->dev, "Could not claim "
+ "resource %s/%d! Device offline. Try "
+ "giving less than 4GB to domain.\n",
+ pci_name(dev), i);
+ }
+ }
+ }
+
+ return 0;
+}
+
+static int __devinit pcifront_scan_bus(struct pcifront_device *pdev,
+ unsigned int domain, unsigned int bus,
+ struct pci_bus *b)
+{
+ struct pci_dev *d;
+ unsigned int devfn;
+
+ /* Scan the bus for functions and add them.
+ * We omit handling of PCI bridge attachment because pciback prevents
+ * bridges from being exported.
+ */
+ for (devfn = 0; devfn < 0x100; devfn++) {
+ d = pci_get_slot(b, devfn);
+ if (d) {
+ /* Device is already known. */
+ pci_dev_put(d);
+ continue;
+ }
+
+ d = pci_scan_single_device(b, devfn);
+ if (d)
+ dev_info(&pdev->xdev->dev, "New device on "
+ "%04x:%02x:%02x.%02x found.\n", domain, bus,
+ PCI_SLOT(devfn), PCI_FUNC(devfn));
+ }
+
+ return 0;
+}
+
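+/*
+ * Create a new root bus for the given domain/bus, scan it, claim the
+ * resources of the discovered devices and register them with the PCI core.
+ */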
+static int __devinit pcifront_scan_root(struct pcifront_device *pdev,
+ unsigned int domain, unsigned int bus)
+{
+ struct pci_bus *b;
+ struct pcifront_sd *sd = NULL;
+ struct pci_bus_entry *bus_entry = NULL;
+ int err = 0;
+
+#ifndef CONFIG_PCI_DOMAINS
+ if (domain != 0) {
+ dev_err(&pdev->xdev->dev,
+ "PCI Root in non-zero PCI Domain! domain=%d\n", domain);
+ dev_err(&pdev->xdev->dev,
+ "Please compile with CONFIG_PCI_DOMAINS\n");
+ err = -EINVAL;
+ goto err_out;
+ }
+#endif
+
+ dev_info(&pdev->xdev->dev, "Creating PCI Frontend Bus %04x:%02x\n",
+ domain, bus);
+
+ bus_entry = kmalloc(sizeof(*bus_entry), GFP_KERNEL);
+ sd = kmalloc(sizeof(*sd), GFP_KERNEL);
+ if (!bus_entry || !sd) {
+ err = -ENOMEM;
+ goto err_out;
+ }
+ pcifront_init_sd(sd, domain, bus, pdev);
+
+ b = pci_scan_bus_parented(&pdev->xdev->dev, bus,
+ &pcifront_bus_ops, sd);
+ if (!b) {
+ dev_err(&pdev->xdev->dev,
+ "Error creating PCI Frontend Bus!\n");
+ err = -ENOMEM;
+ goto err_out;
+ }
+
+ bus_entry->bus = b;
+
+ list_add(&bus_entry->list, &pdev->root_buses);
+
+ /* pci_scan_bus_parented skips devices which do not have a
+ * devfn==0. The pcifront_scan_bus enumerates all devfn. */
+ err = pcifront_scan_bus(pdev, domain, bus, b);
+
+ /* Claim resources before going "live" with our devices */
+ pci_walk_bus(b, pcifront_claim_resource, pdev);
+
+ /* Create SysFS and notify udev of the devices. Aka: "going live" */
+ pci_bus_add_devices(b);
+
+ return err;
+
+err_out:
+ kfree(bus_entry);
+ kfree(sd);
+
+ return err;
+}
+
+static int __devinit pcifront_rescan_root(struct pcifront_device *pdev,
+ unsigned int domain, unsigned int bus)
+{
+ int err;
+ struct pci_bus *b;
+
+#ifndef CONFIG_PCI_DOMAINS
+ if (domain != 0) {
+ dev_err(&pdev->xdev->dev,
+ "PCI Root in non-zero PCI Domain! domain=%d\n", domain);
+ dev_err(&pdev->xdev->dev,
+ "Please compile with CONFIG_PCI_DOMAINS\n");
+ return -EINVAL;
+ }
+#endif
+
+ dev_info(&pdev->xdev->dev, "Rescanning PCI Frontend Bus %04x:%02x\n",
+ domain, bus);
+
+ b = pci_find_bus(domain, bus);
+ if (!b)
+ /* If the bus is unknown, create it. */
+ return pcifront_scan_root(pdev, domain, bus);
+
+ err = pcifront_scan_bus(pdev, domain, bus, b);
+
+ /* Claim resources before going "live" with our devices */
+ pci_walk_bus(b, pcifront_claim_resource, pdev);
+
+ /* Create SysFS and notify udev of the devices. Aka: "going live" */
+ pci_bus_add_devices(b);
+
+ return err;
+}
+
+static void free_root_bus_devs(struct pci_bus *bus)
+{
+ struct pci_dev *dev;
+
+ while (!list_empty(&bus->devices)) {
+ dev = container_of(bus->devices.next, struct pci_dev,
+ bus_list);
+ dev_dbg(&dev->dev, "removing device\n");
+ pci_remove_bus_device(dev);
+ }
+}
+
+static void pcifront_free_roots(struct pcifront_device *pdev)
+{
+ struct pci_bus_entry *bus_entry, *t;
+
+ dev_dbg(&pdev->xdev->dev, "cleaning up root buses\n");
+
+ list_for_each_entry_safe(bus_entry, t, &pdev->root_buses, list) {
+ list_del(&bus_entry->list);
+
+ free_root_bus_devs(bus_entry->bus);
+
+ kfree(bus_entry->bus->sysdata);
+
+ device_unregister(bus_entry->bus->bridge);
+ pci_remove_bus(bus_entry->bus);
+
+ kfree(bus_entry);
+ }
+}
+
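+/*
+ * Dispatch an AER recovery request from the backend to the driver of the
+ * affected device, mapping the Xen AER command onto the matching
+ * pci_error_handlers callback and returning the driver's verdict.
+ */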
+static pci_ers_result_t pcifront_common_process(int cmd,
+ struct pcifront_device *pdev,
+ pci_channel_state_t state)
+{
+ pci_ers_result_t result;
+ struct pci_driver *pdrv;
+ int bus = pdev->sh_info->aer_op.bus;
+ int devfn = pdev->sh_info->aer_op.devfn;
+ struct pci_dev *pcidev;
+ int flag = 0;
+
+ dev_dbg(&pdev->xdev->dev,
+ "pcifront AER process: cmd %x (bus:%x, devfn%x)",
+ cmd, bus, devfn);
+ result = PCI_ERS_RESULT_NONE;
+
+ pcidev = pci_get_bus_and_slot(bus, devfn);
+ if (!pcidev || !pcidev->driver) {
+ dev_err(&pdev->xdev->dev, "device or AER driver is NULL\n");
+ if (pcidev)
+ pci_dev_put(pcidev);
+ return result;
+ }
+ pdrv = pcidev->driver;
+
+ if (get_driver(&pdrv->driver)) {
+ if (pdrv->err_handler && pdrv->err_handler->error_detected) {
+ dev_dbg(&pcidev->dev,
+ "trying to call AER service\n");
+ if (pcidev) {
+ flag = 1;
+ switch (cmd) {
+ case XEN_PCI_OP_aer_detected:
+ result = pdrv->err_handler->
+ error_detected(pcidev, state);
+ break;
+ case XEN_PCI_OP_aer_mmio:
+ result = pdrv->err_handler->
+ mmio_enabled(pcidev);
+ break;
+ case XEN_PCI_OP_aer_slotreset:
+ result = pdrv->err_handler->
+ slot_reset(pcidev);
+ break;
+ case XEN_PCI_OP_aer_resume:
+ pdrv->err_handler->resume(pcidev);
+ break;
+ default:
+ dev_err(&pdev->xdev->dev,
+ "bad request in aer recovery "
+ "operation!\n");
+
+ }
+ }
+ }
+ put_driver(&pdrv->driver);
+ }
+ if (!flag)
+ result = PCI_ERS_RESULT_NONE;
+
+ return result;
+}
+
+
+static void pcifront_do_aer(struct work_struct *data)
+{
+ struct pcifront_device *pdev =
+ container_of(data, struct pcifront_device, op_work);
+ int cmd = pdev->sh_info->aer_op.cmd;
+ pci_channel_state_t state =
+ (pci_channel_state_t)pdev->sh_info->aer_op.err;
+
+ /* If a pci_conf op is in progress, we have to wait until it is
+  * done before servicing the aer op. */
+ dev_dbg(&pdev->xdev->dev,
+ "pcifront service aer bus %x devfn %x\n",
+ pdev->sh_info->aer_op.bus, pdev->sh_info->aer_op.devfn);
+
+ pdev->sh_info->aer_op.err = pcifront_common_process(cmd, pdev, state);
+
+ /* Post the result back to the backend. */
+ wmb();
+ clear_bit(_XEN_PCIB_active, (unsigned long *)&pdev->sh_info->flags);
+ notify_remote_via_evtchn(pdev->evtchn);
+
+ /* In case we lost an aer request in the short window above */
+ smp_mb__before_clear_bit();
+ clear_bit(_PDEVB_op_active, &pdev->flags);
+ smp_mb__after_clear_bit();
+
+ schedule_pcifront_aer_op(pdev);
+
+}
+
+static irqreturn_t pcifront_handler_aer(int irq, void *dev)
+{
+ struct pcifront_device *pdev = dev;
+ schedule_pcifront_aer_op(pdev);
+ return IRQ_HANDLED;
+}
+static int pcifront_connect(struct pcifront_device *pdev)
+{
+ int err = 0;
+
+ spin_lock(&pcifront_dev_lock);
+
+ if (!pcifront_dev) {
+ dev_info(&pdev->xdev->dev, "Installing PCI frontend\n");
+ pcifront_dev = pdev;
+ } else {
+ dev_err(&pdev->xdev->dev, "PCI frontend already installed!\n");
+ err = -EEXIST;
+ }
+
+ spin_unlock(&pcifront_dev_lock);
+
+ return err;
+}
+
+static void pcifront_disconnect(struct pcifront_device *pdev)
+{
+ spin_lock(&pcifront_dev_lock);
+
+ if (pdev == pcifront_dev) {
+ dev_info(&pdev->xdev->dev,
+ "Disconnecting PCI Frontend Buses\n");
+ pcifront_dev = NULL;
+ }
+
+ spin_unlock(&pcifront_dev_lock);
+}
+static struct pcifront_device *alloc_pdev(struct xenbus_device *xdev)
+{
+ struct pcifront_device *pdev;
+
+ pdev = kzalloc(sizeof(struct pcifront_device), GFP_KERNEL);
+ if (pdev == NULL)
+ goto out;
+
+ pdev->sh_info =
+ (struct xen_pci_sharedinfo *)__get_free_page(GFP_KERNEL);
+ if (pdev->sh_info == NULL) {
+ kfree(pdev);
+ pdev = NULL;
+ goto out;
+ }
+ pdev->sh_info->flags = 0;
+
+ /* Flag for registering PV AER handler */
+ set_bit(_XEN_PCIB_AERHANDLER, (void *)&pdev->sh_info->flags);
+
+ dev_set_drvdata(&xdev->dev, pdev);
+ pdev->xdev = xdev;
+
+ INIT_LIST_HEAD(&pdev->root_buses);
+
+ spin_lock_init(&pdev->sh_info_lock);
+
+ pdev->evtchn = INVALID_EVTCHN;
+ pdev->gnt_ref = INVALID_GRANT_REF;
+ pdev->irq = -1;
+
+ INIT_WORK(&pdev->op_work, pcifront_do_aer);
+
+ dev_dbg(&xdev->dev, "Allocated pdev @ 0x%p pdev->sh_info @ 0x%p\n",
+ pdev, pdev->sh_info);
+out:
+ return pdev;
+}
+
+static void free_pdev(struct pcifront_device *pdev)
+{
+ dev_dbg(&pdev->xdev->dev, "freeing pdev @ 0x%p\n", pdev);
+
+ pcifront_free_roots(pdev);
+
+ /* For PCIE_AER error handling job */
+ flush_scheduled_work();
+
+ if (pdev->irq >= 0)
+ unbind_from_irqhandler(pdev->irq, pdev);
+
+ if (pdev->evtchn != INVALID_EVTCHN)
+ xenbus_free_evtchn(pdev->xdev, pdev->evtchn);
+
+ if (pdev->gnt_ref != INVALID_GRANT_REF)
+ gnttab_end_foreign_access(pdev->gnt_ref, 0 /* r/w page */,
+ (unsigned long)pdev->sh_info);
+ else
+ free_page((unsigned long)pdev->sh_info);
+
+ dev_set_drvdata(&pdev->xdev->dev, NULL);
+
+ kfree(pdev);
+}
+
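+/*
+ * Grant the backend access to the shared info page, bind an event channel,
+ * and publish both (plus the protocol magic) in xenstore inside a
+ * transaction, retrying on -EAGAIN before switching to Initialised.
+ */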
+static int pcifront_publish_info(struct pcifront_device *pdev)
+{
+ int err = 0;
+ struct xenbus_transaction trans;
+
+ err = xenbus_grant_ring(pdev->xdev, virt_to_mfn(pdev->sh_info));
+ if (err < 0)
+ goto out;
+
+ pdev->gnt_ref = err;
+
+ err = xenbus_alloc_evtchn(pdev->xdev, &pdev->evtchn);
+ if (err)
+ goto out;
+
+ err = bind_evtchn_to_irqhandler(pdev->evtchn, pcifront_handler_aer,
+ 0, "pcifront", pdev);
+
+ if (err < 0)
+ return err;
+
+ pdev->irq = err;
+
+do_publish:
+ err = xenbus_transaction_start(&trans);
+ if (err) {
+ xenbus_dev_fatal(pdev->xdev, err,
+ "Error writing configuration for backend "
+ "(start transaction)");
+ goto out;
+ }
+
+ err = xenbus_printf(trans, pdev->xdev->nodename,
+ "pci-op-ref", "%u", pdev->gnt_ref);
+ if (!err)
+ err = xenbus_printf(trans, pdev->xdev->nodename,
+ "event-channel", "%u", pdev->evtchn);
+ if (!err)
+ err = xenbus_printf(trans, pdev->xdev->nodename,
+ "magic", XEN_PCI_MAGIC);
+
+ if (err) {
+ xenbus_transaction_end(trans, 1);
+ xenbus_dev_fatal(pdev->xdev, err,
+ "Error writing configuration for backend");
+ goto out;
+ } else {
+ err = xenbus_transaction_end(trans, 0);
+ if (err == -EAGAIN)
+ goto do_publish;
+ else if (err) {
+ xenbus_dev_fatal(pdev->xdev, err,
+ "Error completing transaction "
+ "for backend");
+ goto out;
+ }
+ }
+
+ xenbus_switch_state(pdev->xdev, XenbusStateInitialised);
+
+ dev_dbg(&pdev->xdev->dev, "publishing successful!\n");
+
+out:
+ return err;
+}
+
+static int __devinit pcifront_try_connect(struct pcifront_device *pdev)
+{
+ int err = -EFAULT;
+ int i, num_roots, len;
+ char str[64];
+ unsigned int domain, bus;
+
+
+ /* Only connect once */
+ if (xenbus_read_driver_state(pdev->xdev->nodename) !=
+ XenbusStateInitialised)
+ goto out;
+
+ err = pcifront_connect(pdev);
+ if (err) {
+ xenbus_dev_fatal(pdev->xdev, err,
+ "Error connecting PCI Frontend");
+ goto out;
+ }
+
+ err = xenbus_scanf(XBT_NIL, pdev->xdev->otherend,
+ "root_num", "%d", &num_roots);
+ if (err == -ENOENT) {
+ xenbus_dev_error(pdev->xdev, err,
+ "No PCI Roots found, trying 0000:00");
+ err = pcifront_scan_root(pdev, 0, 0);
+ num_roots = 0;
+ } else if (err != 1) {
+ if (err == 0)
+ err = -EINVAL;
+ xenbus_dev_fatal(pdev->xdev, err,
+ "Error reading number of PCI roots");
+ goto out;
+ }
+
+ for (i = 0; i < num_roots; i++) {
+ len = snprintf(str, sizeof(str), "root-%d", i);
+ if (unlikely(len >= (sizeof(str) - 1))) {
+ err = -ENOMEM;
+ goto out;
+ }
+
+ err = xenbus_scanf(XBT_NIL, pdev->xdev->otherend, str,
+ "%x:%x", &domain, &bus);
+ if (err != 2) {
+ if (err >= 0)
+ err = -EINVAL;
+ xenbus_dev_fatal(pdev->xdev, err,
+ "Error reading PCI root %d", i);
+ goto out;
+ }
+
+ err = pcifront_scan_root(pdev, domain, bus);
+ if (err) {
+ xenbus_dev_fatal(pdev->xdev, err,
+ "Error scanning PCI root %04x:%02x",
+ domain, bus);
+ goto out;
+ }
+ }
+
+ err = xenbus_switch_state(pdev->xdev, XenbusStateConnected);
+
+out:
+ return err;
+}
+
+static int pcifront_try_disconnect(struct pcifront_device *pdev)
+{
+ int err = 0;
+ enum xenbus_state prev_state;
+
+
+ prev_state = xenbus_read_driver_state(pdev->xdev->nodename);
+
+ if (prev_state >= XenbusStateClosing)
+ goto out;
+
+ if (prev_state == XenbusStateConnected) {
+ pcifront_free_roots(pdev);
+ pcifront_disconnect(pdev);
+ }
+
+ err = xenbus_switch_state(pdev->xdev, XenbusStateClosed);
+
+out:
+
+ return err;
+}
+
+static int __devinit pcifront_attach_devices(struct pcifront_device *pdev)
+{
+ int err = -EFAULT;
+ int i, num_roots, len;
+ unsigned int domain, bus;
+ char str[64];
+
+ if (xenbus_read_driver_state(pdev->xdev->nodename) !=
+ XenbusStateReconfiguring)
+ goto out;
+
+ err = xenbus_scanf(XBT_NIL, pdev->xdev->otherend,
+ "root_num", "%d", &num_roots);
+ if (err == -ENOENT) {
+ xenbus_dev_error(pdev->xdev, err,
+ "No PCI Roots found, trying 0000:00");
+ err = pcifront_rescan_root(pdev, 0, 0);
+ num_roots = 0;
+ } else if (err != 1) {
+ if (err == 0)
+ err = -EINVAL;
+ xenbus_dev_fatal(pdev->xdev, err,
+ "Error reading number of PCI roots");
+ goto out;
+ }
+
+ for (i = 0; i < num_roots; i++) {
+ len = snprintf(str, sizeof(str), "root-%d", i);
+ if (unlikely(len >= (sizeof(str) - 1))) {
+ err = -ENOMEM;
+ goto out;
+ }
+
+ err = xenbus_scanf(XBT_NIL, pdev->xdev->otherend, str,
+ "%x:%x", &domain, &bus);
+ if (err != 2) {
+ if (err >= 0)
+ err = -EINVAL;
+ xenbus_dev_fatal(pdev->xdev, err,
+ "Error reading PCI root %d", i);
+ goto out;
+ }
+
+ err = pcifront_rescan_root(pdev, domain, bus);
+ if (err) {
+ xenbus_dev_fatal(pdev->xdev, err,
+ "Error scanning PCI root %04x:%02x",
+ domain, bus);
+ goto out;
+ }
+ }
+
+ xenbus_switch_state(pdev->xdev, XenbusStateConnected);
+
+out:
+ return err;
+}
+
+static int pcifront_detach_devices(struct pcifront_device *pdev)
+{
+ int err = 0;
+ int i, num_devs;
+ unsigned int domain, bus, slot, func;
+ struct pci_bus *pci_bus;
+ struct pci_dev *pci_dev;
+ char str[64];
+
+ if (xenbus_read_driver_state(pdev->xdev->nodename) !=
+ XenbusStateConnected)
+ goto out;
+
+ err = xenbus_scanf(XBT_NIL, pdev->xdev->otherend, "num_devs", "%d",
+ &num_devs);
+ if (err != 1) {
+ if (err >= 0)
+ err = -EINVAL;
+ xenbus_dev_fatal(pdev->xdev, err,
+ "Error reading number of PCI devices");
+ goto out;
+ }
+
+ /* Find devices being detached and remove them. */
+ for (i = 0; i < num_devs; i++) {
+ int l, state;
+ l = snprintf(str, sizeof(str), "state-%d", i);
+ if (unlikely(l >= (sizeof(str) - 1))) {
+ err = -ENOMEM;
+ goto out;
+ }
+ err = xenbus_scanf(XBT_NIL, pdev->xdev->otherend, str, "%d",
+ &state);
+ if (err != 1)
+ state = XenbusStateUnknown;
+
+ if (state != XenbusStateClosing)
+ continue;
+
+ /* Remove device. */
+ l = snprintf(str, sizeof(str), "vdev-%d", i);
+ if (unlikely(l >= (sizeof(str) - 1))) {
+ err = -ENOMEM;
+ goto out;
+ }
+ err = xenbus_scanf(XBT_NIL, pdev->xdev->otherend, str,
+ "%x:%x:%x.%x", &domain, &bus, &slot, &func);
+ if (err != 4) {
+ if (err >= 0)
+ err = -EINVAL;
+ xenbus_dev_fatal(pdev->xdev, err,
+ "Error reading PCI device %d", i);
+ goto out;
+ }
+
+ pci_bus = pci_find_bus(domain, bus);
+ if (!pci_bus) {
+ dev_dbg(&pdev->xdev->dev, "Cannot get bus %04x:%02x\n",
+ domain, bus);
+ continue;
+ }
+ pci_dev = pci_get_slot(pci_bus, PCI_DEVFN(slot, func));
+ if (!pci_dev) {
+ dev_dbg(&pdev->xdev->dev,
+ "Cannot get PCI device %04x:%02x:%02x.%02x\n",
+ domain, bus, slot, func);
+ continue;
+ }
+ pci_remove_bus_device(pci_dev);
+ pci_dev_put(pci_dev);
+
+ dev_dbg(&pdev->xdev->dev,
+ "PCI device %04x:%02x:%02x.%02x removed.\n",
+ domain, bus, slot, func);
+ }
+
+ err = xenbus_switch_state(pdev->xdev, XenbusStateReconfiguring);
+
+out:
+ return err;
+}
+
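+/*
+ * xenbus state-change handler: connect when the backend reports Connected,
+ * tear down on Closing, and handle device unplug/replug through the
+ * Reconfiguring/Reconfigured states.
+ */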
+static void __init_refok pcifront_backend_changed(struct xenbus_device *xdev,
+ enum xenbus_state be_state)
+{
+ struct pcifront_device *pdev = dev_get_drvdata(&xdev->dev);
+
+ switch (be_state) {
+ case XenbusStateUnknown:
+ case XenbusStateInitialising:
+ case XenbusStateInitWait:
+ case XenbusStateInitialised:
+ case XenbusStateClosed:
+ break;
+
+ case XenbusStateConnected:
+ pcifront_try_connect(pdev);
+ break;
+
+ case XenbusStateClosing:
+ dev_warn(&xdev->dev, "backend going away!\n");
+ pcifront_try_disconnect(pdev);
+ break;
+
+ case XenbusStateReconfiguring:
+ pcifront_detach_devices(pdev);
+ break;
+
+ case XenbusStateReconfigured:
+ pcifront_attach_devices(pdev);
+ break;
+ }
+}
+
+static int pcifront_xenbus_probe(struct xenbus_device *xdev,
+ const struct xenbus_device_id *id)
+{
+ int err = 0;
+ struct pcifront_device *pdev = alloc_pdev(xdev);
+
+ if (pdev == NULL) {
+ err = -ENOMEM;
+ xenbus_dev_fatal(xdev, err,
+ "Error allocating pcifront_device struct");
+ goto out;
+ }
+
+ err = pcifront_publish_info(pdev);
+ if (err)
+ free_pdev(pdev);
+
+out:
+ return err;
+}
+
+static int pcifront_xenbus_remove(struct xenbus_device *xdev)
+{
+ struct pcifront_device *pdev = dev_get_drvdata(&xdev->dev);
+ if (pdev)
+ free_pdev(pdev);
+
+ return 0;
+}
+
+static const struct xenbus_device_id xenpci_ids[] = {
+ {"pci"},
+ {""},
+};
+
+static struct xenbus_driver xenbus_pcifront_driver = {
+ .name = "pcifront",
+ .owner = THIS_MODULE,
+ .ids = xenpci_ids,
+ .probe = pcifront_xenbus_probe,
+ .remove = pcifront_xenbus_remove,
+ .otherend_changed = pcifront_backend_changed,
+};
+
+static int __init pcifront_init(void)
+{
+ if (!xen_pv_domain() || xen_initial_domain())
+ return -ENODEV;
+
+ pci_frontend_registrar(1 /* enable */);
+
+ return xenbus_register_frontend(&xenbus_pcifront_driver);
+}
+
+static void __exit pcifront_cleanup(void)
+{
+ xenbus_unregister_driver(&xenbus_pcifront_driver);
+ pci_frontend_registrar(0 /* disable */);
+}
+module_init(pcifront_init);
+module_exit(pcifront_cleanup);
+
+MODULE_DESCRIPTION("Xen PCI passthrough frontend.");
+MODULE_LICENSE("GPL");
+MODULE_ALIAS("xen:pci");
diff --git a/drivers/pcmcia/pd6729.c b/drivers/pcmcia/pd6729.c
index 8cbfa067171f..96c72e90b79c 100644
--- a/drivers/pcmcia/pd6729.c
+++ b/drivers/pcmcia/pd6729.c
@@ -725,17 +725,17 @@ static int __devinit pd6729_pci_probe(struct pci_dev *dev,
return 0;
- err_out_free_res2:
+err_out_free_res2:
if (irq_mode == 1)
free_irq(dev->irq, socket);
else
del_timer_sync(&socket->poll_timer);
- err_out_free_res:
+err_out_free_res:
pci_release_regions(dev);
- err_out_disable:
+err_out_disable:
pci_disable_device(dev);
- err_out_free_mem:
+err_out_free_mem:
kfree(socket);
return ret;
}
diff --git a/drivers/pcmcia/pd6729.h b/drivers/pcmcia/pd6729.h
index 41418d394c55..c8e84bdece38 100644
--- a/drivers/pcmcia/pd6729.h
+++ b/drivers/pcmcia/pd6729.h
@@ -15,7 +15,7 @@
struct pd6729_socket {
int number;
int card_irq;
- unsigned long io_base; /* base io address of the socket */
+ unsigned long io_base; /* base io address of the socket */
struct pcmcia_socket socket;
struct timer_list poll_timer;
};
diff --git a/drivers/pcmcia/pxa2xx_sharpsl.c b/drivers/pcmcia/pxa2xx_sharpsl.c
index 0ea3b29440e6..81af2b3bcc00 100644
--- a/drivers/pcmcia/pxa2xx_sharpsl.c
+++ b/drivers/pcmcia/pxa2xx_sharpsl.c
@@ -237,7 +237,7 @@ static struct pcmcia_low_level sharpsl_pcmcia_ops __initdata = {
#ifdef CONFIG_SA1100_COLLIE
#include "sa11xx_base.h"
-int __init pcmcia_collie_init(struct device *dev)
+int __devinit pcmcia_collie_init(struct device *dev)
{
int ret = -ENODEV;
diff --git a/drivers/pcmcia/sa1100_assabet.c b/drivers/pcmcia/sa1100_assabet.c
index fd013a1ef47a..f1e882272ab0 100644
--- a/drivers/pcmcia/sa1100_assabet.c
+++ b/drivers/pcmcia/sa1100_assabet.c
@@ -130,7 +130,7 @@ static struct pcmcia_low_level assabet_pcmcia_ops = {
.socket_suspend = assabet_pcmcia_socket_suspend,
};
-int pcmcia_assabet_init(struct device *dev)
+int __devinit pcmcia_assabet_init(struct device *dev)
{
int ret = -ENODEV;
diff --git a/drivers/pcmcia/sa1100_cerf.c b/drivers/pcmcia/sa1100_cerf.c
index 9bf088b17275..30560df8c76b 100644
--- a/drivers/pcmcia/sa1100_cerf.c
+++ b/drivers/pcmcia/sa1100_cerf.c
@@ -97,7 +97,7 @@ static struct pcmcia_low_level cerf_pcmcia_ops = {
.socket_suspend = cerf_pcmcia_socket_suspend,
};
-int __init pcmcia_cerf_init(struct device *dev)
+int __devinit pcmcia_cerf_init(struct device *dev)
{
int ret = -ENODEV;
diff --git a/drivers/pcmcia/sa1100_generic.c b/drivers/pcmcia/sa1100_generic.c
index 945857f8c284..6b228590b3fd 100644
--- a/drivers/pcmcia/sa1100_generic.c
+++ b/drivers/pcmcia/sa1100_generic.c
@@ -64,7 +64,7 @@ static int (*sa11x0_pcmcia_hw_init[])(struct device *dev) = {
#endif
};
-static int sa11x0_drv_pcmcia_probe(struct platform_device *dev)
+static int __devinit sa11x0_drv_pcmcia_probe(struct platform_device *dev)
{
int i, ret = -ENODEV;
diff --git a/drivers/pcmcia/sa1100_h3600.c b/drivers/pcmcia/sa1100_h3600.c
index 56329ad575a9..edf8f0028898 100644
--- a/drivers/pcmcia/sa1100_h3600.c
+++ b/drivers/pcmcia/sa1100_h3600.c
@@ -219,7 +219,7 @@ struct pcmcia_low_level h3600_pcmcia_ops = {
.socket_suspend = h3600_pcmcia_socket_suspend,
};
-int __init pcmcia_h3600_init(struct device *dev)
+int __devinit pcmcia_h3600_init(struct device *dev)
{
int ret = -ENODEV;
diff --git a/drivers/pcmcia/sa1100_shannon.c b/drivers/pcmcia/sa1100_shannon.c
index c4d51867a050..7ff1b43540b8 100644
--- a/drivers/pcmcia/sa1100_shannon.c
+++ b/drivers/pcmcia/sa1100_shannon.c
@@ -113,7 +113,7 @@ static struct pcmcia_low_level shannon_pcmcia_ops = {
.socket_suspend = shannon_pcmcia_socket_suspend,
};
-int __init pcmcia_shannon_init(struct device *dev)
+int __devinit pcmcia_shannon_init(struct device *dev)
{
int ret = -ENODEV;
diff --git a/drivers/pcmcia/sa1100_simpad.c b/drivers/pcmcia/sa1100_simpad.c
index 05bd504e6f18..c998f7aaadbc 100644
--- a/drivers/pcmcia/sa1100_simpad.c
+++ b/drivers/pcmcia/sa1100_simpad.c
@@ -123,7 +123,7 @@ static struct pcmcia_low_level simpad_pcmcia_ops = {
.socket_suspend = simpad_pcmcia_socket_suspend,
};
-int __init pcmcia_simpad_init(struct device *dev)
+int __devinit pcmcia_simpad_init(struct device *dev)
{
int ret = -ENODEV;
diff --git a/drivers/pcmcia/soc_common.c b/drivers/pcmcia/soc_common.c
index 689e3c02edb8..2fe8cb8e95cd 100644
--- a/drivers/pcmcia/soc_common.c
+++ b/drivers/pcmcia/soc_common.c
@@ -57,14 +57,20 @@ module_param(pc_debug, int, 0644);
void soc_pcmcia_debug(struct soc_pcmcia_socket *skt, const char *func,
int lvl, const char *fmt, ...)
{
+ struct va_format vaf;
va_list args;
if (pc_debug > lvl) {
- printk(KERN_DEBUG "skt%u: %s: ", skt->nr, func);
va_start(args, fmt);
- vprintk(fmt, args);
+
+ vaf.fmt = fmt;
+ vaf.va = &args;
+
+ printk(KERN_DEBUG "skt%u: %s: %pV", skt->nr, func, &vaf);
+
va_end(args);
}
}
+EXPORT_SYMBOL(soc_pcmcia_debug);
#endif
diff --git a/drivers/platform/x86/Kconfig b/drivers/platform/x86/Kconfig
index cff7cc2c1f02..faec777b1ed4 100644
--- a/drivers/platform/x86/Kconfig
+++ b/drivers/platform/x86/Kconfig
@@ -92,6 +92,7 @@ config DELL_WMI
tristate "Dell WMI extras"
depends on ACPI_WMI
depends on INPUT
+ select INPUT_SPARSEKMAP
---help---
Say Y here if you want to support WMI-based hotkeys on Dell laptops.
@@ -140,6 +141,7 @@ config HP_WMI
depends on ACPI_WMI
depends on INPUT
depends on RFKILL || RFKILL = n
+ select INPUT_SPARSEKMAP
help
Say Y here if you want to support WMI-based hotkeys on HP laptops and
to read data from WMI such as docking or ambient light sensor state.
@@ -171,6 +173,7 @@ config PANASONIC_LAPTOP
tristate "Panasonic Laptop Extras"
depends on INPUT && ACPI
depends on BACKLIGHT_CLASS_DEVICE
+ select INPUT_SPARSEKMAP
---help---
This driver adds support for access to backlight control and hotkeys
on Panasonic Let's Note laptops.
@@ -219,8 +222,8 @@ config SONYPI_COMPAT
---help---
Build the sonypi driver compatibility code into the sony-laptop driver.
-config IDEAPAD_ACPI
- tristate "Lenovo IdeaPad ACPI Laptop Extras"
+config IDEAPAD_LAPTOP
+ tristate "Lenovo IdeaPad Laptop Extras"
depends on ACPI
depends on RFKILL
help
@@ -365,6 +368,26 @@ config THINKPAD_ACPI_HOTKEY_POLL
If you are not sure, say Y here. The driver enables polling only if
it is strictly necessary to do so.
+config SENSORS_HDAPS
+ tristate "Thinkpad Hard Drive Active Protection System (hdaps)"
+ depends on INPUT && X86
+ select INPUT_POLLDEV
+ default n
+ help
+ This driver provides support for the IBM Hard Drive Active Protection
+ System (hdaps), which provides an accelerometer and other misc. data.
+ ThinkPads starting with the R50, T41, and X40 are supported. The
+ accelerometer data is readable via sysfs.
+
+ This driver also provides an absolute input class device, allowing
+ the laptop to act as a pinball machine-esque joystick.
+
+ If your ThinkPad is not recognized by the driver, please update to the latest
+ BIOS. This is especially the case for some R52 ThinkPads.
+
+ Say Y here if you have an applicable laptop and want to experience
+ the awesome power of hdaps.
+
config INTEL_MENLOW
tristate "Thermal Management driver for Intel menlow platform"
depends on ACPI_THERMAL
@@ -478,6 +501,7 @@ config TOPSTAR_LAPTOP
tristate "Topstar Laptop Extras"
depends on ACPI
depends on INPUT
+ select INPUT_SPARSEKMAP
---help---
This driver adds support for hotkeys found on Topstar laptops.
@@ -492,6 +516,7 @@ config ACPI_TOSHIBA
depends on INPUT
depends on RFKILL || RFKILL = n
select INPUT_POLLDEV
+ select INPUT_SPARSEKMAP
---help---
This driver adds support for access to certain system settings
on "legacy free" Toshiba laptops. These laptops can be recognized by
@@ -590,4 +615,28 @@ config INTEL_IPS
functionality. If in doubt, say Y here; it will only load on
supported platforms.
+config IBM_RTL
+ tristate "Device driver to enable PRTL support"
+ depends on X86 && PCI
+ ---help---
+ Enable support for IBM Premium Real Time Mode (PRTM).
+ This module will allow you to enter and exit PRTM in the BIOS via
+ sysfs on platforms that support this feature. A system in PRTM will
+ not receive CPU-generated SMIs for recoverable errors. Use of this
+ feature without proper support may void your hardware warranty.
+
+ If the proper BIOS support is found the driver will load and create
+ /sys/devices/system/ibm_rtl/. The "state" variable will indicate
+ whether or not the BIOS is in PRTM.
+ state = 0 (BIOS SMIs on)
+ state = 1 (BIOS SMIs off)
+
+config XO1_RFKILL
+ tristate "OLPC XO-1 software RF kill switch"
+ depends on OLPC
+ depends on RFKILL
+ ---help---
+ Support for enabling/disabling the WLAN interface on the OLPC XO-1
+ laptop.
+
endif # X86_PLATFORM_DEVICES
diff --git a/drivers/platform/x86/Makefile b/drivers/platform/x86/Makefile
index 85fb2b84f57e..9950ccc940b5 100644
--- a/drivers/platform/x86/Makefile
+++ b/drivers/platform/x86/Makefile
@@ -15,8 +15,9 @@ obj-$(CONFIG_ACERHDF) += acerhdf.o
obj-$(CONFIG_HP_WMI) += hp-wmi.o
obj-$(CONFIG_TC1100_WMI) += tc1100-wmi.o
obj-$(CONFIG_SONY_LAPTOP) += sony-laptop.o
-obj-$(CONFIG_IDEAPAD_ACPI) += ideapad_acpi.o
+obj-$(CONFIG_IDEAPAD_LAPTOP) += ideapad-laptop.o
obj-$(CONFIG_THINKPAD_ACPI) += thinkpad_acpi.o
+obj-$(CONFIG_SENSORS_HDAPS) += hdaps.o
obj-$(CONFIG_FUJITSU_LAPTOP) += fujitsu-laptop.o
obj-$(CONFIG_PANASONIC_LAPTOP) += panasonic-laptop.o
obj-$(CONFIG_INTEL_MENLOW) += intel_menlow.o
@@ -30,4 +31,5 @@ obj-$(CONFIG_INTEL_SCU_IPC) += intel_scu_ipc.o
obj-$(CONFIG_RAR_REGISTER) += intel_rar_register.o
obj-$(CONFIG_INTEL_IPS) += intel_ips.o
obj-$(CONFIG_GPIO_INTEL_PMIC) += intel_pmic_gpio.o
-
+obj-$(CONFIG_XO1_RFKILL) += xo1-rfkill.o
+obj-$(CONFIG_IBM_RTL) += ibm_rtl.o
diff --git a/drivers/platform/x86/acer-wmi.c b/drivers/platform/x86/acer-wmi.c
index 2badee2fdeed..c8c65375bfe2 100644
--- a/drivers/platform/x86/acer-wmi.c
+++ b/drivers/platform/x86/acer-wmi.c
@@ -1314,7 +1314,7 @@ static int __init acer_wmi_init(void)
AMW0_find_mailled();
if (!interface) {
- printk(ACER_ERR "No or unsupported WMI interface, unable to "
+ printk(ACER_INFO "No or unsupported WMI interface, unable to "
"load\n");
return -ENODEV;
}
diff --git a/drivers/platform/x86/asus-laptop.c b/drivers/platform/x86/asus-laptop.c
index b756e07d41b4..d235f44fd7a3 100644
--- a/drivers/platform/x86/asus-laptop.c
+++ b/drivers/platform/x86/asus-laptop.c
@@ -81,6 +81,8 @@ MODULE_PARM_DESC(wapf, "WAPF value");
static int wlan_status = 1;
static int bluetooth_status = 1;
+static int wimax_status = -1;
+static int wwan_status = -1;
module_param(wlan_status, int, 0444);
MODULE_PARM_DESC(wlan_status, "Set the wireless status on boot "
@@ -92,6 +94,16 @@ MODULE_PARM_DESC(bluetooth_status, "Set the wireless status on boot "
"(0 = disabled, 1 = enabled, -1 = don't do anything). "
"default is 1");
+module_param(wimax_status, int, 0444);
+MODULE_PARM_DESC(wimax_status, "Set the wireless status on boot "
+ "(0 = disabled, 1 = enabled, -1 = don't do anything). "
+ "default is 1");
+
+module_param(wwan_status, int, 0444);
+MODULE_PARM_DESC(wwan_status, "Set the wireless status on boot "
+ "(0 = disabled, 1 = enabled, -1 = don't do anything). "
+ "default is 1");
+
/*
* Some events we use, same for all Asus
*/
@@ -114,6 +126,8 @@ MODULE_PARM_DESC(bluetooth_status, "Set the wireless status on boot "
*/
#define WL_RSTS 0x01 /* internal Wifi */
#define BT_RSTS 0x02 /* internal Bluetooth */
+#define WM_RSTS 0x08 /* internal wimax */
+#define WW_RSTS 0x20 /* internal wwan */
/* LED */
#define METHOD_MLED "MLED"
@@ -132,6 +146,11 @@ MODULE_PARM_DESC(bluetooth_status, "Set the wireless status on boot "
*/
#define METHOD_WLAN "WLED"
#define METHOD_BLUETOOTH "BLED"
+
+/* WWAN and WIMAX */
+#define METHOD_WWAN "GSMC"
+#define METHOD_WIMAX "WMXC"
+
#define METHOD_WL_STATUS "RSTS"
/* Brightness */
@@ -236,7 +255,6 @@ struct asus_laptop {
u8 light_level; /* light sensor level */
u8 light_switch; /* light sensor switch value */
u16 event_count[128]; /* count for each event TODO make this better */
- u16 *keycode_map;
};
static const struct key_entry asus_keymap[] = {
@@ -278,6 +296,7 @@ static const struct key_entry asus_keymap[] = {
{KE_KEY, 0x99, { KEY_PHONE } },
{KE_KEY, 0xc4, { KEY_KBDILLUMUP } },
{KE_KEY, 0xc5, { KEY_KBDILLUMDOWN } },
+ {KE_KEY, 0xb5, { KEY_CALC } },
{KE_END, 0},
};
@@ -639,29 +658,29 @@ static int asus_backlight_notify(struct asus_laptop *asus)
static int asus_backlight_init(struct asus_laptop *asus)
{
struct backlight_device *bd;
- struct device *dev = &asus->platform_device->dev;
struct backlight_properties props;
- if (!acpi_check_handle(asus->handle, METHOD_BRIGHTNESS_GET, NULL) &&
- !acpi_check_handle(asus->handle, METHOD_BRIGHTNESS_SET, NULL) &&
- lcd_switch_handle) {
- memset(&props, 0, sizeof(struct backlight_properties));
- props.max_brightness = 15;
-
- bd = backlight_device_register(ASUS_LAPTOP_FILE, dev,
- asus, &asusbl_ops, &props);
- if (IS_ERR(bd)) {
- pr_err("Could not register asus backlight device\n");
- asus->backlight_device = NULL;
- return PTR_ERR(bd);
- }
+ if (acpi_check_handle(asus->handle, METHOD_BRIGHTNESS_GET, NULL) ||
+ acpi_check_handle(asus->handle, METHOD_BRIGHTNESS_SET, NULL) ||
+ !lcd_switch_handle)
+ return 0;
- asus->backlight_device = bd;
+ memset(&props, 0, sizeof(struct backlight_properties));
+ props.max_brightness = 15;
- bd->props.power = FB_BLANK_UNBLANK;
- bd->props.brightness = asus_read_brightness(bd);
- backlight_update_status(bd);
+ bd = backlight_device_register(ASUS_LAPTOP_FILE,
+ &asus->platform_device->dev, asus,
+ &asusbl_ops, &props);
+ if (IS_ERR(bd)) {
+ pr_err("Could not register asus backlight device\n");
+ asus->backlight_device = NULL;
+ return PTR_ERR(bd);
}
+
+ asus->backlight_device = bd;
+ bd->props.brightness = asus_read_brightness(bd);
+ bd->props.power = FB_BLANK_UNBLANK;
+ backlight_update_status(bd);
return 0;
}
@@ -883,6 +902,64 @@ static ssize_t store_bluetooth(struct device *dev,
}
/*
+ * Wimax
+ */
+static int asus_wimax_set(struct asus_laptop *asus, int status)
+{
+ if (write_acpi_int(asus->handle, METHOD_WIMAX, !!status)) {
+ pr_warning("Error setting wimax status to %d", status);
+ return -EIO;
+ }
+ return 0;
+}
+
+static ssize_t show_wimax(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct asus_laptop *asus = dev_get_drvdata(dev);
+
+ return sprintf(buf, "%d\n", asus_wireless_status(asus, WM_RSTS));
+}
+
+static ssize_t store_wimax(struct device *dev,
+ struct device_attribute *attr, const char *buf,
+ size_t count)
+{
+ struct asus_laptop *asus = dev_get_drvdata(dev);
+
+ return sysfs_acpi_set(asus, buf, count, METHOD_WIMAX);
+}
+
+/*
+ * Wwan
+ */
+static int asus_wwan_set(struct asus_laptop *asus, int status)
+{
+ if (write_acpi_int(asus->handle, METHOD_WWAN, !!status)) {
+ pr_warning("Error setting wwan status to %d", status);
+ return -EIO;
+ }
+ return 0;
+}
+
+static ssize_t show_wwan(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct asus_laptop *asus = dev_get_drvdata(dev);
+
+ return sprintf(buf, "%d\n", asus_wireless_status(asus, WW_RSTS));
+}
+
+static ssize_t store_wwan(struct device *dev,
+ struct device_attribute *attr, const char *buf,
+ size_t count)
+{
+ struct asus_laptop *asus = dev_get_drvdata(dev);
+
+ return sysfs_acpi_set(asus, buf, count, METHOD_WWAN);
+}
+
+/*
* Display
*/
static void asus_set_display(struct asus_laptop *asus, int value)
@@ -1065,9 +1142,9 @@ static ssize_t store_gps(struct device *dev, struct device_attribute *attr,
*/
static int asus_gps_rfkill_set(void *data, bool blocked)
{
- acpi_handle handle = data;
+ struct asus_laptop *asus = data;
- return asus_gps_switch(handle, !blocked);
+ return asus_gps_switch(asus, !blocked);
}
static const struct rfkill_ops asus_gps_rfkill_ops = {
@@ -1094,7 +1171,7 @@ static int asus_rfkill_init(struct asus_laptop *asus)
asus->gps_rfkill = rfkill_alloc("asus-gps", &asus->platform_device->dev,
RFKILL_TYPE_GPS,
- &asus_gps_rfkill_ops, NULL);
+ &asus_gps_rfkill_ops, asus);
if (!asus->gps_rfkill)
return -EINVAL;
@@ -1130,7 +1207,6 @@ static int asus_input_init(struct asus_laptop *asus)
input->phys = ASUS_LAPTOP_FILE "/input0";
input->id.bustype = BUS_HOST;
input->dev.parent = &asus->platform_device->dev;
- input_set_drvdata(input, asus);
error = sparse_keymap_setup(input, asus_keymap, NULL);
if (error) {
@@ -1159,6 +1235,7 @@ static void asus_input_exit(struct asus_laptop *asus)
sparse_keymap_free(asus->inputdev);
input_unregister_device(asus->inputdev);
}
+ asus->inputdev = NULL;
}
/*
@@ -1200,111 +1277,111 @@ static void asus_acpi_notify(struct acpi_device *device, u32 event)
static DEVICE_ATTR(infos, S_IRUGO, show_infos, NULL);
static DEVICE_ATTR(wlan, S_IRUGO | S_IWUSR, show_wlan, store_wlan);
-static DEVICE_ATTR(bluetooth, S_IRUGO | S_IWUSR, show_bluetooth,
- store_bluetooth);
+static DEVICE_ATTR(bluetooth, S_IRUGO | S_IWUSR,
+ show_bluetooth, store_bluetooth);
+static DEVICE_ATTR(wimax, S_IRUGO | S_IWUSR, show_wimax, store_wimax);
+static DEVICE_ATTR(wwan, S_IRUGO | S_IWUSR, show_wwan, store_wwan);
static DEVICE_ATTR(display, S_IRUGO | S_IWUSR, show_disp, store_disp);
static DEVICE_ATTR(ledd, S_IRUGO | S_IWUSR, show_ledd, store_ledd);
static DEVICE_ATTR(ls_level, S_IRUGO | S_IWUSR, show_lslvl, store_lslvl);
static DEVICE_ATTR(ls_switch, S_IRUGO | S_IWUSR, show_lssw, store_lssw);
static DEVICE_ATTR(gps, S_IRUGO | S_IWUSR, show_gps, store_gps);
-static void asus_sysfs_exit(struct asus_laptop *asus)
-{
- struct platform_device *device = asus->platform_device;
-
- device_remove_file(&device->dev, &dev_attr_infos);
- device_remove_file(&device->dev, &dev_attr_wlan);
- device_remove_file(&device->dev, &dev_attr_bluetooth);
- device_remove_file(&device->dev, &dev_attr_display);
- device_remove_file(&device->dev, &dev_attr_ledd);
- device_remove_file(&device->dev, &dev_attr_ls_switch);
- device_remove_file(&device->dev, &dev_attr_ls_level);
- device_remove_file(&device->dev, &dev_attr_gps);
-}
+static struct attribute *asus_attributes[] = {
+ &dev_attr_infos.attr,
+ &dev_attr_wlan.attr,
+ &dev_attr_bluetooth.attr,
+ &dev_attr_wimax.attr,
+ &dev_attr_wwan.attr,
+ &dev_attr_display.attr,
+ &dev_attr_ledd.attr,
+ &dev_attr_ls_level.attr,
+ &dev_attr_ls_switch.attr,
+ &dev_attr_gps.attr,
+ NULL
+};
-static int asus_sysfs_init(struct asus_laptop *asus)
+static mode_t asus_sysfs_is_visible(struct kobject *kobj,
+ struct attribute *attr,
+ int idx)
{
- struct platform_device *device = asus->platform_device;
- int err;
+ struct device *dev = container_of(kobj, struct device, kobj);
+ struct platform_device *pdev = to_platform_device(dev);
+ struct asus_laptop *asus = platform_get_drvdata(pdev);
+ acpi_handle handle = asus->handle;
+ bool supported;
- err = device_create_file(&device->dev, &dev_attr_infos);
- if (err)
- return err;
+ if (attr == &dev_attr_wlan.attr) {
+ supported = !acpi_check_handle(handle, METHOD_WLAN, NULL);
- if (!acpi_check_handle(asus->handle, METHOD_WLAN, NULL)) {
- err = device_create_file(&device->dev, &dev_attr_wlan);
- if (err)
- return err;
- }
+ } else if (attr == &dev_attr_bluetooth.attr) {
+ supported = !acpi_check_handle(handle, METHOD_BLUETOOTH, NULL);
- if (!acpi_check_handle(asus->handle, METHOD_BLUETOOTH, NULL)) {
- err = device_create_file(&device->dev, &dev_attr_bluetooth);
- if (err)
- return err;
- }
+ } else if (attr == &dev_attr_display.attr) {
+ supported = !acpi_check_handle(handle, METHOD_SWITCH_DISPLAY, NULL);
- if (!acpi_check_handle(asus->handle, METHOD_SWITCH_DISPLAY, NULL)) {
- err = device_create_file(&device->dev, &dev_attr_display);
- if (err)
- return err;
- }
+ } else if (attr == &dev_attr_wimax.attr) {
+ supported =
+ !acpi_check_handle(asus->handle, METHOD_WIMAX, NULL);
- if (!acpi_check_handle(asus->handle, METHOD_LEDD, NULL)) {
- err = device_create_file(&device->dev, &dev_attr_ledd);
- if (err)
- return err;
- }
+ } else if (attr == &dev_attr_wwan.attr) {
+ supported = !acpi_check_handle(asus->handle, METHOD_WWAN, NULL);
- if (!acpi_check_handle(asus->handle, METHOD_ALS_CONTROL, NULL) &&
- !acpi_check_handle(asus->handle, METHOD_ALS_LEVEL, NULL)) {
- err = device_create_file(&device->dev, &dev_attr_ls_switch);
- if (err)
- return err;
- err = device_create_file(&device->dev, &dev_attr_ls_level);
- if (err)
- return err;
- }
+ } else if (attr == &dev_attr_ledd.attr) {
+ supported = !acpi_check_handle(handle, METHOD_LEDD, NULL);
+
+ } else if (attr == &dev_attr_ls_switch.attr ||
+ attr == &dev_attr_ls_level.attr) {
+ supported = !acpi_check_handle(handle, METHOD_ALS_CONTROL, NULL) &&
+ !acpi_check_handle(handle, METHOD_ALS_LEVEL, NULL);
- if (!acpi_check_handle(asus->handle, METHOD_GPS_ON, NULL) &&
- !acpi_check_handle(asus->handle, METHOD_GPS_OFF, NULL) &&
- !acpi_check_handle(asus->handle, METHOD_GPS_STATUS, NULL)) {
- err = device_create_file(&device->dev, &dev_attr_gps);
- if (err)
- return err;
+ } else if (attr == &dev_attr_gps.attr) {
+ supported = !acpi_check_handle(handle, METHOD_GPS_ON, NULL) &&
+ !acpi_check_handle(handle, METHOD_GPS_OFF, NULL) &&
+ !acpi_check_handle(handle, METHOD_GPS_STATUS, NULL);
+ } else {
+ supported = true;
}
- return err;
+ return supported ? attr->mode : 0;
}
+
+static const struct attribute_group asus_attr_group = {
+ .is_visible = asus_sysfs_is_visible,
+ .attrs = asus_attributes,
+};
+
static int asus_platform_init(struct asus_laptop *asus)
{
- int err;
+ int result;
asus->platform_device = platform_device_alloc(ASUS_LAPTOP_FILE, -1);
if (!asus->platform_device)
return -ENOMEM;
platform_set_drvdata(asus->platform_device, asus);
- err = platform_device_add(asus->platform_device);
- if (err)
+ result = platform_device_add(asus->platform_device);
+ if (result)
goto fail_platform_device;
- err = asus_sysfs_init(asus);
- if (err)
+ result = sysfs_create_group(&asus->platform_device->dev.kobj,
+ &asus_attr_group);
+ if (result)
goto fail_sysfs;
+
return 0;
fail_sysfs:
- asus_sysfs_exit(asus);
platform_device_del(asus->platform_device);
fail_platform_device:
platform_device_put(asus->platform_device);
- return err;
+ return result;
}
static void asus_platform_exit(struct asus_laptop *asus)
{
- asus_sysfs_exit(asus);
+ sysfs_remove_group(&asus->platform_device->dev.kobj, &asus_attr_group);
platform_device_unregister(asus->platform_device);
}
@@ -1408,7 +1485,8 @@ static int asus_laptop_get_info(struct asus_laptop *asus)
/*
* The HWRS method returns information about the hardware.
- * 0x80 bit is for WLAN, 0x100 for Bluetooth.
+ * 0x80 bit is for WLAN, 0x100 for Bluetooth,
+ * 0x40 for WWAN, 0x10 for WIMAX.
* The significance of others is yet to be found.
*/
status =
@@ -1428,8 +1506,6 @@ static int asus_laptop_get_info(struct asus_laptop *asus)
return AE_OK;
}
-static bool asus_device_present;
-
static int __devinit asus_acpi_init(struct asus_laptop *asus)
{
int result = 0;
@@ -1453,6 +1529,12 @@ static int __devinit asus_acpi_init(struct asus_laptop *asus)
if (wlan_status >= 0)
asus_wlan_set(asus, !!wlan_status);
+ if (wimax_status >= 0)
+ asus_wimax_set(asus, !!wimax_status);
+
+ if (wwan_status >= 0)
+ asus_wwan_set(asus, !!wwan_status);
+
/* Keyboard Backlight is on by default */
if (!acpi_check_handle(asus->handle, METHOD_KBD_LIGHT_SET, NULL))
asus_kled_set(asus, 1);
@@ -1474,6 +1556,8 @@ static int __devinit asus_acpi_init(struct asus_laptop *asus)
return result;
}
+static bool asus_device_present;
+
static int __devinit asus_acpi_add(struct acpi_device *device)
{
struct asus_laptop *asus;
diff --git a/drivers/platform/x86/dell-laptop.c b/drivers/platform/x86/dell-laptop.c
index 4413975912e0..cf8a89a0d8f5 100644
--- a/drivers/platform/x86/dell-laptop.c
+++ b/drivers/platform/x86/dell-laptop.c
@@ -25,6 +25,8 @@
#include <linux/mm.h>
#include <linux/i8042.h>
#include <linux/slab.h>
+#include <linux/debugfs.h>
+#include <linux/seq_file.h>
#include "../../firmware/dcdbas.h"
#define BRIGHTNESS_TOKEN 0x7d
@@ -325,6 +327,75 @@ static const struct rfkill_ops dell_rfkill_ops = {
.query = dell_rfkill_query,
};
+static struct dentry *dell_laptop_dir;
+
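+/* debugfs "rfkill" file: decode the BIOS wireless status word and the
+ * cached hwswitch_state bit by bit for easier debugging. */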
+static int dell_debugfs_show(struct seq_file *s, void *data)
+{
+ int status;
+
+ get_buffer();
+ dell_send_request(buffer, 17, 11);
+ status = buffer->output[1];
+ release_buffer();
+
+ seq_printf(s, "status:\t0x%X\n", status);
+ seq_printf(s, "Bit 0 : Hardware switch supported: %lu\n",
+ status & BIT(0));
+ seq_printf(s, "Bit 1 : Wifi locator supported: %lu\n",
+ (status & BIT(1)) >> 1);
+ seq_printf(s, "Bit 2 : Wifi is supported: %lu\n",
+ (status & BIT(2)) >> 2);
+ seq_printf(s, "Bit 3 : Bluetooth is supported: %lu\n",
+ (status & BIT(3)) >> 3);
+ seq_printf(s, "Bit 4 : WWAN is supported: %lu\n",
+ (status & BIT(4)) >> 4);
+ seq_printf(s, "Bit 5 : Wireless keyboard supported: %lu\n",
+ (status & BIT(5)) >> 5);
+ seq_printf(s, "Bit 8 : Wifi is installed: %lu\n",
+ (status & BIT(8)) >> 8);
+ seq_printf(s, "Bit 9 : Bluetooth is installed: %lu\n",
+ (status & BIT(9)) >> 9);
+ seq_printf(s, "Bit 10: WWAN is installed: %lu\n",
+ (status & BIT(10)) >> 10);
+ seq_printf(s, "Bit 16: Hardware switch is on: %lu\n",
+ (status & BIT(16)) >> 16);
+ seq_printf(s, "Bit 17: Wifi is blocked: %lu\n",
+ (status & BIT(17)) >> 17);
+ seq_printf(s, "Bit 18: Bluetooth is blocked: %lu\n",
+ (status & BIT(18)) >> 18);
+ seq_printf(s, "Bit 19: WWAN is blocked: %lu\n",
+ (status & BIT(19)) >> 19);
+
+ seq_printf(s, "\nhwswitch_state:\t0x%X\n", hwswitch_state);
+ seq_printf(s, "Bit 0 : Wifi controlled by switch: %lu\n",
+ hwswitch_state & BIT(0));
+ seq_printf(s, "Bit 1 : Bluetooth controlled by switch: %lu\n",
+ (hwswitch_state & BIT(1)) >> 1);
+ seq_printf(s, "Bit 2 : WWAN controlled by switch: %lu\n",
+ (hwswitch_state & BIT(2)) >> 2);
+ seq_printf(s, "Bit 7 : Wireless switch config locked: %lu\n",
+ (hwswitch_state & BIT(7)) >> 7);
+ seq_printf(s, "Bit 8 : Wifi locator enabled: %lu\n",
+ (hwswitch_state & BIT(8)) >> 8);
+ seq_printf(s, "Bit 15: Wifi locator setting locked: %lu\n",
+ (hwswitch_state & BIT(15)) >> 15);
+
+ return 0;
+}
+
+static int dell_debugfs_open(struct inode *inode, struct file *file)
+{
+ return single_open(file, dell_debugfs_show, inode->i_private);
+}
+
+static const struct file_operations dell_debugfs_fops = {
+ .owner = THIS_MODULE,
+ .open = dell_debugfs_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = single_release,
+};
+
static void dell_update_rfkill(struct work_struct *ignored)
{
if (wifi_rfkill)
@@ -556,6 +627,11 @@ static int __init dell_init(void)
goto fail_filter;
}
+ dell_laptop_dir = debugfs_create_dir("dell_laptop", NULL);
+ if (dell_laptop_dir != NULL)
+ debugfs_create_file("rfkill", 0444, dell_laptop_dir, NULL,
+ &dell_debugfs_fops);
+
#ifdef CONFIG_ACPI
/* In the event of an ACPI backlight being available, don't
* register the platform controller.
@@ -615,6 +691,7 @@ fail_platform_driver:
static void __exit dell_exit(void)
{
+ debugfs_remove_recursive(dell_laptop_dir);
i8042_remove_filter(dell_laptop_i8042_filter);
cancel_delayed_work_sync(&dell_rfkill_work);
backlight_device_unregister(dell_backlight_device);
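
The dell-laptop changes add a read-only debugfs file built on the seq_file single_open() helpers, created in dell_init() and torn down with one debugfs_remove_recursive() in dell_exit(). The sketch below shows that boilerplate in isolation; it is not part of the patch and all names are hypothetical.

/* Sketch only -- debugfs file backed by a seq_file show() callback. */
#include <linux/module.h>
#include <linux/fs.h>
#include <linux/debugfs.h>
#include <linux/seq_file.h>

static struct dentry *example_dir;

static int example_show(struct seq_file *s, void *unused)
{
	seq_printf(s, "status:\t0x%X\n", 0x12345);
	return 0;
}

static int example_open(struct inode *inode, struct file *file)
{
	/* single_open() wires ->read/->llseek to the one show() callback */
	return single_open(file, example_show, inode->i_private);
}

static const struct file_operations example_fops = {
	.owner   = THIS_MODULE,
	.open    = example_open,
	.read    = seq_read,
	.llseek  = seq_lseek,
	.release = single_release,
};

static int __init example_init(void)
{
	example_dir = debugfs_create_dir("example", NULL);
	if (example_dir)
		debugfs_create_file("status", 0444, example_dir, NULL,
				    &example_fops);
	return 0;
}

static void __exit example_exit(void)
{
	/* removes the directory and every file created under it */
	debugfs_remove_recursive(example_dir);
}

module_init(example_init);
module_exit(example_exit);
MODULE_LICENSE("GPL");
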
diff --git a/drivers/platform/x86/dell-wmi.c b/drivers/platform/x86/dell-wmi.c
index 08fb70f6d9bf..77f1d55414c6 100644
--- a/drivers/platform/x86/dell-wmi.c
+++ b/drivers/platform/x86/dell-wmi.c
@@ -29,6 +29,7 @@
#include <linux/slab.h>
#include <linux/types.h>
#include <linux/input.h>
+#include <linux/input/sparse-keymap.h>
#include <acpi/acpi_drivers.h>
#include <linux/acpi.h>
#include <linux/string.h>
@@ -44,78 +45,70 @@ static int acpi_video;
MODULE_ALIAS("wmi:"DELL_EVENT_GUID);
-struct key_entry {
- char type; /* See KE_* below */
- u16 code;
- u16 keycode;
-};
-
-enum { KE_KEY, KE_SW, KE_IGNORE, KE_END };
-
/*
* Certain keys are flagged as KE_IGNORE. All of these are either
* notifications (rather than requests for change) or are also sent
* via the keyboard controller so should not be sent again.
*/
-static struct key_entry dell_legacy_wmi_keymap[] = {
- {KE_KEY, 0xe045, KEY_PROG1},
- {KE_KEY, 0xe009, KEY_EJECTCD},
+static const struct key_entry dell_wmi_legacy_keymap[] __initconst = {
+ { KE_KEY, 0xe045, { KEY_PROG1 } },
+ { KE_KEY, 0xe009, { KEY_EJECTCD } },
/* These also contain the brightness level at offset 6 */
- {KE_KEY, 0xe006, KEY_BRIGHTNESSUP},
- {KE_KEY, 0xe005, KEY_BRIGHTNESSDOWN},
+ { KE_KEY, 0xe006, { KEY_BRIGHTNESSUP } },
+ { KE_KEY, 0xe005, { KEY_BRIGHTNESSDOWN } },
/* Battery health status button */
- {KE_KEY, 0xe007, KEY_BATTERY},
+ { KE_KEY, 0xe007, { KEY_BATTERY } },
/* This is actually for all radios. Although physically a
* switch, the notification does not provide an indication of
* state and so it should be reported as a key */
- {KE_KEY, 0xe008, KEY_WLAN},
+ { KE_KEY, 0xe008, { KEY_WLAN } },
/* The next device is at offset 6, the active devices are at
offset 8 and the attached devices at offset 10 */
- {KE_KEY, 0xe00b, KEY_SWITCHVIDEOMODE},
+ { KE_KEY, 0xe00b, { KEY_SWITCHVIDEOMODE } },
- {KE_IGNORE, 0xe00c, KEY_KBDILLUMTOGGLE},
+ { KE_IGNORE, 0xe00c, { KEY_KBDILLUMTOGGLE } },
/* BIOS error detected */
- {KE_IGNORE, 0xe00d, KEY_RESERVED},
+ { KE_IGNORE, 0xe00d, { KEY_RESERVED } },
/* Wifi Catcher */
- {KE_KEY, 0xe011, KEY_PROG2},
+ { KE_KEY, 0xe011, { KEY_PROG2 } },
/* Ambient light sensor toggle */
- {KE_IGNORE, 0xe013, KEY_RESERVED},
-
- {KE_IGNORE, 0xe020, KEY_MUTE},
- {KE_IGNORE, 0xe02e, KEY_VOLUMEDOWN},
- {KE_IGNORE, 0xe030, KEY_VOLUMEUP},
- {KE_IGNORE, 0xe033, KEY_KBDILLUMUP},
- {KE_IGNORE, 0xe034, KEY_KBDILLUMDOWN},
- {KE_IGNORE, 0xe03a, KEY_CAPSLOCK},
- {KE_IGNORE, 0xe045, KEY_NUMLOCK},
- {KE_IGNORE, 0xe046, KEY_SCROLLLOCK},
- {KE_END, 0}
+ { KE_IGNORE, 0xe013, { KEY_RESERVED } },
+
+ { KE_IGNORE, 0xe020, { KEY_MUTE } },
+ { KE_IGNORE, 0xe02e, { KEY_VOLUMEDOWN } },
+ { KE_IGNORE, 0xe030, { KEY_VOLUMEUP } },
+ { KE_IGNORE, 0xe033, { KEY_KBDILLUMUP } },
+ { KE_IGNORE, 0xe034, { KEY_KBDILLUMDOWN } },
+ { KE_IGNORE, 0xe03a, { KEY_CAPSLOCK } },
+ { KE_IGNORE, 0xe045, { KEY_NUMLOCK } },
+ { KE_IGNORE, 0xe046, { KEY_SCROLLLOCK } },
+ { KE_END, 0 }
};
static bool dell_new_hk_type;
-struct dell_new_keymap_entry {
+struct dell_bios_keymap_entry {
u16 scancode;
u16 keycode;
};
-struct dell_hotkey_table {
+struct dell_bios_hotkey_table {
struct dmi_header header;
- struct dell_new_keymap_entry keymap[];
+ struct dell_bios_keymap_entry keymap[];
};
-static struct key_entry *dell_new_wmi_keymap;
+static const struct dell_bios_hotkey_table *dell_bios_hotkey_table;
-static u16 bios_to_linux_keycode[256] = {
+static const u16 bios_to_linux_keycode[256] __initconst = {
KEY_MEDIA, KEY_NEXTSONG, KEY_PLAYPAUSE, KEY_PREVIOUSSONG,
KEY_STOPCD, KEY_UNKNOWN, KEY_UNKNOWN, KEY_UNKNOWN,
@@ -138,68 +131,11 @@ static u16 bios_to_linux_keycode[256] = {
KEY_PROG3
};
-
-static struct key_entry *dell_wmi_keymap = dell_legacy_wmi_keymap;
-
static struct input_dev *dell_wmi_input_dev;
-static struct key_entry *dell_wmi_get_entry_by_scancode(unsigned int code)
-{
- struct key_entry *key;
-
- for (key = dell_wmi_keymap; key->type != KE_END; key++)
- if (code == key->code)
- return key;
-
- return NULL;
-}
-
-static struct key_entry *dell_wmi_get_entry_by_keycode(unsigned int keycode)
-{
- struct key_entry *key;
-
- for (key = dell_wmi_keymap; key->type != KE_END; key++)
- if (key->type == KE_KEY && keycode == key->keycode)
- return key;
-
- return NULL;
-}
-
-static int dell_wmi_getkeycode(struct input_dev *dev,
- unsigned int scancode, unsigned int *keycode)
-{
- struct key_entry *key = dell_wmi_get_entry_by_scancode(scancode);
-
- if (key && key->type == KE_KEY) {
- *keycode = key->keycode;
- return 0;
- }
-
- return -EINVAL;
-}
-
-static int dell_wmi_setkeycode(struct input_dev *dev,
- unsigned int scancode, unsigned int keycode)
-{
- struct key_entry *key;
- unsigned int old_keycode;
-
- key = dell_wmi_get_entry_by_scancode(scancode);
- if (key && key->type == KE_KEY) {
- old_keycode = key->keycode;
- key->keycode = keycode;
- set_bit(keycode, dev->keybit);
- if (!dell_wmi_get_entry_by_keycode(old_keycode))
- clear_bit(old_keycode, dev->keybit);
- return 0;
- }
- return -EINVAL;
-}
-
static void dell_wmi_notify(u32 value, void *context)
{
struct acpi_buffer response = { ACPI_ALLOCATE_BUFFER, NULL };
- static struct key_entry *key;
union acpi_object *obj;
acpi_status status;
@@ -212,8 +148,10 @@ static void dell_wmi_notify(u32 value, void *context)
obj = (union acpi_object *)response.pointer;
if (obj && obj->type == ACPI_TYPE_BUFFER) {
+ const struct key_entry *key;
int reported_key;
u16 *buffer_entry = (u16 *)obj->buffer.pointer;
+
if (dell_new_hk_type && (buffer_entry[1] != 0x10)) {
printk(KERN_INFO "dell-wmi: Received unknown WMI event"
" (0x%x)\n", buffer_entry[1]);
@@ -226,8 +164,8 @@ static void dell_wmi_notify(u32 value, void *context)
else
reported_key = (int)buffer_entry[1] & 0xffff;
- key = dell_wmi_get_entry_by_scancode(reported_key);
-
+ key = sparse_keymap_entry_from_scancode(dell_wmi_input_dev,
+ reported_key);
if (!key) {
printk(KERN_INFO "dell-wmi: Unknown key %x pressed\n",
reported_key);
@@ -237,92 +175,98 @@ static void dell_wmi_notify(u32 value, void *context)
* come via ACPI */
;
} else {
- input_report_key(dell_wmi_input_dev, key->keycode, 1);
- input_sync(dell_wmi_input_dev);
- input_report_key(dell_wmi_input_dev, key->keycode, 0);
- input_sync(dell_wmi_input_dev);
+ sparse_keymap_report_entry(dell_wmi_input_dev, key,
+ 1, true);
}
}
kfree(obj);
}
-
-static void setup_new_hk_map(const struct dmi_header *dm)
+static const struct key_entry * __init dell_wmi_prepare_new_keymap(void)
{
-
+ int hotkey_num = (dell_bios_hotkey_table->header.length - 4) /
+ sizeof(struct dell_bios_keymap_entry);
+ struct key_entry *keymap;
int i;
- int hotkey_num = (dm->length-4)/sizeof(struct dell_new_keymap_entry);
- struct dell_hotkey_table *table =
- container_of(dm, struct dell_hotkey_table, header);
- dell_new_wmi_keymap = kzalloc((hotkey_num+1) *
- sizeof(struct key_entry), GFP_KERNEL);
+ keymap = kcalloc(hotkey_num + 1, sizeof(struct key_entry), GFP_KERNEL);
+ if (!keymap)
+ return NULL;
for (i = 0; i < hotkey_num; i++) {
- dell_new_wmi_keymap[i].type = KE_KEY;
- dell_new_wmi_keymap[i].code = table->keymap[i].scancode;
- dell_new_wmi_keymap[i].keycode =
- (table->keymap[i].keycode > 255) ? 0 :
- bios_to_linux_keycode[table->keymap[i].keycode];
+ const struct dell_bios_keymap_entry *bios_entry =
+ &dell_bios_hotkey_table->keymap[i];
+ keymap[i].type = KE_KEY;
+ keymap[i].code = bios_entry->scancode;
+ keymap[i].keycode = bios_entry->keycode < 256 ?
+ bios_to_linux_keycode[bios_entry->keycode] :
+ KEY_RESERVED;
}
- dell_new_wmi_keymap[i].type = KE_END;
- dell_new_wmi_keymap[i].code = 0;
- dell_new_wmi_keymap[i].keycode = 0;
-
- dell_wmi_keymap = dell_new_wmi_keymap;
+ keymap[hotkey_num].type = KE_END;
+ return keymap;
}
-
-static void find_hk_type(const struct dmi_header *dm, void *dummy)
-{
-
- if ((dm->type == 0xb2) && (dm->length > 6)) {
- dell_new_hk_type = true;
- setup_new_hk_map(dm);
- }
-
-}
-
-
static int __init dell_wmi_input_setup(void)
{
- struct key_entry *key;
int err;
dell_wmi_input_dev = input_allocate_device();
-
if (!dell_wmi_input_dev)
return -ENOMEM;
dell_wmi_input_dev->name = "Dell WMI hotkeys";
dell_wmi_input_dev->phys = "wmi/input0";
dell_wmi_input_dev->id.bustype = BUS_HOST;
- dell_wmi_input_dev->getkeycode = dell_wmi_getkeycode;
- dell_wmi_input_dev->setkeycode = dell_wmi_setkeycode;
-
- for (key = dell_wmi_keymap; key->type != KE_END; key++) {
- switch (key->type) {
- case KE_KEY:
- set_bit(EV_KEY, dell_wmi_input_dev->evbit);
- set_bit(key->keycode, dell_wmi_input_dev->keybit);
- break;
- case KE_SW:
- set_bit(EV_SW, dell_wmi_input_dev->evbit);
- set_bit(key->keycode, dell_wmi_input_dev->swbit);
- break;
+
+ if (dell_new_hk_type) {
+ const struct key_entry *keymap = dell_wmi_prepare_new_keymap();
+ if (!keymap) {
+ err = -ENOMEM;
+ goto err_free_dev;
}
- }
- err = input_register_device(dell_wmi_input_dev);
+ err = sparse_keymap_setup(dell_wmi_input_dev, keymap, NULL);
- if (err) {
- input_free_device(dell_wmi_input_dev);
- return err;
+ /*
+ * Sparse keymap library makes a copy of keymap so we
+ * don't need the original one that was allocated.
+ */
+ kfree(keymap);
+ } else {
+ err = sparse_keymap_setup(dell_wmi_input_dev,
+ dell_wmi_legacy_keymap, NULL);
}
+ if (err)
+ goto err_free_dev;
+
+ err = input_register_device(dell_wmi_input_dev);
+ if (err)
+ goto err_free_keymap;
return 0;
+
+ err_free_keymap:
+ sparse_keymap_free(dell_wmi_input_dev);
+ err_free_dev:
+ input_free_device(dell_wmi_input_dev);
+ return err;
+}
+
+static void dell_wmi_input_destroy(void)
+{
+ sparse_keymap_free(dell_wmi_input_dev);
+ input_unregister_device(dell_wmi_input_dev);
+}
+
+static void __init find_hk_type(const struct dmi_header *dm, void *dummy)
+{
+ if (dm->type == 0xb2 && dm->length > 6) {
+ dell_new_hk_type = true;
+ dell_bios_hotkey_table =
+ container_of(dm, struct dell_bios_hotkey_table, header);
+ }
}
static int __init dell_wmi_init(void)
@@ -339,18 +283,13 @@ static int __init dell_wmi_init(void)
acpi_video = acpi_video_backlight_support();
err = dell_wmi_input_setup();
- if (err) {
- if (dell_new_hk_type)
- kfree(dell_wmi_keymap);
+ if (err)
return err;
- }
status = wmi_install_notify_handler(DELL_EVENT_GUID,
dell_wmi_notify, NULL);
if (ACPI_FAILURE(status)) {
- input_unregister_device(dell_wmi_input_dev);
- if (dell_new_hk_type)
- kfree(dell_wmi_keymap);
+ dell_wmi_input_destroy();
printk(KERN_ERR
"dell-wmi: Unable to register notify handler - %d\n",
status);
@@ -359,14 +298,11 @@ static int __init dell_wmi_init(void)
return 0;
}
+module_init(dell_wmi_init);
static void __exit dell_wmi_exit(void)
{
wmi_remove_notify_handler(DELL_EVENT_GUID);
- input_unregister_device(dell_wmi_input_dev);
- if (dell_new_hk_type)
- kfree(dell_wmi_keymap);
+ dell_wmi_input_destroy();
}
-
-module_init(dell_wmi_init);
module_exit(dell_wmi_exit);
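
The dell-wmi conversion above deletes the driver's private key_entry walking and getkeycode/setkeycode handlers and leans on the sparse-keymap library instead: sparse_keymap_setup() copies the keymap and installs the keycode handlers, sparse_keymap_entry_from_scancode() does the lookup in the notify path, and sparse_keymap_report_entry() emits the press/release pair. A condensed sketch of that flow, with hypothetical names apart from the library calls:

/* Sketch only -- the sparse-keymap setup/report/free flow. */
#include <linux/errno.h>
#include <linux/input.h>
#include <linux/input/sparse-keymap.h>

static const struct key_entry example_keymap[] = {
	{ KE_KEY, 0xe045, { KEY_PROG1 } },
	{ KE_END, 0 }
};

static struct input_dev *example_input;

static int example_input_setup(void)
{
	int err;

	example_input = input_allocate_device();
	if (!example_input)
		return -ENOMEM;

	/* copies the keymap and installs getkeycode/setkeycode for us */
	err = sparse_keymap_setup(example_input, example_keymap, NULL);
	if (err)
		goto err_free_dev;

	err = input_register_device(example_input);
	if (err)
		goto err_free_keymap;
	return 0;

err_free_keymap:
	sparse_keymap_free(example_input);
err_free_dev:
	input_free_device(example_input);
	return err;
}

static void example_handle_scancode(unsigned int scancode)
{
	const struct key_entry *key;

	key = sparse_keymap_entry_from_scancode(example_input, scancode);
	if (key)
		/* press plus autorelease in one call */
		sparse_keymap_report_entry(example_input, key, 1, true);
}
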
diff --git a/drivers/platform/x86/eeepc-laptop.c b/drivers/platform/x86/eeepc-laptop.c
index 6b8e06206c46..b2edfdcdcb84 100644
--- a/drivers/platform/x86/eeepc-laptop.c
+++ b/drivers/platform/x86/eeepc-laptop.c
@@ -165,6 +165,7 @@ struct eeepc_laptop {
u16 event_count[128]; /* count for each event */
struct platform_device *platform_device;
+ struct acpi_device *device; /* the device we are in */
struct device *hwmon_device;
struct backlight_device *backlight_device;
@@ -1193,9 +1194,9 @@ static int eeepc_input_init(struct eeepc_laptop *eeepc)
eeepc->inputdev = input;
return 0;
- err_free_keymap:
+err_free_keymap:
sparse_keymap_free(input);
- err_free_dev:
+err_free_dev:
input_free_device(input);
return error;
}
@@ -1206,6 +1207,7 @@ static void eeepc_input_exit(struct eeepc_laptop *eeepc)
sparse_keymap_free(eeepc->inputdev);
input_unregister_device(eeepc->inputdev);
}
+ eeepc->inputdev = NULL;
}
/*
@@ -1326,16 +1328,15 @@ static void cmsg_quirks(struct eeepc_laptop *eeepc)
cmsg_quirk(eeepc, CM_ASL_TPD, "TPD");
}
-static int eeepc_acpi_init(struct eeepc_laptop *eeepc,
- struct acpi_device *device)
+static int __devinit eeepc_acpi_init(struct eeepc_laptop *eeepc)
{
unsigned int init_flags;
int result;
- result = acpi_bus_get_status(device);
+ result = acpi_bus_get_status(eeepc->device);
if (result)
return result;
- if (!device->status.present) {
+ if (!eeepc->device->status.present) {
pr_err("Hotkey device not present, aborting\n");
return -ENODEV;
}
@@ -1384,12 +1385,13 @@ static int __devinit eeepc_acpi_add(struct acpi_device *device)
strcpy(acpi_device_name(device), EEEPC_ACPI_DEVICE_NAME);
strcpy(acpi_device_class(device), EEEPC_ACPI_CLASS);
device->driver_data = eeepc;
+ eeepc->device = device;
eeepc->hotplug_disabled = hotplug_disabled;
eeepc_dmi_check(eeepc);
- result = eeepc_acpi_init(eeepc, device);
+ result = eeepc_acpi_init(eeepc);
if (result)
goto fail_platform;
eeepc_enable_camera(eeepc);
diff --git a/drivers/platform/x86/eeepc-wmi.c b/drivers/platform/x86/eeepc-wmi.c
index 9dc50fbf3d0b..0d50fbbe2478 100644
--- a/drivers/platform/x86/eeepc-wmi.c
+++ b/drivers/platform/x86/eeepc-wmi.c
@@ -57,6 +57,7 @@ MODULE_ALIAS("wmi:"EEEPC_WMI_MGMT_GUID);
#define EEEPC_WMI_METHODID_DEVS 0x53564544
#define EEEPC_WMI_METHODID_DSTS 0x53544344
+#define EEEPC_WMI_METHODID_CFVS 0x53564643
#define EEEPC_WMI_DEVID_BACKLIGHT 0x00050012
@@ -69,6 +70,11 @@ static const struct key_entry eeepc_wmi_keymap[] = {
{ KE_IGNORE, NOTIFY_BRNDOWN_MIN, { KEY_BRIGHTNESSDOWN } },
{ KE_IGNORE, NOTIFY_BRNUP_MIN, { KEY_BRIGHTNESSUP } },
{ KE_KEY, 0xcc, { KEY_SWITCHVIDEOMODE } },
+ { KE_KEY, 0x6b, { KEY_F13 } }, /* Disable Touchpad */
+ { KE_KEY, 0xe1, { KEY_F14 } },
+ { KE_KEY, 0xe9, { KEY_DISPLAY_OFF } },
+ { KE_KEY, 0xe0, { KEY_PROG1 } },
+ { KE_KEY, 0x5c, { KEY_F15 } },
{ KE_END, 0},
};
@@ -292,6 +298,49 @@ static void eeepc_wmi_notify(u32 value, void *context)
kfree(obj);
}
+static ssize_t store_cpufv(struct device *dev, struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ int value;
+ struct acpi_buffer input = { (acpi_size)sizeof(value), &value };
+ acpi_status status;
+
+ if (!count || sscanf(buf, "%i", &value) != 1)
+ return -EINVAL;
+ if (value < 0 || value > 2)
+ return -EINVAL;
+
+ status = wmi_evaluate_method(EEEPC_WMI_MGMT_GUID,
+ 1, EEEPC_WMI_METHODID_CFVS, &input, NULL);
+
+ if (ACPI_FAILURE(status))
+ return -EIO;
+ else
+ return count;
+}
+
+static DEVICE_ATTR(cpufv, S_IRUGO | S_IWUSR, NULL, store_cpufv);
+
+static void eeepc_wmi_sysfs_exit(struct platform_device *device)
+{
+ device_remove_file(&device->dev, &dev_attr_cpufv);
+}
+
+static int eeepc_wmi_sysfs_init(struct platform_device *device)
+{
+ int retval;
+
+ retval = device_create_file(&device->dev, &dev_attr_cpufv);
+ if (retval)
+ goto error_sysfs;
+
+ return 0;
+
+error_sysfs:
+ eeepc_wmi_sysfs_exit(platform_device);
+ return retval;
+}
+
static int __devinit eeepc_wmi_platform_probe(struct platform_device *device)
{
struct eeepc_wmi *eeepc;
@@ -387,8 +436,14 @@ static int __init eeepc_wmi_init(void)
goto del_dev;
}
+ err = eeepc_wmi_sysfs_init(platform_device);
+ if (err)
+ goto del_sysfs;
+
return 0;
+del_sysfs:
+ eeepc_wmi_sysfs_exit(platform_device);
del_dev:
platform_device_del(platform_device);
put_dev:
@@ -403,6 +458,7 @@ static void __exit eeepc_wmi_exit(void)
{
struct eeepc_wmi *eeepc;
+ eeepc_wmi_sysfs_exit(platform_device);
eeepc = platform_get_drvdata(platform_device);
platform_driver_unregister(&platform_driver);
platform_device_unregister(platform_device);
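
The eeepc-wmi hunks add a cpufv sysfs attribute whose store handler packs the written integer into an acpi_buffer and hands it to wmi_evaluate_method() with the CFVS method id. The fragment below isolates that call pattern; it is not part of the patch, and the GUID and method id are placeholders.

/* Sketch only -- passing a small input buffer to a WMI method. */
#include <linux/acpi.h>
#include <acpi/acpi_drivers.h>

#define EXAMPLE_WMI_GUID	"00000000-0000-0000-0000-000000000000"
#define EXAMPLE_METHODID	0x1

static int example_wmi_set(int value)
{
	struct acpi_buffer input = { (acpi_size)sizeof(value), &value };
	acpi_status status;

	/* instance 1, no output buffer expected back from the method */
	status = wmi_evaluate_method(EXAMPLE_WMI_GUID, 1, EXAMPLE_METHODID,
				     &input, NULL);
	return ACPI_FAILURE(status) ? -EIO : 0;
}
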
diff --git a/drivers/hwmon/hdaps.c b/drivers/platform/x86/hdaps.c
index bfd42f18924b..067bf36d32f3 100644
--- a/drivers/hwmon/hdaps.c
+++ b/drivers/platform/x86/hdaps.c
@@ -1,5 +1,5 @@
/*
- * drivers/hwmon/hdaps.c - driver for IBM's Hard Drive Active Protection System
+ * hdaps.c - driver for IBM's Hard Drive Active Protection System
*
* Copyright (C) 2005 Robert Love <rml@novell.com>
* Copyright (C) 2005 Jesper Juhl <jesper.juhl@gmail.com>
diff --git a/drivers/platform/x86/hp-wmi.c b/drivers/platform/x86/hp-wmi.c
index c1741142a4cb..9e05af9c41cb 100644
--- a/drivers/platform/x86/hp-wmi.c
+++ b/drivers/platform/x86/hp-wmi.c
@@ -29,6 +29,7 @@
#include <linux/slab.h>
#include <linux/types.h>
#include <linux/input.h>
+#include <linux/input/sparse-keymap.h>
#include <linux/platform_device.h>
#include <linux/acpi.h>
#include <linux/rfkill.h>
@@ -88,24 +89,16 @@ struct bios_return {
u32 value;
};
-struct key_entry {
- char type; /* See KE_* below */
- u16 code;
- u16 keycode;
-};
-
-enum { KE_KEY, KE_END };
-
-static struct key_entry hp_wmi_keymap[] = {
- {KE_KEY, 0x02, KEY_BRIGHTNESSUP},
- {KE_KEY, 0x03, KEY_BRIGHTNESSDOWN},
- {KE_KEY, 0x20e6, KEY_PROG1},
- {KE_KEY, 0x20e8, KEY_MEDIA},
- {KE_KEY, 0x2142, KEY_MEDIA},
- {KE_KEY, 0x213b, KEY_INFO},
- {KE_KEY, 0x2169, KEY_DIRECTION},
- {KE_KEY, 0x231b, KEY_HELP},
- {KE_END, 0}
+static const struct key_entry hp_wmi_keymap[] = {
+ { KE_KEY, 0x02, { KEY_BRIGHTNESSUP } },
+ { KE_KEY, 0x03, { KEY_BRIGHTNESSDOWN } },
+ { KE_KEY, 0x20e6, { KEY_PROG1 } },
+ { KE_KEY, 0x20e8, { KEY_MEDIA } },
+ { KE_KEY, 0x2142, { KEY_MEDIA } },
+ { KE_KEY, 0x213b, { KEY_INFO } },
+ { KE_KEY, 0x2169, { KEY_DIRECTION } },
+ { KE_KEY, 0x231b, { KEY_HELP } },
+ { KE_END, 0 }
};
static struct input_dev *hp_wmi_input_dev;
@@ -179,6 +172,8 @@ static int hp_wmi_perform_query(int query, int write, u32 *buffer,
bios_return = *((struct bios_return *)obj->buffer.pointer);
memcpy(buffer, &bios_return.value, sizeof(bios_return.value));
+
+ kfree(obj);
return 0;
}
@@ -347,64 +342,9 @@ static DEVICE_ATTR(als, S_IRUGO | S_IWUSR, show_als, set_als);
static DEVICE_ATTR(dock, S_IRUGO, show_dock, NULL);
static DEVICE_ATTR(tablet, S_IRUGO, show_tablet, NULL);
-static struct key_entry *hp_wmi_get_entry_by_scancode(unsigned int code)
-{
- struct key_entry *key;
-
- for (key = hp_wmi_keymap; key->type != KE_END; key++)
- if (code == key->code)
- return key;
-
- return NULL;
-}
-
-static struct key_entry *hp_wmi_get_entry_by_keycode(unsigned int keycode)
-{
- struct key_entry *key;
-
- for (key = hp_wmi_keymap; key->type != KE_END; key++)
- if (key->type == KE_KEY && keycode == key->keycode)
- return key;
-
- return NULL;
-}
-
-static int hp_wmi_getkeycode(struct input_dev *dev,
- unsigned int scancode, unsigned int *keycode)
-{
- struct key_entry *key = hp_wmi_get_entry_by_scancode(scancode);
-
- if (key && key->type == KE_KEY) {
- *keycode = key->keycode;
- return 0;
- }
-
- return -EINVAL;
-}
-
-static int hp_wmi_setkeycode(struct input_dev *dev,
- unsigned int scancode, unsigned int keycode)
-{
- struct key_entry *key;
- unsigned int old_keycode;
-
- key = hp_wmi_get_entry_by_scancode(scancode);
- if (key && key->type == KE_KEY) {
- old_keycode = key->keycode;
- key->keycode = keycode;
- set_bit(keycode, dev->keybit);
- if (!hp_wmi_get_entry_by_keycode(old_keycode))
- clear_bit(old_keycode, dev->keybit);
- return 0;
- }
-
- return -EINVAL;
-}
-
static void hp_wmi_notify(u32 value, void *context)
{
struct acpi_buffer response = { ACPI_ALLOCATE_BUFFER, NULL };
- static struct key_entry *key;
union acpi_object *obj;
u32 event_id, event_data;
int key_code = 0, ret;
@@ -465,19 +405,9 @@ static void hp_wmi_notify(u32 value, void *context)
sizeof(key_code));
if (ret)
break;
- key = hp_wmi_get_entry_by_scancode(key_code);
- if (key) {
- switch (key->type) {
- case KE_KEY:
- input_report_key(hp_wmi_input_dev,
- key->keycode, 1);
- input_sync(hp_wmi_input_dev);
- input_report_key(hp_wmi_input_dev,
- key->keycode, 0);
- input_sync(hp_wmi_input_dev);
- break;
- }
- } else
+
+ if (!sparse_keymap_report_event(hp_wmi_input_dev,
+ key_code, 1, true))
printk(KERN_INFO PREFIX "Unknown key code - 0x%x\n",
key_code);
break;
@@ -510,7 +440,7 @@ static void hp_wmi_notify(u32 value, void *context)
static int __init hp_wmi_input_setup(void)
{
- struct key_entry *key;
+ acpi_status status;
int err;
hp_wmi_input_dev = input_allocate_device();
@@ -520,21 +450,14 @@ static int __init hp_wmi_input_setup(void)
hp_wmi_input_dev->name = "HP WMI hotkeys";
hp_wmi_input_dev->phys = "wmi/input0";
hp_wmi_input_dev->id.bustype = BUS_HOST;
- hp_wmi_input_dev->getkeycode = hp_wmi_getkeycode;
- hp_wmi_input_dev->setkeycode = hp_wmi_setkeycode;
-
- for (key = hp_wmi_keymap; key->type != KE_END; key++) {
- switch (key->type) {
- case KE_KEY:
- set_bit(EV_KEY, hp_wmi_input_dev->evbit);
- set_bit(key->keycode, hp_wmi_input_dev->keybit);
- break;
- }
- }
- set_bit(EV_SW, hp_wmi_input_dev->evbit);
- set_bit(SW_DOCK, hp_wmi_input_dev->swbit);
- set_bit(SW_TABLET_MODE, hp_wmi_input_dev->swbit);
+ __set_bit(EV_SW, hp_wmi_input_dev->evbit);
+ __set_bit(SW_DOCK, hp_wmi_input_dev->swbit);
+ __set_bit(SW_TABLET_MODE, hp_wmi_input_dev->swbit);
+
+ err = sparse_keymap_setup(hp_wmi_input_dev, hp_wmi_keymap, NULL);
+ if (err)
+ goto err_free_dev;
/* Set initial hardware state */
input_report_switch(hp_wmi_input_dev, SW_DOCK, hp_wmi_dock_state());
@@ -542,14 +465,32 @@ static int __init hp_wmi_input_setup(void)
hp_wmi_tablet_state());
input_sync(hp_wmi_input_dev);
- err = input_register_device(hp_wmi_input_dev);
-
- if (err) {
- input_free_device(hp_wmi_input_dev);
- return err;
+ status = wmi_install_notify_handler(HPWMI_EVENT_GUID, hp_wmi_notify, NULL);
+ if (ACPI_FAILURE(status)) {
+ err = -EIO;
+ goto err_free_keymap;
}
+ err = input_register_device(hp_wmi_input_dev);
+ if (err)
+ goto err_uninstall_notifier;
+
return 0;
+
+ err_uninstall_notifier:
+ wmi_remove_notify_handler(HPWMI_EVENT_GUID);
+ err_free_keymap:
+ sparse_keymap_free(hp_wmi_input_dev);
+ err_free_dev:
+ input_free_device(hp_wmi_input_dev);
+ return err;
+}
+
+static void hp_wmi_input_destroy(void)
+{
+ wmi_remove_notify_handler(HPWMI_EVENT_GUID);
+ sparse_keymap_free(hp_wmi_input_dev);
+ input_unregister_device(hp_wmi_input_dev);
}
static void cleanup_sysfs(struct platform_device *device)
@@ -704,15 +645,9 @@ static int __init hp_wmi_init(void)
int bios_capable = wmi_has_guid(HPWMI_BIOS_GUID);
if (event_capable) {
- err = wmi_install_notify_handler(HPWMI_EVENT_GUID,
- hp_wmi_notify, NULL);
- if (ACPI_FAILURE(err))
- return -EINVAL;
err = hp_wmi_input_setup();
- if (err) {
- wmi_remove_notify_handler(HPWMI_EVENT_GUID);
+ if (err)
return err;
- }
}
if (bios_capable) {
@@ -739,20 +674,17 @@ err_device_add:
err_device_alloc:
platform_driver_unregister(&hp_wmi_driver);
err_driver_reg:
- if (wmi_has_guid(HPWMI_EVENT_GUID)) {
- input_unregister_device(hp_wmi_input_dev);
- wmi_remove_notify_handler(HPWMI_EVENT_GUID);
- }
+ if (event_capable)
+ hp_wmi_input_destroy();
return err;
}
static void __exit hp_wmi_exit(void)
{
- if (wmi_has_guid(HPWMI_EVENT_GUID)) {
- wmi_remove_notify_handler(HPWMI_EVENT_GUID);
- input_unregister_device(hp_wmi_input_dev);
- }
+ if (wmi_has_guid(HPWMI_EVENT_GUID))
+ hp_wmi_input_destroy();
+
if (hp_wmi_platform_dev) {
platform_device_unregister(hp_wmi_platform_dev);
platform_driver_unregister(&hp_wmi_driver);
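
hp-wmi goes one step further than dell-wmi: because it never needs the key_entry itself, the notify handler reports hotkeys with a single sparse_keymap_report_event() call keyed by scancode and uses its false return to log unknown codes. A short sketch of that idiom (hypothetical names, autorelease enabled as in the driver):

/* Sketch only -- report-by-scancode with the sparse-keymap helper. */
#include <linux/kernel.h>
#include <linux/input.h>
#include <linux/input/sparse-keymap.h>

static void example_handle_event(struct input_dev *dev, unsigned int scancode)
{
	/* returns false when the scancode is not in the keymap */
	if (!sparse_keymap_report_event(dev, scancode, 1, true))
		pr_info("unknown key code - 0x%x\n", scancode);
}
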
diff --git a/drivers/platform/x86/ibm_rtl.c b/drivers/platform/x86/ibm_rtl.c
new file mode 100644
index 000000000000..94a114aa8e28
--- /dev/null
+++ b/drivers/platform/x86/ibm_rtl.c
@@ -0,0 +1,323 @@
+/*
+ * IBM Real-Time Linux driver
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ *
+ * Copyright (C) IBM Corporation, 2010
+ *
+ * Author: Keith Mannthey <kmannth@us.ibm.com>
+ * Vernon Mauery <vernux@us.ibm.com>
+ *
+ */
+
+#include <linux/kernel.h>
+#include <linux/delay.h>
+#include <linux/module.h>
+#include <linux/io.h>
+#include <linux/sysdev.h>
+#include <linux/dmi.h>
+#include <linux/efi.h>
+#include <linux/mutex.h>
+#include <asm/bios_ebda.h>
+
+static bool force;
+module_param(force, bool, 0);
+MODULE_PARM_DESC(force, "Force driver load, ignore DMI data");
+
+static bool debug;
+module_param(debug, bool, 0644);
+MODULE_PARM_DESC(debug, "Show debug output");
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Keith Mannthey <kmmanth@us.ibm.com>");
+MODULE_AUTHOR("Vernon Mauery <vernux@us.ibm.com>");
+
+#define RTL_ADDR_TYPE_IO 1
+#define RTL_ADDR_TYPE_MMIO 2
+
+#define RTL_CMD_ENTER_PRTM 1
+#define RTL_CMD_EXIT_PRTM 2
+
+/* The RTL table as presented by the EBDA: */
+struct ibm_rtl_table {
+ char signature[5]; /* signature should be "_RTL_" */
+ u8 version;
+ u8 rt_status;
+ u8 command;
+ u8 command_status;
+ u8 cmd_address_type;
+ u8 cmd_granularity;
+ u8 cmd_offset;
+ u16 reserve1;
+ u32 cmd_port_address; /* platform dependent address */
+ u32 cmd_port_value; /* platform dependent value */
+} __attribute__((packed));
+
+/* to locate "_RTL_" signature do a masked 5-byte integer compare */
+#define RTL_SIGNATURE 0x0000005f4c54525fULL
+#define RTL_MASK 0x000000ffffffffffULL
+
+#define RTL_DEBUG(A, ...) do { \
+ if (debug) \
+ pr_info("ibm-rtl: " A, ##__VA_ARGS__ ); \
+} while (0)
+
+static DEFINE_MUTEX(rtl_lock);
+static struct ibm_rtl_table __iomem *rtl_table;
+static void __iomem *ebda_map;
+static void __iomem *rtl_cmd_addr;
+static u8 rtl_cmd_type;
+static u8 rtl_cmd_width;
+
+static void __iomem *rtl_port_map(phys_addr_t addr, unsigned long len)
+{
+ if (rtl_cmd_type == RTL_ADDR_TYPE_MMIO)
+ return ioremap(addr, len);
+ return ioport_map(addr, len);
+}
+
+static void rtl_port_unmap(void __iomem *addr)
+{
+ if (addr && rtl_cmd_type == RTL_ADDR_TYPE_MMIO)
+ iounmap(addr);
+ else
+ ioport_unmap(addr);
+}
+
+static int ibm_rtl_write(u8 value)
+{
+ int ret = 0, count = 0;
+ static u32 cmd_port_val;
+
+ RTL_DEBUG("%s(%d)\n", __FUNCTION__, value);
+
+ value = value == 1 ? RTL_CMD_ENTER_PRTM : RTL_CMD_EXIT_PRTM;
+
+ mutex_lock(&rtl_lock);
+
+ if (ioread8(&rtl_table->rt_status) != value) {
+ iowrite8(value, &rtl_table->command);
+
+ switch (rtl_cmd_width) {
+ case 8:
+ cmd_port_val = ioread8(&rtl_table->cmd_port_value);
+ RTL_DEBUG("cmd_port_val = %u\n", cmd_port_val);
+ iowrite8((u8)cmd_port_val, rtl_cmd_addr);
+ break;
+ case 16:
+ cmd_port_val = ioread16(&rtl_table->cmd_port_value);
+ RTL_DEBUG("cmd_port_val = %u\n", cmd_port_val);
+ iowrite16((u16)cmd_port_val, rtl_cmd_addr);
+ break;
+ case 32:
+ cmd_port_val = ioread32(&rtl_table->cmd_port_value);
+ RTL_DEBUG("cmd_port_val = %u\n", cmd_port_val);
+ iowrite32(cmd_port_val, rtl_cmd_addr);
+ break;
+ }
+
+ while (ioread8(&rtl_table->command)) {
+ msleep(10);
+ if (count++ > 500) {
+ pr_err("ibm-rtl: Hardware not responding to "
+ "mode switch request\n");
+ ret = -EIO;
+ break;
+ }
+
+ }
+
+ if (ioread8(&rtl_table->command_status)) {
+ RTL_DEBUG("command_status reports failed command\n");
+ ret = -EIO;
+ }
+ }
+
+ mutex_unlock(&rtl_lock);
+ return ret;
+}
+
+static ssize_t rtl_show_version(struct sysdev_class *dev,
+ struct sysdev_class_attribute *attr,
+ char *buf)
+{
+ return sprintf(buf, "%d\n", (int)ioread8(&rtl_table->version));
+}
+
+static ssize_t rtl_show_state(struct sysdev_class *dev,
+ struct sysdev_class_attribute *attr,
+ char *buf)
+{
+ return sprintf(buf, "%d\n", ioread8(&rtl_table->rt_status));
+}
+
+static ssize_t rtl_set_state(struct sysdev_class *dev,
+ struct sysdev_class_attribute *attr,
+ const char *buf,
+ size_t count)
+{
+ ssize_t ret;
+
+ if (count < 1 || count > 2)
+ return -EINVAL;
+
+ switch (buf[0]) {
+ case '0':
+ ret = ibm_rtl_write(0);
+ break;
+ case '1':
+ ret = ibm_rtl_write(1);
+ break;
+ default:
+ ret = -EINVAL;
+ }
+ if (ret >= 0)
+ ret = count;
+
+ return ret;
+}
+
+static struct sysdev_class class_rtl = {
+ .name = "ibm_rtl",
+};
+
+static SYSDEV_CLASS_ATTR(version, S_IRUGO, rtl_show_version, NULL);
+static SYSDEV_CLASS_ATTR(state, 0600, rtl_show_state, rtl_set_state);
+
+static struct sysdev_class_attribute *rtl_attributes[] = {
+ &attr_version,
+ &attr_state,
+ NULL
+};
+
+
+static int rtl_setup_sysfs(void) {
+ int ret, i;
+ ret = sysdev_class_register(&class_rtl);
+
+ if (!ret) {
+ for (i = 0; rtl_attributes[i]; i++)
+ sysdev_class_create_file(&class_rtl, rtl_attributes[i]);
+ }
+ return ret;
+}
+
+static void rtl_teardown_sysfs(void) {
+ int i;
+ for (i = 0; rtl_attributes[i]; i++)
+ sysdev_class_remove_file(&class_rtl, rtl_attributes[i]);
+ sysdev_class_unregister(&class_rtl);
+}
+
+
+static struct dmi_system_id __initdata ibm_rtl_dmi_table[] = {
+ {
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "IBM"),
+ },
+ },
+ { }
+};
+
+static int __init ibm_rtl_init(void) {
+ unsigned long ebda_addr, ebda_size;
+ unsigned int ebda_kb;
+ int ret = -ENODEV, i;
+
+ if (force)
+ pr_warning("ibm-rtl: module loaded by force\n");
+ /* first ensure that we are running on IBM HW */
+ else if (efi_enabled || !dmi_check_system(ibm_rtl_dmi_table))
+ return -ENODEV;
+
+ /* Get the address for the Extended BIOS Data Area */
+ ebda_addr = get_bios_ebda();
+ if (!ebda_addr) {
+ RTL_DEBUG("no BIOS EBDA found\n");
+ return -ENODEV;
+ }
+
+ ebda_map = ioremap(ebda_addr, 4);
+ if (!ebda_map)
+ return -ENOMEM;
+
+ /* First word in the EBDA is the size in KB */
+ ebda_kb = ioread16(ebda_map);
+ RTL_DEBUG("EBDA is %d kB\n", ebda_kb);
+
+ if (ebda_kb == 0)
+ goto out;
+
+ iounmap(ebda_map);
+ ebda_size = ebda_kb*1024;
+
+ /* Remap the whole table */
+ ebda_map = ioremap(ebda_addr, ebda_size);
+ if (!ebda_map)
+ return -ENOMEM;
+
+ /* search for the _RTL_ signature at the start of the table */
+ for (i = 0; i < ebda_size/sizeof(unsigned int); i++) {
+ struct ibm_rtl_table __iomem * tmp;
+ tmp = (struct ibm_rtl_table __iomem *) (ebda_map+i);
+ if ((readq(&tmp->signature) & RTL_MASK) == RTL_SIGNATURE) {
+ phys_addr_t addr;
+ unsigned int plen;
+ RTL_DEBUG("found RTL_SIGNATURE at %#llx\n", (u64)tmp);
+ rtl_table = tmp;
+ /* The address, value, width and offset are platform
+ * dependent and found in the ibm_rtl_table */
+ rtl_cmd_width = ioread8(&rtl_table->cmd_granularity);
+ rtl_cmd_type = ioread8(&rtl_table->cmd_address_type);
+ RTL_DEBUG("rtl_cmd_width = %u, rtl_cmd_type = %u\n",
+ rtl_cmd_width, rtl_cmd_type);
+ addr = ioread32(&rtl_table->cmd_port_address);
+ RTL_DEBUG("addr = %#llx\n", (unsigned long long)addr);
+ plen = rtl_cmd_width/sizeof(char);
+ rtl_cmd_addr = rtl_port_map(addr, plen);
+ RTL_DEBUG("rtl_cmd_addr = %#llx\n", (u64)rtl_cmd_addr);
+ if (!rtl_cmd_addr) {
+ ret = -ENOMEM;
+ break;
+ }
+ ret = rtl_setup_sysfs();
+ break;
+ }
+ }
+
+out:
+ if (ret) {
+ iounmap(ebda_map);
+ rtl_port_unmap(rtl_cmd_addr);
+ }
+
+ return ret;
+}
+
+static void __exit ibm_rtl_exit(void)
+{
+ if (rtl_table) {
+ RTL_DEBUG("cleaning up");
+ /* do not leave the machine in SMI-free mode */
+ ibm_rtl_write(0);
+ /* unmap, unlink and remove all traces */
+ rtl_teardown_sysfs();
+ iounmap(ebda_map);
+ rtl_port_unmap(rtl_cmd_addr);
+ }
+}
+
+module_init(ibm_rtl_init);
+module_exit(ibm_rtl_exit);
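
The new ibm_rtl driver finds its table by walking the remapped EBDA and doing a masked 64-bit compare against the "_RTL_" signature: readq() pulls in eight bytes, RTL_MASK keeps only the low five, and the result is compared with the signature packed little-endian into a constant. The check in isolation (constants copied from the file, the helper name is hypothetical, readq() assumed available as it is in the driver):

/* Sketch only -- the masked 5-byte signature compare. */
#include <linux/io.h>
#include <linux/types.h>

#define RTL_SIGNATURE 0x0000005f4c54525fULL	/* "_RTL_", little-endian */
#define RTL_MASK      0x000000ffffffffffULL	/* keep the low 5 bytes */

static bool example_is_rtl_table(const void __iomem *p)
{
	return (readq(p) & RTL_MASK) == RTL_SIGNATURE;
}
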
diff --git a/drivers/platform/x86/ideapad_acpi.c b/drivers/platform/x86/ideapad-laptop.c
index 798496353e8c..5ff12205aa6b 100644
--- a/drivers/platform/x86/ideapad_acpi.c
+++ b/drivers/platform/x86/ideapad-laptop.c
@@ -35,112 +35,162 @@
#define IDEAPAD_DEV_KILLSW 4
struct ideapad_private {
+ acpi_handle handle;
struct rfkill *rfk[5];
-};
+} *ideapad_priv;
static struct {
char *name;
+ int cfgbit;
+ int opcode;
int type;
} ideapad_rfk_data[] = {
- /* camera has no rfkill */
- { "ideapad_wlan", RFKILL_TYPE_WLAN },
- { "ideapad_bluetooth", RFKILL_TYPE_BLUETOOTH },
- { "ideapad_3g", RFKILL_TYPE_WWAN },
- { "ideapad_killsw", RFKILL_TYPE_WLAN }
+ { "ideapad_camera", 19, 0x1E, NUM_RFKILL_TYPES },
+ { "ideapad_wlan", 18, 0x15, RFKILL_TYPE_WLAN },
+ { "ideapad_bluetooth", 16, 0x17, RFKILL_TYPE_BLUETOOTH },
+ { "ideapad_3g", 17, 0x20, RFKILL_TYPE_WWAN },
+ { "ideapad_killsw", 0, 0, RFKILL_TYPE_WLAN }
};
-static int ideapad_dev_exists(int device)
-{
- acpi_status status;
- union acpi_object in_param;
- struct acpi_object_list input = { 1, &in_param };
- struct acpi_buffer output;
- union acpi_object out_obj;
+static bool no_bt_rfkill;
+module_param(no_bt_rfkill, bool, 0444);
+MODULE_PARM_DESC(no_bt_rfkill, "No rfkill for bluetooth.");
- output.length = sizeof(out_obj);
- output.pointer = &out_obj;
+/*
+ * ACPI Helpers
+ */
+#define IDEAPAD_EC_TIMEOUT (100) /* in ms */
- in_param.type = ACPI_TYPE_INTEGER;
- in_param.integer.value = device + 1;
+static int read_method_int(acpi_handle handle, const char *method, int *val)
+{
+ acpi_status status;
+ unsigned long long result;
- status = acpi_evaluate_object(NULL, "\\_SB_.DECN", &input, &output);
+ status = acpi_evaluate_integer(handle, (char *)method, NULL, &result);
if (ACPI_FAILURE(status)) {
- printk(KERN_WARNING "IdeaPAD \\_SB_.DECN method failed %d. Is this an IdeaPAD?\n", status);
- return -ENODEV;
- }
- if (out_obj.type != ACPI_TYPE_INTEGER) {
- printk(KERN_WARNING "IdeaPAD \\_SB_.DECN method returned unexpected type\n");
- return -ENODEV;
+ *val = -1;
+ return -1;
+ } else {
+ *val = result;
+ return 0;
}
- return out_obj.integer.value;
}
-static int ideapad_dev_get_state(int device)
+static int method_vpcr(acpi_handle handle, int cmd, int *ret)
{
acpi_status status;
- union acpi_object in_param;
- struct acpi_object_list input = { 1, &in_param };
- struct acpi_buffer output;
- union acpi_object out_obj;
+ unsigned long long result;
+ struct acpi_object_list params;
+ union acpi_object in_obj;
- output.length = sizeof(out_obj);
- output.pointer = &out_obj;
+ params.count = 1;
+ params.pointer = &in_obj;
+ in_obj.type = ACPI_TYPE_INTEGER;
+ in_obj.integer.value = cmd;
- in_param.type = ACPI_TYPE_INTEGER;
- in_param.integer.value = device + 1;
+ status = acpi_evaluate_integer(handle, "VPCR", &params, &result);
- status = acpi_evaluate_object(NULL, "\\_SB_.GECN", &input, &output);
if (ACPI_FAILURE(status)) {
- printk(KERN_WARNING "IdeaPAD \\_SB_.GECN method failed %d\n", status);
- return -ENODEV;
- }
- if (out_obj.type != ACPI_TYPE_INTEGER) {
- printk(KERN_WARNING "IdeaPAD \\_SB_.GECN method returned unexpected type\n");
- return -ENODEV;
+ *ret = -1;
+ return -1;
+ } else {
+ *ret = result;
+ return 0;
}
- return out_obj.integer.value;
}
-static int ideapad_dev_set_state(int device, int state)
+static int method_vpcw(acpi_handle handle, int cmd, int data)
{
+ struct acpi_object_list params;
+ union acpi_object in_obj[2];
acpi_status status;
- union acpi_object in_params[2];
- struct acpi_object_list input = { 2, in_params };
- in_params[0].type = ACPI_TYPE_INTEGER;
- in_params[0].integer.value = device + 1;
- in_params[1].type = ACPI_TYPE_INTEGER;
- in_params[1].integer.value = state;
+ params.count = 2;
+ params.pointer = in_obj;
+ in_obj[0].type = ACPI_TYPE_INTEGER;
+ in_obj[0].integer.value = cmd;
+ in_obj[1].type = ACPI_TYPE_INTEGER;
+ in_obj[1].integer.value = data;
- status = acpi_evaluate_object(NULL, "\\_SB_.SECN", &input, NULL);
- if (ACPI_FAILURE(status)) {
- printk(KERN_WARNING "IdeaPAD \\_SB_.SECN method failed %d\n", status);
- return -ENODEV;
- }
+ status = acpi_evaluate_object(handle, "VPCW", &params, NULL);
+ if (status != AE_OK)
+ return -1;
return 0;
}
+
+static int read_ec_data(acpi_handle handle, int cmd, unsigned long *data)
+{
+ int val;
+ unsigned long int end_jiffies;
+
+ if (method_vpcw(handle, 1, cmd))
+ return -1;
+
+ for (end_jiffies = jiffies+(HZ)*IDEAPAD_EC_TIMEOUT/1000+1;
+ time_before(jiffies, end_jiffies);) {
+ schedule();
+ if (method_vpcr(handle, 1, &val))
+ return -1;
+ if (val == 0) {
+ if (method_vpcr(handle, 0, &val))
+ return -1;
+ *data = val;
+ return 0;
+ }
+ }
+ pr_err("timeout in read_ec_cmd\n");
+ return -1;
+}
+
+static int write_ec_cmd(acpi_handle handle, int cmd, unsigned long data)
+{
+ int val;
+ unsigned long int end_jiffies;
+
+ if (method_vpcw(handle, 0, data))
+ return -1;
+ if (method_vpcw(handle, 1, cmd))
+ return -1;
+
+ for (end_jiffies = jiffies+(HZ)*IDEAPAD_EC_TIMEOUT/1000+1;
+ time_before(jiffies, end_jiffies);) {
+ schedule();
+ if (method_vpcr(handle, 1, &val))
+ return -1;
+ if (val == 0)
+ return 0;
+ }
+ pr_err("timeout in write_ec_cmd\n");
+ return -1;
+}
+/* the above is ACPI helpers */
+
static ssize_t show_ideapad_cam(struct device *dev,
struct device_attribute *attr,
char *buf)
{
- int state = ideapad_dev_get_state(IDEAPAD_DEV_CAMERA);
- if (state < 0)
- return state;
+ struct ideapad_private *priv = dev_get_drvdata(dev);
+ acpi_handle handle = priv->handle;
+ unsigned long result;
- return sprintf(buf, "%d\n", state);
+ if (read_ec_data(handle, 0x1D, &result))
+ return sprintf(buf, "-1\n");
+ return sprintf(buf, "%lu\n", result);
}
static ssize_t store_ideapad_cam(struct device *dev,
struct device_attribute *attr,
const char *buf, size_t count)
{
+ struct ideapad_private *priv = dev_get_drvdata(dev);
+ acpi_handle handle = priv->handle;
int ret, state;
if (!count)
return 0;
if (sscanf(buf, "%i", &state) != 1)
return -EINVAL;
- ret = ideapad_dev_set_state(IDEAPAD_DEV_CAMERA, !!state);
+ ret = write_ec_cmd(handle, 0x1E, state);
if (ret < 0)
return ret;
return count;
@@ -154,7 +204,10 @@ static int ideapad_rfk_set(void *data, bool blocked)
if (device == IDEAPAD_DEV_KILLSW)
return -EINVAL;
- return ideapad_dev_set_state(device, !blocked);
+
+ return write_ec_cmd(ideapad_priv->handle,
+ ideapad_rfk_data[device].opcode,
+ !blocked);
}
static struct rfkill_ops ideapad_rfk_ops = {
@@ -164,32 +217,47 @@ static struct rfkill_ops ideapad_rfk_ops = {
static void ideapad_sync_rfk_state(struct acpi_device *adevice)
{
struct ideapad_private *priv = dev_get_drvdata(&adevice->dev);
- int hw_blocked = !ideapad_dev_get_state(IDEAPAD_DEV_KILLSW);
+ acpi_handle handle = priv->handle;
+ unsigned long hw_blocked;
int i;
- rfkill_set_hw_state(priv->rfk[IDEAPAD_DEV_KILLSW], hw_blocked);
- for (i = IDEAPAD_DEV_WLAN; i < IDEAPAD_DEV_KILLSW; i++)
- if (priv->rfk[i])
- rfkill_set_hw_state(priv->rfk[i], hw_blocked);
- if (hw_blocked)
+ if (read_ec_data(handle, 0x23, &hw_blocked))
return;
+ hw_blocked = !hw_blocked;
- for (i = IDEAPAD_DEV_WLAN; i < IDEAPAD_DEV_KILLSW; i++)
+ for (i = IDEAPAD_DEV_WLAN; i <= IDEAPAD_DEV_KILLSW; i++)
if (priv->rfk[i])
- rfkill_set_sw_state(priv->rfk[i], !ideapad_dev_get_state(i));
+ rfkill_set_hw_state(priv->rfk[i], hw_blocked);
}
static int ideapad_register_rfkill(struct acpi_device *adevice, int dev)
{
struct ideapad_private *priv = dev_get_drvdata(&adevice->dev);
int ret;
+ unsigned long sw_blocked;
+
+ if (no_bt_rfkill &&
+ (ideapad_rfk_data[dev].type == RFKILL_TYPE_BLUETOOTH)) {
+ /* Force to enable bluetooth when no_bt_rfkill=1 */
+ write_ec_cmd(ideapad_priv->handle,
+ ideapad_rfk_data[dev].opcode, 1);
+ return 0;
+ }
- priv->rfk[dev] = rfkill_alloc(ideapad_rfk_data[dev-1].name, &adevice->dev,
- ideapad_rfk_data[dev-1].type, &ideapad_rfk_ops,
+ priv->rfk[dev] = rfkill_alloc(ideapad_rfk_data[dev].name, &adevice->dev,
+ ideapad_rfk_data[dev].type, &ideapad_rfk_ops,
(void *)(long)dev);
if (!priv->rfk[dev])
return -ENOMEM;
+ if (read_ec_data(ideapad_priv->handle, ideapad_rfk_data[dev].opcode-1,
+ &sw_blocked)) {
+ rfkill_init_sw_state(priv->rfk[dev], 0);
+ } else {
+ sw_blocked = !sw_blocked;
+ rfkill_init_sw_state(priv->rfk[dev], sw_blocked);
+ }
+
ret = rfkill_register(priv->rfk[dev]);
if (ret) {
rfkill_destroy(priv->rfk[dev]);
@@ -217,14 +285,18 @@ MODULE_DEVICE_TABLE(acpi, ideapad_device_ids);
static int ideapad_acpi_add(struct acpi_device *adevice)
{
- int i;
+ int i, cfg;
int devs_present[5];
struct ideapad_private *priv;
+ if (read_method_int(adevice->handle, "_CFG", &cfg))
+ return -ENODEV;
+
for (i = IDEAPAD_DEV_CAMERA; i < IDEAPAD_DEV_KILLSW; i++) {
- devs_present[i] = ideapad_dev_exists(i);
- if (devs_present[i] < 0)
- return devs_present[i];
+ if (test_bit(ideapad_rfk_data[i].cfgbit, (unsigned long *)&cfg))
+ devs_present[i] = 1;
+ else
+ devs_present[i] = 0;
}
/* The hardware switch is always present */
@@ -242,7 +314,9 @@ static int ideapad_acpi_add(struct acpi_device *adevice)
}
}
+ priv->handle = adevice->handle;
dev_set_drvdata(&adevice->dev, priv);
+ ideapad_priv = priv;
for (i = IDEAPAD_DEV_WLAN; i <= IDEAPAD_DEV_KILLSW; i++) {
if (!devs_present[i])
continue;
@@ -270,7 +344,21 @@ static int ideapad_acpi_remove(struct acpi_device *adevice, int type)
static void ideapad_acpi_notify(struct acpi_device *adevice, u32 event)
{
- ideapad_sync_rfk_state(adevice);
+ acpi_handle handle = adevice->handle;
+ unsigned long vpc1, vpc2, vpc_bit;
+
+ if (read_ec_data(handle, 0x10, &vpc1))
+ return;
+ if (read_ec_data(handle, 0x1A, &vpc2))
+ return;
+
+ vpc1 = (vpc2 << 8) | vpc1;
+ for (vpc_bit = 0; vpc_bit < 16; vpc_bit++) {
+ if (test_bit(vpc_bit, &vpc1)) {
+ if (vpc_bit == 9)
+ ideapad_sync_rfk_state(adevice);
+ }
+ }
}
static struct acpi_driver ideapad_acpi_driver = {
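
The rewritten ideapad driver routes every EC access through the ACPI VPCW/VPCR method pair: VPCW(1, cmd) issues a command (optionally preceded by VPCW(0, data)), the driver then polls VPCR(1) until the busy flag drops or roughly IDEAPAD_EC_TIMEOUT milliseconds elapse, and VPCR(0) finally yields the result. The sketch below compresses the read path into one function; it reuses the method_vpcw()/method_vpcr() wrappers from the patch and swaps the manual HZ arithmetic for msecs_to_jiffies(), everything else unchanged.

/* Sketch only -- the VPC read handshake, assuming the method_vpcw() and
 * method_vpcr() wrappers defined in the patch above. */
#include <linux/acpi.h>
#include <linux/jiffies.h>
#include <linux/sched.h>

#define EXAMPLE_EC_TIMEOUT 100	/* ms, mirrors IDEAPAD_EC_TIMEOUT */

static int example_read_ec(acpi_handle handle, int cmd, unsigned long *data)
{
	unsigned long end = jiffies + msecs_to_jiffies(EXAMPLE_EC_TIMEOUT) + 1;
	int val;

	if (method_vpcw(handle, 1, cmd))	/* 1 selects the command reg */
		return -1;

	while (time_before(jiffies, end)) {
		schedule();
		if (method_vpcr(handle, 1, &val))	/* poll busy flag */
			return -1;
		if (val == 0) {			/* done: fetch the result */
			if (method_vpcr(handle, 0, &val))
				return -1;
			*data = val;
			return 0;
		}
	}
	return -1;	/* timed out, like the driver's pr_err() path */
}
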
diff --git a/drivers/platform/x86/intel_pmic_gpio.c b/drivers/platform/x86/intel_pmic_gpio.c
index 5cdcff653918..e61db9dfebef 100644
--- a/drivers/platform/x86/intel_pmic_gpio.c
+++ b/drivers/platform/x86/intel_pmic_gpio.c
@@ -29,7 +29,6 @@
#include <linux/init.h>
#include <linux/io.h>
#include <linux/gpio.h>
-#include <linux/interrupt.h>
#include <asm/intel_scu_ipc.h>
#include <linux/device.h>
#include <linux/intel_pmic_gpio.h>
@@ -142,16 +141,16 @@ static int pmic_gpio_direction_output(struct gpio_chip *chip,
if (offset < 8)/* it is GPIO */
rc = intel_scu_ipc_update_register(GPIO0 + offset,
- GPIO_DRV | GPIO_DOU | GPIO_DIR,
- GPIO_DRV | (value ? GPIO_DOU : 0));
+ GPIO_DRV | (value ? GPIO_DOU : 0),
+ GPIO_DRV | GPIO_DOU | GPIO_DIR);
else if (offset < 16)/* it is GPOSW */
rc = intel_scu_ipc_update_register(GPOSWCTL0 + offset - 8,
- GPOSW_DRV | GPOSW_DOU | GPOSW_RDRV,
- GPOSW_DRV | (value ? GPOSW_DOU : 0));
+ GPOSW_DRV | (value ? GPOSW_DOU : 0),
+ GPOSW_DRV | GPOSW_DOU | GPOSW_RDRV);
else if (offset > 15 && offset < 24)/* it is GPO */
rc = intel_scu_ipc_update_register(GPO,
- 1 << (offset - 16),
- value ? 1 << (offset - 16) : 0);
+ value ? 1 << (offset - 16) : 0,
+ 1 << (offset - 16));
else {
printk(KERN_ERR
"%s: invalid PMIC GPIO pin %d!\n", __func__, offset);
@@ -179,16 +178,16 @@ static void pmic_gpio_set(struct gpio_chip *chip, unsigned offset, int value)
{
if (offset < 8)/* it is GPIO */
intel_scu_ipc_update_register(GPIO0 + offset,
- GPIO_DRV | GPIO_DOU,
- GPIO_DRV | (value ? GPIO_DOU : 0));
+ GPIO_DRV | (value ? GPIO_DOU : 0),
+ GPIO_DRV | GPIO_DOU);
else if (offset < 16)/* it is GPOSW */
intel_scu_ipc_update_register(GPOSWCTL0 + offset - 8,
- GPOSW_DRV | GPOSW_DOU | GPOSW_RDRV,
- GPOSW_DRV | (value ? GPOSW_DOU : 0));
+ GPOSW_DRV | (value ? GPOSW_DOU : 0),
+ GPOSW_DRV | GPOSW_DOU | GPOSW_RDRV);
else if (offset > 15 && offset < 24) /* it is GPO */
intel_scu_ipc_update_register(GPO,
- 1 << (offset - 16),
- value ? 1 << (offset - 16) : 0);
+ value ? 1 << (offset - 16) : 0,
+ 1 << (offset - 16));
}
static int pmic_irq_type(unsigned irq, unsigned type)
@@ -197,7 +196,7 @@ static int pmic_irq_type(unsigned irq, unsigned type)
u32 gpio = irq - pg->irq_base;
unsigned long flags;
- if (gpio > pg->chip.ngpio)
+ if (gpio >= pg->chip.ngpio)
return -EINVAL;
spin_lock_irqsave(&pg->irqtypes.lock, flags);
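
The intel_pmic_gpio fixes swap the last two arguments of intel_scu_ipc_update_register(); the reordered calls imply a (addr, bits, mask) signature, i.e. a read-modify-write in which the mask selects the fields that may change and bits supplies their new values. Spelled out under that assumption (the helper below is illustrative, not the SCU call itself):

/* Sketch only -- bits/mask read-modify-write semantics. */
#include <linux/types.h>

static u8 example_update(u8 old, u8 bits, u8 mask)
{
	/* fields outside the mask are preserved; fields inside it are
	 * replaced by the corresponding bits of 'bits' */
	return (old & ~mask) | (bits & mask);
}

/*
 * Driving a GPIO output high, as in pmic_gpio_direction_output():
 *	mask = GPIO_DRV | GPIO_DOU | GPIO_DIR	(fields owned by the call)
 *	bits = GPIO_DRV | (value ? GPIO_DOU : 0)
 * With the arguments reversed, as before the fix, the value ends up used
 * as the mask and the wrong register fields get rewritten.
 */
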
diff --git a/drivers/platform/x86/intel_scu_ipc.c b/drivers/platform/x86/intel_scu_ipc.c
index 6abe18e638e9..41a9e34899ac 100644
--- a/drivers/platform/x86/intel_scu_ipc.c
+++ b/drivers/platform/x86/intel_scu_ipc.c
@@ -23,6 +23,7 @@
#include <linux/pm.h>
#include <linux/pci.h>
#include <linux/interrupt.h>
+#include <linux/sfi.h>
#include <asm/mrst.h>
#include <asm/intel_scu_ipc.h>
diff --git a/drivers/platform/x86/msi-wmi.c b/drivers/platform/x86/msi-wmi.c
index 42a5469a2459..35278ad7e628 100644
--- a/drivers/platform/x86/msi-wmi.c
+++ b/drivers/platform/x86/msi-wmi.c
@@ -43,16 +43,18 @@ MODULE_ALIAS("wmi:B6F3EEF2-3D2F-49DC-9DE3-85BCE18C62F2");
#define dprintk(msg...) pr_debug(DRV_PFX msg)
-#define KEYCODE_BASE 0xD0
-#define MSI_WMI_BRIGHTNESSUP KEYCODE_BASE
-#define MSI_WMI_BRIGHTNESSDOWN (KEYCODE_BASE + 1)
-#define MSI_WMI_VOLUMEUP (KEYCODE_BASE + 2)
-#define MSI_WMI_VOLUMEDOWN (KEYCODE_BASE + 3)
+#define SCANCODE_BASE 0xD0
+#define MSI_WMI_BRIGHTNESSUP SCANCODE_BASE
+#define MSI_WMI_BRIGHTNESSDOWN (SCANCODE_BASE + 1)
+#define MSI_WMI_VOLUMEUP (SCANCODE_BASE + 2)
+#define MSI_WMI_VOLUMEDOWN (SCANCODE_BASE + 3)
+#define MSI_WMI_MUTE (SCANCODE_BASE + 4)
static struct key_entry msi_wmi_keymap[] = {
{ KE_KEY, MSI_WMI_BRIGHTNESSUP, {KEY_BRIGHTNESSUP} },
{ KE_KEY, MSI_WMI_BRIGHTNESSDOWN, {KEY_BRIGHTNESSDOWN} },
{ KE_KEY, MSI_WMI_VOLUMEUP, {KEY_VOLUMEUP} },
{ KE_KEY, MSI_WMI_VOLUMEDOWN, {KEY_VOLUMEDOWN} },
+ { KE_KEY, MSI_WMI_MUTE, {KEY_MUTE} },
{ KE_END, 0}
};
static ktime_t last_pressed[ARRAY_SIZE(msi_wmi_keymap) - 1];
@@ -169,7 +171,7 @@ static void msi_wmi_notify(u32 value, void *context)
ktime_t diff;
cur = ktime_get_real();
diff = ktime_sub(cur, last_pressed[key->code -
- KEYCODE_BASE]);
+ SCANCODE_BASE]);
/* Ignore event if the same event happened in a 50 ms
timeframe -> Key press may result in 10-20 GPEs */
if (ktime_to_us(diff) < 1000 * 50) {
@@ -178,7 +180,7 @@ static void msi_wmi_notify(u32 value, void *context)
key->code, ktime_to_us(diff));
return;
}
- last_pressed[key->code - KEYCODE_BASE] = cur;
+ last_pressed[key->code - SCANCODE_BASE] = cur;
if (key->type == KE_KEY &&
/* Brightness is served via acpi video driver */
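
The msi-wmi hunks only rename KEYCODE_BASE to SCANCODE_BASE (and add a mute key), but the renamed constant indexes a per-scancode ktime debounce: an event is dropped if it lands within 50 ms of the previous event for the same scancode. The debounce in isolation, with hypothetical names around the driver's base, array size and threshold:

/* Sketch only -- per-scancode 50 ms debounce using ktime. */
#include <linux/ktime.h>
#include <linux/hrtimer.h>
#include <linux/types.h>

#define EXAMPLE_SCANCODE_BASE	0xD0
#define EXAMPLE_NUM_KEYS	5

static ktime_t example_last_pressed[EXAMPLE_NUM_KEYS];

static bool example_is_bounce(unsigned int scancode)
{
	unsigned int idx = scancode - EXAMPLE_SCANCODE_BASE;
	ktime_t now = ktime_get_real();
	ktime_t diff = ktime_sub(now, example_last_pressed[idx]);

	if (ktime_to_us(diff) < 50 * 1000)	/* inside the 50 ms window */
		return true;			/* drop the duplicate */

	example_last_pressed[idx] = now;
	return false;
}
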
diff --git a/drivers/platform/x86/panasonic-laptop.c b/drivers/platform/x86/panasonic-laptop.c
index ec01c3d8fc5a..cc1e0ba104d7 100644
--- a/drivers/platform/x86/panasonic-laptop.c
+++ b/drivers/platform/x86/panasonic-laptop.c
@@ -128,6 +128,7 @@
#include <acpi/acpi_bus.h>
#include <acpi/acpi_drivers.h>
#include <linux/input.h>
+#include <linux/input/sparse-keymap.h>
#ifndef ACPI_HOTKEY_COMPONENT
@@ -200,30 +201,29 @@ static struct acpi_driver acpi_pcc_driver = {
},
};
-#define KEYMAP_SIZE 11
-static const unsigned int initial_keymap[KEYMAP_SIZE] = {
- /* 0 */ KEY_RESERVED,
- /* 1 */ KEY_BRIGHTNESSDOWN,
- /* 2 */ KEY_BRIGHTNESSUP,
- /* 3 */ KEY_DISPLAYTOGGLE,
- /* 4 */ KEY_MUTE,
- /* 5 */ KEY_VOLUMEDOWN,
- /* 6 */ KEY_VOLUMEUP,
- /* 7 */ KEY_SLEEP,
- /* 8 */ KEY_PROG1, /* Change CPU boost */
- /* 9 */ KEY_BATTERY,
- /* 10 */ KEY_SUSPEND,
+static const struct key_entry panasonic_keymap[] = {
+ { KE_KEY, 0, { KEY_RESERVED } },
+ { KE_KEY, 1, { KEY_BRIGHTNESSDOWN } },
+ { KE_KEY, 2, { KEY_BRIGHTNESSUP } },
+ { KE_KEY, 3, { KEY_DISPLAYTOGGLE } },
+ { KE_KEY, 4, { KEY_MUTE } },
+ { KE_KEY, 5, { KEY_VOLUMEDOWN } },
+ { KE_KEY, 6, { KEY_VOLUMEUP } },
+ { KE_KEY, 7, { KEY_SLEEP } },
+ { KE_KEY, 8, { KEY_PROG1 } }, /* Change CPU boost */
+ { KE_KEY, 9, { KEY_BATTERY } },
+ { KE_KEY, 10, { KEY_SUSPEND } },
+ { KE_END, 0 }
};
struct pcc_acpi {
acpi_handle handle;
unsigned long num_sifr;
int sticky_mode;
- u32 *sinf;
+ u32 *sinf;
struct acpi_device *device;
struct input_dev *input_dev;
struct backlight_device *backlight;
- unsigned int keymap[KEYMAP_SIZE];
};
struct pcc_keyinput {
@@ -267,7 +267,7 @@ static inline int acpi_pcc_get_sqty(struct acpi_device *device)
}
}
-static int acpi_pcc_retrieve_biosdata(struct pcc_acpi *pcc, u32 *sinf)
+static int acpi_pcc_retrieve_biosdata(struct pcc_acpi *pcc)
{
acpi_status status;
struct acpi_buffer buffer = {ACPI_ALLOCATE_BUFFER, NULL};
@@ -285,6 +285,7 @@ static int acpi_pcc_retrieve_biosdata(struct pcc_acpi *pcc, u32 *sinf)
hkey = buffer.pointer;
if (!hkey || (hkey->type != ACPI_TYPE_PACKAGE)) {
ACPI_DEBUG_PRINT((ACPI_DB_ERROR, "Invalid HKEY.SINF\n"));
+ status = AE_ERROR;
goto end;
}
@@ -298,12 +299,12 @@ static int acpi_pcc_retrieve_biosdata(struct pcc_acpi *pcc, u32 *sinf)
for (i = 0; i < hkey->package.count; i++) {
union acpi_object *element = &(hkey->package.elements[i]);
if (likely(element->type == ACPI_TYPE_INTEGER)) {
- sinf[i] = element->integer.value;
+ pcc->sinf[i] = element->integer.value;
} else
ACPI_DEBUG_PRINT((ACPI_DB_ERROR,
"Invalid HKEY.SINF data\n"));
}
- sinf[hkey->package.count] = -1;
+ pcc->sinf[hkey->package.count] = -1;
end:
kfree(buffer.pointer);
@@ -321,7 +322,7 @@ static int bl_get(struct backlight_device *bd)
{
struct pcc_acpi *pcc = bl_get_data(bd);
- if (!acpi_pcc_retrieve_biosdata(pcc, pcc->sinf))
+ if (!acpi_pcc_retrieve_biosdata(pcc))
return -EIO;
return pcc->sinf[SINF_AC_CUR_BRIGHT];
@@ -333,7 +334,7 @@ static int bl_set_status(struct backlight_device *bd)
int bright = bd->props.brightness;
int rc;
- if (!acpi_pcc_retrieve_biosdata(pcc, pcc->sinf))
+ if (!acpi_pcc_retrieve_biosdata(pcc))
return -EIO;
if (bright < pcc->sinf[SINF_AC_MIN_BRIGHT])
@@ -367,7 +368,7 @@ static ssize_t show_numbatt(struct device *dev, struct device_attribute *attr,
struct acpi_device *acpi = to_acpi_device(dev);
struct pcc_acpi *pcc = acpi_driver_data(acpi);
- if (!acpi_pcc_retrieve_biosdata(pcc, pcc->sinf))
+ if (!acpi_pcc_retrieve_biosdata(pcc))
return -EIO;
return snprintf(buf, PAGE_SIZE, "%u\n", pcc->sinf[SINF_NUM_BATTERIES]);
@@ -379,7 +380,7 @@ static ssize_t show_lcdtype(struct device *dev, struct device_attribute *attr,
struct acpi_device *acpi = to_acpi_device(dev);
struct pcc_acpi *pcc = acpi_driver_data(acpi);
- if (!acpi_pcc_retrieve_biosdata(pcc, pcc->sinf))
+ if (!acpi_pcc_retrieve_biosdata(pcc))
return -EIO;
return snprintf(buf, PAGE_SIZE, "%u\n", pcc->sinf[SINF_LCD_TYPE]);
@@ -391,7 +392,7 @@ static ssize_t show_mute(struct device *dev, struct device_attribute *attr,
struct acpi_device *acpi = to_acpi_device(dev);
struct pcc_acpi *pcc = acpi_driver_data(acpi);
- if (!acpi_pcc_retrieve_biosdata(pcc, pcc->sinf))
+ if (!acpi_pcc_retrieve_biosdata(pcc))
return -EIO;
return snprintf(buf, PAGE_SIZE, "%u\n", pcc->sinf[SINF_MUTE]);
@@ -403,7 +404,7 @@ static ssize_t show_sticky(struct device *dev, struct device_attribute *attr,
struct acpi_device *acpi = to_acpi_device(dev);
struct pcc_acpi *pcc = acpi_driver_data(acpi);
- if (!acpi_pcc_retrieve_biosdata(pcc, pcc->sinf))
+ if (!acpi_pcc_retrieve_biosdata(pcc))
return -EIO;
return snprintf(buf, PAGE_SIZE, "%u\n", pcc->sinf[SINF_STICKY_KEY]);
@@ -446,56 +447,10 @@ static struct attribute_group pcc_attr_group = {
/* hotkey input device driver */
-static int pcc_getkeycode(struct input_dev *dev,
- unsigned int scancode, unsigned int *keycode)
-{
- struct pcc_acpi *pcc = input_get_drvdata(dev);
-
- if (scancode >= ARRAY_SIZE(pcc->keymap))
- return -EINVAL;
-
- *keycode = pcc->keymap[scancode];
-
- return 0;
-}
-
-static int keymap_get_by_keycode(struct pcc_acpi *pcc, unsigned int keycode)
-{
- int i;
-
- for (i = 0; i < ARRAY_SIZE(pcc->keymap); i++) {
- if (pcc->keymap[i] == keycode)
- return i+1;
- }
-
- return 0;
-}
-
-static int pcc_setkeycode(struct input_dev *dev,
- unsigned int scancode, unsigned int keycode)
-{
- struct pcc_acpi *pcc = input_get_drvdata(dev);
- int oldkeycode;
-
- if (scancode >= ARRAY_SIZE(pcc->keymap))
- return -EINVAL;
-
- oldkeycode = pcc->keymap[scancode];
- pcc->keymap[scancode] = keycode;
-
- set_bit(keycode, dev->keybit);
-
- if (!keymap_get_by_keycode(pcc, oldkeycode))
- clear_bit(oldkeycode, dev->keybit);
-
- return 0;
-}
-
static void acpi_pcc_generate_keyinput(struct pcc_acpi *pcc)
{
struct input_dev *hotk_input_dev = pcc->input_dev;
int rc;
- int key_code, hkey_num;
unsigned long long result;
rc = acpi_evaluate_integer(pcc->handle, METHOD_HKEY_QUERY,
@@ -508,25 +463,10 @@ static void acpi_pcc_generate_keyinput(struct pcc_acpi *pcc)
acpi_bus_generate_proc_event(pcc->device, HKEY_NOTIFY, result);
- hkey_num = result & 0xf;
-
- if (hkey_num < 0 || hkey_num >= ARRAY_SIZE(pcc->keymap)) {
+ if (!sparse_keymap_report_event(hotk_input_dev,
+ result & 0xf, result & 0x80, false))
ACPI_DEBUG_PRINT((ACPI_DB_ERROR,
- "hotkey number out of range: %d\n",
- hkey_num));
- return;
- }
-
- key_code = pcc->keymap[hkey_num];
-
- if (key_code != KEY_RESERVED) {
- int pushed = (result & 0x80) ? TRUE : FALSE;
-
- input_report_key(hotk_input_dev, key_code, pushed);
- input_sync(hotk_input_dev);
- }
-
- return;
+ "Unknown hotkey event: %d\n", result));
}
static void acpi_pcc_hotkey_notify(struct acpi_device *device, u32 event)
@@ -545,40 +485,55 @@ static void acpi_pcc_hotkey_notify(struct acpi_device *device, u32 event)
static int acpi_pcc_init_input(struct pcc_acpi *pcc)
{
- int i, rc;
+ struct input_dev *input_dev;
+ int error;
- pcc->input_dev = input_allocate_device();
- if (!pcc->input_dev) {
+ input_dev = input_allocate_device();
+ if (!input_dev) {
ACPI_DEBUG_PRINT((ACPI_DB_ERROR,
"Couldn't allocate input device for hotkey"));
return -ENOMEM;
}
- pcc->input_dev->evbit[0] = BIT(EV_KEY);
-
- pcc->input_dev->name = ACPI_PCC_DRIVER_NAME;
- pcc->input_dev->phys = ACPI_PCC_INPUT_PHYS;
- pcc->input_dev->id.bustype = BUS_HOST;
- pcc->input_dev->id.vendor = 0x0001;
- pcc->input_dev->id.product = 0x0001;
- pcc->input_dev->id.version = 0x0100;
- pcc->input_dev->getkeycode = pcc_getkeycode;
- pcc->input_dev->setkeycode = pcc_setkeycode;
+ input_dev->name = ACPI_PCC_DRIVER_NAME;
+ input_dev->phys = ACPI_PCC_INPUT_PHYS;
+ input_dev->id.bustype = BUS_HOST;
+ input_dev->id.vendor = 0x0001;
+ input_dev->id.product = 0x0001;
+ input_dev->id.version = 0x0100;
- /* load initial keymap */
- memcpy(pcc->keymap, initial_keymap, sizeof(pcc->keymap));
+ error = sparse_keymap_setup(input_dev, panasonic_keymap, NULL);
+ if (error) {
+ ACPI_DEBUG_PRINT((ACPI_DB_ERROR,
+ "Unable to setup input device keymap\n"));
+ goto err_free_dev;
+ }
- for (i = 0; i < ARRAY_SIZE(pcc->keymap); i++)
- __set_bit(pcc->keymap[i], pcc->input_dev->keybit);
- __clear_bit(KEY_RESERVED, pcc->input_dev->keybit);
+ error = input_register_device(input_dev);
+ if (error) {
+ ACPI_DEBUG_PRINT((ACPI_DB_ERROR,
+ "Unable to register input device\n"));
+ goto err_free_keymap;
+ }
- input_set_drvdata(pcc->input_dev, pcc);
+ pcc->input_dev = input_dev;
+ return 0;
- rc = input_register_device(pcc->input_dev);
- if (rc < 0)
- input_free_device(pcc->input_dev);
+ err_free_keymap:
+ sparse_keymap_free(input_dev);
+ err_free_dev:
+ input_free_device(input_dev);
+ return error;
+}
- return rc;
+static void acpi_pcc_destroy_input(struct pcc_acpi *pcc)
+{
+ sparse_keymap_free(pcc->input_dev);
+ input_unregister_device(pcc->input_dev);
+ /*
+ * No need to input_free_device() since core input API refcounts
+ * and free()s the device.
+ */
}
/* kernel module interface */
@@ -636,12 +591,13 @@ static int acpi_pcc_hotkey_add(struct acpi_device *device)
if (result) {
ACPI_DEBUG_PRINT((ACPI_DB_ERROR,
"Error installing keyinput handler\n"));
- goto out_hotkey;
+ goto out_sinf;
}
- if (!acpi_pcc_retrieve_biosdata(pcc, pcc->sinf)) {
+ if (!acpi_pcc_retrieve_biosdata(pcc)) {
ACPI_DEBUG_PRINT((ACPI_DB_ERROR,
"Couldn't retrieve BIOS data\n"));
+ result = -EIO;
goto out_input;
}
/* initialize backlight */
@@ -651,7 +607,7 @@ static int acpi_pcc_hotkey_add(struct acpi_device *device)
&pcc_backlight_ops, &props);
if (IS_ERR(pcc->backlight)) {
result = PTR_ERR(pcc->backlight);
- goto out_sinf;
+ goto out_input;
}
/* read the initial brightness setting from the hardware */
@@ -669,12 +625,10 @@ static int acpi_pcc_hotkey_add(struct acpi_device *device)
out_backlight:
backlight_device_unregister(pcc->backlight);
+out_input:
+ acpi_pcc_destroy_input(pcc);
out_sinf:
kfree(pcc->sinf);
-out_input:
- input_unregister_device(pcc->input_dev);
- /* no need to input_free_device() since core input API refcount and
- * free()s the device */
out_hotkey:
kfree(pcc);
@@ -709,9 +663,7 @@ static int acpi_pcc_hotkey_remove(struct acpi_device *device, int type)
backlight_device_unregister(pcc->backlight);
- input_unregister_device(pcc->input_dev);
- /* no need to input_free_device() since core input API refcount and
- * free()s the device */
+ acpi_pcc_destroy_input(pcc);
kfree(pcc->sinf);
kfree(pcc);
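The hotkey drivers touched in this series (panasonic-laptop above, topstar-laptop and toshiba_acpi below) end up with the same shape once their open-coded keymaps and getkeycode/setkeycode handlers are gone. As a rough sketch of that shape, assuming the sparse-keymap API of this kernel generation; the device name, scancodes and keymap below are purely illustrative and not taken from any of these drivers:

#include <linux/kernel.h>
#include <linux/input.h>
#include <linux/input/sparse-keymap.h>

static const struct key_entry example_keymap[] = {
	{ KE_KEY, 0x01, { KEY_PROG1 } },	/* scancode 0x01 -> KEY_PROG1 */
	{ KE_IGNORE, 0x02 },			/* known event we deliberately ignore */
	{ KE_END, 0 }
};

static struct input_dev *example_input;

static int example_setup_input(void)
{
	int error;

	example_input = input_allocate_device();
	if (!example_input)
		return -ENOMEM;

	example_input->name = "Example hotkey device";
	example_input->id.bustype = BUS_HOST;

	/* copies the keymap and sets up EV_KEY/keybit on our behalf */
	error = sparse_keymap_setup(example_input, example_keymap, NULL);
	if (error)
		goto err_free_dev;

	error = input_register_device(example_input);
	if (error)
		goto err_free_keymap;

	return 0;

err_free_keymap:
	sparse_keymap_free(example_input);
err_free_dev:
	input_free_device(example_input);
	return error;
}

static void example_handle_event(unsigned int scancode)
{
	/* reports press and release (autorelease); returns false for unknown codes */
	if (!sparse_keymap_report_event(example_input, scancode, 1, true))
		pr_info("unknown scancode 0x%02x\n", scancode);
}

Teardown then mirrors the drivers above: sparse_keymap_free() on the registered device followed by input_unregister_device(), with no input_free_device() needed once registration has succeeded.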
diff --git a/drivers/platform/x86/thinkpad_acpi.c b/drivers/platform/x86/thinkpad_acpi.c
index 2d61186ad5a2..e8c21994b36d 100644
--- a/drivers/platform/x86/thinkpad_acpi.c
+++ b/drivers/platform/x86/thinkpad_acpi.c
@@ -8497,7 +8497,6 @@ static void ibm_exit(struct ibm_struct *ibm)
ibm->acpi->type,
dispatch_acpi_notify);
ibm->flags.acpi_notify_installed = 0;
- ibm->flags.acpi_notify_installed = 0;
}
if (ibm->flags.proc_created) {
diff --git a/drivers/platform/x86/topstar-laptop.c b/drivers/platform/x86/topstar-laptop.c
index ff4b476f1950..1d07d6d09f27 100644
--- a/drivers/platform/x86/topstar-laptop.c
+++ b/drivers/platform/x86/topstar-laptop.c
@@ -19,6 +19,7 @@
#include <linux/slab.h>
#include <linux/acpi.h>
#include <linux/input.h>
+#include <linux/input/sparse-keymap.h>
#define ACPI_TOPSTAR_CLASS "topstar"
@@ -26,52 +27,37 @@ struct topstar_hkey {
struct input_dev *inputdev;
};
-struct tps_key_entry {
- u8 code;
- u16 keycode;
-};
-
-static struct tps_key_entry topstar_keymap[] = {
- { 0x80, KEY_BRIGHTNESSUP },
- { 0x81, KEY_BRIGHTNESSDOWN },
- { 0x83, KEY_VOLUMEUP },
- { 0x84, KEY_VOLUMEDOWN },
- { 0x85, KEY_MUTE },
- { 0x86, KEY_SWITCHVIDEOMODE },
- { 0x87, KEY_F13 }, /* touchpad enable/disable key */
- { 0x88, KEY_WLAN },
- { 0x8a, KEY_WWW },
- { 0x8b, KEY_MAIL },
- { 0x8c, KEY_MEDIA },
- { 0x96, KEY_F14 }, /* G key? */
- { }
-};
-
-static struct tps_key_entry *tps_get_key_by_scancode(unsigned int code)
-{
- struct tps_key_entry *key;
-
- for (key = topstar_keymap; key->code; key++)
- if (code == key->code)
- return key;
+static const struct key_entry topstar_keymap[] = {
+ { KE_KEY, 0x80, { KEY_BRIGHTNESSUP } },
+ { KE_KEY, 0x81, { KEY_BRIGHTNESSDOWN } },
+ { KE_KEY, 0x83, { KEY_VOLUMEUP } },
+ { KE_KEY, 0x84, { KEY_VOLUMEDOWN } },
+ { KE_KEY, 0x85, { KEY_MUTE } },
+ { KE_KEY, 0x86, { KEY_SWITCHVIDEOMODE } },
+ { KE_KEY, 0x87, { KEY_F13 } }, /* touchpad enable/disable key */
+ { KE_KEY, 0x88, { KEY_WLAN } },
+ { KE_KEY, 0x8a, { KEY_WWW } },
+ { KE_KEY, 0x8b, { KEY_MAIL } },
+ { KE_KEY, 0x8c, { KEY_MEDIA } },
- return NULL;
-}
-
-static struct tps_key_entry *tps_get_key_by_keycode(unsigned int code)
-{
- struct tps_key_entry *key;
+ /* Known non-hotkey events that we don't handle or don't care about yet */
+ { KE_IGNORE, 0x8e, },
+ { KE_IGNORE, 0x8f, },
+ { KE_IGNORE, 0x90, },
- for (key = topstar_keymap; key->code; key++)
- if (code == key->keycode)
- return key;
+ /*
+ * The 'G key' generates two event codes; map both to a
+ * single key/event code for now. Consider replacing this
+ * with a switch (3G switch - SW_3G?) later.
+ */
+ { KE_KEY, 0x96, { KEY_F14 } },
+ { KE_KEY, 0x97, { KEY_F14 } },
- return NULL;
-}
+ { KE_END, 0 }
+};
static void acpi_topstar_notify(struct acpi_device *device, u32 event)
{
- struct tps_key_entry *key;
static bool dup_evnt[2];
bool *dup;
struct topstar_hkey *hkey = acpi_driver_data(device);
@@ -86,27 +72,8 @@ static void acpi_topstar_notify(struct acpi_device *device, u32 event)
*dup = true;
}
- /*
- * 'G key' generate two event codes, convert to only
- * one event/key code for now (3G switch?)
- */
- if (event == 0x97)
- event = 0x96;
-
- key = tps_get_key_by_scancode(event);
- if (key) {
- input_report_key(hkey->inputdev, key->keycode, 1);
- input_sync(hkey->inputdev);
- input_report_key(hkey->inputdev, key->keycode, 0);
- input_sync(hkey->inputdev);
- return;
- }
-
- /* Known non hotkey events don't handled or that we don't care yet */
- if (event == 0x8e || event == 0x8f || event == 0x90)
- return;
-
- pr_info("unknown event = 0x%02x\n", event);
+ if (!sparse_keymap_report_event(hkey->inputdev, event, 1, true))
+ pr_info("unknown event = 0x%02x\n", event);
}
static int acpi_topstar_fncx_switch(struct acpi_device *device, bool state)
@@ -127,62 +94,41 @@ static int acpi_topstar_fncx_switch(struct acpi_device *device, bool state)
return 0;
}
-static int topstar_getkeycode(struct input_dev *dev,
- unsigned int scancode, unsigned int *keycode)
-{
- struct tps_key_entry *key = tps_get_key_by_scancode(scancode);
-
- if (!key)
- return -EINVAL;
-
- *keycode = key->keycode;
- return 0;
-}
-
-static int topstar_setkeycode(struct input_dev *dev,
- unsigned int scancode, unsigned int keycode)
-{
- struct tps_key_entry *key;
- int old_keycode;
-
- key = tps_get_key_by_scancode(scancode);
-
- if (!key)
- return -EINVAL;
-
- old_keycode = key->keycode;
- key->keycode = keycode;
- set_bit(keycode, dev->keybit);
- if (!tps_get_key_by_keycode(old_keycode))
- clear_bit(old_keycode, dev->keybit);
- return 0;
-}
-
static int acpi_topstar_init_hkey(struct topstar_hkey *hkey)
{
- struct tps_key_entry *key;
+ struct input_dev *input;
+ int error;
- hkey->inputdev = input_allocate_device();
- if (!hkey->inputdev) {
+ input = input_allocate_device();
+ if (!input) {
pr_err("Unable to allocate input device\n");
- return -ENODEV;
+ return -ENOMEM;
}
- hkey->inputdev->name = "Topstar Laptop extra buttons";
- hkey->inputdev->phys = "topstar/input0";
- hkey->inputdev->id.bustype = BUS_HOST;
- hkey->inputdev->getkeycode = topstar_getkeycode;
- hkey->inputdev->setkeycode = topstar_setkeycode;
- for (key = topstar_keymap; key->code; key++) {
- set_bit(EV_KEY, hkey->inputdev->evbit);
- set_bit(key->keycode, hkey->inputdev->keybit);
+
+ input->name = "Topstar Laptop extra buttons";
+ input->phys = "topstar/input0";
+ input->id.bustype = BUS_HOST;
+
+ error = sparse_keymap_setup(input, topstar_keymap, NULL);
+ if (error) {
+ pr_err("Unable to setup input device keymap\n");
+ goto err_free_dev;
}
- if (input_register_device(hkey->inputdev)) {
+
+ error = input_register_device(input);
+ if (error) {
pr_err("Unable to register input device\n");
- input_free_device(hkey->inputdev);
- return -ENODEV;
+ goto err_free_keymap;
}
+ hkey->inputdev = input;
return 0;
+
+ err_free_keymap:
+ sparse_keymap_free(input);
+ err_free_dev:
+ input_free_device(input);
+ return error;
}
static int acpi_topstar_add(struct acpi_device *device)
@@ -216,6 +162,7 @@ static int acpi_topstar_remove(struct acpi_device *device, int type)
acpi_topstar_fncx_switch(device, false);
+ sparse_keymap_free(tps_hkey->inputdev);
input_unregister_device(tps_hkey->inputdev);
kfree(tps_hkey);
diff --git a/drivers/platform/x86/toshiba_acpi.c b/drivers/platform/x86/toshiba_acpi.c
index 7d67a45bb2b0..4276da7291b8 100644
--- a/drivers/platform/x86/toshiba_acpi.c
+++ b/drivers/platform/x86/toshiba_acpi.c
@@ -48,6 +48,7 @@
#include <linux/platform_device.h>
#include <linux/rfkill.h>
#include <linux/input.h>
+#include <linux/input/sparse-keymap.h>
#include <linux/leds.h>
#include <linux/slab.h>
@@ -121,36 +122,29 @@ static const struct acpi_device_id toshiba_device_ids[] = {
};
MODULE_DEVICE_TABLE(acpi, toshiba_device_ids);
-struct key_entry {
- char type;
- u16 code;
- u16 keycode;
-};
-
-enum {KE_KEY, KE_END};
-
-static struct key_entry toshiba_acpi_keymap[] = {
- {KE_KEY, 0x101, KEY_MUTE},
- {KE_KEY, 0x102, KEY_ZOOMOUT},
- {KE_KEY, 0x103, KEY_ZOOMIN},
- {KE_KEY, 0x13b, KEY_COFFEE},
- {KE_KEY, 0x13c, KEY_BATTERY},
- {KE_KEY, 0x13d, KEY_SLEEP},
- {KE_KEY, 0x13e, KEY_SUSPEND},
- {KE_KEY, 0x13f, KEY_SWITCHVIDEOMODE},
- {KE_KEY, 0x140, KEY_BRIGHTNESSDOWN},
- {KE_KEY, 0x141, KEY_BRIGHTNESSUP},
- {KE_KEY, 0x142, KEY_WLAN},
- {KE_KEY, 0x143, KEY_PROG1},
- {KE_KEY, 0xb05, KEY_PROG2},
- {KE_KEY, 0xb06, KEY_WWW},
- {KE_KEY, 0xb07, KEY_MAIL},
- {KE_KEY, 0xb30, KEY_STOP},
- {KE_KEY, 0xb31, KEY_PREVIOUSSONG},
- {KE_KEY, 0xb32, KEY_NEXTSONG},
- {KE_KEY, 0xb33, KEY_PLAYPAUSE},
- {KE_KEY, 0xb5a, KEY_MEDIA},
- {KE_END, 0, 0},
+static const struct key_entry toshiba_acpi_keymap[] __initconst = {
+ { KE_KEY, 0x101, { KEY_MUTE } },
+ { KE_KEY, 0x102, { KEY_ZOOMOUT } },
+ { KE_KEY, 0x103, { KEY_ZOOMIN } },
+ { KE_KEY, 0x13b, { KEY_COFFEE } },
+ { KE_KEY, 0x13c, { KEY_BATTERY } },
+ { KE_KEY, 0x13d, { KEY_SLEEP } },
+ { KE_KEY, 0x13e, { KEY_SUSPEND } },
+ { KE_KEY, 0x13f, { KEY_SWITCHVIDEOMODE } },
+ { KE_KEY, 0x140, { KEY_BRIGHTNESSDOWN } },
+ { KE_KEY, 0x141, { KEY_BRIGHTNESSUP } },
+ { KE_KEY, 0x142, { KEY_WLAN } },
+ { KE_KEY, 0x143, { KEY_PROG1 } },
+ { KE_KEY, 0x17f, { KEY_FN } },
+ { KE_KEY, 0xb05, { KEY_PROG2 } },
+ { KE_KEY, 0xb06, { KEY_WWW } },
+ { KE_KEY, 0xb07, { KEY_MAIL } },
+ { KE_KEY, 0xb30, { KEY_STOP } },
+ { KE_KEY, 0xb31, { KEY_PREVIOUSSONG } },
+ { KE_KEY, 0xb32, { KEY_NEXTSONG } },
+ { KE_KEY, 0xb33, { KEY_PLAYPAUSE } },
+ { KE_KEY, 0xb5a, { KEY_MEDIA } },
+ { KE_END, 0 },
};
/* utility
@@ -852,64 +846,9 @@ static struct backlight_ops toshiba_backlight_data = {
.update_status = set_lcd_status,
};
-static struct key_entry *toshiba_acpi_get_entry_by_scancode(unsigned int code)
-{
- struct key_entry *key;
-
- for (key = toshiba_acpi_keymap; key->type != KE_END; key++)
- if (code == key->code)
- return key;
-
- return NULL;
-}
-
-static struct key_entry *toshiba_acpi_get_entry_by_keycode(unsigned int code)
-{
- struct key_entry *key;
-
- for (key = toshiba_acpi_keymap; key->type != KE_END; key++)
- if (code == key->keycode && key->type == KE_KEY)
- return key;
-
- return NULL;
-}
-
-static int toshiba_acpi_getkeycode(struct input_dev *dev,
- unsigned int scancode, unsigned int *keycode)
-{
- struct key_entry *key = toshiba_acpi_get_entry_by_scancode(scancode);
-
- if (key && key->type == KE_KEY) {
- *keycode = key->keycode;
- return 0;
- }
-
- return -EINVAL;
-}
-
-static int toshiba_acpi_setkeycode(struct input_dev *dev,
- unsigned int scancode, unsigned int keycode)
-{
- struct key_entry *key;
- unsigned int old_keycode;
-
- key = toshiba_acpi_get_entry_by_scancode(scancode);
- if (key && key->type == KE_KEY) {
- old_keycode = key->keycode;
- key->keycode = keycode;
- set_bit(keycode, dev->keybit);
- if (!toshiba_acpi_get_entry_by_keycode(old_keycode))
- clear_bit(old_keycode, dev->keybit);
- return 0;
- }
-
- return -EINVAL;
-}
-
static void toshiba_acpi_notify(acpi_handle handle, u32 event, void *context)
{
u32 hci_result, value;
- struct key_entry *key;
if (event != 0x80)
return;
@@ -922,19 +861,11 @@ static void toshiba_acpi_notify(acpi_handle handle, u32 event, void *context)
if (value & 0x80)
continue;
- key = toshiba_acpi_get_entry_by_scancode
- (value);
- if (!key) {
+ if (!sparse_keymap_report_event(toshiba_acpi.hotkey_dev,
+ value, 1, true)) {
printk(MY_INFO "Unknown key %x\n",
value);
- continue;
}
- input_report_key(toshiba_acpi.hotkey_dev,
- key->keycode, 1);
- input_sync(toshiba_acpi.hotkey_dev);
- input_report_key(toshiba_acpi.hotkey_dev,
- key->keycode, 0);
- input_sync(toshiba_acpi.hotkey_dev);
} else if (hci_result == HCI_NOT_SUPPORTED) {
/* This is a workaround for an unresolved issue on
* some machines where system events sporadically
@@ -945,34 +876,17 @@ static void toshiba_acpi_notify(acpi_handle handle, u32 event, void *context)
} while (hci_result != HCI_EMPTY);
}
-static int toshiba_acpi_setup_keyboard(char *device)
+static int __init toshiba_acpi_setup_keyboard(char *device)
{
acpi_status status;
- acpi_handle handle;
- int result;
- const struct key_entry *key;
+ int error;
- status = acpi_get_handle(NULL, device, &handle);
+ status = acpi_get_handle(NULL, device, &toshiba_acpi.handle);
if (ACPI_FAILURE(status)) {
printk(MY_INFO "Unable to get notification device\n");
return -ENODEV;
}
- toshiba_acpi.handle = handle;
-
- status = acpi_evaluate_object(handle, "ENAB", NULL, NULL);
- if (ACPI_FAILURE(status)) {
- printk(MY_INFO "Unable to enable hotkeys\n");
- return -ENODEV;
- }
-
- status = acpi_install_notify_handler(handle, ACPI_DEVICE_NOTIFY,
- toshiba_acpi_notify, NULL);
- if (ACPI_FAILURE(status)) {
- printk(MY_INFO "Unable to install hotkey notification\n");
- return -ENODEV;
- }
-
toshiba_acpi.hotkey_dev = input_allocate_device();
if (!toshiba_acpi.hotkey_dev) {
printk(MY_INFO "Unable to register input device\n");
@@ -982,27 +896,54 @@ static int toshiba_acpi_setup_keyboard(char *device)
toshiba_acpi.hotkey_dev->name = "Toshiba input device";
toshiba_acpi.hotkey_dev->phys = device;
toshiba_acpi.hotkey_dev->id.bustype = BUS_HOST;
- toshiba_acpi.hotkey_dev->getkeycode = toshiba_acpi_getkeycode;
- toshiba_acpi.hotkey_dev->setkeycode = toshiba_acpi_setkeycode;
- for (key = toshiba_acpi_keymap; key->type != KE_END; key++) {
- set_bit(EV_KEY, toshiba_acpi.hotkey_dev->evbit);
- set_bit(key->keycode, toshiba_acpi.hotkey_dev->keybit);
+ error = sparse_keymap_setup(toshiba_acpi.hotkey_dev,
+ toshiba_acpi_keymap, NULL);
+ if (error)
+ goto err_free_dev;
+
+ status = acpi_install_notify_handler(toshiba_acpi.handle,
+ ACPI_DEVICE_NOTIFY, toshiba_acpi_notify, NULL);
+ if (ACPI_FAILURE(status)) {
+ printk(MY_INFO "Unable to install hotkey notification\n");
+ error = -ENODEV;
+ goto err_free_keymap;
+ }
+
+ status = acpi_evaluate_object(toshiba_acpi.handle, "ENAB", NULL, NULL);
+ if (ACPI_FAILURE(status)) {
+ printk(MY_INFO "Unable to enable hotkeys\n");
+ error = -ENODEV;
+ goto err_remove_notify;
}
- result = input_register_device(toshiba_acpi.hotkey_dev);
- if (result) {
+ error = input_register_device(toshiba_acpi.hotkey_dev);
+ if (error) {
printk(MY_INFO "Unable to register input device\n");
- return result;
+ goto err_remove_notify;
}
return 0;
+
+ err_remove_notify:
+ acpi_remove_notify_handler(toshiba_acpi.handle,
+ ACPI_DEVICE_NOTIFY, toshiba_acpi_notify);
+ err_free_keymap:
+ sparse_keymap_free(toshiba_acpi.hotkey_dev);
+ err_free_dev:
+ input_free_device(toshiba_acpi.hotkey_dev);
+ toshiba_acpi.hotkey_dev = NULL;
+ return error;
}
static void toshiba_acpi_exit(void)
{
- if (toshiba_acpi.hotkey_dev)
+ if (toshiba_acpi.hotkey_dev) {
+ acpi_remove_notify_handler(toshiba_acpi.handle,
+ ACPI_DEVICE_NOTIFY, toshiba_acpi_notify);
+ sparse_keymap_free(toshiba_acpi.hotkey_dev);
input_unregister_device(toshiba_acpi.hotkey_dev);
+ }
if (toshiba_acpi.bt_rfk) {
rfkill_unregister(toshiba_acpi.bt_rfk);
@@ -1017,9 +958,6 @@ static void toshiba_acpi_exit(void)
if (toshiba_proc_dir)
remove_proc_entry(PROC_TOSHIBA, acpi_root_dir);
- acpi_remove_notify_handler(toshiba_acpi.handle, ACPI_DEVICE_NOTIFY,
- toshiba_acpi_notify);
-
if (toshiba_acpi.illumination_installed)
led_classdev_unregister(&toshiba_led);
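Beyond the keymap conversion, the toshiba_acpi_setup_keyboard() rework above is largely about giving every acquired resource a matching unwind label so failures release things in reverse order of acquisition. Stripped of the driver specifics (the acquire_*/release_* names here are stand-in stubs, not real kernel APIs), the idiom looks like this:

/* stand-in stubs for the real acquisition/release steps */
static int acquire_a(void) { return 0; }	/* e.g. allocate the input device */
static int acquire_b(void) { return 0; }	/* e.g. set up the keymap */
static int acquire_c(void) { return 0; }	/* e.g. install the notify handler */
static void release_b(void) { }
static void release_a(void) { }

static int example_setup(void)
{
	int error;

	error = acquire_a();
	if (error)
		return error;

	error = acquire_b();
	if (error)
		goto err_undo_a;

	error = acquire_c();
	if (error)
		goto err_undo_b;

	return 0;

err_undo_b:
	release_b();
err_undo_a:
	release_a();
	return error;
}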
diff --git a/drivers/platform/x86/wmi.c b/drivers/platform/x86/wmi.c
index b2978a04317f..aecd9a9b549f 100644
--- a/drivers/platform/x86/wmi.c
+++ b/drivers/platform/x86/wmi.c
@@ -27,6 +27,8 @@
* ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
*/
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/types.h>
@@ -44,9 +46,8 @@ MODULE_LICENSE("GPL");
#define ACPI_WMI_CLASS "wmi"
-#define PREFIX "ACPI: WMI: "
-
static DEFINE_MUTEX(wmi_data_lock);
+static LIST_HEAD(wmi_block_list);
struct guid_block {
char guid[16];
@@ -67,10 +68,9 @@ struct wmi_block {
acpi_handle handle;
wmi_notify_handler handler;
void *handler_data;
- struct device *dev;
+ struct device dev;
};
-static struct wmi_block wmi_blocks;
/*
* If the GUID data block is marked as expensive, we must enable and
@@ -110,7 +110,7 @@ static struct acpi_driver acpi_wmi_driver = {
.add = acpi_wmi_add,
.remove = acpi_wmi_remove,
.notify = acpi_wmi_notify,
- },
+ },
};
/*
@@ -128,30 +128,18 @@ static struct acpi_driver acpi_wmi_driver = {
*/
static int wmi_parse_hexbyte(const u8 *src)
{
- unsigned int x; /* For correct wrapping */
int h;
+ int value;
/* high part */
- x = src[0];
- if (x - '0' <= '9' - '0') {
- h = x - '0';
- } else if (x - 'a' <= 'f' - 'a') {
- h = x - 'a' + 10;
- } else if (x - 'A' <= 'F' - 'A') {
- h = x - 'A' + 10;
- } else {
+ h = value = hex_to_bin(src[0]);
+ if (value < 0)
return -1;
- }
- h <<= 4;
/* low part */
- x = src[1];
- if (x - '0' <= '9' - '0')
- return h | (x - '0');
- if (x - 'a' <= 'f' - 'a')
- return h | (x - 'a' + 10);
- if (x - 'A' <= 'F' - 'A')
- return h | (x - 'A' + 10);
+ value = hex_to_bin(src[1]);
+ if (value >= 0)
+ return (h << 4) | value;
return -1;
}
@@ -232,7 +220,7 @@ static int wmi_gtoa(const char *in, char *out)
for (i = 10; i <= 15; i++)
out += sprintf(out, "%02X", in[i] & 0xFF);
- out = '\0';
+ *out = '\0';
return 0;
}
@@ -246,7 +234,7 @@ static bool find_guid(const char *guid_string, struct wmi_block **out)
wmi_parse_guid(guid_string, tmp);
wmi_swap_bytes(tmp, guid_input);
- list_for_each(p, &wmi_blocks.list) {
+ list_for_each(p, &wmi_block_list) {
wblock = list_entry(p, struct wmi_block, list);
block = &wblock->gblock;
@@ -487,30 +475,29 @@ const struct acpi_buffer *in)
}
EXPORT_SYMBOL_GPL(wmi_set_block);
-static void wmi_dump_wdg(struct guid_block *g)
+static void wmi_dump_wdg(const struct guid_block *g)
{
char guid_string[37];
wmi_gtoa(g->guid, guid_string);
- printk(KERN_INFO PREFIX "%s:\n", guid_string);
- printk(KERN_INFO PREFIX "\tobject_id: %c%c\n",
- g->object_id[0], g->object_id[1]);
- printk(KERN_INFO PREFIX "\tnotify_id: %02X\n", g->notify_id);
- printk(KERN_INFO PREFIX "\treserved: %02X\n", g->reserved);
- printk(KERN_INFO PREFIX "\tinstance_count: %d\n", g->instance_count);
- printk(KERN_INFO PREFIX "\tflags: %#x", g->flags);
+
+ pr_info("%s:\n", guid_string);
+ pr_info("\tobject_id: %c%c\n", g->object_id[0], g->object_id[1]);
+ pr_info("\tnotify_id: %02X\n", g->notify_id);
+ pr_info("\treserved: %02X\n", g->reserved);
+ pr_info("\tinstance_count: %d\n", g->instance_count);
+ pr_info("\tflags: %#x ", g->flags);
if (g->flags) {
- printk(" ");
if (g->flags & ACPI_WMI_EXPENSIVE)
- printk("ACPI_WMI_EXPENSIVE ");
+ pr_cont("ACPI_WMI_EXPENSIVE ");
if (g->flags & ACPI_WMI_METHOD)
- printk("ACPI_WMI_METHOD ");
+ pr_cont("ACPI_WMI_METHOD ");
if (g->flags & ACPI_WMI_STRING)
- printk("ACPI_WMI_STRING ");
+ pr_cont("ACPI_WMI_STRING ");
if (g->flags & ACPI_WMI_EVENT)
- printk("ACPI_WMI_EVENT ");
+ pr_cont("ACPI_WMI_EVENT ");
}
- printk("\n");
+ pr_cont("\n");
}
@@ -522,7 +509,7 @@ static void wmi_notify_debug(u32 value, void *context)
status = wmi_get_event_data(value, &response);
if (status != AE_OK) {
- printk(KERN_INFO "wmi: bad event status 0x%x\n", status);
+ pr_info("bad event status 0x%x\n", status);
return;
}
@@ -531,22 +518,22 @@ static void wmi_notify_debug(u32 value, void *context)
if (!obj)
return;
- printk(KERN_INFO PREFIX "DEBUG Event ");
+ pr_info("DEBUG Event ");
switch(obj->type) {
case ACPI_TYPE_BUFFER:
- printk("BUFFER_TYPE - length %d\n", obj->buffer.length);
+ pr_cont("BUFFER_TYPE - length %d\n", obj->buffer.length);
break;
case ACPI_TYPE_STRING:
- printk("STRING_TYPE - %s\n", obj->string.pointer);
+ pr_cont("STRING_TYPE - %s\n", obj->string.pointer);
break;
case ACPI_TYPE_INTEGER:
- printk("INTEGER_TYPE - %llu\n", obj->integer.value);
+ pr_cont("INTEGER_TYPE - %llu\n", obj->integer.value);
break;
case ACPI_TYPE_PACKAGE:
- printk("PACKAGE_TYPE - %d elements\n", obj->package.count);
+ pr_cont("PACKAGE_TYPE - %d elements\n", obj->package.count);
break;
default:
- printk("object type 0x%X\n", obj->type);
+ pr_cont("object type 0x%X\n", obj->type);
}
kfree(obj);
}
@@ -633,7 +620,7 @@ acpi_status wmi_get_event_data(u32 event, struct acpi_buffer *out)
params[0].type = ACPI_TYPE_INTEGER;
params[0].integer.value = event;
- list_for_each(p, &wmi_blocks.list) {
+ list_for_each(p, &wmi_block_list) {
wblock = list_entry(p, struct wmi_block, list);
gblock = &wblock->gblock;
@@ -662,7 +649,7 @@ EXPORT_SYMBOL_GPL(wmi_has_guid);
/*
* sysfs interface
*/
-static ssize_t show_modalias(struct device *dev, struct device_attribute *attr,
+static ssize_t modalias_show(struct device *dev, struct device_attribute *attr,
char *buf)
{
char guid_string[37];
@@ -676,7 +663,11 @@ static ssize_t show_modalias(struct device *dev, struct device_attribute *attr,
return sprintf(buf, "wmi:%s\n", guid_string);
}
-static DEVICE_ATTR(modalias, S_IRUGO, show_modalias, NULL);
+
+static struct device_attribute wmi_dev_attrs[] = {
+ __ATTR_RO(modalias),
+ __ATTR_NULL
+};
static int wmi_dev_uevent(struct device *dev, struct kobj_uevent_env *env)
{
@@ -702,108 +693,71 @@ static int wmi_dev_uevent(struct device *dev, struct kobj_uevent_env *env)
static void wmi_dev_free(struct device *dev)
{
- kfree(dev);
+ struct wmi_block *wmi_block = container_of(dev, struct wmi_block, dev);
+
+ kfree(wmi_block);
}
static struct class wmi_class = {
.name = "wmi",
.dev_release = wmi_dev_free,
.dev_uevent = wmi_dev_uevent,
+ .dev_attrs = wmi_dev_attrs,
};
-static int wmi_create_devs(void)
+static struct wmi_block *wmi_create_device(const struct guid_block *gblock,
+ acpi_handle handle)
{
- int result;
- char guid_string[37];
- struct guid_block *gblock;
struct wmi_block *wblock;
- struct list_head *p;
- struct device *guid_dev;
-
- /* Create devices for all the GUIDs */
- list_for_each(p, &wmi_blocks.list) {
- wblock = list_entry(p, struct wmi_block, list);
-
- guid_dev = kzalloc(sizeof(struct device), GFP_KERNEL);
- if (!guid_dev)
- return -ENOMEM;
-
- wblock->dev = guid_dev;
-
- guid_dev->class = &wmi_class;
- dev_set_drvdata(guid_dev, wblock);
-
- gblock = &wblock->gblock;
-
- wmi_gtoa(gblock->guid, guid_string);
- dev_set_name(guid_dev, guid_string);
-
- result = device_register(guid_dev);
- if (result)
- return result;
+ int error;
+ char guid_string[37];
- result = device_create_file(guid_dev, &dev_attr_modalias);
- if (result)
- return result;
+ wblock = kzalloc(sizeof(struct wmi_block), GFP_KERNEL);
+ if (!wblock) {
+ error = -ENOMEM;
+ goto err_out;
}
- return 0;
-}
+ wblock->handle = handle;
+ wblock->gblock = *gblock;
-static void wmi_remove_devs(void)
-{
- struct guid_block *gblock;
- struct wmi_block *wblock;
- struct list_head *p;
- struct device *guid_dev;
+ wblock->dev.class = &wmi_class;
- /* Delete devices for all the GUIDs */
- list_for_each(p, &wmi_blocks.list) {
- wblock = list_entry(p, struct wmi_block, list);
+ wmi_gtoa(gblock->guid, guid_string);
+ dev_set_name(&wblock->dev, guid_string);
- guid_dev = wblock->dev;
- gblock = &wblock->gblock;
+ dev_set_drvdata(&wblock->dev, wblock);
- device_remove_file(guid_dev, &dev_attr_modalias);
+ error = device_register(&wblock->dev);
+ if (error)
+ goto err_free;
- device_unregister(guid_dev);
- }
-}
+ list_add_tail(&wblock->list, &wmi_block_list);
+ return wblock;
-static void wmi_class_exit(void)
-{
- wmi_remove_devs();
- class_unregister(&wmi_class);
+err_free:
+ kfree(wblock);
+err_out:
+ return ERR_PTR(error);
}
-static int wmi_class_init(void)
+static void wmi_free_devices(void)
{
- int ret;
-
- ret = class_register(&wmi_class);
- if (ret)
- return ret;
+ struct wmi_block *wblock, *next;
- ret = wmi_create_devs();
- if (ret)
- wmi_class_exit();
-
- return ret;
+ /* Delete devices for all the GUIDs */
+ list_for_each_entry_safe(wblock, next, &wmi_block_list, list)
+ device_unregister(&wblock->dev);
}
static bool guid_already_parsed(const char *guid_string)
{
- struct guid_block *gblock;
struct wmi_block *wblock;
- struct list_head *p;
- list_for_each(p, &wmi_blocks.list) {
- wblock = list_entry(p, struct wmi_block, list);
- gblock = &wblock->gblock;
-
- if (strncmp(gblock->guid, guid_string, 16) == 0)
+ list_for_each_entry(wblock, &wmi_block_list, list)
+ if (memcmp(wblock->gblock.guid, guid_string, 16) == 0)
return true;
- }
+
return false;
}
@@ -814,30 +768,29 @@ static acpi_status parse_wdg(acpi_handle handle)
{
struct acpi_buffer out = {ACPI_ALLOCATE_BUFFER, NULL};
union acpi_object *obj;
- struct guid_block *gblock;
+ const struct guid_block *gblock;
struct wmi_block *wblock;
char guid_string[37];
acpi_status status;
+ int retval;
u32 i, total;
status = acpi_evaluate_object(handle, "_WDG", NULL, &out);
-
if (ACPI_FAILURE(status))
- return status;
+ return -ENXIO;
obj = (union acpi_object *) out.pointer;
+ if (!obj)
+ return -ENXIO;
- if (obj->type != ACPI_TYPE_BUFFER)
- return AE_ERROR;
-
- total = obj->buffer.length / sizeof(struct guid_block);
-
- gblock = kmemdup(obj->buffer.pointer, obj->buffer.length, GFP_KERNEL);
- if (!gblock) {
- status = AE_NO_MEMORY;
+ if (obj->type != ACPI_TYPE_BUFFER) {
+ retval = -ENXIO;
goto out_free_pointer;
}
+ gblock = (const struct guid_block *)obj->buffer.pointer;
+ total = obj->buffer.length / sizeof(struct guid_block);
+
for (i = 0; i < total; i++) {
/*
Some WMI devices, like those for nVidia hooks, have a
@@ -848,34 +801,32 @@ static acpi_status parse_wdg(acpi_handle handle)
*/
if (guid_already_parsed(gblock[i].guid) == true) {
wmi_gtoa(gblock[i].guid, guid_string);
- printk(KERN_INFO PREFIX "Skipping duplicate GUID %s\n",
- guid_string);
+ pr_info("Skipping duplicate GUID %s\n", guid_string);
continue;
}
+
if (debug_dump_wdg)
wmi_dump_wdg(&gblock[i]);
- wblock = kzalloc(sizeof(struct wmi_block), GFP_KERNEL);
- if (!wblock) {
- status = AE_NO_MEMORY;
- goto out_free_gblock;
+ wblock = wmi_create_device(&gblock[i], handle);
+ if (IS_ERR(wblock)) {
+ retval = PTR_ERR(wblock);
+ wmi_free_devices();
+ break;
}
- wblock->gblock = gblock[i];
- wblock->handle = handle;
if (debug_event) {
wblock->handler = wmi_notify_debug;
- status = wmi_method_enable(wblock, 1);
+ wmi_method_enable(wblock, 1);
}
- list_add_tail(&wblock->list, &wmi_blocks.list);
}
-out_free_gblock:
- kfree(gblock);
+ retval = 0;
+
out_free_pointer:
kfree(out.pointer);
- return status;
+ return retval;
}
/*
@@ -929,7 +880,7 @@ static void acpi_wmi_notify(struct acpi_device *device, u32 event)
struct list_head *p;
char guid_string[37];
- list_for_each(p, &wmi_blocks.list) {
+ list_for_each(p, &wmi_block_list) {
wblock = list_entry(p, struct wmi_block, list);
block = &wblock->gblock;
@@ -939,8 +890,7 @@ static void acpi_wmi_notify(struct acpi_device *device, u32 event)
wblock->handler(event, wblock->handler_data);
if (debug_event) {
wmi_gtoa(wblock->gblock.guid, guid_string);
- printk(KERN_INFO PREFIX "DEBUG Event GUID:"
- " %s\n", guid_string);
+ pr_info("DEBUG Event GUID: %s\n", guid_string);
}
acpi_bus_generate_netlink_event(
@@ -955,6 +905,7 @@ static int acpi_wmi_remove(struct acpi_device *device, int type)
{
acpi_remove_address_space_handler(device->handle,
ACPI_ADR_SPACE_EC, &acpi_wmi_ec_space_handler);
+ wmi_free_devices();
return 0;
}
@@ -962,68 +913,57 @@ static int acpi_wmi_remove(struct acpi_device *device, int type)
static int acpi_wmi_add(struct acpi_device *device)
{
acpi_status status;
- int result = 0;
+ int error;
status = acpi_install_address_space_handler(device->handle,
ACPI_ADR_SPACE_EC,
&acpi_wmi_ec_space_handler,
NULL, NULL);
- if (ACPI_FAILURE(status))
- return -ENODEV;
-
- status = parse_wdg(device->handle);
if (ACPI_FAILURE(status)) {
- printk(KERN_ERR PREFIX "Error installing EC region handler\n");
+ pr_err("Error installing EC region handler\n");
return -ENODEV;
}
- return result;
+ error = parse_wdg(device->handle);
+ if (error) {
+ acpi_remove_address_space_handler(device->handle,
+ ACPI_ADR_SPACE_EC,
+ &acpi_wmi_ec_space_handler);
+ pr_err("Failed to parse WDG method\n");
+ return error;
+ }
+
+ return 0;
}
static int __init acpi_wmi_init(void)
{
- int result;
-
- INIT_LIST_HEAD(&wmi_blocks.list);
+ int error;
if (acpi_disabled)
return -ENODEV;
- result = acpi_bus_register_driver(&acpi_wmi_driver);
+ error = class_register(&wmi_class);
+ if (error)
+ return error;
- if (result < 0) {
- printk(KERN_INFO PREFIX "Error loading mapper\n");
- return -ENODEV;
+ error = acpi_bus_register_driver(&acpi_wmi_driver);
+ if (error) {
+ pr_err("Error loading mapper\n");
+ class_unregister(&wmi_class);
+ return error;
}
- result = wmi_class_init();
- if (result) {
- acpi_bus_unregister_driver(&acpi_wmi_driver);
- return result;
- }
-
- printk(KERN_INFO PREFIX "Mapper loaded\n");
-
- return result;
+ pr_info("Mapper loaded\n");
+ return 0;
}
static void __exit acpi_wmi_exit(void)
{
- struct list_head *p, *tmp;
- struct wmi_block *wblock;
-
- wmi_class_exit();
-
acpi_bus_unregister_driver(&acpi_wmi_driver);
+ class_unregister(&wmi_class);
- list_for_each_safe(p, tmp, &wmi_blocks.list) {
- wblock = list_entry(p, struct wmi_block, list);
-
- list_del(p);
- kfree(wblock);
- }
-
- printk(KERN_INFO PREFIX "Mapper unloaded\n");
+ pr_info("Mapper unloaded\n");
}
subsys_initcall(acpi_wmi_init);
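The wmi_parse_hexbyte() rewrite earlier in this file leans on hex_to_bin() from <linux/kernel.h>, which maps a single hex digit to 0-15 and returns a negative value for anything else. A minimal stand-alone illustration of the same pattern (the function name is chosen here for illustration only):

#include <linux/kernel.h>

/* Parse two hex characters into one byte; returns -1 on invalid input. */
static int example_parse_hexbyte(const char *src)
{
	int hi, lo;

	hi = hex_to_bin(src[0]);
	if (hi < 0)
		return -1;

	lo = hex_to_bin(src[1]);
	if (lo < 0)
		return -1;

	return (hi << 4) | lo;
}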
diff --git a/drivers/platform/x86/xo1-rfkill.c b/drivers/platform/x86/xo1-rfkill.c
new file mode 100644
index 000000000000..e549eeeda121
--- /dev/null
+++ b/drivers/platform/x86/xo1-rfkill.c
@@ -0,0 +1,85 @@
+/*
+ * Support for rfkill through the OLPC XO-1 laptop embedded controller
+ *
+ * Copyright (C) 2010 One Laptop per Child
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/rfkill.h>
+
+#include <asm/olpc.h>
+
+static int rfkill_set_block(void *data, bool blocked)
+{
+ unsigned char cmd;
+ if (blocked)
+ cmd = EC_WLAN_ENTER_RESET;
+ else
+ cmd = EC_WLAN_LEAVE_RESET;
+
+ return olpc_ec_cmd(cmd, NULL, 0, NULL, 0);
+}
+
+static const struct rfkill_ops rfkill_ops = {
+ .set_block = rfkill_set_block,
+};
+
+static int __devinit xo1_rfkill_probe(struct platform_device *pdev)
+{
+ struct rfkill *rfk;
+ int r;
+
+ rfk = rfkill_alloc(pdev->name, &pdev->dev, RFKILL_TYPE_WLAN,
+ &rfkill_ops, NULL);
+ if (!rfk)
+ return -ENOMEM;
+
+ r = rfkill_register(rfk);
+ if (r) {
+ rfkill_destroy(rfk);
+ return r;
+ }
+
+ platform_set_drvdata(pdev, rfk);
+ return 0;
+}
+
+static int __devexit xo1_rfkill_remove(struct platform_device *pdev)
+{
+ struct rfkill *rfk = platform_get_drvdata(pdev);
+ rfkill_unregister(rfk);
+ rfkill_destroy(rfk);
+ return 0;
+}
+
+static struct platform_driver xo1_rfkill_driver = {
+ .driver = {
+ .name = "xo1-rfkill",
+ .owner = THIS_MODULE,
+ },
+ .probe = xo1_rfkill_probe,
+ .remove = __devexit_p(xo1_rfkill_remove),
+};
+
+static int __init xo1_rfkill_init(void)
+{
+ return platform_driver_register(&xo1_rfkill_driver);
+}
+
+static void __exit xo1_rfkill_exit(void)
+{
+ platform_driver_unregister(&xo1_rfkill_driver);
+}
+
+MODULE_AUTHOR("Daniel Drake <dsd@laptop.org>");
+MODULE_LICENSE("GPL");
+MODULE_ALIAS("platform:xo1-rfkill");
+
+module_init(xo1_rfkill_init);
+module_exit(xo1_rfkill_exit);
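The new driver above only registers a platform driver; probing relies on the platform (OLPC board support code in this case) creating a matching "xo1-rfkill" platform device. As a hedged sketch of what that side typically looks like, with an illustrative function name and call site:

#include <linux/init.h>
#include <linux/err.h>
#include <linux/platform_device.h>

static struct platform_device *xo1_rfkill_pdev;

static int __init example_register_xo1_rfkill(void)
{
	/* the name must match xo1_rfkill_driver.driver.name for probe() to run */
	xo1_rfkill_pdev = platform_device_register_simple("xo1-rfkill", -1,
							  NULL, 0);
	return IS_ERR(xo1_rfkill_pdev) ? PTR_ERR(xo1_rfkill_pdev) : 0;
}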
diff --git a/drivers/pnp/base.h b/drivers/pnp/base.h
index 0bab84ebb15d..19bc73695475 100644
--- a/drivers/pnp/base.h
+++ b/drivers/pnp/base.h
@@ -12,11 +12,12 @@ void pnp_unregister_protocol(struct pnp_protocol *protocol);
#define PNP_EISA_ID_MASK 0x7fffffff
void pnp_eisa_id_to_string(u32 id, char *str);
-struct pnp_dev *pnp_alloc_dev(struct pnp_protocol *, int id, char *pnpid);
+struct pnp_dev *pnp_alloc_dev(struct pnp_protocol *, int id,
+ const char *pnpid);
struct pnp_card *pnp_alloc_card(struct pnp_protocol *, int id, char *pnpid);
int pnp_add_device(struct pnp_dev *dev);
-struct pnp_id *pnp_add_id(struct pnp_dev *dev, char *id);
+struct pnp_id *pnp_add_id(struct pnp_dev *dev, const char *id);
int pnp_add_card(struct pnp_card *card);
void pnp_remove_card(struct pnp_card *card);
diff --git a/drivers/pnp/core.c b/drivers/pnp/core.c
index 88b3cde52596..0f34d962fd3c 100644
--- a/drivers/pnp/core.c
+++ b/drivers/pnp/core.c
@@ -124,7 +124,8 @@ static void pnp_release_device(struct device *dmdev)
kfree(dev);
}
-struct pnp_dev *pnp_alloc_dev(struct pnp_protocol *protocol, int id, char *pnpid)
+struct pnp_dev *pnp_alloc_dev(struct pnp_protocol *protocol, int id,
+ const char *pnpid)
{
struct pnp_dev *dev;
struct pnp_id *dev_id;
@@ -194,8 +195,9 @@ int pnp_add_device(struct pnp_dev *dev)
for (id = dev->id; id; id = id->next)
len += scnprintf(buf + len, sizeof(buf) - len, " %s", id->id);
- pnp_dbg(&dev->dev, "%s device, IDs%s (%s)\n",
- dev->protocol->name, buf, dev->active ? "active" : "disabled");
+ dev_printk(KERN_DEBUG, &dev->dev, "%s device, IDs%s (%s)\n",
+ dev->protocol->name, buf,
+ dev->active ? "active" : "disabled");
return 0;
}
diff --git a/drivers/pnp/driver.c b/drivers/pnp/driver.c
index cd11b113494f..d1dbb9df53fa 100644
--- a/drivers/pnp/driver.c
+++ b/drivers/pnp/driver.c
@@ -236,7 +236,7 @@ void pnp_unregister_driver(struct pnp_driver *drv)
* @dev: pointer to the desired device
* @id: pointer to an EISA id string
*/
-struct pnp_id *pnp_add_id(struct pnp_dev *dev, char *id)
+struct pnp_id *pnp_add_id(struct pnp_dev *dev, const char *id)
{
struct pnp_id *dev_id, *ptr;
diff --git a/drivers/pnp/isapnp/proc.c b/drivers/pnp/isapnp/proc.c
index e73ebefdf3e0..315b3112aca8 100644
--- a/drivers/pnp/isapnp/proc.c
+++ b/drivers/pnp/isapnp/proc.c
@@ -21,7 +21,6 @@
#include <linux/isapnp.h>
#include <linux/proc_fs.h>
#include <linux/init.h>
-#include <linux/smp_lock.h>
#include <asm/uaccess.h>
extern struct pnp_protocol isapnp_protocol;
diff --git a/drivers/pnp/pnpacpi/core.c b/drivers/pnp/pnpacpi/core.c
index dc4e32e031e9..2d73dfcecdbb 100644
--- a/drivers/pnp/pnpacpi/core.c
+++ b/drivers/pnp/pnpacpi/core.c
@@ -28,7 +28,7 @@
#include "../base.h"
#include "pnpacpi.h"
-static int num = 0;
+static int num;
/* We need only to blacklist devices that have already an acpi driver that
* can't use pnp layer. We don't need to blacklist device that are directly
@@ -59,7 +59,7 @@ static inline int __init is_exclusive_device(struct acpi_device *dev)
#define TEST_ALPHA(c) \
if (!('@' <= (c) || (c) <= 'Z')) \
return 0
-static int __init ispnpidacpi(char *id)
+static int __init ispnpidacpi(const char *id)
{
TEST_ALPHA(id[0]);
TEST_ALPHA(id[1]);
@@ -180,11 +180,24 @@ struct pnp_protocol pnpacpi_protocol = {
};
EXPORT_SYMBOL(pnpacpi_protocol);
+static char *pnpacpi_get_id(struct acpi_device *device)
+{
+ struct acpi_hardware_id *id;
+
+ list_for_each_entry(id, &device->pnp.ids, list) {
+ if (ispnpidacpi(id->id))
+ return id->id;
+ }
+
+ return NULL;
+}
+
static int __init pnpacpi_add_device(struct acpi_device *device)
{
acpi_handle temp = NULL;
acpi_status status;
struct pnp_dev *dev;
+ char *pnpid;
struct acpi_hardware_id *id;
/*
@@ -192,11 +205,17 @@ static int __init pnpacpi_add_device(struct acpi_device *device)
* driver should not be loaded.
*/
status = acpi_get_handle(device->handle, "_CRS", &temp);
- if (ACPI_FAILURE(status) || !ispnpidacpi(acpi_device_hid(device)) ||
- is_exclusive_device(device) || (!device->status.present))
+ if (ACPI_FAILURE(status))
+ return 0;
+
+ pnpid = pnpacpi_get_id(device);
+ if (!pnpid)
+ return 0;
+
+ if (is_exclusive_device(device) || !device->status.present)
return 0;
- dev = pnp_alloc_dev(&pnpacpi_protocol, num, acpi_device_hid(device));
+ dev = pnp_alloc_dev(&pnpacpi_protocol, num, pnpid);
if (!dev)
return -ENOMEM;
@@ -227,7 +246,7 @@ static int __init pnpacpi_add_device(struct acpi_device *device)
pnpacpi_parse_resource_option_data(dev);
list_for_each_entry(id, &device->pnp.ids, list) {
- if (!strcmp(id->id, acpi_device_hid(device)))
+ if (!strcmp(id->id, pnpid))
continue;
if (!ispnpidacpi(id->id))
continue;
diff --git a/drivers/pnp/resource.c b/drivers/pnp/resource.c
index e3446ab8b563..a925e6b63d72 100644
--- a/drivers/pnp/resource.c
+++ b/drivers/pnp/resource.c
@@ -523,7 +523,7 @@ struct pnp_resource *pnp_add_irq_resource(struct pnp_dev *dev, int irq,
res->start = irq;
res->end = irq;
- pnp_dbg(&dev->dev, " add %pr\n", res);
+ dev_printk(KERN_DEBUG, &dev->dev, "%pR\n", res);
return pnp_res;
}
@@ -544,7 +544,7 @@ struct pnp_resource *pnp_add_dma_resource(struct pnp_dev *dev, int dma,
res->start = dma;
res->end = dma;
- pnp_dbg(&dev->dev, " add %pr\n", res);
+ dev_printk(KERN_DEBUG, &dev->dev, "%pR\n", res);
return pnp_res;
}
@@ -568,7 +568,7 @@ struct pnp_resource *pnp_add_io_resource(struct pnp_dev *dev,
res->start = start;
res->end = end;
- pnp_dbg(&dev->dev, " add %pr\n", res);
+ dev_printk(KERN_DEBUG, &dev->dev, "%pR\n", res);
return pnp_res;
}
@@ -592,7 +592,7 @@ struct pnp_resource *pnp_add_mem_resource(struct pnp_dev *dev,
res->start = start;
res->end = end;
- pnp_dbg(&dev->dev, " add %pr\n", res);
+ dev_printk(KERN_DEBUG, &dev->dev, "%pR\n", res);
return pnp_res;
}
@@ -616,7 +616,7 @@ struct pnp_resource *pnp_add_bus_resource(struct pnp_dev *dev,
res->start = start;
res->end = end;
- pnp_dbg(&dev->dev, " add %pr\n", res);
+ dev_printk(KERN_DEBUG, &dev->dev, "%pR\n", res);
return pnp_res;
}
diff --git a/drivers/power/Kconfig b/drivers/power/Kconfig
index 07343568a12e..60d83d983a36 100644
--- a/drivers/power/Kconfig
+++ b/drivers/power/Kconfig
@@ -64,8 +64,7 @@ config TEST_POWER
config BATTERY_DS2760
tristate "DS2760 battery driver (HP iPAQ & others)"
- select W1
- select W1_SLAVE_DS2760
+ depends on W1 && W1_SLAVE_DS2760
help
Say Y here to enable support for batteries with ds2760 chip.
@@ -109,6 +108,13 @@ config BATTERY_WM97XX
help
Say Y to enable support for battery measured by WM97xx aux port.
+config BATTERY_BQ20Z75
+ tristate "TI BQ20z75 gas gauge"
+ depends on I2C
+ help
+ Say Y to include support for TI BQ20z75 SBS-compliant
+ gas gauge and protection IC.
+
config BATTERY_BQ27x00
tristate "BQ27x00 battery driver"
depends on I2C
@@ -166,4 +172,17 @@ config BATTERY_INTEL_MID
Say Y here to enable the battery driver on Intel MID
platforms.
+config CHARGER_ISP1704
+ tristate "ISP1704 USB Charger Detection"
+ depends on USB_OTG_UTILS
+ help
+ Say Y to enable support for USB Charger Detection with
+ ISP1707/ISP1704 USB transceivers.
+
+config CHARGER_TWL4030
+ tristate "OMAP TWL4030 BCI charger driver"
+ depends on TWL4030_CORE
+ help
+ Say Y here to enable support for TWL4030 Battery Charge Interface.
+
endif # POWER_SUPPLY
diff --git a/drivers/power/Makefile b/drivers/power/Makefile
index 10143aaf4ee3..c75772eb157c 100644
--- a/drivers/power/Makefile
+++ b/drivers/power/Makefile
@@ -1,16 +1,8 @@
-power_supply-objs := power_supply_core.o
+ccflags-$(CONFIG_POWER_SUPPLY_DEBUG) := -DDEBUG
-ifeq ($(CONFIG_SYSFS),y)
-power_supply-objs += power_supply_sysfs.o
-endif
-
-ifeq ($(CONFIG_LEDS_TRIGGERS),y)
-power_supply-objs += power_supply_leds.o
-endif
-
-ifeq ($(CONFIG_POWER_SUPPLY_DEBUG),y)
-EXTRA_CFLAGS += -DDEBUG
-endif
+power_supply-y := power_supply_core.o
+power_supply-$(CONFIG_SYSFS) += power_supply_sysfs.o
+power_supply-$(CONFIG_LEDS_TRIGGERS) += power_supply_leds.o
obj-$(CONFIG_POWER_SUPPLY) += power_supply.o
@@ -29,6 +21,7 @@ obj-$(CONFIG_BATTERY_OLPC) += olpc_battery.o
obj-$(CONFIG_BATTERY_TOSA) += tosa_battery.o
obj-$(CONFIG_BATTERY_COLLIE) += collie_battery.o
obj-$(CONFIG_BATTERY_WM97XX) += wm97xx_battery.o
+obj-$(CONFIG_BATTERY_BQ20Z75) += bq20z75.o
obj-$(CONFIG_BATTERY_BQ27x00) += bq27x00_battery.o
obj-$(CONFIG_BATTERY_DA9030) += da9030_battery.o
obj-$(CONFIG_BATTERY_MAX17040) += max17040_battery.o
@@ -37,3 +30,5 @@ obj-$(CONFIG_BATTERY_S3C_ADC) += s3c_adc_battery.o
obj-$(CONFIG_CHARGER_PCF50633) += pcf50633-charger.o
obj-$(CONFIG_BATTERY_JZ4740) += jz4740-battery.o
obj-$(CONFIG_BATTERY_INTEL_MID) += intel_mid_battery.o
+obj-$(CONFIG_CHARGER_ISP1704) += isp1704_charger.o
+obj-$(CONFIG_CHARGER_TWL4030) += twl4030_charger.o
diff --git a/drivers/power/bq20z75.c b/drivers/power/bq20z75.c
new file mode 100644
index 000000000000..492da27e1a47
--- /dev/null
+++ b/drivers/power/bq20z75.c
@@ -0,0 +1,493 @@
+/*
+ * Gas Gauge driver for TI's BQ20Z75
+ *
+ * Copyright (c) 2010, NVIDIA Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/err.h>
+#include <linux/power_supply.h>
+#include <linux/i2c.h>
+#include <linux/slab.h>
+
+enum {
+ REG_MANUFACTURER_DATA,
+ REG_TEMPERATURE,
+ REG_VOLTAGE,
+ REG_CURRENT,
+ REG_CAPACITY,
+ REG_TIME_TO_EMPTY,
+ REG_TIME_TO_FULL,
+ REG_STATUS,
+ REG_CYCLE_COUNT,
+ REG_SERIAL_NUMBER,
+ REG_REMAINING_CAPACITY,
+ REG_FULL_CHARGE_CAPACITY,
+ REG_DESIGN_CAPACITY,
+ REG_DESIGN_VOLTAGE,
+};
+
+/* manufacturer access defines */
+#define MANUFACTURER_ACCESS_STATUS 0x0006
+#define MANUFACTURER_ACCESS_SLEEP 0x0011
+
+/* battery status value bits */
+#define BATTERY_DISCHARGING 0x40
+#define BATTERY_FULL_CHARGED 0x20
+#define BATTERY_FULL_DISCHARGED 0x10
+
+#define BQ20Z75_DATA(_psp, _addr, _min_value, _max_value) { \
+ .psp = _psp, \
+ .addr = _addr, \
+ .min_value = _min_value, \
+ .max_value = _max_value, \
+}
+
+static const struct bq20z75_device_data {
+ enum power_supply_property psp;
+ u8 addr;
+ int min_value;
+ int max_value;
+} bq20z75_data[] = {
+ [REG_MANUFACTURER_DATA] =
+ BQ20Z75_DATA(POWER_SUPPLY_PROP_PRESENT, 0x00, 0, 65535),
+ [REG_TEMPERATURE] =
+ BQ20Z75_DATA(POWER_SUPPLY_PROP_TEMP, 0x08, 0, 65535),
+ [REG_VOLTAGE] =
+ BQ20Z75_DATA(POWER_SUPPLY_PROP_VOLTAGE_NOW, 0x09, 0, 20000),
+ [REG_CURRENT] =
+ BQ20Z75_DATA(POWER_SUPPLY_PROP_CURRENT_NOW, 0x0A, -32768,
+ 32767),
+ [REG_CAPACITY] =
+ BQ20Z75_DATA(POWER_SUPPLY_PROP_CAPACITY, 0x0E, 0, 100),
+ [REG_REMAINING_CAPACITY] =
+ BQ20Z75_DATA(POWER_SUPPLY_PROP_ENERGY_NOW, 0x0F, 0, 65535),
+ [REG_FULL_CHARGE_CAPACITY] =
+ BQ20Z75_DATA(POWER_SUPPLY_PROP_ENERGY_FULL, 0x10, 0, 65535),
+ [REG_TIME_TO_EMPTY] =
+ BQ20Z75_DATA(POWER_SUPPLY_PROP_TIME_TO_EMPTY_AVG, 0x12, 0,
+ 65535),
+ [REG_TIME_TO_FULL] =
+ BQ20Z75_DATA(POWER_SUPPLY_PROP_TIME_TO_FULL_AVG, 0x13, 0,
+ 65535),
+ [REG_STATUS] =
+ BQ20Z75_DATA(POWER_SUPPLY_PROP_STATUS, 0x16, 0, 65535),
+ [REG_CYCLE_COUNT] =
+ BQ20Z75_DATA(POWER_SUPPLY_PROP_CYCLE_COUNT, 0x17, 0, 65535),
+ [REG_DESIGN_CAPACITY] =
+ BQ20Z75_DATA(POWER_SUPPLY_PROP_ENERGY_FULL_DESIGN, 0x18, 0,
+ 65535),
+ [REG_DESIGN_VOLTAGE] =
+ BQ20Z75_DATA(POWER_SUPPLY_PROP_VOLTAGE_MAX_DESIGN, 0x19, 0,
+ 65535),
+ [REG_SERIAL_NUMBER] =
+ BQ20Z75_DATA(POWER_SUPPLY_PROP_SERIAL_NUMBER, 0x1C, 0, 65535),
+};
+
+static enum power_supply_property bq20z75_properties[] = {
+ POWER_SUPPLY_PROP_STATUS,
+ POWER_SUPPLY_PROP_HEALTH,
+ POWER_SUPPLY_PROP_PRESENT,
+ POWER_SUPPLY_PROP_TECHNOLOGY,
+ POWER_SUPPLY_PROP_CYCLE_COUNT,
+ POWER_SUPPLY_PROP_VOLTAGE_NOW,
+ POWER_SUPPLY_PROP_CURRENT_NOW,
+ POWER_SUPPLY_PROP_CAPACITY,
+ POWER_SUPPLY_PROP_TEMP,
+ POWER_SUPPLY_PROP_TIME_TO_EMPTY_AVG,
+ POWER_SUPPLY_PROP_TIME_TO_FULL_AVG,
+ POWER_SUPPLY_PROP_SERIAL_NUMBER,
+ POWER_SUPPLY_PROP_VOLTAGE_MAX_DESIGN,
+ POWER_SUPPLY_PROP_ENERGY_NOW,
+ POWER_SUPPLY_PROP_ENERGY_FULL,
+ POWER_SUPPLY_PROP_ENERGY_FULL_DESIGN,
+};
+
+struct bq20z75_info {
+ struct i2c_client *client;
+ struct power_supply power_supply;
+};
+
+static int bq20z75_read_word_data(struct i2c_client *client, u8 address)
+{
+ s32 ret;
+
+ ret = i2c_smbus_read_word_data(client, address);
+ if (ret < 0) {
+ dev_err(&client->dev,
+ "%s: i2c read at address 0x%x failed\n",
+ __func__, address);
+ return ret;
+ }
+ return le16_to_cpu(ret);
+}
+
+static int bq20z75_write_word_data(struct i2c_client *client, u8 address,
+ u16 value)
+{
+ s32 ret;
+
+ ret = i2c_smbus_write_word_data(client, address, le16_to_cpu(value));
+ if (ret < 0) {
+ dev_err(&client->dev,
+ "%s: i2c write to address 0x%x failed\n",
+ __func__, address);
+ return ret;
+ }
+ return 0;
+}
+
+static int bq20z75_get_battery_presence_and_health(
+ struct i2c_client *client, enum power_supply_property psp,
+ union power_supply_propval *val)
+{
+ s32 ret;
+
+ /* Write the status command to the ManufacturerAccess
+ * register and then read the result back from the same
+ * register */
+ ret = bq20z75_write_word_data(client,
+ bq20z75_data[REG_MANUFACTURER_DATA].addr,
+ MANUFACTURER_ACCESS_STATUS);
+ if (ret < 0)
+ return ret;
+
+
+ ret = bq20z75_read_word_data(client,
+ bq20z75_data[REG_MANUFACTURER_DATA].addr);
+ if (ret < 0)
+ return ret;
+
+ if (ret < bq20z75_data[REG_MANUFACTURER_DATA].min_value ||
+ ret > bq20z75_data[REG_MANUFACTURER_DATA].max_value) {
+ val->intval = 0;
+ return 0;
+ }
+
+ /* Mask off the upper nibble of the 2nd byte and the whole
+ * lower byte of the response, then shift right by 8 to get
+ * the 4-bit status code */
+ ret &= 0x0F00;
+ ret >>= 8;
+ if (psp == POWER_SUPPLY_PROP_PRESENT) {
+ if (ret == 0x0F)
+ /* battery removed */
+ val->intval = 0;
+ else
+ val->intval = 1;
+ } else if (psp == POWER_SUPPLY_PROP_HEALTH) {
+ if (ret == 0x09)
+ val->intval = POWER_SUPPLY_HEALTH_UNSPEC_FAILURE;
+ else if (ret == 0x0B)
+ val->intval = POWER_SUPPLY_HEALTH_OVERHEAT;
+ else if (ret == 0x0C)
+ val->intval = POWER_SUPPLY_HEALTH_DEAD;
+ else
+ val->intval = POWER_SUPPLY_HEALTH_GOOD;
+ }
+
+ return 0;
+}
+
+static int bq20z75_get_battery_property(struct i2c_client *client,
+ int reg_offset, enum power_supply_property psp,
+ union power_supply_propval *val)
+{
+ s32 ret;
+
+ ret = bq20z75_read_word_data(client,
+ bq20z75_data[reg_offset].addr);
+ if (ret < 0)
+ return ret;
+
+ /* returned values are 16 bit */
+ if (bq20z75_data[reg_offset].min_value < 0)
+ ret = (s16)ret;
+
+ if (ret >= bq20z75_data[reg_offset].min_value &&
+ ret <= bq20z75_data[reg_offset].max_value) {
+ val->intval = ret;
+ if (psp == POWER_SUPPLY_PROP_STATUS) {
+ if (ret & BATTERY_FULL_CHARGED)
+ val->intval = POWER_SUPPLY_STATUS_FULL;
+ else if (ret & BATTERY_FULL_DISCHARGED)
+ val->intval = POWER_SUPPLY_STATUS_NOT_CHARGING;
+ else if (ret & BATTERY_DISCHARGING)
+ val->intval = POWER_SUPPLY_STATUS_DISCHARGING;
+ else
+ val->intval = POWER_SUPPLY_STATUS_CHARGING;
+ }
+ } else {
+ if (psp == POWER_SUPPLY_PROP_STATUS)
+ val->intval = POWER_SUPPLY_STATUS_UNKNOWN;
+ else
+ val->intval = 0;
+ }
+
+ return 0;
+}
+
+static void bq20z75_unit_adjustment(struct i2c_client *client,
+ enum power_supply_property psp, union power_supply_propval *val)
+{
+#define BASE_UNIT_CONVERSION 1000
+#define BATTERY_MODE_CAP_MULT_WATT (10 * BASE_UNIT_CONVERSION)
+#define TIME_UNIT_CONVERSION 600
+#define TEMP_KELVIN_TO_CELCIUS 2731
+ switch (psp) {
+ case POWER_SUPPLY_PROP_ENERGY_NOW:
+ case POWER_SUPPLY_PROP_ENERGY_FULL:
+ case POWER_SUPPLY_PROP_ENERGY_FULL_DESIGN:
+ val->intval *= BATTERY_MODE_CAP_MULT_WATT;
+ break;
+
+ case POWER_SUPPLY_PROP_VOLTAGE_NOW:
+ case POWER_SUPPLY_PROP_VOLTAGE_MAX_DESIGN:
+ case POWER_SUPPLY_PROP_CURRENT_NOW:
+ val->intval *= BASE_UNIT_CONVERSION;
+ break;
+
+ case POWER_SUPPLY_PROP_TEMP:
+ /* bq20z75 reports battery temperature in units of 0.1 K,
+ * so convert it to 0.1 °C */
+ val->intval -= TEMP_KELVIN_TO_CELCIUS;
+ val->intval *= 10;
+ break;
+
+ case POWER_SUPPLY_PROP_TIME_TO_EMPTY_AVG:
+ case POWER_SUPPLY_PROP_TIME_TO_FULL_AVG:
+ val->intval *= TIME_UNIT_CONVERSION;
+ break;
+
+ default:
+ dev_dbg(&client->dev,
+ "%s: no need for unit conversion %d\n", __func__, psp);
+ }
+}
+
+static int bq20z75_get_battery_capacity(struct i2c_client *client,
+ int reg_offset, enum power_supply_property psp,
+ union power_supply_propval *val)
+{
+ s32 ret;
+
+ ret = bq20z75_read_word_data(client, bq20z75_data[reg_offset].addr);
+ if (ret < 0)
+ return ret;
+
+ if (psp == POWER_SUPPLY_PROP_CAPACITY) {
+ /* bq20z75 spec says that this can be >100 %
+ * even if max value is 100 % */
+ val->intval = min(ret, 100);
+ } else
+ val->intval = ret;
+
+ return 0;
+}
+
+static char bq20z75_serial[5];
+static int bq20z75_get_battery_serial_number(struct i2c_client *client,
+ union power_supply_propval *val)
+{
+ int ret;
+
+ ret = bq20z75_read_word_data(client,
+ bq20z75_data[REG_SERIAL_NUMBER].addr);
+ if (ret < 0)
+ return ret;
+
+ ret = sprintf(bq20z75_serial, "%04x", ret);
+ val->strval = bq20z75_serial;
+
+ return 0;
+}
+
+static int bq20z75_get_property(struct power_supply *psy,
+ enum power_supply_property psp,
+ union power_supply_propval *val)
+{
+ int count;
+ int ret;
+ struct bq20z75_info *bq20z75_device = container_of(psy,
+ struct bq20z75_info, power_supply);
+ struct i2c_client *client = bq20z75_device->client;
+
+ switch (psp) {
+ case POWER_SUPPLY_PROP_PRESENT:
+ case POWER_SUPPLY_PROP_HEALTH:
+ ret = bq20z75_get_battery_presence_and_health(client, psp, val);
+ if (ret)
+ return ret;
+ break;
+
+ case POWER_SUPPLY_PROP_TECHNOLOGY:
+ val->intval = POWER_SUPPLY_TECHNOLOGY_LION;
+ break;
+
+ case POWER_SUPPLY_PROP_ENERGY_NOW:
+ case POWER_SUPPLY_PROP_ENERGY_FULL:
+ case POWER_SUPPLY_PROP_ENERGY_FULL_DESIGN:
+ case POWER_SUPPLY_PROP_CAPACITY:
+ for (count = 0; count < ARRAY_SIZE(bq20z75_data); count++) {
+ if (psp == bq20z75_data[count].psp)
+ break;
+ }
+
+ ret = bq20z75_get_battery_capacity(client, count, psp, val);
+ if (ret)
+ return ret;
+
+ break;
+
+ case POWER_SUPPLY_PROP_SERIAL_NUMBER:
+ ret = bq20z75_get_battery_serial_number(client, val);
+ if (ret)
+ return ret;
+ break;
+
+ case POWER_SUPPLY_PROP_STATUS:
+ case POWER_SUPPLY_PROP_CYCLE_COUNT:
+ case POWER_SUPPLY_PROP_VOLTAGE_NOW:
+ case POWER_SUPPLY_PROP_CURRENT_NOW:
+ case POWER_SUPPLY_PROP_TEMP:
+ case POWER_SUPPLY_PROP_TIME_TO_EMPTY_AVG:
+ case POWER_SUPPLY_PROP_TIME_TO_FULL_AVG:
+ case POWER_SUPPLY_PROP_VOLTAGE_MAX_DESIGN:
+ for (count = 0; count < ARRAY_SIZE(bq20z75_data); count++) {
+ if (psp == bq20z75_data[count].psp)
+ break;
+ }
+
+ ret = bq20z75_get_battery_property(client, count, psp, val);
+ if (ret)
+ return ret;
+
+ break;
+
+ default:
+ dev_err(&client->dev,
+ "%s: INVALID property\n", __func__);
+ return -EINVAL;
+ }
+
+ /* Convert units to match requirements for power supply class */
+ bq20z75_unit_adjustment(client, psp, val);
+
+ dev_dbg(&client->dev,
+ "%s: property = %d, value = %d\n", __func__, psp, val->intval);
+
+ return 0;
+}
+
+static int bq20z75_probe(struct i2c_client *client,
+ const struct i2c_device_id *id)
+{
+ struct bq20z75_info *bq20z75_device;
+ int rc;
+
+ bq20z75_device = kzalloc(sizeof(struct bq20z75_info), GFP_KERNEL);
+ if (!bq20z75_device)
+ return -ENOMEM;
+
+ bq20z75_device->client = client;
+ bq20z75_device->power_supply.name = "battery";
+ bq20z75_device->power_supply.type = POWER_SUPPLY_TYPE_BATTERY;
+ bq20z75_device->power_supply.properties = bq20z75_properties;
+ bq20z75_device->power_supply.num_properties =
+ ARRAY_SIZE(bq20z75_properties);
+ bq20z75_device->power_supply.get_property = bq20z75_get_property;
+
+ i2c_set_clientdata(client, bq20z75_device);
+
+ rc = power_supply_register(&client->dev, &bq20z75_device->power_supply);
+ if (rc) {
+ dev_err(&client->dev,
+ "%s: Failed to register power supply\n", __func__);
+ kfree(bq20z75_device);
+ return rc;
+ }
+
+ dev_info(&client->dev,
+ "%s: battery gas gauge device registered\n", client->name);
+
+ return 0;
+}
+
+static int bq20z75_remove(struct i2c_client *client)
+{
+ struct bq20z75_info *bq20z75_device = i2c_get_clientdata(client);
+
+ power_supply_unregister(&bq20z75_device->power_supply);
+ kfree(bq20z75_device);
+ bq20z75_device = NULL;
+
+ return 0;
+}
+
+#if defined CONFIG_PM
+static int bq20z75_suspend(struct i2c_client *client,
+ pm_message_t state)
+{
+ s32 ret;
+
+ /* write to manufacturer access with sleep command */
+ ret = bq20z75_write_word_data(client,
+ bq20z75_data[REG_MANUFACTURER_DATA].addr,
+ MANUFACTURER_ACCESS_SLEEP);
+ if (ret < 0)
+ return ret;
+
+ return 0;
+}
+#else
+#define bq20z75_suspend NULL
+#endif
+/* any smbus transaction will wake up bq20z75 */
+#define bq20z75_resume NULL
+
+static const struct i2c_device_id bq20z75_id[] = {
+ { "bq20z75", 0 },
+ {}
+};
+
+static struct i2c_driver bq20z75_battery_driver = {
+ .probe = bq20z75_probe,
+ .remove = bq20z75_remove,
+ .suspend = bq20z75_suspend,
+ .resume = bq20z75_resume,
+ .id_table = bq20z75_id,
+ .driver = {
+ .name = "bq20z75-battery",
+ },
+};
+
+static int __init bq20z75_battery_init(void)
+{
+ return i2c_add_driver(&bq20z75_battery_driver);
+}
+module_init(bq20z75_battery_init);
+
+static void __exit bq20z75_battery_exit(void)
+{
+ i2c_del_driver(&bq20z75_battery_driver);
+}
+module_exit(bq20z75_battery_exit);
+
+MODULE_DESCRIPTION("BQ20z75 battery monitor driver");
+MODULE_LICENSE("GPL");
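Most of the register reads in this driver come back as raw 16-bit SBS values that still need sign handling and unit scaling before the power-supply core sees them (that is what the min_value/max_value table and bq20z75_unit_adjustment() above take care of). A reduced, hedged example of the same idea for the CurrentNow register (address 0x0A in the table above); the helper name is illustrative:

#include <linux/i2c.h>

/* Read CurrentNow (signed, reported in mA) and convert to uA for the power-supply class. */
static int example_read_current_uA(struct i2c_client *client, int *current_uA)
{
	s32 raw = i2c_smbus_read_word_data(client, 0x0A);

	if (raw < 0)
		return raw;			/* SMBus error */

	*current_uA = (s16)raw * 1000;		/* sign-extend the 16-bit value, mA -> uA */
	return 0;
}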
diff --git a/drivers/power/bq27x00_battery.c b/drivers/power/bq27x00_battery.c
index 3ec9c6a8896b..eff0273d4030 100644
--- a/drivers/power/bq27x00_battery.c
+++ b/drivers/power/bq27x00_battery.c
@@ -418,6 +418,7 @@ static int bq27x00_battery_remove(struct i2c_client *client)
power_supply_unregister(&di->bat);
+ kfree(di->bus);
kfree(di->bat.name);
mutex_lock(&battery_mutex);
diff --git a/drivers/power/ds2760_battery.c b/drivers/power/ds2760_battery.c
index 4d3b27228a2e..b3c01c16a164 100644
--- a/drivers/power/ds2760_battery.c
+++ b/drivers/power/ds2760_battery.c
@@ -586,6 +586,7 @@ static int ds2760_battery_remove(struct platform_device *pdev)
&di->set_charged_work);
destroy_workqueue(di->monitor_wqueue);
power_supply_unregister(&di->bat);
+ kfree(di);
return 0;
}
diff --git a/drivers/power/ds2782_battery.c b/drivers/power/ds2782_battery.c
index 84d3c43cf2bc..6957e8af6449 100644
--- a/drivers/power/ds2782_battery.c
+++ b/drivers/power/ds2782_battery.c
@@ -44,8 +44,8 @@ struct ds278x_info;
struct ds278x_battery_ops {
int (*get_battery_current)(struct ds278x_info *info, int *current_uA);
- int (*get_battery_voltage)(struct ds278x_info *info, int *voltage_uA);
- int (*get_battery_capacity)(struct ds278x_info *info, int *capacity_uA);
+ int (*get_battery_voltage)(struct ds278x_info *info, int *voltage_uV);
+ int (*get_battery_capacity)(struct ds278x_info *info, int *capacity);
};
#define to_ds278x_info(x) container_of(x, struct ds278x_info, battery)
@@ -137,7 +137,7 @@ static int ds2782_get_current(struct ds278x_info *info, int *current_uA)
return 0;
}
-static int ds2782_get_voltage(struct ds278x_info *info, int *voltage_uA)
+static int ds2782_get_voltage(struct ds278x_info *info, int *voltage_uV)
{
s16 raw;
int err;
@@ -149,7 +149,7 @@ static int ds2782_get_voltage(struct ds278x_info *info, int *voltage_uA)
err = ds278x_read_reg16(info, DS278x_REG_VOLT_MSB, &raw);
if (err)
return err;
- *voltage_uA = (raw / 32) * 4800;
+ *voltage_uV = (raw / 32) * 4800;
return 0;
}
@@ -177,7 +177,7 @@ static int ds2786_get_current(struct ds278x_info *info, int *current_uA)
return 0;
}
-static int ds2786_get_voltage(struct ds278x_info *info, int *voltage_uA)
+static int ds2786_get_voltage(struct ds278x_info *info, int *voltage_uV)
{
s16 raw;
int err;
@@ -189,7 +189,7 @@ static int ds2786_get_voltage(struct ds278x_info *info, int *voltage_uA)
err = ds278x_read_reg16(info, DS278x_REG_VOLT_MSB, &raw);
if (err)
return err;
- *voltage_uA = (raw / 8) * 1220;
+ *voltage_uV = (raw / 8) * 1220;
return 0;
}
diff --git a/drivers/power/isp1704_charger.c b/drivers/power/isp1704_charger.c
new file mode 100644
index 000000000000..72512185f3e2
--- /dev/null
+++ b/drivers/power/isp1704_charger.c
@@ -0,0 +1,369 @@
+/*
+ * ISP1704 USB Charger Detection driver
+ *
+ * Copyright (C) 2010 Nokia Corporation
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/err.h>
+#include <linux/init.h>
+#include <linux/types.h>
+#include <linux/device.h>
+#include <linux/sysfs.h>
+#include <linux/platform_device.h>
+#include <linux/power_supply.h>
+#include <linux/delay.h>
+
+#include <linux/usb/otg.h>
+#include <linux/usb/ulpi.h>
+#include <linux/usb/ch9.h>
+#include <linux/usb/gadget.h>
+
+/* Vendor specific Power Control register */
+#define ISP1704_PWR_CTRL 0x3d
+#define ISP1704_PWR_CTRL_SWCTRL (1 << 0)
+#define ISP1704_PWR_CTRL_DET_COMP (1 << 1)
+#define ISP1704_PWR_CTRL_BVALID_RISE (1 << 2)
+#define ISP1704_PWR_CTRL_BVALID_FALL (1 << 3)
+#define ISP1704_PWR_CTRL_DP_WKPU_EN (1 << 4)
+#define ISP1704_PWR_CTRL_VDAT_DET (1 << 5)
+#define ISP1704_PWR_CTRL_DPVSRC_EN (1 << 6)
+#define ISP1704_PWR_CTRL_HWDETECT (1 << 7)
+
+#define NXP_VENDOR_ID 0x04cc
+
+static u16 isp170x_id[] = {
+ 0x1704,
+ 0x1707,
+};
+
+struct isp1704_charger {
+ struct device *dev;
+ struct power_supply psy;
+ struct otg_transceiver *otg;
+ struct notifier_block nb;
+ struct work_struct work;
+
+ char model[7];
+ unsigned present:1;
+};
+
+/*
+ * ISP1704 detects PS/2 adapters as a charger. To make sure the detected charger
+ * is actually a dedicated charger, the following steps need to be taken.
+ */
+static inline int isp1704_charger_verify(struct isp1704_charger *isp)
+{
+ int ret = 0;
+ u8 r;
+
+ /* Reset the transceiver */
+ r = otg_io_read(isp->otg, ULPI_FUNC_CTRL);
+ r |= ULPI_FUNC_CTRL_RESET;
+ otg_io_write(isp->otg, ULPI_FUNC_CTRL, r);
+ usleep_range(1000, 2000);
+
+ /* Set normal mode */
+ r &= ~(ULPI_FUNC_CTRL_RESET | ULPI_FUNC_CTRL_OPMODE_MASK);
+ otg_io_write(isp->otg, ULPI_FUNC_CTRL, r);
+
+ /* Clear the DP and DM pull-down bits */
+ r = ULPI_OTG_CTRL_DP_PULLDOWN | ULPI_OTG_CTRL_DM_PULLDOWN;
+ otg_io_write(isp->otg, ULPI_CLR(ULPI_OTG_CTRL), r);
+
+ /* Enable strong pull-up on DP (1.5K) and reset */
+ r = ULPI_FUNC_CTRL_TERMSELECT | ULPI_FUNC_CTRL_RESET;
+ otg_io_write(isp->otg, ULPI_SET(ULPI_FUNC_CTRL), r);
+ usleep_range(1000, 2000);
+
+ /* Read the line state */
+ if (!otg_io_read(isp->otg, ULPI_DEBUG)) {
+ /* Disable strong pull-up on DP (1.5K) */
+ otg_io_write(isp->otg, ULPI_CLR(ULPI_FUNC_CTRL),
+ ULPI_FUNC_CTRL_TERMSELECT);
+ return 1;
+ }
+
+ /* Is it a charger or PS/2 connection */
+
+ /* Enable weak pull-up resistor on DP */
+ otg_io_write(isp->otg, ULPI_SET(ISP1704_PWR_CTRL),
+ ISP1704_PWR_CTRL_DP_WKPU_EN);
+
+ /* Disable strong pull-up on DP (1.5K) */
+ otg_io_write(isp->otg, ULPI_CLR(ULPI_FUNC_CTRL),
+ ULPI_FUNC_CTRL_TERMSELECT);
+
+ /* Enable weak pull-down resistor on DM */
+ otg_io_write(isp->otg, ULPI_SET(ULPI_OTG_CTRL),
+ ULPI_OTG_CTRL_DM_PULLDOWN);
+
+ /* It's a charger if the line states are clear */
+ if (!(otg_io_read(isp->otg, ULPI_DEBUG)))
+ ret = 1;
+
+ /* Disable weak pull-up resistor on DP */
+ otg_io_write(isp->otg, ULPI_CLR(ISP1704_PWR_CTRL),
+ ISP1704_PWR_CTRL_DP_WKPU_EN);
+
+ return ret;
+}
+
+static inline int isp1704_charger_detect(struct isp1704_charger *isp)
+{
+ unsigned long timeout;
+ u8 r;
+ int ret = 0;
+
+ /* set SW control bit in PWR_CTRL register */
+ otg_io_write(isp->otg, ISP1704_PWR_CTRL,
+ ISP1704_PWR_CTRL_SWCTRL);
+
+ /* enable manual charger detection */
+ r = (ISP1704_PWR_CTRL_SWCTRL | ISP1704_PWR_CTRL_DPVSRC_EN);
+ otg_io_write(isp->otg, ULPI_SET(ISP1704_PWR_CTRL), r);
+ usleep_range(1000, 2000);
+
+ timeout = jiffies + msecs_to_jiffies(300);
+ do {
+ /* Check if there is a charger */
+ if (otg_io_read(isp->otg, ISP1704_PWR_CTRL)
+ & ISP1704_PWR_CTRL_VDAT_DET) {
+ ret = isp1704_charger_verify(isp);
+ break;
+ }
+ } while (!time_after(jiffies, timeout));
+
+ return ret;
+}
+
+static void isp1704_charger_work(struct work_struct *data)
+{
+ int detect;
+ struct isp1704_charger *isp =
+ container_of(data, struct isp1704_charger, work);
+
+ /*
+ * FIXME Only supporting dedicated chargers even though isp1704 can
+ * detect HUB and HOST chargers. If the device has already been
+ * enumerated, the detection will break the connection.
+ */
+ if (isp->otg->state != OTG_STATE_B_IDLE)
+ return;
+
+ /* disable data pullups */
+ if (isp->otg->gadget)
+ usb_gadget_disconnect(isp->otg->gadget);
+
+ /* detect charger */
+ detect = isp1704_charger_detect(isp);
+ if (detect) {
+ isp->present = detect;
+ power_supply_changed(&isp->psy);
+ }
+
+ /* enable data pullups */
+ if (isp->otg->gadget)
+ usb_gadget_connect(isp->otg->gadget);
+}
+
+static int isp1704_notifier_call(struct notifier_block *nb,
+ unsigned long event, void *unused)
+{
+ struct isp1704_charger *isp =
+ container_of(nb, struct isp1704_charger, nb);
+
+ switch (event) {
+ case USB_EVENT_VBUS:
+ schedule_work(&isp->work);
+ break;
+ case USB_EVENT_NONE:
+ if (isp->present) {
+ isp->present = 0;
+ power_supply_changed(&isp->psy);
+ }
+ break;
+ default:
+ return NOTIFY_DONE;
+ }
+
+ return NOTIFY_OK;
+}
+
+static int isp1704_charger_get_property(struct power_supply *psy,
+ enum power_supply_property psp,
+ union power_supply_propval *val)
+{
+ struct isp1704_charger *isp =
+ container_of(psy, struct isp1704_charger, psy);
+
+ switch (psp) {
+ case POWER_SUPPLY_PROP_PRESENT:
+ val->intval = isp->present;
+ break;
+ case POWER_SUPPLY_PROP_MODEL_NAME:
+ val->strval = isp->model;
+ break;
+ case POWER_SUPPLY_PROP_MANUFACTURER:
+ val->strval = "NXP";
+ break;
+ default:
+ return -EINVAL;
+ }
+ return 0;
+}
+
+static enum power_supply_property power_props[] = {
+ POWER_SUPPLY_PROP_PRESENT,
+ POWER_SUPPLY_PROP_MODEL_NAME,
+ POWER_SUPPLY_PROP_MANUFACTURER,
+};
+
+static inline int isp1704_test_ulpi(struct isp1704_charger *isp)
+{
+ int vendor;
+ int product;
+ int i;
+ int ret = -ENODEV;
+
+ /* Test ULPI interface */
+ ret = otg_io_write(isp->otg, ULPI_SCRATCH, 0xaa);
+ if (ret < 0)
+ return ret;
+
+ ret = otg_io_read(isp->otg, ULPI_SCRATCH);
+ if (ret < 0)
+ return ret;
+
+ if (ret != 0xaa)
+ return -ENODEV;
+
+ /* Verify the product and vendor id matches */
+ vendor = otg_io_read(isp->otg, ULPI_VENDOR_ID_LOW);
+ vendor |= otg_io_read(isp->otg, ULPI_VENDOR_ID_HIGH) << 8;
+ if (vendor != NXP_VENDOR_ID)
+ return -ENODEV;
+
+ product = otg_io_read(isp->otg, ULPI_PRODUCT_ID_LOW);
+ product |= otg_io_read(isp->otg, ULPI_PRODUCT_ID_HIGH) << 8;
+
+ for (i = 0; i < ARRAY_SIZE(isp170x_id); i++) {
+ if (product == isp170x_id[i]) {
+ sprintf(isp->model, "isp%x", product);
+ return product;
+ }
+ }
+
+ dev_err(isp->dev, "product id %x not matching known ids", product);
+
+ return -ENODEV;
+}
+
+static int __devinit isp1704_charger_probe(struct platform_device *pdev)
+{
+ struct isp1704_charger *isp;
+ int ret = -ENODEV;
+
+ isp = kzalloc(sizeof *isp, GFP_KERNEL);
+ if (!isp)
+ return -ENOMEM;
+
+ isp->otg = otg_get_transceiver();
+ if (!isp->otg)
+ goto fail0;
+
+ ret = isp1704_test_ulpi(isp);
+ if (ret < 0)
+ goto fail1;
+
+ isp->dev = &pdev->dev;
+ platform_set_drvdata(pdev, isp);
+
+ isp->psy.name = "isp1704";
+ isp->psy.type = POWER_SUPPLY_TYPE_USB;
+ isp->psy.properties = power_props;
+ isp->psy.num_properties = ARRAY_SIZE(power_props);
+ isp->psy.get_property = isp1704_charger_get_property;
+
+ ret = power_supply_register(isp->dev, &isp->psy);
+ if (ret)
+ goto fail1;
+
+ /*
+ * REVISIT: using work in order to allow the otg notifications to be
+ * made atomically in the future.
+ */
+ INIT_WORK(&isp->work, isp1704_charger_work);
+
+ isp->nb.notifier_call = isp1704_notifier_call;
+
+ ret = otg_register_notifier(isp->otg, &isp->nb);
+ if (ret)
+ goto fail2;
+
+ dev_info(isp->dev, "registered with product id %s\n", isp->model);
+
+ return 0;
+fail2:
+ power_supply_unregister(&isp->psy);
+fail1:
+ otg_put_transceiver(isp->otg);
+fail0:
+ kfree(isp);
+
+ dev_err(&pdev->dev, "failed to register isp1704 with error %d\n", ret);
+
+ return ret;
+}
+
+static int __devexit isp1704_charger_remove(struct platform_device *pdev)
+{
+ struct isp1704_charger *isp = platform_get_drvdata(pdev);
+
+ otg_unregister_notifier(isp->otg, &isp->nb);
+ power_supply_unregister(&isp->psy);
+ otg_put_transceiver(isp->otg);
+ kfree(isp);
+
+ return 0;
+}
+
+static struct platform_driver isp1704_charger_driver = {
+ .driver = {
+ .name = "isp1704_charger",
+ },
+ .probe = isp1704_charger_probe,
+ .remove = __devexit_p(isp1704_charger_remove),
+};
+
+static int __init isp1704_charger_init(void)
+{
+ return platform_driver_register(&isp1704_charger_driver);
+}
+module_init(isp1704_charger_init);
+
+static void __exit isp1704_charger_exit(void)
+{
+ platform_driver_unregister(&isp1704_charger_driver);
+}
+module_exit(isp1704_charger_exit);
+
+MODULE_ALIAS("platform:isp1704_charger");
+MODULE_AUTHOR("Nokia Corporation");
+MODULE_DESCRIPTION("ISP170x USB Charger driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/power/jz4740-battery.c b/drivers/power/jz4740-battery.c
index 20c4b952e9bd..a8108a73593e 100644
--- a/drivers/power/jz4740-battery.c
+++ b/drivers/power/jz4740-battery.c
@@ -383,6 +383,7 @@ static int __devexit jz_battery_remove(struct platform_device *pdev)
iounmap(jz_battery->base);
release_mem_region(jz_battery->mem->start, resource_size(jz_battery->mem));
+ kfree(jz_battery);
return 0;
}
diff --git a/drivers/power/olpc_battery.c b/drivers/power/olpc_battery.c
index aafc1c506eda..5bc1dcf7785e 100644
--- a/drivers/power/olpc_battery.c
+++ b/drivers/power/olpc_battery.c
@@ -271,14 +271,14 @@ static int olpc_bat_get_property(struct power_supply *psy,
if (ret)
return ret;
- val->intval = (int)be16_to_cpu(ec_word) * 9760L / 32;
+ val->intval = (s16)be16_to_cpu(ec_word) * 9760L / 32;
break;
case POWER_SUPPLY_PROP_CURRENT_AVG:
ret = olpc_ec_cmd(EC_BAT_CURRENT, NULL, 0, (void *)&ec_word, 2);
if (ret)
return ret;
- val->intval = (int)be16_to_cpu(ec_word) * 15625L / 120;
+ val->intval = (s16)be16_to_cpu(ec_word) * 15625L / 120;
break;
case POWER_SUPPLY_PROP_CAPACITY:
ret = olpc_ec_cmd(EC_BAT_SOC, NULL, 0, &ec_byte, 1);
@@ -299,7 +299,7 @@ static int olpc_bat_get_property(struct power_supply *psy,
if (ret)
return ret;
- val->intval = (int)be16_to_cpu(ec_word) * 100 / 256;
+ val->intval = (s16)be16_to_cpu(ec_word) * 100 / 256;
break;
case POWER_SUPPLY_PROP_TEMP_AMBIENT:
ret = olpc_ec_cmd(EC_AMB_TEMP, NULL, 0, (void *)&ec_word, 2);
@@ -313,7 +313,7 @@ static int olpc_bat_get_property(struct power_supply *psy,
if (ret)
return ret;
- val->intval = (int)be16_to_cpu(ec_word) * 6250 / 15;
+ val->intval = (s16)be16_to_cpu(ec_word) * 6250 / 15;
break;
case POWER_SUPPLY_PROP_SERIAL_NUMBER:
ret = olpc_ec_cmd(EC_BAT_SERIAL, NULL, 0, (void *)&ser_buf, 8);
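Note: the (s16) casts matter because the EC reports signed quantities; with a plain (int) cast the 16-bit value is zero-extended and a discharge current or sub-zero temperature shows up as a huge positive number. A standalone illustration (the be16_to_cpu byte swap is omitted; the value is assumed already in host order):

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint16_t ec_word = 0xfff6;	/* EC encoding of -10 in two's complement */

	int old_val = (int)ec_word * 15625L / 120;	/* zero-extends: bogus positive */
	int new_val = (int16_t)ec_word * 15625L / 120;	/* sign-extends: stays negative */

	printf("old=%d new=%d\n", old_val, new_val);	/* old=8532031 new=-1302 */
	return 0;
}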
diff --git a/drivers/power/pcf50633-charger.c b/drivers/power/pcf50633-charger.c
index 066f994e6fe5..4fa52e1781a2 100644
--- a/drivers/power/pcf50633-charger.c
+++ b/drivers/power/pcf50633-charger.c
@@ -456,6 +456,7 @@ static int __devexit pcf50633_mbc_remove(struct platform_device *pdev)
for (i = 0; i < ARRAY_SIZE(mbc_irq_handlers); i++)
pcf50633_free_irq(mbc->pcf, mbc_irq_handlers[i]);
+ sysfs_remove_group(&pdev->dev.kobj, &mbc_attr_group);
power_supply_unregister(&mbc->usb);
power_supply_unregister(&mbc->adapter);
power_supply_unregister(&mbc->ac);
diff --git a/drivers/power/power_supply_sysfs.c b/drivers/power/power_supply_sysfs.c
index 9d30eeb8c810..cd1f90754a3a 100644
--- a/drivers/power/power_supply_sysfs.c
+++ b/drivers/power/power_supply_sysfs.c
@@ -42,7 +42,8 @@ static ssize_t power_supply_show_property(struct device *dev,
struct device_attribute *attr,
char *buf) {
static char *type_text[] = {
- "Battery", "UPS", "Mains", "USB"
+ "Battery", "UPS", "Mains", "USB",
+ "USB_DCP", "USB_CDP", "USB_ACA"
};
static char *status_text[] = {
"Unknown", "Charging", "Discharging", "Not charging", "Full"
@@ -138,6 +139,7 @@ static struct device_attribute power_supply_attrs[] = {
POWER_SUPPLY_ATTR(voltage_min_design),
POWER_SUPPLY_ATTR(voltage_now),
POWER_SUPPLY_ATTR(voltage_avg),
+ POWER_SUPPLY_ATTR(current_max),
POWER_SUPPLY_ATTR(current_now),
POWER_SUPPLY_ATTR(current_avg),
POWER_SUPPLY_ATTR(power_now),
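Note: both tables touched here are positional: type_text[] is indexed by the supply's type enum and power_supply_attrs[] by the property enum, so they must grow in the same order as the corresponding enums in power_supply.h (that header hunk is not shown here). A small standalone sketch of the indexing, with assumed stand-in enumerator names:

#include <stdio.h>

/* Assumed stand-ins for the new enum power_supply_type entries. */
enum psy_type {
	PSY_BATTERY, PSY_UPS, PSY_MAINS, PSY_USB,
	PSY_USB_DCP, PSY_USB_CDP, PSY_USB_ACA,
};

static const char *type_text[] = {
	"Battery", "UPS", "Mains", "USB",
	"USB_DCP", "USB_CDP", "USB_ACA",
};

int main(void)
{
	printf("%s\n", type_text[PSY_USB_DCP]);	/* prints "USB_DCP" */
	return 0;
}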
diff --git a/drivers/power/twl4030_charger.c b/drivers/power/twl4030_charger.c
new file mode 100644
index 000000000000..ff1f42398a2e
--- /dev/null
+++ b/drivers/power/twl4030_charger.c
@@ -0,0 +1,565 @@
+/*
+ * TWL4030/TPS65950 BCI (Battery Charger Interface) driver
+ *
+ * Copyright (C) 2010 Gražvydas Ignotas <notasas@gmail.com>
+ *
+ * based on twl4030_bci_battery.c by TI
+ * Copyright (C) 2008 Texas Instruments, Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/slab.h>
+#include <linux/platform_device.h>
+#include <linux/interrupt.h>
+#include <linux/i2c/twl.h>
+#include <linux/power_supply.h>
+#include <linux/notifier.h>
+#include <linux/usb/otg.h>
+
+#define TWL4030_BCIMSTATEC 0x02
+#define TWL4030_BCIICHG 0x08
+#define TWL4030_BCIVAC 0x0a
+#define TWL4030_BCIVBUS 0x0c
+#define TWL4030_BCIMFSTS4 0x10
+#define TWL4030_BCICTL1 0x23
+
+#define TWL4030_BCIAUTOWEN BIT(5)
+#define TWL4030_CONFIG_DONE BIT(4)
+#define TWL4030_BCIAUTOUSB BIT(1)
+#define TWL4030_BCIAUTOAC BIT(0)
+#define TWL4030_CGAIN BIT(5)
+#define TWL4030_USBFASTMCHG BIT(2)
+#define TWL4030_STS_VBUS BIT(7)
+#define TWL4030_STS_USB_ID BIT(2)
+
+/* BCI interrupts */
+#define TWL4030_WOVF BIT(0) /* Watchdog overflow */
+#define TWL4030_TMOVF BIT(1) /* Timer overflow */
+#define TWL4030_ICHGHIGH BIT(2) /* Battery charge current high */
+#define TWL4030_ICHGLOW BIT(3) /* Battery cc. low / FSM state change */
+#define TWL4030_ICHGEOC BIT(4) /* Battery current end-of-charge */
+#define TWL4030_TBATOR2 BIT(5) /* Battery temperature out of range 2 */
+#define TWL4030_TBATOR1 BIT(6) /* Battery temperature out of range 1 */
+#define TWL4030_BATSTS BIT(7) /* Battery status */
+
+#define TWL4030_VBATLVL BIT(0) /* VBAT level */
+#define TWL4030_VBATOV BIT(1) /* VBAT overvoltage */
+#define TWL4030_VBUSOV BIT(2) /* VBUS overvoltage */
+#define TWL4030_ACCHGOV BIT(3) /* Ac charger overvoltage */
+
+#define TWL4030_MSTATEC_USB BIT(4)
+#define TWL4030_MSTATEC_AC BIT(5)
+#define TWL4030_MSTATEC_MASK 0x0f
+#define TWL4030_MSTATEC_QUICK1 0x02
+#define TWL4030_MSTATEC_QUICK7 0x07
+#define TWL4030_MSTATEC_COMPLETE1 0x0b
+#define TWL4030_MSTATEC_COMPLETE4 0x0e
+
+static bool allow_usb;
+module_param(allow_usb, bool, 1);
+MODULE_PARM_DESC(allow_usb, "Allow USB charge drawing default current");
+
+struct twl4030_bci {
+ struct device *dev;
+ struct power_supply ac;
+ struct power_supply usb;
+ struct otg_transceiver *transceiver;
+ struct notifier_block otg_nb;
+ int irq_chg;
+ int irq_bci;
+};
+
+/*
+ * clear and set bits on a given register on a given module
+ */
+static int twl4030_clear_set(u8 mod_no, u8 clear, u8 set, u8 reg)
+{
+ u8 val = 0;
+ int ret;
+
+ ret = twl_i2c_read_u8(mod_no, &val, reg);
+ if (ret)
+ return ret;
+
+ val &= ~clear;
+ val |= set;
+
+ return twl_i2c_write_u8(mod_no, val, reg);
+}
+
+static int twl4030_bci_read(u8 reg, u8 *val)
+{
+ return twl_i2c_read_u8(TWL4030_MODULE_MAIN_CHARGE, val, reg);
+}
+
+static int twl4030_clear_set_boot_bci(u8 clear, u8 set)
+{
+ return twl4030_clear_set(TWL4030_MODULE_PM_MASTER, 0,
+ TWL4030_CONFIG_DONE | TWL4030_BCIAUTOWEN | set,
+ TWL4030_PM_MASTER_BOOT_BCI);
+}
+
+static int twl4030bci_read_adc_val(u8 reg)
+{
+ int ret, temp;
+ u8 val;
+
+ /* read MSB */
+ ret = twl4030_bci_read(reg + 1, &val);
+ if (ret)
+ return ret;
+
+ temp = (int)(val & 0x03) << 8;
+
+ /* read LSB */
+ ret = twl4030_bci_read(reg, &val);
+ if (ret)
+ return ret;
+
+ return temp | val;
+}
+
+/*
+ * Check if VBUS power is present
+ */
+static int twl4030_bci_have_vbus(struct twl4030_bci *bci)
+{
+ int ret;
+ u8 hwsts;
+
+ ret = twl_i2c_read_u8(TWL4030_MODULE_PM_MASTER, &hwsts,
+ TWL4030_PM_MASTER_STS_HW_CONDITIONS);
+ if (ret < 0)
+ return 0;
+
+ dev_dbg(bci->dev, "check_vbus: HW_CONDITIONS %02x\n", hwsts);
+
+ /* in case we also have STS_USB_ID, VBUS is driven by TWL itself */
+ if ((hwsts & TWL4030_STS_VBUS) && !(hwsts & TWL4030_STS_USB_ID))
+ return 1;
+
+ return 0;
+}
+
+/*
+ * Enable/Disable USB Charge functionality.
+ */
+static int twl4030_charger_enable_usb(struct twl4030_bci *bci, bool enable)
+{
+ int ret;
+
+ if (enable) {
+ /* Check for USB charger connected */
+ if (!twl4030_bci_have_vbus(bci))
+ return -ENODEV;
+
+ /*
+ * Until we can find out what current the device can provide,
+ * require a module param to enable USB charging.
+ */
+ if (!allow_usb) {
+ dev_warn(bci->dev, "USB charging is disabled.\n");
+ return -EACCES;
+ }
+
+ /* forcing the field BCIAUTOUSB (BOOT_BCI[1]) to 1 */
+ ret = twl4030_clear_set_boot_bci(0, TWL4030_BCIAUTOUSB);
+ if (ret < 0)
+ return ret;
+
+ /* forcing USBFASTMCHG(BCIMFSTS4[2]) to 1 */
+ ret = twl4030_clear_set(TWL4030_MODULE_MAIN_CHARGE, 0,
+ TWL4030_USBFASTMCHG, TWL4030_BCIMFSTS4);
+ } else {
+ ret = twl4030_clear_set_boot_bci(TWL4030_BCIAUTOUSB, 0);
+ }
+
+ return ret;
+}
+
+/*
+ * Enable/Disable AC Charge functionality.
+ */
+static int twl4030_charger_enable_ac(bool enable)
+{
+ int ret;
+
+ if (enable)
+ ret = twl4030_clear_set_boot_bci(0, TWL4030_BCIAUTOAC);
+ else
+ ret = twl4030_clear_set_boot_bci(TWL4030_BCIAUTOAC, 0);
+
+ return ret;
+}
+
+/*
+ * TWL4030 CHG_PRES (AC charger presence) events
+ */
+static irqreturn_t twl4030_charger_interrupt(int irq, void *arg)
+{
+ struct twl4030_bci *bci = arg;
+
+ dev_dbg(bci->dev, "CHG_PRES irq\n");
+ power_supply_changed(&bci->ac);
+ power_supply_changed(&bci->usb);
+
+ return IRQ_HANDLED;
+}
+
+/*
+ * TWL4030 BCI monitoring events
+ */
+static irqreturn_t twl4030_bci_interrupt(int irq, void *arg)
+{
+ struct twl4030_bci *bci = arg;
+ u8 irqs1, irqs2;
+ int ret;
+
+ ret = twl_i2c_read_u8(TWL4030_MODULE_INTERRUPTS, &irqs1,
+ TWL4030_INTERRUPTS_BCIISR1A);
+ if (ret < 0)
+ return IRQ_HANDLED;
+
+ ret = twl_i2c_read_u8(TWL4030_MODULE_INTERRUPTS, &irqs2,
+ TWL4030_INTERRUPTS_BCIISR2A);
+ if (ret < 0)
+ return IRQ_HANDLED;
+
+ dev_dbg(bci->dev, "BCI irq %02x %02x\n", irqs2, irqs1);
+
+ if (irqs1 & (TWL4030_ICHGLOW | TWL4030_ICHGEOC)) {
+ /* charger state change, inform the core */
+ power_supply_changed(&bci->ac);
+ power_supply_changed(&bci->usb);
+ }
+
+ /* various monitoring events, for now we just log them here */
+ if (irqs1 & (TWL4030_TBATOR2 | TWL4030_TBATOR1))
+ dev_warn(bci->dev, "battery temperature out of range\n");
+
+ if (irqs1 & TWL4030_BATSTS)
+ dev_crit(bci->dev, "battery disconnected\n");
+
+ if (irqs2 & TWL4030_VBATOV)
+ dev_crit(bci->dev, "VBAT overvoltage\n");
+
+ if (irqs2 & TWL4030_VBUSOV)
+ dev_crit(bci->dev, "VBUS overvoltage\n");
+
+ if (irqs2 & TWL4030_ACCHGOV)
+ dev_crit(bci->dev, "Ac charger overvoltage\n");
+
+ return IRQ_HANDLED;
+}
+
+static int twl4030_bci_usb_ncb(struct notifier_block *nb, unsigned long val,
+ void *priv)
+{
+ struct twl4030_bci *bci = container_of(nb, struct twl4030_bci, otg_nb);
+
+ dev_dbg(bci->dev, "OTG notify %lu\n", val);
+
+ switch (val) {
+ case USB_EVENT_VBUS:
+ case USB_EVENT_CHARGER:
+ twl4030_charger_enable_usb(bci, true);
+ break;
+ case USB_EVENT_NONE:
+ twl4030_charger_enable_usb(bci, false);
+ break;
+ }
+
+ return NOTIFY_OK;
+}
+
+/*
+ * TI provided formulas:
+ * CGAIN == 0: ICHG = (BCIICHG * 1.7) / (2^10 - 1) - 0.85
+ * CGAIN == 1: ICHG = (BCIICHG * 3.4) / (2^10 - 1) - 1.7
+ * Here we use integer approximation of:
+ * CGAIN == 0: val * 1.6618 - 0.85
+ * CGAIN == 1: (val * 1.6618 - 0.85) * 2
+ */
+static int twl4030_charger_get_current(void)
+{
+ int curr;
+ int ret;
+ u8 bcictl1;
+
+ curr = twl4030bci_read_adc_val(TWL4030_BCIICHG);
+ if (curr < 0)
+ return curr;
+
+ ret = twl4030_bci_read(TWL4030_BCICTL1, &bcictl1);
+ if (ret)
+ return ret;
+
+ ret = (curr * 16618 - 850 * 10000) / 10;
+ if (bcictl1 & TWL4030_CGAIN)
+ ret *= 2;
+
+ return ret;
+}
+
+/*
+ * Returns the main charge FSM state
+ * Or < 0 on failure.
+ */
+static int twl4030bci_state(struct twl4030_bci *bci)
+{
+ int ret;
+ u8 state;
+
+ ret = twl4030_bci_read(TWL4030_BCIMSTATEC, &state);
+ if (ret) {
+ pr_err("twl4030_bci: error reading BCIMSTATEC\n");
+ return ret;
+ }
+
+ dev_dbg(bci->dev, "state: %02x\n", state);
+
+ return state;
+}
+
+static int twl4030_bci_state_to_status(int state)
+{
+ state &= TWL4030_MSTATEC_MASK;
+ if (TWL4030_MSTATEC_QUICK1 <= state && state <= TWL4030_MSTATEC_QUICK7)
+ return POWER_SUPPLY_STATUS_CHARGING;
+ else if (TWL4030_MSTATEC_COMPLETE1 <= state &&
+ state <= TWL4030_MSTATEC_COMPLETE4)
+ return POWER_SUPPLY_STATUS_FULL;
+ else
+ return POWER_SUPPLY_STATUS_NOT_CHARGING;
+}
+
+static int twl4030_bci_get_property(struct power_supply *psy,
+ enum power_supply_property psp,
+ union power_supply_propval *val)
+{
+ struct twl4030_bci *bci = dev_get_drvdata(psy->dev->parent);
+ int is_charging;
+ int state;
+ int ret;
+
+ state = twl4030bci_state(bci);
+ if (state < 0)
+ return state;
+
+ if (psy->type == POWER_SUPPLY_TYPE_USB)
+ is_charging = state & TWL4030_MSTATEC_USB;
+ else
+ is_charging = state & TWL4030_MSTATEC_AC;
+
+ switch (psp) {
+ case POWER_SUPPLY_PROP_STATUS:
+ if (is_charging)
+ val->intval = twl4030_bci_state_to_status(state);
+ else
+ val->intval = POWER_SUPPLY_STATUS_NOT_CHARGING;
+ break;
+ case POWER_SUPPLY_PROP_VOLTAGE_NOW:
+ /* charging must be active for meaningful result */
+ if (!is_charging)
+ return -ENODATA;
+ if (psy->type == POWER_SUPPLY_TYPE_USB) {
+ ret = twl4030bci_read_adc_val(TWL4030_BCIVBUS);
+ if (ret < 0)
+ return ret;
+ /* BCIVBUS uses ADCIN8, 7/1023 V/step */
+ val->intval = ret * 6843;
+ } else {
+ ret = twl4030bci_read_adc_val(TWL4030_BCIVAC);
+ if (ret < 0)
+ return ret;
+ /* BCIVAC uses ADCIN11, 10/1023 V/step */
+ val->intval = ret * 9775;
+ }
+ break;
+ case POWER_SUPPLY_PROP_CURRENT_NOW:
+ if (!is_charging)
+ return -ENODATA;
+ /* current measurement is shared between AC and USB */
+ ret = twl4030_charger_get_current();
+ if (ret < 0)
+ return ret;
+ val->intval = ret;
+ break;
+ case POWER_SUPPLY_PROP_ONLINE:
+ val->intval = is_charging &&
+ twl4030_bci_state_to_status(state) !=
+ POWER_SUPPLY_STATUS_NOT_CHARGING;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static enum power_supply_property twl4030_charger_props[] = {
+ POWER_SUPPLY_PROP_STATUS,
+ POWER_SUPPLY_PROP_ONLINE,
+ POWER_SUPPLY_PROP_VOLTAGE_NOW,
+ POWER_SUPPLY_PROP_CURRENT_NOW,
+};
+
+static int __init twl4030_bci_probe(struct platform_device *pdev)
+{
+ struct twl4030_bci *bci;
+ int ret;
+ int reg;
+
+ bci = kzalloc(sizeof(*bci), GFP_KERNEL);
+ if (bci == NULL)
+ return -ENOMEM;
+
+ bci->dev = &pdev->dev;
+ bci->irq_chg = platform_get_irq(pdev, 0);
+ bci->irq_bci = platform_get_irq(pdev, 1);
+
+ platform_set_drvdata(pdev, bci);
+
+ bci->ac.name = "twl4030_ac";
+ bci->ac.type = POWER_SUPPLY_TYPE_MAINS;
+ bci->ac.properties = twl4030_charger_props;
+ bci->ac.num_properties = ARRAY_SIZE(twl4030_charger_props);
+ bci->ac.get_property = twl4030_bci_get_property;
+
+ ret = power_supply_register(&pdev->dev, &bci->ac);
+ if (ret) {
+ dev_err(&pdev->dev, "failed to register ac: %d\n", ret);
+ goto fail_register_ac;
+ }
+
+ bci->usb.name = "twl4030_usb";
+ bci->usb.type = POWER_SUPPLY_TYPE_USB;
+ bci->usb.properties = twl4030_charger_props;
+ bci->usb.num_properties = ARRAY_SIZE(twl4030_charger_props);
+ bci->usb.get_property = twl4030_bci_get_property;
+
+ ret = power_supply_register(&pdev->dev, &bci->usb);
+ if (ret) {
+ dev_err(&pdev->dev, "failed to register usb: %d\n", ret);
+ goto fail_register_usb;
+ }
+
+ ret = request_threaded_irq(bci->irq_chg, NULL,
+ twl4030_charger_interrupt, 0, pdev->name, bci);
+ if (ret < 0) {
+ dev_err(&pdev->dev, "could not request irq %d, status %d\n",
+ bci->irq_chg, ret);
+ goto fail_chg_irq;
+ }
+
+ ret = request_threaded_irq(bci->irq_bci, NULL,
+ twl4030_bci_interrupt, 0, pdev->name, bci);
+ if (ret < 0) {
+ dev_err(&pdev->dev, "could not request irq %d, status %d\n",
+ bci->irq_bci, ret);
+ goto fail_bci_irq;
+ }
+
+ bci->transceiver = otg_get_transceiver();
+ if (bci->transceiver != NULL) {
+ bci->otg_nb.notifier_call = twl4030_bci_usb_ncb;
+ otg_register_notifier(bci->transceiver, &bci->otg_nb);
+ }
+
+ /* Enable interrupts now. */
+ reg = ~(TWL4030_ICHGLOW | TWL4030_ICHGEOC | TWL4030_TBATOR2 |
+ TWL4030_TBATOR1 | TWL4030_BATSTS);
+ ret = twl_i2c_write_u8(TWL4030_MODULE_INTERRUPTS, reg,
+ TWL4030_INTERRUPTS_BCIIMR1A);
+ if (ret < 0) {
+ dev_err(&pdev->dev, "failed to unmask interrupts: %d\n", ret);
+ goto fail_unmask_interrupts;
+ }
+
+ reg = ~(TWL4030_VBATOV | TWL4030_VBUSOV | TWL4030_ACCHGOV);
+ ret = twl_i2c_write_u8(TWL4030_MODULE_INTERRUPTS, reg,
+ TWL4030_INTERRUPTS_BCIIMR2A);
+ if (ret < 0)
+ dev_warn(&pdev->dev, "failed to unmask interrupts: %d\n", ret);
+
+ twl4030_charger_enable_ac(true);
+ twl4030_charger_enable_usb(bci, true);
+
+ return 0;
+
+fail_unmask_interrupts:
+ if (bci->transceiver != NULL) {
+ otg_unregister_notifier(bci->transceiver, &bci->otg_nb);
+ otg_put_transceiver(bci->transceiver);
+ }
+ free_irq(bci->irq_bci, bci);
+fail_bci_irq:
+ free_irq(bci->irq_chg, bci);
+fail_chg_irq:
+ power_supply_unregister(&bci->usb);
+fail_register_usb:
+ power_supply_unregister(&bci->ac);
+fail_register_ac:
+ platform_set_drvdata(pdev, NULL);
+ kfree(bci);
+
+ return ret;
+}
+
+static int __exit twl4030_bci_remove(struct platform_device *pdev)
+{
+ struct twl4030_bci *bci = platform_get_drvdata(pdev);
+
+ twl4030_charger_enable_ac(false);
+ twl4030_charger_enable_usb(bci, false);
+
+ /* mask interrupts */
+ twl_i2c_write_u8(TWL4030_MODULE_INTERRUPTS, 0xff,
+ TWL4030_INTERRUPTS_BCIIMR1A);
+ twl_i2c_write_u8(TWL4030_MODULE_INTERRUPTS, 0xff,
+ TWL4030_INTERRUPTS_BCIIMR2A);
+
+ if (bci->transceiver != NULL) {
+ otg_unregister_notifier(bci->transceiver, &bci->otg_nb);
+ otg_put_transceiver(bci->transceiver);
+ }
+ free_irq(bci->irq_bci, bci);
+ free_irq(bci->irq_chg, bci);
+ power_supply_unregister(&bci->usb);
+ power_supply_unregister(&bci->ac);
+ platform_set_drvdata(pdev, NULL);
+ kfree(bci);
+
+ return 0;
+}
+
+static struct platform_driver twl4030_bci_driver = {
+ .driver = {
+ .name = "twl4030_bci",
+ .owner = THIS_MODULE,
+ },
+ .remove = __exit_p(twl4030_bci_remove),
+};
+
+static int __init twl4030_bci_init(void)
+{
+ return platform_driver_probe(&twl4030_bci_driver, twl4030_bci_probe);
+}
+module_init(twl4030_bci_init);
+
+static void __exit twl4030_bci_exit(void)
+{
+ platform_driver_unregister(&twl4030_bci_driver);
+}
+module_exit(twl4030_bci_exit);
+
+MODULE_AUTHOR("Gražydas Ignotas");
+MODULE_DESCRIPTION("TWL4030 Battery Charger Interface driver");
+MODULE_LICENSE("GPL");
+MODULE_ALIAS("platform:twl4030_bci");
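Note: the charge-current path in the new driver combines two steps shown above: twl4030bci_read_adc_val() stitches a 10-bit value out of two registers (2 MSBs plus 8 LSBs), and twl4030_charger_get_current() turns it into microamps with the integer approximation from the comment block. A standalone sketch of both, using a fabricated reading:

#include <stdio.h>
#include <stdint.h>

/* 10-bit BCI ADC value: 2 MSBs in one register, 8 LSBs in the other. */
static int bci_adc_val(uint8_t msb, uint8_t lsb)
{
	return ((int)(msb & 0x03) << 8) | lsb;
}

/* CGAIN == 0: ICHG[uA] ~= val * 1661.8 - 850000; doubled when CGAIN == 1. */
static int bci_ichg_uA(int val, int cgain)
{
	int uA = (val * 16618 - 850 * 10000) / 10;

	return cgain ? uA * 2 : uA;
}

int main(void)
{
	int val = bci_adc_val(0x02, 0x80);	/* fabricated registers -> 640 */

	printf("ICHG = %d uA\n", bci_ichg_uA(val, 0));	/* 213552 uA */
	return 0;
}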
diff --git a/drivers/power/wm831x_power.c b/drivers/power/wm831x_power.c
index fbcc36dae470..ddf8cf5f3204 100644
--- a/drivers/power/wm831x_power.c
+++ b/drivers/power/wm831x_power.c
@@ -267,7 +267,6 @@ static void wm831x_config_battery(struct wm831x *wm831x)
ret = wm831x_set_bits(wm831x, WM831X_CHARGER_CONTROL_1,
WM831X_CHG_ENA_MASK |
WM831X_CHG_FAST_MASK |
- WM831X_CHG_ITERM_MASK |
WM831X_CHG_ITERM_MASK,
reg1);
if (ret != 0)
@@ -612,6 +611,7 @@ static __devexit int wm831x_power_remove(struct platform_device *pdev)
power_supply_unregister(&wm831x_power->battery);
power_supply_unregister(&wm831x_power->wall);
power_supply_unregister(&wm831x_power->usb);
+ kfree(wm831x_power);
return 0;
}
diff --git a/drivers/rapidio/rio-driver.c b/drivers/rapidio/rio-driver.c
index 3222fa3c808c..0f4a53bdaa3c 100644
--- a/drivers/rapidio/rio-driver.c
+++ b/drivers/rapidio/rio-driver.c
@@ -192,7 +192,7 @@ static int rio_match_bus(struct device *dev, struct device_driver *drv)
out:return 0;
}
-static struct device rio_bus = {
+struct device rio_bus = {
.init_name = "rapidio",
};
diff --git a/drivers/rapidio/rio-scan.c b/drivers/rapidio/rio-scan.c
index 8070e074c739..1eb82c4c712e 100644
--- a/drivers/rapidio/rio-scan.c
+++ b/drivers/rapidio/rio-scan.c
@@ -48,7 +48,7 @@ DEFINE_SPINLOCK(rio_global_list_lock);
static int next_destid = 0;
static int next_switchid = 0;
static int next_net = 0;
-static int next_comptag;
+static int next_comptag = 1;
static struct timer_list rio_enum_timer =
TIMER_INITIALIZER(rio_enum_timeout, 0, 0);
@@ -121,27 +121,6 @@ static int rio_clear_locks(struct rio_mport *port)
u32 result;
int ret = 0;
- /* Assign component tag to all devices */
- next_comptag = 1;
- rio_local_write_config_32(port, RIO_COMPONENT_TAG_CSR, next_comptag++);
-
- list_for_each_entry(rdev, &rio_devices, global_list) {
- /* Mark device as discovered */
- rio_read_config_32(rdev,
- rdev->phys_efptr + RIO_PORT_GEN_CTL_CSR,
- &result);
- rio_write_config_32(rdev,
- rdev->phys_efptr + RIO_PORT_GEN_CTL_CSR,
- result | RIO_PORT_GEN_DISCOVERED);
-
- rio_write_config_32(rdev, RIO_COMPONENT_TAG_CSR, next_comptag);
- rdev->comp_tag = next_comptag++;
- if (next_comptag >= 0x10000) {
- pr_err("RIO: Component Tag Counter Overflow\n");
- break;
- }
- }
-
/* Release host device id locks */
rio_local_write_config_32(port, RIO_HOST_DID_LOCK_CSR,
port->host_deviceid);
@@ -162,6 +141,15 @@ static int rio_clear_locks(struct rio_mport *port)
rdev->vid, rdev->did);
ret = -EINVAL;
}
+
+ /* Mark device as discovered and enable master */
+ rio_read_config_32(rdev,
+ rdev->phys_efptr + RIO_PORT_GEN_CTL_CSR,
+ &result);
+ result |= RIO_PORT_GEN_DISCOVERED | RIO_PORT_GEN_MASTER;
+ rio_write_config_32(rdev,
+ rdev->phys_efptr + RIO_PORT_GEN_CTL_CSR,
+ result);
}
return ret;
@@ -420,11 +408,27 @@ static struct rio_dev __devinit *rio_setup_device(struct rio_net *net,
hopcount, RIO_EFB_ERR_MGMNT);
}
+ if (rdev->pef & (RIO_PEF_SWITCH | RIO_PEF_MULTIPORT)) {
+ rio_mport_read_config_32(port, destid, hopcount,
+ RIO_SWP_INFO_CAR, &rdev->swpinfo);
+ }
+
rio_mport_read_config_32(port, destid, hopcount, RIO_SRC_OPS_CAR,
&rdev->src_ops);
rio_mport_read_config_32(port, destid, hopcount, RIO_DST_OPS_CAR,
&rdev->dst_ops);
+ if (do_enum) {
+ /* Assign component tag to device */
+ if (next_comptag >= 0x10000) {
+ pr_err("RIO: Component Tag Counter Overflow\n");
+ goto cleanup;
+ }
+ rio_mport_write_config_32(port, destid, hopcount,
+ RIO_COMPONENT_TAG_CSR, next_comptag);
+ rdev->comp_tag = next_comptag++;
+ }
+
if (rio_device_has_destid(port, rdev->src_ops, rdev->dst_ops)) {
if (do_enum) {
rio_set_device_id(port, destid, hopcount, next_destid);
@@ -439,9 +443,10 @@ static struct rio_dev __devinit *rio_setup_device(struct rio_net *net,
/* If a PE has both switch and other functions, show it as a switch */
if (rio_is_switch(rdev)) {
- rio_mport_read_config_32(port, destid, hopcount,
- RIO_SWP_INFO_CAR, &rdev->swpinfo);
- rswitch = kzalloc(sizeof(struct rio_switch), GFP_KERNEL);
+ rswitch = kzalloc(sizeof(*rswitch) +
+ RIO_GET_TOTAL_PORTS(rdev->swpinfo) *
+ sizeof(rswitch->nextdev[0]),
+ GFP_KERNEL);
if (!rswitch)
goto cleanup;
rswitch->switchid = next_switchid;
@@ -458,6 +463,7 @@ static struct rio_dev __devinit *rio_setup_device(struct rio_net *net,
rdid++)
rswitch->route_table[rdid] = RIO_INVALID_ROUTE;
rdev->rswitch = rswitch;
+ rswitch->rdev = rdev;
dev_set_name(&rdev->dev, "%02x:s:%04x", rdev->net->id,
rdev->rswitch->switchid);
rio_switch_init(rdev, do_enum);
@@ -478,6 +484,7 @@ static struct rio_dev __devinit *rio_setup_device(struct rio_net *net,
}
rdev->dev.bus = &rio_bus_type;
+ rdev->dev.parent = &rio_bus;
device_initialize(&rdev->dev);
rdev->dev.release = rio_release_dev;
@@ -718,86 +725,53 @@ static u16 rio_get_host_deviceid_lock(struct rio_mport *port, u8 hopcount)
}
/**
- * rio_get_swpinfo_inport- Gets the ingress port number
- * @mport: Master port to send transaction
- * @destid: Destination ID associated with the switch
- * @hopcount: Number of hops to the device
- *
- * Returns port number being used to access the switch device.
- */
-static u8
-rio_get_swpinfo_inport(struct rio_mport *mport, u16 destid, u8 hopcount)
-{
- u32 result;
-
- rio_mport_read_config_32(mport, destid, hopcount, RIO_SWP_INFO_CAR,
- &result);
-
- return (u8) (result & 0xff);
-}
-
-/**
- * rio_get_swpinfo_tports- Gets total number of ports on the switch
- * @mport: Master port to send transaction
- * @destid: Destination ID associated with the switch
- * @hopcount: Number of hops to the device
- *
- * Returns total numbers of ports implemented by the switch device.
- */
-static u8 rio_get_swpinfo_tports(struct rio_mport *mport, u16 destid,
- u8 hopcount)
-{
- u32 result;
-
- rio_mport_read_config_32(mport, destid, hopcount, RIO_SWP_INFO_CAR,
- &result);
-
- return RIO_GET_TOTAL_PORTS(result);
-}
-
-/**
- * rio_net_add_mport- Add a master port to a RIO network
- * @net: RIO network
- * @port: Master port to add
- *
- * Adds a master port to the network list of associated master
- * ports..
- */
-static void rio_net_add_mport(struct rio_net *net, struct rio_mport *port)
-{
- spin_lock(&rio_global_list_lock);
- list_add_tail(&port->nnode, &net->mports);
- spin_unlock(&rio_global_list_lock);
-}
-
-/**
* rio_enum_peer- Recursively enumerate a RIO network through a master port
* @net: RIO network being enumerated
* @port: Master port to send transactions
* @hopcount: Number of hops into the network
+ * @prev: Previous RIO device connected to the enumerated one
+ * @prev_port: Port on previous RIO device
*
* Recursively enumerates a RIO network. Transactions are sent via the
* master port passed in @port.
*/
static int __devinit rio_enum_peer(struct rio_net *net, struct rio_mport *port,
- u8 hopcount)
+ u8 hopcount, struct rio_dev *prev, int prev_port)
{
int port_num;
- int num_ports;
int cur_destid;
int sw_destid;
int sw_inport;
struct rio_dev *rdev;
u16 destid;
+ u32 regval;
int tmp;
+ if (rio_mport_chk_dev_access(port,
+ RIO_ANY_DESTID(port->sys_size), hopcount)) {
+ pr_debug("RIO: device access check failed\n");
+ return -1;
+ }
+
if (rio_get_host_deviceid_lock(port, hopcount) == port->host_deviceid) {
pr_debug("RIO: PE already discovered by this host\n");
/*
* Already discovered by this host. Add it as another
- * master port for the current network.
+ * link to the existing device.
*/
- rio_net_add_mport(net, port);
+ rio_mport_read_config_32(port, RIO_ANY_DESTID(port->sys_size),
+ hopcount, RIO_COMPONENT_TAG_CSR, &regval);
+
+ if (regval) {
+ rdev = rio_get_comptag((regval & 0xffff), NULL);
+
+ if (rdev && prev && rio_is_switch(prev)) {
+ pr_debug("RIO: redundant path to %s\n",
+ rio_name(rdev));
+ prev->rswitch->nextdev[prev_port] = rdev;
+ }
+ }
+
return 0;
}
@@ -828,13 +802,15 @@ static int __devinit rio_enum_peer(struct rio_net *net, struct rio_mport *port,
if (rdev) {
/* Add device to the global and bus/net specific list. */
list_add_tail(&rdev->net_list, &net->devices);
+ rdev->prev = prev;
+ if (prev && rio_is_switch(prev))
+ prev->rswitch->nextdev[prev_port] = rdev;
} else
return -1;
if (rio_is_switch(rdev)) {
next_switchid++;
- sw_inport = rio_get_swpinfo_inport(port,
- RIO_ANY_DESTID(port->sys_size), hopcount);
+ sw_inport = RIO_GET_PORT_NUM(rdev->swpinfo);
rio_route_add_entry(port, rdev->rswitch, RIO_GLOBAL_TABLE,
port->host_deviceid, sw_inport, 0);
rdev->rswitch->route_table[port->host_deviceid] = sw_inport;
@@ -847,14 +823,14 @@ static int __devinit rio_enum_peer(struct rio_net *net, struct rio_mport *port,
rdev->rswitch->route_table[destid] = sw_inport;
}
- num_ports =
- rio_get_swpinfo_tports(port, RIO_ANY_DESTID(port->sys_size),
- hopcount);
pr_debug(
"RIO: found %s (vid %4.4x did %4.4x) with %d ports\n",
- rio_name(rdev), rdev->vid, rdev->did, num_ports);
+ rio_name(rdev), rdev->vid, rdev->did,
+ RIO_GET_TOTAL_PORTS(rdev->swpinfo));
sw_destid = next_destid;
- for (port_num = 0; port_num < num_ports; port_num++) {
+ for (port_num = 0;
+ port_num < RIO_GET_TOTAL_PORTS(rdev->swpinfo);
+ port_num++) {
/*Enable Input Output Port (transmitter reviever)*/
rio_enable_rx_tx_port(port, 0,
RIO_ANY_DESTID(port->sys_size),
@@ -879,7 +855,8 @@ static int __devinit rio_enum_peer(struct rio_net *net, struct rio_mport *port,
RIO_ANY_DESTID(port->sys_size),
port_num, 0);
- if (rio_enum_peer(net, port, hopcount + 1) < 0)
+ if (rio_enum_peer(net, port, hopcount + 1,
+ rdev, port_num) < 0)
return -1;
/* Update routing tables */
@@ -945,10 +922,11 @@ static int __devinit rio_enum_peer(struct rio_net *net, struct rio_mport *port,
*/
static int rio_enum_complete(struct rio_mport *port)
{
- u32 tag_csr;
+ u32 regval;
- rio_local_read_config_32(port, RIO_COMPONENT_TAG_CSR, &tag_csr);
- return (tag_csr & 0xffff) ? 1 : 0;
+ rio_local_read_config_32(port, port->phys_efptr + RIO_PORT_GEN_CTL_CSR,
+ &regval);
+ return (regval & RIO_PORT_GEN_MASTER) ? 1 : 0;
}
/**
@@ -966,7 +944,6 @@ rio_disc_peer(struct rio_net *net, struct rio_mport *port, u16 destid,
u8 hopcount)
{
u8 port_num, route_port;
- int num_ports;
struct rio_dev *rdev;
u16 ndestid;
@@ -983,13 +960,14 @@ rio_disc_peer(struct rio_net *net, struct rio_mport *port, u16 destid,
/* Associated destid is how we accessed this switch */
rdev->rswitch->destid = destid;
- num_ports = rio_get_swpinfo_tports(port, destid, hopcount);
pr_debug(
"RIO: found %s (vid %4.4x did %4.4x) with %d ports\n",
- rio_name(rdev), rdev->vid, rdev->did, num_ports);
- for (port_num = 0; port_num < num_ports; port_num++) {
- if (rio_get_swpinfo_inport(port, destid, hopcount) ==
- port_num)
+ rio_name(rdev), rdev->vid, rdev->did,
+ RIO_GET_TOTAL_PORTS(rdev->swpinfo));
+ for (port_num = 0;
+ port_num < RIO_GET_TOTAL_PORTS(rdev->swpinfo);
+ port_num++) {
+ if (RIO_GET_PORT_NUM(rdev->swpinfo) == port_num)
continue;
if (rio_sport_is_active
@@ -1011,6 +989,8 @@ rio_disc_peer(struct rio_net *net, struct rio_mport *port, u16 destid,
break;
}
+ if (ndestid == RIO_ANY_DESTID(port->sys_size))
+ continue;
rio_unlock_device(port, destid, hopcount);
if (rio_disc_peer
(net, port, ndestid, hopcount + 1) < 0)
@@ -1108,8 +1088,7 @@ static void rio_update_route_tables(struct rio_mport *port)
if (rswitch->destid == destid)
continue;
- sport = rio_get_swpinfo_inport(port,
- rswitch->destid, rswitch->hopcount);
+ sport = RIO_GET_PORT_NUM(rswitch->rdev->swpinfo);
if (rswitch->add_entry) {
rio_route_add_entry(port, rswitch,
@@ -1184,7 +1163,11 @@ int __devinit rio_enum_mport(struct rio_mport *mport)
/* Enable Input Output Port (transmitter reviever) */
rio_enable_rx_tx_port(mport, 1, 0, 0, 0);
- if (rio_enum_peer(net, mport, 0) < 0) {
+ /* Set component tag for host */
+ rio_local_write_config_32(mport, RIO_COMPONENT_TAG_CSR,
+ next_comptag++);
+
+ if (rio_enum_peer(net, mport, 0, NULL, 0) < 0) {
/* A higher priority host won enumeration, bail. */
printk(KERN_INFO
"RIO: master port %d device has lost enumeration to a remote host\n",
diff --git a/drivers/rapidio/rio-sysfs.c b/drivers/rapidio/rio-sysfs.c
index 00b475658356..137ed93ee33f 100644
--- a/drivers/rapidio/rio-sysfs.c
+++ b/drivers/rapidio/rio-sysfs.c
@@ -40,9 +40,6 @@ static ssize_t routes_show(struct device *dev, struct device_attribute *attr, ch
char *str = buf;
int i;
- if (!rdev->rswitch)
- goto out;
-
for (i = 0; i < RIO_MAX_ROUTE_ENTRIES(rdev->net->hport->sys_size);
i++) {
if (rdev->rswitch->route_table[i] == RIO_INVALID_ROUTE)
@@ -52,7 +49,6 @@ static ssize_t routes_show(struct device *dev, struct device_attribute *attr, ch
rdev->rswitch->route_table[i]);
}
- out:
return (str - buf);
}
@@ -63,10 +59,11 @@ struct device_attribute rio_dev_attrs[] = {
__ATTR_RO(asm_did),
__ATTR_RO(asm_vid),
__ATTR_RO(asm_rev),
- __ATTR_RO(routes),
__ATTR_NULL,
};
+static DEVICE_ATTR(routes, S_IRUGO, routes_show, NULL);
+
static ssize_t
rio_read_config(struct file *filp, struct kobject *kobj,
struct bin_attribute *bin_attr,
@@ -218,7 +215,17 @@ int rio_create_sysfs_dev_files(struct rio_dev *rdev)
{
int err = 0;
- err = sysfs_create_bin_file(&rdev->dev.kobj, &rio_config_attr);
+ err = device_create_bin_file(&rdev->dev, &rio_config_attr);
+
+ if (!err && rdev->rswitch) {
+ err = device_create_file(&rdev->dev, &dev_attr_routes);
+ if (!err && rdev->rswitch->sw_sysfs)
+ err = rdev->rswitch->sw_sysfs(rdev, RIO_SW_SYSFS_CREATE);
+ }
+
+ if (err)
+ pr_warning("RIO: Failed to create attribute file(s) for %s\n",
+ rio_name(rdev));
return err;
}
@@ -231,5 +238,10 @@ int rio_create_sysfs_dev_files(struct rio_dev *rdev)
*/
void rio_remove_sysfs_dev_files(struct rio_dev *rdev)
{
- sysfs_remove_bin_file(&rdev->dev.kobj, &rio_config_attr);
+ device_remove_bin_file(&rdev->dev, &rio_config_attr);
+ if (rdev->rswitch) {
+ device_remove_file(&rdev->dev, &dev_attr_routes);
+ if (rdev->rswitch->sw_sysfs)
+ rdev->rswitch->sw_sysfs(rdev, RIO_SW_SYSFS_REMOVE);
+ }
}
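Note: with this change the "routes" attribute exists only for switches, and a switch driver can hook extra attributes in via rswitch->sw_sysfs, which rio_create_sysfs_dev_files() and rio_remove_sysfs_dev_files() call with RIO_SW_SYSFS_CREATE and RIO_SW_SYSFS_REMOVE. A hypothetical hook, shaped only after those call sites (the attribute name and show routine are illustrative):

static ssize_t errlog_show(struct device *dev, struct device_attribute *attr,
			   char *buf)
{
	return sprintf(buf, "none\n");	/* placeholder body */
}
static DEVICE_ATTR(errlog, S_IRUGO, errlog_show, NULL);

static int foo_switch_sysfs(struct rio_dev *rdev, int create)
{
	if (create == RIO_SW_SYSFS_CREATE)
		return device_create_file(&rdev->dev, &dev_attr_errlog);

	device_remove_file(&rdev->dev, &dev_attr_errlog);
	return 0;
}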
diff --git a/drivers/rapidio/rio.c b/drivers/rapidio/rio.c
index 74e9d22d95fb..7b5080c45569 100644
--- a/drivers/rapidio/rio.c
+++ b/drivers/rapidio/rio.c
@@ -443,7 +443,7 @@ rio_mport_get_physefb(struct rio_mport *port, int local,
* @from is not %NULL, searches continue from next device on the global
* list.
*/
-static struct rio_dev *rio_get_comptag(u32 comp_tag, struct rio_dev *from)
+struct rio_dev *rio_get_comptag(u32 comp_tag, struct rio_dev *from)
{
struct list_head *n;
struct rio_dev *rdev;
@@ -495,6 +495,232 @@ int rio_set_port_lockout(struct rio_dev *rdev, u32 pnum, int lock)
}
/**
+ * rio_chk_dev_route - Validate route to the specified device.
+ * @rdev: RIO device that failed to respond
+ * @nrdev: Last active device on the route to rdev
+ * @npnum: nrdev's port number on the route to rdev
+ *
+ * Follows a route to the specified RIO device to determine the last available
+ * device (and corresponding RIO port) on the route.
+ */
+static int
+rio_chk_dev_route(struct rio_dev *rdev, struct rio_dev **nrdev, int *npnum)
+{
+ u32 result;
+ int p_port, dstid, rc = -EIO;
+ struct rio_dev *prev = NULL;
+
+ /* Find switch with failed RIO link */
+ while (rdev->prev && (rdev->prev->pef & RIO_PEF_SWITCH)) {
+ if (!rio_read_config_32(rdev->prev, RIO_DEV_ID_CAR, &result)) {
+ prev = rdev->prev;
+ break;
+ }
+ rdev = rdev->prev;
+ }
+
+ if (prev == NULL)
+ goto err_out;
+
+ dstid = (rdev->pef & RIO_PEF_SWITCH) ?
+ rdev->rswitch->destid : rdev->destid;
+ p_port = prev->rswitch->route_table[dstid];
+
+ if (p_port != RIO_INVALID_ROUTE) {
+ pr_debug("RIO: link failed on [%s]-P%d\n",
+ rio_name(prev), p_port);
+ *nrdev = prev;
+ *npnum = p_port;
+ rc = 0;
+ } else
+ pr_debug("RIO: failed to trace route to %s\n", rio_name(rdev));
+err_out:
+ return rc;
+}
+
+/**
+ * rio_mport_chk_dev_access - Validate access to the specified device.
+ * @mport: Master port to send transactions
+ * @destid: Device destination ID in network
+ * @hopcount: Number of hops into the network
+ */
+int
+rio_mport_chk_dev_access(struct rio_mport *mport, u16 destid, u8 hopcount)
+{
+ int i = 0;
+ u32 tmp;
+
+ while (rio_mport_read_config_32(mport, destid, hopcount,
+ RIO_DEV_ID_CAR, &tmp)) {
+ i++;
+ if (i == RIO_MAX_CHK_RETRY)
+ return -EIO;
+ mdelay(1);
+ }
+
+ return 0;
+}
+
+/**
+ * rio_chk_dev_access - Validate access to the specified device.
+ * @rdev: Pointer to RIO device control structure
+ */
+static int rio_chk_dev_access(struct rio_dev *rdev)
+{
+ u8 hopcount = 0xff;
+ u16 destid = rdev->destid;
+
+ if (rdev->rswitch) {
+ destid = rdev->rswitch->destid;
+ hopcount = rdev->rswitch->hopcount;
+ }
+
+ return rio_mport_chk_dev_access(rdev->net->hport, destid, hopcount);
+}
+
+/**
+ * rio_get_input_status - Sends a Link-Request/Input-Status control symbol and
+ * returns link-response (if requested).
+ * @rdev: RIO device to issue Input-status command
+ * @pnum: Device port number to issue the command
+ * @lnkresp: Response from a link partner
+ */
+static int
+rio_get_input_status(struct rio_dev *rdev, int pnum, u32 *lnkresp)
+{
+ struct rio_mport *mport = rdev->net->hport;
+ u16 destid = rdev->rswitch->destid;
+ u8 hopcount = rdev->rswitch->hopcount;
+ u32 regval;
+ int checkcount;
+
+ if (lnkresp) {
+ /* Read from link maintenance response register
+ * to clear valid bit */
+ rio_mport_read_config_32(mport, destid, hopcount,
+ rdev->phys_efptr + RIO_PORT_N_MNT_RSP_CSR(pnum),
+ &regval);
+ udelay(50);
+ }
+
+ /* Issue Input-status command */
+ rio_mport_write_config_32(mport, destid, hopcount,
+ rdev->phys_efptr + RIO_PORT_N_MNT_REQ_CSR(pnum),
+ RIO_MNT_REQ_CMD_IS);
+
+ /* Exit if the response is not expected */
+ if (lnkresp == NULL)
+ return 0;
+
+ checkcount = 3;
+ while (checkcount--) {
+ udelay(50);
+ rio_mport_read_config_32(mport, destid, hopcount,
+ rdev->phys_efptr + RIO_PORT_N_MNT_RSP_CSR(pnum),
+ &regval);
+ if (regval & RIO_PORT_N_MNT_RSP_RVAL) {
+ *lnkresp = regval;
+ return 0;
+ }
+ }
+
+ return -EIO;
+}
+
+/**
+ * rio_clr_err_stopped - Clears port Error-stopped states.
+ * @rdev: Pointer to RIO device control structure
+ * @pnum: Switch port number to clear errors
+ * @err_status: port error status (if 0 reads register from device)
+ */
+static int rio_clr_err_stopped(struct rio_dev *rdev, u32 pnum, u32 err_status)
+{
+ struct rio_mport *mport = rdev->net->hport;
+ u16 destid = rdev->rswitch->destid;
+ u8 hopcount = rdev->rswitch->hopcount;
+ struct rio_dev *nextdev = rdev->rswitch->nextdev[pnum];
+ u32 regval;
+ u32 far_ackid, far_linkstat, near_ackid;
+
+ if (err_status == 0)
+ rio_mport_read_config_32(mport, destid, hopcount,
+ rdev->phys_efptr + RIO_PORT_N_ERR_STS_CSR(pnum),
+ &err_status);
+
+ if (err_status & RIO_PORT_N_ERR_STS_PW_OUT_ES) {
+ pr_debug("RIO_EM: servicing Output Error-Stopped state\n");
+ /*
+ * Send a Link-Request/Input-Status control symbol
+ */
+ if (rio_get_input_status(rdev, pnum, &regval)) {
+ pr_debug("RIO_EM: Input-status response timeout\n");
+ goto rd_err;
+ }
+
+ pr_debug("RIO_EM: SP%d Input-status response=0x%08x\n",
+ pnum, regval);
+ far_ackid = (regval & RIO_PORT_N_MNT_RSP_ASTAT) >> 5;
+ far_linkstat = regval & RIO_PORT_N_MNT_RSP_LSTAT;
+ rio_mport_read_config_32(mport, destid, hopcount,
+ rdev->phys_efptr + RIO_PORT_N_ACK_STS_CSR(pnum),
+ &regval);
+ pr_debug("RIO_EM: SP%d_ACK_STS_CSR=0x%08x\n", pnum, regval);
+ near_ackid = (regval & RIO_PORT_N_ACK_INBOUND) >> 24;
+ pr_debug("RIO_EM: SP%d far_ackID=0x%02x far_linkstat=0x%02x" \
+ " near_ackID=0x%02x\n",
+ pnum, far_ackid, far_linkstat, near_ackid);
+
+ /*
+ * If required, synchronize ackIDs of near and
+ * far sides.
+ */
+ if ((far_ackid != ((regval & RIO_PORT_N_ACK_OUTSTAND) >> 8)) ||
+ (far_ackid != (regval & RIO_PORT_N_ACK_OUTBOUND))) {
+ /* Align near outstanding/outbound ackIDs with
+ * far inbound.
+ */
+ rio_mport_write_config_32(mport, destid,
+ hopcount, rdev->phys_efptr +
+ RIO_PORT_N_ACK_STS_CSR(pnum),
+ (near_ackid << 24) |
+ (far_ackid << 8) | far_ackid);
+ /* Align far outstanding/outbound ackIDs with
+ * near inbound.
+ */
+ far_ackid++;
+ if (nextdev)
+ rio_write_config_32(nextdev,
+ nextdev->phys_efptr +
+ RIO_PORT_N_ACK_STS_CSR(RIO_GET_PORT_NUM(nextdev->swpinfo)),
+ (far_ackid << 24) |
+ (near_ackid << 8) | near_ackid);
+ else
+ pr_debug("RIO_EM: Invalid nextdev pointer (NULL)\n");
+ }
+rd_err:
+ rio_mport_read_config_32(mport, destid, hopcount,
+ rdev->phys_efptr + RIO_PORT_N_ERR_STS_CSR(pnum),
+ &err_status);
+ pr_debug("RIO_EM: SP%d_ERR_STS_CSR=0x%08x\n", pnum, err_status);
+ }
+
+ if ((err_status & RIO_PORT_N_ERR_STS_PW_INP_ES) && nextdev) {
+ pr_debug("RIO_EM: servicing Input Error-Stopped state\n");
+ rio_get_input_status(nextdev,
+ RIO_GET_PORT_NUM(nextdev->swpinfo), NULL);
+ udelay(50);
+
+ rio_mport_read_config_32(mport, destid, hopcount,
+ rdev->phys_efptr + RIO_PORT_N_ERR_STS_CSR(pnum),
+ &err_status);
+ pr_debug("RIO_EM: SP%d_ERR_STS_CSR=0x%08x\n", pnum, err_status);
+ }
+
+ return (err_status & (RIO_PORT_N_ERR_STS_PW_OUT_ES |
+ RIO_PORT_N_ERR_STS_PW_INP_ES)) ? 1 : 0;
+}
+
+/**
* rio_inb_pwrite_handler - process inbound port-write message
* @pw_msg: pointer to inbound port-write message
*
@@ -507,13 +733,13 @@ int rio_inb_pwrite_handler(union rio_pw_msg *pw_msg)
struct rio_mport *mport;
u8 hopcount;
u16 destid;
- u32 err_status;
+ u32 err_status, em_perrdet, em_ltlerrdet;
int rc, portnum;
rdev = rio_get_comptag(pw_msg->em.comptag, NULL);
if (rdev == NULL) {
- /* Someting bad here (probably enumeration error) */
- pr_err("RIO: %s No matching device for CTag 0x%08x\n",
+ /* Device removed or enumeration error */
+ pr_debug("RIO: %s No matching device for CTag 0x%08x\n",
__func__, pw_msg->em.comptag);
return -EIO;
}
@@ -524,12 +750,11 @@ int rio_inb_pwrite_handler(union rio_pw_msg *pw_msg)
{
u32 i;
for (i = 0; i < RIO_PW_MSG_SIZE/sizeof(u32);) {
- pr_debug("0x%02x: %08x %08x %08x %08x",
+ pr_debug("0x%02x: %08x %08x %08x %08x\n",
i*4, pw_msg->raw[i], pw_msg->raw[i + 1],
pw_msg->raw[i + 2], pw_msg->raw[i + 3]);
i += 4;
}
- pr_debug("\n");
}
#endif
@@ -545,6 +770,26 @@ int rio_inb_pwrite_handler(union rio_pw_msg *pw_msg)
return 0;
}
+ portnum = pw_msg->em.is_port & 0xFF;
+
+ /* Check if device and route to it are functional:
+ * Sometimes devices may send PW message(s) just before being
+ * powered down (or link being lost).
+ */
+ if (rio_chk_dev_access(rdev)) {
+ pr_debug("RIO: device access failed - get link partner\n");
+ /* Scan route to the device and identify failed link.
+ * This will replace device and port reported in PW message.
+ * PW message should not be used after this point.
+ */
+ if (rio_chk_dev_route(rdev, &rdev, &portnum)) {
+ pr_err("RIO: Route trace for %s failed\n",
+ rio_name(rdev));
+ return -EIO;
+ }
+ pw_msg = NULL;
+ }
+
/* For End-point devices processing stops here */
if (!(rdev->pef & RIO_PEF_SWITCH))
return 0;
@@ -562,9 +807,6 @@ int rio_inb_pwrite_handler(union rio_pw_msg *pw_msg)
/*
* Process the port-write notification from switch
*/
-
- portnum = pw_msg->em.is_port & 0xFF;
-
if (rdev->rswitch->em_handle)
rdev->rswitch->em_handle(rdev, portnum);
@@ -573,29 +815,28 @@ int rio_inb_pwrite_handler(union rio_pw_msg *pw_msg)
&err_status);
pr_debug("RIO_PW: SP%d_ERR_STS_CSR=0x%08x\n", portnum, err_status);
- if (pw_msg->em.errdetect) {
- pr_debug("RIO_PW: RIO_EM_P%d_ERR_DETECT=0x%08x\n",
- portnum, pw_msg->em.errdetect);
- /* Clear EM Port N Error Detect CSR */
- rio_mport_write_config_32(mport, destid, hopcount,
- rdev->em_efptr + RIO_EM_PN_ERR_DETECT(portnum), 0);
- }
+ if (err_status & RIO_PORT_N_ERR_STS_PORT_OK) {
- if (pw_msg->em.ltlerrdet) {
- pr_debug("RIO_PW: RIO_EM_LTL_ERR_DETECT=0x%08x\n",
- pw_msg->em.ltlerrdet);
- /* Clear EM L/T Layer Error Detect CSR */
- rio_mport_write_config_32(mport, destid, hopcount,
- rdev->em_efptr + RIO_EM_LTL_ERR_DETECT, 0);
- }
+ if (!(rdev->rswitch->port_ok & (1 << portnum))) {
+ rdev->rswitch->port_ok |= (1 << portnum);
+ rio_set_port_lockout(rdev, portnum, 0);
+ /* Schedule Insertion Service */
+ pr_debug("RIO_PW: Device Insertion on [%s]-P%d\n",
+ rio_name(rdev), portnum);
+ }
- /* Clear Port Errors */
- rio_mport_write_config_32(mport, destid, hopcount,
- rdev->phys_efptr + RIO_PORT_N_ERR_STS_CSR(portnum),
- err_status & RIO_PORT_N_ERR_STS_CLR_MASK);
+ /* Clear error-stopped states (if reported).
+ * Depending on the link partner state, two attempts
+ * may be needed for successful recovery.
+ */
+ if (err_status & (RIO_PORT_N_ERR_STS_PW_OUT_ES |
+ RIO_PORT_N_ERR_STS_PW_INP_ES)) {
+ if (rio_clr_err_stopped(rdev, portnum, err_status))
+ rio_clr_err_stopped(rdev, portnum, 0);
+ }
+ } else { /* if (err_status & RIO_PORT_N_ERR_STS_PORT_UNINIT) */
- if (rdev->rswitch->port_ok & (1 << portnum)) {
- if (err_status & RIO_PORT_N_ERR_STS_PORT_UNINIT) {
+ if (rdev->rswitch->port_ok & (1 << portnum)) {
rdev->rswitch->port_ok &= ~(1 << portnum);
rio_set_port_lockout(rdev, portnum, 1);
@@ -608,21 +849,32 @@ int rio_inb_pwrite_handler(union rio_pw_msg *pw_msg)
pr_debug("RIO_PW: Device Extraction on [%s]-P%d\n",
rio_name(rdev), portnum);
}
- } else {
- if (err_status & RIO_PORT_N_ERR_STS_PORT_OK) {
- rdev->rswitch->port_ok |= (1 << portnum);
- rio_set_port_lockout(rdev, portnum, 0);
+ }
- /* Schedule Insertion Service */
- pr_debug("RIO_PW: Device Insertion on [%s]-P%d\n",
- rio_name(rdev), portnum);
- }
+ rio_mport_read_config_32(mport, destid, hopcount,
+ rdev->em_efptr + RIO_EM_PN_ERR_DETECT(portnum), &em_perrdet);
+ if (em_perrdet) {
+ pr_debug("RIO_PW: RIO_EM_P%d_ERR_DETECT=0x%08x\n",
+ portnum, em_perrdet);
+ /* Clear EM Port N Error Detect CSR */
+ rio_mport_write_config_32(mport, destid, hopcount,
+ rdev->em_efptr + RIO_EM_PN_ERR_DETECT(portnum), 0);
+ }
+
+ rio_mport_read_config_32(mport, destid, hopcount,
+ rdev->em_efptr + RIO_EM_LTL_ERR_DETECT, &em_ltlerrdet);
+ if (em_ltlerrdet) {
+ pr_debug("RIO_PW: RIO_EM_LTL_ERR_DETECT=0x%08x\n",
+ em_ltlerrdet);
+ /* Clear EM L/T Layer Error Detect CSR */
+ rio_mport_write_config_32(mport, destid, hopcount,
+ rdev->em_efptr + RIO_EM_LTL_ERR_DETECT, 0);
}
- /* Clear Port-Write Pending bit */
+ /* Clear remaining error bits and Port-Write Pending bit */
rio_mport_write_config_32(mport, destid, hopcount,
rdev->phys_efptr + RIO_PORT_N_ERR_STS_CSR(portnum),
- RIO_PORT_N_ERR_STS_PW_PEND);
+ err_status);
return 0;
}
@@ -907,11 +1159,11 @@ int __devinit rio_init_mports(void)
list_for_each_entry(port, &rio_mports, node) {
if (!request_mem_region(port->iores.start,
- port->iores.end - port->iores.start,
+ resource_size(&port->iores),
port->name)) {
printk(KERN_ERR
"RIO: Error requesting master port region 0x%016llx-0x%016llx\n",
- (u64)port->iores.start, (u64)port->iores.end - 1);
+ (u64)port->iores.start, (u64)port->iores.end);
rc = -ENOMEM;
goto out;
}
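Note: the ackID re-alignment in rio_clr_err_stopped() above is easiest to follow as plain bit packing: the value written to a port's ACK_STS register carries the inbound ackID at bit 24 and the outstanding/outbound ackIDs at bits 8 and 0 (field widths as per the masks in rio_regs.h, not shown here). A standalone sketch with fabricated ackIDs:

#include <stdio.h>
#include <stdint.h>

static uint32_t ack_sts(unsigned inbound, unsigned outstanding, unsigned outbound)
{
	return ((uint32_t)inbound << 24) | (outstanding << 8) | outbound;
}

int main(void)
{
	unsigned far_ackid = 0x11;	/* partner's inbound ackID, from the link-response */
	unsigned near_ackid = 0x07;	/* our inbound ackID, from ACK_STS_CSR */

	/* near side: outstanding/outbound follow the far inbound ackID */
	printf("near ACK_STS <- 0x%08x\n", ack_sts(near_ackid, far_ackid, far_ackid));

	/* far side (written through the link partner), one ahead as in the driver */
	far_ackid++;
	printf("far  ACK_STS <- 0x%08x\n", ack_sts(far_ackid, near_ackid, near_ackid));
	return 0;
}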
diff --git a/drivers/rapidio/rio.h b/drivers/rapidio/rio.h
index f27b7a9c47d2..b1af414f15e6 100644
--- a/drivers/rapidio/rio.h
+++ b/drivers/rapidio/rio.h
@@ -14,6 +14,8 @@
#include <linux/list.h>
#include <linux/rio.h>
+#define RIO_MAX_CHK_RETRY 3
+
/* Functions internal to the RIO core code */
extern u32 rio_mport_get_feature(struct rio_mport *mport, int local, u16 destid,
@@ -22,6 +24,8 @@ extern u32 rio_mport_get_physefb(struct rio_mport *port, int local,
u16 destid, u8 hopcount);
extern u32 rio_mport_get_efb(struct rio_mport *port, int local, u16 destid,
u8 hopcount, u32 from);
+extern int rio_mport_chk_dev_access(struct rio_mport *mport, u16 destid,
+ u8 hopcount);
extern int rio_create_sysfs_dev_files(struct rio_dev *rdev);
extern int rio_enum_mport(struct rio_mport *mport);
extern int rio_disc_mport(struct rio_mport *mport);
@@ -34,6 +38,7 @@ extern int rio_std_route_get_entry(struct rio_mport *mport, u16 destid,
extern int rio_std_route_clr_table(struct rio_mport *mport, u16 destid,
u8 hopcount, u16 table);
extern int rio_set_port_lockout(struct rio_dev *rdev, u32 pnum, int lock);
+extern struct rio_dev *rio_get_comptag(u32 comp_tag, struct rio_dev *from);
/* Structures internal to the RIO core code */
extern struct device_attribute rio_dev_attrs[];
diff --git a/drivers/rapidio/switches/Kconfig b/drivers/rapidio/switches/Kconfig
index 2b4e9b2b6631..f47fee5d4563 100644
--- a/drivers/rapidio/switches/Kconfig
+++ b/drivers/rapidio/switches/Kconfig
@@ -20,6 +20,13 @@ config RAPIDIO_TSI568
---help---
Includes support for IDT Tsi568 serial RapidIO switch.
+config RAPIDIO_CPS_GEN2
+ bool "IDT CPS Gen.2 SRIO switch support"
+ depends on RAPIDIO
+ default n
+ ---help---
+ Includes support for IDT CPS Gen.2 serial RapidIO switches.
+
config RAPIDIO_TSI500
bool "Tsi500 Parallel RapidIO switch support"
depends on RAPIDIO
diff --git a/drivers/rapidio/switches/Makefile b/drivers/rapidio/switches/Makefile
index fe4adc3e8d5f..48d67a6b98c8 100644
--- a/drivers/rapidio/switches/Makefile
+++ b/drivers/rapidio/switches/Makefile
@@ -6,6 +6,7 @@ obj-$(CONFIG_RAPIDIO_TSI57X) += tsi57x.o
obj-$(CONFIG_RAPIDIO_CPS_XX) += idtcps.o
obj-$(CONFIG_RAPIDIO_TSI568) += tsi568.o
obj-$(CONFIG_RAPIDIO_TSI500) += tsi500.o
+obj-$(CONFIG_RAPIDIO_CPS_GEN2) += idt_gen2.o
ifeq ($(CONFIG_RAPIDIO_DEBUG),y)
EXTRA_CFLAGS += -DDEBUG
diff --git a/drivers/rapidio/switches/idt_gen2.c b/drivers/rapidio/switches/idt_gen2.c
new file mode 100644
index 000000000000..0bb871cb5c40
--- /dev/null
+++ b/drivers/rapidio/switches/idt_gen2.c
@@ -0,0 +1,447 @@
+/*
+ * IDT CPS Gen.2 Serial RapidIO switch family support
+ *
+ * Copyright 2010 Integrated Device Technology, Inc.
+ * Alexandre Bounine <alexandre.bounine@idt.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ */
+
+#include <linux/rio.h>
+#include <linux/rio_drv.h>
+#include <linux/rio_ids.h>
+#include <linux/delay.h>
+#include "../rio.h"
+
+#define LOCAL_RTE_CONF_DESTID_SEL 0x010070
+#define LOCAL_RTE_CONF_DESTID_SEL_PSEL 0x0000001f
+
+#define IDT_LT_ERR_REPORT_EN 0x03100c
+
+#define IDT_PORT_ERR_REPORT_EN(n) (0x031044 + (n)*0x40)
+#define IDT_PORT_ERR_REPORT_EN_BC 0x03ff04
+
+#define IDT_PORT_ISERR_REPORT_EN(n) (0x03104C + (n)*0x40)
+#define IDT_PORT_ISERR_REPORT_EN_BC 0x03ff0c
+#define IDT_PORT_INIT_TX_ACQUIRED 0x00000020
+
+#define IDT_LANE_ERR_REPORT_EN(n) (0x038010 + (n)*0x100)
+#define IDT_LANE_ERR_REPORT_EN_BC 0x03ff10
+
+#define IDT_DEV_CTRL_1 0xf2000c
+#define IDT_DEV_CTRL_1_GENPW 0x02000000
+#define IDT_DEV_CTRL_1_PRSTBEH 0x00000001
+
+#define IDT_CFGBLK_ERR_CAPTURE_EN 0x020008
+#define IDT_CFGBLK_ERR_REPORT 0xf20014
+#define IDT_CFGBLK_ERR_REPORT_GENPW 0x00000002
+
+#define IDT_AUX_PORT_ERR_CAP_EN 0x020000
+#define IDT_AUX_ERR_REPORT_EN 0xf20018
+#define IDT_AUX_PORT_ERR_LOG_I2C 0x00000002
+#define IDT_AUX_PORT_ERR_LOG_JTAG 0x00000001
+
+#define IDT_ISLTL_ADDRESS_CAP 0x021014
+
+#define IDT_RIO_DOMAIN 0xf20020
+#define IDT_RIO_DOMAIN_MASK 0x000000ff
+
+#define IDT_PW_INFO_CSR 0xf20024
+
+#define IDT_SOFT_RESET 0xf20040
+#define IDT_SOFT_RESET_REQ 0x00030097
+
+#define IDT_I2C_MCTRL 0xf20050
+#define IDT_I2C_MCTRL_GENPW 0x04000000
+
+#define IDT_JTAG_CTRL 0xf2005c
+#define IDT_JTAG_CTRL_GENPW 0x00000002
+
+#define IDT_LANE_CTRL(n) (0xff8000 + (n)*0x100)
+#define IDT_LANE_CTRL_BC 0xffff00
+#define IDT_LANE_CTRL_GENPW 0x00200000
+#define IDT_LANE_DFE_1_BC 0xffff18
+#define IDT_LANE_DFE_2_BC 0xffff1c
+
+#define IDT_PORT_OPS(n) (0xf40004 + (n)*0x100)
+#define IDT_PORT_OPS_GENPW 0x08000000
+#define IDT_PORT_OPS_PL_ELOG 0x00000040
+#define IDT_PORT_OPS_LL_ELOG 0x00000020
+#define IDT_PORT_OPS_LT_ELOG 0x00000010
+#define IDT_PORT_OPS_BC 0xf4ff04
+
+#define IDT_PORT_ISERR_DET(n) (0xf40008 + (n)*0x100)
+
+#define IDT_ERR_CAP 0xfd0000
+#define IDT_ERR_CAP_LOG_OVERWR 0x00000004
+
+#define IDT_ERR_RD 0xfd0004
+
+#define IDT_DEFAULT_ROUTE 0xde
+#define IDT_NO_ROUTE 0xdf
+
+static int
+idtg2_route_add_entry(struct rio_mport *mport, u16 destid, u8 hopcount,
+ u16 table, u16 route_destid, u8 route_port)
+{
+ /*
+ * Select routing table to update
+ */
+ if (table == RIO_GLOBAL_TABLE)
+ table = 0;
+ else
+ table++;
+
+ rio_mport_write_config_32(mport, destid, hopcount,
+ LOCAL_RTE_CONF_DESTID_SEL, table);
+
+ /*
+ * Program destination port for the specified destID
+ */
+ rio_mport_write_config_32(mport, destid, hopcount,
+ RIO_STD_RTE_CONF_DESTID_SEL_CSR,
+ (u32)route_destid);
+
+ rio_mport_write_config_32(mport, destid, hopcount,
+ RIO_STD_RTE_CONF_PORT_SEL_CSR,
+ (u32)route_port);
+ udelay(10);
+
+ return 0;
+}
+
+static int
+idtg2_route_get_entry(struct rio_mport *mport, u16 destid, u8 hopcount,
+ u16 table, u16 route_destid, u8 *route_port)
+{
+ u32 result;
+
+ /*
+ * Select routing table to read
+ */
+ if (table == RIO_GLOBAL_TABLE)
+ table = 0;
+ else
+ table++;
+
+ rio_mport_write_config_32(mport, destid, hopcount,
+ LOCAL_RTE_CONF_DESTID_SEL, table);
+
+ rio_mport_write_config_32(mport, destid, hopcount,
+ RIO_STD_RTE_CONF_DESTID_SEL_CSR,
+ route_destid);
+
+ rio_mport_read_config_32(mport, destid, hopcount,
+ RIO_STD_RTE_CONF_PORT_SEL_CSR, &result);
+
+ if (IDT_DEFAULT_ROUTE == (u8)result || IDT_NO_ROUTE == (u8)result)
+ *route_port = RIO_INVALID_ROUTE;
+ else
+ *route_port = (u8)result;
+
+ return 0;
+}
+
+static int
+idtg2_route_clr_table(struct rio_mport *mport, u16 destid, u8 hopcount,
+ u16 table)
+{
+ u32 i;
+
+ /*
+ * Select routing table to read
+ */
+ if (table == RIO_GLOBAL_TABLE)
+ table = 0;
+ else
+ table++;
+
+ rio_mport_write_config_32(mport, destid, hopcount,
+ LOCAL_RTE_CONF_DESTID_SEL, table);
+
+ for (i = RIO_STD_RTE_CONF_EXTCFGEN;
+ i <= (RIO_STD_RTE_CONF_EXTCFGEN | 0xff);) {
+ rio_mport_write_config_32(mport, destid, hopcount,
+ RIO_STD_RTE_CONF_DESTID_SEL_CSR, i);
+ rio_mport_write_config_32(mport, destid, hopcount,
+ RIO_STD_RTE_CONF_PORT_SEL_CSR,
+ (IDT_DEFAULT_ROUTE << 24) | (IDT_DEFAULT_ROUTE << 16) |
+ (IDT_DEFAULT_ROUTE << 8) | IDT_DEFAULT_ROUTE);
+ i += 4;
+ }
+
+ return 0;
+}
+
+
+static int
+idtg2_set_domain(struct rio_mport *mport, u16 destid, u8 hopcount,
+ u8 sw_domain)
+{
+ /*
+ * Switch domain configuration operates only at global level
+ */
+ rio_mport_write_config_32(mport, destid, hopcount,
+ IDT_RIO_DOMAIN, (u32)sw_domain);
+ return 0;
+}
+
+static int
+idtg2_get_domain(struct rio_mport *mport, u16 destid, u8 hopcount,
+ u8 *sw_domain)
+{
+ u32 regval;
+
+ /*
+ * Switch domain configuration operates only at global level
+ */
+ rio_mport_read_config_32(mport, destid, hopcount,
+ IDT_RIO_DOMAIN, &regval);
+
+ *sw_domain = (u8)(regval & 0xff);
+
+ return 0;
+}
+
+static int
+idtg2_em_init(struct rio_dev *rdev)
+{
+ struct rio_mport *mport = rdev->net->hport;
+ u16 destid = rdev->rswitch->destid;
+ u8 hopcount = rdev->rswitch->hopcount;
+ u32 regval;
+ int i, tmp;
+
+ /*
+ * This routine performs device-specific initialization only.
+ * All standard EM configuration should be performed at upper level.
+ */
+
+ pr_debug("RIO: %s [%d:%d]\n", __func__, destid, hopcount);
+
+ /* Set Port-Write info CSR: PRIO=3 and CRF=1 */
+ rio_mport_write_config_32(mport, destid, hopcount,
+ IDT_PW_INFO_CSR, 0x0000e000);
+
+ /*
+ * Configure LT LAYER error reporting.
+ */
+
+ /* Enable standard (RIO.p8) error reporting */
+ rio_mport_write_config_32(mport, destid, hopcount,
+ IDT_LT_ERR_REPORT_EN,
+ REM_LTL_ERR_ILLTRAN | REM_LTL_ERR_UNSOLR |
+ REM_LTL_ERR_UNSUPTR);
+
+ /* Use Port-Writes for LT layer error reporting.
+ * Enable per-port reset
+ */
+ rio_mport_read_config_32(mport, destid, hopcount,
+ IDT_DEV_CTRL_1, &regval);
+ rio_mport_write_config_32(mport, destid, hopcount,
+ IDT_DEV_CTRL_1,
+ regval | IDT_DEV_CTRL_1_GENPW | IDT_DEV_CTRL_1_PRSTBEH);
+
+ /*
+ * Configure PORT error reporting.
+ */
+
+ /* Report all RIO.p8 errors supported by device */
+ rio_mport_write_config_32(mport, destid, hopcount,
+ IDT_PORT_ERR_REPORT_EN_BC, 0x807e8037);
+
+ /* Configure reporting of implementation specific errors/events */
+ rio_mport_write_config_32(mport, destid, hopcount,
+ IDT_PORT_ISERR_REPORT_EN_BC, IDT_PORT_INIT_TX_ACQUIRED);
+
+ /* Use Port-Writes for port error reporting and enable error logging */
+ tmp = RIO_GET_TOTAL_PORTS(rdev->swpinfo);
+ for (i = 0; i < tmp; i++) {
+ rio_mport_read_config_32(mport, destid, hopcount,
+ IDT_PORT_OPS(i), &regval);
+ rio_mport_write_config_32(mport, destid, hopcount,
+ IDT_PORT_OPS(i), regval | IDT_PORT_OPS_GENPW |
+ IDT_PORT_OPS_PL_ELOG |
+ IDT_PORT_OPS_LL_ELOG |
+ IDT_PORT_OPS_LT_ELOG);
+ }
+ /* Overwrite error log if full */
+ rio_mport_write_config_32(mport, destid, hopcount,
+ IDT_ERR_CAP, IDT_ERR_CAP_LOG_OVERWR);
+
+ /*
+ * Configure LANE error reporting.
+ */
+
+ /* Disable line error reporting */
+ rio_mport_write_config_32(mport, destid, hopcount,
+ IDT_LANE_ERR_REPORT_EN_BC, 0);
+
+ /* Use Port-Writes for lane error reporting (when enabled)
+ * (do per-lane update because lanes may have different configuration)
+ */
+ tmp = (rdev->did == RIO_DID_IDTCPS1848) ? 48 : 16;
+ for (i = 0; i < tmp; i++) {
+ rio_mport_read_config_32(mport, destid, hopcount,
+ IDT_LANE_CTRL(i), &regval);
+ rio_mport_write_config_32(mport, destid, hopcount,
+ IDT_LANE_CTRL(i), regval | IDT_LANE_CTRL_GENPW);
+ }
+
+ /*
+ * Configure AUX error reporting.
+ */
+
+ /* Disable JTAG and I2C Error capture */
+ rio_mport_write_config_32(mport, destid, hopcount,
+ IDT_AUX_PORT_ERR_CAP_EN, 0);
+
+ /* Disable JTAG and I2C Error reporting/logging */
+ rio_mport_write_config_32(mport, destid, hopcount,
+ IDT_AUX_ERR_REPORT_EN, 0);
+
+ /* Disable Port-Write notification from JTAG */
+ rio_mport_write_config_32(mport, destid, hopcount,
+ IDT_JTAG_CTRL, 0);
+
+ /* Disable Port-Write notification from I2C */
+ rio_mport_read_config_32(mport, destid, hopcount,
+ IDT_I2C_MCTRL, &regval);
+ rio_mport_write_config_32(mport, destid, hopcount,
+ IDT_I2C_MCTRL,
+ regval & ~IDT_I2C_MCTRL_GENPW);
+
+ /*
+ * Configure CFG_BLK error reporting.
+ */
+
+ /* Disable Configuration Block error capture */
+ rio_mport_write_config_32(mport, destid, hopcount,
+ IDT_CFGBLK_ERR_CAPTURE_EN, 0);
+
+ /* Disable Port-Writes for Configuration Block error reporting */
+ rio_mport_read_config_32(mport, destid, hopcount,
+ IDT_CFGBLK_ERR_REPORT, &regval);
+ rio_mport_write_config_32(mport, destid, hopcount,
+ IDT_CFGBLK_ERR_REPORT,
+ regval & ~IDT_CFGBLK_ERR_REPORT_GENPW);
+
+ /* set TVAL = ~50us */
+ rio_mport_write_config_32(mport, destid, hopcount,
+ rdev->phys_efptr + RIO_PORT_LINKTO_CTL_CSR, 0x8e << 8);
+
+ return 0;
+}
+
+static int
+idtg2_em_handler(struct rio_dev *rdev, u8 portnum)
+{
+ struct rio_mport *mport = rdev->net->hport;
+ u16 destid = rdev->rswitch->destid;
+ u8 hopcount = rdev->rswitch->hopcount;
+ u32 regval, em_perrdet, em_ltlerrdet;
+
+ rio_mport_read_config_32(mport, destid, hopcount,
+ rdev->em_efptr + RIO_EM_LTL_ERR_DETECT, &em_ltlerrdet);
+ if (em_ltlerrdet) {
+ /* Service Logical/Transport Layer Error(s) */
+ if (em_ltlerrdet & REM_LTL_ERR_IMPSPEC) {
+ /* Implementation specific error reported */
+ rio_mport_read_config_32(mport, destid, hopcount,
+ IDT_ISLTL_ADDRESS_CAP, &regval);
+
+ pr_debug("RIO: %s Implementation Specific LTL errors" \
+ " 0x%x @(0x%x)\n",
+ rio_name(rdev), em_ltlerrdet, regval);
+
+ /* Clear implementation specific address capture CSR */
+ rio_mport_write_config_32(mport, destid, hopcount,
+ IDT_ISLTL_ADDRESS_CAP, 0);
+
+ }
+ }
+
+ rio_mport_read_config_32(mport, destid, hopcount,
+ rdev->em_efptr + RIO_EM_PN_ERR_DETECT(portnum), &em_perrdet);
+ if (em_perrdet) {
+ /* Service Port-Level Error(s) */
+ if (em_perrdet & REM_PED_IMPL_SPEC) {
+ /* Implementation Specific port error reported */
+
+ /* Get IS errors reported */
+ rio_mport_read_config_32(mport, destid, hopcount,
+ IDT_PORT_ISERR_DET(portnum), &regval);
+
+ pr_debug("RIO: %s Implementation Specific Port" \
+ " errors 0x%x\n", rio_name(rdev), regval);
+
+ /* Clear all implementation specific events */
+ rio_mport_write_config_32(mport, destid, hopcount,
+ IDT_PORT_ISERR_DET(portnum), 0);
+ }
+ }
+
+ return 0;
+}
+
+static ssize_t
+idtg2_show_errlog(struct device *dev, struct device_attribute *attr, char *buf)
+{
+ struct rio_dev *rdev = to_rio_dev(dev);
+ struct rio_mport *mport = rdev->net->hport;
+ u16 destid = rdev->rswitch->destid;
+ u8 hopcount = rdev->rswitch->hopcount;
+ ssize_t len = 0;
+ u32 regval;
+
+ while (!rio_mport_read_config_32(mport, destid, hopcount,
+ IDT_ERR_RD, &regval)) {
+ if (!regval) /* 0 = end of log */
+ break;
+ len += snprintf(buf + len, PAGE_SIZE - len,
+ "%08x\n", regval);
+ if (len >= (PAGE_SIZE - 10))
+ break;
+ }
+
+ return len;
+}
+
+static DEVICE_ATTR(errlog, S_IRUGO, idtg2_show_errlog, NULL);
+
+static int idtg2_sysfs(struct rio_dev *rdev, int create)
+{
+ struct device *dev = &rdev->dev;
+ int err = 0;
+
+ if (create == RIO_SW_SYSFS_CREATE) {
+ /* Initialize sysfs entries */
+ err = device_create_file(dev, &dev_attr_errlog);
+ if (err)
+ dev_err(dev, "Unable create sysfs errlog file\n");
+ } else
+ device_remove_file(dev, &dev_attr_errlog);
+
+ return err;
+}
+
+static int idtg2_switch_init(struct rio_dev *rdev, int do_enum)
+{
+ pr_debug("RIO: %s for %s\n", __func__, rio_name(rdev));
+ rdev->rswitch->add_entry = idtg2_route_add_entry;
+ rdev->rswitch->get_entry = idtg2_route_get_entry;
+ rdev->rswitch->clr_table = idtg2_route_clr_table;
+ rdev->rswitch->set_domain = idtg2_set_domain;
+ rdev->rswitch->get_domain = idtg2_get_domain;
+ rdev->rswitch->em_init = idtg2_em_init;
+ rdev->rswitch->em_handle = idtg2_em_handler;
+ rdev->rswitch->sw_sysfs = idtg2_sysfs;
+
+ return 0;
+}
+
+DECLARE_RIO_SWITCH_INIT(RIO_VID_IDT, RIO_DID_IDTCPS1848, idtg2_switch_init);
+DECLARE_RIO_SWITCH_INIT(RIO_VID_IDT, RIO_DID_IDTCPS1616, idtg2_switch_init);
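
The register #defines at the top of the new file place per-port and per-lane blocks at fixed strides, so the accessors only scale the port or lane index. A minimal standalone sketch of that address arithmetic, reusing the same #define values (the printed addresses are illustrative only):

/* Standalone sketch of the stride-based register macros used by idt_gen2.c:
 * per-port blocks sit 0x40 or 0x100 bytes apart, per-lane blocks 0x100 apart. */
#include <stdio.h>

#define IDT_PORT_ERR_REPORT_EN(n)	(0x031044 + (n)*0x40)
#define IDT_PORT_OPS(n)			(0xf40004 + (n)*0x100)
#define IDT_LANE_CTRL(n)		(0xff8000 + (n)*0x100)

int main(void)
{
	int n;

	for (n = 0; n < 4; n++)
		printf("index %d: ERR_REPORT_EN 0x%06x, PORT_OPS 0x%06x, LANE_CTRL 0x%06x\n",
		       n, IDT_PORT_ERR_REPORT_EN(n), IDT_PORT_OPS(n),
		       IDT_LANE_CTRL(n));
	return 0;
}
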
diff --git a/drivers/rapidio/switches/idtcps.c b/drivers/rapidio/switches/idtcps.c
index 2c790c144f89..fc9f6374f759 100644
--- a/drivers/rapidio/switches/idtcps.c
+++ b/drivers/rapidio/switches/idtcps.c
@@ -117,6 +117,10 @@ idtcps_get_domain(struct rio_mport *mport, u16 destid, u8 hopcount,
static int idtcps_switch_init(struct rio_dev *rdev, int do_enum)
{
+ struct rio_mport *mport = rdev->net->hport;
+ u16 destid = rdev->rswitch->destid;
+ u8 hopcount = rdev->rswitch->hopcount;
+
pr_debug("RIO: %s for %s\n", __func__, rio_name(rdev));
rdev->rswitch->add_entry = idtcps_route_add_entry;
rdev->rswitch->get_entry = idtcps_route_get_entry;
@@ -126,6 +130,12 @@ static int idtcps_switch_init(struct rio_dev *rdev, int do_enum)
rdev->rswitch->em_init = NULL;
rdev->rswitch->em_handle = NULL;
+ if (do_enum) {
+ /* set TVAL = ~50us */
+ rio_mport_write_config_32(mport, destid, hopcount,
+ rdev->phys_efptr + RIO_PORT_LINKTO_CTL_CSR, 0x8e << 8);
+ }
+
return 0;
}
diff --git a/drivers/rapidio/switches/tsi568.c b/drivers/rapidio/switches/tsi568.c
index f7fd7898606e..b9a389b9f812 100644
--- a/drivers/rapidio/switches/tsi568.c
+++ b/drivers/rapidio/switches/tsi568.c
@@ -29,7 +29,7 @@
#define SPP_ROUTE_CFG_DESTID(n) (0x11070 + 0x100*n)
#define SPP_ROUTE_CFG_PORT(n) (0x11074 + 0x100*n)
-#define TSI568_SP_MODE_BC 0x10004
+#define TSI568_SP_MODE(n) (0x11004 + 0x100*n)
#define TSI568_SP_MODE_PW_DIS 0x08000000
static int
@@ -117,14 +117,19 @@ tsi568_em_init(struct rio_dev *rdev)
u16 destid = rdev->rswitch->destid;
u8 hopcount = rdev->rswitch->hopcount;
u32 regval;
+ int portnum;
pr_debug("TSI568 %s [%d:%d]\n", __func__, destid, hopcount);
/* Make sure that Port-Writes are disabled (for all ports) */
- rio_mport_read_config_32(mport, destid, hopcount,
- TSI568_SP_MODE_BC, &regval);
- rio_mport_write_config_32(mport, destid, hopcount,
- TSI568_SP_MODE_BC, regval | TSI568_SP_MODE_PW_DIS);
+ for (portnum = 0;
+ portnum < RIO_GET_TOTAL_PORTS(rdev->swpinfo); portnum++) {
+ rio_mport_read_config_32(mport, destid, hopcount,
+ TSI568_SP_MODE(portnum), &regval);
+ rio_mport_write_config_32(mport, destid, hopcount,
+ TSI568_SP_MODE(portnum),
+ regval | TSI568_SP_MODE_PW_DIS);
+ }
return 0;
}
diff --git a/drivers/rapidio/switches/tsi57x.c b/drivers/rapidio/switches/tsi57x.c
index d34df722d95f..2003fb63c404 100644
--- a/drivers/rapidio/switches/tsi57x.c
+++ b/drivers/rapidio/switches/tsi57x.c
@@ -166,7 +166,8 @@ tsi57x_em_init(struct rio_dev *rdev)
pr_debug("TSI578 %s [%d:%d]\n", __func__, destid, hopcount);
- for (portnum = 0; portnum < 16; portnum++) {
+ for (portnum = 0;
+ portnum < RIO_GET_TOTAL_PORTS(rdev->swpinfo); portnum++) {
/* Make sure that Port-Writes are enabled (for all ports) */
rio_mport_read_config_32(mport, destid, hopcount,
TSI578_SP_MODE(portnum), &regval);
@@ -205,6 +206,10 @@ tsi57x_em_init(struct rio_dev *rdev)
portnum++;
}
+ /* set TVAL = ~50us */
+ rio_mport_write_config_32(mport, destid, hopcount,
+ rdev->phys_efptr + RIO_PORT_LINKTO_CTL_CSR, 0x9a << 8);
+
return 0;
}
diff --git a/drivers/regulator/Kconfig b/drivers/regulator/Kconfig
index 172951bf23a4..dd30e883d4a7 100644
--- a/drivers/regulator/Kconfig
+++ b/drivers/regulator/Kconfig
@@ -100,6 +100,14 @@ config REGULATOR_MAX8925
help
Say y here to support the voltage regulator of Maxim MAX8925 PMIC.
+config REGULATOR_MAX8952
+ tristate "Maxim MAX8952 Power Management IC"
+ depends on I2C
+ help
+ This driver controls a Maxim 8952 voltage output regulator
+ via I2C bus. Maxim 8952 has one voltage output and supports 4 DVS
+ modes ranging from 0.77V to 1.40V by 0.01V steps.
+
config REGULATOR_MAX8998
tristate "Maxim 8998 voltage regulator"
depends on MFD_MAX8998
@@ -164,6 +172,13 @@ config REGULATOR_LP3971
Say Y here to support the voltage regulators and convertors
on National Semiconductors LP3971 PMIC
+config REGULATOR_LP3972
+ tristate "National Semiconductors LP3972 PMIC regulator driver"
+ depends on I2C
+ help
+ Say Y here to support the voltage regulators and convertors
+ on National Semiconductors LP3972 PMIC
+
config REGULATOR_PCAP
tristate "PCAP2 regulator driver"
depends on EZX_PCAP
diff --git a/drivers/regulator/Makefile b/drivers/regulator/Makefile
index 8285fd832e16..bff815736780 100644
--- a/drivers/regulator/Makefile
+++ b/drivers/regulator/Makefile
@@ -3,20 +3,21 @@
#
-obj-$(CONFIG_REGULATOR) += core.o
+obj-$(CONFIG_REGULATOR) += core.o dummy.o
obj-$(CONFIG_REGULATOR_FIXED_VOLTAGE) += fixed.o
obj-$(CONFIG_REGULATOR_VIRTUAL_CONSUMER) += virtual.o
obj-$(CONFIG_REGULATOR_USERSPACE_CONSUMER) += userspace-consumer.o
obj-$(CONFIG_REGULATOR_AD5398) += ad5398.o
obj-$(CONFIG_REGULATOR_BQ24022) += bq24022.o
-obj-$(CONFIG_REGULATOR_DUMMY) += dummy.o
obj-$(CONFIG_REGULATOR_LP3971) += lp3971.o
+obj-$(CONFIG_REGULATOR_LP3972) += lp3972.o
obj-$(CONFIG_REGULATOR_MAX1586) += max1586.o
obj-$(CONFIG_REGULATOR_TWL4030) += twl-regulator.o
obj-$(CONFIG_REGULATOR_MAX8649) += max8649.o
obj-$(CONFIG_REGULATOR_MAX8660) += max8660.o
obj-$(CONFIG_REGULATOR_MAX8925) += max8925-regulator.o
+obj-$(CONFIG_REGULATOR_MAX8952) += max8952.o
obj-$(CONFIG_REGULATOR_MAX8998) += max8998.o
obj-$(CONFIG_REGULATOR_WM831X) += wm831x-dcdc.o
obj-$(CONFIG_REGULATOR_WM831X) += wm831x-isink.o
diff --git a/drivers/regulator/ab8500.c b/drivers/regulator/ab8500.c
index 28c7ae67cec9..db6b70f20511 100644
--- a/drivers/regulator/ab8500.c
+++ b/drivers/regulator/ab8500.c
@@ -21,6 +21,7 @@
#include <linux/err.h>
#include <linux/platform_device.h>
#include <linux/mfd/ab8500.h>
+#include <linux/mfd/abx500.h>
#include <linux/regulator/driver.h>
#include <linux/regulator/machine.h>
#include <linux/regulator/ab8500.h>
@@ -33,9 +34,11 @@
* @max_uV: maximum voltage (for variable voltage supplies)
* @min_uV: minimum voltage (for variable voltage supplies)
* @fixed_uV: typical voltage (for fixed voltage supplies)
+ * @update_bank: bank to control on/off
* @update_reg: register to control on/off
* @mask: mask to enable/disable regulator
* @enable: bits to enable the regulator in normal(high power) mode
+ * @voltage_bank: bank to control regulator voltage
* @voltage_reg: register to control regulator voltage
* @voltage_mask: mask to control regulator voltage
* @supported_voltages: supported voltage table
@@ -49,11 +52,13 @@ struct ab8500_regulator_info {
int max_uV;
int min_uV;
int fixed_uV;
- int update_reg;
- int mask;
- int enable;
- int voltage_reg;
- int voltage_mask;
+ u8 update_bank;
+ u8 update_reg;
+ u8 mask;
+ u8 enable;
+ u8 voltage_bank;
+ u8 voltage_reg;
+ u8 voltage_mask;
int const *supported_voltages;
int voltages_len;
};
@@ -97,8 +102,8 @@ static int ab8500_regulator_enable(struct regulator_dev *rdev)
if (regulator_id >= AB8500_NUM_REGULATORS)
return -EINVAL;
- ret = ab8500_set_bits(info->ab8500, info->update_reg,
- info->mask, info->enable);
+ ret = abx500_mask_and_set_register_interruptible(info->dev,
+ info->update_bank, info->update_reg, info->mask, info->enable);
if (ret < 0)
dev_err(rdev_get_dev(rdev),
"couldn't set enable bits for regulator\n");
@@ -114,8 +119,8 @@ static int ab8500_regulator_disable(struct regulator_dev *rdev)
if (regulator_id >= AB8500_NUM_REGULATORS)
return -EINVAL;
- ret = ab8500_set_bits(info->ab8500, info->update_reg,
- info->mask, 0x0);
+ ret = abx500_mask_and_set_register_interruptible(info->dev,
+ info->update_bank, info->update_reg, info->mask, 0x0);
if (ret < 0)
dev_err(rdev_get_dev(rdev),
"couldn't set disable bits for regulator\n");
@@ -126,19 +131,21 @@ static int ab8500_regulator_is_enabled(struct regulator_dev *rdev)
{
int regulator_id, ret;
struct ab8500_regulator_info *info = rdev_get_drvdata(rdev);
+ u8 value;
regulator_id = rdev_get_id(rdev);
if (regulator_id >= AB8500_NUM_REGULATORS)
return -EINVAL;
- ret = ab8500_read(info->ab8500, info->update_reg);
+ ret = abx500_get_register_interruptible(info->dev,
+ info->update_bank, info->update_reg, &value);
if (ret < 0) {
dev_err(rdev_get_dev(rdev),
"couldn't read 0x%x register\n", info->update_reg);
return ret;
}
- if (ret & info->mask)
+ if (value & info->mask)
return true;
else
return false;
@@ -165,14 +172,16 @@ static int ab8500_list_voltage(struct regulator_dev *rdev, unsigned selector)
static int ab8500_regulator_get_voltage(struct regulator_dev *rdev)
{
- int regulator_id, ret, val;
+ int regulator_id, ret;
struct ab8500_regulator_info *info = rdev_get_drvdata(rdev);
+ u8 value;
regulator_id = rdev_get_id(rdev);
if (regulator_id >= AB8500_NUM_REGULATORS)
return -EINVAL;
- ret = ab8500_read(info->ab8500, info->voltage_reg);
+ ret = abx500_get_register_interruptible(info->dev, info->voltage_bank,
+ info->voltage_reg, &value);
if (ret < 0) {
dev_err(rdev_get_dev(rdev),
"couldn't read voltage reg for regulator\n");
@@ -180,11 +189,11 @@ static int ab8500_regulator_get_voltage(struct regulator_dev *rdev)
}
/* vintcore has a different layout */
- val = ret & info->voltage_mask;
+ value &= info->voltage_mask;
if (regulator_id == AB8500_LDO_INTCORE)
- ret = info->supported_voltages[val >> 0x3];
+ ret = info->supported_voltages[value >> 0x3];
else
- ret = info->supported_voltages[val];
+ ret = info->supported_voltages[value];
return ret;
}
@@ -224,8 +233,9 @@ static int ab8500_regulator_set_voltage(struct regulator_dev *rdev,
}
/* set the registers for the request */
- ret = ab8500_set_bits(info->ab8500, info->voltage_reg,
- info->voltage_mask, ret);
+ ret = abx500_mask_and_set_register_interruptible(info->dev,
+ info->voltage_bank, info->voltage_reg,
+ info->voltage_mask, (u8)ret);
if (ret < 0)
dev_err(rdev_get_dev(rdev),
"couldn't set voltage reg for regulator\n");
@@ -262,9 +272,9 @@ static struct regulator_ops ab8500_ldo_fixed_ops = {
.list_voltage = ab8500_list_voltage,
};
-#define AB8500_LDO(_id, min, max, reg, reg_mask, reg_enable, \
- volt_reg, volt_mask, voltages, \
- len_volts) \
+#define AB8500_LDO(_id, min, max, bank, reg, reg_mask, \
+ reg_enable, volt_bank, volt_reg, volt_mask, \
+ voltages, len_volts) \
{ \
.desc = { \
.name = "LDO-" #_id, \
@@ -275,9 +285,11 @@ static struct regulator_ops ab8500_ldo_fixed_ops = {
}, \
.min_uV = (min) * 1000, \
.max_uV = (max) * 1000, \
+ .update_bank = bank, \
.update_reg = reg, \
.mask = reg_mask, \
.enable = reg_enable, \
+ .voltage_bank = volt_bank, \
.voltage_reg = volt_reg, \
.voltage_mask = volt_mask, \
.supported_voltages = voltages, \
@@ -285,8 +297,8 @@ static struct regulator_ops ab8500_ldo_fixed_ops = {
.fixed_uV = 0, \
}
-#define AB8500_FIXED_LDO(_id, fixed, reg, reg_mask, \
- reg_enable) \
+#define AB8500_FIXED_LDO(_id, fixed, bank, reg, \
+ reg_mask, reg_enable) \
{ \
.desc = { \
.name = "LDO-" #_id, \
@@ -296,6 +308,7 @@ static struct regulator_ops ab8500_ldo_fixed_ops = {
.owner = THIS_MODULE, \
}, \
.fixed_uV = fixed * 1000, \
+ .update_bank = bank, \
.update_reg = reg, \
.mask = reg_mask, \
.enable = reg_enable, \
@@ -304,28 +317,29 @@ static struct regulator_ops ab8500_ldo_fixed_ops = {
static struct ab8500_regulator_info ab8500_regulator_info[] = {
/*
* Variable Voltage LDOs
- * name, min uV, max uV, ctrl reg, reg mask, enable mask,
- * volt ctrl reg, volt ctrl mask, volt table, num supported volts
+ * name, min uV, max uV, ctrl bank, ctrl reg, reg mask, enable mask,
+ * volt ctrl bank, volt ctrl reg, volt ctrl mask, volt table,
+ * num supported volts
*/
- AB8500_LDO(AUX1, 1100, 3300, 0x0409, 0x3, 0x1, 0x041f, 0xf,
+ AB8500_LDO(AUX1, 1100, 3300, 0x04, 0x09, 0x3, 0x1, 0x04, 0x1f, 0xf,
ldo_vauxn_voltages, ARRAY_SIZE(ldo_vauxn_voltages)),
- AB8500_LDO(AUX2, 1100, 3300, 0x0409, 0xc, 0x4, 0x0420, 0xf,
+ AB8500_LDO(AUX2, 1100, 3300, 0x04, 0x09, 0xc, 0x4, 0x04, 0x20, 0xf,
ldo_vauxn_voltages, ARRAY_SIZE(ldo_vauxn_voltages)),
- AB8500_LDO(AUX3, 1100, 3300, 0x040a, 0x3, 0x1, 0x0421, 0xf,
+ AB8500_LDO(AUX3, 1100, 3300, 0x04, 0x0a, 0x3, 0x1, 0x04, 0x21, 0xf,
ldo_vauxn_voltages, ARRAY_SIZE(ldo_vauxn_voltages)),
- AB8500_LDO(INTCORE, 1100, 3300, 0x0380, 0x4, 0x4, 0x0380, 0x38,
+ AB8500_LDO(INTCORE, 1100, 3300, 0x03, 0x80, 0x4, 0x4, 0x03, 0x80, 0x38,
ldo_vintcore_voltages, ARRAY_SIZE(ldo_vintcore_voltages)),
/*
* Fixed Voltage LDOs
- * name, o/p uV, ctrl reg, enable, disable
+ * name, o/p uV, ctrl bank, ctrl reg, enable, disable
*/
- AB8500_FIXED_LDO(TVOUT, 2000, 0x0380, 0x2, 0x2),
- AB8500_FIXED_LDO(AUDIO, 2000, 0x0383, 0x2, 0x2),
- AB8500_FIXED_LDO(ANAMIC1, 2050, 0x0383, 0x4, 0x4),
- AB8500_FIXED_LDO(ANAMIC2, 2050, 0x0383, 0x8, 0x8),
- AB8500_FIXED_LDO(DMIC, 1800, 0x0383, 0x10, 0x10),
- AB8500_FIXED_LDO(ANA, 1200, 0x0383, 0xc, 0x4),
+ AB8500_FIXED_LDO(TVOUT, 2000, 0x03, 0x80, 0x2, 0x2),
+ AB8500_FIXED_LDO(AUDIO, 2000, 0x03, 0x83, 0x2, 0x2),
+ AB8500_FIXED_LDO(ANAMIC1, 2050, 0x03, 0x83, 0x4, 0x4),
+ AB8500_FIXED_LDO(ANAMIC2, 2050, 0x03, 0x83, 0x8, 0x8),
+ AB8500_FIXED_LDO(DMIC, 1800, 0x03, 0x83, 0x10, 0x10),
+ AB8500_FIXED_LDO(ANA, 1200, 0x03, 0x83, 0xc, 0x4),
};
static inline struct ab8500_regulator_info *find_regulator_info(int id)
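
The table conversion above replaces packed values such as 0x0409 with an explicit (bank, register) pair for the abx500 interruptible register API. A small standalone sketch of that split, assuming the old packed encoding was simply bank << 8 | reg, as the before/after table entries suggest:

/* Standalone sketch: splitting a packed bank/register value the way the
 * ab8500 table conversion does by hand (0x0409 -> bank 0x04, reg 0x09). */
#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint16_t packed[] = { 0x0409, 0x040a, 0x0380, 0x0383, 0x041f, 0x0420 };
	unsigned int i;

	for (i = 0; i < sizeof(packed) / sizeof(packed[0]); i++) {
		uint8_t bank = packed[i] >> 8;
		uint8_t reg  = packed[i] & 0xff;

		printf("0x%04x -> bank 0x%02x, reg 0x%02x\n",
		       packed[i], bank, reg);
	}
	return 0;
}
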
diff --git a/drivers/regulator/core.c b/drivers/regulator/core.c
index cc8b337b9119..ba521f0f0fac 100644
--- a/drivers/regulator/core.c
+++ b/drivers/regulator/core.c
@@ -33,6 +33,7 @@ static DEFINE_MUTEX(regulator_list_mutex);
static LIST_HEAD(regulator_list);
static LIST_HEAD(regulator_map_list);
static int has_full_constraints;
+static bool board_wants_dummy_regulator;
/*
* struct regulator_map
@@ -63,7 +64,8 @@ struct regulator {
};
static int _regulator_is_enabled(struct regulator_dev *rdev);
-static int _regulator_disable(struct regulator_dev *rdev);
+static int _regulator_disable(struct regulator_dev *rdev,
+ struct regulator_dev **supply_rdev_ptr);
static int _regulator_get_voltage(struct regulator_dev *rdev);
static int _regulator_get_current_limit(struct regulator_dev *rdev);
static unsigned int _regulator_get_mode(struct regulator_dev *rdev);
@@ -909,7 +911,7 @@ out:
}
/**
- * set_consumer_device_supply: Bind a regulator to a symbolic supply
+ * set_consumer_device_supply - Bind a regulator to a symbolic supply
* @rdev: regulator source
* @consumer_dev: device the supply applies to
* @consumer_dev_name: dev_name() string for device supply applies to
@@ -1050,7 +1052,6 @@ static struct regulator *create_regulator(struct regulator_dev *rdev,
printk(KERN_WARNING
"%s: could not add device link %s err %d\n",
__func__, dev->kobj.name, err);
- device_remove_file(dev, &regulator->dev_attr);
goto link_name_err;
}
}
@@ -1108,6 +1109,11 @@ static struct regulator *_regulator_get(struct device *dev, const char *id,
}
}
+ if (board_wants_dummy_regulator) {
+ rdev = dummy_regulator_rdev;
+ goto found;
+ }
+
#ifdef CONFIG_REGULATOR_DUMMY
if (!devname)
devname = "deviceless";
@@ -1261,13 +1267,17 @@ static int _regulator_enable(struct regulator_dev *rdev)
{
int ret, delay;
- /* do we need to enable the supply regulator first */
- if (rdev->supply) {
- ret = _regulator_enable(rdev->supply);
- if (ret < 0) {
- printk(KERN_ERR "%s: failed to enable %s: %d\n",
- __func__, rdev_get_name(rdev), ret);
- return ret;
+ if (rdev->use_count == 0) {
+ /* do we need to enable the supply regulator first */
+ if (rdev->supply) {
+ mutex_lock(&rdev->supply->mutex);
+ ret = _regulator_enable(rdev->supply);
+ mutex_unlock(&rdev->supply->mutex);
+ if (ret < 0) {
+ printk(KERN_ERR "%s: failed to enable %s: %d\n",
+ __func__, rdev_get_name(rdev), ret);
+ return ret;
+ }
}
}
@@ -1306,10 +1316,12 @@ static int _regulator_enable(struct regulator_dev *rdev)
if (ret < 0)
return ret;
- if (delay >= 1000)
+ if (delay >= 1000) {
mdelay(delay / 1000);
- else if (delay)
+ udelay(delay % 1000);
+ } else if (delay) {
udelay(delay);
+ }
} else if (ret < 0) {
printk(KERN_ERR "%s: is_enabled() failed for %s: %d\n",
@@ -1348,9 +1360,11 @@ int regulator_enable(struct regulator *regulator)
EXPORT_SYMBOL_GPL(regulator_enable);
/* locks held by regulator_disable() */
-static int _regulator_disable(struct regulator_dev *rdev)
+static int _regulator_disable(struct regulator_dev *rdev,
+ struct regulator_dev **supply_rdev_ptr)
{
int ret = 0;
+ *supply_rdev_ptr = NULL;
if (WARN(rdev->use_count <= 0,
"unbalanced disables for %s\n",
@@ -1376,8 +1390,7 @@ static int _regulator_disable(struct regulator_dev *rdev)
}
/* decrease our supplies ref count and disable if required */
- if (rdev->supply)
- _regulator_disable(rdev->supply);
+ *supply_rdev_ptr = rdev->supply;
rdev->use_count = 0;
} else if (rdev->use_count > 1) {
@@ -1407,17 +1420,29 @@ static int _regulator_disable(struct regulator_dev *rdev)
int regulator_disable(struct regulator *regulator)
{
struct regulator_dev *rdev = regulator->rdev;
+ struct regulator_dev *supply_rdev = NULL;
int ret = 0;
mutex_lock(&rdev->mutex);
- ret = _regulator_disable(rdev);
+ ret = _regulator_disable(rdev, &supply_rdev);
mutex_unlock(&rdev->mutex);
+
+ /* decrease our supplies ref count and disable if required */
+ while (supply_rdev != NULL) {
+ rdev = supply_rdev;
+
+ mutex_lock(&rdev->mutex);
+ _regulator_disable(rdev, &supply_rdev);
+ mutex_unlock(&rdev->mutex);
+ }
+
return ret;
}
EXPORT_SYMBOL_GPL(regulator_disable);
/* locks held by regulator_force_disable() */
-static int _regulator_force_disable(struct regulator_dev *rdev)
+static int _regulator_force_disable(struct regulator_dev *rdev,
+ struct regulator_dev **supply_rdev_ptr)
{
int ret = 0;
@@ -1436,8 +1461,7 @@ static int _regulator_force_disable(struct regulator_dev *rdev)
}
/* decrease our supplies ref count and disable if required */
- if (rdev->supply)
- _regulator_disable(rdev->supply);
+ *supply_rdev_ptr = rdev->supply;
rdev->use_count = 0;
return ret;
@@ -1454,12 +1478,17 @@ static int _regulator_force_disable(struct regulator_dev *rdev)
*/
int regulator_force_disable(struct regulator *regulator)
{
+ struct regulator_dev *supply_rdev = NULL;
int ret;
mutex_lock(&regulator->rdev->mutex);
regulator->uA_load = 0;
- ret = _regulator_force_disable(regulator->rdev);
+ ret = _regulator_force_disable(regulator->rdev, &supply_rdev);
mutex_unlock(&regulator->rdev->mutex);
+
+ if (supply_rdev)
+ regulator_disable(get_device_regulator(rdev_get_dev(supply_rdev)));
+
return ret;
}
EXPORT_SYMBOL_GPL(regulator_force_disable);
@@ -2323,6 +2352,7 @@ struct regulator_dev *regulator_register(struct regulator_desc *regulator_desc,
if (init_data->supply_regulator && init_data->supply_regulator_dev) {
dev_err(dev,
"Supply regulator specified by both name and dev\n");
+ ret = -EINVAL;
goto scrub;
}
@@ -2341,6 +2371,7 @@ struct regulator_dev *regulator_register(struct regulator_desc *regulator_desc,
if (!found) {
dev_err(dev, "Failed to find supply %s\n",
init_data->supply_regulator);
+ ret = -ENODEV;
goto scrub;
}
@@ -2463,6 +2494,22 @@ void regulator_has_full_constraints(void)
EXPORT_SYMBOL_GPL(regulator_has_full_constraints);
/**
+ * regulator_use_dummy_regulator - Provide a dummy regulator when none is found
+ *
+ * Calling this function will cause the regulator API to provide a
+ * dummy regulator to consumers if no physical regulator is found,
+ * allowing most consumers to proceed as though a regulator were
+ * configured. This allows systems such as those with software
+ * controllable regulators for the CPU core only to be brought up more
+ * readily.
+ */
+void regulator_use_dummy_regulator(void)
+{
+ board_wants_dummy_regulator = true;
+}
+EXPORT_SYMBOL_GPL(regulator_use_dummy_regulator);
+
+/**
* rdev_get_drvdata - get rdev regulator driver data
* @rdev: regulator
*
diff --git a/drivers/regulator/dummy.h b/drivers/regulator/dummy.h
index 3921c0e24249..97a11b7e8882 100644
--- a/drivers/regulator/dummy.h
+++ b/drivers/regulator/dummy.h
@@ -22,10 +22,6 @@ struct regulator_dev;
extern struct regulator_dev *dummy_regulator_rdev;
-#ifdef CONFIG_REGULATOR_DUMMY
void __init regulator_dummy_init(void);
-#else
-static inline void regulator_dummy_init(void) { }
-#endif
#endif
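
With regulator_use_dummy_regulator() exported above, a board whose only software-controllable supply is the CPU core can opt in from its machine init code. A hedged sketch of such a call site follows; the board hook name is hypothetical, and the new declaration is assumed to sit next to regulator_has_full_constraints() in linux/regulator/machine.h:

#include <linux/regulator/machine.h>

/* Hypothetical board hook; only the two regulator API calls are real. */
static void __init example_board_regulator_init(void)
{
	/* Hand dummy regulators to consumers with no physical supply. */
	regulator_use_dummy_regulator();

	/* Declare that all known constraints have been registered. */
	regulator_has_full_constraints();
}
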
diff --git a/drivers/regulator/lp3972.c b/drivers/regulator/lp3972.c
new file mode 100644
index 000000000000..e07062fd0b42
--- /dev/null
+++ b/drivers/regulator/lp3972.c
@@ -0,0 +1,660 @@
+/*
+ * Regulator driver for National Semiconductors LP3972 PMIC chip
+ *
+ * Based on lp3971.c
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ */
+
+#include <linux/bug.h>
+#include <linux/err.h>
+#include <linux/i2c.h>
+#include <linux/kernel.h>
+#include <linux/regulator/driver.h>
+#include <linux/regulator/lp3972.h>
+#include <linux/slab.h>
+
+struct lp3972 {
+ struct device *dev;
+ struct mutex io_lock;
+ struct i2c_client *i2c;
+ int num_regulators;
+ struct regulator_dev **rdev;
+};
+
+/* LP3972 Control Registers */
+#define LP3972_SCR_REG 0x07
+#define LP3972_OVER1_REG 0x10
+#define LP3972_OVSR1_REG 0x11
+#define LP3972_OVER2_REG 0x12
+#define LP3972_OVSR2_REG 0x13
+#define LP3972_VCC1_REG 0x20
+#define LP3972_ADTV1_REG 0x23
+#define LP3972_ADTV2_REG 0x24
+#define LP3972_AVRC_REG 0x25
+#define LP3972_CDTC1_REG 0x26
+#define LP3972_CDTC2_REG 0x27
+#define LP3972_SDTV1_REG 0x29
+#define LP3972_SDTV2_REG 0x2A
+#define LP3972_MDTV1_REG 0x32
+#define LP3972_MDTV2_REG 0x33
+#define LP3972_L2VCR_REG 0x39
+#define LP3972_L34VCR_REG 0x3A
+#define LP3972_SCR1_REG 0x80
+#define LP3972_SCR2_REG 0x81
+#define LP3972_OEN3_REG 0x82
+#define LP3972_OSR3_REG 0x83
+#define LP3972_LOER4_REG 0x84
+#define LP3972_B2TV_REG 0x85
+#define LP3972_B3TV_REG 0x86
+#define LP3972_B32RC_REG 0x87
+#define LP3972_ISRA_REG 0x88
+#define LP3972_BCCR_REG 0x89
+#define LP3972_II1RR_REG 0x8E
+#define LP3972_II2RR_REG 0x8F
+
+#define LP3972_SYS_CONTROL1_REG LP3972_SCR1_REG
+/* System control register 1 initial value,
+ * bits 5, 6 and 7 are EPROM programmable */
+#define SYS_CONTROL1_INIT_VAL 0x02
+#define SYS_CONTROL1_INIT_MASK 0x1F
+
+#define LP3972_VOL_CHANGE_REG LP3972_VCC1_REG
+#define LP3972_VOL_CHANGE_FLAG_GO 0x01
+#define LP3972_VOL_CHANGE_FLAG_MASK 0x03
+
+/* LDO output enable mask */
+#define LP3972_OEN3_L1EN BIT(0)
+#define LP3972_OVER2_LDO2_EN BIT(2)
+#define LP3972_OVER2_LDO3_EN BIT(3)
+#define LP3972_OVER2_LDO4_EN BIT(4)
+#define LP3972_OVER1_S_EN BIT(2)
+
+static const int ldo1_voltage_map[] = {
+ 1700, 1725, 1750, 1775, 1800, 1825, 1850, 1875,
+ 1900, 1925, 1950, 1975, 2000,
+};
+
+static const int ldo23_voltage_map[] = {
+ 1800, 1900, 2000, 2100, 2200, 2300, 2400, 2500,
+ 2600, 2700, 2800, 2900, 3000, 3100, 3200, 3300,
+};
+
+static const int ldo4_voltage_map[] = {
+ 1000, 1050, 1100, 1150, 1200, 1250, 1300, 1350,
+ 1400, 1500, 1800, 1900, 2500, 2800, 3000, 3300,
+};
+
+static const int ldo5_voltage_map[] = {
+ 0, 0, 0, 0, 0, 850, 875, 900,
+ 925, 950, 975, 1000, 1025, 1050, 1075, 1100,
+ 1125, 1150, 1175, 1200, 1225, 1250, 1275, 1300,
+ 1325, 1350, 1375, 1400, 1425, 1450, 1475, 1500,
+};
+
+static const int buck1_voltage_map[] = {
+ 725, 750, 775, 800, 825, 850, 875, 900,
+ 925, 950, 975, 1000, 1025, 1050, 1075, 1100,
+ 1125, 1150, 1175, 1200, 1225, 1250, 1275, 1300,
+ 1325, 1350, 1375, 1400, 1425, 1450, 1475, 1500,
+};
+
+static const int buck23_voltage_map[] = {
+ 0, 800, 850, 900, 950, 1000, 1050, 1100,
+ 1150, 1200, 1250, 1300, 1350, 1400, 1450, 1500,
+ 1550, 1600, 1650, 1700, 1800, 1900, 2500, 2800,
+ 3000, 3300,
+};
+
+static const int *ldo_voltage_map[] = {
+ ldo1_voltage_map,
+ ldo23_voltage_map,
+ ldo23_voltage_map,
+ ldo4_voltage_map,
+ ldo5_voltage_map,
+};
+
+static const int *buck_voltage_map[] = {
+ buck1_voltage_map,
+ buck23_voltage_map,
+ buck23_voltage_map,
+};
+
+static const int ldo_output_enable_mask[] = {
+ LP3972_OEN3_L1EN,
+ LP3972_OVER2_LDO2_EN,
+ LP3972_OVER2_LDO3_EN,
+ LP3972_OVER2_LDO4_EN,
+ LP3972_OVER1_S_EN,
+};
+
+static const int ldo_output_enable_addr[] = {
+ LP3972_OEN3_REG,
+ LP3972_OVER2_REG,
+ LP3972_OVER2_REG,
+ LP3972_OVER2_REG,
+ LP3972_OVER1_REG,
+};
+
+static const int ldo_vol_ctl_addr[] = {
+ LP3972_MDTV1_REG,
+ LP3972_L2VCR_REG,
+ LP3972_L34VCR_REG,
+ LP3972_L34VCR_REG,
+ LP3972_SDTV1_REG,
+};
+
+static const int buck_vol_enable_addr[] = {
+ LP3972_OVER1_REG,
+ LP3972_OEN3_REG,
+ LP3972_OEN3_REG,
+};
+
+static const int buck_base_addr[] = {
+ LP3972_ADTV1_REG,
+ LP3972_B2TV_REG,
+ LP3972_B3TV_REG,
+};
+
+#define LP3972_LDO_VOL_VALUE_MAP(x) (ldo_voltage_map[x])
+#define LP3972_LDO_OUTPUT_ENABLE_MASK(x) (ldo_output_enable_mask[x])
+#define LP3972_LDO_OUTPUT_ENABLE_REG(x) (ldo_output_enable_addr[x])
+
+/* LDO voltage control registers shift:
+ LP3972_LDO1 -> 0, LP3972_LDO2 -> 4
+ LP3972_LDO3 -> 0, LP3972_LDO4 -> 4
+ LP3972_LDO5 -> 0
+*/
+#define LP3972_LDO_VOL_CONTR_SHIFT(x) (((x) & 1) << 2)
+#define LP3972_LDO_VOL_CONTR_REG(x) (ldo_vol_ctl_addr[x])
+#define LP3972_LDO_VOL_CHANGE_SHIFT(x) ((x) ? 4 : 6)
+
+#define LP3972_LDO_VOL_MASK(x) (((x) % 4) ? 0x0f : 0x1f)
+#define LP3972_LDO_VOL_MIN_IDX(x) (((x) == 4) ? 0x05 : 0x00)
+#define LP3972_LDO_VOL_MAX_IDX(x) ((x) ? (((x) == 4) ? 0x1f : 0x0f) : 0x0c)
+
+#define LP3972_BUCK_VOL_VALUE_MAP(x) (buck_voltage_map[x])
+#define LP3972_BUCK_VOL_ENABLE_REG(x) (buck_vol_enable_addr[x])
+#define LP3972_BUCK_VOL1_REG(x) (buck_base_addr[x])
+#define LP3972_BUCK_VOL_MASK 0x1f
+#define LP3972_BUCK_VOL_MIN_IDX(x) ((x) ? 0x01 : 0x00)
+#define LP3972_BUCK_VOL_MAX_IDX(x) ((x) ? 0x19 : 0x1f)
+
+static int lp3972_i2c_read(struct i2c_client *i2c, char reg, int count,
+ u16 *dest)
+{
+ int ret;
+
+ if (count != 1)
+ return -EIO;
+ ret = i2c_smbus_read_byte_data(i2c, reg);
+ if (ret < 0)
+ return ret;
+
+ *dest = ret;
+ return 0;
+}
+
+static int lp3972_i2c_write(struct i2c_client *i2c, char reg, int count,
+ const u16 *src)
+{
+ if (count != 1)
+ return -EIO;
+ return i2c_smbus_write_byte_data(i2c, reg, *src);
+}
+
+static u8 lp3972_reg_read(struct lp3972 *lp3972, u8 reg)
+{
+ u16 val = 0;
+
+ mutex_lock(&lp3972->io_lock);
+
+ lp3972_i2c_read(lp3972->i2c, reg, 1, &val);
+
+ dev_dbg(lp3972->dev, "reg read 0x%02x -> 0x%02x\n", (int)reg,
+ (unsigned)val & 0xff);
+
+ mutex_unlock(&lp3972->io_lock);
+
+ return val & 0xff;
+}
+
+static int lp3972_set_bits(struct lp3972 *lp3972, u8 reg, u16 mask, u16 val)
+{
+ u16 tmp;
+ int ret;
+
+ mutex_lock(&lp3972->io_lock);
+
+ ret = lp3972_i2c_read(lp3972->i2c, reg, 1, &tmp);
+ tmp = (tmp & ~mask) | val;
+ if (ret == 0) {
+ ret = lp3972_i2c_write(lp3972->i2c, reg, 1, &tmp);
+ dev_dbg(lp3972->dev, "reg write 0x%02x -> 0x%02x\n", (int)reg,
+ (unsigned)val & 0xff);
+ }
+ mutex_unlock(&lp3972->io_lock);
+
+ return ret;
+}
+
+static int lp3972_ldo_list_voltage(struct regulator_dev *dev, unsigned index)
+{
+ int ldo = rdev_get_id(dev) - LP3972_LDO1;
+ return 1000 * LP3972_LDO_VOL_VALUE_MAP(ldo)[index];
+}
+
+static int lp3972_ldo_is_enabled(struct regulator_dev *dev)
+{
+ struct lp3972 *lp3972 = rdev_get_drvdata(dev);
+ int ldo = rdev_get_id(dev) - LP3972_LDO1;
+ u16 mask = LP3972_LDO_OUTPUT_ENABLE_MASK(ldo);
+ u16 val;
+
+ val = lp3972_reg_read(lp3972, LP3972_LDO_OUTPUT_ENABLE_REG(ldo));
+ return !!(val & mask);
+}
+
+static int lp3972_ldo_enable(struct regulator_dev *dev)
+{
+ struct lp3972 *lp3972 = rdev_get_drvdata(dev);
+ int ldo = rdev_get_id(dev) - LP3972_LDO1;
+ u16 mask = LP3972_LDO_OUTPUT_ENABLE_MASK(ldo);
+
+ return lp3972_set_bits(lp3972, LP3972_LDO_OUTPUT_ENABLE_REG(ldo),
+ mask, mask);
+}
+
+static int lp3972_ldo_disable(struct regulator_dev *dev)
+{
+ struct lp3972 *lp3972 = rdev_get_drvdata(dev);
+ int ldo = rdev_get_id(dev) - LP3972_LDO1;
+ u16 mask = LP3972_LDO_OUTPUT_ENABLE_MASK(ldo);
+
+ return lp3972_set_bits(lp3972, LP3972_LDO_OUTPUT_ENABLE_REG(ldo),
+ mask, 0);
+}
+
+static int lp3972_ldo_get_voltage(struct regulator_dev *dev)
+{
+ struct lp3972 *lp3972 = rdev_get_drvdata(dev);
+ int ldo = rdev_get_id(dev) - LP3972_LDO1;
+ u16 mask = LP3972_LDO_VOL_MASK(ldo);
+ u16 val, reg;
+
+ reg = lp3972_reg_read(lp3972, LP3972_LDO_VOL_CONTR_REG(ldo));
+ val = (reg >> LP3972_LDO_VOL_CONTR_SHIFT(ldo)) & mask;
+
+ return 1000 * LP3972_LDO_VOL_VALUE_MAP(ldo)[val];
+}
+
+static int lp3972_ldo_set_voltage(struct regulator_dev *dev,
+ int min_uV, int max_uV)
+{
+ struct lp3972 *lp3972 = rdev_get_drvdata(dev);
+ int ldo = rdev_get_id(dev) - LP3972_LDO1;
+ int min_vol = min_uV / 1000, max_vol = max_uV / 1000;
+ const int *vol_map = LP3972_LDO_VOL_VALUE_MAP(ldo);
+ u16 val;
+ int shift, ret;
+
+ if (min_vol < vol_map[LP3972_LDO_VOL_MIN_IDX(ldo)] ||
+ min_vol > vol_map[LP3972_LDO_VOL_MAX_IDX(ldo)])
+ return -EINVAL;
+
+ for (val = LP3972_LDO_VOL_MIN_IDX(ldo);
+ val <= LP3972_LDO_VOL_MAX_IDX(ldo); val++)
+ if (vol_map[val] >= min_vol)
+ break;
+
+ if (val > LP3972_LDO_VOL_MAX_IDX(ldo) || vol_map[val] > max_vol)
+ return -EINVAL;
+
+ shift = LP3972_LDO_VOL_CONTR_SHIFT(ldo);
+ ret = lp3972_set_bits(lp3972, LP3972_LDO_VOL_CONTR_REG(ldo),
+ LP3972_LDO_VOL_MASK(ldo) << shift, val << shift);
+
+ if (ret)
+ return ret;
+
+ /*
+ * LDO1 and LDO5 support voltage control by either target voltage1
+ * or target voltage2 register.
+ * We use target voltage1 register for LDO1 and LDO5 in this driver.
+ * We need to update voltage change control register(0x20) to enable
+ * LDO1 and LDO5 to change to their programmed target values.
+ */
+ switch (ldo) {
+ case LP3972_LDO1:
+ case LP3972_LDO5:
+ shift = LP3972_LDO_VOL_CHANGE_SHIFT(ldo);
+ ret = lp3972_set_bits(lp3972, LP3972_VOL_CHANGE_REG,
+ LP3972_VOL_CHANGE_FLAG_MASK << shift,
+ LP3972_VOL_CHANGE_FLAG_GO << shift);
+ if (ret)
+ return ret;
+
+ ret = lp3972_set_bits(lp3972, LP3972_VOL_CHANGE_REG,
+ LP3972_VOL_CHANGE_FLAG_MASK << shift, 0);
+ break;
+ }
+
+ return ret;
+}
+
+static struct regulator_ops lp3972_ldo_ops = {
+ .list_voltage = lp3972_ldo_list_voltage,
+ .is_enabled = lp3972_ldo_is_enabled,
+ .enable = lp3972_ldo_enable,
+ .disable = lp3972_ldo_disable,
+ .get_voltage = lp3972_ldo_get_voltage,
+ .set_voltage = lp3972_ldo_set_voltage,
+};
+
+static int lp3972_dcdc_list_voltage(struct regulator_dev *dev, unsigned index)
+{
+ int buck = rdev_get_id(dev) - LP3972_DCDC1;
+ return 1000 * buck_voltage_map[buck][index];
+}
+
+static int lp3972_dcdc_is_enabled(struct regulator_dev *dev)
+{
+ struct lp3972 *lp3972 = rdev_get_drvdata(dev);
+ int buck = rdev_get_id(dev) - LP3972_DCDC1;
+ u16 mask = 1 << (buck * 2);
+ u16 val;
+
+ val = lp3972_reg_read(lp3972, LP3972_BUCK_VOL_ENABLE_REG(buck));
+ return !!(val & mask);
+}
+
+static int lp3972_dcdc_enable(struct regulator_dev *dev)
+{
+ struct lp3972 *lp3972 = rdev_get_drvdata(dev);
+ int buck = rdev_get_id(dev) - LP3972_DCDC1;
+ u16 mask = 1 << (buck * 2);
+ u16 val;
+
+ val = lp3972_set_bits(lp3972, LP3972_BUCK_VOL_ENABLE_REG(buck),
+ mask, mask);
+ return val;
+}
+
+static int lp3972_dcdc_disable(struct regulator_dev *dev)
+{
+ struct lp3972 *lp3972 = rdev_get_drvdata(dev);
+ int buck = rdev_get_id(dev) - LP3972_DCDC1;
+ u16 mask = 1 << (buck * 2);
+ u16 val;
+
+ val = lp3972_set_bits(lp3972, LP3972_BUCK_VOL_ENABLE_REG(buck),
+ mask, 0);
+ return val;
+}
+
+static int lp3972_dcdc_get_voltage(struct regulator_dev *dev)
+{
+ struct lp3972 *lp3972 = rdev_get_drvdata(dev);
+ int buck = rdev_get_id(dev) - LP3972_DCDC1;
+ u16 reg;
+ int val;
+
+ reg = lp3972_reg_read(lp3972, LP3972_BUCK_VOL1_REG(buck));
+ reg &= LP3972_BUCK_VOL_MASK;
+ if (reg <= LP3972_BUCK_VOL_MAX_IDX(buck))
+ val = 1000 * buck_voltage_map[buck][reg];
+ else {
+ val = 0;
+ dev_warn(&dev->dev, "chip reported incorrect voltage value."
+ " reg = %d\n", reg);
+ }
+
+ return val;
+}
+
+static int lp3972_dcdc_set_voltage(struct regulator_dev *dev,
+ int min_uV, int max_uV)
+{
+ struct lp3972 *lp3972 = rdev_get_drvdata(dev);
+ int buck = rdev_get_id(dev) - LP3972_DCDC1;
+ int min_vol = min_uV / 1000, max_vol = max_uV / 1000;
+ const int *vol_map = buck_voltage_map[buck];
+ u16 val;
+ int ret;
+
+ if (min_vol < vol_map[LP3972_BUCK_VOL_MIN_IDX(buck)] ||
+ min_vol > vol_map[LP3972_BUCK_VOL_MAX_IDX(buck)])
+ return -EINVAL;
+
+ for (val = LP3972_BUCK_VOL_MIN_IDX(buck);
+ val <= LP3972_BUCK_VOL_MAX_IDX(buck); val++)
+ if (vol_map[val] >= min_vol)
+ break;
+
+ if (val > LP3972_BUCK_VOL_MAX_IDX(buck) ||
+ vol_map[val] > max_vol)
+ return -EINVAL;
+
+ ret = lp3972_set_bits(lp3972, LP3972_BUCK_VOL1_REG(buck),
+ LP3972_BUCK_VOL_MASK, val);
+ if (ret)
+ return ret;
+
+ if (buck != 0)
+ return ret;
+
+ ret = lp3972_set_bits(lp3972, LP3972_VOL_CHANGE_REG,
+ LP3972_VOL_CHANGE_FLAG_MASK, LP3972_VOL_CHANGE_FLAG_GO);
+ if (ret)
+ return ret;
+
+ return lp3972_set_bits(lp3972, LP3972_VOL_CHANGE_REG,
+ LP3972_VOL_CHANGE_FLAG_MASK, 0);
+}
+
+static struct regulator_ops lp3972_dcdc_ops = {
+ .list_voltage = lp3972_dcdc_list_voltage,
+ .is_enabled = lp3972_dcdc_is_enabled,
+ .enable = lp3972_dcdc_enable,
+ .disable = lp3972_dcdc_disable,
+ .get_voltage = lp3972_dcdc_get_voltage,
+ .set_voltage = lp3972_dcdc_set_voltage,
+};
+
+static struct regulator_desc regulators[] = {
+ {
+ .name = "LDO1",
+ .id = LP3972_LDO1,
+ .ops = &lp3972_ldo_ops,
+ .n_voltages = ARRAY_SIZE(ldo1_voltage_map),
+ .type = REGULATOR_VOLTAGE,
+ .owner = THIS_MODULE,
+ },
+ {
+ .name = "LDO2",
+ .id = LP3972_LDO2,
+ .ops = &lp3972_ldo_ops,
+ .n_voltages = ARRAY_SIZE(ldo23_voltage_map),
+ .type = REGULATOR_VOLTAGE,
+ .owner = THIS_MODULE,
+ },
+ {
+ .name = "LDO3",
+ .id = LP3972_LDO3,
+ .ops = &lp3972_ldo_ops,
+ .n_voltages = ARRAY_SIZE(ldo23_voltage_map),
+ .type = REGULATOR_VOLTAGE,
+ .owner = THIS_MODULE,
+ },
+ {
+ .name = "LDO4",
+ .id = LP3972_LDO4,
+ .ops = &lp3972_ldo_ops,
+ .n_voltages = ARRAY_SIZE(ldo4_voltage_map),
+ .type = REGULATOR_VOLTAGE,
+ .owner = THIS_MODULE,
+ },
+ {
+ .name = "LDO5",
+ .id = LP3972_LDO5,
+ .ops = &lp3972_ldo_ops,
+ .n_voltages = ARRAY_SIZE(ldo5_voltage_map),
+ .type = REGULATOR_VOLTAGE,
+ .owner = THIS_MODULE,
+ },
+ {
+ .name = "DCDC1",
+ .id = LP3972_DCDC1,
+ .ops = &lp3972_dcdc_ops,
+ .n_voltages = ARRAY_SIZE(buck1_voltage_map),
+ .type = REGULATOR_VOLTAGE,
+ .owner = THIS_MODULE,
+ },
+ {
+ .name = "DCDC2",
+ .id = LP3972_DCDC2,
+ .ops = &lp3972_dcdc_ops,
+ .n_voltages = ARRAY_SIZE(buck23_voltage_map),
+ .type = REGULATOR_VOLTAGE,
+ .owner = THIS_MODULE,
+ },
+ {
+ .name = "DCDC3",
+ .id = LP3972_DCDC3,
+ .ops = &lp3972_dcdc_ops,
+ .n_voltages = ARRAY_SIZE(buck23_voltage_map),
+ .type = REGULATOR_VOLTAGE,
+ .owner = THIS_MODULE,
+ },
+};
+
+static int __devinit setup_regulators(struct lp3972 *lp3972,
+ struct lp3972_platform_data *pdata)
+{
+ int i, err;
+
+ lp3972->num_regulators = pdata->num_regulators;
+ lp3972->rdev = kcalloc(pdata->num_regulators,
+ sizeof(struct regulator_dev *), GFP_KERNEL);
+ if (!lp3972->rdev) {
+ err = -ENOMEM;
+ goto err_nomem;
+ }
+
+ /* Instantiate the regulators */
+ for (i = 0; i < pdata->num_regulators; i++) {
+ struct lp3972_regulator_subdev *reg = &pdata->regulators[i];
+ lp3972->rdev[i] = regulator_register(&regulators[reg->id],
+ lp3972->dev, reg->initdata, lp3972);
+
+ if (IS_ERR(lp3972->rdev[i])) {
+ err = PTR_ERR(lp3972->rdev[i]);
+ dev_err(lp3972->dev, "regulator init failed: %d\n",
+ err);
+ goto error;
+ }
+ }
+
+ return 0;
+error:
+ while (--i >= 0)
+ regulator_unregister(lp3972->rdev[i]);
+ kfree(lp3972->rdev);
+ lp3972->rdev = NULL;
+err_nomem:
+ return err;
+}
+
+static int __devinit lp3972_i2c_probe(struct i2c_client *i2c,
+ const struct i2c_device_id *id)
+{
+ struct lp3972 *lp3972;
+ struct lp3972_platform_data *pdata = i2c->dev.platform_data;
+ int ret;
+ u16 val;
+
+ if (!pdata) {
+ dev_dbg(&i2c->dev, "No platform init data supplied\n");
+ return -ENODEV;
+ }
+
+ lp3972 = kzalloc(sizeof(struct lp3972), GFP_KERNEL);
+ if (!lp3972)
+ return -ENOMEM;
+
+ lp3972->i2c = i2c;
+ lp3972->dev = &i2c->dev;
+
+ mutex_init(&lp3972->io_lock);
+
+ /* Detect LP3972 */
+ ret = lp3972_i2c_read(i2c, LP3972_SYS_CONTROL1_REG, 1, &val);
+ if (ret == 0 &&
+ (val & SYS_CONTROL1_INIT_MASK) != SYS_CONTROL1_INIT_VAL) {
+ ret = -ENODEV;
+ dev_err(&i2c->dev, "chip reported: val = 0x%x\n", val);
+ }
+ if (ret < 0) {
+ dev_err(&i2c->dev, "failed to detect device. ret = %d\n", ret);
+ goto err_detect;
+ }
+
+ ret = setup_regulators(lp3972, pdata);
+ if (ret < 0)
+ goto err_detect;
+
+ i2c_set_clientdata(i2c, lp3972);
+ return 0;
+
+err_detect:
+ kfree(lp3972);
+ return ret;
+}
+
+static int __devexit lp3972_i2c_remove(struct i2c_client *i2c)
+{
+ struct lp3972 *lp3972 = i2c_get_clientdata(i2c);
+ int i;
+
+ for (i = 0; i < lp3972->num_regulators; i++)
+ regulator_unregister(lp3972->rdev[i]);
+ kfree(lp3972->rdev);
+ kfree(lp3972);
+
+ return 0;
+}
+
+static const struct i2c_device_id lp3972_i2c_id[] = {
+ { "lp3972", 0 },
+ { }
+};
+MODULE_DEVICE_TABLE(i2c, lp3972_i2c_id);
+
+static struct i2c_driver lp3972_i2c_driver = {
+ .driver = {
+ .name = "lp3972",
+ .owner = THIS_MODULE,
+ },
+ .probe = lp3972_i2c_probe,
+ .remove = __devexit_p(lp3972_i2c_remove),
+ .id_table = lp3972_i2c_id,
+};
+
+static int __init lp3972_module_init(void)
+{
+ return i2c_add_driver(&lp3972_i2c_driver);
+}
+subsys_initcall(lp3972_module_init);
+
+static void __exit lp3972_module_exit(void)
+{
+ i2c_del_driver(&lp3972_i2c_driver);
+}
+module_exit(lp3972_module_exit);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Axel Lin <axel.lin@gmail.com>");
+MODULE_DESCRIPTION("LP3972 PMIC driver");
diff --git a/drivers/regulator/max8952.c b/drivers/regulator/max8952.c
new file mode 100644
index 000000000000..0d5dda4fd911
--- /dev/null
+++ b/drivers/regulator/max8952.c
@@ -0,0 +1,366 @@
+/*
+ * max8952.c - Voltage and current regulation for the Maxim 8952
+ *
+ * Copyright (C) 2010 Samsung Electronics
+ * MyungJoo Ham <myungjoo.ham@samsung.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ */
+
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/i2c.h>
+#include <linux/err.h>
+#include <linux/platform_device.h>
+#include <linux/regulator/driver.h>
+#include <linux/regulator/max8952.h>
+#include <linux/mutex.h>
+#include <linux/gpio.h>
+#include <linux/io.h>
+#include <linux/slab.h>
+
+/* Registers */
+enum {
+ MAX8952_REG_MODE0,
+ MAX8952_REG_MODE1,
+ MAX8952_REG_MODE2,
+ MAX8952_REG_MODE3,
+ MAX8952_REG_CONTROL,
+ MAX8952_REG_SYNC,
+ MAX8952_REG_RAMP,
+ MAX8952_REG_CHIP_ID1,
+ MAX8952_REG_CHIP_ID2,
+};
+
+struct max8952_data {
+ struct i2c_client *client;
+ struct device *dev;
+ struct mutex mutex;
+ struct max8952_platform_data *pdata;
+ struct regulator_dev *rdev;
+
+ bool vid0;
+ bool vid1;
+ bool en;
+};
+
+static int max8952_read_reg(struct max8952_data *max8952, u8 reg)
+{
+ int ret = i2c_smbus_read_byte_data(max8952->client, reg);
+ if (ret > 0)
+ ret &= 0xff;
+
+ return ret;
+}
+
+static int max8952_write_reg(struct max8952_data *max8952,
+ u8 reg, u8 value)
+{
+ return i2c_smbus_write_byte_data(max8952->client, reg, value);
+}
+
+static int max8952_voltage(struct max8952_data *max8952, u8 mode)
+{
+ return (max8952->pdata->dvs_mode[mode] * 10 + 770) * 1000;
+}
+
+static int max8952_list_voltage(struct regulator_dev *rdev,
+ unsigned int selector)
+{
+ struct max8952_data *max8952 = rdev_get_drvdata(rdev);
+
+ if (rdev_get_id(rdev) != 0)
+ return -EINVAL;
+
+ return max8952_voltage(max8952, selector);
+}
+
+static int max8952_is_enabled(struct regulator_dev *rdev)
+{
+ struct max8952_data *max8952 = rdev_get_drvdata(rdev);
+ return max8952->en;
+}
+
+static int max8952_enable(struct regulator_dev *rdev)
+{
+ struct max8952_data *max8952 = rdev_get_drvdata(rdev);
+
+ /* If not valid, assume "ALWAYS_HIGH" */
+ if (gpio_is_valid(max8952->pdata->gpio_en))
+ gpio_set_value(max8952->pdata->gpio_en, 1);
+
+ max8952->en = true;
+ return 0;
+}
+
+static int max8952_disable(struct regulator_dev *rdev)
+{
+ struct max8952_data *max8952 = rdev_get_drvdata(rdev);
+
+ /* If not valid, assume "ALWAYS_HIGH" -> not permitted */
+ if (gpio_is_valid(max8952->pdata->gpio_en))
+ gpio_set_value(max8952->pdata->gpio_en, 0);
+ else
+ return -EPERM;
+
+ max8952->en = false;
+ return 0;
+}
+
+static int max8952_get_voltage(struct regulator_dev *rdev)
+{
+ struct max8952_data *max8952 = rdev_get_drvdata(rdev);
+ u8 vid = 0;
+
+ if (max8952->vid0)
+ vid += 1;
+ if (max8952->vid1)
+ vid += 2;
+
+ return max8952_voltage(max8952, vid);
+}
+
+static int max8952_set_voltage(struct regulator_dev *rdev,
+ int min_uV, int max_uV)
+{
+ struct max8952_data *max8952 = rdev_get_drvdata(rdev);
+ s8 vid = -1, i;
+
+ if (!gpio_is_valid(max8952->pdata->gpio_vid0) ||
+ !gpio_is_valid(max8952->pdata->gpio_vid0)) {
+ /* DVS not supported */
+ return -EPERM;
+ }
+
+ for (i = 0; i < MAX8952_NUM_DVS_MODE; i++) {
+ int volt = max8952_voltage(max8952, i);
+
+ /* Set the voltage as low as possible within the range */
+ if (volt <= max_uV && volt >= min_uV)
+ if (vid == -1 || max8952_voltage(max8952, vid) > volt)
+ vid = i;
+ }
+
+ if (vid >= 0 && vid < MAX8952_NUM_DVS_MODE) {
+ max8952->vid0 = (vid % 2 == 1);
+ max8952->vid1 = (((vid >> 1) % 2) == 1);
+ gpio_set_value(max8952->pdata->gpio_vid0, max8952->vid0);
+ gpio_set_value(max8952->pdata->gpio_vid1, max8952->vid1);
+ } else
+ return -EINVAL;
+
+ return 0;
+}
+
+static struct regulator_ops max8952_ops = {
+ .list_voltage = max8952_list_voltage,
+ .is_enabled = max8952_is_enabled,
+ .enable = max8952_enable,
+ .disable = max8952_disable,
+ .get_voltage = max8952_get_voltage,
+ .set_voltage = max8952_set_voltage,
+ .set_suspend_disable = max8952_disable,
+};
+
+static struct regulator_desc regulator = {
+ .name = "MAX8952_VOUT",
+ .id = 0,
+ .n_voltages = MAX8952_NUM_DVS_MODE,
+ .ops = &max8952_ops,
+ .type = REGULATOR_VOLTAGE,
+ .owner = THIS_MODULE,
+};
+
+static int __devinit max8952_pmic_probe(struct i2c_client *client,
+ const struct i2c_device_id *i2c_id)
+{
+ struct i2c_adapter *adapter = to_i2c_adapter(client->dev.parent);
+ struct max8952_platform_data *pdata = client->dev.platform_data;
+ struct max8952_data *max8952;
+
+ int ret = 0, err = 0;
+
+ if (!pdata) {
+ dev_err(&client->dev, "Require the platform data\n");
+ return -EINVAL;
+ }
+
+ if (!i2c_check_functionality(adapter, I2C_FUNC_SMBUS_BYTE))
+ return -EIO;
+
+ max8952 = kzalloc(sizeof(struct max8952_data), GFP_KERNEL);
+ if (!max8952)
+ return -ENOMEM;
+
+ max8952->client = client;
+ max8952->dev = &client->dev;
+ max8952->pdata = pdata;
+ mutex_init(&max8952->mutex);
+
+ max8952->rdev = regulator_register(&regulator, max8952->dev,
+ &pdata->reg_data, max8952);
+
+ if (IS_ERR(max8952->rdev)) {
+ ret = PTR_ERR(max8952->rdev);
+ dev_err(max8952->dev, "regulator init failed (%d)\n", ret);
+ goto err_reg;
+ }
+
+ max8952->en = !!(pdata->reg_data.constraints.boot_on);
+ max8952->vid0 = (pdata->default_mode % 2) == 1;
+ max8952->vid1 = ((pdata->default_mode >> 1) % 2) == 1;
+
+ if (gpio_is_valid(pdata->gpio_en)) {
+ if (!gpio_request(pdata->gpio_en, "MAX8952 EN"))
+ gpio_direction_output(pdata->gpio_en, max8952->en);
+ else
+ err = 1;
+ } else
+ err = 2;
+
+ if (err) {
+ dev_info(max8952->dev, "EN gpio invalid: assume that EN"
+ "is always High\n");
+ max8952->en = 1;
+ pdata->gpio_en = -1; /* Mark invalid */
+ }
+
+ err = 0;
+
+ if (gpio_is_valid(pdata->gpio_vid0) &&
+ gpio_is_valid(pdata->gpio_vid1)) {
+ if (!gpio_request(pdata->gpio_vid0, "MAX8952 VID0"))
+ gpio_direction_output(pdata->gpio_vid0,
+ (pdata->default_mode) % 2);
+ else
+ err = 1;
+
+ if (!gpio_request(pdata->gpio_vid1, "MAX8952 VID1"))
+ gpio_direction_output(pdata->gpio_vid1,
+ (pdata->default_mode >> 1) % 2);
+ else {
+ if (!err)
+ gpio_free(pdata->gpio_vid0);
+ err = 2;
+ }
+
+ } else
+ err = 3;
+
+ if (err) {
+ dev_warn(max8952->dev, "VID0/1 gpio invalid: "
+ "DVS not avilable.\n");
+ max8952->vid0 = 0;
+ max8952->vid1 = 0;
+ /* Mark invalid */
+ pdata->gpio_vid0 = -1;
+ pdata->gpio_vid1 = -1;
+
+ /* Disable Pulldown of EN only */
+ max8952_write_reg(max8952, MAX8952_REG_CONTROL, 0x60);
+
+ dev_err(max8952->dev, "DVS modes disabled because VID0 and VID1"
+ " do not have proper controls.\n");
+ } else {
+ /*
+ * Disable Pulldown on EN, VID0, VID1 to reduce
+ * leakage current of MAX8952 assuming that MAX8952
+ * is turned on (EN==1). Note that without having VID0/1
+ * properly connected, turning pulldown off can be
+ * problematic. Thus, turn this off only when they are
+ * controllable by GPIO.
+ */
+ max8952_write_reg(max8952, MAX8952_REG_CONTROL, 0x0);
+ }
+
+ max8952_write_reg(max8952, MAX8952_REG_MODE0,
+ (max8952_read_reg(max8952,
+ MAX8952_REG_MODE0) & 0xC0) |
+ (pdata->dvs_mode[0] & 0x3F));
+ max8952_write_reg(max8952, MAX8952_REG_MODE1,
+ (max8952_read_reg(max8952,
+ MAX8952_REG_MODE1) & 0xC0) |
+ (pdata->dvs_mode[1] & 0x3F));
+ max8952_write_reg(max8952, MAX8952_REG_MODE2,
+ (max8952_read_reg(max8952,
+ MAX8952_REG_MODE2) & 0xC0) |
+ (pdata->dvs_mode[2] & 0x3F));
+ max8952_write_reg(max8952, MAX8952_REG_MODE3,
+ (max8952_read_reg(max8952,
+ MAX8952_REG_MODE3) & 0xC0) |
+ (pdata->dvs_mode[3] & 0x3F));
+
+ max8952_write_reg(max8952, MAX8952_REG_SYNC,
+ (max8952_read_reg(max8952, MAX8952_REG_SYNC) & 0x3F) |
+ ((pdata->sync_freq & 0x3) << 6));
+ max8952_write_reg(max8952, MAX8952_REG_RAMP,
+ (max8952_read_reg(max8952, MAX8952_REG_RAMP) & 0x1F) |
+ ((pdata->ramp_speed & 0x7) << 5));
+
+ i2c_set_clientdata(client, max8952);
+
+ return 0;
+
+err_reg:
+ kfree(max8952);
+ return ret;
+}
+
+static int __devexit max8952_pmic_remove(struct i2c_client *client)
+{
+ struct max8952_data *max8952 = i2c_get_clientdata(client);
+ struct max8952_platform_data *pdata = max8952->pdata;
+ struct regulator_dev *rdev = max8952->rdev;
+
+ regulator_unregister(rdev);
+
+ gpio_free(pdata->gpio_vid0);
+ gpio_free(pdata->gpio_vid1);
+ gpio_free(pdata->gpio_en);
+
+ kfree(max8952);
+ return 0;
+}
+
+static const struct i2c_device_id max8952_ids[] = {
+ { "max8952", 0 },
+ { },
+};
+MODULE_DEVICE_TABLE(i2c, max8952_ids);
+
+static struct i2c_driver max8952_pmic_driver = {
+ .probe = max8952_pmic_probe,
+ .remove = __devexit_p(max8952_pmic_remove),
+ .driver = {
+ .name = "max8952",
+ },
+ .id_table = max8952_ids,
+};
+
+static int __init max8952_pmic_init(void)
+{
+ return i2c_add_driver(&max8952_pmic_driver);
+}
+subsys_initcall(max8952_pmic_init);
+
+static void __exit max8952_pmic_exit(void)
+{
+ i2c_del_driver(&max8952_pmic_driver);
+}
+module_exit(max8952_pmic_exit);
+
+MODULE_DESCRIPTION("MAXIM 8952 voltage regulator driver");
+MODULE_AUTHOR("MyungJoo Ham <myungjoo.ham@samsung.com>");
+MODULE_LICENSE("GPL");
diff --git a/drivers/regulator/max8998.c b/drivers/regulator/max8998.c
index a1baf1fbe004..5c20756db607 100644
--- a/drivers/regulator/max8998.c
+++ b/drivers/regulator/max8998.c
@@ -39,6 +39,11 @@ struct max8998_data {
struct max8998_dev *iodev;
int num_regulators;
struct regulator_dev **rdev;
+ u8 buck1_vol[4]; /* voltages for selection */
+ u8 buck2_vol[2];
+ unsigned int buck1_idx; /* index to last changed voltage */
+ /* value in a set */
+ unsigned int buck2_idx;
};
struct voltage_map_desc {
@@ -173,6 +178,7 @@ static int max8998_get_enable_register(struct regulator_dev *rdev,
static int max8998_ldo_is_enabled(struct regulator_dev *rdev)
{
struct max8998_data *max8998 = rdev_get_drvdata(rdev);
+ struct i2c_client *i2c = max8998->iodev->i2c;
int ret, reg, shift = 8;
u8 val;
@@ -180,7 +186,7 @@ static int max8998_ldo_is_enabled(struct regulator_dev *rdev)
if (ret)
return ret;
- ret = max8998_read_reg(max8998->iodev, reg, &val);
+ ret = max8998_read_reg(i2c, reg, &val);
if (ret)
return ret;
@@ -190,31 +196,34 @@ static int max8998_ldo_is_enabled(struct regulator_dev *rdev)
static int max8998_ldo_enable(struct regulator_dev *rdev)
{
struct max8998_data *max8998 = rdev_get_drvdata(rdev);
+ struct i2c_client *i2c = max8998->iodev->i2c;
int reg, shift = 8, ret;
ret = max8998_get_enable_register(rdev, &reg, &shift);
if (ret)
return ret;
- return max8998_update_reg(max8998->iodev, reg, 1<<shift, 1<<shift);
+ return max8998_update_reg(i2c, reg, 1<<shift, 1<<shift);
}
static int max8998_ldo_disable(struct regulator_dev *rdev)
{
struct max8998_data *max8998 = rdev_get_drvdata(rdev);
+ struct i2c_client *i2c = max8998->iodev->i2c;
int reg, shift = 8, ret;
ret = max8998_get_enable_register(rdev, &reg, &shift);
if (ret)
return ret;
- return max8998_update_reg(max8998->iodev, reg, 0, 1<<shift);
+ return max8998_update_reg(i2c, reg, 0, 1<<shift);
}
static int max8998_get_voltage_register(struct regulator_dev *rdev,
int *_reg, int *_shift, int *_mask)
{
int ldo = max8998_get_ldo(rdev);
+ struct max8998_data *max8998 = rdev_get_drvdata(rdev);
int reg, shift = 0, mask = 0xff;
switch (ldo) {
@@ -251,10 +260,10 @@ static int max8998_get_voltage_register(struct regulator_dev *rdev,
reg = MAX8998_REG_LDO12 + (ldo - MAX8998_LDO12);
break;
case MAX8998_BUCK1:
- reg = MAX8998_REG_BUCK1_DVSARM1;
+ reg = MAX8998_REG_BUCK1_VOLTAGE1 + max8998->buck1_idx;
break;
case MAX8998_BUCK2:
- reg = MAX8998_REG_BUCK2_DVSINT1;
+ reg = MAX8998_REG_BUCK2_VOLTAGE1 + max8998->buck2_idx;
break;
case MAX8998_BUCK3:
reg = MAX8998_REG_BUCK3;
@@ -276,6 +285,7 @@ static int max8998_get_voltage_register(struct regulator_dev *rdev,
static int max8998_get_voltage(struct regulator_dev *rdev)
{
struct max8998_data *max8998 = rdev_get_drvdata(rdev);
+ struct i2c_client *i2c = max8998->iodev->i2c;
int reg, shift = 0, mask, ret;
u8 val;
@@ -283,7 +293,7 @@ static int max8998_get_voltage(struct regulator_dev *rdev)
if (ret)
return ret;
- ret = max8998_read_reg(max8998->iodev, reg, &val);
+ ret = max8998_read_reg(i2c, reg, &val);
if (ret)
return ret;
@@ -293,18 +303,16 @@ static int max8998_get_voltage(struct regulator_dev *rdev)
return max8998_list_voltage(rdev, val);
}
-static int max8998_set_voltage(struct regulator_dev *rdev,
+static int max8998_set_voltage_ldo(struct regulator_dev *rdev,
int min_uV, int max_uV)
{
struct max8998_data *max8998 = rdev_get_drvdata(rdev);
+ struct i2c_client *i2c = max8998->iodev->i2c;
int min_vol = min_uV / 1000, max_vol = max_uV / 1000;
- int previous_vol = 0;
const struct voltage_map_desc *desc;
int ldo = max8998_get_ldo(rdev);
int reg, shift = 0, mask, ret;
int i = 0;
- u8 val;
- bool en_ramp = false;
if (ldo >= ARRAY_SIZE(ldo_voltage_map))
return -EINVAL;
@@ -327,24 +335,155 @@ static int max8998_set_voltage(struct regulator_dev *rdev,
if (ret)
return ret;
- /* wait for RAMP_UP_DELAY if rdev is BUCK1/2 and
- * ENRAMP is ON */
- if (ldo == MAX8998_BUCK1 || ldo == MAX8998_BUCK2) {
- max8998_read_reg(max8998->iodev, MAX8998_REG_ONOFF4, &val);
- if (val & (1 << 4)) {
- en_ramp = true;
- previous_vol = max8998_get_voltage(rdev);
- }
+ ret = max8998_update_reg(i2c, reg, i<<shift, mask<<shift);
+
+ return ret;
+}
+
+static inline void buck1_gpio_set(int gpio1, int gpio2, int v)
+{
+ gpio_set_value(gpio1, v & 0x1);
+ gpio_set_value(gpio2, (v >> 1) & 0x1);
+}
+
+static inline void buck2_gpio_set(int gpio, int v)
+{
+ gpio_set_value(gpio, v & 0x1);
+}
+
+static int max8998_set_voltage_buck(struct regulator_dev *rdev,
+ int min_uV, int max_uV)
+{
+ struct max8998_data *max8998 = rdev_get_drvdata(rdev);
+ struct max8998_platform_data *pdata =
+ dev_get_platdata(max8998->iodev->dev);
+ struct i2c_client *i2c = max8998->iodev->i2c;
+ int min_vol = min_uV / 1000, max_vol = max_uV / 1000;
+ const struct voltage_map_desc *desc;
+ int buck = max8998_get_ldo(rdev);
+ int reg, shift = 0, mask, ret;
+ int difference = 0, i = 0, j = 0, previous_vol = 0;
+ u8 val = 0;
+ static u8 buck1_last_val;
+
+ if (buck >= ARRAY_SIZE(ldo_voltage_map))
+ return -EINVAL;
+
+ desc = ldo_voltage_map[buck];
+
+ if (desc == NULL)
+ return -EINVAL;
+
+ if (max_vol < desc->min || min_vol > desc->max)
+ return -EINVAL;
+
+ while (desc->min + desc->step*i < min_vol &&
+ desc->min + desc->step*i < desc->max)
+ i++;
+
+ if (desc->min + desc->step*i > max_vol)
+ return -EINVAL;
+
+ ret = max8998_get_voltage_register(rdev, &reg, &shift, &mask);
+ if (ret)
+ return ret;
+
+ previous_vol = max8998_get_voltage(rdev);
+
+ /* Check if the voltage needs to be changed */
+ /* if the previous voltage equals the new voltage, return */
+ if (previous_vol == max8998_list_voltage(rdev, i)) {
+ dev_dbg(max8998->dev, "No voltage change, old:%d, new:%d\n",
+ previous_vol, max8998_list_voltage(rdev, i));
+ return ret;
}
- ret = max8998_update_reg(max8998->iodev, reg, i<<shift, mask<<shift);
+ switch (buck) {
+ case MAX8998_BUCK1:
+ dev_dbg(max8998->dev,
+ "BUCK1, i:%d, buck1_vol1:%d, buck1_vol2:%d\n\
+ buck1_vol3:%d, buck1_vol4:%d\n",
+ i, max8998->buck1_vol[0], max8998->buck1_vol[1],
+ max8998->buck1_vol[2], max8998->buck1_vol[3]);
+
+ if (gpio_is_valid(pdata->buck1_set1) &&
+ gpio_is_valid(pdata->buck1_set2)) {
+
+ /* check if requested voltage */
+ /* value is already defined */
+ for (j = 0; j < ARRAY_SIZE(max8998->buck1_vol); j++) {
+ if (max8998->buck1_vol[j] == i) {
+ max8998->buck1_idx = j;
+ buck1_gpio_set(pdata->buck1_set1,
+ pdata->buck1_set2, j);
+ goto buck1_exit;
+ }
+ }
+
+ /* no predefined regulator value found */
+ max8998->buck1_idx = (buck1_last_val % 2) + 2;
+ dev_dbg(max8998->dev, "max8998->buck1_idx:%d\n",
+ max8998->buck1_idx);
+ max8998->buck1_vol[max8998->buck1_idx] = i;
+ ret = max8998_get_voltage_register(rdev, &reg,
+ &shift,
+ &mask);
+ ret = max8998_write_reg(i2c, reg, i);
+ buck1_gpio_set(pdata->buck1_set1,
+ pdata->buck1_set2, max8998->buck1_idx);
+ buck1_last_val++;
+buck1_exit:
+ dev_dbg(max8998->dev, "%s: SET1:%d, SET2:%d\n",
+ i2c->name, gpio_get_value(pdata->buck1_set1),
+ gpio_get_value(pdata->buck1_set2));
+ break;
+ } else {
+ ret = max8998_write_reg(i2c, reg, i);
+ }
+ break;
+
+ case MAX8998_BUCK2:
+ dev_dbg(max8998->dev,
+ "BUCK2, i:%d buck2_vol1:%d, buck2_vol2:%d\n"
+ , i, max8998->buck2_vol[0], max8998->buck2_vol[1]);
+ if (gpio_is_valid(pdata->buck2_set3)) {
+ if (max8998->buck2_vol[0] == i) {
+ max8998->buck2_idx = 0;
+ buck2_gpio_set(pdata->buck2_set3, 0);
+ } else {
+ max8998->buck2_idx = 1;
+ ret = max8998_get_voltage_register(rdev, &reg,
+ &shift,
+ &mask);
+ ret = max8998_write_reg(i2c, reg, i);
+ max8998->buck2_vol[1] = i;
+ buck2_gpio_set(pdata->buck2_set3, 1);
+ }
+ dev_dbg(max8998->dev, "%s: SET3:%d\n", i2c->name,
+ gpio_get_value(pdata->buck2_set3));
+ } else {
+ ret = max8998_write_reg(i2c, reg, i);
+ }
+ break;
- if (en_ramp == true) {
- int difference = desc->min + desc->step*i - previous_vol/1000;
- if (difference > 0)
- udelay(difference / ((val & 0x0f) + 1));
+ case MAX8998_BUCK3:
+ case MAX8998_BUCK4:
+ ret = max8998_update_reg(i2c, reg, i<<shift, mask<<shift);
+ break;
}
+ /* Voltage stabilization */
+ max8998_read_reg(i2c, MAX8998_REG_ONOFF4, &val);
+
+ /* lp3974 hasn't got an ENRAMP bit - ramp is assumed to be enabled */
+ /* MAX8998 has the ENRAMP bit implemented, so test it */
+ if (max8998->iodev->type == TYPE_MAX8998 && !(val & MAX8998_ENRAMP))
+ return ret;
+
+ difference = desc->min + desc->step*i - previous_vol/1000;
+ if (difference > 0)
+ udelay(difference / ((val & 0x0f) + 1));
+
return ret;
}
@@ -354,7 +493,7 @@ static struct regulator_ops max8998_ldo_ops = {
.enable = max8998_ldo_enable,
.disable = max8998_ldo_disable,
.get_voltage = max8998_get_voltage,
- .set_voltage = max8998_set_voltage,
+ .set_voltage = max8998_set_voltage_ldo,
.set_suspend_enable = max8998_ldo_enable,
.set_suspend_disable = max8998_ldo_disable,
};
@@ -365,7 +504,7 @@ static struct regulator_ops max8998_buck_ops = {
.enable = max8998_ldo_enable,
.disable = max8998_ldo_disable,
.get_voltage = max8998_get_voltage,
- .set_voltage = max8998_set_voltage,
+ .set_voltage = max8998_set_voltage_buck,
.set_suspend_enable = max8998_ldo_enable,
.set_suspend_disable = max8998_ldo_disable,
};
@@ -538,6 +677,7 @@ static __devinit int max8998_pmic_probe(struct platform_device *pdev)
struct max8998_platform_data *pdata = dev_get_platdata(iodev->dev);
struct regulator_dev **rdev;
struct max8998_data *max8998;
+ struct i2c_client *i2c;
int i, ret, size;
if (!pdata) {
@@ -561,6 +701,86 @@ static __devinit int max8998_pmic_probe(struct platform_device *pdev)
max8998->iodev = iodev;
max8998->num_regulators = pdata->num_regulators;
platform_set_drvdata(pdev, max8998);
+ i2c = max8998->iodev->i2c;
+
+ /* NOTE: */
+ /* For an unused GPIO that is not marked as -1 (i.e. left at 0), */
+ /* a WARN_ON will be displayed */
+
+ /* Check if MAX8998 voltage selection GPIOs are defined */
+ if (gpio_is_valid(pdata->buck1_set1) &&
+ gpio_is_valid(pdata->buck1_set2)) {
+ /* Check if SET1 is not equal to 0 */
+ if (!pdata->buck1_set1) {
+ printk(KERN_ERR "MAX8998 SET1 GPIO defined as 0 !\n");
+ WARN_ON(!pdata->buck1_set1);
+ return -EIO;
+ }
+ /* Check if SET2 is not equal to 0 */
+ if (!pdata->buck1_set2) {
+ printk(KERN_ERR "MAX8998 SET2 GPIO defined as 0 !\n");
+ WARN_ON(!pdata->buck1_set2);
+ return -EIO;
+ }
+
+ gpio_request(pdata->buck1_set1, "MAX8998 BUCK1_SET1");
+ gpio_direction_output(pdata->buck1_set1,
+ max8998->buck1_idx & 0x1);
+
+
+ gpio_request(pdata->buck1_set2, "MAX8998 BUCK1_SET2");
+ gpio_direction_output(pdata->buck1_set2,
+ (max8998->buck1_idx >> 1) & 0x1);
+ /* Set predefined value for BUCK1 register 1 */
+ i = 0;
+ while (buck12_voltage_map_desc.min +
+ buck12_voltage_map_desc.step*i
+ != (pdata->buck1_max_voltage1 / 1000))
+ i++;
+ printk(KERN_ERR "i:%d, buck1_idx:%d\n", i, max8998->buck1_idx);
+ max8998->buck1_vol[0] = i;
+ ret = max8998_write_reg(i2c, MAX8998_REG_BUCK1_VOLTAGE1, i);
+
+ /* Set predefined value for BUCK1 register 2 */
+ i = 0;
+ while (buck12_voltage_map_desc.min +
+ buck12_voltage_map_desc.step*i
+ != (pdata->buck1_max_voltage2 / 1000))
+ i++;
+
+ max8998->buck1_vol[1] = i;
+ printk(KERN_ERR "i:%d, buck1_idx:%d\n", i, max8998->buck1_idx);
+ ret = max8998_write_reg(i2c, MAX8998_REG_BUCK1_VOLTAGE2, i)
+ + ret;
+ if (ret)
+ return ret;
+
+ }
+
+ if (gpio_is_valid(pdata->buck2_set3)) {
+ /* Check if SET3 is not equal to 0 */
+ if (!pdata->buck2_set3) {
+ printk(KERN_ERR "MAX8998 SET3 GPIO defined as 0 !\n");
+ WARN_ON(!pdata->buck2_set3);
+ return -EIO;
+ }
+ gpio_request(pdata->buck2_set3, "MAX8998 BUCK2_SET3");
+ gpio_direction_output(pdata->buck2_set3,
+ max8998->buck2_idx & 0x1);
+
+ /* BUCK2 - set preset default voltage value to buck2_vol[0] */
+ i = 0;
+ while (buck12_voltage_map_desc.min +
+ buck12_voltage_map_desc.step*i
+ != (pdata->buck2_max_voltage / 1000))
+ i++;
+ printk(KERN_ERR "i:%d, buck2_idx:%d\n", i, max8998->buck2_idx);
+ max8998->buck2_vol[0] = i;
+ ret = max8998_write_reg(i2c, MAX8998_REG_BUCK2_VOLTAGE1, i);
+ if (ret)
+ return ret;
+
+ }
for (i = 0; i < pdata->num_regulators; i++) {
const struct voltage_map_desc *desc;
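
As a side note on the selector search used in max8998_set_voltage_buck() above, the following standalone sketch shows the same linear walk over a min/step/max voltage map; the map values are hypothetical, not the driver's actual tables:

#include <stdio.h>

struct voltage_map_desc {
	int min;	/* mV */
	int max;	/* mV */
	int step;	/* mV */
};

/* Return the smallest selector i whose voltage satisfies [min_uV, max_uV],
 * or -1 if the range cannot be met - the same walk the driver performs. */
static int select_voltage(const struct voltage_map_desc *desc,
			  int min_uV, int max_uV)
{
	int min_vol = min_uV / 1000, max_vol = max_uV / 1000;
	int i = 0;

	if (max_vol < desc->min || min_vol > desc->max)
		return -1;

	while (desc->min + desc->step * i < min_vol &&
	       desc->min + desc->step * i < desc->max)
		i++;

	if (desc->min + desc->step * i > max_vol)
		return -1;

	return i;
}

int main(void)
{
	/* Hypothetical BUCK-style map: 750mV..1525mV in 25mV steps. */
	struct voltage_map_desc buck = { .min = 750, .max = 1525, .step = 25 };
	int sel = select_voltage(&buck, 1100000, 1150000);

	if (sel >= 0)
		printf("selector=%d -> %dmV\n", sel,
		       buck.min + buck.step * sel);
	return 0;
}
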
diff --git a/drivers/regulator/mc13783-regulator.c b/drivers/regulator/mc13783-regulator.c
index 4597d508a229..ecd99f59dba8 100644
--- a/drivers/regulator/mc13783-regulator.c
+++ b/drivers/regulator/mc13783-regulator.c
@@ -465,8 +465,8 @@ static struct regulator_ops mc13783_fixed_regulator_ops = {
.get_voltage = mc13783_fixed_regulator_get_voltage,
};
-int mc13783_powermisc_rmw(struct mc13783_regulator_priv *priv, u32 mask,
- u32 val)
+static int mc13783_powermisc_rmw(struct mc13783_regulator_priv *priv, u32 mask,
+ u32 val)
{
struct mc13783 *mc13783 = priv->mc13783;
int ret;
diff --git a/drivers/regulator/twl-regulator.c b/drivers/regulator/twl-regulator.c
index 7e5892efc437..a57262a4fa6c 100644
--- a/drivers/regulator/twl-regulator.c
+++ b/drivers/regulator/twl-regulator.c
@@ -219,12 +219,12 @@ static int twlreg_set_mode(struct regulator_dev *rdev, unsigned mode)
return -EACCES;
status = twl_i2c_write_u8(TWL_MODULE_PM_MASTER,
- message >> 8, 0x15 /* PB_WORD_MSB */ );
- if (status >= 0)
+ message >> 8, TWL4030_PM_MASTER_PB_WORD_MSB);
+ if (status < 0)
return status;
return twl_i2c_write_u8(TWL_MODULE_PM_MASTER,
- message, 0x16 /* PB_WORD_LSB */ );
+ message & 0xff, TWL4030_PM_MASTER_PB_WORD_LSB);
}
/*----------------------------------------------------------------------*/
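
For the twl-regulator hunk above, a small sketch of the intent, in generic C with a hypothetical write_u8() standing in for the I2C helper: the 16-bit power-bus message is split into an MSB and an LSB byte write, and the corrected check bails out only when the first write actually fails (the old code returned early on success, status >= 0):

#include <stdint.h>
#include <stdio.h>

/* Hypothetical register writer; returns 0 on success, negative on error. */
static int write_u8(uint8_t reg, uint8_t val)
{
	printf("write reg 0x%02X = 0x%02X\n", reg, val);
	return 0;
}

#define PB_WORD_MSB	0x15
#define PB_WORD_LSB	0x16

static int send_pb_message(uint16_t message)
{
	int status;

	/* High byte first; only a negative status is treated as an error. */
	status = write_u8(PB_WORD_MSB, message >> 8);
	if (status < 0)
		return status;

	return write_u8(PB_WORD_LSB, message & 0xff);
}

int main(void)
{
	return send_pb_message(0x1234) < 0;
}
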
diff --git a/drivers/rtc/Kconfig b/drivers/rtc/Kconfig
index 48ca7132cc05..2883428d5ac8 100644
--- a/drivers/rtc/Kconfig
+++ b/drivers/rtc/Kconfig
@@ -171,7 +171,8 @@ config RTC_DRV_DS3232
depends on RTC_CLASS && I2C
help
If you say yes here you get support for Dallas Semiconductor
- DS3232 real-time clock chips.
+ DS3232 real-time clock chips. If an interrupt is associated
+ with the device, the alarm functionality is supported.
This driver can also be built as a module. If so, the module
will be called rtc-ds3232.
@@ -195,6 +196,16 @@ config RTC_DRV_MAX8925
This driver can also be built as a module. If so, the module
will be called rtc-max8925.
+config RTC_DRV_MAX8998
+ tristate "Maxim MAX8998"
+ depends on MFD_MAX8998
+ help
+ If you say yes here you will get support for the
+ RTC of Maxim MAX8998 PMIC.
+
+ This driver can also be built as a module. If so, the module
+ will be called rtc-max8998.
+
config RTC_DRV_RS5C372
tristate "Ricoh R2025S/D, RS5C372A/B, RV5C386, RV5C387A"
help
@@ -765,15 +776,15 @@ config RTC_DRV_AT32AP700X
AT32AP700x family processors.
config RTC_DRV_AT91RM9200
- tristate "AT91RM9200 or AT91SAM9RL"
- depends on ARCH_AT91RM9200 || ARCH_AT91SAM9RL
+ tristate "AT91RM9200 or some AT91SAM9 RTC"
+ depends on ARCH_AT91RM9200 || ARCH_AT91SAM9RL || ARCH_AT91SAM9G45
help
Driver for the internal RTC (Realtime Clock) module found on
- Atmel AT91RM9200's and AT91SAM9RL chips. On SAM9RL chips
+ Atmel AT91RM9200's and some AT91SAM9 chips. On AT91SAM9 chips
this is powered by the backup power supply.
config RTC_DRV_AT91SAM9
- tristate "AT91SAM9x/AT91CAP9"
+ tristate "AT91SAM9x/AT91CAP9 RTT as RTC"
depends on ARCH_AT91 && !(ARCH_AT91RM9200 || ARCH_AT91X40)
help
RTC driver for the Atmel AT91SAM9x and AT91CAP9 internal RTT
@@ -781,8 +792,8 @@ config RTC_DRV_AT91SAM9
supply (such as a small coin cell battery), but do not need to
be used as RTCs.
- (On AT91SAM9rl chips you probably want to use the dedicated RTC
- module and leave the RTT available for other uses.)
+ (On AT91SAM9rl and AT91SAM9G45 chips you probably want to use the
+ dedicated RTC module and leave the RTT available for other uses.)
config RTC_DRV_AT91SAM9_RTT
int
@@ -925,11 +936,12 @@ config RTC_DRV_PCAP
If you say Y here you will get support for the RTC found on
the PCAP2 ASIC used on some Motorola phones.
-config RTC_DRV_MC13783
- depends on MFD_MC13783
- tristate "Freescale MC13783 RTC"
+config RTC_DRV_MC13XXX
+ depends on MFD_MC13XXX
+ tristate "Freescale MC13xxx RTC"
help
- This enables support for the Freescale MC13783 PMIC RTC
+ This enables support for the RTCs found on Freescale's PMICs
+ MC13783 and MC13892.
config RTC_DRV_MPC5121
tristate "Freescale MPC5121 built-in RTC"
@@ -952,4 +964,13 @@ config RTC_DRV_JZ4740
This driver can also be built as a module. If so, the module
will be called rtc-jz4740.
+config RTC_DRV_LPC32XX
+ depends on ARCH_LPC32XX
+ tristate "NXP LPC32XX RTC"
+ help
+ This enables support for the NXP RTC in the LPC32XX SoC.
+
+ This driver can also be built as a module. If so, the module
+ will be called rtc-lpc32xx.
+
endif # RTC_CLASS
diff --git a/drivers/rtc/Makefile b/drivers/rtc/Makefile
index 0f207b3b5833..4c2832df4697 100644
--- a/drivers/rtc/Makefile
+++ b/drivers/rtc/Makefile
@@ -51,6 +51,7 @@ obj-$(CONFIG_RTC_DRV_IMXDI) += rtc-imxdi.o
obj-$(CONFIG_RTC_DRV_ISL1208) += rtc-isl1208.o
obj-$(CONFIG_RTC_DRV_ISL12022) += rtc-isl12022.o
obj-$(CONFIG_RTC_DRV_JZ4740) += rtc-jz4740.o
+obj-$(CONFIG_RTC_DRV_LPC32XX) += rtc-lpc32xx.o
obj-$(CONFIG_RTC_DRV_M41T80) += rtc-m41t80.o
obj-$(CONFIG_RTC_DRV_M41T94) += rtc-m41t94.o
obj-$(CONFIG_RTC_DRV_M48T35) += rtc-m48t35.o
@@ -59,8 +60,9 @@ obj-$(CONFIG_RTC_DRV_M48T86) += rtc-m48t86.o
obj-$(CONFIG_RTC_MXC) += rtc-mxc.o
obj-$(CONFIG_RTC_DRV_MAX6900) += rtc-max6900.o
obj-$(CONFIG_RTC_DRV_MAX8925) += rtc-max8925.o
+obj-$(CONFIG_RTC_DRV_MAX8998) += rtc-max8998.o
obj-$(CONFIG_RTC_DRV_MAX6902) += rtc-max6902.o
-obj-$(CONFIG_RTC_DRV_MC13783) += rtc-mc13783.o
+obj-$(CONFIG_RTC_DRV_MC13XXX) += rtc-mc13xxx.o
obj-$(CONFIG_RTC_DRV_MSM6242) += rtc-msm6242.o
obj-$(CONFIG_RTC_DRV_MPC5121) += rtc-mpc5121.o
obj-$(CONFIG_RTC_DRV_MV) += rtc-mv.o
diff --git a/drivers/rtc/class.c b/drivers/rtc/class.c
index 565562ba6ac9..e6539cbabb35 100644
--- a/drivers/rtc/class.c
+++ b/drivers/rtc/class.c
@@ -158,8 +158,10 @@ struct rtc_device *rtc_device_register(const char *name, struct device *dev,
rtc_dev_prepare(rtc);
err = device_register(&rtc->dev);
- if (err)
+ if (err) {
+ put_device(&rtc->dev);
goto exit_kfree;
+ }
rtc_dev_add_device(rtc);
rtc_sysfs_add_device(rtc);
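
The class.c hunk above switches the error path from a bare kfree to put_device(); once registration has been attempted, the embedded kobject owns the memory and must be released through its reference count. The following is only a refcounting sketch with a hypothetical object type, not the kernel's struct device, showing why the failure path still needs a "put":

#include <stdio.h>
#include <stdlib.h>

/* Hypothetical refcounted object standing in for struct device. */
struct obj {
	int refcount;
	void (*release)(struct obj *);
};

static void obj_put(struct obj *o)
{
	if (--o->refcount == 0)
		o->release(o);	/* final reference: free via release() */
}

static void obj_release(struct obj *o)
{
	printf("releasing object\n");
	free(o);
}

/* Registration takes no extra reference here and simply fails. */
static int obj_register(struct obj *o)
{
	(void)o;
	return -1;	/* simulate device_register() failing */
}

int main(void)
{
	struct obj *o = calloc(1, sizeof(*o));

	if (!o)
		return 1;
	o->refcount = 1;	/* initial reference from initialisation */
	o->release = obj_release;

	if (obj_register(o) != 0) {
		/* Drop the initial reference instead of calling free()
		 * directly, so the release callback always runs once. */
		obj_put(o);
		return 1;
	}
	return 0;
}
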
diff --git a/drivers/rtc/rtc-ab8500.c b/drivers/rtc/rtc-ab8500.c
index 2fda03125e55..e346705aae92 100644
--- a/drivers/rtc/rtc-ab8500.c
+++ b/drivers/rtc/rtc-ab8500.c
@@ -14,26 +14,26 @@
#include <linux/init.h>
#include <linux/platform_device.h>
#include <linux/rtc.h>
+#include <linux/mfd/abx500.h>
#include <linux/mfd/ab8500.h>
#include <linux/delay.h>
-#define AB8500_RTC_SOFF_STAT_REG 0x0F00
-#define AB8500_RTC_CC_CONF_REG 0x0F01
-#define AB8500_RTC_READ_REQ_REG 0x0F02
-#define AB8500_RTC_WATCH_TSECMID_REG 0x0F03
-#define AB8500_RTC_WATCH_TSECHI_REG 0x0F04
-#define AB8500_RTC_WATCH_TMIN_LOW_REG 0x0F05
-#define AB8500_RTC_WATCH_TMIN_MID_REG 0x0F06
-#define AB8500_RTC_WATCH_TMIN_HI_REG 0x0F07
-#define AB8500_RTC_ALRM_MIN_LOW_REG 0x0F08
-#define AB8500_RTC_ALRM_MIN_MID_REG 0x0F09
-#define AB8500_RTC_ALRM_MIN_HI_REG 0x0F0A
-#define AB8500_RTC_STAT_REG 0x0F0B
-#define AB8500_RTC_BKUP_CHG_REG 0x0F0C
-#define AB8500_RTC_FORCE_BKUP_REG 0x0F0D
-#define AB8500_RTC_CALIB_REG 0x0F0E
-#define AB8500_RTC_SWITCH_STAT_REG 0x0F0F
-#define AB8500_REV_REG 0x1080
+#define AB8500_RTC_SOFF_STAT_REG 0x00
+#define AB8500_RTC_CC_CONF_REG 0x01
+#define AB8500_RTC_READ_REQ_REG 0x02
+#define AB8500_RTC_WATCH_TSECMID_REG 0x03
+#define AB8500_RTC_WATCH_TSECHI_REG 0x04
+#define AB8500_RTC_WATCH_TMIN_LOW_REG 0x05
+#define AB8500_RTC_WATCH_TMIN_MID_REG 0x06
+#define AB8500_RTC_WATCH_TMIN_HI_REG 0x07
+#define AB8500_RTC_ALRM_MIN_LOW_REG 0x08
+#define AB8500_RTC_ALRM_MIN_MID_REG 0x09
+#define AB8500_RTC_ALRM_MIN_HI_REG 0x0A
+#define AB8500_RTC_STAT_REG 0x0B
+#define AB8500_RTC_BKUP_CHG_REG 0x0C
+#define AB8500_RTC_FORCE_BKUP_REG 0x0D
+#define AB8500_RTC_CALIB_REG 0x0E
+#define AB8500_RTC_SWITCH_STAT_REG 0x0F
/* RtcReadRequest bits */
#define RTC_READ_REQUEST 0x01
@@ -46,13 +46,13 @@
#define COUNTS_PER_SEC (0xF000 / 60)
#define AB8500_RTC_EPOCH 2000
-static const unsigned long ab8500_rtc_time_regs[] = {
+static const u8 ab8500_rtc_time_regs[] = {
AB8500_RTC_WATCH_TMIN_HI_REG, AB8500_RTC_WATCH_TMIN_MID_REG,
AB8500_RTC_WATCH_TMIN_LOW_REG, AB8500_RTC_WATCH_TSECHI_REG,
AB8500_RTC_WATCH_TSECMID_REG
};
-static const unsigned long ab8500_rtc_alarm_regs[] = {
+static const u8 ab8500_rtc_alarm_regs[] = {
AB8500_RTC_ALRM_MIN_HI_REG, AB8500_RTC_ALRM_MIN_MID_REG,
AB8500_RTC_ALRM_MIN_LOW_REG
};
@@ -76,29 +76,30 @@ static unsigned long get_elapsed_seconds(int year)
static int ab8500_rtc_read_time(struct device *dev, struct rtc_time *tm)
{
- struct ab8500 *ab8500 = dev_get_drvdata(dev->parent);
unsigned long timeout = jiffies + HZ;
int retval, i;
unsigned long mins, secs;
unsigned char buf[ARRAY_SIZE(ab8500_rtc_time_regs)];
+ u8 value;
/* Request a data read */
- retval = ab8500_write(ab8500, AB8500_RTC_READ_REQ_REG,
- RTC_READ_REQUEST);
+ retval = abx500_set_register_interruptible(dev,
+ AB8500_RTC, AB8500_RTC_READ_REQ_REG, RTC_READ_REQUEST);
if (retval < 0)
return retval;
/* Early AB8500 chips will not clear the rtc read request bit */
- if (ab8500->revision == 0) {
+ if (abx500_get_chip_id(dev) == 0) {
msleep(1);
} else {
/* Wait for some cycles after enabling the rtc read in ab8500 */
while (time_before(jiffies, timeout)) {
- retval = ab8500_read(ab8500, AB8500_RTC_READ_REQ_REG);
+ retval = abx500_get_register_interruptible(dev,
+ AB8500_RTC, AB8500_RTC_READ_REQ_REG, &value);
if (retval < 0)
return retval;
- if (!(retval & RTC_READ_REQUEST))
+ if (!(value & RTC_READ_REQUEST))
break;
msleep(1);
@@ -107,10 +108,11 @@ static int ab8500_rtc_read_time(struct device *dev, struct rtc_time *tm)
/* Read the Watchtime registers */
for (i = 0; i < ARRAY_SIZE(ab8500_rtc_time_regs); i++) {
- retval = ab8500_read(ab8500, ab8500_rtc_time_regs[i]);
+ retval = abx500_get_register_interruptible(dev,
+ AB8500_RTC, ab8500_rtc_time_regs[i], &value);
if (retval < 0)
return retval;
- buf[i] = retval;
+ buf[i] = value;
}
mins = (buf[0] << 16) | (buf[1] << 8) | buf[2];
@@ -128,7 +130,6 @@ static int ab8500_rtc_read_time(struct device *dev, struct rtc_time *tm)
static int ab8500_rtc_set_time(struct device *dev, struct rtc_time *tm)
{
- struct ab8500 *ab8500 = dev_get_drvdata(dev->parent);
int retval, i;
unsigned char buf[ARRAY_SIZE(ab8500_rtc_time_regs)];
unsigned long no_secs, no_mins, secs = 0;
@@ -162,27 +163,29 @@ static int ab8500_rtc_set_time(struct device *dev, struct rtc_time *tm)
buf[0] = (no_mins >> 16) & 0xFF;
for (i = 0; i < ARRAY_SIZE(ab8500_rtc_time_regs); i++) {
- retval = ab8500_write(ab8500, ab8500_rtc_time_regs[i], buf[i]);
+ retval = abx500_set_register_interruptible(dev, AB8500_RTC,
+ ab8500_rtc_time_regs[i], buf[i]);
if (retval < 0)
return retval;
}
/* Request a data write */
- return ab8500_write(ab8500, AB8500_RTC_READ_REQ_REG, RTC_WRITE_REQUEST);
+ return abx500_set_register_interruptible(dev, AB8500_RTC,
+ AB8500_RTC_READ_REQ_REG, RTC_WRITE_REQUEST);
}
static int ab8500_rtc_read_alarm(struct device *dev, struct rtc_wkalrm *alarm)
{
- struct ab8500 *ab8500 = dev_get_drvdata(dev->parent);
int retval, i;
- int rtc_ctrl;
+ u8 rtc_ctrl, value;
unsigned char buf[ARRAY_SIZE(ab8500_rtc_alarm_regs)];
unsigned long secs, mins;
/* Check if the alarm is enabled or not */
- rtc_ctrl = ab8500_read(ab8500, AB8500_RTC_STAT_REG);
- if (rtc_ctrl < 0)
- return rtc_ctrl;
+ retval = abx500_get_register_interruptible(dev, AB8500_RTC,
+ AB8500_RTC_STAT_REG, &rtc_ctrl);
+ if (retval < 0)
+ return retval;
if (rtc_ctrl & RTC_ALARM_ENA)
alarm->enabled = 1;
@@ -192,10 +195,11 @@ static int ab8500_rtc_read_alarm(struct device *dev, struct rtc_wkalrm *alarm)
alarm->pending = 0;
for (i = 0; i < ARRAY_SIZE(ab8500_rtc_alarm_regs); i++) {
- retval = ab8500_read(ab8500, ab8500_rtc_alarm_regs[i]);
+ retval = abx500_get_register_interruptible(dev, AB8500_RTC,
+ ab8500_rtc_alarm_regs[i], &value);
if (retval < 0)
return retval;
- buf[i] = retval;
+ buf[i] = value;
}
mins = (buf[0] << 16) | (buf[1] << 8) | (buf[2]);
@@ -211,15 +215,13 @@ static int ab8500_rtc_read_alarm(struct device *dev, struct rtc_wkalrm *alarm)
static int ab8500_rtc_irq_enable(struct device *dev, unsigned int enabled)
{
- struct ab8500 *ab8500 = dev_get_drvdata(dev->parent);
-
- return ab8500_set_bits(ab8500, AB8500_RTC_STAT_REG, RTC_ALARM_ENA,
- enabled ? RTC_ALARM_ENA : 0);
+ return abx500_mask_and_set_register_interruptible(dev, AB8500_RTC,
+ AB8500_RTC_STAT_REG, RTC_ALARM_ENA,
+ enabled ? RTC_ALARM_ENA : 0);
}
static int ab8500_rtc_set_alarm(struct device *dev, struct rtc_wkalrm *alarm)
{
- struct ab8500 *ab8500 = dev_get_drvdata(dev->parent);
int retval, i;
unsigned char buf[ARRAY_SIZE(ab8500_rtc_alarm_regs)];
unsigned long mins, secs = 0;
@@ -247,7 +249,8 @@ static int ab8500_rtc_set_alarm(struct device *dev, struct rtc_wkalrm *alarm)
/* Set the alarm time */
for (i = 0; i < ARRAY_SIZE(ab8500_rtc_alarm_regs); i++) {
- retval = ab8500_write(ab8500, ab8500_rtc_alarm_regs[i], buf[i]);
+ retval = abx500_set_register_interruptible(dev, AB8500_RTC,
+ ab8500_rtc_alarm_regs[i], buf[i]);
if (retval < 0)
return retval;
}
@@ -276,10 +279,9 @@ static const struct rtc_class_ops ab8500_rtc_ops = {
static int __devinit ab8500_rtc_probe(struct platform_device *pdev)
{
- struct ab8500 *ab8500 = dev_get_drvdata(pdev->dev.parent);
int err;
struct rtc_device *rtc;
- int rtc_ctrl;
+ u8 rtc_ctrl;
int irq;
irq = platform_get_irq_byname(pdev, "ALARM");
@@ -287,17 +289,18 @@ static int __devinit ab8500_rtc_probe(struct platform_device *pdev)
return irq;
/* For RTC supply test */
- err = ab8500_set_bits(ab8500, AB8500_RTC_STAT_REG, RTC_STATUS_DATA,
- RTC_STATUS_DATA);
+ err = abx500_mask_and_set_register_interruptible(&pdev->dev, AB8500_RTC,
+ AB8500_RTC_STAT_REG, RTC_STATUS_DATA, RTC_STATUS_DATA);
if (err < 0)
return err;
/* Wait for reset by the PorRtc */
msleep(1);
- rtc_ctrl = ab8500_read(ab8500, AB8500_RTC_STAT_REG);
- if (rtc_ctrl < 0)
- return rtc_ctrl;
+ err = abx500_get_register_interruptible(&pdev->dev, AB8500_RTC,
+ AB8500_RTC_STAT_REG, &rtc_ctrl);
+ if (err < 0)
+ return err;
/* Check if the RTC Supply fails */
if (!(rtc_ctrl & RTC_STATUS_DATA)) {
diff --git a/drivers/rtc/rtc-bfin.c b/drivers/rtc/rtc-bfin.c
index d4fb82d85e9b..b4b6087f2234 100644
--- a/drivers/rtc/rtc-bfin.c
+++ b/drivers/rtc/rtc-bfin.c
@@ -2,7 +2,7 @@
* Blackfin On-Chip Real Time Clock Driver
* Supports BF51x/BF52x/BF53[123]/BF53[467]/BF54x
*
- * Copyright 2004-2009 Analog Devices Inc.
+ * Copyright 2004-2010 Analog Devices Inc.
*
* Enter bugs at http://blackfin.uclinux.org/
*
@@ -183,29 +183,33 @@ static irqreturn_t bfin_rtc_interrupt(int irq, void *dev_id)
struct bfin_rtc *rtc = dev_get_drvdata(dev);
unsigned long events = 0;
bool write_complete = false;
- u16 rtc_istat, rtc_ictl;
+ u16 rtc_istat, rtc_istat_clear, rtc_ictl, bits;
dev_dbg_stamp(dev);
rtc_istat = bfin_read_RTC_ISTAT();
rtc_ictl = bfin_read_RTC_ICTL();
+ rtc_istat_clear = 0;
- if (rtc_istat & RTC_ISTAT_WRITE_COMPLETE) {
- bfin_write_RTC_ISTAT(RTC_ISTAT_WRITE_COMPLETE);
+ bits = RTC_ISTAT_WRITE_COMPLETE;
+ if (rtc_istat & bits) {
+ rtc_istat_clear |= bits;
write_complete = true;
complete(&bfin_write_complete);
}
- if (rtc_ictl & (RTC_ISTAT_ALARM | RTC_ISTAT_ALARM_DAY)) {
- if (rtc_istat & (RTC_ISTAT_ALARM | RTC_ISTAT_ALARM_DAY)) {
- bfin_write_RTC_ISTAT(RTC_ISTAT_ALARM | RTC_ISTAT_ALARM_DAY);
+ bits = (RTC_ISTAT_ALARM | RTC_ISTAT_ALARM_DAY);
+ if (rtc_ictl & bits) {
+ if (rtc_istat & bits) {
+ rtc_istat_clear |= bits;
events |= RTC_AF | RTC_IRQF;
}
}
- if (rtc_ictl & RTC_ISTAT_SEC) {
- if (rtc_istat & RTC_ISTAT_SEC) {
- bfin_write_RTC_ISTAT(RTC_ISTAT_SEC);
+ bits = RTC_ISTAT_SEC;
+ if (rtc_ictl & bits) {
+ if (rtc_istat & bits) {
+ rtc_istat_clear |= bits;
events |= RTC_UF | RTC_IRQF;
}
}
@@ -213,9 +217,10 @@ static irqreturn_t bfin_rtc_interrupt(int irq, void *dev_id)
if (events)
rtc_update_irq(rtc->rtc_dev, 1, events);
- if (write_complete || events)
+ if (write_complete || events) {
+ bfin_write_RTC_ISTAT(rtc_istat_clear);
return IRQ_HANDLED;
- else
+ } else
return IRQ_NONE;
}
@@ -422,9 +427,13 @@ static int __devexit bfin_rtc_remove(struct platform_device *pdev)
#ifdef CONFIG_PM
static int bfin_rtc_suspend(struct platform_device *pdev, pm_message_t state)
{
- if (device_may_wakeup(&pdev->dev)) {
+ struct device *dev = &pdev->dev;
+
+ dev_dbg_stamp(dev);
+
+ if (device_may_wakeup(dev)) {
enable_irq_wake(IRQ_RTC);
- bfin_rtc_sync_pending(&pdev->dev);
+ bfin_rtc_sync_pending(dev);
} else
bfin_rtc_int_clear(0);
@@ -433,7 +442,11 @@ static int bfin_rtc_suspend(struct platform_device *pdev, pm_message_t state)
static int bfin_rtc_resume(struct platform_device *pdev)
{
- if (device_may_wakeup(&pdev->dev))
+ struct device *dev = &pdev->dev;
+
+ dev_dbg_stamp(dev);
+
+ if (device_may_wakeup(dev))
disable_irq_wake(IRQ_RTC);
/*
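
Regarding the rtc-bfin.c interrupt handler change above: instead of clearing each write-1-to-clear status bit as it is inspected, the handled bits are now accumulated and cleared with a single register write at the end. A standalone sketch of that pattern follows; the register is simulated and the names are hypothetical:

#include <stdint.h>
#include <stdio.h>

#define STAT_WRITE_COMPLETE	(1 << 0)
#define STAT_ALARM		(1 << 1)
#define STAT_SEC		(1 << 2)

/* Simulated write-1-to-clear status register. */
static uint16_t istat = STAT_WRITE_COMPLETE | STAT_SEC;

static void clear_status(uint16_t bits)
{
	istat &= ~bits;		/* hardware clears the bits written as 1 */
}

static int handle_irq(void)
{
	uint16_t pending = istat;
	uint16_t to_clear = 0;

	if (pending & STAT_WRITE_COMPLETE)
		to_clear |= STAT_WRITE_COMPLETE;
	if (pending & STAT_ALARM)
		to_clear |= STAT_ALARM;
	if (pending & STAT_SEC)
		to_clear |= STAT_SEC;

	if (to_clear) {
		clear_status(to_clear);	/* one write for all handled bits */
		return 1;		/* handled */
	}
	return 0;			/* not ours */
}

int main(void)
{
	printf("handled=%d, istat now 0x%04X\n", handle_irq(), istat);
	return 0;
}
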
diff --git a/drivers/rtc/rtc-ds1302.c b/drivers/rtc/rtc-ds1302.c
index 359d1e04626c..f0d638922644 100644
--- a/drivers/rtc/rtc-ds1302.c
+++ b/drivers/rtc/rtc-ds1302.c
@@ -35,7 +35,7 @@
#ifdef CONFIG_SH_SECUREEDGE5410
#include <asm/rtc.h>
-#include <mach/snapgear.h>
+#include <mach/secureedge5410.h>
#define RTC_RESET 0x1000
#define RTC_IODATA 0x0800
diff --git a/drivers/rtc/rtc-ds3232.c b/drivers/rtc/rtc-ds3232.c
index 9de8516e3531..57063552d3b7 100644
--- a/drivers/rtc/rtc-ds3232.c
+++ b/drivers/rtc/rtc-ds3232.c
@@ -2,6 +2,7 @@
* RTC client/driver for the Maxim/Dallas DS3232 Real-Time Clock over I2C
*
* Copyright (C) 2009-2010 Freescale Semiconductor.
+ * Author: Jack Lan <jack.lan@freescale.com>
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by the
@@ -175,6 +176,182 @@ static int ds3232_set_time(struct device *dev, struct rtc_time *time)
DS3232_REG_SECONDS, 7, buf);
}
+/*
+ * The DS3232 has two alarms; we only use alarm 1.
+ * Per the Linux RTC framework only one-shot alarms are supported,
+ * there is no periodic alarm mode.
+ */
+static int ds3232_read_alarm(struct device *dev, struct rtc_wkalrm *alarm)
+{
+ struct i2c_client *client = to_i2c_client(dev);
+ struct ds3232 *ds3232 = i2c_get_clientdata(client);
+ int control, stat;
+ int ret;
+ u8 buf[4];
+
+ mutex_lock(&ds3232->mutex);
+
+ ret = i2c_smbus_read_byte_data(client, DS3232_REG_SR);
+ if (ret < 0)
+ goto out;
+ stat = ret;
+ ret = i2c_smbus_read_byte_data(client, DS3232_REG_CR);
+ if (ret < 0)
+ goto out;
+ control = ret;
+ ret = i2c_smbus_read_i2c_block_data(client, DS3232_REG_ALARM1, 4, buf);
+ if (ret < 0)
+ goto out;
+
+ alarm->time.tm_sec = bcd2bin(buf[0] & 0x7F);
+ alarm->time.tm_min = bcd2bin(buf[1] & 0x7F);
+ alarm->time.tm_hour = bcd2bin(buf[2] & 0x7F);
+ alarm->time.tm_mday = bcd2bin(buf[3] & 0x7F);
+
+ alarm->time.tm_mon = -1;
+ alarm->time.tm_year = -1;
+ alarm->time.tm_wday = -1;
+ alarm->time.tm_yday = -1;
+ alarm->time.tm_isdst = -1;
+
+ alarm->enabled = !!(control & DS3232_REG_CR_A1IE);
+ alarm->pending = !!(stat & DS3232_REG_SR_A1F);
+
+ ret = 0;
+out:
+ mutex_unlock(&ds3232->mutex);
+ return ret;
+}
+
+/*
+ * The Linux RTC subsystem does not support weekday alarms,
+ * and only 24-hour time mode is supported.
+ */
+static int ds3232_set_alarm(struct device *dev, struct rtc_wkalrm *alarm)
+{
+ struct i2c_client *client = to_i2c_client(dev);
+ struct ds3232 *ds3232 = i2c_get_clientdata(client);
+ int control, stat;
+ int ret;
+ u8 buf[4];
+
+ if (client->irq <= 0)
+ return -EINVAL;
+
+ mutex_lock(&ds3232->mutex);
+
+ buf[0] = bin2bcd(alarm->time.tm_sec);
+ buf[1] = bin2bcd(alarm->time.tm_min);
+ buf[2] = bin2bcd(alarm->time.tm_hour);
+ buf[3] = bin2bcd(alarm->time.tm_mday);
+
+ /* clear alarm interrupt enable bit */
+ ret = i2c_smbus_read_byte_data(client, DS3232_REG_CR);
+ if (ret < 0)
+ goto out;
+ control = ret;
+ control &= ~(DS3232_REG_CR_A1IE | DS3232_REG_CR_A2IE);
+ ret = i2c_smbus_write_byte_data(client, DS3232_REG_CR, control);
+ if (ret < 0)
+ goto out;
+
+ /* clear any pending alarm flag */
+ ret = i2c_smbus_read_byte_data(client, DS3232_REG_SR);
+ if (ret < 0)
+ goto out;
+ stat = ret;
+ stat &= ~(DS3232_REG_SR_A1F | DS3232_REG_SR_A2F);
+ ret = i2c_smbus_write_byte_data(client, DS3232_REG_SR, stat);
+ if (ret < 0)
+ goto out;
+
+ ret = i2c_smbus_write_i2c_block_data(client, DS3232_REG_ALARM1, 4, buf);
+
+ if (alarm->enabled) {
+ control |= DS3232_REG_CR_A1IE;
+ ret = i2c_smbus_write_byte_data(client, DS3232_REG_CR, control);
+ }
+out:
+ mutex_unlock(&ds3232->mutex);
+ return ret;
+}
+
+static void ds3232_update_alarm(struct i2c_client *client)
+{
+ struct ds3232 *ds3232 = i2c_get_clientdata(client);
+ int control;
+ int ret;
+ u8 buf[4];
+
+ mutex_lock(&ds3232->mutex);
+
+ ret = i2c_smbus_read_i2c_block_data(client, DS3232_REG_ALARM1, 4, buf);
+ if (ret < 0)
+ goto unlock;
+
+ buf[0] = bcd2bin(buf[0]) < 0 || (ds3232->rtc->irq_data & RTC_UF) ?
+ 0x80 : buf[0];
+ buf[1] = bcd2bin(buf[1]) < 0 || (ds3232->rtc->irq_data & RTC_UF) ?
+ 0x80 : buf[1];
+ buf[2] = bcd2bin(buf[2]) < 0 || (ds3232->rtc->irq_data & RTC_UF) ?
+ 0x80 : buf[2];
+ buf[3] = bcd2bin(buf[3]) < 0 || (ds3232->rtc->irq_data & RTC_UF) ?
+ 0x80 : buf[3];
+
+ ret = i2c_smbus_write_i2c_block_data(client, DS3232_REG_ALARM1, 4, buf);
+ if (ret < 0)
+ goto unlock;
+
+ control = i2c_smbus_read_byte_data(client, DS3232_REG_CR);
+ if (control < 0)
+ goto unlock;
+
+ if (ds3232->rtc->irq_data & (RTC_AF | RTC_UF))
+ /* enable alarm1 interrupt */
+ control |= DS3232_REG_CR_A1IE;
+ else
+ /* disable alarm1 interrupt */
+ control &= ~(DS3232_REG_CR_A1IE);
+ i2c_smbus_write_byte_data(client, DS3232_REG_CR, control);
+
+unlock:
+ mutex_unlock(&ds3232->mutex);
+}
+
+static int ds3232_alarm_irq_enable(struct device *dev, unsigned int enabled)
+{
+ struct i2c_client *client = to_i2c_client(dev);
+ struct ds3232 *ds3232 = i2c_get_clientdata(client);
+
+ if (client->irq <= 0)
+ return -EINVAL;
+
+ if (enabled)
+ ds3232->rtc->irq_data |= RTC_AF;
+ else
+ ds3232->rtc->irq_data &= ~RTC_AF;
+
+ ds3232_update_alarm(client);
+ return 0;
+}
+
+static int ds3232_update_irq_enable(struct device *dev, unsigned int enabled)
+{
+ struct i2c_client *client = to_i2c_client(dev);
+ struct ds3232 *ds3232 = i2c_get_clientdata(client);
+
+ if (client->irq <= 0)
+ return -EINVAL;
+
+ if (enabled)
+ ds3232->rtc->irq_data |= RTC_UF;
+ else
+ ds3232->rtc->irq_data &= ~RTC_UF;
+
+ ds3232_update_alarm(client);
+ return 0;
+}
+
static irqreturn_t ds3232_irq(int irq, void *dev_id)
{
struct i2c_client *client = dev_id;
@@ -222,6 +399,10 @@ unlock:
static const struct rtc_class_ops ds3232_rtc_ops = {
.read_time = ds3232_read_time,
.set_time = ds3232_set_time,
+ .read_alarm = ds3232_read_alarm,
+ .set_alarm = ds3232_set_alarm,
+ .alarm_irq_enable = ds3232_alarm_irq_enable,
+ .update_irq_enable = ds3232_update_irq_enable,
};
static int __devinit ds3232_probe(struct i2c_client *client,
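
The DS3232 alarm code above packs rtc_wkalrm fields as BCD, with bit 7 of each alarm register reserved as a mask flag. Below is a self-contained sketch of the BCD helpers and the "& 0x7F" masking used when reading alarm 1; bcd2bin/bin2bcd are re-implemented locally rather than taken from the kernel:

#include <stdint.h>
#include <stdio.h>

static uint8_t bin2bcd(unsigned v)	{ return ((v / 10) << 4) | (v % 10); }
static unsigned bcd2bin(uint8_t v)	{ return (v >> 4) * 10 + (v & 0x0F); }

int main(void)
{
	/* Alarm set for 23:45:30 on day-of-month 7. */
	uint8_t alarm_regs[4] = {
		bin2bcd(30),	/* seconds */
		bin2bcd(45),	/* minutes */
		bin2bcd(23),	/* hours   */
		bin2bcd(7),	/* mday    */
	};

	/* Bit 7 of each register is the alarm-mask bit, so the value is
	 * recovered with the same "& 0x7F" used in ds3232_read_alarm(). */
	printf("sec=%u min=%u hour=%u mday=%u\n",
	       bcd2bin(alarm_regs[0] & 0x7F),
	       bcd2bin(alarm_regs[1] & 0x7F),
	       bcd2bin(alarm_regs[2] & 0x7F),
	       bcd2bin(alarm_regs[3] & 0x7F));
	return 0;
}
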
diff --git a/drivers/rtc/rtc-jz4740.c b/drivers/rtc/rtc-jz4740.c
index 2619d57b91d7..2e16f72c9056 100644
--- a/drivers/rtc/rtc-jz4740.c
+++ b/drivers/rtc/rtc-jz4740.c
@@ -1,5 +1,6 @@
/*
* Copyright (C) 2009-2010, Lars-Peter Clausen <lars@metafoo.de>
+ * Copyright (C) 2010, Paul Cercueil <paul@crapouillou.net>
* JZ4740 SoC RTC driver
*
* This program is free software; you can redistribute it and/or modify it
@@ -161,7 +162,8 @@ static int jz4740_rtc_set_alarm(struct device *dev, struct rtc_wkalrm *alrm)
ret = jz4740_rtc_reg_write(rtc, JZ_REG_RTC_SEC_ALARM, secs);
if (!ret)
- ret = jz4740_rtc_ctrl_set_bits(rtc, JZ_RTC_CTRL_AE, alrm->enabled);
+ ret = jz4740_rtc_ctrl_set_bits(rtc,
+ JZ_RTC_CTRL_AE | JZ_RTC_CTRL_AF_IRQ, alrm->enabled);
return ret;
}
@@ -258,6 +260,8 @@ static int __devinit jz4740_rtc_probe(struct platform_device *pdev)
platform_set_drvdata(pdev, rtc);
+ device_init_wakeup(&pdev->dev, 1);
+
rtc->rtc = rtc_device_register(pdev->name, &pdev->dev, &jz4740_rtc_ops,
THIS_MODULE);
if (IS_ERR(rtc->rtc)) {
@@ -318,12 +322,43 @@ static int __devexit jz4740_rtc_remove(struct platform_device *pdev)
return 0;
}
+
+#ifdef CONFIG_PM
+static int jz4740_rtc_suspend(struct device *dev)
+{
+ struct jz4740_rtc *rtc = dev_get_drvdata(dev);
+
+ if (device_may_wakeup(dev))
+ enable_irq_wake(rtc->irq);
+ return 0;
+}
+
+static int jz4740_rtc_resume(struct device *dev)
+{
+ struct jz4740_rtc *rtc = dev_get_drvdata(dev);
+
+ if (device_may_wakeup(dev))
+ disable_irq_wake(rtc->irq);
+ return 0;
+}
+
+static const struct dev_pm_ops jz4740_pm_ops = {
+ .suspend = jz4740_rtc_suspend,
+ .resume = jz4740_rtc_resume,
+};
+#define JZ4740_RTC_PM_OPS (&jz4740_pm_ops)
+
+#else
+#define JZ4740_RTC_PM_OPS NULL
+#endif /* CONFIG_PM */
+
struct platform_driver jz4740_rtc_driver = {
- .probe = jz4740_rtc_probe,
- .remove = __devexit_p(jz4740_rtc_remove),
- .driver = {
- .name = "jz4740-rtc",
+ .probe = jz4740_rtc_probe,
+ .remove = __devexit_p(jz4740_rtc_remove),
+ .driver = {
+ .name = "jz4740-rtc",
.owner = THIS_MODULE,
+ .pm = JZ4740_RTC_PM_OPS,
},
};
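
The jz4740 hunk above wires a dev_pm_ops table in only when CONFIG_PM is set and otherwise falls back to a NULL pointer. A compilable sketch of that preprocessor pattern, using a hypothetical ops structure rather than the kernel's:

#include <stdio.h>

struct pm_ops {
	int (*suspend)(void);
	int (*resume)(void);
};

#ifdef CONFIG_PM
static int my_suspend(void) { printf("suspend\n"); return 0; }
static int my_resume(void)  { printf("resume\n");  return 0; }

static const struct pm_ops my_pm_ops = {
	.suspend = my_suspend,
	.resume  = my_resume,
};
#define MY_PM_OPS (&my_pm_ops)
#else
/* Without power-management support the driver exposes no ops at all. */
#define MY_PM_OPS ((const struct pm_ops *)NULL)
#endif

struct driver {
	const char *name;
	const struct pm_ops *pm;
};

static struct driver my_driver = {
	.name = "example",
	.pm   = MY_PM_OPS,
};

int main(void)
{
	printf("%s: pm ops %s\n", my_driver.name,
	       my_driver.pm ? "present" : "absent");
	return 0;
}
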
diff --git a/drivers/rtc/rtc-lpc32xx.c b/drivers/rtc/rtc-lpc32xx.c
new file mode 100644
index 000000000000..ec8701ce99f9
--- /dev/null
+++ b/drivers/rtc/rtc-lpc32xx.c
@@ -0,0 +1,414 @@
+/*
+ * Copyright (C) 2010 NXP Semiconductors
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write to the Free Software Foundation, Inc.,
+ * 675 Mass Ave, Cambridge, MA 02139, USA.
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/platform_device.h>
+#include <linux/spinlock.h>
+#include <linux/rtc.h>
+#include <linux/slab.h>
+#include <linux/io.h>
+
+/*
+ * Clock and Power control register offsets
+ */
+#define LPC32XX_RTC_UCOUNT 0x00
+#define LPC32XX_RTC_DCOUNT 0x04
+#define LPC32XX_RTC_MATCH0 0x08
+#define LPC32XX_RTC_MATCH1 0x0C
+#define LPC32XX_RTC_CTRL 0x10
+#define LPC32XX_RTC_INTSTAT 0x14
+#define LPC32XX_RTC_KEY 0x18
+#define LPC32XX_RTC_SRAM 0x80
+
+#define LPC32XX_RTC_CTRL_MATCH0 (1 << 0)
+#define LPC32XX_RTC_CTRL_MATCH1 (1 << 1)
+#define LPC32XX_RTC_CTRL_ONSW_MATCH0 (1 << 2)
+#define LPC32XX_RTC_CTRL_ONSW_MATCH1 (1 << 3)
+#define LPC32XX_RTC_CTRL_SW_RESET (1 << 4)
+#define LPC32XX_RTC_CTRL_CNTR_DIS (1 << 6)
+#define LPC32XX_RTC_CTRL_ONSW_FORCE_HI (1 << 7)
+
+#define LPC32XX_RTC_INTSTAT_MATCH0 (1 << 0)
+#define LPC32XX_RTC_INTSTAT_MATCH1 (1 << 1)
+#define LPC32XX_RTC_INTSTAT_ONSW (1 << 2)
+
+#define LPC32XX_RTC_KEY_ONSW_LOADVAL 0xB5C13F27
+
+#define RTC_NAME "rtc-lpc32xx"
+
+#define rtc_readl(dev, reg) \
+ __raw_readl((dev)->rtc_base + (reg))
+#define rtc_writel(dev, reg, val) \
+ __raw_writel((val), (dev)->rtc_base + (reg))
+
+struct lpc32xx_rtc {
+ void __iomem *rtc_base;
+ int irq;
+ unsigned char alarm_enabled;
+ struct rtc_device *rtc;
+ spinlock_t lock;
+};
+
+static int lpc32xx_rtc_read_time(struct device *dev, struct rtc_time *time)
+{
+ unsigned long elapsed_sec;
+ struct lpc32xx_rtc *rtc = dev_get_drvdata(dev);
+
+ elapsed_sec = rtc_readl(rtc, LPC32XX_RTC_UCOUNT);
+ rtc_time_to_tm(elapsed_sec, time);
+
+ return rtc_valid_tm(time);
+}
+
+static int lpc32xx_rtc_set_mmss(struct device *dev, unsigned long secs)
+{
+ struct lpc32xx_rtc *rtc = dev_get_drvdata(dev);
+ u32 tmp;
+
+ spin_lock_irq(&rtc->lock);
+
+ /* RTC must be disabled during count update */
+ tmp = rtc_readl(rtc, LPC32XX_RTC_CTRL);
+ rtc_writel(rtc, LPC32XX_RTC_CTRL, tmp | LPC32XX_RTC_CTRL_CNTR_DIS);
+ rtc_writel(rtc, LPC32XX_RTC_UCOUNT, secs);
+ rtc_writel(rtc, LPC32XX_RTC_DCOUNT, 0xFFFFFFFF - secs);
+ rtc_writel(rtc, LPC32XX_RTC_CTRL, tmp &= ~LPC32XX_RTC_CTRL_CNTR_DIS);
+
+ spin_unlock_irq(&rtc->lock);
+
+ return 0;
+}
+
+static int lpc32xx_rtc_read_alarm(struct device *dev,
+ struct rtc_wkalrm *wkalrm)
+{
+ struct lpc32xx_rtc *rtc = dev_get_drvdata(dev);
+
+ rtc_time_to_tm(rtc_readl(rtc, LPC32XX_RTC_MATCH0), &wkalrm->time);
+ wkalrm->enabled = rtc->alarm_enabled;
+ wkalrm->pending = !!(rtc_readl(rtc, LPC32XX_RTC_INTSTAT) &
+ LPC32XX_RTC_INTSTAT_MATCH0);
+
+ return rtc_valid_tm(&wkalrm->time);
+}
+
+static int lpc32xx_rtc_set_alarm(struct device *dev,
+ struct rtc_wkalrm *wkalrm)
+{
+ struct lpc32xx_rtc *rtc = dev_get_drvdata(dev);
+ unsigned long alarmsecs;
+ u32 tmp;
+ int ret;
+
+ ret = rtc_tm_to_time(&wkalrm->time, &alarmsecs);
+ if (ret < 0) {
+ dev_warn(dev, "Failed to convert time: %d\n", ret);
+ return ret;
+ }
+
+ spin_lock_irq(&rtc->lock);
+
+ /* Disable alarm during update */
+ tmp = rtc_readl(rtc, LPC32XX_RTC_CTRL);
+ rtc_writel(rtc, LPC32XX_RTC_CTRL, tmp & ~LPC32XX_RTC_CTRL_MATCH0);
+
+ rtc_writel(rtc, LPC32XX_RTC_MATCH0, alarmsecs);
+
+ rtc->alarm_enabled = wkalrm->enabled;
+ if (wkalrm->enabled) {
+ rtc_writel(rtc, LPC32XX_RTC_INTSTAT,
+ LPC32XX_RTC_INTSTAT_MATCH0);
+ rtc_writel(rtc, LPC32XX_RTC_CTRL, tmp |
+ LPC32XX_RTC_CTRL_MATCH0);
+ }
+
+ spin_unlock_irq(&rtc->lock);
+
+ return 0;
+}
+
+static int lpc32xx_rtc_alarm_irq_enable(struct device *dev,
+ unsigned int enabled)
+{
+ struct lpc32xx_rtc *rtc = dev_get_drvdata(dev);
+ u32 tmp;
+
+ spin_lock_irq(&rtc->lock);
+ tmp = rtc_readl(rtc, LPC32XX_RTC_CTRL);
+
+ if (enabled) {
+ rtc->alarm_enabled = 1;
+ tmp |= LPC32XX_RTC_CTRL_MATCH0;
+ } else {
+ rtc->alarm_enabled = 0;
+ tmp &= ~LPC32XX_RTC_CTRL_MATCH0;
+ }
+
+ rtc_writel(rtc, LPC32XX_RTC_CTRL, tmp);
+ spin_unlock_irq(&rtc->lock);
+
+ return 0;
+}
+
+static irqreturn_t lpc32xx_rtc_alarm_interrupt(int irq, void *dev)
+{
+ struct lpc32xx_rtc *rtc = dev;
+
+ spin_lock(&rtc->lock);
+
+ /* Disable alarm interrupt */
+ rtc_writel(rtc, LPC32XX_RTC_CTRL,
+ rtc_readl(rtc, LPC32XX_RTC_CTRL) &
+ ~LPC32XX_RTC_CTRL_MATCH0);
+ rtc->alarm_enabled = 0;
+
+ /*
+ * Write a large value to the match register so the RTC won't
+ * keep re-asserting the match status
+ */
+ rtc_writel(rtc, LPC32XX_RTC_MATCH0, 0xFFFFFFFF);
+ rtc_writel(rtc, LPC32XX_RTC_INTSTAT, LPC32XX_RTC_INTSTAT_MATCH0);
+
+ spin_unlock(&rtc->lock);
+
+ rtc_update_irq(rtc->rtc, 1, RTC_IRQF | RTC_AF);
+
+ return IRQ_HANDLED;
+}
+
+static const struct rtc_class_ops lpc32xx_rtc_ops = {
+ .read_time = lpc32xx_rtc_read_time,
+ .set_mmss = lpc32xx_rtc_set_mmss,
+ .read_alarm = lpc32xx_rtc_read_alarm,
+ .set_alarm = lpc32xx_rtc_set_alarm,
+ .alarm_irq_enable = lpc32xx_rtc_alarm_irq_enable,
+};
+
+static int __devinit lpc32xx_rtc_probe(struct platform_device *pdev)
+{
+ struct resource *res;
+ struct lpc32xx_rtc *rtc;
+ resource_size_t size;
+ int rtcirq;
+ u32 tmp;
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (!res) {
+ dev_err(&pdev->dev, "Can't get memory resource\n");
+ return -ENOENT;
+ }
+
+ rtcirq = platform_get_irq(pdev, 0);
+ if (rtcirq < 0 || rtcirq >= NR_IRQS) {
+ dev_warn(&pdev->dev, "Can't get interrupt resource\n");
+ rtcirq = -1;
+ }
+
+ rtc = devm_kzalloc(&pdev->dev, sizeof(*rtc), GFP_KERNEL);
+ if (unlikely(!rtc)) {
+ dev_err(&pdev->dev, "Can't allocate memory\n");
+ return -ENOMEM;
+ }
+ rtc->irq = rtcirq;
+
+ size = resource_size(res);
+
+ if (!devm_request_mem_region(&pdev->dev, res->start, size,
+ pdev->name)) {
+ dev_err(&pdev->dev, "RTC registers are not free\n");
+ return -EBUSY;
+ }
+
+ rtc->rtc_base = devm_ioremap(&pdev->dev, res->start, size);
+ if (!rtc->rtc_base) {
+ dev_err(&pdev->dev, "Can't map memory\n");
+ return -ENOMEM;
+ }
+
+ spin_lock_init(&rtc->lock);
+
+ /*
+ * The RTC is on a separate power domain and can keep its state
+ * across a chip power cycle. If the RTC has never been previously
+ * setup, then set it up now for the first time.
+ */
+ tmp = rtc_readl(rtc, LPC32XX_RTC_CTRL);
+ if (rtc_readl(rtc, LPC32XX_RTC_KEY) != LPC32XX_RTC_KEY_ONSW_LOADVAL) {
+ tmp &= ~(LPC32XX_RTC_CTRL_SW_RESET |
+ LPC32XX_RTC_CTRL_CNTR_DIS |
+ LPC32XX_RTC_CTRL_MATCH0 |
+ LPC32XX_RTC_CTRL_MATCH1 |
+ LPC32XX_RTC_CTRL_ONSW_MATCH0 |
+ LPC32XX_RTC_CTRL_ONSW_MATCH1 |
+ LPC32XX_RTC_CTRL_ONSW_FORCE_HI);
+ rtc_writel(rtc, LPC32XX_RTC_CTRL, tmp);
+
+ /* Clear latched interrupt states */
+ rtc_writel(rtc, LPC32XX_RTC_MATCH0, 0xFFFFFFFF);
+ rtc_writel(rtc, LPC32XX_RTC_INTSTAT,
+ LPC32XX_RTC_INTSTAT_MATCH0 |
+ LPC32XX_RTC_INTSTAT_MATCH1 |
+ LPC32XX_RTC_INTSTAT_ONSW);
+
+ /* Write key value to RTC so it won't reload on reset */
+ rtc_writel(rtc, LPC32XX_RTC_KEY,
+ LPC32XX_RTC_KEY_ONSW_LOADVAL);
+ } else {
+ rtc_writel(rtc, LPC32XX_RTC_CTRL,
+ tmp & ~LPC32XX_RTC_CTRL_MATCH0);
+ }
+
+ platform_set_drvdata(pdev, rtc);
+
+ rtc->rtc = rtc_device_register(RTC_NAME, &pdev->dev, &lpc32xx_rtc_ops,
+ THIS_MODULE);
+ if (IS_ERR(rtc->rtc)) {
+ dev_err(&pdev->dev, "Can't get RTC\n");
+ platform_set_drvdata(pdev, NULL);
+ return PTR_ERR(rtc->rtc);
+ }
+
+ /*
+ * IRQ is enabled after device registration in case alarm IRQ
+ * is pending upon suspend exit.
+ */
+ if (rtc->irq >= 0) {
+ if (devm_request_irq(&pdev->dev, rtc->irq,
+ lpc32xx_rtc_alarm_interrupt,
+ IRQF_DISABLED, pdev->name, rtc) < 0) {
+ dev_warn(&pdev->dev, "Can't request interrupt.\n");
+ rtc->irq = -1;
+ } else {
+ device_init_wakeup(&pdev->dev, 1);
+ }
+ }
+
+ return 0;
+}
+
+static int __devexit lpc32xx_rtc_remove(struct platform_device *pdev)
+{
+ struct lpc32xx_rtc *rtc = platform_get_drvdata(pdev);
+
+ if (rtc->irq >= 0)
+ device_init_wakeup(&pdev->dev, 0);
+
+ platform_set_drvdata(pdev, NULL);
+ rtc_device_unregister(rtc->rtc);
+
+ return 0;
+}
+
+#ifdef CONFIG_PM
+static int lpc32xx_rtc_suspend(struct device *dev)
+{
+ struct platform_device *pdev = to_platform_device(dev);
+ struct lpc32xx_rtc *rtc = platform_get_drvdata(pdev);
+
+ if (rtc->irq >= 0) {
+ if (device_may_wakeup(&pdev->dev))
+ enable_irq_wake(rtc->irq);
+ else
+ disable_irq_wake(rtc->irq);
+ }
+
+ return 0;
+}
+
+static int lpc32xx_rtc_resume(struct device *dev)
+{
+ struct platform_device *pdev = to_platform_device(dev);
+ struct lpc32xx_rtc *rtc = platform_get_drvdata(pdev);
+
+ if (rtc->irq >= 0 && device_may_wakeup(&pdev->dev))
+ disable_irq_wake(rtc->irq);
+
+ return 0;
+}
+
+/* Unconditionally disable the alarm */
+static int lpc32xx_rtc_freeze(struct device *dev)
+{
+ struct platform_device *pdev = to_platform_device(dev);
+ struct lpc32xx_rtc *rtc = platform_get_drvdata(pdev);
+
+ spin_lock_irq(&rtc->lock);
+
+ rtc_writel(rtc, LPC32XX_RTC_CTRL,
+ rtc_readl(rtc, LPC32XX_RTC_CTRL) &
+ ~LPC32XX_RTC_CTRL_MATCH0);
+
+ spin_unlock_irq(&rtc->lock);
+
+ return 0;
+}
+
+static int lpc32xx_rtc_thaw(struct device *dev)
+{
+ struct platform_device *pdev = to_platform_device(dev);
+ struct lpc32xx_rtc *rtc = platform_get_drvdata(pdev);
+
+ if (rtc->alarm_enabled) {
+ spin_lock_irq(&rtc->lock);
+
+ rtc_writel(rtc, LPC32XX_RTC_CTRL,
+ rtc_readl(rtc, LPC32XX_RTC_CTRL) |
+ LPC32XX_RTC_CTRL_MATCH0);
+
+ spin_unlock_irq(&rtc->lock);
+ }
+
+ return 0;
+}
+
+static const struct dev_pm_ops lpc32xx_rtc_pm_ops = {
+ .suspend = lpc32xx_rtc_suspend,
+ .resume = lpc32xx_rtc_resume,
+ .freeze = lpc32xx_rtc_freeze,
+ .thaw = lpc32xx_rtc_thaw,
+ .restore = lpc32xx_rtc_resume
+};
+
+#define LPC32XX_RTC_PM_OPS (&lpc32xx_rtc_pm_ops)
+#else
+#define LPC32XX_RTC_PM_OPS NULL
+#endif
+
+static struct platform_driver lpc32xx_rtc_driver = {
+ .probe = lpc32xx_rtc_probe,
+ .remove = __devexit_p(lpc32xx_rtc_remove),
+ .driver = {
+ .name = RTC_NAME,
+ .owner = THIS_MODULE,
+ .pm = LPC32XX_RTC_PM_OPS
+ },
+};
+
+static int __init lpc32xx_rtc_init(void)
+{
+ return platform_driver_register(&lpc32xx_rtc_driver);
+}
+module_init(lpc32xx_rtc_init);
+
+static void __exit lpc32xx_rtc_exit(void)
+{
+ platform_driver_unregister(&lpc32xx_rtc_driver);
+}
+module_exit(lpc32xx_rtc_exit);
+
+MODULE_AUTHOR("Kevin Wells <wellsk40@gmail.com");
+MODULE_DESCRIPTION("RTC driver for the LPC32xx SoC");
+MODULE_LICENSE("GPL");
+MODULE_ALIAS("platform:rtc-lpc32xx");
diff --git a/drivers/rtc/rtc-max8998.c b/drivers/rtc/rtc-max8998.c
new file mode 100644
index 000000000000..f22dee35f330
--- /dev/null
+++ b/drivers/rtc/rtc-max8998.c
@@ -0,0 +1,300 @@
+/*
+ * RTC driver for Maxim MAX8998
+ *
+ * Copyright (C) 2010 Samsung Electronics Co.Ltd
+ * Author: Minkyu Kang <mk7.kang@samsung.com>
+ * Author: Joonyoung Shim <jy0922.shim@samsung.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ *
+ */
+
+#include <linux/module.h>
+#include <linux/i2c.h>
+#include <linux/slab.h>
+#include <linux/bcd.h>
+#include <linux/rtc.h>
+#include <linux/platform_device.h>
+#include <linux/mfd/max8998.h>
+#include <linux/mfd/max8998-private.h>
+
+#define MAX8998_RTC_SEC 0x00
+#define MAX8998_RTC_MIN 0x01
+#define MAX8998_RTC_HOUR 0x02
+#define MAX8998_RTC_WEEKDAY 0x03
+#define MAX8998_RTC_DATE 0x04
+#define MAX8998_RTC_MONTH 0x05
+#define MAX8998_RTC_YEAR1 0x06
+#define MAX8998_RTC_YEAR2 0x07
+#define MAX8998_ALARM0_SEC 0x08
+#define MAX8998_ALARM0_MIN 0x09
+#define MAX8998_ALARM0_HOUR 0x0a
+#define MAX8998_ALARM0_WEEKDAY 0x0b
+#define MAX8998_ALARM0_DATE 0x0c
+#define MAX8998_ALARM0_MONTH 0x0d
+#define MAX8998_ALARM0_YEAR1 0x0e
+#define MAX8998_ALARM0_YEAR2 0x0f
+#define MAX8998_ALARM1_SEC 0x10
+#define MAX8998_ALARM1_MIN 0x11
+#define MAX8998_ALARM1_HOUR 0x12
+#define MAX8998_ALARM1_WEEKDAY 0x13
+#define MAX8998_ALARM1_DATE 0x14
+#define MAX8998_ALARM1_MONTH 0x15
+#define MAX8998_ALARM1_YEAR1 0x16
+#define MAX8998_ALARM1_YEAR2 0x17
+#define MAX8998_ALARM0_CONF 0x18
+#define MAX8998_ALARM1_CONF 0x19
+#define MAX8998_RTC_STATUS 0x1a
+#define MAX8998_WTSR_SMPL_CNTL 0x1b
+#define MAX8998_TEST 0x1f
+
+#define HOUR_12 (1 << 7)
+#define HOUR_PM (1 << 5)
+#define ALARM0_STATUS (1 << 1)
+#define ALARM1_STATUS (1 << 2)
+
+enum {
+ RTC_SEC = 0,
+ RTC_MIN,
+ RTC_HOUR,
+ RTC_WEEKDAY,
+ RTC_DATE,
+ RTC_MONTH,
+ RTC_YEAR1,
+ RTC_YEAR2,
+};
+
+struct max8998_rtc_info {
+ struct device *dev;
+ struct max8998_dev *max8998;
+ struct i2c_client *rtc;
+ struct rtc_device *rtc_dev;
+ int irq;
+};
+
+static void max8998_data_to_tm(u8 *data, struct rtc_time *tm)
+{
+ tm->tm_sec = bcd2bin(data[RTC_SEC]);
+ tm->tm_min = bcd2bin(data[RTC_MIN]);
+ if (data[RTC_HOUR] & HOUR_12) {
+ tm->tm_hour = bcd2bin(data[RTC_HOUR] & 0x1f);
+ if (data[RTC_HOUR] & HOUR_PM)
+ tm->tm_hour += 12;
+ } else
+ tm->tm_hour = bcd2bin(data[RTC_HOUR] & 0x3f);
+
+ tm->tm_wday = data[RTC_WEEKDAY] & 0x07;
+ tm->tm_mday = bcd2bin(data[RTC_DATE]);
+ tm->tm_mon = bcd2bin(data[RTC_MONTH]);
+ tm->tm_year = bcd2bin(data[RTC_YEAR1]) + bcd2bin(data[RTC_YEAR2]) * 100;
+ tm->tm_year -= 1900;
+}
+
+static void max8998_tm_to_data(struct rtc_time *tm, u8 *data)
+{
+ data[RTC_SEC] = bin2bcd(tm->tm_sec);
+ data[RTC_MIN] = bin2bcd(tm->tm_min);
+ data[RTC_HOUR] = bin2bcd(tm->tm_hour);
+ data[RTC_WEEKDAY] = tm->tm_wday;
+ data[RTC_DATE] = bin2bcd(tm->tm_mday);
+ data[RTC_MONTH] = bin2bcd(tm->tm_mon);
+ data[RTC_YEAR1] = bin2bcd(tm->tm_year % 100);
+ data[RTC_YEAR2] = bin2bcd((tm->tm_year + 1900) / 100);
+}
+
+static int max8998_rtc_read_time(struct device *dev, struct rtc_time *tm)
+{
+ struct max8998_rtc_info *info = dev_get_drvdata(dev);
+ u8 data[8];
+ int ret;
+
+ ret = max8998_bulk_read(info->rtc, MAX8998_RTC_SEC, 8, data);
+ if (ret < 0)
+ return ret;
+
+ max8998_data_to_tm(data, tm);
+
+ return rtc_valid_tm(tm);
+}
+
+static int max8998_rtc_set_time(struct device *dev, struct rtc_time *tm)
+{
+ struct max8998_rtc_info *info = dev_get_drvdata(dev);
+ u8 data[8];
+
+ max8998_tm_to_data(tm, data);
+
+ return max8998_bulk_write(info->rtc, MAX8998_RTC_SEC, 8, data);
+}
+
+static int max8998_rtc_read_alarm(struct device *dev, struct rtc_wkalrm *alrm)
+{
+ struct max8998_rtc_info *info = dev_get_drvdata(dev);
+ u8 data[8];
+ u8 val;
+ int ret;
+
+ ret = max8998_bulk_read(info->rtc, MAX8998_ALARM0_SEC, 8, data);
+ if (ret < 0)
+ return ret;
+
+ max8998_data_to_tm(data, &alrm->time);
+
+ ret = max8998_read_reg(info->rtc, MAX8998_ALARM0_CONF, &val);
+ if (ret < 0)
+ return ret;
+
+ alrm->enabled = !!val;
+
+ ret = max8998_read_reg(info->rtc, MAX8998_RTC_STATUS, &val);
+ if (ret < 0)
+ return ret;
+
+ if (val & ALARM0_STATUS)
+ alrm->pending = 1;
+ else
+ alrm->pending = 0;
+
+ return 0;
+}
+
+static int max8998_rtc_stop_alarm(struct max8998_rtc_info *info)
+{
+ return max8998_write_reg(info->rtc, MAX8998_ALARM0_CONF, 0);
+}
+
+static int max8998_rtc_start_alarm(struct max8998_rtc_info *info)
+{
+ return max8998_write_reg(info->rtc, MAX8998_ALARM0_CONF, 0x77);
+}
+
+static int max8998_rtc_set_alarm(struct device *dev, struct rtc_wkalrm *alrm)
+{
+ struct max8998_rtc_info *info = dev_get_drvdata(dev);
+ u8 data[8];
+ int ret;
+
+ max8998_tm_to_data(&alrm->time, data);
+
+ ret = max8998_rtc_stop_alarm(info);
+ if (ret < 0)
+ return ret;
+
+ ret = max8998_bulk_write(info->rtc, MAX8998_ALARM0_SEC, 8, data);
+ if (ret < 0)
+ return ret;
+
+ if (alrm->enabled)
+ return max8998_rtc_start_alarm(info);
+
+ return 0;
+}
+
+static int max8998_rtc_alarm_irq_enable(struct device *dev,
+ unsigned int enabled)
+{
+ struct max8998_rtc_info *info = dev_get_drvdata(dev);
+
+ if (enabled)
+ return max8998_rtc_start_alarm(info);
+ else
+ return max8998_rtc_stop_alarm(info);
+}
+
+static irqreturn_t max8998_rtc_alarm_irq(int irq, void *data)
+{
+ struct max8998_rtc_info *info = data;
+
+ rtc_update_irq(info->rtc_dev, 1, RTC_IRQF | RTC_AF);
+
+ return IRQ_HANDLED;
+}
+
+static const struct rtc_class_ops max8998_rtc_ops = {
+ .read_time = max8998_rtc_read_time,
+ .set_time = max8998_rtc_set_time,
+ .read_alarm = max8998_rtc_read_alarm,
+ .set_alarm = max8998_rtc_set_alarm,
+ .alarm_irq_enable = max8998_rtc_alarm_irq_enable,
+};
+
+static int __devinit max8998_rtc_probe(struct platform_device *pdev)
+{
+ struct max8998_dev *max8998 = dev_get_drvdata(pdev->dev.parent);
+ struct max8998_rtc_info *info;
+ int ret;
+
+ info = kzalloc(sizeof(struct max8998_rtc_info), GFP_KERNEL);
+ if (!info)
+ return -ENOMEM;
+
+ info->dev = &pdev->dev;
+ info->max8998 = max8998;
+ info->rtc = max8998->rtc;
+ info->irq = max8998->irq_base + MAX8998_IRQ_ALARM0;
+
+ info->rtc_dev = rtc_device_register("max8998-rtc", &pdev->dev,
+ &max8998_rtc_ops, THIS_MODULE);
+
+ if (IS_ERR(info->rtc_dev)) {
+ ret = PTR_ERR(info->rtc_dev);
+ dev_err(&pdev->dev, "Failed to register RTC device: %d\n", ret);
+ goto out_rtc;
+ }
+
+ platform_set_drvdata(pdev, info);
+
+ ret = request_threaded_irq(info->irq, NULL, max8998_rtc_alarm_irq, 0,
+ "rtc-alarm0", info);
+ if (ret < 0)
+ dev_err(&pdev->dev, "Failed to request alarm IRQ: %d: %d\n",
+ info->irq, ret);
+
+ return 0;
+
+out_rtc:
+ kfree(info);
+ return ret;
+}
+
+static int __devexit max8998_rtc_remove(struct platform_device *pdev)
+{
+ struct max8998_rtc_info *info = platform_get_drvdata(pdev);
+
+ if (info) {
+ free_irq(info->irq, info);
+ rtc_device_unregister(info->rtc_dev);
+ kfree(info);
+ }
+
+ return 0;
+}
+
+static struct platform_driver max8998_rtc_driver = {
+ .driver = {
+ .name = "max8998-rtc",
+ .owner = THIS_MODULE,
+ },
+ .probe = max8998_rtc_probe,
+ .remove = __devexit_p(max8998_rtc_remove),
+};
+
+static int __init max8998_rtc_init(void)
+{
+ return platform_driver_register(&max8998_rtc_driver);
+}
+module_init(max8998_rtc_init);
+
+static void __exit max8998_rtc_exit(void)
+{
+ platform_driver_unregister(&max8998_rtc_driver);
+}
+module_exit(max8998_rtc_exit);
+
+MODULE_AUTHOR("Minkyu Kang <mk7.kang@samsung.com>");
+MODULE_AUTHOR("Joonyoung Shim <jy0922.shim@samsung.com>");
+MODULE_DESCRIPTION("Maxim MAX8998 RTC driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/rtc/rtc-mc13783.c b/drivers/rtc/rtc-mc13783.c
deleted file mode 100644
index 675bfb515367..000000000000
--- a/drivers/rtc/rtc-mc13783.c
+++ /dev/null
@@ -1,428 +0,0 @@
-/*
- * Real Time Clock driver for Freescale MC13783 PMIC
- *
- * (C) 2009 Sascha Hauer, Pengutronix
- * (C) 2009 Uwe Kleine-Koenig, Pengutronix
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- */
-
-#include <linux/mfd/mc13783.h>
-#include <linux/platform_device.h>
-#include <linux/kernel.h>
-#include <linux/module.h>
-#include <linux/slab.h>
-#include <linux/rtc.h>
-
-#define DRIVER_NAME "mc13783-rtc"
-
-#define MC13783_RTCTOD 20
-#define MC13783_RTCTODA 21
-#define MC13783_RTCDAY 22
-#define MC13783_RTCDAYA 23
-
-struct mc13783_rtc {
- struct rtc_device *rtc;
- struct mc13783 *mc13783;
- int valid;
-};
-
-static int mc13783_rtc_irq_enable_unlocked(struct device *dev,
- unsigned int enabled, int irq)
-{
- struct mc13783_rtc *priv = dev_get_drvdata(dev);
- int (*func)(struct mc13783 *mc13783, int irq);
-
- if (!priv->valid)
- return -ENODATA;
-
- func = enabled ? mc13783_irq_unmask : mc13783_irq_mask;
- return func(priv->mc13783, irq);
-}
-
-static int mc13783_rtc_irq_enable(struct device *dev,
- unsigned int enabled, int irq)
-{
- struct mc13783_rtc *priv = dev_get_drvdata(dev);
- int ret;
-
- mc13783_lock(priv->mc13783);
-
- ret = mc13783_rtc_irq_enable_unlocked(dev, enabled, irq);
-
- mc13783_unlock(priv->mc13783);
-
- return ret;
-}
-
-static int mc13783_rtc_read_time(struct device *dev, struct rtc_time *tm)
-{
- struct mc13783_rtc *priv = dev_get_drvdata(dev);
- unsigned int seconds, days1, days2;
- unsigned long s1970;
- int ret;
-
- mc13783_lock(priv->mc13783);
-
- if (!priv->valid) {
- ret = -ENODATA;
- goto out;
- }
-
- ret = mc13783_reg_read(priv->mc13783, MC13783_RTCDAY, &days1);
- if (unlikely(ret))
- goto out;
-
- ret = mc13783_reg_read(priv->mc13783, MC13783_RTCTOD, &seconds);
- if (unlikely(ret))
- goto out;
-
- ret = mc13783_reg_read(priv->mc13783, MC13783_RTCDAY, &days2);
-out:
- mc13783_unlock(priv->mc13783);
-
- if (ret)
- return ret;
-
- if (days2 == days1 + 1) {
- if (seconds >= 86400 / 2)
- days2 = days1;
- else
- days1 = days2;
- }
-
- if (days1 != days2)
- return -EIO;
-
- s1970 = days1 * 86400 + seconds;
-
- rtc_time_to_tm(s1970, tm);
-
- return rtc_valid_tm(tm);
-}
-
-static int mc13783_rtc_set_mmss(struct device *dev, unsigned long secs)
-{
- struct mc13783_rtc *priv = dev_get_drvdata(dev);
- unsigned int seconds, days;
- unsigned int alarmseconds;
- int ret;
-
- seconds = secs % 86400;
- days = secs / 86400;
-
- mc13783_lock(priv->mc13783);
-
- /*
- * temporarily invalidate alarm to prevent triggering it when the day is
- * already updated while the time isn't yet.
- */
- ret = mc13783_reg_read(priv->mc13783, MC13783_RTCTODA, &alarmseconds);
- if (unlikely(ret))
- goto out;
-
- if (alarmseconds < 86400) {
- ret = mc13783_reg_write(priv->mc13783,
- MC13783_RTCTODA, 0x1ffff);
- if (unlikely(ret))
- goto out;
- }
-
- /*
- * write seconds=0 to prevent a day switch between writing days
- * and seconds below
- */
- ret = mc13783_reg_write(priv->mc13783, MC13783_RTCTOD, 0);
- if (unlikely(ret))
- goto out;
-
- ret = mc13783_reg_write(priv->mc13783, MC13783_RTCDAY, days);
- if (unlikely(ret))
- goto out;
-
- ret = mc13783_reg_write(priv->mc13783, MC13783_RTCTOD, seconds);
- if (unlikely(ret))
- goto out;
-
- /* restore alarm */
- if (alarmseconds < 86400) {
- ret = mc13783_reg_write(priv->mc13783,
- MC13783_RTCTODA, alarmseconds);
- if (unlikely(ret))
- goto out;
- }
-
- ret = mc13783_irq_ack(priv->mc13783, MC13783_IRQ_RTCRST);
- if (unlikely(ret))
- goto out;
-
- ret = mc13783_irq_unmask(priv->mc13783, MC13783_IRQ_RTCRST);
-out:
- priv->valid = !ret;
-
- mc13783_unlock(priv->mc13783);
-
- return ret;
-}
-
-static int mc13783_rtc_read_alarm(struct device *dev, struct rtc_wkalrm *alarm)
-{
- struct mc13783_rtc *priv = dev_get_drvdata(dev);
- unsigned seconds, days;
- unsigned long s1970;
- int enabled, pending;
- int ret;
-
- mc13783_lock(priv->mc13783);
-
- ret = mc13783_reg_read(priv->mc13783, MC13783_RTCTODA, &seconds);
- if (unlikely(ret))
- goto out;
- if (seconds >= 86400) {
- ret = -ENODATA;
- goto out;
- }
-
- ret = mc13783_reg_read(priv->mc13783, MC13783_RTCDAY, &days);
- if (unlikely(ret))
- goto out;
-
- ret = mc13783_irq_status(priv->mc13783, MC13783_IRQ_TODA,
- &enabled, &pending);
-
-out:
- mc13783_unlock(priv->mc13783);
-
- if (ret)
- return ret;
-
- alarm->enabled = enabled;
- alarm->pending = pending;
-
- s1970 = days * 86400 + seconds;
-
- rtc_time_to_tm(s1970, &alarm->time);
- dev_dbg(dev, "%s: %lu\n", __func__, s1970);
-
- return 0;
-}
-
-static int mc13783_rtc_set_alarm(struct device *dev, struct rtc_wkalrm *alarm)
-{
- struct mc13783_rtc *priv = dev_get_drvdata(dev);
- unsigned long s1970;
- unsigned seconds, days;
- int ret;
-
- mc13783_lock(priv->mc13783);
-
- /* disable alarm to prevent false triggering */
- ret = mc13783_reg_write(priv->mc13783, MC13783_RTCTODA, 0x1ffff);
- if (unlikely(ret))
- goto out;
-
- ret = mc13783_irq_ack(priv->mc13783, MC13783_IRQ_TODA);
- if (unlikely(ret))
- goto out;
-
- ret = rtc_tm_to_time(&alarm->time, &s1970);
- if (unlikely(ret))
- goto out;
-
- dev_dbg(dev, "%s: o%2.s %lu\n", __func__, alarm->enabled ? "n" : "ff",
- s1970);
-
- ret = mc13783_rtc_irq_enable_unlocked(dev, alarm->enabled,
- MC13783_IRQ_TODA);
- if (unlikely(ret))
- goto out;
-
- seconds = s1970 % 86400;
- days = s1970 / 86400;
-
- ret = mc13783_reg_write(priv->mc13783, MC13783_RTCDAYA, days);
- if (unlikely(ret))
- goto out;
-
- ret = mc13783_reg_write(priv->mc13783, MC13783_RTCTODA, seconds);
-
-out:
- mc13783_unlock(priv->mc13783);
-
- return ret;
-}
-
-static irqreturn_t mc13783_rtc_alarm_handler(int irq, void *dev)
-{
- struct mc13783_rtc *priv = dev;
- struct mc13783 *mc13783 = priv->mc13783;
-
- dev_dbg(&priv->rtc->dev, "Alarm\n");
-
- rtc_update_irq(priv->rtc, 1, RTC_IRQF | RTC_AF);
-
- mc13783_irq_ack(mc13783, irq);
-
- return IRQ_HANDLED;
-}
-
-static irqreturn_t mc13783_rtc_update_handler(int irq, void *dev)
-{
- struct mc13783_rtc *priv = dev;
- struct mc13783 *mc13783 = priv->mc13783;
-
- dev_dbg(&priv->rtc->dev, "1HZ\n");
-
- rtc_update_irq(priv->rtc, 1, RTC_IRQF | RTC_UF);
-
- mc13783_irq_ack(mc13783, irq);
-
- return IRQ_HANDLED;
-}
-
-static int mc13783_rtc_update_irq_enable(struct device *dev,
- unsigned int enabled)
-{
- return mc13783_rtc_irq_enable(dev, enabled, MC13783_IRQ_1HZ);
-}
-
-static int mc13783_rtc_alarm_irq_enable(struct device *dev,
- unsigned int enabled)
-{
- return mc13783_rtc_irq_enable(dev, enabled, MC13783_IRQ_TODA);
-}
-
-static const struct rtc_class_ops mc13783_rtc_ops = {
- .read_time = mc13783_rtc_read_time,
- .set_mmss = mc13783_rtc_set_mmss,
- .read_alarm = mc13783_rtc_read_alarm,
- .set_alarm = mc13783_rtc_set_alarm,
- .alarm_irq_enable = mc13783_rtc_alarm_irq_enable,
- .update_irq_enable = mc13783_rtc_update_irq_enable,
-};
-
-static irqreturn_t mc13783_rtc_reset_handler(int irq, void *dev)
-{
- struct mc13783_rtc *priv = dev;
- struct mc13783 *mc13783 = priv->mc13783;
-
- dev_dbg(&priv->rtc->dev, "RTCRST\n");
- priv->valid = 0;
-
- mc13783_irq_mask(mc13783, irq);
-
- return IRQ_HANDLED;
-}
-
-static int __devinit mc13783_rtc_probe(struct platform_device *pdev)
-{
- int ret;
- struct mc13783_rtc *priv;
- struct mc13783 *mc13783;
- int rtcrst_pending;
-
- priv = kzalloc(sizeof(*priv), GFP_KERNEL);
- if (!priv)
- return -ENOMEM;
-
- mc13783 = dev_get_drvdata(pdev->dev.parent);
- priv->mc13783 = mc13783;
-
- platform_set_drvdata(pdev, priv);
-
- mc13783_lock(mc13783);
-
- ret = mc13783_irq_request(mc13783, MC13783_IRQ_RTCRST,
- mc13783_rtc_reset_handler, DRIVER_NAME, priv);
- if (ret)
- goto err_reset_irq_request;
-
- ret = mc13783_irq_status(mc13783, MC13783_IRQ_RTCRST,
- NULL, &rtcrst_pending);
- if (ret)
- goto err_reset_irq_status;
-
- priv->valid = !rtcrst_pending;
-
- ret = mc13783_irq_request_nounmask(mc13783, MC13783_IRQ_1HZ,
- mc13783_rtc_update_handler, DRIVER_NAME, priv);
- if (ret)
- goto err_update_irq_request;
-
- ret = mc13783_irq_request_nounmask(mc13783, MC13783_IRQ_TODA,
- mc13783_rtc_alarm_handler, DRIVER_NAME, priv);
- if (ret)
- goto err_alarm_irq_request;
-
- priv->rtc = rtc_device_register(pdev->name,
- &pdev->dev, &mc13783_rtc_ops, THIS_MODULE);
- if (IS_ERR(priv->rtc)) {
- ret = PTR_ERR(priv->rtc);
-
- mc13783_irq_free(mc13783, MC13783_IRQ_TODA, priv);
-err_alarm_irq_request:
-
- mc13783_irq_free(mc13783, MC13783_IRQ_1HZ, priv);
-err_update_irq_request:
-
-err_reset_irq_status:
-
- mc13783_irq_free(mc13783, MC13783_IRQ_RTCRST, priv);
-err_reset_irq_request:
-
- platform_set_drvdata(pdev, NULL);
- kfree(priv);
- }
-
- mc13783_unlock(mc13783);
-
- return ret;
-}
-
-static int __devexit mc13783_rtc_remove(struct platform_device *pdev)
-{
- struct mc13783_rtc *priv = platform_get_drvdata(pdev);
-
- mc13783_lock(priv->mc13783);
-
- rtc_device_unregister(priv->rtc);
-
- mc13783_irq_free(priv->mc13783, MC13783_IRQ_TODA, priv);
- mc13783_irq_free(priv->mc13783, MC13783_IRQ_1HZ, priv);
- mc13783_irq_free(priv->mc13783, MC13783_IRQ_RTCRST, priv);
-
- mc13783_unlock(priv->mc13783);
-
- platform_set_drvdata(pdev, NULL);
-
- kfree(priv);
-
- return 0;
-}
-
-static struct platform_driver mc13783_rtc_driver = {
- .remove = __devexit_p(mc13783_rtc_remove),
- .driver = {
- .name = DRIVER_NAME,
- .owner = THIS_MODULE,
- },
-};
-
-static int __init mc13783_rtc_init(void)
-{
- return platform_driver_probe(&mc13783_rtc_driver, &mc13783_rtc_probe);
-}
-module_init(mc13783_rtc_init);
-
-static void __exit mc13783_rtc_exit(void)
-{
- platform_driver_unregister(&mc13783_rtc_driver);
-}
-module_exit(mc13783_rtc_exit);
-
-MODULE_AUTHOR("Sascha Hauer <s.hauer@pengutronix.de>");
-MODULE_DESCRIPTION("RTC driver for Freescale MC13783 PMIC");
-MODULE_LICENSE("GPL v2");
-MODULE_ALIAS("platform:" DRIVER_NAME);
diff --git a/drivers/rtc/rtc-mc13xxx.c b/drivers/rtc/rtc-mc13xxx.c
new file mode 100644
index 000000000000..5314b153bfba
--- /dev/null
+++ b/drivers/rtc/rtc-mc13xxx.c
@@ -0,0 +1,437 @@
+/*
+ * Real Time Clock driver for Freescale MC13XXX PMIC
+ *
+ * (C) 2009 Sascha Hauer, Pengutronix
+ * (C) 2009 Uwe Kleine-Koenig, Pengutronix
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/mfd/mc13xxx.h>
+#include <linux/platform_device.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/slab.h>
+#include <linux/rtc.h>
+
+#define DRIVER_NAME "mc13xxx-rtc"
+
+#define MC13XXX_RTCTOD 20
+#define MC13XXX_RTCTODA 21
+#define MC13XXX_RTCDAY 22
+#define MC13XXX_RTCDAYA 23
+
+struct mc13xxx_rtc {
+ struct rtc_device *rtc;
+ struct mc13xxx *mc13xxx;
+ int valid;
+};
+
+static int mc13xxx_rtc_irq_enable_unlocked(struct device *dev,
+ unsigned int enabled, int irq)
+{
+ struct mc13xxx_rtc *priv = dev_get_drvdata(dev);
+ int (*func)(struct mc13xxx *mc13xxx, int irq);
+
+ if (!priv->valid)
+ return -ENODATA;
+
+ func = enabled ? mc13xxx_irq_unmask : mc13xxx_irq_mask;
+ return func(priv->mc13xxx, irq);
+}
+
+static int mc13xxx_rtc_irq_enable(struct device *dev,
+ unsigned int enabled, int irq)
+{
+ struct mc13xxx_rtc *priv = dev_get_drvdata(dev);
+ int ret;
+
+ mc13xxx_lock(priv->mc13xxx);
+
+ ret = mc13xxx_rtc_irq_enable_unlocked(dev, enabled, irq);
+
+ mc13xxx_unlock(priv->mc13xxx);
+
+ return ret;
+}
+
+static int mc13xxx_rtc_read_time(struct device *dev, struct rtc_time *tm)
+{
+ struct mc13xxx_rtc *priv = dev_get_drvdata(dev);
+ unsigned int seconds, days1, days2;
+ unsigned long s1970;
+ int ret;
+
+ mc13xxx_lock(priv->mc13xxx);
+
+ if (!priv->valid) {
+ ret = -ENODATA;
+ goto out;
+ }
+
+ ret = mc13xxx_reg_read(priv->mc13xxx, MC13XXX_RTCDAY, &days1);
+ if (unlikely(ret))
+ goto out;
+
+ ret = mc13xxx_reg_read(priv->mc13xxx, MC13XXX_RTCTOD, &seconds);
+ if (unlikely(ret))
+ goto out;
+
+ ret = mc13xxx_reg_read(priv->mc13xxx, MC13XXX_RTCDAY, &days2);
+out:
+ mc13xxx_unlock(priv->mc13xxx);
+
+ if (ret)
+ return ret;
+
+ if (days2 == days1 + 1) {
+ if (seconds >= 86400 / 2)
+ days2 = days1;
+ else
+ days1 = days2;
+ }
+
+ if (days1 != days2)
+ return -EIO;
+
+ s1970 = days1 * 86400 + seconds;
+
+ rtc_time_to_tm(s1970, tm);
+
+ return rtc_valid_tm(tm);
+}
+
+static int mc13xxx_rtc_set_mmss(struct device *dev, unsigned long secs)
+{
+ struct mc13xxx_rtc *priv = dev_get_drvdata(dev);
+ unsigned int seconds, days;
+ unsigned int alarmseconds;
+ int ret;
+
+ seconds = secs % 86400;
+ days = secs / 86400;
+
+ mc13xxx_lock(priv->mc13xxx);
+
+ /*
+ * Temporarily invalidate the alarm to prevent it from triggering while
+ * the day has already been updated but the time has not been written yet.
+ */
+ ret = mc13xxx_reg_read(priv->mc13xxx, MC13XXX_RTCTODA, &alarmseconds);
+ if (unlikely(ret))
+ goto out;
+
+ if (alarmseconds < 86400) {
+ ret = mc13xxx_reg_write(priv->mc13xxx,
+ MC13XXX_RTCTODA, 0x1ffff);
+ if (unlikely(ret))
+ goto out;
+ }
+
+ /*
+ * write seconds=0 to prevent a day switch between writing days
+ * and seconds below
+ */
+ ret = mc13xxx_reg_write(priv->mc13xxx, MC13XXX_RTCTOD, 0);
+ if (unlikely(ret))
+ goto out;
+
+ ret = mc13xxx_reg_write(priv->mc13xxx, MC13XXX_RTCDAY, days);
+ if (unlikely(ret))
+ goto out;
+
+ ret = mc13xxx_reg_write(priv->mc13xxx, MC13XXX_RTCTOD, seconds);
+ if (unlikely(ret))
+ goto out;
+
+ /* restore alarm */
+ if (alarmseconds < 86400) {
+ ret = mc13xxx_reg_write(priv->mc13xxx,
+ MC13XXX_RTCTODA, alarmseconds);
+ if (unlikely(ret))
+ goto out;
+ }
+
+ ret = mc13xxx_irq_ack(priv->mc13xxx, MC13XXX_IRQ_RTCRST);
+ if (unlikely(ret))
+ goto out;
+
+ ret = mc13xxx_irq_unmask(priv->mc13xxx, MC13XXX_IRQ_RTCRST);
+out:
+ priv->valid = !ret;
+
+ mc13xxx_unlock(priv->mc13xxx);
+
+ return ret;
+}
+
+static int mc13xxx_rtc_read_alarm(struct device *dev, struct rtc_wkalrm *alarm)
+{
+ struct mc13xxx_rtc *priv = dev_get_drvdata(dev);
+ unsigned seconds, days;
+ unsigned long s1970;
+ int enabled, pending;
+ int ret;
+
+ mc13xxx_lock(priv->mc13xxx);
+
+ ret = mc13xxx_reg_read(priv->mc13xxx, MC13XXX_RTCTODA, &seconds);
+ if (unlikely(ret))
+ goto out;
+ if (seconds >= 86400) {
+ ret = -ENODATA;
+ goto out;
+ }
+
+ ret = mc13xxx_reg_read(priv->mc13xxx, MC13XXX_RTCDAY, &days);
+ if (unlikely(ret))
+ goto out;
+
+ ret = mc13xxx_irq_status(priv->mc13xxx, MC13XXX_IRQ_TODA,
+ &enabled, &pending);
+
+out:
+ mc13xxx_unlock(priv->mc13xxx);
+
+ if (ret)
+ return ret;
+
+ alarm->enabled = enabled;
+ alarm->pending = pending;
+
+ s1970 = days * 86400 + seconds;
+
+ rtc_time_to_tm(s1970, &alarm->time);
+ dev_dbg(dev, "%s: %lu\n", __func__, s1970);
+
+ return 0;
+}
+
+static int mc13xxx_rtc_set_alarm(struct device *dev, struct rtc_wkalrm *alarm)
+{
+ struct mc13xxx_rtc *priv = dev_get_drvdata(dev);
+ unsigned long s1970;
+ unsigned seconds, days;
+ int ret;
+
+ mc13xxx_lock(priv->mc13xxx);
+
+ /* disable alarm to prevent false triggering */
+ ret = mc13xxx_reg_write(priv->mc13xxx, MC13XXX_RTCTODA, 0x1ffff);
+ if (unlikely(ret))
+ goto out;
+
+ ret = mc13xxx_irq_ack(priv->mc13xxx, MC13XXX_IRQ_TODA);
+ if (unlikely(ret))
+ goto out;
+
+ ret = rtc_tm_to_time(&alarm->time, &s1970);
+ if (unlikely(ret))
+ goto out;
+
+ dev_dbg(dev, "%s: o%2.s %lu\n", __func__, alarm->enabled ? "n" : "ff",
+ s1970);
+
+ ret = mc13xxx_rtc_irq_enable_unlocked(dev, alarm->enabled,
+ MC13XXX_IRQ_TODA);
+ if (unlikely(ret))
+ goto out;
+
+ seconds = s1970 % 86400;
+ days = s1970 / 86400;
+
+ ret = mc13xxx_reg_write(priv->mc13xxx, MC13XXX_RTCDAYA, days);
+ if (unlikely(ret))
+ goto out;
+
+ ret = mc13xxx_reg_write(priv->mc13xxx, MC13XXX_RTCTODA, seconds);
+
+out:
+ mc13xxx_unlock(priv->mc13xxx);
+
+ return ret;
+}
+
+static irqreturn_t mc13xxx_rtc_alarm_handler(int irq, void *dev)
+{
+ struct mc13xxx_rtc *priv = dev;
+ struct mc13xxx *mc13xxx = priv->mc13xxx;
+
+ dev_dbg(&priv->rtc->dev, "Alarm\n");
+
+ rtc_update_irq(priv->rtc, 1, RTC_IRQF | RTC_AF);
+
+ mc13xxx_irq_ack(mc13xxx, irq);
+
+ return IRQ_HANDLED;
+}
+
+static irqreturn_t mc13xxx_rtc_update_handler(int irq, void *dev)
+{
+ struct mc13xxx_rtc *priv = dev;
+ struct mc13xxx *mc13xxx = priv->mc13xxx;
+
+ dev_dbg(&priv->rtc->dev, "1HZ\n");
+
+ rtc_update_irq(priv->rtc, 1, RTC_IRQF | RTC_UF);
+
+ mc13xxx_irq_ack(mc13xxx, irq);
+
+ return IRQ_HANDLED;
+}
+
+static int mc13xxx_rtc_update_irq_enable(struct device *dev,
+ unsigned int enabled)
+{
+ return mc13xxx_rtc_irq_enable(dev, enabled, MC13XXX_IRQ_1HZ);
+}
+
+static int mc13xxx_rtc_alarm_irq_enable(struct device *dev,
+ unsigned int enabled)
+{
+ return mc13xxx_rtc_irq_enable(dev, enabled, MC13XXX_IRQ_TODA);
+}
+
+static const struct rtc_class_ops mc13xxx_rtc_ops = {
+ .read_time = mc13xxx_rtc_read_time,
+ .set_mmss = mc13xxx_rtc_set_mmss,
+ .read_alarm = mc13xxx_rtc_read_alarm,
+ .set_alarm = mc13xxx_rtc_set_alarm,
+ .alarm_irq_enable = mc13xxx_rtc_alarm_irq_enable,
+ .update_irq_enable = mc13xxx_rtc_update_irq_enable,
+};
+
+static irqreturn_t mc13xxx_rtc_reset_handler(int irq, void *dev)
+{
+ struct mc13xxx_rtc *priv = dev;
+ struct mc13xxx *mc13xxx = priv->mc13xxx;
+
+ dev_dbg(&priv->rtc->dev, "RTCRST\n");
+ priv->valid = 0;
+
+ mc13xxx_irq_mask(mc13xxx, irq);
+
+ return IRQ_HANDLED;
+}
+
+static int __devinit mc13xxx_rtc_probe(struct platform_device *pdev)
+{
+ int ret;
+ struct mc13xxx_rtc *priv;
+ struct mc13xxx *mc13xxx;
+ int rtcrst_pending;
+
+ priv = kzalloc(sizeof(*priv), GFP_KERNEL);
+ if (!priv)
+ return -ENOMEM;
+
+ mc13xxx = dev_get_drvdata(pdev->dev.parent);
+ priv->mc13xxx = mc13xxx;
+
+ platform_set_drvdata(pdev, priv);
+
+ mc13xxx_lock(mc13xxx);
+
+ ret = mc13xxx_irq_request(mc13xxx, MC13XXX_IRQ_RTCRST,
+ mc13xxx_rtc_reset_handler, DRIVER_NAME, priv);
+ if (ret)
+ goto err_reset_irq_request;
+
+ ret = mc13xxx_irq_status(mc13xxx, MC13XXX_IRQ_RTCRST,
+ NULL, &rtcrst_pending);
+ if (ret)
+ goto err_reset_irq_status;
+
+ priv->valid = !rtcrst_pending;
+
+ ret = mc13xxx_irq_request_nounmask(mc13xxx, MC13XXX_IRQ_1HZ,
+ mc13xxx_rtc_update_handler, DRIVER_NAME, priv);
+ if (ret)
+ goto err_update_irq_request;
+
+ ret = mc13xxx_irq_request_nounmask(mc13xxx, MC13XXX_IRQ_TODA,
+ mc13xxx_rtc_alarm_handler, DRIVER_NAME, priv);
+ if (ret)
+ goto err_alarm_irq_request;
+
+ priv->rtc = rtc_device_register(pdev->name,
+ &pdev->dev, &mc13xxx_rtc_ops, THIS_MODULE);
+ if (IS_ERR(priv->rtc)) {
+ ret = PTR_ERR(priv->rtc);
+
+ mc13xxx_irq_free(mc13xxx, MC13XXX_IRQ_TODA, priv);
+err_alarm_irq_request:
+
+ mc13xxx_irq_free(mc13xxx, MC13XXX_IRQ_1HZ, priv);
+err_update_irq_request:
+
+err_reset_irq_status:
+
+ mc13xxx_irq_free(mc13xxx, MC13XXX_IRQ_RTCRST, priv);
+err_reset_irq_request:
+
+ platform_set_drvdata(pdev, NULL);
+ kfree(priv);
+ }
+
+ mc13xxx_unlock(mc13xxx);
+
+ return ret;
+}
+
+static int __devexit mc13xxx_rtc_remove(struct platform_device *pdev)
+{
+ struct mc13xxx_rtc *priv = platform_get_drvdata(pdev);
+
+ mc13xxx_lock(priv->mc13xxx);
+
+ rtc_device_unregister(priv->rtc);
+
+ mc13xxx_irq_free(priv->mc13xxx, MC13XXX_IRQ_TODA, priv);
+ mc13xxx_irq_free(priv->mc13xxx, MC13XXX_IRQ_1HZ, priv);
+ mc13xxx_irq_free(priv->mc13xxx, MC13XXX_IRQ_RTCRST, priv);
+
+ mc13xxx_unlock(priv->mc13xxx);
+
+ platform_set_drvdata(pdev, NULL);
+
+ kfree(priv);
+
+ return 0;
+}
+
+const struct platform_device_id mc13xxx_rtc_idtable[] = {
+ {
+ .name = "mc13783-rtc",
+ }, {
+ .name = "mc13892-rtc",
+ },
+};
+
+static struct platform_driver mc13xxx_rtc_driver = {
+ .id_table = mc13xxx_rtc_idtable,
+ .remove = __devexit_p(mc13xxx_rtc_remove),
+ .driver = {
+ .name = DRIVER_NAME,
+ .owner = THIS_MODULE,
+ },
+};
+
+static int __init mc13xxx_rtc_init(void)
+{
+ return platform_driver_probe(&mc13xxx_rtc_driver, &mc13xxx_rtc_probe);
+}
+module_init(mc13xxx_rtc_init);
+
+static void __exit mc13xxx_rtc_exit(void)
+{
+ platform_driver_unregister(&mc13xxx_rtc_driver);
+}
+module_exit(mc13xxx_rtc_exit);
+
+MODULE_AUTHOR("Sascha Hauer <s.hauer@pengutronix.de>");
+MODULE_DESCRIPTION("RTC driver for Freescale MC13XXX PMIC");
+MODULE_LICENSE("GPL v2");
+MODULE_ALIAS("platform:" DRIVER_NAME);
diff --git a/drivers/rtc/rtc-omap.c b/drivers/rtc/rtc-omap.c
index 64d9727b7229..73377b0d65da 100644
--- a/drivers/rtc/rtc-omap.c
+++ b/drivers/rtc/rtc-omap.c
@@ -34,7 +34,8 @@
* Board-specific wiring options include using split power mode with
* RTC_OFF_NOFF used as the reset signal (so the RTC won't be reset),
* and wiring RTC_WAKE_INT (so the RTC alarm can wake the system from
- * low power modes). See the BOARD-SPECIFIC CUSTOMIZATION comment.
+ * low power modes) for OMAP1 boards (OMAP-L138 has this built into
+ * the SoC). See the BOARD-SPECIFIC CUSTOMIZATION comment.
*/
#define OMAP_RTC_BASE 0xfffb4800
@@ -401,16 +402,17 @@ static int __init omap_rtc_probe(struct platform_device *pdev)
/* BOARD-SPECIFIC CUSTOMIZATION CAN GO HERE:
*
- * - Boards wired so that RTC_WAKE_INT does something, and muxed
- * right (W13_1610_RTC_WAKE_INT is the default after chip reset),
- * should initialize the device wakeup flag appropriately.
+ * - Device wake-up capability setting should come through chip
+ * init logic. OMAP1 boards should initialize the "wakeup capable"
+ * flag in the platform device if the board is wired right for
+ * being woken up by the RTC alarm. For OMAP-L138, this capability
+ * is built into the SoC as part of its "Deep Sleep" support.
*
* - Boards wired so RTC_ON_nOFF is used as the reset signal,
* rather than nPWRON_RESET, should forcibly enable split
* power mode. (Some chip errata report that RTC_CTRL_SPLIT
* is write-only, and always reads as zero...)
*/
- device_init_wakeup(&pdev->dev, 0);
if (new_ctrl & (u8) OMAP_RTC_CTRL_SPLIT)
pr_info("%s: split power mode\n", pdev->name);
diff --git a/drivers/rtc/rtc-rs5c313.c b/drivers/rtc/rtc-rs5c313.c
index e6ea3f5ee1eb..e3ff179b99ca 100644
--- a/drivers/rtc/rtc-rs5c313.c
+++ b/drivers/rtc/rtc-rs5c313.c
@@ -80,21 +80,21 @@
/* SCSPTR1 data */
unsigned char scsptr1_data;
-#define RS5C313_CEENABLE ctrl_outb(RS5C313_CE_RTCCE, RS5C313_CE);
-#define RS5C313_CEDISABLE ctrl_outb(0x00, RS5C313_CE)
-#define RS5C313_MISCOP ctrl_outb(0x02, 0xB0000008)
+#define RS5C313_CEENABLE __raw_writeb(RS5C313_CE_RTCCE, RS5C313_CE);
+#define RS5C313_CEDISABLE __raw_writeb(0x00, RS5C313_CE)
+#define RS5C313_MISCOP __raw_writeb(0x02, 0xB0000008)
static void rs5c313_init_port(void)
{
/* Set SCK as I/O port and Initialize SCSPTR1 data & I/O port. */
- ctrl_outb(ctrl_inb(SCSMR1) & ~SCSMR1_CA, SCSMR1);
- ctrl_outb(ctrl_inb(SCSCR1) & ~SCSCR1_CKE, SCSCR1);
+ __raw_writeb(__raw_readb(SCSMR1) & ~SCSMR1_CA, SCSMR1);
+ __raw_writeb(__raw_readb(SCSCR1) & ~SCSCR1_CKE, SCSCR1);
/* And Initialize SCL for RS5C313 clock */
- scsptr1_data = ctrl_inb(SCSPTR1) | SCL; /* SCL:H */
- ctrl_outb(scsptr1_data, SCSPTR1);
- scsptr1_data = ctrl_inb(SCSPTR1) | SCL_OEN; /* SCL output enable */
- ctrl_outb(scsptr1_data, SCSPTR1);
+ scsptr1_data = __raw_readb(SCSPTR1) | SCL; /* SCL:H */
+ __raw_writeb(scsptr1_data, SCSPTR1);
+ scsptr1_data = __raw_readb(SCSPTR1) | SCL_OEN; /* SCL output enable */
+ __raw_writeb(scsptr1_data, SCSPTR1);
RS5C313_CEDISABLE; /* CE:L */
}
@@ -106,21 +106,21 @@ static void rs5c313_write_data(unsigned char data)
/* SDA:Write Data */
scsptr1_data = (scsptr1_data & ~SDA) |
((((0x80 >> i) & data) >> (7 - i)) << 2);
- ctrl_outb(scsptr1_data, SCSPTR1);
+ __raw_writeb(scsptr1_data, SCSPTR1);
if (i == 0) {
scsptr1_data |= SDA_OEN; /* SDA:output enable */
- ctrl_outb(scsptr1_data, SCSPTR1);
+ __raw_writeb(scsptr1_data, SCSPTR1);
}
ndelay(700);
scsptr1_data &= ~SCL; /* SCL:L */
- ctrl_outb(scsptr1_data, SCSPTR1);
+ __raw_writeb(scsptr1_data, SCSPTR1);
ndelay(700);
scsptr1_data |= SCL; /* SCL:H */
- ctrl_outb(scsptr1_data, SCSPTR1);
+ __raw_writeb(scsptr1_data, SCSPTR1);
}
scsptr1_data &= ~SDA_OEN; /* SDA:output disable */
- ctrl_outb(scsptr1_data, SCSPTR1);
+ __raw_writeb(scsptr1_data, SCSPTR1);
}
static unsigned char rs5c313_read_data(void)
@@ -131,12 +131,12 @@ static unsigned char rs5c313_read_data(void)
for (i = 0; i < 8; i++) {
ndelay(700);
/* SDA:Read Data */
- data |= ((ctrl_inb(SCSPTR1) & SDA) >> 2) << (7 - i);
+ data |= ((__raw_readb(SCSPTR1) & SDA) >> 2) << (7 - i);
scsptr1_data &= ~SCL; /* SCL:L */
- ctrl_outb(scsptr1_data, SCSPTR1);
+ __raw_writeb(scsptr1_data, SCSPTR1);
ndelay(700);
scsptr1_data |= SCL; /* SCL:H */
- ctrl_outb(scsptr1_data, SCSPTR1);
+ __raw_writeb(scsptr1_data, SCSPTR1);
}
return data & 0x0F;
}
diff --git a/drivers/rtc/rtc-s3c.c b/drivers/rtc/rtc-s3c.c
index f57a87f4ae96..cf953ecbfca9 100644
--- a/drivers/rtc/rtc-s3c.c
+++ b/drivers/rtc/rtc-s3c.c
@@ -100,7 +100,7 @@ static int s3c_rtc_setpie(struct device *dev, int enabled)
spin_lock_irq(&s3c_rtc_pie_lock);
if (s3c_rtc_cpu_type == TYPE_S3C64XX) {
- tmp = readb(s3c_rtc_base + S3C2410_RTCCON);
+ tmp = readw(s3c_rtc_base + S3C2410_RTCCON);
tmp &= ~S3C64XX_RTCCON_TICEN;
if (enabled)
@@ -171,8 +171,8 @@ static int s3c_rtc_gettime(struct device *dev, struct rtc_time *rtc_tm)
goto retry_get_time;
}
- pr_debug("read time %02x.%02x.%02x %02x/%02x/%02x\n",
- rtc_tm->tm_year, rtc_tm->tm_mon, rtc_tm->tm_mday,
+ pr_debug("read time %04d.%02d.%02d %02d:%02d:%02d\n",
+ 1900 + rtc_tm->tm_year, rtc_tm->tm_mon, rtc_tm->tm_mday,
rtc_tm->tm_hour, rtc_tm->tm_min, rtc_tm->tm_sec);
rtc_tm->tm_sec = bcd2bin(rtc_tm->tm_sec);
@@ -185,7 +185,7 @@ static int s3c_rtc_gettime(struct device *dev, struct rtc_time *rtc_tm)
rtc_tm->tm_year += 100;
rtc_tm->tm_mon -= 1;
- return 0;
+ return rtc_valid_tm(rtc_tm);
}
static int s3c_rtc_settime(struct device *dev, struct rtc_time *tm)
@@ -193,8 +193,8 @@ static int s3c_rtc_settime(struct device *dev, struct rtc_time *tm)
void __iomem *base = s3c_rtc_base;
int year = tm->tm_year - 100;
- pr_debug("set time %02d.%02d.%02d %02d/%02d/%02d\n",
- tm->tm_year, tm->tm_mon, tm->tm_mday,
+ pr_debug("set time %04d.%02d.%02d %02d:%02d:%02d\n",
+ 1900 + tm->tm_year, tm->tm_mon, tm->tm_mday,
tm->tm_hour, tm->tm_min, tm->tm_sec);
/* we get around y2k by simply not supporting it */
@@ -231,9 +231,9 @@ static int s3c_rtc_getalarm(struct device *dev, struct rtc_wkalrm *alrm)
alrm->enabled = (alm_en & S3C2410_RTCALM_ALMEN) ? 1 : 0;
- pr_debug("read alarm %02x %02x.%02x.%02x %02x/%02x/%02x\n",
+ pr_debug("read alarm %d, %04d.%02d.%02d %02d:%02d:%02d\n",
alm_en,
- alm_tm->tm_year, alm_tm->tm_mon, alm_tm->tm_mday,
+ 1900 + alm_tm->tm_year, alm_tm->tm_mon, alm_tm->tm_mday,
alm_tm->tm_hour, alm_tm->tm_min, alm_tm->tm_sec);
@@ -242,34 +242,34 @@ static int s3c_rtc_getalarm(struct device *dev, struct rtc_wkalrm *alrm)
if (alm_en & S3C2410_RTCALM_SECEN)
alm_tm->tm_sec = bcd2bin(alm_tm->tm_sec);
else
- alm_tm->tm_sec = 0xff;
+ alm_tm->tm_sec = -1;
if (alm_en & S3C2410_RTCALM_MINEN)
alm_tm->tm_min = bcd2bin(alm_tm->tm_min);
else
- alm_tm->tm_min = 0xff;
+ alm_tm->tm_min = -1;
if (alm_en & S3C2410_RTCALM_HOUREN)
alm_tm->tm_hour = bcd2bin(alm_tm->tm_hour);
else
- alm_tm->tm_hour = 0xff;
+ alm_tm->tm_hour = -1;
if (alm_en & S3C2410_RTCALM_DAYEN)
alm_tm->tm_mday = bcd2bin(alm_tm->tm_mday);
else
- alm_tm->tm_mday = 0xff;
+ alm_tm->tm_mday = -1;
if (alm_en & S3C2410_RTCALM_MONEN) {
alm_tm->tm_mon = bcd2bin(alm_tm->tm_mon);
alm_tm->tm_mon -= 1;
} else {
- alm_tm->tm_mon = 0xff;
+ alm_tm->tm_mon = -1;
}
if (alm_en & S3C2410_RTCALM_YEAREN)
alm_tm->tm_year = bcd2bin(alm_tm->tm_year);
else
- alm_tm->tm_year = 0xffff;
+ alm_tm->tm_year = -1;
return 0;
}
@@ -280,10 +280,10 @@ static int s3c_rtc_setalarm(struct device *dev, struct rtc_wkalrm *alrm)
void __iomem *base = s3c_rtc_base;
unsigned int alrm_en;
- pr_debug("s3c_rtc_setalarm: %d, %02x/%02x/%02x %02x.%02x.%02x\n",
+ pr_debug("s3c_rtc_setalarm: %d, %04d.%02d.%02d %02d:%02d:%02d\n",
alrm->enabled,
- tm->tm_mday & 0xff, tm->tm_mon & 0xff, tm->tm_year & 0xff,
- tm->tm_hour & 0xff, tm->tm_min & 0xff, tm->tm_sec);
+ 1900 + tm->tm_year, tm->tm_mon, tm->tm_mday,
+ tm->tm_hour, tm->tm_min, tm->tm_sec);
alrm_en = readb(base + S3C2410_RTCALM) & S3C2410_RTCALM_ALMEN;
@@ -318,7 +318,7 @@ static int s3c_rtc_proc(struct device *dev, struct seq_file *seq)
unsigned int ticnt;
if (s3c_rtc_cpu_type == TYPE_S3C64XX) {
- ticnt = readb(s3c_rtc_base + S3C2410_RTCCON);
+ ticnt = readw(s3c_rtc_base + S3C2410_RTCCON);
ticnt &= S3C64XX_RTCCON_TICEN;
} else {
ticnt = readb(s3c_rtc_base + S3C2410_TICNT);
@@ -379,7 +379,8 @@ static const struct rtc_class_ops s3c_rtcops = {
.set_alarm = s3c_rtc_setalarm,
.irq_set_freq = s3c_rtc_setfreq,
.irq_set_state = s3c_rtc_setpie,
- .proc = s3c_rtc_proc,
+ .proc = s3c_rtc_proc,
+ .alarm_irq_enable = s3c_rtc_setaie,
};
static void s3c_rtc_enable(struct platform_device *pdev, int en)
@@ -391,11 +392,11 @@ static void s3c_rtc_enable(struct platform_device *pdev, int en)
return;
if (!en) {
- tmp = readb(base + S3C2410_RTCCON);
+ tmp = readw(base + S3C2410_RTCCON);
if (s3c_rtc_cpu_type == TYPE_S3C64XX)
tmp &= ~S3C64XX_RTCCON_TICEN;
tmp &= ~S3C2410_RTCCON_RTCEN;
- writeb(tmp, base + S3C2410_RTCCON);
+ writew(tmp, base + S3C2410_RTCCON);
if (s3c_rtc_cpu_type == TYPE_S3C2410) {
tmp = readb(base + S3C2410_TICNT);
@@ -405,25 +406,28 @@ static void s3c_rtc_enable(struct platform_device *pdev, int en)
} else {
/* re-enable the device, and check it is ok */
- if ((readb(base+S3C2410_RTCCON) & S3C2410_RTCCON_RTCEN) == 0){
+ if ((readw(base+S3C2410_RTCCON) & S3C2410_RTCCON_RTCEN) == 0) {
dev_info(&pdev->dev, "rtc disabled, re-enabling\n");
- tmp = readb(base + S3C2410_RTCCON);
- writeb(tmp|S3C2410_RTCCON_RTCEN, base+S3C2410_RTCCON);
+ tmp = readw(base + S3C2410_RTCCON);
+ writew(tmp | S3C2410_RTCCON_RTCEN,
+ base + S3C2410_RTCCON);
}
- if ((readb(base + S3C2410_RTCCON) & S3C2410_RTCCON_CNTSEL)){
+ if ((readw(base + S3C2410_RTCCON) & S3C2410_RTCCON_CNTSEL)) {
dev_info(&pdev->dev, "removing RTCCON_CNTSEL\n");
- tmp = readb(base + S3C2410_RTCCON);
- writeb(tmp& ~S3C2410_RTCCON_CNTSEL, base+S3C2410_RTCCON);
+ tmp = readw(base + S3C2410_RTCCON);
+ writew(tmp & ~S3C2410_RTCCON_CNTSEL,
+ base + S3C2410_RTCCON);
}
- if ((readb(base + S3C2410_RTCCON) & S3C2410_RTCCON_CLKRST)){
+ if ((readw(base + S3C2410_RTCCON) & S3C2410_RTCCON_CLKRST)) {
dev_info(&pdev->dev, "removing RTCCON_CLKRST\n");
- tmp = readb(base + S3C2410_RTCCON);
- writeb(tmp & ~S3C2410_RTCCON_CLKRST, base+S3C2410_RTCCON);
+ tmp = readw(base + S3C2410_RTCCON);
+ writew(tmp & ~S3C2410_RTCCON_CLKRST,
+ base + S3C2410_RTCCON);
}
}
}
@@ -452,8 +456,8 @@ static int __devexit s3c_rtc_remove(struct platform_device *dev)
static int __devinit s3c_rtc_probe(struct platform_device *pdev)
{
struct rtc_device *rtc;
+ struct rtc_time rtc_tm;
struct resource *res;
- unsigned int tmp, i;
int ret;
pr_debug("%s: probe=%p\n", __func__, pdev);
@@ -514,8 +518,8 @@ static int __devinit s3c_rtc_probe(struct platform_device *pdev)
s3c_rtc_enable(pdev, 1);
- pr_debug("s3c2410_rtc: RTCCON=%02x\n",
- readb(s3c_rtc_base + S3C2410_RTCCON));
+ pr_debug("s3c2410_rtc: RTCCON=%02x\n",
+ readw(s3c_rtc_base + S3C2410_RTCCON));
device_init_wakeup(&pdev->dev, 1);
@@ -534,11 +538,19 @@ static int __devinit s3c_rtc_probe(struct platform_device *pdev)
/* Check RTC Time */
- for (i = S3C2410_RTCSEC; i <= S3C2410_RTCYEAR; i += 0x4) {
- tmp = readb(s3c_rtc_base + i);
+ s3c_rtc_gettime(NULL, &rtc_tm);
+
+ if (rtc_valid_tm(&rtc_tm)) {
+ rtc_tm.tm_year = 100;
+ rtc_tm.tm_mon = 0;
+ rtc_tm.tm_mday = 1;
+ rtc_tm.tm_hour = 0;
+ rtc_tm.tm_min = 0;
+ rtc_tm.tm_sec = 0;
+
+ s3c_rtc_settime(NULL, &rtc_tm);
- if ((tmp & 0xf) > 0x9 || ((tmp >> 4) & 0xf) > 0x9)
- writeb(0, s3c_rtc_base + i);
+ dev_warn(&pdev->dev, "warning: invalid RTC value so initializing it\n");
}
if (s3c_rtc_cpu_type == TYPE_S3C64XX)
@@ -578,7 +590,7 @@ static int s3c_rtc_suspend(struct platform_device *pdev, pm_message_t state)
/* save TICNT for anyone using periodic interrupts */
ticnt_save = readb(s3c_rtc_base + S3C2410_TICNT);
if (s3c_rtc_cpu_type == TYPE_S3C64XX) {
- ticnt_en_save = readb(s3c_rtc_base + S3C2410_RTCCON);
+ ticnt_en_save = readw(s3c_rtc_base + S3C2410_RTCCON);
ticnt_en_save &= S3C64XX_RTCCON_TICEN;
}
s3c_rtc_enable(pdev, 0);
@@ -596,8 +608,8 @@ static int s3c_rtc_resume(struct platform_device *pdev)
s3c_rtc_enable(pdev, 1);
writeb(ticnt_save, s3c_rtc_base + S3C2410_TICNT);
if (s3c_rtc_cpu_type == TYPE_S3C64XX && ticnt_en_save) {
- tmp = readb(s3c_rtc_base + S3C2410_RTCCON);
- writeb(tmp | ticnt_en_save, s3c_rtc_base + S3C2410_RTCCON);
+ tmp = readw(s3c_rtc_base + S3C2410_RTCCON);
+ writew(tmp | ticnt_en_save, s3c_rtc_base + S3C2410_RTCCON);
}
if (device_may_wakeup(&pdev->dev))
diff --git a/drivers/rtc/rtc-sh.c b/drivers/rtc/rtc-sh.c
index 5efbd5990ff8..06e41ed93230 100644
--- a/drivers/rtc/rtc-sh.c
+++ b/drivers/rtc/rtc-sh.c
@@ -761,7 +761,7 @@ err_unmap:
clk_put(rtc->clk);
iounmap(rtc->regbase);
err_badmap:
- release_resource(rtc->res);
+ release_mem_region(rtc->res->start, rtc->regsize);
err_badres:
kfree(rtc);
@@ -786,7 +786,7 @@ static int __exit sh_rtc_remove(struct platform_device *pdev)
}
iounmap(rtc->regbase);
- release_resource(rtc->res);
+ release_mem_region(rtc->res->start, rtc->regsize);
clk_disable(rtc->clk);
clk_put(rtc->clk);
diff --git a/drivers/s390/block/dasd.c b/drivers/s390/block/dasd.c
index aa95f1001761..fb613d70c2cb 100644
--- a/drivers/s390/block/dasd.c
+++ b/drivers/s390/block/dasd.c
@@ -1099,16 +1099,30 @@ void dasd_int_handler(struct ccw_device *cdev, unsigned long intparm,
cqr = (struct dasd_ccw_req *) intparm;
if (!cqr || ((scsw_cc(&irb->scsw) == 1) &&
(scsw_fctl(&irb->scsw) & SCSW_FCTL_START_FUNC) &&
- (scsw_stctl(&irb->scsw) & SCSW_STCTL_STATUS_PEND))) {
+ ((scsw_stctl(&irb->scsw) == SCSW_STCTL_STATUS_PEND) ||
+ (scsw_stctl(&irb->scsw) == (SCSW_STCTL_STATUS_PEND |
+ SCSW_STCTL_ALERT_STATUS))))) {
if (cqr && cqr->status == DASD_CQR_IN_IO)
cqr->status = DASD_CQR_QUEUED;
+ if (cqr)
+ memcpy(&cqr->irb, irb, sizeof(*irb));
device = dasd_device_from_cdev_locked(cdev);
- if (!IS_ERR(device)) {
- dasd_device_clear_timer(device);
- device->discipline->handle_unsolicited_interrupt(device,
- irb);
+ if (IS_ERR(device))
+ return;
+ /* ignore unsolicited interrupts for DIAG discipline */
+ if (device->discipline == dasd_diag_discipline_pointer) {
dasd_put_device(device);
+ return;
}
+ device->discipline->dump_sense_dbf(device, irb,
+ "unsolicited");
+ if ((device->features & DASD_FEATURE_ERPLOG))
+ device->discipline->dump_sense(device, cqr,
+ irb);
+ dasd_device_clear_timer(device);
+ device->discipline->handle_unsolicited_interrupt(device,
+ irb);
+ dasd_put_device(device);
return;
}
diff --git a/drivers/s390/block/dasd_3990_erp.c b/drivers/s390/block/dasd_3990_erp.c
index e82d427ff5eb..968c76cf7127 100644
--- a/drivers/s390/block/dasd_3990_erp.c
+++ b/drivers/s390/block/dasd_3990_erp.c
@@ -221,6 +221,7 @@ dasd_3990_erp_DCTL(struct dasd_ccw_req * erp, char modifier)
ccw->cmd_code = CCW_CMD_DCTL;
ccw->count = 4;
ccw->cda = (__u32)(addr_t) DCTL_data;
+ dctl_cqr->flags = erp->flags;
dctl_cqr->function = dasd_3990_erp_DCTL;
dctl_cqr->refers = erp;
dctl_cqr->startdev = device;
@@ -1710,6 +1711,7 @@ dasd_3990_erp_action_1B_32(struct dasd_ccw_req * default_erp, char *sense)
ccw->cda = cpa;
/* fill erp related fields */
+ erp->flags = default_erp->flags;
erp->function = dasd_3990_erp_action_1B_32;
erp->refers = default_erp->refers;
erp->startdev = device;
@@ -2354,6 +2356,7 @@ static struct dasd_ccw_req *dasd_3990_erp_add_erp(struct dasd_ccw_req *cqr)
ccw->cda = (long)(cqr->cpaddr);
}
+ erp->flags = cqr->flags;
erp->function = dasd_3990_erp_add_erp;
erp->refers = cqr;
erp->startdev = device;
diff --git a/drivers/s390/block/dasd_diag.c b/drivers/s390/block/dasd_diag.c
index 2b3bc3ec0541..266b34b55403 100644
--- a/drivers/s390/block/dasd_diag.c
+++ b/drivers/s390/block/dasd_diag.c
@@ -228,25 +228,22 @@ dasd_diag_term_IO(struct dasd_ccw_req * cqr)
}
/* Handle external interruption. */
-static void
-dasd_ext_handler(__u16 code)
+static void dasd_ext_handler(unsigned int ext_int_code,
+ unsigned int param32, unsigned long param64)
{
struct dasd_ccw_req *cqr, *next;
struct dasd_device *device;
unsigned long long expires;
unsigned long flags;
- u8 int_code, status;
addr_t ip;
int rc;
- int_code = *((u8 *) DASD_DIAG_LC_INT_CODE);
- status = *((u8 *) DASD_DIAG_LC_INT_STATUS);
- switch (int_code) {
+ switch (ext_int_code >> 24) {
case DASD_DIAG_CODE_31BIT:
- ip = (addr_t) *((u32 *) DASD_DIAG_LC_INT_PARM_31BIT);
+ ip = (addr_t) param32;
break;
case DASD_DIAG_CODE_64BIT:
- ip = (addr_t) *((u64 *) DASD_DIAG_LC_INT_PARM_64BIT);
+ ip = (addr_t) param64;
break;
default:
return;
@@ -281,7 +278,7 @@ dasd_ext_handler(__u16 code)
cqr->stopclk = get_clock();
expires = 0;
- if (status == 0) {
+ if ((ext_int_code & 0xff0000) == 0) {
cqr->status = DASD_CQR_SUCCESS;
/* Start first request on queue if possible -> fast_io. */
if (!list_empty(&device->ccw_queue)) {
@@ -296,8 +293,8 @@ dasd_ext_handler(__u16 code)
} else {
cqr->status = DASD_CQR_QUEUED;
DBF_DEV_EVENT(DBF_DEBUG, device, "interrupt status for "
- "request %p was %d (%d retries left)", cqr, status,
- cqr->retries);
+ "request %p was %d (%d retries left)", cqr,
+ (ext_int_code >> 16) & 0xff, cqr->retries);
dasd_diag_erp(device);
}
diff --git a/drivers/s390/block/dasd_diag.h b/drivers/s390/block/dasd_diag.h
index b8c78267ff3e..4f71fbe60c82 100644
--- a/drivers/s390/block/dasd_diag.h
+++ b/drivers/s390/block/dasd_diag.h
@@ -18,10 +18,6 @@
#define DEV_CLASS_FBA 0x01
#define DEV_CLASS_ECKD 0x04
-#define DASD_DIAG_LC_INT_CODE 132
-#define DASD_DIAG_LC_INT_STATUS 133
-#define DASD_DIAG_LC_INT_PARM_31BIT 128
-#define DASD_DIAG_LC_INT_PARM_64BIT 4536
#define DASD_DIAG_CODE_31BIT 0x03
#define DASD_DIAG_CODE_64BIT 0x07
diff --git a/drivers/s390/block/dasd_eckd.c b/drivers/s390/block/dasd_eckd.c
index 59b4ecfb967b..bf61274af3bb 100644
--- a/drivers/s390/block/dasd_eckd.c
+++ b/drivers/s390/block/dasd_eckd.c
@@ -1776,13 +1776,13 @@ static void dasd_eckd_handle_unsolicited_interrupt(struct dasd_device *device,
}
/* summary unit check */
- if ((scsw_dstat(&irb->scsw) & DEV_STAT_UNIT_CHECK) &&
- (irb->ecw[7] == 0x0D)) {
+ sense = dasd_get_sense(irb);
+ if (sense && (sense[7] == 0x0D) &&
+ (scsw_dstat(&irb->scsw) & DEV_STAT_UNIT_CHECK)) {
dasd_alias_handle_summary_unit_check(device, irb);
return;
}
- sense = dasd_get_sense(irb);
/* service information message SIM */
if (sense && !(sense[27] & DASD_SENSE_BIT_0) &&
((sense[6] & DASD_SIM_SENSE) == DASD_SIM_SENSE)) {
@@ -1791,26 +1791,15 @@ static void dasd_eckd_handle_unsolicited_interrupt(struct dasd_device *device,
return;
}
- if ((scsw_cc(&irb->scsw) == 1) &&
- (scsw_fctl(&irb->scsw) & SCSW_FCTL_START_FUNC) &&
- (scsw_actl(&irb->scsw) & SCSW_ACTL_START_PEND) &&
- (scsw_stctl(&irb->scsw) & SCSW_STCTL_STATUS_PEND)) {
+ if ((scsw_cc(&irb->scsw) == 1) && !sense &&
+ (scsw_fctl(&irb->scsw) == SCSW_FCTL_START_FUNC) &&
+ (scsw_actl(&irb->scsw) == SCSW_ACTL_START_PEND) &&
+ (scsw_stctl(&irb->scsw) == SCSW_STCTL_STATUS_PEND)) {
/* fake irb do nothing, they are handled elsewhere */
dasd_schedule_device_bh(device);
return;
}
- if (!sense) {
- /* just report other unsolicited interrupts */
- DBF_DEV_EVENT(DBF_ERR, device, "%s",
- "unsolicited interrupt received");
- } else {
- DBF_DEV_EVENT(DBF_ERR, device, "%s",
- "unsolicited interrupt received "
- "(sense available)");
- device->discipline->dump_sense_dbf(device, irb, "unsolicited");
- }
-
dasd_schedule_device_bh(device);
return;
};
@@ -2813,6 +2802,73 @@ dasd_eckd_steal_lock(struct dasd_device *device)
}
/*
+ * SNID - Sense Path Group ID
+ * This ioctl may be used in situations where I/O is stalled due to
+ * a reserve, so if the normal dasd_smalloc_request fails, we use the
+ * preallocated dasd_reserve_req.
+ */
+static int dasd_eckd_snid(struct dasd_device *device,
+ void __user *argp)
+{
+ struct dasd_ccw_req *cqr;
+ int rc;
+ struct ccw1 *ccw;
+ int useglobal;
+ struct dasd_snid_ioctl_data usrparm;
+
+ if (!capable(CAP_SYS_ADMIN))
+ return -EACCES;
+
+ if (copy_from_user(&usrparm, argp, sizeof(usrparm)))
+ return -EFAULT;
+
+ useglobal = 0;
+ cqr = dasd_smalloc_request(DASD_ECKD_MAGIC, 1,
+ sizeof(struct dasd_snid_data), device);
+ if (IS_ERR(cqr)) {
+ mutex_lock(&dasd_reserve_mutex);
+ useglobal = 1;
+ cqr = &dasd_reserve_req->cqr;
+ memset(cqr, 0, sizeof(*cqr));
+ memset(&dasd_reserve_req->ccw, 0,
+ sizeof(dasd_reserve_req->ccw));
+ cqr->cpaddr = &dasd_reserve_req->ccw;
+ cqr->data = &dasd_reserve_req->data;
+ cqr->magic = DASD_ECKD_MAGIC;
+ }
+ ccw = cqr->cpaddr;
+ ccw->cmd_code = DASD_ECKD_CCW_SNID;
+ ccw->flags |= CCW_FLAG_SLI;
+ ccw->count = 12;
+ ccw->cda = (__u32)(addr_t) cqr->data;
+ cqr->startdev = device;
+ cqr->memdev = device;
+ clear_bit(DASD_CQR_FLAGS_USE_ERP, &cqr->flags);
+ set_bit(DASD_CQR_FLAGS_FAILFAST, &cqr->flags);
+ cqr->retries = 5;
+ cqr->expires = 10 * HZ;
+ cqr->buildclk = get_clock();
+ cqr->status = DASD_CQR_FILLED;
+ cqr->lpm = usrparm.path_mask;
+
+ rc = dasd_sleep_on_immediatly(cqr);
+ /* verify that I/O processing didn't modify the path mask */
+ if (!rc && usrparm.path_mask && (cqr->lpm != usrparm.path_mask))
+ rc = -EIO;
+ if (!rc) {
+ usrparm.data = *((struct dasd_snid_data *)cqr->data);
+ if (copy_to_user(argp, &usrparm, sizeof(usrparm)))
+ rc = -EFAULT;
+ }
+
+ if (useglobal)
+ mutex_unlock(&dasd_reserve_mutex);
+ else
+ dasd_sfree_request(cqr, cqr->memdev);
+ return rc;
+}
+
+/*
* Read performance statistics
*/
static int
@@ -3047,6 +3103,8 @@ dasd_eckd_ioctl(struct dasd_block *block, unsigned int cmd, void __user *argp)
return dasd_eckd_reserve(device);
case BIODASDSLCK:
return dasd_eckd_steal_lock(device);
+ case BIODASDSNID:
+ return dasd_eckd_snid(device, argp);
case BIODASDSYMMIO:
return dasd_symm_io(device, argp);
default:
@@ -3093,19 +3151,19 @@ dasd_eckd_dump_sense_dbf(struct dasd_device *device, struct irb *irb,
char *reason)
{
u64 *sense;
+ u64 *stat;
sense = (u64 *) dasd_get_sense(irb);
+ stat = (u64 *) &irb->scsw;
if (sense) {
- DBF_DEV_EVENT(DBF_EMERG, device,
- "%s: %s %02x%02x%02x %016llx %016llx %016llx "
- "%016llx", reason,
- scsw_is_tm(&irb->scsw) ? "t" : "c",
- scsw_cc(&irb->scsw), scsw_cstat(&irb->scsw),
- scsw_dstat(&irb->scsw), sense[0], sense[1],
- sense[2], sense[3]);
+ DBF_DEV_EVENT(DBF_EMERG, device, "%s: %016llx %08x : "
+ "%016llx %016llx %016llx %016llx",
+ reason, *stat, *((u32 *) (stat + 1)),
+ sense[0], sense[1], sense[2], sense[3]);
} else {
- DBF_DEV_EVENT(DBF_EMERG, device, "%s",
- "SORRY - NO VALID SENSE AVAILABLE\n");
+ DBF_DEV_EVENT(DBF_EMERG, device, "%s: %016llx %08x : %s",
+ reason, *stat, *((u32 *) (stat + 1)),
+ "NO VALID SENSE");
}
}
@@ -3131,9 +3189,12 @@ static void dasd_eckd_dump_sense_ccw(struct dasd_device *device,
" I/O status report for device %s:\n",
dev_name(&device->cdev->dev));
len += sprintf(page + len, KERN_ERR PRINTK_HEADER
- " in req: %p CS: 0x%02X DS: 0x%02X CC: 0x%02X RC: %d\n",
- req, scsw_cstat(&irb->scsw), scsw_dstat(&irb->scsw),
- scsw_cc(&irb->scsw), req ? req->intrc : 0);
+ " in req: %p CC:%02X FC:%02X AC:%02X SC:%02X DS:%02X "
+ "CS:%02X RC:%d\n",
+ req, scsw_cc(&irb->scsw), scsw_fctl(&irb->scsw),
+ scsw_actl(&irb->scsw), scsw_stctl(&irb->scsw),
+ scsw_dstat(&irb->scsw), scsw_cstat(&irb->scsw),
+ req ? req->intrc : 0);
len += sprintf(page + len, KERN_ERR PRINTK_HEADER
" device %s: Failing CCW: %p\n",
dev_name(&device->cdev->dev),
@@ -3234,11 +3295,13 @@ static void dasd_eckd_dump_sense_tcw(struct dasd_device *device,
" I/O status report for device %s:\n",
dev_name(&device->cdev->dev));
len += sprintf(page + len, KERN_ERR PRINTK_HEADER
- " in req: %p CS: 0x%02X DS: 0x%02X CC: 0x%02X RC: %d "
- "fcxs: 0x%02X schxs: 0x%02X\n", req,
- scsw_cstat(&irb->scsw), scsw_dstat(&irb->scsw),
- scsw_cc(&irb->scsw), req->intrc,
- irb->scsw.tm.fcxs, irb->scsw.tm.schxs);
+ " in req: %p CC:%02X FC:%02X AC:%02X SC:%02X DS:%02X "
+ "CS:%02X fcxs:%02X schxs:%02X RC:%d\n",
+ req, scsw_cc(&irb->scsw), scsw_fctl(&irb->scsw),
+ scsw_actl(&irb->scsw), scsw_stctl(&irb->scsw),
+ scsw_dstat(&irb->scsw), scsw_cstat(&irb->scsw),
+ irb->scsw.tm.fcxs, irb->scsw.tm.schxs,
+ req ? req->intrc : 0);
len += sprintf(page + len, KERN_ERR PRINTK_HEADER
" device %s: Failing TCW: %p\n",
dev_name(&device->cdev->dev),
@@ -3246,7 +3309,7 @@ static void dasd_eckd_dump_sense_tcw(struct dasd_device *device,
tsb = NULL;
sense = NULL;
- if (irb->scsw.tm.tcw && (irb->scsw.tm.fcxs == 0x01))
+ if (irb->scsw.tm.tcw && (irb->scsw.tm.fcxs & 0x01))
tsb = tcw_get_tsb(
(struct tcw *)(unsigned long)irb->scsw.tm.tcw);
@@ -3344,7 +3407,7 @@ static void dasd_eckd_dump_sense_tcw(struct dasd_device *device,
static void dasd_eckd_dump_sense(struct dasd_device *device,
struct dasd_ccw_req *req, struct irb *irb)
{
- if (req && scsw_is_tm(&req->irb.scsw))
+ if (scsw_is_tm(&irb->scsw))
dasd_eckd_dump_sense_tcw(device, req, irb);
else
dasd_eckd_dump_sense_ccw(device, req, irb);
diff --git a/drivers/s390/block/dasd_eckd.h b/drivers/s390/block/dasd_eckd.h
index 0eb49655a6cd..12097c24f2f5 100644
--- a/drivers/s390/block/dasd_eckd.h
+++ b/drivers/s390/block/dasd_eckd.h
@@ -27,6 +27,7 @@
#define DASD_ECKD_CCW_WRITE_CKD 0x1d
#define DASD_ECKD_CCW_READ_CKD 0x1e
#define DASD_ECKD_CCW_PSF 0x27
+#define DASD_ECKD_CCW_SNID 0x34
#define DASD_ECKD_CCW_RSSD 0x3e
#define DASD_ECKD_CCW_LOCATE_RECORD 0x47
#define DASD_ECKD_CCW_SNSS 0x54
diff --git a/drivers/s390/block/dasd_eer.c b/drivers/s390/block/dasd_eer.c
index c71d89dba302..83b4615a3b62 100644
--- a/drivers/s390/block/dasd_eer.c
+++ b/drivers/s390/block/dasd_eer.c
@@ -17,7 +17,6 @@
#include <linux/device.h>
#include <linux/poll.h>
#include <linux/mutex.h>
-#include <linux/smp_lock.h>
#include <linux/err.h>
#include <linux/slab.h>
diff --git a/drivers/s390/block/dasd_proc.c b/drivers/s390/block/dasd_proc.c
index 2eb025592809..c4a6a31bd9cd 100644
--- a/drivers/s390/block/dasd_proc.c
+++ b/drivers/s390/block/dasd_proc.c
@@ -251,7 +251,6 @@ static ssize_t dasd_stats_proc_write(struct file *file,
buffer = dasd_get_user_string(user_buf, user_len);
if (IS_ERR(buffer))
return PTR_ERR(buffer);
- DBF_EVENT(DBF_DEBUG, "/proc/dasd/statictics: '%s'\n", buffer);
/* check for valid verbs */
str = skip_spaces(buffer);
diff --git a/drivers/s390/char/fs3270.c b/drivers/s390/char/fs3270.c
index eb28fb01a38a..f6489eb7e976 100644
--- a/drivers/s390/char/fs3270.c
+++ b/drivers/s390/char/fs3270.c
@@ -14,7 +14,6 @@
#include <linux/list.h>
#include <linux/slab.h>
#include <linux/types.h>
-#include <linux/smp_lock.h>
#include <asm/compat.h>
#include <asm/ccwdev.h>
diff --git a/drivers/s390/char/sclp.c b/drivers/s390/char/sclp.c
index 5707a80b96b6..35cc4686b99b 100644
--- a/drivers/s390/char/sclp.c
+++ b/drivers/s390/char/sclp.c
@@ -395,16 +395,16 @@ __sclp_find_req(u32 sccb)
/* Handler for external interruption. Perform request post-processing.
* Prepare read event data request if necessary. Start processing of next
* request on queue. */
-static void
-sclp_interrupt_handler(__u16 code)
+static void sclp_interrupt_handler(unsigned int ext_int_code,
+ unsigned int param32, unsigned long param64)
{
struct sclp_req *req;
u32 finished_sccb;
u32 evbuf_pending;
spin_lock(&sclp_lock);
- finished_sccb = S390_lowcore.ext_params & 0xfffffff8;
- evbuf_pending = S390_lowcore.ext_params & 0x3;
+ finished_sccb = param32 & 0xfffffff8;
+ evbuf_pending = param32 & 0x3;
if (finished_sccb) {
del_timer(&sclp_request_timer);
sclp_running_state = sclp_running_state_reset_pending;
@@ -819,12 +819,12 @@ EXPORT_SYMBOL(sclp_reactivate);
/* Handler for external interruption used during initialization. Modify
* request state to done. */
-static void
-sclp_check_handler(__u16 code)
+static void sclp_check_handler(unsigned int ext_int_code,
+ unsigned int param32, unsigned long param64)
{
u32 finished_sccb;
- finished_sccb = S390_lowcore.ext_params & 0xfffffff8;
+ finished_sccb = param32 & 0xfffffff8;
/* Is this the interrupt we are waiting for? */
if (finished_sccb == 0)
return;
diff --git a/drivers/s390/char/tape_char.c b/drivers/s390/char/tape_char.c
index 883e2db02bd3..e090a307fdee 100644
--- a/drivers/s390/char/tape_char.c
+++ b/drivers/s390/char/tape_char.c
@@ -17,7 +17,6 @@
#include <linux/types.h>
#include <linux/proc_fs.h>
#include <linux/mtio.h>
-#include <linux/smp_lock.h>
#include <linux/compat.h>
#include <asm/uaccess.h>
diff --git a/drivers/s390/char/tape_core.c b/drivers/s390/char/tape_core.c
index 29c2d73d719d..b3a3e8e8656e 100644
--- a/drivers/s390/char/tape_core.c
+++ b/drivers/s390/char/tape_core.c
@@ -209,29 +209,79 @@ tape_state_set(struct tape_device *device, enum tape_state newstate)
wake_up(&device->state_change_wq);
}
+struct tape_med_state_work_data {
+ struct tape_device *device;
+ enum tape_medium_state state;
+ struct work_struct work;
+};
+
+static void
+tape_med_state_work_handler(struct work_struct *work)
+{
+ static char env_state_loaded[] = "MEDIUM_STATE=LOADED";
+ static char env_state_unloaded[] = "MEDIUM_STATE=UNLOADED";
+ struct tape_med_state_work_data *p =
+ container_of(work, struct tape_med_state_work_data, work);
+ struct tape_device *device = p->device;
+ char *envp[] = { NULL, NULL };
+
+ switch (p->state) {
+ case MS_UNLOADED:
+ pr_info("%s: The tape cartridge has been successfully "
+ "unloaded\n", dev_name(&device->cdev->dev));
+ envp[0] = env_state_unloaded;
+ kobject_uevent_env(&device->cdev->dev.kobj, KOBJ_CHANGE, envp);
+ break;
+ case MS_LOADED:
+ pr_info("%s: A tape cartridge has been mounted\n",
+ dev_name(&device->cdev->dev));
+ envp[0] = env_state_loaded;
+ kobject_uevent_env(&device->cdev->dev.kobj, KOBJ_CHANGE, envp);
+ break;
+ default:
+ break;
+ }
+ tape_put_device(device);
+ kfree(p);
+}
+
+static void
+tape_med_state_work(struct tape_device *device, enum tape_medium_state state)
+{
+ struct tape_med_state_work_data *p;
+
+ p = kzalloc(sizeof(*p), GFP_ATOMIC);
+ if (p) {
+ INIT_WORK(&p->work, tape_med_state_work_handler);
+ p->device = tape_get_device(device);
+ p->state = state;
+ schedule_work(&p->work);
+ }
+}
+
void
tape_med_state_set(struct tape_device *device, enum tape_medium_state newstate)
{
- if (device->medium_state == newstate)
+ enum tape_medium_state oldstate;
+
+ oldstate = device->medium_state;
+ if (oldstate == newstate)
return;
+ device->medium_state = newstate;
switch(newstate){
case MS_UNLOADED:
device->tape_generic_status |= GMT_DR_OPEN(~0);
- if (device->medium_state == MS_LOADED)
- pr_info("%s: The tape cartridge has been successfully "
- "unloaded\n", dev_name(&device->cdev->dev));
+ if (oldstate == MS_LOADED)
+ tape_med_state_work(device, MS_UNLOADED);
break;
case MS_LOADED:
device->tape_generic_status &= ~GMT_DR_OPEN(~0);
- if (device->medium_state == MS_UNLOADED)
- pr_info("%s: A tape cartridge has been mounted\n",
- dev_name(&device->cdev->dev));
+ if (oldstate == MS_UNLOADED)
+ tape_med_state_work(device, MS_LOADED);
break;
default:
- // print nothing
break;
}
- device->medium_state = newstate;
wake_up(&device->state_change_wq);
}
@@ -1077,15 +1127,14 @@ __tape_do_irq (struct ccw_device *cdev, unsigned long intparm, struct irb *irb)
/* FIXME: What to do with the request? */
switch (PTR_ERR(irb)) {
case -ETIMEDOUT:
- DBF_LH(1, "(%s): Request timed out\n",
- dev_name(&cdev->dev));
+ DBF_LH(1, "(%08x): Request timed out\n",
+ device->cdev_id);
case -EIO:
__tape_end_request(device, request, -EIO);
break;
default:
- DBF_LH(1, "(%s): Unexpected i/o error %li\n",
- dev_name(&cdev->dev),
- PTR_ERR(irb));
+ DBF_LH(1, "(%08x): Unexpected i/o error %li\n",
+ device->cdev_id, PTR_ERR(irb));
}
return;
}
diff --git a/drivers/s390/char/tape_std.c b/drivers/s390/char/tape_std.c
index 03f07e5dd6e9..3c3f342149ec 100644
--- a/drivers/s390/char/tape_std.c
+++ b/drivers/s390/char/tape_std.c
@@ -47,8 +47,8 @@ tape_std_assign_timeout(unsigned long data)
device->cdev_id);
rc = tape_cancel_io(device, request);
if(rc)
- DBF_EVENT(3, "(%s): Assign timeout: Cancel failed with rc = %i\n",
- dev_name(&device->cdev->dev), rc);
+ DBF_EVENT(3, "(%08x): Assign timeout: Cancel failed with rc = "
+ "%i\n", device->cdev_id, rc);
}
int
diff --git a/drivers/s390/char/vmlogrdr.c b/drivers/s390/char/vmlogrdr.c
index 0d6dc4b92cc2..c837d7419a6a 100644
--- a/drivers/s390/char/vmlogrdr.c
+++ b/drivers/s390/char/vmlogrdr.c
@@ -30,7 +30,6 @@
#include <linux/kmod.h>
#include <linux/cdev.h>
#include <linux/device.h>
-#include <linux/smp_lock.h>
#include <linux/string.h>
MODULE_AUTHOR
@@ -215,7 +214,7 @@ static void vmlogrdr_iucv_message_pending(struct iucv_path *path,
static int vmlogrdr_get_recording_class_AB(void)
{
- char cp_command[]="QUERY COMMAND RECORDING ";
+ static const char cp_command[] = "QUERY COMMAND RECORDING ";
char cp_response[80];
char *tail;
int len,i;
@@ -249,27 +248,25 @@ static int vmlogrdr_recording(struct vmlogrdr_priv_t * logptr,
char cp_command[80];
char cp_response[160];
char *onoff, *qid_string;
+ int rc;
- memset(cp_command, 0x00, sizeof(cp_command));
- memset(cp_response, 0x00, sizeof(cp_response));
-
- onoff = ((action == 1) ? "ON" : "OFF");
+ onoff = ((action == 1) ? "ON" : "OFF");
qid_string = ((recording_class_AB == 1) ? " QID * " : "");
- /*
+ /*
* The recording commands need to be called with option QID
* for guests that have privilege classes A or B.
* Purging has to be done as separate step, because recording
* can't be switched on as long as records are on the queue.
* Doing both at the same time doesn't work.
*/
-
- if (purge) {
+ if (purge && (action == 1)) {
+ memset(cp_command, 0x00, sizeof(cp_command));
+ memset(cp_response, 0x00, sizeof(cp_response));
snprintf(cp_command, sizeof(cp_command),
"RECORDING %s PURGE %s",
logptr->recording_name,
qid_string);
-
cpcmd(cp_command, cp_response, sizeof(cp_response), NULL);
}
@@ -279,19 +276,33 @@ static int vmlogrdr_recording(struct vmlogrdr_priv_t * logptr,
logptr->recording_name,
onoff,
qid_string);
-
cpcmd(cp_command, cp_response, sizeof(cp_response), NULL);
/* The recording command will usually answer with 'Command complete'
* on success, but when the specific service was never connected
* before then there might be an additional informational message
* 'HCPCRC8072I Recording entry not found' before the
- * 'Command complete'. So I use strstr rather then the strncmp.
+ * 'Command complete'. So I use strstr rather than strncmp.
*/
if (strstr(cp_response,"Command complete"))
- return 0;
+ rc = 0;
else
- return -EIO;
+ rc = -EIO;
+ /*
+ * If we turn recording off, we have to purge any remaining records
+ * afterwards, as a large number of queued records may impact z/VM
+ * performance.
+ */
+ if (purge && (action == 0)) {
+ memset(cp_command, 0x00, sizeof(cp_command));
+ memset(cp_response, 0x00, sizeof(cp_response));
+ snprintf(cp_command, sizeof(cp_command),
+ "RECORDING %s PURGE %s",
+ logptr->recording_name,
+ qid_string);
+ cpcmd(cp_command, cp_response, sizeof(cp_response), NULL);
+ }
+ return rc;
}
@@ -638,7 +649,7 @@ static ssize_t vmlogrdr_recording_status_show(struct device_driver *driver,
char *buf)
{
- char cp_command[] = "QUERY RECORDING ";
+ static const char cp_command[] = "QUERY RECORDING ";
int len;
cpcmd(cp_command, buf, 4096, NULL);
diff --git a/drivers/s390/char/vmur.c b/drivers/s390/char/vmur.c
index 1de672f21037..f7e4ae6bf15a 100644
--- a/drivers/s390/char/vmur.c
+++ b/drivers/s390/char/vmur.c
@@ -13,7 +13,6 @@
#include <linux/cdev.h>
#include <linux/slab.h>
-#include <linux/smp_lock.h>
#include <asm/uaccess.h>
#include <asm/cio.h>
diff --git a/drivers/s390/cio/blacklist.c b/drivers/s390/cio/blacklist.c
index 13cb60162e42..76058a5166ed 100644
--- a/drivers/s390/cio/blacklist.c
+++ b/drivers/s390/cio/blacklist.c
@@ -79,17 +79,15 @@ static int pure_hex(char **cp, unsigned int *val, int min_digit,
int max_digit, int max_val)
{
int diff;
- unsigned int value;
diff = 0;
*val = 0;
- while (isxdigit(**cp) && (diff <= max_digit)) {
+ while (diff <= max_digit) {
+ int value = hex_to_bin(**cp);
- if (isdigit(**cp))
- value = **cp - '0';
- else
- value = tolower(**cp) - 'a' + 10;
+ if (value < 0)
+ break;
*val = *val * 16 + value;
(*cp)++;
diff++;
diff --git a/drivers/s390/cio/chp.c b/drivers/s390/cio/chp.c
index 6c9fa15aac7b..2d32233943a9 100644
--- a/drivers/s390/cio/chp.c
+++ b/drivers/s390/cio/chp.c
@@ -1,7 +1,7 @@
/*
* drivers/s390/cio/chp.c
*
- * Copyright IBM Corp. 1999,2007
+ * Copyright IBM Corp. 1999,2010
* Author(s): Cornelia Huck (cornelia.huck@de.ibm.com)
* Arnd Bergmann (arndb@de.ibm.com)
* Peter Oberparleiter <peter.oberparleiter@de.ibm.com>
@@ -54,12 +54,6 @@ static struct work_struct cfg_work;
/* Wait queue for configure completion events. */
static wait_queue_head_t cfg_wait_queue;
-/* Return channel_path struct for given chpid. */
-static inline struct channel_path *chpid_to_chp(struct chp_id chpid)
-{
- return channel_subsystems[chpid.cssid]->chps[chpid.id];
-}
-
/* Set vary state for given chpid. */
static void set_chp_logically_online(struct chp_id chpid, int onoff)
{
@@ -241,11 +235,13 @@ static ssize_t chp_status_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct channel_path *chp = to_channelpath(dev);
+ int status;
- if (!chp)
- return 0;
- return (chp_get_status(chp->chpid) ? sprintf(buf, "online\n") :
- sprintf(buf, "offline\n"));
+ mutex_lock(&chp->lock);
+ status = chp->state;
+ mutex_unlock(&chp->lock);
+
+ return status ? sprintf(buf, "online\n") : sprintf(buf, "offline\n");
}
static ssize_t chp_status_write(struct device *dev,
@@ -261,15 +257,18 @@ static ssize_t chp_status_write(struct device *dev,
if (!num_args)
return count;
- if (!strnicmp(cmd, "on", 2) || !strcmp(cmd, "1"))
+ if (!strnicmp(cmd, "on", 2) || !strcmp(cmd, "1")) {
+ mutex_lock(&cp->lock);
error = s390_vary_chpid(cp->chpid, 1);
- else if (!strnicmp(cmd, "off", 3) || !strcmp(cmd, "0"))
+ mutex_unlock(&cp->lock);
+ } else if (!strnicmp(cmd, "off", 3) || !strcmp(cmd, "0")) {
+ mutex_lock(&cp->lock);
error = s390_vary_chpid(cp->chpid, 0);
- else
+ mutex_unlock(&cp->lock);
+ } else
error = -EINVAL;
return error < 0 ? error : count;
-
}
static DEVICE_ATTR(status, 0644, chp_status_show, chp_status_write);
@@ -315,10 +314,12 @@ static ssize_t chp_type_show(struct device *dev, struct device_attribute *attr,
char *buf)
{
struct channel_path *chp = to_channelpath(dev);
+ u8 type;
- if (!chp)
- return 0;
- return sprintf(buf, "%x\n", chp->desc.desc);
+ mutex_lock(&chp->lock);
+ type = chp->desc.desc;
+ mutex_unlock(&chp->lock);
+ return sprintf(buf, "%x\n", type);
}
static DEVICE_ATTR(type, 0444, chp_type_show, NULL);
@@ -395,6 +396,7 @@ int chp_new(struct chp_id chpid)
chp->state = 1;
chp->dev.parent = &channel_subsystems[chpid.cssid]->device;
chp->dev.release = chp_release;
+ mutex_init(&chp->lock);
/* Obtain channel path description and fill it in. */
ret = chsc_determine_base_channel_path_desc(chpid, &chp->desc);
@@ -464,7 +466,10 @@ void *chp_get_chp_desc(struct chp_id chpid)
desc = kmalloc(sizeof(struct channel_path_desc), GFP_KERNEL);
if (!desc)
return NULL;
+
+ mutex_lock(&chp->lock);
memcpy(desc, &chp->desc, sizeof(struct channel_path_desc));
+ mutex_unlock(&chp->lock);
return desc;
}
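
The chp.c changes above all follow one pattern: readers of the channel_path fields (chp_status_show, chp_type_show, chp_get_chp_desc) take the new chp->lock mutex, copy the field they need, drop the lock, and only then format or return the snapshot. A small userspace analogy of that pattern, using a pthread mutex in place of the kernel mutex, might look like this (the struct layout is illustrative, not the real struct channel_path):

#include <pthread.h>
#include <stdio.h>

/* Userspace analogy of the chp->lock pattern. */
struct channel_path {
	pthread_mutex_t lock;	/* serializes access to the fields below */
	int state;		/* 1 = online, 0 = offline */
};

static int status_show(struct channel_path *chp, char *buf, size_t len)
{
	int status;

	pthread_mutex_lock(&chp->lock);
	status = chp->state;		/* snapshot under the lock */
	pthread_mutex_unlock(&chp->lock);

	/* format outside the lock to keep the hold time short */
	return snprintf(buf, len, status ? "online\n" : "offline\n");
}

int main(void)
{
	struct channel_path chp = { .lock = PTHREAD_MUTEX_INITIALIZER,
				    .state = 1 };
	char buf[32];

	status_show(&chp, buf, sizeof(buf));
	fputs(buf, stdout);
	return 0;
}

Doing the sprintf() after the unlock, as the patched chp_status_show() does, keeps the critical section limited to the field copy.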
diff --git a/drivers/s390/cio/chp.h b/drivers/s390/cio/chp.h
index 26c3d2246176..12b4903d6fe3 100644
--- a/drivers/s390/cio/chp.h
+++ b/drivers/s390/cio/chp.h
@@ -1,7 +1,7 @@
/*
* drivers/s390/cio/chp.h
*
- * Copyright IBM Corp. 2007
+ * Copyright IBM Corp. 2007,2010
* Author(s): Peter Oberparleiter <peter.oberparleiter@de.ibm.com>
*/
@@ -10,6 +10,7 @@
#include <linux/types.h>
#include <linux/device.h>
+#include <linux/mutex.h>
#include <asm/chpid.h>
#include "chsc.h"
#include "css.h"
@@ -40,16 +41,23 @@ static inline int chp_test_bit(u8 *bitmap, int num)
struct channel_path {
+ struct device dev;
struct chp_id chpid;
+ struct mutex lock; /* Serialize access to below members. */
int state;
struct channel_path_desc desc;
/* Channel-measurement related stuff: */
int cmg;
int shared;
void *cmg_chars;
- struct device dev;
};
+/* Return channel_path struct for given chpid. */
+static inline struct channel_path *chpid_to_chp(struct chp_id chpid)
+{
+ return channel_subsystems[chpid.cssid]->chps[chpid.id];
+}
+
int chp_get_status(struct chp_id chpid);
u8 chp_get_sch_opm(struct subchannel *sch);
int chp_is_registered(struct chp_id chpid);
diff --git a/drivers/s390/cio/chsc.c b/drivers/s390/cio/chsc.c
index 4cbb1a6ca33c..1aaddea673e0 100644
--- a/drivers/s390/cio/chsc.c
+++ b/drivers/s390/cio/chsc.c
@@ -2,7 +2,7 @@
* drivers/s390/cio/chsc.c
* S/390 common I/O routines -- channel subsystem call
*
- * Copyright IBM Corp. 1999,2008
+ * Copyright IBM Corp. 1999,2010
* Author(s): Ingo Adlung (adlung@de.ibm.com)
* Cornelia Huck (cornelia.huck@de.ibm.com)
* Arnd Bergmann (arndb@de.ibm.com)
@@ -29,8 +29,8 @@
#include "chsc.h"
static void *sei_page;
-static DEFINE_SPINLOCK(siosl_lock);
-static DEFINE_SPINLOCK(sda_lock);
+static void *chsc_page;
+static DEFINE_SPINLOCK(chsc_page_lock);
/**
* chsc_error_from_response() - convert a chsc response to an error
@@ -85,17 +85,15 @@ struct chsc_ssd_area {
int chsc_get_ssd_info(struct subchannel_id schid, struct chsc_ssd_info *ssd)
{
- unsigned long page;
struct chsc_ssd_area *ssd_area;
int ccode;
int ret;
int i;
int mask;
- page = get_zeroed_page(GFP_KERNEL | GFP_DMA);
- if (!page)
- return -ENOMEM;
- ssd_area = (struct chsc_ssd_area *) page;
+ spin_lock_irq(&chsc_page_lock);
+ memset(chsc_page, 0, PAGE_SIZE);
+ ssd_area = chsc_page;
ssd_area->request.length = 0x0010;
ssd_area->request.code = 0x0004;
ssd_area->ssid = schid.ssid;
@@ -106,25 +104,25 @@ int chsc_get_ssd_info(struct subchannel_id schid, struct chsc_ssd_info *ssd)
/* Check response. */
if (ccode > 0) {
ret = (ccode == 3) ? -ENODEV : -EBUSY;
- goto out_free;
+ goto out;
}
ret = chsc_error_from_response(ssd_area->response.code);
if (ret != 0) {
CIO_MSG_EVENT(2, "chsc: ssd failed for 0.%x.%04x (rc=%04x)\n",
schid.ssid, schid.sch_no,
ssd_area->response.code);
- goto out_free;
+ goto out;
}
if (!ssd_area->sch_valid) {
ret = -ENODEV;
- goto out_free;
+ goto out;
}
/* Copy data */
ret = 0;
memset(ssd, 0, sizeof(struct chsc_ssd_info));
if ((ssd_area->st != SUBCHANNEL_TYPE_IO) &&
(ssd_area->st != SUBCHANNEL_TYPE_MSG))
- goto out_free;
+ goto out;
ssd->path_mask = ssd_area->path_mask;
ssd->fla_valid_mask = ssd_area->fla_valid_mask;
for (i = 0; i < 8; i++) {
@@ -136,8 +134,8 @@ int chsc_get_ssd_info(struct subchannel_id schid, struct chsc_ssd_info *ssd)
if (ssd_area->fla_valid_mask & mask)
ssd->fla[i] = ssd_area->fla[i];
}
-out_free:
- free_page(page);
+out:
+ spin_unlock_irq(&chsc_page_lock);
return ret;
}
@@ -497,6 +495,7 @@ __s390_vary_chpid_on(struct subchannel_id schid, void *data)
*/
int chsc_chp_vary(struct chp_id chpid, int on)
{
+ struct channel_path *chp = chpid_to_chp(chpid);
struct chp_link link;
memset(&link, 0, sizeof(struct chp_link));
@@ -506,11 +505,12 @@ int chsc_chp_vary(struct chp_id chpid, int on)
/*
* Redo PathVerification on the devices the chpid connects to
*/
-
- if (on)
+ if (on) {
+ /* Try to update the channel path descriptor. */
+ chsc_determine_base_channel_path_desc(chpid, &chp->desc);
for_each_subchannel_staged(s390_subchannel_vary_chpid_on,
__s390_vary_chpid_on, &link);
- else
+ } else
for_each_subchannel_staged(s390_subchannel_vary_chpid_off,
NULL, &link);
@@ -552,7 +552,7 @@ cleanup:
return ret;
}
-int __chsc_do_secm(struct channel_subsystem *css, int enable, void *page)
+int __chsc_do_secm(struct channel_subsystem *css, int enable)
{
struct {
struct chsc_header request;
@@ -573,7 +573,9 @@ int __chsc_do_secm(struct channel_subsystem *css, int enable, void *page)
} __attribute__ ((packed)) *secm_area;
int ret, ccode;
- secm_area = page;
+ spin_lock_irq(&chsc_page_lock);
+ memset(chsc_page, 0, PAGE_SIZE);
+ secm_area = chsc_page;
secm_area->request.length = 0x0050;
secm_area->request.code = 0x0016;
@@ -584,8 +586,10 @@ int __chsc_do_secm(struct channel_subsystem *css, int enable, void *page)
secm_area->operation_code = enable ? 0 : 1;
ccode = chsc(secm_area);
- if (ccode > 0)
- return (ccode == 3) ? -ENODEV : -EBUSY;
+ if (ccode > 0) {
+ ret = (ccode == 3) ? -ENODEV : -EBUSY;
+ goto out;
+ }
switch (secm_area->response.code) {
case 0x0102:
@@ -598,37 +602,32 @@ int __chsc_do_secm(struct channel_subsystem *css, int enable, void *page)
if (ret != 0)
CIO_CRW_EVENT(2, "chsc: secm failed (rc=%04x)\n",
secm_area->response.code);
+out:
+ spin_unlock_irq(&chsc_page_lock);
return ret;
}
int
chsc_secm(struct channel_subsystem *css, int enable)
{
- void *secm_area;
int ret;
- secm_area = (void *)get_zeroed_page(GFP_KERNEL | GFP_DMA);
- if (!secm_area)
- return -ENOMEM;
-
if (enable && !css->cm_enabled) {
css->cub_addr1 = (void *)get_zeroed_page(GFP_KERNEL | GFP_DMA);
css->cub_addr2 = (void *)get_zeroed_page(GFP_KERNEL | GFP_DMA);
if (!css->cub_addr1 || !css->cub_addr2) {
free_page((unsigned long)css->cub_addr1);
free_page((unsigned long)css->cub_addr2);
- free_page((unsigned long)secm_area);
return -ENOMEM;
}
}
- ret = __chsc_do_secm(css, enable, secm_area);
+ ret = __chsc_do_secm(css, enable);
if (!ret) {
css->cm_enabled = enable;
if (css->cm_enabled) {
ret = chsc_add_cmg_attr(css);
if (ret) {
- memset(secm_area, 0, PAGE_SIZE);
- __chsc_do_secm(css, 0, secm_area);
+ __chsc_do_secm(css, 0);
css->cm_enabled = 0;
}
} else
@@ -638,44 +637,24 @@ chsc_secm(struct channel_subsystem *css, int enable)
free_page((unsigned long)css->cub_addr1);
free_page((unsigned long)css->cub_addr2);
}
- free_page((unsigned long)secm_area);
return ret;
}
int chsc_determine_channel_path_desc(struct chp_id chpid, int fmt, int rfmt,
- int c, int m,
- struct chsc_response_struct *resp)
+ int c, int m, void *page)
{
+ struct chsc_scpd *scpd_area;
int ccode, ret;
- struct {
- struct chsc_header request;
- u32 : 2;
- u32 m : 1;
- u32 c : 1;
- u32 fmt : 4;
- u32 cssid : 8;
- u32 : 4;
- u32 rfmt : 4;
- u32 first_chpid : 8;
- u32 : 24;
- u32 last_chpid : 8;
- u32 zeroes1;
- struct chsc_header response;
- u8 data[PAGE_SIZE - 20];
- } __attribute__ ((packed)) *scpd_area;
-
if ((rfmt == 1) && !css_general_characteristics.fcs)
return -EINVAL;
if ((rfmt == 2) && !css_general_characteristics.cib)
return -EINVAL;
- scpd_area = (void *)get_zeroed_page(GFP_KERNEL | GFP_DMA);
- if (!scpd_area)
- return -ENOMEM;
+ memset(page, 0, PAGE_SIZE);
+ scpd_area = page;
scpd_area->request.length = 0x0010;
scpd_area->request.code = 0x0002;
-
scpd_area->cssid = chpid.cssid;
scpd_area->first_chpid = chpid.id;
scpd_area->last_chpid = chpid.id;
@@ -685,20 +664,13 @@ int chsc_determine_channel_path_desc(struct chp_id chpid, int fmt, int rfmt,
scpd_area->rfmt = rfmt;
ccode = chsc(scpd_area);
- if (ccode > 0) {
- ret = (ccode == 3) ? -ENODEV : -EBUSY;
- goto out;
- }
+ if (ccode > 0)
+ return (ccode == 3) ? -ENODEV : -EBUSY;
ret = chsc_error_from_response(scpd_area->response.code);
- if (ret == 0)
- /* Success. */
- memcpy(resp, &scpd_area->response, scpd_area->response.length);
- else
+ if (ret)
CIO_CRW_EVENT(2, "chsc: scpd failed (rc=%04x)\n",
scpd_area->response.code);
-out:
- free_page((unsigned long)scpd_area);
return ret;
}
EXPORT_SYMBOL_GPL(chsc_determine_channel_path_desc);
@@ -707,17 +679,19 @@ int chsc_determine_base_channel_path_desc(struct chp_id chpid,
struct channel_path_desc *desc)
{
struct chsc_response_struct *chsc_resp;
+ struct chsc_scpd *scpd_area;
+ unsigned long flags;
int ret;
- chsc_resp = kzalloc(sizeof(*chsc_resp), GFP_KERNEL);
- if (!chsc_resp)
- return -ENOMEM;
- ret = chsc_determine_channel_path_desc(chpid, 0, 0, 0, 0, chsc_resp);
+ spin_lock_irqsave(&chsc_page_lock, flags);
+ scpd_area = chsc_page;
+ ret = chsc_determine_channel_path_desc(chpid, 0, 0, 0, 0, scpd_area);
if (ret)
- goto out_free;
+ goto out;
+ chsc_resp = (void *)&scpd_area->response;
memcpy(desc, &chsc_resp->data, sizeof(*desc));
-out_free:
- kfree(chsc_resp);
+out:
+ spin_unlock_irqrestore(&chsc_page_lock, flags);
return ret;
}
@@ -725,33 +699,22 @@ static void
chsc_initialize_cmg_chars(struct channel_path *chp, u8 cmcv,
struct cmg_chars *chars)
{
- switch (chp->cmg) {
- case 2:
- case 3:
- chp->cmg_chars = kmalloc(sizeof(struct cmg_chars),
- GFP_KERNEL);
- if (chp->cmg_chars) {
- int i, mask;
- struct cmg_chars *cmg_chars;
-
- cmg_chars = chp->cmg_chars;
- for (i = 0; i < NR_MEASUREMENT_CHARS; i++) {
- mask = 0x80 >> (i + 3);
- if (cmcv & mask)
- cmg_chars->values[i] = chars->values[i];
- else
- cmg_chars->values[i] = 0;
- }
- }
- break;
- default:
- /* No cmg-dependent data. */
- break;
+ struct cmg_chars *cmg_chars;
+ int i, mask;
+
+ cmg_chars = chp->cmg_chars;
+ for (i = 0; i < NR_MEASUREMENT_CHARS; i++) {
+ mask = 0x80 >> (i + 3);
+ if (cmcv & mask)
+ cmg_chars->values[i] = chars->values[i];
+ else
+ cmg_chars->values[i] = 0;
}
}
int chsc_get_channel_measurement_chars(struct channel_path *chp)
{
+ struct cmg_chars *cmg_chars;
int ccode, ret;
struct {
@@ -775,13 +738,16 @@ int chsc_get_channel_measurement_chars(struct channel_path *chp)
u32 data[NR_MEASUREMENT_CHARS];
} __attribute__ ((packed)) *scmc_area;
- scmc_area = (void *)get_zeroed_page(GFP_KERNEL | GFP_DMA);
- if (!scmc_area)
+ chp->cmg_chars = NULL;
+ cmg_chars = kmalloc(sizeof(*cmg_chars), GFP_KERNEL);
+ if (!cmg_chars)
return -ENOMEM;
+ spin_lock_irq(&chsc_page_lock);
+ memset(chsc_page, 0, PAGE_SIZE);
+ scmc_area = chsc_page;
scmc_area->request.length = 0x0010;
scmc_area->request.code = 0x0022;
-
scmc_area->first_chpid = chp->chpid.id;
scmc_area->last_chpid = chp->chpid.id;
@@ -792,53 +758,65 @@ int chsc_get_channel_measurement_chars(struct channel_path *chp)
}
ret = chsc_error_from_response(scmc_area->response.code);
- if (ret == 0) {
- /* Success. */
- if (!scmc_area->not_valid) {
- chp->cmg = scmc_area->cmg;
- chp->shared = scmc_area->shared;
- chsc_initialize_cmg_chars(chp, scmc_area->cmcv,
- (struct cmg_chars *)
- &scmc_area->data);
- } else {
- chp->cmg = -1;
- chp->shared = -1;
- }
- } else {
+ if (ret) {
CIO_CRW_EVENT(2, "chsc: scmc failed (rc=%04x)\n",
scmc_area->response.code);
+ goto out;
+ }
+ if (scmc_area->not_valid) {
+ chp->cmg = -1;
+ chp->shared = -1;
+ goto out;
}
+ chp->cmg = scmc_area->cmg;
+ chp->shared = scmc_area->shared;
+ if (chp->cmg != 2 && chp->cmg != 3) {
+ /* No cmg-dependent data. */
+ goto out;
+ }
+ chp->cmg_chars = cmg_chars;
+ chsc_initialize_cmg_chars(chp, scmc_area->cmcv,
+ (struct cmg_chars *) &scmc_area->data);
out:
- free_page((unsigned long)scmc_area);
+ spin_unlock_irq(&chsc_page_lock);
+ if (!chp->cmg_chars)
+ kfree(cmg_chars);
+
return ret;
}
-int __init chsc_alloc_sei_area(void)
+int __init chsc_init(void)
{
int ret;
sei_page = (void *)get_zeroed_page(GFP_KERNEL | GFP_DMA);
- if (!sei_page) {
- CIO_MSG_EVENT(0, "Can't allocate page for processing of "
- "chsc machine checks!\n");
- return -ENOMEM;
+ chsc_page = (void *)get_zeroed_page(GFP_KERNEL | GFP_DMA);
+ if (!sei_page || !chsc_page) {
+ ret = -ENOMEM;
+ goto out_err;
}
ret = crw_register_handler(CRW_RSC_CSS, chsc_process_crw);
if (ret)
- kfree(sei_page);
+ goto out_err;
+ return ret;
+out_err:
+ free_page((unsigned long)chsc_page);
+ free_page((unsigned long)sei_page);
return ret;
}
-void __init chsc_free_sei_area(void)
+void __init chsc_init_cleanup(void)
{
crw_unregister_handler(CRW_RSC_CSS);
- kfree(sei_page);
+ free_page((unsigned long)chsc_page);
+ free_page((unsigned long)sei_page);
}
int chsc_enable_facility(int operation_code)
{
+ unsigned long flags;
int ret;
- static struct {
+ struct {
struct chsc_header request;
u8 reserved1:4;
u8 format:4;
@@ -851,32 +829,33 @@ int chsc_enable_facility(int operation_code)
u32 reserved5:4;
u32 format2:4;
u32 reserved6:24;
- } __attribute__ ((packed, aligned(4096))) sda_area;
+ } __attribute__ ((packed)) *sda_area;
- spin_lock(&sda_lock);
- memset(&sda_area, 0, sizeof(sda_area));
- sda_area.request.length = 0x0400;
- sda_area.request.code = 0x0031;
- sda_area.operation_code = operation_code;
+ spin_lock_irqsave(&chsc_page_lock, flags);
+ memset(chsc_page, 0, PAGE_SIZE);
+ sda_area = chsc_page;
+ sda_area->request.length = 0x0400;
+ sda_area->request.code = 0x0031;
+ sda_area->operation_code = operation_code;
- ret = chsc(&sda_area);
+ ret = chsc(sda_area);
if (ret > 0) {
ret = (ret == 3) ? -ENODEV : -EBUSY;
goto out;
}
- switch (sda_area.response.code) {
+ switch (sda_area->response.code) {
case 0x0101:
ret = -EOPNOTSUPP;
break;
default:
- ret = chsc_error_from_response(sda_area.response.code);
+ ret = chsc_error_from_response(sda_area->response.code);
}
if (ret != 0)
CIO_CRW_EVENT(2, "chsc: sda (oc=%x) failed (rc=%04x)\n",
- operation_code, sda_area.response.code);
- out:
- spin_unlock(&sda_lock);
+ operation_code, sda_area->response.code);
+out:
+ spin_unlock_irqrestore(&chsc_page_lock, flags);
return ret;
}
@@ -895,13 +874,12 @@ chsc_determine_css_characteristics(void)
struct chsc_header response;
u32 reserved4;
u32 general_char[510];
- u32 chsc_char[518];
+ u32 chsc_char[508];
} __attribute__ ((packed)) *scsc_area;
- scsc_area = (void *)get_zeroed_page(GFP_KERNEL | GFP_DMA);
- if (!scsc_area)
- return -ENOMEM;
-
+ spin_lock_irq(&chsc_page_lock);
+ memset(chsc_page, 0, PAGE_SIZE);
+ scsc_area = chsc_page;
scsc_area->request.length = 0x0010;
scsc_area->request.code = 0x0010;
@@ -921,7 +899,7 @@ chsc_determine_css_characteristics(void)
CIO_CRW_EVENT(2, "chsc: scsc failed (rc=%04x)\n",
scsc_area->response.code);
exit:
- free_page ((unsigned long) scsc_area);
+ spin_unlock_irq(&chsc_page_lock);
return result;
}
@@ -976,29 +954,29 @@ int chsc_sstpi(void *page, void *result, size_t size)
return (rr->response.code == 0x0001) ? 0 : -EIO;
}
-static struct {
- struct chsc_header request;
- u32 word1;
- struct subchannel_id sid;
- u32 word3;
- struct chsc_header response;
- u32 word[11];
-} __attribute__ ((packed)) siosl_area __attribute__ ((__aligned__(PAGE_SIZE)));
-
int chsc_siosl(struct subchannel_id schid)
{
+ struct {
+ struct chsc_header request;
+ u32 word1;
+ struct subchannel_id sid;
+ u32 word3;
+ struct chsc_header response;
+ u32 word[11];
+ } __attribute__ ((packed)) *siosl_area;
unsigned long flags;
int ccode;
int rc;
- spin_lock_irqsave(&siosl_lock, flags);
- memset(&siosl_area, 0, sizeof(siosl_area));
- siosl_area.request.length = 0x0010;
- siosl_area.request.code = 0x0046;
- siosl_area.word1 = 0x80000000;
- siosl_area.sid = schid;
+ spin_lock_irqsave(&chsc_page_lock, flags);
+ memset(chsc_page, 0, PAGE_SIZE);
+ siosl_area = chsc_page;
+ siosl_area->request.length = 0x0010;
+ siosl_area->request.code = 0x0046;
+ siosl_area->word1 = 0x80000000;
+ siosl_area->sid = schid;
- ccode = chsc(&siosl_area);
+ ccode = chsc(siosl_area);
if (ccode > 0) {
if (ccode == 3)
rc = -ENODEV;
@@ -1008,17 +986,16 @@ int chsc_siosl(struct subchannel_id schid)
schid.ssid, schid.sch_no, ccode);
goto out;
}
- rc = chsc_error_from_response(siosl_area.response.code);
+ rc = chsc_error_from_response(siosl_area->response.code);
if (rc)
CIO_MSG_EVENT(2, "chsc: siosl failed for 0.%x.%04x (rc=%04x)\n",
schid.ssid, schid.sch_no,
- siosl_area.response.code);
+ siosl_area->response.code);
else
CIO_MSG_EVENT(4, "chsc: siosl succeeded for 0.%x.%04x\n",
schid.ssid, schid.sch_no);
out:
- spin_unlock_irqrestore(&siosl_lock, flags);
-
+ spin_unlock_irqrestore(&chsc_page_lock, flags);
return rc;
}
EXPORT_SYMBOL_GPL(chsc_siosl);
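
The chsc.c conversion above replaces the per-call get_zeroed_page()/free_page() (and the old siosl/sda static areas) with one statically allocated DMA page, chsc_page, guarded by chsc_page_lock: each caller takes the spinlock, zeroes the page, overlays its request structure on it, issues the CHSC, and unlocks. A hedged userspace analogy of that "one shared request buffer" pattern (the kernel uses a real DMA page and spin_lock_irq()/spin_lock_irqsave(); a pthread mutex stands in here):

#include <pthread.h>
#include <stdio.h>
#include <string.h>

#define PAGE_SIZE 4096

/* One preallocated request buffer shared by all callers. */
static unsigned char chsc_page[PAGE_SIZE];
static pthread_mutex_t chsc_page_lock = PTHREAD_MUTEX_INITIALIZER;

struct request {
	unsigned short length;
	unsigned short code;
	unsigned int payload[4];
};

static int issue_request(unsigned short code)
{
	struct request *req;
	int ret;

	pthread_mutex_lock(&chsc_page_lock);
	memset(chsc_page, 0, PAGE_SIZE);	/* fresh page for each call */
	req = (struct request *)chsc_page;
	req->length = 0x0010;
	req->code = code;
	/* ... hand req to the hardware and read the response in place ... */
	ret = 0;
	pthread_mutex_unlock(&chsc_page_lock);
	return ret;
}

int main(void)
{
	printf("rc=%d\n", issue_request(0x0004));
	return 0;
}

The trade-off is serialization on a single lock in exchange for dropping the per-call GFP_DMA allocation and its -ENOMEM handling on every path.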
diff --git a/drivers/s390/cio/chsc.h b/drivers/s390/cio/chsc.h
index 5453013f094b..6693f5e3176f 100644
--- a/drivers/s390/cio/chsc.h
+++ b/drivers/s390/cio/chsc.h
@@ -57,21 +57,39 @@ struct chsc_ssd_info {
struct chp_id chpid[8];
u16 fla[8];
};
+
+struct chsc_scpd {
+ struct chsc_header request;
+ u32:2;
+ u32 m:1;
+ u32 c:1;
+ u32 fmt:4;
+ u32 cssid:8;
+ u32:4;
+ u32 rfmt:4;
+ u32 first_chpid:8;
+ u32:24;
+ u32 last_chpid:8;
+ u32 zeroes1;
+ struct chsc_header response;
+ u8 data[PAGE_SIZE - 20];
+} __attribute__ ((packed));
+
+
extern int chsc_get_ssd_info(struct subchannel_id schid,
struct chsc_ssd_info *ssd);
extern int chsc_determine_css_characteristics(void);
-extern int chsc_alloc_sei_area(void);
-extern void chsc_free_sei_area(void);
+extern int chsc_init(void);
+extern void chsc_init_cleanup(void);
extern int chsc_enable_facility(int);
struct channel_subsystem;
extern int chsc_secm(struct channel_subsystem *, int);
-int __chsc_do_secm(struct channel_subsystem *css, int enable, void *page);
+int __chsc_do_secm(struct channel_subsystem *css, int enable);
int chsc_chp_vary(struct chp_id chpid, int on);
int chsc_determine_channel_path_desc(struct chp_id chpid, int fmt, int rfmt,
- int c, int m,
- struct chsc_response_struct *resp);
+ int c, int m, void *page);
int chsc_determine_base_channel_path_desc(struct chp_id chpid,
struct channel_path_desc *desc);
void chsc_chp_online(struct chp_id chpid);
diff --git a/drivers/s390/cio/chsc_sch.c b/drivers/s390/cio/chsc_sch.c
index f2b77e7bfc6f..3c3f3ffe2179 100644
--- a/drivers/s390/cio/chsc_sch.c
+++ b/drivers/s390/cio/chsc_sch.c
@@ -688,25 +688,31 @@ out_free:
static int chsc_ioctl_chpd(void __user *user_chpd)
{
+ struct chsc_scpd *scpd_area;
struct chsc_cpd_info *chpd;
int ret;
chpd = kzalloc(sizeof(*chpd), GFP_KERNEL);
- if (!chpd)
- return -ENOMEM;
+ scpd_area = (void *)get_zeroed_page(GFP_KERNEL | GFP_DMA);
+ if (!scpd_area || !chpd) {
+ ret = -ENOMEM;
+ goto out_free;
+ }
if (copy_from_user(chpd, user_chpd, sizeof(*chpd))) {
ret = -EFAULT;
goto out_free;
}
ret = chsc_determine_channel_path_desc(chpd->chpid, chpd->fmt,
chpd->rfmt, chpd->c, chpd->m,
- &chpd->chpdb);
+ scpd_area);
if (ret)
goto out_free;
+ memcpy(&chpd->chpdb, &scpd_area->response, scpd_area->response.length);
if (copy_to_user(user_chpd, chpd, sizeof(*chpd)))
ret = -EFAULT;
out_free:
kfree(chpd);
+ free_page((unsigned long)scpd_area);
return ret;
}
diff --git a/drivers/s390/cio/css.c b/drivers/s390/cio/css.c
index ca8e1c240c3c..825951b6b83f 100644
--- a/drivers/s390/cio/css.c
+++ b/drivers/s390/cio/css.c
@@ -1,7 +1,7 @@
/*
* driver for channel subsystem
*
- * Copyright IBM Corp. 2002, 2009
+ * Copyright IBM Corp. 2002, 2010
*
* Author(s): Arnd Bergmann (arndb@de.ibm.com)
* Cornelia Huck (cornelia.huck@de.ibm.com)
@@ -577,7 +577,7 @@ static int __unset_registered(struct device *dev, void *data)
return 0;
}
-void css_schedule_eval_all_unreg(void)
+static void css_schedule_eval_all_unreg(void)
{
unsigned long flags;
struct idset *unreg_set;
@@ -635,7 +635,7 @@ static void css_process_crw(struct crw *crw0, struct crw *crw1, int overflow)
init_subchannel_id(&mchk_schid);
mchk_schid.sch_no = crw0->rsid;
if (crw1)
- mchk_schid.ssid = (crw1->rsid >> 8) & 3;
+ mchk_schid.ssid = (crw1->rsid >> 4) & 3;
/*
* Since we are always presented with IPI in the CRW, we have to
@@ -790,7 +790,6 @@ static struct notifier_block css_reboot_notifier = {
static int css_power_event(struct notifier_block *this, unsigned long event,
void *ptr)
{
- void *secm_area;
int ret, i;
switch (event) {
@@ -806,15 +805,8 @@ static int css_power_event(struct notifier_block *this, unsigned long event,
mutex_unlock(&css->mutex);
continue;
}
- secm_area = (void *)get_zeroed_page(GFP_KERNEL |
- GFP_DMA);
- if (secm_area) {
- if (__chsc_do_secm(css, 0, secm_area))
- ret = NOTIFY_BAD;
- free_page((unsigned long)secm_area);
- } else
+ if (__chsc_do_secm(css, 0))
ret = NOTIFY_BAD;
-
mutex_unlock(&css->mutex);
}
break;
@@ -830,15 +822,8 @@ static int css_power_event(struct notifier_block *this, unsigned long event,
mutex_unlock(&css->mutex);
continue;
}
- secm_area = (void *)get_zeroed_page(GFP_KERNEL |
- GFP_DMA);
- if (secm_area) {
- if (__chsc_do_secm(css, 1, secm_area))
- ret = NOTIFY_BAD;
- free_page((unsigned long)secm_area);
- } else
+ if (__chsc_do_secm(css, 1))
ret = NOTIFY_BAD;
-
mutex_unlock(&css->mutex);
}
/* search for subchannels, which appeared during hibernation */
@@ -863,14 +848,11 @@ static int __init css_bus_init(void)
{
int ret, i;
- ret = chsc_determine_css_characteristics();
- if (ret == -ENOMEM)
- goto out;
-
- ret = chsc_alloc_sei_area();
+ ret = chsc_init();
if (ret)
- goto out;
+ return ret;
+ chsc_determine_css_characteristics();
/* Try to enable MSS. */
ret = chsc_enable_facility(CHSC_SDA_OC_MSS);
if (ret)
@@ -956,9 +938,9 @@ out_unregister:
}
bus_unregister(&css_bus_type);
out:
- crw_unregister_handler(CRW_RSC_CSS);
- chsc_free_sei_area();
+ crw_unregister_handler(CRW_RSC_SCH);
idset_free(slow_subchannel_set);
+ chsc_init_cleanup();
pr_alert("The CSS device driver initialization failed with "
"errno=%d\n", ret);
return ret;
@@ -978,9 +960,9 @@ static void __init css_bus_cleanup(void)
device_unregister(&css->device);
}
bus_unregister(&css_bus_type);
- crw_unregister_handler(CRW_RSC_CSS);
- chsc_free_sei_area();
+ crw_unregister_handler(CRW_RSC_SCH);
idset_free(slow_subchannel_set);
+ chsc_init_cleanup();
isc_unregister(IO_SCH_ISC);
}
@@ -1048,7 +1030,16 @@ subsys_initcall_sync(channel_subsystem_init_sync);
void channel_subsystem_reinit(void)
{
+ struct channel_path *chp;
+ struct chp_id chpid;
+
chsc_enable_facility(CHSC_SDA_OC_MSS);
+ chp_id_for_each(&chpid) {
+ chp = chpid_to_chp(chpid);
+ if (!chp)
+ continue;
+ chsc_determine_base_channel_path_desc(chpid, &chp->desc);
+ }
}
#ifdef CONFIG_PROC_FS
@@ -1200,6 +1191,7 @@ static int css_pm_restore(struct device *dev)
struct subchannel *sch = to_subchannel(dev);
struct css_driver *drv;
+ css_update_ssd_info(sch);
if (!sch->dev.driver)
return 0;
drv = to_cssdriver(sch->dev.driver);
diff --git a/drivers/s390/cio/device.c b/drivers/s390/cio/device.c
index 51bd3687d163..e8391b89eff4 100644
--- a/drivers/s390/cio/device.c
+++ b/drivers/s390/cio/device.c
@@ -1147,6 +1147,7 @@ err:
static int io_subchannel_chp_event(struct subchannel *sch,
struct chp_link *link, int event)
{
+ struct ccw_device *cdev = sch_get_cdev(sch);
int mask;
mask = chp_ssd_get_mask(&sch->ssd_info, link);
@@ -1156,22 +1157,30 @@ static int io_subchannel_chp_event(struct subchannel *sch,
case CHP_VARY_OFF:
sch->opm &= ~mask;
sch->lpm &= ~mask;
+ if (cdev)
+ cdev->private->path_gone_mask |= mask;
io_subchannel_terminate_path(sch, mask);
break;
case CHP_VARY_ON:
sch->opm |= mask;
sch->lpm |= mask;
+ if (cdev)
+ cdev->private->path_new_mask |= mask;
io_subchannel_verify(sch);
break;
case CHP_OFFLINE:
if (cio_update_schib(sch))
return -ENODEV;
+ if (cdev)
+ cdev->private->path_gone_mask |= mask;
io_subchannel_terminate_path(sch, mask);
break;
case CHP_ONLINE:
if (cio_update_schib(sch))
return -ENODEV;
sch->lpm |= mask & sch->opm;
+ if (cdev)
+ cdev->private->path_new_mask |= mask;
io_subchannel_verify(sch);
break;
}
@@ -1196,6 +1205,7 @@ static void io_subchannel_quiesce(struct subchannel *sch)
cdev->handler(cdev, cdev->private->intparm, ERR_PTR(-EIO));
while (ret == -EBUSY) {
cdev->private->state = DEV_STATE_QUIESCE;
+ cdev->private->iretry = 255;
ret = ccw_device_cancel_halt_clear(cdev);
if (ret == -EBUSY) {
ccw_device_set_timeout(cdev, HZ/10);
@@ -1445,7 +1455,16 @@ static int io_subchannel_sch_event(struct subchannel *sch, int process)
break;
case IO_SCH_UNREG_ATTACH:
case IO_SCH_UNREG:
- if (cdev)
+ if (!cdev)
+ break;
+ if (cdev->private->state == DEV_STATE_SENSE_ID) {
+ /*
+ * Note: delayed work triggered by this event
+ * and repeated calls to sch_event are synchronized
+ * by the above check for work_pending(cdev).
+ */
+ dev_fsm_event(cdev, DEV_EVENT_NOTOPER);
+ } else
ccw_device_set_notoper(cdev);
break;
case IO_SCH_NOP:
@@ -1468,9 +1487,13 @@ static int io_subchannel_sch_event(struct subchannel *sch, int process)
goto out;
break;
case IO_SCH_UNREG_ATTACH:
+ if (cdev->private->flags.resuming) {
+ /* Device will be handled later. */
+ rc = 0;
+ goto out;
+ }
/* Unregister ccw device. */
- if (!cdev->private->flags.resuming)
- ccw_device_unregister(cdev);
+ ccw_device_unregister(cdev);
break;
default:
break;
diff --git a/drivers/s390/cio/device_fsm.c b/drivers/s390/cio/device_fsm.c
index c9b852647f01..a845695ac314 100644
--- a/drivers/s390/cio/device_fsm.c
+++ b/drivers/s390/cio/device_fsm.c
@@ -174,7 +174,10 @@ ccw_device_cancel_halt_clear(struct ccw_device *cdev)
ret = cio_clear (sch);
return (ret == 0) ? -EBUSY : ret;
}
- panic("Can't stop i/o on subchannel.\n");
+ /* Function was unsuccessful */
+ CIO_MSG_EVENT(0, "0.%x.%04x: could not stop I/O\n",
+ cdev->private->dev_id.ssid, cdev->private->dev_id.devno);
+ return -EIO;
}
void ccw_device_update_sense_data(struct ccw_device *cdev)
@@ -349,9 +352,13 @@ out:
static void ccw_device_oper_notify(struct ccw_device *cdev)
{
+ struct subchannel *sch = to_subchannel(cdev->dev.parent);
+
if (ccw_device_notify(cdev, CIO_OPER) == NOTIFY_OK) {
/* Reenable channel measurements, if needed. */
ccw_device_sched_todo(cdev, CDEV_TODO_ENABLE_CMF);
+ /* Save indication for new paths. */
+ cdev->private->path_new_mask = sch->vpm;
return;
}
/* Driver doesn't want device back. */
@@ -462,6 +469,32 @@ static void ccw_device_request_event(struct ccw_device *cdev, enum dev_event e)
}
}
+static void ccw_device_report_path_events(struct ccw_device *cdev)
+{
+ struct subchannel *sch = to_subchannel(cdev->dev.parent);
+ int path_event[8];
+ int chp, mask;
+
+ for (chp = 0, mask = 0x80; chp < 8; chp++, mask >>= 1) {
+ path_event[chp] = PE_NONE;
+ if (mask & cdev->private->path_gone_mask & ~(sch->vpm))
+ path_event[chp] |= PE_PATH_GONE;
+ if (mask & cdev->private->path_new_mask & sch->vpm)
+ path_event[chp] |= PE_PATH_AVAILABLE;
+ if (mask & cdev->private->pgid_reset_mask & sch->vpm)
+ path_event[chp] |= PE_PATHGROUP_ESTABLISHED;
+ }
+ if (cdev->online && cdev->drv->path_event)
+ cdev->drv->path_event(cdev, path_event);
+}
+
+static void ccw_device_reset_path_events(struct ccw_device *cdev)
+{
+ cdev->private->path_gone_mask = 0;
+ cdev->private->path_new_mask = 0;
+ cdev->private->pgid_reset_mask = 0;
+}
+
void
ccw_device_verify_done(struct ccw_device *cdev, int err)
{
@@ -498,6 +531,7 @@ callback:
&cdev->private->irb);
memset(&cdev->private->irb, 0, sizeof(struct irb));
}
+ ccw_device_report_path_events(cdev);
break;
case -ETIME:
case -EUSERS:
@@ -516,6 +550,7 @@ callback:
ccw_device_done(cdev, DEV_STATE_NOT_OPER);
break;
}
+ ccw_device_reset_path_events(cdev);
}
/*
@@ -734,13 +769,14 @@ ccw_device_online_timeout(struct ccw_device *cdev, enum dev_event dev_event)
int ret;
ccw_device_set_timeout(cdev, 0);
+ cdev->private->iretry = 255;
ret = ccw_device_cancel_halt_clear(cdev);
if (ret == -EBUSY) {
ccw_device_set_timeout(cdev, 3*HZ);
cdev->private->state = DEV_STATE_TIMEOUT_KILL;
return;
}
- if (ret == -ENODEV)
+ if (ret)
dev_fsm_event(cdev, DEV_EVENT_NOTOPER);
else if (cdev->handler)
cdev->handler(cdev, cdev->private->intparm,
@@ -837,6 +873,7 @@ void ccw_device_kill_io(struct ccw_device *cdev)
{
int ret;
+ cdev->private->iretry = 255;
ret = ccw_device_cancel_halt_clear(cdev);
if (ret == -EBUSY) {
ccw_device_set_timeout(cdev, 3*HZ);
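
The new ccw_device_report_path_events() above expands the three accumulated bit masks (path_gone_mask, path_new_mask, pgid_reset_mask) into one event word per channel path, walking the eight possible paths with a mask that starts at 0x80 and shifts right. A standalone sketch of that mask walk follows; the PE_* names come from the patch, but their numeric values here are assumptions for illustration only.

#include <stdio.h>

#define PE_NONE				0x0	/* assumed values */
#define PE_PATH_GONE			0x1
#define PE_PATH_AVAILABLE		0x2
#define PE_PATHGROUP_ESTABLISHED	0x4

/* Expand per-path bit masks into one event word per path (bit 0x80 = path 0). */
static void expand_path_events(unsigned char gone, unsigned char avail,
			       unsigned char reset, unsigned char vpm,
			       int path_event[8])
{
	int chp, mask;

	for (chp = 0, mask = 0x80; chp < 8; chp++, mask >>= 1) {
		path_event[chp] = PE_NONE;
		if (mask & gone & ~vpm)
			path_event[chp] |= PE_PATH_GONE;
		if (mask & avail & vpm)
			path_event[chp] |= PE_PATH_AVAILABLE;
		if (mask & reset & vpm)
			path_event[chp] |= PE_PATHGROUP_ESTABLISHED;
	}
}

int main(void)
{
	int ev[8], i;

	/* path 0 went away; path 1 came back and is in the usable mask (vpm) */
	expand_path_events(0x80, 0x40, 0x00, 0x40, ev);
	for (i = 0; i < 8; i++)
		printf("path %d: 0x%x\n", i, ev[i]);
	return 0;
}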
diff --git a/drivers/s390/cio/device_pgid.c b/drivers/s390/cio/device_pgid.c
index 82a5ad0d63f6..07a4fd29f096 100644
--- a/drivers/s390/cio/device_pgid.c
+++ b/drivers/s390/cio/device_pgid.c
@@ -213,6 +213,17 @@ static void spid_start(struct ccw_device *cdev)
spid_do(cdev);
}
+static int pgid_is_reset(struct pgid *p)
+{
+ char *c;
+
+ for (c = (char *)p + 1; c < (char *)(p + 1); c++) {
+ if (*c != 0)
+ return 0;
+ }
+ return 1;
+}
+
static int pgid_cmp(struct pgid *p1, struct pgid *p2)
{
return memcmp((char *) p1 + 1, (char *) p2 + 1,
@@ -223,7 +234,7 @@ static int pgid_cmp(struct pgid *p1, struct pgid *p2)
* Determine pathgroup state from PGID data.
*/
static void pgid_analyze(struct ccw_device *cdev, struct pgid **p,
- int *mismatch, int *reserved, int *reset)
+ int *mismatch, int *reserved, u8 *reset)
{
struct pgid *pgid = &cdev->private->pgid[0];
struct pgid *first = NULL;
@@ -238,9 +249,8 @@ static void pgid_analyze(struct ccw_device *cdev, struct pgid **p,
continue;
if (pgid->inf.ps.state2 == SNID_STATE2_RESVD_ELSE)
*reserved = 1;
- if (pgid->inf.ps.state1 == SNID_STATE1_RESET) {
- /* A PGID was reset. */
- *reset = 1;
+ if (pgid_is_reset(pgid)) {
+ *reset |= lpm;
continue;
}
if (!first) {
@@ -307,7 +317,7 @@ static void snid_done(struct ccw_device *cdev, int rc)
struct pgid *pgid;
int mismatch = 0;
int reserved = 0;
- int reset = 0;
+ u8 reset = 0;
u8 donepm;
if (rc)
@@ -321,11 +331,12 @@ static void snid_done(struct ccw_device *cdev, int rc)
donepm = pgid_to_donepm(cdev);
sch->vpm = donepm & sch->opm;
cdev->private->pgid_todo_mask &= ~donepm;
+ cdev->private->pgid_reset_mask |= reset;
pgid_fill(cdev, pgid);
}
out:
CIO_MSG_EVENT(2, "snid: device 0.%x.%04x: rc=%d pvm=%02x vpm=%02x "
- "todo=%02x mism=%d rsvd=%d reset=%d\n", id->ssid,
+ "todo=%02x mism=%d rsvd=%d reset=%02x\n", id->ssid,
id->devno, rc, cdev->private->pgid_valid_mask, sch->vpm,
cdev->private->pgid_todo_mask, mismatch, reserved, reset);
switch (rc) {
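
The device_pgid.c hunks above change the reset indication from a single flag to a per-path bitmask: pgid_is_reset() decides "reset" by checking that every byte of the PGID after the leading status byte is zero, and snid_done() ORs the matching lpm bit into the new pgid_reset_mask. A minimal standalone sketch of that byte scan (the struct layout is illustrative, not the real struct pgid):

#include <stdio.h>
#include <string.h>

/* Illustrative stand-in for struct pgid: one status byte plus ID bytes. */
struct pgid {
	unsigned char status;
	unsigned char id[10];
};

/* "Reset" means everything after the leading status byte is zero. */
static int pgid_is_reset(const struct pgid *p)
{
	const unsigned char *c;

	for (c = (const unsigned char *)p + 1;
	     c < (const unsigned char *)(p + 1); c++) {
		if (*c != 0)
			return 0;
	}
	return 1;
}

int main(void)
{
	struct pgid a, b;

	memset(&a, 0, sizeof(a));
	a.status = 0x40;		/* status bits alone do not matter */
	memset(&b, 0, sizeof(b));
	b.id[3] = 0x12;			/* any nonzero ID byte -> not reset */

	printf("a reset=%d, b reset=%d\n", pgid_is_reset(&a), pgid_is_reset(&b));
	return 0;
}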
diff --git a/drivers/s390/cio/io_sch.h b/drivers/s390/cio/io_sch.h
index 469ef93f2302..d024d2c21897 100644
--- a/drivers/s390/cio/io_sch.h
+++ b/drivers/s390/cio/io_sch.h
@@ -151,8 +151,11 @@ struct ccw_device_private {
struct subchannel_id schid; /* subchannel number */
struct ccw_request req; /* internal I/O request */
int iretry;
- u8 pgid_valid_mask; /* mask of valid PGIDs */
- u8 pgid_todo_mask; /* mask of PGIDs to be adjusted */
+ u8 pgid_valid_mask; /* mask of valid PGIDs */
+ u8 pgid_todo_mask; /* mask of PGIDs to be adjusted */
+ u8 pgid_reset_mask; /* mask of PGIDs which were reset */
+ u8 path_gone_mask; /* mask of paths that became unavailable */
+ u8 path_new_mask; /* mask of paths that became available */
struct {
unsigned int fast:1; /* post with "channel end" */
unsigned int repall:1; /* report every interrupt status */
diff --git a/drivers/s390/cio/qdio_thinint.c b/drivers/s390/cio/qdio_thinint.c
index 752dbee06af5..5d9c66627b6e 100644
--- a/drivers/s390/cio/qdio_thinint.c
+++ b/drivers/s390/cio/qdio_thinint.c
@@ -292,8 +292,8 @@ void qdio_shutdown_thinint(struct qdio_irq *irq_ptr)
return;
/* reset adapter interrupt indicators */
- put_indicator(irq_ptr->dsci);
set_subchannel_ind(irq_ptr, 1);
+ put_indicator(irq_ptr->dsci);
}
void __exit tiqdio_unregister_thinints(void)
diff --git a/drivers/s390/crypto/ap_bus.c b/drivers/s390/crypto/ap_bus.c
index 91c6028d7b74..8fd8c62455e9 100644
--- a/drivers/s390/crypto/ap_bus.c
+++ b/drivers/s390/crypto/ap_bus.c
@@ -154,14 +154,7 @@ static inline int ap_instructions_available(void)
*/
static int ap_interrupts_available(void)
{
- unsigned long long facility_bits[2];
-
- if (stfle(facility_bits, 2) <= 1)
- return 0;
- if (!(facility_bits[0] & (1ULL << 61)) ||
- !(facility_bits[1] & (1ULL << 62)))
- return 0;
- return 1;
+ return test_facility(1) && test_facility(2);
}
/**
diff --git a/drivers/s390/crypto/zcrypt_api.c b/drivers/s390/crypto/zcrypt_api.c
index f5221749d180..7fca9c10ffcf 100644
--- a/drivers/s390/crypto/zcrypt_api.c
+++ b/drivers/s390/crypto/zcrypt_api.c
@@ -35,7 +35,6 @@
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <linux/compat.h>
-#include <linux/smp_lock.h>
#include <linux/slab.h>
#include <asm/atomic.h>
#include <asm/uaccess.h>
diff --git a/drivers/s390/kvm/kvm_virtio.c b/drivers/s390/kvm/kvm_virtio.c
index 5a46b8c5d68a..375aeeaf9ea5 100644
--- a/drivers/s390/kvm/kvm_virtio.c
+++ b/drivers/s390/kvm/kvm_virtio.c
@@ -372,21 +372,22 @@ static void hotplug_devices(struct work_struct *dummy)
/*
* we emulate the request_irq behaviour on top of s390 extints
*/
-static void kvm_extint_handler(u16 code)
+static void kvm_extint_handler(unsigned int ext_int_code,
+ unsigned int param32, unsigned long param64)
{
struct virtqueue *vq;
u16 subcode;
u32 param;
- subcode = S390_lowcore.cpu_addr;
+ subcode = ext_int_code >> 16;
if ((subcode & 0xff00) != VIRTIO_SUBCODE_64)
return;
/* The LSB might be overloaded, so we have to mask it */
- vq = (struct virtqueue *)(S390_lowcore.ext_params2 & ~1UL);
+ vq = (struct virtqueue *)(param64 & ~1UL);
/* We use ext_params to decide what this interrupt means */
- param = S390_lowcore.ext_params & VIRTIO_PARAM_MASK;
+ param = param32 & VIRTIO_PARAM_MASK;
switch (param) {
case VIRTIO_PARAM_CONFIG_CHANGED:
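
With the signature change above, the external-interrupt handler receives the interruption code and parameters as arguments (ext_int_code, param32, param64) instead of reading them out of the lowcore: the subcode is the high halfword of ext_int_code, and the virtqueue pointer arrives in param64 with its overloaded low bit masked off. A hedged standalone sketch of that unpacking; the constant names come from the patch, but their values here are placeholders for illustration only.

#include <stdint.h>
#include <stdio.h>

#define VIRTIO_SUBCODE_64	0x0d00	/* assumed value, illustration only */
#define VIRTIO_PARAM_MASK	0xff	/* assumed value, illustration only */

/* Unpack the pieces the new handler signature delivers directly. */
static void kvm_extint_demo(unsigned int ext_int_code,
			    unsigned int param32, uint64_t param64)
{
	uint16_t subcode = ext_int_code >> 16;	/* high halfword = subcode */
	uint32_t param = param32 & VIRTIO_PARAM_MASK;
	uint64_t vq_addr = param64 & ~1ULL;	/* low bit is overloaded */

	if ((subcode & 0xff00) != VIRTIO_SUBCODE_64)
		return;
	printf("subcode=0x%x param=%u vq@0x%llx\n",
	       subcode, param, (unsigned long long)vq_addr);
}

int main(void)
{
	kvm_extint_demo(0x0d030000, 2, 0x10021);
	return 0;
}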
diff --git a/drivers/s390/net/qeth_core.h b/drivers/s390/net/qeth_core.h
index 6be43eb126b4..f47a714538db 100644
--- a/drivers/s390/net/qeth_core.h
+++ b/drivers/s390/net/qeth_core.h
@@ -440,7 +440,6 @@ struct qeth_qdio_out_q {
* index of buffer to be filled by driver; state EMPTY or PACKING
*/
int next_buf_to_fill;
- int sync_iqdio_error;
/*
* number of buffers that are currently filled (PRIMED)
* -> these buffers are hardware-owned
@@ -695,14 +694,6 @@ struct qeth_mc_mac {
int is_vmac;
};
-struct qeth_skb_data {
- __u32 magic;
- int count;
-};
-
-#define QETH_SKB_MAGIC 0x71657468
-#define QETH_SIGA_CC2_RETRIES 3
-
struct qeth_rx {
int b_count;
int b_index;
diff --git a/drivers/s390/net/qeth_core_main.c b/drivers/s390/net/qeth_core_main.c
index 764267062601..e6b2df0e73f5 100644
--- a/drivers/s390/net/qeth_core_main.c
+++ b/drivers/s390/net/qeth_core_main.c
@@ -877,8 +877,8 @@ out:
return;
}
-static void __qeth_clear_output_buffer(struct qeth_qdio_out_q *queue,
- struct qeth_qdio_out_buffer *buf, unsigned int qeth_skip_skb)
+static void qeth_clear_output_buffer(struct qeth_qdio_out_q *queue,
+ struct qeth_qdio_out_buffer *buf)
{
int i;
struct sk_buff *skb;
@@ -887,13 +887,11 @@ static void __qeth_clear_output_buffer(struct qeth_qdio_out_q *queue,
if (buf->buffer->element[0].flags & 0x40)
atomic_dec(&queue->set_pci_flags_count);
- if (!qeth_skip_skb) {
+ skb = skb_dequeue(&buf->skb_list);
+ while (skb) {
+ atomic_dec(&skb->users);
+ dev_kfree_skb_any(skb);
skb = skb_dequeue(&buf->skb_list);
- while (skb) {
- atomic_dec(&skb->users);
- dev_kfree_skb_any(skb);
- skb = skb_dequeue(&buf->skb_list);
- }
}
for (i = 0; i < QETH_MAX_BUFFER_ELEMENTS(queue->card); ++i) {
if (buf->buffer->element[i].addr && buf->is_header[i])
@@ -909,12 +907,6 @@ static void __qeth_clear_output_buffer(struct qeth_qdio_out_q *queue,
atomic_set(&buf->state, QETH_QDIO_BUF_EMPTY);
}
-static void qeth_clear_output_buffer(struct qeth_qdio_out_q *queue,
- struct qeth_qdio_out_buffer *buf)
-{
- __qeth_clear_output_buffer(queue, buf, 0);
-}
-
void qeth_clear_qdio_buffers(struct qeth_card *card)
{
int i, j;
@@ -2833,7 +2825,6 @@ static void qeth_flush_buffers(struct qeth_qdio_out_q *queue, int index,
}
}
- queue->sync_iqdio_error = 0;
queue->card->dev->trans_start = jiffies;
if (queue->card->options.performance_stats) {
queue->card->perf_stats.outbound_do_qdio_cnt++;
@@ -2849,10 +2840,6 @@ static void qeth_flush_buffers(struct qeth_qdio_out_q *queue, int index,
queue->card->perf_stats.outbound_do_qdio_time +=
qeth_get_micros() -
queue->card->perf_stats.outbound_do_qdio_start_time;
- if (rc > 0) {
- if (!(rc & QDIO_ERROR_SIGA_BUSY))
- queue->sync_iqdio_error = rc & 3;
- }
if (rc) {
queue->card->stats.tx_errors += count;
/* ignore temporary SIGA errors without busy condition */
@@ -2916,7 +2903,7 @@ void qeth_qdio_start_poll(struct ccw_device *ccwdev, int queue,
{
struct qeth_card *card = (struct qeth_card *)card_ptr;
- if (card->dev)
+ if (card->dev && (card->dev->flags & IFF_UP))
napi_schedule(&card->napi);
}
EXPORT_SYMBOL_GPL(qeth_qdio_start_poll);
@@ -2940,7 +2927,6 @@ void qeth_qdio_output_handler(struct ccw_device *ccwdev,
struct qeth_qdio_out_q *queue = card->qdio.out_qs[__queue];
struct qeth_qdio_out_buffer *buffer;
int i;
- unsigned qeth_send_err;
QETH_CARD_TEXT(card, 6, "qdouhdl");
if (qdio_error & QDIO_ERROR_ACTIVATE_CHECK_CONDITION) {
@@ -2956,9 +2942,8 @@ void qeth_qdio_output_handler(struct ccw_device *ccwdev,
}
for (i = first_element; i < (first_element + count); ++i) {
buffer = &queue->bufs[i % QDIO_MAX_BUFFERS_PER_Q];
- qeth_send_err = qeth_handle_send_error(card, buffer, qdio_error);
- __qeth_clear_output_buffer(queue, buffer,
- (qeth_send_err == QETH_SEND_ERROR_RETRY) ? 1 : 0);
+ qeth_handle_send_error(card, buffer, qdio_error);
+ qeth_clear_output_buffer(queue, buffer);
}
atomic_sub(count, &queue->used_buffers);
/* check if we need to do something on this outbound queue */
@@ -3183,10 +3168,7 @@ int qeth_do_send_packet_fast(struct qeth_card *card,
int offset, int hd_len)
{
struct qeth_qdio_out_buffer *buffer;
- struct sk_buff *skb1;
- struct qeth_skb_data *retry_ctrl;
int index;
- int rc;
/* spin until we get the queue ... */
while (atomic_cmpxchg(&queue->state, QETH_OUT_Q_UNLOCKED,
@@ -3205,25 +3187,6 @@ int qeth_do_send_packet_fast(struct qeth_card *card,
atomic_set(&queue->state, QETH_OUT_Q_UNLOCKED);
qeth_fill_buffer(queue, buffer, skb, hdr, offset, hd_len);
qeth_flush_buffers(queue, index, 1);
- if (queue->sync_iqdio_error == 2) {
- skb1 = skb_dequeue(&buffer->skb_list);
- while (skb1) {
- atomic_dec(&skb1->users);
- skb1 = skb_dequeue(&buffer->skb_list);
- }
- retry_ctrl = (struct qeth_skb_data *) &skb->cb[16];
- if (retry_ctrl->magic != QETH_SKB_MAGIC) {
- retry_ctrl->magic = QETH_SKB_MAGIC;
- retry_ctrl->count = 0;
- }
- if (retry_ctrl->count < QETH_SIGA_CC2_RETRIES) {
- retry_ctrl->count++;
- rc = dev_queue_xmit(skb);
- } else {
- dev_kfree_skb_any(skb);
- QETH_CARD_TEXT(card, 2, "qrdrop");
- }
- }
return 0;
out:
atomic_set(&queue->state, QETH_OUT_Q_UNLOCKED);
diff --git a/drivers/s390/scsi/zfcp_fc.h b/drivers/s390/scsi/zfcp_fc.h
index 938d50360166..b464ae01086c 100644
--- a/drivers/s390/scsi/zfcp_fc.h
+++ b/drivers/s390/scsi/zfcp_fc.h
@@ -270,7 +270,7 @@ void zfcp_fc_eval_fcp_rsp(struct fcp_resp_with_ext *fcp_rsp,
if (unlikely(rsp_flags & FCP_SNS_LEN_VAL)) {
sense = (char *) &fcp_rsp[1];
if (rsp_flags & FCP_RSP_LEN_VAL)
- sense += fcp_rsp->ext.fr_sns_len;
+ sense += fcp_rsp->ext.fr_rsp_len;
sense_len = min(fcp_rsp->ext.fr_sns_len,
(u32) SCSI_SENSE_BUFFERSIZE);
memcpy(scsi->sense_buffer, sense, sense_len);
diff --git a/drivers/s390/scsi/zfcp_fsf.c b/drivers/s390/scsi/zfcp_fsf.c
index beaf0916ceab..be0317457147 100644
--- a/drivers/s390/scsi/zfcp_fsf.c
+++ b/drivers/s390/scsi/zfcp_fsf.c
@@ -532,9 +532,6 @@ static void zfcp_fsf_exchange_config_data_handler(struct zfcp_fsf_req *req)
fc_host_port_type(shost) = FC_PORTTYPE_UNKNOWN;
adapter->hydra_version = 0;
- atomic_set_mask(ZFCP_STATUS_ADAPTER_XCONFIG_OK,
- &adapter->status);
-
zfcp_fsf_link_down_info_eval(req,
&qtcb->header.fsf_status_qual.link_down_info);
break;
diff --git a/drivers/s390/scsi/zfcp_scsi.c b/drivers/s390/scsi/zfcp_scsi.c
index 50286d8707f3..6bd2dbc4c316 100644
--- a/drivers/s390/scsi/zfcp_scsi.c
+++ b/drivers/s390/scsi/zfcp_scsi.c
@@ -76,7 +76,7 @@ static void zfcp_scsi_command_fail(struct scsi_cmnd *scpnt, int result)
scpnt->scsi_done(scpnt);
}
-static int zfcp_scsi_queuecommand(struct scsi_cmnd *scpnt,
+static int zfcp_scsi_queuecommand_lck(struct scsi_cmnd *scpnt,
void (*done) (struct scsi_cmnd *))
{
struct zfcp_scsi_dev *zfcp_sdev = sdev_to_zfcp(scpnt->device);
@@ -127,6 +127,8 @@ static int zfcp_scsi_queuecommand(struct scsi_cmnd *scpnt,
return ret;
}
+static DEF_SCSI_QCMD(zfcp_scsi_queuecommand)
+
static int zfcp_scsi_slave_alloc(struct scsi_device *sdev)
{
struct fc_rport *rport = starget_to_rport(scsi_target(sdev));
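
The zfcp change above is the first of many identical conversions in the SCSI drivers that follow: the old queuecommand body, which expects to be called with the host lock held and takes a done callback, is renamed to a *_lck variant, and DEF_SCSI_QCMD generates a wrapper with the new (struct Scsi_Host *, struct scsi_cmnd *) prototype that takes the lock around the call. The real macro lives in <scsi/scsi_host.h>; the userspace sketch below only shows the shape of the pattern, with a pthread mutex standing in for the host lock.

#include <pthread.h>
#include <stdio.h>

struct host {
	pthread_mutex_t lock;
};

struct cmnd {
	int id;
	void (*done)(struct cmnd *);
};

/* Generate a locked wrapper around an existing *_lck queue function. */
#define DEF_QCMD(func)							\
static int func(struct host *h, struct cmnd *cmd)			\
{									\
	int rc;								\
	pthread_mutex_lock(&h->lock);					\
	rc = func##_lck(cmd, cmd->done);				\
	pthread_mutex_unlock(&h->lock);					\
	return rc;							\
}

static void my_done(struct cmnd *cmd)
{
	printf("command %d completed\n", cmd->id);
}

/* The pre-conversion body: still written as if the lock were already held. */
static int my_queuecommand_lck(struct cmnd *cmd, void (*done)(struct cmnd *))
{
	done(cmd);
	return 0;
}

DEF_QCMD(my_queuecommand)

int main(void)
{
	struct host h = { .lock = PTHREAD_MUTEX_INITIALIZER };
	struct cmnd c = { .id = 7, .done = my_done };

	return my_queuecommand(&h, &c);
}

During the transition the generated wrappers still serialize on the host lock, so each driver behaves exactly as before; drivers can later be converted to lockless queuecommand individually.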
diff --git a/drivers/s390/scsi/zfcp_unit.c b/drivers/s390/scsi/zfcp_unit.c
index 1119c535a667..20796ebc33ce 100644
--- a/drivers/s390/scsi/zfcp_unit.c
+++ b/drivers/s390/scsi/zfcp_unit.c
@@ -142,6 +142,8 @@ int zfcp_unit_add(struct zfcp_port *port, u64 fcp_lun)
return -ENOMEM;
}
+ get_device(&port->dev);
+
if (device_register(&unit->dev)) {
put_device(&unit->dev);
return -ENOMEM;
@@ -152,8 +154,6 @@ int zfcp_unit_add(struct zfcp_port *port, u64 fcp_lun)
return -EINVAL;
}
- get_device(&port->dev);
-
write_lock_irq(&port->unit_list_lock);
list_add_tail(&unit->list, &port->unit_list);
write_unlock_irq(&port->unit_list_lock);
diff --git a/drivers/sbus/char/jsflash.c b/drivers/sbus/char/jsflash.c
index 13f48e28a1e1..a624f5af4320 100644
--- a/drivers/sbus/char/jsflash.c
+++ b/drivers/sbus/char/jsflash.c
@@ -461,7 +461,7 @@ static int jsflash_init(void)
{
int rc;
struct jsflash *jsf;
- int node;
+ phandle node;
char banner[128];
struct linux_prom_registers reg0;
diff --git a/drivers/scsi/3w-9xxx.c b/drivers/scsi/3w-9xxx.c
index fcf08b3f52c1..b7bd5b0cc7aa 100644
--- a/drivers/scsi/3w-9xxx.c
+++ b/drivers/scsi/3w-9xxx.c
@@ -1765,7 +1765,7 @@ out:
} /* End twa_scsi_eh_reset() */
/* This is the main scsi queue function to handle scsi opcodes */
-static int twa_scsi_queue(struct scsi_cmnd *SCpnt, void (*done)(struct scsi_cmnd *))
+static int twa_scsi_queue_lck(struct scsi_cmnd *SCpnt, void (*done)(struct scsi_cmnd *))
{
int request_id, retval;
TW_Device_Extension *tw_dev = (TW_Device_Extension *)SCpnt->device->host->hostdata;
@@ -1812,6 +1812,8 @@ out:
return retval;
} /* End twa_scsi_queue() */
+static DEF_SCSI_QCMD(twa_scsi_queue)
+
/* This function hands scsi cdb's to the firmware */
static int twa_scsiop_execute_scsi(TW_Device_Extension *tw_dev, int request_id, char *cdb, int use_sg, TW_SG_Entry *sglistarg)
{
diff --git a/drivers/scsi/3w-sas.c b/drivers/scsi/3w-sas.c
index 6a95d111d207..13e39e1fdfe2 100644
--- a/drivers/scsi/3w-sas.c
+++ b/drivers/scsi/3w-sas.c
@@ -1501,7 +1501,7 @@ out:
} /* End twl_scsi_eh_reset() */
/* This is the main scsi queue function to handle scsi opcodes */
-static int twl_scsi_queue(struct scsi_cmnd *SCpnt, void (*done)(struct scsi_cmnd *))
+static int twl_scsi_queue_lck(struct scsi_cmnd *SCpnt, void (*done)(struct scsi_cmnd *))
{
int request_id, retval;
TW_Device_Extension *tw_dev = (TW_Device_Extension *)SCpnt->device->host->hostdata;
@@ -1536,6 +1536,8 @@ out:
return retval;
} /* End twl_scsi_queue() */
+static DEF_SCSI_QCMD(twl_scsi_queue)
+
/* This function tells the controller to shut down */
static void __twl_shutdown(TW_Device_Extension *tw_dev)
{
diff --git a/drivers/scsi/3w-xxxx.c b/drivers/scsi/3w-xxxx.c
index b1125341f4c8..7fe96ff60c58 100644
--- a/drivers/scsi/3w-xxxx.c
+++ b/drivers/scsi/3w-xxxx.c
@@ -1947,7 +1947,7 @@ static int tw_scsiop_test_unit_ready_complete(TW_Device_Extension *tw_dev, int r
} /* End tw_scsiop_test_unit_ready_complete() */
/* This is the main scsi queue function to handle scsi opcodes */
-static int tw_scsi_queue(struct scsi_cmnd *SCpnt, void (*done)(struct scsi_cmnd *))
+static int tw_scsi_queue_lck(struct scsi_cmnd *SCpnt, void (*done)(struct scsi_cmnd *))
{
unsigned char *command = SCpnt->cmnd;
int request_id = 0;
@@ -2023,6 +2023,8 @@ static int tw_scsi_queue(struct scsi_cmnd *SCpnt, void (*done)(struct scsi_cmnd
return retval;
} /* End tw_scsi_queue() */
+static DEF_SCSI_QCMD(tw_scsi_queue)
+
/* This function is the interrupt service routine */
static irqreturn_t tw_interrupt(int irq, void *dev_instance)
{
diff --git a/drivers/scsi/53c700.c b/drivers/scsi/53c700.c
index 89fc1c8af86b..f672491774eb 100644
--- a/drivers/scsi/53c700.c
+++ b/drivers/scsi/53c700.c
@@ -167,7 +167,7 @@ MODULE_LICENSE("GPL");
#include "53c700_d.h"
-STATIC int NCR_700_queuecommand(struct scsi_cmnd *, void (*done)(struct scsi_cmnd *));
+STATIC int NCR_700_queuecommand(struct Scsi_Host *h, struct scsi_cmnd *);
STATIC int NCR_700_abort(struct scsi_cmnd * SCpnt);
STATIC int NCR_700_bus_reset(struct scsi_cmnd * SCpnt);
STATIC int NCR_700_host_reset(struct scsi_cmnd * SCpnt);
@@ -1749,8 +1749,8 @@ NCR_700_intr(int irq, void *dev_id)
return IRQ_RETVAL(handled);
}
-STATIC int
-NCR_700_queuecommand(struct scsi_cmnd *SCp, void (*done)(struct scsi_cmnd *))
+static int
+NCR_700_queuecommand_lck(struct scsi_cmnd *SCp, void (*done)(struct scsi_cmnd *))
{
struct NCR_700_Host_Parameters *hostdata =
(struct NCR_700_Host_Parameters *)SCp->device->host->hostdata[0];
@@ -1904,6 +1904,8 @@ NCR_700_queuecommand(struct scsi_cmnd *SCp, void (*done)(struct scsi_cmnd *))
return 0;
}
+STATIC DEF_SCSI_QCMD(NCR_700_queuecommand)
+
STATIC int
NCR_700_abort(struct scsi_cmnd * SCp)
{
diff --git a/drivers/scsi/BusLogic.c b/drivers/scsi/BusLogic.c
index fc0b4b81d552..f66c33b9ab41 100644
--- a/drivers/scsi/BusLogic.c
+++ b/drivers/scsi/BusLogic.c
@@ -2807,7 +2807,7 @@ static int BusLogic_host_reset(struct scsi_cmnd * SCpnt)
Outgoing Mailbox for execution by the associated Host Adapter.
*/
-static int BusLogic_QueueCommand(struct scsi_cmnd *Command, void (*CompletionRoutine) (struct scsi_cmnd *))
+static int BusLogic_QueueCommand_lck(struct scsi_cmnd *Command, void (*CompletionRoutine) (struct scsi_cmnd *))
{
struct BusLogic_HostAdapter *HostAdapter = (struct BusLogic_HostAdapter *) Command->device->host->hostdata;
struct BusLogic_TargetFlags *TargetFlags = &HostAdapter->TargetFlags[Command->device->id];
@@ -2994,6 +2994,7 @@ static int BusLogic_QueueCommand(struct scsi_cmnd *Command, void (*CompletionRou
return 0;
}
+static DEF_SCSI_QCMD(BusLogic_QueueCommand)
#if 0
/*
diff --git a/drivers/scsi/BusLogic.h b/drivers/scsi/BusLogic.h
index 73f237a1ed94..649fcb31f26d 100644
--- a/drivers/scsi/BusLogic.h
+++ b/drivers/scsi/BusLogic.h
@@ -1319,7 +1319,7 @@ static inline void BusLogic_IncrementSizeBucket(BusLogic_CommandSizeBuckets_T Co
*/
static const char *BusLogic_DriverInfo(struct Scsi_Host *);
-static int BusLogic_QueueCommand(struct scsi_cmnd *, void (*CompletionRoutine) (struct scsi_cmnd *));
+static int BusLogic_QueueCommand(struct Scsi_Host *h, struct scsi_cmnd *);
static int BusLogic_BIOSDiskParameters(struct scsi_device *, struct block_device *, sector_t, int *);
static int BusLogic_ProcDirectoryInfo(struct Scsi_Host *, char *, char **, off_t, int, int);
static int BusLogic_SlaveConfigure(struct scsi_device *);
diff --git a/drivers/scsi/NCR5380.c b/drivers/scsi/NCR5380.c
index 5d2f148889ad..9a5629f94f95 100644
--- a/drivers/scsi/NCR5380.c
+++ b/drivers/scsi/NCR5380.c
@@ -952,7 +952,7 @@ static void NCR5380_exit(struct Scsi_Host *instance)
* Locks: host lock taken by caller
*/
-static int NCR5380_queue_command(Scsi_Cmnd * cmd, void (*done) (Scsi_Cmnd *))
+static int NCR5380_queue_command_lck(Scsi_Cmnd * cmd, void (*done) (Scsi_Cmnd *))
{
struct Scsi_Host *instance = cmd->device->host;
struct NCR5380_hostdata *hostdata = (struct NCR5380_hostdata *) instance->hostdata;
@@ -1021,6 +1021,7 @@ static int NCR5380_queue_command(Scsi_Cmnd * cmd, void (*done) (Scsi_Cmnd *))
return 0;
}
+static DEF_SCSI_QCMD(NCR5380_queue_command)
/**
* NCR5380_main - NCR state machines
diff --git a/drivers/scsi/NCR5380.h b/drivers/scsi/NCR5380.h
index bdc468c9e1d9..fd40a32b1f6f 100644
--- a/drivers/scsi/NCR5380.h
+++ b/drivers/scsi/NCR5380.h
@@ -313,7 +313,7 @@ static void NCR5380_print(struct Scsi_Host *instance);
#endif
static int NCR5380_abort(Scsi_Cmnd * cmd);
static int NCR5380_bus_reset(Scsi_Cmnd * cmd);
-static int NCR5380_queue_command(Scsi_Cmnd * cmd, void (*done) (Scsi_Cmnd *));
+static int NCR5380_queue_command(struct Scsi_Host *, struct scsi_cmnd *);
static int __maybe_unused NCR5380_proc_info(struct Scsi_Host *instance,
char *buffer, char **start, off_t offset, int length, int inout);
diff --git a/drivers/scsi/NCR53c406a.c b/drivers/scsi/NCR53c406a.c
index 6961f78742ae..c91888a0a23c 100644
--- a/drivers/scsi/NCR53c406a.c
+++ b/drivers/scsi/NCR53c406a.c
@@ -693,7 +693,7 @@ static void wait_intr(void)
}
#endif
-static int NCR53c406a_queue(Scsi_Cmnd * SCpnt, void (*done) (Scsi_Cmnd *))
+static int NCR53c406a_queue_lck(Scsi_Cmnd * SCpnt, void (*done) (Scsi_Cmnd *))
{
int i;
@@ -726,6 +726,8 @@ static int NCR53c406a_queue(Scsi_Cmnd * SCpnt, void (*done) (Scsi_Cmnd *))
return 0;
}
+static DEF_SCSI_QCMD(NCR53c406a_queue)
+
static int NCR53c406a_host_reset(Scsi_Cmnd * SCpnt)
{
DEB(printk("NCR53c406a_reset called\n"));
diff --git a/drivers/scsi/a100u2w.c b/drivers/scsi/a100u2w.c
index dbbc601948e5..dc5ac6e528c4 100644
--- a/drivers/scsi/a100u2w.c
+++ b/drivers/scsi/a100u2w.c
@@ -911,7 +911,7 @@ static int inia100_build_scb(struct orc_host * host, struct orc_scb * scb, struc
* queue the command down to the controller
*/
-static int inia100_queue(struct scsi_cmnd * cmd, void (*done) (struct scsi_cmnd *))
+static int inia100_queue_lck(struct scsi_cmnd * cmd, void (*done) (struct scsi_cmnd *))
{
struct orc_scb *scb;
struct orc_host *host; /* Point to Host adapter control block */
@@ -930,6 +930,8 @@ static int inia100_queue(struct scsi_cmnd * cmd, void (*done) (struct scsi_cmnd
return 0;
}
+static DEF_SCSI_QCMD(inia100_queue)
+
/*****************************************************************************
Function name : inia100_abort
Description : Abort a queued command.
diff --git a/drivers/scsi/aacraid/linit.c b/drivers/scsi/aacraid/linit.c
index 29c0ed1cf507..2c93d9496d62 100644
--- a/drivers/scsi/aacraid/linit.c
+++ b/drivers/scsi/aacraid/linit.c
@@ -248,7 +248,7 @@ static struct aac_driver_ident aac_drivers[] = {
* TODO: unify with aac_scsi_cmd().
*/
-static int aac_queuecommand(struct scsi_cmnd *cmd, void (*done)(struct scsi_cmnd *))
+static int aac_queuecommand_lck(struct scsi_cmnd *cmd, void (*done)(struct scsi_cmnd *))
{
struct Scsi_Host *host = cmd->device->host;
struct aac_dev *dev = (struct aac_dev *)host->hostdata;
@@ -267,6 +267,8 @@ static int aac_queuecommand(struct scsi_cmnd *cmd, void (*done)(struct scsi_cmnd
return (aac_scsi_cmd(cmd) ? FAILED : 0);
}
+static DEF_SCSI_QCMD(aac_queuecommand)
+
/**
* aac_info - Returns the host adapter name
* @shost: Scsi host to report on
diff --git a/drivers/scsi/advansys.c b/drivers/scsi/advansys.c
index 0ec3da6f3e12..081c6de92bc5 100644
--- a/drivers/scsi/advansys.c
+++ b/drivers/scsi/advansys.c
@@ -9500,7 +9500,7 @@ static int asc_execute_scsi_cmnd(struct scsi_cmnd *scp)
* in the 'scp' result field.
*/
static int
-advansys_queuecommand(struct scsi_cmnd *scp, void (*done)(struct scsi_cmnd *))
+advansys_queuecommand_lck(struct scsi_cmnd *scp, void (*done)(struct scsi_cmnd *))
{
struct Scsi_Host *shost = scp->device->host;
int asc_res, result = 0;
@@ -9525,6 +9525,8 @@ advansys_queuecommand(struct scsi_cmnd *scp, void (*done)(struct scsi_cmnd *))
return result;
}
+static DEF_SCSI_QCMD(advansys_queuecommand)
+
static ushort __devinit AscGetEisaChipCfg(PortAddr iop_base)
{
PortAddr eisa_cfg_iop = (PortAddr) ASC_GET_EISA_SLOT(iop_base) |
diff --git a/drivers/scsi/aha152x.c b/drivers/scsi/aha152x.c
index 8eab8587ff21..c5169f01c1cd 100644
--- a/drivers/scsi/aha152x.c
+++ b/drivers/scsi/aha152x.c
@@ -1056,7 +1056,7 @@ static int aha152x_internal_queue(Scsi_Cmnd *SCpnt, struct completion *complete,
* queue a command
*
*/
-static int aha152x_queue(Scsi_Cmnd *SCpnt, void (*done)(Scsi_Cmnd *))
+static int aha152x_queue_lck(Scsi_Cmnd *SCpnt, void (*done)(Scsi_Cmnd *))
{
#if 0
if(*SCpnt->cmnd == REQUEST_SENSE) {
@@ -1070,6 +1070,8 @@ static int aha152x_queue(Scsi_Cmnd *SCpnt, void (*done)(Scsi_Cmnd *))
return aha152x_internal_queue(SCpnt, NULL, 0, done);
}
+static DEF_SCSI_QCMD(aha152x_queue)
+
/*
*
diff --git a/drivers/scsi/aha1542.c b/drivers/scsi/aha1542.c
index 4f785f254c1f..195823a51aab 100644
--- a/drivers/scsi/aha1542.c
+++ b/drivers/scsi/aha1542.c
@@ -558,7 +558,7 @@ static void aha1542_intr_handle(struct Scsi_Host *shost)
};
}
-static int aha1542_queuecommand(Scsi_Cmnd * SCpnt, void (*done) (Scsi_Cmnd *))
+static int aha1542_queuecommand_lck(Scsi_Cmnd * SCpnt, void (*done) (Scsi_Cmnd *))
{
unchar ahacmd = CMD_START_SCSI;
unchar direction;
@@ -718,6 +718,8 @@ static int aha1542_queuecommand(Scsi_Cmnd * SCpnt, void (*done) (Scsi_Cmnd *))
return 0;
}
+static DEF_SCSI_QCMD(aha1542_queuecommand)
+
/* Initialize mailboxes */
static void setup_mailboxes(int bse, struct Scsi_Host *shpnt)
{
diff --git a/drivers/scsi/aha1542.h b/drivers/scsi/aha1542.h
index 1db538552d56..b871d2b57f93 100644
--- a/drivers/scsi/aha1542.h
+++ b/drivers/scsi/aha1542.h
@@ -132,7 +132,7 @@ struct ccb { /* Command Control Block 5.3 */
};
static int aha1542_detect(struct scsi_host_template *);
-static int aha1542_queuecommand(Scsi_Cmnd *, void (*done)(Scsi_Cmnd *));
+static int aha1542_queuecommand(struct Scsi_Host *, struct scsi_cmnd *);
static int aha1542_bus_reset(Scsi_Cmnd * SCpnt);
static int aha1542_dev_reset(Scsi_Cmnd * SCpnt);
static int aha1542_host_reset(Scsi_Cmnd * SCpnt);
diff --git a/drivers/scsi/aha1740.c b/drivers/scsi/aha1740.c
index 0107a4cc3331..d058f1ab82b5 100644
--- a/drivers/scsi/aha1740.c
+++ b/drivers/scsi/aha1740.c
@@ -331,7 +331,7 @@ static irqreturn_t aha1740_intr_handle(int irq, void *dev_id)
return IRQ_RETVAL(handled);
}
-static int aha1740_queuecommand(Scsi_Cmnd * SCpnt, void (*done)(Scsi_Cmnd *))
+static int aha1740_queuecommand_lck(Scsi_Cmnd * SCpnt, void (*done)(Scsi_Cmnd *))
{
unchar direction;
unchar *cmd = (unchar *) SCpnt->cmnd;
@@ -503,6 +503,8 @@ static int aha1740_queuecommand(Scsi_Cmnd * SCpnt, void (*done)(Scsi_Cmnd *))
return 0;
}
+static DEF_SCSI_QCMD(aha1740_queuecommand)
+
/* Query the board for its irq_level and irq_type. Nothing else matters
in enhanced mode on an EISA bus. */
diff --git a/drivers/scsi/aic7xxx/aic79xx_osm.c b/drivers/scsi/aic7xxx/aic79xx_osm.c
index 88ad8482ef59..25d066624476 100644
--- a/drivers/scsi/aic7xxx/aic79xx_osm.c
+++ b/drivers/scsi/aic7xxx/aic79xx_osm.c
@@ -573,7 +573,7 @@ ahd_linux_info(struct Scsi_Host *host)
* Queue an SCB to the controller.
*/
static int
-ahd_linux_queue(struct scsi_cmnd * cmd, void (*scsi_done) (struct scsi_cmnd *))
+ahd_linux_queue_lck(struct scsi_cmnd * cmd, void (*scsi_done) (struct scsi_cmnd *))
{
struct ahd_softc *ahd;
struct ahd_linux_device *dev = scsi_transport_device_data(cmd->device);
@@ -588,6 +588,8 @@ ahd_linux_queue(struct scsi_cmnd * cmd, void (*scsi_done) (struct scsi_cmnd *))
return rtn;
}
+static DEF_SCSI_QCMD(ahd_linux_queue)
+
static struct scsi_target **
ahd_linux_target_in_softc(struct scsi_target *starget)
{
diff --git a/drivers/scsi/aic7xxx/aic7xxx_osm.c b/drivers/scsi/aic7xxx/aic7xxx_osm.c
index aeea7a61478e..4a359bb307c6 100644
--- a/drivers/scsi/aic7xxx/aic7xxx_osm.c
+++ b/drivers/scsi/aic7xxx/aic7xxx_osm.c
@@ -528,7 +528,7 @@ ahc_linux_info(struct Scsi_Host *host)
* Queue an SCB to the controller.
*/
static int
-ahc_linux_queue(struct scsi_cmnd * cmd, void (*scsi_done) (struct scsi_cmnd *))
+ahc_linux_queue_lck(struct scsi_cmnd * cmd, void (*scsi_done) (struct scsi_cmnd *))
{
struct ahc_softc *ahc;
struct ahc_linux_device *dev = scsi_transport_device_data(cmd->device);
@@ -548,6 +548,8 @@ ahc_linux_queue(struct scsi_cmnd * cmd, void (*scsi_done) (struct scsi_cmnd *))
return rtn;
}
+static DEF_SCSI_QCMD(ahc_linux_queue)
+
static inline struct scsi_target **
ahc_linux_target_in_softc(struct scsi_target *starget)
{
diff --git a/drivers/scsi/aic7xxx_old.c b/drivers/scsi/aic7xxx_old.c
index aee73fafccc8..4ff60a08df0f 100644
--- a/drivers/scsi/aic7xxx_old.c
+++ b/drivers/scsi/aic7xxx_old.c
@@ -10234,7 +10234,7 @@ static void aic7xxx_buildscb(struct aic7xxx_host *p, struct scsi_cmnd *cmd,
* Description:
* Queue a SCB to the controller.
*-F*************************************************************************/
-static int aic7xxx_queue(struct scsi_cmnd *cmd, void (*fn)(struct scsi_cmnd *))
+static int aic7xxx_queue_lck(struct scsi_cmnd *cmd, void (*fn)(struct scsi_cmnd *))
{
struct aic7xxx_host *p;
struct aic7xxx_scb *scb;
@@ -10292,6 +10292,8 @@ static int aic7xxx_queue(struct scsi_cmnd *cmd, void (*fn)(struct scsi_cmnd *))
return (0);
}
+static DEF_SCSI_QCMD(aic7xxx_queue)
+
/*+F*************************************************************************
* Function:
* aic7xxx_bus_device_reset
diff --git a/drivers/scsi/arcmsr/arcmsr_hba.c b/drivers/scsi/arcmsr/arcmsr_hba.c
index 05a78e515a24..17e3df4f016f 100644
--- a/drivers/scsi/arcmsr/arcmsr_hba.c
+++ b/drivers/scsi/arcmsr/arcmsr_hba.c
@@ -85,8 +85,7 @@ static int arcmsr_abort(struct scsi_cmnd *);
static int arcmsr_bus_reset(struct scsi_cmnd *);
static int arcmsr_bios_param(struct scsi_device *sdev,
struct block_device *bdev, sector_t capacity, int *info);
-static int arcmsr_queue_command(struct scsi_cmnd *cmd,
- void (*done) (struct scsi_cmnd *));
+static int arcmsr_queue_command(struct Scsi_Host *h, struct scsi_cmnd *cmd);
static int arcmsr_probe(struct pci_dev *pdev,
const struct pci_device_id *id);
static void arcmsr_remove(struct pci_dev *pdev);
@@ -2081,7 +2080,7 @@ static void arcmsr_handle_virtual_command(struct AdapterControlBlock *acb,
}
}
-static int arcmsr_queue_command(struct scsi_cmnd *cmd,
+static int arcmsr_queue_command_lck(struct scsi_cmnd *cmd,
void (* done)(struct scsi_cmnd *))
{
struct Scsi_Host *host = cmd->device->host;
@@ -2124,6 +2123,8 @@ static int arcmsr_queue_command(struct scsi_cmnd *cmd,
return 0;
}
+static DEF_SCSI_QCMD(arcmsr_queue_command)
+
static bool arcmsr_get_hba_config(struct AdapterControlBlock *acb)
{
struct MessageUnit_A __iomem *reg = acb->pmuA;
diff --git a/drivers/scsi/arm/acornscsi.c b/drivers/scsi/arm/acornscsi.c
index 918ccf818757..ec166726b314 100644
--- a/drivers/scsi/arm/acornscsi.c
+++ b/drivers/scsi/arm/acornscsi.c
@@ -2511,7 +2511,7 @@ acornscsi_intr(int irq, void *dev_id)
* done - function called on completion, with pointer to command descriptor
* Returns : 0, or < 0 on error.
*/
-int acornscsi_queuecmd(struct scsi_cmnd *SCpnt,
+static int acornscsi_queuecmd_lck(struct scsi_cmnd *SCpnt,
void (*done)(struct scsi_cmnd *))
{
AS_Host *host = (AS_Host *)SCpnt->device->host->hostdata;
@@ -2561,6 +2561,8 @@ int acornscsi_queuecmd(struct scsi_cmnd *SCpnt,
return 0;
}
+DEF_SCSI_QCMD(acornscsi_queuecmd)
+
/*
* Prototype: void acornscsi_reportstatus(struct scsi_cmnd **SCpntp1, struct scsi_cmnd **SCpntp2, int result)
* Purpose : pass a result to *SCpntp1, and check if *SCpntp1 = *SCpntp2
diff --git a/drivers/scsi/arm/fas216.c b/drivers/scsi/arm/fas216.c
index 9e71ac611146..2b2ce21e227e 100644
--- a/drivers/scsi/arm/fas216.c
+++ b/drivers/scsi/arm/fas216.c
@@ -2198,7 +2198,7 @@ no_command:
* Returns: 0 on success, else error.
* Notes: io_request_lock is held, interrupts are disabled.
*/
-int fas216_queue_command(struct scsi_cmnd *SCpnt,
+static int fas216_queue_command_lck(struct scsi_cmnd *SCpnt,
void (*done)(struct scsi_cmnd *))
{
FAS216_Info *info = (FAS216_Info *)SCpnt->device->host->hostdata;
@@ -2240,6 +2240,8 @@ int fas216_queue_command(struct scsi_cmnd *SCpnt,
return result;
}
+DEF_SCSI_QCMD(fas216_queue_command)
+
/**
* fas216_internal_done - trigger restart of a waiting thread in fas216_noqueue_command
* @SCpnt: Command to wake
@@ -2263,7 +2265,7 @@ static void fas216_internal_done(struct scsi_cmnd *SCpnt)
* Returns: scsi result code.
* Notes: io_request_lock is held, interrupts are disabled.
*/
-int fas216_noqueue_command(struct scsi_cmnd *SCpnt,
+static int fas216_noqueue_command_lck(struct scsi_cmnd *SCpnt,
void (*done)(struct scsi_cmnd *))
{
FAS216_Info *info = (FAS216_Info *)SCpnt->device->host->hostdata;
@@ -2277,7 +2279,7 @@ int fas216_noqueue_command(struct scsi_cmnd *SCpnt,
BUG_ON(info->scsi.irq != NO_IRQ);
info->internal_done = 0;
- fas216_queue_command(SCpnt, fas216_internal_done);
+ fas216_queue_command_lck(SCpnt, fas216_internal_done);
/*
* This wastes time, since we can't return until the command is
@@ -2310,6 +2312,8 @@ int fas216_noqueue_command(struct scsi_cmnd *SCpnt,
return 0;
}
+DEF_SCSI_QCMD(fas216_noqueue_command)
+
/*
* Error handler timeout function. Indicate that we timed out,
* and wake up any error handler process so it can continue.
diff --git a/drivers/scsi/arm/fas216.h b/drivers/scsi/arm/fas216.h
index b65f4cf0eec9..f30f8d659dc4 100644
--- a/drivers/scsi/arm/fas216.h
+++ b/drivers/scsi/arm/fas216.h
@@ -331,23 +331,21 @@ extern int fas216_init (struct Scsi_Host *instance);
*/
extern int fas216_add (struct Scsi_Host *instance, struct device *dev);
-/* Function: int fas216_queue_command(struct scsi_cmnd *SCpnt, void (*done)(struct scsi_cmnd *))
+/* Function: int fas216_queue_command(struct Scsi_Host *h, struct scsi_cmnd *SCpnt)
* Purpose : queue a command for adapter to process.
- * Params : SCpnt - Command to queue
- * done - done function to call once command is complete
+ * Params : h - host adapter
+ * : SCpnt - Command to queue
* Returns : 0 - success, else error
*/
-extern int fas216_queue_command(struct scsi_cmnd *,
- void (*done)(struct scsi_cmnd *));
+extern int fas216_queue_command(struct Scsi_Host *h, struct scsi_cmnd *SCpnt);
-/* Function: int fas216_noqueue_command(istruct scsi_cmnd *SCpnt, void (*done)(struct scsi_cmnd *))
+/* Function: int fas216_noqueue_command(struct Scsi_Host *h, struct scsi_cmnd *SCpnt)
* Purpose : queue a command for adapter to process, and process it to completion.
- * Params : SCpnt - Command to queue
- * done - done function to call once command is complete
+ * Params : h - host adapter
+ * : SCpnt - Command to queue
* Returns : 0 - success, else error
*/
-extern int fas216_noqueue_command(struct scsi_cmnd *,
- void (*done)(struct scsi_cmnd *));
+extern int fas216_noqueue_command(struct Scsi_Host *, struct scsi_cmnd *);
/* Function: irqreturn_t fas216_intr (FAS216_Info *info)
* Purpose : handle interrupts from the interface to progress a command
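For the fas216 code the exported names stay the same while their signatures change, so board drivers that point a scsi_host_template at these helpers need no further edits. A minimal, purely illustrative hookup — the template below is hypothetical and only the .queuecommand line reflects the new calling convention (assumes <scsi/scsi_host.h> and fas216.h):

static struct scsi_host_template example_template = {
	.module		= THIS_MODULE,
	.name		= "example",
	/* new prototype: int (*)(struct Scsi_Host *, struct scsi_cmnd *) */
	.queuecommand	= fas216_queue_command,
	.can_queue	= 1,
	.this_id	= 7,
};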
diff --git a/drivers/scsi/atari_NCR5380.c b/drivers/scsi/atari_NCR5380.c
index 158ebc3644d8..88b2928b4d3b 100644
--- a/drivers/scsi/atari_NCR5380.c
+++ b/drivers/scsi/atari_NCR5380.c
@@ -910,7 +910,7 @@ static int __init NCR5380_init(struct Scsi_Host *instance, int flags)
*
*/
-static int NCR5380_queue_command(Scsi_Cmnd *cmd, void (*done)(Scsi_Cmnd *))
+static int NCR5380_queue_command_lck(Scsi_Cmnd *cmd, void (*done)(Scsi_Cmnd *))
{
SETUP_HOSTDATA(cmd->device->host);
Scsi_Cmnd *tmp;
@@ -1022,6 +1022,8 @@ static int NCR5380_queue_command(Scsi_Cmnd *cmd, void (*done)(Scsi_Cmnd *))
return 0;
}
+static DEF_SCSI_QCMD(NCR5380_queue_command)
+
/*
* Function : NCR5380_main (void)
*
diff --git a/drivers/scsi/atari_scsi.c b/drivers/scsi/atari_scsi.c
index ad7a23aef0ec..3e8658e2f154 100644
--- a/drivers/scsi/atari_scsi.c
+++ b/drivers/scsi/atari_scsi.c
@@ -572,23 +572,6 @@ static void falcon_get_lock(void)
}
-/* This is the wrapper function for NCR5380_queue_command(). It just
- * tries to get the lock on the ST-DMA (see above) and then calls the
- * original function.
- */
-
-#if 0
-int atari_queue_command(Scsi_Cmnd *cmd, void (*done)(Scsi_Cmnd *))
-{
- /* falcon_get_lock();
- * ++guenther: moved to NCR5380_queue_command() to prevent
- * race condition, see there for an explanation.
- */
- return NCR5380_queue_command(cmd, done);
-}
-#endif
-
-
int __init atari_scsi_detect(struct scsi_host_template *host)
{
static int called = 0;
diff --git a/drivers/scsi/atp870u.c b/drivers/scsi/atp870u.c
index ab5bdda6903e..76029d570beb 100644
--- a/drivers/scsi/atp870u.c
+++ b/drivers/scsi/atp870u.c
@@ -605,7 +605,7 @@ handled:
*
* Queue a command to the ATP queue. Called with the host lock held.
*/
-static int atp870u_queuecommand(struct scsi_cmnd * req_p,
+static int atp870u_queuecommand_lck(struct scsi_cmnd *req_p,
void (*done) (struct scsi_cmnd *))
{
unsigned char c;
@@ -694,6 +694,8 @@ static int atp870u_queuecommand(struct scsi_cmnd * req_p,
return 0;
}
+static DEF_SCSI_QCMD(atp870u_queuecommand)
+
/**
* send_s870 - send a command to the controller
* @host: host
diff --git a/drivers/scsi/bfa/bfa.h b/drivers/scsi/bfa/bfa.h
index ceaac65a91ff..ff2bd07161f7 100644
--- a/drivers/scsi/bfa/bfa.h
+++ b/drivers/scsi/bfa/bfa.h
@@ -29,13 +29,13 @@ struct bfa_s;
typedef void (*bfa_isr_func_t) (struct bfa_s *bfa, struct bfi_msg_s *m);
typedef void (*bfa_cb_cbfn_t) (void *cbarg, bfa_boolean_t complete);
-/**
+/*
* Interrupt message handlers
*/
void bfa_isr_unhandled(struct bfa_s *bfa, struct bfi_msg_s *m);
void bfa_isr_bind(enum bfi_mclass mc, bfa_isr_func_t isr_func);
-/**
+/*
* Request and response queue related defines
*/
#define BFA_REQQ_NELEMS_MIN (4)
@@ -58,9 +58,9 @@ void bfa_isr_bind(enum bfi_mclass mc, bfa_isr_func_t isr_func);
#define bfa_reqq_produce(__bfa, __reqq) do { \
(__bfa)->iocfc.req_cq_pi[__reqq]++; \
(__bfa)->iocfc.req_cq_pi[__reqq] &= \
- ((__bfa)->iocfc.cfg.drvcfg.num_reqq_elems - 1); \
- bfa_reg_write((__bfa)->iocfc.bfa_regs.cpe_q_pi[__reqq], \
- (__bfa)->iocfc.req_cq_pi[__reqq]); \
+ ((__bfa)->iocfc.cfg.drvcfg.num_reqq_elems - 1); \
+ writel((__bfa)->iocfc.req_cq_pi[__reqq], \
+ (__bfa)->iocfc.bfa_regs.cpe_q_pi[__reqq]); \
mmiowb(); \
} while (0)
@@ -76,7 +76,7 @@ void bfa_isr_bind(enum bfi_mclass mc, bfa_isr_func_t isr_func);
(__index) &= ((__size) - 1); \
} while (0)
-/**
+/*
* Queue element to wait for room in request queue. FIFO order is
* maintained when fullfilling requests.
*/
@@ -86,7 +86,7 @@ struct bfa_reqq_wait_s {
void *cbarg;
};
-/**
+/*
* Circular queue usage assignments
*/
enum {
@@ -113,7 +113,7 @@ bfa_reqq_winit(struct bfa_reqq_wait_s *wqe, void (*qresume) (void *cbarg),
#define bfa_reqq(__bfa, __reqq) (&(__bfa)->reqq_waitq[__reqq])
-/**
+/*
* static inline void
* bfa_reqq_wait(struct bfa_s *bfa, int reqq, struct bfa_reqq_wait_s *wqe)
*/
@@ -130,7 +130,7 @@ bfa_reqq_winit(struct bfa_reqq_wait_s *wqe, void (*qresume) (void *cbarg),
#define bfa_reqq_wcancel(__wqe) list_del(&(__wqe)->qe)
-/**
+/*
* Generic BFA callback element.
*/
struct bfa_cb_qe_s {
@@ -163,7 +163,7 @@ struct bfa_cb_qe_s {
} while (0)
-/**
+/*
* PCI devices supported by the current BFA
*/
struct bfa_pciid_s {
@@ -173,7 +173,7 @@ struct bfa_pciid_s {
extern char bfa_version[];
-/**
+/*
* BFA memory resources
*/
enum bfa_mem_type {
@@ -202,19 +202,19 @@ struct bfa_meminfo_s {
((_m)->meminfo[BFA_MEM_TYPE_DMA - 1].dma_curp)
struct bfa_iocfc_regs_s {
- bfa_os_addr_t intr_status;
- bfa_os_addr_t intr_mask;
- bfa_os_addr_t cpe_q_pi[BFI_IOC_MAX_CQS];
- bfa_os_addr_t cpe_q_ci[BFI_IOC_MAX_CQS];
- bfa_os_addr_t cpe_q_depth[BFI_IOC_MAX_CQS];
- bfa_os_addr_t cpe_q_ctrl[BFI_IOC_MAX_CQS];
- bfa_os_addr_t rme_q_ci[BFI_IOC_MAX_CQS];
- bfa_os_addr_t rme_q_pi[BFI_IOC_MAX_CQS];
- bfa_os_addr_t rme_q_depth[BFI_IOC_MAX_CQS];
- bfa_os_addr_t rme_q_ctrl[BFI_IOC_MAX_CQS];
+ void __iomem *intr_status;
+ void __iomem *intr_mask;
+ void __iomem *cpe_q_pi[BFI_IOC_MAX_CQS];
+ void __iomem *cpe_q_ci[BFI_IOC_MAX_CQS];
+ void __iomem *cpe_q_depth[BFI_IOC_MAX_CQS];
+ void __iomem *cpe_q_ctrl[BFI_IOC_MAX_CQS];
+ void __iomem *rme_q_ci[BFI_IOC_MAX_CQS];
+ void __iomem *rme_q_pi[BFI_IOC_MAX_CQS];
+ void __iomem *rme_q_depth[BFI_IOC_MAX_CQS];
+ void __iomem *rme_q_ctrl[BFI_IOC_MAX_CQS];
};
-/**
+/*
* MSIX vector handlers
*/
#define BFA_MSIX_MAX_VECTORS 22
@@ -224,7 +224,7 @@ struct bfa_msix_s {
bfa_msix_handler_t handler[BFA_MSIX_MAX_VECTORS];
};
-/**
+/*
* Chip specific interfaces
*/
struct bfa_hwif_s {
@@ -343,7 +343,7 @@ int bfa_iocfc_get_pbc_vports(struct bfa_s *bfa,
struct bfi_pbc_vport_s *pbc_vport);
-/**
+/*
*----------------------------------------------------------------------
* BFA public interfaces
*----------------------------------------------------------------------
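Besides the comment-style cleanup, the bfa changes swap the private bfa_reg_read()/bfa_reg_write() accessors for the kernel's readl()/writel(), and the register addresses become void __iomem * so the tools can type-check them. Note that the argument order flips in the process: the old wrappers took (address, value), while writel() takes (value, address). A small sketch of the new shape — the function and parameter names here are made up for illustration (assumes <linux/io.h>):

/* old:  bfa_reg_write(regs->intr_mask, mask);
 *       val = bfa_reg_read(regs->intr_status);
 * new:  writel(mask, regs->intr_mask);
 *       val = readl(regs->intr_status);
 */
static void example_mask_intrs(void __iomem *intr_mask, u32 mask)
{
	writel(mask, intr_mask);	/* value first, __iomem address second */
}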
diff --git a/drivers/scsi/bfa/bfa_cb_ioim.h b/drivers/scsi/bfa/bfa_cb_ioim.h
index a989a94c38da..6f021015f1f6 100644
--- a/drivers/scsi/bfa/bfa_cb_ioim.h
+++ b/drivers/scsi/bfa/bfa_cb_ioim.h
@@ -37,18 +37,18 @@ bfad_int_to_lun(u32 luno)
} lun;
lun.bfa_lun = 0;
- lun.scsi_lun[0] = bfa_os_htons(luno);
+ lun.scsi_lun[0] = cpu_to_be16(luno);
return lun.bfa_lun;
}
-/**
+/*
* Get LUN for the I/O request
*/
#define bfa_cb_ioim_get_lun(__dio) \
bfad_int_to_lun(((struct scsi_cmnd *)__dio)->device->lun)
-/**
+/*
* Get CDB for the I/O request
*/
static inline u8 *
@@ -59,7 +59,7 @@ bfa_cb_ioim_get_cdb(struct bfad_ioim_s *dio)
return (u8 *) cmnd->cmnd;
}
-/**
+/*
* Get I/O direction (read/write) for the I/O request
*/
static inline enum fcp_iodir
@@ -77,7 +77,7 @@ bfa_cb_ioim_get_iodir(struct bfad_ioim_s *dio)
return FCP_IODIR_NONE;
}
-/**
+/*
* Get IO size in bytes for the I/O request
*/
static inline u32
@@ -88,7 +88,7 @@ bfa_cb_ioim_get_size(struct bfad_ioim_s *dio)
return scsi_bufflen(cmnd);
}
-/**
+/*
* Get timeout for the I/O request
*/
static inline u8
@@ -104,7 +104,7 @@ bfa_cb_ioim_get_timeout(struct bfad_ioim_s *dio)
return 0;
}
-/**
+/*
* Get Command Reference Number for the I/O request. 0 if none.
*/
static inline u8
@@ -113,7 +113,7 @@ bfa_cb_ioim_get_crn(struct bfad_ioim_s *dio)
return 0;
}
-/**
+/*
* Get SAM-3 priority for the I/O request. 0 is default.
*/
static inline u8
@@ -122,7 +122,7 @@ bfa_cb_ioim_get_priority(struct bfad_ioim_s *dio)
return 0;
}
-/**
+/*
* Get task attributes for the I/O request. Default is FCP_TASK_ATTR_SIMPLE(0).
*/
static inline u8
@@ -148,7 +148,7 @@ bfa_cb_ioim_get_taskattr(struct bfad_ioim_s *dio)
return task_attr;
}
-/**
+/*
* Get CDB length in bytes for the I/O request. Default is FCP_CMND_CDB_LEN(16).
*/
static inline u8
@@ -159,7 +159,7 @@ bfa_cb_ioim_get_cdblen(struct bfad_ioim_s *dio)
return cmnd->cmd_len;
}
-/**
+/*
* Assign queue to be used for the I/O request. This value depends on whether
* the driver wants to use the queues via any specific algorithm. Currently,
* this is not supported.
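The same conversion drops the bfa_os_htons()/bfa_os_ntohs() wrappers in favour of the kernel's explicit big-endian helpers, cpu_to_be16()/be16_to_cpu() (with 32-bit variants where needed), which also lets sparse check the __be16 annotations. A minimal sketch with a made-up structure, assuming <asm/byteorder.h>:

struct example_wire_hdr {
	__be16	payload_len;			/* big-endian on the wire */
};

static void example_set_len(struct example_wire_hdr *hdr, u16 len)
{
	hdr->payload_len = cpu_to_be16(len);	/* was bfa_os_htons(len) */
}

static u16 example_get_len(const struct example_wire_hdr *hdr)
{
	return be16_to_cpu(hdr->payload_len);	/* was bfa_os_ntohs(...) */
}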
diff --git a/drivers/scsi/bfa/bfa_core.c b/drivers/scsi/bfa/bfa_core.c
index c2fa07f2485d..2345f48dc57f 100644
--- a/drivers/scsi/bfa/bfa_core.c
+++ b/drivers/scsi/bfa/bfa_core.c
@@ -21,11 +21,11 @@
BFA_TRC_FILE(HAL, CORE);
-/**
+/*
* BFA IOC FC related definitions
*/
-/**
+/*
* IOC local definitions
*/
#define BFA_IOCFC_TOV 5000 /* msecs */
@@ -54,7 +54,7 @@ enum {
#define DEF_CFG_NUM_SBOOT_TGTS 16
#define DEF_CFG_NUM_SBOOT_LUNS 16
-/**
+/*
* forward declaration for IOC FC functions
*/
static void bfa_iocfc_enable_cbfn(void *bfa_arg, enum bfa_status status);
@@ -63,7 +63,7 @@ static void bfa_iocfc_hbfail_cbfn(void *bfa_arg);
static void bfa_iocfc_reset_cbfn(void *bfa_arg);
static struct bfa_ioc_cbfn_s bfa_iocfc_cbfn;
-/**
+/*
* BFA Interrupt handling functions
*/
static void
@@ -86,7 +86,7 @@ bfa_reqq_resume(struct bfa_s *bfa, int qid)
waitq = bfa_reqq(bfa, qid);
list_for_each_safe(qe, qen, waitq) {
- /**
+ /*
* Callback only as long as there is room in request queue
*/
if (bfa_reqq_full(bfa, qid))
@@ -104,7 +104,7 @@ bfa_msix_all(struct bfa_s *bfa, int vec)
bfa_intx(bfa);
}
-/**
+/*
* hal_intr_api
*/
bfa_boolean_t
@@ -113,15 +113,15 @@ bfa_intx(struct bfa_s *bfa)
u32 intr, qintr;
int queue;
- intr = bfa_reg_read(bfa->iocfc.bfa_regs.intr_status);
+ intr = readl(bfa->iocfc.bfa_regs.intr_status);
if (!intr)
return BFA_FALSE;
- /**
+ /*
* RME completion queue interrupt
*/
qintr = intr & __HFN_INT_RME_MASK;
- bfa_reg_write(bfa->iocfc.bfa_regs.intr_status, qintr);
+ writel(qintr, bfa->iocfc.bfa_regs.intr_status);
for (queue = 0; queue < BFI_IOC_MAX_CQS_ASIC; queue++) {
if (intr & (__HFN_INT_RME_Q0 << queue))
@@ -131,11 +131,11 @@ bfa_intx(struct bfa_s *bfa)
if (!intr)
return BFA_TRUE;
- /**
+ /*
* CPE completion queue interrupt
*/
qintr = intr & __HFN_INT_CPE_MASK;
- bfa_reg_write(bfa->iocfc.bfa_regs.intr_status, qintr);
+ writel(qintr, bfa->iocfc.bfa_regs.intr_status);
for (queue = 0; queue < BFI_IOC_MAX_CQS_ASIC; queue++) {
if (intr & (__HFN_INT_CPE_Q0 << queue))
@@ -153,13 +153,13 @@ bfa_intx(struct bfa_s *bfa)
void
bfa_intx_enable(struct bfa_s *bfa)
{
- bfa_reg_write(bfa->iocfc.bfa_regs.intr_mask, bfa->iocfc.intr_mask);
+ writel(bfa->iocfc.intr_mask, bfa->iocfc.bfa_regs.intr_mask);
}
void
bfa_intx_disable(struct bfa_s *bfa)
{
- bfa_reg_write(bfa->iocfc.bfa_regs.intr_mask, -1L);
+ writel(-1L, bfa->iocfc.bfa_regs.intr_mask);
}
void
@@ -188,8 +188,8 @@ bfa_isr_enable(struct bfa_s *bfa)
__HFN_INT_RME_Q6 | __HFN_INT_RME_Q7 |
__HFN_INT_MBOX_LPU1);
- bfa_reg_write(bfa->iocfc.bfa_regs.intr_status, intr_unmask);
- bfa_reg_write(bfa->iocfc.bfa_regs.intr_mask, ~intr_unmask);
+ writel(intr_unmask, bfa->iocfc.bfa_regs.intr_status);
+ writel(~intr_unmask, bfa->iocfc.bfa_regs.intr_mask);
bfa->iocfc.intr_mask = ~intr_unmask;
bfa_isr_mode_set(bfa, bfa->msix.nvecs != 0);
}
@@ -198,7 +198,7 @@ void
bfa_isr_disable(struct bfa_s *bfa)
{
bfa_isr_mode_set(bfa, BFA_FALSE);
- bfa_reg_write(bfa->iocfc.bfa_regs.intr_mask, -1L);
+ writel(-1L, bfa->iocfc.bfa_regs.intr_mask);
bfa_msix_uninstall(bfa);
}
@@ -211,7 +211,7 @@ bfa_msix_reqq(struct bfa_s *bfa, int qid)
bfa->iocfc.hwif.hw_reqq_ack(bfa, qid);
- /**
+ /*
* Resume any pending requests in the corresponding reqq.
*/
waitq = bfa_reqq(bfa, qid);
@@ -259,14 +259,14 @@ bfa_msix_rspq(struct bfa_s *bfa, int qid)
}
}
- /**
+ /*
* update CI
*/
bfa_rspq_ci(bfa, qid) = pi;
- bfa_reg_write(bfa->iocfc.bfa_regs.rme_q_ci[qid], pi);
+ writel(pi, bfa->iocfc.bfa_regs.rme_q_ci[qid]);
mmiowb();
- /**
+ /*
* Resume any pending requests in the corresponding reqq.
*/
waitq = bfa_reqq(bfa, qid);
@@ -279,7 +279,7 @@ bfa_msix_lpu_err(struct bfa_s *bfa, int vec)
{
u32 intr, curr_value;
- intr = bfa_reg_read(bfa->iocfc.bfa_regs.intr_status);
+ intr = readl(bfa->iocfc.bfa_regs.intr_status);
if (intr & (__HFN_INT_MBOX_LPU0 | __HFN_INT_MBOX_LPU1))
bfa_msix_lpu(bfa);
@@ -289,30 +289,30 @@ bfa_msix_lpu_err(struct bfa_s *bfa, int vec)
if (intr) {
if (intr & __HFN_INT_LL_HALT) {
- /**
+ /*
* If LL_HALT bit is set then FW Init Halt LL Port
* Register needs to be cleared as well so Interrupt
* Status Register will be cleared.
*/
- curr_value = bfa_reg_read(bfa->ioc.ioc_regs.ll_halt);
+ curr_value = readl(bfa->ioc.ioc_regs.ll_halt);
curr_value &= ~__FW_INIT_HALT_P;
- bfa_reg_write(bfa->ioc.ioc_regs.ll_halt, curr_value);
+ writel(curr_value, bfa->ioc.ioc_regs.ll_halt);
}
if (intr & __HFN_INT_ERR_PSS) {
- /**
+ /*
* ERR_PSS bit needs to be cleared as well in case
* interrups are shared so driver's interrupt handler is
* still called eventhough it is already masked out.
*/
- curr_value = bfa_reg_read(
+ curr_value = readl(
bfa->ioc.ioc_regs.pss_err_status_reg);
curr_value &= __PSS_ERR_STATUS_SET;
- bfa_reg_write(bfa->ioc.ioc_regs.pss_err_status_reg,
- curr_value);
+ writel(curr_value,
+ bfa->ioc.ioc_regs.pss_err_status_reg);
}
- bfa_reg_write(bfa->iocfc.bfa_regs.intr_status, intr);
+ writel(intr, bfa->iocfc.bfa_regs.intr_status);
bfa_msix_errint(bfa, intr);
}
}
@@ -323,11 +323,11 @@ bfa_isr_bind(enum bfi_mclass mc, bfa_isr_func_t isr_func)
bfa_isrs[mc] = isr_func;
}
-/**
+/*
* BFA IOC FC related functions
*/
-/**
+/*
* hal_ioc_pvt BFA IOC private functions
*/
@@ -366,7 +366,7 @@ bfa_iocfc_fw_cfg_sz(struct bfa_iocfc_cfg_s *cfg, u32 *dm_len)
BFA_CACHELINE_SZ);
}
-/**
+/*
* Use the Mailbox interface to send BFI_IOCFC_H2I_CFG_REQ
*/
static void
@@ -384,14 +384,14 @@ bfa_iocfc_send_cfg(void *bfa_arg)
bfa_iocfc_reset_queues(bfa);
- /**
+ /*
* initialize IOC configuration info
*/
cfg_info->endian_sig = BFI_IOC_ENDIAN_SIG;
cfg_info->num_cqs = cfg->fwcfg.num_cqs;
bfa_dma_be_addr_set(cfg_info->cfgrsp_addr, iocfc->cfgrsp_dma.pa);
- /**
+ /*
* dma map REQ and RSP circular queues and shadow pointers
*/
for (i = 0; i < cfg->fwcfg.num_cqs; i++) {
@@ -400,17 +400,17 @@ bfa_iocfc_send_cfg(void *bfa_arg)
bfa_dma_be_addr_set(cfg_info->req_shadow_ci[i],
iocfc->req_cq_shadow_ci[i].pa);
cfg_info->req_cq_elems[i] =
- bfa_os_htons(cfg->drvcfg.num_reqq_elems);
+ cpu_to_be16(cfg->drvcfg.num_reqq_elems);
bfa_dma_be_addr_set(cfg_info->rsp_cq_ba[i],
iocfc->rsp_cq_ba[i].pa);
bfa_dma_be_addr_set(cfg_info->rsp_shadow_pi[i],
iocfc->rsp_cq_shadow_pi[i].pa);
cfg_info->rsp_cq_elems[i] =
- bfa_os_htons(cfg->drvcfg.num_rspq_elems);
+ cpu_to_be16(cfg->drvcfg.num_rspq_elems);
}
- /**
+ /*
* Enable interrupt coalescing if it is driver init path
* and not ioc disable/enable path.
*/
@@ -419,7 +419,7 @@ bfa_iocfc_send_cfg(void *bfa_arg)
iocfc->cfgdone = BFA_FALSE;
- /**
+ /*
* dma map IOC configuration itself
*/
bfi_h2i_set(cfg_req.mh, BFI_MC_IOCFC, BFI_IOCFC_H2I_CFG_REQ,
@@ -440,9 +440,9 @@ bfa_iocfc_init_mem(struct bfa_s *bfa, void *bfad, struct bfa_iocfc_cfg_s *cfg,
iocfc->bfa = bfa;
iocfc->action = BFA_IOCFC_ACT_NONE;
- bfa_os_assign(iocfc->cfg, *cfg);
+ iocfc->cfg = *cfg;
- /**
+ /*
* Initialize chip specific handlers.
*/
if (bfa_asic_id_ct(bfa_ioc_devid(&bfa->ioc))) {
@@ -503,13 +503,13 @@ bfa_iocfc_mem_claim(struct bfa_s *bfa, struct bfa_iocfc_cfg_s *cfg,
for (i = 0; i < cfg->fwcfg.num_cqs; i++) {
iocfc->req_cq_ba[i].kva = dm_kva;
iocfc->req_cq_ba[i].pa = dm_pa;
- bfa_os_memset(dm_kva, 0, per_reqq_sz);
+ memset(dm_kva, 0, per_reqq_sz);
dm_kva += per_reqq_sz;
dm_pa += per_reqq_sz;
iocfc->rsp_cq_ba[i].kva = dm_kva;
iocfc->rsp_cq_ba[i].pa = dm_pa;
- bfa_os_memset(dm_kva, 0, per_rspq_sz);
+ memset(dm_kva, 0, per_rspq_sz);
dm_kva += per_rspq_sz;
dm_pa += per_rspq_sz;
}
@@ -559,7 +559,7 @@ bfa_iocfc_mem_claim(struct bfa_s *bfa, struct bfa_iocfc_cfg_s *cfg,
}
}
-/**
+/*
* Start BFA submodules.
*/
static void
@@ -573,7 +573,7 @@ bfa_iocfc_start_submod(struct bfa_s *bfa)
hal_mods[i]->start(bfa);
}
-/**
+/*
* Disable BFA submodules.
*/
static void
@@ -623,7 +623,7 @@ bfa_iocfc_disable_cb(void *bfa_arg, bfa_boolean_t compl)
complete(&bfad->disable_comp);
}
-/**
+/*
* Update BFA configuration from firmware configuration.
*/
static void
@@ -634,15 +634,15 @@ bfa_iocfc_cfgrsp(struct bfa_s *bfa)
struct bfa_iocfc_fwcfg_s *fwcfg = &cfgrsp->fwcfg;
fwcfg->num_cqs = fwcfg->num_cqs;
- fwcfg->num_ioim_reqs = bfa_os_ntohs(fwcfg->num_ioim_reqs);
- fwcfg->num_tskim_reqs = bfa_os_ntohs(fwcfg->num_tskim_reqs);
- fwcfg->num_fcxp_reqs = bfa_os_ntohs(fwcfg->num_fcxp_reqs);
- fwcfg->num_uf_bufs = bfa_os_ntohs(fwcfg->num_uf_bufs);
- fwcfg->num_rports = bfa_os_ntohs(fwcfg->num_rports);
+ fwcfg->num_ioim_reqs = be16_to_cpu(fwcfg->num_ioim_reqs);
+ fwcfg->num_tskim_reqs = be16_to_cpu(fwcfg->num_tskim_reqs);
+ fwcfg->num_fcxp_reqs = be16_to_cpu(fwcfg->num_fcxp_reqs);
+ fwcfg->num_uf_bufs = be16_to_cpu(fwcfg->num_uf_bufs);
+ fwcfg->num_rports = be16_to_cpu(fwcfg->num_rports);
iocfc->cfgdone = BFA_TRUE;
- /**
+ /*
* Configuration is complete - initialize/start submodules
*/
bfa_fcport_init(bfa);
@@ -665,7 +665,7 @@ bfa_iocfc_reset_queues(struct bfa_s *bfa)
}
}
-/**
+/*
* IOC enable request is complete
*/
static void
@@ -684,7 +684,7 @@ bfa_iocfc_enable_cbfn(void *bfa_arg, enum bfa_status status)
bfa_iocfc_send_cfg(bfa);
}
-/**
+/*
* IOC disable request is complete
*/
static void
@@ -705,7 +705,7 @@ bfa_iocfc_disable_cbfn(void *bfa_arg)
}
}
-/**
+/*
* Notify sub-modules of hardware failure.
*/
static void
@@ -723,7 +723,7 @@ bfa_iocfc_hbfail_cbfn(void *bfa_arg)
bfa);
}
-/**
+/*
* Actions on chip-reset completion.
*/
static void
@@ -735,11 +735,11 @@ bfa_iocfc_reset_cbfn(void *bfa_arg)
bfa_isr_enable(bfa);
}
-/**
+/*
* hal_ioc_public
*/
-/**
+/*
* Query IOC memory requirement information.
*/
void
@@ -754,7 +754,7 @@ bfa_iocfc_meminfo(struct bfa_iocfc_cfg_s *cfg, u32 *km_len,
*km_len += bfa_ioc_debug_trcsz(bfa_auto_recover);
}
-/**
+/*
* Query IOC memory requirement information.
*/
void
@@ -772,7 +772,7 @@ bfa_iocfc_attach(struct bfa_s *bfa, void *bfad, struct bfa_iocfc_cfg_s *cfg,
ioc->trcmod = bfa->trcmod;
bfa_ioc_attach(&bfa->ioc, bfa, &bfa_iocfc_cbfn, &bfa->timer_mod);
- /**
+ /*
* Set FC mode for BFA_PCI_DEVICE_ID_CT_FC.
*/
if (pcidev->device_id == BFA_PCI_DEVICE_ID_CT_FC)
@@ -790,7 +790,7 @@ bfa_iocfc_attach(struct bfa_s *bfa, void *bfad, struct bfa_iocfc_cfg_s *cfg,
INIT_LIST_HEAD(&bfa->reqq_waitq[i]);
}
-/**
+/*
* Query IOC memory requirement information.
*/
void
@@ -799,7 +799,7 @@ bfa_iocfc_detach(struct bfa_s *bfa)
bfa_ioc_detach(&bfa->ioc);
}
-/**
+/*
* Query IOC memory requirement information.
*/
void
@@ -809,7 +809,7 @@ bfa_iocfc_init(struct bfa_s *bfa)
bfa_ioc_enable(&bfa->ioc);
}
-/**
+/*
* IOC start called from bfa_start(). Called to start IOC operations
* at driver instantiation for this instance.
*/
@@ -820,7 +820,7 @@ bfa_iocfc_start(struct bfa_s *bfa)
bfa_iocfc_start_submod(bfa);
}
-/**
+/*
* IOC stop called from bfa_stop(). Called only when driver is unloaded
* for this instance.
*/
@@ -876,12 +876,12 @@ bfa_iocfc_get_attr(struct bfa_s *bfa, struct bfa_iocfc_attr_s *attr)
attr->intr_attr.coalesce = iocfc->cfginfo->intr_attr.coalesce;
attr->intr_attr.delay = iocfc->cfginfo->intr_attr.delay ?
- bfa_os_ntohs(iocfc->cfginfo->intr_attr.delay) :
- bfa_os_ntohs(iocfc->cfgrsp->intr_attr.delay);
+ be16_to_cpu(iocfc->cfginfo->intr_attr.delay) :
+ be16_to_cpu(iocfc->cfgrsp->intr_attr.delay);
attr->intr_attr.latency = iocfc->cfginfo->intr_attr.latency ?
- bfa_os_ntohs(iocfc->cfginfo->intr_attr.latency) :
- bfa_os_ntohs(iocfc->cfgrsp->intr_attr.latency);
+ be16_to_cpu(iocfc->cfginfo->intr_attr.latency) :
+ be16_to_cpu(iocfc->cfgrsp->intr_attr.latency);
attr->config = iocfc->cfg;
}
@@ -893,8 +893,8 @@ bfa_iocfc_israttr_set(struct bfa_s *bfa, struct bfa_iocfc_intr_attr_s *attr)
struct bfi_iocfc_set_intr_req_s *m;
iocfc->cfginfo->intr_attr.coalesce = attr->coalesce;
- iocfc->cfginfo->intr_attr.delay = bfa_os_htons(attr->delay);
- iocfc->cfginfo->intr_attr.latency = bfa_os_htons(attr->latency);
+ iocfc->cfginfo->intr_attr.delay = cpu_to_be16(attr->delay);
+ iocfc->cfginfo->intr_attr.latency = cpu_to_be16(attr->latency);
if (!bfa_iocfc_is_operational(bfa))
return BFA_STATUS_OK;
@@ -924,7 +924,7 @@ bfa_iocfc_set_snsbase(struct bfa_s *bfa, u64 snsbase_pa)
iocfc->cfginfo->sense_buf_len = (BFI_IOIM_SNSLEN - 1);
bfa_dma_be_addr_set(iocfc->cfginfo->ioim_snsbase, snsbase_pa);
}
-/**
+/*
* Enable IOC after it is disabled.
*/
void
@@ -953,7 +953,7 @@ bfa_iocfc_is_operational(struct bfa_s *bfa)
return bfa_ioc_is_operational(&bfa->ioc) && bfa->iocfc.cfgdone;
}
-/**
+/*
* Return boot target port wwns -- read from boot information in flash.
*/
void
@@ -998,11 +998,11 @@ bfa_iocfc_get_pbc_vports(struct bfa_s *bfa, struct bfi_pbc_vport_s *pbc_vport)
return cfgrsp->pbc_cfg.nvports;
}
-/**
+/*
* hal_api
*/
-/**
+/*
* Use this function query the memory requirement of the BFA library.
* This function needs to be called before bfa_attach() to get the
* memory required of the BFA layer for a given driver configuration.
@@ -1038,7 +1038,7 @@ bfa_cfg_get_meminfo(struct bfa_iocfc_cfg_s *cfg, struct bfa_meminfo_s *meminfo)
bfa_assert((cfg != NULL) && (meminfo != NULL));
- bfa_os_memset((void *)meminfo, 0, sizeof(struct bfa_meminfo_s));
+ memset((void *)meminfo, 0, sizeof(struct bfa_meminfo_s));
meminfo->meminfo[BFA_MEM_TYPE_KVA - 1].mem_type =
BFA_MEM_TYPE_KVA;
meminfo->meminfo[BFA_MEM_TYPE_DMA - 1].mem_type =
@@ -1055,7 +1055,7 @@ bfa_cfg_get_meminfo(struct bfa_iocfc_cfg_s *cfg, struct bfa_meminfo_s *meminfo)
meminfo->meminfo[BFA_MEM_TYPE_DMA - 1].mem_len = dm_len;
}
-/**
+/*
* Use this function to do attach the driver instance with the BFA
* library. This function will not trigger any HW initialization
* process (which will be done in bfa_init() call)
@@ -1092,7 +1092,7 @@ bfa_attach(struct bfa_s *bfa, void *bfad, struct bfa_iocfc_cfg_s *cfg,
bfa_assert((cfg != NULL) && (meminfo != NULL));
- /**
+ /*
* initialize all memory pointers for iterative allocation
*/
for (i = 0; i < BFA_MEM_TYPE_MAX; i++) {
@@ -1109,7 +1109,7 @@ bfa_attach(struct bfa_s *bfa, void *bfad, struct bfa_iocfc_cfg_s *cfg,
bfa_com_port_attach(bfa, meminfo);
}
-/**
+/*
* Use this function to delete a BFA IOC. IOC should be stopped (by
* calling bfa_stop()) before this function call.
*
@@ -1146,7 +1146,7 @@ bfa_init_plog(struct bfa_s *bfa, struct bfa_plog_s *plog)
bfa->plog = plog;
}
-/**
+/*
* Initialize IOC.
*
* This function will return immediately, when the IOC initialization is
@@ -1169,7 +1169,7 @@ bfa_init(struct bfa_s *bfa)
bfa_iocfc_init(bfa);
}
-/**
+/*
* Use this function initiate the IOC configuration setup. This function
* will return immediately.
*
@@ -1183,7 +1183,7 @@ bfa_start(struct bfa_s *bfa)
bfa_iocfc_start(bfa);
}
-/**
+/*
* Use this function quiese the IOC. This function will return immediately,
* when the IOC is actually stopped, the bfad->comp will be set.
*
@@ -1243,7 +1243,7 @@ bfa_attach_fcs(struct bfa_s *bfa)
bfa->fcs = BFA_TRUE;
}
-/**
+/*
* Periodic timer heart beat from driver
*/
void
@@ -1252,7 +1252,7 @@ bfa_timer_tick(struct bfa_s *bfa)
bfa_timer_beat(&bfa->timer_mod);
}
-/**
+/*
* Return the list of PCI vendor/device id lists supported by this
* BFA instance.
*/
@@ -1270,7 +1270,7 @@ bfa_get_pciids(struct bfa_pciid_s **pciids, int *npciids)
*pciids = __pciids;
}
-/**
+/*
* Use this function query the default struct bfa_iocfc_cfg_s value (compiled
* into BFA layer). The OS driver can then turn back and overwrite entries that
* have been configured by the user.
@@ -1328,7 +1328,7 @@ bfa_get_attr(struct bfa_s *bfa, struct bfa_ioc_attr_s *ioc_attr)
bfa_ioc_get_attr(&bfa->ioc, ioc_attr);
}
-/**
+/*
* Retrieve firmware trace information on IOC failure.
*/
bfa_status_t
@@ -1337,7 +1337,7 @@ bfa_debug_fwsave(struct bfa_s *bfa, void *trcdata, int *trclen)
return bfa_ioc_debug_fwsave(&bfa->ioc, trcdata, trclen);
}
-/**
+/*
* Clear the saved firmware trace information of an IOC.
*/
void
@@ -1346,7 +1346,7 @@ bfa_debug_fwsave_clear(struct bfa_s *bfa)
bfa_ioc_debug_fwsave_clear(&bfa->ioc);
}
-/**
+/*
* Fetch firmware trace data.
*
* @param[in] bfa BFA instance
@@ -1362,7 +1362,7 @@ bfa_debug_fwtrc(struct bfa_s *bfa, void *trcdata, int *trclen)
return bfa_ioc_debug_fwtrc(&bfa->ioc, trcdata, trclen);
}
-/**
+/*
* Dump firmware memory.
*
* @param[in] bfa BFA instance
@@ -1378,7 +1378,7 @@ bfa_debug_fwcore(struct bfa_s *bfa, void *buf, u32 *offset, int *buflen)
{
return bfa_ioc_debug_fwcore(&bfa->ioc, buf, offset, buflen);
}
-/**
+/*
* Reset hw semaphore & usage cnt regs and initialize.
*/
void
@@ -1388,7 +1388,7 @@ bfa_chip_reset(struct bfa_s *bfa)
bfa_ioc_pll_init(&bfa->ioc);
}
-/**
+/*
* Fetch firmware statistics data.
*
* @param[in] bfa BFA instance
diff --git a/drivers/scsi/bfa/bfa_cs.h b/drivers/scsi/bfa/bfa_cs.h
index 7260c74620f8..99f242b9aa31 100644
--- a/drivers/scsi/bfa/bfa_cs.h
+++ b/drivers/scsi/bfa/bfa_cs.h
@@ -15,7 +15,7 @@
* General Public License for more details.
*/
-/**
+/*
* bfa_cs.h BFA common services
*/
@@ -24,7 +24,7 @@
#include "bfa_os_inc.h"
-/**
+/*
* BFA TRC
*/
@@ -73,7 +73,7 @@ enum {
#define BFA_TRC_MOD_SH 10
#define BFA_TRC_MOD(__mod) ((BFA_TRC_ ## __mod) << BFA_TRC_MOD_SH)
-/**
+/*
* Define a new tracing file (module). Module should match one defined above.
*/
#define BFA_TRC_FILE(__mod, __submod) \
@@ -155,7 +155,7 @@ __bfa_trc32(struct bfa_trc_mod_s *trcm, int fileno, int line, u32 data)
#define bfa_trc_fp(_trcp, _data)
#endif
-/**
+/*
* @ BFA LOG interfaces
*/
#define bfa_assert(__cond) do { \
@@ -249,13 +249,13 @@ bfa_q_is_on_q_func(struct list_head *q, struct list_head *qe)
#define bfa_q_is_on_q(_q, _qe) \
bfa_q_is_on_q_func(_q, (struct list_head *)(_qe))
-/**
+/*
* @ BFA state machine interfaces
*/
typedef void (*bfa_sm_t)(void *sm, int event);
-/**
+/*
* oc - object class eg. bfa_ioc
* st - state, eg. reset
* otype - object type, eg. struct bfa_ioc_s
@@ -269,7 +269,7 @@ typedef void (*bfa_sm_t)(void *sm, int event);
#define bfa_sm_get_state(_sm) ((_sm)->sm)
#define bfa_sm_cmp_state(_sm, _state) ((_sm)->sm == (bfa_sm_t)(_state))
-/**
+/*
* For converting from state machine function to state encoding.
*/
struct bfa_sm_table_s {
@@ -279,12 +279,12 @@ struct bfa_sm_table_s {
};
#define BFA_SM(_sm) ((bfa_sm_t)(_sm))
-/**
+/*
* State machine with entry actions.
*/
typedef void (*bfa_fsm_t)(void *fsm, int event);
-/**
+/*
* oc - object class eg. bfa_ioc
* st - state, eg. reset
* otype - object type, eg. struct bfa_ioc_s
@@ -314,7 +314,7 @@ bfa_sm_to_state(struct bfa_sm_table_s *smt, bfa_sm_t sm)
return smt[i].state;
}
-/**
+/*
* @ Generic wait counter.
*/
@@ -340,7 +340,7 @@ bfa_wc_down(struct bfa_wc_s *wc)
wc->wc_resume(wc->wc_cbarg);
}
-/**
+/*
* Initialize a waiting counter.
*/
static inline void
@@ -352,7 +352,7 @@ bfa_wc_init(struct bfa_wc_s *wc, bfa_wc_resume_t wc_resume, void *wc_cbarg)
bfa_wc_up(wc);
}
-/**
+/*
* Wait for counter to reach zero
*/
static inline void
diff --git a/drivers/scsi/bfa/bfa_defs.h b/drivers/scsi/bfa/bfa_defs.h
index d49877ff5140..4b5b9e35abb9 100644
--- a/drivers/scsi/bfa/bfa_defs.h
+++ b/drivers/scsi/bfa/bfa_defs.h
@@ -24,7 +24,7 @@
#define BFA_MFG_SERIALNUM_SIZE 11
#define STRSZ(_n) (((_n) + 4) & ~3)
-/**
+/*
* Manufacturing card type
*/
enum {
@@ -45,7 +45,7 @@ enum {
#pragma pack(1)
-/**
+/*
* Check if Mezz card
*/
#define bfa_mfg_is_mezz(type) (( \
@@ -55,7 +55,7 @@ enum {
(type) == BFA_MFG_TYPE_LIGHTNING_P0 || \
(type) == BFA_MFG_TYPE_LIGHTNING))
-/**
+/*
* Check if the card having old wwn/mac handling
*/
#define bfa_mfg_is_old_wwn_mac_model(type) (( \
@@ -78,12 +78,12 @@ do { \
(m)[2] = t & 0xFF; \
} while (0)
-/**
+/*
* VPD data length
*/
#define BFA_MFG_VPD_LEN 512
-/**
+/*
* VPD vendor tag
*/
enum {
@@ -97,7 +97,7 @@ enum {
BFA_MFG_VPD_PCI_BRCD = 0xf8, /* PCI VPD Brocade */
};
-/**
+/*
* All numerical fields are in big-endian format.
*/
struct bfa_mfg_vpd_s {
@@ -112,7 +112,7 @@ struct bfa_mfg_vpd_s {
#pragma pack()
-/**
+/*
* Status return values
*/
enum bfa_status {
@@ -167,11 +167,11 @@ enum bfa_boolean {
#define BFA_STRING_32 32
#define BFA_VERSION_LEN 64
-/**
+/*
* ---------------------- adapter definitions ------------
*/
-/**
+/*
* BFA adapter level attributes.
*/
enum {
@@ -215,7 +215,7 @@ struct bfa_adapter_attr_s {
u8 trunk_capable;
};
-/**
+/*
* ---------------------- IOC definitions ------------
*/
@@ -224,7 +224,7 @@ enum {
BFA_IOC_CHIP_REV_LEN = 8,
};
-/**
+/*
* Driver and firmware versions.
*/
struct bfa_ioc_driver_attr_s {
@@ -236,7 +236,7 @@ struct bfa_ioc_driver_attr_s {
char ob_ver[BFA_VERSION_LEN]; /* openboot version */
};
-/**
+/*
* IOC PCI device attributes
*/
struct bfa_ioc_pci_attr_s {
@@ -249,7 +249,7 @@ struct bfa_ioc_pci_attr_s {
char chip_rev[BFA_IOC_CHIP_REV_LEN]; /* chip revision */
};
-/**
+/*
* IOC states
*/
enum bfa_ioc_state {
@@ -267,7 +267,7 @@ enum bfa_ioc_state {
BFA_IOC_ENABLING = 12, /* IOC is being enabled */
};
-/**
+/*
* IOC firmware stats
*/
struct bfa_fw_ioc_stats_s {
@@ -279,7 +279,7 @@ struct bfa_fw_ioc_stats_s {
u32 unknown_reqs;
};
-/**
+/*
* IOC driver stats
*/
struct bfa_ioc_drv_stats_s {
@@ -296,7 +296,7 @@ struct bfa_ioc_drv_stats_s {
u32 enable_replies;
};
-/**
+/*
* IOC statistics
*/
struct bfa_ioc_stats_s {
@@ -310,7 +310,7 @@ enum bfa_ioc_type_e {
BFA_IOC_TYPE_LL = 3,
};
-/**
+/*
* IOC attributes returned in queries
*/
struct bfa_ioc_attr_s {
@@ -323,11 +323,11 @@ struct bfa_ioc_attr_s {
u8 rsvd[7]; /* 64bit align */
};
-/**
+/*
* ---------------------- mfg definitions ------------
*/
-/**
+/*
* Checksum size
*/
#define BFA_MFG_CHKSUM_SIZE 16
@@ -340,7 +340,7 @@ struct bfa_ioc_attr_s {
#pragma pack(1)
-/**
+/*
* All numerical fields are in big-endian format.
*/
struct bfa_mfg_block_s {
@@ -373,11 +373,11 @@ struct bfa_mfg_block_s {
#pragma pack()
-/**
+/*
* ---------------------- pci definitions ------------
*/
-/**
+/*
* PCI device and vendor ID information
*/
enum {
@@ -392,14 +392,14 @@ enum {
((devid) == BFA_PCI_DEVICE_ID_CT || \
(devid) == BFA_PCI_DEVICE_ID_CT_FC)
-/**
+/*
* PCI sub-system device and vendor ID information
*/
enum {
BFA_PCI_FCOE_SSDEVICE_ID = 0x14,
};
-/**
+/*
* Maximum number of device address ranges mapped through different BAR(s)
*/
#define BFA_PCI_ACCESS_RANGES 1
@@ -430,7 +430,7 @@ enum {
#define BOOT_CFG_REV1 1
#define BOOT_CFG_VLAN 1
-/**
+/*
* Boot options setting. Boot options setting determines from where
* to get the boot lun information
*/
@@ -442,7 +442,7 @@ enum bfa_boot_bootopt {
};
#pragma pack(1)
-/**
+/*
* Boot lun information.
*/
struct bfa_boot_bootlun_s {
@@ -451,7 +451,7 @@ struct bfa_boot_bootlun_s {
};
#pragma pack()
-/**
+/*
* BOOT boot configuraton
*/
struct bfa_boot_pbc_s {
diff --git a/drivers/scsi/bfa/bfa_defs_fcs.h b/drivers/scsi/bfa/bfa_defs_fcs.h
index 96905d301828..191d34a58b9c 100644
--- a/drivers/scsi/bfa/bfa_defs_fcs.h
+++ b/drivers/scsi/bfa/bfa_defs_fcs.h
@@ -21,7 +21,7 @@
#include "bfa_fc.h"
#include "bfa_defs_svc.h"
-/**
+/*
* VF states
*/
enum bfa_vf_state {
@@ -35,7 +35,7 @@ enum bfa_vf_state {
BFA_VF_ISOLATED = 7, /* port isolated due to vf_id mismatch */
};
-/**
+/*
* VF statistics
*/
struct bfa_vf_stats_s {
@@ -55,7 +55,7 @@ struct bfa_vf_stats_s {
u32 resvd; /* padding for 64 bit alignment */
};
-/**
+/*
* VF attributes returned in queries
*/
struct bfa_vf_attr_s {
@@ -67,7 +67,7 @@ struct bfa_vf_attr_s {
#define BFA_FCS_MAX_LPORTS 256
#define BFA_FCS_FABRIC_IPADDR_SZ 16
-/**
+/*
* symbolic names for base port/virtual port
*/
#define BFA_SYMNAME_MAXLEN 128 /* 128 bytes */
@@ -75,7 +75,7 @@ struct bfa_lport_symname_s {
char symname[BFA_SYMNAME_MAXLEN];
};
-/**
+/*
* Roles of FCS port:
* - FCP IM and FCP TM roles cannot be enabled together for a FCS port
* - Create multiple ports if both IM and TM functions required.
@@ -86,19 +86,19 @@ enum bfa_lport_role {
BFA_LPORT_ROLE_FCP_MAX = BFA_LPORT_ROLE_FCP_IM,
};
-/**
+/*
* FCS port configuration.
*/
struct bfa_lport_cfg_s {
wwn_t pwwn; /* port wwn */
wwn_t nwwn; /* node wwn */
struct bfa_lport_symname_s sym_name; /* vm port symbolic name */
- bfa_boolean_t preboot_vp; /* vport created from PBC */
+ bfa_boolean_t preboot_vp; /* vport created from PBC */
enum bfa_lport_role roles; /* FCS port roles */
u8 tag[16]; /* opaque tag from application */
};
-/**
+/*
* FCS port states
*/
enum bfa_lport_state {
@@ -108,7 +108,7 @@ enum bfa_lport_state {
BFA_LPORT_OFFLINE = 3, /* No login to fabric */
};
-/**
+/*
* FCS port type.
*/
enum bfa_lport_type {
@@ -116,7 +116,7 @@ enum bfa_lport_type {
BFA_LPORT_TYPE_VIRTUAL,
};
-/**
+/*
* FCS port offline reason.
*/
enum bfa_lport_offline_reason {
@@ -128,7 +128,7 @@ enum bfa_lport_offline_reason {
BFA_LPORT_OFFLINE_FAB_LOGOUT,
};
-/**
+/*
* FCS lport info.
*/
struct bfa_lport_info_s {
@@ -150,7 +150,7 @@ struct bfa_lport_info_s {
};
-/**
+/*
* FCS port statistics
*/
struct bfa_lport_stats_s {
@@ -222,7 +222,7 @@ struct bfa_lport_stats_s {
* (max retry of plogi) */
};
-/**
+/*
* BFA port attribute returned in queries
*/
struct bfa_lport_attr_s {
@@ -239,7 +239,7 @@ struct bfa_lport_attr_s {
};
-/**
+/*
* VPORT states
*/
enum bfa_vport_state {
@@ -258,7 +258,7 @@ enum bfa_vport_state {
BFA_FCS_VPORT_MAX_STATE,
};
-/**
+/*
* vport statistics
*/
struct bfa_vport_stats_s {
@@ -296,7 +296,7 @@ struct bfa_vport_stats_s {
u32 rsvd;
};
-/**
+/*
* BFA vport attribute returned in queries
*/
struct bfa_vport_attr_s {
@@ -305,7 +305,7 @@ struct bfa_vport_attr_s {
u32 rsvd;
};
-/**
+/*
* FCS remote port states
*/
enum bfa_rport_state {
@@ -321,7 +321,7 @@ enum bfa_rport_state {
BFA_RPORT_NSDISC = 9, /* re-discover rport */
};
-/**
+/*
* Rport Scsi Function : Initiator/Target.
*/
enum bfa_rport_function {
@@ -329,7 +329,7 @@ enum bfa_rport_function {
BFA_RPORT_TARGET = 0x02, /* SCSI Target */
};
-/**
+/*
* port/node symbolic names for rport
*/
#define BFA_RPORT_SYMNAME_MAXLEN 255
@@ -337,7 +337,7 @@ struct bfa_rport_symname_s {
char symname[BFA_RPORT_SYMNAME_MAXLEN];
};
-/**
+/*
* FCS remote port statistics
*/
struct bfa_rport_stats_s {
@@ -374,7 +374,7 @@ struct bfa_rport_stats_s {
struct bfa_rport_hal_stats_s hal_stats; /* BFA rport stats */
};
-/**
+/*
* FCS remote port attributes returned in queries
*/
struct bfa_rport_attr_s {
@@ -411,7 +411,7 @@ struct bfa_rport_remote_link_stats_s {
#define BFA_MAX_IO_INDEX 7
#define BFA_NO_IO_INDEX 9
-/**
+/*
* FCS itnim states
*/
enum bfa_itnim_state {
@@ -425,7 +425,7 @@ enum bfa_itnim_state {
BFA_ITNIM_INITIATIOR = 7, /* initiator */
};
-/**
+/*
* FCS remote port statistics
*/
struct bfa_itnim_stats_s {
@@ -443,7 +443,7 @@ struct bfa_itnim_stats_s {
u32 rsvd; /* padding for 64 bit alignment */
};
-/**
+/*
* FCS itnim attributes returned in queries
*/
struct bfa_itnim_attr_s {
diff --git a/drivers/scsi/bfa/bfa_defs_svc.h b/drivers/scsi/bfa/bfa_defs_svc.h
index 56226fcf9470..e24e9f7ca81f 100644
--- a/drivers/scsi/bfa/bfa_defs_svc.h
+++ b/drivers/scsi/bfa/bfa_defs_svc.h
@@ -27,7 +27,7 @@
#define BFA_IOCFCOE_INTR_DELAY 25
#define BFA_IOCFCOE_INTR_LATENCY 5
-/**
+/*
* Interrupt coalescing configuration.
*/
#pragma pack(1)
@@ -38,7 +38,7 @@ struct bfa_iocfc_intr_attr_s {
u16 delay; /* delay in microseconds */
};
-/**
+/*
* IOC firmware configuraton
*/
struct bfa_iocfc_fwcfg_s {
@@ -71,7 +71,7 @@ struct bfa_iocfc_drvcfg_s {
u32 rsvd;
};
-/**
+/*
* IOC configuration
*/
struct bfa_iocfc_cfg_s {
@@ -79,7 +79,7 @@ struct bfa_iocfc_cfg_s {
struct bfa_iocfc_drvcfg_s drvcfg; /* driver side config */
};
-/**
+/*
* IOC firmware IO stats
*/
struct bfa_fw_io_stats_s {
@@ -152,7 +152,7 @@ struct bfa_fw_io_stats_s {
*/
};
-/**
+/*
* IOC port firmware stats
*/
@@ -262,7 +262,7 @@ struct bfa_fw_fcoe_stats_s {
u32 mac_invalids; /* Invalid mac assigned */
};
-/**
+/*
* IOC firmware FCoE port stats
*/
struct bfa_fw_fcoe_port_stats_s {
@@ -270,7 +270,7 @@ struct bfa_fw_fcoe_port_stats_s {
struct bfa_fw_fip_stats_s fip_stats;
};
-/**
+/*
* IOC firmware FC uport stats
*/
struct bfa_fw_fc_uport_stats_s {
@@ -278,7 +278,7 @@ struct bfa_fw_fc_uport_stats_s {
struct bfa_fw_port_lksm_stats_s lksm_stats;
};
-/**
+/*
* IOC firmware FC port stats
*/
union bfa_fw_fc_port_stats_s {
@@ -286,7 +286,7 @@ union bfa_fw_fc_port_stats_s {
struct bfa_fw_fcoe_port_stats_s fcoe_stats;
};
-/**
+/*
* IOC firmware port stats
*/
struct bfa_fw_port_stats_s {
@@ -295,7 +295,7 @@ struct bfa_fw_port_stats_s {
union bfa_fw_fc_port_stats_s fc_port;
};
-/**
+/*
* fcxchg module statistics
*/
struct bfa_fw_fcxchg_stats_s {
@@ -308,7 +308,7 @@ struct bfa_fw_lpsm_stats_s {
u32 cls_tx;
};
-/**
+/*
* Trunk statistics
*/
struct bfa_fw_trunk_stats_s {
@@ -334,7 +334,7 @@ struct bfa_fw_advsm_stats_s {
u32 elp_dropped; /* ELP dropped */
};
-/**
+/*
* IOCFC firmware stats
*/
struct bfa_fw_iocfc_stats_s {
@@ -345,7 +345,7 @@ struct bfa_fw_iocfc_stats_s {
u32 set_intr_reqs; /* set interrupt reqs */
};
-/**
+/*
* IOC attributes returned in queries
*/
struct bfa_iocfc_attr_s {
@@ -353,7 +353,7 @@ struct bfa_iocfc_attr_s {
struct bfa_iocfc_intr_attr_s intr_attr; /* interrupt attr */
};
-/**
+/*
* Eth_sndrcv mod stats
*/
struct bfa_fw_eth_sndrcv_stats_s {
@@ -361,7 +361,7 @@ struct bfa_fw_eth_sndrcv_stats_s {
u32 rsvd; /* 64bit align */
};
-/**
+/*
* CT MAC mod stats
*/
struct bfa_fw_mac_mod_stats_s {
@@ -379,7 +379,7 @@ struct bfa_fw_mac_mod_stats_s {
u32 rsvd; /* 64bit align */
};
-/**
+/*
* CT MOD stats
*/
struct bfa_fw_ct_mod_stats_s {
@@ -391,7 +391,7 @@ struct bfa_fw_ct_mod_stats_s {
u32 rsvd; /* 64bit align */
};
-/**
+/*
* IOC firmware stats
*/
struct bfa_fw_stats_s {
@@ -412,7 +412,7 @@ struct bfa_fw_stats_s {
#define BFA_IOCFC_PATHTOV_MAX 60
#define BFA_IOCFC_QDEPTH_MAX 2000
-/**
+/*
* QoS states
*/
enum bfa_qos_state {
@@ -420,7 +420,7 @@ enum bfa_qos_state {
BFA_QOS_OFFLINE = 2, /* QoS is offline */
};
-/**
+/*
* QoS Priority levels.
*/
enum bfa_qos_priority {
@@ -430,7 +430,7 @@ enum bfa_qos_priority {
BFA_QOS_LOW = 3, /* QoS Priority Level Low */
};
-/**
+/*
* QoS bandwidth allocation for each priority level
*/
enum bfa_qos_bw_alloc {
@@ -439,7 +439,7 @@ enum bfa_qos_bw_alloc {
BFA_QOS_BW_LOW = 10, /* bandwidth allocation for Low */
};
#pragma pack(1)
-/**
+/*
* QoS attribute returned in QoS Query
*/
struct bfa_qos_attr_s {
@@ -448,7 +448,7 @@ struct bfa_qos_attr_s {
u32 total_bb_cr; /* Total BB Credits */
};
-/**
+/*
* These fields should be displayed only from the CLI.
* There will be a separate BFAL API (get_qos_vc_attr ?)
* to retrieve this.
@@ -471,7 +471,7 @@ struct bfa_qos_vc_attr_s {
* total_vc_count */
};
-/**
+/*
* QoS statistics
*/
struct bfa_qos_stats_s {
@@ -489,7 +489,7 @@ struct bfa_qos_stats_s {
u32 rsvd; /* padding for 64 bit alignment */
};
-/**
+/*
* FCoE statistics
*/
struct bfa_fcoe_stats_s {
@@ -540,7 +540,7 @@ struct bfa_fcoe_stats_s {
u64 rxf_bcast_vlan; /* Rx FCoE broadcast vlan frames */
};
-/**
+/*
* QoS or FCoE stats (fcport stats excluding physical FC port stats)
*/
union bfa_fcport_stats_u {
@@ -639,7 +639,7 @@ enum bfa_port_states {
BFA_PORT_ST_MAX_STATE,
};
-/**
+/*
* Port operational type (in sync with SNIA port type).
*/
enum bfa_port_type {
@@ -651,7 +651,7 @@ enum bfa_port_type {
BFA_PORT_TYPE_VPORT = 22, /* NPIV - virtual port */
};
-/**
+/*
* Port topology setting. A port's topology and fabric login status
* determine its operational type.
*/
@@ -662,7 +662,7 @@ enum bfa_port_topology {
BFA_PORT_TOPOLOGY_AUTO = 3, /* auto topology selection */
};
-/**
+/*
* Physical port loopback types.
*/
enum bfa_port_opmode {
@@ -679,7 +679,7 @@ enum bfa_port_opmode {
(_mode == BFA_PORT_OPMODE_LB_SLW) || \
(_mode == BFA_PORT_OPMODE_LB_EXT))
-/**
+/*
* Port link state
*/
enum bfa_port_linkstate {
@@ -687,7 +687,7 @@ enum bfa_port_linkstate {
BFA_PORT_LINKDOWN = 2, /* Physical port/Trunk link down */
};
-/**
+/*
* Port link state reason code
*/
enum bfa_port_linkstate_rsn {
@@ -733,7 +733,7 @@ enum bfa_port_linkstate_rsn {
CEE_ISCSI_PRI_OVERLAP_FCOE_PRI = 43
};
#pragma pack(1)
-/**
+/*
* Physical port configuration
*/
struct bfa_port_cfg_s {
@@ -753,7 +753,7 @@ struct bfa_port_cfg_s {
};
#pragma pack()
-/**
+/*
* Port attribute values.
*/
struct bfa_port_attr_s {
@@ -800,7 +800,7 @@ struct bfa_port_attr_s {
u8 rsvd1[6];
};
-/**
+/*
* Port FCP mappings.
*/
struct bfa_port_fcpmap_s {
@@ -815,7 +815,7 @@ struct bfa_port_fcpmap_s {
char luid[256];
};
-/**
+/*
* Port RNID info.
*/
struct bfa_port_rnid_s {
@@ -848,7 +848,7 @@ struct bfa_fcport_fcf_s {
mac_t mac; /* FCF mac */
};
-/**
+/*
* Trunk states for BCU/BFAL
*/
enum bfa_trunk_state {
@@ -857,7 +857,7 @@ enum bfa_trunk_state {
BFA_TRUNK_OFFLINE = 2, /* Trunk is offline */
};
-/**
+/*
* VC attributes for trunked link
*/
struct bfa_trunk_vc_attr_s {
@@ -867,7 +867,7 @@ struct bfa_trunk_vc_attr_s {
u16 vc_credits[8];
};
-/**
+/*
* Link state information
*/
struct bfa_port_link_s {
@@ -959,7 +959,7 @@ struct bfa_rport_hal_stats_s {
u32 rsvd;
};
#pragma pack(1)
-/**
+/*
* Rport's QoS attributes
*/
struct bfa_rport_qos_attr_s {
@@ -987,7 +987,7 @@ struct bfa_itnim_ioprofile_s {
struct bfa_itnim_latency_s io_latency;
};
-/**
+/*
* FC physical port statistics.
*/
struct bfa_port_fc_stats_s {
@@ -1022,7 +1022,7 @@ struct bfa_port_fc_stats_s {
u64 err_enc; /* Encoding err frame_8b10b */
};
-/**
+/*
* Eth Physical Port statistics.
*/
struct bfa_port_eth_stats_s {
@@ -1070,7 +1070,7 @@ struct bfa_port_eth_stats_s {
u64 tx_iscsi_zero_pause; /* Tx iSCSI zero pause */
};
-/**
+/*
* Port statistics.
*/
union bfa_port_stats_u {
diff --git a/drivers/scsi/bfa/bfa_drv.c b/drivers/scsi/bfa/bfa_drv.c
index 14127646dc54..0222d7c88a9a 100644
--- a/drivers/scsi/bfa/bfa_drv.c
+++ b/drivers/scsi/bfa/bfa_drv.c
@@ -17,7 +17,7 @@
#include "bfa_modules.h"
-/**
+/*
* BFA module list terminated by NULL
*/
struct bfa_module_s *hal_mods[] = {
@@ -31,7 +31,7 @@ struct bfa_module_s *hal_mods[] = {
NULL
};
-/**
+/*
* Message handlers for various modules.
*/
bfa_isr_func_t bfa_isrs[BFI_MC_MAX] = {
@@ -70,7 +70,7 @@ bfa_isr_func_t bfa_isrs[BFI_MC_MAX] = {
};
-/**
+/*
* Message handlers for mailbox command classes
*/
bfa_ioc_mbox_mcfunc_t bfa_mbox_isrs[BFI_MC_MAX] = {
diff --git a/drivers/scsi/bfa/bfa_fc.h b/drivers/scsi/bfa/bfa_fc.h
index 6eff705564eb..e929d25b09e3 100644
--- a/drivers/scsi/bfa/bfa_fc.h
+++ b/drivers/scsi/bfa/bfa_fc.h
@@ -1029,7 +1029,7 @@ struct link_e2e_beacon_req_s {
struct link_e2e_beacon_param_s beacon_parm;
};
-/**
+/*
* If RPSC request is sent to the Domain Controller, the request is for
* all the ports within that domain (TODO - I don't think FOS implements
* this...).
@@ -1049,7 +1049,7 @@ struct fc_rpsc_acc_s {
struct fc_rpsc_speed_info_s speed_info[1];
};
-/**
+/*
* If RPSC2 request is sent to the Domain Controller,
*/
#define FC_BRCD_TOKEN 0x42524344
@@ -1094,7 +1094,7 @@ struct fc_rpsc2_acc_s {
struct fc_rpsc2_port_info_s port_info[1]; /* port information */
};
-/**
+/*
* bit fields so that multiple classes can be specified
*/
enum fc_cos {
@@ -1131,7 +1131,7 @@ struct fc_alpabm_s {
#define FC_VF_ID_MAX 0xEFF
#define FC_VF_ID_CTL 0xFEF /* control VF_ID */
-/**
+/*
* Virtual Fabric Tagging header format
* @caution This is defined only in BIG ENDIAN format.
*/
@@ -1463,7 +1463,7 @@ struct fcgs_gidpn_resp_s {
u32 dap:24; /* port identifier */
};
-/**
+/*
* RFT_ID
*/
struct fcgs_rftid_req_s {
@@ -1472,7 +1472,7 @@ struct fcgs_rftid_req_s {
u32 fc4_type[8]; /* fc4 types */
};
-/**
+/*
* RFF_ID : Register FC4 features.
*/
@@ -1487,7 +1487,7 @@ struct fcgs_rffid_req_s {
u32 fc4_type:8; /* corresponding FC4 Type */
};
-/**
+/*
* GID_FT Request
*/
struct fcgs_gidft_req_s {
@@ -1497,7 +1497,7 @@ struct fcgs_gidft_req_s {
u8 fc4_type; /* FC_TYPE_FCP for SCSI devices */
}; /* GID_FT Request */
-/**
+/*
* GID_FT Response
*/
struct fcgs_gidft_resp_s {
@@ -1506,7 +1506,7 @@ struct fcgs_gidft_resp_s {
u32 pid:24; /* port identifier */
}; /* GID_FT Response */
-/**
+/*
* RSPN_ID
*/
struct fcgs_rspnid_req_s {
@@ -1516,7 +1516,7 @@ struct fcgs_rspnid_req_s {
u8 spn[256]; /* symbolic port name */
};
-/**
+/*
* RPN_ID
*/
struct fcgs_rpnid_req_s {
@@ -1525,7 +1525,7 @@ struct fcgs_rpnid_req_s {
wwn_t port_name;
};
-/**
+/*
* RNN_ID
*/
struct fcgs_rnnid_req_s {
@@ -1534,7 +1534,7 @@ struct fcgs_rnnid_req_s {
wwn_t node_name;
};
-/**
+/*
* RCS_ID
*/
struct fcgs_rcsid_req_s {
@@ -1543,7 +1543,7 @@ struct fcgs_rcsid_req_s {
u32 cos;
};
-/**
+/*
* RPT_ID
*/
struct fcgs_rptid_req_s {
@@ -1553,7 +1553,7 @@ struct fcgs_rptid_req_s {
u32 rsvd1:24;
};
-/**
+/*
* GA_NXT Request
*/
struct fcgs_ganxt_req_s {
@@ -1561,7 +1561,7 @@ struct fcgs_ganxt_req_s {
u32 port_id:24;
};
-/**
+/*
* GA_NXT Response
*/
struct fcgs_ganxt_rsp_s {
diff --git a/drivers/scsi/bfa/bfa_fcbuild.c b/drivers/scsi/bfa/bfa_fcbuild.c
index b7d2657ca82a..9c725314b513 100644
--- a/drivers/scsi/bfa/bfa_fcbuild.c
+++ b/drivers/scsi/bfa/bfa_fcbuild.c
@@ -94,13 +94,13 @@ fcbuild_init(void)
*/
plogi_tmpl.csp.verhi = FC_PH_VER_PH_3;
plogi_tmpl.csp.verlo = FC_PH_VER_4_3;
- plogi_tmpl.csp.bbcred = bfa_os_htons(0x0004);
+ plogi_tmpl.csp.bbcred = cpu_to_be16(0x0004);
plogi_tmpl.csp.ciro = 0x1;
plogi_tmpl.csp.cisc = 0x0;
plogi_tmpl.csp.altbbcred = 0x0;
- plogi_tmpl.csp.conseq = bfa_os_htons(0x00FF);
- plogi_tmpl.csp.ro_bitmap = bfa_os_htons(0x0002);
- plogi_tmpl.csp.e_d_tov = bfa_os_htonl(2000);
+ plogi_tmpl.csp.conseq = cpu_to_be16(0x00FF);
+ plogi_tmpl.csp.ro_bitmap = cpu_to_be16(0x0002);
+ plogi_tmpl.csp.e_d_tov = cpu_to_be32(2000);
plogi_tmpl.class3.class_valid = 1;
plogi_tmpl.class3.sequential = 1;
@@ -112,7 +112,7 @@ fcbuild_init(void)
*/
prli_tmpl.command = FC_ELS_PRLI;
prli_tmpl.pglen = 0x10;
- prli_tmpl.pagebytes = bfa_os_htons(0x0014);
+ prli_tmpl.pagebytes = cpu_to_be16(0x0014);
prli_tmpl.parampage.type = FC_TYPE_FCP;
prli_tmpl.parampage.imagepair = 1;
prli_tmpl.parampage.servparams.rxrdisab = 1;
@@ -137,7 +137,7 @@ fcbuild_init(void)
static void
fc_gs_fchdr_build(struct fchs_s *fchs, u32 d_id, u32 s_id, u32 ox_id)
{
- bfa_os_memset(fchs, 0, sizeof(struct fchs_s));
+ memset(fchs, 0, sizeof(struct fchs_s));
fchs->routing = FC_RTG_FC4_DEV_DATA;
fchs->cat_info = FC_CAT_UNSOLICIT_CTRL;
@@ -148,9 +148,9 @@ fc_gs_fchdr_build(struct fchs_s *fchs, u32 d_id, u32 s_id, u32 ox_id)
fchs->rx_id = FC_RXID_ANY;
fchs->d_id = (d_id);
fchs->s_id = (s_id);
- fchs->ox_id = bfa_os_htons(ox_id);
+ fchs->ox_id = cpu_to_be16(ox_id);
- /**
+ /*
* @todo no need to set ox_id for request
* no need to set rx_id for response
*/
@@ -159,16 +159,16 @@ fc_gs_fchdr_build(struct fchs_s *fchs, u32 d_id, u32 s_id, u32 ox_id)
void
fc_els_req_build(struct fchs_s *fchs, u32 d_id, u32 s_id, u16 ox_id)
{
- bfa_os_memcpy(fchs, &fc_els_req_tmpl, sizeof(struct fchs_s));
+ memcpy(fchs, &fc_els_req_tmpl, sizeof(struct fchs_s));
fchs->d_id = (d_id);
fchs->s_id = (s_id);
- fchs->ox_id = bfa_os_htons(ox_id);
+ fchs->ox_id = cpu_to_be16(ox_id);
}
static void
fc_els_rsp_build(struct fchs_s *fchs, u32 d_id, u32 s_id, u16 ox_id)
{
- bfa_os_memcpy(fchs, &fc_els_rsp_tmpl, sizeof(struct fchs_s));
+ memcpy(fchs, &fc_els_rsp_tmpl, sizeof(struct fchs_s));
fchs->d_id = d_id;
fchs->s_id = s_id;
fchs->ox_id = ox_id;
@@ -198,7 +198,7 @@ fc_els_rsp_parse(struct fchs_s *fchs, int len)
static void
fc_bls_rsp_build(struct fchs_s *fchs, u32 d_id, u32 s_id, u16 ox_id)
{
- bfa_os_memcpy(fchs, &fc_bls_rsp_tmpl, sizeof(struct fchs_s));
+ memcpy(fchs, &fc_bls_rsp_tmpl, sizeof(struct fchs_s));
fchs->d_id = d_id;
fchs->s_id = s_id;
fchs->ox_id = ox_id;
@@ -211,7 +211,7 @@ fc_plogi_x_build(struct fchs_s *fchs, void *pld, u32 d_id, u32 s_id,
{
struct fc_logi_s *plogi = (struct fc_logi_s *) (pld);
- bfa_os_memcpy(plogi, &plogi_tmpl, sizeof(struct fc_logi_s));
+ memcpy(plogi, &plogi_tmpl, sizeof(struct fc_logi_s));
plogi->els_cmd.els_code = els_code;
if (els_code == FC_ELS_PLOGI)
@@ -219,10 +219,10 @@ fc_plogi_x_build(struct fchs_s *fchs, void *pld, u32 d_id, u32 s_id,
else
fc_els_rsp_build(fchs, d_id, s_id, ox_id);
- plogi->csp.rxsz = plogi->class3.rxsz = bfa_os_htons(pdu_size);
+ plogi->csp.rxsz = plogi->class3.rxsz = cpu_to_be16(pdu_size);
- bfa_os_memcpy(&plogi->port_name, &port_name, sizeof(wwn_t));
- bfa_os_memcpy(&plogi->node_name, &node_name, sizeof(wwn_t));
+ memcpy(&plogi->port_name, &port_name, sizeof(wwn_t));
+ memcpy(&plogi->node_name, &node_name, sizeof(wwn_t));
return sizeof(struct fc_logi_s);
}
@@ -235,12 +235,12 @@ fc_flogi_build(struct fchs_s *fchs, struct fc_logi_s *flogi, u32 s_id,
u32 d_id = bfa_os_hton3b(FC_FABRIC_PORT);
u32 *vvl_info;
- bfa_os_memcpy(flogi, &plogi_tmpl, sizeof(struct fc_logi_s));
+ memcpy(flogi, &plogi_tmpl, sizeof(struct fc_logi_s));
flogi->els_cmd.els_code = FC_ELS_FLOGI;
fc_els_req_build(fchs, d_id, s_id, ox_id);
- flogi->csp.rxsz = flogi->class3.rxsz = bfa_os_htons(pdu_size);
+ flogi->csp.rxsz = flogi->class3.rxsz = cpu_to_be16(pdu_size);
flogi->port_name = port_name;
flogi->node_name = node_name;
@@ -253,14 +253,14 @@ fc_flogi_build(struct fchs_s *fchs, struct fc_logi_s *flogi, u32 s_id,
/* set AUTH capability */
flogi->csp.security = set_auth;
- flogi->csp.bbcred = bfa_os_htons(local_bb_credits);
+ flogi->csp.bbcred = cpu_to_be16(local_bb_credits);
/* Set brcd token in VVL */
vvl_info = (u32 *)&flogi->vvl[0];
/* set the flag to indicate the presence of VVL */
flogi->csp.npiv_supp = 1; /* @todo. field name is not correct */
- vvl_info[0] = bfa_os_htonl(FLOGI_VVL_BRCD);
+ vvl_info[0] = cpu_to_be32(FLOGI_VVL_BRCD);
return sizeof(struct fc_logi_s);
}
@@ -272,15 +272,15 @@ fc_flogi_acc_build(struct fchs_s *fchs, struct fc_logi_s *flogi, u32 s_id,
{
u32 d_id = 0;
- bfa_os_memcpy(flogi, &plogi_tmpl, sizeof(struct fc_logi_s));
+ memcpy(flogi, &plogi_tmpl, sizeof(struct fc_logi_s));
fc_els_rsp_build(fchs, d_id, s_id, ox_id);
flogi->els_cmd.els_code = FC_ELS_ACC;
- flogi->csp.rxsz = flogi->class3.rxsz = bfa_os_htons(pdu_size);
+ flogi->csp.rxsz = flogi->class3.rxsz = cpu_to_be16(pdu_size);
flogi->port_name = port_name;
flogi->node_name = node_name;
- flogi->csp.bbcred = bfa_os_htons(local_bb_credits);
+ flogi->csp.bbcred = cpu_to_be16(local_bb_credits);
return sizeof(struct fc_logi_s);
}
@@ -291,12 +291,12 @@ fc_fdisc_build(struct fchs_s *fchs, struct fc_logi_s *flogi, u32 s_id,
{
u32 d_id = bfa_os_hton3b(FC_FABRIC_PORT);
- bfa_os_memcpy(flogi, &plogi_tmpl, sizeof(struct fc_logi_s));
+ memcpy(flogi, &plogi_tmpl, sizeof(struct fc_logi_s));
flogi->els_cmd.els_code = FC_ELS_FDISC;
fc_els_req_build(fchs, d_id, s_id, ox_id);
- flogi->csp.rxsz = flogi->class3.rxsz = bfa_os_htons(pdu_size);
+ flogi->csp.rxsz = flogi->class3.rxsz = cpu_to_be16(pdu_size);
flogi->port_name = port_name;
flogi->node_name = node_name;
@@ -346,7 +346,7 @@ fc_plogi_rsp_parse(struct fchs_s *fchs, int len, wwn_t port_name)
if (!plogi->class3.class_valid)
return FC_PARSE_FAILURE;
- if (bfa_os_ntohs(plogi->class3.rxsz) < (FC_MIN_PDUSZ))
+ if (be16_to_cpu(plogi->class3.rxsz) < (FC_MIN_PDUSZ))
return FC_PARSE_FAILURE;
return FC_PARSE_OK;
@@ -363,8 +363,8 @@ fc_plogi_parse(struct fchs_s *fchs)
if (plogi->class3.class_valid != 1)
return FC_PARSE_FAILURE;
- if ((bfa_os_ntohs(plogi->class3.rxsz) < FC_MIN_PDUSZ)
- || (bfa_os_ntohs(plogi->class3.rxsz) > FC_MAX_PDUSZ)
+ if ((be16_to_cpu(plogi->class3.rxsz) < FC_MIN_PDUSZ)
+ || (be16_to_cpu(plogi->class3.rxsz) > FC_MAX_PDUSZ)
|| (plogi->class3.rxsz == 0))
return FC_PARSE_FAILURE;
@@ -378,7 +378,7 @@ fc_prli_build(struct fchs_s *fchs, void *pld, u32 d_id, u32 s_id,
struct fc_prli_s *prli = (struct fc_prli_s *) (pld);
fc_els_req_build(fchs, d_id, s_id, ox_id);
- bfa_os_memcpy(prli, &prli_tmpl, sizeof(struct fc_prli_s));
+ memcpy(prli, &prli_tmpl, sizeof(struct fc_prli_s));
prli->command = FC_ELS_PRLI;
prli->parampage.servparams.initiator = 1;
@@ -397,7 +397,7 @@ fc_prli_acc_build(struct fchs_s *fchs, void *pld, u32 d_id, u32 s_id,
struct fc_prli_s *prli = (struct fc_prli_s *) (pld);
fc_els_rsp_build(fchs, d_id, s_id, ox_id);
- bfa_os_memcpy(prli, &prli_tmpl, sizeof(struct fc_prli_s));
+ memcpy(prli, &prli_tmpl, sizeof(struct fc_prli_s));
prli->command = FC_ELS_ACC;
@@ -448,7 +448,7 @@ fc_logo_build(struct fchs_s *fchs, struct fc_logo_s *logo, u32 d_id, u32 s_id,
{
fc_els_req_build(fchs, d_id, s_id, ox_id);
- bfa_os_memset(logo, '\0', sizeof(struct fc_logo_s));
+ memset(logo, '\0', sizeof(struct fc_logo_s));
logo->els_cmd.els_code = FC_ELS_LOGO;
logo->nport_id = (s_id);
logo->orig_port_name = port_name;
@@ -461,7 +461,7 @@ fc_adisc_x_build(struct fchs_s *fchs, struct fc_adisc_s *adisc, u32 d_id,
u32 s_id, u16 ox_id, wwn_t port_name,
wwn_t node_name, u8 els_code)
{
- bfa_os_memset(adisc, '\0', sizeof(struct fc_adisc_s));
+ memset(adisc, '\0', sizeof(struct fc_adisc_s));
adisc->els_cmd.els_code = els_code;
@@ -537,7 +537,7 @@ fc_pdisc_parse(struct fchs_s *fchs, wwn_t node_name, wwn_t port_name)
if (pdisc->class3.class_valid != 1)
return FC_PARSE_FAILURE;
- if ((bfa_os_ntohs(pdisc->class3.rxsz) <
+ if ((be16_to_cpu(pdisc->class3.rxsz) <
(FC_MIN_PDUSZ - sizeof(struct fchs_s)))
|| (pdisc->class3.rxsz == 0))
return FC_PARSE_FAILURE;
@@ -554,11 +554,11 @@ fc_pdisc_parse(struct fchs_s *fchs, wwn_t node_name, wwn_t port_name)
u16
fc_abts_build(struct fchs_s *fchs, u32 d_id, u32 s_id, u16 ox_id)
{
- bfa_os_memcpy(fchs, &fc_bls_req_tmpl, sizeof(struct fchs_s));
+ memcpy(fchs, &fc_bls_req_tmpl, sizeof(struct fchs_s));
fchs->cat_info = FC_CAT_ABTS;
fchs->d_id = (d_id);
fchs->s_id = (s_id);
- fchs->ox_id = bfa_os_htons(ox_id);
+ fchs->ox_id = cpu_to_be16(ox_id);
return sizeof(struct fchs_s);
}
@@ -582,9 +582,9 @@ fc_rrq_build(struct fchs_s *fchs, struct fc_rrq_s *rrq, u32 d_id, u32 s_id,
/*
* build rrq payload
*/
- bfa_os_memcpy(rrq, &rrq_tmpl, sizeof(struct fc_rrq_s));
+ memcpy(rrq, &rrq_tmpl, sizeof(struct fc_rrq_s));
rrq->s_id = (s_id);
- rrq->ox_id = bfa_os_htons(rrq_oxid);
+ rrq->ox_id = cpu_to_be16(rrq_oxid);
rrq->rx_id = FC_RXID_ANY;
return sizeof(struct fc_rrq_s);
@@ -598,7 +598,7 @@ fc_logo_acc_build(struct fchs_s *fchs, void *pld, u32 d_id, u32 s_id,
fc_els_rsp_build(fchs, d_id, s_id, ox_id);
- bfa_os_memset(acc, 0, sizeof(struct fc_els_cmd_s));
+ memset(acc, 0, sizeof(struct fc_els_cmd_s));
acc->els_code = FC_ELS_ACC;
return sizeof(struct fc_els_cmd_s);
@@ -610,7 +610,7 @@ fc_ls_rjt_build(struct fchs_s *fchs, struct fc_ls_rjt_s *ls_rjt, u32 d_id,
u8 reason_code_expl)
{
fc_els_rsp_build(fchs, d_id, s_id, ox_id);
- bfa_os_memset(ls_rjt, 0, sizeof(struct fc_ls_rjt_s));
+ memset(ls_rjt, 0, sizeof(struct fc_ls_rjt_s));
ls_rjt->els_cmd.els_code = FC_ELS_LS_RJT;
ls_rjt->reason_code = reason_code;
@@ -626,7 +626,7 @@ fc_ba_acc_build(struct fchs_s *fchs, struct fc_ba_acc_s *ba_acc, u32 d_id,
{
fc_bls_rsp_build(fchs, d_id, s_id, ox_id);
- bfa_os_memcpy(ba_acc, &ba_acc_tmpl, sizeof(struct fc_ba_acc_s));
+ memcpy(ba_acc, &ba_acc_tmpl, sizeof(struct fc_ba_acc_s));
fchs->rx_id = rx_id;
@@ -641,7 +641,7 @@ fc_ls_acc_build(struct fchs_s *fchs, struct fc_els_cmd_s *els_cmd, u32 d_id,
u32 s_id, u16 ox_id)
{
fc_els_rsp_build(fchs, d_id, s_id, ox_id);
- bfa_os_memset(els_cmd, 0, sizeof(struct fc_els_cmd_s));
+ memset(els_cmd, 0, sizeof(struct fc_els_cmd_s));
els_cmd->els_code = FC_ELS_ACC;
return sizeof(struct fc_els_cmd_s);
@@ -656,10 +656,10 @@ fc_logout_params_pages(struct fchs_s *fc_frame, u8 els_code)
if (els_code == FC_ELS_PRLO) {
prlo = (struct fc_prlo_s *) (fc_frame + 1);
- num_pages = (bfa_os_ntohs(prlo->payload_len) - 4) / 16;
+ num_pages = (be16_to_cpu(prlo->payload_len) - 4) / 16;
} else {
tprlo = (struct fc_tprlo_s *) (fc_frame + 1);
- num_pages = (bfa_os_ntohs(tprlo->payload_len) - 4) / 16;
+ num_pages = (be16_to_cpu(tprlo->payload_len) - 4) / 16;
}
return num_pages;
}
@@ -672,11 +672,11 @@ fc_tprlo_acc_build(struct fchs_s *fchs, struct fc_tprlo_acc_s *tprlo_acc,
fc_els_rsp_build(fchs, d_id, s_id, ox_id);
- bfa_os_memset(tprlo_acc, 0, (num_pages * 16) + 4);
+ memset(tprlo_acc, 0, (num_pages * 16) + 4);
tprlo_acc->command = FC_ELS_ACC;
tprlo_acc->page_len = 0x10;
- tprlo_acc->payload_len = bfa_os_htons((num_pages * 16) + 4);
+ tprlo_acc->payload_len = cpu_to_be16((num_pages * 16) + 4);
for (page = 0; page < num_pages; page++) {
tprlo_acc->tprlo_acc_params[page].opa_valid = 0;
@@ -685,7 +685,7 @@ fc_tprlo_acc_build(struct fchs_s *fchs, struct fc_tprlo_acc_s *tprlo_acc,
tprlo_acc->tprlo_acc_params[page].orig_process_assc = 0;
tprlo_acc->tprlo_acc_params[page].resp_process_assc = 0;
}
- return bfa_os_ntohs(tprlo_acc->payload_len);
+ return be16_to_cpu(tprlo_acc->payload_len);
}
u16
@@ -696,10 +696,10 @@ fc_prlo_acc_build(struct fchs_s *fchs, struct fc_prlo_acc_s *prlo_acc, u32 d_id,
fc_els_rsp_build(fchs, d_id, s_id, ox_id);
- bfa_os_memset(prlo_acc, 0, (num_pages * 16) + 4);
+ memset(prlo_acc, 0, (num_pages * 16) + 4);
prlo_acc->command = FC_ELS_ACC;
prlo_acc->page_len = 0x10;
- prlo_acc->payload_len = bfa_os_htons((num_pages * 16) + 4);
+ prlo_acc->payload_len = cpu_to_be16((num_pages * 16) + 4);
for (page = 0; page < num_pages; page++) {
prlo_acc->prlo_acc_params[page].opa_valid = 0;
@@ -709,7 +709,7 @@ fc_prlo_acc_build(struct fchs_s *fchs, struct fc_prlo_acc_s *prlo_acc, u32 d_id,
prlo_acc->prlo_acc_params[page].resp_process_assc = 0;
}
- return bfa_os_ntohs(prlo_acc->payload_len);
+ return be16_to_cpu(prlo_acc->payload_len);
}
u16
@@ -718,7 +718,7 @@ fc_rnid_build(struct fchs_s *fchs, struct fc_rnid_cmd_s *rnid, u32 d_id,
{
fc_els_req_build(fchs, d_id, s_id, ox_id);
- bfa_os_memset(rnid, 0, sizeof(struct fc_rnid_cmd_s));
+ memset(rnid, 0, sizeof(struct fc_rnid_cmd_s));
rnid->els_cmd.els_code = FC_ELS_RNID;
rnid->node_id_data_format = data_format;
@@ -732,7 +732,7 @@ fc_rnid_acc_build(struct fchs_s *fchs, struct fc_rnid_acc_s *rnid_acc, u32 d_id,
struct fc_rnid_common_id_data_s *common_id_data,
struct fc_rnid_general_topology_data_s *gen_topo_data)
{
- bfa_os_memset(rnid_acc, 0, sizeof(struct fc_rnid_acc_s));
+ memset(rnid_acc, 0, sizeof(struct fc_rnid_acc_s));
fc_els_rsp_build(fchs, d_id, s_id, ox_id);
@@ -745,7 +745,7 @@ fc_rnid_acc_build(struct fchs_s *fchs, struct fc_rnid_acc_s *rnid_acc, u32 d_id,
if (data_format == RNID_NODEID_DATA_FORMAT_DISCOVERY) {
rnid_acc->specific_id_data_length =
sizeof(struct fc_rnid_general_topology_data_s);
- bfa_os_assign(rnid_acc->gen_topology_data, *gen_topo_data);
+ rnid_acc->gen_topology_data = *gen_topo_data;
return sizeof(struct fc_rnid_acc_s);
} else {
return sizeof(struct fc_rnid_acc_s) -
@@ -760,7 +760,7 @@ fc_rpsc_build(struct fchs_s *fchs, struct fc_rpsc_cmd_s *rpsc, u32 d_id,
{
fc_els_req_build(fchs, d_id, s_id, ox_id);
- bfa_os_memset(rpsc, 0, sizeof(struct fc_rpsc_cmd_s));
+ memset(rpsc, 0, sizeof(struct fc_rpsc_cmd_s));
rpsc->els_cmd.els_code = FC_ELS_RPSC;
return sizeof(struct fc_rpsc_cmd_s);
@@ -775,11 +775,11 @@ fc_rpsc2_build(struct fchs_s *fchs, struct fc_rpsc2_cmd_s *rpsc2, u32 d_id,
fc_els_req_build(fchs, bfa_os_hton3b(dctlr_id), s_id, 0);
- bfa_os_memset(rpsc2, 0, sizeof(struct fc_rpsc2_cmd_s));
+ memset(rpsc2, 0, sizeof(struct fc_rpsc2_cmd_s));
rpsc2->els_cmd.els_code = FC_ELS_RPSC;
- rpsc2->token = bfa_os_htonl(FC_BRCD_TOKEN);
- rpsc2->num_pids = bfa_os_htons(npids);
+ rpsc2->token = cpu_to_be32(FC_BRCD_TOKEN);
+ rpsc2->num_pids = cpu_to_be16(npids);
for (i = 0; i < npids; i++)
rpsc2->pid_list[i].pid = pid_list[i];
@@ -791,18 +791,18 @@ fc_rpsc_acc_build(struct fchs_s *fchs, struct fc_rpsc_acc_s *rpsc_acc,
u32 d_id, u32 s_id, u16 ox_id,
struct fc_rpsc_speed_info_s *oper_speed)
{
- bfa_os_memset(rpsc_acc, 0, sizeof(struct fc_rpsc_acc_s));
+ memset(rpsc_acc, 0, sizeof(struct fc_rpsc_acc_s));
fc_els_rsp_build(fchs, d_id, s_id, ox_id);
rpsc_acc->command = FC_ELS_ACC;
- rpsc_acc->num_entries = bfa_os_htons(1);
+ rpsc_acc->num_entries = cpu_to_be16(1);
rpsc_acc->speed_info[0].port_speed_cap =
- bfa_os_htons(oper_speed->port_speed_cap);
+ cpu_to_be16(oper_speed->port_speed_cap);
rpsc_acc->speed_info[0].port_op_speed =
- bfa_os_htons(oper_speed->port_op_speed);
+ cpu_to_be16(oper_speed->port_op_speed);
return sizeof(struct fc_rpsc_acc_s);
}
@@ -830,12 +830,12 @@ fc_pdisc_build(struct fchs_s *fchs, u32 d_id, u32 s_id, u16 ox_id,
{
struct fc_logi_s *pdisc = (struct fc_logi_s *) (fchs + 1);
- bfa_os_memcpy(pdisc, &plogi_tmpl, sizeof(struct fc_logi_s));
+ memcpy(pdisc, &plogi_tmpl, sizeof(struct fc_logi_s));
pdisc->els_cmd.els_code = FC_ELS_PDISC;
fc_els_req_build(fchs, d_id, s_id, ox_id);
- pdisc->csp.rxsz = pdisc->class3.rxsz = bfa_os_htons(pdu_size);
+ pdisc->csp.rxsz = pdisc->class3.rxsz = cpu_to_be16(pdu_size);
pdisc->port_name = port_name;
pdisc->node_name = node_name;
@@ -859,7 +859,7 @@ fc_pdisc_rsp_parse(struct fchs_s *fchs, int len, wwn_t port_name)
if (!pdisc->class3.class_valid)
return FC_PARSE_NWWN_NOT_EQUAL;
- if (bfa_os_ntohs(pdisc->class3.rxsz) < (FC_MIN_PDUSZ))
+ if (be16_to_cpu(pdisc->class3.rxsz) < (FC_MIN_PDUSZ))
return FC_PARSE_RXSZ_INVAL;
return FC_PARSE_OK;
@@ -873,10 +873,10 @@ fc_prlo_build(struct fchs_s *fchs, u32 d_id, u32 s_id, u16 ox_id,
int page;
fc_els_req_build(fchs, d_id, s_id, ox_id);
- bfa_os_memset(prlo, 0, (num_pages * 16) + 4);
+ memset(prlo, 0, (num_pages * 16) + 4);
prlo->command = FC_ELS_PRLO;
prlo->page_len = 0x10;
- prlo->payload_len = bfa_os_htons((num_pages * 16) + 4);
+ prlo->payload_len = cpu_to_be16((num_pages * 16) + 4);
for (page = 0; page < num_pages; page++) {
prlo->prlo_params[page].type = FC_TYPE_FCP;
@@ -886,7 +886,7 @@ fc_prlo_build(struct fchs_s *fchs, u32 d_id, u32 s_id, u16 ox_id,
prlo->prlo_params[page].resp_process_assc = 0;
}
- return bfa_os_ntohs(prlo->payload_len);
+ return be16_to_cpu(prlo->payload_len);
}
u16
@@ -901,7 +901,7 @@ fc_prlo_rsp_parse(struct fchs_s *fchs, int len)
if (prlo->command != FC_ELS_ACC)
return FC_PARSE_FAILURE;
- num_pages = ((bfa_os_ntohs(prlo->payload_len)) - 4) / 16;
+ num_pages = ((be16_to_cpu(prlo->payload_len)) - 4) / 16;
for (page = 0; page < num_pages; page++) {
if (prlo->prlo_acc_params[page].type != FC_TYPE_FCP)
@@ -931,10 +931,10 @@ fc_tprlo_build(struct fchs_s *fchs, u32 d_id, u32 s_id, u16 ox_id,
int page;
fc_els_req_build(fchs, d_id, s_id, ox_id);
- bfa_os_memset(tprlo, 0, (num_pages * 16) + 4);
+ memset(tprlo, 0, (num_pages * 16) + 4);
tprlo->command = FC_ELS_TPRLO;
tprlo->page_len = 0x10;
- tprlo->payload_len = bfa_os_htons((num_pages * 16) + 4);
+ tprlo->payload_len = cpu_to_be16((num_pages * 16) + 4);
for (page = 0; page < num_pages; page++) {
tprlo->tprlo_params[page].type = FC_TYPE_FCP;
@@ -950,7 +950,7 @@ fc_tprlo_build(struct fchs_s *fchs, u32 d_id, u32 s_id, u16 ox_id,
}
}
- return bfa_os_ntohs(tprlo->payload_len);
+ return be16_to_cpu(tprlo->payload_len);
}
u16
@@ -965,7 +965,7 @@ fc_tprlo_rsp_parse(struct fchs_s *fchs, int len)
if (tprlo->command != FC_ELS_ACC)
return FC_PARSE_ACC_INVAL;
- num_pages = (bfa_os_ntohs(tprlo->payload_len) - 4) / 16;
+ num_pages = (be16_to_cpu(tprlo->payload_len) - 4) / 16;
for (page = 0; page < num_pages; page++) {
if (tprlo->tprlo_acc_params[page].type != FC_TYPE_FCP)
@@ -1011,32 +1011,32 @@ fc_ba_rjt_build(struct fchs_s *fchs, u32 d_id, u32 s_id, u16 ox_id,
static void
fc_gs_cthdr_build(struct ct_hdr_s *cthdr, u32 s_id, u16 cmd_code)
{
- bfa_os_memset(cthdr, 0, sizeof(struct ct_hdr_s));
+ memset(cthdr, 0, sizeof(struct ct_hdr_s));
cthdr->rev_id = CT_GS3_REVISION;
cthdr->gs_type = CT_GSTYPE_DIRSERVICE;
cthdr->gs_sub_type = CT_GSSUBTYPE_NAMESERVER;
- cthdr->cmd_rsp_code = bfa_os_htons(cmd_code);
+ cthdr->cmd_rsp_code = cpu_to_be16(cmd_code);
}
static void
fc_gs_fdmi_cthdr_build(struct ct_hdr_s *cthdr, u32 s_id, u16 cmd_code)
{
- bfa_os_memset(cthdr, 0, sizeof(struct ct_hdr_s));
+ memset(cthdr, 0, sizeof(struct ct_hdr_s));
cthdr->rev_id = CT_GS3_REVISION;
cthdr->gs_type = CT_GSTYPE_MGMTSERVICE;
cthdr->gs_sub_type = CT_GSSUBTYPE_HBA_MGMTSERVER;
- cthdr->cmd_rsp_code = bfa_os_htons(cmd_code);
+ cthdr->cmd_rsp_code = cpu_to_be16(cmd_code);
}
static void
fc_gs_ms_cthdr_build(struct ct_hdr_s *cthdr, u32 s_id, u16 cmd_code,
u8 sub_type)
{
- bfa_os_memset(cthdr, 0, sizeof(struct ct_hdr_s));
+ memset(cthdr, 0, sizeof(struct ct_hdr_s));
cthdr->rev_id = CT_GS3_REVISION;
cthdr->gs_type = CT_GSTYPE_MGMTSERVICE;
cthdr->gs_sub_type = sub_type;
- cthdr->cmd_rsp_code = bfa_os_htons(cmd_code);
+ cthdr->cmd_rsp_code = cpu_to_be16(cmd_code);
}
u16
@@ -1050,7 +1050,7 @@ fc_gidpn_build(struct fchs_s *fchs, void *pyld, u32 s_id, u16 ox_id,
fc_gs_fchdr_build(fchs, d_id, s_id, ox_id);
fc_gs_cthdr_build(cthdr, s_id, GS_GID_PN);
- bfa_os_memset(gidpn, 0, sizeof(struct fcgs_gidpn_req_s));
+ memset(gidpn, 0, sizeof(struct fcgs_gidpn_req_s));
gidpn->port_name = port_name;
return sizeof(struct fcgs_gidpn_req_s) + sizeof(struct ct_hdr_s);
}
@@ -1066,7 +1066,7 @@ fc_gpnid_build(struct fchs_s *fchs, void *pyld, u32 s_id, u16 ox_id,
fc_gs_fchdr_build(fchs, d_id, s_id, ox_id);
fc_gs_cthdr_build(cthdr, s_id, GS_GPN_ID);
- bfa_os_memset(gpnid, 0, sizeof(fcgs_gpnid_req_t));
+ memset(gpnid, 0, sizeof(fcgs_gpnid_req_t));
gpnid->dap = port_id;
return sizeof(fcgs_gpnid_req_t) + sizeof(struct ct_hdr_s);
}
@@ -1082,7 +1082,7 @@ fc_gnnid_build(struct fchs_s *fchs, void *pyld, u32 s_id, u16 ox_id,
fc_gs_fchdr_build(fchs, d_id, s_id, ox_id);
fc_gs_cthdr_build(cthdr, s_id, GS_GNN_ID);
- bfa_os_memset(gnnid, 0, sizeof(fcgs_gnnid_req_t));
+ memset(gnnid, 0, sizeof(fcgs_gnnid_req_t));
gnnid->dap = port_id;
return sizeof(fcgs_gnnid_req_t) + sizeof(struct ct_hdr_s);
}
@@ -1090,7 +1090,7 @@ fc_gnnid_build(struct fchs_s *fchs, void *pyld, u32 s_id, u16 ox_id,
u16
fc_ct_rsp_parse(struct ct_hdr_s *cthdr)
{
- if (bfa_os_ntohs(cthdr->cmd_rsp_code) != CT_RSP_ACCEPT) {
+ if (be16_to_cpu(cthdr->cmd_rsp_code) != CT_RSP_ACCEPT) {
if (cthdr->reason_code == CT_RSN_LOGICAL_BUSY)
return FC_PARSE_BUSY;
else
@@ -1108,7 +1108,7 @@ fc_scr_build(struct fchs_s *fchs, struct fc_scr_s *scr,
fc_els_req_build(fchs, d_id, s_id, ox_id);
- bfa_os_memset(scr, 0, sizeof(struct fc_scr_s));
+ memset(scr, 0, sizeof(struct fc_scr_s));
scr->command = FC_ELS_SCR;
scr->reg_func = FC_SCR_REG_FUNC_FULL;
if (set_br_reg)
@@ -1129,7 +1129,7 @@ fc_rscn_build(struct fchs_s *fchs, struct fc_rscn_pl_s *rscn,
rscn->pagelen = sizeof(rscn->event[0]);
payldlen = sizeof(u32) + rscn->pagelen;
- rscn->payldlen = bfa_os_htons(payldlen);
+ rscn->payldlen = cpu_to_be16(payldlen);
rscn->event[0].format = FC_RSCN_FORMAT_PORTID;
rscn->event[0].portid = s_id;
@@ -1149,14 +1149,14 @@ fc_rftid_build(struct fchs_s *fchs, void *pyld, u32 s_id, u16 ox_id,
fc_gs_fchdr_build(fchs, d_id, s_id, ox_id);
fc_gs_cthdr_build(cthdr, s_id, GS_RFT_ID);
- bfa_os_memset(rftid, 0, sizeof(struct fcgs_rftid_req_s));
+ memset(rftid, 0, sizeof(struct fcgs_rftid_req_s));
rftid->dap = s_id;
/* By default, FCP FC4 Type is registered */
index = FC_TYPE_FCP >> 5;
type_value = 1 << (FC_TYPE_FCP % 32);
- rftid->fc4_type[index] = bfa_os_htonl(type_value);
+ rftid->fc4_type[index] = cpu_to_be32(type_value);
return sizeof(struct fcgs_rftid_req_s) + sizeof(struct ct_hdr_s);
}
@@ -1172,10 +1172,10 @@ fc_rftid_build_sol(struct fchs_s *fchs, void *pyld, u32 s_id, u16 ox_id,
fc_gs_fchdr_build(fchs, d_id, s_id, ox_id);
fc_gs_cthdr_build(cthdr, s_id, GS_RFT_ID);
- bfa_os_memset(rftid, 0, sizeof(struct fcgs_rftid_req_s));
+ memset(rftid, 0, sizeof(struct fcgs_rftid_req_s));
rftid->dap = s_id;
- bfa_os_memcpy((void *)rftid->fc4_type, (void *)fc4_bitmap,
+ memcpy((void *)rftid->fc4_type, (void *)fc4_bitmap,
(bitmap_size < 32 ? bitmap_size : 32));
return sizeof(struct fcgs_rftid_req_s) + sizeof(struct ct_hdr_s);
@@ -1192,7 +1192,7 @@ fc_rffid_build(struct fchs_s *fchs, void *pyld, u32 s_id, u16 ox_id,
fc_gs_fchdr_build(fchs, d_id, s_id, ox_id);
fc_gs_cthdr_build(cthdr, s_id, GS_RFF_ID);
- bfa_os_memset(rffid, 0, sizeof(struct fcgs_rffid_req_s));
+ memset(rffid, 0, sizeof(struct fcgs_rffid_req_s));
rffid->dap = s_id;
rffid->fc4ftr_bits = fc4_ftrs;
@@ -1214,7 +1214,7 @@ fc_rspnid_build(struct fchs_s *fchs, void *pyld, u32 s_id, u16 ox_id,
fc_gs_fchdr_build(fchs, d_id, s_id, ox_id);
fc_gs_cthdr_build(cthdr, s_id, GS_RSPN_ID);
- bfa_os_memset(rspnid, 0, sizeof(struct fcgs_rspnid_req_s));
+ memset(rspnid, 0, sizeof(struct fcgs_rspnid_req_s));
rspnid->dap = s_id;
rspnid->spn_len = (u8) strlen((char *)name);
@@ -1235,7 +1235,7 @@ fc_gid_ft_build(struct fchs_s *fchs, void *pyld, u32 s_id, u8 fc4_type)
fc_gs_cthdr_build(cthdr, s_id, GS_GID_FT);
- bfa_os_memset(gidft, 0, sizeof(struct fcgs_gidft_req_s));
+ memset(gidft, 0, sizeof(struct fcgs_gidft_req_s));
gidft->fc4_type = fc4_type;
gidft->domain_id = 0;
gidft->area_id = 0;
@@ -1254,7 +1254,7 @@ fc_rpnid_build(struct fchs_s *fchs, void *pyld, u32 s_id, u32 port_id,
fc_gs_fchdr_build(fchs, d_id, s_id, 0);
fc_gs_cthdr_build(cthdr, s_id, GS_RPN_ID);
- bfa_os_memset(rpnid, 0, sizeof(struct fcgs_rpnid_req_s));
+ memset(rpnid, 0, sizeof(struct fcgs_rpnid_req_s));
rpnid->port_id = port_id;
rpnid->port_name = port_name;
@@ -1272,7 +1272,7 @@ fc_rnnid_build(struct fchs_s *fchs, void *pyld, u32 s_id, u32 port_id,
fc_gs_fchdr_build(fchs, d_id, s_id, 0);
fc_gs_cthdr_build(cthdr, s_id, GS_RNN_ID);
- bfa_os_memset(rnnid, 0, sizeof(struct fcgs_rnnid_req_s));
+ memset(rnnid, 0, sizeof(struct fcgs_rnnid_req_s));
rnnid->port_id = port_id;
rnnid->node_name = node_name;
@@ -1291,7 +1291,7 @@ fc_rcsid_build(struct fchs_s *fchs, void *pyld, u32 s_id, u32 port_id,
fc_gs_fchdr_build(fchs, d_id, s_id, 0);
fc_gs_cthdr_build(cthdr, s_id, GS_RCS_ID);
- bfa_os_memset(rcsid, 0, sizeof(struct fcgs_rcsid_req_s));
+ memset(rcsid, 0, sizeof(struct fcgs_rcsid_req_s));
rcsid->port_id = port_id;
rcsid->cos = cos;
@@ -1309,7 +1309,7 @@ fc_rptid_build(struct fchs_s *fchs, void *pyld, u32 s_id, u32 port_id,
fc_gs_fchdr_build(fchs, d_id, s_id, 0);
fc_gs_cthdr_build(cthdr, s_id, GS_RPT_ID);
- bfa_os_memset(rptid, 0, sizeof(struct fcgs_rptid_req_s));
+ memset(rptid, 0, sizeof(struct fcgs_rptid_req_s));
rptid->port_id = port_id;
rptid->port_type = port_type;
@@ -1326,7 +1326,7 @@ fc_ganxt_build(struct fchs_s *fchs, void *pyld, u32 s_id, u32 port_id)
fc_gs_fchdr_build(fchs, d_id, s_id, 0);
fc_gs_cthdr_build(cthdr, s_id, GS_GA_NXT);
- bfa_os_memset(ganxt, 0, sizeof(struct fcgs_ganxt_req_s));
+ memset(ganxt, 0, sizeof(struct fcgs_ganxt_req_s));
ganxt->port_id = port_id;
return sizeof(struct ct_hdr_s) + sizeof(struct fcgs_ganxt_req_s);
@@ -1365,7 +1365,7 @@ fc_get_fc4type_bitmask(u8 fc4_type, u8 *bit_mask)
index = fc4_type >> 5;
type_value = 1 << (fc4_type % 32);
- ptr[index] = bfa_os_htonl(type_value);
+ ptr[index] = cpu_to_be32(type_value);
}
@@ -1383,7 +1383,7 @@ fc_gmal_req_build(struct fchs_s *fchs, void *pyld, u32 s_id, wwn_t wwn)
fc_gs_ms_cthdr_build(cthdr, s_id, GS_FC_GMAL_CMD,
CT_GSSUBTYPE_CFGSERVER);
- bfa_os_memset(gmal, 0, sizeof(fcgs_gmal_req_t));
+ memset(gmal, 0, sizeof(fcgs_gmal_req_t));
gmal->wwn = wwn;
return sizeof(struct ct_hdr_s) + sizeof(fcgs_gmal_req_t);
@@ -1403,7 +1403,7 @@ fc_gfn_req_build(struct fchs_s *fchs, void *pyld, u32 s_id, wwn_t wwn)
fc_gs_ms_cthdr_build(cthdr, s_id, GS_FC_GFN_CMD,
CT_GSSUBTYPE_CFGSERVER);
- bfa_os_memset(gfn, 0, sizeof(fcgs_gfn_req_t));
+ memset(gfn, 0, sizeof(fcgs_gfn_req_t));
gfn->wwn = wwn;
return sizeof(struct ct_hdr_s) + sizeof(fcgs_gfn_req_t);
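
The hunks above all follow one pattern: the driver-private byte-order and memory wrappers (bfa_os_htons, bfa_os_htonl, bfa_os_ntohs, bfa_os_memcpy, bfa_os_memset) are replaced by the standard kernel helpers, since the FC payload fields are big-endian on the wire. A minimal sketch of the resulting idiom follows; the struct and function names are made up for illustration and are not taken from the driver.

	/* Illustrative only: filling big-endian wire fields with the standard helpers. */
	#include <linux/types.h>
	#include <linux/string.h>
	#include <asm/byteorder.h>

	struct example_els_payload {
		__be16 rx_size;   /* receive data field size, big-endian on the wire */
		__be32 token;     /* vendor token, big-endian on the wire */
	};

	static void example_fill_payload(struct example_els_payload *p,
					 u16 rx_size, u32 token)
	{
		memset(p, 0, sizeof(*p));          /* was bfa_os_memset() */
		p->rx_size = cpu_to_be16(rx_size); /* was bfa_os_htons()  */
		p->token   = cpu_to_be32(token);   /* was bfa_os_htonl()  */
	}
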
diff --git a/drivers/scsi/bfa/bfa_fcpim.c b/drivers/scsi/bfa/bfa_fcpim.c
index 33c8dd51f474..135c4427801c 100644
--- a/drivers/scsi/bfa/bfa_fcpim.c
+++ b/drivers/scsi/bfa/bfa_fcpim.c
@@ -26,7 +26,7 @@ BFA_MODULE(fcpim);
(__l->__stats += __r->__stats)
-/**
+/*
* BFA ITNIM Related definitions
*/
static void bfa_itnim_update_del_itn_stats(struct bfa_itnim_s *itnim);
@@ -72,7 +72,7 @@ static void bfa_itnim_update_del_itn_stats(struct bfa_itnim_s *itnim);
} \
} while (0)
-/**
+/*
* bfa_itnim_sm BFA itnim state machine
*/
@@ -89,7 +89,7 @@ enum bfa_itnim_event {
BFA_ITNIM_SM_QRESUME = 9, /* queue space available */
};
-/**
+/*
* BFA IOIM related definitions
*/
#define bfa_ioim_move_to_comp_q(__ioim) do { \
@@ -107,11 +107,11 @@ enum bfa_itnim_event {
if ((__fcpim)->profile_start) \
(__fcpim)->profile_start(__ioim); \
} while (0)
-/**
+/*
* hal_ioim_sm
*/
-/**
+/*
* IO state machine events
*/
enum bfa_ioim_event {
@@ -136,11 +136,11 @@ enum bfa_ioim_event {
};
-/**
+/*
* BFA TSKIM related definitions
*/
-/**
+/*
* task management completion handling
*/
#define bfa_tskim_qcomp(__tskim, __cbfn) do { \
@@ -165,7 +165,7 @@ enum bfa_tskim_event {
BFA_TSKIM_SM_CLEANUP_DONE = 9, /* TM abort completion */
};
-/**
+/*
* forward declaration for BFA ITNIM functions
*/
static void bfa_itnim_iocdisable_cleanup(struct bfa_itnim_s *itnim);
@@ -183,7 +183,7 @@ static void bfa_itnim_iotov_start(struct bfa_itnim_s *itnim);
static void bfa_itnim_iotov_stop(struct bfa_itnim_s *itnim);
static void bfa_itnim_iotov_delete(struct bfa_itnim_s *itnim);
-/**
+/*
* forward declaration of ITNIM state machine
*/
static void bfa_itnim_sm_uninit(struct bfa_itnim_s *itnim,
@@ -217,7 +217,7 @@ static void bfa_itnim_sm_fwdelete_qfull(struct bfa_itnim_s *itnim,
static void bfa_itnim_sm_deleting_qfull(struct bfa_itnim_s *itnim,
enum bfa_itnim_event event);
-/**
+/*
* forward declaration for BFA IOIM functions
*/
static bfa_boolean_t bfa_ioim_send_ioreq(struct bfa_ioim_s *ioim);
@@ -233,7 +233,7 @@ static void __bfa_cb_ioim_pathtov(void *cbarg, bfa_boolean_t complete);
static bfa_boolean_t bfa_ioim_is_abortable(struct bfa_ioim_s *ioim);
-/**
+/*
* forward declaration of BFA IO state machine
*/
static void bfa_ioim_sm_uninit(struct bfa_ioim_s *ioim,
@@ -261,7 +261,7 @@ static void bfa_ioim_sm_resfree(struct bfa_ioim_s *ioim,
static void bfa_ioim_sm_cmnd_retry(struct bfa_ioim_s *ioim,
enum bfa_ioim_event event);
-/**
+/*
* forward declaration for BFA TSKIM functions
*/
static void __bfa_cb_tskim_done(void *cbarg, bfa_boolean_t complete);
@@ -276,7 +276,7 @@ static bfa_boolean_t bfa_tskim_send_abort(struct bfa_tskim_s *tskim);
static void bfa_tskim_iocdisable_ios(struct bfa_tskim_s *tskim);
-/**
+/*
* forward declaration of BFA TSKIM state machine
*/
static void bfa_tskim_sm_uninit(struct bfa_tskim_s *tskim,
@@ -294,11 +294,11 @@ static void bfa_tskim_sm_cleanup_qfull(struct bfa_tskim_s *tskim,
static void bfa_tskim_sm_hcb(struct bfa_tskim_s *tskim,
enum bfa_tskim_event event);
-/**
+/*
* hal_fcpim_mod BFA FCP Initiator Mode module
*/
-/**
+/*
* Compute and return memory needed by FCP(im) module.
*/
static void
@@ -307,7 +307,7 @@ bfa_fcpim_meminfo(struct bfa_iocfc_cfg_s *cfg, u32 *km_len,
{
bfa_itnim_meminfo(cfg, km_len, dm_len);
- /**
+ /*
* IO memory
*/
if (cfg->fwcfg.num_ioim_reqs < BFA_IOIM_MIN)
@@ -320,7 +320,7 @@ bfa_fcpim_meminfo(struct bfa_iocfc_cfg_s *cfg, u32 *km_len,
*dm_len += cfg->fwcfg.num_ioim_reqs * BFI_IOIM_SNSLEN;
- /**
+ /*
* task management command memory
*/
if (cfg->fwcfg.num_tskim_reqs < BFA_TSKIM_MIN)
@@ -463,7 +463,7 @@ bfa_fcpim_port_iostats(struct bfa_s *bfa, struct bfa_itnim_iostats_s *stats,
struct bfa_itnim_s *itnim;
/* accumulate IO stats from itnim */
- bfa_os_memset(stats, 0, sizeof(struct bfa_itnim_iostats_s));
+ memset(stats, 0, sizeof(struct bfa_itnim_iostats_s));
list_for_each_safe(qe, qen, &fcpim->itnim_q) {
itnim = (struct bfa_itnim_s *) qe;
if (itnim->rport->rport_info.lp_tag != lp_tag)
@@ -480,7 +480,7 @@ bfa_fcpim_get_modstats(struct bfa_s *bfa, struct bfa_itnim_iostats_s *modstats)
struct bfa_itnim_s *itnim;
/* accumulate IO stats from itnim */
- bfa_os_memset(modstats, 0, sizeof(struct bfa_itnim_iostats_s));
+ memset(modstats, 0, sizeof(struct bfa_itnim_iostats_s));
list_for_each_safe(qe, qen, &fcpim->itnim_q) {
itnim = (struct bfa_itnim_s *) qe;
bfa_fcpim_add_stats(modstats, &(itnim->stats));
@@ -560,7 +560,7 @@ bfa_fcpim_clr_modstats(struct bfa_s *bfa)
itnim = (struct bfa_itnim_s *) qe;
bfa_itnim_clear_stats(itnim);
}
- bfa_os_memset(&fcpim->del_itn_stats, 0,
+ memset(&fcpim->del_itn_stats, 0,
sizeof(struct bfa_fcpim_del_itn_stats_s));
return BFA_STATUS_OK;
@@ -604,11 +604,11 @@ bfa_fcpim_set_ioredirect(struct bfa_s *bfa, bfa_boolean_t state)
-/**
+/*
* BFA ITNIM module state machine functions
*/
-/**
+/*
* Beginning/unallocated state - no events expected.
*/
static void
@@ -629,7 +629,7 @@ bfa_itnim_sm_uninit(struct bfa_itnim_s *itnim, enum bfa_itnim_event event)
}
}
-/**
+/*
* Beginning state, only online event expected.
*/
static void
@@ -660,7 +660,7 @@ bfa_itnim_sm_created(struct bfa_itnim_s *itnim, enum bfa_itnim_event event)
}
}
-/**
+/*
* Waiting for itnim create response from firmware.
*/
static void
@@ -732,7 +732,7 @@ bfa_itnim_sm_fwcreate_qfull(struct bfa_itnim_s *itnim,
}
}
-/**
+/*
* Waiting for itnim create response from firmware, a delete is pending.
*/
static void
@@ -760,7 +760,7 @@ bfa_itnim_sm_delete_pending(struct bfa_itnim_s *itnim,
}
}
-/**
+/*
* Online state - normal parking state.
*/
static void
@@ -802,7 +802,7 @@ bfa_itnim_sm_online(struct bfa_itnim_s *itnim, enum bfa_itnim_event event)
}
}
-/**
+/*
* Second level error recovery need.
*/
static void
@@ -833,7 +833,7 @@ bfa_itnim_sm_sler(struct bfa_itnim_s *itnim, enum bfa_itnim_event event)
}
}
-/**
+/*
* Going offline. Waiting for active IO cleanup.
*/
static void
@@ -870,7 +870,7 @@ bfa_itnim_sm_cleanup_offline(struct bfa_itnim_s *itnim,
}
}
-/**
+/*
* Deleting itnim. Waiting for active IO cleanup.
*/
static void
@@ -898,7 +898,7 @@ bfa_itnim_sm_cleanup_delete(struct bfa_itnim_s *itnim,
}
}
-/**
+/*
 * Rport offline. Firmware itnim is being deleted - awaiting f/w response.
*/
static void
@@ -955,7 +955,7 @@ bfa_itnim_sm_fwdelete_qfull(struct bfa_itnim_s *itnim,
}
}
-/**
+/*
* Offline state.
*/
static void
@@ -987,7 +987,7 @@ bfa_itnim_sm_offline(struct bfa_itnim_s *itnim, enum bfa_itnim_event event)
}
}
-/**
+/*
* IOC h/w failed state.
*/
static void
@@ -1023,7 +1023,7 @@ bfa_itnim_sm_iocdisable(struct bfa_itnim_s *itnim,
}
}
-/**
+/*
* Itnim is deleted, waiting for firmware response to delete.
*/
static void
@@ -1068,7 +1068,7 @@ bfa_itnim_sm_deleting_qfull(struct bfa_itnim_s *itnim,
}
}
-/**
+/*
* Initiate cleanup of all IOs on an IOC failure.
*/
static void
@@ -1088,7 +1088,7 @@ bfa_itnim_iocdisable_cleanup(struct bfa_itnim_s *itnim)
bfa_ioim_iocdisable(ioim);
}
- /**
+ /*
* For IO request in pending queue, we pretend an early timeout.
*/
list_for_each_safe(qe, qen, &itnim->pending_q) {
@@ -1102,7 +1102,7 @@ bfa_itnim_iocdisable_cleanup(struct bfa_itnim_s *itnim)
}
}
-/**
+/*
* IO cleanup completion
*/
static void
@@ -1114,7 +1114,7 @@ bfa_itnim_cleanp_comp(void *itnim_cbarg)
bfa_sm_send_event(itnim, BFA_ITNIM_SM_CLEANUP);
}
-/**
+/*
* Initiate cleanup of all IOs.
*/
static void
@@ -1129,7 +1129,7 @@ bfa_itnim_cleanup(struct bfa_itnim_s *itnim)
list_for_each_safe(qe, qen, &itnim->io_q) {
ioim = (struct bfa_ioim_s *) qe;
- /**
+ /*
* Move IO to a cleanup queue from active queue so that a later
* TM will not pickup this IO.
*/
@@ -1176,7 +1176,7 @@ __bfa_cb_itnim_sler(void *cbarg, bfa_boolean_t complete)
bfa_cb_itnim_sler(itnim->ditn);
}
-/**
+/*
* Call to resume any I/O requests waiting for room in request queue.
*/
static void
@@ -1190,7 +1190,7 @@ bfa_itnim_qresume(void *cbarg)
-/**
+/*
* bfa_itnim_public
*/
@@ -1210,7 +1210,7 @@ void
bfa_itnim_meminfo(struct bfa_iocfc_cfg_s *cfg, u32 *km_len,
u32 *dm_len)
{
- /**
+ /*
* ITN memory
*/
*km_len += cfg->fwcfg.num_rports * sizeof(struct bfa_itnim_s);
@@ -1229,7 +1229,7 @@ bfa_itnim_attach(struct bfa_fcpim_mod_s *fcpim, struct bfa_meminfo_s *minfo)
fcpim->itnim_arr = itnim;
for (i = 0; i < fcpim->num_itnims; i++, itnim++) {
- bfa_os_memset(itnim, 0, sizeof(struct bfa_itnim_s));
+ memset(itnim, 0, sizeof(struct bfa_itnim_s));
itnim->bfa = bfa;
itnim->fcpim = fcpim;
itnim->reqq = BFA_REQQ_QOS_LO;
@@ -1264,7 +1264,7 @@ bfa_itnim_send_fwcreate(struct bfa_itnim_s *itnim)
itnim->msg_no++;
- /**
+ /*
* check for room in queue to send request now
*/
m = bfa_reqq_next(itnim->bfa, itnim->reqq);
@@ -1281,7 +1281,7 @@ bfa_itnim_send_fwcreate(struct bfa_itnim_s *itnim)
m->msg_no = itnim->msg_no;
bfa_stats(itnim, fw_create);
- /**
+ /*
* queue I/O message to firmware
*/
bfa_reqq_produce(itnim->bfa, itnim->reqq);
@@ -1293,7 +1293,7 @@ bfa_itnim_send_fwdelete(struct bfa_itnim_s *itnim)
{
struct bfi_itnim_delete_req_s *m;
- /**
+ /*
* check for room in queue to send request now
*/
m = bfa_reqq_next(itnim->bfa, itnim->reqq);
@@ -1307,14 +1307,14 @@ bfa_itnim_send_fwdelete(struct bfa_itnim_s *itnim)
m->fw_handle = itnim->rport->fw_handle;
bfa_stats(itnim, fw_delete);
- /**
+ /*
* queue I/O message to firmware
*/
bfa_reqq_produce(itnim->bfa, itnim->reqq);
return BFA_TRUE;
}
-/**
+/*
* Cleanup all pending failed inflight requests.
*/
static void
@@ -1329,7 +1329,7 @@ bfa_itnim_delayed_comp(struct bfa_itnim_s *itnim, bfa_boolean_t iotov)
}
}
-/**
+/*
* Start all pending IO requests.
*/
static void
@@ -1339,12 +1339,12 @@ bfa_itnim_iotov_online(struct bfa_itnim_s *itnim)
bfa_itnim_iotov_stop(itnim);
- /**
+ /*
* Abort all inflight IO requests in the queue
*/
bfa_itnim_delayed_comp(itnim, BFA_FALSE);
- /**
+ /*
* Start all pending IO requests.
*/
while (!list_empty(&itnim->pending_q)) {
@@ -1354,7 +1354,7 @@ bfa_itnim_iotov_online(struct bfa_itnim_s *itnim)
}
}
-/**
+/*
* Fail all pending IO requests
*/
static void
@@ -1362,12 +1362,12 @@ bfa_itnim_iotov_cleanup(struct bfa_itnim_s *itnim)
{
struct bfa_ioim_s *ioim;
- /**
+ /*
* Fail all inflight IO requests in the queue
*/
bfa_itnim_delayed_comp(itnim, BFA_TRUE);
- /**
+ /*
* Fail any pending IO requests.
*/
while (!list_empty(&itnim->pending_q)) {
@@ -1377,7 +1377,7 @@ bfa_itnim_iotov_cleanup(struct bfa_itnim_s *itnim)
}
}
-/**
+/*
* IO TOV timer callback. Fail any pending IO requests.
*/
static void
@@ -1392,7 +1392,7 @@ bfa_itnim_iotov(void *itnim_arg)
bfa_cb_itnim_tov(itnim->ditn);
}
-/**
+/*
* Start IO TOV timer for failing back pending IO requests in offline state.
*/
static void
@@ -1407,7 +1407,7 @@ bfa_itnim_iotov_start(struct bfa_itnim_s *itnim)
}
}
-/**
+/*
* Stop IO TOV timer.
*/
static void
@@ -1419,7 +1419,7 @@ bfa_itnim_iotov_stop(struct bfa_itnim_s *itnim)
}
}
-/**
+/*
* Stop IO TOV timer.
*/
static void
@@ -1459,11 +1459,11 @@ bfa_itnim_update_del_itn_stats(struct bfa_itnim_s *itnim)
-/**
+/*
* bfa_itnim_public
*/
-/**
+/*
* Itnim interrupt processing.
*/
void
@@ -1509,7 +1509,7 @@ bfa_itnim_isr(struct bfa_s *bfa, struct bfi_msg_s *m)
-/**
+/*
* bfa_itnim_api
*/
@@ -1552,7 +1552,7 @@ bfa_itnim_offline(struct bfa_itnim_s *itnim)
bfa_sm_send_event(itnim, BFA_ITNIM_SM_OFFLINE);
}
-/**
+/*
* Return true if itnim is considered offline for holding off IO request.
* IO is not held if itnim is being deleted.
*/
@@ -1597,17 +1597,17 @@ void
bfa_itnim_clear_stats(struct bfa_itnim_s *itnim)
{
int j;
- bfa_os_memset(&itnim->stats, 0, sizeof(itnim->stats));
- bfa_os_memset(&itnim->ioprofile, 0, sizeof(itnim->ioprofile));
+ memset(&itnim->stats, 0, sizeof(itnim->stats));
+ memset(&itnim->ioprofile, 0, sizeof(itnim->ioprofile));
for (j = 0; j < BFA_IOBUCKET_MAX; j++)
itnim->ioprofile.io_latency.min[j] = ~0;
}
-/**
+/*
* BFA IO module state machine functions
*/
-/**
+/*
* IO is not started (unallocated).
*/
static void
@@ -1657,7 +1657,7 @@ bfa_ioim_sm_uninit(struct bfa_ioim_s *ioim, enum bfa_ioim_event event)
break;
case BFA_IOIM_SM_ABORT:
- /**
+ /*
* IO in pending queue can get abort requests. Complete abort
* requests immediately.
*/
@@ -1672,7 +1672,7 @@ bfa_ioim_sm_uninit(struct bfa_ioim_s *ioim, enum bfa_ioim_event event)
}
}
-/**
+/*
* IO is waiting for SG pages.
*/
static void
@@ -1719,7 +1719,7 @@ bfa_ioim_sm_sgalloc(struct bfa_ioim_s *ioim, enum bfa_ioim_event event)
}
}
-/**
+/*
* IO is active.
*/
static void
@@ -1803,7 +1803,7 @@ bfa_ioim_sm_active(struct bfa_ioim_s *ioim, enum bfa_ioim_event event)
}
}
-/**
+/*
* IO is retried with new tag.
*/
static void
@@ -1844,7 +1844,7 @@ bfa_ioim_sm_cmnd_retry(struct bfa_ioim_s *ioim, enum bfa_ioim_event event)
break;
case BFA_IOIM_SM_ABORT:
- /** in this state IO abort is done.
+ /* in this state IO abort is done.
* Waiting for IO tag resource free.
*/
bfa_sm_set_state(ioim, bfa_ioim_sm_hcb_free);
@@ -1857,7 +1857,7 @@ bfa_ioim_sm_cmnd_retry(struct bfa_ioim_s *ioim, enum bfa_ioim_event event)
}
}
-/**
+/*
* IO is being aborted, waiting for completion from firmware.
*/
static void
@@ -1919,7 +1919,7 @@ bfa_ioim_sm_abort(struct bfa_ioim_s *ioim, enum bfa_ioim_event event)
}
}
-/**
+/*
* IO is being cleaned up (implicit abort), waiting for completion from
* firmware.
*/
@@ -1937,7 +1937,7 @@ bfa_ioim_sm_cleanup(struct bfa_ioim_s *ioim, enum bfa_ioim_event event)
break;
case BFA_IOIM_SM_ABORT:
- /**
+ /*
* IO is already being aborted implicitly
*/
ioim->io_cbfn = __bfa_cb_ioim_abort;
@@ -1969,7 +1969,7 @@ bfa_ioim_sm_cleanup(struct bfa_ioim_s *ioim, enum bfa_ioim_event event)
break;
case BFA_IOIM_SM_CLEANUP:
- /**
+ /*
* IO can be in cleanup state already due to TM command.
* 2nd cleanup request comes from ITN offline event.
*/
@@ -1980,7 +1980,7 @@ bfa_ioim_sm_cleanup(struct bfa_ioim_s *ioim, enum bfa_ioim_event event)
}
}
-/**
+/*
* IO is waiting for room in request CQ
*/
static void
@@ -2024,7 +2024,7 @@ bfa_ioim_sm_qfull(struct bfa_ioim_s *ioim, enum bfa_ioim_event event)
}
}
-/**
+/*
* Active IO is being aborted, waiting for room in request CQ.
*/
static void
@@ -2075,7 +2075,7 @@ bfa_ioim_sm_abort_qfull(struct bfa_ioim_s *ioim, enum bfa_ioim_event event)
}
}
-/**
+/*
* Active IO is being cleaned up, waiting for room in request CQ.
*/
static void
@@ -2091,7 +2091,7 @@ bfa_ioim_sm_cleanup_qfull(struct bfa_ioim_s *ioim, enum bfa_ioim_event event)
break;
case BFA_IOIM_SM_ABORT:
- /**
+ /*
 * IO is already being cleaned up implicitly
*/
ioim->io_cbfn = __bfa_cb_ioim_abort;
@@ -2125,7 +2125,7 @@ bfa_ioim_sm_cleanup_qfull(struct bfa_ioim_s *ioim, enum bfa_ioim_event event)
}
}
-/**
+/*
* IO bfa callback is pending.
*/
static void
@@ -2152,7 +2152,7 @@ bfa_ioim_sm_hcb(struct bfa_ioim_s *ioim, enum bfa_ioim_event event)
}
}
-/**
+/*
* IO bfa callback is pending. IO resource cannot be freed.
*/
static void
@@ -2185,7 +2185,7 @@ bfa_ioim_sm_hcb_free(struct bfa_ioim_s *ioim, enum bfa_ioim_event event)
}
}
-/**
+/*
* IO is completed, waiting resource free from firmware.
*/
static void
@@ -2214,7 +2214,7 @@ bfa_ioim_sm_resfree(struct bfa_ioim_s *ioim, enum bfa_ioim_event event)
-/**
+/*
* hal_ioim_private
*/
@@ -2247,7 +2247,7 @@ __bfa_cb_ioim_comp(void *cbarg, bfa_boolean_t complete)
m = (struct bfi_ioim_rsp_s *) &ioim->iosp->comp_rspmsg;
if (m->io_status == BFI_IOIM_STS_OK) {
- /**
+ /*
* setup sense information, if present
*/
if ((m->scsi_status == SCSI_STATUS_CHECK_CONDITION) &&
@@ -2256,15 +2256,15 @@ __bfa_cb_ioim_comp(void *cbarg, bfa_boolean_t complete)
snsinfo = ioim->iosp->snsinfo;
}
- /**
+ /*
* setup residue value correctly for normal completions
*/
if (m->resid_flags == FCP_RESID_UNDER) {
- residue = bfa_os_ntohl(m->residue);
+ residue = be32_to_cpu(m->residue);
bfa_stats(ioim->itnim, iocomp_underrun);
}
if (m->resid_flags == FCP_RESID_OVER) {
- residue = bfa_os_ntohl(m->residue);
+ residue = be32_to_cpu(m->residue);
residue = -residue;
bfa_stats(ioim->itnim, iocomp_overrun);
}
@@ -2327,7 +2327,7 @@ bfa_ioim_sgpg_alloced(void *cbarg)
bfa_sm_send_event(ioim, BFA_IOIM_SM_SGALLOCED);
}
-/**
+/*
* Send I/O request to firmware.
*/
static bfa_boolean_t
@@ -2343,7 +2343,7 @@ bfa_ioim_send_ioreq(struct bfa_ioim_s *ioim)
struct scatterlist *sg;
struct scsi_cmnd *cmnd = (struct scsi_cmnd *) ioim->dio;
- /**
+ /*
* check for room in queue to send request now
*/
m = bfa_reqq_next(ioim->bfa, ioim->reqq);
@@ -2354,14 +2354,14 @@ bfa_ioim_send_ioreq(struct bfa_ioim_s *ioim)
return BFA_FALSE;
}
- /**
+ /*
* build i/o request message next
*/
- m->io_tag = bfa_os_htons(ioim->iotag);
+ m->io_tag = cpu_to_be16(ioim->iotag);
m->rport_hdl = ioim->itnim->rport->fw_handle;
m->io_timeout = bfa_cb_ioim_get_timeout(ioim->dio);
- /**
+ /*
* build inline IO SG element here
*/
sge = &m->sges[0];
@@ -2387,18 +2387,17 @@ bfa_ioim_send_ioreq(struct bfa_ioim_s *ioim)
sge->flags = BFI_SGE_PGDLEN;
bfa_sge_to_be(sge);
- /**
+ /*
* set up I/O command parameters
*/
- bfa_os_assign(m->cmnd, cmnd_z0);
+ m->cmnd = cmnd_z0;
m->cmnd.lun = bfa_cb_ioim_get_lun(ioim->dio);
m->cmnd.iodir = bfa_cb_ioim_get_iodir(ioim->dio);
- bfa_os_assign(m->cmnd.cdb,
- *(scsi_cdb_t *)bfa_cb_ioim_get_cdb(ioim->dio));
+ m->cmnd.cdb = *(scsi_cdb_t *)bfa_cb_ioim_get_cdb(ioim->dio);
fcp_dl = bfa_cb_ioim_get_size(ioim->dio);
- m->cmnd.fcp_dl = bfa_os_htonl(fcp_dl);
+ m->cmnd.fcp_dl = cpu_to_be32(fcp_dl);
- /**
+ /*
* set up I/O message header
*/
switch (m->cmnd.iodir) {
@@ -2427,28 +2426,28 @@ bfa_ioim_send_ioreq(struct bfa_ioim_s *ioim)
m->cmnd.priority = bfa_cb_ioim_get_priority(ioim->dio);
m->cmnd.taskattr = bfa_cb_ioim_get_taskattr(ioim->dio);
- /**
+ /*
* Handle large CDB (>16 bytes).
*/
m->cmnd.addl_cdb_len = (bfa_cb_ioim_get_cdblen(ioim->dio) -
FCP_CMND_CDB_LEN) / sizeof(u32);
if (m->cmnd.addl_cdb_len) {
- bfa_os_memcpy(&m->cmnd.cdb + 1, (scsi_cdb_t *)
+ memcpy(&m->cmnd.cdb + 1, (scsi_cdb_t *)
bfa_cb_ioim_get_cdb(ioim->dio) + 1,
m->cmnd.addl_cdb_len * sizeof(u32));
fcp_cmnd_fcpdl(&m->cmnd) =
- bfa_os_htonl(bfa_cb_ioim_get_size(ioim->dio));
+ cpu_to_be32(bfa_cb_ioim_get_size(ioim->dio));
}
#endif
- /**
+ /*
* queue I/O message to firmware
*/
bfa_reqq_produce(ioim->bfa, ioim->reqq);
return BFA_TRUE;
}
-/**
+/*
 * Setup any additional SG pages needed. Inline SG element is setup
* at queuing time.
*/
@@ -2459,7 +2458,7 @@ bfa_ioim_sge_setup(struct bfa_ioim_s *ioim)
bfa_assert(ioim->nsges > BFI_SGE_INLINE);
- /**
+ /*
* allocate SG pages needed
*/
nsgpgs = BFA_SGPG_NPAGE(ioim->nsges);
@@ -2508,7 +2507,7 @@ bfa_ioim_sgpg_setup(struct bfa_ioim_s *ioim)
sge->sg_len = sg_dma_len(sg);
pgcumsz += sge->sg_len;
- /**
+ /*
* set flags
*/
if (i < (nsges - 1))
@@ -2523,7 +2522,7 @@ bfa_ioim_sgpg_setup(struct bfa_ioim_s *ioim)
sgpg = (struct bfa_sgpg_s *) bfa_q_next(sgpg);
- /**
+ /*
* set the link element of each page
*/
if (sgeid == ioim->nsges) {
@@ -2540,7 +2539,7 @@ bfa_ioim_sgpg_setup(struct bfa_ioim_s *ioim)
} while (sgeid < ioim->nsges);
}
-/**
+/*
* Send I/O abort request to firmware.
*/
static bfa_boolean_t
@@ -2549,14 +2548,14 @@ bfa_ioim_send_abort(struct bfa_ioim_s *ioim)
struct bfi_ioim_abort_req_s *m;
enum bfi_ioim_h2i msgop;
- /**
+ /*
* check for room in queue to send request now
*/
m = bfa_reqq_next(ioim->bfa, ioim->reqq);
if (!m)
return BFA_FALSE;
- /**
+ /*
* build i/o request message next
*/
if (ioim->iosp->abort_explicit)
@@ -2565,17 +2564,17 @@ bfa_ioim_send_abort(struct bfa_ioim_s *ioim)
msgop = BFI_IOIM_H2I_IOCLEANUP_REQ;
bfi_h2i_set(m->mh, BFI_MC_IOIM, msgop, bfa_lpuid(ioim->bfa));
- m->io_tag = bfa_os_htons(ioim->iotag);
+ m->io_tag = cpu_to_be16(ioim->iotag);
m->abort_tag = ++ioim->abort_tag;
- /**
+ /*
* queue I/O message to firmware
*/
bfa_reqq_produce(ioim->bfa, ioim->reqq);
return BFA_TRUE;
}
-/**
+/*
* Call to resume any I/O requests waiting for room in request queue.
*/
static void
@@ -2591,7 +2590,7 @@ bfa_ioim_qresume(void *cbarg)
static void
bfa_ioim_notify_cleanup(struct bfa_ioim_s *ioim)
{
- /**
+ /*
* Move IO from itnim queue to fcpim global queue since itnim will be
* freed.
*/
@@ -2624,13 +2623,13 @@ bfa_ioim_is_abortable(struct bfa_ioim_s *ioim)
return BFA_TRUE;
}
-/**
+/*
* or after the link comes back.
*/
void
bfa_ioim_delayed_comp(struct bfa_ioim_s *ioim, bfa_boolean_t iotov)
{
- /**
+ /*
* If path tov timer expired, failback with PATHTOV status - these
* IO requests are not normally retried by IO stack.
*
@@ -2645,7 +2644,7 @@ bfa_ioim_delayed_comp(struct bfa_ioim_s *ioim, bfa_boolean_t iotov)
}
bfa_cb_queue(ioim->bfa, &ioim->hcb_qe, ioim->io_cbfn, ioim);
- /**
+ /*
* Move IO to fcpim global queue since itnim will be
* freed.
*/
@@ -2655,11 +2654,11 @@ bfa_ioim_delayed_comp(struct bfa_ioim_s *ioim, bfa_boolean_t iotov)
-/**
+/*
* hal_ioim_friend
*/
-/**
+/*
* Memory allocation and initialization.
*/
void
@@ -2671,7 +2670,7 @@ bfa_ioim_attach(struct bfa_fcpim_mod_s *fcpim, struct bfa_meminfo_s *minfo)
u8 *snsinfo;
u32 snsbufsz;
- /**
+ /*
* claim memory first
*/
ioim = (struct bfa_ioim_s *) bfa_meminfo_kva(minfo);
@@ -2682,7 +2681,7 @@ bfa_ioim_attach(struct bfa_fcpim_mod_s *fcpim, struct bfa_meminfo_s *minfo)
fcpim->ioim_sp_arr = iosp;
bfa_meminfo_kva(minfo) = (u8 *) (iosp + fcpim->num_ioim_reqs);
- /**
+ /*
* Claim DMA memory for per IO sense data.
*/
snsbufsz = fcpim->num_ioim_reqs * BFI_IOIM_SNSLEN;
@@ -2694,7 +2693,7 @@ bfa_ioim_attach(struct bfa_fcpim_mod_s *fcpim, struct bfa_meminfo_s *minfo)
snsinfo = fcpim->snsbase.kva;
bfa_iocfc_set_snsbase(fcpim->bfa, fcpim->snsbase.pa);
- /**
+ /*
* Initialize ioim free queues
*/
INIT_LIST_HEAD(&fcpim->ioim_free_q);
@@ -2706,7 +2705,7 @@ bfa_ioim_attach(struct bfa_fcpim_mod_s *fcpim, struct bfa_meminfo_s *minfo)
/*
* initialize IOIM
*/
- bfa_os_memset(ioim, 0, sizeof(struct bfa_ioim_s));
+ memset(ioim, 0, sizeof(struct bfa_ioim_s));
ioim->iotag = i;
ioim->bfa = fcpim->bfa;
ioim->fcpim = fcpim;
@@ -2723,7 +2722,7 @@ bfa_ioim_attach(struct bfa_fcpim_mod_s *fcpim, struct bfa_meminfo_s *minfo)
}
}
-/**
+/*
* Driver detach time call.
*/
void
@@ -2740,7 +2739,7 @@ bfa_ioim_isr(struct bfa_s *bfa, struct bfi_msg_s *m)
u16 iotag;
enum bfa_ioim_event evt = BFA_IOIM_SM_COMP;
- iotag = bfa_os_ntohs(rsp->io_tag);
+ iotag = be16_to_cpu(rsp->io_tag);
ioim = BFA_IOIM_FROM_TAG(fcpim, iotag);
bfa_assert(ioim->iotag == iotag);
@@ -2750,7 +2749,7 @@ bfa_ioim_isr(struct bfa_s *bfa, struct bfi_msg_s *m)
bfa_trc(ioim->bfa, rsp->reuse_io_tag);
if (bfa_sm_cmp_state(ioim, bfa_ioim_sm_active))
- bfa_os_assign(ioim->iosp->comp_rspmsg, *m);
+ ioim->iosp->comp_rspmsg = *m;
switch (rsp->io_status) {
case BFI_IOIM_STS_OK:
@@ -2823,7 +2822,7 @@ bfa_ioim_good_comp_isr(struct bfa_s *bfa, struct bfi_msg_s *m)
struct bfa_ioim_s *ioim;
u16 iotag;
- iotag = bfa_os_ntohs(rsp->io_tag);
+ iotag = be16_to_cpu(rsp->io_tag);
ioim = BFA_IOIM_FROM_TAG(fcpim, iotag);
bfa_assert(ioim->iotag == iotag);
@@ -2837,7 +2836,7 @@ bfa_ioim_good_comp_isr(struct bfa_s *bfa, struct bfi_msg_s *m)
void
bfa_ioim_profile_start(struct bfa_ioim_s *ioim)
{
- ioim->start_time = bfa_os_get_clock();
+ ioim->start_time = jiffies;
}
void
@@ -2845,7 +2844,7 @@ bfa_ioim_profile_comp(struct bfa_ioim_s *ioim)
{
u32 fcp_dl = bfa_cb_ioim_get_size(ioim->dio);
u32 index = bfa_ioim_get_index(fcp_dl);
- u64 end_time = bfa_os_get_clock();
+ u64 end_time = jiffies;
struct bfa_itnim_latency_s *io_lat =
&(ioim->itnim->ioprofile.io_latency);
u32 val = (u32)(end_time - ioim->start_time);
@@ -2859,7 +2858,7 @@ bfa_ioim_profile_comp(struct bfa_ioim_s *ioim)
io_lat->max[index] : val;
io_lat->avg[index] += val;
}
-/**
+/*
* Called by itnim to clean up IO while going offline.
*/
void
@@ -2882,7 +2881,7 @@ bfa_ioim_cleanup_tm(struct bfa_ioim_s *ioim, struct bfa_tskim_s *tskim)
bfa_sm_send_event(ioim, BFA_IOIM_SM_CLEANUP);
}
-/**
+/*
* IOC failure handling.
*/
void
@@ -2893,7 +2892,7 @@ bfa_ioim_iocdisable(struct bfa_ioim_s *ioim)
bfa_sm_send_event(ioim, BFA_IOIM_SM_HWFAIL);
}
-/**
+/*
* IO offline TOV popped. Fail the pending IO.
*/
void
@@ -2905,11 +2904,11 @@ bfa_ioim_tov(struct bfa_ioim_s *ioim)
-/**
+/*
* hal_ioim_api
*/
-/**
+/*
* Allocate IOIM resource for initiator mode I/O request.
*/
struct bfa_ioim_s *
@@ -2919,7 +2918,7 @@ bfa_ioim_alloc(struct bfa_s *bfa, struct bfad_ioim_s *dio,
struct bfa_fcpim_mod_s *fcpim = BFA_FCPIM_MOD(bfa);
struct bfa_ioim_s *ioim;
- /**
+ /*
 * allocate IOIM resource
*/
bfa_q_deq(&fcpim->ioim_free_q, &ioim);
@@ -2970,7 +2969,7 @@ bfa_ioim_start(struct bfa_ioim_s *ioim)
bfa_ioim_cb_profile_start(ioim->fcpim, ioim);
- /**
+ /*
* Obtain the queue over which this request has to be issued
*/
ioim->reqq = bfa_fcpim_ioredirect_enabled(ioim->bfa) ?
@@ -2980,7 +2979,7 @@ bfa_ioim_start(struct bfa_ioim_s *ioim)
bfa_sm_send_event(ioim, BFA_IOIM_SM_START);
}
-/**
+/*
* Driver I/O abort request.
*/
bfa_status_t
@@ -2999,11 +2998,11 @@ bfa_ioim_abort(struct bfa_ioim_s *ioim)
}
-/**
+/*
* BFA TSKIM state machine functions
*/
-/**
+/*
* Task management command beginning state.
*/
static void
@@ -3016,7 +3015,7 @@ bfa_tskim_sm_uninit(struct bfa_tskim_s *tskim, enum bfa_tskim_event event)
bfa_sm_set_state(tskim, bfa_tskim_sm_active);
bfa_tskim_gather_ios(tskim);
- /**
+ /*
* If device is offline, do not send TM on wire. Just cleanup
* any pending IO requests and complete TM request.
*/
@@ -3040,7 +3039,7 @@ bfa_tskim_sm_uninit(struct bfa_tskim_s *tskim, enum bfa_tskim_event event)
}
}
-/**
+/*
* brief
* TM command is active, awaiting completion from firmware to
* cleanup IO requests in TM scope.
@@ -3077,7 +3076,7 @@ bfa_tskim_sm_active(struct bfa_tskim_s *tskim, enum bfa_tskim_event event)
}
}
-/**
+/*
* An active TM is being cleaned up since ITN is offline. Awaiting cleanup
* completion event from firmware.
*/
@@ -3088,7 +3087,7 @@ bfa_tskim_sm_cleanup(struct bfa_tskim_s *tskim, enum bfa_tskim_event event)
switch (event) {
case BFA_TSKIM_SM_DONE:
- /**
+ /*
* Ignore and wait for ABORT completion from firmware.
*/
break;
@@ -3121,7 +3120,7 @@ bfa_tskim_sm_iocleanup(struct bfa_tskim_s *tskim, enum bfa_tskim_event event)
break;
case BFA_TSKIM_SM_CLEANUP:
- /**
+ /*
* Ignore, TM command completed on wire.
 * Notify TM completion on IO cleanup completion.
*/
@@ -3138,7 +3137,7 @@ bfa_tskim_sm_iocleanup(struct bfa_tskim_s *tskim, enum bfa_tskim_event event)
}
}
-/**
+/*
* Task management command is waiting for room in request CQ
*/
static void
@@ -3153,7 +3152,7 @@ bfa_tskim_sm_qfull(struct bfa_tskim_s *tskim, enum bfa_tskim_event event)
break;
case BFA_TSKIM_SM_CLEANUP:
- /**
+ /*
* No need to send TM on wire since ITN is offline.
*/
bfa_sm_set_state(tskim, bfa_tskim_sm_iocleanup);
@@ -3173,7 +3172,7 @@ bfa_tskim_sm_qfull(struct bfa_tskim_s *tskim, enum bfa_tskim_event event)
}
}
-/**
+/*
* Task management command is active, awaiting for room in request CQ
* to send clean up request.
*/
@@ -3186,7 +3185,7 @@ bfa_tskim_sm_cleanup_qfull(struct bfa_tskim_s *tskim,
switch (event) {
case BFA_TSKIM_SM_DONE:
bfa_reqq_wcancel(&tskim->reqq_wait);
- /**
+ /*
*
* Fall through !!!
*/
@@ -3208,7 +3207,7 @@ bfa_tskim_sm_cleanup_qfull(struct bfa_tskim_s *tskim,
}
}
-/**
+/*
* BFA callback is pending
*/
static void
@@ -3236,7 +3235,7 @@ bfa_tskim_sm_hcb(struct bfa_tskim_s *tskim, enum bfa_tskim_event event)
-/**
+/*
* hal_tskim_private
*/
@@ -3289,7 +3288,7 @@ bfa_tskim_match_scope(struct bfa_tskim_s *tskim, lun_t lun)
return BFA_FALSE;
}
-/**
+/*
* Gather affected IO requests and task management commands.
*/
static void
@@ -3301,7 +3300,7 @@ bfa_tskim_gather_ios(struct bfa_tskim_s *tskim)
INIT_LIST_HEAD(&tskim->io_q);
- /**
+ /*
* Gather any active IO requests first.
*/
list_for_each_safe(qe, qen, &itnim->io_q) {
@@ -3313,7 +3312,7 @@ bfa_tskim_gather_ios(struct bfa_tskim_s *tskim)
}
}
- /**
+ /*
* Failback any pending IO requests immediately.
*/
list_for_each_safe(qe, qen, &itnim->pending_q) {
@@ -3327,7 +3326,7 @@ bfa_tskim_gather_ios(struct bfa_tskim_s *tskim)
}
}
-/**
+/*
* IO cleanup completion
*/
static void
@@ -3339,7 +3338,7 @@ bfa_tskim_cleanp_comp(void *tskim_cbarg)
bfa_sm_send_event(tskim, BFA_TSKIM_SM_IOS_DONE);
}
-/**
+/*
* Gather affected IO requests and task management commands.
*/
static void
@@ -3359,7 +3358,7 @@ bfa_tskim_cleanup_ios(struct bfa_tskim_s *tskim)
bfa_wc_wait(&tskim->wc);
}
-/**
+/*
* Send task management request to firmware.
*/
static bfa_boolean_t
@@ -3368,33 +3367,33 @@ bfa_tskim_send(struct bfa_tskim_s *tskim)
struct bfa_itnim_s *itnim = tskim->itnim;
struct bfi_tskim_req_s *m;
- /**
+ /*
* check for room in queue to send request now
*/
m = bfa_reqq_next(tskim->bfa, itnim->reqq);
if (!m)
return BFA_FALSE;
- /**
+ /*
* build i/o request message next
*/
bfi_h2i_set(m->mh, BFI_MC_TSKIM, BFI_TSKIM_H2I_TM_REQ,
bfa_lpuid(tskim->bfa));
- m->tsk_tag = bfa_os_htons(tskim->tsk_tag);
+ m->tsk_tag = cpu_to_be16(tskim->tsk_tag);
m->itn_fhdl = tskim->itnim->rport->fw_handle;
m->t_secs = tskim->tsecs;
m->lun = tskim->lun;
m->tm_flags = tskim->tm_cmnd;
- /**
+ /*
* queue I/O message to firmware
*/
bfa_reqq_produce(tskim->bfa, itnim->reqq);
return BFA_TRUE;
}
-/**
+/*
* Send abort request to cleanup an active TM to firmware.
*/
static bfa_boolean_t
@@ -3403,29 +3402,29 @@ bfa_tskim_send_abort(struct bfa_tskim_s *tskim)
struct bfa_itnim_s *itnim = tskim->itnim;
struct bfi_tskim_abortreq_s *m;
- /**
+ /*
* check for room in queue to send request now
*/
m = bfa_reqq_next(tskim->bfa, itnim->reqq);
if (!m)
return BFA_FALSE;
- /**
+ /*
* build i/o request message next
*/
bfi_h2i_set(m->mh, BFI_MC_TSKIM, BFI_TSKIM_H2I_ABORT_REQ,
bfa_lpuid(tskim->bfa));
- m->tsk_tag = bfa_os_htons(tskim->tsk_tag);
+ m->tsk_tag = cpu_to_be16(tskim->tsk_tag);
- /**
+ /*
* queue I/O message to firmware
*/
bfa_reqq_produce(tskim->bfa, itnim->reqq);
return BFA_TRUE;
}
-/**
+/*
* Call to resume task management cmnd waiting for room in request queue.
*/
static void
@@ -3437,7 +3436,7 @@ bfa_tskim_qresume(void *cbarg)
bfa_sm_send_event(tskim, BFA_TSKIM_SM_QRESUME);
}
-/**
+/*
 * Cleanup IOs associated with a task management command on IOC failures.
*/
static void
@@ -3454,11 +3453,11 @@ bfa_tskim_iocdisable_ios(struct bfa_tskim_s *tskim)
-/**
+/*
* hal_tskim_friend
*/
-/**
+/*
* Notification on completions from related ioim.
*/
void
@@ -3467,7 +3466,7 @@ bfa_tskim_iodone(struct bfa_tskim_s *tskim)
bfa_wc_down(&tskim->wc);
}
-/**
+/*
* Handle IOC h/w failure notification from itnim.
*/
void
@@ -3478,7 +3477,7 @@ bfa_tskim_iocdisable(struct bfa_tskim_s *tskim)
bfa_sm_send_event(tskim, BFA_TSKIM_SM_HWFAIL);
}
-/**
+/*
* Cleanup TM command and associated IOs as part of ITNIM offline.
*/
void
@@ -3489,7 +3488,7 @@ bfa_tskim_cleanup(struct bfa_tskim_s *tskim)
bfa_sm_send_event(tskim, BFA_TSKIM_SM_CLEANUP);
}
-/**
+/*
* Memory allocation and initialization.
*/
void
@@ -3507,7 +3506,7 @@ bfa_tskim_attach(struct bfa_fcpim_mod_s *fcpim, struct bfa_meminfo_s *minfo)
/*
* initialize TSKIM
*/
- bfa_os_memset(tskim, 0, sizeof(struct bfa_tskim_s));
+ memset(tskim, 0, sizeof(struct bfa_tskim_s));
tskim->tsk_tag = i;
tskim->bfa = fcpim->bfa;
tskim->fcpim = fcpim;
@@ -3525,7 +3524,7 @@ bfa_tskim_attach(struct bfa_fcpim_mod_s *fcpim, struct bfa_meminfo_s *minfo)
void
bfa_tskim_detach(struct bfa_fcpim_mod_s *fcpim)
{
- /**
+ /*
* @todo
*/
}
@@ -3536,14 +3535,14 @@ bfa_tskim_isr(struct bfa_s *bfa, struct bfi_msg_s *m)
struct bfa_fcpim_mod_s *fcpim = BFA_FCPIM_MOD(bfa);
struct bfi_tskim_rsp_s *rsp = (struct bfi_tskim_rsp_s *) m;
struct bfa_tskim_s *tskim;
- u16 tsk_tag = bfa_os_ntohs(rsp->tsk_tag);
+ u16 tsk_tag = be16_to_cpu(rsp->tsk_tag);
tskim = BFA_TSKIM_FROM_TAG(fcpim, tsk_tag);
bfa_assert(tskim->tsk_tag == tsk_tag);
tskim->tsk_status = rsp->tsk_status;
- /**
+ /*
* Firmware sends BFI_TSKIM_STS_ABORTED status for abort
* requests. All other statuses are for normal completions.
*/
@@ -3558,7 +3557,7 @@ bfa_tskim_isr(struct bfa_s *bfa, struct bfi_msg_s *m)
-/**
+/*
* hal_tskim_api
*/
@@ -3585,7 +3584,7 @@ bfa_tskim_free(struct bfa_tskim_s *tskim)
list_add_tail(&tskim->qe, &tskim->fcpim->tskim_free_q);
}
-/**
+/*
* Start a task management command.
*
* @param[in] tskim BFA task management command instance
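
Two other recurring substitutions in bfa_fcpim.c are visible above: fields coming back from firmware are decoded with be16_to_cpu()/be32_to_cpu() instead of bfa_os_ntohs()/bfa_os_ntohl(), and IO latency profiling uses jiffies directly instead of bfa_os_get_clock(). A rough sketch of both, using hypothetical structure names rather than the driver's own:

	#include <linux/types.h>
	#include <linux/kernel.h>
	#include <linux/jiffies.h>
	#include <asm/byteorder.h>

	struct example_fw_rsp {
		__be16 io_tag;          /* tag echoed by firmware, big-endian */
	};

	struct example_io {
		u16 iotag;
		u64 start_time;         /* jiffies at submission */
	};

	static void example_io_start(struct example_io *io)
	{
		io->start_time = jiffies;             /* was bfa_os_get_clock() */
	}

	static u32 example_io_done(const struct example_io *io,
				   const struct example_fw_rsp *rsp)
	{
		u16 iotag = be16_to_cpu(rsp->io_tag); /* was bfa_os_ntohs() */

		WARN_ON(iotag != io->iotag);
		/* elapsed latency in jiffies, as in the profiling hunks above */
		return (u32)(jiffies - io->start_time);
	}
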
diff --git a/drivers/scsi/bfa/bfa_fcpim.h b/drivers/scsi/bfa/bfa_fcpim.h
index 3bf343160aac..db53717eeb4b 100644
--- a/drivers/scsi/bfa/bfa_fcpim.h
+++ b/drivers/scsi/bfa/bfa_fcpim.h
@@ -104,7 +104,7 @@ struct bfa_fcpim_mod_s {
bfa_fcpim_profile_t profile_start;
};
-/**
+/*
* BFA IO (initiator mode)
*/
struct bfa_ioim_s {
@@ -137,7 +137,7 @@ struct bfa_ioim_sp_s {
struct bfa_tskim_s *tskim; /* Relevant TM cmd */
};
-/**
+/*
* BFA Task management command (initiator mode)
*/
struct bfa_tskim_s {
@@ -160,7 +160,7 @@ struct bfa_tskim_s {
};
-/**
+/*
* BFA i-t-n (initiator mode)
*/
struct bfa_itnim_s {
@@ -303,7 +303,7 @@ bfa_status_t bfa_itnim_get_ioprofile(struct bfa_itnim_s *itnim,
struct bfa_itnim_ioprofile_s *ioprofile);
#define bfa_itnim_get_reqq(__ioim) (((struct bfa_ioim_s *)__ioim)->itnim->reqq)
-/**
+/*
* BFA completion callback for bfa_itnim_online().
*
* @param[in] itnim FCS or driver itnim instance
@@ -312,7 +312,7 @@ bfa_status_t bfa_itnim_get_ioprofile(struct bfa_itnim_s *itnim,
*/
void bfa_cb_itnim_online(void *itnim);
-/**
+/*
* BFA completion callback for bfa_itnim_offline().
*
* @param[in] itnim FCS or driver itnim instance
@@ -323,7 +323,7 @@ void bfa_cb_itnim_offline(void *itnim);
void bfa_cb_itnim_tov_begin(void *itnim);
void bfa_cb_itnim_tov(void *itnim);
-/**
+/*
* BFA notification to FCS/driver for second level error recovery.
*
 * At least one I/O request has timed out and target is unresponsive to
@@ -351,7 +351,7 @@ void bfa_ioim_delayed_comp(struct bfa_ioim_s *ioim,
bfa_boolean_t iotov);
-/**
+/*
* I/O completion notification.
*
* @param[in] dio driver IO structure
@@ -368,7 +368,7 @@ void bfa_cb_ioim_done(void *bfad, struct bfad_ioim_s *dio,
u8 scsi_status, int sns_len,
u8 *sns_info, s32 residue);
-/**
+/*
* I/O good completion notification.
*
* @param[in] dio driver IO structure
@@ -377,7 +377,7 @@ void bfa_cb_ioim_done(void *bfad, struct bfad_ioim_s *dio,
*/
void bfa_cb_ioim_good_comp(void *bfad, struct bfad_ioim_s *dio);
-/**
+/*
* I/O abort completion notification
*
* @param[in] dio driver IO that was aborted
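
Most of the remaining hunks, in this header and in bfa_fcs.c below, only change the comment opener from /** to /*. The double-star opener is reserved for kernel-doc, so ordinary explanatory comments are downgraded to plain block comments to keep documentation tooling from trying to parse them. A small illustration (example_fn is a made-up name):

	/**
	 * example_fn() - kernel-doc keeps the double-star opener
	 * @arg: argument description
	 *
	 * Return: 0 on success, negative errno on failure.
	 */
	int example_fn(int arg);

	/*
	 * A plain explanatory comment, like the ones touched by this patch,
	 * uses the single-star opener so scripts/kernel-doc ignores it.
	 */
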
diff --git a/drivers/scsi/bfa/bfa_fcs.c b/drivers/scsi/bfa/bfa_fcs.c
index 9cebbe30a678..c94502dfac66 100644
--- a/drivers/scsi/bfa/bfa_fcs.c
+++ b/drivers/scsi/bfa/bfa_fcs.c
@@ -15,7 +15,7 @@
* General Public License for more details.
*/
-/**
+/*
* bfa_fcs.c BFA FCS main
*/
@@ -25,7 +25,7 @@
BFA_TRC_FILE(FCS, FCS);
-/**
+/*
* FCS sub-modules
*/
struct bfa_fcs_mod_s {
@@ -43,7 +43,7 @@ static struct bfa_fcs_mod_s fcs_modules[] = {
bfa_fcs_fabric_modexit },
};
-/**
+/*
* fcs_api BFA FCS API
*/
@@ -58,11 +58,11 @@ bfa_fcs_exit_comp(void *fcs_cbarg)
-/**
+/*
* fcs_api BFA FCS API
*/
-/**
+/*
* fcs attach -- called once to initialize data structures at driver attach time
*/
void
@@ -86,7 +86,7 @@ bfa_fcs_attach(struct bfa_fcs_s *fcs, struct bfa_s *bfa, struct bfad_s *bfad,
}
}
-/**
+/*
* fcs initialization, called once after bfa initialization is complete
*/
void
@@ -110,7 +110,7 @@ bfa_fcs_init(struct bfa_fcs_s *fcs)
}
}
-/**
+/*
* Start FCS operations.
*/
void
@@ -119,7 +119,7 @@ bfa_fcs_start(struct bfa_fcs_s *fcs)
bfa_fcs_fabric_modstart(fcs);
}
-/**
+/*
* brief
* FCS driver details initialization.
*
@@ -138,7 +138,7 @@ bfa_fcs_driver_info_init(struct bfa_fcs_s *fcs,
bfa_fcs_fabric_psymb_init(&fcs->fabric);
}
-/**
+/*
* brief
* FCS FDMI Driver Parameter Initialization
*
@@ -154,7 +154,7 @@ bfa_fcs_set_fdmi_param(struct bfa_fcs_s *fcs, bfa_boolean_t fdmi_enable)
fcs->fdmi_enabled = fdmi_enable;
}
-/**
+/*
* brief
* FCS instance cleanup and exit.
*
@@ -196,7 +196,7 @@ bfa_fcs_modexit_comp(struct bfa_fcs_s *fcs)
bfa_wc_down(&fcs->wc);
}
-/**
+/*
* Fabric module implementation.
*/
@@ -232,11 +232,11 @@ static void bfa_fcs_fabric_flogiacc_comp(void *fcsarg,
u32 rsp_len,
u32 resid_len,
struct fchs_s *rspfchs);
-/**
+/*
* fcs_fabric_sm fabric state machine functions
*/
-/**
+/*
* Fabric state machine events
*/
enum bfa_fcs_fabric_event {
@@ -286,7 +286,7 @@ static void bfa_fcs_fabric_sm_isolated(struct bfa_fcs_fabric_s *fabric,
enum bfa_fcs_fabric_event event);
static void bfa_fcs_fabric_sm_deleting(struct bfa_fcs_fabric_s *fabric,
enum bfa_fcs_fabric_event event);
-/**
+/*
* Beginning state before fabric creation.
*/
static void
@@ -312,7 +312,7 @@ bfa_fcs_fabric_sm_uninit(struct bfa_fcs_fabric_s *fabric,
}
}
-/**
+/*
* Beginning state before fabric creation.
*/
static void
@@ -345,7 +345,7 @@ bfa_fcs_fabric_sm_created(struct bfa_fcs_fabric_s *fabric,
}
}
-/**
+/*
* Link is down, awaiting LINK UP event from port. This is also the
* first state at fabric creation.
*/
@@ -375,7 +375,7 @@ bfa_fcs_fabric_sm_linkdown(struct bfa_fcs_fabric_s *fabric,
}
}
-/**
+/*
* FLOGI is in progress, awaiting FLOGI reply.
*/
static void
@@ -468,7 +468,7 @@ bfa_fcs_fabric_sm_flogi_retry(struct bfa_fcs_fabric_s *fabric,
}
}
-/**
+/*
* Authentication is in progress, awaiting authentication results.
*/
static void
@@ -508,7 +508,7 @@ bfa_fcs_fabric_sm_auth(struct bfa_fcs_fabric_s *fabric,
}
}
-/**
+/*
* Authentication failed
*/
static void
@@ -534,7 +534,7 @@ bfa_fcs_fabric_sm_auth_failed(struct bfa_fcs_fabric_s *fabric,
}
}
-/**
+/*
* Port is in loopback mode.
*/
static void
@@ -560,7 +560,7 @@ bfa_fcs_fabric_sm_loopback(struct bfa_fcs_fabric_s *fabric,
}
}
-/**
+/*
* There is no attached fabric - private loop or NPort-to-NPort topology.
*/
static void
@@ -593,7 +593,7 @@ bfa_fcs_fabric_sm_nofabric(struct bfa_fcs_fabric_s *fabric,
}
}
-/**
+/*
* Fabric is online - normal operating state.
*/
static void
@@ -628,7 +628,7 @@ bfa_fcs_fabric_sm_online(struct bfa_fcs_fabric_s *fabric,
}
}
-/**
+/*
* Exchanging virtual fabric parameters.
*/
static void
@@ -652,7 +652,7 @@ bfa_fcs_fabric_sm_evfp(struct bfa_fcs_fabric_s *fabric,
}
}
-/**
+/*
* EVFP exchange complete and VFT tagging is enabled.
*/
static void
@@ -663,7 +663,7 @@ bfa_fcs_fabric_sm_evfp_done(struct bfa_fcs_fabric_s *fabric,
bfa_trc(fabric->fcs, event);
}
-/**
+/*
* Port is isolated after EVFP exchange due to VF_ID mismatch (N and F).
*/
static void
@@ -684,7 +684,7 @@ bfa_fcs_fabric_sm_isolated(struct bfa_fcs_fabric_s *fabric,
fabric->event_arg.swp_vfid);
}
-/**
+/*
* Fabric is being deleted, awaiting vport delete completions.
*/
static void
@@ -714,7 +714,7 @@ bfa_fcs_fabric_sm_deleting(struct bfa_fcs_fabric_s *fabric,
-/**
+/*
* fcs_fabric_private fabric private functions
*/
@@ -728,7 +728,7 @@ bfa_fcs_fabric_init(struct bfa_fcs_fabric_s *fabric)
port_cfg->pwwn = bfa_ioc_get_pwwn(&fabric->fcs->bfa->ioc);
}
-/**
+/*
* Port Symbolic Name Creation for base port.
*/
void
@@ -789,7 +789,7 @@ bfa_fcs_fabric_psymb_init(struct bfa_fcs_fabric_s *fabric)
port_cfg->sym_name.symname[BFA_SYMNAME_MAXLEN - 1] = 0;
}
-/**
+/*
* bfa lps login completion callback
*/
void
@@ -867,7 +867,7 @@ bfa_cb_lps_flogi_comp(void *bfad, void *uarg, bfa_status_t status)
bfa_trc(fabric->fcs, fabric->is_npiv);
bfa_trc(fabric->fcs, fabric->is_auth);
}
-/**
+/*
* Allocate and send FLOGI.
*/
static void
@@ -897,7 +897,7 @@ bfa_fcs_fabric_notify_online(struct bfa_fcs_fabric_s *fabric)
bfa_fcs_fabric_set_opertype(fabric);
fabric->stats.fabric_onlines++;
- /**
+ /*
* notify online event to base and then virtual ports
*/
bfa_fcs_lport_online(&fabric->bport);
@@ -917,7 +917,7 @@ bfa_fcs_fabric_notify_offline(struct bfa_fcs_fabric_s *fabric)
bfa_trc(fabric->fcs, fabric->fabric_name);
fabric->stats.fabric_offlines++;
- /**
+ /*
* notify offline event first to vports and then base port.
*/
list_for_each_safe(qe, qen, &fabric->vport_q) {
@@ -939,7 +939,7 @@ bfa_fcs_fabric_delay(void *cbarg)
bfa_sm_send_event(fabric, BFA_FCS_FABRIC_SM_DELAYED);
}
-/**
+/*
* Delete all vports and wait for vport delete completions.
*/
static void
@@ -965,11 +965,11 @@ bfa_fcs_fabric_delete_comp(void *cbarg)
bfa_sm_send_event(fabric, BFA_FCS_FABRIC_SM_DELCOMP);
}
-/**
+/*
* fcs_fabric_public fabric public functions
*/
-/**
+/*
* Attach time initialization.
*/
void
@@ -978,9 +978,9 @@ bfa_fcs_fabric_attach(struct bfa_fcs_s *fcs)
struct bfa_fcs_fabric_s *fabric;
fabric = &fcs->fabric;
- bfa_os_memset(fabric, 0, sizeof(struct bfa_fcs_fabric_s));
+ memset(fabric, 0, sizeof(struct bfa_fcs_fabric_s));
- /**
+ /*
* Initialize base fabric.
*/
fabric->fcs = fcs;
@@ -989,7 +989,7 @@ bfa_fcs_fabric_attach(struct bfa_fcs_s *fcs)
fabric->lps = bfa_lps_alloc(fcs->bfa);
bfa_assert(fabric->lps);
- /**
+ /*
* Initialize fabric delete completion handler. Fabric deletion is
* complete when the last vport delete is complete.
*/
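Fabric deletion completes when the last vport delete completes: each vport added takes bfa_wc_up() on the fabric's waiting counter (see bfa_fcs_fabric_addvport() further down), each vport delete completion calls bfa_wc_down(), and the handler registered here runs once the count returns to zero. The bfa_wc internals are not part of this patch, so the following is only a sketch of the assumed idiom, with stand-in example_wc names:

struct example_wc {
	int count;
	void (*resume)(void *arg);	/* invoked when count drops to zero */
	void *arg;
};

static void example_wc_up(struct example_wc *wc)
{
	wc->count++;
}

static void example_wc_down(struct example_wc *wc)
{
	if (--wc->count == 0)
		wc->resume(wc->arg);	/* e.g. the fabric delete-complete callback */
}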
@@ -1007,7 +1007,7 @@ bfa_fcs_fabric_modinit(struct bfa_fcs_s *fcs)
bfa_trc(fcs, 0);
}
-/**
+/*
* Module cleanup
*/
void
@@ -1017,7 +1017,7 @@ bfa_fcs_fabric_modexit(struct bfa_fcs_s *fcs)
bfa_trc(fcs, 0);
- /**
+ /*
* Cleanup base fabric.
*/
fabric = &fcs->fabric;
@@ -1025,7 +1025,7 @@ bfa_fcs_fabric_modexit(struct bfa_fcs_s *fcs)
bfa_sm_send_event(fabric, BFA_FCS_FABRIC_SM_DELETE);
}
-/**
+/*
* Fabric module start -- kick starts FCS actions
*/
void
@@ -1038,7 +1038,7 @@ bfa_fcs_fabric_modstart(struct bfa_fcs_s *fcs)
bfa_sm_send_event(fabric, BFA_FCS_FABRIC_SM_START);
}
-/**
+/*
* Suspend fabric activity as part of driver suspend.
*/
void
@@ -1064,7 +1064,7 @@ bfa_fcs_fabric_port_type(struct bfa_fcs_fabric_s *fabric)
return fabric->oper_type;
}
-/**
+/*
* Link up notification from BFA physical port module.
*/
void
@@ -1074,7 +1074,7 @@ bfa_fcs_fabric_link_up(struct bfa_fcs_fabric_s *fabric)
bfa_sm_send_event(fabric, BFA_FCS_FABRIC_SM_LINK_UP);
}
-/**
+/*
* Link down notification from BFA physical port module.
*/
void
@@ -1084,7 +1084,7 @@ bfa_fcs_fabric_link_down(struct bfa_fcs_fabric_s *fabric)
bfa_sm_send_event(fabric, BFA_FCS_FABRIC_SM_LINK_DOWN);
}
-/**
+/*
* A child vport is being created in the fabric.
*
* Call from vport module at vport creation. A list of base port and vports
@@ -1099,7 +1099,7 @@ void
bfa_fcs_fabric_addvport(struct bfa_fcs_fabric_s *fabric,
struct bfa_fcs_vport_s *vport)
{
- /**
+ /*
* - add vport to fabric's vport_q
*/
bfa_trc(fabric->fcs, fabric->vf_id);
@@ -1109,7 +1109,7 @@ bfa_fcs_fabric_addvport(struct bfa_fcs_fabric_s *fabric,
bfa_wc_up(&fabric->wc);
}
-/**
+/*
* A child vport is being deleted from fabric.
*
* Vport is being deleted.
@@ -1123,7 +1123,7 @@ bfa_fcs_fabric_delvport(struct bfa_fcs_fabric_s *fabric,
bfa_wc_down(&fabric->wc);
}
-/**
+/*
* Base port is deleted.
*/
void
@@ -1133,7 +1133,7 @@ bfa_fcs_fabric_port_delete_comp(struct bfa_fcs_fabric_s *fabric)
}
-/**
+/*
* Check if fabric is online.
*
* param[in] fabric - Fabric instance. This can be a base fabric or vf.
@@ -1146,7 +1146,7 @@ bfa_fcs_fabric_is_online(struct bfa_fcs_fabric_s *fabric)
return bfa_sm_cmp_state(fabric, bfa_fcs_fabric_sm_online);
}
-/**
+/*
* brief
*
*/
@@ -1158,7 +1158,7 @@ bfa_fcs_fabric_addvf(struct bfa_fcs_fabric_s *vf, struct bfa_fcs_s *fcs,
return BFA_STATUS_OK;
}
-/**
+/*
* Look up a vport within a fabric, given its pwwn
*/
struct bfa_fcs_vport_s *
@@ -1176,7 +1176,7 @@ bfa_fcs_fabric_vport_lookup(struct bfa_fcs_fabric_s *fabric, wwn_t pwwn)
return NULL;
}
-/**
+/*
* In a given fabric, return the number of lports.
*
* param[in] fabric - Fabric instance. This can be a base fabric or vf.
@@ -1214,7 +1214,7 @@ bfa_fcs_fabric_get_switch_oui(struct bfa_fcs_fabric_s *fabric)
return oui;
}
-/**
+/*
* Unsolicited frame receive handling.
*/
void
@@ -1230,7 +1230,7 @@ bfa_fcs_fabric_uf_recv(struct bfa_fcs_fabric_s *fabric, struct fchs_s *fchs,
bfa_trc(fabric->fcs, len);
bfa_trc(fabric->fcs, pid);
- /**
+ /*
* Look for our own FLOGI frames being looped back. This means an
* external loopback cable is in place. Our own FLOGI frames are
* sometimes looped back when switch port gets temporarily bypassed.
@@ -1242,7 +1242,7 @@ bfa_fcs_fabric_uf_recv(struct bfa_fcs_fabric_s *fabric, struct fchs_s *fchs,
return;
}
- /**
+ /*
* FLOGI/EVFP exchanges should be consumed by base fabric.
*/
if (fchs->d_id == bfa_os_hton3b(FC_FABRIC_PORT)) {
@@ -1252,7 +1252,7 @@ bfa_fcs_fabric_uf_recv(struct bfa_fcs_fabric_s *fabric, struct fchs_s *fchs,
}
if (fabric->bport.pid == pid) {
- /**
+ /*
* All authentication frames should be routed to auth
*/
bfa_trc(fabric->fcs, els_cmd->els_code);
@@ -1266,7 +1266,7 @@ bfa_fcs_fabric_uf_recv(struct bfa_fcs_fabric_s *fabric, struct fchs_s *fchs,
return;
}
- /**
+ /*
* look for a matching local port ID
*/
list_for_each(qe, &fabric->vport_q) {
@@ -1280,7 +1280,7 @@ bfa_fcs_fabric_uf_recv(struct bfa_fcs_fabric_s *fabric, struct fchs_s *fchs,
bfa_fcs_lport_uf_recv(&fabric->bport, fchs, len);
}
-/**
+/*
* Unsolicited frames to be processed by fabric.
*/
static void
@@ -1304,7 +1304,7 @@ bfa_fcs_fabric_process_uf(struct bfa_fcs_fabric_s *fabric, struct fchs_s *fchs,
}
}
-/**
+/*
* Process incoming FLOGI
*/
static void
@@ -1329,7 +1329,7 @@ bfa_fcs_fabric_process_flogi(struct bfa_fcs_fabric_s *fabric,
return;
}
- fabric->bb_credit = bfa_os_ntohs(flogi->csp.bbcred);
+ fabric->bb_credit = be16_to_cpu(flogi->csp.bbcred);
bport->port_topo.pn2n.rem_port_wwn = flogi->port_name;
bport->port_topo.pn2n.reply_oxid = fchs->ox_id;
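bb_credit comes from the FLOGI common service parameters, which are big-endian on the wire, hence the be16_to_cpu() conversion before the value is stored in host-endian fabric state. A self-contained sketch of that step, using a simplified example_csp structure rather than the driver's real csp layout:

#include <linux/types.h>
#include <asm/byteorder.h>

struct example_csp {			/* stand-in for the FLOGI csp fields */
	__be16 bbcred;			/* buffer-to-buffer credit, wire order */
};

static u16 example_bb_credit(const struct example_csp *csp)
{
	return be16_to_cpu(csp->bbcred);	/* host-endian, as in the hunk above */
}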
@@ -1351,7 +1351,7 @@ bfa_fcs_fabric_send_flogi_acc(struct bfa_fcs_fabric_s *fabric)
struct fchs_s fchs;
fcxp = bfa_fcs_fcxp_alloc(fabric->fcs);
- /**
+ /*
* Do not expect this failure -- expect remote node to retry
*/
if (!fcxp)
@@ -1370,7 +1370,7 @@ bfa_fcs_fabric_send_flogi_acc(struct bfa_fcs_fabric_s *fabric)
FC_MAX_PDUSZ, 0);
}
-/**
+/*
* Flogi Acc completion callback.
*/
static void
@@ -1417,130 +1417,7 @@ bfa_fcs_fabric_set_fabric_name(struct bfa_fcs_fabric_s *fabric,
}
}
-/**
- * fcs_vf_api virtual fabrics API
- */
-
-/**
- * Enable VF mode.
- *
- * @param[in] fcs fcs module instance
- * @param[in] vf_id default vf_id of port, FC_VF_ID_NULL
- * to use standard default vf_id of 1.
- *
- * @retval BFA_STATUS_OK vf mode is enabled
- * @retval BFA_STATUS_BUSY Port is active. Port must be disabled
- * before VF mode can be enabled.
- */
-bfa_status_t
-bfa_fcs_vf_mode_enable(struct bfa_fcs_s *fcs, u16 vf_id)
-{
- return BFA_STATUS_OK;
-}
-
-/**
- * Disable VF mode.
- *
- * @param[in] fcs fcs module instance
- *
- * @retval BFA_STATUS_OK vf mode is disabled
- * @retval BFA_STATUS_BUSY VFs are present and being used. All
- * VFs must be deleted before disabling
- * VF mode.
- */
-bfa_status_t
-bfa_fcs_vf_mode_disable(struct bfa_fcs_s *fcs)
-{
- return BFA_STATUS_OK;
-}
-
-/**
- * Create a new VF instance.
- *
- * A new VF is created using the given VF configuration. A VF is identified
- * by VF id. No duplicate VF creation is allowed with the same VF id. Once
- * a VF is created, VF is automatically started after link initialization
- * and EVFP exchange is completed.
- *
- * param[in] vf - FCS vf data structure. Memory is
- * allocated by caller (driver)
- * param[in] fcs - FCS module
- * param[in] vf_cfg - VF configuration
- * param[in] vf_drv - Opaque handle back to the driver's
- * virtual vf structure
- *
- * retval BFA_STATUS_OK VF creation is successful
- * retval BFA_STATUS_FAILED VF creation failed
- * retval BFA_STATUS_EEXIST A VF exists with the given vf_id
- */
-bfa_status_t
-bfa_fcs_vf_create(bfa_fcs_vf_t *vf, struct bfa_fcs_s *fcs, u16 vf_id,
- struct bfa_lport_cfg_s *port_cfg, struct bfad_vf_s *vf_drv)
-{
- bfa_trc(fcs, vf_id);
- return BFA_STATUS_OK;
-}
-
-/**
- * Use this function to delete a BFA VF object. VF object should
- * be stopped before this function call.
- *
- * param[in] vf - pointer to bfa_vf_t.
- *
- * retval BFA_STATUS_OK On vf deletion success
- * retval BFA_STATUS_BUSY VF is not in a stopped state
- * retval BFA_STATUS_INPROGRESS VF deletion in in progress
- */
-bfa_status_t
-bfa_fcs_vf_delete(bfa_fcs_vf_t *vf)
-{
- bfa_trc(vf->fcs, vf->vf_id);
- return BFA_STATUS_OK;
-}
-
-
-/**
- * Returns attributes of the given VF.
- *
- * param[in] vf pointer to bfa_vf_t.
- * param[out] vf_attr vf attributes returned
- *
- * return None
- */
-void
-bfa_fcs_vf_get_attr(bfa_fcs_vf_t *vf, struct bfa_vf_attr_s *vf_attr)
-{
- bfa_trc(vf->fcs, vf->vf_id);
-}
-
-/**
- * Return statistics associated with the given vf.
- *
- * param[in] vf pointer to bfa_vf_t.
- * param[out] vf_stats vf statistics returned
- *
- * @return None
- */
-void
-bfa_fcs_vf_get_stats(bfa_fcs_vf_t *vf, struct bfa_vf_stats_s *vf_stats)
-{
- bfa_os_memcpy(vf_stats, &vf->stats, sizeof(struct bfa_vf_stats_s));
-}
-
-/**
- * clear statistics associated with the given vf.
- *
- * param[in] vf pointer to bfa_vf_t.
- *
- * @return None
- */
-void
-bfa_fcs_vf_clear_stats(bfa_fcs_vf_t *vf)
-{
- bfa_os_memset(&vf->stats, 0, sizeof(struct bfa_vf_stats_s));
-}
-
-/**
+/*
* Returns FCS vf structure for a given vf_id.
*
* param[in] vf_id - VF_ID
@@ -1558,81 +1435,7 @@ bfa_fcs_vf_lookup(struct bfa_fcs_s *fcs, u16 vf_id)
return NULL;
}
-/**
- * Return the list of VFs configured.
- *
- * param[in] fcs fcs module instance
- * param[out] vf_ids returned list of vf_ids
- * param[in,out] nvfs in:size of vf_ids array,
- * out:total elements present,
- * actual elements returned is limited by the size
- *
- * return Driver VF structure
- */
-void
-bfa_fcs_vf_list(struct bfa_fcs_s *fcs, u16 *vf_ids, int *nvfs)
-{
- bfa_trc(fcs, *nvfs);
-}
-
-/**
- * Return the list of all VFs visible from fabric.
- *
- * param[in] fcs fcs module instance
- * param[out] vf_ids returned list of vf_ids
- * param[in,out] nvfs in:size of vf_ids array,
- * out:total elements present,
- * actual elements returned is limited by the size
- *
- * return Driver VF structure
- */
-void
-bfa_fcs_vf_list_all(struct bfa_fcs_s *fcs, u16 *vf_ids, int *nvfs)
-{
- bfa_trc(fcs, *nvfs);
-}
-
-/**
- * Return the list of local logical ports present in the given VF.
- *
- * param[in] vf vf for which logical ports are returned
- * param[out] lpwwn returned logical port wwn list
- * param[in,out] nlports in:size of lpwwn list;
- * out:total elements present,
- * actual elements returned is limited by the size
- */
-void
-bfa_fcs_vf_get_ports(bfa_fcs_vf_t *vf, wwn_t lpwwn[], int *nlports)
-{
- struct list_head *qe;
- struct bfa_fcs_vport_s *vport;
- int i;
- struct bfa_fcs_s *fcs;
-
- if (vf == NULL || lpwwn == NULL || *nlports == 0)
- return;
-
- fcs = vf->fcs;
-
- bfa_trc(fcs, vf->vf_id);
- bfa_trc(fcs, (u32) *nlports);
-
- i = 0;
- lpwwn[i++] = vf->bport.port_cfg.pwwn;
-
- list_for_each(qe, &vf->vport_q) {
- if (i >= *nlports)
- break;
-
- vport = (struct bfa_fcs_vport_s *) qe;
- lpwwn[i++] = vport->lport.port_cfg.pwwn;
- }
-
- bfa_trc(fcs, i);
- *nlports = i;
-}
-
-/**
+/*
* BFA FCS PPORT ( physical port)
*/
static void
@@ -1662,11 +1465,11 @@ bfa_fcs_port_attach(struct bfa_fcs_s *fcs)
bfa_fcport_event_register(fcs->bfa, bfa_fcs_port_event_handler, fcs);
}
-/**
+/*
* BFA FCS UF ( Unsolicited Frames)
*/
-/**
+/*
* BFA callback for unsolicited frame receive handler.
*
* @param[in] cbarg callback arg for receive handler
@@ -1683,7 +1486,7 @@ bfa_fcs_uf_recv(void *cbarg, struct bfa_uf_s *uf)
struct fc_vft_s *vft;
struct bfa_fcs_fabric_s *fabric;
- /**
+ /*
* check for VFT header
*/
if (fchs->routing == FC_RTG_EXT_HDR &&
@@ -1695,7 +1498,7 @@ bfa_fcs_uf_recv(void *cbarg, struct bfa_uf_s *uf)
else
fabric = bfa_fcs_vf_lookup(fcs, (u16) vft->vf_id);
- /**
+ /*
* drop frame if vfid is unknown
*/
if (!fabric) {
@@ -1705,7 +1508,7 @@ bfa_fcs_uf_recv(void *cbarg, struct bfa_uf_s *uf)
return;
}
- /**
+ /*
* skip vft header
*/
fchs = (struct fchs_s *) (vft + 1);
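When a VFT extended header is present, the receive path looks up the fabric by vf_id and then advances past the extended header before handing the frame on; (vft + 1) is plain pointer arithmetic that skips exactly one VFT header. A small sketch of the same step with stand-in types (example_vft and example_fchs are illustrative, not the driver's structures):

struct example_vft {			/* stand-in for struct fc_vft_s */
	unsigned int word[2];		/* 8-byte VFT extended header */
};

struct example_fchs {			/* stand-in for struct fchs_s */
	unsigned int routing_and_d_id;
};

static struct example_fchs *example_skip_vft(struct example_vft *vft)
{
	/* (vft + 1) points one whole VFT header past vft, i.e. at the
	 * start of the real FC header, mirroring the line above. */
	return (struct example_fchs *)(vft + 1);
}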
diff --git a/drivers/scsi/bfa/bfa_fcs.h b/drivers/scsi/bfa/bfa_fcs.h
index d75045df1e7e..9cb6a55977c3 100644
--- a/drivers/scsi/bfa/bfa_fcs.h
+++ b/drivers/scsi/bfa/bfa_fcs.h
@@ -196,7 +196,7 @@ struct bfa_fcs_fabric_s {
#define bfa_fcs_fabric_is_switched(__f) \
((__f)->fab_type == BFA_FCS_FABRIC_SWITCHED)
-/**
+/*
* The design calls for a single implementation of base fabric and vf.
*/
#define bfa_fcs_vf_t struct bfa_fcs_fabric_s
@@ -216,7 +216,7 @@ struct bfa_fcs_fabric_s;
#define bfa_fcs_lport_t struct bfa_fcs_lport_s
-/**
+/*
* Symbolic Name related defines
* Total bytes 255.
* Physical Port's symbolic name 128 bytes.
@@ -239,7 +239,7 @@ struct bfa_fcs_fabric_s;
#define BFA_FCS_PORT_SYMBNAME_OSINFO_SZ 48
#define BFA_FCS_PORT_SYMBNAME_OSPATCH_SZ 16
-/**
+/*
* Get FC port ID for a logical port.
*/
#define bfa_fcs_lport_get_fcid(_lport) ((_lport)->pid)
@@ -262,7 +262,7 @@ bfa_fcs_lport_get_drvport(struct bfa_fcs_lport_s *port)
#define bfa_fcs_lport_get_fabric_ipaddr(_lport) \
((_lport)->fabric->fabric_ip_addr)
-/**
+/*
* bfa fcs port public functions
*/
@@ -342,7 +342,7 @@ struct bfa_fcs_vport_s {
#define bfa_fcs_vport_get_port(vport) \
((struct bfa_fcs_lport_s *)(&vport->port))
-/**
+/*
* bfa fcs vport public functions
*/
bfa_status_t bfa_fcs_vport_create(struct bfa_fcs_vport_s *vport,
@@ -393,7 +393,7 @@ struct bfa_fcs_rpf_s {
enum bfa_port_speed rpsc_speed;
/* Current Speed from RPSC. O if RPSC fails */
enum bfa_port_speed assigned_speed;
- /**
+ /*
* Speed assigned by the user. will be used if RPSC is
* not supported by the rport.
*/
@@ -434,7 +434,7 @@ bfa_fcs_rport_get_halrport(struct bfa_fcs_rport_s *rport)
return rport->bfa_rport;
}
-/**
+/*
* bfa fcs rport API functions
*/
bfa_status_t bfa_fcs_rport_add(struct bfa_fcs_lport_s *port, wwn_t *pwwn,
@@ -573,7 +573,7 @@ bfa_fcs_itnim_get_halitn(struct bfa_fcs_itnim_s *itnim)
return itnim->bfa_itnim;
}
-/**
+/*
* bfa fcs FCP Initiator mode API functions
*/
void bfa_fcs_itnim_get_attr(struct bfa_fcs_itnim_s *itnim,
@@ -677,22 +677,9 @@ void bfa_fcs_exit(struct bfa_fcs_s *fcs);
void bfa_fcs_trc_init(struct bfa_fcs_s *fcs, struct bfa_trc_mod_s *trcmod);
void bfa_fcs_start(struct bfa_fcs_s *fcs);
-/**
+/*
* bfa fcs vf public functions
*/
-bfa_status_t bfa_fcs_vf_mode_enable(struct bfa_fcs_s *fcs, u16 vf_id);
-bfa_status_t bfa_fcs_vf_mode_disable(struct bfa_fcs_s *fcs);
-bfa_status_t bfa_fcs_vf_create(bfa_fcs_vf_t *vf, struct bfa_fcs_s *fcs,
- u16 vf_id, struct bfa_lport_cfg_s *port_cfg,
- struct bfad_vf_s *vf_drv);
-bfa_status_t bfa_fcs_vf_delete(bfa_fcs_vf_t *vf);
-void bfa_fcs_vf_list(struct bfa_fcs_s *fcs, u16 *vf_ids, int *nvfs);
-void bfa_fcs_vf_list_all(struct bfa_fcs_s *fcs, u16 *vf_ids, int *nvfs);
-void bfa_fcs_vf_get_attr(bfa_fcs_vf_t *vf, struct bfa_vf_attr_s *vf_attr);
-void bfa_fcs_vf_get_stats(bfa_fcs_vf_t *vf,
- struct bfa_vf_stats_s *vf_stats);
-void bfa_fcs_vf_clear_stats(bfa_fcs_vf_t *vf);
-void bfa_fcs_vf_get_ports(bfa_fcs_vf_t *vf, wwn_t vpwwn[], int *nports);
bfa_fcs_vf_t *bfa_fcs_vf_lookup(struct bfa_fcs_s *fcs, u16 vf_id);
u16 bfa_fcs_fabric_vport_count(struct bfa_fcs_fabric_s *fabric);
@@ -729,11 +716,11 @@ u16 bfa_fcs_fabric_get_switch_oui(struct bfa_fcs_fabric_s *fabric);
void bfa_fcs_uf_attach(struct bfa_fcs_s *fcs);
void bfa_fcs_port_attach(struct bfa_fcs_s *fcs);
-/**
+/*
* BFA FCS callback interfaces
*/
-/**
+/*
* fcb Main fcs callbacks
*/
@@ -742,7 +729,7 @@ struct bfad_vf_s;
struct bfad_vport_s;
struct bfad_rport_s;
-/**
+/*
* lport callbacks
*/
struct bfad_port_s *bfa_fcb_lport_new(struct bfad_s *bfad,
@@ -754,19 +741,19 @@ void bfa_fcb_lport_delete(struct bfad_s *bfad, enum bfa_lport_role roles,
struct bfad_vf_s *vf_drv,
struct bfad_vport_s *vp_drv);
-/**
+/*
* vport callbacks
*/
void bfa_fcb_pbc_vport_create(struct bfad_s *bfad, struct bfi_pbc_vport_s);
-/**
+/*
* rport callbacks
*/
bfa_status_t bfa_fcb_rport_alloc(struct bfad_s *bfad,
struct bfa_fcs_rport_s **rport,
struct bfad_rport_s **rport_drv);
-/**
+/*
* itnim callbacks
*/
void bfa_fcb_itnim_alloc(struct bfad_s *bfad, struct bfa_fcs_itnim_s **itnim,
diff --git a/drivers/scsi/bfa/bfa_fcs_fcpim.c b/drivers/scsi/bfa/bfa_fcs_fcpim.c
index 569dfefab70d..9662bcdeb41d 100644
--- a/drivers/scsi/bfa/bfa_fcs_fcpim.c
+++ b/drivers/scsi/bfa/bfa_fcs_fcpim.c
@@ -15,7 +15,7 @@
* General Public License for more details.
*/
-/**
+/*
* fcpim.c - FCP initiator mode i-t nexus state machine
*/
@@ -38,7 +38,7 @@ static void bfa_fcs_itnim_prli_response(void *fcsarg,
bfa_status_t req_status, u32 rsp_len,
u32 resid_len, struct fchs_s *rsp_fchs);
-/**
+/*
* fcs_itnim_sm FCS itnim state machine events
*/
@@ -84,7 +84,7 @@ static struct bfa_sm_table_s itnim_sm_table[] = {
{BFA_SM(bfa_fcs_itnim_sm_initiator), BFA_ITNIM_INITIATIOR},
};
-/**
+/*
* fcs_itnim_sm FCS itnim state machine
*/
@@ -494,11 +494,11 @@ bfa_fcs_itnim_free(struct bfa_fcs_itnim_s *itnim)
-/**
+/*
* itnim_public FCS ITNIM public interfaces
*/
-/**
+/*
* Called by rport when a new rport is created.
*
* @param[in] rport - remote port.
@@ -554,7 +554,7 @@ bfa_fcs_itnim_create(struct bfa_fcs_rport_s *rport)
return itnim;
}
-/**
+/*
* Called by rport to delete the instance of FCPIM.
*
* @param[in] rport - remote port.
@@ -566,7 +566,7 @@ bfa_fcs_itnim_delete(struct bfa_fcs_itnim_s *itnim)
bfa_sm_send_event(itnim, BFA_FCS_ITNIM_SM_DELETE);
}
-/**
+/*
* Notification from rport that PLOGI is complete to initiate FC-4 session.
*/
void
@@ -586,7 +586,7 @@ bfa_fcs_itnim_rport_online(struct bfa_fcs_itnim_s *itnim)
}
}
-/**
+/*
* Called by rport to handle a remote device offline.
*/
void
@@ -596,7 +596,7 @@ bfa_fcs_itnim_rport_offline(struct bfa_fcs_itnim_s *itnim)
bfa_sm_send_event(itnim, BFA_FCS_ITNIM_SM_OFFLINE);
}
-/**
+/*
* Called by rport when remote port is known to be an initiator from
* PRLI received.
*/
@@ -608,7 +608,7 @@ bfa_fcs_itnim_is_initiator(struct bfa_fcs_itnim_s *itnim)
bfa_sm_send_event(itnim, BFA_FCS_ITNIM_SM_INITIATOR);
}
-/**
+/*
* Called by rport to check if the itnim is online.
*/
bfa_status_t
@@ -625,7 +625,7 @@ bfa_fcs_itnim_get_online_state(struct bfa_fcs_itnim_s *itnim)
}
}
-/**
+/*
* BFA completion callback for bfa_itnim_online().
*/
void
@@ -637,7 +637,7 @@ bfa_cb_itnim_online(void *cbarg)
bfa_sm_send_event(itnim, BFA_FCS_ITNIM_SM_HCB_ONLINE);
}
-/**
+/*
* BFA completion callback for bfa_itnim_offline().
*/
void
@@ -649,7 +649,7 @@ bfa_cb_itnim_offline(void *cb_arg)
bfa_sm_send_event(itnim, BFA_FCS_ITNIM_SM_HCB_OFFLINE);
}
-/**
+/*
* Mark the beginning of PATH TOV handling. IO completion callbacks
* are still pending.
*/
@@ -661,7 +661,7 @@ bfa_cb_itnim_tov_begin(void *cb_arg)
bfa_trc(itnim->fcs, itnim->rport->pwwn);
}
-/**
+/*
* Mark the end of PATH TOV handling. All pending IOs are already cleaned up.
*/
void
@@ -674,7 +674,7 @@ bfa_cb_itnim_tov(void *cb_arg)
itnim_drv->state = ITNIM_STATE_TIMEOUT;
}
-/**
+/*
* BFA notification to FCS/driver for second level error recovery.
*
* At least one I/O request has timed out and the target is unresponsive to
@@ -736,7 +736,7 @@ bfa_fcs_itnim_stats_get(struct bfa_fcs_lport_s *port, wwn_t rpwwn,
if (itnim == NULL)
return BFA_STATUS_NO_FCPIM_NEXUS;
- bfa_os_memcpy(stats, &itnim->stats, sizeof(struct bfa_itnim_stats_s));
+ memcpy(stats, &itnim->stats, sizeof(struct bfa_itnim_stats_s));
return BFA_STATUS_OK;
}
@@ -753,7 +753,7 @@ bfa_fcs_itnim_stats_clear(struct bfa_fcs_lport_s *port, wwn_t rpwwn)
if (itnim == NULL)
return BFA_STATUS_NO_FCPIM_NEXUS;
- bfa_os_memset(&itnim->stats, 0, sizeof(struct bfa_itnim_stats_s));
+ memset(&itnim->stats, 0, sizeof(struct bfa_itnim_stats_s));
return BFA_STATUS_OK;
}
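The s/bfa_os_memcpy/memcpy/ and s/bfa_os_memset/memset/ substitutions in this file follow the same pattern as the byte-order changes elsewhere in the series: the driver-private bfa_os_* wrappers are dropped in favour of the kernel's own helpers. Assuming the wrappers were thin one-to-one aliases (which the purely mechanical replacements suggest), the mapping is:

    bfa_os_memset(d, c, n)              ->  memset(d, c, n)
    bfa_os_memcpy(d, s, n)              ->  memcpy(d, s, n)
    bfa_os_htons(x) / bfa_os_ntohs(x)   ->  cpu_to_be16(x) / be16_to_cpu(x)
    bfa_os_htonl(x) / bfa_os_ntohl(x)   ->  cpu_to_be32(x) / be32_to_cpu(x)
    bfa_os_assign(lhs, rhs)             ->  plain structure assignment (lhs = rhs)

A minimal example of the resulting idiomatic code, with a made-up example_stats structure:

#include <linux/types.h>
#include <linux/string.h>
#include <asm/byteorder.h>

struct example_stats {
	__be16 wire_count;	/* kept in wire (big-endian) order */
	u32 local_count;	/* host-endian bookkeeping */
};

static void example_stats_reset(struct example_stats *st, u16 count)
{
	memset(st, 0, sizeof(*st));		/* formerly bfa_os_memset() */
	st->wire_count = cpu_to_be16(count);	/* formerly bfa_os_htons() */
}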
diff --git a/drivers/scsi/bfa/bfa_fcs_lport.c b/drivers/scsi/bfa/bfa_fcs_lport.c
index b522bf30247a..377cbfff6f2e 100644
--- a/drivers/scsi/bfa/bfa_fcs_lport.c
+++ b/drivers/scsi/bfa/bfa_fcs_lport.c
@@ -15,10 +15,6 @@
* General Public License for more details.
*/
-/**
- * bfa_fcs_lport.c BFA FCS port
- */
-
#include "bfa_fcs.h"
#include "bfa_fcbuild.h"
#include "bfa_fc.h"
@@ -26,10 +22,6 @@
BFA_TRC_FILE(FCS, PORT);
-/**
- * Forward declarations
- */
-
static void bfa_fcs_lport_send_ls_rjt(struct bfa_fcs_lport_s *port,
struct fchs_s *rx_fchs, u8 reason_code,
u8 reason_code_expl);
@@ -72,7 +64,7 @@ static struct {
bfa_fcs_lport_n2n_offline},
};
-/**
+/*
* fcs_port_sm FCS logical port state machine
*/
@@ -240,7 +232,7 @@ bfa_fcs_lport_sm_deleting(
}
}
-/**
+/*
* fcs_port_pvt
*/
@@ -272,7 +264,7 @@ bfa_fcs_lport_send_ls_rjt(struct bfa_fcs_lport_s *port, struct fchs_s *rx_fchs,
FC_MAX_PDUSZ, 0);
}
-/**
+/*
* Process incoming plogi from a remote port.
*/
static void
@@ -303,7 +295,7 @@ bfa_fcs_lport_plogi(struct bfa_fcs_lport_s *port,
return;
}
- /**
+ /*
* Direct Attach P2P mode : verify address assigned by the r-port.
*/
if ((!bfa_fcs_fabric_is_switched(port->fabric)) &&
@@ -319,12 +311,12 @@ bfa_fcs_lport_plogi(struct bfa_fcs_lport_s *port,
port->pid = rx_fchs->d_id;
}
- /**
+ /*
* First, check if we know the device by pwwn.
*/
rport = bfa_fcs_lport_get_rport_by_pwwn(port, plogi->port_name);
if (rport) {
- /**
+ /*
* Direct Attach P2P mode : handle address assigned by r-port.
*/
if ((!bfa_fcs_fabric_is_switched(port->fabric)) &&
@@ -337,37 +329,37 @@ bfa_fcs_lport_plogi(struct bfa_fcs_lport_s *port,
return;
}
- /**
+ /*
* Next, lookup rport by PID.
*/
rport = bfa_fcs_lport_get_rport_by_pid(port, rx_fchs->s_id);
if (!rport) {
- /**
+ /*
* Inbound PLOGI from a new device.
*/
bfa_fcs_rport_plogi_create(port, rx_fchs, plogi);
return;
}
- /**
+ /*
* Rport is known only by PID.
*/
if (rport->pwwn) {
- /**
+ /*
* This is a different device with the same pid. Old device
* disappeared. Send implicit LOGO to old device.
*/
bfa_assert(rport->pwwn != plogi->port_name);
bfa_fcs_rport_logo_imp(rport);
- /**
+ /*
* Inbound PLOGI from a new device (with old PID).
*/
bfa_fcs_rport_plogi_create(port, rx_fchs, plogi);
return;
}
- /**
+ /*
* PLOGI crossing each other.
*/
bfa_assert(rport->pwwn == WWN_NULL);
@@ -479,12 +471,12 @@ static void
bfa_fs_port_get_gen_topo_data(struct bfa_fcs_lport_s *port,
struct fc_rnid_general_topology_data_s *gen_topo_data)
{
- bfa_os_memset(gen_topo_data, 0,
+ memset(gen_topo_data, 0,
sizeof(struct fc_rnid_general_topology_data_s));
- gen_topo_data->asso_type = bfa_os_htonl(RNID_ASSOCIATED_TYPE_HOST);
+ gen_topo_data->asso_type = cpu_to_be32(RNID_ASSOCIATED_TYPE_HOST);
gen_topo_data->phy_port_num = 0; /* @todo */
- gen_topo_data->num_attached_nodes = bfa_os_htonl(1);
+ gen_topo_data->num_attached_nodes = cpu_to_be32(1);
}
static void
@@ -598,10 +590,10 @@ bfa_fcs_lport_deleted(struct bfa_fcs_lport_s *port)
-/**
+/*
* fcs_lport_api BFA FCS port API
*/
-/**
+/*
* Module initialization
*/
void
@@ -610,7 +602,7 @@ bfa_fcs_lport_modinit(struct bfa_fcs_s *fcs)
}
-/**
+/*
* Module cleanup
*/
void
@@ -619,7 +611,7 @@ bfa_fcs_lport_modexit(struct bfa_fcs_s *fcs)
bfa_fcs_modexit_comp(fcs);
}
-/**
+/*
* Unsolicited frame receive handling.
*/
void
@@ -637,7 +629,7 @@ bfa_fcs_lport_uf_recv(struct bfa_fcs_lport_s *lport,
return;
}
- /**
+ /*
* First, handle ELSs that do not require a login.
*/
/*
@@ -673,7 +665,7 @@ bfa_fcs_lport_uf_recv(struct bfa_fcs_lport_s *lport,
bfa_fcs_lport_abts_acc(lport, fchs);
return;
}
- /**
+ /*
* look for a matching remote port ID
*/
rport = bfa_fcs_lport_get_rport_by_pid(lport, pid);
@@ -686,7 +678,7 @@ bfa_fcs_lport_uf_recv(struct bfa_fcs_lport_s *lport,
return;
}
- /**
+ /*
* Only handles ELS frames for now.
*/
if (fchs->type != FC_TYPE_ELS) {
@@ -702,20 +694,20 @@ bfa_fcs_lport_uf_recv(struct bfa_fcs_lport_s *lport,
}
if (els_cmd->els_code == FC_ELS_LOGO) {
- /**
+ /*
* @todo Handle LOGO frames received.
*/
return;
}
if (els_cmd->els_code == FC_ELS_PRLI) {
- /**
+ /*
* @todo Handle PRLI frames received.
*/
return;
}
- /**
+ /*
* Unhandled ELS frames. Send a LS_RJT.
*/
bfa_fcs_lport_send_ls_rjt(lport, fchs, FC_LS_RJT_RSN_CMD_NOT_SUPP,
@@ -723,7 +715,7 @@ bfa_fcs_lport_uf_recv(struct bfa_fcs_lport_s *lport,
}
-/**
+/*
* PID based Lookup for a R-Port in the Port R-Port Queue
*/
struct bfa_fcs_rport_s *
@@ -742,7 +734,7 @@ bfa_fcs_lport_get_rport_by_pid(struct bfa_fcs_lport_s *port, u32 pid)
return NULL;
}
-/**
+/*
* PWWN based Lookup for a R-Port in the Port R-Port Queue
*/
struct bfa_fcs_rport_s *
@@ -761,7 +753,7 @@ bfa_fcs_lport_get_rport_by_pwwn(struct bfa_fcs_lport_s *port, wwn_t pwwn)
return NULL;
}
-/**
+/*
* NWWN based Lookup for a R-Port in the Port R-Port Queue
*/
struct bfa_fcs_rport_s *
@@ -780,7 +772,7 @@ bfa_fcs_lport_get_rport_by_nwwn(struct bfa_fcs_lport_s *port, wwn_t nwwn)
return NULL;
}
-/**
+/*
* Called by rport module when new rports are discovered.
*/
void
@@ -792,7 +784,7 @@ bfa_fcs_lport_add_rport(
port->num_rports++;
}
-/**
+/*
* Called by rport module to when rports are deleted.
*/
void
@@ -807,7 +799,7 @@ bfa_fcs_lport_del_rport(
bfa_sm_send_event(port, BFA_FCS_PORT_SM_DELRPORT);
}
-/**
+/*
* Called by fabric for base port when fabric login is complete.
* Called by vport for virtual ports when FDISC is complete.
*/
@@ -817,7 +809,7 @@ bfa_fcs_lport_online(struct bfa_fcs_lport_s *port)
bfa_sm_send_event(port, BFA_FCS_PORT_SM_ONLINE);
}
-/**
+/*
* Called by fabric for base port when fabric goes offline.
* Called by vport for virtual ports when virtual port becomes offline.
*/
@@ -827,7 +819,7 @@ bfa_fcs_lport_offline(struct bfa_fcs_lport_s *port)
bfa_sm_send_event(port, BFA_FCS_PORT_SM_OFFLINE);
}
-/**
+/*
* Called by fabric to delete base lport and associated resources.
*
* Called by vport to delete lport and associated resources. Should call
@@ -839,7 +831,7 @@ bfa_fcs_lport_delete(struct bfa_fcs_lport_s *port)
bfa_sm_send_event(port, BFA_FCS_PORT_SM_DELETE);
}
-/**
+/*
* Return TRUE if port is online, else return FALSE
*/
bfa_boolean_t
@@ -848,7 +840,7 @@ bfa_fcs_lport_is_online(struct bfa_fcs_lport_s *port)
return bfa_sm_cmp_state(port, bfa_fcs_lport_sm_online);
}
-/**
+/*
* Attach time initialization of logical ports.
*/
void
@@ -865,7 +857,7 @@ bfa_fcs_lport_attach(struct bfa_fcs_lport_s *lport, struct bfa_fcs_s *fcs,
lport->num_rports = 0;
}
-/**
+/*
* Logical port initialization of base or virtual port.
* Called by fabric for base port or by vport for virtual ports.
*/
@@ -878,7 +870,7 @@ bfa_fcs_lport_init(struct bfa_fcs_lport_s *lport,
struct bfad_s *bfad = (struct bfad_s *)lport->fcs->bfad;
char lpwwn_buf[BFA_STRING_32];
- bfa_os_assign(lport->port_cfg, *port_cfg);
+ lport->port_cfg = *port_cfg;
lport->bfad_port = bfa_fcb_lport_new(lport->fcs->bfad, lport,
lport->port_cfg.roles,
@@ -894,7 +886,7 @@ bfa_fcs_lport_init(struct bfa_fcs_lport_s *lport,
bfa_sm_send_event(lport, BFA_FCS_PORT_SM_CREATE);
}
-/**
+/*
* fcs_lport_api
*/
@@ -934,11 +926,11 @@ bfa_fcs_lport_get_attr(
}
}
-/**
+/*
* bfa_fcs_lport_fab port fab functions
*/
-/**
+/*
* Called by port to initialize fabric services of the base port.
*/
static void
@@ -949,7 +941,7 @@ bfa_fcs_lport_fab_init(struct bfa_fcs_lport_s *port)
bfa_fcs_lport_ms_init(port);
}
-/**
+/*
* Called by port to notify transition to online state.
*/
static void
@@ -959,7 +951,7 @@ bfa_fcs_lport_fab_online(struct bfa_fcs_lport_s *port)
bfa_fcs_lport_scn_online(port);
}
-/**
+/*
* Called by port to notify transition to offline state.
*/
static void
@@ -970,11 +962,11 @@ bfa_fcs_lport_fab_offline(struct bfa_fcs_lport_s *port)
bfa_fcs_lport_ms_offline(port);
}
-/**
+/*
* bfa_fcs_lport_n2n functions
*/
-/**
+/*
* Called by fcs/port to initialize N2N topology.
*/
static void
@@ -982,7 +974,7 @@ bfa_fcs_lport_n2n_init(struct bfa_fcs_lport_s *port)
{
}
-/**
+/*
* Called by fcs/port to notify transition to online state.
*/
static void
@@ -1006,7 +998,7 @@ bfa_fcs_lport_n2n_online(struct bfa_fcs_lport_s *port)
((void *)&pcfg->pwwn, (void *)&n2n_port->rem_port_wwn,
sizeof(wwn_t)) > 0) {
port->pid = N2N_LOCAL_PID;
- /**
+ /*
* First, check if we know the device by pwwn.
*/
rport = bfa_fcs_lport_get_rport_by_pwwn(port,
@@ -1035,7 +1027,7 @@ bfa_fcs_lport_n2n_online(struct bfa_fcs_lport_s *port)
}
}
-/**
+/*
* Called by fcs/port to notify transition to offline state.
*/
static void
@@ -1094,11 +1086,11 @@ static void bfa_fcs_fdmi_get_hbaattr(struct bfa_fcs_lport_fdmi_s *fdmi,
struct bfa_fcs_fdmi_hba_attr_s *hba_attr);
static void bfa_fcs_fdmi_get_portattr(struct bfa_fcs_lport_fdmi_s *fdmi,
struct bfa_fcs_fdmi_port_attr_s *port_attr);
-/**
+/*
* fcs_fdmi_sm FCS FDMI state machine
*/
-/**
+/*
* FDMI State Machine events
*/
enum port_fdmi_event {
@@ -1143,7 +1135,7 @@ static void bfa_fcs_lport_fdmi_sm_online(struct bfa_fcs_lport_fdmi_s *fdmi,
static void bfa_fcs_lport_fdmi_sm_disabled(
struct bfa_fcs_lport_fdmi_s *fdmi,
enum port_fdmi_event event);
-/**
+/*
* Start in offline state - awaiting MS to send start.
*/
static void
@@ -1510,7 +1502,7 @@ bfa_fcs_lport_fdmi_sm_online(struct bfa_fcs_lport_fdmi_s *fdmi,
bfa_sm_fault(port->fcs, event);
}
}
-/**
+/*
* FDMI is disabled state.
*/
static void
@@ -1525,7 +1517,7 @@ bfa_fcs_lport_fdmi_sm_disabled(struct bfa_fcs_lport_fdmi_s *fdmi,
/* No op State. It can only be enabled at Driver Init. */
}
-/**
+/*
* RHBA : Register HBA Attributes.
*/
static void
@@ -1549,7 +1541,7 @@ bfa_fcs_lport_fdmi_send_rhba(void *fdmi_cbarg, struct bfa_fcxp_s *fcxp_alloced)
fdmi->fcxp = fcxp;
pyld = bfa_fcxp_get_reqbuf(fcxp);
- bfa_os_memset(pyld, 0, FC_MAX_PDUSZ);
+ memset(pyld, 0, FC_MAX_PDUSZ);
len = fc_fdmi_reqhdr_build(&fchs, pyld, bfa_fcs_lport_get_fcid(port),
FDMI_RHBA);
@@ -1584,7 +1576,7 @@ bfa_fcs_lport_fdmi_build_rhba_pyld(struct bfa_fcs_lport_fdmi_s *fdmi, u8 *pyld)
bfa_fcs_fdmi_get_hbaattr(fdmi, fcs_hba_attr);
rhba->hba_id = bfa_fcs_lport_get_pwwn(port);
- rhba->port_list.num_ports = bfa_os_htonl(1);
+ rhba->port_list.num_ports = cpu_to_be32(1);
rhba->port_list.port_entry = bfa_fcs_lport_get_pwwn(port);
len = sizeof(rhba->hba_id) + sizeof(rhba->port_list);
@@ -1601,86 +1593,69 @@ bfa_fcs_lport_fdmi_build_rhba_pyld(struct bfa_fcs_lport_fdmi_s *fdmi, u8 *pyld)
* Node Name
*/
attr = (struct fdmi_attr_s *) curr_ptr;
- attr->type = bfa_os_htons(FDMI_HBA_ATTRIB_NODENAME);
+ attr->type = cpu_to_be16(FDMI_HBA_ATTRIB_NODENAME);
attr->len = sizeof(wwn_t);
memcpy(attr->value, &bfa_fcs_lport_get_nwwn(port), attr->len);
curr_ptr += sizeof(attr->type) + sizeof(attr->len) + attr->len;
len += attr->len;
count++;
- attr->len =
- bfa_os_htons(attr->len + sizeof(attr->type) +
+ attr->len = cpu_to_be16(attr->len + sizeof(attr->type) +
sizeof(attr->len));
/*
* Manufacturer
*/
attr = (struct fdmi_attr_s *) curr_ptr;
- attr->type = bfa_os_htons(FDMI_HBA_ATTRIB_MANUFACTURER);
+ attr->type = cpu_to_be16(FDMI_HBA_ATTRIB_MANUFACTURER);
attr->len = (u16) strlen(fcs_hba_attr->manufacturer);
memcpy(attr->value, fcs_hba_attr->manufacturer, attr->len);
- attr->len = fc_roundup(attr->len, sizeof(u32)); /* variable
- *fields need
- *to be 4 byte
- *aligned */
+ attr->len = fc_roundup(attr->len, sizeof(u32));
curr_ptr += sizeof(attr->type) + sizeof(attr->len) + attr->len;
len += attr->len;
count++;
- attr->len =
- bfa_os_htons(attr->len + sizeof(attr->type) +
+ attr->len = cpu_to_be16(attr->len + sizeof(attr->type) +
sizeof(attr->len));
/*
* Serial Number
*/
attr = (struct fdmi_attr_s *) curr_ptr;
- attr->type = bfa_os_htons(FDMI_HBA_ATTRIB_SERIALNUM);
+ attr->type = cpu_to_be16(FDMI_HBA_ATTRIB_SERIALNUM);
attr->len = (u16) strlen(fcs_hba_attr->serial_num);
memcpy(attr->value, fcs_hba_attr->serial_num, attr->len);
- attr->len = fc_roundup(attr->len, sizeof(u32)); /* variable
- *fields need
- *to be 4 byte
- *aligned */
+ attr->len = fc_roundup(attr->len, sizeof(u32));
curr_ptr += sizeof(attr->type) + sizeof(attr->len) + attr->len;
len += attr->len;
count++;
- attr->len =
- bfa_os_htons(attr->len + sizeof(attr->type) +
+ attr->len = cpu_to_be16(attr->len + sizeof(attr->type) +
sizeof(attr->len));
/*
* Model
*/
attr = (struct fdmi_attr_s *) curr_ptr;
- attr->type = bfa_os_htons(FDMI_HBA_ATTRIB_MODEL);
+ attr->type = cpu_to_be16(FDMI_HBA_ATTRIB_MODEL);
attr->len = (u16) strlen(fcs_hba_attr->model);
memcpy(attr->value, fcs_hba_attr->model, attr->len);
- attr->len = fc_roundup(attr->len, sizeof(u32)); /* variable
- *fields need
- *to be 4 byte
- *aligned */
+ attr->len = fc_roundup(attr->len, sizeof(u32));
curr_ptr += sizeof(attr->type) + sizeof(attr->len) + attr->len;
len += attr->len;
count++;
- attr->len =
- bfa_os_htons(attr->len + sizeof(attr->type) +
+ attr->len = cpu_to_be16(attr->len + sizeof(attr->type) +
sizeof(attr->len));
/*
* Model Desc
*/
attr = (struct fdmi_attr_s *) curr_ptr;
- attr->type = bfa_os_htons(FDMI_HBA_ATTRIB_MODEL_DESC);
+ attr->type = cpu_to_be16(FDMI_HBA_ATTRIB_MODEL_DESC);
attr->len = (u16) strlen(fcs_hba_attr->model_desc);
memcpy(attr->value, fcs_hba_attr->model_desc, attr->len);
- attr->len = fc_roundup(attr->len, sizeof(u32)); /* variable
- *fields need
- *to be 4 byte
- *aligned */
+ attr->len = fc_roundup(attr->len, sizeof(u32));
curr_ptr += sizeof(attr->type) + sizeof(attr->len) + attr->len;
len += attr->len;
count++;
- attr->len =
- bfa_os_htons(attr->len + sizeof(attr->type) +
+ attr->len = cpu_to_be16(attr->len + sizeof(attr->type) +
sizeof(attr->len));
/*
@@ -1688,18 +1663,14 @@ bfa_fcs_lport_fdmi_build_rhba_pyld(struct bfa_fcs_lport_fdmi_s *fdmi, u8 *pyld)
*/
if (fcs_hba_attr->hw_version[0] != '\0') {
attr = (struct fdmi_attr_s *) curr_ptr;
- attr->type = bfa_os_htons(FDMI_HBA_ATTRIB_HW_VERSION);
+ attr->type = cpu_to_be16(FDMI_HBA_ATTRIB_HW_VERSION);
attr->len = (u16) strlen(fcs_hba_attr->hw_version);
memcpy(attr->value, fcs_hba_attr->hw_version, attr->len);
- attr->len = fc_roundup(attr->len, sizeof(u32)); /* variable
- *fields need
- *to be 4 byte
- *aligned */
+ attr->len = fc_roundup(attr->len, sizeof(u32));
curr_ptr += sizeof(attr->type) + sizeof(attr->len) + attr->len;
len += attr->len;
count++;
- attr->len =
- bfa_os_htons(attr->len + sizeof(attr->type) +
+ attr->len = cpu_to_be16(attr->len + sizeof(attr->type) +
sizeof(attr->len));
}
@@ -1707,18 +1678,14 @@ bfa_fcs_lport_fdmi_build_rhba_pyld(struct bfa_fcs_lport_fdmi_s *fdmi, u8 *pyld)
* Driver Version
*/
attr = (struct fdmi_attr_s *) curr_ptr;
- attr->type = bfa_os_htons(FDMI_HBA_ATTRIB_DRIVER_VERSION);
+ attr->type = cpu_to_be16(FDMI_HBA_ATTRIB_DRIVER_VERSION);
attr->len = (u16) strlen(fcs_hba_attr->driver_version);
memcpy(attr->value, fcs_hba_attr->driver_version, attr->len);
- attr->len = fc_roundup(attr->len, sizeof(u32)); /* variable
- *fields need
- *to be 4 byte
- *aligned */
+ attr->len = fc_roundup(attr->len, sizeof(u32));
curr_ptr += sizeof(attr->type) + sizeof(attr->len) + attr->len;
len += attr->len;
count++;
- attr->len =
- bfa_os_htons(attr->len + sizeof(attr->type) +
+ attr->len = cpu_to_be16(attr->len + sizeof(attr->type) +
sizeof(attr->len));
/*
@@ -1726,18 +1693,14 @@ bfa_fcs_lport_fdmi_build_rhba_pyld(struct bfa_fcs_lport_fdmi_s *fdmi, u8 *pyld)
*/
if (fcs_hba_attr->option_rom_ver[0] != '\0') {
attr = (struct fdmi_attr_s *) curr_ptr;
- attr->type = bfa_os_htons(FDMI_HBA_ATTRIB_ROM_VERSION);
+ attr->type = cpu_to_be16(FDMI_HBA_ATTRIB_ROM_VERSION);
attr->len = (u16) strlen(fcs_hba_attr->option_rom_ver);
memcpy(attr->value, fcs_hba_attr->option_rom_ver, attr->len);
- attr->len = fc_roundup(attr->len, sizeof(u32)); /* variable
- *fields need
- *to be 4 byte
- *aligned */
+ attr->len = fc_roundup(attr->len, sizeof(u32));
curr_ptr += sizeof(attr->type) + sizeof(attr->len) + attr->len;
len += attr->len;
count++;
- attr->len =
- bfa_os_htons(attr->len + sizeof(attr->type) +
+ attr->len = cpu_to_be16(attr->len + sizeof(attr->type) +
sizeof(attr->len));
}
@@ -1745,18 +1708,14 @@ bfa_fcs_lport_fdmi_build_rhba_pyld(struct bfa_fcs_lport_fdmi_s *fdmi, u8 *pyld)
* f/w Version = driver version
*/
attr = (struct fdmi_attr_s *) curr_ptr;
- attr->type = bfa_os_htons(FDMI_HBA_ATTRIB_FW_VERSION);
+ attr->type = cpu_to_be16(FDMI_HBA_ATTRIB_FW_VERSION);
attr->len = (u16) strlen(fcs_hba_attr->driver_version);
memcpy(attr->value, fcs_hba_attr->driver_version, attr->len);
- attr->len = fc_roundup(attr->len, sizeof(u32)); /* variable
- *fields need
- *to be 4 byte
- *aligned */
+ attr->len = fc_roundup(attr->len, sizeof(u32));
curr_ptr += sizeof(attr->type) + sizeof(attr->len) + attr->len;
len += attr->len;
count++;
- attr->len =
- bfa_os_htons(attr->len + sizeof(attr->type) +
+ attr->len = cpu_to_be16(attr->len + sizeof(attr->type) +
sizeof(attr->len));
/*
@@ -1764,18 +1723,14 @@ bfa_fcs_lport_fdmi_build_rhba_pyld(struct bfa_fcs_lport_fdmi_s *fdmi, u8 *pyld)
*/
if (fcs_hba_attr->os_name[0] != '\0') {
attr = (struct fdmi_attr_s *) curr_ptr;
- attr->type = bfa_os_htons(FDMI_HBA_ATTRIB_OS_NAME);
+ attr->type = cpu_to_be16(FDMI_HBA_ATTRIB_OS_NAME);
attr->len = (u16) strlen(fcs_hba_attr->os_name);
memcpy(attr->value, fcs_hba_attr->os_name, attr->len);
- attr->len = fc_roundup(attr->len, sizeof(u32)); /* variable
- *fields need
- *to be 4 byte
- *aligned */
+ attr->len = fc_roundup(attr->len, sizeof(u32));
curr_ptr += sizeof(attr->type) + sizeof(attr->len) + attr->len;
len += attr->len;
count++;
- attr->len =
- bfa_os_htons(attr->len + sizeof(attr->type) +
+ attr->len = cpu_to_be16(attr->len + sizeof(attr->type) +
sizeof(attr->len));
}
@@ -1783,22 +1738,20 @@ bfa_fcs_lport_fdmi_build_rhba_pyld(struct bfa_fcs_lport_fdmi_s *fdmi, u8 *pyld)
* MAX_CT_PAYLOAD
*/
attr = (struct fdmi_attr_s *) curr_ptr;
- attr->type = bfa_os_htons(FDMI_HBA_ATTRIB_MAX_CT);
+ attr->type = cpu_to_be16(FDMI_HBA_ATTRIB_MAX_CT);
attr->len = sizeof(fcs_hba_attr->max_ct_pyld);
memcpy(attr->value, &fcs_hba_attr->max_ct_pyld, attr->len);
len += attr->len;
count++;
- attr->len =
- bfa_os_htons(attr->len + sizeof(attr->type) +
+ attr->len = cpu_to_be16(attr->len + sizeof(attr->type) +
sizeof(attr->len));
/*
* Update size of payload
*/
- len += ((sizeof(attr->type) +
- sizeof(attr->len)) * count);
+ len += ((sizeof(attr->type) + sizeof(attr->len)) * count);
- rhba->hba_attr_blk.attr_count = bfa_os_htonl(count);
+ rhba->hba_attr_blk.attr_count = cpu_to_be32(count);
return len;
}
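Every attribute in the RHBA payload above is a type/length/value triple: the value is padded to a 4-byte boundary with fc_roundup(), the running payload length and attribute count are advanced, and the length finally stored in the entry is the full TLV size (type + length + padded value) converted to big-endian. The helper below is only a condensed sketch of that repeated append step (example_append_attr and example_fdmi_attr are illustrative names, not driver code), and it assumes the destination buffer was zeroed beforehand, as the memset(pyld, 0, FC_MAX_PDUSZ) above guarantees:

#include <linux/types.h>
#include <linux/string.h>
#include <asm/byteorder.h>

struct example_fdmi_attr {		/* stand-in for struct fdmi_attr_s */
	__be16 type;
	__be16 len;			/* full TLV length, big-endian */
	u8 value[];
};

#define example_roundup(x, a)	(((x) + (a) - 1) & ~((a) - 1))	/* like fc_roundup() */

/* Append one attribute at curr; returns the number of bytes consumed. */
static u16 example_append_attr(u8 *curr, u16 type, const void *val, u16 vlen)
{
	struct example_fdmi_attr *attr = (struct example_fdmi_attr *)curr;
	u16 padded = example_roundup(vlen, sizeof(u32));

	attr->type = cpu_to_be16(type);
	memcpy(attr->value, val, vlen);	/* pad bytes stay zero: buffer pre-zeroed */
	attr->len = cpu_to_be16(sizeof(attr->type) + sizeof(attr->len) + padded);

	return sizeof(attr->type) + sizeof(attr->len) + padded;
}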
@@ -1825,7 +1778,7 @@ bfa_fcs_lport_fdmi_rhba_response(void *fcsarg, struct bfa_fcxp_s *fcxp,
}
cthdr = (struct ct_hdr_s *) BFA_FCXP_RSP_PLD(fcxp);
- cthdr->cmd_rsp_code = bfa_os_ntohs(cthdr->cmd_rsp_code);
+ cthdr->cmd_rsp_code = be16_to_cpu(cthdr->cmd_rsp_code);
if (cthdr->cmd_rsp_code == CT_RSP_ACCEPT) {
bfa_sm_send_event(fdmi, FDMISM_EVENT_RSP_OK);
@@ -1837,7 +1790,7 @@ bfa_fcs_lport_fdmi_rhba_response(void *fcsarg, struct bfa_fcxp_s *fcxp,
bfa_sm_send_event(fdmi, FDMISM_EVENT_RSP_ERROR);
}
-/**
+/*
* RPRT : Register Port
*/
static void
@@ -1861,7 +1814,7 @@ bfa_fcs_lport_fdmi_send_rprt(void *fdmi_cbarg, struct bfa_fcxp_s *fcxp_alloced)
fdmi->fcxp = fcxp;
pyld = bfa_fcxp_get_reqbuf(fcxp);
- bfa_os_memset(pyld, 0, FC_MAX_PDUSZ);
+ memset(pyld, 0, FC_MAX_PDUSZ);
len = fc_fdmi_reqhdr_build(&fchs, pyld, bfa_fcs_lport_get_fcid(port),
FDMI_RPRT);
@@ -1879,7 +1832,7 @@ bfa_fcs_lport_fdmi_send_rprt(void *fdmi_cbarg, struct bfa_fcxp_s *fcxp_alloced)
bfa_sm_send_event(fdmi, FDMISM_EVENT_RPRT_SENT);
}
-/**
+/*
* This routine builds Port Attribute Block that used in RPA, RPRT commands.
*/
static u16
@@ -1909,56 +1862,54 @@ bfa_fcs_lport_fdmi_build_portattr_block(struct bfa_fcs_lport_fdmi_s *fdmi,
* FC4 Types
*/
attr = (struct fdmi_attr_s *) curr_ptr;
- attr->type = bfa_os_htons(FDMI_PORT_ATTRIB_FC4_TYPES);
+ attr->type = cpu_to_be16(FDMI_PORT_ATTRIB_FC4_TYPES);
attr->len = sizeof(fcs_port_attr.supp_fc4_types);
memcpy(attr->value, fcs_port_attr.supp_fc4_types, attr->len);
curr_ptr += sizeof(attr->type) + sizeof(attr->len) + attr->len;
len += attr->len;
++count;
attr->len =
- bfa_os_htons(attr->len + sizeof(attr->type) +
+ cpu_to_be16(attr->len + sizeof(attr->type) +
sizeof(attr->len));
/*
* Supported Speed
*/
attr = (struct fdmi_attr_s *) curr_ptr;
- attr->type = bfa_os_htons(FDMI_PORT_ATTRIB_SUPP_SPEED);
+ attr->type = cpu_to_be16(FDMI_PORT_ATTRIB_SUPP_SPEED);
attr->len = sizeof(fcs_port_attr.supp_speed);
memcpy(attr->value, &fcs_port_attr.supp_speed, attr->len);
curr_ptr += sizeof(attr->type) + sizeof(attr->len) + attr->len;
len += attr->len;
++count;
attr->len =
- bfa_os_htons(attr->len + sizeof(attr->type) +
+ cpu_to_be16(attr->len + sizeof(attr->type) +
sizeof(attr->len));
/*
* current Port Speed
*/
attr = (struct fdmi_attr_s *) curr_ptr;
- attr->type = bfa_os_htons(FDMI_PORT_ATTRIB_PORT_SPEED);
+ attr->type = cpu_to_be16(FDMI_PORT_ATTRIB_PORT_SPEED);
attr->len = sizeof(fcs_port_attr.curr_speed);
memcpy(attr->value, &fcs_port_attr.curr_speed, attr->len);
curr_ptr += sizeof(attr->type) + sizeof(attr->len) + attr->len;
len += attr->len;
++count;
- attr->len =
- bfa_os_htons(attr->len + sizeof(attr->type) +
+ attr->len = cpu_to_be16(attr->len + sizeof(attr->type) +
sizeof(attr->len));
/*
* max frame size
*/
attr = (struct fdmi_attr_s *) curr_ptr;
- attr->type = bfa_os_htons(FDMI_PORT_ATTRIB_FRAME_SIZE);
+ attr->type = cpu_to_be16(FDMI_PORT_ATTRIB_FRAME_SIZE);
attr->len = sizeof(fcs_port_attr.max_frm_size);
memcpy(attr->value, &fcs_port_attr.max_frm_size, attr->len);
curr_ptr += sizeof(attr->type) + sizeof(attr->len) + attr->len;
len += attr->len;
++count;
- attr->len =
- bfa_os_htons(attr->len + sizeof(attr->type) +
+ attr->len = cpu_to_be16(attr->len + sizeof(attr->type) +
sizeof(attr->len));
/*
@@ -1966,18 +1917,14 @@ bfa_fcs_lport_fdmi_build_portattr_block(struct bfa_fcs_lport_fdmi_s *fdmi,
*/
if (fcs_port_attr.os_device_name[0] != '\0') {
attr = (struct fdmi_attr_s *) curr_ptr;
- attr->type = bfa_os_htons(FDMI_PORT_ATTRIB_DEV_NAME);
+ attr->type = cpu_to_be16(FDMI_PORT_ATTRIB_DEV_NAME);
attr->len = (u16) strlen(fcs_port_attr.os_device_name);
memcpy(attr->value, fcs_port_attr.os_device_name, attr->len);
- attr->len = fc_roundup(attr->len, sizeof(u32)); /* variable
- *fields need
- *to be 4 byte
- *aligned */
+ attr->len = fc_roundup(attr->len, sizeof(u32));
curr_ptr += sizeof(attr->type) + sizeof(attr->len) + attr->len;
len += attr->len;
++count;
- attr->len =
- bfa_os_htons(attr->len + sizeof(attr->type) +
+ attr->len = cpu_to_be16(attr->len + sizeof(attr->type) +
sizeof(attr->len));
}
/*
@@ -1985,27 +1932,22 @@ bfa_fcs_lport_fdmi_build_portattr_block(struct bfa_fcs_lport_fdmi_s *fdmi,
*/
if (fcs_port_attr.host_name[0] != '\0') {
attr = (struct fdmi_attr_s *) curr_ptr;
- attr->type = bfa_os_htons(FDMI_PORT_ATTRIB_HOST_NAME);
+ attr->type = cpu_to_be16(FDMI_PORT_ATTRIB_HOST_NAME);
attr->len = (u16) strlen(fcs_port_attr.host_name);
memcpy(attr->value, fcs_port_attr.host_name, attr->len);
- attr->len = fc_roundup(attr->len, sizeof(u32)); /* variable
- *fields need
- *to be 4 byte
- *aligned */
+ attr->len = fc_roundup(attr->len, sizeof(u32));
curr_ptr += sizeof(attr->type) + sizeof(attr->len) + attr->len;
len += attr->len;
++count;
- attr->len =
- bfa_os_htons(attr->len + sizeof(attr->type) +
+ attr->len = cpu_to_be16(attr->len + sizeof(attr->type) +
sizeof(attr->len));
}
/*
* Update size of payload
*/
- port_attrib->attr_count = bfa_os_htonl(count);
- len += ((sizeof(attr->type) +
- sizeof(attr->len)) * count);
+ port_attrib->attr_count = cpu_to_be32(count);
+ len += ((sizeof(attr->type) + sizeof(attr->len)) * count);
return len;
}
@@ -2050,7 +1992,7 @@ bfa_fcs_lport_fdmi_rprt_response(void *fcsarg, struct bfa_fcxp_s *fcxp,
}
cthdr = (struct ct_hdr_s *) BFA_FCXP_RSP_PLD(fcxp);
- cthdr->cmd_rsp_code = bfa_os_ntohs(cthdr->cmd_rsp_code);
+ cthdr->cmd_rsp_code = be16_to_cpu(cthdr->cmd_rsp_code);
if (cthdr->cmd_rsp_code == CT_RSP_ACCEPT) {
bfa_sm_send_event(fdmi, FDMISM_EVENT_RSP_OK);
@@ -2062,7 +2004,7 @@ bfa_fcs_lport_fdmi_rprt_response(void *fcsarg, struct bfa_fcxp_s *fcxp,
bfa_sm_send_event(fdmi, FDMISM_EVENT_RSP_ERROR);
}
-/**
+/*
* RPA : Register Port Attributes.
*/
static void
@@ -2086,15 +2028,13 @@ bfa_fcs_lport_fdmi_send_rpa(void *fdmi_cbarg, struct bfa_fcxp_s *fcxp_alloced)
fdmi->fcxp = fcxp;
pyld = bfa_fcxp_get_reqbuf(fcxp);
- bfa_os_memset(pyld, 0, FC_MAX_PDUSZ);
+ memset(pyld, 0, FC_MAX_PDUSZ);
len = fc_fdmi_reqhdr_build(&fchs, pyld, bfa_fcs_lport_get_fcid(port),
FDMI_RPA);
- attr_len =
- bfa_fcs_lport_fdmi_build_rpa_pyld(fdmi,
- (u8 *) ((struct ct_hdr_s *) pyld
- + 1));
+ attr_len = bfa_fcs_lport_fdmi_build_rpa_pyld(fdmi,
+ (u8 *) ((struct ct_hdr_s *) pyld + 1));
bfa_fcxp_send(fcxp, NULL, port->fabric->vf_id, port->lp_tag, BFA_FALSE,
FC_CLASS_3, len + attr_len, &fchs,
@@ -2143,7 +2083,7 @@ bfa_fcs_lport_fdmi_rpa_response(void *fcsarg, struct bfa_fcxp_s *fcxp,
}
cthdr = (struct ct_hdr_s *) BFA_FCXP_RSP_PLD(fcxp);
- cthdr->cmd_rsp_code = bfa_os_ntohs(cthdr->cmd_rsp_code);
+ cthdr->cmd_rsp_code = be16_to_cpu(cthdr->cmd_rsp_code);
if (cthdr->cmd_rsp_code == CT_RSP_ACCEPT) {
bfa_sm_send_event(fdmi, FDMISM_EVENT_RSP_OK);
@@ -2170,7 +2110,7 @@ bfa_fcs_fdmi_get_hbaattr(struct bfa_fcs_lport_fdmi_s *fdmi,
struct bfa_fcs_lport_s *port = fdmi->ms->port;
struct bfa_fcs_driver_info_s *driver_info = &port->fcs->driver_info;
- bfa_os_memset(hba_attr, 0, sizeof(struct bfa_fcs_fdmi_hba_attr_s));
+ memset(hba_attr, 0, sizeof(struct bfa_fcs_fdmi_hba_attr_s));
bfa_ioc_get_adapter_manufacturer(&port->fcs->bfa->ioc,
hba_attr->manufacturer);
@@ -2204,7 +2144,7 @@ bfa_fcs_fdmi_get_hbaattr(struct bfa_fcs_lport_fdmi_s *fdmi,
sizeof(driver_info->host_os_patch));
}
- hba_attr->max_ct_pyld = bfa_os_htonl(FC_MAX_PDUSZ);
+ hba_attr->max_ct_pyld = cpu_to_be32(FC_MAX_PDUSZ);
}
void
@@ -2215,7 +2155,7 @@ bfa_fcs_fdmi_get_portattr(struct bfa_fcs_lport_fdmi_s *fdmi,
struct bfa_fcs_driver_info_s *driver_info = &port->fcs->driver_info;
struct bfa_port_attr_s pport_attr;
- bfa_os_memset(port_attr, 0, sizeof(struct bfa_fcs_fdmi_port_attr_s));
+ memset(port_attr, 0, sizeof(struct bfa_fcs_fdmi_port_attr_s));
/*
* get pport attributes from hal
@@ -2230,17 +2170,17 @@ bfa_fcs_fdmi_get_portattr(struct bfa_fcs_lport_fdmi_s *fdmi,
/*
* Supported Speeds
*/
- port_attr->supp_speed = bfa_os_htonl(BFA_FCS_FDMI_SUPORTED_SPEEDS);
+ port_attr->supp_speed = cpu_to_be32(BFA_FCS_FDMI_SUPORTED_SPEEDS);
/*
* Current Speed
*/
- port_attr->curr_speed = bfa_os_htonl(pport_attr.speed);
+ port_attr->curr_speed = cpu_to_be32(pport_attr.speed);
/*
* Max PDU Size.
*/
- port_attr->max_frm_size = bfa_os_htonl(FC_MAX_PDUSZ);
+ port_attr->max_frm_size = cpu_to_be32(FC_MAX_PDUSZ);
/*
* OS device Name
@@ -2321,11 +2261,11 @@ static void bfa_fcs_lport_ms_gfn_response(void *fcsarg,
u32 rsp_len,
u32 resid_len,
struct fchs_s *rsp_fchs);
-/**
+/*
* fcs_ms_sm FCS MS state machine
*/
-/**
+/*
* MS State Machine events
*/
enum port_ms_event {
@@ -2360,7 +2300,7 @@ static void bfa_fcs_lport_ms_sm_gfn_retry(struct bfa_fcs_lport_ms_s *ms,
enum port_ms_event event);
static void bfa_fcs_lport_ms_sm_online(struct bfa_fcs_lport_ms_s *ms,
enum port_ms_event event);
-/**
+/*
* Start in offline state - awaiting NS to send start.
*/
static void
@@ -2432,7 +2372,7 @@ bfa_fcs_lport_ms_sm_plogi(struct bfa_fcs_lport_ms_s *ms,
*/
bfa_fcs_lport_fdmi_online(ms);
- /**
+ /*
* if this is a Vport, go to online state.
*/
if (ms->port->vport) {
@@ -2595,7 +2535,7 @@ bfa_fcs_lport_ms_sm_gmal_retry(struct bfa_fcs_lport_ms_s *ms,
bfa_sm_fault(ms->port->fcs, event);
}
}
-/**
+/*
* ms_pvt MS local functions
*/
@@ -2657,12 +2597,12 @@ bfa_fcs_lport_ms_gmal_response(void *fcsarg, struct bfa_fcxp_s *fcxp,
}
cthdr = (struct ct_hdr_s *) BFA_FCXP_RSP_PLD(fcxp);
- cthdr->cmd_rsp_code = bfa_os_ntohs(cthdr->cmd_rsp_code);
+ cthdr->cmd_rsp_code = be16_to_cpu(cthdr->cmd_rsp_code);
if (cthdr->cmd_rsp_code == CT_RSP_ACCEPT) {
gmal_resp = (struct fcgs_gmal_resp_s *)(cthdr + 1);
- num_entries = bfa_os_ntohl(gmal_resp->ms_len);
+ num_entries = be32_to_cpu(gmal_resp->ms_len);
if (num_entries == 0) {
bfa_sm_send_event(ms, MSSM_EVENT_RSP_ERROR);
return;
@@ -2795,7 +2735,7 @@ bfa_fcs_lport_ms_sm_gfn_retry(struct bfa_fcs_lport_ms_s *ms,
bfa_sm_fault(ms->port->fcs, event);
}
}
-/**
+/*
* ms_pvt MS local functions
*/
@@ -2853,7 +2793,7 @@ bfa_fcs_lport_ms_gfn_response(void *fcsarg, struct bfa_fcxp_s *fcxp,
}
cthdr = (struct ct_hdr_s *) BFA_FCXP_RSP_PLD(fcxp);
- cthdr->cmd_rsp_code = bfa_os_ntohs(cthdr->cmd_rsp_code);
+ cthdr->cmd_rsp_code = be16_to_cpu(cthdr->cmd_rsp_code);
if (cthdr->cmd_rsp_code == CT_RSP_ACCEPT) {
gfn_resp = (wwn_t *)(cthdr + 1);
@@ -2871,7 +2811,7 @@ bfa_fcs_lport_ms_gfn_response(void *fcsarg, struct bfa_fcxp_s *fcxp,
bfa_sm_send_event(ms, MSSM_EVENT_RSP_ERROR);
}
-/**
+/*
* ms_pvt MS local functions
*/
@@ -3017,7 +2957,7 @@ bfa_fcs_lport_ms_fabric_rscn(struct bfa_fcs_lport_s *port)
bfa_sm_send_event(ms, MSSM_EVENT_PORT_FABRIC_RSCN);
}
-/**
+/*
* @page ns_sm_info VPORT NS State Machine
*
* @section ns_sm_interactions VPORT NS State Machine Interactions
@@ -3080,11 +3020,11 @@ static void bfa_fcs_lport_ns_process_gidft_pids(
u32 *pid_buf, u32 n_pids);
static void bfa_fcs_lport_ns_boot_target_disc(bfa_fcs_lport_t *port);
-/**
+/*
* fcs_ns_sm FCS nameserver interface state machine
*/
-/**
+/*
* VPort NS State Machine events
*/
enum vport_ns_event {
@@ -3139,7 +3079,7 @@ static void bfa_fcs_lport_ns_sm_gid_ft_retry(struct bfa_fcs_lport_ns_s *ns,
enum vport_ns_event event);
static void bfa_fcs_lport_ns_sm_online(struct bfa_fcs_lport_ns_s *ns,
enum vport_ns_event event);
-/**
+/*
* Start in offline state - awaiting linkup
*/
static void
@@ -3628,7 +3568,7 @@ bfa_fcs_lport_ns_sm_online(struct bfa_fcs_lport_ns_s *ns,
-/**
+/*
* ns_pvt Nameserver local functions
*/
@@ -3724,7 +3664,7 @@ bfa_fcs_lport_ns_plogi_response(void *fcsarg, struct bfa_fcxp_s *fcxp,
}
}
-/**
+/*
* Register the symbolic port name.
*/
static void
@@ -3738,7 +3678,7 @@ bfa_fcs_lport_ns_send_rspn_id(void *ns_cbarg, struct bfa_fcxp_s *fcxp_alloced)
u8 symbl[256];
u8 *psymbl = &symbl[0];
- bfa_os_memset(symbl, 0, sizeof(symbl));
+ memset(symbl, 0, sizeof(symbl));
bfa_trc(port->fcs, port->port_cfg.pwwn);
@@ -3755,7 +3695,7 @@ bfa_fcs_lport_ns_send_rspn_id(void *ns_cbarg, struct bfa_fcxp_s *fcxp_alloced)
* for V-Port, form a Port Symbolic Name
*/
if (port->vport) {
- /**
+ /*
* For Vports, we append the vport's port symbolic name
* to that of the base port.
*/
@@ -3815,7 +3755,7 @@ bfa_fcs_lport_ns_rspn_id_response(void *fcsarg, struct bfa_fcxp_s *fcxp,
}
cthdr = (struct ct_hdr_s *) BFA_FCXP_RSP_PLD(fcxp);
- cthdr->cmd_rsp_code = bfa_os_ntohs(cthdr->cmd_rsp_code);
+ cthdr->cmd_rsp_code = be16_to_cpu(cthdr->cmd_rsp_code);
if (cthdr->cmd_rsp_code == CT_RSP_ACCEPT) {
port->stats.ns_rspnid_accepts++;
@@ -3829,7 +3769,7 @@ bfa_fcs_lport_ns_rspn_id_response(void *fcsarg, struct bfa_fcxp_s *fcxp,
bfa_sm_send_event(ns, NSSM_EVENT_RSP_ERROR);
}
-/**
+/*
* Register FC4-Types
*/
static void
@@ -3887,7 +3827,7 @@ bfa_fcs_lport_ns_rft_id_response(void *fcsarg, struct bfa_fcxp_s *fcxp,
}
cthdr = (struct ct_hdr_s *) BFA_FCXP_RSP_PLD(fcxp);
- cthdr->cmd_rsp_code = bfa_os_ntohs(cthdr->cmd_rsp_code);
+ cthdr->cmd_rsp_code = be16_to_cpu(cthdr->cmd_rsp_code);
if (cthdr->cmd_rsp_code == CT_RSP_ACCEPT) {
port->stats.ns_rftid_accepts++;
@@ -3901,7 +3841,7 @@ bfa_fcs_lport_ns_rft_id_response(void *fcsarg, struct bfa_fcxp_s *fcxp,
bfa_sm_send_event(ns, NSSM_EVENT_RSP_ERROR);
}
-/**
+/*
* Register FC4-Features : Should be done after RFT_ID
*/
static void
@@ -3964,7 +3904,7 @@ bfa_fcs_lport_ns_rff_id_response(void *fcsarg, struct bfa_fcxp_s *fcxp,
}
cthdr = (struct ct_hdr_s *) BFA_FCXP_RSP_PLD(fcxp);
- cthdr->cmd_rsp_code = bfa_os_ntohs(cthdr->cmd_rsp_code);
+ cthdr->cmd_rsp_code = be16_to_cpu(cthdr->cmd_rsp_code);
if (cthdr->cmd_rsp_code == CT_RSP_ACCEPT) {
port->stats.ns_rffid_accepts++;
@@ -3982,7 +3922,7 @@ bfa_fcs_lport_ns_rff_id_response(void *fcsarg, struct bfa_fcxp_s *fcxp,
} else
bfa_sm_send_event(ns, NSSM_EVENT_RSP_ERROR);
}
-/**
+/*
* Query Fabric for FC4-Types Devices.
*
* TBD : Need to use a local (FCS private) response buffer, since the response
@@ -4058,7 +3998,7 @@ bfa_fcs_lport_ns_gid_ft_response(void *fcsarg, struct bfa_fcxp_s *fcxp,
}
cthdr = (struct ct_hdr_s *) BFA_FCXP_RSP_PLD(fcxp);
- cthdr->cmd_rsp_code = bfa_os_ntohs(cthdr->cmd_rsp_code);
+ cthdr->cmd_rsp_code = be16_to_cpu(cthdr->cmd_rsp_code);
switch (cthdr->cmd_rsp_code) {
@@ -4102,7 +4042,7 @@ bfa_fcs_lport_ns_gid_ft_response(void *fcsarg, struct bfa_fcxp_s *fcxp,
}
}
-/**
+/*
* This routine will be called by bfa_timer on timer timeouts.
*
* param[in] port - pointer to bfa_fcs_lport_t.
@@ -4166,7 +4106,7 @@ bfa_fcs_lport_ns_process_gidft_pids(struct bfa_fcs_lport_s *port, u32 *pid_buf,
}
}
-/**
+/*
* fcs_ns_public FCS nameserver public interfaces
*/
@@ -4227,7 +4167,7 @@ bfa_fcs_lport_ns_boot_target_disc(bfa_fcs_lport_t *port)
}
}
-/**
+/*
* FCS SCN
*/
@@ -4250,11 +4190,11 @@ static void bfa_fcs_lport_scn_send_ls_acc(struct bfa_fcs_lport_s *port,
struct fchs_s *rx_fchs);
static void bfa_fcs_lport_scn_timeout(void *arg);
-/**
+/*
* fcs_scm_sm FCS SCN state machine
*/
-/**
+/*
* VPort SCN State Machine events
*/
enum port_scn_event {
@@ -4278,7 +4218,7 @@ static void bfa_fcs_lport_scn_sm_scr_retry(struct bfa_fcs_lport_scn_s *scn,
static void bfa_fcs_lport_scn_sm_online(struct bfa_fcs_lport_scn_s *scn,
enum port_scn_event event);
-/**
+/*
* Starting state - awaiting link up.
*/
static void
@@ -4382,11 +4322,11 @@ bfa_fcs_lport_scn_sm_online(struct bfa_fcs_lport_scn_s *scn,
-/**
+/*
* fcs_scn_private FCS SCN private functions
*/
-/**
+/*
* This routine will be called to send a SCR command.
*/
static void
@@ -4499,7 +4439,7 @@ bfa_fcs_lport_scn_send_ls_acc(struct bfa_fcs_lport_s *port,
FC_MAX_PDUSZ, 0);
}
-/**
+/*
* This routine will be called by bfa_timer on timer timeouts.
*
* param[in] vport - pointer to bfa_fcs_lport_t.
@@ -4522,7 +4462,7 @@ bfa_fcs_lport_scn_timeout(void *arg)
-/**
+/*
* fcs_scn_public FCS state change notification public interfaces
*/
@@ -4563,7 +4503,7 @@ bfa_fcs_lport_scn_portid_rscn(struct bfa_fcs_lport_s *port, u32 rpid)
bfa_trc(port->fcs, rpid);
- /**
+ /*
* If this is an unknown device, then it just came online.
* Otherwise let rport handle the RSCN event.
*/
@@ -4579,7 +4519,7 @@ bfa_fcs_lport_scn_portid_rscn(struct bfa_fcs_lport_s *port, u32 rpid)
bfa_fcs_rport_scn(rport);
}
-/**
+/*
* rscn format based PID comparison
*/
#define __fc_pid_match(__c0, __c1, __fmt) \
@@ -4624,7 +4564,7 @@ bfa_fcs_lport_scn_process_rscn(struct bfa_fcs_lport_s *port,
int i = 0, j;
num_entries =
- (bfa_os_ntohs(rscn->payldlen) -
+ (be16_to_cpu(rscn->payldlen) -
sizeof(u32)) / sizeof(rscn->event[0]);
bfa_trc(port->fcs, num_entries);
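The RSCN payload is a 4-byte header word (command, page length, big-endian payload length) followed by an array of 4-byte page entries, so the entry count above is simply (payload length - header word) / entry size once the endian conversion is done. A self-contained sketch with a simplified stand-in structure (example_rscn approximates, but is not, the driver's RSCN payload type):

#include <linux/types.h>
#include <asm/byteorder.h>

struct example_rscn_event {		/* one 4-byte RSCN page entry */
	u32 portid;			/* address format + affected port id */
};

struct example_rscn {			/* stand-in for the RSCN payload */
	u8  command;
	u8  pagelen;
	__be16 payldlen;		/* total payload length, wire order */
	struct example_rscn_event event[1];
};

static u32 example_rscn_num_entries(const struct example_rscn *rscn)
{
	/* subtract the leading command/pagelen/payldlen word itself */
	return (be16_to_cpu(rscn->payldlen) - sizeof(u32)) /
		sizeof(rscn->event[0]);
}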
@@ -4691,18 +4631,18 @@ bfa_fcs_lport_scn_process_rscn(struct bfa_fcs_lport_s *port,
}
}
- /**
- * If any of area, domain or fabric RSCN is received, do a fresh discovery
- * to find new devices.
+ /*
+ * If any of area, domain or fabric RSCN is received, do a fresh
+ * discovery to find new devices.
*/
if (nsquery)
bfa_fcs_lport_ns_query(port);
}
-/**
+/*
* BFA FCS port
*/
-/**
+/*
* fcs_port_api BFA FCS port API
*/
struct bfa_fcs_lport_s *
@@ -4943,10 +4883,10 @@ bfa_fcs_lport_get_stats(struct bfa_fcs_lport_s *fcs_port,
void
bfa_fcs_lport_clear_stats(struct bfa_fcs_lport_s *fcs_port)
{
- bfa_os_memset(&fcs_port->stats, 0, sizeof(struct bfa_lport_stats_s));
+ memset(&fcs_port->stats, 0, sizeof(struct bfa_lport_stats_s));
}
-/**
+/*
* FCS virtual port state machine
*/
@@ -4967,11 +4907,11 @@ static void bfa_fcs_vport_timeout(void *vport_arg);
static void bfa_fcs_vport_do_logo(struct bfa_fcs_vport_s *vport);
static void bfa_fcs_vport_free(struct bfa_fcs_vport_s *vport);
-/**
+/*
* fcs_vport_sm FCS virtual port state machine
*/
-/**
+/*
* VPort State Machine events
*/
enum bfa_fcs_vport_event {
@@ -5024,7 +4964,7 @@ static struct bfa_sm_table_s vport_sm_table[] = {
{BFA_SM(bfa_fcs_vport_sm_error), BFA_FCS_VPORT_ERROR}
};
-/**
+/*
* Beginning state.
*/
static void
@@ -5045,7 +4985,7 @@ bfa_fcs_vport_sm_uninit(struct bfa_fcs_vport_s *vport,
}
}
-/**
+/*
* Created state - a start event is required to start up the state machine.
*/
static void
@@ -5062,7 +5002,7 @@ bfa_fcs_vport_sm_created(struct bfa_fcs_vport_s *vport,
bfa_sm_set_state(vport, bfa_fcs_vport_sm_fdisc);
bfa_fcs_vport_do_fdisc(vport);
} else {
- /**
+ /*
* Fabric is offline or not NPIV capable, stay in
* offline state.
*/
@@ -5078,7 +5018,7 @@ bfa_fcs_vport_sm_created(struct bfa_fcs_vport_s *vport,
case BFA_FCS_VPORT_SM_ONLINE:
case BFA_FCS_VPORT_SM_OFFLINE:
- /**
+ /*
* Ignore ONLINE/OFFLINE events from fabric
* till vport is started.
*/
@@ -5089,7 +5029,7 @@ bfa_fcs_vport_sm_created(struct bfa_fcs_vport_s *vport,
}
}
-/**
+/*
* Offline state - awaiting ONLINE event from fabric SM.
*/
static void
@@ -5127,7 +5067,7 @@ bfa_fcs_vport_sm_offline(struct bfa_fcs_vport_s *vport,
}
-/**
+/*
* FDISC is sent and awaiting reply from fabric.
*/
static void
@@ -5174,7 +5114,7 @@ bfa_fcs_vport_sm_fdisc(struct bfa_fcs_vport_s *vport,
}
}
-/**
+/*
* FDISC attempt failed - a timer is active to retry FDISC.
*/
static void
@@ -5208,7 +5148,7 @@ bfa_fcs_vport_sm_fdisc_retry(struct bfa_fcs_vport_s *vport,
}
}
-/**
+/*
* Vport is online (FDISC is complete).
*/
static void
@@ -5235,7 +5175,7 @@ bfa_fcs_vport_sm_online(struct bfa_fcs_vport_s *vport,
}
}
-/**
+/*
* Vport is being deleted - awaiting lport delete completion to send
* LOGO to fabric.
*/
@@ -5264,7 +5204,7 @@ bfa_fcs_vport_sm_deleting(struct bfa_fcs_vport_s *vport,
}
}
-/**
+/*
* Error State.
* This state will be set when the Vport Creation fails due
* to errors like Dup WWN. In this state only operation allowed
@@ -5288,7 +5228,7 @@ bfa_fcs_vport_sm_error(struct bfa_fcs_vport_s *vport,
}
}
-/**
+/*
* Lport cleanup is in progress since vport is being deleted. Fabric is
* offline, so no LOGO is needed to complete vport deletion.
*/
@@ -5313,7 +5253,7 @@ bfa_fcs_vport_sm_cleanup(struct bfa_fcs_vport_s *vport,
}
}
-/**
+/*
* LOGO is sent to fabric. Vport delete is in progress. Lport delete cleanup
* is done.
*/
@@ -5347,10 +5287,10 @@ bfa_fcs_vport_sm_logo(struct bfa_fcs_vport_s *vport,
-/**
+/*
* fcs_vport_private FCS virtual port private functions
*/
-/**
+/*
* This routine will be called to send a FDISC command.
*/
static void
@@ -5397,7 +5337,7 @@ bfa_fcs_vport_fdisc_rejected(struct bfa_fcs_vport_s *vport)
}
}
-/**
+/*
* Called to send a logout to the fabric. Used when a V-Port is
* deleted/stopped.
*/
@@ -5411,7 +5351,7 @@ bfa_fcs_vport_do_logo(struct bfa_fcs_vport_s *vport)
}
-/**
+/*
* This routine will be called by bfa_timer on timer timeouts.
*
* param[in] vport - pointer to bfa_fcs_vport_t.
@@ -5449,11 +5389,11 @@ bfa_fcs_vport_free(struct bfa_fcs_vport_s *vport)
-/**
+/*
* fcs_vport_public FCS virtual port public interfaces
*/
-/**
+/*
* Online notification from fabric SM.
*/
void
@@ -5463,7 +5403,7 @@ bfa_fcs_vport_online(struct bfa_fcs_vport_s *vport)
bfa_sm_send_event(vport, BFA_FCS_VPORT_SM_ONLINE);
}
-/**
+/*
* Offline notification from fabric SM.
*/
void
@@ -5473,7 +5413,7 @@ bfa_fcs_vport_offline(struct bfa_fcs_vport_s *vport)
bfa_sm_send_event(vport, BFA_FCS_VPORT_SM_OFFLINE);
}
-/**
+/*
* Cleanup notification from fabric SM on link timer expiry.
*/
void
@@ -5481,7 +5421,7 @@ bfa_fcs_vport_cleanup(struct bfa_fcs_vport_s *vport)
{
vport->vport_stats.fab_cleanup++;
}
-/**
+/*
* delete notification from fabric SM. To be invoked from within FCS.
*/
void
@@ -5490,7 +5430,7 @@ bfa_fcs_vport_fcs_delete(struct bfa_fcs_vport_s *vport)
bfa_sm_send_event(vport, BFA_FCS_VPORT_SM_DELETE);
}
-/**
+/*
* Delete completion callback from associated lport
*/
void
@@ -5501,11 +5441,11 @@ bfa_fcs_vport_delete_comp(struct bfa_fcs_vport_s *vport)
-/**
+/*
* fcs_vport_api Virtual port API
*/
-/**
+/*
* Use this function to instantiate a new FCS vport object. This
* function will not trigger any HW initialization process (which will be
* done in vport_start() call)
@@ -5555,7 +5495,7 @@ bfa_fcs_vport_create(struct bfa_fcs_vport_s *vport, struct bfa_fcs_s *fcs,
return BFA_STATUS_OK;
}
-/**
+/*
* Use this function to instantiate a new FCS PBC vport object. This
* function will not trigger any HW initialization process (which will be
* done in vport_start() call)
@@ -5585,7 +5525,7 @@ bfa_fcs_pbc_vport_create(struct bfa_fcs_vport_s *vport, struct bfa_fcs_s *fcs,
return rc;
}
-/**
+/*
* Use this function to find out if this is a pbc vport or not.
*
* @param[in] vport - pointer to bfa_fcs_vport_t.
@@ -5603,7 +5543,7 @@ bfa_fcs_is_pbc_vport(struct bfa_fcs_vport_s *vport)
}
-/**
+/*
* Use this function to initialize the vport.
*
* @param[in] vport - pointer to bfa_fcs_vport_t.
@@ -5618,7 +5558,7 @@ bfa_fcs_vport_start(struct bfa_fcs_vport_s *vport)
return BFA_STATUS_OK;
}
-/**
+/*
* Use this function to quiesce the vport object. This function will return
* immediately; when the vport is actually stopped, the
* bfa_drv_vport_stop_cb() will be called.
@@ -5635,7 +5575,7 @@ bfa_fcs_vport_stop(struct bfa_fcs_vport_s *vport)
return BFA_STATUS_OK;
}
-/**
+/*
* Use this function to delete a vport object. Fabric object should
* be stopped before this function call.
*
@@ -5657,7 +5597,7 @@ bfa_fcs_vport_delete(struct bfa_fcs_vport_s *vport)
return BFA_STATUS_OK;
}
-/**
+/*
* Use this function to get vport's current status info.
*
* param[in] vport pointer to bfa_fcs_vport_t.
@@ -5672,13 +5612,13 @@ bfa_fcs_vport_get_attr(struct bfa_fcs_vport_s *vport,
if (vport == NULL || attr == NULL)
return;
- bfa_os_memset(attr, 0, sizeof(struct bfa_vport_attr_s));
+ memset(attr, 0, sizeof(struct bfa_vport_attr_s));
bfa_fcs_lport_get_attr(&vport->lport, &attr->port_attr);
attr->vport_state = bfa_sm_to_state(vport_sm_table, vport->sm);
}
-/**
+/*
* Use this function to get vport's statistics.
*
* param[in] vport pointer to bfa_fcs_vport_t.
@@ -5693,7 +5633,7 @@ bfa_fcs_vport_get_stats(struct bfa_fcs_vport_s *vport,
*stats = vport->vport_stats;
}
-/**
+/*
* Use this function to clear vport's statistics.
*
* param[in] vport pointer to bfa_fcs_vport_t.
@@ -5703,10 +5643,10 @@ bfa_fcs_vport_get_stats(struct bfa_fcs_vport_s *vport,
void
bfa_fcs_vport_clr_stats(struct bfa_fcs_vport_s *vport)
{
- bfa_os_memset(&vport->vport_stats, 0, sizeof(struct bfa_vport_stats_s));
+ memset(&vport->vport_stats, 0, sizeof(struct bfa_vport_stats_s));
}
-/**
+/*
* Lookup a virtual port. Excludes base port from lookup.
*/
struct bfa_fcs_vport_s *
@@ -5728,7 +5668,7 @@ bfa_fcs_vport_lookup(struct bfa_fcs_s *fcs, u16 vf_id, wwn_t vpwwn)
return vport;
}
-/**
+/*
* FDISC Response
*/
void
@@ -5784,7 +5724,7 @@ bfa_cb_lps_fdisc_comp(void *bfad, void *uarg, bfa_status_t status)
}
}
-/**
+/*
* LOGO response
*/
void
@@ -5794,7 +5734,7 @@ bfa_cb_lps_fdisclogo_comp(void *bfad, void *uarg)
bfa_sm_send_event(vport, BFA_FCS_VPORT_SM_RSP_OK);
}
-/**
+/*
* Received clear virtual link
*/
void
diff --git a/drivers/scsi/bfa/bfa_fcs_rport.c b/drivers/scsi/bfa/bfa_fcs_rport.c
index 635f0cd88714..47f35c0ef29a 100644
--- a/drivers/scsi/bfa/bfa_fcs_rport.c
+++ b/drivers/scsi/bfa/bfa_fcs_rport.c
@@ -15,7 +15,7 @@
* General Public License for more details.
*/
-/**
+/*
* rport.c Remote port implementation.
*/
@@ -75,7 +75,7 @@ static void bfa_fcs_rport_send_ls_rjt(struct bfa_fcs_rport_s *rport,
static void bfa_fcs_rport_process_adisc(struct bfa_fcs_rport_s *rport,
struct fchs_s *rx_fchs, u16 len);
static void bfa_fcs_rport_send_prlo_acc(struct bfa_fcs_rport_s *rport);
-/**
+/*
* fcs_rport_sm FCS rport state machine events
*/
@@ -172,7 +172,7 @@ static struct bfa_sm_table_s rport_sm_table[] = {
{BFA_SM(bfa_fcs_rport_sm_nsdisc_sent), BFA_RPORT_NSDISC},
};
-/**
+/*
* Beginning state.
*/
static void
@@ -210,7 +210,7 @@ bfa_fcs_rport_sm_uninit(struct bfa_fcs_rport_s *rport, enum rport_event event)
}
}
-/**
+/*
* PLOGI is being sent.
*/
static void
@@ -262,7 +262,7 @@ bfa_fcs_rport_sm_plogi_sending(struct bfa_fcs_rport_s *rport,
}
}
-/**
+/*
* PLOGI is being sent.
*/
static void
@@ -287,7 +287,7 @@ bfa_fcs_rport_sm_plogiacc_sending(struct bfa_fcs_rport_s *rport,
case RPSM_EVENT_PLOGI_RCVD:
case RPSM_EVENT_SCN:
- /**
+ /*
* Ignore, SCN is possibly online notification.
*/
break;
@@ -309,7 +309,7 @@ bfa_fcs_rport_sm_plogiacc_sending(struct bfa_fcs_rport_s *rport,
break;
case RPSM_EVENT_HCB_OFFLINE:
- /**
+ /*
* Ignore BFA callback, on a PLOGI receive we call bfa offline.
*/
break;
@@ -319,7 +319,7 @@ bfa_fcs_rport_sm_plogiacc_sending(struct bfa_fcs_rport_s *rport,
}
}
-/**
+/*
* PLOGI is sent.
*/
static void
@@ -380,7 +380,7 @@ bfa_fcs_rport_sm_plogi_retry(struct bfa_fcs_rport_s *rport,
}
}
-/**
+/*
* PLOGI is sent.
*/
static void
@@ -475,7 +475,7 @@ bfa_fcs_rport_sm_plogi(struct bfa_fcs_rport_s *rport, enum rport_event event)
}
}
-/**
+/*
* PLOGI is complete. Awaiting BFA rport online callback. FC-4s
* are offline.
*/
@@ -519,7 +519,7 @@ bfa_fcs_rport_sm_hal_online(struct bfa_fcs_rport_s *rport,
break;
case RPSM_EVENT_SCN:
- /**
+ /*
* @todo
* Ignore SCN - PLOGI just completed, FC-4 login should detect
* device failures.
@@ -531,7 +531,7 @@ bfa_fcs_rport_sm_hal_online(struct bfa_fcs_rport_s *rport,
}
}
-/**
+/*
* Rport is ONLINE. FC-4s active.
*/
static void
@@ -580,7 +580,7 @@ bfa_fcs_rport_sm_online(struct bfa_fcs_rport_s *rport, enum rport_event event)
}
}
-/**
+/*
* An SCN event is received in ONLINE state. NS query is being sent
* prior to ADISC authentication with rport. FC-4s are paused.
*/
@@ -604,7 +604,7 @@ bfa_fcs_rport_sm_nsquery_sending(struct bfa_fcs_rport_s *rport,
break;
case RPSM_EVENT_SCN:
- /**
+ /*
* ignore SCN, wait for response to query itself
*/
break;
@@ -638,7 +638,7 @@ bfa_fcs_rport_sm_nsquery_sending(struct bfa_fcs_rport_s *rport,
}
}
-/**
+/*
* An SCN event is received in ONLINE state. NS query is sent to rport.
* FC-4s are paused.
*/
@@ -697,7 +697,7 @@ bfa_fcs_rport_sm_nsquery(struct bfa_fcs_rport_s *rport, enum rport_event event)
}
}
-/**
+/*
* An SCN event is received in ONLINE state. ADISC is being sent for
* authenticating with rport. FC-4s are paused.
*/
@@ -748,7 +748,7 @@ bfa_fcs_rport_sm_adisc_sending(struct bfa_fcs_rport_s *rport,
}
}
-/**
+/*
* An SCN event is received in ONLINE state. ADISC is sent to rport.
* FC-4s are paused.
*/
@@ -765,7 +765,7 @@ bfa_fcs_rport_sm_adisc(struct bfa_fcs_rport_s *rport, enum rport_event event)
break;
case RPSM_EVENT_PLOGI_RCVD:
- /**
+ /*
* Too complex to clean up FC-4 & rport and then accept the PLOGI.
* At least go offline when a PLOGI is received.
*/
@@ -787,7 +787,7 @@ bfa_fcs_rport_sm_adisc(struct bfa_fcs_rport_s *rport, enum rport_event event)
break;
case RPSM_EVENT_SCN:
- /**
+ /*
* already processing RSCN
*/
break;
@@ -810,7 +810,7 @@ bfa_fcs_rport_sm_adisc(struct bfa_fcs_rport_s *rport, enum rport_event event)
}
}
-/**
+/*
* Rport has sent LOGO. Awaiting FC-4 offline completion callback.
*/
static void
@@ -841,7 +841,7 @@ bfa_fcs_rport_sm_fc4_logorcv(struct bfa_fcs_rport_s *rport,
}
}
-/**
+/*
* LOGO needs to be sent to rport. Awaiting FC-4 offline completion
* callback.
*/
@@ -864,7 +864,7 @@ bfa_fcs_rport_sm_fc4_logosend(struct bfa_fcs_rport_s *rport,
}
}
-/**
+/*
* Rport is going offline. Awaiting FC-4 offline completion callback.
*/
static void
@@ -886,7 +886,7 @@ bfa_fcs_rport_sm_fc4_offline(struct bfa_fcs_rport_s *rport,
case RPSM_EVENT_LOGO_RCVD:
case RPSM_EVENT_PRLO_RCVD:
case RPSM_EVENT_ADDRESS_CHANGE:
- /**
+ /*
* rport is already going offline.
* SCN - ignore and wait till transitioning to offline state
*/
@@ -901,7 +901,7 @@ bfa_fcs_rport_sm_fc4_offline(struct bfa_fcs_rport_s *rport,
}
}
-/**
+/*
* Rport is offline. FC-4s are offline. Awaiting BFA rport offline
* callback.
*/
@@ -945,7 +945,7 @@ bfa_fcs_rport_sm_hcb_offline(struct bfa_fcs_rport_s *rport,
case RPSM_EVENT_SCN:
case RPSM_EVENT_LOGO_RCVD:
case RPSM_EVENT_PRLO_RCVD:
- /**
+ /*
* Ignore, already offline.
*/
break;
@@ -955,7 +955,7 @@ bfa_fcs_rport_sm_hcb_offline(struct bfa_fcs_rport_s *rport,
}
}
-/**
+/*
* Rport is offline. FC-4s are offline. Awaiting BFA rport offline
* callback to send LOGO accept.
*/
@@ -1009,7 +1009,7 @@ bfa_fcs_rport_sm_hcb_logorcv(struct bfa_fcs_rport_s *rport,
case RPSM_EVENT_LOGO_RCVD:
case RPSM_EVENT_PRLO_RCVD:
- /**
+ /*
* Ignore - already processing a LOGO.
*/
break;
@@ -1019,7 +1019,7 @@ bfa_fcs_rport_sm_hcb_logorcv(struct bfa_fcs_rport_s *rport,
}
}
-/**
+/*
* Rport is being deleted. FC-4s are offline.
* Awaiting BFA rport offline
* callback to send LOGO.
@@ -1048,7 +1048,7 @@ bfa_fcs_rport_sm_hcb_logosend(struct bfa_fcs_rport_s *rport,
}
}
-/**
+/*
* Rport is being deleted. FC-4s are offline. LOGO is being sent.
*/
static void
@@ -1082,7 +1082,7 @@ bfa_fcs_rport_sm_logo_sending(struct bfa_fcs_rport_s *rport,
}
}
-/**
+/*
* Rport is offline. FC-4s are offline. BFA rport is offline.
* Timer active to delete stale rport.
*/
@@ -1142,7 +1142,7 @@ bfa_fcs_rport_sm_offline(struct bfa_fcs_rport_s *rport, enum rport_event event)
}
}
-/**
+/*
* Rport address has changed. Nameserver discovery request is being sent.
*/
static void
@@ -1199,7 +1199,7 @@ bfa_fcs_rport_sm_nsdisc_sending(struct bfa_fcs_rport_s *rport,
}
}
-/**
+/*
* Nameserver discovery failed. Waiting for timeout to retry.
*/
static void
@@ -1263,7 +1263,7 @@ bfa_fcs_rport_sm_nsdisc_retry(struct bfa_fcs_rport_s *rport,
}
}
-/**
+/*
* Rport address has changed. Nameserver discovery request is sent.
*/
static void
@@ -1329,13 +1329,13 @@ bfa_fcs_rport_sm_nsdisc_sent(struct bfa_fcs_rport_s *rport,
bfa_fcs_rport_send_prlo_acc(rport);
break;
case RPSM_EVENT_SCN:
- /**
+ /*
* ignore, wait for NS query response
*/
break;
case RPSM_EVENT_LOGO_RCVD:
- /**
+ /*
* Not logged-in yet. Accept LOGO.
*/
bfa_fcs_rport_send_logo_acc(rport);
@@ -1354,7 +1354,7 @@ bfa_fcs_rport_sm_nsdisc_sent(struct bfa_fcs_rport_s *rport,
-/**
+/*
* fcs_rport_private FCS RPORT private functions
*/
@@ -1415,7 +1415,7 @@ bfa_fcs_rport_plogi_response(void *fcsarg, struct bfa_fcxp_s *fcxp, void *cbarg,
plogi_rsp = (struct fc_logi_s *) BFA_FCXP_RSP_PLD(fcxp);
- /**
+ /*
* Check for failure first.
*/
if (plogi_rsp->els_cmd.els_code != FC_ELS_ACC) {
@@ -1436,7 +1436,7 @@ bfa_fcs_rport_plogi_response(void *fcsarg, struct bfa_fcxp_s *fcxp, void *cbarg,
return;
}
- /**
+ /*
* PLOGI is complete. Make sure this device is not one of the known
* device with a new FC port address.
*/
@@ -1468,7 +1468,7 @@ bfa_fcs_rport_plogi_response(void *fcsarg, struct bfa_fcxp_s *fcxp, void *cbarg,
}
}
- /**
+ /*
* Normal login path -- no evil twins.
*/
rport->stats.plogi_accs++;
@@ -1621,7 +1621,7 @@ bfa_fcs_rport_gidpn_response(void *fcsarg, struct bfa_fcxp_s *fcxp, void *cbarg,
bfa_trc(rport->fcs, rport->pwwn);
cthdr = (struct ct_hdr_s *) BFA_FCXP_RSP_PLD(fcxp);
- cthdr->cmd_rsp_code = bfa_os_ntohs(cthdr->cmd_rsp_code);
+ cthdr->cmd_rsp_code = be16_to_cpu(cthdr->cmd_rsp_code);
if (cthdr->cmd_rsp_code == CT_RSP_ACCEPT) {
/* Check if the pid is the same as before. */
@@ -1691,7 +1691,7 @@ bfa_fcs_rport_gpnid_response(void *fcsarg, struct bfa_fcxp_s *fcxp, void *cbarg,
bfa_trc(rport->fcs, rport->pwwn);
cthdr = (struct ct_hdr_s *) BFA_FCXP_RSP_PLD(fcxp);
- cthdr->cmd_rsp_code = bfa_os_ntohs(cthdr->cmd_rsp_code);
+ cthdr->cmd_rsp_code = be16_to_cpu(cthdr->cmd_rsp_code);
if (cthdr->cmd_rsp_code == CT_RSP_ACCEPT) {
bfa_sm_send_event(rport, RPSM_EVENT_ACCEPTED);
@@ -1722,7 +1722,7 @@ bfa_fcs_rport_gpnid_response(void *fcsarg, struct bfa_fcxp_s *fcxp, void *cbarg,
}
}
-/**
+/*
* Called to send a logout to the rport.
*/
static void
@@ -1759,7 +1759,7 @@ bfa_fcs_rport_send_logo(void *rport_cbarg, struct bfa_fcxp_s *fcxp_alloced)
bfa_sm_send_event(rport, RPSM_EVENT_FCXP_SENT);
}
-/**
+/*
* Send ACC for a LOGO received.
*/
static void
@@ -1788,7 +1788,7 @@ bfa_fcs_rport_send_logo_acc(void *rport_cbarg)
FC_CLASS_3, len, &fchs, NULL, NULL, FC_MAX_PDUSZ, 0);
}
-/**
+/*
* brief
* This routine will be called by bfa_timer on timer timeouts.
*
@@ -1961,7 +1961,7 @@ bfa_fcs_rport_alloc(struct bfa_fcs_lport_s *port, wwn_t pwwn, u32 rpid)
struct bfa_fcs_rport_s *rport;
struct bfad_rport_s *rport_drv;
- /**
+ /*
* allocate rport
*/
if (bfa_fcb_rport_alloc(fcs->bfad, &rport, &rport_drv)
@@ -1979,7 +1979,7 @@ bfa_fcs_rport_alloc(struct bfa_fcs_lport_s *port, wwn_t pwwn, u32 rpid)
rport->pid = rpid;
rport->pwwn = pwwn;
- /**
+ /*
* allocate BFA rport
*/
rport->bfa_rport = bfa_rport_create(port->fcs->bfa, rport);
@@ -1989,7 +1989,7 @@ bfa_fcs_rport_alloc(struct bfa_fcs_lport_s *port, wwn_t pwwn, u32 rpid)
return NULL;
}
- /**
+ /*
* allocate FC-4s
*/
bfa_assert(bfa_fcs_lport_is_initiator(port));
@@ -2021,7 +2021,7 @@ bfa_fcs_rport_free(struct bfa_fcs_rport_s *rport)
{
struct bfa_fcs_lport_s *port = rport->port;
- /**
+ /*
* - delete FC-4s
* - delete BFA rport
* - remove from queue of rports
@@ -2093,7 +2093,7 @@ bfa_fcs_rport_offline_action(struct bfa_fcs_rport_s *rport)
}
}
-/**
+/*
* Update rport parameters from PLOGI or PLOGI accept.
*/
static void
@@ -2101,14 +2101,14 @@ bfa_fcs_rport_update(struct bfa_fcs_rport_s *rport, struct fc_logi_s *plogi)
{
bfa_fcs_lport_t *port = rport->port;
- /**
+ /*
* - port name
* - node name
*/
rport->pwwn = plogi->port_name;
rport->nwwn = plogi->node_name;
- /**
+ /*
* - class of service
*/
rport->fc_cos = 0;
@@ -2118,16 +2118,16 @@ bfa_fcs_rport_update(struct bfa_fcs_rport_s *rport, struct fc_logi_s *plogi)
if (plogi->class2.class_valid)
rport->fc_cos |= FC_CLASS_2;
- /**
+ /*
* - CISC
* - MAX receive frame size
*/
rport->cisc = plogi->csp.cisc;
- rport->maxfrsize = bfa_os_ntohs(plogi->class3.rxsz);
+ rport->maxfrsize = be16_to_cpu(plogi->class3.rxsz);
- bfa_trc(port->fcs, bfa_os_ntohs(plogi->csp.bbcred));
+ bfa_trc(port->fcs, be16_to_cpu(plogi->csp.bbcred));
bfa_trc(port->fcs, port->fabric->bb_credit);
- /**
+ /*
* Direct Attach P2P mode :
* This is to handle a bug (233476) in IBM targets in Direct Attach
* Mode. Basically, in FLOGI Accept the target would have
@@ -2136,19 +2136,19 @@ bfa_fcs_rport_update(struct bfa_fcs_rport_s *rport, struct fc_logi_s *plogi)
* in PLOGI.
*/
if ((!bfa_fcs_fabric_is_switched(port->fabric)) &&
- (bfa_os_ntohs(plogi->csp.bbcred) < port->fabric->bb_credit)) {
+ (be16_to_cpu(plogi->csp.bbcred) < port->fabric->bb_credit)) {
- bfa_trc(port->fcs, bfa_os_ntohs(plogi->csp.bbcred));
+ bfa_trc(port->fcs, be16_to_cpu(plogi->csp.bbcred));
bfa_trc(port->fcs, port->fabric->bb_credit);
- port->fabric->bb_credit = bfa_os_ntohs(plogi->csp.bbcred);
+ port->fabric->bb_credit = be16_to_cpu(plogi->csp.bbcred);
bfa_fcport_set_tx_bbcredit(port->fcs->bfa,
port->fabric->bb_credit);
}
}
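
This is another spot where a big-endian PLOGI common-service-parameter field is now read with be16_to_cpu(). The sketch below restates the direct-attach buffer-to-buffer credit adjustment in stand-alone C; the struct and helper names are assumptions made for illustration, not the driver's API.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>
#include <arpa/inet.h>   /* ntohs() standing in for be16_to_cpu() */

/* Illustrative fabric state: the tx BB credit currently programmed. */
struct fabric_state {
	bool     switched;    /* attached to a fabric switch?          */
	uint16_t bb_credit;   /* credits the peer granted us via FLOGI */
};

/*
 * In direct-attach (point-to-point) mode the peer's PLOGI may advertise a
 * smaller BB credit than what FLOGI reported; if so, lower ours to match so
 * we never have more unacknowledged frames in flight than the peer can buffer.
 */
static void update_bb_credit(struct fabric_state *fab, uint16_t plogi_bbcred_be)
{
	uint16_t peer = ntohs(plogi_bbcred_be);

	if (!fab->switched && peer < fab->bb_credit)
		fab->bb_credit = peer;
}

int main(void)
{
	struct fabric_state fab = { .switched = false, .bb_credit = 16 };

	update_bb_credit(&fab, htons(8));
	printf("bb_credit = %u\n", fab.bb_credit);   /* prints 8 */
	return 0;
}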
-/**
+/*
* Called to handle LOGO received from an existing remote port.
*/
static void
@@ -2164,11 +2164,11 @@ bfa_fcs_rport_process_logo(struct bfa_fcs_rport_s *rport, struct fchs_s *fchs)
-/**
+/*
* fcs_rport_public FCS rport public interfaces
*/
-/**
+/*
* Called by bport/vport to create a remote port instance for a discovered
* remote device.
*
@@ -2191,7 +2191,7 @@ bfa_fcs_rport_create(struct bfa_fcs_lport_s *port, u32 rpid)
return rport;
}
-/**
+/*
* Called to create a rport for which only the wwn is known.
*
* @param[in] port - base port
@@ -2211,7 +2211,7 @@ bfa_fcs_rport_create_by_wwn(struct bfa_fcs_lport_s *port, wwn_t rpwwn)
bfa_sm_send_event(rport, RPSM_EVENT_ADDRESS_DISC);
return rport;
}
-/**
+/*
* Called by bport in private loop topology to indicate that a
* rport has been discovered and plogi has been completed.
*
@@ -2233,7 +2233,7 @@ bfa_fcs_rport_start(struct bfa_fcs_lport_s *port, struct fchs_s *fchs,
bfa_sm_send_event(rport, RPSM_EVENT_PLOGI_COMP);
}
-/**
+/*
* Called by bport/vport to handle PLOGI received from a new remote port.
* If an existing rport does a plogi, it will be handled separately.
*/
@@ -2272,7 +2272,7 @@ wwn_compare(wwn_t wwn1, wwn_t wwn2)
return 0;
}
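
wwn_compare() above decides which side backs off when two ports PLOGI each other at the same time. A WWN is an 8-byte identifier kept in on-wire (big-endian) order, so a byte-wise comparison doubles as a numeric one; the stand-alone sketch below only illustrates that idea and is not the driver's exact routine.

#include <stdint.h>
#include <stdio.h>
#include <string.h>

typedef uint64_t wwn_t;   /* 8-byte World Wide Name, stored big-endian */

/* memcmp() semantics: <0, 0 or >0. Because the bytes are stored big-endian,
 * this orders WWNs numerically as they appear on the wire. */
static int wwn_cmp(wwn_t a, wwn_t b)
{
	return memcmp(&a, &b, sizeof(wwn_t));
}

int main(void)
{
	wwn_t mine, theirs;

	memset(&mine, 0x20, sizeof(mine));     /* illustrative values */
	memset(&theirs, 0x21, sizeof(theirs));
	/* The port with the smaller pwwn ignores the incoming PLOGI. */
	printf("%s\n", wwn_cmp(mine, theirs) < 0 ? "ignore PLOGI"
						 : "accept PLOGI");
	return 0;
}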
-/**
+/*
* Called by bport/vport to handle PLOGI received from an existing
* remote port.
*/
@@ -2280,7 +2280,7 @@ void
bfa_fcs_rport_plogi(struct bfa_fcs_rport_s *rport, struct fchs_s *rx_fchs,
struct fc_logi_s *plogi)
{
- /**
+ /*
* @todo Handle P2P and initiator-initiator.
*/
@@ -2289,7 +2289,7 @@ bfa_fcs_rport_plogi(struct bfa_fcs_rport_s *rport, struct fchs_s *rx_fchs,
rport->reply_oxid = rx_fchs->ox_id;
bfa_trc(rport->fcs, rport->reply_oxid);
- /**
+ /*
* In switched fabric topology, both ports may send
* PLOGI to each other. If our pwwn is smaller, ignore the PLOGI,
* unless it is from a well known address.
@@ -2307,7 +2307,7 @@ bfa_fcs_rport_plogi(struct bfa_fcs_rport_s *rport, struct fchs_s *rx_fchs,
bfa_sm_send_event(rport, RPSM_EVENT_PLOGI_RCVD);
}
-/**
+/*
* Called by bport/vport to delete a remote port instance.
*
* Rport delete is called under the following conditions:
@@ -2321,7 +2321,7 @@ bfa_fcs_rport_delete(struct bfa_fcs_rport_s *rport)
bfa_sm_send_event(rport, RPSM_EVENT_DELETE);
}
-/**
+/*
* Called by bport/vport when a target goes offline.
*
*/
@@ -2331,7 +2331,7 @@ bfa_fcs_rport_offline(struct bfa_fcs_rport_s *rport)
bfa_sm_send_event(rport, RPSM_EVENT_LOGO_IMP);
}
-/**
+/*
* Called by bport in n2n when a target (attached port) becomes online.
*
*/
@@ -2340,7 +2340,7 @@ bfa_fcs_rport_online(struct bfa_fcs_rport_s *rport)
{
bfa_sm_send_event(rport, RPSM_EVENT_PLOGI_SEND);
}
-/**
+/*
* Called by bport/vport to notify SCN for the remote port
*/
void
@@ -2350,7 +2350,7 @@ bfa_fcs_rport_scn(struct bfa_fcs_rport_s *rport)
bfa_sm_send_event(rport, RPSM_EVENT_SCN);
}
-/**
+/*
* Called by fcpim to notify that the ITN cleanup is done.
*/
void
@@ -2359,7 +2359,7 @@ bfa_fcs_rport_itnim_ack(struct bfa_fcs_rport_s *rport)
bfa_sm_send_event(rport, RPSM_EVENT_FC4_OFFLINE);
}
-/**
+/*
* Called by fcptm to notify that the ITN cleanup is done.
*/
void
@@ -2368,7 +2368,7 @@ bfa_fcs_rport_tin_ack(struct bfa_fcs_rport_s *rport)
bfa_sm_send_event(rport, RPSM_EVENT_FC4_OFFLINE);
}
-/**
+/*
* brief
* This routine is the BFA callback for the bfa_rport_online() call.
*
@@ -2391,7 +2391,7 @@ bfa_cb_rport_online(void *cbarg)
bfa_sm_send_event(rport, RPSM_EVENT_HCB_ONLINE);
}
-/**
+/*
* brief
* This routine is the BFA callback for the bfa_rport_offline() call.
*
@@ -2413,7 +2413,7 @@ bfa_cb_rport_offline(void *cbarg)
bfa_sm_send_event(rport, RPSM_EVENT_HCB_OFFLINE);
}
-/**
+/*
* brief
* This routine is a static BFA callback when there is a QoS flow_id
* change notification
@@ -2437,7 +2437,7 @@ bfa_cb_rport_qos_scn_flowid(void *cbarg,
bfa_trc(rport->fcs, rport->pwwn);
}
-/**
+/*
* brief
* This routine is a static BFA callback when there is a QoS priority
* change notification
@@ -2461,7 +2461,7 @@ bfa_cb_rport_qos_scn_prio(void *cbarg,
bfa_trc(rport->fcs, rport->pwwn);
}
-/**
+/*
* Called to process any unsolicited frames from this remote port
*/
void
@@ -2470,7 +2470,7 @@ bfa_fcs_rport_logo_imp(struct bfa_fcs_rport_s *rport)
bfa_sm_send_event(rport, RPSM_EVENT_LOGO_IMP);
}
-/**
+/*
* Called to process any unsolicited frames from this remote port
*/
void
@@ -2577,7 +2577,7 @@ bfa_fcs_rport_send_ls_rjt(struct bfa_fcs_rport_s *rport, struct fchs_s *rx_fchs,
FC_MAX_PDUSZ, 0);
}
-/**
+/*
* Return state of rport.
*/
int
@@ -2586,7 +2586,7 @@ bfa_fcs_rport_get_state(struct bfa_fcs_rport_s *rport)
return bfa_sm_to_state(rport_sm_table, rport->sm);
}
-/**
+/*
* brief
* Called by the Driver to set rport delete/ageout timeout
*
@@ -2613,15 +2613,15 @@ bfa_fcs_rport_prlo(struct bfa_fcs_rport_s *rport, u16 ox_id)
-/**
+/*
* Remote port implementation.
*/
-/**
+/*
* fcs_rport_api FCS rport API.
*/
-/**
+/*
* Direct API to add a target by port wwn. This interface is used, for
* example, by bios when target pwwn is known from boot lun configuration.
*/
@@ -2634,7 +2634,7 @@ bfa_fcs_rport_add(struct bfa_fcs_lport_s *port, wwn_t *pwwn,
return BFA_STATUS_OK;
}
-/**
+/*
* Direct API to remove a target and its associated resources. This
* interface is used, for example, by driver to remove target
* ports from the target list for a VM.
@@ -2663,7 +2663,7 @@ bfa_fcs_rport_remove(struct bfa_fcs_rport_s *rport_in)
}
-/**
+/*
* Remote device status for display/debug.
*/
void
@@ -2674,7 +2674,7 @@ bfa_fcs_rport_get_attr(struct bfa_fcs_rport_s *rport,
bfa_fcs_lport_t *port = rport->port;
bfa_port_speed_t rport_speed = rport->rpf.rpsc_speed;
- bfa_os_memset(rport_attr, 0, sizeof(struct bfa_rport_attr_s));
+ memset(rport_attr, 0, sizeof(struct bfa_rport_attr_s));
rport_attr->pid = rport->pid;
rport_attr->pwwn = rport->pwwn;
@@ -2704,7 +2704,7 @@ bfa_fcs_rport_get_attr(struct bfa_fcs_rport_s *rport,
}
}
-/**
+/*
* Per remote device statistics.
*/
void
@@ -2717,7 +2717,7 @@ bfa_fcs_rport_get_stats(struct bfa_fcs_rport_s *rport,
void
bfa_fcs_rport_clear_stats(struct bfa_fcs_rport_s *rport)
{
- bfa_os_memset((char *)&rport->stats, 0,
+ memset((char *)&rport->stats, 0,
sizeof(struct bfa_rport_stats_s));
}
@@ -2767,7 +2767,7 @@ bfa_fcs_rport_set_speed(struct bfa_fcs_rport_s *rport, bfa_port_speed_t speed)
-/**
+/*
* Remote port features (RPF) implementation.
*/
@@ -2786,7 +2786,7 @@ static void bfa_fcs_rpf_rpsc2_response(void *fcsarg,
static void bfa_fcs_rpf_timeout(void *arg);
-/**
+/*
* fcs_rport_ftrs_sm FCS rport state machine events
*/
@@ -2981,7 +2981,7 @@ bfa_fcs_rpf_sm_offline(struct bfa_fcs_rpf_s *rpf, enum rpf_event event)
bfa_sm_fault(rport->fcs, event);
}
}
-/**
+/*
* Called when Rport is created.
*/
void
@@ -2995,7 +2995,7 @@ bfa_fcs_rpf_init(struct bfa_fcs_rport_s *rport)
bfa_sm_set_state(rpf, bfa_fcs_rpf_sm_uninit);
}
-/**
+/*
* Called when Rport becomes online
*/
void
@@ -3010,7 +3010,7 @@ bfa_fcs_rpf_rport_online(struct bfa_fcs_rport_s *rport)
bfa_sm_send_event(&rport->rpf, RPFSM_EVENT_RPORT_ONLINE);
}
-/**
+/*
* Called when Rport becomes offline
*/
void
@@ -3090,16 +3090,16 @@ bfa_fcs_rpf_rpsc2_response(void *fcsarg, struct bfa_fcxp_s *fcxp, void *cbarg,
rpsc2_acc = (struct fc_rpsc2_acc_s *) BFA_FCXP_RSP_PLD(fcxp);
if (rpsc2_acc->els_cmd == FC_ELS_ACC) {
rport->stats.rpsc_accs++;
- num_ents = bfa_os_ntohs(rpsc2_acc->num_pids);
+ num_ents = be16_to_cpu(rpsc2_acc->num_pids);
bfa_trc(rport->fcs, num_ents);
if (num_ents > 0) {
bfa_assert(rpsc2_acc->port_info[0].pid != rport->pid);
bfa_trc(rport->fcs,
- bfa_os_ntohs(rpsc2_acc->port_info[0].pid));
+ be16_to_cpu(rpsc2_acc->port_info[0].pid));
bfa_trc(rport->fcs,
- bfa_os_ntohs(rpsc2_acc->port_info[0].speed));
+ be16_to_cpu(rpsc2_acc->port_info[0].speed));
bfa_trc(rport->fcs,
- bfa_os_ntohs(rpsc2_acc->port_info[0].index));
+ be16_to_cpu(rpsc2_acc->port_info[0].index));
bfa_trc(rport->fcs,
rpsc2_acc->port_info[0].type);
@@ -3109,7 +3109,7 @@ bfa_fcs_rpf_rpsc2_response(void *fcsarg, struct bfa_fcxp_s *fcxp, void *cbarg,
}
rpf->rpsc_speed = fc_rpsc_operspeed_to_bfa_speed(
- bfa_os_ntohs(rpsc2_acc->port_info[0].speed));
+ be16_to_cpu(rpsc2_acc->port_info[0].speed));
bfa_sm_send_event(rpf, RPFSM_EVENT_RPSC_COMP);
}
diff --git a/drivers/scsi/bfa/bfa_hw_cb.c b/drivers/scsi/bfa/bfa_hw_cb.c
index c787d3af0886..d8464ae60070 100644
--- a/drivers/scsi/bfa/bfa_hw_cb.c
+++ b/drivers/scsi/bfa/bfa_hw_cb.c
@@ -22,7 +22,7 @@ void
bfa_hwcb_reginit(struct bfa_s *bfa)
{
struct bfa_iocfc_regs_s *bfa_regs = &bfa->iocfc.bfa_regs;
- bfa_os_addr_t kva = bfa_ioc_bar0(&bfa->ioc);
+ void __iomem *kva = bfa_ioc_bar0(&bfa->ioc);
int i, q, fn = bfa_ioc_pcifn(&bfa->ioc);
if (fn == 0) {
@@ -60,8 +60,8 @@ bfa_hwcb_reqq_ack(struct bfa_s *bfa, int reqq)
static void
bfa_hwcb_reqq_ack_msix(struct bfa_s *bfa, int reqq)
{
- bfa_reg_write(bfa->iocfc.bfa_regs.intr_status,
- __HFN_INT_CPE_Q0 << CPE_Q_NUM(bfa_ioc_pcifn(&bfa->ioc), reqq));
+ writel(__HFN_INT_CPE_Q0 << CPE_Q_NUM(bfa_ioc_pcifn(&bfa->ioc), reqq),
+ bfa->iocfc.bfa_regs.intr_status);
}
void
@@ -72,8 +72,8 @@ bfa_hwcb_rspq_ack(struct bfa_s *bfa, int rspq)
static void
bfa_hwcb_rspq_ack_msix(struct bfa_s *bfa, int rspq)
{
- bfa_reg_write(bfa->iocfc.bfa_regs.intr_status,
- __HFN_INT_RME_Q0 << RME_Q_NUM(bfa_ioc_pcifn(&bfa->ioc), rspq));
+ writel(__HFN_INT_RME_Q0 << RME_Q_NUM(bfa_ioc_pcifn(&bfa->ioc), rspq),
+ bfa->iocfc.bfa_regs.intr_status);
}
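
Worth noting in these hunks is the argument order: the old driver wrapper was called as bfa_reg_write(address, value), while the kernel's writel() takes the value first and the __iomem address second. A kernel-style sketch of the mapping (the wrapper is reconstructed here only for comparison and is an assumption about the pre-patch tree):

#include <linux/io.h>

/* Presumed shape of the old private wrapper: address first, value second. */
#define bfa_reg_write(_addr, _val)	writel((_val), (_addr))
#define bfa_reg_read(_addr)		readl(_addr)

static void ack_rspq(void __iomem *intr_status, u32 ack_bit)
{
	/* before: bfa_reg_write(intr_status, ack_bit); */
	/* after:  value first, MMIO address second.    */
	writel(ack_bit, intr_status);
}

Keeping the operands in writel()'s native order also lets sparse check the __iomem annotation on the address argument.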
void
@@ -102,7 +102,7 @@ bfa_hwcb_msix_getvecs(struct bfa_s *bfa, u32 *msix_vecs_bmap,
*num_vecs = __HFN_NUMINTS;
}
-/**
+/*
* No special setup required for crossbow -- vector assignments are implicit.
*/
void
@@ -129,7 +129,7 @@ bfa_hwcb_msix_init(struct bfa_s *bfa, int nvecs)
bfa->msix.handler[i] = bfa_msix_lpu_err;
}
-/**
+/*
* Crossbow -- dummy, interrupts are masked
*/
void
@@ -142,7 +142,7 @@ bfa_hwcb_msix_uninstall(struct bfa_s *bfa)
{
}
-/**
+/*
* No special enable/disable -- vector assignments are implicit.
*/
void
diff --git a/drivers/scsi/bfa/bfa_hw_ct.c b/drivers/scsi/bfa/bfa_hw_ct.c
index c97ebafec5ea..b0efbc713ffe 100644
--- a/drivers/scsi/bfa/bfa_hw_ct.c
+++ b/drivers/scsi/bfa/bfa_hw_ct.c
@@ -31,15 +31,15 @@ static void
bfa_hwct_msix_lpu_err_set(struct bfa_s *bfa, bfa_boolean_t msix, int vec)
{
int fn = bfa_ioc_pcifn(&bfa->ioc);
- bfa_os_addr_t kva = bfa_ioc_bar0(&bfa->ioc);
+ void __iomem *kva = bfa_ioc_bar0(&bfa->ioc);
if (msix)
- bfa_reg_write(kva + __ct_msix_err_vec_reg[fn], vec);
+ writel(vec, kva + __ct_msix_err_vec_reg[fn]);
else
- bfa_reg_write(kva + __ct_msix_err_vec_reg[fn], 0);
+ writel(0, kva + __ct_msix_err_vec_reg[fn]);
}
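
The bfa_os_addr_t to void __iomem * change in this file (and in bfa_hw_cb.c above) is an annotation fix: __iomem marks MMIO cookies that must be accessed through readl()/writel() rather than dereferenced, and sparse (make C=1) flags any mixing with ordinary pointers. A small kernel-style sketch of the convention:

#include <linux/io.h>

/* An MMIO base is an opaque cookie; arithmetic is fine, dereference is not. */
static u32 read_vec_reg(void __iomem *bar0, unsigned long reg_off)
{
	void __iomem *reg = bar0 + reg_off;   /* offset within the BAR     */

	return readl(reg);                    /* access only via accessors */
	/* return *(u32 *)reg;   <-- sparse would warn about this cast     */
}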
-/**
+/*
* Dummy interrupt handler for handling spurious interrupts during chip-reinit.
*/
static void
@@ -51,7 +51,7 @@ void
bfa_hwct_reginit(struct bfa_s *bfa)
{
struct bfa_iocfc_regs_s *bfa_regs = &bfa->iocfc.bfa_regs;
- bfa_os_addr_t kva = bfa_ioc_bar0(&bfa->ioc);
+ void __iomem *kva = bfa_ioc_bar0(&bfa->ioc);
int i, q, fn = bfa_ioc_pcifn(&bfa->ioc);
if (fn == 0) {
@@ -88,8 +88,8 @@ bfa_hwct_reqq_ack(struct bfa_s *bfa, int reqq)
{
u32 r32;
- r32 = bfa_reg_read(bfa->iocfc.bfa_regs.cpe_q_ctrl[reqq]);
- bfa_reg_write(bfa->iocfc.bfa_regs.cpe_q_ctrl[reqq], r32);
+ r32 = readl(bfa->iocfc.bfa_regs.cpe_q_ctrl[reqq]);
+ writel(r32, bfa->iocfc.bfa_regs.cpe_q_ctrl[reqq]);
}
void
@@ -97,8 +97,8 @@ bfa_hwct_rspq_ack(struct bfa_s *bfa, int rspq)
{
u32 r32;
- r32 = bfa_reg_read(bfa->iocfc.bfa_regs.rme_q_ctrl[rspq]);
- bfa_reg_write(bfa->iocfc.bfa_regs.rme_q_ctrl[rspq], r32);
+ r32 = readl(bfa->iocfc.bfa_regs.rme_q_ctrl[rspq]);
+ writel(r32, bfa->iocfc.bfa_regs.rme_q_ctrl[rspq]);
}
void
@@ -110,7 +110,7 @@ bfa_hwct_msix_getvecs(struct bfa_s *bfa, u32 *msix_vecs_bmap,
*num_vecs = BFA_MSIX_CT_MAX;
}
-/**
+/*
* Setup MSI-X vector for catapult
*/
void
@@ -156,7 +156,7 @@ bfa_hwct_msix_uninstall(struct bfa_s *bfa)
bfa->msix.handler[i] = bfa_hwct_msix_dummy;
}
-/**
+/*
* Enable MSI-X vectors
*/
void
diff --git a/drivers/scsi/bfa/bfa_ioc.c b/drivers/scsi/bfa/bfa_ioc.c
index 6795b247791a..54475b53a5ab 100644
--- a/drivers/scsi/bfa/bfa_ioc.c
+++ b/drivers/scsi/bfa/bfa_ioc.c
@@ -23,7 +23,7 @@
BFA_TRC_FILE(CNA, IOC);
-/**
+/*
* IOC local definitions
*/
#define BFA_IOC_TOV 3000 /* msecs */
@@ -49,7 +49,7 @@ BFA_TRC_FILE(CNA, IOC);
BFA_TRC_MAX * sizeof(struct bfa_trc_s)))
#define BFA_DBG_FWTRC_OFF(_fn) (BFI_IOC_TRC_OFF + BFA_DBG_FWTRC_LEN * (_fn))
-/**
+/*
* Asic specific macros : see bfa_hw_cb.c and bfa_hw_ct.c for details.
*/
@@ -73,7 +73,7 @@ BFA_TRC_FILE(CNA, IOC);
#define bfa_ioc_mbox_cmd_pending(__ioc) \
(!list_empty(&((__ioc)->mbox_mod.cmd_q)) || \
- bfa_reg_read((__ioc)->ioc_regs.hfn_mbox_cmd))
+ readl((__ioc)->ioc_regs.hfn_mbox_cmd))
bfa_boolean_t bfa_auto_recover = BFA_TRUE;
@@ -101,11 +101,11 @@ static void bfa_ioc_pf_disabled(struct bfa_ioc_s *ioc);
static void bfa_ioc_pf_failed(struct bfa_ioc_s *ioc);
static void bfa_ioc_pf_fwmismatch(struct bfa_ioc_s *ioc);
-/**
+/*
* hal_ioc_sm
*/
-/**
+/*
* IOC state machine definitions/declarations
*/
enum ioc_event {
@@ -144,7 +144,7 @@ static struct bfa_sm_table_s ioc_sm_table[] = {
{BFA_SM(bfa_ioc_sm_disabled), BFA_IOC_DISABLED},
};
-/**
+/*
* IOCPF state machine definitions/declarations
*/
@@ -174,7 +174,7 @@ static void bfa_iocpf_stop(struct bfa_ioc_s *ioc);
static void bfa_iocpf_timeout(void *ioc_arg);
static void bfa_iocpf_sem_timeout(void *ioc_arg);
-/**
+/*
* IOCPF state machine events
*/
enum iocpf_event {
@@ -191,7 +191,7 @@ enum iocpf_event {
IOCPF_E_TIMEOUT = 11, /* f/w response timeout */
};
-/**
+/*
* IOCPF states
*/
enum bfa_iocpf_state {
@@ -232,11 +232,11 @@ static struct bfa_sm_table_s iocpf_sm_table[] = {
{BFA_SM(bfa_iocpf_sm_disabled), BFA_IOCPF_DISABLED},
};
-/**
+/*
* IOC State Machine
*/
-/**
+/*
* Beginning state. IOC uninit state.
*/
@@ -245,7 +245,7 @@ bfa_ioc_sm_uninit_entry(struct bfa_ioc_s *ioc)
{
}
-/**
+/*
* IOC is in uninit state.
*/
static void
@@ -262,7 +262,7 @@ bfa_ioc_sm_uninit(struct bfa_ioc_s *ioc, enum ioc_event event)
bfa_sm_fault(ioc, event);
}
}
-/**
+/*
* Reset entry actions -- initialize state machine
*/
static void
@@ -271,7 +271,7 @@ bfa_ioc_sm_reset_entry(struct bfa_ioc_s *ioc)
bfa_fsm_set_state(&ioc->iocpf, bfa_iocpf_sm_reset);
}
-/**
+/*
* IOC is in reset state.
*/
static void
@@ -304,7 +304,7 @@ bfa_ioc_sm_enabling_entry(struct bfa_ioc_s *ioc)
bfa_iocpf_enable(ioc);
}
-/**
+/*
* Host IOC function is being enabled, awaiting response from firmware.
* Semaphore is acquired.
*/
@@ -352,7 +352,7 @@ bfa_ioc_sm_getattr_entry(struct bfa_ioc_s *ioc)
bfa_ioc_send_getattr(ioc);
}
-/**
+/*
* IOC configuration in progress. Timer is active.
*/
static void
@@ -447,7 +447,7 @@ bfa_ioc_sm_disabling_entry(struct bfa_ioc_s *ioc)
BFA_LOG(KERN_INFO, bfad, log_level, "IOC disabled\n");
}
-/**
+/*
* IOC is being disabled
*/
static void
@@ -474,7 +474,7 @@ bfa_ioc_sm_disabling(struct bfa_ioc_s *ioc, enum ioc_event event)
}
}
-/**
+/*
* IOC disable completion entry.
*/
static void
@@ -514,7 +514,7 @@ bfa_ioc_sm_initfail_entry(struct bfa_ioc_s *ioc)
ioc->cbfn->enable_cbfn(ioc->bfa, BFA_STATUS_IOC_FAILURE);
}
-/**
+/*
* Hardware initialization failed.
*/
static void
@@ -528,7 +528,7 @@ bfa_ioc_sm_initfail(struct bfa_ioc_s *ioc, enum ioc_event event)
break;
case IOC_E_FAILED:
- /**
+ /*
* Initialization failure during iocpf init retry.
*/
ioc->cbfn->enable_cbfn(ioc->bfa, BFA_STATUS_IOC_FAILURE);
@@ -556,7 +556,7 @@ bfa_ioc_sm_fail_entry(struct bfa_ioc_s *ioc)
struct bfa_ioc_hbfail_notify_s *notify;
struct bfad_s *bfad = (struct bfad_s *)ioc->bfa->bfad;
- /**
+ /*
* Notify driver and common modules registered for notification.
*/
ioc->cbfn->hbfail_cbfn(ioc->bfa);
@@ -569,7 +569,7 @@ bfa_ioc_sm_fail_entry(struct bfa_ioc_s *ioc)
"Heart Beat of IOC has failed\n");
}
-/**
+/*
* IOC failure.
*/
static void
@@ -580,7 +580,7 @@ bfa_ioc_sm_fail(struct bfa_ioc_s *ioc, enum ioc_event event)
switch (event) {
case IOC_E_FAILED:
- /**
+ /*
* Initialization failure during iocpf recovery.
* !!! Fall through !!!
*/
@@ -608,12 +608,12 @@ bfa_ioc_sm_fail(struct bfa_ioc_s *ioc, enum ioc_event event)
-/**
+/*
* IOCPF State Machine
*/
-/**
+/*
* Reset entry actions -- initialize state machine
*/
static void
@@ -623,7 +623,7 @@ bfa_iocpf_sm_reset_entry(struct bfa_iocpf_s *iocpf)
iocpf->auto_recover = bfa_auto_recover;
}
-/**
+/*
* Beginning state. IOC is in reset state.
*/
static void
@@ -646,7 +646,7 @@ bfa_iocpf_sm_reset(struct bfa_iocpf_s *iocpf, enum iocpf_event event)
}
}
-/**
+/*
* Semaphore should be acquired for version check.
*/
static void
@@ -655,7 +655,7 @@ bfa_iocpf_sm_fwcheck_entry(struct bfa_iocpf_s *iocpf)
bfa_ioc_hw_sem_get(iocpf->ioc);
}
-/**
+/*
* Awaiting h/w semaphore to continue with version check.
*/
static void
@@ -692,7 +692,7 @@ bfa_iocpf_sm_fwcheck(struct bfa_iocpf_s *iocpf, enum iocpf_event event)
}
}
-/**
+/*
* Notify enable completion callback.
*/
static void
@@ -708,7 +708,7 @@ bfa_iocpf_sm_mismatch_entry(struct bfa_iocpf_s *iocpf)
bfa_iocpf_timer_start(iocpf->ioc);
}
-/**
+/*
* Awaiting firmware version match.
*/
static void
@@ -739,7 +739,7 @@ bfa_iocpf_sm_mismatch(struct bfa_iocpf_s *iocpf, enum iocpf_event event)
}
}
-/**
+/*
* Request for semaphore.
*/
static void
@@ -748,7 +748,7 @@ bfa_iocpf_sm_semwait_entry(struct bfa_iocpf_s *iocpf)
bfa_ioc_hw_sem_get(iocpf->ioc);
}
-/**
+/*
* Awaiting semaphore for h/w initialization.
*/
static void
@@ -782,7 +782,7 @@ bfa_iocpf_sm_hwinit_entry(struct bfa_iocpf_s *iocpf)
bfa_ioc_reset(iocpf->ioc, BFA_FALSE);
}
-/**
+/*
* Hardware is being initialized. Interrupts are enabled.
* Holding hardware semaphore lock.
*/
@@ -839,7 +839,7 @@ bfa_iocpf_sm_enabling_entry(struct bfa_iocpf_s *iocpf)
bfa_ioc_send_enable(iocpf->ioc);
}
-/**
+/*
* Host IOC function is being enabled, awaiting response from firmware.
* Semaphore is acquired.
*/
@@ -866,8 +866,7 @@ bfa_iocpf_sm_enabling(struct bfa_iocpf_s *iocpf, enum iocpf_event event)
case IOCPF_E_TIMEOUT:
iocpf->retry_count++;
if (iocpf->retry_count < BFA_IOC_HWINIT_MAX) {
- bfa_reg_write(ioc->ioc_regs.ioc_fwstate,
- BFI_IOC_UNINIT);
+ writel(BFI_IOC_UNINIT, ioc->ioc_regs.ioc_fwstate);
bfa_fsm_set_state(iocpf, bfa_iocpf_sm_hwinit);
break;
}
@@ -944,7 +943,7 @@ bfa_iocpf_sm_disabling_entry(struct bfa_iocpf_s *iocpf)
bfa_ioc_send_disable(iocpf->ioc);
}
-/**
+/*
* IOC is being disabled
*/
static void
@@ -968,7 +967,7 @@ bfa_iocpf_sm_disabling(struct bfa_iocpf_s *iocpf, enum iocpf_event event)
*/
case IOCPF_E_TIMEOUT:
- bfa_reg_write(ioc->ioc_regs.ioc_fwstate, BFI_IOC_FAIL);
+ writel(BFI_IOC_FAIL, ioc->ioc_regs.ioc_fwstate);
bfa_fsm_set_state(iocpf, bfa_iocpf_sm_disabled);
break;
@@ -980,7 +979,7 @@ bfa_iocpf_sm_disabling(struct bfa_iocpf_s *iocpf, enum iocpf_event event)
}
}
-/**
+/*
* IOC disable completion entry.
*/
static void
@@ -1018,7 +1017,7 @@ bfa_iocpf_sm_initfail_entry(struct bfa_iocpf_s *iocpf)
bfa_iocpf_timer_start(iocpf->ioc);
}
-/**
+/*
* Hardware initialization failed.
*/
static void
@@ -1053,18 +1052,18 @@ bfa_iocpf_sm_initfail(struct bfa_iocpf_s *iocpf, enum iocpf_event event)
static void
bfa_iocpf_sm_fail_entry(struct bfa_iocpf_s *iocpf)
{
- /**
+ /*
* Mark IOC as failed in hardware and stop firmware.
*/
bfa_ioc_lpu_stop(iocpf->ioc);
- bfa_reg_write(iocpf->ioc->ioc_regs.ioc_fwstate, BFI_IOC_FAIL);
+ writel(BFI_IOC_FAIL, iocpf->ioc->ioc_regs.ioc_fwstate);
- /**
+ /*
* Notify other functions on HB failure.
*/
bfa_ioc_notify_hbfail(iocpf->ioc);
- /**
+ /*
* Flush any queued up mailbox requests.
*/
bfa_ioc_mbox_hbfail(iocpf->ioc);
@@ -1073,7 +1072,7 @@ bfa_iocpf_sm_fail_entry(struct bfa_iocpf_s *iocpf)
bfa_iocpf_recovery_timer_start(iocpf->ioc);
}
-/**
+/*
* IOC is in failed state.
*/
static void
@@ -1101,7 +1100,7 @@ bfa_iocpf_sm_fail(struct bfa_iocpf_s *iocpf, enum iocpf_event event)
-/**
+/*
* hal_ioc_pvt BFA IOC private functions
*/
@@ -1113,7 +1112,7 @@ bfa_ioc_disable_comp(struct bfa_ioc_s *ioc)
ioc->cbfn->disable_cbfn(ioc->bfa);
- /**
+ /*
* Notify common modules registered for notification.
*/
list_for_each(qe, &ioc->hb_notify_q) {
@@ -1123,18 +1122,18 @@ bfa_ioc_disable_comp(struct bfa_ioc_s *ioc)
}
bfa_boolean_t
-bfa_ioc_sem_get(bfa_os_addr_t sem_reg)
+bfa_ioc_sem_get(void __iomem *sem_reg)
{
u32 r32;
int cnt = 0;
#define BFA_SEM_SPINCNT 3000
- r32 = bfa_reg_read(sem_reg);
+ r32 = readl(sem_reg);
while (r32 && (cnt < BFA_SEM_SPINCNT)) {
cnt++;
- bfa_os_udelay(2);
- r32 = bfa_reg_read(sem_reg);
+ udelay(2);
+ r32 = readl(sem_reg);
}
if (r32 == 0)
@@ -1145,9 +1144,9 @@ bfa_ioc_sem_get(bfa_os_addr_t sem_reg)
}
void
-bfa_ioc_sem_release(bfa_os_addr_t sem_reg)
+bfa_ioc_sem_release(void __iomem *sem_reg)
{
- bfa_reg_write(sem_reg, 1);
+ writel(1, sem_reg);
}
static void
@@ -1155,11 +1154,11 @@ bfa_ioc_hw_sem_get(struct bfa_ioc_s *ioc)
{
u32 r32;
- /**
+ /*
* First read to the semaphore register will return 0, subsequent reads
* will return 1. Semaphore is released by writing 1 to the register
*/
- r32 = bfa_reg_read(ioc->ioc_regs.ioc_sem_reg);
+ r32 = readl(ioc->ioc_regs.ioc_sem_reg);
if (r32 == 0) {
bfa_fsm_send_event(&ioc->iocpf, IOCPF_E_SEMLOCKED);
return;
@@ -1171,7 +1170,7 @@ bfa_ioc_hw_sem_get(struct bfa_ioc_s *ioc)
void
bfa_ioc_hw_sem_release(struct bfa_ioc_s *ioc)
{
- bfa_reg_write(ioc->ioc_regs.ioc_sem_reg, 1);
+ writel(1, ioc->ioc_regs.ioc_sem_reg);
}
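
The comment kept in bfa_ioc_hw_sem_get() documents the whole hardware-semaphore protocol: the first read of the register returns 0 and grants ownership atomically, later reads return 1, and writing 1 releases it. The bounded-spin variant converted above (bfa_ioc_sem_get()) can be summarized as below; the spin count of 3000 and the 2-microsecond delay come from the hunk, the rest is an illustrative kernel-style sketch.

#include <linux/delay.h>
#include <linux/io.h>
#include <linux/types.h>

#define SEM_SPINCNT	3000

/* Returns true once the hardware semaphore has been acquired. */
static bool hw_sem_acquire(void __iomem *sem_reg)
{
	int cnt = 0;
	u32 r32 = readl(sem_reg);	/* 0 on the first read == acquired */

	while (r32 && cnt < SEM_SPINCNT) {
		cnt++;
		udelay(2);
		r32 = readl(sem_reg);
	}
	return r32 == 0;
}

static void hw_sem_release(void __iomem *sem_reg)
{
	writel(1, sem_reg);		/* writing 1 releases the semaphore */
}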
static void
@@ -1180,7 +1179,7 @@ bfa_ioc_hw_sem_get_cancel(struct bfa_ioc_s *ioc)
bfa_sem_timer_stop(ioc);
}
-/**
+/*
* Initialize LPU local memory (aka secondary memory / SRAM)
*/
static void
@@ -1190,7 +1189,7 @@ bfa_ioc_lmem_init(struct bfa_ioc_s *ioc)
int i;
#define PSS_LMEM_INIT_TIME 10000
- pss_ctl = bfa_reg_read(ioc->ioc_regs.pss_ctl_reg);
+ pss_ctl = readl(ioc->ioc_regs.pss_ctl_reg);
pss_ctl &= ~__PSS_LMEM_RESET;
pss_ctl |= __PSS_LMEM_INIT_EN;
@@ -1198,18 +1197,18 @@ bfa_ioc_lmem_init(struct bfa_ioc_s *ioc)
* i2c workaround 12.5khz clock
*/
pss_ctl |= __PSS_I2C_CLK_DIV(3UL);
- bfa_reg_write(ioc->ioc_regs.pss_ctl_reg, pss_ctl);
+ writel(pss_ctl, ioc->ioc_regs.pss_ctl_reg);
- /**
+ /*
* wait for memory initialization to be complete
*/
i = 0;
do {
- pss_ctl = bfa_reg_read(ioc->ioc_regs.pss_ctl_reg);
+ pss_ctl = readl(ioc->ioc_regs.pss_ctl_reg);
i++;
} while (!(pss_ctl & __PSS_LMEM_INIT_DONE) && (i < PSS_LMEM_INIT_TIME));
- /**
+ /*
* If memory initialization is not successful, IOC timeout will catch
* such failures.
*/
@@ -1217,7 +1216,7 @@ bfa_ioc_lmem_init(struct bfa_ioc_s *ioc)
bfa_trc(ioc, pss_ctl);
pss_ctl &= ~(__PSS_LMEM_INIT_DONE | __PSS_LMEM_INIT_EN);
- bfa_reg_write(ioc->ioc_regs.pss_ctl_reg, pss_ctl);
+ writel(pss_ctl, ioc->ioc_regs.pss_ctl_reg);
}
static void
@@ -1225,13 +1224,13 @@ bfa_ioc_lpu_start(struct bfa_ioc_s *ioc)
{
u32 pss_ctl;
- /**
+ /*
* Take processor out of reset.
*/
- pss_ctl = bfa_reg_read(ioc->ioc_regs.pss_ctl_reg);
+ pss_ctl = readl(ioc->ioc_regs.pss_ctl_reg);
pss_ctl &= ~__PSS_LPU0_RESET;
- bfa_reg_write(ioc->ioc_regs.pss_ctl_reg, pss_ctl);
+ writel(pss_ctl, ioc->ioc_regs.pss_ctl_reg);
}
static void
@@ -1239,16 +1238,16 @@ bfa_ioc_lpu_stop(struct bfa_ioc_s *ioc)
{
u32 pss_ctl;
- /**
+ /*
* Put processors in reset.
*/
- pss_ctl = bfa_reg_read(ioc->ioc_regs.pss_ctl_reg);
+ pss_ctl = readl(ioc->ioc_regs.pss_ctl_reg);
pss_ctl |= (__PSS_LPU0_RESET | __PSS_LPU1_RESET);
- bfa_reg_write(ioc->ioc_regs.pss_ctl_reg, pss_ctl);
+ writel(pss_ctl, ioc->ioc_regs.pss_ctl_reg);
}
-/**
+/*
* Get driver and firmware versions.
*/
void
@@ -1261,7 +1260,7 @@ bfa_ioc_fwver_get(struct bfa_ioc_s *ioc, struct bfi_ioc_image_hdr_s *fwhdr)
pgnum = bfa_ioc_smem_pgnum(ioc, loff);
pgoff = bfa_ioc_smem_pgoff(ioc, loff);
- bfa_reg_write(ioc->ioc_regs.host_page_num_fn, pgnum);
+ writel(pgnum, ioc->ioc_regs.host_page_num_fn);
for (i = 0; i < (sizeof(struct bfi_ioc_image_hdr_s) / sizeof(u32));
i++) {
@@ -1271,7 +1270,7 @@ bfa_ioc_fwver_get(struct bfa_ioc_s *ioc, struct bfi_ioc_image_hdr_s *fwhdr)
}
}
-/**
+/*
* Returns TRUE if same.
*/
bfa_boolean_t
@@ -1296,7 +1295,7 @@ bfa_ioc_fwver_cmp(struct bfa_ioc_s *ioc, struct bfi_ioc_image_hdr_s *fwhdr)
return BFA_TRUE;
}
-/**
+/*
* Return true if current running version is valid. Firmware signature and
* execution context (driver/bios) must match.
*/
@@ -1305,7 +1304,7 @@ bfa_ioc_fwver_valid(struct bfa_ioc_s *ioc, u32 boot_env)
{
struct bfi_ioc_image_hdr_s fwhdr, *drv_fwhdr;
- /**
+ /*
* If bios/efi boot (flash based) -- return true
*/
if (bfa_ioc_is_bios_optrom(ioc))
@@ -1321,7 +1320,7 @@ bfa_ioc_fwver_valid(struct bfa_ioc_s *ioc, u32 boot_env)
return BFA_FALSE;
}
- if (bfa_os_swap32(fwhdr.param) != boot_env) {
+ if (swab32(fwhdr.param) != boot_env) {
bfa_trc(ioc, fwhdr.param);
bfa_trc(ioc, boot_env);
return BFA_FALSE;
@@ -1330,7 +1329,7 @@ bfa_ioc_fwver_valid(struct bfa_ioc_s *ioc, u32 boot_env)
return bfa_ioc_fwver_cmp(ioc, &fwhdr);
}
-/**
+/*
* Conditionally flush any pending message from firmware at start.
*/
static void
@@ -1338,9 +1337,9 @@ bfa_ioc_msgflush(struct bfa_ioc_s *ioc)
{
u32 r32;
- r32 = bfa_reg_read(ioc->ioc_regs.lpu_mbox_cmd);
+ r32 = readl(ioc->ioc_regs.lpu_mbox_cmd);
if (r32)
- bfa_reg_write(ioc->ioc_regs.lpu_mbox_cmd, 1);
+ writel(1, ioc->ioc_regs.lpu_mbox_cmd);
}
@@ -1352,7 +1351,7 @@ bfa_ioc_hwinit(struct bfa_ioc_s *ioc, bfa_boolean_t force)
u32 boot_type;
u32 boot_env;
- ioc_fwstate = bfa_reg_read(ioc->ioc_regs.ioc_fwstate);
+ ioc_fwstate = readl(ioc->ioc_regs.ioc_fwstate);
if (force)
ioc_fwstate = BFI_IOC_UNINIT;
@@ -1362,7 +1361,7 @@ bfa_ioc_hwinit(struct bfa_ioc_s *ioc, bfa_boolean_t force)
boot_type = BFI_BOOT_TYPE_NORMAL;
boot_env = BFI_BOOT_LOADER_OS;
- /**
+ /*
* Flash based firmware boot BIOS env.
*/
if (bfa_ioc_is_bios_optrom(ioc)) {
@@ -1370,7 +1369,7 @@ bfa_ioc_hwinit(struct bfa_ioc_s *ioc, bfa_boolean_t force)
boot_env = BFI_BOOT_LOADER_BIOS;
}
- /**
+ /*
* Flash based firmware boot UEFI env.
*/
if (bfa_ioc_is_uefi(ioc)) {
@@ -1378,7 +1377,7 @@ bfa_ioc_hwinit(struct bfa_ioc_s *ioc, bfa_boolean_t force)
boot_env = BFI_BOOT_LOADER_UEFI;
}
- /**
+ /*
* check if firmware is valid
*/
fwvalid = (ioc_fwstate == BFI_IOC_UNINIT) ?
@@ -1389,7 +1388,7 @@ bfa_ioc_hwinit(struct bfa_ioc_s *ioc, bfa_boolean_t force)
return;
}
- /**
+ /*
* If hardware initialization is in progress (initialized by other IOC),
* just wait for an initialization completion interrupt.
*/
@@ -1398,7 +1397,7 @@ bfa_ioc_hwinit(struct bfa_ioc_s *ioc, bfa_boolean_t force)
return;
}
- /**
+ /*
* If IOC function is disabled and firmware version is same,
* just re-enable IOC.
*
@@ -1409,7 +1408,7 @@ bfa_ioc_hwinit(struct bfa_ioc_s *ioc, bfa_boolean_t force)
if (ioc_fwstate == BFI_IOC_DISABLED ||
(!bfa_ioc_is_bios_optrom(ioc) && ioc_fwstate == BFI_IOC_OP)) {
- /**
+ /*
* When using MSI-X any pending firmware ready event should
* be flushed. Otherwise MSI-X interrupts are not delivered.
*/
@@ -1419,7 +1418,7 @@ bfa_ioc_hwinit(struct bfa_ioc_s *ioc, bfa_boolean_t force)
return;
}
- /**
+ /*
* Initialize the h/w for any other states.
*/
bfa_ioc_boot(ioc, boot_type, boot_env);
@@ -1449,17 +1448,17 @@ bfa_ioc_mbox_send(struct bfa_ioc_s *ioc, void *ioc_msg, int len)
* first write msg to mailbox registers
*/
for (i = 0; i < len / sizeof(u32); i++)
- bfa_reg_write(ioc->ioc_regs.hfn_mbox + i * sizeof(u32),
- bfa_os_wtole(msgp[i]));
+ writel(cpu_to_le32(msgp[i]),
+ ioc->ioc_regs.hfn_mbox + i * sizeof(u32));
for (; i < BFI_IOC_MSGLEN_MAX / sizeof(u32); i++)
- bfa_reg_write(ioc->ioc_regs.hfn_mbox + i * sizeof(u32), 0);
+ writel(0, ioc->ioc_regs.hfn_mbox + i * sizeof(u32));
/*
* write 1 to mailbox CMD to trigger LPU event
*/
- bfa_reg_write(ioc->ioc_regs.hfn_mbox_cmd, 1);
- (void) bfa_reg_read(ioc->ioc_regs.hfn_mbox_cmd);
+ writel(1, ioc->ioc_regs.hfn_mbox_cmd);
+ (void) readl(ioc->ioc_regs.hfn_mbox_cmd);
}
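
bfa_ioc_mbox_send() above is a textbook MMIO doorbell sequence: copy the message into the mailbox window as little-endian words, zero the unused words, write 1 to the command register, then read it back so the posted write is flushed before the caller proceeds. A condensed kernel-style sketch (the register parameters and the 32-byte mailbox size are assumptions):

#include <linux/io.h>
#include <linux/types.h>
#include <asm/byteorder.h>	/* cpu_to_le32() */

#define MBOX_MSGLEN_MAX	32	/* bytes; illustrative value */

static void mbox_send(void __iomem *mbox, void __iomem *mbox_cmd,
		      const void *msg, int len)
{
	const u32 *words = msg;
	int i;

	/* Message payload, in the device's little-endian layout. */
	for (i = 0; i < len / sizeof(u32); i++)
		writel(cpu_to_le32(words[i]), mbox + i * sizeof(u32));

	/* Zero the remainder so stale words are never interpreted. */
	for (; i < MBOX_MSGLEN_MAX / sizeof(u32); i++)
		writel(0, mbox + i * sizeof(u32));

	/* Ring the doorbell, then flush the posted write with a read-back. */
	writel(1, mbox_cmd);
	(void) readl(mbox_cmd);
}

The discarded readl() is what forces the posted doorbell write out to the adapter before the function returns.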
static void
@@ -1472,7 +1471,7 @@ bfa_ioc_send_enable(struct bfa_ioc_s *ioc)
bfa_ioc_portid(ioc));
enable_req.ioc_class = ioc->ioc_mc;
bfa_os_gettimeofday(&tv);
- enable_req.tv_sec = bfa_os_ntohl(tv.tv_sec);
+ enable_req.tv_sec = be32_to_cpu(tv.tv_sec);
bfa_ioc_mbox_send(ioc, &enable_req, sizeof(struct bfi_ioc_ctrl_req_s));
}
@@ -1503,7 +1502,7 @@ bfa_ioc_hb_check(void *cbarg)
struct bfa_ioc_s *ioc = cbarg;
u32 hb_count;
- hb_count = bfa_reg_read(ioc->ioc_regs.heartbeat);
+ hb_count = readl(ioc->ioc_regs.heartbeat);
if (ioc->hb_count == hb_count) {
printk(KERN_CRIT "Firmware heartbeat failure at %d", hb_count);
bfa_ioc_recover(ioc);
@@ -1519,7 +1518,7 @@ bfa_ioc_hb_check(void *cbarg)
static void
bfa_ioc_hb_monitor(struct bfa_ioc_s *ioc)
{
- ioc->hb_count = bfa_reg_read(ioc->ioc_regs.heartbeat);
+ ioc->hb_count = readl(ioc->ioc_regs.heartbeat);
bfa_hb_timer_start(ioc);
}
@@ -1530,7 +1529,7 @@ bfa_ioc_hb_stop(struct bfa_ioc_s *ioc)
}
-/**
+/*
* Initiate a full firmware download.
*/
static void
@@ -1543,7 +1542,7 @@ bfa_ioc_download_fw(struct bfa_ioc_s *ioc, u32 boot_type,
u32 chunkno = 0;
u32 i;
- /**
+ /*
* Initialize LMEM first before code download
*/
bfa_ioc_lmem_init(ioc);
@@ -1554,7 +1553,7 @@ bfa_ioc_download_fw(struct bfa_ioc_s *ioc, u32 boot_type,
pgnum = bfa_ioc_smem_pgnum(ioc, loff);
pgoff = bfa_ioc_smem_pgoff(ioc, loff);
- bfa_reg_write(ioc->ioc_regs.host_page_num_fn, pgnum);
+ writel(pgnum, ioc->ioc_regs.host_page_num_fn);
for (i = 0; i < bfa_cb_image_get_size(BFA_IOC_FWIMG_TYPE(ioc)); i++) {
@@ -1564,7 +1563,7 @@ bfa_ioc_download_fw(struct bfa_ioc_s *ioc, u32 boot_type,
BFA_IOC_FLASH_CHUNK_ADDR(chunkno));
}
- /**
+ /*
* write smem
*/
bfa_mem_write(ioc->ioc_regs.smem_page_start, loff,
@@ -1572,27 +1571,25 @@ bfa_ioc_download_fw(struct bfa_ioc_s *ioc, u32 boot_type,
loff += sizeof(u32);
- /**
+ /*
* handle page offset wrap around
*/
loff = PSS_SMEM_PGOFF(loff);
if (loff == 0) {
pgnum++;
- bfa_reg_write(ioc->ioc_regs.host_page_num_fn,
- pgnum);
+ writel(pgnum, ioc->ioc_regs.host_page_num_fn);
}
}
- bfa_reg_write(ioc->ioc_regs.host_page_num_fn,
- bfa_ioc_smem_pgnum(ioc, 0));
+ writel(bfa_ioc_smem_pgnum(ioc, 0), ioc->ioc_regs.host_page_num_fn);
/*
* Set boot type and boot param at the end.
*/
bfa_mem_write(ioc->ioc_regs.smem_page_start, BFI_BOOT_TYPE_OFF,
- bfa_os_swap32(boot_type));
+ swab32(boot_type));
bfa_mem_write(ioc->ioc_regs.smem_page_start, BFI_BOOT_LOADER_OFF,
- bfa_os_swap32(boot_env));
+ swab32(boot_env));
}
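
This hunk keeps an unconditional swap: swab32() reverses the bytes on every architecture, whereas cpu_to_be32()/be32_to_cpu() only swap on little-endian hosts. A tiny user-space illustration of that difference (the choice of helper for these boot words is the driver's; the snippet only demonstrates the semantics):

#include <stdint.h>
#include <stdio.h>

/* Unconditional byte swap -- the behaviour of the kernel's swab32(). */
static uint32_t swap32(uint32_t x)
{
	return (x >> 24) | ((x >> 8) & 0x0000ff00u) |
	       ((x << 8) & 0x00ff0000u) | (x << 24);
}

int main(void)
{
	/* 0x11223344 becomes 0x44332211 on every host; cpu_to_be32() would
	 * instead be a no-op on a big-endian machine. */
	printf("0x%08x\n", swap32(0x11223344u));
	return 0;
}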
static void
@@ -1601,7 +1598,7 @@ bfa_ioc_reset(struct bfa_ioc_s *ioc, bfa_boolean_t force)
bfa_ioc_hwinit(ioc, force);
}
-/**
+/*
* Update BFA configuration from firmware configuration.
*/
static void
@@ -1609,14 +1606,14 @@ bfa_ioc_getattr_reply(struct bfa_ioc_s *ioc)
{
struct bfi_ioc_attr_s *attr = ioc->attr;
- attr->adapter_prop = bfa_os_ntohl(attr->adapter_prop);
- attr->card_type = bfa_os_ntohl(attr->card_type);
- attr->maxfrsize = bfa_os_ntohs(attr->maxfrsize);
+ attr->adapter_prop = be32_to_cpu(attr->adapter_prop);
+ attr->card_type = be32_to_cpu(attr->card_type);
+ attr->maxfrsize = be16_to_cpu(attr->maxfrsize);
bfa_fsm_send_event(ioc, IOC_E_FWRSP_GETATTR);
}
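
bfa_ioc_getattr_reply() converts the firmware's big-endian attribute words to host order in place, so the rest of the driver can read the struct natively. A stand-alone sketch of that fixup pattern (struct and field names are illustrative, not the bfi_ioc_attr_s layout):

#include <stdint.h>
#include <stdio.h>
#include <arpa/inet.h>   /* ntohl()/ntohs() standing in for be*_to_cpu() */

/* Attribute block as delivered by firmware: multi-byte fields big-endian. */
struct fw_attr {
	uint32_t adapter_prop;
	uint32_t card_type;
	uint16_t maxfrsize;
};

/* One-shot, in-place conversion right after the reply arrives; afterwards
 * the struct must only ever be read as host-endian. */
static void fw_attr_fixup(struct fw_attr *attr)
{
	attr->adapter_prop = ntohl(attr->adapter_prop);
	attr->card_type    = ntohl(attr->card_type);
	attr->maxfrsize    = ntohs(attr->maxfrsize);
}

int main(void)
{
	struct fw_attr a = { .maxfrsize = htons(2112) };   /* FC max payload */

	fw_attr_fixup(&a);
	printf("maxfrsize = %u\n", a.maxfrsize);           /* prints 2112 */
	return 0;
}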
-/**
+/*
* Attach time initialization of mbox logic.
*/
static void
@@ -1632,7 +1629,7 @@ bfa_ioc_mbox_attach(struct bfa_ioc_s *ioc)
}
}
-/**
+/*
* Mbox poll timer -- restarts any pending mailbox requests.
*/
static void
@@ -1642,27 +1639,27 @@ bfa_ioc_mbox_poll(struct bfa_ioc_s *ioc)
struct bfa_mbox_cmd_s *cmd;
u32 stat;
- /**
+ /*
* If no command pending, do nothing
*/
if (list_empty(&mod->cmd_q))
return;
- /**
+ /*
* If previous command is not yet fetched by firmware, do nothing
*/
- stat = bfa_reg_read(ioc->ioc_regs.hfn_mbox_cmd);
+ stat = readl(ioc->ioc_regs.hfn_mbox_cmd);
if (stat)
return;
- /**
+ /*
* Enqueue command to firmware.
*/
bfa_q_deq(&mod->cmd_q, &cmd);
bfa_ioc_mbox_send(ioc, cmd->msg, sizeof(cmd->msg));
}
-/**
+/*
* Cleanup any pending requests.
*/
static void
@@ -1675,7 +1672,7 @@ bfa_ioc_mbox_hbfail(struct bfa_ioc_s *ioc)
bfa_q_deq(&mod->cmd_q, &cmd);
}
-/**
+/*
* Read data from SMEM to host through PCI memmap
*
* @param[in] ioc memory for IOC
@@ -1704,26 +1701,25 @@ bfa_ioc_smem_read(struct bfa_ioc_s *ioc, void *tbuf, u32 soff, u32 sz)
return BFA_STATUS_FAILED;
}
- bfa_reg_write(ioc->ioc_regs.host_page_num_fn, pgnum);
+ writel(pgnum, ioc->ioc_regs.host_page_num_fn);
len = sz/sizeof(u32);
bfa_trc(ioc, len);
for (i = 0; i < len; i++) {
r32 = bfa_mem_read(ioc->ioc_regs.smem_page_start, loff);
- buf[i] = bfa_os_ntohl(r32);
+ buf[i] = be32_to_cpu(r32);
loff += sizeof(u32);
- /**
+ /*
* handle page offset wrap around
*/
loff = PSS_SMEM_PGOFF(loff);
if (loff == 0) {
pgnum++;
- bfa_reg_write(ioc->ioc_regs.host_page_num_fn, pgnum);
+ writel(pgnum, ioc->ioc_regs.host_page_num_fn);
}
}
- bfa_reg_write(ioc->ioc_regs.host_page_num_fn,
- bfa_ioc_smem_pgnum(ioc, 0));
+ writel(bfa_ioc_smem_pgnum(ioc, 0), ioc->ioc_regs.host_page_num_fn);
/*
* release semaphore.
*/
@@ -1733,7 +1729,7 @@ bfa_ioc_smem_read(struct bfa_ioc_s *ioc, void *tbuf, u32 soff, u32 sz)
return BFA_STATUS_OK;
}
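
bfa_ioc_smem_read() walks shared memory through a small page window: a page-number register selects which slice of SMEM is visible, and every time the in-page offset wraps back to 0 the window must be advanced. The kernel-style sketch below keeps only that paging skeleton; the window size, the page_off() helper and the plain readl() (the driver goes through its own bfa_mem_read() accessor) are assumptions for illustration.

#include <linux/io.h>
#include <linux/types.h>
#include <asm/byteorder.h>	/* be32_to_cpu() */

#define SMEM_PAGE_SZ	0x8000u		/* illustrative window size */

static inline u32 page_off(u32 off)
{
	return off & (SMEM_PAGE_SZ - 1);	/* offset within the window */
}

/* Copy 'words' 32-bit values from paged shared memory into 'buf'. */
static void smem_read(void __iomem *win, void __iomem *pgnum_reg,
		      u32 pgnum, u32 loff, u32 *buf, int words)
{
	int i;

	writel(pgnum, pgnum_reg);			/* select first page */
	for (i = 0; i < words; i++) {
		buf[i] = be32_to_cpu(readl(win + loff));
		loff = page_off(loff + sizeof(u32));
		if (loff == 0)				/* window exhausted: */
			writel(++pgnum, pgnum_reg);	/* advance the page  */
	}
}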
-/**
+/*
* Clear SMEM data from host through PCI memmap
*
* @param[in] ioc memory for IOC
@@ -1760,7 +1756,7 @@ bfa_ioc_smem_clr(struct bfa_ioc_s *ioc, u32 soff, u32 sz)
return BFA_STATUS_FAILED;
}
- bfa_reg_write(ioc->ioc_regs.host_page_num_fn, pgnum);
+ writel(pgnum, ioc->ioc_regs.host_page_num_fn);
len = sz/sizeof(u32); /* len in words */
bfa_trc(ioc, len);
@@ -1768,17 +1764,16 @@ bfa_ioc_smem_clr(struct bfa_ioc_s *ioc, u32 soff, u32 sz)
bfa_mem_write(ioc->ioc_regs.smem_page_start, loff, 0);
loff += sizeof(u32);
- /**
+ /*
* handle page offset wrap around
*/
loff = PSS_SMEM_PGOFF(loff);
if (loff == 0) {
pgnum++;
- bfa_reg_write(ioc->ioc_regs.host_page_num_fn, pgnum);
+ writel(pgnum, ioc->ioc_regs.host_page_num_fn);
}
}
- bfa_reg_write(ioc->ioc_regs.host_page_num_fn,
- bfa_ioc_smem_pgnum(ioc, 0));
+ writel(bfa_ioc_smem_pgnum(ioc, 0), ioc->ioc_regs.host_page_num_fn);
/*
* release semaphore.
@@ -1788,7 +1783,7 @@ bfa_ioc_smem_clr(struct bfa_ioc_s *ioc, u32 soff, u32 sz)
return BFA_STATUS_OK;
}
-/**
+/*
* hal iocpf to ioc interface
*/
static void
@@ -1813,7 +1808,7 @@ static void
bfa_ioc_pf_fwmismatch(struct bfa_ioc_s *ioc)
{
struct bfad_s *bfad = (struct bfad_s *)ioc->bfa->bfad;
- /**
+ /*
* Provide enable completion callback.
*/
ioc->cbfn->enable_cbfn(ioc->bfa, BFA_STATUS_IOC_FAILURE);
@@ -1824,7 +1819,7 @@ bfa_ioc_pf_fwmismatch(struct bfa_ioc_s *ioc)
-/**
+/*
* hal_ioc_public
*/
@@ -1848,43 +1843,43 @@ bfa_ioc_pll_init(struct bfa_ioc_s *ioc)
return BFA_STATUS_OK;
}
-/**
+/*
* Interface used by diag module to do firmware boot with memory test
* as the entry vector.
*/
void
bfa_ioc_boot(struct bfa_ioc_s *ioc, u32 boot_type, u32 boot_env)
{
- bfa_os_addr_t rb;
+ void __iomem *rb;
bfa_ioc_stats(ioc, ioc_boots);
if (bfa_ioc_pll_init(ioc) != BFA_STATUS_OK)
return;
- /**
+ /*
* Initialize IOC state of all functions on a chip reset.
*/
rb = ioc->pcidev.pci_bar_kva;
if (boot_type == BFI_BOOT_TYPE_MEMTEST) {
- bfa_reg_write((rb + BFA_IOC0_STATE_REG), BFI_IOC_MEMTEST);
- bfa_reg_write((rb + BFA_IOC1_STATE_REG), BFI_IOC_MEMTEST);
+ writel(BFI_IOC_MEMTEST, (rb + BFA_IOC0_STATE_REG));
+ writel(BFI_IOC_MEMTEST, (rb + BFA_IOC1_STATE_REG));
} else {
- bfa_reg_write((rb + BFA_IOC0_STATE_REG), BFI_IOC_INITING);
- bfa_reg_write((rb + BFA_IOC1_STATE_REG), BFI_IOC_INITING);
+ writel(BFI_IOC_INITING, (rb + BFA_IOC0_STATE_REG));
+ writel(BFI_IOC_INITING, (rb + BFA_IOC1_STATE_REG));
}
bfa_ioc_msgflush(ioc);
bfa_ioc_download_fw(ioc, boot_type, boot_env);
- /**
+ /*
* Enable interrupts just before starting LPU
*/
ioc->cbfn->reset_cbfn(ioc->bfa);
bfa_ioc_lpu_start(ioc);
}
-/**
+/*
* Enable/disable IOC failure auto recovery.
*/
void
@@ -1904,7 +1899,7 @@ bfa_ioc_is_operational(struct bfa_ioc_s *ioc)
bfa_boolean_t
bfa_ioc_is_initialized(struct bfa_ioc_s *ioc)
{
- u32 r32 = bfa_reg_read(ioc->ioc_regs.ioc_fwstate);
+ u32 r32 = readl(ioc->ioc_regs.ioc_fwstate);
return ((r32 != BFI_IOC_UNINIT) &&
(r32 != BFI_IOC_INITING) &&
@@ -1918,21 +1913,21 @@ bfa_ioc_msgget(struct bfa_ioc_s *ioc, void *mbmsg)
u32 r32;
int i;
- /**
+ /*
* read the MBOX msg
*/
for (i = 0; i < (sizeof(union bfi_ioc_i2h_msg_u) / sizeof(u32));
i++) {
- r32 = bfa_reg_read(ioc->ioc_regs.lpu_mbox +
+ r32 = readl(ioc->ioc_regs.lpu_mbox +
i * sizeof(u32));
- msgp[i] = bfa_os_htonl(r32);
+ msgp[i] = cpu_to_be32(r32);
}
- /**
+ /*
* turn off mailbox interrupt by clearing mailbox status
*/
- bfa_reg_write(ioc->ioc_regs.lpu_mbox_cmd, 1);
- bfa_reg_read(ioc->ioc_regs.lpu_mbox_cmd);
+ writel(1, ioc->ioc_regs.lpu_mbox_cmd);
+ readl(ioc->ioc_regs.lpu_mbox_cmd);
}
void
@@ -1971,7 +1966,7 @@ bfa_ioc_isr(struct bfa_ioc_s *ioc, struct bfi_mbmsg_s *m)
}
}
-/**
+/*
* IOC attach time initialization and setup.
*
* @param[in] ioc memory for IOC
@@ -1996,7 +1991,7 @@ bfa_ioc_attach(struct bfa_ioc_s *ioc, void *bfa, struct bfa_ioc_cbfn_s *cbfn,
bfa_fsm_send_event(ioc, IOC_E_RESET);
}
-/**
+/*
* Driver detach time IOC cleanup.
*/
void
@@ -2005,7 +2000,7 @@ bfa_ioc_detach(struct bfa_ioc_s *ioc)
bfa_fsm_send_event(ioc, IOC_E_DETACH);
}
-/**
+/*
* Setup IOC PCI properties.
*
* @param[in] pcidev PCI device information for this IOC
@@ -2019,7 +2014,7 @@ bfa_ioc_pci_init(struct bfa_ioc_s *ioc, struct bfa_pcidev_s *pcidev,
ioc->ctdev = bfa_asic_id_ct(ioc->pcidev.device_id);
ioc->cna = ioc->ctdev && !ioc->fcmode;
- /**
+ /*
* Set asic specific interfaces. See bfa_ioc_cb.c and bfa_ioc_ct.c
*/
if (ioc->ctdev)
@@ -2031,7 +2026,7 @@ bfa_ioc_pci_init(struct bfa_ioc_s *ioc, struct bfa_pcidev_s *pcidev,
bfa_ioc_reg_init(ioc);
}
-/**
+/*
* Initialize IOC dma memory
*
* @param[in] dm_kva kernel virtual address of IOC dma memory
@@ -2040,7 +2035,7 @@ bfa_ioc_pci_init(struct bfa_ioc_s *ioc, struct bfa_pcidev_s *pcidev,
void
bfa_ioc_mem_claim(struct bfa_ioc_s *ioc, u8 *dm_kva, u64 dm_pa)
{
- /**
+ /*
* dma memory for firmware attribute
*/
ioc->attr_dma.kva = dm_kva;
@@ -2048,7 +2043,7 @@ bfa_ioc_mem_claim(struct bfa_ioc_s *ioc, u8 *dm_kva, u64 dm_pa)
ioc->attr = (struct bfi_ioc_attr_s *) dm_kva;
}
-/**
+/*
* Return size of dma memory required.
*/
u32
@@ -2073,7 +2068,7 @@ bfa_ioc_disable(struct bfa_ioc_s *ioc)
bfa_fsm_send_event(ioc, IOC_E_DISABLE);
}
-/**
+/*
* Returns memory required for saving firmware trace in case of crash.
* Driver must call this interface to allocate memory required for
* automatic saving of firmware trace. Driver should call
@@ -2086,7 +2081,7 @@ bfa_ioc_debug_trcsz(bfa_boolean_t auto_recover)
return (auto_recover) ? BFA_DBG_FWTRC_LEN : 0;
}
-/**
+/*
* Initialize memory for saving firmware trace. Driver must initialize
* trace memory before calling bfa_ioc_enable().
*/
@@ -2109,7 +2104,7 @@ bfa_ioc_smem_pgoff(struct bfa_ioc_s *ioc, u32 fmaddr)
return PSS_SMEM_PGOFF(fmaddr);
}
-/**
+/*
* Register mailbox message handler functions
*
* @param[in] ioc IOC instance
@@ -2125,7 +2120,7 @@ bfa_ioc_mbox_register(struct bfa_ioc_s *ioc, bfa_ioc_mbox_mcfunc_t *mcfuncs)
mod->mbhdlr[mc].cbfn = mcfuncs[mc];
}
-/**
+/*
* Register mailbox message handler function, to be called by common modules
*/
void
@@ -2138,7 +2133,7 @@ bfa_ioc_mbox_regisr(struct bfa_ioc_s *ioc, enum bfi_mclass mc,
mod->mbhdlr[mc].cbarg = cbarg;
}
-/**
+/*
* Queue a mailbox command request to firmware. Waits if mailbox is busy.
* Responsibility of caller to serialize
*
@@ -2151,7 +2146,7 @@ bfa_ioc_mbox_queue(struct bfa_ioc_s *ioc, struct bfa_mbox_cmd_s *cmd)
struct bfa_ioc_mbox_mod_s *mod = &ioc->mbox_mod;
u32 stat;
- /**
+ /*
* If a previous command is pending, queue new command
*/
if (!list_empty(&mod->cmd_q)) {
@@ -2159,22 +2154,22 @@ bfa_ioc_mbox_queue(struct bfa_ioc_s *ioc, struct bfa_mbox_cmd_s *cmd)
return;
}
- /**
+ /*
* If mailbox is busy, queue command for poll timer
*/
- stat = bfa_reg_read(ioc->ioc_regs.hfn_mbox_cmd);
+ stat = readl(ioc->ioc_regs.hfn_mbox_cmd);
if (stat) {
list_add_tail(&cmd->qe, &mod->cmd_q);
return;
}
- /**
+ /*
* mailbox is free -- queue command to firmware
*/
bfa_ioc_mbox_send(ioc, cmd->msg, sizeof(cmd->msg));
}
-/**
+/*
* Handle mailbox interrupts
*/
void
@@ -2186,7 +2181,7 @@ bfa_ioc_mbox_isr(struct bfa_ioc_s *ioc)
bfa_ioc_msgget(ioc, &m);
- /**
+ /*
* Treat IOC message class as special.
*/
mc = m.mh.msg_class;
@@ -2214,7 +2209,7 @@ bfa_ioc_set_fcmode(struct bfa_ioc_s *ioc)
ioc->port_id = bfa_ioc_pcifn(ioc);
}
-/**
+/*
* return true if IOC is disabled
*/
bfa_boolean_t
@@ -2224,7 +2219,7 @@ bfa_ioc_is_disabled(struct bfa_ioc_s *ioc)
bfa_fsm_cmp_state(ioc, bfa_ioc_sm_disabled);
}
-/**
+/*
* return true if IOC firmware is different.
*/
bfa_boolean_t
@@ -2243,7 +2238,7 @@ bfa_ioc_fw_mismatch(struct bfa_ioc_s *ioc)
((__sm) == BFI_IOC_FAIL) || \
((__sm) == BFI_IOC_CFG_DISABLED))
-/**
+/*
* Check if adapter is disabled -- both IOCs should be in a disabled
* state.
*/
@@ -2251,17 +2246,17 @@ bfa_boolean_t
bfa_ioc_adapter_is_disabled(struct bfa_ioc_s *ioc)
{
u32 ioc_state;
- bfa_os_addr_t rb = ioc->pcidev.pci_bar_kva;
+ void __iomem *rb = ioc->pcidev.pci_bar_kva;
if (!bfa_fsm_cmp_state(ioc, bfa_ioc_sm_disabled))
return BFA_FALSE;
- ioc_state = bfa_reg_read(rb + BFA_IOC0_STATE_REG);
+ ioc_state = readl(rb + BFA_IOC0_STATE_REG);
if (!bfa_ioc_state_disabled(ioc_state))
return BFA_FALSE;
if (ioc->pcidev.device_id != BFA_PCI_DEVICE_ID_FC_8G1P) {
- ioc_state = bfa_reg_read(rb + BFA_IOC1_STATE_REG);
+ ioc_state = readl(rb + BFA_IOC1_STATE_REG);
if (!bfa_ioc_state_disabled(ioc_state))
return BFA_FALSE;
}
@@ -2269,7 +2264,7 @@ bfa_ioc_adapter_is_disabled(struct bfa_ioc_s *ioc)
return BFA_TRUE;
}
-/**
+/*
* Add to IOC heartbeat failure notification queue. To be used by common
* modules such as cee, port, diag.
*/
@@ -2293,7 +2288,7 @@ bfa_ioc_get_adapter_attr(struct bfa_ioc_s *ioc,
bfa_ioc_get_adapter_fw_ver(ioc, ad_attr->fw_ver);
bfa_ioc_get_adapter_optrom_ver(ioc, ad_attr->optrom_ver);
bfa_ioc_get_adapter_manufacturer(ioc, ad_attr->manufacturer);
- bfa_os_memcpy(&ad_attr->vpd, &ioc_attr->vpd,
+ memcpy(&ad_attr->vpd, &ioc_attr->vpd,
sizeof(struct bfa_mfg_vpd_s));
ad_attr->nports = bfa_ioc_get_nports(ioc);
@@ -2343,8 +2338,8 @@ bfa_ioc_get_type(struct bfa_ioc_s *ioc)
void
bfa_ioc_get_adapter_serial_num(struct bfa_ioc_s *ioc, char *serial_num)
{
- bfa_os_memset((void *)serial_num, 0, BFA_ADAPTER_SERIAL_NUM_LEN);
- bfa_os_memcpy((void *)serial_num,
+ memset((void *)serial_num, 0, BFA_ADAPTER_SERIAL_NUM_LEN);
+ memcpy((void *)serial_num,
(void *)ioc->attr->brcd_serialnum,
BFA_ADAPTER_SERIAL_NUM_LEN);
}
@@ -2352,8 +2347,8 @@ bfa_ioc_get_adapter_serial_num(struct bfa_ioc_s *ioc, char *serial_num)
void
bfa_ioc_get_adapter_fw_ver(struct bfa_ioc_s *ioc, char *fw_ver)
{
- bfa_os_memset((void *)fw_ver, 0, BFA_VERSION_LEN);
- bfa_os_memcpy(fw_ver, ioc->attr->fw_version, BFA_VERSION_LEN);
+ memset((void *)fw_ver, 0, BFA_VERSION_LEN);
+ memcpy(fw_ver, ioc->attr->fw_version, BFA_VERSION_LEN);
}
void
@@ -2361,7 +2356,7 @@ bfa_ioc_get_pci_chip_rev(struct bfa_ioc_s *ioc, char *chip_rev)
{
bfa_assert(chip_rev);
- bfa_os_memset((void *)chip_rev, 0, BFA_IOC_CHIP_REV_LEN);
+ memset((void *)chip_rev, 0, BFA_IOC_CHIP_REV_LEN);
chip_rev[0] = 'R';
chip_rev[1] = 'e';
@@ -2374,16 +2369,16 @@ bfa_ioc_get_pci_chip_rev(struct bfa_ioc_s *ioc, char *chip_rev)
void
bfa_ioc_get_adapter_optrom_ver(struct bfa_ioc_s *ioc, char *optrom_ver)
{
- bfa_os_memset((void *)optrom_ver, 0, BFA_VERSION_LEN);
- bfa_os_memcpy(optrom_ver, ioc->attr->optrom_version,
+ memset((void *)optrom_ver, 0, BFA_VERSION_LEN);
+ memcpy(optrom_ver, ioc->attr->optrom_version,
BFA_VERSION_LEN);
}
void
bfa_ioc_get_adapter_manufacturer(struct bfa_ioc_s *ioc, char *manufacturer)
{
- bfa_os_memset((void *)manufacturer, 0, BFA_ADAPTER_MFG_NAME_LEN);
- bfa_os_memcpy(manufacturer, BFA_MFG_NAME, BFA_ADAPTER_MFG_NAME_LEN);
+ memset((void *)manufacturer, 0, BFA_ADAPTER_MFG_NAME_LEN);
+ memcpy(manufacturer, BFA_MFG_NAME, BFA_ADAPTER_MFG_NAME_LEN);
}
void
@@ -2392,14 +2387,14 @@ bfa_ioc_get_adapter_model(struct bfa_ioc_s *ioc, char *model)
struct bfi_ioc_attr_s *ioc_attr;
bfa_assert(model);
- bfa_os_memset((void *)model, 0, BFA_ADAPTER_MODEL_NAME_LEN);
+ memset((void *)model, 0, BFA_ADAPTER_MODEL_NAME_LEN);
ioc_attr = ioc->attr;
- /**
+ /*
* model name
*/
- bfa_os_snprintf(model, BFA_ADAPTER_MODEL_NAME_LEN, "%s-%u",
+ snprintf(model, BFA_ADAPTER_MODEL_NAME_LEN, "%s-%u",
BFA_MFG_NAME, ioc_attr->card_type);
}
@@ -2446,7 +2441,7 @@ bfa_ioc_get_state(struct bfa_ioc_s *ioc)
void
bfa_ioc_get_attr(struct bfa_ioc_s *ioc, struct bfa_ioc_attr_s *ioc_attr)
{
- bfa_os_memset((void *)ioc_attr, 0, sizeof(struct bfa_ioc_attr_s));
+ memset((void *)ioc_attr, 0, sizeof(struct bfa_ioc_attr_s));
ioc_attr->state = bfa_ioc_get_state(ioc);
ioc_attr->port_id = ioc->port_id;
@@ -2460,7 +2455,7 @@ bfa_ioc_get_attr(struct bfa_ioc_s *ioc, struct bfa_ioc_attr_s *ioc_attr)
bfa_ioc_get_pci_chip_rev(ioc, ioc_attr->pci_attr.chip_rev);
}
-/**
+/*
* hal_wwn_public
*/
wwn_t
@@ -2526,7 +2521,7 @@ bfa_ioc_get_fcmode(struct bfa_ioc_s *ioc)
return ioc->fcmode || !bfa_asic_id_ct(ioc->pcidev.device_id);
}
-/**
+/*
* Retrieve saved firmware trace from a prior IOC failure.
*/
bfa_status_t
@@ -2541,12 +2536,12 @@ bfa_ioc_debug_fwsave(struct bfa_ioc_s *ioc, void *trcdata, int *trclen)
if (tlen > ioc->dbg_fwsave_len)
tlen = ioc->dbg_fwsave_len;
- bfa_os_memcpy(trcdata, ioc->dbg_fwsave, tlen);
+ memcpy(trcdata, ioc->dbg_fwsave, tlen);
*trclen = tlen;
return BFA_STATUS_OK;
}
-/**
+/*
* Clear saved firmware trace
*/
void
@@ -2555,7 +2550,7 @@ bfa_ioc_debug_fwsave_clear(struct bfa_ioc_s *ioc)
ioc->dbg_fwsave_once = BFA_TRUE;
}
-/**
+/*
* Retrieve saved firmware trace from a prior IOC failure.
*/
bfa_status_t
@@ -2595,7 +2590,7 @@ bfa_ioc_fwsync(struct bfa_ioc_s *ioc)
bfa_ioc_send_fwsync(ioc);
- /**
+ /*
* After sending a fw sync mbox command wait for it to
* take effect. We will not wait for a response because
* 1. fw_sync mbox cmd doesn't have a response.
@@ -2610,7 +2605,7 @@ bfa_ioc_fwsync(struct bfa_ioc_s *ioc)
fwsync_iter--;
}
-/**
+/*
* Dump firmware smem
*/
bfa_status_t
@@ -2630,7 +2625,7 @@ bfa_ioc_debug_fwcore(struct bfa_ioc_s *ioc, void *buf,
loff = *offset;
dlen = *buflen;
- /**
+ /*
* First smem read, sync smem before proceeding
* No need to sync before reading every chunk.
*/
@@ -2657,7 +2652,7 @@ bfa_ioc_debug_fwcore(struct bfa_ioc_s *ioc, void *buf,
return status;
}
-/**
+/*
* Firmware statistics
*/
bfa_status_t
@@ -2702,7 +2697,7 @@ bfa_ioc_fw_stats_clear(struct bfa_ioc_s *ioc)
return status;
}
-/**
+/*
* Save firmware trace if configured.
*/
static void
@@ -2716,7 +2711,7 @@ bfa_ioc_debug_save(struct bfa_ioc_s *ioc)
}
}
-/**
+/*
* Firmware failure detected. Start recovery actions.
*/
static void
@@ -2738,7 +2733,7 @@ bfa_ioc_check_attr_wwns(struct bfa_ioc_s *ioc)
return;
}
-/**
+/*
* hal_iocpf_pvt BFA IOC PF private functions
*/
@@ -2795,7 +2790,7 @@ bfa_iocpf_sem_timeout(void *ioc_arg)
bfa_ioc_hw_sem_get(ioc);
}
-/**
+/*
* bfa timer function
*/
void
@@ -2840,7 +2835,7 @@ bfa_timer_beat(struct bfa_timer_mod_s *mod)
}
}
-/**
+/*
* Should be called with lock protection
*/
void
@@ -2858,7 +2853,7 @@ bfa_timer_begin(struct bfa_timer_mod_s *mod, struct bfa_timer_s *timer,
list_add_tail(&timer->qe, &mod->timer_q);
}
-/**
+/*
* Should be called with lock protection
*/
void
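
The bfa_ioc.c hunks above drop the driver's bfa_reg_read()/bfa_reg_write() and bfa_os_mem*/bfa_os_snprintf wrappers in favor of the kernel's readl()/writel(), memset()/memcpy() and snprintf(). A minimal sketch of the resulting register-access idiom follows; demo_mbox_poll() and DEMO_MBOX_CMD are invented for illustration and are not part of the patch.

#include <linux/io.h>
#include <linux/types.h>

#define DEMO_MBOX_CMD	0x0010	/* hypothetical register offset */

static bool demo_mbox_poll(void __iomem *bar0)
{
	u32 stat;

	/* readl() is a 32-bit MMIO read from an ioremap()ed BAR */
	stat = readl(bar0 + DEMO_MBOX_CMD);
	if (stat)
		return false;	/* mailbox still busy */

	/*
	 * writel(value, address): note the argument order is reversed
	 * relative to the removed bfa_reg_write(address, value) wrapper.
	 */
	writel(1, bar0 + DEMO_MBOX_CMD);
	return true;
}
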
diff --git a/drivers/scsi/bfa/bfa_ioc.h b/drivers/scsi/bfa/bfa_ioc.h
index 288c5801aace..9c407a87a1a1 100644
--- a/drivers/scsi/bfa/bfa_ioc.h
+++ b/drivers/scsi/bfa/bfa_ioc.h
@@ -22,29 +22,29 @@
#include "bfa_cs.h"
#include "bfi.h"
-/**
+/*
* BFA timer declarations
*/
typedef void (*bfa_timer_cbfn_t)(void *);
-/**
+/*
* BFA timer data structure
*/
struct bfa_timer_s {
struct list_head qe;
bfa_timer_cbfn_t timercb;
void *arg;
- int timeout; /**< in millisecs. */
+ int timeout; /* in millisecs */
};
-/**
+/*
* Timer module structure
*/
struct bfa_timer_mod_s {
struct list_head timer_q;
};
-#define BFA_TIMER_FREQ 200 /**< specified in millisecs */
+#define BFA_TIMER_FREQ 200 /* specified in millisecs */
void bfa_timer_beat(struct bfa_timer_mod_s *mod);
void bfa_timer_init(struct bfa_timer_mod_s *mod);
@@ -53,7 +53,7 @@ void bfa_timer_begin(struct bfa_timer_mod_s *mod, struct bfa_timer_s *timer,
unsigned int timeout);
void bfa_timer_stop(struct bfa_timer_s *timer);
-/**
+/*
* Generic Scatter Gather Element used by driver
*/
struct bfa_sge_s {
@@ -62,9 +62,9 @@ struct bfa_sge_s {
};
#define bfa_sge_word_swap(__sge) do { \
- ((u32 *)(__sge))[0] = bfa_os_swap32(((u32 *)(__sge))[0]); \
- ((u32 *)(__sge))[1] = bfa_os_swap32(((u32 *)(__sge))[1]); \
- ((u32 *)(__sge))[2] = bfa_os_swap32(((u32 *)(__sge))[2]); \
+ ((u32 *)(__sge))[0] = swab32(((u32 *)(__sge))[0]); \
+ ((u32 *)(__sge))[1] = swab32(((u32 *)(__sge))[1]); \
+ ((u32 *)(__sge))[2] = swab32(((u32 *)(__sge))[2]); \
} while (0)
#define bfa_swap_words(_x) ( \
@@ -80,17 +80,17 @@ struct bfa_sge_s {
#define bfa_sgaddr_le(_x) (_x)
#endif
-/**
+/*
* PCI device information required by IOC
*/
struct bfa_pcidev_s {
int pci_slot;
u8 pci_func;
- u16 device_id;
- bfa_os_addr_t pci_bar_kva;
+ u16 device_id;
+ void __iomem *pci_bar_kva;
};
-/**
+/*
* Structure used to remember the DMA-able memory block's KVA and Physical
* Address
*/
@@ -102,7 +102,7 @@ struct bfa_dma_s {
#define BFA_DMA_ALIGN_SZ 256
#define BFA_ROUNDUP(_l, _s) (((_l) + ((_s) - 1)) & ~((_s) - 1))
-/**
+/*
* smem size for Crossbow and Catapult
*/
#define BFI_SMEM_CB_SIZE 0x200000U /* ! 2MB for crossbow */
@@ -125,40 +125,38 @@ __bfa_dma_addr_set(union bfi_addr_u *dma_addr, u64 pa)
static inline void
__bfa_dma_be_addr_set(union bfi_addr_u *dma_addr, u64 pa)
{
- dma_addr->a32.addr_lo = (u32) bfa_os_htonl(pa);
- dma_addr->a32.addr_hi = (u32) bfa_os_htonl(bfa_os_u32(pa));
+ dma_addr->a32.addr_lo = (u32) cpu_to_be32(pa);
+ dma_addr->a32.addr_hi = (u32) cpu_to_be32(bfa_os_u32(pa));
}
struct bfa_ioc_regs_s {
- bfa_os_addr_t hfn_mbox_cmd;
- bfa_os_addr_t hfn_mbox;
- bfa_os_addr_t lpu_mbox_cmd;
- bfa_os_addr_t lpu_mbox;
- bfa_os_addr_t pss_ctl_reg;
- bfa_os_addr_t pss_err_status_reg;
- bfa_os_addr_t app_pll_fast_ctl_reg;
- bfa_os_addr_t app_pll_slow_ctl_reg;
- bfa_os_addr_t ioc_sem_reg;
- bfa_os_addr_t ioc_usage_sem_reg;
- bfa_os_addr_t ioc_init_sem_reg;
- bfa_os_addr_t ioc_usage_reg;
- bfa_os_addr_t host_page_num_fn;
- bfa_os_addr_t heartbeat;
- bfa_os_addr_t ioc_fwstate;
- bfa_os_addr_t ll_halt;
- bfa_os_addr_t err_set;
- bfa_os_addr_t shirq_isr_next;
- bfa_os_addr_t shirq_msk_next;
- bfa_os_addr_t smem_page_start;
+ void __iomem *hfn_mbox_cmd;
+ void __iomem *hfn_mbox;
+ void __iomem *lpu_mbox_cmd;
+ void __iomem *lpu_mbox;
+ void __iomem *pss_ctl_reg;
+ void __iomem *pss_err_status_reg;
+ void __iomem *app_pll_fast_ctl_reg;
+ void __iomem *app_pll_slow_ctl_reg;
+ void __iomem *ioc_sem_reg;
+ void __iomem *ioc_usage_sem_reg;
+ void __iomem *ioc_init_sem_reg;
+ void __iomem *ioc_usage_reg;
+ void __iomem *host_page_num_fn;
+ void __iomem *heartbeat;
+ void __iomem *ioc_fwstate;
+ void __iomem *ll_halt;
+ void __iomem *err_set;
+ void __iomem *shirq_isr_next;
+ void __iomem *shirq_msk_next;
+ void __iomem *smem_page_start;
u32 smem_pg0;
};
-#define bfa_reg_read(_raddr) bfa_os_reg_read(_raddr)
-#define bfa_reg_write(_raddr, _val) bfa_os_reg_write(_raddr, _val)
-#define bfa_mem_read(_raddr, _off) bfa_os_mem_read(_raddr, _off)
+#define bfa_mem_read(_raddr, _off) swab32(readl(((_raddr) + (_off))))
#define bfa_mem_write(_raddr, _off, _val) \
- bfa_os_mem_write(_raddr, _off, _val)
-/**
+ writel(swab32((_val)), ((_raddr) + (_off)))
+/*
* IOC Mailbox structures
*/
struct bfa_mbox_cmd_s {
@@ -166,7 +164,7 @@ struct bfa_mbox_cmd_s {
u32 msg[BFI_IOC_MSGSZ];
};
-/**
+/*
* IOC mailbox module
*/
typedef void (*bfa_ioc_mbox_mcfunc_t)(void *cbarg, struct bfi_mbmsg_s *m);
@@ -179,7 +177,7 @@ struct bfa_ioc_mbox_mod_s {
} mbhdlr[BFI_MC_MAX];
};
-/**
+/*
* IOC callback function interfaces
*/
typedef void (*bfa_ioc_enable_cbfn_t)(void *bfa, enum bfa_status status);
@@ -193,7 +191,7 @@ struct bfa_ioc_cbfn_s {
bfa_ioc_reset_cbfn_t reset_cbfn;
};
-/**
+/*
* Heartbeat failure notification queue element.
*/
struct bfa_ioc_hbfail_notify_s {
@@ -202,7 +200,7 @@ struct bfa_ioc_hbfail_notify_s {
void *cbarg;
};
-/**
+/*
* Initialize a heartbeat failure notification structure
*/
#define bfa_ioc_hbfail_init(__notify, __cbfn, __cbarg) do { \
@@ -249,7 +247,7 @@ struct bfa_ioc_s {
};
struct bfa_ioc_hwif_s {
- bfa_status_t (*ioc_pll_init) (bfa_os_addr_t rb, bfa_boolean_t fcmode);
+ bfa_status_t (*ioc_pll_init) (void __iomem *rb, bfa_boolean_t fcmode);
bfa_boolean_t (*ioc_firmware_lock) (struct bfa_ioc_s *ioc);
void (*ioc_firmware_unlock) (struct bfa_ioc_s *ioc);
void (*ioc_reg_init) (struct bfa_ioc_s *ioc);
@@ -267,7 +265,7 @@ struct bfa_ioc_hwif_s {
#define bfa_ioc_fetch_stats(__ioc, __stats) \
(((__stats)->drv_stats) = (__ioc)->stats)
#define bfa_ioc_clr_stats(__ioc) \
- bfa_os_memset(&(__ioc)->stats, 0, sizeof((__ioc)->stats))
+ memset(&(__ioc)->stats, 0, sizeof((__ioc)->stats))
#define bfa_ioc_maxfrsize(__ioc) ((__ioc)->attr->maxfrsize)
#define bfa_ioc_rx_bbcredit(__ioc) ((__ioc)->attr->rx_bbcredit)
#define bfa_ioc_speed_sup(__ioc) \
@@ -287,7 +285,7 @@ struct bfa_ioc_hwif_s {
#define BFA_IOC_FLASH_OFFSET_IN_CHUNK(off) (off % BFI_FLASH_CHUNK_SZ_WORDS)
#define BFA_IOC_FLASH_CHUNK_ADDR(chunkno) (chunkno * BFI_FLASH_CHUNK_SZ_WORDS)
-/**
+/*
* IOC mailbox interface
*/
void bfa_ioc_mbox_queue(struct bfa_ioc_s *ioc, struct bfa_mbox_cmd_s *cmd);
@@ -299,7 +297,7 @@ void bfa_ioc_msgget(struct bfa_ioc_s *ioc, void *mbmsg);
void bfa_ioc_mbox_regisr(struct bfa_ioc_s *ioc, enum bfi_mclass mc,
bfa_ioc_mbox_mcfunc_t cbfn, void *cbarg);
-/**
+/*
* IOC interfaces
*/
@@ -308,9 +306,9 @@ void bfa_ioc_mbox_regisr(struct bfa_ioc_s *ioc, enum bfi_mclass mc,
(__ioc)->fcmode))
bfa_status_t bfa_ioc_pll_init(struct bfa_ioc_s *ioc);
-bfa_status_t bfa_ioc_cb_pll_init(bfa_os_addr_t rb, bfa_boolean_t fcmode);
-bfa_boolean_t bfa_ioc_ct_pll_init_complete(bfa_os_addr_t rb);
-bfa_status_t bfa_ioc_ct_pll_init(bfa_os_addr_t rb, bfa_boolean_t fcmode);
+bfa_status_t bfa_ioc_cb_pll_init(void __iomem *rb, bfa_boolean_t fcmode);
+bfa_boolean_t bfa_ioc_ct_pll_init_complete(void __iomem *rb);
+bfa_status_t bfa_ioc_ct_pll_init(void __iomem *rb, bfa_boolean_t fcmode);
#define bfa_ioc_isr_mode_set(__ioc, __msix) \
((__ioc)->ioc_hwif->ioc_isr_mode_set(__ioc, __msix))
@@ -370,8 +368,8 @@ void bfa_ioc_set_fcmode(struct bfa_ioc_s *ioc);
bfa_boolean_t bfa_ioc_get_fcmode(struct bfa_ioc_s *ioc);
void bfa_ioc_hbfail_register(struct bfa_ioc_s *ioc,
struct bfa_ioc_hbfail_notify_s *notify);
-bfa_boolean_t bfa_ioc_sem_get(bfa_os_addr_t sem_reg);
-void bfa_ioc_sem_release(bfa_os_addr_t sem_reg);
+bfa_boolean_t bfa_ioc_sem_get(void __iomem *sem_reg);
+void bfa_ioc_sem_release(void __iomem *sem_reg);
void bfa_ioc_hw_sem_release(struct bfa_ioc_s *ioc);
void bfa_ioc_fwver_get(struct bfa_ioc_s *ioc,
struct bfi_ioc_image_hdr_s *fwhdr);
@@ -441,7 +439,7 @@ bfa_cb_image_get_size(int type)
}
}
-/**
+/*
* CNA TRCMOD declaration
*/
/*
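
In bfa_ioc.h the bfa_os_addr_t typedef gives way to plain void __iomem * pointers, and bfa_mem_read()/bfa_mem_write() are redefined as readl()/writel() combined with swab32(), presumably because adapter shared memory is kept byte-swapped relative to the host. A hedged sketch of a word-by-word SMEM copy built on that idea; demo_smem_copy() is an invented helper, not driver API.

#include <linux/io.h>
#include <linux/swab.h>
#include <linux/types.h>

static void demo_smem_copy(void __iomem *smem, u32 *dst, int nwords)
{
	int i;

	/* each iteration mirrors bfa_mem_read(smem, i * sizeof(u32)) */
	for (i = 0; i < nwords; i++)
		dst[i] = swab32(readl(smem + i * sizeof(u32)));
}
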
diff --git a/drivers/scsi/bfa/bfa_ioc_cb.c b/drivers/scsi/bfa/bfa_ioc_cb.c
index d7ac864d8539..909945043850 100644
--- a/drivers/scsi/bfa/bfa_ioc_cb.c
+++ b/drivers/scsi/bfa/bfa_ioc_cb.c
@@ -34,7 +34,7 @@ static void bfa_ioc_cb_ownership_reset(struct bfa_ioc_s *ioc);
struct bfa_ioc_hwif_s hwif_cb;
-/**
+/*
* Called from bfa_ioc_attach() to map asic specific calls.
*/
void
@@ -52,7 +52,7 @@ bfa_ioc_set_cb_hwif(struct bfa_ioc_s *ioc)
ioc->ioc_hwif = &hwif_cb;
}
-/**
+/*
* Return true if firmware of current driver matches the running firmware.
*/
static bfa_boolean_t
@@ -66,17 +66,17 @@ bfa_ioc_cb_firmware_unlock(struct bfa_ioc_s *ioc)
{
}
-/**
+/*
* Notify other functions on HB failure.
*/
static void
bfa_ioc_cb_notify_hbfail(struct bfa_ioc_s *ioc)
{
- bfa_reg_write(ioc->ioc_regs.err_set, __PSS_ERR_STATUS_SET);
- bfa_reg_read(ioc->ioc_regs.err_set);
+ writel(__PSS_ERR_STATUS_SET, ioc->ioc_regs.err_set);
+ readl(ioc->ioc_regs.err_set);
}
-/**
+/*
* Host to LPU mailbox message addresses
*/
static struct { u32 hfn_mbox, lpu_mbox, hfn_pgn; } iocreg_fnreg[] = {
@@ -84,7 +84,7 @@ static struct { u32 hfn_mbox, lpu_mbox, hfn_pgn; } iocreg_fnreg[] = {
{ HOSTFN1_LPU_MBOX0_8, LPU_HOSTFN1_MBOX0_8, HOST_PAGE_NUM_FN1 }
};
-/**
+/*
* Host <-> LPU mailbox command/status registers
*/
static struct { u32 hfn, lpu; } iocreg_mbcmd[] = {
@@ -96,7 +96,7 @@ static struct { u32 hfn, lpu; } iocreg_mbcmd[] = {
static void
bfa_ioc_cb_reg_init(struct bfa_ioc_s *ioc)
{
- bfa_os_addr_t rb;
+ void __iomem *rb;
int pcifn = bfa_ioc_pcifn(ioc);
rb = bfa_ioc_bar0(ioc);
@@ -113,7 +113,7 @@ bfa_ioc_cb_reg_init(struct bfa_ioc_s *ioc)
ioc->ioc_regs.ioc_fwstate = (rb + BFA_IOC1_STATE_REG);
}
- /**
+ /*
* Host <-> LPU mailbox command/status registers
*/
ioc->ioc_regs.hfn_mbox_cmd = rb + iocreg_mbcmd[pcifn].hfn;
@@ -133,7 +133,7 @@ bfa_ioc_cb_reg_init(struct bfa_ioc_s *ioc)
ioc->ioc_regs.ioc_sem_reg = (rb + HOST_SEM0_REG);
ioc->ioc_regs.ioc_init_sem_reg = (rb + HOST_SEM2_REG);
- /**
+ /*
* sram memory access
*/
ioc->ioc_regs.smem_page_start = (rb + PSS_SMEM_PAGE_START);
@@ -145,14 +145,14 @@ bfa_ioc_cb_reg_init(struct bfa_ioc_s *ioc)
ioc->ioc_regs.err_set = (rb + ERR_SET_REG);
}
-/**
+/*
* Initialize IOC to port mapping.
*/
static void
bfa_ioc_cb_map_port(struct bfa_ioc_s *ioc)
{
- /**
+ /*
* For crossbow, port id is the same as the pci function.
*/
ioc->port_id = bfa_ioc_pcifn(ioc);
@@ -160,7 +160,7 @@ bfa_ioc_cb_map_port(struct bfa_ioc_s *ioc)
bfa_trc(ioc, ioc->port_id);
}
-/**
+/*
* Set interrupt mode for a function: INTX or MSIX
*/
static void
@@ -168,7 +168,7 @@ bfa_ioc_cb_isr_mode_set(struct bfa_ioc_s *ioc, bfa_boolean_t msix)
{
}
-/**
+/*
* Cleanup hw semaphore and usecnt registers
*/
static void
@@ -180,14 +180,14 @@ bfa_ioc_cb_ownership_reset(struct bfa_ioc_s *ioc)
* before we clear it. If it is not locked, writing 1
* will lock it instead of clearing it.
*/
- bfa_reg_read(ioc->ioc_regs.ioc_sem_reg);
+ readl(ioc->ioc_regs.ioc_sem_reg);
bfa_ioc_hw_sem_release(ioc);
}
bfa_status_t
-bfa_ioc_cb_pll_init(bfa_os_addr_t rb, bfa_boolean_t fcmode)
+bfa_ioc_cb_pll_init(void __iomem *rb, bfa_boolean_t fcmode)
{
u32 pll_sclk, pll_fclk;
@@ -199,38 +199,32 @@ bfa_ioc_cb_pll_init(bfa_os_addr_t rb, bfa_boolean_t fcmode)
__APP_PLL_400_RSEL200500 | __APP_PLL_400_P0_1(3U) |
__APP_PLL_400_JITLMT0_1(3U) |
__APP_PLL_400_CNTLMT0_1(3U);
- bfa_reg_write((rb + BFA_IOC0_STATE_REG), BFI_IOC_UNINIT);
- bfa_reg_write((rb + BFA_IOC1_STATE_REG), BFI_IOC_UNINIT);
- bfa_reg_write((rb + HOSTFN0_INT_MSK), 0xffffffffU);
- bfa_reg_write((rb + HOSTFN1_INT_MSK), 0xffffffffU);
- bfa_reg_write((rb + HOSTFN0_INT_STATUS), 0xffffffffU);
- bfa_reg_write((rb + HOSTFN1_INT_STATUS), 0xffffffffU);
- bfa_reg_write((rb + HOSTFN0_INT_MSK), 0xffffffffU);
- bfa_reg_write((rb + HOSTFN1_INT_MSK), 0xffffffffU);
- bfa_reg_write(rb + APP_PLL_212_CTL_REG,
- __APP_PLL_212_LOGIC_SOFT_RESET);
- bfa_reg_write(rb + APP_PLL_212_CTL_REG,
- __APP_PLL_212_BYPASS |
- __APP_PLL_212_LOGIC_SOFT_RESET);
- bfa_reg_write(rb + APP_PLL_400_CTL_REG,
- __APP_PLL_400_LOGIC_SOFT_RESET);
- bfa_reg_write(rb + APP_PLL_400_CTL_REG,
- __APP_PLL_400_BYPASS |
- __APP_PLL_400_LOGIC_SOFT_RESET);
- bfa_os_udelay(2);
- bfa_reg_write(rb + APP_PLL_212_CTL_REG,
- __APP_PLL_212_LOGIC_SOFT_RESET);
- bfa_reg_write(rb + APP_PLL_400_CTL_REG,
- __APP_PLL_400_LOGIC_SOFT_RESET);
- bfa_reg_write(rb + APP_PLL_212_CTL_REG,
- pll_sclk | __APP_PLL_212_LOGIC_SOFT_RESET);
- bfa_reg_write(rb + APP_PLL_400_CTL_REG,
- pll_fclk | __APP_PLL_400_LOGIC_SOFT_RESET);
- bfa_os_udelay(2000);
- bfa_reg_write((rb + HOSTFN0_INT_STATUS), 0xffffffffU);
- bfa_reg_write((rb + HOSTFN1_INT_STATUS), 0xffffffffU);
- bfa_reg_write((rb + APP_PLL_212_CTL_REG), pll_sclk);
- bfa_reg_write((rb + APP_PLL_400_CTL_REG), pll_fclk);
+ writel(BFI_IOC_UNINIT, (rb + BFA_IOC0_STATE_REG));
+ writel(BFI_IOC_UNINIT, (rb + BFA_IOC1_STATE_REG));
+ writel(0xffffffffU, (rb + HOSTFN0_INT_MSK));
+ writel(0xffffffffU, (rb + HOSTFN1_INT_MSK));
+ writel(0xffffffffU, (rb + HOSTFN0_INT_STATUS));
+ writel(0xffffffffU, (rb + HOSTFN1_INT_STATUS));
+ writel(0xffffffffU, (rb + HOSTFN0_INT_MSK));
+ writel(0xffffffffU, (rb + HOSTFN1_INT_MSK));
+ writel(__APP_PLL_212_LOGIC_SOFT_RESET, rb + APP_PLL_212_CTL_REG);
+ writel(__APP_PLL_212_BYPASS | __APP_PLL_212_LOGIC_SOFT_RESET,
+ rb + APP_PLL_212_CTL_REG);
+ writel(__APP_PLL_400_LOGIC_SOFT_RESET, rb + APP_PLL_400_CTL_REG);
+ writel(__APP_PLL_400_BYPASS | __APP_PLL_400_LOGIC_SOFT_RESET,
+ rb + APP_PLL_400_CTL_REG);
+ udelay(2);
+ writel(__APP_PLL_212_LOGIC_SOFT_RESET, rb + APP_PLL_212_CTL_REG);
+ writel(__APP_PLL_400_LOGIC_SOFT_RESET, rb + APP_PLL_400_CTL_REG);
+ writel(pll_sclk | __APP_PLL_212_LOGIC_SOFT_RESET,
+ rb + APP_PLL_212_CTL_REG);
+ writel(pll_fclk | __APP_PLL_400_LOGIC_SOFT_RESET,
+ rb + APP_PLL_400_CTL_REG);
+ udelay(2000);
+ writel(0xffffffffU, (rb + HOSTFN0_INT_STATUS));
+ writel(0xffffffffU, (rb + HOSTFN1_INT_STATUS));
+ writel(pll_sclk, (rb + APP_PLL_212_CTL_REG));
+ writel(pll_fclk, (rb + APP_PLL_400_CTL_REG));
return BFA_STATUS_OK;
}
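
The bfa_ioc_cb.c changes keep the existing write-then-read-back idiom from bfa_ioc_cb_notify_hbfail(), now spelled with writel()/readl(): the dummy read flushes the posted PCI write to the device. A small sketch under an invented offset name (DEMO_ERR_SET).

#include <linux/io.h>

#define DEMO_ERR_SET	0x0140	/* placeholder offset */

static void demo_raise_error(void __iomem *rb)
{
	writel(1, rb + DEMO_ERR_SET);	/* PCI write may be posted */
	readl(rb + DEMO_ERR_SET);	/* read back to force it out */
}
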
diff --git a/drivers/scsi/bfa/bfa_ioc_ct.c b/drivers/scsi/bfa/bfa_ioc_ct.c
index f21b82c5f64c..115730c0aa77 100644
--- a/drivers/scsi/bfa/bfa_ioc_ct.c
+++ b/drivers/scsi/bfa/bfa_ioc_ct.c
@@ -34,7 +34,7 @@ static void bfa_ioc_ct_ownership_reset(struct bfa_ioc_s *ioc);
struct bfa_ioc_hwif_s hwif_ct;
-/**
+/*
* Called from bfa_ioc_attach() to map asic specific calls.
*/
void
@@ -52,7 +52,7 @@ bfa_ioc_set_ct_hwif(struct bfa_ioc_s *ioc)
ioc->ioc_hwif = &hwif_ct;
}
-/**
+/*
* Return true if firmware of current driver matches the running firmware.
*/
static bfa_boolean_t
@@ -62,13 +62,13 @@ bfa_ioc_ct_firmware_lock(struct bfa_ioc_s *ioc)
u32 usecnt;
struct bfi_ioc_image_hdr_s fwhdr;
- /**
+ /*
* Firmware match check is relevant only for CNA.
*/
if (!ioc->cna)
return BFA_TRUE;
- /**
+ /*
* If bios boot (flash based) -- do not increment usage count
*/
if (bfa_cb_image_get_size(BFA_IOC_FWIMG_TYPE(ioc)) <
@@ -76,27 +76,27 @@ bfa_ioc_ct_firmware_lock(struct bfa_ioc_s *ioc)
return BFA_TRUE;
bfa_ioc_sem_get(ioc->ioc_regs.ioc_usage_sem_reg);
- usecnt = bfa_reg_read(ioc->ioc_regs.ioc_usage_reg);
+ usecnt = readl(ioc->ioc_regs.ioc_usage_reg);
- /**
+ /*
* If usage count is 0, always return TRUE.
*/
if (usecnt == 0) {
- bfa_reg_write(ioc->ioc_regs.ioc_usage_reg, 1);
+ writel(1, ioc->ioc_regs.ioc_usage_reg);
bfa_ioc_sem_release(ioc->ioc_regs.ioc_usage_sem_reg);
bfa_trc(ioc, usecnt);
return BFA_TRUE;
}
- ioc_fwstate = bfa_reg_read(ioc->ioc_regs.ioc_fwstate);
+ ioc_fwstate = readl(ioc->ioc_regs.ioc_fwstate);
bfa_trc(ioc, ioc_fwstate);
- /**
+ /*
* Use count cannot be non-zero and chip in uninitialized state.
*/
bfa_assert(ioc_fwstate != BFI_IOC_UNINIT);
- /**
+ /*
* Check if another driver with a different firmware is active
*/
bfa_ioc_fwver_get(ioc, &fwhdr);
@@ -106,11 +106,11 @@ bfa_ioc_ct_firmware_lock(struct bfa_ioc_s *ioc)
return BFA_FALSE;
}
- /**
+ /*
* Same firmware version. Increment the reference count.
*/
usecnt++;
- bfa_reg_write(ioc->ioc_regs.ioc_usage_reg, usecnt);
+ writel(usecnt, ioc->ioc_regs.ioc_usage_reg);
bfa_ioc_sem_release(ioc->ioc_regs.ioc_usage_sem_reg);
bfa_trc(ioc, usecnt);
return BFA_TRUE;
@@ -121,50 +121,50 @@ bfa_ioc_ct_firmware_unlock(struct bfa_ioc_s *ioc)
{
u32 usecnt;
- /**
+ /*
* Firmware lock is relevant only for CNA.
*/
if (!ioc->cna)
return;
- /**
+ /*
* If bios boot (flash based) -- do not decrement usage count
*/
if (bfa_cb_image_get_size(BFA_IOC_FWIMG_TYPE(ioc)) <
BFA_IOC_FWIMG_MINSZ)
return;
- /**
+ /*
* decrement usage count
*/
bfa_ioc_sem_get(ioc->ioc_regs.ioc_usage_sem_reg);
- usecnt = bfa_reg_read(ioc->ioc_regs.ioc_usage_reg);
+ usecnt = readl(ioc->ioc_regs.ioc_usage_reg);
bfa_assert(usecnt > 0);
usecnt--;
- bfa_reg_write(ioc->ioc_regs.ioc_usage_reg, usecnt);
+ writel(usecnt, ioc->ioc_regs.ioc_usage_reg);
bfa_trc(ioc, usecnt);
bfa_ioc_sem_release(ioc->ioc_regs.ioc_usage_sem_reg);
}
-/**
+/*
* Notify other functions on HB failure.
*/
static void
bfa_ioc_ct_notify_hbfail(struct bfa_ioc_s *ioc)
{
if (ioc->cna) {
- bfa_reg_write(ioc->ioc_regs.ll_halt, __FW_INIT_HALT_P);
+ writel(__FW_INIT_HALT_P, ioc->ioc_regs.ll_halt);
/* Wait for halt to take effect */
- bfa_reg_read(ioc->ioc_regs.ll_halt);
+ readl(ioc->ioc_regs.ll_halt);
} else {
- bfa_reg_write(ioc->ioc_regs.err_set, __PSS_ERR_STATUS_SET);
- bfa_reg_read(ioc->ioc_regs.err_set);
+ writel(__PSS_ERR_STATUS_SET, ioc->ioc_regs.err_set);
+ readl(ioc->ioc_regs.err_set);
}
}
-/**
+/*
* Host to LPU mailbox message addresses
*/
static struct { u32 hfn_mbox, lpu_mbox, hfn_pgn; } iocreg_fnreg[] = {
@@ -174,7 +174,7 @@ static struct { u32 hfn_mbox, lpu_mbox, hfn_pgn; } iocreg_fnreg[] = {
{ HOSTFN3_LPU_MBOX0_8, LPU_HOSTFN3_MBOX0_8, HOST_PAGE_NUM_FN3 }
};
-/**
+/*
* Host <-> LPU mailbox command/status registers - port 0
*/
static struct { u32 hfn, lpu; } iocreg_mbcmd_p0[] = {
@@ -184,7 +184,7 @@ static struct { u32 hfn, lpu; } iocreg_mbcmd_p0[] = {
{ HOSTFN3_LPU0_MBOX0_CMD_STAT, LPU0_HOSTFN3_MBOX0_CMD_STAT }
};
-/**
+/*
* Host <-> LPU mailbox command/status registers - port 1
*/
static struct { u32 hfn, lpu; } iocreg_mbcmd_p1[] = {
@@ -197,7 +197,7 @@ static struct { u32 hfn, lpu; } iocreg_mbcmd_p1[] = {
static void
bfa_ioc_ct_reg_init(struct bfa_ioc_s *ioc)
{
- bfa_os_addr_t rb;
+ void __iomem *rb;
int pcifn = bfa_ioc_pcifn(ioc);
rb = bfa_ioc_bar0(ioc);
@@ -236,7 +236,7 @@ bfa_ioc_ct_reg_init(struct bfa_ioc_s *ioc)
ioc->ioc_regs.ioc_init_sem_reg = (rb + HOST_SEM2_REG);
ioc->ioc_regs.ioc_usage_reg = (rb + BFA_FW_USE_COUNT);
- /**
+ /*
* sram memory access
*/
ioc->ioc_regs.smem_page_start = (rb + PSS_SMEM_PAGE_START);
@@ -248,7 +248,7 @@ bfa_ioc_ct_reg_init(struct bfa_ioc_s *ioc)
ioc->ioc_regs.err_set = (rb + ERR_SET_REG);
}
-/**
+/*
* Initialize IOC to port mapping.
*/
@@ -256,13 +256,13 @@ bfa_ioc_ct_reg_init(struct bfa_ioc_s *ioc)
static void
bfa_ioc_ct_map_port(struct bfa_ioc_s *ioc)
{
- bfa_os_addr_t rb = ioc->pcidev.pci_bar_kva;
+ void __iomem *rb = ioc->pcidev.pci_bar_kva;
u32 r32;
- /**
+ /*
* For catapult, base port id on personality register and IOC type
*/
- r32 = bfa_reg_read(rb + FNC_PERS_REG);
+ r32 = readl(rb + FNC_PERS_REG);
r32 >>= FNC_PERS_FN_SHIFT(bfa_ioc_pcifn(ioc));
ioc->port_id = (r32 & __F0_PORT_MAP_MK) >> __F0_PORT_MAP_SH;
@@ -270,22 +270,22 @@ bfa_ioc_ct_map_port(struct bfa_ioc_s *ioc)
bfa_trc(ioc, ioc->port_id);
}
-/**
+/*
* Set interrupt mode for a function: INTX or MSIX
*/
static void
bfa_ioc_ct_isr_mode_set(struct bfa_ioc_s *ioc, bfa_boolean_t msix)
{
- bfa_os_addr_t rb = ioc->pcidev.pci_bar_kva;
+ void __iomem *rb = ioc->pcidev.pci_bar_kva;
u32 r32, mode;
- r32 = bfa_reg_read(rb + FNC_PERS_REG);
+ r32 = readl(rb + FNC_PERS_REG);
bfa_trc(ioc, r32);
mode = (r32 >> FNC_PERS_FN_SHIFT(bfa_ioc_pcifn(ioc))) &
__F0_INTX_STATUS;
- /**
+ /*
* If already in desired mode, do not change anything
*/
if (!msix && mode)
@@ -300,10 +300,10 @@ bfa_ioc_ct_isr_mode_set(struct bfa_ioc_s *ioc, bfa_boolean_t msix)
r32 |= (mode << FNC_PERS_FN_SHIFT(bfa_ioc_pcifn(ioc)));
bfa_trc(ioc, r32);
- bfa_reg_write(rb + FNC_PERS_REG, r32);
+ writel(r32, rb + FNC_PERS_REG);
}
-/**
+/*
* Cleanup hw semaphore and usecnt registers
*/
static void
@@ -312,7 +312,7 @@ bfa_ioc_ct_ownership_reset(struct bfa_ioc_s *ioc)
if (ioc->cna) {
bfa_ioc_sem_get(ioc->ioc_regs.ioc_usage_sem_reg);
- bfa_reg_write(ioc->ioc_regs.ioc_usage_reg, 0);
+ writel(0, ioc->ioc_regs.ioc_usage_reg);
bfa_ioc_sem_release(ioc->ioc_regs.ioc_usage_sem_reg);
}
@@ -321,7 +321,7 @@ bfa_ioc_ct_ownership_reset(struct bfa_ioc_s *ioc)
* before we clear it. If it is not locked, writing 1
* will lock it instead of clearing it.
*/
- bfa_reg_read(ioc->ioc_regs.ioc_sem_reg);
+ readl(ioc->ioc_regs.ioc_sem_reg);
bfa_ioc_hw_sem_release(ioc);
}
@@ -331,17 +331,17 @@ bfa_ioc_ct_ownership_reset(struct bfa_ioc_s *ioc)
* Check the firmware state to know if pll_init has been completed already
*/
bfa_boolean_t
-bfa_ioc_ct_pll_init_complete(bfa_os_addr_t rb)
+bfa_ioc_ct_pll_init_complete(void __iomem *rb)
{
- if ((bfa_reg_read(rb + BFA_IOC0_STATE_REG) == BFI_IOC_OP) ||
- (bfa_reg_read(rb + BFA_IOC1_STATE_REG) == BFI_IOC_OP))
+ if ((readl(rb + BFA_IOC0_STATE_REG) == BFI_IOC_OP) ||
+ (readl(rb + BFA_IOC1_STATE_REG) == BFI_IOC_OP))
return BFA_TRUE;
return BFA_FALSE;
}
bfa_status_t
-bfa_ioc_ct_pll_init(bfa_os_addr_t rb, bfa_boolean_t fcmode)
+bfa_ioc_ct_pll_init(void __iomem *rb, bfa_boolean_t fcmode)
{
u32 pll_sclk, pll_fclk, r32;
@@ -354,56 +354,51 @@ bfa_ioc_ct_pll_init(bfa_os_addr_t rb, bfa_boolean_t fcmode)
__APP_PLL_425_JITLMT0_1(3U) |
__APP_PLL_425_CNTLMT0_1(1U);
if (fcmode) {
- bfa_reg_write((rb + OP_MODE), 0);
- bfa_reg_write((rb + ETH_MAC_SER_REG),
- __APP_EMS_CMLCKSEL |
- __APP_EMS_REFCKBUFEN2 |
- __APP_EMS_CHANNEL_SEL);
+ writel(0, (rb + OP_MODE));
+ writel(__APP_EMS_CMLCKSEL | __APP_EMS_REFCKBUFEN2 |
+ __APP_EMS_CHANNEL_SEL, (rb + ETH_MAC_SER_REG));
} else {
- bfa_reg_write((rb + OP_MODE), __GLOBAL_FCOE_MODE);
- bfa_reg_write((rb + ETH_MAC_SER_REG),
- __APP_EMS_REFCKBUFEN1);
+ writel(__GLOBAL_FCOE_MODE, (rb + OP_MODE));
+ writel(__APP_EMS_REFCKBUFEN1, (rb + ETH_MAC_SER_REG));
}
- bfa_reg_write((rb + BFA_IOC0_STATE_REG), BFI_IOC_UNINIT);
- bfa_reg_write((rb + BFA_IOC1_STATE_REG), BFI_IOC_UNINIT);
- bfa_reg_write((rb + HOSTFN0_INT_MSK), 0xffffffffU);
- bfa_reg_write((rb + HOSTFN1_INT_MSK), 0xffffffffU);
- bfa_reg_write((rb + HOSTFN0_INT_STATUS), 0xffffffffU);
- bfa_reg_write((rb + HOSTFN1_INT_STATUS), 0xffffffffU);
- bfa_reg_write((rb + HOSTFN0_INT_MSK), 0xffffffffU);
- bfa_reg_write((rb + HOSTFN1_INT_MSK), 0xffffffffU);
- bfa_reg_write(rb + APP_PLL_312_CTL_REG, pll_sclk |
- __APP_PLL_312_LOGIC_SOFT_RESET);
- bfa_reg_write(rb + APP_PLL_425_CTL_REG, pll_fclk |
- __APP_PLL_425_LOGIC_SOFT_RESET);
- bfa_reg_write(rb + APP_PLL_312_CTL_REG, pll_sclk |
- __APP_PLL_312_LOGIC_SOFT_RESET | __APP_PLL_312_ENABLE);
- bfa_reg_write(rb + APP_PLL_425_CTL_REG, pll_fclk |
- __APP_PLL_425_LOGIC_SOFT_RESET | __APP_PLL_425_ENABLE);
- bfa_reg_read(rb + HOSTFN0_INT_MSK);
- bfa_os_udelay(2000);
- bfa_reg_write((rb + HOSTFN0_INT_STATUS), 0xffffffffU);
- bfa_reg_write((rb + HOSTFN1_INT_STATUS), 0xffffffffU);
- bfa_reg_write(rb + APP_PLL_312_CTL_REG, pll_sclk |
- __APP_PLL_312_ENABLE);
- bfa_reg_write(rb + APP_PLL_425_CTL_REG, pll_fclk |
- __APP_PLL_425_ENABLE);
+ writel(BFI_IOC_UNINIT, (rb + BFA_IOC0_STATE_REG));
+ writel(BFI_IOC_UNINIT, (rb + BFA_IOC1_STATE_REG));
+ writel(0xffffffffU, (rb + HOSTFN0_INT_MSK));
+ writel(0xffffffffU, (rb + HOSTFN1_INT_MSK));
+ writel(0xffffffffU, (rb + HOSTFN0_INT_STATUS));
+ writel(0xffffffffU, (rb + HOSTFN1_INT_STATUS));
+ writel(0xffffffffU, (rb + HOSTFN0_INT_MSK));
+ writel(0xffffffffU, (rb + HOSTFN1_INT_MSK));
+ writel(pll_sclk | __APP_PLL_312_LOGIC_SOFT_RESET,
+ rb + APP_PLL_312_CTL_REG);
+ writel(pll_fclk | __APP_PLL_425_LOGIC_SOFT_RESET,
+ rb + APP_PLL_425_CTL_REG);
+ writel(pll_sclk | __APP_PLL_312_LOGIC_SOFT_RESET | __APP_PLL_312_ENABLE,
+ rb + APP_PLL_312_CTL_REG);
+ writel(pll_fclk | __APP_PLL_425_LOGIC_SOFT_RESET | __APP_PLL_425_ENABLE,
+ rb + APP_PLL_425_CTL_REG);
+ readl(rb + HOSTFN0_INT_MSK);
+ udelay(2000);
+ writel(0xffffffffU, (rb + HOSTFN0_INT_STATUS));
+ writel(0xffffffffU, (rb + HOSTFN1_INT_STATUS));
+ writel(pll_sclk | __APP_PLL_312_ENABLE, rb + APP_PLL_312_CTL_REG);
+ writel(pll_fclk | __APP_PLL_425_ENABLE, rb + APP_PLL_425_CTL_REG);
if (!fcmode) {
- bfa_reg_write((rb + PMM_1T_RESET_REG_P0), __PMM_1T_RESET_P);
- bfa_reg_write((rb + PMM_1T_RESET_REG_P1), __PMM_1T_RESET_P);
+ writel(__PMM_1T_RESET_P, (rb + PMM_1T_RESET_REG_P0));
+ writel(__PMM_1T_RESET_P, (rb + PMM_1T_RESET_REG_P1));
}
- r32 = bfa_reg_read((rb + PSS_CTL_REG));
+ r32 = readl((rb + PSS_CTL_REG));
r32 &= ~__PSS_LMEM_RESET;
- bfa_reg_write((rb + PSS_CTL_REG), r32);
- bfa_os_udelay(1000);
+ writel(r32, (rb + PSS_CTL_REG));
+ udelay(1000);
if (!fcmode) {
- bfa_reg_write((rb + PMM_1T_RESET_REG_P0), 0);
- bfa_reg_write((rb + PMM_1T_RESET_REG_P1), 0);
+ writel(0, (rb + PMM_1T_RESET_REG_P0));
+ writel(0, (rb + PMM_1T_RESET_REG_P1));
}
- bfa_reg_write((rb + MBIST_CTL_REG), __EDRAM_BISTR_START);
- bfa_os_udelay(1000);
- r32 = bfa_reg_read((rb + MBIST_STAT_REG));
- bfa_reg_write((rb + MBIST_CTL_REG), 0);
+ writel(__EDRAM_BISTR_START, (rb + MBIST_CTL_REG));
+ udelay(1000);
+ r32 = readl((rb + MBIST_STAT_REG));
+ writel(0, (rb + MBIST_CTL_REG));
return BFA_STATUS_OK;
}
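
bfa_ioc_ct.c follows the same conversion, with bfa_os_udelay() becoming udelay(). The PSS_CTL_REG sequence above is a plain read-modify-write with a settle delay; here is a standalone sketch using placeholder names (DEMO_CTL_REG, DEMO_RESET_BIT) rather than real Catapult registers.

#include <linux/delay.h>
#include <linux/io.h>
#include <linux/types.h>

#define DEMO_CTL_REG	0x0188		/* placeholder offset */
#define DEMO_RESET_BIT	(1U << 3)	/* placeholder bit */

static void demo_release_reset(void __iomem *rb)
{
	u32 r32 = readl(rb + DEMO_CTL_REG);

	r32 &= ~DEMO_RESET_BIT;		/* clear only the reset bit */
	writel(r32, rb + DEMO_CTL_REG);
	udelay(1000);			/* give the block time to settle */
}
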
diff --git a/drivers/scsi/bfa/bfa_modules.h b/drivers/scsi/bfa/bfa_modules.h
index 2cd527338677..15407ab39e77 100644
--- a/drivers/scsi/bfa/bfa_modules.h
+++ b/drivers/scsi/bfa/bfa_modules.h
@@ -15,7 +15,7 @@
* General Public License for more details.
*/
-/**
+/*
* bfa_modules.h BFA modules
*/
@@ -52,7 +52,7 @@ enum {
};
-/**
+/*
* Macro to define a new BFA module
*/
#define BFA_MODULE(__mod) \
@@ -80,7 +80,7 @@ enum {
#define BFA_CACHELINE_SZ (256)
-/**
+/*
* Structure used to interact between different BFA sub modules
*
* Each sub module needs to implement only the entry points relevant to it (and
diff --git a/drivers/scsi/bfa/bfa_os_inc.h b/drivers/scsi/bfa/bfa_os_inc.h
index 788a250ffb8a..65df62ef437f 100644
--- a/drivers/scsi/bfa/bfa_os_inc.h
+++ b/drivers/scsi/bfa/bfa_os_inc.h
@@ -15,10 +15,6 @@
* General Public License for more details.
*/
-/**
- * Contains declarations all OS Specific files needed for BFA layer
- */
-
#ifndef __BFA_OS_INC_H__
#define __BFA_OS_INC_H__
@@ -44,11 +40,6 @@
#define __BIGENDIAN
#endif
-static inline u64 bfa_os_get_clock(void)
-{
- return jiffies;
-}
-
static inline u64 bfa_os_get_log_time(void)
{
u64 system_time = 0;
@@ -63,13 +54,6 @@ static inline u64 bfa_os_get_log_time(void)
#define bfa_io_lat_clock_res_div HZ
#define bfa_io_lat_clock_res_mul 1000
-#define BFA_ASSERT(p) do { \
- if (!(p)) { \
- printk(KERN_ERR "assert(%s) failed at %s:%d\n", \
- #p, __FILE__, __LINE__); \
- } \
-} while (0)
-
#define BFA_LOG(level, bfad, mask, fmt, arg...) \
do { \
if (((mask) == 4) || (level[1] <= '4')) \
@@ -81,22 +65,6 @@ do { \
((_x) & 0x00ff00) | \
(((_x) & 0xff0000) >> 16))
-#define bfa_swap_8b(_x) \
- ((((_x) & 0xff00000000000000ull) >> 56) \
- | (((_x) & 0x00ff000000000000ull) >> 40) \
- | (((_x) & 0x0000ff0000000000ull) >> 24) \
- | (((_x) & 0x000000ff00000000ull) >> 8) \
- | (((_x) & 0x00000000ff000000ull) << 8) \
- | (((_x) & 0x0000000000ff0000ull) << 24) \
- | (((_x) & 0x000000000000ff00ull) << 40) \
- | (((_x) & 0x00000000000000ffull) << 56))
-
-#define bfa_os_swap32(_x) \
- ((((_x) & 0xff) << 24) | \
- (((_x) & 0x0000ff00) << 8) | \
- (((_x) & 0x00ff0000) >> 8) | \
- (((_x) & 0xff000000) >> 24))
-
#define bfa_os_swap_sgaddr(_x) ((u64)( \
(((u64)(_x) & (u64)0x00000000000000ffull) << 32) | \
(((u64)(_x) & (u64)0x000000000000ff00ull) << 32) | \
@@ -108,59 +76,27 @@ do { \
(((u64)(_x) & (u64)0xff00000000000000ull) >> 32)))
#ifndef __BIGENDIAN
-#define bfa_os_htons(_x) ((u16)((((_x) & 0xff00) >> 8) | \
- (((_x) & 0x00ff) << 8)))
-#define bfa_os_htonl(_x) bfa_os_swap32(_x)
-#define bfa_os_htonll(_x) bfa_swap_8b(_x)
-#define bfa_os_hton3b(_x) bfa_swap_3b(_x)
-#define bfa_os_wtole(_x) (_x)
+#define bfa_os_hton3b(_x) bfa_swap_3b(_x)
#define bfa_os_sgaddr(_x) (_x)
-
#else
-
-#define bfa_os_htons(_x) (_x)
-#define bfa_os_htonl(_x) (_x)
#define bfa_os_hton3b(_x) (_x)
-#define bfa_os_htonll(_x) (_x)
-#define bfa_os_wtole(_x) bfa_os_swap32(_x)
#define bfa_os_sgaddr(_x) bfa_os_swap_sgaddr(_x)
-
#endif
-#define bfa_os_ntohs(_x) bfa_os_htons(_x)
-#define bfa_os_ntohl(_x) bfa_os_htonl(_x)
-#define bfa_os_ntohll(_x) bfa_os_htonll(_x)
#define bfa_os_ntoh3b(_x) bfa_os_hton3b(_x)
-
#define bfa_os_u32(__pa64) ((__pa64) >> 32)
-#define bfa_os_memset memset
-#define bfa_os_memcpy memcpy
-#define bfa_os_udelay udelay
-#define bfa_os_vsprintf vsprintf
-#define bfa_os_snprintf snprintf
-
-#define bfa_os_assign(__t, __s) __t = __s
-#define bfa_os_addr_t void __iomem *
-
-#define bfa_os_reg_read(_raddr) readl(_raddr)
-#define bfa_os_reg_write(_raddr, _val) writel((_val), (_raddr))
-#define bfa_os_mem_read(_raddr, _off) \
- bfa_os_swap32(readl(((_raddr) + (_off))))
-#define bfa_os_mem_write(_raddr, _off, _val) \
- writel(bfa_os_swap32((_val)), ((_raddr) + (_off)))
-
-#define BFA_TRC_TS(_trcm) \
- ({ \
- struct timeval tv; \
- \
- do_gettimeofday(&tv); \
- (tv.tv_sec*1000000+tv.tv_usec); \
- })
+#define BFA_TRC_TS(_trcm) \
+ ({ \
+ struct timeval tv; \
+ \
+ do_gettimeofday(&tv); \
+ (tv.tv_sec*1000000+tv.tv_usec); \
+ })
#define boolean_t int
-/**
+/*
* For current time stamp, OS API will fill-in
*/
struct bfa_timeval_s {
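
With bfa_os_inc.h shedding the hand-rolled bfa_os_htonX/bfa_os_swap32 macros, wire-format fields are handled by the standard cpu_to_beNN()/beNN_to_cpu() helpers, as the later bfa_svc.c hunks show. A hedged example with an invented on-wire layout (struct demo_wire_hdr).

#include <linux/types.h>
#include <asm/byteorder.h>

struct demo_wire_hdr {
	__be16 tag;			/* big-endian on the wire */
	__be32 len;
};

static void demo_fill_hdr(struct demo_wire_hdr *hdr, u16 tag, u32 len)
{
	hdr->tag = cpu_to_be16(tag);	/* no-op on big-endian hosts */
	hdr->len = cpu_to_be32(len);
}

static u32 demo_read_len(const struct demo_wire_hdr *hdr)
{
	return be32_to_cpu(hdr->len);	/* back to CPU byte order */
}
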
diff --git a/drivers/scsi/bfa/bfa_port.c b/drivers/scsi/bfa/bfa_port.c
index b6d170a13bea..fff96226a383 100644
--- a/drivers/scsi/bfa/bfa_port.c
+++ b/drivers/scsi/bfa/bfa_port.c
@@ -37,16 +37,16 @@ bfa_port_stats_swap(struct bfa_port_s *port, union bfa_port_stats_u *stats)
t0 = dip[i];
t1 = dip[i + 1];
#ifdef __BIGENDIAN
- dip[i] = bfa_os_ntohl(t0);
- dip[i + 1] = bfa_os_ntohl(t1);
+ dip[i] = be32_to_cpu(t0);
+ dip[i + 1] = be32_to_cpu(t1);
#else
- dip[i] = bfa_os_ntohl(t1);
- dip[i + 1] = bfa_os_ntohl(t0);
+ dip[i] = be32_to_cpu(t1);
+ dip[i + 1] = be32_to_cpu(t0);
#endif
}
}
-/**
+/*
* bfa_port_enable_isr()
*
*
@@ -63,7 +63,7 @@ bfa_port_enable_isr(struct bfa_port_s *port, bfa_status_t status)
port->endis_cbfn(port->endis_cbarg, status);
}
-/**
+/*
* bfa_port_disable_isr()
*
*
@@ -80,7 +80,7 @@ bfa_port_disable_isr(struct bfa_port_s *port, bfa_status_t status)
port->endis_cbfn(port->endis_cbarg, status);
}
-/**
+/*
* bfa_port_get_stats_isr()
*
*
@@ -112,7 +112,7 @@ bfa_port_get_stats_isr(struct bfa_port_s *port, bfa_status_t status)
}
}
-/**
+/*
* bfa_port_clear_stats_isr()
*
*
@@ -129,7 +129,7 @@ bfa_port_clear_stats_isr(struct bfa_port_s *port, bfa_status_t status)
port->stats_status = status;
port->stats_busy = BFA_FALSE;
- /**
+ /*
* re-initialize time stamp for stats reset
*/
bfa_os_gettimeofday(&tv);
@@ -141,7 +141,7 @@ bfa_port_clear_stats_isr(struct bfa_port_s *port, bfa_status_t status)
}
}
-/**
+/*
* bfa_port_isr()
*
*
@@ -189,7 +189,7 @@ bfa_port_isr(void *cbarg, struct bfi_mbmsg_s *m)
}
}
-/**
+/*
* bfa_port_meminfo()
*
*
@@ -203,7 +203,7 @@ bfa_port_meminfo(void)
return BFA_ROUNDUP(sizeof(union bfa_port_stats_u), BFA_DMA_ALIGN_SZ);
}
-/**
+/*
* bfa_port_mem_claim()
*
*
@@ -220,7 +220,7 @@ bfa_port_mem_claim(struct bfa_port_s *port, u8 *dma_kva, u64 dma_pa)
port->stats_dma.pa = dma_pa;
}
-/**
+/*
* bfa_port_enable()
*
* Send the Port enable request to the f/w
@@ -264,7 +264,7 @@ bfa_port_enable(struct bfa_port_s *port, bfa_port_endis_cbfn_t cbfn,
return BFA_STATUS_OK;
}
-/**
+/*
* bfa_port_disable()
*
* Send the Port disable request to the f/w
@@ -308,7 +308,7 @@ bfa_port_disable(struct bfa_port_s *port, bfa_port_endis_cbfn_t cbfn,
return BFA_STATUS_OK;
}
-/**
+/*
* bfa_port_get_stats()
*
* Send the request to the f/w to fetch Port statistics.
@@ -348,7 +348,7 @@ bfa_port_get_stats(struct bfa_port_s *port, union bfa_port_stats_u *stats,
return BFA_STATUS_OK;
}
-/**
+/*
* bfa_port_clear_stats()
*
*
@@ -385,7 +385,7 @@ bfa_port_clear_stats(struct bfa_port_s *port, bfa_port_stats_cbfn_t cbfn,
return BFA_STATUS_OK;
}
-/**
+/*
* bfa_port_hbfail()
*
*
@@ -415,7 +415,7 @@ bfa_port_hbfail(void *arg)
}
}
-/**
+/*
* bfa_port_attach()
*
*
@@ -449,7 +449,7 @@ bfa_port_attach(struct bfa_port_s *port, struct bfa_ioc_s *ioc,
bfa_ioc_hbfail_init(&port->hbfail, bfa_port_hbfail, port);
bfa_ioc_hbfail_register(port->ioc, &port->hbfail);
- /**
+ /*
* initialize time stamp for stats reset
*/
bfa_os_gettimeofday(&tv);
@@ -458,7 +458,7 @@ bfa_port_attach(struct bfa_port_s *port, struct bfa_ioc_s *ioc,
bfa_trc(port, 0);
}
-/**
+/*
* bfa_port_detach()
*
*
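
bfa_port.c switches bfa_os_ntohl() to be32_to_cpu() inside bfa_port_stats_swap(), which treats each 64-bit DMA counter as a pair of big-endian 32-bit words, so on little-endian hosts the two halves swap places as well. A made-up standalone version of that loop, reusing the driver's own __BIGENDIAN define:

#include <linux/types.h>
#include <asm/byteorder.h>

static void demo_swap_counters(u32 *dip, int nwords)
{
	int i;

	for (i = 0; i < nwords; i += 2) {
		u32 t0 = dip[i], t1 = dip[i + 1];

#ifdef __BIGENDIAN			/* driver-private endian flag */
		dip[i]     = be32_to_cpu(t0);
		dip[i + 1] = be32_to_cpu(t1);
#else
		dip[i]     = be32_to_cpu(t1);	/* 32-bit halves trade places */
		dip[i + 1] = be32_to_cpu(t0);
#endif
	}
}
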
diff --git a/drivers/scsi/bfa/bfa_svc.c b/drivers/scsi/bfa/bfa_svc.c
index aa1dc749b281..c768143f4805 100644
--- a/drivers/scsi/bfa/bfa_svc.c
+++ b/drivers/scsi/bfa/bfa_svc.c
@@ -29,7 +29,7 @@ BFA_MODULE(fcport);
BFA_MODULE(rport);
BFA_MODULE(uf);
-/**
+/*
* LPS related definitions
*/
#define BFA_LPS_MIN_LPORTS (1)
@@ -41,7 +41,7 @@ BFA_MODULE(uf);
#define BFA_LPS_MAX_VPORTS_SUPP_CB 255
#define BFA_LPS_MAX_VPORTS_SUPP_CT 190
-/**
+/*
* lps_pvt BFA LPS private functions
*/
@@ -55,7 +55,7 @@ enum bfa_lps_event {
BFA_LPS_SM_RX_CVL = 7, /* Rx clear virtual link */
};
-/**
+/*
* FC PORT related definitions
*/
/*
@@ -67,7 +67,7 @@ enum bfa_lps_event {
(bfa_ioc_is_disabled(&bfa->ioc) == BFA_TRUE))
-/**
+/*
* BFA port state machine events
*/
enum bfa_fcport_sm_event {
@@ -82,7 +82,7 @@ enum bfa_fcport_sm_event {
BFA_FCPORT_SM_HWFAIL = 9, /* IOC h/w failure */
};
-/**
+/*
* BFA port link notification state machine events
*/
@@ -92,7 +92,7 @@ enum bfa_fcport_ln_sm_event {
BFA_FCPORT_LN_SM_NOTIFICATION = 3 /* done notification */
};
-/**
+/*
* RPORT related definitions
*/
#define bfa_rport_offline_cb(__rp) do { \
@@ -126,7 +126,7 @@ enum bfa_rport_event {
BFA_RPORT_SM_QRESUME = 9, /* space in requeue queue */
};
-/**
+/*
* forward declarations FCXP related functions
*/
static void __bfa_fcxp_send_cbfn(void *cbarg, bfa_boolean_t complete);
@@ -138,7 +138,7 @@ static void bfa_fcxp_qresume(void *cbarg);
static void bfa_fcxp_queue(struct bfa_fcxp_s *fcxp,
struct bfi_fcxp_send_req_s *send_req);
-/**
+/*
* forward declarations for LPS functions
*/
static void bfa_lps_meminfo(struct bfa_iocfc_cfg_s *cfg, u32 *ndm_len,
@@ -163,7 +163,7 @@ static void bfa_lps_login_comp(struct bfa_lps_s *lps);
static void bfa_lps_logout_comp(struct bfa_lps_s *lps);
static void bfa_lps_cvl_event(struct bfa_lps_s *lps);
-/**
+/*
* forward declaration for LPS state machine
*/
static void bfa_lps_sm_init(struct bfa_lps_s *lps, enum bfa_lps_event event);
@@ -175,7 +175,7 @@ static void bfa_lps_sm_logout(struct bfa_lps_s *lps, enum bfa_lps_event event);
static void bfa_lps_sm_logowait(struct bfa_lps_s *lps, enum bfa_lps_event
event);
-/**
+/*
* forward declaration for FC Port functions
*/
static bfa_boolean_t bfa_fcport_send_enable(struct bfa_fcport_s *fcport);
@@ -193,7 +193,7 @@ static void bfa_fcport_stats_get_timeout(void *cbarg);
static void bfa_fcport_stats_clr_timeout(void *cbarg);
static void bfa_trunk_iocdisable(struct bfa_s *bfa);
-/**
+/*
* forward declaration for FC PORT state machine
*/
static void bfa_fcport_sm_uninit(struct bfa_fcport_s *fcport,
@@ -252,7 +252,7 @@ static struct bfa_sm_table_s hal_port_sm_table[] = {
};
-/**
+/*
* forward declaration for RPORT related functions
*/
static struct bfa_rport_s *bfa_rport_alloc(struct bfa_rport_mod_s *rp_mod);
@@ -265,7 +265,7 @@ static void __bfa_cb_rport_online(void *cbarg,
static void __bfa_cb_rport_offline(void *cbarg,
bfa_boolean_t complete);
-/**
+/*
* forward declaration for RPORT state machine
*/
static void bfa_rport_sm_uninit(struct bfa_rport_s *rp,
@@ -295,7 +295,7 @@ static void bfa_rport_sm_fwdelete_qfull(struct bfa_rport_s *rp,
static void bfa_rport_sm_deleting_qfull(struct bfa_rport_s *rp,
enum bfa_rport_event event);
-/**
+/*
* PLOG related definitions
*/
static int
@@ -330,7 +330,7 @@ bfa_plog_add(struct bfa_plog_s *plog, struct bfa_plog_rec_s *pl_rec)
pl_recp = &(plog->plog_recs[tail]);
- bfa_os_memcpy(pl_recp, pl_rec, sizeof(struct bfa_plog_rec_s));
+ memcpy(pl_recp, pl_rec, sizeof(struct bfa_plog_rec_s));
pl_recp->tv = bfa_os_get_log_time();
BFA_PL_LOG_REC_INCR(plog->tail);
@@ -342,9 +342,9 @@ bfa_plog_add(struct bfa_plog_s *plog, struct bfa_plog_rec_s *pl_rec)
void
bfa_plog_init(struct bfa_plog_s *plog)
{
- bfa_os_memset((char *)plog, 0, sizeof(struct bfa_plog_s));
+ memset((char *)plog, 0, sizeof(struct bfa_plog_s));
- bfa_os_memcpy(plog->plog_sig, BFA_PL_SIG_STR, BFA_PL_SIG_LEN);
+ memcpy(plog->plog_sig, BFA_PL_SIG_STR, BFA_PL_SIG_LEN);
plog->head = plog->tail = 0;
plog->plog_enabled = 1;
}
@@ -357,7 +357,7 @@ bfa_plog_str(struct bfa_plog_s *plog, enum bfa_plog_mid mid,
struct bfa_plog_rec_s lp;
if (plog->plog_enabled) {
- bfa_os_memset(&lp, 0, sizeof(struct bfa_plog_rec_s));
+ memset(&lp, 0, sizeof(struct bfa_plog_rec_s));
lp.mid = mid;
lp.eid = event;
lp.log_type = BFA_PL_LOG_TYPE_STRING;
@@ -381,15 +381,14 @@ bfa_plog_intarr(struct bfa_plog_s *plog, enum bfa_plog_mid mid,
num_ints = BFA_PL_INT_LOG_SZ;
if (plog->plog_enabled) {
- bfa_os_memset(&lp, 0, sizeof(struct bfa_plog_rec_s));
+ memset(&lp, 0, sizeof(struct bfa_plog_rec_s));
lp.mid = mid;
lp.eid = event;
lp.log_type = BFA_PL_LOG_TYPE_INT;
lp.misc = misc;
for (i = 0; i < num_ints; i++)
- bfa_os_assign(lp.log_entry.int_log[i],
- intarr[i]);
+ lp.log_entry.int_log[i] = intarr[i];
lp.log_num_ints = (u8) num_ints;
@@ -407,7 +406,7 @@ bfa_plog_fchdr(struct bfa_plog_s *plog, enum bfa_plog_mid mid,
u32 ints[BFA_PL_INT_LOG_SZ];
if (plog->plog_enabled) {
- bfa_os_memset(&lp, 0, sizeof(struct bfa_plog_rec_s));
+ memset(&lp, 0, sizeof(struct bfa_plog_rec_s));
ints[0] = tmp_int[0];
ints[1] = tmp_int[1];
@@ -427,7 +426,7 @@ bfa_plog_fchdr_and_pl(struct bfa_plog_s *plog, enum bfa_plog_mid mid,
u32 ints[BFA_PL_INT_LOG_SZ];
if (plog->plog_enabled) {
- bfa_os_memset(&lp, 0, sizeof(struct bfa_plog_rec_s));
+ memset(&lp, 0, sizeof(struct bfa_plog_rec_s));
ints[0] = tmp_int[0];
ints[1] = tmp_int[1];
@@ -462,7 +461,7 @@ bfa_plog_get_setting(struct bfa_plog_s *plog)
return (bfa_boolean_t)plog->plog_enabled;
}
-/**
+/*
* fcxp_pvt BFA FCXP private functions
*/
@@ -485,7 +484,7 @@ claim_fcxp_req_rsp_mem(struct bfa_fcxp_mod_s *mod, struct bfa_meminfo_s *mi)
mod->req_pld_list_pa = dm_pa;
dm_kva += buf_pool_sz;
dm_pa += buf_pool_sz;
- bfa_os_memset(mod->req_pld_list_kva, 0, buf_pool_sz);
+ memset(mod->req_pld_list_kva, 0, buf_pool_sz);
/*
* Initialize the fcxp rsp payload list
@@ -495,7 +494,7 @@ claim_fcxp_req_rsp_mem(struct bfa_fcxp_mod_s *mod, struct bfa_meminfo_s *mi)
mod->rsp_pld_list_pa = dm_pa;
dm_kva += buf_pool_sz;
dm_pa += buf_pool_sz;
- bfa_os_memset(mod->rsp_pld_list_kva, 0, buf_pool_sz);
+ memset(mod->rsp_pld_list_kva, 0, buf_pool_sz);
bfa_meminfo_dma_virt(mi) = dm_kva;
bfa_meminfo_dma_phys(mi) = dm_pa;
@@ -508,7 +507,7 @@ claim_fcxps_mem(struct bfa_fcxp_mod_s *mod, struct bfa_meminfo_s *mi)
struct bfa_fcxp_s *fcxp;
fcxp = (struct bfa_fcxp_s *) bfa_meminfo_kva(mi);
- bfa_os_memset(fcxp, 0, sizeof(struct bfa_fcxp_s) * mod->num_fcxps);
+ memset(fcxp, 0, sizeof(struct bfa_fcxp_s) * mod->num_fcxps);
INIT_LIST_HEAD(&mod->fcxp_free_q);
INIT_LIST_HEAD(&mod->fcxp_active_q);
@@ -559,11 +558,11 @@ bfa_fcxp_attach(struct bfa_s *bfa, void *bfad, struct bfa_iocfc_cfg_s *cfg,
{
struct bfa_fcxp_mod_s *mod = BFA_FCXP_MOD(bfa);
- bfa_os_memset(mod, 0, sizeof(struct bfa_fcxp_mod_s));
+ memset(mod, 0, sizeof(struct bfa_fcxp_mod_s));
mod->bfa = bfa;
mod->num_fcxps = cfg->fwcfg.num_fcxp_reqs;
- /**
+ /*
* Initialize FCXP request and response payload sizes.
*/
mod->req_pld_sz = mod->rsp_pld_sz = BFA_FCXP_MAX_IBUF_SZ;
@@ -741,20 +740,20 @@ hal_fcxp_send_comp(struct bfa_s *bfa, struct bfi_fcxp_send_rsp_s *fcxp_rsp)
{
struct bfa_fcxp_mod_s *mod = BFA_FCXP_MOD(bfa);
struct bfa_fcxp_s *fcxp;
- u16 fcxp_tag = bfa_os_ntohs(fcxp_rsp->fcxp_tag);
+ u16 fcxp_tag = be16_to_cpu(fcxp_rsp->fcxp_tag);
bfa_trc(bfa, fcxp_tag);
- fcxp_rsp->rsp_len = bfa_os_ntohl(fcxp_rsp->rsp_len);
+ fcxp_rsp->rsp_len = be32_to_cpu(fcxp_rsp->rsp_len);
- /**
+ /*
* @todo f/w should not set residue to non-0 when everything
* is received.
*/
if (fcxp_rsp->req_status == BFA_STATUS_OK)
fcxp_rsp->residue_len = 0;
else
- fcxp_rsp->residue_len = bfa_os_ntohl(fcxp_rsp->residue_len);
+ fcxp_rsp->residue_len = be32_to_cpu(fcxp_rsp->residue_len);
fcxp = BFA_FCXP_FROM_TAG(mod, fcxp_tag);
@@ -856,7 +855,7 @@ hal_fcxp_rx_plog(struct bfa_s *bfa, struct bfa_fcxp_s *fcxp,
}
}
-/**
+/*
* Handler to resume sending fcxp when space is available in cpe queue.
*/
static void
@@ -871,7 +870,7 @@ bfa_fcxp_qresume(void *cbarg)
bfa_fcxp_queue(fcxp, send_req);
}
-/**
+/*
* Queue fcxp send request to firmware.
*/
static void
@@ -885,26 +884,26 @@ bfa_fcxp_queue(struct bfa_fcxp_s *fcxp, struct bfi_fcxp_send_req_s *send_req)
bfi_h2i_set(send_req->mh, BFI_MC_FCXP, BFI_FCXP_H2I_SEND_REQ,
bfa_lpuid(bfa));
- send_req->fcxp_tag = bfa_os_htons(fcxp->fcxp_tag);
+ send_req->fcxp_tag = cpu_to_be16(fcxp->fcxp_tag);
if (rport) {
send_req->rport_fw_hndl = rport->fw_handle;
- send_req->max_frmsz = bfa_os_htons(rport->rport_info.max_frmsz);
+ send_req->max_frmsz = cpu_to_be16(rport->rport_info.max_frmsz);
if (send_req->max_frmsz == 0)
- send_req->max_frmsz = bfa_os_htons(FC_MAX_PDUSZ);
+ send_req->max_frmsz = cpu_to_be16(FC_MAX_PDUSZ);
} else {
send_req->rport_fw_hndl = 0;
- send_req->max_frmsz = bfa_os_htons(FC_MAX_PDUSZ);
+ send_req->max_frmsz = cpu_to_be16(FC_MAX_PDUSZ);
}
- send_req->vf_id = bfa_os_htons(reqi->vf_id);
+ send_req->vf_id = cpu_to_be16(reqi->vf_id);
send_req->lp_tag = reqi->lp_tag;
send_req->class = reqi->class;
send_req->rsp_timeout = rspi->rsp_timeout;
send_req->cts = reqi->cts;
send_req->fchs = reqi->fchs;
- send_req->req_len = bfa_os_htonl(reqi->req_tot_len);
- send_req->rsp_maxlen = bfa_os_htonl(rspi->rsp_maxlen);
+ send_req->req_len = cpu_to_be32(reqi->req_tot_len);
+ send_req->rsp_maxlen = cpu_to_be32(rspi->rsp_maxlen);
/*
* setup req sgles
@@ -955,11 +954,11 @@ bfa_fcxp_queue(struct bfa_fcxp_s *fcxp, struct bfi_fcxp_send_req_s *send_req)
bfa_trc(bfa, bfa_reqq_ci(bfa, BFA_REQQ_FCXP));
}
-/**
+/*
* hal_fcxp_api BFA FCXP API
*/
-/**
+/*
* Allocate an FCXP instance to send a response or to send a request
* that has a response. Request/response buffers are allocated by caller.
*
@@ -1005,7 +1004,7 @@ bfa_fcxp_alloc(void *caller, struct bfa_s *bfa, int nreq_sgles,
return fcxp;
}
-/**
+/*
* Get the internal request buffer pointer
*
* @param[in] fcxp BFA fcxp pointer
@@ -1032,7 +1031,7 @@ bfa_fcxp_get_reqbufsz(struct bfa_fcxp_s *fcxp)
return mod->req_pld_sz;
}
-/**
+/*
* Get the internal response buffer pointer
*
* @param[in] fcxp BFA fcxp pointer
@@ -1052,7 +1051,7 @@ bfa_fcxp_get_rspbuf(struct bfa_fcxp_s *fcxp)
return rspbuf;
}
-/**
+/*
* Free the BFA FCXP
*
* @param[in] fcxp BFA fcxp pointer
@@ -1069,7 +1068,7 @@ bfa_fcxp_free(struct bfa_fcxp_s *fcxp)
bfa_fcxp_put(fcxp);
}
-/**
+/*
* Send a FCXP request
*
* @param[in] fcxp BFA fcxp pointer
@@ -1103,7 +1102,7 @@ bfa_fcxp_send(struct bfa_fcxp_s *fcxp, struct bfa_rport_s *rport,
bfa_trc(bfa, fcxp->fcxp_tag);
- /**
+ /*
* setup request/response info
*/
reqi->bfa_rport = rport;
@@ -1118,7 +1117,7 @@ bfa_fcxp_send(struct bfa_fcxp_s *fcxp, struct bfa_rport_s *rport,
fcxp->send_cbfn = cbfn ? cbfn : bfa_fcxp_null_comp;
fcxp->send_cbarg = cbarg;
- /**
+ /*
* If no room in CPE queue, wait for space in request queue
*/
send_req = bfa_reqq_next(bfa, BFA_REQQ_FCXP);
@@ -1132,7 +1131,7 @@ bfa_fcxp_send(struct bfa_fcxp_s *fcxp, struct bfa_rport_s *rport,
bfa_fcxp_queue(fcxp, send_req);
}
-/**
+/*
* Abort a BFA FCXP
*
* @param[in] fcxp BFA fcxp pointer
@@ -1186,7 +1185,7 @@ bfa_fcxp_walloc_cancel(struct bfa_s *bfa, struct bfa_fcxp_wqe_s *wqe)
void
bfa_fcxp_discard(struct bfa_fcxp_s *fcxp)
{
- /**
+ /*
* If waiting for room in request queue, cancel reqq wait
* and free fcxp.
*/
@@ -1202,7 +1201,7 @@ bfa_fcxp_discard(struct bfa_fcxp_s *fcxp)
-/**
+/*
* hal_fcxp_public BFA FCXP public functions
*/
@@ -1229,11 +1228,11 @@ bfa_fcxp_get_maxrsp(struct bfa_s *bfa)
}
-/**
+/*
* BFA LPS state machine functions
*/
-/**
+/*
* Init state -- no login
*/
static void
@@ -1285,7 +1284,7 @@ bfa_lps_sm_init(struct bfa_lps_s *lps, enum bfa_lps_event event)
}
}
-/**
+/*
* login is in progress -- awaiting response from firmware
*/
static void
@@ -1327,7 +1326,7 @@ bfa_lps_sm_login(struct bfa_lps_s *lps, enum bfa_lps_event event)
}
}
-/**
+/*
* login pending - awaiting space in request queue
*/
static void
@@ -1359,7 +1358,7 @@ bfa_lps_sm_loginwait(struct bfa_lps_s *lps, enum bfa_lps_event event)
}
}
-/**
+/*
* login complete
*/
static void
@@ -1400,7 +1399,7 @@ bfa_lps_sm_online(struct bfa_lps_s *lps, enum bfa_lps_event event)
}
}
-/**
+/*
* logout in progress - awaiting firmware response
*/
static void
@@ -1424,7 +1423,7 @@ bfa_lps_sm_logout(struct bfa_lps_s *lps, enum bfa_lps_event event)
}
}
-/**
+/*
* logout pending -- awaiting space in request queue
*/
static void
@@ -1451,11 +1450,11 @@ bfa_lps_sm_logowait(struct bfa_lps_s *lps, enum bfa_lps_event event)
-/**
+/*
* lps_pvt BFA LPS private functions
*/
-/**
+/*
* return memory requirement
*/
static void
@@ -1468,7 +1467,7 @@ bfa_lps_meminfo(struct bfa_iocfc_cfg_s *cfg, u32 *ndm_len,
*ndm_len += sizeof(struct bfa_lps_s) * BFA_LPS_MAX_LPORTS;
}
-/**
+/*
* bfa module attach at initialization time
*/
static void
@@ -1479,7 +1478,7 @@ bfa_lps_attach(struct bfa_s *bfa, void *bfad, struct bfa_iocfc_cfg_s *cfg,
struct bfa_lps_s *lps;
int i;
- bfa_os_memset(mod, 0, sizeof(struct bfa_lps_mod_s));
+ memset(mod, 0, sizeof(struct bfa_lps_mod_s));
mod->num_lps = BFA_LPS_MAX_LPORTS;
if (cfg->drvcfg.min_cfg)
mod->num_lps = BFA_LPS_MIN_LPORTS;
@@ -1516,7 +1515,7 @@ bfa_lps_stop(struct bfa_s *bfa)
{
}
-/**
+/*
* IOC in disabled state -- consider all lps offline
*/
static void
@@ -1532,7 +1531,7 @@ bfa_lps_iocdisable(struct bfa_s *bfa)
}
}
-/**
+/*
* Firmware login response
*/
static void
@@ -1550,7 +1549,7 @@ bfa_lps_login_rsp(struct bfa_s *bfa, struct bfi_lps_login_rsp_s *rsp)
lps->fport = rsp->f_port;
lps->npiv_en = rsp->npiv_en;
lps->lp_pid = rsp->lp_pid;
- lps->pr_bbcred = bfa_os_ntohs(rsp->bb_credit);
+ lps->pr_bbcred = be16_to_cpu(rsp->bb_credit);
lps->pr_pwwn = rsp->port_name;
lps->pr_nwwn = rsp->node_name;
lps->auth_req = rsp->auth_req;
@@ -1579,7 +1578,7 @@ bfa_lps_login_rsp(struct bfa_s *bfa, struct bfi_lps_login_rsp_s *rsp)
bfa_sm_send_event(lps, BFA_LPS_SM_FWRSP);
}
-/**
+/*
* Firmware logout response
*/
static void
@@ -1594,7 +1593,7 @@ bfa_lps_logout_rsp(struct bfa_s *bfa, struct bfi_lps_logout_rsp_s *rsp)
bfa_sm_send_event(lps, BFA_LPS_SM_FWRSP);
}
-/**
+/*
* Firmware received a Clear virtual link request (for FCoE)
*/
static void
@@ -1608,7 +1607,7 @@ bfa_lps_rx_cvl_event(struct bfa_s *bfa, struct bfi_lps_cvl_event_s *cvl)
bfa_sm_send_event(lps, BFA_LPS_SM_RX_CVL);
}
-/**
+/*
* Space is available in request queue, resume queueing request to firmware.
*/
static void
@@ -1619,7 +1618,7 @@ bfa_lps_reqq_resume(void *lps_arg)
bfa_sm_send_event(lps, BFA_LPS_SM_RESUME);
}
-/**
+/*
* lps is freed -- triggered by vport delete
*/
static void
@@ -1632,7 +1631,7 @@ bfa_lps_free(struct bfa_lps_s *lps)
list_add_tail(&lps->qe, &mod->lps_free_q);
}
-/**
+/*
* send login request to firmware
*/
static void
@@ -1648,7 +1647,7 @@ bfa_lps_send_login(struct bfa_lps_s *lps)
m->lp_tag = lps->lp_tag;
m->alpa = lps->alpa;
- m->pdu_size = bfa_os_htons(lps->pdusz);
+ m->pdu_size = cpu_to_be16(lps->pdusz);
m->pwwn = lps->pwwn;
m->nwwn = lps->nwwn;
m->fdisc = lps->fdisc;
@@ -1657,7 +1656,7 @@ bfa_lps_send_login(struct bfa_lps_s *lps)
bfa_reqq_produce(lps->bfa, lps->reqq);
}
-/**
+/*
* send logout request to firmware
*/
static void
@@ -1676,7 +1675,7 @@ bfa_lps_send_logout(struct bfa_lps_s *lps)
bfa_reqq_produce(lps->bfa, lps->reqq);
}
-/**
+/*
* Indirect login completion handler for non-fcs
*/
static void
@@ -1693,7 +1692,7 @@ bfa_lps_login_comp_cb(void *arg, bfa_boolean_t complete)
bfa_cb_lps_flogi_comp(lps->bfa->bfad, lps->uarg, lps->status);
}
-/**
+/*
* Login completion handler -- direct call for fcs, queue for others
*/
static void
@@ -1711,7 +1710,7 @@ bfa_lps_login_comp(struct bfa_lps_s *lps)
bfa_cb_lps_flogi_comp(lps->bfa->bfad, lps->uarg, lps->status);
}
-/**
+/*
* Indirect logout completion handler for non-fcs
*/
static void
@@ -1726,7 +1725,7 @@ bfa_lps_logout_comp_cb(void *arg, bfa_boolean_t complete)
bfa_cb_lps_fdisclogo_comp(lps->bfa->bfad, lps->uarg);
}
-/**
+/*
* Logout completion handler -- direct call for fcs, queue for others
*/
static void
@@ -1741,7 +1740,7 @@ bfa_lps_logout_comp(struct bfa_lps_s *lps)
bfa_cb_lps_fdisclogo_comp(lps->bfa->bfad, lps->uarg);
}
-/**
+/*
* Clear virtual link completion handler for non-fcs
*/
static void
@@ -1757,7 +1756,7 @@ bfa_lps_cvl_event_cb(void *arg, bfa_boolean_t complete)
bfa_cb_lps_cvl_event(lps->bfa->bfad, lps->uarg);
}
-/**
+/*
* Received Clear virtual link event -- direct call for fcs,
* queue for others
*/
@@ -1777,7 +1776,7 @@ bfa_lps_cvl_event(struct bfa_lps_s *lps)
-/**
+/*
* lps_public BFA LPS public functions
*/
@@ -1790,7 +1789,7 @@ bfa_lps_get_max_vport(struct bfa_s *bfa)
return BFA_LPS_MAX_VPORTS_SUPP_CB;
}
-/**
+/*
* Allocate a lport service tag.
*/
struct bfa_lps_s *
@@ -1810,7 +1809,7 @@ bfa_lps_alloc(struct bfa_s *bfa)
return lps;
}
-/**
+/*
* Free lport service tag. This can be called anytime after an alloc.
* No need to wait for any pending login/logout completions.
*/
@@ -1820,7 +1819,7 @@ bfa_lps_delete(struct bfa_lps_s *lps)
bfa_sm_send_event(lps, BFA_LPS_SM_DELETE);
}
-/**
+/*
* Initiate a lport login.
*/
void
@@ -1837,7 +1836,7 @@ bfa_lps_flogi(struct bfa_lps_s *lps, void *uarg, u8 alpa, u16 pdusz,
bfa_sm_send_event(lps, BFA_LPS_SM_LOGIN);
}
-/**
+/*
* Initiate a lport fdisc login.
*/
void
@@ -1854,7 +1853,7 @@ bfa_lps_fdisc(struct bfa_lps_s *lps, void *uarg, u16 pdusz, wwn_t pwwn,
bfa_sm_send_event(lps, BFA_LPS_SM_LOGIN);
}
-/**
+/*
* Initiate a lport logout (flogi).
*/
void
@@ -1863,7 +1862,7 @@ bfa_lps_flogo(struct bfa_lps_s *lps)
bfa_sm_send_event(lps, BFA_LPS_SM_LOGOUT);
}
-/**
+/*
* Initiate a lport FDISC logout.
*/
void
@@ -1872,7 +1871,7 @@ bfa_lps_fdisclogo(struct bfa_lps_s *lps)
bfa_sm_send_event(lps, BFA_LPS_SM_LOGOUT);
}
-/**
+/*
* Discard a pending login request -- should be called only for
* link down handling.
*/
@@ -1882,7 +1881,7 @@ bfa_lps_discard(struct bfa_lps_s *lps)
bfa_sm_send_event(lps, BFA_LPS_SM_OFFLINE);
}
-/**
+/*
* Return lport services tag
*/
u8
@@ -1891,7 +1890,7 @@ bfa_lps_get_tag(struct bfa_lps_s *lps)
return lps->lp_tag;
}
-/**
+/*
* Return lport services tag given the pid
*/
u8
@@ -1910,7 +1909,7 @@ bfa_lps_get_tag_from_pid(struct bfa_s *bfa, u32 pid)
return 0;
}
-/**
+/*
* return if fabric login indicates support for NPIV
*/
bfa_boolean_t
@@ -1919,7 +1918,7 @@ bfa_lps_is_npiv_en(struct bfa_lps_s *lps)
return lps->npiv_en;
}
-/**
+/*
* Return TRUE if attached to F-Port, else return FALSE
*/
bfa_boolean_t
@@ -1928,7 +1927,7 @@ bfa_lps_is_fport(struct bfa_lps_s *lps)
return lps->fport;
}
-/**
+/*
* Return TRUE if attached to a Brocade Fabric
*/
bfa_boolean_t
@@ -1936,7 +1935,7 @@ bfa_lps_is_brcd_fabric(struct bfa_lps_s *lps)
{
return lps->brcd_switch;
}
-/**
+/*
* return TRUE if authentication is required
*/
bfa_boolean_t
@@ -1951,7 +1950,7 @@ bfa_lps_get_extstatus(struct bfa_lps_s *lps)
return lps->ext_status;
}
-/**
+/*
* return port id assigned to the lport
*/
u32
@@ -1960,7 +1959,7 @@ bfa_lps_get_pid(struct bfa_lps_s *lps)
return lps->lp_pid;
}
-/**
+/*
* return port id assigned to the base lport
*/
u32
@@ -1971,7 +1970,7 @@ bfa_lps_get_base_pid(struct bfa_s *bfa)
return BFA_LPS_FROM_TAG(mod, 0)->lp_pid;
}
-/**
+/*
* Return bb_credit assigned in FLOGI response
*/
u16
@@ -1980,7 +1979,7 @@ bfa_lps_get_peer_bbcredit(struct bfa_lps_s *lps)
return lps->pr_bbcred;
}
-/**
+/*
* Return peer port name
*/
wwn_t
@@ -1989,7 +1988,7 @@ bfa_lps_get_peer_pwwn(struct bfa_lps_s *lps)
return lps->pr_pwwn;
}
-/**
+/*
* Return peer node name
*/
wwn_t
@@ -1998,7 +1997,7 @@ bfa_lps_get_peer_nwwn(struct bfa_lps_s *lps)
return lps->pr_nwwn;
}
-/**
+/*
* return reason code if login request is rejected
*/
u8
@@ -2007,7 +2006,7 @@ bfa_lps_get_lsrjt_rsn(struct bfa_lps_s *lps)
return lps->lsrjt_rsn;
}
-/**
+/*
* return explanation code if login request is rejected
*/
u8
@@ -2016,7 +2015,7 @@ bfa_lps_get_lsrjt_expl(struct bfa_lps_s *lps)
return lps->lsrjt_expl;
}
-/**
+/*
* Return fpma/spma MAC for lport
*/
mac_t
@@ -2025,7 +2024,7 @@ bfa_lps_get_lp_mac(struct bfa_lps_s *lps)
return lps->lp_mac;
}
-/**
+/*
* LPS firmware message class handler.
*/
void
@@ -2055,7 +2054,7 @@ bfa_lps_isr(struct bfa_s *bfa, struct bfi_msg_s *m)
}
}
-/**
+/*
* FC PORT state machine functions
*/
static void
@@ -2066,7 +2065,7 @@ bfa_fcport_sm_uninit(struct bfa_fcport_s *fcport,
switch (event) {
case BFA_FCPORT_SM_START:
- /**
+ /*
* Start event after IOC is configured and BFA is started.
*/
if (bfa_fcport_send_enable(fcport)) {
@@ -2080,7 +2079,7 @@ bfa_fcport_sm_uninit(struct bfa_fcport_s *fcport,
break;
case BFA_FCPORT_SM_ENABLE:
- /**
+ /*
* Port is persistently configured to be in enabled state. Do
* not change state. Port enabling is done when START event is
* received.
@@ -2088,7 +2087,7 @@ bfa_fcport_sm_uninit(struct bfa_fcport_s *fcport,
break;
case BFA_FCPORT_SM_DISABLE:
- /**
+ /*
* If a port is persistently configured to be disabled, the
* first event will be a port disable request.
*/
@@ -2124,13 +2123,13 @@ bfa_fcport_sm_enabling_qwait(struct bfa_fcport_s *fcport,
break;
case BFA_FCPORT_SM_ENABLE:
- /**
+ /*
* Already enable is in progress.
*/
break;
case BFA_FCPORT_SM_DISABLE:
- /**
+ /*
* Just send disable request to firmware when room becomes
* available in request queue.
*/
@@ -2145,7 +2144,7 @@ bfa_fcport_sm_enabling_qwait(struct bfa_fcport_s *fcport,
case BFA_FCPORT_SM_LINKUP:
case BFA_FCPORT_SM_LINKDOWN:
- /**
+ /*
* Possible to get link events when doing back-to-back
* enable/disables.
*/
@@ -2184,7 +2183,7 @@ bfa_fcport_sm_enabling(struct bfa_fcport_s *fcport,
break;
case BFA_FCPORT_SM_ENABLE:
- /**
+ /*
* Already being enabled.
*/
break;
@@ -2257,13 +2256,13 @@ bfa_fcport_sm_linkdown(struct bfa_fcport_s *fcport,
break;
case BFA_FCPORT_SM_LINKDOWN:
- /**
+ /*
* Possible to get link down event.
*/
break;
case BFA_FCPORT_SM_ENABLE:
- /**
+ /*
* Already enabled.
*/
break;
@@ -2306,7 +2305,7 @@ bfa_fcport_sm_linkup(struct bfa_fcport_s *fcport,
switch (event) {
case BFA_FCPORT_SM_ENABLE:
- /**
+ /*
* Already enabled.
*/
break;
@@ -2399,14 +2398,14 @@ bfa_fcport_sm_disabling_qwait(struct bfa_fcport_s *fcport,
break;
case BFA_FCPORT_SM_DISABLE:
- /**
+ /*
* Already being disabled.
*/
break;
case BFA_FCPORT_SM_LINKUP:
case BFA_FCPORT_SM_LINKDOWN:
- /**
+ /*
* Possible to get link events when doing back-to-back
* enable/disables.
*/
@@ -2453,7 +2452,7 @@ bfa_fcport_sm_toggling_qwait(struct bfa_fcport_s *fcport,
case BFA_FCPORT_SM_LINKUP:
case BFA_FCPORT_SM_LINKDOWN:
- /**
+ /*
* Possible to get link events when doing back-to-back
* enable/disables.
*/
@@ -2483,7 +2482,7 @@ bfa_fcport_sm_disabling(struct bfa_fcport_s *fcport,
break;
case BFA_FCPORT_SM_DISABLE:
- /**
+ /*
* Already being disabled.
*/
break;
@@ -2508,7 +2507,7 @@ bfa_fcport_sm_disabling(struct bfa_fcport_s *fcport,
case BFA_FCPORT_SM_LINKUP:
case BFA_FCPORT_SM_LINKDOWN:
- /**
+ /*
* Possible to get link events when doing back-to-back
* enable/disables.
*/
@@ -2533,7 +2532,7 @@ bfa_fcport_sm_disabled(struct bfa_fcport_s *fcport,
switch (event) {
case BFA_FCPORT_SM_START:
- /**
+ /*
* Ignore start event for a port that is disabled.
*/
break;
@@ -2557,7 +2556,7 @@ bfa_fcport_sm_disabled(struct bfa_fcport_s *fcport,
break;
case BFA_FCPORT_SM_DISABLE:
- /**
+ /*
* Already disabled.
*/
break;
@@ -2587,14 +2586,14 @@ bfa_fcport_sm_stopped(struct bfa_fcport_s *fcport,
break;
default:
- /**
+ /*
* Ignore all other events.
*/
;
}
}
-/**
+/*
* Port is enabled. IOC is down/failed.
*/
static void
@@ -2613,14 +2612,14 @@ bfa_fcport_sm_iocdown(struct bfa_fcport_s *fcport,
break;
default:
- /**
+ /*
* Ignore all events.
*/
;
}
}
-/**
+/*
* Port is disabled. IOC is down/failed.
*/
static void
@@ -2639,14 +2638,14 @@ bfa_fcport_sm_iocfail(struct bfa_fcport_s *fcport,
break;
default:
- /**
+ /*
* Ignore all events.
*/
;
}
}
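
Editorial note: each bfa_fcport_sm_* handler above follows the driver's event-driven state-machine convention, where the current state is itself a handler function. The snippet below is a minimal, self-contained sketch of that convention under invented demo_ names; it is an illustration of the idea, not the driver's bfa_sm implementation.

enum demo_event { DEMO_EV_START, DEMO_EV_STOP };

struct demo_port;
typedef void (*demo_state_t)(struct demo_port *p, enum demo_event ev);

struct demo_port {
	demo_state_t sm;		/* the current state is the current handler */
};

static void demo_sm_enabled(struct demo_port *p, enum demo_event ev);

static void demo_sm_uninit(struct demo_port *p, enum demo_event ev)
{
	switch (ev) {
	case DEMO_EV_START:
		p->sm = demo_sm_enabled;	/* ~ bfa_sm_set_state() */
		break;
	default:
		break;				/* unexpected events are ignored */
	}
}

static void demo_sm_enabled(struct demo_port *p, enum demo_event ev)
{
	if (ev == DEMO_EV_STOP)
		p->sm = demo_sm_uninit;		/* back to the initial state */
}

static inline void demo_sm_send_event(struct demo_port *p, enum demo_event ev)
{
	p->sm(p, ev);				/* ~ bfa_sm_send_event() */
}
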
-/**
+/*
* Link state is down
*/
static void
@@ -2666,7 +2665,7 @@ bfa_fcport_ln_sm_dn(struct bfa_fcport_ln_s *ln,
}
}
-/**
+/*
* Link state is waiting for down notification
*/
static void
@@ -2689,7 +2688,7 @@ bfa_fcport_ln_sm_dn_nf(struct bfa_fcport_ln_s *ln,
}
}
-/**
+/*
* Link state is waiting for down notification and there is a pending up
*/
static void
@@ -2713,7 +2712,7 @@ bfa_fcport_ln_sm_dn_up_nf(struct bfa_fcport_ln_s *ln,
}
}
-/**
+/*
* Link state is up
*/
static void
@@ -2733,7 +2732,7 @@ bfa_fcport_ln_sm_up(struct bfa_fcport_ln_s *ln,
}
}
-/**
+/*
* Link state is waiting for up notification
*/
static void
@@ -2756,7 +2755,7 @@ bfa_fcport_ln_sm_up_nf(struct bfa_fcport_ln_s *ln,
}
}
-/**
+/*
* Link state is waiting for up notification and there is a pending down
*/
static void
@@ -2780,7 +2779,7 @@ bfa_fcport_ln_sm_up_dn_nf(struct bfa_fcport_ln_s *ln,
}
}
-/**
+/*
* Link state is waiting for up notification and there are pending down and up
*/
static void
@@ -2806,7 +2805,7 @@ bfa_fcport_ln_sm_up_dn_up_nf(struct bfa_fcport_ln_s *ln,
-/**
+/*
* hal_port_private
*/
@@ -2821,7 +2820,7 @@ __bfa_cb_fcport_event(void *cbarg, bfa_boolean_t complete)
bfa_sm_send_event(ln, BFA_FCPORT_LN_SM_NOTIFICATION);
}
-/**
+/*
* Send SCN notification to upper layers.
* trunk - false if caller is fcport to ignore fcport event in trunked mode
*/
@@ -2897,7 +2896,7 @@ bfa_fcport_mem_claim(struct bfa_fcport_s *fcport, struct bfa_meminfo_s *meminfo)
bfa_meminfo_dma_phys(meminfo) = dm_pa;
}
-/**
+/*
* Memory initialization.
*/
static void
@@ -2909,7 +2908,7 @@ bfa_fcport_attach(struct bfa_s *bfa, void *bfad, struct bfa_iocfc_cfg_s *cfg,
struct bfa_fcport_ln_s *ln = &fcport->ln;
struct bfa_timeval_s tv;
- bfa_os_memset(fcport, 0, sizeof(struct bfa_fcport_s));
+ memset(fcport, 0, sizeof(struct bfa_fcport_s));
fcport->bfa = bfa;
ln->fcport = fcport;
@@ -2918,13 +2917,13 @@ bfa_fcport_attach(struct bfa_s *bfa, void *bfad, struct bfa_iocfc_cfg_s *cfg,
bfa_sm_set_state(fcport, bfa_fcport_sm_uninit);
bfa_sm_set_state(ln, bfa_fcport_ln_sm_dn);
- /**
+ /*
* initialize time stamp for stats reset
*/
bfa_os_gettimeofday(&tv);
fcport->stats_reset_time = tv.tv_sec;
- /**
+ /*
* initialize and set default configuration
*/
port_cfg->topology = BFA_PORT_TOPOLOGY_P2P;
@@ -2942,7 +2941,7 @@ bfa_fcport_detach(struct bfa_s *bfa)
{
}
-/**
+/*
* Called when IOC is ready.
*/
static void
@@ -2951,7 +2950,7 @@ bfa_fcport_start(struct bfa_s *bfa)
bfa_sm_send_event(BFA_FCPORT_MOD(bfa), BFA_FCPORT_SM_START);
}
-/**
+/*
* Called before IOC is stopped.
*/
static void
@@ -2961,7 +2960,7 @@ bfa_fcport_stop(struct bfa_s *bfa)
bfa_trunk_iocdisable(bfa);
}
-/**
+/*
* Called when IOC failure is detected.
*/
static void
@@ -2986,18 +2985,17 @@ bfa_fcport_update_linkinfo(struct bfa_fcport_s *fcport)
fcport->myalpa = 0;
/* QoS Details */
- bfa_os_assign(fcport->qos_attr, pevent->link_state.qos_attr);
- bfa_os_assign(fcport->qos_vc_attr,
- pevent->link_state.vc_fcf.qos_vc_attr);
+ fcport->qos_attr = pevent->link_state.qos_attr;
+ fcport->qos_vc_attr = pevent->link_state.vc_fcf.qos_vc_attr;
- /**
+ /*
* update trunk state if applicable
*/
if (!fcport->cfg.trunked)
trunk->attr.state = BFA_TRUNK_DISABLED;
/* update FCoE specific */
- fcport->fcoe_vlan = bfa_os_ntohs(pevent->link_state.vc_fcf.fcf.vlan);
+ fcport->fcoe_vlan = be16_to_cpu(pevent->link_state.vc_fcf.fcf.vlan);
bfa_trc(fcport->bfa, fcport->speed);
bfa_trc(fcport->bfa, fcport->topology);
@@ -3010,7 +3008,7 @@ bfa_fcport_reset_linkinfo(struct bfa_fcport_s *fcport)
fcport->topology = BFA_PORT_TOPOLOGY_NONE;
}
-/**
+/*
* Send port enable message to firmware.
*/
static bfa_boolean_t
@@ -3018,13 +3016,13 @@ bfa_fcport_send_enable(struct bfa_fcport_s *fcport)
{
struct bfi_fcport_enable_req_s *m;
- /**
+ /*
* Increment message tag before queue check, so that responses to old
* requests are discarded.
*/
fcport->msgtag++;
- /**
+ /*
* check for room in queue to send request now
*/
m = bfa_reqq_next(fcport->bfa, BFA_REQQ_PORT);
@@ -3040,19 +3038,19 @@ bfa_fcport_send_enable(struct bfa_fcport_s *fcport)
m->pwwn = fcport->pwwn;
m->port_cfg = fcport->cfg;
m->msgtag = fcport->msgtag;
- m->port_cfg.maxfrsize = bfa_os_htons(fcport->cfg.maxfrsize);
+ m->port_cfg.maxfrsize = cpu_to_be16(fcport->cfg.maxfrsize);
bfa_dma_be_addr_set(m->stats_dma_addr, fcport->stats_pa);
bfa_trc(fcport->bfa, m->stats_dma_addr.a32.addr_lo);
bfa_trc(fcport->bfa, m->stats_dma_addr.a32.addr_hi);
- /**
+ /*
* queue I/O message to firmware
*/
bfa_reqq_produce(fcport->bfa, BFA_REQQ_PORT);
return BFA_TRUE;
}
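
Editorial note: the send routines in this block share one request-queue idiom: bump the message tag so stale replies are ignored, ask bfa_reqq_next() for a free slot, then either park on the queue or fill the message and hand it to firmware with bfa_reqq_produce(). The condensed sketch below paraphrases that flow; it assumes the driver's bfa_reqq_wait() helper for the queue-full case and is an illustration rather than a verbatim copy of any one sender.

static bfa_boolean_t
demo_send_port_req(struct bfa_fcport_s *fcport)
{
	struct bfi_fcport_req_s *m;

	fcport->msgtag++;		/* invalidate replies to older requests */

	m = bfa_reqq_next(fcport->bfa, BFA_REQQ_PORT);
	if (!m) {
		/* queue full: resume via fcport->reqq_wait when space frees up */
		bfa_reqq_wait(fcport->bfa, BFA_REQQ_PORT, &fcport->reqq_wait);
		return BFA_FALSE;
	}

	bfi_h2i_set(m->mh, BFI_MC_FCPORT, BFI_FCPORT_H2I_DISABLE_REQ,
		    bfa_lpuid(fcport->bfa));
	m->msgtag = fcport->msgtag;

	bfa_reqq_produce(fcport->bfa, BFA_REQQ_PORT);	/* hand off to firmware */
	return BFA_TRUE;
}
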
-/**
+/*
* Send port disable message to firmware.
*/
static bfa_boolean_t
@@ -3060,13 +3058,13 @@ bfa_fcport_send_disable(struct bfa_fcport_s *fcport)
{
struct bfi_fcport_req_s *m;
- /**
+ /*
* Increment message tag before queue check, so that responses to old
* requests are discarded.
*/
fcport->msgtag++;
- /**
+ /*
* check for room in queue to send request now
*/
m = bfa_reqq_next(fcport->bfa, BFA_REQQ_PORT);
@@ -3080,7 +3078,7 @@ bfa_fcport_send_disable(struct bfa_fcport_s *fcport)
bfa_lpuid(fcport->bfa));
m->msgtag = fcport->msgtag;
- /**
+ /*
* queue I/O message to firmware
*/
bfa_reqq_produce(fcport->bfa, BFA_REQQ_PORT);
@@ -3105,7 +3103,7 @@ bfa_fcport_send_txcredit(void *port_cbarg)
struct bfa_fcport_s *fcport = port_cbarg;
struct bfi_fcport_set_svc_params_req_s *m;
- /**
+ /*
* check for room in queue to send request now
*/
m = bfa_reqq_next(fcport->bfa, BFA_REQQ_PORT);
@@ -3116,9 +3114,9 @@ bfa_fcport_send_txcredit(void *port_cbarg)
bfi_h2i_set(m->mh, BFI_MC_FCPORT, BFI_FCPORT_H2I_SET_SVC_PARAMS_REQ,
bfa_lpuid(fcport->bfa));
- m->tx_bbcredit = bfa_os_htons((u16)fcport->cfg.tx_bbcredit);
+ m->tx_bbcredit = cpu_to_be16((u16)fcport->cfg.tx_bbcredit);
- /**
+ /*
* queue I/O message to firmware
*/
bfa_reqq_produce(fcport->bfa, BFA_REQQ_PORT);
@@ -3134,7 +3132,7 @@ bfa_fcport_qos_stats_swap(struct bfa_qos_stats_s *d,
/* Now swap the 32 bit fields */
for (i = 0; i < (sizeof(struct bfa_qos_stats_s)/sizeof(u32)); ++i)
- dip[i] = bfa_os_ntohl(sip[i]);
+ dip[i] = be32_to_cpu(sip[i]);
}
static void
@@ -3148,11 +3146,11 @@ bfa_fcport_fcoe_stats_swap(struct bfa_fcoe_stats_s *d,
for (i = 0; i < ((sizeof(struct bfa_fcoe_stats_s))/sizeof(u32));
i = i + 2) {
#ifdef __BIGENDIAN
- dip[i] = bfa_os_ntohl(sip[i]);
- dip[i + 1] = bfa_os_ntohl(sip[i + 1]);
+ dip[i] = be32_to_cpu(sip[i]);
+ dip[i + 1] = be32_to_cpu(sip[i + 1]);
#else
- dip[i] = bfa_os_ntohl(sip[i + 1]);
- dip[i + 1] = bfa_os_ntohl(sip[i]);
+ dip[i] = be32_to_cpu(sip[i + 1]);
+ dip[i + 1] = be32_to_cpu(sip[i]);
#endif
}
}
@@ -3223,7 +3221,7 @@ bfa_fcport_send_stats_get(void *cbarg)
}
fcport->stats_qfull = BFA_FALSE;
- bfa_os_memset(msg, 0, sizeof(struct bfi_fcport_req_s));
+ memset(msg, 0, sizeof(struct bfi_fcport_req_s));
bfi_h2i_set(msg->mh, BFI_MC_FCPORT, BFI_FCPORT_H2I_STATS_GET_REQ,
bfa_lpuid(fcport->bfa));
bfa_reqq_produce(fcport->bfa, BFA_REQQ_PORT);
@@ -3237,7 +3235,7 @@ __bfa_cb_fcport_stats_clr(void *cbarg, bfa_boolean_t complete)
if (complete) {
struct bfa_timeval_s tv;
- /**
+ /*
* re-initialize time stamp for stats reset
*/
bfa_os_gettimeofday(&tv);
@@ -3285,13 +3283,13 @@ bfa_fcport_send_stats_clear(void *cbarg)
}
fcport->stats_qfull = BFA_FALSE;
- bfa_os_memset(msg, 0, sizeof(struct bfi_fcport_req_s));
+ memset(msg, 0, sizeof(struct bfi_fcport_req_s));
bfi_h2i_set(msg->mh, BFI_MC_FCPORT, BFI_FCPORT_H2I_STATS_CLEAR_REQ,
bfa_lpuid(fcport->bfa));
bfa_reqq_produce(fcport->bfa, BFA_REQQ_PORT);
}
-/**
+/*
* Handle trunk SCN event from firmware.
*/
static void
@@ -3312,7 +3310,7 @@ bfa_trunk_scn(struct bfa_fcport_s *fcport, struct bfi_fcport_trunk_scn_s *scn)
bfa_trc(fcport->bfa, scn->trunk_state);
bfa_trc(fcport->bfa, scn->trunk_speed);
- /**
+ /*
* Save off new state for trunk attribute query
*/
state_prev = trunk->attr.state;
@@ -3327,7 +3325,7 @@ bfa_trunk_scn(struct bfa_fcport_s *fcport, struct bfi_fcport_trunk_scn_s *scn)
lattr->trunk_wwn = tlink->trunk_wwn;
lattr->fctl = tlink->fctl;
lattr->speed = tlink->speed;
- lattr->deskew = bfa_os_ntohl(tlink->deskew);
+ lattr->deskew = be32_to_cpu(tlink->deskew);
if (tlink->state == BFA_TRUNK_LINK_STATE_UP) {
fcport->speed = tlink->speed;
@@ -3360,7 +3358,7 @@ bfa_trunk_scn(struct bfa_fcport_s *fcport, struct bfi_fcport_trunk_scn_s *scn)
BFA_PL_EID_TRUNK_SCN, 0, "Trunk down");
}
- /**
+ /*
* Notify upper layers if trunk state changed.
*/
if ((state_prev != trunk->attr.state) ||
@@ -3376,7 +3374,7 @@ bfa_trunk_iocdisable(struct bfa_s *bfa)
struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
int i = 0;
- /**
+ /*
* In trunked mode, notify upper layers that link is down
*/
if (fcport->cfg.trunked) {
@@ -3400,11 +3398,11 @@ bfa_trunk_iocdisable(struct bfa_s *bfa)
-/**
+/*
* hal_port_public
*/
-/**
+/*
* Called to initialize port attributes
*/
void
@@ -3412,7 +3410,7 @@ bfa_fcport_init(struct bfa_s *bfa)
{
struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
- /**
+ /*
* Initialize port attributes from IOC hardware data.
*/
bfa_fcport_set_wwns(fcport);
@@ -3426,7 +3424,7 @@ bfa_fcport_init(struct bfa_s *bfa)
bfa_assert(fcport->speed_sup);
}
-/**
+/*
* Firmware message handler.
*/
void
@@ -3507,11 +3505,11 @@ bfa_fcport_isr(struct bfa_s *bfa, struct bfi_msg_s *msg)
-/**
+/*
* hal_port_api
*/
-/**
+/*
* Registered callback for port events.
*/
void
@@ -3552,7 +3550,7 @@ bfa_fcport_disable(struct bfa_s *bfa)
return BFA_STATUS_OK;
}
-/**
+/*
* Configure port speed.
*/
bfa_status_t
@@ -3574,7 +3572,7 @@ bfa_fcport_cfg_speed(struct bfa_s *bfa, enum bfa_port_speed speed)
return BFA_STATUS_OK;
}
-/**
+/*
* Get current speed.
*/
enum bfa_port_speed
@@ -3585,7 +3583,7 @@ bfa_fcport_get_speed(struct bfa_s *bfa)
return fcport->speed;
}
-/**
+/*
* Configure port topology.
*/
bfa_status_t
@@ -3610,7 +3608,7 @@ bfa_fcport_cfg_topology(struct bfa_s *bfa, enum bfa_port_topology topology)
return BFA_STATUS_OK;
}
-/**
+/*
* Get current topology.
*/
enum bfa_port_topology
@@ -3710,7 +3708,7 @@ bfa_fcport_set_tx_bbcredit(struct bfa_s *bfa, u16 tx_bbcredit)
bfa_fcport_send_txcredit(fcport);
}
-/**
+/*
* Get port attributes.
*/
@@ -3729,7 +3727,7 @@ bfa_fcport_get_attr(struct bfa_s *bfa, struct bfa_port_attr_s *attr)
{
struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
- bfa_os_memset(attr, 0, sizeof(struct bfa_port_attr_s));
+ memset(attr, 0, sizeof(struct bfa_port_attr_s));
attr->nwwn = fcport->nwwn;
attr->pwwn = fcport->pwwn;
@@ -3737,7 +3735,7 @@ bfa_fcport_get_attr(struct bfa_s *bfa, struct bfa_port_attr_s *attr)
attr->factorypwwn = bfa_ioc_get_mfg_pwwn(&bfa->ioc);
attr->factorynwwn = bfa_ioc_get_mfg_nwwn(&bfa->ioc);
- bfa_os_memcpy(&attr->pport_cfg, &fcport->cfg,
+ memcpy(&attr->pport_cfg, &fcport->cfg,
sizeof(struct bfa_port_cfg_s));
/* speed attributes */
attr->pport_cfg.speed = fcport->cfg.speed;
@@ -3770,7 +3768,7 @@ bfa_fcport_get_attr(struct bfa_s *bfa, struct bfa_port_attr_s *attr)
#define BFA_FCPORT_STATS_TOV 1000
-/**
+/*
* Fetch port statistics (FCQoS or FCoE).
*/
bfa_status_t
@@ -3796,7 +3794,7 @@ bfa_fcport_get_stats(struct bfa_s *bfa, union bfa_fcport_stats_u *stats,
return BFA_STATUS_OK;
}
-/**
+/*
* Reset port statistics (FCQoS or FCoE).
*/
bfa_status_t
@@ -3820,7 +3818,7 @@ bfa_fcport_clear_stats(struct bfa_s *bfa, bfa_cb_port_t cbfn, void *cbarg)
return BFA_STATUS_OK;
}
-/**
+/*
* Fetch FCQoS port statistics
*/
bfa_status_t
@@ -3833,7 +3831,7 @@ bfa_fcport_get_qos_stats(struct bfa_s *bfa, union bfa_fcport_stats_u *stats,
return bfa_fcport_get_stats(bfa, stats, cbfn, cbarg);
}
-/**
+/*
* Reset FCQoS port statistics
*/
bfa_status_t
@@ -3845,7 +3843,7 @@ bfa_fcport_clear_qos_stats(struct bfa_s *bfa, bfa_cb_port_t cbfn, void *cbarg)
return bfa_fcport_clear_stats(bfa, cbfn, cbarg);
}
-/**
+/*
* Fetch FCoE port statistics
*/
bfa_status_t
@@ -3858,7 +3856,7 @@ bfa_fcport_get_fcoe_stats(struct bfa_s *bfa, union bfa_fcport_stats_u *stats,
return bfa_fcport_get_stats(bfa, stats, cbfn, cbarg);
}
-/**
+/*
* Reset FCoE port statistics
*/
bfa_status_t
@@ -3876,7 +3874,7 @@ bfa_fcport_qos_get_attr(struct bfa_s *bfa, struct bfa_qos_attr_s *qos_attr)
struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
qos_attr->state = fcport->qos_attr.state;
- qos_attr->total_bb_cr = bfa_os_ntohl(fcport->qos_attr.total_bb_cr);
+ qos_attr->total_bb_cr = be32_to_cpu(fcport->qos_attr.total_bb_cr);
}
void
@@ -3887,10 +3885,10 @@ bfa_fcport_qos_get_vc_attr(struct bfa_s *bfa,
struct bfa_qos_vc_attr_s *bfa_vc_attr = &fcport->qos_vc_attr;
u32 i = 0;
- qos_vc_attr->total_vc_count = bfa_os_ntohs(bfa_vc_attr->total_vc_count);
- qos_vc_attr->shared_credit = bfa_os_ntohs(bfa_vc_attr->shared_credit);
+ qos_vc_attr->total_vc_count = be16_to_cpu(bfa_vc_attr->total_vc_count);
+ qos_vc_attr->shared_credit = be16_to_cpu(bfa_vc_attr->shared_credit);
qos_vc_attr->elp_opmode_flags =
- bfa_os_ntohl(bfa_vc_attr->elp_opmode_flags);
+ be32_to_cpu(bfa_vc_attr->elp_opmode_flags);
/* Individual VC info */
while (i < qos_vc_attr->total_vc_count) {
@@ -3904,7 +3902,7 @@ bfa_fcport_qos_get_vc_attr(struct bfa_s *bfa,
}
}
-/**
+/*
* Fetch port attributes.
*/
bfa_boolean_t
@@ -3939,7 +3937,7 @@ bfa_fcport_cfg_qos(struct bfa_s *bfa, bfa_boolean_t on_off)
if (ioc_type == BFA_IOC_TYPE_FC) {
fcport->cfg.qos_enabled = on_off;
- /**
+ /*
* Notify fcpim of the change in QoS state
*/
bfa_fcpim_update_ioredirect(bfa);
@@ -3959,7 +3957,7 @@ bfa_fcport_cfg_ratelim(struct bfa_s *bfa, bfa_boolean_t on_off)
fcport->cfg.trl_def_speed = BFA_PORT_SPEED_1GBPS;
}
-/**
+/*
* Configure default minimum ratelim speed
*/
bfa_status_t
@@ -3980,7 +3978,7 @@ bfa_fcport_cfg_ratelim_speed(struct bfa_s *bfa, enum bfa_port_speed speed)
return BFA_STATUS_OK;
}
-/**
+/*
* Get default minimum ratelim speed
*/
enum bfa_port_speed
@@ -4095,10 +4093,10 @@ bfa_trunk_disable(struct bfa_s *bfa)
}
-/**
+/*
* Rport State machine functions
*/
-/**
+/*
* Beginning state, only online event expected.
*/
static void
@@ -4151,7 +4149,7 @@ bfa_rport_sm_created(struct bfa_rport_s *rp, enum bfa_rport_event event)
}
}
-/**
+/*
* Waiting for rport create response from firmware.
*/
static void
@@ -4188,7 +4186,7 @@ bfa_rport_sm_fwcreate(struct bfa_rport_s *rp, enum bfa_rport_event event)
}
}
-/**
+/*
* Request queue is full, awaiting queue resume to send create request.
*/
static void
@@ -4229,7 +4227,7 @@ bfa_rport_sm_fwcreate_qfull(struct bfa_rport_s *rp, enum bfa_rport_event event)
}
}
-/**
+/*
* Online state - normal parking state.
*/
static void
@@ -4275,9 +4273,9 @@ bfa_rport_sm_online(struct bfa_rport_s *rp, enum bfa_rport_event event)
bfa_trc(rp->bfa, qos_scn->new_qos_attr.qos_priority);
qos_scn->old_qos_attr.qos_flow_id =
- bfa_os_ntohl(qos_scn->old_qos_attr.qos_flow_id);
+ be32_to_cpu(qos_scn->old_qos_attr.qos_flow_id);
qos_scn->new_qos_attr.qos_flow_id =
- bfa_os_ntohl(qos_scn->new_qos_attr.qos_flow_id);
+ be32_to_cpu(qos_scn->new_qos_attr.qos_flow_id);
if (qos_scn->old_qos_attr.qos_flow_id !=
qos_scn->new_qos_attr.qos_flow_id)
@@ -4297,7 +4295,7 @@ bfa_rport_sm_online(struct bfa_rport_s *rp, enum bfa_rport_event event)
}
}
-/**
+/*
* Firmware rport is being deleted - awaiting f/w response.
*/
static void
@@ -4360,7 +4358,7 @@ bfa_rport_sm_fwdelete_qfull(struct bfa_rport_s *rp, enum bfa_rport_event event)
}
}
-/**
+/*
* Offline state.
*/
static void
@@ -4395,7 +4393,7 @@ bfa_rport_sm_offline(struct bfa_rport_s *rp, enum bfa_rport_event event)
}
}
-/**
+/*
* Rport is deleted, waiting for firmware response to delete.
*/
static void
@@ -4447,7 +4445,7 @@ bfa_rport_sm_deleting_qfull(struct bfa_rport_s *rp, enum bfa_rport_event event)
}
}
-/**
+/*
* Waiting for rport create response from firmware. A delete is pending.
*/
static void
@@ -4478,7 +4476,7 @@ bfa_rport_sm_delete_pending(struct bfa_rport_s *rp,
}
}
-/**
+/*
* Waiting for rport create response from firmware. Rport offline is pending.
*/
static void
@@ -4513,7 +4511,7 @@ bfa_rport_sm_offline_pending(struct bfa_rport_s *rp,
}
}
-/**
+/*
* IOC h/w failed.
*/
static void
@@ -4553,7 +4551,7 @@ bfa_rport_sm_iocdisable(struct bfa_rport_s *rp, enum bfa_rport_event event)
-/**
+/*
* bfa_rport_private BFA rport private functions
*/
@@ -4612,12 +4610,12 @@ bfa_rport_attach(struct bfa_s *bfa, void *bfad, struct bfa_iocfc_cfg_s *cfg,
!(mod->num_rports & (mod->num_rports - 1)));
for (i = 0; i < mod->num_rports; i++, rp++) {
- bfa_os_memset(rp, 0, sizeof(struct bfa_rport_s));
+ memset(rp, 0, sizeof(struct bfa_rport_s));
rp->bfa = bfa;
rp->rport_tag = i;
bfa_sm_set_state(rp, bfa_rport_sm_uninit);
- /**
+ /*
* - is unused
*/
if (i)
@@ -4626,7 +4624,7 @@ bfa_rport_attach(struct bfa_s *bfa, void *bfad, struct bfa_iocfc_cfg_s *cfg,
bfa_reqq_winit(&rp->reqq_wait, bfa_rport_qresume, rp);
}
- /**
+ /*
* consume memory
*/
bfa_meminfo_kva(meminfo) = (u8 *) rp;
@@ -4687,7 +4685,7 @@ bfa_rport_send_fwcreate(struct bfa_rport_s *rp)
{
struct bfi_rport_create_req_s *m;
- /**
+ /*
* check for room in queue to send request now
*/
m = bfa_reqq_next(rp->bfa, BFA_REQQ_RPORT);
@@ -4699,7 +4697,7 @@ bfa_rport_send_fwcreate(struct bfa_rport_s *rp)
bfi_h2i_set(m->mh, BFI_MC_RPORT, BFI_RPORT_H2I_CREATE_REQ,
bfa_lpuid(rp->bfa));
m->bfa_handle = rp->rport_tag;
- m->max_frmsz = bfa_os_htons(rp->rport_info.max_frmsz);
+ m->max_frmsz = cpu_to_be16(rp->rport_info.max_frmsz);
m->pid = rp->rport_info.pid;
m->lp_tag = rp->rport_info.lp_tag;
m->local_pid = rp->rport_info.local_pid;
@@ -4708,7 +4706,7 @@ bfa_rport_send_fwcreate(struct bfa_rport_s *rp)
m->vf_id = rp->rport_info.vf_id;
m->cisc = rp->rport_info.cisc;
- /**
+ /*
* queue I/O message to firmware
*/
bfa_reqq_produce(rp->bfa, BFA_REQQ_RPORT);
@@ -4720,7 +4718,7 @@ bfa_rport_send_fwdelete(struct bfa_rport_s *rp)
{
struct bfi_rport_delete_req_s *m;
- /**
+ /*
* check for room in queue to send request now
*/
m = bfa_reqq_next(rp->bfa, BFA_REQQ_RPORT);
@@ -4733,7 +4731,7 @@ bfa_rport_send_fwdelete(struct bfa_rport_s *rp)
bfa_lpuid(rp->bfa));
m->fw_handle = rp->fw_handle;
- /**
+ /*
* queue I/O message to firmware
*/
bfa_reqq_produce(rp->bfa, BFA_REQQ_RPORT);
@@ -4745,7 +4743,7 @@ bfa_rport_send_fwspeed(struct bfa_rport_s *rp)
{
struct bfa_rport_speed_req_s *m;
- /**
+ /*
* check for room in queue to send request now
*/
m = bfa_reqq_next(rp->bfa, BFA_REQQ_RPORT);
@@ -4759,7 +4757,7 @@ bfa_rport_send_fwspeed(struct bfa_rport_s *rp)
m->fw_handle = rp->fw_handle;
m->speed = (u8)rp->rport_info.speed;
- /**
+ /*
* queue I/O message to firmware
*/
bfa_reqq_produce(rp->bfa, BFA_REQQ_RPORT);
@@ -4768,11 +4766,11 @@ bfa_rport_send_fwspeed(struct bfa_rport_s *rp)
-/**
+/*
* bfa_rport_public
*/
-/**
+/*
* Rport interrupt processing.
*/
void
@@ -4814,7 +4812,7 @@ bfa_rport_isr(struct bfa_s *bfa, struct bfi_msg_s *m)
-/**
+/*
* bfa_rport_api
*/
@@ -4849,7 +4847,7 @@ bfa_rport_online(struct bfa_rport_s *rport, struct bfa_rport_info_s *rport_info)
{
bfa_assert(rport_info->max_frmsz != 0);
- /**
+ /*
* Some JBODs are seen to be not setting PDU size correctly in PLOGI
* responses. Default to minimum size.
*/
@@ -4858,7 +4856,7 @@ bfa_rport_online(struct bfa_rport_s *rport, struct bfa_rport_info_s *rport_info)
rport_info->max_frmsz = FC_MIN_PDUSZ;
}
- bfa_os_assign(rport->rport_info, *rport_info);
+ rport->rport_info = *rport_info;
bfa_sm_send_event(rport, BFA_RPORT_SM_ONLINE);
}
@@ -4890,22 +4888,22 @@ bfa_rport_get_qos_attr(struct bfa_rport_s *rport,
struct bfa_rport_qos_attr_s *qos_attr)
{
qos_attr->qos_priority = rport->qos_attr.qos_priority;
- qos_attr->qos_flow_id = bfa_os_ntohl(rport->qos_attr.qos_flow_id);
+ qos_attr->qos_flow_id = be32_to_cpu(rport->qos_attr.qos_flow_id);
}
void
bfa_rport_clear_stats(struct bfa_rport_s *rport)
{
- bfa_os_memset(&rport->stats, 0, sizeof(rport->stats));
+ memset(&rport->stats, 0, sizeof(rport->stats));
}
-/**
+/*
* SGPG related functions
*/
-/**
+/*
* Compute and return memory needed by FCP(im) module.
*/
static void
@@ -4957,8 +4955,8 @@ bfa_sgpg_attach(struct bfa_s *bfa, void *bfad, struct bfa_iocfc_cfg_s *cfg,
bfa_assert(!(sgpg_pa.pa & (sizeof(struct bfi_sgpg_s) - 1)));
for (i = 0; i < mod->num_sgpgs; i++) {
- bfa_os_memset(hsgpg, 0, sizeof(*hsgpg));
- bfa_os_memset(sgpg, 0, sizeof(*sgpg));
+ memset(hsgpg, 0, sizeof(*hsgpg));
+ memset(sgpg, 0, sizeof(*sgpg));
hsgpg->sgpg = sgpg;
sgpg_pa_tmp.pa = bfa_sgaddr_le(sgpg_pa.pa);
@@ -4997,7 +4995,7 @@ bfa_sgpg_iocdisable(struct bfa_s *bfa)
-/**
+/*
* hal_sgpg_public BFA SGPG public functions
*/
@@ -5039,7 +5037,7 @@ bfa_sgpg_mfree(struct bfa_s *bfa, struct list_head *sgpg_q, int nsgpg)
if (list_empty(&mod->sgpg_wait_q))
return;
- /**
+ /*
* satisfy as many waiting requests as possible
*/
do {
@@ -5067,11 +5065,11 @@ bfa_sgpg_wait(struct bfa_s *bfa, struct bfa_sgpg_wqe_s *wqe, int nsgpg)
wqe->nsgpg_total = wqe->nsgpg = nsgpg;
- /**
+ /*
* allocate any left to this one first
*/
if (mod->free_sgpgs) {
- /**
+ /*
* no one else is waiting for SGPG
*/
bfa_assert(list_empty(&mod->sgpg_wait_q));
@@ -5105,7 +5103,7 @@ bfa_sgpg_winit(struct bfa_sgpg_wqe_s *wqe, void (*cbfn) (void *cbarg),
wqe->cbarg = cbarg;
}
-/**
+/*
* UF related functions
*/
/*
@@ -5136,7 +5134,7 @@ claim_uf_pbs(struct bfa_uf_mod_s *ufm, struct bfa_meminfo_s *mi)
bfa_meminfo_dma_virt(mi) += uf_pb_tot_sz;
bfa_meminfo_dma_phys(mi) += uf_pb_tot_sz;
- bfa_os_memset((void *)ufm->uf_pbs_kva, 0, uf_pb_tot_sz);
+ memset((void *)ufm->uf_pbs_kva, 0, uf_pb_tot_sz);
}
static void
@@ -5153,11 +5151,11 @@ claim_uf_post_msgs(struct bfa_uf_mod_s *ufm, struct bfa_meminfo_s *mi)
for (i = 0, uf_bp_msg = ufm->uf_buf_posts; i < ufm->num_ufs;
i++, uf_bp_msg++) {
- bfa_os_memset(uf_bp_msg, 0, sizeof(struct bfi_uf_buf_post_s));
+ memset(uf_bp_msg, 0, sizeof(struct bfi_uf_buf_post_s));
uf_bp_msg->buf_tag = i;
buf_len = sizeof(struct bfa_uf_buf_s);
- uf_bp_msg->buf_len = bfa_os_htons(buf_len);
+ uf_bp_msg->buf_len = cpu_to_be16(buf_len);
bfi_h2i_set(uf_bp_msg->mh, BFI_MC_UF, BFI_UF_H2I_BUF_POST,
bfa_lpuid(ufm->bfa));
@@ -5173,7 +5171,7 @@ claim_uf_post_msgs(struct bfa_uf_mod_s *ufm, struct bfa_meminfo_s *mi)
bfa_sge_to_be(&sge[1]);
}
- /**
+ /*
* advance pointer beyond consumed memory
*/
bfa_meminfo_kva(mi) = (u8 *) uf_bp_msg;
@@ -5194,7 +5192,7 @@ claim_ufs(struct bfa_uf_mod_s *ufm, struct bfa_meminfo_s *mi)
* Initialize UFs and queue it in UF free queue
*/
for (i = 0, uf = ufm->uf_list; i < ufm->num_ufs; i++, uf++) {
- bfa_os_memset(uf, 0, sizeof(struct bfa_uf_s));
+ memset(uf, 0, sizeof(struct bfa_uf_s));
uf->bfa = ufm->bfa;
uf->uf_tag = i;
uf->pb_len = sizeof(struct bfa_uf_buf_s);
@@ -5203,7 +5201,7 @@ claim_ufs(struct bfa_uf_mod_s *ufm, struct bfa_meminfo_s *mi)
list_add_tail(&uf->qe, &ufm->uf_free_q);
}
- /**
+ /*
* advance memory pointer
*/
bfa_meminfo_kva(mi) = (u8 *) uf;
@@ -5241,7 +5239,7 @@ bfa_uf_attach(struct bfa_s *bfa, void *bfad, struct bfa_iocfc_cfg_s *cfg,
{
struct bfa_uf_mod_s *ufm = BFA_UF_MOD(bfa);
- bfa_os_memset(ufm, 0, sizeof(struct bfa_uf_mod_s));
+ memset(ufm, 0, sizeof(struct bfa_uf_mod_s));
ufm->bfa = bfa;
ufm->num_ufs = cfg->fwcfg.num_uf_bufs;
INIT_LIST_HEAD(&ufm->uf_free_q);
@@ -5279,7 +5277,7 @@ bfa_uf_post(struct bfa_uf_mod_s *ufm, struct bfa_uf_s *uf)
if (!uf_post_msg)
return BFA_STATUS_FAILED;
- bfa_os_memcpy(uf_post_msg, &ufm->uf_buf_posts[uf->uf_tag],
+ memcpy(uf_post_msg, &ufm->uf_buf_posts[uf->uf_tag],
sizeof(struct bfi_uf_buf_post_s));
bfa_reqq_produce(ufm->bfa, BFA_REQQ_FCXP);
@@ -5310,8 +5308,8 @@ uf_recv(struct bfa_s *bfa, struct bfi_uf_frm_rcvd_s *m)
u8 *buf = &uf_buf->d[0];
struct fchs_s *fchs;
- m->frm_len = bfa_os_ntohs(m->frm_len);
- m->xfr_len = bfa_os_ntohs(m->xfr_len);
+ m->frm_len = be16_to_cpu(m->frm_len);
+ m->xfr_len = be16_to_cpu(m->xfr_len);
fchs = (struct fchs_s *)uf_buf;
@@ -5365,11 +5363,11 @@ bfa_uf_start(struct bfa_s *bfa)
-/**
+/*
* hal_uf_api
*/
-/**
+/*
* Register handler for all unsolicited receive frames.
*
* @param[in] bfa BFA instance
@@ -5385,7 +5383,7 @@ bfa_uf_recv_register(struct bfa_s *bfa, bfa_cb_uf_recv_t ufrecv, void *cbarg)
ufm->cbarg = cbarg;
}
-/**
+/*
* Free an unsolicited frame back to BFA.
*
* @param[in] uf unsolicited frame to be freed
@@ -5401,7 +5399,7 @@ bfa_uf_free(struct bfa_uf_s *uf)
-/**
+/*
* uf_pub BFA uf module public functions
*/
void
diff --git a/drivers/scsi/bfa/bfa_svc.h b/drivers/scsi/bfa/bfa_svc.h
index 9921dad0d039..e2349d5cdb93 100644
--- a/drivers/scsi/bfa/bfa_svc.h
+++ b/drivers/scsi/bfa/bfa_svc.h
@@ -22,12 +22,12 @@
#include "bfi_ms.h"
-/**
+/*
* Scatter-gather DMA related defines
*/
#define BFA_SGPG_MIN (16)
-/**
+/*
* Alignment macro for SG page allocation
*/
#define BFA_SGPG_ROUNDUP(_l) (((_l) + (sizeof(struct bfi_sgpg_s) - 1)) \
@@ -48,7 +48,7 @@ struct bfa_sgpg_s {
union bfi_addr_u sgpg_pa; /* pa of SG page */
};
-/**
+/*
* Given number of SG elements, BFA_SGPG_NPAGE() returns the number of
* SG pages required.
*/
@@ -75,7 +75,7 @@ void bfa_sgpg_wait(struct bfa_s *bfa, struct bfa_sgpg_wqe_s *wqe, int nsgpgs);
void bfa_sgpg_wcancel(struct bfa_s *bfa, struct bfa_sgpg_wqe_s *wqe);
-/**
+/*
* FCXP related defines
*/
#define BFA_FCXP_MIN (1)
@@ -115,12 +115,12 @@ typedef void (*bfa_fcxp_alloc_cbfn_t) (void *cbarg, struct bfa_fcxp_s *fcxp);
-/**
+/*
* Information needed for a FCXP request
*/
struct bfa_fcxp_req_info_s {
struct bfa_rport_s *bfa_rport;
- /** Pointer to the bfa rport that was
+ /* Pointer to the bfa rport that was
* returned from bfa_rport_create().
* This could be left NULL for WKA or
* for FCXP interactions before the
@@ -137,11 +137,10 @@ struct bfa_fcxp_req_info_s {
struct bfa_fcxp_rsp_info_s {
struct fchs_s rsp_fchs;
- /** !< Response frame's FC header will
+ /* Response frame's FC header will
* be sent back in this field */
u8 rsp_timeout;
- /** !< timeout in seconds, 0-no response
- */
+ /* timeout in seconds, 0-no response */
u8 rsvd2[3];
u32 rsp_maxlen; /* max response length expected */
};
@@ -218,7 +217,7 @@ struct bfa_fcxp_wqe_s {
void bfa_fcxp_isr(struct bfa_s *bfa, struct bfi_msg_s *msg);
-/**
+/*
* RPORT related defines
*/
#define BFA_RPORT_MIN 4
@@ -232,7 +231,7 @@ struct bfa_rport_mod_s {
#define BFA_RPORT_MOD(__bfa) (&(__bfa)->modules.rport_mod)
-/**
+/*
* Convert rport tag to RPORT
*/
#define BFA_RPORT_FROM_TAG(__bfa, _tag) \
@@ -244,7 +243,7 @@ struct bfa_rport_mod_s {
*/
void bfa_rport_isr(struct bfa_s *bfa, struct bfi_msg_s *msg);
-/**
+/*
* BFA rport information.
*/
struct bfa_rport_info_s {
@@ -259,7 +258,7 @@ struct bfa_rport_info_s {
enum bfa_port_speed speed; /* Rport's current speed */
};
-/**
+/*
* BFA rport data structure
*/
struct bfa_rport_s {
@@ -282,7 +281,7 @@ struct bfa_rport_s {
#define BFA_RPORT_FC_COS(_rport) ((_rport)->rport_info.fc_class)
-/**
+/*
* UF - unsolicited receive related defines
*/
@@ -305,7 +304,7 @@ struct bfa_uf_s {
struct bfa_sge_s sges[BFI_SGE_INLINE_MAX];
};
-/**
+/*
* Callback prototype for unsolicited frame receive handler.
*
* @param[in] cbarg callback arg for receive handler
@@ -338,7 +337,7 @@ void bfa_uf_isr(struct bfa_s *bfa, struct bfi_msg_s *msg);
#define BFA_UF_BUFSZ (2 * 1024 + 256)
-/**
+/*
* @todo private
*/
struct bfa_uf_buf_s {
@@ -346,7 +345,7 @@ struct bfa_uf_buf_s {
};
-/**
+/*
* LPS - bfa lport login/logout service interface
*/
struct bfa_lps_s {
@@ -397,14 +396,14 @@ struct bfa_lps_mod_s {
void bfa_lps_isr(struct bfa_s *bfa, struct bfi_msg_s *msg);
-/**
+/*
* FCPORT related defines
*/
#define BFA_FCPORT(_bfa) (&((_bfa)->modules.port))
typedef void (*bfa_cb_port_t) (void *cbarg, enum bfa_status status);
-/**
+/*
* Link notification data structure
*/
struct bfa_fcport_ln_s {
@@ -418,7 +417,7 @@ struct bfa_fcport_trunk_s {
struct bfa_trunk_attr_s attr;
};
-/**
+/*
* BFA FC port data structure
*/
struct bfa_fcport_s {
@@ -613,7 +612,7 @@ void bfa_uf_recv_register(struct bfa_s *bfa, bfa_cb_uf_recv_t ufrecv,
void *cbarg);
void bfa_uf_free(struct bfa_uf_s *uf);
-/**
+/*
* bfa lport service api
*/
diff --git a/drivers/scsi/bfa/bfad.c b/drivers/scsi/bfa/bfad.c
index 4d8784e06e14..1f938974b848 100644
--- a/drivers/scsi/bfa/bfad.c
+++ b/drivers/scsi/bfa/bfad.c
@@ -15,7 +15,7 @@
* General Public License for more details.
*/
-/**
+/*
* bfad.c Linux driver PCI interface module.
*/
#include <linux/module.h>
@@ -151,7 +151,7 @@ bfad_sm_failed(struct bfad_s *bfad, enum bfad_sm_event event);
static void
bfad_sm_fcs_exit(struct bfad_s *bfad, enum bfad_sm_event event);
-/**
+/*
* Beginning state for the driver instance, awaiting the pci_probe event
*/
static void
@@ -181,7 +181,7 @@ bfad_sm_uninit(struct bfad_s *bfad, enum bfad_sm_event event)
}
}
-/**
+/*
* Driver Instance is created, awaiting event INIT to initialize the bfad
*/
static void
@@ -364,7 +364,7 @@ bfad_sm_stopping(struct bfad_s *bfad, enum bfad_sm_event event)
}
}
-/**
+/*
* BFA callbacks
*/
void
@@ -376,7 +376,7 @@ bfad_hcb_comp(void *arg, bfa_status_t status)
complete(&fcomp->comp);
}
-/**
+/*
* bfa_init callback
*/
void
@@ -401,7 +401,7 @@ bfa_cb_init(void *drv, bfa_status_t init_status)
complete(&bfad->comp);
}
-/**
+/*
* BFA_FCS callbacks
*/
struct bfad_port_s *
@@ -457,7 +457,7 @@ bfa_fcb_lport_delete(struct bfad_s *bfad, enum bfa_lport_role roles,
}
}
-/**
+/*
* FCS RPORT alloc callback, after successful PLOGI by FCS
*/
bfa_status_t
@@ -478,7 +478,7 @@ ext:
return rc;
}
-/**
+/*
* FCS PBC VPORT Create
*/
void
@@ -663,7 +663,7 @@ ext:
return rc;
}
-/**
+/*
* Create a vport under a vf.
*/
bfa_status_t
@@ -716,30 +716,6 @@ ext:
return rc;
}
-/**
- * Create a vf and its base vport implicitely.
- */
-bfa_status_t
-bfad_vf_create(struct bfad_s *bfad, u16 vf_id,
- struct bfa_lport_cfg_s *port_cfg)
-{
- struct bfad_vf_s *vf;
- int rc = BFA_STATUS_OK;
-
- vf = kzalloc(sizeof(struct bfad_vf_s), GFP_KERNEL);
- if (!vf) {
- rc = BFA_STATUS_FAILED;
- goto ext;
- }
-
- rc = bfa_fcs_vf_create(&vf->fcs_vf, &bfad->bfa_fcs, vf_id, port_cfg,
- vf);
- if (rc != BFA_STATUS_OK)
- kfree(vf);
-ext:
- return rc;
-}
-
void
bfad_bfa_tmo(unsigned long data)
{
@@ -885,20 +861,6 @@ bfad_pci_uninit(struct pci_dev *pdev, struct bfad_s *bfad)
pci_set_drvdata(pdev, NULL);
}
-void
-bfad_fcs_port_cfg(struct bfad_s *bfad)
-{
- struct bfa_lport_cfg_s port_cfg;
- struct bfa_port_attr_s attr;
- char symname[BFA_SYMNAME_MAXLEN];
-
- sprintf(symname, "%s-%d", BFAD_DRIVER_NAME, bfad->inst_no);
- memcpy(port_cfg.sym_name.symname, symname, strlen(symname));
- bfa_fcport_get_attr(&bfad->bfa, &attr);
- port_cfg.nwwn = attr.nwwn;
- port_cfg.pwwn = attr.pwwn;
-}
-
bfa_status_t
bfad_drv_init(struct bfad_s *bfad)
{
@@ -1089,9 +1051,6 @@ bfad_start_ops(struct bfad_s *bfad) {
bfa_fcs_init(&bfad->bfa_fcs);
spin_unlock_irqrestore(&bfad->bfad_lock, flags);
- /* PPORT FCS config */
- bfad_fcs_port_cfg(bfad);
-
retval = bfad_cfg_pport(bfad, BFA_LPORT_ROLE_FCP_IM);
if (retval != BFA_STATUS_OK) {
if (bfa_sm_cmp_state(bfad, bfad_sm_initializing))
@@ -1181,7 +1140,7 @@ bfad_worker(void *ptr)
return 0;
}
-/**
+/*
* BFA driver interrupt functions
*/
irqreturn_t
@@ -1240,7 +1199,7 @@ bfad_msix(int irq, void *dev_id)
return IRQ_HANDLED;
}
-/**
+/*
* Initialize the MSIX entry table.
*/
static void
@@ -1293,7 +1252,7 @@ bfad_install_msix_handler(struct bfad_s *bfad)
return 0;
}
-/**
+/*
* Setup MSIX based interrupt.
*/
int
@@ -1374,7 +1333,7 @@ bfad_remove_intr(struct bfad_s *bfad)
}
}
-/**
+/*
* PCI probe entry.
*/
int
@@ -1460,7 +1419,7 @@ out:
return error;
}
-/**
+/*
* PCI remove entry.
*/
void
@@ -1541,7 +1500,7 @@ static struct pci_driver bfad_pci_driver = {
.remove = __devexit_p(bfad_pci_remove),
};
-/**
+/*
* Driver module init.
*/
static int __init
@@ -1581,7 +1540,7 @@ ext:
return error;
}
-/**
+/*
* Driver module exit.
*/
static void __exit
diff --git a/drivers/scsi/bfa/bfad_attr.c b/drivers/scsi/bfa/bfad_attr.c
index d8843720eac1..ed9fff440b5c 100644
--- a/drivers/scsi/bfa/bfad_attr.c
+++ b/drivers/scsi/bfa/bfad_attr.c
@@ -15,14 +15,14 @@
* General Public License for more details.
*/
-/**
+/*
* bfad_attr.c Linux driver configuration interface module.
*/
#include "bfad_drv.h"
#include "bfad_im.h"
-/**
+/*
* FC transport template entry, get SCSI target port ID.
*/
void
@@ -48,7 +48,7 @@ bfad_im_get_starget_port_id(struct scsi_target *starget)
spin_unlock_irqrestore(&bfad->bfad_lock, flags);
}
-/**
+/*
* FC transport template entry, get SCSI target nwwn.
*/
void
@@ -70,11 +70,11 @@ bfad_im_get_starget_node_name(struct scsi_target *starget)
if (itnim)
node_name = bfa_fcs_itnim_get_nwwn(&itnim->fcs_itnim);
- fc_starget_node_name(starget) = bfa_os_htonll(node_name);
+ fc_starget_node_name(starget) = cpu_to_be64(node_name);
spin_unlock_irqrestore(&bfad->bfad_lock, flags);
}
-/**
+/*
* FC transport template entry, get SCSI target pwwn.
*/
void
@@ -96,11 +96,11 @@ bfad_im_get_starget_port_name(struct scsi_target *starget)
if (itnim)
port_name = bfa_fcs_itnim_get_pwwn(&itnim->fcs_itnim);
- fc_starget_port_name(starget) = bfa_os_htonll(port_name);
+ fc_starget_port_name(starget) = cpu_to_be64(port_name);
spin_unlock_irqrestore(&bfad->bfad_lock, flags);
}
-/**
+/*
* FC transport template entry, get SCSI host port ID.
*/
void
@@ -114,7 +114,7 @@ bfad_im_get_host_port_id(struct Scsi_Host *shost)
bfa_os_hton3b(bfa_fcs_lport_get_fcid(port->fcs_port));
}
-/**
+/*
* FC transport template entry, get SCSI host port type.
*/
static void
@@ -146,7 +146,7 @@ bfad_im_get_host_port_type(struct Scsi_Host *shost)
}
}
-/**
+/*
* FC transport template entry, get SCSI host port state.
*/
static void
@@ -183,7 +183,7 @@ bfad_im_get_host_port_state(struct Scsi_Host *shost)
}
}
-/**
+/*
* FC transport template entry, get SCSI host active fc4s.
*/
static void
@@ -202,7 +202,7 @@ bfad_im_get_host_active_fc4s(struct Scsi_Host *shost)
fc_host_active_fc4s(shost)[7] = 1;
}
-/**
+/*
* FC transport template entry, get SCSI host link speed.
*/
static void
@@ -236,7 +236,7 @@ bfad_im_get_host_speed(struct Scsi_Host *shost)
}
}
-/**
+/*
* FC transport template entry, get SCSI host fabric name.
*/
static void
@@ -249,11 +249,11 @@ bfad_im_get_host_fabric_name(struct Scsi_Host *shost)
fabric_nwwn = bfa_fcs_lport_get_fabric_name(port->fcs_port);
- fc_host_fabric_name(shost) = bfa_os_htonll(fabric_nwwn);
+ fc_host_fabric_name(shost) = cpu_to_be64(fabric_nwwn);
}
-/**
+/*
* FC transport template entry, get BFAD statistics.
*/
static struct fc_host_statistics *
@@ -304,7 +304,7 @@ bfad_im_get_stats(struct Scsi_Host *shost)
return hstats;
}
-/**
+/*
* FC transport template entry, reset BFAD statistics.
*/
static void
@@ -331,7 +331,7 @@ bfad_im_reset_stats(struct Scsi_Host *shost)
return;
}
-/**
+/*
* FC transport template entry, get rport loss timeout.
*/
static void
@@ -347,7 +347,7 @@ bfad_im_get_rport_loss_tmo(struct fc_rport *rport)
spin_unlock_irqrestore(&bfad->bfad_lock, flags);
}
-/**
+/*
* FC transport template entry, set rport loss timeout.
*/
static void
@@ -633,7 +633,7 @@ struct fc_function_template bfad_im_vport_fc_function_template = {
.set_rport_dev_loss_tmo = bfad_im_set_rport_loss_tmo,
};
-/**
+/*
* Scsi_Host_attrs SCSI host attributes
*/
static ssize_t
@@ -733,7 +733,7 @@ bfad_im_node_name_show(struct device *dev, struct device_attribute *attr,
u64 nwwn;
nwwn = bfa_fcs_lport_get_nwwn(port->fcs_port);
- return snprintf(buf, PAGE_SIZE, "0x%llx\n", bfa_os_htonll(nwwn));
+ return snprintf(buf, PAGE_SIZE, "0x%llx\n", cpu_to_be64(nwwn));
}
static ssize_t
diff --git a/drivers/scsi/bfa/bfad_debugfs.c b/drivers/scsi/bfa/bfad_debugfs.c
index 69ed1c4a903e..1fedeeb4ac1f 100644
--- a/drivers/scsi/bfa/bfad_debugfs.c
+++ b/drivers/scsi/bfa/bfad_debugfs.c
@@ -318,7 +318,7 @@ bfad_debugfs_write_regrd(struct file *file, const char __user *buf,
regbuf = (u32 *)bfad->regdata;
spin_lock_irqsave(&bfad->bfad_lock, flags);
for (i = 0; i < len; i++) {
- *regbuf = bfa_reg_read(reg_addr);
+ *regbuf = readl(reg_addr);
regbuf++;
reg_addr += sizeof(u32);
}
@@ -361,7 +361,7 @@ bfad_debugfs_write_regwr(struct file *file, const char __user *buf,
reg_addr = (u32 *) ((u8 *) bfa_ioc_bar0(ioc) + addr);
spin_lock_irqsave(&bfad->bfad_lock, flags);
- bfa_reg_write(reg_addr, val);
+ writel(val, reg_addr);
spin_unlock_irqrestore(&bfad->bfad_lock, flags);
return nbytes;
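
Editorial note: the debugfs register accessors now use the kernel's generic MMIO helpers instead of the driver's bfa_reg_read()/bfa_reg_write() wrappers. Below is a minimal sketch of that idiom with a placeholder __iomem base; the demo_ function and offset are illustrative and do not correspond to a real bfad field.

#include <linux/io.h>
#include <linux/types.h>

static u32 demo_reg_toggle(void __iomem *regs, unsigned long off)
{
	u32 val;

	val = readl(regs + off);	/* replaces bfa_reg_read()  */
	writel(val | 0x1, regs + off);	/* replaces bfa_reg_write() */
	return val;
}
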
diff --git a/drivers/scsi/bfa/bfad_drv.h b/drivers/scsi/bfa/bfad_drv.h
index 98420bbb4f3f..97f9b6c0937e 100644
--- a/drivers/scsi/bfa/bfad_drv.h
+++ b/drivers/scsi/bfa/bfad_drv.h
@@ -15,11 +15,11 @@
* General Public License for more details.
*/
-/**
+/*
* Contains base driver definitions.
*/
-/**
+/*
* bfad_drv.h Linux driver data structures.
*/
@@ -309,7 +309,6 @@ void bfad_bfa_tmo(unsigned long data);
void bfad_init_timer(struct bfad_s *bfad);
int bfad_pci_init(struct pci_dev *pdev, struct bfad_s *bfad);
void bfad_pci_uninit(struct pci_dev *pdev, struct bfad_s *bfad);
-void bfad_fcs_port_cfg(struct bfad_s *bfad);
void bfad_drv_uninit(struct bfad_s *bfad);
int bfad_worker(void *ptr);
void bfad_debugfs_init(struct bfad_port_s *port);
diff --git a/drivers/scsi/bfa/bfad_im.c b/drivers/scsi/bfa/bfad_im.c
index d950ee44016e..8ca967dee66d 100644
--- a/drivers/scsi/bfa/bfad_im.c
+++ b/drivers/scsi/bfa/bfad_im.c
@@ -15,7 +15,7 @@
* General Public License for more details.
*/
-/**
+/*
* bfad_im.c Linux driver IM module.
*/
@@ -30,8 +30,7 @@ DEFINE_IDR(bfad_im_port_index);
struct scsi_transport_template *bfad_im_scsi_transport_template;
struct scsi_transport_template *bfad_im_scsi_vport_transport_template;
static void bfad_im_itnim_work_handler(struct work_struct *work);
-static int bfad_im_queuecommand(struct scsi_cmnd *cmnd,
- void (*done)(struct scsi_cmnd *));
+static int bfad_im_queuecommand(struct Scsi_Host *h, struct scsi_cmnd *cmnd);
static int bfad_im_slave_alloc(struct scsi_device *sdev);
static void bfad_im_fc_rport_add(struct bfad_im_port_s *im_port,
struct bfad_itnim_s *itnim);
@@ -164,10 +163,10 @@ bfa_cb_tskim_done(void *bfad, struct bfad_tskim_s *dtsk,
wake_up(wq);
}
-/**
+/*
* Scsi_Host_template SCSI host template
*/
-/**
+/*
* Scsi_Host template entry, returns BFAD PCI info.
*/
static const char *
@@ -196,7 +195,7 @@ bfad_im_info(struct Scsi_Host *shost)
return bfa_buf;
}
-/**
+/*
* Scsi_Host template entry, aborts the specified SCSI command.
*
* Returns: SUCCESS or FAILED.
@@ -280,7 +279,7 @@ out:
return rc;
}
-/**
+/*
* Scsi_Host template entry, resets a LUN and aborts all of its commands.
*
* Returns: SUCCESS or FAILED.
@@ -319,7 +318,7 @@ bfad_im_reset_lun_handler(struct scsi_cmnd *cmnd)
goto out;
}
- /**
+ /*
* Set host_scribble to NULL to avoid aborting a task command
* if happens.
*/
@@ -346,7 +345,7 @@ out:
return rc;
}
-/**
+/*
* Scsi_Host template entry, resets the bus and aborts all commands.
*/
static int
@@ -396,7 +395,7 @@ bfad_im_reset_bus_handler(struct scsi_cmnd *cmnd)
return SUCCESS;
}
-/**
+/*
* Scsi_Host template entry slave_destroy.
*/
static void
@@ -406,11 +405,11 @@ bfad_im_slave_destroy(struct scsi_device *sdev)
return;
}
-/**
+/*
* BFA FCS itnim callbacks
*/
-/**
+/*
* BFA FCS itnim alloc callback, after successful PRLI
* Context: Interrupt
*/
@@ -433,7 +432,7 @@ bfa_fcb_itnim_alloc(struct bfad_s *bfad, struct bfa_fcs_itnim_s **itnim,
bfad->bfad_flags |= BFAD_RPORT_ONLINE;
}
-/**
+/*
* BFA FCS itnim free callback.
* Context: Interrupt. bfad_lock is held
*/
@@ -471,7 +470,7 @@ bfa_fcb_itnim_free(struct bfad_s *bfad, struct bfad_itnim_s *itnim_drv)
queue_work(im->drv_workq, &itnim_drv->itnim_work);
}
-/**
+/*
* BFA FCS itnim online callback.
* Context: Interrupt. bfad_lock is held
*/
@@ -492,7 +491,7 @@ bfa_fcb_itnim_online(struct bfad_itnim_s *itnim_drv)
queue_work(im->drv_workq, &itnim_drv->itnim_work);
}
-/**
+/*
* BFA FCS itnim offline callback.
* Context: Interrupt. bfad_lock is held
*/
@@ -519,7 +518,7 @@ bfa_fcb_itnim_offline(struct bfad_itnim_s *itnim_drv)
queue_work(im->drv_workq, &itnim_drv->itnim_work);
}
-/**
+/*
* Allocate a Scsi_Host for a port.
*/
int
@@ -751,7 +750,7 @@ bfad_os_thread_workq(struct bfad_s *bfad)
return BFA_STATUS_OK;
}
-/**
+/*
* Scsi_Host template entry.
*
* Description:
@@ -896,7 +895,7 @@ bfad_os_get_itnim(struct bfad_im_port_s *im_port, int id)
return NULL;
}
-/**
+/*
* Scsi_Host template entry slave_alloc
*/
static int
@@ -915,12 +914,16 @@ bfad_im_slave_alloc(struct scsi_device *sdev)
static u32
bfad_im_supported_speeds(struct bfa_s *bfa)
{
- struct bfa_ioc_attr_s ioc_attr;
+ struct bfa_ioc_attr_s *ioc_attr;
u32 supported_speed = 0;
- bfa_get_attr(bfa, &ioc_attr);
- if (ioc_attr.adapter_attr.max_speed == BFA_PORT_SPEED_8GBPS) {
- if (ioc_attr.adapter_attr.is_mezz) {
+ ioc_attr = kzalloc(sizeof(struct bfa_ioc_attr_s), GFP_KERNEL);
+ if (!ioc_attr)
+ return 0;
+
+ bfa_get_attr(bfa, ioc_attr);
+ if (ioc_attr->adapter_attr.max_speed == BFA_PORT_SPEED_8GBPS) {
+ if (ioc_attr->adapter_attr.is_mezz) {
supported_speed |= FC_PORTSPEED_8GBIT |
FC_PORTSPEED_4GBIT |
FC_PORTSPEED_2GBIT | FC_PORTSPEED_1GBIT;
@@ -929,12 +932,13 @@ bfad_im_supported_speeds(struct bfa_s *bfa)
FC_PORTSPEED_4GBIT |
FC_PORTSPEED_2GBIT;
}
- } else if (ioc_attr.adapter_attr.max_speed == BFA_PORT_SPEED_4GBPS) {
+ } else if (ioc_attr->adapter_attr.max_speed == BFA_PORT_SPEED_4GBPS) {
supported_speed |= FC_PORTSPEED_4GBIT | FC_PORTSPEED_2GBIT |
FC_PORTSPEED_1GBIT;
- } else if (ioc_attr.adapter_attr.max_speed == BFA_PORT_SPEED_10GBPS) {
+ } else if (ioc_attr->adapter_attr.max_speed == BFA_PORT_SPEED_10GBPS) {
supported_speed |= FC_PORTSPEED_10GBIT;
}
+ kfree(ioc_attr);
return supported_speed;
}
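
Editorial note: bfad_im_supported_speeds() now heap-allocates struct bfa_ioc_attr_s rather than placing it on the kernel stack, since an attribute struct of that size does not belong on the fixed-size stack. A minimal sketch of the pattern, with demo_ names standing in for whichever caller needs the attributes:

#include <linux/slab.h>

static int demo_max_speed(struct bfa_s *bfa)
{
	struct bfa_ioc_attr_s *attr;
	int speed = 0;

	attr = kzalloc(sizeof(*attr), GFP_KERNEL);
	if (!attr)
		return 0;		/* fail soft, as the patch does */

	bfa_get_attr(bfa, attr);
	speed = attr->adapter_attr.max_speed;

	kfree(attr);
	return speed;
}
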
@@ -944,14 +948,13 @@ bfad_os_fc_host_init(struct bfad_im_port_s *im_port)
struct Scsi_Host *host = im_port->shost;
struct bfad_s *bfad = im_port->bfad;
struct bfad_port_s *port = im_port->port;
- struct bfa_port_attr_s pattr;
- struct bfa_lport_attr_s port_attr;
char symname[BFA_SYMNAME_MAXLEN];
+ struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(&bfad->bfa);
fc_host_node_name(host) =
- bfa_os_htonll((bfa_fcs_lport_get_nwwn(port->fcs_port)));
+ cpu_to_be64((bfa_fcs_lport_get_nwwn(port->fcs_port)));
fc_host_port_name(host) =
- bfa_os_htonll((bfa_fcs_lport_get_pwwn(port->fcs_port)));
+ cpu_to_be64((bfa_fcs_lport_get_pwwn(port->fcs_port)));
fc_host_max_npiv_vports(host) = bfa_lps_get_max_vport(&bfad->bfa);
fc_host_supported_classes(host) = FC_COS_CLASS3;
@@ -964,15 +967,12 @@ bfad_os_fc_host_init(struct bfad_im_port_s *im_port)
/* For fibre channel services type 0x20 */
fc_host_supported_fc4s(host)[7] = 1;
- bfa_fcs_lport_get_attr(&bfad->bfa_fcs.fabric.bport, &port_attr);
- strncpy(symname, port_attr.port_cfg.sym_name.symname,
+ strncpy(symname, bfad->bfa_fcs.fabric.bport.port_cfg.sym_name.symname,
BFA_SYMNAME_MAXLEN);
sprintf(fc_host_symbolic_name(host), "%s", symname);
fc_host_supported_speeds(host) = bfad_im_supported_speeds(&bfad->bfa);
-
- bfa_fcport_get_attr(&bfad->bfa, &pattr);
- fc_host_maxframe_size(host) = pattr.pport_cfg.maxfrsize;
+ fc_host_maxframe_size(host) = fcport->cfg.maxfrsize;
}
static void
@@ -983,9 +983,9 @@ bfad_im_fc_rport_add(struct bfad_im_port_s *im_port, struct bfad_itnim_s *itnim)
struct bfad_itnim_data_s *itnim_data;
rport_ids.node_name =
- bfa_os_htonll(bfa_fcs_itnim_get_nwwn(&itnim->fcs_itnim));
+ cpu_to_be64(bfa_fcs_itnim_get_nwwn(&itnim->fcs_itnim));
rport_ids.port_name =
- bfa_os_htonll(bfa_fcs_itnim_get_pwwn(&itnim->fcs_itnim));
+ cpu_to_be64(bfa_fcs_itnim_get_pwwn(&itnim->fcs_itnim));
rport_ids.port_id =
bfa_os_hton3b(bfa_fcs_itnim_get_fcid(&itnim->fcs_itnim));
rport_ids.roles = FC_RPORT_ROLE_UNKNOWN;
@@ -1015,7 +1015,7 @@ bfad_im_fc_rport_add(struct bfad_im_port_s *im_port, struct bfad_itnim_s *itnim)
return;
}
-/**
+/*
* Work queue handler using FC transport service
* Context: kernel
*/
@@ -1115,11 +1115,11 @@ bfad_im_itnim_work_handler(struct work_struct *work)
spin_unlock_irqrestore(&bfad->bfad_lock, flags);
}
-/**
+/*
* Scsi_Host template entry, queue a SCSI command to the BFAD.
*/
static int
-bfad_im_queuecommand(struct scsi_cmnd *cmnd, void (*done) (struct scsi_cmnd *))
+bfad_im_queuecommand_lck(struct scsi_cmnd *cmnd, void (*done) (struct scsi_cmnd *))
{
struct bfad_im_port_s *im_port =
(struct bfad_im_port_s *) cmnd->device->host->hostdata[0];
@@ -1186,6 +1186,8 @@ out_fail_cmd:
return 0;
}
+static DEF_SCSI_QCMD(bfad_im_queuecommand)
+
void
bfad_os_rport_online_wait(struct bfad_s *bfad)
{
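
Editorial note: the queuecommand change above follows the SCSI midlayer's host-lock push-down of this era: the original entry point keeps its body but is renamed with an _lck suffix, and DEF_SCSI_QCMD() emits the new (struct Scsi_Host *, struct scsi_cmnd *) prototype that takes the host lock and calls it. A hedged sketch for a hypothetical LLD; the demo_ names are placeholders.

#include <scsi/scsi_host.h>
#include <scsi/scsi_cmnd.h>

static int demo_queuecommand_lck(struct scsi_cmnd *cmnd,
				 void (*done)(struct scsi_cmnd *))
{
	cmnd->scsi_done = done;		/* old-style completion hookup */
	/* ... build and issue the request to the HBA here ... */
	done(cmnd);			/* complete immediately in this sketch */
	return 0;
}

/*
 * Expands to demo_queuecommand(struct Scsi_Host *, struct scsi_cmnd *),
 * which grabs shost->host_lock and then calls demo_queuecommand_lck().
 */
static DEF_SCSI_QCMD(demo_queuecommand)
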
diff --git a/drivers/scsi/bfa/bfi.h b/drivers/scsi/bfa/bfi.h
index 85f2224a5733..58796d1284b7 100644
--- a/drivers/scsi/bfa/bfi.h
+++ b/drivers/scsi/bfa/bfi.h
@@ -23,7 +23,7 @@
#pragma pack(1)
-/**
+/*
* BFI FW image type
*/
#define BFI_FLASH_CHUNK_SZ 256 /* Flash chunk size */
@@ -35,7 +35,7 @@ enum {
BFI_IMAGE_MAX,
};
-/**
+/*
* Msg header common to all msgs
*/
struct bfi_mhdr_s {
@@ -68,7 +68,7 @@ struct bfi_mhdr_s {
#define BFI_I2H_OPCODE_BASE 128
#define BFA_I2HM(_x) ((_x) + BFI_I2H_OPCODE_BASE)
-/**
+/*
****************************************************************************
*
* Scatter Gather Element and Page definition
@@ -79,7 +79,7 @@ struct bfi_mhdr_s {
#define BFI_SGE_INLINE 1
#define BFI_SGE_INLINE_MAX (BFI_SGE_INLINE + 1)
-/**
+/*
* SG Flags
*/
enum {
@@ -90,7 +90,7 @@ enum {
BFI_SGE_PGDLEN = 2, /* cumulative data length for page */
};
-/**
+/*
* DMA addresses
*/
union bfi_addr_u {
@@ -100,7 +100,7 @@ union bfi_addr_u {
} a32;
};
-/**
+/*
* Scatter Gather Element
*/
struct bfi_sge_s {
@@ -116,7 +116,7 @@ struct bfi_sge_s {
union bfi_addr_u sga;
};
-/**
+/*
* Scatter Gather Page
*/
#define BFI_SGPG_DATA_SGES 7
@@ -139,7 +139,7 @@ struct bfi_msg_s {
u32 pl[BFI_LMSG_PL_WSZ];
};
-/**
+/*
* Mailbox message structure
*/
#define BFI_MBMSG_SZ 7
@@ -148,7 +148,7 @@ struct bfi_mbmsg_s {
u32 pl[BFI_MBMSG_SZ];
};
-/**
+/*
* Message Classes
*/
enum bfi_mclass {
@@ -186,7 +186,7 @@ enum bfi_mclass {
#define BFI_BOOT_LOADER_BIOS 1
#define BFI_BOOT_LOADER_UEFI 2
-/**
+/*
*----------------------------------------------------------------------
* IOC
*----------------------------------------------------------------------
@@ -208,7 +208,7 @@ enum bfi_ioc_i2h_msgs {
BFI_IOC_I2H_HBEAT = BFA_I2HM(5),
};
-/**
+/*
* BFI_IOC_H2I_GETATTR_REQ message
*/
struct bfi_ioc_getattr_req_s {
@@ -242,7 +242,7 @@ struct bfi_ioc_attr_s {
u32 card_type; /* card type */
};
-/**
+/*
* BFI_IOC_I2H_GETATTR_REPLY message
*/
struct bfi_ioc_getattr_reply_s {
@@ -251,19 +251,19 @@ struct bfi_ioc_getattr_reply_s {
u8 rsvd[3];
};
-/**
+/*
* Firmware memory page offsets
*/
#define BFI_IOC_SMEM_PG0_CB (0x40)
#define BFI_IOC_SMEM_PG0_CT (0x180)
-/**
+/*
* Firmware statistic offset
*/
#define BFI_IOC_FWSTATS_OFF (0x6B40)
#define BFI_IOC_FWSTATS_SZ (4096)
-/**
+/*
* Firmware trace offset
*/
#define BFI_IOC_TRC_OFF (0x4b00)
@@ -280,7 +280,7 @@ struct bfi_ioc_image_hdr_s {
u32 md5sum[BFI_IOC_MD5SUM_SZ];
};
-/**
+/*
* BFI_IOC_I2H_READY_EVENT message
*/
struct bfi_ioc_rdy_event_s {
@@ -294,7 +294,7 @@ struct bfi_ioc_hbeat_s {
u32 hb_count; /* current heart beat count */
};
-/**
+/*
* IOC hardware/firmware state
*/
enum bfi_ioc_state {
@@ -340,7 +340,7 @@ enum {
((__adap_type) & (BFI_ADAPTER_TTV | BFI_ADAPTER_PROTO | \
BFI_ADAPTER_UNSUPP))
-/**
+/*
* BFI_IOC_H2I_ENABLE_REQ & BFI_IOC_H2I_DISABLE_REQ messages
*/
struct bfi_ioc_ctrl_req_s {
@@ -352,7 +352,7 @@ struct bfi_ioc_ctrl_req_s {
#define bfi_ioc_enable_req_t struct bfi_ioc_ctrl_req_s;
#define bfi_ioc_disable_req_t struct bfi_ioc_ctrl_req_s;
-/**
+/*
* BFI_IOC_I2H_ENABLE_REPLY & BFI_IOC_I2H_DISABLE_REPLY messages
*/
struct bfi_ioc_ctrl_reply_s {
@@ -364,7 +364,7 @@ struct bfi_ioc_ctrl_reply_s {
#define bfi_ioc_disable_reply_t struct bfi_ioc_ctrl_reply_s;
#define BFI_IOC_MSGSZ 8
-/**
+/*
* H2I Messages
*/
union bfi_ioc_h2i_msg_u {
@@ -375,7 +375,7 @@ union bfi_ioc_h2i_msg_u {
u32 mboxmsg[BFI_IOC_MSGSZ];
};
-/**
+/*
* I2H Messages
*/
union bfi_ioc_i2h_msg_u {
@@ -385,7 +385,7 @@ union bfi_ioc_i2h_msg_u {
};
-/**
+/*
*----------------------------------------------------------------------
* PBC
*----------------------------------------------------------------------
@@ -394,7 +394,7 @@ union bfi_ioc_i2h_msg_u {
#define BFI_PBC_MAX_BLUNS 8
#define BFI_PBC_MAX_VPORTS 16
-/**
+/*
* PBC boot lun configuration
*/
struct bfi_pbc_blun_s {
@@ -402,7 +402,7 @@ struct bfi_pbc_blun_s {
lun_t tgt_lun;
};
-/**
+/*
* PBC virtual port configuration
*/
struct bfi_pbc_vport_s {
@@ -410,7 +410,7 @@ struct bfi_pbc_vport_s {
wwn_t vp_nwwn;
};
-/**
+/*
* BFI pre-boot configuration information
*/
struct bfi_pbc_s {
@@ -427,7 +427,7 @@ struct bfi_pbc_s {
struct bfi_pbc_vport_s vport[BFI_PBC_MAX_VPORTS];
};
-/**
+/*
*----------------------------------------------------------------------
* MSGQ
*----------------------------------------------------------------------
@@ -531,7 +531,7 @@ enum bfi_port_i2h {
BFI_PORT_I2H_CLEAR_STATS_RSP = BFA_I2HM(4),
};
-/**
+/*
* Generic REQ type
*/
struct bfi_port_generic_req_s {
@@ -540,7 +540,7 @@ struct bfi_port_generic_req_s {
u32 rsvd;
};
-/**
+/*
* Generic RSP type
*/
struct bfi_port_generic_rsp_s {
@@ -550,7 +550,7 @@ struct bfi_port_generic_rsp_s {
u32 msgtag; /* msgtag for reply */
};
-/**
+/*
* BFI_PORT_H2I_GET_STATS_REQ
*/
struct bfi_port_get_stats_req_s {
diff --git a/drivers/scsi/bfa/bfi_ms.h b/drivers/scsi/bfa/bfi_ms.h
index 69ac85f9e938..fa9f6fb9d45b 100644
--- a/drivers/scsi/bfa/bfi_ms.h
+++ b/drivers/scsi/bfa/bfi_ms.h
@@ -41,7 +41,7 @@ struct bfi_iocfc_cfg_s {
u16 rsvd_1;
u32 endian_sig; /* endian signature of host */
- /**
+ /*
* Request and response circular queue base addresses, size and
* shadow index pointers.
*/
@@ -58,7 +58,7 @@ struct bfi_iocfc_cfg_s {
struct bfa_iocfc_intr_attr_s intr_attr; /* IOC interrupt attributes */
};
-/**
+/*
* Boot target wwn information for this port. This contains either the stored
* or discovered boot target port wwns for the port.
*/
@@ -75,7 +75,7 @@ struct bfi_iocfc_cfgrsp_s {
struct bfi_pbc_s pbc_cfg;
};
-/**
+/*
* BFI_IOCFC_H2I_CFG_REQ message
*/
struct bfi_iocfc_cfg_req_s {
@@ -84,7 +84,7 @@ struct bfi_iocfc_cfg_req_s {
};
-/**
+/*
* BFI_IOCFC_I2H_CFG_REPLY message
*/
struct bfi_iocfc_cfg_reply_s {
@@ -95,7 +95,7 @@ struct bfi_iocfc_cfg_reply_s {
};
-/**
+/*
* BFI_IOCFC_H2I_SET_INTR_REQ message
*/
struct bfi_iocfc_set_intr_req_s {
@@ -107,7 +107,7 @@ struct bfi_iocfc_set_intr_req_s {
};
-/**
+/*
* BFI_IOCFC_H2I_UPDATEQ_REQ message
*/
struct bfi_iocfc_updateq_req_s {
@@ -119,7 +119,7 @@ struct bfi_iocfc_updateq_req_s {
};
-/**
+/*
* BFI_IOCFC_I2H_UPDATEQ_RSP message
*/
struct bfi_iocfc_updateq_rsp_s {
@@ -129,7 +129,7 @@ struct bfi_iocfc_updateq_rsp_s {
};
-/**
+/*
* H2I Messages
*/
union bfi_iocfc_h2i_msg_u {
@@ -140,7 +140,7 @@ union bfi_iocfc_h2i_msg_u {
};
-/**
+/*
* I2H Messages
*/
union bfi_iocfc_i2h_msg_u {
@@ -173,7 +173,7 @@ enum bfi_fcport_i2h {
};
-/**
+/*
* Generic REQ type
*/
struct bfi_fcport_req_s {
@@ -181,7 +181,7 @@ struct bfi_fcport_req_s {
u32 msgtag; /* msgtag for reply */
};
-/**
+/*
* Generic RSP type
*/
struct bfi_fcport_rsp_s {
@@ -191,7 +191,7 @@ struct bfi_fcport_rsp_s {
u32 msgtag; /* msgtag for reply */
};
-/**
+/*
* BFI_FCPORT_H2I_ENABLE_REQ
*/
struct bfi_fcport_enable_req_s {
@@ -205,7 +205,7 @@ struct bfi_fcport_enable_req_s {
u32 rsvd2;
};
-/**
+/*
* BFI_FCPORT_H2I_SET_SVC_PARAMS_REQ
*/
struct bfi_fcport_set_svc_params_req_s {
@@ -214,7 +214,7 @@ struct bfi_fcport_set_svc_params_req_s {
u16 rsvd;
};
-/**
+/*
* BFI_FCPORT_I2H_EVENT
*/
struct bfi_fcport_event_s {
@@ -222,7 +222,7 @@ struct bfi_fcport_event_s {
struct bfa_port_link_s link_state;
};
-/**
+/*
* BFI_FCPORT_I2H_TRUNK_SCN
*/
struct bfi_fcport_trunk_link_s {
@@ -243,7 +243,7 @@ struct bfi_fcport_trunk_scn_s {
struct bfi_fcport_trunk_link_s tlink[BFI_FCPORT_MAX_LINKS];
};
-/**
+/*
* fcport H2I message
*/
union bfi_fcport_h2i_msg_u {
@@ -255,7 +255,7 @@ union bfi_fcport_h2i_msg_u {
struct bfi_fcport_req_s *pstatsclear;
};
-/**
+/*
* fcport I2H message
*/
union bfi_fcport_i2h_msg_u {
@@ -279,7 +279,7 @@ enum bfi_fcxp_i2h {
#define BFA_FCXP_MAX_SGES 2
-/**
+/*
* FCXP send request structure
*/
struct bfi_fcxp_send_req_s {
@@ -299,7 +299,7 @@ struct bfi_fcxp_send_req_s {
struct bfi_sge_s rsp_sge[BFA_FCXP_MAX_SGES]; /* response buf */
};
-/**
+/*
* FCXP send response structure
*/
struct bfi_fcxp_send_rsp_s {
@@ -565,14 +565,14 @@ enum bfi_ioim_i2h {
BFI_IOIM_I2H_IOABORT_RSP = BFA_I2HM(2), /* ABORT rsp */
};
-/**
+/*
* IO command DIF info
*/
struct bfi_ioim_dif_s {
u32 dif_info[4];
};
-/**
+/*
* FCP IO messages overview
*
* @note
@@ -587,7 +587,7 @@ struct bfi_ioim_req_s {
u16 rport_hdl; /* itnim/rport firmware handle */
struct fcp_cmnd_s cmnd; /* IO request info */
- /**
+ /*
* SG elements array within the IO request must be double word
* aligned. This aligment is required to optimize SGM setup for the IO.
*/
@@ -598,7 +598,7 @@ struct bfi_ioim_req_s {
struct bfi_ioim_dif_s dif;
};
-/**
+/*
* This table shows various IO status codes from firmware and their
* meaning. Host driver can use these status codes to further process
* IO completions.
@@ -684,7 +684,7 @@ enum bfi_ioim_status {
};
#define BFI_IOIM_SNSLEN (256)
-/**
+/*
* I/O response message
*/
struct bfi_ioim_rsp_s {
@@ -746,7 +746,7 @@ enum bfi_tskim_status {
BFI_TSKIM_STS_NOT_SUPP = 4,
BFI_TSKIM_STS_FAILED = 5,
- /**
+ /*
* Defined by BFA
*/
BFI_TSKIM_STS_TIMEOUT = 10, /* TM request timedout */
diff --git a/drivers/scsi/cxgbi/cxgb4i/cxgb4i.c b/drivers/scsi/cxgbi/cxgb4i/cxgb4i.c
index 99f2b8c5dd63..8c04fada710b 100644
--- a/drivers/scsi/cxgbi/cxgb4i/cxgb4i.c
+++ b/drivers/scsi/cxgbi/cxgb4i/cxgb4i.c
@@ -692,6 +692,9 @@ static void do_act_open_rpl(struct cxgbi_device *cdev, struct sk_buff *skb)
&csk->daddr.sin_addr.s_addr, ntohs(csk->daddr.sin_port),
atid, tid, status, csk, csk->state, csk->flags);
+ if (status == CPL_ERR_RTX_NEG_ADVICE)
+ goto rel_skb;
+
if (status && status != CPL_ERR_TCAM_FULL &&
status != CPL_ERR_CONN_EXIST &&
status != CPL_ERR_ARP_MISS)
diff --git a/drivers/scsi/dc395x.c b/drivers/scsi/dc395x.c
index 54f50b07dac7..8f1b5c8bf903 100644
--- a/drivers/scsi/dc395x.c
+++ b/drivers/scsi/dc395x.c
@@ -1080,7 +1080,7 @@ static void build_srb(struct scsi_cmnd *cmd, struct DeviceCtlBlk *dcb,
* and is expected to be held on return.
*
**/
-static int dc395x_queue_command(struct scsi_cmnd *cmd, void (*done)(struct scsi_cmnd *))
+static int dc395x_queue_command_lck(struct scsi_cmnd *cmd, void (*done)(struct scsi_cmnd *))
{
struct DeviceCtlBlk *dcb;
struct ScsiReqBlk *srb;
@@ -1154,6 +1154,7 @@ complete:
return 0;
}
+static DEF_SCSI_QCMD(dc395x_queue_command)
/*
* Return the disk geometry for the given SCSI device.
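The dc395x change above is the first of many identical conversions in this patch: the driver's queuecommand handler keeps its old body under a new name with an _lck suffix, and a DEF_SCSI_QCMD() wrapper supplies the midlayer-facing entry point. This is part of moving host_lock acquisition out of the SCSI midlayer; converted drivers keep their old locking behaviour because the wrapper takes the lock for them. Roughly, and treating the details as a sketch of the 2.6.37-era helper rather than a verbatim quote, DEF_SCSI_QCMD expands to:

#define DEF_SCSI_QCMD(func_name)					\
	int func_name(struct Scsi_Host *shost, struct scsi_cmnd *cmd)	\
	{								\
		unsigned long irq_flags;				\
		int rc;							\
		/* take the host lock the midlayer no longer takes */	\
		spin_lock_irqsave(shost->host_lock, irq_flags);		\
		scsi_cmd_get_serial(shost, cmd);			\
		rc = func_name##_lck(cmd, cmd->scsi_done);		\
		spin_unlock_irqrestore(shost->host_lock, irq_flags);	\
		return rc;						\
	}

Drivers audited for lockless operation can instead implement the new two-argument prototype, int (*queuecommand)(struct Scsi_Host *, struct scsi_cmnd *), directly and skip the wrapper.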
diff --git a/drivers/scsi/device_handler/scsi_dh_rdac.c b/drivers/scsi/device_handler/scsi_dh_rdac.c
index b9bcfa4c7d26..5be3ae15cb71 100644
--- a/drivers/scsi/device_handler/scsi_dh_rdac.c
+++ b/drivers/scsi/device_handler/scsi_dh_rdac.c
@@ -773,6 +773,8 @@ static const struct scsi_dh_devlist rdac_dev_list[] = {
{"ENGENIO", "INF-01-00"},
{"STK", "FLEXLINE 380"},
{"SUN", "CSM100_R_FC"},
+ {"SUN", "STK6580_6780"},
+ {"SUN", "SUN_6180"},
{NULL, NULL},
};
diff --git a/drivers/scsi/dpt_i2o.c b/drivers/scsi/dpt_i2o.c
index 23dec0063385..cffcb108ac96 100644
--- a/drivers/scsi/dpt_i2o.c
+++ b/drivers/scsi/dpt_i2o.c
@@ -423,7 +423,7 @@ static int adpt_slave_configure(struct scsi_device * device)
return 0;
}
-static int adpt_queue(struct scsi_cmnd * cmd, void (*done) (struct scsi_cmnd *))
+static int adpt_queue_lck(struct scsi_cmnd * cmd, void (*done) (struct scsi_cmnd *))
{
adpt_hba* pHba = NULL;
struct adpt_device* pDev = NULL; /* dpt per device information */
@@ -491,6 +491,8 @@ static int adpt_queue(struct scsi_cmnd * cmd, void (*done) (struct scsi_cmnd *))
return adpt_scsi_to_i2o(pHba, cmd, pDev);
}
+static DEF_SCSI_QCMD(adpt_queue)
+
static int adpt_bios_param(struct scsi_device *sdev, struct block_device *dev,
sector_t capacity, int geom[])
{
diff --git a/drivers/scsi/dpti.h b/drivers/scsi/dpti.h
index 337746d46043..beded716f93f 100644
--- a/drivers/scsi/dpti.h
+++ b/drivers/scsi/dpti.h
@@ -29,7 +29,7 @@
*/
static int adpt_detect(struct scsi_host_template * sht);
-static int adpt_queue(struct scsi_cmnd * cmd, void (*cmdcomplete) (struct scsi_cmnd *));
+static int adpt_queue(struct Scsi_Host *h, struct scsi_cmnd * cmd);
static int adpt_abort(struct scsi_cmnd * cmd);
static int adpt_reset(struct scsi_cmnd* cmd);
static int adpt_release(struct Scsi_Host *host);
diff --git a/drivers/scsi/dtc.h b/drivers/scsi/dtc.h
index 0b205f8c7326..cdc621204b66 100644
--- a/drivers/scsi/dtc.h
+++ b/drivers/scsi/dtc.h
@@ -36,7 +36,7 @@ static int dtc_abort(Scsi_Cmnd *);
static int dtc_biosparam(struct scsi_device *, struct block_device *,
sector_t, int*);
static int dtc_detect(struct scsi_host_template *);
-static int dtc_queue_command(Scsi_Cmnd *, void (*done)(Scsi_Cmnd *));
+static int dtc_queue_command(struct Scsi_Host *, struct scsi_cmnd *);
static int dtc_bus_reset(Scsi_Cmnd *);
#ifndef CMD_PER_LUN
diff --git a/drivers/scsi/eata.c b/drivers/scsi/eata.c
index d1c31378f6da..53925ac178fd 100644
--- a/drivers/scsi/eata.c
+++ b/drivers/scsi/eata.c
@@ -505,8 +505,7 @@
static int eata2x_detect(struct scsi_host_template *);
static int eata2x_release(struct Scsi_Host *);
-static int eata2x_queuecommand(struct scsi_cmnd *,
- void (*done) (struct scsi_cmnd *));
+static int eata2x_queuecommand(struct Scsi_Host *, struct scsi_cmnd *);
static int eata2x_eh_abort(struct scsi_cmnd *);
static int eata2x_eh_host_reset(struct scsi_cmnd *);
static int eata2x_bios_param(struct scsi_device *, struct block_device *,
@@ -1758,7 +1757,7 @@ static void scsi_to_dev_dir(unsigned int i, struct hostdata *ha)
}
-static int eata2x_queuecommand(struct scsi_cmnd *SCpnt,
+static int eata2x_queuecommand_lck(struct scsi_cmnd *SCpnt,
void (*done) (struct scsi_cmnd *))
{
struct Scsi_Host *shost = SCpnt->device->host;
@@ -1843,6 +1842,8 @@ static int eata2x_queuecommand(struct scsi_cmnd *SCpnt,
return 0;
}
+static DEF_SCSI_QCMD(eata2x_queuecommand)
+
static int eata2x_eh_abort(struct scsi_cmnd *SCarg)
{
struct Scsi_Host *shost = SCarg->device->host;
diff --git a/drivers/scsi/eata_pio.c b/drivers/scsi/eata_pio.c
index 60886c19065e..4a9641e69f54 100644
--- a/drivers/scsi/eata_pio.c
+++ b/drivers/scsi/eata_pio.c
@@ -335,7 +335,7 @@ static inline unsigned int eata_pio_send_command(unsigned long base, unsigned ch
return 0;
}
-static int eata_pio_queue(struct scsi_cmnd *cmd,
+static int eata_pio_queue_lck(struct scsi_cmnd *cmd,
void (*done)(struct scsi_cmnd *))
{
unsigned int x, y;
@@ -438,6 +438,8 @@ static int eata_pio_queue(struct scsi_cmnd *cmd,
return 0;
}
+static DEF_SCSI_QCMD(eata_pio_queue)
+
static int eata_pio_abort(struct scsi_cmnd *cmd)
{
unsigned int loop = 100;
diff --git a/drivers/scsi/esp_scsi.c b/drivers/scsi/esp_scsi.c
index e2bc779f86c1..57558523c1b8 100644
--- a/drivers/scsi/esp_scsi.c
+++ b/drivers/scsi/esp_scsi.c
@@ -916,7 +916,7 @@ static void esp_event_queue_full(struct esp *esp, struct esp_cmd_entry *ent)
scsi_track_queue_full(dev, lp->num_tagged - 1);
}
-static int esp_queuecommand(struct scsi_cmnd *cmd, void (*done)(struct scsi_cmnd *))
+static int esp_queuecommand_lck(struct scsi_cmnd *cmd, void (*done)(struct scsi_cmnd *))
{
struct scsi_device *dev = cmd->device;
struct esp *esp = shost_priv(dev->host);
@@ -941,6 +941,8 @@ static int esp_queuecommand(struct scsi_cmnd *cmd, void (*done)(struct scsi_cmnd
return 0;
}
+static DEF_SCSI_QCMD(esp_queuecommand)
+
static int esp_check_gross_error(struct esp *esp)
{
if (esp->sreg & ESP_STAT_SPAM) {
diff --git a/drivers/scsi/fcoe/fcoe.c b/drivers/scsi/fcoe/fcoe.c
index 844d618b84bd..d23a538a9dfc 100644
--- a/drivers/scsi/fcoe/fcoe.c
+++ b/drivers/scsi/fcoe/fcoe.c
@@ -117,7 +117,7 @@ static void fcoe_recv_frame(struct sk_buff *skb);
static void fcoe_get_lesb(struct fc_lport *, struct fc_els_lesb *);
-module_param_call(create, fcoe_create, NULL, (void *)FIP_MODE_AUTO, S_IWUSR);
+module_param_call(create, fcoe_create, NULL, (void *)FIP_MODE_FABRIC, S_IWUSR);
__MODULE_PARM_TYPE(create, "string");
MODULE_PARM_DESC(create, " Creates fcoe instance on a ethernet interface");
module_param_call(create_vn2vn, fcoe_create, NULL,
@@ -1243,7 +1243,6 @@ int fcoe_rcv(struct sk_buff *skb, struct net_device *netdev,
struct fcoe_interface *fcoe;
struct fc_frame_header *fh;
struct fcoe_percpu_s *fps;
- struct fcoe_port *port;
struct ethhdr *eh;
unsigned int cpu;
@@ -1262,16 +1261,7 @@ int fcoe_rcv(struct sk_buff *skb, struct net_device *netdev,
skb_tail_pointer(skb), skb_end_pointer(skb),
skb->csum, skb->dev ? skb->dev->name : "<NULL>");
- /* check for mac addresses */
eh = eth_hdr(skb);
- port = lport_priv(lport);
- if (compare_ether_addr(eh->h_dest, port->data_src_addr) &&
- compare_ether_addr(eh->h_dest, fcoe->ctlr.ctl_src_addr) &&
- compare_ether_addr(eh->h_dest, (u8[6])FC_FCOE_FLOGI_MAC)) {
- FCOE_NETDEV_DBG(netdev, "wrong destination mac address:%pM\n",
- eh->h_dest);
- goto err;
- }
if (is_fip_mode(&fcoe->ctlr) &&
compare_ether_addr(eh->h_source, fcoe->ctlr.dest_addr)) {
@@ -1291,6 +1281,12 @@ int fcoe_rcv(struct sk_buff *skb, struct net_device *netdev,
skb_set_transport_header(skb, sizeof(struct fcoe_hdr));
fh = (struct fc_frame_header *) skb_transport_header(skb);
+ if (ntoh24(&eh->h_dest[3]) != ntoh24(fh->fh_d_id)) {
+ FCOE_NETDEV_DBG(netdev, "FC frame d_id mismatch with MAC:%pM\n",
+ eh->h_dest);
+ goto err;
+ }
+
fr = fcoe_dev_from_skb(skb);
fr->fr_dev = lport;
fr->ptype = ptype;
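The fcoe_rcv() hunk above drops the comparison of the destination MAC against the port's stored addresses and instead checks that the low three bytes of the destination MAC encode the same FC_ID as the frame header's d_id, which is how FPMA-style FCoE addresses are built (FC-MAP prefix followed by the FC_ID). The ntoh24() helper used in that check just assembles a 24-bit big-endian field; a sketch of it is shown here only to make the comparison readable (the real helper lives in the fc frame headers):

static inline u32 ntoh24(const u8 *p)
{
	return (p[0] << 16) | (p[1] << 8) | p[2];
}

/* frame accepted only if the MAC-embedded FC_ID matches the FC header */
if (ntoh24(&eh->h_dest[3]) != ntoh24(fh->fh_d_id))
	goto err;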
diff --git a/drivers/scsi/fcoe/libfcoe.c b/drivers/scsi/fcoe/libfcoe.c
index aa503d83092a..bc17c7123202 100644
--- a/drivers/scsi/fcoe/libfcoe.c
+++ b/drivers/scsi/fcoe/libfcoe.c
@@ -2296,7 +2296,7 @@ static int fcoe_ctlr_vn_recv(struct fcoe_ctlr *fip, struct sk_buff *skb)
{
struct fip_header *fiph;
enum fip_vn2vn_subcode sub;
- union {
+ struct {
struct fc_rport_priv rdata;
struct fcoe_rport frport;
} buf;
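The libfcoe change above matters because fcoe_ctlr_vn_recv() builds a temporary rdata on the stack and then treats the memory immediately after it as the fcoe_rport, mirroring how the real rport_priv allocation reserves its private area. With a union the two members overlap, so writes to one corrupt the other; a struct lays them out back to back, matching the accessor. A sketch of the accessor this layout is meant to satisfy (assumed to behave as in libfcoe, i.e. the private area directly follows the rdata):

static inline struct fcoe_rport *fcoe_ctlr_rport(struct fc_rport_priv *rdata)
{
	return (struct fcoe_rport *)(rdata + 1);	/* private area follows rdata */
}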
diff --git a/drivers/scsi/fd_mcs.c b/drivers/scsi/fd_mcs.c
index 2ad95aa8f585..a2c6135d337e 100644
--- a/drivers/scsi/fd_mcs.c
+++ b/drivers/scsi/fd_mcs.c
@@ -1072,7 +1072,7 @@ static int fd_mcs_release(struct Scsi_Host *shpnt)
return 0;
}
-static int fd_mcs_queue(Scsi_Cmnd * SCpnt, void (*done) (Scsi_Cmnd *))
+static int fd_mcs_queue_lck(Scsi_Cmnd * SCpnt, void (*done) (Scsi_Cmnd *))
{
struct Scsi_Host *shpnt = SCpnt->device->host;
@@ -1122,6 +1122,8 @@ static int fd_mcs_queue(Scsi_Cmnd * SCpnt, void (*done) (Scsi_Cmnd *))
return 0;
}
+static DEF_SCSI_QCMD(fd_mcs_queue)
+
#if DEBUG_ABORT || DEBUG_RESET
static void fd_mcs_print_info(Scsi_Cmnd * SCpnt)
{
diff --git a/drivers/scsi/fdomain.c b/drivers/scsi/fdomain.c
index e296bcc57d5c..69b7aa54f43f 100644
--- a/drivers/scsi/fdomain.c
+++ b/drivers/scsi/fdomain.c
@@ -1419,7 +1419,7 @@ static irqreturn_t do_fdomain_16x0_intr(int irq, void *dev_id)
return IRQ_HANDLED;
}
-static int fdomain_16x0_queue(struct scsi_cmnd *SCpnt,
+static int fdomain_16x0_queue_lck(struct scsi_cmnd *SCpnt,
void (*done)(struct scsi_cmnd *))
{
if (in_command) {
@@ -1469,6 +1469,8 @@ static int fdomain_16x0_queue(struct scsi_cmnd *SCpnt,
return 0;
}
+static DEF_SCSI_QCMD(fdomain_16x0_queue)
+
#if DEBUG_ABORT
static void print_info(struct scsi_cmnd *SCpnt)
{
diff --git a/drivers/scsi/fnic/fnic.h b/drivers/scsi/fnic/fnic.h
index cbb20b13b228..92f185081e62 100644
--- a/drivers/scsi/fnic/fnic.h
+++ b/drivers/scsi/fnic/fnic.h
@@ -246,7 +246,7 @@ void fnic_set_port_id(struct fc_lport *, u32, struct fc_frame *);
void fnic_update_mac(struct fc_lport *, u8 *new);
void fnic_update_mac_locked(struct fnic *, u8 *new);
-int fnic_queuecommand(struct scsi_cmnd *, void (*done)(struct scsi_cmnd *));
+int fnic_queuecommand(struct Scsi_Host *, struct scsi_cmnd *);
int fnic_abort_cmd(struct scsi_cmnd *);
int fnic_device_reset(struct scsi_cmnd *);
int fnic_host_reset(struct scsi_cmnd *);
diff --git a/drivers/scsi/fnic/fnic_scsi.c b/drivers/scsi/fnic/fnic_scsi.c
index 198cbab3e894..22d02404d15f 100644
--- a/drivers/scsi/fnic/fnic_scsi.c
+++ b/drivers/scsi/fnic/fnic_scsi.c
@@ -349,7 +349,7 @@ static inline int fnic_queue_wq_copy_desc(struct fnic *fnic,
* Routine to send a scsi cdb
* Called with host_lock held and interrupts disabled.
*/
-int fnic_queuecommand(struct scsi_cmnd *sc, void (*done)(struct scsi_cmnd *))
+static int fnic_queuecommand_lck(struct scsi_cmnd *sc, void (*done)(struct scsi_cmnd *))
{
struct fc_lport *lp;
struct fc_rport *rport;
@@ -457,6 +457,8 @@ out:
return ret;
}
+DEF_SCSI_QCMD(fnic_queuecommand)
+
/*
* fnic_fcpio_fw_reset_cmpl_handler
* Routine to handle fw reset completion
diff --git a/drivers/scsi/g_NCR5380.h b/drivers/scsi/g_NCR5380.h
index 921764c9ab24..1bcdb7beb77b 100644
--- a/drivers/scsi/g_NCR5380.h
+++ b/drivers/scsi/g_NCR5380.h
@@ -46,7 +46,7 @@
static int generic_NCR5380_abort(Scsi_Cmnd *);
static int generic_NCR5380_detect(struct scsi_host_template *);
static int generic_NCR5380_release_resources(struct Scsi_Host *);
-static int generic_NCR5380_queue_command(Scsi_Cmnd *, void (*done)(Scsi_Cmnd *));
+static int generic_NCR5380_queue_command(struct Scsi_Host *, struct scsi_cmnd *);
static int generic_NCR5380_bus_reset(Scsi_Cmnd *);
static const char* generic_NCR5380_info(struct Scsi_Host *);
diff --git a/drivers/scsi/gdth.c b/drivers/scsi/gdth.c
index 5a3f93101017..76365700e2d5 100644
--- a/drivers/scsi/gdth.c
+++ b/drivers/scsi/gdth.c
@@ -185,7 +185,7 @@ static long gdth_unlocked_ioctl(struct file *filep, unsigned int cmd,
unsigned long arg);
static void gdth_flush(gdth_ha_str *ha);
-static int gdth_queuecommand(Scsi_Cmnd *scp,void (*done)(Scsi_Cmnd *));
+static int gdth_queuecommand(struct Scsi_Host *h, struct scsi_cmnd *cmd);
static int __gdth_queuecommand(gdth_ha_str *ha, struct scsi_cmnd *scp,
struct gdth_cmndinfo *cmndinfo);
static void gdth_scsi_done(struct scsi_cmnd *scp);
@@ -4004,7 +4004,7 @@ static int gdth_bios_param(struct scsi_device *sdev,struct block_device *bdev,se
}
-static int gdth_queuecommand(struct scsi_cmnd *scp,
+static int gdth_queuecommand_lck(struct scsi_cmnd *scp,
void (*done)(struct scsi_cmnd *))
{
gdth_ha_str *ha = shost_priv(scp->device->host);
@@ -4022,6 +4022,8 @@ static int gdth_queuecommand(struct scsi_cmnd *scp,
return __gdth_queuecommand(ha, scp, cmndinfo);
}
+static DEF_SCSI_QCMD(gdth_queuecommand)
+
static int __gdth_queuecommand(gdth_ha_str *ha, struct scsi_cmnd *scp,
struct gdth_cmndinfo *cmndinfo)
{
@@ -4177,6 +4179,14 @@ static int ioc_general(void __user *arg, char *cmnd)
ha = gdth_find_ha(gen.ionode);
if (!ha)
return -EFAULT;
+
+ if (gen.data_len > INT_MAX)
+ return -EINVAL;
+ if (gen.sense_len > INT_MAX)
+ return -EINVAL;
+ if (gen.data_len + gen.sense_len > INT_MAX)
+ return -EINVAL;
+
if (gen.data_len + gen.sense_len != 0) {
if (!(buf = gdth_ioctl_alloc(ha, gen.data_len + gen.sense_len,
FALSE, &paddr)))
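The ioc_general() checks added above guard a classic 32-bit overflow: data_len and sense_len come from userspace, and their sum sizes the single buffer both are later copied into. Assuming 32-bit length fields (which is what the INT_MAX bounds suggest), the sum can wrap and the allocation ends up far smaller than the subsequent copies. Illustrative values only:

u32 data_len = 0xfffffff0;		/* attacker-controlled */
u32 sense_len = 0x20;
u32 total = data_len + sense_len;	/* wraps to 0x10: tiny allocation,
					   huge copy later */

Bounding each length and their sum by INT_MAX makes the wrap impossible before the allocation size is computed.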
diff --git a/drivers/scsi/hpsa.c b/drivers/scsi/hpsa.c
index c5d0606ad097..b2fb2b2a6e70 100644
--- a/drivers/scsi/hpsa.c
+++ b/drivers/scsi/hpsa.c
@@ -31,7 +31,6 @@
#include <linux/seq_file.h>
#include <linux/init.h>
#include <linux/spinlock.h>
-#include <linux/smp_lock.h>
#include <linux/compat.h>
#include <linux/blktrace_api.h>
#include <linux/uaccess.h>
@@ -143,8 +142,7 @@ static void fill_cmd(struct CommandList *c, u8 cmd, struct ctlr_info *h,
void *buff, size_t size, u8 page_code, unsigned char *scsi3addr,
int cmd_type);
-static int hpsa_scsi_queue_command(struct scsi_cmnd *cmd,
- void (*done)(struct scsi_cmnd *));
+static int hpsa_scsi_queue_command(struct Scsi_Host *h, struct scsi_cmnd *cmd);
static void hpsa_scan_start(struct Scsi_Host *);
static int hpsa_scan_finished(struct Scsi_Host *sh,
unsigned long elapsed_time);
@@ -1926,7 +1924,7 @@ sglist_finished:
}
-static int hpsa_scsi_queue_command(struct scsi_cmnd *cmd,
+static int hpsa_scsi_queue_command_lck(struct scsi_cmnd *cmd,
void (*done)(struct scsi_cmnd *))
{
struct ctlr_info *h;
@@ -2020,6 +2018,8 @@ static int hpsa_scsi_queue_command(struct scsi_cmnd *cmd,
return 0;
}
+static DEF_SCSI_QCMD(hpsa_scsi_queue_command)
+
static void hpsa_scan_start(struct Scsi_Host *sh)
{
struct ctlr_info *h = shost_to_hba(sh);
diff --git a/drivers/scsi/hptiop.c b/drivers/scsi/hptiop.c
index 0729f150b33a..10b65556937b 100644
--- a/drivers/scsi/hptiop.c
+++ b/drivers/scsi/hptiop.c
@@ -751,7 +751,7 @@ static void hptiop_post_req_mv(struct hptiop_hba *hba,
MVIOP_MU_QUEUE_ADDR_HOST_BIT | size_bit, hba);
}
-static int hptiop_queuecommand(struct scsi_cmnd *scp,
+static int hptiop_queuecommand_lck(struct scsi_cmnd *scp,
void (*done)(struct scsi_cmnd *))
{
struct Scsi_Host *host = scp->device->host;
@@ -819,6 +819,8 @@ cmd_done:
return 0;
}
+static DEF_SCSI_QCMD(hptiop_queuecommand)
+
static const char *hptiop_info(struct Scsi_Host *host)
{
return driver_name_long;
diff --git a/drivers/scsi/ibmmca.c b/drivers/scsi/ibmmca.c
index 9a4b69d4f4eb..67fc8ffd52e6 100644
--- a/drivers/scsi/ibmmca.c
+++ b/drivers/scsi/ibmmca.c
@@ -39,7 +39,7 @@
#include <scsi/scsi_host.h>
/* Common forward declarations for all Linux-versions: */
-static int ibmmca_queuecommand (Scsi_Cmnd *, void (*done) (Scsi_Cmnd *));
+static int ibmmca_queuecommand (struct Scsi_Host *, struct scsi_cmnd *);
static int ibmmca_abort (Scsi_Cmnd *);
static int ibmmca_host_reset (Scsi_Cmnd *);
static int ibmmca_biosparam (struct scsi_device *, struct block_device *, sector_t, int *);
@@ -1691,7 +1691,7 @@ static int __devexit ibmmca_remove(struct device *dev)
}
/* The following routine is the SCSI command queue for the midlevel driver */
-static int ibmmca_queuecommand(Scsi_Cmnd * cmd, void (*done) (Scsi_Cmnd *))
+static int ibmmca_queuecommand_lck(Scsi_Cmnd * cmd, void (*done) (Scsi_Cmnd *))
{
unsigned int ldn;
unsigned int scsi_cmd;
@@ -1996,6 +1996,8 @@ static int ibmmca_queuecommand(Scsi_Cmnd * cmd, void (*done) (Scsi_Cmnd *))
return 0;
}
+static DEF_SCSI_QCMD(ibmmca_queuecommand)
+
static int __ibmmca_abort(Scsi_Cmnd * cmd)
{
/* Abort does not work, as the adapter never generates an interrupt on
diff --git a/drivers/scsi/ibmvscsi/ibmvfc.c b/drivers/scsi/ibmvscsi/ibmvfc.c
index 00d08b25425f..57cad7e20caa 100644
--- a/drivers/scsi/ibmvscsi/ibmvfc.c
+++ b/drivers/scsi/ibmvscsi/ibmvfc.c
@@ -1606,7 +1606,7 @@ static inline int ibmvfc_host_chkready(struct ibmvfc_host *vhost)
* Returns:
* 0 on success / other on failure
**/
-static int ibmvfc_queuecommand(struct scsi_cmnd *cmnd,
+static int ibmvfc_queuecommand_lck(struct scsi_cmnd *cmnd,
void (*done) (struct scsi_cmnd *))
{
struct ibmvfc_host *vhost = shost_priv(cmnd->device->host);
@@ -1672,6 +1672,8 @@ static int ibmvfc_queuecommand(struct scsi_cmnd *cmnd,
return 0;
}
+static DEF_SCSI_QCMD(ibmvfc_queuecommand)
+
/**
* ibmvfc_sync_completion - Signal that a synchronous command has completed
* @evt: ibmvfc event struct
diff --git a/drivers/scsi/ibmvscsi/ibmvscsi.c b/drivers/scsi/ibmvscsi/ibmvscsi.c
index 67f78a470f5f..041958453e2a 100644
--- a/drivers/scsi/ibmvscsi/ibmvscsi.c
+++ b/drivers/scsi/ibmvscsi/ibmvscsi.c
@@ -713,7 +713,7 @@ static inline u16 lun_from_dev(struct scsi_device *dev)
* @cmd: struct scsi_cmnd to be executed
* @done: Callback function to be called when cmd is completed
*/
-static int ibmvscsi_queuecommand(struct scsi_cmnd *cmnd,
+static int ibmvscsi_queuecommand_lck(struct scsi_cmnd *cmnd,
void (*done) (struct scsi_cmnd *))
{
struct srp_cmd *srp_cmd;
@@ -766,6 +766,8 @@ static int ibmvscsi_queuecommand(struct scsi_cmnd *cmnd,
return ibmvscsi_send_srp_event(evt_struct, hostdata, 0);
}
+static DEF_SCSI_QCMD(ibmvscsi_queuecommand)
+
/* ------------------------------------------------------------
* Routines for driver initialization
*/
diff --git a/drivers/scsi/imm.c b/drivers/scsi/imm.c
index 4734ab0b3ff6..99aa0e5699bc 100644
--- a/drivers/scsi/imm.c
+++ b/drivers/scsi/imm.c
@@ -926,7 +926,7 @@ static int imm_engine(imm_struct *dev, struct scsi_cmnd *cmd)
return 0;
}
-static int imm_queuecommand(struct scsi_cmnd *cmd,
+static int imm_queuecommand_lck(struct scsi_cmnd *cmd,
void (*done)(struct scsi_cmnd *))
{
imm_struct *dev = imm_dev(cmd->device->host);
@@ -949,6 +949,8 @@ static int imm_queuecommand(struct scsi_cmnd *cmd,
return 0;
}
+static DEF_SCSI_QCMD(imm_queuecommand)
+
/*
* Apparently the disk->capacity attribute is off by 1 sector
* for all disk drives. We add the one here, but it should really
diff --git a/drivers/scsi/in2000.c b/drivers/scsi/in2000.c
index 52bdc6df6b92..6568aab745a0 100644
--- a/drivers/scsi/in2000.c
+++ b/drivers/scsi/in2000.c
@@ -334,7 +334,7 @@ static uchar calc_sync_xfer(unsigned int period, unsigned int offset)
static void in2000_execute(struct Scsi_Host *instance);
-static int in2000_queuecommand(Scsi_Cmnd * cmd, void (*done) (Scsi_Cmnd *))
+static int in2000_queuecommand_lck(Scsi_Cmnd * cmd, void (*done) (Scsi_Cmnd *))
{
struct Scsi_Host *instance;
struct IN2000_hostdata *hostdata;
@@ -431,6 +431,8 @@ static int in2000_queuecommand(Scsi_Cmnd * cmd, void (*done) (Scsi_Cmnd *))
return 0;
}
+static DEF_SCSI_QCMD(in2000_queuecommand)
+
/*
diff --git a/drivers/scsi/in2000.h b/drivers/scsi/in2000.h
index 0fb8b06b8392..5821e1fbce08 100644
--- a/drivers/scsi/in2000.h
+++ b/drivers/scsi/in2000.h
@@ -396,7 +396,7 @@ struct IN2000_hostdata {
flags)
static int in2000_detect(struct scsi_host_template *) in2000__INIT;
-static int in2000_queuecommand(Scsi_Cmnd *, void (*done)(Scsi_Cmnd *));
+static int in2000_queuecommand(struct Scsi_Host *, struct scsi_cmnd *);
static int in2000_abort(Scsi_Cmnd *);
static void in2000_setup(char *, int *) in2000__INIT;
static int in2000_biosparam(struct scsi_device *, struct block_device *,
diff --git a/drivers/scsi/initio.c b/drivers/scsi/initio.c
index 108797761b95..9627d062e16b 100644
--- a/drivers/scsi/initio.c
+++ b/drivers/scsi/initio.c
@@ -2639,7 +2639,7 @@ static void initio_build_scb(struct initio_host * host, struct scsi_ctrl_blk * c
* will cause the mid layer to call us again later with the command)
*/
-static int i91u_queuecommand(struct scsi_cmnd *cmd,
+static int i91u_queuecommand_lck(struct scsi_cmnd *cmd,
void (*done)(struct scsi_cmnd *))
{
struct initio_host *host = (struct initio_host *) cmd->device->host->hostdata;
@@ -2656,6 +2656,8 @@ static int i91u_queuecommand(struct scsi_cmnd *cmd,
return 0;
}
+static DEF_SCSI_QCMD(i91u_queuecommand)
+
/**
* i91u_bus_reset - reset the SCSI bus
* @cmnd: Command block we want to trigger the reset for
diff --git a/drivers/scsi/ipr.c b/drivers/scsi/ipr.c
index df9a12c8b373..5bbaee597e88 100644
--- a/drivers/scsi/ipr.c
+++ b/drivers/scsi/ipr.c
@@ -5709,7 +5709,7 @@ static void ipr_scsi_done(struct ipr_cmnd *ipr_cmd)
* SCSI_MLQUEUE_DEVICE_BUSY if device is busy
* SCSI_MLQUEUE_HOST_BUSY if host is busy
**/
-static int ipr_queuecommand(struct scsi_cmnd *scsi_cmd,
+static int ipr_queuecommand_lck(struct scsi_cmnd *scsi_cmd,
void (*done) (struct scsi_cmnd *))
{
struct ipr_ioa_cfg *ioa_cfg;
@@ -5792,6 +5792,8 @@ static int ipr_queuecommand(struct scsi_cmnd *scsi_cmd,
return 0;
}
+static DEF_SCSI_QCMD(ipr_queuecommand)
+
/**
* ipr_ioctl - IOCTL handler
* @sdev: scsi device struct
@@ -9025,6 +9027,8 @@ static struct pci_device_id ipr_pci_table[] __devinitdata = {
PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_574D, 0, 0, 0 },
{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROC_FPGA_E2,
PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57B2, 0, 0, 0 },
+ { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROC_FPGA_E2,
+ PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57C4, 0, 0, 0 },
{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROC_ASIC_E2,
PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57B4, 0, 0, 0 },
{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROC_ASIC_E2,
diff --git a/drivers/scsi/ipr.h b/drivers/scsi/ipr.h
index aa8bb2f2c6ee..b28a00f1082c 100644
--- a/drivers/scsi/ipr.h
+++ b/drivers/scsi/ipr.h
@@ -82,6 +82,7 @@
#define IPR_SUBS_DEV_ID_57B4 0x033B
#define IPR_SUBS_DEV_ID_57B2 0x035F
+#define IPR_SUBS_DEV_ID_57C4 0x0354
#define IPR_SUBS_DEV_ID_57C6 0x0357
#define IPR_SUBS_DEV_ID_57CC 0x035C
diff --git a/drivers/scsi/ips.c b/drivers/scsi/ips.c
index f83a116955f2..b2511acd39bd 100644
--- a/drivers/scsi/ips.c
+++ b/drivers/scsi/ips.c
@@ -232,7 +232,7 @@ static int ips_detect(struct scsi_host_template *);
static int ips_release(struct Scsi_Host *);
static int ips_eh_abort(struct scsi_cmnd *);
static int ips_eh_reset(struct scsi_cmnd *);
-static int ips_queue(struct scsi_cmnd *, void (*)(struct scsi_cmnd *));
+static int ips_queue(struct Scsi_Host *, struct scsi_cmnd *);
static const char *ips_info(struct Scsi_Host *);
static irqreturn_t do_ipsintr(int, void *);
static int ips_hainit(ips_ha_t *);
@@ -1046,7 +1046,7 @@ static int ips_eh_reset(struct scsi_cmnd *SC)
/* Linux obtains io_request_lock before calling this function */
/* */
/****************************************************************************/
-static int ips_queue(struct scsi_cmnd *SC, void (*done) (struct scsi_cmnd *))
+static int ips_queue_lck(struct scsi_cmnd *SC, void (*done) (struct scsi_cmnd *))
{
ips_ha_t *ha;
ips_passthru_t *pt;
@@ -1137,6 +1137,8 @@ static int ips_queue(struct scsi_cmnd *SC, void (*done) (struct scsi_cmnd *))
return (0);
}
+static DEF_SCSI_QCMD(ips_queue)
+
/****************************************************************************/
/* */
/* Routine Name: ips_biosparam */
diff --git a/drivers/scsi/libfc/fc_disc.c b/drivers/scsi/libfc/fc_disc.c
index 32f67c4b03fc..911b2736cafa 100644
--- a/drivers/scsi/libfc/fc_disc.c
+++ b/drivers/scsi/libfc/fc_disc.c
@@ -684,10 +684,9 @@ void fc_disc_stop(struct fc_lport *lport)
{
struct fc_disc *disc = &lport->disc;
- if (disc) {
+ if (disc->pending)
cancel_delayed_work_sync(&disc->disc_work);
- fc_disc_stop_rports(disc);
- }
+ fc_disc_stop_rports(disc);
}
/**
diff --git a/drivers/scsi/libfc/fc_fcp.c b/drivers/scsi/libfc/fc_fcp.c
index c797f6b48f05..2924363d142b 100644
--- a/drivers/scsi/libfc/fc_fcp.c
+++ b/drivers/scsi/libfc/fc_fcp.c
@@ -58,8 +58,7 @@ struct kmem_cache *scsi_pkt_cachep;
#define FC_SRB_WRITE (1 << 0)
/*
- * The SCp.ptr should be tested and set under the host lock. NULL indicates
- * that the command has been retruned to the scsi layer.
+ * The SCp.ptr should be tested and set under the si->scsi_queue_lock
*/
#define CMD_SP(Cmnd) ((struct fc_fcp_pkt *)(Cmnd)->SCp.ptr)
#define CMD_ENTRY_STATUS(Cmnd) ((Cmnd)->SCp.have_data_in)
@@ -1754,7 +1753,7 @@ static inline int fc_fcp_lport_queue_ready(struct fc_lport *lport)
* This is the i/o strategy routine, called by the SCSI layer. This routine
* is called with the host_lock held.
*/
-int fc_queuecommand(struct scsi_cmnd *sc_cmd, void (*done)(struct scsi_cmnd *))
+static int fc_queuecommand_lck(struct scsi_cmnd *sc_cmd, void (*done)(struct scsi_cmnd *))
{
struct fc_lport *lport;
struct fc_rport *rport = starget_to_rport(scsi_target(sc_cmd->device));
@@ -1852,6 +1851,8 @@ out:
spin_lock_irq(lport->host->host_lock);
return rc;
}
+
+DEF_SCSI_QCMD(fc_queuecommand)
EXPORT_SYMBOL(fc_queuecommand);
/**
@@ -1880,8 +1881,6 @@ static void fc_io_compl(struct fc_fcp_pkt *fsp)
lport = fsp->lp;
si = fc_get_scsi_internal(lport);
- if (!fsp->cmd)
- return;
/*
* if can_queue ramp down is done then try can_queue ramp up
@@ -1891,11 +1890,6 @@ static void fc_io_compl(struct fc_fcp_pkt *fsp)
fc_fcp_can_queue_ramp_up(lport);
sc_cmd = fsp->cmd;
- fsp->cmd = NULL;
-
- if (!sc_cmd->SCp.ptr)
- return;
-
CMD_SCSI_STATUS(sc_cmd) = fsp->cdb_status;
switch (fsp->status_code) {
case FC_COMPLETE:
@@ -1971,15 +1965,13 @@ static void fc_io_compl(struct fc_fcp_pkt *fsp)
break;
}
- if (lport->state != LPORT_ST_READY && fsp->status_code != FC_COMPLETE) {
- sc_cmd->result = (DID_REQUEUE << 16);
- FC_FCP_DBG(fsp, "Returning DID_REQUEUE to scsi-ml\n");
- }
+ if (lport->state != LPORT_ST_READY && fsp->status_code != FC_COMPLETE)
+ sc_cmd->result = (DID_TRANSPORT_DISRUPTED << 16);
spin_lock_irqsave(&si->scsi_queue_lock, flags);
list_del(&fsp->list);
- spin_unlock_irqrestore(&si->scsi_queue_lock, flags);
sc_cmd->SCp.ptr = NULL;
+ spin_unlock_irqrestore(&si->scsi_queue_lock, flags);
sc_cmd->scsi_done(sc_cmd);
/* release ref from initial allocation in queue command */
@@ -1997,6 +1989,7 @@ int fc_eh_abort(struct scsi_cmnd *sc_cmd)
{
struct fc_fcp_pkt *fsp;
struct fc_lport *lport;
+ struct fc_fcp_internal *si;
int rc = FAILED;
unsigned long flags;
@@ -2006,7 +1999,8 @@ int fc_eh_abort(struct scsi_cmnd *sc_cmd)
else if (!lport->link_up)
return rc;
- spin_lock_irqsave(lport->host->host_lock, flags);
+ si = fc_get_scsi_internal(lport);
+ spin_lock_irqsave(&si->scsi_queue_lock, flags);
fsp = CMD_SP(sc_cmd);
if (!fsp) {
/* command completed while scsi eh was setting up */
@@ -2015,7 +2009,7 @@ int fc_eh_abort(struct scsi_cmnd *sc_cmd)
}
/* grab a ref so the fsp and sc_cmd cannot be relased from under us */
fc_fcp_pkt_hold(fsp);
- spin_unlock_irqrestore(lport->host->host_lock, flags);
+ spin_unlock_irqrestore(&si->scsi_queue_lock, flags);
if (fc_fcp_lock_pkt(fsp)) {
/* completed while we were waiting for timer to be deleted */
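The fc_fcp.c hunks above change which lock protects SCp.ptr (CMD_SP): completion and abort now both use the fcp-internal scsi_queue_lock instead of host_lock, and fc_io_compl() clears SCp.ptr before releasing that lock. That is what lets the abort path drop its extra NULL checks safely; condensed, the abort side now looks like this (names as in the hunks above):

spin_lock_irqsave(&si->scsi_queue_lock, flags);
fsp = CMD_SP(sc_cmd);			/* NULL: command already completed */
if (fsp)
	fc_fcp_pkt_hold(fsp);		/* pin the packet before dropping the lock */
spin_unlock_irqrestore(&si->scsi_queue_lock, flags);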
diff --git a/drivers/scsi/libfc/fc_lport.c b/drivers/scsi/libfc/fc_lport.c
index d9b6e11b0e88..9be63edbf8fb 100644
--- a/drivers/scsi/libfc/fc_lport.c
+++ b/drivers/scsi/libfc/fc_lport.c
@@ -1447,13 +1447,7 @@ void fc_lport_flogi_resp(struct fc_seq *sp, struct fc_frame *fp,
}
did = fc_frame_did(fp);
-
- if (!did) {
- FC_LPORT_DBG(lport, "Bad FLOGI response\n");
- goto out;
- }
-
- if (fc_frame_payload_op(fp) == ELS_LS_ACC) {
+ if (fc_frame_payload_op(fp) == ELS_LS_ACC && did) {
flp = fc_frame_payload_get(fp, sizeof(*flp));
if (flp) {
mfs = ntohs(flp->fl_csp.sp_bb_data) &
@@ -1492,8 +1486,10 @@ void fc_lport_flogi_resp(struct fc_seq *sp, struct fc_frame *fp,
fc_lport_enter_dns(lport);
}
}
- } else
+ } else {
+ FC_LPORT_DBG(lport, "FLOGI RJT or bad response\n");
fc_lport_error(lport, fp);
+ }
out:
fc_frame_free(fp);
diff --git a/drivers/scsi/libfc/fc_rport.c b/drivers/scsi/libfc/fc_rport.c
index b9f2286fe0cb..a84ef13ed74a 100644
--- a/drivers/scsi/libfc/fc_rport.c
+++ b/drivers/scsi/libfc/fc_rport.c
@@ -196,9 +196,9 @@ static const char *fc_rport_state(struct fc_rport_priv *rdata)
void fc_set_rport_loss_tmo(struct fc_rport *rport, u32 timeout)
{
if (timeout)
- rport->dev_loss_tmo = timeout + 5;
+ rport->dev_loss_tmo = timeout;
else
- rport->dev_loss_tmo = 30;
+ rport->dev_loss_tmo = 1;
}
EXPORT_SYMBOL(fc_set_rport_loss_tmo);
diff --git a/drivers/scsi/libiscsi.c b/drivers/scsi/libiscsi.c
index 633e09036357..c15fde808c33 100644
--- a/drivers/scsi/libiscsi.c
+++ b/drivers/scsi/libiscsi.c
@@ -1599,7 +1599,7 @@ enum {
FAILURE_SESSION_NOT_READY,
};
-int iscsi_queuecommand(struct scsi_cmnd *sc, void (*done)(struct scsi_cmnd *))
+static int iscsi_queuecommand_lck(struct scsi_cmnd *sc, void (*done)(struct scsi_cmnd *))
{
struct iscsi_cls_session *cls_session;
struct Scsi_Host *host;
@@ -1736,6 +1736,8 @@ fault:
spin_lock(host->host_lock);
return 0;
}
+
+DEF_SCSI_QCMD(iscsi_queuecommand)
EXPORT_SYMBOL_GPL(iscsi_queuecommand);
int iscsi_change_queue_depth(struct scsi_device *sdev, int depth, int reason)
diff --git a/drivers/scsi/libsas/sas_scsi_host.c b/drivers/scsi/libsas/sas_scsi_host.c
index 55f09e92ab59..29251fabecc6 100644
--- a/drivers/scsi/libsas/sas_scsi_host.c
+++ b/drivers/scsi/libsas/sas_scsi_host.c
@@ -189,7 +189,7 @@ int sas_queue_up(struct sas_task *task)
* Note: XXX: Remove the host unlock/lock pair when SCSI Core can
* call us without holding an IRQ spinlock...
*/
-int sas_queuecommand(struct scsi_cmnd *cmd,
+static int sas_queuecommand_lck(struct scsi_cmnd *cmd,
void (*scsi_done)(struct scsi_cmnd *))
__releases(host->host_lock)
__acquires(dev->sata_dev.ap->lock)
@@ -254,6 +254,8 @@ out:
return res;
}
+DEF_SCSI_QCMD(sas_queuecommand)
+
static void sas_eh_finish_cmd(struct scsi_cmnd *cmd)
{
struct sas_task *task = TO_SAS_TASK(cmd);
diff --git a/drivers/scsi/lpfc/lpfc.h b/drivers/scsi/lpfc/lpfc.h
index a50aa03b8ac1..196de40b906c 100644
--- a/drivers/scsi/lpfc/lpfc.h
+++ b/drivers/scsi/lpfc/lpfc.h
@@ -202,9 +202,12 @@ struct lpfc_stats {
uint32_t elsRcvPRLO;
uint32_t elsRcvPRLI;
uint32_t elsRcvLIRR;
+ uint32_t elsRcvRLS;
uint32_t elsRcvRPS;
uint32_t elsRcvRPL;
uint32_t elsRcvRRQ;
+ uint32_t elsRcvRTV;
+ uint32_t elsRcvECHO;
uint32_t elsXmitFLOGI;
uint32_t elsXmitFDISC;
uint32_t elsXmitPLOGI;
@@ -549,9 +552,11 @@ struct lpfc_hba {
#define ELS_XRI_ABORT_EVENT 0x40
#define ASYNC_EVENT 0x80
#define LINK_DISABLED 0x100 /* Link disabled by user */
-#define FCF_DISC_INPROGRESS 0x200 /* FCF discovery in progress */
-#define HBA_FIP_SUPPORT 0x400 /* FIP support in HBA */
-#define HBA_AER_ENABLED 0x800 /* AER enabled with HBA */
+#define FCF_TS_INPROG 0x200 /* FCF table scan in progress */
+#define FCF_RR_INPROG 0x400 /* FCF roundrobin flogi in progress */
+#define HBA_FIP_SUPPORT 0x800 /* FIP support in HBA */
+#define HBA_AER_ENABLED 0x1000 /* AER enabled with HBA */
+#define HBA_DEVLOSS_TMO 0x2000 /* HBA in devloss timeout */
uint32_t fcp_ring_in_use; /* When polling test if intr-hndlr active*/
struct lpfc_dmabuf slim2p;
@@ -573,6 +578,7 @@ struct lpfc_hba {
/* These fields used to be binfo */
uint32_t fc_pref_DID; /* preferred D_ID */
uint8_t fc_pref_ALPA; /* preferred AL_PA */
+ uint32_t fc_edtovResol; /* E_D_TOV timer resolution */
uint32_t fc_edtov; /* E_D_TOV timer value */
uint32_t fc_arbtov; /* ARB_TOV timer value */
uint32_t fc_ratov; /* R_A_TOV timer value */
diff --git a/drivers/scsi/lpfc/lpfc_attr.c b/drivers/scsi/lpfc/lpfc_attr.c
index f681eea57730..c1cbec01345d 100644
--- a/drivers/scsi/lpfc/lpfc_attr.c
+++ b/drivers/scsi/lpfc/lpfc_attr.c
@@ -3789,8 +3789,13 @@ sysfs_mbox_read(struct file *filp, struct kobject *kobj,
break;
case MBX_SECURITY_MGMT:
case MBX_AUTH_PORT:
- if (phba->pci_dev_grp == LPFC_PCI_DEV_OC)
+ if (phba->pci_dev_grp == LPFC_PCI_DEV_OC) {
+ printk(KERN_WARNING "mbox_read:Command 0x%x "
+ "is not permitted\n", pmb->mbxCommand);
+ sysfs_mbox_idle(phba);
+ spin_unlock_irq(&phba->hbalock);
return -EPERM;
+ }
break;
case MBX_READ_SPARM64:
case MBX_READ_LA:
diff --git a/drivers/scsi/lpfc/lpfc_bsg.c b/drivers/scsi/lpfc/lpfc_bsg.c
index f5d60b55f53a..7260c3af555a 100644
--- a/drivers/scsi/lpfc/lpfc_bsg.c
+++ b/drivers/scsi/lpfc/lpfc_bsg.c
@@ -3142,12 +3142,12 @@ lpfc_bsg_menlo_cmd_cmp(struct lpfc_hba *phba,
job = menlo->set_job;
job->dd_data = NULL; /* so timeout handler does not reply */
- spin_lock_irqsave(&phba->hbalock, flags);
+ spin_lock(&phba->hbalock);
cmdiocbq->iocb_flag |= LPFC_IO_WAKE;
if (cmdiocbq->context2 && rspiocbq)
memcpy(&((struct lpfc_iocbq *)cmdiocbq->context2)->iocb,
&rspiocbq->iocb, sizeof(IOCB_t));
- spin_unlock_irqrestore(&phba->hbalock, flags);
+ spin_unlock(&phba->hbalock);
bmp = menlo->bmp;
rspiocbq = menlo->rspiocbq;
diff --git a/drivers/scsi/lpfc/lpfc_crtn.h b/drivers/scsi/lpfc/lpfc_crtn.h
index 03f4ddc18572..a5f5a093a8a4 100644
--- a/drivers/scsi/lpfc/lpfc_crtn.h
+++ b/drivers/scsi/lpfc/lpfc_crtn.h
@@ -44,6 +44,8 @@ int lpfc_reg_rpi(struct lpfc_hba *, uint16_t, uint32_t, uint8_t *,
void lpfc_set_var(struct lpfc_hba *, LPFC_MBOXQ_t *, uint32_t, uint32_t);
void lpfc_unreg_login(struct lpfc_hba *, uint16_t, uint32_t, LPFC_MBOXQ_t *);
void lpfc_unreg_did(struct lpfc_hba *, uint16_t, uint32_t, LPFC_MBOXQ_t *);
+void lpfc_sli4_unreg_all_rpis(struct lpfc_vport *);
+
void lpfc_reg_vpi(struct lpfc_vport *, LPFC_MBOXQ_t *);
void lpfc_register_new_vport(struct lpfc_hba *, struct lpfc_vport *,
struct lpfc_nodelist *);
@@ -229,6 +231,7 @@ void lpfc_sli4_fcf_dead_failthrough(struct lpfc_hba *);
uint16_t lpfc_sli4_fcf_rr_next_index_get(struct lpfc_hba *);
int lpfc_sli4_fcf_rr_index_set(struct lpfc_hba *, uint16_t);
void lpfc_sli4_fcf_rr_index_clear(struct lpfc_hba *, uint16_t);
+int lpfc_sli4_fcf_rr_next_proc(struct lpfc_vport *, uint16_t);
int lpfc_mem_alloc(struct lpfc_hba *, int align);
void lpfc_mem_free(struct lpfc_hba *);
@@ -271,6 +274,7 @@ int lpfc_sli_issue_iocb(struct lpfc_hba *, uint32_t,
void lpfc_sli_pcimem_bcopy(void *, void *, uint32_t);
void lpfc_sli_bemem_bcopy(void *, void *, uint32_t);
void lpfc_sli_abort_iocb_ring(struct lpfc_hba *, struct lpfc_sli_ring *);
+void lpfc_sli_hba_iocb_abort(struct lpfc_hba *);
void lpfc_sli_flush_fcp_rings(struct lpfc_hba *);
int lpfc_sli_ringpostbuf_put(struct lpfc_hba *, struct lpfc_sli_ring *,
struct lpfc_dmabuf *);
diff --git a/drivers/scsi/lpfc/lpfc_els.c b/drivers/scsi/lpfc/lpfc_els.c
index e6ca12f6c6cb..884f4d321799 100644
--- a/drivers/scsi/lpfc/lpfc_els.c
+++ b/drivers/scsi/lpfc/lpfc_els.c
@@ -177,15 +177,18 @@ lpfc_prep_els_iocb(struct lpfc_vport *vport, uint8_t expectRsp,
(elscmd == ELS_CMD_LOGO)))
switch (elscmd) {
case ELS_CMD_FLOGI:
- elsiocb->iocb_flag |= ((ELS_ID_FLOGI << LPFC_FIP_ELS_ID_SHIFT)
+ elsiocb->iocb_flag |=
+ ((LPFC_ELS_ID_FLOGI << LPFC_FIP_ELS_ID_SHIFT)
& LPFC_FIP_ELS_ID_MASK);
break;
case ELS_CMD_FDISC:
- elsiocb->iocb_flag |= ((ELS_ID_FDISC << LPFC_FIP_ELS_ID_SHIFT)
+ elsiocb->iocb_flag |=
+ ((LPFC_ELS_ID_FDISC << LPFC_FIP_ELS_ID_SHIFT)
& LPFC_FIP_ELS_ID_MASK);
break;
case ELS_CMD_LOGO:
- elsiocb->iocb_flag |= ((ELS_ID_LOGO << LPFC_FIP_ELS_ID_SHIFT)
+ elsiocb->iocb_flag |=
+ ((LPFC_ELS_ID_LOGO << LPFC_FIP_ELS_ID_SHIFT)
& LPFC_FIP_ELS_ID_MASK);
break;
}
@@ -517,18 +520,13 @@ lpfc_cmpl_els_flogi_fabric(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
if (sp->cmn.edtovResolution) /* E_D_TOV ticks are in nanoseconds */
phba->fc_edtov = (phba->fc_edtov + 999999) / 1000000;
+ phba->fc_edtovResol = sp->cmn.edtovResolution;
phba->fc_ratov = (be32_to_cpu(sp->cmn.w2.r_a_tov) + 999) / 1000;
if (phba->fc_topology == TOPOLOGY_LOOP) {
spin_lock_irq(shost->host_lock);
vport->fc_flag |= FC_PUBLIC_LOOP;
spin_unlock_irq(shost->host_lock);
- } else {
- /*
- * If we are a N-port connected to a Fabric, fixup sparam's so
- * logins to devices on remote loops work.
- */
- vport->fc_sparam.cmn.altBbCredit = 1;
}
vport->fc_myDID = irsp->un.ulpWord[4] & Mask_DID;
@@ -585,6 +583,10 @@ lpfc_cmpl_els_flogi_fabric(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
lpfc_unreg_rpi(vport, np);
}
lpfc_cleanup_pending_mbox(vport);
+
+ if (phba->sli_rev == LPFC_SLI_REV4)
+ lpfc_sli4_unreg_all_rpis(vport);
+
if (phba->sli3_options & LPFC_SLI3_NPIV_ENABLED) {
lpfc_mbx_unreg_vpi(vport);
spin_lock_irq(shost->host_lock);
@@ -800,7 +802,7 @@ lpfc_cmpl_els_flogi(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
if (irsp->ulpStatus) {
/*
- * In case of FIP mode, perform round robin FCF failover
+ * In case of FIP mode, perform roundrobin FCF failover
* due to new FCF discovery
*/
if ((phba->hba_flag & HBA_FIP_SUPPORT) &&
@@ -808,48 +810,16 @@ lpfc_cmpl_els_flogi(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
(irsp->ulpStatus != IOSTAT_LOCAL_REJECT) &&
(irsp->un.ulpWord[4] != IOERR_SLI_ABORTED)) {
lpfc_printf_log(phba, KERN_WARNING, LOG_FIP | LOG_ELS,
- "2611 FLOGI failed on registered "
- "FCF record fcf_index(%d), status: "
- "x%x/x%x, tmo:x%x, trying to perform "
- "round robin failover\n",
+ "2611 FLOGI failed on FCF (x%x), "
+ "status:x%x/x%x, tmo:x%x, perform "
+ "roundrobin FCF failover\n",
phba->fcf.current_rec.fcf_indx,
irsp->ulpStatus, irsp->un.ulpWord[4],
irsp->ulpTimeout);
fcf_index = lpfc_sli4_fcf_rr_next_index_get(phba);
- if (fcf_index == LPFC_FCOE_FCF_NEXT_NONE) {
- /*
- * Exhausted the eligible FCF record list,
- * fail through to retry FLOGI on current
- * FCF record.
- */
- lpfc_printf_log(phba, KERN_WARNING,
- LOG_FIP | LOG_ELS,
- "2760 Completed one round "
- "of FLOGI FCF round robin "
- "failover list, retry FLOGI "
- "on currently registered "
- "FCF index:%d\n",
- phba->fcf.current_rec.fcf_indx);
- } else {
- lpfc_printf_log(phba, KERN_INFO,
- LOG_FIP | LOG_ELS,
- "2794 FLOGI FCF round robin "
- "failover to FCF index x%x\n",
- fcf_index);
- rc = lpfc_sli4_fcf_rr_read_fcf_rec(phba,
- fcf_index);
- if (rc)
- lpfc_printf_log(phba, KERN_WARNING,
- LOG_FIP | LOG_ELS,
- "2761 FLOGI round "
- "robin FCF failover "
- "read FCF failed "
- "rc:x%x, fcf_index:"
- "%d\n", rc,
- phba->fcf.current_rec.fcf_indx);
- else
- goto out;
- }
+ rc = lpfc_sli4_fcf_rr_next_proc(vport, fcf_index);
+ if (rc)
+ goto out;
}
/* FLOGI failure */
@@ -939,6 +909,7 @@ lpfc_cmpl_els_flogi(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
lpfc_nlp_put(ndlp);
spin_lock_irq(&phba->hbalock);
phba->fcf.fcf_flag &= ~FCF_DISCOVERY;
+ phba->hba_flag &= ~(FCF_RR_INPROG | HBA_DEVLOSS_TMO);
spin_unlock_irq(&phba->hbalock);
goto out;
}
@@ -947,13 +918,12 @@ lpfc_cmpl_els_flogi(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
if (phba->hba_flag & HBA_FIP_SUPPORT)
lpfc_printf_vlog(vport, KERN_INFO, LOG_FIP |
LOG_ELS,
- "2769 FLOGI successful on FCF "
- "record: current_fcf_index:"
- "x%x, terminate FCF round "
- "robin failover process\n",
+ "2769 FLOGI to FCF (x%x) "
+ "completed successfully\n",
phba->fcf.current_rec.fcf_indx);
spin_lock_irq(&phba->hbalock);
phba->fcf.fcf_flag &= ~FCF_DISCOVERY;
+ phba->hba_flag &= ~(FCF_RR_INPROG | HBA_DEVLOSS_TMO);
spin_unlock_irq(&phba->hbalock);
goto out;
}
@@ -1175,12 +1145,13 @@ lpfc_initial_flogi(struct lpfc_vport *vport)
return 0;
}
- if (lpfc_issue_els_flogi(vport, ndlp, 0))
+ if (lpfc_issue_els_flogi(vport, ndlp, 0)) {
/* This decrement of reference count to node shall kick off
* the release of the node.
*/
lpfc_nlp_put(ndlp);
-
+ return 0;
+ }
return 1;
}
@@ -1645,6 +1616,13 @@ lpfc_issue_els_plogi(struct lpfc_vport *vport, uint32_t did, uint8_t retry)
memcpy(pcmd, &vport->fc_sparam, sizeof(struct serv_parm));
sp = (struct serv_parm *) pcmd;
+ /*
+ * If we are an N_Port connected to a Fabric, fix up sparam's so logins
+ * to devices on remote loops work.
+ */
+ if ((vport->fc_flag & FC_FABRIC) && !(vport->fc_flag & FC_PUBLIC_LOOP))
+ sp->cmn.altBbCredit = 1;
+
if (sp->cmn.fcphLow < FC_PH_4_3)
sp->cmn.fcphLow = FC_PH_4_3;
@@ -3926,6 +3904,64 @@ lpfc_els_rsp_rnid_acc(struct lpfc_vport *vport, uint8_t format,
}
/**
+ * lpfc_els_rsp_echo_acc - Issue echo acc response
+ * @vport: pointer to a virtual N_Port data structure.
+ * @data: pointer to echo data to return in the accept.
+ * @oldiocb: pointer to the original lpfc command iocb data structure.
+ * @ndlp: pointer to a node-list data structure.
+ *
+ * Return code
+ * 0 - Successfully issued acc echo response
+ * 1 - Failed to issue acc echo response
+ **/
+static int
+lpfc_els_rsp_echo_acc(struct lpfc_vport *vport, uint8_t *data,
+ struct lpfc_iocbq *oldiocb, struct lpfc_nodelist *ndlp)
+{
+ struct lpfc_hba *phba = vport->phba;
+ struct lpfc_iocbq *elsiocb;
+ struct lpfc_sli *psli;
+ uint8_t *pcmd;
+ uint16_t cmdsize;
+ int rc;
+
+ psli = &phba->sli;
+ cmdsize = oldiocb->iocb.unsli3.rcvsli3.acc_len;
+
+ elsiocb = lpfc_prep_els_iocb(vport, 0, cmdsize, oldiocb->retry, ndlp,
+ ndlp->nlp_DID, ELS_CMD_ACC);
+ if (!elsiocb)
+ return 1;
+
+ elsiocb->iocb.ulpContext = oldiocb->iocb.ulpContext; /* Xri */
+ /* Xmit ECHO ACC response tag <ulpIoTag> */
+ lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
+ "2876 Xmit ECHO ACC response tag x%x xri x%x\n",
+ elsiocb->iotag, elsiocb->iocb.ulpContext);
+ pcmd = (uint8_t *) (((struct lpfc_dmabuf *) elsiocb->context2)->virt);
+ *((uint32_t *) (pcmd)) = ELS_CMD_ACC;
+ pcmd += sizeof(uint32_t);
+ memcpy(pcmd, data, cmdsize - sizeof(uint32_t));
+
+ lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_RSP,
+ "Issue ACC ECHO: did:x%x flg:x%x",
+ ndlp->nlp_DID, ndlp->nlp_flag, 0);
+
+ phba->fc_stat.elsXmitACC++;
+ elsiocb->iocb_cmpl = lpfc_cmpl_els_rsp;
+ lpfc_nlp_put(ndlp);
+ elsiocb->context1 = NULL; /* Don't need ndlp for cmpl,
+ * it could be freed */
+
+ rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0);
+ if (rc == IOCB_ERROR) {
+ lpfc_els_free_iocb(phba, elsiocb);
+ return 1;
+ }
+ return 0;
+}
+
+/**
* lpfc_els_disc_adisc - Issue remaining adisc iocbs to npr nodes of a vport
* @vport: pointer to a host virtual N_Port data structure.
*
@@ -4684,6 +4720,30 @@ lpfc_els_rcv_rnid(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb,
}
/**
+ * lpfc_els_rcv_echo - Process an unsolicited echo iocb
+ * @vport: pointer to a host virtual N_Port data structure.
+ * @cmdiocb: pointer to lpfc command iocb data structure.
+ * @ndlp: pointer to a node-list data structure.
+ *
+ * Return code
+ * 0 - Successfully processed echo iocb (currently always return 0)
+ **/
+static int
+lpfc_els_rcv_echo(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb,
+ struct lpfc_nodelist *ndlp)
+{
+ uint8_t *pcmd;
+
+ pcmd = (uint8_t *) (((struct lpfc_dmabuf *) cmdiocb->context2)->virt);
+
+ /* skip over first word of echo command to find echo data */
+ pcmd += sizeof(uint32_t);
+
+ lpfc_els_rsp_echo_acc(vport, pcmd, cmdiocb, ndlp);
+ return 0;
+}
+
+/**
* lpfc_els_rcv_lirr - Process an unsolicited lirr iocb
* @vport: pointer to a host virtual N_Port data structure.
* @cmdiocb: pointer to lpfc command iocb data structure.
@@ -4735,6 +4795,89 @@ lpfc_els_rcv_rrq(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb,
}
/**
+ * lpfc_els_rsp_rls_acc - Completion callbk func for MBX_READ_LNK_STAT mbox cmd
+ * @phba: pointer to lpfc hba data structure.
+ * @pmb: pointer to the driver internal queue element for mailbox command.
+ *
+ * This routine is the completion callback function for the MBX_READ_LNK_STAT
+ * mailbox command. This callback function actually sends the Accept (ACC)
+ * response to a Read Link Status (RLS) unsolicited IOCB event. It
+ * collects the link statistics from the completion of the MBX_READ_LNK_STAT
+ * mailbox command, constructs the RLS response with the statistics
+ * collected, and then invokes the lpfc_sli_issue_iocb() routine to send the
+ * ACC response to the RLS.
+ *
+ * Note that, in lpfc_prep_els_iocb() routine, the reference count of ndlp
+ * will be incremented by 1 for holding the ndlp and the reference to ndlp
+ * will be stored into the context1 field of the IOCB for the completion
+ * callback function to the RLS Accept Response ELS IOCB command.
+ *
+ **/
+static void
+lpfc_els_rsp_rls_acc(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
+{
+ MAILBOX_t *mb;
+ IOCB_t *icmd;
+ struct RLS_RSP *rls_rsp;
+ uint8_t *pcmd;
+ struct lpfc_iocbq *elsiocb;
+ struct lpfc_nodelist *ndlp;
+ uint16_t xri;
+ uint32_t cmdsize;
+
+ mb = &pmb->u.mb;
+
+ ndlp = (struct lpfc_nodelist *) pmb->context2;
+ xri = (uint16_t) ((unsigned long)(pmb->context1));
+ pmb->context1 = NULL;
+ pmb->context2 = NULL;
+
+ if (mb->mbxStatus) {
+ mempool_free(pmb, phba->mbox_mem_pool);
+ return;
+ }
+
+ cmdsize = sizeof(struct RLS_RSP) + sizeof(uint32_t);
+ mempool_free(pmb, phba->mbox_mem_pool);
+ elsiocb = lpfc_prep_els_iocb(phba->pport, 0, cmdsize,
+ lpfc_max_els_tries, ndlp,
+ ndlp->nlp_DID, ELS_CMD_ACC);
+
+ /* Decrement the ndlp reference count from previous mbox command */
+ lpfc_nlp_put(ndlp);
+
+ if (!elsiocb)
+ return;
+
+ icmd = &elsiocb->iocb;
+ icmd->ulpContext = xri;
+
+ pcmd = (uint8_t *) (((struct lpfc_dmabuf *) elsiocb->context2)->virt);
+ *((uint32_t *) (pcmd)) = ELS_CMD_ACC;
+ pcmd += sizeof(uint32_t); /* Skip past command */
+ rls_rsp = (struct RLS_RSP *)pcmd;
+
+ rls_rsp->linkFailureCnt = cpu_to_be32(mb->un.varRdLnk.linkFailureCnt);
+ rls_rsp->lossSyncCnt = cpu_to_be32(mb->un.varRdLnk.lossSyncCnt);
+ rls_rsp->lossSignalCnt = cpu_to_be32(mb->un.varRdLnk.lossSignalCnt);
+ rls_rsp->primSeqErrCnt = cpu_to_be32(mb->un.varRdLnk.primSeqErrCnt);
+ rls_rsp->invalidXmitWord = cpu_to_be32(mb->un.varRdLnk.invalidXmitWord);
+ rls_rsp->crcCnt = cpu_to_be32(mb->un.varRdLnk.crcCnt);
+
+ /* Xmit ELS RLS ACC response tag <ulpIoTag> */
+ lpfc_printf_vlog(ndlp->vport, KERN_INFO, LOG_ELS,
+ "2874 Xmit ELS RLS ACC response tag x%x xri x%x, "
+ "did x%x, nlp_flag x%x, nlp_state x%x, rpi x%x\n",
+ elsiocb->iotag, elsiocb->iocb.ulpContext,
+ ndlp->nlp_DID, ndlp->nlp_flag, ndlp->nlp_state,
+ ndlp->nlp_rpi);
+ elsiocb->iocb_cmpl = lpfc_cmpl_els_rsp;
+ phba->fc_stat.elsXmitACC++;
+ if (lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0) == IOCB_ERROR)
+ lpfc_els_free_iocb(phba, elsiocb);
+}
+
+/**
* lpfc_els_rsp_rps_acc - Completion callbk func for MBX_READ_LNK_STAT mbox cmd
* @phba: pointer to lpfc hba data structure.
* @pmb: pointer to the driver internal queue element for mailbox command.
@@ -4827,7 +4970,155 @@ lpfc_els_rsp_rps_acc(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
}
/**
- * lpfc_els_rcv_rps - Process an unsolicited rps iocb
+ * lpfc_els_rcv_rls - Process an unsolicited rls iocb
+ * @vport: pointer to a host virtual N_Port data structure.
+ * @cmdiocb: pointer to lpfc command iocb data structure.
+ * @ndlp: pointer to a node-list data structure.
+ *
+ * This routine processes a Read Link Status (RLS) IOCB received as an
+ * ELS unsolicited event. It first checks the remote port state. If the
+ * remote port is not in NLP_STE_UNMAPPED_NODE state or NLP_STE_MAPPED_NODE
+ * state, it invokes the lpfc_els_rsp_reject() routine to send the reject
+ * response. Otherwise, it issues the MBX_READ_LNK_STAT mailbox command
+ * to read the HBA link statistics. The callback function,
+ * lpfc_els_rsp_rls_acc(), set on the MBX_READ_LNK_STAT mailbox command,
+ * actually sends out the RLS Accept (ACC) response.
+ *
+ * Return codes
+ * 0 - Successfully processed rls iocb (currently always return 0)
+ **/
+static int
+lpfc_els_rcv_rls(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb,
+ struct lpfc_nodelist *ndlp)
+{
+ struct lpfc_hba *phba = vport->phba;
+ LPFC_MBOXQ_t *mbox;
+ struct lpfc_dmabuf *pcmd;
+ struct ls_rjt stat;
+
+ if ((ndlp->nlp_state != NLP_STE_UNMAPPED_NODE) &&
+ (ndlp->nlp_state != NLP_STE_MAPPED_NODE))
+ /* reject the unsolicited RLS request and be done with it */
+ goto reject_out;
+
+ pcmd = (struct lpfc_dmabuf *) cmdiocb->context2;
+
+ mbox = mempool_alloc(phba->mbox_mem_pool, GFP_ATOMIC);
+ if (mbox) {
+ lpfc_read_lnk_stat(phba, mbox);
+ mbox->context1 =
+ (void *)((unsigned long) cmdiocb->iocb.ulpContext);
+ mbox->context2 = lpfc_nlp_get(ndlp);
+ mbox->vport = vport;
+ mbox->mbox_cmpl = lpfc_els_rsp_rls_acc;
+ if (lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT)
+ != MBX_NOT_FINISHED)
+ /* Mbox completion will send ELS Response */
+ return 0;
+ /* Decrement reference count used for the failed mbox
+ * command.
+ */
+ lpfc_nlp_put(ndlp);
+ mempool_free(mbox, phba->mbox_mem_pool);
+ }
+reject_out:
+ /* issue rejection response */
+ stat.un.b.lsRjtRsvd0 = 0;
+ stat.un.b.lsRjtRsnCode = LSRJT_UNABLE_TPC;
+ stat.un.b.lsRjtRsnCodeExp = LSEXP_CANT_GIVE_DATA;
+ stat.un.b.vendorUnique = 0;
+ lpfc_els_rsp_reject(vport, stat.un.lsRjtError, cmdiocb, ndlp, NULL);
+ return 0;
+}
+
+/**
+ * lpfc_els_rcv_rtv - Process an unsolicited rtv iocb
+ * @vport: pointer to a host virtual N_Port data structure.
+ * @cmdiocb: pointer to lpfc command iocb data structure.
+ * @ndlp: pointer to a node-list data structure.
+ *
+ * This routine processes a Read Timeout Value (RTV) IOCB received as an
+ * ELS unsolicited event. It first checks the remote port state. If the
+ * remote port is not in NLP_STE_UNMAPPED_NODE state or NLP_STE_MAPPED_NODE
+ * state, it invokes the lpfc_els_rsp_reject() routine to send the reject
+ * response. Otherwise, it sends the Accept (ACC) response to the Read Timeout
+ * Value (RTV) unsolicited IOCB event.
+ *
+ * Note that, in lpfc_prep_els_iocb() routine, the reference count of ndlp
+ * will be incremented by 1 for holding the ndlp and the reference to ndlp
+ * will be stored into the context1 field of the IOCB for the completion
+ * callback function to the RTV Accept Response ELS IOCB command.
+ *
+ * Return codes
+ * 0 - Successfully processed rtv iocb (currently always return 0)
+ **/
+static int
+lpfc_els_rcv_rtv(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb,
+ struct lpfc_nodelist *ndlp)
+{
+ struct lpfc_hba *phba = vport->phba;
+ struct ls_rjt stat;
+ struct RTV_RSP *rtv_rsp;
+ uint8_t *pcmd;
+ struct lpfc_iocbq *elsiocb;
+ uint32_t cmdsize;
+
+
+ if ((ndlp->nlp_state != NLP_STE_UNMAPPED_NODE) &&
+ (ndlp->nlp_state != NLP_STE_MAPPED_NODE))
+ /* reject the unsolicited RTV request and be done with it */
+ goto reject_out;
+
+ cmdsize = sizeof(struct RTV_RSP) + sizeof(uint32_t);
+ elsiocb = lpfc_prep_els_iocb(phba->pport, 0, cmdsize,
+ lpfc_max_els_tries, ndlp,
+ ndlp->nlp_DID, ELS_CMD_ACC);
+
+ if (!elsiocb)
+ return 1;
+
+ pcmd = (uint8_t *) (((struct lpfc_dmabuf *) elsiocb->context2)->virt);
+ *((uint32_t *) (pcmd)) = ELS_CMD_ACC;
+ pcmd += sizeof(uint32_t); /* Skip past command */
+
+ /* use the command's xri in the response */
+ elsiocb->iocb.ulpContext = cmdiocb->iocb.ulpContext;
+
+ rtv_rsp = (struct RTV_RSP *)pcmd;
+
+ /* populate RTV payload */
+ rtv_rsp->ratov = cpu_to_be32(phba->fc_ratov * 1000); /* report msecs */
+ rtv_rsp->edtov = cpu_to_be32(phba->fc_edtov);
+ bf_set(qtov_edtovres, rtv_rsp, phba->fc_edtovResol ? 1 : 0);
+ bf_set(qtov_rttov, rtv_rsp, 0); /* Field is for FC ONLY */
+ rtv_rsp->qtov = cpu_to_be32(rtv_rsp->qtov);
+
+ /* Xmit ELS RTV ACC response tag <ulpIoTag> */
+ lpfc_printf_vlog(ndlp->vport, KERN_INFO, LOG_ELS,
+ "2875 Xmit ELS RTV ACC response tag x%x xri x%x, "
+ "did x%x, nlp_flag x%x, nlp_state x%x, rpi x%x, "
+ "Data: x%x x%x x%x\n",
+ elsiocb->iotag, elsiocb->iocb.ulpContext,
+ ndlp->nlp_DID, ndlp->nlp_flag, ndlp->nlp_state,
+ ndlp->nlp_rpi,
+ rtv_rsp->ratov, rtv_rsp->edtov, rtv_rsp->qtov);
+ elsiocb->iocb_cmpl = lpfc_cmpl_els_rsp;
+ phba->fc_stat.elsXmitACC++;
+ if (lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0) == IOCB_ERROR)
+ lpfc_els_free_iocb(phba, elsiocb);
+ return 0;
+
+reject_out:
+ /* issue rejection response */
+ stat.un.b.lsRjtRsvd0 = 0;
+ stat.un.b.lsRjtRsnCode = LSRJT_UNABLE_TPC;
+ stat.un.b.lsRjtRsnCodeExp = LSEXP_CANT_GIVE_DATA;
+ stat.un.b.vendorUnique = 0;
+ lpfc_els_rsp_reject(vport, stat.un.lsRjtError, cmdiocb, ndlp, NULL);
+ return 0;
+}
+
+/* lpfc_els_rcv_rps - Process an unsolicited rps iocb
* @vport: pointer to a host virtual N_Port data structure.
* @cmdiocb: pointer to lpfc command iocb data structure.
* @ndlp: pointer to a node-list data structure.
@@ -5017,7 +5308,6 @@ lpfc_els_rcv_rpl(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb,
pcmd = (struct lpfc_dmabuf *) cmdiocb->context2;
lp = (uint32_t *) pcmd->virt;
rpl = (RPL *) (lp + 1);
-
maxsize = be32_to_cpu(rpl->maxsize);
/* We support only one port */
@@ -5836,6 +6126,16 @@ lpfc_els_unsol_buffer(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
if (newnode)
lpfc_nlp_put(ndlp);
break;
+ case ELS_CMD_RLS:
+ lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL,
+ "RCV RLS: did:x%x/ste:x%x flg:x%x",
+ did, vport->port_state, ndlp->nlp_flag);
+
+ phba->fc_stat.elsRcvRLS++;
+ lpfc_els_rcv_rls(vport, elsiocb, ndlp);
+ if (newnode)
+ lpfc_nlp_put(ndlp);
+ break;
case ELS_CMD_RPS:
lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL,
"RCV RPS: did:x%x/ste:x%x flg:x%x",
@@ -5866,6 +6166,15 @@ lpfc_els_unsol_buffer(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
if (newnode)
lpfc_nlp_put(ndlp);
break;
+ case ELS_CMD_RTV:
+ lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL,
+ "RCV RTV: did:x%x/ste:x%x flg:x%x",
+ did, vport->port_state, ndlp->nlp_flag);
+ phba->fc_stat.elsRcvRTV++;
+ lpfc_els_rcv_rtv(vport, elsiocb, ndlp);
+ if (newnode)
+ lpfc_nlp_put(ndlp);
+ break;
case ELS_CMD_RRQ:
lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL,
"RCV RRQ: did:x%x/ste:x%x flg:x%x",
@@ -5876,6 +6185,16 @@ lpfc_els_unsol_buffer(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
if (newnode)
lpfc_nlp_put(ndlp);
break;
+ case ELS_CMD_ECHO:
+ lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL,
+ "RCV ECHO: did:x%x/ste:x%x flg:x%x",
+ did, vport->port_state, ndlp->nlp_flag);
+
+ phba->fc_stat.elsRcvECHO++;
+ lpfc_els_rcv_echo(vport, elsiocb, ndlp);
+ if (newnode)
+ lpfc_nlp_put(ndlp);
+ break;
default:
lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL,
"RCV ELS cmd: cmd:x%x did:x%x/ste:x%x",
@@ -6170,6 +6489,8 @@ lpfc_cmpl_reg_new_vport(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
default:
/* Try to recover from this error */
+ if (phba->sli_rev == LPFC_SLI_REV4)
+ lpfc_sli4_unreg_all_rpis(vport);
lpfc_mbx_unreg_vpi(vport);
spin_lock_irq(shost->host_lock);
vport->fc_flag |= FC_VPORT_NEEDS_REG_VPI;
@@ -6437,6 +6758,10 @@ lpfc_cmpl_els_fdisc(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
lpfc_unreg_rpi(vport, np);
}
lpfc_cleanup_pending_mbox(vport);
+
+ if (phba->sli_rev == LPFC_SLI_REV4)
+ lpfc_sli4_unreg_all_rpis(vport);
+
lpfc_mbx_unreg_vpi(vport);
spin_lock_irq(shost->host_lock);
vport->fc_flag |= FC_VPORT_NEEDS_REG_VPI;
@@ -6452,7 +6777,7 @@ lpfc_cmpl_els_fdisc(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
* to update the MAC address.
*/
lpfc_register_new_vport(phba, vport, ndlp);
- return ;
+ goto out;
}
if (vport->fc_flag & FC_VPORT_NEEDS_INIT_VPI)
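Besides the FCF roundrobin rework, the lpfc_els.c changes above add unsolicited handlers for three ELS commands: ECHO (the ACC simply returns the received payload), RLS (link statistics gathered via MBX_READ_LNK_STAT) and RTV (timeout values). The RTV accept built in lpfc_els_rcv_rtv() carries three big-endian words after the ELS_CMD_ACC word; a sketch of the layout implied by the code, with field names taken from the hunk and sizes assumed:

struct RTV_RSP {
	uint32_t ratov;		/* R_A_TOV, reported in milliseconds */
	uint32_t edtov;		/* E_D_TOV, resolution flagged in qtov */
	uint32_t qtov;		/* bit fields: qtov_edtovres, qtov_rttov */
};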
diff --git a/drivers/scsi/lpfc/lpfc_hbadisc.c b/drivers/scsi/lpfc/lpfc_hbadisc.c
index a345dde16c86..a5d1695dac3d 100644
--- a/drivers/scsi/lpfc/lpfc_hbadisc.c
+++ b/drivers/scsi/lpfc/lpfc_hbadisc.c
@@ -20,6 +20,7 @@
*******************************************************************/
#include <linux/blkdev.h>
+#include <linux/delay.h>
#include <linux/slab.h>
#include <linux/pci.h>
#include <linux/kthread.h>
@@ -63,6 +64,7 @@ static uint8_t lpfcAlpaArray[] = {
static void lpfc_disc_timeout_handler(struct lpfc_vport *);
static void lpfc_disc_flush_list(struct lpfc_vport *vport);
static void lpfc_unregister_fcfi_cmpl(struct lpfc_hba *, LPFC_MBOXQ_t *);
+static int lpfc_fcf_inuse(struct lpfc_hba *);
void
lpfc_terminate_rport_io(struct fc_rport *rport)
@@ -160,11 +162,17 @@ lpfc_dev_loss_tmo_callbk(struct fc_rport *rport)
return;
}
-/*
- * This function is called from the worker thread when dev_loss_tmo
- * expire.
- */
-static void
+/**
+ * lpfc_dev_loss_tmo_handler - Remote node devloss timeout handler
+ * @ndlp: Pointer to remote node object.
+ *
+ * This function is called from the worker thread when the devloss timeout
+ * timer expires. For an SLI4 host, this routine returns 1 when at least one
+ * remote node, including this @ndlp, is still using the FCF; otherwise it
+ * returns 0 when no remote node is using the FCF at the time the devloss
+ * timeout expired for this @ndlp.
+ **/
+static int
lpfc_dev_loss_tmo_handler(struct lpfc_nodelist *ndlp)
{
struct lpfc_rport_data *rdata;
@@ -175,17 +183,21 @@ lpfc_dev_loss_tmo_handler(struct lpfc_nodelist *ndlp)
int put_node;
int put_rport;
int warn_on = 0;
+ int fcf_inuse = 0;
rport = ndlp->rport;
if (!rport)
- return;
+ return fcf_inuse;
rdata = rport->dd_data;
name = (uint8_t *) &ndlp->nlp_portname;
vport = ndlp->vport;
phba = vport->phba;
+ if (phba->sli_rev == LPFC_SLI_REV4)
+ fcf_inuse = lpfc_fcf_inuse(phba);
+
lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_RPORT,
"rport devlosstmo:did:x%x type:x%x id:x%x",
ndlp->nlp_DID, ndlp->nlp_type, rport->scsi_target_id);
@@ -209,7 +221,7 @@ lpfc_dev_loss_tmo_handler(struct lpfc_nodelist *ndlp)
lpfc_nlp_put(ndlp);
if (put_rport)
put_device(&rport->dev);
- return;
+ return fcf_inuse;
}
if (ndlp->nlp_state == NLP_STE_MAPPED_NODE) {
@@ -220,7 +232,7 @@ lpfc_dev_loss_tmo_handler(struct lpfc_nodelist *ndlp)
*name, *(name+1), *(name+2), *(name+3),
*(name+4), *(name+5), *(name+6), *(name+7),
ndlp->nlp_DID);
- return;
+ return fcf_inuse;
}
if (ndlp->nlp_type & NLP_FABRIC) {
@@ -233,7 +245,7 @@ lpfc_dev_loss_tmo_handler(struct lpfc_nodelist *ndlp)
lpfc_nlp_put(ndlp);
if (put_rport)
put_device(&rport->dev);
- return;
+ return fcf_inuse;
}
if (ndlp->nlp_sid != NLP_NO_SID) {
@@ -280,6 +292,74 @@ lpfc_dev_loss_tmo_handler(struct lpfc_nodelist *ndlp)
(ndlp->nlp_state != NLP_STE_PRLI_ISSUE))
lpfc_disc_state_machine(vport, ndlp, NULL, NLP_EVT_DEVICE_RM);
+ return fcf_inuse;
+}
+
+/**
+ * lpfc_sli4_post_dev_loss_tmo_handler - SLI4 post devloss timeout handler
+ * @phba: Pointer to hba context object.
+ * @fcf_inuse: SLI4 FCF in-use state reported from devloss timeout handler.
+ * @nlp_did: remote node identifier with devloss timeout.
+ *
+ * This function is called from the worker thread on an SLI4 host after the
+ * devloss timeout handler has been invoked and the reference count for the
+ * ndlp whose devloss timeout was handled has been released. For the devloss
+ * timeout of the last remote node that had been using the FCF, by the time
+ * this routine is invoked it is guaranteed that no remote node is still
+ * using the FCF. On devloss timeout of that last remote node, if the FIP
+ * engine is neither in the FCF table scan process nor in the roundrobin
+ * failover process, the in-use FCF is unregistered. If the FIP engine is in
+ * the FCF discovery process, the devloss timeout state is set so that either
+ * the FCF table scan process or the roundrobin failover process unregisters
+ * the in-use FCF.
+ **/
+static void
+lpfc_sli4_post_dev_loss_tmo_handler(struct lpfc_hba *phba, int fcf_inuse,
+ uint32_t nlp_did)
+{
+ /* If the devloss timeout happened to a remote node while the FCF
+ * was no longer in use, do nothing.
+ */
+ if (!fcf_inuse)
+ return;
+
+ if ((phba->hba_flag & HBA_FIP_SUPPORT) && !lpfc_fcf_inuse(phba)) {
+ spin_lock_irq(&phba->hbalock);
+ if (phba->fcf.fcf_flag & FCF_DISCOVERY) {
+ if (phba->hba_flag & HBA_DEVLOSS_TMO) {
+ spin_unlock_irq(&phba->hbalock);
+ return;
+ }
+ phba->hba_flag |= HBA_DEVLOSS_TMO;
+ lpfc_printf_log(phba, KERN_INFO, LOG_FIP,
+ "2847 Last remote node (x%x) using "
+ "FCF devloss tmo\n", nlp_did);
+ }
+ if (phba->fcf.fcf_flag & FCF_REDISC_PROG) {
+ spin_unlock_irq(&phba->hbalock);
+ lpfc_printf_log(phba, KERN_INFO, LOG_FIP,
+ "2868 Devloss tmo to FCF rediscovery "
+ "in progress\n");
+ return;
+ }
+ if (!(phba->hba_flag & (FCF_TS_INPROG | FCF_RR_INPROG))) {
+ spin_unlock_irq(&phba->hbalock);
+ lpfc_printf_log(phba, KERN_INFO, LOG_FIP,
+ "2869 Devloss tmo to idle FIP engine, "
+ "unreg in-use FCF and rescan.\n");
+ /* Unregister in-use FCF and rescan */
+ lpfc_unregister_fcf_rescan(phba);
+ return;
+ }
+ spin_unlock_irq(&phba->hbalock);
+ if (phba->hba_flag & FCF_TS_INPROG)
+ lpfc_printf_log(phba, KERN_INFO, LOG_FIP,
+ "2870 FCF table scan in progress\n");
+ if (phba->hba_flag & FCF_RR_INPROG)
+ lpfc_printf_log(phba, KERN_INFO, LOG_FIP,
+ "2871 FLOGI roundrobin FCF failover "
+ "in progress\n");
+ }
lpfc_unregister_unused_fcf(phba);
}
@@ -408,6 +488,8 @@ lpfc_work_list_done(struct lpfc_hba *phba)
struct lpfc_work_evt *evtp = NULL;
struct lpfc_nodelist *ndlp;
int free_evt;
+ int fcf_inuse;
+ uint32_t nlp_did;
spin_lock_irq(&phba->hbalock);
while (!list_empty(&phba->work_list)) {
@@ -427,12 +509,17 @@ lpfc_work_list_done(struct lpfc_hba *phba)
break;
case LPFC_EVT_DEV_LOSS:
ndlp = (struct lpfc_nodelist *)(evtp->evt_arg1);
- lpfc_dev_loss_tmo_handler(ndlp);
+ fcf_inuse = lpfc_dev_loss_tmo_handler(ndlp);
free_evt = 0;
/* decrement the node reference count held for
* this queued work
*/
+ nlp_did = ndlp->nlp_DID;
lpfc_nlp_put(ndlp);
+ if (phba->sli_rev == LPFC_SLI_REV4)
+ lpfc_sli4_post_dev_loss_tmo_handler(phba,
+ fcf_inuse,
+ nlp_did);
break;
case LPFC_EVT_ONLINE:
if (phba->link_state < LPFC_LINK_DOWN)
@@ -707,6 +794,8 @@ lpfc_cleanup_rpis(struct lpfc_vport *vport, int remove)
: NLP_EVT_DEVICE_RECOVERY);
}
if (phba->sli3_options & LPFC_SLI3_VPORT_TEARDOWN) {
+ if (phba->sli_rev == LPFC_SLI_REV4)
+ lpfc_sli4_unreg_all_rpis(vport);
lpfc_mbx_unreg_vpi(vport);
spin_lock_irq(shost->host_lock);
vport->fc_flag |= FC_VPORT_NEEDS_REG_VPI;
@@ -1021,8 +1110,7 @@ lpfc_mbx_cmpl_reg_fcfi(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
"2017 REG_FCFI mbxStatus error x%x "
"HBA state x%x\n",
mboxq->u.mb.mbxStatus, vport->port_state);
- mempool_free(mboxq, phba->mbox_mem_pool);
- return;
+ goto fail_out;
}
/* Start FCoE discovery by sending a FLOGI. */
@@ -1031,20 +1119,30 @@ lpfc_mbx_cmpl_reg_fcfi(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
spin_lock_irq(&phba->hbalock);
phba->fcf.fcf_flag |= FCF_REGISTERED;
spin_unlock_irq(&phba->hbalock);
+
/* If there is a pending FCoE event, restart FCF table scan. */
- if (lpfc_check_pending_fcoe_event(phba, 1)) {
- mempool_free(mboxq, phba->mbox_mem_pool);
- return;
- }
+ if (lpfc_check_pending_fcoe_event(phba, LPFC_UNREG_FCF))
+ goto fail_out;
+
+ /* Mark successful completion of FCF table scan */
spin_lock_irq(&phba->hbalock);
phba->fcf.fcf_flag |= (FCF_SCAN_DONE | FCF_IN_USE);
- phba->hba_flag &= ~FCF_DISC_INPROGRESS;
- spin_unlock_irq(&phba->hbalock);
- if (vport->port_state != LPFC_FLOGI)
+ phba->hba_flag &= ~FCF_TS_INPROG;
+ if (vport->port_state != LPFC_FLOGI) {
+ phba->hba_flag |= FCF_RR_INPROG;
+ spin_unlock_irq(&phba->hbalock);
lpfc_initial_flogi(vport);
+ goto out;
+ }
+ spin_unlock_irq(&phba->hbalock);
+ goto out;
+fail_out:
+ spin_lock_irq(&phba->hbalock);
+ phba->hba_flag &= ~FCF_RR_INPROG;
+ spin_unlock_irq(&phba->hbalock);
+out:
mempool_free(mboxq, phba->mbox_mem_pool);
- return;
}
/**
@@ -1241,10 +1339,9 @@ lpfc_register_fcf(struct lpfc_hba *phba)
int rc;
spin_lock_irq(&phba->hbalock);
-
/* If the FCF is not available do nothing. */
if (!(phba->fcf.fcf_flag & FCF_AVAILABLE)) {
- phba->hba_flag &= ~FCF_DISC_INPROGRESS;
+ phba->hba_flag &= ~(FCF_TS_INPROG | FCF_RR_INPROG);
spin_unlock_irq(&phba->hbalock);
return;
}
@@ -1252,19 +1349,22 @@ lpfc_register_fcf(struct lpfc_hba *phba)
/* The FCF is already registered, start discovery */
if (phba->fcf.fcf_flag & FCF_REGISTERED) {
phba->fcf.fcf_flag |= (FCF_SCAN_DONE | FCF_IN_USE);
- phba->hba_flag &= ~FCF_DISC_INPROGRESS;
- spin_unlock_irq(&phba->hbalock);
- if (phba->pport->port_state != LPFC_FLOGI)
+ phba->hba_flag &= ~FCF_TS_INPROG;
+ if (phba->pport->port_state != LPFC_FLOGI) {
+ phba->hba_flag |= FCF_RR_INPROG;
+ spin_unlock_irq(&phba->hbalock);
lpfc_initial_flogi(phba->pport);
+ return;
+ }
+ spin_unlock_irq(&phba->hbalock);
return;
}
spin_unlock_irq(&phba->hbalock);
- fcf_mbxq = mempool_alloc(phba->mbox_mem_pool,
- GFP_KERNEL);
+ fcf_mbxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
if (!fcf_mbxq) {
spin_lock_irq(&phba->hbalock);
- phba->hba_flag &= ~FCF_DISC_INPROGRESS;
+ phba->hba_flag &= ~(FCF_TS_INPROG | FCF_RR_INPROG);
spin_unlock_irq(&phba->hbalock);
return;
}
@@ -1275,7 +1375,7 @@ lpfc_register_fcf(struct lpfc_hba *phba)
rc = lpfc_sli_issue_mbox(phba, fcf_mbxq, MBX_NOWAIT);
if (rc == MBX_NOT_FINISHED) {
spin_lock_irq(&phba->hbalock);
- phba->hba_flag &= ~FCF_DISC_INPROGRESS;
+ phba->hba_flag &= ~(FCF_TS_INPROG | FCF_RR_INPROG);
spin_unlock_irq(&phba->hbalock);
mempool_free(fcf_mbxq, phba->mbox_mem_pool);
}
@@ -1493,7 +1593,7 @@ lpfc_check_pending_fcoe_event(struct lpfc_hba *phba, uint8_t unreg_fcf)
* FCF discovery, no need to restart FCF discovery.
*/
if ((phba->link_state >= LPFC_LINK_UP) &&
- (phba->fcoe_eventtag == phba->fcoe_eventtag_at_fcf_scan))
+ (phba->fcoe_eventtag == phba->fcoe_eventtag_at_fcf_scan))
return 0;
lpfc_printf_log(phba, KERN_INFO, LOG_FIP,
@@ -1517,14 +1617,14 @@ lpfc_check_pending_fcoe_event(struct lpfc_hba *phba, uint8_t unreg_fcf)
lpfc_sli4_fcf_scan_read_fcf_rec(phba, LPFC_FCOE_FCF_GET_FIRST);
} else {
/*
- * Do not continue FCF discovery and clear FCF_DISC_INPROGRESS
+ * Do not continue FCF discovery and clear FCF_TS_INPROG
* flag
*/
lpfc_printf_log(phba, KERN_INFO, LOG_FIP | LOG_DISCOVERY,
"2833 Stop FCF discovery process due to link "
"state change (x%x)\n", phba->link_state);
spin_lock_irq(&phba->hbalock);
- phba->hba_flag &= ~FCF_DISC_INPROGRESS;
+ phba->hba_flag &= ~(FCF_TS_INPROG | FCF_RR_INPROG);
phba->fcf.fcf_flag &= ~(FCF_REDISC_FOV | FCF_DISCOVERY);
spin_unlock_irq(&phba->hbalock);
}
@@ -1729,6 +1829,65 @@ lpfc_sli4_fcf_record_match(struct lpfc_hba *phba,
}
/**
+ * lpfc_sli4_fcf_rr_next_proc - processing next roundrobin fcf
+ * @vport: Pointer to vport object.
+ * @fcf_index: index to next fcf.
+ *
+ * This function processes the roundrobin FCF failover to the next FCF index.
+ * When this function is invoked, there will be a current FCF registered
+ * for FLOGI.
+ * Return: 0 to continue retrying FLOGI on the currently registered FCF;
+ * 1 to stop FLOGI on the currently registered FCF.
+ */
+int lpfc_sli4_fcf_rr_next_proc(struct lpfc_vport *vport, uint16_t fcf_index)
+{
+ struct lpfc_hba *phba = vport->phba;
+ int rc;
+
+ if (fcf_index == LPFC_FCOE_FCF_NEXT_NONE) {
+ spin_lock_irq(&phba->hbalock);
+ if (phba->hba_flag & HBA_DEVLOSS_TMO) {
+ spin_unlock_irq(&phba->hbalock);
+ lpfc_printf_log(phba, KERN_INFO, LOG_FIP,
+ "2872 Devloss tmo with no eligible "
+ "FCF, unregister in-use FCF (x%x) "
+ "and rescan FCF table\n",
+ phba->fcf.current_rec.fcf_indx);
+ lpfc_unregister_fcf_rescan(phba);
+ goto stop_flogi_current_fcf;
+ }
+ /* Mark the end of FLOGI roundrobin failover */
+ phba->hba_flag &= ~FCF_RR_INPROG;
+ /* Allow action on new fcf asynchronous event */
+ phba->fcf.fcf_flag &= ~(FCF_AVAILABLE | FCF_SCAN_DONE);
+ spin_unlock_irq(&phba->hbalock);
+ lpfc_printf_log(phba, KERN_INFO, LOG_FIP,
+ "2865 No FCF available, stop roundrobin FCF "
+ "failover and change port state:x%x/x%x\n",
+ phba->pport->port_state, LPFC_VPORT_UNKNOWN);
+ phba->pport->port_state = LPFC_VPORT_UNKNOWN;
+ goto stop_flogi_current_fcf;
+ } else {
+ lpfc_printf_log(phba, KERN_INFO, LOG_FIP | LOG_ELS,
+ "2794 Try FLOGI roundrobin FCF failover to "
+ "(x%x)\n", fcf_index);
+ rc = lpfc_sli4_fcf_rr_read_fcf_rec(phba, fcf_index);
+ if (rc)
+ lpfc_printf_log(phba, KERN_WARNING, LOG_FIP | LOG_ELS,
+ "2761 FLOGI roundrobin FCF failover "
+ "failed (rc:x%x) to read FCF (x%x)\n",
+ rc, phba->fcf.current_rec.fcf_indx);
+ else
+ goto stop_flogi_current_fcf;
+ }
+ return 0;
+
+stop_flogi_current_fcf:
+ lpfc_can_disctmo(vport);
+ return 1;
+}
+
+/**
* lpfc_mbx_cmpl_fcf_scan_read_fcf_rec - fcf scan read_fcf mbox cmpl handler.
* @phba: pointer to lpfc hba data structure.
* @mboxq: pointer to mailbox object.
@@ -1756,7 +1915,7 @@ lpfc_mbx_cmpl_fcf_scan_read_fcf_rec(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
int rc;
/* If there is pending FCoE event restart FCF table scan */
- if (lpfc_check_pending_fcoe_event(phba, 0)) {
+ if (lpfc_check_pending_fcoe_event(phba, LPFC_SKIP_UNREG_FCF)) {
lpfc_sli4_mbox_cmd_free(phba, mboxq);
return;
}
@@ -1765,12 +1924,12 @@ lpfc_mbx_cmpl_fcf_scan_read_fcf_rec(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
new_fcf_record = lpfc_sli4_fcf_rec_mbox_parse(phba, mboxq,
&next_fcf_index);
if (!new_fcf_record) {
- lpfc_printf_log(phba, KERN_WARNING, LOG_FIP,
+ lpfc_printf_log(phba, KERN_ERR, LOG_FIP,
"2765 Mailbox command READ_FCF_RECORD "
"failed to retrieve a FCF record.\n");
/* Let next new FCF event trigger fast failover */
spin_lock_irq(&phba->hbalock);
- phba->hba_flag &= ~FCF_DISC_INPROGRESS;
+ phba->hba_flag &= ~FCF_TS_INPROG;
spin_unlock_irq(&phba->hbalock);
lpfc_sli4_mbox_cmd_free(phba, mboxq);
return;
@@ -1787,13 +1946,12 @@ lpfc_mbx_cmpl_fcf_scan_read_fcf_rec(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
/*
* If the fcf record does not match with connect list entries
* read the next entry; otherwise, this is an eligible FCF
- * record for round robin FCF failover.
+ * record for roundrobin FCF failover.
*/
if (!rc) {
lpfc_printf_log(phba, KERN_WARNING, LOG_FIP,
- "2781 FCF record (x%x) failed FCF "
- "connection list check, fcf_avail:x%x, "
- "fcf_valid:x%x\n",
+ "2781 FCF (x%x) failed connection "
+ "list check: (x%x/x%x)\n",
bf_get(lpfc_fcf_record_fcf_index,
new_fcf_record),
bf_get(lpfc_fcf_record_fcf_avail,
@@ -1803,6 +1961,16 @@ lpfc_mbx_cmpl_fcf_scan_read_fcf_rec(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
if ((phba->fcf.fcf_flag & FCF_IN_USE) &&
lpfc_sli4_fcf_record_match(phba, &phba->fcf.current_rec,
new_fcf_record, LPFC_FCOE_IGNORE_VID)) {
+ if (bf_get(lpfc_fcf_record_fcf_index, new_fcf_record) !=
+ phba->fcf.current_rec.fcf_indx) {
+ lpfc_printf_log(phba, KERN_ERR, LOG_FIP,
+ "2862 FCF (x%x) matches property "
+ "of in-use FCF (x%x)\n",
+ bf_get(lpfc_fcf_record_fcf_index,
+ new_fcf_record),
+ phba->fcf.current_rec.fcf_indx);
+ goto read_next_fcf;
+ }
/*
* In case the current in-use FCF record becomes
* invalid/unavailable during FCF discovery that
@@ -1813,9 +1981,8 @@ lpfc_mbx_cmpl_fcf_scan_read_fcf_rec(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
!(phba->fcf.fcf_flag & FCF_REDISC_FOV)) {
lpfc_printf_log(phba, KERN_WARNING, LOG_FIP,
"2835 Invalid in-use FCF "
- "record (x%x) reported, "
- "entering fast FCF failover "
- "mode scanning.\n",
+ "(x%x), enter FCF failover "
+ "table scan.\n",
phba->fcf.current_rec.fcf_indx);
spin_lock_irq(&phba->hbalock);
phba->fcf.fcf_flag |= FCF_REDISC_FOV;
@@ -1844,22 +2011,29 @@ lpfc_mbx_cmpl_fcf_scan_read_fcf_rec(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
if (phba->fcf.fcf_flag & FCF_IN_USE) {
if (lpfc_sli4_fcf_record_match(phba, &phba->fcf.current_rec,
new_fcf_record, vlan_id)) {
- phba->fcf.fcf_flag |= FCF_AVAILABLE;
- if (phba->fcf.fcf_flag & FCF_REDISC_PEND)
- /* Stop FCF redisc wait timer if pending */
- __lpfc_sli4_stop_fcf_redisc_wait_timer(phba);
- else if (phba->fcf.fcf_flag & FCF_REDISC_FOV)
- /* If in fast failover, mark it's completed */
- phba->fcf.fcf_flag &= ~FCF_REDISC_FOV;
- spin_unlock_irq(&phba->hbalock);
- lpfc_printf_log(phba, KERN_INFO, LOG_FIP,
- "2836 The new FCF record (x%x) "
- "matches the in-use FCF record "
- "(x%x)\n",
- phba->fcf.current_rec.fcf_indx,
+ if (bf_get(lpfc_fcf_record_fcf_index, new_fcf_record) ==
+ phba->fcf.current_rec.fcf_indx) {
+ phba->fcf.fcf_flag |= FCF_AVAILABLE;
+ if (phba->fcf.fcf_flag & FCF_REDISC_PEND)
+ /* Stop FCF redisc wait timer */
+ __lpfc_sli4_stop_fcf_redisc_wait_timer(
+ phba);
+ else if (phba->fcf.fcf_flag & FCF_REDISC_FOV)
+ /* Fast failover, mark completed */
+ phba->fcf.fcf_flag &= ~FCF_REDISC_FOV;
+ spin_unlock_irq(&phba->hbalock);
+ lpfc_printf_log(phba, KERN_INFO, LOG_FIP,
+ "2836 New FCF matches in-use "
+ "FCF (x%x)\n",
+ phba->fcf.current_rec.fcf_indx);
+ goto out;
+ } else
+ lpfc_printf_log(phba, KERN_ERR, LOG_FIP,
+ "2863 New FCF (x%x) matches "
+ "property of in-use FCF (x%x)\n",
bf_get(lpfc_fcf_record_fcf_index,
- new_fcf_record));
- goto out;
+ new_fcf_record),
+ phba->fcf.current_rec.fcf_indx);
}
/*
* Read next FCF record from HBA searching for the matching
@@ -1953,8 +2127,8 @@ lpfc_mbx_cmpl_fcf_scan_read_fcf_rec(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
*/
if (fcf_rec) {
lpfc_printf_log(phba, KERN_INFO, LOG_FIP,
- "2840 Update current FCF record "
- "with initial FCF record (x%x)\n",
+ "2840 Update initial FCF candidate "
+ "with FCF (x%x)\n",
bf_get(lpfc_fcf_record_fcf_index,
new_fcf_record));
__lpfc_update_fcf_record(phba, fcf_rec, new_fcf_record,
@@ -1984,20 +2158,28 @@ read_next_fcf:
*/
if (!(phba->fcf.failover_rec.flag & RECORD_VALID)) {
lpfc_printf_log(phba, KERN_WARNING, LOG_FIP,
- "2782 No suitable FCF record "
- "found during this round of "
- "post FCF rediscovery scan: "
- "fcf_evt_tag:x%x, fcf_index: "
- "x%x\n",
+ "2782 No suitable FCF found: "
+ "(x%x/x%x)\n",
phba->fcoe_eventtag_at_fcf_scan,
bf_get(lpfc_fcf_record_fcf_index,
new_fcf_record));
+ spin_lock_irq(&phba->hbalock);
+ if (phba->hba_flag & HBA_DEVLOSS_TMO) {
+ phba->hba_flag &= ~FCF_TS_INPROG;
+ spin_unlock_irq(&phba->hbalock);
+ /* Unregister in-use FCF and rescan */
+ lpfc_printf_log(phba, KERN_INFO,
+ LOG_FIP,
+ "2864 On devloss tmo "
+ "unreg in-use FCF and "
+ "rescan FCF table\n");
+ lpfc_unregister_fcf_rescan(phba);
+ return;
+ }
/*
- * Let next new FCF event trigger fast
- * failover
+ * Let next new FCF event trigger fast failover
*/
- spin_lock_irq(&phba->hbalock);
- phba->hba_flag &= ~FCF_DISC_INPROGRESS;
+ phba->hba_flag &= ~FCF_TS_INPROG;
spin_unlock_irq(&phba->hbalock);
return;
}
@@ -2015,9 +2197,8 @@ read_next_fcf:
/* Replace in-use record with the new record */
lpfc_printf_log(phba, KERN_INFO, LOG_FIP,
- "2842 Replace the current in-use "
- "FCF record (x%x) with failover FCF "
- "record (x%x)\n",
+ "2842 Replace in-use FCF (x%x) "
+ "with failover FCF (x%x)\n",
phba->fcf.current_rec.fcf_indx,
phba->fcf.failover_rec.fcf_indx);
memcpy(&phba->fcf.current_rec,
@@ -2029,15 +2210,8 @@ read_next_fcf:
* FCF failover.
*/
spin_lock_irq(&phba->hbalock);
- phba->fcf.fcf_flag &=
- ~(FCF_REDISC_FOV | FCF_REDISC_RRU);
+ phba->fcf.fcf_flag &= ~FCF_REDISC_FOV;
spin_unlock_irq(&phba->hbalock);
- /*
- * Set up the initial registered FCF index for FLOGI
- * round robin FCF failover.
- */
- phba->fcf.fcf_rr_init_indx =
- phba->fcf.failover_rec.fcf_indx;
/* Register to the new FCF record */
lpfc_register_fcf(phba);
} else {
@@ -2069,28 +2243,6 @@ read_next_fcf:
LPFC_FCOE_FCF_GET_FIRST);
return;
}
-
- /*
- * Otherwise, initial scan or post linkdown rescan,
- * register with the best FCF record found so far
- * through the FCF scanning process.
- */
-
- /*
- * Mark the initial FCF discovery completed and
- * the start of the first round of the roundrobin
- * FCF failover.
- */
- spin_lock_irq(&phba->hbalock);
- phba->fcf.fcf_flag &=
- ~(FCF_INIT_DISC | FCF_REDISC_RRU);
- spin_unlock_irq(&phba->hbalock);
- /*
- * Set up the initial registered FCF index for FLOGI
- * round robin FCF failover
- */
- phba->fcf.fcf_rr_init_indx =
- phba->fcf.current_rec.fcf_indx;
/* Register to the new FCF record */
lpfc_register_fcf(phba);
}
@@ -2106,11 +2258,11 @@ out:
}
/**
- * lpfc_mbx_cmpl_fcf_rr_read_fcf_rec - fcf round robin read_fcf mbox cmpl hdler
+ * lpfc_mbx_cmpl_fcf_rr_read_fcf_rec - fcf roundrobin read_fcf mbox cmpl hdler
* @phba: pointer to lpfc hba data structure.
* @mboxq: pointer to mailbox object.
*
- * This is the callback function for FLOGI failure round robin FCF failover
+ * This is the callback function for FLOGI failure roundrobin FCF failover
* read FCF record mailbox command from the eligible FCF record bmask for
* performing the failover. If the FCF read back is not valid/available, it
* falls back to retrying FLOGI to the currently registered FCF.
@@ -2125,17 +2277,18 @@ lpfc_mbx_cmpl_fcf_rr_read_fcf_rec(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
{
struct fcf_record *new_fcf_record;
uint32_t boot_flag, addr_mode;
- uint16_t next_fcf_index;
+ uint16_t next_fcf_index, fcf_index;
uint16_t current_fcf_index;
uint16_t vlan_id;
+ int rc;
- /* If link state is not up, stop the round robin failover process */
+ /* If link state is not up, stop the roundrobin failover process */
if (phba->link_state < LPFC_LINK_UP) {
spin_lock_irq(&phba->hbalock);
phba->fcf.fcf_flag &= ~FCF_DISCOVERY;
+ phba->hba_flag &= ~FCF_RR_INPROG;
spin_unlock_irq(&phba->hbalock);
- lpfc_sli4_mbox_cmd_free(phba, mboxq);
- return;
+ goto out;
}
/* Parse the FCF record from the non-embedded mailbox command */
@@ -2145,23 +2298,47 @@ lpfc_mbx_cmpl_fcf_rr_read_fcf_rec(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
lpfc_printf_log(phba, KERN_WARNING, LOG_FIP,
"2766 Mailbox command READ_FCF_RECORD "
"failed to retrieve a FCF record.\n");
- goto out;
+ goto error_out;
}
/* Get the needed parameters from FCF record */
- lpfc_match_fcf_conn_list(phba, new_fcf_record, &boot_flag,
- &addr_mode, &vlan_id);
+ rc = lpfc_match_fcf_conn_list(phba, new_fcf_record, &boot_flag,
+ &addr_mode, &vlan_id);
/* Log the FCF record information if turned on */
lpfc_sli4_log_fcf_record_info(phba, new_fcf_record, vlan_id,
next_fcf_index);
+ fcf_index = bf_get(lpfc_fcf_record_fcf_index, new_fcf_record);
+ if (!rc) {
+ lpfc_printf_log(phba, KERN_INFO, LOG_FIP,
+ "2848 Remove ineligible FCF (x%x) from "
+ "from roundrobin bmask\n", fcf_index);
+ /* Clear roundrobin bmask bit for ineligible FCF */
+ lpfc_sli4_fcf_rr_index_clear(phba, fcf_index);
+ /* Perform next round of roundrobin FCF failover */
+ fcf_index = lpfc_sli4_fcf_rr_next_index_get(phba);
+ rc = lpfc_sli4_fcf_rr_next_proc(phba->pport, fcf_index);
+ if (rc)
+ goto out;
+ goto error_out;
+ }
+
+ if (fcf_index == phba->fcf.current_rec.fcf_indx) {
+ lpfc_printf_log(phba, KERN_INFO, LOG_FIP,
+ "2760 Perform FLOGI roundrobin FCF failover: "
+ "FCF (x%x) back to FCF (x%x)\n",
+ phba->fcf.current_rec.fcf_indx, fcf_index);
+ /* Wait 500 ms before retrying FLOGI to current FCF */
+ msleep(500);
+ lpfc_initial_flogi(phba->pport);
+ goto out;
+ }
+
/* Upload new FCF record to the failover FCF record */
lpfc_printf_log(phba, KERN_INFO, LOG_FIP,
- "2834 Update the current FCF record (x%x) "
- "with the next FCF record (x%x)\n",
- phba->fcf.failover_rec.fcf_indx,
- bf_get(lpfc_fcf_record_fcf_index, new_fcf_record));
+ "2834 Update current FCF (x%x) with new FCF (x%x)\n",
+ phba->fcf.failover_rec.fcf_indx, fcf_index);
spin_lock_irq(&phba->hbalock);
__lpfc_update_fcf_record(phba, &phba->fcf.failover_rec,
new_fcf_record, addr_mode, vlan_id,
@@ -2178,14 +2355,13 @@ lpfc_mbx_cmpl_fcf_rr_read_fcf_rec(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
sizeof(struct lpfc_fcf_rec));
lpfc_printf_log(phba, KERN_INFO, LOG_FIP,
- "2783 FLOGI round robin FCF failover from FCF "
- "(x%x) to FCF (x%x).\n",
- current_fcf_index,
- bf_get(lpfc_fcf_record_fcf_index, new_fcf_record));
+ "2783 Perform FLOGI roundrobin FCF failover: FCF "
+ "(x%x) to FCF (x%x)\n", current_fcf_index, fcf_index);
+error_out:
+ lpfc_register_fcf(phba);
out:
lpfc_sli4_mbox_cmd_free(phba, mboxq);
- lpfc_register_fcf(phba);
}
/**
@@ -2194,10 +2370,10 @@ out:
* @mboxq: pointer to mailbox object.
*
* This is the callback function of read FCF record mailbox command for
- * updating the eligible FCF bmask for FLOGI failure round robin FCF
+ * updating the eligible FCF bmask for FLOGI failure roundrobin FCF
* failover when a new FCF event happened. If the FCF read back is
* valid/available and it passes the connection list check, it updates
- * the bmask for the eligible FCF record for round robin failover.
+ * the bmask for the eligible FCF record for roundrobin failover.
*/
void
lpfc_mbx_cmpl_read_fcf_rec(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
@@ -2639,7 +2815,7 @@ lpfc_mbx_process_link_up(struct lpfc_hba *phba, READ_LA_VAR *la)
* and get the FCF Table.
*/
spin_lock_irq(&phba->hbalock);
- if (phba->hba_flag & FCF_DISC_INPROGRESS) {
+ if (phba->hba_flag & FCF_TS_INPROG) {
spin_unlock_irq(&phba->hbalock);
return;
}
@@ -3906,6 +4082,11 @@ lpfc_unreg_all_rpis(struct lpfc_vport *vport)
LPFC_MBOXQ_t *mbox;
int rc;
+ if (phba->sli_rev == LPFC_SLI_REV4) {
+ lpfc_sli4_unreg_all_rpis(vport);
+ return;
+ }
+
mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
if (mbox) {
lpfc_unreg_login(phba, vport->vpi, 0xffff, mbox);
@@ -3992,6 +4173,16 @@ lpfc_cleanup_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
}
spin_lock_irq(&phba->hbalock);
+ /* Cleanup REG_LOGIN completions which are not yet processed */
+ list_for_each_entry(mb, &phba->sli.mboxq_cmpl, list) {
+ if ((mb->u.mb.mbxCommand != MBX_REG_LOGIN64) ||
+ (ndlp != (struct lpfc_nodelist *) mb->context2))
+ continue;
+
+ mb->context2 = NULL;
+ mb->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
+ }
+
list_for_each_entry_safe(mb, nextmb, &phba->sli.mboxq, list) {
if ((mb->u.mb.mbxCommand == MBX_REG_LOGIN64) &&
(ndlp == (struct lpfc_nodelist *) mb->context2)) {
@@ -5170,6 +5361,8 @@ lpfc_unregister_fcf_prep(struct lpfc_hba *phba)
if (ndlp)
lpfc_cancel_retry_delay_tmo(vports[i], ndlp);
lpfc_cleanup_pending_mbox(vports[i]);
+ if (phba->sli_rev == LPFC_SLI_REV4)
+ lpfc_sli4_unreg_all_rpis(vports[i]);
lpfc_mbx_unreg_vpi(vports[i]);
shost = lpfc_shost_from_vport(vports[i]);
spin_lock_irq(shost->host_lock);
diff --git a/drivers/scsi/lpfc/lpfc_hw.h b/drivers/scsi/lpfc/lpfc_hw.h
index a631647051d9..9b8333456465 100644
--- a/drivers/scsi/lpfc/lpfc_hw.h
+++ b/drivers/scsi/lpfc/lpfc_hw.h
@@ -861,6 +861,47 @@ typedef struct _RPS_RSP { /* Structure is in Big Endian format */
uint32_t crcCnt;
} RPS_RSP;
+struct RLS { /* Structure is in Big Endian format */
+ uint32_t rls;
+#define rls_rsvd_SHIFT 24
+#define rls_rsvd_MASK 0x000000ff
+#define rls_rsvd_WORD rls
+#define rls_did_SHIFT 0
+#define rls_did_MASK 0x00ffffff
+#define rls_did_WORD rls
+};
+
+struct RLS_RSP { /* Structure is in Big Endian format */
+ uint32_t linkFailureCnt;
+ uint32_t lossSyncCnt;
+ uint32_t lossSignalCnt;
+ uint32_t primSeqErrCnt;
+ uint32_t invalidXmitWord;
+ uint32_t crcCnt;
+};
+
+struct RTV_RSP { /* Structure is in Big Endian format */
+ uint32_t ratov;
+ uint32_t edtov;
+ uint32_t qtov;
+#define qtov_rsvd0_SHIFT 28
+#define qtov_rsvd0_MASK 0x0000000f
+#define qtov_rsvd0_WORD qtov /* reserved */
+#define qtov_edtovres_SHIFT 27
+#define qtov_edtovres_MASK 0x00000001
+#define qtov_edtovres_WORD qtov /* E_D_TOV Resolution */
+#define qtov_rsvd1_SHIFT 19
+#define qtov_rsvd1_MASK 0x0000003f
+#define qtov_rsvd1_WORD qtov /* reserved */
+#define qtov_rttov_SHIFT 18
+#define qtov_rttov_MASK 0x00000001
+#define qtov_rttov_WORD qtov /* R_T_TOV value */
+#define qtov_rsvd2_SHIFT 0
+#define qtov_rsvd2_MASK 0x0003ffff
+#define qtov_rsvd2_WORD qtov /* reserved */
+};
+
+
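The SHIFT/MASK/WORD triples added above follow the driver's usual bit-field convention: each named field lives in the 32-bit word named by the *_WORD define and is read or written by shifting and masking, normally through the driver's bf_get()/bf_set() helpers. A minimal illustrative sketch (the helper name below is made up for this note, and it assumes the response word still needs be32_to_cpu() conversion from wire order) of pulling the E_D_TOV resolution bit out of a received RTV_RSP:

static inline uint32_t rtv_rsp_edtovres(const struct RTV_RSP *rsp)
{
	/* RTV_RSP arrives in big-endian wire order; swap before masking. */
	uint32_t qtov = be32_to_cpu(rsp->qtov);

	return (qtov >> qtov_edtovres_SHIFT) & qtov_edtovres_MASK;
}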
typedef struct _RPL { /* Structure is in Big Endian format */
uint32_t maxsize;
uint32_t index;
diff --git a/drivers/scsi/lpfc/lpfc_hw4.h b/drivers/scsi/lpfc/lpfc_hw4.h
index bbdcf96800f6..6e4bc34e1d0d 100644
--- a/drivers/scsi/lpfc/lpfc_hw4.h
+++ b/drivers/scsi/lpfc/lpfc_hw4.h
@@ -424,79 +424,6 @@ struct lpfc_rcqe {
#define FCOE_SOFn3 0x36
};
-struct lpfc_wqe_generic{
- struct ulp_bde64 bde;
- uint32_t word3;
- uint32_t word4;
- uint32_t word5;
- uint32_t word6;
-#define lpfc_wqe_gen_context_SHIFT 16
-#define lpfc_wqe_gen_context_MASK 0x0000FFFF
-#define lpfc_wqe_gen_context_WORD word6
-#define lpfc_wqe_gen_xri_SHIFT 0
-#define lpfc_wqe_gen_xri_MASK 0x0000FFFF
-#define lpfc_wqe_gen_xri_WORD word6
- uint32_t word7;
-#define lpfc_wqe_gen_lnk_SHIFT 23
-#define lpfc_wqe_gen_lnk_MASK 0x00000001
-#define lpfc_wqe_gen_lnk_WORD word7
-#define lpfc_wqe_gen_erp_SHIFT 22
-#define lpfc_wqe_gen_erp_MASK 0x00000001
-#define lpfc_wqe_gen_erp_WORD word7
-#define lpfc_wqe_gen_pu_SHIFT 20
-#define lpfc_wqe_gen_pu_MASK 0x00000003
-#define lpfc_wqe_gen_pu_WORD word7
-#define lpfc_wqe_gen_class_SHIFT 16
-#define lpfc_wqe_gen_class_MASK 0x00000007
-#define lpfc_wqe_gen_class_WORD word7
-#define lpfc_wqe_gen_command_SHIFT 8
-#define lpfc_wqe_gen_command_MASK 0x000000FF
-#define lpfc_wqe_gen_command_WORD word7
-#define lpfc_wqe_gen_status_SHIFT 4
-#define lpfc_wqe_gen_status_MASK 0x0000000F
-#define lpfc_wqe_gen_status_WORD word7
-#define lpfc_wqe_gen_ct_SHIFT 2
-#define lpfc_wqe_gen_ct_MASK 0x00000003
-#define lpfc_wqe_gen_ct_WORD word7
- uint32_t abort_tag;
- uint32_t word9;
-#define lpfc_wqe_gen_request_tag_SHIFT 0
-#define lpfc_wqe_gen_request_tag_MASK 0x0000FFFF
-#define lpfc_wqe_gen_request_tag_WORD word9
- uint32_t word10;
-#define lpfc_wqe_gen_ccp_SHIFT 24
-#define lpfc_wqe_gen_ccp_MASK 0x000000FF
-#define lpfc_wqe_gen_ccp_WORD word10
-#define lpfc_wqe_gen_ccpe_SHIFT 23
-#define lpfc_wqe_gen_ccpe_MASK 0x00000001
-#define lpfc_wqe_gen_ccpe_WORD word10
-#define lpfc_wqe_gen_pv_SHIFT 19
-#define lpfc_wqe_gen_pv_MASK 0x00000001
-#define lpfc_wqe_gen_pv_WORD word10
-#define lpfc_wqe_gen_pri_SHIFT 16
-#define lpfc_wqe_gen_pri_MASK 0x00000007
-#define lpfc_wqe_gen_pri_WORD word10
- uint32_t word11;
-#define lpfc_wqe_gen_cq_id_SHIFT 16
-#define lpfc_wqe_gen_cq_id_MASK 0x0000FFFF
-#define lpfc_wqe_gen_cq_id_WORD word11
-#define LPFC_WQE_CQ_ID_DEFAULT 0xffff
-#define lpfc_wqe_gen_wqec_SHIFT 7
-#define lpfc_wqe_gen_wqec_MASK 0x00000001
-#define lpfc_wqe_gen_wqec_WORD word11
-#define ELS_ID_FLOGI 3
-#define ELS_ID_FDISC 2
-#define ELS_ID_LOGO 1
-#define ELS_ID_DEFAULT 0
-#define lpfc_wqe_gen_els_id_SHIFT 4
-#define lpfc_wqe_gen_els_id_MASK 0x00000003
-#define lpfc_wqe_gen_els_id_WORD word11
-#define lpfc_wqe_gen_cmd_type_SHIFT 0
-#define lpfc_wqe_gen_cmd_type_MASK 0x0000000F
-#define lpfc_wqe_gen_cmd_type_WORD word11
- uint32_t payload[4];
-};
-
struct lpfc_rqe {
uint32_t address_hi;
uint32_t address_lo;
@@ -2279,9 +2206,36 @@ struct wqe_common {
#define wqe_reqtag_MASK 0x0000FFFF
#define wqe_reqtag_WORD word9
#define wqe_rcvoxid_SHIFT 16
-#define wqe_rcvoxid_MASK 0x0000FFFF
-#define wqe_rcvoxid_WORD word9
+#define wqe_rcvoxid_MASK 0x0000FFFF
+#define wqe_rcvoxid_WORD word9
uint32_t word10;
+#define wqe_ebde_cnt_SHIFT 0
+#define wqe_ebde_cnt_MASK 0x00000007
+#define wqe_ebde_cnt_WORD word10
+#define wqe_lenloc_SHIFT 7
+#define wqe_lenloc_MASK 0x00000003
+#define wqe_lenloc_WORD word10
+#define LPFC_WQE_LENLOC_NONE 0
+#define LPFC_WQE_LENLOC_WORD3 1
+#define LPFC_WQE_LENLOC_WORD12 2
+#define LPFC_WQE_LENLOC_WORD4 3
+#define wqe_qosd_SHIFT 9
+#define wqe_qosd_MASK 0x00000001
+#define wqe_qosd_WORD word10
+#define wqe_xbl_SHIFT 11
+#define wqe_xbl_MASK 0x00000001
+#define wqe_xbl_WORD word10
+#define wqe_iod_SHIFT 13
+#define wqe_iod_MASK 0x00000001
+#define wqe_iod_WORD word10
+#define LPFC_WQE_IOD_WRITE 0
+#define LPFC_WQE_IOD_READ 1
+#define wqe_dbde_SHIFT 14
+#define wqe_dbde_MASK 0x00000001
+#define wqe_dbde_WORD word10
+#define wqe_wqes_SHIFT 15
+#define wqe_wqes_MASK 0x00000001
+#define wqe_wqes_WORD word10
#define wqe_pri_SHIFT 16
#define wqe_pri_MASK 0x00000007
#define wqe_pri_WORD word10
@@ -2295,18 +2249,26 @@ struct wqe_common {
#define wqe_ccpe_MASK 0x00000001
#define wqe_ccpe_WORD word10
#define wqe_ccp_SHIFT 24
-#define wqe_ccp_MASK 0x000000ff
-#define wqe_ccp_WORD word10
+#define wqe_ccp_MASK 0x000000ff
+#define wqe_ccp_WORD word10
uint32_t word11;
-#define wqe_cmd_type_SHIFT 0
-#define wqe_cmd_type_MASK 0x0000000f
-#define wqe_cmd_type_WORD word11
-#define wqe_wqec_SHIFT 7
-#define wqe_wqec_MASK 0x00000001
-#define wqe_wqec_WORD word11
-#define wqe_cqid_SHIFT 16
-#define wqe_cqid_MASK 0x0000ffff
-#define wqe_cqid_WORD word11
+#define wqe_cmd_type_SHIFT 0
+#define wqe_cmd_type_MASK 0x0000000f
+#define wqe_cmd_type_WORD word11
+#define wqe_els_id_SHIFT 4
+#define wqe_els_id_MASK 0x00000003
+#define wqe_els_id_WORD word11
+#define LPFC_ELS_ID_FLOGI 3
+#define LPFC_ELS_ID_FDISC 2
+#define LPFC_ELS_ID_LOGO 1
+#define LPFC_ELS_ID_DEFAULT 0
+#define wqe_wqec_SHIFT 7
+#define wqe_wqec_MASK 0x00000001
+#define wqe_wqec_WORD word11
+#define wqe_cqid_SHIFT 16
+#define wqe_cqid_MASK 0x0000ffff
+#define wqe_cqid_WORD word11
+#define LPFC_WQE_CQ_ID_DEFAULT 0xffff
};
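The new word10/word11 fields above (wqe_dbde, wqe_iod, wqe_lenloc, wqe_qosd, wqe_ebde_cnt, wqe_els_id, wqe_cqid) replace the old lpfc_wqe_gen_* accessors and are programmed per work-queue entry with bf_set(), as the lpfc_sli.c hunks later in this patch show. A hedged sketch (the wrapper function name is invented for illustration) of how the common words of an ELS request WQE end up being set:

static void sketch_prep_els_req_wqe_com(union lpfc_wqe *wqe)
{
	/* Mirrors the CMD_ELS_REQUEST64_CR settings in lpfc_sli4_iocb2wqe() */
	bf_set(wqe_dbde, &wqe->els_req.wqe_com, 1);
	bf_set(wqe_iod, &wqe->els_req.wqe_com, LPFC_WQE_IOD_READ);
	bf_set(wqe_qosd, &wqe->els_req.wqe_com, 1);
	bf_set(wqe_lenloc, &wqe->els_req.wqe_com, LPFC_WQE_LENLOC_NONE);
	bf_set(wqe_ebde_cnt, &wqe->els_req.wqe_com, 0);
}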
struct wqe_did {
@@ -2325,6 +2287,15 @@ struct wqe_did {
#define wqe_xmit_bls_xo_WORD word5
};
+struct lpfc_wqe_generic{
+ struct ulp_bde64 bde;
+ uint32_t word3;
+ uint32_t word4;
+ uint32_t word5;
+ struct wqe_common wqe_com;
+ uint32_t payload[4];
+};
+
struct els_request64_wqe {
struct ulp_bde64 bde;
uint32_t payload_len;
@@ -2356,9 +2327,9 @@ struct els_request64_wqe {
struct xmit_els_rsp64_wqe {
struct ulp_bde64 bde;
- uint32_t rsvd3;
+ uint32_t response_payload_len;
uint32_t rsvd4;
- struct wqe_did wqe_dest;
+ struct wqe_did wqe_dest;
struct wqe_common wqe_com; /* words 6-11 */
uint32_t rsvd_12_15[4];
};
@@ -2427,7 +2398,7 @@ struct wqe_rctl_dfctl {
struct xmit_seq64_wqe {
struct ulp_bde64 bde;
- uint32_t paylaod_offset;
+ uint32_t rsvd3;
uint32_t relative_offset;
struct wqe_rctl_dfctl wge_ctl;
struct wqe_common wqe_com; /* words 6-11 */
@@ -2437,7 +2408,7 @@ struct xmit_seq64_wqe {
};
struct xmit_bcast64_wqe {
struct ulp_bde64 bde;
- uint32_t paylaod_len;
+ uint32_t seq_payload_len;
uint32_t rsvd4;
struct wqe_rctl_dfctl wge_ctl; /* word 5 */
struct wqe_common wqe_com; /* words 6-11 */
@@ -2446,8 +2417,8 @@ struct xmit_bcast64_wqe {
struct gen_req64_wqe {
struct ulp_bde64 bde;
- uint32_t command_len;
- uint32_t payload_len;
+ uint32_t request_payload_len;
+ uint32_t relative_offset;
struct wqe_rctl_dfctl wge_ctl; /* word 5 */
struct wqe_common wqe_com; /* words 6-11 */
uint32_t rsvd_12_15[4];
@@ -2480,7 +2451,7 @@ struct abort_cmd_wqe {
struct fcp_iwrite64_wqe {
struct ulp_bde64 bde;
- uint32_t payload_len;
+ uint32_t payload_offset_len;
uint32_t total_xfer_len;
uint32_t initial_xfer_len;
struct wqe_common wqe_com; /* words 6-11 */
@@ -2489,7 +2460,7 @@ struct fcp_iwrite64_wqe {
struct fcp_iread64_wqe {
struct ulp_bde64 bde;
- uint32_t payload_len; /* word 3 */
+ uint32_t payload_offset_len; /* word 3 */
uint32_t total_xfer_len; /* word 4 */
uint32_t rsrvd5; /* word 5 */
struct wqe_common wqe_com; /* words 6-11 */
@@ -2497,10 +2468,12 @@ struct fcp_iread64_wqe {
};
struct fcp_icmnd64_wqe {
- struct ulp_bde64 bde; /* words 0-2 */
- uint32_t rsrvd[3]; /* words 3-5 */
+ struct ulp_bde64 bde; /* words 0-2 */
+ uint32_t rsrvd3; /* word 3 */
+ uint32_t rsrvd4; /* word 4 */
+ uint32_t rsrvd5; /* word 5 */
struct wqe_common wqe_com; /* words 6-11 */
- uint32_t rsvd_12_15[4]; /* word 12-15 */
+ uint32_t rsvd_12_15[4]; /* word 12-15 */
};
diff --git a/drivers/scsi/lpfc/lpfc_init.c b/drivers/scsi/lpfc/lpfc_init.c
index 295c7ddb36c1..b3065791f303 100644
--- a/drivers/scsi/lpfc/lpfc_init.c
+++ b/drivers/scsi/lpfc/lpfc_init.c
@@ -813,6 +813,7 @@ lpfc_hba_down_post_s3(struct lpfc_hba *phba)
return 0;
}
+
/**
* lpfc_hba_down_post_s4 - Perform lpfc uninitialization after HBA reset
* @phba: pointer to lpfc HBA data structure.
@@ -2234,10 +2235,9 @@ lpfc_stop_vport_timers(struct lpfc_vport *vport)
void
__lpfc_sli4_stop_fcf_redisc_wait_timer(struct lpfc_hba *phba)
{
- /* Clear pending FCF rediscovery wait and failover in progress flags */
- phba->fcf.fcf_flag &= ~(FCF_REDISC_PEND |
- FCF_DEAD_DISC |
- FCF_ACVL_DISC);
+ /* Clear pending FCF rediscovery wait flag */
+ phba->fcf.fcf_flag &= ~FCF_REDISC_PEND;
+
/* Now, try to stop the timer */
del_timer(&phba->fcf.redisc_wait);
}
@@ -2261,6 +2261,8 @@ lpfc_sli4_stop_fcf_redisc_wait_timer(struct lpfc_hba *phba)
return;
}
__lpfc_sli4_stop_fcf_redisc_wait_timer(phba);
+ /* Clear failover in progress flags */
+ phba->fcf.fcf_flag &= ~(FCF_DEAD_DISC | FCF_ACVL_DISC);
spin_unlock_irq(&phba->hbalock);
}
@@ -2935,8 +2937,7 @@ lpfc_sli4_fcf_redisc_wait_tmo(unsigned long ptr)
phba->fcf.fcf_flag |= FCF_REDISC_EVT;
spin_unlock_irq(&phba->hbalock);
lpfc_printf_log(phba, KERN_INFO, LOG_FIP,
- "2776 FCF rediscover wait timer expired, post "
- "a worker thread event for FCF table scan\n");
+ "2776 FCF rediscover quiescent timer expired\n");
/* wake up worker thread */
lpfc_worker_wake_up(phba);
}
@@ -3311,35 +3312,34 @@ lpfc_sli4_async_fcoe_evt(struct lpfc_hba *phba,
if (event_type == LPFC_FCOE_EVENT_TYPE_NEW_FCF)
lpfc_printf_log(phba, KERN_ERR, LOG_FIP |
LOG_DISCOVERY,
- "2546 New FCF found event: "
- "evt_tag:x%x, fcf_index:x%x\n",
+ "2546 New FCF event, evt_tag:x%x, "
+ "index:x%x\n",
acqe_fcoe->event_tag,
acqe_fcoe->index);
else
lpfc_printf_log(phba, KERN_WARNING, LOG_FIP |
LOG_DISCOVERY,
- "2788 FCF parameter modified event: "
- "evt_tag:x%x, fcf_index:x%x\n",
+ "2788 FCF param modified event, "
+ "evt_tag:x%x, index:x%x\n",
acqe_fcoe->event_tag,
acqe_fcoe->index);
if (phba->fcf.fcf_flag & FCF_DISCOVERY) {
/*
* During period of FCF discovery, read the FCF
* table record indexed by the event to update
- * FCF round robin failover eligible FCF bmask.
+ * FCF roundrobin failover eligible FCF bmask.
*/
lpfc_printf_log(phba, KERN_INFO, LOG_FIP |
LOG_DISCOVERY,
- "2779 Read new FCF record with "
- "fcf_index:x%x for updating FCF "
- "round robin failover bmask\n",
+ "2779 Read FCF (x%x) for updating "
+ "roundrobin FCF failover bmask\n",
acqe_fcoe->index);
rc = lpfc_sli4_read_fcf_rec(phba, acqe_fcoe->index);
}
/* If the FCF discovery is in progress, do nothing. */
spin_lock_irq(&phba->hbalock);
- if (phba->hba_flag & FCF_DISC_INPROGRESS) {
+ if (phba->hba_flag & FCF_TS_INPROG) {
spin_unlock_irq(&phba->hbalock);
break;
}
@@ -3358,15 +3358,15 @@ lpfc_sli4_async_fcoe_evt(struct lpfc_hba *phba,
/* Otherwise, scan the entire FCF table and re-discover SAN */
lpfc_printf_log(phba, KERN_INFO, LOG_FIP | LOG_DISCOVERY,
- "2770 Start FCF table scan due to new FCF "
- "event: evt_tag:x%x, fcf_index:x%x\n",
+ "2770 Start FCF table scan per async FCF "
+ "event, evt_tag:x%x, index:x%x\n",
acqe_fcoe->event_tag, acqe_fcoe->index);
rc = lpfc_sli4_fcf_scan_read_fcf_rec(phba,
LPFC_FCOE_FCF_GET_FIRST);
if (rc)
lpfc_printf_log(phba, KERN_ERR, LOG_FIP | LOG_DISCOVERY,
"2547 Issue FCF scan read FCF mailbox "
- "command failed 0x%x\n", rc);
+ "command failed (x%x)\n", rc);
break;
case LPFC_FCOE_EVENT_TYPE_FCF_TABLE_FULL:
@@ -3378,9 +3378,8 @@ lpfc_sli4_async_fcoe_evt(struct lpfc_hba *phba,
case LPFC_FCOE_EVENT_TYPE_FCF_DEAD:
lpfc_printf_log(phba, KERN_ERR, LOG_FIP | LOG_DISCOVERY,
- "2549 FCF disconnected from network index 0x%x"
- " tag 0x%x\n", acqe_fcoe->index,
- acqe_fcoe->event_tag);
+ "2549 FCF (x%x) disconnected from network, "
+ "tag:x%x\n", acqe_fcoe->index, acqe_fcoe->event_tag);
/*
* If we are in the middle of FCF failover process, clear
* the corresponding FCF bit in the roundrobin bitmap.
@@ -3494,9 +3493,8 @@ lpfc_sli4_async_fcoe_evt(struct lpfc_hba *phba,
spin_unlock_irq(&phba->hbalock);
lpfc_printf_log(phba, KERN_INFO, LOG_FIP |
LOG_DISCOVERY,
- "2773 Start FCF fast failover due "
- "to CVL event: evt_tag:x%x\n",
- acqe_fcoe->event_tag);
+ "2773 Start FCF failover per CVL, "
+ "evt_tag:x%x\n", acqe_fcoe->event_tag);
rc = lpfc_sli4_redisc_fcf_table(phba);
if (rc) {
lpfc_printf_log(phba, KERN_ERR, LOG_FIP |
@@ -3646,8 +3644,7 @@ void lpfc_sli4_fcf_redisc_event_proc(struct lpfc_hba *phba)
/* Scan FCF table from the first entry to re-discover SAN */
lpfc_printf_log(phba, KERN_INFO, LOG_FIP | LOG_DISCOVERY,
- "2777 Start FCF table scan after FCF "
- "rediscovery quiescent period over\n");
+ "2777 Start post-quiescent FCF table scan\n");
rc = lpfc_sli4_fcf_scan_read_fcf_rec(phba, LPFC_FCOE_FCF_GET_FIRST);
if (rc)
lpfc_printf_log(phba, KERN_ERR, LOG_FIP | LOG_DISCOVERY,
@@ -4165,7 +4162,7 @@ lpfc_sli4_driver_resource_setup(struct lpfc_hba *phba)
goto out_free_active_sgl;
}
- /* Allocate eligible FCF bmask memory for FCF round robin failover */
+ /* Allocate eligible FCF bmask memory for FCF roundrobin failover */
longs = (LPFC_SLI4_FCF_TBL_INDX_MAX + BITS_PER_LONG - 1)/BITS_PER_LONG;
phba->fcf.fcf_rr_bmask = kzalloc(longs * sizeof(unsigned long),
GFP_KERNEL);
@@ -7271,6 +7268,51 @@ lpfc_sli4_unset_hba(struct lpfc_hba *phba)
}
/**
+ * lpfc_sli4_xri_exchange_busy_wait - Wait for device XRI exchange busy
+ * @phba: Pointer to HBA context object.
+ *
+ * This function is called in the SLI4 code path to wait for completion
+ * of the device's XRI exchange busy I/Os. It checks the XRI exchange busy
+ * state on outstanding FCP and ELS I/Os every 10ms for up to 10 seconds;
+ * after that, it checks every 30 seconds, logs an error message, and waits
+ * indefinitely. Only when all XRI exchange busy I/Os have completed does
+ * the driver unload proceed with invoking the function reset ioctl mailbox
+ * command to the CNA and the rest of the driver unload resource release.
+ **/
+static void
+lpfc_sli4_xri_exchange_busy_wait(struct lpfc_hba *phba)
+{
+ int wait_time = 0;
+ int fcp_xri_cmpl = list_empty(&phba->sli4_hba.lpfc_abts_scsi_buf_list);
+ int els_xri_cmpl = list_empty(&phba->sli4_hba.lpfc_abts_els_sgl_list);
+
+ while (!fcp_xri_cmpl || !els_xri_cmpl) {
+ if (wait_time > LPFC_XRI_EXCH_BUSY_WAIT_TMO) {
+ if (!fcp_xri_cmpl)
+ lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+ "2877 FCP XRI exchange busy "
+ "wait time: %d seconds.\n",
+ wait_time/1000);
+ if (!els_xri_cmpl)
+ lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+ "2878 ELS XRI exchange busy "
+ "wait time: %d seconds.\n",
+ wait_time/1000);
+ msleep(LPFC_XRI_EXCH_BUSY_WAIT_T2);
+ wait_time += LPFC_XRI_EXCH_BUSY_WAIT_T2;
+ } else {
+ msleep(LPFC_XRI_EXCH_BUSY_WAIT_T1);
+ wait_time += LPFC_XRI_EXCH_BUSY_WAIT_T1;
+ }
+ fcp_xri_cmpl =
+ list_empty(&phba->sli4_hba.lpfc_abts_scsi_buf_list);
+ els_xri_cmpl =
+ list_empty(&phba->sli4_hba.lpfc_abts_els_sgl_list);
+ }
+}
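The wait intervals used above come from constants defined in the lpfc headers outside this hunk. Based only on the "every 10ms for up to 10 seconds, then every 30 seconds" description in the comment, they are presumably along these lines (values in milliseconds; shown here as an inference, not quoted from the header):

#define LPFC_XRI_EXCH_BUSY_WAIT_TMO	10000	/* give up fast polling after 10 s */
#define LPFC_XRI_EXCH_BUSY_WAIT_T1	10	/* poll every 10 ms at first */
#define LPFC_XRI_EXCH_BUSY_WAIT_T2	30000	/* then poll every 30 s */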
+
+/**
* lpfc_sli4_hba_unset - Unset the fcoe hba
* @phba: Pointer to HBA context object.
*
@@ -7315,6 +7357,12 @@ lpfc_sli4_hba_unset(struct lpfc_hba *phba)
spin_unlock_irq(&phba->hbalock);
}
+ /* Abort all iocbs associated with the hba */
+ lpfc_sli_hba_iocb_abort(phba);
+
+ /* Wait for completion of device XRI exchange busy */
+ lpfc_sli4_xri_exchange_busy_wait(phba);
+
/* Disable PCI subsystem interrupt */
lpfc_sli4_disable_intr(phba);
diff --git a/drivers/scsi/lpfc/lpfc_mbox.c b/drivers/scsi/lpfc/lpfc_mbox.c
index 0dfa310cd609..62d0957e1d4c 100644
--- a/drivers/scsi/lpfc/lpfc_mbox.c
+++ b/drivers/scsi/lpfc/lpfc_mbox.c
@@ -797,6 +797,34 @@ lpfc_unreg_login(struct lpfc_hba *phba, uint16_t vpi, uint32_t rpi,
}
/**
+ * lpfc_sli4_unreg_all_rpis - unregister all RPIs for a vport on SLI4 HBA.
+ * @vport: pointer to a vport object.
+ *
+ * This routine sends mailbox command to unregister all active RPIs for
+ * a vport.
+ **/
+void
+lpfc_sli4_unreg_all_rpis(struct lpfc_vport *vport)
+{
+ struct lpfc_hba *phba = vport->phba;
+ LPFC_MBOXQ_t *mbox;
+ int rc;
+
+ mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
+ if (mbox) {
+ lpfc_unreg_login(phba, vport->vpi,
+ vport->vpi + phba->vpi_base, mbox);
+ mbox->u.mb.un.varUnregLogin.rsvd1 = 0x4000;
+ mbox->vport = vport;
+ mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
+ mbox->context1 = NULL;
+ rc = lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT);
+ if (rc == MBX_NOT_FINISHED)
+ mempool_free(mbox, phba->mbox_mem_pool);
+ }
+}
+
+/**
* lpfc_reg_vpi - Prepare a mailbox command for registering vport identifier
* @phba: pointer to lpfc hba data structure.
* @vpi: virtual N_Port identifier.
diff --git a/drivers/scsi/lpfc/lpfc_scsi.c b/drivers/scsi/lpfc/lpfc_scsi.c
index 3a658953486c..581837b3c71a 100644
--- a/drivers/scsi/lpfc/lpfc_scsi.c
+++ b/drivers/scsi/lpfc/lpfc_scsi.c
@@ -169,6 +169,7 @@ lpfc_update_stats(struct lpfc_hba *phba, struct lpfc_scsi_buf *lpfc_cmd)
spin_lock_irqsave(shost->host_lock, flags);
if (!vport->stat_data_enabled ||
vport->stat_data_blocked ||
+ !pnode ||
!pnode->lat_data ||
(phba->bucket_type == LPFC_NO_BUCKET)) {
spin_unlock_irqrestore(shost->host_lock, flags);
@@ -2040,6 +2041,9 @@ lpfc_send_scsi_error_event(struct lpfc_hba *phba, struct lpfc_vport *vport,
struct lpfc_nodelist *pnode = lpfc_cmd->rdata->pnode;
unsigned long flags;
+ if (!pnode || !NLP_CHK_NODE_ACT(pnode))
+ return;
+
/* If there is queuefull or busy condition send a scsi event */
if ((cmnd->result == SAM_STAT_TASK_SET_FULL) ||
(cmnd->result == SAM_STAT_BUSY)) {
@@ -2895,7 +2899,7 @@ void lpfc_poll_timeout(unsigned long ptr)
* SCSI_MLQUEUE_HOST_BUSY - Block all devices served by this host temporarily.
**/
static int
-lpfc_queuecommand(struct scsi_cmnd *cmnd, void (*done) (struct scsi_cmnd *))
+lpfc_queuecommand_lck(struct scsi_cmnd *cmnd, void (*done) (struct scsi_cmnd *))
{
struct Scsi_Host *shost = cmnd->device->host;
struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
@@ -3056,6 +3060,8 @@ lpfc_queuecommand(struct scsi_cmnd *cmnd, void (*done) (struct scsi_cmnd *))
return 0;
}
+static DEF_SCSI_QCMD(lpfc_queuecommand)
+
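The rename of lpfc_queuecommand() to lpfc_queuecommand_lck() together with the DEF_SCSI_QCMD() line follows the SCSI midlayer's host_lock push-down convention of this era: the macro generates the lpfc_queuecommand() entry point, which takes the Scsi_Host lock and forwards to the _lck variant. Roughly (an approximation for orientation only; see include/scsi/scsi_host.h for the authoritative macro):

static int lpfc_queuecommand(struct Scsi_Host *shost, struct scsi_cmnd *cmd)
{
	unsigned long irq_flags;
	int rc;

	/* Midlayer-provided locking wrapper around the _lck routine */
	spin_lock_irqsave(shost->host_lock, irq_flags);
	rc = lpfc_queuecommand_lck(cmd, cmd->scsi_done);
	spin_unlock_irqrestore(shost->host_lock, irq_flags);
	return rc;
}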
/**
* lpfc_abort_handler - scsi_host_template eh_abort_handler entry point
* @cmnd: Pointer to scsi_cmnd data structure.
@@ -3226,10 +3232,11 @@ lpfc_send_taskmgmt(struct lpfc_vport *vport, struct lpfc_rport_data *rdata,
struct lpfc_scsi_buf *lpfc_cmd;
struct lpfc_iocbq *iocbq;
struct lpfc_iocbq *iocbqrsp;
+ struct lpfc_nodelist *pnode = rdata->pnode;
int ret;
int status;
- if (!rdata->pnode || !NLP_CHK_NODE_ACT(rdata->pnode))
+ if (!pnode || !NLP_CHK_NODE_ACT(pnode))
return FAILED;
lpfc_cmd = lpfc_get_scsi_buf(phba);
@@ -3256,7 +3263,7 @@ lpfc_send_taskmgmt(struct lpfc_vport *vport, struct lpfc_rport_data *rdata,
"0702 Issue %s to TGT %d LUN %d "
"rpi x%x nlp_flag x%x\n",
lpfc_taskmgmt_name(task_mgmt_cmd), tgt_id, lun_id,
- rdata->pnode->nlp_rpi, rdata->pnode->nlp_flag);
+ pnode->nlp_rpi, pnode->nlp_flag);
status = lpfc_sli_issue_iocb_wait(phba, LPFC_FCP_RING,
iocbq, iocbqrsp, lpfc_cmd->timeout);
diff --git a/drivers/scsi/lpfc/lpfc_sli.c b/drivers/scsi/lpfc/lpfc_sli.c
index 0d1e187b005d..554efa6623f4 100644
--- a/drivers/scsi/lpfc/lpfc_sli.c
+++ b/drivers/scsi/lpfc/lpfc_sli.c
@@ -95,7 +95,7 @@ lpfc_sli4_wq_put(struct lpfc_queue *q, union lpfc_wqe *wqe)
return -ENOMEM;
/* set consumption flag every once in a while */
if (!((q->host_index + 1) % LPFC_RELEASE_NOTIFICATION_INTERVAL))
- bf_set(lpfc_wqe_gen_wqec, &wqe->generic, 1);
+ bf_set(wqe_wqec, &wqe->generic.wqe_com, 1);
lpfc_sli_pcimem_bcopy(wqe, temp_wqe, q->entry_size);
@@ -1735,6 +1735,7 @@ lpfc_sli_def_mbox_cmpl(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
struct lpfc_vport *vport = pmb->vport;
struct lpfc_dmabuf *mp;
struct lpfc_nodelist *ndlp;
+ struct Scsi_Host *shost;
uint16_t rpi, vpi;
int rc;
@@ -1746,7 +1747,8 @@ lpfc_sli_def_mbox_cmpl(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
}
if ((pmb->u.mb.mbxCommand == MBX_UNREG_LOGIN) &&
- (phba->sli_rev == LPFC_SLI_REV4))
+ (phba->sli_rev == LPFC_SLI_REV4) &&
+ (pmb->u.mb.un.varUnregLogin.rsvd1 == 0x0))
lpfc_sli4_free_rpi(phba, pmb->u.mb.un.varUnregLogin.rpi);
/*
@@ -1765,16 +1767,14 @@ lpfc_sli_def_mbox_cmpl(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
return;
}
- /* Unreg VPI, if the REG_VPI succeed after VLink failure */
if ((pmb->u.mb.mbxCommand == MBX_REG_VPI) &&
!(phba->pport->load_flag & FC_UNLOADING) &&
!pmb->u.mb.mbxStatus) {
- lpfc_unreg_vpi(phba, pmb->u.mb.un.varRegVpi.vpi, pmb);
- pmb->vport = vport;
- pmb->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
- rc = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT);
- if (rc != MBX_NOT_FINISHED)
- return;
+ shost = lpfc_shost_from_vport(vport);
+ spin_lock_irq(shost->host_lock);
+ vport->vpi_state |= LPFC_VPI_REGISTERED;
+ vport->fc_flag &= ~FC_VPORT_NEEDS_REG_VPI;
+ spin_unlock_irq(shost->host_lock);
}
if (pmb->u.mb.mbxCommand == MBX_REG_LOGIN64) {
@@ -5921,7 +5921,7 @@ lpfc_sli4_bpl2sgl(struct lpfc_hba *phba, struct lpfc_iocbq *piocbq,
* lpfc_sli4_scmd_to_wqidx_distr - scsi command to SLI4 WQ index distribution
* @phba: Pointer to HBA context object.
*
- * This routine performs a round robin SCSI command to SLI4 FCP WQ index
+ * This routine performs a roundrobin SCSI command to SLI4 FCP WQ index
* distribution. This is called by __lpfc_sli_issue_iocb_s4() with the hbalock
* held.
*
@@ -5965,7 +5965,7 @@ lpfc_sli4_iocb2wqe(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq,
uint16_t abrt_iotag;
struct lpfc_iocbq *abrtiocbq;
struct ulp_bde64 *bpl = NULL;
- uint32_t els_id = ELS_ID_DEFAULT;
+ uint32_t els_id = LPFC_ELS_ID_DEFAULT;
int numBdes, i;
struct ulp_bde64 bde;
@@ -5982,7 +5982,7 @@ lpfc_sli4_iocb2wqe(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq,
memcpy(wqe, &iocbq->iocb, sizeof(union lpfc_wqe));
abort_tag = (uint32_t) iocbq->iotag;
xritag = iocbq->sli4_xritag;
- wqe->words[7] = 0; /* The ct field has moved so reset */
+ wqe->generic.wqe_com.word7 = 0; /* The ct field has moved so reset */
/* words0-2 bpl convert bde */
if (iocbq->iocb.un.genreq64.bdl.bdeFlags == BUFF_TYPE_BLP_64) {
numBdes = iocbq->iocb.un.genreq64.bdl.bdeSize /
@@ -6033,109 +6033,117 @@ lpfc_sli4_iocb2wqe(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq,
* contains the FCFI and remote N_Port_ID is
* in word 5.
*/
-
ct = ((iocbq->iocb.ulpCt_h << 1) | iocbq->iocb.ulpCt_l);
- bf_set(lpfc_wqe_gen_context, &wqe->generic,
- iocbq->iocb.ulpContext);
-
- bf_set(lpfc_wqe_gen_ct, &wqe->generic, ct);
- bf_set(lpfc_wqe_gen_pu, &wqe->generic, 0);
+ bf_set(wqe_ctxt_tag, &wqe->els_req.wqe_com,
+ iocbq->iocb.ulpContext);
+ bf_set(wqe_ct, &wqe->els_req.wqe_com, ct);
+ bf_set(wqe_pu, &wqe->els_req.wqe_com, 0);
/* CCP CCPE PV PRI in word10 were set in the memcpy */
-
if (command_type == ELS_COMMAND_FIP) {
els_id = ((iocbq->iocb_flag & LPFC_FIP_ELS_ID_MASK)
>> LPFC_FIP_ELS_ID_SHIFT);
}
- bf_set(lpfc_wqe_gen_els_id, &wqe->generic, els_id);
-
+ bf_set(wqe_els_id, &wqe->els_req.wqe_com, els_id);
+ bf_set(wqe_dbde, &wqe->els_req.wqe_com, 1);
+ bf_set(wqe_iod, &wqe->els_req.wqe_com, LPFC_WQE_IOD_READ);
+ bf_set(wqe_qosd, &wqe->els_req.wqe_com, 1);
+ bf_set(wqe_lenloc, &wqe->els_req.wqe_com, LPFC_WQE_LENLOC_NONE);
+ bf_set(wqe_ebde_cnt, &wqe->els_req.wqe_com, 0);
break;
case CMD_XMIT_SEQUENCE64_CX:
- bf_set(lpfc_wqe_gen_context, &wqe->generic,
- iocbq->iocb.un.ulpWord[3]);
- wqe->generic.word3 = 0;
- bf_set(wqe_rcvoxid, &wqe->generic, iocbq->iocb.ulpContext);
+ bf_set(wqe_ctxt_tag, &wqe->xmit_sequence.wqe_com,
+ iocbq->iocb.un.ulpWord[3]);
+ bf_set(wqe_rcvoxid, &wqe->xmit_sequence.wqe_com,
+ iocbq->iocb.ulpContext);
/* The entire sequence is transmitted for this IOCB */
xmit_len = total_len;
cmnd = CMD_XMIT_SEQUENCE64_CR;
case CMD_XMIT_SEQUENCE64_CR:
- /* word3 iocb=io_tag32 wqe=payload_offset */
- /* payload offset used for multilpe outstanding
- * sequences on the same exchange
- */
- wqe->words[3] = 0;
+ /* word3 iocb=io_tag32 wqe=reserved */
+ wqe->xmit_sequence.rsvd3 = 0;
/* word4 relative_offset memcpy */
/* word5 r_ctl/df_ctl memcpy */
- bf_set(lpfc_wqe_gen_pu, &wqe->generic, 0);
+ bf_set(wqe_pu, &wqe->xmit_sequence.wqe_com, 0);
+ bf_set(wqe_dbde, &wqe->xmit_sequence.wqe_com, 1);
+ bf_set(wqe_iod, &wqe->xmit_sequence.wqe_com,
+ LPFC_WQE_IOD_WRITE);
+ bf_set(wqe_lenloc, &wqe->xmit_sequence.wqe_com,
+ LPFC_WQE_LENLOC_WORD12);
+ bf_set(wqe_ebde_cnt, &wqe->xmit_sequence.wqe_com, 0);
wqe->xmit_sequence.xmit_len = xmit_len;
command_type = OTHER_COMMAND;
break;
case CMD_XMIT_BCAST64_CN:
- /* word3 iocb=iotag32 wqe=payload_len */
- wqe->words[3] = 0; /* no definition for this in wqe */
+ /* word3 iocb=iotag32 wqe=seq_payload_len */
+ wqe->xmit_bcast64.seq_payload_len = xmit_len;
/* word4 iocb=rsvd wqe=rsvd */
/* word5 iocb=rctl/type/df_ctl wqe=rctl/type/df_ctl memcpy */
/* word6 iocb=ctxt_tag/io_tag wqe=ctxt_tag/xri */
- bf_set(lpfc_wqe_gen_ct, &wqe->generic,
+ bf_set(wqe_ct, &wqe->xmit_bcast64.wqe_com,
((iocbq->iocb.ulpCt_h << 1) | iocbq->iocb.ulpCt_l));
+ bf_set(wqe_dbde, &wqe->xmit_bcast64.wqe_com, 1);
+ bf_set(wqe_iod, &wqe->xmit_bcast64.wqe_com, LPFC_WQE_IOD_WRITE);
+ bf_set(wqe_lenloc, &wqe->xmit_bcast64.wqe_com,
+ LPFC_WQE_LENLOC_WORD3);
+ bf_set(wqe_ebde_cnt, &wqe->xmit_bcast64.wqe_com, 0);
break;
case CMD_FCP_IWRITE64_CR:
command_type = FCP_COMMAND_DATA_OUT;
- /* The struct for wqe fcp_iwrite has 3 fields that are somewhat
- * confusing.
- * word3 is payload_len: byte offset to the sgl entry for the
- * fcp_command.
- * word4 is total xfer len, same as the IOCB->ulpParameter.
- * word5 is initial xfer len 0 = wait for xfer-ready
- */
-
- /* Always wait for xfer-ready before sending data */
- wqe->fcp_iwrite.initial_xfer_len = 0;
- /* word 4 (xfer length) should have been set on the memcpy */
-
- /* allow write to fall through to read */
+ /* word3 iocb=iotag wqe=payload_offset_len */
+ /* Add the FCP_CMD and FCP_RSP sizes to get the offset */
+ wqe->fcp_iwrite.payload_offset_len =
+ xmit_len + sizeof(struct fcp_rsp);
+ /* word4 iocb=parameter wqe=total_xfer_length memcpy */
+ /* word5 iocb=initial_xfer_len wqe=initial_xfer_len memcpy */
+ bf_set(wqe_erp, &wqe->fcp_iwrite.wqe_com,
+ iocbq->iocb.ulpFCP2Rcvy);
+ bf_set(wqe_lnk, &wqe->fcp_iwrite.wqe_com, iocbq->iocb.ulpXS);
+ /* Always open the exchange */
+ bf_set(wqe_xc, &wqe->fcp_iwrite.wqe_com, 0);
+ bf_set(wqe_dbde, &wqe->fcp_iwrite.wqe_com, 1);
+ bf_set(wqe_iod, &wqe->fcp_iwrite.wqe_com, LPFC_WQE_IOD_WRITE);
+ bf_set(wqe_lenloc, &wqe->fcp_iwrite.wqe_com,
+ LPFC_WQE_LENLOC_WORD4);
+ bf_set(wqe_ebde_cnt, &wqe->fcp_iwrite.wqe_com, 0);
+ bf_set(wqe_pu, &wqe->fcp_iwrite.wqe_com, iocbq->iocb.ulpPU);
+ break;
case CMD_FCP_IREAD64_CR:
- /* FCP_CMD is always the 1st sgl entry */
- wqe->fcp_iread.payload_len =
+ /* word3 iocb=iotag wqe=payload_offset_len */
+ /* Add the FCP_CMD and FCP_RSP sizes to get the offset */
+ wqe->fcp_iread.payload_offset_len =
xmit_len + sizeof(struct fcp_rsp);
-
- /* word 4 (xfer length) should have been set on the memcpy */
-
- bf_set(lpfc_wqe_gen_erp, &wqe->generic,
- iocbq->iocb.ulpFCP2Rcvy);
- bf_set(lpfc_wqe_gen_lnk, &wqe->generic, iocbq->iocb.ulpXS);
- /* The XC bit and the XS bit are similar. The driver never
- * tracked whether or not the exchange was previouslly open.
- * XC = Exchange create, 0 is create. 1 is already open.
- * XS = link cmd: 1 do not close the exchange after command.
- * XS = 0 close exchange when command completes.
- * The only time we would not set the XC bit is when the XS bit
- * is set and we are sending our 2nd or greater command on
- * this exchange.
- */
+ /* word4 iocb=parameter wqe=total_xfer_length memcpy */
+ /* word5 iocb=initial_xfer_len wqe=initial_xfer_len memcpy */
+ bf_set(wqe_erp, &wqe->fcp_iread.wqe_com,
+ iocbq->iocb.ulpFCP2Rcvy);
+ bf_set(wqe_lnk, &wqe->fcp_iread.wqe_com, iocbq->iocb.ulpXS);
/* Always open the exchange */
bf_set(wqe_xc, &wqe->fcp_iread.wqe_com, 0);
-
- wqe->words[10] &= 0xffff0000; /* zero out ebde count */
- bf_set(lpfc_wqe_gen_pu, &wqe->generic, iocbq->iocb.ulpPU);
- break;
+ bf_set(wqe_dbde, &wqe->fcp_iread.wqe_com, 1);
+ bf_set(wqe_iod, &wqe->fcp_iread.wqe_com, LPFC_WQE_IOD_READ);
+ bf_set(wqe_lenloc, &wqe->fcp_iread.wqe_com,
+ LPFC_WQE_LENLOC_WORD4);
+ bf_set(wqe_ebde_cnt, &wqe->fcp_iread.wqe_com, 0);
+ bf_set(wqe_pu, &wqe->fcp_iread.wqe_com, iocbq->iocb.ulpPU);
+ break;
case CMD_FCP_ICMND64_CR:
+ /* word3 iocb=IO_TAG wqe=reserved */
+ wqe->fcp_icmd.rsrvd3 = 0;
+ bf_set(wqe_pu, &wqe->fcp_icmd.wqe_com, 0);
/* Always open the exchange */
- bf_set(wqe_xc, &wqe->fcp_iread.wqe_com, 0);
-
- wqe->words[4] = 0;
- wqe->words[10] &= 0xffff0000; /* zero out ebde count */
- bf_set(lpfc_wqe_gen_pu, &wqe->generic, 0);
+ bf_set(wqe_xc, &wqe->fcp_icmd.wqe_com, 0);
+ bf_set(wqe_dbde, &wqe->fcp_icmd.wqe_com, 1);
+ bf_set(wqe_iod, &wqe->fcp_icmd.wqe_com, LPFC_WQE_IOD_WRITE);
+ bf_set(wqe_qosd, &wqe->fcp_icmd.wqe_com, 1);
+ bf_set(wqe_lenloc, &wqe->fcp_icmd.wqe_com,
+ LPFC_WQE_LENLOC_NONE);
+ bf_set(wqe_ebde_cnt, &wqe->fcp_icmd.wqe_com, 0);
break;
case CMD_GEN_REQUEST64_CR:
- /* word3 command length is described as byte offset to the
- * rsp_data. Would always be 16, sizeof(struct sli4_sge)
- * sgl[0] = cmnd
- * sgl[1] = rsp.
- *
- */
- wqe->gen_req.command_len = xmit_len;
- /* Word4 parameter copied in the memcpy */
- /* Word5 [rctl, type, df_ctl, la] copied in memcpy */
+ /* word3 iocb=IO_TAG wqe=request_payload_len */
+ wqe->gen_req.request_payload_len = xmit_len;
+ /* word4 iocb=parameter wqe=relative_offset memcpy */
+ /* word5 [rctl, type, df_ctl, la] copied in memcpy */
/* word6 context tag copied in memcpy */
if (iocbq->iocb.ulpCt_h || iocbq->iocb.ulpCt_l) {
ct = ((iocbq->iocb.ulpCt_h << 1) | iocbq->iocb.ulpCt_l);
@@ -6144,31 +6152,39 @@ lpfc_sli4_iocb2wqe(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq,
ct, iocbq->iocb.ulpCommand);
return IOCB_ERROR;
}
- bf_set(lpfc_wqe_gen_ct, &wqe->generic, 0);
- bf_set(wqe_tmo, &wqe->gen_req.wqe_com,
- iocbq->iocb.ulpTimeout);
-
- bf_set(lpfc_wqe_gen_pu, &wqe->generic, iocbq->iocb.ulpPU);
+ bf_set(wqe_ct, &wqe->gen_req.wqe_com, 0);
+ bf_set(wqe_tmo, &wqe->gen_req.wqe_com, iocbq->iocb.ulpTimeout);
+ bf_set(wqe_pu, &wqe->gen_req.wqe_com, iocbq->iocb.ulpPU);
+ bf_set(wqe_dbde, &wqe->gen_req.wqe_com, 1);
+ bf_set(wqe_iod, &wqe->gen_req.wqe_com, LPFC_WQE_IOD_READ);
+ bf_set(wqe_qosd, &wqe->gen_req.wqe_com, 1);
+ bf_set(wqe_lenloc, &wqe->gen_req.wqe_com, LPFC_WQE_LENLOC_NONE);
+ bf_set(wqe_ebde_cnt, &wqe->gen_req.wqe_com, 0);
command_type = OTHER_COMMAND;
break;
case CMD_XMIT_ELS_RSP64_CX:
/* words0-2 BDE memcpy */
- /* word3 iocb=iotag32 wqe=rsvd */
- wqe->words[3] = 0;
+ /* word3 iocb=iotag32 wqe=response_payload_len */
+ wqe->xmit_els_rsp.response_payload_len = xmit_len;
 /* word4 iocb=did wqe=rsvd. */
- wqe->words[4] = 0;
+ wqe->xmit_els_rsp.rsvd4 = 0;
 /* word5 iocb=rsvd wqe=did */
bf_set(wqe_els_did, &wqe->xmit_els_rsp.wqe_dest,
iocbq->iocb.un.elsreq64.remoteID);
-
- bf_set(lpfc_wqe_gen_ct, &wqe->generic,
- ((iocbq->iocb.ulpCt_h << 1) | iocbq->iocb.ulpCt_l));
-
- bf_set(lpfc_wqe_gen_pu, &wqe->generic, iocbq->iocb.ulpPU);
- bf_set(wqe_rcvoxid, &wqe->generic, iocbq->iocb.ulpContext);
+ bf_set(wqe_ct, &wqe->xmit_els_rsp.wqe_com,
+ ((iocbq->iocb.ulpCt_h << 1) | iocbq->iocb.ulpCt_l));
+ bf_set(wqe_pu, &wqe->xmit_els_rsp.wqe_com, iocbq->iocb.ulpPU);
+ bf_set(wqe_rcvoxid, &wqe->xmit_els_rsp.wqe_com,
+ iocbq->iocb.ulpContext);
if (!iocbq->iocb.ulpCt_h && iocbq->iocb.ulpCt_l)
- bf_set(lpfc_wqe_gen_context, &wqe->generic,
+ bf_set(wqe_ctxt_tag, &wqe->xmit_els_rsp.wqe_com,
iocbq->vport->vpi + phba->vpi_base);
+ bf_set(wqe_dbde, &wqe->xmit_els_rsp.wqe_com, 1);
+ bf_set(wqe_iod, &wqe->xmit_els_rsp.wqe_com, LPFC_WQE_IOD_WRITE);
+ bf_set(wqe_qosd, &wqe->xmit_els_rsp.wqe_com, 1);
+ bf_set(wqe_lenloc, &wqe->xmit_els_rsp.wqe_com,
+ LPFC_WQE_LENLOC_WORD3);
+ bf_set(wqe_ebde_cnt, &wqe->xmit_els_rsp.wqe_com, 0);
command_type = OTHER_COMMAND;
break;
case CMD_CLOSE_XRI_CN:
@@ -6193,15 +6209,19 @@ lpfc_sli4_iocb2wqe(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq,
else
bf_set(abort_cmd_ia, &wqe->abort_cmd, 0);
bf_set(abort_cmd_criteria, &wqe->abort_cmd, T_XRI_TAG);
- wqe->words[5] = 0;
- bf_set(lpfc_wqe_gen_ct, &wqe->generic,
+ /* word5 iocb=CONTEXT_TAG|IO_TAG wqe=reserved */
+ wqe->abort_cmd.rsrvd5 = 0;
+ bf_set(wqe_ct, &wqe->abort_cmd.wqe_com,
((iocbq->iocb.ulpCt_h << 1) | iocbq->iocb.ulpCt_l));
abort_tag = iocbq->iocb.un.acxri.abortIoTag;
/*
* The abort handler will send us CMD_ABORT_XRI_CN or
* CMD_CLOSE_XRI_CN and the fw only accepts CMD_ABORT_XRI_CX
*/
- bf_set(lpfc_wqe_gen_command, &wqe->generic, CMD_ABORT_XRI_CX);
+ bf_set(wqe_cmnd, &wqe->abort_cmd.wqe_com, CMD_ABORT_XRI_CX);
+ bf_set(wqe_qosd, &wqe->abort_cmd.wqe_com, 1);
+ bf_set(wqe_lenloc, &wqe->abort_cmd.wqe_com,
+ LPFC_WQE_LENLOC_NONE);
cmnd = CMD_ABORT_XRI_CX;
command_type = OTHER_COMMAND;
xritag = 0;
@@ -6235,18 +6255,14 @@ lpfc_sli4_iocb2wqe(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq,
bf_set(wqe_xmit_bls_pt, &wqe->xmit_bls_rsp.wqe_dest, 0x1);
bf_set(wqe_ctxt_tag, &wqe->xmit_bls_rsp.wqe_com,
iocbq->iocb.ulpContext);
+ bf_set(wqe_qosd, &wqe->xmit_bls_rsp.wqe_com, 1);
+ bf_set(wqe_lenloc, &wqe->xmit_bls_rsp.wqe_com,
+ LPFC_WQE_LENLOC_NONE);
 /* Overwrite the pre-set command type with OTHER_COMMAND */
command_type = OTHER_COMMAND;
break;
case CMD_XRI_ABORTED_CX:
case CMD_CREATE_XRI_CR: /* Do we expect to use this? */
- /* words0-2 are all 0's no bde */
- /* word3 and word4 are rsvrd */
- wqe->words[3] = 0;
- wqe->words[4] = 0;
- /* word5 iocb=rsvd wge=did */
- /* There is no remote port id in the IOCB? */
- /* Let this fall through and fail */
case CMD_IOCB_FCP_IBIDIR64_CR: /* bidirectional xfer */
case CMD_FCP_TSEND64_CX: /* Target mode send xfer-ready */
case CMD_FCP_TRSP64_CX: /* Target mode rcv */
@@ -6257,16 +6273,14 @@ lpfc_sli4_iocb2wqe(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq,
iocbq->iocb.ulpCommand);
return IOCB_ERROR;
break;
-
}
- bf_set(lpfc_wqe_gen_xri, &wqe->generic, xritag);
- bf_set(lpfc_wqe_gen_request_tag, &wqe->generic, iocbq->iotag);
- wqe->generic.abort_tag = abort_tag;
- bf_set(lpfc_wqe_gen_cmd_type, &wqe->generic, command_type);
- bf_set(lpfc_wqe_gen_command, &wqe->generic, cmnd);
- bf_set(lpfc_wqe_gen_class, &wqe->generic, iocbq->iocb.ulpClass);
- bf_set(lpfc_wqe_gen_cq_id, &wqe->generic, LPFC_WQE_CQ_ID_DEFAULT);
-
+ bf_set(wqe_xri_tag, &wqe->generic.wqe_com, xritag);
+ bf_set(wqe_reqtag, &wqe->generic.wqe_com, iocbq->iotag);
+ wqe->generic.wqe_com.abort_tag = abort_tag;
+ bf_set(wqe_cmd_type, &wqe->generic.wqe_com, command_type);
+ bf_set(wqe_cmnd, &wqe->generic.wqe_com, cmnd);
+ bf_set(wqe_class, &wqe->generic.wqe_com, iocbq->iocb.ulpClass);
+ bf_set(wqe_cqid, &wqe->generic.wqe_com, LPFC_WQE_CQ_ID_DEFAULT);
return 0;
}
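
The common WQE fields above are programmed through bf_set() accessors instead of raw word masking. As a rough sketch only (the authoritative <field>_SHIFT/_MASK/_WORD macro definitions live in lpfc_hw4.h and are not part of this patch), such a bitfield accessor pair typically looks like:

/* Illustrative approximation of the bf_set()/bf_get() helpers; the real
 * macros are defined in lpfc_hw4.h and may differ in detail. */
#define bf_set(name, ptr, value) \
	((ptr)->name##_WORD = ((((value) & name##_MASK) << name##_SHIFT) | \
			       ((ptr)->name##_WORD & ~(name##_MASK << name##_SHIFT))))
#define bf_get(name, ptr) \
	(((ptr)->name##_WORD >> name##_SHIFT) & name##_MASK)

This is what lets the patch drop the open-coded "wqe->words[10] &= 0xffff0000" style masking in favour of named fields such as wqe_ebde_cnt, wqe_lenloc and wqe_pu.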
@@ -7257,25 +7271,26 @@ lpfc_ignore_els_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
}
/**
- * lpfc_sli_issue_abort_iotag - Abort function for a command iocb
+ * lpfc_sli_abort_iotag_issue - Issue abort for a command iocb
* @phba: Pointer to HBA context object.
* @pring: Pointer to driver SLI ring object.
* @cmdiocb: Pointer to driver command iocb object.
*
- * This function issues an abort iocb for the provided command
- * iocb. This function is called with hbalock held.
- * The function returns 0 when it fails due to memory allocation
- * failure or when the command iocb is an abort request.
+ * This function issues an abort iocb for the provided command iocb down to
+ * the port. Unless the outstanding command iocb is itself an abort
+ * request, this function issues the abort unconditionally. This function is
+ * called with hbalock held. The function returns 0 when it fails due to
+ * memory allocation failure or when the command iocb is an abort request.
**/
-int
-lpfc_sli_issue_abort_iotag(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
+static int
+lpfc_sli_abort_iotag_issue(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
struct lpfc_iocbq *cmdiocb)
{
struct lpfc_vport *vport = cmdiocb->vport;
struct lpfc_iocbq *abtsiocbp;
IOCB_t *icmd = NULL;
IOCB_t *iabt = NULL;
- int retval = IOCB_ERROR;
+ int retval;
/*
* There are certain command types we don't want to abort. And we
@@ -7288,18 +7303,6 @@ lpfc_sli_issue_abort_iotag(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
(cmdiocb->iocb_flag & LPFC_DRIVER_ABORTED) != 0)
return 0;
- /* If we're unloading, don't abort iocb on the ELS ring, but change the
- * callback so that nothing happens when it finishes.
- */
- if ((vport->load_flag & FC_UNLOADING) &&
- (pring->ringno == LPFC_ELS_RING)) {
- if (cmdiocb->iocb_flag & LPFC_IO_FABRIC)
- cmdiocb->fabric_iocb_cmpl = lpfc_ignore_els_cmpl;
- else
- cmdiocb->iocb_cmpl = lpfc_ignore_els_cmpl;
- goto abort_iotag_exit;
- }
-
/* issue ABTS for this IOCB based on iotag */
abtsiocbp = __lpfc_sli_get_iocbq(phba);
if (abtsiocbp == NULL)
@@ -7344,6 +7347,63 @@ lpfc_sli_issue_abort_iotag(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
if (retval)
__lpfc_sli_release_iocbq(phba, abtsiocbp);
+
+ /*
+ * Caller to this routine should check for IOCB_ERROR
+ * and handle it properly. This routine no longer removes
+ * iocb off txcmplq and call compl in case of IOCB_ERROR.
+ */
+ return retval;
+}
+
+/**
+ * lpfc_sli_issue_abort_iotag - Abort function for a command iocb
+ * @phba: Pointer to HBA context object.
+ * @pring: Pointer to driver SLI ring object.
+ * @cmdiocb: Pointer to driver command iocb object.
+ *
+ * This function issues an abort iocb for the provided command iocb. In case
+ * of unloading, the abort iocb will not be issued to commands on the ELS
+ * ring. Instead, the callback function is changed for those commands
+ * so that nothing happens when they finish. This function is called with
+ * hbalock held. The function returns 0 when the command iocb is an abort
+ * request.
+ **/
+int
+lpfc_sli_issue_abort_iotag(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
+ struct lpfc_iocbq *cmdiocb)
+{
+ struct lpfc_vport *vport = cmdiocb->vport;
+ int retval = IOCB_ERROR;
+ IOCB_t *icmd = NULL;
+
+ /*
+ * There are certain command types we don't want to abort. And we
+ * don't want to abort commands that are already in the process of
+ * being aborted.
+ */
+ icmd = &cmdiocb->iocb;
+ if (icmd->ulpCommand == CMD_ABORT_XRI_CN ||
+ icmd->ulpCommand == CMD_CLOSE_XRI_CN ||
+ (cmdiocb->iocb_flag & LPFC_DRIVER_ABORTED) != 0)
+ return 0;
+
+ /*
+ * If we're unloading, don't abort iocb on the ELS ring, but change
+ * the callback so that nothing happens when it finishes.
+ */
+ if ((vport->load_flag & FC_UNLOADING) &&
+ (pring->ringno == LPFC_ELS_RING)) {
+ if (cmdiocb->iocb_flag & LPFC_IO_FABRIC)
+ cmdiocb->fabric_iocb_cmpl = lpfc_ignore_els_cmpl;
+ else
+ cmdiocb->iocb_cmpl = lpfc_ignore_els_cmpl;
+ goto abort_iotag_exit;
+ }
+
+ /* Now, we try to issue the abort to the cmdiocb out */
+ retval = lpfc_sli_abort_iotag_issue(phba, pring, cmdiocb);
+
abort_iotag_exit:
/*
* Caller to this routine should check for IOCB_ERROR
@@ -7354,6 +7414,62 @@ abort_iotag_exit:
}
/**
+ * lpfc_sli_iocb_ring_abort - Unconditionally abort all iocbs on an iocb ring
+ * @phba: Pointer to HBA context object.
+ * @pring: Pointer to driver SLI ring object.
+ *
+ * This function aborts all iocbs in the given ring and frees all the iocb
+ * objects in txq. This function issues abort iocbs unconditionally for all
+ * the iocb commands in txcmplq. The iocbs in the txcmplq are not guaranteed
+ * to complete before the return of this function. The caller is not required
+ * to hold any locks.
+ **/
+static void
+lpfc_sli_iocb_ring_abort(struct lpfc_hba *phba, struct lpfc_sli_ring *pring)
+{
+ LIST_HEAD(completions);
+ struct lpfc_iocbq *iocb, *next_iocb;
+
+ if (pring->ringno == LPFC_ELS_RING)
+ lpfc_fabric_abort_hba(phba);
+
+ spin_lock_irq(&phba->hbalock);
+
+ /* Take off all the iocbs on txq for cancelling */
+ list_splice_init(&pring->txq, &completions);
+ pring->txq_cnt = 0;
+
+ /* Next issue ABTS for everything on the txcmplq */
+ list_for_each_entry_safe(iocb, next_iocb, &pring->txcmplq, list)
+ lpfc_sli_abort_iotag_issue(phba, pring, iocb);
+
+ spin_unlock_irq(&phba->hbalock);
+
+ /* Cancel all the IOCBs from the completions list */
+ lpfc_sli_cancel_iocbs(phba, &completions, IOSTAT_LOCAL_REJECT,
+ IOERR_SLI_ABORTED);
+}
+
+/**
+ * lpfc_sli_hba_iocb_abort - Abort all iocbs to an hba.
+ * @phba: pointer to lpfc HBA data structure.
+ *
+ * This routine will abort all pending and outstanding iocbs to an HBA.
+ **/
+void
+lpfc_sli_hba_iocb_abort(struct lpfc_hba *phba)
+{
+ struct lpfc_sli *psli = &phba->sli;
+ struct lpfc_sli_ring *pring;
+ int i;
+
+ for (i = 0; i < psli->num_rings; i++) {
+ pring = &psli->ring[i];
+ lpfc_sli_iocb_ring_abort(phba, pring);
+ }
+}
+
+/**
* lpfc_sli_validate_fcp_iocb - find commands associated with a vport or LUN
* @iocbq: Pointer to driver iocb object.
* @vport: Pointer to driver virtual port object.
@@ -12242,13 +12358,15 @@ lpfc_sli4_fcf_scan_read_fcf_rec(struct lpfc_hba *phba, uint16_t fcf_index)
/* Issue the mailbox command asynchronously */
mboxq->vport = phba->pport;
mboxq->mbox_cmpl = lpfc_mbx_cmpl_fcf_scan_read_fcf_rec;
+
+ spin_lock_irq(&phba->hbalock);
+ phba->hba_flag |= FCF_TS_INPROG;
+ spin_unlock_irq(&phba->hbalock);
+
rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_NOWAIT);
if (rc == MBX_NOT_FINISHED)
error = -EIO;
else {
- spin_lock_irq(&phba->hbalock);
- phba->hba_flag |= FCF_DISC_INPROGRESS;
- spin_unlock_irq(&phba->hbalock);
/* Reset eligible FCF count for new scan */
if (fcf_index == LPFC_FCOE_FCF_GET_FIRST)
phba->fcf.eligible_fcf_cnt = 0;
@@ -12258,21 +12376,21 @@ fail_fcf_scan:
if (error) {
if (mboxq)
lpfc_sli4_mbox_cmd_free(phba, mboxq);
- /* FCF scan failed, clear FCF_DISC_INPROGRESS flag */
+ /* FCF scan failed, clear FCF_TS_INPROG flag */
spin_lock_irq(&phba->hbalock);
- phba->hba_flag &= ~FCF_DISC_INPROGRESS;
+ phba->hba_flag &= ~FCF_TS_INPROG;
spin_unlock_irq(&phba->hbalock);
}
return error;
}
/**
- * lpfc_sli4_fcf_rr_read_fcf_rec - Read hba fcf record for round robin fcf.
+ * lpfc_sli4_fcf_rr_read_fcf_rec - Read hba fcf record for roundrobin fcf.
* @phba: pointer to lpfc hba data structure.
* @fcf_index: FCF table entry offset.
*
* This routine is invoked to read an FCF record indicated by @fcf_index
- * and to use it for FLOGI round robin FCF failover.
+ * and to use it for FLOGI roundrobin FCF failover.
*
 * Return 0 if the mailbox command is submitted successfully, non-zero
* otherwise.
@@ -12318,7 +12436,7 @@ fail_fcf_read:
* @fcf_index: FCF table entry offset.
*
* This routine is invoked to read an FCF record indicated by @fcf_index to
- * determine whether it's eligible for FLOGI round robin failover list.
+ * determine whether it's eligible for FLOGI roundrobin failover list.
*
 * Return 0 if the mailbox command is submitted successfully, non-zero
* otherwise.
@@ -12364,7 +12482,7 @@ fail_fcf_read:
*
* This routine is to get the next eligible FCF record index in a round
* robin fashion. If the next eligible FCF record index equals to the
- * initial round robin FCF record index, LPFC_FCOE_FCF_NEXT_NONE (0xFFFF)
+ * initial roundrobin FCF record index, LPFC_FCOE_FCF_NEXT_NONE (0xFFFF)
* shall be returned, otherwise, the next eligible FCF record's index
* shall be returned.
**/
@@ -12392,28 +12510,10 @@ lpfc_sli4_fcf_rr_next_index_get(struct lpfc_hba *phba)
return LPFC_FCOE_FCF_NEXT_NONE;
}
- /* Check roundrobin failover index bmask stop condition */
- if (next_fcf_index == phba->fcf.fcf_rr_init_indx) {
- if (!(phba->fcf.fcf_flag & FCF_REDISC_RRU)) {
- lpfc_printf_log(phba, KERN_WARNING, LOG_FIP,
- "2847 Round robin failover FCF index "
- "search hit stop condition:x%x\n",
- next_fcf_index);
- return LPFC_FCOE_FCF_NEXT_NONE;
- }
- /* The roundrobin failover index bmask updated, start over */
- lpfc_printf_log(phba, KERN_INFO, LOG_FIP,
- "2848 Round robin failover FCF index bmask "
- "updated, start over\n");
- spin_lock_irq(&phba->hbalock);
- phba->fcf.fcf_flag &= ~FCF_REDISC_RRU;
- spin_unlock_irq(&phba->hbalock);
- return phba->fcf.fcf_rr_init_indx;
- }
-
lpfc_printf_log(phba, KERN_INFO, LOG_FIP,
- "2845 Get next round robin failover "
- "FCF index x%x\n", next_fcf_index);
+ "2845 Get next roundrobin failover FCF (x%x)\n",
+ next_fcf_index);
+
return next_fcf_index;
}
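
Since the eligible FCF indexes live in the fcf_rr_bmask bitmap, the roundrobin walk is essentially a find_next_bit() search that wraps to the start of the bitmap once before giving up. A minimal sketch of that wrap-around lookup, with invented names (the driver's real routine also handles the in-use FCF and logs message 2845 as shown above):

/* Hedged sketch of a wrap-around roundrobin bitmap search; the function and
 * parameter names here are illustrative, not the driver's. */
static uint16_t rr_next_index(unsigned long *bmask, uint16_t max, uint16_t cur)
{
	unsigned long next;

	/* search forward from the entry after the current one */
	next = find_next_bit(bmask, max, cur + 1);
	if (next >= max)
		/* wrap around and retry from index 0 */
		next = find_next_bit(bmask, max, 0);
	/* LPFC_FCOE_FCF_NEXT_NONE (0xFFFF) means no eligible FCF was found */
	return (next >= max) ? 0xFFFF : (uint16_t)next;
}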
@@ -12422,7 +12522,7 @@ lpfc_sli4_fcf_rr_next_index_get(struct lpfc_hba *phba)
* @phba: pointer to lpfc hba data structure.
*
* This routine sets the FCF record index in to the eligible bmask for
- * round robin failover search. It checks to make sure that the index
+ * roundrobin failover search. It checks to make sure that the index
* does not go beyond the range of the driver allocated bmask dimension
* before setting the bit.
*
@@ -12434,22 +12534,16 @@ lpfc_sli4_fcf_rr_index_set(struct lpfc_hba *phba, uint16_t fcf_index)
{
if (fcf_index >= LPFC_SLI4_FCF_TBL_INDX_MAX) {
lpfc_printf_log(phba, KERN_ERR, LOG_FIP,
- "2610 HBA FCF index reached driver's "
- "book keeping dimension: fcf_index:%d, "
- "driver_bmask_max:%d\n",
+ "2610 FCF (x%x) reached driver's book "
+ "keeping dimension:x%x\n",
fcf_index, LPFC_SLI4_FCF_TBL_INDX_MAX);
return -EINVAL;
}
/* Set the eligible FCF record index bmask */
set_bit(fcf_index, phba->fcf.fcf_rr_bmask);
- /* Set the roundrobin index bmask updated */
- spin_lock_irq(&phba->hbalock);
- phba->fcf.fcf_flag |= FCF_REDISC_RRU;
- spin_unlock_irq(&phba->hbalock);
-
lpfc_printf_log(phba, KERN_INFO, LOG_FIP,
- "2790 Set FCF index x%x to round robin failover "
+ "2790 Set FCF (x%x) to roundrobin FCF failover "
"bmask\n", fcf_index);
return 0;
@@ -12460,7 +12554,7 @@ lpfc_sli4_fcf_rr_index_set(struct lpfc_hba *phba, uint16_t fcf_index)
* @phba: pointer to lpfc hba data structure.
*
* This routine clears the FCF record index from the eligible bmask for
- * round robin failover search. It checks to make sure that the index
+ * roundrobin failover search. It checks to make sure that the index
* does not go beyond the range of the driver allocated bmask dimension
* before clearing the bit.
**/
@@ -12469,9 +12563,8 @@ lpfc_sli4_fcf_rr_index_clear(struct lpfc_hba *phba, uint16_t fcf_index)
{
if (fcf_index >= LPFC_SLI4_FCF_TBL_INDX_MAX) {
lpfc_printf_log(phba, KERN_ERR, LOG_FIP,
- "2762 HBA FCF index goes beyond driver's "
- "book keeping dimension: fcf_index:%d, "
- "driver_bmask_max:%d\n",
+ "2762 FCF (x%x) reached driver's book "
+ "keeping dimension:x%x\n",
fcf_index, LPFC_SLI4_FCF_TBL_INDX_MAX);
return;
}
@@ -12479,7 +12572,7 @@ lpfc_sli4_fcf_rr_index_clear(struct lpfc_hba *phba, uint16_t fcf_index)
clear_bit(fcf_index, phba->fcf.fcf_rr_bmask);
lpfc_printf_log(phba, KERN_INFO, LOG_FIP,
- "2791 Clear FCF index x%x from round robin failover "
+ "2791 Clear FCF (x%x) from roundrobin failover "
"bmask\n", fcf_index);
}
@@ -12530,8 +12623,7 @@ lpfc_mbx_cmpl_redisc_fcf_table(struct lpfc_hba *phba, LPFC_MBOXQ_t *mbox)
}
} else {
lpfc_printf_log(phba, KERN_INFO, LOG_FIP,
- "2775 Start FCF rediscovery quiescent period "
- "wait timer before scaning FCF table\n");
+ "2775 Start FCF rediscover quiescent timer\n");
/*
* Start FCF rediscovery wait timer for pending FCF
* before rescan FCF record table.
diff --git a/drivers/scsi/lpfc/lpfc_sli4.h b/drivers/scsi/lpfc/lpfc_sli4.h
index a0ca572ec28b..c4483feb8b71 100644
--- a/drivers/scsi/lpfc/lpfc_sli4.h
+++ b/drivers/scsi/lpfc/lpfc_sli4.h
@@ -19,10 +19,16 @@
*******************************************************************/
#define LPFC_ACTIVE_MBOX_WAIT_CNT 100
+#define LPFC_XRI_EXCH_BUSY_WAIT_TMO 10000
+#define LPFC_XRI_EXCH_BUSY_WAIT_T1 10
+#define LPFC_XRI_EXCH_BUSY_WAIT_T2 30000
#define LPFC_RELEASE_NOTIFICATION_INTERVAL 32
#define LPFC_GET_QE_REL_INT 32
#define LPFC_RPI_LOW_WATER_MARK 10
+#define LPFC_UNREG_FCF 1
+#define LPFC_SKIP_UNREG_FCF 0
+
/* Amount of time in seconds for waiting FCF rediscovery to complete */
#define LPFC_FCF_REDISCOVER_WAIT_TMO 2000 /* msec */
@@ -163,9 +169,8 @@ struct lpfc_fcf {
#define FCF_REDISC_PEND 0x80 /* FCF rediscovery pending */
#define FCF_REDISC_EVT 0x100 /* FCF rediscovery event to worker thread */
#define FCF_REDISC_FOV 0x200 /* Post FCF rediscovery fast failover */
-#define FCF_REDISC_RRU 0x400 /* Roundrobin bitmap updated */
+#define FCF_REDISC_PROG (FCF_REDISC_PEND | FCF_REDISC_EVT)
uint32_t addr_mode;
- uint16_t fcf_rr_init_indx;
uint32_t eligible_fcf_cnt;
struct lpfc_fcf_rec current_rec;
struct lpfc_fcf_rec failover_rec;
diff --git a/drivers/scsi/lpfc/lpfc_version.h b/drivers/scsi/lpfc/lpfc_version.h
index f93120e4c796..7a1b5b112a0b 100644
--- a/drivers/scsi/lpfc/lpfc_version.h
+++ b/drivers/scsi/lpfc/lpfc_version.h
@@ -18,7 +18,7 @@
* included with this package. *
*******************************************************************/
-#define LPFC_DRIVER_VERSION "8.3.17"
+#define LPFC_DRIVER_VERSION "8.3.18"
#define LPFC_DRIVER_NAME "lpfc"
#define LPFC_SP_DRIVER_HANDLER_NAME "lpfc:sp"
#define LPFC_FP_DRIVER_HANDLER_NAME "lpfc:fp"
diff --git a/drivers/scsi/mac53c94.c b/drivers/scsi/mac53c94.c
index 3ddb4dc62d5d..6c42dff0f4d3 100644
--- a/drivers/scsi/mac53c94.c
+++ b/drivers/scsi/mac53c94.c
@@ -66,7 +66,7 @@ static void cmd_done(struct fsc_state *, int result);
static void set_dma_cmds(struct fsc_state *, struct scsi_cmnd *);
-static int mac53c94_queue(struct scsi_cmnd *cmd, void (*done)(struct scsi_cmnd *))
+static int mac53c94_queue_lck(struct scsi_cmnd *cmd, void (*done)(struct scsi_cmnd *))
{
struct fsc_state *state;
@@ -99,6 +99,8 @@ static int mac53c94_queue(struct scsi_cmnd *cmd, void (*done)(struct scsi_cmnd *
return 0;
}
+static DEF_SCSI_QCMD(mac53c94_queue)
+
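
This is the first of many hunks below (megaraid, megaraid_mbox, megaraid_sas, mesh, mpt2sas, ncr53c8xx, nsp32, nsp_cs, sym53c500, pmcraid, ppa, ps3rom, qla1280, ...) applying the same host-lock push-down conversion: the driver's queuecommand routine keeps its old two-argument body under a *_lck name, and DEF_SCSI_QCMD generates a locked wrapper with the new (struct Scsi_Host *, struct scsi_cmnd *) prototype that the corresponding headers are switched to. As a hedged approximation of what the macro expands to (see include/scsi/scsi_host.h for the authoritative definition):

/* Approximate expansion of DEF_SCSI_QCMD(func_name); illustrative only. */
#define DEF_SCSI_QCMD(func_name)					\
	int func_name(struct Scsi_Host *shost, struct scsi_cmnd *cmd)	\
	{								\
		unsigned long irq_flags;				\
		int rc;							\
		spin_lock_irqsave(shost->host_lock, irq_flags);		\
		rc = func_name##_lck(cmd, cmd->scsi_done);		\
		spin_unlock_irqrestore(shost->host_lock, irq_flags);	\
		return rc;						\
	}

So each *_lck routine still runs with the host lock held, while the locking itself is centralized in the generated wrapper rather than in the SCSI mid-layer.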
static int mac53c94_host_reset(struct scsi_cmnd *cmd)
{
struct fsc_state *state = (struct fsc_state *) cmd->device->host->hostdata;
diff --git a/drivers/scsi/megaraid.c b/drivers/scsi/megaraid.c
index 7ceb5cf12c6b..9aa048525eb2 100644
--- a/drivers/scsi/megaraid.c
+++ b/drivers/scsi/megaraid.c
@@ -366,7 +366,7 @@ mega_runpendq(adapter_t *adapter)
* The command queuing entry point for the mid-layer.
*/
static int
-megaraid_queue(Scsi_Cmnd *scmd, void (*done)(Scsi_Cmnd *))
+megaraid_queue_lck(Scsi_Cmnd *scmd, void (*done)(Scsi_Cmnd *))
{
adapter_t *adapter;
scb_t *scb;
@@ -409,6 +409,8 @@ megaraid_queue(Scsi_Cmnd *scmd, void (*done)(Scsi_Cmnd *))
return busy;
}
+static DEF_SCSI_QCMD(megaraid_queue)
+
/**
* mega_allocate_scb()
* @adapter - pointer to our soft state
@@ -4456,7 +4458,7 @@ mega_internal_command(adapter_t *adapter, megacmd_t *mc, mega_passthru *pthru)
scb->idx = CMDID_INT_CMDS;
- megaraid_queue(scmd, mega_internal_done);
+ megaraid_queue_lck(scmd, mega_internal_done);
wait_for_completion(&adapter->int_waitq);
diff --git a/drivers/scsi/megaraid.h b/drivers/scsi/megaraid.h
index 2b4a048cadf1..f5644745e24e 100644
--- a/drivers/scsi/megaraid.h
+++ b/drivers/scsi/megaraid.h
@@ -987,7 +987,7 @@ static int mega_query_adapter(adapter_t *);
static int issue_scb(adapter_t *, scb_t *);
static int mega_setup_mailbox(adapter_t *);
-static int megaraid_queue (Scsi_Cmnd *, void (*)(Scsi_Cmnd *));
+static int megaraid_queue (struct Scsi_Host *, struct scsi_cmnd *);
static scb_t * mega_build_cmd(adapter_t *, Scsi_Cmnd *, int *);
static void __mega_runpendq(adapter_t *);
static int issue_scb_block(adapter_t *, u_char *);
diff --git a/drivers/scsi/megaraid/megaraid_mbox.c b/drivers/scsi/megaraid/megaraid_mbox.c
index a7810a106b37..5708cb27d078 100644
--- a/drivers/scsi/megaraid/megaraid_mbox.c
+++ b/drivers/scsi/megaraid/megaraid_mbox.c
@@ -113,8 +113,7 @@ static int megaraid_mbox_fire_sync_cmd(adapter_t *);
static void megaraid_mbox_display_scb(adapter_t *, scb_t *);
static void megaraid_mbox_setup_device_map(adapter_t *);
-static int megaraid_queue_command(struct scsi_cmnd *,
- void (*)(struct scsi_cmnd *));
+static int megaraid_queue_command(struct Scsi_Host *, struct scsi_cmnd *);
static scb_t *megaraid_mbox_build_cmd(adapter_t *, struct scsi_cmnd *, int *);
static void megaraid_mbox_runpendq(adapter_t *, scb_t *);
static void megaraid_mbox_prepare_pthru(adapter_t *, scb_t *,
@@ -1484,7 +1483,7 @@ mbox_post_cmd(adapter_t *adapter, scb_t *scb)
* Queue entry point for mailbox based controllers.
*/
static int
-megaraid_queue_command(struct scsi_cmnd *scp, void (*done)(struct scsi_cmnd *))
+megaraid_queue_command_lck(struct scsi_cmnd *scp, void (*done)(struct scsi_cmnd *))
{
adapter_t *adapter;
scb_t *scb;
@@ -1513,6 +1512,8 @@ megaraid_queue_command(struct scsi_cmnd *scp, void (*done)(struct scsi_cmnd *))
return if_busy;
}
+static DEF_SCSI_QCMD(megaraid_queue_command)
+
/**
* megaraid_mbox_build_cmd - transform the mid-layer scsi commands
* @adapter : controller's soft state
diff --git a/drivers/scsi/megaraid/megaraid_sas.c b/drivers/scsi/megaraid/megaraid_sas.c
index d3c9cdee292b..7451bc096a01 100644
--- a/drivers/scsi/megaraid/megaraid_sas.c
+++ b/drivers/scsi/megaraid/megaraid_sas.c
@@ -10,7 +10,7 @@
* 2 of the License, or (at your option) any later version.
*
* FILE : megaraid_sas.c
- * Version : v00.00.04.17.1-rc1
+ * Version : v00.00.04.31-rc1
*
* Authors:
* (email-id : megaraidlinux@lsi.com)
@@ -56,6 +56,15 @@ module_param_named(poll_mode_io, poll_mode_io, int, 0);
MODULE_PARM_DESC(poll_mode_io,
"Complete cmds from IO path, (default=0)");
+/*
+ * Number of sectors per IO command
+ * Will be set in megasas_init_mfi if user does not provide
+ */
+static unsigned int max_sectors;
+module_param_named(max_sectors, max_sectors, int, 0);
+MODULE_PARM_DESC(max_sectors,
+ "Maximum number of sectors per IO command");
+
MODULE_LICENSE("GPL");
MODULE_VERSION(MEGASAS_VERSION);
MODULE_AUTHOR("megaraidlinux@lsi.com");
@@ -103,6 +112,7 @@ static int megasas_poll_wait_aen;
static DECLARE_WAIT_QUEUE_HEAD(megasas_poll_wait);
static u32 support_poll_for_event;
static u32 megasas_dbg_lvl;
+static u32 support_device_change;
/* define lock for aen poll */
spinlock_t poll_aen_lock;
@@ -718,6 +728,10 @@ static int
megasas_check_reset_gen2(struct megasas_instance *instance,
struct megasas_register_set __iomem *regs)
{
+ if (instance->adprecovery != MEGASAS_HBA_OPERATIONAL) {
+ return 1;
+ }
+
return 0;
}
@@ -930,6 +944,7 @@ megasas_make_sgl_skinny(struct megasas_instance *instance,
mfi_sgl->sge_skinny[i].length = sg_dma_len(os_sgl);
mfi_sgl->sge_skinny[i].phys_addr =
sg_dma_address(os_sgl);
+ mfi_sgl->sge_skinny[i].flag = 0;
}
}
return sge_count;
@@ -1319,7 +1334,7 @@ megasas_dump_pending_frames(struct megasas_instance *instance)
* @done: Callback entry point
*/
static int
-megasas_queue_command(struct scsi_cmnd *scmd, void (*done) (struct scsi_cmnd *))
+megasas_queue_command_lck(struct scsi_cmnd *scmd, void (*done) (struct scsi_cmnd *))
{
u32 frame_count;
struct megasas_cmd *cmd;
@@ -1402,6 +1417,8 @@ megasas_queue_command(struct scsi_cmnd *scmd, void (*done) (struct scsi_cmnd *))
return 0;
}
+static DEF_SCSI_QCMD(megasas_queue_command)
+
static struct megasas_instance *megasas_lookup_instance(u16 host_no)
{
int i;
@@ -1557,6 +1574,28 @@ static void megasas_complete_cmd_dpc(unsigned long instance_addr)
}
}
+static void
+megasas_internal_reset_defer_cmds(struct megasas_instance *instance);
+
+static void
+process_fw_state_change_wq(struct work_struct *work);
+
+void megasas_do_ocr(struct megasas_instance *instance)
+{
+ if ((instance->pdev->device == PCI_DEVICE_ID_LSI_SAS1064R) ||
+ (instance->pdev->device == PCI_DEVICE_ID_DELL_PERC5) ||
+ (instance->pdev->device == PCI_DEVICE_ID_LSI_VERDE_ZCR)) {
+ *instance->consumer = MEGASAS_ADPRESET_INPROG_SIGN;
+ }
+ instance->instancet->disable_intr(instance->reg_set);
+ instance->adprecovery = MEGASAS_ADPRESET_SM_INFAULT;
+ instance->issuepend_done = 0;
+
+ atomic_set(&instance->fw_outstanding, 0);
+ megasas_internal_reset_defer_cmds(instance);
+ process_fw_state_change_wq(&instance->work_init);
+}
+
/**
* megasas_wait_for_outstanding - Wait for all outstanding cmds
* @instance: Adapter soft state
@@ -1574,6 +1613,8 @@ static int megasas_wait_for_outstanding(struct megasas_instance *instance)
unsigned long flags;
struct list_head clist_local;
struct megasas_cmd *reset_cmd;
+ u32 fw_state;
+ u8 kill_adapter_flag;
spin_lock_irqsave(&instance->hba_lock, flags);
adprecovery = instance->adprecovery;
@@ -1659,7 +1700,45 @@ static int megasas_wait_for_outstanding(struct megasas_instance *instance)
msleep(1000);
}
- if (atomic_read(&instance->fw_outstanding)) {
+ i = 0;
+ kill_adapter_flag = 0;
+ do {
+ fw_state = instance->instancet->read_fw_status_reg(
+ instance->reg_set) & MFI_STATE_MASK;
+ if ((fw_state == MFI_STATE_FAULT) &&
+ (instance->disableOnlineCtrlReset == 0)) {
+ if (i == 3) {
+ kill_adapter_flag = 2;
+ break;
+ }
+ megasas_do_ocr(instance);
+ kill_adapter_flag = 1;
+
+ /* wait for 1 secs to let FW finish the pending cmds */
+ msleep(1000);
+ }
+ i++;
+ } while (i <= 3);
+
+ if (atomic_read(&instance->fw_outstanding) &&
+ !kill_adapter_flag) {
+ if (instance->disableOnlineCtrlReset == 0) {
+
+ megasas_do_ocr(instance);
+
+ /* wait for 5 secs to let FW finish the pending cmds */
+ for (i = 0; i < wait_time; i++) {
+ int outstanding =
+ atomic_read(&instance->fw_outstanding);
+ if (!outstanding)
+ return SUCCESS;
+ msleep(1000);
+ }
+ }
+ }
+
+ if (atomic_read(&instance->fw_outstanding) ||
+ (kill_adapter_flag == 2)) {
printk(KERN_NOTICE "megaraid_sas: pending cmds after reset\n");
/*
* Send signal to FW to stop processing any pending cmds.
@@ -2669,6 +2748,7 @@ static int megasas_create_frame_pool(struct megasas_instance *instance)
return -ENOMEM;
}
+ memset(cmd->frame, 0, total_sz);
cmd->frame->io.context = cmd->index;
cmd->frame->io.pad_0 = 0;
}
@@ -3585,6 +3665,27 @@ static int megasas_io_attach(struct megasas_instance *instance)
instance->max_fw_cmds - MEGASAS_INT_CMDS;
host->this_id = instance->init_id;
host->sg_tablesize = instance->max_num_sge;
+ /*
+ * Check if the module parameter value for max_sectors can be used
+ */
+ if (max_sectors && max_sectors < instance->max_sectors_per_req)
+ instance->max_sectors_per_req = max_sectors;
+ else {
+ if (max_sectors) {
+ if (((instance->pdev->device ==
+ PCI_DEVICE_ID_LSI_SAS1078GEN2) ||
+ (instance->pdev->device ==
+ PCI_DEVICE_ID_LSI_SAS0079GEN2)) &&
+ (max_sectors <= MEGASAS_MAX_SECTORS)) {
+ instance->max_sectors_per_req = max_sectors;
+ } else {
+ printk(KERN_INFO "megasas: max_sectors should be > 0"
+ "and <= %d (or < 1MB for GEN2 controller)\n",
+ instance->max_sectors_per_req);
+ }
+ }
+ }
+
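
For reference, the new parameter is supplied at module load time in the usual way, for example "modprobe megaraid_sas max_sectors=1024" (the value shown is only an example); the checks above still clamp it against the per-controller limit before it is handed to the SCSI host as host->max_sectors.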
host->max_sectors = instance->max_sectors_per_req;
host->cmd_per_lun = 128;
host->max_channel = MEGASAS_MAX_CHANNELS - 1;
@@ -4658,6 +4759,15 @@ megasas_sysfs_show_support_poll_for_event(struct device_driver *dd, char *buf)
static DRIVER_ATTR(support_poll_for_event, S_IRUGO,
megasas_sysfs_show_support_poll_for_event, NULL);
+ static ssize_t
+megasas_sysfs_show_support_device_change(struct device_driver *dd, char *buf)
+{
+ return sprintf(buf, "%u\n", support_device_change);
+}
+
+static DRIVER_ATTR(support_device_change, S_IRUGO,
+ megasas_sysfs_show_support_device_change, NULL);
+
static ssize_t
megasas_sysfs_show_dbg_lvl(struct device_driver *dd, char *buf)
{
@@ -4978,6 +5088,7 @@ static int __init megasas_init(void)
MEGASAS_EXT_VERSION);
support_poll_for_event = 2;
+ support_device_change = 1;
memset(&megasas_mgmt_info, 0, sizeof(megasas_mgmt_info));
@@ -5026,8 +5137,17 @@ static int __init megasas_init(void)
if (rval)
goto err_dcf_poll_mode_io;
+ rval = driver_create_file(&megasas_pci_driver.driver,
+ &driver_attr_support_device_change);
+ if (rval)
+ goto err_dcf_support_device_change;
+
return rval;
+err_dcf_support_device_change:
+ driver_remove_file(&megasas_pci_driver.driver,
+ &driver_attr_poll_mode_io);
+
err_dcf_poll_mode_io:
driver_remove_file(&megasas_pci_driver.driver,
&driver_attr_dbg_lvl);
@@ -5058,6 +5178,10 @@ static void __exit megasas_exit(void)
driver_remove_file(&megasas_pci_driver.driver,
&driver_attr_dbg_lvl);
driver_remove_file(&megasas_pci_driver.driver,
+ &driver_attr_support_poll_for_event);
+ driver_remove_file(&megasas_pci_driver.driver,
+ &driver_attr_support_device_change);
+ driver_remove_file(&megasas_pci_driver.driver,
&driver_attr_release_date);
driver_remove_file(&megasas_pci_driver.driver, &driver_attr_version);
diff --git a/drivers/scsi/megaraid/megaraid_sas.h b/drivers/scsi/megaraid/megaraid_sas.h
index 16a4f68a34b0..ad16f5e60046 100644
--- a/drivers/scsi/megaraid/megaraid_sas.h
+++ b/drivers/scsi/megaraid/megaraid_sas.h
@@ -18,9 +18,9 @@
/*
* MegaRAID SAS Driver meta data
*/
-#define MEGASAS_VERSION "00.00.04.17.1-rc1"
-#define MEGASAS_RELDATE "Oct. 29, 2009"
-#define MEGASAS_EXT_VERSION "Thu. Oct. 29, 11:41:51 PST 2009"
+#define MEGASAS_VERSION "00.00.04.31-rc1"
+#define MEGASAS_RELDATE "May 3, 2010"
+#define MEGASAS_EXT_VERSION "Mon. May 3, 11:41:51 PST 2010"
/*
* Device IDs
@@ -706,6 +706,7 @@ struct megasas_ctrl_info {
#define MEGASAS_MAX_LD_IDS (MEGASAS_MAX_LD_CHANNELS * \
MEGASAS_MAX_DEV_PER_CHANNEL)
+#define MEGASAS_MAX_SECTORS (2*1024)
#define MEGASAS_DBG_LVL 1
#define MEGASAS_FW_BUSY 1
diff --git a/drivers/scsi/mesh.c b/drivers/scsi/mesh.c
index 1f784fde2510..197aa1b3f0f3 100644
--- a/drivers/scsi/mesh.c
+++ b/drivers/scsi/mesh.c
@@ -1627,7 +1627,7 @@ static void cmd_complete(struct mesh_state *ms)
* Called by midlayer with host locked to queue a new
* request
*/
-static int mesh_queue(struct scsi_cmnd *cmd, void (*done)(struct scsi_cmnd *))
+static int mesh_queue_lck(struct scsi_cmnd *cmd, void (*done)(struct scsi_cmnd *))
{
struct mesh_state *ms;
@@ -1648,6 +1648,8 @@ static int mesh_queue(struct scsi_cmnd *cmd, void (*done)(struct scsi_cmnd *))
return 0;
}
+static DEF_SCSI_QCMD(mesh_queue)
+
/*
* Called to handle interrupts, either call by the interrupt
* handler (do_mesh_interrupt) or by other functions in
diff --git a/drivers/scsi/mpt2sas/mpt2sas_scsih.c b/drivers/scsi/mpt2sas/mpt2sas_scsih.c
index 16e99b686354..1a96a00418a4 100644
--- a/drivers/scsi/mpt2sas/mpt2sas_scsih.c
+++ b/drivers/scsi/mpt2sas/mpt2sas_scsih.c
@@ -3315,7 +3315,7 @@ _scsih_eedp_error_handling(struct scsi_cmnd *scmd, u16 ioc_status)
* SCSI_MLQUEUE_HOST_BUSY if the entire host queue is full
*/
static int
-_scsih_qcmd(struct scsi_cmnd *scmd, void (*done)(struct scsi_cmnd *))
+_scsih_qcmd_lck(struct scsi_cmnd *scmd, void (*done)(struct scsi_cmnd *))
{
struct MPT2SAS_ADAPTER *ioc = shost_priv(scmd->device->host);
struct MPT2SAS_DEVICE *sas_device_priv_data;
@@ -3441,6 +3441,8 @@ _scsih_qcmd(struct scsi_cmnd *scmd, void (*done)(struct scsi_cmnd *))
return SCSI_MLQUEUE_HOST_BUSY;
}
+static DEF_SCSI_QCMD(_scsih_qcmd)
+
/**
* _scsih_normalize_sense - normalize descriptor and fixed format sense data
* @sense_buffer: sense data returned by target
diff --git a/drivers/scsi/ncr53c8xx.c b/drivers/scsi/ncr53c8xx.c
index d013a2aa2fd5..46cc3825638d 100644
--- a/drivers/scsi/ncr53c8xx.c
+++ b/drivers/scsi/ncr53c8xx.c
@@ -8029,7 +8029,7 @@ static int ncr53c8xx_slave_configure(struct scsi_device *device)
return 0;
}
-static int ncr53c8xx_queue_command (struct scsi_cmnd *cmd, void (* done)(struct scsi_cmnd *))
+static int ncr53c8xx_queue_command_lck (struct scsi_cmnd *cmd, void (*done)(struct scsi_cmnd *))
{
struct ncb *np = ((struct host_data *) cmd->device->host->hostdata)->ncb;
unsigned long flags;
@@ -8068,6 +8068,8 @@ printk("ncr53c8xx : command successfully queued\n");
return sts;
}
+static DEF_SCSI_QCMD(ncr53c8xx_queue_command)
+
irqreturn_t ncr53c8xx_intr(int irq, void *dev_id)
{
unsigned long flags;
diff --git a/drivers/scsi/nsp32.c b/drivers/scsi/nsp32.c
index 4c1e54545200..6b8b021400f8 100644
--- a/drivers/scsi/nsp32.c
+++ b/drivers/scsi/nsp32.c
@@ -196,8 +196,7 @@ static void __exit exit_nsp32 (void);
static int nsp32_proc_info (struct Scsi_Host *, char *, char **, off_t, int, int);
static int nsp32_detect (struct pci_dev *pdev);
-static int nsp32_queuecommand(struct scsi_cmnd *,
- void (*done)(struct scsi_cmnd *));
+static int nsp32_queuecommand(struct Scsi_Host *, struct scsi_cmnd *);
static const char *nsp32_info (struct Scsi_Host *);
static int nsp32_release (struct Scsi_Host *);
@@ -909,7 +908,7 @@ static int nsp32_setup_sg_table(struct scsi_cmnd *SCpnt)
return TRUE;
}
-static int nsp32_queuecommand(struct scsi_cmnd *SCpnt, void (*done)(struct scsi_cmnd *))
+static int nsp32_queuecommand_lck(struct scsi_cmnd *SCpnt, void (*done)(struct scsi_cmnd *))
{
nsp32_hw_data *data = (nsp32_hw_data *)SCpnt->device->host->hostdata;
nsp32_target *target;
@@ -1050,6 +1049,8 @@ static int nsp32_queuecommand(struct scsi_cmnd *SCpnt, void (*done)(struct scsi_
return 0;
}
+static DEF_SCSI_QCMD(nsp32_queuecommand)
+
/* initialize asic */
static int nsp32hw_init(nsp32_hw_data *data)
{
diff --git a/drivers/scsi/osd/osd_initiator.c b/drivers/scsi/osd/osd_initiator.c
index e88bbdde49c5..0433ea6f27c9 100644
--- a/drivers/scsi/osd/osd_initiator.c
+++ b/drivers/scsi/osd/osd_initiator.c
@@ -452,10 +452,6 @@ void osd_end_request(struct osd_request *or)
{
struct request *rq = or->request;
- _osd_free_seg(or, &or->set_attr);
- _osd_free_seg(or, &or->enc_get_attr);
- _osd_free_seg(or, &or->get_attr);
-
if (rq) {
if (rq->next_rq) {
_put_request(rq->next_rq);
@@ -464,6 +460,12 @@ void osd_end_request(struct osd_request *or)
_put_request(rq);
}
+
+ _osd_free_seg(or, &or->get_attr);
+ _osd_free_seg(or, &or->enc_get_attr);
+ _osd_free_seg(or, &or->set_attr);
+ _osd_free_seg(or, &or->cdb_cont);
+
_osd_request_free(or);
}
EXPORT_SYMBOL(osd_end_request);
@@ -547,6 +549,12 @@ static int _osd_realloc_seg(struct osd_request *or,
return 0;
}
+static int _alloc_cdb_cont(struct osd_request *or, unsigned total_bytes)
+{
+ OSD_DEBUG("total_bytes=%d\n", total_bytes);
+ return _osd_realloc_seg(or, &or->cdb_cont, total_bytes);
+}
+
static int _alloc_set_attr_list(struct osd_request *or,
const struct osd_attr *oa, unsigned nelem, unsigned add_bytes)
{
@@ -885,6 +893,199 @@ int osd_req_read_kern(struct osd_request *or,
}
EXPORT_SYMBOL(osd_req_read_kern);
+static int _add_sg_continuation_descriptor(struct osd_request *or,
+ const struct osd_sg_entry *sglist, unsigned numentries, u64 *len)
+{
+ struct osd_sg_continuation_descriptor *oscd;
+ u32 oscd_size;
+ unsigned i;
+ int ret;
+
+ oscd_size = sizeof(*oscd) + numentries * sizeof(oscd->entries[0]);
+
+ if (!or->cdb_cont.total_bytes) {
+ /* First time, jump over the header, we will write to:
+ * cdb_cont.buff + cdb_cont.total_bytes
+ */
+ or->cdb_cont.total_bytes =
+ sizeof(struct osd_continuation_segment_header);
+ }
+
+ ret = _alloc_cdb_cont(or, or->cdb_cont.total_bytes + oscd_size);
+ if (unlikely(ret))
+ return ret;
+
+ oscd = or->cdb_cont.buff + or->cdb_cont.total_bytes;
+ oscd->hdr.type = cpu_to_be16(SCATTER_GATHER_LIST);
+ oscd->hdr.pad_length = 0;
+ oscd->hdr.length = cpu_to_be32(oscd_size - sizeof(*oscd));
+
+ *len = 0;
+ /* copy the sg entries and convert to network byte order */
+ for (i = 0; i < numentries; i++) {
+ oscd->entries[i].offset = cpu_to_be64(sglist[i].offset);
+ oscd->entries[i].len = cpu_to_be64(sglist[i].len);
+ *len += sglist[i].len;
+ }
+
+ or->cdb_cont.total_bytes += oscd_size;
+ OSD_DEBUG("total_bytes=%d oscd_size=%d numentries=%d\n",
+ or->cdb_cont.total_bytes, oscd_size, numentries);
+ return 0;
+}
+
+static int _osd_req_finalize_cdb_cont(struct osd_request *or, const u8 *cap_key)
+{
+ struct request_queue *req_q = osd_request_queue(or->osd_dev);
+ struct bio *bio;
+ struct osd_cdb_head *cdbh = osd_cdb_head(&or->cdb);
+ struct osd_continuation_segment_header *cont_seg_hdr;
+
+ if (!or->cdb_cont.total_bytes)
+ return 0;
+
+ cont_seg_hdr = or->cdb_cont.buff;
+ cont_seg_hdr->format = CDB_CONTINUATION_FORMAT_V2;
+ cont_seg_hdr->service_action = cdbh->varlen_cdb.service_action;
+
+ /* create a bio for continuation segment */
+ bio = bio_map_kern(req_q, or->cdb_cont.buff, or->cdb_cont.total_bytes,
+ GFP_KERNEL);
+ if (unlikely(!bio))
+ return -ENOMEM;
+
+ bio->bi_rw |= REQ_WRITE;
+
+ /* integrity check the continuation before the bio is linked
+ * with the other data segments since the continuation
+ * integrity is separate from the other data segments.
+ */
+ osd_sec_sign_data(cont_seg_hdr->integrity_check, bio, cap_key);
+
+ cdbh->v2.cdb_continuation_length = cpu_to_be32(or->cdb_cont.total_bytes);
+
+ /* we can't use _req_append_segment, because we need to link in the
+ * continuation bio to the head of the bio list - the
+ * continuation segment (if it exists) is always the first segment in
+ * the out data buffer.
+ */
+ bio->bi_next = or->out.bio;
+ or->out.bio = bio;
+ or->out.total_bytes += or->cdb_cont.total_bytes;
+
+ return 0;
+}
+
+/* osd_req_write_sg: Takes a @bio that points to the data out buffer and an
+ * @sglist that has the scatter gather entries. Scatter-gather enables a write
+ * of multiple none-contiguous areas of an object, in a single call. The extents
+ * may overlap and/or be in any order. The only constrain is that:
+ * total_bytes(sglist) >= total_bytes(bio)
+ */
+int osd_req_write_sg(struct osd_request *or,
+ const struct osd_obj_id *obj, struct bio *bio,
+ const struct osd_sg_entry *sglist, unsigned numentries)
+{
+ u64 len;
+ int ret = _add_sg_continuation_descriptor(or, sglist, numentries, &len);
+
+ if (ret)
+ return ret;
+ osd_req_write(or, obj, 0, bio, len);
+
+ return 0;
+}
+EXPORT_SYMBOL(osd_req_write_sg);
+
+/* osd_req_read_sg: Read multiple extents of an object into @bio
+ * See osd_req_write_sg
+ */
+int osd_req_read_sg(struct osd_request *or,
+ const struct osd_obj_id *obj, struct bio *bio,
+ const struct osd_sg_entry *sglist, unsigned numentries)
+{
+ u64 len;
+ int ret = _add_sg_continuation_descriptor(or, sglist, numentries, &len);
+
+ if (ret)
+ return ret;
+ osd_req_read(or, obj, 0, bio, len);
+
+ return 0;
+}
+EXPORT_SYMBOL(osd_req_read_sg);
+
+/* SG-list write/read Kern API
+ *
+ * osd_req_{write,read}_sg_kern takes an array of @buff pointers and an array
+ * of sg_entries. @numentries indicates how many pointers and sg_entries there
+ * are. By requiring an array of buff pointers, this allows a caller to do a
+ * single write/read and scatter into multiple buffers.
+ * NOTE: Each buffer + len should not cross a page boundary.
+ */
+static struct bio *_create_sg_bios(struct osd_request *or,
+ void **buff, const struct osd_sg_entry *sglist, unsigned numentries)
+{
+ struct request_queue *q = osd_request_queue(or->osd_dev);
+ struct bio *bio;
+ unsigned i;
+
+ bio = bio_kmalloc(GFP_KERNEL, numentries);
+ if (unlikely(!bio)) {
+ OSD_DEBUG("Faild to allocate BIO size=%u\n", numentries);
+ return ERR_PTR(-ENOMEM);
+ }
+
+ for (i = 0; i < numentries; i++) {
+ unsigned offset = offset_in_page(buff[i]);
+ struct page *page = virt_to_page(buff[i]);
+ unsigned len = sglist[i].len;
+ unsigned added_len;
+
+ BUG_ON(offset + len > PAGE_SIZE);
+ added_len = bio_add_pc_page(q, bio, page, len, offset);
+ if (unlikely(len != added_len)) {
+ OSD_DEBUG("bio_add_pc_page len(%d) != added_len(%d)\n",
+ len, added_len);
+ bio_put(bio);
+ return ERR_PTR(-ENOMEM);
+ }
+ }
+
+ return bio;
+}
+
+int osd_req_write_sg_kern(struct osd_request *or,
+ const struct osd_obj_id *obj, void **buff,
+ const struct osd_sg_entry *sglist, unsigned numentries)
+{
+ struct bio *bio = _create_sg_bios(or, buff, sglist, numentries);
+ if (IS_ERR(bio))
+ return PTR_ERR(bio);
+
+ bio->bi_rw |= REQ_WRITE;
+ osd_req_write_sg(or, obj, bio, sglist, numentries);
+
+ return 0;
+}
+EXPORT_SYMBOL(osd_req_write_sg_kern);
+
+int osd_req_read_sg_kern(struct osd_request *or,
+ const struct osd_obj_id *obj, void **buff,
+ const struct osd_sg_entry *sglist, unsigned numentries)
+{
+ struct bio *bio = _create_sg_bios(or, buff, sglist, numentries);
+ if (IS_ERR(bio))
+ return PTR_ERR(bio);
+
+ osd_req_read_sg(or, obj, bio, sglist, numentries);
+
+ return 0;
+}
+EXPORT_SYMBOL(osd_req_read_sg_kern);
+
+
+
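
To make the new kern API concrete, a hypothetical caller that gathers one read into two separate kernel buffers might look like the sketch below; example_sg_read, buf_a and buf_b are invented for illustration, error handling is elided, and each buffer must respect the page-boundary note above:

/* Hypothetical usage sketch for osd_req_read_sg_kern(); 'or' and 'obj' are
 * assumed to be prepared by the caller in the usual way. */
static int example_sg_read(struct osd_request *or, const struct osd_obj_id *obj,
			   void *buf_a, void *buf_b)
{
	void *buffs[2] = { buf_a, buf_b };
	struct osd_sg_entry sglist[2] = {
		{ .offset = 0,    .len = 512 },	/* first extent of the object */
		{ .offset = 4096, .len = 512 },	/* second, non-contiguous extent */
	};

	return osd_req_read_sg_kern(or, obj, buffs, sglist, 2);
}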
void osd_req_get_attributes(struct osd_request *or,
const struct osd_obj_id *obj)
{
@@ -1218,17 +1419,18 @@ int osd_req_add_get_attr_page(struct osd_request *or,
or->get_attr.buff = attar_page;
or->get_attr.total_bytes = max_page_len;
- or->set_attr.buff = set_one_attr->val_ptr;
- or->set_attr.total_bytes = set_one_attr->len;
-
cdbh->attrs_page.get_attr_page = cpu_to_be32(page_id);
cdbh->attrs_page.get_attr_alloc_length = cpu_to_be32(max_page_len);
- /* ocdb->attrs_page.get_attr_offset; */
+
+ if (!set_one_attr || !set_one_attr->attr_page)
+ return 0; /* The set is optional */
+
+ or->set_attr.buff = set_one_attr->val_ptr;
+ or->set_attr.total_bytes = set_one_attr->len;
cdbh->attrs_page.set_attr_page = cpu_to_be32(set_one_attr->attr_page);
cdbh->attrs_page.set_attr_id = cpu_to_be32(set_one_attr->attr_id);
cdbh->attrs_page.set_attr_length = cpu_to_be32(set_one_attr->len);
- /* ocdb->attrs_page.set_attr_offset; */
return 0;
}
EXPORT_SYMBOL(osd_req_add_get_attr_page);
@@ -1248,11 +1450,14 @@ static int _osd_req_finalize_attr_page(struct osd_request *or)
if (ret)
return ret;
+ if (or->set_attr.total_bytes == 0)
+ return 0;
+
/* set one value */
cdbh->attrs_page.set_attr_offset =
osd_req_encode_offset(or, or->out.total_bytes, &out_padding);
- ret = _req_append_segment(or, out_padding, &or->enc_get_attr, NULL,
+ ret = _req_append_segment(or, out_padding, &or->set_attr, NULL,
&or->out);
return ret;
}
@@ -1276,7 +1481,8 @@ static inline void osd_sec_parms_set_in_offset(bool is_v1,
}
static int _osd_req_finalize_data_integrity(struct osd_request *or,
- bool has_in, bool has_out, u64 out_data_bytes, const u8 *cap_key)
+ bool has_in, bool has_out, struct bio *out_data_bio, u64 out_data_bytes,
+ const u8 *cap_key)
{
struct osd_security_parameters *sec_parms = _osd_req_sec_params(or);
int ret;
@@ -1307,7 +1513,7 @@ static int _osd_req_finalize_data_integrity(struct osd_request *or,
or->out.last_seg = NULL;
/* they are now all chained to request sign them all together */
- osd_sec_sign_data(&or->out_data_integ, or->out.req->bio,
+ osd_sec_sign_data(&or->out_data_integ, out_data_bio,
cap_key);
}
@@ -1403,6 +1609,8 @@ int osd_finalize_request(struct osd_request *or,
{
struct osd_cdb_head *cdbh = osd_cdb_head(&or->cdb);
bool has_in, has_out;
+ /* Save for data_integrity without the cdb_continuation */
+ struct bio *out_data_bio = or->out.bio;
u64 out_data_bytes = or->out.total_bytes;
int ret;
@@ -1418,9 +1626,14 @@ int osd_finalize_request(struct osd_request *or,
osd_set_caps(&or->cdb, cap);
has_in = or->in.bio || or->get_attr.total_bytes;
- has_out = or->out.bio || or->set_attr.total_bytes ||
- or->enc_get_attr.total_bytes;
+ has_out = or->out.bio || or->cdb_cont.total_bytes ||
+ or->set_attr.total_bytes || or->enc_get_attr.total_bytes;
+ ret = _osd_req_finalize_cdb_cont(or, cap_key);
+ if (ret) {
+ OSD_DEBUG("_osd_req_finalize_cdb_cont failed\n");
+ return ret;
+ }
ret = _init_blk_request(or, has_in, has_out);
if (ret) {
OSD_DEBUG("_init_blk_request failed\n");
@@ -1458,7 +1671,8 @@ int osd_finalize_request(struct osd_request *or,
}
ret = _osd_req_finalize_data_integrity(or, has_in, has_out,
- out_data_bytes, cap_key);
+ out_data_bio, out_data_bytes,
+ cap_key);
if (ret)
return ret;
diff --git a/drivers/scsi/pas16.h b/drivers/scsi/pas16.h
index 8dc5b1a5f5da..a04281cace2e 100644
--- a/drivers/scsi/pas16.h
+++ b/drivers/scsi/pas16.h
@@ -118,7 +118,7 @@ static int pas16_abort(Scsi_Cmnd *);
static int pas16_biosparam(struct scsi_device *, struct block_device *,
sector_t, int*);
static int pas16_detect(struct scsi_host_template *);
-static int pas16_queue_command(Scsi_Cmnd *, void (*done)(Scsi_Cmnd *));
+static int pas16_queue_command(struct Scsi_Host *, struct scsi_cmnd *);
static int pas16_bus_reset(Scsi_Cmnd *);
#ifndef CMD_PER_LUN
diff --git a/drivers/scsi/pcmcia/nsp_cs.c b/drivers/scsi/pcmcia/nsp_cs.c
index 9326c2c14880..be3f33d31a99 100644
--- a/drivers/scsi/pcmcia/nsp_cs.c
+++ b/drivers/scsi/pcmcia/nsp_cs.c
@@ -184,7 +184,7 @@ static void nsp_scsi_done(struct scsi_cmnd *SCpnt)
SCpnt->scsi_done(SCpnt);
}
-static int nsp_queuecommand(struct scsi_cmnd *SCpnt,
+static int nsp_queuecommand_lck(struct scsi_cmnd *SCpnt,
void (*done)(struct scsi_cmnd *))
{
#ifdef NSP_DEBUG
@@ -264,6 +264,8 @@ static int nsp_queuecommand(struct scsi_cmnd *SCpnt,
return 0;
}
+static DEF_SCSI_QCMD(nsp_queuecommand)
+
/*
* setup PIO FIFO transfer mode and enable/disable to data out
*/
diff --git a/drivers/scsi/pcmcia/nsp_cs.h b/drivers/scsi/pcmcia/nsp_cs.h
index d68c9f267c5e..7fc9a9d0a448 100644
--- a/drivers/scsi/pcmcia/nsp_cs.h
+++ b/drivers/scsi/pcmcia/nsp_cs.h
@@ -299,8 +299,7 @@ static int nsp_proc_info (
off_t offset,
int length,
int inout);
-static int nsp_queuecommand(struct scsi_cmnd *SCpnt,
- void (* done)(struct scsi_cmnd *SCpnt));
+static int nsp_queuecommand(struct Scsi_Host *h, struct scsi_cmnd *SCpnt);
/* Error handler */
/*static int nsp_eh_abort (struct scsi_cmnd *SCpnt);*/
diff --git a/drivers/scsi/pcmcia/sym53c500_cs.c b/drivers/scsi/pcmcia/sym53c500_cs.c
index 0ae27cb5cd6f..8552296edaa1 100644
--- a/drivers/scsi/pcmcia/sym53c500_cs.c
+++ b/drivers/scsi/pcmcia/sym53c500_cs.c
@@ -547,7 +547,7 @@ SYM53C500_info(struct Scsi_Host *SChost)
}
static int
-SYM53C500_queue(struct scsi_cmnd *SCpnt, void (*done)(struct scsi_cmnd *))
+SYM53C500_queue_lck(struct scsi_cmnd *SCpnt, void (*done)(struct scsi_cmnd *))
{
int i;
int port_base = SCpnt->device->host->io_port;
@@ -583,6 +583,8 @@ SYM53C500_queue(struct scsi_cmnd *SCpnt, void (*done)(struct scsi_cmnd *))
return 0;
}
+static DEF_SCSI_QCMD(SYM53C500_queue)
+
static int
SYM53C500_host_reset(struct scsi_cmnd *SCpnt)
{
diff --git a/drivers/scsi/pm8001/pm8001_sas.h b/drivers/scsi/pm8001/pm8001_sas.h
index 8e38ca8cd101..7f064f9ca828 100644
--- a/drivers/scsi/pm8001/pm8001_sas.h
+++ b/drivers/scsi/pm8001/pm8001_sas.h
@@ -50,7 +50,6 @@
#include <linux/dma-mapping.h>
#include <linux/pci.h>
#include <linux/interrupt.h>
-#include <linux/smp_lock.h>
#include <scsi/libsas.h>
#include <scsi/scsi_tcq.h>
#include <scsi/sas_ata.h>
diff --git a/drivers/scsi/pmcraid.c b/drivers/scsi/pmcraid.c
index 4b8765785aeb..5e76a624cb08 100644
--- a/drivers/scsi/pmcraid.c
+++ b/drivers/scsi/pmcraid.c
@@ -1594,10 +1594,12 @@ static void pmcraid_handle_config_change(struct pmcraid_instance *pinstance)
cfg_entry = &ccn_hcam->cfg_entry;
fw_version = be16_to_cpu(pinstance->inq_data->fw_version);
- pmcraid_info
- ("CCN(%x): %x type: %x lost: %x flags: %x res: %x:%x:%x:%x\n",
+ pmcraid_info("CCN(%x): %x timestamp: %llx type: %x lost: %x flags: %x \
+ res: %x:%x:%x:%x\n",
pinstance->ccn.hcam->ilid,
pinstance->ccn.hcam->op_code,
+ ((pinstance->ccn.hcam->timestamp1) |
+ ((pinstance->ccn.hcam->timestamp2 & 0xffffffffLL) << 32)),
pinstance->ccn.hcam->notification_type,
pinstance->ccn.hcam->notification_lost,
pinstance->ccn.hcam->flags,
@@ -1850,6 +1852,7 @@ static void pmcraid_process_ccn(struct pmcraid_cmd *cmd)
* none
*/
static void pmcraid_initiate_reset(struct pmcraid_instance *);
+static void pmcraid_set_timestamp(struct pmcraid_cmd *cmd);
static void pmcraid_process_ldn(struct pmcraid_cmd *cmd)
{
@@ -1881,6 +1884,10 @@ static void pmcraid_process_ldn(struct pmcraid_cmd *cmd)
lock_flags);
return;
}
+ if (fd_ioasc == PMCRAID_IOASC_TIME_STAMP_OUT_OF_SYNC) {
+ pinstance->timestamp_error = 1;
+ pmcraid_set_timestamp(cmd);
+ }
} else {
dev_info(&pinstance->pdev->dev,
"Host RCB(LDN) failed with IOASC: 0x%08X\n", ioasc);
@@ -3363,7 +3370,7 @@ static struct pmcraid_sglist *pmcraid_alloc_sglist(int buflen)
sg_size = buflen;
for (i = 0; i < num_elem; i++) {
- page = alloc_pages(GFP_KERNEL|GFP_DMA, order);
+ page = alloc_pages(GFP_KERNEL|GFP_DMA|__GFP_ZERO, order);
if (!page) {
for (j = i - 1; j >= 0; j--)
__free_pages(sg_page(&scatterlist[j]), order);
@@ -3471,7 +3478,7 @@ static int pmcraid_copy_sglist(
* SCSI_MLQUEUE_DEVICE_BUSY if device is busy
* SCSI_MLQUEUE_HOST_BUSY if host is busy
*/
-static int pmcraid_queuecommand(
+static int pmcraid_queuecommand_lck(
struct scsi_cmnd *scsi_cmd,
void (*done) (struct scsi_cmnd *)
)
@@ -3577,6 +3584,8 @@ static int pmcraid_queuecommand(
return rc;
}
+static DEF_SCSI_QCMD(pmcraid_queuecommand)
+
/**
* pmcraid_open -char node "open" entry, allowed only users with admin access
*/
@@ -3739,6 +3748,7 @@ static long pmcraid_ioctl_passthrough(
unsigned long request_buffer;
unsigned long request_offset;
unsigned long lock_flags;
+ void *ioasa;
u32 ioasc;
int request_size;
int buffer_size;
@@ -3780,6 +3790,11 @@ static long pmcraid_ioctl_passthrough(
rc = __copy_from_user(buffer,
(struct pmcraid_passthrough_ioctl_buffer *) arg,
sizeof(struct pmcraid_passthrough_ioctl_buffer));
+
+ ioasa =
+ (void *)(arg +
+ offsetof(struct pmcraid_passthrough_ioctl_buffer, ioasa));
+
if (rc) {
pmcraid_err("ioctl: can't copy passthrough buffer\n");
rc = -EFAULT;
@@ -3947,22 +3962,14 @@ static long pmcraid_ioctl_passthrough(
}
out_handle_response:
- /* If the command failed for any reason, copy entire IOASA buffer and
- * return IOCTL success. If copying IOASA to user-buffer fails, return
+ /* copy entire IOASA buffer and return IOCTL success.
+ * If copying IOASA to user-buffer fails, return
* EFAULT
*/
- if (PMCRAID_IOASC_SENSE_KEY(le32_to_cpu(cmd->ioa_cb->ioasa.ioasc))) {
- void *ioasa =
- (void *)(arg +
- offsetof(struct pmcraid_passthrough_ioctl_buffer, ioasa));
-
- pmcraid_info("command failed with %x\n",
- le32_to_cpu(cmd->ioa_cb->ioasa.ioasc));
- if (copy_to_user(ioasa, &cmd->ioa_cb->ioasa,
- sizeof(struct pmcraid_ioasa))) {
- pmcraid_err("failed to copy ioasa buffer to user\n");
- rc = -EFAULT;
- }
+ if (copy_to_user(ioasa, &cmd->ioa_cb->ioasa,
+ sizeof(struct pmcraid_ioasa))) {
+ pmcraid_err("failed to copy ioasa buffer to user\n");
+ rc = -EFAULT;
}
/* If the data transfer was from device, copy the data onto user
@@ -5147,6 +5154,16 @@ static void pmcraid_release_buffers(struct pmcraid_instance *pinstance)
pinstance->inq_data = NULL;
pinstance->inq_data_baddr = 0;
}
+
+ if (pinstance->timestamp_data != NULL) {
+ pci_free_consistent(pinstance->pdev,
+ sizeof(struct pmcraid_timestamp_data),
+ pinstance->timestamp_data,
+ pinstance->timestamp_data_baddr);
+
+ pinstance->timestamp_data = NULL;
+ pinstance->timestamp_data_baddr = 0;
+ }
}
/**
@@ -5205,6 +5222,20 @@ static int __devinit pmcraid_init_buffers(struct pmcraid_instance *pinstance)
return -ENOMEM;
}
+ /* allocate DMAable memory for set timestamp data buffer */
+ pinstance->timestamp_data = pci_alloc_consistent(
+ pinstance->pdev,
+ sizeof(struct pmcraid_timestamp_data),
+ &pinstance->timestamp_data_baddr);
+
+ if (pinstance->timestamp_data == NULL) {
+ pmcraid_err("couldn't allocate DMA memory for \
+ set time_stamp \n");
+ pmcraid_release_buffers(pinstance);
+ return -ENOMEM;
+ }
+
+
/* Initialize all the command blocks and add them to free pool. No
* need to lock (free_pool_lock) as this is done in initialization
* itself
@@ -5610,6 +5641,68 @@ static void pmcraid_set_supported_devs(struct pmcraid_cmd *cmd)
}
/**
+ * pmcraid_set_timestamp - set the timestamp to IOAFP
+ *
+ * @cmd: pointer to pmcraid_cmd structure
+ *
+ * Return Value
+ * none
+ */
+static void pmcraid_set_timestamp(struct pmcraid_cmd *cmd)
+{
+ struct pmcraid_instance *pinstance = cmd->drv_inst;
+ struct pmcraid_ioarcb *ioarcb = &cmd->ioa_cb->ioarcb;
+ __be32 time_stamp_len = cpu_to_be32(PMCRAID_TIMESTAMP_LEN);
+ struct pmcraid_ioadl_desc *ioadl = ioarcb->add_data.u.ioadl;
+
+ struct timeval tv;
+ __le64 timestamp;
+
+ do_gettimeofday(&tv);
+ timestamp = tv.tv_sec * 1000;
+
+ pinstance->timestamp_data->timestamp[0] = (__u8)(timestamp);
+ pinstance->timestamp_data->timestamp[1] = (__u8)((timestamp) >> 8);
+ pinstance->timestamp_data->timestamp[2] = (__u8)((timestamp) >> 16);
+ pinstance->timestamp_data->timestamp[3] = (__u8)((timestamp) >> 24);
+ pinstance->timestamp_data->timestamp[4] = (__u8)((timestamp) >> 32);
+ pinstance->timestamp_data->timestamp[5] = (__u8)((timestamp) >> 40);
+
+ pmcraid_reinit_cmdblk(cmd);
+ ioarcb->request_type = REQ_TYPE_SCSI;
+ ioarcb->resource_handle = cpu_to_le32(PMCRAID_IOA_RES_HANDLE);
+ ioarcb->cdb[0] = PMCRAID_SCSI_SET_TIMESTAMP;
+ ioarcb->cdb[1] = PMCRAID_SCSI_SERVICE_ACTION;
+ memcpy(&(ioarcb->cdb[6]), &time_stamp_len, sizeof(time_stamp_len));
+
+ ioarcb->ioadl_bus_addr = cpu_to_le64((cmd->ioa_cb_bus_addr) +
+ offsetof(struct pmcraid_ioarcb,
+ add_data.u.ioadl[0]));
+ ioarcb->ioadl_length = cpu_to_le32(sizeof(struct pmcraid_ioadl_desc));
+ ioarcb->ioarcb_bus_addr &= ~(0x1FULL);
+
+ ioarcb->request_flags0 |= NO_LINK_DESCS;
+ ioarcb->request_flags0 |= TRANSFER_DIR_WRITE;
+ ioarcb->data_transfer_length =
+ cpu_to_le32(sizeof(struct pmcraid_timestamp_data));
+ ioadl = &(ioarcb->add_data.u.ioadl[0]);
+ ioadl->flags = IOADL_FLAGS_LAST_DESC;
+ ioadl->address = cpu_to_le64(pinstance->timestamp_data_baddr);
+ ioadl->data_len = cpu_to_le32(sizeof(struct pmcraid_timestamp_data));
+
+ if (!pinstance->timestamp_error) {
+ pinstance->timestamp_error = 0;
+ pmcraid_send_cmd(cmd, pmcraid_set_supported_devs,
+ PMCRAID_INTERNAL_TIMEOUT, pmcraid_timeout_handler);
+ } else {
+ pmcraid_send_cmd(cmd, pmcraid_return_cmd,
+ PMCRAID_INTERNAL_TIMEOUT, pmcraid_timeout_handler);
+ return;
+ }
+}
+
+
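
The six timestamp bytes written above are just the current time (seconds since the epoch, scaled to milliseconds), stored least-significant byte first. A compact equivalent of that byte-shift sequence, as a sketch with an invented helper name:

/* Hedged sketch: pack a millisecond timestamp LSB-first into a 6-byte
 * field, mirroring the shift sequence in pmcraid_set_timestamp() above. */
static void pack_timestamp48(u64 msec, u8 out[6])
{
	int i;

	for (i = 0; i < 6; i++)
		out[i] = (u8)(msec >> (8 * i));	/* out[0] = LSB ... out[5] = MSB */
}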
+/**
* pmcraid_init_res_table - Initialize the resource table
* @cmd: pointer to pmcraid command struct
*
@@ -5720,7 +5813,7 @@ static void pmcraid_init_res_table(struct pmcraid_cmd *cmd)
/* release the resource list lock */
spin_unlock_irqrestore(&pinstance->resource_lock, lock_flags);
- pmcraid_set_supported_devs(cmd);
+ pmcraid_set_timestamp(cmd);
}
/**
@@ -6054,10 +6147,10 @@ out_init:
static void __exit pmcraid_exit(void)
{
pmcraid_netlink_release();
- class_destroy(pmcraid_class);
unregister_chrdev_region(MKDEV(pmcraid_major, 0),
PMCRAID_MAX_ADAPTERS);
pci_unregister_driver(&pmcraid_driver);
+ class_destroy(pmcraid_class);
}
module_init(pmcraid_init);
diff --git a/drivers/scsi/pmcraid.h b/drivers/scsi/pmcraid.h
index 6cfa0145a1d7..1134279604e8 100644
--- a/drivers/scsi/pmcraid.h
+++ b/drivers/scsi/pmcraid.h
@@ -42,7 +42,7 @@
*/
#define PMCRAID_DRIVER_NAME "PMC MaxRAID"
#define PMCRAID_DEVFILE "pmcsas"
-#define PMCRAID_DRIVER_VERSION "2.0.2"
+#define PMCRAID_DRIVER_VERSION "2.0.3"
#define PMCRAID_DRIVER_DATE __DATE__
#define PMCRAID_FW_VERSION_1 0x002
@@ -184,6 +184,7 @@
#define PMCRAID_IOASC_IR_INVALID_RESOURCE_HANDLE 0x05250000
#define PMCRAID_IOASC_AC_TERMINATED_BY_HOST 0x0B5A0000
#define PMCRAID_IOASC_UA_BUS_WAS_RESET 0x06290000
+#define PMCRAID_IOASC_TIME_STAMP_OUT_OF_SYNC 0x06908B00
#define PMCRAID_IOASC_UA_BUS_WAS_RESET_BY_OTHER 0x06298000
/* Driver defined IOASCs */
@@ -561,6 +562,17 @@ struct pmcraid_inquiry_data {
__u8 reserved3[16];
};
+#define PMCRAID_TIMESTAMP_LEN 12
+#define PMCRAID_REQ_TM_STR_LEN 6
+#define PMCRAID_SCSI_SET_TIMESTAMP 0xA4
+#define PMCRAID_SCSI_SERVICE_ACTION 0x0F
+
+struct pmcraid_timestamp_data {
+ __u8 reserved1[4];
+ __u8 timestamp[PMCRAID_REQ_TM_STR_LEN]; /* current time value */
+ __u8 reserved2[2];
+};
+
/* pmcraid_cmd - LLD representation of SCSI command */
struct pmcraid_cmd {
@@ -568,7 +580,6 @@ struct pmcraid_cmd {
struct pmcraid_control_block *ioa_cb;
dma_addr_t ioa_cb_bus_addr;
dma_addr_t dma_handle;
- u8 *sense_buffer;
/* pointer to mid layer structure of SCSI commands */
struct scsi_cmnd *scsi_cmd;
@@ -705,6 +716,9 @@ struct pmcraid_instance {
struct pmcraid_inquiry_data *inq_data;
dma_addr_t inq_data_baddr;
+ struct pmcraid_timestamp_data *timestamp_data;
+ dma_addr_t timestamp_data_baddr;
+
/* size of configuration table entry, varies based on the firmware */
u32 config_table_entry_size;
@@ -791,6 +805,7 @@ struct pmcraid_instance {
#define SHUTDOWN_NONE 0x0
#define SHUTDOWN_NORMAL 0x1
#define SHUTDOWN_ABBREV 0x2
+ u32 timestamp_error:1; /* indicate set timestamp for out of sync */
};
@@ -1056,10 +1071,10 @@ struct pmcraid_passthrough_ioctl_buffer {
#define PMCRAID_PASSTHROUGH_IOCTL 'F'
#define DRV_IOCTL(n, size) \
- _IOC(_IOC_READ|_IOC_WRITE, PMCRAID_DRIVER_IOCTL, (n), (size))
+ _IOC(_IOC_READ|_IOC_WRITE, PMCRAID_DRIVER_IOCTL, (n), (size))
#define FMW_IOCTL(n, size) \
- _IOC(_IOC_READ|_IOC_WRITE, PMCRAID_PASSTHROUGH_IOCTL, (n), (size))
+ _IOC(_IOC_READ|_IOC_WRITE, PMCRAID_PASSTHROUGH_IOCTL, (n), (size))
/*
* _ARGSIZE: macro that gives size of the argument type passed to an IOCTL cmd.
diff --git a/drivers/scsi/ppa.c b/drivers/scsi/ppa.c
index 7bc2d796e403..d164c9639361 100644
--- a/drivers/scsi/ppa.c
+++ b/drivers/scsi/ppa.c
@@ -798,7 +798,7 @@ static int ppa_engine(ppa_struct *dev, struct scsi_cmnd *cmd)
return 0;
}
-static int ppa_queuecommand(struct scsi_cmnd *cmd,
+static int ppa_queuecommand_lck(struct scsi_cmnd *cmd,
void (*done) (struct scsi_cmnd *))
{
ppa_struct *dev = ppa_dev(cmd->device->host);
@@ -821,6 +821,8 @@ static int ppa_queuecommand(struct scsi_cmnd *cmd,
return 0;
}
+static DEF_SCSI_QCMD(ppa_queuecommand)
+
/*
* Apparently the disk->capacity attribute is off by 1 sector
* for all disk drives. We add the one here, but it should really
diff --git a/drivers/scsi/ps3rom.c b/drivers/scsi/ps3rom.c
index 92ffbb510498..cd178b9e40cd 100644
--- a/drivers/scsi/ps3rom.c
+++ b/drivers/scsi/ps3rom.c
@@ -211,7 +211,7 @@ static int ps3rom_write_request(struct ps3_storage_device *dev,
return 0;
}
-static int ps3rom_queuecommand(struct scsi_cmnd *cmd,
+static int ps3rom_queuecommand_lck(struct scsi_cmnd *cmd,
void (*done)(struct scsi_cmnd *))
{
struct ps3rom_private *priv = shost_priv(cmd->device->host);
@@ -260,6 +260,8 @@ static int ps3rom_queuecommand(struct scsi_cmnd *cmd,
return 0;
}
+static DEF_SCSI_QCMD(ps3rom_queuecommand)
+
static int decode_lv1_status(u64 status, unsigned char *sense_key,
unsigned char *asc, unsigned char *ascq)
{
diff --git a/drivers/scsi/qla1280.c b/drivers/scsi/qla1280.c
index b8166ecfd0e3..5dec684bf010 100644
--- a/drivers/scsi/qla1280.c
+++ b/drivers/scsi/qla1280.c
@@ -727,7 +727,7 @@ qla1280_info(struct Scsi_Host *host)
* context which is a big NO! NO!.
**************************************************************************/
static int
-qla1280_queuecommand(struct scsi_cmnd *cmd, void (*fn)(struct scsi_cmnd *))
+qla1280_queuecommand_lck(struct scsi_cmnd *cmd, void (*fn)(struct scsi_cmnd *))
{
struct Scsi_Host *host = cmd->device->host;
struct scsi_qla_host *ha = (struct scsi_qla_host *)host->hostdata;
@@ -756,6 +756,8 @@ qla1280_queuecommand(struct scsi_cmnd *cmd, void (*fn)(struct scsi_cmnd *))
return status;
}
+static DEF_SCSI_QCMD(qla1280_queuecommand)
+
enum action {
ABORT_COMMAND,
DEVICE_RESET,
diff --git a/drivers/scsi/qla2xxx/qla_attr.c b/drivers/scsi/qla2xxx/qla_attr.c
index 2ff4342ae362..bc8194f74625 100644
--- a/drivers/scsi/qla2xxx/qla_attr.c
+++ b/drivers/scsi/qla2xxx/qla_attr.c
@@ -1538,6 +1538,10 @@ qla2x00_dev_loss_tmo_callbk(struct fc_rport *rport)
if (!fcport)
return;
+ /* Now that the rport has been deleted, set the fcport state to
+ FCS_DEVICE_DEAD */
+ atomic_set(&fcport->state, FCS_DEVICE_DEAD);
+
/*
* Transport has effectively 'deleted' the rport, clear
* all local references.
diff --git a/drivers/scsi/qla2xxx/qla_bsg.c b/drivers/scsi/qla2xxx/qla_bsg.c
index fdfbf83a6330..31a4121a2be1 100644
--- a/drivers/scsi/qla2xxx/qla_bsg.c
+++ b/drivers/scsi/qla2xxx/qla_bsg.c
@@ -1307,6 +1307,125 @@ qla24xx_iidma(struct fc_bsg_job *bsg_job)
}
static int
+qla2x00_optrom_setup(struct fc_bsg_job *bsg_job, struct qla_hw_data *ha,
+ uint8_t is_update)
+{
+ uint32_t start = 0;
+ int valid = 0;
+
+ bsg_job->reply->reply_payload_rcv_len = 0;
+
+ if (unlikely(pci_channel_offline(ha->pdev)))
+ return -EINVAL;
+
+ start = bsg_job->request->rqst_data.h_vendor.vendor_cmd[1];
+ if (start > ha->optrom_size)
+ return -EINVAL;
+
+ if (ha->optrom_state != QLA_SWAITING)
+ return -EBUSY;
+
+ ha->optrom_region_start = start;
+
+ if (is_update) {
+ if (ha->optrom_size == OPTROM_SIZE_2300 && start == 0)
+ valid = 1;
+ else if (start == (ha->flt_region_boot * 4) ||
+ start == (ha->flt_region_fw * 4))
+ valid = 1;
+ else if (IS_QLA24XX_TYPE(ha) || IS_QLA25XX(ha) ||
+ IS_QLA8XXX_TYPE(ha))
+ valid = 1;
+ if (!valid) {
+ qla_printk(KERN_WARNING, ha,
+ "Invalid start region 0x%x/0x%x.\n",
+ start, bsg_job->request_payload.payload_len);
+ return -EINVAL;
+ }
+
+ ha->optrom_region_size = start +
+ bsg_job->request_payload.payload_len > ha->optrom_size ?
+ ha->optrom_size - start :
+ bsg_job->request_payload.payload_len;
+ ha->optrom_state = QLA_SWRITING;
+ } else {
+ ha->optrom_region_size = start +
+ bsg_job->reply_payload.payload_len > ha->optrom_size ?
+ ha->optrom_size - start :
+ bsg_job->reply_payload.payload_len;
+ ha->optrom_state = QLA_SREADING;
+ }
+
+ ha->optrom_buffer = vmalloc(ha->optrom_region_size);
+ if (!ha->optrom_buffer) {
+ qla_printk(KERN_WARNING, ha,
+ "Read: Unable to allocate memory for optrom retrieval "
+ "(%x).\n", ha->optrom_region_size);
+
+ ha->optrom_state = QLA_SWAITING;
+ return -ENOMEM;
+ }
+
+ memset(ha->optrom_buffer, 0, ha->optrom_region_size);
+ return 0;
+}
+
+static int
+qla2x00_read_optrom(struct fc_bsg_job *bsg_job)
+{
+ struct Scsi_Host *host = bsg_job->shost;
+ scsi_qla_host_t *vha = shost_priv(host);
+ struct qla_hw_data *ha = vha->hw;
+ int rval = 0;
+
+ rval = qla2x00_optrom_setup(bsg_job, ha, 0);
+ if (rval)
+ return rval;
+
+ ha->isp_ops->read_optrom(vha, ha->optrom_buffer,
+ ha->optrom_region_start, ha->optrom_region_size);
+
+ sg_copy_from_buffer(bsg_job->reply_payload.sg_list,
+ bsg_job->reply_payload.sg_cnt, ha->optrom_buffer,
+ ha->optrom_region_size);
+
+ bsg_job->reply->reply_payload_rcv_len = ha->optrom_region_size;
+ bsg_job->reply->result = DID_OK;
+ vfree(ha->optrom_buffer);
+ ha->optrom_buffer = NULL;
+ ha->optrom_state = QLA_SWAITING;
+ bsg_job->job_done(bsg_job);
+ return rval;
+}
+
+static int
+qla2x00_update_optrom(struct fc_bsg_job *bsg_job)
+{
+ struct Scsi_Host *host = bsg_job->shost;
+ scsi_qla_host_t *vha = shost_priv(host);
+ struct qla_hw_data *ha = vha->hw;
+ int rval = 0;
+
+ rval = qla2x00_optrom_setup(bsg_job, ha, 1);
+ if (rval)
+ return rval;
+
+ sg_copy_to_buffer(bsg_job->request_payload.sg_list,
+ bsg_job->request_payload.sg_cnt, ha->optrom_buffer,
+ ha->optrom_region_size);
+
+ ha->isp_ops->write_optrom(vha, ha->optrom_buffer,
+ ha->optrom_region_start, ha->optrom_region_size);
+
+ bsg_job->reply->result = DID_OK;
+ vfree(ha->optrom_buffer);
+ ha->optrom_buffer = NULL;
+ ha->optrom_state = QLA_SWAITING;
+ bsg_job->job_done(bsg_job);
+ return rval;
+}
+
+static int
qla2x00_process_vendor_specific(struct fc_bsg_job *bsg_job)
{
switch (bsg_job->request->rqst_data.h_vendor.vendor_cmd[0]) {
@@ -1328,6 +1447,12 @@ qla2x00_process_vendor_specific(struct fc_bsg_job *bsg_job)
case QL_VND_FCP_PRIO_CFG_CMD:
return qla24xx_proc_fcp_prio_cfg_cmd(bsg_job);
+ case QL_VND_READ_FLASH:
+ return qla2x00_read_optrom(bsg_job);
+
+ case QL_VND_UPDATE_FLASH:
+ return qla2x00_update_optrom(bsg_job);
+
default:
bsg_job->reply->result = (DID_ERROR << 16);
bsg_job->job_done(bsg_job);
diff --git a/drivers/scsi/qla2xxx/qla_bsg.h b/drivers/scsi/qla2xxx/qla_bsg.h
index cc7c52f87a11..074a999c7017 100644
--- a/drivers/scsi/qla2xxx/qla_bsg.h
+++ b/drivers/scsi/qla2xxx/qla_bsg.h
@@ -14,6 +14,8 @@
#define QL_VND_A84_MGMT_CMD 0x04
#define QL_VND_IIDMA 0x05
#define QL_VND_FCP_PRIO_CFG_CMD 0x06
+#define QL_VND_READ_FLASH 0x07
+#define QL_VND_UPDATE_FLASH 0x08
/* BSG definations for interpreting CommandSent field */
#define INT_DEF_LB_LOOPBACK_CMD 0
diff --git a/drivers/scsi/qla2xxx/qla_def.h b/drivers/scsi/qla2xxx/qla_def.h
index e1d3ad40a946..3a22effced5f 100644
--- a/drivers/scsi/qla2xxx/qla_def.h
+++ b/drivers/scsi/qla2xxx/qla_def.h
@@ -1700,9 +1700,7 @@ typedef struct fc_port {
atomic_t state;
uint32_t flags;
- int port_login_retry_count;
int login_retry;
- atomic_t port_down_timer;
struct fc_rport *rport, *drport;
u32 supported_classes;
diff --git a/drivers/scsi/qla2xxx/qla_gbl.h b/drivers/scsi/qla2xxx/qla_gbl.h
index c33dec827e1e..9382a816c133 100644
--- a/drivers/scsi/qla2xxx/qla_gbl.h
+++ b/drivers/scsi/qla2xxx/qla_gbl.h
@@ -92,6 +92,7 @@ extern int ql2xshiftctondsd;
extern int ql2xdbwr;
extern int ql2xdontresethba;
extern int ql2xasynctmfenable;
+extern int ql2xgffidenable;
extern int ql2xenabledif;
extern int ql2xenablehba_err_chk;
extern int ql2xtargetreset;
diff --git a/drivers/scsi/qla2xxx/qla_init.c b/drivers/scsi/qla2xxx/qla_init.c
index 3cafbef40737..259f51137493 100644
--- a/drivers/scsi/qla2xxx/qla_init.c
+++ b/drivers/scsi/qla2xxx/qla_init.c
@@ -71,7 +71,7 @@ qla2x00_ctx_sp_free(srb_t *sp)
struct srb_iocb *iocb = ctx->u.iocb_cmd;
struct scsi_qla_host *vha = sp->fcport->vha;
- del_timer_sync(&iocb->timer);
+ del_timer(&iocb->timer);
kfree(iocb);
kfree(ctx);
mempool_free(sp, sp->fcport->vha->hw->srb_mempool);
@@ -1344,6 +1344,13 @@ cont_alloc:
qla_printk(KERN_WARNING, ha, "Unable to allocate (%d KB) for "
"firmware dump!!!\n", dump_size / 1024);
+ if (ha->fce) {
+ dma_free_coherent(&ha->pdev->dev, FCE_SIZE, ha->fce,
+ ha->fce_dma);
+ ha->fce = NULL;
+ ha->fce_dma = 0;
+ }
+
if (ha->eft) {
dma_free_coherent(&ha->pdev->dev, eft_size, ha->eft,
ha->eft_dma);
@@ -1818,14 +1825,14 @@ qla2x00_init_rings(scsi_qla_host_t *vha)
qla2x00_init_response_q_entries(rsp);
}
- spin_lock_irqsave(&ha->vport_slock, flags);
+ spin_lock(&ha->vport_slock);
/* Clear RSCN queue. */
list_for_each_entry(vp, &ha->vp_list, list) {
vp->rscn_in_ptr = 0;
vp->rscn_out_ptr = 0;
}
- spin_unlock_irqrestore(&ha->vport_slock, flags);
+ spin_unlock(&ha->vport_slock);
ha->isp_ops->config_rings(vha);
@@ -2916,21 +2923,13 @@ qla2x00_reg_remote_port(scsi_qla_host_t *vha, fc_port_t *fcport)
void
qla2x00_update_fcport(scsi_qla_host_t *vha, fc_port_t *fcport)
{
- struct qla_hw_data *ha = vha->hw;
-
fcport->vha = vha;
fcport->login_retry = 0;
- fcport->port_login_retry_count = ha->port_down_retry_count *
- PORT_RETRY_TIME;
- atomic_set(&fcport->port_down_timer, ha->port_down_retry_count *
- PORT_RETRY_TIME);
fcport->flags &= ~(FCF_LOGIN_NEEDED | FCF_ASYNC_SENT);
qla2x00_iidma_fcport(vha, fcport);
-
- atomic_set(&fcport->state, FCS_ONLINE);
-
qla2x00_reg_remote_port(vha, fcport);
+ atomic_set(&fcport->state, FCS_ONLINE);
}
/*
@@ -3292,8 +3291,9 @@ qla2x00_find_all_fabric_devs(scsi_qla_host_t *vha,
continue;
/* Bypass ports whose FCP-4 type is not FCP_SCSI */
- if (new_fcport->fc4_type != FC4_TYPE_FCP_SCSI &&
- new_fcport->fc4_type != FC4_TYPE_UNKNOWN)
+ if (ql2xgffidenable &&
+ (new_fcport->fc4_type != FC4_TYPE_FCP_SCSI &&
+ new_fcport->fc4_type != FC4_TYPE_UNKNOWN))
continue;
/* Locate matching device in database. */
diff --git a/drivers/scsi/qla2xxx/qla_iocb.c b/drivers/scsi/qla2xxx/qla_iocb.c
index 579f02854665..5f94430b42f0 100644
--- a/drivers/scsi/qla2xxx/qla_iocb.c
+++ b/drivers/scsi/qla2xxx/qla_iocb.c
@@ -992,8 +992,8 @@ qla24xx_build_scsi_crc_2_iocbs(srb_t *sp, struct cmd_type_crc_2 *cmd_pkt,
ha = vha->hw;
DEBUG18(printk(KERN_DEBUG
- "%s(%ld): Executing cmd sp %p, pid=%ld, prot_op=%u.\n", __func__,
- vha->host_no, sp, cmd->serial_number, scsi_get_prot_op(sp->cmd)));
+ "%s(%ld): Executing cmd sp %p, prot_op=%u.\n", __func__,
+ vha->host_no, sp, scsi_get_prot_op(sp->cmd)));
cmd_pkt->vp_index = sp->fcport->vp_idx;
diff --git a/drivers/scsi/qla2xxx/qla_isr.c b/drivers/scsi/qla2xxx/qla_isr.c
index e0e43d9e7ed1..1f06ddd9bdd1 100644
--- a/drivers/scsi/qla2xxx/qla_isr.c
+++ b/drivers/scsi/qla2xxx/qla_isr.c
@@ -1240,12 +1240,6 @@ qla24xx_logio_entry(scsi_qla_host_t *vha, struct req_que *req,
case LSC_SCODE_NPORT_USED:
data[0] = MBS_LOOP_ID_USED;
break;
- case LSC_SCODE_CMD_FAILED:
- if ((iop[1] & 0xff) == 0x05) {
- data[0] = MBS_NOT_LOGGED_IN;
- break;
- }
- /* Fall through. */
default:
data[0] = MBS_COMMAND_ERROR;
break;
@@ -1431,9 +1425,8 @@ qla2x00_handle_sense(srb_t *sp, uint8_t *sense_data, uint32_t par_sense_len,
rsp->status_srb = sp;
DEBUG5(printk("%s(): Check condition Sense data, scsi(%ld:%d:%d:%d) "
- "cmd=%p pid=%ld\n", __func__, sp->fcport->vha->host_no,
- cp->device->channel, cp->device->id, cp->device->lun, cp,
- cp->serial_number));
+ "cmd=%p\n", __func__, sp->fcport->vha->host_no,
+ cp->device->channel, cp->device->id, cp->device->lun, cp));
if (sense_len)
DEBUG5(qla2x00_dump_buffer(cp->sense_buffer, sense_len));
}
@@ -1757,6 +1750,8 @@ check_scsi_status:
case CS_INCOMPLETE:
case CS_PORT_UNAVAILABLE:
case CS_TIMEOUT:
+ case CS_RESET:
+
/*
* We are going to have the fc class block the rport
* while we try to recover so instruct the mid layer
@@ -1781,10 +1776,6 @@ check_scsi_status:
qla2x00_mark_device_lost(fcport->vha, fcport, 1, 1);
break;
- case CS_RESET:
- cp->result = DID_TRANSPORT_DISRUPTED << 16;
- break;
-
case CS_ABORTED:
cp->result = DID_RESET << 16;
break;
@@ -1801,10 +1792,10 @@ out:
if (logit)
DEBUG2(qla_printk(KERN_INFO, ha,
"scsi(%ld:%d:%d) FCP command status: 0x%x-0x%x (0x%x) "
- "oxid=0x%x ser=0x%lx cdb=%02x%02x%02x len=0x%x "
+ "oxid=0x%x cdb=%02x%02x%02x len=0x%x "
"rsp_info=0x%x resid=0x%x fw_resid=0x%x\n", vha->host_no,
cp->device->id, cp->device->lun, comp_status, scsi_status,
- cp->result, ox_id, cp->serial_number, cp->cmnd[0],
+ cp->result, ox_id, cp->cmnd[0],
cp->cmnd[1], cp->cmnd[2], scsi_bufflen(cp), rsp_info_len,
resid_len, fw_resid_len));
diff --git a/drivers/scsi/qla2xxx/qla_os.c b/drivers/scsi/qla2xxx/qla_os.c
index 800ea9269752..1644eabaafeb 100644
--- a/drivers/scsi/qla2xxx/qla_os.c
+++ b/drivers/scsi/qla2xxx/qla_os.c
@@ -160,6 +160,11 @@ MODULE_PARM_DESC(ql2xtargetreset,
"Enable target reset."
"Default is 1 - use hw defaults.");
+int ql2xgffidenable;
+module_param(ql2xgffidenable, int, S_IRUGO|S_IRUSR);
+MODULE_PARM_DESC(ql2xgffidenable,
+ "Enables GFF_ID checks of port type. "
+ "Default is 0 - Do not use GFF_ID information.");
int ql2xasynctmfenable;
module_param(ql2xasynctmfenable, int, S_IRUGO|S_IRUSR);
@@ -174,8 +179,7 @@ static int qla2xxx_slave_alloc(struct scsi_device *);
static int qla2xxx_scan_finished(struct Scsi_Host *, unsigned long time);
static void qla2xxx_scan_start(struct Scsi_Host *);
static void qla2xxx_slave_destroy(struct scsi_device *);
-static int qla2xxx_queuecommand(struct scsi_cmnd *cmd,
- void (*fn)(struct scsi_cmnd *));
+static int qla2xxx_queuecommand(struct Scsi_Host *h, struct scsi_cmnd *cmd);
static int qla2xxx_eh_abort(struct scsi_cmnd *);
static int qla2xxx_eh_device_reset(struct scsi_cmnd *);
static int qla2xxx_eh_target_reset(struct scsi_cmnd *);
@@ -255,6 +259,7 @@ static void qla2x00_rst_aen(scsi_qla_host_t *);
static int qla2x00_mem_alloc(struct qla_hw_data *, uint16_t, uint16_t,
struct req_que **, struct rsp_que **);
+static void qla2x00_free_fw_dump(struct qla_hw_data *);
static void qla2x00_mem_free(struct qla_hw_data *);
static void qla2x00_sp_free_dma(srb_t *);
@@ -529,7 +534,7 @@ qla2x00_get_new_sp(scsi_qla_host_t *vha, fc_port_t *fcport,
}
static int
-qla2xxx_queuecommand(struct scsi_cmnd *cmd, void (*done)(struct scsi_cmnd *))
+qla2xxx_queuecommand_lck(struct scsi_cmnd *cmd, void (*done)(struct scsi_cmnd *))
{
scsi_qla_host_t *vha = shost_priv(cmd->device->host);
fc_port_t *fcport = (struct fc_port *) cmd->device->hostdata;
@@ -539,6 +544,7 @@ qla2xxx_queuecommand(struct scsi_cmnd *cmd, void (*done)(struct scsi_cmnd *))
srb_t *sp;
int rval;
+ spin_unlock_irq(vha->host->host_lock);
if (ha->flags.eeh_busy) {
if (ha->flags.pci_channel_io_perm_failure)
cmd->result = DID_NO_CONNECT << 16;
@@ -553,10 +559,6 @@ qla2xxx_queuecommand(struct scsi_cmnd *cmd, void (*done)(struct scsi_cmnd *))
goto qc24_fail_command;
}
- /* Close window on fcport/rport state-transitioning. */
- if (fcport->drport)
- goto qc24_target_busy;
-
if (!vha->flags.difdix_supported &&
scsi_get_prot_op(cmd) != SCSI_PROT_NORMAL) {
DEBUG2(qla_printk(KERN_ERR, ha,
@@ -567,15 +569,14 @@ qla2xxx_queuecommand(struct scsi_cmnd *cmd, void (*done)(struct scsi_cmnd *))
}
if (atomic_read(&fcport->state) != FCS_ONLINE) {
if (atomic_read(&fcport->state) == FCS_DEVICE_DEAD ||
- atomic_read(&base_vha->loop_state) == LOOP_DEAD) {
+ atomic_read(&fcport->state) == FCS_DEVICE_LOST ||
+ atomic_read(&base_vha->loop_state) == LOOP_DEAD) {
cmd->result = DID_NO_CONNECT << 16;
goto qc24_fail_command;
}
goto qc24_target_busy;
}
- spin_unlock_irq(vha->host->host_lock);
-
sp = qla2x00_get_new_sp(base_vha, fcport, cmd, done);
if (!sp)
goto qc24_host_busy_lock;
@@ -597,14 +598,18 @@ qc24_host_busy_lock:
return SCSI_MLQUEUE_HOST_BUSY;
qc24_target_busy:
+ spin_lock_irq(vha->host->host_lock);
return SCSI_MLQUEUE_TARGET_BUSY;
qc24_fail_command:
+ spin_lock_irq(vha->host->host_lock);
done(cmd);
return 0;
}
+static DEF_SCSI_QCMD(qla2xxx_queuecommand)
+
/*
* qla2x00_eh_wait_on_command
@@ -824,81 +829,58 @@ qla2xxx_eh_abort(struct scsi_cmnd *cmd)
{
scsi_qla_host_t *vha = shost_priv(cmd->device->host);
srb_t *sp;
- int ret, i;
+ int ret;
unsigned int id, lun;
- unsigned long serial;
unsigned long flags;
int wait = 0;
struct qla_hw_data *ha = vha->hw;
- struct req_que *req = vha->req;
- srb_t *spt;
- int got_ref = 0;
fc_block_scsi_eh(cmd);
if (!CMD_SP(cmd))
return SUCCESS;
- ret = SUCCESS;
-
id = cmd->device->id;
lun = cmd->device->lun;
- serial = cmd->serial_number;
- spt = (srb_t *) CMD_SP(cmd);
- if (!spt)
- return SUCCESS;
- /* Check active list for command command. */
spin_lock_irqsave(&ha->hardware_lock, flags);
- for (i = 1; i < MAX_OUTSTANDING_COMMANDS; i++) {
- sp = req->outstanding_cmds[i];
-
- if (sp == NULL)
- continue;
- if ((sp->ctx) && !(sp->flags & SRB_FCP_CMND_DMA_VALID) &&
- !IS_PROT_IO(sp))
- continue;
- if (sp->cmd != cmd)
- continue;
+ sp = (srb_t *) CMD_SP(cmd);
+ if (!sp) {
+ spin_unlock_irqrestore(&ha->hardware_lock, flags);
+ return SUCCESS;
+ }
- DEBUG2(printk("%s(%ld): aborting sp %p from RISC."
- " pid=%ld.\n", __func__, vha->host_no, sp, serial));
+ DEBUG2(printk("%s(%ld): aborting sp %p from RISC.",
+ __func__, vha->host_no, sp));
- /* Get a reference to the sp and drop the lock.*/
- sp_get(sp);
- got_ref++;
+ /* Get a reference to the sp and drop the lock.*/
+ sp_get(sp);
- spin_unlock_irqrestore(&ha->hardware_lock, flags);
- if (ha->isp_ops->abort_command(sp)) {
- DEBUG2(printk("%s(%ld): abort_command "
- "mbx failed.\n", __func__, vha->host_no));
- ret = FAILED;
- } else {
- DEBUG3(printk("%s(%ld): abort_command "
- "mbx success.\n", __func__, vha->host_no));
- wait = 1;
- }
- spin_lock_irqsave(&ha->hardware_lock, flags);
- break;
- }
spin_unlock_irqrestore(&ha->hardware_lock, flags);
+ if (ha->isp_ops->abort_command(sp)) {
+ DEBUG2(printk("%s(%ld): abort_command "
+ "mbx failed.\n", __func__, vha->host_no));
+ ret = FAILED;
+ } else {
+ DEBUG3(printk("%s(%ld): abort_command "
+ "mbx success.\n", __func__, vha->host_no));
+ wait = 1;
+ }
+ qla2x00_sp_compl(ha, sp);
/* Wait for the command to be returned. */
if (wait) {
if (qla2x00_eh_wait_on_command(cmd) != QLA_SUCCESS) {
qla_printk(KERN_ERR, ha,
- "scsi(%ld:%d:%d): Abort handler timed out -- %lx "
- "%x.\n", vha->host_no, id, lun, serial, ret);
+ "scsi(%ld:%d:%d): Abort handler timed out -- %x.\n",
+ vha->host_no, id, lun, ret);
ret = FAILED;
}
}
- if (got_ref)
- qla2x00_sp_compl(ha, sp);
-
qla_printk(KERN_INFO, ha,
- "scsi(%ld:%d:%d): Abort command issued -- %d %lx %x.\n",
- vha->host_no, id, lun, wait, serial, ret);
+ "scsi(%ld:%d:%d): Abort command issued -- %d %x.\n",
+ vha->host_no, id, lun, wait, ret);
return ret;
}
@@ -1043,13 +1025,11 @@ qla2xxx_eh_bus_reset(struct scsi_cmnd *cmd)
fc_port_t *fcport = (struct fc_port *) cmd->device->hostdata;
int ret = FAILED;
unsigned int id, lun;
- unsigned long serial;
fc_block_scsi_eh(cmd);
id = cmd->device->id;
lun = cmd->device->lun;
- serial = cmd->serial_number;
if (!fcport)
return ret;
@@ -1104,14 +1084,12 @@ qla2xxx_eh_host_reset(struct scsi_cmnd *cmd)
struct qla_hw_data *ha = vha->hw;
int ret = FAILED;
unsigned int id, lun;
- unsigned long serial;
scsi_qla_host_t *base_vha = pci_get_drvdata(ha->pdev);
fc_block_scsi_eh(cmd);
id = cmd->device->id;
lun = cmd->device->lun;
- serial = cmd->serial_number;
if (!fcport)
return ret;
@@ -1974,6 +1952,7 @@ qla2x00_probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
ha->bars = bars;
ha->mem_only = mem_only;
spin_lock_init(&ha->hardware_lock);
+ spin_lock_init(&ha->vport_slock);
/* Set ISP-type information. */
qla2x00_set_isp_flags(ha);
@@ -2342,6 +2321,42 @@ probe_out:
}
static void
+qla2x00_shutdown(struct pci_dev *pdev)
+{
+ scsi_qla_host_t *vha;
+ struct qla_hw_data *ha;
+
+ vha = pci_get_drvdata(pdev);
+ ha = vha->hw;
+
+ /* Turn-off FCE trace */
+ if (ha->flags.fce_enabled) {
+ qla2x00_disable_fce_trace(vha, NULL, NULL);
+ ha->flags.fce_enabled = 0;
+ }
+
+ /* Turn-off EFT trace */
+ if (ha->eft)
+ qla2x00_disable_eft_trace(vha);
+
+ /* Stop currently executing firmware. */
+ qla2x00_try_to_stop_firmware(vha);
+
+ /* Turn adapter off line */
+ vha->flags.online = 0;
+
+ /* turn-off interrupts on the card */
+ if (ha->interrupts_on) {
+ vha->flags.init_done = 0;
+ ha->isp_ops->disable_intrs(ha);
+ }
+
+ qla2x00_free_irqs(vha);
+
+ qla2x00_free_fw_dump(ha);
+}
+
+static void
qla2x00_remove_one(struct pci_dev *pdev)
{
scsi_qla_host_t *base_vha, *vha;
@@ -2597,12 +2612,12 @@ qla2x00_mark_all_devices_lost(scsi_qla_host_t *vha, int defer)
if (atomic_read(&fcport->state) == FCS_DEVICE_DEAD)
continue;
if (atomic_read(&fcport->state) == FCS_ONLINE) {
+ atomic_set(&fcport->state, FCS_DEVICE_LOST);
if (defer)
qla2x00_schedule_rport_del(vha, fcport, defer);
else if (vha->vp_idx == fcport->vp_idx)
qla2x00_schedule_rport_del(vha, fcport, defer);
}
- atomic_set(&fcport->state, FCS_DEVICE_LOST);
}
}
@@ -2830,28 +2845,48 @@ fail:
}
/*
-* qla2x00_mem_free
-* Frees all adapter allocated memory.
+* qla2x00_free_fw_dump
+* Frees fw dump stuff.
*
* Input:
-* ha = adapter block pointer.
+* ha = adapter block pointer.
*/
static void
-qla2x00_mem_free(struct qla_hw_data *ha)
+qla2x00_free_fw_dump(struct qla_hw_data *ha)
{
- if (ha->srb_mempool)
- mempool_destroy(ha->srb_mempool);
-
if (ha->fce)
dma_free_coherent(&ha->pdev->dev, FCE_SIZE, ha->fce,
- ha->fce_dma);
+ ha->fce_dma);
if (ha->fw_dump) {
if (ha->eft)
dma_free_coherent(&ha->pdev->dev,
- ntohl(ha->fw_dump->eft_size), ha->eft, ha->eft_dma);
+ ntohl(ha->fw_dump->eft_size), ha->eft, ha->eft_dma);
vfree(ha->fw_dump);
}
+ ha->fce = NULL;
+ ha->fce_dma = 0;
+ ha->eft = NULL;
+ ha->eft_dma = 0;
+ ha->fw_dump = NULL;
+ ha->fw_dumped = 0;
+ ha->fw_dump_reading = 0;
+}
+
+/*
+* qla2x00_mem_free
+* Frees all adapter allocated memory.
+*
+* Input:
+* ha = adapter block pointer.
+*/
+static void
+qla2x00_mem_free(struct qla_hw_data *ha)
+{
+ qla2x00_free_fw_dump(ha);
+
+ if (ha->srb_mempool)
+ mempool_destroy(ha->srb_mempool);
if (ha->dcbx_tlv)
dma_free_coherent(&ha->pdev->dev, DCBX_TLV_DATA_SIZE,
@@ -2925,8 +2960,6 @@ qla2x00_mem_free(struct qla_hw_data *ha)
ha->srb_mempool = NULL;
ha->ctx_mempool = NULL;
- ha->eft = NULL;
- ha->eft_dma = 0;
ha->sns_cmd = NULL;
ha->sns_cmd_dma = 0;
ha->ct_sns = NULL;
@@ -2946,10 +2979,6 @@ qla2x00_mem_free(struct qla_hw_data *ha)
ha->gid_list = NULL;
ha->gid_list_dma = 0;
-
- ha->fw_dump = NULL;
- ha->fw_dumped = 0;
- ha->fw_dump_reading = 0;
}
struct scsi_qla_host *qla2x00_create_host(struct scsi_host_template *sht,
@@ -3547,11 +3576,9 @@ void
qla2x00_timer(scsi_qla_host_t *vha)
{
unsigned long cpu_flags = 0;
- fc_port_t *fcport;
int start_dpc = 0;
int index;
srb_t *sp;
- int t;
uint16_t w;
struct qla_hw_data *ha = vha->hw;
struct req_que *req;
@@ -3567,34 +3594,6 @@ qla2x00_timer(scsi_qla_host_t *vha)
/* Hardware read to raise pending EEH errors during mailbox waits. */
if (!pci_channel_offline(ha->pdev))
pci_read_config_word(ha->pdev, PCI_VENDOR_ID, &w);
- /*
- * Ports - Port down timer.
- *
- * Whenever, a port is in the LOST state we start decrementing its port
- * down timer every second until it reaches zero. Once it reaches zero
- * the port it marked DEAD.
- */
- t = 0;
- list_for_each_entry(fcport, &vha->vp_fcports, list) {
- if (fcport->port_type != FCT_TARGET)
- continue;
-
- if (atomic_read(&fcport->state) == FCS_DEVICE_LOST) {
-
- if (atomic_read(&fcport->port_down_timer) == 0)
- continue;
-
- if (atomic_dec_and_test(&fcport->port_down_timer) != 0)
- atomic_set(&fcport->state, FCS_DEVICE_DEAD);
-
- DEBUG(printk("scsi(%ld): fcport-%d - port retry count: "
- "%d remaining\n",
- vha->host_no,
- t, atomic_read(&fcport->port_down_timer)));
- }
- t++;
- } /* End of for fcport */
-
/* Loop down handler. */
if (atomic_read(&vha->loop_down_timer) > 0 &&
@@ -4079,6 +4078,7 @@ static struct pci_driver qla2xxx_pci_driver = {
.id_table = qla2xxx_pci_tbl,
.probe = qla2x00_probe_one,
.remove = qla2x00_remove_one,
+ .shutdown = qla2x00_shutdown,
.err_handler = &qla2xxx_err_handler,
};
diff --git a/drivers/scsi/qla4xxx/ql4_dbg.c b/drivers/scsi/qla4xxx/ql4_dbg.c
index cbceb0ebabf7..edcf048215dd 100644
--- a/drivers/scsi/qla4xxx/ql4_dbg.c
+++ b/drivers/scsi/qla4xxx/ql4_dbg.c
@@ -30,3 +30,104 @@ void qla4xxx_dump_buffer(void *b, uint32_t size)
printk(KERN_INFO "\n");
}
+void qla4xxx_dump_registers(struct scsi_qla_host *ha)
+{
+ uint8_t i;
+
+ if (is_qla8022(ha)) {
+ for (i = 1; i < MBOX_REG_COUNT; i++)
+ printk(KERN_INFO "mailbox[%d] = 0x%08X\n",
+ i, readl(&ha->qla4_8xxx_reg->mailbox_in[i]));
+ return;
+ }
+
+ for (i = 0; i < MBOX_REG_COUNT; i++) {
+ printk(KERN_INFO "0x%02X mailbox[%d] = 0x%08X\n",
+ (uint8_t) offsetof(struct isp_reg, mailbox[i]), i,
+ readw(&ha->reg->mailbox[i]));
+ }
+
+ printk(KERN_INFO "0x%02X flash_address = 0x%08X\n",
+ (uint8_t) offsetof(struct isp_reg, flash_address),
+ readw(&ha->reg->flash_address));
+ printk(KERN_INFO "0x%02X flash_data = 0x%08X\n",
+ (uint8_t) offsetof(struct isp_reg, flash_data),
+ readw(&ha->reg->flash_data));
+ printk(KERN_INFO "0x%02X ctrl_status = 0x%08X\n",
+ (uint8_t) offsetof(struct isp_reg, ctrl_status),
+ readw(&ha->reg->ctrl_status));
+
+ if (is_qla4010(ha)) {
+ printk(KERN_INFO "0x%02X nvram = 0x%08X\n",
+ (uint8_t) offsetof(struct isp_reg, u1.isp4010.nvram),
+ readw(&ha->reg->u1.isp4010.nvram));
+ } else if (is_qla4022(ha) | is_qla4032(ha)) {
+ printk(KERN_INFO "0x%02X intr_mask = 0x%08X\n",
+ (uint8_t) offsetof(struct isp_reg, u1.isp4022.intr_mask),
+ readw(&ha->reg->u1.isp4022.intr_mask));
+ printk(KERN_INFO "0x%02X nvram = 0x%08X\n",
+ (uint8_t) offsetof(struct isp_reg, u1.isp4022.nvram),
+ readw(&ha->reg->u1.isp4022.nvram));
+ printk(KERN_INFO "0x%02X semaphore = 0x%08X\n",
+ (uint8_t) offsetof(struct isp_reg, u1.isp4022.semaphore),
+ readw(&ha->reg->u1.isp4022.semaphore));
+ }
+ printk(KERN_INFO "0x%02X req_q_in = 0x%08X\n",
+ (uint8_t) offsetof(struct isp_reg, req_q_in),
+ readw(&ha->reg->req_q_in));
+ printk(KERN_INFO "0x%02X rsp_q_out = 0x%08X\n",
+ (uint8_t) offsetof(struct isp_reg, rsp_q_out),
+ readw(&ha->reg->rsp_q_out));
+
+ if (is_qla4010(ha)) {
+ printk(KERN_INFO "0x%02X ext_hw_conf = 0x%08X\n",
+ (uint8_t) offsetof(struct isp_reg, u2.isp4010.ext_hw_conf),
+ readw(&ha->reg->u2.isp4010.ext_hw_conf));
+ printk(KERN_INFO "0x%02X port_ctrl = 0x%08X\n",
+ (uint8_t) offsetof(struct isp_reg, u2.isp4010.port_ctrl),
+ readw(&ha->reg->u2.isp4010.port_ctrl));
+ printk(KERN_INFO "0x%02X port_status = 0x%08X\n",
+ (uint8_t) offsetof(struct isp_reg, u2.isp4010.port_status),
+ readw(&ha->reg->u2.isp4010.port_status));
+ printk(KERN_INFO "0x%02X req_q_out = 0x%08X\n",
+ (uint8_t) offsetof(struct isp_reg, u2.isp4010.req_q_out),
+ readw(&ha->reg->u2.isp4010.req_q_out));
+ printk(KERN_INFO "0x%02X gp_out = 0x%08X\n",
+ (uint8_t) offsetof(struct isp_reg, u2.isp4010.gp_out),
+ readw(&ha->reg->u2.isp4010.gp_out));
+ printk(KERN_INFO "0x%02X gp_in = 0x%08X\n",
+ (uint8_t) offsetof(struct isp_reg, u2.isp4010.gp_in),
+ readw(&ha->reg->u2.isp4010.gp_in));
+ printk(KERN_INFO "0x%02X port_err_status = 0x%08X\n", (uint8_t)
+ offsetof(struct isp_reg, u2.isp4010.port_err_status),
+ readw(&ha->reg->u2.isp4010.port_err_status));
+ } else if (is_qla4022(ha) | is_qla4032(ha)) {
+ printk(KERN_INFO "Page 0 Registers:\n");
+ printk(KERN_INFO "0x%02X ext_hw_conf = 0x%08X\n", (uint8_t)
+ offsetof(struct isp_reg, u2.isp4022.p0.ext_hw_conf),
+ readw(&ha->reg->u2.isp4022.p0.ext_hw_conf));
+ printk(KERN_INFO "0x%02X port_ctrl = 0x%08X\n", (uint8_t)
+ offsetof(struct isp_reg, u2.isp4022.p0.port_ctrl),
+ readw(&ha->reg->u2.isp4022.p0.port_ctrl));
+ printk(KERN_INFO "0x%02X port_status = 0x%08X\n", (uint8_t)
+ offsetof(struct isp_reg, u2.isp4022.p0.port_status),
+ readw(&ha->reg->u2.isp4022.p0.port_status));
+ printk(KERN_INFO "0x%02X gp_out = 0x%08X\n",
+ (uint8_t) offsetof(struct isp_reg, u2.isp4022.p0.gp_out),
+ readw(&ha->reg->u2.isp4022.p0.gp_out));
+ printk(KERN_INFO "0x%02X gp_in = 0x%08X\n",
+ (uint8_t) offsetof(struct isp_reg, u2.isp4022.p0.gp_in),
+ readw(&ha->reg->u2.isp4022.p0.gp_in));
+ printk(KERN_INFO "0x%02X port_err_status = 0x%08X\n", (uint8_t)
+ offsetof(struct isp_reg, u2.isp4022.p0.port_err_status),
+ readw(&ha->reg->u2.isp4022.p0.port_err_status));
+ printk(KERN_INFO "Page 1 Registers:\n");
+ writel(HOST_MEM_CFG_PAGE & set_rmask(CSR_SCSI_PAGE_SELECT),
+ &ha->reg->ctrl_status);
+ printk(KERN_INFO "0x%02X req_q_out = 0x%08X\n",
+ (uint8_t) offsetof(struct isp_reg, u2.isp4022.p1.req_q_out),
+ readw(&ha->reg->u2.isp4022.p1.req_q_out));
+ writel(PORT_CTRL_STAT_PAGE & set_rmask(CSR_SCSI_PAGE_SELECT),
+ &ha->reg->ctrl_status);
+ }
+}
diff --git a/drivers/scsi/qla4xxx/ql4_def.h b/drivers/scsi/qla4xxx/ql4_def.h
index 9dc0a6616edd..0f3bfc3da5cf 100644
--- a/drivers/scsi/qla4xxx/ql4_def.h
+++ b/drivers/scsi/qla4xxx/ql4_def.h
@@ -24,6 +24,7 @@
#include <linux/delay.h>
#include <linux/interrupt.h>
#include <linux/mutex.h>
+#include <linux/aer.h>
#include <net/tcp.h>
#include <scsi/scsi.h>
@@ -36,24 +37,6 @@
#include "ql4_dbg.h"
#include "ql4_nx.h"
-#if defined(CONFIG_PCIEAER)
-#include <linux/aer.h>
-#else
-/* AER releated */
-static inline int pci_enable_pcie_error_reporting(struct pci_dev *dev)
-{
- return -EINVAL;
-}
-static inline int pci_disable_pcie_error_reporting(struct pci_dev *dev)
-{
- return -EINVAL;
-}
-static inline int pci_cleanup_aer_uncorrect_error_status(struct pci_dev *dev)
-{
- return -EINVAL;
-}
-#endif
-
#ifndef PCI_DEVICE_ID_QLOGIC_ISP4010
#define PCI_DEVICE_ID_QLOGIC_ISP4010 0x4010
#endif
@@ -179,6 +162,7 @@ static inline int pci_cleanup_aer_uncorrect_error_status(struct pci_dev *dev)
#define IOCB_TOV_MARGIN 10
#define RELOGIN_TOV 18
#define ISNS_DEREG_TOV 5
+#define HBA_ONLINE_TOV 30
#define MAX_RESET_HA_RETRIES 2
diff --git a/drivers/scsi/qla4xxx/ql4_fw.h b/drivers/scsi/qla4xxx/ql4_fw.h
index 0336c6db8cb3..5e757d7fff7d 100644
--- a/drivers/scsi/qla4xxx/ql4_fw.h
+++ b/drivers/scsi/qla4xxx/ql4_fw.h
@@ -416,6 +416,8 @@ struct qla_flt_region {
#define MBOX_ASTS_IPV6_ND_PREFIX_IGNORED 0x802C
#define MBOX_ASTS_IPV6_LCL_PREFIX_IGNORED 0x802D
#define MBOX_ASTS_ICMPV6_ERROR_MSG_RCVD 0x802E
+#define MBOX_ASTS_TXSCVR_INSERTED 0x8130
+#define MBOX_ASTS_TXSCVR_REMOVED 0x8131
#define ISNS_EVENT_DATA_RECEIVED 0x0000
#define ISNS_EVENT_CONNECTION_OPENED 0x0001
@@ -446,6 +448,7 @@ struct addr_ctrl_blk {
#define FWOPT_SESSION_MODE 0x0040
#define FWOPT_INITIATOR_MODE 0x0020
#define FWOPT_TARGET_MODE 0x0010
+#define FWOPT_ENABLE_CRBDB 0x8000
uint16_t exec_throttle; /* 04-05 */
uint8_t zio_count; /* 06 */
diff --git a/drivers/scsi/qla4xxx/ql4_glbl.h b/drivers/scsi/qla4xxx/ql4_glbl.h
index 95a26fb1626c..6575a47501e5 100644
--- a/drivers/scsi/qla4xxx/ql4_glbl.h
+++ b/drivers/scsi/qla4xxx/ql4_glbl.h
@@ -94,6 +94,7 @@ void qla4xxx_process_response_queue(struct scsi_qla_host *ha);
void qla4xxx_wake_dpc(struct scsi_qla_host *ha);
void qla4xxx_get_conn_event_log(struct scsi_qla_host *ha);
void qla4xxx_mailbox_premature_completion(struct scsi_qla_host *ha);
+void qla4xxx_dump_registers(struct scsi_qla_host *ha);
void qla4_8xxx_pci_config(struct scsi_qla_host *);
int qla4_8xxx_iospace_config(struct scsi_qla_host *ha);
diff --git a/drivers/scsi/qla4xxx/ql4_init.c b/drivers/scsi/qla4xxx/ql4_init.c
index 4c9be77ee70b..dc01fa3da5d1 100644
--- a/drivers/scsi/qla4xxx/ql4_init.c
+++ b/drivers/scsi/qla4xxx/ql4_init.c
@@ -1207,8 +1207,8 @@ static int qla4xxx_start_firmware_from_flash(struct scsi_qla_host *ha)
break;
DEBUG2(printk(KERN_INFO "scsi%ld: %s: Waiting for boot "
- "firmware to complete... ctrl_sts=0x%x\n",
- ha->host_no, __func__, ctrl_status));
+ "firmware to complete... ctrl_sts=0x%x, remaining=%ld\n",
+ ha->host_no, __func__, ctrl_status, max_wait_time));
msleep_interruptible(250);
} while (!time_after_eq(jiffies, max_wait_time));
@@ -1459,6 +1459,12 @@ int qla4xxx_initialize_adapter(struct scsi_qla_host *ha,
exit_init_online:
set_bit(AF_ONLINE, &ha->flags);
exit_init_hba:
+ if (is_qla8022(ha) && (status == QLA_ERROR)) {
+ /* Since interrupts are registered in start_firmware for
+ * 82xx, release them here if initialize_adapter fails */
+ qla4xxx_free_irqs(ha);
+ }
+
DEBUG2(printk("scsi%ld: initialize adapter: %s\n", ha->host_no,
status == QLA_ERROR ? "FAILED" : "SUCCEDED"));
return status;
diff --git a/drivers/scsi/qla4xxx/ql4_iocb.c b/drivers/scsi/qla4xxx/ql4_iocb.c
index 4ef9ba112ee8..5ae49fd87846 100644
--- a/drivers/scsi/qla4xxx/ql4_iocb.c
+++ b/drivers/scsi/qla4xxx/ql4_iocb.c
@@ -202,19 +202,11 @@ static void qla4xxx_build_scsi_iocbs(struct srb *srb,
void qla4_8xxx_queue_iocb(struct scsi_qla_host *ha)
{
uint32_t dbval = 0;
- unsigned long wtime;
dbval = 0x14 | (ha->func_num << 5);
dbval = dbval | (0 << 8) | (ha->request_in << 16);
- writel(dbval, (unsigned long __iomem *)ha->nx_db_wr_ptr);
- wmb();
- wtime = jiffies + (2 * HZ);
- while (readl((void __iomem *)ha->nx_db_rd_ptr) != dbval &&
- !time_after_eq(jiffies, wtime)) {
- writel(dbval, (unsigned long __iomem *)ha->nx_db_wr_ptr);
- wmb();
- }
+ qla4_8xxx_wr_32(ha, ha->nx_db_wr_ptr, ha->request_in);
}
/**
diff --git a/drivers/scsi/qla4xxx/ql4_isr.c b/drivers/scsi/qla4xxx/ql4_isr.c
index 2a1ab63f3eb0..7c33fd5943d5 100644
--- a/drivers/scsi/qla4xxx/ql4_isr.c
+++ b/drivers/scsi/qla4xxx/ql4_isr.c
@@ -72,7 +72,7 @@ qla4xxx_status_cont_entry(struct scsi_qla_host *ha,
{
struct srb *srb = ha->status_srb;
struct scsi_cmnd *cmd;
- uint8_t sense_len;
+ uint16_t sense_len;
if (srb == NULL)
return;
@@ -487,6 +487,8 @@ static void qla4xxx_isr_decode_mailbox(struct scsi_qla_host * ha,
case MBOX_ASTS_SYSTEM_ERROR:
/* Log Mailbox registers */
ql4_printk(KERN_INFO, ha, "%s: System Err\n", __func__);
+ qla4xxx_dump_registers(ha);
+
if (ql4xdontresethba) {
DEBUG2(printk("scsi%ld: %s:Don't Reset HBA\n",
ha->host_no, __func__));
@@ -621,6 +623,18 @@ static void qla4xxx_isr_decode_mailbox(struct scsi_qla_host * ha,
}
break;
+ case MBOX_ASTS_TXSCVR_INSERTED:
+ DEBUG2(printk(KERN_WARNING
+ "scsi%ld: AEN %04x Transceiver"
+ " inserted\n", ha->host_no, mbox_sts[0]));
+ break;
+
+ case MBOX_ASTS_TXSCVR_REMOVED:
+ DEBUG2(printk(KERN_WARNING
+ "scsi%ld: AEN %04x Transceiver"
+ " removed\n", ha->host_no, mbox_sts[0]));
+ break;
+
default:
DEBUG2(printk(KERN_WARNING
"scsi%ld: AEN %04x UNKNOWN\n",
diff --git a/drivers/scsi/qla4xxx/ql4_mbx.c b/drivers/scsi/qla4xxx/ql4_mbx.c
index 90021704d8ca..2d2f9c879bfd 100644
--- a/drivers/scsi/qla4xxx/ql4_mbx.c
+++ b/drivers/scsi/qla4xxx/ql4_mbx.c
@@ -299,6 +299,10 @@ qla4xxx_set_ifcb(struct scsi_qla_host *ha, uint32_t *mbox_cmd,
{
memset(mbox_cmd, 0, sizeof(mbox_cmd[0]) * MBOX_REG_COUNT);
memset(mbox_sts, 0, sizeof(mbox_sts[0]) * MBOX_REG_COUNT);
+
+ if (is_qla8022(ha))
+ qla4_8xxx_wr_32(ha, ha->nx_db_wr_ptr, 0);
+
mbox_cmd[0] = MBOX_CMD_INITIALIZE_FIRMWARE;
mbox_cmd[1] = 0;
mbox_cmd[2] = LSDW(init_fw_cb_dma);
@@ -472,6 +476,11 @@ int qla4xxx_initialize_fw_cb(struct scsi_qla_host * ha)
init_fw_cb->fw_options |=
__constant_cpu_to_le16(FWOPT_SESSION_MODE |
FWOPT_INITIATOR_MODE);
+
+ if (is_qla8022(ha))
+ init_fw_cb->fw_options |=
+ __constant_cpu_to_le16(FWOPT_ENABLE_CRBDB);
+
init_fw_cb->fw_options &= __constant_cpu_to_le16(~FWOPT_TARGET_MODE);
if (qla4xxx_set_ifcb(ha, &mbox_cmd[0], &mbox_sts[0], init_fw_cb_dma)
@@ -592,7 +601,7 @@ int qla4xxx_get_firmware_status(struct scsi_qla_host * ha)
}
ql4_printk(KERN_INFO, ha, "%ld firmare IOCBs available (%d).\n",
- ha->host_no, mbox_cmd[2]);
+ ha->host_no, mbox_sts[2]);
return QLA_SUCCESS;
}
diff --git a/drivers/scsi/qla4xxx/ql4_nx.c b/drivers/scsi/qla4xxx/ql4_nx.c
index 449256f2c5f8..474b10d71364 100644
--- a/drivers/scsi/qla4xxx/ql4_nx.c
+++ b/drivers/scsi/qla4xxx/ql4_nx.c
@@ -839,8 +839,11 @@ qla4_8xxx_rom_lock(struct scsi_qla_host *ha)
done = qla4_8xxx_rd_32(ha, QLA82XX_PCIE_REG(PCIE_SEM2_LOCK));
if (done == 1)
break;
- if (timeout >= qla4_8xxx_rom_lock_timeout)
+ if (timeout >= qla4_8xxx_rom_lock_timeout) {
+ ql4_printk(KERN_WARNING, ha,
+ "%s: Failed to acquire rom lock", __func__);
return -1;
+ }
timeout++;
@@ -1078,21 +1081,6 @@ qla4_8xxx_pinit_from_rom(struct scsi_qla_host *ha, int verbose)
return 0;
}
-static int qla4_8xxx_check_for_bad_spd(struct scsi_qla_host *ha)
-{
- u32 val = 0;
- val = qla4_8xxx_rd_32(ha, BOOT_LOADER_DIMM_STATUS) ;
- val &= QLA82XX_BOOT_LOADER_MN_ISSUE;
- if (val & QLA82XX_PEG_TUNE_MN_SPD_ZEROED) {
- printk("Memory DIMM SPD not programmed. Assumed valid.\n");
- return 1;
- } else if (val) {
- printk("Memory DIMM type incorrect. Info:%08X.\n", val);
- return 2;
- }
- return 0;
-}
-
static int
qla4_8xxx_load_from_flash(struct scsi_qla_host *ha, uint32_t image_start)
{
@@ -1377,8 +1365,6 @@ static int qla4_8xxx_cmdpeg_ready(struct scsi_qla_host *ha, int pegtune_val)
} while (--retries);
- qla4_8xxx_check_for_bad_spd(ha);
-
if (!retries) {
pegtune_val = qla4_8xxx_rd_32(ha,
QLA82XX_ROMUSB_GLB_PEGTUNE_DONE);
@@ -1540,14 +1526,31 @@ qla4_8xxx_try_start_fw(struct scsi_qla_host *ha)
ql4_printk(KERN_INFO, ha,
"FW: Attempting to load firmware from flash...\n");
rval = qla4_8xxx_start_firmware(ha, ha->hw.flt_region_fw);
- if (rval == QLA_SUCCESS)
- return rval;
- ql4_printk(KERN_ERR, ha, "FW: Load firmware from flash FAILED...\n");
+ if (rval != QLA_SUCCESS) {
+ ql4_printk(KERN_ERR, ha, "FW: Load firmware from flash"
+ " FAILED...\n");
+ return rval;
+ }
return rval;
}
+static void qla4_8xxx_rom_lock_recovery(struct scsi_qla_host *ha)
+{
+ if (qla4_8xxx_rom_lock(ha)) {
+ /* Someone else is holding the lock. */
+ dev_info(&ha->pdev->dev, "Resetting rom_lock\n");
+ }
+
+ /*
+ * Either we got the lock, or someone
+ * else died while holding it.
+ * In either case, unlock.
+ */
+ qla4_8xxx_rom_unlock(ha);
+}
+
/**
* qla4_8xxx_device_bootstrap - Initialize device, set DEV_READY, start fw
* @ha: pointer to adapter structure
@@ -1557,11 +1560,12 @@ qla4_8xxx_try_start_fw(struct scsi_qla_host *ha)
static int
qla4_8xxx_device_bootstrap(struct scsi_qla_host *ha)
{
- int rval, i, timeout;
+ int rval = QLA_ERROR;
+ int i, timeout;
uint32_t old_count, count;
+ int need_reset = 0, peg_stuck = 1;
- if (qla4_8xxx_need_reset(ha))
- goto dev_initialize;
+ need_reset = qla4_8xxx_need_reset(ha);
old_count = qla4_8xxx_rd_32(ha, QLA82XX_PEG_ALIVE_COUNTER);
@@ -1570,12 +1574,30 @@ qla4_8xxx_device_bootstrap(struct scsi_qla_host *ha)
if (timeout) {
qla4_8xxx_wr_32(ha, QLA82XX_CRB_DEV_STATE,
QLA82XX_DEV_FAILED);
- return QLA_ERROR;
+ return rval;
}
count = qla4_8xxx_rd_32(ha, QLA82XX_PEG_ALIVE_COUNTER);
if (count != old_count)
+ peg_stuck = 0;
+ }
+
+ if (need_reset) {
+ /* We are trying to perform a recovery here. */
+ if (peg_stuck)
+ qla4_8xxx_rom_lock_recovery(ha);
+ goto dev_initialize;
+ } else {
+ /* Start of day for this ha context. */
+ if (peg_stuck) {
+ /* Either we are the first or recovery in progress. */
+ qla4_8xxx_rom_lock_recovery(ha);
+ goto dev_initialize;
+ } else {
+ /* Firmware already running. */
+ rval = QLA_SUCCESS;
goto dev_ready;
+ }
}
dev_initialize:
@@ -1601,7 +1623,7 @@ dev_ready:
ql4_printk(KERN_INFO, ha, "HW State: READY\n");
qla4_8xxx_wr_32(ha, QLA82XX_CRB_DEV_STATE, QLA82XX_DEV_READY);
- return QLA_SUCCESS;
+ return rval;
}
/**
@@ -1764,20 +1786,9 @@ int qla4_8xxx_load_risc(struct scsi_qla_host *ha)
int retval;
retval = qla4_8xxx_device_state_handler(ha);
- if (retval == QLA_SUCCESS &&
- !test_bit(AF_INIT_DONE, &ha->flags)) {
+ if (retval == QLA_SUCCESS && !test_bit(AF_INIT_DONE, &ha->flags))
retval = qla4xxx_request_irqs(ha);
- if (retval != QLA_SUCCESS) {
- ql4_printk(KERN_WARNING, ha,
- "Failed to reserve interrupt %d already in use.\n",
- ha->pdev->irq);
- } else {
- set_bit(AF_IRQ_ATTACHED, &ha->flags);
- ha->host->irq = ha->pdev->irq;
- ql4_printk(KERN_INFO, ha, "%s: irq %d attached\n",
- __func__, ha->pdev->irq);
- }
- }
+
return retval;
}
diff --git a/drivers/scsi/qla4xxx/ql4_nx.h b/drivers/scsi/qla4xxx/ql4_nx.h
index 931ad3f1e918..ff689bf53007 100644
--- a/drivers/scsi/qla4xxx/ql4_nx.h
+++ b/drivers/scsi/qla4xxx/ql4_nx.h
@@ -24,7 +24,6 @@
#define CRB_CMDPEG_STATE QLA82XX_REG(0x50)
#define CRB_RCVPEG_STATE QLA82XX_REG(0x13c)
-#define BOOT_LOADER_DIMM_STATUS QLA82XX_REG(0x54)
#define CRB_DMA_SHIFT QLA82XX_REG(0xcc)
#define QLA82XX_HW_H0_CH_HUB_ADR 0x05
@@ -529,12 +528,12 @@
# define QLA82XX_CAM_RAM_BASE (QLA82XX_CRB_CAM + 0x02000)
# define QLA82XX_CAM_RAM(reg) (QLA82XX_CAM_RAM_BASE + (reg))
-#define QLA82XX_PEG_TUNE_MN_SPD_ZEROED 0x80000000
-#define QLA82XX_BOOT_LOADER_MN_ISSUE 0xff00ffff
#define QLA82XX_PORT_MODE_ADDR (QLA82XX_CAM_RAM(0x24))
#define QLA82XX_PEG_HALT_STATUS1 (QLA82XX_CAM_RAM(0xa8))
#define QLA82XX_PEG_HALT_STATUS2 (QLA82XX_CAM_RAM(0xac))
#define QLA82XX_PEG_ALIVE_COUNTER (QLA82XX_CAM_RAM(0xb0))
+#define QLA82XX_CAM_RAM_DB1 (QLA82XX_CAM_RAM(0x1b0))
+#define QLA82XX_CAM_RAM_DB2 (QLA82XX_CAM_RAM(0x1b4))
#define HALT_STATUS_UNRECOVERABLE 0x80000000
#define HALT_STATUS_RECOVERABLE 0x40000000
diff --git a/drivers/scsi/qla4xxx/ql4_os.c b/drivers/scsi/qla4xxx/ql4_os.c
index 370d40ff1529..0d48fb4d1044 100644
--- a/drivers/scsi/qla4xxx/ql4_os.c
+++ b/drivers/scsi/qla4xxx/ql4_os.c
@@ -79,8 +79,7 @@ static enum blk_eh_timer_return qla4xxx_eh_cmd_timed_out(struct scsi_cmnd *sc);
/*
* SCSI host template entry points
*/
-static int qla4xxx_queuecommand(struct scsi_cmnd *cmd,
- void (*done) (struct scsi_cmnd *));
+static int qla4xxx_queuecommand(struct Scsi_Host *h, struct scsi_cmnd *cmd);
static int qla4xxx_eh_abort(struct scsi_cmnd *cmd);
static int qla4xxx_eh_device_reset(struct scsi_cmnd *cmd);
static int qla4xxx_eh_target_reset(struct scsi_cmnd *cmd);
@@ -167,8 +166,6 @@ static void qla4xxx_recovery_timedout(struct iscsi_cls_session *session)
"of (%d) secs exhausted, marking device DEAD.\n",
ha->host_no, __func__, ddb_entry->fw_ddb_index,
QL4_SESS_RECOVERY_TMO));
-
- qla4xxx_wake_dpc(ha);
}
}
@@ -466,7 +463,7 @@ void qla4xxx_srb_compl(struct kref *ref)
* completion handling). Unfortunely, it sometimes calls the scheduler
* in interrupt context which is a big NO! NO!.
**/
-static int qla4xxx_queuecommand(struct scsi_cmnd *cmd,
+static int qla4xxx_queuecommand_lck(struct scsi_cmnd *cmd,
void (*done)(struct scsi_cmnd *))
{
struct scsi_qla_host *ha = to_qla_host(cmd->device->host);
@@ -540,6 +537,8 @@ qc_fail_command:
return 0;
}
+static DEF_SCSI_QCMD(qla4xxx_queuecommand)
+
/**
* qla4xxx_mem_free - frees memory allocated to adapter
* @ha: Pointer to host adapter structure.
@@ -573,10 +572,6 @@ static void qla4xxx_mem_free(struct scsi_qla_host *ha)
if (ha->nx_pcibase)
iounmap(
(struct device_reg_82xx __iomem *)ha->nx_pcibase);
-
- if (ha->nx_db_wr_ptr)
- iounmap(
- (struct device_reg_82xx __iomem *)ha->nx_db_wr_ptr);
} else if (ha->reg)
iounmap(ha->reg);
pci_release_regions(ha->pdev);
@@ -692,7 +687,9 @@ static void qla4_8xxx_check_fw_alive(struct scsi_qla_host *ha)
qla4xxx_wake_dpc(ha);
qla4xxx_mailbox_premature_completion(ha);
}
- }
+ } else
+ ha->seconds_since_last_heartbeat = 0;
+
ha->fw_heartbeat_counter = fw_heartbeat_counter;
}
@@ -885,7 +882,13 @@ static int qla4xxx_cmd_wait(struct scsi_qla_host *ha)
/* Find a command that hasn't completed. */
for (index = 0; index < ha->host->can_queue; index++) {
cmd = scsi_host_find_tag(ha->host, index);
- if (cmd != NULL)
+ /*
+ * We cannot just check if the index is valid,
+ * because if we are run from the scsi eh, then
+ * the scsi/block layer is going to prevent
+ * the tag from being released.
+ */
+ if (cmd != NULL && CMD_SP(cmd))
break;
}
spin_unlock_irqrestore(&ha->hardware_lock, flags);
@@ -937,11 +940,14 @@ int qla4xxx_soft_reset(struct scsi_qla_host *ha)
{
uint32_t max_wait_time;
unsigned long flags = 0;
- int status = QLA_ERROR;
+ int status;
uint32_t ctrl_status;
- qla4xxx_hw_reset(ha);
+ status = qla4xxx_hw_reset(ha);
+ if (status != QLA_SUCCESS)
+ return status;
+ status = QLA_ERROR;
/* Wait until the Network Reset Intr bit is cleared */
max_wait_time = RESET_INTR_TOV;
do {
@@ -1101,7 +1107,8 @@ static int qla4xxx_recover_adapter(struct scsi_qla_host *ha)
ha->host_no, __func__));
status = ha->isp_ops->reset_firmware(ha);
if (status == QLA_SUCCESS) {
- qla4xxx_cmd_wait(ha);
+ if (!test_bit(AF_FW_RECOVERY, &ha->flags))
+ qla4xxx_cmd_wait(ha);
ha->isp_ops->disable_intrs(ha);
qla4xxx_process_aen(ha, FLUSH_DDB_CHANGED_AENS);
qla4xxx_abort_active_cmds(ha, DID_RESET << 16);
@@ -1118,7 +1125,8 @@ static int qla4xxx_recover_adapter(struct scsi_qla_host *ha)
* or if stop_firmware fails for ISP-82xx.
* This is the default case for ISP-4xxx */
if (!is_qla8022(ha) || reset_chip) {
- qla4xxx_cmd_wait(ha);
+ if (!test_bit(AF_FW_RECOVERY, &ha->flags))
+ qla4xxx_cmd_wait(ha);
qla4xxx_process_aen(ha, FLUSH_DDB_CHANGED_AENS);
qla4xxx_abort_active_cmds(ha, DID_RESET << 16);
DEBUG2(ql4_printk(KERN_INFO, ha,
@@ -1471,24 +1479,10 @@ int qla4_8xxx_iospace_config(struct scsi_qla_host *ha)
db_base = pci_resource_start(pdev, 4); /* doorbell is on bar 4 */
db_len = pci_resource_len(pdev, 4);
- /* mapping of doorbell write pointer */
- ha->nx_db_wr_ptr = (unsigned long)ioremap(db_base +
- (ha->pdev->devfn << 12), 4);
- if (!ha->nx_db_wr_ptr) {
- printk(KERN_ERR
- "cannot remap MMIO doorbell-write (%s), aborting\n",
- pci_name(pdev));
- goto iospace_error_exit;
- }
- /* mapping of doorbell read pointer */
- ha->nx_db_rd_ptr = (uint8_t *) ha->nx_pcibase + (512 * 1024) +
- (ha->pdev->devfn * 8);
- if (!ha->nx_db_rd_ptr)
- printk(KERN_ERR
- "cannot remap MMIO doorbell-read (%s), aborting\n",
- pci_name(pdev));
- return 0;
+ ha->nx_db_wr_ptr = (ha->pdev->devfn == 4 ? QLA82XX_CAM_RAM_DB1 :
+ QLA82XX_CAM_RAM_DB2);
+ return 0;
iospace_error_exit:
return -ENOMEM;
}
@@ -1960,13 +1954,11 @@ static int qla4xxx_wait_for_hba_online(struct scsi_qla_host *ha)
{
unsigned long wait_online;
- wait_online = jiffies + (30 * HZ);
+ wait_online = jiffies + (HBA_ONLINE_TOV * HZ);
while (time_before(jiffies, wait_online)) {
if (adapter_up(ha))
return QLA_SUCCESS;
- else if (ha->retry_reset_ha_cnt == 0)
- return QLA_ERROR;
msleep(2000);
}
@@ -2021,6 +2013,7 @@ static int qla4xxx_eh_abort(struct scsi_cmnd *cmd)
unsigned int id = cmd->device->id;
unsigned int lun = cmd->device->lun;
unsigned long serial = cmd->serial_number;
+ unsigned long flags;
struct srb *srb = NULL;
int ret = SUCCESS;
int wait = 0;
@@ -2029,12 +2022,14 @@ static int qla4xxx_eh_abort(struct scsi_cmnd *cmd)
"scsi%ld:%d:%d: Abort command issued cmd=%p, pid=%ld\n",
ha->host_no, id, lun, cmd, serial);
+ spin_lock_irqsave(&ha->hardware_lock, flags);
srb = (struct srb *) CMD_SP(cmd);
-
- if (!srb)
+ if (!srb) {
+ spin_unlock_irqrestore(&ha->hardware_lock, flags);
return SUCCESS;
-
+ }
kref_get(&srb->srb_ref);
+ spin_unlock_irqrestore(&ha->hardware_lock, flags);
if (qla4xxx_abort_task(ha, srb) != QLA_SUCCESS) {
DEBUG3(printk("scsi%ld:%d:%d: Abort_task mbx failed.\n",
@@ -2267,6 +2262,8 @@ qla4xxx_pci_error_detected(struct pci_dev *pdev, pci_channel_state_t state)
qla4xxx_mailbox_premature_completion(ha);
qla4xxx_free_irqs(ha);
pci_disable_device(pdev);
+ /* Return back all IOs */
+ qla4xxx_abort_active_cmds(ha, DID_RESET << 16);
return PCI_ERS_RESULT_NEED_RESET;
case pci_channel_io_perm_failure:
set_bit(AF_EEH_BUSY, &ha->flags);
@@ -2290,17 +2287,13 @@ qla4xxx_pci_mmio_enabled(struct pci_dev *pdev)
if (!is_aer_supported(ha))
return PCI_ERS_RESULT_NONE;
- if (test_bit(AF_FW_RECOVERY, &ha->flags)) {
- ql4_printk(KERN_WARNING, ha, "scsi%ld: %s: firmware hang -- "
- "mmio_enabled\n", ha->host_no, __func__);
- return PCI_ERS_RESULT_NEED_RESET;
- } else
- return PCI_ERS_RESULT_RECOVERED;
+ return PCI_ERS_RESULT_RECOVERED;
}
-uint32_t qla4_8xxx_error_recovery(struct scsi_qla_host *ha)
+static uint32_t qla4_8xxx_error_recovery(struct scsi_qla_host *ha)
{
uint32_t rval = QLA_ERROR;
+ uint32_t ret = 0;
int fn;
struct pci_dev *other_pdev = NULL;
@@ -2312,7 +2305,6 @@ uint32_t qla4_8xxx_error_recovery(struct scsi_qla_host *ha)
clear_bit(AF_ONLINE, &ha->flags);
qla4xxx_mark_all_devices_missing(ha);
qla4xxx_process_aen(ha, FLUSH_DDB_CHANGED_AENS);
- qla4xxx_abort_active_cmds(ha, DID_RESET << 16);
}
fn = PCI_FUNC(ha->pdev->devfn);
@@ -2375,7 +2367,16 @@ uint32_t qla4_8xxx_error_recovery(struct scsi_qla_host *ha)
/* Clear driver state register */
qla4_8xxx_wr_32(ha, QLA82XX_CRB_DRV_STATE, 0);
qla4_8xxx_set_drv_active(ha);
- ha->isp_ops->enable_intrs(ha);
+ ret = qla4xxx_request_irqs(ha);
+ if (ret) {
+ ql4_printk(KERN_WARNING, ha, "Failed to "
+ "reserve interrupt %d already in use.\n",
+ ha->pdev->irq);
+ rval = QLA_ERROR;
+ } else {
+ ha->isp_ops->enable_intrs(ha);
+ rval = QLA_SUCCESS;
+ }
}
qla4_8xxx_idc_unlock(ha);
} else {
@@ -2387,8 +2388,18 @@ uint32_t qla4_8xxx_error_recovery(struct scsi_qla_host *ha)
clear_bit(AF_FW_RECOVERY, &ha->flags);
rval = qla4xxx_initialize_adapter(ha,
PRESERVE_DDB_LIST);
- if (rval == QLA_SUCCESS)
- ha->isp_ops->enable_intrs(ha);
+ if (rval == QLA_SUCCESS) {
+ ret = qla4xxx_request_irqs(ha);
+ if (ret) {
+ ql4_printk(KERN_WARNING, ha, "Failed to"
+ " reserve interrupt %d already in"
+ " use.\n", ha->pdev->irq);
+ rval = QLA_ERROR;
+ } else {
+ ha->isp_ops->enable_intrs(ha);
+ rval = QLA_SUCCESS;
+ }
+ }
qla4_8xxx_idc_lock(ha);
qla4_8xxx_set_drv_active(ha);
qla4_8xxx_idc_unlock(ha);
@@ -2430,12 +2441,7 @@ qla4xxx_pci_slot_reset(struct pci_dev *pdev)
goto exit_slot_reset;
}
- ret = qla4xxx_request_irqs(ha);
- if (ret) {
- ql4_printk(KERN_WARNING, ha, "Failed to reserve interrupt %d"
- " already in use.\n", pdev->irq);
- goto exit_slot_reset;
- }
+ ha->isp_ops->disable_intrs(ha);
if (is_qla8022(ha)) {
if (qla4_8xxx_error_recovery(ha) == QLA_SUCCESS) {
diff --git a/drivers/scsi/qla4xxx/ql4_version.h b/drivers/scsi/qla4xxx/ql4_version.h
index a77b973f2cbc..9bfacf4ed137 100644
--- a/drivers/scsi/qla4xxx/ql4_version.h
+++ b/drivers/scsi/qla4xxx/ql4_version.h
@@ -5,4 +5,4 @@
* See LICENSE.qla4xxx for copyright and licensing details.
*/
-#define QLA4XXX_DRIVER_VERSION "5.02.00-k3"
+#define QLA4XXX_DRIVER_VERSION "5.02.00-k4"
diff --git a/drivers/scsi/qlogicfas408.c b/drivers/scsi/qlogicfas408.c
index 1ad51552d6b1..c3a9151ca823 100644
--- a/drivers/scsi/qlogicfas408.c
+++ b/drivers/scsi/qlogicfas408.c
@@ -439,7 +439,7 @@ irqreturn_t qlogicfas408_ihandl(int irq, void *dev_id)
* Queued command
*/
-int qlogicfas408_queuecommand(struct scsi_cmnd *cmd,
+static int qlogicfas408_queuecommand_lck(struct scsi_cmnd *cmd,
void (*done) (struct scsi_cmnd *))
{
struct qlogicfas408_priv *priv = get_priv_by_cmd(cmd);
@@ -459,6 +459,8 @@ int qlogicfas408_queuecommand(struct scsi_cmnd *cmd,
return 0;
}
+DEF_SCSI_QCMD(qlogicfas408_queuecommand)
+
/*
* Return bios parameters
*/
diff --git a/drivers/scsi/qlogicfas408.h b/drivers/scsi/qlogicfas408.h
index 260626427a32..2f6c0a166200 100644
--- a/drivers/scsi/qlogicfas408.h
+++ b/drivers/scsi/qlogicfas408.h
@@ -103,8 +103,7 @@ struct qlogicfas408_priv {
#define get_priv_by_host(x) (struct qlogicfas408_priv *)&((x)->hostdata[0])
irqreturn_t qlogicfas408_ihandl(int irq, void *dev_id);
-int qlogicfas408_queuecommand(struct scsi_cmnd * cmd,
- void (*done) (struct scsi_cmnd *));
+int qlogicfas408_queuecommand(struct Scsi_Host *h, struct scsi_cmnd * cmd);
int qlogicfas408_biosparam(struct scsi_device * disk,
struct block_device *dev,
sector_t capacity, int ip[]);
diff --git a/drivers/scsi/qlogicpti.c b/drivers/scsi/qlogicpti.c
index f8c561cf751e..664c9572d0c9 100644
--- a/drivers/scsi/qlogicpti.c
+++ b/drivers/scsi/qlogicpti.c
@@ -1003,7 +1003,7 @@ static int qlogicpti_slave_configure(struct scsi_device *sdev)
*
* "This code must fly." -davem
*/
-static int qlogicpti_queuecommand(struct scsi_cmnd *Cmnd, void (*done)(struct scsi_cmnd *))
+static int qlogicpti_queuecommand_lck(struct scsi_cmnd *Cmnd, void (*done)(struct scsi_cmnd *))
{
struct Scsi_Host *host = Cmnd->device->host;
struct qlogicpti *qpti = (struct qlogicpti *) host->hostdata;
@@ -1052,6 +1052,8 @@ toss_command:
return 1;
}
+static DEF_SCSI_QCMD(qlogicpti_queuecommand)
+
static int qlogicpti_return_status(struct Status_Entry *sts, int id)
{
int host_status = DID_ERROR;
diff --git a/drivers/scsi/scsi.c b/drivers/scsi/scsi.c
index 348fba0a8976..2aeb2e9c4d3b 100644
--- a/drivers/scsi/scsi.c
+++ b/drivers/scsi/scsi.c
@@ -634,12 +634,13 @@ void scsi_log_completion(struct scsi_cmnd *cmd, int disposition)
* Description: a serial number identifies a request for error recovery
* and debugging purposes. Protected by the Host_Lock of host.
*/
-static inline void scsi_cmd_get_serial(struct Scsi_Host *host, struct scsi_cmnd *cmd)
+void scsi_cmd_get_serial(struct Scsi_Host *host, struct scsi_cmnd *cmd)
{
cmd->serial_number = host->cmd_serial_number++;
if (cmd->serial_number == 0)
cmd->serial_number = host->cmd_serial_number++;
}
+EXPORT_SYMBOL(scsi_cmd_get_serial);
/**
* scsi_dispatch_command - Dispatch a command to the low-level driver.
@@ -651,7 +652,6 @@ static inline void scsi_cmd_get_serial(struct Scsi_Host *host, struct scsi_cmnd
int scsi_dispatch_cmd(struct scsi_cmnd *cmd)
{
struct Scsi_Host *host = cmd->device->host;
- unsigned long flags = 0;
unsigned long timeout;
int rtn = 0;
@@ -737,23 +737,15 @@ int scsi_dispatch_cmd(struct scsi_cmnd *cmd)
goto out;
}
- spin_lock_irqsave(host->host_lock, flags);
- /*
- * AK: unlikely race here: for some reason the timer could
- * expire before the serial number is set up below.
- *
- * TODO: kill serial or move to blk layer
- */
- scsi_cmd_get_serial(host, cmd);
-
if (unlikely(host->shost_state == SHOST_DEL)) {
cmd->result = (DID_NO_CONNECT << 16);
scsi_done(cmd);
} else {
trace_scsi_dispatch_cmd_start(cmd);
- rtn = host->hostt->queuecommand(cmd, scsi_done);
+ cmd->scsi_done = scsi_done;
+ rtn = host->hostt->queuecommand(host, cmd);
}
- spin_unlock_irqrestore(host->host_lock, flags);
+
if (rtn) {
trace_scsi_dispatch_cmd_error(cmd, rtn);
if (rtn != SCSI_MLQUEUE_DEVICE_BUSY &&
diff --git a/drivers/scsi/scsi_debug.c b/drivers/scsi/scsi_debug.c
index 2c36bae3bd4b..2f1f9b079b10 100644
--- a/drivers/scsi/scsi_debug.c
+++ b/drivers/scsi/scsi_debug.c
@@ -3538,7 +3538,7 @@ static void sdebug_remove_adapter(void)
}
static
-int scsi_debug_queuecommand(struct scsi_cmnd *SCpnt, done_funct_t done)
+int scsi_debug_queuecommand_lck(struct scsi_cmnd *SCpnt, done_funct_t done)
{
unsigned char *cmd = (unsigned char *) SCpnt->cmnd;
int len, k;
@@ -3884,6 +3884,8 @@ write:
(delay_override ? 0 : scsi_debug_delay));
}
+static DEF_SCSI_QCMD(scsi_debug_queuecommand)
+
static struct scsi_host_template sdebug_driver_template = {
.proc_info = scsi_debug_proc_info,
.proc_name = sdebug_proc_name,
diff --git a/drivers/scsi/scsi_error.c b/drivers/scsi/scsi_error.c
index 1de30eb83bb0..824b8fc03ce5 100644
--- a/drivers/scsi/scsi_error.c
+++ b/drivers/scsi/scsi_error.c
@@ -320,19 +320,11 @@ static int scsi_check_sense(struct scsi_cmnd *scmd)
"changed. The Linux SCSI layer does not "
"automatically adjust these parameters.\n");
- if (scmd->request->cmd_flags & REQ_HARDBARRIER)
- /*
- * barrier requests should always retry on UA
- * otherwise block will get a spurious error
- */
- return NEEDS_RETRY;
- else
- /*
- * for normal (non barrier) commands, pass the
- * UA upwards for a determination in the
- * completion functions
- */
- return SUCCESS;
+ /*
+ * Pass the UA upwards for a determination in the completion
+ * functions.
+ */
+ return SUCCESS;
/* these three are not supported */
case COPY_ABORTED:
@@ -781,17 +773,15 @@ static int scsi_send_eh_cmnd(struct scsi_cmnd *scmd, unsigned char *cmnd,
struct Scsi_Host *shost = sdev->host;
DECLARE_COMPLETION_ONSTACK(done);
unsigned long timeleft;
- unsigned long flags;
struct scsi_eh_save ses;
int rtn;
scsi_eh_prep_cmnd(scmd, &ses, cmnd, cmnd_size, sense_bytes);
shost->eh_action = &done;
- spin_lock_irqsave(shost->host_lock, flags);
scsi_log_send(scmd);
- shost->hostt->queuecommand(scmd, scsi_eh_done);
- spin_unlock_irqrestore(shost->host_lock, flags);
+ scmd->scsi_done = scsi_eh_done;
+ shost->hostt->queuecommand(shost, scmd);
timeleft = wait_for_completion_timeout(&done, timeout);
diff --git a/drivers/scsi/scsi_lib.c b/drivers/scsi/scsi_lib.c
index 8041fe1ab179..eafeeda6e194 100644
--- a/drivers/scsi/scsi_lib.c
+++ b/drivers/scsi/scsi_lib.c
@@ -2438,7 +2438,8 @@ scsi_internal_device_unblock(struct scsi_device *sdev)
sdev->sdev_state = SDEV_RUNNING;
else if (sdev->sdev_state == SDEV_CREATED_BLOCK)
sdev->sdev_state = SDEV_CREATED;
- else
+ else if (sdev->sdev_state != SDEV_CANCEL &&
+ sdev->sdev_state != SDEV_OFFLINE)
return -EINVAL;
spin_lock_irqsave(q->queue_lock, flags);
diff --git a/drivers/scsi/scsi_sysfs.c b/drivers/scsi/scsi_sysfs.c
index 20ad59dff730..76ee2e784f75 100644
--- a/drivers/scsi/scsi_sysfs.c
+++ b/drivers/scsi/scsi_sysfs.c
@@ -964,10 +964,11 @@ static void __scsi_remove_target(struct scsi_target *starget)
list_for_each_entry(sdev, &shost->__devices, siblings) {
if (sdev->channel != starget->channel ||
sdev->id != starget->id ||
- sdev->sdev_state == SDEV_DEL)
+ scsi_device_get(sdev))
continue;
spin_unlock_irqrestore(shost->host_lock, flags);
scsi_remove_device(sdev);
+ scsi_device_put(sdev);
spin_lock_irqsave(shost->host_lock, flags);
goto restart;
}
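/*
 * Note on the loop above: scsi_device_get() both filters out devices
 * that are already going away (it fails for those, so they are simply
 * skipped) and pins the device with a reference across the unlocked
 * scsi_remove_device() call; the added scsi_device_put() drops that
 * reference afterwards.
 */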
diff --git a/drivers/scsi/sd.c b/drivers/scsi/sd.c
index 57d1e3e1bd44..956496182c80 100644
--- a/drivers/scsi/sd.c
+++ b/drivers/scsi/sd.c
@@ -46,7 +46,6 @@
#include <linux/blkdev.h>
#include <linux/blkpg.h>
#include <linux/delay.h>
-#include <linux/smp_lock.h>
#include <linux/mutex.h>
#include <linux/string_helpers.h>
#include <linux/async.h>
@@ -259,6 +258,28 @@ sd_show_protection_type(struct device *dev, struct device_attribute *attr,
}
static ssize_t
+sd_show_protection_mode(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ struct scsi_disk *sdkp = to_scsi_disk(dev);
+ struct scsi_device *sdp = sdkp->device;
+ unsigned int dif, dix;
+
+ dif = scsi_host_dif_capable(sdp->host, sdkp->protection_type);
+ dix = scsi_host_dix_capable(sdp->host, sdkp->protection_type);
+
+ if (!dix && scsi_host_dix_capable(sdp->host, SD_DIF_TYPE0_PROTECTION)) {
+ dif = 0;
+ dix = 1;
+ }
+
+ if (!dif && !dix)
+ return snprintf(buf, 20, "none\n");
+
+ return snprintf(buf, 20, "%s%u\n", dix ? "dix" : "dif", dif);
+}
+
+static ssize_t
sd_show_app_tag_own(struct device *dev, struct device_attribute *attr,
char *buf)
{
@@ -285,6 +306,7 @@ static struct device_attribute sd_disk_attrs[] = {
__ATTR(manage_start_stop, S_IRUGO|S_IWUSR, sd_show_manage_start_stop,
sd_store_manage_start_stop),
__ATTR(protection_type, S_IRUGO, sd_show_protection_type, NULL),
+ __ATTR(protection_mode, S_IRUGO, sd_show_protection_mode, NULL),
__ATTR(app_tag_own, S_IRUGO, sd_show_app_tag_own, NULL),
__ATTR(thin_provisioning, S_IRUGO, sd_show_thin_provisioning, NULL),
__ATTR_NULL,
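The new protection_mode attribute reports whether a disk is actually running with DIF (target protection), DIX (host protection), or neither, as a string such as "none", "dif1" or "dix1". A small user-space sketch for reading it follows; the sysfs path is an assumption for illustration (the attribute sits on the scsi_disk class device), not something defined by this patch:

#include <stdio.h>

/* Hypothetical path; substitute the H:C:T:L tuple of the disk of interest. */
#define PROT_MODE_ATTR	"/sys/class/scsi_disk/0:0:0:0/protection_mode"

int main(void)
{
	char mode[20];
	FILE *f = fopen(PROT_MODE_ATTR, "r");

	if (!f)
		return 1;
	if (fgets(mode, sizeof(mode), f))
		printf("protection mode: %s", mode);
	fclose(f);
	return 0;
}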
diff --git a/drivers/scsi/sr_ioctl.c b/drivers/scsi/sr_ioctl.c
index cbb38c5197fa..3cd8ffbad577 100644
--- a/drivers/scsi/sr_ioctl.c
+++ b/drivers/scsi/sr_ioctl.c
@@ -325,6 +325,15 @@ int sr_drive_status(struct cdrom_device_info *cdi, int slot)
}
/*
+ * SK/ASC/ASCQ of 2/4/2 means "initialization required"
+ * Using CD_TRAY_OPEN results in a START_STOP_UNIT to close
+ * the tray, which resolves the initialization requirement.
+ */
+ if (scsi_sense_valid(&sshdr) && sshdr.sense_key == NOT_READY
+ && sshdr.asc == 0x04 && sshdr.ascq == 0x02)
+ return CDS_TRAY_OPEN;
+
+ /*
* 0x04 is format in progress .. but there must be a disc present!
*/
if (sshdr.sense_key == NOT_READY && sshdr.asc == 0x04)
diff --git a/drivers/scsi/stex.c b/drivers/scsi/stex.c
index 9c73dbda3bbb..606215e54b88 100644
--- a/drivers/scsi/stex.c
+++ b/drivers/scsi/stex.c
@@ -572,7 +572,7 @@ stex_slave_destroy(struct scsi_device *sdev)
}
static int
-stex_queuecommand(struct scsi_cmnd *cmd, void (* done)(struct scsi_cmnd *))
+stex_queuecommand_lck(struct scsi_cmnd *cmd, void (*done)(struct scsi_cmnd *))
{
struct st_hba *hba;
struct Scsi_Host *host;
@@ -698,6 +698,8 @@ stex_queuecommand(struct scsi_cmnd *cmd, void (* done)(struct scsi_cmnd *))
return 0;
}
+static DEF_SCSI_QCMD(stex_queuecommand)
+
static void stex_scsi_done(struct st_ccb *ccb)
{
struct scsi_cmnd *cmd = ccb->cmd;
diff --git a/drivers/scsi/sun3_NCR5380.c b/drivers/scsi/sun3_NCR5380.c
index 713620ed70d9..4f0e5485ffde 100644
--- a/drivers/scsi/sun3_NCR5380.c
+++ b/drivers/scsi/sun3_NCR5380.c
@@ -908,7 +908,7 @@ static int NCR5380_init (struct Scsi_Host *instance, int flags)
*/
/* Only make static if a wrapper function is used */
-static int NCR5380_queue_command(struct scsi_cmnd *cmd,
+static int NCR5380_queue_command_lck(struct scsi_cmnd *cmd,
void (*done)(struct scsi_cmnd *))
{
SETUP_HOSTDATA(cmd->device->host);
@@ -1019,6 +1019,8 @@ static int NCR5380_queue_command(struct scsi_cmnd *cmd,
return 0;
}
+static DEF_SCSI_QCMD(NCR5380_queue_command)
+
/*
* Function : NCR5380_main (void)
*
diff --git a/drivers/scsi/sun3_scsi.h b/drivers/scsi/sun3_scsi.h
index b29a9d661ca4..bcefd8458e65 100644
--- a/drivers/scsi/sun3_scsi.h
+++ b/drivers/scsi/sun3_scsi.h
@@ -51,8 +51,7 @@ static int sun3scsi_abort(struct scsi_cmnd *);
static int sun3scsi_detect (struct scsi_host_template *);
static const char *sun3scsi_info (struct Scsi_Host *);
static int sun3scsi_bus_reset(struct scsi_cmnd *);
-static int sun3scsi_queue_command(struct scsi_cmnd *,
- void (*done)(struct scsi_cmnd *));
+static int sun3scsi_queue_command(struct Scsi_Host *, struct scsi_cmnd *);
static int sun3scsi_release (struct Scsi_Host *);
#ifndef CMD_PER_LUN
diff --git a/drivers/scsi/sym53c416.c b/drivers/scsi/sym53c416.c
index e5c369bb568f..190107ae120b 100644
--- a/drivers/scsi/sym53c416.c
+++ b/drivers/scsi/sym53c416.c
@@ -734,7 +734,7 @@ const char *sym53c416_info(struct Scsi_Host *SChost)
return info;
}
-int sym53c416_queuecommand(Scsi_Cmnd *SCpnt, void (*done)(Scsi_Cmnd *))
+static int sym53c416_queuecommand_lck(Scsi_Cmnd *SCpnt, void (*done)(Scsi_Cmnd *))
{
int base;
unsigned long flags = 0;
@@ -761,6 +761,8 @@ int sym53c416_queuecommand(Scsi_Cmnd *SCpnt, void (*done)(Scsi_Cmnd *))
return 0;
}
+DEF_SCSI_QCMD(sym53c416_queuecommand)
+
static int sym53c416_host_reset(Scsi_Cmnd *SCpnt)
{
int base;
diff --git a/drivers/scsi/sym53c416.h b/drivers/scsi/sym53c416.h
index 77860d0748ff..387de5d80a70 100644
--- a/drivers/scsi/sym53c416.h
+++ b/drivers/scsi/sym53c416.h
@@ -25,7 +25,7 @@
static int sym53c416_detect(struct scsi_host_template *);
static const char *sym53c416_info(struct Scsi_Host *);
static int sym53c416_release(struct Scsi_Host *);
-static int sym53c416_queuecommand(Scsi_Cmnd *, void (*done)(Scsi_Cmnd *));
+static int sym53c416_queuecommand(struct Scsi_Host *, struct scsi_cmnd *);
static int sym53c416_host_reset(Scsi_Cmnd *);
static int sym53c416_bios_param(struct scsi_device *, struct block_device *,
sector_t, int *);
diff --git a/drivers/scsi/sym53c8xx_2/sym_glue.c b/drivers/scsi/sym53c8xx_2/sym_glue.c
index 8b955b534a36..6b97ded9d45d 100644
--- a/drivers/scsi/sym53c8xx_2/sym_glue.c
+++ b/drivers/scsi/sym53c8xx_2/sym_glue.c
@@ -505,7 +505,7 @@ void sym_log_bus_error(struct Scsi_Host *shost)
* queuecommand method. Entered with the host adapter lock held and
* interrupts disabled.
*/
-static int sym53c8xx_queue_command(struct scsi_cmnd *cmd,
+static int sym53c8xx_queue_command_lck(struct scsi_cmnd *cmd,
void (*done)(struct scsi_cmnd *))
{
struct sym_hcb *np = SYM_SOFTC_PTR(cmd);
@@ -536,6 +536,8 @@ static int sym53c8xx_queue_command(struct scsi_cmnd *cmd,
return 0;
}
+static DEF_SCSI_QCMD(sym53c8xx_queue_command)
+
/*
* Linux entry point of the interrupt handler.
*/
diff --git a/drivers/scsi/t128.h b/drivers/scsi/t128.h
index 76a069b7ac0b..ada1115079c9 100644
--- a/drivers/scsi/t128.h
+++ b/drivers/scsi/t128.h
@@ -96,8 +96,7 @@ static int t128_abort(struct scsi_cmnd *);
static int t128_biosparam(struct scsi_device *, struct block_device *,
sector_t, int*);
static int t128_detect(struct scsi_host_template *);
-static int t128_queue_command(struct scsi_cmnd *,
- void (*done)(struct scsi_cmnd *));
+static int t128_queue_command(struct Scsi_Host *, struct scsi_cmnd *);
static int t128_bus_reset(struct scsi_cmnd *);
#ifndef CMD_PER_LUN
diff --git a/drivers/scsi/tmscsim.c b/drivers/scsi/tmscsim.c
index 27866b0adfeb..a124a28f2ccb 100644
--- a/drivers/scsi/tmscsim.c
+++ b/drivers/scsi/tmscsim.c
@@ -1883,7 +1883,7 @@ dc390_ScsiRstDetect( struct dc390_acb* pACB )
return;
}
-static int DC390_queuecommand(struct scsi_cmnd *cmd,
+static int DC390_queuecommand_lck(struct scsi_cmnd *cmd,
void (*done)(struct scsi_cmnd *))
{
struct scsi_device *sdev = cmd->device;
@@ -1944,6 +1944,8 @@ static int DC390_queuecommand(struct scsi_cmnd *cmd,
return SCSI_MLQUEUE_DEVICE_BUSY;
}
+static DEF_SCSI_QCMD(DC390_queuecommand)
+
static void dc390_dumpinfo (struct dc390_acb* pACB, struct dc390_dcb* pDCB, struct dc390_srb* pSRB)
{
struct pci_dev *pdev;
diff --git a/drivers/scsi/u14-34f.c b/drivers/scsi/u14-34f.c
index 5d9fdeeb2315..edfc5da8be4c 100644
--- a/drivers/scsi/u14-34f.c
+++ b/drivers/scsi/u14-34f.c
@@ -433,7 +433,7 @@
static int u14_34f_detect(struct scsi_host_template *);
static int u14_34f_release(struct Scsi_Host *);
-static int u14_34f_queuecommand(struct scsi_cmnd *, void (*done)(struct scsi_cmnd *));
+static int u14_34f_queuecommand(struct Scsi_Host *, struct scsi_cmnd *);
static int u14_34f_eh_abort(struct scsi_cmnd *);
static int u14_34f_eh_host_reset(struct scsi_cmnd *);
static int u14_34f_bios_param(struct scsi_device *, struct block_device *,
@@ -1248,7 +1248,7 @@ static void scsi_to_dev_dir(unsigned int i, unsigned int j) {
}
-static int u14_34f_queuecommand(struct scsi_cmnd *SCpnt, void (*done)(struct scsi_cmnd *)) {
+static int u14_34f_queuecommand_lck(struct scsi_cmnd *SCpnt, void (*done)(struct scsi_cmnd *)) {
unsigned int i, j, k;
struct mscp *cpp;
@@ -1329,6 +1329,8 @@ static int u14_34f_queuecommand(struct scsi_cmnd *SCpnt, void (*done)(struct scs
return 0;
}
+static DEF_SCSI_QCMD(u14_34f_queuecommand)
+
static int u14_34f_eh_abort(struct scsi_cmnd *SCarg) {
unsigned int i, j;
diff --git a/drivers/scsi/ultrastor.c b/drivers/scsi/ultrastor.c
index 27aa40f3980e..0571ef9639cb 100644
--- a/drivers/scsi/ultrastor.c
+++ b/drivers/scsi/ultrastor.c
@@ -700,7 +700,7 @@ static inline void build_sg_list(struct mscp *mscp, struct scsi_cmnd *SCpnt)
mscp->transfer_data_length = transfer_length;
}
-static int ultrastor_queuecommand(struct scsi_cmnd *SCpnt,
+static int ultrastor_queuecommand_lck(struct scsi_cmnd *SCpnt,
void (*done) (struct scsi_cmnd *))
{
struct mscp *my_mscp;
@@ -825,6 +825,8 @@ retry:
return 0;
}
+static DEF_SCSI_QCMD(ultrastor_queuecommand)
+
/* This code must deal with 2 cases:
1. The command has not been written to the OGM. In this case, set
diff --git a/drivers/scsi/ultrastor.h b/drivers/scsi/ultrastor.h
index a692905f95f7..165c18b5cf5f 100644
--- a/drivers/scsi/ultrastor.h
+++ b/drivers/scsi/ultrastor.h
@@ -15,8 +15,7 @@
static int ultrastor_detect(struct scsi_host_template *);
static const char *ultrastor_info(struct Scsi_Host *shpnt);
-static int ultrastor_queuecommand(struct scsi_cmnd *,
- void (*done)(struct scsi_cmnd *));
+static int ultrastor_queuecommand(struct Scsi_Host *, struct scsi_cmnd *);
static int ultrastor_abort(struct scsi_cmnd *);
static int ultrastor_host_reset(struct scsi_cmnd *);
static int ultrastor_biosparam(struct scsi_device *, struct block_device *,
diff --git a/drivers/scsi/vmw_pvscsi.c b/drivers/scsi/vmw_pvscsi.c
index 26894459c37f..a18996d24466 100644
--- a/drivers/scsi/vmw_pvscsi.c
+++ b/drivers/scsi/vmw_pvscsi.c
@@ -690,7 +690,7 @@ static int pvscsi_queue_ring(struct pvscsi_adapter *adapter,
return 0;
}
-static int pvscsi_queue(struct scsi_cmnd *cmd, void (*done)(struct scsi_cmnd *))
+static int pvscsi_queue_lck(struct scsi_cmnd *cmd, void (*done)(struct scsi_cmnd *))
{
struct Scsi_Host *host = cmd->device->host;
struct pvscsi_adapter *adapter = shost_priv(host);
@@ -719,6 +719,8 @@ static int pvscsi_queue(struct scsi_cmnd *cmd, void (*done)(struct scsi_cmnd *))
return 0;
}
+static DEF_SCSI_QCMD(pvscsi_queue)
+
static int pvscsi_abort(struct scsi_cmnd *cmd)
{
struct pvscsi_adapter *adapter = shost_priv(cmd->device->host);
diff --git a/drivers/scsi/wd33c93.c b/drivers/scsi/wd33c93.c
index b701bf2cc187..5f697e0bd009 100644
--- a/drivers/scsi/wd33c93.c
+++ b/drivers/scsi/wd33c93.c
@@ -371,8 +371,8 @@ calc_sync_msg(unsigned int period, unsigned int offset, unsigned int fast,
msg[1] = offset;
}
-int
-wd33c93_queuecommand(struct scsi_cmnd *cmd,
+static int
+wd33c93_queuecommand_lck(struct scsi_cmnd *cmd,
void (*done)(struct scsi_cmnd *))
{
struct WD33C93_hostdata *hostdata;
@@ -468,6 +468,8 @@ wd33c93_queuecommand(struct scsi_cmnd *cmd,
return 0;
}
+DEF_SCSI_QCMD(wd33c93_queuecommand)
+
/*
* This routine attempts to start a scsi command. If the host_card is
* already connected, we give up immediately. Otherwise, look through
diff --git a/drivers/scsi/wd33c93.h b/drivers/scsi/wd33c93.h
index 1ed5f3bf388e..3b463d7304dc 100644
--- a/drivers/scsi/wd33c93.h
+++ b/drivers/scsi/wd33c93.h
@@ -343,8 +343,7 @@ struct WD33C93_hostdata {
void wd33c93_init (struct Scsi_Host *instance, const wd33c93_regs regs,
dma_setup_t setup, dma_stop_t stop, int clock_freq);
int wd33c93_abort (struct scsi_cmnd *cmd);
-int wd33c93_queuecommand (struct scsi_cmnd *cmd,
- void (*done)(struct scsi_cmnd *));
+int wd33c93_queuecommand (struct Scsi_Host *h, struct scsi_cmnd *cmd);
void wd33c93_intr (struct Scsi_Host *instance);
int wd33c93_proc_info(struct Scsi_Host *, char *, char **, off_t, int, int);
int wd33c93_host_reset (struct scsi_cmnd *);
diff --git a/drivers/scsi/wd7000.c b/drivers/scsi/wd7000.c
index 333580bf37c5..db451ae0a368 100644
--- a/drivers/scsi/wd7000.c
+++ b/drivers/scsi/wd7000.c
@@ -1082,7 +1082,7 @@ static irqreturn_t wd7000_intr(int irq, void *dev_id)
return IRQ_HANDLED;
}
-static int wd7000_queuecommand(struct scsi_cmnd *SCpnt,
+static int wd7000_queuecommand_lck(struct scsi_cmnd *SCpnt,
void (*done)(struct scsi_cmnd *))
{
Scb *scb;
@@ -1139,6 +1139,8 @@ static int wd7000_queuecommand(struct scsi_cmnd *SCpnt,
return 0;
}
+static DEF_SCSI_QCMD(wd7000_queuecommand)
+
static int wd7000_diagnostics(Adapter * host, int code)
{
static IcbDiag icb = { ICB_OP_DIAGNOSTICS };
diff --git a/drivers/serial/68328serial.h b/drivers/serial/68328serial.h
index 58aa2154655b..664ceb0a158c 100644
--- a/drivers/serial/68328serial.h
+++ b/drivers/serial/68328serial.h
@@ -181,13 +181,8 @@ struct m68k_serial {
/*
* Define the number of ports supported and their irqs.
*/
-#ifndef CONFIG_68328_SERIAL_UART2
#define NR_PORTS 1
#define UART_IRQ_DEFNS {UART_IRQ_NUM}
-#else
-#define NR_PORTS 2
-#define UART_IRQ_DEFNS {UART1_IRQ_NUM, UART2_IRQ_NUM}
-#endif
#endif /* __KERNEL__ */
#endif /* !(_MC683XX_SERIAL_H) */
diff --git a/drivers/serial/8250.c b/drivers/serial/8250.c
index 4d8e14b7aa93..09a550860dcf 100644
--- a/drivers/serial/8250.c
+++ b/drivers/serial/8250.c
@@ -2872,7 +2872,7 @@ static struct console serial8250_console = {
.device = uart_console_device,
.setup = serial8250_console_setup,
.early_setup = serial8250_console_early_setup,
- .flags = CON_PRINTBUFFER,
+ .flags = CON_PRINTBUFFER | CON_ANYTIME,
.index = -1,
.data = &serial8250_reg,
};
diff --git a/drivers/serial/8250_pci.c b/drivers/serial/8250_pci.c
index 53be4d35a0aa..842e3b2a02b1 100644
--- a/drivers/serial/8250_pci.c
+++ b/drivers/serial/8250_pci.c
@@ -2285,6 +2285,8 @@ static struct pciserial_board pci_boards[] __devinitdata = {
static const struct pci_device_id softmodem_blacklist[] = {
{ PCI_VDEVICE(AL, 0x5457), }, /* ALi Corporation M5457 AC'97 Modem */
+ { PCI_VDEVICE(MOTOROLA, 0x3052), }, /* Motorola Si3052-based modem */
+ { PCI_DEVICE(0x1543, 0x3052), }, /* Si3052-based modem, default IDs */
};
/*
@@ -2863,6 +2865,9 @@ static struct pci_device_id serial_pci_tbl[] = {
PCI_SUBVENDOR_ID_SIIG, PCI_SUBDEVICE_ID_SIIG_QUARTET_SERIAL,
0, 0,
pbn_b0_4_1152000 },
+ { PCI_VENDOR_ID_OXSEMI, 0x9505,
+ PCI_ANY_ID, PCI_ANY_ID, 0, 0,
+ pbn_b0_bt_2_921600 },
/*
* The below card is a little controversial since it is the
diff --git a/drivers/serial/Kconfig b/drivers/serial/Kconfig
index 927816484397..aff9dcd051c6 100644
--- a/drivers/serial/Kconfig
+++ b/drivers/serial/Kconfig
@@ -1410,6 +1410,33 @@ config SERIAL_OF_PLATFORM
Currently, only 8250 compatible ports are supported, but
others can easily be added.
+config SERIAL_OMAP
+ tristate "OMAP serial port support"
+ depends on ARCH_OMAP2 || ARCH_OMAP3 || ARCH_OMAP4
+ select SERIAL_CORE
+ help
+ If you have a machine based on a Texas Instruments OMAP CPU you
+ can enable its onboard serial ports by enabling this option.
+
+ By enabling this option you take advantage of the DMA feature
+ available with the omap-serial driver. DMA support can be enabled
+ from platform data.
+
+config SERIAL_OMAP_CONSOLE
+ bool "Console on OMAP serial port"
+ depends on SERIAL_OMAP
+ select SERIAL_CORE_CONSOLE
+ help
+ Select this option if you would like to use the omap serial port
+ as the console.
+
+ Even if you say Y here, the currently visible virtual console
+ (/dev/tty0) will still be used as the system console by default, but
+ you can alter that using a kernel command line option such as
+ "console=ttyOx". (Try "man bootparam" or see the documentation of
+ your boot loader about how to pass options to the kernel at
+ boot time.)
+
config SERIAL_OF_PLATFORM_NWPSERIAL
tristate "NWP serial port driver"
depends on PPC_OF && PPC_DCR
diff --git a/drivers/serial/Makefile b/drivers/serial/Makefile
index 1ca4fd599ffe..c5705765454f 100644
--- a/drivers/serial/Makefile
+++ b/drivers/serial/Makefile
@@ -88,3 +88,4 @@ obj-$(CONFIG_SERIAL_ALTERA_JTAGUART) += altera_jtaguart.o
obj-$(CONFIG_SERIAL_ALTERA_UART) += altera_uart.o
obj-$(CONFIG_SERIAL_MRST_MAX3110) += mrst_max3110.o
obj-$(CONFIG_SERIAL_MFD_HSU) += mfd.o
+obj-$(CONFIG_SERIAL_OMAP) += omap-serial.o
diff --git a/drivers/serial/bfin_5xx.c b/drivers/serial/bfin_5xx.c
index a9eff2b18eab..19cac9f610fd 100644
--- a/drivers/serial/bfin_5xx.c
+++ b/drivers/serial/bfin_5xx.c
@@ -23,6 +23,7 @@
#include <linux/tty.h>
#include <linux/tty_flip.h>
#include <linux/serial_core.h>
+#include <linux/dma-mapping.h>
#if defined(CONFIG_KGDB_SERIAL_CONSOLE) || \
defined(CONFIG_KGDB_SERIAL_CONSOLE_MODULE)
@@ -33,12 +34,10 @@
#include <asm/gpio.h>
#include <mach/bfin_serial_5xx.h>
-#ifdef CONFIG_SERIAL_BFIN_DMA
-#include <linux/dma-mapping.h>
+#include <asm/dma.h>
#include <asm/io.h>
#include <asm/irq.h>
#include <asm/cacheflush.h>
-#endif
#ifdef CONFIG_SERIAL_BFIN_MODULE
# undef CONFIG_EARLY_PRINTK
@@ -360,7 +359,6 @@ static void bfin_serial_tx_chars(struct bfin_serial_port *uart)
UART_PUT_CHAR(uart, xmit->buf[xmit->tail]);
xmit->tail = (xmit->tail + 1) & (UART_XMIT_SIZE - 1);
uart->port.icount.tx++;
- SSYNC();
}
if (uart_circ_chars_pending(xmit) < WAKEUP_CHARS)
@@ -688,6 +686,13 @@ static int bfin_serial_startup(struct uart_port *port)
# ifdef CONFIG_BF54x
{
+ /*
+ * UART2 and UART3 on BF548 share interrupt PINs and DMA
+ * controllers with SPORT2 and SPORT3. UART rx and tx
+ * interrupts are generated in PIO mode only when their peripheral
+ * mapping registers are configured properly, which means the
+ * corresponding DMA channels must be requested even in PIO mode.
+ */
unsigned uart_dma_ch_rx, uart_dma_ch_tx;
switch (uart->port.irq) {
@@ -734,8 +739,7 @@ static int bfin_serial_startup(struct uart_port *port)
IRQF_TRIGGER_RISING | IRQF_TRIGGER_FALLING |
IRQF_DISABLED, "BFIN_UART_CTS", uart)) {
uart->cts_pin = -1;
- pr_info("Unable to attach BlackFin UART CTS interrupt.\
- So, disable it.\n");
+ pr_info("Unable to attach BlackFin UART CTS interrupt. So, disable it.\n");
}
}
if (uart->rts_pin >= 0) {
@@ -747,8 +751,7 @@ static int bfin_serial_startup(struct uart_port *port)
if (request_irq(uart->status_irq,
bfin_serial_mctrl_cts_int,
IRQF_DISABLED, "BFIN_UART_MODEM_STATUS", uart)) {
- pr_info("Unable to attach BlackFin UART Modem \
- Status interrupt.\n");
+ pr_info("Unable to attach BlackFin UART Modem Status interrupt.\n");
}
/* CTS RTS PINs are negative assertive. */
@@ -846,6 +849,8 @@ bfin_serial_set_termios(struct uart_port *port, struct ktermios *termios,
if (termios->c_cflag & CMSPAR)
lcr |= STP;
+ spin_lock_irqsave(&uart->port.lock, flags);
+
port->read_status_mask = OE;
if (termios->c_iflag & INPCK)
port->read_status_mask |= (FE | PE);
@@ -875,8 +880,6 @@ bfin_serial_set_termios(struct uart_port *port, struct ktermios *termios,
if (termios->c_line != N_IRDA)
quot -= ANOMALY_05000230;
- spin_lock_irqsave(&uart->port.lock, flags);
-
UART_SET_ANOMALY_THRESHOLD(uart, USEC_PER_SEC / baud * 15);
/* Disable UART */
@@ -1321,6 +1324,14 @@ struct console __init *bfin_earlyserial_init(unsigned int port,
struct bfin_serial_port *uart;
struct ktermios t;
+#ifdef CONFIG_SERIAL_BFIN_CONSOLE
+ /*
+ * If we are using early serial, don't let the normal console rewind
+ * log buffer, since that causes things to be printed multiple times
+ */
+ bfin_serial_console.flags &= ~CON_PRINTBUFFER;
+#endif
+
if (port == -1 || port >= nr_active_ports)
port = 0;
bfin_serial_init_ports();
diff --git a/drivers/serial/crisv10.c b/drivers/serial/crisv10.c
index c856905bb3bd..bcc31f2140ac 100644
--- a/drivers/serial/crisv10.c
+++ b/drivers/serial/crisv10.c
@@ -18,7 +18,6 @@ static char *serial_version = "$Revision: 1.25 $";
#include <linux/tty.h>
#include <linux/tty_flip.h>
#include <linux/major.h>
-#include <linux/smp_lock.h>
#include <linux/string.h>
#include <linux/fcntl.h>
#include <linux/mm.h>
@@ -1411,11 +1410,12 @@ e100_enable_rs485(struct tty_struct *tty, struct serial_rs485 *r)
CONFIG_ETRAX_RS485_LTC1387_RXEN_PORT_G_BIT, 1);
#endif
- info->rs485.flags = r->flags;
- if (r->delay_rts_before_send >= 1000)
+ info->rs485 = *r;
+
+ /* Clamp the delay before RTS to a maximum of 1000 ms */
+ if (info->rs485.delay_rts_before_send >= 1000)
info->rs485.delay_rts_before_send = 1000;
- else
- info->rs485.delay_rts_before_send = r->delay_rts_before_send;
+
/* printk("rts: on send = %i, after = %i, enabled = %i",
info->rs485.rts_on_send,
info->rs485.rts_after_sent,
@@ -3234,9 +3234,9 @@ rs_write(struct tty_struct *tty,
e100_disable_rx(info);
e100_enable_rx_irq(info);
#endif
-
- if (info->rs485.delay_rts_before_send > 0)
- msleep(info->rs485.delay_rts_before_send);
+ if ((info->rs485.flags & SER_RS485_RTS_BEFORE_SEND) &&
+ (info->rs485.delay_rts_before_send > 0))
+ msleep(info->rs485.delay_rts_before_send);
}
#endif /* CONFIG_ETRAX_RS485 */
@@ -3694,6 +3694,11 @@ rs_ioctl(struct tty_struct *tty, struct file * file,
rs485data.delay_rts_before_send = rs485ctrl.delay_rts_before_send;
rs485data.flags = 0;
+ if (rs485data.delay_rts_before_send != 0)
+ rs485data.flags |= SER_RS485_RTS_BEFORE_SEND;
+ else
+ rs485data.flags &= ~(SER_RS485_RTS_BEFORE_SEND);
+
if (rs485ctrl.enabled)
rs485data.flags |= SER_RS485_ENABLED;
else
@@ -3731,7 +3736,7 @@ rs_ioctl(struct tty_struct *tty, struct file * file,
/* This is the ioctl to get RS485 data from user-space */
if (copy_to_user((struct serial_rs485 *) arg,
rs485data,
- sizeof(serial_rs485)))
+ sizeof(struct serial_rs485)))
return -EFAULT;
break;
}
@@ -4527,6 +4532,7 @@ static int __init rs_init(void)
/* Set sane defaults */
info->rs485.flags &= ~(SER_RS485_RTS_ON_SEND);
info->rs485.flags |= SER_RS485_RTS_AFTER_SEND;
+ info->rs485.flags &= ~(SER_RS485_RTS_BEFORE_SEND);
info->rs485.delay_rts_before_send = 0;
info->rs485.flags &= ~(SER_RS485_ENABLED);
#endif
diff --git a/drivers/serial/kgdboc.c b/drivers/serial/kgdboc.c
index d4b711c9a416..3374618300af 100644
--- a/drivers/serial/kgdboc.c
+++ b/drivers/serial/kgdboc.c
@@ -18,6 +18,7 @@
#include <linux/tty.h>
#include <linux/console.h>
#include <linux/vt_kern.h>
+#include <linux/input.h>
#define MAX_CONFIG_LEN 40
@@ -37,6 +38,61 @@ static struct tty_driver *kgdb_tty_driver;
static int kgdb_tty_line;
#ifdef CONFIG_KDB_KEYBOARD
+static int kgdboc_reset_connect(struct input_handler *handler,
+ struct input_dev *dev,
+ const struct input_device_id *id)
+{
+ input_reset_device(dev);
+
+ /* Return an error - we do not want to bind, just to reset */
+ return -ENODEV;
+}
+
+static void kgdboc_reset_disconnect(struct input_handle *handle)
+{
+ /* We do not expect anyone to actually bind to us */
+ BUG();
+}
+
+static const struct input_device_id kgdboc_reset_ids[] = {
+ {
+ .flags = INPUT_DEVICE_ID_MATCH_EVBIT,
+ .evbit = { BIT_MASK(EV_KEY) },
+ },
+ { }
+};
+
+static struct input_handler kgdboc_reset_handler = {
+ .connect = kgdboc_reset_connect,
+ .disconnect = kgdboc_reset_disconnect,
+ .name = "kgdboc_reset",
+ .id_table = kgdboc_reset_ids,
+};
+
+static DEFINE_MUTEX(kgdboc_reset_mutex);
+
+static void kgdboc_restore_input_helper(struct work_struct *dummy)
+{
+ /*
+ * We need to take a mutex to prevent several instances of
+ * this work running on different CPUs from trying to
+ * register an already registered handler again.
+ */
+ mutex_lock(&kgdboc_reset_mutex);
+
+ if (input_register_handler(&kgdboc_reset_handler) == 0)
+ input_unregister_handler(&kgdboc_reset_handler);
+
+ mutex_unlock(&kgdboc_reset_mutex);
+}
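/*
 * How the register/unregister pair above works: input_register_handler()
 * makes the input core walk every existing input device and call
 * kgdboc_reset_connect() for each one; every call resets the device and
 * returns -ENODEV, so nothing stays bound and the handler can be
 * unregistered again straight away.
 */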
+
+static DECLARE_WORK(kgdboc_restore_input_work, kgdboc_restore_input_helper);
+
+static void kgdboc_restore_input(void)
+{
+ schedule_work(&kgdboc_restore_input_work);
+}
+
static int kgdboc_register_kbd(char **cptr)
{
if (strncmp(*cptr, "kbd", 3) == 0) {
@@ -64,10 +120,12 @@ static void kgdboc_unregister_kbd(void)
i--;
}
}
+ flush_work_sync(&kgdboc_restore_input_work);
}
#else /* ! CONFIG_KDB_KEYBOARD */
#define kgdboc_register_kbd(x) 0
#define kgdboc_unregister_kbd()
+#define kgdboc_restore_input()
#endif /* ! CONFIG_KDB_KEYBOARD */
static int kgdboc_option_setup(char *opt)
@@ -231,6 +289,7 @@ static void kgdboc_post_exp_handler(void)
dbg_restore_graphics = 0;
con_debug_leave();
}
+ kgdboc_restore_input();
}
static struct kgdb_io kgdboc_io_ops = {
diff --git a/drivers/serial/mfd.c b/drivers/serial/mfd.c
index 5fc699e929dc..d40010a22ecd 100644
--- a/drivers/serial/mfd.c
+++ b/drivers/serial/mfd.c
@@ -900,8 +900,7 @@ serial_hsu_set_termios(struct uart_port *port, struct ktermios *termios,
unsigned char cval, fcr = 0;
unsigned long flags;
unsigned int baud, quot;
- u32 mul = 0x3600;
- u32 ps = 0x10;
+ u32 ps, mul;
switch (termios->c_cflag & CSIZE) {
case CS5:
@@ -943,31 +942,24 @@ serial_hsu_set_termios(struct uart_port *port, struct ktermios *termios,
baud = uart_get_baud_rate(port, termios, old, 0, 4000000);
quot = 1;
+ ps = 0x10;
+ mul = 0x3600;
switch (baud) {
case 3500000:
mul = 0x3345;
ps = 0xC;
break;
- case 3000000:
- mul = 0x2EE0;
- break;
- case 2500000:
- mul = 0x2710;
- break;
- case 2000000:
- mul = 0x1F40;
- break;
case 1843200:
mul = 0x2400;
break;
+ case 3000000:
+ case 2500000:
+ case 2000000:
case 1500000:
- mul = 0x1770;
- break;
case 1000000:
- mul = 0xFA0;
- break;
case 500000:
- mul = 0x7D0;
+ /* mul/ps/quot = 0x9C4/0x10/0x1 gives 500000 bps */
+ mul = baud / 500000 * 0x9C4;
break;
default:
/* Use uart_get_divisor to get quot for other baud rates */
diff --git a/drivers/serial/of_serial.c b/drivers/serial/of_serial.c
index 2af8fd113123..17849dcb9adc 100644
--- a/drivers/serial/of_serial.c
+++ b/drivers/serial/of_serial.c
@@ -31,8 +31,8 @@ static int __devinit of_platform_serial_setup(struct platform_device *ofdev,
{
struct resource resource;
struct device_node *np = ofdev->dev.of_node;
- const unsigned int *clk, *spd;
- const u32 *prop;
+ const __be32 *clk, *spd;
+ const __be32 *prop;
int ret, prop_size;
memset(port, 0, sizeof *port);
@@ -55,23 +55,23 @@ static int __devinit of_platform_serial_setup(struct platform_device *ofdev,
/* Check for shifted address mapping */
prop = of_get_property(np, "reg-offset", &prop_size);
if (prop && (prop_size == sizeof(u32)))
- port->mapbase += *prop;
+ port->mapbase += be32_to_cpup(prop);
/* Check for registers offset within the devices address range */
prop = of_get_property(np, "reg-shift", &prop_size);
if (prop && (prop_size == sizeof(u32)))
- port->regshift = *prop;
+ port->regshift = be32_to_cpup(prop);
port->irq = irq_of_parse_and_map(np, 0);
port->iotype = UPIO_MEM;
port->type = type;
- port->uartclk = *clk;
+ port->uartclk = be32_to_cpup(clk);
port->flags = UPF_SHARE_IRQ | UPF_BOOT_AUTOCONF | UPF_IOREMAP
| UPF_FIXED_PORT | UPF_FIXED_TYPE;
port->dev = &ofdev->dev;
/* If current-speed was set, then try not to change it. */
if (spd)
- port->custom_divisor = *clk / (16 * (*spd));
+ port->custom_divisor = be32_to_cpup(clk) / (16 * (be32_to_cpup(spd)));
return 0;
}
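Device-tree property values are stored big-endian, so dereferencing the raw pointer only happens to give the right answer on big-endian hosts; be32_to_cpup() makes the read correct everywhere. A small standalone illustration (ntohl() plays the role of be32_to_cpup() here, and the 48 MHz value is just an assumed example):

#include <stdio.h>
#include <stdint.h>
#include <string.h>
#include <arpa/inet.h>

int main(void)
{
	/* clock-frequency = <48000000>; as the bytes sit in the flattened tree */
	const uint8_t prop[4] = { 0x02, 0xdc, 0x6c, 0x00 };
	uint32_t raw, freq;

	memcpy(&raw, prop, sizeof(raw));
	freq = ntohl(raw);	/* what be32_to_cpup() does in the kernel */

	/* On a little-endian host this prints: raw 7134210, converted 48000000 */
	printf("raw %u, converted %u\n", raw, freq);
	return 0;
}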
diff --git a/drivers/serial/omap-serial.c b/drivers/serial/omap-serial.c
new file mode 100644
index 000000000000..14365f72b664
--- /dev/null
+++ b/drivers/serial/omap-serial.c
@@ -0,0 +1,1333 @@
+/*
+ * Driver for OMAP-UART controller.
+ * Based on drivers/serial/8250.c
+ *
+ * Copyright (C) 2010 Texas Instruments.
+ *
+ * Authors:
+ * Govindraj R <govindraj.raja@ti.com>
+ * Thara Gopinath <thara@ti.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * Note: This driver is kept separate from the 8250 driver as we
+ * cannot overload the 8250 driver with OMAP platform-specific
+ * configuration for features like DMA. Keeping it separate makes
+ * it easier to implement features like DMA, hardware flow control
+ * and software flow control as required for the OMAP platform.
+ */
+
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/console.h>
+#include <linux/serial_reg.h>
+#include <linux/delay.h>
+#include <linux/slab.h>
+#include <linux/tty.h>
+#include <linux/tty_flip.h>
+#include <linux/io.h>
+#include <linux/dma-mapping.h>
+#include <linux/clk.h>
+#include <linux/serial_core.h>
+#include <linux/irq.h>
+
+#include <plat/dma.h>
+#include <plat/dmtimer.h>
+#include <plat/omap-serial.h>
+
+static struct uart_omap_port *ui[OMAP_MAX_HSUART_PORTS];
+
+/* Forward declaration of functions */
+static void uart_tx_dma_callback(int lch, u16 ch_status, void *data);
+static void serial_omap_rx_timeout(unsigned long uart_no);
+static int serial_omap_start_rxdma(struct uart_omap_port *up);
+
+static inline unsigned int serial_in(struct uart_omap_port *up, int offset)
+{
+ offset <<= up->port.regshift;
+ return readw(up->port.membase + offset);
+}
+
+static inline void serial_out(struct uart_omap_port *up, int offset, int value)
+{
+ offset <<= up->port.regshift;
+ writew(value, up->port.membase + offset);
+}
+
+static inline void serial_omap_clear_fifos(struct uart_omap_port *up)
+{
+ serial_out(up, UART_FCR, UART_FCR_ENABLE_FIFO);
+ serial_out(up, UART_FCR, UART_FCR_ENABLE_FIFO |
+ UART_FCR_CLEAR_RCVR | UART_FCR_CLEAR_XMIT);
+ serial_out(up, UART_FCR, 0);
+}
+
+/*
+ * serial_omap_get_divisor - calculate divisor value
+ * @port: uart port info
+ * @baud: baudrate for which divisor needs to be calculated.
+ *
+ * We have written our own function to get the divisor so as to support
+ * 13x mode; the 3 Mbps baudrate uses a different divisor.
+ * Reference OMAP TRM Chapter 17:
+ * Table 17-1. UART Mode Baud Rates, Divisor Values, and Error Rates
+ * referring to oversampling - divisor value:
+ * baudrates 460,800 to 3,686,400 all have divisor 13,
+ * except 3,000,000 which has divisor value 16.
+ */
+static unsigned int
+serial_omap_get_divisor(struct uart_port *port, unsigned int baud)
+{
+ unsigned int divisor;
+
+ if (baud > OMAP_MODE13X_SPEED && baud != 3000000)
+ divisor = 13;
+ else
+ divisor = 16;
+ return port->uartclk/(baud * divisor);
+}
+
+static void serial_omap_stop_rxdma(struct uart_omap_port *up)
+{
+ if (up->uart_dma.rx_dma_used) {
+ del_timer(&up->uart_dma.rx_timer);
+ omap_stop_dma(up->uart_dma.rx_dma_channel);
+ omap_free_dma(up->uart_dma.rx_dma_channel);
+ up->uart_dma.rx_dma_channel = OMAP_UART_DMA_CH_FREE;
+ up->uart_dma.rx_dma_used = false;
+ }
+}
+
+static void serial_omap_enable_ms(struct uart_port *port)
+{
+ struct uart_omap_port *up = (struct uart_omap_port *)port;
+
+ dev_dbg(up->port.dev, "serial_omap_enable_ms+%d\n", up->pdev->id);
+ up->ier |= UART_IER_MSI;
+ serial_out(up, UART_IER, up->ier);
+}
+
+static void serial_omap_stop_tx(struct uart_port *port)
+{
+ struct uart_omap_port *up = (struct uart_omap_port *)port;
+
+ if (up->use_dma &&
+ up->uart_dma.tx_dma_channel != OMAP_UART_DMA_CH_FREE) {
+ /*
+ * Check if dma is still active. If yes do nothing,
+ * return. Else stop dma
+ */
+ if (omap_get_dma_active_status(up->uart_dma.tx_dma_channel))
+ return;
+ omap_stop_dma(up->uart_dma.tx_dma_channel);
+ omap_free_dma(up->uart_dma.tx_dma_channel);
+ up->uart_dma.tx_dma_channel = OMAP_UART_DMA_CH_FREE;
+ }
+
+ if (up->ier & UART_IER_THRI) {
+ up->ier &= ~UART_IER_THRI;
+ serial_out(up, UART_IER, up->ier);
+ }
+}
+
+static void serial_omap_stop_rx(struct uart_port *port)
+{
+ struct uart_omap_port *up = (struct uart_omap_port *)port;
+
+ if (up->use_dma)
+ serial_omap_stop_rxdma(up);
+ up->ier &= ~UART_IER_RLSI;
+ up->port.read_status_mask &= ~UART_LSR_DR;
+ serial_out(up, UART_IER, up->ier);
+}
+
+static inline void receive_chars(struct uart_omap_port *up, int *status)
+{
+ struct tty_struct *tty = up->port.state->port.tty;
+ unsigned int flag;
+ unsigned char ch, lsr = *status;
+ int max_count = 256;
+
+ do {
+ if (likely(lsr & UART_LSR_DR))
+ ch = serial_in(up, UART_RX);
+ flag = TTY_NORMAL;
+ up->port.icount.rx++;
+
+ if (unlikely(lsr & UART_LSR_BRK_ERROR_BITS)) {
+ /*
+ * For statistics only
+ */
+ if (lsr & UART_LSR_BI) {
+ lsr &= ~(UART_LSR_FE | UART_LSR_PE);
+ up->port.icount.brk++;
+ /*
+ * We do the SysRQ and SAK checking
+ * here because otherwise the break
+ * may get masked by ignore_status_mask
+ * or read_status_mask.
+ */
+ if (uart_handle_break(&up->port))
+ goto ignore_char;
+ } else if (lsr & UART_LSR_PE) {
+ up->port.icount.parity++;
+ } else if (lsr & UART_LSR_FE) {
+ up->port.icount.frame++;
+ }
+
+ if (lsr & UART_LSR_OE)
+ up->port.icount.overrun++;
+
+ /*
+ * Mask off conditions which should be ignored.
+ */
+ lsr &= up->port.read_status_mask;
+
+#ifdef CONFIG_SERIAL_OMAP_CONSOLE
+ if (up->port.line == up->port.cons->index) {
+ /* Recover the break flag from console xmit */
+ lsr |= up->lsr_break_flag;
+ up->lsr_break_flag = 0;
+ }
+#endif
+ if (lsr & UART_LSR_BI)
+ flag = TTY_BREAK;
+ else if (lsr & UART_LSR_PE)
+ flag = TTY_PARITY;
+ else if (lsr & UART_LSR_FE)
+ flag = TTY_FRAME;
+ }
+
+ if (uart_handle_sysrq_char(&up->port, ch))
+ goto ignore_char;
+ uart_insert_char(&up->port, lsr, UART_LSR_OE, ch, flag);
+ignore_char:
+ lsr = serial_in(up, UART_LSR);
+ } while ((lsr & (UART_LSR_DR | UART_LSR_BI)) && (max_count-- > 0));
+ spin_unlock(&up->port.lock);
+ tty_flip_buffer_push(tty);
+ spin_lock(&up->port.lock);
+}
+
+static void transmit_chars(struct uart_omap_port *up)
+{
+ struct circ_buf *xmit = &up->port.state->xmit;
+ int count;
+
+ if (up->port.x_char) {
+ serial_out(up, UART_TX, up->port.x_char);
+ up->port.icount.tx++;
+ up->port.x_char = 0;
+ return;
+ }
+ if (uart_circ_empty(xmit) || uart_tx_stopped(&up->port)) {
+ serial_omap_stop_tx(&up->port);
+ return;
+ }
+ count = up->port.fifosize / 4;
+ do {
+ serial_out(up, UART_TX, xmit->buf[xmit->tail]);
+ xmit->tail = (xmit->tail + 1) & (UART_XMIT_SIZE - 1);
+ up->port.icount.tx++;
+ if (uart_circ_empty(xmit))
+ break;
+ } while (--count > 0);
+
+ if (uart_circ_chars_pending(xmit) < WAKEUP_CHARS)
+ uart_write_wakeup(&up->port);
+
+ if (uart_circ_empty(xmit))
+ serial_omap_stop_tx(&up->port);
+}
+
+static inline void serial_omap_enable_ier_thri(struct uart_omap_port *up)
+{
+ if (!(up->ier & UART_IER_THRI)) {
+ up->ier |= UART_IER_THRI;
+ serial_out(up, UART_IER, up->ier);
+ }
+}
+
+static void serial_omap_start_tx(struct uart_port *port)
+{
+ struct uart_omap_port *up = (struct uart_omap_port *)port;
+ struct circ_buf *xmit;
+ unsigned int start;
+ int ret = 0;
+
+ if (!up->use_dma) {
+ serial_omap_enable_ier_thri(up);
+ return;
+ }
+
+ if (up->uart_dma.tx_dma_used)
+ return;
+
+ xmit = &up->port.state->xmit;
+
+ if (up->uart_dma.tx_dma_channel == OMAP_UART_DMA_CH_FREE) {
+ ret = omap_request_dma(up->uart_dma.uart_dma_tx,
+ "UART Tx DMA",
+ (void *)uart_tx_dma_callback, up,
+ &(up->uart_dma.tx_dma_channel));
+
+ if (ret < 0) {
+ serial_omap_enable_ier_thri(up);
+ return;
+ }
+ }
+ spin_lock(&(up->uart_dma.tx_lock));
+ up->uart_dma.tx_dma_used = true;
+ spin_unlock(&(up->uart_dma.tx_lock));
+
+ start = up->uart_dma.tx_buf_dma_phys +
+ (xmit->tail & (UART_XMIT_SIZE - 1));
+
+ up->uart_dma.tx_buf_size = uart_circ_chars_pending(xmit);
+ /*
+ * It is a circular buffer. See if the buffer has wrapped around.
+ * If so, it will have to be transferred in two separate DMA
+ * transfers.
+ */
+ if (start + up->uart_dma.tx_buf_size >=
+ up->uart_dma.tx_buf_dma_phys + UART_XMIT_SIZE)
+ up->uart_dma.tx_buf_size =
+ (up->uart_dma.tx_buf_dma_phys +
+ UART_XMIT_SIZE) - start;
+
+ omap_set_dma_dest_params(up->uart_dma.tx_dma_channel, 0,
+ OMAP_DMA_AMODE_CONSTANT,
+ up->uart_dma.uart_base, 0, 0);
+ omap_set_dma_src_params(up->uart_dma.tx_dma_channel, 0,
+ OMAP_DMA_AMODE_POST_INC, start, 0, 0);
+ omap_set_dma_transfer_params(up->uart_dma.tx_dma_channel,
+ OMAP_DMA_DATA_TYPE_S8,
+ up->uart_dma.tx_buf_size, 1,
+ OMAP_DMA_SYNC_ELEMENT,
+ up->uart_dma.uart_dma_tx, 0);
+ /* FIXME: Cache maintenance needed here? */
+ omap_start_dma(up->uart_dma.tx_dma_channel);
+}
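/*
 * Wrap-around example for the size clamp above (assumed numbers): with
 * UART_XMIT_SIZE = 4096, xmit->tail = 4000 and 300 bytes pending, the
 * transfer start lies 4000 bytes into the DMA buffer, so only
 * 4096 - 4000 = 96 bytes are programmed now; the remaining 204 bytes go
 * out in a follow-up transfer once this one completes.
 */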
+
+static unsigned int check_modem_status(struct uart_omap_port *up)
+{
+ unsigned int status;
+
+ status = serial_in(up, UART_MSR);
+ status |= up->msr_saved_flags;
+ up->msr_saved_flags = 0;
+ if ((status & UART_MSR_ANY_DELTA) == 0)
+ return status;
+
+ if (status & UART_MSR_ANY_DELTA && up->ier & UART_IER_MSI &&
+ up->port.state != NULL) {
+ if (status & UART_MSR_TERI)
+ up->port.icount.rng++;
+ if (status & UART_MSR_DDSR)
+ up->port.icount.dsr++;
+ if (status & UART_MSR_DDCD)
+ uart_handle_dcd_change
+ (&up->port, status & UART_MSR_DCD);
+ if (status & UART_MSR_DCTS)
+ uart_handle_cts_change
+ (&up->port, status & UART_MSR_CTS);
+ wake_up_interruptible(&up->port.state->port.delta_msr_wait);
+ }
+
+ return status;
+}
+
+/**
+ * serial_omap_irq() - This handles the interrupt from one port
+ * @irq: uart port irq number
+ * @dev_id: uart port info
+ */
+static inline irqreturn_t serial_omap_irq(int irq, void *dev_id)
+{
+ struct uart_omap_port *up = dev_id;
+ unsigned int iir, lsr;
+ unsigned long flags;
+
+ iir = serial_in(up, UART_IIR);
+ if (iir & UART_IIR_NO_INT)
+ return IRQ_NONE;
+
+ spin_lock_irqsave(&up->port.lock, flags);
+ lsr = serial_in(up, UART_LSR);
+ if (iir & UART_IIR_RLSI) {
+ if (!up->use_dma) {
+ if (lsr & UART_LSR_DR)
+ receive_chars(up, &lsr);
+ } else {
+ up->ier &= ~(UART_IER_RDI | UART_IER_RLSI);
+ serial_out(up, UART_IER, up->ier);
+ if ((serial_omap_start_rxdma(up) != 0) &&
+ (lsr & UART_LSR_DR))
+ receive_chars(up, &lsr);
+ }
+ }
+
+ check_modem_status(up);
+ if ((lsr & UART_LSR_THRE) && (iir & UART_IIR_THRI))
+ transmit_chars(up);
+
+ spin_unlock_irqrestore(&up->port.lock, flags);
+ up->port_activity = jiffies;
+ return IRQ_HANDLED;
+}
+
+static unsigned int serial_omap_tx_empty(struct uart_port *port)
+{
+ struct uart_omap_port *up = (struct uart_omap_port *)port;
+ unsigned long flags = 0;
+ unsigned int ret = 0;
+
+ dev_dbg(up->port.dev, "serial_omap_tx_empty+%d\n", up->pdev->id);
+ spin_lock_irqsave(&up->port.lock, flags);
+ ret = serial_in(up, UART_LSR) & UART_LSR_TEMT ? TIOCSER_TEMT : 0;
+ spin_unlock_irqrestore(&up->port.lock, flags);
+
+ return ret;
+}
+
+static unsigned int serial_omap_get_mctrl(struct uart_port *port)
+{
+ struct uart_omap_port *up = (struct uart_omap_port *)port;
+ unsigned char status;
+ unsigned int ret = 0;
+
+ status = check_modem_status(up);
+ dev_dbg(up->port.dev, "serial_omap_get_mctrl+%d\n", up->pdev->id);
+
+ if (status & UART_MSR_DCD)
+ ret |= TIOCM_CAR;
+ if (status & UART_MSR_RI)
+ ret |= TIOCM_RNG;
+ if (status & UART_MSR_DSR)
+ ret |= TIOCM_DSR;
+ if (status & UART_MSR_CTS)
+ ret |= TIOCM_CTS;
+ return ret;
+}
+
+static void serial_omap_set_mctrl(struct uart_port *port, unsigned int mctrl)
+{
+ struct uart_omap_port *up = (struct uart_omap_port *)port;
+ unsigned char mcr = 0;
+
+ dev_dbg(up->port.dev, "serial_omap_set_mctrl+%d\n", up->pdev->id);
+ if (mctrl & TIOCM_RTS)
+ mcr |= UART_MCR_RTS;
+ if (mctrl & TIOCM_DTR)
+ mcr |= UART_MCR_DTR;
+ if (mctrl & TIOCM_OUT1)
+ mcr |= UART_MCR_OUT1;
+ if (mctrl & TIOCM_OUT2)
+ mcr |= UART_MCR_OUT2;
+ if (mctrl & TIOCM_LOOP)
+ mcr |= UART_MCR_LOOP;
+
+ mcr |= up->mcr;
+ serial_out(up, UART_MCR, mcr);
+}
+
+static void serial_omap_break_ctl(struct uart_port *port, int break_state)
+{
+ struct uart_omap_port *up = (struct uart_omap_port *)port;
+ unsigned long flags = 0;
+
+ dev_dbg(up->port.dev, "serial_omap_break_ctl+%d\n", up->pdev->id);
+ spin_lock_irqsave(&up->port.lock, flags);
+ if (break_state == -1)
+ up->lcr |= UART_LCR_SBC;
+ else
+ up->lcr &= ~UART_LCR_SBC;
+ serial_out(up, UART_LCR, up->lcr);
+ spin_unlock_irqrestore(&up->port.lock, flags);
+}
+
+static int serial_omap_startup(struct uart_port *port)
+{
+ struct uart_omap_port *up = (struct uart_omap_port *)port;
+ unsigned long flags = 0;
+ int retval;
+
+ /*
+ * Allocate the IRQ
+ */
+ retval = request_irq(up->port.irq, serial_omap_irq, up->port.irqflags,
+ up->name, up);
+ if (retval)
+ return retval;
+
+ dev_dbg(up->port.dev, "serial_omap_startup+%d\n", up->pdev->id);
+
+ /*
+ * Clear the FIFO buffers and disable them.
+ * (they will be reenabled in set_termios())
+ */
+ serial_omap_clear_fifos(up);
+ /* For Hardware flow control */
+ serial_out(up, UART_MCR, UART_MCR_RTS);
+
+ /*
+ * Clear the interrupt registers.
+ */
+ (void) serial_in(up, UART_LSR);
+ if (serial_in(up, UART_LSR) & UART_LSR_DR)
+ (void) serial_in(up, UART_RX);
+ (void) serial_in(up, UART_IIR);
+ (void) serial_in(up, UART_MSR);
+
+ /*
+ * Now, initialize the UART
+ */
+ serial_out(up, UART_LCR, UART_LCR_WLEN8);
+ spin_lock_irqsave(&up->port.lock, flags);
+ /*
+ * Most PC uarts need OUT2 raised to enable interrupts.
+ */
+ up->port.mctrl |= TIOCM_OUT2;
+ serial_omap_set_mctrl(&up->port, up->port.mctrl);
+ spin_unlock_irqrestore(&up->port.lock, flags);
+
+ up->msr_saved_flags = 0;
+ if (up->use_dma) {
+ free_page((unsigned long)up->port.state->xmit.buf);
+ up->port.state->xmit.buf = dma_alloc_coherent(NULL,
+ UART_XMIT_SIZE,
+ (dma_addr_t *)&(up->uart_dma.tx_buf_dma_phys),
+ 0);
+ init_timer(&(up->uart_dma.rx_timer));
+ up->uart_dma.rx_timer.function = serial_omap_rx_timeout;
+ up->uart_dma.rx_timer.data = up->pdev->id;
+ /* Currently the buffer size is 4KB. Can increase it */
+ up->uart_dma.rx_buf = dma_alloc_coherent(NULL,
+ up->uart_dma.rx_buf_size,
+ (dma_addr_t *)&(up->uart_dma.rx_buf_dma_phys), 0);
+ }
+ /*
+ * Finally, enable interrupts. Note: Modem status interrupts
+ * are set via set_termios(), which will be occurring imminently
+ * anyway, so we don't enable them here.
+ */
+ up->ier = UART_IER_RLSI | UART_IER_RDI;
+ serial_out(up, UART_IER, up->ier);
+
+ up->port_activity = jiffies;
+ return 0;
+}
+
+static void serial_omap_shutdown(struct uart_port *port)
+{
+ struct uart_omap_port *up = (struct uart_omap_port *)port;
+ unsigned long flags = 0;
+
+ dev_dbg(up->port.dev, "serial_omap_shutdown+%d\n", up->pdev->id);
+ /*
+ * Disable interrupts from this port
+ */
+ up->ier = 0;
+ serial_out(up, UART_IER, 0);
+
+ spin_lock_irqsave(&up->port.lock, flags);
+ up->port.mctrl &= ~TIOCM_OUT2;
+ serial_omap_set_mctrl(&up->port, up->port.mctrl);
+ spin_unlock_irqrestore(&up->port.lock, flags);
+
+ /*
+ * Disable break condition and FIFOs
+ */
+ serial_out(up, UART_LCR, serial_in(up, UART_LCR) & ~UART_LCR_SBC);
+ serial_omap_clear_fifos(up);
+
+ /*
+ * Read data port to reset things, and then free the irq
+ */
+ if (serial_in(up, UART_LSR) & UART_LSR_DR)
+ (void) serial_in(up, UART_RX);
+ if (up->use_dma) {
+ dma_free_coherent(up->port.dev,
+ UART_XMIT_SIZE, up->port.state->xmit.buf,
+ up->uart_dma.tx_buf_dma_phys);
+ up->port.state->xmit.buf = NULL;
+ serial_omap_stop_rx(port);
+ dma_free_coherent(up->port.dev,
+ up->uart_dma.rx_buf_size, up->uart_dma.rx_buf,
+ up->uart_dma.rx_buf_dma_phys);
+ up->uart_dma.rx_buf = NULL;
+ }
+ free_irq(up->port.irq, up);
+}
+
+static inline void
+serial_omap_configure_xonxoff
+ (struct uart_omap_port *up, struct ktermios *termios)
+{
+ unsigned char efr = 0;
+
+ up->lcr = serial_in(up, UART_LCR);
+ serial_out(up, UART_LCR, OMAP_UART_LCR_CONF_MDB);
+ up->efr = serial_in(up, UART_EFR);
+ serial_out(up, UART_EFR, up->efr & ~UART_EFR_ECB);
+
+ serial_out(up, UART_XON1, termios->c_cc[VSTART]);
+ serial_out(up, UART_XOFF1, termios->c_cc[VSTOP]);
+
+ /* clear SW control mode bits */
+ efr = up->efr;
+ efr &= OMAP_UART_SW_CLR;
+
+ /*
+ * IXON Flag:
+ * Enable XON/XOFF flow control on output.
+ * Transmit XON1, XOFF1
+ */
+ if (termios->c_iflag & IXON)
+ efr |= OMAP_UART_SW_TX;
+
+ /*
+ * IXOFF Flag:
+ * Enable XON/XOFF flow control on input.
+ * Receiver compares XON1, XOFF1.
+ */
+ if (termios->c_iflag & IXOFF)
+ efr |= OMAP_UART_SW_RX;
+
+ serial_out(up, UART_EFR, up->efr | UART_EFR_ECB);
+ serial_out(up, UART_LCR, UART_LCR_DLAB);
+
+ up->mcr = serial_in(up, UART_MCR);
+
+ /*
+ * IXANY Flag:
+ * Enable any character to restart output.
+ * Operation resumes after receiving any
+ * character after recognition of the XOFF character
+ */
+ if (termios->c_iflag & IXANY)
+ up->mcr |= UART_MCR_XONANY;
+
+ serial_out(up, UART_MCR, up->mcr | UART_MCR_TCRTLR);
+ serial_out(up, UART_LCR, OMAP_UART_LCR_CONF_MDB);
+ serial_out(up, UART_TI752_TCR, OMAP_UART_TCR_TRIG);
+ /* Enable special char function UARTi.EFR_REG[5] and
+ * load the new software flow control mode IXON or IXOFF
+ * and restore the UARTi.EFR_REG[4] ENHANCED_EN value.
+ */
+ serial_out(up, UART_EFR, efr | UART_EFR_SCD);
+ serial_out(up, UART_LCR, UART_LCR_DLAB);
+
+ serial_out(up, UART_MCR, up->mcr & ~UART_MCR_TCRTLR);
+ serial_out(up, UART_LCR, up->lcr);
+}
+
+static void
+serial_omap_set_termios(struct uart_port *port, struct ktermios *termios,
+ struct ktermios *old)
+{
+ struct uart_omap_port *up = (struct uart_omap_port *)port;
+ unsigned char cval = 0;
+ unsigned char efr = 0;
+ unsigned long flags = 0;
+ unsigned int baud, quot;
+
+ switch (termios->c_cflag & CSIZE) {
+ case CS5:
+ cval = UART_LCR_WLEN5;
+ break;
+ case CS6:
+ cval = UART_LCR_WLEN6;
+ break;
+ case CS7:
+ cval = UART_LCR_WLEN7;
+ break;
+ default:
+ case CS8:
+ cval = UART_LCR_WLEN8;
+ break;
+ }
+
+ if (termios->c_cflag & CSTOPB)
+ cval |= UART_LCR_STOP;
+ if (termios->c_cflag & PARENB)
+ cval |= UART_LCR_PARITY;
+ if (!(termios->c_cflag & PARODD))
+ cval |= UART_LCR_EPAR;
+
+ /*
+ * Ask the core to calculate the divisor for us.
+ */
+
+ baud = uart_get_baud_rate(port, termios, old, 0, port->uartclk/13);
+ quot = serial_omap_get_divisor(port, baud);
+
+ up->fcr = UART_FCR_R_TRIG_01 | UART_FCR_T_TRIG_01 |
+ UART_FCR_ENABLE_FIFO;
+ if (up->use_dma)
+ up->fcr |= UART_FCR_DMA_SELECT;
+
+ /*
+ * Ok, we're now changing the port state. Do it with
+ * interrupts disabled.
+ */
+ spin_lock_irqsave(&up->port.lock, flags);
+
+ /*
+ * Update the per-port timeout.
+ */
+ uart_update_timeout(port, termios->c_cflag, baud);
+
+ up->port.read_status_mask = UART_LSR_OE | UART_LSR_THRE | UART_LSR_DR;
+ if (termios->c_iflag & INPCK)
+ up->port.read_status_mask |= UART_LSR_FE | UART_LSR_PE;
+ if (termios->c_iflag & (BRKINT | PARMRK))
+ up->port.read_status_mask |= UART_LSR_BI;
+
+ /*
+ * Characters to ignore
+ */
+ up->port.ignore_status_mask = 0;
+ if (termios->c_iflag & IGNPAR)
+ up->port.ignore_status_mask |= UART_LSR_PE | UART_LSR_FE;
+ if (termios->c_iflag & IGNBRK) {
+ up->port.ignore_status_mask |= UART_LSR_BI;
+ /*
+ * If we're ignoring parity and break indicators,
+ * ignore overruns too (for real raw support).
+ */
+ if (termios->c_iflag & IGNPAR)
+ up->port.ignore_status_mask |= UART_LSR_OE;
+ }
+
+ /*
+ * ignore all characters if CREAD is not set
+ */
+ if ((termios->c_cflag & CREAD) == 0)
+ up->port.ignore_status_mask |= UART_LSR_DR;
+
+ /*
+ * Modem status interrupts
+ */
+ up->ier &= ~UART_IER_MSI;
+ if (UART_ENABLE_MS(&up->port, termios->c_cflag))
+ up->ier |= UART_IER_MSI;
+ serial_out(up, UART_IER, up->ier);
+ serial_out(up, UART_LCR, cval); /* reset DLAB */
+
+ /* FIFOs and DMA Settings */
+
+ /* FCR can be changed only when the
+ * baud clock is not running
+ * DLL_REG and DLH_REG set to 0.
+ */
+ serial_out(up, UART_LCR, UART_LCR_DLAB);
+ serial_out(up, UART_DLL, 0);
+ serial_out(up, UART_DLM, 0);
+ serial_out(up, UART_LCR, 0);
+
+ serial_out(up, UART_LCR, OMAP_UART_LCR_CONF_MDB);
+
+ up->efr = serial_in(up, UART_EFR);
+ serial_out(up, UART_EFR, up->efr | UART_EFR_ECB);
+
+ serial_out(up, UART_LCR, UART_LCR_DLAB);
+ up->mcr = serial_in(up, UART_MCR);
+ serial_out(up, UART_MCR, up->mcr | UART_MCR_TCRTLR);
+ /* FIFO ENABLE, DMA MODE */
+ serial_out(up, UART_FCR, up->fcr);
+ serial_out(up, UART_LCR, OMAP_UART_LCR_CONF_MDB);
+
+ if (up->use_dma) {
+ serial_out(up, UART_TI752_TLR, 0);
+ serial_out(up, UART_OMAP_SCR,
+ (UART_FCR_TRIGGER_4 | UART_FCR_TRIGGER_8));
+ }
+
+ serial_out(up, UART_EFR, up->efr);
+ serial_out(up, UART_LCR, UART_LCR_DLAB);
+ serial_out(up, UART_MCR, up->mcr);
+
+ /* Protocol, Baud Rate, and Interrupt Settings */
+
+ serial_out(up, UART_OMAP_MDR1, OMAP_MDR1_DISABLE);
+ serial_out(up, UART_LCR, OMAP_UART_LCR_CONF_MDB);
+
+ up->efr = serial_in(up, UART_EFR);
+ serial_out(up, UART_EFR, up->efr | UART_EFR_ECB);
+
+ serial_out(up, UART_LCR, 0);
+ serial_out(up, UART_IER, 0);
+ serial_out(up, UART_LCR, OMAP_UART_LCR_CONF_MDB);
+
+ serial_out(up, UART_DLL, quot & 0xff); /* LS of divisor */
+ serial_out(up, UART_DLM, quot >> 8); /* MS of divisor */
+
+ serial_out(up, UART_LCR, 0);
+ serial_out(up, UART_IER, up->ier);
+ serial_out(up, UART_LCR, OMAP_UART_LCR_CONF_MDB);
+
+ serial_out(up, UART_EFR, up->efr);
+ serial_out(up, UART_LCR, cval);
+
+ if (baud > 230400 && baud != 3000000)
+ serial_out(up, UART_OMAP_MDR1, OMAP_MDR1_MODE13X);
+ else
+ serial_out(up, UART_OMAP_MDR1, OMAP_MDR1_MODE16X);
+
+ /* Hardware Flow Control Configuration */
+
+ if (termios->c_cflag & CRTSCTS) {
+ efr |= (UART_EFR_CTS | UART_EFR_RTS);
+ serial_out(up, UART_LCR, UART_LCR_DLAB);
+
+ up->mcr = serial_in(up, UART_MCR);
+ serial_out(up, UART_MCR, up->mcr | UART_MCR_TCRTLR);
+
+ serial_out(up, UART_LCR, OMAP_UART_LCR_CONF_MDB);
+ up->efr = serial_in(up, UART_EFR);
+ serial_out(up, UART_EFR, up->efr | UART_EFR_ECB);
+
+ serial_out(up, UART_TI752_TCR, OMAP_UART_TCR_TRIG);
+ serial_out(up, UART_EFR, efr); /* Enable AUTORTS and AUTOCTS */
+ serial_out(up, UART_LCR, UART_LCR_DLAB);
+ serial_out(up, UART_MCR, up->mcr | UART_MCR_RTS);
+ serial_out(up, UART_LCR, cval);
+ }
+
+ serial_omap_set_mctrl(&up->port, up->port.mctrl);
+ /* Software Flow Control Configuration */
+ if (termios->c_iflag & (IXON | IXOFF))
+ serial_omap_configure_xonxoff(up, termios);
+
+ spin_unlock_irqrestore(&up->port.lock, flags);
+ dev_dbg(up->port.dev, "serial_omap_set_termios+%d\n", up->pdev->id);
+}
+
+static void
+serial_omap_pm(struct uart_port *port, unsigned int state,
+ unsigned int oldstate)
+{
+ struct uart_omap_port *up = (struct uart_omap_port *)port;
+ unsigned char efr;
+
+ dev_dbg(up->port.dev, "serial_omap_pm+%d\n", up->pdev->id);
+ serial_out(up, UART_LCR, OMAP_UART_LCR_CONF_MDB);
+ efr = serial_in(up, UART_EFR);
+ serial_out(up, UART_EFR, efr | UART_EFR_ECB);
+ serial_out(up, UART_LCR, 0);
+
+ serial_out(up, UART_IER, (state != 0) ? UART_IERX_SLEEP : 0);
+ serial_out(up, UART_LCR, OMAP_UART_LCR_CONF_MDB);
+ serial_out(up, UART_EFR, efr);
+ serial_out(up, UART_LCR, 0);
+ /* Enable module level wake up */
+ serial_out(up, UART_OMAP_WER,
+ (state != 0) ? OMAP_UART_WER_MOD_WKUP : 0);
+}
+
+static void serial_omap_release_port(struct uart_port *port)
+{
+ dev_dbg(port->dev, "serial_omap_release_port+\n");
+}
+
+static int serial_omap_request_port(struct uart_port *port)
+{
+ dev_dbg(port->dev, "serial_omap_request_port+\n");
+ return 0;
+}
+
+static void serial_omap_config_port(struct uart_port *port, int flags)
+{
+ struct uart_omap_port *up = (struct uart_omap_port *)port;
+
+ dev_dbg(up->port.dev, "serial_omap_config_port+%d\n",
+ up->pdev->id);
+ up->port.type = PORT_OMAP;
+}
+
+static int
+serial_omap_verify_port(struct uart_port *port, struct serial_struct *ser)
+{
+ /* we don't want the core code to modify any port params */
+ dev_dbg(port->dev, "serial_omap_verify_port+\n");
+ return -EINVAL;
+}
+
+static const char *
+serial_omap_type(struct uart_port *port)
+{
+ struct uart_omap_port *up = (struct uart_omap_port *)port;
+
+ dev_dbg(up->port.dev, "serial_omap_type+%d\n", up->pdev->id);
+ return up->name;
+}
+
+#ifdef CONFIG_SERIAL_OMAP_CONSOLE
+
+static struct uart_omap_port *serial_omap_console_ports[4];
+
+static struct uart_driver serial_omap_reg;
+
+#define BOTH_EMPTY (UART_LSR_TEMT | UART_LSR_THRE)
+
+static inline void wait_for_xmitr(struct uart_omap_port *up)
+{
+ unsigned int status, tmout = 10000;
+
+ /* Wait up to 10ms for the character(s) to be sent. */
+ do {
+ status = serial_in(up, UART_LSR);
+
+ if (status & UART_LSR_BI)
+ up->lsr_break_flag = UART_LSR_BI;
+
+ if (--tmout == 0)
+ break;
+ udelay(1);
+ } while ((status & BOTH_EMPTY) != BOTH_EMPTY);
+
+ /* Wait up to 1s for flow control if necessary */
+ if (up->port.flags & UPF_CONS_FLOW) {
+ tmout = 1000000;
+ for (tmout = 1000000; tmout; tmout--) {
+ unsigned int msr = serial_in(up, UART_MSR);
+
+ up->msr_saved_flags |= msr & MSR_SAVE_FLAGS;
+ if (msr & UART_MSR_CTS)
+ break;
+
+ udelay(1);
+ }
+ }
+}
+
+static void serial_omap_console_putchar(struct uart_port *port, int ch)
+{
+ struct uart_omap_port *up = (struct uart_omap_port *)port;
+
+ wait_for_xmitr(up);
+ serial_out(up, UART_TX, ch);
+}
+
+static void
+serial_omap_console_write(struct console *co, const char *s,
+ unsigned int count)
+{
+ struct uart_omap_port *up = serial_omap_console_ports[co->index];
+ unsigned long flags;
+ unsigned int ier;
+ int locked = 1;
+
+ local_irq_save(flags);
+ if (up->port.sysrq)
+ locked = 0;
+ else if (oops_in_progress)
+ locked = spin_trylock(&up->port.lock);
+ else
+ spin_lock(&up->port.lock);
+
+ /*
+ * First save the IER then disable the interrupts
+ */
+ ier = serial_in(up, UART_IER);
+ serial_out(up, UART_IER, 0);
+
+ uart_console_write(&up->port, s, count, serial_omap_console_putchar);
+
+ /*
+ * Finally, wait for transmitter to become empty
+ * and restore the IER
+ */
+ wait_for_xmitr(up);
+ serial_out(up, UART_IER, ier);
+ /*
+ * The receive handling will happen properly because the
+ * receive ready bit will still be set; it is not cleared
+ * on read. However, modem status handling will not, so we must
+ * call it if we have saved something in the saved flags
+ * while processing with interrupts off.
+ */
+ if (up->msr_saved_flags)
+ check_modem_status(up);
+
+ if (locked)
+ spin_unlock(&up->port.lock);
+ local_irq_restore(flags);
+}
+
+static int __init
+serial_omap_console_setup(struct console *co, char *options)
+{
+ struct uart_omap_port *up;
+ int baud = 115200;
+ int bits = 8;
+ int parity = 'n';
+ int flow = 'n';
+
+ if (serial_omap_console_ports[co->index] == NULL)
+ return -ENODEV;
+ up = serial_omap_console_ports[co->index];
+
+ if (options)
+ uart_parse_options(options, &baud, &parity, &bits, &flow);
+
+ return uart_set_options(&up->port, co, baud, parity, bits, flow);
+}
+
+static struct console serial_omap_console = {
+ .name = OMAP_SERIAL_NAME,
+ .write = serial_omap_console_write,
+ .device = uart_console_device,
+ .setup = serial_omap_console_setup,
+ .flags = CON_PRINTBUFFER,
+ .index = -1,
+ .data = &serial_omap_reg,
+};
+
+static void serial_omap_add_console_port(struct uart_omap_port *up)
+{
+ serial_omap_console_ports[up->pdev->id] = up;
+}
+
+#define OMAP_CONSOLE (&serial_omap_console)
+
+#else
+
+#define OMAP_CONSOLE NULL
+
+static inline void serial_omap_add_console_port(struct uart_omap_port *up)
+{}
+
+#endif
+
+static struct uart_ops serial_omap_pops = {
+ .tx_empty = serial_omap_tx_empty,
+ .set_mctrl = serial_omap_set_mctrl,
+ .get_mctrl = serial_omap_get_mctrl,
+ .stop_tx = serial_omap_stop_tx,
+ .start_tx = serial_omap_start_tx,
+ .stop_rx = serial_omap_stop_rx,
+ .enable_ms = serial_omap_enable_ms,
+ .break_ctl = serial_omap_break_ctl,
+ .startup = serial_omap_startup,
+ .shutdown = serial_omap_shutdown,
+ .set_termios = serial_omap_set_termios,
+ .pm = serial_omap_pm,
+ .type = serial_omap_type,
+ .release_port = serial_omap_release_port,
+ .request_port = serial_omap_request_port,
+ .config_port = serial_omap_config_port,
+ .verify_port = serial_omap_verify_port,
+};
+
+static struct uart_driver serial_omap_reg = {
+ .owner = THIS_MODULE,
+ .driver_name = "OMAP-SERIAL",
+ .dev_name = OMAP_SERIAL_NAME,
+ .nr = OMAP_MAX_HSUART_PORTS,
+ .cons = OMAP_CONSOLE,
+};
+
+static int
+serial_omap_suspend(struct platform_device *pdev, pm_message_t state)
+{
+ struct uart_omap_port *up = platform_get_drvdata(pdev);
+
+ if (up)
+ uart_suspend_port(&serial_omap_reg, &up->port);
+ return 0;
+}
+
+static int serial_omap_resume(struct platform_device *dev)
+{
+ struct uart_omap_port *up = platform_get_drvdata(dev);
+
+ if (up)
+ uart_resume_port(&serial_omap_reg, &up->port);
+ return 0;
+}
+
+static void serial_omap_rx_timeout(unsigned long uart_no)
+{
+ struct uart_omap_port *up = ui[uart_no];
+ unsigned int curr_dma_pos, curr_transmitted_size;
+ int ret = 0;
+
+ curr_dma_pos = omap_get_dma_dst_pos(up->uart_dma.rx_dma_channel);
+ if ((curr_dma_pos == up->uart_dma.prev_rx_dma_pos) ||
+ (curr_dma_pos == 0)) {
+ if (jiffies_to_msecs(jiffies - up->port_activity) <
+ RX_TIMEOUT) {
+ mod_timer(&up->uart_dma.rx_timer, jiffies +
+ usecs_to_jiffies(up->uart_dma.rx_timeout));
+ } else {
+ serial_omap_stop_rxdma(up);
+ up->ier |= (UART_IER_RDI | UART_IER_RLSI);
+ serial_out(up, UART_IER, up->ier);
+ }
+ return;
+ }
+
+ curr_transmitted_size = curr_dma_pos -
+ up->uart_dma.prev_rx_dma_pos;
+ up->port.icount.rx += curr_transmitted_size;
+ tty_insert_flip_string(up->port.state->port.tty,
+ up->uart_dma.rx_buf +
+ (up->uart_dma.prev_rx_dma_pos -
+ up->uart_dma.rx_buf_dma_phys),
+ curr_transmitted_size);
+ tty_flip_buffer_push(up->port.state->port.tty);
+ up->uart_dma.prev_rx_dma_pos = curr_dma_pos;
+ if (up->uart_dma.rx_buf_size +
+ up->uart_dma.rx_buf_dma_phys == curr_dma_pos) {
+ ret = serial_omap_start_rxdma(up);
+ if (ret < 0) {
+ serial_omap_stop_rxdma(up);
+ up->ier |= (UART_IER_RDI | UART_IER_RLSI);
+ serial_out(up, UART_IER, up->ier);
+ }
+ } else {
+ mod_timer(&up->uart_dma.rx_timer, jiffies +
+ usecs_to_jiffies(up->uart_dma.rx_timeout));
+ }
+ up->port_activity = jiffies;
+}
+
+static void uart_rx_dma_callback(int lch, u16 ch_status, void *data)
+{
+ return;
+}
+
+static int serial_omap_start_rxdma(struct uart_omap_port *up)
+{
+ int ret = 0;
+
+ if (up->uart_dma.rx_dma_channel == -1) {
+ ret = omap_request_dma(up->uart_dma.uart_dma_rx,
+ "UART Rx DMA",
+ (void *)uart_rx_dma_callback, up,
+ &(up->uart_dma.rx_dma_channel));
+ if (ret < 0)
+ return ret;
+
+ omap_set_dma_src_params(up->uart_dma.rx_dma_channel, 0,
+ OMAP_DMA_AMODE_CONSTANT,
+ up->uart_dma.uart_base, 0, 0);
+ omap_set_dma_dest_params(up->uart_dma.rx_dma_channel, 0,
+ OMAP_DMA_AMODE_POST_INC,
+ up->uart_dma.rx_buf_dma_phys, 0, 0);
+ omap_set_dma_transfer_params(up->uart_dma.rx_dma_channel,
+ OMAP_DMA_DATA_TYPE_S8,
+ up->uart_dma.rx_buf_size, 1,
+ OMAP_DMA_SYNC_ELEMENT,
+ up->uart_dma.uart_dma_rx, 0);
+ }
+ up->uart_dma.prev_rx_dma_pos = up->uart_dma.rx_buf_dma_phys;
+ /* FIXME: Cache maintenance needed here? */
+ omap_start_dma(up->uart_dma.rx_dma_channel);
+ mod_timer(&up->uart_dma.rx_timer, jiffies +
+ usecs_to_jiffies(up->uart_dma.rx_timeout));
+ up->uart_dma.rx_dma_used = true;
+ return ret;
+}
+
+static void serial_omap_continue_tx(struct uart_omap_port *up)
+{
+ struct circ_buf *xmit = &up->port.state->xmit;
+ unsigned int start = up->uart_dma.tx_buf_dma_phys
+ + (xmit->tail & (UART_XMIT_SIZE - 1));
+
+ if (uart_circ_empty(xmit))
+ return;
+
+ up->uart_dma.tx_buf_size = uart_circ_chars_pending(xmit);
+ /*
+ * It is a circular buffer. See if the buffer has wrapped
+ * around; if so, the data has to be transferred in two
+ * separate DMA transfers.
+ */
+ if (start + up->uart_dma.tx_buf_size >=
+ up->uart_dma.tx_buf_dma_phys + UART_XMIT_SIZE)
+ up->uart_dma.tx_buf_size =
+ (up->uart_dma.tx_buf_dma_phys + UART_XMIT_SIZE) - start;
+ omap_set_dma_dest_params(up->uart_dma.tx_dma_channel, 0,
+ OMAP_DMA_AMODE_CONSTANT,
+ up->uart_dma.uart_base, 0, 0);
+ omap_set_dma_src_params(up->uart_dma.tx_dma_channel, 0,
+ OMAP_DMA_AMODE_POST_INC, start, 0, 0);
+ omap_set_dma_transfer_params(up->uart_dma.tx_dma_channel,
+ OMAP_DMA_DATA_TYPE_S8,
+ up->uart_dma.tx_buf_size, 1,
+ OMAP_DMA_SYNC_ELEMENT,
+ up->uart_dma.uart_dma_tx, 0);
+ /* FIXME: Cache maintenance needed here? */
+ omap_start_dma(up->uart_dma.tx_dma_channel);
+}
+
+static void uart_tx_dma_callback(int lch, u16 ch_status, void *data)
+{
+ struct uart_omap_port *up = (struct uart_omap_port *)data;
+ struct circ_buf *xmit = &up->port.state->xmit;
+
+ xmit->tail = (xmit->tail + up->uart_dma.tx_buf_size) & \
+ (UART_XMIT_SIZE - 1);
+ up->port.icount.tx += up->uart_dma.tx_buf_size;
+
+ if (uart_circ_chars_pending(xmit) < WAKEUP_CHARS)
+ uart_write_wakeup(&up->port);
+
+ if (uart_circ_empty(xmit)) {
+ spin_lock(&(up->uart_dma.tx_lock));
+ serial_omap_stop_tx(&up->port);
+ up->uart_dma.tx_dma_used = false;
+ spin_unlock(&(up->uart_dma.tx_lock));
+ } else {
+ omap_stop_dma(up->uart_dma.tx_dma_channel);
+ serial_omap_continue_tx(up);
+ }
+ up->port_activity = jiffies;
+ return;
+}
+
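
(Illustrative aside with hypothetical numbers, not from the patch: if xmit->tail sits 100 bytes before the end of the circular buffer and 300 bytes are pending, serial_omap_continue_tx() clamps tx_buf_size to 100 so the first DMA stops at the buffer end; uart_tx_dma_callback() then advances the tail past the wrap point, sees 200 bytes still pending, and starts a second transfer from the beginning of the buffer.)
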
+static int serial_omap_probe(struct platform_device *pdev)
+{
+ struct uart_omap_port *up;
+ struct resource *mem, *irq, *dma_tx, *dma_rx;
+ struct omap_uart_port_info *omap_up_info = pdev->dev.platform_data;
+ int ret = -ENOSPC;
+
+ mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (!mem) {
+ dev_err(&pdev->dev, "no mem resource?\n");
+ return -ENODEV;
+ }
+
+ irq = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
+ if (!irq) {
+ dev_err(&pdev->dev, "no irq resource?\n");
+ return -ENODEV;
+ }
+
+ if (!request_mem_region(mem->start, (mem->end - mem->start) + 1,
+ pdev->dev.driver->name)) {
+ dev_err(&pdev->dev, "memory region already claimed\n");
+ return -EBUSY;
+ }
+
+ dma_rx = platform_get_resource_byname(pdev, IORESOURCE_DMA, "rx");
+ if (!dma_rx) {
+ ret = -EINVAL;
+ goto err;
+ }
+
+ dma_tx = platform_get_resource_byname(pdev, IORESOURCE_DMA, "tx");
+ if (!dma_tx) {
+ ret = -EINVAL;
+ goto err;
+ }
+
+ up = kzalloc(sizeof(*up), GFP_KERNEL);
+ if (up == NULL) {
+ ret = -ENOMEM;
+ goto do_release_region;
+ }
+ sprintf(up->name, "OMAP UART%d", pdev->id);
+ up->pdev = pdev;
+ up->port.dev = &pdev->dev;
+ up->port.type = PORT_OMAP;
+ up->port.iotype = UPIO_MEM;
+ up->port.irq = irq->start;
+
+ up->port.regshift = 2;
+ up->port.fifosize = 64;
+ up->port.ops = &serial_omap_pops;
+ up->port.line = pdev->id;
+
+ up->port.membase = omap_up_info->membase;
+ up->port.mapbase = omap_up_info->mapbase;
+ up->port.flags = omap_up_info->flags;
+ up->port.irqflags = omap_up_info->irqflags;
+ up->port.uartclk = omap_up_info->uartclk;
+ up->uart_dma.uart_base = mem->start;
+
+ if (omap_up_info->dma_enabled) {
+ up->uart_dma.uart_dma_tx = dma_tx->start;
+ up->uart_dma.uart_dma_rx = dma_rx->start;
+ up->use_dma = 1;
+ up->uart_dma.rx_buf_size = 4096;
+ up->uart_dma.rx_timeout = 2;
+ spin_lock_init(&(up->uart_dma.tx_lock));
+ spin_lock_init(&(up->uart_dma.rx_lock));
+ up->uart_dma.tx_dma_channel = OMAP_UART_DMA_CH_FREE;
+ up->uart_dma.rx_dma_channel = OMAP_UART_DMA_CH_FREE;
+ }
+
+ ui[pdev->id] = up;
+ serial_omap_add_console_port(up);
+
+ ret = uart_add_one_port(&serial_omap_reg, &up->port);
+ if (ret != 0)
+ goto do_release_region;
+
+ platform_set_drvdata(pdev, up);
+ return 0;
+err:
+ dev_err(&pdev->dev, "[UART%d]: failure [%s]: %d\n",
+ pdev->id, __func__, ret);
+do_release_region:
+ release_mem_region(mem->start, (mem->end - mem->start) + 1);
+ return ret;
+}
+
+static int serial_omap_remove(struct platform_device *dev)
+{
+ struct uart_omap_port *up = platform_get_drvdata(dev);
+
+ platform_set_drvdata(dev, NULL);
+ if (up) {
+ uart_remove_one_port(&serial_omap_reg, &up->port);
+ kfree(up);
+ }
+ return 0;
+}
+
+static struct platform_driver serial_omap_driver = {
+ .probe = serial_omap_probe,
+ .remove = serial_omap_remove,
+
+ .suspend = serial_omap_suspend,
+ .resume = serial_omap_resume,
+ .driver = {
+ .name = DRIVER_NAME,
+ },
+};
+
+static int __init serial_omap_init(void)
+{
+ int ret;
+
+ ret = uart_register_driver(&serial_omap_reg);
+ if (ret != 0)
+ return ret;
+ ret = platform_driver_register(&serial_omap_driver);
+ if (ret != 0)
+ uart_unregister_driver(&serial_omap_reg);
+ return ret;
+}
+
+static void __exit serial_omap_exit(void)
+{
+ platform_driver_unregister(&serial_omap_driver);
+ uart_unregister_driver(&serial_omap_reg);
+}
+
+module_init(serial_omap_init);
+module_exit(serial_omap_exit);
+
+MODULE_DESCRIPTION("OMAP High Speed UART driver");
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Texas Instruments Inc");
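
Illustrative aside, not part of the patch: serial_omap_probe() above expects a MEM resource, an IRQ resource, DMA resources named "rx" and "tx", and struct omap_uart_port_info platform data. A minimal board-side sketch might look like the fragment below; every address, IRQ and DMA request number is a placeholder, the platform device name must match the driver's DRIVER_NAME, and membase/mapbase/irqflags setup is omitted.

/* Hypothetical board code -- all numbers are illustrative placeholders. */
static struct resource board_uart1_res[] = {
	{ .start = 0x4806a000, .end = 0x4806afff, .flags = IORESOURCE_MEM },
	{ .start = 72,                            .flags = IORESOURCE_IRQ },
	{ .name = "rx", .start = 50,              .flags = IORESOURCE_DMA },
	{ .name = "tx", .start = 49,              .flags = IORESOURCE_DMA },
};

static struct omap_uart_port_info board_uart1_pdata = {
	.dma_enabled	= 1,		/* take the DMA paths implemented above */
	.uartclk	= 48000000,	/* assumed 48 MHz functional clock */
	.flags		= UPF_BOOT_AUTOCONF,
};
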
diff --git a/drivers/serial/serial_core.c b/drivers/serial/serial_core.c
index c4ea14670d44..9ffa5bee44ab 100644
--- a/drivers/serial/serial_core.c
+++ b/drivers/serial/serial_core.c
@@ -29,7 +29,6 @@
#include <linux/console.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
-#include <linux/smp_lock.h>
#include <linux/device.h>
#include <linux/serial.h> /* for serial_state and serial_icounter_struct */
#include <linux/serial_core.h>
diff --git a/drivers/serial/sh-sci.h b/drivers/serial/sh-sci.h
index 9b52f77a9305..d2352ac437c5 100644
--- a/drivers/serial/sh-sci.h
+++ b/drivers/serial/sh-sci.h
@@ -140,7 +140,15 @@
# define SCSPTR0 0xffe00024 /* 16 bit SCIF */
# define SCSPTR1 0xffe10024 /* 16 bit SCIF */
# define SCIF_ORER 0x0001 /* Overrun error bit */
-# define SCSCR_INIT(port) 0x3a /* TIE=0,RIE=0,TE=1,RE=1,REIE=1 */
+
+#if defined(CONFIG_SH_SH2007)
+/* TIE=0,RIE=0,TE=1,RE=1,REIE=1,CKE1=0 */
+# define SCSCR_INIT(port) 0x38
+#else
+/* TIE=0,RIE=0,TE=1,RE=1,REIE=1,CKE1=1 */
+# define SCSCR_INIT(port) 0x3a
+#endif
+
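
(For clarity, assuming the usual SCIF SCSCR bit layout of TE = 0x20, RE = 0x10, REIE = 0x08, CKE1 = 0x02: 0x3a is TE | RE | REIE | CKE1, while 0x38 simply drops CKE1, so the only difference for SH2007 is the clock-enable bit, exactly as the two comments state.)
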
#elif defined(CONFIG_CPU_SUBTYPE_SH7785) || \
defined(CONFIG_CPU_SUBTYPE_SH7786)
# define SCSPTR0 0xffea0024 /* 16 bit SCIF */
@@ -616,9 +624,10 @@ static inline int sci_rxd_in(struct uart_port *port)
* -- Mitch Davis - 15 Jul 2000
*/
-#if defined(CONFIG_CPU_SUBTYPE_SH7780) || \
- defined(CONFIG_CPU_SUBTYPE_SH7785) || \
- defined(CONFIG_CPU_SUBTYPE_SH7786)
+#if (defined(CONFIG_CPU_SUBTYPE_SH7780) || \
+ defined(CONFIG_CPU_SUBTYPE_SH7785) || \
+ defined(CONFIG_CPU_SUBTYPE_SH7786)) && \
+ !defined(CONFIG_SH_SH2007)
#define SCBRR_VALUE(bps, clk) ((clk+16*bps)/(16*bps)-1)
#elif defined(CONFIG_CPU_SUBTYPE_SH7705) || \
defined(CONFIG_CPU_SUBTYPE_SH7720) || \
diff --git a/drivers/sh/Kconfig b/drivers/sh/Kconfig
index a54de0b9b3df..f168a6159961 100644
--- a/drivers/sh/Kconfig
+++ b/drivers/sh/Kconfig
@@ -1,24 +1,5 @@
-config INTC_USERIMASK
- bool "Userspace interrupt masking support"
- depends on ARCH_SHMOBILE || (SUPERH && CPU_SH4A)
- help
- This enables support for hardware-assisted userspace hardirq
- masking.
+menu "SuperH / SH-Mobile Driver Options"
- SH-4A and newer interrupt blocks all support a special shadowed
- page with all non-masking registers obscured when mapped in to
- userspace. This is primarily for use by userspace device
- drivers that are using special priority levels.
+source "drivers/sh/intc/Kconfig"
- If in doubt, say N.
-
-config INTC_BALANCING
- bool "Hardware IRQ balancing support"
- depends on SMP && SUPERH && CPU_SUBTYPE_SH7786
- help
- This enables support for IRQ auto-distribution mode on SH-X3
- SMP parts. All of the balancing and CPU wakeup decisions are
- taken care of automatically by hardware for distributed
- vectors.
-
- If in doubt, say N.
+endmenu
diff --git a/drivers/sh/Makefile b/drivers/sh/Makefile
index 08fc653a825c..24e6cec0ae8d 100644
--- a/drivers/sh/Makefile
+++ b/drivers/sh/Makefile
@@ -1,10 +1,9 @@
#
# Makefile for the SuperH specific drivers.
#
-obj-y := clk.o intc.o
+obj-y := intc/
-obj-$(CONFIG_SUPERHYWAY) += superhyway/
+obj-$(CONFIG_HAVE_CLK) += clk/
obj-$(CONFIG_MAPLE) += maple/
-
+obj-$(CONFIG_SUPERHYWAY) += superhyway/
obj-$(CONFIG_GENERIC_GPIO) += pfc.o
-obj-$(CONFIG_SH_CLK_CPG) += clk-cpg.o
diff --git a/drivers/sh/clk/Makefile b/drivers/sh/clk/Makefile
new file mode 100644
index 000000000000..5d15ebfaa074
--- /dev/null
+++ b/drivers/sh/clk/Makefile
@@ -0,0 +1,3 @@
+obj-y := core.o
+
+obj-$(CONFIG_SH_CLK_CPG) += cpg.o
diff --git a/drivers/sh/clk.c b/drivers/sh/clk/core.c
index 5d84adac9ec4..3f5e387ed564 100644
--- a/drivers/sh/clk.c
+++ b/drivers/sh/clk/core.c
@@ -1,7 +1,7 @@
/*
- * drivers/sh/clk.c - SuperH clock framework
+ * SuperH clock framework
*
- * Copyright (C) 2005 - 2009 Paul Mundt
+ * Copyright (C) 2005 - 2010 Paul Mundt
*
* This clock framework is derived from the OMAP version by:
*
@@ -14,6 +14,8 @@
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*/
+#define pr_fmt(fmt) "clock: " fmt
+
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/module.h>
@@ -23,7 +25,7 @@
#include <linux/sysdev.h>
#include <linux/seq_file.h>
#include <linux/err.h>
-#include <linux/platform_device.h>
+#include <linux/io.h>
#include <linux/debugfs.h>
#include <linux/cpufreq.h>
#include <linux/clk.h>
@@ -43,6 +45,8 @@ void clk_rate_table_build(struct clk *clk,
unsigned long freq;
int i;
+ clk->nr_freqs = nr_freqs;
+
for (i = 0; i < nr_freqs; i++) {
div = 1;
mult = 1;
@@ -67,29 +71,39 @@ void clk_rate_table_build(struct clk *clk,
freq_table[i].frequency = CPUFREQ_TABLE_END;
}
-long clk_rate_table_round(struct clk *clk,
- struct cpufreq_frequency_table *freq_table,
- unsigned long rate)
+struct clk_rate_round_data;
+
+struct clk_rate_round_data {
+ unsigned long rate;
+ unsigned int min, max;
+ long (*func)(unsigned int, struct clk_rate_round_data *);
+ void *arg;
+};
+
+#define for_each_frequency(pos, r, freq) \
+ for (pos = r->min, freq = r->func(pos, r); \
+ pos <= r->max; pos++, freq = r->func(pos, r)) \
+ if (unlikely(freq == 0)) \
+ ; \
+ else
+
+static long clk_rate_round_helper(struct clk_rate_round_data *rounder)
{
unsigned long rate_error, rate_error_prev = ~0UL;
- unsigned long rate_best_fit = rate;
- unsigned long highest, lowest;
+ unsigned long highest, lowest, freq;
+ long rate_best_fit = -ENOENT;
int i;
- highest = lowest = 0;
-
- for (i = 0; freq_table[i].frequency != CPUFREQ_TABLE_END; i++) {
- unsigned long freq = freq_table[i].frequency;
-
- if (freq == CPUFREQ_ENTRY_INVALID)
- continue;
+ highest = 0;
+ lowest = ~0UL;
+ for_each_frequency(i, rounder, freq) {
if (freq > highest)
highest = freq;
if (freq < lowest)
lowest = freq;
- rate_error = abs(freq - rate);
+ rate_error = abs(freq - rounder->rate);
if (rate_error < rate_error_prev) {
rate_best_fit = freq;
rate_error_prev = rate_error;
@@ -99,14 +113,64 @@ long clk_rate_table_round(struct clk *clk,
break;
}
- if (rate >= highest)
+ if (rounder->rate >= highest)
rate_best_fit = highest;
- if (rate <= lowest)
+ if (rounder->rate <= lowest)
rate_best_fit = lowest;
return rate_best_fit;
}
+static long clk_rate_table_iter(unsigned int pos,
+ struct clk_rate_round_data *rounder)
+{
+ struct cpufreq_frequency_table *freq_table = rounder->arg;
+ unsigned long freq = freq_table[pos].frequency;
+
+ if (freq == CPUFREQ_ENTRY_INVALID)
+ freq = 0;
+
+ return freq;
+}
+
+long clk_rate_table_round(struct clk *clk,
+ struct cpufreq_frequency_table *freq_table,
+ unsigned long rate)
+{
+ struct clk_rate_round_data table_round = {
+ .min = 0,
+ .max = clk->nr_freqs - 1,
+ .func = clk_rate_table_iter,
+ .arg = freq_table,
+ .rate = rate,
+ };
+
+ if (clk->nr_freqs < 1)
+ return -ENOSYS;
+
+ return clk_rate_round_helper(&table_round);
+}
+
+static long clk_rate_div_range_iter(unsigned int pos,
+ struct clk_rate_round_data *rounder)
+{
+ return clk_get_rate(rounder->arg) / pos;
+}
+
+long clk_rate_div_range_round(struct clk *clk, unsigned int div_min,
+ unsigned int div_max, unsigned long rate)
+{
+ struct clk_rate_round_data div_range_round = {
+ .min = div_min,
+ .max = div_max,
+ .func = clk_rate_div_range_iter,
+ .arg = clk_get_parent(clk),
+ .rate = rate,
+ };
+
+ return clk_rate_round_helper(&div_range_round);
+}
+
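
Illustrative aside, not from the patch: with a 48 MHz parent and dividers 1..4, the candidate rates walked by clk_rate_round_helper() are 48, 24, 16 and 12 MHz, so a request for 20 MHz rounds to 24 MHz, the candidate with the smallest absolute error.

/* Hypothetical caller: 'clk' is a divider clock whose parent runs at 48 MHz. */
long best = clk_rate_div_range_round(clk, 1, 4, 20000000);
/* best == 24000000, since |24 - 20| MHz beats |16 - 20| MHz. */
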
int clk_rate_table_find(struct clk *clk,
struct cpufreq_frequency_table *freq_table,
unsigned long rate)
@@ -160,8 +224,8 @@ void propagate_rate(struct clk *tclk)
static void __clk_disable(struct clk *clk)
{
- if (WARN(!clk->usecount, "Trying to disable clock %s with 0 usecount\n",
- clk->name))
+ if (WARN(!clk->usecount, "Trying to disable clock %p with 0 usecount\n",
+ clk))
return;
if (!(--clk->usecount)) {
@@ -248,8 +312,88 @@ void recalculate_root_clocks(void)
}
}
+static struct clk_mapping dummy_mapping;
+
+static struct clk *lookup_root_clock(struct clk *clk)
+{
+ while (clk->parent)
+ clk = clk->parent;
+
+ return clk;
+}
+
+static int clk_establish_mapping(struct clk *clk)
+{
+ struct clk_mapping *mapping = clk->mapping;
+
+ /*
+ * Propagate mappings.
+ */
+ if (!mapping) {
+ struct clk *clkp;
+
+ /*
+ * dummy mapping for root clocks with no specified ranges
+ */
+ if (!clk->parent) {
+ clk->mapping = &dummy_mapping;
+ return 0;
+ }
+
+ /*
+ * If we're on a child clock and it provides no mapping of its
+ * own, inherit the mapping from its root clock.
+ */
+ clkp = lookup_root_clock(clk);
+ mapping = clkp->mapping;
+ BUG_ON(!mapping);
+ }
+
+ /*
+ * Establish initial mapping.
+ */
+ if (!mapping->base && mapping->phys) {
+ kref_init(&mapping->ref);
+
+ mapping->base = ioremap_nocache(mapping->phys, mapping->len);
+ if (unlikely(!mapping->base))
+ return -ENXIO;
+ } else if (mapping->base) {
+ /*
+ * Bump the refcount for an existing mapping
+ */
+ kref_get(&mapping->ref);
+ }
+
+ clk->mapping = mapping;
+ return 0;
+}
+
+static void clk_destroy_mapping(struct kref *kref)
+{
+ struct clk_mapping *mapping;
+
+ mapping = container_of(kref, struct clk_mapping, ref);
+
+ iounmap(mapping->base);
+}
+
+static void clk_teardown_mapping(struct clk *clk)
+{
+ struct clk_mapping *mapping = clk->mapping;
+
+ /* Nothing to do */
+ if (mapping == &dummy_mapping)
+ return;
+
+ kref_put(&mapping->ref, clk_destroy_mapping);
+ clk->mapping = NULL;
+}
+
int clk_register(struct clk *clk)
{
+ int ret;
+
if (clk == NULL || IS_ERR(clk))
return -EINVAL;
@@ -264,17 +408,26 @@ int clk_register(struct clk *clk)
INIT_LIST_HEAD(&clk->children);
clk->usecount = 0;
+ ret = clk_establish_mapping(clk);
+ if (unlikely(ret))
+ goto out_unlock;
+
if (clk->parent)
list_add(&clk->sibling, &clk->parent->children);
else
list_add(&clk->sibling, &root_clks);
list_add(&clk->node, &clock_list);
+
+#ifdef CONFIG_SH_CLK_CPG_LEGACY
if (clk->ops && clk->ops->init)
clk->ops->init(clk);
+#endif
+
+out_unlock:
mutex_unlock(&clock_list_sem);
- return 0;
+ return ret;
}
EXPORT_SYMBOL_GPL(clk_register);
@@ -283,6 +436,7 @@ void clk_unregister(struct clk *clk)
mutex_lock(&clock_list_sem);
list_del(&clk->sibling);
list_del(&clk->node);
+ clk_teardown_mapping(clk);
mutex_unlock(&clock_list_sem);
}
EXPORT_SYMBOL_GPL(clk_unregister);
@@ -304,19 +458,13 @@ EXPORT_SYMBOL_GPL(clk_get_rate);
int clk_set_rate(struct clk *clk, unsigned long rate)
{
- return clk_set_rate_ex(clk, rate, 0);
-}
-EXPORT_SYMBOL_GPL(clk_set_rate);
-
-int clk_set_rate_ex(struct clk *clk, unsigned long rate, int algo_id)
-{
int ret = -EOPNOTSUPP;
unsigned long flags;
spin_lock_irqsave(&clock_lock, flags);
if (likely(clk->ops && clk->ops->set_rate)) {
- ret = clk->ops->set_rate(clk, rate, algo_id);
+ ret = clk->ops->set_rate(clk, rate);
if (ret != 0)
goto out_unlock;
} else {
@@ -334,7 +482,7 @@ out_unlock:
return ret;
}
-EXPORT_SYMBOL_GPL(clk_set_rate_ex);
+EXPORT_SYMBOL_GPL(clk_set_rate);
int clk_set_parent(struct clk *clk, struct clk *parent)
{
@@ -354,10 +502,10 @@ int clk_set_parent(struct clk *clk, struct clk *parent)
ret = clk_reparent(clk, parent);
if (ret == 0) {
- pr_debug("clock: set parent of %s to %s (new rate %ld)\n",
- clk->name, clk->parent->name, clk->rate);
if (clk->ops->recalc)
clk->rate = clk->ops->recalc(clk);
+ pr_debug("set parent of %p to %p (new rate %ld)\n",
+ clk, clk->parent, clk->rate);
propagate_rate(clk);
}
} else
@@ -390,6 +538,98 @@ long clk_round_rate(struct clk *clk, unsigned long rate)
}
EXPORT_SYMBOL_GPL(clk_round_rate);
+long clk_round_parent(struct clk *clk, unsigned long target,
+ unsigned long *best_freq, unsigned long *parent_freq,
+ unsigned int div_min, unsigned int div_max)
+{
+ struct cpufreq_frequency_table *freq, *best = NULL;
+ unsigned long error = ULONG_MAX, freq_high, freq_low, div;
+ struct clk *parent = clk_get_parent(clk);
+
+ if (!parent) {
+ *parent_freq = 0;
+ *best_freq = clk_round_rate(clk, target);
+ return abs(target - *best_freq);
+ }
+
+ for (freq = parent->freq_table; freq->frequency != CPUFREQ_TABLE_END;
+ freq++) {
+ if (freq->frequency == CPUFREQ_ENTRY_INVALID)
+ continue;
+
+ if (unlikely(freq->frequency / target <= div_min - 1)) {
+ unsigned long freq_max;
+
+ freq_max = (freq->frequency + div_min / 2) / div_min;
+ if (error > target - freq_max) {
+ error = target - freq_max;
+ best = freq;
+ if (best_freq)
+ *best_freq = freq_max;
+ }
+
+ pr_debug("too low freq %u, error %lu\n", freq->frequency,
+ target - freq_max);
+
+ if (!error)
+ break;
+
+ continue;
+ }
+
+ if (unlikely(freq->frequency / target >= div_max)) {
+ unsigned long freq_min;
+
+ freq_min = (freq->frequency + div_max / 2) / div_max;
+ if (error > freq_min - target) {
+ error = freq_min - target;
+ best = freq;
+ if (best_freq)
+ *best_freq = freq_min;
+ }
+
+ pr_debug("too high freq %u, error %lu\n", freq->frequency,
+ freq_min - target);
+
+ if (!error)
+ break;
+
+ continue;
+ }
+
+ div = freq->frequency / target;
+ freq_high = freq->frequency / div;
+ freq_low = freq->frequency / (div + 1);
+
+ if (freq_high - target < error) {
+ error = freq_high - target;
+ best = freq;
+ if (best_freq)
+ *best_freq = freq_high;
+ }
+
+ if (target - freq_low < error) {
+ error = target - freq_low;
+ best = freq;
+ if (best_freq)
+ *best_freq = freq_low;
+ }
+
+ pr_debug("%u / %lu = %lu, / %lu = %lu, best %lu, parent %u\n",
+ freq->frequency, div, freq_high, div + 1, freq_low,
+ *best_freq, best->frequency);
+
+ if (!error)
+ break;
+ }
+
+ if (parent_freq)
+ *parent_freq = best->frequency;
+
+ return error;
+}
+EXPORT_SYMBOL_GPL(clk_round_parent);
+
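
Illustrative aside, not from the patch: a driver that reaches its target rate through an integer divider can ask the new helper how close it can get and which parent frequency to use; the names below are hypothetical.

/* Hypothetical caller of clk_round_parent() added above. */
unsigned long best = 0, parent_rate = 0;
long error = clk_round_parent(clk, 1843200, &best, &parent_rate, 1, 64);
/*
 * 'best' is the closest achievable rate, 'parent_rate' the parent table
 * entry that produced it, and 'error' the remaining difference from the
 * 1843200 target.
 */
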
#ifdef CONFIG_PM
static int clks_sysdev_suspend(struct sys_device *dev, pm_message_t state)
{
@@ -410,8 +650,7 @@ static int clks_sysdev_suspend(struct sys_device *dev, pm_message_t state)
clkp->ops->set_parent(clkp,
clkp->parent);
if (likely(clkp->ops->set_rate))
- clkp->ops->set_rate(clkp,
- rate, NO_CHANGE);
+ clkp->ops->set_rate(clkp, rate);
else if (likely(clkp->ops->recalc))
clkp->rate = clkp->ops->recalc(clkp);
}
@@ -469,9 +708,7 @@ static int clk_debugfs_register_one(struct clk *c)
char s[255];
char *p = s;
- p += sprintf(p, "%s", c->name);
- if (c->id >= 0)
- sprintf(p, ":%d", c->id);
+ p += sprintf(p, "%p", c);
d = debugfs_create_dir(s, pa ? pa->dentry : clk_debugfs_root);
if (!d)
return -ENOMEM;
@@ -513,7 +750,7 @@ static int clk_debugfs_register(struct clk *c)
return err;
}
- if (!c->dentry && c->name) {
+ if (!c->dentry) {
err = clk_debugfs_register_one(c);
if (err)
return err;
diff --git a/drivers/sh/clk-cpg.c b/drivers/sh/clk/cpg.c
index 8c024b984ed8..6172335ae323 100644
--- a/drivers/sh/clk-cpg.c
+++ b/drivers/sh/clk/cpg.c
@@ -1,3 +1,12 @@
+/*
+ * Helper routines for SuperH Clock Pulse Generator blocks (CPG).
+ *
+ * Copyright (C) 2010 Magnus Damm
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ */
#include <linux/clk.h>
#include <linux/compiler.h>
#include <linux/slab.h>
@@ -101,8 +110,7 @@ static int sh_clk_div6_set_parent(struct clk *clk, struct clk *parent)
return 0;
}
-static int sh_clk_div6_set_rate(struct clk *clk,
- unsigned long rate, int algo_id)
+static int sh_clk_div6_set_rate(struct clk *clk, unsigned long rate)
{
unsigned long value;
int idx;
@@ -123,7 +131,7 @@ static int sh_clk_div6_enable(struct clk *clk)
unsigned long value;
int ret;
- ret = sh_clk_div6_set_rate(clk, clk->rate, 0);
+ ret = sh_clk_div6_set_rate(clk, clk->rate);
if (ret == 0) {
value = __raw_readl(clk->enable_reg);
value &= ~0x100; /* clear stop bit to enable clock */
@@ -180,7 +188,6 @@ static int __init sh_clk_div6_register_ops(struct clk *clks, int nr,
clkp = clks + k;
clkp->ops = ops;
- clkp->id = -1;
clkp->freq_table = freq_table + (k * freq_table_size);
clkp->freq_table[nr_divs].frequency = CPUFREQ_TABLE_END;
@@ -245,7 +252,7 @@ static int sh_clk_div4_set_parent(struct clk *clk, struct clk *parent)
return 0;
}
-static int sh_clk_div4_set_rate(struct clk *clk, unsigned long rate, int algo_id)
+static int sh_clk_div4_set_rate(struct clk *clk, unsigned long rate)
{
struct clk_div4_table *d4t = clk->priv;
unsigned long value;
@@ -319,7 +326,6 @@ static int __init sh_clk_div4_register_ops(struct clk *clks, int nr,
clkp = clks + k;
clkp->ops = ops;
- clkp->id = -1;
clkp->priv = table;
clkp->freq_table = freq_table + (k * freq_table_size);
diff --git a/drivers/sh/intc.c b/drivers/sh/intc.c
deleted file mode 100644
index e91a23e5ffd8..000000000000
--- a/drivers/sh/intc.c
+++ /dev/null
@@ -1,1390 +0,0 @@
-/*
- * Shared interrupt handling code for IPR and INTC2 types of IRQs.
- *
- * Copyright (C) 2007, 2008 Magnus Damm
- * Copyright (C) 2009, 2010 Paul Mundt
- *
- * Based on intc2.c and ipr.c
- *
- * Copyright (C) 1999 Niibe Yutaka & Takeshi Yaegashi
- * Copyright (C) 2000 Kazumoto Kojima
- * Copyright (C) 2001 David J. Mckay (david.mckay@st.com)
- * Copyright (C) 2003 Takashi Kusuda <kusuda-takashi@hitachi-ul.co.jp>
- * Copyright (C) 2005, 2006 Paul Mundt
- *
- * This file is subject to the terms and conditions of the GNU General Public
- * License. See the file "COPYING" in the main directory of this archive
- * for more details.
- */
-#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
-
-#include <linux/init.h>
-#include <linux/irq.h>
-#include <linux/module.h>
-#include <linux/io.h>
-#include <linux/slab.h>
-#include <linux/interrupt.h>
-#include <linux/sh_intc.h>
-#include <linux/sysdev.h>
-#include <linux/list.h>
-#include <linux/topology.h>
-#include <linux/bitmap.h>
-#include <linux/cpumask.h>
-#include <asm/sizes.h>
-
-#define _INTC_MK(fn, mode, addr_e, addr_d, width, shift) \
- ((shift) | ((width) << 5) | ((fn) << 9) | ((mode) << 13) | \
- ((addr_e) << 16) | ((addr_d << 24)))
-
-#define _INTC_SHIFT(h) (h & 0x1f)
-#define _INTC_WIDTH(h) ((h >> 5) & 0xf)
-#define _INTC_FN(h) ((h >> 9) & 0xf)
-#define _INTC_MODE(h) ((h >> 13) & 0x7)
-#define _INTC_ADDR_E(h) ((h >> 16) & 0xff)
-#define _INTC_ADDR_D(h) ((h >> 24) & 0xff)
-
-struct intc_handle_int {
- unsigned int irq;
- unsigned long handle;
-};
-
-struct intc_window {
- phys_addr_t phys;
- void __iomem *virt;
- unsigned long size;
-};
-
-struct intc_desc_int {
- struct list_head list;
- struct sys_device sysdev;
- pm_message_t state;
- unsigned long *reg;
-#ifdef CONFIG_SMP
- unsigned long *smp;
-#endif
- unsigned int nr_reg;
- struct intc_handle_int *prio;
- unsigned int nr_prio;
- struct intc_handle_int *sense;
- unsigned int nr_sense;
- struct intc_window *window;
- unsigned int nr_windows;
- struct irq_chip chip;
-};
-
-static LIST_HEAD(intc_list);
-
-/*
- * The intc_irq_map provides a global map of bound IRQ vectors for a
- * given platform. Allocation of IRQs are either static through the CPU
- * vector map, or dynamic in the case of board mux vectors or MSI.
- *
- * As this is a central point for all IRQ controllers on the system,
- * each of the available sources are mapped out here. This combined with
- * sparseirq makes it quite trivial to keep the vector map tightly packed
- * when dynamically creating IRQs, as well as tying in to otherwise
- * unused irq_desc positions in the sparse array.
- */
-static DECLARE_BITMAP(intc_irq_map, NR_IRQS);
-static DEFINE_SPINLOCK(vector_lock);
-
-#ifdef CONFIG_SMP
-#define IS_SMP(x) x.smp
-#define INTC_REG(d, x, c) (d->reg[(x)] + ((d->smp[(x)] & 0xff) * c))
-#define SMP_NR(d, x) ((d->smp[(x)] >> 8) ? (d->smp[(x)] >> 8) : 1)
-#else
-#define IS_SMP(x) 0
-#define INTC_REG(d, x, c) (d->reg[(x)])
-#define SMP_NR(d, x) 1
-#endif
-
-static unsigned int intc_prio_level[NR_IRQS]; /* for now */
-static unsigned int default_prio_level = 2; /* 2 - 16 */
-static unsigned long ack_handle[NR_IRQS];
-#ifdef CONFIG_INTC_BALANCING
-static unsigned long dist_handle[NR_IRQS];
-#endif
-
-static inline struct intc_desc_int *get_intc_desc(unsigned int irq)
-{
- struct irq_chip *chip = get_irq_chip(irq);
- return container_of(chip, struct intc_desc_int, chip);
-}
-
-static unsigned long intc_phys_to_virt(struct intc_desc_int *d,
- unsigned long address)
-{
- struct intc_window *window;
- int k;
-
- /* scan through physical windows and convert address */
- for (k = 0; k < d->nr_windows; k++) {
- window = d->window + k;
-
- if (address < window->phys)
- continue;
-
- if (address >= (window->phys + window->size))
- continue;
-
- address -= window->phys;
- address += (unsigned long)window->virt;
-
- return address;
- }
-
- /* no windows defined, register must be 1:1 mapped virt:phys */
- return address;
-}
-
-static unsigned int intc_get_reg(struct intc_desc_int *d, unsigned long address)
-{
- unsigned int k;
-
- address = intc_phys_to_virt(d, address);
-
- for (k = 0; k < d->nr_reg; k++) {
- if (d->reg[k] == address)
- return k;
- }
-
- BUG();
- return 0;
-}
-
-static inline unsigned int set_field(unsigned int value,
- unsigned int field_value,
- unsigned int handle)
-{
- unsigned int width = _INTC_WIDTH(handle);
- unsigned int shift = _INTC_SHIFT(handle);
-
- value &= ~(((1 << width) - 1) << shift);
- value |= field_value << shift;
- return value;
-}
-
-static void write_8(unsigned long addr, unsigned long h, unsigned long data)
-{
- __raw_writeb(set_field(0, data, h), addr);
- (void)__raw_readb(addr); /* Defeat write posting */
-}
-
-static void write_16(unsigned long addr, unsigned long h, unsigned long data)
-{
- __raw_writew(set_field(0, data, h), addr);
- (void)__raw_readw(addr); /* Defeat write posting */
-}
-
-static void write_32(unsigned long addr, unsigned long h, unsigned long data)
-{
- __raw_writel(set_field(0, data, h), addr);
- (void)__raw_readl(addr); /* Defeat write posting */
-}
-
-static void modify_8(unsigned long addr, unsigned long h, unsigned long data)
-{
- unsigned long flags;
- local_irq_save(flags);
- __raw_writeb(set_field(__raw_readb(addr), data, h), addr);
- (void)__raw_readb(addr); /* Defeat write posting */
- local_irq_restore(flags);
-}
-
-static void modify_16(unsigned long addr, unsigned long h, unsigned long data)
-{
- unsigned long flags;
- local_irq_save(flags);
- __raw_writew(set_field(__raw_readw(addr), data, h), addr);
- (void)__raw_readw(addr); /* Defeat write posting */
- local_irq_restore(flags);
-}
-
-static void modify_32(unsigned long addr, unsigned long h, unsigned long data)
-{
- unsigned long flags;
- local_irq_save(flags);
- __raw_writel(set_field(__raw_readl(addr), data, h), addr);
- (void)__raw_readl(addr); /* Defeat write posting */
- local_irq_restore(flags);
-}
-
-enum { REG_FN_ERR = 0, REG_FN_WRITE_BASE = 1, REG_FN_MODIFY_BASE = 5 };
-
-static void (*intc_reg_fns[])(unsigned long addr,
- unsigned long h,
- unsigned long data) = {
- [REG_FN_WRITE_BASE + 0] = write_8,
- [REG_FN_WRITE_BASE + 1] = write_16,
- [REG_FN_WRITE_BASE + 3] = write_32,
- [REG_FN_MODIFY_BASE + 0] = modify_8,
- [REG_FN_MODIFY_BASE + 1] = modify_16,
- [REG_FN_MODIFY_BASE + 3] = modify_32,
-};
-
-enum { MODE_ENABLE_REG = 0, /* Bit(s) set -> interrupt enabled */
- MODE_MASK_REG, /* Bit(s) set -> interrupt disabled */
- MODE_DUAL_REG, /* Two registers, set bit to enable / disable */
- MODE_PRIO_REG, /* Priority value written to enable interrupt */
- MODE_PCLR_REG, /* Above plus all bits set to disable interrupt */
-};
-
-static void intc_mode_field(unsigned long addr,
- unsigned long handle,
- void (*fn)(unsigned long,
- unsigned long,
- unsigned long),
- unsigned int irq)
-{
- fn(addr, handle, ((1 << _INTC_WIDTH(handle)) - 1));
-}
-
-static void intc_mode_zero(unsigned long addr,
- unsigned long handle,
- void (*fn)(unsigned long,
- unsigned long,
- unsigned long),
- unsigned int irq)
-{
- fn(addr, handle, 0);
-}
-
-static void intc_mode_prio(unsigned long addr,
- unsigned long handle,
- void (*fn)(unsigned long,
- unsigned long,
- unsigned long),
- unsigned int irq)
-{
- fn(addr, handle, intc_prio_level[irq]);
-}
-
-static void (*intc_enable_fns[])(unsigned long addr,
- unsigned long handle,
- void (*fn)(unsigned long,
- unsigned long,
- unsigned long),
- unsigned int irq) = {
- [MODE_ENABLE_REG] = intc_mode_field,
- [MODE_MASK_REG] = intc_mode_zero,
- [MODE_DUAL_REG] = intc_mode_field,
- [MODE_PRIO_REG] = intc_mode_prio,
- [MODE_PCLR_REG] = intc_mode_prio,
-};
-
-static void (*intc_disable_fns[])(unsigned long addr,
- unsigned long handle,
- void (*fn)(unsigned long,
- unsigned long,
- unsigned long),
- unsigned int irq) = {
- [MODE_ENABLE_REG] = intc_mode_zero,
- [MODE_MASK_REG] = intc_mode_field,
- [MODE_DUAL_REG] = intc_mode_field,
- [MODE_PRIO_REG] = intc_mode_zero,
- [MODE_PCLR_REG] = intc_mode_field,
-};
-
-#ifdef CONFIG_INTC_BALANCING
-static inline void intc_balancing_enable(unsigned int irq)
-{
- struct intc_desc_int *d = get_intc_desc(irq);
- unsigned long handle = dist_handle[irq];
- unsigned long addr;
-
- if (irq_balancing_disabled(irq) || !handle)
- return;
-
- addr = INTC_REG(d, _INTC_ADDR_D(handle), 0);
- intc_reg_fns[_INTC_FN(handle)](addr, handle, 1);
-}
-
-static inline void intc_balancing_disable(unsigned int irq)
-{
- struct intc_desc_int *d = get_intc_desc(irq);
- unsigned long handle = dist_handle[irq];
- unsigned long addr;
-
- if (irq_balancing_disabled(irq) || !handle)
- return;
-
- addr = INTC_REG(d, _INTC_ADDR_D(handle), 0);
- intc_reg_fns[_INTC_FN(handle)](addr, handle, 0);
-}
-
-static unsigned int intc_dist_data(struct intc_desc *desc,
- struct intc_desc_int *d,
- intc_enum enum_id)
-{
- struct intc_mask_reg *mr = desc->hw.mask_regs;
- unsigned int i, j, fn, mode;
- unsigned long reg_e, reg_d;
-
- for (i = 0; mr && enum_id && i < desc->hw.nr_mask_regs; i++) {
- mr = desc->hw.mask_regs + i;
-
- /*
- * Skip this entry if there's no auto-distribution
- * register associated with it.
- */
- if (!mr->dist_reg)
- continue;
-
- for (j = 0; j < ARRAY_SIZE(mr->enum_ids); j++) {
- if (mr->enum_ids[j] != enum_id)
- continue;
-
- fn = REG_FN_MODIFY_BASE;
- mode = MODE_ENABLE_REG;
- reg_e = mr->dist_reg;
- reg_d = mr->dist_reg;
-
- fn += (mr->reg_width >> 3) - 1;
- return _INTC_MK(fn, mode,
- intc_get_reg(d, reg_e),
- intc_get_reg(d, reg_d),
- 1,
- (mr->reg_width - 1) - j);
- }
- }
-
- /*
- * It's possible we've gotten here with no distribution options
- * available for the IRQ in question, so we just skip over those.
- */
- return 0;
-}
-#else
-static inline void intc_balancing_enable(unsigned int irq)
-{
-}
-
-static inline void intc_balancing_disable(unsigned int irq)
-{
-}
-#endif
-
-static inline void _intc_enable(unsigned int irq, unsigned long handle)
-{
- struct intc_desc_int *d = get_intc_desc(irq);
- unsigned long addr;
- unsigned int cpu;
-
- for (cpu = 0; cpu < SMP_NR(d, _INTC_ADDR_E(handle)); cpu++) {
-#ifdef CONFIG_SMP
- if (!cpumask_test_cpu(cpu, irq_to_desc(irq)->affinity))
- continue;
-#endif
- addr = INTC_REG(d, _INTC_ADDR_E(handle), cpu);
- intc_enable_fns[_INTC_MODE(handle)](addr, handle, intc_reg_fns\
- [_INTC_FN(handle)], irq);
- }
-
- intc_balancing_enable(irq);
-}
-
-static void intc_enable(unsigned int irq)
-{
- _intc_enable(irq, (unsigned long)get_irq_chip_data(irq));
-}
-
-static void intc_disable(unsigned int irq)
-{
- struct intc_desc_int *d = get_intc_desc(irq);
- unsigned long handle = (unsigned long)get_irq_chip_data(irq);
- unsigned long addr;
- unsigned int cpu;
-
- intc_balancing_disable(irq);
-
- for (cpu = 0; cpu < SMP_NR(d, _INTC_ADDR_D(handle)); cpu++) {
-#ifdef CONFIG_SMP
- if (!cpumask_test_cpu(cpu, irq_to_desc(irq)->affinity))
- continue;
-#endif
- addr = INTC_REG(d, _INTC_ADDR_D(handle), cpu);
- intc_disable_fns[_INTC_MODE(handle)](addr, handle,intc_reg_fns\
- [_INTC_FN(handle)], irq);
- }
-}
-
-static void (*intc_enable_noprio_fns[])(unsigned long addr,
- unsigned long handle,
- void (*fn)(unsigned long,
- unsigned long,
- unsigned long),
- unsigned int irq) = {
- [MODE_ENABLE_REG] = intc_mode_field,
- [MODE_MASK_REG] = intc_mode_zero,
- [MODE_DUAL_REG] = intc_mode_field,
- [MODE_PRIO_REG] = intc_mode_field,
- [MODE_PCLR_REG] = intc_mode_field,
-};
-
-static void intc_enable_disable(struct intc_desc_int *d,
- unsigned long handle, int do_enable)
-{
- unsigned long addr;
- unsigned int cpu;
- void (*fn)(unsigned long, unsigned long,
- void (*)(unsigned long, unsigned long, unsigned long),
- unsigned int);
-
- if (do_enable) {
- for (cpu = 0; cpu < SMP_NR(d, _INTC_ADDR_E(handle)); cpu++) {
- addr = INTC_REG(d, _INTC_ADDR_E(handle), cpu);
- fn = intc_enable_noprio_fns[_INTC_MODE(handle)];
- fn(addr, handle, intc_reg_fns[_INTC_FN(handle)], 0);
- }
- } else {
- for (cpu = 0; cpu < SMP_NR(d, _INTC_ADDR_D(handle)); cpu++) {
- addr = INTC_REG(d, _INTC_ADDR_D(handle), cpu);
- fn = intc_disable_fns[_INTC_MODE(handle)];
- fn(addr, handle, intc_reg_fns[_INTC_FN(handle)], 0);
- }
- }
-}
-
-static int intc_set_wake(unsigned int irq, unsigned int on)
-{
- return 0; /* allow wakeup, but setup hardware in intc_suspend() */
-}
-
-#ifdef CONFIG_SMP
-/*
- * This is held with the irq desc lock held, so we don't require any
- * additional locking here at the intc desc level. The affinity mask is
- * later tested in the enable/disable paths.
- */
-static int intc_set_affinity(unsigned int irq, const struct cpumask *cpumask)
-{
- if (!cpumask_intersects(cpumask, cpu_online_mask))
- return -1;
-
- cpumask_copy(irq_to_desc(irq)->affinity, cpumask);
-
- return 0;
-}
-#endif
-
-static void intc_mask_ack(unsigned int irq)
-{
- struct intc_desc_int *d = get_intc_desc(irq);
- unsigned long handle = ack_handle[irq];
- unsigned long addr;
-
- intc_disable(irq);
-
- /* read register and write zero only to the associated bit */
- if (handle) {
- addr = INTC_REG(d, _INTC_ADDR_D(handle), 0);
- switch (_INTC_FN(handle)) {
- case REG_FN_MODIFY_BASE + 0: /* 8bit */
- __raw_readb(addr);
- __raw_writeb(0xff ^ set_field(0, 1, handle), addr);
- break;
- case REG_FN_MODIFY_BASE + 1: /* 16bit */
- __raw_readw(addr);
- __raw_writew(0xffff ^ set_field(0, 1, handle), addr);
- break;
- case REG_FN_MODIFY_BASE + 3: /* 32bit */
- __raw_readl(addr);
- __raw_writel(0xffffffff ^ set_field(0, 1, handle), addr);
- break;
- default:
- BUG();
- break;
- }
- }
-}
-
-static struct intc_handle_int *intc_find_irq(struct intc_handle_int *hp,
- unsigned int nr_hp,
- unsigned int irq)
-{
- int i;
-
- /*
- * this doesn't scale well, but...
- *
- * this function should only be used for cerain uncommon
- * operations such as intc_set_priority() and intc_set_sense()
- * and in those rare cases performance doesn't matter that much.
- * keeping the memory footprint low is more important.
- *
- * one rather simple way to speed this up and still keep the
- * memory footprint down is to make sure the array is sorted
- * and then perform a bisect to lookup the irq.
- */
- for (i = 0; i < nr_hp; i++) {
- if ((hp + i)->irq != irq)
- continue;
-
- return hp + i;
- }
-
- return NULL;
-}
-
-int intc_set_priority(unsigned int irq, unsigned int prio)
-{
- struct intc_desc_int *d = get_intc_desc(irq);
- struct intc_handle_int *ihp;
-
- if (!intc_prio_level[irq] || prio <= 1)
- return -EINVAL;
-
- ihp = intc_find_irq(d->prio, d->nr_prio, irq);
- if (ihp) {
- if (prio >= (1 << _INTC_WIDTH(ihp->handle)))
- return -EINVAL;
-
- intc_prio_level[irq] = prio;
-
- /*
- * only set secondary masking method directly
- * primary masking method is using intc_prio_level[irq]
- * priority level will be set during next enable()
- */
- if (_INTC_FN(ihp->handle) != REG_FN_ERR)
- _intc_enable(irq, ihp->handle);
- }
- return 0;
-}
-
-#define VALID(x) (x | 0x80)
-
-static unsigned char intc_irq_sense_table[IRQ_TYPE_SENSE_MASK + 1] = {
- [IRQ_TYPE_EDGE_FALLING] = VALID(0),
- [IRQ_TYPE_EDGE_RISING] = VALID(1),
- [IRQ_TYPE_LEVEL_LOW] = VALID(2),
- /* SH7706, SH7707 and SH7709 do not support high level triggered */
-#if !defined(CONFIG_CPU_SUBTYPE_SH7706) && \
- !defined(CONFIG_CPU_SUBTYPE_SH7707) && \
- !defined(CONFIG_CPU_SUBTYPE_SH7709)
- [IRQ_TYPE_LEVEL_HIGH] = VALID(3),
-#endif
-};
-
-static int intc_set_sense(unsigned int irq, unsigned int type)
-{
- struct intc_desc_int *d = get_intc_desc(irq);
- unsigned char value = intc_irq_sense_table[type & IRQ_TYPE_SENSE_MASK];
- struct intc_handle_int *ihp;
- unsigned long addr;
-
- if (!value)
- return -EINVAL;
-
- ihp = intc_find_irq(d->sense, d->nr_sense, irq);
- if (ihp) {
- addr = INTC_REG(d, _INTC_ADDR_E(ihp->handle), 0);
- intc_reg_fns[_INTC_FN(ihp->handle)](addr, ihp->handle, value);
- }
- return 0;
-}
-
-static intc_enum __init intc_grp_id(struct intc_desc *desc,
- intc_enum enum_id)
-{
- struct intc_group *g = desc->hw.groups;
- unsigned int i, j;
-
- for (i = 0; g && enum_id && i < desc->hw.nr_groups; i++) {
- g = desc->hw.groups + i;
-
- for (j = 0; g->enum_ids[j]; j++) {
- if (g->enum_ids[j] != enum_id)
- continue;
-
- return g->enum_id;
- }
- }
-
- return 0;
-}
-
-static unsigned int __init _intc_mask_data(struct intc_desc *desc,
- struct intc_desc_int *d,
- intc_enum enum_id,
- unsigned int *reg_idx,
- unsigned int *fld_idx)
-{
- struct intc_mask_reg *mr = desc->hw.mask_regs;
- unsigned int fn, mode;
- unsigned long reg_e, reg_d;
-
- while (mr && enum_id && *reg_idx < desc->hw.nr_mask_regs) {
- mr = desc->hw.mask_regs + *reg_idx;
-
- for (; *fld_idx < ARRAY_SIZE(mr->enum_ids); (*fld_idx)++) {
- if (mr->enum_ids[*fld_idx] != enum_id)
- continue;
-
- if (mr->set_reg && mr->clr_reg) {
- fn = REG_FN_WRITE_BASE;
- mode = MODE_DUAL_REG;
- reg_e = mr->clr_reg;
- reg_d = mr->set_reg;
- } else {
- fn = REG_FN_MODIFY_BASE;
- if (mr->set_reg) {
- mode = MODE_ENABLE_REG;
- reg_e = mr->set_reg;
- reg_d = mr->set_reg;
- } else {
- mode = MODE_MASK_REG;
- reg_e = mr->clr_reg;
- reg_d = mr->clr_reg;
- }
- }
-
- fn += (mr->reg_width >> 3) - 1;
- return _INTC_MK(fn, mode,
- intc_get_reg(d, reg_e),
- intc_get_reg(d, reg_d),
- 1,
- (mr->reg_width - 1) - *fld_idx);
- }
-
- *fld_idx = 0;
- (*reg_idx)++;
- }
-
- return 0;
-}
-
-static unsigned int __init intc_mask_data(struct intc_desc *desc,
- struct intc_desc_int *d,
- intc_enum enum_id, int do_grps)
-{
- unsigned int i = 0;
- unsigned int j = 0;
- unsigned int ret;
-
- ret = _intc_mask_data(desc, d, enum_id, &i, &j);
- if (ret)
- return ret;
-
- if (do_grps)
- return intc_mask_data(desc, d, intc_grp_id(desc, enum_id), 0);
-
- return 0;
-}
-
-static unsigned int __init _intc_prio_data(struct intc_desc *desc,
- struct intc_desc_int *d,
- intc_enum enum_id,
- unsigned int *reg_idx,
- unsigned int *fld_idx)
-{
- struct intc_prio_reg *pr = desc->hw.prio_regs;
- unsigned int fn, n, mode, bit;
- unsigned long reg_e, reg_d;
-
- while (pr && enum_id && *reg_idx < desc->hw.nr_prio_regs) {
- pr = desc->hw.prio_regs + *reg_idx;
-
- for (; *fld_idx < ARRAY_SIZE(pr->enum_ids); (*fld_idx)++) {
- if (pr->enum_ids[*fld_idx] != enum_id)
- continue;
-
- if (pr->set_reg && pr->clr_reg) {
- fn = REG_FN_WRITE_BASE;
- mode = MODE_PCLR_REG;
- reg_e = pr->set_reg;
- reg_d = pr->clr_reg;
- } else {
- fn = REG_FN_MODIFY_BASE;
- mode = MODE_PRIO_REG;
- if (!pr->set_reg)
- BUG();
- reg_e = pr->set_reg;
- reg_d = pr->set_reg;
- }
-
- fn += (pr->reg_width >> 3) - 1;
- n = *fld_idx + 1;
-
- BUG_ON(n * pr->field_width > pr->reg_width);
-
- bit = pr->reg_width - (n * pr->field_width);
-
- return _INTC_MK(fn, mode,
- intc_get_reg(d, reg_e),
- intc_get_reg(d, reg_d),
- pr->field_width, bit);
- }
-
- *fld_idx = 0;
- (*reg_idx)++;
- }
-
- return 0;
-}
-
-static unsigned int __init intc_prio_data(struct intc_desc *desc,
- struct intc_desc_int *d,
- intc_enum enum_id, int do_grps)
-{
- unsigned int i = 0;
- unsigned int j = 0;
- unsigned int ret;
-
- ret = _intc_prio_data(desc, d, enum_id, &i, &j);
- if (ret)
- return ret;
-
- if (do_grps)
- return intc_prio_data(desc, d, intc_grp_id(desc, enum_id), 0);
-
- return 0;
-}
-
-static void __init intc_enable_disable_enum(struct intc_desc *desc,
- struct intc_desc_int *d,
- intc_enum enum_id, int enable)
-{
- unsigned int i, j, data;
-
- /* go through and enable/disable all mask bits */
- i = j = 0;
- do {
- data = _intc_mask_data(desc, d, enum_id, &i, &j);
- if (data)
- intc_enable_disable(d, data, enable);
- j++;
- } while (data);
-
- /* go through and enable/disable all priority fields */
- i = j = 0;
- do {
- data = _intc_prio_data(desc, d, enum_id, &i, &j);
- if (data)
- intc_enable_disable(d, data, enable);
-
- j++;
- } while (data);
-}
-
-static unsigned int __init intc_ack_data(struct intc_desc *desc,
- struct intc_desc_int *d,
- intc_enum enum_id)
-{
- struct intc_mask_reg *mr = desc->hw.ack_regs;
- unsigned int i, j, fn, mode;
- unsigned long reg_e, reg_d;
-
- for (i = 0; mr && enum_id && i < desc->hw.nr_ack_regs; i++) {
- mr = desc->hw.ack_regs + i;
-
- for (j = 0; j < ARRAY_SIZE(mr->enum_ids); j++) {
- if (mr->enum_ids[j] != enum_id)
- continue;
-
- fn = REG_FN_MODIFY_BASE;
- mode = MODE_ENABLE_REG;
- reg_e = mr->set_reg;
- reg_d = mr->set_reg;
-
- fn += (mr->reg_width >> 3) - 1;
- return _INTC_MK(fn, mode,
- intc_get_reg(d, reg_e),
- intc_get_reg(d, reg_d),
- 1,
- (mr->reg_width - 1) - j);
- }
- }
-
- return 0;
-}
-
-static unsigned int __init intc_sense_data(struct intc_desc *desc,
- struct intc_desc_int *d,
- intc_enum enum_id)
-{
- struct intc_sense_reg *sr = desc->hw.sense_regs;
- unsigned int i, j, fn, bit;
-
- for (i = 0; sr && enum_id && i < desc->hw.nr_sense_regs; i++) {
- sr = desc->hw.sense_regs + i;
-
- for (j = 0; j < ARRAY_SIZE(sr->enum_ids); j++) {
- if (sr->enum_ids[j] != enum_id)
- continue;
-
- fn = REG_FN_MODIFY_BASE;
- fn += (sr->reg_width >> 3) - 1;
-
- BUG_ON((j + 1) * sr->field_width > sr->reg_width);
-
- bit = sr->reg_width - ((j + 1) * sr->field_width);
-
- return _INTC_MK(fn, 0, intc_get_reg(d, sr->reg),
- 0, sr->field_width, bit);
- }
- }
-
- return 0;
-}
-
-static void __init intc_register_irq(struct intc_desc *desc,
- struct intc_desc_int *d,
- intc_enum enum_id,
- unsigned int irq)
-{
- struct intc_handle_int *hp;
- unsigned int data[2], primary;
-
- /*
- * Register the IRQ position with the global IRQ map
- */
- set_bit(irq, intc_irq_map);
-
- /*
- * Prefer single interrupt source bitmap over other combinations:
- *
- * 1. bitmap, single interrupt source
- * 2. priority, single interrupt source
- * 3. bitmap, multiple interrupt sources (groups)
- * 4. priority, multiple interrupt sources (groups)
- */
- data[0] = intc_mask_data(desc, d, enum_id, 0);
- data[1] = intc_prio_data(desc, d, enum_id, 0);
-
- primary = 0;
- if (!data[0] && data[1])
- primary = 1;
-
- if (!data[0] && !data[1])
- pr_warning("missing unique irq mask for irq %d (vect 0x%04x)\n",
- irq, irq2evt(irq));
-
- data[0] = data[0] ? data[0] : intc_mask_data(desc, d, enum_id, 1);
- data[1] = data[1] ? data[1] : intc_prio_data(desc, d, enum_id, 1);
-
- if (!data[primary])
- primary ^= 1;
-
- BUG_ON(!data[primary]); /* must have primary masking method */
-
- disable_irq_nosync(irq);
- set_irq_chip_and_handler_name(irq, &d->chip,
- handle_level_irq, "level");
- set_irq_chip_data(irq, (void *)data[primary]);
-
- /*
- * set priority level
- * - this needs to be at least 2 for 5-bit priorities on 7780
- */
- intc_prio_level[irq] = default_prio_level;
-
- /* enable secondary masking method if present */
- if (data[!primary])
- _intc_enable(irq, data[!primary]);
-
- /* add irq to d->prio list if priority is available */
- if (data[1]) {
- hp = d->prio + d->nr_prio;
- hp->irq = irq;
- hp->handle = data[1];
-
- if (primary) {
- /*
- * only secondary priority should access registers, so
- * set _INTC_FN(h) = REG_FN_ERR for intc_set_priority()
- */
- hp->handle &= ~_INTC_MK(0x0f, 0, 0, 0, 0, 0);
- hp->handle |= _INTC_MK(REG_FN_ERR, 0, 0, 0, 0, 0);
- }
- d->nr_prio++;
- }
-
- /* add irq to d->sense list if sense is available */
- data[0] = intc_sense_data(desc, d, enum_id);
- if (data[0]) {
- (d->sense + d->nr_sense)->irq = irq;
- (d->sense + d->nr_sense)->handle = data[0];
- d->nr_sense++;
- }
-
- /* irq should be disabled by default */
- d->chip.mask(irq);
-
- if (desc->hw.ack_regs)
- ack_handle[irq] = intc_ack_data(desc, d, enum_id);
-
-#ifdef CONFIG_INTC_BALANCING
- if (desc->hw.mask_regs)
- dist_handle[irq] = intc_dist_data(desc, d, enum_id);
-#endif
-
-#ifdef CONFIG_ARM
- set_irq_flags(irq, IRQF_VALID); /* Enable IRQ on ARM systems */
-#endif
-}
-
-static unsigned int __init save_reg(struct intc_desc_int *d,
- unsigned int cnt,
- unsigned long value,
- unsigned int smp)
-{
- if (value) {
- value = intc_phys_to_virt(d, value);
-
- d->reg[cnt] = value;
-#ifdef CONFIG_SMP
- d->smp[cnt] = smp;
-#endif
- return 1;
- }
-
- return 0;
-}
-
-static void intc_redirect_irq(unsigned int irq, struct irq_desc *desc)
-{
- generic_handle_irq((unsigned int)get_irq_data(irq));
-}
-
-int __init register_intc_controller(struct intc_desc *desc)
-{
- unsigned int i, k, smp;
- struct intc_hw_desc *hw = &desc->hw;
- struct intc_desc_int *d;
- struct resource *res;
-
- pr_info("Registered controller '%s' with %u IRQs\n",
- desc->name, hw->nr_vectors);
-
- d = kzalloc(sizeof(*d), GFP_NOWAIT);
- if (!d)
- goto err0;
-
- INIT_LIST_HEAD(&d->list);
- list_add(&d->list, &intc_list);
-
- if (desc->num_resources) {
- d->nr_windows = desc->num_resources;
- d->window = kzalloc(d->nr_windows * sizeof(*d->window),
- GFP_NOWAIT);
- if (!d->window)
- goto err1;
-
- for (k = 0; k < d->nr_windows; k++) {
- res = desc->resource + k;
- WARN_ON(resource_type(res) != IORESOURCE_MEM);
- d->window[k].phys = res->start;
- d->window[k].size = resource_size(res);
- d->window[k].virt = ioremap_nocache(res->start,
- resource_size(res));
- if (!d->window[k].virt)
- goto err2;
- }
- }
-
- d->nr_reg = hw->mask_regs ? hw->nr_mask_regs * 2 : 0;
-#ifdef CONFIG_INTC_BALANCING
- if (d->nr_reg)
- d->nr_reg += hw->nr_mask_regs;
-#endif
- d->nr_reg += hw->prio_regs ? hw->nr_prio_regs * 2 : 0;
- d->nr_reg += hw->sense_regs ? hw->nr_sense_regs : 0;
- d->nr_reg += hw->ack_regs ? hw->nr_ack_regs : 0;
-
- d->reg = kzalloc(d->nr_reg * sizeof(*d->reg), GFP_NOWAIT);
- if (!d->reg)
- goto err2;
-
-#ifdef CONFIG_SMP
- d->smp = kzalloc(d->nr_reg * sizeof(*d->smp), GFP_NOWAIT);
- if (!d->smp)
- goto err3;
-#endif
- k = 0;
-
- if (hw->mask_regs) {
- for (i = 0; i < hw->nr_mask_regs; i++) {
- smp = IS_SMP(hw->mask_regs[i]);
- k += save_reg(d, k, hw->mask_regs[i].set_reg, smp);
- k += save_reg(d, k, hw->mask_regs[i].clr_reg, smp);
-#ifdef CONFIG_INTC_BALANCING
- k += save_reg(d, k, hw->mask_regs[i].dist_reg, 0);
-#endif
- }
- }
-
- if (hw->prio_regs) {
- d->prio = kzalloc(hw->nr_vectors * sizeof(*d->prio),
- GFP_NOWAIT);
- if (!d->prio)
- goto err4;
-
- for (i = 0; i < hw->nr_prio_regs; i++) {
- smp = IS_SMP(hw->prio_regs[i]);
- k += save_reg(d, k, hw->prio_regs[i].set_reg, smp);
- k += save_reg(d, k, hw->prio_regs[i].clr_reg, smp);
- }
- }
-
- if (hw->sense_regs) {
- d->sense = kzalloc(hw->nr_vectors * sizeof(*d->sense),
- GFP_NOWAIT);
- if (!d->sense)
- goto err5;
-
- for (i = 0; i < hw->nr_sense_regs; i++)
- k += save_reg(d, k, hw->sense_regs[i].reg, 0);
- }
-
- d->chip.name = desc->name;
- d->chip.mask = intc_disable;
- d->chip.unmask = intc_enable;
- d->chip.mask_ack = intc_disable;
- d->chip.enable = intc_enable;
- d->chip.disable = intc_disable;
- d->chip.shutdown = intc_disable;
- d->chip.set_type = intc_set_sense;
- d->chip.set_wake = intc_set_wake;
-#ifdef CONFIG_SMP
- d->chip.set_affinity = intc_set_affinity;
-#endif
-
- if (hw->ack_regs) {
- for (i = 0; i < hw->nr_ack_regs; i++)
- k += save_reg(d, k, hw->ack_regs[i].set_reg, 0);
-
- d->chip.mask_ack = intc_mask_ack;
- }
-
- /* disable bits matching force_disable before registering irqs */
- if (desc->force_disable)
- intc_enable_disable_enum(desc, d, desc->force_disable, 0);
-
- /* disable bits matching force_enable before registering irqs */
- if (desc->force_enable)
- intc_enable_disable_enum(desc, d, desc->force_enable, 0);
-
- BUG_ON(k > 256); /* _INTC_ADDR_E() and _INTC_ADDR_D() are 8 bits */
-
- /* register the vectors one by one */
- for (i = 0; i < hw->nr_vectors; i++) {
- struct intc_vect *vect = hw->vectors + i;
- unsigned int irq = evt2irq(vect->vect);
- struct irq_desc *irq_desc;
-
- if (!vect->enum_id)
- continue;
-
- irq_desc = irq_to_desc_alloc_node(irq, numa_node_id());
- if (unlikely(!irq_desc)) {
- pr_err("can't get irq_desc for %d\n", irq);
- continue;
- }
-
- intc_register_irq(desc, d, vect->enum_id, irq);
-
- for (k = i + 1; k < hw->nr_vectors; k++) {
- struct intc_vect *vect2 = hw->vectors + k;
- unsigned int irq2 = evt2irq(vect2->vect);
-
- if (vect->enum_id != vect2->enum_id)
- continue;
-
- /*
- * In the case of multi-evt handling and sparse
- * IRQ support, each vector still needs to have
- * its own backing irq_desc.
- */
- irq_desc = irq_to_desc_alloc_node(irq2, numa_node_id());
- if (unlikely(!irq_desc)) {
- pr_err("can't get irq_desc for %d\n", irq2);
- continue;
- }
-
- vect2->enum_id = 0;
-
- /* redirect this interrupts to the first one */
- set_irq_chip(irq2, &dummy_irq_chip);
- set_irq_chained_handler(irq2, intc_redirect_irq);
- set_irq_data(irq2, (void *)irq);
- }
- }
-
- /* enable bits matching force_enable after registering irqs */
- if (desc->force_enable)
- intc_enable_disable_enum(desc, d, desc->force_enable, 1);
-
- return 0;
-err5:
- kfree(d->prio);
-err4:
-#ifdef CONFIG_SMP
- kfree(d->smp);
-err3:
-#endif
- kfree(d->reg);
-err2:
- for (k = 0; k < d->nr_windows; k++)
- if (d->window[k].virt)
- iounmap(d->window[k].virt);
-
- kfree(d->window);
-err1:
- kfree(d);
-err0:
- pr_err("unable to allocate INTC memory\n");
-
- return -ENOMEM;
-}
-
-#ifdef CONFIG_INTC_USERIMASK
-static void __iomem *uimask;
-
-int register_intc_userimask(unsigned long addr)
-{
- if (unlikely(uimask))
- return -EBUSY;
-
- uimask = ioremap_nocache(addr, SZ_4K);
- if (unlikely(!uimask))
- return -ENOMEM;
-
- pr_info("userimask support registered for levels 0 -> %d\n",
- default_prio_level - 1);
-
- return 0;
-}
-
-static ssize_t
-show_intc_userimask(struct sysdev_class *cls,
- struct sysdev_class_attribute *attr, char *buf)
-{
- return sprintf(buf, "%d\n", (__raw_readl(uimask) >> 4) & 0xf);
-}
-
-static ssize_t
-store_intc_userimask(struct sysdev_class *cls,
- struct sysdev_class_attribute *attr,
- const char *buf, size_t count)
-{
- unsigned long level;
-
- level = simple_strtoul(buf, NULL, 10);
-
- /*
- * Minimal acceptable IRQ levels are in the 2 - 16 range, but
- * these are chomped so as to not interfere with normal IRQs.
- *
- * Level 1 is a special case on some CPUs in that it's not
- * directly settable, but given that USERIMASK cuts off below a
- * certain level, we don't care about this limitation here.
- * Level 0 on the other hand equates to user masking disabled.
- *
- * We use default_prio_level as a cut off so that only special
- * case opt-in IRQs can be mangled.
- */
- if (level >= default_prio_level)
- return -EINVAL;
-
- __raw_writel(0xa5 << 24 | level << 4, uimask);
-
- return count;
-}
-
-static SYSDEV_CLASS_ATTR(userimask, S_IRUSR | S_IWUSR,
- show_intc_userimask, store_intc_userimask);
-#endif
-
-static ssize_t
-show_intc_name(struct sys_device *dev, struct sysdev_attribute *attr, char *buf)
-{
- struct intc_desc_int *d;
-
- d = container_of(dev, struct intc_desc_int, sysdev);
-
- return sprintf(buf, "%s\n", d->chip.name);
-}
-
-static SYSDEV_ATTR(name, S_IRUGO, show_intc_name, NULL);
-
-static int intc_suspend(struct sys_device *dev, pm_message_t state)
-{
- struct intc_desc_int *d;
- struct irq_desc *desc;
- int irq;
-
- /* get intc controller associated with this sysdev */
- d = container_of(dev, struct intc_desc_int, sysdev);
-
- switch (state.event) {
- case PM_EVENT_ON:
- if (d->state.event != PM_EVENT_FREEZE)
- break;
- for_each_irq_desc(irq, desc) {
- if (desc->handle_irq == intc_redirect_irq)
- continue;
- if (desc->chip != &d->chip)
- continue;
- if (desc->status & IRQ_DISABLED)
- intc_disable(irq);
- else
- intc_enable(irq);
- }
- break;
- case PM_EVENT_FREEZE:
- /* nothing has to be done */
- break;
- case PM_EVENT_SUSPEND:
- /* enable wakeup irqs belonging to this intc controller */
- for_each_irq_desc(irq, desc) {
- if ((desc->status & IRQ_WAKEUP) && (desc->chip == &d->chip))
- intc_enable(irq);
- }
- break;
- }
- d->state = state;
-
- return 0;
-}
-
-static int intc_resume(struct sys_device *dev)
-{
- return intc_suspend(dev, PMSG_ON);
-}
-
-static struct sysdev_class intc_sysdev_class = {
- .name = "intc",
- .suspend = intc_suspend,
- .resume = intc_resume,
-};
-
-/* register this intc as sysdev to allow suspend/resume */
-static int __init register_intc_sysdevs(void)
-{
- struct intc_desc_int *d;
- int error;
- int id = 0;
-
- error = sysdev_class_register(&intc_sysdev_class);
-#ifdef CONFIG_INTC_USERIMASK
- if (!error && uimask)
- error = sysdev_class_create_file(&intc_sysdev_class,
- &attr_userimask);
-#endif
- if (!error) {
- list_for_each_entry(d, &intc_list, list) {
- d->sysdev.id = id;
- d->sysdev.cls = &intc_sysdev_class;
- error = sysdev_register(&d->sysdev);
- if (error == 0)
- error = sysdev_create_file(&d->sysdev,
- &attr_name);
- if (error)
- break;
-
- id++;
- }
- }
-
- if (error)
- pr_err("sysdev registration error\n");
-
- return error;
-}
-device_initcall(register_intc_sysdevs);
-
-/*
- * Dynamic IRQ allocation and deallocation
- */
-unsigned int create_irq_nr(unsigned int irq_want, int node)
-{
- unsigned int irq = 0, new;
- unsigned long flags;
- struct irq_desc *desc;
-
- spin_lock_irqsave(&vector_lock, flags);
-
- /*
- * First try the wanted IRQ
- */
- if (test_and_set_bit(irq_want, intc_irq_map) == 0) {
- new = irq_want;
- } else {
- /* .. then fall back to scanning. */
- new = find_first_zero_bit(intc_irq_map, nr_irqs);
- if (unlikely(new == nr_irqs))
- goto out_unlock;
-
- __set_bit(new, intc_irq_map);
- }
-
- desc = irq_to_desc_alloc_node(new, node);
- if (unlikely(!desc)) {
- pr_err("can't get irq_desc for %d\n", new);
- goto out_unlock;
- }
-
- desc = move_irq_desc(desc, node);
- irq = new;
-
-out_unlock:
- spin_unlock_irqrestore(&vector_lock, flags);
-
- if (irq > 0) {
- dynamic_irq_init(irq);
-#ifdef CONFIG_ARM
- set_irq_flags(irq, IRQF_VALID); /* Enable IRQ on ARM systems */
-#endif
- }
-
- return irq;
-}
-
-int create_irq(void)
-{
- int nid = cpu_to_node(smp_processor_id());
- int irq;
-
- irq = create_irq_nr(NR_IRQS_LEGACY, nid);
- if (irq == 0)
- irq = -1;
-
- return irq;
-}
-
-void destroy_irq(unsigned int irq)
-{
- unsigned long flags;
-
- dynamic_irq_cleanup(irq);
-
- spin_lock_irqsave(&vector_lock, flags);
- __clear_bit(irq, intc_irq_map);
- spin_unlock_irqrestore(&vector_lock, flags);
-}
-
-int reserve_irq_vector(unsigned int irq)
-{
- unsigned long flags;
- int ret = 0;
-
- spin_lock_irqsave(&vector_lock, flags);
- if (test_and_set_bit(irq, intc_irq_map))
- ret = -EBUSY;
- spin_unlock_irqrestore(&vector_lock, flags);
-
- return ret;
-}
-
-void reserve_irq_legacy(void)
-{
- unsigned long flags;
- int i, j;
-
- spin_lock_irqsave(&vector_lock, flags);
- j = find_first_bit(intc_irq_map, nr_irqs);
- for (i = 0; i < j; i++)
- __set_bit(i, intc_irq_map);
- spin_unlock_irqrestore(&vector_lock, flags);
-}
diff --git a/drivers/sh/intc/Kconfig b/drivers/sh/intc/Kconfig
new file mode 100644
index 000000000000..c88cbccc62b0
--- /dev/null
+++ b/drivers/sh/intc/Kconfig
@@ -0,0 +1,35 @@
+comment "Interrupt controller options"
+
+config INTC_USERIMASK
+ bool "Userspace interrupt masking support"
+ depends on ARCH_SHMOBILE || (SUPERH && CPU_SH4A)
+ help
+ This enables support for hardware-assisted userspace hardirq
+ masking.
+
+ SH-4A and newer interrupt blocks all support a special shadowed
+ page with all non-masking registers obscured when mapped into
+ userspace. This is primarily for use by userspace device
+ drivers that are using special priority levels.
+
+ If in doubt, say N.
+
+config INTC_BALANCING
+ bool "Hardware IRQ balancing support"
+ depends on SMP && SUPERH && CPU_SHX3
+ help
+ This enables support for IRQ auto-distribution mode on SH-X3
+ SMP parts. All of the balancing and CPU wakeup decisions are
+ taken care of automatically by hardware for distributed
+ vectors.
+
+ If in doubt, say N.
+
+config INTC_MAPPING_DEBUG
+ bool "Expose IRQ to per-controller id mapping via debugfs"
+ depends on DEBUG_FS
+ help
+ This will create a debugfs entry for showing the relationship
+ between system IRQs and the per-controller id tables.
+
+ If in doubt, say N.
diff --git a/drivers/sh/intc/Makefile b/drivers/sh/intc/Makefile
new file mode 100644
index 000000000000..bb5df868d77a
--- /dev/null
+++ b/drivers/sh/intc/Makefile
@@ -0,0 +1,5 @@
+obj-y := access.o chip.o core.o dynamic.o handle.o virq.o
+
+obj-$(CONFIG_INTC_BALANCING) += balancing.o
+obj-$(CONFIG_INTC_USERIMASK) += userimask.o
+obj-$(CONFIG_INTC_MAPPING_DEBUG) += virq-debugfs.o
diff --git a/drivers/sh/intc/access.c b/drivers/sh/intc/access.c
new file mode 100644
index 000000000000..f892ae1d212a
--- /dev/null
+++ b/drivers/sh/intc/access.c
@@ -0,0 +1,237 @@
+/*
+ * Common INTC2 register accessors
+ *
+ * Copyright (C) 2007, 2008 Magnus Damm
+ * Copyright (C) 2009, 2010 Paul Mundt
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ */
+#include <linux/io.h>
+#include "internals.h"
+
+unsigned long intc_phys_to_virt(struct intc_desc_int *d, unsigned long address)
+{
+ struct intc_window *window;
+ int k;
+
+ /* scan through physical windows and convert address */
+ for (k = 0; k < d->nr_windows; k++) {
+ window = d->window + k;
+
+ if (address < window->phys)
+ continue;
+
+ if (address >= (window->phys + window->size))
+ continue;
+
+ address -= window->phys;
+ address += (unsigned long)window->virt;
+
+ return address;
+ }
+
+ /* no windows defined, register must be 1:1 mapped virt:phys */
+ return address;
+}
+
+unsigned int intc_get_reg(struct intc_desc_int *d, unsigned long address)
+{
+ unsigned int k;
+
+ address = intc_phys_to_virt(d, address);
+
+ for (k = 0; k < d->nr_reg; k++) {
+ if (d->reg[k] == address)
+ return k;
+ }
+
+ BUG();
+ return 0;
+}
+
+unsigned int intc_set_field_from_handle(unsigned int value,
+ unsigned int field_value,
+ unsigned int handle)
+{
+ unsigned int width = _INTC_WIDTH(handle);
+ unsigned int shift = _INTC_SHIFT(handle);
+
+ value &= ~(((1 << width) - 1) << shift);
+ value |= field_value << shift;
+ return value;
+}
+
+unsigned long intc_get_field_from_handle(unsigned int value, unsigned int handle)
+{
+ unsigned int width = _INTC_WIDTH(handle);
+ unsigned int shift = _INTC_SHIFT(handle);
+ unsigned int mask = ((1 << width) - 1) << shift;
+
+ return (value & mask) >> shift;
+}
+
+static unsigned long test_8(unsigned long addr, unsigned long h,
+ unsigned long ignore)
+{
+ return intc_get_field_from_handle(__raw_readb(addr), h);
+}
+
+static unsigned long test_16(unsigned long addr, unsigned long h,
+ unsigned long ignore)
+{
+ return intc_get_field_from_handle(__raw_readw(addr), h);
+}
+
+static unsigned long test_32(unsigned long addr, unsigned long h,
+ unsigned long ignore)
+{
+ return intc_get_field_from_handle(__raw_readl(addr), h);
+}
+
+static unsigned long write_8(unsigned long addr, unsigned long h,
+ unsigned long data)
+{
+ __raw_writeb(intc_set_field_from_handle(0, data, h), addr);
+ (void)__raw_readb(addr); /* Defeat write posting */
+ return 0;
+}
+
+static unsigned long write_16(unsigned long addr, unsigned long h,
+ unsigned long data)
+{
+ __raw_writew(intc_set_field_from_handle(0, data, h), addr);
+ (void)__raw_readw(addr); /* Defeat write posting */
+ return 0;
+}
+
+static unsigned long write_32(unsigned long addr, unsigned long h,
+ unsigned long data)
+{
+ __raw_writel(intc_set_field_from_handle(0, data, h), addr);
+ (void)__raw_readl(addr); /* Defeat write posting */
+ return 0;
+}
+
+static unsigned long modify_8(unsigned long addr, unsigned long h,
+ unsigned long data)
+{
+ unsigned long flags;
+ unsigned int value;
+ local_irq_save(flags);
+ value = intc_set_field_from_handle(__raw_readb(addr), data, h);
+ __raw_writeb(value, addr);
+ (void)__raw_readb(addr); /* Defeat write posting */
+ local_irq_restore(flags);
+ return 0;
+}
+
+static unsigned long modify_16(unsigned long addr, unsigned long h,
+ unsigned long data)
+{
+ unsigned long flags;
+ unsigned int value;
+ local_irq_save(flags);
+ value = intc_set_field_from_handle(__raw_readw(addr), data, h);
+ __raw_writew(value, addr);
+ (void)__raw_readw(addr); /* Defeat write posting */
+ local_irq_restore(flags);
+ return 0;
+}
+
+static unsigned long modify_32(unsigned long addr, unsigned long h,
+ unsigned long data)
+{
+ unsigned long flags;
+ unsigned int value;
+ local_irq_save(flags);
+ value = intc_set_field_from_handle(__raw_readl(addr), data, h);
+ __raw_writel(value, addr);
+ (void)__raw_readl(addr); /* Defeat write posting */
+ local_irq_restore(flags);
+ return 0;
+}
+
+static unsigned long intc_mode_field(unsigned long addr,
+ unsigned long handle,
+ unsigned long (*fn)(unsigned long,
+ unsigned long,
+ unsigned long),
+ unsigned int irq)
+{
+ return fn(addr, handle, ((1 << _INTC_WIDTH(handle)) - 1));
+}
+
+static unsigned long intc_mode_zero(unsigned long addr,
+ unsigned long handle,
+ unsigned long (*fn)(unsigned long,
+ unsigned long,
+ unsigned long),
+ unsigned int irq)
+{
+ return fn(addr, handle, 0);
+}
+
+static unsigned long intc_mode_prio(unsigned long addr,
+ unsigned long handle,
+ unsigned long (*fn)(unsigned long,
+ unsigned long,
+ unsigned long),
+ unsigned int irq)
+{
+ return fn(addr, handle, intc_get_prio_level(irq));
+}
+
+unsigned long (*intc_reg_fns[])(unsigned long addr,
+ unsigned long h,
+ unsigned long data) = {
+ [REG_FN_TEST_BASE + 0] = test_8,
+ [REG_FN_TEST_BASE + 1] = test_16,
+ [REG_FN_TEST_BASE + 3] = test_32,
+ [REG_FN_WRITE_BASE + 0] = write_8,
+ [REG_FN_WRITE_BASE + 1] = write_16,
+ [REG_FN_WRITE_BASE + 3] = write_32,
+ [REG_FN_MODIFY_BASE + 0] = modify_8,
+ [REG_FN_MODIFY_BASE + 1] = modify_16,
+ [REG_FN_MODIFY_BASE + 3] = modify_32,
+};
+
+unsigned long (*intc_enable_fns[])(unsigned long addr,
+ unsigned long handle,
+ unsigned long (*fn)(unsigned long,
+ unsigned long,
+ unsigned long),
+ unsigned int irq) = {
+ [MODE_ENABLE_REG] = intc_mode_field,
+ [MODE_MASK_REG] = intc_mode_zero,
+ [MODE_DUAL_REG] = intc_mode_field,
+ [MODE_PRIO_REG] = intc_mode_prio,
+ [MODE_PCLR_REG] = intc_mode_prio,
+};
+
+unsigned long (*intc_disable_fns[])(unsigned long addr,
+ unsigned long handle,
+ unsigned long (*fn)(unsigned long,
+ unsigned long,
+ unsigned long),
+ unsigned int irq) = {
+ [MODE_ENABLE_REG] = intc_mode_zero,
+ [MODE_MASK_REG] = intc_mode_field,
+ [MODE_DUAL_REG] = intc_mode_field,
+ [MODE_PRIO_REG] = intc_mode_zero,
+ [MODE_PCLR_REG] = intc_mode_field,
+};
+
+unsigned long (*intc_enable_noprio_fns[])(unsigned long addr,
+ unsigned long handle,
+ unsigned long (*fn)(unsigned long,
+ unsigned long,
+ unsigned long),
+ unsigned int irq) = {
+ [MODE_ENABLE_REG] = intc_mode_field,
+ [MODE_MASK_REG] = intc_mode_zero,
+ [MODE_DUAL_REG] = intc_mode_field,
+ [MODE_PRIO_REG] = intc_mode_field,
+ [MODE_PCLR_REG] = intc_mode_field,
+};
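
The test/write/modify accessors above all operate on the same (width, shift) slice that the packed handle describes. As a quick illustration, here is a small standalone C model of intc_set_field_from_handle()/intc_get_field_from_handle(), reusing the _INTC_WIDTH()/_INTC_SHIFT() bit layout introduced later in internals.h; it is a sketch for illustration only, not part of the patch.

#include <stdio.h>

/* Same handle layout as internals.h: bits 0-4 = shift, bits 5-8 = width */
#define _INTC_SHIFT(h)	((h) & 0x1f)
#define _INTC_WIDTH(h)	(((h) >> 5) & 0xf)

static unsigned int set_field(unsigned int value, unsigned int field,
			      unsigned int handle)
{
	unsigned int width = _INTC_WIDTH(handle);
	unsigned int shift = _INTC_SHIFT(handle);

	value &= ~(((1 << width) - 1) << shift);	/* clear the slice */
	value |= field << shift;			/* insert the new field */
	return value;
}

static unsigned int get_field(unsigned int value, unsigned int handle)
{
	return (value >> _INTC_SHIFT(handle)) &
	       ((1 << _INTC_WIDTH(handle)) - 1);
}

int main(void)
{
	unsigned int handle = (4 << 5) | 8;	/* 4-bit field at bit 8 */
	unsigned int reg = 0xffff00ff;

	reg = set_field(reg, 0x5, handle);
	printf("reg=0x%08x field=%u\n", reg, get_field(reg, handle));
	return 0;
}
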
diff --git a/drivers/sh/intc/balancing.c b/drivers/sh/intc/balancing.c
new file mode 100644
index 000000000000..cec7a96f2c09
--- /dev/null
+++ b/drivers/sh/intc/balancing.c
@@ -0,0 +1,97 @@
+/*
+ * Support for hardware-managed IRQ auto-distribution.
+ *
+ * Copyright (C) 2010 Paul Mundt
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ */
+#include "internals.h"
+
+static unsigned long dist_handle[NR_IRQS];
+
+void intc_balancing_enable(unsigned int irq)
+{
+ struct intc_desc_int *d = get_intc_desc(irq);
+ unsigned long handle = dist_handle[irq];
+ unsigned long addr;
+
+ if (irq_balancing_disabled(irq) || !handle)
+ return;
+
+ addr = INTC_REG(d, _INTC_ADDR_D(handle), 0);
+ intc_reg_fns[_INTC_FN(handle)](addr, handle, 1);
+}
+
+void intc_balancing_disable(unsigned int irq)
+{
+ struct intc_desc_int *d = get_intc_desc(irq);
+ unsigned long handle = dist_handle[irq];
+ unsigned long addr;
+
+ if (irq_balancing_disabled(irq) || !handle)
+ return;
+
+ addr = INTC_REG(d, _INTC_ADDR_D(handle), 0);
+ intc_reg_fns[_INTC_FN(handle)](addr, handle, 0);
+}
+
+static unsigned int intc_dist_data(struct intc_desc *desc,
+ struct intc_desc_int *d,
+ intc_enum enum_id)
+{
+ struct intc_mask_reg *mr = desc->hw.mask_regs;
+ unsigned int i, j, fn, mode;
+ unsigned long reg_e, reg_d;
+
+ for (i = 0; mr && enum_id && i < desc->hw.nr_mask_regs; i++) {
+ mr = desc->hw.mask_regs + i;
+
+ /*
+ * Skip this entry if there's no auto-distribution
+ * register associated with it.
+ */
+ if (!mr->dist_reg)
+ continue;
+
+ for (j = 0; j < ARRAY_SIZE(mr->enum_ids); j++) {
+ if (mr->enum_ids[j] != enum_id)
+ continue;
+
+ fn = REG_FN_MODIFY_BASE;
+ mode = MODE_ENABLE_REG;
+ reg_e = mr->dist_reg;
+ reg_d = mr->dist_reg;
+
+ fn += (mr->reg_width >> 3) - 1;
+ return _INTC_MK(fn, mode,
+ intc_get_reg(d, reg_e),
+ intc_get_reg(d, reg_d),
+ 1,
+ (mr->reg_width - 1) - j);
+ }
+ }
+
+ /*
+ * It's possible we've gotten here with no distribution options
+ * available for the IRQ in question, so we just skip over those.
+ */
+ return 0;
+}
+
+void intc_set_dist_handle(unsigned int irq, struct intc_desc *desc,
+ struct intc_desc_int *d, intc_enum id)
+{
+ unsigned long flags;
+
+ /*
+ * Nothing to do for this IRQ.
+ */
+ if (!desc->hw.mask_regs)
+ return;
+
+ raw_spin_lock_irqsave(&intc_big_lock, flags);
+ dist_handle[irq] = intc_dist_data(desc, d, id);
+ raw_spin_unlock_irqrestore(&intc_big_lock, flags);
+}
diff --git a/drivers/sh/intc/chip.c b/drivers/sh/intc/chip.c
new file mode 100644
index 000000000000..de885a0f917a
--- /dev/null
+++ b/drivers/sh/intc/chip.c
@@ -0,0 +1,222 @@
+/*
+ * IRQ chip definitions for INTC IRQs.
+ *
+ * Copyright (C) 2007, 2008 Magnus Damm
+ * Copyright (C) 2009, 2010 Paul Mundt
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ */
+#include <linux/cpumask.h>
+#include <linux/io.h>
+#include "internals.h"
+
+void _intc_enable(struct irq_data *data, unsigned long handle)
+{
+ unsigned int irq = data->irq;
+ struct intc_desc_int *d = get_intc_desc(irq);
+ unsigned long addr;
+ unsigned int cpu;
+
+ for (cpu = 0; cpu < SMP_NR(d, _INTC_ADDR_E(handle)); cpu++) {
+#ifdef CONFIG_SMP
+ if (!cpumask_test_cpu(cpu, data->affinity))
+ continue;
+#endif
+ addr = INTC_REG(d, _INTC_ADDR_E(handle), cpu);
+ intc_enable_fns[_INTC_MODE(handle)](addr, handle, intc_reg_fns\
+ [_INTC_FN(handle)], irq);
+ }
+
+ intc_balancing_enable(irq);
+}
+
+static void intc_enable(struct irq_data *data)
+{
+ _intc_enable(data, (unsigned long)irq_data_get_irq_chip_data(data));
+}
+
+static void intc_disable(struct irq_data *data)
+{
+ unsigned int irq = data->irq;
+ struct intc_desc_int *d = get_intc_desc(irq);
+ unsigned long handle = (unsigned long)irq_data_get_irq_chip_data(data);
+ unsigned long addr;
+ unsigned int cpu;
+
+ intc_balancing_disable(irq);
+
+ for (cpu = 0; cpu < SMP_NR(d, _INTC_ADDR_D(handle)); cpu++) {
+#ifdef CONFIG_SMP
+ if (!cpumask_test_cpu(cpu, data->affinity))
+ continue;
+#endif
+ addr = INTC_REG(d, _INTC_ADDR_D(handle), cpu);
+ intc_disable_fns[_INTC_MODE(handle)](addr, handle, intc_reg_fns\
+ [_INTC_FN(handle)], irq);
+ }
+}
+
+static int intc_set_wake(struct irq_data *data, unsigned int on)
+{
+ return 0; /* allow wakeup, but setup hardware in intc_suspend() */
+}
+
+#ifdef CONFIG_SMP
+/*
+ * This is called with the irq desc lock held, so we don't require any
+ * additional locking here at the intc desc level. The affinity mask is
+ * later tested in the enable/disable paths.
+ */
+static int intc_set_affinity(struct irq_data *data,
+ const struct cpumask *cpumask,
+ bool force)
+{
+ if (!cpumask_intersects(cpumask, cpu_online_mask))
+ return -1;
+
+ cpumask_copy(data->affinity, cpumask);
+
+ return 0;
+}
+#endif
+
+static void intc_mask_ack(struct irq_data *data)
+{
+ unsigned int irq = data->irq;
+ struct intc_desc_int *d = get_intc_desc(irq);
+ unsigned long handle = intc_get_ack_handle(irq);
+ unsigned long addr;
+
+ intc_disable(data);
+
+ /* read register and write zero only to the associated bit */
+ if (handle) {
+ unsigned int value;
+
+ addr = INTC_REG(d, _INTC_ADDR_D(handle), 0);
+ value = intc_set_field_from_handle(0, 1, handle);
+
+ switch (_INTC_FN(handle)) {
+ case REG_FN_MODIFY_BASE + 0: /* 8bit */
+ __raw_readb(addr);
+ __raw_writeb(0xff ^ value, addr);
+ break;
+ case REG_FN_MODIFY_BASE + 1: /* 16bit */
+ __raw_readw(addr);
+ __raw_writew(0xffff ^ value, addr);
+ break;
+ case REG_FN_MODIFY_BASE + 3: /* 32bit */
+ __raw_readl(addr);
+ __raw_writel(0xffffffff ^ value, addr);
+ break;
+ default:
+ BUG();
+ break;
+ }
+ }
+}
+
+static struct intc_handle_int *intc_find_irq(struct intc_handle_int *hp,
+ unsigned int nr_hp,
+ unsigned int irq)
+{
+ int i;
+
+ /*
+ * this doesn't scale well, but...
+ *
+ * this function should only be used for certain uncommon
+ * operations such as intc_set_priority() and intc_set_type()
+ * and in those rare cases performance doesn't matter that much.
+ * keeping the memory footprint low is more important.
+ *
+ * one rather simple way to speed this up and still keep the
+ * memory footprint down is to make sure the array is sorted
+ * and then perform a bisect to lookup the irq.
+ */
+ for (i = 0; i < nr_hp; i++) {
+ if ((hp + i)->irq != irq)
+ continue;
+
+ return hp + i;
+ }
+
+ return NULL;
+}
+
+int intc_set_priority(unsigned int irq, unsigned int prio)
+{
+ struct intc_desc_int *d = get_intc_desc(irq);
+ struct irq_data *data = irq_get_irq_data(irq);
+ struct intc_handle_int *ihp;
+
+ if (!intc_get_prio_level(irq) || prio <= 1)
+ return -EINVAL;
+
+ ihp = intc_find_irq(d->prio, d->nr_prio, irq);
+ if (ihp) {
+ if (prio >= (1 << _INTC_WIDTH(ihp->handle)))
+ return -EINVAL;
+
+ intc_set_prio_level(irq, prio);
+
+ /*
+ * only set secondary masking method directly
+ * primary masking method is using intc_prio_level[irq]
+ * priority level will be set during next enable()
+ */
+ if (_INTC_FN(ihp->handle) != REG_FN_ERR)
+ _intc_enable(data, ihp->handle);
+ }
+ return 0;
+}
+
+#define VALID(x) (x | 0x80)
+
+static unsigned char intc_irq_sense_table[IRQ_TYPE_SENSE_MASK + 1] = {
+ [IRQ_TYPE_EDGE_FALLING] = VALID(0),
+ [IRQ_TYPE_EDGE_RISING] = VALID(1),
+ [IRQ_TYPE_LEVEL_LOW] = VALID(2),
+ /* SH7706, SH7707 and SH7709 do not support high level triggered */
+#if !defined(CONFIG_CPU_SUBTYPE_SH7706) && \
+ !defined(CONFIG_CPU_SUBTYPE_SH7707) && \
+ !defined(CONFIG_CPU_SUBTYPE_SH7709)
+ [IRQ_TYPE_LEVEL_HIGH] = VALID(3),
+#endif
+};
+
+static int intc_set_type(struct irq_data *data, unsigned int type)
+{
+ unsigned int irq = data->irq;
+ struct intc_desc_int *d = get_intc_desc(irq);
+ unsigned char value = intc_irq_sense_table[type & IRQ_TYPE_SENSE_MASK];
+ struct intc_handle_int *ihp;
+ unsigned long addr;
+
+ if (!value)
+ return -EINVAL;
+
+ ihp = intc_find_irq(d->sense, d->nr_sense, irq);
+ if (ihp) {
+ addr = INTC_REG(d, _INTC_ADDR_E(ihp->handle), 0);
+ intc_reg_fns[_INTC_FN(ihp->handle)](addr, ihp->handle, value);
+ }
+
+ return 0;
+}
+
+struct irq_chip intc_irq_chip = {
+ .irq_mask = intc_disable,
+ .irq_unmask = intc_enable,
+ .irq_mask_ack = intc_mask_ack,
+ .irq_enable = intc_enable,
+ .irq_disable = intc_disable,
+ .irq_shutdown = intc_disable,
+ .irq_set_type = intc_set_type,
+ .irq_set_wake = intc_set_wake,
+#ifdef CONFIG_SMP
+ .irq_set_affinity = intc_set_affinity,
+#endif
+};
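
intc_mask_ack() above relies on ack registers with write-zero-to-acknowledge semantics: the value written is all ones for the register width with only the target bit cleared, so other pending sources are left untouched. A standalone sketch of how that write pattern is derived (widths and bit positions below are purely illustrative):

#include <stdio.h>

/* Build the intc_mask_ack()-style write pattern: all ones for the register
 * width except the bit being acknowledged (write-zero-to-ack hardware).
 */
static unsigned int ack_pattern(unsigned int reg_width, unsigned int bit)
{
	unsigned int all_ones = (reg_width >= 32) ? 0xffffffffu
						  : (1u << reg_width) - 1;

	return all_ones ^ (1u << bit);
}

int main(void)
{
	printf("8-bit, bit 3:  0x%02x\n", ack_pattern(8, 3));	/* 0xf7 */
	printf("16-bit, bit 9: 0x%04x\n", ack_pattern(16, 9));	/* 0xfdff */
	return 0;
}
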
diff --git a/drivers/sh/intc/core.c b/drivers/sh/intc/core.c
new file mode 100644
index 000000000000..e5e9e6735f7d
--- /dev/null
+++ b/drivers/sh/intc/core.c
@@ -0,0 +1,482 @@
+/*
+ * Shared interrupt handling code for IPR and INTC2 types of IRQs.
+ *
+ * Copyright (C) 2007, 2008 Magnus Damm
+ * Copyright (C) 2009, 2010 Paul Mundt
+ *
+ * Based on intc2.c and ipr.c
+ *
+ * Copyright (C) 1999 Niibe Yutaka & Takeshi Yaegashi
+ * Copyright (C) 2000 Kazumoto Kojima
+ * Copyright (C) 2001 David J. Mckay (david.mckay@st.com)
+ * Copyright (C) 2003 Takashi Kusuda <kusuda-takashi@hitachi-ul.co.jp>
+ * Copyright (C) 2005, 2006 Paul Mundt
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ */
+#define pr_fmt(fmt) "intc: " fmt
+
+#include <linux/init.h>
+#include <linux/irq.h>
+#include <linux/io.h>
+#include <linux/slab.h>
+#include <linux/interrupt.h>
+#include <linux/sh_intc.h>
+#include <linux/sysdev.h>
+#include <linux/list.h>
+#include <linux/spinlock.h>
+#include <linux/radix-tree.h>
+#include "internals.h"
+
+LIST_HEAD(intc_list);
+DEFINE_RAW_SPINLOCK(intc_big_lock);
+unsigned int nr_intc_controllers;
+
+/*
+ * Default priority level
+ * - this needs to be at least 2 for 5-bit priorities on 7780
+ */
+static unsigned int default_prio_level = 2; /* 2 - 16 */
+static unsigned int intc_prio_level[NR_IRQS]; /* for now */
+
+unsigned int intc_get_dfl_prio_level(void)
+{
+ return default_prio_level;
+}
+
+unsigned int intc_get_prio_level(unsigned int irq)
+{
+ return intc_prio_level[irq];
+}
+
+void intc_set_prio_level(unsigned int irq, unsigned int level)
+{
+ unsigned long flags;
+
+ raw_spin_lock_irqsave(&intc_big_lock, flags);
+ intc_prio_level[irq] = level;
+ raw_spin_unlock_irqrestore(&intc_big_lock, flags);
+}
+
+static void intc_redirect_irq(unsigned int irq, struct irq_desc *desc)
+{
+ generic_handle_irq((unsigned int)get_irq_data(irq));
+}
+
+static void __init intc_register_irq(struct intc_desc *desc,
+ struct intc_desc_int *d,
+ intc_enum enum_id,
+ unsigned int irq)
+{
+ struct intc_handle_int *hp;
+ struct irq_data *irq_data;
+ unsigned int data[2], primary;
+ unsigned long flags;
+
+ /*
+ * Register the IRQ position with the global IRQ map, then insert
+ * it in to the radix tree.
+ */
+ irq_reserve_irq(irq);
+
+ raw_spin_lock_irqsave(&intc_big_lock, flags);
+ radix_tree_insert(&d->tree, enum_id, intc_irq_xlate_get(irq));
+ raw_spin_unlock_irqrestore(&intc_big_lock, flags);
+
+ /*
+ * Prefer single interrupt source bitmap over other combinations:
+ *
+ * 1. bitmap, single interrupt source
+ * 2. priority, single interrupt source
+ * 3. bitmap, multiple interrupt sources (groups)
+ * 4. priority, multiple interrupt sources (groups)
+ */
+ data[0] = intc_get_mask_handle(desc, d, enum_id, 0);
+ data[1] = intc_get_prio_handle(desc, d, enum_id, 0);
+
+ primary = 0;
+ if (!data[0] && data[1])
+ primary = 1;
+
+ if (!data[0] && !data[1])
+ pr_warning("missing unique irq mask for irq %d (vect 0x%04x)\n",
+ irq, irq2evt(irq));
+
+ data[0] = data[0] ? data[0] : intc_get_mask_handle(desc, d, enum_id, 1);
+ data[1] = data[1] ? data[1] : intc_get_prio_handle(desc, d, enum_id, 1);
+
+ if (!data[primary])
+ primary ^= 1;
+
+ BUG_ON(!data[primary]); /* must have primary masking method */
+
+ irq_data = irq_get_irq_data(irq);
+
+ disable_irq_nosync(irq);
+ set_irq_chip_and_handler_name(irq, &d->chip,
+ handle_level_irq, "level");
+ set_irq_chip_data(irq, (void *)data[primary]);
+
+ /*
+ * set priority level
+ */
+ intc_set_prio_level(irq, intc_get_dfl_prio_level());
+
+ /* enable secondary masking method if present */
+ if (data[!primary])
+ _intc_enable(irq_data, data[!primary]);
+
+ /* add irq to d->prio list if priority is available */
+ if (data[1]) {
+ hp = d->prio + d->nr_prio;
+ hp->irq = irq;
+ hp->handle = data[1];
+
+ if (primary) {
+ /*
+ * only secondary priority should access registers, so
+ * set _INTC_FN(h) = REG_FN_ERR for intc_set_priority()
+ */
+ hp->handle &= ~_INTC_MK(0x0f, 0, 0, 0, 0, 0);
+ hp->handle |= _INTC_MK(REG_FN_ERR, 0, 0, 0, 0, 0);
+ }
+ d->nr_prio++;
+ }
+
+ /* add irq to d->sense list if sense is available */
+ data[0] = intc_get_sense_handle(desc, d, enum_id);
+ if (data[0]) {
+ (d->sense + d->nr_sense)->irq = irq;
+ (d->sense + d->nr_sense)->handle = data[0];
+ d->nr_sense++;
+ }
+
+ /* irq should be disabled by default */
+ d->chip.irq_mask(irq_data);
+
+ intc_set_ack_handle(irq, desc, d, enum_id);
+ intc_set_dist_handle(irq, desc, d, enum_id);
+
+ activate_irq(irq);
+}
+
+static unsigned int __init save_reg(struct intc_desc_int *d,
+ unsigned int cnt,
+ unsigned long value,
+ unsigned int smp)
+{
+ if (value) {
+ value = intc_phys_to_virt(d, value);
+
+ d->reg[cnt] = value;
+#ifdef CONFIG_SMP
+ d->smp[cnt] = smp;
+#endif
+ return 1;
+ }
+
+ return 0;
+}
+
+int __init register_intc_controller(struct intc_desc *desc)
+{
+ unsigned int i, k, smp;
+ struct intc_hw_desc *hw = &desc->hw;
+ struct intc_desc_int *d;
+ struct resource *res;
+
+ pr_info("Registered controller '%s' with %u IRQs\n",
+ desc->name, hw->nr_vectors);
+
+ d = kzalloc(sizeof(*d), GFP_NOWAIT);
+ if (!d)
+ goto err0;
+
+ INIT_LIST_HEAD(&d->list);
+ list_add_tail(&d->list, &intc_list);
+
+ raw_spin_lock_init(&d->lock);
+
+ d->index = nr_intc_controllers;
+
+ if (desc->num_resources) {
+ d->nr_windows = desc->num_resources;
+ d->window = kzalloc(d->nr_windows * sizeof(*d->window),
+ GFP_NOWAIT);
+ if (!d->window)
+ goto err1;
+
+ for (k = 0; k < d->nr_windows; k++) {
+ res = desc->resource + k;
+ WARN_ON(resource_type(res) != IORESOURCE_MEM);
+ d->window[k].phys = res->start;
+ d->window[k].size = resource_size(res);
+ d->window[k].virt = ioremap_nocache(res->start,
+ resource_size(res));
+ if (!d->window[k].virt)
+ goto err2;
+ }
+ }
+
+ d->nr_reg = hw->mask_regs ? hw->nr_mask_regs * 2 : 0;
+#ifdef CONFIG_INTC_BALANCING
+ if (d->nr_reg)
+ d->nr_reg += hw->nr_mask_regs;
+#endif
+ d->nr_reg += hw->prio_regs ? hw->nr_prio_regs * 2 : 0;
+ d->nr_reg += hw->sense_regs ? hw->nr_sense_regs : 0;
+ d->nr_reg += hw->ack_regs ? hw->nr_ack_regs : 0;
+ d->nr_reg += hw->subgroups ? hw->nr_subgroups : 0;
+
+ d->reg = kzalloc(d->nr_reg * sizeof(*d->reg), GFP_NOWAIT);
+ if (!d->reg)
+ goto err2;
+
+#ifdef CONFIG_SMP
+ d->smp = kzalloc(d->nr_reg * sizeof(*d->smp), GFP_NOWAIT);
+ if (!d->smp)
+ goto err3;
+#endif
+ k = 0;
+
+ if (hw->mask_regs) {
+ for (i = 0; i < hw->nr_mask_regs; i++) {
+ smp = IS_SMP(hw->mask_regs[i]);
+ k += save_reg(d, k, hw->mask_regs[i].set_reg, smp);
+ k += save_reg(d, k, hw->mask_regs[i].clr_reg, smp);
+#ifdef CONFIG_INTC_BALANCING
+ k += save_reg(d, k, hw->mask_regs[i].dist_reg, 0);
+#endif
+ }
+ }
+
+ if (hw->prio_regs) {
+ d->prio = kzalloc(hw->nr_vectors * sizeof(*d->prio),
+ GFP_NOWAIT);
+ if (!d->prio)
+ goto err4;
+
+ for (i = 0; i < hw->nr_prio_regs; i++) {
+ smp = IS_SMP(hw->prio_regs[i]);
+ k += save_reg(d, k, hw->prio_regs[i].set_reg, smp);
+ k += save_reg(d, k, hw->prio_regs[i].clr_reg, smp);
+ }
+ }
+
+ if (hw->sense_regs) {
+ d->sense = kzalloc(hw->nr_vectors * sizeof(*d->sense),
+ GFP_NOWAIT);
+ if (!d->sense)
+ goto err5;
+
+ for (i = 0; i < hw->nr_sense_regs; i++)
+ k += save_reg(d, k, hw->sense_regs[i].reg, 0);
+ }
+
+ if (hw->subgroups)
+ for (i = 0; i < hw->nr_subgroups; i++)
+ if (hw->subgroups[i].reg)
+ k += save_reg(d, k, hw->subgroups[i].reg, 0);
+
+ memcpy(&d->chip, &intc_irq_chip, sizeof(struct irq_chip));
+ d->chip.name = desc->name;
+
+ if (hw->ack_regs)
+ for (i = 0; i < hw->nr_ack_regs; i++)
+ k += save_reg(d, k, hw->ack_regs[i].set_reg, 0);
+ else
+ d->chip.irq_mask_ack = d->chip.irq_disable;
+
+ /* disable bits matching force_disable before registering irqs */
+ if (desc->force_disable)
+ intc_enable_disable_enum(desc, d, desc->force_disable, 0);
+
+ /* disable bits matching force_enable before registering irqs */
+ if (desc->force_enable)
+ intc_enable_disable_enum(desc, d, desc->force_enable, 0);
+
+ BUG_ON(k > 256); /* _INTC_ADDR_E() and _INTC_ADDR_D() are 8 bits */
+
+ /* register the vectors one by one */
+ for (i = 0; i < hw->nr_vectors; i++) {
+ struct intc_vect *vect = hw->vectors + i;
+ unsigned int irq = evt2irq(vect->vect);
+ int res;
+
+ if (!vect->enum_id)
+ continue;
+
+ res = irq_alloc_desc_at(irq, numa_node_id());
+ if (res != irq && res != -EEXIST) {
+ pr_err("can't get irq_desc for %d\n", irq);
+ continue;
+ }
+
+ intc_irq_xlate_set(irq, vect->enum_id, d);
+ intc_register_irq(desc, d, vect->enum_id, irq);
+
+ for (k = i + 1; k < hw->nr_vectors; k++) {
+ struct intc_vect *vect2 = hw->vectors + k;
+ unsigned int irq2 = evt2irq(vect2->vect);
+
+ if (vect->enum_id != vect2->enum_id)
+ continue;
+
+ /*
+ * In the case of multi-evt handling and sparse
+ * IRQ support, each vector still needs to have
+ * its own backing irq_desc.
+ */
+ res = irq_alloc_desc_at(irq2, numa_node_id());
+ if (res != irq2 && res != -EEXIST) {
+ pr_err("can't get irq_desc for %d\n", irq2);
+ continue;
+ }
+
+ vect2->enum_id = 0;
+
+ /* redirect these interrupts to the first one */
+ set_irq_chip(irq2, &dummy_irq_chip);
+ set_irq_chained_handler(irq2, intc_redirect_irq);
+ set_irq_data(irq2, (void *)irq);
+ }
+ }
+
+ intc_subgroup_init(desc, d);
+
+ /* enable bits matching force_enable after registering irqs */
+ if (desc->force_enable)
+ intc_enable_disable_enum(desc, d, desc->force_enable, 1);
+
+ nr_intc_controllers++;
+
+ return 0;
+err5:
+ kfree(d->prio);
+err4:
+#ifdef CONFIG_SMP
+ kfree(d->smp);
+err3:
+#endif
+ kfree(d->reg);
+err2:
+ for (k = 0; k < d->nr_windows; k++)
+ if (d->window[k].virt)
+ iounmap(d->window[k].virt);
+
+ kfree(d->window);
+err1:
+ kfree(d);
+err0:
+ pr_err("unable to allocate INTC memory\n");
+
+ return -ENOMEM;
+}
+
+static ssize_t
+show_intc_name(struct sys_device *dev, struct sysdev_attribute *attr, char *buf)
+{
+ struct intc_desc_int *d;
+
+ d = container_of(dev, struct intc_desc_int, sysdev);
+
+ return sprintf(buf, "%s\n", d->chip.name);
+}
+
+static SYSDEV_ATTR(name, S_IRUGO, show_intc_name, NULL);
+
+static int intc_suspend(struct sys_device *dev, pm_message_t state)
+{
+ struct intc_desc_int *d;
+ struct irq_data *data;
+ struct irq_desc *desc;
+ struct irq_chip *chip;
+ int irq;
+
+ /* get intc controller associated with this sysdev */
+ d = container_of(dev, struct intc_desc_int, sysdev);
+
+ switch (state.event) {
+ case PM_EVENT_ON:
+ if (d->state.event != PM_EVENT_FREEZE)
+ break;
+
+ for_each_active_irq(irq) {
+ desc = irq_to_desc(irq);
+ data = irq_get_irq_data(irq);
+ chip = irq_data_get_irq_chip(data);
+
+ /*
+ * This will catch the redirect and VIRQ cases
+ * due to the dummy_irq_chip being inserted.
+ */
+ if (chip != &d->chip)
+ continue;
+ if (desc->status & IRQ_DISABLED)
+ chip->irq_disable(data);
+ else
+ chip->irq_enable(data);
+ }
+ break;
+ case PM_EVENT_FREEZE:
+ /* nothing has to be done */
+ break;
+ case PM_EVENT_SUSPEND:
+ /* enable wakeup irqs belonging to this intc controller */
+ for_each_active_irq(irq) {
+ desc = irq_to_desc(irq);
+ data = irq_get_irq_data(irq);
+ chip = irq_data_get_irq_chip(data);
+
+ if (chip != &d->chip)
+ continue;
+ if ((desc->status & IRQ_WAKEUP))
+ chip->irq_enable(data);
+ }
+ break;
+ }
+
+ d->state = state;
+
+ return 0;
+}
+
+static int intc_resume(struct sys_device *dev)
+{
+ return intc_suspend(dev, PMSG_ON);
+}
+
+struct sysdev_class intc_sysdev_class = {
+ .name = "intc",
+ .suspend = intc_suspend,
+ .resume = intc_resume,
+};
+
+/* register this intc as sysdev to allow suspend/resume */
+static int __init register_intc_sysdevs(void)
+{
+ struct intc_desc_int *d;
+ int error;
+
+ error = sysdev_class_register(&intc_sysdev_class);
+ if (!error) {
+ list_for_each_entry(d, &intc_list, list) {
+ d->sysdev.id = d->index;
+ d->sysdev.cls = &intc_sysdev_class;
+ error = sysdev_register(&d->sysdev);
+ if (error == 0)
+ error = sysdev_create_file(&d->sysdev,
+ &attr_name);
+ if (error)
+ break;
+ }
+ }
+
+ if (error)
+ pr_err("sysdev registration error\n");
+
+ return error;
+}
+device_initcall(register_intc_sysdevs);
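
intc_register_irq() above chooses a primary masking method per source, preferring a single-source mask bitmap over a single-source priority field and only then falling back to the group handles. Stripped of the handle lookups and the group fallback, the selection reduces to a few lines; the following is a standalone model written for illustration (pick_primary() is a name invented here, and the handles are treated as opaque non-zero values):

#include <stdio.h>

/* Model of the primary/secondary masking choice in intc_register_irq():
 * data[0] is the (single-source) mask bitmap handle, data[1] the priority
 * handle; zero means "no such handle for this source". The group-handle
 * refill done by the real code is omitted for brevity.
 */
static int pick_primary(const unsigned int data[2])
{
	int primary = 0;

	if (!data[0] && data[1])	/* only a priority handle exists */
		primary = 1;
	if (!data[primary])		/* last resort: use whichever is set */
		primary ^= 1;

	return primary;
}

int main(void)
{
	const unsigned int bitmap_only[2] = { 0x1234, 0 };
	const unsigned int prio_only[2]   = { 0, 0x5678 };

	printf("%d %d\n", pick_primary(bitmap_only), pick_primary(prio_only));
	return 0;
}
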
diff --git a/drivers/sh/intc/dynamic.c b/drivers/sh/intc/dynamic.c
new file mode 100644
index 000000000000..a3677c9dfe36
--- /dev/null
+++ b/drivers/sh/intc/dynamic.c
@@ -0,0 +1,64 @@
+/*
+ * Dynamic IRQ management
+ *
+ * Copyright (C) 2010 Paul Mundt
+ *
+ * Modelled after arch/x86/kernel/apic/io_apic.c
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ */
+#define pr_fmt(fmt) "intc: " fmt
+
+#include <linux/irq.h>
+#include <linux/bitmap.h>
+#include <linux/spinlock.h>
+#include "internals.h" /* only for activate_irq() damage.. */
+
+/*
+ * The IRQ bitmap provides a global map of bound IRQ vectors for a
+ * given platform. Allocation of IRQs is either static through the CPU
+ * vector map, or dynamic in the case of board mux vectors or MSI.
+ *
+ * As this is a central point for all IRQ controllers on the system,
+ * each of the available sources are mapped out here. This combined with
+ * sparseirq makes it quite trivial to keep the vector map tightly packed
+ * when dynamically creating IRQs, as well as tying in to otherwise
+ * unused irq_desc positions in the sparse array.
+ */
+
+/*
+ * Dynamic IRQ allocation and deallocation
+ */
+unsigned int create_irq_nr(unsigned int irq_want, int node)
+{
+ int irq = irq_alloc_desc_at(irq_want, node);
+ if (irq < 0)
+ return 0;
+
+ activate_irq(irq);
+ return irq;
+}
+
+int create_irq(void)
+{
+ int irq = irq_alloc_desc(numa_node_id());
+ if (irq >= 0)
+ activate_irq(irq);
+
+ return irq;
+}
+
+void destroy_irq(unsigned int irq)
+{
+ irq_free_desc(irq);
+}
+
+void reserve_intc_vectors(struct intc_vect *vectors, unsigned int nr_vecs)
+{
+ int i;
+
+ for (i = 0; i < nr_vecs; i++)
+ irq_reserve_irq(evt2irq(vectors[i].vect));
+}
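
The simplified create_irq()/destroy_irq() above just wrap the genirq descriptor allocator and re-run activate_irq(). A board or MSI user would pair them with request_irq()/free_irq() in the usual way; the snippet below is a hypothetical consumer written for illustration (demo_isr and the device pointer are made up), not code from this series.

/* Hypothetical consumer of the dynamic IRQ helpers above. */
#include <linux/interrupt.h>
#include <linux/irq.h>

static irqreturn_t demo_isr(int irq, void *dev_id)
{
	/* acknowledge/handle the device here */
	return IRQ_HANDLED;
}

static int demo_attach(void *dev)
{
	int irq = create_irq();		/* grab a free dynamic vector */
	int ret;

	if (irq < 0)
		return irq;

	ret = request_irq(irq, demo_isr, 0, "demo", dev);
	if (ret) {
		destroy_irq(irq);	/* hand the vector back on failure */
		return ret;
	}

	return irq;	/* caller later does free_irq() + destroy_irq() */
}
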
diff --git a/drivers/sh/intc/handle.c b/drivers/sh/intc/handle.c
new file mode 100644
index 000000000000..057ce56829bf
--- /dev/null
+++ b/drivers/sh/intc/handle.c
@@ -0,0 +1,307 @@
+/*
+ * Shared interrupt handling code for IPR and INTC2 types of IRQs.
+ *
+ * Copyright (C) 2007, 2008 Magnus Damm
+ * Copyright (C) 2009, 2010 Paul Mundt
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ */
+#include <linux/init.h>
+#include <linux/irq.h>
+#include <linux/spinlock.h>
+#include "internals.h"
+
+static unsigned long ack_handle[NR_IRQS];
+
+static intc_enum __init intc_grp_id(struct intc_desc *desc,
+ intc_enum enum_id)
+{
+ struct intc_group *g = desc->hw.groups;
+ unsigned int i, j;
+
+ for (i = 0; g && enum_id && i < desc->hw.nr_groups; i++) {
+ g = desc->hw.groups + i;
+
+ for (j = 0; g->enum_ids[j]; j++) {
+ if (g->enum_ids[j] != enum_id)
+ continue;
+
+ return g->enum_id;
+ }
+ }
+
+ return 0;
+}
+
+static unsigned int __init _intc_mask_data(struct intc_desc *desc,
+ struct intc_desc_int *d,
+ intc_enum enum_id,
+ unsigned int *reg_idx,
+ unsigned int *fld_idx)
+{
+ struct intc_mask_reg *mr = desc->hw.mask_regs;
+ unsigned int fn, mode;
+ unsigned long reg_e, reg_d;
+
+ while (mr && enum_id && *reg_idx < desc->hw.nr_mask_regs) {
+ mr = desc->hw.mask_regs + *reg_idx;
+
+ for (; *fld_idx < ARRAY_SIZE(mr->enum_ids); (*fld_idx)++) {
+ if (mr->enum_ids[*fld_idx] != enum_id)
+ continue;
+
+ if (mr->set_reg && mr->clr_reg) {
+ fn = REG_FN_WRITE_BASE;
+ mode = MODE_DUAL_REG;
+ reg_e = mr->clr_reg;
+ reg_d = mr->set_reg;
+ } else {
+ fn = REG_FN_MODIFY_BASE;
+ if (mr->set_reg) {
+ mode = MODE_ENABLE_REG;
+ reg_e = mr->set_reg;
+ reg_d = mr->set_reg;
+ } else {
+ mode = MODE_MASK_REG;
+ reg_e = mr->clr_reg;
+ reg_d = mr->clr_reg;
+ }
+ }
+
+ fn += (mr->reg_width >> 3) - 1;
+ return _INTC_MK(fn, mode,
+ intc_get_reg(d, reg_e),
+ intc_get_reg(d, reg_d),
+ 1,
+ (mr->reg_width - 1) - *fld_idx);
+ }
+
+ *fld_idx = 0;
+ (*reg_idx)++;
+ }
+
+ return 0;
+}
+
+unsigned int __init
+intc_get_mask_handle(struct intc_desc *desc, struct intc_desc_int *d,
+ intc_enum enum_id, int do_grps)
+{
+ unsigned int i = 0;
+ unsigned int j = 0;
+ unsigned int ret;
+
+ ret = _intc_mask_data(desc, d, enum_id, &i, &j);
+ if (ret)
+ return ret;
+
+ if (do_grps)
+ return intc_get_mask_handle(desc, d, intc_grp_id(desc, enum_id), 0);
+
+ return 0;
+}
+
+static unsigned int __init _intc_prio_data(struct intc_desc *desc,
+ struct intc_desc_int *d,
+ intc_enum enum_id,
+ unsigned int *reg_idx,
+ unsigned int *fld_idx)
+{
+ struct intc_prio_reg *pr = desc->hw.prio_regs;
+ unsigned int fn, n, mode, bit;
+ unsigned long reg_e, reg_d;
+
+ while (pr && enum_id && *reg_idx < desc->hw.nr_prio_regs) {
+ pr = desc->hw.prio_regs + *reg_idx;
+
+ for (; *fld_idx < ARRAY_SIZE(pr->enum_ids); (*fld_idx)++) {
+ if (pr->enum_ids[*fld_idx] != enum_id)
+ continue;
+
+ if (pr->set_reg && pr->clr_reg) {
+ fn = REG_FN_WRITE_BASE;
+ mode = MODE_PCLR_REG;
+ reg_e = pr->set_reg;
+ reg_d = pr->clr_reg;
+ } else {
+ fn = REG_FN_MODIFY_BASE;
+ mode = MODE_PRIO_REG;
+ if (!pr->set_reg)
+ BUG();
+ reg_e = pr->set_reg;
+ reg_d = pr->set_reg;
+ }
+
+ fn += (pr->reg_width >> 3) - 1;
+ n = *fld_idx + 1;
+
+ BUG_ON(n * pr->field_width > pr->reg_width);
+
+ bit = pr->reg_width - (n * pr->field_width);
+
+ return _INTC_MK(fn, mode,
+ intc_get_reg(d, reg_e),
+ intc_get_reg(d, reg_d),
+ pr->field_width, bit);
+ }
+
+ *fld_idx = 0;
+ (*reg_idx)++;
+ }
+
+ return 0;
+}
+
+unsigned int __init
+intc_get_prio_handle(struct intc_desc *desc, struct intc_desc_int *d,
+ intc_enum enum_id, int do_grps)
+{
+ unsigned int i = 0;
+ unsigned int j = 0;
+ unsigned int ret;
+
+ ret = _intc_prio_data(desc, d, enum_id, &i, &j);
+ if (ret)
+ return ret;
+
+ if (do_grps)
+ return intc_get_prio_handle(desc, d, intc_grp_id(desc, enum_id), 0);
+
+ return 0;
+}
+
+static unsigned int __init intc_ack_data(struct intc_desc *desc,
+ struct intc_desc_int *d,
+ intc_enum enum_id)
+{
+ struct intc_mask_reg *mr = desc->hw.ack_regs;
+ unsigned int i, j, fn, mode;
+ unsigned long reg_e, reg_d;
+
+ for (i = 0; mr && enum_id && i < desc->hw.nr_ack_regs; i++) {
+ mr = desc->hw.ack_regs + i;
+
+ for (j = 0; j < ARRAY_SIZE(mr->enum_ids); j++) {
+ if (mr->enum_ids[j] != enum_id)
+ continue;
+
+ fn = REG_FN_MODIFY_BASE;
+ mode = MODE_ENABLE_REG;
+ reg_e = mr->set_reg;
+ reg_d = mr->set_reg;
+
+ fn += (mr->reg_width >> 3) - 1;
+ return _INTC_MK(fn, mode,
+ intc_get_reg(d, reg_e),
+ intc_get_reg(d, reg_d),
+ 1,
+ (mr->reg_width - 1) - j);
+ }
+ }
+
+ return 0;
+}
+
+static void intc_enable_disable(struct intc_desc_int *d,
+ unsigned long handle, int do_enable)
+{
+ unsigned long addr;
+ unsigned int cpu;
+ unsigned long (*fn)(unsigned long, unsigned long,
+ unsigned long (*)(unsigned long, unsigned long,
+ unsigned long),
+ unsigned int);
+
+ if (do_enable) {
+ for (cpu = 0; cpu < SMP_NR(d, _INTC_ADDR_E(handle)); cpu++) {
+ addr = INTC_REG(d, _INTC_ADDR_E(handle), cpu);
+ fn = intc_enable_noprio_fns[_INTC_MODE(handle)];
+ fn(addr, handle, intc_reg_fns[_INTC_FN(handle)], 0);
+ }
+ } else {
+ for (cpu = 0; cpu < SMP_NR(d, _INTC_ADDR_D(handle)); cpu++) {
+ addr = INTC_REG(d, _INTC_ADDR_D(handle), cpu);
+ fn = intc_disable_fns[_INTC_MODE(handle)];
+ fn(addr, handle, intc_reg_fns[_INTC_FN(handle)], 0);
+ }
+ }
+}
+
+void __init intc_enable_disable_enum(struct intc_desc *desc,
+ struct intc_desc_int *d,
+ intc_enum enum_id, int enable)
+{
+ unsigned int i, j, data;
+
+ /* go through and enable/disable all mask bits */
+ i = j = 0;
+ do {
+ data = _intc_mask_data(desc, d, enum_id, &i, &j);
+ if (data)
+ intc_enable_disable(d, data, enable);
+ j++;
+ } while (data);
+
+ /* go through and enable/disable all priority fields */
+ i = j = 0;
+ do {
+ data = _intc_prio_data(desc, d, enum_id, &i, &j);
+ if (data)
+ intc_enable_disable(d, data, enable);
+
+ j++;
+ } while (data);
+}
+
+unsigned int __init
+intc_get_sense_handle(struct intc_desc *desc, struct intc_desc_int *d,
+ intc_enum enum_id)
+{
+ struct intc_sense_reg *sr = desc->hw.sense_regs;
+ unsigned int i, j, fn, bit;
+
+ for (i = 0; sr && enum_id && i < desc->hw.nr_sense_regs; i++) {
+ sr = desc->hw.sense_regs + i;
+
+ for (j = 0; j < ARRAY_SIZE(sr->enum_ids); j++) {
+ if (sr->enum_ids[j] != enum_id)
+ continue;
+
+ fn = REG_FN_MODIFY_BASE;
+ fn += (sr->reg_width >> 3) - 1;
+
+ BUG_ON((j + 1) * sr->field_width > sr->reg_width);
+
+ bit = sr->reg_width - ((j + 1) * sr->field_width);
+
+ return _INTC_MK(fn, 0, intc_get_reg(d, sr->reg),
+ 0, sr->field_width, bit);
+ }
+ }
+
+ return 0;
+}
+
+void intc_set_ack_handle(unsigned int irq, struct intc_desc *desc,
+ struct intc_desc_int *d, intc_enum id)
+{
+ unsigned long flags;
+
+ /*
+ * Nothing to do for this IRQ.
+ */
+ if (!desc->hw.ack_regs)
+ return;
+
+ raw_spin_lock_irqsave(&intc_big_lock, flags);
+ ack_handle[irq] = intc_ack_data(desc, d, id);
+ raw_spin_unlock_irqrestore(&intc_big_lock, flags);
+}
+
+unsigned long intc_get_ack_handle(unsigned int irq)
+{
+ return ack_handle[irq];
+}
diff --git a/drivers/sh/intc/internals.h b/drivers/sh/intc/internals.h
new file mode 100644
index 000000000000..0cf8260971d4
--- /dev/null
+++ b/drivers/sh/intc/internals.h
@@ -0,0 +1,186 @@
+#include <linux/sh_intc.h>
+#include <linux/irq.h>
+#include <linux/list.h>
+#include <linux/kernel.h>
+#include <linux/types.h>
+#include <linux/radix-tree.h>
+#include <linux/sysdev.h>
+
+#define _INTC_MK(fn, mode, addr_e, addr_d, width, shift) \
+ ((shift) | ((width) << 5) | ((fn) << 9) | ((mode) << 13) | \
+ ((addr_e) << 16) | ((addr_d << 24)))
+
+#define _INTC_SHIFT(h) (h & 0x1f)
+#define _INTC_WIDTH(h) ((h >> 5) & 0xf)
+#define _INTC_FN(h) ((h >> 9) & 0xf)
+#define _INTC_MODE(h) ((h >> 13) & 0x7)
+#define _INTC_ADDR_E(h) ((h >> 16) & 0xff)
+#define _INTC_ADDR_D(h) ((h >> 24) & 0xff)
+
+#ifdef CONFIG_SMP
+#define IS_SMP(x) (x.smp)
+#define INTC_REG(d, x, c) (d->reg[(x)] + ((d->smp[(x)] & 0xff) * c))
+#define SMP_NR(d, x) ((d->smp[(x)] >> 8) ? (d->smp[(x)] >> 8) : 1)
+#else
+#define IS_SMP(x) 0
+#define INTC_REG(d, x, c) (d->reg[(x)])
+#define SMP_NR(d, x) 1
+#endif
+
+struct intc_handle_int {
+ unsigned int irq;
+ unsigned long handle;
+};
+
+struct intc_window {
+ phys_addr_t phys;
+ void __iomem *virt;
+ unsigned long size;
+};
+
+struct intc_map_entry {
+ intc_enum enum_id;
+ struct intc_desc_int *desc;
+};
+
+struct intc_subgroup_entry {
+ unsigned int pirq;
+ intc_enum enum_id;
+ unsigned long handle;
+};
+
+struct intc_desc_int {
+ struct list_head list;
+ struct sys_device sysdev;
+ struct radix_tree_root tree;
+ pm_message_t state;
+ raw_spinlock_t lock;
+ unsigned int index;
+ unsigned long *reg;
+#ifdef CONFIG_SMP
+ unsigned long *smp;
+#endif
+ unsigned int nr_reg;
+ struct intc_handle_int *prio;
+ unsigned int nr_prio;
+ struct intc_handle_int *sense;
+ unsigned int nr_sense;
+ struct intc_window *window;
+ unsigned int nr_windows;
+ struct irq_chip chip;
+};
+
+
+enum {
+ REG_FN_ERR = 0,
+ REG_FN_TEST_BASE = 1,
+ REG_FN_WRITE_BASE = 5,
+ REG_FN_MODIFY_BASE = 9
+};
+
+enum { MODE_ENABLE_REG = 0, /* Bit(s) set -> interrupt enabled */
+ MODE_MASK_REG, /* Bit(s) set -> interrupt disabled */
+ MODE_DUAL_REG, /* Two registers, set bit to enable / disable */
+ MODE_PRIO_REG, /* Priority value written to enable interrupt */
+ MODE_PCLR_REG, /* Above plus all bits set to disable interrupt */
+};
+
+static inline struct intc_desc_int *get_intc_desc(unsigned int irq)
+{
+ struct irq_chip *chip = get_irq_chip(irq);
+
+ return container_of(chip, struct intc_desc_int, chip);
+}
+
+/*
+ * Grumble.
+ */
+static inline void activate_irq(int irq)
+{
+#ifdef CONFIG_ARM
+ /* ARM requires an extra step to clear IRQ_NOREQUEST, which it
+ * sets on behalf of every irq_chip. Also sets IRQ_NOPROBE.
+ */
+ set_irq_flags(irq, IRQF_VALID);
+#else
+ /* same effect on other architectures */
+ set_irq_noprobe(irq);
+#endif
+}
+
+/* access.c */
+extern unsigned long
+(*intc_reg_fns[])(unsigned long addr, unsigned long h, unsigned long data);
+
+extern unsigned long
+(*intc_enable_fns[])(unsigned long addr, unsigned long handle,
+ unsigned long (*fn)(unsigned long,
+ unsigned long, unsigned long),
+ unsigned int irq);
+extern unsigned long
+(*intc_disable_fns[])(unsigned long addr, unsigned long handle,
+ unsigned long (*fn)(unsigned long,
+ unsigned long, unsigned long),
+ unsigned int irq);
+extern unsigned long
+(*intc_enable_noprio_fns[])(unsigned long addr, unsigned long handle,
+ unsigned long (*fn)(unsigned long,
+ unsigned long, unsigned long),
+ unsigned int irq);
+
+unsigned long intc_phys_to_virt(struct intc_desc_int *d, unsigned long address);
+unsigned int intc_get_reg(struct intc_desc_int *d, unsigned long address);
+unsigned int intc_set_field_from_handle(unsigned int value,
+ unsigned int field_value,
+ unsigned int handle);
+unsigned long intc_get_field_from_handle(unsigned int value,
+ unsigned int handle);
+
+/* balancing.c */
+#ifdef CONFIG_INTC_BALANCING
+void intc_balancing_enable(unsigned int irq);
+void intc_balancing_disable(unsigned int irq);
+void intc_set_dist_handle(unsigned int irq, struct intc_desc *desc,
+ struct intc_desc_int *d, intc_enum id);
+#else
+static inline void intc_balancing_enable(unsigned int irq) { }
+static inline void intc_balancing_disable(unsigned int irq) { }
+static inline void
+intc_set_dist_handle(unsigned int irq, struct intc_desc *desc,
+ struct intc_desc_int *d, intc_enum id) { }
+#endif
+
+/* chip.c */
+extern struct irq_chip intc_irq_chip;
+void _intc_enable(struct irq_data *data, unsigned long handle);
+
+/* core.c */
+extern struct list_head intc_list;
+extern raw_spinlock_t intc_big_lock;
+extern unsigned int nr_intc_controllers;
+extern struct sysdev_class intc_sysdev_class;
+
+unsigned int intc_get_dfl_prio_level(void);
+unsigned int intc_get_prio_level(unsigned int irq);
+void intc_set_prio_level(unsigned int irq, unsigned int level);
+
+/* handle.c */
+unsigned int intc_get_mask_handle(struct intc_desc *desc,
+ struct intc_desc_int *d,
+ intc_enum enum_id, int do_grps);
+unsigned int intc_get_prio_handle(struct intc_desc *desc,
+ struct intc_desc_int *d,
+ intc_enum enum_id, int do_grps);
+unsigned int intc_get_sense_handle(struct intc_desc *desc,
+ struct intc_desc_int *d,
+ intc_enum enum_id);
+void intc_set_ack_handle(unsigned int irq, struct intc_desc *desc,
+ struct intc_desc_int *d, intc_enum id);
+unsigned long intc_get_ack_handle(unsigned int irq);
+void intc_enable_disable_enum(struct intc_desc *desc, struct intc_desc_int *d,
+ intc_enum enum_id, int enable);
+
+/* virq.c */
+void intc_subgroup_init(struct intc_desc *desc, struct intc_desc_int *d);
+void intc_irq_xlate_set(unsigned int irq, intc_enum id, struct intc_desc_int *d);
+struct intc_map_entry *intc_irq_xlate_get(unsigned int irq);
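
The handle produced by _INTC_MK() above packs the register function, access mode, enable/disable register indices and the (width, shift) field into a single 32-bit value, which is why the register indices are limited to 8 bits each. A standalone round-trip of that encoding, with values chosen arbitrarily for illustration:

#include <stdio.h>

/* Same bit layout as internals.h */
#define _INTC_MK(fn, mode, addr_e, addr_d, width, shift) \
	((shift) | ((width) << 5) | ((fn) << 9) | ((mode) << 13) | \
	 ((addr_e) << 16) | ((addr_d) << 24))

#define _INTC_SHIFT(h)	((h) & 0x1f)
#define _INTC_WIDTH(h)	(((h) >> 5) & 0xf)
#define _INTC_FN(h)	(((h) >> 9) & 0xf)
#define _INTC_MODE(h)	(((h) >> 13) & 0x7)
#define _INTC_ADDR_E(h)	(((h) >> 16) & 0xff)
#define _INTC_ADDR_D(h)	(((h) >> 24) & 0xff)

int main(void)
{
	/* fn=9 (modify base), mode=0 (enable reg), regs 3/4, 1-bit field at bit 7 */
	unsigned long h = _INTC_MK(9, 0, 3, 4, 1, 7);

	printf("fn=%lu mode=%lu e=%lu d=%lu width=%lu shift=%lu\n",
	       _INTC_FN(h), _INTC_MODE(h), _INTC_ADDR_E(h), _INTC_ADDR_D(h),
	       _INTC_WIDTH(h), _INTC_SHIFT(h));
	return 0;
}
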
diff --git a/drivers/sh/intc/userimask.c b/drivers/sh/intc/userimask.c
new file mode 100644
index 000000000000..e32304b66cf1
--- /dev/null
+++ b/drivers/sh/intc/userimask.c
@@ -0,0 +1,83 @@
+/*
+ * Support for hardware-assisted userspace interrupt masking.
+ *
+ * Copyright (C) 2010 Paul Mundt
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ */
+#define pr_fmt(fmt) "intc: " fmt
+
+#include <linux/errno.h>
+#include <linux/sysdev.h>
+#include <linux/init.h>
+#include <linux/io.h>
+#include <asm/sizes.h>
+#include "internals.h"
+
+static void __iomem *uimask;
+
+static ssize_t
+show_intc_userimask(struct sysdev_class *cls,
+ struct sysdev_class_attribute *attr, char *buf)
+{
+ return sprintf(buf, "%d\n", (__raw_readl(uimask) >> 4) & 0xf);
+}
+
+static ssize_t
+store_intc_userimask(struct sysdev_class *cls,
+ struct sysdev_class_attribute *attr,
+ const char *buf, size_t count)
+{
+ unsigned long level;
+
+ level = simple_strtoul(buf, NULL, 10);
+
+ /*
+ * Minimal acceptable IRQ levels are in the 2 - 16 range, but
+ * these are chomped so as to not interfere with normal IRQs.
+ *
+ * Level 1 is a special case on some CPUs in that it's not
+ * directly settable, but given that USERIMASK cuts off below a
+ * certain level, we don't care about this limitation here.
+ * Level 0 on the other hand equates to user masking disabled.
+ *
+ * We use the default priority level as a cut off so that only
+ * special case opt-in IRQs can be mangled.
+ */
+ if (level >= intc_get_dfl_prio_level())
+ return -EINVAL;
+
+ __raw_writel(0xa5 << 24 | level << 4, uimask);
+
+ return count;
+}
+
+static SYSDEV_CLASS_ATTR(userimask, S_IRUSR | S_IWUSR,
+ show_intc_userimask, store_intc_userimask);
+
+static int __init userimask_sysdev_init(void)
+{
+ if (unlikely(!uimask))
+ return -ENXIO;
+
+ return sysdev_class_create_file(&intc_sysdev_class, &attr_userimask);
+}
+late_initcall(userimask_sysdev_init);
+
+int register_intc_userimask(unsigned long addr)
+{
+ if (unlikely(uimask))
+ return -EBUSY;
+
+ uimask = ioremap_nocache(addr, SZ_4K);
+ if (unlikely(!uimask))
+ return -ENOMEM;
+
+ pr_info("userimask support registered for levels 0 -> %d\n",
+ intc_get_dfl_prio_level() - 1);
+
+ return 0;
+}
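
store_intc_userimask() above rejects any level at or above the default priority level, so only the low, opt-in levels can be masked from userspace. On kernels of this vintage the sysdev class attribute normally surfaces as a sysfs file; the path below is an assumption based on the "intc" class name, and the program is only an illustrative sketch of driving it from userspace.

#include <stdio.h>

int main(void)
{
	/* Assumed path for the "intc" sysdev class attribute registered above */
	FILE *f = fopen("/sys/devices/system/intc/userimask", "w");

	if (!f) {
		perror("userimask");
		return 1;
	}

	/* Request user-level masking at level 1; anything at or above the
	 * default priority level is rejected with -EINVAL by the store hook. */
	fprintf(f, "1\n");

	return fclose(f) ? 1 : 0;
}
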
diff --git a/drivers/sh/intc/virq-debugfs.c b/drivers/sh/intc/virq-debugfs.c
new file mode 100644
index 000000000000..9e62ba9311f0
--- /dev/null
+++ b/drivers/sh/intc/virq-debugfs.c
@@ -0,0 +1,64 @@
+/*
+ * Support for virtual IRQ subgroups debugfs mapping.
+ *
+ * Copyright (C) 2010 Paul Mundt
+ *
+ * Modelled after arch/powerpc/kernel/irq.c.
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ */
+#include <linux/seq_file.h>
+#include <linux/fs.h>
+#include <linux/init.h>
+#include <linux/irq.h>
+#include <linux/debugfs.h>
+#include "internals.h"
+
+static int intc_irq_xlate_debug(struct seq_file *m, void *priv)
+{
+ int i;
+
+ seq_printf(m, "%-5s %-7s %-15s\n", "irq", "enum", "chip name");
+
+ for (i = 1; i < nr_irqs; i++) {
+ struct intc_map_entry *entry = intc_irq_xlate_get(i);
+ struct intc_desc_int *desc = entry->desc;
+
+ if (!desc)
+ continue;
+
+ seq_printf(m, "%5d ", i);
+ seq_printf(m, "0x%05x ", entry->enum_id);
+ seq_printf(m, "%-15s\n", desc->chip.name);
+ }
+
+ return 0;
+}
+
+static int intc_irq_xlate_open(struct inode *inode, struct file *file)
+{
+ return single_open(file, intc_irq_xlate_debug, inode->i_private);
+}
+
+static const struct file_operations intc_irq_xlate_fops = {
+ .open = intc_irq_xlate_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = single_release,
+};
+
+static int __init intc_irq_xlate_init(void)
+{
+ /*
+ * XXX.. use arch_debugfs_dir here when all of the intc users are
+ * converted.
+ */
+ if (debugfs_create_file("intc_irq_xlate", S_IRUGO, NULL, NULL,
+ &intc_irq_xlate_fops) == NULL)
+ return -ENOMEM;
+
+ return 0;
+}
+fs_initcall(intc_irq_xlate_init);
diff --git a/drivers/sh/intc/virq.c b/drivers/sh/intc/virq.c
new file mode 100644
index 000000000000..4e0ff7181164
--- /dev/null
+++ b/drivers/sh/intc/virq.c
@@ -0,0 +1,257 @@
+/*
+ * Support for virtual IRQ subgroups.
+ *
+ * Copyright (C) 2010 Paul Mundt
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ */
+#define pr_fmt(fmt) "intc: " fmt
+
+#include <linux/slab.h>
+#include <linux/irq.h>
+#include <linux/list.h>
+#include <linux/radix-tree.h>
+#include <linux/spinlock.h>
+#include "internals.h"
+
+static struct intc_map_entry intc_irq_xlate[NR_IRQS];
+
+struct intc_virq_list {
+ unsigned int irq;
+ struct intc_virq_list *next;
+};
+
+#define for_each_virq(entry, head) \
+ for (entry = head; entry; entry = entry->next)
+
+/*
+ * Tags for the radix tree
+ */
+#define INTC_TAG_VIRQ_NEEDS_ALLOC 0
+
+void intc_irq_xlate_set(unsigned int irq, intc_enum id, struct intc_desc_int *d)
+{
+ unsigned long flags;
+
+ raw_spin_lock_irqsave(&intc_big_lock, flags);
+ intc_irq_xlate[irq].enum_id = id;
+ intc_irq_xlate[irq].desc = d;
+ raw_spin_unlock_irqrestore(&intc_big_lock, flags);
+}
+
+struct intc_map_entry *intc_irq_xlate_get(unsigned int irq)
+{
+ return intc_irq_xlate + irq;
+}
+
+int intc_irq_lookup(const char *chipname, intc_enum enum_id)
+{
+ struct intc_map_entry *ptr;
+ struct intc_desc_int *d;
+ int irq = -1;
+
+ list_for_each_entry(d, &intc_list, list) {
+ int tagged;
+
+ if (strcmp(d->chip.name, chipname) != 0)
+ continue;
+
+ /*
+ * Catch early lookups for subgroup VIRQs that have not
+ * yet been allocated an IRQ. This already includes a
+ * fast-path out if the tree is untagged, so there is no
+ * need to explicitly test the root tree.
+ */
+ tagged = radix_tree_tag_get(&d->tree, enum_id,
+ INTC_TAG_VIRQ_NEEDS_ALLOC);
+ if (unlikely(tagged))
+ break;
+
+ ptr = radix_tree_lookup(&d->tree, enum_id);
+ if (ptr) {
+ irq = ptr - intc_irq_xlate;
+ break;
+ }
+ }
+
+ return irq;
+}
+EXPORT_SYMBOL_GPL(intc_irq_lookup);
+
+static int add_virq_to_pirq(unsigned int irq, unsigned int virq)
+{
+ struct intc_virq_list **last, *entry;
+ struct irq_data *data = irq_get_irq_data(irq);
+
+ /* scan for duplicates */
+ last = (struct intc_virq_list **)&data->handler_data;
+ for_each_virq(entry, data->handler_data) {
+ if (entry->irq == virq)
+ return 0;
+ last = &entry->next;
+ }
+
+ entry = kzalloc(sizeof(struct intc_virq_list), GFP_ATOMIC);
+ if (!entry) {
+ pr_err("can't allocate VIRQ mapping for %d\n", virq);
+ return -ENOMEM;
+ }
+
+ entry->irq = virq;
+
+ *last = entry;
+
+ return 0;
+}
+
+static void intc_virq_handler(unsigned int irq, struct irq_desc *desc)
+{
+ struct irq_data *data = irq_get_irq_data(irq);
+ struct irq_chip *chip = irq_data_get_irq_chip(data);
+ struct intc_virq_list *entry, *vlist = irq_data_get_irq_data(data);
+ struct intc_desc_int *d = get_intc_desc(irq);
+
+ chip->irq_mask_ack(data);
+
+ for_each_virq(entry, vlist) {
+ unsigned long addr, handle;
+
+ handle = (unsigned long)get_irq_data(entry->irq);
+ addr = INTC_REG(d, _INTC_ADDR_E(handle), 0);
+
+ if (intc_reg_fns[_INTC_FN(handle)](addr, handle, 0))
+ generic_handle_irq(entry->irq);
+ }
+
+ chip->irq_unmask(data);
+}
+
+static unsigned long __init intc_subgroup_data(struct intc_subgroup *subgroup,
+ struct intc_desc_int *d,
+ unsigned int index)
+{
+ unsigned int fn = REG_FN_TEST_BASE + (subgroup->reg_width >> 3) - 1;
+
+ return _INTC_MK(fn, MODE_ENABLE_REG, intc_get_reg(d, subgroup->reg),
+ 0, 1, (subgroup->reg_width - 1) - index);
+}
+
+static void __init intc_subgroup_init_one(struct intc_desc *desc,
+ struct intc_desc_int *d,
+ struct intc_subgroup *subgroup)
+{
+ struct intc_map_entry *mapped;
+ unsigned int pirq;
+ unsigned long flags;
+ int i;
+
+ mapped = radix_tree_lookup(&d->tree, subgroup->parent_id);
+ if (!mapped) {
+ WARN_ON(1);
+ return;
+ }
+
+ pirq = mapped - intc_irq_xlate;
+
+ raw_spin_lock_irqsave(&d->lock, flags);
+
+ for (i = 0; i < ARRAY_SIZE(subgroup->enum_ids); i++) {
+ struct intc_subgroup_entry *entry;
+ int err;
+
+ if (!subgroup->enum_ids[i])
+ continue;
+
+ entry = kmalloc(sizeof(*entry), GFP_NOWAIT);
+ if (!entry)
+ break;
+
+ entry->pirq = pirq;
+ entry->enum_id = subgroup->enum_ids[i];
+ entry->handle = intc_subgroup_data(subgroup, d, i);
+
+ err = radix_tree_insert(&d->tree, entry->enum_id, entry);
+ if (unlikely(err < 0))
+ break;
+
+ radix_tree_tag_set(&d->tree, entry->enum_id,
+ INTC_TAG_VIRQ_NEEDS_ALLOC);
+ }
+
+ raw_spin_unlock_irqrestore(&d->lock, flags);
+}
+
+void __init intc_subgroup_init(struct intc_desc *desc, struct intc_desc_int *d)
+{
+ int i;
+
+ if (!desc->hw.subgroups)
+ return;
+
+ for (i = 0; i < desc->hw.nr_subgroups; i++)
+ intc_subgroup_init_one(desc, d, desc->hw.subgroups + i);
+}
+
+static void __init intc_subgroup_map(struct intc_desc_int *d)
+{
+ struct intc_subgroup_entry *entries[32];
+ unsigned long flags;
+ unsigned int nr_found;
+ int i;
+
+ raw_spin_lock_irqsave(&d->lock, flags);
+
+restart:
+ nr_found = radix_tree_gang_lookup_tag_slot(&d->tree,
+ (void ***)entries, 0, ARRAY_SIZE(entries),
+ INTC_TAG_VIRQ_NEEDS_ALLOC);
+
+ for (i = 0; i < nr_found; i++) {
+ struct intc_subgroup_entry *entry;
+ int irq;
+
+ entry = radix_tree_deref_slot((void **)entries[i]);
+ if (unlikely(!entry))
+ continue;
+ if (radix_tree_deref_retry(entry))
+ goto restart;
+
+ irq = create_irq();
+ if (unlikely(irq < 0)) {
+ pr_err("no more free IRQs, bailing..\n");
+ break;
+ }
+
+ pr_info("Setting up a chained VIRQ from %d -> %d\n",
+ irq, entry->pirq);
+
+ intc_irq_xlate_set(irq, entry->enum_id, d);
+
+ set_irq_chip_and_handler_name(irq, get_irq_chip(entry->pirq),
+ handle_simple_irq, "virq");
+ set_irq_chip_data(irq, get_irq_chip_data(entry->pirq));
+
+ set_irq_data(irq, (void *)entry->handle);
+
+ set_irq_chained_handler(entry->pirq, intc_virq_handler);
+ add_virq_to_pirq(entry->pirq, irq);
+
+ radix_tree_tag_clear(&d->tree, entry->enum_id,
+ INTC_TAG_VIRQ_NEEDS_ALLOC);
+ radix_tree_replace_slot((void **)entries[i],
+ &intc_irq_xlate[irq]);
+ }
+
+ raw_spin_unlock_irqrestore(&d->lock, flags);
+}
+
+void __init intc_finalize(void)
+{
+ struct intc_desc_int *d;
+
+ list_for_each_entry(d, &intc_list, list)
+ if (radix_tree_tagged(&d->tree, INTC_TAG_VIRQ_NEEDS_ALLOC))
+ intc_subgroup_map(d);
+}
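
intc_virq_handler() above masks and acks the parent interrupt, walks the list of subgroup VIRQs hung off it, and uses each child's TEST handle to see whether that sub-source is actually pending before dispatching it. The control flow, minus the register plumbing, is modelled below as a standalone sketch (the 'pending' flag stands in for the status-bit read):

#include <stdio.h>

struct virq_node {
	unsigned int virq;
	unsigned int pending;		/* stands in for the TEST register read */
	struct virq_node *next;
};

/* Model of the chained dispatch in intc_virq_handler(): walk the children
 * hanging off the parent IRQ and handle each one whose status bit is set.
 */
static void dispatch_parent(struct virq_node *head)
{
	struct virq_node *entry;

	for (entry = head; entry; entry = entry->next)
		if (entry->pending)
			printf("handling virq %u\n", entry->virq);
}

int main(void)
{
	struct virq_node c = { 34, 1, NULL };
	struct virq_node b = { 33, 0, &c };
	struct virq_node a = { 32, 1, &b };

	dispatch_parent(&a);	/* dispatches virq 32 and 34, skips 33 */
	return 0;
}
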
diff --git a/drivers/sh/maple/maple.c b/drivers/sh/maple/maple.c
index 4e8f57d4131f..1e20604257af 100644
--- a/drivers/sh/maple/maple.c
+++ b/drivers/sh/maple/maple.c
@@ -94,9 +94,9 @@ EXPORT_SYMBOL_GPL(maple_driver_unregister);
/* set hardware registers to enable next round of dma */
static void maple_dma_reset(void)
{
- ctrl_outl(MAPLE_MAGIC, MAPLE_RESET);
+ __raw_writel(MAPLE_MAGIC, MAPLE_RESET);
/* set trig type to 0 for software trigger, 1 for hardware (VBLANK) */
- ctrl_outl(1, MAPLE_TRIGTYPE);
+ __raw_writel(1, MAPLE_TRIGTYPE);
/*
* Maple system register
* bits 31 - 16 timeout in units of 20nsec
@@ -105,9 +105,9 @@ static void maple_dma_reset(void)
* bits 3 - 0 delay (in 1.3ms) between VBLANK and start of DMA
* max delay is 11
*/
- ctrl_outl(MAPLE_2MBPS | MAPLE_TIMEOUT(0xFFFF), MAPLE_SPEED);
- ctrl_outl(virt_to_phys(maple_sendbuf), MAPLE_DMAADDR);
- ctrl_outl(1, MAPLE_ENABLE);
+ __raw_writel(MAPLE_2MBPS | MAPLE_TIMEOUT(0xFFFF), MAPLE_SPEED);
+ __raw_writel(virt_to_phys(maple_sendbuf), MAPLE_DMAADDR);
+ __raw_writel(1, MAPLE_ENABLE);
}
/**
@@ -130,7 +130,7 @@ EXPORT_SYMBOL_GPL(maple_getcond_callback);
static int maple_dma_done(void)
{
- return (ctrl_inl(MAPLE_STATE) & 1) == 0;
+ return (__raw_readl(MAPLE_STATE) & 1) == 0;
}
static void maple_release_device(struct device *dev)
@@ -275,7 +275,7 @@ static void maple_send(void)
return;
/* disable DMA */
- ctrl_outl(0, MAPLE_ENABLE);
+ __raw_writel(0, MAPLE_ENABLE);
if (!list_empty(&maple_sentq))
goto finish;
@@ -450,7 +450,7 @@ static void maple_vblank_handler(struct work_struct *work)
if (!maple_dma_done())
return;
- ctrl_outl(0, MAPLE_ENABLE);
+ __raw_writel(0, MAPLE_ENABLE);
if (!list_empty(&maple_sentq))
goto finish;
@@ -636,7 +636,7 @@ static void maple_dma_handler(struct work_struct *work)
if (!maple_dma_done())
return;
- ctrl_outl(0, MAPLE_ENABLE);
+ __raw_writel(0, MAPLE_ENABLE);
if (!list_empty(&maple_sentq)) {
list_for_each_entry_safe(mq, nmq, &maple_sentq, list) {
mdev = mq->dev;
@@ -796,7 +796,7 @@ static int __init maple_bus_init(void)
int retval, i;
struct maple_device *mdev[MAPLE_PORTS];
- ctrl_outl(0, MAPLE_ENABLE);
+ __raw_writel(0, MAPLE_ENABLE);
retval = device_register(&maple_bus);
if (retval)
diff --git a/drivers/sh/pfc.c b/drivers/sh/pfc.c
index cf0303acab8e..75934e3ea34e 100644
--- a/drivers/sh/pfc.c
+++ b/drivers/sh/pfc.c
@@ -7,6 +7,8 @@
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*/
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/list.h>
@@ -559,10 +561,8 @@ static int sh_gpio_get_value(struct pinmux_info *gpioc, unsigned gpio)
struct pinmux_data_reg *dr = NULL;
int bit = 0;
- if (!gpioc || get_data_reg(gpioc, gpio, &dr, &bit) != 0) {
- BUG();
- return 0;
- }
+ if (!gpioc || get_data_reg(gpioc, gpio, &dr, &bit) != 0)
+ return -EINVAL;
return gpio_read_reg(dr->reg, dr->reg_width, 1, bit);
}
@@ -581,7 +581,7 @@ int register_pinmux(struct pinmux_info *pip)
{
struct gpio_chip *chip = &pip->chip;
- pr_info("sh pinmux: %s handling gpio %d -> %d\n",
+ pr_info("%s handling gpio %d -> %d\n",
pip->name, pip->first_gpio, pip->last_gpio);
setup_data_regs(pip);
@@ -602,3 +602,10 @@ int register_pinmux(struct pinmux_info *pip)
return gpiochip_add(chip);
}
+
+int unregister_pinmux(struct pinmux_info *pip)
+{
+ pr_info("%s deregistering\n", pip->name);
+
+ return gpiochip_remove(&pip->chip);
+}
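The pfc.c hunk above defines pr_fmt() and then drops the hand-written "sh pinmux: " prefix from the pr_info() call. A minimal sketch of that idiom with an illustrative module (the demo_* names and GPIO numbers are placeholders, not part of the patch): once pr_fmt() is defined before the printk helpers are pulled in, every pr_info()/pr_err() in the file is prefixed with the module name automatically.

/* Sketch only: "demo" stands in for the real module name supplied by kbuild. */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/init.h>

static int __init demo_init(void)
{
	/* With pr_fmt() above, this prints "demo: handling gpio 10 -> 20". */
	pr_info("handling gpio %d -> %d\n", 10, 20);
	return 0;
}
module_init(demo_init);

static void __exit demo_exit(void)
{
	pr_info("deregistering\n");
}
module_exit(demo_exit);

MODULE_LICENSE("GPL");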
diff --git a/drivers/spi/Kconfig b/drivers/spi/Kconfig
index 4b9eec68fad6..78f9fd02c1b2 100644
--- a/drivers/spi/Kconfig
+++ b/drivers/spi/Kconfig
@@ -329,6 +329,13 @@ config SPI_STMP3XXX
help
SPI driver for Freescale STMP37xx/378x SoC SSP interface
+config SPI_TEGRA
+ tristate "Nvidia Tegra SPI controller"
+ depends on ARCH_TEGRA
+ select TEGRA_SYSTEM_DMA
+ help
+ SPI driver for NVIDIA Tegra SoCs
+
config SPI_TOPCLIFF_PCH
tristate "Topcliff PCH SPI Controller"
depends on PCI
diff --git a/drivers/spi/Makefile b/drivers/spi/Makefile
index 557aaadf56b2..8bc1a5abac1f 100644
--- a/drivers/spi/Makefile
+++ b/drivers/spi/Makefile
@@ -39,6 +39,7 @@ obj-$(CONFIG_SPI_PPC4xx) += spi_ppc4xx.o
obj-$(CONFIG_SPI_S3C24XX_GPIO) += spi_s3c24xx_gpio.o
obj-$(CONFIG_SPI_S3C24XX) += spi_s3c24xx_hw.o
obj-$(CONFIG_SPI_S3C64XX) += spi_s3c64xx.o
+obj-$(CONFIG_SPI_TEGRA) += spi_tegra.o
obj-$(CONFIG_SPI_TOPCLIFF_PCH) += spi_topcliff_pch.o
obj-$(CONFIG_SPI_TXX9) += spi_txx9.o
obj-$(CONFIG_SPI_XILINX) += xilinx_spi.o
diff --git a/drivers/spi/atmel_spi.c b/drivers/spi/atmel_spi.c
index 154529aacc03..a067046c9da2 100644
--- a/drivers/spi/atmel_spi.c
+++ b/drivers/spi/atmel_spi.c
@@ -352,8 +352,12 @@ atmel_spi_dma_map_xfer(struct atmel_spi *as, struct spi_transfer *xfer)
xfer->tx_dma = xfer->rx_dma = INVALID_DMA_ADDRESS;
if (xfer->tx_buf) {
+ /* tx_buf is a const void *, but dma_map_single() needs a plain
+ * void * for the DMA mapping */
+ void *nonconst_tx = (void *)xfer->tx_buf;
+
xfer->tx_dma = dma_map_single(dev,
- (void *) xfer->tx_buf, xfer->len,
+ nonconst_tx, xfer->len,
DMA_TO_DEVICE);
if (dma_mapping_error(dev, xfer->tx_dma))
return -ENOMEM;
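The atmel_spi change above is only about that const cast: spi_transfer->tx_buf is declared const void *, while dma_map_single() takes a plain void *. A minimal sketch of the mapping pattern under the assumption of a generic struct device (demo_map_tx() is illustrative, not a helper from the driver):

#include <linux/dma-mapping.h>
#include <linux/spi/spi.h>

/* Map the TX side of one transfer for DMA, mirroring the pattern in the hunk. */
static int demo_map_tx(struct device *dev, struct spi_transfer *xfer)
{
	/* The cast only satisfies the dma_map_single() prototype; for
	 * DMA_TO_DEVICE the buffer contents are never written. */
	void *nonconst_tx = (void *)xfer->tx_buf;

	xfer->tx_dma = dma_map_single(dev, nonconst_tx, xfer->len, DMA_TO_DEVICE);
	if (dma_mapping_error(dev, xfer->tx_dma))
		return -ENOMEM;

	return 0;
}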
diff --git a/drivers/spi/spi.c b/drivers/spi/spi.c
index b5a78a1f4421..709c836607de 100644
--- a/drivers/spi/spi.c
+++ b/drivers/spi/spi.c
@@ -29,11 +29,6 @@
#include <linux/spi/spi.h>
#include <linux/of_spi.h>
-
-/* SPI bustype and spi_master class are registered after board init code
- * provides the SPI device tables, ensuring that both are present by the
- * time controller driver registration causes spi_devices to "enumerate".
- */
static void spidev_release(struct device *dev)
{
struct spi_device *spi = to_spi_device(dev);
@@ -202,11 +197,16 @@ EXPORT_SYMBOL_GPL(spi_register_driver);
struct boardinfo {
struct list_head list;
- unsigned n_board_info;
- struct spi_board_info board_info[0];
+ struct spi_board_info board_info;
};
static LIST_HEAD(board_list);
+static LIST_HEAD(spi_master_list);
+
+/*
+ * Used to protect add/del operations on the board_info list and
+ * spi_master list, and their matching process
+ */
static DEFINE_MUTEX(board_lock);
/**
@@ -300,16 +300,16 @@ int spi_add_device(struct spi_device *spi)
*/
status = spi_setup(spi);
if (status < 0) {
- dev_err(dev, "can't %s %s, status %d\n",
- "setup", dev_name(&spi->dev), status);
+ dev_err(dev, "can't setup %s, status %d\n",
+ dev_name(&spi->dev), status);
goto done;
}
/* Device may be bound to an active driver when this returns */
status = device_add(&spi->dev);
if (status < 0)
- dev_err(dev, "can't %s %s, status %d\n",
- "add", dev_name(&spi->dev), status);
+ dev_err(dev, "can't add %s, status %d\n",
+ dev_name(&spi->dev), status);
else
dev_dbg(dev, "registered child %s\n", dev_name(&spi->dev));
@@ -371,6 +371,20 @@ struct spi_device *spi_new_device(struct spi_master *master,
}
EXPORT_SYMBOL_GPL(spi_new_device);
+static void spi_match_master_to_boardinfo(struct spi_master *master,
+ struct spi_board_info *bi)
+{
+ struct spi_device *dev;
+
+ if (master->bus_num != bi->bus_num)
+ return;
+
+ dev = spi_new_device(master, bi);
+ if (!dev)
+ dev_err(master->dev.parent, "can't create new device for %s\n",
+ bi->modalias);
+}
+
/**
* spi_register_board_info - register SPI devices for a given board
* @info: array of chip descriptors
@@ -393,43 +407,25 @@ EXPORT_SYMBOL_GPL(spi_new_device);
int __init
spi_register_board_info(struct spi_board_info const *info, unsigned n)
{
- struct boardinfo *bi;
+ struct boardinfo *bi;
+ int i;
- bi = kmalloc(sizeof(*bi) + n * sizeof *info, GFP_KERNEL);
+ bi = kzalloc(n * sizeof(*bi), GFP_KERNEL);
if (!bi)
return -ENOMEM;
- bi->n_board_info = n;
- memcpy(bi->board_info, info, n * sizeof *info);
- mutex_lock(&board_lock);
- list_add_tail(&bi->list, &board_list);
- mutex_unlock(&board_lock);
- return 0;
-}
+ for (i = 0; i < n; i++, bi++, info++) {
+ struct spi_master *master;
-/* FIXME someone should add support for a __setup("spi", ...) that
- * creates board info from kernel command lines
- */
-
-static void scan_boardinfo(struct spi_master *master)
-{
- struct boardinfo *bi;
-
- mutex_lock(&board_lock);
- list_for_each_entry(bi, &board_list, list) {
- struct spi_board_info *chip = bi->board_info;
- unsigned n;
-
- for (n = bi->n_board_info; n > 0; n--, chip++) {
- if (chip->bus_num != master->bus_num)
- continue;
- /* NOTE: this relies on spi_new_device to
- * issue diagnostics when given bogus inputs
- */
- (void) spi_new_device(master, chip);
- }
+ memcpy(&bi->board_info, info, sizeof(*info));
+ mutex_lock(&board_lock);
+ list_add_tail(&bi->list, &board_list);
+ list_for_each_entry(master, &spi_master_list, list)
+ spi_match_master_to_boardinfo(master, &bi->board_info);
+ mutex_unlock(&board_lock);
}
- mutex_unlock(&board_lock);
+
+ return 0;
}
/*-------------------------------------------------------------------------*/
@@ -512,6 +508,7 @@ int spi_register_master(struct spi_master *master)
{
static atomic_t dyn_bus_id = ATOMIC_INIT((1<<15) - 1);
struct device *dev = master->dev.parent;
+ struct boardinfo *bi;
int status = -ENODEV;
int dynamic = 0;
@@ -547,8 +544,12 @@ int spi_register_master(struct spi_master *master)
dev_dbg(dev, "registered master %s%s\n", dev_name(&master->dev),
dynamic ? " (dynamic)" : "");
- /* populate children from any spi device tables */
- scan_boardinfo(master);
+ mutex_lock(&board_lock);
+ list_add_tail(&master->list, &spi_master_list);
+ list_for_each_entry(bi, &board_list, list)
+ spi_match_master_to_boardinfo(master, &bi->board_info);
+ mutex_unlock(&board_lock);
+
status = 0;
/* Register devices from the device tree */
@@ -579,7 +580,12 @@ void spi_unregister_master(struct spi_master *master)
{
int dummy;
- dummy = device_for_each_child(&master->dev, NULL, __unregister);
+ mutex_lock(&board_lock);
+ list_del(&master->list);
+ mutex_unlock(&board_lock);
+
+ dummy = device_for_each_child(master->dev.parent, &master->dev,
+ __unregister);
device_unregister(&master->dev);
}
EXPORT_SYMBOL_GPL(spi_unregister_master);
@@ -652,7 +658,7 @@ int spi_setup(struct spi_device *spi)
*/
bad_bits = spi->mode & ~spi->master->mode_bits;
if (bad_bits) {
- dev_dbg(&spi->dev, "setup: unsupported mode bits %x\n",
+ dev_err(&spi->dev, "setup: unsupported mode bits %x\n",
bad_bits);
return -EINVAL;
}
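The spi.c rework above stores one spi_board_info per boardinfo node and matches it against masters immediately, from both spi_register_board_info() and spi_register_master(), under board_lock. A minimal sketch of the board-code side that feeds this path ("example-dev", bus 1 and the initcall level are placeholders):

#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/spi/spi.h>

/* Illustrative board table; the modalias must match a registered SPI driver. */
static struct spi_board_info demo_board_info[] __initdata = {
	{
		.modalias	= "example-dev",
		.bus_num	= 1,		/* compared against master->bus_num */
		.chip_select	= 0,
		.max_speed_hz	= 1000000,
		.mode		= SPI_MODE_0,
	},
};

static int __init demo_board_init(void)
{
	/* Each entry is copied into its own node; if a master for bus 1 already
	 * exists a spi_device is created now, otherwise when it registers later. */
	return spi_register_board_info(demo_board_info,
				       ARRAY_SIZE(demo_board_info));
}
arch_initcall(demo_board_init);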
diff --git a/drivers/spi/spi_bfin5xx.c b/drivers/spi/spi_bfin5xx.c
index ab483a0ec6d0..3f223511127b 100644
--- a/drivers/spi/spi_bfin5xx.c
+++ b/drivers/spi/spi_bfin5xx.c
@@ -504,6 +504,15 @@ static irqreturn_t bfin_spi_dma_irq_handler(int irq, void *dev_id)
"in dma_irq_handler dmastat:0x%x spistat:0x%x\n",
dmastat, spistat);
+ if (drv_data->rx != NULL) {
+ u16 cr = read_CTRL(drv_data);
+ /* discard old RX data and clear RXS */
+ bfin_spi_dummy_read(drv_data);
+ write_CTRL(drv_data, cr & ~BIT_CTL_ENABLE); /* Disable SPI */
+ write_CTRL(drv_data, cr & ~BIT_CTL_TIMOD); /* Restore State */
+ write_STAT(drv_data, BIT_STAT_CLR); /* Clear Status */
+ }
+
clear_dma_irqstat(drv_data->dma_channel);
/*
@@ -1099,12 +1108,15 @@ static int bfin_spi_setup(struct spi_device *spi)
}
if (chip->chip_select_num >= MAX_CTRL_CS) {
- ret = gpio_request(chip->cs_gpio, spi->modalias);
- if (ret) {
- dev_err(&spi->dev, "gpio_request() error\n");
- goto pin_error;
+ /* Only request on first setup */
+ if (spi_get_ctldata(spi) == NULL) {
+ ret = gpio_request(chip->cs_gpio, spi->modalias);
+ if (ret) {
+ dev_err(&spi->dev, "gpio_request() error\n");
+ goto pin_error;
+ }
+ gpio_direction_output(chip->cs_gpio, 1);
}
- gpio_direction_output(chip->cs_gpio, 1);
}
dev_dbg(&spi->dev, "setup spi chip %s, width is %d, dma is %d\n",
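The spi_bfin5xx hunk above guards gpio_request() with spi_get_ctldata() because the core may call spi_setup() more than once for the same device, and a second request on an already-owned GPIO fails. A minimal sketch of that claim-once pattern using per-device controller state (struct demo_chip and the GPIO number are hypothetical, not the driver's real chip data):

#include <linux/device.h>
#include <linux/gpio.h>
#include <linux/slab.h>
#include <linux/spi/spi.h>

struct demo_chip {			/* hypothetical per-device state */
	unsigned cs_gpio;
};

static int demo_setup(struct spi_device *spi)
{
	struct demo_chip *chip = spi_get_ctldata(spi);

	/* Allocate state and claim the GPIO only on the first setup() call;
	 * repeated calls find the state via ctldata and skip the request. */
	if (!chip) {
		chip = kzalloc(sizeof(*chip), GFP_KERNEL);
		if (!chip)
			return -ENOMEM;

		chip->cs_gpio = 10;	/* placeholder GPIO number */
		if (gpio_request(chip->cs_gpio, dev_name(&spi->dev))) {
			kfree(chip);
			return -EBUSY;
		}
		gpio_direction_output(chip->cs_gpio, 1);
		spi_set_ctldata(spi, chip);
	}

	return 0;
}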
diff --git a/drivers/spi/spi_tegra.c b/drivers/spi/spi_tegra.c
new file mode 100644
index 000000000000..bb7df02a5472
--- /dev/null
+++ b/drivers/spi/spi_tegra.c
@@ -0,0 +1,618 @@
+/*
+ * Driver for the NVIDIA Tegra SPI controller.
+ *
+ * Copyright (C) 2010 Google, Inc.
+ *
+ * Author:
+ * Erik Gilling <konkers@android.com>
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/err.h>
+#include <linux/platform_device.h>
+#include <linux/io.h>
+#include <linux/dma-mapping.h>
+#include <linux/dmapool.h>
+#include <linux/clk.h>
+#include <linux/interrupt.h>
+#include <linux/delay.h>
+
+#include <linux/spi/spi.h>
+
+#include <mach/dma.h>
+
+#define SLINK_COMMAND 0x000
+#define SLINK_BIT_LENGTH(x) (((x) & 0x1f) << 0)
+#define SLINK_WORD_SIZE(x) (((x) & 0x1f) << 5)
+#define SLINK_BOTH_EN (1 << 10)
+#define SLINK_CS_SW (1 << 11)
+#define SLINK_CS_VALUE (1 << 12)
+#define SLINK_CS_POLARITY (1 << 13)
+#define SLINK_IDLE_SDA_DRIVE_LOW (0 << 16)
+#define SLINK_IDLE_SDA_DRIVE_HIGH (1 << 16)
+#define SLINK_IDLE_SDA_PULL_LOW (2 << 16)
+#define SLINK_IDLE_SDA_PULL_HIGH (3 << 16)
+#define SLINK_IDLE_SDA_MASK (3 << 16)
+#define SLINK_CS_POLARITY1 (1 << 20)
+#define SLINK_CK_SDA (1 << 21)
+#define SLINK_CS_POLARITY2 (1 << 22)
+#define SLINK_CS_POLARITY3 (1 << 23)
+#define SLINK_IDLE_SCLK_DRIVE_LOW (0 << 24)
+#define SLINK_IDLE_SCLK_DRIVE_HIGH (1 << 24)
+#define SLINK_IDLE_SCLK_PULL_LOW (2 << 24)
+#define SLINK_IDLE_SCLK_PULL_HIGH (3 << 24)
+#define SLINK_IDLE_SCLK_MASK (3 << 24)
+#define SLINK_M_S (1 << 28)
+#define SLINK_WAIT (1 << 29)
+#define SLINK_GO (1 << 30)
+#define SLINK_ENB (1 << 31)
+
+#define SLINK_COMMAND2 0x004
+#define SLINK_LSBFE (1 << 0)
+#define SLINK_SSOE (1 << 1)
+#define SLINK_SPIE (1 << 4)
+#define SLINK_BIDIROE (1 << 6)
+#define SLINK_MODFEN (1 << 7)
+#define SLINK_INT_SIZE(x) (((x) & 0x1f) << 8)
+#define SLINK_CS_ACTIVE_BETWEEN (1 << 17)
+#define SLINK_SS_EN_CS(x) (((x) & 0x3) << 18)
+#define SLINK_SS_SETUP(x) (((x) & 0x3) << 20)
+#define SLINK_FIFO_REFILLS_0 (0 << 22)
+#define SLINK_FIFO_REFILLS_1 (1 << 22)
+#define SLINK_FIFO_REFILLS_2 (2 << 22)
+#define SLINK_FIFO_REFILLS_3 (3 << 22)
+#define SLINK_FIFO_REFILLS_MASK (3 << 22)
+#define SLINK_WAIT_PACK_INT(x) (((x) & 0x7) << 26)
+#define SLINK_SPC0 (1 << 29)
+#define SLINK_TXEN (1 << 30)
+#define SLINK_RXEN (1 << 31)
+
+#define SLINK_STATUS 0x008
+#define SLINK_COUNT(val) (((val) >> 0) & 0x1f)
+#define SLINK_WORD(val) (((val) >> 5) & 0x1f)
+#define SLINK_BLK_CNT(val) (((val) >> 0) & 0xffff)
+#define SLINK_MODF (1 << 16)
+#define SLINK_RX_UNF (1 << 18)
+#define SLINK_TX_OVF (1 << 19)
+#define SLINK_TX_FULL (1 << 20)
+#define SLINK_TX_EMPTY (1 << 21)
+#define SLINK_RX_FULL (1 << 22)
+#define SLINK_RX_EMPTY (1 << 23)
+#define SLINK_TX_UNF (1 << 24)
+#define SLINK_RX_OVF (1 << 25)
+#define SLINK_TX_FLUSH (1 << 26)
+#define SLINK_RX_FLUSH (1 << 27)
+#define SLINK_SCLK (1 << 28)
+#define SLINK_ERR (1 << 29)
+#define SLINK_RDY (1 << 30)
+#define SLINK_BSY (1 << 31)
+
+#define SLINK_MAS_DATA 0x010
+#define SLINK_SLAVE_DATA 0x014
+
+#define SLINK_DMA_CTL 0x018
+#define SLINK_DMA_BLOCK_SIZE(x) (((x) & 0xffff) << 0)
+#define SLINK_TX_TRIG_1 (0 << 16)
+#define SLINK_TX_TRIG_4 (1 << 16)
+#define SLINK_TX_TRIG_8 (2 << 16)
+#define SLINK_TX_TRIG_16 (3 << 16)
+#define SLINK_TX_TRIG_MASK (3 << 16)
+#define SLINK_RX_TRIG_1 (0 << 18)
+#define SLINK_RX_TRIG_4 (1 << 18)
+#define SLINK_RX_TRIG_8 (2 << 18)
+#define SLINK_RX_TRIG_16 (3 << 18)
+#define SLINK_RX_TRIG_MASK (3 << 18)
+#define SLINK_PACKED (1 << 20)
+#define SLINK_PACK_SIZE_4 (0 << 21)
+#define SLINK_PACK_SIZE_8 (1 << 21)
+#define SLINK_PACK_SIZE_16 (2 << 21)
+#define SLINK_PACK_SIZE_32 (3 << 21)
+#define SLINK_PACK_SIZE_MASK (3 << 21)
+#define SLINK_IE_TXC (1 << 26)
+#define SLINK_IE_RXC (1 << 27)
+#define SLINK_DMA_EN (1 << 31)
+
+#define SLINK_STATUS2 0x01c
+#define SLINK_TX_FIFO_EMPTY_COUNT(val) (((val) >> 0) & 0x3f)
+#define SLINK_RX_FIFO_FULL_COUNT(val) (((val) >> 16) & 0x3f)
+
+#define SLINK_TX_FIFO 0x100
+#define SLINK_RX_FIFO 0x180
+
+static const unsigned long spi_tegra_req_sels[] = {
+ TEGRA_DMA_REQ_SEL_SL2B1,
+ TEGRA_DMA_REQ_SEL_SL2B2,
+ TEGRA_DMA_REQ_SEL_SL2B3,
+ TEGRA_DMA_REQ_SEL_SL2B4,
+};
+
+#define BB_LEN 32
+
+struct spi_tegra_data {
+ struct spi_master *master;
+ struct platform_device *pdev;
+ spinlock_t lock;
+
+ struct clk *clk;
+ void __iomem *base;
+ unsigned long phys;
+
+ u32 cur_speed;
+
+ struct list_head queue;
+ struct spi_transfer *cur;
+ unsigned cur_pos;
+ unsigned cur_len;
+ unsigned cur_bytes_per_word;
+
+ /* The Tegra SPI controller has a bug which causes the first word
+ * in PIO transactions to be garbage. Since packed DMA transactions
+ * require transfers to be 4-byte aligned, we need a bounce buffer
+ * for the generic case.
+ */
+ struct tegra_dma_req rx_dma_req;
+ struct tegra_dma_channel *rx_dma;
+ u32 *rx_bb;
+ dma_addr_t rx_bb_phys;
+};
+
+
+static inline unsigned long spi_tegra_readl(struct spi_tegra_data *tspi,
+ unsigned long reg)
+{
+ return readl(tspi->base + reg);
+}
+
+static inline void spi_tegra_writel(struct spi_tegra_data *tspi,
+ unsigned long val,
+ unsigned long reg)
+{
+ writel(val, tspi->base + reg);
+}
+
+static void spi_tegra_go(struct spi_tegra_data *tspi)
+{
+ unsigned long val;
+
+ wmb();
+
+ val = spi_tegra_readl(tspi, SLINK_DMA_CTL);
+ val &= ~SLINK_DMA_BLOCK_SIZE(~0) & ~SLINK_DMA_EN;
+ val |= SLINK_DMA_BLOCK_SIZE(tspi->rx_dma_req.size / 4 - 1);
+ spi_tegra_writel(tspi, val, SLINK_DMA_CTL);
+
+ tegra_dma_enqueue_req(tspi->rx_dma, &tspi->rx_dma_req);
+
+ val |= SLINK_DMA_EN;
+ spi_tegra_writel(tspi, val, SLINK_DMA_CTL);
+}
+
+static unsigned spi_tegra_fill_tx_fifo(struct spi_tegra_data *tspi,
+ struct spi_transfer *t)
+{
+ unsigned len = min(t->len - tspi->cur_pos, BB_LEN *
+ tspi->cur_bytes_per_word);
+ u8 *tx_buf = (u8 *)t->tx_buf + tspi->cur_pos;
+ int i, j;
+ unsigned long val;
+
+ val = spi_tegra_readl(tspi, SLINK_COMMAND);
+ val &= ~SLINK_WORD_SIZE(~0);
+ val |= SLINK_WORD_SIZE(len / tspi->cur_bytes_per_word - 1);
+ spi_tegra_writel(tspi, val, SLINK_COMMAND);
+
+ for (i = 0; i < len; i += tspi->cur_bytes_per_word) {
+ val = 0;
+ for (j = 0; j < tspi->cur_bytes_per_word; j++)
+ val |= tx_buf[i + j] << j * 8;
+
+ spi_tegra_writel(tspi, val, SLINK_TX_FIFO);
+ }
+
+ tspi->rx_dma_req.size = len / tspi->cur_bytes_per_word * 4;
+
+ return len;
+}
+
+static unsigned spi_tegra_drain_rx_fifo(struct spi_tegra_data *tspi,
+ struct spi_transfer *t)
+{
+ unsigned len = tspi->cur_len;
+ u8 *rx_buf = (u8 *)t->rx_buf + tspi->cur_pos;
+ int i, j;
+ unsigned long val;
+
+ for (i = 0; i < len; i += tspi->cur_bytes_per_word) {
+ val = tspi->rx_bb[i / tspi->cur_bytes_per_word];
+ for (j = 0; j < tspi->cur_bytes_per_word; j++)
+ rx_buf[i + j] = (val >> (j * 8)) & 0xff;
+ }
+
+ return len;
+}
+
+static void spi_tegra_start_transfer(struct spi_device *spi,
+ struct spi_transfer *t)
+{
+ struct spi_tegra_data *tspi = spi_master_get_devdata(spi->master);
+ u32 speed;
+ u8 bits_per_word;
+ unsigned long val;
+
+ speed = t->speed_hz ? t->speed_hz : spi->max_speed_hz;
+ bits_per_word = t->bits_per_word ? t->bits_per_word :
+ spi->bits_per_word;
+
+ tspi->cur_bytes_per_word = (bits_per_word - 1) / 8 + 1;
+
+ if (speed != tspi->cur_speed)
+ clk_set_rate(tspi->clk, speed);
+
+ if (tspi->cur_speed == 0)
+ clk_enable(tspi->clk);
+
+ tspi->cur_speed = speed;
+
+ val = spi_tegra_readl(tspi, SLINK_COMMAND2);
+ val &= ~SLINK_SS_EN_CS(~0) | SLINK_RXEN | SLINK_TXEN;
+ if (t->rx_buf)
+ val |= SLINK_RXEN;
+ if (t->tx_buf)
+ val |= SLINK_TXEN;
+ val |= SLINK_SS_EN_CS(spi->chip_select);
+ val |= SLINK_SPIE;
+ spi_tegra_writel(tspi, val, SLINK_COMMAND2);
+
+ val = spi_tegra_readl(tspi, SLINK_COMMAND);
+ val &= ~SLINK_BIT_LENGTH(~0);
+ val |= SLINK_BIT_LENGTH(bits_per_word - 1);
+
+ /* FIXME: should probably control CS manually so that we can be sure
+ * it does not go low between transfers and to support delay_usecs
+ * correctly.
+ */
+ val &= ~SLINK_IDLE_SCLK_MASK & ~SLINK_CK_SDA & ~SLINK_CS_SW;
+
+ if (spi->mode & SPI_CPHA)
+ val |= SLINK_CK_SDA;
+
+ if (spi->mode & SPI_CPOL)
+ val |= SLINK_IDLE_SCLK_DRIVE_HIGH;
+ else
+ val |= SLINK_IDLE_SCLK_DRIVE_LOW;
+
+ val |= SLINK_M_S;
+
+ spi_tegra_writel(tspi, val, SLINK_COMMAND);
+
+ spi_tegra_writel(tspi, SLINK_RX_FLUSH | SLINK_TX_FLUSH, SLINK_STATUS);
+
+ tspi->cur = t;
+ tspi->cur_pos = 0;
+ tspi->cur_len = spi_tegra_fill_tx_fifo(tspi, t);
+
+ spi_tegra_go(tspi);
+}
+
+static void spi_tegra_start_message(struct spi_device *spi,
+ struct spi_message *m)
+{
+ struct spi_transfer *t;
+
+ m->actual_length = 0;
+ m->status = 0;
+
+ t = list_first_entry(&m->transfers, struct spi_transfer, transfer_list);
+ spi_tegra_start_transfer(spi, t);
+}
+
+static void tegra_spi_rx_dma_complete(struct tegra_dma_req *req)
+{
+ struct spi_tegra_data *tspi = req->dev;
+ unsigned long flags;
+ struct spi_message *m;
+ struct spi_device *spi;
+ int timeout = 0;
+ unsigned long val;
+
+ /* the SPI controller may come back with both the BSY and RDY bits
+ * set. In this case we need to wait for the BSY bit to clear so
+ * that we are sure the DMA is finished. 1000 reads was empirically
+ * determined to be long enough.
+ */
+ while (timeout++ < 1000) {
+ if (!(spi_tegra_readl(tspi, SLINK_STATUS) & SLINK_BSY))
+ break;
+ }
+
+ spin_lock_irqsave(&tspi->lock, flags);
+
+ val = spi_tegra_readl(tspi, SLINK_STATUS);
+ val |= SLINK_RDY;
+ spi_tegra_writel(tspi, val, SLINK_STATUS);
+
+ m = list_first_entry(&tspi->queue, struct spi_message, queue);
+
+ if (timeout >= 1000)
+ m->status = -EIO;
+
+ spi = m->state;
+
+ tspi->cur_pos += spi_tegra_drain_rx_fifo(tspi, tspi->cur);
+ m->actual_length += tspi->cur_pos;
+
+ if (tspi->cur_pos < tspi->cur->len) {
+ tspi->cur_len = spi_tegra_fill_tx_fifo(tspi, tspi->cur);
+ spi_tegra_go(tspi);
+ } else if (!list_is_last(&tspi->cur->transfer_list,
+ &m->transfers)) {
+ tspi->cur = list_first_entry(&tspi->cur->transfer_list,
+ struct spi_transfer,
+ transfer_list);
+ spi_tegra_start_transfer(spi, tspi->cur);
+ } else {
+ list_del(&m->queue);
+
+ m->complete(m->context);
+
+ if (!list_empty(&tspi->queue)) {
+ m = list_first_entry(&tspi->queue, struct spi_message,
+ queue);
+ spi = m->state;
+ spi_tegra_start_message(spi, m);
+ } else {
+ clk_disable(tspi->clk);
+ tspi->cur_speed = 0;
+ }
+ }
+
+ spin_unlock_irqrestore(&tspi->lock, flags);
+}
+
+static int spi_tegra_setup(struct spi_device *spi)
+{
+ struct spi_tegra_data *tspi = spi_master_get_devdata(spi->master);
+ unsigned long cs_bit;
+ unsigned long val;
+ unsigned long flags;
+
+ dev_dbg(&spi->dev, "setup %d bpw, %scpol, %scpha, %dHz\n",
+ spi->bits_per_word,
+ spi->mode & SPI_CPOL ? "" : "~",
+ spi->mode & SPI_CPHA ? "" : "~",
+ spi->max_speed_hz);
+
+
+ switch (spi->chip_select) {
+ case 0:
+ cs_bit = SLINK_CS_POLARITY;
+ break;
+
+ case 1:
+ cs_bit = SLINK_CS_POLARITY1;
+ break;
+
+ case 2:
+ cs_bit = SLINK_CS_POLARITY2;
+ break;
+
+ case 3:
+ cs_bit = SLINK_CS_POLARITY3;
+ break;
+
+ default:
+ return -EINVAL;
+ }
+
+ spin_lock_irqsave(&tspi->lock, flags);
+
+ val = spi_tegra_readl(tspi, SLINK_COMMAND);
+ if (spi->mode & SPI_CS_HIGH)
+ val |= cs_bit;
+ else
+ val &= ~cs_bit;
+ spi_tegra_writel(tspi, val, SLINK_COMMAND);
+
+ spin_unlock_irqrestore(&tspi->lock, flags);
+
+ return 0;
+}
+
+static int spi_tegra_transfer(struct spi_device *spi, struct spi_message *m)
+{
+ struct spi_tegra_data *tspi = spi_master_get_devdata(spi->master);
+ struct spi_transfer *t;
+ unsigned long flags;
+ int was_empty;
+
+ if (list_empty(&m->transfers) || !m->complete)
+ return -EINVAL;
+
+ list_for_each_entry(t, &m->transfers, transfer_list) {
+ if (t->bits_per_word < 0 || t->bits_per_word > 32)
+ return -EINVAL;
+
+ if (t->len == 0)
+ return -EINVAL;
+
+ if (!t->rx_buf && !t->tx_buf)
+ return -EINVAL;
+ }
+
+ m->state = spi;
+
+ spin_lock_irqsave(&tspi->lock, flags);
+ was_empty = list_empty(&tspi->queue);
+ list_add_tail(&m->queue, &tspi->queue);
+
+ if (was_empty)
+ spi_tegra_start_message(spi, m);
+
+ spin_unlock_irqrestore(&tspi->lock, flags);
+
+ return 0;
+}
+
+static int __init spi_tegra_probe(struct platform_device *pdev)
+{
+ struct spi_master *master;
+ struct spi_tegra_data *tspi;
+ struct resource *r;
+ int ret;
+
+ master = spi_alloc_master(&pdev->dev, sizeof *tspi);
+ if (master == NULL) {
+ dev_err(&pdev->dev, "master allocation failed\n");
+ return -ENOMEM;
+ }
+
+ /* the spi->mode bits understood by this driver: */
+ master->mode_bits = SPI_CPOL | SPI_CPHA | SPI_CS_HIGH;
+
+ master->bus_num = pdev->id;
+
+ master->setup = spi_tegra_setup;
+ master->transfer = spi_tegra_transfer;
+ master->num_chipselect = 4;
+
+ dev_set_drvdata(&pdev->dev, master);
+ tspi = spi_master_get_devdata(master);
+ tspi->master = master;
+ tspi->pdev = pdev;
+ spin_lock_init(&tspi->lock);
+
+ r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (r == NULL) {
+ ret = -ENODEV;
+ goto err0;
+ }
+
+ if (!request_mem_region(r->start, (r->end - r->start) + 1,
+ dev_name(&pdev->dev))) {
+ ret = -EBUSY;
+ goto err0;
+ }
+
+ tspi->phys = r->start;
+ tspi->base = ioremap(r->start, r->end - r->start + 1);
+ if (!tspi->base) {
+ dev_err(&pdev->dev, "can't ioremap iomem\n");
+ ret = -ENOMEM;
+ goto err1;
+ }
+
+ tspi->clk = clk_get(&pdev->dev, NULL);
+ if (IS_ERR_OR_NULL(tspi->clk)) {
+ dev_err(&pdev->dev, "can not get clock\n");
+ ret = PTR_ERR(tspi->clk);
+ goto err2;
+ }
+
+ INIT_LIST_HEAD(&tspi->queue);
+
+ tspi->rx_dma = tegra_dma_allocate_channel(TEGRA_DMA_MODE_ONESHOT);
+ if (!tspi->rx_dma) {
+ dev_err(&pdev->dev, "can not allocate rx dma channel\n");
+ ret = -ENODEV;
+ goto err3;
+ }
+
+ tspi->rx_bb = dma_alloc_coherent(&pdev->dev, sizeof(u32) * BB_LEN,
+ &tspi->rx_bb_phys, GFP_KERNEL);
+ if (!tspi->rx_bb) {
+ dev_err(&pdev->dev, "can not allocate rx bounce buffer\n");
+ ret = -ENOMEM;
+ goto err4;
+ }
+
+ tspi->rx_dma_req.complete = tegra_spi_rx_dma_complete;
+ tspi->rx_dma_req.to_memory = 1;
+ tspi->rx_dma_req.dest_addr = tspi->rx_bb_phys;
+ tspi->rx_dma_req.dest_bus_width = 32;
+ tspi->rx_dma_req.source_addr = tspi->phys + SLINK_RX_FIFO;
+ tspi->rx_dma_req.source_bus_width = 32;
+ tspi->rx_dma_req.source_wrap = 4;
+ tspi->rx_dma_req.req_sel = spi_tegra_req_sels[pdev->id];
+ tspi->rx_dma_req.dev = tspi;
+
+ ret = spi_register_master(master);
+
+ if (ret < 0)
+ goto err5;
+
+ return ret;
+
+err5:
+ dma_free_coherent(&pdev->dev, sizeof(u32) * BB_LEN,
+ tspi->rx_bb, tspi->rx_bb_phys);
+err4:
+ tegra_dma_free_channel(tspi->rx_dma);
+err3:
+ clk_put(tspi->clk);
+err2:
+ iounmap(tspi->base);
+err1:
+ release_mem_region(r->start, (r->end - r->start) + 1);
+err0:
+ spi_master_put(master);
+ return ret;
+}
+
+static int __devexit spi_tegra_remove(struct platform_device *pdev)
+{
+ struct spi_master *master;
+ struct spi_tegra_data *tspi;
+ struct resource *r;
+
+ master = dev_get_drvdata(&pdev->dev);
+ tspi = spi_master_get_devdata(master);
+
+ tegra_dma_free_channel(tspi->rx_dma);
+
+ dma_free_coherent(&pdev->dev, sizeof(u32) * BB_LEN,
+ tspi->rx_bb, tspi->rx_bb_phys);
+
+ clk_put(tspi->clk);
+ iounmap(tspi->base);
+
+ spi_master_put(master);
+ r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ release_mem_region(r->start, (r->end - r->start) + 1);
+
+ return 0;
+}
+
+MODULE_ALIAS("platform:spi_tegra");
+
+static struct platform_driver spi_tegra_driver = {
+ .driver = {
+ .name = "spi_tegra",
+ .owner = THIS_MODULE,
+ },
+ .remove = __devexit_p(spi_tegra_remove),
+};
+
+static int __init spi_tegra_init(void)
+{
+ return platform_driver_probe(&spi_tegra_driver, spi_tegra_probe);
+}
+module_init(spi_tegra_init);
+
+static void __exit spi_tegra_exit(void)
+{
+ platform_driver_unregister(&spi_tegra_driver);
+}
+module_exit(spi_tegra_exit);
+
+MODULE_LICENSE("GPL");
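spi_tegra above implements the asynchronous ->transfer() hook: messages are queued under tspi->lock and the queue is drained from the RX DMA completion callback. A minimal sketch of a client driving that path through the core spi_async() API (demo_xfer() and its buffers are illustrative; in practice spi_sync() wraps exactly this completion dance):

#include <linux/completion.h>
#include <linux/spi/spi.h>

static void demo_complete(void *context)
{
	complete(context);	/* called from the driver's DMA completion path */
}

/* One full-duplex transfer submitted asynchronously, then waited for. */
static int demo_xfer(struct spi_device *spi, void *tx, void *rx, size_t len)
{
	DECLARE_COMPLETION_ONSTACK(done);
	struct spi_transfer t = {
		.tx_buf	= tx,
		.rx_buf	= rx,
		.len	= len,
	};
	struct spi_message m;
	int ret;

	spi_message_init(&m);
	spi_message_add_tail(&t, &m);
	m.complete = demo_complete;
	m.context = &done;

	ret = spi_async(spi, &m);	/* lands in spi_tegra_transfer() */
	if (ret)
		return ret;

	wait_for_completion(&done);
	return m.status;
}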
diff --git a/drivers/ssb/b43_pci_bridge.c b/drivers/ssb/b43_pci_bridge.c
index ef9c6a04ad8f..744d3f6e4709 100644
--- a/drivers/ssb/b43_pci_bridge.c
+++ b/drivers/ssb/b43_pci_bridge.c
@@ -24,6 +24,7 @@ static const struct pci_device_id b43_pci_bridge_tbl[] = {
{ PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, 0x4312) },
{ PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, 0x4315) },
{ PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, 0x4318) },
+ { PCI_DEVICE(PCI_VENDOR_ID_BCM_GVC, 0x4318) },
{ PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, 0x4319) },
{ PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, 0x4320) },
{ PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, 0x4321) },
diff --git a/drivers/staging/Kconfig b/drivers/staging/Kconfig
index 335311a98fdc..5eafdf435550 100644
--- a/drivers/staging/Kconfig
+++ b/drivers/staging/Kconfig
@@ -51,6 +51,10 @@ source "drivers/staging/cx25821/Kconfig"
source "drivers/staging/tm6000/Kconfig"
+source "drivers/staging/cpia/Kconfig"
+
+source "drivers/staging/stradis/Kconfig"
+
source "drivers/staging/usbip/Kconfig"
source "drivers/staging/winbond/Kconfig"
@@ -59,7 +63,7 @@ source "drivers/staging/wlan-ng/Kconfig"
source "drivers/staging/echo/Kconfig"
-source "drivers/staging/otus/Kconfig"
+source "drivers/staging/brcm80211/Kconfig"
source "drivers/staging/rt2860/Kconfig"
@@ -67,24 +71,26 @@ source "drivers/staging/rt2870/Kconfig"
source "drivers/staging/comedi/Kconfig"
+source "drivers/staging/olpc_dcon/Kconfig"
+
source "drivers/staging/asus_oled/Kconfig"
source "drivers/staging/panel/Kconfig"
source "drivers/staging/rtl8187se/Kconfig"
-source "drivers/staging/rtl8192su/Kconfig"
-
source "drivers/staging/rtl8192u/Kconfig"
source "drivers/staging/rtl8192e/Kconfig"
-source "drivers/staging/frontier/Kconfig"
+source "drivers/staging/rtl8712/Kconfig"
-source "drivers/staging/dream/Kconfig"
+source "drivers/staging/frontier/Kconfig"
source "drivers/staging/pohmelfs/Kconfig"
+source "drivers/staging/autofs/Kconfig"
+
source "drivers/staging/phison/Kconfig"
source "drivers/staging/line6/Kconfig"
@@ -139,12 +145,12 @@ source "drivers/staging/adis16255/Kconfig"
source "drivers/staging/xgifb/Kconfig"
-source "drivers/staging/mrst-touchscreen/Kconfig"
-
source "drivers/staging/msm/Kconfig"
source "drivers/staging/lirc/Kconfig"
+source "drivers/staging/smbfs/Kconfig"
+
source "drivers/staging/easycap/Kconfig"
source "drivers/staging/solo6x10/Kconfig"
@@ -153,5 +159,21 @@ source "drivers/staging/tidspbridge/Kconfig"
source "drivers/staging/quickstart/Kconfig"
+source "drivers/staging/westbridge/Kconfig"
+
+source "drivers/staging/sbe-2t3e3/Kconfig"
+
+source "drivers/staging/ath6kl/Kconfig"
+
+source "drivers/staging/keucr/Kconfig"
+
+source "drivers/staging/bcm/Kconfig"
+
+source "drivers/staging/ft1000/Kconfig"
+
+source "drivers/staging/intel_sst/Kconfig"
+
+source "drivers/staging/speakup/Kconfig"
+
endif # !STAGING_EXCLUDE_BUILD
endif # STAGING
diff --git a/drivers/staging/Makefile b/drivers/staging/Makefile
index e3f1e1b6095e..a97a955c094b 100644
--- a/drivers/staging/Makefile
+++ b/drivers/staging/Makefile
@@ -8,28 +8,32 @@ obj-$(CONFIG_SLICOSS) += slicoss/
obj-$(CONFIG_VIDEO_GO7007) += go7007/
obj-$(CONFIG_VIDEO_CX25821) += cx25821/
obj-$(CONFIG_VIDEO_TM6000) += tm6000/
+obj-$(CONFIG_VIDEO_CPIA) += cpia/
+obj-$(CONFIG_VIDEO_STRADIS) += stradis/
obj-$(CONFIG_LIRC_STAGING) += lirc/
obj-$(CONFIG_USB_IP_COMMON) += usbip/
obj-$(CONFIG_W35UND) += winbond/
obj-$(CONFIG_PRISM2_USB) += wlan-ng/
obj-$(CONFIG_ECHO) += echo/
-obj-$(CONFIG_OTUS) += otus/
+obj-$(CONFIG_BRCM80211) += brcm80211/
obj-$(CONFIG_RT2860) += rt2860/
obj-$(CONFIG_RT2870) += rt2870/
obj-$(CONFIG_COMEDI) += comedi/
+obj-$(CONFIG_FB_OLPC_DCON) += olpc_dcon/
obj-$(CONFIG_ASUS_OLED) += asus_oled/
obj-$(CONFIG_PANEL) += panel/
obj-$(CONFIG_R8187SE) += rtl8187se/
-obj-$(CONFIG_RTL8192SU) += rtl8192su/
obj-$(CONFIG_RTL8192U) += rtl8192u/
obj-$(CONFIG_RTL8192E) += rtl8192e/
+obj-$(CONFIG_R8712U) += rtl8712/
obj-$(CONFIG_SPECTRA) += spectra/
obj-$(CONFIG_TRANZPORT) += frontier/
-obj-$(CONFIG_DREAM) += dream/
obj-$(CONFIG_POHMELFS) += pohmelfs/
+obj-$(CONFIG_AUTOFS_FS) += autofs/
obj-$(CONFIG_IDE_PHISON) += phison/
obj-$(CONFIG_LINE6_USB) += line6/
obj-$(CONFIG_USB_SERIAL_QUATECH2) += serqt_usb2/
+obj-$(CONFIG_SMB_FS) += smbfs/
obj-$(CONFIG_USB_SERIAL_QUATECH_USB2) += quatech_usb2/
obj-$(CONFIG_OCTEON_ETHERNET) += octeon/
obj-$(CONFIG_VT6655) += vt6655/
@@ -51,9 +55,16 @@ obj-$(CONFIG_CXT1E1) += cxt1e1/
obj-$(CONFIG_TI_ST) += ti-st/
obj-$(CONFIG_ADIS16255) += adis16255/
obj-$(CONFIG_FB_XGI) += xgifb/
-obj-$(CONFIG_TOUCHSCREEN_MRSTOUCH) += mrst-touchscreen/
obj-$(CONFIG_MSM_STAGING) += msm/
obj-$(CONFIG_EASYCAP) += easycap/
obj-$(CONFIG_SOLO6X10) += solo6x10/
obj-$(CONFIG_TIDSPBRIDGE) += tidspbridge/
obj-$(CONFIG_ACPI_QUICKSTART) += quickstart/
+obj-$(CONFIG_WESTBRIDGE_ASTORIA) += westbridge/astoria/
+obj-$(CONFIG_SBE_2T3E3) += sbe-2t3e3/
+obj-$(CONFIG_ATH6K_LEGACY) += ath6kl/
+obj-$(CONFIG_USB_ENESTORAGE) += keucr/
+obj-$(CONFIG_BCM_WIMAX) += bcm/
+obj-$(CONFIG_FT1000) += ft1000/
+obj-$(CONFIG_SND_INTEL_SST) += intel_sst/
+obj-$(CONFIG_SPEAKUP) += speakup/
diff --git a/drivers/staging/adis16255/adis16255.c b/drivers/staging/adis16255/adis16255.c
index c3e6a4d5f334..8d4d7cbab979 100644
--- a/drivers/staging/adis16255/adis16255.c
+++ b/drivers/staging/adis16255/adis16255.c
@@ -406,12 +406,14 @@ static int __devinit spi_adis16255_probe(struct spi_device *spi)
status = spi_adis16255_bringup(spiadis);
if (status != 0)
- goto irq_err;
+ goto sysfs_err;
dev_info(&spi->dev, "spi_adis16255 driver added!\n");
return status;
+sysfs_err:
+ sysfs_remove_group(&spiadis->spi->dev.kobj, &adis16255_attr_group);
irq_err:
free_irq(spiadis->irq, spiadis);
gpio_err:
diff --git a/drivers/staging/asus_oled/asus_oled.c b/drivers/staging/asus_oled/asus_oled.c
index 8c95d8c2a4f4..016c6f7f8630 100644
--- a/drivers/staging/asus_oled/asus_oled.c
+++ b/drivers/staging/asus_oled/asus_oled.c
@@ -620,13 +620,13 @@ static ssize_t class_set_picture(struct device *device,
#define ASUS_OLED_DEVICE_ATTR(_file) dev_attr_asus_oled_##_file
-static DEVICE_ATTR(asus_oled_enabled, S_IWUGO | S_IRUGO,
+static DEVICE_ATTR(asus_oled_enabled, S_IWUSR | S_IRUGO,
get_enabled, set_enabled);
-static DEVICE_ATTR(asus_oled_picture, S_IWUGO , NULL, set_picture);
+static DEVICE_ATTR(asus_oled_picture, S_IWUSR , NULL, set_picture);
-static DEVICE_ATTR(enabled, S_IWUGO | S_IRUGO,
+static DEVICE_ATTR(enabled, S_IWUSR | S_IRUGO,
class_get_enabled, class_set_enabled);
-static DEVICE_ATTR(picture, S_IWUGO, NULL, class_set_picture);
+static DEVICE_ATTR(picture, S_IWUSR, NULL, class_set_picture);
static int asus_oled_probe(struct usb_interface *interface,
const struct usb_device_id *id)
diff --git a/drivers/staging/ath6kl/Kconfig b/drivers/staging/ath6kl/Kconfig
new file mode 100644
index 000000000000..8a5caa30b85f
--- /dev/null
+++ b/drivers/staging/ath6kl/Kconfig
@@ -0,0 +1,163 @@
+config ATH6K_LEGACY
+ tristate "Atheros AR6003 support (non mac80211)"
+ depends on MMC && WLAN
+ select WIRELESS_EXT
+ select WEXT_PRIV
+ help
+ This module adds support for wireless adapters based on the Atheros AR6003 chipset running over SDIO. If you choose to build it as a module, it will be called ath6kl. Please note that AR6002 and AR6001 are not supported by this driver.
+
+choice
+ prompt "AR6003 Board Data Configuration"
+ depends on ATH6K_LEGACY
+ default AR600x_SD31_XXX
+ help
+ Select the appropriate board data template from the list below that matches your AR6003 based reference design.
+
+config AR600x_SD31_XXX
+ bool "SD31-xxx"
+ help
+ Board Data file for a standard SD31 reference design (File: bdata.SD31.bin)
+
+config AR600x_WB31_XXX
+ bool "WB31-xxx"
+ help
+ Board Data file for a standard WB31 (BT/WiFi) reference design (File: bdata.WB31.bin)
+
+config AR600x_SD32_XXX
+ bool "SD32-xxx"
+ help
+ Board Data file for a standard SD32 (5GHz) reference design (File: bdata.SD32.bin)
+
+config AR600x_CUSTOM_XXX
+ bool "CUSTOM-xxx"
+ help
+ Board Data file for a custom reference design (File: should be named as bdata.CUSTOM.bin)
+endchoice
+
+config ATH6KL_ENABLE_COEXISTENCE
+ bool "BT Coexistence support"
+ depends on ATH6K_LEGACY
+ help
+ Enables WLAN/BT coexistence support. Select the appropriate configuration from below.
+
+choice
+ prompt "Front-End Antenna Configuration"
+ depends on ATH6KL_ENABLE_COEXISTENCE
+ default AR600x_DUAL_ANTENNA
+ help
+ Indicates the number of antennas being used by BT and WLAN. Select the appropriate configuration from the list below that matches your AR6003 based reference design.
+
+config AR600x_DUAL_ANTENNA
+ bool "Dual Antenna"
+ help
+ Dual Antenna Design
+
+config AR600x_SINGLE_ANTENNA
+ bool "Single Antenna"
+ help
+ Single Antenna Design
+endchoice
+
+choice
+ prompt "Collocated Bluetooth Type"
+ depends on ATH6KL_ENABLE_COEXISTENCE
+ default AR600x_BT_AR3001
+ help
+ Select the appropriate configuration from the list below that matches your AR6003 based reference design.
+
+config AR600x_BT_QCOM
+ bool "Qualcomm BTS4020X"
+ help
+ Qualcomm BT (3 Wire PTA)
+
+config AR600x_BT_CSR
+ bool "CSR BC06"
+ help
+ CSR BT (3 Wire PTA)
+
+config AR600x_BT_AR3001
+ bool "Atheros AR3001"
+ help
+ Atheros BT (3 Wire PTA)
+endchoice
+
+config ATH6KL_HCI_BRIDGE
+ bool "HCI over SDIO support"
+ depends on ATH6K_LEGACY
+ help
+ Enables BT over SDIO. Applicable only for combo designs (e.g. WB31).
+
+config ATH6KL_CONFIG_GPIO_BT_RESET
+ bool "Configure BT Reset GPIO"
+ depends on ATH6KL_HCI_BRIDGE
+ help
+ Configure a WLAN GPIO for use with BT.
+
+config AR600x_BT_RESET_PIN
+ int "GPIO"
+ depends on ATH6KL_CONFIG_GPIO_BT_RESET
+ default 22
+ help
+ WLAN GPIO to be used for resetting BT
+
+config ATH6KL_CFG80211
+ bool "CFG80211 support"
+ depends on ATH6K_LEGACY && CFG80211
+ help
+ Enables support for CFG80211 APIs. The default option is to use WEXT. Even with this option enabled, WEXT is not explicitly disabled, and the onus of not exercising WEXT lies on the application(s) running in user space.
+
+config ATH6KL_HTC_RAW_INTERFACE
+ bool "RAW HTC support"
+ depends on ATH6K_LEGACY
+ help
+ Enables the raw HTC interface. Allows applications to talk directly to the HTC interface via ioctl.
+
+config ATH6KL_VIRTUAL_SCATTER_GATHER
+ bool "Virtual Scatter-Gather support"
+ depends on ATH6K_LEGACY
+ help
+ Enables virtual scatter-gather support for hardware that does not support it natively.
+
+config ATH6KL_SKIP_ABI_VERSION_CHECK
+ bool "Skip ABI version check support"
+ depends on ATH6K_LEGACY
+ help
+ Forces the driver to skip the ABI version check. Caution: incompatibility between the host driver and target firmware may lead to unknown side effects.
+
+config ATH6KL_BT_UART_FC_POLARITY
+ int "UART Flow Control Polarity"
+ depends on ATH6K_LEGACY
+ default 0
+ help
+ Configures the polarity of UART Flow Control. A value of 0 implies active low and is the default setting. Set it to 1 for active high.
+
+config ATH6KL_DEBUG
+ bool "Debug support"
+ depends on ATH6K_LEGACY
+ help
+ Enables debug support
+
+config ATH6KL_ENABLE_HOST_DEBUG
+ bool "Host Debug support"
+ depends on ATH6KL_DEBUG
+ help
+ Enables debug support in the driver
+
+config ATH6KL_ENABLE_TARGET_DEBUG_PRINTS
+ bool "Target Debug support - Enable UART prints"
+ depends on ATH6KL_DEBUG
+ help
+ Enables UART debug prints from the target
+
+config AR600x_DEBUG_UART_TX_PIN
+ int "GPIO"
+ depends on ATH6KL_ENABLE_TARGET_DEBUG_PRINTS
+ default 8
+ help
+ WLAN GPIO to be used for Debug UART (Tx)
+
+config ATH6KL_DISABLE_TARGET_DBGLOGS
+ bool "Target Debug support - Disable Debug logs"
+ depends on ATH6KL_DEBUG
+ help
+ Disables target debug logs
diff --git a/drivers/staging/ath6kl/Makefile b/drivers/staging/ath6kl/Makefile
new file mode 100644
index 000000000000..ab68078699f2
--- /dev/null
+++ b/drivers/staging/ath6kl/Makefile
@@ -0,0 +1,159 @@
+#------------------------------------------------------------------------------
+# Copyright (c) 2004-2010 Atheros Communications Inc.
+# All rights reserved.
+#
+#
+#
+# Permission to use, copy, modify, and/or distribute this software for any
+# purpose with or without fee is hereby granted, provided that the above
+# copyright notice and this permission notice appear in all copies.
+#
+# THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+# OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+#
+#
+#
+# Author(s): ="Atheros"
+#------------------------------------------------------------------------------
+
+ccflags-y += -I$(obj)/include
+ccflags-y += -I$(obj)/include/common
+ccflags-y += -I$(obj)/wlan/include
+ccflags-y += -I$(obj)/os/linux/include
+ccflags-y += -I$(obj)/os
+ccflags-y += -I$(obj)/bmi/include
+ccflags-y += -I$(obj)/include/common/AR6002/hw4.0
+
+ifeq ($(CONFIG_AR600x_SD31_XXX),y)
+ccflags-y += -DAR600x_SD31_XXX
+endif
+
+ifeq ($(CONFIG_AR600x_WB31_XXX),y)
+ccflags-y += -DAR600x_WB31_XXX
+endif
+
+ifeq ($(CONFIG_AR600x_SD32_XXX),y)
+ccflags-y += -DAR600x_SD32_XXX
+endif
+
+ifeq ($(CONFIG_AR600x_CUSTOM_XXX),y)
+ccflags-y += -DAR600x_CUSTOM_XXX
+endif
+
+ifeq ($(CONFIG_ATH6KL_ENABLE_COEXISTENCE),y)
+ccflags-y += -DENABLE_COEXISTENCE
+endif
+
+ifeq ($(CONFIG_AR600x_DUAL_ANTENNA),y)
+ccflags-y += -DAR600x_DUAL_ANTENNA
+endif
+
+ifeq ($(CONFIG_AR600x_SINGLE_ANTENNA),y)
+ccflags-y += -DAR600x_SINGLE_ANTENNA
+endif
+
+ifeq ($(CONFIG_AR600x_BT_QCOM),y)
+ccflags-y += -DAR600x_BT_QCOM
+endif
+
+ifeq ($(CONFIG_AR600x_BT_CSR),y)
+ccflags-y += -DAR600x_BT_CSR
+endif
+
+ifeq ($(CONFIG_AR600x_BT_AR3001),y)
+ccflags-y += -DAR600x_BT_AR3001
+endif
+
+ifeq ($(CONFIG_ATH6KL_HCI_BRIDGE),y)
+ccflags-y += -DATH_AR6K_ENABLE_GMBOX
+ccflags-y += -DHCI_TRANSPORT_SDIO
+ccflags-y += -DSETUPHCI_ENABLED
+ccflags-y += -DSETUPBTDEV_ENABLED
+ath6kl-y += htc2/AR6000/ar6k_gmbox.o
+ath6kl-y += htc2/AR6000/ar6k_gmbox_hciuart.o
+ath6kl-y += miscdrv/ar3kconfig.o
+ath6kl-y += miscdrv/ar3kps/ar3kpsconfig.o
+ath6kl-y += miscdrv/ar3kps/ar3kpsparser.o
+endif
+
+ifeq ($(CONFIG_ATH6KL_CONFIG_GPIO_BT_RESET),y)
+ccflags-y += -DATH6KL_CONFIG_GPIO_BT_RESET
+endif
+
+ifeq ($(CONFIG_ATH6KL_CFG80211),y)
+ccflags-y += -DATH6K_CONFIG_CFG80211
+ath6kl-y += os/linux/cfg80211.o
+endif
+
+ifeq ($(CONFIG_ATH6KL_HTC_RAW_INTERFACE),y)
+ccflags-y += -DHTC_RAW_INTERFACE
+endif
+
+ifeq ($(CONFIG_ATH6KL_ENABLE_HOST_DEBUG),y)
+ccflags-y += -DDEBUG
+ccflags-y += -DATH_DEBUG_MODULE
+endif
+
+ifeq ($(CONFIG_ATH6KL_ENABLE_TARGET_DEBUG_PRINTS),y)
+ccflags-y += -DENABLEUARTPRINT_SET
+endif
+
+ifeq ($(CONFIG_ATH6KL_DISABLE_TARGET_DBGLOGS),y)
+ccflags-y += -DATH6KL_DISABLE_TARGET_DBGLOGS
+endif
+
+ifeq ($(CONFIG_ATH6KL_VIRTUAL_SCATTER_GATHER),y)
+ccflags-y += -DATH6KL_CONFIG_HIF_VIRTUAL_SCATTER
+endif
+
+ifeq ($(CONFIG_ATH6KL_SKIP_ABI_VERSION_CHECK),y)
+ccflags-y += -DATH6KL_SKIP_ABI_VERSION_CHECK
+endif
+
+ccflags-y += -DLINUX -DKERNEL_2_6
+ccflags-y += -DTCMD
+ccflags-y += -DSEND_EVENT_TO_APP
+ccflags-y += -DUSER_KEYS
+ccflags-y += -DNO_SYNC_FLUSH
+ccflags-y += -DHTC_EP_STAT_PROFILING
+ccflags-y += -DATH_AR6K_11N_SUPPORT
+ccflags-y += -DWAPI_ENABLE
+ccflags-y += -DCHECKSUM_OFFLOAD
+ccflags-y += -DWLAN_HEADERS
+ccflags-y += -DINIT_MODE_DRV_ENABLED
+ccflags-y += -DBMIENABLE_SET
+
+obj-$(CONFIG_ATH6K_LEGACY) := ath6kl.o
+ath6kl-y += htc2/AR6000/ar6k.o
+ath6kl-y += htc2/AR6000/ar6k_events.o
+ath6kl-y += htc2/htc_send.o
+ath6kl-y += htc2/htc_recv.o
+ath6kl-y += htc2/htc_services.o
+ath6kl-y += htc2/htc.o
+ath6kl-y += bmi/src/bmi.o
+ath6kl-y += os/linux/ar6000_drv.o
+ath6kl-y += os/linux/ar6000_raw_if.o
+ath6kl-y += os/linux/ar6000_pm.o
+ath6kl-y += os/linux/netbuf.o
+ath6kl-y += os/linux/wireless_ext.o
+ath6kl-y += os/linux/ioctl.o
+ath6kl-y += os/linux/hci_bridge.o
+ath6kl-y += os/linux/ar6k_pal.o
+ath6kl-y += miscdrv/common_drv.o
+ath6kl-y += miscdrv/credit_dist.o
+ath6kl-y += wmi/wmi.o
+ath6kl-y += reorder/rcv_aggr.o
+ath6kl-y += wlan/src/wlan_node.o
+ath6kl-y += wlan/src/wlan_recv_beacon.o
+ath6kl-y += wlan/src/wlan_utils.o
+
+# ATH_HIF_TYPE := sdio
+ccflags-y += -I$(obj)/hif/sdio/linux_sdio/include
+ccflags-y += -DSDIO
+ath6kl-y += hif/sdio/linux_sdio/src/hif.o
+ath6kl-y += hif/sdio/linux_sdio/src/hif_scatter.o
diff --git a/drivers/staging/ath6kl/TODO b/drivers/staging/ath6kl/TODO
new file mode 100644
index 000000000000..d4629274397d
--- /dev/null
+++ b/drivers/staging/ath6kl/TODO
@@ -0,0 +1,8 @@
+- The driver is a stop-gap measure until a proper mac80211 driver is available.
+- The driver does not conform to the Linux coding style.
+- The driver has been tested on a wide variety of embedded platforms running different versions of the Linux kernel, but may still have bring-up/performance issues on a new platform.
+- Please use the following link to get information about the driver's architecture, exposed APIs, supported features, limitations, testing, hardware availability and other details.
+ http://wireless.kernel.org/en/users/Drivers/ath6kl
+- Please send any patches to
+ - Greg Kroah-Hartman <greg@kroah.com>
+ - Vipin Mehta <vmehta@atheros.com>
diff --git a/drivers/staging/ath6kl/bmi/include/bmi_internal.h b/drivers/staging/ath6kl/bmi/include/bmi_internal.h
new file mode 100644
index 000000000000..a44027cee4ea
--- /dev/null
+++ b/drivers/staging/ath6kl/bmi/include/bmi_internal.h
@@ -0,0 +1,55 @@
+//------------------------------------------------------------------------------
+// Copyright (c) 2004-2010 Atheros Communications Inc.
+// All rights reserved.
+//
+//
+// Permission to use, copy, modify, and/or distribute this software for any
+// purpose with or without fee is hereby granted, provided that the above
+// copyright notice and this permission notice appear in all copies.
+//
+// THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+// WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+// MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+// ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+// WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+// ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+// OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+//
+//
+//------------------------------------------------------------------------------
+//==============================================================================
+//
+// Author(s): ="Atheros"
+//==============================================================================
+#ifndef BMI_INTERNAL_H
+#define BMI_INTERNAL_H
+
+#include "a_config.h"
+#include "athdefs.h"
+#include "a_types.h"
+#include "a_osapi.h"
+#define ATH_MODULE_NAME bmi
+#include "a_debug.h"
+#include "AR6002/hw2.0/hw/mbox_host_reg.h"
+#include "bmi_msg.h"
+
+#define ATH_DEBUG_BMI ATH_DEBUG_MAKE_MODULE_MASK(0)
+
+
+#define BMI_COMMUNICATION_TIMEOUT 100000
+
+/* ------ Global Variable Declarations ------- */
+static A_BOOL bmiDone;
+
+A_STATUS
+bmiBufferSend(HIF_DEVICE *device,
+ A_UCHAR *buffer,
+ A_UINT32 length);
+
+A_STATUS
+bmiBufferReceive(HIF_DEVICE *device,
+ A_UCHAR *buffer,
+ A_UINT32 length,
+ A_BOOL want_timeout);
+
+#endif
diff --git a/drivers/staging/ath6kl/bmi/src/bmi.c b/drivers/staging/ath6kl/bmi/src/bmi.c
new file mode 100644
index 000000000000..f17f5636f5b2
--- /dev/null
+++ b/drivers/staging/ath6kl/bmi/src/bmi.c
@@ -0,0 +1,1010 @@
+//------------------------------------------------------------------------------
+// <copyright file="bmi.c" company="Atheros">
+// Copyright (c) 2004-2010 Atheros Corporation. All rights reserved.
+//
+//
+// Permission to use, copy, modify, and/or distribute this software for any
+// purpose with or without fee is hereby granted, provided that the above
+// copyright notice and this permission notice appear in all copies.
+//
+// THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+// WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+// MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+// ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+// WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+// ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+// OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+//
+//
+//------------------------------------------------------------------------------
+//==============================================================================
+//
+// Author(s): ="Atheros"
+//==============================================================================
+
+
+#ifdef THREAD_X
+#include <string.h>
+#endif
+
+#include "hif.h"
+#include "bmi.h"
+#include "htc_api.h"
+#include "bmi_internal.h"
+
+#ifdef ATH_DEBUG_MODULE
+static ATH_DEBUG_MASK_DESCRIPTION bmi_debug_desc[] = {
+ { ATH_DEBUG_BMI , "BMI Tracing"},
+};
+
+ATH_DEBUG_INSTANTIATE_MODULE_VAR(bmi,
+ "bmi",
+ "Boot Manager Interface",
+ ATH_DEBUG_MASK_DEFAULTS,
+ ATH_DEBUG_DESCRIPTION_COUNT(bmi_debug_desc),
+ bmi_debug_desc);
+
+#endif
+
+/*
+Although we had envisioned BMI to run on top of HTC, this is not how the
+final implementation ended up. On the Target side, BMI is a part of the BSP
+and does not use the HTC protocol nor even DMA -- it is intentionally kept
+very simple.
+*/
+
+static A_BOOL pendingEventsFuncCheck = FALSE;
+static A_UINT32 *pBMICmdCredits;
+static A_UCHAR *pBMICmdBuf;
+#define MAX_BMI_CMDBUF_SZ (BMI_DATASZ_MAX + \
+ sizeof(A_UINT32) /* cmd */ + \
+ sizeof(A_UINT32) /* addr */ + \
+ sizeof(A_UINT32))/* length */
+#define BMI_COMMAND_FITS(sz) ((sz) <= MAX_BMI_CMDBUF_SZ)
+
+/* APIs visible to the driver */
+void
+BMIInit(void)
+{
+ bmiDone = FALSE;
+ pendingEventsFuncCheck = FALSE;
+
+ /*
+ * On some platforms, it's not possible to DMA to a static variable
+ * in a device driver (e.g. Linux loadable driver module).
+ * So we need to A_MALLOC space for "command credits" and for commands.
+ *
+ * Note: implicitly relies on A_MALLOC to provide a buffer that is
+ * suitable for DMA (or PIO). This buffer will be passed down the
+ * bus stack.
+ */
+ if (!pBMICmdCredits) {
+ pBMICmdCredits = (A_UINT32 *)A_MALLOC_NOWAIT(4);
+ A_ASSERT(pBMICmdCredits);
+ }
+
+ if (!pBMICmdBuf) {
+ pBMICmdBuf = (A_UCHAR *)A_MALLOC_NOWAIT(MAX_BMI_CMDBUF_SZ);
+ A_ASSERT(pBMICmdBuf);
+ }
+
+ A_REGISTER_MODULE_DEBUG_INFO(bmi);
+}
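The comment in BMIInit() above makes the point that, on some platforms, a static variable inside a loadable module cannot be used as a DMA target, so the command buffer and credit counter are heap-allocated instead (A_MALLOC_NOWAIT presumably wraps an atomic kernel allocation). A minimal plain-kernel sketch of the same idea; the demo_* names and the 256-byte size are illustrative:

#include <linux/slab.h>

static u8 *demo_cmd_buf;	/* heap pointer rather than "static u8 buf[256];" */

static int demo_buffers_init(void)
{
	/* kmalloc() memory is physically contiguous and suitable to hand to the
	 * bus/DMA layers, which is what the BMI comment above relies on. */
	demo_cmd_buf = kmalloc(256, GFP_ATOMIC);
	if (!demo_cmd_buf)
		return -ENOMEM;

	return 0;
}

static void demo_buffers_exit(void)
{
	kfree(demo_cmd_buf);
	demo_cmd_buf = NULL;
}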
+
+void
+BMICleanup(void)
+{
+ if (pBMICmdCredits) {
+ A_FREE(pBMICmdCredits);
+ pBMICmdCredits = NULL;
+ }
+
+ if (pBMICmdBuf) {
+ A_FREE(pBMICmdBuf);
+ pBMICmdBuf = NULL;
+ }
+}
+
+A_STATUS
+BMIDone(HIF_DEVICE *device)
+{
+ A_STATUS status;
+ A_UINT32 cid;
+
+ if (bmiDone) {
+ AR_DEBUG_PRINTF (ATH_DEBUG_BMI, ("BMIDone skipped\n"));
+ return A_OK;
+ }
+
+ AR_DEBUG_PRINTF(ATH_DEBUG_BMI, ("BMI Done: Enter (device: 0x%p)\n", device));
+ bmiDone = TRUE;
+ cid = BMI_DONE;
+
+ status = bmiBufferSend(device, (A_UCHAR *)&cid, sizeof(cid));
+ if (status != A_OK) {
+ AR_DEBUG_PRINTF(ATH_DEBUG_ERR, ("Unable to write to the device\n"));
+ return A_ERROR;
+ }
+
+ if (pBMICmdCredits) {
+ A_FREE(pBMICmdCredits);
+ pBMICmdCredits = NULL;
+ }
+
+ if (pBMICmdBuf) {
+ A_FREE(pBMICmdBuf);
+ pBMICmdBuf = NULL;
+ }
+
+ AR_DEBUG_PRINTF(ATH_DEBUG_BMI, ("BMI Done: Exit\n"));
+
+ return A_OK;
+}
+
+A_STATUS
+BMIGetTargetInfo(HIF_DEVICE *device, struct bmi_target_info *targ_info)
+{
+ A_STATUS status;
+ A_UINT32 cid;
+
+ if (bmiDone) {
+ AR_DEBUG_PRINTF(ATH_DEBUG_ERR, ("Command disallowed\n"));
+ return A_ERROR;
+ }
+
+ AR_DEBUG_PRINTF(ATH_DEBUG_BMI, ("BMI Get Target Info: Enter (device: 0x%p)\n", device));
+ cid = BMI_GET_TARGET_INFO;
+
+ status = bmiBufferSend(device, (A_UCHAR *)&cid, sizeof(cid));
+ if (status != A_OK) {
+ AR_DEBUG_PRINTF(ATH_DEBUG_ERR, ("Unable to write to the device\n"));
+ return A_ERROR;
+ }
+
+ status = bmiBufferReceive(device, (A_UCHAR *)&targ_info->target_ver,
+ sizeof(targ_info->target_ver), TRUE);
+ if (status != A_OK) {
+ AR_DEBUG_PRINTF(ATH_DEBUG_ERR, ("Unable to read Target Version from the device\n"));
+ return A_ERROR;
+ }
+
+ if (targ_info->target_ver == TARGET_VERSION_SENTINAL) {
+ /* Determine how many bytes are in the Target's targ_info */
+ status = bmiBufferReceive(device, (A_UCHAR *)&targ_info->target_info_byte_count,
+ sizeof(targ_info->target_info_byte_count), TRUE);
+ if (status != A_OK) {
+ AR_DEBUG_PRINTF(ATH_DEBUG_ERR, ("Unable to read Target Info Byte Count from the device\n"));
+ return A_ERROR;
+ }
+
+ /*
+ * The Target's targ_info doesn't match the Host's targ_info.
+ * We need to do some backwards compatibility work to make this OK.
+ */
+ A_ASSERT(targ_info->target_info_byte_count == sizeof(*targ_info));
+
+ /* Read the remainder of the targ_info */
+ status = bmiBufferReceive(device,
+ ((A_UCHAR *)targ_info)+sizeof(targ_info->target_info_byte_count),
+ sizeof(*targ_info)-sizeof(targ_info->target_info_byte_count), TRUE);
+ if (status != A_OK) {
+ AR_DEBUG_PRINTF(ATH_DEBUG_ERR, ("Unable to read Target Info (%d bytes) from the device\n",
+ targ_info->target_info_byte_count));
+ return A_ERROR;
+ }
+ }
+
+ AR_DEBUG_PRINTF(ATH_DEBUG_BMI, ("BMI Get Target Info: Exit (ver: 0x%x type: 0x%x)\n",
+ targ_info->target_ver, targ_info->target_type));
+
+ return A_OK;
+}
+
+A_STATUS
+BMIReadMemory(HIF_DEVICE *device,
+ A_UINT32 address,
+ A_UCHAR *buffer,
+ A_UINT32 length)
+{
+ A_UINT32 cid;
+ A_STATUS status;
+ A_UINT32 offset;
+ A_UINT32 remaining, rxlen;
+
+ A_ASSERT(BMI_COMMAND_FITS(BMI_DATASZ_MAX + sizeof(cid) + sizeof(address) + sizeof(length)));
+ memset (pBMICmdBuf, 0, BMI_DATASZ_MAX + sizeof(cid) + sizeof(address) + sizeof(length));
+
+ if (bmiDone) {
+ AR_DEBUG_PRINTF(ATH_DEBUG_ERR, ("Command disallowed\n"));
+ return A_ERROR;
+ }
+
+ AR_DEBUG_PRINTF(ATH_DEBUG_BMI,
+ ("BMI Read Memory: Enter (device: 0x%p, address: 0x%x, length: %d)\n",
+ device, address, length));
+
+ cid = BMI_READ_MEMORY;
+
+ remaining = length;
+
+ while (remaining)
+ {
+ rxlen = (remaining < BMI_DATASZ_MAX) ? remaining : BMI_DATASZ_MAX;
+ offset = 0;
+ A_MEMCPY(&(pBMICmdBuf[offset]), &cid, sizeof(cid));
+ offset += sizeof(cid);
+ A_MEMCPY(&(pBMICmdBuf[offset]), &address, sizeof(address));
+ offset += sizeof(address);
+ A_MEMCPY(&(pBMICmdBuf[offset]), &rxlen, sizeof(rxlen));
+ offset += sizeof(length);
+
+ status = bmiBufferSend(device, pBMICmdBuf, offset);
+ if (status != A_OK) {
+ AR_DEBUG_PRINTF(ATH_DEBUG_ERR, ("Unable to write to the device\n"));
+ return A_ERROR;
+ }
+ status = bmiBufferReceive(device, pBMICmdBuf, rxlen, TRUE);
+ if (status != A_OK) {
+ AR_DEBUG_PRINTF(ATH_DEBUG_ERR, ("Unable to read from the device\n"));
+ return A_ERROR;
+ }
+ A_MEMCPY(&buffer[length - remaining], pBMICmdBuf, rxlen);
+ remaining -= rxlen; address += rxlen;
+ }
+
+ AR_DEBUG_PRINTF(ATH_DEBUG_BMI, ("BMI Read Memory: Exit\n"));
+ return A_OK;
+}
+
+A_STATUS
+BMIWriteMemory(HIF_DEVICE *device,
+ A_UINT32 address,
+ A_UCHAR *buffer,
+ A_UINT32 length)
+{
+ A_UINT32 cid;
+ A_STATUS status;
+ A_UINT32 offset;
+ A_UINT32 remaining, txlen;
+ const A_UINT32 header = sizeof(cid) + sizeof(address) + sizeof(length);
+ A_UCHAR alignedBuffer[BMI_DATASZ_MAX];
+ A_UCHAR *src;
+
+ A_ASSERT(BMI_COMMAND_FITS(BMI_DATASZ_MAX + header));
+ memset (pBMICmdBuf, 0, BMI_DATASZ_MAX + header);
+
+ if (bmiDone) {
+ AR_DEBUG_PRINTF(ATH_DEBUG_ERR, ("Command disallowed\n"));
+ return A_ERROR;
+ }
+
+ AR_DEBUG_PRINTF(ATH_DEBUG_BMI,
+ ("BMI Write Memory: Enter (device: 0x%p, address: 0x%x, length: %d)\n",
+ device, address, length));
+
+ cid = BMI_WRITE_MEMORY;
+
+ remaining = length;
+ while (remaining)
+ {
+ src = &buffer[length - remaining];
+ if (remaining < (BMI_DATASZ_MAX - header)) {
+ if (remaining & 3) {
+ /* align it with 4 bytes */
+ remaining = remaining + (4 - (remaining & 3));
+ memcpy(alignedBuffer, src, remaining);
+ src = alignedBuffer;
+ }
+ txlen = remaining;
+ } else {
+ txlen = (BMI_DATASZ_MAX - header);
+ }
+ offset = 0;
+ A_MEMCPY(&(pBMICmdBuf[offset]), &cid, sizeof(cid));
+ offset += sizeof(cid);
+ A_MEMCPY(&(pBMICmdBuf[offset]), &address, sizeof(address));
+ offset += sizeof(address);
+ A_MEMCPY(&(pBMICmdBuf[offset]), &txlen, sizeof(txlen));
+ offset += sizeof(txlen);
+ A_MEMCPY(&(pBMICmdBuf[offset]), src, txlen);
+ offset += txlen;
+ status = bmiBufferSend(device, pBMICmdBuf, offset);
+ if (status != A_OK) {
+ AR_DEBUG_PRINTF(ATH_DEBUG_ERR, ("Unable to write to the device\n"));
+ return A_ERROR;
+ }
+ remaining -= txlen; address += txlen;
+ }
+
+ AR_DEBUG_PRINTF(ATH_DEBUG_BMI, ("BMI Write Memory: Exit\n"));
+
+ return A_OK;
+}
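The tail handling in BMIWriteMemory() above pads a short, unaligned remainder up to the next multiple of 4 before copying it into the aligned scratch buffer; the expression remaining + (4 - (remaining & 3)) is just a round-up. A small sketch showing the equivalent using the kernel's ALIGN() helper (demo_pad_to_word() is illustrative):

#include <linux/kernel.h>

/* Round a byte count up to the next multiple of 4, as the tail path above does. */
static inline u32 demo_pad_to_word(u32 remaining)
{
	if (remaining & 3)
		remaining = ALIGN(remaining, 4);	/* e.g. 5 -> 8, 9 -> 12 */

	return remaining;
}

Note that the memcpy() in the tail path then copies the padded length from the caller's buffer, so up to three bytes past the end of the source may be read into the scratch buffer.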
+
+A_STATUS
+BMIExecute(HIF_DEVICE *device,
+ A_UINT32 address,
+ A_UINT32 *param)
+{
+ A_UINT32 cid;
+ A_STATUS status;
+ A_UINT32 offset;
+
+ A_ASSERT(BMI_COMMAND_FITS(sizeof(cid) + sizeof(address) + sizeof(param)));
+ memset (pBMICmdBuf, 0, sizeof(cid) + sizeof(address) + sizeof(param));
+
+ if (bmiDone) {
+ AR_DEBUG_PRINTF(ATH_DEBUG_ERR, ("Command disallowed\n"));
+ return A_ERROR;
+ }
+
+ AR_DEBUG_PRINTF(ATH_DEBUG_BMI,
+ ("BMI Execute: Enter (device: 0x%p, address: 0x%x, param: %d)\n",
+ device, address, *param));
+
+ cid = BMI_EXECUTE;
+
+ offset = 0;
+ A_MEMCPY(&(pBMICmdBuf[offset]), &cid, sizeof(cid));
+ offset += sizeof(cid);
+ A_MEMCPY(&(pBMICmdBuf[offset]), &address, sizeof(address));
+ offset += sizeof(address);
+ A_MEMCPY(&(pBMICmdBuf[offset]), param, sizeof(*param));
+ offset += sizeof(*param);
+ status = bmiBufferSend(device, pBMICmdBuf, offset);
+ if (status != A_OK) {
+ AR_DEBUG_PRINTF(ATH_DEBUG_ERR, ("Unable to write to the device\n"));
+ return A_ERROR;
+ }
+
+ status = bmiBufferReceive(device, pBMICmdBuf, sizeof(*param), FALSE);
+ if (status != A_OK) {
+ AR_DEBUG_PRINTF(ATH_DEBUG_ERR, ("Unable to read from the device\n"));
+ return A_ERROR;
+ }
+
+ A_MEMCPY(param, pBMICmdBuf, sizeof(*param));
+
+ AR_DEBUG_PRINTF(ATH_DEBUG_BMI, ("BMI Execute: Exit (param: %d)\n", *param));
+ return A_OK;
+}
+
+A_STATUS
+BMISetAppStart(HIF_DEVICE *device,
+ A_UINT32 address)
+{
+ A_UINT32 cid;
+ A_STATUS status;
+ A_UINT32 offset;
+
+ A_ASSERT(BMI_COMMAND_FITS(sizeof(cid) + sizeof(address)));
+ memset (pBMICmdBuf, 0, sizeof(cid) + sizeof(address));
+
+ if (bmiDone) {
+ AR_DEBUG_PRINTF(ATH_DEBUG_ERR, ("Command disallowed\n"));
+ return A_ERROR;
+ }
+
+ AR_DEBUG_PRINTF(ATH_DEBUG_BMI,
+ ("BMI Set App Start: Enter (device: 0x%p, address: 0x%x)\n",
+ device, address));
+
+ cid = BMI_SET_APP_START;
+
+ offset = 0;
+ A_MEMCPY(&(pBMICmdBuf[offset]), &cid, sizeof(cid));
+ offset += sizeof(cid);
+ A_MEMCPY(&(pBMICmdBuf[offset]), &address, sizeof(address));
+ offset += sizeof(address);
+ status = bmiBufferSend(device, pBMICmdBuf, offset);
+ if (status != A_OK) {
+ AR_DEBUG_PRINTF(ATH_DEBUG_ERR, ("Unable to write to the device\n"));
+ return A_ERROR;
+ }
+
+ AR_DEBUG_PRINTF(ATH_DEBUG_BMI, ("BMI Set App Start: Exit\n"));
+ return A_OK;
+}
+
+A_STATUS
+BMIReadSOCRegister(HIF_DEVICE *device,
+ A_UINT32 address,
+ A_UINT32 *param)
+{
+ A_UINT32 cid;
+ A_STATUS status;
+ A_UINT32 offset;
+
+ A_ASSERT(BMI_COMMAND_FITS(sizeof(cid) + sizeof(address)));
+ memset (pBMICmdBuf, 0, sizeof(cid) + sizeof(address));
+
+ if (bmiDone) {
+ AR_DEBUG_PRINTF(ATH_DEBUG_ERR, ("Command disallowed\n"));
+ return A_ERROR;
+ }
+
+ AR_DEBUG_PRINTF(ATH_DEBUG_BMI,
+ ("BMI Read SOC Register: Enter (device: 0x%p, address: 0x%x)\n",
+ device, address));
+
+ cid = BMI_READ_SOC_REGISTER;
+
+ offset = 0;
+ A_MEMCPY(&(pBMICmdBuf[offset]), &cid, sizeof(cid));
+ offset += sizeof(cid);
+ A_MEMCPY(&(pBMICmdBuf[offset]), &address, sizeof(address));
+ offset += sizeof(address);
+
+ status = bmiBufferSend(device, pBMICmdBuf, offset);
+ if (status != A_OK) {
+ AR_DEBUG_PRINTF(ATH_DEBUG_ERR, ("Unable to write to the device\n"));
+ return A_ERROR;
+ }
+
+ status = bmiBufferReceive(device, pBMICmdBuf, sizeof(*param), TRUE);
+ if (status != A_OK) {
+ AR_DEBUG_PRINTF(ATH_DEBUG_ERR, ("Unable to read from the device\n"));
+ return A_ERROR;
+ }
+ A_MEMCPY(param, pBMICmdBuf, sizeof(*param));
+
+ AR_DEBUG_PRINTF(ATH_DEBUG_BMI, ("BMI Read SOC Register: Exit (value: %d)\n", *param));
+ return A_OK;
+}
+
+A_STATUS
+BMIWriteSOCRegister(HIF_DEVICE *device,
+ A_UINT32 address,
+ A_UINT32 param)
+{
+ A_UINT32 cid;
+ A_STATUS status;
+ A_UINT32 offset;
+
+ A_ASSERT(BMI_COMMAND_FITS(sizeof(cid) + sizeof(address) + sizeof(param)));
+ memset (pBMICmdBuf, 0, sizeof(cid) + sizeof(address) + sizeof(param));
+
+ if (bmiDone) {
+ AR_DEBUG_PRINTF(ATH_DEBUG_ERR, ("Command disallowed\n"));
+ return A_ERROR;
+ }
+
+ AR_DEBUG_PRINTF(ATH_DEBUG_BMI,
+ ("BMI Write SOC Register: Enter (device: 0x%p, address: 0x%x, param: %d)\n",
+ device, address, param));
+
+ cid = BMI_WRITE_SOC_REGISTER;
+
+ offset = 0;
+ A_MEMCPY(&(pBMICmdBuf[offset]), &cid, sizeof(cid));
+ offset += sizeof(cid);
+ A_MEMCPY(&(pBMICmdBuf[offset]), &address, sizeof(address));
+ offset += sizeof(address);
+ A_MEMCPY(&(pBMICmdBuf[offset]), &param, sizeof(param));
+ offset += sizeof(param);
+ status = bmiBufferSend(device, pBMICmdBuf, offset);
+ if (status != A_OK) {
+ AR_DEBUG_PRINTF(ATH_DEBUG_ERR, ("Unable to write to the device\n"));
+ return A_ERROR;
+ }
+
+ AR_DEBUG_PRINTF(ATH_DEBUG_BMI, ("BMI Read SOC Register: Exit\n"));
+ return A_OK;
+}
+
+A_STATUS
+BMIrompatchInstall(HIF_DEVICE *device,
+ A_UINT32 ROM_addr,
+ A_UINT32 RAM_addr,
+ A_UINT32 nbytes,
+ A_UINT32 do_activate,
+ A_UINT32 *rompatch_id)
+{
+ A_UINT32 cid;
+ A_STATUS status;
+ A_UINT32 offset;
+
+ A_ASSERT(BMI_COMMAND_FITS(sizeof(cid) + sizeof(ROM_addr) + sizeof(RAM_addr) +
+ sizeof(nbytes) + sizeof(do_activate)));
+ memset(pBMICmdBuf, 0, sizeof(cid) + sizeof(ROM_addr) + sizeof(RAM_addr) +
+ sizeof(nbytes) + sizeof(do_activate));
+
+ if (bmiDone) {
+ AR_DEBUG_PRINTF(ATH_DEBUG_ERR, ("Command disallowed\n"));
+ return A_ERROR;
+ }
+
+ AR_DEBUG_PRINTF(ATH_DEBUG_BMI,
+ ("BMI rompatch Install: Enter (device: 0x%p, ROMaddr: 0x%x, RAMaddr: 0x%x length: %d activate: %d)\n",
+ device, ROM_addr, RAM_addr, nbytes, do_activate));
+
+ cid = BMI_ROMPATCH_INSTALL;
+
+ offset = 0;
+ A_MEMCPY(&(pBMICmdBuf[offset]), &cid, sizeof(cid));
+ offset += sizeof(cid);
+ A_MEMCPY(&(pBMICmdBuf[offset]), &ROM_addr, sizeof(ROM_addr));
+ offset += sizeof(ROM_addr);
+ A_MEMCPY(&(pBMICmdBuf[offset]), &RAM_addr, sizeof(RAM_addr));
+ offset += sizeof(RAM_addr);
+ A_MEMCPY(&(pBMICmdBuf[offset]), &nbytes, sizeof(nbytes));
+ offset += sizeof(nbytes);
+ A_MEMCPY(&(pBMICmdBuf[offset]), &do_activate, sizeof(do_activate));
+ offset += sizeof(do_activate);
+ status = bmiBufferSend(device, pBMICmdBuf, offset);
+ if (status != A_OK) {
+ AR_DEBUG_PRINTF(ATH_DEBUG_ERR, ("Unable to write to the device\n"));
+ return A_ERROR;
+ }
+
+ status = bmiBufferReceive(device, pBMICmdBuf, sizeof(*rompatch_id), TRUE);
+ if (status != A_OK) {
+ AR_DEBUG_PRINTF(ATH_DEBUG_ERR, ("Unable to read from the device\n"));
+ return A_ERROR;
+ }
+ A_MEMCPY(rompatch_id, pBMICmdBuf, sizeof(*rompatch_id));
+
+ AR_DEBUG_PRINTF(ATH_DEBUG_BMI, ("BMI rompatch Install: (rompatch_id=%d)\n", *rompatch_id));
+ return A_OK;
+}
+
+A_STATUS
+BMIrompatchUninstall(HIF_DEVICE *device,
+ A_UINT32 rompatch_id)
+{
+ A_UINT32 cid;
+ A_STATUS status;
+ A_UINT32 offset;
+
+ A_ASSERT(BMI_COMMAND_FITS(sizeof(cid) + sizeof(rompatch_id)));
+ memset (pBMICmdBuf, 0, sizeof(cid) + sizeof(rompatch_id));
+
+ if (bmiDone) {
+ AR_DEBUG_PRINTF(ATH_DEBUG_ERR, ("Command disallowed\n"));
+ return A_ERROR;
+ }
+
+ AR_DEBUG_PRINTF(ATH_DEBUG_BMI,
+ ("BMI rompatch Uninstall: Enter (device: 0x%p, rompatch_id: %d)\n",
+ device, rompatch_id));
+
+ cid = BMI_ROMPATCH_UNINSTALL;
+
+ offset = 0;
+ A_MEMCPY(&(pBMICmdBuf[offset]), &cid, sizeof(cid));
+ offset += sizeof(cid);
+ A_MEMCPY(&(pBMICmdBuf[offset]), &rompatch_id, sizeof(rompatch_id));
+ offset += sizeof(rompatch_id);
+ status = bmiBufferSend(device, pBMICmdBuf, offset);
+ if (status != A_OK) {
+ AR_DEBUG_PRINTF(ATH_DEBUG_ERR, ("Unable to write to the device\n"));
+ return A_ERROR;
+ }
+
+ AR_DEBUG_PRINTF(ATH_DEBUG_BMI, ("BMI rompatch UNinstall: (rompatch_id=0x%x)\n", rompatch_id));
+ return A_OK;
+}
+
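+/* Common helper for BMIrompatchActivate/BMIrompatchDeactivate: sends a single
+ * BMI_ROMPATCH_ACTIVATE or BMI_ROMPATCH_DEACTIVATE command carrying the list of rompatch IDs. */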
+static A_STATUS
+_BMIrompatchChangeActivation(HIF_DEVICE *device,
+ A_UINT32 rompatch_count,
+ A_UINT32 *rompatch_list,
+ A_UINT32 do_activate)
+{
+ A_UINT32 cid;
+ A_STATUS status;
+ A_UINT32 offset;
+ A_UINT32 length;
+
+ A_ASSERT(BMI_COMMAND_FITS(BMI_DATASZ_MAX + sizeof(cid) + sizeof(rompatch_count)));
+ memset(pBMICmdBuf, 0, BMI_DATASZ_MAX + sizeof(cid) + sizeof(rompatch_count));
+
+ if (bmiDone) {
+ AR_DEBUG_PRINTF(ATH_DEBUG_ERR, ("Command disallowed\n"));
+ return A_ERROR;
+ }
+
+ AR_DEBUG_PRINTF(ATH_DEBUG_BMI,
+ ("BMI Change rompatch Activation: Enter (device: 0x%p, count: %d)\n",
+ device, rompatch_count));
+
+ cid = do_activate ? BMI_ROMPATCH_ACTIVATE : BMI_ROMPATCH_DEACTIVATE;
+
+ offset = 0;
+ A_MEMCPY(&(pBMICmdBuf[offset]), &cid, sizeof(cid));
+ offset += sizeof(cid);
+ A_MEMCPY(&(pBMICmdBuf[offset]), &rompatch_count, sizeof(rompatch_count));
+ offset += sizeof(rompatch_count);
+ length = rompatch_count * sizeof(*rompatch_list);
+ A_MEMCPY(&(pBMICmdBuf[offset]), rompatch_list, length);
+ offset += length;
+ status = bmiBufferSend(device, pBMICmdBuf, offset);
+ if (status != A_OK) {
+ AR_DEBUG_PRINTF(ATH_DEBUG_ERR, ("Unable to write to the device\n"));
+ return A_ERROR;
+ }
+
+ AR_DEBUG_PRINTF(ATH_DEBUG_BMI, ("BMI Change rompatch Activation: Exit\n"));
+
+ return A_OK;
+}
+
+A_STATUS
+BMIrompatchActivate(HIF_DEVICE *device,
+ A_UINT32 rompatch_count,
+ A_UINT32 *rompatch_list)
+{
+ return _BMIrompatchChangeActivation(device, rompatch_count, rompatch_list, 1);
+}
+
+A_STATUS
+BMIrompatchDeactivate(HIF_DEVICE *device,
+ A_UINT32 rompatch_count,
+ A_UINT32 *rompatch_list)
+{
+ return _BMIrompatchChangeActivation(device, rompatch_count, rompatch_list, 0);
+}
+
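+/* Send LZ-compressed image data to the Target, splitting it into BMI_LZ_DATA
+ * commands of at most (BMI_DATASZ_MAX - header) payload bytes each. */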
+A_STATUS
+BMILZData(HIF_DEVICE *device,
+ A_UCHAR *buffer,
+ A_UINT32 length)
+{
+ A_UINT32 cid;
+ A_STATUS status;
+ A_UINT32 offset;
+ A_UINT32 remaining, txlen;
+ const A_UINT32 header = sizeof(cid) + sizeof(length);
+
+ A_ASSERT(BMI_COMMAND_FITS(BMI_DATASZ_MAX+header));
+ memset (pBMICmdBuf, 0, BMI_DATASZ_MAX+header);
+
+ if (bmiDone) {
+ AR_DEBUG_PRINTF(ATH_DEBUG_ERR, ("Command disallowed\n"));
+ return A_ERROR;
+ }
+
+ AR_DEBUG_PRINTF(ATH_DEBUG_BMI,
+ ("BMI Send LZ Data: Enter (device: 0x%p, length: %d)\n",
+ device, length));
+
+ cid = BMI_LZ_DATA;
+
+ remaining = length;
+ while (remaining)
+ {
+ txlen = (remaining < (BMI_DATASZ_MAX - header)) ?
+ remaining : (BMI_DATASZ_MAX - header);
+ offset = 0;
+ A_MEMCPY(&(pBMICmdBuf[offset]), &cid, sizeof(cid));
+ offset += sizeof(cid);
+ A_MEMCPY(&(pBMICmdBuf[offset]), &txlen, sizeof(txlen));
+ offset += sizeof(txlen);
+ A_MEMCPY(&(pBMICmdBuf[offset]), &buffer[length - remaining], txlen);
+ offset += txlen;
+ status = bmiBufferSend(device, pBMICmdBuf, offset);
+ if (status != A_OK) {
+ AR_DEBUG_PRINTF(ATH_DEBUG_ERR, ("Unable to write to the device\n"));
+ return A_ERROR;
+ }
+ remaining -= txlen;
+ }
+
+ AR_DEBUG_PRINTF(ATH_DEBUG_BMI, ("BMI LZ Data: Exit\n"));
+
+ return A_OK;
+}
+
+A_STATUS
+BMILZStreamStart(HIF_DEVICE *device,
+ A_UINT32 address)
+{
+ A_UINT32 cid;
+ A_STATUS status;
+ A_UINT32 offset;
+
+ A_ASSERT(BMI_COMMAND_FITS(sizeof(cid) + sizeof(address)));
+ memset (pBMICmdBuf, 0, sizeof(cid) + sizeof(address));
+
+ if (bmiDone) {
+ AR_DEBUG_PRINTF(ATH_DEBUG_ERR, ("Command disallowed\n"));
+ return A_ERROR;
+ }
+
+ AR_DEBUG_PRINTF(ATH_DEBUG_BMI,
+ ("BMI LZ Stream Start: Enter (device: 0x%p, address: 0x%x)\n",
+ device, address));
+
+ cid = BMI_LZ_STREAM_START;
+ offset = 0;
+ A_MEMCPY(&(pBMICmdBuf[offset]), &cid, sizeof(cid));
+ offset += sizeof(cid);
+ A_MEMCPY(&(pBMICmdBuf[offset]), &address, sizeof(address));
+ offset += sizeof(address);
+ status = bmiBufferSend(device, pBMICmdBuf, offset);
+ if (status != A_OK) {
+ AR_DEBUG_PRINTF(ATH_DEBUG_ERR, ("Unable to Start LZ Stream to the device\n"));
+ return A_ERROR;
+ }
+
+ AR_DEBUG_PRINTF(ATH_DEBUG_BMI, ("BMI LZ Stream Start: Exit\n"));
+
+ return A_OK;
+}
+
+/* BMI Access routines */
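+/* Send one BMI command buffer: poll the mailbox credit counter (bounded by
+ * BMI_COMMUNICATION_TIMEOUT) until the Target grants a command credit, then
+ * write the buffer to mailbox ENDPOINT1. */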
+A_STATUS
+bmiBufferSend(HIF_DEVICE *device,
+ A_UCHAR *buffer,
+ A_UINT32 length)
+{
+ A_STATUS status;
+ A_UINT32 timeout;
+ A_UINT32 address;
+ A_UINT32 mboxAddress[HTC_MAILBOX_NUM_MAX];
+
+ HIFConfigureDevice(device, HIF_DEVICE_GET_MBOX_ADDR,
+ &mboxAddress[0], sizeof(mboxAddress));
+
+ *pBMICmdCredits = 0;
+ timeout = BMI_COMMUNICATION_TIMEOUT;
+
+ while(timeout-- && !(*pBMICmdCredits)) {
+ /* Read the counter register to get the command credits */
+ address = COUNT_DEC_ADDRESS + (HTC_MAILBOX_NUM_MAX + ENDPOINT1) * 4;
+ /* hit the credit counter with a 4-byte access, the first byte read will hit the counter and cause
+ * a decrement, while the remaining 3 bytes have no effect. The rationale behind this is to
+ * make all HIF accesses 4-byte aligned */
+ status = HIFReadWrite(device, address, (A_UINT8 *)pBMICmdCredits, 4,
+ HIF_RD_SYNC_BYTE_INC, NULL);
+ if (status != A_OK) {
+ AR_DEBUG_PRINTF(ATH_DEBUG_ERR, ("Unable to decrement the command credit count register\n"));
+ return A_ERROR;
+ }
+ /* the counter is only 8 bits, ignore anything in the upper 3 bytes */
+ (*pBMICmdCredits) &= 0xFF;
+ }
+
+ if (*pBMICmdCredits) {
+ address = mboxAddress[ENDPOINT1];
+ status = HIFReadWrite(device, address, buffer, length,
+ HIF_WR_SYNC_BYTE_INC, NULL);
+ if (status != A_OK) {
+ AR_DEBUG_PRINTF(ATH_DEBUG_ERR, ("Unable to send the BMI data to the device\n"));
+ return A_ERROR;
+ }
+ } else {
+ AR_DEBUG_PRINTF(ATH_DEBUG_ERR, ("BMI Communication timeout - bmiBufferSend\n"));
+ return A_ERROR;
+ }
+
+ return status;
+}
+
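+/* Receive a BMI response: poll (bounded by BMI_COMMUNICATION_TIMEOUT unless
+ * want_timeout is FALSE) until the Target has placed at least one word in the
+ * mailbox FIFO, then read 'length' bytes from mailbox ENDPOINT1. */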
+A_STATUS
+bmiBufferReceive(HIF_DEVICE *device,
+ A_UCHAR *buffer,
+ A_UINT32 length,
+ A_BOOL want_timeout)
+{
+ A_STATUS status;
+ A_UINT32 address;
+ A_UINT32 mboxAddress[HTC_MAILBOX_NUM_MAX];
+ HIF_PENDING_EVENTS_INFO hifPendingEvents;
+ static HIF_PENDING_EVENTS_FUNC getPendingEventsFunc = NULL;
+
+ if (!pendingEventsFuncCheck) {
+ /* see if the HIF layer implements an alternative function to get pending events
+ * do this only once! */
+ HIFConfigureDevice(device,
+ HIF_DEVICE_GET_PENDING_EVENTS_FUNC,
+ &getPendingEventsFunc,
+ sizeof(getPendingEventsFunc));
+ pendingEventsFuncCheck = TRUE;
+ }
+
+ HIFConfigureDevice(device, HIF_DEVICE_GET_MBOX_ADDR,
+ &mboxAddress[0], sizeof(mboxAddress));
+
+ /*
+ * During normal bootup, small reads may be required.
+ * Rather than issue an HIF Read and then wait as the Target
+ * adds successive bytes to the FIFO, we wait here until
+ * we know that response data is available.
+ *
+ * This allows us to cleanly timeout on an unexpected
+ * Target failure rather than risk problems at the HIF level. In
+ * particular, this avoids SDIO timeouts and possibly garbage
+ * data on some host controllers. And on an interconnect
+ * such as Compact Flash (as well as some SDIO masters) which
+ * does not provide any indication on data timeout, it avoids
+ * a potential hang or garbage response.
+ *
+ * Synchronization is more difficult for reads larger than the
+ * size of the MBOX FIFO (128B), because the Target is unable
+ * to push the 129th byte of data until AFTER the Host posts an
+ * HIF Read and removes some FIFO data. So for large reads the
+ * Host proceeds to post an HIF Read BEFORE all the data is
+ * actually available to read. Fortunately, large BMI reads do
+ * not occur in practice -- they're supported for debug/development.
+ *
+ * So Host/Target BMI synchronization is divided into these cases:
+ * CASE 1: length < 4
+ * Should not happen
+ *
+ * CASE 2: 4 <= length <= 128
+ * Wait for first 4 bytes to be in FIFO
+ * If CONSERVATIVE_BMI_READ is enabled, also wait for
+ * a BMI command credit, which indicates that the ENTIRE
+ * response is available in the FIFO
+ *
+ * CASE 3: length > 128
+ * Wait for the first 4 bytes to be in FIFO
+ *
+ * For most uses, a small timeout should be sufficient and we will
+ * usually see a response quickly; but there may be some unusual
+ * (debug) cases of BMI_EXECUTE where we want a larger timeout.
+ * For now, we use an unbounded busy loop while waiting for
+ * BMI_EXECUTE.
+ *
+ * If BMI_EXECUTE ever needs to support longer-latency execution,
+ * especially in production, this code needs to be enhanced to sleep
+ * and yield. Also note that BMI_COMMUNICATION_TIMEOUT is currently
+ * a function of Host processor speed.
+ */
+ if (length >= 4) { /* NB: Currently, always true */
+ /*
+ * NB: word_available is declared static for esoteric reasons
+ * having to do with protection on some OSes.
+ */
+ static A_UINT32 word_available;
+ A_UINT32 timeout;
+
+ word_available = 0;
+ timeout = BMI_COMMUNICATION_TIMEOUT;
+ while((!want_timeout || timeout--) && !word_available) {
+
+ if (getPendingEventsFunc != NULL) {
+ status = getPendingEventsFunc(device,
+ &hifPendingEvents,
+ NULL);
+ if (status != A_OK) {
+ AR_DEBUG_PRINTF(ATH_DEBUG_ERR,("BMI: Failed to get pending events \n"));
+ break;
+ }
+
+ if (hifPendingEvents.AvailableRecvBytes >= sizeof(A_UINT32)) {
+ word_available = 1;
+ }
+ continue;
+ }
+
+ status = HIFReadWrite(device, RX_LOOKAHEAD_VALID_ADDRESS, (A_UINT8 *)&word_available,
+ sizeof(word_available), HIF_RD_SYNC_BYTE_INC, NULL);
+ if (status != A_OK) {
+ AR_DEBUG_PRINTF(ATH_DEBUG_ERR, ("Unable to read RX_LOOKAHEAD_VALID register\n"));
+ return A_ERROR;
+ }
+ /* We did a 4-byte read to the same register; all we really want is one bit */
+ word_available &= (1 << ENDPOINT1);
+ }
+
+ if (!word_available) {
+ AR_DEBUG_PRINTF(ATH_DEBUG_ERR, ("BMI Communication timeout - bmiBufferReceive FIFO empty\n"));
+ return A_ERROR;
+ }
+ }
+
+#define CONSERVATIVE_BMI_READ 0
+#if CONSERVATIVE_BMI_READ
+ /*
+ * This is an extra-conservative CREDIT check. It guarantees
+ * that ALL data is available in the FIFO before we start to
+ * read from the interconnect.
+ *
+ * This credit check is useless when firmware chooses to
+ * allow multiple outstanding BMI Command Credits, since the next
+ * credit will already be present. To restrict the Target to one
+ * BMI Command Credit, see HI_OPTION_BMI_CRED_LIMIT.
+ *
+ * And for large reads (when HI_OPTION_BMI_CRED_LIMIT is set)
+ * we cannot wait for the next credit because the Target's FIFO
+ * will not hold the entire response. So we need the Host to
+ * start to empty the FIFO sooner. (And again, large reads are
+ * not used in practice; they are for debug/development only.)
+ *
+ * For a more conservative Host implementation (which would be
+ * safer for a Compact Flash interconnect):
+ * Set CONSERVATIVE_BMI_READ (above) to 1
+ * Set HI_OPTION_BMI_CRED_LIMIT and
+ * reduce BMI_DATASZ_MAX to 32 or 64
+ */
+ if ((length > 4) && (length < 128)) { /* check against MBOX FIFO size */
+ A_UINT32 timeout;
+
+ *pBMICmdCredits = 0;
+ timeout = BMI_COMMUNICATION_TIMEOUT;
+ while((!want_timeout || timeout--) && !(*pBMICmdCredits)) {
+ /* Read the counter register to get the command credits */
+ address = COUNT_ADDRESS + (HTC_MAILBOX_NUM_MAX + ENDPOINT1) * 1;
+ /* read the counter using a 4-byte read. Since the counter is NOT auto-decrementing,
+ * we can read this counter multiple times using a non-incrementing address mode.
+ * The rationale here is to make all HIF accesses a multiple of 4 bytes */
+ status = HIFReadWrite(device, address, (A_UINT8 *)pBMICmdCredits, sizeof(*pBMICmdCredits),
+ HIF_RD_SYNC_BYTE_FIX, NULL);
+ if (status != A_OK) {
+ AR_DEBUG_PRINTF(ATH_DEBUG_ERR, ("Unable to read the command credit count register\n"));
+ return A_ERROR;
+ }
+ /* we did a 4-byte read to the same count register so mask off upper bytes */
+ (*pBMICmdCredits) &= 0xFF;
+ }
+
+ if (!(*pBMICmdCredits)) {
+ AR_DEBUG_PRINTF(ATH_DEBUG_ERR, ("BMI Communication timeout- bmiBufferReceive no credit\n"));
+ return A_ERROR;
+ }
+ }
+#endif
+
+ address = mboxAddress[ENDPOINT1];
+ status = HIFReadWrite(device, address, buffer, length, HIF_RD_SYNC_BYTE_INC, NULL);
+ if (status != A_OK) {
+ AR_DEBUG_PRINTF(ATH_DEBUG_ERR, ("Unable to read the BMI data from the device\n"));
+ return A_ERROR;
+ }
+
+ return A_OK;
+}
+
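+/* Download an image through the LZ stream: send the word-aligned portion of the
+ * buffer, zero-pad any trailing unaligned bytes into a final 4-byte word, then
+ * restart the LZ stream at address 0 to flush the Target caches. */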
+A_STATUS
+BMIFastDownload(HIF_DEVICE *device, A_UINT32 address, A_UCHAR *buffer, A_UINT32 length)
+{
+ A_STATUS status = A_ERROR;
+ A_UINT32 lastWord = 0;
+ A_UINT32 lastWordOffset = length & ~0x3;
+ A_UINT32 unalignedBytes = length & 0x3;
+
+ status = BMILZStreamStart (device, address);
+ if (A_FAILED(status)) {
+ return A_ERROR;
+ }
+
+ if (unalignedBytes) {
+ /* copy the last word into a zero padded buffer */
+ A_MEMCPY(&lastWord, &buffer[lastWordOffset], unalignedBytes);
+ }
+
+ status = BMILZData(device, buffer, lastWordOffset);
+
+ if (A_FAILED(status)) {
+ return A_ERROR;
+ }
+
+ if (unalignedBytes) {
+ status = BMILZData(device, (A_UINT8 *)&lastWord, 4);
+ }
+
+ if (A_SUCCESS(status)) {
+ //
+ // Close compressed stream and open a new (fake) one. This serves mainly to flush Target caches.
+ //
+ status = BMILZStreamStart (device, 0x00);
+ if (A_FAILED(status)) {
+ return A_ERROR;
+ }
+ }
+ return status;
+}
+
+A_STATUS
+BMIRawWrite(HIF_DEVICE *device, A_UCHAR *buffer, A_UINT32 length)
+{
+ return bmiBufferSend(device, buffer, length);
+}
+
+A_STATUS
+BMIRawRead(HIF_DEVICE *device, A_UCHAR *buffer, A_UINT32 length, A_BOOL want_timeout)
+{
+ return bmiBufferReceive(device, buffer, length, want_timeout);
+}
diff --git a/drivers/staging/ath6kl/hif/common/hif_sdio_common.h b/drivers/staging/ath6kl/hif/common/hif_sdio_common.h
new file mode 100644
index 000000000000..0f4e913cb13b
--- /dev/null
+++ b/drivers/staging/ath6kl/hif/common/hif_sdio_common.h
@@ -0,0 +1,87 @@
+//------------------------------------------------------------------------------
+// Copyright (c) 2009-2010 Atheros Corporation. All rights reserved.
+//
+//
+// Permission to use, copy, modify, and/or distribute this software for any
+// purpose with or without fee is hereby granted, provided that the above
+// copyright notice and this permission notice appear in all copies.
+//
+// THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+// WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+// MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+// ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+// WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+// ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+// OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+//
+//
+//------------------------------------------------------------------------------
+//==============================================================================
+// common header file for HIF modules designed for SDIO
+//
+// Author(s): ="Atheros"
+//==============================================================================
+
+#ifndef HIF_SDIO_COMMON_H_
+#define HIF_SDIO_COMMON_H_
+
+ /* SDIO manufacturer ID and Codes */
+#define MANUFACTURER_ID_AR6002_BASE 0x200
+#define MANUFACTURER_ID_AR6003_BASE 0x300
+#define MANUFACTURER_ID_AR6K_BASE_MASK 0xFF00
+#define FUNCTION_CLASS 0x0
+#define MANUFACTURER_CODE 0x271 /* Atheros */
+
+ /* Mailbox address in SDIO address space */
+#define HIF_MBOX_BASE_ADDR 0x800
+#define HIF_MBOX_WIDTH 0x800
+#define HIF_MBOX_START_ADDR(mbox) \
+ ( HIF_MBOX_BASE_ADDR + mbox * HIF_MBOX_WIDTH)
+
+#define HIF_MBOX_END_ADDR(mbox) \
+ (HIF_MBOX_START_ADDR(mbox) + HIF_MBOX_WIDTH - 1)
+
+ /* extended MBOX address for larger MBOX writes to MBOX 0*/
+#define HIF_MBOX0_EXTENDED_BASE_ADDR 0x2800
+#define HIF_MBOX0_EXTENDED_WIDTH_AR6002 (6*1024)
+#define HIF_MBOX0_EXTENDED_WIDTH_AR6003 (18*1024)
+
+ /* version 1 of the chip has only a 12K extended mbox range */
+#define HIF_MBOX0_EXTENDED_BASE_ADDR_AR6003_V1 0x4000
+#define HIF_MBOX0_EXTENDED_WIDTH_AR6003_V1 (12*1024)
+
+ /* GMBOX addresses */
+#define HIF_GMBOX_BASE_ADDR 0x7000
+#define HIF_GMBOX_WIDTH 0x4000
+
+ /* for SDIO we recommend a 128-byte block size */
+#define HIF_DEFAULT_IO_BLOCK_SIZE 128
+
+ /* set extended MBOX window information for SDIO interconnects */
+static INLINE void SetExtendedMboxWindowInfo(A_UINT16 Manfid, HIF_DEVICE_MBOX_INFO *pInfo)
+{
+ switch (Manfid & MANUFACTURER_ID_AR6K_BASE_MASK) {
+ case MANUFACTURER_ID_AR6002_BASE :
+ /* MBOX 0 has an extended range */
+ pInfo->MboxProp[0].ExtendedAddress = HIF_MBOX0_EXTENDED_BASE_ADDR;
+ pInfo->MboxProp[0].ExtendedSize = HIF_MBOX0_EXTENDED_WIDTH_AR6002;
+ break;
+ case MANUFACTURER_ID_AR6003_BASE :
+ /* MBOX 0 has an extended range */
+ pInfo->MboxProp[0].ExtendedAddress = HIF_MBOX0_EXTENDED_BASE_ADDR_AR6003_V1;
+ pInfo->MboxProp[0].ExtendedSize = HIF_MBOX0_EXTENDED_WIDTH_AR6003_V1;
+ pInfo->GMboxAddress = HIF_GMBOX_BASE_ADDR;
+ pInfo->GMboxSize = HIF_GMBOX_WIDTH;
+ break;
+ default:
+ A_ASSERT(FALSE);
+ break;
+ }
+}
+
+/* special CCCR (func 0) registers */
+
+#define CCCR_SDIO_IRQ_MODE_REG 0xF0 /* interrupt mode register */
+#define SDIO_IRQ_MODE_ASYNC_4BIT_IRQ (1 << 0) /* mode to enable special 4-bit interrupt assertion without clock*/
+
+#endif /*HIF_SDIO_COMMON_H_*/
diff --git a/drivers/staging/ath6kl/hif/sdio/linux_sdio/include/hif_internal.h b/drivers/staging/ath6kl/hif/sdio/linux_sdio/include/hif_internal.h
new file mode 100644
index 000000000000..857f35f36ca2
--- /dev/null
+++ b/drivers/staging/ath6kl/hif/sdio/linux_sdio/include/hif_internal.h
@@ -0,0 +1,134 @@
+//------------------------------------------------------------------------------
+// <copyright file="hif_internal.h" company="Atheros">
+// Copyright (c) 2004-2010 Atheros Corporation. All rights reserved.
+//
+//
+// Permission to use, copy, modify, and/or distribute this software for any
+// purpose with or without fee is hereby granted, provided that the above
+// copyright notice and this permission notice appear in all copies.
+//
+// THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+// WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+// MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+// ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+// WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+// ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+// OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+//
+//
+//------------------------------------------------------------------------------
+//==============================================================================
+// internal header file for hif layer
+//
+// Author(s): ="Atheros"
+//==============================================================================
+#ifndef _HIF_INTERNAL_H_
+#define _HIF_INTERNAL_H_
+
+#include "a_config.h"
+#include "athdefs.h"
+#include "a_types.h"
+#include "a_osapi.h"
+#include "hif.h"
+#include "../../../common/hif_sdio_common.h"
+#include <linux/scatterlist.h>
+#define HIF_LINUX_MMC_SCATTER_SUPPORT
+
+#define BUS_REQUEST_MAX_NUM 64
+
+#define SDIO_CLOCK_FREQUENCY_DEFAULT 25000000
+#define SDWLAN_ENABLE_DISABLE_TIMEOUT 20
+#define FLAGS_CARD_ENAB 0x02
+#define FLAGS_CARD_IRQ_UNMSK 0x04
+
+#define HIF_MBOX_BLOCK_SIZE HIF_DEFAULT_IO_BLOCK_SIZE
+#define HIF_MBOX0_BLOCK_SIZE 1
+#define HIF_MBOX1_BLOCK_SIZE HIF_MBOX_BLOCK_SIZE
+#define HIF_MBOX2_BLOCK_SIZE HIF_MBOX_BLOCK_SIZE
+#define HIF_MBOX3_BLOCK_SIZE HIF_MBOX_BLOCK_SIZE
+
+struct _HIF_SCATTER_REQ_PRIV;
+
+typedef struct bus_request {
+ struct bus_request *next; /* link list of available requests */
+ struct bus_request *inusenext; /* link list of in use requests */
+ struct semaphore sem_req;
+ A_UINT32 address; /* request data */
+ A_UCHAR *buffer;
+ A_UINT32 length;
+ A_UINT32 request;
+ void *context;
+ A_STATUS status;
+ struct _HIF_SCATTER_REQ_PRIV *pScatterReq; /* this request is a scatter request */
+} BUS_REQUEST;
+
+struct hif_device {
+ struct sdio_func *func;
+ spinlock_t asynclock;
+ struct task_struct* async_task; /* task to handle async commands */
+ struct semaphore sem_async; /* wake up for async task */
+ int async_shutdown; /* stop the async task */
+ struct completion async_completion; /* thread completion */
+ BUS_REQUEST *asyncreq; /* request for async tasklet */
+ BUS_REQUEST *taskreq; /* async tasklet data */
+ spinlock_t lock;
+ BUS_REQUEST *s_busRequestFreeQueue; /* free list */
+ BUS_REQUEST busRequest[BUS_REQUEST_MAX_NUM]; /* available bus requests */
+ void *claimedContext;
+ HTC_CALLBACKS htcCallbacks;
+ A_UINT8 *dma_buffer;
+ DL_LIST ScatterReqHead; /* scatter request list head */
+ A_BOOL scatter_enabled; /* scatter enabled flag */
+ A_BOOL is_suspend;
+ A_BOOL is_disabled;
+ atomic_t irqHandling;
+ HIF_DEVICE_POWER_CHANGE_TYPE powerConfig;
+ const struct sdio_device_id *id;
+};
+
+#define HIF_DMA_BUFFER_SIZE (32 * 1024)
+#define CMD53_FIXED_ADDRESS 1
+#define CMD53_INCR_ADDRESS 2
+
+BUS_REQUEST *hifAllocateBusRequest(HIF_DEVICE *device);
+void hifFreeBusRequest(HIF_DEVICE *device, BUS_REQUEST *busrequest);
+void AddToAsyncList(HIF_DEVICE *device, BUS_REQUEST *busrequest);
+
+#ifdef HIF_LINUX_MMC_SCATTER_SUPPORT
+
+#define MAX_SCATTER_REQUESTS 4
+#define MAX_SCATTER_ENTRIES_PER_REQ 16
+#define MAX_SCATTER_REQ_TRANSFER_SIZE (32 * 1024)
+
+typedef struct _HIF_SCATTER_REQ_PRIV {
+ HIF_SCATTER_REQ *pHifScatterReq; /* HIF scatter request with allocated entries */
+ HIF_DEVICE *device; /* this device */
+ BUS_REQUEST *busrequest; /* request associated with request */
+ /* scatter list for linux */
+ struct scatterlist sgentries[MAX_SCATTER_ENTRIES_PER_REQ];
+} HIF_SCATTER_REQ_PRIV;
+
+#define ATH_DEBUG_SCATTER ATH_DEBUG_MAKE_MODULE_MASK(0)
+
+A_STATUS SetupHIFScatterSupport(HIF_DEVICE *device, HIF_DEVICE_SCATTER_SUPPORT_INFO *pInfo);
+void CleanupHIFScatterResources(HIF_DEVICE *device);
+A_STATUS DoHifReadWriteScatter(HIF_DEVICE *device, BUS_REQUEST *busrequest);
+
+#else // HIF_LINUX_MMC_SCATTER_SUPPORT
+
+static inline A_STATUS SetupHIFScatterSupport(HIF_DEVICE *device, HIF_DEVICE_SCATTER_SUPPORT_INFO *pInfo)
+{
+ return A_ENOTSUP;
+}
+
+static inline A_STATUS DoHifReadWriteScatter(HIF_DEVICE *device, BUS_REQUEST *busrequest)
+{
+ return A_ENOTSUP;
+}
+
+#define CleanupHIFScatterResources(d) { }
+
+#endif // HIF_LINUX_MMC_SCATTER_SUPPORT
+
+#endif // _HIF_INTERNAL_H_
+
diff --git a/drivers/staging/ath6kl/hif/sdio/linux_sdio/src/hif.c b/drivers/staging/ath6kl/hif/sdio/linux_sdio/src/hif.c
new file mode 100644
index 000000000000..c307a5559362
--- /dev/null
+++ b/drivers/staging/ath6kl/hif/sdio/linux_sdio/src/hif.c
@@ -0,0 +1,1298 @@
+//------------------------------------------------------------------------------
+// <copyright file="hif.c" company="Atheros">
+// Copyright (c) 2004-2010 Atheros Corporation. All rights reserved.
+//
+//
+// Permission to use, copy, modify, and/or distribute this software for any
+// purpose with or without fee is hereby granted, provided that the above
+// copyright notice and this permission notice appear in all copies.
+//
+// THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+// WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+// MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+// ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+// WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+// ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+// OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+//
+//
+//------------------------------------------------------------------------------
+//==============================================================================
+// HIF layer reference implementation for Linux Native MMC stack
+//
+// Author(s): ="Atheros"
+//==============================================================================
+#include <linux/mmc/card.h>
+#include <linux/mmc/mmc.h>
+#include <linux/mmc/host.h>
+#include <linux/mmc/sdio_func.h>
+#include <linux/mmc/sdio_ids.h>
+#include <linux/mmc/sdio.h>
+#include <linux/mmc/sd.h>
+#include <linux/kthread.h>
+
+/* By default, set up a bounce buffer for the data packets. If the underlying host controller driver
+ does not use DMA, you may be able to skip this step and save the memory allocation and transfer time. */
+#define HIF_USE_DMA_BOUNCE_BUFFER 1
+#include "hif_internal.h"
+#define ATH_MODULE_NAME hif
+#include "a_debug.h"
+#include "AR6002/hw2.0/hw/mbox_host_reg.h"
+
+#if HIF_USE_DMA_BOUNCE_BUFFER
+/* macro to check if DMA buffer is WORD-aligned and DMA-able. Most host controllers assume the
+ * buffer is DMA'able and will bug-check otherwise (i.e. buffers on the stack).
+ * virt_addr_valid check fails on stack memory.
+ */
+#define BUFFER_NEEDS_BOUNCE(buffer) (((unsigned long)(buffer) & 0x3) || !virt_addr_valid((buffer)))
+#else
+#define BUFFER_NEEDS_BOUNCE(buffer) (FALSE)
+#endif
+
+/* ATHENV */
+#if defined(CONFIG_PM)
+#define dev_to_sdio_func(d) container_of(d, struct sdio_func, dev)
+#define to_sdio_driver(d) container_of(d, struct sdio_driver, drv)
+static int hifDeviceSuspend(struct device *dev);
+static int hifDeviceResume(struct device *dev);
+#endif /* CONFIG_PM */
+static int hifDeviceInserted(struct sdio_func *func, const struct sdio_device_id *id);
+static void hifDeviceRemoved(struct sdio_func *func);
+static HIF_DEVICE *addHifDevice(struct sdio_func *func);
+static HIF_DEVICE *getHifDevice(struct sdio_func *func);
+static void delHifDevice(HIF_DEVICE * device);
+static int Func0_CMD52WriteByte(struct mmc_card *card, unsigned int address, unsigned char byte);
+static int Func0_CMD52ReadByte(struct mmc_card *card, unsigned int address, unsigned char *byte);
+
+int reset_sdio_on_unload = 0;
+module_param(reset_sdio_on_unload, int, 0644);
+
+extern A_UINT32 nohifscattersupport;
+
+
+/* ------ Static Variables ------ */
+static const struct sdio_device_id ar6k_id_table[] = {
+ { SDIO_DEVICE(MANUFACTURER_CODE, (MANUFACTURER_ID_AR6002_BASE | 0x0)) },
+ { SDIO_DEVICE(MANUFACTURER_CODE, (MANUFACTURER_ID_AR6002_BASE | 0x1)) },
+ { SDIO_DEVICE(MANUFACTURER_CODE, (MANUFACTURER_ID_AR6003_BASE | 0x0)) },
+ { SDIO_DEVICE(MANUFACTURER_CODE, (MANUFACTURER_ID_AR6003_BASE | 0x1)) },
+ { /* null */ },
+};
+MODULE_DEVICE_TABLE(sdio, ar6k_id_table);
+
+static struct sdio_driver ar6k_driver = {
+ .name = "ar6k_wlan",
+ .id_table = ar6k_id_table,
+ .probe = hifDeviceInserted,
+ .remove = hifDeviceRemoved,
+};
+
+#if defined(CONFIG_PM)
+/* New suspend/resume based on linux-2.6.32
+ * Need to patch linux-2.6.32 with mmc2.6.32_suspend.patch
+ * Need to patch with msmsdcc2.6.29_suspend.patch for msm_sdcc host
+ */
+static struct dev_pm_ops ar6k_device_pm_ops = {
+ .suspend = hifDeviceSuspend,
+ .resume = hifDeviceResume,
+};
+#endif /* CONFIG_PM */
+
+/* make sure we only unregister when registered. */
+static int registered = 0;
+
+OSDRV_CALLBACKS osdrvCallbacks;
+extern A_UINT32 onebitmode;
+extern A_UINT32 busspeedlow;
+extern A_UINT32 debughif;
+
+static void ResetAllCards(void);
+static A_STATUS hifDisableFunc(HIF_DEVICE *device, struct sdio_func *func);
+static A_STATUS hifEnableFunc(HIF_DEVICE *device, struct sdio_func *func);
+
+#ifdef DEBUG
+
+ATH_DEBUG_INSTANTIATE_MODULE_VAR(hif,
+ "hif",
+ "(Linux MMC) Host Interconnect Framework",
+ ATH_DEBUG_MASK_DEFAULTS,
+ 0,
+ NULL);
+
+#endif
+
+
+/* ------ Functions ------ */
+A_STATUS HIFInit(OSDRV_CALLBACKS *callbacks)
+{
+ int status;
+ AR_DEBUG_ASSERT(callbacks != NULL);
+
+ A_REGISTER_MODULE_DEBUG_INFO(hif);
+
+ /* store the callback handlers */
+ osdrvCallbacks = *callbacks;
+
+ /* Register with bus driver core */
+ AR_DEBUG_PRINTF(ATH_DEBUG_TRACE, ("AR6000: HIFInit registering\n"));
+ registered = 1;
+#if defined(CONFIG_PM)
+ if (callbacks->deviceSuspendHandler && callbacks->deviceResumeHandler) {
+ ar6k_driver.drv.pm = &ar6k_device_pm_ops;
+ }
+#endif /* CONFIG_PM */
+ status = sdio_register_driver(&ar6k_driver);
+ AR_DEBUG_ASSERT(status==0);
+
+ if (status != 0) {
+ return A_ERROR;
+ }
+
+ return A_OK;
+
+}
+
+static A_STATUS
+__HIFReadWrite(HIF_DEVICE *device,
+ A_UINT32 address,
+ A_UCHAR *buffer,
+ A_UINT32 length,
+ A_UINT32 request,
+ void *context)
+{
+ A_UINT8 opcode;
+ A_STATUS status = A_OK;
+ int ret;
+ A_UINT8 *tbuffer;
+ A_BOOL bounced = FALSE;
+
+ AR_DEBUG_ASSERT(device != NULL);
+ AR_DEBUG_ASSERT(device->func != NULL);
+
+ AR_DEBUG_PRINTF(ATH_DEBUG_TRACE, ("AR6000: Device: 0x%p, buffer:0x%p (addr:0x%X)\n",
+ device, buffer, address));
+
+ do {
+ if (request & HIF_EXTENDED_IO) {
+ //AR_DEBUG_PRINTF(ATH_DEBUG_TRACE, ("AR6000: Command type: CMD53\n"));
+ } else {
+ AR_DEBUG_PRINTF(ATH_DEBUG_ERROR,
+ ("AR6000: Invalid command type: 0x%08x\n", request));
+ status = A_EINVAL;
+ break;
+ }
+
+ if (request & HIF_BLOCK_BASIS) {
+ /* round to whole block length size */
+ length = (length / HIF_MBOX_BLOCK_SIZE) * HIF_MBOX_BLOCK_SIZE;
+ AR_DEBUG_PRINTF(ATH_DEBUG_TRACE,
+ ("AR6000: Block mode (BlockLen: %d)\n",
+ length));
+ } else if (request & HIF_BYTE_BASIS) {
+ AR_DEBUG_PRINTF(ATH_DEBUG_TRACE,
+ ("AR6000: Byte mode (BlockLen: %d)\n",
+ length));
+ } else {
+ AR_DEBUG_PRINTF(ATH_DEBUG_ERROR,
+ ("AR6000: Invalid data mode: 0x%08x\n", request));
+ status = A_EINVAL;
+ break;
+ }
+
+#if 0
+ /* useful for checking register accesses */
+ if (length & 0x3) {
+ A_PRINTF(KERN_ALERT"AR6000: HIF (%s) is not a multiple of 4 bytes, addr:0x%X, len:%d\n",
+ request & HIF_WRITE ? "write":"read", address, length);
+ }
+#endif
+
+ if (request & HIF_WRITE) {
+ if ((address >= HIF_MBOX_START_ADDR(0)) &&
+ (address <= HIF_MBOX_END_ADDR(3)))
+ {
+
+ AR_DEBUG_ASSERT(length <= HIF_MBOX_WIDTH);
+
+ /*
+ * Mailbox write. Adjust the address so that the last byte
+ * falls on the EOM address.
+ */
+ address += (HIF_MBOX_WIDTH - length);
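+ /* e.g. a 64-byte write to mailbox 0 (base 0x800, width 0x800) is issued at
+ * 0x800 + (0x800 - 64) = 0xFC0, so the last byte lands on the EOM address 0xFFF */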
+ }
+ }
+
+ if (request & HIF_FIXED_ADDRESS) {
+ opcode = CMD53_FIXED_ADDRESS;
+ AR_DEBUG_PRINTF(ATH_DEBUG_TRACE, ("AR6000: Address mode: Fixed 0x%X\n", address));
+ } else if (request & HIF_INCREMENTAL_ADDRESS) {
+ opcode = CMD53_INCR_ADDRESS;
+ AR_DEBUG_PRINTF(ATH_DEBUG_TRACE, ("AR6000: Address mode: Incremental 0x%X\n", address));
+ } else {
+ AR_DEBUG_PRINTF(ATH_DEBUG_ERROR,
+ ("AR6000: Invalid address mode: 0x%08x\n", request));
+ status = A_EINVAL;
+ break;
+ }
+
+ if (request & HIF_WRITE) {
+#if HIF_USE_DMA_BOUNCE_BUFFER
+ if (BUFFER_NEEDS_BOUNCE(buffer)) {
+ AR_DEBUG_ASSERT(device->dma_buffer != NULL);
+ tbuffer = device->dma_buffer;
+ /* copy the write data to the dma buffer */
+ AR_DEBUG_ASSERT(length <= HIF_DMA_BUFFER_SIZE);
+ memcpy(tbuffer, buffer, length);
+ bounced = TRUE;
+ } else {
+ tbuffer = buffer;
+ }
+#else
+ tbuffer = buffer;
+#endif
+ if (opcode == CMD53_FIXED_ADDRESS) {
+ ret = sdio_writesb(device->func, address, tbuffer, length);
+ AR_DEBUG_PRINTF(ATH_DEBUG_TRACE, ("AR6000: writesb ret=%d address: 0x%X, len: %d, 0x%X\n",
+ ret, address, length, *(int *)tbuffer));
+ } else {
+ ret = sdio_memcpy_toio(device->func, address, tbuffer, length);
+ AR_DEBUG_PRINTF(ATH_DEBUG_TRACE, ("AR6000: writeio ret=%d address: 0x%X, len: %d, 0x%X\n",
+ ret, address, length, *(int *)tbuffer));
+ }
+ } else if (request & HIF_READ) {
+#if HIF_USE_DMA_BOUNCE_BUFFER
+ if (BUFFER_NEEDS_BOUNCE(buffer)) {
+ AR_DEBUG_ASSERT(device->dma_buffer != NULL);
+ AR_DEBUG_ASSERT(length <= HIF_DMA_BUFFER_SIZE);
+ tbuffer = device->dma_buffer;
+ bounced = TRUE;
+ } else {
+ tbuffer = buffer;
+ }
+#else
+ tbuffer = buffer;
+#endif
+ if (opcode == CMD53_FIXED_ADDRESS) {
+ ret = sdio_readsb(device->func, tbuffer, address, length);
+ AR_DEBUG_PRINTF(ATH_DEBUG_TRACE, ("AR6000: readsb ret=%d address: 0x%X, len: %d, 0x%X\n",
+ ret, address, length, *(int *)tbuffer));
+ } else {
+ ret = sdio_memcpy_fromio(device->func, tbuffer, address, length);
+ AR_DEBUG_PRINTF(ATH_DEBUG_TRACE, ("AR6000: readio ret=%d address: 0x%X, len: %d, 0x%X\n",
+ ret, address, length, *(int *)tbuffer));
+ }
+#if HIF_USE_DMA_BOUNCE_BUFFER
+ if (bounced) {
+ /* copy the read data from the dma buffer */
+ memcpy(buffer, tbuffer, length);
+ }
+#endif
+ } else {
+ AR_DEBUG_PRINTF(ATH_DEBUG_ERROR,
+ ("AR6000: Invalid direction: 0x%08x\n", request));
+ status = A_EINVAL;
+ break;
+ }
+
+ if (ret) {
+ AR_DEBUG_PRINTF(ATH_DEBUG_ERROR,
+ ("AR6000: SDIO bus operation failed! MMC stack returned : %d \n", ret));
+ status = A_ERROR;
+ }
+ } while (FALSE);
+
+ return status;
+}
+
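+/* Append a bus request to the tail of the device's pending async list (protected by asynclock). */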
+void AddToAsyncList(HIF_DEVICE *device, BUS_REQUEST *busrequest)
+{
+ unsigned long flags;
+ BUS_REQUEST *async;
+ BUS_REQUEST *active;
+
+ spin_lock_irqsave(&device->asynclock, flags);
+ active = device->asyncreq;
+ if (active == NULL) {
+ device->asyncreq = busrequest;
+ device->asyncreq->inusenext = NULL;
+ } else {
+ for (async = device->asyncreq;
+ async != NULL;
+ async = async->inusenext) {
+ active = async;
+ }
+ active->inusenext = busrequest;
+ busrequest->inusenext = NULL;
+ }
+ spin_unlock_irqrestore(&device->asynclock, flags);
+}
+
+
+/* queue a read/write request */
+A_STATUS
+HIFReadWrite(HIF_DEVICE *device,
+ A_UINT32 address,
+ A_UCHAR *buffer,
+ A_UINT32 length,
+ A_UINT32 request,
+ void *context)
+{
+ A_STATUS status = A_OK;
+ BUS_REQUEST *busrequest;
+
+
+ AR_DEBUG_ASSERT(device != NULL);
+ AR_DEBUG_ASSERT(device->func != NULL);
+
+ AR_DEBUG_PRINTF(ATH_DEBUG_TRACE, ("AR6000: Device: %p addr:0x%X\n", device,address));
+
+ do {
+ if ((request & HIF_ASYNCHRONOUS) || (request & HIF_SYNCHRONOUS)){
+ /* serialize all requests through the async thread */
+ AR_DEBUG_PRINTF(ATH_DEBUG_TRACE, ("AR6000: Execution mode: %s\n",
+ (request & HIF_ASYNCHRONOUS)?"Async":"Synch"));
+ busrequest = hifAllocateBusRequest(device);
+ if (busrequest == NULL) {
+ AR_DEBUG_PRINTF(ATH_DEBUG_ERROR,
+ ("AR6000: no async bus requests available (%s, addr:0x%X, len:%d) \n",
+ request & HIF_READ ? "READ":"WRITE", address, length));
+ return A_ERROR;
+ }
+ busrequest->address = address;
+ busrequest->buffer = buffer;
+ busrequest->length = length;
+ busrequest->request = request;
+ busrequest->context = context;
+
+ AddToAsyncList(device, busrequest);
+
+ if (request & HIF_SYNCHRONOUS) {
+ AR_DEBUG_PRINTF(ATH_DEBUG_TRACE, ("AR6000: queued sync req: 0x%lX\n", (unsigned long)busrequest));
+
+ /* wait for completion */
+ up(&device->sem_async);
+ if (down_interruptible(&busrequest->sem_req) != 0) {
+ /* interrupted, exit */
+ return A_ERROR;
+ } else {
+ A_STATUS status = busrequest->status;
+ AR_DEBUG_PRINTF(ATH_DEBUG_TRACE, ("AR6000: sync return freeing 0x%lX: 0x%X\n",
+ (unsigned long)busrequest, busrequest->status));
+ AR_DEBUG_PRINTF(ATH_DEBUG_TRACE, ("AR6000: freeing req: 0x%X\n", (unsigned int)request));
+ hifFreeBusRequest(device, busrequest);
+ return status;
+ }
+ } else {
+ AR_DEBUG_PRINTF(ATH_DEBUG_TRACE, ("AR6000: queued async req: 0x%lX\n", (unsigned long)busrequest));
+ up(&device->sem_async);
+ return A_PENDING;
+ }
+ } else {
+ AR_DEBUG_PRINTF(ATH_DEBUG_ERROR,
+ ("AR6000: Invalid execution mode: 0x%08x\n", (unsigned int)request));
+ status = A_EINVAL;
+ break;
+ }
+ } while(0);
+
+ return status;
+}
+/* thread to serialize all requests, both sync and async */
+static int async_task(void *param)
+{
+ HIF_DEVICE *device;
+ BUS_REQUEST *request;
+ A_STATUS status;
+ unsigned long flags;
+
+ device = (HIF_DEVICE *)param;
+ AR_DEBUG_PRINTF(ATH_DEBUG_TRACE, ("AR6000: async task\n"));
+ set_current_state(TASK_INTERRUPTIBLE);
+ while(!device->async_shutdown) {
+ /* wait for work */
+ if (down_interruptible(&device->sem_async) != 0) {
+ /* interrupted, exit */
+ AR_DEBUG_PRINTF(ATH_DEBUG_TRACE, ("AR6000: async task interrupted\n"));
+ break;
+ }
+ if (device->async_shutdown) {
+ AR_DEBUG_PRINTF(ATH_DEBUG_TRACE, ("AR6000: async task stopping\n"));
+ break;
+ }
+ /* we want to hold the host over multiple cmds if possible, but holding the host blocks card interrupts */
+ sdio_claim_host(device->func);
+ spin_lock_irqsave(&device->asynclock, flags);
+ /* pull the request to work on */
+ while (device->asyncreq != NULL) {
+ request = device->asyncreq;
+ if (request->inusenext != NULL) {
+ device->asyncreq = request->inusenext;
+ } else {
+ device->asyncreq = NULL;
+ }
+ spin_unlock_irqrestore(&device->asynclock, flags);
+ AR_DEBUG_PRINTF(ATH_DEBUG_TRACE, ("AR6000: async_task processing req: 0x%lX\n", (unsigned long)request));
+
+ if (request->pScatterReq != NULL) {
+ A_ASSERT(device->scatter_enabled);
+ /* this is a queued scatter request, pass the request to scatter routine which
+ * executes it synchronously, note, no need to free the request since scatter requests
+ * are maintained on a separate list */
+ status = DoHifReadWriteScatter(device,request);
+ } else {
+ /* call HIFReadWrite in sync mode to do the work */
+ status = __HIFReadWrite(device, request->address, request->buffer,
+ request->length, request->request & ~HIF_SYNCHRONOUS, NULL);
+ if (request->request & HIF_ASYNCHRONOUS) {
+ void *context = request->context;
+ AR_DEBUG_PRINTF(ATH_DEBUG_TRACE, ("AR6000: async_task freeing req: 0x%lX\n", (unsigned long)request));
+ hifFreeBusRequest(device, request);
+ AR_DEBUG_PRINTF(ATH_DEBUG_TRACE, ("AR6000: async_task completion routine req: 0x%lX\n", (unsigned long)request));
+ device->htcCallbacks.rwCompletionHandler(context, status);
+ } else {
+ AR_DEBUG_PRINTF(ATH_DEBUG_TRACE, ("AR6000: async_task upping req: 0x%lX\n", (unsigned long)request));
+ request->status = status;
+ up(&request->sem_req);
+ }
+ }
+ spin_lock_irqsave(&device->asynclock, flags);
+ }
+ spin_unlock_irqrestore(&device->asynclock, flags);
+ sdio_release_host(device->func);
+ }
+
+ complete_and_exit(&device->async_completion, 0);
+ return 0;
+}
+
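+/* Issue a raw SD/MMC command directly on the host controller, bypassing the SDIO
+ * function layer; mmc_wait_for_cmd retries up to 3 times and the first response
+ * word is returned through *resp. */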
+static A_INT32 IssueSDCommand(HIF_DEVICE *device, A_UINT32 opcode, A_UINT32 arg, A_UINT32 flags, A_UINT32 *resp)
+{
+ struct mmc_command cmd;
+ A_INT32 err;
+ struct mmc_host *host;
+ struct sdio_func *func;
+
+ func = device->func;
+ host = func->card->host;
+
+ memset(&cmd, 0, sizeof(struct mmc_command));
+ cmd.opcode = opcode;
+ cmd.arg = arg;
+ cmd.flags = flags;
+ err = mmc_wait_for_cmd(host, &cmd, 3);
+
+ if ((!err) && (resp)) {
+ *resp = cmd.resp[0];
+ }
+
+ return err;
+}
+
+A_STATUS ReinitSDIO(HIF_DEVICE *device)
+{
+ A_INT32 err;
+ struct mmc_host *host;
+ struct mmc_card *card;
+ struct sdio_func *func;
+ A_UINT8 cmd52_resp;
+ A_UINT32 clock;
+
+ func = device->func;
+ card = func->card;
+ host = card->host;
+
+ AR_DEBUG_PRINTF(ATH_DEBUG_TRACE, ("AR6000: +ReinitSDIO \n"));
+ sdio_claim_host(func);
+
+ do {
+ if (!device->is_suspend) {
+ A_UINT32 resp;
+ A_UINT16 rca;
+ A_UINT32 i;
+ int bit = fls(host->ocr_avail) - 1;
+ /* emulate the mmc_power_up(...) */
+ host->ios.vdd = bit;
+ host->ios.chip_select = MMC_CS_DONTCARE;
+ host->ios.bus_mode = MMC_BUSMODE_OPENDRAIN;
+ host->ios.power_mode = MMC_POWER_UP;
+ host->ios.bus_width = MMC_BUS_WIDTH_1;
+ host->ios.timing = MMC_TIMING_LEGACY;
+ host->ops->set_ios(host, &host->ios);
+ /*
+ * This delay should be sufficient to allow the power supply
+ * to reach the minimum voltage.
+ */
+ msleep(2);
+
+ host->ios.clock = host->f_min;
+ host->ios.power_mode = MMC_POWER_ON;
+ host->ops->set_ios(host, &host->ios);
+
+ /*
+ * This delay must be at least 74 clock cycles, or 1 ms, or the
+ * time required to reach a stable voltage.
+ */
+ msleep(2);
+
+ /* Issue CMD0. Goto idle state */
+ host->ios.chip_select = MMC_CS_HIGH;
+ host->ops->set_ios(host, &host->ios);
+ msleep(1);
+ err = IssueSDCommand(device, MMC_GO_IDLE_STATE, 0, (MMC_RSP_NONE | MMC_CMD_BC), NULL);
+ host->ios.chip_select = MMC_CS_DONTCARE;
+ host->ops->set_ios(host, &host->ios);
+ msleep(1);
+ host->use_spi_crc = 0;
+
+ if (err) {
+ AR_DEBUG_PRINTF(ATH_DEBUG_ERR, ("ReinitSDIO: CMD0 failed : %d \n",err));
+ break;
+ }
+
+ if (!host->ocr) {
+ /* Issue CMD5, arg = 0 */
+ err = IssueSDCommand(device, SD_IO_SEND_OP_COND, 0, (MMC_RSP_R4 | MMC_CMD_BCR), &resp);
+ if (err) {
+ AR_DEBUG_PRINTF(ATH_DEBUG_ERR, ("ReinitSDIO: CMD5 failed : %d \n",err));
+ break;
+ }
+ host->ocr = resp;
+ }
+
+ /* Issue CMD5, arg = ocr. Wait till card is ready */
+ for (i=0;i<100;i++) {
+ err = IssueSDCommand(device, SD_IO_SEND_OP_COND, host->ocr, (MMC_RSP_R4 | MMC_CMD_BCR), &resp);
+ if (err) {
+ AR_DEBUG_PRINTF(ATH_DEBUG_ERR, ("ReinitSDIO: CMD5 failed : %d \n",err));
+ break;
+ }
+ if (resp & MMC_CARD_BUSY) {
+ break;
+ }
+ msleep(10);
+ }
+
+ if ((i == 100) || (err)) {
+ AR_DEBUG_PRINTF(ATH_DEBUG_ERR, ("ReinitSDIO: card in not ready : %d %d \n",i,err));
+ break;
+ }
+
+ /* Issue CMD3, get RCA */
+ err = IssueSDCommand(device, SD_SEND_RELATIVE_ADDR, 0, MMC_RSP_R6 | MMC_CMD_BCR, &resp);
+ if (err) {
+ AR_DEBUG_PRINTF(ATH_DEBUG_ERR, ("ReinitSDIO: CMD3 failed : %d \n",err));
+ break;
+ }
+ rca = resp >> 16;
+ host->ios.bus_mode = MMC_BUSMODE_PUSHPULL;
+ host->ops->set_ios(host, &host->ios);
+
+ /* Issue CMD7, select card */
+ err = IssueSDCommand(device, MMC_SELECT_CARD, (rca << 16), MMC_RSP_R1 | MMC_CMD_AC, NULL);
+ if (err) {
+ AR_DEBUG_PRINTF(ATH_DEBUG_ERR, ("ReinitSDIO: CMD7 failed : %d \n",err));
+ break;
+ }
+ }
+
+ /* Enable high speed */
+ if (card->host->caps & MMC_CAP_SD_HIGHSPEED) {
+ AR_DEBUG_PRINTF(ATH_DEBUG_TRACE, ("ReinitSDIO: Set high speed mode\n"));
+ err = Func0_CMD52ReadByte(card, SDIO_CCCR_SPEED, &cmd52_resp);
+ if (err) {
+ AR_DEBUG_PRINTF(ATH_DEBUG_ERR, ("ReinitSDIO: CMD52 read to CCCR speed register failed : %d \n",err));
+ card->state &= ~MMC_STATE_HIGHSPEED;
+ /* no need to break */
+ } else {
+ err = Func0_CMD52WriteByte(card, SDIO_CCCR_SPEED, (cmd52_resp | SDIO_SPEED_EHS));
+ if (err) {
+ AR_DEBUG_PRINTF(ATH_DEBUG_ERR, ("ReinitSDIO: CMD52 write to CCCR speed register failed : %d \n",err));
+ break;
+ }
+ mmc_card_set_highspeed(card);
+ host->ios.timing = MMC_TIMING_SD_HS;
+ host->ops->set_ios(host, &host->ios);
+ }
+ }
+
+ /* Set clock */
+ if (mmc_card_highspeed(card)) {
+ clock = 50000000;
+ } else {
+ clock = card->cis.max_dtr;
+ }
+
+ if (clock > host->f_max) {
+ clock = host->f_max;
+ }
+ host->ios.clock = clock;
+ host->ops->set_ios(host, &host->ios);
+
+
+ if (card->host->caps & MMC_CAP_4_BIT_DATA) {
+ /* CMD52: Set bus width & disable card detect resistor */
+ err = Func0_CMD52WriteByte(card, SDIO_CCCR_IF, SDIO_BUS_CD_DISABLE | SDIO_BUS_WIDTH_4BIT);
+ if (err) {
+ AR_DEBUG_PRINTF(ATH_DEBUG_ERR, ("ReinitSDIO: CMD52 to set bus mode failed : %d \n",err));
+ break;
+ }
+ host->ios.bus_width = MMC_BUS_WIDTH_4;
+ host->ops->set_ios(host, &host->ios);
+ }
+ } while (0);
+
+ sdio_release_host(func);
+ AR_DEBUG_PRINTF(ATH_DEBUG_TRACE, ("AR6000: -ReinitSDIO \n"));
+
+ return (err) ? A_ERROR : A_OK;
+}
+
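+/* Handle HIF power state transitions: POWER_DOWN/POWER_CUT disable the SDIO function
+ * and (when not suspending) power off the host; POWER_UP re-initializes the SDIO
+ * interface after a power cut and re-enables the function. */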
+A_STATUS
+PowerStateChangeNotify(HIF_DEVICE *device, HIF_DEVICE_POWER_CHANGE_TYPE config)
+{
+ A_STATUS status = A_OK;
+#if defined(CONFIG_PM)
+ struct sdio_func *func = device->func;
+ int old_reset_val;
+ AR_DEBUG_PRINTF(ATH_DEBUG_TRACE, ("AR6000: +PowerStateChangeNotify %d\n", config));
+ switch (config) {
+ case HIF_DEVICE_POWER_DOWN:
+ case HIF_DEVICE_POWER_CUT:
+ old_reset_val = reset_sdio_on_unload;
+ reset_sdio_on_unload = 1;
+ status = hifDisableFunc(device, func);
+ reset_sdio_on_unload = old_reset_val;
+ if (!device->is_suspend) {
+ struct mmc_host *host = func->card->host;
+ host->ios.clock = 0;
+ host->ios.vdd = 0;
+ host->ios.bus_mode = MMC_BUSMODE_OPENDRAIN;
+ host->ios.chip_select = MMC_CS_DONTCARE;
+ host->ios.power_mode = MMC_POWER_OFF;
+ host->ios.bus_width = MMC_BUS_WIDTH_1;
+ host->ios.timing = MMC_TIMING_LEGACY;
+ host->ops->set_ios(host, &host->ios);
+ }
+ break;
+ case HIF_DEVICE_POWER_UP:
+ if (device->powerConfig == HIF_DEVICE_POWER_CUT) {
+ status = ReinitSDIO(device);
+ }
+ if (status == A_OK) {
+ status = hifEnableFunc(device, func);
+ }
+ break;
+ }
+ device->powerConfig = config;
+
+ AR_DEBUG_PRINTF(ATH_DEBUG_TRACE, ("AR6000: -PowerStateChangeNotify\n"));
+#endif
+ return status;
+}
+
+A_STATUS
+HIFConfigureDevice(HIF_DEVICE *device, HIF_DEVICE_CONFIG_OPCODE opcode,
+ void *config, A_UINT32 configLen)
+{
+ A_UINT32 count;
+ A_STATUS status = A_OK;
+
+ switch(opcode) {
+ case HIF_DEVICE_GET_MBOX_BLOCK_SIZE:
+ ((A_UINT32 *)config)[0] = HIF_MBOX0_BLOCK_SIZE;
+ ((A_UINT32 *)config)[1] = HIF_MBOX1_BLOCK_SIZE;
+ ((A_UINT32 *)config)[2] = HIF_MBOX2_BLOCK_SIZE;
+ ((A_UINT32 *)config)[3] = HIF_MBOX3_BLOCK_SIZE;
+ break;
+
+ case HIF_DEVICE_GET_MBOX_ADDR:
+ for (count = 0; count < 4; count ++) {
+ ((A_UINT32 *)config)[count] = HIF_MBOX_START_ADDR(count);
+ }
+
+ if (configLen >= sizeof(HIF_DEVICE_MBOX_INFO)) {
+ SetExtendedMboxWindowInfo((A_UINT16)device->func->device,
+ (HIF_DEVICE_MBOX_INFO *)config);
+ }
+
+ break;
+ case HIF_DEVICE_GET_IRQ_PROC_MODE:
+ *((HIF_DEVICE_IRQ_PROCESSING_MODE *)config) = HIF_DEVICE_IRQ_SYNC_ONLY;
+ break;
+ case HIF_CONFIGURE_QUERY_SCATTER_REQUEST_SUPPORT:
+ if (!device->scatter_enabled) {
+ return A_ENOTSUP;
+ }
+ status = SetupHIFScatterSupport(device, (HIF_DEVICE_SCATTER_SUPPORT_INFO *)config);
+ if (A_FAILED(status)) {
+ device->scatter_enabled = FALSE;
+ }
+ break;
+ case HIF_DEVICE_GET_OS_DEVICE:
+ /* pass back a pointer to the SDIO function's "dev" struct */
+ ((HIF_DEVICE_OS_DEVICE_INFO *)config)->pOSDevice = &device->func->dev;
+ break;
+ case HIF_DEVICE_POWER_STATE_CHANGE:
+ status = PowerStateChangeNotify(device, *(HIF_DEVICE_POWER_CHANGE_TYPE *)config);
+ break;
+ default:
+ AR_DEBUG_PRINTF(ATH_DEBUG_WARN,
+ ("AR6000: Unsupported configuration opcode: %d\n", opcode));
+ status = A_ERROR;
+ }
+
+ return status;
+}
+
+void
+HIFShutDownDevice(HIF_DEVICE *device)
+{
+ AR_DEBUG_PRINTF(ATH_DEBUG_TRACE, ("AR6000: +HIFShutDownDevice\n"));
+ if (device != NULL) {
+ AR_DEBUG_ASSERT(device->func != NULL);
+ } else {
+ /* since we are unloading the driver anyways, reset all cards in case the SDIO card
+ * is externally powered and we are unloading the SDIO stack. This avoids the problem when
+ * the SDIO stack is reloaded and attempts are made to re-enumerate a card that is already
+ * enumerated */
+ AR_DEBUG_PRINTF(ATH_DEBUG_TRACE, ("AR6000: HIFShutDownDevice, resetting\n"));
+ ResetAllCards();
+
+ /* Unregister with bus driver core */
+ if (registered) {
+ registered = 0;
+ AR_DEBUG_PRINTF(ATH_DEBUG_TRACE,
+ ("AR6000: Unregistering with the bus driver\n"));
+ sdio_unregister_driver(&ar6k_driver);
+ AR_DEBUG_PRINTF(ATH_DEBUG_TRACE,
+ ("AR6000: Unregistered\n"));
+ }
+ }
+ AR_DEBUG_PRINTF(ATH_DEBUG_TRACE, ("AR6000: -HIFShutDownDevice\n"));
+}
+
+static void
+hifIRQHandler(struct sdio_func *func)
+{
+ A_STATUS status;
+ HIF_DEVICE *device;
+ AR_DEBUG_PRINTF(ATH_DEBUG_TRACE, ("AR6000: +hifIRQHandler\n"));
+
+ device = getHifDevice(func);
+ atomic_set(&device->irqHandling, 1);
+ /* release the host during ints so we can pick it back up when we process cmds */
+ sdio_release_host(device->func);
+ status = device->htcCallbacks.dsrHandler(device->htcCallbacks.context);
+ sdio_claim_host(device->func);
+ atomic_set(&device->irqHandling, 0);
+ AR_DEBUG_ASSERT(status == A_OK || status == A_ECANCELED);
+ AR_DEBUG_PRINTF(ATH_DEBUG_TRACE, ("AR6000: -hifIRQHandler\n"));
+}
+
+/* handle HTC startup via thread*/
+static int startup_task(void *param)
+{
+ HIF_DEVICE *device;
+
+ device = (HIF_DEVICE *)param;
+ AR_DEBUG_PRINTF(ATH_DEBUG_TRACE, ("AR6000: call HTC from startup_task\n"));
+ /* start up inform DRV layer */
+ if ((osdrvCallbacks.deviceInsertedHandler(osdrvCallbacks.context,device)) != A_OK) {
+ AR_DEBUG_PRINTF(ATH_DEBUG_TRACE, ("AR6000: Device rejected\n"));
+ }
+ return 0;
+}
+
+#if defined(CONFIG_PM)
+static int enable_task(void *param)
+{
+ HIF_DEVICE *device;
+ device = (HIF_DEVICE *)param;
+ AR_DEBUG_PRINTF(ATH_DEBUG_TRACE, ("AR6000: call from resume_task\n"));
+
+ /* start up inform DRV layer */
+ if (device &&
+ device->claimedContext &&
+ osdrvCallbacks.devicePowerChangeHandler &&
+ osdrvCallbacks.devicePowerChangeHandler(device->claimedContext, HIF_DEVICE_POWER_UP) != A_OK)
+ {
+ AR_DEBUG_PRINTF(ATH_DEBUG_TRACE, ("AR6000: Device rejected\n"));
+ }
+
+ return 0;
+}
+#endif
+
+static int hifDeviceInserted(struct sdio_func *func, const struct sdio_device_id *id)
+{
+ int ret;
+ HIF_DEVICE * device;
+ int count;
+
+ AR_DEBUG_PRINTF(ATH_DEBUG_TRACE,
+ ("AR6000: hifDeviceInserted, Function: 0x%X, Vendor ID: 0x%X, Device ID: 0x%X, block size: 0x%X/0x%X\n",
+ func->num, func->vendor, func->device, func->max_blksize, func->cur_blksize));
+
+ addHifDevice(func);
+ device = getHifDevice(func);
+
+ device->id = id;
+ device->is_disabled = TRUE;
+
+ spin_lock_init(&device->lock);
+
+ spin_lock_init(&device->asynclock);
+
+ DL_LIST_INIT(&device->ScatterReqHead);
+
+ if (!nohifscattersupport) {
+ /* try to allow scatter operation on all instances,
+ * unless globally overridden */
+ device->scatter_enabled = TRUE;
+ }
+
+ /* Initialize the bus requests to be used later */
+ A_MEMZERO(device->busRequest, sizeof(device->busRequest));
+ for (count = 0; count < BUS_REQUEST_MAX_NUM; count ++) {
+ sema_init(&device->busRequest[count].sem_req, 0);
+ hifFreeBusRequest(device, &device->busRequest[count]);
+ }
+ sema_init(&device->sem_async, 0);
+
+ ret = hifEnableFunc(device, func);
+
+ return ret;
+}
+
+
+void
+HIFAckInterrupt(HIF_DEVICE *device)
+{
+ AR_DEBUG_ASSERT(device != NULL);
+
+ /* Acknowledge our function IRQ */
+}
+
+void
+HIFUnMaskInterrupt(HIF_DEVICE *device)
+{
+ int ret;
+
+ AR_DEBUG_ASSERT(device != NULL);
+ AR_DEBUG_ASSERT(device->func != NULL);
+
+ AR_DEBUG_PRINTF(ATH_DEBUG_TRACE, ("AR6000: HIFUnMaskInterrupt\n"));
+
+ /* Register the IRQ Handler */
+ sdio_claim_host(device->func);
+ ret = sdio_claim_irq(device->func, hifIRQHandler);
+ sdio_release_host(device->func);
+ AR_DEBUG_ASSERT(ret == 0);
+}
+
+void HIFMaskInterrupt(HIF_DEVICE *device)
+{
+ int ret;
+ AR_DEBUG_ASSERT(device != NULL);
+ AR_DEBUG_ASSERT(device->func != NULL);
+
+ AR_DEBUG_PRINTF(ATH_DEBUG_TRACE, ("AR6000: HIFMaskInterrupt\n"));
+
+ /* Mask our function IRQ */
+ sdio_claim_host(device->func);
+ while (atomic_read(&device->irqHandling)) {
+ sdio_release_host(device->func);
+ schedule_timeout(HZ/10);
+ sdio_claim_host(device->func);
+ }
+ ret = sdio_release_irq(device->func);
+ sdio_release_host(device->func);
+ AR_DEBUG_ASSERT(ret == 0);
+}
+
+BUS_REQUEST *hifAllocateBusRequest(HIF_DEVICE *device)
+{
+ BUS_REQUEST *busrequest;
+ unsigned long flag;
+
+ /* Acquire lock */
+ spin_lock_irqsave(&device->lock, flag);
+
+ /* Remove first in list */
+ if((busrequest = device->s_busRequestFreeQueue) != NULL)
+ {
+ device->s_busRequestFreeQueue = busrequest->next;
+ }
+ /* Release lock */
+ spin_unlock_irqrestore(&device->lock, flag);
+ AR_DEBUG_PRINTF(ATH_DEBUG_TRACE, ("AR6000: hifAllocateBusRequest: 0x%p\n", busrequest));
+ return busrequest;
+}
+
+void
+hifFreeBusRequest(HIF_DEVICE *device, BUS_REQUEST *busrequest)
+{
+ unsigned long flag;
+
+ AR_DEBUG_ASSERT(busrequest != NULL);
+ AR_DEBUG_PRINTF(ATH_DEBUG_TRACE, ("AR6000: hifFreeBusRequest: 0x%p\n", busrequest));
+ /* Acquire lock */
+ spin_lock_irqsave(&device->lock, flag);
+
+
+ /* Insert first in list */
+ busrequest->next = device->s_busRequestFreeQueue;
+ busrequest->inusenext = NULL;
+ device->s_busRequestFreeQueue = busrequest;
+
+ /* Release lock */
+ spin_unlock_irqrestore(&device->lock, flag);
+}
+
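+/* Stop the async I/O thread, disable the SDIO function and, if reset_sdio_on_unload
+ * is set, reset the card by setting the RES bit in the CCCR I/O abort register. */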
+static A_STATUS hifDisableFunc(HIF_DEVICE *device, struct sdio_func *func)
+{
+ int ret;
+ A_STATUS status = A_OK;
+
+ AR_DEBUG_PRINTF(ATH_DEBUG_TRACE, ("AR6000: +hifDisableFunc\n"));
+ device = getHifDevice(func);
+ if (!IS_ERR(device->async_task)) {
+ init_completion(&device->async_completion);
+ device->async_shutdown = 1;
+ up(&device->sem_async);
+ wait_for_completion(&device->async_completion);
+ device->async_task = NULL;
+ }
+ /* Disable the card */
+ sdio_claim_host(device->func);
+ ret = sdio_disable_func(device->func);
+ if (ret) {
+ status = A_ERROR;
+ }
+
+ if (reset_sdio_on_unload) {
+ /* reset the SDIO interface. This is useful in automated testing where the card
+ * does not need to be removed at the end of the test. It is expected that the user will
+ * also unload/reload the host controller driver to force the bus driver to re-enumerate the slot */
+ AR_DEBUG_PRINTF(ATH_DEBUG_WARN, ("AR6000: reseting SDIO card back to uninitialized state \n"));
+
+ /* NOTE : sdio_f0_writeb() cannot be used here, that API only allows access
+ * to undefined registers in the range of: 0xF0-0xFF */
+
+ ret = Func0_CMD52WriteByte(device->func->card, SDIO_CCCR_ABORT, (1 << 3));
+ if (ret) {
+ status = A_ERROR;
+ AR_DEBUG_PRINTF(ATH_DEBUG_ERR, ("AR6000: reset failed : %d \n",ret));
+ }
+ }
+
+ sdio_release_host(device->func);
+
+ if (status == A_OK) {
+ device->is_disabled = TRUE;
+ }
+ AR_DEBUG_PRINTF(ATH_DEBUG_TRACE, ("AR6000: -hifDisableFunc\n"));
+
+ return status;
+}
+
+static int hifEnableFunc(HIF_DEVICE *device, struct sdio_func *func)
+{
+ struct task_struct* pTask;
+ const char *taskName = NULL;
+ int (*taskFunc)(void *) = NULL;
+ int ret = A_OK;
+
+ AR_DEBUG_PRINTF(ATH_DEBUG_TRACE, ("AR6000: +hifEnableFunc\n"));
+ device = getHifDevice(func);
+
+ if (device->is_disabled) {
+ /* enable the SDIO function */
+ sdio_claim_host(func);
+
+ if ((device->id->device & MANUFACTURER_ID_AR6K_BASE_MASK) >= MANUFACTURER_ID_AR6003_BASE) {
+ /* enable 4-bit ASYNC interrupt on AR6003 or later devices */
+ ret = Func0_CMD52WriteByte(func->card, CCCR_SDIO_IRQ_MODE_REG, SDIO_IRQ_MODE_ASYNC_4BIT_IRQ);
+ if (ret) {
+ AR_DEBUG_PRINTF(ATH_DEBUG_ERR, ("AR6000: failed to enable 4-bit ASYNC IRQ mode %d \n",ret));
+ sdio_release_host(func);
+ return A_ERROR;
+ }
+ AR_DEBUG_PRINTF(ATH_DEBUG_TRACE, ("AR6000: 4-bit ASYNC IRQ mode enabled\n"));
+ }
+ /* give us some time to enable, in ms */
+ func->enable_timeout = 100;
+ ret = sdio_enable_func(func);
+ if (ret) {
+ AR_DEBUG_PRINTF(ATH_DEBUG_ERROR, ("AR6000: %s(), Unable to enable AR6K: 0x%X\n",
+ __FUNCTION__, ret));
+ sdio_release_host(func);
+ return A_ERROR;
+ }
+ ret = sdio_set_block_size(func, HIF_MBOX_BLOCK_SIZE);
+ sdio_release_host(func);
+ if (ret) {
+ AR_DEBUG_PRINTF(ATH_DEBUG_ERROR, ("AR6000: %s(), Unable to set block size 0x%x AR6K: 0x%X\n",
+ __FUNCTION__, HIF_MBOX_BLOCK_SIZE, ret));
+ return A_ERROR;
+ }
+ device->is_disabled = FALSE;
+ /* create async I/O thread */
+ if (!device->async_task) {
+ device->async_shutdown = 0;
+ device->async_task = kthread_create(async_task,
+ (void *)device,
+ "AR6K Async");
+ if (IS_ERR(device->async_task)) {
+ AR_DEBUG_PRINTF(ATH_DEBUG_ERROR, ("AR6000: %s(), failed to create async task\n", __FUNCTION__));
+ return A_ERROR;
+ }
+ AR_DEBUG_PRINTF(ATH_DEBUG_TRACE, ("AR6000: start async task\n"));
+ wake_up_process(device->async_task);
+ }
+ }
+
+ if (!device->claimedContext) {
+ taskFunc = startup_task;
+ taskName = "AR6K startup";
+ ret = A_OK;
+#if defined(CONFIG_PM)
+ } else {
+ taskFunc = enable_task;
+ taskName = "AR6K enable";
+ ret = A_PENDING;
+#endif /* CONFIG_PM */
+ }
+ /* create resume thread */
+ pTask = kthread_create(taskFunc, (void *)device, taskName);
+ if (IS_ERR(pTask)) {
+ AR_DEBUG_PRINTF(ATH_DEBUG_ERROR, ("AR6000: %s(), failed to create enable task\n", __FUNCTION__));
+ return A_ERROR;
+ }
+ wake_up_process(pTask);
+ AR_DEBUG_PRINTF(ATH_DEBUG_TRACE, ("AR6000: -hifEnableFunc\n"));
+
+ /* task will call the enable func, indicate pending */
+ return ret;
+}
+
+#if defined(CONFIG_PM)
+static int hifDeviceSuspend(struct device *dev)
+{
+ struct sdio_func *func = dev_to_sdio_func(dev);
+ A_STATUS status = A_OK;
+ HIF_DEVICE *device;
+
+ device = getHifDevice(func);
+ AR_DEBUG_PRINTF(ATH_DEBUG_TRACE, ("AR6000: +hifDeviceSuspend\n"));
+ if (device && device->claimedContext && osdrvCallbacks.deviceSuspendHandler) {
+ device->is_suspend = TRUE; /* set true first for PowerStateChangeNotify(..) */
+ status = osdrvCallbacks.deviceSuspendHandler(device->claimedContext);
+ if (status != A_OK) {
+ device->is_suspend = FALSE;
+ }
+ }
+ AR_DEBUG_PRINTF(ATH_DEBUG_TRACE, ("AR6000: -hifDeviceSuspend\n"));
+
+ switch (status) {
+ case A_OK:
+ return 0;
+ case A_EBUSY:
+ return -EBUSY; /* Hack for kernel in order to support deep sleep and wow */
+ default:
+ return -1;
+ }
+}
+
+static int hifDeviceResume(struct device *dev)
+{
+ struct sdio_func *func = dev_to_sdio_func(dev);
+ A_STATUS status = A_OK;
+ HIF_DEVICE *device;
+
+ device = getHifDevice(func);
+ AR_DEBUG_PRINTF(ATH_DEBUG_TRACE, ("AR6000: +hifDeviceResume\n"));
+ if (device && device->claimedContext && osdrvCallbacks.deviceResumeHandler) {
+ status = osdrvCallbacks.deviceResumeHandler(device->claimedContext);
+ if (status == A_OK) {
+ device->is_suspend = FALSE;
+ }
+ }
+ AR_DEBUG_PRINTF(ATH_DEBUG_TRACE, ("AR6000: -hifDeviceResume\n"));
+
+ return A_SUCCESS(status) ? 0 : status;
+}
+#endif /* CONFIG_PM */
+
+static void hifDeviceRemoved(struct sdio_func *func)
+{
+ A_STATUS status = A_OK;
+ HIF_DEVICE *device;
+ AR_DEBUG_ASSERT(func != NULL);
+
+ AR_DEBUG_PRINTF(ATH_DEBUG_TRACE, ("AR6000: +hifDeviceRemoved\n"));
+ device = getHifDevice(func);
+ if (device->claimedContext != NULL) {
+ status = osdrvCallbacks.deviceRemovedHandler(device->claimedContext, device);
+ }
+
+ if (device->is_disabled) {
+ device->is_disabled = FALSE;
+ } else {
+ status = hifDisableFunc(device, func);
+ }
+ CleanupHIFScatterResources(device);
+
+ delHifDevice(device);
+ AR_DEBUG_ASSERT(status == A_OK);
+ AR_DEBUG_PRINTF(ATH_DEBUG_TRACE, ("AR6000: -hifDeviceRemoved\n"));
+}
+
+/*
+ * This should be moved to AR6K HTC layer.
+ */
+A_STATUS hifWaitForPendingRecv(HIF_DEVICE *device)
+{
+ A_INT32 cnt = 10;
+ A_UINT8 host_int_status;
+ A_STATUS status = A_OK;
+
+ do {
+ while (atomic_read(&device->irqHandling)) {
+ /* wait until irq handler finished all the jobs */
+ schedule_timeout(HZ/10);
+ }
+ /* check if there is any pending irq due to force done */
+ host_int_status = 0;
+ status = HIFReadWrite(device, HOST_INT_STATUS_ADDRESS,
+ (A_UINT8 *)&host_int_status, sizeof(host_int_status),
+ HIF_RD_SYNC_BYTE_INC, NULL);
+ host_int_status = A_SUCCESS(status) ? (host_int_status & (1 << 0)) : 0;
+ if (host_int_status) {
+ schedule(); /* schedule for next dsrHandler */
+ }
+ } while (host_int_status && --cnt > 0);
+
+ if (host_int_status && cnt == 0) {
+ AR_DEBUG_PRINTF(ATH_DEBUG_ERROR,
+ ("AR6000: %s(), Unable clear up pending IRQ before the system suspended\n", __FUNCTION__));
+ }
+
+ return A_OK;
+}
+
+
+static HIF_DEVICE *
+addHifDevice(struct sdio_func *func)
+{
+ HIF_DEVICE *hifdevice;
+ AR_DEBUG_PRINTF(ATH_DEBUG_TRACE, ("AR6000: addHifDevice\n"));
+ AR_DEBUG_ASSERT(func != NULL);
+ hifdevice = (HIF_DEVICE *)kzalloc(sizeof(HIF_DEVICE), GFP_KERNEL);
+ AR_DEBUG_ASSERT(hifdevice != NULL);
+#if HIF_USE_DMA_BOUNCE_BUFFER
+ hifdevice->dma_buffer = kmalloc(HIF_DMA_BUFFER_SIZE, GFP_KERNEL);
+ AR_DEBUG_ASSERT(hifdevice->dma_buffer != NULL);
+#endif
+ hifdevice->func = func;
+ hifdevice->powerConfig = HIF_DEVICE_POWER_UP;
+ sdio_set_drvdata(func, hifdevice);
+ AR_DEBUG_PRINTF(ATH_DEBUG_TRACE, ("AR6000: addHifDevice; 0x%p\n", hifdevice));
+ return hifdevice;
+}
+
+static HIF_DEVICE *
+getHifDevice(struct sdio_func *func)
+{
+ AR_DEBUG_ASSERT(func != NULL);
+ return (HIF_DEVICE *)sdio_get_drvdata(func);
+}
+
+static void
+delHifDevice(HIF_DEVICE * device)
+{
+ AR_DEBUG_ASSERT(device != NULL);
+ AR_DEBUG_PRINTF(ATH_DEBUG_TRACE, ("AR6000: delHifDevice; 0x%p\n", device));
+ if (device->dma_buffer != NULL) {
+ kfree(device->dma_buffer);
+ }
+ kfree(device);
+}
+
+static void ResetAllCards(void)
+{
+}
+
+void HIFClaimDevice(HIF_DEVICE *device, void *context)
+{
+ device->claimedContext = context;
+}
+
+void HIFReleaseDevice(HIF_DEVICE *device)
+{
+ device->claimedContext = NULL;
+}
+
+A_STATUS HIFAttachHTC(HIF_DEVICE *device, HTC_CALLBACKS *callbacks)
+{
+ if (device->htcCallbacks.context != NULL) {
+ /* already in use! */
+ return A_ERROR;
+ }
+ device->htcCallbacks = *callbacks;
+ return A_OK;
+}
+
+void HIFDetachHTC(HIF_DEVICE *device)
+{
+ A_MEMZERO(&device->htcCallbacks,sizeof(device->htcCallbacks));
+}
+
+#define SDIO_SET_CMD52_ARG(arg,rw,func,raw,address,writedata) \
+ (arg) = (((rw) & 1) << 31) | \
+ (((func) & 0x7) << 28) | \
+ (((raw) & 1) << 27) | \
+ (1 << 26) | \
+ (((address) & 0x1FFFF) << 9) | \
+ (1 << 8) | \
+ ((writedata) & 0xFF)
+
+#define SDIO_SET_CMD52_READ_ARG(arg,func,address) \
+ SDIO_SET_CMD52_ARG(arg,0,(func),0,address,0x00)
+#define SDIO_SET_CMD52_WRITE_ARG(arg,func,address,value) \
+ SDIO_SET_CMD52_ARG(arg,1,(func),0,address,value)
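+
+/* Worked example (illustration only): the reset path above calls
+ * Func0_CMD52WriteByte(device->func->card, SDIO_CCCR_ABORT, (1 << 3)).
+ * With SDIO_CCCR_ABORT == 0x06, the write macro packs the argument as
+ *
+ * arg = (1 << 31) | (1 << 26) | (0x06 << 9) | (1 << 8) | 0x08
+ * = 0x84000D08
+ *
+ * which is the cmd.arg that Func0_CMD52WriteByte() below hands to
+ * mmc_wait_for_cmd(). */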
+
+static int Func0_CMD52WriteByte(struct mmc_card *card, unsigned int address, unsigned char byte)
+{
+ struct mmc_command ioCmd;
+ unsigned long arg;
+
+ memset(&ioCmd,0,sizeof(ioCmd));
+ SDIO_SET_CMD52_WRITE_ARG(arg,0,address,byte);
+ ioCmd.opcode = SD_IO_RW_DIRECT;
+ ioCmd.arg = arg;
+ ioCmd.flags = MMC_RSP_R5 | MMC_CMD_AC;
+
+ return mmc_wait_for_cmd(card->host, &ioCmd, 0);
+}
+
+static int Func0_CMD52ReadByte(struct mmc_card *card, unsigned int address, unsigned char *byte)
+{
+ struct mmc_command ioCmd;
+ unsigned long arg;
+ A_INT32 err;
+
+ memset(&ioCmd,0,sizeof(ioCmd));
+ SDIO_SET_CMD52_READ_ARG(arg,0,address);
+ ioCmd.opcode = SD_IO_RW_DIRECT;
+ ioCmd.arg = arg;
+ ioCmd.flags = MMC_RSP_R5 | MMC_CMD_AC;
+
+ err = mmc_wait_for_cmd(card->host, &ioCmd, 0);
+
+ if ((!err) && (byte)) {
+ *byte = ioCmd.resp[0] & 0xFF;
+ }
+
+ return err;
+}
diff --git a/drivers/staging/ath6kl/hif/sdio/linux_sdio/src/hif_scatter.c b/drivers/staging/ath6kl/hif/sdio/linux_sdio/src/hif_scatter.c
new file mode 100644
index 000000000000..ee8b47746a15
--- /dev/null
+++ b/drivers/staging/ath6kl/hif/sdio/linux_sdio/src/hif_scatter.c
@@ -0,0 +1,393 @@
+//------------------------------------------------------------------------------
+// Copyright (c) 2009-2010 Atheros Corporation. All rights reserved.
+//
+//
+// Permission to use, copy, modify, and/or distribute this software for any
+// purpose with or without fee is hereby granted, provided that the above
+// copyright notice and this permission notice appear in all copies.
+//
+// THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+// WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+// MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+// ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+// WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+// ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+// OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+//
+//
+//------------------------------------------------------------------------------
+//==============================================================================
+// HIF scatter implementation
+//
+// Author(s): ="Atheros"
+//==============================================================================
+
+#include <linux/mmc/card.h>
+#include <linux/mmc/host.h>
+#include <linux/mmc/sdio_func.h>
+#include <linux/mmc/sdio_ids.h>
+#include <linux/mmc/sdio.h>
+#include <linux/kthread.h>
+#include "hif_internal.h"
+#define ATH_MODULE_NAME hif
+#include "a_debug.h"
+
+#ifdef HIF_LINUX_MMC_SCATTER_SUPPORT
+
+#define _CMD53_ARG_READ 0
+#define _CMD53_ARG_WRITE 1
+#define _CMD53_ARG_BLOCK_BASIS 1
+#define _CMD53_ARG_FIXED_ADDRESS 0
+#define _CMD53_ARG_INCR_ADDRESS 1
+
+#define SDIO_SET_CMD53_ARG(arg,rw,func,mode,opcode,address,bytes_blocks) \
+ (arg) = (((rw) & 1) << 31) | \
+ (((func) & 0x7) << 28) | \
+ (((mode) & 1) << 27) | \
+ (((opcode) & 1) << 26) | \
+ (((address) & 0x1FFFF) << 9) | \
+ ((bytes_blocks) & 0x1FF)
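+
+/* Worked example (illustration only; 0x1000 is a made-up address): a 4-block,
+ * block-basis, incrementing-address write on function 1 packs as
+ *
+ * arg = (1 << 31) | (1 << 28) | (1 << 27) | (1 << 26) | (0x1000 << 9) | 4
+ * = 0x9C200004
+ *
+ * which is how DoHifReadWriteScatter() below assembles cmd.arg from the
+ * scatter request. */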
+
+static void FreeScatterReq(HIF_DEVICE *device, HIF_SCATTER_REQ *pReq)
+{
+ unsigned long flag;
+
+ spin_lock_irqsave(&device->lock, flag);
+
+ DL_ListInsertTail(&device->ScatterReqHead, &pReq->ListLink);
+
+ spin_unlock_irqrestore(&device->lock, flag);
+
+}
+
+static HIF_SCATTER_REQ *AllocScatterReq(HIF_DEVICE *device)
+{
+ DL_LIST *pItem;
+ unsigned long flag;
+
+ spin_lock_irqsave(&device->lock, flag);
+
+ pItem = DL_ListRemoveItemFromHead(&device->ScatterReqHead);
+
+ spin_unlock_irqrestore(&device->lock, flag);
+
+ if (pItem != NULL) {
+ return A_CONTAINING_STRUCT(pItem, HIF_SCATTER_REQ, ListLink);
+ }
+
+ return NULL;
+}
+
+ /* called by async task to perform the operation synchronously using direct MMC APIs */
+A_STATUS DoHifReadWriteScatter(HIF_DEVICE *device, BUS_REQUEST *busrequest)
+{
+ int i;
+ A_UINT8 rw;
+ A_UINT8 opcode;
+ struct mmc_request mmcreq;
+ struct mmc_command cmd;
+ struct mmc_data data;
+ HIF_SCATTER_REQ_PRIV *pReqPriv;
+ HIF_SCATTER_REQ *pReq;
+ A_STATUS status = A_OK;
+ struct scatterlist *pSg;
+
+ pReqPriv = busrequest->pScatterReq;
+
+ A_ASSERT(pReqPriv != NULL);
+
+ pReq = pReqPriv->pHifScatterReq;
+
+ memset(&mmcreq, 0, sizeof(struct mmc_request));
+ memset(&cmd, 0, sizeof(struct mmc_command));
+ memset(&data, 0, sizeof(struct mmc_data));
+
+ data.blksz = HIF_MBOX_BLOCK_SIZE;
+ data.blocks = pReq->TotalLength / HIF_MBOX_BLOCK_SIZE;
+
+ AR_DEBUG_PRINTF(ATH_DEBUG_SCATTER, ("HIF-SCATTER: (%s) Address: 0x%X, (BlockLen: %d, BlockCount: %d) , (tot:%d,sg:%d)\n",
+ (pReq->Request & HIF_WRITE) ? "WRITE":"READ", pReq->Address, data.blksz, data.blocks,
+ pReq->TotalLength,pReq->ValidScatterEntries));
+
+ if (pReq->Request & HIF_WRITE) {
+ rw = _CMD53_ARG_WRITE;
+ data.flags = MMC_DATA_WRITE;
+ } else {
+ rw = _CMD53_ARG_READ;
+ data.flags = MMC_DATA_READ;
+ }
+
+ if (pReq->Request & HIF_FIXED_ADDRESS) {
+ opcode = _CMD53_ARG_FIXED_ADDRESS;
+ } else {
+ opcode = _CMD53_ARG_INCR_ADDRESS;
+ }
+
+ /* fill SG entries */
+ pSg = pReqPriv->sgentries;
+ sg_init_table(pSg, pReq->ValidScatterEntries);
+
+ /* assemble SG list */
+ for (i = 0 ; i < pReq->ValidScatterEntries ; i++, pSg++) {
+ /* setup each sg entry */
+ if ((unsigned long)pReq->ScatterList[i].pBuffer & 0x3) {
+ /* note some scatter engines can handle unaligned buffers, print this
+ * as informational only */
+ AR_DEBUG_PRINTF(ATH_DEBUG_SCATTER,
+ ("HIF: (%s) Scatter Buffer is unaligned 0x%lx\n",
+ pReq->Request & HIF_WRITE ? "WRITE":"READ",
+ (unsigned long)pReq->ScatterList[i].pBuffer));
+ }
+
+ AR_DEBUG_PRINTF(ATH_DEBUG_SCATTER, (" %d: Addr:0x%lX, Len:%d \n",
+ i,(unsigned long)pReq->ScatterList[i].pBuffer,pReq->ScatterList[i].Length));
+
+ sg_set_buf(pSg, pReq->ScatterList[i].pBuffer, pReq->ScatterList[i].Length);
+ }
+ /* set scatter-gather table for request */
+ data.sg = pReqPriv->sgentries;
+ data.sg_len = pReq->ValidScatterEntries;
+ /* set command argument */
+ SDIO_SET_CMD53_ARG(cmd.arg,
+ rw,
+ device->func->num,
+ _CMD53_ARG_BLOCK_BASIS,
+ opcode,
+ pReq->Address,
+ data.blocks);
+
+ cmd.opcode = SD_IO_RW_EXTENDED;
+ cmd.flags = MMC_RSP_SPI_R5 | MMC_RSP_R5 | MMC_CMD_ADTC;
+
+ mmcreq.cmd = &cmd;
+ mmcreq.data = &data;
+
+ mmc_set_data_timeout(&data, device->func->card);
+ /* synchronous call to process request */
+ mmc_wait_for_req(device->func->card->host, &mmcreq);
+
+ if (cmd.error) {
+ status = A_ERROR;
+ AR_DEBUG_PRINTF(ATH_DEBUG_ERROR, ("HIF-SCATTER: cmd error: %d \n",cmd.error));
+ }
+
+ if (data.error) {
+ status = A_ERROR;
+ AR_DEBUG_PRINTF(ATH_DEBUG_ERROR, ("HIF-SCATTER: data error: %d \n",data.error));
+ }
+
+ if (A_FAILED(status)) {
+ AR_DEBUG_PRINTF(ATH_DEBUG_ERROR, ("HIF-SCATTER: FAILED!!! (%s) Address: 0x%X, Block mode (BlockLen: %d, BlockCount: %d)\n",
+ (pReq->Request & HIF_WRITE) ? "WRITE":"READ",pReq->Address, data.blksz, data.blocks));
+ }
+
+ /* set completion status, fail or success */
+ pReq->CompletionStatus = status;
+
+ if (pReq->Request & HIF_ASYNCHRONOUS) {
+ AR_DEBUG_PRINTF(ATH_DEBUG_SCATTER, ("HIF-SCATTER: async_task completion routine req: 0x%lX (%d)\n",(unsigned long)busrequest, status));
+ /* complete the request */
+ A_ASSERT(pReq->CompletionRoutine != NULL);
+ pReq->CompletionRoutine(pReq);
+ } else {
+ AR_DEBUG_PRINTF(ATH_DEBUG_SCATTER, ("HIF-SCATTER async_task upping busrequest : 0x%lX (%d)\n", (unsigned long)busrequest,status));
+ /* signal wait */
+ up(&busrequest->sem_req);
+ }
+
+ return status;
+}
+
+ /* callback to issue a read-write scatter request */
+static A_STATUS HifReadWriteScatter(HIF_DEVICE *device, HIF_SCATTER_REQ *pReq)
+{
+ A_STATUS status = A_EINVAL;
+ A_UINT32 request = pReq->Request;
+ HIF_SCATTER_REQ_PRIV *pReqPriv = (HIF_SCATTER_REQ_PRIV *)pReq->HIFPrivate[0];
+
+ do {
+
+ A_ASSERT(pReqPriv != NULL);
+
+ AR_DEBUG_PRINTF(ATH_DEBUG_SCATTER, ("HIF-SCATTER: total len: %d Scatter Entries: %d\n",
+ pReq->TotalLength, pReq->ValidScatterEntries));
+
+ if (!(request & HIF_EXTENDED_IO)) {
+ AR_DEBUG_PRINTF(ATH_DEBUG_ERROR,
+ ("HIF-SCATTER: Invalid command type: 0x%08x\n", request));
+ break;
+ }
+
+ if (!(request & (HIF_SYNCHRONOUS | HIF_ASYNCHRONOUS))) {
+ AR_DEBUG_PRINTF(ATH_DEBUG_ERROR,
+ ("HIF-SCATTER: Invalid execution mode: 0x%08x\n", request));
+ break;
+ }
+
+ if (!(request & HIF_BLOCK_BASIS)) {
+ AR_DEBUG_PRINTF(ATH_DEBUG_ERROR,
+ ("HIF-SCATTER: Invalid data mode: 0x%08x\n", request));
+ break;
+ }
+
+ if (pReq->TotalLength > MAX_SCATTER_REQ_TRANSFER_SIZE) {
+ AR_DEBUG_PRINTF(ATH_DEBUG_ERROR,
+ ("HIF-SCATTER: Invalid length: %d \n", pReq->TotalLength));
+ break;
+ }
+
+ if (pReq->TotalLength == 0) {
+ A_ASSERT(FALSE);
+ break;
+ }
+
+ /* add bus request to the async list for the async I/O thread to process */
+ AddToAsyncList(device, pReqPriv->busrequest);
+
+ if (request & HIF_SYNCHRONOUS) {
+ AR_DEBUG_PRINTF(ATH_DEBUG_SCATTER, ("HIF-SCATTER: queued sync req: 0x%lX\n", (unsigned long)pReqPriv->busrequest));
+ /* signal thread and wait */
+ up(&device->sem_async);
+ if (down_interruptible(&pReqPriv->busrequest->sem_req) != 0) {
+ AR_DEBUG_PRINTF(ATH_DEBUG_ERROR,("HIF-SCATTER: interrupted! \n"));
+ /* interrupted, exit */
+ status = A_ERROR;
+ break;
+ } else {
+ status = pReq->CompletionStatus;
+ }
+ } else {
+ AR_DEBUG_PRINTF(ATH_DEBUG_SCATTER, ("HIF-SCATTER: queued async req: 0x%lX\n", (unsigned long)pReqPriv->busrequest));
+ /* wake thread, it will process and then take care of the async callback */
+ up(&device->sem_async);
+ status = A_OK;
+ }
+
+ } while (FALSE);
+
+ if (A_FAILED(status) && (request & HIF_ASYNCHRONOUS)) {
+ pReq->CompletionStatus = status;
+ pReq->CompletionRoutine(pReq);
+ status = A_OK;
+ }
+
+ return status;
+}
+
+ /* setup of HIF scatter resources */
+A_STATUS SetupHIFScatterSupport(HIF_DEVICE *device, HIF_DEVICE_SCATTER_SUPPORT_INFO *pInfo)
+{
+ A_STATUS status = A_ERROR;
+ int i;
+ HIF_SCATTER_REQ_PRIV *pReqPriv;
+ BUS_REQUEST *busrequest;
+
+ do {
+
+ /* check if host supports scatter requests and it meets our requirements */
+ if (device->func->card->host->max_segs < MAX_SCATTER_ENTRIES_PER_REQ) {
+ AR_DEBUG_PRINTF(ATH_DEBUG_ERR,("HIF-SCATTER : host only supports scatter of : %d entries, need: %d \n",
+ device->func->card->host->max_segs, MAX_SCATTER_ENTRIES_PER_REQ));
+ status = A_ENOTSUP;
+ break;
+ }
+
+ AR_DEBUG_PRINTF(ATH_DEBUG_ANY,("HIF-SCATTER Enabled: max scatter req : %d entries: %d \n",
+ MAX_SCATTER_REQUESTS, MAX_SCATTER_ENTRIES_PER_REQ));
+
+ for (i = 0; i < MAX_SCATTER_REQUESTS; i++) {
+ /* allocate the private request blob */
+ pReqPriv = (HIF_SCATTER_REQ_PRIV *)A_MALLOC(sizeof(HIF_SCATTER_REQ_PRIV));
+ if (NULL == pReqPriv) {
+ break;
+ }
+ A_MEMZERO(pReqPriv, sizeof(HIF_SCATTER_REQ_PRIV));
+ /* save the device instance*/
+ pReqPriv->device = device;
+ /* allocate the scatter request */
+ pReqPriv->pHifScatterReq = (HIF_SCATTER_REQ *)A_MALLOC(sizeof(HIF_SCATTER_REQ) +
+ (MAX_SCATTER_ENTRIES_PER_REQ - 1) * (sizeof(HIF_SCATTER_ITEM)));
+
+ if (NULL == pReqPriv->pHifScatterReq) {
+ A_FREE(pReqPriv);
+ break;
+ }
+ /* just zero the main part of the scatter request */
+ A_MEMZERO(pReqPriv->pHifScatterReq, sizeof(HIF_SCATTER_REQ));
+ /* back pointer to the private struct */
+ pReqPriv->pHifScatterReq->HIFPrivate[0] = pReqPriv;
+ /* allocate a bus request for this scatter request */
+ busrequest = hifAllocateBusRequest(device);
+ if (NULL == busrequest) {
+ A_FREE(pReqPriv->pHifScatterReq);
+ A_FREE(pReqPriv);
+ break;
+ }
+ /* assign the scatter request to this bus request */
+ busrequest->pScatterReq = pReqPriv;
+ /* point back to the request */
+ pReqPriv->busrequest = busrequest;
+ /* add it to the scatter pool */
+ FreeScatterReq(device,pReqPriv->pHifScatterReq);
+ }
+
+ if (i != MAX_SCATTER_REQUESTS) {
+ status = A_NO_MEMORY;
+ AR_DEBUG_PRINTF(ATH_DEBUG_ERR,("HIF-SCATTER : failed to alloc scatter resources !\n"));
+ break;
+ }
+
+ /* set scatter function pointers */
+ pInfo->pAllocateReqFunc = AllocScatterReq;
+ pInfo->pFreeReqFunc = FreeScatterReq;
+ pInfo->pReadWriteScatterFunc = HifReadWriteScatter;
+ pInfo->MaxScatterEntries = MAX_SCATTER_ENTRIES_PER_REQ;
+ pInfo->MaxTransferSizePerScatterReq = MAX_SCATTER_REQ_TRANSFER_SIZE;
+
+ status = A_OK;
+
+ } while (FALSE);
+
+ if (A_FAILED(status)) {
+ CleanupHIFScatterResources(device);
+ }
+
+ return status;
+}
+
+ /* clean up scatter support */
+void CleanupHIFScatterResources(HIF_DEVICE *device)
+{
+ HIF_SCATTER_REQ_PRIV *pReqPriv;
+ HIF_SCATTER_REQ *pReq;
+
+ /* empty the free list */
+
+ while (1) {
+
+ pReq = AllocScatterReq(device);
+
+ if (NULL == pReq) {
+ break;
+ }
+
+ pReqPriv = (HIF_SCATTER_REQ_PRIV *)pReq->HIFPrivate[0];
+ A_ASSERT(pReqPriv != NULL);
+
+ if (pReqPriv->busrequest != NULL) {
+ pReqPriv->busrequest->pScatterReq = NULL;
+ /* free bus request */
+ hifFreeBusRequest(device, pReqPriv->busrequest);
+ pReqPriv->busrequest = NULL;
+ }
+
+ if (pReqPriv->pHifScatterReq != NULL) {
+ A_FREE(pReqPriv->pHifScatterReq);
+ pReqPriv->pHifScatterReq = NULL;
+ }
+
+ A_FREE(pReqPriv);
+ }
+}
+
+#endif // HIF_LINUX_MMC_SCATTER_SUPPORT
diff --git a/drivers/staging/ath6kl/htc2/AR6000/ar6k.c b/drivers/staging/ath6kl/htc2/AR6000/ar6k.c
new file mode 100644
index 000000000000..1efc85ce02b2
--- /dev/null
+++ b/drivers/staging/ath6kl/htc2/AR6000/ar6k.c
@@ -0,0 +1,1471 @@
+//------------------------------------------------------------------------------
+// <copyright file="ar6k.c" company="Atheros">
+// Copyright (c) 2007-2010 Atheros Corporation. All rights reserved.
+//
+//
+// Permission to use, copy, modify, and/or distribute this software for any
+// purpose with or without fee is hereby granted, provided that the above
+// copyright notice and this permission notice appear in all copies.
+//
+// THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+// WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+// MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+// ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+// WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+// ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+// OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+//
+//
+//------------------------------------------------------------------------------
+//==============================================================================
+// AR6K device layer that handles register level I/O
+//
+// Author(s): ="Atheros"
+//==============================================================================
+
+#include "a_config.h"
+#include "athdefs.h"
+#include "a_types.h"
+#include "AR6002/hw2.0/hw/mbox_host_reg.h"
+#include "a_osapi.h"
+#include "../htc_debug.h"
+#include "hif.h"
+#include "htc_packet.h"
+#include "ar6k.h"
+
+#define MAILBOX_FOR_BLOCK_SIZE 1
+
+A_STATUS DevEnableInterrupts(AR6K_DEVICE *pDev);
+A_STATUS DevDisableInterrupts(AR6K_DEVICE *pDev);
+
+static void DevCleanupVirtualScatterSupport(AR6K_DEVICE *pDev);
+
+void AR6KFreeIOPacket(AR6K_DEVICE *pDev, HTC_PACKET *pPacket)
+{
+ LOCK_AR6K(pDev);
+ HTC_PACKET_ENQUEUE(&pDev->RegisterIOList,pPacket);
+ UNLOCK_AR6K(pDev);
+}
+
+HTC_PACKET *AR6KAllocIOPacket(AR6K_DEVICE *pDev)
+{
+ HTC_PACKET *pPacket;
+
+ LOCK_AR6K(pDev);
+ pPacket = HTC_PACKET_DEQUEUE(&pDev->RegisterIOList);
+ UNLOCK_AR6K(pDev);
+
+ return pPacket;
+}
+
+void DevCleanup(AR6K_DEVICE *pDev)
+{
+ DevCleanupGMbox(pDev);
+
+ if (pDev->HifAttached) {
+ HIFDetachHTC(pDev->HIFDevice);
+ pDev->HifAttached = FALSE;
+ }
+
+ DevCleanupVirtualScatterSupport(pDev);
+
+ if (A_IS_MUTEX_VALID(&pDev->Lock)) {
+ A_MUTEX_DELETE(&pDev->Lock);
+ }
+}
+
+A_STATUS DevSetup(AR6K_DEVICE *pDev)
+{
+ A_UINT32 blocksizes[AR6K_MAILBOXES];
+ A_STATUS status = A_OK;
+ int i;
+ HTC_CALLBACKS htcCallbacks;
+
+ do {
+
+ DL_LIST_INIT(&pDev->ScatterReqHead);
+ /* initialize our free list of IO packets */
+ INIT_HTC_PACKET_QUEUE(&pDev->RegisterIOList);
+ A_MUTEX_INIT(&pDev->Lock);
+
+ A_MEMZERO(&htcCallbacks, sizeof(HTC_CALLBACKS));
+ /* the device layer handles these */
+ htcCallbacks.rwCompletionHandler = DevRWCompletionHandler;
+ htcCallbacks.dsrHandler = DevDsrHandler;
+ htcCallbacks.context = pDev;
+
+ status = HIFAttachHTC(pDev->HIFDevice, &htcCallbacks);
+
+ if (A_FAILED(status)) {
+ break;
+ }
+
+ pDev->HifAttached = TRUE;
+
+ /* get the addresses for all 4 mailboxes */
+ status = HIFConfigureDevice(pDev->HIFDevice, HIF_DEVICE_GET_MBOX_ADDR,
+ &pDev->MailBoxInfo, sizeof(pDev->MailBoxInfo));
+
+ if (status != A_OK) {
+ A_ASSERT(FALSE);
+ break;
+ }
+
+ /* carve up register I/O packets (these are for ASYNC register I/O ) */
+ for (i = 0; i < AR6K_MAX_REG_IO_BUFFERS; i++) {
+ HTC_PACKET *pIOPacket;
+ pIOPacket = &pDev->RegIOBuffers[i].HtcPacket;
+ SET_HTC_PACKET_INFO_RX_REFILL(pIOPacket,
+ pDev,
+ pDev->RegIOBuffers[i].Buffer,
+ AR6K_REG_IO_BUFFER_SIZE,
+ 0); /* don't care */
+ AR6KFreeIOPacket(pDev,pIOPacket);
+ }
+
+ /* get the block sizes */
+ status = HIFConfigureDevice(pDev->HIFDevice, HIF_DEVICE_GET_MBOX_BLOCK_SIZE,
+ blocksizes, sizeof(blocksizes));
+
+ if (status != A_OK) {
+ A_ASSERT(FALSE);
+ break;
+ }
+
+ /* note: we actually get the block size of a mailbox other than 0, for SDIO the block
+ * size on mailbox 0 is artificially set to 1. So we use the block size that is set
+ * for the other 3 mailboxes */
+ pDev->BlockSize = blocksizes[MAILBOX_FOR_BLOCK_SIZE];
+ /* must be a power of 2 */
+ A_ASSERT((pDev->BlockSize & (pDev->BlockSize - 1)) == 0);
+
+ /* assemble mask, used for padding to a block */
+ pDev->BlockMask = pDev->BlockSize - 1;
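+
+ /* Illustration (assuming a typical 128-byte mailbox block size): a BlockSize
+ * of 128 gives BlockMask = 0x7F, so a length can be rounded up to a whole
+ * block as (len + BlockMask) & ~BlockMask, e.g. 100 -> 128. */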
+
+ AR_DEBUG_PRINTF(ATH_DEBUG_TRC,("BlockSize: %d, MailboxAddress:0x%X \n",
+ pDev->BlockSize, pDev->MailBoxInfo.MboxAddresses[HTC_MAILBOX]));
+
+ pDev->GetPendingEventsFunc = NULL;
+ /* see if the HIF layer implements the get pending events function */
+ HIFConfigureDevice(pDev->HIFDevice,
+ HIF_DEVICE_GET_PENDING_EVENTS_FUNC,
+ &pDev->GetPendingEventsFunc,
+ sizeof(pDev->GetPendingEventsFunc));
+
+ /* assume we can process HIF interrupt events asynchronously */
+ pDev->HifIRQProcessingMode = HIF_DEVICE_IRQ_ASYNC_SYNC;
+
+ /* see if the HIF layer overrides this assumption */
+ HIFConfigureDevice(pDev->HIFDevice,
+ HIF_DEVICE_GET_IRQ_PROC_MODE,
+ &pDev->HifIRQProcessingMode,
+ sizeof(pDev->HifIRQProcessingMode));
+
+ switch (pDev->HifIRQProcessingMode) {
+ case HIF_DEVICE_IRQ_SYNC_ONLY:
+ AR_DEBUG_PRINTF(ATH_DEBUG_WARN,("HIF Interrupt processing is SYNC ONLY\n"));
+ /* see if HIF layer wants HTC to yield */
+ HIFConfigureDevice(pDev->HIFDevice,
+ HIF_DEVICE_GET_IRQ_YIELD_PARAMS,
+ &pDev->HifIRQYieldParams,
+ sizeof(pDev->HifIRQYieldParams));
+
+ if (pDev->HifIRQYieldParams.RecvPacketYieldCount > 0) {
+ AR_DEBUG_PRINTF(ATH_DEBUG_WARN,
+ ("HIF requests that DSR yield per %d RECV packets \n",
+ pDev->HifIRQYieldParams.RecvPacketYieldCount));
+ pDev->DSRCanYield = TRUE;
+ }
+ break;
+ case HIF_DEVICE_IRQ_ASYNC_SYNC:
+ AR_DEBUG_PRINTF(ATH_DEBUG_TRC,("HIF Interrupt processing is ASYNC and SYNC\n"));
+ break;
+ default:
+ A_ASSERT(FALSE);
+ }
+
+ pDev->HifMaskUmaskRecvEvent = NULL;
+
+ /* see if the HIF layer implements the mask/unmask recv events function */
+ HIFConfigureDevice(pDev->HIFDevice,
+ HIF_DEVICE_GET_RECV_EVENT_MASK_UNMASK_FUNC,
+ &pDev->HifMaskUmaskRecvEvent,
+ sizeof(pDev->HifMaskUmaskRecvEvent));
+
+ AR_DEBUG_PRINTF(ATH_DEBUG_TRC,("HIF special overrides : 0x%lX , 0x%lX\n",
+ (unsigned long)pDev->GetPendingEventsFunc, (unsigned long)pDev->HifMaskUmaskRecvEvent));
+
+ status = DevDisableInterrupts(pDev);
+
+ if (A_FAILED(status)) {
+ break;
+ }
+
+ status = DevSetupGMbox(pDev);
+
+ } while (FALSE);
+
+ if (A_FAILED(status)) {
+ if (pDev->HifAttached) {
+ HIFDetachHTC(pDev->HIFDevice);
+ pDev->HifAttached = FALSE;
+ }
+ }
+
+ return status;
+
+}
+
+A_STATUS DevEnableInterrupts(AR6K_DEVICE *pDev)
+{
+ A_STATUS status;
+ AR6K_IRQ_ENABLE_REGISTERS regs;
+
+ LOCK_AR6K(pDev);
+
+ /* Enable all the interrupts except for the internal AR6000 CPU interrupt */
+ pDev->IrqEnableRegisters.int_status_enable = INT_STATUS_ENABLE_ERROR_SET(0x01) |
+ INT_STATUS_ENABLE_CPU_SET(0x01) |
+ INT_STATUS_ENABLE_COUNTER_SET(0x01);
+
+ if (NULL == pDev->GetPendingEventsFunc) {
+ pDev->IrqEnableRegisters.int_status_enable |= INT_STATUS_ENABLE_MBOX_DATA_SET(0x01);
+ } else {
+ /* The HIF layer provided us with a pending events function which means that
+ * the detection of pending mbox messages is handled in the HIF layer.
+ * This is the case for the SPI2 interface.
+ * In the normal case we enable MBOX interrupts, for the case
+ * with HIFs that offer this mechanism, we keep these interrupts
+ * masked */
+ pDev->IrqEnableRegisters.int_status_enable &= ~INT_STATUS_ENABLE_MBOX_DATA_SET(0x01);
+ }
+
+
+ /* Set up the CPU Interrupt Status Register */
+ pDev->IrqEnableRegisters.cpu_int_status_enable = CPU_INT_STATUS_ENABLE_BIT_SET(0x00);
+
+ /* Set up the Error Interrupt Status Register */
+ pDev->IrqEnableRegisters.error_status_enable =
+ ERROR_STATUS_ENABLE_RX_UNDERFLOW_SET(0x01) |
+ ERROR_STATUS_ENABLE_TX_OVERFLOW_SET(0x01);
+
+ /* Set up the Counter Interrupt Status Register (only for debug interrupt to catch fatal errors) */
+ pDev->IrqEnableRegisters.counter_int_status_enable =
+ COUNTER_INT_STATUS_ENABLE_BIT_SET(AR6K_TARGET_DEBUG_INTR_MASK);
+
+ /* copy into our temp area */
+ A_MEMCPY(&regs,&pDev->IrqEnableRegisters,AR6K_IRQ_ENABLE_REGS_SIZE);
+
+ UNLOCK_AR6K(pDev);
+
+ /* always synchronous */
+ status = HIFReadWrite(pDev->HIFDevice,
+ INT_STATUS_ENABLE_ADDRESS,
+ &regs.int_status_enable,
+ AR6K_IRQ_ENABLE_REGS_SIZE,
+ HIF_WR_SYNC_BYTE_INC,
+ NULL);
+
+ if (status != A_OK) {
+ /* Can't write it for some reason */
+ AR_DEBUG_PRINTF(ATH_DEBUG_ERR,
+ ("Failed to update interrupt control registers err: %d\n", status));
+
+ }
+
+ return status;
+}
+
+A_STATUS DevDisableInterrupts(AR6K_DEVICE *pDev)
+{
+ AR6K_IRQ_ENABLE_REGISTERS regs;
+
+ LOCK_AR6K(pDev);
+ /* Disable all interrupts */
+ pDev->IrqEnableRegisters.int_status_enable = 0;
+ pDev->IrqEnableRegisters.cpu_int_status_enable = 0;
+ pDev->IrqEnableRegisters.error_status_enable = 0;
+ pDev->IrqEnableRegisters.counter_int_status_enable = 0;
+ /* copy into our temp area */
+ A_MEMCPY(&regs,&pDev->IrqEnableRegisters,AR6K_IRQ_ENABLE_REGS_SIZE);
+
+ UNLOCK_AR6K(pDev);
+
+ /* always synchronous */
+ return HIFReadWrite(pDev->HIFDevice,
+ INT_STATUS_ENABLE_ADDRESS,
+ &regs.int_status_enable,
+ AR6K_IRQ_ENABLE_REGS_SIZE,
+ HIF_WR_SYNC_BYTE_INC,
+ NULL);
+}
+
+/* enable device interrupts */
+A_STATUS DevUnmaskInterrupts(AR6K_DEVICE *pDev)
+{
+ /* for good measure, make sure interrupts are disabled before unmasking at the HIF
+ * layer.
+ * The rationale here is that between device insertion (where we clear the interrupts the first time)
+ * and when HTC is finally ready to handle interrupts, other software can perform target "soft" resets.
+ * The AR6K interrupt enables reset back to an "enabled" state when this happens.
+ * */
+ A_STATUS IntStatus = A_OK;
+ DevDisableInterrupts(pDev);
+
+#ifdef THREAD_X
+ // To be verified...
+ IntStatus = DevEnableInterrupts(pDev);
+ /* Unmask the host controller interrupts */
+ HIFUnMaskInterrupt(pDev->HIFDevice);
+#else
+ /* Unmask the host controller interrupts */
+ HIFUnMaskInterrupt(pDev->HIFDevice);
+ IntStatus = DevEnableInterrupts(pDev);
+#endif
+
+ return IntStatus;
+}
+
+/* disable all device interrupts */
+A_STATUS DevMaskInterrupts(AR6K_DEVICE *pDev)
+{
+ /* mask the interrupt at the HIF layer, we don't want a stray interrupt taken while
+ * we zero out our shadow registers in DevDisableInterrupts()*/
+ HIFMaskInterrupt(pDev->HIFDevice);
+
+ return DevDisableInterrupts(pDev);
+}
+
+/* callback when our fetch to enable/disable completes */
+static void DevDoEnableDisableRecvAsyncHandler(void *Context, HTC_PACKET *pPacket)
+{
+ AR6K_DEVICE *pDev = (AR6K_DEVICE *)Context;
+
+ AR_DEBUG_PRINTF(ATH_DEBUG_IRQ,("+DevDoEnableDisableRecvAsyncHandler: (dev: 0x%lX)\n", (unsigned long)pDev));
+
+ if (A_FAILED(pPacket->Status)) {
+ AR_DEBUG_PRINTF(ATH_DEBUG_ERR,
+ (" Failed to disable receiver, status:%d \n", pPacket->Status));
+ }
+ /* free this IO packet */
+ AR6KFreeIOPacket(pDev,pPacket);
+ AR_DEBUG_PRINTF(ATH_DEBUG_IRQ,("-DevDoEnableDisableRecvAsyncHandler \n"));
+}
+
+/* disable packet reception (used in case the host runs out of buffers)
+ * this is the "override" method when the HIF reports another method to
+ * disable recv events */
+static A_STATUS DevDoEnableDisableRecvOverride(AR6K_DEVICE *pDev, A_BOOL EnableRecv, A_BOOL AsyncMode)
+{
+ A_STATUS status = A_OK;
+ HTC_PACKET *pIOPacket = NULL;
+
+ AR_DEBUG_PRINTF(ATH_DEBUG_TRC,("DevDoEnableDisableRecvOverride: Enable:%d Mode:%d\n",
+ EnableRecv,AsyncMode));
+
+ do {
+
+ if (AsyncMode) {
+
+ pIOPacket = AR6KAllocIOPacket(pDev);
+
+ if (NULL == pIOPacket) {
+ status = A_NO_MEMORY;
+ A_ASSERT(FALSE);
+ break;
+ }
+
+ /* stick in our completion routine when the I/O operation completes */
+ pIOPacket->Completion = DevDoEnableDisableRecvAsyncHandler;
+ pIOPacket->pContext = pDev;
+
+ /* call the HIF layer override and do this asynchronously */
+ status = pDev->HifMaskUmaskRecvEvent(pDev->HIFDevice,
+ EnableRecv ? HIF_UNMASK_RECV : HIF_MASK_RECV,
+ pIOPacket);
+ break;
+ }
+
+ /* if we get here we are doing it synchronously */
+ status = pDev->HifMaskUmaskRecvEvent(pDev->HIFDevice,
+ EnableRecv ? HIF_UNMASK_RECV : HIF_MASK_RECV,
+ NULL);
+
+ } while (FALSE);
+
+ if (A_FAILED(status) && (pIOPacket != NULL)) {
+ AR6KFreeIOPacket(pDev,pIOPacket);
+ }
+
+ return status;
+}
+
+/* disable packet reception (used in case the host runs out of buffers)
+ * this is the "normal" method using the interrupt enable registers through
+ * the host I/F */
+static A_STATUS DevDoEnableDisableRecvNormal(AR6K_DEVICE *pDev, A_BOOL EnableRecv, A_BOOL AsyncMode)
+{
+ A_STATUS status = A_OK;
+ HTC_PACKET *pIOPacket = NULL;
+ AR6K_IRQ_ENABLE_REGISTERS regs;
+
+ /* take the lock to protect interrupt enable shadows */
+ LOCK_AR6K(pDev);
+
+ if (EnableRecv) {
+ pDev->IrqEnableRegisters.int_status_enable |= INT_STATUS_ENABLE_MBOX_DATA_SET(0x01);
+ } else {
+ pDev->IrqEnableRegisters.int_status_enable &= ~INT_STATUS_ENABLE_MBOX_DATA_SET(0x01);
+ }
+
+ /* copy into our temp area */
+ A_MEMCPY(&regs,&pDev->IrqEnableRegisters,AR6K_IRQ_ENABLE_REGS_SIZE);
+ UNLOCK_AR6K(pDev);
+
+ do {
+
+ if (AsyncMode) {
+
+ pIOPacket = AR6KAllocIOPacket(pDev);
+
+ if (NULL == pIOPacket) {
+ status = A_NO_MEMORY;
+ A_ASSERT(FALSE);
+ break;
+ }
+
+ /* copy values to write to our async I/O buffer */
+ A_MEMCPY(pIOPacket->pBuffer,&regs,AR6K_IRQ_ENABLE_REGS_SIZE);
+
+ /* stick in our completion routine when the I/O operation completes */
+ pIOPacket->Completion = DevDoEnableDisableRecvAsyncHandler;
+ pIOPacket->pContext = pDev;
+
+ /* write it out asynchronously */
+ HIFReadWrite(pDev->HIFDevice,
+ INT_STATUS_ENABLE_ADDRESS,
+ pIOPacket->pBuffer,
+ AR6K_IRQ_ENABLE_REGS_SIZE,
+ HIF_WR_ASYNC_BYTE_INC,
+ pIOPacket);
+ break;
+ }
+
+ /* if we get here we are doing it synchronously */
+
+ status = HIFReadWrite(pDev->HIFDevice,
+ INT_STATUS_ENABLE_ADDRESS,
+ &regs.int_status_enable,
+ AR6K_IRQ_ENABLE_REGS_SIZE,
+ HIF_WR_SYNC_BYTE_INC,
+ NULL);
+
+ } while (FALSE);
+
+ if (A_FAILED(status) && (pIOPacket != NULL)) {
+ AR6KFreeIOPacket(pDev,pIOPacket);
+ }
+
+ return status;
+}
+
+
+A_STATUS DevStopRecv(AR6K_DEVICE *pDev, A_BOOL AsyncMode)
+{
+ if (NULL == pDev->HifMaskUmaskRecvEvent) {
+ return DevDoEnableDisableRecvNormal(pDev,FALSE,AsyncMode);
+ } else {
+ return DevDoEnableDisableRecvOverride(pDev,FALSE,AsyncMode);
+ }
+}
+
+A_STATUS DevEnableRecv(AR6K_DEVICE *pDev, A_BOOL AsyncMode)
+{
+ if (NULL == pDev->HifMaskUmaskRecvEvent) {
+ return DevDoEnableDisableRecvNormal(pDev,TRUE,AsyncMode);
+ } else {
+ return DevDoEnableDisableRecvOverride(pDev,TRUE,AsyncMode);
+ }
+}
+
+A_STATUS DevWaitForPendingRecv(AR6K_DEVICE *pDev,A_UINT32 TimeoutInMs,A_BOOL *pbIsRecvPending)
+{
+ A_STATUS status = A_OK;
+ A_UCHAR host_int_status = 0x0;
+ A_UINT32 counter = 0x0;
+
+ if (TimeoutInMs < 100) {
+ TimeoutInMs = 100;
+ }
+
+ counter = TimeoutInMs / 100;
+
+ do {
+ /* read the Host Interrupt Status Register */
+ status = HIFReadWrite(pDev->HIFDevice,
+ HOST_INT_STATUS_ADDRESS,
+ &host_int_status,
+ sizeof(A_UCHAR),
+ HIF_RD_SYNC_BYTE_INC,
+ NULL);
+ if (A_FAILED(status)) {
+ AR_DEBUG_PRINTF(ATH_LOG_ERR, ("DevWaitForPendingRecv: Read HOST_INT_STATUS_ADDRESS Failed 0x%X\n", status));
+ break;
+ }
+
+ host_int_status = A_SUCCESS(status) ? (host_int_status & (1 << 0)) : 0;
+ if (!host_int_status) {
+ status = A_OK;
+ *pbIsRecvPending = FALSE;
+ break;
+ } else {
+ *pbIsRecvPending = TRUE;
+ }
+
+ A_MDELAY(100);
+
+ counter--;
+
+ } while (counter);
+ return status;
+}
+
+void DevDumpRegisters(AR6K_DEVICE *pDev,
+ AR6K_IRQ_PROC_REGISTERS *pIrqProcRegs,
+ AR6K_IRQ_ENABLE_REGISTERS *pIrqEnableRegs)
+{
+
+ AR_DEBUG_PRINTF(ATH_DEBUG_ANY, ("\n<------- Register Table -------->\n"));
+
+ if (pIrqProcRegs != NULL) {
+ AR_DEBUG_PRINTF(ATH_DEBUG_ANY,
+ ("Host Int Status: 0x%x\n",pIrqProcRegs->host_int_status));
+ AR_DEBUG_PRINTF(ATH_DEBUG_ANY,
+ ("CPU Int Status: 0x%x\n",pIrqProcRegs->cpu_int_status));
+ AR_DEBUG_PRINTF(ATH_DEBUG_ANY,
+ ("Error Int Status: 0x%x\n",pIrqProcRegs->error_int_status));
+ AR_DEBUG_PRINTF(ATH_DEBUG_ANY,
+ ("Counter Int Status: 0x%x\n",pIrqProcRegs->counter_int_status));
+ AR_DEBUG_PRINTF(ATH_DEBUG_ANY,
+ ("Mbox Frame: 0x%x\n",pIrqProcRegs->mbox_frame));
+ AR_DEBUG_PRINTF(ATH_DEBUG_ANY,
+ ("Rx Lookahead Valid: 0x%x\n",pIrqProcRegs->rx_lookahead_valid));
+ AR_DEBUG_PRINTF(ATH_DEBUG_ANY,
+ ("Rx Lookahead 0: 0x%x\n",pIrqProcRegs->rx_lookahead[0]));
+ AR_DEBUG_PRINTF(ATH_DEBUG_ANY,
+ ("Rx Lookahead 1: 0x%x\n",pIrqProcRegs->rx_lookahead[1]));
+
+ if (pDev->MailBoxInfo.GMboxAddress != 0) {
+ /* if the target supports GMBOX hardware, dump some additional state */
+ AR_DEBUG_PRINTF(ATH_DEBUG_ANY,
+ ("GMBOX Host Int Status 2: 0x%x\n",pIrqProcRegs->host_int_status2));
+ AR_DEBUG_PRINTF(ATH_DEBUG_ANY,
+ ("GMBOX RX Avail: 0x%x\n",pIrqProcRegs->gmbox_rx_avail));
+ AR_DEBUG_PRINTF(ATH_DEBUG_ANY,
+ ("GMBOX lookahead alias 0: 0x%x\n",pIrqProcRegs->rx_gmbox_lookahead_alias[0]));
+ AR_DEBUG_PRINTF(ATH_DEBUG_ANY,
+ ("GMBOX lookahead alias 1: 0x%x\n",pIrqProcRegs->rx_gmbox_lookahead_alias[1]));
+ }
+
+ }
+
+ if (pIrqEnableRegs != NULL) {
+ AR_DEBUG_PRINTF(ATH_DEBUG_ANY,
+ ("Int Status Enable: 0x%x\n",pIrqEnableRegs->int_status_enable));
+ AR_DEBUG_PRINTF(ATH_DEBUG_ANY,
+ ("Counter Int Status Enable: 0x%x\n",pIrqEnableRegs->counter_int_status_enable));
+ }
+ AR_DEBUG_PRINTF(ATH_DEBUG_ANY, ("<------------------------------->\n"));
+}
+
+
+#define DEV_GET_VIRT_DMA_INFO(p) ((DEV_SCATTER_DMA_VIRTUAL_INFO *)((p)->HIFPrivate[0]))
+
+static HIF_SCATTER_REQ *DevAllocScatterReq(HIF_DEVICE *Context)
+{
+ DL_LIST *pItem;
+ AR6K_DEVICE *pDev = (AR6K_DEVICE *)Context;
+ LOCK_AR6K(pDev);
+ pItem = DL_ListRemoveItemFromHead(&pDev->ScatterReqHead);
+ UNLOCK_AR6K(pDev);
+ if (pItem != NULL) {
+ return A_CONTAINING_STRUCT(pItem, HIF_SCATTER_REQ, ListLink);
+ }
+ return NULL;
+}
+
+static void DevFreeScatterReq(HIF_DEVICE *Context, HIF_SCATTER_REQ *pReq)
+{
+ AR6K_DEVICE *pDev = (AR6K_DEVICE *)Context;
+ LOCK_AR6K(pDev);
+ DL_ListInsertTail(&pDev->ScatterReqHead, &pReq->ListLink);
+ UNLOCK_AR6K(pDev);
+}
+
+A_STATUS DevCopyScatterListToFromDMABuffer(HIF_SCATTER_REQ *pReq, A_BOOL FromDMA)
+{
+ A_UINT8 *pDMABuffer = NULL;
+ int i, remaining;
+ A_UINT32 length;
+
+ pDMABuffer = pReq->pScatterBounceBuffer;
+
+ if (pDMABuffer == NULL) {
+ A_ASSERT(FALSE);
+ return A_EINVAL;
+ }
+
+ remaining = (int)pReq->TotalLength;
+
+ for (i = 0; i < pReq->ValidScatterEntries; i++) {
+
+ length = min((int)pReq->ScatterList[i].Length, remaining);
+
+ if (length != (int)pReq->ScatterList[i].Length) {
+ A_ASSERT(FALSE);
+ /* there is a problem with the scatter list */
+ return A_EINVAL;
+ }
+
+ if (FromDMA) {
+ /* from DMA buffer */
+ A_MEMCPY(pReq->ScatterList[i].pBuffer, pDMABuffer , length);
+ } else {
+ /* to DMA buffer */
+ A_MEMCPY(pDMABuffer, pReq->ScatterList[i].pBuffer, length);
+ }
+
+ pDMABuffer += length;
+ remaining -= length;
+ }
+
+ return A_OK;
+}
+
+static void DevReadWriteScatterAsyncHandler(void *Context, HTC_PACKET *pPacket)
+{
+ AR6K_DEVICE *pDev = (AR6K_DEVICE *)Context;
+ HIF_SCATTER_REQ *pReq = (HIF_SCATTER_REQ *)pPacket->pPktContext;
+
+ AR_DEBUG_PRINTF(ATH_DEBUG_RECV,("+DevReadWriteScatterAsyncHandler: (dev: 0x%lX)\n", (unsigned long)pDev));
+
+ pReq->CompletionStatus = pPacket->Status;
+
+ AR6KFreeIOPacket(pDev,pPacket);
+
+ pReq->CompletionRoutine(pReq);
+
+ AR_DEBUG_PRINTF(ATH_DEBUG_RECV,("-DevReadWriteScatterAsyncHandler \n"));
+}
+
+static A_STATUS DevReadWriteScatter(HIF_DEVICE *Context, HIF_SCATTER_REQ *pReq)
+{
+ AR6K_DEVICE *pDev = (AR6K_DEVICE *)Context;
+ A_STATUS status = A_OK;
+ HTC_PACKET *pIOPacket = NULL;
+ A_UINT32 request = pReq->Request;
+
+ do {
+
+ if (pReq->TotalLength > AR6K_MAX_TRANSFER_SIZE_PER_SCATTER) {
+ AR_DEBUG_PRINTF(ATH_DEBUG_ERR,
+ ("Invalid length: %d \n", pReq->TotalLength));
+ break;
+ }
+
+ if (pReq->TotalLength == 0) {
+ A_ASSERT(FALSE);
+ break;
+ }
+
+ if (request & HIF_ASYNCHRONOUS) {
+ /* use an I/O packet to carry this request */
+ pIOPacket = AR6KAllocIOPacket(pDev);
+ if (NULL == pIOPacket) {
+ status = A_NO_MEMORY;
+ break;
+ }
+
+ /* save the request */
+ pIOPacket->pPktContext = pReq;
+ /* stick in our completion routine when the I/O operation completes */
+ pIOPacket->Completion = DevReadWriteScatterAsyncHandler;
+ pIOPacket->pContext = pDev;
+ }
+
+ if (request & HIF_WRITE) {
+ /* in virtual DMA, we are issuing the requests through the legacy HIFReadWrite API;
+ * this API will adjust the address automatically for the last byte to fall on the mailbox
+ * EOM. */
+
+ /* if the address is an extended address, we can adjust the address here since the extended
+ * address will bypass the normal checks in legacy HIF layers */
+ if (pReq->Address == pDev->MailBoxInfo.MboxProp[HTC_MAILBOX].ExtendedAddress) {
+ pReq->Address += pDev->MailBoxInfo.MboxProp[HTC_MAILBOX].ExtendedSize - pReq->TotalLength;
+ }
+ }
+
+ /* use legacy readwrite */
+ status = HIFReadWrite(pDev->HIFDevice,
+ pReq->Address,
+ DEV_GET_VIRT_DMA_INFO(pReq)->pVirtDmaBuffer,
+ pReq->TotalLength,
+ request,
+ (request & HIF_ASYNCHRONOUS) ? pIOPacket : NULL);
+
+ } while (FALSE);
+
+ if ((status != A_PENDING) && A_FAILED(status) && (request & HIF_ASYNCHRONOUS)) {
+ if (pIOPacket != NULL) {
+ AR6KFreeIOPacket(pDev,pIOPacket);
+ }
+ pReq->CompletionStatus = status;
+ pReq->CompletionRoutine(pReq);
+ status = A_OK;
+ }
+
+ return status;
+}
+
+
+static void DevCleanupVirtualScatterSupport(AR6K_DEVICE *pDev)
+{
+ HIF_SCATTER_REQ *pReq;
+
+ while (1) {
+ pReq = DevAllocScatterReq((HIF_DEVICE *)pDev);
+ if (NULL == pReq) {
+ break;
+ }
+ A_FREE(pReq);
+ }
+
+}
+
+ /* function to set up virtual scatter support if HIF layer has not implemented the interface */
+static A_STATUS DevSetupVirtualScatterSupport(AR6K_DEVICE *pDev)
+{
+ A_STATUS status = A_OK;
+ int bufferSize, sgreqSize;
+ int i;
+ DEV_SCATTER_DMA_VIRTUAL_INFO *pVirtualInfo;
+ HIF_SCATTER_REQ *pReq;
+
+ bufferSize = sizeof(DEV_SCATTER_DMA_VIRTUAL_INFO) +
+ 2 * (A_GET_CACHE_LINE_BYTES()) + AR6K_MAX_TRANSFER_SIZE_PER_SCATTER;
+
+ sgreqSize = sizeof(HIF_SCATTER_REQ) +
+ (AR6K_SCATTER_ENTRIES_PER_REQ - 1) * (sizeof(HIF_SCATTER_ITEM));
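+
+ /* Each pass of the loop below makes one allocation of sgreqSize + bufferSize
+ * bytes laid out as:
+ *
+ * [ HIF_SCATTER_REQ + scatter items ][ DEV_SCATTER_DMA_VIRTUAL_INFO ... ]
+ * [ ... cache-line aligned virtual DMA bounce buffer ]
+ *
+ * with pVirtDmaBuffer pointing at the aligned tail of the block. */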
+
+ for (i = 0; i < AR6K_SCATTER_REQS; i++) {
+ /* allocate the scatter request, buffer info and the actual virtual buffer itself */
+ pReq = (HIF_SCATTER_REQ *)A_MALLOC(sgreqSize + bufferSize);
+
+ if (NULL == pReq) {
+ status = A_NO_MEMORY;
+ break;
+ }
+
+ A_MEMZERO(pReq, sgreqSize);
+
+ /* the virtual DMA starts after the scatter request struct */
+ pVirtualInfo = (DEV_SCATTER_DMA_VIRTUAL_INFO *)((A_UINT8 *)pReq + sgreqSize);
+ A_MEMZERO(pVirtualInfo, sizeof(DEV_SCATTER_DMA_VIRTUAL_INFO));
+
+ pVirtualInfo->pVirtDmaBuffer = &pVirtualInfo->DataArea[0];
+ /* align buffer to cache line in case host controller can actually DMA this */
+ pVirtualInfo->pVirtDmaBuffer = A_ALIGN_TO_CACHE_LINE(pVirtualInfo->pVirtDmaBuffer);
+ /* store the structure in the private area */
+ pReq->HIFPrivate[0] = pVirtualInfo;
+ /* we emulate a DMA bounce interface */
+ pReq->ScatterMethod = HIF_SCATTER_DMA_BOUNCE;
+ pReq->pScatterBounceBuffer = pVirtualInfo->pVirtDmaBuffer;
+ /* free request to the list */
+ DevFreeScatterReq((HIF_DEVICE *)pDev,pReq);
+ }
+
+ if (A_FAILED(status)) {
+ DevCleanupVirtualScatterSupport(pDev);
+ } else {
+ pDev->HifScatterInfo.pAllocateReqFunc = DevAllocScatterReq;
+ pDev->HifScatterInfo.pFreeReqFunc = DevFreeScatterReq;
+ pDev->HifScatterInfo.pReadWriteScatterFunc = DevReadWriteScatter;
+ if (pDev->MailBoxInfo.MboxBusIFType == MBOX_BUS_IF_SPI) {
+ AR_DEBUG_PRINTF(ATH_DEBUG_WARN, ("AR6K: SPI bus requires RX scatter limits\n"));
+ pDev->HifScatterInfo.MaxScatterEntries = AR6K_MIN_SCATTER_ENTRIES_PER_REQ;
+ pDev->HifScatterInfo.MaxTransferSizePerScatterReq = AR6K_MIN_TRANSFER_SIZE_PER_SCATTER;
+ } else {
+ pDev->HifScatterInfo.MaxScatterEntries = AR6K_SCATTER_ENTRIES_PER_REQ;
+ pDev->HifScatterInfo.MaxTransferSizePerScatterReq = AR6K_MAX_TRANSFER_SIZE_PER_SCATTER;
+ }
+ pDev->ScatterIsVirtual = TRUE;
+ }
+
+ return status;
+}
+
+
+A_STATUS DevSetupMsgBundling(AR6K_DEVICE *pDev, int MaxMsgsPerTransfer)
+{
+ A_STATUS status;
+
+ if (pDev->MailBoxInfo.Flags & HIF_MBOX_FLAG_NO_BUNDLING) {
+ AR_DEBUG_PRINTF(ATH_DEBUG_WARN, ("HIF requires bundling disabled\n"));
+ return A_ENOTSUP;
+ }
+
+ status = HIFConfigureDevice(pDev->HIFDevice,
+ HIF_CONFIGURE_QUERY_SCATTER_REQUEST_SUPPORT,
+ &pDev->HifScatterInfo,
+ sizeof(pDev->HifScatterInfo));
+
+ if (A_FAILED(status)) {
+ AR_DEBUG_PRINTF(ATH_DEBUG_WARN,
+ ("AR6K: ** HIF layer does not support scatter requests (%d) \n",status));
+
+ /* we can try to use a virtual DMA scatter mechanism using legacy HIFReadWrite() */
+ status = DevSetupVirtualScatterSupport(pDev);
+
+ if (A_SUCCESS(status)) {
+ AR_DEBUG_PRINTF(ATH_DEBUG_ANY,
+ ("AR6K: virtual scatter transfers enabled (max scatter items:%d: maxlen:%d) \n",
+ DEV_GET_MAX_MSG_PER_BUNDLE(pDev), DEV_GET_MAX_BUNDLE_LENGTH(pDev)));
+ }
+
+ } else {
+ AR_DEBUG_PRINTF(ATH_DEBUG_ANY,
+ ("AR6K: HIF layer supports scatter requests (max scatter items:%d: maxlen:%d) \n",
+ DEV_GET_MAX_MSG_PER_BUNDLE(pDev), DEV_GET_MAX_BUNDLE_LENGTH(pDev)));
+ }
+
+ if (A_SUCCESS(status)) {
+ /* for the recv path, the maximum number of bytes per recv bundle is just limited
+ * by the maximum transfer size at the HIF layer */
+ pDev->MaxRecvBundleSize = pDev->HifScatterInfo.MaxTransferSizePerScatterReq;
+
+ if (pDev->MailBoxInfo.MboxBusIFType == MBOX_BUS_IF_SPI) {
+ AR_DEBUG_PRINTF(ATH_DEBUG_WARN, ("AR6K : SPI bus requires TX bundling disabled\n"));
+ pDev->MaxSendBundleSize = 0;
+ } else {
+ /* for the send path, the max transfer size is limited by the existence and size of
+ * the extended mailbox address range */
+ if (pDev->MailBoxInfo.MboxProp[0].ExtendedAddress != 0) {
+ pDev->MaxSendBundleSize = pDev->MailBoxInfo.MboxProp[0].ExtendedSize;
+ } else {
+ /* legacy */
+ pDev->MaxSendBundleSize = AR6K_LEGACY_MAX_WRITE_LENGTH;
+ }
+
+ if (pDev->MaxSendBundleSize > pDev->HifScatterInfo.MaxTransferSizePerScatterReq) {
+ /* limit send bundle size to what the HIF can support for scatter requests */
+ pDev->MaxSendBundleSize = pDev->HifScatterInfo.MaxTransferSizePerScatterReq;
+ }
+ }
+
+ AR_DEBUG_PRINTF(ATH_DEBUG_ANY,
+ ("AR6K: max recv: %d max send: %d \n",
+ DEV_GET_MAX_BUNDLE_RECV_LENGTH(pDev), DEV_GET_MAX_BUNDLE_SEND_LENGTH(pDev)));
+
+ }
+ return status;
+}
+
+A_STATUS DevSubmitScatterRequest(AR6K_DEVICE *pDev, HIF_SCATTER_REQ *pScatterReq, A_BOOL Read, A_BOOL Async)
+{
+ A_STATUS status;
+
+ if (Read) {
+ /* read operation */
+ pScatterReq->Request = (Async) ? HIF_RD_ASYNC_BLOCK_FIX : HIF_RD_SYNC_BLOCK_FIX;
+ pScatterReq->Address = pDev->MailBoxInfo.MboxAddresses[HTC_MAILBOX];
+ A_ASSERT(pScatterReq->TotalLength <= (A_UINT32)DEV_GET_MAX_BUNDLE_RECV_LENGTH(pDev));
+ } else {
+ A_UINT32 mailboxWidth;
+
+ /* write operation */
+ pScatterReq->Request = (Async) ? HIF_WR_ASYNC_BLOCK_INC : HIF_WR_SYNC_BLOCK_INC;
+ A_ASSERT(pScatterReq->TotalLength <= (A_UINT32)DEV_GET_MAX_BUNDLE_SEND_LENGTH(pDev));
+ if (pScatterReq->TotalLength > AR6K_LEGACY_MAX_WRITE_LENGTH) {
+ /* for large writes use the extended address */
+ pScatterReq->Address = pDev->MailBoxInfo.MboxProp[HTC_MAILBOX].ExtendedAddress;
+ mailboxWidth = pDev->MailBoxInfo.MboxProp[HTC_MAILBOX].ExtendedSize;
+ } else {
+ pScatterReq->Address = pDev->MailBoxInfo.MboxAddresses[HTC_MAILBOX];
+ mailboxWidth = AR6K_LEGACY_MAX_WRITE_LENGTH;
+ }
+
+ if (!pDev->ScatterIsVirtual) {
+ /* we are passing this scatter list down to the HIF layer's scatter request handler; fix up the address
+ * so that the last byte falls on the EOM. We do this for those HIFs that support the
+ * scatter API */
+ pScatterReq->Address += (mailboxWidth - pScatterReq->TotalLength);
+ }
+
+ }
+
+ AR_DEBUG_PRINTF(ATH_DEBUG_RECV | ATH_DEBUG_SEND,
+ ("DevSubmitScatterRequest, Entries: %d, Total Length: %d Mbox:0x%X (mode: %s : %s)\n",
+ pScatterReq->ValidScatterEntries,
+ pScatterReq->TotalLength,
+ pScatterReq->Address,
+ Async ? "ASYNC" : "SYNC",
+ (Read) ? "RD" : "WR"));
+
+ status = DEV_PREPARE_SCATTER_OPERATION(pScatterReq);
+
+ if (A_FAILED(status)) {
+ if (Async) {
+ pScatterReq->CompletionStatus = status;
+ pScatterReq->CompletionRoutine(pScatterReq);
+ return A_OK;
+ }
+ return status;
+ }
+
+ status = pDev->HifScatterInfo.pReadWriteScatterFunc(pDev->ScatterIsVirtual ? pDev : pDev->HIFDevice,
+ pScatterReq);
+ if (!Async) {
+ /* in sync mode, we can touch the scatter request */
+ pScatterReq->CompletionStatus = status;
+ DEV_FINISH_SCATTER_OPERATION(pScatterReq);
+ } else {
+ if (status == A_PENDING) {
+ status = A_OK;
+ }
+ }
+
+ return status;
+}
+
+
+#ifdef MBOXHW_UNIT_TEST
+
+
+/* This is a mailbox hardware unit test that must be called in a schedulable context
+ * This test is very simple, it will send a list of buffers with a counting pattern
+ * and the target will invert the data and send the message back
+ *
+ * the unit test has the following constraints:
+ *
+ * The target has at least 8 buffers of 256 bytes each. The host will send
+ * the following pattern of buffers in rapid succession :
+ *
+ * 1 buffer - 128 bytes
+ * 1 buffer - 256 bytes
+ * 1 buffer - 512 bytes
+ * 1 buffer - 1024 bytes
+ *
+ * The host will send the buffers to one mailbox and wait for buffers to be reflected
+ * back from the same mailbox. The target sends the buffers FIFO order.
+ * Once the final buffer has been received for a mailbox, the next mailbox is tested.
+ *
+ *
+ * Note: To simplify the test, we assume that the chosen buffer sizes
+ * pad up neatly to the block size
+ *
+ * It is expected that higher-order tests will be written to stress the mailboxes using
+ * a message-based protocol (with some performance timing) that can create more
+ * randomness in the packets sent over mailboxes.
+ *
+ * */
+
+#define A_ROUND_UP_PWR2(x, align) (((int) (x) + ((align)-1)) & ~((align)-1))
+
+#define BUFFER_BLOCK_PAD 128
+
+#if 0
+#define BUFFER1 128
+#define BUFFER2 256
+#define BUFFER3 512
+#define BUFFER4 1024
+#endif
+
+#if 1
+#define BUFFER1 80
+#define BUFFER2 200
+#define BUFFER3 444
+#define BUFFER4 800
+#endif
+
+#define TOTAL_BYTES (A_ROUND_UP_PWR2(BUFFER1,BUFFER_BLOCK_PAD) + \
+ A_ROUND_UP_PWR2(BUFFER2,BUFFER_BLOCK_PAD) + \
+ A_ROUND_UP_PWR2(BUFFER3,BUFFER_BLOCK_PAD) + \
+ A_ROUND_UP_PWR2(BUFFER4,BUFFER_BLOCK_PAD) )
+
+#define TEST_BYTES (BUFFER1 + BUFFER2 + BUFFER3 + BUFFER4)
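+
+/* With the sizes currently selected above (80/200/444/800 bytes) and a
+ * 128-byte BUFFER_BLOCK_PAD, the padded sizes are 128/256/512/896, so
+ * TOTAL_BYTES works out to 1792 while TEST_BYTES is 1524 payload bytes. */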
+
+#define TEST_CREDITS_RECV_TIMEOUT 100
+
+static A_UINT8 g_Buffer[TOTAL_BYTES];
+static A_UINT32 g_MailboxAddrs[AR6K_MAILBOXES];
+static A_UINT32 g_BlockSizes[AR6K_MAILBOXES];
+
+#define BUFFER_PROC_LIST_DEPTH 4
+
+typedef struct _BUFFER_PROC_LIST{
+ A_UINT8 *pBuffer;
+ A_UINT32 length;
+}BUFFER_PROC_LIST;
+
+
+#define PUSH_BUFF_PROC_ENTRY(pList,len,pCurrpos) \
+{ \
+ (pList)->pBuffer = (pCurrpos); \
+ (pList)->length = (len); \
+ (pCurrpos) += (len); \
+ (pList)++; \
+}
+
+/* a simple and crude way to send different "message" sizes */
+static void AssembleBufferList(BUFFER_PROC_LIST *pList)
+{
+ A_UINT8 *pBuffer = g_Buffer;
+
+#if BUFFER_PROC_LIST_DEPTH < 4
+#error "Buffer processing list depth is not deep enough!!"
+#endif
+
+ PUSH_BUFF_PROC_ENTRY(pList,BUFFER1,pBuffer);
+ PUSH_BUFF_PROC_ENTRY(pList,BUFFER2,pBuffer);
+ PUSH_BUFF_PROC_ENTRY(pList,BUFFER3,pBuffer);
+ PUSH_BUFF_PROC_ENTRY(pList,BUFFER4,pBuffer);
+
+}
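+
+/* For the current sizes this yields four back-to-back regions of g_Buffer at
+ * offsets 0, 80, 280 and 724 with lengths 80, 200, 444 and 800; the buffers
+ * are packed with no padding in memory, block padding is applied only to the
+ * transfer lengths in SendBuffers()/RecvBuffers(). */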
+
+#define FILL_ZERO TRUE
+#define FILL_COUNTING FALSE
+static void InitBuffers(A_BOOL Zero)
+{
+ A_UINT16 *pBuffer16 = (A_UINT16 *)g_Buffer;
+ int i;
+
+ /* fill buffer with 16 bit counting pattern or zeros */
+ for (i = 0; i < (TOTAL_BYTES / 2) ; i++) {
+ if (!Zero) {
+ pBuffer16[i] = (A_UINT16)i;
+ } else {
+ pBuffer16[i] = 0;
+ }
+ }
+}
+
+
+static A_BOOL CheckOneBuffer(A_UINT16 *pBuffer16, int Length)
+{
+ int i;
+ A_UINT16 startCount;
+ A_BOOL success = TRUE;
+
+ /* get the starting count */
+ startCount = pBuffer16[0];
+ /* invert it, this is the expected value */
+ startCount = ~startCount;
+ /* scan the buffer and verify */
+ for (i = 0; i < (Length / 2) ; i++,startCount++) {
+ /* target will invert all the data */
+ if ((A_UINT16)pBuffer16[i] != (A_UINT16)~startCount) {
+ success = FALSE;
+ AR_DEBUG_PRINTF(ATH_DEBUG_ERR, ("Invalid Data Got:0x%X, Expecting:0x%X (offset:%d, total:%d) \n",
+ pBuffer16[i], ((A_UINT16)~startCount), i, Length));
+ AR_DEBUG_PRINTF(ATH_DEBUG_ERR, ("0x%X 0x%X 0x%X 0x%X \n",
+ pBuffer16[i], pBuffer16[i + 1], pBuffer16[i + 2],pBuffer16[i+3]));
+ break;
+ }
+ }
+
+ return success;
+}
+
+static A_BOOL CheckBuffers(void)
+{
+ int i;
+ A_BOOL success = TRUE;
+ BUFFER_PROC_LIST checkList[BUFFER_PROC_LIST_DEPTH];
+
+ /* assemble the list */
+ AssembleBufferList(checkList);
+
+ /* scan the buffers and verify */
+ for (i = 0; i < BUFFER_PROC_LIST_DEPTH ; i++) {
+ success = CheckOneBuffer((A_UINT16 *)checkList[i].pBuffer, checkList[i].length);
+ if (!success) {
+ AR_DEBUG_PRINTF(ATH_DEBUG_ERR, ("Buffer : 0x%X, Length:%d failed verify \n",
+ (A_UINT32)checkList[i].pBuffer, checkList[i].length));
+ break;
+ }
+ }
+
+ return success;
+}
+
+ /* find the end marker for the last buffer we will be sending */
+static A_UINT16 GetEndMarker(void)
+{
+ A_UINT8 *pBuffer;
+ BUFFER_PROC_LIST checkList[BUFFER_PROC_LIST_DEPTH];
+
+ /* fill up buffers with the normal counting pattern */
+ InitBuffers(FILL_COUNTING);
+
+ /* assemble the list we will be sending down */
+ AssembleBufferList(checkList);
+ /* point to the last 2 bytes of the last buffer */
+ pBuffer = &(checkList[BUFFER_PROC_LIST_DEPTH - 1].pBuffer[(checkList[BUFFER_PROC_LIST_DEPTH - 1].length) - 2]);
+
+ /* the last count in the last buffer is the marker */
+ return (A_UINT16)pBuffer[0] | ((A_UINT16)pBuffer[1] << 8);
+}
+
+#define ATH_PRINT_OUT_ZONE ATH_DEBUG_ERR
+
+/* send the ordered buffers to the target */
+static A_STATUS SendBuffers(AR6K_DEVICE *pDev, int mbox)
+{
+ A_STATUS status = A_OK;
+ A_UINT32 request = HIF_WR_SYNC_BLOCK_INC;
+ BUFFER_PROC_LIST sendList[BUFFER_PROC_LIST_DEPTH];
+ int i;
+ int totalBytes = 0;
+ int paddedLength;
+ int totalwPadding = 0;
+
+ AR_DEBUG_PRINTF(ATH_PRINT_OUT_ZONE, ("Sending buffers on mailbox : %d \n",mbox));
+
+ /* fill buffer with counting pattern */
+ InitBuffers(FILL_COUNTING);
+
+ /* assemble the order in which we send */
+ AssembleBufferList(sendList);
+
+ for (i = 0; i < BUFFER_PROC_LIST_DEPTH; i++) {
+
+ /* we are doing block transfers, so we need to pad everything to a block size */
+ paddedLength = (sendList[i].length + (g_BlockSizes[mbox] - 1)) &
+ (~(g_BlockSizes[mbox] - 1));
+
+ /* send each buffer synchronously */
+ status = HIFReadWrite(pDev->HIFDevice,
+ g_MailboxAddrs[mbox],
+ sendList[i].pBuffer,
+ paddedLength,
+ request,
+ NULL);
+ if (status != A_OK) {
+ break;
+ }
+ totalBytes += sendList[i].length;
+ totalwPadding += paddedLength;
+ }
+
+ AR_DEBUG_PRINTF(ATH_PRINT_OUT_ZONE, ("Sent %d bytes (%d padded bytes) to mailbox : %d \n",totalBytes,totalwPadding,mbox));
+
+ return status;
+}
+
+/* poll the mailbox credit counter until we get a credit or timeout */
+static A_STATUS GetCredits(AR6K_DEVICE *pDev, int mbox, int *pCredits)
+{
+ A_STATUS status = A_OK;
+ int timeout = TEST_CREDITS_RECV_TIMEOUT;
+ A_UINT8 credits = 0;
+ A_UINT32 address;
+
+ while (TRUE) {
+
+ /* Read the counter register to get credits, this auto-decrements */
+ address = COUNT_DEC_ADDRESS + (AR6K_MAILBOXES + mbox) * 4;
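+ /* illustration: the decrementing credit counters used here start
+ * AR6K_MAILBOXES (4) entries into the COUNT_DEC register block, one
+ * 32-bit register apart, so mbox N is read at
+ * COUNT_DEC_ADDRESS + (4 + N) * 4 */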
+ status = HIFReadWrite(pDev->HIFDevice, address, &credits, sizeof(credits),
+ HIF_RD_SYNC_BYTE_FIX, NULL);
+ if (status != A_OK) {
+ AR_DEBUG_PRINTF(ATH_DEBUG_ERR,
+ ("Unable to decrement the command credit count register (mbox=%d)\n",mbox));
+ status = A_ERROR;
+ break;
+ }
+
+ if (credits) {
+ break;
+ }
+
+ timeout--;
+
+ if (timeout <= 0) {
+ AR_DEBUG_PRINTF(ATH_DEBUG_ERR,
+ (" Timeout reading credit registers (mbox=%d, address:0x%X) \n",mbox,address));
+ status = A_ERROR;
+ break;
+ }
+
+ /* delay a little, target may not be ready */
+ A_MDELAY(1000);
+
+ }
+
+ if (status == A_OK) {
+ *pCredits = credits;
+ }
+
+ return status;
+}
+
+
+/* wait for the buffers to come back */
+static A_STATUS RecvBuffers(AR6K_DEVICE *pDev, int mbox)
+{
+ A_STATUS status = A_OK;
+ A_UINT32 request = HIF_RD_SYNC_BLOCK_INC;
+ BUFFER_PROC_LIST recvList[BUFFER_PROC_LIST_DEPTH];
+ int curBuffer;
+ int credits;
+ int i;
+ int totalBytes = 0;
+ int paddedLength;
+ int totalwPadding = 0;
+
+ AR_DEBUG_PRINTF(ATH_PRINT_OUT_ZONE, ("Waiting for buffers on mailbox : %d \n",mbox));
+
+ /* zero the buffers */
+ InitBuffers(FILL_ZERO);
+
+ /* assemble the order in which we should receive */
+ AssembleBufferList(recvList);
+
+ curBuffer = 0;
+
+ while (curBuffer < BUFFER_PROC_LIST_DEPTH) {
+
+ /* get number of buffers that have been completed, this blocks
+ * until we get at least 1 credit or it times out */
+ status = GetCredits(pDev, mbox, &credits);
+
+ if (status != A_OK) {
+ break;
+ }
+
+ AR_DEBUG_PRINTF(ATH_PRINT_OUT_ZONE, ("Got %d messages on mailbox : %d \n",credits, mbox));
+
+ /* get all the buffers that are sitting on the queue */
+ for (i = 0; i < credits; i++) {
+ A_ASSERT(curBuffer < BUFFER_PROC_LIST_DEPTH);
+ /* recv the current buffer synchronously, the buffers should come back in
+ * order... with padding applied by the target */
+ paddedLength = (recvList[curBuffer].length + (g_BlockSizes[mbox] - 1)) &
+ (~(g_BlockSizes[mbox] - 1));
+
+ status = HIFReadWrite(pDev->HIFDevice,
+ g_MailboxAddrs[mbox],
+ recvList[curBuffer].pBuffer,
+ paddedLength,
+ request,
+ NULL);
+ if (status != A_OK) {
+ AR_DEBUG_PRINTF(ATH_DEBUG_ERR, ("Failed to read %d bytes on mailbox:%d : address:0x%X \n",
+ recvList[curBuffer].length, mbox, g_MailboxAddrs[mbox]));
+ break;
+ }
+
+ totalwPadding += paddedLength;
+ totalBytes += recvList[curBuffer].length;
+ curBuffer++;
+ }
+
+ if (status != A_OK) {
+ break;
+ }
+ /* go back and get some more */
+ credits = 0;
+ }
+
+ if (totalBytes != TEST_BYTES) {
+ A_ASSERT(FALSE);
+ } else {
+ AR_DEBUG_PRINTF(ATH_PRINT_OUT_ZONE, ("Got all buffers on mbox:%d total recv :%d (w/Padding : %d) \n",
+ mbox, totalBytes, totalwPadding));
+ }
+
+ return status;
+}
+
+static A_STATUS DoOneMboxHWTest(AR6K_DEVICE *pDev, int mbox)
+{
+ A_STATUS status;
+
+ do {
+ /* send out buffers */
+ status = SendBuffers(pDev,mbox);
+
+ if (status != A_OK) {
+ AR_DEBUG_PRINTF(ATH_DEBUG_ERR, ("Sending buffers Failed : %d mbox:%d\n",status,mbox));
+ break;
+ }
+
+ /* go get them, this will block */
+ status = RecvBuffers(pDev, mbox);
+
+ if (status != A_OK) {
+ AR_DEBUG_PRINTF(ATH_DEBUG_ERR, ("Recv buffers Failed : %d mbox:%d\n",status,mbox));
+ break;
+ }
+
+ /* check the returned data patterns */
+ if (!CheckBuffers()) {
+ AR_DEBUG_PRINTF(ATH_DEBUG_ERR, ("Buffer Verify Failed : mbox:%d\n",mbox));
+ status = A_ERROR;
+ break;
+ }
+
+ AR_DEBUG_PRINTF(ATH_PRINT_OUT_ZONE, (" Send/Recv success! mailbox : %d \n",mbox));
+
+ } while (FALSE);
+
+ return status;
+}
+
+/* here is where the test starts */
+A_STATUS DoMboxHWTest(AR6K_DEVICE *pDev)
+{
+ int i;
+ A_STATUS status;
+ int credits = 0;
+ A_UINT8 params[4];
+ int numBufs;
+ int bufferSize;
+ A_UINT16 temp;
+
+
+ AR_DEBUG_PRINTF(ATH_PRINT_OUT_ZONE, (" DoMboxHWTest START - \n"));
+
+ do {
+ /* get the addresses for all 4 mailboxes */
+ status = HIFConfigureDevice(pDev->HIFDevice, HIF_DEVICE_GET_MBOX_ADDR,
+ g_MailboxAddrs, sizeof(g_MailboxAddrs));
+
+ if (status != A_OK) {
+ A_ASSERT(FALSE);
+ break;
+ }
+
+ /* get the block sizes */
+ status = HIFConfigureDevice(pDev->HIFDevice, HIF_DEVICE_GET_MBOX_BLOCK_SIZE,
+ g_BlockSizes, sizeof(g_BlockSizes));
+
+ if (status != A_OK) {
+ A_ASSERT(FALSE);
+ break;
+ }
+
+ /* note, the HIF layer usually reports mbox 0 to have a block size of
+ * 1, but our test wants to run in block-mode for all mailboxes, so we treat all mailboxes
+ * the same. */
+ g_BlockSizes[0] = g_BlockSizes[1];
+ AR_DEBUG_PRINTF(ATH_PRINT_OUT_ZONE, ("Block Size to use: %d \n",g_BlockSizes[0]));
+
+ if (g_BlockSizes[1] > BUFFER_BLOCK_PAD) {
+ AR_DEBUG_PRINTF(ATH_PRINT_OUT_ZONE, ("%d Block size is too large for buffer pad %d\n",
+ g_BlockSizes[1], BUFFER_BLOCK_PAD));
+ break;
+ }
+
+ AR_DEBUG_PRINTF(ATH_PRINT_OUT_ZONE, ("Waiting for target.... \n"));
+
+ /* the target lets us know it is ready by giving us 1 credit on
+ * mailbox 0 */
+ status = GetCredits(pDev, 0, &credits);
+
+ if (status != A_OK) {
+ AR_DEBUG_PRINTF(ATH_DEBUG_ERR, ("Failed to wait for target ready \n"));
+ break;
+ }
+
+ AR_DEBUG_PRINTF(ATH_PRINT_OUT_ZONE, ("Target is ready ...\n"));
+
+ /* read the first 4 scratch registers */
+ status = HIFReadWrite(pDev->HIFDevice,
+ SCRATCH_ADDRESS,
+ params,
+ 4,
+ HIF_RD_SYNC_BYTE_INC,
+ NULL);
+
+ if (status != A_OK) {
+ AR_DEBUG_PRINTF(ATH_DEBUG_ERR, ("Failed to get parameters \n"));
+ break;
+ }
+
+ numBufs = params[0];
+ bufferSize = (int)(((A_UINT16)params[2] << 8) | (A_UINT16)params[1]);
+
+ AR_DEBUG_PRINTF(ATH_PRINT_OUT_ZONE,
+ ("Target parameters: bufs per mailbox:%d, buffer size:%d bytes (total space: %d, minimum required space (w/padding): %d) \n",
+ numBufs, bufferSize, (numBufs * bufferSize), TOTAL_BYTES));
+
+ if ((numBufs * bufferSize) < TOTAL_BYTES) {
+ AR_DEBUG_PRINTF(ATH_DEBUG_ERR, ("Not Enough buffer space to run test! need:%d, got:%d \n",
+ TOTAL_BYTES, (numBufs*bufferSize)));
+ status = A_ERROR;
+ break;
+ }
+
+ temp = GetEndMarker();
+
+ status = HIFReadWrite(pDev->HIFDevice,
+ SCRATCH_ADDRESS + 4,
+ (A_UINT8 *)&temp,
+ 2,
+ HIF_WR_SYNC_BYTE_INC,
+ NULL);
+
+ if (status != A_OK) {
+ AR_DEBUG_PRINTF(ATH_DEBUG_ERR, ("Failed to write end marker \n"));
+ break;
+ }
+
+ AR_DEBUG_PRINTF(ATH_PRINT_OUT_ZONE, ("End Marker: 0x%X \n",temp));
+
+ temp = (A_UINT16)g_BlockSizes[1];
+ /* convert to a mask */
+ temp = temp - 1;
+ status = HIFReadWrite(pDev->HIFDevice,
+ SCRATCH_ADDRESS + 6,
+ (A_UINT8 *)&temp,
+ 2,
+ HIF_WR_SYNC_BYTE_INC,
+ NULL);
+
+ if (status != A_OK) {
+ AR_DEBUG_PRINTF(ATH_DEBUG_ERR, ("Failed to write block mask \n"));
+ break;
+ }
+
+ AR_DEBUG_PRINTF(ATH_PRINT_OUT_ZONE, ("Set Block Mask: 0x%X \n",temp));
+
+ /* execute the test on each mailbox */
+ for (i = 0; i < AR6K_MAILBOXES; i++) {
+ status = DoOneMboxHWTest(pDev, i);
+ if (status != A_OK) {
+ break;
+ }
+ }
+
+ } while (FALSE);
+
+ if (status == A_OK) {
+ AR_DEBUG_PRINTF(ATH_PRINT_OUT_ZONE, (" DoMboxHWTest DONE - SUCCESS! - \n"));
+ } else {
+ AR_DEBUG_PRINTF(ATH_PRINT_OUT_ZONE, (" DoMboxHWTest DONE - FAILED! - \n"));
+ }
+ /* don't let HTC_Start continue, the target is actually not running any HTC code */
+ return A_ERROR;
+}
+#endif
+
+
+
diff --git a/drivers/staging/ath6kl/htc2/AR6000/ar6k.h b/drivers/staging/ath6kl/htc2/AR6000/ar6k.h
new file mode 100644
index 000000000000..b30fd877aebf
--- /dev/null
+++ b/drivers/staging/ath6kl/htc2/AR6000/ar6k.h
@@ -0,0 +1,398 @@
+//------------------------------------------------------------------------------
+// <copyright file="ar6k.h" company="Atheros">
+// Copyright (c) 2007-2010 Atheros Corporation. All rights reserved.
+//
+//
+// Permission to use, copy, modify, and/or distribute this software for any
+// purpose with or without fee is hereby granted, provided that the above
+// copyright notice and this permission notice appear in all copies.
+//
+// THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+// WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+// MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+// ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+// WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+// ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+// OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+//
+//
+//------------------------------------------------------------------------------
+//==============================================================================
+// AR6K device layer that handles register level I/O
+//
+// Author(s): ="Atheros"
+//==============================================================================
+#ifndef AR6K_H_
+#define AR6K_H_
+
+#include "hci_transport_api.h"
+#include "../htc_debug.h"
+
+#define AR6K_MAILBOXES 4
+
+/* HTC runs over mailbox 0 */
+#define HTC_MAILBOX 0
+
+#define AR6K_TARGET_DEBUG_INTR_MASK 0x01
+
+#define OTHER_INTS_ENABLED (INT_STATUS_ENABLE_ERROR_MASK | \
+ INT_STATUS_ENABLE_CPU_MASK | \
+ INT_STATUS_ENABLE_COUNTER_MASK)
+
+
+//#define MBOXHW_UNIT_TEST 1
+
+#include "athstartpack.h"
+typedef PREPACK struct _AR6K_IRQ_PROC_REGISTERS {
+ A_UINT8 host_int_status;
+ A_UINT8 cpu_int_status;
+ A_UINT8 error_int_status;
+ A_UINT8 counter_int_status;
+ A_UINT8 mbox_frame;
+ A_UINT8 rx_lookahead_valid;
+ A_UINT8 host_int_status2;
+ A_UINT8 gmbox_rx_avail;
+ A_UINT32 rx_lookahead[2];
+ A_UINT32 rx_gmbox_lookahead_alias[2];
+} POSTPACK AR6K_IRQ_PROC_REGISTERS;
+
+#define AR6K_IRQ_PROC_REGS_SIZE sizeof(AR6K_IRQ_PROC_REGISTERS)
+
+typedef PREPACK struct _AR6K_IRQ_ENABLE_REGISTERS {
+ A_UINT8 int_status_enable;
+ A_UINT8 cpu_int_status_enable;
+ A_UINT8 error_status_enable;
+ A_UINT8 counter_int_status_enable;
+} POSTPACK AR6K_IRQ_ENABLE_REGISTERS;
+
+typedef PREPACK struct _AR6K_GMBOX_CTRL_REGISTERS {
+ A_UINT8 int_status_enable;
+} POSTPACK AR6K_GMBOX_CTRL_REGISTERS;
+
+#include "athendpack.h"
+
+#define AR6K_IRQ_ENABLE_REGS_SIZE sizeof(AR6K_IRQ_ENABLE_REGISTERS)
+
+#define AR6K_REG_IO_BUFFER_SIZE 32
+#define AR6K_MAX_REG_IO_BUFFERS 8
+#define FROM_DMA_BUFFER TRUE
+#define TO_DMA_BUFFER FALSE
+#define AR6K_SCATTER_ENTRIES_PER_REQ 16
+#define AR6K_MAX_TRANSFER_SIZE_PER_SCATTER 16*1024
+#define AR6K_SCATTER_REQS 4
+#define AR6K_LEGACY_MAX_WRITE_LENGTH 2048
+
+#ifndef A_CACHE_LINE_PAD
+#define A_CACHE_LINE_PAD 128
+#endif
+#define AR6K_MIN_SCATTER_ENTRIES_PER_REQ 2
+#define AR6K_MIN_TRANSFER_SIZE_PER_SCATTER 4*1024
+
+/* buffers for ASYNC I/O */
+typedef struct AR6K_ASYNC_REG_IO_BUFFER {
+ HTC_PACKET HtcPacket; /* we use an HTC packet as a wrapper for our async register-based I/O */
+ A_UINT8 _Pad1[A_CACHE_LINE_PAD];
+ A_UINT8 Buffer[AR6K_REG_IO_BUFFER_SIZE]; /* cache-line safe with pads around */
+ A_UINT8 _Pad2[A_CACHE_LINE_PAD];
+} AR6K_ASYNC_REG_IO_BUFFER;
+
+typedef struct _AR6K_GMBOX_INFO {
+ void *pProtocolContext;
+ A_STATUS (*pMessagePendingCallBack)(void *pContext, A_UINT8 LookAheadBytes[], int ValidBytes);
+ A_STATUS (*pCreditsPendingCallback)(void *pContext, int NumCredits, A_BOOL CreditIRQEnabled);
+ void (*pTargetFailureCallback)(void *pContext, A_STATUS Status);
+ void (*pStateDumpCallback)(void *pContext);
+ A_BOOL CreditCountIRQEnabled;
+} AR6K_GMBOX_INFO;
+
+typedef struct _AR6K_DEVICE {
+ A_MUTEX_T Lock;
+ A_UINT8 _Pad1[A_CACHE_LINE_PAD];
+ AR6K_IRQ_PROC_REGISTERS IrqProcRegisters; /* cache-line safe with pads around */
+ A_UINT8 _Pad2[A_CACHE_LINE_PAD];
+ AR6K_IRQ_ENABLE_REGISTERS IrqEnableRegisters; /* cache-line safe with pads around */
+ A_UINT8 _Pad3[A_CACHE_LINE_PAD];
+ void *HIFDevice;
+ A_UINT32 BlockSize;
+ A_UINT32 BlockMask;
+ HIF_DEVICE_MBOX_INFO MailBoxInfo;
+ HIF_PENDING_EVENTS_FUNC GetPendingEventsFunc;
+ void *HTCContext;
+ HTC_PACKET_QUEUE RegisterIOList;
+ AR6K_ASYNC_REG_IO_BUFFER RegIOBuffers[AR6K_MAX_REG_IO_BUFFERS];
+ void (*TargetFailureCallback)(void *Context);
+ A_STATUS (*MessagePendingCallback)(void *Context,
+ A_UINT32 LookAheads[],
+ int NumLookAheads,
+ A_BOOL *pAsyncProc,
+ int *pNumPktsFetched);
+ HIF_DEVICE_IRQ_PROCESSING_MODE HifIRQProcessingMode;
+ HIF_MASK_UNMASK_RECV_EVENT HifMaskUmaskRecvEvent;
+ A_BOOL HifAttached;
+ HIF_DEVICE_IRQ_YIELD_PARAMS HifIRQYieldParams;
+ A_BOOL DSRCanYield;
+ int CurrentDSRRecvCount;
+ HIF_DEVICE_SCATTER_SUPPORT_INFO HifScatterInfo;
+ DL_LIST ScatterReqHead;
+ A_BOOL ScatterIsVirtual;
+ int MaxRecvBundleSize;
+ int MaxSendBundleSize;
+ AR6K_GMBOX_INFO GMboxInfo;
+ A_BOOL GMboxEnabled;
+ AR6K_GMBOX_CTRL_REGISTERS GMboxControlRegisters;
+ int RecheckIRQStatusCnt;
+} AR6K_DEVICE;
+
+#define LOCK_AR6K(p) A_MUTEX_LOCK(&(p)->Lock);
+#define UNLOCK_AR6K(p) A_MUTEX_UNLOCK(&(p)->Lock);
+#define REF_IRQ_STATUS_RECHECK(p) (p)->RecheckIRQStatusCnt = 1 /* note: no need to lock this, it only gets set */
+
+A_STATUS DevSetup(AR6K_DEVICE *pDev);
+void DevCleanup(AR6K_DEVICE *pDev);
+A_STATUS DevUnmaskInterrupts(AR6K_DEVICE *pDev);
+A_STATUS DevMaskInterrupts(AR6K_DEVICE *pDev);
+A_STATUS DevPollMboxMsgRecv(AR6K_DEVICE *pDev,
+ A_UINT32 *pLookAhead,
+ int TimeoutMS);
+A_STATUS DevRWCompletionHandler(void *context, A_STATUS status);
+A_STATUS DevDsrHandler(void *context);
+A_STATUS DevCheckPendingRecvMsgsAsync(void *context);
+void DevAsyncIrqProcessComplete(AR6K_DEVICE *pDev);
+void DevDumpRegisters(AR6K_DEVICE *pDev,
+ AR6K_IRQ_PROC_REGISTERS *pIrqProcRegs,
+ AR6K_IRQ_ENABLE_REGISTERS *pIrqEnableRegs);
+
+#define DEV_STOP_RECV_ASYNC TRUE
+#define DEV_STOP_RECV_SYNC FALSE
+#define DEV_ENABLE_RECV_ASYNC TRUE
+#define DEV_ENABLE_RECV_SYNC FALSE
+A_STATUS DevStopRecv(AR6K_DEVICE *pDev, A_BOOL ASyncMode);
+A_STATUS DevEnableRecv(AR6K_DEVICE *pDev, A_BOOL ASyncMode);
+A_STATUS DevEnableInterrupts(AR6K_DEVICE *pDev);
+A_STATUS DevDisableInterrupts(AR6K_DEVICE *pDev);
+A_STATUS DevWaitForPendingRecv(AR6K_DEVICE *pDev,A_UINT32 TimeoutInMs,A_BOOL *pbIsRecvPending);
+
+#define DEV_CALC_RECV_PADDED_LEN(pDev, length) (((length) + (pDev)->BlockMask) & (~((pDev)->BlockMask)))
+#define DEV_CALC_SEND_PADDED_LEN(pDev, length) DEV_CALC_RECV_PADDED_LEN(pDev,length)
+#define DEV_IS_LEN_BLOCK_ALIGNED(pDev, length) (((length) % (pDev)->BlockSize) == 0)
+
+static INLINE A_STATUS DevSendPacket(AR6K_DEVICE *pDev, HTC_PACKET *pPacket, A_UINT32 SendLength) {
+ A_UINT32 paddedLength;
+ A_BOOL sync = (pPacket->Completion == NULL) ? TRUE : FALSE;
+ A_STATUS status;
+
+ /* adjust the length to be a multiple of block size if appropriate */
+ paddedLength = DEV_CALC_SEND_PADDED_LEN(pDev, SendLength);
+
+#if 0
+ if (paddedLength > pPacket->BufferLength) {
+ A_ASSERT(FALSE);
+ if (pPacket->Completion != NULL) {
+ COMPLETE_HTC_PACKET(pPacket,A_EINVAL);
+ return A_OK;
+ }
+ return A_EINVAL;
+ }
+#endif
+
+ AR_DEBUG_PRINTF(ATH_DEBUG_SEND,
+ ("DevSendPacket, Padded Length: %d Mbox:0x%X (mode:%s)\n",
+ paddedLength,
+ pDev->MailBoxInfo.MboxAddresses[HTC_MAILBOX],
+ sync ? "SYNC" : "ASYNC"));
+
+ status = HIFReadWrite(pDev->HIFDevice,
+ pDev->MailBoxInfo.MboxAddresses[HTC_MAILBOX],
+ pPacket->pBuffer,
+ paddedLength, /* the padded length */
+ sync ? HIF_WR_SYNC_BLOCK_INC : HIF_WR_ASYNC_BLOCK_INC,
+ sync ? NULL : pPacket); /* pass the packet as the context to the HIF request */
+
+ if (sync) {
+ pPacket->Status = status;
+ } else {
+ if (status == A_PENDING) {
+ status = A_OK;
+ }
+ }
+
+ return status;
+}
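The helper above (and DevRecvPacket below) chooses between synchronous and asynchronous I/O purely from whether the packet carries a completion callback. A self-contained sketch of that dispatch pattern, using stand-in types rather than the real HTC_PACKET/HIF structures:

#include <stddef.h>
#include <stdio.h>

struct pkt {
    void (*completion)(struct pkt *p, int status); /* NULL means the caller wants a blocking call */
    int status;
};

/* stand-in for the bus transfer; a real driver would issue HIF I/O here */
static int do_transfer(struct pkt *p)
{
    (void)p;
    return 0; /* pretend success */
}

static int send_packet(struct pkt *p)
{
    int status = do_transfer(p);

    if (p->completion == NULL) {
        /* synchronous path: record the result in the packet and return it */
        p->status = status;
        return status;
    }
    /* asynchronous path: in the real driver the completion fires later from the
     * HIF layer; here it is invoked inline just to show the shape of the flow */
    p->completion(p, status);
    return 0;
}

static void on_done(struct pkt *p, int status)
{
    (void)p;
    printf("async completion, status %d\n", status);
}

int main(void)
{
    struct pkt sync_pkt  = { .completion = NULL };
    struct pkt async_pkt = { .completion = on_done };

    printf("sync result: %d\n", send_packet(&sync_pkt));
    send_packet(&async_pkt);
    return 0;
}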
+
+static INLINE A_STATUS DevRecvPacket(AR6K_DEVICE *pDev, HTC_PACKET *pPacket, A_UINT32 RecvLength) {
+ A_UINT32 paddedLength;
+ A_STATUS status;
+ A_BOOL sync = (pPacket->Completion == NULL) ? TRUE : FALSE;
+
+ /* adjust the length to be a multiple of block size if appropriate */
+ paddedLength = DEV_CALC_RECV_PADDED_LEN(pDev, RecvLength);
+
+ if (paddedLength > pPacket->BufferLength) {
+ A_ASSERT(FALSE);
+ AR_DEBUG_PRINTF(ATH_DEBUG_ERR,
+ ("DevRecvPacket, Not enough space for padlen:%d recvlen:%d bufferlen:%d \n",
+ paddedLength,RecvLength,pPacket->BufferLength));
+ if (pPacket->Completion != NULL) {
+ COMPLETE_HTC_PACKET(pPacket,A_EINVAL);
+ return A_OK;
+ }
+ return A_EINVAL;
+ }
+
+ AR_DEBUG_PRINTF(ATH_DEBUG_RECV,
+ ("DevRecvPacket (0x%lX : hdr:0x%X) Padded Length: %d Mbox:0x%X (mode:%s)\n",
+ (unsigned long)pPacket, pPacket->PktInfo.AsRx.ExpectedHdr,
+ paddedLength,
+ pDev->MailBoxInfo.MboxAddresses[HTC_MAILBOX],
+ sync ? "SYNC" : "ASYNC"));
+
+ status = HIFReadWrite(pDev->HIFDevice,
+ pDev->MailBoxInfo.MboxAddresses[HTC_MAILBOX],
+ pPacket->pBuffer,
+ paddedLength,
+ sync ? HIF_RD_SYNC_BLOCK_FIX : HIF_RD_ASYNC_BLOCK_FIX,
+ sync ? NULL : pPacket); /* pass the packet as the context to the HIF request */
+
+ if (sync) {
+ pPacket->Status = status;
+ }
+
+ return status;
+}
+
+#define DEV_CHECK_RECV_YIELD(pDev) \
+ ((pDev)->CurrentDSRRecvCount >= (pDev)->HifIRQYieldParams.RecvPacketYieldCount)
+
+#define IS_DEV_IRQ_PROC_SYNC_MODE(pDev) (HIF_DEVICE_IRQ_SYNC_ONLY == (pDev)->HifIRQProcessingMode)
+#define IS_DEV_IRQ_PROCESSING_ASYNC_ALLOWED(pDev) ((pDev)->HifIRQProcessingMode != HIF_DEVICE_IRQ_SYNC_ONLY)
+
+/**************************************************/
+/****** Scatter Function and Definitions
+ *
+ *
+ */
+
+A_STATUS DevCopyScatterListToFromDMABuffer(HIF_SCATTER_REQ *pReq, A_BOOL FromDMA);
+
+ /* copy any READ data back into scatter list */
+#define DEV_FINISH_SCATTER_OPERATION(pR) \
+ if (A_SUCCESS((pR)->CompletionStatus) && \
+ !((pR)->Request & HIF_WRITE) && \
+ ((pR)->ScatterMethod == HIF_SCATTER_DMA_BOUNCE)) { \
+ (pR)->CompletionStatus = DevCopyScatterListToFromDMABuffer((pR),FROM_DMA_BUFFER); \
+ }
+
+ /* copy any WRITE data to bounce buffer */
+static INLINE A_STATUS DEV_PREPARE_SCATTER_OPERATION(HIF_SCATTER_REQ *pReq) {
+ if ((pReq->Request & HIF_WRITE) && (pReq->ScatterMethod == HIF_SCATTER_DMA_BOUNCE)) {
+ return DevCopyScatterListToFromDMABuffer(pReq,TO_DMA_BUFFER);
+ } else {
+ return A_OK;
+ }
+}
+
+
+A_STATUS DevSetupMsgBundling(AR6K_DEVICE *pDev, int MaxMsgsPerTransfer);
+
+#define DEV_GET_MAX_MSG_PER_BUNDLE(pDev) (pDev)->HifScatterInfo.MaxScatterEntries
+#define DEV_GET_MAX_BUNDLE_LENGTH(pDev) (pDev)->HifScatterInfo.MaxTransferSizePerScatterReq
+#define DEV_ALLOC_SCATTER_REQ(pDev) \
+ (pDev)->HifScatterInfo.pAllocateReqFunc((pDev)->ScatterIsVirtual ? (pDev) : (pDev)->HIFDevice)
+
+#define DEV_FREE_SCATTER_REQ(pDev,pR) \
+ (pDev)->HifScatterInfo.pFreeReqFunc((pDev)->ScatterIsVirtual ? (pDev) : (pDev)->HIFDevice,(pR))
+
+#define DEV_GET_MAX_BUNDLE_RECV_LENGTH(pDev) (pDev)->MaxRecvBundleSize
+#define DEV_GET_MAX_BUNDLE_SEND_LENGTH(pDev) (pDev)->MaxSendBundleSize
+
+#define DEV_SCATTER_READ TRUE
+#define DEV_SCATTER_WRITE FALSE
+#define DEV_SCATTER_ASYNC TRUE
+#define DEV_SCATTER_SYNC FALSE
+A_STATUS DevSubmitScatterRequest(AR6K_DEVICE *pDev, HIF_SCATTER_REQ *pScatterReq, A_BOOL Read, A_BOOL Async);
+
+#ifdef MBOXHW_UNIT_TEST
+A_STATUS DoMboxHWTest(AR6K_DEVICE *pDev);
+#endif
+
+ /* completely virtual */
+typedef struct _DEV_SCATTER_DMA_VIRTUAL_INFO {
+ A_UINT8 *pVirtDmaBuffer; /* dma-able buffer - CPU accessible address */
+ A_UINT8 DataArea[1]; /* start of data area */
+} DEV_SCATTER_DMA_VIRTUAL_INFO;
+
+
+
+void DumpAR6KDevState(AR6K_DEVICE *pDev);
+
+/**************************************************/
+/****** GMBOX functions and definitions
+ *
+ *
+ */
+
+#ifdef ATH_AR6K_ENABLE_GMBOX
+
+void DevCleanupGMbox(AR6K_DEVICE *pDev);
+A_STATUS DevSetupGMbox(AR6K_DEVICE *pDev);
+A_STATUS DevCheckGMboxInterrupts(AR6K_DEVICE *pDev);
+void DevNotifyGMboxTargetFailure(AR6K_DEVICE *pDev);
+
+#else
+
+ /* compiled out */
+#define DevCleanupGMbox(p)
+#define DevCheckGMboxInterrupts(p) A_OK
+#define DevNotifyGMboxTargetFailure(p)
+
+static INLINE A_STATUS DevSetupGMbox(AR6K_DEVICE *pDev) {
+ pDev->GMboxEnabled = FALSE;
+ return A_OK;
+}
+
+#endif
+
+#ifdef ATH_AR6K_ENABLE_GMBOX
+
+ /* GMBOX protocol modules must expose each of these internal APIs */
+HCI_TRANSPORT_HANDLE GMboxAttachProtocol(AR6K_DEVICE *pDev, HCI_TRANSPORT_CONFIG_INFO *pInfo);
+A_STATUS GMboxProtocolInstall(AR6K_DEVICE *pDev);
+void GMboxProtocolUninstall(AR6K_DEVICE *pDev);
+
+ /* API used by GMBOX protocol modules */
+AR6K_DEVICE *HTCGetAR6KDevice(void *HTCHandle);
+#define DEV_GMBOX_SET_PROTOCOL(pDev,recv_callback,credits_pending,failure,statedump,context) \
+{ \
+ (pDev)->GMboxInfo.pProtocolContext = (context); \
+ (pDev)->GMboxInfo.pMessagePendingCallBack = (recv_callback); \
+ (pDev)->GMboxInfo.pCreditsPendingCallback = (credits_pending); \
+ (pDev)->GMboxInfo.pTargetFailureCallback = (failure); \
+ (pDev)->GMboxInfo.pStateDumpCallback = (statedump); \
+}
+
+#define DEV_GMBOX_GET_PROTOCOL(pDev) (pDev)->GMboxInfo.pProtocolContext
+
+A_STATUS DevGMboxWrite(AR6K_DEVICE *pDev, HTC_PACKET *pPacket, A_UINT32 WriteLength);
+A_STATUS DevGMboxRead(AR6K_DEVICE *pDev, HTC_PACKET *pPacket, A_UINT32 ReadLength);
+
+#define PROC_IO_ASYNC TRUE
+#define PROC_IO_SYNC FALSE
+typedef enum GMBOX_IRQ_ACTION_TYPE {
+ GMBOX_ACTION_NONE = 0,
+ GMBOX_DISABLE_ALL,
+ GMBOX_ERRORS_IRQ_ENABLE,
+ GMBOX_RECV_IRQ_ENABLE,
+ GMBOX_RECV_IRQ_DISABLE,
+ GMBOX_CREDIT_IRQ_ENABLE,
+ GMBOX_CREDIT_IRQ_DISABLE,
+} GMBOX_IRQ_ACTION_TYPE;
+
+A_STATUS DevGMboxIRQAction(AR6K_DEVICE *pDev, GMBOX_IRQ_ACTION_TYPE, A_BOOL AsyncMode);
+A_STATUS DevGMboxReadCreditCounter(AR6K_DEVICE *pDev, A_BOOL AsyncMode, int *pCredits);
+A_STATUS DevGMboxReadCreditSize(AR6K_DEVICE *pDev, int *pCreditSize);
+A_STATUS DevGMboxRecvLookAheadPeek(AR6K_DEVICE *pDev, A_UINT8 *pLookAheadBuffer, int *pLookAheadBytes);
+A_STATUS DevGMboxSetTargetInterrupt(AR6K_DEVICE *pDev, int SignalNumber, int AckTimeoutMS);
+
+#endif
+
+#endif /*AR6K_H_*/
diff --git a/drivers/staging/ath6kl/htc2/AR6000/ar6k_events.c b/drivers/staging/ath6kl/htc2/AR6000/ar6k_events.c
new file mode 100644
index 000000000000..920123b9ba1a
--- /dev/null
+++ b/drivers/staging/ath6kl/htc2/AR6000/ar6k_events.c
@@ -0,0 +1,784 @@
+//------------------------------------------------------------------------------
+// <copyright file="ar6k_events.c" company="Atheros">
+// Copyright (c) 2007-2010 Atheros Corporation. All rights reserved.
+//
+//
+// Permission to use, copy, modify, and/or distribute this software for any
+// purpose with or without fee is hereby granted, provided that the above
+// copyright notice and this permission notice appear in all copies.
+//
+// THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+// WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+// MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+// ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+// WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+// ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+// OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+//
+//
+//------------------------------------------------------------------------------
+//==============================================================================
+// AR6K Driver layer event handling (i.e. interrupts, message polling)
+//
+// Author(s): ="Atheros"
+//==============================================================================
+
+#include "a_config.h"
+#include "athdefs.h"
+#include "a_types.h"
+#include "AR6002/hw2.0/hw/mbox_host_reg.h"
+#include "a_osapi.h"
+#include "../htc_debug.h"
+#include "hif.h"
+#include "htc_packet.h"
+#include "ar6k.h"
+
+extern void AR6KFreeIOPacket(AR6K_DEVICE *pDev, HTC_PACKET *pPacket);
+extern HTC_PACKET *AR6KAllocIOPacket(AR6K_DEVICE *pDev);
+
+static A_STATUS DevServiceDebugInterrupt(AR6K_DEVICE *pDev);
+
+#define DELAY_PER_INTERVAL_MS 10 /* 10 MS delay per polling interval */
+
+/* completion routine for ALL HIF layer async I/O */
+A_STATUS DevRWCompletionHandler(void *context, A_STATUS status)
+{
+ HTC_PACKET *pPacket = (HTC_PACKET *)context;
+
+ AR_DEBUG_PRINTF(ATH_DEBUG_RECV,
+ ("+DevRWCompletionHandler (Pkt:0x%lX) , Status: %d \n",
+ (unsigned long)pPacket,
+ status));
+
+ COMPLETE_HTC_PACKET(pPacket,status);
+
+ AR_DEBUG_PRINTF(ATH_DEBUG_RECV,
+ ("-DevRWCompletionHandler\n"));
+
+ return A_OK;
+}
+
+/* mailbox recv message polling */
+A_STATUS DevPollMboxMsgRecv(AR6K_DEVICE *pDev,
+ A_UINT32 *pLookAhead,
+ int TimeoutMS)
+{
+ A_STATUS status = A_OK;
+ int timeout = TimeoutMS/DELAY_PER_INTERVAL_MS;
+
+ A_ASSERT(timeout > 0);
+
+ AR_DEBUG_PRINTF(ATH_DEBUG_RECV,("+DevPollMboxMsgRecv \n"));
+
+ while (TRUE) {
+
+ if (pDev->GetPendingEventsFunc != NULL) {
+
+ HIF_PENDING_EVENTS_INFO events;
+
+#ifdef THREAD_X
+ events.Polling =1;
+#endif
+
+ /* the HIF layer uses a special mechanism to get events, do this
+ * synchronously */
+ status = pDev->GetPendingEventsFunc(pDev->HIFDevice,
+ &events,
+ NULL);
+ if (A_FAILED(status))
+ {
+ AR_DEBUG_PRINTF(ATH_DEBUG_ERR,("Failed to get pending events \n"));
+ break;
+ }
+
+ if (events.Events & HIF_RECV_MSG_AVAIL)
+ {
+ /* there is a message available, the lookahead should be valid now */
+ *pLookAhead = events.LookAhead;
+
+ break;
+ }
+ } else {
+
+ /* this is the standard HIF way.... */
+ /* load the register table */
+ status = HIFReadWrite(pDev->HIFDevice,
+ HOST_INT_STATUS_ADDRESS,
+ (A_UINT8 *)&pDev->IrqProcRegisters,
+ AR6K_IRQ_PROC_REGS_SIZE,
+ HIF_RD_SYNC_BYTE_INC,
+ NULL);
+
+ if (A_FAILED(status)){
+ AR_DEBUG_PRINTF(ATH_DEBUG_ERR,("Failed to read register table \n"));
+ break;
+ }
+
+ /* check for MBOX data and valid lookahead */
+ if (pDev->IrqProcRegisters.host_int_status & (1 << HTC_MAILBOX)) {
+ if (pDev->IrqProcRegisters.rx_lookahead_valid & (1 << HTC_MAILBOX))
+ {
+ /* mailbox has a message and the look ahead is valid */
+ *pLookAhead = pDev->IrqProcRegisters.rx_lookahead[HTC_MAILBOX];
+ break;
+ }
+ }
+
+ }
+
+ timeout--;
+
+ if (timeout <= 0) {
+ AR_DEBUG_PRINTF(ATH_DEBUG_ERR, (" Timeout waiting for recv message \n"));
+ status = A_ERROR;
+
+ /* check if the target asserted */
+ if ( pDev->IrqProcRegisters.counter_int_status & AR6K_TARGET_DEBUG_INTR_MASK) {
+ /* target signaled an assert, process this pending interrupt
+ * this will call the target failure handler */
+ DevServiceDebugInterrupt(pDev);
+ }
+
+ break;
+ }
+
+ /* delay a little */
+ A_MDELAY(DELAY_PER_INTERVAL_MS);
+ AR_DEBUG_PRINTF(ATH_DEBUG_RECV,(" Retry Mbox Poll : %d \n",timeout));
+ }
+
+ AR_DEBUG_PRINTF(ATH_DEBUG_RECV,("-DevPollMboxMsgRecv \n"));
+
+ return status;
+}
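DevPollMboxMsgRecv above converts a millisecond budget into a fixed number of short polling intervals and re-reads the status registers on each pass. The general poll-with-timeout shape, stripped of the HIF details (the condition check here is a stand-in):

#include <stdbool.h>
#include <stdio.h>

#define POLL_INTERVAL_MS 10

/* stand-in condition; a real driver would read hardware status here */
static bool condition_ready(int pass)
{
    return pass >= 3; /* pretend the condition becomes true on the 4th pass */
}

/* returns 0 on success, -1 on timeout */
static int poll_with_timeout(int timeout_ms)
{
    int passes = timeout_ms / POLL_INTERVAL_MS;
    int i;

    for (i = 0; i < passes; i++) {
        if (condition_ready(i))
            return 0;
        /* a real implementation would sleep POLL_INTERVAL_MS here (e.g. A_MDELAY) */
    }
    return -1;
}

int main(void)
{
    printf("poll result: %d\n", poll_with_timeout(100));
    return 0;
}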
+
+static A_STATUS DevServiceCPUInterrupt(AR6K_DEVICE *pDev)
+{
+ A_STATUS status;
+ A_UINT8 cpu_int_status;
+ A_UINT8 regBuffer[4];
+
+ AR_DEBUG_PRINTF(ATH_DEBUG_IRQ, ("CPU Interrupt\n"));
+ cpu_int_status = pDev->IrqProcRegisters.cpu_int_status &
+ pDev->IrqEnableRegisters.cpu_int_status_enable;
+ A_ASSERT(cpu_int_status);
+ AR_DEBUG_PRINTF(ATH_DEBUG_IRQ,
+ ("Valid interrupt source(s) in CPU_INT_STATUS: 0x%x\n",
+ cpu_int_status));
+
+ /* Clear the interrupt */
+ pDev->IrqProcRegisters.cpu_int_status &= ~cpu_int_status; /* W1C */
+
+ /* set up the register transfer buffer to hit the register 4 times, this is done
+ * to make the access 4-byte aligned to mitigate issues with host bus interconnects that
+ * restrict bus transfer lengths to be a multiple of 4-bytes */
+
+ /* set W1C value to clear the interrupt, this hits the register first */
+ regBuffer[0] = cpu_int_status;
+ /* the remaining 3 values are set to zero and have no effect */
+ regBuffer[1] = 0;
+ regBuffer[2] = 0;
+ regBuffer[3] = 0;
+
+ status = HIFReadWrite(pDev->HIFDevice,
+ CPU_INT_STATUS_ADDRESS,
+ regBuffer,
+ 4,
+ HIF_WR_SYNC_BYTE_FIX,
+ NULL);
+
+ A_ASSERT(status == A_OK);
+ return status;
+}
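The W1C (write-one-to-clear) acknowledgement above pads a 1-byte register write out to 4 bytes: only the first byte carries the clear mask, the rest are zero, so the fixed-address transfer stays 4-byte aligned with no side effects. A minimal sketch of building such a buffer (the register layout here is purely illustrative):

#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* build a 4-byte fixed-address write for a 1-byte W1C register:
 * byte 0 carries the bits to clear, bytes 1..3 are zero (writing zero has no effect) */
static void build_w1c_buffer(uint8_t out[4], uint8_t bits_to_clear)
{
    memset(out, 0, 4);
    out[0] = bits_to_clear;
}

int main(void)
{
    uint8_t buf[4];

    build_w1c_buffer(buf, 0x05); /* e.g. clear interrupt sources 0 and 2 */
    printf("%02X %02X %02X %02X\n", buf[0], buf[1], buf[2], buf[3]); /* 05 00 00 00 */
    return 0;
}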
+
+
+static A_STATUS DevServiceErrorInterrupt(AR6K_DEVICE *pDev)
+{
+ A_STATUS status;
+ A_UINT8 error_int_status;
+ A_UINT8 regBuffer[4];
+
+ AR_DEBUG_PRINTF(ATH_DEBUG_IRQ, ("Error Interrupt\n"));
+ error_int_status = pDev->IrqProcRegisters.error_int_status & 0x0F;
+ A_ASSERT(error_int_status);
+ AR_DEBUG_PRINTF(ATH_DEBUG_IRQ,
+ ("Valid interrupt source(s) in ERROR_INT_STATUS: 0x%x\n",
+ error_int_status));
+
+ if (ERROR_INT_STATUS_WAKEUP_GET(error_int_status)) {
+ /* Wakeup */
+ AR_DEBUG_PRINTF(ATH_DEBUG_IRQ, ("Error : Wakeup\n"));
+ }
+
+ if (ERROR_INT_STATUS_RX_UNDERFLOW_GET(error_int_status)) {
+ /* Rx Underflow */
+ AR_DEBUG_PRINTF(ATH_DEBUG_ERR, ("Error : Rx Underflow\n"));
+ }
+
+ if (ERROR_INT_STATUS_TX_OVERFLOW_GET(error_int_status)) {
+ /* Tx Overflow */
+ AR_DEBUG_PRINTF(ATH_DEBUG_ERR, ("Error : Tx Overflow\n"));
+ }
+
+ /* Clear the interrupt */
+ pDev->IrqProcRegisters.error_int_status &= ~error_int_status; /* W1C */
+
+ /* set up the register transfer buffer to hit the register 4 times, this is done
+ * to make the access 4-byte aligned to mitigate issues with host bus interconnects that
+ * restrict bus transfer lengths to be a multiple of 4-bytes */
+
+ /* set W1C value to clear the interrupt, this hits the register first */
+ regBuffer[0] = error_int_status;
+ /* the remaining 3 values are set to zero and have no effect */
+ regBuffer[1] = 0;
+ regBuffer[2] = 0;
+ regBuffer[3] = 0;
+
+ status = HIFReadWrite(pDev->HIFDevice,
+ ERROR_INT_STATUS_ADDRESS,
+ regBuffer,
+ 4,
+ HIF_WR_SYNC_BYTE_FIX,
+ NULL);
+
+ A_ASSERT(status == A_OK);
+ return status;
+}
+
+static A_STATUS DevServiceDebugInterrupt(AR6K_DEVICE *pDev)
+{
+ A_UINT32 dummy;
+ A_STATUS status;
+
+ /* Send a target failure event to the application */
+ AR_DEBUG_PRINTF(ATH_DEBUG_ERR, ("Target debug interrupt\n"));
+
+ if (pDev->TargetFailureCallback != NULL) {
+ pDev->TargetFailureCallback(pDev->HTCContext);
+ }
+
+ if (pDev->GMboxEnabled) {
+ DevNotifyGMboxTargetFailure(pDev);
+ }
+
+ /* clear the interrupt; the debug error interrupt is
+ * counter 0 */
+ /* read counter to clear interrupt */
+ status = HIFReadWrite(pDev->HIFDevice,
+ COUNT_DEC_ADDRESS,
+ (A_UINT8 *)&dummy,
+ 4,
+ HIF_RD_SYNC_BYTE_INC,
+ NULL);
+
+ A_ASSERT(status == A_OK);
+ return status;
+}
+
+static A_STATUS DevServiceCounterInterrupt(AR6K_DEVICE *pDev)
+{
+ A_UINT8 counter_int_status;
+
+ AR_DEBUG_PRINTF(ATH_DEBUG_IRQ, ("Counter Interrupt\n"));
+
+ counter_int_status = pDev->IrqProcRegisters.counter_int_status &
+ pDev->IrqEnableRegisters.counter_int_status_enable;
+
+ AR_DEBUG_PRINTF(ATH_DEBUG_IRQ,
+ ("Valid interrupt source(s) in COUNTER_INT_STATUS: 0x%x\n",
+ counter_int_status));
+
+ /* Check if the debug interrupt is pending
+ * NOTE: other modules like GMBOX may use the counter interrupt for
+ * credit flow control on other counters, we only need to check for the debug assertion
+ * counter interrupt */
+ if (counter_int_status & AR6K_TARGET_DEBUG_INTR_MASK) {
+ return DevServiceDebugInterrupt(pDev);
+ }
+
+ return A_OK;
+}
+
+/* callback when our fetch to get interrupt status registers completes */
+static void DevGetEventAsyncHandler(void *Context, HTC_PACKET *pPacket)
+{
+ AR6K_DEVICE *pDev = (AR6K_DEVICE *)Context;
+ A_UINT32 lookAhead = 0;
+ A_BOOL otherInts = FALSE;
+
+ AR_DEBUG_PRINTF(ATH_DEBUG_IRQ,("+DevGetEventAsyncHandler: (dev: 0x%lX)\n", (unsigned long)pDev));
+
+ do {
+
+ if (A_FAILED(pPacket->Status)) {
+ AR_DEBUG_PRINTF(ATH_DEBUG_ERR,
+ (" GetEvents I/O request failed, status:%d \n", pPacket->Status));
+ /* bail out, don't unmask HIF interrupt */
+ break;
+ }
+
+ if (pDev->GetPendingEventsFunc != NULL) {
+ /* the HIF layer collected the information for us */
+ HIF_PENDING_EVENTS_INFO *pEvents = (HIF_PENDING_EVENTS_INFO *)pPacket->pBuffer;
+ if (pEvents->Events & HIF_RECV_MSG_AVAIL) {
+ lookAhead = pEvents->LookAhead;
+ if (0 == lookAhead) {
+ AR_DEBUG_PRINTF(ATH_DEBUG_ERR,(" DevGetEventAsyncHandler1, lookAhead is zero! \n"));
+ }
+ }
+ if (pEvents->Events & HIF_OTHER_EVENTS) {
+ otherInts = TRUE;
+ }
+ } else {
+ /* standard interrupt table handling.... */
+ AR6K_IRQ_PROC_REGISTERS *pReg = (AR6K_IRQ_PROC_REGISTERS *)pPacket->pBuffer;
+ A_UINT8 host_int_status;
+
+ host_int_status = pReg->host_int_status & pDev->IrqEnableRegisters.int_status_enable;
+
+ if (host_int_status & (1 << HTC_MAILBOX)) {
+ host_int_status &= ~(1 << HTC_MAILBOX);
+ if (pReg->rx_lookahead_valid & (1 << HTC_MAILBOX)) {
+ /* mailbox has a message and the look ahead is valid */
+ lookAhead = pReg->rx_lookahead[HTC_MAILBOX];
+ if (0 == lookAhead) {
+ AR_DEBUG_PRINTF(ATH_DEBUG_ERR,(" DevGetEventAsyncHandler2, lookAhead is zero! \n"));
+ }
+ }
+ }
+
+ if (host_int_status) {
+ /* there are other interrupts to handle */
+ otherInts = TRUE;
+ }
+ }
+
+ if (otherInts || (lookAhead == 0)) {
+ /* if there are other interrupts to process, we cannot handle them in the async handler,
+ * so ack the interrupt, which will cause our sync handler to run again;
+ * likewise, if there are no more messages we can ack the interrupt now */
+ AR_DEBUG_PRINTF(ATH_DEBUG_IRQ,
+ (" Acking interrupt from DevGetEventAsyncHandler (otherints:%d, lookahead:0x%X)\n",
+ otherInts, lookAhead));
+ HIFAckInterrupt(pDev->HIFDevice);
+ } else {
+ int fetched = 0;
+ A_STATUS status;
+
+ AR_DEBUG_PRINTF(ATH_DEBUG_IRQ,
+ (" DevGetEventAsyncHandler : detected another message, lookahead :0x%X \n",
+ lookAhead));
+ /* lookahead is non-zero and there are no other interrupts to service,
+ * go get the next message */
+ status = pDev->MessagePendingCallback(pDev->HTCContext, &lookAhead, 1, NULL, &fetched);
+
+ if (A_SUCCESS(status) && !fetched) {
+ /* HTC layer could not pull out messages due to lack of resources, stop IRQ processing */
+ AR_DEBUG_PRINTF(ATH_DEBUG_IRQ,("MessagePendingCallback did not pull any messages, force-ack \n"));
+ DevAsyncIrqProcessComplete(pDev);
+ }
+ }
+
+ } while (FALSE);
+
+ /* free this IO packet */
+ AR6KFreeIOPacket(pDev,pPacket);
+ AR_DEBUG_PRINTF(ATH_DEBUG_IRQ,("-DevGetEventAsyncHandler \n"));
+}
+
+/* called by the HTC layer when it wants us to check if the device has any more pending
+ * recv messages, this starts off a series of async requests to read interrupt registers */
+A_STATUS DevCheckPendingRecvMsgsAsync(void *context)
+{
+ AR6K_DEVICE *pDev = (AR6K_DEVICE *)context;
+ A_STATUS status = A_OK;
+ HTC_PACKET *pIOPacket;
+
+ /* this is called in an ASYNC-only context, we may NOT block, sleep or call any APIs that can
+ * cause us to switch contexts */
+
+ AR_DEBUG_PRINTF(ATH_DEBUG_IRQ,("+DevCheckPendingRecvMsgsAsync: (dev: 0x%lX)\n", (unsigned long)pDev));
+
+ do {
+
+ if (HIF_DEVICE_IRQ_SYNC_ONLY == pDev->HifIRQProcessingMode) {
+ /* break the async processing chain right here, no need to continue.
+ * The DevDsrHandler() will handle things in a loop when things are driven
+ * synchronously */
+ break;
+ }
+
+ /* an optimization to bypass reading the IRQ status registers unnecessarily, which can re-wake
+ * the target. If upper layers determine that we are in a low-throughput mode, we can
+ * rely on taking another interrupt rather than re-checking the status registers, which can
+ * re-wake the target */
+ if (pDev->RecheckIRQStatusCnt == 0) {
+ AR_DEBUG_PRINTF(ATH_DEBUG_IRQ,("Bypassing IRQ Status re-check, re-acking HIF interrupts\n"));
+ /* ack interrupt */
+ HIFAckInterrupt(pDev->HIFDevice);
+ break;
+ }
+
+ /* first allocate one of our HTC packets we created for async I/O
+ * we reuse HTC packet definitions so that we can use the completion mechanism
+ * in DevRWCompletionHandler() */
+ pIOPacket = AR6KAllocIOPacket(pDev);
+
+ if (NULL == pIOPacket) {
+ /* there should be only 1 asynchronous request out at a time to read these registers
+ * so this should actually never happen */
+ status = A_NO_MEMORY;
+ A_ASSERT(FALSE);
+ break;
+ }
+
+ /* stick in our completion routine when the I/O operation completes */
+ pIOPacket->Completion = DevGetEventAsyncHandler;
+ pIOPacket->pContext = pDev;
+
+ if (pDev->GetPendingEventsFunc) {
+ /* HIF layer has its own mechanism, pass the I/O to it */
+ status = pDev->GetPendingEventsFunc(pDev->HIFDevice,
+ (HIF_PENDING_EVENTS_INFO *)pIOPacket->pBuffer,
+ pIOPacket);
+
+ } else {
+ /* standard way, read the interrupt register table asynchronously again */
+ status = HIFReadWrite(pDev->HIFDevice,
+ HOST_INT_STATUS_ADDRESS,
+ pIOPacket->pBuffer,
+ AR6K_IRQ_PROC_REGS_SIZE,
+ HIF_RD_ASYNC_BYTE_INC,
+ pIOPacket);
+ }
+
+ AR_DEBUG_PRINTF(ATH_DEBUG_IRQ,(" Async IO issued to get interrupt status...\n"));
+ } while (FALSE);
+
+ AR_DEBUG_PRINTF(ATH_DEBUG_IRQ,("-DevCheckPendingRecvMsgsAsync \n"));
+
+ return status;
+}
+
+void DevAsyncIrqProcessComplete(AR6K_DEVICE *pDev)
+{
+ AR_DEBUG_PRINTF(ATH_DEBUG_IRQ,("DevAsyncIrqProcessComplete - forcing HIF IRQ ACK \n"));
+ HIFAckInterrupt(pDev->HIFDevice);
+}
+
+/* process pending interrupts synchronously */
+static A_STATUS ProcessPendingIRQs(AR6K_DEVICE *pDev, A_BOOL *pDone, A_BOOL *pASyncProcessing)
+{
+ A_STATUS status = A_OK;
+ A_UINT8 host_int_status = 0;
+ A_UINT32 lookAhead = 0;
+
+ AR_DEBUG_PRINTF(ATH_DEBUG_IRQ,("+ProcessPendingIRQs: (dev: 0x%lX)\n", (unsigned long)pDev));
+
+ /*** NOTE: the HIF implementation guarantees that the context of this call allows
+ * us to perform SYNCHRONOUS I/O, that is we can block, sleep or call any API that
+ * can block or switch thread/task contexts.
+ * This is a fully schedulable context.
+ * */
+ do {
+
+ if (pDev->IrqEnableRegisters.int_status_enable == 0) {
+ /* interrupt enables have been cleared, do not try to process any pending interrupts that
+ * may result in more bus transactions. The target may be unresponsive at this
+ * point. */
+ break;
+ }
+
+ if (pDev->GetPendingEventsFunc != NULL) {
+ HIF_PENDING_EVENTS_INFO events;
+
+#ifdef THREAD_X
+ events.Polling= 0;
+#endif
+ /* the HIF layer uses a special mechanism to get events
+ * get this synchronously */
+ status = pDev->GetPendingEventsFunc(pDev->HIFDevice,
+ &events,
+ NULL);
+
+ if (A_FAILED(status)) {
+ break;
+ }
+
+ if (events.Events & HIF_RECV_MSG_AVAIL) {
+ lookAhead = events.LookAhead;
+ if (0 == lookAhead) {
+ AR_DEBUG_PRINTF(ATH_DEBUG_ERR,(" ProcessPendingIRQs1 lookAhead is zero! \n"));
+ }
+ }
+
+ if (!(events.Events & HIF_OTHER_EVENTS) ||
+ !(pDev->IrqEnableRegisters.int_status_enable & OTHER_INTS_ENABLED)) {
+ /* no need to read the register table, no other interesting interrupts.
+ * Some interfaces (like SPI) can shadow interrupt sources without
+ * requiring the host to do a full table read */
+ break;
+ }
+
+ /* otherwise fall through and read the register table */
+ }
+
+ /*
+ * Read the first 28 bytes of the HTC register table. This will yield us
+ * the value of different int status registers and the lookahead
+ * registers.
+ * length = sizeof(int_status) + sizeof(cpu_int_status) +
+ * sizeof(error_int_status) + sizeof(counter_int_status) +
+ * sizeof(mbox_frame) + sizeof(rx_lookahead_valid) +
+ * sizeof(hole) + sizeof(rx_lookahead) +
+ * sizeof(int_status_enable) + sizeof(cpu_int_status_enable) +
+ * sizeof(error_status_enable) +
+ * sizeof(counter_int_status_enable);
+ *
+ */
+#ifdef CONFIG_MMC_SDHCI_S3C
+ pDev->IrqProcRegisters.host_int_status = 0;
+ pDev->IrqProcRegisters.rx_lookahead_valid = 0;
+ pDev->IrqProcRegisters.host_int_status2 = 0;
+ pDev->IrqProcRegisters.rx_lookahead[0] = 0;
+ pDev->IrqProcRegisters.rx_lookahead[1] = 0xaaa5555;
+#endif /* CONFIG_MMC_SDHCI_S3C */
+ status = HIFReadWrite(pDev->HIFDevice,
+ HOST_INT_STATUS_ADDRESS,
+ (A_UINT8 *)&pDev->IrqProcRegisters,
+ AR6K_IRQ_PROC_REGS_SIZE,
+ HIF_RD_SYNC_BYTE_INC,
+ NULL);
+
+ if (A_FAILED(status)) {
+ break;
+ }
+
+#ifdef ATH_DEBUG_MODULE
+ if (AR_DEBUG_LVL_CHECK(ATH_DEBUG_IRQ)) {
+ DevDumpRegisters(pDev,
+ &pDev->IrqProcRegisters,
+ &pDev->IrqEnableRegisters);
+ }
+#endif
+
+ /* Update only those registers that are enabled */
+ host_int_status = pDev->IrqProcRegisters.host_int_status &
+ pDev->IrqEnableRegisters.int_status_enable;
+
+ if (NULL == pDev->GetPendingEventsFunc) {
+ /* only look at mailbox status if the HIF layer did not provide this function;
+ * on some HIF interfaces it is not valid to read the RX lookahead */
+ if (host_int_status & (1 << HTC_MAILBOX)) {
+ /* mask out pending mailbox value, we use "lookAhead" as the real flag for
+ * mailbox processing below */
+ host_int_status &= ~(1 << HTC_MAILBOX);
+ if (pDev->IrqProcRegisters.rx_lookahead_valid & (1 << HTC_MAILBOX)) {
+ /* mailbox has a message and the look ahead is valid */
+ lookAhead = pDev->IrqProcRegisters.rx_lookahead[HTC_MAILBOX];
+ if (0 == lookAhead) {
+ AR_DEBUG_PRINTF(ATH_DEBUG_ERR,(" ProcessPendingIRQs2, lookAhead is zero! \n"));
+ }
+ }
+ }
+ } else {
+ /* checking the mailbox bit here is not valid when the HIF has another mechanism for reading mailbox pending status */
+ host_int_status &= ~(1 << HTC_MAILBOX);
+ }
+
+ if (pDev->GMboxEnabled) {
+ /* call GMBOX layer to process any interrupts of interest */
+ status = DevCheckGMboxInterrupts(pDev);
+ }
+
+ } while (FALSE);
+
+
+ do {
+
+ /* did the interrupt status fetches succeed? */
+ if (A_FAILED(status)) {
+ break;
+ }
+
+ if ((0 == host_int_status) && (0 == lookAhead)) {
+ /* nothing to process, the caller can use this to break out of a loop */
+ *pDone = TRUE;
+ break;
+ }
+
+ if (lookAhead != 0) {
+ int fetched = 0;
+
+ AR_DEBUG_PRINTF(ATH_DEBUG_IRQ,("Pending mailbox message, LookAhead: 0x%X\n",lookAhead));
+ /* Mailbox Interrupt, the HTC layer may issue async requests to empty the
+ * mailbox...
+ * When emptying the recv mailbox we use the async handler above called from the
+ * completion routine of the callers read request. This can improve performance
+ * by reducing context switching when we rapidly pull packets */
+ status = pDev->MessagePendingCallback(pDev->HTCContext, &lookAhead, 1, pASyncProcessing, &fetched);
+ if (A_FAILED(status)) {
+ break;
+ }
+
+ if (!fetched) {
+ /* HTC could not pull any messages out due to lack of resources */
+ /* force DSR handler to ack the interrupt */
+ *pASyncProcessing = FALSE;
+ pDev->RecheckIRQStatusCnt = 0;
+ }
+ }
+
+ /* now handle the rest of them */
+ AR_DEBUG_PRINTF(ATH_DEBUG_IRQ,
+ (" Valid interrupt source(s) for OTHER interrupts: 0x%x\n",
+ host_int_status));
+
+ if (HOST_INT_STATUS_CPU_GET(host_int_status)) {
+ /* CPU Interrupt */
+ status = DevServiceCPUInterrupt(pDev);
+ if (A_FAILED(status)){
+ break;
+ }
+ }
+
+ if (HOST_INT_STATUS_ERROR_GET(host_int_status)) {
+ /* Error Interrupt */
+ status = DevServiceErrorInterrupt(pDev);
+ if (A_FAILED(status)){
+ break;
+ }
+ }
+
+ if (HOST_INT_STATUS_COUNTER_GET(host_int_status)) {
+ /* Counter Interrupt */
+ status = DevServiceCounterInterrupt(pDev);
+ if (A_FAILED(status)){
+ break;
+ }
+ }
+
+ } while (FALSE);
+
+ /* an optimization to bypass reading the IRQ status registers unnecessarily, which can re-wake
+ * the target. If upper layers determine that we are in a low-throughput mode, we can
+ * rely on taking another interrupt rather than re-checking the status registers, which can
+ * re-wake the target.
+ *
+ * NOTE : for host interfaces that use the special GetPendingEventsFunc, this optimization cannot
+ * be used due to possible side-effects. For example, SPI requires the host to drain all
+ * messages from the mailbox before exiting the ISR routine. */
+ if (!(*pASyncProcessing) && (pDev->RecheckIRQStatusCnt == 0) && (pDev->GetPendingEventsFunc == NULL)) {
+ AR_DEBUG_PRINTF(ATH_DEBUG_IRQ,("Bypassing IRQ Status re-check, forcing done \n"));
+ *pDone = TRUE;
+ }
+
+ AR_DEBUG_PRINTF(ATH_DEBUG_IRQ,("-ProcessPendingIRQs: (done:%d, async:%d) status=%d \n",
+ *pDone, *pASyncProcessing, status));
+
+ return status;
+}
+
+
+/* Synchronous interrupt handler; this handler kicks off all interrupt processing. */
+A_STATUS DevDsrHandler(void *context)
+{
+ AR6K_DEVICE *pDev = (AR6K_DEVICE *)context;
+ A_STATUS status = A_OK;
+ A_BOOL done = FALSE;
+ A_BOOL asyncProc = FALSE;
+
+ AR_DEBUG_PRINTF(ATH_DEBUG_IRQ,("+DevDsrHandler: (dev: 0x%lX)\n", (unsigned long)pDev));
+
+ /* reset the recv counter that tracks when we need to yield from the DSR */
+ pDev->CurrentDSRRecvCount = 0;
+ /* reset counter used to flag a re-scan of IRQ status registers on the target */
+ pDev->RecheckIRQStatusCnt = 0;
+
+ while (!done) {
+ status = ProcessPendingIRQs(pDev, &done, &asyncProc);
+ if (A_FAILED(status)) {
+ break;
+ }
+
+ if (HIF_DEVICE_IRQ_SYNC_ONLY == pDev->HifIRQProcessingMode) {
+ /* the HIF layer does not allow async IRQ processing, override the asyncProc flag */
+ asyncProc = FALSE;
+ /* this will cause us to re-enter ProcessPendingIRQ() and re-read interrupt status registers.
+ * this has a nice side effect of blocking us until all async read requests are completed.
+ * This behavior is required on some HIF implementations that do not allow ASYNC
+ * processing in interrupt handlers (like Windows CE) */
+
+ if (pDev->DSRCanYield && DEV_CHECK_RECV_YIELD(pDev)) {
+ /* ProcessPendingIRQs() pulled enough recv messages to satisfy the yield count, stop
+ * checking for more messages and return */
+ break;
+ }
+ }
+
+ if (asyncProc) {
+ /* the function performed some async I/O for performance; we
+ need to exit the ISR immediately. The check below will prevent the interrupt from being
+ Ack'd while we handle it asynchronously */
+ break;
+ }
+
+ }
+
+ if (A_SUCCESS(status) && !asyncProc) {
+ /* Ack the interrupt only if :
+ * 1. we did not get any errors in processing interrupts
+ * 2. there are no outstanding async processing requests */
+ if (pDev->DSRCanYield) {
+ /* if the DSR can yield do not ACK the interrupt, there could be more pending messages.
+ * The HIF layer must ACK the interrupt on behalf of HTC */
+ AR_DEBUG_PRINTF(ATH_DEBUG_IRQ,(" Yield in effect (cur RX count: %d) \n", pDev->CurrentDSRRecvCount));
+ } else {
+ AR_DEBUG_PRINTF(ATH_DEBUG_IRQ,(" Acking interrupt from DevDsrHandler \n"));
+ HIFAckInterrupt(pDev->HIFDevice);
+ }
+ }
+
+ AR_DEBUG_PRINTF(ATH_DEBUG_IRQ,("-DevDsrHandler \n"));
+ return status;
+}
+
+#ifdef ATH_DEBUG_MODULE
+void DumpAR6KDevState(AR6K_DEVICE *pDev)
+{
+ A_STATUS status;
+ AR6K_IRQ_ENABLE_REGISTERS regs;
+ AR6K_IRQ_PROC_REGISTERS procRegs;
+
+ LOCK_AR6K(pDev);
+ /* copy into our temp area */
+ A_MEMCPY(&regs,&pDev->IrqEnableRegisters,AR6K_IRQ_ENABLE_REGS_SIZE);
+ UNLOCK_AR6K(pDev);
+
+ /* load the register table from the device */
+ status = HIFReadWrite(pDev->HIFDevice,
+ HOST_INT_STATUS_ADDRESS,
+ (A_UINT8 *)&procRegs,
+ AR6K_IRQ_PROC_REGS_SIZE,
+ HIF_RD_SYNC_BYTE_INC,
+ NULL);
+
+ if (A_FAILED(status)) {
+ AR_DEBUG_PRINTF(ATH_DEBUG_ERR,
+ ("DumpAR6KDevState : Failed to read register table (%d) \n",status));
+ return;
+ }
+
+ DevDumpRegisters(pDev,&procRegs,&regs);
+
+ if (pDev->GMboxInfo.pStateDumpCallback != NULL) {
+ pDev->GMboxInfo.pStateDumpCallback(pDev->GMboxInfo.pProtocolContext);
+ }
+
+ /* dump any bus state at the HIF layer */
+ HIFConfigureDevice(pDev->HIFDevice,HIF_DEVICE_DEBUG_BUS_STATE,NULL,0);
+
+}
+#endif
+
+
diff --git a/drivers/staging/ath6kl/htc2/AR6000/ar6k_gmbox.c b/drivers/staging/ath6kl/htc2/AR6000/ar6k_gmbox.c
new file mode 100644
index 000000000000..e3d270d1d626
--- /dev/null
+++ b/drivers/staging/ath6kl/htc2/AR6000/ar6k_gmbox.c
@@ -0,0 +1,756 @@
+//------------------------------------------------------------------------------
+// <copyright file="ar6k_gmbox.c" company="Atheros">
+// Copyright (c) 2007-2010 Atheros Corporation. All rights reserved.
+//
+//
+// Permission to use, copy, modify, and/or distribute this software for any
+// purpose with or without fee is hereby granted, provided that the above
+// copyright notice and this permission notice appear in all copies.
+//
+// THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+// WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+// MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+// ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+// WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+// ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+// OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+//
+//
+//------------------------------------------------------------------------------
+//==============================================================================
+// Generic MBOX API implementation
+//
+// Author(s): ="Atheros"
+//==============================================================================
+#include "a_config.h"
+#include "athdefs.h"
+#include "a_types.h"
+#include "a_osapi.h"
+#include "../htc_debug.h"
+#include "hif.h"
+#include "htc_packet.h"
+#include "ar6k.h"
+#include "hw/mbox_host_reg.h"
+#include "gmboxif.h"
+
+/*
+ * This file provides management functions and a toolbox for GMBOX protocol modules.
+ * Only one protocol module can be installed at a time; which protocol module is
+ * installed is determined at compile time.
+ *
+ */
+#ifdef ATH_AR6K_ENABLE_GMBOX
+ /* GMBOX definitions */
+#define GMBOX_INT_STATUS_ENABLE_REG 0x488
+#define GMBOX_INT_STATUS_RX_DATA (1 << 0)
+#define GMBOX_INT_STATUS_TX_OVERFLOW (1 << 1)
+#define GMBOX_INT_STATUS_RX_OVERFLOW (1 << 2)
+
+#define GMBOX_LOOKAHEAD_MUX_REG 0x498
+#define GMBOX_LA_MUX_OVERRIDE_2_3 (1 << 0)
+
+#define AR6K_GMBOX_CREDIT_DEC_ADDRESS (COUNT_DEC_ADDRESS + 4 * AR6K_GMBOX_CREDIT_COUNTER)
+#define AR6K_GMBOX_CREDIT_SIZE_ADDRESS (COUNT_ADDRESS + AR6K_GMBOX_CREDIT_SIZE_COUNTER)
+
+
+ /* external APIs for allocating and freeing internal I/O packets to handle ASYNC I/O */
+extern void AR6KFreeIOPacket(AR6K_DEVICE *pDev, HTC_PACKET *pPacket);
+extern HTC_PACKET *AR6KAllocIOPacket(AR6K_DEVICE *pDev);
+
+
+/* callback when our fetch to enable/disable completes */
+static void DevGMboxIRQActionAsyncHandler(void *Context, HTC_PACKET *pPacket)
+{
+ AR6K_DEVICE *pDev = (AR6K_DEVICE *)Context;
+
+ AR_DEBUG_PRINTF(ATH_DEBUG_IRQ,("+DevGMboxIRQActionAsyncHandler: (dev: 0x%lX)\n", (unsigned long)pDev));
+
+ if (A_FAILED(pPacket->Status)) {
+ AR_DEBUG_PRINTF(ATH_DEBUG_ERR,
+ ("IRQAction Operation (%d) failed! status:%d \n", pPacket->PktInfo.AsRx.HTCRxFlags,pPacket->Status));
+ }
+ /* free this IO packet */
+ AR6KFreeIOPacket(pDev,pPacket);
+ AR_DEBUG_PRINTF(ATH_DEBUG_IRQ,("-DevGMboxIRQActionAsyncHandler \n"));
+}
+
+static A_STATUS DevGMboxCounterEnableDisable(AR6K_DEVICE *pDev, GMBOX_IRQ_ACTION_TYPE IrqAction, A_BOOL AsyncMode)
+{
+ A_STATUS status = A_OK;
+ AR6K_IRQ_ENABLE_REGISTERS regs;
+ HTC_PACKET *pIOPacket = NULL;
+
+ LOCK_AR6K(pDev);
+
+ if (GMBOX_CREDIT_IRQ_ENABLE == IrqAction) {
+ pDev->GMboxInfo.CreditCountIRQEnabled = TRUE;
+ pDev->IrqEnableRegisters.counter_int_status_enable |=
+ COUNTER_INT_STATUS_ENABLE_BIT_SET(1 << AR6K_GMBOX_CREDIT_COUNTER);
+ pDev->IrqEnableRegisters.int_status_enable |= INT_STATUS_ENABLE_COUNTER_SET(0x01);
+ } else {
+ pDev->GMboxInfo.CreditCountIRQEnabled = FALSE;
+ pDev->IrqEnableRegisters.counter_int_status_enable &=
+ ~(COUNTER_INT_STATUS_ENABLE_BIT_SET(1 << AR6K_GMBOX_CREDIT_COUNTER));
+ }
+ /* copy into our temp area */
+ A_MEMCPY(&regs,&pDev->IrqEnableRegisters,AR6K_IRQ_ENABLE_REGS_SIZE);
+
+ UNLOCK_AR6K(pDev);
+
+ do {
+
+ if (AsyncMode) {
+
+ pIOPacket = AR6KAllocIOPacket(pDev);
+
+ if (NULL == pIOPacket) {
+ status = A_NO_MEMORY;
+ A_ASSERT(FALSE);
+ break;
+ }
+
+ /* copy values to write to our async I/O buffer */
+ A_MEMCPY(pIOPacket->pBuffer,&pDev->IrqEnableRegisters,AR6K_IRQ_ENABLE_REGS_SIZE);
+
+ /* stick in our completion routine when the I/O operation completes */
+ pIOPacket->Completion = DevGMboxIRQActionAsyncHandler;
+ pIOPacket->pContext = pDev;
+ pIOPacket->PktInfo.AsRx.HTCRxFlags = IrqAction;
+ /* write it out asynchronously */
+ HIFReadWrite(pDev->HIFDevice,
+ INT_STATUS_ENABLE_ADDRESS,
+ pIOPacket->pBuffer,
+ AR6K_IRQ_ENABLE_REGS_SIZE,
+ HIF_WR_ASYNC_BYTE_INC,
+ pIOPacket);
+
+ pIOPacket = NULL;
+ break;
+ }
+
+ /* if we get here we are doing it synchronously */
+ status = HIFReadWrite(pDev->HIFDevice,
+ INT_STATUS_ENABLE_ADDRESS,
+ &regs.int_status_enable,
+ AR6K_IRQ_ENABLE_REGS_SIZE,
+ HIF_WR_SYNC_BYTE_INC,
+ NULL);
+ } while (FALSE);
+
+ if (A_FAILED(status)) {
+ AR_DEBUG_PRINTF(ATH_DEBUG_ERR,
+ (" IRQAction Operation (%d) failed! status:%d \n", IrqAction, status));
+ } else {
+ if (!AsyncMode) {
+ AR_DEBUG_PRINTF(ATH_DEBUG_IRQ,
+ (" IRQAction Operation (%d) success \n", IrqAction));
+ }
+ }
+
+ if (pIOPacket != NULL) {
+ AR6KFreeIOPacket(pDev,pIOPacket);
+ }
+
+ return status;
+}
+
+
+A_STATUS DevGMboxIRQAction(AR6K_DEVICE *pDev, GMBOX_IRQ_ACTION_TYPE IrqAction, A_BOOL AsyncMode)
+{
+ A_STATUS status = A_OK;
+ HTC_PACKET *pIOPacket = NULL;
+ A_UINT8 GMboxIntControl[4];
+
+ if (GMBOX_CREDIT_IRQ_ENABLE == IrqAction) {
+ return DevGMboxCounterEnableDisable(pDev, GMBOX_CREDIT_IRQ_ENABLE, AsyncMode);
+ } else if(GMBOX_CREDIT_IRQ_DISABLE == IrqAction) {
+ return DevGMboxCounterEnableDisable(pDev, GMBOX_CREDIT_IRQ_DISABLE, AsyncMode);
+ }
+
+ if (GMBOX_DISABLE_ALL == IrqAction) {
+ /* disable credit IRQ, those are on a different set of registers */
+ DevGMboxCounterEnableDisable(pDev, GMBOX_CREDIT_IRQ_DISABLE, AsyncMode);
+ }
+
+ /* take the lock to protect interrupt enable shadows */
+ LOCK_AR6K(pDev);
+
+ switch (IrqAction) {
+
+ case GMBOX_DISABLE_ALL:
+ pDev->GMboxControlRegisters.int_status_enable = 0;
+ break;
+ case GMBOX_ERRORS_IRQ_ENABLE:
+ pDev->GMboxControlRegisters.int_status_enable |= GMBOX_INT_STATUS_TX_OVERFLOW |
+ GMBOX_INT_STATUS_RX_OVERFLOW;
+ break;
+ case GMBOX_RECV_IRQ_ENABLE:
+ pDev->GMboxControlRegisters.int_status_enable |= GMBOX_INT_STATUS_RX_DATA;
+ break;
+ case GMBOX_RECV_IRQ_DISABLE:
+ pDev->GMboxControlRegisters.int_status_enable &= ~GMBOX_INT_STATUS_RX_DATA;
+ break;
+ case GMBOX_ACTION_NONE:
+ default:
+ A_ASSERT(FALSE);
+ break;
+ }
+
+ GMboxIntControl[0] = pDev->GMboxControlRegisters.int_status_enable;
+ GMboxIntControl[1] = GMboxIntControl[0];
+ GMboxIntControl[2] = GMboxIntControl[0];
+ GMboxIntControl[3] = GMboxIntControl[0];
+
+ UNLOCK_AR6K(pDev);
+
+ do {
+
+ if (AsyncMode) {
+
+ pIOPacket = AR6KAllocIOPacket(pDev);
+
+ if (NULL == pIOPacket) {
+ status = A_NO_MEMORY;
+ A_ASSERT(FALSE);
+ break;
+ }
+
+ /* copy values to write to our async I/O buffer */
+ A_MEMCPY(pIOPacket->pBuffer,GMboxIntControl,sizeof(GMboxIntControl));
+
+ /* stick in our completion routine when the I/O operation completes */
+ pIOPacket->Completion = DevGMboxIRQActionAsyncHandler;
+ pIOPacket->pContext = pDev;
+ pIOPacket->PktInfo.AsRx.HTCRxFlags = IrqAction;
+ /* write it out asynchronously */
+ HIFReadWrite(pDev->HIFDevice,
+ GMBOX_INT_STATUS_ENABLE_REG,
+ pIOPacket->pBuffer,
+ sizeof(GMboxIntControl),
+ HIF_WR_ASYNC_BYTE_FIX,
+ pIOPacket);
+ pIOPacket = NULL;
+ break;
+ }
+
+ /* if we get here we are doing it synchronously */
+
+ status = HIFReadWrite(pDev->HIFDevice,
+ GMBOX_INT_STATUS_ENABLE_REG,
+ GMboxIntControl,
+ sizeof(GMboxIntControl),
+ HIF_WR_SYNC_BYTE_FIX,
+ NULL);
+
+ } while (FALSE);
+
+ if (A_FAILED(status)) {
+ AR_DEBUG_PRINTF(ATH_DEBUG_ERR,
+ (" IRQAction Operation (%d) failed! status:%d \n", IrqAction, status));
+ } else {
+ if (!AsyncMode) {
+ AR_DEBUG_PRINTF(ATH_DEBUG_IRQ,
+ (" IRQAction Operation (%d) success \n", IrqAction));
+ }
+ }
+
+ if (pIOPacket != NULL) {
+ AR6KFreeIOPacket(pDev,pIOPacket);
+ }
+
+ return status;
+}
+
+void DevCleanupGMbox(AR6K_DEVICE *pDev)
+{
+ if (pDev->GMboxEnabled) {
+ pDev->GMboxEnabled = FALSE;
+ GMboxProtocolUninstall(pDev);
+ }
+}
+
+A_STATUS DevSetupGMbox(AR6K_DEVICE *pDev)
+{
+ A_STATUS status = A_OK;
+ A_UINT8 muxControl[4];
+
+ do {
+
+ if (0 == pDev->MailBoxInfo.GMboxAddress) {
+ break;
+ }
+
+ AR_DEBUG_PRINTF(ATH_DEBUG_ANY,(" GMBOX Advertised: Address:0x%X , size:%d \n",
+ pDev->MailBoxInfo.GMboxAddress, pDev->MailBoxInfo.GMboxSize));
+
+ status = DevGMboxIRQAction(pDev, GMBOX_DISABLE_ALL, PROC_IO_SYNC);
+
+ if (A_FAILED(status)) {
+ break;
+ }
+
+ /* write to the mailbox lookahead mux control register; we want the
+ * GMBOX lookaheads to appear on lookaheads 2 and 3.
+ * The register is 1-byte wide so we need to hit it 4 times to align the operation
+ * to 4-bytes */
+ muxControl[0] = GMBOX_LA_MUX_OVERRIDE_2_3;
+ muxControl[1] = GMBOX_LA_MUX_OVERRIDE_2_3;
+ muxControl[2] = GMBOX_LA_MUX_OVERRIDE_2_3;
+ muxControl[3] = GMBOX_LA_MUX_OVERRIDE_2_3;
+
+ status = HIFReadWrite(pDev->HIFDevice,
+ GMBOX_LOOKAHEAD_MUX_REG,
+ muxControl,
+ sizeof(muxControl),
+ HIF_WR_SYNC_BYTE_FIX, /* hit this register 4 times */
+ NULL);
+
+ if (A_FAILED(status)) {
+ break;
+ }
+
+ status = GMboxProtocolInstall(pDev);
+
+ if (A_FAILED(status)) {
+ break;
+ }
+
+ pDev->GMboxEnabled = TRUE;
+
+ } while (FALSE);
+
+ return status;
+}
+
+A_STATUS DevCheckGMboxInterrupts(AR6K_DEVICE *pDev)
+{
+ A_STATUS status = A_OK;
+ A_UINT8 counter_int_status;
+ int credits;
+ A_UINT8 host_int_status2;
+
+ AR_DEBUG_PRINTF(ATH_DEBUG_IRQ, ("+DevCheckGMboxInterrupts \n"));
+
+ /* the caller guarantees that this is a context that allows for blocking I/O */
+
+ do {
+
+ host_int_status2 = pDev->IrqProcRegisters.host_int_status2 &
+ pDev->GMboxControlRegisters.int_status_enable;
+
+ if (host_int_status2 & GMBOX_INT_STATUS_TX_OVERFLOW) {
+ AR_DEBUG_PRINTF(ATH_DEBUG_ERR, ("GMBOX : TX Overflow \n"));
+ status = A_ECOMM;
+ }
+
+ if (host_int_status2 & GMBOX_INT_STATUS_RX_OVERFLOW) {
+ AR_DEBUG_PRINTF(ATH_DEBUG_ERR, ("GMBOX : RX Overflow \n"));
+ status = A_ECOMM;
+ }
+
+ if (A_FAILED(status)) {
+ if (pDev->GMboxInfo.pTargetFailureCallback != NULL) {
+ pDev->GMboxInfo.pTargetFailureCallback(pDev->GMboxInfo.pProtocolContext, status);
+ }
+ break;
+ }
+
+ if (host_int_status2 & GMBOX_INT_STATUS_RX_DATA) {
+ if (pDev->IrqProcRegisters.gmbox_rx_avail > 0) {
+ A_ASSERT(pDev->GMboxInfo.pMessagePendingCallBack != NULL);
+ status = pDev->GMboxInfo.pMessagePendingCallBack(
+ pDev->GMboxInfo.pProtocolContext,
+ (A_UINT8 *)&pDev->IrqProcRegisters.rx_gmbox_lookahead_alias[0],
+ pDev->IrqProcRegisters.gmbox_rx_avail);
+ }
+ }
+
+ if (A_FAILED(status)) {
+ break;
+ }
+
+ counter_int_status = pDev->IrqProcRegisters.counter_int_status &
+ pDev->IrqEnableRegisters.counter_int_status_enable;
+
+ /* check if credit interrupt is pending */
+ if (counter_int_status & (COUNTER_INT_STATUS_ENABLE_BIT_SET(1 << AR6K_GMBOX_CREDIT_COUNTER))) {
+
+ /* do synchronous read */
+ status = DevGMboxReadCreditCounter(pDev, PROC_IO_SYNC, &credits);
+
+ if (A_FAILED(status)) {
+ break;
+ }
+
+ A_ASSERT(pDev->GMboxInfo.pCreditsPendingCallback != NULL);
+ status = pDev->GMboxInfo.pCreditsPendingCallback(pDev->GMboxInfo.pProtocolContext,
+ credits,
+ pDev->GMboxInfo.CreditCountIRQEnabled);
+ }
+
+ } while (FALSE);
+
+ AR_DEBUG_PRINTF(ATH_DEBUG_IRQ, ("-DevCheckGMboxInterrupts (%d) \n",status));
+
+ return status;
+}
+
+
+A_STATUS DevGMboxWrite(AR6K_DEVICE *pDev, HTC_PACKET *pPacket, A_UINT32 WriteLength)
+{
+ A_UINT32 paddedLength;
+ A_BOOL sync = (pPacket->Completion == NULL) ? TRUE : FALSE;
+ A_STATUS status;
+ A_UINT32 address;
+
+ /* adjust the length to be a multiple of block size if appropriate */
+ paddedLength = DEV_CALC_SEND_PADDED_LEN(pDev, WriteLength);
+
+ AR_DEBUG_PRINTF(ATH_DEBUG_SEND,
+ ("DevGMboxWrite, Padded Length: %d Mbox:0x%X (mode:%s)\n",
+ WriteLength,
+ pDev->MailBoxInfo.GMboxAddress,
+ sync ? "SYNC" : "ASYNC"));
+
+ /* last byte of packet has to hit the EOM marker */
+ address = pDev->MailBoxInfo.GMboxAddress + pDev->MailBoxInfo.GMboxSize - paddedLength;
+
+ status = HIFReadWrite(pDev->HIFDevice,
+ address,
+ pPacket->pBuffer,
+ paddedLength, /* the padded length */
+ sync ? HIF_WR_SYNC_BLOCK_INC : HIF_WR_ASYNC_BLOCK_INC,
+ sync ? NULL : pPacket); /* pass the packet as the context to the HIF request */
+
+ if (sync) {
+ pPacket->Status = status;
+ } else {
+ if (status == A_PENDING) {
+ status = A_OK;
+ }
+ }
+
+ return status;
+}
+
+A_STATUS DevGMboxRead(AR6K_DEVICE *pDev, HTC_PACKET *pPacket, A_UINT32 ReadLength)
+{
+
+ A_UINT32 paddedLength;
+ A_STATUS status;
+ A_BOOL sync = (pPacket->Completion == NULL) ? TRUE : FALSE;
+
+ /* adjust the length to be a multiple of block size if appropriate */
+ paddedLength = DEV_CALC_RECV_PADDED_LEN(pDev, ReadLength);
+
+ if (paddedLength > pPacket->BufferLength) {
+ A_ASSERT(FALSE);
+ AR_DEBUG_PRINTF(ATH_DEBUG_ERR,
+ ("DevGMboxRead, Not enough space for padlen:%d recvlen:%d bufferlen:%d \n",
+ paddedLength,ReadLength,pPacket->BufferLength));
+ if (pPacket->Completion != NULL) {
+ COMPLETE_HTC_PACKET(pPacket,A_EINVAL);
+ return A_OK;
+ }
+ return A_EINVAL;
+ }
+
+ AR_DEBUG_PRINTF(ATH_DEBUG_RECV,
+ ("DevGMboxRead (0x%lX : hdr:0x%X) Padded Length: %d Mbox:0x%X (mode:%s)\n",
+ (unsigned long)pPacket, pPacket->PktInfo.AsRx.ExpectedHdr,
+ paddedLength,
+ pDev->MailBoxInfo.GMboxAddress,
+ sync ? "SYNC" : "ASYNC"));
+
+ status = HIFReadWrite(pDev->HIFDevice,
+ pDev->MailBoxInfo.GMboxAddress,
+ pPacket->pBuffer,
+ paddedLength,
+ sync ? HIF_RD_SYNC_BLOCK_FIX : HIF_RD_ASYNC_BLOCK_FIX,
+ sync ? NULL : pPacket); /* pass the packet as the context to the HIF request */
+
+ if (sync) {
+ pPacket->Status = status;
+ }
+
+ return status;
+}
+
+
+static int ProcessCreditCounterReadBuffer(A_UINT8 *pBuffer, int Length)
+{
+ int credits = 0;
+
+ /* theory of how this works:
+ * We read the credit decrement register multiple times on a byte-wide basis.
+ * The number of times (32) aligns the I/O operation to be a multiple of 4 bytes and provides a
+ * reasonable chance to acquire "all" pending credits in a single I/O operation.
+ *
+ * Once we obtain the filled buffer, we can walk through it looking for credit decrement transitions.
+ * Each non-zero byte represents a single credit decrement (which is a credit given back to the host)
+ * For example, if the target provided 3 credits and added 4 more during the 32-byte read operation, the following
+ * pattern "could" appear:
+ *
+ * 0x3 0x2 0x1 0x0 0x0 0x0 0x0 0x0 0x1 0x0 0x1 0x0 0x1 0x0 0x1 0x0 ......rest zeros
+ * <---------> <----------------------------->
+ * \_ credits already there \_ target adding 4 more credits
+ *
+ * The total available credits would be 7, since there are 7 non-zero bytes in the buffer.
+ *
+ * */
+
+ if (AR_DEBUG_LVL_CHECK(ATH_DEBUG_RECV)) {
+ DebugDumpBytes(pBuffer, Length, "GMBOX Credit read buffer");
+ }
+
+ while (Length) {
+ if (*pBuffer != 0) {
+ credits++;
+ }
+ Length--;
+ pBuffer++;
+ }
+
+ return credits;
+}
+
+
+/* callback when our fetch to enable/disable completes */
+static void DevGMboxReadCreditsAsyncHandler(void *Context, HTC_PACKET *pPacket)
+{
+ AR6K_DEVICE *pDev = (AR6K_DEVICE *)Context;
+
+ AR_DEBUG_PRINTF(ATH_DEBUG_IRQ,("+DevGMboxReadCreditsAsyncHandler: (dev: 0x%lX)\n", (unsigned long)pDev));
+
+ if (A_FAILED(pPacket->Status)) {
+ AR_DEBUG_PRINTF(ATH_DEBUG_ERR,
+ ("Read Credit Operation failed! status:%d \n", pPacket->Status));
+ } else {
+ int credits = 0;
+ credits = ProcessCreditCounterReadBuffer(pPacket->pBuffer, AR6K_REG_IO_BUFFER_SIZE);
+ pDev->GMboxInfo.pCreditsPendingCallback(pDev->GMboxInfo.pProtocolContext,
+ credits,
+ pDev->GMboxInfo.CreditCountIRQEnabled);
+
+
+ }
+ /* free this IO packet */
+ AR6KFreeIOPacket(pDev,pPacket);
+ AR_DEBUG_PRINTF(ATH_DEBUG_IRQ,("-DevGMboxReadCreditsAsyncHandler \n"));
+}
+
+A_STATUS DevGMboxReadCreditCounter(AR6K_DEVICE *pDev, A_BOOL AsyncMode, int *pCredits)
+{
+ A_STATUS status = A_OK;
+ HTC_PACKET *pIOPacket = NULL;
+
+ AR_DEBUG_PRINTF(ATH_DEBUG_SEND,("+DevGMboxReadCreditCounter (%s) \n", AsyncMode ? "ASYNC" : "SYNC"));
+
+ do {
+
+ pIOPacket = AR6KAllocIOPacket(pDev);
+
+ if (NULL == pIOPacket) {
+ status = A_NO_MEMORY;
+ A_ASSERT(FALSE);
+ break;
+ }
+
+ A_MEMZERO(pIOPacket->pBuffer,AR6K_REG_IO_BUFFER_SIZE);
+
+ if (AsyncMode) {
+ /* stick in our completion routine when the I/O operation completes */
+ pIOPacket->Completion = DevGMboxReadCreditsAsyncHandler;
+ pIOPacket->pContext = pDev;
+ /* read registers asynchronously */
+ HIFReadWrite(pDev->HIFDevice,
+ AR6K_GMBOX_CREDIT_DEC_ADDRESS,
+ pIOPacket->pBuffer,
+ AR6K_REG_IO_BUFFER_SIZE, /* hit the register multiple times */
+ HIF_RD_ASYNC_BYTE_FIX,
+ pIOPacket);
+ pIOPacket = NULL;
+ break;
+ }
+
+ pIOPacket->Completion = NULL;
+ /* if we get here we are doing it synchronously */
+ status = HIFReadWrite(pDev->HIFDevice,
+ AR6K_GMBOX_CREDIT_DEC_ADDRESS,
+ pIOPacket->pBuffer,
+ AR6K_REG_IO_BUFFER_SIZE,
+ HIF_RD_SYNC_BYTE_FIX,
+ NULL);
+ } while (FALSE);
+
+ if (A_FAILED(status)) {
+ AR_DEBUG_PRINTF(ATH_DEBUG_ERR,
+ (" DevGMboxReadCreditCounter failed! status:%d \n", status));
+ }
+
+ if (pIOPacket != NULL) {
+ if (A_SUCCESS(status)) {
+ /* sync mode processing */
+ *pCredits = ProcessCreditCounterReadBuffer(pIOPacket->pBuffer, AR6K_REG_IO_BUFFER_SIZE);
+ }
+ AR6KFreeIOPacket(pDev,pIOPacket);
+ }
+
+ AR_DEBUG_PRINTF(ATH_DEBUG_SEND,("-DevGMboxReadCreditCounter (%s) (%d) \n",
+ AsyncMode ? "ASYNC" : "SYNC", status));
+
+ return status;
+}
+
+A_STATUS DevGMboxReadCreditSize(AR6K_DEVICE *pDev, int *pCreditSize)
+{
+ A_STATUS status;
+ A_UINT8 buffer[4];
+
+ status = HIFReadWrite(pDev->HIFDevice,
+ AR6K_GMBOX_CREDIT_SIZE_ADDRESS,
+ buffer,
+ sizeof(buffer),
+ HIF_RD_SYNC_BYTE_FIX, /* hit the register 4 times to align the I/O */
+ NULL);
+
+ if (A_SUCCESS(status)) {
+ if (buffer[0] == 0) {
+ *pCreditSize = 256;
+ } else {
+ *pCreditSize = buffer[0];
+ }
+
+ }
+
+ return status;
+}
+
+void DevNotifyGMboxTargetFailure(AR6K_DEVICE *pDev)
+{
+ /* Target ASSERTED!!! */
+ if (pDev->GMboxInfo.pTargetFailureCallback != NULL) {
+ pDev->GMboxInfo.pTargetFailureCallback(pDev->GMboxInfo.pProtocolContext, A_HARDWARE);
+ }
+}
+
+A_STATUS DevGMboxRecvLookAheadPeek(AR6K_DEVICE *pDev, A_UINT8 *pLookAheadBuffer, int *pLookAheadBytes)
+{
+
+ A_STATUS status = A_OK;
+ AR6K_IRQ_PROC_REGISTERS procRegs;
+ int maxCopy;
+
+ do {
+ /* on entry the caller provides the length of the lookahead buffer */
+ if (*pLookAheadBytes > sizeof(procRegs.rx_gmbox_lookahead_alias)) {
+ A_ASSERT(FALSE);
+ status = A_EINVAL;
+ break;
+ }
+
+ maxCopy = *pLookAheadBytes;
+ *pLookAheadBytes = 0;
+ /* load the register table from the device */
+ status = HIFReadWrite(pDev->HIFDevice,
+ HOST_INT_STATUS_ADDRESS,
+ (A_UINT8 *)&procRegs,
+ AR6K_IRQ_PROC_REGS_SIZE,
+ HIF_RD_SYNC_BYTE_INC,
+ NULL);
+
+ if (A_FAILED(status)) {
+ AR_DEBUG_PRINTF(ATH_DEBUG_ERR,
+ ("DevGMboxRecvLookAheadPeek : Failed to read register table (%d) \n",status));
+ break;
+ }
+
+ if (procRegs.gmbox_rx_avail > 0) {
+ int bytes = procRegs.gmbox_rx_avail > maxCopy ? maxCopy : procRegs.gmbox_rx_avail;
+ A_MEMCPY(pLookAheadBuffer,&procRegs.rx_gmbox_lookahead_alias[0],bytes);
+ *pLookAheadBytes = bytes;
+ }
+
+ } while (FALSE);
+
+ return status;
+}
+
+A_STATUS DevGMboxSetTargetInterrupt(AR6K_DEVICE *pDev, int Signal, int AckTimeoutMS)
+{
+ A_STATUS status = A_OK;
+ int i;
+ A_UINT8 buffer[4];
+
+ A_MEMZERO(buffer, sizeof(buffer));
+
+ do {
+
+ if (Signal >= MBOX_SIG_HCI_BRIDGE_MAX) {
+ status = A_EINVAL;
+ break;
+ }
+
+ /* set the last buffer to do the actual signal trigger */
+ buffer[3] = (1 << Signal);
+
+ status = HIFReadWrite(pDev->HIFDevice,
+ INT_WLAN_ADDRESS,
+ buffer,
+ sizeof(buffer),
+ HIF_WR_SYNC_BYTE_FIX, /* hit the register 4 times to align the I/O */
+ NULL);
+
+ if (A_FAILED(status)) {
+ break;
+ }
+
+ } while (FALSE);
+
+
+ if (A_SUCCESS(status)) {
+ /* now read back the register to see if the bit cleared */
+ while (AckTimeoutMS) {
+ status = HIFReadWrite(pDev->HIFDevice,
+ INT_WLAN_ADDRESS,
+ buffer,
+ sizeof(buffer),
+ HIF_RD_SYNC_BYTE_FIX,
+ NULL);
+
+ if (A_FAILED(status)) {
+ break;
+ }
+
+ for (i = 0; i < sizeof(buffer); i++) {
+ if (buffer[i] & (1 << Signal)) {
+ /* bit is still set */
+ break;
+ }
+ }
+
+ if (i >= sizeof(buffer)) {
+ /* done */
+ break;
+ }
+
+ AckTimeoutMS--;
+ A_MDELAY(1);
+ }
+
+ if (0 == AckTimeoutMS) {
+ AR_DEBUG_PRINTF(ATH_DEBUG_ERR,
+ ("DevGMboxSetTargetInterrupt : Ack Timed-out (sig:%d) \n",Signal));
+ status = A_ERROR;
+ }
+ }
+
+ return status;
+
+}
+
+#endif //ATH_AR6K_ENABLE_GMBOX
+
+
+
+
diff --git a/drivers/staging/ath6kl/htc2/AR6000/ar6k_gmbox_hciuart.c b/drivers/staging/ath6kl/htc2/AR6000/ar6k_gmbox_hciuart.c
new file mode 100644
index 000000000000..db6d30c113b0
--- /dev/null
+++ b/drivers/staging/ath6kl/htc2/AR6000/ar6k_gmbox_hciuart.c
@@ -0,0 +1,1280 @@
+//------------------------------------------------------------------------------
+// <copyright file="ar6k_prot_hciUart.c" company="Atheros">
+// Copyright (c) 2007-2010 Atheros Corporation. All rights reserved.
+//
+//
+// Permission to use, copy, modify, and/or distribute this software for any
+// purpose with or without fee is hereby granted, provided that the above
+// copyright notice and this permission notice appear in all copies.
+//
+// THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+// WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+// MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+// ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+// WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+// ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+// OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+//
+//
+//------------------------------------------------------------------------------
+//==============================================================================
+// Protocol module for use in bridging HCI-UART packets over the GMBOX interface
+//
+// Author(s): ="Atheros"
+//==============================================================================
+#include "a_config.h"
+#include "athdefs.h"
+#include "a_types.h"
+#include "a_osapi.h"
+#include "../htc_debug.h"
+#include "hif.h"
+#include "htc_packet.h"
+#include "ar6k.h"
+#include "hci_transport_api.h"
+#include "gmboxif.h"
+#include "ar6000_diag.h"
+#include "hw/apb_map.h"
+#include "hw/mbox_reg.h"
+
+#ifdef ATH_AR6K_ENABLE_GMBOX
+#define HCI_UART_COMMAND_PKT 0x01
+#define HCI_UART_ACL_PKT 0x02
+#define HCI_UART_SCO_PKT 0x03
+#define HCI_UART_EVENT_PKT 0x04
+
+#define HCI_RECV_WAIT_BUFFERS (1 << 0)
+
+#define HCI_SEND_WAIT_CREDITS (1 << 0)
+
+#define HCI_UART_BRIDGE_CREDIT_SIZE 128
+
+#define CREDIT_POLL_COUNT 256
+
+#define HCI_DELAY_PER_INTERVAL_MS 10
+#define BTON_TIMEOUT_MS 500
+#define BTOFF_TIMEOUT_MS 500
+#define BAUD_TIMEOUT_MS 1
+#define BTPWRSAV_TIMEOUT_MS 1
+
+typedef struct {
+ HCI_TRANSPORT_CONFIG_INFO HCIConfig;
+ A_BOOL HCIAttached;
+ A_BOOL HCIStopped;
+ A_UINT32 RecvStateFlags;
+ A_UINT32 SendStateFlags;
+ HCI_TRANSPORT_PACKET_TYPE WaitBufferType;
+ HTC_PACKET_QUEUE SendQueue; /* write queue holding HCI Command and ACL packets */
+ HTC_PACKET_QUEUE HCIACLRecvBuffers; /* recv queue holding buffers for incoming ACL packets */
+ HTC_PACKET_QUEUE HCIEventBuffers; /* recv queue holding buffers for incoming event packets */
+ AR6K_DEVICE *pDev;
+ A_MUTEX_T HCIRxLock;
+ A_MUTEX_T HCITxLock;
+ int CreditsMax;
+ int CreditsConsumed;
+ int CreditsAvailable;
+ int CreditSize;
+ int CreditsCurrentSeek;
+ int SendProcessCount;
+} GMBOX_PROTO_HCI_UART;
+
+#define LOCK_HCI_RX(t) A_MUTEX_LOCK(&(t)->HCIRxLock);
+#define UNLOCK_HCI_RX(t) A_MUTEX_UNLOCK(&(t)->HCIRxLock);
+#define LOCK_HCI_TX(t) A_MUTEX_LOCK(&(t)->HCITxLock);
+#define UNLOCK_HCI_TX(t) A_MUTEX_UNLOCK(&(t)->HCITxLock);
+
+#define DO_HCI_RECV_INDICATION(p,pt) \
+{ AR_DEBUG_PRINTF(ATH_DEBUG_RECV,("HCI: Indicate Recv on packet:0x%lX status:%d len:%d type:%d \n", \
+ (unsigned long)(pt),(pt)->Status, A_SUCCESS((pt)->Status) ? (pt)->ActualLength : 0, HCI_GET_PACKET_TYPE(pt))); \
+ (p)->HCIConfig.pHCIPktRecv((p)->HCIConfig.pContext, (pt)); \
+}
+
+#define DO_HCI_SEND_INDICATION(p,pt) \
+{ AR_DEBUG_PRINTF(ATH_DEBUG_SEND,("HCI: Indicate Send on packet:0x%lX status:%d type:%d \n", \
+ (unsigned long)(pt),(pt)->Status,HCI_GET_PACKET_TYPE(pt))); \
+ (p)->HCIConfig.pHCISendComplete((p)->HCIConfig.pContext, (pt)); \
+}
+
+static A_STATUS HCITrySend(GMBOX_PROTO_HCI_UART *pProt, HTC_PACKET *pPacket, A_BOOL Synchronous);
+
+static void HCIUartCleanup(GMBOX_PROTO_HCI_UART *pProtocol)
+{
+ A_ASSERT(pProtocol != NULL);
+
+ A_MUTEX_DELETE(&pProtocol->HCIRxLock);
+ A_MUTEX_DELETE(&pProtocol->HCITxLock);
+
+ A_FREE(pProtocol);
+}
+
+static A_STATUS InitTxCreditState(GMBOX_PROTO_HCI_UART *pProt)
+{
+ A_STATUS status;
+ int credits;
+ int creditPollCount = CREDIT_POLL_COUNT;
+ A_BOOL gotCredits = FALSE;
+
+ pProt->CreditsConsumed = 0;
+
+ do {
+
+ if (pProt->CreditsMax != 0) {
+ /* we can only call this once per target reset */
+ AR_DEBUG_PRINTF(ATH_DEBUG_ERR,("HCI: InitTxCreditState - already called! \n"));
+ A_ASSERT(FALSE);
+ status = A_EINVAL;
+ break;
+ }
+
+ /* read the credit counter. At startup the target will set the credit counter
+ * to the max available, we read this in a loop because it may take
+ * multiple credit counter reads to get all credits */
+
+ while (creditPollCount) {
+
+ credits = 0;
+
+ status = DevGMboxReadCreditCounter(pProt->pDev, PROC_IO_SYNC, &credits);
+
+ if (A_FAILED(status)) {
+ break;
+ }
+
+ if (!gotCredits && (0 == credits)) {
+ creditPollCount--;
+ AR_DEBUG_PRINTF(ATH_DEBUG_SEND,("HCI: credit is 0, retrying (%d) \n",creditPollCount));
+ A_MDELAY(HCI_DELAY_PER_INTERVAL_MS);
+ continue;
+ } else {
+ gotCredits = TRUE;
+ }
+
+ if (0 == credits) {
+ break;
+ }
+
+ pProt->CreditsMax += credits;
+ }
+
+ if (A_FAILED(status)) {
+ break;
+ }
+
+ if (0 == creditPollCount) {
+ AR_DEBUG_PRINTF(ATH_DEBUG_ERR,
+ ("** HCI : Failed to get credits! GMBOX Target was not available \n"));
+ status = A_ERROR;
+ break;
+ }
+
+ /* now get the size */
+ status = DevGMboxReadCreditSize(pProt->pDev, &pProt->CreditSize);
+
+ if (A_FAILED(status)) {
+ break;
+ }
+
+ } while (FALSE);
+
+ if (A_SUCCESS(status)) {
+ pProt->CreditsAvailable = pProt->CreditsMax;
+ AR_DEBUG_PRINTF(ATH_DEBUG_ANY,("HCI : InitTxCreditState - credits avail: %d, size: %d \n",
+ pProt->CreditsAvailable, pProt->CreditSize));
+ }
+
+ return status;
+}
+
+static A_STATUS CreditsAvailableCallback(void *pContext, int Credits, A_BOOL CreditIRQEnabled)
+{
+ GMBOX_PROTO_HCI_UART *pProt = (GMBOX_PROTO_HCI_UART *)pContext;
+ A_BOOL enableCreditIrq = FALSE;
+ A_BOOL disableCreditIrq = FALSE;
+ A_BOOL doPendingSends = FALSE;
+ A_STATUS status = A_OK;
+
+ /** this callback is called under 2 conditions:
+ * 1. The credit IRQ interrupt was enabled and signaled.
+ * 2. A credit counter read completed.
+ *
+ * The function must not assume that the calling context can block !
+ */
+
+ AR_DEBUG_PRINTF(ATH_DEBUG_RECV,("+CreditsAvailableCallback (Credits:%d, IRQ:%s) \n",
+ Credits, CreditIRQEnabled ? "ON" : "OFF"));
+
+ LOCK_HCI_TX(pProt);
+
+ do {
+
+ if (0 == Credits) {
+ if (!CreditIRQEnabled) {
+ /* enable credit IRQ */
+ enableCreditIrq = TRUE;
+ }
+ break;
+ }
+
+ AR_DEBUG_PRINTF(ATH_DEBUG_SEND,("HCI: current credit state, consumed:%d available:%d max:%d seek:%d\n",
+ pProt->CreditsConsumed,
+ pProt->CreditsAvailable,
+ pProt->CreditsMax,
+ pProt->CreditsCurrentSeek));
+
+ pProt->CreditsAvailable += Credits;
+ A_ASSERT(pProt->CreditsAvailable <= pProt->CreditsMax);
+ pProt->CreditsConsumed -= Credits;
+ A_ASSERT(pProt->CreditsConsumed >= 0);
+
+ AR_DEBUG_PRINTF(ATH_DEBUG_SEND,("HCI: new credit state, consumed:%d available:%d max:%d seek:%d\n",
+ pProt->CreditsConsumed,
+ pProt->CreditsAvailable,
+ pProt->CreditsMax,
+ pProt->CreditsCurrentSeek));
+
+ if (pProt->CreditsAvailable >= pProt->CreditsCurrentSeek) {
+ /* we have enough credits to fulfill at least 1 packet waiting in the queue */
+ pProt->CreditsCurrentSeek = 0;
+ pProt->SendStateFlags &= ~HCI_SEND_WAIT_CREDITS;
+ doPendingSends = TRUE;
+ if (CreditIRQEnabled) {
+ /* credit IRQ was enabled, we shouldn't need it anymore */
+ disableCreditIrq = TRUE;
+ }
+ } else {
+ /* not enough credits yet, enable credit IRQ if we haven't already */
+ if (!CreditIRQEnabled) {
+ enableCreditIrq = TRUE;
+ }
+ }
+
+ } while (FALSE);
+
+ UNLOCK_HCI_TX(pProt);
+
+ if (enableCreditIrq) {
+ AR_DEBUG_PRINTF(ATH_DEBUG_RECV,(" Enabling credit count IRQ...\n"));
+ /* must use async only */
+ status = DevGMboxIRQAction(pProt->pDev, GMBOX_CREDIT_IRQ_ENABLE, PROC_IO_ASYNC);
+ } else if (disableCreditIrq) {
+ /* must use async only */
+ AR_DEBUG_PRINTF(ATH_DEBUG_RECV,(" Disabling credit count IRQ...\n"));
+ status = DevGMboxIRQAction(pProt->pDev, GMBOX_CREDIT_IRQ_DISABLE, PROC_IO_ASYNC);
+ }
+
+ if (doPendingSends) {
+ HCITrySend(pProt, NULL, FALSE);
+ }
+
+ AR_DEBUG_PRINTF(ATH_DEBUG_RECV,("+CreditsAvailableCallback \n"));
+ return status;
+}
+
+static INLINE void NotifyTransportFailure(GMBOX_PROTO_HCI_UART *pProt, A_STATUS status)
+{
+ if (pProt->HCIConfig.TransportFailure != NULL) {
+ pProt->HCIConfig.TransportFailure(pProt->HCIConfig.pContext, status);
+ }
+}
+
+static void FailureCallback(void *pContext, A_STATUS Status)
+{
+ GMBOX_PROTO_HCI_UART *pProt = (GMBOX_PROTO_HCI_UART *)pContext;
+
+ /* target assertion occurred */
+ NotifyTransportFailure(pProt, Status);
+}
+
+static void StateDumpCallback(void *pContext)
+{
+ GMBOX_PROTO_HCI_UART *pProt = (GMBOX_PROTO_HCI_UART *)pContext;
+
+ AR_DEBUG_PRINTF(ATH_DEBUG_ANY,("============ HCIUart State ======================\n"));
+ AR_DEBUG_PRINTF(ATH_DEBUG_ANY,("RecvStateFlags : 0x%X \n",pProt->RecvStateFlags));
+ AR_DEBUG_PRINTF(ATH_DEBUG_ANY,("SendStateFlags : 0x%X \n",pProt->SendStateFlags));
+ AR_DEBUG_PRINTF(ATH_DEBUG_ANY,("WaitBufferType : %d \n",pProt->WaitBufferType));
+ AR_DEBUG_PRINTF(ATH_DEBUG_ANY,("SendQueue Depth : %d \n",HTC_PACKET_QUEUE_DEPTH(&pProt->SendQueue)));
+ AR_DEBUG_PRINTF(ATH_DEBUG_ANY,("CreditsMax : %d \n",pProt->CreditsMax));
+ AR_DEBUG_PRINTF(ATH_DEBUG_ANY,("CreditsConsumed : %d \n",pProt->CreditsConsumed));
+ AR_DEBUG_PRINTF(ATH_DEBUG_ANY,("CreditsAvailable : %d \n",pProt->CreditsAvailable));
+ AR_DEBUG_PRINTF(ATH_DEBUG_ANY,("==================================================\n"));
+}
+
+static A_STATUS HCIUartMessagePending(void *pContext, A_UINT8 LookAheadBytes[], int ValidBytes)
+{
+ GMBOX_PROTO_HCI_UART *pProt = (GMBOX_PROTO_HCI_UART *)pContext;
+ A_STATUS status = A_OK;
+ int totalRecvLength = 0;
+ HCI_TRANSPORT_PACKET_TYPE pktType = HCI_PACKET_INVALID;
+ A_BOOL recvRefillCalled = FALSE;
+ A_BOOL blockRecv = FALSE;
+ HTC_PACKET *pPacket = NULL;
+
+ /** caller guarantees that this is a fully block-able context (synch I/O is allowed) */
+
+ AR_DEBUG_PRINTF(ATH_DEBUG_RECV,("+HCIUartMessagePending Lookahead Bytes:%d \n",ValidBytes));
+
+ LOCK_HCI_RX(pProt);
+
+ do {
+
+ if (ValidBytes < 3) {
+ /* not enough for ACL or event header */
+ break;
+ }
+
+ if ((LookAheadBytes[0] == HCI_UART_ACL_PKT) && (ValidBytes < 5)) {
+ /* not enough for ACL data header */
+ break;
+ }
+
+ switch (LookAheadBytes[0]) {
+ case HCI_UART_EVENT_PKT:
+ AR_DEBUG_PRINTF(ATH_DEBUG_RECV,("HCI Event: %d param length: %d \n",
+ LookAheadBytes[1], LookAheadBytes[2]));
+ totalRecvLength = LookAheadBytes[2];
+ totalRecvLength += 3; /* add type + event code + length field */
+ pktType = HCI_EVENT_TYPE;
+ break;
+ case HCI_UART_ACL_PKT:
+ totalRecvLength = (LookAheadBytes[4] << 8) | LookAheadBytes[3];
+ AR_DEBUG_PRINTF(ATH_DEBUG_RECV,("HCI ACL: conn:0x%X length: %d \n",
+ ((LookAheadBytes[2] & 0xF0) << 8) | LookAheadBytes[1], totalRecvLength));
+ totalRecvLength += 5; /* add type + connection handle + length field */
+ pktType = HCI_ACL_TYPE;
+ break;
+ default:
+ AR_DEBUG_PRINTF(ATH_DEBUG_ERR,("**Invalid HCI packet type: %d \n",LookAheadBytes[0]));
+ status = A_EPROTO;
+ break;
+ }
+
+ if (A_FAILED(status)) {
+ break;
+ }
+
+ if (pProt->HCIConfig.pHCIPktRecvAlloc != NULL) {
+ UNLOCK_HCI_RX(pProt);
+ /* user is using a per-packet allocation callback */
+ pPacket = pProt->HCIConfig.pHCIPktRecvAlloc(pProt->HCIConfig.pContext,
+ pktType,
+ totalRecvLength);
+ LOCK_HCI_RX(pProt);
+
+ } else {
+ HTC_PACKET_QUEUE *pQueue;
+ /* user is using a refill handler that can refill multiple HTC buffers */
+
+ /* select buffer queue */
+ if (pktType == HCI_ACL_TYPE) {
+ pQueue = &pProt->HCIACLRecvBuffers;
+ } else {
+ pQueue = &pProt->HCIEventBuffers;
+ }
+
+ if (HTC_QUEUE_EMPTY(pQueue)) {
+ AR_DEBUG_PRINTF(ATH_DEBUG_RECV,
+ ("** HCI pkt type: %d has no buffers available calling allocation handler \n",
+ pktType));
+ /* check for refill handler */
+ if (pProt->HCIConfig.pHCIPktRecvRefill != NULL) {
+ recvRefillCalled = TRUE;
+ UNLOCK_HCI_RX(pProt);
+ /* call the re-fill handler */
+ pProt->HCIConfig.pHCIPktRecvRefill(pProt->HCIConfig.pContext,
+ pktType,
+ 0);
+ LOCK_HCI_RX(pProt);
+ /* check if we have more buffers */
+ pPacket = HTC_PACKET_DEQUEUE(pQueue);
+ /* fall through */
+ }
+ } else {
+ pPacket = HTC_PACKET_DEQUEUE(pQueue);
+ AR_DEBUG_PRINTF(ATH_DEBUG_RECV,
+ ("HCI pkt type: %d now has %d recv buffers left \n",
+ pktType, HTC_PACKET_QUEUE_DEPTH(pQueue)));
+ }
+ }
+
+ if (NULL == pPacket) {
+ AR_DEBUG_PRINTF(ATH_DEBUG_RECV,
+ ("** HCI pkt type: %d has no buffers available stopping recv...\n", pktType));
+ /* this is not an error, we simply need to mark that we are waiting for buffers.*/
+ pProt->RecvStateFlags |= HCI_RECV_WAIT_BUFFERS;
+ pProt->WaitBufferType = pktType;
+ blockRecv = TRUE;
+ break;
+ }
+
+ if (totalRecvLength > (int)pPacket->BufferLength) {
+ AR_DEBUG_PRINTF(ATH_DEBUG_ERR,("** HCI-UART pkt: %d requires %d bytes (%d buffer bytes avail) ! \n",
+ LookAheadBytes[0], totalRecvLength, pPacket->BufferLength));
+ status = A_EINVAL;
+ break;
+ }
+
+ } while (FALSE);
+
+ UNLOCK_HCI_RX(pProt);
+
+ /* locks are released, we can go fetch the packet */
+
+ do {
+
+ if (A_FAILED(status) || (NULL == pPacket)) {
+ break;
+ }
+
+ /* do this synchronously, we don't need to be fast here */
+ pPacket->Completion = NULL;
+
+ AR_DEBUG_PRINTF(ATH_DEBUG_RECV,("HCI : getting recv packet len:%d hci-uart-type: %s \n",
+ totalRecvLength, (LookAheadBytes[0] == HCI_UART_EVENT_PKT) ? "EVENT" : "ACL"));
+
+ status = DevGMboxRead(pProt->pDev, pPacket, totalRecvLength);
+
+ if (A_FAILED(status)) {
+ break;
+ }
+
+ if (pPacket->pBuffer[0] != LookAheadBytes[0]) {
+ AR_DEBUG_PRINTF(ATH_DEBUG_ERR,("** HCI buffer does not contain expected packet type: %d ! \n",
+ pPacket->pBuffer[0]));
+ status = A_EPROTO;
+ break;
+ }
+
+ if (pPacket->pBuffer[0] == HCI_UART_EVENT_PKT) {
+ /* validate event header fields */
+ if ((pPacket->pBuffer[1] != LookAheadBytes[1]) ||
+ (pPacket->pBuffer[2] != LookAheadBytes[2])) {
+ AR_DEBUG_PRINTF(ATH_DEBUG_ERR,("** HCI buffer does not match lookahead! \n"));
+ DebugDumpBytes(LookAheadBytes, 3, "Expected HCI-UART Header");
+ DebugDumpBytes(pPacket->pBuffer, 3, "** Bad HCI-UART Header");
+ status = A_EPROTO;
+ break;
+ }
+ } else if (pPacket->pBuffer[0] == HCI_UART_ACL_PKT) {
+ /* validate acl header fields */
+ if ((pPacket->pBuffer[1] != LookAheadBytes[1]) ||
+ (pPacket->pBuffer[2] != LookAheadBytes[2]) ||
+ (pPacket->pBuffer[3] != LookAheadBytes[3]) ||
+ (pPacket->pBuffer[4] != LookAheadBytes[4])) {
+ AR_DEBUG_PRINTF(ATH_DEBUG_ERR,("** HCI buffer does not match lookahead! \n"));
+ DebugDumpBytes(LookAheadBytes, 5, "Expected HCI-UART Header");
+ DebugDumpBytes(pPacket->pBuffer, 5, "** Bad HCI-UART Header");
+ status = A_EPROTO;
+ break;
+ }
+ }
+
+ /* adjust buffer to move past packet ID */
+ pPacket->pBuffer++;
+ pPacket->ActualLength = totalRecvLength - 1;
+ pPacket->Status = A_OK;
+ /* indicate packet */
+ DO_HCI_RECV_INDICATION(pProt,pPacket);
+ pPacket = NULL;
+
+ /* check if we need to refill recv buffers */
+ if ((pProt->HCIConfig.pHCIPktRecvRefill != NULL) && !recvRefillCalled) {
+ HTC_PACKET_QUEUE *pQueue;
+ int watermark;
+
+ if (pktType == HCI_ACL_TYPE) {
+ watermark = pProt->HCIConfig.ACLRecvBufferWaterMark;
+ pQueue = &pProt->HCIACLRecvBuffers;
+ } else {
+ watermark = pProt->HCIConfig.EventRecvBufferWaterMark;
+ pQueue = &pProt->HCIEventBuffers;
+ }
+
+ if (HTC_PACKET_QUEUE_DEPTH(pQueue) < watermark) {
+ AR_DEBUG_PRINTF(ATH_DEBUG_RECV,
+ ("** HCI pkt type: %d watermark hit (%d) current:%d \n",
+ pktType, watermark, HTC_PACKET_QUEUE_DEPTH(pQueue)));
+ /* call the re-fill handler */
+ pProt->HCIConfig.pHCIPktRecvRefill(pProt->HCIConfig.pContext,
+ pktType,
+ HTC_PACKET_QUEUE_DEPTH(pQueue));
+ }
+ }
+
+ } while (FALSE);
+
+ /* check if we need to disable the receiver */
+ if (A_FAILED(status) || blockRecv) {
+ DevGMboxIRQAction(pProt->pDev, GMBOX_RECV_IRQ_DISABLE, PROC_IO_SYNC);
+ }
+
+ /* see if we need to recycle the recv buffer */
+ if (A_FAILED(status) && (pPacket != NULL)) {
+ HTC_PACKET_QUEUE queue;
+
+ if (A_EPROTO == status) {
+ DebugDumpBytes(pPacket->pBuffer, totalRecvLength, "Bad HCI-UART Recv packet");
+ }
+ /* recycle packet */
+ HTC_PACKET_RESET_RX(pPacket);
+ INIT_HTC_PACKET_QUEUE_AND_ADD(&queue,pPacket);
+ HCI_TransportAddReceivePkts(pProt,&queue);
+ NotifyTransportFailure(pProt,status);
+ }
+
+
+ AR_DEBUG_PRINTF(ATH_DEBUG_RECV,("-HCIUartMessagePending \n"));
+
+ return status;
+}
+
+static void HCISendPacketCompletion(void *Context, HTC_PACKET *pPacket)
+{
+ GMBOX_PROTO_HCI_UART *pProt = (GMBOX_PROTO_HCI_UART *)Context;
+ AR_DEBUG_PRINTF(ATH_DEBUG_SEND,("+HCISendPacketCompletion (pPacket:0x%lX) \n",(unsigned long)pPacket));
+
+ if (A_FAILED(pPacket->Status)) {
+ AR_DEBUG_PRINTF(ATH_DEBUG_ERR,(" Send Packet (0x%lX) failed: %d , len:%d \n",
+ (unsigned long)pPacket, pPacket->Status, pPacket->ActualLength));
+ }
+
+ DO_HCI_SEND_INDICATION(pProt,pPacket);
+
+ AR_DEBUG_PRINTF(ATH_DEBUG_SEND,("+HCISendPacketCompletion \n"));
+}
+
+static A_STATUS SeekCreditsSynch(GMBOX_PROTO_HCI_UART *pProt)
+{
+ A_STATUS status = A_OK;
+ int credits;
+ int retry = 100;
+
+ while (TRUE) {
+ credits = 0;
+ status = DevGMboxReadCreditCounter(pProt->pDev, PROC_IO_SYNC, &credits);
+ if (A_FAILED(status)) {
+ break;
+ }
+ LOCK_HCI_TX(pProt);
+ pProt->CreditsAvailable += credits;
+ pProt->CreditsConsumed -= credits;
+ if (pProt->CreditsAvailable >= pProt->CreditsCurrentSeek) {
+ pProt->CreditsCurrentSeek = 0;
+ UNLOCK_HCI_TX(pProt);
+ break;
+ }
+ UNLOCK_HCI_TX(pProt);
+ retry--;
+ if (0 == retry) {
+ status = A_EBUSY;
+ break;
+ }
+ A_MDELAY(20);
+ }
+
+ return status;
+}
+
+static A_STATUS HCITrySend(GMBOX_PROTO_HCI_UART *pProt, HTC_PACKET *pPacket, A_BOOL Synchronous)
+{
+ A_STATUS status = A_OK;
+ int transferLength;
+ int creditsRequired, remainder;
+ A_UINT8 hciUartType;
+ A_BOOL synchSendComplete = FALSE;
+
+ AR_DEBUG_PRINTF(ATH_DEBUG_SEND,("+HCITrySend (pPacket:0x%lX) %s \n",(unsigned long)pPacket,
+ Synchronous ? "SYNC" :"ASYNC"));
+
+ LOCK_HCI_TX(pProt);
+
+ /* increment write processing count on entry */
+ pProt->SendProcessCount++;
+
+ do {
+
+ if (pProt->HCIStopped) {
+ status = A_ECANCELED;
+ break;
+ }
+
+ if (pPacket != NULL) {
+ /* packet was supplied */
+ if (Synchronous) {
+ /* in synchronous mode, the send queue can only hold 1 packet */
+ if (!HTC_QUEUE_EMPTY(&pProt->SendQueue)) {
+ status = A_EBUSY;
+ A_ASSERT(FALSE);
+ break;
+ }
+
+ if (pProt->SendProcessCount > 1) {
+ /* another thread or task is draining the TX queues */
+ status = A_EBUSY;
+ A_ASSERT(FALSE);
+ break;
+ }
+
+ HTC_PACKET_ENQUEUE(&pProt->SendQueue,pPacket);
+
+ } else {
+ /* see if adding this packet hits the max depth (asynchronous mode only) */
+ if ((pProt->HCIConfig.MaxSendQueueDepth > 0) &&
+ ((HTC_PACKET_QUEUE_DEPTH(&pProt->SendQueue) + 1) >= pProt->HCIConfig.MaxSendQueueDepth)) {
+ AR_DEBUG_PRINTF(ATH_DEBUG_SEND, ("HCI Send queue is full, Depth:%d, Max:%d \n",
+ HTC_PACKET_QUEUE_DEPTH(&pProt->SendQueue),
+ pProt->HCIConfig.MaxSendQueueDepth));
+ /* queue will be full, invoke any callbacks to determine what action to take */
+ if (pProt->HCIConfig.pHCISendFull != NULL) {
+ AR_DEBUG_PRINTF(ATH_DEBUG_SEND,
+ ("HCI : Calling driver's send full callback.... \n"));
+ if (pProt->HCIConfig.pHCISendFull(pProt->HCIConfig.pContext,
+ pPacket) == HCI_SEND_FULL_DROP) {
+ /* drop it */
+ status = A_NO_RESOURCE;
+ break;
+ }
+ }
+ }
+
+ HTC_PACKET_ENQUEUE(&pProt->SendQueue,pPacket);
+ }
+
+ }
+
+ if (pProt->SendStateFlags & HCI_SEND_WAIT_CREDITS) {
+ break;
+ }
+
+ if (pProt->SendProcessCount > 1) {
+ /* another thread or task is draining the TX queues */
+ break;
+ }
+
+ /***** beyond this point only 1 thread may enter ******/
+
+ /* now drain the send queue for transmission as long as we have enough
+ * credits */
+ while (!HTC_QUEUE_EMPTY(&pProt->SendQueue)) {
+
+ pPacket = HTC_PACKET_DEQUEUE(&pProt->SendQueue);
+
+ switch (HCI_GET_PACKET_TYPE(pPacket)) {
+ case HCI_COMMAND_TYPE:
+ hciUartType = HCI_UART_COMMAND_PKT;
+ break;
+ case HCI_ACL_TYPE:
+ hciUartType = HCI_UART_ACL_PKT;
+ break;
+ default:
+ status = A_EINVAL;
+ A_ASSERT(FALSE);
+ break;
+ }
+
+ if (A_FAILED(status)) {
+ break;
+ }
+
+ AR_DEBUG_PRINTF(ATH_DEBUG_SEND,("HCI: Got head packet:0x%lX , Type:%d Length: %d Remaining Queue Depth: %d\n",
+ (unsigned long)pPacket, HCI_GET_PACKET_TYPE(pPacket), pPacket->ActualLength,
+ HTC_PACKET_QUEUE_DEPTH(&pProt->SendQueue)));
+
+ transferLength = 1; /* UART type header is 1 byte */
+ transferLength += pPacket->ActualLength;
+ transferLength = DEV_CALC_SEND_PADDED_LEN(pProt->pDev, transferLength);
+
+ /* figure out how many credits this message requires */
+ creditsRequired = transferLength / pProt->CreditSize;
+ remainder = transferLength % pProt->CreditSize;
+
+ if (remainder) {
+ creditsRequired++;
+ }
+
+ AR_DEBUG_PRINTF(ATH_DEBUG_SEND,("HCI: Creds Required:%d Got:%d\n",
+ creditsRequired, pProt->CreditsAvailable));
+
+ if (creditsRequired > pProt->CreditsAvailable) {
+ if (Synchronous) {
+ /* in synchronous mode we need to seek credits synchronously */
+ pProt->CreditsCurrentSeek = creditsRequired;
+ UNLOCK_HCI_TX(pProt);
+ status = SeekCreditsSynch(pProt);
+ LOCK_HCI_TX(pProt);
+ if (A_FAILED(status)) {
+ break;
+ }
+ /* fall through and continue processing this send op */
+ } else {
+ /* not enough credits, queue back to the head */
+ HTC_PACKET_ENQUEUE_TO_HEAD(&pProt->SendQueue,pPacket);
+ /* waiting for credits */
+ pProt->SendStateFlags |= HCI_SEND_WAIT_CREDITS;
+ /* provide a hint to reduce attempts to re-send if credits are dribbling back
+ * this hint is the shortfall of credits */
+ pProt->CreditsCurrentSeek = creditsRequired;
+ AR_DEBUG_PRINTF(ATH_DEBUG_SEND,("HCI: packet:0x%lX placed back in queue. head packet needs: %d credits \n",
+ (unsigned long)pPacket, pProt->CreditsCurrentSeek));
+ pPacket = NULL;
+ UNLOCK_HCI_TX(pProt);
+
+ /* schedule a credit counter read, our CreditsAvailableCallback callback will be called
+ * with the result */
+ DevGMboxReadCreditCounter(pProt->pDev, PROC_IO_ASYNC, NULL);
+
+ LOCK_HCI_TX(pProt);
+ break;
+ }
+ }
+
+ /* caller guarantees some head room */
+ pPacket->pBuffer--;
+ pPacket->pBuffer[0] = hciUartType;
+
+ pProt->CreditsAvailable -= creditsRequired;
+ pProt->CreditsConsumed += creditsRequired;
+ A_ASSERT(pProt->CreditsConsumed <= pProt->CreditsMax);
+
+ AR_DEBUG_PRINTF(ATH_DEBUG_SEND,("HCI: new credit state: consumed:%d available:%d max:%d\n",
+ pProt->CreditsConsumed, pProt->CreditsAvailable, pProt->CreditsMax));
+
+ UNLOCK_HCI_TX(pProt);
+
+ /* write it out */
+ if (Synchronous) {
+ pPacket->Completion = NULL;
+ pPacket->pContext = NULL;
+ } else {
+ pPacket->Completion = HCISendPacketCompletion;
+ pPacket->pContext = pProt;
+ }
+
+ status = DevGMboxWrite(pProt->pDev,pPacket,transferLength);
+ if (Synchronous) {
+ synchSendComplete = TRUE;
+ } else {
+ pPacket = NULL;
+ }
+
+ LOCK_HCI_TX(pProt);
+
+ }
+
+ } while (FALSE);
+
+ pProt->SendProcessCount--;
+ A_ASSERT(pProt->SendProcessCount >= 0);
+ UNLOCK_HCI_TX(pProt);
+
+ if (Synchronous) {
+ A_ASSERT(pPacket != NULL);
+ if (A_SUCCESS(status) && (!synchSendComplete)) {
+ status = A_EBUSY;
+ A_ASSERT(FALSE);
+ LOCK_HCI_TX(pProt);
+ if (pPacket->ListLink.pNext != NULL) {
+ /* remove from the queue */
+ HTC_PACKET_REMOVE(&pProt->SendQueue,pPacket);
+ }
+ UNLOCK_HCI_TX(pProt);
+ }
+ } else {
+ if (A_FAILED(status) && (pPacket != NULL)) {
+ pPacket->Status = status;
+ DO_HCI_SEND_INDICATION(pProt,pPacket);
+ }
+ }
+
+ AR_DEBUG_PRINTF(ATH_DEBUG_SEND,("-HCITrySend: \n"));
+ return status;
+}
+
+static void FlushSendQueue(GMBOX_PROTO_HCI_UART *pProt)
+{
+ HTC_PACKET *pPacket;
+ HTC_PACKET_QUEUE discardQueue;
+
+ INIT_HTC_PACKET_QUEUE(&discardQueue);
+
+ LOCK_HCI_TX(pProt);
+
+ if (!HTC_QUEUE_EMPTY(&pProt->SendQueue)) {
+ HTC_PACKET_QUEUE_TRANSFER_TO_TAIL(&discardQueue,&pProt->SendQueue);
+ }
+
+ UNLOCK_HCI_TX(pProt);
+
+ /* discard packets */
+ while (!HTC_QUEUE_EMPTY(&discardQueue)) {
+ pPacket = HTC_PACKET_DEQUEUE(&discardQueue);
+ pPacket->Status = A_ECANCELED;
+ DO_HCI_SEND_INDICATION(pProt,pPacket);
+ }
+
+}
+
+static void FlushRecvBuffers(GMBOX_PROTO_HCI_UART *pProt)
+{
+ HTC_PACKET_QUEUE discardQueue;
+ HTC_PACKET *pPacket;
+
+ INIT_HTC_PACKET_QUEUE(&discardQueue);
+
+ LOCK_HCI_RX(pProt);
+ /* transfer list items from ACL and event buffer queues to the discard queue */
+ if (!HTC_QUEUE_EMPTY(&pProt->HCIACLRecvBuffers)) {
+ HTC_PACKET_QUEUE_TRANSFER_TO_TAIL(&discardQueue,&pProt->HCIACLRecvBuffers);
+ }
+ if (!HTC_QUEUE_EMPTY(&pProt->HCIEventBuffers)) {
+ HTC_PACKET_QUEUE_TRANSFER_TO_TAIL(&discardQueue,&pProt->HCIEventBuffers);
+ }
+ UNLOCK_HCI_RX(pProt);
+
+ /* now empty the discard queue */
+ while (!HTC_QUEUE_EMPTY(&discardQueue)) {
+ pPacket = HTC_PACKET_DEQUEUE(&discardQueue);
+ pPacket->Status = A_ECANCELED;
+ DO_HCI_RECV_INDICATION(pProt,pPacket);
+ }
+
+}
+
+/*** protocol module install entry point ***/
+
+A_STATUS GMboxProtocolInstall(AR6K_DEVICE *pDev)
+{
+ A_STATUS status = A_OK;
+ GMBOX_PROTO_HCI_UART *pProtocol = NULL;
+
+ do {
+
+ pProtocol = A_MALLOC(sizeof(GMBOX_PROTO_HCI_UART));
+
+ if (NULL == pProtocol) {
+ status = A_NO_MEMORY;
+ break;
+ }
+
+ A_MEMZERO(pProtocol, sizeof(*pProtocol));
+ pProtocol->pDev = pDev;
+ INIT_HTC_PACKET_QUEUE(&pProtocol->SendQueue);
+ INIT_HTC_PACKET_QUEUE(&pProtocol->HCIACLRecvBuffers);
+ INIT_HTC_PACKET_QUEUE(&pProtocol->HCIEventBuffers);
+ A_MUTEX_INIT(&pProtocol->HCIRxLock);
+ A_MUTEX_INIT(&pProtocol->HCITxLock);
+
+ } while (FALSE);
+
+ if (A_SUCCESS(status)) {
+ LOCK_AR6K(pDev);
+ DEV_GMBOX_SET_PROTOCOL(pDev,
+ HCIUartMessagePending,
+ CreditsAvailableCallback,
+ FailureCallback,
+ StateDumpCallback,
+ pProtocol);
+ UNLOCK_AR6K(pDev);
+ } else {
+ if (pProtocol != NULL) {
+ HCIUartCleanup(pProtocol);
+ }
+ }
+
+ return status;
+}
+
+/*** protocol module uninstall entry point ***/
+void GMboxProtocolUninstall(AR6K_DEVICE *pDev)
+{
+ GMBOX_PROTO_HCI_UART *pProtocol = (GMBOX_PROTO_HCI_UART *)DEV_GMBOX_GET_PROTOCOL(pDev);
+
+ if (pProtocol != NULL) {
+
+ /* notify anyone attached */
+ if (pProtocol->HCIAttached) {
+ A_ASSERT(pProtocol->HCIConfig.TransportRemoved != NULL);
+ pProtocol->HCIConfig.TransportRemoved(pProtocol->HCIConfig.pContext);
+ pProtocol->HCIAttached = FALSE;
+ }
+
+ HCIUartCleanup(pProtocol);
+ DEV_GMBOX_SET_PROTOCOL(pDev,NULL,NULL,NULL,NULL,NULL);
+ }
+
+}
+
+static A_STATUS NotifyTransportReady(GMBOX_PROTO_HCI_UART *pProt)
+{
+ HCI_TRANSPORT_PROPERTIES props;
+ A_STATUS status = A_OK;
+
+ do {
+
+ A_MEMZERO(&props,sizeof(props));
+
+ /* HCI UART only needs one extra byte at the head to indicate the packet TYPE */
+ props.HeadRoom = 1;
+ props.TailRoom = 0;
+ props.IOBlockPad = pProt->pDev->BlockSize;
+ if (pProt->HCIAttached) {
+ AR_DEBUG_PRINTF(ATH_DEBUG_ANY,("HCI: notifying attached client to transport... \n"));
+ A_ASSERT(pProt->HCIConfig.TransportReady != NULL);
+ status = pProt->HCIConfig.TransportReady(pProt,
+ &props,
+ pProt->HCIConfig.pContext);
+ }
+
+ } while (FALSE);
+
+ return status;
+}
+
+/*********** HCI UART protocol implementation ************************************************/
+
+HCI_TRANSPORT_HANDLE HCI_TransportAttach(void *HTCHandle, HCI_TRANSPORT_CONFIG_INFO *pInfo)
+{
+ GMBOX_PROTO_HCI_UART *pProtocol = NULL;
+ AR6K_DEVICE *pDev;
+
+ AR_DEBUG_PRINTF(ATH_DEBUG_TRC,("+HCI_TransportAttach \n"));
+
+ pDev = HTCGetAR6KDevice(HTCHandle);
+
+ LOCK_AR6K(pDev);
+
+ do {
+
+ pProtocol = (GMBOX_PROTO_HCI_UART *)DEV_GMBOX_GET_PROTOCOL(pDev);
+
+ if (NULL == pProtocol) {
+ AR_DEBUG_PRINTF(ATH_DEBUG_ERR,("GMBOX protocol not installed! \n"));
+ break;
+ }
+
+ if (pProtocol->HCIAttached) {
+ AR_DEBUG_PRINTF(ATH_DEBUG_ERR,("GMBOX protocol already attached! \n"));
+ break;
+ }
+
+ A_MEMCPY(&pProtocol->HCIConfig, pInfo, sizeof(HCI_TRANSPORT_CONFIG_INFO));
+
+ A_ASSERT(pProtocol->HCIConfig.pHCIPktRecv != NULL);
+ A_ASSERT(pProtocol->HCIConfig.pHCISendComplete != NULL);
+
+ pProtocol->HCIAttached = TRUE;
+
+ } while (FALSE);
+
+ UNLOCK_AR6K(pDev);
+
+ if (pProtocol != NULL) {
+ /* TODO ... should we use a worker? */
+ NotifyTransportReady(pProtocol);
+ }
+
+ AR_DEBUG_PRINTF(ATH_DEBUG_TRC,("-HCI_TransportAttach (0x%lX) \n",(unsigned long)pProtocol));
+ return (HCI_TRANSPORT_HANDLE)pProtocol;
+}
+
+void HCI_TransportDetach(HCI_TRANSPORT_HANDLE HciTrans)
+{
+ GMBOX_PROTO_HCI_UART *pProtocol = (GMBOX_PROTO_HCI_UART *)HciTrans;
+ AR6K_DEVICE *pDev = pProtocol->pDev;
+
+ AR_DEBUG_PRINTF(ATH_DEBUG_TRC,("+HCI_TransportDetach \n"));
+
+ LOCK_AR6K(pDev);
+ if (!pProtocol->HCIAttached) {
+ AR_DEBUG_PRINTF(ATH_DEBUG_ERR,("GMBOX protocol not attached! \n"));
+ UNLOCK_AR6K(pDev);
+ return;
+ }
+ pProtocol->HCIAttached = FALSE;
+ UNLOCK_AR6K(pDev);
+
+ HCI_TransportStop(HciTrans);
+ AR_DEBUG_PRINTF(ATH_DEBUG_TRC,("-HCI_TransportAttach \n"));
+}
+
+A_STATUS HCI_TransportAddReceivePkts(HCI_TRANSPORT_HANDLE HciTrans, HTC_PACKET_QUEUE *pQueue)
+{
+ GMBOX_PROTO_HCI_UART *pProt = (GMBOX_PROTO_HCI_UART *)HciTrans;
+ A_STATUS status = A_OK;
+ A_BOOL unblockRecv = FALSE;
+ HTC_PACKET *pPacket;
+
+ AR_DEBUG_PRINTF(ATH_DEBUG_RECV,("+HCI_TransportAddReceivePkt \n"));
+
+ LOCK_HCI_RX(pProt);
+
+ do {
+
+ if (pProt->HCIStopped) {
+ status = A_ECANCELED;
+ break;
+ }
+
+ pPacket = HTC_GET_PKT_AT_HEAD(pQueue);
+
+ if (NULL == pPacket) {
+ status = A_EINVAL;
+ break;
+ }
+
+ AR_DEBUG_PRINTF(ATH_DEBUG_RECV,(" HCI recv packet added, type :%d, len:%d num:%d \n",
+ HCI_GET_PACKET_TYPE(pPacket), pPacket->BufferLength, HTC_PACKET_QUEUE_DEPTH(pQueue)));
+
+ if (HCI_GET_PACKET_TYPE(pPacket) == HCI_EVENT_TYPE) {
+ HTC_PACKET_QUEUE_TRANSFER_TO_TAIL(&pProt->HCIEventBuffers, pQueue);
+ } else if (HCI_GET_PACKET_TYPE(pPacket) == HCI_ACL_TYPE) {
+ HTC_PACKET_QUEUE_TRANSFER_TO_TAIL(&pProt->HCIACLRecvBuffers, pQueue);
+ } else {
+ status = A_EINVAL;
+ break;
+ }
+
+ if (pProt->RecvStateFlags & HCI_RECV_WAIT_BUFFERS) {
+ if (pProt->WaitBufferType == HCI_GET_PACKET_TYPE(pPacket)) {
+ AR_DEBUG_PRINTF(ATH_DEBUG_RECV,(" HCI recv was blocked on packet type :%d, unblocking.. \n",
+ pProt->WaitBufferType));
+ pProt->RecvStateFlags &= ~HCI_RECV_WAIT_BUFFERS;
+ pProt->WaitBufferType = HCI_PACKET_INVALID;
+ unblockRecv = TRUE;
+ }
+ }
+
+ } while (FALSE);
+
+ UNLOCK_HCI_RX(pProt);
+
+ if (A_FAILED(status)) {
+ while (!HTC_QUEUE_EMPTY(pQueue)) {
+ pPacket = HTC_PACKET_DEQUEUE(pQueue);
+ pPacket->Status = A_ECANCELED;
+ DO_HCI_RECV_INDICATION(pProt,pPacket);
+ }
+ }
+
+ if (unblockRecv) {
+ DevGMboxIRQAction(pProt->pDev, GMBOX_RECV_IRQ_ENABLE, PROC_IO_ASYNC);
+ }
+
+ AR_DEBUG_PRINTF(ATH_DEBUG_RECV,("-HCI_TransportAddReceivePkt \n"));
+
+ return A_OK;
+}
+
+A_STATUS HCI_TransportSendPkt(HCI_TRANSPORT_HANDLE HciTrans, HTC_PACKET *pPacket, A_BOOL Synchronous)
+{
+ GMBOX_PROTO_HCI_UART *pProt = (GMBOX_PROTO_HCI_UART *)HciTrans;
+
+ return HCITrySend(pProt,pPacket,Synchronous);
+}
+
+void HCI_TransportStop(HCI_TRANSPORT_HANDLE HciTrans)
+{
+ GMBOX_PROTO_HCI_UART *pProt = (GMBOX_PROTO_HCI_UART *)HciTrans;
+
+ AR_DEBUG_PRINTF(ATH_DEBUG_TRC,("+HCI_TransportStop \n"));
+
+ LOCK_AR6K(pProt->pDev);
+ if (pProt->HCIStopped) {
+ UNLOCK_AR6K(pProt->pDev);
+ AR_DEBUG_PRINTF(ATH_DEBUG_TRC,("-HCI_TransportStop \n"));
+ return;
+ }
+ pProt->HCIStopped = TRUE;
+ UNLOCK_AR6K(pProt->pDev);
+
+ /* disable interrupts */
+ DevGMboxIRQAction(pProt->pDev, GMBOX_DISABLE_ALL, PROC_IO_SYNC);
+ FlushSendQueue(pProt);
+ FlushRecvBuffers(pProt);
+
+ /* signal bridge side to power down BT */
+ DevGMboxSetTargetInterrupt(pProt->pDev, MBOX_SIG_HCI_BRIDGE_BT_OFF, BTOFF_TIMEOUT_MS);
+
+ AR_DEBUG_PRINTF(ATH_DEBUG_TRC,("-HCI_TransportStop \n"));
+}
+
+A_STATUS HCI_TransportStart(HCI_TRANSPORT_HANDLE HciTrans)
+{
+ A_STATUS status;
+ GMBOX_PROTO_HCI_UART *pProt = (GMBOX_PROTO_HCI_UART *)HciTrans;
+
+ AR_DEBUG_PRINTF(ATH_DEBUG_TRC,("+HCI_TransportStart \n"));
+
+ /* set stopped in case we have a problem in starting */
+ pProt->HCIStopped = TRUE;
+
+ do {
+
+ status = InitTxCreditState(pProt);
+
+ if (A_FAILED(status)) {
+ break;
+ }
+
+ status = DevGMboxIRQAction(pProt->pDev, GMBOX_ERRORS_IRQ_ENABLE, PROC_IO_SYNC);
+
+ if (A_FAILED(status)) {
+ break;
+ }
+ /* enable recv */
+ status = DevGMboxIRQAction(pProt->pDev, GMBOX_RECV_IRQ_ENABLE, PROC_IO_SYNC);
+
+ if (A_FAILED(status)) {
+ break;
+ }
+ /* signal bridge side to power up BT */
+ status = DevGMboxSetTargetInterrupt(pProt->pDev, MBOX_SIG_HCI_BRIDGE_BT_ON, BTON_TIMEOUT_MS);
+
+ if (A_FAILED(status)) {
+ AR_DEBUG_PRINTF(ATH_DEBUG_ERR,("HCI_TransportStart : Failed to trigger BT ON \n"));
+ break;
+ }
+
+ /* we made it */
+ pProt->HCIStopped = FALSE;
+
+ } while (FALSE);
+
+ AR_DEBUG_PRINTF(ATH_DEBUG_TRC,("-HCI_TransportStart \n"));
+
+ return status;
+}
+
+A_STATUS HCI_TransportEnableDisableAsyncRecv(HCI_TRANSPORT_HANDLE HciTrans, A_BOOL Enable)
+{
+ GMBOX_PROTO_HCI_UART *pProt = (GMBOX_PROTO_HCI_UART *)HciTrans;
+ return DevGMboxIRQAction(pProt->pDev,
+ Enable ? GMBOX_RECV_IRQ_ENABLE : GMBOX_RECV_IRQ_DISABLE,
+ PROC_IO_SYNC);
+
+}
+
+A_STATUS HCI_TransportRecvHCIEventSync(HCI_TRANSPORT_HANDLE HciTrans,
+ HTC_PACKET *pPacket,
+ int MaxPollMS)
+{
+ GMBOX_PROTO_HCI_UART *pProt = (GMBOX_PROTO_HCI_UART *)HciTrans;
+ A_STATUS status = A_OK;
+ A_UINT8 lookAhead[8];
+ int bytes;
+ int totalRecvLength;
+
+ MaxPollMS = MaxPollMS / 16;
+
+ if (MaxPollMS < 2) {
+ MaxPollMS = 2;
+ }
+
+ while (MaxPollMS) {
+
+ bytes = sizeof(lookAhead);
+ status = DevGMboxRecvLookAheadPeek(pProt->pDev,lookAhead,&bytes);
+ if (A_FAILED(status)) {
+ break;
+ }
+
+ if (bytes < 3) {
+ AR_DEBUG_PRINTF(ATH_DEBUG_RECV,("HCI recv poll got bytes: %d, retry : %d \n",
+ bytes, MaxPollMS));
+ A_MDELAY(16);
+ MaxPollMS--;
+ continue;
+ }
+
+ totalRecvLength = 0;
+ switch (lookAhead[0]) {
+ case HCI_UART_EVENT_PKT:
+ AR_DEBUG_PRINTF(ATH_DEBUG_RECV,("HCI Event: %d param length: %d \n",
+ lookAhead[1], lookAhead[2]));
+ totalRecvLength = lookAhead[2];
+ totalRecvLength += 3; /* add type + event code + length field */
+ break;
+ default:
+ AR_DEBUG_PRINTF(ATH_DEBUG_ERR,("**Invalid HCI packet type: %d \n",lookAhead[0]));
+ status = A_EPROTO;
+ break;
+ }
+
+ if (A_FAILED(status)) {
+ break;
+ }
+
+ pPacket->Completion = NULL;
+ status = DevGMboxRead(pProt->pDev,pPacket,totalRecvLength);
+ if (A_FAILED(status)) {
+ break;
+ }
+
+ pPacket->pBuffer++;
+ pPacket->ActualLength = totalRecvLength - 1;
+ pPacket->Status = A_OK;
+ break;
+ }
+
+ if (MaxPollMS == 0) {
+ AR_DEBUG_PRINTF(ATH_DEBUG_ERR,("HCI recv poll timeout! \n"));
+ status = A_ERROR;
+ }
+
+ return status;
+}
+
+#define LSB_SCRATCH_IDX 4
+#define MSB_SCRATCH_IDX 5
+A_STATUS HCI_TransportSetBaudRate(HCI_TRANSPORT_HANDLE HciTrans, A_UINT32 Baud)
+{
+ GMBOX_PROTO_HCI_UART *pProt = (GMBOX_PROTO_HCI_UART *)HciTrans;
+ HIF_DEVICE *pHIFDevice = (HIF_DEVICE *)(pProt->pDev->HIFDevice);
+ A_UINT32 scaledBaud, scratchAddr;
+ A_STATUS status = A_OK;
+
+ /* Divide the desired baud rate by 100
+ * Store the LSB in the local scratch register 4 and the MSB in the local
+ * scratch register 5 for the target to read
+ */
+ scratchAddr = MBOX_BASE_ADDRESS | (LOCAL_SCRATCH_ADDRESS + 4 * LSB_SCRATCH_IDX);
+ scaledBaud = (Baud / 100) & LOCAL_SCRATCH_VALUE_MASK;
+ status = ar6000_WriteRegDiag(pHIFDevice, &scratchAddr, &scaledBaud);
+ scratchAddr = MBOX_BASE_ADDRESS | (LOCAL_SCRATCH_ADDRESS + 4 * MSB_SCRATCH_IDX);
+ scaledBaud = ((Baud / 100) >> (LOCAL_SCRATCH_VALUE_MSB+1)) & LOCAL_SCRATCH_VALUE_MASK;
+ status |= ar6000_WriteRegDiag(pHIFDevice, &scratchAddr, &scaledBaud);
+ if (A_OK != status) {
+ AR_DEBUG_PRINTF(ATH_DEBUG_ERR, ("Failed to set up baud rate in scratch register!"));
+ return status;
+ }
+
+ /* Now interrupt the target to tell it about the baud rate */
+ status = DevGMboxSetTargetInterrupt(pProt->pDev, MBOX_SIG_HCI_BRIDGE_BAUD_SET, BAUD_TIMEOUT_MS);
+ if (A_OK != status) {
+ AR_DEBUG_PRINTF(ATH_DEBUG_ERR, ("Failed to tell target to change baud rate!"));
+ }
+
+ return status;
+}
+
+A_STATUS HCI_TransportEnablePowerMgmt(HCI_TRANSPORT_HANDLE HciTrans, A_BOOL Enable)
+{
+ A_STATUS status;
+ GMBOX_PROTO_HCI_UART *pProt = (GMBOX_PROTO_HCI_UART *)HciTrans;
+
+ if (Enable) {
+ status = DevGMboxSetTargetInterrupt(pProt->pDev, MBOX_SIG_HCI_BRIDGE_PWR_SAV_ON, BTPWRSAV_TIMEOUT_MS);
+ } else {
+ status = DevGMboxSetTargetInterrupt(pProt->pDev, MBOX_SIG_HCI_BRIDGE_PWR_SAV_OFF, BTPWRSAV_TIMEOUT_MS);
+ }
+
+ if (A_FAILED(status)) {
+ AR_DEBUG_PRINTF(ATH_DEBUG_ERR,("Failed to enable/disable HCI power management!\n"));
+ } else {
+ AR_DEBUG_PRINTF(ATH_DEBUG_ERR,("HCI power management enabled/disabled!\n"));
+ }
+
+ return status;
+}
+
+#endif //ATH_AR6K_ENABLE_GMBOX
+
diff --git a/drivers/staging/ath6kl/htc2/htc.c b/drivers/staging/ath6kl/htc2/htc.c
new file mode 100644
index 000000000000..7df62a20d482
--- /dev/null
+++ b/drivers/staging/ath6kl/htc2/htc.c
@@ -0,0 +1,579 @@
+//------------------------------------------------------------------------------
+// <copyright file="htc.c" company="Atheros">
+// Copyright (c) 2007-2010 Atheros Corporation. All rights reserved.
+//
+//
+// Permission to use, copy, modify, and/or distribute this software for any
+// purpose with or without fee is hereby granted, provided that the above
+// copyright notice and this permission notice appear in all copies.
+//
+// THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+// WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+// MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+// ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+// WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+// ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+// OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+//
+//
+//------------------------------------------------------------------------------
+//==============================================================================
+// Author(s): ="Atheros"
+//==============================================================================
+#include "htc_internal.h"
+
+#ifdef ATH_DEBUG_MODULE
+static ATH_DEBUG_MASK_DESCRIPTION g_HTCDebugDescription[] = {
+ { ATH_DEBUG_SEND , "Send"},
+ { ATH_DEBUG_RECV , "Recv"},
+ { ATH_DEBUG_SYNC , "Sync"},
+ { ATH_DEBUG_DUMP , "Dump Data (RX or TX)"},
+ { ATH_DEBUG_IRQ , "Interrupt Processing"}
+};
+
+ATH_DEBUG_INSTANTIATE_MODULE_VAR(htc,
+ "htc",
+ "Host Target Communications",
+ ATH_DEBUG_MASK_DEFAULTS,
+ ATH_DEBUG_DESCRIPTION_COUNT(g_HTCDebugDescription),
+ g_HTCDebugDescription);
+
+#endif
+
+static void HTCReportFailure(void *Context);
+static void ResetEndpointStates(HTC_TARGET *target);
+
+void HTCFreeControlBuffer(HTC_TARGET *target, HTC_PACKET *pPacket, HTC_PACKET_QUEUE *pList)
+{
+ LOCK_HTC(target);
+ HTC_PACKET_ENQUEUE(pList,pPacket);
+ UNLOCK_HTC(target);
+}
+
+HTC_PACKET *HTCAllocControlBuffer(HTC_TARGET *target, HTC_PACKET_QUEUE *pList)
+{
+ HTC_PACKET *pPacket;
+
+ LOCK_HTC(target);
+ pPacket = HTC_PACKET_DEQUEUE(pList);
+ UNLOCK_HTC(target);
+
+ return pPacket;
+}
+
+/* cleanup the HTC instance */
+static void HTCCleanup(HTC_TARGET *target)
+{
+ A_INT32 i;
+
+ DevCleanup(&target->Device);
+
+ for (i = 0;i < NUM_CONTROL_BUFFERS;i++) {
+ if (target->HTCControlBuffers[i].Buffer) {
+ A_FREE(target->HTCControlBuffers[i].Buffer);
+ }
+ }
+
+ if (A_IS_MUTEX_VALID(&target->HTCLock)) {
+ A_MUTEX_DELETE(&target->HTCLock);
+ }
+
+ if (A_IS_MUTEX_VALID(&target->HTCRxLock)) {
+ A_MUTEX_DELETE(&target->HTCRxLock);
+ }
+
+ if (A_IS_MUTEX_VALID(&target->HTCTxLock)) {
+ A_MUTEX_DELETE(&target->HTCTxLock);
+ }
+ /* free our instance */
+ A_FREE(target);
+}
+
+/* registered target arrival callback from the HIF layer */
+HTC_HANDLE HTCCreate(void *hif_handle, HTC_INIT_INFO *pInfo)
+{
+ HTC_TARGET *target = NULL;
+ A_STATUS status = A_OK;
+ int i;
+ A_UINT32 ctrl_bufsz;
+ A_UINT32 blocksizes[HTC_MAILBOX_NUM_MAX];
+
+ AR_DEBUG_PRINTF(ATH_DEBUG_TRC, ("HTCCreate - Enter\n"));
+
+ A_REGISTER_MODULE_DEBUG_INFO(htc);
+
+ do {
+
+ /* allocate target memory */
+ if ((target = (HTC_TARGET *)A_MALLOC(sizeof(HTC_TARGET))) == NULL) {
+ AR_DEBUG_PRINTF(ATH_DEBUG_ERR, ("Unable to allocate memory\n"));
+ status = A_ERROR;
+ break;
+ }
+
+ A_MEMZERO(target, sizeof(HTC_TARGET));
+ A_MUTEX_INIT(&target->HTCLock);
+ A_MUTEX_INIT(&target->HTCRxLock);
+ A_MUTEX_INIT(&target->HTCTxLock);
+ INIT_HTC_PACKET_QUEUE(&target->ControlBufferTXFreeList);
+ INIT_HTC_PACKET_QUEUE(&target->ControlBufferRXFreeList);
+
+ /* give device layer the hif device handle */
+ target->Device.HIFDevice = hif_handle;
+ /* give the device layer our context (for event processing)
+ * the device layer will register its own context with HIF
+ * so we need to set this so we can fetch it in the target remove handler */
+ target->Device.HTCContext = target;
+ /* set device layer target failure callback */
+ target->Device.TargetFailureCallback = HTCReportFailure;
+ /* set device layer recv message pending callback */
+ target->Device.MessagePendingCallback = HTCRecvMessagePendingHandler;
+ target->EpWaitingForBuffers = ENDPOINT_MAX;
+
+ A_MEMCPY(&target->HTCInitInfo,pInfo,sizeof(HTC_INIT_INFO));
+
+ ResetEndpointStates(target);
+
+ /* setup device layer */
+ status = DevSetup(&target->Device);
+
+ if (A_FAILED(status)) {
+ break;
+ }
+
+
+ /* get the block sizes */
+ status = HIFConfigureDevice(hif_handle, HIF_DEVICE_GET_MBOX_BLOCK_SIZE,
+ blocksizes, sizeof(blocksizes));
+ if (A_FAILED(status)) {
+ AR_DEBUG_PRINTF(ATH_DEBUG_ERR,("Failed to get block size info from HIF layer...\n"));
+ break;
+ }
+
+ /* Set the control buffer size based on the block size */
+ if (blocksizes[1] > HTC_MAX_CONTROL_MESSAGE_LENGTH) {
+ ctrl_bufsz = blocksizes[1] + HTC_HDR_LENGTH;
+ } else {
+ ctrl_bufsz = HTC_MAX_CONTROL_MESSAGE_LENGTH + HTC_HDR_LENGTH;
+ }
+ for (i = 0;i < NUM_CONTROL_BUFFERS;i++) {
+ target->HTCControlBuffers[i].Buffer = A_MALLOC(ctrl_bufsz);
+ if (target->HTCControlBuffers[i].Buffer == NULL) {
+ AR_DEBUG_PRINTF(ATH_DEBUG_ERR, ("Unable to allocate memory\n"));
+ status = A_ERROR;
+ break;
+ }
+ }
+
+ if (A_FAILED(status)) {
+ break;
+ }
+
+ /* carve up buffers/packets for control messages */
+ for (i = 0; i < NUM_CONTROL_RX_BUFFERS; i++) {
+ HTC_PACKET *pControlPacket;
+ pControlPacket = &target->HTCControlBuffers[i].HtcPacket;
+ SET_HTC_PACKET_INFO_RX_REFILL(pControlPacket,
+ target,
+ target->HTCControlBuffers[i].Buffer,
+ ctrl_bufsz,
+ ENDPOINT_0);
+ HTC_FREE_CONTROL_RX(target,pControlPacket);
+ }
+
+ for (;i < NUM_CONTROL_BUFFERS;i++) {
+ HTC_PACKET *pControlPacket;
+ pControlPacket = &target->HTCControlBuffers[i].HtcPacket;
+ INIT_HTC_PACKET_INFO(pControlPacket,
+ target->HTCControlBuffers[i].Buffer,
+ ctrl_bufsz);
+ HTC_FREE_CONTROL_TX(target,pControlPacket);
+ }
+
+ } while (FALSE);
+
+ if (A_FAILED(status)) {
+ if (target != NULL) {
+ HTCCleanup(target);
+ target = NULL;
+ }
+ }
+
+ AR_DEBUG_PRINTF(ATH_DEBUG_TRC, ("HTCCreate - Exit\n"));
+
+ return target;
+}
+
+void HTCDestroy(HTC_HANDLE HTCHandle)
+{
+ HTC_TARGET *target = GET_HTC_TARGET_FROM_HANDLE(HTCHandle);
+ AR_DEBUG_PRINTF(ATH_DEBUG_TRC, ("+HTCDestroy .. Destroying :0x%lX \n",(unsigned long)target));
+ HTCCleanup(target);
+ AR_DEBUG_PRINTF(ATH_DEBUG_TRC, ("-HTCDestroy \n"));
+}
+
+/* get the low level HIF device for the caller; the caller may wish to do low level
+ * HIF requests */
+void *HTCGetHifDevice(HTC_HANDLE HTCHandle)
+{
+ HTC_TARGET *target = GET_HTC_TARGET_FROM_HANDLE(HTCHandle);
+ return target->Device.HIFDevice;
+}
+
+/* wait for the target to arrive (sends HTC Ready message)
+ * this operation is fully synchronous and the message is polled for */
+A_STATUS HTCWaitTarget(HTC_HANDLE HTCHandle)
+{
+ HTC_TARGET *target = GET_HTC_TARGET_FROM_HANDLE(HTCHandle);
+ A_STATUS status;
+ HTC_PACKET *pPacket = NULL;
+ HTC_READY_EX_MSG *pRdyMsg;
+
+ HTC_SERVICE_CONNECT_REQ connect;
+ HTC_SERVICE_CONNECT_RESP resp;
+
+ AR_DEBUG_PRINTF(ATH_DEBUG_TRC, ("HTCWaitTarget - Enter (target:0x%lX) \n", (unsigned long)target));
+
+ do {
+
+#ifdef MBOXHW_UNIT_TEST
+
+ status = DoMboxHWTest(&target->Device);
+
+ if (status != A_OK) {
+ break;
+ }
+
+#endif
+
+ /* we should be getting 1 control message that the target is ready */
+ status = HTCWaitforControlMessage(target, &pPacket);
+
+ if (A_FAILED(status)) {
+ AR_DEBUG_PRINTF(ATH_DEBUG_ERR, (" Target Not Available!!\n"));
+ break;
+ }
+
+ /* we controlled the buffer creation so it has to be properly aligned */
+ pRdyMsg = (HTC_READY_EX_MSG *)pPacket->pBuffer;
+
+ if ((pRdyMsg->Version2_0_Info.MessageID != HTC_MSG_READY_ID) ||
+ (pPacket->ActualLength < sizeof(HTC_READY_MSG))) {
+ /* this message is not valid */
+ AR_DEBUG_ASSERT(FALSE);
+ status = A_EPROTO;
+ break;
+ }
+
+
+ if (pRdyMsg->Version2_0_Info.CreditCount == 0 || pRdyMsg->Version2_0_Info.CreditSize == 0) {
+ /* this message is not valid */
+ AR_DEBUG_ASSERT(FALSE);
+ status = A_EPROTO;
+ break;
+ }
+
+ target->TargetCredits = pRdyMsg->Version2_0_Info.CreditCount;
+ target->TargetCreditSize = pRdyMsg->Version2_0_Info.CreditSize;
+
+ AR_DEBUG_PRINTF(ATH_DEBUG_WARN, (" Target Ready: credits: %d credit size: %d\n",
+ target->TargetCredits, target->TargetCreditSize));
+
+ /* check if this is an extended ready message */
+ if (pPacket->ActualLength >= sizeof(HTC_READY_EX_MSG)) {
+ /* this is an extended message */
+ target->HTCTargetVersion = pRdyMsg->HTCVersion;
+ target->MaxMsgPerBundle = pRdyMsg->MaxMsgsPerHTCBundle;
+ } else {
+ /* legacy */
+ target->HTCTargetVersion = HTC_VERSION_2P0;
+ target->MaxMsgPerBundle = 0;
+ }
+
+#ifdef HTC_FORCE_LEGACY_2P0
+ /* for testing and comparison...*/
+ target->HTCTargetVersion = HTC_VERSION_2P0;
+ target->MaxMsgPerBundle = 0;
+#endif
+
+ AR_DEBUG_PRINTF(ATH_DEBUG_TRC,
+ ("Using HTC Protocol Version : %s (%d)\n ",
+ (target->HTCTargetVersion == HTC_VERSION_2P0) ? "2.0" : ">= 2.1",
+ target->HTCTargetVersion));
+
+ if (target->MaxMsgPerBundle > 0) {
+ /* limit what HTC can handle */
+ target->MaxMsgPerBundle = min(HTC_HOST_MAX_MSG_PER_BUNDLE, target->MaxMsgPerBundle);
+ /* target supports message bundling, setup device layer */
+ if (A_FAILED(DevSetupMsgBundling(&target->Device,target->MaxMsgPerBundle))) {
+ /* device layer can't handle bundling */
+ target->MaxMsgPerBundle = 0;
+ } else {
+ /* limit the bundle size to what the device layer can handle */
+ target->MaxMsgPerBundle = min(DEV_GET_MAX_MSG_PER_BUNDLE(&target->Device),
+ target->MaxMsgPerBundle);
+ }
+ }
+
+ if (target->MaxMsgPerBundle > 0) {
+ AR_DEBUG_PRINTF(ATH_DEBUG_TRC,
+ (" HTC bundling allowed. Max Msg Per HTC Bundle: %d\n", target->MaxMsgPerBundle));
+
+ if (DEV_GET_MAX_BUNDLE_SEND_LENGTH(&target->Device) != 0) {
+ target->SendBundlingEnabled = TRUE;
+ }
+ if (DEV_GET_MAX_BUNDLE_RECV_LENGTH(&target->Device) != 0) {
+ target->RecvBundlingEnabled = TRUE;
+ }
+
+ if (!DEV_IS_LEN_BLOCK_ALIGNED(&target->Device,target->TargetCreditSize)) {
+ AR_DEBUG_PRINTF(ATH_DEBUG_WARN, ("*** Credit size: %d is not block aligned! Disabling send bundling \n",
+ target->TargetCreditSize));
+ /* disallow send bundling since the credit size is not aligned to a block size;
+ * the I/O block padding would spill into the next credit buffer, which is fatal */
+ target->SendBundlingEnabled = FALSE;
+ }
+ }
+
+ /* setup our pseudo HTC control endpoint connection */
+ A_MEMZERO(&connect,sizeof(connect));
+ A_MEMZERO(&resp,sizeof(resp));
+ connect.EpCallbacks.pContext = target;
+ connect.EpCallbacks.EpTxComplete = HTCControlTxComplete;
+ connect.EpCallbacks.EpRecv = HTCControlRecv;
+ connect.EpCallbacks.EpRecvRefill = NULL; /* not needed */
+ connect.EpCallbacks.EpSendFull = NULL; /* not needed */
+ connect.MaxSendQueueDepth = NUM_CONTROL_BUFFERS;
+ connect.ServiceID = HTC_CTRL_RSVD_SVC;
+
+ /* connect fake service */
+ status = HTCConnectService((HTC_HANDLE)target,
+ &connect,
+ &resp);
+
+ if (!A_FAILED(status)) {
+ break;
+ }
+
+ } while (FALSE);
+
+ if (pPacket != NULL) {
+ HTC_FREE_CONTROL_RX(target,pPacket);
+ }
+
+ AR_DEBUG_PRINTF(ATH_DEBUG_TRC, ("HTCWaitTarget - Exit\n"));
+
+ return status;
+}
+
+
+
+/* Start HTC, enable interrupts and let the target know host has finished setup */
+A_STATUS HTCStart(HTC_HANDLE HTCHandle)
+{
+ HTC_TARGET *target = GET_HTC_TARGET_FROM_HANDLE(HTCHandle);
+ HTC_PACKET *pPacket;
+ A_STATUS status;
+
+ AR_DEBUG_PRINTF(ATH_DEBUG_TRC, ("HTCStart Enter\n"));
+
+ /* make sure interrupts are disabled at the chip level,
+ * this function can be called again from a reboot of the target without shutting down HTC */
+ DevDisableInterrupts(&target->Device);
+ /* make sure state is cleared again */
+ target->OpStateFlags = 0;
+ target->RecvStateFlags = 0;
+
+ /* now that we are starting, push control receive buffers into the
+ * HTC control endpoint */
+
+ while (1) {
+ pPacket = HTC_ALLOC_CONTROL_RX(target);
+ if (NULL == pPacket) {
+ break;
+ }
+ HTCAddReceivePkt((HTC_HANDLE)target,pPacket);
+ }
+
+ do {
+
+ AR_DEBUG_ASSERT(target->InitCredits != NULL);
+ AR_DEBUG_ASSERT(target->EpCreditDistributionListHead != NULL);
+ AR_DEBUG_ASSERT(target->EpCreditDistributionListHead->pNext != NULL);
+
+ /* call the init credits callback to do the distribution,
+ * NOTE: the first entry in the distribution list is ENDPOINT_0, so
+ * we pass the start of the list after this one. */
+ target->InitCredits(target->pCredDistContext,
+ target->EpCreditDistributionListHead->pNext,
+ target->TargetCredits);
+
+#ifdef ATH_DEBUG_MODULE
+
+ if (AR_DEBUG_LVL_CHECK(ATH_DEBUG_TRC)) {
+ DumpCreditDistStates(target);
+ }
+#endif
+
+ /* the caller is done connecting to services, so we can indicate to the
+ * target that the setup phase is complete */
+ status = HTCSendSetupComplete(target);
+
+ if (A_FAILED(status)) {
+ break;
+ }
+
+ /* unmask interrupts */
+ status = DevUnmaskInterrupts(&target->Device);
+
+ if (A_FAILED(status)) {
+ HTCStop(target);
+ }
+
+ } while (FALSE);
+
+ AR_DEBUG_PRINTF(ATH_DEBUG_TRC, ("HTCStart Exit\n"));
+ return status;
+}
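+
+/* note the startup ordering above: control RX buffers are queued first, initial
+ * credits are distributed, the setup-complete message is sent to the target, and
+ * only then are chip-level interrupts unmasked; this should keep the target from
+ * delivering traffic before the host is ready to receive it */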
+
+static void ResetEndpointStates(HTC_TARGET *target)
+{
+ HTC_ENDPOINT *pEndpoint;
+ int i;
+
+ for (i = ENDPOINT_0; i < ENDPOINT_MAX; i++) {
+ pEndpoint = &target->EndPoint[i];
+
+ A_MEMZERO(&pEndpoint->CreditDist, sizeof(pEndpoint->CreditDist));
+ pEndpoint->ServiceID = 0;
+ pEndpoint->MaxMsgLength = 0;
+ pEndpoint->MaxTxQueueDepth = 0;
+#ifdef HTC_EP_STAT_PROFILING
+ A_MEMZERO(&pEndpoint->EndPointStats,sizeof(pEndpoint->EndPointStats));
+#endif
+ INIT_HTC_PACKET_QUEUE(&pEndpoint->RxBuffers);
+ INIT_HTC_PACKET_QUEUE(&pEndpoint->TxQueue);
+ INIT_HTC_PACKET_QUEUE(&pEndpoint->RecvIndicationQueue);
+ pEndpoint->target = target;
+ }
+ /* reset distribution list */
+ target->EpCreditDistributionListHead = NULL;
+}
+
+/* stop HTC communications, i.e. stop interrupt reception, and flush all queued buffers */
+void HTCStop(HTC_HANDLE HTCHandle)
+{
+ HTC_TARGET *target = GET_HTC_TARGET_FROM_HANDLE(HTCHandle);
+ AR_DEBUG_PRINTF(ATH_DEBUG_TRC, ("+HTCStop \n"));
+
+ LOCK_HTC(target);
+ /* mark that we are shutting down .. */
+ target->OpStateFlags |= HTC_OP_STATE_STOPPING;
+ UNLOCK_HTC(target);
+
+ /* Masking interrupts is a synchronous operation; when this function returns,
+ * all pending HIF I/O has completed and we can safely flush the queues */
+ DevMaskInterrupts(&target->Device);
+
+#ifdef THREAD_X
+ //
+ // Is this delay required
+ //
+ A_MDELAY(200); // wait for IRQ process done
+#endif
+ /* flush all send packets */
+ HTCFlushSendPkts(target);
+ /* flush all recv buffers */
+ HTCFlushRecvBuffers(target);
+
+ ResetEndpointStates(target);
+
+ AR_DEBUG_PRINTF(ATH_DEBUG_TRC, ("-HTCStop \n"));
+}
+
+#ifdef ATH_DEBUG_MODULE
+void HTCDumpCreditStates(HTC_HANDLE HTCHandle)
+{
+ HTC_TARGET *target = GET_HTC_TARGET_FROM_HANDLE(HTCHandle);
+
+ LOCK_HTC_TX(target);
+
+ DumpCreditDistStates(target);
+
+ UNLOCK_HTC_TX(target);
+
+ DumpAR6KDevState(&target->Device);
+}
+#endif
+/* report a target failure from the device, this is a callback from the device layer
+ * which uses a mechanism to report errors from the target (i.e. special interrupts) */
+static void HTCReportFailure(void *Context)
+{
+ HTC_TARGET *target = (HTC_TARGET *)Context;
+
+ target->TargetFailure = TRUE;
+
+ if (target->HTCInitInfo.TargetFailure != NULL) {
+ /* let upper layer know, it needs to call HTCStop() */
+ target->HTCInitInfo.TargetFailure(target->HTCInitInfo.pContext, A_ERROR);
+ }
+}
+
+A_BOOL HTCGetEndpointStatistics(HTC_HANDLE HTCHandle,
+ HTC_ENDPOINT_ID Endpoint,
+ HTC_ENDPOINT_STAT_ACTION Action,
+ HTC_ENDPOINT_STATS *pStats)
+{
+
+#ifdef HTC_EP_STAT_PROFILING
+ HTC_TARGET *target = GET_HTC_TARGET_FROM_HANDLE(HTCHandle);
+ A_BOOL clearStats = FALSE;
+ A_BOOL sample = FALSE;
+
+ switch (Action) {
+ case HTC_EP_STAT_SAMPLE :
+ sample = TRUE;
+ break;
+ case HTC_EP_STAT_SAMPLE_AND_CLEAR :
+ sample = TRUE;
+ clearStats = TRUE;
+ break;
+ case HTC_EP_STAT_CLEAR :
+ clearStats = TRUE;
+ break;
+ default:
+ break;
+ }
+
+ A_ASSERT(Endpoint < ENDPOINT_MAX);
+
+ /* lock out TX and RX while we sample and/or clear */
+ LOCK_HTC_TX(target);
+ LOCK_HTC_RX(target);
+
+ if (sample) {
+ A_ASSERT(pStats != NULL);
+ /* return the stats to the caller */
+ A_MEMCPY(pStats, &target->EndPoint[Endpoint].EndPointStats, sizeof(HTC_ENDPOINT_STATS));
+ }
+
+ if (clearStats) {
+ /* reset stats */
+ A_MEMZERO(&target->EndPoint[Endpoint].EndPointStats, sizeof(HTC_ENDPOINT_STATS));
+ }
+
+ UNLOCK_HTC_RX(target);
+ UNLOCK_HTC_TX(target);
+
+ return TRUE;
+#else
+ return FALSE;
+#endif
+}
+
+AR6K_DEVICE *HTCGetAR6KDevice(void *HTCHandle)
+{
+ HTC_TARGET *target = GET_HTC_TARGET_FROM_HANDLE(HTCHandle);
+ return &target->Device;
+}
+
diff --git a/drivers/staging/ath6kl/htc2/htc_debug.h b/drivers/staging/ath6kl/htc2/htc_debug.h
new file mode 100644
index 000000000000..8455703e221c
--- /dev/null
+++ b/drivers/staging/ath6kl/htc2/htc_debug.h
@@ -0,0 +1,38 @@
+//------------------------------------------------------------------------------
+// <copyright file="htc_debug.h" company="Atheros">
+// Copyright (c) 2007-2010 Atheros Corporation. All rights reserved.
+//
+//
+// Permission to use, copy, modify, and/or distribute this software for any
+// purpose with or without fee is hereby granted, provided that the above
+// copyright notice and this permission notice appear in all copies.
+//
+// THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+// WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+// MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+// ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+// WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+// ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+// OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+//
+//
+//------------------------------------------------------------------------------
+//==============================================================================
+// Author(s): ="Atheros"
+//==============================================================================
+#ifndef HTC_DEBUG_H_
+#define HTC_DEBUG_H_
+
+#define ATH_MODULE_NAME htc
+#include "a_debug.h"
+
+/* ------- Debug related stuff ------- */
+
+#define ATH_DEBUG_SEND ATH_DEBUG_MAKE_MODULE_MASK(0)
+#define ATH_DEBUG_RECV ATH_DEBUG_MAKE_MODULE_MASK(1)
+#define ATH_DEBUG_SYNC ATH_DEBUG_MAKE_MODULE_MASK(2)
+#define ATH_DEBUG_DUMP ATH_DEBUG_MAKE_MODULE_MASK(3)
+#define ATH_DEBUG_IRQ ATH_DEBUG_MAKE_MODULE_MASK(4)
+
+
+#endif /*HTC_DEBUG_H_*/
diff --git a/drivers/staging/ath6kl/htc2/htc_internal.h b/drivers/staging/ath6kl/htc2/htc_internal.h
new file mode 100644
index 000000000000..bd6754beb221
--- /dev/null
+++ b/drivers/staging/ath6kl/htc2/htc_internal.h
@@ -0,0 +1,220 @@
+//------------------------------------------------------------------------------
+// <copyright file="htc_internal.h" company="Atheros">
+// Copyright (c) 2007-2010 Atheros Corporation. All rights reserved.
+//
+//
+// Permission to use, copy, modify, and/or distribute this software for any
+// purpose with or without fee is hereby granted, provided that the above
+// copyright notice and this permission notice appear in all copies.
+//
+// THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+// WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+// MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+// ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+// WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+// ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+// OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+//
+//
+//------------------------------------------------------------------------------
+//==============================================================================
+// Author(s): ="Atheros"
+//==============================================================================
+#ifndef _HTC_INTERNAL_H_
+#define _HTC_INTERNAL_H_
+
+/* for debugging, uncomment this to capture the last frame header; on frame header
+ * processing errors the last frame header is dumped for comparison */
+//#define HTC_CAPTURE_LAST_FRAME
+
+//#define HTC_EP_STAT_PROFILING
+
+#ifdef __cplusplus
+extern "C" {
+#endif /* __cplusplus */
+
+/* Header files */
+
+#include "a_config.h"
+#include "athdefs.h"
+#include "a_types.h"
+#include "a_osapi.h"
+#include "htc_debug.h"
+#include "htc.h"
+#include "htc_api.h"
+#include "bmi_msg.h"
+#include "hif.h"
+#include "AR6000/ar6k.h"
+
+/* HTC operational parameters */
+#define HTC_TARGET_RESPONSE_TIMEOUT 2000 /* in ms */
+#define HTC_TARGET_DEBUG_INTR_MASK 0x01
+#define HTC_TARGET_CREDIT_INTR_MASK 0xF0
+
+#define HTC_HOST_MAX_MSG_PER_BUNDLE 8
+#define HTC_MIN_HTC_MSGS_TO_BUNDLE 2
+
+/* packet flags */
+
+#define HTC_RX_PKT_IGNORE_LOOKAHEAD (1 << 0)
+#define HTC_RX_PKT_REFRESH_HDR (1 << 1)
+#define HTC_RX_PKT_PART_OF_BUNDLE (1 << 2)
+#define HTC_RX_PKT_NO_RECYCLE (1 << 3)
+
+/* scatter request flags */
+
+#define HTC_SCATTER_REQ_FLAGS_PARTIAL_BUNDLE (1 << 0)
+
+typedef struct _HTC_ENDPOINT {
+ HTC_ENDPOINT_ID Id;
+ HTC_SERVICE_ID ServiceID; /* service ID this endpoint is bound to
+ non-zero value means this endpoint is in use */
+ HTC_PACKET_QUEUE TxQueue; /* HTC frame buffer TX queue */
+ HTC_PACKET_QUEUE RxBuffers; /* HTC frame buffer RX list */
+ HTC_ENDPOINT_CREDIT_DIST CreditDist; /* credit distribution structure (exposed to driver layer) */
+ HTC_EP_CALLBACKS EpCallBacks; /* callbacks associated with this endpoint */
+ int MaxTxQueueDepth; /* max depth of the TX queue before we need to
+ call driver's full handler */
+ int MaxMsgLength; /* max length of endpoint message */
+ int TxProcessCount; /* reference count to continue tx processing */
+ HTC_PACKET_QUEUE RecvIndicationQueue; /* recv packets ready to be indicated */
+ int RxProcessCount; /* reference count to allow single processing context */
+ struct _HTC_TARGET *target; /* back pointer to target */
+ A_UINT8 SeqNo; /* TX seq no (helpful for debugging) */
+ A_UINT32 LocalConnectionFlags; /* local connection flags */
+#ifdef HTC_EP_STAT_PROFILING
+ HTC_ENDPOINT_STATS EndPointStats; /* endpoint statistics */
+#endif
+} HTC_ENDPOINT;
+
+#ifdef HTC_EP_STAT_PROFILING
+#define INC_HTC_EP_STAT(p,stat,count) (p)->EndPointStats.stat += (count);
+#else
+#define INC_HTC_EP_STAT(p,stat,count)
+#endif
+
+#define HTC_SERVICE_TX_PACKET_TAG HTC_TX_PACKET_TAG_INTERNAL
+
+#define NUM_CONTROL_BUFFERS 8
+#define NUM_CONTROL_TX_BUFFERS 2
+#define NUM_CONTROL_RX_BUFFERS (NUM_CONTROL_BUFFERS - NUM_CONTROL_TX_BUFFERS)
+
+typedef struct HTC_CONTROL_BUFFER {
+ HTC_PACKET HtcPacket;
+ A_UINT8 *Buffer;
+} HTC_CONTROL_BUFFER;
+
+#define HTC_RECV_WAIT_BUFFERS (1 << 0)
+#define HTC_OP_STATE_STOPPING (1 << 0)
+
+/* our HTC target state */
+typedef struct _HTC_TARGET {
+ HTC_ENDPOINT EndPoint[ENDPOINT_MAX];
+ HTC_CONTROL_BUFFER HTCControlBuffers[NUM_CONTROL_BUFFERS];
+ HTC_ENDPOINT_CREDIT_DIST *EpCreditDistributionListHead;
+ HTC_PACKET_QUEUE ControlBufferTXFreeList;
+ HTC_PACKET_QUEUE ControlBufferRXFreeList;
+ HTC_CREDIT_DIST_CALLBACK DistributeCredits;
+ HTC_CREDIT_INIT_CALLBACK InitCredits;
+ void *pCredDistContext;
+ int TargetCredits;
+ unsigned int TargetCreditSize;
+ A_MUTEX_T HTCLock;
+ A_MUTEX_T HTCRxLock;
+ A_MUTEX_T HTCTxLock;
+ AR6K_DEVICE Device; /* AR6K - specific state */
+ A_UINT32 OpStateFlags;
+ A_UINT32 RecvStateFlags;
+ HTC_ENDPOINT_ID EpWaitingForBuffers;
+ A_BOOL TargetFailure;
+#ifdef HTC_CAPTURE_LAST_FRAME
+ HTC_FRAME_HDR LastFrameHdr; /* useful for debugging */
+ A_UINT8 LastTrailer[256];
+ A_UINT8 LastTrailerLength;
+#endif
+ HTC_INIT_INFO HTCInitInfo;
+ A_UINT8 HTCTargetVersion;
+ int MaxMsgPerBundle; /* max messages per bundle for HTC */
+ A_BOOL SendBundlingEnabled; /* run time enable for send bundling (dynamic) */
+ int RecvBundlingEnabled; /* run time enable for recv bundling (dynamic) */
+} HTC_TARGET;
+
+#define HTC_STOPPING(t) ((t)->OpStateFlags & HTC_OP_STATE_STOPPING)
+#define LOCK_HTC(t) A_MUTEX_LOCK(&(t)->HTCLock);
+#define UNLOCK_HTC(t) A_MUTEX_UNLOCK(&(t)->HTCLock);
+#define LOCK_HTC_RX(t) A_MUTEX_LOCK(&(t)->HTCRxLock);
+#define UNLOCK_HTC_RX(t) A_MUTEX_UNLOCK(&(t)->HTCRxLock);
+#define LOCK_HTC_TX(t) A_MUTEX_LOCK(&(t)->HTCTxLock);
+#define UNLOCK_HTC_TX(t) A_MUTEX_UNLOCK(&(t)->HTCTxLock);
+
+#define GET_HTC_TARGET_FROM_HANDLE(hnd) ((HTC_TARGET *)(hnd))
+#define HTC_RECYCLE_RX_PKT(target,p,e) \
+{ \
+ if ((p)->PktInfo.AsRx.HTCRxFlags & HTC_RX_PKT_NO_RECYCLE) { \
+ HTC_PACKET_RESET_RX((p)); \
+ (p)->Status = A_ECANCELED; \
+ (e)->EpCallBacks.EpRecv((e)->EpCallBacks.pContext, \
+ (p)); \
+ } else { \
+ HTC_PACKET_RESET_RX((p)); \
+ HTCAddReceivePkt((HTC_HANDLE)(target),(p)); \
+ } \
+}
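+
+/* note: packets flagged HTC_RX_PKT_NO_RECYCLE were allocated by the caller's
+ * EpRecvAlloc/EpRecvAllocThresh callback, so the macro above hands them back
+ * through the endpoint's EpRecv handler with a cancelled status instead of
+ * re-queuing them as receive buffers */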
+
+/* internal HTC functions */
+void HTCControlTxComplete(void *Context, HTC_PACKET *pPacket);
+void HTCControlRecv(void *Context, HTC_PACKET *pPacket);
+A_STATUS HTCWaitforControlMessage(HTC_TARGET *target, HTC_PACKET **ppControlPacket);
+HTC_PACKET *HTCAllocControlBuffer(HTC_TARGET *target, HTC_PACKET_QUEUE *pList);
+void HTCFreeControlBuffer(HTC_TARGET *target, HTC_PACKET *pPacket, HTC_PACKET_QUEUE *pList);
+A_STATUS HTCIssueSend(HTC_TARGET *target, HTC_PACKET *pPacket);
+void HTCRecvCompleteHandler(void *Context, HTC_PACKET *pPacket);
+A_STATUS HTCRecvMessagePendingHandler(void *Context, A_UINT32 MsgLookAheads[], int NumLookAheads, A_BOOL *pAsyncProc, int *pNumPktsFetched);
+void HTCProcessCreditRpt(HTC_TARGET *target, HTC_CREDIT_REPORT *pRpt, int NumEntries, HTC_ENDPOINT_ID FromEndpoint);
+A_STATUS HTCSendSetupComplete(HTC_TARGET *target);
+void HTCFlushRecvBuffers(HTC_TARGET *target);
+void HTCFlushSendPkts(HTC_TARGET *target);
+
+#ifdef ATH_DEBUG_MODULE
+void DumpCreditDist(HTC_ENDPOINT_CREDIT_DIST *pEPDist);
+void DumpCreditDistStates(HTC_TARGET *target);
+void DebugDumpBytes(A_UCHAR *buffer, A_UINT16 length, char *pDescription);
+#endif
+
+static INLINE HTC_PACKET *HTC_ALLOC_CONTROL_TX(HTC_TARGET *target) {
+ HTC_PACKET *pPacket = HTCAllocControlBuffer(target,&target->ControlBufferTXFreeList);
+ if (pPacket != NULL) {
+ /* set payload pointer area with some headroom */
+ pPacket->pBuffer = pPacket->pBufferStart + HTC_HDR_LENGTH;
+ }
+ return pPacket;
+}
+
+#define HTC_FREE_CONTROL_TX(t,p) HTCFreeControlBuffer((t),(p),&(t)->ControlBufferTXFreeList)
+#define HTC_ALLOC_CONTROL_RX(t) HTCAllocControlBuffer((t),&(t)->ControlBufferRXFreeList)
+#define HTC_FREE_CONTROL_RX(t,p) \
+{ \
+ HTC_PACKET_RESET_RX(p); \
+ HTCFreeControlBuffer((t),(p),&(t)->ControlBufferRXFreeList); \
+}
+
+#define HTC_PREPARE_SEND_PKT(pP,sendflags,ctrl0,ctrl1) \
+{ \
+ A_UINT8 *pHdrBuf; \
+ (pP)->pBuffer -= HTC_HDR_LENGTH; \
+ pHdrBuf = (pP)->pBuffer; \
+ A_SET_UINT16_FIELD(pHdrBuf,HTC_FRAME_HDR,PayloadLen,(A_UINT16)(pP)->ActualLength); \
+ A_SET_UINT8_FIELD(pHdrBuf,HTC_FRAME_HDR,Flags,(sendflags)); \
+ A_SET_UINT8_FIELD(pHdrBuf,HTC_FRAME_HDR,EndpointID, (A_UINT8)(pP)->Endpoint); \
+ A_SET_UINT8_FIELD(pHdrBuf,HTC_FRAME_HDR,ControlBytes[0], (A_UINT8)(ctrl0)); \
+ A_SET_UINT8_FIELD(pHdrBuf,HTC_FRAME_HDR,ControlBytes[1], (A_UINT8)(ctrl1)); \
+}
+
+#define HTC_UNPREPARE_SEND_PKT(pP) \
+ (pP)->pBuffer += HTC_HDR_LENGTH;
+
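+/* illustrative sketch (not taken from the send path source): a sender would
+ * typically bracket the device-layer transfer with the two macros above, e.g.
+ *
+ *   HTC_PREPARE_SEND_PKT(pPacket, sendflags, ctrl0, ctrl1);  // prepend HTC header
+ *   status = HTCIssueSend(target, pPacket);                  // hand off to device layer
+ *   ...
+ *   HTC_UNPREPARE_SEND_PKT(pPacket);                         // restore payload pointer
+ */
+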
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* _HTC_INTERNAL_H_ */
diff --git a/drivers/staging/ath6kl/htc2/htc_recv.c b/drivers/staging/ath6kl/htc2/htc_recv.c
new file mode 100644
index 000000000000..3503657fe7d2
--- /dev/null
+++ b/drivers/staging/ath6kl/htc2/htc_recv.c
@@ -0,0 +1,1578 @@
+//------------------------------------------------------------------------------
+// <copyright file="htc_recv.c" company="Atheros">
+// Copyright (c) 2007-2010 Atheros Corporation. All rights reserved.
+//
+//
+// Permission to use, copy, modify, and/or distribute this software for any
+// purpose with or without fee is hereby granted, provided that the above
+// copyright notice and this permission notice appear in all copies.
+//
+// THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+// WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+// MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+// ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+// WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+// ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+// OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+//
+//
+//------------------------------------------------------------------------------
+//==============================================================================
+// Author(s): ="Atheros"
+//==============================================================================
+#include "htc_internal.h"
+
+#define HTCIssueRecv(t, p) \
+ DevRecvPacket(&(t)->Device, \
+ (p), \
+ (p)->ActualLength)
+
+#define DO_RCV_COMPLETION(e,q) DoRecvCompletion(e,q)
+
+#define DUMP_RECV_PKT_INFO(pP) \
+ AR_DEBUG_PRINTF(ATH_DEBUG_RECV, (" HTC RECV packet 0x%lX (%d bytes) (hdr:0x%X) on ep : %d \n", \
+ (unsigned long)(pP), \
+ (pP)->ActualLength, \
+ (pP)->PktInfo.AsRx.ExpectedHdr, \
+ (pP)->Endpoint))
+
+#ifdef HTC_EP_STAT_PROFILING
+#define HTC_RX_STAT_PROFILE(t,ep,numLookAheads) \
+{ \
+ INC_HTC_EP_STAT((ep), RxReceived, 1); \
+ if ((numLookAheads) == 1) { \
+ INC_HTC_EP_STAT((ep), RxLookAheads, 1); \
+ } else if ((numLookAheads) > 1) { \
+ INC_HTC_EP_STAT((ep), RxBundleLookAheads, 1); \
+ } \
+}
+#else
+#define HTC_RX_STAT_PROFILE(t,ep,lookAhead)
+#endif
+
+static void DoRecvCompletion(HTC_ENDPOINT *pEndpoint,
+ HTC_PACKET_QUEUE *pQueueToIndicate)
+{
+
+ do {
+
+ if (HTC_QUEUE_EMPTY(pQueueToIndicate)) {
+ /* nothing to indicate */
+ break;
+ }
+
+ if (pEndpoint->EpCallBacks.EpRecvPktMultiple != NULL) {
+ AR_DEBUG_PRINTF(ATH_DEBUG_RECV, (" HTC calling ep %d, recv multiple callback (%d pkts) \n",
+ pEndpoint->Id, HTC_PACKET_QUEUE_DEPTH(pQueueToIndicate)));
+ /* a recv multiple handler is being used, pass the queue to the handler */
+ pEndpoint->EpCallBacks.EpRecvPktMultiple(pEndpoint->EpCallBacks.pContext,
+ pQueueToIndicate);
+ INIT_HTC_PACKET_QUEUE(pQueueToIndicate);
+ } else {
+ HTC_PACKET *pPacket;
+ /* using legacy EpRecv */
+ do {
+ pPacket = HTC_PACKET_DEQUEUE(pQueueToIndicate);
+ AR_DEBUG_PRINTF(ATH_DEBUG_RECV, (" HTC calling ep %d recv callback on packet 0x%lX \n", \
+ pEndpoint->Id, (unsigned long)(pPacket)));
+ pEndpoint->EpCallBacks.EpRecv(pEndpoint->EpCallBacks.pContext, pPacket);
+ } while (!HTC_QUEUE_EMPTY(pQueueToIndicate));
+ }
+
+ } while (FALSE);
+
+}
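+
+/* note: when an EpRecvPktMultiple handler is registered, the whole queue is handed
+ * over in a single call and then re-initialized here since the handler takes
+ * ownership of the packets; otherwise each packet is dequeued and indicated through
+ * the legacy per-packet EpRecv callback */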
+
+static INLINE A_STATUS HTCProcessTrailer(HTC_TARGET *target,
+ A_UINT8 *pBuffer,
+ int Length,
+ A_UINT32 *pNextLookAheads,
+ int *pNumLookAheads,
+ HTC_ENDPOINT_ID FromEndpoint)
+{
+ HTC_RECORD_HDR *pRecord;
+ A_UINT8 *pRecordBuf;
+ HTC_LOOKAHEAD_REPORT *pLookAhead;
+ A_UINT8 *pOrigBuffer;
+ int origLength;
+ A_STATUS status;
+
+ AR_DEBUG_PRINTF(ATH_DEBUG_RECV, ("+HTCProcessTrailer (length:%d) \n", Length));
+
+ if (AR_DEBUG_LVL_CHECK(ATH_DEBUG_RECV)) {
+ AR_DEBUG_PRINTBUF(pBuffer,Length,"Recv Trailer");
+ }
+
+ pOrigBuffer = pBuffer;
+ origLength = Length;
+ status = A_OK;
+
+ while (Length > 0) {
+
+ if (Length < sizeof(HTC_RECORD_HDR)) {
+ status = A_EPROTO;
+ break;
+ }
+ /* these are byte aligned structs */
+ pRecord = (HTC_RECORD_HDR *)pBuffer;
+ Length -= sizeof(HTC_RECORD_HDR);
+ pBuffer += sizeof(HTC_RECORD_HDR);
+
+ if (pRecord->Length > Length) {
+ /* no room left in buffer for record */
+ AR_DEBUG_PRINTF(ATH_DEBUG_ERR,
+ (" invalid record length: %d (id:%d) buffer has: %d bytes left \n",
+ pRecord->Length, pRecord->RecordID, Length));
+ status = A_EPROTO;
+ break;
+ }
+ /* start of record follows the header */
+ pRecordBuf = pBuffer;
+
+ switch (pRecord->RecordID) {
+ case HTC_RECORD_CREDITS:
+ AR_DEBUG_ASSERT(pRecord->Length >= sizeof(HTC_CREDIT_REPORT));
+ HTCProcessCreditRpt(target,
+ (HTC_CREDIT_REPORT *)pRecordBuf,
+ pRecord->Length / (sizeof(HTC_CREDIT_REPORT)),
+ FromEndpoint);
+ break;
+ case HTC_RECORD_LOOKAHEAD:
+ AR_DEBUG_ASSERT(pRecord->Length >= sizeof(HTC_LOOKAHEAD_REPORT));
+ pLookAhead = (HTC_LOOKAHEAD_REPORT *)pRecordBuf;
+ if ((pLookAhead->PreValid == ((~pLookAhead->PostValid) & 0xFF)) &&
+ (pNextLookAheads != NULL)) {
+
+ AR_DEBUG_PRINTF(ATH_DEBUG_RECV,
+ (" LookAhead Report Found (pre valid:0x%X, post valid:0x%X) \n",
+ pLookAhead->PreValid,
+ pLookAhead->PostValid));
+
+ /* look ahead bytes are valid, copy them over */
+ ((A_UINT8 *)(&pNextLookAheads[0]))[0] = pLookAhead->LookAhead[0];
+ ((A_UINT8 *)(&pNextLookAheads[0]))[1] = pLookAhead->LookAhead[1];
+ ((A_UINT8 *)(&pNextLookAheads[0]))[2] = pLookAhead->LookAhead[2];
+ ((A_UINT8 *)(&pNextLookAheads[0]))[3] = pLookAhead->LookAhead[3];
+
+#ifdef ATH_DEBUG_MODULE
+ if (AR_DEBUG_LVL_CHECK(ATH_DEBUG_RECV)) {
+ DebugDumpBytes((A_UINT8 *)pNextLookAheads,4,"Next Look Ahead");
+ }
+#endif
+ /* just one normal lookahead */
+ *pNumLookAheads = 1;
+ }
+ break;
+ case HTC_RECORD_LOOKAHEAD_BUNDLE:
+ AR_DEBUG_ASSERT(pRecord->Length >= sizeof(HTC_BUNDLED_LOOKAHEAD_REPORT));
+ if (pRecord->Length >= sizeof(HTC_BUNDLED_LOOKAHEAD_REPORT) &&
+ (pNextLookAheads != NULL)) {
+ HTC_BUNDLED_LOOKAHEAD_REPORT *pBundledLookAheadRpt;
+ int i;
+
+ pBundledLookAheadRpt = (HTC_BUNDLED_LOOKAHEAD_REPORT *)pRecordBuf;
+
+#ifdef ATH_DEBUG_MODULE
+ if (AR_DEBUG_LVL_CHECK(ATH_DEBUG_RECV)) {
+ DebugDumpBytes(pRecordBuf,pRecord->Length,"Bundle LookAhead");
+ }
+#endif
+
+ if ((pRecord->Length / (sizeof(HTC_BUNDLED_LOOKAHEAD_REPORT))) >
+ HTC_HOST_MAX_MSG_PER_BUNDLE) {
+ /* this should never happen, the target restricts the number
+ * of messages per bundle configured by the host */
+ A_ASSERT(FALSE);
+ status = A_EPROTO;
+ break;
+ }
+
+ for (i = 0; i < (int)(pRecord->Length / (sizeof(HTC_BUNDLED_LOOKAHEAD_REPORT))); i++) {
+ ((A_UINT8 *)(&pNextLookAheads[i]))[0] = pBundledLookAheadRpt->LookAhead[0];
+ ((A_UINT8 *)(&pNextLookAheads[i]))[1] = pBundledLookAheadRpt->LookAhead[1];
+ ((A_UINT8 *)(&pNextLookAheads[i]))[2] = pBundledLookAheadRpt->LookAhead[2];
+ ((A_UINT8 *)(&pNextLookAheads[i]))[3] = pBundledLookAheadRpt->LookAhead[3];
+ pBundledLookAheadRpt++;
+ }
+
+ *pNumLookAheads = i;
+ }
+ break;
+ default:
+ AR_DEBUG_PRINTF(ATH_DEBUG_ERR, (" unhandled record: id:%d length:%d \n",
+ pRecord->RecordID, pRecord->Length));
+ break;
+ }
+
+ if (A_FAILED(status)) {
+ break;
+ }
+
+ /* advance buffer past this record for next time around */
+ pBuffer += pRecord->Length;
+ Length -= pRecord->Length;
+ }
+
+#ifdef ATH_DEBUG_MODULE
+ if (A_FAILED(status)) {
+ DebugDumpBytes(pOrigBuffer,origLength,"BAD Recv Trailer");
+ }
+#endif
+
+ AR_DEBUG_PRINTF(ATH_DEBUG_RECV, ("-HTCProcessTrailer \n"));
+ return status;
+
+}
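+
+/* summary of the trailer format handled above: a trailer is a sequence of records,
+ * each prefixed by an HTC_RECORD_HDR; credit reports, single lookahead reports and
+ * bundled lookahead reports are interpreted, and any other record ID is logged and
+ * skipped */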
+
+/* process a received message (i.e. strip off header, process any trailer data)
+ * note : locks must be released when this function is called */
+static A_STATUS HTCProcessRecvHeader(HTC_TARGET *target,
+ HTC_PACKET *pPacket,
+ A_UINT32 *pNextLookAheads,
+ int *pNumLookAheads)
+{
+ A_UINT8 temp;
+ A_UINT8 *pBuf;
+ A_STATUS status = A_OK;
+ A_UINT16 payloadLen;
+ A_UINT32 lookAhead;
+
+ pBuf = pPacket->pBuffer;
+
+ if (pNumLookAheads != NULL) {
+ *pNumLookAheads = 0;
+ }
+
+ AR_DEBUG_PRINTF(ATH_DEBUG_RECV, ("+HTCProcessRecvHeader \n"));
+
+ if (AR_DEBUG_LVL_CHECK(ATH_DEBUG_RECV)) {
+ AR_DEBUG_PRINTBUF(pBuf,pPacket->ActualLength,"HTC Recv PKT");
+ }
+
+ do {
+ /* note, we cannot assume the alignment of pBuffer, so we use the safe macros to
+ * retrieve 16 bit fields */
+ payloadLen = A_GET_UINT16_FIELD(pBuf, HTC_FRAME_HDR, PayloadLen);
+
+ ((A_UINT8 *)&lookAhead)[0] = pBuf[0];
+ ((A_UINT8 *)&lookAhead)[1] = pBuf[1];
+ ((A_UINT8 *)&lookAhead)[2] = pBuf[2];
+ ((A_UINT8 *)&lookAhead)[3] = pBuf[3];
+
+ if (pPacket->PktInfo.AsRx.HTCRxFlags & HTC_RX_PKT_REFRESH_HDR) {
+ /* refresh expected hdr, since this was unknown at the time we grabbed the packets
+ * as part of a bundle */
+ pPacket->PktInfo.AsRx.ExpectedHdr = lookAhead;
+ /* refresh actual length since we now have the real header */
+ pPacket->ActualLength = payloadLen + HTC_HDR_LENGTH;
+
+ /* validate the actual header that was refreshed */
+ if (pPacket->ActualLength > pPacket->BufferLength) {
+ AR_DEBUG_PRINTF(ATH_DEBUG_ERR,
+ ("Refreshed HDR payload length (%d) in bundled RECV is invalid (hdr: 0x%X) \n",
+ payloadLen, lookAhead));
+ /* limit this to max buffer just to print out some of the buffer */
+ pPacket->ActualLength = min(pPacket->ActualLength, pPacket->BufferLength);
+ status = A_EPROTO;
+ break;
+ }
+
+ if (pPacket->Endpoint != A_GET_UINT8_FIELD(pBuf, HTC_FRAME_HDR, EndpointID)) {
+ AR_DEBUG_PRINTF(ATH_DEBUG_ERR,
+ ("Refreshed HDR endpoint (%d) does not match expected endpoint (%d) \n",
+ A_GET_UINT8_FIELD(pBuf, HTC_FRAME_HDR, EndpointID), pPacket->Endpoint));
+ status = A_EPROTO;
+ break;
+ }
+ }
+
+ if (lookAhead != pPacket->PktInfo.AsRx.ExpectedHdr) {
+ /* somehow the lookahead that gave us the full read length did not
+ * reflect the actual header in the pending message */
+ AR_DEBUG_PRINTF(ATH_DEBUG_ERR,
+ ("HTCProcessRecvHeader, lookahead mismatch! (pPkt:0x%lX flags:0x%X) \n",
+ (unsigned long)pPacket, pPacket->PktInfo.AsRx.HTCRxFlags));
+#ifdef ATH_DEBUG_MODULE
+ DebugDumpBytes((A_UINT8 *)&pPacket->PktInfo.AsRx.ExpectedHdr,4,"Expected Message LookAhead");
+ DebugDumpBytes(pBuf,sizeof(HTC_FRAME_HDR),"Current Frame Header");
+#ifdef HTC_CAPTURE_LAST_FRAME
+ DebugDumpBytes((A_UINT8 *)&target->LastFrameHdr,sizeof(HTC_FRAME_HDR),"Last Frame Header");
+ if (target->LastTrailerLength != 0) {
+ DebugDumpBytes(target->LastTrailer,
+ target->LastTrailerLength,
+ "Last trailer");
+ }
+#endif
+#endif
+ status = A_EPROTO;
+ break;
+ }
+
+ /* get flags */
+ temp = A_GET_UINT8_FIELD(pBuf, HTC_FRAME_HDR, Flags);
+
+ if (temp & HTC_FLAGS_RECV_TRAILER) {
+ /* this packet has a trailer */
+
+ /* extract the trailer length in control byte 0 */
+ temp = A_GET_UINT8_FIELD(pBuf, HTC_FRAME_HDR, ControlBytes[0]);
+
+ if ((temp < sizeof(HTC_RECORD_HDR)) || (temp > payloadLen)) {
+ AR_DEBUG_PRINTF(ATH_DEBUG_ERR,
+ ("HTCProcessRecvHeader, invalid header (payloadlength should be :%d, CB[0] is:%d) \n",
+ payloadLen, temp));
+ status = A_EPROTO;
+ break;
+ }
+
+ if (pPacket->PktInfo.AsRx.HTCRxFlags & HTC_RX_PKT_IGNORE_LOOKAHEAD) {
+ /* this packet was fetched as part of an HTC bundle, the embedded lookahead is
+ * not valid since the next packet may have already been fetched as part of the
+ * bundle */
+ pNextLookAheads = NULL;
+ pNumLookAheads = NULL;
+ }
+
+ /* process trailer data that follows HDR + application payload */
+ status = HTCProcessTrailer(target,
+ (pBuf + HTC_HDR_LENGTH + payloadLen - temp),
+ temp,
+ pNextLookAheads,
+ pNumLookAheads,
+ pPacket->Endpoint);
+
+ if (A_FAILED(status)) {
+ break;
+ }
+
+#ifdef HTC_CAPTURE_LAST_FRAME
+ A_MEMCPY(target->LastTrailer, (pBuf + HTC_HDR_LENGTH + payloadLen - temp), temp);
+ target->LastTrailerLength = temp;
+#endif
+ /* trim length by trailer bytes */
+ pPacket->ActualLength -= temp;
+ }
+#ifdef HTC_CAPTURE_LAST_FRAME
+ else {
+ target->LastTrailerLength = 0;
+ }
+#endif
+
+ /* if we get to this point, the packet is good */
+ /* remove header and adjust length */
+ pPacket->pBuffer += HTC_HDR_LENGTH;
+ pPacket->ActualLength -= HTC_HDR_LENGTH;
+
+ } while (FALSE);
+
+ if (A_FAILED(status)) {
+ /* dump the whole packet */
+#ifdef ATH_DEBUG_MODULE
+ DebugDumpBytes(pBuf,pPacket->ActualLength < 256 ? pPacket->ActualLength : 256 ,"BAD HTC Recv PKT");
+#endif
+ } else {
+#ifdef HTC_CAPTURE_LAST_FRAME
+ A_MEMCPY(&target->LastFrameHdr,pBuf,sizeof(HTC_FRAME_HDR));
+#endif
+ if (AR_DEBUG_LVL_CHECK(ATH_DEBUG_RECV)) {
+ if (pPacket->ActualLength > 0) {
+ AR_DEBUG_PRINTBUF(pPacket->pBuffer,pPacket->ActualLength,"HTC - Application Msg");
+ }
+ }
+ }
+
+ AR_DEBUG_PRINTF(ATH_DEBUG_RECV, ("-HTCProcessRecvHeader \n"));
+ return status;
+}
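+
+/* on successful return the packet's pBuffer/ActualLength describe only the
+ * application payload: the HTC header has been stripped, any trailer bytes have been
+ * trimmed off, and lookahead(s) found in the trailer are reported back through
+ * pNextLookAheads/pNumLookAheads */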
+
+static INLINE void HTCAsyncRecvCheckMorePackets(HTC_TARGET *target,
+ A_UINT32 NextLookAheads[],
+ int NumLookAheads,
+ A_BOOL CheckMoreMsgs)
+{
+ /* was there a lookahead for the next packet? */
+ if (NumLookAheads > 0) {
+ A_STATUS nextStatus;
+ int fetched = 0;
+ AR_DEBUG_PRINTF(ATH_DEBUG_RECV,
+ ("HTCAsyncRecvCheckMorePackets - num lookaheads were non-zero : %d \n",
+ NumLookAheads));
+ /* force status re-check */
+ REF_IRQ_STATUS_RECHECK(&target->Device);
+ /* we have more packets, get the next packet fetch started */
+ nextStatus = HTCRecvMessagePendingHandler(target, NextLookAheads, NumLookAheads, NULL, &fetched);
+ if (A_EPROTO == nextStatus) {
+ AR_DEBUG_PRINTF(ATH_DEBUG_ERR,
+ ("Next look ahead from recv header was INVALID\n"));
+#ifdef ATH_DEBUG_MODULE
+ DebugDumpBytes((A_UINT8 *)NextLookAheads,
+ NumLookAheads * (sizeof(A_UINT32)),
+ "BAD lookaheads from lookahead report");
+#endif
+ }
+ if (A_SUCCESS(nextStatus) && !fetched) {
+ /* we could not fetch any more packets due to resources */
+ DevAsyncIrqProcessComplete(&target->Device);
+ }
+ } else {
+ if (CheckMoreMsgs) {
+ AR_DEBUG_PRINTF(ATH_DEBUG_RECV,
+ ("HTCAsyncRecvCheckMorePackets - rechecking for more messages...\n"));
+ /* if we did not get anything on the look-ahead,
+ * call device layer to asynchronously re-check for messages. If we can keep the async
+ * processing going we get better performance. If there is a pending message we will keep processing
+ * messages asynchronously which should pipeline things nicely */
+ DevCheckPendingRecvMsgsAsync(&target->Device);
+ } else {
+ AR_DEBUG_PRINTF(ATH_DEBUG_RECV, ("HTCAsyncRecvCheckMorePackets - no check \n"));
+ }
+ }
+
+
+}
+
+ /* unload the recv completion queue */
+static INLINE void DrainRecvIndicationQueue(HTC_TARGET *target, HTC_ENDPOINT *pEndpoint)
+{
+ HTC_PACKET_QUEUE recvCompletions;
+
+ AR_DEBUG_PRINTF(ATH_DEBUG_RECV, ("+DrainRecvIndicationQueue \n"));
+
+ INIT_HTC_PACKET_QUEUE(&recvCompletions);
+
+ LOCK_HTC_RX(target);
+
+ /* increment rx processing count on entry */
+ pEndpoint->RxProcessCount++;
+ if (pEndpoint->RxProcessCount > 1) {
+ pEndpoint->RxProcessCount--;
+ /* another thread or task is draining the RX completion queue on this endpoint
+ * that thread will reset the rx processing count when the queue is drained */
+ UNLOCK_HTC_RX(target);
+ return;
+ }
+
+ /******* at this point only 1 thread may enter ******/
+
+ while (TRUE) {
+
+ /* transfer items from main recv queue to the local one so we can release the lock */
+ HTC_PACKET_QUEUE_TRANSFER_TO_TAIL(&recvCompletions, &pEndpoint->RecvIndicationQueue);
+
+ if (HTC_QUEUE_EMPTY(&recvCompletions)) {
+ /* all drained */
+ break;
+ }
+
+ /* release lock while we do the recv completions
+ * other threads can now queue more recv completions */
+ UNLOCK_HTC_RX(target);
+
+ AR_DEBUG_PRINTF(ATH_DEBUG_RECV,
+ ("DrainRecvIndicationQueue : completing %d RECV packets \n",
+ HTC_PACKET_QUEUE_DEPTH(&recvCompletions)));
+ /* do completion */
+ DO_RCV_COMPLETION(pEndpoint,&recvCompletions);
+
+ /* re-acquire lock to grab some more completions */
+ LOCK_HTC_RX(target);
+ }
+
+ /* reset count */
+ pEndpoint->RxProcessCount = 0;
+ UNLOCK_HTC_RX(target);
+
+ AR_DEBUG_PRINTF(ATH_DEBUG_RECV, ("-DrainRecvIndicationQueue \n"));
+
+}
+
+ /* optimization for recv packets, we can indicate a "hint" that there are more
+ * single-packets to fetch on this endpoint */
+#define SET_MORE_RX_PACKET_INDICATION_FLAG(L,N,E,P) \
+ if ((N) > 0) { SetRxPacketIndicationFlags((L)[0],(E),(P)); }
+
+ /* for bundled frames, we can force the flag to indicate there are more packets */
+#define FORCE_MORE_RX_PACKET_INDICATION_FLAG(P) \
+ (P)->PktInfo.AsRx.IndicationFlags |= HTC_RX_FLAGS_INDICATE_MORE_PKTS;
+
+ /* note: this function can be called with the RX lock held */
+static INLINE void SetRxPacketIndicationFlags(A_UINT32 LookAhead,
+ HTC_ENDPOINT *pEndpoint,
+ HTC_PACKET *pPacket)
+{
+ HTC_FRAME_HDR *pHdr = (HTC_FRAME_HDR *)&LookAhead;
+ /* check to see if the "next" packet is from the same endpoint as the
+ completing packet */
+ if (pHdr->EndpointID == pPacket->Endpoint) {
+ /* check that there is a buffer available to actually fetch it */
+ if (!HTC_QUEUE_EMPTY(&pEndpoint->RxBuffers)) {
+ /* provide a hint that there are more RX packets to fetch */
+ FORCE_MORE_RX_PACKET_INDICATION_FLAG(pPacket);
+ }
+ }
+}
+
+
+/* asynchronous completion handler for recv packet fetching, when the device layer
+ * completes a read request, it will call this completion handler */
+void HTCRecvCompleteHandler(void *Context, HTC_PACKET *pPacket)
+{
+ HTC_TARGET *target = (HTC_TARGET *)Context;
+ HTC_ENDPOINT *pEndpoint;
+ A_UINT32 nextLookAheads[HTC_HOST_MAX_MSG_PER_BUNDLE];
+ int numLookAheads = 0;
+ A_STATUS status;
+ A_BOOL checkMorePkts = TRUE;
+
+ AR_DEBUG_PRINTF(ATH_DEBUG_RECV, ("+HTCRecvCompleteHandler (pkt:0x%lX, status:%d, ep:%d) \n",
+ (unsigned long)pPacket, pPacket->Status, pPacket->Endpoint));
+
+ A_ASSERT(!IS_DEV_IRQ_PROC_SYNC_MODE(&target->Device));
+ AR_DEBUG_ASSERT(pPacket->Endpoint < ENDPOINT_MAX);
+ pEndpoint = &target->EndPoint[pPacket->Endpoint];
+ pPacket->Completion = NULL;
+
+ /* get completion status */
+ status = pPacket->Status;
+
+ do {
+
+ if (A_FAILED(status)) {
+ AR_DEBUG_PRINTF(ATH_DEBUG_ERR, ("HTCRecvCompleteHandler: request failed (status:%d, ep:%d) \n",
+ pPacket->Status, pPacket->Endpoint));
+ break;
+ }
+ /* process the header for any trailer data */
+ status = HTCProcessRecvHeader(target,pPacket,nextLookAheads,&numLookAheads);
+
+ if (A_FAILED(status)) {
+ break;
+ }
+
+ if (pPacket->PktInfo.AsRx.HTCRxFlags & HTC_RX_PKT_IGNORE_LOOKAHEAD) {
+ /* this packet was part of a bundle that had to be broken up.
+ * It was fetched one message at a time. There may be other asynchronous reads queued behind this one.
+ * Do not issue another check for more packets since the last one in the series of requests
+ * will handle it */
+ checkMorePkts = FALSE;
+ }
+
+ DUMP_RECV_PKT_INFO(pPacket);
+ LOCK_HTC_RX(target);
+ SET_MORE_RX_PACKET_INDICATION_FLAG(nextLookAheads,numLookAheads,pEndpoint,pPacket);
+ /* we have a good packet, queue it to the completion queue */
+ HTC_PACKET_ENQUEUE(&pEndpoint->RecvIndicationQueue,pPacket);
+ HTC_RX_STAT_PROFILE(target,pEndpoint,numLookAheads);
+ UNLOCK_HTC_RX(target);
+
+ /* check for more recv packets before indicating */
+ HTCAsyncRecvCheckMorePackets(target,nextLookAheads,numLookAheads,checkMorePkts);
+
+ } while (FALSE);
+
+ if (A_FAILED(status)) {
+ AR_DEBUG_PRINTF(ATH_DEBUG_ERR,
+ ("HTCRecvCompleteHandler , message fetch failed (status = %d) \n",
+ status));
+ /* recycle this packet */
+ HTC_RECYCLE_RX_PKT(target, pPacket, pEndpoint);
+ } else {
+ /* a good packet was queued, drain the queue */
+ DrainRecvIndicationQueue(target,pEndpoint);
+ }
+
+ AR_DEBUG_PRINTF(ATH_DEBUG_RECV, ("-HTCRecvCompleteHandler\n"));
+}
+
+/* synchronously wait for a control message from the target,
+ * This function is used at initialization time ONLY. At init messages
+ * on ENDPOINT 0 are expected. */
+A_STATUS HTCWaitforControlMessage(HTC_TARGET *target, HTC_PACKET **ppControlPacket)
+{
+ A_STATUS status;
+ A_UINT32 lookAhead;
+ HTC_PACKET *pPacket = NULL;
+ HTC_FRAME_HDR *pHdr;
+
+ AR_DEBUG_PRINTF(ATH_DEBUG_RECV,("+HTCWaitforControlMessage \n"));
+
+ do {
+
+ *ppControlPacket = NULL;
+
+ /* call the polling function to see if we have a message */
+ status = DevPollMboxMsgRecv(&target->Device,
+ &lookAhead,
+ HTC_TARGET_RESPONSE_TIMEOUT);
+
+ if (A_FAILED(status)) {
+ break;
+ }
+
+ AR_DEBUG_PRINTF(ATH_DEBUG_RECV,
+ ("HTCWaitforControlMessage : lookAhead : 0x%X \n", lookAhead));
+
+ /* check the lookahead */
+ pHdr = (HTC_FRAME_HDR *)&lookAhead;
+
+ if (pHdr->EndpointID != ENDPOINT_0) {
+ /* unexpected endpoint number, should be zero */
+ AR_DEBUG_ASSERT(FALSE);
+ status = A_EPROTO;
+ break;
+ }
+
+ if (A_FAILED(status)) {
+ /* bad message */
+ AR_DEBUG_ASSERT(FALSE);
+ status = A_EPROTO;
+ break;
+ }
+
+ pPacket = HTC_ALLOC_CONTROL_RX(target);
+
+ if (pPacket == NULL) {
+ AR_DEBUG_ASSERT(FALSE);
+ status = A_NO_MEMORY;
+ break;
+ }
+
+ pPacket->PktInfo.AsRx.HTCRxFlags = 0;
+ pPacket->PktInfo.AsRx.ExpectedHdr = lookAhead;
+ pPacket->ActualLength = pHdr->PayloadLen + HTC_HDR_LENGTH;
+
+ if (pPacket->ActualLength > pPacket->BufferLength) {
+ AR_DEBUG_ASSERT(FALSE);
+ status = A_EPROTO;
+ break;
+ }
+
+ /* we want synchronous operation */
+ pPacket->Completion = NULL;
+
+ /* get the message from the device, this will block */
+ status = HTCIssueRecv(target, pPacket);
+
+ if (A_FAILED(status)) {
+ break;
+ }
+
+ /* process receive header */
+ status = HTCProcessRecvHeader(target,pPacket,NULL,NULL);
+
+ pPacket->Status = status;
+
+ if (A_FAILED(status)) {
+ AR_DEBUG_PRINTF(ATH_DEBUG_ERR,
+ ("HTCWaitforControlMessage, HTCProcessRecvHeader failed (status = %d) \n",
+ status));
+ break;
+ }
+
+ /* give the caller this control message packet, they are responsible to free */
+ *ppControlPacket = pPacket;
+
+ } while (FALSE);
+
+ if (A_FAILED(status)) {
+ if (pPacket != NULL) {
+ /* cleanup buffer on error */
+ HTC_FREE_CONTROL_RX(target,pPacket);
+ }
+ }
+
+ AR_DEBUG_PRINTF(ATH_DEBUG_RECV,("-HTCWaitforControlMessage \n"));
+
+ return status;
+}
+
+static A_STATUS AllocAndPrepareRxPackets(HTC_TARGET *target,
+ A_UINT32 LookAheads[],
+ int Messages,
+ HTC_ENDPOINT *pEndpoint,
+ HTC_PACKET_QUEUE *pQueue)
+{
+ A_STATUS status = A_OK;
+ HTC_PACKET *pPacket;
+ HTC_FRAME_HDR *pHdr;
+ int i,j;
+ int numMessages;
+ int fullLength;
+ A_BOOL noRecycle;
+
+ /* lock RX while we assemble the packet buffers */
+ LOCK_HTC_RX(target);
+
+ for (i = 0; i < Messages; i++) {
+
+ pHdr = (HTC_FRAME_HDR *)&LookAheads[i];
+
+ if (pHdr->EndpointID >= ENDPOINT_MAX) {
+ AR_DEBUG_PRINTF(ATH_DEBUG_ERR,("Invalid Endpoint in look-ahead: %d \n",pHdr->EndpointID));
+ /* invalid endpoint */
+ status = A_EPROTO;
+ break;
+ }
+
+ if (pHdr->EndpointID != pEndpoint->Id) {
+ AR_DEBUG_PRINTF(ATH_DEBUG_ERR,("Invalid Endpoint in look-ahead: %d should be : %d (index:%d)\n",
+ pHdr->EndpointID, pEndpoint->Id, i));
+ /* invalid endpoint */
+ status = A_EPROTO;
+ break;
+ }
+
+ if (pHdr->PayloadLen > HTC_MAX_PAYLOAD_LENGTH) {
+ AR_DEBUG_PRINTF(ATH_DEBUG_ERR,("Payload length %d exceeds max HTC : %d !\n",
+ pHdr->PayloadLen, (A_UINT32)HTC_MAX_PAYLOAD_LENGTH));
+ status = A_EPROTO;
+ break;
+ }
+
+ if (0 == pEndpoint->ServiceID) {
+ AR_DEBUG_PRINTF(ATH_DEBUG_ERR,("Endpoint %d is not connected !\n",pHdr->EndpointID));
+ /* endpoint isn't even connected */
+ status = A_EPROTO;
+ break;
+ }
+
+ if ((pHdr->Flags & HTC_FLAGS_RECV_BUNDLE_CNT_MASK) == 0) {
+ /* HTC header only indicates 1 message to fetch */
+ numMessages = 1;
+ } else {
+ /* HTC header indicates that every packet to follow has the same padded length so that it can
+ * be optimally fetched as a full bundle */
+ numMessages = (pHdr->Flags & HTC_FLAGS_RECV_BUNDLE_CNT_MASK) >> HTC_FLAGS_RECV_BUNDLE_CNT_SHIFT;
+ /* the count doesn't include the starter frame, just a count of frames to follow */
+ numMessages++;
+ A_ASSERT(numMessages <= target->MaxMsgPerBundle);
+ INC_HTC_EP_STAT(pEndpoint, RxBundleIndFromHdr, 1);
+ AR_DEBUG_PRINTF(ATH_DEBUG_RECV,
+ ("HTC header indicates :%d messages can be fetched as a bundle \n",numMessages));
+ }
+
+ fullLength = DEV_CALC_RECV_PADDED_LEN(&target->Device,pHdr->PayloadLen + sizeof(HTC_FRAME_HDR));
+
+ /* get packet buffers for each message, if there was a bundle detected in the header,
+ * use pHdr as a template to fetch all packets in the bundle */
+ for (j = 0; j < numMessages; j++) {
+
+ /* reset flag, any packets allocated using the RecvAlloc() API cannot be recycled on cleanup,
+ * they must be explicitly returned */
+ noRecycle = FALSE;
+
+ if (pEndpoint->EpCallBacks.EpRecvAlloc != NULL) {
+ UNLOCK_HTC_RX(target);
+ noRecycle = TRUE;
+ /* user is using a per-packet allocation callback */
+ pPacket = pEndpoint->EpCallBacks.EpRecvAlloc(pEndpoint->EpCallBacks.pContext,
+ pEndpoint->Id,
+ fullLength);
+ LOCK_HTC_RX(target);
+
+ } else if ((pEndpoint->EpCallBacks.EpRecvAllocThresh != NULL) &&
+ (fullLength > pEndpoint->EpCallBacks.RecvAllocThreshold)) {
+ INC_HTC_EP_STAT(pEndpoint,RxAllocThreshHit,1);
+ INC_HTC_EP_STAT(pEndpoint,RxAllocThreshBytes,pHdr->PayloadLen);
+ /* threshold was hit, call the special recv allocation callback */
+ UNLOCK_HTC_RX(target);
+ noRecycle = TRUE;
+ /* user wants to allocate packets above a certain threshold */
+ pPacket = pEndpoint->EpCallBacks.EpRecvAllocThresh(pEndpoint->EpCallBacks.pContext,
+ pEndpoint->Id,
+ fullLength);
+ LOCK_HTC_RX(target);
+
+ } else {
+ /* user is using a refill handler that can refill multiple HTC buffers */
+
+ /* get a packet from the endpoint recv queue */
+ pPacket = HTC_PACKET_DEQUEUE(&pEndpoint->RxBuffers);
+
+ if (NULL == pPacket) {
+ /* check for refill handler */
+ if (pEndpoint->EpCallBacks.EpRecvRefill != NULL) {
+ UNLOCK_HTC_RX(target);
+ /* call the re-fill handler */
+ pEndpoint->EpCallBacks.EpRecvRefill(pEndpoint->EpCallBacks.pContext,
+ pEndpoint->Id);
+ LOCK_HTC_RX(target);
+ /* check if we have more buffers */
+ pPacket = HTC_PACKET_DEQUEUE(&pEndpoint->RxBuffers);
+ /* fall through */
+ }
+ }
+ }
+
+ if (NULL == pPacket) {
+ /* this is not an error, we simply need to mark that we are waiting for buffers.*/
+ target->RecvStateFlags |= HTC_RECV_WAIT_BUFFERS;
+ target->EpWaitingForBuffers = pEndpoint->Id;
+ status = A_NO_RESOURCE;
+ break;
+ }
+
+ AR_DEBUG_ASSERT(pPacket->Endpoint == pEndpoint->Id);
+ /* clear flags */
+ pPacket->PktInfo.AsRx.HTCRxFlags = 0;
+ pPacket->PktInfo.AsRx.IndicationFlags = 0;
+ pPacket->Status = A_OK;
+
+ if (noRecycle) {
+ /* flag that these packets cannot be recycled, they have to be returned to the
+ * user */
+ pPacket->PktInfo.AsRx.HTCRxFlags |= HTC_RX_PKT_NO_RECYCLE;
+ }
+ /* add packet to queue (also in case we need to clean up below) */
+ HTC_PACKET_ENQUEUE(pQueue,pPacket);
+
+ if (HTC_STOPPING(target)) {
+ status = A_ECANCELED;
+ break;
+ }
+
+ /* make sure this message can fit in the endpoint buffer */
+ if ((A_UINT32)fullLength > pPacket->BufferLength) {
+ AR_DEBUG_PRINTF(ATH_DEBUG_ERR,
+ ("Payload Length Error : header reports payload of: %d (%d) endpoint buffer size: %d \n",
+ pHdr->PayloadLen, fullLength, pPacket->BufferLength));
+ status = A_EPROTO;
+ break;
+ }
+
+ if (j > 0) {
+ /* for messages fetched in a bundle the expected lookahead is unknown since we
+ * are only using the lookahead of the first packet as a template of what to
+ * expect for lengths */
+ /* flag that once we get the real HTC header we need to refresh the information */
+ pPacket->PktInfo.AsRx.HTCRxFlags |= HTC_RX_PKT_REFRESH_HDR;
+ /* set it to something invalid */
+ pPacket->PktInfo.AsRx.ExpectedHdr = 0xFFFFFFFF;
+ } else {
+
+ pPacket->PktInfo.AsRx.ExpectedHdr = LookAheads[i]; /* set expected look ahead */
+ }
+ /* set the amount of data to fetch */
+ pPacket->ActualLength = pHdr->PayloadLen + HTC_HDR_LENGTH;
+ }
+
+ if (A_FAILED(status)) {
+ if (A_NO_RESOURCE == status) {
+ /* this is actually okay */
+ status = A_OK;
+ }
+ break;
+ }
+
+ }
+
+ UNLOCK_HTC_RX(target);
+
+ if (A_FAILED(status)) {
+ while (!HTC_QUEUE_EMPTY(pQueue)) {
+ pPacket = HTC_PACKET_DEQUEUE(pQueue);
+ /* recycle all allocated packets */
+ HTC_RECYCLE_RX_PKT(target,pPacket,&target->EndPoint[pPacket->Endpoint]);
+ }
+ }
+
+ return status;
+}
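+
+/* note on the buffer sourcing order above: a per-packet EpRecvAlloc callback takes
+ * precedence, then EpRecvAllocThresh for frames above the configured threshold, and
+ * finally the endpoint's pre-queued RxBuffers (topped up via EpRecvRefill if empty);
+ * packets obtained from the callbacks are flagged HTC_RX_PKT_NO_RECYCLE so cleanup
+ * returns them to the caller rather than re-queuing them */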
+
+static void HTCAsyncRecvScatterCompletion(HIF_SCATTER_REQ *pScatterReq)
+{
+ int i;
+ HTC_PACKET *pPacket;
+ HTC_ENDPOINT *pEndpoint;
+ A_UINT32 lookAheads[HTC_HOST_MAX_MSG_PER_BUNDLE];
+ int numLookAheads = 0;
+ HTC_TARGET *target = (HTC_TARGET *)pScatterReq->Context;
+ A_STATUS status;
+ A_BOOL partialBundle = FALSE;
+ HTC_PACKET_QUEUE localRecvQueue;
+ A_BOOL procError = FALSE;
+
+ AR_DEBUG_PRINTF(ATH_DEBUG_RECV,("+HTCAsyncRecvScatterCompletion TotLen: %d Entries: %d\n",
+ pScatterReq->TotalLength, pScatterReq->ValidScatterEntries));
+
+ A_ASSERT(!IS_DEV_IRQ_PROC_SYNC_MODE(&target->Device));
+
+ if (A_FAILED(pScatterReq->CompletionStatus)) {
+ AR_DEBUG_PRINTF(ATH_DEBUG_ERR,("** Recv Scatter Request Failed: %d \n",pScatterReq->CompletionStatus));
+ }
+
+ if (pScatterReq->CallerFlags & HTC_SCATTER_REQ_FLAGS_PARTIAL_BUNDLE) {
+ partialBundle = TRUE;
+ }
+
+ DEV_FINISH_SCATTER_OPERATION(pScatterReq);
+
+ INIT_HTC_PACKET_QUEUE(&localRecvQueue);
+
+ pPacket = (HTC_PACKET *)pScatterReq->ScatterList[0].pCallerContexts[0];
+ /* note: all packets in a scatter req are for the same endpoint ! */
+ pEndpoint = &target->EndPoint[pPacket->Endpoint];
+
+ /* walk through the scatter list and process */
+ /* **** NOTE: DO NOT HOLD ANY LOCKS here, HTCProcessRecvHeader can take the TX lock
+ * as it processes credit reports */
+ for (i = 0; i < pScatterReq->ValidScatterEntries; i++) {
+ pPacket = (HTC_PACKET *)pScatterReq->ScatterList[i].pCallerContexts[0];
+ A_ASSERT(pPacket != NULL);
+ /* reset count, we are only interested in the look ahead in the last packet when we
+ * break out of this loop */
+ numLookAheads = 0;
+
+ if (A_SUCCESS(pScatterReq->CompletionStatus)) {
+ /* process header for each of the recv packets */
+ status = HTCProcessRecvHeader(target,pPacket,lookAheads,&numLookAheads);
+ } else {
+ status = A_ERROR;
+ }
+
+ if (A_SUCCESS(status)) {
+#ifdef HTC_EP_STAT_PROFILING
+ LOCK_HTC_RX(target);
+ HTC_RX_STAT_PROFILE(target,pEndpoint,numLookAheads);
+ INC_HTC_EP_STAT(pEndpoint, RxPacketsBundled, 1);
+ UNLOCK_HTC_RX(target);
+#endif
+ if (i == (pScatterReq->ValidScatterEntries - 1)) {
+ /* last packet's more packets flag is set based on the lookahead */
+ SET_MORE_RX_PACKET_INDICATION_FLAG(lookAheads,numLookAheads,pEndpoint,pPacket);
+ } else {
+ /* packets in a bundle automatically have this flag set */
+ FORCE_MORE_RX_PACKET_INDICATION_FLAG(pPacket);
+ }
+
+ DUMP_RECV_PKT_INFO(pPacket);
+ /* since we can't hold a lock in this loop, we insert into our local recv queue for
+ * storage until we can transfer them to the recv completion queue */
+ HTC_PACKET_ENQUEUE(&localRecvQueue,pPacket);
+
+ } else {
+ AR_DEBUG_PRINTF(ATH_DEBUG_ERR,(" Recv packet scatter entry %d failed (out of %d) \n",
+ i, pScatterReq->ValidScatterEntries));
+ /* recycle failed recv */
+ HTC_RECYCLE_RX_PKT(target, pPacket, pEndpoint);
+ /* set flag and continue processing the remaining scatter entries */
+ procError = TRUE;
+ }
+
+ }
+
+ /* free scatter request */
+ DEV_FREE_SCATTER_REQ(&target->Device,pScatterReq);
+
+ LOCK_HTC_RX(target);
+ /* transfer the packets in the local recv queue to the recv completion queue */
+ HTC_PACKET_QUEUE_TRANSFER_TO_TAIL(&pEndpoint->RecvIndicationQueue, &localRecvQueue);
+
+ UNLOCK_HTC_RX(target);
+
+ if (!procError) {
+ /* pipeline the next check (asynchronously) for more packets */
+ HTCAsyncRecvCheckMorePackets(target,
+ lookAheads,
+ numLookAheads,
+ partialBundle ? FALSE : TRUE);
+ }
+
+ /* now drain the indication queue */
+ DrainRecvIndicationQueue(target,pEndpoint);
+
+ AR_DEBUG_PRINTF(ATH_DEBUG_RECV,("-HTCAsyncRecvScatterCompletion \n"));
+}
+
+static A_STATUS HTCIssueRecvPacketBundle(HTC_TARGET *target,
+ HTC_PACKET_QUEUE *pRecvPktQueue,
+ HTC_PACKET_QUEUE *pSyncCompletionQueue,
+ int *pNumPacketsFetched,
+ A_BOOL PartialBundle)
+{
+ A_STATUS status = A_OK;
+ HIF_SCATTER_REQ *pScatterReq;
+ int i, totalLength;
+ int pktsToScatter;
+ HTC_PACKET *pPacket;
+ A_BOOL asyncMode = (pSyncCompletionQueue == NULL) ? TRUE : FALSE;
+ int scatterSpaceRemaining = DEV_GET_MAX_BUNDLE_RECV_LENGTH(&target->Device);
+
+ pktsToScatter = HTC_PACKET_QUEUE_DEPTH(pRecvPktQueue);
+ pktsToScatter = min(pktsToScatter, target->MaxMsgPerBundle);
+
+ if ((HTC_PACKET_QUEUE_DEPTH(pRecvPktQueue) - pktsToScatter) > 0) {
+ /* we were forced to split this bundle receive operation;
+ * all packets in this partial bundle must have their lookaheads ignored */
+ PartialBundle = TRUE;
+ /* this would only happen if the target ignored our max bundle limit */
+ AR_DEBUG_PRINTF(ATH_DEBUG_WARN,
+ ("HTCIssueRecvPacketBundle : partial bundle detected num:%d , %d \n",
+ HTC_PACKET_QUEUE_DEPTH(pRecvPktQueue), pktsToScatter));
+ }
+
+ totalLength = 0;
+
+ AR_DEBUG_PRINTF(ATH_DEBUG_RECV,("+HTCIssueRecvPacketBundle (Numpackets: %d , actual : %d) \n",
+ HTC_PACKET_QUEUE_DEPTH(pRecvPktQueue), pktsToScatter));
+
+ do {
+
+ pScatterReq = DEV_ALLOC_SCATTER_REQ(&target->Device);
+
+ if (pScatterReq == NULL) {
+ /* no scatter resources left, just let caller handle it the legacy way */
+ break;
+ }
+
+ pScatterReq->CallerFlags = 0;
+
+ if (PartialBundle) {
+ /* mark that this is a partial bundle, this has special ramifications to the
+ * scatter completion routine */
+ pScatterReq->CallerFlags |= HTC_SCATTER_REQ_FLAGS_PARTIAL_BUNDLE;
+ }
+
+ /* convert HTC packets to scatter list */
+ for (i = 0; i < pktsToScatter; i++) {
+ int paddedLength;
+
+ pPacket = HTC_PACKET_DEQUEUE(pRecvPktQueue);
+ A_ASSERT(pPacket != NULL);
+
+ paddedLength = DEV_CALC_RECV_PADDED_LEN(&target->Device, pPacket->ActualLength);
+
+ if ((scatterSpaceRemaining - paddedLength) < 0) {
+ /* exceeds what we can transfer, put the packet back */
+ HTC_PACKET_ENQUEUE_TO_HEAD(pRecvPktQueue,pPacket);
+ break;
+ }
+
+ scatterSpaceRemaining -= paddedLength;
+
+ if (PartialBundle || (i < (pktsToScatter - 1))) {
+ /* packets 0..n-1 cannot be checked for look-aheads since we are fetching a bundle;
+ * the last packet, however, can have its lookahead used */
+ pPacket->PktInfo.AsRx.HTCRxFlags |= HTC_RX_PKT_IGNORE_LOOKAHEAD;
+ }
+
+ /* note: 1 HTC packet per scatter entry */
+ /* set up the scatter entry for this packet */
+ pScatterReq->ScatterList[i].pBuffer = pPacket->pBuffer;
+ pScatterReq->ScatterList[i].Length = paddedLength;
+
+ pPacket->PktInfo.AsRx.HTCRxFlags |= HTC_RX_PKT_PART_OF_BUNDLE;
+
+ if (asyncMode) {
+ /* save HTC packet for async completion routine */
+ pScatterReq->ScatterList[i].pCallerContexts[0] = pPacket;
+ } else {
+ /* queue to caller's sync completion queue, caller will unload this when we return */
+ HTC_PACKET_ENQUEUE(pSyncCompletionQueue,pPacket);
+ }
+
+ A_ASSERT(pScatterReq->ScatterList[i].Length);
+ totalLength += pScatterReq->ScatterList[i].Length;
+ }
+
+ pScatterReq->TotalLength = totalLength;
+ pScatterReq->ValidScatterEntries = i;
+
+ if (asyncMode) {
+ pScatterReq->CompletionRoutine = HTCAsyncRecvScatterCompletion;
+ pScatterReq->Context = target;
+ }
+
+ status = DevSubmitScatterRequest(&target->Device, pScatterReq, DEV_SCATTER_READ, asyncMode);
+
+ if (A_SUCCESS(status)) {
+ *pNumPacketsFetched = i;
+ }
+
+ if (!asyncMode) {
+ /* free scatter request */
+ DEV_FREE_SCATTER_REQ(&target->Device, pScatterReq);
+ }
+
+ } while (FALSE);
+
+ AR_DEBUG_PRINTF(ATH_DEBUG_RECV,("-HTCIssueRecvPacketBundle (status:%d) (fetched:%d) \n",
+ status,*pNumPacketsFetched));
+
+ return status;
+}
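+
+/* the number of packets folded into a single scatter request above is bounded by the
+ * queue depth, the negotiated MaxMsgPerBundle and the device layer's maximum bundled
+ * receive length; whatever remains in pRecvPktQueue is fetched by the caller one
+ * packet at a time */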
+
+static INLINE void CheckRecvWaterMark(HTC_ENDPOINT *pEndpoint)
+{
+ /* see if endpoint is using a refill watermark
+ * ** no need to use a lock here, since we are only inspecting...
+ * caller must not hold locks when calling this function */
+ if (pEndpoint->EpCallBacks.RecvRefillWaterMark > 0) {
+ if (HTC_PACKET_QUEUE_DEPTH(&pEndpoint->RxBuffers) < pEndpoint->EpCallBacks.RecvRefillWaterMark) {
+ /* call the re-fill handler before we continue */
+ pEndpoint->EpCallBacks.EpRecvRefill(pEndpoint->EpCallBacks.pContext,
+ pEndpoint->Id);
+ }
+ }
+}
+
+/* callback when device layer or lookahead report parsing detects a pending message */
+A_STATUS HTCRecvMessagePendingHandler(void *Context, A_UINT32 MsgLookAheads[], int NumLookAheads, A_BOOL *pAsyncProc, int *pNumPktsFetched)
+{
+ HTC_TARGET *target = (HTC_TARGET *)Context;
+ A_STATUS status = A_OK;
+ HTC_PACKET *pPacket;
+ HTC_ENDPOINT *pEndpoint;
+ A_BOOL asyncProc = FALSE;
+ A_UINT32 lookAheads[HTC_HOST_MAX_MSG_PER_BUNDLE];
+ int pktsFetched;
+ HTC_PACKET_QUEUE recvPktQueue, syncCompletedPktsQueue;
+ A_BOOL partialBundle;
+ HTC_ENDPOINT_ID id;
+ int totalFetched = 0;
+
+ AR_DEBUG_PRINTF(ATH_DEBUG_RECV,("+HTCRecvMessagePendingHandler NumLookAheads: %d \n",NumLookAheads));
+
+ if (pNumPktsFetched != NULL) {
+ *pNumPktsFetched = 0;
+ }
+
+ if (IS_DEV_IRQ_PROCESSING_ASYNC_ALLOWED(&target->Device)) {
+ /* We use async mode to get the packets if the device layer supports it.
+ * The device layer interfaces with HIF, which may have restrictions on
+ * how interrupts are processed */
+ asyncProc = TRUE;
+ }
+
+ if (pAsyncProc != NULL) {
+ /* indicate to caller how we decided to process this */
+ *pAsyncProc = asyncProc;
+ }
+
+ if (NumLookAheads > HTC_HOST_MAX_MSG_PER_BUNDLE) {
+ A_ASSERT(FALSE);
+ return A_EPROTO;
+ }
+
+ /* on first entry copy the lookaheads into our temp array for processing */
+ A_MEMCPY(lookAheads, MsgLookAheads, (sizeof(A_UINT32)) * NumLookAheads);
+
+ while (TRUE) {
+
+ /* reset packets queues */
+ INIT_HTC_PACKET_QUEUE(&recvPktQueue);
+ INIT_HTC_PACKET_QUEUE(&syncCompletedPktsQueue);
+
+ if (NumLookAheads > HTC_HOST_MAX_MSG_PER_BUNDLE) {
+ status = A_EPROTO;
+ A_ASSERT(FALSE);
+ break;
+ }
+
+ /* first lookahead sets the expected endpoint IDs for all packets in a bundle */
+ id = ((HTC_FRAME_HDR *)&lookAheads[0])->EndpointID;
+ pEndpoint = &target->EndPoint[id];
+
+ if (id >= ENDPOINT_MAX) {
+ AR_DEBUG_PRINTF(ATH_DEBUG_ERR,("MsgPend, Invalid Endpoint in look-ahead: %d \n",id));
+ status = A_EPROTO;
+ break;
+ }
+
+ /* try to allocate as many HTC RX packets indicated by the lookaheads
+ * these packets are stored in the recvPkt queue */
+ status = AllocAndPrepareRxPackets(target,
+ lookAheads,
+ NumLookAheads,
+ pEndpoint,
+ &recvPktQueue);
+ if (A_FAILED(status)) {
+ break;
+ }
+
+ if (HTC_PACKET_QUEUE_DEPTH(&recvPktQueue) >= 2) {
+ /* a recv bundle was detected, force IRQ status re-check again */
+ REF_IRQ_STATUS_RECHECK(&target->Device);
+ }
+
+ totalFetched += HTC_PACKET_QUEUE_DEPTH(&recvPktQueue);
+
+ /* we've got packet buffers for all we can currently fetch,
+ * this count is not valid anymore */
+ NumLookAheads = 0;
+ partialBundle = FALSE;
+
+ /* now go fetch the list of HTC packets */
+ while (!HTC_QUEUE_EMPTY(&recvPktQueue)) {
+
+ pktsFetched = 0;
+
+ if (target->RecvBundlingEnabled && (HTC_PACKET_QUEUE_DEPTH(&recvPktQueue) > 1)) {
+ /* there are enough packets to attempt a bundle transfer and recv bundling is allowed */
+ status = HTCIssueRecvPacketBundle(target,
+ &recvPktQueue,
+ asyncProc ? NULL : &syncCompletedPktsQueue,
+ &pktsFetched,
+ partialBundle);
+ if (A_FAILED(status)) {
+ break;
+ }
+
+ if (HTC_PACKET_QUEUE_DEPTH(&recvPktQueue) != 0) {
+ /* we couldn't fetch all packets at one time, this creates a broken
+ * bundle */
+ partialBundle = TRUE;
+ }
+ }
+
+ /* see if the previous operation fetched any packets using bundling */
+ if (0 == pktsFetched) {
+ /* dequeue one packet */
+ pPacket = HTC_PACKET_DEQUEUE(&recvPktQueue);
+ A_ASSERT(pPacket != NULL);
+
+ if (asyncProc) {
+ /* we use async mode to get the packet if the device layer supports it
+ * set our callback and context */
+ pPacket->Completion = HTCRecvCompleteHandler;
+ pPacket->pContext = target;
+ } else {
+ /* fully synchronous */
+ pPacket->Completion = NULL;
+ }
+
+ if (HTC_PACKET_QUEUE_DEPTH(&recvPktQueue) > 0) {
+ /* lookaheads in all packets except the last one in the bundle must be ignored */
+ pPacket->PktInfo.AsRx.HTCRxFlags |= HTC_RX_PKT_IGNORE_LOOKAHEAD;
+ }
+
+ /* go fetch the packet */
+ status = HTCIssueRecv(target, pPacket);
+ if (A_FAILED(status)) {
+ break;
+ }
+
+ if (!asyncProc) {
+ /* sent synchronously, queue this packet for synchronous completion */
+ HTC_PACKET_ENQUEUE(&syncCompletedPktsQueue,pPacket);
+ }
+
+ }
+
+ }
+
+ if (A_SUCCESS(status)) {
+ CheckRecvWaterMark(pEndpoint);
+ }
+
+ if (asyncProc) {
+ /* we did this asynchronously so we can get out of the loop; the async processing
+ * creates a chain of requests to continue processing pending messages in the
+ * context of callbacks */
+ break;
+ }
+
+ /* synchronous handling */
+ if (target->Device.DSRCanYield) {
+ /* for the SYNC case, increment count that tracks when the DSR should yield */
+ target->Device.CurrentDSRRecvCount++;
+ }
+
+ /* in the sync case, all packet buffers are now filled,
+ * we can process each packet, check lookaheads and then repeat */
+
+ /* unload sync completion queue */
+ while (!HTC_QUEUE_EMPTY(&syncCompletedPktsQueue)) {
+ HTC_PACKET_QUEUE container;
+
+ pPacket = HTC_PACKET_DEQUEUE(&syncCompletedPktsQueue);
+ A_ASSERT(pPacket != NULL);
+
+ pEndpoint = &target->EndPoint[pPacket->Endpoint];
+ /* reset count on each iteration, we are only interested in the last packet's lookahead
+ * information when we break out of this loop */
+ NumLookAheads = 0;
+ /* process header for each of the recv packets
+ * note: the lookahead of the last packet is useful for us to continue in this loop */
+ status = HTCProcessRecvHeader(target,pPacket,lookAheads,&NumLookAheads);
+ if (A_FAILED(status)) {
+ break;
+ }
+
+ if (HTC_QUEUE_EMPTY(&syncCompletedPktsQueue)) {
+ /* last packet's more packets flag is set based on the lookahead */
+ SET_MORE_RX_PACKET_INDICATION_FLAG(lookAheads,NumLookAheads,pEndpoint,pPacket);
+ } else {
+ /* packets in a bundle automatically have this flag set */
+ FORCE_MORE_RX_PACKET_INDICATION_FLAG(pPacket);
+ }
+ /* good packet, indicate it */
+ HTC_RX_STAT_PROFILE(target,pEndpoint,NumLookAheads);
+
+ if (pPacket->PktInfo.AsRx.HTCRxFlags & HTC_RX_PKT_PART_OF_BUNDLE) {
+ INC_HTC_EP_STAT(pEndpoint, RxPacketsBundled, 1);
+ }
+
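+ /* wrap the single packet in a temporary one-entry queue so the common completion path can be used */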
+ INIT_HTC_PACKET_QUEUE_AND_ADD(&container,pPacket);
+ DO_RCV_COMPLETION(pEndpoint,&container);
+ }
+
+ if (A_FAILED(status)) {
+ break;
+ }
+
+ if (NumLookAheads == 0) {
+ /* no more look aheads */
+ break;
+ }
+
+ /* when we process recv synchronously we need to check if we should yield and stop
+ * fetching more packets indicated by the embedded lookaheads */
+ if (target->Device.DSRCanYield) {
+ if (DEV_CHECK_RECV_YIELD(&target->Device)) {
+ /* break out, don't fetch any more packets */
+ break;
+ }
+ }
+
+
+ /* check whether other OS contexts have queued any WMI command/data for WLAN.
+ * This check is needed only if WLAN Tx and Rx happen in the same thread context */
+ A_CHECK_DRV_TX();
+
+ /* for SYNCH processing, if we get here, we are running through the loop again due to a detected lookahead.
+ * Set flag that we should re-check IRQ status registers again before leaving IRQ processing,
+ * this can net better performance in high throughput situations */
+ REF_IRQ_STATUS_RECHECK(&target->Device);
+ }
+
+ if (A_FAILED(status)) {
+ AR_DEBUG_PRINTF(ATH_DEBUG_ERR,
+ ("Failed to get pending recv messages (%d) \n",status));
+ /* cleanup any packets we allocated but didn't use to actually fetch any packets */
+ while (!HTC_QUEUE_EMPTY(&recvPktQueue)) {
+ pPacket = HTC_PACKET_DEQUEUE(&recvPktQueue);
+ /* clean up packets */
+ HTC_RECYCLE_RX_PKT(target, pPacket, &target->EndPoint[pPacket->Endpoint]);
+ }
+ /* cleanup any packets in sync completion queue */
+ while (!HTC_QUEUE_EMPTY(&syncCompletedPktsQueue)) {
+ pPacket = HTC_PACKET_DEQUEUE(&syncCompletedPktsQueue);
+ /* clean up packets */
+ HTC_RECYCLE_RX_PKT(target, pPacket, &target->EndPoint[pPacket->Endpoint]);
+ }
+ if (HTC_STOPPING(target)) {
+ AR_DEBUG_PRINTF(ATH_DEBUG_WARN,
+ (" Host is going to stop. blocking receiver for HTCStop.. \n"));
+ DevStopRecv(&target->Device, asyncProc ? DEV_STOP_RECV_ASYNC : DEV_STOP_RECV_SYNC);
+ }
+ }
+ /* before leaving, check to see if host ran out of buffers and needs to stop the
+ * receiver */
+ if (target->RecvStateFlags & HTC_RECV_WAIT_BUFFERS) {
+ AR_DEBUG_PRINTF(ATH_DEBUG_WARN,
+ (" Host has no RX buffers, blocking receiver to prevent overrun.. \n"));
+ /* try to stop receive at the device layer */
+ DevStopRecv(&target->Device, asyncProc ? DEV_STOP_RECV_ASYNC : DEV_STOP_RECV_SYNC);
+ }
+
+ if (pNumPktsFetched != NULL) {
+ *pNumPktsFetched = totalFetched;
+ }
+
+ AR_DEBUG_PRINTF(ATH_DEBUG_RECV,("-HTCRecvMessagePendingHandler \n"));
+
+ return status;
+}
+
+A_STATUS HTCAddReceivePktMultiple(HTC_HANDLE HTCHandle, HTC_PACKET_QUEUE *pPktQueue)
+{
+ HTC_TARGET *target = GET_HTC_TARGET_FROM_HANDLE(HTCHandle);
+ HTC_ENDPOINT *pEndpoint;
+ A_BOOL unblockRecv = FALSE;
+ A_STATUS status = A_OK;
+ HTC_PACKET *pFirstPacket;
+
+ pFirstPacket = HTC_GET_PKT_AT_HEAD(pPktQueue);
+
+ if (NULL == pFirstPacket) {
+ A_ASSERT(FALSE);
+ return A_EINVAL;
+ }
+
+ AR_DEBUG_ASSERT(pFirstPacket->Endpoint < ENDPOINT_MAX);
+
+ AR_DEBUG_PRINTF(ATH_DEBUG_RECV,
+ ("+- HTCAddReceivePktMultiple : endPointId: %d, cnt:%d, length: %d\n",
+ pFirstPacket->Endpoint,
+ HTC_PACKET_QUEUE_DEPTH(pPktQueue),
+ pFirstPacket->BufferLength));
+
+ do {
+
+ pEndpoint = &target->EndPoint[pFirstPacket->Endpoint];
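+ /* all packets in the caller's queue are assumed to target this same endpoint */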
+
+ LOCK_HTC_RX(target);
+
+ if (HTC_STOPPING(target)) {
+ HTC_PACKET *pPacket;
+
+ UNLOCK_HTC_RX(target);
+
+ /* walk through queue and mark each one canceled */
+ HTC_PACKET_QUEUE_ITERATE_ALLOW_REMOVE(pPktQueue,pPacket) {
+ pPacket->Status = A_ECANCELED;
+ } HTC_PACKET_QUEUE_ITERATE_END;
+
+ DO_RCV_COMPLETION(pEndpoint,pPktQueue);
+ break;
+ }
+
+ /* store receive packets */
+ HTC_PACKET_QUEUE_TRANSFER_TO_TAIL(&pEndpoint->RxBuffers, pPktQueue);
+
+ /* check if we are blocked waiting for a new buffer */
+ if (target->RecvStateFlags & HTC_RECV_WAIT_BUFFERS) {
+ if (target->EpWaitingForBuffers == pFirstPacket->Endpoint) {
+ AR_DEBUG_PRINTF(ATH_DEBUG_RECV,(" receiver was blocked on ep:%d, unblocking.. \n",
+ target->EpWaitingForBuffers));
+ target->RecvStateFlags &= ~HTC_RECV_WAIT_BUFFERS;
+ target->EpWaitingForBuffers = ENDPOINT_MAX;
+ unblockRecv = TRUE;
+ }
+ }
+
+ UNLOCK_HTC_RX(target);
+
+ if (unblockRecv && !HTC_STOPPING(target)) {
+ /* TODO : implement a buffer threshold count? */
+ DevEnableRecv(&target->Device,DEV_ENABLE_RECV_SYNC);
+ }
+
+ } while (FALSE);
+
+ return status;
+}
+
+/* Makes a buffer available to the HTC module */
+A_STATUS HTCAddReceivePkt(HTC_HANDLE HTCHandle, HTC_PACKET *pPacket)
+{
+ HTC_PACKET_QUEUE queue;
+ INIT_HTC_PACKET_QUEUE_AND_ADD(&queue,pPacket);
+ return HTCAddReceivePktMultiple(HTCHandle, &queue);
+}
+
+void HTCUnblockRecv(HTC_HANDLE HTCHandle)
+{
+ HTC_TARGET *target = GET_HTC_TARGET_FROM_HANDLE(HTCHandle);
+ A_BOOL unblockRecv = FALSE;
+
+ LOCK_HTC_RX(target);
+
+ /* check if we are blocked waiting for a new buffer */
+ if (target->RecvStateFlags & HTC_RECV_WAIT_BUFFERS) {
+ AR_DEBUG_PRINTF(ATH_DEBUG_RECV,("HTCUnblockRx : receiver was blocked on ep:%d, unblocking.. \n",
+ target->EpWaitingForBuffers));
+ target->RecvStateFlags &= ~HTC_RECV_WAIT_BUFFERS;
+ target->EpWaitingForBuffers = ENDPOINT_MAX;
+ unblockRecv = TRUE;
+ }
+
+ UNLOCK_HTC_RX(target);
+
+ if (unblockRecv && !HTC_STOPPING(target)) {
+ /* re-enable */
+ DevEnableRecv(&target->Device,DEV_ENABLE_RECV_ASYNC);
+ }
+}
+
+static void HTCFlushRxQueue(HTC_TARGET *target, HTC_ENDPOINT *pEndpoint, HTC_PACKET_QUEUE *pQueue)
+{
+ HTC_PACKET *pPacket;
+ HTC_PACKET_QUEUE container;
+
+ LOCK_HTC_RX(target);
+
+ while (1) {
+ pPacket = HTC_PACKET_DEQUEUE(pQueue);
+ if (NULL == pPacket) {
+ break;
+ }
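+ /* drop the lock while indicating the packet back to the caller; it is re-acquired before the next dequeue */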
+ UNLOCK_HTC_RX(target);
+ pPacket->Status = A_ECANCELED;
+ pPacket->ActualLength = 0;
+ AR_DEBUG_PRINTF(ATH_DEBUG_RECV, (" Flushing RX packet:0x%lX, length:%d, ep:%d \n",
+ (unsigned long)pPacket, pPacket->BufferLength, pPacket->Endpoint));
+ INIT_HTC_PACKET_QUEUE_AND_ADD(&container,pPacket);
+ /* give the packet back */
+ DO_RCV_COMPLETION(pEndpoint,&container);
+ LOCK_HTC_RX(target);
+ }
+
+ UNLOCK_HTC_RX(target);
+}
+
+static void HTCFlushEndpointRX(HTC_TARGET *target, HTC_ENDPOINT *pEndpoint)
+{
+ /* flush any recv indications not already made */
+ HTCFlushRxQueue(target,pEndpoint,&pEndpoint->RecvIndicationQueue);
+ /* flush any rx buffers */
+ HTCFlushRxQueue(target,pEndpoint,&pEndpoint->RxBuffers);
+}
+
+void HTCFlushRecvBuffers(HTC_TARGET *target)
+{
+ HTC_ENDPOINT *pEndpoint;
+ int i;
+
+ for (i = ENDPOINT_0; i < ENDPOINT_MAX; i++) {
+ pEndpoint = &target->EndPoint[i];
+ if (pEndpoint->ServiceID == 0) {
+ /* not in use.. */
+ continue;
+ }
+ HTCFlushEndpointRX(target,pEndpoint);
+ }
+}
+
+
+void HTCEnableRecv(HTC_HANDLE HTCHandle)
+{
+ HTC_TARGET *target = GET_HTC_TARGET_FROM_HANDLE(HTCHandle);
+
+ if (!HTC_STOPPING(target)) {
+ /* re-enable */
+ DevEnableRecv(&target->Device,DEV_ENABLE_RECV_SYNC);
+ }
+}
+
+void HTCDisableRecv(HTC_HANDLE HTCHandle)
+{
+ HTC_TARGET *target = GET_HTC_TARGET_FROM_HANDLE(HTCHandle);
+
+ if (!HTC_STOPPING(target)) {
+ /* disable */
+ DevStopRecv(&target->Device,DEV_ENABLE_RECV_SYNC);
+ }
+}
+
+int HTCGetNumRecvBuffers(HTC_HANDLE HTCHandle,
+ HTC_ENDPOINT_ID Endpoint)
+{
+ HTC_TARGET *target = GET_HTC_TARGET_FROM_HANDLE(HTCHandle);
+ return HTC_PACKET_QUEUE_DEPTH(&(target->EndPoint[Endpoint].RxBuffers));
+}
+
+A_STATUS HTCWaitForPendingRecv(HTC_HANDLE HTCHandle,
+ A_UINT32 TimeoutInMs,
+ A_BOOL *pbIsRecvPending)
+{
+ A_STATUS status = A_OK;
+ HTC_TARGET *target = GET_HTC_TARGET_FROM_HANDLE(HTCHandle);
+
+ status = DevWaitForPendingRecv(&target->Device,
+ TimeoutInMs,
+ pbIsRecvPending);
+
+ return status;
+}
diff --git a/drivers/staging/ath6kl/htc2/htc_send.c b/drivers/staging/ath6kl/htc2/htc_send.c
new file mode 100644
index 000000000000..bc7ee7848263
--- /dev/null
+++ b/drivers/staging/ath6kl/htc2/htc_send.c
@@ -0,0 +1,1023 @@
+//------------------------------------------------------------------------------
+// <copyright file="htc_send.c" company="Atheros">
+// Copyright (c) 2007-2010 Atheros Corporation. All rights reserved.
+//
+//
+// Permission to use, copy, modify, and/or distribute this software for any
+// purpose with or without fee is hereby granted, provided that the above
+// copyright notice and this permission notice appear in all copies.
+//
+// THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+// WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+// MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+// ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+// WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+// ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+// OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+//
+//
+//------------------------------------------------------------------------------
+//==============================================================================
+// Author(s): ="Atheros"
+//==============================================================================
+#include "htc_internal.h"
+
+typedef enum _HTC_SEND_QUEUE_RESULT {
+ HTC_SEND_QUEUE_OK = 0, /* packet was queued */
+ HTC_SEND_QUEUE_DROP = 1, /* this packet should be dropped */
+} HTC_SEND_QUEUE_RESULT;
+
+#define DO_EP_TX_COMPLETION(ep,q) DoSendCompletion(ep,q)
+
+/* call the distribute credits callback with the distribution */
+#define DO_DISTRIBUTION(t,reason,description,pList) \
+{ \
+ AR_DEBUG_PRINTF(ATH_DEBUG_SEND, \
+ (" calling distribute function (%s) (dfn:0x%lX, ctxt:0x%lX, dist:0x%lX) \n", \
+ (description), \
+ (unsigned long)(t)->DistributeCredits, \
+ (unsigned long)(t)->pCredDistContext, \
+ (unsigned long)pList)); \
+ (t)->DistributeCredits((t)->pCredDistContext, \
+ (pList), \
+ (reason)); \
+}
+
+static void DoSendCompletion(HTC_ENDPOINT *pEndpoint,
+ HTC_PACKET_QUEUE *pQueueToIndicate)
+{
+ do {
+
+ if (HTC_QUEUE_EMPTY(pQueueToIndicate)) {
+ /* nothing to indicate */
+ break;
+ }
+
+ if (pEndpoint->EpCallBacks.EpTxCompleteMultiple != NULL) {
+ AR_DEBUG_PRINTF(ATH_DEBUG_SEND, (" HTC calling ep %d, send complete multiple callback (%d pkts) \n",
+ pEndpoint->Id, HTC_PACKET_QUEUE_DEPTH(pQueueToIndicate)));
+ /* a multiple send complete handler is being used, pass the queue to the handler */
+ pEndpoint->EpCallBacks.EpTxCompleteMultiple(pEndpoint->EpCallBacks.pContext,
+ pQueueToIndicate);
+ /* all packets are now owned by the callback, reset queue to be safe */
+ INIT_HTC_PACKET_QUEUE(pQueueToIndicate);
+ } else {
+ HTC_PACKET *pPacket;
+ /* using legacy EpTxComplete */
+ do {
+ pPacket = HTC_PACKET_DEQUEUE(pQueueToIndicate);
+ AR_DEBUG_PRINTF(ATH_DEBUG_SEND, (" HTC calling ep %d send complete callback on packet 0x%lX \n", \
+ pEndpoint->Id, (unsigned long)(pPacket)));
+ pEndpoint->EpCallBacks.EpTxComplete(pEndpoint->EpCallBacks.pContext, pPacket);
+ } while (!HTC_QUEUE_EMPTY(pQueueToIndicate));
+ }
+
+ } while (FALSE);
+
+}
+
+/* do final completion on sent packet */
+static INLINE void CompleteSentPacket(HTC_TARGET *target, HTC_ENDPOINT *pEndpoint, HTC_PACKET *pPacket)
+{
+ pPacket->Completion = NULL;
+
+ if (A_FAILED(pPacket->Status)) {
+ AR_DEBUG_PRINTF(ATH_DEBUG_ERR,
+ ("CompleteSentPacket: request failed (status:%d, ep:%d, length:%d creds:%d) \n",
+ pPacket->Status, pPacket->Endpoint, pPacket->ActualLength, pPacket->PktInfo.AsTx.CreditsUsed));
+ /* on failure to submit, reclaim credits for this packet */
+ LOCK_HTC_TX(target);
+ pEndpoint->CreditDist.TxCreditsToDist += pPacket->PktInfo.AsTx.CreditsUsed;
+ pEndpoint->CreditDist.TxQueueDepth = HTC_PACKET_QUEUE_DEPTH(&pEndpoint->TxQueue);
+ DO_DISTRIBUTION(target,
+ HTC_CREDIT_DIST_SEND_COMPLETE,
+ "Send Complete",
+ target->EpCreditDistributionListHead->pNext);
+ UNLOCK_HTC_TX(target);
+ }
+ /* first, fixup the head room we allocated */
+ pPacket->pBuffer += HTC_HDR_LENGTH;
+}
+
+/* our internal send packet completion handler when packets are submitted to the AR6K device
+ * layer */
+static void HTCSendPktCompletionHandler(void *Context, HTC_PACKET *pPacket)
+{
+ HTC_TARGET *target = (HTC_TARGET *)Context;
+ HTC_ENDPOINT *pEndpoint = &target->EndPoint[pPacket->Endpoint];
+ HTC_PACKET_QUEUE container;
+
+ CompleteSentPacket(target,pEndpoint,pPacket);
+ INIT_HTC_PACKET_QUEUE_AND_ADD(&container,pPacket);
+ /* do completion */
+ DO_EP_TX_COMPLETION(pEndpoint,&container);
+}
+
+A_STATUS HTCIssueSend(HTC_TARGET *target, HTC_PACKET *pPacket)
+{
+ A_STATUS status;
+ A_BOOL sync = FALSE;
+
+ if (pPacket->Completion == NULL) {
+ /* mark that this request was synchronously issued */
+ sync = TRUE;
+ }
+
+ AR_DEBUG_PRINTF(ATH_DEBUG_SEND,
+ ("+-HTCIssueSend: transmit length : %d (%s) \n",
+ pPacket->ActualLength + (A_UINT32)HTC_HDR_LENGTH,
+ sync ? "SYNC" : "ASYNC" ));
+
+ /* send message to device */
+ status = DevSendPacket(&target->Device,
+ pPacket,
+ pPacket->ActualLength + HTC_HDR_LENGTH);
+
+ if (sync) {
+ /* use local sync variable. If this was issued asynchronously, pPacket is no longer
+ * safe to access. */
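+ /* for the sync case, restore the head room consumed by the HTC header here (async sends do this in CompleteSentPacket) */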
+ pPacket->pBuffer += HTC_HDR_LENGTH;
+ }
+
+ /* if this request was asynchronous, the packet completion routine will be invoked by
+ * the device layer when the HIF layer completes the request */
+
+ return status;
+}
+
+ /* get HTC send packets from the TX queue on an endpoint */
+static INLINE void GetHTCSendPackets(HTC_TARGET *target,
+ HTC_ENDPOINT *pEndpoint,
+ HTC_PACKET_QUEUE *pQueue)
+{
+ int creditsRequired;
+ int remainder;
+ A_UINT8 sendFlags;
+ HTC_PACKET *pPacket;
+ unsigned int transferLength;
+
+ /****** NOTE : the TX lock is held when this function is called *****************/
+ AR_DEBUG_PRINTF(ATH_DEBUG_SEND,("+GetHTCSendPackets \n"));
+
+ /* loop and grab as many packets out of the queue as we can */
+ while (TRUE) {
+
+ sendFlags = 0;
+ /* get packet at head, but don't remove it */
+ pPacket = HTC_GET_PKT_AT_HEAD(&pEndpoint->TxQueue);
+ if (pPacket == NULL) {
+ break;
+ }
+
+ AR_DEBUG_PRINTF(ATH_DEBUG_SEND,(" Got head packet:0x%lX , Queue Depth: %d\n",
+ (unsigned long)pPacket, HTC_PACKET_QUEUE_DEPTH(&pEndpoint->TxQueue)));
+
+ transferLength = DEV_CALC_SEND_PADDED_LEN(&target->Device, pPacket->ActualLength + HTC_HDR_LENGTH);
+
+ if (transferLength <= target->TargetCreditSize) {
+ creditsRequired = 1;
+ } else {
+ /* figure out how many credits this message requires */
+ creditsRequired = transferLength / target->TargetCreditSize;
+ remainder = transferLength % target->TargetCreditSize;
+
+ if (remainder) {
+ creditsRequired++;
+ }
+ }
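+ /* i.e. a rounded-up division; for example, a 200-byte padded transfer with a 128-byte target credit size needs 2 credits */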
+
+ AR_DEBUG_PRINTF(ATH_DEBUG_SEND,(" Creds Required:%d Got:%d\n",
+ creditsRequired, pEndpoint->CreditDist.TxCredits));
+
+ if (pEndpoint->CreditDist.TxCredits < creditsRequired) {
+
+ /* not enough credits */
+ if (pPacket->Endpoint == ENDPOINT_0) {
+ /* leave it in the queue */
+ break;
+ }
+ /* since this is not endpoint 0, invoke the registered distribution function
+ * so the driver layer may provide more credits if it can.
+ * We pass the credit distribution list starting at the endpoint in question */
+
+ /* set how many credits we need */
+ pEndpoint->CreditDist.TxCreditsSeek =
+ creditsRequired - pEndpoint->CreditDist.TxCredits;
+ DO_DISTRIBUTION(target,
+ HTC_CREDIT_DIST_SEEK_CREDITS,
+ "Seek Credits",
+ &pEndpoint->CreditDist);
+ pEndpoint->CreditDist.TxCreditsSeek = 0;
+
+ if (pEndpoint->CreditDist.TxCredits < creditsRequired) {
+ /* still not enough credits to send, leave packet in the queue */
+ AR_DEBUG_PRINTF(ATH_DEBUG_SEND,
+ (" Not enough credits for ep %d leaving packet in queue..\n",
+ pPacket->Endpoint));
+ break;
+ }
+
+ }
+
+ pEndpoint->CreditDist.TxCredits -= creditsRequired;
+ INC_HTC_EP_STAT(pEndpoint, TxCreditsConsummed, creditsRequired);
+
+ /* check if we need credits back from the target */
+ if (pEndpoint->CreditDist.TxCredits < pEndpoint->CreditDist.TxCreditsPerMaxMsg) {
+ /* we are getting low on credits, see if we can ask for more from the distribution function */
+ pEndpoint->CreditDist.TxCreditsSeek =
+ pEndpoint->CreditDist.TxCreditsPerMaxMsg - pEndpoint->CreditDist.TxCredits;
+
+ DO_DISTRIBUTION(target,
+ HTC_CREDIT_DIST_SEEK_CREDITS,
+ "Seek Credits",
+ &pEndpoint->CreditDist);
+
+ pEndpoint->CreditDist.TxCreditsSeek = 0;
+ /* see if we were successful in getting more */
+ if (pEndpoint->CreditDist.TxCredits < pEndpoint->CreditDist.TxCreditsPerMaxMsg) {
+ /* tell the target we need credits ASAP! */
+ sendFlags |= HTC_FLAGS_NEED_CREDIT_UPDATE;
+ INC_HTC_EP_STAT(pEndpoint, TxCreditLowIndications, 1);
+ AR_DEBUG_PRINTF(ATH_DEBUG_SEND,(" Host Needs Credits \n"));
+ }
+ }
+
+ /* now we can fully dequeue */
+ pPacket = HTC_PACKET_DEQUEUE(&pEndpoint->TxQueue);
+ /* save the number of credits this packet consumed */
+ pPacket->PktInfo.AsTx.CreditsUsed = creditsRequired;
+ /* all TX packets are handled asynchronously */
+ pPacket->Completion = HTCSendPktCompletionHandler;
+ pPacket->pContext = target;
+ INC_HTC_EP_STAT(pEndpoint, TxIssued, 1);
+ /* save send flags */
+ pPacket->PktInfo.AsTx.SendFlags = sendFlags;
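+ /* stamp the packet with this endpoint's running transmit sequence number */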
+ pPacket->PktInfo.AsTx.SeqNo = pEndpoint->SeqNo;
+ pEndpoint->SeqNo++;
+ /* queue this packet into the caller's queue */
+ HTC_PACKET_ENQUEUE(pQueue,pPacket);
+ }
+
+ AR_DEBUG_PRINTF(ATH_DEBUG_SEND,("-GetHTCSendPackets \n"));
+
+}
+
+static void HTCAsyncSendScatterCompletion(HIF_SCATTER_REQ *pScatterReq)
+{
+ int i;
+ HTC_PACKET *pPacket;
+ HTC_ENDPOINT *pEndpoint = (HTC_ENDPOINT *)pScatterReq->Context;
+ HTC_TARGET *target = (HTC_TARGET *)pEndpoint->target;
+ A_STATUS status = A_OK;
+ HTC_PACKET_QUEUE sendCompletes;
+
+ INIT_HTC_PACKET_QUEUE(&sendCompletes);
+
+ AR_DEBUG_PRINTF(ATH_DEBUG_SEND,("+HTCAsyncSendScatterCompletion TotLen: %d Entries: %d\n",
+ pScatterReq->TotalLength, pScatterReq->ValidScatterEntries));
+
+ DEV_FINISH_SCATTER_OPERATION(pScatterReq);
+
+ if (A_FAILED(pScatterReq->CompletionStatus)) {
+ AR_DEBUG_PRINTF(ATH_DEBUG_ERR,("** Send Scatter Request Failed: %d \n",pScatterReq->CompletionStatus));
+ status = A_ERROR;
+ }
+
+ /* walk through the scatter list and process */
+ for (i = 0; i < pScatterReq->ValidScatterEntries; i++) {
+ pPacket = (HTC_PACKET *)(pScatterReq->ScatterList[i].pCallerContexts[0]);
+ A_ASSERT(pPacket != NULL);
+ pPacket->Status = status;
+ CompleteSentPacket(target,pEndpoint,pPacket);
+ /* add it to the completion queue */
+ HTC_PACKET_ENQUEUE(&sendCompletes, pPacket);
+ }
+
+ /* free scatter request */
+ DEV_FREE_SCATTER_REQ(&target->Device,pScatterReq);
+ /* complete all packets */
+ DO_EP_TX_COMPLETION(pEndpoint,&sendCompletes);
+
+ AR_DEBUG_PRINTF(ATH_DEBUG_SEND,("-HTCAsyncSendScatterCompletion \n"));
+}
+
+ /* drain a queue and send as bundles
+ * this function may return without fully draining the queue under the following conditions :
+ * - scatter resources are exhausted
+ * - a message that will consume a partial credit will stop the bundling process early
+ * - we drop below the minimum number of messages for a bundle
+ * */
+static void HTCIssueSendBundle(HTC_ENDPOINT *pEndpoint,
+ HTC_PACKET_QUEUE *pQueue,
+ int *pBundlesSent,
+ int *pTotalBundlesPkts)
+{
+ int pktsToScatter;
+ unsigned int scatterSpaceRemaining;
+ HIF_SCATTER_REQ *pScatterReq = NULL;
+ int i, packetsInScatterReq;
+ unsigned int transferLength;
+ HTC_PACKET *pPacket;
+ A_BOOL done = FALSE;
+ int bundlesSent = 0;
+ int totalPktsInBundle = 0;
+ HTC_TARGET *target = pEndpoint->target;
+ int creditRemainder = 0;
+ int creditPad;
+
+ AR_DEBUG_PRINTF(ATH_DEBUG_SEND,("+HTCIssueSendBundle \n"));
+
+ while (!done) {
+
+ pktsToScatter = HTC_PACKET_QUEUE_DEPTH(pQueue);
+ pktsToScatter = min(pktsToScatter, target->MaxMsgPerBundle);
+
+ if (pktsToScatter < HTC_MIN_HTC_MSGS_TO_BUNDLE) {
+ /* not enough to bundle */
+ break;
+ }
+
+ pScatterReq = DEV_ALLOC_SCATTER_REQ(&target->Device);
+
+ if (pScatterReq == NULL) {
+ /* no scatter resources */
+ AR_DEBUG_PRINTF(ATH_DEBUG_SEND,(" No more scatter resources \n"));
+ break;
+ }
+
+ AR_DEBUG_PRINTF(ATH_DEBUG_SEND,(" pkts to scatter: %d \n", pktsToScatter));
+
+ pScatterReq->TotalLength = 0;
+ pScatterReq->ValidScatterEntries = 0;
+
+ packetsInScatterReq = 0;
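+ /* the bundle may not exceed what the device layer can accept in a single scatter transfer */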
+ scatterSpaceRemaining = DEV_GET_MAX_BUNDLE_SEND_LENGTH(&target->Device);
+
+ for (i = 0; i < pktsToScatter; i++) {
+
+ pScatterReq->ScatterList[i].pCallerContexts[0] = NULL;
+
+ pPacket = HTC_GET_PKT_AT_HEAD(pQueue);
+ if (pPacket == NULL) {
+ A_ASSERT(FALSE);
+ break;
+ }
+
+ creditPad = 0;
+ transferLength = DEV_CALC_SEND_PADDED_LEN(&target->Device,
+ pPacket->ActualLength + HTC_HDR_LENGTH);
+ /* see if the padded transfer length falls on a credit boundary */
+ creditRemainder = transferLength % target->TargetCreditSize;
+
+ if (creditRemainder != 0) {
+ /* the transfer consumes a "partial" credit; this packet cannot be bundled unless
+ * we add additional "dummy" padding (max 255 bytes) to consume the entire credit.
+ * NOTE: only apply the send padding if the endpoint allows it */
+ if (pEndpoint->LocalConnectionFlags & HTC_LOCAL_CONN_FLAGS_ENABLE_SEND_BUNDLE_PADDING) {
+ if (transferLength < target->TargetCreditSize) {
+ /* special case where the transfer is less than a credit */
+ creditPad = target->TargetCreditSize - transferLength;
+ } else {
+ creditPad = creditRemainder;
+ }
+
+ /* now check to see if we can indicate padding in the HTC header */
+ if ((creditPad > 0) && (creditPad <= 255)) {
+ /* adjust the transfer length of this packet with the new credit padding */
+ transferLength += creditPad;
+ } else {
+ /* the amount to pad is too large, bail on this packet, we have to
+ * send it using the non-bundled method */
+ pPacket = NULL;
+ }
+ } else {
+ /* bail on this packet, user does not want padding applied */
+ pPacket = NULL;
+ }
+ }
+
+ if (NULL == pPacket) {
+ /* can't bundle */
+ done = TRUE;
+ break;
+ }
+
+ if (scatterSpaceRemaining < transferLength) {
+ /* exceeds what we can transfer */
+ break;
+ }
+
+ scatterSpaceRemaining -= transferLength;
+ /* now remove it from the queue */
+ pPacket = HTC_PACKET_DEQUEUE(pQueue);
+ /* save it in the scatter list */
+ pScatterReq->ScatterList[i].pCallerContexts[0] = pPacket;
+ /* prepare packet and flag message as part of a send bundle */
+ HTC_PREPARE_SEND_PKT(pPacket,
+ pPacket->PktInfo.AsTx.SendFlags | HTC_FLAGS_SEND_BUNDLE,
+ creditPad,
+ pPacket->PktInfo.AsTx.SeqNo);
+ pScatterReq->ScatterList[i].pBuffer = pPacket->pBuffer;
+ pScatterReq->ScatterList[i].Length = transferLength;
+ A_ASSERT(transferLength);
+ pScatterReq->TotalLength += transferLength;
+ pScatterReq->ValidScatterEntries++;
+ packetsInScatterReq++;
+ AR_DEBUG_PRINTF(ATH_DEBUG_SEND,(" %d, Adding packet : 0x%lX, len:%d (remaining space:%d) \n",
+ i, (unsigned long)pPacket,transferLength,scatterSpaceRemaining));
+ }
+
+ if (packetsInScatterReq >= HTC_MIN_HTC_MSGS_TO_BUNDLE) {
+ /* send path is always asynchronous */
+ pScatterReq->CompletionRoutine = HTCAsyncSendScatterCompletion;
+ pScatterReq->Context = pEndpoint;
+ bundlesSent++;
+ totalPktsInBundle += packetsInScatterReq;
+ packetsInScatterReq = 0;
+ AR_DEBUG_PRINTF(ATH_DEBUG_SEND,(" Send Scatter total bytes: %d , entries: %d\n",
+ pScatterReq->TotalLength,pScatterReq->ValidScatterEntries));
+ DevSubmitScatterRequest(&target->Device, pScatterReq, DEV_SCATTER_WRITE, DEV_SCATTER_ASYNC);
+ /* we don't own this anymore */
+ pScatterReq = NULL;
+ /* try to send some more */
+ continue;
+ }
+
+ /* not enough packets to use the scatter request, cleanup */
+ if (pScatterReq != NULL) {
+ if (packetsInScatterReq > 0) {
+ /* work backwards to requeue requests */
+ for (i = (packetsInScatterReq - 1); i >= 0; i--) {
+ pPacket = (HTC_PACKET *)(pScatterReq->ScatterList[i].pCallerContexts[0]);
+ if (pPacket != NULL) {
+ /* undo any prep */
+ HTC_UNPREPARE_SEND_PKT(pPacket);
+ /* queue back to the head */
+ HTC_PACKET_ENQUEUE_TO_HEAD(pQueue,pPacket);
+ }
+ }
+ }
+ DEV_FREE_SCATTER_REQ(&target->Device,pScatterReq);
+ }
+
+ /* if we get here, we sent all that we could, get out */
+ break;
+
+ }
+
+ *pBundlesSent = bundlesSent;
+ *pTotalBundlesPkts = totalPktsInBundle;
+ AR_DEBUG_PRINTF(ATH_DEBUG_SEND,("-HTCIssueSendBundle (sent:%d) \n",bundlesSent));
+
+ return;
+}
+
+/*
+ * if there are no credits, the packet(s) remain in the queue.
+ * this function returns the result of the attempt to send a queue of HTC packets */
+static HTC_SEND_QUEUE_RESULT HTCTrySend(HTC_TARGET *target,
+ HTC_ENDPOINT *pEndpoint,
+ HTC_PACKET_QUEUE *pCallersSendQueue)
+{
+ HTC_PACKET_QUEUE sendQueue; /* temp queue to hold packets at various stages */
+ HTC_PACKET *pPacket;
+ int bundlesSent;
+ int pktsInBundles;
+ int overflow;
+ HTC_SEND_QUEUE_RESULT result = HTC_SEND_QUEUE_OK;
+
+ AR_DEBUG_PRINTF(ATH_DEBUG_SEND,("+HTCTrySend (Queue:0x%lX Depth:%d)\n",
+ (unsigned long)pCallersSendQueue,
+ (pCallersSendQueue == NULL) ? 0 : HTC_PACKET_QUEUE_DEPTH(pCallersSendQueue)));
+
+ /* init the local send queue */
+ INIT_HTC_PACKET_QUEUE(&sendQueue);
+
+ do {
+
+ if (NULL == pCallersSendQueue) {
+ /* caller didn't provide a queue, just wants us to check queues and send */
+ break;
+ }
+
+ if (HTC_QUEUE_EMPTY(pCallersSendQueue)) {
+ /* empty queue */
+ result = HTC_SEND_QUEUE_DROP;
+ break;
+ }
+
+ if (HTC_PACKET_QUEUE_DEPTH(&pEndpoint->TxQueue) >= pEndpoint->MaxTxQueueDepth) {
+ /* we've already overflowed */
+ overflow = HTC_PACKET_QUEUE_DEPTH(pCallersSendQueue);
+ } else {
+ /* figure out how much we will overflow by */
+ overflow = HTC_PACKET_QUEUE_DEPTH(&pEndpoint->TxQueue);
+ overflow += HTC_PACKET_QUEUE_DEPTH(pCallersSendQueue);
+ /* figure out how much we will overflow the TX queue by */
+ overflow -= pEndpoint->MaxTxQueueDepth;
+ }
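+ /* for example, a TX queue depth of 30 plus 8 new packets against a max depth of 32 gives an overflow of 6 */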
+
+ /* if overflow is negative or zero, we are okay */
+ if (overflow > 0) {
+ AR_DEBUG_PRINTF(ATH_DEBUG_SEND,
+ (" Endpoint %d, TX queue will overflow :%d , Tx Depth:%d, Max:%d \n",
+ pEndpoint->Id, overflow, HTC_PACKET_QUEUE_DEPTH(&pEndpoint->TxQueue), pEndpoint->MaxTxQueueDepth));
+ }
+ if ((overflow <= 0) || (pEndpoint->EpCallBacks.EpSendFull == NULL)) {
+ /* all packets will fit or caller did not provide send full indication handler
+ * -- just move all of them to the local sendQueue object */
+ HTC_PACKET_QUEUE_TRANSFER_TO_TAIL(&sendQueue, pCallersSendQueue);
+ } else {
+ int i;
+ int goodPkts = HTC_PACKET_QUEUE_DEPTH(pCallersSendQueue) - overflow;
+
+ A_ASSERT(goodPkts >= 0);
+ /* we have overflowed, and a callback is provided */
+ /* dequeue all non-overflow packets into the sendqueue */
+ for (i = 0; i < goodPkts; i++) {
+ /* pop off the caller's queue */
+ pPacket = HTC_PACKET_DEQUEUE(pCallersSendQueue);
+ A_ASSERT(pPacket != NULL);
+ /* insert into local queue */
+ HTC_PACKET_ENQUEUE(&sendQueue,pPacket);
+ }
+
+ /* the caller's queue now holds only the packets that won't fit */
+ /* walk through the caller's queue and indicate each one to the send-full handler */
+ ITERATE_OVER_LIST_ALLOW_REMOVE(&pCallersSendQueue->QueueHead, pPacket, HTC_PACKET, ListLink) {
+
+ AR_DEBUG_PRINTF(ATH_DEBUG_SEND, (" Indicating overflowed TX packet: 0x%lX \n",
+ (unsigned long)pPacket));
+ if (pEndpoint->EpCallBacks.EpSendFull(pEndpoint->EpCallBacks.pContext,
+ pPacket) == HTC_SEND_FULL_DROP) {
+ /* callback wants the packet dropped */
+ INC_HTC_EP_STAT(pEndpoint, TxDropped, 1);
+ /* leave this one in the caller's queue for cleanup */
+ } else {
+ /* callback wants to keep this packet, remove from caller's queue */
+ HTC_PACKET_REMOVE(pCallersSendQueue, pPacket);
+ /* put it in the send queue */
+ HTC_PACKET_ENQUEUE(&sendQueue,pPacket);
+ }
+
+ } ITERATE_END;
+
+ if (HTC_QUEUE_EMPTY(&sendQueue)) {
+ /* no packets made it in, caller will cleanup */
+ result = HTC_SEND_QUEUE_DROP;
+ break;
+ }
+ }
+
+ } while (FALSE);
+
+ if (result != HTC_SEND_QUEUE_OK) {
+ AR_DEBUG_PRINTF(ATH_DEBUG_SEND,("-HTCTrySend: \n"));
+ return result;
+ }
+
+ LOCK_HTC_TX(target);
+
+ if (!HTC_QUEUE_EMPTY(&sendQueue)) {
+ /* transfer packets */
+ HTC_PACKET_QUEUE_TRANSFER_TO_TAIL(&pEndpoint->TxQueue,&sendQueue);
+ A_ASSERT(HTC_QUEUE_EMPTY(&sendQueue));
+ INIT_HTC_PACKET_QUEUE(&sendQueue);
+ }
+
+ /* increment tx processing count on entry */
+ pEndpoint->TxProcessCount++;
+ if (pEndpoint->TxProcessCount > 1) {
+ /* another thread or task is draining the TX queues on this endpoint;
+ * that thread will reset the tx processing count when the queue is drained */
+ pEndpoint->TxProcessCount--;
+ UNLOCK_HTC_TX(target);
+ AR_DEBUG_PRINTF(ATH_DEBUG_SEND,("-HTCTrySend (busy) \n"));
+ return HTC_SEND_QUEUE_OK;
+ }
+
+ /***** beyond this point only 1 thread may enter ******/
+
+ /* now drain the endpoint TX queue for transmission as long as we have enough
+ * credits */
+ while (TRUE) {
+
+ if (HTC_PACKET_QUEUE_DEPTH(&pEndpoint->TxQueue) == 0) {
+ break;
+ }
+
+ /* get all the packets for this endpoint that we can for this pass */
+ GetHTCSendPackets(target, pEndpoint, &sendQueue);
+
+ if (HTC_PACKET_QUEUE_DEPTH(&sendQueue) == 0) {
+ /* didn't get any packets due to a lack of credits */
+ break;
+ }
+
+ UNLOCK_HTC_TX(target);
+
+ /* any packets to send are now in our local send queue */
+
+ bundlesSent = 0;
+ pktsInBundles = 0;
+
+ while (TRUE) {
+
+ /* try to send a bundle on each pass */
+ if ((target->SendBundlingEnabled) &&
+ (HTC_PACKET_QUEUE_DEPTH(&sendQueue) >= HTC_MIN_HTC_MSGS_TO_BUNDLE)) {
+ int temp1,temp2;
+ /* bundling is enabled and there is at least the minimum number of packets in the send queue;
+ * send what we can in this pass */
+ HTCIssueSendBundle(pEndpoint, &sendQueue, &temp1, &temp2);
+ bundlesSent += temp1;
+ pktsInBundles += temp2;
+ }
+
+ /* if not bundling or there was a packet that could not be placed in a bundle, pull it out
+ * and send it the normal way */
+ pPacket = HTC_PACKET_DEQUEUE(&sendQueue);
+ if (NULL == pPacket) {
+ /* local queue is fully drained */
+ break;
+ }
+ HTC_PREPARE_SEND_PKT(pPacket,
+ pPacket->PktInfo.AsTx.SendFlags,
+ 0,
+ pPacket->PktInfo.AsTx.SeqNo);
+ HTCIssueSend(target, pPacket);
+
+ /* go back and see if we can bundle some more */
+ }
+
+ LOCK_HTC_TX(target);
+
+ INC_HTC_EP_STAT(pEndpoint, TxBundles, bundlesSent);
+ INC_HTC_EP_STAT(pEndpoint, TxPacketsBundled, pktsInBundles);
+
+ }
+
+ /* done with this endpoint, we can clear the count */
+ pEndpoint->TxProcessCount = 0;
+ UNLOCK_HTC_TX(target);
+
+ AR_DEBUG_PRINTF(ATH_DEBUG_SEND,("-HTCTrySend: \n"));
+
+ return HTC_SEND_QUEUE_OK;
+}
+
+A_STATUS HTCSendPktsMultiple(HTC_HANDLE HTCHandle, HTC_PACKET_QUEUE *pPktQueue)
+{
+ HTC_TARGET *target = GET_HTC_TARGET_FROM_HANDLE(HTCHandle);
+ HTC_ENDPOINT *pEndpoint;
+ HTC_PACKET *pPacket;
+
+ AR_DEBUG_PRINTF(ATH_DEBUG_SEND, ("+HTCSendPktsMultiple: Queue: 0x%lX, Pkts %d \n",
+ (unsigned long)pPktQueue, HTC_PACKET_QUEUE_DEPTH(pPktQueue)));
+
+ /* get packet at head to figure out which endpoint these packets will go into */
+ pPacket = HTC_GET_PKT_AT_HEAD(pPktQueue);
+ if (NULL == pPacket) {
+ AR_DEBUG_PRINTF(ATH_DEBUG_SEND, ("-HTCSendPktsMultiple \n"));
+ return A_EINVAL;
+ }
+
+ AR_DEBUG_ASSERT(pPacket->Endpoint < ENDPOINT_MAX);
+ pEndpoint = &target->EndPoint[pPacket->Endpoint];
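+ /* every packet in the caller's queue is assumed to target this same endpoint */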
+
+ HTCTrySend(target, pEndpoint, pPktQueue);
+
+ /* do completion on any packets that couldn't get in */
+ if (!HTC_QUEUE_EMPTY(pPktQueue)) {
+
+ HTC_PACKET_QUEUE_ITERATE_ALLOW_REMOVE(pPktQueue,pPacket) {
+ if (HTC_STOPPING(target)) {
+ pPacket->Status = A_ECANCELED;
+ } else {
+ pPacket->Status = A_NO_RESOURCE;
+ }
+ } HTC_PACKET_QUEUE_ITERATE_END;
+
+ DO_EP_TX_COMPLETION(pEndpoint,pPktQueue);
+ }
+
+ AR_DEBUG_PRINTF(ATH_DEBUG_SEND, ("-HTCSendPktsMultiple \n"));
+
+ return A_OK;
+}
+
+/* HTC API - HTCSendPkt */
+A_STATUS HTCSendPkt(HTC_HANDLE HTCHandle, HTC_PACKET *pPacket)
+{
+ HTC_PACKET_QUEUE queue;
+
+ AR_DEBUG_PRINTF(ATH_DEBUG_SEND,
+ ("+-HTCSendPkt: Enter endPointId: %d, buffer: 0x%lX, length: %d \n",
+ pPacket->Endpoint, (unsigned long)pPacket->pBuffer, pPacket->ActualLength));
+ INIT_HTC_PACKET_QUEUE_AND_ADD(&queue,pPacket);
+ return HTCSendPktsMultiple(HTCHandle, &queue);
+}
+
+/* check TX queues to drain because of credit distribution update */
+static INLINE void HTCCheckEndpointTxQueues(HTC_TARGET *target)
+{
+ HTC_ENDPOINT *pEndpoint;
+ HTC_ENDPOINT_CREDIT_DIST *pDistItem;
+
+ AR_DEBUG_PRINTF(ATH_DEBUG_SEND, ("+HTCCheckEndpointTxQueues \n"));
+ pDistItem = target->EpCreditDistributionListHead;
+
+ /* run through the credit distribution list to see
+ * if there are packets queued
+ * NOTE: no locks need to be taken since the distribution list
+ * is not dynamic (cannot be re-ordered) and we are not modifying any state */
+ while (pDistItem != NULL) {
+ pEndpoint = (HTC_ENDPOINT *)pDistItem->pHTCReserved;
+
+ if (HTC_PACKET_QUEUE_DEPTH(&pEndpoint->TxQueue) > 0) {
+ AR_DEBUG_PRINTF(ATH_DEBUG_SEND, (" Ep %d has %d credits and %d Packets in TX Queue \n",
+ pDistItem->Endpoint, pEndpoint->CreditDist.TxCredits, HTC_PACKET_QUEUE_DEPTH(&pEndpoint->TxQueue)));
+ /* try to start the stalled queue; this list is ordered by priority.
+ * The highest priority queue gets processed first; if there are credits available, the
+ * highest priority queue will get a chance to reclaim credits from lower priority
+ * ones */
+ HTCTrySend(target, pEndpoint, NULL);
+ }
+
+ pDistItem = pDistItem->pNext;
+ }
+
+ AR_DEBUG_PRINTF(ATH_DEBUG_SEND, ("-HTCCheckEndpointTxQueues \n"));
+}
+
+/* process credit reports and call distribution function */
+void HTCProcessCreditRpt(HTC_TARGET *target, HTC_CREDIT_REPORT *pRpt, int NumEntries, HTC_ENDPOINT_ID FromEndpoint)
+{
+ int i;
+ HTC_ENDPOINT *pEndpoint;
+ int totalCredits = 0;
+ A_BOOL doDist = FALSE;
+
+ AR_DEBUG_PRINTF(ATH_DEBUG_SEND, ("+HTCProcessCreditRpt, Credit Report Entries:%d \n", NumEntries));
+
+ /* lock out TX while we update credits */
+ LOCK_HTC_TX(target);
+
+ for (i = 0; i < NumEntries; i++, pRpt++) {
+ if (pRpt->EndpointID >= ENDPOINT_MAX) {
+ AR_DEBUG_ASSERT(FALSE);
+ break;
+ }
+
+ pEndpoint = &target->EndPoint[pRpt->EndpointID];
+
+ AR_DEBUG_PRINTF(ATH_DEBUG_SEND, (" Endpoint %d got %d credits \n",
+ pRpt->EndpointID, pRpt->Credits));
+
+
+#ifdef HTC_EP_STAT_PROFILING
+
+ INC_HTC_EP_STAT(pEndpoint, TxCreditRpts, 1);
+ INC_HTC_EP_STAT(pEndpoint, TxCreditsReturned, pRpt->Credits);
+
+ if (FromEndpoint == pRpt->EndpointID) {
+ /* this credit report arrived on the same endpoint indicating it arrived in an RX
+ * packet */
+ INC_HTC_EP_STAT(pEndpoint, TxCreditsFromRx, pRpt->Credits);
+ INC_HTC_EP_STAT(pEndpoint, TxCreditRptsFromRx, 1);
+ } else if (FromEndpoint == ENDPOINT_0) {
+ /* this credit arrived on endpoint 0 as a NULL message */
+ INC_HTC_EP_STAT(pEndpoint, TxCreditsFromEp0, pRpt->Credits);
+ INC_HTC_EP_STAT(pEndpoint, TxCreditRptsFromEp0, 1);
+ } else {
+ /* arrived on another endpoint */
+ INC_HTC_EP_STAT(pEndpoint, TxCreditsFromOther, pRpt->Credits);
+ INC_HTC_EP_STAT(pEndpoint, TxCreditRptsFromOther, 1);
+ }
+
+#endif
+
+ if (ENDPOINT_0 == pRpt->EndpointID) {
+ /* always give endpoint 0 credits back */
+ pEndpoint->CreditDist.TxCredits += pRpt->Credits;
+ } else {
+ /* for all other endpoints, update credits to distribute, the distribution function
+ * will handle giving out credits back to the endpoints */
+ pEndpoint->CreditDist.TxCreditsToDist += pRpt->Credits;
+ /* flag that we have to do the distribution */
+ doDist = TRUE;
+ }
+
+ /* refresh tx depth for distribution function that will recover these credits
+ * NOTE: this is only valid when there are credits to recover! */
+ pEndpoint->CreditDist.TxQueueDepth = HTC_PACKET_QUEUE_DEPTH(&pEndpoint->TxQueue);
+
+ totalCredits += pRpt->Credits;
+ }
+
+ AR_DEBUG_PRINTF(ATH_DEBUG_SEND, (" Report indicated %d credits to distribute \n", totalCredits));
+
+ if (doDist) {
+ /* this was a credit return based on completed send operations;
+ * note, this is done with the lock held */
+ DO_DISTRIBUTION(target,
+ HTC_CREDIT_DIST_SEND_COMPLETE,
+ "Send Complete",
+ target->EpCreditDistributionListHead->pNext);
+ }
+
+ UNLOCK_HTC_TX(target);
+
+ if (totalCredits) {
+ HTCCheckEndpointTxQueues(target);
+ }
+
+ AR_DEBUG_PRINTF(ATH_DEBUG_SEND, ("-HTCProcessCreditRpt \n"));
+}
+
+/* flush endpoint TX queue */
+static void HTCFlushEndpointTX(HTC_TARGET *target, HTC_ENDPOINT *pEndpoint, HTC_TX_TAG Tag)
+{
+ HTC_PACKET *pPacket;
+ HTC_PACKET_QUEUE discardQueue;
+ HTC_PACKET_QUEUE container;
+
+ /* initialize the discard queue */
+ INIT_HTC_PACKET_QUEUE(&discardQueue);
+
+ LOCK_HTC_TX(target);
+
+ /* iterate from the front of the TX queue and flush out packets */
+ ITERATE_OVER_LIST_ALLOW_REMOVE(&pEndpoint->TxQueue.QueueHead, pPacket, HTC_PACKET, ListLink) {
+
+ /* check for removal */
+ if ((HTC_TX_PACKET_TAG_ALL == Tag) || (Tag == pPacket->PktInfo.AsTx.Tag)) {
+ /* remove from queue */
+ HTC_PACKET_REMOVE(&pEndpoint->TxQueue, pPacket);
+ /* add it to the discard pile */
+ HTC_PACKET_ENQUEUE(&discardQueue, pPacket);
+ }
+
+ } ITERATE_END;
+
+ UNLOCK_HTC_TX(target);
+
+ /* empty the discard queue */
+ while (1) {
+ pPacket = HTC_PACKET_DEQUEUE(&discardQueue);
+ if (NULL == pPacket) {
+ break;
+ }
+ pPacket->Status = A_ECANCELED;
+ AR_DEBUG_PRINTF(ATH_DEBUG_TRC, (" Flushing TX packet:0x%lX, length:%d, ep:%d tag:0x%X \n",
+ (unsigned long)pPacket, pPacket->ActualLength, pPacket->Endpoint, pPacket->PktInfo.AsTx.Tag));
+ INIT_HTC_PACKET_QUEUE_AND_ADD(&container,pPacket);
+ DO_EP_TX_COMPLETION(pEndpoint,&container);
+ }
+
+}
+
+void DumpCreditDist(HTC_ENDPOINT_CREDIT_DIST *pEPDist)
+{
+ AR_DEBUG_PRINTF(ATH_DEBUG_ANY, ("--- EP : %d ServiceID: 0x%X --------------\n",
+ pEPDist->Endpoint, pEPDist->ServiceID));
+ AR_DEBUG_PRINTF(ATH_DEBUG_ANY, (" this:0x%lX next:0x%lX prev:0x%lX\n",
+ (unsigned long)pEPDist, (unsigned long)pEPDist->pNext, (unsigned long)pEPDist->pPrev));
+ AR_DEBUG_PRINTF(ATH_DEBUG_ANY, (" DistFlags : 0x%X \n", pEPDist->DistFlags));
+ AR_DEBUG_PRINTF(ATH_DEBUG_ANY, (" TxCreditsNorm : %d \n", pEPDist->TxCreditsNorm));
+ AR_DEBUG_PRINTF(ATH_DEBUG_ANY, (" TxCreditsMin : %d \n", pEPDist->TxCreditsMin));
+ AR_DEBUG_PRINTF(ATH_DEBUG_ANY, (" TxCredits : %d \n", pEPDist->TxCredits));
+ AR_DEBUG_PRINTF(ATH_DEBUG_ANY, (" TxCreditsAssigned : %d \n", pEPDist->TxCreditsAssigned));
+ AR_DEBUG_PRINTF(ATH_DEBUG_ANY, (" TxCreditsSeek : %d \n", pEPDist->TxCreditsSeek));
+ AR_DEBUG_PRINTF(ATH_DEBUG_ANY, (" TxCreditSize : %d \n", pEPDist->TxCreditSize));
+ AR_DEBUG_PRINTF(ATH_DEBUG_ANY, (" TxCreditsPerMaxMsg : %d \n", pEPDist->TxCreditsPerMaxMsg));
+ AR_DEBUG_PRINTF(ATH_DEBUG_ANY, (" TxCreditsToDist : %d \n", pEPDist->TxCreditsToDist));
+ AR_DEBUG_PRINTF(ATH_DEBUG_ANY, (" TxQueueDepth : %d \n",
+ HTC_PACKET_QUEUE_DEPTH(&((HTC_ENDPOINT *)pEPDist->pHTCReserved)->TxQueue)));
+ AR_DEBUG_PRINTF(ATH_DEBUG_ANY, ("----------------------------------------------------\n"));
+}
+
+void DumpCreditDistStates(HTC_TARGET *target)
+{
+ HTC_ENDPOINT_CREDIT_DIST *pEPList = target->EpCreditDistributionListHead;
+
+ while (pEPList != NULL) {
+ DumpCreditDist(pEPList);
+ pEPList = pEPList->pNext;
+ }
+
+ if (target->DistributeCredits != NULL) {
+ DO_DISTRIBUTION(target,
+ HTC_DUMP_CREDIT_STATE,
+ "Dump State",
+ NULL);
+ }
+}
+
+/* flush all send packets from all endpoint queues */
+void HTCFlushSendPkts(HTC_TARGET *target)
+{
+ HTC_ENDPOINT *pEndpoint;
+ int i;
+
+ if (AR_DEBUG_LVL_CHECK(ATH_DEBUG_TRC)) {
+ DumpCreditDistStates(target);
+ }
+
+ for (i = ENDPOINT_0; i < ENDPOINT_MAX; i++) {
+ pEndpoint = &target->EndPoint[i];
+ if (pEndpoint->ServiceID == 0) {
+ /* not in use.. */
+ continue;
+ }
+ HTCFlushEndpointTX(target,pEndpoint,HTC_TX_PACKET_TAG_ALL);
+ }
+
+
+}
+
+/* HTC API to flush an endpoint's TX queue*/
+void HTCFlushEndpoint(HTC_HANDLE HTCHandle, HTC_ENDPOINT_ID Endpoint, HTC_TX_TAG Tag)
+{
+ HTC_TARGET *target = GET_HTC_TARGET_FROM_HANDLE(HTCHandle);
+ HTC_ENDPOINT *pEndpoint = &target->EndPoint[Endpoint];
+
+ if (pEndpoint->ServiceID == 0) {
+ AR_DEBUG_ASSERT(FALSE);
+ /* not in use.. */
+ return;
+ }
+
+ HTCFlushEndpointTX(target, pEndpoint, Tag);
+}
+
+/* HTC API to indicate activity to the credit distribution function */
+void HTCIndicateActivityChange(HTC_HANDLE HTCHandle,
+ HTC_ENDPOINT_ID Endpoint,
+ A_BOOL Active)
+{
+ HTC_TARGET *target = GET_HTC_TARGET_FROM_HANDLE(HTCHandle);
+ HTC_ENDPOINT *pEndpoint = &target->EndPoint[Endpoint];
+ A_BOOL doDist = FALSE;
+
+ if (pEndpoint->ServiceID == 0) {
+ AR_DEBUG_ASSERT(FALSE);
+ /* not in use.. */
+ return;
+ }
+
+ LOCK_HTC_TX(target);
+
+ if (Active) {
+ if (!(pEndpoint->CreditDist.DistFlags & HTC_EP_ACTIVE)) {
+ /* mark active now */
+ pEndpoint->CreditDist.DistFlags |= HTC_EP_ACTIVE;
+ doDist = TRUE;
+ }
+ } else {
+ if (pEndpoint->CreditDist.DistFlags & HTC_EP_ACTIVE) {
+ /* mark inactive now */
+ pEndpoint->CreditDist.DistFlags &= ~HTC_EP_ACTIVE;
+ doDist = TRUE;
+ }
+ }
+
+ if (doDist) {
+ /* indicate current Tx Queue depth to the credit distribution function */
+ pEndpoint->CreditDist.TxQueueDepth = HTC_PACKET_QUEUE_DEPTH(&pEndpoint->TxQueue);
+ /* do distribution again based on activity change
+ * note, this is done with the lock held */
+ DO_DISTRIBUTION(target,
+ HTC_CREDIT_DIST_ACTIVITY_CHANGE,
+ "Activity Change",
+ target->EpCreditDistributionListHead->pNext);
+ }
+
+ UNLOCK_HTC_TX(target);
+
+ if (doDist && !Active) {
+ /* if a stream went inactive and this resulted in a credit distribution change,
+ * some credits may now be available for HTC packets that are stuck in
+ * HTC queues */
+ HTCCheckEndpointTxQueues(target);
+ }
+}
+
+A_BOOL HTCIsEndpointActive(HTC_HANDLE HTCHandle,
+ HTC_ENDPOINT_ID Endpoint)
+{
+ HTC_TARGET *target = GET_HTC_TARGET_FROM_HANDLE(HTCHandle);
+ HTC_ENDPOINT *pEndpoint = &target->EndPoint[Endpoint];
+
+ if (pEndpoint->ServiceID == 0) {
+ return FALSE;
+ }
+
+ if (pEndpoint->CreditDist.DistFlags & HTC_EP_ACTIVE) {
+ return TRUE;
+ }
+
+ return FALSE;
+}
diff --git a/drivers/staging/ath6kl/htc2/htc_services.c b/drivers/staging/ath6kl/htc2/htc_services.c
new file mode 100644
index 000000000000..64fddc0ee376
--- /dev/null
+++ b/drivers/staging/ath6kl/htc2/htc_services.c
@@ -0,0 +1,450 @@
+//------------------------------------------------------------------------------
+// <copyright file="htc_services.c" company="Atheros">
+// Copyright (c) 2007-2010 Atheros Corporation. All rights reserved.
+//
+//
+// Permission to use, copy, modify, and/or distribute this software for any
+// purpose with or without fee is hereby granted, provided that the above
+// copyright notice and this permission notice appear in all copies.
+//
+// THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+// WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+// MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+// ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+// WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+// ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+// OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+//
+//
+//------------------------------------------------------------------------------
+//==============================================================================
+// Author(s): ="Atheros"
+//==============================================================================
+#include "htc_internal.h"
+
+void HTCControlTxComplete(void *Context, HTC_PACKET *pPacket)
+{
+ /* not implemented
+ * we do not send control TX frames during normal runtime, only during setup */
+ AR_DEBUG_ASSERT(FALSE);
+}
+
+ /* callback when a control message arrives on this endpoint */
+void HTCControlRecv(void *Context, HTC_PACKET *pPacket)
+{
+ AR_DEBUG_ASSERT(pPacket->Endpoint == ENDPOINT_0);
+
+ if (pPacket->Status == A_ECANCELED) {
+ /* this is a flush operation, return the control packet back to the pool */
+ HTC_FREE_CONTROL_RX((HTC_TARGET*)Context,pPacket);
+ return;
+ }
+
+ /* the only control messages we are expecting are NULL messages (credit reports) */
+ if (pPacket->ActualLength > 0) {
+ AR_DEBUG_PRINTF(ATH_DEBUG_ERR,
+ ("HTCControlRecv, got message with length:%d \n",
+ pPacket->ActualLength + (A_UINT32)HTC_HDR_LENGTH));
+
+#ifdef ATH_DEBUG_MODULE
+ /* dump header and message */
+ DebugDumpBytes(pPacket->pBuffer - HTC_HDR_LENGTH,
+ pPacket->ActualLength + HTC_HDR_LENGTH,
+ "Unexpected ENDPOINT 0 Message");
+#endif
+ }
+
+ HTC_RECYCLE_RX_PKT((HTC_TARGET*)Context,pPacket,&((HTC_TARGET*)Context)->EndPoint[0]);
+}
+
+A_STATUS HTCSendSetupComplete(HTC_TARGET *target)
+{
+ HTC_PACKET *pSendPacket = NULL;
+ A_STATUS status;
+
+ do {
+ /* allocate a packet to send to the target */
+ pSendPacket = HTC_ALLOC_CONTROL_TX(target);
+
+ if (NULL == pSendPacket) {
+ status = A_NO_MEMORY;
+ break;
+ }
+
+ if (target->HTCTargetVersion >= HTC_VERSION_2P1) {
+ HTC_SETUP_COMPLETE_EX_MSG *pSetupCompleteEx;
+ A_UINT32 setupFlags = 0;
+
+ pSetupCompleteEx = (HTC_SETUP_COMPLETE_EX_MSG *)pSendPacket->pBuffer;
+ A_MEMZERO(pSetupCompleteEx, sizeof(HTC_SETUP_COMPLETE_EX_MSG));
+ pSetupCompleteEx->MessageID = HTC_MSG_SETUP_COMPLETE_EX_ID;
+ if (target->MaxMsgPerBundle > 0) {
+ /* host can do HTC bundling, indicate this to the target */
+ setupFlags |= HTC_SETUP_COMPLETE_FLAGS_ENABLE_BUNDLE_RECV;
+ pSetupCompleteEx->MaxMsgsPerBundledRecv = target->MaxMsgPerBundle;
+ }
+ A_MEMCPY(&pSetupCompleteEx->SetupFlags, &setupFlags, sizeof(pSetupCompleteEx->SetupFlags));
+ SET_HTC_PACKET_INFO_TX(pSendPacket,
+ NULL,
+ (A_UINT8 *)pSetupCompleteEx,
+ sizeof(HTC_SETUP_COMPLETE_EX_MSG),
+ ENDPOINT_0,
+ HTC_SERVICE_TX_PACKET_TAG);
+
+ } else {
+ HTC_SETUP_COMPLETE_MSG *pSetupComplete;
+ /* assemble setup complete message */
+ pSetupComplete = (HTC_SETUP_COMPLETE_MSG *)pSendPacket->pBuffer;
+ A_MEMZERO(pSetupComplete, sizeof(HTC_SETUP_COMPLETE_MSG));
+ pSetupComplete->MessageID = HTC_MSG_SETUP_COMPLETE_ID;
+ SET_HTC_PACKET_INFO_TX(pSendPacket,
+ NULL,
+ (A_UINT8 *)pSetupComplete,
+ sizeof(HTC_SETUP_COMPLETE_MSG),
+ ENDPOINT_0,
+ HTC_SERVICE_TX_PACKET_TAG);
+ }
+
+ /* we want synchronous operation */
+ pSendPacket->Completion = NULL;
+ HTC_PREPARE_SEND_PKT(pSendPacket,0,0,0);
+ /* send the message */
+ status = HTCIssueSend(target,pSendPacket);
+
+ } while (FALSE);
+
+ if (pSendPacket != NULL) {
+ HTC_FREE_CONTROL_TX(target,pSendPacket);
+ }
+
+ return status;
+}
+
+
+A_STATUS HTCConnectService(HTC_HANDLE HTCHandle,
+ HTC_SERVICE_CONNECT_REQ *pConnectReq,
+ HTC_SERVICE_CONNECT_RESP *pConnectResp)
+{
+ HTC_TARGET *target = GET_HTC_TARGET_FROM_HANDLE(HTCHandle);
+ A_STATUS status = A_OK;
+ HTC_PACKET *pRecvPacket = NULL;
+ HTC_PACKET *pSendPacket = NULL;
+ HTC_CONNECT_SERVICE_RESPONSE_MSG *pResponseMsg;
+ HTC_CONNECT_SERVICE_MSG *pConnectMsg;
+ HTC_ENDPOINT_ID assignedEndpoint = ENDPOINT_MAX;
+ HTC_ENDPOINT *pEndpoint;
+ unsigned int maxMsgSize = 0;
+
+ AR_DEBUG_PRINTF(ATH_DEBUG_TRC, ("+HTCConnectService, target:0x%lX SvcID:0x%X \n",
+ (unsigned long)target, pConnectReq->ServiceID));
+
+ do {
+
+ AR_DEBUG_ASSERT(pConnectReq->ServiceID != 0);
+
+ if (HTC_CTRL_RSVD_SVC == pConnectReq->ServiceID) {
+ /* special case for pseudo control service */
+ assignedEndpoint = ENDPOINT_0;
+ maxMsgSize = HTC_MAX_CONTROL_MESSAGE_LENGTH;
+ } else {
+ /* allocate a packet to send to the target */
+ pSendPacket = HTC_ALLOC_CONTROL_TX(target);
+
+ if (NULL == pSendPacket) {
+ AR_DEBUG_ASSERT(FALSE);
+ status = A_NO_MEMORY;
+ break;
+ }
+ /* assemble connect service message */
+ pConnectMsg = (HTC_CONNECT_SERVICE_MSG *)pSendPacket->pBuffer;
+ AR_DEBUG_ASSERT(pConnectMsg != NULL);
+ A_MEMZERO(pConnectMsg,sizeof(HTC_CONNECT_SERVICE_MSG));
+ pConnectMsg->MessageID = HTC_MSG_CONNECT_SERVICE_ID;
+ pConnectMsg->ServiceID = pConnectReq->ServiceID;
+ pConnectMsg->ConnectionFlags = pConnectReq->ConnectionFlags;
+ /* check whether the caller wants to transfer meta data */
+ if ((pConnectReq->pMetaData != NULL) &&
+ (pConnectReq->MetaDataLength <= HTC_SERVICE_META_DATA_MAX_LENGTH)) {
+ /* copy meta data into the message buffer (after the header) */
+ A_MEMCPY((A_UINT8 *)pConnectMsg + sizeof(HTC_CONNECT_SERVICE_MSG),
+ pConnectReq->pMetaData,
+ pConnectReq->MetaDataLength);
+ pConnectMsg->ServiceMetaLength = pConnectReq->MetaDataLength;
+ }
+
+ SET_HTC_PACKET_INFO_TX(pSendPacket,
+ NULL,
+ (A_UINT8 *)pConnectMsg,
+ sizeof(HTC_CONNECT_SERVICE_MSG) + pConnectMsg->ServiceMetaLength,
+ ENDPOINT_0,
+ HTC_SERVICE_TX_PACKET_TAG);
+
+ /* we want synchronous operation */
+ pSendPacket->Completion = NULL;
+ HTC_PREPARE_SEND_PKT(pSendPacket,0,0,0);
+ status = HTCIssueSend(target,pSendPacket);
+
+ if (A_FAILED(status)) {
+ break;
+ }
+
+ /* wait for response */
+ status = HTCWaitforControlMessage(target, &pRecvPacket);
+
+ if (A_FAILED(status)) {
+ break;
+ }
+ /* we controlled the buffer creation so it has to be properly aligned */
+ pResponseMsg = (HTC_CONNECT_SERVICE_RESPONSE_MSG *)pRecvPacket->pBuffer;
+
+ if ((pResponseMsg->MessageID != HTC_MSG_CONNECT_SERVICE_RESPONSE_ID) ||
+ (pRecvPacket->ActualLength < sizeof(HTC_CONNECT_SERVICE_RESPONSE_MSG))) {
+ /* this message is not valid */
+ AR_DEBUG_ASSERT(FALSE);
+ status = A_EPROTO;
+ break;
+ }
+
+ pConnectResp->ConnectRespCode = pResponseMsg->Status;
+ /* check response status */
+ if (pResponseMsg->Status != HTC_SERVICE_SUCCESS) {
+ AR_DEBUG_PRINTF(ATH_DEBUG_ERR,
+ (" Target failed service 0x%X connect request (status:%d)\n",
+ pResponseMsg->ServiceID, pResponseMsg->Status));
+ status = A_EPROTO;
+ break;
+ }
+
+ assignedEndpoint = (HTC_ENDPOINT_ID) pResponseMsg->EndpointID;
+ maxMsgSize = pResponseMsg->MaxMsgSize;
+
+ if ((pConnectResp->pMetaData != NULL) &&
+ (pResponseMsg->ServiceMetaLength > 0) &&
+ (pResponseMsg->ServiceMetaLength <= HTC_SERVICE_META_DATA_MAX_LENGTH)) {
+ /* caller supplied a buffer and the target responded with data */
+ int copyLength = min((int)pConnectResp->BufferLength, (int)pResponseMsg->ServiceMetaLength);
+ /* copy the meta data */
+ A_MEMCPY(pConnectResp->pMetaData,
+ ((A_UINT8 *)pResponseMsg) + sizeof(HTC_CONNECT_SERVICE_RESPONSE_MSG),
+ copyLength);
+ pConnectResp->ActualLength = copyLength;
+ }
+
+ }
+
+ /* the rest of these are parameter checks so set the error status */
+ status = A_EPROTO;
+
+ if (assignedEndpoint >= ENDPOINT_MAX) {
+ AR_DEBUG_ASSERT(FALSE);
+ break;
+ }
+
+ if (0 == maxMsgSize) {
+ AR_DEBUG_ASSERT(FALSE);
+ break;
+ }
+
+ pEndpoint = &target->EndPoint[assignedEndpoint];
+ pEndpoint->Id = assignedEndpoint;
+ if (pEndpoint->ServiceID != 0) {
+ /* endpoint already in use! */
+ AR_DEBUG_ASSERT(FALSE);
+ break;
+ }
+
+ /* return assigned endpoint to caller */
+ pConnectResp->Endpoint = assignedEndpoint;
+ pConnectResp->MaxMsgLength = maxMsgSize;
+
+ /* setup the endpoint */
+ pEndpoint->ServiceID = pConnectReq->ServiceID; /* this marks the endpoint in use */
+ pEndpoint->MaxTxQueueDepth = pConnectReq->MaxSendQueueDepth;
+ pEndpoint->MaxMsgLength = maxMsgSize;
+ /* copy all the callbacks */
+ pEndpoint->EpCallBacks = pConnectReq->EpCallbacks;
+ /* set the credit distribution info for this endpoint, this information is
+ * passed back to the credit distribution callback function */
+ pEndpoint->CreditDist.ServiceID = pConnectReq->ServiceID;
+ pEndpoint->CreditDist.pHTCReserved = pEndpoint;
+ pEndpoint->CreditDist.Endpoint = assignedEndpoint;
+ pEndpoint->CreditDist.TxCreditSize = target->TargetCreditSize;
+
+ if (pConnectReq->MaxSendMsgSize != 0) {
+ /* override TxCreditsPerMaxMsg calculation, this optimizes the credit-low indications
+ * since the host will actually issue smaller messages in the Send path */
+ if (pConnectReq->MaxSendMsgSize > maxMsgSize) {
+ /* can't be larger than the maximum the target can support */
+ AR_DEBUG_ASSERT(FALSE);
+ break;
+ }
+ pEndpoint->CreditDist.TxCreditsPerMaxMsg = pConnectReq->MaxSendMsgSize / target->TargetCreditSize;
+ } else {
+ pEndpoint->CreditDist.TxCreditsPerMaxMsg = maxMsgSize / target->TargetCreditSize;
+ }
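+ /* for example, a 1536-byte maximum message with a 128-byte target credit size yields 12 credits per max-sized message */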
+
+ if (0 == pEndpoint->CreditDist.TxCreditsPerMaxMsg) {
+ pEndpoint->CreditDist.TxCreditsPerMaxMsg = 1;
+ }
+
+ /* save local connection flags */
+ pEndpoint->LocalConnectionFlags = pConnectReq->LocalConnectionFlags;
+
+ status = A_OK;
+
+ } while (FALSE);
+
+ if (pSendPacket != NULL) {
+ HTC_FREE_CONTROL_TX(target,pSendPacket);
+ }
+
+ if (pRecvPacket != NULL) {
+ HTC_FREE_CONTROL_RX(target,pRecvPacket);
+ }
+
+ AR_DEBUG_PRINTF(ATH_DEBUG_TRC, ("-HTCConnectService \n"));
+
+ return status;
+}
+
+static void AddToEndpointDistList(HTC_TARGET *target, HTC_ENDPOINT_CREDIT_DIST *pEpDist)
+{
+ HTC_ENDPOINT_CREDIT_DIST *pCurEntry,*pLastEntry;
+
+ if (NULL == target->EpCreditDistributionListHead) {
+ target->EpCreditDistributionListHead = pEpDist;
+ pEpDist->pNext = NULL;
+ pEpDist->pPrev = NULL;
+ return;
+ }
+
+ /* queue to the end of the list, this does not have to be very
+ * fast since this list is built at startup time */
+ pCurEntry = target->EpCreditDistributionListHead;
+
+ while (pCurEntry) {
+ pLastEntry = pCurEntry;
+ pCurEntry = pCurEntry->pNext;
+ }
+
+ pLastEntry->pNext = pEpDist;
+ pEpDist->pPrev = pLastEntry;
+ pEpDist->pNext = NULL;
+}
+
+
+
+/* default credit init callback */
+static void HTCDefaultCreditInit(void *Context,
+ HTC_ENDPOINT_CREDIT_DIST *pEPList,
+ int TotalCredits)
+{
+ HTC_ENDPOINT_CREDIT_DIST *pCurEpDist;
+ int totalEps = 0;
+ int creditsPerEndpoint;
+
+ pCurEpDist = pEPList;
+ /* first run through the list and figure out how many endpoints we are dealing with */
+ while (pCurEpDist != NULL) {
+ pCurEpDist = pCurEpDist->pNext;
+ totalEps++;
+ }
+
+ /* even distribution */
+ creditsPerEndpoint = TotalCredits/totalEps;
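+ /* integer division; for example, 18 credits across 4 endpoints gives 4 each, any remainder is left unassigned by this default policy */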
+
+ pCurEpDist = pEPList;
+ /* run through the list and set minimum and normal credits and
+ * provide the endpoint with some credits to start */
+ while (pCurEpDist != NULL) {
+
+ if (creditsPerEndpoint < pCurEpDist->TxCreditsPerMaxMsg) {
+ /* too many endpoints and not enough credits */
+ AR_DEBUG_ASSERT(FALSE);
+ break;
+ }
+ /* our minimum is set for at least 1 max message */
+ pCurEpDist->TxCreditsMin = pCurEpDist->TxCreditsPerMaxMsg;
+ /* this value is ignored by our credit algorithm since we do
+ * not dynamically adjust credits; this is the policy of
+ * the "default" credit distribution: something simple and easy */
+ pCurEpDist->TxCreditsNorm = 0xFFFF;
+ /* give the endpoint minimum credits */
+ pCurEpDist->TxCredits = creditsPerEndpoint;
+ pCurEpDist->TxCreditsAssigned = creditsPerEndpoint;
+ pCurEpDist = pCurEpDist->pNext;
+ }
+
+}
+
+/* default credit distribution callback, NOTE, this callback holds the TX lock */
+void HTCDefaultCreditDist(void *Context,
+ HTC_ENDPOINT_CREDIT_DIST *pEPDistList,
+ HTC_CREDIT_DIST_REASON Reason)
+{
+ HTC_ENDPOINT_CREDIT_DIST *pCurEpDist;
+
+ if (Reason == HTC_CREDIT_DIST_SEND_COMPLETE) {
+ pCurEpDist = pEPDistList;
+ /* simple distribution */
+ while (pCurEpDist != NULL) {
+ if (pCurEpDist->TxCreditsToDist > 0) {
+ /* just give the endpoint back the credits */
+ pCurEpDist->TxCredits += pCurEpDist->TxCreditsToDist;
+ pCurEpDist->TxCreditsToDist = 0;
+ }
+ pCurEpDist = pCurEpDist->pNext;
+ }
+ }
+
+ /* note we do not need to handle the other reason codes as this is a very
+ * simple distribution scheme, no need to seek for more credits or handle inactivity */
+}
+
+void HTCSetCreditDistribution(HTC_HANDLE HTCHandle,
+ void *pCreditDistContext,
+ HTC_CREDIT_DIST_CALLBACK CreditDistFunc,
+ HTC_CREDIT_INIT_CALLBACK CreditInitFunc,
+ HTC_SERVICE_ID ServicePriorityOrder[],
+ int ListLength)
+{
+ HTC_TARGET *target = GET_HTC_TARGET_FROM_HANDLE(HTCHandle);
+ int i;
+ int ep;
+
+ if (CreditInitFunc != NULL) {
+ /* caller has supplied their own distribution functions */
+ target->InitCredits = CreditInitFunc;
+ AR_DEBUG_ASSERT(CreditDistFunc != NULL);
+ target->DistributeCredits = CreditDistFunc;
+ target->pCredDistContext = pCreditDistContext;
+ } else {
+ /* caller wants HTC to do distribution */
+ /* if caller wants service to handle distributions then
+ * it must set both of these to NULL! */
+ AR_DEBUG_ASSERT(CreditDistFunc == NULL);
+ target->InitCredits = HTCDefaultCreditInit;
+ target->DistributeCredits = HTCDefaultCreditDist;
+ target->pCredDistContext = target;
+ }
+
+ /* always add the HTC control endpoint first; we only expose the list after the
+ * first one, and this entry is added for TX queue checking */
+ AddToEndpointDistList(target, &target->EndPoint[ENDPOINT_0].CreditDist);
+
+ /* build the list of credit distribution structures in priority order
+ * supplied by the caller; these will follow endpoint 0 */
+ for (i = 0; i < ListLength; i++) {
+ /* match services with endpoints and add the endpoints to the distribution list
+ * in FIFO order */
+ for (ep = ENDPOINT_1; ep < ENDPOINT_MAX; ep++) {
+ if (target->EndPoint[ep].ServiceID == ServicePriorityOrder[i]) {
+ /* queue this one to the list */
+ AddToEndpointDistList(target, &target->EndPoint[ep].CreditDist);
+ break;
+ }
+ }
+ AR_DEBUG_ASSERT(ep < ENDPOINT_MAX);
+ }
+
+}
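+
+/* Illustrative caller-side sketch of the default policy set up above. The
+ * service IDs and ARRAY_SIZE usage are assumptions about the surrounding WMI
+ * and OS glue, not code taken from this file: a host driver that wants HTC to
+ * apply the simple even-split default passes NULL callbacks and only the
+ * service priority order, e.g.
+ *
+ *   HTC_SERVICE_ID svcPrio[] = { WMI_CONTROL_SVC,
+ *                                WMI_DATA_VO_SVC,
+ *                                WMI_DATA_VI_SVC,
+ *                                WMI_DATA_BE_SVC,
+ *                                WMI_DATA_BK_SVC };
+ *
+ *   HTCSetCreditDistribution(htcHandle, NULL, NULL, NULL,
+ *                            svcPrio, ARRAY_SIZE(svcPrio));
+ *
+ * With NULL callbacks, InitCredits/DistributeCredits fall back to
+ * HTCDefaultCreditInit/HTCDefaultCreditDist, and credits are divided evenly
+ * across the listed endpoints at init time.
+ */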
diff --git a/drivers/staging/ath6kl/include/a_config.h b/drivers/staging/ath6kl/include/a_config.h
new file mode 100644
index 000000000000..4a0083c65113
--- /dev/null
+++ b/drivers/staging/ath6kl/include/a_config.h
@@ -0,0 +1,53 @@
+//------------------------------------------------------------------------------
+// <copyright file="a_config.h" company="Atheros">
+// Copyright (c) 2004-2010 Atheros Corporation. All rights reserved.
+//
+//
+// Permission to use, copy, modify, and/or distribute this software for any
+// purpose with or without fee is hereby granted, provided that the above
+// copyright notice and this permission notice appear in all copies.
+//
+// THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+// WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+// MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+// ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+// WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+// ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+// OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+//
+//
+//------------------------------------------------------------------------------
+//==============================================================================
+// This file contains software configuration options that enable
+// specific software "features"
+//
+// Author(s): ="Atheros"
+//==============================================================================
+#ifndef _A_CONFIG_H_
+#define _A_CONFIG_H_
+
+#ifdef UNDER_NWIFI
+#include "../os/windows/include/config.h"
+#endif
+
+#ifdef ATHR_CE_LEGACY
+#include "../os/windows/include/config.h"
+#endif
+
+#if defined(__linux__) && !defined(LINUX_EMULATION)
+#include "../os/linux/include/config_linux.h"
+#endif
+
+#ifdef REXOS
+#include "../os/rexos/include/common/config_rexos.h"
+#endif
+
+#ifdef WIN_NWF
+#include "../os/windows/include/win/config_win.h"
+#endif
+
+#ifdef THREADX
+#include "../os/threadx/include/common/config_threadx.h"
+#endif
+
+#endif
diff --git a/drivers/staging/ath6kl/include/a_debug.h b/drivers/staging/ath6kl/include/a_debug.h
new file mode 100644
index 000000000000..5a1b01fbb93c
--- /dev/null
+++ b/drivers/staging/ath6kl/include/a_debug.h
@@ -0,0 +1,224 @@
+//------------------------------------------------------------------------------
+// <copyright file="a_debug.h" company="Atheros">
+// Copyright (c) 2004-2010 Atheros Corporation. All rights reserved.
+//
+//
+// Permission to use, copy, modify, and/or distribute this software for any
+// purpose with or without fee is hereby granted, provided that the above
+// copyright notice and this permission notice appear in all copies.
+//
+// THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+// WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+// MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+// ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+// WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+// ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+// OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+//
+//
+//------------------------------------------------------------------------------
+//==============================================================================
+// Author(s): ="Atheros"
+//==============================================================================
+#ifndef _A_DEBUG_H_
+#define _A_DEBUG_H_
+
+#ifdef __cplusplus
+extern "C" {
+#endif /* __cplusplus */
+
+#include <a_types.h>
+#include <a_osapi.h>
+
+ /* standard debug print masks bits 0..7 */
+#define ATH_DEBUG_ERR (1 << 0) /* errors */
+#define ATH_DEBUG_WARN (1 << 1) /* warnings */
+#define ATH_DEBUG_INFO (1 << 2) /* informational (module startup info) */
+#define ATH_DEBUG_TRC (1 << 3) /* generic function call tracing */
+#define ATH_DEBUG_RSVD1 (1 << 4)
+#define ATH_DEBUG_RSVD2 (1 << 5)
+#define ATH_DEBUG_RSVD3 (1 << 6)
+#define ATH_DEBUG_RSVD4 (1 << 7)
+
+#define ATH_DEBUG_MASK_DEFAULTS (ATH_DEBUG_ERR | ATH_DEBUG_WARN)
+#define ATH_DEBUG_ANY 0xFFFF
+
+ /* other aliases used throughout */
+#define ATH_DEBUG_ERROR ATH_DEBUG_ERR
+#define ATH_LOG_ERR ATH_DEBUG_ERR
+#define ATH_LOG_INF ATH_DEBUG_INFO
+#define ATH_LOG_TRC ATH_DEBUG_TRC
+#define ATH_DEBUG_TRACE ATH_DEBUG_TRC
+#define ATH_DEBUG_INIT ATH_DEBUG_INFO
+
+ /* bits 8..31 are module-specific masks */
+#define ATH_DEBUG_MODULE_MASK_SHIFT 8
+
+ /* macro to make a module-specific mask */
+#define ATH_DEBUG_MAKE_MODULE_MASK(index) (1 << (ATH_DEBUG_MODULE_MASK_SHIFT + (index)))
+
+void DebugDumpBytes(A_UCHAR *buffer, A_UINT16 length, char *pDescription);
+
+/* Debug support on a per-module basis
+ *
+ * Usage:
+ *
+ * Each module can utilize its own debug mask variable. A set of commonly used
+ * masks is provided (ERRORS, WARNINGS, TRACE, etc.). It is up to each module
+ * to define module-specific masks using the macros above.
+ *
+ * Each module defines a single debug mask variable debug_XXX where the "name" of the module is
+ * common to all C-files within that module. This requires every C-file that includes a_debug.h
+ * to define the module name in that file.
+ *
+ * Example:
+ *
+ * #define ATH_MODULE_NAME htc
+ * #include "a_debug.h"
+ *
+ * This will define a debug mask structure called debug_htc and all debug macros will reference this
+ * variable.
+ *
+ * A module can define module-specific bit masks using the ATH_DEBUG_MAKE_MODULE_MASK() macro:
+ *
+ * #define ATH_DEBUG_MY_MASK1 ATH_DEBUG_MAKE_MODULE_MASK(0)
+ * #define ATH_DEBUG_MY_MASK2 ATH_DEBUG_MAKE_MODULE_MASK(1)
+ *
+ * The instantiation of the debug structure should be made by the module. When a module is
+ * instantiated, the module can set a description string, a default mask and an array of description
+ * entries containing information on each module-defined debug mask.
+ * NOTE: The instantiation is statically allocated, only one instance can exist per module.
+ *
+ * Example:
+ *
+ *
+ * #define ATH_DEBUG_BMI ATH_DEBUG_MAKE_MODULE_MASK(0)
+ *
+ * #ifdef DEBUG
+ * static ATH_DEBUG_MASK_DESCRIPTION bmi_debug_desc[] = {
+ * { ATH_DEBUG_BMI, "BMI Tracing"}, <== description of the module-specific mask
+ * };
+ *
+ * ATH_DEBUG_INSTANTIATE_MODULE_VAR(bmi,
+ * "bmi" <== module name
+ * "Boot Manager Interface", <== description of module
+ * ATH_DEBUG_MASK_DEFAULTS, <== defaults
+ * ATH_DEBUG_DESCRIPTION_COUNT(bmi_debug_desc),
+ * bmi_debug_desc);
+ *
+ * #endif
+ *
+ * A module can optionally register its debug module information in order for other tools to change the
+ * bit mask at runtime. A module can call A_REGISTER_MODULE_DEBUG_INFO() in its module
+ * init code. This macro can be called multiple times without consequence. The debug info maintains
+ * state to indicate whether the information was previously registered.
+ *
+ * */
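+
+/* Usage sketch, assuming AR_DEBUG_PRINTF is supplied by the OS-specific debug
+ * header included further below, and reusing the hypothetical ATH_DEBUG_BMI
+ * mask from the example above:
+ *
+ *   if (AR_DEBUG_LVL_CHECK(ATH_DEBUG_BMI)) {
+ *       AR_DEBUG_PRINTBUF(buffer, length, "BMI response");
+ *   }
+ *   AR_DEBUG_PRINTF(ATH_DEBUG_BMI, ("BMI: wrote %d bytes\n", length));
+ *
+ * AR_DEBUG_PRINTF is expected to perform the mask check itself in most OS
+ * ports, so the explicit AR_DEBUG_LVL_CHECK() is mainly useful to guard
+ * expensive argument computation or buffer dumps.
+ */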
+
+#define ATH_DEBUG_MAX_MASK_DESC_LENGTH 32
+#define ATH_DEBUG_MAX_MOD_DESC_LENGTH 64
+
+typedef struct {
+ A_UINT32 Mask;
+ A_CHAR Description[ATH_DEBUG_MAX_MASK_DESC_LENGTH];
+} ATH_DEBUG_MASK_DESCRIPTION;
+
+#define ATH_DEBUG_INFO_FLAGS_REGISTERED (1 << 0)
+
+typedef struct _ATH_DEBUG_MODULE_DBG_INFO{
+ struct _ATH_DEBUG_MODULE_DBG_INFO *pNext;
+ A_CHAR ModuleName[16];
+ A_CHAR ModuleDescription[ATH_DEBUG_MAX_MOD_DESC_LENGTH];
+ A_UINT32 Flags;
+ A_UINT32 CurrentMask;
+ int MaxDescriptions;
+ ATH_DEBUG_MASK_DESCRIPTION *pMaskDescriptions; /* pointer to array of descriptions */
+} ATH_DEBUG_MODULE_DBG_INFO;
+
+#define ATH_DEBUG_DESCRIPTION_COUNT(d) (int)((sizeof((d))) / (sizeof(ATH_DEBUG_MASK_DESCRIPTION)))
+
+#define GET_ATH_MODULE_DEBUG_VAR_NAME(s) _XGET_ATH_MODULE_NAME_DEBUG_(s)
+#define GET_ATH_MODULE_DEBUG_VAR_MASK(s) _XGET_ATH_MODULE_NAME_DEBUG_(s).CurrentMask
+#define _XGET_ATH_MODULE_NAME_DEBUG_(s) debug_ ## s
+
+#ifdef ATH_DEBUG_MODULE
+
+ /* for source files that will instantiate the debug variables */
+#define ATH_DEBUG_INSTANTIATE_MODULE_VAR(s,name,moddesc,initmask,count,descriptions) \
+ATH_DEBUG_MODULE_DBG_INFO GET_ATH_MODULE_DEBUG_VAR_NAME(s) = \
+ {NULL,(name),(moddesc),0,(initmask),count,(descriptions)}
+
+#ifdef ATH_MODULE_NAME
+extern ATH_DEBUG_MODULE_DBG_INFO GET_ATH_MODULE_DEBUG_VAR_NAME(ATH_MODULE_NAME);
+#define AR_DEBUG_LVL_CHECK(lvl) (GET_ATH_MODULE_DEBUG_VAR_MASK(ATH_MODULE_NAME) & (lvl))
+#endif /* ATH_MODULE_NAME */
+
+#define ATH_DEBUG_SET_DEBUG_MASK(s,lvl) GET_ATH_MODULE_DEBUG_VAR_MASK(s) = (lvl)
+
+#define ATH_DEBUG_DECLARE_EXTERN(s) \
+ extern ATH_DEBUG_MODULE_DBG_INFO GET_ATH_MODULE_DEBUG_VAR_NAME(s)
+
+#define AR_DEBUG_PRINTBUF(buffer, length, desc) DebugDumpBytes(buffer,length,desc)
+
+
+#define AR_DEBUG_ASSERT A_ASSERT
+
+void a_dump_module_debug_info(ATH_DEBUG_MODULE_DBG_INFO *pInfo);
+void a_register_module_debug_info(ATH_DEBUG_MODULE_DBG_INFO *pInfo);
+#define A_DUMP_MODULE_DEBUG_INFO(s) a_dump_module_debug_info(&(GET_ATH_MODULE_DEBUG_VAR_NAME(s)))
+#define A_REGISTER_MODULE_DEBUG_INFO(s) a_register_module_debug_info(&(GET_ATH_MODULE_DEBUG_VAR_NAME(s)))
+
+#else /* !ATH_DEBUG_MODULE */
+ /* NON ATH_DEBUG_MODULE */
+#define ATH_DEBUG_INSTANTIATE_MODULE_VAR(s,name,moddesc,initmask,count,descriptions)
+#define AR_DEBUG_LVL_CHECK(lvl) 0
+#define AR_DEBUG_PRINTBUF(buffer, length, desc)
+#define AR_DEBUG_ASSERT(test)
+#define ATH_DEBUG_DECLARE_EXTERN(s)
+#define ATH_DEBUG_SET_DEBUG_MASK(s,lvl)
+#define A_DUMP_MODULE_DEBUG_INFO(s)
+#define A_REGISTER_MODULE_DEBUG_INFO(s)
+
+#endif
+
+A_STATUS a_get_module_mask(A_CHAR *module_name, A_UINT32 *pMask);
+A_STATUS a_set_module_mask(A_CHAR *module_name, A_UINT32 Mask);
+void a_dump_module_debug_info_by_name(A_CHAR *module_name);
+void a_module_debug_support_init(void);
+void a_module_debug_support_cleanup(void);
+
+#ifdef UNDER_NWIFI
+#include "../os/windows/include/debug.h"
+#endif
+
+#ifdef ATHR_CE_LEGACY
+#include "../os/windows/include/debug.h"
+#endif
+
+#if defined(__linux__) && !defined(LINUX_EMULATION)
+#include "../os/linux/include/debug_linux.h"
+#endif
+
+#ifdef REXOS
+#include "../os/rexos/include/common/debug_rexos.h"
+#endif
+
+#if defined ART_WIN
+#include "../os/win_art/include/debug_win.h"
+#endif
+
+#ifdef WIN_NWF
+#include <debug_win.h>
+#endif
+
+#ifdef THREADX
+#define ATH_DEBUG_MAKE_MODULE_MASK(index) (1 << (ATH_DEBUG_MODULE_MASK_SHIFT + (index)))
+#include "../os/threadx/include/common/debug_threadx.h"
+#endif
+
+
+#ifdef __cplusplus
+}
+#endif /* __cplusplus */
+
+#endif
diff --git a/drivers/staging/ath6kl/include/a_drv.h b/drivers/staging/ath6kl/include/a_drv.h
new file mode 100644
index 000000000000..6db10f0f2d10
--- /dev/null
+++ b/drivers/staging/ath6kl/include/a_drv.h
@@ -0,0 +1,54 @@
+//------------------------------------------------------------------------------
+// <copyright file="a_drv.h" company="Atheros">
+// Copyright (c) 2004-2010 Atheros Corporation. All rights reserved.
+//
+//
+// Permission to use, copy, modify, and/or distribute this software for any
+// purpose with or without fee is hereby granted, provided that the above
+// copyright notice and this permission notice appear in all copies.
+//
+// THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+// WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+// MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+// ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+// WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+// ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+// OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+//
+//
+//------------------------------------------------------------------------------
+//==============================================================================
+// This file selects the platform-specific Atheros driver API header
+// (athdrv_*) for the build target.
+//
+// Author(s): ="Atheros"
+//==============================================================================
+#ifndef _A_DRV_H_
+#define _A_DRV_H_
+
+#if defined(__linux__) && !defined(LINUX_EMULATION)
+#include "../os/linux/include/athdrv_linux.h"
+#endif
+
+#ifdef UNDER_NWIFI
+#include "../os/windows/include/athdrv.h"
+#endif
+
+#ifdef ATHR_CE_LEGACY
+#include "../os/windows/include/athdrv.h"
+#endif
+
+#ifdef REXOS
+#include "../os/rexos/include/common/athdrv_rexos.h"
+#endif
+
+#ifdef WIN_NWF
+#include "../os/windows/include/athdrv.h"
+#endif
+
+#ifdef THREADX
+#include "../os/threadx/include/common/athdrv_threadx.h"
+#endif
+
+#endif /* _A_DRV_H_ */
diff --git a/drivers/staging/ath6kl/include/a_drv_api.h b/drivers/staging/ath6kl/include/a_drv_api.h
new file mode 100644
index 000000000000..7d077c62ad70
--- /dev/null
+++ b/drivers/staging/ath6kl/include/a_drv_api.h
@@ -0,0 +1,232 @@
+//------------------------------------------------------------------------------
+// <copyright file="a_drv_api.h" company="Atheros">
+// Copyright (c) 2004-2010 Atheros Corporation. All rights reserved.
+//
+//
+// Permission to use, copy, modify, and/or distribute this software for any
+// purpose with or without fee is hereby granted, provided that the above
+// copyright notice and this permission notice appear in all copies.
+//
+// THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+// WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+// MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+// ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+// WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+// ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+// OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+//
+//
+//------------------------------------------------------------------------------
+//==============================================================================
+// Author(s): ="Atheros"
+//==============================================================================
+#ifndef _A_DRV_API_H_
+#define _A_DRV_API_H_
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+/****************************************************************************/
+/****************************************************************************/
+/** **/
+/** WMI related hooks **/
+/** **/
+/****************************************************************************/
+/****************************************************************************/
+
+#include <ar6000_api.h>
+
+#define A_WMI_CHANNELLIST_RX(devt, numChan, chanList) \
+ ar6000_channelList_rx((devt), (numChan), (chanList))
+
+#define A_WMI_SET_NUMDATAENDPTS(devt, num) \
+ ar6000_set_numdataendpts((devt), (num))
+
+#define A_WMI_CONTROL_TX(devt, osbuf, streamID) \
+ ar6000_control_tx((devt), (osbuf), (streamID))
+
+#define A_WMI_TARGETSTATS_EVENT(devt, pStats, len) \
+ ar6000_targetStats_event((devt), (pStats), (len))
+
+#define A_WMI_SCANCOMPLETE_EVENT(devt, status) \
+ ar6000_scanComplete_event((devt), (status))
+
+#ifdef CONFIG_HOST_DSET_SUPPORT
+
+#define A_WMI_DSET_DATA_REQ(devt, access_cookie, offset, length, targ_buf, targ_reply_fn, targ_reply_arg) \
+ ar6000_dset_data_req((devt), (access_cookie), (offset), (length), (targ_buf), (targ_reply_fn), (targ_reply_arg))
+
+#define A_WMI_DSET_CLOSE(devt, access_cookie) \
+ ar6000_dset_close((devt), (access_cookie))
+
+#endif
+
+#define A_WMI_DSET_OPEN_REQ(devt, id, targ_handle, targ_reply_fn, targ_reply_arg) \
+ ar6000_dset_open_req((devt), (id), (targ_handle), (targ_reply_fn), (targ_reply_arg))
+
+#define A_WMI_CONNECT_EVENT(devt, channel, bssid, listenInterval, beaconInterval, networkType, beaconIeLen, assocReqLen, assocRespLen, assocInfo) \
+ ar6000_connect_event((devt), (channel), (bssid), (listenInterval), (beaconInterval), (networkType), (beaconIeLen), (assocReqLen), (assocRespLen), (assocInfo))
+
+#define A_WMI_PSPOLL_EVENT(devt, aid)\
+ ar6000_pspoll_event((devt),(aid))
+
+#define A_WMI_DTIMEXPIRY_EVENT(devt)\
+ ar6000_dtimexpiry_event((devt))
+
+#ifdef WAPI_ENABLE
+#define A_WMI_WAPI_REKEY_EVENT(devt, type, mac)\
+ ap_wapi_rekey_event((devt),(type),(mac))
+#endif
+
+#define A_WMI_REGDOMAIN_EVENT(devt, regCode) \
+ ar6000_regDomain_event((devt), (regCode))
+
+#define A_WMI_NEIGHBORREPORT_EVENT(devt, numAps, info) \
+ ar6000_neighborReport_event((devt), (numAps), (info))
+
+#define A_WMI_DISCONNECT_EVENT(devt, reason, bssid, assocRespLen, assocInfo, protocolReasonStatus) \
+ ar6000_disconnect_event((devt), (reason), (bssid), (assocRespLen), (assocInfo), (protocolReasonStatus))
+
+#define A_WMI_TKIP_MICERR_EVENT(devt, keyid, ismcast) \
+ ar6000_tkip_micerr_event((devt), (keyid), (ismcast))
+
+#define A_WMI_BITRATE_RX(devt, rateKbps) \
+ ar6000_bitrate_rx((devt), (rateKbps))
+
+#define A_WMI_TXPWR_RX(devt, txPwr) \
+ ar6000_txPwr_rx((devt), (txPwr))
+
+#define A_WMI_READY_EVENT(devt, datap, phyCap, sw_ver, abi_ver) \
+ ar6000_ready_event((devt), (datap), (phyCap), (sw_ver), (abi_ver))
+
+#define A_WMI_DBGLOG_INIT_DONE(ar) \
+ ar6000_dbglog_init_done(ar);
+
+#define A_WMI_RSSI_THRESHOLD_EVENT(devt, newThreshold, rssi) \
+ ar6000_rssiThreshold_event((devt), (newThreshold), (rssi))
+
+#define A_WMI_REPORT_ERROR_EVENT(devt, errorVal) \
+ ar6000_reportError_event((devt), (errorVal))
+
+#define A_WMI_ROAM_TABLE_EVENT(devt, pTbl) \
+ ar6000_roam_tbl_event((devt), (pTbl))
+
+#define A_WMI_ROAM_DATA_EVENT(devt, p) \
+ ar6000_roam_data_event((devt), (p))
+
+#define A_WMI_WOW_LIST_EVENT(devt, num_filters, wow_filters) \
+ ar6000_wow_list_event((devt), (num_filters), (wow_filters))
+
+#define A_WMI_CAC_EVENT(devt, ac, cac_indication, statusCode, tspecSuggestion) \
+ ar6000_cac_event((devt), (ac), (cac_indication), (statusCode), (tspecSuggestion))
+
+#define A_WMI_CHANNEL_CHANGE_EVENT(devt, oldChannel, newChannel) \
+ ar6000_channel_change_event((devt), (oldChannel), (newChannel))
+
+#define A_WMI_PMKID_LIST_EVENT(devt, num_pmkid, pmkid_list, bssid_list) \
+ ar6000_pmkid_list_event((devt), (num_pmkid), (pmkid_list), (bssid_list))
+
+#define A_WMI_PEER_EVENT(devt, eventCode, bssid) \
+ ar6000_peer_event ((devt), (eventCode), (bssid))
+
+#ifdef CONFIG_HOST_GPIO_SUPPORT
+
+#define A_WMI_GPIO_INTR_RX(intr_mask, input_values) \
+ ar6000_gpio_intr_rx((intr_mask), (input_values))
+
+#define A_WMI_GPIO_DATA_RX(reg_id, value) \
+ ar6000_gpio_data_rx((reg_id), (value))
+
+#define A_WMI_GPIO_ACK_RX() \
+ ar6000_gpio_ack_rx()
+
+#endif
+
+#ifdef SEND_EVENT_TO_APP
+
+#define A_WMI_SEND_EVENT_TO_APP(ar, eventId, datap, len) \
+ ar6000_send_event_to_app((ar), (eventId), (datap), (len))
+
+#define A_WMI_SEND_GENERIC_EVENT_TO_APP(ar, eventId, datap, len) \
+ ar6000_send_generic_event_to_app((ar), (eventId), (datap), (len))
+
+#else
+
+#define A_WMI_SEND_EVENT_TO_APP(ar, eventId, datap, len)
+#define A_WMI_SEND_GENERIC_EVENT_TO_APP(ar, eventId, datap, len)
+
+#endif
+
+#ifdef CONFIG_HOST_TCMD_SUPPORT
+#define A_WMI_TCMD_RX_REPORT_EVENT(devt, results, len) \
+ ar6000_tcmd_rx_report_event((devt), (results), (len))
+#endif
+
+#define A_WMI_HBCHALLENGERESP_EVENT(devt, cookie, source) \
+ ar6000_hbChallengeResp_event((devt), (cookie), (source))
+
+#define A_WMI_TX_RETRY_ERR_EVENT(devt) \
+ ar6000_tx_retry_err_event((devt))
+
+#define A_WMI_SNR_THRESHOLD_EVENT_RX(devt, newThreshold, snr) \
+ ar6000_snrThresholdEvent_rx((devt), (newThreshold), (snr))
+
+#define A_WMI_LQ_THRESHOLD_EVENT_RX(devt, range, lqVal) \
+ ar6000_lqThresholdEvent_rx((devt), (range), (lqVal))
+
+#define A_WMI_RATEMASK_RX(devt, ratemask) \
+ ar6000_ratemask_rx((devt), (ratemask))
+
+#define A_WMI_KEEPALIVE_RX(devt, configured) \
+ ar6000_keepalive_rx((devt), (configured))
+
+#define A_WMI_BSSINFO_EVENT_RX(ar, datap, len) \
+ ar6000_bssInfo_event_rx((ar), (datap), (len))
+
+#define A_WMI_DBGLOG_EVENT(ar, dropped, buffer, length) \
+ ar6000_dbglog_event((ar), (dropped), (buffer), (length));
+
+#define A_WMI_STREAM_TX_ACTIVE(devt,trafficClass) \
+ ar6000_indicate_tx_activity((devt),(trafficClass), TRUE)
+
+#define A_WMI_STREAM_TX_INACTIVE(devt,trafficClass) \
+ ar6000_indicate_tx_activity((devt),(trafficClass), FALSE)
+#define A_WMI_Ac2EndpointID(devht, ac)\
+ ar6000_ac2_endpoint_id((devht), (ac))
+
+#define A_WMI_AGGR_RECV_ADDBA_REQ_EVT(devt, cmd)\
+ ar6000_aggr_rcv_addba_req_evt((devt), (cmd))
+#define A_WMI_AGGR_RECV_ADDBA_RESP_EVT(devt, cmd)\
+ ar6000_aggr_rcv_addba_resp_evt((devt), (cmd))
+#define A_WMI_AGGR_RECV_DELBA_REQ_EVT(devt, cmd)\
+ ar6000_aggr_rcv_delba_req_evt((devt), (cmd))
+#define A_WMI_HCI_EVENT_EVT(devt, cmd)\
+ ar6000_hci_event_rcv_evt((devt), (cmd))
+
+#define A_WMI_Endpoint2Ac(devt, ep) \
+ ar6000_endpoint_id2_ac((devt), (ep))
+
+#define A_WMI_BTCOEX_CONFIG_EVENT(devt, evt, len)\
+ ar6000_btcoex_config_event((devt), (evt), (len))
+
+#define A_WMI_BTCOEX_STATS_EVENT(devt, datap, len)\
+ ar6000_btcoex_stats_event((devt), (datap), (len))
+
+/****************************************************************************/
+/****************************************************************************/
+/** **/
+/** HTC related hooks **/
+/** **/
+/****************************************************************************/
+/****************************************************************************/
+
+#if defined(CONFIG_TARGET_PROFILE_SUPPORT)
+#define A_WMI_PROF_COUNT_RX(addr, count) prof_count_rx((addr), (count))
+#endif /* CONFIG_TARGET_PROFILE_SUPPORT */
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif
diff --git a/drivers/staging/ath6kl/include/a_osapi.h b/drivers/staging/ath6kl/include/a_osapi.h
new file mode 100644
index 000000000000..7bdeeea21503
--- /dev/null
+++ b/drivers/staging/ath6kl/include/a_osapi.h
@@ -0,0 +1,61 @@
+//------------------------------------------------------------------------------
+// <copyright file="a_osapi.h" company="Atheros">
+// Copyright (c) 2004-2010 Atheros Corporation. All rights reserved.
+//
+//
+// Permission to use, copy, modify, and/or distribute this software for any
+// purpose with or without fee is hereby granted, provided that the above
+// copyright notice and this permission notice appear in all copies.
+//
+// THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+// WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+// MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+// ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+// WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+// ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+// OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+//
+//
+//------------------------------------------------------------------------------
+//==============================================================================
+// This file selects the platform-specific OS abstraction (osapi) header
+// for the build target.
+//
+// Author(s): ="Atheros"
+//==============================================================================
+#ifndef _A_OSAPI_H_
+#define _A_OSAPI_H_
+
+#if defined(__linux__) && !defined(LINUX_EMULATION)
+#include "../os/linux/include/osapi_linux.h"
+#endif
+
+#ifdef UNDER_NWIFI
+#include "../os/windows/include/osapi.h"
+#include "../os/windows/include/netbuf.h"
+#endif
+
+#ifdef ATHR_CE_LEGACY
+#include "../os/windows/include/osapi.h"
+#include "../os/windows/include/netbuf.h"
+#endif
+
+#ifdef REXOS
+#include "../os/rexos/include/common/osapi_rexos.h"
+#endif
+
+#if defined ART_WIN
+#include "../os/win_art/include/osapi_win.h"
+#include "../os/win_art/include/netbuf.h"
+#endif
+
+#ifdef WIN_NWF
+#include <osapi_win.h>
+#endif
+
+#if defined(THREADX)
+#include "../os/threadx/include/common/osapi_threadx.h"
+#endif
+
+#endif /* _A_OSAPI_H_ */
diff --git a/drivers/staging/ath6kl/include/a_types.h b/drivers/staging/ath6kl/include/a_types.h
new file mode 100644
index 000000000000..18f4cfe4f97d
--- /dev/null
+++ b/drivers/staging/ath6kl/include/a_types.h
@@ -0,0 +1,58 @@
+//------------------------------------------------------------------------------
+// <copyright file="a_types.h" company="Atheros">
+// Copyright (c) 2004-2010 Atheros Corporation. All rights reserved.
+//
+//
+// Permission to use, copy, modify, and/or distribute this software for any
+// purpose with or without fee is hereby granted, provided that the above
+// copyright notice and this permission notice appear in all copies.
+//
+// THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+// WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+// MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+// ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+// WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+// ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+// OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+//
+//
+//------------------------------------------------------------------------------
+//==============================================================================
+// This file contains the definitions of the basic atheros data types.
+// It is used to map the data types in atheros files to a platform specific
+// type.
+//
+// Author(s): ="Atheros"
+//==============================================================================
+#ifndef _A_TYPES_H_
+#define _A_TYPES_H_
+
+#if defined(__linux__) && !defined(LINUX_EMULATION)
+#include "../os/linux/include/athtypes_linux.h"
+#endif
+
+#ifdef UNDER_NWIFI
+#include "../os/windows/include/athtypes.h"
+#endif
+
+#ifdef ATHR_CE_LEGACY
+#include "../os/windows/include/athtypes.h"
+#endif
+
+#ifdef REXOS
+#include "../os/rexos/include/common/athtypes_rexos.h"
+#endif
+
+#if defined ART_WIN
+#include "../os/win_art/include/athtypes_win.h"
+#endif
+
+#ifdef WIN_NWF
+#include <athtypes_win.h>
+#endif
+
+#ifdef THREADX
+#include "../os/threadx/include/common/athtypes_threadx.h"
+#endif
+
+#endif /* _A_TYPES_H_ */
diff --git a/drivers/staging/ath6kl/include/aggr_recv_api.h b/drivers/staging/ath6kl/include/aggr_recv_api.h
new file mode 100644
index 000000000000..0682bb4edcf1
--- /dev/null
+++ b/drivers/staging/ath6kl/include/aggr_recv_api.h
@@ -0,0 +1,140 @@
+/*
+ *
+ * Copyright (c) 2004-2010 Atheros Communications Inc.
+ * All rights reserved.
+ *
+ *
+//
+// Permission to use, copy, modify, and/or distribute this software for any
+// purpose with or without fee is hereby granted, provided that the above
+// copyright notice and this permission notice appear in all copies.
+//
+// THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+// WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+// MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+// ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+// WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+// ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+// OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+//
+//
+ *
+ */
+
+#ifndef __AGGR_RECV_API_H__
+#define __AGGR_RECV_API_H__
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+typedef void (* RX_CALLBACK)(void * dev, void *osbuf);
+
+typedef void (* ALLOC_NETBUFS)(A_NETBUF_QUEUE_T *q, A_UINT16 num);
+
+/*
+ * aggr_init:
+ * Initialises the data structures, allocates data queues and
+ * os buffers. Netbuf allocator is the input param, used by the
+ * aggr module for allocation of NETBUFs from driver context.
+ * These NETBUFs are used for AMSDU processing.
+ * Returns the context for the aggr module.
+ */
+void *
+aggr_init(ALLOC_NETBUFS netbuf_allocator);
+
+
+/*
+ * aggr_register_rx_dispatcher:
+ * Registers the OS callback function used to deliver
+ * frames to the OS. This is generally the topmost layer of
+ * the driver context, after which the frames go to the
+ * IP stack via the callback function.
+ * This dispatcher is active only when aggregation is ON.
+ */
+void
+aggr_register_rx_dispatcher(void *cntxt, void * dev, RX_CALLBACK fn);
+
+
+/*
+ * aggr_process_bar:
+ * When target receives BAR, it communicates to host driver
+ * for modifying window parameters. Target indicates this via the
+ * event: WMI_ADDBA_REQ_EVENTID. Host will dequeue all frames
+ * up to the indicated sequence number.
+ */
+void
+aggr_process_bar(void *cntxt, A_UINT8 tid, A_UINT16 seq_no);
+
+
+/*
+ * aggr_recv_addba_req_evt:
+ * This event is to initiate/modify the receive side window.
+ * Target will send WMI_ADDBA_REQ_EVENTID event to host - to setup
+ * recv re-ordering queues. Target will negotiate ADDBA with peer,
+ * and indicate via this event after successfully completing the
+ * negotiation. This happens in two situations:
+ * 1. Initial setup of aggregation
+ * 2. Renegotiation of current recv window.
+ * Window size for re-ordering is limited by target buffer
+ * space, which is reflected in win_sz.
+ * (Re)start the periodic timer to deliver long-standing frames
+ * in hold_q to the OS.
+ */
+void
+aggr_recv_addba_req_evt(void * cntxt, A_UINT8 tid, A_UINT16 seq_no, A_UINT8 win_sz);
+
+
+/*
+ * aggr_recv_delba_req_evt:
+ * Target indicates deletion of a BA window for a tid via the
+ * WMI_DELBA_EVENTID. Host would deliver all the frames in the
+ * hold_q, reset tid config and disable the periodic timer, if
+ * aggr is not enabled on any tid.
+ */
+void
+aggr_recv_delba_req_evt(void * cntxt, A_UINT8 tid);
+
+
+
+/*
+ * aggr_process_recv_frm:
+ * Called only for data frames. When aggr is ON for a tid, the buffer
+ * is always consumed, and osbuf would be NULL. For a non-aggr case,
+ * osbuf is not modified.
+ * AMSDU frames are consumed and are later freed. They are sliced and
+ * diced into individual frames and dispatched to the stack.
+ * After consuming an osbuf (when aggr is ON), a previously registered
+ * callback may be called to deliver frames in order.
+ */
+void
+aggr_process_recv_frm(void *cntxt, A_UINT8 tid, A_UINT16 seq_no, A_BOOL is_amsdu, void **osbuf);
+
+
+/*
+ * aggr_module_destroy:
+ * Frees up all the queues and frames in them. Releases the cntxt to OS.
+ */
+void
+aggr_module_destroy(void *cntxt);
+
+/*
+ * Dumps the aggregation stats
+ */
+void
+aggr_dump_stats(void *cntxt, PACKET_LOG **log_buf);
+
+/*
+ * aggr_reset_state -- Called when it is deemed necessary to clear the aggregate
+ * hold Q state. Examples include when a Connect event or disconnect event is
+ * received.
+ */
+void
+aggr_reset_state(void *cntxt);
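+
+/*
+ * Typical call sequence (illustrative sketch only; the allocator and
+ * dispatch callbacks shown are hypothetical driver-side functions):
+ *
+ *   void *aggr_ctx = aggr_init(my_netbuf_allocator);
+ *   aggr_register_rx_dispatcher(aggr_ctx, dev, my_rx_deliver_fn);
+ *
+ *   // per received data frame:
+ *   aggr_process_recv_frm(aggr_ctx, tid, seq_no, is_amsdu, &osbuf);
+ *   if (osbuf != NULL) {
+ *       // not consumed by aggregation; deliver directly to the stack
+ *   }
+ *
+ *   aggr_reset_state(aggr_ctx);    // on connect/disconnect events
+ *   aggr_module_destroy(aggr_ctx); // on driver unload
+ */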
+
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /*__AGGR_RECV_API_H__ */
diff --git a/drivers/staging/ath6kl/include/ar3kconfig.h b/drivers/staging/ath6kl/include/ar3kconfig.h
new file mode 100644
index 000000000000..a10788cee461
--- /dev/null
+++ b/drivers/staging/ath6kl/include/ar3kconfig.h
@@ -0,0 +1,65 @@
+//------------------------------------------------------------------------------
+// Copyright (c) 2009-2010 Atheros Corporation. All rights reserved.
+//
+//
+// Permission to use, copy, modify, and/or distribute this software for any
+// purpose with or without fee is hereby granted, provided that the above
+// copyright notice and this permission notice appear in all copies.
+//
+// THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+// WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+// MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+// ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+// WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+// ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+// OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+//
+//
+//------------------------------------------------------------------------------
+//==============================================================================
+// Author(s): ="Atheros"
+//==============================================================================
+
+/* AR3K module configuration APIs for HCI-bridge operation */
+
+#ifndef AR3KCONFIG_H_
+#define AR3KCONFIG_H_
+
+#include <net/bluetooth/bluetooth.h>
+#include <net/bluetooth/hci_core.h>
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#define AR3K_CONFIG_FLAG_FORCE_MINBOOT_EXIT (1 << 0)
+#define AR3K_CONFIG_FLAG_SET_AR3K_BAUD (1 << 1)
+#define AR3K_CONFIG_FLAG_AR3K_BAUD_CHANGE_DELAY (1 << 2)
+#define AR3K_CONFIG_FLAG_SET_AR6K_SCALE_STEP (1 << 3)
+
+
+typedef struct {
+ A_UINT32 Flags; /* config flags */
+ void *pHCIDev; /* HCI bridge device */
+ HCI_TRANSPORT_PROPERTIES *pHCIProps; /* HCI bridge props */
+ HIF_DEVICE *pHIFDevice; /* HIF layer device */
+
+ A_UINT32 AR3KBaudRate; /* AR3K operational baud rate */
+ A_UINT16 AR6KScale; /* AR6K UART scale value */
+ A_UINT16 AR6KStep; /* AR6K UART step value */
+ struct hci_dev *pBtStackHCIDev; /* BT Stack HCI dev */
+ A_UINT32 PwrMgmtEnabled; /* TLPM enabled? */
+ A_UINT16 IdleTimeout; /* TLPM idle timeout */
+ A_UINT16 WakeupTimeout; /* TLPM wakeup timeout */
+ A_UINT8 bdaddr[6]; /* Bluetooth device address */
+} AR3K_CONFIG_INFO;
+
+A_STATUS AR3KConfigure(AR3K_CONFIG_INFO *pConfigInfo);
+
+A_STATUS AR3KConfigureExit(void *config);
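+
+/* Illustrative configuration sketch. The field values, device pointers and the
+ * A_OK comparison are placeholders/assumptions about the surrounding HCI/HIF
+ * glue, not a prescribed sequence:
+ *
+ *   AR3K_CONFIG_INFO cfg = { 0 };
+ *
+ *   cfg.Flags        = AR3K_CONFIG_FLAG_SET_AR3K_BAUD;
+ *   cfg.AR3KBaudRate = 3000000;
+ *   cfg.pHCIDev      = hciDev;      // from the HCI bridge layer
+ *   cfg.pHCIProps    = &hciProps;
+ *   cfg.pHIFDevice   = hifDevice;
+ *
+ *   if (AR3KConfigure(&cfg) != A_OK) {
+ *       // handle configuration failure
+ *   }
+ */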
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /*AR3KCONFIG_H_*/
diff --git a/drivers/staging/ath6kl/include/ar6000_api.h b/drivers/staging/ath6kl/include/ar6000_api.h
new file mode 100644
index 000000000000..1e1d92a507e2
--- /dev/null
+++ b/drivers/staging/ath6kl/include/ar6000_api.h
@@ -0,0 +1,54 @@
+//------------------------------------------------------------------------------
+// <copyright file="ar6000_api.h" company="Atheros">
+// Copyright (c) 2004-2010 Atheros Corporation. All rights reserved.
+//
+//
+// Permission to use, copy, modify, and/or distribute this software for any
+// purpose with or without fee is hereby granted, provided that the above
+// copyright notice and this permission notice appear in all copies.
+//
+// THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+// WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+// MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+// ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+// WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+// ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+// OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+//
+//
+//------------------------------------------------------------------------------
+//==============================================================================
+// This file contains the API to access the OS dependent atheros host driver
+// by the WMI or WLAN generic modules.
+//
+// Author(s): ="Atheros"
+//==============================================================================
+#ifndef _AR6000_API_H_
+#define _AR6000_API_H_
+
+#if defined(__linux__) && !defined(LINUX_EMULATION)
+#include "../os/linux/include/ar6xapi_linux.h"
+#endif
+
+#ifdef UNDER_NWIFI
+#include "../os/windows/include/ar6xapi.h"
+#endif
+
+#ifdef ATHR_CE_LEGACY
+#include "../os/windows/include/ar6xapi.h"
+#endif
+
+#ifdef REXOS
+#include "../os/rexos/include/common/ar6xapi_rexos.h"
+#endif
+
+#if defined ART_WIN
+#include "../os/win_art/include/ar6xapi_win.h"
+#endif
+
+#ifdef WIN_NWF
+#include "../os/windows/include/ar6xapi.h"
+#endif
+
+#endif /* _AR6000_API_H_ */
+
diff --git a/drivers/staging/ath6kl/include/ar6000_diag.h b/drivers/staging/ath6kl/include/ar6000_diag.h
new file mode 100644
index 000000000000..b53512e23d32
--- /dev/null
+++ b/drivers/staging/ath6kl/include/ar6000_diag.h
@@ -0,0 +1,48 @@
+//------------------------------------------------------------------------------
+// <copyright file="ar6000_diag.h" company="Atheros">
+// Copyright (c) 2004-2010 Atheros Corporation. All rights reserved.
+//
+//
+// Permission to use, copy, modify, and/or distribute this software for any
+// purpose with or without fee is hereby granted, provided that the above
+// copyright notice and this permission notice appear in all copies.
+//
+// THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+// WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+// MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+// ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+// WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+// ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+// OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+//
+//
+//------------------------------------------------------------------------------
+//==============================================================================
+// Author(s): ="Atheros"
+//==============================================================================
+
+#ifndef AR6000_DIAG_H_
+#define AR6000_DIAG_H_
+
+
+A_STATUS
+ar6000_ReadRegDiag(HIF_DEVICE *hifDevice, A_UINT32 *address, A_UINT32 *data);
+
+A_STATUS
+ar6000_WriteRegDiag(HIF_DEVICE *hifDevice, A_UINT32 *address, A_UINT32 *data);
+
+A_STATUS
+ar6000_ReadDataDiag(HIF_DEVICE *hifDevice, A_UINT32 address,
+ A_UCHAR *data, A_UINT32 length);
+
+A_STATUS
+ar6000_WriteDataDiag(HIF_DEVICE *hifDevice, A_UINT32 address,
+ A_UCHAR *data, A_UINT32 length);
+
+A_STATUS
+ar6k_ReadTargetRegister(HIF_DEVICE *hifDevice, int regsel, A_UINT32 *regval);
+
+void
+ar6k_FetchTargetRegs(HIF_DEVICE *hifDevice, A_UINT32 *targregs);
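+
+/* Illustrative read sketch; the register address is a hypothetical placeholder
+ * and success is assumed to be signalled by A_OK:
+ *
+ *   A_UINT32 addr = some_target_reg_address;
+ *   A_UINT32 val;
+ *
+ *   if (ar6000_ReadRegDiag(hifDevice, &addr, &val) == A_OK) {
+ *       // val now holds the 32-bit register contents
+ *   }
+ */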
+
+#endif /*AR6000_DIAG_H_*/
diff --git a/drivers/staging/ath6kl/include/ar6kap_common.h b/drivers/staging/ath6kl/include/ar6kap_common.h
new file mode 100644
index 000000000000..9b1b8bfae675
--- /dev/null
+++ b/drivers/staging/ath6kl/include/ar6kap_common.h
@@ -0,0 +1,44 @@
+//------------------------------------------------------------------------------
+
+// <copyright file="ar6kap_common.h" company="Atheros">
+// Copyright (c) 2004-2010 Atheros Corporation. All rights reserved.
+//
+//
+// Permission to use, copy, modify, and/or distribute this software for any
+// purpose with or without fee is hereby granted, provided that the above
+// copyright notice and this permission notice appear in all copies.
+//
+// THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+// WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+// MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+// ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+// WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+// ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+// OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+//
+//
+//------------------------------------------------------------------------------
+
+//==============================================================================
+
+// This file contains the definitions of common AP mode data structures.
+//
+// Author(s): ="Atheros"
+//==============================================================================
+
+#ifndef _AR6KAP_COMMON_H_
+#define _AR6KAP_COMMON_H_
+/*
+ * Used with AR6000_XIOCTL_AP_GET_STA_LIST
+ */
+typedef struct {
+ A_UINT8 mac[ATH_MAC_LEN];
+ A_UINT8 aid;
+ A_UINT8 keymgmt;
+ A_UINT8 ucipher;
+ A_UINT8 auth;
+} station_t;
+typedef struct {
+ station_t sta[AP_MAX_NUM_STA];
+} ap_get_sta_t;
+#endif /* _AR6KAP_COMMON_H_ */
diff --git a/drivers/staging/ath6kl/include/athbtfilter.h b/drivers/staging/ath6kl/include/athbtfilter.h
new file mode 100644
index 000000000000..dbe68bbb727c
--- /dev/null
+++ b/drivers/staging/ath6kl/include/athbtfilter.h
@@ -0,0 +1,135 @@
+//------------------------------------------------------------------------------
+// <copyright file="athbtfilter.h" company="Atheros">
+// Copyright (c) 2007-2010 Atheros Corporation. All rights reserved.
+//
+//
+// Permission to use, copy, modify, and/or distribute this software for any
+// purpose with or without fee is hereby granted, provided that the above
+// copyright notice and this permission notice appear in all copies.
+//
+// THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+// WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+// MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+// ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+// WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+// ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+// OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+//
+//
+//------------------------------------------------------------------------------
+//==============================================================================
+// Public Bluetooth filter APIs
+// Author(s): ="Atheros"
+//==============================================================================
+#ifndef ATHBTFILTER_H_
+#define ATHBTFILTER_H_
+
+#define ATH_DEBUG_INFO (1 << 2)
+#define ATH_DEBUG_INF ATH_DEBUG_INFO
+
+typedef enum _ATHBT_HCI_CTRL_TYPE {
+ ATHBT_HCI_COMMAND = 0,
+ ATHBT_HCI_EVENT = 1,
+} ATHBT_HCI_CTRL_TYPE;
+
+typedef enum _ATHBT_STATE_INDICATION {
+ ATH_BT_NOOP = 0,
+ ATH_BT_INQUIRY = 1,
+ ATH_BT_CONNECT = 2,
+ ATH_BT_SCO = 3,
+ ATH_BT_ACL = 4,
+ ATH_BT_A2DP = 5,
+ ATH_BT_ESCO = 6,
+ /* new states go here.. */
+
+ ATH_BT_MAX_STATE_INDICATION
+} ATHBT_STATE_INDICATION;
+
+ /* filter function for OUTGOING commands and INCOMING events */
+typedef void (*ATHBT_FILTER_CMD_EVENTS_FN)(void *pContext, ATHBT_HCI_CTRL_TYPE Type, unsigned char *pBuffer, int Length);
+
+ /* filter function for OUTGOING data HCI packets */
+typedef void (*ATHBT_FILTER_DATA_FN)(void *pContext, unsigned char *pBuffer, int Length);
+
+typedef enum _ATHBT_STATE {
+ STATE_OFF = 0,
+ STATE_ON = 1,
+ STATE_MAX
+} ATHBT_STATE;
+
+ /* BT state indication (when filter functions are not used) */
+
+typedef void (*ATHBT_INDICATE_STATE_FN)(void *pContext, ATHBT_STATE_INDICATION Indication, ATHBT_STATE State, unsigned char LMPVersion);
+
+typedef struct _ATHBT_FILTER_INSTANCE {
+#ifdef UNDER_CE
+ WCHAR *pWlanAdapterName; /* filled in by user */
+#else
+ char *pWlanAdapterName; /* filled in by user */
+#endif /* UNDER_CE */
+ int FilterEnabled; /* filtering is enabled */
+ int Attached; /* filter library is attached */
+ void *pContext; /* private context for filter library */
+ ATHBT_FILTER_CMD_EVENTS_FN pFilterCmdEvents; /* function ptr to filter a command or event */
+ ATHBT_FILTER_DATA_FN pFilterAclDataOut; /* function ptr to filter ACL data out (to radio) */
+ ATHBT_FILTER_DATA_FN pFilterAclDataIn; /* function ptr to filter ACL data in (from radio) */
+ ATHBT_INDICATE_STATE_FN pIndicateState; /* function ptr to indicate a state */
+} ATH_BT_FILTER_INSTANCE;
+
+
+/* API MACROS */
+
+#define AthBtFilterHciCommand(instance,packet,length) \
+ if ((instance)->FilterEnabled) { \
+ (instance)->pFilterCmdEvents((instance)->pContext, \
+ ATHBT_HCI_COMMAND, \
+ (unsigned char *)(packet), \
+ (length)); \
+ }
+
+#define AthBtFilterHciEvent(instance,packet,length) \
+ if ((instance)->FilterEnabled) { \
+ (instance)->pFilterCmdEvents((instance)->pContext, \
+ ATHBT_HCI_EVENT, \
+ (unsigned char *)(packet), \
+ (length)); \
+ }
+
+#define AthBtFilterHciAclDataOut(instance,packet,length) \
+ if ((instance)->FilterEnabled) { \
+ (instance)->pFilterAclDataOut((instance)->pContext, \
+ (unsigned char *)(packet), \
+ (length)); \
+ }
+
+#define AthBtFilterHciAclDataIn(instance,packet,length) \
+ if ((instance)->FilterEnabled) { \
+ (instance)->pFilterAclDataIn((instance)->pContext, \
+ (unsigned char *)(packet), \
+ (length)); \
+ }
+
+/* if filtering is not desired, the application can indicate the state directly using this
+ * macro:
+ */
+#define AthBtIndicateState(instance,indication,state) \
+ if ((instance)->FilterEnabled) { \
+ (instance)->pIndicateState((instance)->pContext, \
+ (indication), \
+ (state), \
+ 0); \
+ }
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+/* API prototypes */
+int AthBtFilter_Attach(ATH_BT_FILTER_INSTANCE *pInstance, unsigned int flags);
+void AthBtFilter_Detach(ATH_BT_FILTER_INSTANCE *pInstance);
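+
+/* Illustrative usage sketch; the instance, adapter name and packet buffers are
+ * hypothetical, and a zero return from AthBtFilter_Attach() is assumed to mean
+ * success:
+ *
+ *   static ATH_BT_FILTER_INSTANCE btInstance = {
+ *       .pWlanAdapterName = "wlan0",
+ *   };
+ *
+ *   if (AthBtFilter_Attach(&btInstance, 0) == 0) {
+ *       AthBtFilterHciCommand(&btInstance, hciCmdBuf, hciCmdLen);
+ *       AthBtIndicateState(&btInstance, ATH_BT_SCO, STATE_ON);
+ *       AthBtFilter_Detach(&btInstance);
+ *   }
+ */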
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /*ATHBTFILTER_H_*/
diff --git a/drivers/staging/ath6kl/include/athendpack.h b/drivers/staging/ath6kl/include/athendpack.h
new file mode 100644
index 000000000000..1b940503bb21
--- /dev/null
+++ b/drivers/staging/ath6kl/include/athendpack.h
@@ -0,0 +1,52 @@
+//------------------------------------------------------------------------------
+// <copyright file="athendpack.h" company="Atheros">
+// Copyright (c) 2004-2010 Atheros Corporation. All rights reserved.
+//
+//
+// Permission to use, copy, modify, and/or distribute this software for any
+// purpose with or without fee is hereby granted, provided that the above
+// copyright notice and this permission notice appear in all copies.
+//
+// THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+// WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+// MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+// ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+// WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+// ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+// OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+//
+//
+//------------------------------------------------------------------------------
+//==============================================================================
+// end compiler-specific structure packing
+//
+// Author(s): ="Atheros"
+//==============================================================================
+#ifdef VXWORKS
+#endif /* VXWORKS */
+
+#if defined(LINUX) || defined(__linux__)
+#endif /* LINUX */
+
+#ifdef QNX
+#endif /* QNX */
+
+#ifdef INTEGRITY
+#include "integrity/athendpack_integrity.h"
+#endif /* INTEGRITY */
+
+#ifdef NUCLEUS
+#endif /* NUCLEUS */
+
+
+#ifdef UNDER_NWIFI
+#include "../os/windows/include/athendpack.h"
+#endif
+
+#ifdef ATHR_CE_LEGACY
+#include "../os/windows/include/athendpack.h"
+#endif /* WINCE */
+
+#ifdef WIN_NWF
+#include <athendpack_win.h>
+#endif
diff --git a/drivers/staging/ath6kl/include/athstartpack.h b/drivers/staging/ath6kl/include/athstartpack.h
new file mode 100644
index 000000000000..1c45f666d8a2
--- /dev/null
+++ b/drivers/staging/ath6kl/include/athstartpack.h
@@ -0,0 +1,55 @@
+//------------------------------------------------------------------------------
+// <copyright file="athstartpack.h" company="Atheros">
+// Copyright (c) 2004-2010 Atheros Corporation. All rights reserved.
+//
+//
+// Permission to use, copy, modify, and/or distribute this software for any
+// purpose with or without fee is hereby granted, provided that the above
+// copyright notice and this permission notice appear in all copies.
+//
+// THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+// WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+// MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+// ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+// WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+// ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+// OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+//
+//
+//------------------------------------------------------------------------------
+//==============================================================================
+// start compiler-specific structure packing
+//
+// Author(s): ="Atheros"
+//==============================================================================
+#ifdef VXWORKS
+#endif /* VXWORKS */
+
+#if defined(LINUX) || defined(__linux__)
+#endif /* LINUX */
+
+#ifdef QNX
+#endif /* QNX */
+
+#ifdef INTEGRITY
+#include "integrity/athstartpack_integrity.h"
+#endif /* INTEGRITY */
+
+#ifdef NUCLEUS
+#endif /* NUCLEUS */
+
+#ifdef UNDER_NWIFI
+#include "../os/windows/include/athstartpack.h"
+#endif
+
+#ifdef ATHR_CE_LEGACY
+#include "../os/windows/include/athstartpack.h"
+#endif /* WINCE */
+
+#ifdef WIN_NWF
+#include <athstartpack_win.h>
+#endif
+
+#ifdef THREADX
+#include "../os/threadx/include/common/osapi_threadx.h"
+#endif
diff --git a/drivers/staging/ath6kl/include/bmi.h b/drivers/staging/ath6kl/include/bmi.h
new file mode 100644
index 000000000000..27aa98df9c0b
--- /dev/null
+++ b/drivers/staging/ath6kl/include/bmi.h
@@ -0,0 +1,135 @@
+//------------------------------------------------------------------------------
+// <copyright file="bmi.h" company="Atheros">
+// Copyright (c) 2004-2010 Atheros Corporation. All rights reserved.
+//
+//
+// Permission to use, copy, modify, and/or distribute this software for any
+// purpose with or without fee is hereby granted, provided that the above
+// copyright notice and this permission notice appear in all copies.
+//
+// THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+// WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+// MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+// ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+// WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+// ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+// OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+//
+//
+//------------------------------------------------------------------------------
+//==============================================================================
+// BMI declarations and prototypes
+//
+// Author(s): ="Atheros"
+//==============================================================================
+#ifndef _BMI_H_
+#define _BMI_H_
+
+#ifdef __cplusplus
+extern "C" {
+#endif /* __cplusplus */
+
+/* Header files */
+#include "a_config.h"
+#include "athdefs.h"
+#include "a_types.h"
+#include "hif.h"
+#include "a_osapi.h"
+#include "bmi_msg.h"
+
+void
+BMIInit(void);
+
+void
+BMICleanup(void);
+
+A_STATUS
+BMIDone(HIF_DEVICE *device);
+
+A_STATUS
+BMIGetTargetInfo(HIF_DEVICE *device, struct bmi_target_info *targ_info);
+
+A_STATUS
+BMIReadMemory(HIF_DEVICE *device,
+ A_UINT32 address,
+ A_UCHAR *buffer,
+ A_UINT32 length);
+
+A_STATUS
+BMIWriteMemory(HIF_DEVICE *device,
+ A_UINT32 address,
+ A_UCHAR *buffer,
+ A_UINT32 length);
+
+A_STATUS
+BMIExecute(HIF_DEVICE *device,
+ A_UINT32 address,
+ A_UINT32 *param);
+
+A_STATUS
+BMISetAppStart(HIF_DEVICE *device,
+ A_UINT32 address);
+
+A_STATUS
+BMIReadSOCRegister(HIF_DEVICE *device,
+ A_UINT32 address,
+ A_UINT32 *param);
+
+A_STATUS
+BMIWriteSOCRegister(HIF_DEVICE *device,
+ A_UINT32 address,
+ A_UINT32 param);
+
+A_STATUS
+BMIrompatchInstall(HIF_DEVICE *device,
+ A_UINT32 ROM_addr,
+ A_UINT32 RAM_addr,
+ A_UINT32 nbytes,
+ A_UINT32 do_activate,
+ A_UINT32 *patch_id);
+
+A_STATUS
+BMIrompatchUninstall(HIF_DEVICE *device,
+ A_UINT32 rompatch_id);
+
+A_STATUS
+BMIrompatchActivate(HIF_DEVICE *device,
+ A_UINT32 rompatch_count,
+ A_UINT32 *rompatch_list);
+
+A_STATUS
+BMIrompatchDeactivate(HIF_DEVICE *device,
+ A_UINT32 rompatch_count,
+ A_UINT32 *rompatch_list);
+
+A_STATUS
+BMILZStreamStart(HIF_DEVICE *device,
+ A_UINT32 address);
+
+A_STATUS
+BMILZData(HIF_DEVICE *device,
+ A_UCHAR *buffer,
+ A_UINT32 length);
+
+A_STATUS
+BMIFastDownload(HIF_DEVICE *device,
+ A_UINT32 address,
+ A_UCHAR *buffer,
+ A_UINT32 length);
+
+A_STATUS
+BMIRawWrite(HIF_DEVICE *device,
+ A_UCHAR *buffer,
+ A_UINT32 length);
+
+A_STATUS
+BMIRawRead(HIF_DEVICE *device,
+ A_UCHAR *buffer,
+ A_UINT32 length,
+ A_BOOL want_timeout);
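+
+/* Illustrative download sequence using the prototypes above (sketch only;
+ * addresses and the firmware buffer are placeholders, error handling omitted):
+ *
+ *   struct bmi_target_info targ_info;
+ *
+ *   BMIInit();
+ *   BMIGetTargetInfo(device, &targ_info);
+ *   BMIWriteMemory(device, load_addr, fw_image, fw_len);
+ *   BMISetAppStart(device, app_start_addr);
+ *   BMIDone(device);   // leaves BMI mode, target boots the downloaded image
+ */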
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* _BMI_H_ */
diff --git a/drivers/staging/ath6kl/include/common/AR6002/AR6002_regdump.h b/drivers/staging/ath6kl/include/common/AR6002/AR6002_regdump.h
new file mode 100644
index 000000000000..e3291cf4dbd4
--- /dev/null
+++ b/drivers/staging/ath6kl/include/common/AR6002/AR6002_regdump.h
@@ -0,0 +1,60 @@
+//------------------------------------------------------------------------------
+// Copyright (c) 2006-2010 Atheros Corporation. All rights reserved.
+//
+//
+// Permission to use, copy, modify, and/or distribute this software for any
+// purpose with or without fee is hereby granted, provided that the above
+// copyright notice and this permission notice appear in all copies.
+//
+// THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+// WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+// MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+// ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+// WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+// ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+// OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+//
+//
+//------------------------------------------------------------------------------
+//==============================================================================
+// Author(s): ="Atheros"
+//==============================================================================
+
+#ifndef __AR6002_REGDUMP_H__
+#define __AR6002_REGDUMP_H__
+
+#if !defined(__ASSEMBLER__)
+/*
+ * XTensa CPU state
+ * This must match the state saved by the target exception handler.
+ */
+struct XTensa_exception_frame_s {
+ A_UINT32 xt_pc;
+ A_UINT32 xt_ps;
+ A_UINT32 xt_sar;
+ A_UINT32 xt_vpri;
+ A_UINT32 xt_a2;
+ A_UINT32 xt_a3;
+ A_UINT32 xt_a4;
+ A_UINT32 xt_a5;
+ A_UINT32 xt_exccause;
+ A_UINT32 xt_lcount;
+ A_UINT32 xt_lbeg;
+ A_UINT32 xt_lend;
+
+ A_UINT32 epc1, epc2, epc3, epc4;
+
+ /* Extra info to simplify post-mortem stack walkback */
+#define AR6002_REGDUMP_FRAMES 10
+ struct {
+ A_UINT32 a0; /* pc */
+ A_UINT32 a1; /* sp */
+ A_UINT32 a2;
+ A_UINT32 a3;
+ } wb[AR6002_REGDUMP_FRAMES];
+};
+typedef struct XTensa_exception_frame_s CPU_exception_frame_t;
+#define RD_SIZE sizeof(CPU_exception_frame_t)
+
+#endif
+#endif /* __AR6002_REGDUMP_H__ */
diff --git a/drivers/staging/ath6kl/include/common/AR6002/AR6K_version.h b/drivers/staging/ath6kl/include/common/AR6002/AR6K_version.h
new file mode 100644
index 000000000000..5407e05d9b05
--- /dev/null
+++ b/drivers/staging/ath6kl/include/common/AR6002/AR6K_version.h
@@ -0,0 +1,52 @@
+//------------------------------------------------------------------------------
+// <copyright file="AR6K_version.h" company="Atheros">
+// Copyright (c) 2004-2010 Atheros Corporation. All rights reserved.
+//
+//
+// Permission to use, copy, modify, and/or distribute this software for any
+// purpose with or without fee is hereby granted, provided that the above
+// copyright notice and this permission notice appear in all copies.
+//
+// THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+// WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+// MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+// ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+// WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+// ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+// OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+//
+//
+//------------------------------------------------------------------------------
+//==============================================================================
+// Author(s): ="Atheros"
+//==============================================================================
+
+#define __VER_MAJOR_ 3
+#define __VER_MINOR_ 0
+#define __VER_PATCH_ 0
+
+/* The makear6ksdk script (used for release builds) modifies the following line. */
+#define __BUILD_NUMBER_ 233
+
+
+/* Format of the version number. */
+#define VER_MAJOR_BIT_OFFSET 28
+#define VER_MINOR_BIT_OFFSET 24
+#define VER_PATCH_BIT_OFFSET 16
+#define VER_BUILD_NUM_BIT_OFFSET 0
+
+
+/*
+ * The version has the following format:
+ * Bits 28-31: Major version
+ * Bits 24-27: Minor version
+ * Bits 16-23: Patch version
+ * Bits 0-15: Build number (automatically generated during build process)
+ * E.g. Build 1.1.3.7 would be represented as 0x11030007.
+ *
+ * DO NOT split the following macro into multiple lines as this may confuse the build scripts.
+ */
+#define AR6K_SW_VERSION ( ( __VER_MAJOR_ << VER_MAJOR_BIT_OFFSET ) + ( __VER_MINOR_ << VER_MINOR_BIT_OFFSET ) + ( __VER_PATCH_ << VER_PATCH_BIT_OFFSET ) + ( __BUILD_NUMBER_ << VER_BUILD_NUM_BIT_OFFSET ) )
+
+/* ABI Version. Reflects the version of the binary interface exposed by the AR6K target firmware. Needs to be incremented by 1 for any change in the firmware that requires an upgrade of the host-side driver for the change to work correctly. */
+#define AR6K_ABI_VERSION 1
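Aside (not part of the patch): a quick check of the packing formula above. With the default 3.0.0 build 233, AR6K_SW_VERSION evaluates to 0x300000e9. The decode masks below are inferred from the documented field widths (4/4/8/16 bits) rather than taken from the header, so they are illustrative only.

#include <stdio.h>
#include "AR6K_version.h"               /* the header added above */

int main(void)
{
	unsigned int v = AR6K_SW_VERSION;

	printf("packed: 0x%08x\n", v);      /* 0x300000e9 for 3.0.0 build 233 */
	printf("major:  %u\n", (v >> VER_MAJOR_BIT_OFFSET) & 0xf);
	printf("minor:  %u\n", (v >> VER_MINOR_BIT_OFFSET) & 0xf);
	printf("patch:  %u\n", (v >> VER_PATCH_BIT_OFFSET) & 0xff);
	printf("build:  %u\n", (v >> VER_BUILD_NUM_BIT_OFFSET) & 0xffff);
	return 0;
}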
diff --git a/drivers/staging/ath6kl/include/common/AR6002/addrs.h b/drivers/staging/ath6kl/include/common/AR6002/addrs.h
new file mode 100644
index 000000000000..eaaccf4cad7b
--- /dev/null
+++ b/drivers/staging/ath6kl/include/common/AR6002/addrs.h
@@ -0,0 +1,90 @@
+//------------------------------------------------------------------------------
+// Copyright (c) 2004-2010 Atheros Corporation. All rights reserved.
+//
+//
+// Permission to use, copy, modify, and/or distribute this software for any
+// purpose with or without fee is hereby granted, provided that the above
+// copyright notice and this permission notice appear in all copies.
+//
+// THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+// WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+// MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+// ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+// WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+// ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+// OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+//
+//
+//
+// Author(s): ="Atheros"
+//------------------------------------------------------------------------------
+
+#ifndef __ADDRS_H__
+#define __ADDRS_H__
+
+/*
+ * Special AR6002 Addresses that may be needed by special
+ * applications (e.g. ART) on the Host as well as Target.
+ */
+
+#if defined(AR6002_REV2)
+#define AR6K_RAM_START 0x00500000
+#define TARG_RAM_OFFSET(vaddr) ((A_UINT32)(vaddr) & 0xfffff)
+#define TARG_RAM_SZ (184*1024)
+#define TARG_ROM_SZ (80*1024)
+#endif
+#if defined(AR6002_REV4) || defined(AR6003)
+#define AR6K_RAM_START 0x00540000
+#define TARG_RAM_OFFSET(vaddr) (((A_UINT32)(vaddr) & 0xfffff) - 0x40000)
+#define TARG_RAM_SZ (256*1024)
+#define TARG_ROM_SZ (256*1024)
+#endif
+
+#define AR6002_BOARD_DATA_SZ 768
+#define AR6002_BOARD_EXT_DATA_SZ 0
+#define AR6003_BOARD_DATA_SZ 1024
+#define AR6003_BOARD_EXT_DATA_SZ 768
+
+#define AR6K_RAM_ADDR(byte_offset) (AR6K_RAM_START+(byte_offset))
+#define TARG_RAM_ADDRS(byte_offset) AR6K_RAM_ADDR(byte_offset)
+
+#define AR6K_ROM_START 0x004e0000
+#define TARG_ROM_OFFSET(vaddr) (((A_UINT32)(vaddr) & 0x1fffff) - 0xe0000)
+#define AR6K_ROM_ADDR(byte_offset) (AR6K_ROM_START+(byte_offset))
+#define TARG_ROM_ADDRS(byte_offset) AR6K_ROM_ADDR(byte_offset)
+
+/*
+ * At this ROM address is a pointer to the start of the ROM DataSet Index.
+ * If there are no ROM DataSets, there's a 0 at this address.
+ */
+#define ROM_DATASET_INDEX_ADDR (TARG_ROM_ADDRS(TARG_ROM_SZ)-8)
+#define ROM_MBIST_CKSUM_ADDR (TARG_ROM_ADDRS(TARG_ROM_SZ)-4)
+
+/*
+ * The API A_BOARD_DATA_ADDR() is the proper way to get a read pointer to
+ * board data.
+ */
+
+/* Size of Board Data, in bytes */
+#if defined(AR6002_REV4) || defined(AR6003)
+#define BOARD_DATA_SZ AR6003_BOARD_DATA_SZ
+#else
+#define BOARD_DATA_SZ AR6002_BOARD_DATA_SZ
+#endif
+
+
+/*
+ * Constants used by ASM code to access fields of host_interest_s,
+ * which is at a fixed location in RAM.
+ */
+#if defined(AR6002_REV4) || defined(AR6003)
+#define HOST_INTEREST_FLASH_IS_PRESENT_ADDR (AR6K_RAM_START + 0x60c)
+#else
+#define HOST_INTEREST_FLASH_IS_PRESENT_ADDR (AR6K_RAM_START + 0x40c)
+#endif
+#define FLASH_IS_PRESENT_TARGADDR HOST_INTEREST_FLASH_IS_PRESENT_ADDR
+
+#endif /* __ADDRS_H__ */
+
+
+
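Aside (not part of the patch): the RAM/ROM macros above translate between target virtual addresses and byte offsets. A small sketch, assuming a build with -DAR6002_REV2 so the REV2 constants are selected, and a plain unsigned int standing in for the driver's A_UINT32:

#include <stdio.h>

typedef unsigned int A_UINT32;          /* stand-in for the driver's type */
#include "addrs.h"                      /* the header added above */

int main(void)
{
	/* Round trip: byte offset -> target virtual address -> offset again. */
	A_UINT32 vaddr = TARG_RAM_ADDRS(0x1234);   /* 0x00500000 + 0x1234 */
	A_UINT32 off   = TARG_RAM_OFFSET(vaddr);   /* back to 0x01234 */

	printf("vaddr=0x%08x offset=0x%05x\n", vaddr, off);

	/* Derived location of the ROM DataSet Index pointer, 8 bytes before
	 * the end of ROM as described in the comment above. */
	printf("ROM DataSet Index pointer at 0x%08x\n",
	       (unsigned int)ROM_DATASET_INDEX_ADDR);
	return 0;
}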
diff --git a/drivers/staging/ath6kl/include/common/AR6002/hw2.0/hw/analog_intf_reg.h b/drivers/staging/ath6kl/include/common/AR6002/hw2.0/hw/analog_intf_reg.h
new file mode 100644
index 000000000000..9c82767b6efb
--- /dev/null
+++ b/drivers/staging/ath6kl/include/common/AR6002/hw2.0/hw/analog_intf_reg.h
@@ -0,0 +1,64 @@
+#ifndef _ANALOG_INTF_REG_REG_H_
+#define _ANALOG_INTF_REG_REG_H_
+
+#define SW_OVERRIDE_ADDRESS 0x00000080
+#define SW_OVERRIDE_OFFSET 0x00000080
+#define SW_OVERRIDE_SUPDATE_DELAY_MSB 1
+#define SW_OVERRIDE_SUPDATE_DELAY_LSB 1
+#define SW_OVERRIDE_SUPDATE_DELAY_MASK 0x00000002
+#define SW_OVERRIDE_SUPDATE_DELAY_GET(x) (((x) & SW_OVERRIDE_SUPDATE_DELAY_MASK) >> SW_OVERRIDE_SUPDATE_DELAY_LSB)
+#define SW_OVERRIDE_SUPDATE_DELAY_SET(x) (((x) << SW_OVERRIDE_SUPDATE_DELAY_LSB) & SW_OVERRIDE_SUPDATE_DELAY_MASK)
+#define SW_OVERRIDE_ENABLE_MSB 0
+#define SW_OVERRIDE_ENABLE_LSB 0
+#define SW_OVERRIDE_ENABLE_MASK 0x00000001
+#define SW_OVERRIDE_ENABLE_GET(x) (((x) & SW_OVERRIDE_ENABLE_MASK) >> SW_OVERRIDE_ENABLE_LSB)
+#define SW_OVERRIDE_ENABLE_SET(x) (((x) << SW_OVERRIDE_ENABLE_LSB) & SW_OVERRIDE_ENABLE_MASK)
+
+#define SIN_VAL_ADDRESS 0x00000084
+#define SIN_VAL_OFFSET 0x00000084
+#define SIN_VAL_SIN_MSB 0
+#define SIN_VAL_SIN_LSB 0
+#define SIN_VAL_SIN_MASK 0x00000001
+#define SIN_VAL_SIN_GET(x) (((x) & SIN_VAL_SIN_MASK) >> SIN_VAL_SIN_LSB)
+#define SIN_VAL_SIN_SET(x) (((x) << SIN_VAL_SIN_LSB) & SIN_VAL_SIN_MASK)
+
+#define SW_SCLK_ADDRESS 0x00000088
+#define SW_SCLK_OFFSET 0x00000088
+#define SW_SCLK_SW_SCLK_MSB 0
+#define SW_SCLK_SW_SCLK_LSB 0
+#define SW_SCLK_SW_SCLK_MASK 0x00000001
+#define SW_SCLK_SW_SCLK_GET(x) (((x) & SW_SCLK_SW_SCLK_MASK) >> SW_SCLK_SW_SCLK_LSB)
+#define SW_SCLK_SW_SCLK_SET(x) (((x) << SW_SCLK_SW_SCLK_LSB) & SW_SCLK_SW_SCLK_MASK)
+
+#define SW_CNTL_ADDRESS 0x0000008c
+#define SW_CNTL_OFFSET 0x0000008c
+#define SW_CNTL_SW_SCAPTURE_MSB 2
+#define SW_CNTL_SW_SCAPTURE_LSB 2
+#define SW_CNTL_SW_SCAPTURE_MASK 0x00000004
+#define SW_CNTL_SW_SCAPTURE_GET(x) (((x) & SW_CNTL_SW_SCAPTURE_MASK) >> SW_CNTL_SW_SCAPTURE_LSB)
+#define SW_CNTL_SW_SCAPTURE_SET(x) (((x) << SW_CNTL_SW_SCAPTURE_LSB) & SW_CNTL_SW_SCAPTURE_MASK)
+#define SW_CNTL_SW_SUPDATE_MSB 1
+#define SW_CNTL_SW_SUPDATE_LSB 1
+#define SW_CNTL_SW_SUPDATE_MASK 0x00000002
+#define SW_CNTL_SW_SUPDATE_GET(x) (((x) & SW_CNTL_SW_SUPDATE_MASK) >> SW_CNTL_SW_SUPDATE_LSB)
+#define SW_CNTL_SW_SUPDATE_SET(x) (((x) << SW_CNTL_SW_SUPDATE_LSB) & SW_CNTL_SW_SUPDATE_MASK)
+#define SW_CNTL_SW_SOUT_MSB 0
+#define SW_CNTL_SW_SOUT_LSB 0
+#define SW_CNTL_SW_SOUT_MASK 0x00000001
+#define SW_CNTL_SW_SOUT_GET(x) (((x) & SW_CNTL_SW_SOUT_MASK) >> SW_CNTL_SW_SOUT_LSB)
+#define SW_CNTL_SW_SOUT_SET(x) (((x) << SW_CNTL_SW_SOUT_LSB) & SW_CNTL_SW_SOUT_MASK)
+
+
+#ifndef __ASSEMBLER__
+
+typedef struct analog_intf_reg_reg_s {
+ unsigned char pad0[128]; /* pad to 0x80 */
+ volatile unsigned int sw_override;
+ volatile unsigned int sin_val;
+ volatile unsigned int sw_sclk;
+ volatile unsigned int sw_cntl;
+} analog_intf_reg_reg_t;
+
+#endif /* __ASSEMBLER__ */
+
+#endif /* _ANALOG_INTF_REG_REG_H_ */
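Aside (not part of the patch): every field in these generated headers comes with the same _MASK/_LSB/_GET/_SET quartet, so the read-modify-write idiom is uniform across them. A hedged sketch using a plain variable in place of a real register read/write (the actual register I/O path is driver-specific and not shown here):

#include <stdio.h>
#include "analog_intf_reg.h"            /* the header added above */

int main(void)
{
	unsigned int reg = 0;

	/* Clear the field under its mask, then merge in the new value. */
	reg &= ~SW_OVERRIDE_ENABLE_MASK;
	reg |= SW_OVERRIDE_ENABLE_SET(1);

	printf("sw_override=0x%08x enable=%u\n",
	       reg, SW_OVERRIDE_ENABLE_GET(reg));
	return 0;
}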
diff --git a/drivers/staging/ath6kl/include/common/AR6002/hw2.0/hw/analog_reg.h b/drivers/staging/ath6kl/include/common/AR6002/hw2.0/hw/analog_reg.h
new file mode 100644
index 000000000000..cf562b86f655
--- /dev/null
+++ b/drivers/staging/ath6kl/include/common/AR6002/hw2.0/hw/analog_reg.h
@@ -0,0 +1,1932 @@
+#ifndef _ANALOG_REG_REG_H_
+#define _ANALOG_REG_REG_H_
+
+#define SYNTH_SYNTH1_ADDRESS 0x00000000
+#define SYNTH_SYNTH1_OFFSET 0x00000000
+#define SYNTH_SYNTH1_PWD_BIAS_MSB 31
+#define SYNTH_SYNTH1_PWD_BIAS_LSB 31
+#define SYNTH_SYNTH1_PWD_BIAS_MASK 0x80000000
+#define SYNTH_SYNTH1_PWD_BIAS_GET(x) (((x) & SYNTH_SYNTH1_PWD_BIAS_MASK) >> SYNTH_SYNTH1_PWD_BIAS_LSB)
+#define SYNTH_SYNTH1_PWD_BIAS_SET(x) (((x) << SYNTH_SYNTH1_PWD_BIAS_LSB) & SYNTH_SYNTH1_PWD_BIAS_MASK)
+#define SYNTH_SYNTH1_PWD_CP_MSB 30
+#define SYNTH_SYNTH1_PWD_CP_LSB 30
+#define SYNTH_SYNTH1_PWD_CP_MASK 0x40000000
+#define SYNTH_SYNTH1_PWD_CP_GET(x) (((x) & SYNTH_SYNTH1_PWD_CP_MASK) >> SYNTH_SYNTH1_PWD_CP_LSB)
+#define SYNTH_SYNTH1_PWD_CP_SET(x) (((x) << SYNTH_SYNTH1_PWD_CP_LSB) & SYNTH_SYNTH1_PWD_CP_MASK)
+#define SYNTH_SYNTH1_PWD_VCMON_MSB 29
+#define SYNTH_SYNTH1_PWD_VCMON_LSB 29
+#define SYNTH_SYNTH1_PWD_VCMON_MASK 0x20000000
+#define SYNTH_SYNTH1_PWD_VCMON_GET(x) (((x) & SYNTH_SYNTH1_PWD_VCMON_MASK) >> SYNTH_SYNTH1_PWD_VCMON_LSB)
+#define SYNTH_SYNTH1_PWD_VCMON_SET(x) (((x) << SYNTH_SYNTH1_PWD_VCMON_LSB) & SYNTH_SYNTH1_PWD_VCMON_MASK)
+#define SYNTH_SYNTH1_PWD_VCO_MSB 28
+#define SYNTH_SYNTH1_PWD_VCO_LSB 28
+#define SYNTH_SYNTH1_PWD_VCO_MASK 0x10000000
+#define SYNTH_SYNTH1_PWD_VCO_GET(x) (((x) & SYNTH_SYNTH1_PWD_VCO_MASK) >> SYNTH_SYNTH1_PWD_VCO_LSB)
+#define SYNTH_SYNTH1_PWD_VCO_SET(x) (((x) << SYNTH_SYNTH1_PWD_VCO_LSB) & SYNTH_SYNTH1_PWD_VCO_MASK)
+#define SYNTH_SYNTH1_PWD_PRESC_MSB 27
+#define SYNTH_SYNTH1_PWD_PRESC_LSB 27
+#define SYNTH_SYNTH1_PWD_PRESC_MASK 0x08000000
+#define SYNTH_SYNTH1_PWD_PRESC_GET(x) (((x) & SYNTH_SYNTH1_PWD_PRESC_MASK) >> SYNTH_SYNTH1_PWD_PRESC_LSB)
+#define SYNTH_SYNTH1_PWD_PRESC_SET(x) (((x) << SYNTH_SYNTH1_PWD_PRESC_LSB) & SYNTH_SYNTH1_PWD_PRESC_MASK)
+#define SYNTH_SYNTH1_PWD_LODIV_MSB 26
+#define SYNTH_SYNTH1_PWD_LODIV_LSB 26
+#define SYNTH_SYNTH1_PWD_LODIV_MASK 0x04000000
+#define SYNTH_SYNTH1_PWD_LODIV_GET(x) (((x) & SYNTH_SYNTH1_PWD_LODIV_MASK) >> SYNTH_SYNTH1_PWD_LODIV_LSB)
+#define SYNTH_SYNTH1_PWD_LODIV_SET(x) (((x) << SYNTH_SYNTH1_PWD_LODIV_LSB) & SYNTH_SYNTH1_PWD_LODIV_MASK)
+#define SYNTH_SYNTH1_PWD_LOMIX_MSB 25
+#define SYNTH_SYNTH1_PWD_LOMIX_LSB 25
+#define SYNTH_SYNTH1_PWD_LOMIX_MASK 0x02000000
+#define SYNTH_SYNTH1_PWD_LOMIX_GET(x) (((x) & SYNTH_SYNTH1_PWD_LOMIX_MASK) >> SYNTH_SYNTH1_PWD_LOMIX_LSB)
+#define SYNTH_SYNTH1_PWD_LOMIX_SET(x) (((x) << SYNTH_SYNTH1_PWD_LOMIX_LSB) & SYNTH_SYNTH1_PWD_LOMIX_MASK)
+#define SYNTH_SYNTH1_FORCE_LO_ON_MSB 24
+#define SYNTH_SYNTH1_FORCE_LO_ON_LSB 24
+#define SYNTH_SYNTH1_FORCE_LO_ON_MASK 0x01000000
+#define SYNTH_SYNTH1_FORCE_LO_ON_GET(x) (((x) & SYNTH_SYNTH1_FORCE_LO_ON_MASK) >> SYNTH_SYNTH1_FORCE_LO_ON_LSB)
+#define SYNTH_SYNTH1_FORCE_LO_ON_SET(x) (((x) << SYNTH_SYNTH1_FORCE_LO_ON_LSB) & SYNTH_SYNTH1_FORCE_LO_ON_MASK)
+#define SYNTH_SYNTH1_PWD_LOBUF5G_MSB 23
+#define SYNTH_SYNTH1_PWD_LOBUF5G_LSB 23
+#define SYNTH_SYNTH1_PWD_LOBUF5G_MASK 0x00800000
+#define SYNTH_SYNTH1_PWD_LOBUF5G_GET(x) (((x) & SYNTH_SYNTH1_PWD_LOBUF5G_MASK) >> SYNTH_SYNTH1_PWD_LOBUF5G_LSB)
+#define SYNTH_SYNTH1_PWD_LOBUF5G_SET(x) (((x) << SYNTH_SYNTH1_PWD_LOBUF5G_LSB) & SYNTH_SYNTH1_PWD_LOBUF5G_MASK)
+#define SYNTH_SYNTH1_VCOREGBYPASS_MSB 22
+#define SYNTH_SYNTH1_VCOREGBYPASS_LSB 22
+#define SYNTH_SYNTH1_VCOREGBYPASS_MASK 0x00400000
+#define SYNTH_SYNTH1_VCOREGBYPASS_GET(x) (((x) & SYNTH_SYNTH1_VCOREGBYPASS_MASK) >> SYNTH_SYNTH1_VCOREGBYPASS_LSB)
+#define SYNTH_SYNTH1_VCOREGBYPASS_SET(x) (((x) << SYNTH_SYNTH1_VCOREGBYPASS_LSB) & SYNTH_SYNTH1_VCOREGBYPASS_MASK)
+#define SYNTH_SYNTH1_VCOREGLEVEL_MSB 21
+#define SYNTH_SYNTH1_VCOREGLEVEL_LSB 20
+#define SYNTH_SYNTH1_VCOREGLEVEL_MASK 0x00300000
+#define SYNTH_SYNTH1_VCOREGLEVEL_GET(x) (((x) & SYNTH_SYNTH1_VCOREGLEVEL_MASK) >> SYNTH_SYNTH1_VCOREGLEVEL_LSB)
+#define SYNTH_SYNTH1_VCOREGLEVEL_SET(x) (((x) << SYNTH_SYNTH1_VCOREGLEVEL_LSB) & SYNTH_SYNTH1_VCOREGLEVEL_MASK)
+#define SYNTH_SYNTH1_VCOREGBIAS_MSB 19
+#define SYNTH_SYNTH1_VCOREGBIAS_LSB 18
+#define SYNTH_SYNTH1_VCOREGBIAS_MASK 0x000c0000
+#define SYNTH_SYNTH1_VCOREGBIAS_GET(x) (((x) & SYNTH_SYNTH1_VCOREGBIAS_MASK) >> SYNTH_SYNTH1_VCOREGBIAS_LSB)
+#define SYNTH_SYNTH1_VCOREGBIAS_SET(x) (((x) << SYNTH_SYNTH1_VCOREGBIAS_LSB) & SYNTH_SYNTH1_VCOREGBIAS_MASK)
+#define SYNTH_SYNTH1_SLIDINGIF_MSB 17
+#define SYNTH_SYNTH1_SLIDINGIF_LSB 17
+#define SYNTH_SYNTH1_SLIDINGIF_MASK 0x00020000
+#define SYNTH_SYNTH1_SLIDINGIF_GET(x) (((x) & SYNTH_SYNTH1_SLIDINGIF_MASK) >> SYNTH_SYNTH1_SLIDINGIF_LSB)
+#define SYNTH_SYNTH1_SLIDINGIF_SET(x) (((x) << SYNTH_SYNTH1_SLIDINGIF_LSB) & SYNTH_SYNTH1_SLIDINGIF_MASK)
+#define SYNTH_SYNTH1_SPARE_PWD_MSB 16
+#define SYNTH_SYNTH1_SPARE_PWD_LSB 16
+#define SYNTH_SYNTH1_SPARE_PWD_MASK 0x00010000
+#define SYNTH_SYNTH1_SPARE_PWD_GET(x) (((x) & SYNTH_SYNTH1_SPARE_PWD_MASK) >> SYNTH_SYNTH1_SPARE_PWD_LSB)
+#define SYNTH_SYNTH1_SPARE_PWD_SET(x) (((x) << SYNTH_SYNTH1_SPARE_PWD_LSB) & SYNTH_SYNTH1_SPARE_PWD_MASK)
+#define SYNTH_SYNTH1_CON_VDDVCOREG_MSB 15
+#define SYNTH_SYNTH1_CON_VDDVCOREG_LSB 15
+#define SYNTH_SYNTH1_CON_VDDVCOREG_MASK 0x00008000
+#define SYNTH_SYNTH1_CON_VDDVCOREG_GET(x) (((x) & SYNTH_SYNTH1_CON_VDDVCOREG_MASK) >> SYNTH_SYNTH1_CON_VDDVCOREG_LSB)
+#define SYNTH_SYNTH1_CON_VDDVCOREG_SET(x) (((x) << SYNTH_SYNTH1_CON_VDDVCOREG_LSB) & SYNTH_SYNTH1_CON_VDDVCOREG_MASK)
+#define SYNTH_SYNTH1_CON_IVCOREG_MSB 14
+#define SYNTH_SYNTH1_CON_IVCOREG_LSB 14
+#define SYNTH_SYNTH1_CON_IVCOREG_MASK 0x00004000
+#define SYNTH_SYNTH1_CON_IVCOREG_GET(x) (((x) & SYNTH_SYNTH1_CON_IVCOREG_MASK) >> SYNTH_SYNTH1_CON_IVCOREG_LSB)
+#define SYNTH_SYNTH1_CON_IVCOREG_SET(x) (((x) << SYNTH_SYNTH1_CON_IVCOREG_LSB) & SYNTH_SYNTH1_CON_IVCOREG_MASK)
+#define SYNTH_SYNTH1_CON_IVCOBUF_MSB 13
+#define SYNTH_SYNTH1_CON_IVCOBUF_LSB 13
+#define SYNTH_SYNTH1_CON_IVCOBUF_MASK 0x00002000
+#define SYNTH_SYNTH1_CON_IVCOBUF_GET(x) (((x) & SYNTH_SYNTH1_CON_IVCOBUF_MASK) >> SYNTH_SYNTH1_CON_IVCOBUF_LSB)
+#define SYNTH_SYNTH1_CON_IVCOBUF_SET(x) (((x) << SYNTH_SYNTH1_CON_IVCOBUF_LSB) & SYNTH_SYNTH1_CON_IVCOBUF_MASK)
+#define SYNTH_SYNTH1_SEL_VCMONABUS_MSB 12
+#define SYNTH_SYNTH1_SEL_VCMONABUS_LSB 10
+#define SYNTH_SYNTH1_SEL_VCMONABUS_MASK 0x00001c00
+#define SYNTH_SYNTH1_SEL_VCMONABUS_GET(x) (((x) & SYNTH_SYNTH1_SEL_VCMONABUS_MASK) >> SYNTH_SYNTH1_SEL_VCMONABUS_LSB)
+#define SYNTH_SYNTH1_SEL_VCMONABUS_SET(x) (((x) << SYNTH_SYNTH1_SEL_VCMONABUS_LSB) & SYNTH_SYNTH1_SEL_VCMONABUS_MASK)
+#define SYNTH_SYNTH1_PWUP_VCOBUF_PD_MSB 9
+#define SYNTH_SYNTH1_PWUP_VCOBUF_PD_LSB 9
+#define SYNTH_SYNTH1_PWUP_VCOBUF_PD_MASK 0x00000200
+#define SYNTH_SYNTH1_PWUP_VCOBUF_PD_GET(x) (((x) & SYNTH_SYNTH1_PWUP_VCOBUF_PD_MASK) >> SYNTH_SYNTH1_PWUP_VCOBUF_PD_LSB)
+#define SYNTH_SYNTH1_PWUP_VCOBUF_PD_SET(x) (((x) << SYNTH_SYNTH1_PWUP_VCOBUF_PD_LSB) & SYNTH_SYNTH1_PWUP_VCOBUF_PD_MASK)
+#define SYNTH_SYNTH1_PWUP_LODIV_PD_MSB 8
+#define SYNTH_SYNTH1_PWUP_LODIV_PD_LSB 8
+#define SYNTH_SYNTH1_PWUP_LODIV_PD_MASK 0x00000100
+#define SYNTH_SYNTH1_PWUP_LODIV_PD_GET(x) (((x) & SYNTH_SYNTH1_PWUP_LODIV_PD_MASK) >> SYNTH_SYNTH1_PWUP_LODIV_PD_LSB)
+#define SYNTH_SYNTH1_PWUP_LODIV_PD_SET(x) (((x) << SYNTH_SYNTH1_PWUP_LODIV_PD_LSB) & SYNTH_SYNTH1_PWUP_LODIV_PD_MASK)
+#define SYNTH_SYNTH1_PWUP_LOMIX_PD_MSB 7
+#define SYNTH_SYNTH1_PWUP_LOMIX_PD_LSB 7
+#define SYNTH_SYNTH1_PWUP_LOMIX_PD_MASK 0x00000080
+#define SYNTH_SYNTH1_PWUP_LOMIX_PD_GET(x) (((x) & SYNTH_SYNTH1_PWUP_LOMIX_PD_MASK) >> SYNTH_SYNTH1_PWUP_LOMIX_PD_LSB)
+#define SYNTH_SYNTH1_PWUP_LOMIX_PD_SET(x) (((x) << SYNTH_SYNTH1_PWUP_LOMIX_PD_LSB) & SYNTH_SYNTH1_PWUP_LOMIX_PD_MASK)
+#define SYNTH_SYNTH1_PWUP_LOBUF5G_PD_MSB 6
+#define SYNTH_SYNTH1_PWUP_LOBUF5G_PD_LSB 6
+#define SYNTH_SYNTH1_PWUP_LOBUF5G_PD_MASK 0x00000040
+#define SYNTH_SYNTH1_PWUP_LOBUF5G_PD_GET(x) (((x) & SYNTH_SYNTH1_PWUP_LOBUF5G_PD_MASK) >> SYNTH_SYNTH1_PWUP_LOBUF5G_PD_LSB)
+#define SYNTH_SYNTH1_PWUP_LOBUF5G_PD_SET(x) (((x) << SYNTH_SYNTH1_PWUP_LOBUF5G_PD_LSB) & SYNTH_SYNTH1_PWUP_LOBUF5G_PD_MASK)
+#define SYNTH_SYNTH1_MONITOR_FB_MSB 5
+#define SYNTH_SYNTH1_MONITOR_FB_LSB 5
+#define SYNTH_SYNTH1_MONITOR_FB_MASK 0x00000020
+#define SYNTH_SYNTH1_MONITOR_FB_GET(x) (((x) & SYNTH_SYNTH1_MONITOR_FB_MASK) >> SYNTH_SYNTH1_MONITOR_FB_LSB)
+#define SYNTH_SYNTH1_MONITOR_FB_SET(x) (((x) << SYNTH_SYNTH1_MONITOR_FB_LSB) & SYNTH_SYNTH1_MONITOR_FB_MASK)
+#define SYNTH_SYNTH1_MONITOR_REF_MSB 4
+#define SYNTH_SYNTH1_MONITOR_REF_LSB 4
+#define SYNTH_SYNTH1_MONITOR_REF_MASK 0x00000010
+#define SYNTH_SYNTH1_MONITOR_REF_GET(x) (((x) & SYNTH_SYNTH1_MONITOR_REF_MASK) >> SYNTH_SYNTH1_MONITOR_REF_LSB)
+#define SYNTH_SYNTH1_MONITOR_REF_SET(x) (((x) << SYNTH_SYNTH1_MONITOR_REF_LSB) & SYNTH_SYNTH1_MONITOR_REF_MASK)
+#define SYNTH_SYNTH1_MONITOR_FB_DIV2_MSB 3
+#define SYNTH_SYNTH1_MONITOR_FB_DIV2_LSB 3
+#define SYNTH_SYNTH1_MONITOR_FB_DIV2_MASK 0x00000008
+#define SYNTH_SYNTH1_MONITOR_FB_DIV2_GET(x) (((x) & SYNTH_SYNTH1_MONITOR_FB_DIV2_MASK) >> SYNTH_SYNTH1_MONITOR_FB_DIV2_LSB)
+#define SYNTH_SYNTH1_MONITOR_FB_DIV2_SET(x) (((x) << SYNTH_SYNTH1_MONITOR_FB_DIV2_LSB) & SYNTH_SYNTH1_MONITOR_FB_DIV2_MASK)
+#define SYNTH_SYNTH1_MONITOR_VC2HIGH_MSB 2
+#define SYNTH_SYNTH1_MONITOR_VC2HIGH_LSB 2
+#define SYNTH_SYNTH1_MONITOR_VC2HIGH_MASK 0x00000004
+#define SYNTH_SYNTH1_MONITOR_VC2HIGH_GET(x) (((x) & SYNTH_SYNTH1_MONITOR_VC2HIGH_MASK) >> SYNTH_SYNTH1_MONITOR_VC2HIGH_LSB)
+#define SYNTH_SYNTH1_MONITOR_VC2HIGH_SET(x) (((x) << SYNTH_SYNTH1_MONITOR_VC2HIGH_LSB) & SYNTH_SYNTH1_MONITOR_VC2HIGH_MASK)
+#define SYNTH_SYNTH1_MONITOR_VC2LOW_MSB 1
+#define SYNTH_SYNTH1_MONITOR_VC2LOW_LSB 1
+#define SYNTH_SYNTH1_MONITOR_VC2LOW_MASK 0x00000002
+#define SYNTH_SYNTH1_MONITOR_VC2LOW_GET(x) (((x) & SYNTH_SYNTH1_MONITOR_VC2LOW_MASK) >> SYNTH_SYNTH1_MONITOR_VC2LOW_LSB)
+#define SYNTH_SYNTH1_MONITOR_VC2LOW_SET(x) (((x) << SYNTH_SYNTH1_MONITOR_VC2LOW_LSB) & SYNTH_SYNTH1_MONITOR_VC2LOW_MASK)
+#define SYNTH_SYNTH1_MONITOR_SYNTHLOCKVCOK_MSB 0
+#define SYNTH_SYNTH1_MONITOR_SYNTHLOCKVCOK_LSB 0
+#define SYNTH_SYNTH1_MONITOR_SYNTHLOCKVCOK_MASK 0x00000001
+#define SYNTH_SYNTH1_MONITOR_SYNTHLOCKVCOK_GET(x) (((x) & SYNTH_SYNTH1_MONITOR_SYNTHLOCKVCOK_MASK) >> SYNTH_SYNTH1_MONITOR_SYNTHLOCKVCOK_LSB)
+#define SYNTH_SYNTH1_MONITOR_SYNTHLOCKVCOK_SET(x) (((x) << SYNTH_SYNTH1_MONITOR_SYNTHLOCKVCOK_LSB) & SYNTH_SYNTH1_MONITOR_SYNTHLOCKVCOK_MASK)
+
+#define SYNTH_SYNTH2_ADDRESS 0x00000004
+#define SYNTH_SYNTH2_OFFSET 0x00000004
+#define SYNTH_SYNTH2_VC_CAL_REF_MSB 31
+#define SYNTH_SYNTH2_VC_CAL_REF_LSB 29
+#define SYNTH_SYNTH2_VC_CAL_REF_MASK 0xe0000000
+#define SYNTH_SYNTH2_VC_CAL_REF_GET(x) (((x) & SYNTH_SYNTH2_VC_CAL_REF_MASK) >> SYNTH_SYNTH2_VC_CAL_REF_LSB)
+#define SYNTH_SYNTH2_VC_CAL_REF_SET(x) (((x) << SYNTH_SYNTH2_VC_CAL_REF_LSB) & SYNTH_SYNTH2_VC_CAL_REF_MASK)
+#define SYNTH_SYNTH2_VC_HI_REF_MSB 28
+#define SYNTH_SYNTH2_VC_HI_REF_LSB 26
+#define SYNTH_SYNTH2_VC_HI_REF_MASK 0x1c000000
+#define SYNTH_SYNTH2_VC_HI_REF_GET(x) (((x) & SYNTH_SYNTH2_VC_HI_REF_MASK) >> SYNTH_SYNTH2_VC_HI_REF_LSB)
+#define SYNTH_SYNTH2_VC_HI_REF_SET(x) (((x) << SYNTH_SYNTH2_VC_HI_REF_LSB) & SYNTH_SYNTH2_VC_HI_REF_MASK)
+#define SYNTH_SYNTH2_VC_MID_REF_MSB 25
+#define SYNTH_SYNTH2_VC_MID_REF_LSB 23
+#define SYNTH_SYNTH2_VC_MID_REF_MASK 0x03800000
+#define SYNTH_SYNTH2_VC_MID_REF_GET(x) (((x) & SYNTH_SYNTH2_VC_MID_REF_MASK) >> SYNTH_SYNTH2_VC_MID_REF_LSB)
+#define SYNTH_SYNTH2_VC_MID_REF_SET(x) (((x) << SYNTH_SYNTH2_VC_MID_REF_LSB) & SYNTH_SYNTH2_VC_MID_REF_MASK)
+#define SYNTH_SYNTH2_VC_LOW_REF_MSB 22
+#define SYNTH_SYNTH2_VC_LOW_REF_LSB 20
+#define SYNTH_SYNTH2_VC_LOW_REF_MASK 0x00700000
+#define SYNTH_SYNTH2_VC_LOW_REF_GET(x) (((x) & SYNTH_SYNTH2_VC_LOW_REF_MASK) >> SYNTH_SYNTH2_VC_LOW_REF_LSB)
+#define SYNTH_SYNTH2_VC_LOW_REF_SET(x) (((x) << SYNTH_SYNTH2_VC_LOW_REF_LSB) & SYNTH_SYNTH2_VC_LOW_REF_MASK)
+#define SYNTH_SYNTH2_LOOP_3RD_ORDER_R_MSB 19
+#define SYNTH_SYNTH2_LOOP_3RD_ORDER_R_LSB 15
+#define SYNTH_SYNTH2_LOOP_3RD_ORDER_R_MASK 0x000f8000
+#define SYNTH_SYNTH2_LOOP_3RD_ORDER_R_GET(x) (((x) & SYNTH_SYNTH2_LOOP_3RD_ORDER_R_MASK) >> SYNTH_SYNTH2_LOOP_3RD_ORDER_R_LSB)
+#define SYNTH_SYNTH2_LOOP_3RD_ORDER_R_SET(x) (((x) << SYNTH_SYNTH2_LOOP_3RD_ORDER_R_LSB) & SYNTH_SYNTH2_LOOP_3RD_ORDER_R_MASK)
+#define SYNTH_SYNTH2_LOOP_CP_MSB 14
+#define SYNTH_SYNTH2_LOOP_CP_LSB 10
+#define SYNTH_SYNTH2_LOOP_CP_MASK 0x00007c00
+#define SYNTH_SYNTH2_LOOP_CP_GET(x) (((x) & SYNTH_SYNTH2_LOOP_CP_MASK) >> SYNTH_SYNTH2_LOOP_CP_LSB)
+#define SYNTH_SYNTH2_LOOP_CP_SET(x) (((x) << SYNTH_SYNTH2_LOOP_CP_LSB) & SYNTH_SYNTH2_LOOP_CP_MASK)
+#define SYNTH_SYNTH2_LOOP_RS_MSB 9
+#define SYNTH_SYNTH2_LOOP_RS_LSB 5
+#define SYNTH_SYNTH2_LOOP_RS_MASK 0x000003e0
+#define SYNTH_SYNTH2_LOOP_RS_GET(x) (((x) & SYNTH_SYNTH2_LOOP_RS_MASK) >> SYNTH_SYNTH2_LOOP_RS_LSB)
+#define SYNTH_SYNTH2_LOOP_RS_SET(x) (((x) << SYNTH_SYNTH2_LOOP_RS_LSB) & SYNTH_SYNTH2_LOOP_RS_MASK)
+#define SYNTH_SYNTH2_LOOP_CS_MSB 4
+#define SYNTH_SYNTH2_LOOP_CS_LSB 3
+#define SYNTH_SYNTH2_LOOP_CS_MASK 0x00000018
+#define SYNTH_SYNTH2_LOOP_CS_GET(x) (((x) & SYNTH_SYNTH2_LOOP_CS_MASK) >> SYNTH_SYNTH2_LOOP_CS_LSB)
+#define SYNTH_SYNTH2_LOOP_CS_SET(x) (((x) << SYNTH_SYNTH2_LOOP_CS_LSB) & SYNTH_SYNTH2_LOOP_CS_MASK)
+#define SYNTH_SYNTH2_SPARE_BITS_MSB 2
+#define SYNTH_SYNTH2_SPARE_BITS_LSB 0
+#define SYNTH_SYNTH2_SPARE_BITS_MASK 0x00000007
+#define SYNTH_SYNTH2_SPARE_BITS_GET(x) (((x) & SYNTH_SYNTH2_SPARE_BITS_MASK) >> SYNTH_SYNTH2_SPARE_BITS_LSB)
+#define SYNTH_SYNTH2_SPARE_BITS_SET(x) (((x) << SYNTH_SYNTH2_SPARE_BITS_LSB) & SYNTH_SYNTH2_SPARE_BITS_MASK)
+
+#define SYNTH_SYNTH3_ADDRESS 0x00000008
+#define SYNTH_SYNTH3_OFFSET 0x00000008
+#define SYNTH_SYNTH3_DIS_CLK_XTAL_MSB 31
+#define SYNTH_SYNTH3_DIS_CLK_XTAL_LSB 31
+#define SYNTH_SYNTH3_DIS_CLK_XTAL_MASK 0x80000000
+#define SYNTH_SYNTH3_DIS_CLK_XTAL_GET(x) (((x) & SYNTH_SYNTH3_DIS_CLK_XTAL_MASK) >> SYNTH_SYNTH3_DIS_CLK_XTAL_LSB)
+#define SYNTH_SYNTH3_DIS_CLK_XTAL_SET(x) (((x) << SYNTH_SYNTH3_DIS_CLK_XTAL_LSB) & SYNTH_SYNTH3_DIS_CLK_XTAL_MASK)
+#define SYNTH_SYNTH3_SEL_CLK_DIV2_MSB 30
+#define SYNTH_SYNTH3_SEL_CLK_DIV2_LSB 30
+#define SYNTH_SYNTH3_SEL_CLK_DIV2_MASK 0x40000000
+#define SYNTH_SYNTH3_SEL_CLK_DIV2_GET(x) (((x) & SYNTH_SYNTH3_SEL_CLK_DIV2_MASK) >> SYNTH_SYNTH3_SEL_CLK_DIV2_LSB)
+#define SYNTH_SYNTH3_SEL_CLK_DIV2_SET(x) (((x) << SYNTH_SYNTH3_SEL_CLK_DIV2_LSB) & SYNTH_SYNTH3_SEL_CLK_DIV2_MASK)
+#define SYNTH_SYNTH3_WAIT_SHORTR_PWRUP_MSB 29
+#define SYNTH_SYNTH3_WAIT_SHORTR_PWRUP_LSB 24
+#define SYNTH_SYNTH3_WAIT_SHORTR_PWRUP_MASK 0x3f000000
+#define SYNTH_SYNTH3_WAIT_SHORTR_PWRUP_GET(x) (((x) & SYNTH_SYNTH3_WAIT_SHORTR_PWRUP_MASK) >> SYNTH_SYNTH3_WAIT_SHORTR_PWRUP_LSB)
+#define SYNTH_SYNTH3_WAIT_SHORTR_PWRUP_SET(x) (((x) << SYNTH_SYNTH3_WAIT_SHORTR_PWRUP_LSB) & SYNTH_SYNTH3_WAIT_SHORTR_PWRUP_MASK)
+#define SYNTH_SYNTH3_WAIT_PWRUP_MSB 23
+#define SYNTH_SYNTH3_WAIT_PWRUP_LSB 18
+#define SYNTH_SYNTH3_WAIT_PWRUP_MASK 0x00fc0000
+#define SYNTH_SYNTH3_WAIT_PWRUP_GET(x) (((x) & SYNTH_SYNTH3_WAIT_PWRUP_MASK) >> SYNTH_SYNTH3_WAIT_PWRUP_LSB)
+#define SYNTH_SYNTH3_WAIT_PWRUP_SET(x) (((x) << SYNTH_SYNTH3_WAIT_PWRUP_LSB) & SYNTH_SYNTH3_WAIT_PWRUP_MASK)
+#define SYNTH_SYNTH3_WAIT_CAL_BIN_MSB 17
+#define SYNTH_SYNTH3_WAIT_CAL_BIN_LSB 12
+#define SYNTH_SYNTH3_WAIT_CAL_BIN_MASK 0x0003f000
+#define SYNTH_SYNTH3_WAIT_CAL_BIN_GET(x) (((x) & SYNTH_SYNTH3_WAIT_CAL_BIN_MASK) >> SYNTH_SYNTH3_WAIT_CAL_BIN_LSB)
+#define SYNTH_SYNTH3_WAIT_CAL_BIN_SET(x) (((x) << SYNTH_SYNTH3_WAIT_CAL_BIN_LSB) & SYNTH_SYNTH3_WAIT_CAL_BIN_MASK)
+#define SYNTH_SYNTH3_WAIT_CAL_LIN_MSB 11
+#define SYNTH_SYNTH3_WAIT_CAL_LIN_LSB 6
+#define SYNTH_SYNTH3_WAIT_CAL_LIN_MASK 0x00000fc0
+#define SYNTH_SYNTH3_WAIT_CAL_LIN_GET(x) (((x) & SYNTH_SYNTH3_WAIT_CAL_LIN_MASK) >> SYNTH_SYNTH3_WAIT_CAL_LIN_LSB)
+#define SYNTH_SYNTH3_WAIT_CAL_LIN_SET(x) (((x) << SYNTH_SYNTH3_WAIT_CAL_LIN_LSB) & SYNTH_SYNTH3_WAIT_CAL_LIN_MASK)
+#define SYNTH_SYNTH3_WAIT_VC_CHECK_MSB 5
+#define SYNTH_SYNTH3_WAIT_VC_CHECK_LSB 0
+#define SYNTH_SYNTH3_WAIT_VC_CHECK_MASK 0x0000003f
+#define SYNTH_SYNTH3_WAIT_VC_CHECK_GET(x) (((x) & SYNTH_SYNTH3_WAIT_VC_CHECK_MASK) >> SYNTH_SYNTH3_WAIT_VC_CHECK_LSB)
+#define SYNTH_SYNTH3_WAIT_VC_CHECK_SET(x) (((x) << SYNTH_SYNTH3_WAIT_VC_CHECK_LSB) & SYNTH_SYNTH3_WAIT_VC_CHECK_MASK)
+
+#define SYNTH_SYNTH4_ADDRESS 0x0000000c
+#define SYNTH_SYNTH4_OFFSET 0x0000000c
+#define SYNTH_SYNTH4_DIS_LIN_CAPSEARCH_MSB 31
+#define SYNTH_SYNTH4_DIS_LIN_CAPSEARCH_LSB 31
+#define SYNTH_SYNTH4_DIS_LIN_CAPSEARCH_MASK 0x80000000
+#define SYNTH_SYNTH4_DIS_LIN_CAPSEARCH_GET(x) (((x) & SYNTH_SYNTH4_DIS_LIN_CAPSEARCH_MASK) >> SYNTH_SYNTH4_DIS_LIN_CAPSEARCH_LSB)
+#define SYNTH_SYNTH4_DIS_LIN_CAPSEARCH_SET(x) (((x) << SYNTH_SYNTH4_DIS_LIN_CAPSEARCH_LSB) & SYNTH_SYNTH4_DIS_LIN_CAPSEARCH_MASK)
+#define SYNTH_SYNTH4_DIS_LOSTVC_MSB 30
+#define SYNTH_SYNTH4_DIS_LOSTVC_LSB 30
+#define SYNTH_SYNTH4_DIS_LOSTVC_MASK 0x40000000
+#define SYNTH_SYNTH4_DIS_LOSTVC_GET(x) (((x) & SYNTH_SYNTH4_DIS_LOSTVC_MASK) >> SYNTH_SYNTH4_DIS_LOSTVC_LSB)
+#define SYNTH_SYNTH4_DIS_LOSTVC_SET(x) (((x) << SYNTH_SYNTH4_DIS_LOSTVC_LSB) & SYNTH_SYNTH4_DIS_LOSTVC_MASK)
+#define SYNTH_SYNTH4_ALWAYS_SHORTR_MSB 29
+#define SYNTH_SYNTH4_ALWAYS_SHORTR_LSB 29
+#define SYNTH_SYNTH4_ALWAYS_SHORTR_MASK 0x20000000
+#define SYNTH_SYNTH4_ALWAYS_SHORTR_GET(x) (((x) & SYNTH_SYNTH4_ALWAYS_SHORTR_MASK) >> SYNTH_SYNTH4_ALWAYS_SHORTR_LSB)
+#define SYNTH_SYNTH4_ALWAYS_SHORTR_SET(x) (((x) << SYNTH_SYNTH4_ALWAYS_SHORTR_LSB) & SYNTH_SYNTH4_ALWAYS_SHORTR_MASK)
+#define SYNTH_SYNTH4_SHORTR_UNTIL_LOCKED_MSB 28
+#define SYNTH_SYNTH4_SHORTR_UNTIL_LOCKED_LSB 28
+#define SYNTH_SYNTH4_SHORTR_UNTIL_LOCKED_MASK 0x10000000
+#define SYNTH_SYNTH4_SHORTR_UNTIL_LOCKED_GET(x) (((x) & SYNTH_SYNTH4_SHORTR_UNTIL_LOCKED_MASK) >> SYNTH_SYNTH4_SHORTR_UNTIL_LOCKED_LSB)
+#define SYNTH_SYNTH4_SHORTR_UNTIL_LOCKED_SET(x) (((x) << SYNTH_SYNTH4_SHORTR_UNTIL_LOCKED_LSB) & SYNTH_SYNTH4_SHORTR_UNTIL_LOCKED_MASK)
+#define SYNTH_SYNTH4_FORCE_PINVC_MSB 27
+#define SYNTH_SYNTH4_FORCE_PINVC_LSB 27
+#define SYNTH_SYNTH4_FORCE_PINVC_MASK 0x08000000
+#define SYNTH_SYNTH4_FORCE_PINVC_GET(x) (((x) & SYNTH_SYNTH4_FORCE_PINVC_MASK) >> SYNTH_SYNTH4_FORCE_PINVC_LSB)
+#define SYNTH_SYNTH4_FORCE_PINVC_SET(x) (((x) << SYNTH_SYNTH4_FORCE_PINVC_LSB) & SYNTH_SYNTH4_FORCE_PINVC_MASK)
+#define SYNTH_SYNTH4_FORCE_VCOCAP_MSB 26
+#define SYNTH_SYNTH4_FORCE_VCOCAP_LSB 26
+#define SYNTH_SYNTH4_FORCE_VCOCAP_MASK 0x04000000
+#define SYNTH_SYNTH4_FORCE_VCOCAP_GET(x) (((x) & SYNTH_SYNTH4_FORCE_VCOCAP_MASK) >> SYNTH_SYNTH4_FORCE_VCOCAP_LSB)
+#define SYNTH_SYNTH4_FORCE_VCOCAP_SET(x) (((x) << SYNTH_SYNTH4_FORCE_VCOCAP_LSB) & SYNTH_SYNTH4_FORCE_VCOCAP_MASK)
+#define SYNTH_SYNTH4_VCOCAP_OVR_MSB 25
+#define SYNTH_SYNTH4_VCOCAP_OVR_LSB 18
+#define SYNTH_SYNTH4_VCOCAP_OVR_MASK 0x03fc0000
+#define SYNTH_SYNTH4_VCOCAP_OVR_GET(x) (((x) & SYNTH_SYNTH4_VCOCAP_OVR_MASK) >> SYNTH_SYNTH4_VCOCAP_OVR_LSB)
+#define SYNTH_SYNTH4_VCOCAP_OVR_SET(x) (((x) << SYNTH_SYNTH4_VCOCAP_OVR_LSB) & SYNTH_SYNTH4_VCOCAP_OVR_MASK)
+#define SYNTH_SYNTH4_VCOCAPPULLUP_MSB 17
+#define SYNTH_SYNTH4_VCOCAPPULLUP_LSB 17
+#define SYNTH_SYNTH4_VCOCAPPULLUP_MASK 0x00020000
+#define SYNTH_SYNTH4_VCOCAPPULLUP_GET(x) (((x) & SYNTH_SYNTH4_VCOCAPPULLUP_MASK) >> SYNTH_SYNTH4_VCOCAPPULLUP_LSB)
+#define SYNTH_SYNTH4_VCOCAPPULLUP_SET(x) (((x) << SYNTH_SYNTH4_VCOCAPPULLUP_LSB) & SYNTH_SYNTH4_VCOCAPPULLUP_MASK)
+#define SYNTH_SYNTH4_REFDIVSEL_MSB 16
+#define SYNTH_SYNTH4_REFDIVSEL_LSB 15
+#define SYNTH_SYNTH4_REFDIVSEL_MASK 0x00018000
+#define SYNTH_SYNTH4_REFDIVSEL_GET(x) (((x) & SYNTH_SYNTH4_REFDIVSEL_MASK) >> SYNTH_SYNTH4_REFDIVSEL_LSB)
+#define SYNTH_SYNTH4_REFDIVSEL_SET(x) (((x) << SYNTH_SYNTH4_REFDIVSEL_LSB) & SYNTH_SYNTH4_REFDIVSEL_MASK)
+#define SYNTH_SYNTH4_PFDDELAY_MSB 14
+#define SYNTH_SYNTH4_PFDDELAY_LSB 14
+#define SYNTH_SYNTH4_PFDDELAY_MASK 0x00004000
+#define SYNTH_SYNTH4_PFDDELAY_GET(x) (((x) & SYNTH_SYNTH4_PFDDELAY_MASK) >> SYNTH_SYNTH4_PFDDELAY_LSB)
+#define SYNTH_SYNTH4_PFDDELAY_SET(x) (((x) << SYNTH_SYNTH4_PFDDELAY_LSB) & SYNTH_SYNTH4_PFDDELAY_MASK)
+#define SYNTH_SYNTH4_PFD_DISABLE_MSB 13
+#define SYNTH_SYNTH4_PFD_DISABLE_LSB 13
+#define SYNTH_SYNTH4_PFD_DISABLE_MASK 0x00002000
+#define SYNTH_SYNTH4_PFD_DISABLE_GET(x) (((x) & SYNTH_SYNTH4_PFD_DISABLE_MASK) >> SYNTH_SYNTH4_PFD_DISABLE_LSB)
+#define SYNTH_SYNTH4_PFD_DISABLE_SET(x) (((x) << SYNTH_SYNTH4_PFD_DISABLE_LSB) & SYNTH_SYNTH4_PFD_DISABLE_MASK)
+#define SYNTH_SYNTH4_PRESCSEL_MSB 12
+#define SYNTH_SYNTH4_PRESCSEL_LSB 11
+#define SYNTH_SYNTH4_PRESCSEL_MASK 0x00001800
+#define SYNTH_SYNTH4_PRESCSEL_GET(x) (((x) & SYNTH_SYNTH4_PRESCSEL_MASK) >> SYNTH_SYNTH4_PRESCSEL_LSB)
+#define SYNTH_SYNTH4_PRESCSEL_SET(x) (((x) << SYNTH_SYNTH4_PRESCSEL_LSB) & SYNTH_SYNTH4_PRESCSEL_MASK)
+#define SYNTH_SYNTH4_RESET_PRESC_MSB 10
+#define SYNTH_SYNTH4_RESET_PRESC_LSB 10
+#define SYNTH_SYNTH4_RESET_PRESC_MASK 0x00000400
+#define SYNTH_SYNTH4_RESET_PRESC_GET(x) (((x) & SYNTH_SYNTH4_RESET_PRESC_MASK) >> SYNTH_SYNTH4_RESET_PRESC_LSB)
+#define SYNTH_SYNTH4_RESET_PRESC_SET(x) (((x) << SYNTH_SYNTH4_RESET_PRESC_LSB) & SYNTH_SYNTH4_RESET_PRESC_MASK)
+#define SYNTH_SYNTH4_SDM_DISABLE_MSB 9
+#define SYNTH_SYNTH4_SDM_DISABLE_LSB 9
+#define SYNTH_SYNTH4_SDM_DISABLE_MASK 0x00000200
+#define SYNTH_SYNTH4_SDM_DISABLE_GET(x) (((x) & SYNTH_SYNTH4_SDM_DISABLE_MASK) >> SYNTH_SYNTH4_SDM_DISABLE_LSB)
+#define SYNTH_SYNTH4_SDM_DISABLE_SET(x) (((x) << SYNTH_SYNTH4_SDM_DISABLE_LSB) & SYNTH_SYNTH4_SDM_DISABLE_MASK)
+#define SYNTH_SYNTH4_SDM_MODE_MSB 8
+#define SYNTH_SYNTH4_SDM_MODE_LSB 8
+#define SYNTH_SYNTH4_SDM_MODE_MASK 0x00000100
+#define SYNTH_SYNTH4_SDM_MODE_GET(x) (((x) & SYNTH_SYNTH4_SDM_MODE_MASK) >> SYNTH_SYNTH4_SDM_MODE_LSB)
+#define SYNTH_SYNTH4_SDM_MODE_SET(x) (((x) << SYNTH_SYNTH4_SDM_MODE_LSB) & SYNTH_SYNTH4_SDM_MODE_MASK)
+#define SYNTH_SYNTH4_SDM_DITHER_MSB 7
+#define SYNTH_SYNTH4_SDM_DITHER_LSB 6
+#define SYNTH_SYNTH4_SDM_DITHER_MASK 0x000000c0
+#define SYNTH_SYNTH4_SDM_DITHER_GET(x) (((x) & SYNTH_SYNTH4_SDM_DITHER_MASK) >> SYNTH_SYNTH4_SDM_DITHER_LSB)
+#define SYNTH_SYNTH4_SDM_DITHER_SET(x) (((x) << SYNTH_SYNTH4_SDM_DITHER_LSB) & SYNTH_SYNTH4_SDM_DITHER_MASK)
+#define SYNTH_SYNTH4_PSCOUNT_FBSEL_MSB 5
+#define SYNTH_SYNTH4_PSCOUNT_FBSEL_LSB 5
+#define SYNTH_SYNTH4_PSCOUNT_FBSEL_MASK 0x00000020
+#define SYNTH_SYNTH4_PSCOUNT_FBSEL_GET(x) (((x) & SYNTH_SYNTH4_PSCOUNT_FBSEL_MASK) >> SYNTH_SYNTH4_PSCOUNT_FBSEL_LSB)
+#define SYNTH_SYNTH4_PSCOUNT_FBSEL_SET(x) (((x) << SYNTH_SYNTH4_PSCOUNT_FBSEL_LSB) & SYNTH_SYNTH4_PSCOUNT_FBSEL_MASK)
+#define SYNTH_SYNTH4_SEL_CLKXTAL_EDGE_MSB 4
+#define SYNTH_SYNTH4_SEL_CLKXTAL_EDGE_LSB 4
+#define SYNTH_SYNTH4_SEL_CLKXTAL_EDGE_MASK 0x00000010
+#define SYNTH_SYNTH4_SEL_CLKXTAL_EDGE_GET(x) (((x) & SYNTH_SYNTH4_SEL_CLKXTAL_EDGE_MASK) >> SYNTH_SYNTH4_SEL_CLKXTAL_EDGE_LSB)
+#define SYNTH_SYNTH4_SEL_CLKXTAL_EDGE_SET(x) (((x) << SYNTH_SYNTH4_SEL_CLKXTAL_EDGE_LSB) & SYNTH_SYNTH4_SEL_CLKXTAL_EDGE_MASK)
+#define SYNTH_SYNTH4_SPARE_MISC_MSB 3
+#define SYNTH_SYNTH4_SPARE_MISC_LSB 2
+#define SYNTH_SYNTH4_SPARE_MISC_MASK 0x0000000c
+#define SYNTH_SYNTH4_SPARE_MISC_GET(x) (((x) & SYNTH_SYNTH4_SPARE_MISC_MASK) >> SYNTH_SYNTH4_SPARE_MISC_LSB)
+#define SYNTH_SYNTH4_SPARE_MISC_SET(x) (((x) << SYNTH_SYNTH4_SPARE_MISC_LSB) & SYNTH_SYNTH4_SPARE_MISC_MASK)
+#define SYNTH_SYNTH4_LONGSHIFTSEL_MSB 1
+#define SYNTH_SYNTH4_LONGSHIFTSEL_LSB 1
+#define SYNTH_SYNTH4_LONGSHIFTSEL_MASK 0x00000002
+#define SYNTH_SYNTH4_LONGSHIFTSEL_GET(x) (((x) & SYNTH_SYNTH4_LONGSHIFTSEL_MASK) >> SYNTH_SYNTH4_LONGSHIFTSEL_LSB)
+#define SYNTH_SYNTH4_LONGSHIFTSEL_SET(x) (((x) << SYNTH_SYNTH4_LONGSHIFTSEL_LSB) & SYNTH_SYNTH4_LONGSHIFTSEL_MASK)
+#define SYNTH_SYNTH4_FORCE_SHIFTREG_MSB 0
+#define SYNTH_SYNTH4_FORCE_SHIFTREG_LSB 0
+#define SYNTH_SYNTH4_FORCE_SHIFTREG_MASK 0x00000001
+#define SYNTH_SYNTH4_FORCE_SHIFTREG_GET(x) (((x) & SYNTH_SYNTH4_FORCE_SHIFTREG_MASK) >> SYNTH_SYNTH4_FORCE_SHIFTREG_LSB)
+#define SYNTH_SYNTH4_FORCE_SHIFTREG_SET(x) (((x) << SYNTH_SYNTH4_FORCE_SHIFTREG_LSB) & SYNTH_SYNTH4_FORCE_SHIFTREG_MASK)
+
+#define SYNTH_SYNTH5_ADDRESS 0x00000010
+#define SYNTH_SYNTH5_OFFSET 0x00000010
+#define SYNTH_SYNTH5_LOOP_IP0_MSB 31
+#define SYNTH_SYNTH5_LOOP_IP0_LSB 28
+#define SYNTH_SYNTH5_LOOP_IP0_MASK 0xf0000000
+#define SYNTH_SYNTH5_LOOP_IP0_GET(x) (((x) & SYNTH_SYNTH5_LOOP_IP0_MASK) >> SYNTH_SYNTH5_LOOP_IP0_LSB)
+#define SYNTH_SYNTH5_LOOP_IP0_SET(x) (((x) << SYNTH_SYNTH5_LOOP_IP0_LSB) & SYNTH_SYNTH5_LOOP_IP0_MASK)
+#define SYNTH_SYNTH5_SLOPE_IP_MSB 27
+#define SYNTH_SYNTH5_SLOPE_IP_LSB 25
+#define SYNTH_SYNTH5_SLOPE_IP_MASK 0x0e000000
+#define SYNTH_SYNTH5_SLOPE_IP_GET(x) (((x) & SYNTH_SYNTH5_SLOPE_IP_MASK) >> SYNTH_SYNTH5_SLOPE_IP_LSB)
+#define SYNTH_SYNTH5_SLOPE_IP_SET(x) (((x) << SYNTH_SYNTH5_SLOPE_IP_LSB) & SYNTH_SYNTH5_SLOPE_IP_MASK)
+#define SYNTH_SYNTH5_CPBIAS_MSB 24
+#define SYNTH_SYNTH5_CPBIAS_LSB 23
+#define SYNTH_SYNTH5_CPBIAS_MASK 0x01800000
+#define SYNTH_SYNTH5_CPBIAS_GET(x) (((x) & SYNTH_SYNTH5_CPBIAS_MASK) >> SYNTH_SYNTH5_CPBIAS_LSB)
+#define SYNTH_SYNTH5_CPBIAS_SET(x) (((x) << SYNTH_SYNTH5_CPBIAS_LSB) & SYNTH_SYNTH5_CPBIAS_MASK)
+#define SYNTH_SYNTH5_CPSTEERING_EN_MSB 22
+#define SYNTH_SYNTH5_CPSTEERING_EN_LSB 22
+#define SYNTH_SYNTH5_CPSTEERING_EN_MASK 0x00400000
+#define SYNTH_SYNTH5_CPSTEERING_EN_GET(x) (((x) & SYNTH_SYNTH5_CPSTEERING_EN_MASK) >> SYNTH_SYNTH5_CPSTEERING_EN_LSB)
+#define SYNTH_SYNTH5_CPSTEERING_EN_SET(x) (((x) << SYNTH_SYNTH5_CPSTEERING_EN_LSB) & SYNTH_SYNTH5_CPSTEERING_EN_MASK)
+#define SYNTH_SYNTH5_CPLOWLK_MSB 21
+#define SYNTH_SYNTH5_CPLOWLK_LSB 21
+#define SYNTH_SYNTH5_CPLOWLK_MASK 0x00200000
+#define SYNTH_SYNTH5_CPLOWLK_GET(x) (((x) & SYNTH_SYNTH5_CPLOWLK_MASK) >> SYNTH_SYNTH5_CPLOWLK_LSB)
+#define SYNTH_SYNTH5_CPLOWLK_SET(x) (((x) << SYNTH_SYNTH5_CPLOWLK_LSB) & SYNTH_SYNTH5_CPLOWLK_MASK)
+#define SYNTH_SYNTH5_LOOPLEAKCUR_MSB 20
+#define SYNTH_SYNTH5_LOOPLEAKCUR_LSB 17
+#define SYNTH_SYNTH5_LOOPLEAKCUR_MASK 0x001e0000
+#define SYNTH_SYNTH5_LOOPLEAKCUR_GET(x) (((x) & SYNTH_SYNTH5_LOOPLEAKCUR_MASK) >> SYNTH_SYNTH5_LOOPLEAKCUR_LSB)
+#define SYNTH_SYNTH5_LOOPLEAKCUR_SET(x) (((x) << SYNTH_SYNTH5_LOOPLEAKCUR_LSB) & SYNTH_SYNTH5_LOOPLEAKCUR_MASK)
+#define SYNTH_SYNTH5_CAPRANGE1_MSB 16
+#define SYNTH_SYNTH5_CAPRANGE1_LSB 13
+#define SYNTH_SYNTH5_CAPRANGE1_MASK 0x0001e000
+#define SYNTH_SYNTH5_CAPRANGE1_GET(x) (((x) & SYNTH_SYNTH5_CAPRANGE1_MASK) >> SYNTH_SYNTH5_CAPRANGE1_LSB)
+#define SYNTH_SYNTH5_CAPRANGE1_SET(x) (((x) << SYNTH_SYNTH5_CAPRANGE1_LSB) & SYNTH_SYNTH5_CAPRANGE1_MASK)
+#define SYNTH_SYNTH5_CAPRANGE2_MSB 12
+#define SYNTH_SYNTH5_CAPRANGE2_LSB 9
+#define SYNTH_SYNTH5_CAPRANGE2_MASK 0x00001e00
+#define SYNTH_SYNTH5_CAPRANGE2_GET(x) (((x) & SYNTH_SYNTH5_CAPRANGE2_MASK) >> SYNTH_SYNTH5_CAPRANGE2_LSB)
+#define SYNTH_SYNTH5_CAPRANGE2_SET(x) (((x) << SYNTH_SYNTH5_CAPRANGE2_LSB) & SYNTH_SYNTH5_CAPRANGE2_MASK)
+#define SYNTH_SYNTH5_CAPRANGE3_MSB 8
+#define SYNTH_SYNTH5_CAPRANGE3_LSB 5
+#define SYNTH_SYNTH5_CAPRANGE3_MASK 0x000001e0
+#define SYNTH_SYNTH5_CAPRANGE3_GET(x) (((x) & SYNTH_SYNTH5_CAPRANGE3_MASK) >> SYNTH_SYNTH5_CAPRANGE3_LSB)
+#define SYNTH_SYNTH5_CAPRANGE3_SET(x) (((x) << SYNTH_SYNTH5_CAPRANGE3_LSB) & SYNTH_SYNTH5_CAPRANGE3_MASK)
+#define SYNTH_SYNTH5_FORCE_LOBUF5GTUNE_MSB 4
+#define SYNTH_SYNTH5_FORCE_LOBUF5GTUNE_LSB 4
+#define SYNTH_SYNTH5_FORCE_LOBUF5GTUNE_MASK 0x00000010
+#define SYNTH_SYNTH5_FORCE_LOBUF5GTUNE_GET(x) (((x) & SYNTH_SYNTH5_FORCE_LOBUF5GTUNE_MASK) >> SYNTH_SYNTH5_FORCE_LOBUF5GTUNE_LSB)
+#define SYNTH_SYNTH5_FORCE_LOBUF5GTUNE_SET(x) (((x) << SYNTH_SYNTH5_FORCE_LOBUF5GTUNE_LSB) & SYNTH_SYNTH5_FORCE_LOBUF5GTUNE_MASK)
+#define SYNTH_SYNTH5_LOBUF5GTUNE_OVR_MSB 3
+#define SYNTH_SYNTH5_LOBUF5GTUNE_OVR_LSB 2
+#define SYNTH_SYNTH5_LOBUF5GTUNE_OVR_MASK 0x0000000c
+#define SYNTH_SYNTH5_LOBUF5GTUNE_OVR_GET(x) (((x) & SYNTH_SYNTH5_LOBUF5GTUNE_OVR_MASK) >> SYNTH_SYNTH5_LOBUF5GTUNE_OVR_LSB)
+#define SYNTH_SYNTH5_LOBUF5GTUNE_OVR_SET(x) (((x) << SYNTH_SYNTH5_LOBUF5GTUNE_OVR_LSB) & SYNTH_SYNTH5_LOBUF5GTUNE_OVR_MASK)
+#define SYNTH_SYNTH5_SPARE_MSB 1
+#define SYNTH_SYNTH5_SPARE_LSB 0
+#define SYNTH_SYNTH5_SPARE_MASK 0x00000003
+#define SYNTH_SYNTH5_SPARE_GET(x) (((x) & SYNTH_SYNTH5_SPARE_MASK) >> SYNTH_SYNTH5_SPARE_LSB)
+#define SYNTH_SYNTH5_SPARE_SET(x) (((x) << SYNTH_SYNTH5_SPARE_LSB) & SYNTH_SYNTH5_SPARE_MASK)
+
+#define SYNTH_SYNTH6_ADDRESS 0x00000014
+#define SYNTH_SYNTH6_OFFSET 0x00000014
+#define SYNTH_SYNTH6_IRCP_MSB 31
+#define SYNTH_SYNTH6_IRCP_LSB 29
+#define SYNTH_SYNTH6_IRCP_MASK 0xe0000000
+#define SYNTH_SYNTH6_IRCP_GET(x) (((x) & SYNTH_SYNTH6_IRCP_MASK) >> SYNTH_SYNTH6_IRCP_LSB)
+#define SYNTH_SYNTH6_IRCP_SET(x) (((x) << SYNTH_SYNTH6_IRCP_LSB) & SYNTH_SYNTH6_IRCP_MASK)
+#define SYNTH_SYNTH6_IRVCMON_MSB 28
+#define SYNTH_SYNTH6_IRVCMON_LSB 26
+#define SYNTH_SYNTH6_IRVCMON_MASK 0x1c000000
+#define SYNTH_SYNTH6_IRVCMON_GET(x) (((x) & SYNTH_SYNTH6_IRVCMON_MASK) >> SYNTH_SYNTH6_IRVCMON_LSB)
+#define SYNTH_SYNTH6_IRVCMON_SET(x) (((x) << SYNTH_SYNTH6_IRVCMON_LSB) & SYNTH_SYNTH6_IRVCMON_MASK)
+#define SYNTH_SYNTH6_IRSPARE_MSB 25
+#define SYNTH_SYNTH6_IRSPARE_LSB 23
+#define SYNTH_SYNTH6_IRSPARE_MASK 0x03800000
+#define SYNTH_SYNTH6_IRSPARE_GET(x) (((x) & SYNTH_SYNTH6_IRSPARE_MASK) >> SYNTH_SYNTH6_IRSPARE_LSB)
+#define SYNTH_SYNTH6_IRSPARE_SET(x) (((x) << SYNTH_SYNTH6_IRSPARE_LSB) & SYNTH_SYNTH6_IRSPARE_MASK)
+#define SYNTH_SYNTH6_ICPRESC_MSB 22
+#define SYNTH_SYNTH6_ICPRESC_LSB 20
+#define SYNTH_SYNTH6_ICPRESC_MASK 0x00700000
+#define SYNTH_SYNTH6_ICPRESC_GET(x) (((x) & SYNTH_SYNTH6_ICPRESC_MASK) >> SYNTH_SYNTH6_ICPRESC_LSB)
+#define SYNTH_SYNTH6_ICPRESC_SET(x) (((x) << SYNTH_SYNTH6_ICPRESC_LSB) & SYNTH_SYNTH6_ICPRESC_MASK)
+#define SYNTH_SYNTH6_ICLODIV_MSB 19
+#define SYNTH_SYNTH6_ICLODIV_LSB 17
+#define SYNTH_SYNTH6_ICLODIV_MASK 0x000e0000
+#define SYNTH_SYNTH6_ICLODIV_GET(x) (((x) & SYNTH_SYNTH6_ICLODIV_MASK) >> SYNTH_SYNTH6_ICLODIV_LSB)
+#define SYNTH_SYNTH6_ICLODIV_SET(x) (((x) << SYNTH_SYNTH6_ICLODIV_LSB) & SYNTH_SYNTH6_ICLODIV_MASK)
+#define SYNTH_SYNTH6_ICLOMIX_MSB 16
+#define SYNTH_SYNTH6_ICLOMIX_LSB 14
+#define SYNTH_SYNTH6_ICLOMIX_MASK 0x0001c000
+#define SYNTH_SYNTH6_ICLOMIX_GET(x) (((x) & SYNTH_SYNTH6_ICLOMIX_MASK) >> SYNTH_SYNTH6_ICLOMIX_LSB)
+#define SYNTH_SYNTH6_ICLOMIX_SET(x) (((x) << SYNTH_SYNTH6_ICLOMIX_LSB) & SYNTH_SYNTH6_ICLOMIX_MASK)
+#define SYNTH_SYNTH6_ICSPAREA_MSB 13
+#define SYNTH_SYNTH6_ICSPAREA_LSB 11
+#define SYNTH_SYNTH6_ICSPAREA_MASK 0x00003800
+#define SYNTH_SYNTH6_ICSPAREA_GET(x) (((x) & SYNTH_SYNTH6_ICSPAREA_MASK) >> SYNTH_SYNTH6_ICSPAREA_LSB)
+#define SYNTH_SYNTH6_ICSPAREA_SET(x) (((x) << SYNTH_SYNTH6_ICSPAREA_LSB) & SYNTH_SYNTH6_ICSPAREA_MASK)
+#define SYNTH_SYNTH6_ICSPAREB_MSB 10
+#define SYNTH_SYNTH6_ICSPAREB_LSB 8
+#define SYNTH_SYNTH6_ICSPAREB_MASK 0x00000700
+#define SYNTH_SYNTH6_ICSPAREB_GET(x) (((x) & SYNTH_SYNTH6_ICSPAREB_MASK) >> SYNTH_SYNTH6_ICSPAREB_LSB)
+#define SYNTH_SYNTH6_ICSPAREB_SET(x) (((x) << SYNTH_SYNTH6_ICSPAREB_LSB) & SYNTH_SYNTH6_ICSPAREB_MASK)
+#define SYNTH_SYNTH6_ICVCO_MSB 7
+#define SYNTH_SYNTH6_ICVCO_LSB 5
+#define SYNTH_SYNTH6_ICVCO_MASK 0x000000e0
+#define SYNTH_SYNTH6_ICVCO_GET(x) (((x) & SYNTH_SYNTH6_ICVCO_MASK) >> SYNTH_SYNTH6_ICVCO_LSB)
+#define SYNTH_SYNTH6_ICVCO_SET(x) (((x) << SYNTH_SYNTH6_ICVCO_LSB) & SYNTH_SYNTH6_ICVCO_MASK)
+#define SYNTH_SYNTH6_VCOBUFBIAS_MSB 4
+#define SYNTH_SYNTH6_VCOBUFBIAS_LSB 3
+#define SYNTH_SYNTH6_VCOBUFBIAS_MASK 0x00000018
+#define SYNTH_SYNTH6_VCOBUFBIAS_GET(x) (((x) & SYNTH_SYNTH6_VCOBUFBIAS_MASK) >> SYNTH_SYNTH6_VCOBUFBIAS_LSB)
+#define SYNTH_SYNTH6_VCOBUFBIAS_SET(x) (((x) << SYNTH_SYNTH6_VCOBUFBIAS_LSB) & SYNTH_SYNTH6_VCOBUFBIAS_MASK)
+#define SYNTH_SYNTH6_SPARE_BIAS_MSB 2
+#define SYNTH_SYNTH6_SPARE_BIAS_LSB 0
+#define SYNTH_SYNTH6_SPARE_BIAS_MASK 0x00000007
+#define SYNTH_SYNTH6_SPARE_BIAS_GET(x) (((x) & SYNTH_SYNTH6_SPARE_BIAS_MASK) >> SYNTH_SYNTH6_SPARE_BIAS_LSB)
+#define SYNTH_SYNTH6_SPARE_BIAS_SET(x) (((x) << SYNTH_SYNTH6_SPARE_BIAS_LSB) & SYNTH_SYNTH6_SPARE_BIAS_MASK)
+
+#define SYNTH_SYNTH7_ADDRESS 0x00000018
+#define SYNTH_SYNTH7_OFFSET 0x00000018
+#define SYNTH_SYNTH7_SYNTH_ON_MSB 31
+#define SYNTH_SYNTH7_SYNTH_ON_LSB 31
+#define SYNTH_SYNTH7_SYNTH_ON_MASK 0x80000000
+#define SYNTH_SYNTH7_SYNTH_ON_GET(x) (((x) & SYNTH_SYNTH7_SYNTH_ON_MASK) >> SYNTH_SYNTH7_SYNTH_ON_LSB)
+#define SYNTH_SYNTH7_SYNTH_ON_SET(x) (((x) << SYNTH_SYNTH7_SYNTH_ON_LSB) & SYNTH_SYNTH7_SYNTH_ON_MASK)
+#define SYNTH_SYNTH7_SYNTH_SM_STATE_MSB 30
+#define SYNTH_SYNTH7_SYNTH_SM_STATE_LSB 27
+#define SYNTH_SYNTH7_SYNTH_SM_STATE_MASK 0x78000000
+#define SYNTH_SYNTH7_SYNTH_SM_STATE_GET(x) (((x) & SYNTH_SYNTH7_SYNTH_SM_STATE_MASK) >> SYNTH_SYNTH7_SYNTH_SM_STATE_LSB)
+#define SYNTH_SYNTH7_SYNTH_SM_STATE_SET(x) (((x) << SYNTH_SYNTH7_SYNTH_SM_STATE_LSB) & SYNTH_SYNTH7_SYNTH_SM_STATE_MASK)
+#define SYNTH_SYNTH7_CAP_SEARCH_MSB 26
+#define SYNTH_SYNTH7_CAP_SEARCH_LSB 26
+#define SYNTH_SYNTH7_CAP_SEARCH_MASK 0x04000000
+#define SYNTH_SYNTH7_CAP_SEARCH_GET(x) (((x) & SYNTH_SYNTH7_CAP_SEARCH_MASK) >> SYNTH_SYNTH7_CAP_SEARCH_LSB)
+#define SYNTH_SYNTH7_CAP_SEARCH_SET(x) (((x) << SYNTH_SYNTH7_CAP_SEARCH_LSB) & SYNTH_SYNTH7_CAP_SEARCH_MASK)
+#define SYNTH_SYNTH7_SYNTH_LOCK_VC_OK_MSB 25
+#define SYNTH_SYNTH7_SYNTH_LOCK_VC_OK_LSB 25
+#define SYNTH_SYNTH7_SYNTH_LOCK_VC_OK_MASK 0x02000000
+#define SYNTH_SYNTH7_SYNTH_LOCK_VC_OK_GET(x) (((x) & SYNTH_SYNTH7_SYNTH_LOCK_VC_OK_MASK) >> SYNTH_SYNTH7_SYNTH_LOCK_VC_OK_LSB)
+#define SYNTH_SYNTH7_SYNTH_LOCK_VC_OK_SET(x) (((x) << SYNTH_SYNTH7_SYNTH_LOCK_VC_OK_LSB) & SYNTH_SYNTH7_SYNTH_LOCK_VC_OK_MASK)
+#define SYNTH_SYNTH7_PIN_VC_MSB 24
+#define SYNTH_SYNTH7_PIN_VC_LSB 24
+#define SYNTH_SYNTH7_PIN_VC_MASK 0x01000000
+#define SYNTH_SYNTH7_PIN_VC_GET(x) (((x) & SYNTH_SYNTH7_PIN_VC_MASK) >> SYNTH_SYNTH7_PIN_VC_LSB)
+#define SYNTH_SYNTH7_PIN_VC_SET(x) (((x) << SYNTH_SYNTH7_PIN_VC_LSB) & SYNTH_SYNTH7_PIN_VC_MASK)
+#define SYNTH_SYNTH7_VCO_CAP_ST_MSB 23
+#define SYNTH_SYNTH7_VCO_CAP_ST_LSB 16
+#define SYNTH_SYNTH7_VCO_CAP_ST_MASK 0x00ff0000
+#define SYNTH_SYNTH7_VCO_CAP_ST_GET(x) (((x) & SYNTH_SYNTH7_VCO_CAP_ST_MASK) >> SYNTH_SYNTH7_VCO_CAP_ST_LSB)
+#define SYNTH_SYNTH7_VCO_CAP_ST_SET(x) (((x) << SYNTH_SYNTH7_VCO_CAP_ST_LSB) & SYNTH_SYNTH7_VCO_CAP_ST_MASK)
+#define SYNTH_SYNTH7_SHORT_R_MSB 15
+#define SYNTH_SYNTH7_SHORT_R_LSB 15
+#define SYNTH_SYNTH7_SHORT_R_MASK 0x00008000
+#define SYNTH_SYNTH7_SHORT_R_GET(x) (((x) & SYNTH_SYNTH7_SHORT_R_MASK) >> SYNTH_SYNTH7_SHORT_R_LSB)
+#define SYNTH_SYNTH7_SHORT_R_SET(x) (((x) << SYNTH_SYNTH7_SHORT_R_LSB) & SYNTH_SYNTH7_SHORT_R_MASK)
+#define SYNTH_SYNTH7_RESET_RFD_MSB 14
+#define SYNTH_SYNTH7_RESET_RFD_LSB 14
+#define SYNTH_SYNTH7_RESET_RFD_MASK 0x00004000
+#define SYNTH_SYNTH7_RESET_RFD_GET(x) (((x) & SYNTH_SYNTH7_RESET_RFD_MASK) >> SYNTH_SYNTH7_RESET_RFD_LSB)
+#define SYNTH_SYNTH7_RESET_RFD_SET(x) (((x) << SYNTH_SYNTH7_RESET_RFD_LSB) & SYNTH_SYNTH7_RESET_RFD_MASK)
+#define SYNTH_SYNTH7_RESET_PFD_MSB 13
+#define SYNTH_SYNTH7_RESET_PFD_LSB 13
+#define SYNTH_SYNTH7_RESET_PFD_MASK 0x00002000
+#define SYNTH_SYNTH7_RESET_PFD_GET(x) (((x) & SYNTH_SYNTH7_RESET_PFD_MASK) >> SYNTH_SYNTH7_RESET_PFD_LSB)
+#define SYNTH_SYNTH7_RESET_PFD_SET(x) (((x) << SYNTH_SYNTH7_RESET_PFD_LSB) & SYNTH_SYNTH7_RESET_PFD_MASK)
+#define SYNTH_SYNTH7_RESET_PSCOUNTERS_MSB 12
+#define SYNTH_SYNTH7_RESET_PSCOUNTERS_LSB 12
+#define SYNTH_SYNTH7_RESET_PSCOUNTERS_MASK 0x00001000
+#define SYNTH_SYNTH7_RESET_PSCOUNTERS_GET(x) (((x) & SYNTH_SYNTH7_RESET_PSCOUNTERS_MASK) >> SYNTH_SYNTH7_RESET_PSCOUNTERS_LSB)
+#define SYNTH_SYNTH7_RESET_PSCOUNTERS_SET(x) (((x) << SYNTH_SYNTH7_RESET_PSCOUNTERS_LSB) & SYNTH_SYNTH7_RESET_PSCOUNTERS_MASK)
+#define SYNTH_SYNTH7_RESET_SDM_B_MSB 11
+#define SYNTH_SYNTH7_RESET_SDM_B_LSB 11
+#define SYNTH_SYNTH7_RESET_SDM_B_MASK 0x00000800
+#define SYNTH_SYNTH7_RESET_SDM_B_GET(x) (((x) & SYNTH_SYNTH7_RESET_SDM_B_MASK) >> SYNTH_SYNTH7_RESET_SDM_B_LSB)
+#define SYNTH_SYNTH7_RESET_SDM_B_SET(x) (((x) << SYNTH_SYNTH7_RESET_SDM_B_LSB) & SYNTH_SYNTH7_RESET_SDM_B_MASK)
+#define SYNTH_SYNTH7_VC2HIGH_MSB 10
+#define SYNTH_SYNTH7_VC2HIGH_LSB 10
+#define SYNTH_SYNTH7_VC2HIGH_MASK 0x00000400
+#define SYNTH_SYNTH7_VC2HIGH_GET(x) (((x) & SYNTH_SYNTH7_VC2HIGH_MASK) >> SYNTH_SYNTH7_VC2HIGH_LSB)
+#define SYNTH_SYNTH7_VC2HIGH_SET(x) (((x) << SYNTH_SYNTH7_VC2HIGH_LSB) & SYNTH_SYNTH7_VC2HIGH_MASK)
+#define SYNTH_SYNTH7_VC2LOW_MSB 9
+#define SYNTH_SYNTH7_VC2LOW_LSB 9
+#define SYNTH_SYNTH7_VC2LOW_MASK 0x00000200
+#define SYNTH_SYNTH7_VC2LOW_GET(x) (((x) & SYNTH_SYNTH7_VC2LOW_MASK) >> SYNTH_SYNTH7_VC2LOW_LSB)
+#define SYNTH_SYNTH7_VC2LOW_SET(x) (((x) << SYNTH_SYNTH7_VC2LOW_LSB) & SYNTH_SYNTH7_VC2LOW_MASK)
+#define SYNTH_SYNTH7_LOOP_IP_MSB 8
+#define SYNTH_SYNTH7_LOOP_IP_LSB 5
+#define SYNTH_SYNTH7_LOOP_IP_MASK 0x000001e0
+#define SYNTH_SYNTH7_LOOP_IP_GET(x) (((x) & SYNTH_SYNTH7_LOOP_IP_MASK) >> SYNTH_SYNTH7_LOOP_IP_LSB)
+#define SYNTH_SYNTH7_LOOP_IP_SET(x) (((x) << SYNTH_SYNTH7_LOOP_IP_LSB) & SYNTH_SYNTH7_LOOP_IP_MASK)
+#define SYNTH_SYNTH7_LOBUF5GTUNE_MSB 4
+#define SYNTH_SYNTH7_LOBUF5GTUNE_LSB 3
+#define SYNTH_SYNTH7_LOBUF5GTUNE_MASK 0x00000018
+#define SYNTH_SYNTH7_LOBUF5GTUNE_GET(x) (((x) & SYNTH_SYNTH7_LOBUF5GTUNE_MASK) >> SYNTH_SYNTH7_LOBUF5GTUNE_LSB)
+#define SYNTH_SYNTH7_LOBUF5GTUNE_SET(x) (((x) << SYNTH_SYNTH7_LOBUF5GTUNE_LSB) & SYNTH_SYNTH7_LOBUF5GTUNE_MASK)
+#define SYNTH_SYNTH7_SPARE_READ_MSB 2
+#define SYNTH_SYNTH7_SPARE_READ_LSB 0
+#define SYNTH_SYNTH7_SPARE_READ_MASK 0x00000007
+#define SYNTH_SYNTH7_SPARE_READ_GET(x) (((x) & SYNTH_SYNTH7_SPARE_READ_MASK) >> SYNTH_SYNTH7_SPARE_READ_LSB)
+#define SYNTH_SYNTH7_SPARE_READ_SET(x) (((x) << SYNTH_SYNTH7_SPARE_READ_LSB) & SYNTH_SYNTH7_SPARE_READ_MASK)
+
+#define SYNTH_SYNTH8_ADDRESS 0x0000001c
+#define SYNTH_SYNTH8_OFFSET 0x0000001c
+#define SYNTH_SYNTH8_LOADSYNTHCHANNEL_MSB 31
+#define SYNTH_SYNTH8_LOADSYNTHCHANNEL_LSB 31
+#define SYNTH_SYNTH8_LOADSYNTHCHANNEL_MASK 0x80000000
+#define SYNTH_SYNTH8_LOADSYNTHCHANNEL_GET(x) (((x) & SYNTH_SYNTH8_LOADSYNTHCHANNEL_MASK) >> SYNTH_SYNTH8_LOADSYNTHCHANNEL_LSB)
+#define SYNTH_SYNTH8_LOADSYNTHCHANNEL_SET(x) (((x) << SYNTH_SYNTH8_LOADSYNTHCHANNEL_LSB) & SYNTH_SYNTH8_LOADSYNTHCHANNEL_MASK)
+#define SYNTH_SYNTH8_FRACMODE_MSB 30
+#define SYNTH_SYNTH8_FRACMODE_LSB 30
+#define SYNTH_SYNTH8_FRACMODE_MASK 0x40000000
+#define SYNTH_SYNTH8_FRACMODE_GET(x) (((x) & SYNTH_SYNTH8_FRACMODE_MASK) >> SYNTH_SYNTH8_FRACMODE_LSB)
+#define SYNTH_SYNTH8_FRACMODE_SET(x) (((x) << SYNTH_SYNTH8_FRACMODE_LSB) & SYNTH_SYNTH8_FRACMODE_MASK)
+#define SYNTH_SYNTH8_AMODEREFSEL_MSB 29
+#define SYNTH_SYNTH8_AMODEREFSEL_LSB 28
+#define SYNTH_SYNTH8_AMODEREFSEL_MASK 0x30000000
+#define SYNTH_SYNTH8_AMODEREFSEL_GET(x) (((x) & SYNTH_SYNTH8_AMODEREFSEL_MASK) >> SYNTH_SYNTH8_AMODEREFSEL_LSB)
+#define SYNTH_SYNTH8_AMODEREFSEL_SET(x) (((x) << SYNTH_SYNTH8_AMODEREFSEL_LSB) & SYNTH_SYNTH8_AMODEREFSEL_MASK)
+#define SYNTH_SYNTH8_SPARE_MSB 27
+#define SYNTH_SYNTH8_SPARE_LSB 27
+#define SYNTH_SYNTH8_SPARE_MASK 0x08000000
+#define SYNTH_SYNTH8_SPARE_GET(x) (((x) & SYNTH_SYNTH8_SPARE_MASK) >> SYNTH_SYNTH8_SPARE_LSB)
+#define SYNTH_SYNTH8_SPARE_SET(x) (((x) << SYNTH_SYNTH8_SPARE_LSB) & SYNTH_SYNTH8_SPARE_MASK)
+#define SYNTH_SYNTH8_CHANSEL_MSB 26
+#define SYNTH_SYNTH8_CHANSEL_LSB 18
+#define SYNTH_SYNTH8_CHANSEL_MASK 0x07fc0000
+#define SYNTH_SYNTH8_CHANSEL_GET(x) (((x) & SYNTH_SYNTH8_CHANSEL_MASK) >> SYNTH_SYNTH8_CHANSEL_LSB)
+#define SYNTH_SYNTH8_CHANSEL_SET(x) (((x) << SYNTH_SYNTH8_CHANSEL_LSB) & SYNTH_SYNTH8_CHANSEL_MASK)
+#define SYNTH_SYNTH8_CHANFRAC_MSB 17
+#define SYNTH_SYNTH8_CHANFRAC_LSB 1
+#define SYNTH_SYNTH8_CHANFRAC_MASK 0x0003fffe
+#define SYNTH_SYNTH8_CHANFRAC_GET(x) (((x) & SYNTH_SYNTH8_CHANFRAC_MASK) >> SYNTH_SYNTH8_CHANFRAC_LSB)
+#define SYNTH_SYNTH8_CHANFRAC_SET(x) (((x) << SYNTH_SYNTH8_CHANFRAC_LSB) & SYNTH_SYNTH8_CHANFRAC_MASK)
+#define SYNTH_SYNTH8_FORCE_FRACLSB_MSB 0
+#define SYNTH_SYNTH8_FORCE_FRACLSB_LSB 0
+#define SYNTH_SYNTH8_FORCE_FRACLSB_MASK 0x00000001
+#define SYNTH_SYNTH8_FORCE_FRACLSB_GET(x) (((x) & SYNTH_SYNTH8_FORCE_FRACLSB_MASK) >> SYNTH_SYNTH8_FORCE_FRACLSB_LSB)
+#define SYNTH_SYNTH8_FORCE_FRACLSB_SET(x) (((x) << SYNTH_SYNTH8_FORCE_FRACLSB_LSB) & SYNTH_SYNTH8_FORCE_FRACLSB_MASK)
+
+#define RF5G_RF5G1_ADDRESS 0x00000020
+#define RF5G_RF5G1_OFFSET 0x00000020
+#define RF5G_RF5G1_PDTXLO5_MSB 31
+#define RF5G_RF5G1_PDTXLO5_LSB 31
+#define RF5G_RF5G1_PDTXLO5_MASK 0x80000000
+#define RF5G_RF5G1_PDTXLO5_GET(x) (((x) & RF5G_RF5G1_PDTXLO5_MASK) >> RF5G_RF5G1_PDTXLO5_LSB)
+#define RF5G_RF5G1_PDTXLO5_SET(x) (((x) << RF5G_RF5G1_PDTXLO5_LSB) & RF5G_RF5G1_PDTXLO5_MASK)
+#define RF5G_RF5G1_PDTXMIX5_MSB 30
+#define RF5G_RF5G1_PDTXMIX5_LSB 30
+#define RF5G_RF5G1_PDTXMIX5_MASK 0x40000000
+#define RF5G_RF5G1_PDTXMIX5_GET(x) (((x) & RF5G_RF5G1_PDTXMIX5_MASK) >> RF5G_RF5G1_PDTXMIX5_LSB)
+#define RF5G_RF5G1_PDTXMIX5_SET(x) (((x) << RF5G_RF5G1_PDTXMIX5_LSB) & RF5G_RF5G1_PDTXMIX5_MASK)
+#define RF5G_RF5G1_PDTXBUF5_MSB 29
+#define RF5G_RF5G1_PDTXBUF5_LSB 29
+#define RF5G_RF5G1_PDTXBUF5_MASK 0x20000000
+#define RF5G_RF5G1_PDTXBUF5_GET(x) (((x) & RF5G_RF5G1_PDTXBUF5_MASK) >> RF5G_RF5G1_PDTXBUF5_LSB)
+#define RF5G_RF5G1_PDTXBUF5_SET(x) (((x) << RF5G_RF5G1_PDTXBUF5_LSB) & RF5G_RF5G1_PDTXBUF5_MASK)
+#define RF5G_RF5G1_PDPADRV5_MSB 28
+#define RF5G_RF5G1_PDPADRV5_LSB 28
+#define RF5G_RF5G1_PDPADRV5_MASK 0x10000000
+#define RF5G_RF5G1_PDPADRV5_GET(x) (((x) & RF5G_RF5G1_PDPADRV5_MASK) >> RF5G_RF5G1_PDPADRV5_LSB)
+#define RF5G_RF5G1_PDPADRV5_SET(x) (((x) << RF5G_RF5G1_PDPADRV5_LSB) & RF5G_RF5G1_PDPADRV5_MASK)
+#define RF5G_RF5G1_PDPAOUT5_MSB 27
+#define RF5G_RF5G1_PDPAOUT5_LSB 27
+#define RF5G_RF5G1_PDPAOUT5_MASK 0x08000000
+#define RF5G_RF5G1_PDPAOUT5_GET(x) (((x) & RF5G_RF5G1_PDPAOUT5_MASK) >> RF5G_RF5G1_PDPAOUT5_LSB)
+#define RF5G_RF5G1_PDPAOUT5_SET(x) (((x) << RF5G_RF5G1_PDPAOUT5_LSB) & RF5G_RF5G1_PDPAOUT5_MASK)
+#define RF5G_RF5G1_TUNE_PADRV5_MSB 26
+#define RF5G_RF5G1_TUNE_PADRV5_LSB 24
+#define RF5G_RF5G1_TUNE_PADRV5_MASK 0x07000000
+#define RF5G_RF5G1_TUNE_PADRV5_GET(x) (((x) & RF5G_RF5G1_TUNE_PADRV5_MASK) >> RF5G_RF5G1_TUNE_PADRV5_LSB)
+#define RF5G_RF5G1_TUNE_PADRV5_SET(x) (((x) << RF5G_RF5G1_TUNE_PADRV5_LSB) & RF5G_RF5G1_TUNE_PADRV5_MASK)
+#define RF5G_RF5G1_PWDTXPKD_MSB 23
+#define RF5G_RF5G1_PWDTXPKD_LSB 21
+#define RF5G_RF5G1_PWDTXPKD_MASK 0x00e00000
+#define RF5G_RF5G1_PWDTXPKD_GET(x) (((x) & RF5G_RF5G1_PWDTXPKD_MASK) >> RF5G_RF5G1_PWDTXPKD_LSB)
+#define RF5G_RF5G1_PWDTXPKD_SET(x) (((x) << RF5G_RF5G1_PWDTXPKD_LSB) & RF5G_RF5G1_PWDTXPKD_MASK)
+#define RF5G_RF5G1_DB5_MSB 20
+#define RF5G_RF5G1_DB5_LSB 18
+#define RF5G_RF5G1_DB5_MASK 0x001c0000
+#define RF5G_RF5G1_DB5_GET(x) (((x) & RF5G_RF5G1_DB5_MASK) >> RF5G_RF5G1_DB5_LSB)
+#define RF5G_RF5G1_DB5_SET(x) (((x) << RF5G_RF5G1_DB5_LSB) & RF5G_RF5G1_DB5_MASK)
+#define RF5G_RF5G1_OB5_MSB 17
+#define RF5G_RF5G1_OB5_LSB 15
+#define RF5G_RF5G1_OB5_MASK 0x00038000
+#define RF5G_RF5G1_OB5_GET(x) (((x) & RF5G_RF5G1_OB5_MASK) >> RF5G_RF5G1_OB5_LSB)
+#define RF5G_RF5G1_OB5_SET(x) (((x) << RF5G_RF5G1_OB5_LSB) & RF5G_RF5G1_OB5_MASK)
+#define RF5G_RF5G1_TX5_ATB_SEL_MSB 14
+#define RF5G_RF5G1_TX5_ATB_SEL_LSB 12
+#define RF5G_RF5G1_TX5_ATB_SEL_MASK 0x00007000
+#define RF5G_RF5G1_TX5_ATB_SEL_GET(x) (((x) & RF5G_RF5G1_TX5_ATB_SEL_MASK) >> RF5G_RF5G1_TX5_ATB_SEL_LSB)
+#define RF5G_RF5G1_TX5_ATB_SEL_SET(x) (((x) << RF5G_RF5G1_TX5_ATB_SEL_LSB) & RF5G_RF5G1_TX5_ATB_SEL_MASK)
+#define RF5G_RF5G1_PDLO5DIV_MSB 11
+#define RF5G_RF5G1_PDLO5DIV_LSB 11
+#define RF5G_RF5G1_PDLO5DIV_MASK 0x00000800
+#define RF5G_RF5G1_PDLO5DIV_GET(x) (((x) & RF5G_RF5G1_PDLO5DIV_MASK) >> RF5G_RF5G1_PDLO5DIV_LSB)
+#define RF5G_RF5G1_PDLO5DIV_SET(x) (((x) << RF5G_RF5G1_PDLO5DIV_LSB) & RF5G_RF5G1_PDLO5DIV_MASK)
+#define RF5G_RF5G1_PDLO5MIX_MSB 10
+#define RF5G_RF5G1_PDLO5MIX_LSB 10
+#define RF5G_RF5G1_PDLO5MIX_MASK 0x00000400
+#define RF5G_RF5G1_PDLO5MIX_GET(x) (((x) & RF5G_RF5G1_PDLO5MIX_MASK) >> RF5G_RF5G1_PDLO5MIX_LSB)
+#define RF5G_RF5G1_PDLO5MIX_SET(x) (((x) << RF5G_RF5G1_PDLO5MIX_LSB) & RF5G_RF5G1_PDLO5MIX_MASK)
+#define RF5G_RF5G1_PDQBUF5_MSB 9
+#define RF5G_RF5G1_PDQBUF5_LSB 9
+#define RF5G_RF5G1_PDQBUF5_MASK 0x00000200
+#define RF5G_RF5G1_PDQBUF5_GET(x) (((x) & RF5G_RF5G1_PDQBUF5_MASK) >> RF5G_RF5G1_PDQBUF5_LSB)
+#define RF5G_RF5G1_PDQBUF5_SET(x) (((x) << RF5G_RF5G1_PDQBUF5_LSB) & RF5G_RF5G1_PDQBUF5_MASK)
+#define RF5G_RF5G1_PDLO5AGC_MSB 8
+#define RF5G_RF5G1_PDLO5AGC_LSB 8
+#define RF5G_RF5G1_PDLO5AGC_MASK 0x00000100
+#define RF5G_RF5G1_PDLO5AGC_GET(x) (((x) & RF5G_RF5G1_PDLO5AGC_MASK) >> RF5G_RF5G1_PDLO5AGC_LSB)
+#define RF5G_RF5G1_PDLO5AGC_SET(x) (((x) << RF5G_RF5G1_PDLO5AGC_LSB) & RF5G_RF5G1_PDLO5AGC_MASK)
+#define RF5G_RF5G1_PDREGLO5_MSB 7
+#define RF5G_RF5G1_PDREGLO5_LSB 7
+#define RF5G_RF5G1_PDREGLO5_MASK 0x00000080
+#define RF5G_RF5G1_PDREGLO5_GET(x) (((x) & RF5G_RF5G1_PDREGLO5_MASK) >> RF5G_RF5G1_PDREGLO5_LSB)
+#define RF5G_RF5G1_PDREGLO5_SET(x) (((x) << RF5G_RF5G1_PDREGLO5_LSB) & RF5G_RF5G1_PDREGLO5_MASK)
+#define RF5G_RF5G1_LO5_ATB_SEL_MSB 6
+#define RF5G_RF5G1_LO5_ATB_SEL_LSB 4
+#define RF5G_RF5G1_LO5_ATB_SEL_MASK 0x00000070
+#define RF5G_RF5G1_LO5_ATB_SEL_GET(x) (((x) & RF5G_RF5G1_LO5_ATB_SEL_MASK) >> RF5G_RF5G1_LO5_ATB_SEL_LSB)
+#define RF5G_RF5G1_LO5_ATB_SEL_SET(x) (((x) << RF5G_RF5G1_LO5_ATB_SEL_LSB) & RF5G_RF5G1_LO5_ATB_SEL_MASK)
+#define RF5G_RF5G1_LO5CONTROL_MSB 3
+#define RF5G_RF5G1_LO5CONTROL_LSB 3
+#define RF5G_RF5G1_LO5CONTROL_MASK 0x00000008
+#define RF5G_RF5G1_LO5CONTROL_GET(x) (((x) & RF5G_RF5G1_LO5CONTROL_MASK) >> RF5G_RF5G1_LO5CONTROL_LSB)
+#define RF5G_RF5G1_LO5CONTROL_SET(x) (((x) << RF5G_RF5G1_LO5CONTROL_LSB) & RF5G_RF5G1_LO5CONTROL_MASK)
+#define RF5G_RF5G1_REGLO_BYPASS5_MSB 2
+#define RF5G_RF5G1_REGLO_BYPASS5_LSB 2
+#define RF5G_RF5G1_REGLO_BYPASS5_MASK 0x00000004
+#define RF5G_RF5G1_REGLO_BYPASS5_GET(x) (((x) & RF5G_RF5G1_REGLO_BYPASS5_MASK) >> RF5G_RF5G1_REGLO_BYPASS5_LSB)
+#define RF5G_RF5G1_REGLO_BYPASS5_SET(x) (((x) << RF5G_RF5G1_REGLO_BYPASS5_LSB) & RF5G_RF5G1_REGLO_BYPASS5_MASK)
+#define RF5G_RF5G1_SPARE_MSB 1
+#define RF5G_RF5G1_SPARE_LSB 0
+#define RF5G_RF5G1_SPARE_MASK 0x00000003
+#define RF5G_RF5G1_SPARE_GET(x) (((x) & RF5G_RF5G1_SPARE_MASK) >> RF5G_RF5G1_SPARE_LSB)
+#define RF5G_RF5G1_SPARE_SET(x) (((x) << RF5G_RF5G1_SPARE_LSB) & RF5G_RF5G1_SPARE_MASK)
+
+#define RF5G_RF5G2_ADDRESS 0x00000024
+#define RF5G_RF5G2_OFFSET 0x00000024
+#define RF5G_RF5G2_AGCLO_B_MSB 31
+#define RF5G_RF5G2_AGCLO_B_LSB 29
+#define RF5G_RF5G2_AGCLO_B_MASK 0xe0000000
+#define RF5G_RF5G2_AGCLO_B_GET(x) (((x) & RF5G_RF5G2_AGCLO_B_MASK) >> RF5G_RF5G2_AGCLO_B_LSB)
+#define RF5G_RF5G2_AGCLO_B_SET(x) (((x) << RF5G_RF5G2_AGCLO_B_LSB) & RF5G_RF5G2_AGCLO_B_MASK)
+#define RF5G_RF5G2_RX5_ATB_SEL_MSB 28
+#define RF5G_RF5G2_RX5_ATB_SEL_LSB 26
+#define RF5G_RF5G2_RX5_ATB_SEL_MASK 0x1c000000
+#define RF5G_RF5G2_RX5_ATB_SEL_GET(x) (((x) & RF5G_RF5G2_RX5_ATB_SEL_MASK) >> RF5G_RF5G2_RX5_ATB_SEL_LSB)
+#define RF5G_RF5G2_RX5_ATB_SEL_SET(x) (((x) << RF5G_RF5G2_RX5_ATB_SEL_LSB) & RF5G_RF5G2_RX5_ATB_SEL_MASK)
+#define RF5G_RF5G2_PDCMOSLO5_MSB 25
+#define RF5G_RF5G2_PDCMOSLO5_LSB 25
+#define RF5G_RF5G2_PDCMOSLO5_MASK 0x02000000
+#define RF5G_RF5G2_PDCMOSLO5_GET(x) (((x) & RF5G_RF5G2_PDCMOSLO5_MASK) >> RF5G_RF5G2_PDCMOSLO5_LSB)
+#define RF5G_RF5G2_PDCMOSLO5_SET(x) (((x) << RF5G_RF5G2_PDCMOSLO5_LSB) & RF5G_RF5G2_PDCMOSLO5_MASK)
+#define RF5G_RF5G2_PDVGM5_MSB 24
+#define RF5G_RF5G2_PDVGM5_LSB 24
+#define RF5G_RF5G2_PDVGM5_MASK 0x01000000
+#define RF5G_RF5G2_PDVGM5_GET(x) (((x) & RF5G_RF5G2_PDVGM5_MASK) >> RF5G_RF5G2_PDVGM5_LSB)
+#define RF5G_RF5G2_PDVGM5_SET(x) (((x) << RF5G_RF5G2_PDVGM5_LSB) & RF5G_RF5G2_PDVGM5_MASK)
+#define RF5G_RF5G2_PDCSLNA5_MSB 23
+#define RF5G_RF5G2_PDCSLNA5_LSB 23
+#define RF5G_RF5G2_PDCSLNA5_MASK 0x00800000
+#define RF5G_RF5G2_PDCSLNA5_GET(x) (((x) & RF5G_RF5G2_PDCSLNA5_MASK) >> RF5G_RF5G2_PDCSLNA5_LSB)
+#define RF5G_RF5G2_PDCSLNA5_SET(x) (((x) << RF5G_RF5G2_PDCSLNA5_LSB) & RF5G_RF5G2_PDCSLNA5_MASK)
+#define RF5G_RF5G2_PDRFVGA5_MSB 22
+#define RF5G_RF5G2_PDRFVGA5_LSB 22
+#define RF5G_RF5G2_PDRFVGA5_MASK 0x00400000
+#define RF5G_RF5G2_PDRFVGA5_GET(x) (((x) & RF5G_RF5G2_PDRFVGA5_MASK) >> RF5G_RF5G2_PDRFVGA5_LSB)
+#define RF5G_RF5G2_PDRFVGA5_SET(x) (((x) << RF5G_RF5G2_PDRFVGA5_LSB) & RF5G_RF5G2_PDRFVGA5_MASK)
+#define RF5G_RF5G2_PDREGFE5_MSB 21
+#define RF5G_RF5G2_PDREGFE5_LSB 21
+#define RF5G_RF5G2_PDREGFE5_MASK 0x00200000
+#define RF5G_RF5G2_PDREGFE5_GET(x) (((x) & RF5G_RF5G2_PDREGFE5_MASK) >> RF5G_RF5G2_PDREGFE5_LSB)
+#define RF5G_RF5G2_PDREGFE5_SET(x) (((x) << RF5G_RF5G2_PDREGFE5_LSB) & RF5G_RF5G2_PDREGFE5_MASK)
+#define RF5G_RF5G2_TUNE_RFVGA5_MSB 20
+#define RF5G_RF5G2_TUNE_RFVGA5_LSB 18
+#define RF5G_RF5G2_TUNE_RFVGA5_MASK 0x001c0000
+#define RF5G_RF5G2_TUNE_RFVGA5_GET(x) (((x) & RF5G_RF5G2_TUNE_RFVGA5_MASK) >> RF5G_RF5G2_TUNE_RFVGA5_LSB)
+#define RF5G_RF5G2_TUNE_RFVGA5_SET(x) (((x) << RF5G_RF5G2_TUNE_RFVGA5_LSB) & RF5G_RF5G2_TUNE_RFVGA5_MASK)
+#define RF5G_RF5G2_BRFVGA5_MSB 17
+#define RF5G_RF5G2_BRFVGA5_LSB 15
+#define RF5G_RF5G2_BRFVGA5_MASK 0x00038000
+#define RF5G_RF5G2_BRFVGA5_GET(x) (((x) & RF5G_RF5G2_BRFVGA5_MASK) >> RF5G_RF5G2_BRFVGA5_LSB)
+#define RF5G_RF5G2_BRFVGA5_SET(x) (((x) << RF5G_RF5G2_BRFVGA5_LSB) & RF5G_RF5G2_BRFVGA5_MASK)
+#define RF5G_RF5G2_BCSLNA5_MSB 14
+#define RF5G_RF5G2_BCSLNA5_LSB 12
+#define RF5G_RF5G2_BCSLNA5_MASK 0x00007000
+#define RF5G_RF5G2_BCSLNA5_GET(x) (((x) & RF5G_RF5G2_BCSLNA5_MASK) >> RF5G_RF5G2_BCSLNA5_LSB)
+#define RF5G_RF5G2_BCSLNA5_SET(x) (((x) << RF5G_RF5G2_BCSLNA5_LSB) & RF5G_RF5G2_BCSLNA5_MASK)
+#define RF5G_RF5G2_BVGM5_MSB 11
+#define RF5G_RF5G2_BVGM5_LSB 9
+#define RF5G_RF5G2_BVGM5_MASK 0x00000e00
+#define RF5G_RF5G2_BVGM5_GET(x) (((x) & RF5G_RF5G2_BVGM5_MASK) >> RF5G_RF5G2_BVGM5_LSB)
+#define RF5G_RF5G2_BVGM5_SET(x) (((x) << RF5G_RF5G2_BVGM5_LSB) & RF5G_RF5G2_BVGM5_MASK)
+#define RF5G_RF5G2_REGFE_BYPASS5_MSB 8
+#define RF5G_RF5G2_REGFE_BYPASS5_LSB 8
+#define RF5G_RF5G2_REGFE_BYPASS5_MASK 0x00000100
+#define RF5G_RF5G2_REGFE_BYPASS5_GET(x) (((x) & RF5G_RF5G2_REGFE_BYPASS5_MASK) >> RF5G_RF5G2_REGFE_BYPASS5_LSB)
+#define RF5G_RF5G2_REGFE_BYPASS5_SET(x) (((x) << RF5G_RF5G2_REGFE_BYPASS5_LSB) & RF5G_RF5G2_REGFE_BYPASS5_MASK)
+#define RF5G_RF5G2_LNA5_ATTENMODE_MSB 7
+#define RF5G_RF5G2_LNA5_ATTENMODE_LSB 6
+#define RF5G_RF5G2_LNA5_ATTENMODE_MASK 0x000000c0
+#define RF5G_RF5G2_LNA5_ATTENMODE_GET(x) (((x) & RF5G_RF5G2_LNA5_ATTENMODE_MASK) >> RF5G_RF5G2_LNA5_ATTENMODE_LSB)
+#define RF5G_RF5G2_LNA5_ATTENMODE_SET(x) (((x) << RF5G_RF5G2_LNA5_ATTENMODE_LSB) & RF5G_RF5G2_LNA5_ATTENMODE_MASK)
+#define RF5G_RF5G2_ENABLE_PCA_MSB 5
+#define RF5G_RF5G2_ENABLE_PCA_LSB 5
+#define RF5G_RF5G2_ENABLE_PCA_MASK 0x00000020
+#define RF5G_RF5G2_ENABLE_PCA_GET(x) (((x) & RF5G_RF5G2_ENABLE_PCA_MASK) >> RF5G_RF5G2_ENABLE_PCA_LSB)
+#define RF5G_RF5G2_ENABLE_PCA_SET(x) (((x) << RF5G_RF5G2_ENABLE_PCA_LSB) & RF5G_RF5G2_ENABLE_PCA_MASK)
+#define RF5G_RF5G2_TUNE_LO_MSB 4
+#define RF5G_RF5G2_TUNE_LO_LSB 2
+#define RF5G_RF5G2_TUNE_LO_MASK 0x0000001c
+#define RF5G_RF5G2_TUNE_LO_GET(x) (((x) & RF5G_RF5G2_TUNE_LO_MASK) >> RF5G_RF5G2_TUNE_LO_LSB)
+#define RF5G_RF5G2_TUNE_LO_SET(x) (((x) << RF5G_RF5G2_TUNE_LO_LSB) & RF5G_RF5G2_TUNE_LO_MASK)
+#define RF5G_RF5G2_SPARE_MSB 1
+#define RF5G_RF5G2_SPARE_LSB 0
+#define RF5G_RF5G2_SPARE_MASK 0x00000003
+#define RF5G_RF5G2_SPARE_GET(x) (((x) & RF5G_RF5G2_SPARE_MASK) >> RF5G_RF5G2_SPARE_LSB)
+#define RF5G_RF5G2_SPARE_SET(x) (((x) << RF5G_RF5G2_SPARE_LSB) & RF5G_RF5G2_SPARE_MASK)
+
+#define RF2G_RF2G1_ADDRESS 0x00000028
+#define RF2G_RF2G1_OFFSET 0x00000028
+#define RF2G_RF2G1_BLNA1_MSB 31
+#define RF2G_RF2G1_BLNA1_LSB 29
+#define RF2G_RF2G1_BLNA1_MASK 0xe0000000
+#define RF2G_RF2G1_BLNA1_GET(x) (((x) & RF2G_RF2G1_BLNA1_MASK) >> RF2G_RF2G1_BLNA1_LSB)
+#define RF2G_RF2G1_BLNA1_SET(x) (((x) << RF2G_RF2G1_BLNA1_LSB) & RF2G_RF2G1_BLNA1_MASK)
+#define RF2G_RF2G1_BLNA1F_MSB 28
+#define RF2G_RF2G1_BLNA1F_LSB 26
+#define RF2G_RF2G1_BLNA1F_MASK 0x1c000000
+#define RF2G_RF2G1_BLNA1F_GET(x) (((x) & RF2G_RF2G1_BLNA1F_MASK) >> RF2G_RF2G1_BLNA1F_LSB)
+#define RF2G_RF2G1_BLNA1F_SET(x) (((x) << RF2G_RF2G1_BLNA1F_LSB) & RF2G_RF2G1_BLNA1F_MASK)
+#define RF2G_RF2G1_BLNA1BUF_MSB 25
+#define RF2G_RF2G1_BLNA1BUF_LSB 23
+#define RF2G_RF2G1_BLNA1BUF_MASK 0x03800000
+#define RF2G_RF2G1_BLNA1BUF_GET(x) (((x) & RF2G_RF2G1_BLNA1BUF_MASK) >> RF2G_RF2G1_BLNA1BUF_LSB)
+#define RF2G_RF2G1_BLNA1BUF_SET(x) (((x) << RF2G_RF2G1_BLNA1BUF_LSB) & RF2G_RF2G1_BLNA1BUF_MASK)
+#define RF2G_RF2G1_BLNA2_MSB 22
+#define RF2G_RF2G1_BLNA2_LSB 20
+#define RF2G_RF2G1_BLNA2_MASK 0x00700000
+#define RF2G_RF2G1_BLNA2_GET(x) (((x) & RF2G_RF2G1_BLNA2_MASK) >> RF2G_RF2G1_BLNA2_LSB)
+#define RF2G_RF2G1_BLNA2_SET(x) (((x) << RF2G_RF2G1_BLNA2_LSB) & RF2G_RF2G1_BLNA2_MASK)
+#define RF2G_RF2G1_DB_MSB 19
+#define RF2G_RF2G1_DB_LSB 17
+#define RF2G_RF2G1_DB_MASK 0x000e0000
+#define RF2G_RF2G1_DB_GET(x) (((x) & RF2G_RF2G1_DB_MASK) >> RF2G_RF2G1_DB_LSB)
+#define RF2G_RF2G1_DB_SET(x) (((x) << RF2G_RF2G1_DB_LSB) & RF2G_RF2G1_DB_MASK)
+#define RF2G_RF2G1_OB_MSB 16
+#define RF2G_RF2G1_OB_LSB 14
+#define RF2G_RF2G1_OB_MASK 0x0001c000
+#define RF2G_RF2G1_OB_GET(x) (((x) & RF2G_RF2G1_OB_MASK) >> RF2G_RF2G1_OB_LSB)
+#define RF2G_RF2G1_OB_SET(x) (((x) << RF2G_RF2G1_OB_LSB) & RF2G_RF2G1_OB_MASK)
+#define RF2G_RF2G1_FE_ATB_SEL_MSB 13
+#define RF2G_RF2G1_FE_ATB_SEL_LSB 11
+#define RF2G_RF2G1_FE_ATB_SEL_MASK 0x00003800
+#define RF2G_RF2G1_FE_ATB_SEL_GET(x) (((x) & RF2G_RF2G1_FE_ATB_SEL_MASK) >> RF2G_RF2G1_FE_ATB_SEL_LSB)
+#define RF2G_RF2G1_FE_ATB_SEL_SET(x) (((x) << RF2G_RF2G1_FE_ATB_SEL_LSB) & RF2G_RF2G1_FE_ATB_SEL_MASK)
+#define RF2G_RF2G1_RF_ATB_SEL_MSB 10
+#define RF2G_RF2G1_RF_ATB_SEL_LSB 8
+#define RF2G_RF2G1_RF_ATB_SEL_MASK 0x00000700
+#define RF2G_RF2G1_RF_ATB_SEL_GET(x) (((x) & RF2G_RF2G1_RF_ATB_SEL_MASK) >> RF2G_RF2G1_RF_ATB_SEL_LSB)
+#define RF2G_RF2G1_RF_ATB_SEL_SET(x) (((x) << RF2G_RF2G1_RF_ATB_SEL_LSB) & RF2G_RF2G1_RF_ATB_SEL_MASK)
+#define RF2G_RF2G1_SELLNA_MSB 7
+#define RF2G_RF2G1_SELLNA_LSB 7
+#define RF2G_RF2G1_SELLNA_MASK 0x00000080
+#define RF2G_RF2G1_SELLNA_GET(x) (((x) & RF2G_RF2G1_SELLNA_MASK) >> RF2G_RF2G1_SELLNA_LSB)
+#define RF2G_RF2G1_SELLNA_SET(x) (((x) << RF2G_RF2G1_SELLNA_LSB) & RF2G_RF2G1_SELLNA_MASK)
+#define RF2G_RF2G1_LOCONTROL_MSB 6
+#define RF2G_RF2G1_LOCONTROL_LSB 6
+#define RF2G_RF2G1_LOCONTROL_MASK 0x00000040
+#define RF2G_RF2G1_LOCONTROL_GET(x) (((x) & RF2G_RF2G1_LOCONTROL_MASK) >> RF2G_RF2G1_LOCONTROL_LSB)
+#define RF2G_RF2G1_LOCONTROL_SET(x) (((x) << RF2G_RF2G1_LOCONTROL_LSB) & RF2G_RF2G1_LOCONTROL_MASK)
+#define RF2G_RF2G1_SHORTLNA2_MSB 5
+#define RF2G_RF2G1_SHORTLNA2_LSB 5
+#define RF2G_RF2G1_SHORTLNA2_MASK 0x00000020
+#define RF2G_RF2G1_SHORTLNA2_GET(x) (((x) & RF2G_RF2G1_SHORTLNA2_MASK) >> RF2G_RF2G1_SHORTLNA2_LSB)
+#define RF2G_RF2G1_SHORTLNA2_SET(x) (((x) << RF2G_RF2G1_SHORTLNA2_LSB) & RF2G_RF2G1_SHORTLNA2_MASK)
+#define RF2G_RF2G1_SPARE_MSB 4
+#define RF2G_RF2G1_SPARE_LSB 0
+#define RF2G_RF2G1_SPARE_MASK 0x0000001f
+#define RF2G_RF2G1_SPARE_GET(x) (((x) & RF2G_RF2G1_SPARE_MASK) >> RF2G_RF2G1_SPARE_LSB)
+#define RF2G_RF2G1_SPARE_SET(x) (((x) << RF2G_RF2G1_SPARE_LSB) & RF2G_RF2G1_SPARE_MASK)
+
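Every field in these generated blocks follows one convention: the mask covers bits _LSB through _MSB, _GET() masks and down-shifts a field out of a raw 32-bit value, and _SET() shifts a field value up into place. The invariant MASK == ((1 << (MSB - LSB + 1)) - 1) << LSB can be spot-checked at compile time; a minimal, purely illustrative C11 sketch (assuming the definitions above are in scope) is:

/* Compile-time spot check that MSB/LSB/MASK agree for one field
 * (RF2G_RF2G1_BLNA1, bits 31:29). Illustrative only. */
_Static_assert(RF2G_RF2G1_BLNA1_MASK ==
	       (((1u << (RF2G_RF2G1_BLNA1_MSB - RF2G_RF2G1_BLNA1_LSB + 1)) - 1)
		<< RF2G_RF2G1_BLNA1_LSB),
	       "BLNA1 MSB/LSB/MASK are inconsistent");
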
+#define RF2G_RF2G2_ADDRESS 0x0000002c
+#define RF2G_RF2G2_OFFSET 0x0000002c
+#define RF2G_RF2G2_PDCGLNA_MSB 31
+#define RF2G_RF2G2_PDCGLNA_LSB 31
+#define RF2G_RF2G2_PDCGLNA_MASK 0x80000000
+#define RF2G_RF2G2_PDCGLNA_GET(x) (((x) & RF2G_RF2G2_PDCGLNA_MASK) >> RF2G_RF2G2_PDCGLNA_LSB)
+#define RF2G_RF2G2_PDCGLNA_SET(x) (((x) << RF2G_RF2G2_PDCGLNA_LSB) & RF2G_RF2G2_PDCGLNA_MASK)
+#define RF2G_RF2G2_PDCGLNABUF_MSB 30
+#define RF2G_RF2G2_PDCGLNABUF_LSB 30
+#define RF2G_RF2G2_PDCGLNABUF_MASK 0x40000000
+#define RF2G_RF2G2_PDCGLNABUF_GET(x) (((x) & RF2G_RF2G2_PDCGLNABUF_MASK) >> RF2G_RF2G2_PDCGLNABUF_LSB)
+#define RF2G_RF2G2_PDCGLNABUF_SET(x) (((x) << RF2G_RF2G2_PDCGLNABUF_LSB) & RF2G_RF2G2_PDCGLNABUF_MASK)
+#define RF2G_RF2G2_PDCSLNA_MSB 29
+#define RF2G_RF2G2_PDCSLNA_LSB 29
+#define RF2G_RF2G2_PDCSLNA_MASK 0x20000000
+#define RF2G_RF2G2_PDCSLNA_GET(x) (((x) & RF2G_RF2G2_PDCSLNA_MASK) >> RF2G_RF2G2_PDCSLNA_LSB)
+#define RF2G_RF2G2_PDCSLNA_SET(x) (((x) << RF2G_RF2G2_PDCSLNA_LSB) & RF2G_RF2G2_PDCSLNA_MASK)
+#define RF2G_RF2G2_PDDIV_MSB 28
+#define RF2G_RF2G2_PDDIV_LSB 28
+#define RF2G_RF2G2_PDDIV_MASK 0x10000000
+#define RF2G_RF2G2_PDDIV_GET(x) (((x) & RF2G_RF2G2_PDDIV_MASK) >> RF2G_RF2G2_PDDIV_LSB)
+#define RF2G_RF2G2_PDDIV_SET(x) (((x) << RF2G_RF2G2_PDDIV_LSB) & RF2G_RF2G2_PDDIV_MASK)
+#define RF2G_RF2G2_PDPADRV_MSB 27
+#define RF2G_RF2G2_PDPADRV_LSB 27
+#define RF2G_RF2G2_PDPADRV_MASK 0x08000000
+#define RF2G_RF2G2_PDPADRV_GET(x) (((x) & RF2G_RF2G2_PDPADRV_MASK) >> RF2G_RF2G2_PDPADRV_LSB)
+#define RF2G_RF2G2_PDPADRV_SET(x) (((x) << RF2G_RF2G2_PDPADRV_LSB) & RF2G_RF2G2_PDPADRV_MASK)
+#define RF2G_RF2G2_PDPAOUT_MSB 26
+#define RF2G_RF2G2_PDPAOUT_LSB 26
+#define RF2G_RF2G2_PDPAOUT_MASK 0x04000000
+#define RF2G_RF2G2_PDPAOUT_GET(x) (((x) & RF2G_RF2G2_PDPAOUT_MASK) >> RF2G_RF2G2_PDPAOUT_LSB)
+#define RF2G_RF2G2_PDPAOUT_SET(x) (((x) << RF2G_RF2G2_PDPAOUT_LSB) & RF2G_RF2G2_PDPAOUT_MASK)
+#define RF2G_RF2G2_PDREGLNA_MSB 25
+#define RF2G_RF2G2_PDREGLNA_LSB 25
+#define RF2G_RF2G2_PDREGLNA_MASK 0x02000000
+#define RF2G_RF2G2_PDREGLNA_GET(x) (((x) & RF2G_RF2G2_PDREGLNA_MASK) >> RF2G_RF2G2_PDREGLNA_LSB)
+#define RF2G_RF2G2_PDREGLNA_SET(x) (((x) << RF2G_RF2G2_PDREGLNA_LSB) & RF2G_RF2G2_PDREGLNA_MASK)
+#define RF2G_RF2G2_PDREGLO_MSB 24
+#define RF2G_RF2G2_PDREGLO_LSB 24
+#define RF2G_RF2G2_PDREGLO_MASK 0x01000000
+#define RF2G_RF2G2_PDREGLO_GET(x) (((x) & RF2G_RF2G2_PDREGLO_MASK) >> RF2G_RF2G2_PDREGLO_LSB)
+#define RF2G_RF2G2_PDREGLO_SET(x) (((x) << RF2G_RF2G2_PDREGLO_LSB) & RF2G_RF2G2_PDREGLO_MASK)
+#define RF2G_RF2G2_PDRFGM_MSB 23
+#define RF2G_RF2G2_PDRFGM_LSB 23
+#define RF2G_RF2G2_PDRFGM_MASK 0x00800000
+#define RF2G_RF2G2_PDRFGM_GET(x) (((x) & RF2G_RF2G2_PDRFGM_MASK) >> RF2G_RF2G2_PDRFGM_LSB)
+#define RF2G_RF2G2_PDRFGM_SET(x) (((x) << RF2G_RF2G2_PDRFGM_LSB) & RF2G_RF2G2_PDRFGM_MASK)
+#define RF2G_RF2G2_PDRXLO_MSB 22
+#define RF2G_RF2G2_PDRXLO_LSB 22
+#define RF2G_RF2G2_PDRXLO_MASK 0x00400000
+#define RF2G_RF2G2_PDRXLO_GET(x) (((x) & RF2G_RF2G2_PDRXLO_MASK) >> RF2G_RF2G2_PDRXLO_LSB)
+#define RF2G_RF2G2_PDRXLO_SET(x) (((x) << RF2G_RF2G2_PDRXLO_LSB) & RF2G_RF2G2_PDRXLO_MASK)
+#define RF2G_RF2G2_PDTXLO_MSB 21
+#define RF2G_RF2G2_PDTXLO_LSB 21
+#define RF2G_RF2G2_PDTXLO_MASK 0x00200000
+#define RF2G_RF2G2_PDTXLO_GET(x) (((x) & RF2G_RF2G2_PDTXLO_MASK) >> RF2G_RF2G2_PDTXLO_LSB)
+#define RF2G_RF2G2_PDTXLO_SET(x) (((x) << RF2G_RF2G2_PDTXLO_LSB) & RF2G_RF2G2_PDTXLO_MASK)
+#define RF2G_RF2G2_PDTXMIX_MSB 20
+#define RF2G_RF2G2_PDTXMIX_LSB 20
+#define RF2G_RF2G2_PDTXMIX_MASK 0x00100000
+#define RF2G_RF2G2_PDTXMIX_GET(x) (((x) & RF2G_RF2G2_PDTXMIX_MASK) >> RF2G_RF2G2_PDTXMIX_LSB)
+#define RF2G_RF2G2_PDTXMIX_SET(x) (((x) << RF2G_RF2G2_PDTXMIX_LSB) & RF2G_RF2G2_PDTXMIX_MASK)
+#define RF2G_RF2G2_REGLNA_BYPASS_MSB 19
+#define RF2G_RF2G2_REGLNA_BYPASS_LSB 19
+#define RF2G_RF2G2_REGLNA_BYPASS_MASK 0x00080000
+#define RF2G_RF2G2_REGLNA_BYPASS_GET(x) (((x) & RF2G_RF2G2_REGLNA_BYPASS_MASK) >> RF2G_RF2G2_REGLNA_BYPASS_LSB)
+#define RF2G_RF2G2_REGLNA_BYPASS_SET(x) (((x) << RF2G_RF2G2_REGLNA_BYPASS_LSB) & RF2G_RF2G2_REGLNA_BYPASS_MASK)
+#define RF2G_RF2G2_REGLO_BYPASS_MSB 18
+#define RF2G_RF2G2_REGLO_BYPASS_LSB 18
+#define RF2G_RF2G2_REGLO_BYPASS_MASK 0x00040000
+#define RF2G_RF2G2_REGLO_BYPASS_GET(x) (((x) & RF2G_RF2G2_REGLO_BYPASS_MASK) >> RF2G_RF2G2_REGLO_BYPASS_LSB)
+#define RF2G_RF2G2_REGLO_BYPASS_SET(x) (((x) << RF2G_RF2G2_REGLO_BYPASS_LSB) & RF2G_RF2G2_REGLO_BYPASS_MASK)
+#define RF2G_RF2G2_ENABLE_PCB_MSB 17
+#define RF2G_RF2G2_ENABLE_PCB_LSB 17
+#define RF2G_RF2G2_ENABLE_PCB_MASK 0x00020000
+#define RF2G_RF2G2_ENABLE_PCB_GET(x) (((x) & RF2G_RF2G2_ENABLE_PCB_MASK) >> RF2G_RF2G2_ENABLE_PCB_LSB)
+#define RF2G_RF2G2_ENABLE_PCB_SET(x) (((x) << RF2G_RF2G2_ENABLE_PCB_LSB) & RF2G_RF2G2_ENABLE_PCB_MASK)
+#define RF2G_RF2G2_SPARE_MSB 16
+#define RF2G_RF2G2_SPARE_LSB 0
+#define RF2G_RF2G2_SPARE_MASK 0x0001ffff
+#define RF2G_RF2G2_SPARE_GET(x) (((x) & RF2G_RF2G2_SPARE_MASK) >> RF2G_RF2G2_SPARE_LSB)
+#define RF2G_RF2G2_SPARE_SET(x) (((x) << RF2G_RF2G2_SPARE_LSB) & RF2G_RF2G2_SPARE_MASK)
+
+#define TOP_GAIN_ADDRESS 0x00000030
+#define TOP_GAIN_OFFSET 0x00000030
+#define TOP_GAIN_TX6DBLOQGAIN_MSB 31
+#define TOP_GAIN_TX6DBLOQGAIN_LSB 30
+#define TOP_GAIN_TX6DBLOQGAIN_MASK 0xc0000000
+#define TOP_GAIN_TX6DBLOQGAIN_GET(x) (((x) & TOP_GAIN_TX6DBLOQGAIN_MASK) >> TOP_GAIN_TX6DBLOQGAIN_LSB)
+#define TOP_GAIN_TX6DBLOQGAIN_SET(x) (((x) << TOP_GAIN_TX6DBLOQGAIN_LSB) & TOP_GAIN_TX6DBLOQGAIN_MASK)
+#define TOP_GAIN_TX1DBLOQGAIN_MSB 29
+#define TOP_GAIN_TX1DBLOQGAIN_LSB 27
+#define TOP_GAIN_TX1DBLOQGAIN_MASK 0x38000000
+#define TOP_GAIN_TX1DBLOQGAIN_GET(x) (((x) & TOP_GAIN_TX1DBLOQGAIN_MASK) >> TOP_GAIN_TX1DBLOQGAIN_LSB)
+#define TOP_GAIN_TX1DBLOQGAIN_SET(x) (((x) << TOP_GAIN_TX1DBLOQGAIN_LSB) & TOP_GAIN_TX1DBLOQGAIN_MASK)
+#define TOP_GAIN_TXV2IGAIN_MSB 26
+#define TOP_GAIN_TXV2IGAIN_LSB 25
+#define TOP_GAIN_TXV2IGAIN_MASK 0x06000000
+#define TOP_GAIN_TXV2IGAIN_GET(x) (((x) & TOP_GAIN_TXV2IGAIN_MASK) >> TOP_GAIN_TXV2IGAIN_LSB)
+#define TOP_GAIN_TXV2IGAIN_SET(x) (((x) << TOP_GAIN_TXV2IGAIN_LSB) & TOP_GAIN_TXV2IGAIN_MASK)
+#define TOP_GAIN_PABUF5GN_MSB 24
+#define TOP_GAIN_PABUF5GN_LSB 24
+#define TOP_GAIN_PABUF5GN_MASK 0x01000000
+#define TOP_GAIN_PABUF5GN_GET(x) (((x) & TOP_GAIN_PABUF5GN_MASK) >> TOP_GAIN_PABUF5GN_LSB)
+#define TOP_GAIN_PABUF5GN_SET(x) (((x) << TOP_GAIN_PABUF5GN_LSB) & TOP_GAIN_PABUF5GN_MASK)
+#define TOP_GAIN_PADRVGN_MSB 23
+#define TOP_GAIN_PADRVGN_LSB 21
+#define TOP_GAIN_PADRVGN_MASK 0x00e00000
+#define TOP_GAIN_PADRVGN_GET(x) (((x) & TOP_GAIN_PADRVGN_MASK) >> TOP_GAIN_PADRVGN_LSB)
+#define TOP_GAIN_PADRVGN_SET(x) (((x) << TOP_GAIN_PADRVGN_LSB) & TOP_GAIN_PADRVGN_MASK)
+#define TOP_GAIN_PAOUT2GN_MSB 20
+#define TOP_GAIN_PAOUT2GN_LSB 18
+#define TOP_GAIN_PAOUT2GN_MASK 0x001c0000
+#define TOP_GAIN_PAOUT2GN_GET(x) (((x) & TOP_GAIN_PAOUT2GN_MASK) >> TOP_GAIN_PAOUT2GN_LSB)
+#define TOP_GAIN_PAOUT2GN_SET(x) (((x) << TOP_GAIN_PAOUT2GN_LSB) & TOP_GAIN_PAOUT2GN_MASK)
+#define TOP_GAIN_LNAON_MSB 17
+#define TOP_GAIN_LNAON_LSB 17
+#define TOP_GAIN_LNAON_MASK 0x00020000
+#define TOP_GAIN_LNAON_GET(x) (((x) & TOP_GAIN_LNAON_MASK) >> TOP_GAIN_LNAON_LSB)
+#define TOP_GAIN_LNAON_SET(x) (((x) << TOP_GAIN_LNAON_LSB) & TOP_GAIN_LNAON_MASK)
+#define TOP_GAIN_LNAGAIN_MSB 16
+#define TOP_GAIN_LNAGAIN_LSB 13
+#define TOP_GAIN_LNAGAIN_MASK 0x0001e000
+#define TOP_GAIN_LNAGAIN_GET(x) (((x) & TOP_GAIN_LNAGAIN_MASK) >> TOP_GAIN_LNAGAIN_LSB)
+#define TOP_GAIN_LNAGAIN_SET(x) (((x) << TOP_GAIN_LNAGAIN_LSB) & TOP_GAIN_LNAGAIN_MASK)
+#define TOP_GAIN_RFVGA5GAIN_MSB 12
+#define TOP_GAIN_RFVGA5GAIN_LSB 11
+#define TOP_GAIN_RFVGA5GAIN_MASK 0x00001800
+#define TOP_GAIN_RFVGA5GAIN_GET(x) (((x) & TOP_GAIN_RFVGA5GAIN_MASK) >> TOP_GAIN_RFVGA5GAIN_LSB)
+#define TOP_GAIN_RFVGA5GAIN_SET(x) (((x) << TOP_GAIN_RFVGA5GAIN_LSB) & TOP_GAIN_RFVGA5GAIN_MASK)
+#define TOP_GAIN_RFGMGN_MSB 10
+#define TOP_GAIN_RFGMGN_LSB 8
+#define TOP_GAIN_RFGMGN_MASK 0x00000700
+#define TOP_GAIN_RFGMGN_GET(x) (((x) & TOP_GAIN_RFGMGN_MASK) >> TOP_GAIN_RFGMGN_LSB)
+#define TOP_GAIN_RFGMGN_SET(x) (((x) << TOP_GAIN_RFGMGN_LSB) & TOP_GAIN_RFGMGN_MASK)
+#define TOP_GAIN_RX6DBLOQGAIN_MSB 7
+#define TOP_GAIN_RX6DBLOQGAIN_LSB 6
+#define TOP_GAIN_RX6DBLOQGAIN_MASK 0x000000c0
+#define TOP_GAIN_RX6DBLOQGAIN_GET(x) (((x) & TOP_GAIN_RX6DBLOQGAIN_MASK) >> TOP_GAIN_RX6DBLOQGAIN_LSB)
+#define TOP_GAIN_RX6DBLOQGAIN_SET(x) (((x) << TOP_GAIN_RX6DBLOQGAIN_LSB) & TOP_GAIN_RX6DBLOQGAIN_MASK)
+#define TOP_GAIN_RX1DBLOQGAIN_MSB 5
+#define TOP_GAIN_RX1DBLOQGAIN_LSB 3
+#define TOP_GAIN_RX1DBLOQGAIN_MASK 0x00000038
+#define TOP_GAIN_RX1DBLOQGAIN_GET(x) (((x) & TOP_GAIN_RX1DBLOQGAIN_MASK) >> TOP_GAIN_RX1DBLOQGAIN_LSB)
+#define TOP_GAIN_RX1DBLOQGAIN_SET(x) (((x) << TOP_GAIN_RX1DBLOQGAIN_LSB) & TOP_GAIN_RX1DBLOQGAIN_MASK)
+#define TOP_GAIN_RX6DBHIQGAIN_MSB 2
+#define TOP_GAIN_RX6DBHIQGAIN_LSB 1
+#define TOP_GAIN_RX6DBHIQGAIN_MASK 0x00000006
+#define TOP_GAIN_RX6DBHIQGAIN_GET(x) (((x) & TOP_GAIN_RX6DBHIQGAIN_MASK) >> TOP_GAIN_RX6DBHIQGAIN_LSB)
+#define TOP_GAIN_RX6DBHIQGAIN_SET(x) (((x) << TOP_GAIN_RX6DBHIQGAIN_LSB) & TOP_GAIN_RX6DBHIQGAIN_MASK)
+#define TOP_GAIN_SPARE_MSB 0
+#define TOP_GAIN_SPARE_LSB 0
+#define TOP_GAIN_SPARE_MASK 0x00000001
+#define TOP_GAIN_SPARE_GET(x) (((x) & TOP_GAIN_SPARE_MASK) >> TOP_GAIN_SPARE_LSB)
+#define TOP_GAIN_SPARE_SET(x) (((x) << TOP_GAIN_SPARE_LSB) & TOP_GAIN_SPARE_MASK)
+
+#define TOP_TOP_ADDRESS 0x00000034
+#define TOP_TOP_OFFSET 0x00000034
+#define TOP_TOP_LOCALTXGAIN_MSB 31
+#define TOP_TOP_LOCALTXGAIN_LSB 31
+#define TOP_TOP_LOCALTXGAIN_MASK 0x80000000
+#define TOP_TOP_LOCALTXGAIN_GET(x) (((x) & TOP_TOP_LOCALTXGAIN_MASK) >> TOP_TOP_LOCALTXGAIN_LSB)
+#define TOP_TOP_LOCALTXGAIN_SET(x) (((x) << TOP_TOP_LOCALTXGAIN_LSB) & TOP_TOP_LOCALTXGAIN_MASK)
+#define TOP_TOP_LOCALRXGAIN_MSB 30
+#define TOP_TOP_LOCALRXGAIN_LSB 30
+#define TOP_TOP_LOCALRXGAIN_MASK 0x40000000
+#define TOP_TOP_LOCALRXGAIN_GET(x) (((x) & TOP_TOP_LOCALRXGAIN_MASK) >> TOP_TOP_LOCALRXGAIN_LSB)
+#define TOP_TOP_LOCALRXGAIN_SET(x) (((x) << TOP_TOP_LOCALRXGAIN_LSB) & TOP_TOP_LOCALRXGAIN_MASK)
+#define TOP_TOP_LOCALMODE_MSB 29
+#define TOP_TOP_LOCALMODE_LSB 29
+#define TOP_TOP_LOCALMODE_MASK 0x20000000
+#define TOP_TOP_LOCALMODE_GET(x) (((x) & TOP_TOP_LOCALMODE_MASK) >> TOP_TOP_LOCALMODE_LSB)
+#define TOP_TOP_LOCALMODE_SET(x) (((x) << TOP_TOP_LOCALMODE_LSB) & TOP_TOP_LOCALMODE_MASK)
+#define TOP_TOP_CALFC_MSB 28
+#define TOP_TOP_CALFC_LSB 28
+#define TOP_TOP_CALFC_MASK 0x10000000
+#define TOP_TOP_CALFC_GET(x) (((x) & TOP_TOP_CALFC_MASK) >> TOP_TOP_CALFC_LSB)
+#define TOP_TOP_CALFC_SET(x) (((x) << TOP_TOP_CALFC_LSB) & TOP_TOP_CALFC_MASK)
+#define TOP_TOP_CALDC_MSB 27
+#define TOP_TOP_CALDC_LSB 27
+#define TOP_TOP_CALDC_MASK 0x08000000
+#define TOP_TOP_CALDC_GET(x) (((x) & TOP_TOP_CALDC_MASK) >> TOP_TOP_CALDC_LSB)
+#define TOP_TOP_CALDC_SET(x) (((x) << TOP_TOP_CALDC_LSB) & TOP_TOP_CALDC_MASK)
+#define TOP_TOP_CAL_RESIDUE_MSB 26
+#define TOP_TOP_CAL_RESIDUE_LSB 26
+#define TOP_TOP_CAL_RESIDUE_MASK 0x04000000
+#define TOP_TOP_CAL_RESIDUE_GET(x) (((x) & TOP_TOP_CAL_RESIDUE_MASK) >> TOP_TOP_CAL_RESIDUE_LSB)
+#define TOP_TOP_CAL_RESIDUE_SET(x) (((x) << TOP_TOP_CAL_RESIDUE_LSB) & TOP_TOP_CAL_RESIDUE_MASK)
+#define TOP_TOP_BMODE_MSB 25
+#define TOP_TOP_BMODE_LSB 25
+#define TOP_TOP_BMODE_MASK 0x02000000
+#define TOP_TOP_BMODE_GET(x) (((x) & TOP_TOP_BMODE_MASK) >> TOP_TOP_BMODE_LSB)
+#define TOP_TOP_BMODE_SET(x) (((x) << TOP_TOP_BMODE_LSB) & TOP_TOP_BMODE_MASK)
+#define TOP_TOP_SYNTHON_MSB 24
+#define TOP_TOP_SYNTHON_LSB 24
+#define TOP_TOP_SYNTHON_MASK 0x01000000
+#define TOP_TOP_SYNTHON_GET(x) (((x) & TOP_TOP_SYNTHON_MASK) >> TOP_TOP_SYNTHON_LSB)
+#define TOP_TOP_SYNTHON_SET(x) (((x) << TOP_TOP_SYNTHON_LSB) & TOP_TOP_SYNTHON_MASK)
+#define TOP_TOP_RXON_MSB 23
+#define TOP_TOP_RXON_LSB 23
+#define TOP_TOP_RXON_MASK 0x00800000
+#define TOP_TOP_RXON_GET(x) (((x) & TOP_TOP_RXON_MASK) >> TOP_TOP_RXON_LSB)
+#define TOP_TOP_RXON_SET(x) (((x) << TOP_TOP_RXON_LSB) & TOP_TOP_RXON_MASK)
+#define TOP_TOP_TXON_MSB 22
+#define TOP_TOP_TXON_LSB 22
+#define TOP_TOP_TXON_MASK 0x00400000
+#define TOP_TOP_TXON_GET(x) (((x) & TOP_TOP_TXON_MASK) >> TOP_TOP_TXON_LSB)
+#define TOP_TOP_TXON_SET(x) (((x) << TOP_TOP_TXON_LSB) & TOP_TOP_TXON_MASK)
+#define TOP_TOP_PAON_MSB 21
+#define TOP_TOP_PAON_LSB 21
+#define TOP_TOP_PAON_MASK 0x00200000
+#define TOP_TOP_PAON_GET(x) (((x) & TOP_TOP_PAON_MASK) >> TOP_TOP_PAON_LSB)
+#define TOP_TOP_PAON_SET(x) (((x) << TOP_TOP_PAON_LSB) & TOP_TOP_PAON_MASK)
+#define TOP_TOP_CALTX_MSB 20
+#define TOP_TOP_CALTX_LSB 20
+#define TOP_TOP_CALTX_MASK 0x00100000
+#define TOP_TOP_CALTX_GET(x) (((x) & TOP_TOP_CALTX_MASK) >> TOP_TOP_CALTX_LSB)
+#define TOP_TOP_CALTX_SET(x) (((x) << TOP_TOP_CALTX_LSB) & TOP_TOP_CALTX_MASK)
+#define TOP_TOP_LOCALADDAC_MSB 19
+#define TOP_TOP_LOCALADDAC_LSB 19
+#define TOP_TOP_LOCALADDAC_MASK 0x00080000
+#define TOP_TOP_LOCALADDAC_GET(x) (((x) & TOP_TOP_LOCALADDAC_MASK) >> TOP_TOP_LOCALADDAC_LSB)
+#define TOP_TOP_LOCALADDAC_SET(x) (((x) << TOP_TOP_LOCALADDAC_LSB) & TOP_TOP_LOCALADDAC_MASK)
+#define TOP_TOP_PWDPLL_MSB 18
+#define TOP_TOP_PWDPLL_LSB 18
+#define TOP_TOP_PWDPLL_MASK 0x00040000
+#define TOP_TOP_PWDPLL_GET(x) (((x) & TOP_TOP_PWDPLL_MASK) >> TOP_TOP_PWDPLL_LSB)
+#define TOP_TOP_PWDPLL_SET(x) (((x) << TOP_TOP_PWDPLL_LSB) & TOP_TOP_PWDPLL_MASK)
+#define TOP_TOP_PWDADC_MSB 17
+#define TOP_TOP_PWDADC_LSB 17
+#define TOP_TOP_PWDADC_MASK 0x00020000
+#define TOP_TOP_PWDADC_GET(x) (((x) & TOP_TOP_PWDADC_MASK) >> TOP_TOP_PWDADC_LSB)
+#define TOP_TOP_PWDADC_SET(x) (((x) << TOP_TOP_PWDADC_LSB) & TOP_TOP_PWDADC_MASK)
+#define TOP_TOP_PWDDAC_MSB 16
+#define TOP_TOP_PWDDAC_LSB 16
+#define TOP_TOP_PWDDAC_MASK 0x00010000
+#define TOP_TOP_PWDDAC_GET(x) (((x) & TOP_TOP_PWDDAC_MASK) >> TOP_TOP_PWDDAC_LSB)
+#define TOP_TOP_PWDDAC_SET(x) (((x) << TOP_TOP_PWDDAC_LSB) & TOP_TOP_PWDDAC_MASK)
+#define TOP_TOP_LOCALXTAL_MSB 15
+#define TOP_TOP_LOCALXTAL_LSB 15
+#define TOP_TOP_LOCALXTAL_MASK 0x00008000
+#define TOP_TOP_LOCALXTAL_GET(x) (((x) & TOP_TOP_LOCALXTAL_MASK) >> TOP_TOP_LOCALXTAL_LSB)
+#define TOP_TOP_LOCALXTAL_SET(x) (((x) << TOP_TOP_LOCALXTAL_LSB) & TOP_TOP_LOCALXTAL_MASK)
+#define TOP_TOP_PWDCLKIN_MSB 14
+#define TOP_TOP_PWDCLKIN_LSB 14
+#define TOP_TOP_PWDCLKIN_MASK 0x00004000
+#define TOP_TOP_PWDCLKIN_GET(x) (((x) & TOP_TOP_PWDCLKIN_MASK) >> TOP_TOP_PWDCLKIN_LSB)
+#define TOP_TOP_PWDCLKIN_SET(x) (((x) << TOP_TOP_PWDCLKIN_LSB) & TOP_TOP_PWDCLKIN_MASK)
+#define TOP_TOP_OSCON_MSB 13
+#define TOP_TOP_OSCON_LSB 13
+#define TOP_TOP_OSCON_MASK 0x00002000
+#define TOP_TOP_OSCON_GET(x) (((x) & TOP_TOP_OSCON_MASK) >> TOP_TOP_OSCON_LSB)
+#define TOP_TOP_OSCON_SET(x) (((x) << TOP_TOP_OSCON_LSB) & TOP_TOP_OSCON_MASK)
+#define TOP_TOP_SCLKEN_FORCE_MSB 12
+#define TOP_TOP_SCLKEN_FORCE_LSB 12
+#define TOP_TOP_SCLKEN_FORCE_MASK 0x00001000
+#define TOP_TOP_SCLKEN_FORCE_GET(x) (((x) & TOP_TOP_SCLKEN_FORCE_MASK) >> TOP_TOP_SCLKEN_FORCE_LSB)
+#define TOP_TOP_SCLKEN_FORCE_SET(x) (((x) << TOP_TOP_SCLKEN_FORCE_LSB) & TOP_TOP_SCLKEN_FORCE_MASK)
+#define TOP_TOP_SYNTHON_FORCE_MSB 11
+#define TOP_TOP_SYNTHON_FORCE_LSB 11
+#define TOP_TOP_SYNTHON_FORCE_MASK 0x00000800
+#define TOP_TOP_SYNTHON_FORCE_GET(x) (((x) & TOP_TOP_SYNTHON_FORCE_MASK) >> TOP_TOP_SYNTHON_FORCE_LSB)
+#define TOP_TOP_SYNTHON_FORCE_SET(x) (((x) << TOP_TOP_SYNTHON_FORCE_LSB) & TOP_TOP_SYNTHON_FORCE_MASK)
+#define TOP_TOP_PDBIAS_MSB 10
+#define TOP_TOP_PDBIAS_LSB 10
+#define TOP_TOP_PDBIAS_MASK 0x00000400
+#define TOP_TOP_PDBIAS_GET(x) (((x) & TOP_TOP_PDBIAS_MASK) >> TOP_TOP_PDBIAS_LSB)
+#define TOP_TOP_PDBIAS_SET(x) (((x) << TOP_TOP_PDBIAS_LSB) & TOP_TOP_PDBIAS_MASK)
+#define TOP_TOP_DATAOUTSEL_MSB 9
+#define TOP_TOP_DATAOUTSEL_LSB 8
+#define TOP_TOP_DATAOUTSEL_MASK 0x00000300
+#define TOP_TOP_DATAOUTSEL_GET(x) (((x) & TOP_TOP_DATAOUTSEL_MASK) >> TOP_TOP_DATAOUTSEL_LSB)
+#define TOP_TOP_DATAOUTSEL_SET(x) (((x) << TOP_TOP_DATAOUTSEL_LSB) & TOP_TOP_DATAOUTSEL_MASK)
+#define TOP_TOP_REVID_MSB 7
+#define TOP_TOP_REVID_LSB 5
+#define TOP_TOP_REVID_MASK 0x000000e0
+#define TOP_TOP_REVID_GET(x) (((x) & TOP_TOP_REVID_MASK) >> TOP_TOP_REVID_LSB)
+#define TOP_TOP_REVID_SET(x) (((x) << TOP_TOP_REVID_LSB) & TOP_TOP_REVID_MASK)
+#define TOP_TOP_INT2PAD_MSB 4
+#define TOP_TOP_INT2PAD_LSB 4
+#define TOP_TOP_INT2PAD_MASK 0x00000010
+#define TOP_TOP_INT2PAD_GET(x) (((x) & TOP_TOP_INT2PAD_MASK) >> TOP_TOP_INT2PAD_LSB)
+#define TOP_TOP_INT2PAD_SET(x) (((x) << TOP_TOP_INT2PAD_LSB) & TOP_TOP_INT2PAD_MASK)
+#define TOP_TOP_INTH2PAD_MSB 3
+#define TOP_TOP_INTH2PAD_LSB 3
+#define TOP_TOP_INTH2PAD_MASK 0x00000008
+#define TOP_TOP_INTH2PAD_GET(x) (((x) & TOP_TOP_INTH2PAD_MASK) >> TOP_TOP_INTH2PAD_LSB)
+#define TOP_TOP_INTH2PAD_SET(x) (((x) << TOP_TOP_INTH2PAD_LSB) & TOP_TOP_INTH2PAD_MASK)
+#define TOP_TOP_PAD2GND_MSB 2
+#define TOP_TOP_PAD2GND_LSB 2
+#define TOP_TOP_PAD2GND_MASK 0x00000004
+#define TOP_TOP_PAD2GND_GET(x) (((x) & TOP_TOP_PAD2GND_MASK) >> TOP_TOP_PAD2GND_LSB)
+#define TOP_TOP_PAD2GND_SET(x) (((x) << TOP_TOP_PAD2GND_LSB) & TOP_TOP_PAD2GND_MASK)
+#define TOP_TOP_INT2GND_MSB 1
+#define TOP_TOP_INT2GND_LSB 1
+#define TOP_TOP_INT2GND_MASK 0x00000002
+#define TOP_TOP_INT2GND_GET(x) (((x) & TOP_TOP_INT2GND_MASK) >> TOP_TOP_INT2GND_LSB)
+#define TOP_TOP_INT2GND_SET(x) (((x) << TOP_TOP_INT2GND_LSB) & TOP_TOP_INT2GND_MASK)
+#define TOP_TOP_FORCE_XPAON_MSB 0
+#define TOP_TOP_FORCE_XPAON_LSB 0
+#define TOP_TOP_FORCE_XPAON_MASK 0x00000001
+#define TOP_TOP_FORCE_XPAON_GET(x) (((x) & TOP_TOP_FORCE_XPAON_MASK) >> TOP_TOP_FORCE_XPAON_LSB)
+#define TOP_TOP_FORCE_XPAON_SET(x) (((x) << TOP_TOP_FORCE_XPAON_LSB) & TOP_TOP_FORCE_XPAON_MASK)
+
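The TOP_TOP fields above (SYNTHON, RXON, TXON, and so on) are the kind of single-bit controls these accessors are meant for. A minimal read-modify-write sketch follows; reg_read() and reg_write() are hypothetical MMIO accessors standing in for whatever the surrounding driver actually uses, so this illustrates the macro pattern rather than real driver code.

#include <stdint.h>

/* Hypothetical register accessors -- placeholders, not part of this header. */
extern uint32_t reg_read(uint32_t addr);
extern void reg_write(uint32_t addr, uint32_t val);

/* Turn the synthesizer on or off: clear the field with its _MASK,
 * then merge in the new value positioned by _SET(). */
static void top_top_set_synthon(int on)
{
	uint32_t v = reg_read(TOP_TOP_ADDRESS);

	v &= ~TOP_TOP_SYNTHON_MASK;
	v |= TOP_TOP_SYNTHON_SET(on ? 1u : 0u);
	reg_write(TOP_TOP_ADDRESS, v);
}
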
+#define BIAS_BIAS_SEL_ADDRESS 0x00000038
+#define BIAS_BIAS_SEL_OFFSET 0x00000038
+#define BIAS_BIAS_SEL_PADON_MSB 31
+#define BIAS_BIAS_SEL_PADON_LSB 31
+#define BIAS_BIAS_SEL_PADON_MASK 0x80000000
+#define BIAS_BIAS_SEL_PADON_GET(x) (((x) & BIAS_BIAS_SEL_PADON_MASK) >> BIAS_BIAS_SEL_PADON_LSB)
+#define BIAS_BIAS_SEL_PADON_SET(x) (((x) << BIAS_BIAS_SEL_PADON_LSB) & BIAS_BIAS_SEL_PADON_MASK)
+#define BIAS_BIAS_SEL_SEL_BIAS_MSB 30
+#define BIAS_BIAS_SEL_SEL_BIAS_LSB 25
+#define BIAS_BIAS_SEL_SEL_BIAS_MASK 0x7e000000
+#define BIAS_BIAS_SEL_SEL_BIAS_GET(x) (((x) & BIAS_BIAS_SEL_SEL_BIAS_MASK) >> BIAS_BIAS_SEL_SEL_BIAS_LSB)
+#define BIAS_BIAS_SEL_SEL_BIAS_SET(x) (((x) << BIAS_BIAS_SEL_SEL_BIAS_LSB) & BIAS_BIAS_SEL_SEL_BIAS_MASK)
+#define BIAS_BIAS_SEL_SEL_SPARE_MSB 24
+#define BIAS_BIAS_SEL_SEL_SPARE_LSB 21
+#define BIAS_BIAS_SEL_SEL_SPARE_MASK 0x01e00000
+#define BIAS_BIAS_SEL_SEL_SPARE_GET(x) (((x) & BIAS_BIAS_SEL_SEL_SPARE_MASK) >> BIAS_BIAS_SEL_SEL_SPARE_LSB)
+#define BIAS_BIAS_SEL_SEL_SPARE_SET(x) (((x) << BIAS_BIAS_SEL_SEL_SPARE_LSB) & BIAS_BIAS_SEL_SEL_SPARE_MASK)
+#define BIAS_BIAS_SEL_SPARE_MSB 20
+#define BIAS_BIAS_SEL_SPARE_LSB 20
+#define BIAS_BIAS_SEL_SPARE_MASK 0x00100000
+#define BIAS_BIAS_SEL_SPARE_GET(x) (((x) & BIAS_BIAS_SEL_SPARE_MASK) >> BIAS_BIAS_SEL_SPARE_LSB)
+#define BIAS_BIAS_SEL_SPARE_SET(x) (((x) << BIAS_BIAS_SEL_SPARE_LSB) & BIAS_BIAS_SEL_SPARE_MASK)
+#define BIAS_BIAS_SEL_PWD_ICREFBUFBIAS12P5_MSB 19
+#define BIAS_BIAS_SEL_PWD_ICREFBUFBIAS12P5_LSB 17
+#define BIAS_BIAS_SEL_PWD_ICREFBUFBIAS12P5_MASK 0x000e0000
+#define BIAS_BIAS_SEL_PWD_ICREFBUFBIAS12P5_GET(x) (((x) & BIAS_BIAS_SEL_PWD_ICREFBUFBIAS12P5_MASK) >> BIAS_BIAS_SEL_PWD_ICREFBUFBIAS12P5_LSB)
+#define BIAS_BIAS_SEL_PWD_ICREFBUFBIAS12P5_SET(x) (((x) << BIAS_BIAS_SEL_PWD_ICREFBUFBIAS12P5_LSB) & BIAS_BIAS_SEL_PWD_ICREFBUFBIAS12P5_MASK)
+#define BIAS_BIAS_SEL_PWD_IRDACREGREF12P5_MSB 16
+#define BIAS_BIAS_SEL_PWD_IRDACREGREF12P5_LSB 16
+#define BIAS_BIAS_SEL_PWD_IRDACREGREF12P5_MASK 0x00010000
+#define BIAS_BIAS_SEL_PWD_IRDACREGREF12P5_GET(x) (((x) & BIAS_BIAS_SEL_PWD_IRDACREGREF12P5_MASK) >> BIAS_BIAS_SEL_PWD_IRDACREGREF12P5_LSB)
+#define BIAS_BIAS_SEL_PWD_IRDACREGREF12P5_SET(x) (((x) << BIAS_BIAS_SEL_PWD_IRDACREGREF12P5_LSB) & BIAS_BIAS_SEL_PWD_IRDACREGREF12P5_MASK)
+#define BIAS_BIAS_SEL_PWD_IRREFMASTERBIAS12P5_MSB 15
+#define BIAS_BIAS_SEL_PWD_IRREFMASTERBIAS12P5_LSB 15
+#define BIAS_BIAS_SEL_PWD_IRREFMASTERBIAS12P5_MASK 0x00008000
+#define BIAS_BIAS_SEL_PWD_IRREFMASTERBIAS12P5_GET(x) (((x) & BIAS_BIAS_SEL_PWD_IRREFMASTERBIAS12P5_MASK) >> BIAS_BIAS_SEL_PWD_IRREFMASTERBIAS12P5_LSB)
+#define BIAS_BIAS_SEL_PWD_IRREFMASTERBIAS12P5_SET(x) (((x) << BIAS_BIAS_SEL_PWD_IRREFMASTERBIAS12P5_LSB) & BIAS_BIAS_SEL_PWD_IRREFMASTERBIAS12P5_MASK)
+#define BIAS_BIAS_SEL_PWD_ICREFOPAMPBIAS25_MSB 14
+#define BIAS_BIAS_SEL_PWD_ICREFOPAMPBIAS25_LSB 14
+#define BIAS_BIAS_SEL_PWD_ICREFOPAMPBIAS25_MASK 0x00004000
+#define BIAS_BIAS_SEL_PWD_ICREFOPAMPBIAS25_GET(x) (((x) & BIAS_BIAS_SEL_PWD_ICREFOPAMPBIAS25_MASK) >> BIAS_BIAS_SEL_PWD_ICREFOPAMPBIAS25_LSB)
+#define BIAS_BIAS_SEL_PWD_ICREFOPAMPBIAS25_SET(x) (((x) << BIAS_BIAS_SEL_PWD_ICREFOPAMPBIAS25_LSB) & BIAS_BIAS_SEL_PWD_ICREFOPAMPBIAS25_MASK)
+#define BIAS_BIAS_SEL_PWD_ICCPLL25_MSB 13
+#define BIAS_BIAS_SEL_PWD_ICCPLL25_LSB 13
+#define BIAS_BIAS_SEL_PWD_ICCPLL25_MASK 0x00002000
+#define BIAS_BIAS_SEL_PWD_ICCPLL25_GET(x) (((x) & BIAS_BIAS_SEL_PWD_ICCPLL25_MASK) >> BIAS_BIAS_SEL_PWD_ICCPLL25_LSB)
+#define BIAS_BIAS_SEL_PWD_ICCPLL25_SET(x) (((x) << BIAS_BIAS_SEL_PWD_ICCPLL25_LSB) & BIAS_BIAS_SEL_PWD_ICCPLL25_MASK)
+#define BIAS_BIAS_SEL_PWD_ICCOMPBIAS25_MSB 12
+#define BIAS_BIAS_SEL_PWD_ICCOMPBIAS25_LSB 10
+#define BIAS_BIAS_SEL_PWD_ICCOMPBIAS25_MASK 0x00001c00
+#define BIAS_BIAS_SEL_PWD_ICCOMPBIAS25_GET(x) (((x) & BIAS_BIAS_SEL_PWD_ICCOMPBIAS25_MASK) >> BIAS_BIAS_SEL_PWD_ICCOMPBIAS25_LSB)
+#define BIAS_BIAS_SEL_PWD_ICCOMPBIAS25_SET(x) (((x) << BIAS_BIAS_SEL_PWD_ICCOMPBIAS25_LSB) & BIAS_BIAS_SEL_PWD_ICCOMPBIAS25_MASK)
+#define BIAS_BIAS_SEL_PWD_ICXTAL25_MSB 9
+#define BIAS_BIAS_SEL_PWD_ICXTAL25_LSB 7
+#define BIAS_BIAS_SEL_PWD_ICXTAL25_MASK 0x00000380
+#define BIAS_BIAS_SEL_PWD_ICXTAL25_GET(x) (((x) & BIAS_BIAS_SEL_PWD_ICXTAL25_MASK) >> BIAS_BIAS_SEL_PWD_ICXTAL25_LSB)
+#define BIAS_BIAS_SEL_PWD_ICXTAL25_SET(x) (((x) << BIAS_BIAS_SEL_PWD_ICXTAL25_LSB) & BIAS_BIAS_SEL_PWD_ICXTAL25_MASK)
+#define BIAS_BIAS_SEL_PWD_ICTSENS25_MSB 6
+#define BIAS_BIAS_SEL_PWD_ICTSENS25_LSB 4
+#define BIAS_BIAS_SEL_PWD_ICTSENS25_MASK 0x00000070
+#define BIAS_BIAS_SEL_PWD_ICTSENS25_GET(x) (((x) & BIAS_BIAS_SEL_PWD_ICTSENS25_MASK) >> BIAS_BIAS_SEL_PWD_ICTSENS25_LSB)
+#define BIAS_BIAS_SEL_PWD_ICTSENS25_SET(x) (((x) << BIAS_BIAS_SEL_PWD_ICTSENS25_LSB) & BIAS_BIAS_SEL_PWD_ICTSENS25_MASK)
+#define BIAS_BIAS_SEL_PWD_ICTXPC25_MSB 3
+#define BIAS_BIAS_SEL_PWD_ICTXPC25_LSB 1
+#define BIAS_BIAS_SEL_PWD_ICTXPC25_MASK 0x0000000e
+#define BIAS_BIAS_SEL_PWD_ICTXPC25_GET(x) (((x) & BIAS_BIAS_SEL_PWD_ICTXPC25_MASK) >> BIAS_BIAS_SEL_PWD_ICTXPC25_LSB)
+#define BIAS_BIAS_SEL_PWD_ICTXPC25_SET(x) (((x) << BIAS_BIAS_SEL_PWD_ICTXPC25_LSB) & BIAS_BIAS_SEL_PWD_ICTXPC25_MASK)
+#define BIAS_BIAS_SEL_PWD_ICLDO25_MSB 0
+#define BIAS_BIAS_SEL_PWD_ICLDO25_LSB 0
+#define BIAS_BIAS_SEL_PWD_ICLDO25_MASK 0x00000001
+#define BIAS_BIAS_SEL_PWD_ICLDO25_GET(x) (((x) & BIAS_BIAS_SEL_PWD_ICLDO25_MASK) >> BIAS_BIAS_SEL_PWD_ICLDO25_LSB)
+#define BIAS_BIAS_SEL_PWD_ICLDO25_SET(x) (((x) << BIAS_BIAS_SEL_PWD_ICLDO25_LSB) & BIAS_BIAS_SEL_PWD_ICLDO25_MASK)
+
+#define BIAS_BIAS1_ADDRESS 0x0000003c
+#define BIAS_BIAS1_OFFSET 0x0000003c
+#define BIAS_BIAS1_PWD_ICDAC2BB25_MSB 31
+#define BIAS_BIAS1_PWD_ICDAC2BB25_LSB 29
+#define BIAS_BIAS1_PWD_ICDAC2BB25_MASK 0xe0000000
+#define BIAS_BIAS1_PWD_ICDAC2BB25_GET(x) (((x) & BIAS_BIAS1_PWD_ICDAC2BB25_MASK) >> BIAS_BIAS1_PWD_ICDAC2BB25_LSB)
+#define BIAS_BIAS1_PWD_ICDAC2BB25_SET(x) (((x) << BIAS_BIAS1_PWD_ICDAC2BB25_LSB) & BIAS_BIAS1_PWD_ICDAC2BB25_MASK)
+#define BIAS_BIAS1_PWD_IC2GVGM25_MSB 28
+#define BIAS_BIAS1_PWD_IC2GVGM25_LSB 26
+#define BIAS_BIAS1_PWD_IC2GVGM25_MASK 0x1c000000
+#define BIAS_BIAS1_PWD_IC2GVGM25_GET(x) (((x) & BIAS_BIAS1_PWD_IC2GVGM25_MASK) >> BIAS_BIAS1_PWD_IC2GVGM25_LSB)
+#define BIAS_BIAS1_PWD_IC2GVGM25_SET(x) (((x) << BIAS_BIAS1_PWD_IC2GVGM25_LSB) & BIAS_BIAS1_PWD_IC2GVGM25_MASK)
+#define BIAS_BIAS1_PWD_IC2GRFFE25_MSB 25
+#define BIAS_BIAS1_PWD_IC2GRFFE25_LSB 23
+#define BIAS_BIAS1_PWD_IC2GRFFE25_MASK 0x03800000
+#define BIAS_BIAS1_PWD_IC2GRFFE25_GET(x) (((x) & BIAS_BIAS1_PWD_IC2GRFFE25_MASK) >> BIAS_BIAS1_PWD_IC2GRFFE25_LSB)
+#define BIAS_BIAS1_PWD_IC2GRFFE25_SET(x) (((x) << BIAS_BIAS1_PWD_IC2GRFFE25_LSB) & BIAS_BIAS1_PWD_IC2GRFFE25_MASK)
+#define BIAS_BIAS1_PWD_IC2GLOREG25_MSB 22
+#define BIAS_BIAS1_PWD_IC2GLOREG25_LSB 20
+#define BIAS_BIAS1_PWD_IC2GLOREG25_MASK 0x00700000
+#define BIAS_BIAS1_PWD_IC2GLOREG25_GET(x) (((x) & BIAS_BIAS1_PWD_IC2GLOREG25_MASK) >> BIAS_BIAS1_PWD_IC2GLOREG25_LSB)
+#define BIAS_BIAS1_PWD_IC2GLOREG25_SET(x) (((x) << BIAS_BIAS1_PWD_IC2GLOREG25_LSB) & BIAS_BIAS1_PWD_IC2GLOREG25_MASK)
+#define BIAS_BIAS1_PWD_IC2GLNAREG25_MSB 19
+#define BIAS_BIAS1_PWD_IC2GLNAREG25_LSB 17
+#define BIAS_BIAS1_PWD_IC2GLNAREG25_MASK 0x000e0000
+#define BIAS_BIAS1_PWD_IC2GLNAREG25_GET(x) (((x) & BIAS_BIAS1_PWD_IC2GLNAREG25_MASK) >> BIAS_BIAS1_PWD_IC2GLNAREG25_LSB)
+#define BIAS_BIAS1_PWD_IC2GLNAREG25_SET(x) (((x) << BIAS_BIAS1_PWD_IC2GLNAREG25_LSB) & BIAS_BIAS1_PWD_IC2GLNAREG25_MASK)
+#define BIAS_BIAS1_PWD_ICDETECTORB25_MSB 16
+#define BIAS_BIAS1_PWD_ICDETECTORB25_LSB 16
+#define BIAS_BIAS1_PWD_ICDETECTORB25_MASK 0x00010000
+#define BIAS_BIAS1_PWD_ICDETECTORB25_GET(x) (((x) & BIAS_BIAS1_PWD_ICDETECTORB25_MASK) >> BIAS_BIAS1_PWD_ICDETECTORB25_LSB)
+#define BIAS_BIAS1_PWD_ICDETECTORB25_SET(x) (((x) << BIAS_BIAS1_PWD_ICDETECTORB25_LSB) & BIAS_BIAS1_PWD_ICDETECTORB25_MASK)
+#define BIAS_BIAS1_PWD_ICDETECTORA25_MSB 15
+#define BIAS_BIAS1_PWD_ICDETECTORA25_LSB 15
+#define BIAS_BIAS1_PWD_ICDETECTORA25_MASK 0x00008000
+#define BIAS_BIAS1_PWD_ICDETECTORA25_GET(x) (((x) & BIAS_BIAS1_PWD_ICDETECTORA25_MASK) >> BIAS_BIAS1_PWD_ICDETECTORA25_LSB)
+#define BIAS_BIAS1_PWD_ICDETECTORA25_SET(x) (((x) << BIAS_BIAS1_PWD_ICDETECTORA25_LSB) & BIAS_BIAS1_PWD_ICDETECTORA25_MASK)
+#define BIAS_BIAS1_PWD_IC5GRXRF25_MSB 14
+#define BIAS_BIAS1_PWD_IC5GRXRF25_LSB 14
+#define BIAS_BIAS1_PWD_IC5GRXRF25_MASK 0x00004000
+#define BIAS_BIAS1_PWD_IC5GRXRF25_GET(x) (((x) & BIAS_BIAS1_PWD_IC5GRXRF25_MASK) >> BIAS_BIAS1_PWD_IC5GRXRF25_LSB)
+#define BIAS_BIAS1_PWD_IC5GRXRF25_SET(x) (((x) << BIAS_BIAS1_PWD_IC5GRXRF25_LSB) & BIAS_BIAS1_PWD_IC5GRXRF25_MASK)
+#define BIAS_BIAS1_PWD_IC5GTXPA25_MSB 13
+#define BIAS_BIAS1_PWD_IC5GTXPA25_LSB 11
+#define BIAS_BIAS1_PWD_IC5GTXPA25_MASK 0x00003800
+#define BIAS_BIAS1_PWD_IC5GTXPA25_GET(x) (((x) & BIAS_BIAS1_PWD_IC5GTXPA25_MASK) >> BIAS_BIAS1_PWD_IC5GTXPA25_LSB)
+#define BIAS_BIAS1_PWD_IC5GTXPA25_SET(x) (((x) << BIAS_BIAS1_PWD_IC5GTXPA25_LSB) & BIAS_BIAS1_PWD_IC5GTXPA25_MASK)
+#define BIAS_BIAS1_PWD_IC5GTXBUF25_MSB 10
+#define BIAS_BIAS1_PWD_IC5GTXBUF25_LSB 8
+#define BIAS_BIAS1_PWD_IC5GTXBUF25_MASK 0x00000700
+#define BIAS_BIAS1_PWD_IC5GTXBUF25_GET(x) (((x) & BIAS_BIAS1_PWD_IC5GTXBUF25_MASK) >> BIAS_BIAS1_PWD_IC5GTXBUF25_LSB)
+#define BIAS_BIAS1_PWD_IC5GTXBUF25_SET(x) (((x) << BIAS_BIAS1_PWD_IC5GTXBUF25_LSB) & BIAS_BIAS1_PWD_IC5GTXBUF25_MASK)
+#define BIAS_BIAS1_PWD_IC5GQB25_MSB 7
+#define BIAS_BIAS1_PWD_IC5GQB25_LSB 5
+#define BIAS_BIAS1_PWD_IC5GQB25_MASK 0x000000e0
+#define BIAS_BIAS1_PWD_IC5GQB25_GET(x) (((x) & BIAS_BIAS1_PWD_IC5GQB25_MASK) >> BIAS_BIAS1_PWD_IC5GQB25_LSB)
+#define BIAS_BIAS1_PWD_IC5GQB25_SET(x) (((x) << BIAS_BIAS1_PWD_IC5GQB25_LSB) & BIAS_BIAS1_PWD_IC5GQB25_MASK)
+#define BIAS_BIAS1_PWD_IC5GMIXQ25_MSB 4
+#define BIAS_BIAS1_PWD_IC5GMIXQ25_LSB 2
+#define BIAS_BIAS1_PWD_IC5GMIXQ25_MASK 0x0000001c
+#define BIAS_BIAS1_PWD_IC5GMIXQ25_GET(x) (((x) & BIAS_BIAS1_PWD_IC5GMIXQ25_MASK) >> BIAS_BIAS1_PWD_IC5GMIXQ25_LSB)
+#define BIAS_BIAS1_PWD_IC5GMIXQ25_SET(x) (((x) << BIAS_BIAS1_PWD_IC5GMIXQ25_LSB) & BIAS_BIAS1_PWD_IC5GMIXQ25_MASK)
+#define BIAS_BIAS1_SPARE_MSB 1
+#define BIAS_BIAS1_SPARE_LSB 0
+#define BIAS_BIAS1_SPARE_MASK 0x00000003
+#define BIAS_BIAS1_SPARE_GET(x) (((x) & BIAS_BIAS1_SPARE_MASK) >> BIAS_BIAS1_SPARE_LSB)
+#define BIAS_BIAS1_SPARE_SET(x) (((x) << BIAS_BIAS1_SPARE_LSB) & BIAS_BIAS1_SPARE_MASK)
+
+#define BIAS_BIAS2_ADDRESS 0x00000040
+#define BIAS_BIAS2_OFFSET 0x00000040
+#define BIAS_BIAS2_PWD_IC5GMIXI25_MSB 31
+#define BIAS_BIAS2_PWD_IC5GMIXI25_LSB 29
+#define BIAS_BIAS2_PWD_IC5GMIXI25_MASK 0xe0000000
+#define BIAS_BIAS2_PWD_IC5GMIXI25_GET(x) (((x) & BIAS_BIAS2_PWD_IC5GMIXI25_MASK) >> BIAS_BIAS2_PWD_IC5GMIXI25_LSB)
+#define BIAS_BIAS2_PWD_IC5GMIXI25_SET(x) (((x) << BIAS_BIAS2_PWD_IC5GMIXI25_LSB) & BIAS_BIAS2_PWD_IC5GMIXI25_MASK)
+#define BIAS_BIAS2_PWD_IC5GDIV25_MSB 28
+#define BIAS_BIAS2_PWD_IC5GDIV25_LSB 26
+#define BIAS_BIAS2_PWD_IC5GDIV25_MASK 0x1c000000
+#define BIAS_BIAS2_PWD_IC5GDIV25_GET(x) (((x) & BIAS_BIAS2_PWD_IC5GDIV25_MASK) >> BIAS_BIAS2_PWD_IC5GDIV25_LSB)
+#define BIAS_BIAS2_PWD_IC5GDIV25_SET(x) (((x) << BIAS_BIAS2_PWD_IC5GDIV25_LSB) & BIAS_BIAS2_PWD_IC5GDIV25_MASK)
+#define BIAS_BIAS2_PWD_IC5GLOREG25_MSB 25
+#define BIAS_BIAS2_PWD_IC5GLOREG25_LSB 23
+#define BIAS_BIAS2_PWD_IC5GLOREG25_MASK 0x03800000
+#define BIAS_BIAS2_PWD_IC5GLOREG25_GET(x) (((x) & BIAS_BIAS2_PWD_IC5GLOREG25_MASK) >> BIAS_BIAS2_PWD_IC5GLOREG25_LSB)
+#define BIAS_BIAS2_PWD_IC5GLOREG25_SET(x) (((x) << BIAS_BIAS2_PWD_IC5GLOREG25_LSB) & BIAS_BIAS2_PWD_IC5GLOREG25_MASK)
+#define BIAS_BIAS2_PWD_IRPLL25_MSB 22
+#define BIAS_BIAS2_PWD_IRPLL25_LSB 22
+#define BIAS_BIAS2_PWD_IRPLL25_MASK 0x00400000
+#define BIAS_BIAS2_PWD_IRPLL25_GET(x) (((x) & BIAS_BIAS2_PWD_IRPLL25_MASK) >> BIAS_BIAS2_PWD_IRPLL25_LSB)
+#define BIAS_BIAS2_PWD_IRPLL25_SET(x) (((x) << BIAS_BIAS2_PWD_IRPLL25_LSB) & BIAS_BIAS2_PWD_IRPLL25_MASK)
+#define BIAS_BIAS2_PWD_IRXTAL25_MSB 21
+#define BIAS_BIAS2_PWD_IRXTAL25_LSB 19
+#define BIAS_BIAS2_PWD_IRXTAL25_MASK 0x00380000
+#define BIAS_BIAS2_PWD_IRXTAL25_GET(x) (((x) & BIAS_BIAS2_PWD_IRXTAL25_MASK) >> BIAS_BIAS2_PWD_IRXTAL25_LSB)
+#define BIAS_BIAS2_PWD_IRXTAL25_SET(x) (((x) << BIAS_BIAS2_PWD_IRXTAL25_LSB) & BIAS_BIAS2_PWD_IRXTAL25_MASK)
+#define BIAS_BIAS2_PWD_IRTSENS25_MSB 18
+#define BIAS_BIAS2_PWD_IRTSENS25_LSB 16
+#define BIAS_BIAS2_PWD_IRTSENS25_MASK 0x00070000
+#define BIAS_BIAS2_PWD_IRTSENS25_GET(x) (((x) & BIAS_BIAS2_PWD_IRTSENS25_MASK) >> BIAS_BIAS2_PWD_IRTSENS25_LSB)
+#define BIAS_BIAS2_PWD_IRTSENS25_SET(x) (((x) << BIAS_BIAS2_PWD_IRTSENS25_LSB) & BIAS_BIAS2_PWD_IRTSENS25_MASK)
+#define BIAS_BIAS2_PWD_IRTXPC25_MSB 15
+#define BIAS_BIAS2_PWD_IRTXPC25_LSB 13
+#define BIAS_BIAS2_PWD_IRTXPC25_MASK 0x0000e000
+#define BIAS_BIAS2_PWD_IRTXPC25_GET(x) (((x) & BIAS_BIAS2_PWD_IRTXPC25_MASK) >> BIAS_BIAS2_PWD_IRTXPC25_LSB)
+#define BIAS_BIAS2_PWD_IRTXPC25_SET(x) (((x) << BIAS_BIAS2_PWD_IRTXPC25_LSB) & BIAS_BIAS2_PWD_IRTXPC25_MASK)
+#define BIAS_BIAS2_PWD_IRLDO25_MSB 12
+#define BIAS_BIAS2_PWD_IRLDO25_LSB 12
+#define BIAS_BIAS2_PWD_IRLDO25_MASK 0x00001000
+#define BIAS_BIAS2_PWD_IRLDO25_GET(x) (((x) & BIAS_BIAS2_PWD_IRLDO25_MASK) >> BIAS_BIAS2_PWD_IRLDO25_LSB)
+#define BIAS_BIAS2_PWD_IRLDO25_SET(x) (((x) << BIAS_BIAS2_PWD_IRLDO25_LSB) & BIAS_BIAS2_PWD_IRLDO25_MASK)
+#define BIAS_BIAS2_PWD_IR2GTXMIX25_MSB 11
+#define BIAS_BIAS2_PWD_IR2GTXMIX25_LSB 9
+#define BIAS_BIAS2_PWD_IR2GTXMIX25_MASK 0x00000e00
+#define BIAS_BIAS2_PWD_IR2GTXMIX25_GET(x) (((x) & BIAS_BIAS2_PWD_IR2GTXMIX25_MASK) >> BIAS_BIAS2_PWD_IR2GTXMIX25_LSB)
+#define BIAS_BIAS2_PWD_IR2GTXMIX25_SET(x) (((x) << BIAS_BIAS2_PWD_IR2GTXMIX25_LSB) & BIAS_BIAS2_PWD_IR2GTXMIX25_MASK)
+#define BIAS_BIAS2_PWD_IR2GLOREG25_MSB 8
+#define BIAS_BIAS2_PWD_IR2GLOREG25_LSB 6
+#define BIAS_BIAS2_PWD_IR2GLOREG25_MASK 0x000001c0
+#define BIAS_BIAS2_PWD_IR2GLOREG25_GET(x) (((x) & BIAS_BIAS2_PWD_IR2GLOREG25_MASK) >> BIAS_BIAS2_PWD_IR2GLOREG25_LSB)
+#define BIAS_BIAS2_PWD_IR2GLOREG25_SET(x) (((x) << BIAS_BIAS2_PWD_IR2GLOREG25_LSB) & BIAS_BIAS2_PWD_IR2GLOREG25_MASK)
+#define BIAS_BIAS2_PWD_IR2GLNAREG25_MSB 5
+#define BIAS_BIAS2_PWD_IR2GLNAREG25_LSB 3
+#define BIAS_BIAS2_PWD_IR2GLNAREG25_MASK 0x00000038
+#define BIAS_BIAS2_PWD_IR2GLNAREG25_GET(x) (((x) & BIAS_BIAS2_PWD_IR2GLNAREG25_MASK) >> BIAS_BIAS2_PWD_IR2GLNAREG25_LSB)
+#define BIAS_BIAS2_PWD_IR2GLNAREG25_SET(x) (((x) << BIAS_BIAS2_PWD_IR2GLNAREG25_LSB) & BIAS_BIAS2_PWD_IR2GLNAREG25_MASK)
+#define BIAS_BIAS2_PWD_IR5GRFVREF2525_MSB 2
+#define BIAS_BIAS2_PWD_IR5GRFVREF2525_LSB 0
+#define BIAS_BIAS2_PWD_IR5GRFVREF2525_MASK 0x00000007
+#define BIAS_BIAS2_PWD_IR5GRFVREF2525_GET(x) (((x) & BIAS_BIAS2_PWD_IR5GRFVREF2525_MASK) >> BIAS_BIAS2_PWD_IR5GRFVREF2525_LSB)
+#define BIAS_BIAS2_PWD_IR5GRFVREF2525_SET(x) (((x) << BIAS_BIAS2_PWD_IR5GRFVREF2525_LSB) & BIAS_BIAS2_PWD_IR5GRFVREF2525_MASK)
+
+#define BIAS_BIAS3_ADDRESS 0x00000044
+#define BIAS_BIAS3_OFFSET 0x00000044
+#define BIAS_BIAS3_PWD_IR5GTXMIX25_MSB 31
+#define BIAS_BIAS3_PWD_IR5GTXMIX25_LSB 29
+#define BIAS_BIAS3_PWD_IR5GTXMIX25_MASK 0xe0000000
+#define BIAS_BIAS3_PWD_IR5GTXMIX25_GET(x) (((x) & BIAS_BIAS3_PWD_IR5GTXMIX25_MASK) >> BIAS_BIAS3_PWD_IR5GTXMIX25_LSB)
+#define BIAS_BIAS3_PWD_IR5GTXMIX25_SET(x) (((x) << BIAS_BIAS3_PWD_IR5GTXMIX25_LSB) & BIAS_BIAS3_PWD_IR5GTXMIX25_MASK)
+#define BIAS_BIAS3_PWD_IR5GAGC25_MSB 28
+#define BIAS_BIAS3_PWD_IR5GAGC25_LSB 26
+#define BIAS_BIAS3_PWD_IR5GAGC25_MASK 0x1c000000
+#define BIAS_BIAS3_PWD_IR5GAGC25_GET(x) (((x) & BIAS_BIAS3_PWD_IR5GAGC25_MASK) >> BIAS_BIAS3_PWD_IR5GAGC25_LSB)
+#define BIAS_BIAS3_PWD_IR5GAGC25_SET(x) (((x) << BIAS_BIAS3_PWD_IR5GAGC25_LSB) & BIAS_BIAS3_PWD_IR5GAGC25_MASK)
+#define BIAS_BIAS3_PWD_ICDAC50_MSB 25
+#define BIAS_BIAS3_PWD_ICDAC50_LSB 23
+#define BIAS_BIAS3_PWD_ICDAC50_MASK 0x03800000
+#define BIAS_BIAS3_PWD_ICDAC50_GET(x) (((x) & BIAS_BIAS3_PWD_ICDAC50_MASK) >> BIAS_BIAS3_PWD_ICDAC50_LSB)
+#define BIAS_BIAS3_PWD_ICDAC50_SET(x) (((x) << BIAS_BIAS3_PWD_ICDAC50_LSB) & BIAS_BIAS3_PWD_ICDAC50_MASK)
+#define BIAS_BIAS3_PWD_ICSYNTH50_MSB 22
+#define BIAS_BIAS3_PWD_ICSYNTH50_LSB 22
+#define BIAS_BIAS3_PWD_ICSYNTH50_MASK 0x00400000
+#define BIAS_BIAS3_PWD_ICSYNTH50_GET(x) (((x) & BIAS_BIAS3_PWD_ICSYNTH50_MASK) >> BIAS_BIAS3_PWD_ICSYNTH50_LSB)
+#define BIAS_BIAS3_PWD_ICSYNTH50_SET(x) (((x) << BIAS_BIAS3_PWD_ICSYNTH50_LSB) & BIAS_BIAS3_PWD_ICSYNTH50_MASK)
+#define BIAS_BIAS3_PWD_ICBB50_MSB 21
+#define BIAS_BIAS3_PWD_ICBB50_LSB 21
+#define BIAS_BIAS3_PWD_ICBB50_MASK 0x00200000
+#define BIAS_BIAS3_PWD_ICBB50_GET(x) (((x) & BIAS_BIAS3_PWD_ICBB50_MASK) >> BIAS_BIAS3_PWD_ICBB50_LSB)
+#define BIAS_BIAS3_PWD_ICBB50_SET(x) (((x) << BIAS_BIAS3_PWD_ICBB50_LSB) & BIAS_BIAS3_PWD_ICBB50_MASK)
+#define BIAS_BIAS3_PWD_IC2GDIV50_MSB 20
+#define BIAS_BIAS3_PWD_IC2GDIV50_LSB 18
+#define BIAS_BIAS3_PWD_IC2GDIV50_MASK 0x001c0000
+#define BIAS_BIAS3_PWD_IC2GDIV50_GET(x) (((x) & BIAS_BIAS3_PWD_IC2GDIV50_MASK) >> BIAS_BIAS3_PWD_IC2GDIV50_LSB)
+#define BIAS_BIAS3_PWD_IC2GDIV50_SET(x) (((x) << BIAS_BIAS3_PWD_IC2GDIV50_LSB) & BIAS_BIAS3_PWD_IC2GDIV50_MASK)
+#define BIAS_BIAS3_PWD_IRSYNTH50_MSB 17
+#define BIAS_BIAS3_PWD_IRSYNTH50_LSB 17
+#define BIAS_BIAS3_PWD_IRSYNTH50_MASK 0x00020000
+#define BIAS_BIAS3_PWD_IRSYNTH50_GET(x) (((x) & BIAS_BIAS3_PWD_IRSYNTH50_MASK) >> BIAS_BIAS3_PWD_IRSYNTH50_LSB)
+#define BIAS_BIAS3_PWD_IRSYNTH50_SET(x) (((x) << BIAS_BIAS3_PWD_IRSYNTH50_LSB) & BIAS_BIAS3_PWD_IRSYNTH50_MASK)
+#define BIAS_BIAS3_PWD_IRBB50_MSB 16
+#define BIAS_BIAS3_PWD_IRBB50_LSB 16
+#define BIAS_BIAS3_PWD_IRBB50_MASK 0x00010000
+#define BIAS_BIAS3_PWD_IRBB50_GET(x) (((x) & BIAS_BIAS3_PWD_IRBB50_MASK) >> BIAS_BIAS3_PWD_IRBB50_LSB)
+#define BIAS_BIAS3_PWD_IRBB50_SET(x) (((x) << BIAS_BIAS3_PWD_IRBB50_LSB) & BIAS_BIAS3_PWD_IRBB50_MASK)
+#define BIAS_BIAS3_PWD_IC25SPARE1_MSB 15
+#define BIAS_BIAS3_PWD_IC25SPARE1_LSB 13
+#define BIAS_BIAS3_PWD_IC25SPARE1_MASK 0x0000e000
+#define BIAS_BIAS3_PWD_IC25SPARE1_GET(x) (((x) & BIAS_BIAS3_PWD_IC25SPARE1_MASK) >> BIAS_BIAS3_PWD_IC25SPARE1_LSB)
+#define BIAS_BIAS3_PWD_IC25SPARE1_SET(x) (((x) << BIAS_BIAS3_PWD_IC25SPARE1_LSB) & BIAS_BIAS3_PWD_IC25SPARE1_MASK)
+#define BIAS_BIAS3_PWD_IC25SPARE2_MSB 12
+#define BIAS_BIAS3_PWD_IC25SPARE2_LSB 10
+#define BIAS_BIAS3_PWD_IC25SPARE2_MASK 0x00001c00
+#define BIAS_BIAS3_PWD_IC25SPARE2_GET(x) (((x) & BIAS_BIAS3_PWD_IC25SPARE2_MASK) >> BIAS_BIAS3_PWD_IC25SPARE2_LSB)
+#define BIAS_BIAS3_PWD_IC25SPARE2_SET(x) (((x) << BIAS_BIAS3_PWD_IC25SPARE2_LSB) & BIAS_BIAS3_PWD_IC25SPARE2_MASK)
+#define BIAS_BIAS3_PWD_IR25SPARE1_MSB 9
+#define BIAS_BIAS3_PWD_IR25SPARE1_LSB 7
+#define BIAS_BIAS3_PWD_IR25SPARE1_MASK 0x00000380
+#define BIAS_BIAS3_PWD_IR25SPARE1_GET(x) (((x) & BIAS_BIAS3_PWD_IR25SPARE1_MASK) >> BIAS_BIAS3_PWD_IR25SPARE1_LSB)
+#define BIAS_BIAS3_PWD_IR25SPARE1_SET(x) (((x) << BIAS_BIAS3_PWD_IR25SPARE1_LSB) & BIAS_BIAS3_PWD_IR25SPARE1_MASK)
+#define BIAS_BIAS3_PWD_IR25SPARE2_MSB 6
+#define BIAS_BIAS3_PWD_IR25SPARE2_LSB 4
+#define BIAS_BIAS3_PWD_IR25SPARE2_MASK 0x00000070
+#define BIAS_BIAS3_PWD_IR25SPARE2_GET(x) (((x) & BIAS_BIAS3_PWD_IR25SPARE2_MASK) >> BIAS_BIAS3_PWD_IR25SPARE2_LSB)
+#define BIAS_BIAS3_PWD_IR25SPARE2_SET(x) (((x) << BIAS_BIAS3_PWD_IR25SPARE2_LSB) & BIAS_BIAS3_PWD_IR25SPARE2_MASK)
+#define BIAS_BIAS3_PWD_ICDACREG12P5_MSB 3
+#define BIAS_BIAS3_PWD_ICDACREG12P5_LSB 1
+#define BIAS_BIAS3_PWD_ICDACREG12P5_MASK 0x0000000e
+#define BIAS_BIAS3_PWD_ICDACREG12P5_GET(x) (((x) & BIAS_BIAS3_PWD_ICDACREG12P5_MASK) >> BIAS_BIAS3_PWD_ICDACREG12P5_LSB)
+#define BIAS_BIAS3_PWD_ICDACREG12P5_SET(x) (((x) << BIAS_BIAS3_PWD_ICDACREG12P5_LSB) & BIAS_BIAS3_PWD_ICDACREG12P5_MASK)
+#define BIAS_BIAS3_SPARE_MSB 0
+#define BIAS_BIAS3_SPARE_LSB 0
+#define BIAS_BIAS3_SPARE_MASK 0x00000001
+#define BIAS_BIAS3_SPARE_GET(x) (((x) & BIAS_BIAS3_SPARE_MASK) >> BIAS_BIAS3_SPARE_LSB)
+#define BIAS_BIAS3_SPARE_SET(x) (((x) << BIAS_BIAS3_SPARE_LSB) & BIAS_BIAS3_SPARE_MASK)
+
+#define TXPC_TXPC_ADDRESS 0x00000048
+#define TXPC_TXPC_OFFSET 0x00000048
+#define TXPC_TXPC_SELINTPD_MSB 31
+#define TXPC_TXPC_SELINTPD_LSB 31
+#define TXPC_TXPC_SELINTPD_MASK 0x80000000
+#define TXPC_TXPC_SELINTPD_GET(x) (((x) & TXPC_TXPC_SELINTPD_MASK) >> TXPC_TXPC_SELINTPD_LSB)
+#define TXPC_TXPC_SELINTPD_SET(x) (((x) << TXPC_TXPC_SELINTPD_LSB) & TXPC_TXPC_SELINTPD_MASK)
+#define TXPC_TXPC_TEST_MSB 30
+#define TXPC_TXPC_TEST_LSB 30
+#define TXPC_TXPC_TEST_MASK 0x40000000
+#define TXPC_TXPC_TEST_GET(x) (((x) & TXPC_TXPC_TEST_MASK) >> TXPC_TXPC_TEST_LSB)
+#define TXPC_TXPC_TEST_SET(x) (((x) << TXPC_TXPC_TEST_LSB) & TXPC_TXPC_TEST_MASK)
+#define TXPC_TXPC_TESTGAIN_MSB 29
+#define TXPC_TXPC_TESTGAIN_LSB 28
+#define TXPC_TXPC_TESTGAIN_MASK 0x30000000
+#define TXPC_TXPC_TESTGAIN_GET(x) (((x) & TXPC_TXPC_TESTGAIN_MASK) >> TXPC_TXPC_TESTGAIN_LSB)
+#define TXPC_TXPC_TESTGAIN_SET(x) (((x) << TXPC_TXPC_TESTGAIN_LSB) & TXPC_TXPC_TESTGAIN_MASK)
+#define TXPC_TXPC_TESTDAC_MSB 27
+#define TXPC_TXPC_TESTDAC_LSB 22
+#define TXPC_TXPC_TESTDAC_MASK 0x0fc00000
+#define TXPC_TXPC_TESTDAC_GET(x) (((x) & TXPC_TXPC_TESTDAC_MASK) >> TXPC_TXPC_TESTDAC_LSB)
+#define TXPC_TXPC_TESTDAC_SET(x) (((x) << TXPC_TXPC_TESTDAC_LSB) & TXPC_TXPC_TESTDAC_MASK)
+#define TXPC_TXPC_TESTPWDPC_MSB 21
+#define TXPC_TXPC_TESTPWDPC_LSB 21
+#define TXPC_TXPC_TESTPWDPC_MASK 0x00200000
+#define TXPC_TXPC_TESTPWDPC_GET(x) (((x) & TXPC_TXPC_TESTPWDPC_MASK) >> TXPC_TXPC_TESTPWDPC_LSB)
+#define TXPC_TXPC_TESTPWDPC_SET(x) (((x) << TXPC_TXPC_TESTPWDPC_LSB) & TXPC_TXPC_TESTPWDPC_MASK)
+#define TXPC_TXPC_CURHALF_MSB 20
+#define TXPC_TXPC_CURHALF_LSB 20
+#define TXPC_TXPC_CURHALF_MASK 0x00100000
+#define TXPC_TXPC_CURHALF_GET(x) (((x) & TXPC_TXPC_CURHALF_MASK) >> TXPC_TXPC_CURHALF_LSB)
+#define TXPC_TXPC_CURHALF_SET(x) (((x) << TXPC_TXPC_CURHALF_LSB) & TXPC_TXPC_CURHALF_MASK)
+#define TXPC_TXPC_NEGOUT_MSB 19
+#define TXPC_TXPC_NEGOUT_LSB 19
+#define TXPC_TXPC_NEGOUT_MASK 0x00080000
+#define TXPC_TXPC_NEGOUT_GET(x) (((x) & TXPC_TXPC_NEGOUT_MASK) >> TXPC_TXPC_NEGOUT_LSB)
+#define TXPC_TXPC_NEGOUT_SET(x) (((x) << TXPC_TXPC_NEGOUT_LSB) & TXPC_TXPC_NEGOUT_MASK)
+#define TXPC_TXPC_CLKDELAY_MSB 18
+#define TXPC_TXPC_CLKDELAY_LSB 18
+#define TXPC_TXPC_CLKDELAY_MASK 0x00040000
+#define TXPC_TXPC_CLKDELAY_GET(x) (((x) & TXPC_TXPC_CLKDELAY_MASK) >> TXPC_TXPC_CLKDELAY_LSB)
+#define TXPC_TXPC_CLKDELAY_SET(x) (((x) << TXPC_TXPC_CLKDELAY_LSB) & TXPC_TXPC_CLKDELAY_MASK)
+#define TXPC_TXPC_SELMODREF_MSB 17
+#define TXPC_TXPC_SELMODREF_LSB 17
+#define TXPC_TXPC_SELMODREF_MASK 0x00020000
+#define TXPC_TXPC_SELMODREF_GET(x) (((x) & TXPC_TXPC_SELMODREF_MASK) >> TXPC_TXPC_SELMODREF_LSB)
+#define TXPC_TXPC_SELMODREF_SET(x) (((x) << TXPC_TXPC_SELMODREF_LSB) & TXPC_TXPC_SELMODREF_MASK)
+#define TXPC_TXPC_SELCMOUT_MSB 16
+#define TXPC_TXPC_SELCMOUT_LSB 16
+#define TXPC_TXPC_SELCMOUT_MASK 0x00010000
+#define TXPC_TXPC_SELCMOUT_GET(x) (((x) & TXPC_TXPC_SELCMOUT_MASK) >> TXPC_TXPC_SELCMOUT_LSB)
+#define TXPC_TXPC_SELCMOUT_SET(x) (((x) << TXPC_TXPC_SELCMOUT_LSB) & TXPC_TXPC_SELCMOUT_MASK)
+#define TXPC_TXPC_TSMODE_MSB 15
+#define TXPC_TXPC_TSMODE_LSB 14
+#define TXPC_TXPC_TSMODE_MASK 0x0000c000
+#define TXPC_TXPC_TSMODE_GET(x) (((x) & TXPC_TXPC_TSMODE_MASK) >> TXPC_TXPC_TSMODE_LSB)
+#define TXPC_TXPC_TSMODE_SET(x) (((x) << TXPC_TXPC_TSMODE_LSB) & TXPC_TXPC_TSMODE_MASK)
+#define TXPC_TXPC_N_MSB 13
+#define TXPC_TXPC_N_LSB 6
+#define TXPC_TXPC_N_MASK 0x00003fc0
+#define TXPC_TXPC_N_GET(x) (((x) & TXPC_TXPC_N_MASK) >> TXPC_TXPC_N_LSB)
+#define TXPC_TXPC_N_SET(x) (((x) << TXPC_TXPC_N_LSB) & TXPC_TXPC_N_MASK)
+#define TXPC_TXPC_ON1STSYNTHON_MSB 5
+#define TXPC_TXPC_ON1STSYNTHON_LSB 5
+#define TXPC_TXPC_ON1STSYNTHON_MASK 0x00000020
+#define TXPC_TXPC_ON1STSYNTHON_GET(x) (((x) & TXPC_TXPC_ON1STSYNTHON_MASK) >> TXPC_TXPC_ON1STSYNTHON_LSB)
+#define TXPC_TXPC_ON1STSYNTHON_SET(x) (((x) << TXPC_TXPC_ON1STSYNTHON_LSB) & TXPC_TXPC_ON1STSYNTHON_MASK)
+#define TXPC_TXPC_SELINIT_MSB 4
+#define TXPC_TXPC_SELINIT_LSB 3
+#define TXPC_TXPC_SELINIT_MASK 0x00000018
+#define TXPC_TXPC_SELINIT_GET(x) (((x) & TXPC_TXPC_SELINIT_MASK) >> TXPC_TXPC_SELINIT_LSB)
+#define TXPC_TXPC_SELINIT_SET(x) (((x) << TXPC_TXPC_SELINIT_LSB) & TXPC_TXPC_SELINIT_MASK)
+#define TXPC_TXPC_SELCOUNT_MSB 2
+#define TXPC_TXPC_SELCOUNT_LSB 2
+#define TXPC_TXPC_SELCOUNT_MASK 0x00000004
+#define TXPC_TXPC_SELCOUNT_GET(x) (((x) & TXPC_TXPC_SELCOUNT_MASK) >> TXPC_TXPC_SELCOUNT_LSB)
+#define TXPC_TXPC_SELCOUNT_SET(x) (((x) << TXPC_TXPC_SELCOUNT_LSB) & TXPC_TXPC_SELCOUNT_MASK)
+#define TXPC_TXPC_ATBSEL_MSB 1
+#define TXPC_TXPC_ATBSEL_LSB 0
+#define TXPC_TXPC_ATBSEL_MASK 0x00000003
+#define TXPC_TXPC_ATBSEL_GET(x) (((x) & TXPC_TXPC_ATBSEL_MASK) >> TXPC_TXPC_ATBSEL_LSB)
+#define TXPC_TXPC_ATBSEL_SET(x) (((x) << TXPC_TXPC_ATBSEL_LSB) & TXPC_TXPC_ATBSEL_MASK)
+
+#define TXPC_MISC_ADDRESS 0x0000004c
+#define TXPC_MISC_OFFSET 0x0000004c
+#define TXPC_MISC_FLIPBMODE_MSB 31
+#define TXPC_MISC_FLIPBMODE_LSB 31
+#define TXPC_MISC_FLIPBMODE_MASK 0x80000000
+#define TXPC_MISC_FLIPBMODE_GET(x) (((x) & TXPC_MISC_FLIPBMODE_MASK) >> TXPC_MISC_FLIPBMODE_LSB)
+#define TXPC_MISC_FLIPBMODE_SET(x) (((x) << TXPC_MISC_FLIPBMODE_LSB) & TXPC_MISC_FLIPBMODE_MASK)
+#define TXPC_MISC_LEVEL_MSB 30
+#define TXPC_MISC_LEVEL_LSB 29
+#define TXPC_MISC_LEVEL_MASK 0x60000000
+#define TXPC_MISC_LEVEL_GET(x) (((x) & TXPC_MISC_LEVEL_MASK) >> TXPC_MISC_LEVEL_LSB)
+#define TXPC_MISC_LEVEL_SET(x) (((x) << TXPC_MISC_LEVEL_LSB) & TXPC_MISC_LEVEL_MASK)
+#define TXPC_MISC_LDO_TEST_MODE_MSB 28
+#define TXPC_MISC_LDO_TEST_MODE_LSB 28
+#define TXPC_MISC_LDO_TEST_MODE_MASK 0x10000000
+#define TXPC_MISC_LDO_TEST_MODE_GET(x) (((x) & TXPC_MISC_LDO_TEST_MODE_MASK) >> TXPC_MISC_LDO_TEST_MODE_LSB)
+#define TXPC_MISC_LDO_TEST_MODE_SET(x) (((x) << TXPC_MISC_LDO_TEST_MODE_LSB) & TXPC_MISC_LDO_TEST_MODE_MASK)
+#define TXPC_MISC_NOTCXODET_MSB 27
+#define TXPC_MISC_NOTCXODET_LSB 27
+#define TXPC_MISC_NOTCXODET_MASK 0x08000000
+#define TXPC_MISC_NOTCXODET_GET(x) (((x) & TXPC_MISC_NOTCXODET_MASK) >> TXPC_MISC_NOTCXODET_LSB)
+#define TXPC_MISC_NOTCXODET_SET(x) (((x) << TXPC_MISC_NOTCXODET_LSB) & TXPC_MISC_NOTCXODET_MASK)
+#define TXPC_MISC_PWDCLKIND_MSB 26
+#define TXPC_MISC_PWDCLKIND_LSB 26
+#define TXPC_MISC_PWDCLKIND_MASK 0x04000000
+#define TXPC_MISC_PWDCLKIND_GET(x) (((x) & TXPC_MISC_PWDCLKIND_MASK) >> TXPC_MISC_PWDCLKIND_LSB)
+#define TXPC_MISC_PWDCLKIND_SET(x) (((x) << TXPC_MISC_PWDCLKIND_LSB) & TXPC_MISC_PWDCLKIND_MASK)
+#define TXPC_MISC_PWDXINPAD_MSB 25
+#define TXPC_MISC_PWDXINPAD_LSB 25
+#define TXPC_MISC_PWDXINPAD_MASK 0x02000000
+#define TXPC_MISC_PWDXINPAD_GET(x) (((x) & TXPC_MISC_PWDXINPAD_MASK) >> TXPC_MISC_PWDXINPAD_LSB)
+#define TXPC_MISC_PWDXINPAD_SET(x) (((x) << TXPC_MISC_PWDXINPAD_LSB) & TXPC_MISC_PWDXINPAD_MASK)
+#define TXPC_MISC_LOCALBIAS_MSB 24
+#define TXPC_MISC_LOCALBIAS_LSB 24
+#define TXPC_MISC_LOCALBIAS_MASK 0x01000000
+#define TXPC_MISC_LOCALBIAS_GET(x) (((x) & TXPC_MISC_LOCALBIAS_MASK) >> TXPC_MISC_LOCALBIAS_LSB)
+#define TXPC_MISC_LOCALBIAS_SET(x) (((x) << TXPC_MISC_LOCALBIAS_LSB) & TXPC_MISC_LOCALBIAS_MASK)
+#define TXPC_MISC_LOCALBIAS2X_MSB 23
+#define TXPC_MISC_LOCALBIAS2X_LSB 23
+#define TXPC_MISC_LOCALBIAS2X_MASK 0x00800000
+#define TXPC_MISC_LOCALBIAS2X_GET(x) (((x) & TXPC_MISC_LOCALBIAS2X_MASK) >> TXPC_MISC_LOCALBIAS2X_LSB)
+#define TXPC_MISC_LOCALBIAS2X_SET(x) (((x) << TXPC_MISC_LOCALBIAS2X_LSB) & TXPC_MISC_LOCALBIAS2X_MASK)
+#define TXPC_MISC_SELTSP_MSB 22
+#define TXPC_MISC_SELTSP_LSB 22
+#define TXPC_MISC_SELTSP_MASK 0x00400000
+#define TXPC_MISC_SELTSP_GET(x) (((x) & TXPC_MISC_SELTSP_MASK) >> TXPC_MISC_SELTSP_LSB)
+#define TXPC_MISC_SELTSP_SET(x) (((x) << TXPC_MISC_SELTSP_LSB) & TXPC_MISC_SELTSP_MASK)
+#define TXPC_MISC_SELTSN_MSB 21
+#define TXPC_MISC_SELTSN_LSB 21
+#define TXPC_MISC_SELTSN_MASK 0x00200000
+#define TXPC_MISC_SELTSN_GET(x) (((x) & TXPC_MISC_SELTSN_MASK) >> TXPC_MISC_SELTSN_LSB)
+#define TXPC_MISC_SELTSN_SET(x) (((x) << TXPC_MISC_SELTSN_LSB) & TXPC_MISC_SELTSN_MASK)
+#define TXPC_MISC_SPARE_A_MSB 20
+#define TXPC_MISC_SPARE_A_LSB 18
+#define TXPC_MISC_SPARE_A_MASK 0x001c0000
+#define TXPC_MISC_SPARE_A_GET(x) (((x) & TXPC_MISC_SPARE_A_MASK) >> TXPC_MISC_SPARE_A_LSB)
+#define TXPC_MISC_SPARE_A_SET(x) (((x) << TXPC_MISC_SPARE_A_LSB) & TXPC_MISC_SPARE_A_MASK)
+#define TXPC_MISC_DECOUT_MSB 17
+#define TXPC_MISC_DECOUT_LSB 8
+#define TXPC_MISC_DECOUT_MASK 0x0003ff00
+#define TXPC_MISC_DECOUT_GET(x) (((x) & TXPC_MISC_DECOUT_MASK) >> TXPC_MISC_DECOUT_LSB)
+#define TXPC_MISC_DECOUT_SET(x) (((x) << TXPC_MISC_DECOUT_LSB) & TXPC_MISC_DECOUT_MASK)
+#define TXPC_MISC_XTALDIV_MSB 7
+#define TXPC_MISC_XTALDIV_LSB 6
+#define TXPC_MISC_XTALDIV_MASK 0x000000c0
+#define TXPC_MISC_XTALDIV_GET(x) (((x) & TXPC_MISC_XTALDIV_MASK) >> TXPC_MISC_XTALDIV_LSB)
+#define TXPC_MISC_XTALDIV_SET(x) (((x) << TXPC_MISC_XTALDIV_LSB) & TXPC_MISC_XTALDIV_MASK)
+#define TXPC_MISC_SPARE_MSB 5
+#define TXPC_MISC_SPARE_LSB 0
+#define TXPC_MISC_SPARE_MASK 0x0000003f
+#define TXPC_MISC_SPARE_GET(x) (((x) & TXPC_MISC_SPARE_MASK) >> TXPC_MISC_SPARE_LSB)
+#define TXPC_MISC_SPARE_SET(x) (((x) << TXPC_MISC_SPARE_LSB) & TXPC_MISC_SPARE_MASK)
+
+#define RXTXBB_RXTXBB1_ADDRESS 0x00000050
+#define RXTXBB_RXTXBB1_OFFSET 0x00000050
+#define RXTXBB_RXTXBB1_SPARE_MSB 31
+#define RXTXBB_RXTXBB1_SPARE_LSB 19
+#define RXTXBB_RXTXBB1_SPARE_MASK 0xfff80000
+#define RXTXBB_RXTXBB1_SPARE_GET(x) (((x) & RXTXBB_RXTXBB1_SPARE_MASK) >> RXTXBB_RXTXBB1_SPARE_LSB)
+#define RXTXBB_RXTXBB1_SPARE_SET(x) (((x) << RXTXBB_RXTXBB1_SPARE_LSB) & RXTXBB_RXTXBB1_SPARE_MASK)
+#define RXTXBB_RXTXBB1_FNOTCH_MSB 18
+#define RXTXBB_RXTXBB1_FNOTCH_LSB 17
+#define RXTXBB_RXTXBB1_FNOTCH_MASK 0x00060000
+#define RXTXBB_RXTXBB1_FNOTCH_GET(x) (((x) & RXTXBB_RXTXBB1_FNOTCH_MASK) >> RXTXBB_RXTXBB1_FNOTCH_LSB)
+#define RXTXBB_RXTXBB1_FNOTCH_SET(x) (((x) << RXTXBB_RXTXBB1_FNOTCH_LSB) & RXTXBB_RXTXBB1_FNOTCH_MASK)
+#define RXTXBB_RXTXBB1_SEL_ATB_MSB 16
+#define RXTXBB_RXTXBB1_SEL_ATB_LSB 9
+#define RXTXBB_RXTXBB1_SEL_ATB_MASK 0x0001fe00
+#define RXTXBB_RXTXBB1_SEL_ATB_GET(x) (((x) & RXTXBB_RXTXBB1_SEL_ATB_MASK) >> RXTXBB_RXTXBB1_SEL_ATB_LSB)
+#define RXTXBB_RXTXBB1_SEL_ATB_SET(x) (((x) << RXTXBB_RXTXBB1_SEL_ATB_LSB) & RXTXBB_RXTXBB1_SEL_ATB_MASK)
+#define RXTXBB_RXTXBB1_PDDACINTERFACE_MSB 8
+#define RXTXBB_RXTXBB1_PDDACINTERFACE_LSB 8
+#define RXTXBB_RXTXBB1_PDDACINTERFACE_MASK 0x00000100
+#define RXTXBB_RXTXBB1_PDDACINTERFACE_GET(x) (((x) & RXTXBB_RXTXBB1_PDDACINTERFACE_MASK) >> RXTXBB_RXTXBB1_PDDACINTERFACE_LSB)
+#define RXTXBB_RXTXBB1_PDDACINTERFACE_SET(x) (((x) << RXTXBB_RXTXBB1_PDDACINTERFACE_LSB) & RXTXBB_RXTXBB1_PDDACINTERFACE_MASK)
+#define RXTXBB_RXTXBB1_PDV2I_MSB 7
+#define RXTXBB_RXTXBB1_PDV2I_LSB 7
+#define RXTXBB_RXTXBB1_PDV2I_MASK 0x00000080
+#define RXTXBB_RXTXBB1_PDV2I_GET(x) (((x) & RXTXBB_RXTXBB1_PDV2I_MASK) >> RXTXBB_RXTXBB1_PDV2I_LSB)
+#define RXTXBB_RXTXBB1_PDV2I_SET(x) (((x) << RXTXBB_RXTXBB1_PDV2I_LSB) & RXTXBB_RXTXBB1_PDV2I_MASK)
+#define RXTXBB_RXTXBB1_PDI2V_MSB 6
+#define RXTXBB_RXTXBB1_PDI2V_LSB 6
+#define RXTXBB_RXTXBB1_PDI2V_MASK 0x00000040
+#define RXTXBB_RXTXBB1_PDI2V_GET(x) (((x) & RXTXBB_RXTXBB1_PDI2V_MASK) >> RXTXBB_RXTXBB1_PDI2V_LSB)
+#define RXTXBB_RXTXBB1_PDI2V_SET(x) (((x) << RXTXBB_RXTXBB1_PDI2V_LSB) & RXTXBB_RXTXBB1_PDI2V_MASK)
+#define RXTXBB_RXTXBB1_PDRXTXBB_MSB 5
+#define RXTXBB_RXTXBB1_PDRXTXBB_LSB 5
+#define RXTXBB_RXTXBB1_PDRXTXBB_MASK 0x00000020
+#define RXTXBB_RXTXBB1_PDRXTXBB_GET(x) (((x) & RXTXBB_RXTXBB1_PDRXTXBB_MASK) >> RXTXBB_RXTXBB1_PDRXTXBB_LSB)
+#define RXTXBB_RXTXBB1_PDRXTXBB_SET(x) (((x) << RXTXBB_RXTXBB1_PDRXTXBB_LSB) & RXTXBB_RXTXBB1_PDRXTXBB_MASK)
+#define RXTXBB_RXTXBB1_PDOFFSETLOQ_MSB 4
+#define RXTXBB_RXTXBB1_PDOFFSETLOQ_LSB 4
+#define RXTXBB_RXTXBB1_PDOFFSETLOQ_MASK 0x00000010
+#define RXTXBB_RXTXBB1_PDOFFSETLOQ_GET(x) (((x) & RXTXBB_RXTXBB1_PDOFFSETLOQ_MASK) >> RXTXBB_RXTXBB1_PDOFFSETLOQ_LSB)
+#define RXTXBB_RXTXBB1_PDOFFSETLOQ_SET(x) (((x) << RXTXBB_RXTXBB1_PDOFFSETLOQ_LSB) & RXTXBB_RXTXBB1_PDOFFSETLOQ_MASK)
+#define RXTXBB_RXTXBB1_PDOFFSETHIQ_MSB 3
+#define RXTXBB_RXTXBB1_PDOFFSETHIQ_LSB 3
+#define RXTXBB_RXTXBB1_PDOFFSETHIQ_MASK 0x00000008
+#define RXTXBB_RXTXBB1_PDOFFSETHIQ_GET(x) (((x) & RXTXBB_RXTXBB1_PDOFFSETHIQ_MASK) >> RXTXBB_RXTXBB1_PDOFFSETHIQ_LSB)
+#define RXTXBB_RXTXBB1_PDOFFSETHIQ_SET(x) (((x) << RXTXBB_RXTXBB1_PDOFFSETHIQ_LSB) & RXTXBB_RXTXBB1_PDOFFSETHIQ_MASK)
+#define RXTXBB_RXTXBB1_PDOFFSETI2V_MSB 2
+#define RXTXBB_RXTXBB1_PDOFFSETI2V_LSB 2
+#define RXTXBB_RXTXBB1_PDOFFSETI2V_MASK 0x00000004
+#define RXTXBB_RXTXBB1_PDOFFSETI2V_GET(x) (((x) & RXTXBB_RXTXBB1_PDOFFSETI2V_MASK) >> RXTXBB_RXTXBB1_PDOFFSETI2V_LSB)
+#define RXTXBB_RXTXBB1_PDOFFSETI2V_SET(x) (((x) << RXTXBB_RXTXBB1_PDOFFSETI2V_LSB) & RXTXBB_RXTXBB1_PDOFFSETI2V_MASK)
+#define RXTXBB_RXTXBB1_PDLOQ_MSB 1
+#define RXTXBB_RXTXBB1_PDLOQ_LSB 1
+#define RXTXBB_RXTXBB1_PDLOQ_MASK 0x00000002
+#define RXTXBB_RXTXBB1_PDLOQ_GET(x) (((x) & RXTXBB_RXTXBB1_PDLOQ_MASK) >> RXTXBB_RXTXBB1_PDLOQ_LSB)
+#define RXTXBB_RXTXBB1_PDLOQ_SET(x) (((x) << RXTXBB_RXTXBB1_PDLOQ_LSB) & RXTXBB_RXTXBB1_PDLOQ_MASK)
+#define RXTXBB_RXTXBB1_PDHIQ_MSB 0
+#define RXTXBB_RXTXBB1_PDHIQ_LSB 0
+#define RXTXBB_RXTXBB1_PDHIQ_MASK 0x00000001
+#define RXTXBB_RXTXBB1_PDHIQ_GET(x) (((x) & RXTXBB_RXTXBB1_PDHIQ_MASK) >> RXTXBB_RXTXBB1_PDHIQ_LSB)
+#define RXTXBB_RXTXBB1_PDHIQ_SET(x) (((x) << RXTXBB_RXTXBB1_PDHIQ_LSB) & RXTXBB_RXTXBB1_PDHIQ_MASK)
+
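Decoding goes the other way: _GET() masks a field out of a raw readback and shifts it down to bit 0. A short sketch against the RXTXBB1 register above (the raw value is made up for illustration, not a real chip dump):

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	/* Hypothetical raw readback of RXTXBB1, for illustration only. */
	uint32_t raw = 0x00060021;

	printf("FNOTCH   = %u\n", (unsigned)RXTXBB_RXTXBB1_FNOTCH_GET(raw));   /* 3 */
	printf("PDRXTXBB = %u\n", (unsigned)RXTXBB_RXTXBB1_PDRXTXBB_GET(raw)); /* 1 */
	printf("PDHIQ    = %u\n", (unsigned)RXTXBB_RXTXBB1_PDHIQ_GET(raw));    /* 1 */
	return 0;
}
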
+#define RXTXBB_RXTXBB2_ADDRESS 0x00000054
+#define RXTXBB_RXTXBB2_OFFSET 0x00000054
+#define RXTXBB_RXTXBB2_IBN_37P5_OSHI_CTRL_MSB 31
+#define RXTXBB_RXTXBB2_IBN_37P5_OSHI_CTRL_LSB 29
+#define RXTXBB_RXTXBB2_IBN_37P5_OSHI_CTRL_MASK 0xe0000000
+#define RXTXBB_RXTXBB2_IBN_37P5_OSHI_CTRL_GET(x) (((x) & RXTXBB_RXTXBB2_IBN_37P5_OSHI_CTRL_MASK) >> RXTXBB_RXTXBB2_IBN_37P5_OSHI_CTRL_LSB)
+#define RXTXBB_RXTXBB2_IBN_37P5_OSHI_CTRL_SET(x) (((x) << RXTXBB_RXTXBB2_IBN_37P5_OSHI_CTRL_LSB) & RXTXBB_RXTXBB2_IBN_37P5_OSHI_CTRL_MASK)
+#define RXTXBB_RXTXBB2_IBN_37P5_OSLO_CTRL_MSB 28
+#define RXTXBB_RXTXBB2_IBN_37P5_OSLO_CTRL_LSB 26
+#define RXTXBB_RXTXBB2_IBN_37P5_OSLO_CTRL_MASK 0x1c000000
+#define RXTXBB_RXTXBB2_IBN_37P5_OSLO_CTRL_GET(x) (((x) & RXTXBB_RXTXBB2_IBN_37P5_OSLO_CTRL_MASK) >> RXTXBB_RXTXBB2_IBN_37P5_OSLO_CTRL_LSB)
+#define RXTXBB_RXTXBB2_IBN_37P5_OSLO_CTRL_SET(x) (((x) << RXTXBB_RXTXBB2_IBN_37P5_OSLO_CTRL_LSB) & RXTXBB_RXTXBB2_IBN_37P5_OSLO_CTRL_MASK)
+#define RXTXBB_RXTXBB2_IBN_37P5_OSI2V_CTRL_MSB 25
+#define RXTXBB_RXTXBB2_IBN_37P5_OSI2V_CTRL_LSB 23
+#define RXTXBB_RXTXBB2_IBN_37P5_OSI2V_CTRL_MASK 0x03800000
+#define RXTXBB_RXTXBB2_IBN_37P5_OSI2V_CTRL_GET(x) (((x) & RXTXBB_RXTXBB2_IBN_37P5_OSI2V_CTRL_MASK) >> RXTXBB_RXTXBB2_IBN_37P5_OSI2V_CTRL_LSB)
+#define RXTXBB_RXTXBB2_IBN_37P5_OSI2V_CTRL_SET(x) (((x) << RXTXBB_RXTXBB2_IBN_37P5_OSI2V_CTRL_LSB) & RXTXBB_RXTXBB2_IBN_37P5_OSI2V_CTRL_MASK)
+#define RXTXBB_RXTXBB2_SPARE_MSB 22
+#define RXTXBB_RXTXBB2_SPARE_LSB 21
+#define RXTXBB_RXTXBB2_SPARE_MASK 0x00600000
+#define RXTXBB_RXTXBB2_SPARE_GET(x) (((x) & RXTXBB_RXTXBB2_SPARE_MASK) >> RXTXBB_RXTXBB2_SPARE_LSB)
+#define RXTXBB_RXTXBB2_SPARE_SET(x) (((x) << RXTXBB_RXTXBB2_SPARE_LSB) & RXTXBB_RXTXBB2_SPARE_MASK)
+#define RXTXBB_RXTXBB2_SHORTBUFFER_MSB 20
+#define RXTXBB_RXTXBB2_SHORTBUFFER_LSB 20
+#define RXTXBB_RXTXBB2_SHORTBUFFER_MASK 0x00100000
+#define RXTXBB_RXTXBB2_SHORTBUFFER_GET(x) (((x) & RXTXBB_RXTXBB2_SHORTBUFFER_MASK) >> RXTXBB_RXTXBB2_SHORTBUFFER_LSB)
+#define RXTXBB_RXTXBB2_SHORTBUFFER_SET(x) (((x) << RXTXBB_RXTXBB2_SHORTBUFFER_LSB) & RXTXBB_RXTXBB2_SHORTBUFFER_MASK)
+#define RXTXBB_RXTXBB2_SELBUFFER_MSB 19
+#define RXTXBB_RXTXBB2_SELBUFFER_LSB 19
+#define RXTXBB_RXTXBB2_SELBUFFER_MASK 0x00080000
+#define RXTXBB_RXTXBB2_SELBUFFER_GET(x) (((x) & RXTXBB_RXTXBB2_SELBUFFER_MASK) >> RXTXBB_RXTXBB2_SELBUFFER_LSB)
+#define RXTXBB_RXTXBB2_SELBUFFER_SET(x) (((x) << RXTXBB_RXTXBB2_SELBUFFER_LSB) & RXTXBB_RXTXBB2_SELBUFFER_MASK)
+#define RXTXBB_RXTXBB2_SEL_DAC_TEST_MSB 18
+#define RXTXBB_RXTXBB2_SEL_DAC_TEST_LSB 18
+#define RXTXBB_RXTXBB2_SEL_DAC_TEST_MASK 0x00040000
+#define RXTXBB_RXTXBB2_SEL_DAC_TEST_GET(x) (((x) & RXTXBB_RXTXBB2_SEL_DAC_TEST_MASK) >> RXTXBB_RXTXBB2_SEL_DAC_TEST_LSB)
+#define RXTXBB_RXTXBB2_SEL_DAC_TEST_SET(x) (((x) << RXTXBB_RXTXBB2_SEL_DAC_TEST_LSB) & RXTXBB_RXTXBB2_SEL_DAC_TEST_MASK)
+#define RXTXBB_RXTXBB2_SEL_LOQ_TEST_MSB 17
+#define RXTXBB_RXTXBB2_SEL_LOQ_TEST_LSB 17
+#define RXTXBB_RXTXBB2_SEL_LOQ_TEST_MASK 0x00020000
+#define RXTXBB_RXTXBB2_SEL_LOQ_TEST_GET(x) (((x) & RXTXBB_RXTXBB2_SEL_LOQ_TEST_MASK) >> RXTXBB_RXTXBB2_SEL_LOQ_TEST_LSB)
+#define RXTXBB_RXTXBB2_SEL_LOQ_TEST_SET(x) (((x) << RXTXBB_RXTXBB2_SEL_LOQ_TEST_LSB) & RXTXBB_RXTXBB2_SEL_LOQ_TEST_MASK)
+#define RXTXBB_RXTXBB2_SEL_HIQ_TEST_MSB 16
+#define RXTXBB_RXTXBB2_SEL_HIQ_TEST_LSB 16
+#define RXTXBB_RXTXBB2_SEL_HIQ_TEST_MASK 0x00010000
+#define RXTXBB_RXTXBB2_SEL_HIQ_TEST_GET(x) (((x) & RXTXBB_RXTXBB2_SEL_HIQ_TEST_MASK) >> RXTXBB_RXTXBB2_SEL_HIQ_TEST_LSB)
+#define RXTXBB_RXTXBB2_SEL_HIQ_TEST_SET(x) (((x) << RXTXBB_RXTXBB2_SEL_HIQ_TEST_LSB) & RXTXBB_RXTXBB2_SEL_HIQ_TEST_MASK)
+#define RXTXBB_RXTXBB2_SEL_I2V_TEST_MSB 15
+#define RXTXBB_RXTXBB2_SEL_I2V_TEST_LSB 15
+#define RXTXBB_RXTXBB2_SEL_I2V_TEST_MASK 0x00008000
+#define RXTXBB_RXTXBB2_SEL_I2V_TEST_GET(x) (((x) & RXTXBB_RXTXBB2_SEL_I2V_TEST_MASK) >> RXTXBB_RXTXBB2_SEL_I2V_TEST_LSB)
+#define RXTXBB_RXTXBB2_SEL_I2V_TEST_SET(x) (((x) << RXTXBB_RXTXBB2_SEL_I2V_TEST_LSB) & RXTXBB_RXTXBB2_SEL_I2V_TEST_MASK)
+#define RXTXBB_RXTXBB2_CMSEL_MSB 14
+#define RXTXBB_RXTXBB2_CMSEL_LSB 13
+#define RXTXBB_RXTXBB2_CMSEL_MASK 0x00006000
+#define RXTXBB_RXTXBB2_CMSEL_GET(x) (((x) & RXTXBB_RXTXBB2_CMSEL_MASK) >> RXTXBB_RXTXBB2_CMSEL_LSB)
+#define RXTXBB_RXTXBB2_CMSEL_SET(x) (((x) << RXTXBB_RXTXBB2_CMSEL_LSB) & RXTXBB_RXTXBB2_CMSEL_MASK)
+#define RXTXBB_RXTXBB2_FILTERFC_MSB 12
+#define RXTXBB_RXTXBB2_FILTERFC_LSB 8
+#define RXTXBB_RXTXBB2_FILTERFC_MASK 0x00001f00
+#define RXTXBB_RXTXBB2_FILTERFC_GET(x) (((x) & RXTXBB_RXTXBB2_FILTERFC_MASK) >> RXTXBB_RXTXBB2_FILTERFC_LSB)
+#define RXTXBB_RXTXBB2_FILTERFC_SET(x) (((x) << RXTXBB_RXTXBB2_FILTERFC_LSB) & RXTXBB_RXTXBB2_FILTERFC_MASK)
+#define RXTXBB_RXTXBB2_LOCALFILTERTUNING_MSB 7
+#define RXTXBB_RXTXBB2_LOCALFILTERTUNING_LSB 7
+#define RXTXBB_RXTXBB2_LOCALFILTERTUNING_MASK 0x00000080
+#define RXTXBB_RXTXBB2_LOCALFILTERTUNING_GET(x) (((x) & RXTXBB_RXTXBB2_LOCALFILTERTUNING_MASK) >> RXTXBB_RXTXBB2_LOCALFILTERTUNING_LSB)
+#define RXTXBB_RXTXBB2_LOCALFILTERTUNING_SET(x) (((x) << RXTXBB_RXTXBB2_LOCALFILTERTUNING_LSB) & RXTXBB_RXTXBB2_LOCALFILTERTUNING_MASK)
+#define RXTXBB_RXTXBB2_FILTERDOUBLEBW_MSB 6
+#define RXTXBB_RXTXBB2_FILTERDOUBLEBW_LSB 6
+#define RXTXBB_RXTXBB2_FILTERDOUBLEBW_MASK 0x00000040
+#define RXTXBB_RXTXBB2_FILTERDOUBLEBW_GET(x) (((x) & RXTXBB_RXTXBB2_FILTERDOUBLEBW_MASK) >> RXTXBB_RXTXBB2_FILTERDOUBLEBW_LSB)
+#define RXTXBB_RXTXBB2_FILTERDOUBLEBW_SET(x) (((x) << RXTXBB_RXTXBB2_FILTERDOUBLEBW_LSB) & RXTXBB_RXTXBB2_FILTERDOUBLEBW_MASK)
+#define RXTXBB_RXTXBB2_PATH2HIQ_EN_MSB 5
+#define RXTXBB_RXTXBB2_PATH2HIQ_EN_LSB 5
+#define RXTXBB_RXTXBB2_PATH2HIQ_EN_MASK 0x00000020
+#define RXTXBB_RXTXBB2_PATH2HIQ_EN_GET(x) (((x) & RXTXBB_RXTXBB2_PATH2HIQ_EN_MASK) >> RXTXBB_RXTXBB2_PATH2HIQ_EN_LSB)
+#define RXTXBB_RXTXBB2_PATH2HIQ_EN_SET(x) (((x) << RXTXBB_RXTXBB2_PATH2HIQ_EN_LSB) & RXTXBB_RXTXBB2_PATH2HIQ_EN_MASK)
+#define RXTXBB_RXTXBB2_PATH1HIQ_EN_MSB 4
+#define RXTXBB_RXTXBB2_PATH1HIQ_EN_LSB 4
+#define RXTXBB_RXTXBB2_PATH1HIQ_EN_MASK 0x00000010
+#define RXTXBB_RXTXBB2_PATH1HIQ_EN_GET(x) (((x) & RXTXBB_RXTXBB2_PATH1HIQ_EN_MASK) >> RXTXBB_RXTXBB2_PATH1HIQ_EN_LSB)
+#define RXTXBB_RXTXBB2_PATH1HIQ_EN_SET(x) (((x) << RXTXBB_RXTXBB2_PATH1HIQ_EN_LSB) & RXTXBB_RXTXBB2_PATH1HIQ_EN_MASK)
+#define RXTXBB_RXTXBB2_PATH3LOQ_EN_MSB 3
+#define RXTXBB_RXTXBB2_PATH3LOQ_EN_LSB 3
+#define RXTXBB_RXTXBB2_PATH3LOQ_EN_MASK 0x00000008
+#define RXTXBB_RXTXBB2_PATH3LOQ_EN_GET(x) (((x) & RXTXBB_RXTXBB2_PATH3LOQ_EN_MASK) >> RXTXBB_RXTXBB2_PATH3LOQ_EN_LSB)
+#define RXTXBB_RXTXBB2_PATH3LOQ_EN_SET(x) (((x) << RXTXBB_RXTXBB2_PATH3LOQ_EN_LSB) & RXTXBB_RXTXBB2_PATH3LOQ_EN_MASK)
+#define RXTXBB_RXTXBB2_PATH2LOQ_EN_MSB 2
+#define RXTXBB_RXTXBB2_PATH2LOQ_EN_LSB 2
+#define RXTXBB_RXTXBB2_PATH2LOQ_EN_MASK 0x00000004
+#define RXTXBB_RXTXBB2_PATH2LOQ_EN_GET(x) (((x) & RXTXBB_RXTXBB2_PATH2LOQ_EN_MASK) >> RXTXBB_RXTXBB2_PATH2LOQ_EN_LSB)
+#define RXTXBB_RXTXBB2_PATH2LOQ_EN_SET(x) (((x) << RXTXBB_RXTXBB2_PATH2LOQ_EN_LSB) & RXTXBB_RXTXBB2_PATH2LOQ_EN_MASK)
+#define RXTXBB_RXTXBB2_PATH1LOQ_EN_MSB 1
+#define RXTXBB_RXTXBB2_PATH1LOQ_EN_LSB 1
+#define RXTXBB_RXTXBB2_PATH1LOQ_EN_MASK 0x00000002
+#define RXTXBB_RXTXBB2_PATH1LOQ_EN_GET(x) (((x) & RXTXBB_RXTXBB2_PATH1LOQ_EN_MASK) >> RXTXBB_RXTXBB2_PATH1LOQ_EN_LSB)
+#define RXTXBB_RXTXBB2_PATH1LOQ_EN_SET(x) (((x) << RXTXBB_RXTXBB2_PATH1LOQ_EN_LSB) & RXTXBB_RXTXBB2_PATH1LOQ_EN_MASK)
+#define RXTXBB_RXTXBB2_PATH_OVERRIDE_MSB 0
+#define RXTXBB_RXTXBB2_PATH_OVERRIDE_LSB 0
+#define RXTXBB_RXTXBB2_PATH_OVERRIDE_MASK 0x00000001
+#define RXTXBB_RXTXBB2_PATH_OVERRIDE_GET(x) (((x) & RXTXBB_RXTXBB2_PATH_OVERRIDE_MASK) >> RXTXBB_RXTXBB2_PATH_OVERRIDE_LSB)
+#define RXTXBB_RXTXBB2_PATH_OVERRIDE_SET(x) (((x) << RXTXBB_RXTXBB2_PATH_OVERRIDE_LSB) & RXTXBB_RXTXBB2_PATH_OVERRIDE_MASK)
+
+#define RXTXBB_RXTXBB3_ADDRESS 0x00000058
+#define RXTXBB_RXTXBB3_OFFSET 0x00000058
+#define RXTXBB_RXTXBB3_SPARE_MSB 31
+#define RXTXBB_RXTXBB3_SPARE_LSB 27
+#define RXTXBB_RXTXBB3_SPARE_MASK 0xf8000000
+#define RXTXBB_RXTXBB3_SPARE_GET(x) (((x) & RXTXBB_RXTXBB3_SPARE_MASK) >> RXTXBB_RXTXBB3_SPARE_LSB)
+#define RXTXBB_RXTXBB3_SPARE_SET(x) (((x) << RXTXBB_RXTXBB3_SPARE_LSB) & RXTXBB_RXTXBB3_SPARE_MASK)
+#define RXTXBB_RXTXBB3_IBN_25U_CM_BUFAMP_CTRL_MSB 26
+#define RXTXBB_RXTXBB3_IBN_25U_CM_BUFAMP_CTRL_LSB 24
+#define RXTXBB_RXTXBB3_IBN_25U_CM_BUFAMP_CTRL_MASK 0x07000000
+#define RXTXBB_RXTXBB3_IBN_25U_CM_BUFAMP_CTRL_GET(x) (((x) & RXTXBB_RXTXBB3_IBN_25U_CM_BUFAMP_CTRL_MASK) >> RXTXBB_RXTXBB3_IBN_25U_CM_BUFAMP_CTRL_LSB)
+#define RXTXBB_RXTXBB3_IBN_25U_CM_BUFAMP_CTRL_SET(x) (((x) << RXTXBB_RXTXBB3_IBN_25U_CM_BUFAMP_CTRL_LSB) & RXTXBB_RXTXBB3_IBN_25U_CM_BUFAMP_CTRL_MASK)
+#define RXTXBB_RXTXBB3_IBN_25U_BKV2I_CTRL_MSB 23
+#define RXTXBB_RXTXBB3_IBN_25U_BKV2I_CTRL_LSB 21
+#define RXTXBB_RXTXBB3_IBN_25U_BKV2I_CTRL_MASK 0x00e00000
+#define RXTXBB_RXTXBB3_IBN_25U_BKV2I_CTRL_GET(x) (((x) & RXTXBB_RXTXBB3_IBN_25U_BKV2I_CTRL_MASK) >> RXTXBB_RXTXBB3_IBN_25U_BKV2I_CTRL_LSB)
+#define RXTXBB_RXTXBB3_IBN_25U_BKV2I_CTRL_SET(x) (((x) << RXTXBB_RXTXBB3_IBN_25U_BKV2I_CTRL_LSB) & RXTXBB_RXTXBB3_IBN_25U_BKV2I_CTRL_MASK)
+#define RXTXBB_RXTXBB3_IBN_25U_I2V_CTRL_MSB 20
+#define RXTXBB_RXTXBB3_IBN_25U_I2V_CTRL_LSB 18
+#define RXTXBB_RXTXBB3_IBN_25U_I2V_CTRL_MASK 0x001c0000
+#define RXTXBB_RXTXBB3_IBN_25U_I2V_CTRL_GET(x) (((x) & RXTXBB_RXTXBB3_IBN_25U_I2V_CTRL_MASK) >> RXTXBB_RXTXBB3_IBN_25U_I2V_CTRL_LSB)
+#define RXTXBB_RXTXBB3_IBN_25U_I2V_CTRL_SET(x) (((x) << RXTXBB_RXTXBB3_IBN_25U_I2V_CTRL_LSB) & RXTXBB_RXTXBB3_IBN_25U_I2V_CTRL_MASK)
+#define RXTXBB_RXTXBB3_IBN_25U_HI1_CTRL_MSB 17
+#define RXTXBB_RXTXBB3_IBN_25U_HI1_CTRL_LSB 15
+#define RXTXBB_RXTXBB3_IBN_25U_HI1_CTRL_MASK 0x00038000
+#define RXTXBB_RXTXBB3_IBN_25U_HI1_CTRL_GET(x) (((x) & RXTXBB_RXTXBB3_IBN_25U_HI1_CTRL_MASK) >> RXTXBB_RXTXBB3_IBN_25U_HI1_CTRL_LSB)
+#define RXTXBB_RXTXBB3_IBN_25U_HI1_CTRL_SET(x) (((x) << RXTXBB_RXTXBB3_IBN_25U_HI1_CTRL_LSB) & RXTXBB_RXTXBB3_IBN_25U_HI1_CTRL_MASK)
+#define RXTXBB_RXTXBB3_IBN_25U_HI2_CTRL_MSB 14
+#define RXTXBB_RXTXBB3_IBN_25U_HI2_CTRL_LSB 12
+#define RXTXBB_RXTXBB3_IBN_25U_HI2_CTRL_MASK 0x00007000
+#define RXTXBB_RXTXBB3_IBN_25U_HI2_CTRL_GET(x) (((x) & RXTXBB_RXTXBB3_IBN_25U_HI2_CTRL_MASK) >> RXTXBB_RXTXBB3_IBN_25U_HI2_CTRL_LSB)
+#define RXTXBB_RXTXBB3_IBN_25U_HI2_CTRL_SET(x) (((x) << RXTXBB_RXTXBB3_IBN_25U_HI2_CTRL_LSB) & RXTXBB_RXTXBB3_IBN_25U_HI2_CTRL_MASK)
+#define RXTXBB_RXTXBB3_IBN_25U_LO1_CTRL_MSB 11
+#define RXTXBB_RXTXBB3_IBN_25U_LO1_CTRL_LSB 9
+#define RXTXBB_RXTXBB3_IBN_25U_LO1_CTRL_MASK 0x00000e00
+#define RXTXBB_RXTXBB3_IBN_25U_LO1_CTRL_GET(x) (((x) & RXTXBB_RXTXBB3_IBN_25U_LO1_CTRL_MASK) >> RXTXBB_RXTXBB3_IBN_25U_LO1_CTRL_LSB)
+#define RXTXBB_RXTXBB3_IBN_25U_LO1_CTRL_SET(x) (((x) << RXTXBB_RXTXBB3_IBN_25U_LO1_CTRL_LSB) & RXTXBB_RXTXBB3_IBN_25U_LO1_CTRL_MASK)
+#define RXTXBB_RXTXBB3_IBN_25U_LO2_CTRL_MSB 8
+#define RXTXBB_RXTXBB3_IBN_25U_LO2_CTRL_LSB 6
+#define RXTXBB_RXTXBB3_IBN_25U_LO2_CTRL_MASK 0x000001c0
+#define RXTXBB_RXTXBB3_IBN_25U_LO2_CTRL_GET(x) (((x) & RXTXBB_RXTXBB3_IBN_25U_LO2_CTRL_MASK) >> RXTXBB_RXTXBB3_IBN_25U_LO2_CTRL_LSB)
+#define RXTXBB_RXTXBB3_IBN_25U_LO2_CTRL_SET(x) (((x) << RXTXBB_RXTXBB3_IBN_25U_LO2_CTRL_LSB) & RXTXBB_RXTXBB3_IBN_25U_LO2_CTRL_MASK)
+#define RXTXBB_RXTXBB3_IBRN_12P5_CM_CTRL_MSB 5
+#define RXTXBB_RXTXBB3_IBRN_12P5_CM_CTRL_LSB 3
+#define RXTXBB_RXTXBB3_IBRN_12P5_CM_CTRL_MASK 0x00000038
+#define RXTXBB_RXTXBB3_IBRN_12P5_CM_CTRL_GET(x) (((x) & RXTXBB_RXTXBB3_IBRN_12P5_CM_CTRL_MASK) >> RXTXBB_RXTXBB3_IBRN_12P5_CM_CTRL_LSB)
+#define RXTXBB_RXTXBB3_IBRN_12P5_CM_CTRL_SET(x) (((x) << RXTXBB_RXTXBB3_IBRN_12P5_CM_CTRL_LSB) & RXTXBB_RXTXBB3_IBRN_12P5_CM_CTRL_MASK)
+#define RXTXBB_RXTXBB3_IBN_100U_TEST_CTRL_MSB 2
+#define RXTXBB_RXTXBB3_IBN_100U_TEST_CTRL_LSB 0
+#define RXTXBB_RXTXBB3_IBN_100U_TEST_CTRL_MASK 0x00000007
+#define RXTXBB_RXTXBB3_IBN_100U_TEST_CTRL_GET(x) (((x) & RXTXBB_RXTXBB3_IBN_100U_TEST_CTRL_MASK) >> RXTXBB_RXTXBB3_IBN_100U_TEST_CTRL_LSB)
+#define RXTXBB_RXTXBB3_IBN_100U_TEST_CTRL_SET(x) (((x) << RXTXBB_RXTXBB3_IBN_100U_TEST_CTRL_LSB) & RXTXBB_RXTXBB3_IBN_100U_TEST_CTRL_MASK)
+
+#define RXTXBB_RXTXBB4_ADDRESS 0x0000005c
+#define RXTXBB_RXTXBB4_OFFSET 0x0000005c
+#define RXTXBB_RXTXBB4_SPARE_MSB 31
+#define RXTXBB_RXTXBB4_SPARE_LSB 31
+#define RXTXBB_RXTXBB4_SPARE_MASK 0x80000000
+#define RXTXBB_RXTXBB4_SPARE_GET(x) (((x) & RXTXBB_RXTXBB4_SPARE_MASK) >> RXTXBB_RXTXBB4_SPARE_LSB)
+#define RXTXBB_RXTXBB4_SPARE_SET(x) (((x) << RXTXBB_RXTXBB4_SPARE_LSB) & RXTXBB_RXTXBB4_SPARE_MASK)
+#define RXTXBB_RXTXBB4_LOCALOFFSET_MSB 30
+#define RXTXBB_RXTXBB4_LOCALOFFSET_LSB 30
+#define RXTXBB_RXTXBB4_LOCALOFFSET_MASK 0x40000000
+#define RXTXBB_RXTXBB4_LOCALOFFSET_GET(x) (((x) & RXTXBB_RXTXBB4_LOCALOFFSET_MASK) >> RXTXBB_RXTXBB4_LOCALOFFSET_LSB)
+#define RXTXBB_RXTXBB4_LOCALOFFSET_SET(x) (((x) << RXTXBB_RXTXBB4_LOCALOFFSET_LSB) & RXTXBB_RXTXBB4_LOCALOFFSET_MASK)
+#define RXTXBB_RXTXBB4_OFSTCORRHII_MSB 29
+#define RXTXBB_RXTXBB4_OFSTCORRHII_LSB 25
+#define RXTXBB_RXTXBB4_OFSTCORRHII_MASK 0x3e000000
+#define RXTXBB_RXTXBB4_OFSTCORRHII_GET(x) (((x) & RXTXBB_RXTXBB4_OFSTCORRHII_MASK) >> RXTXBB_RXTXBB4_OFSTCORRHII_LSB)
+#define RXTXBB_RXTXBB4_OFSTCORRHII_SET(x) (((x) << RXTXBB_RXTXBB4_OFSTCORRHII_LSB) & RXTXBB_RXTXBB4_OFSTCORRHII_MASK)
+#define RXTXBB_RXTXBB4_OFSTCORRHIQ_MSB 24
+#define RXTXBB_RXTXBB4_OFSTCORRHIQ_LSB 20
+#define RXTXBB_RXTXBB4_OFSTCORRHIQ_MASK 0x01f00000
+#define RXTXBB_RXTXBB4_OFSTCORRHIQ_GET(x) (((x) & RXTXBB_RXTXBB4_OFSTCORRHIQ_MASK) >> RXTXBB_RXTXBB4_OFSTCORRHIQ_LSB)
+#define RXTXBB_RXTXBB4_OFSTCORRHIQ_SET(x) (((x) << RXTXBB_RXTXBB4_OFSTCORRHIQ_LSB) & RXTXBB_RXTXBB4_OFSTCORRHIQ_MASK)
+#define RXTXBB_RXTXBB4_OFSTCORRLOI_MSB 19
+#define RXTXBB_RXTXBB4_OFSTCORRLOI_LSB 15
+#define RXTXBB_RXTXBB4_OFSTCORRLOI_MASK 0x000f8000
+#define RXTXBB_RXTXBB4_OFSTCORRLOI_GET(x) (((x) & RXTXBB_RXTXBB4_OFSTCORRLOI_MASK) >> RXTXBB_RXTXBB4_OFSTCORRLOI_LSB)
+#define RXTXBB_RXTXBB4_OFSTCORRLOI_SET(x) (((x) << RXTXBB_RXTXBB4_OFSTCORRLOI_LSB) & RXTXBB_RXTXBB4_OFSTCORRLOI_MASK)
+#define RXTXBB_RXTXBB4_OFSTCORRLOQ_MSB 14
+#define RXTXBB_RXTXBB4_OFSTCORRLOQ_LSB 10
+#define RXTXBB_RXTXBB4_OFSTCORRLOQ_MASK 0x00007c00
+#define RXTXBB_RXTXBB4_OFSTCORRLOQ_GET(x) (((x) & RXTXBB_RXTXBB4_OFSTCORRLOQ_MASK) >> RXTXBB_RXTXBB4_OFSTCORRLOQ_LSB)
+#define RXTXBB_RXTXBB4_OFSTCORRLOQ_SET(x) (((x) << RXTXBB_RXTXBB4_OFSTCORRLOQ_LSB) & RXTXBB_RXTXBB4_OFSTCORRLOQ_MASK)
+#define RXTXBB_RXTXBB4_OFSTCORRI2VI_MSB 9
+#define RXTXBB_RXTXBB4_OFSTCORRI2VI_LSB 5
+#define RXTXBB_RXTXBB4_OFSTCORRI2VI_MASK 0x000003e0
+#define RXTXBB_RXTXBB4_OFSTCORRI2VI_GET(x) (((x) & RXTXBB_RXTXBB4_OFSTCORRI2VI_MASK) >> RXTXBB_RXTXBB4_OFSTCORRI2VI_LSB)
+#define RXTXBB_RXTXBB4_OFSTCORRI2VI_SET(x) (((x) << RXTXBB_RXTXBB4_OFSTCORRI2VI_LSB) & RXTXBB_RXTXBB4_OFSTCORRI2VI_MASK)
+#define RXTXBB_RXTXBB4_OFSTCORRI2VQ_MSB 4
+#define RXTXBB_RXTXBB4_OFSTCORRI2VQ_LSB 0
+#define RXTXBB_RXTXBB4_OFSTCORRI2VQ_MASK 0x0000001f
+#define RXTXBB_RXTXBB4_OFSTCORRI2VQ_GET(x) (((x) & RXTXBB_RXTXBB4_OFSTCORRI2VQ_MASK) >> RXTXBB_RXTXBB4_OFSTCORRI2VQ_LSB)
+#define RXTXBB_RXTXBB4_OFSTCORRI2VQ_SET(x) (((x) << RXTXBB_RXTXBB4_OFSTCORRI2VQ_LSB) & RXTXBB_RXTXBB4_OFSTCORRI2VQ_MASK)
+
+#define ADDAC_ADDAC1_ADDRESS 0x00000060
+#define ADDAC_ADDAC1_OFFSET 0x00000060
+#define ADDAC_ADDAC1_PLL_SVREG_MSB 31
+#define ADDAC_ADDAC1_PLL_SVREG_LSB 31
+#define ADDAC_ADDAC1_PLL_SVREG_MASK 0x80000000
+#define ADDAC_ADDAC1_PLL_SVREG_GET(x) (((x) & ADDAC_ADDAC1_PLL_SVREG_MASK) >> ADDAC_ADDAC1_PLL_SVREG_LSB)
+#define ADDAC_ADDAC1_PLL_SVREG_SET(x) (((x) << ADDAC_ADDAC1_PLL_SVREG_LSB) & ADDAC_ADDAC1_PLL_SVREG_MASK)
+#define ADDAC_ADDAC1_PLL_SCLAMP_MSB 30
+#define ADDAC_ADDAC1_PLL_SCLAMP_LSB 28
+#define ADDAC_ADDAC1_PLL_SCLAMP_MASK 0x70000000
+#define ADDAC_ADDAC1_PLL_SCLAMP_GET(x) (((x) & ADDAC_ADDAC1_PLL_SCLAMP_MASK) >> ADDAC_ADDAC1_PLL_SCLAMP_LSB)
+#define ADDAC_ADDAC1_PLL_SCLAMP_SET(x) (((x) << ADDAC_ADDAC1_PLL_SCLAMP_LSB) & ADDAC_ADDAC1_PLL_SCLAMP_MASK)
+#define ADDAC_ADDAC1_PLL_ATB_MSB 27
+#define ADDAC_ADDAC1_PLL_ATB_LSB 26
+#define ADDAC_ADDAC1_PLL_ATB_MASK 0x0c000000
+#define ADDAC_ADDAC1_PLL_ATB_GET(x) (((x) & ADDAC_ADDAC1_PLL_ATB_MASK) >> ADDAC_ADDAC1_PLL_ATB_LSB)
+#define ADDAC_ADDAC1_PLL_ATB_SET(x) (((x) << ADDAC_ADDAC1_PLL_ATB_LSB) & ADDAC_ADDAC1_PLL_ATB_MASK)
+#define ADDAC_ADDAC1_PLL_ICP_MSB 25
+#define ADDAC_ADDAC1_PLL_ICP_LSB 23
+#define ADDAC_ADDAC1_PLL_ICP_MASK 0x03800000
+#define ADDAC_ADDAC1_PLL_ICP_GET(x) (((x) & ADDAC_ADDAC1_PLL_ICP_MASK) >> ADDAC_ADDAC1_PLL_ICP_LSB)
+#define ADDAC_ADDAC1_PLL_ICP_SET(x) (((x) << ADDAC_ADDAC1_PLL_ICP_LSB) & ADDAC_ADDAC1_PLL_ICP_MASK)
+#define ADDAC_ADDAC1_PLL_FILTER_MSB 22
+#define ADDAC_ADDAC1_PLL_FILTER_LSB 15
+#define ADDAC_ADDAC1_PLL_FILTER_MASK 0x007f8000
+#define ADDAC_ADDAC1_PLL_FILTER_GET(x) (((x) & ADDAC_ADDAC1_PLL_FILTER_MASK) >> ADDAC_ADDAC1_PLL_FILTER_LSB)
+#define ADDAC_ADDAC1_PLL_FILTER_SET(x) (((x) << ADDAC_ADDAC1_PLL_FILTER_LSB) & ADDAC_ADDAC1_PLL_FILTER_MASK)
+#define ADDAC_ADDAC1_PWDPLL_MSB 14
+#define ADDAC_ADDAC1_PWDPLL_LSB 14
+#define ADDAC_ADDAC1_PWDPLL_MASK 0x00004000
+#define ADDAC_ADDAC1_PWDPLL_GET(x) (((x) & ADDAC_ADDAC1_PWDPLL_MASK) >> ADDAC_ADDAC1_PWDPLL_LSB)
+#define ADDAC_ADDAC1_PWDPLL_SET(x) (((x) << ADDAC_ADDAC1_PWDPLL_LSB) & ADDAC_ADDAC1_PWDPLL_MASK)
+#define ADDAC_ADDAC1_PWDADC_MSB 13
+#define ADDAC_ADDAC1_PWDADC_LSB 13
+#define ADDAC_ADDAC1_PWDADC_MASK 0x00002000
+#define ADDAC_ADDAC1_PWDADC_GET(x) (((x) & ADDAC_ADDAC1_PWDADC_MASK) >> ADDAC_ADDAC1_PWDADC_LSB)
+#define ADDAC_ADDAC1_PWDADC_SET(x) (((x) << ADDAC_ADDAC1_PWDADC_LSB) & ADDAC_ADDAC1_PWDADC_MASK)
+#define ADDAC_ADDAC1_PWDDAC_MSB 12
+#define ADDAC_ADDAC1_PWDDAC_LSB 12
+#define ADDAC_ADDAC1_PWDDAC_MASK 0x00001000
+#define ADDAC_ADDAC1_PWDDAC_GET(x) (((x) & ADDAC_ADDAC1_PWDDAC_MASK) >> ADDAC_ADDAC1_PWDDAC_LSB)
+#define ADDAC_ADDAC1_PWDDAC_SET(x) (((x) << ADDAC_ADDAC1_PWDDAC_LSB) & ADDAC_ADDAC1_PWDDAC_MASK)
+#define ADDAC_ADDAC1_FORCEMSBLOW_MSB 11
+#define ADDAC_ADDAC1_FORCEMSBLOW_LSB 11
+#define ADDAC_ADDAC1_FORCEMSBLOW_MASK 0x00000800
+#define ADDAC_ADDAC1_FORCEMSBLOW_GET(x) (((x) & ADDAC_ADDAC1_FORCEMSBLOW_MASK) >> ADDAC_ADDAC1_FORCEMSBLOW_LSB)
+#define ADDAC_ADDAC1_FORCEMSBLOW_SET(x) (((x) << ADDAC_ADDAC1_FORCEMSBLOW_LSB) & ADDAC_ADDAC1_FORCEMSBLOW_MASK)
+#define ADDAC_ADDAC1_SELMANPWDS_MSB 10
+#define ADDAC_ADDAC1_SELMANPWDS_LSB 10
+#define ADDAC_ADDAC1_SELMANPWDS_MASK 0x00000400
+#define ADDAC_ADDAC1_SELMANPWDS_GET(x) (((x) & ADDAC_ADDAC1_SELMANPWDS_MASK) >> ADDAC_ADDAC1_SELMANPWDS_LSB)
+#define ADDAC_ADDAC1_SELMANPWDS_SET(x) (((x) << ADDAC_ADDAC1_SELMANPWDS_LSB) & ADDAC_ADDAC1_SELMANPWDS_MASK)
+#define ADDAC_ADDAC1_INV_CLK160_ADC_MSB 9
+#define ADDAC_ADDAC1_INV_CLK160_ADC_LSB 9
+#define ADDAC_ADDAC1_INV_CLK160_ADC_MASK 0x00000200
+#define ADDAC_ADDAC1_INV_CLK160_ADC_GET(x) (((x) & ADDAC_ADDAC1_INV_CLK160_ADC_MASK) >> ADDAC_ADDAC1_INV_CLK160_ADC_LSB)
+#define ADDAC_ADDAC1_INV_CLK160_ADC_SET(x) (((x) << ADDAC_ADDAC1_INV_CLK160_ADC_LSB) & ADDAC_ADDAC1_INV_CLK160_ADC_MASK)
+#define ADDAC_ADDAC1_CM_SEL_MSB 8
+#define ADDAC_ADDAC1_CM_SEL_LSB 7
+#define ADDAC_ADDAC1_CM_SEL_MASK 0x00000180
+#define ADDAC_ADDAC1_CM_SEL_GET(x) (((x) & ADDAC_ADDAC1_CM_SEL_MASK) >> ADDAC_ADDAC1_CM_SEL_LSB)
+#define ADDAC_ADDAC1_CM_SEL_SET(x) (((x) << ADDAC_ADDAC1_CM_SEL_LSB) & ADDAC_ADDAC1_CM_SEL_MASK)
+#define ADDAC_ADDAC1_DISABLE_DAC_REG_MSB 6
+#define ADDAC_ADDAC1_DISABLE_DAC_REG_LSB 6
+#define ADDAC_ADDAC1_DISABLE_DAC_REG_MASK 0x00000040
+#define ADDAC_ADDAC1_DISABLE_DAC_REG_GET(x) (((x) & ADDAC_ADDAC1_DISABLE_DAC_REG_MASK) >> ADDAC_ADDAC1_DISABLE_DAC_REG_LSB)
+#define ADDAC_ADDAC1_DISABLE_DAC_REG_SET(x) (((x) << ADDAC_ADDAC1_DISABLE_DAC_REG_LSB) & ADDAC_ADDAC1_DISABLE_DAC_REG_MASK)
+#define ADDAC_ADDAC1_SPARE_MSB 5
+#define ADDAC_ADDAC1_SPARE_LSB 0
+#define ADDAC_ADDAC1_SPARE_MASK 0x0000003f
+#define ADDAC_ADDAC1_SPARE_GET(x) (((x) & ADDAC_ADDAC1_SPARE_MASK) >> ADDAC_ADDAC1_SPARE_LSB)
+#define ADDAC_ADDAC1_SPARE_SET(x) (((x) << ADDAC_ADDAC1_SPARE_LSB) & ADDAC_ADDAC1_SPARE_MASK)
+
+
+#ifndef __ASSEMBLER__
+
+typedef struct analog_reg_reg_s {
+ volatile unsigned int synth_synth1;
+ volatile unsigned int synth_synth2;
+ volatile unsigned int synth_synth3;
+ volatile unsigned int synth_synth4;
+ volatile unsigned int synth_synth5;
+ volatile unsigned int synth_synth6;
+ volatile unsigned int synth_synth7;
+ volatile unsigned int synth_synth8;
+ volatile unsigned int rf5g_rf5g1;
+ volatile unsigned int rf5g_rf5g2;
+ volatile unsigned int rf2g_rf2g1;
+ volatile unsigned int rf2g_rf2g2;
+ volatile unsigned int top_gain;
+ volatile unsigned int top_top;
+ volatile unsigned int bias_bias_sel;
+ volatile unsigned int bias_bias1;
+ volatile unsigned int bias_bias2;
+ volatile unsigned int bias_bias3;
+ volatile unsigned int txpc_txpc;
+ volatile unsigned int txpc_misc;
+ volatile unsigned int rxtxbb_rxtxbb1;
+ volatile unsigned int rxtxbb_rxtxbb2;
+ volatile unsigned int rxtxbb_rxtxbb3;
+ volatile unsigned int rxtxbb_rxtxbb4;
+ volatile unsigned int addac_addac1;
+} analog_reg_reg_t;
+
+#endif /* __ASSEMBLER__ */
+
+#endif /* _ANALOG_REG_H_ */
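Editor's note: the _GET/_SET helpers above all follow the same shift-and-mask pattern, so updating one field of a register is a plain read-modify-write: clear the field with its _MASK, then OR in the _SET() of the new value. A minimal sketch of that pattern, assuming hypothetical analog_reg_read()/analog_reg_write() 32-bit accessors that are not part of this patch:

	/* Illustrative only: read-modify-write of the FILTERFC field in RXTXBB2.
	 * analog_reg_read()/analog_reg_write() are assumed MMIO accessors for
	 * the analog block; this header defines only offsets, masks and
	 * shift helpers.
	 */
	static void rxtxbb_set_filter_fc(unsigned int fc)
	{
		unsigned int v = analog_reg_read(RXTXBB_RXTXBB2_ADDRESS);

		v &= ~RXTXBB_RXTXBB2_FILTERFC_MASK;	/* drop the old field value */
		v |= RXTXBB_RXTXBB2_FILTERFC_SET(fc);	/* shift new value into place */
		analog_reg_write(RXTXBB_RXTXBB2_ADDRESS, v);
	}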
diff --git a/drivers/staging/ath6kl/include/common/AR6002/hw2.0/hw/apb_map.h b/drivers/staging/ath6kl/include/common/AR6002/hw2.0/hw/apb_map.h
new file mode 100644
index 000000000000..f3bf6d6cc82b
--- /dev/null
+++ b/drivers/staging/ath6kl/include/common/AR6002/hw2.0/hw/apb_map.h
@@ -0,0 +1,13 @@
+#ifndef _APB_MAP_H_
+#define _APB_MAP_H_
+
+#define RTC_BASE_ADDRESS 0x00004000
+#define VMC_BASE_ADDRESS 0x00008000
+#define UART_BASE_ADDRESS 0x0000c000
+#define SI_BASE_ADDRESS 0x00010000
+#define GPIO_BASE_ADDRESS 0x00014000
+#define MBOX_BASE_ADDRESS 0x00018000
+#define ANALOG_INTF_BASE_ADDRESS 0x0001c000
+#define MAC_BASE_ADDRESS 0x00020000
+
+#endif /* _APB_MAP_H_ */
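Editor's note: the base addresses above combine with the per-block register offsets (such as those in gpio_reg.h below); the absolute address of a register is the block base plus the register offset. A hedged sketch of that composition, assuming a hypothetical mmio_write32() helper that is not part of this patch:

	/* Illustrative only: drive all 18 GPIO output lines high.
	 * mmio_write32() is an assumed 32-bit MMIO write helper; under this
	 * addressing scheme GPIO_OUT sits at GPIO_BASE_ADDRESS + GPIO_OUT_OFFSET.
	 */
	static void gpio_out_all_high(void)
	{
		mmio_write32(GPIO_BASE_ADDRESS + GPIO_OUT_OFFSET,
			     GPIO_OUT_DATA_SET(0x3ffff));
	}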
diff --git a/drivers/staging/ath6kl/include/common/AR6002/hw2.0/hw/gpio_reg.h b/drivers/staging/ath6kl/include/common/AR6002/hw2.0/hw/gpio_reg.h
new file mode 100644
index 000000000000..4f2b964b7df3
--- /dev/null
+++ b/drivers/staging/ath6kl/include/common/AR6002/hw2.0/hw/gpio_reg.h
@@ -0,0 +1,977 @@
+#ifndef _GPIO_REG_REG_H_
+#define _GPIO_REG_REG_H_
+
+#define GPIO_OUT_ADDRESS 0x00000000
+#define GPIO_OUT_OFFSET 0x00000000
+#define GPIO_OUT_DATA_MSB 17
+#define GPIO_OUT_DATA_LSB 0
+#define GPIO_OUT_DATA_MASK 0x0003ffff
+#define GPIO_OUT_DATA_GET(x) (((x) & GPIO_OUT_DATA_MASK) >> GPIO_OUT_DATA_LSB)
+#define GPIO_OUT_DATA_SET(x) (((x) << GPIO_OUT_DATA_LSB) & GPIO_OUT_DATA_MASK)
+
+#define GPIO_OUT_W1TS_ADDRESS 0x00000004
+#define GPIO_OUT_W1TS_OFFSET 0x00000004
+#define GPIO_OUT_W1TS_DATA_MSB 17
+#define GPIO_OUT_W1TS_DATA_LSB 0
+#define GPIO_OUT_W1TS_DATA_MASK 0x0003ffff
+#define GPIO_OUT_W1TS_DATA_GET(x) (((x) & GPIO_OUT_W1TS_DATA_MASK) >> GPIO_OUT_W1TS_DATA_LSB)
+#define GPIO_OUT_W1TS_DATA_SET(x) (((x) << GPIO_OUT_W1TS_DATA_LSB) & GPIO_OUT_W1TS_DATA_MASK)
+
+#define GPIO_OUT_W1TC_ADDRESS 0x00000008
+#define GPIO_OUT_W1TC_OFFSET 0x00000008
+#define GPIO_OUT_W1TC_DATA_MSB 17
+#define GPIO_OUT_W1TC_DATA_LSB 0
+#define GPIO_OUT_W1TC_DATA_MASK 0x0003ffff
+#define GPIO_OUT_W1TC_DATA_GET(x) (((x) & GPIO_OUT_W1TC_DATA_MASK) >> GPIO_OUT_W1TC_DATA_LSB)
+#define GPIO_OUT_W1TC_DATA_SET(x) (((x) << GPIO_OUT_W1TC_DATA_LSB) & GPIO_OUT_W1TC_DATA_MASK)
+
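Editor's note: the _W1TS/_W1TC register pairs appear to follow the usual write-one-to-set / write-one-to-clear convention, so individual output bits can be changed without a read-modify-write of GPIO_OUT. A minimal sketch, reusing the hypothetical mmio_write32() helper and base-plus-offset addressing assumed above:

	/* Illustrative only: set or clear one GPIO output line.
	 * Bits written as 0 are left untouched by W1TS/W1TC registers, so no
	 * read of GPIO_OUT is needed; only the addressed bit changes.
	 */
	static void gpio_out_set_pin(unsigned int pin, int high)
	{
		unsigned int off = high ? GPIO_OUT_W1TS_OFFSET : GPIO_OUT_W1TC_OFFSET;

		mmio_write32(GPIO_BASE_ADDRESS + off, 1u << pin);
	}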
+#define GPIO_ENABLE_ADDRESS 0x0000000c
+#define GPIO_ENABLE_OFFSET 0x0000000c
+#define GPIO_ENABLE_DATA_MSB 17
+#define GPIO_ENABLE_DATA_LSB 0
+#define GPIO_ENABLE_DATA_MASK 0x0003ffff
+#define GPIO_ENABLE_DATA_GET(x) (((x) & GPIO_ENABLE_DATA_MASK) >> GPIO_ENABLE_DATA_LSB)
+#define GPIO_ENABLE_DATA_SET(x) (((x) << GPIO_ENABLE_DATA_LSB) & GPIO_ENABLE_DATA_MASK)
+
+#define GPIO_ENABLE_W1TS_ADDRESS 0x00000010
+#define GPIO_ENABLE_W1TS_OFFSET 0x00000010
+#define GPIO_ENABLE_W1TS_DATA_MSB 17
+#define GPIO_ENABLE_W1TS_DATA_LSB 0
+#define GPIO_ENABLE_W1TS_DATA_MASK 0x0003ffff
+#define GPIO_ENABLE_W1TS_DATA_GET(x) (((x) & GPIO_ENABLE_W1TS_DATA_MASK) >> GPIO_ENABLE_W1TS_DATA_LSB)
+#define GPIO_ENABLE_W1TS_DATA_SET(x) (((x) << GPIO_ENABLE_W1TS_DATA_LSB) & GPIO_ENABLE_W1TS_DATA_MASK)
+
+#define GPIO_ENABLE_W1TC_ADDRESS 0x00000014
+#define GPIO_ENABLE_W1TC_OFFSET 0x00000014
+#define GPIO_ENABLE_W1TC_DATA_MSB 17
+#define GPIO_ENABLE_W1TC_DATA_LSB 0
+#define GPIO_ENABLE_W1TC_DATA_MASK 0x0003ffff
+#define GPIO_ENABLE_W1TC_DATA_GET(x) (((x) & GPIO_ENABLE_W1TC_DATA_MASK) >> GPIO_ENABLE_W1TC_DATA_LSB)
+#define GPIO_ENABLE_W1TC_DATA_SET(x) (((x) << GPIO_ENABLE_W1TC_DATA_LSB) & GPIO_ENABLE_W1TC_DATA_MASK)
+
+#define GPIO_IN_ADDRESS 0x00000018
+#define GPIO_IN_OFFSET 0x00000018
+#define GPIO_IN_DATA_MSB 17
+#define GPIO_IN_DATA_LSB 0
+#define GPIO_IN_DATA_MASK 0x0003ffff
+#define GPIO_IN_DATA_GET(x) (((x) & GPIO_IN_DATA_MASK) >> GPIO_IN_DATA_LSB)
+#define GPIO_IN_DATA_SET(x) (((x) << GPIO_IN_DATA_LSB) & GPIO_IN_DATA_MASK)
+
+#define GPIO_STATUS_ADDRESS 0x0000001c
+#define GPIO_STATUS_OFFSET 0x0000001c
+#define GPIO_STATUS_INTERRUPT_MSB 17
+#define GPIO_STATUS_INTERRUPT_LSB 0
+#define GPIO_STATUS_INTERRUPT_MASK 0x0003ffff
+#define GPIO_STATUS_INTERRUPT_GET(x) (((x) & GPIO_STATUS_INTERRUPT_MASK) >> GPIO_STATUS_INTERRUPT_LSB)
+#define GPIO_STATUS_INTERRUPT_SET(x) (((x) << GPIO_STATUS_INTERRUPT_LSB) & GPIO_STATUS_INTERRUPT_MASK)
+
+#define GPIO_STATUS_W1TS_ADDRESS 0x00000020
+#define GPIO_STATUS_W1TS_OFFSET 0x00000020
+#define GPIO_STATUS_W1TS_INTERRUPT_MSB 17
+#define GPIO_STATUS_W1TS_INTERRUPT_LSB 0
+#define GPIO_STATUS_W1TS_INTERRUPT_MASK 0x0003ffff
+#define GPIO_STATUS_W1TS_INTERRUPT_GET(x) (((x) & GPIO_STATUS_W1TS_INTERRUPT_MASK) >> GPIO_STATUS_W1TS_INTERRUPT_LSB)
+#define GPIO_STATUS_W1TS_INTERRUPT_SET(x) (((x) << GPIO_STATUS_W1TS_INTERRUPT_LSB) & GPIO_STATUS_W1TS_INTERRUPT_MASK)
+
+#define GPIO_STATUS_W1TC_ADDRESS 0x00000024
+#define GPIO_STATUS_W1TC_OFFSET 0x00000024
+#define GPIO_STATUS_W1TC_INTERRUPT_MSB 17
+#define GPIO_STATUS_W1TC_INTERRUPT_LSB 0
+#define GPIO_STATUS_W1TC_INTERRUPT_MASK 0x0003ffff
+#define GPIO_STATUS_W1TC_INTERRUPT_GET(x) (((x) & GPIO_STATUS_W1TC_INTERRUPT_MASK) >> GPIO_STATUS_W1TC_INTERRUPT_LSB)
+#define GPIO_STATUS_W1TC_INTERRUPT_SET(x) (((x) << GPIO_STATUS_W1TC_INTERRUPT_LSB) & GPIO_STATUS_W1TC_INTERRUPT_MASK)
+
+#define GPIO_PIN0_ADDRESS 0x00000028
+#define GPIO_PIN0_OFFSET 0x00000028
+#define GPIO_PIN0_CONFIG_MSB 12
+#define GPIO_PIN0_CONFIG_LSB 11
+#define GPIO_PIN0_CONFIG_MASK 0x00001800
+#define GPIO_PIN0_CONFIG_GET(x) (((x) & GPIO_PIN0_CONFIG_MASK) >> GPIO_PIN0_CONFIG_LSB)
+#define GPIO_PIN0_CONFIG_SET(x) (((x) << GPIO_PIN0_CONFIG_LSB) & GPIO_PIN0_CONFIG_MASK)
+#define GPIO_PIN0_WAKEUP_ENABLE_MSB 10
+#define GPIO_PIN0_WAKEUP_ENABLE_LSB 10
+#define GPIO_PIN0_WAKEUP_ENABLE_MASK 0x00000400
+#define GPIO_PIN0_WAKEUP_ENABLE_GET(x) (((x) & GPIO_PIN0_WAKEUP_ENABLE_MASK) >> GPIO_PIN0_WAKEUP_ENABLE_LSB)
+#define GPIO_PIN0_WAKEUP_ENABLE_SET(x) (((x) << GPIO_PIN0_WAKEUP_ENABLE_LSB) & GPIO_PIN0_WAKEUP_ENABLE_MASK)
+#define GPIO_PIN0_INT_TYPE_MSB 9
+#define GPIO_PIN0_INT_TYPE_LSB 7
+#define GPIO_PIN0_INT_TYPE_MASK 0x00000380
+#define GPIO_PIN0_INT_TYPE_GET(x) (((x) & GPIO_PIN0_INT_TYPE_MASK) >> GPIO_PIN0_INT_TYPE_LSB)
+#define GPIO_PIN0_INT_TYPE_SET(x) (((x) << GPIO_PIN0_INT_TYPE_LSB) & GPIO_PIN0_INT_TYPE_MASK)
+#define GPIO_PIN0_PAD_DRIVER_MSB 2
+#define GPIO_PIN0_PAD_DRIVER_LSB 2
+#define GPIO_PIN0_PAD_DRIVER_MASK 0x00000004
+#define GPIO_PIN0_PAD_DRIVER_GET(x) (((x) & GPIO_PIN0_PAD_DRIVER_MASK) >> GPIO_PIN0_PAD_DRIVER_LSB)
+#define GPIO_PIN0_PAD_DRIVER_SET(x) (((x) << GPIO_PIN0_PAD_DRIVER_LSB) & GPIO_PIN0_PAD_DRIVER_MASK)
+#define GPIO_PIN0_SOURCE_MSB 0
+#define GPIO_PIN0_SOURCE_LSB 0
+#define GPIO_PIN0_SOURCE_MASK 0x00000001
+#define GPIO_PIN0_SOURCE_GET(x) (((x) & GPIO_PIN0_SOURCE_MASK) >> GPIO_PIN0_SOURCE_LSB)
+#define GPIO_PIN0_SOURCE_SET(x) (((x) << GPIO_PIN0_SOURCE_LSB) & GPIO_PIN0_SOURCE_MASK)
+
+#define GPIO_PIN1_ADDRESS 0x0000002c
+#define GPIO_PIN1_OFFSET 0x0000002c
+#define GPIO_PIN1_CONFIG_MSB 12
+#define GPIO_PIN1_CONFIG_LSB 11
+#define GPIO_PIN1_CONFIG_MASK 0x00001800
+#define GPIO_PIN1_CONFIG_GET(x) (((x) & GPIO_PIN1_CONFIG_MASK) >> GPIO_PIN1_CONFIG_LSB)
+#define GPIO_PIN1_CONFIG_SET(x) (((x) << GPIO_PIN1_CONFIG_LSB) & GPIO_PIN1_CONFIG_MASK)
+#define GPIO_PIN1_WAKEUP_ENABLE_MSB 10
+#define GPIO_PIN1_WAKEUP_ENABLE_LSB 10
+#define GPIO_PIN1_WAKEUP_ENABLE_MASK 0x00000400
+#define GPIO_PIN1_WAKEUP_ENABLE_GET(x) (((x) & GPIO_PIN1_WAKEUP_ENABLE_MASK) >> GPIO_PIN1_WAKEUP_ENABLE_LSB)
+#define GPIO_PIN1_WAKEUP_ENABLE_SET(x) (((x) << GPIO_PIN1_WAKEUP_ENABLE_LSB) & GPIO_PIN1_WAKEUP_ENABLE_MASK)
+#define GPIO_PIN1_INT_TYPE_MSB 9
+#define GPIO_PIN1_INT_TYPE_LSB 7
+#define GPIO_PIN1_INT_TYPE_MASK 0x00000380
+#define GPIO_PIN1_INT_TYPE_GET(x) (((x) & GPIO_PIN1_INT_TYPE_MASK) >> GPIO_PIN1_INT_TYPE_LSB)
+#define GPIO_PIN1_INT_TYPE_SET(x) (((x) << GPIO_PIN1_INT_TYPE_LSB) & GPIO_PIN1_INT_TYPE_MASK)
+#define GPIO_PIN1_PAD_DRIVER_MSB 2
+#define GPIO_PIN1_PAD_DRIVER_LSB 2
+#define GPIO_PIN1_PAD_DRIVER_MASK 0x00000004
+#define GPIO_PIN1_PAD_DRIVER_GET(x) (((x) & GPIO_PIN1_PAD_DRIVER_MASK) >> GPIO_PIN1_PAD_DRIVER_LSB)
+#define GPIO_PIN1_PAD_DRIVER_SET(x) (((x) << GPIO_PIN1_PAD_DRIVER_LSB) & GPIO_PIN1_PAD_DRIVER_MASK)
+#define GPIO_PIN1_SOURCE_MSB 0
+#define GPIO_PIN1_SOURCE_LSB 0
+#define GPIO_PIN1_SOURCE_MASK 0x00000001
+#define GPIO_PIN1_SOURCE_GET(x) (((x) & GPIO_PIN1_SOURCE_MASK) >> GPIO_PIN1_SOURCE_LSB)
+#define GPIO_PIN1_SOURCE_SET(x) (((x) << GPIO_PIN1_SOURCE_LSB) & GPIO_PIN1_SOURCE_MASK)
+
+#define GPIO_PIN2_ADDRESS 0x00000030
+#define GPIO_PIN2_OFFSET 0x00000030
+#define GPIO_PIN2_CONFIG_MSB 12
+#define GPIO_PIN2_CONFIG_LSB 11
+#define GPIO_PIN2_CONFIG_MASK 0x00001800
+#define GPIO_PIN2_CONFIG_GET(x) (((x) & GPIO_PIN2_CONFIG_MASK) >> GPIO_PIN2_CONFIG_LSB)
+#define GPIO_PIN2_CONFIG_SET(x) (((x) << GPIO_PIN2_CONFIG_LSB) & GPIO_PIN2_CONFIG_MASK)
+#define GPIO_PIN2_WAKEUP_ENABLE_MSB 10
+#define GPIO_PIN2_WAKEUP_ENABLE_LSB 10
+#define GPIO_PIN2_WAKEUP_ENABLE_MASK 0x00000400
+#define GPIO_PIN2_WAKEUP_ENABLE_GET(x) (((x) & GPIO_PIN2_WAKEUP_ENABLE_MASK) >> GPIO_PIN2_WAKEUP_ENABLE_LSB)
+#define GPIO_PIN2_WAKEUP_ENABLE_SET(x) (((x) << GPIO_PIN2_WAKEUP_ENABLE_LSB) & GPIO_PIN2_WAKEUP_ENABLE_MASK)
+#define GPIO_PIN2_INT_TYPE_MSB 9
+#define GPIO_PIN2_INT_TYPE_LSB 7
+#define GPIO_PIN2_INT_TYPE_MASK 0x00000380
+#define GPIO_PIN2_INT_TYPE_GET(x) (((x) & GPIO_PIN2_INT_TYPE_MASK) >> GPIO_PIN2_INT_TYPE_LSB)
+#define GPIO_PIN2_INT_TYPE_SET(x) (((x) << GPIO_PIN2_INT_TYPE_LSB) & GPIO_PIN2_INT_TYPE_MASK)
+#define GPIO_PIN2_PAD_DRIVER_MSB 2
+#define GPIO_PIN2_PAD_DRIVER_LSB 2
+#define GPIO_PIN2_PAD_DRIVER_MASK 0x00000004
+#define GPIO_PIN2_PAD_DRIVER_GET(x) (((x) & GPIO_PIN2_PAD_DRIVER_MASK) >> GPIO_PIN2_PAD_DRIVER_LSB)
+#define GPIO_PIN2_PAD_DRIVER_SET(x) (((x) << GPIO_PIN2_PAD_DRIVER_LSB) & GPIO_PIN2_PAD_DRIVER_MASK)
+#define GPIO_PIN2_SOURCE_MSB 0
+#define GPIO_PIN2_SOURCE_LSB 0
+#define GPIO_PIN2_SOURCE_MASK 0x00000001
+#define GPIO_PIN2_SOURCE_GET(x) (((x) & GPIO_PIN2_SOURCE_MASK) >> GPIO_PIN2_SOURCE_LSB)
+#define GPIO_PIN2_SOURCE_SET(x) (((x) << GPIO_PIN2_SOURCE_LSB) & GPIO_PIN2_SOURCE_MASK)
+
+#define GPIO_PIN3_ADDRESS 0x00000034
+#define GPIO_PIN3_OFFSET 0x00000034
+#define GPIO_PIN3_CONFIG_MSB 12
+#define GPIO_PIN3_CONFIG_LSB 11
+#define GPIO_PIN3_CONFIG_MASK 0x00001800
+#define GPIO_PIN3_CONFIG_GET(x) (((x) & GPIO_PIN3_CONFIG_MASK) >> GPIO_PIN3_CONFIG_LSB)
+#define GPIO_PIN3_CONFIG_SET(x) (((x) << GPIO_PIN3_CONFIG_LSB) & GPIO_PIN3_CONFIG_MASK)
+#define GPIO_PIN3_WAKEUP_ENABLE_MSB 10
+#define GPIO_PIN3_WAKEUP_ENABLE_LSB 10
+#define GPIO_PIN3_WAKEUP_ENABLE_MASK 0x00000400
+#define GPIO_PIN3_WAKEUP_ENABLE_GET(x) (((x) & GPIO_PIN3_WAKEUP_ENABLE_MASK) >> GPIO_PIN3_WAKEUP_ENABLE_LSB)
+#define GPIO_PIN3_WAKEUP_ENABLE_SET(x) (((x) << GPIO_PIN3_WAKEUP_ENABLE_LSB) & GPIO_PIN3_WAKEUP_ENABLE_MASK)
+#define GPIO_PIN3_INT_TYPE_MSB 9
+#define GPIO_PIN3_INT_TYPE_LSB 7
+#define GPIO_PIN3_INT_TYPE_MASK 0x00000380
+#define GPIO_PIN3_INT_TYPE_GET(x) (((x) & GPIO_PIN3_INT_TYPE_MASK) >> GPIO_PIN3_INT_TYPE_LSB)
+#define GPIO_PIN3_INT_TYPE_SET(x) (((x) << GPIO_PIN3_INT_TYPE_LSB) & GPIO_PIN3_INT_TYPE_MASK)
+#define GPIO_PIN3_PAD_DRIVER_MSB 2
+#define GPIO_PIN3_PAD_DRIVER_LSB 2
+#define GPIO_PIN3_PAD_DRIVER_MASK 0x00000004
+#define GPIO_PIN3_PAD_DRIVER_GET(x) (((x) & GPIO_PIN3_PAD_DRIVER_MASK) >> GPIO_PIN3_PAD_DRIVER_LSB)
+#define GPIO_PIN3_PAD_DRIVER_SET(x) (((x) << GPIO_PIN3_PAD_DRIVER_LSB) & GPIO_PIN3_PAD_DRIVER_MASK)
+#define GPIO_PIN3_SOURCE_MSB 0
+#define GPIO_PIN3_SOURCE_LSB 0
+#define GPIO_PIN3_SOURCE_MASK 0x00000001
+#define GPIO_PIN3_SOURCE_GET(x) (((x) & GPIO_PIN3_SOURCE_MASK) >> GPIO_PIN3_SOURCE_LSB)
+#define GPIO_PIN3_SOURCE_SET(x) (((x) << GPIO_PIN3_SOURCE_LSB) & GPIO_PIN3_SOURCE_MASK)
+
+#define GPIO_PIN4_ADDRESS 0x00000038
+#define GPIO_PIN4_OFFSET 0x00000038
+#define GPIO_PIN4_CONFIG_MSB 12
+#define GPIO_PIN4_CONFIG_LSB 11
+#define GPIO_PIN4_CONFIG_MASK 0x00001800
+#define GPIO_PIN4_CONFIG_GET(x) (((x) & GPIO_PIN4_CONFIG_MASK) >> GPIO_PIN4_CONFIG_LSB)
+#define GPIO_PIN4_CONFIG_SET(x) (((x) << GPIO_PIN4_CONFIG_LSB) & GPIO_PIN4_CONFIG_MASK)
+#define GPIO_PIN4_WAKEUP_ENABLE_MSB 10
+#define GPIO_PIN4_WAKEUP_ENABLE_LSB 10
+#define GPIO_PIN4_WAKEUP_ENABLE_MASK 0x00000400
+#define GPIO_PIN4_WAKEUP_ENABLE_GET(x) (((x) & GPIO_PIN4_WAKEUP_ENABLE_MASK) >> GPIO_PIN4_WAKEUP_ENABLE_LSB)
+#define GPIO_PIN4_WAKEUP_ENABLE_SET(x) (((x) << GPIO_PIN4_WAKEUP_ENABLE_LSB) & GPIO_PIN4_WAKEUP_ENABLE_MASK)
+#define GPIO_PIN4_INT_TYPE_MSB 9
+#define GPIO_PIN4_INT_TYPE_LSB 7
+#define GPIO_PIN4_INT_TYPE_MASK 0x00000380
+#define GPIO_PIN4_INT_TYPE_GET(x) (((x) & GPIO_PIN4_INT_TYPE_MASK) >> GPIO_PIN4_INT_TYPE_LSB)
+#define GPIO_PIN4_INT_TYPE_SET(x) (((x) << GPIO_PIN4_INT_TYPE_LSB) & GPIO_PIN4_INT_TYPE_MASK)
+#define GPIO_PIN4_PAD_DRIVER_MSB 2
+#define GPIO_PIN4_PAD_DRIVER_LSB 2
+#define GPIO_PIN4_PAD_DRIVER_MASK 0x00000004
+#define GPIO_PIN4_PAD_DRIVER_GET(x) (((x) & GPIO_PIN4_PAD_DRIVER_MASK) >> GPIO_PIN4_PAD_DRIVER_LSB)
+#define GPIO_PIN4_PAD_DRIVER_SET(x) (((x) << GPIO_PIN4_PAD_DRIVER_LSB) & GPIO_PIN4_PAD_DRIVER_MASK)
+#define GPIO_PIN4_SOURCE_MSB 0
+#define GPIO_PIN4_SOURCE_LSB 0
+#define GPIO_PIN4_SOURCE_MASK 0x00000001
+#define GPIO_PIN4_SOURCE_GET(x) (((x) & GPIO_PIN4_SOURCE_MASK) >> GPIO_PIN4_SOURCE_LSB)
+#define GPIO_PIN4_SOURCE_SET(x) (((x) << GPIO_PIN4_SOURCE_LSB) & GPIO_PIN4_SOURCE_MASK)
+
+#define GPIO_PIN5_ADDRESS 0x0000003c
+#define GPIO_PIN5_OFFSET 0x0000003c
+#define GPIO_PIN5_CONFIG_MSB 12
+#define GPIO_PIN5_CONFIG_LSB 11
+#define GPIO_PIN5_CONFIG_MASK 0x00001800
+#define GPIO_PIN5_CONFIG_GET(x) (((x) & GPIO_PIN5_CONFIG_MASK) >> GPIO_PIN5_CONFIG_LSB)
+#define GPIO_PIN5_CONFIG_SET(x) (((x) << GPIO_PIN5_CONFIG_LSB) & GPIO_PIN5_CONFIG_MASK)
+#define GPIO_PIN5_WAKEUP_ENABLE_MSB 10
+#define GPIO_PIN5_WAKEUP_ENABLE_LSB 10
+#define GPIO_PIN5_WAKEUP_ENABLE_MASK 0x00000400
+#define GPIO_PIN5_WAKEUP_ENABLE_GET(x) (((x) & GPIO_PIN5_WAKEUP_ENABLE_MASK) >> GPIO_PIN5_WAKEUP_ENABLE_LSB)
+#define GPIO_PIN5_WAKEUP_ENABLE_SET(x) (((x) << GPIO_PIN5_WAKEUP_ENABLE_LSB) & GPIO_PIN5_WAKEUP_ENABLE_MASK)
+#define GPIO_PIN5_INT_TYPE_MSB 9
+#define GPIO_PIN5_INT_TYPE_LSB 7
+#define GPIO_PIN5_INT_TYPE_MASK 0x00000380
+#define GPIO_PIN5_INT_TYPE_GET(x) (((x) & GPIO_PIN5_INT_TYPE_MASK) >> GPIO_PIN5_INT_TYPE_LSB)
+#define GPIO_PIN5_INT_TYPE_SET(x) (((x) << GPIO_PIN5_INT_TYPE_LSB) & GPIO_PIN5_INT_TYPE_MASK)
+#define GPIO_PIN5_PAD_DRIVER_MSB 2
+#define GPIO_PIN5_PAD_DRIVER_LSB 2
+#define GPIO_PIN5_PAD_DRIVER_MASK 0x00000004
+#define GPIO_PIN5_PAD_DRIVER_GET(x) (((x) & GPIO_PIN5_PAD_DRIVER_MASK) >> GPIO_PIN5_PAD_DRIVER_LSB)
+#define GPIO_PIN5_PAD_DRIVER_SET(x) (((x) << GPIO_PIN5_PAD_DRIVER_LSB) & GPIO_PIN5_PAD_DRIVER_MASK)
+#define GPIO_PIN5_SOURCE_MSB 0
+#define GPIO_PIN5_SOURCE_LSB 0
+#define GPIO_PIN5_SOURCE_MASK 0x00000001
+#define GPIO_PIN5_SOURCE_GET(x) (((x) & GPIO_PIN5_SOURCE_MASK) >> GPIO_PIN5_SOURCE_LSB)
+#define GPIO_PIN5_SOURCE_SET(x) (((x) << GPIO_PIN5_SOURCE_LSB) & GPIO_PIN5_SOURCE_MASK)
+
+#define GPIO_PIN6_ADDRESS 0x00000040
+#define GPIO_PIN6_OFFSET 0x00000040
+#define GPIO_PIN6_CONFIG_MSB 12
+#define GPIO_PIN6_CONFIG_LSB 11
+#define GPIO_PIN6_CONFIG_MASK 0x00001800
+#define GPIO_PIN6_CONFIG_GET(x) (((x) & GPIO_PIN6_CONFIG_MASK) >> GPIO_PIN6_CONFIG_LSB)
+#define GPIO_PIN6_CONFIG_SET(x) (((x) << GPIO_PIN6_CONFIG_LSB) & GPIO_PIN6_CONFIG_MASK)
+#define GPIO_PIN6_WAKEUP_ENABLE_MSB 10
+#define GPIO_PIN6_WAKEUP_ENABLE_LSB 10
+#define GPIO_PIN6_WAKEUP_ENABLE_MASK 0x00000400
+#define GPIO_PIN6_WAKEUP_ENABLE_GET(x) (((x) & GPIO_PIN6_WAKEUP_ENABLE_MASK) >> GPIO_PIN6_WAKEUP_ENABLE_LSB)
+#define GPIO_PIN6_WAKEUP_ENABLE_SET(x) (((x) << GPIO_PIN6_WAKEUP_ENABLE_LSB) & GPIO_PIN6_WAKEUP_ENABLE_MASK)
+#define GPIO_PIN6_INT_TYPE_MSB 9
+#define GPIO_PIN6_INT_TYPE_LSB 7
+#define GPIO_PIN6_INT_TYPE_MASK 0x00000380
+#define GPIO_PIN6_INT_TYPE_GET(x) (((x) & GPIO_PIN6_INT_TYPE_MASK) >> GPIO_PIN6_INT_TYPE_LSB)
+#define GPIO_PIN6_INT_TYPE_SET(x) (((x) << GPIO_PIN6_INT_TYPE_LSB) & GPIO_PIN6_INT_TYPE_MASK)
+#define GPIO_PIN6_PAD_DRIVER_MSB 2
+#define GPIO_PIN6_PAD_DRIVER_LSB 2
+#define GPIO_PIN6_PAD_DRIVER_MASK 0x00000004
+#define GPIO_PIN6_PAD_DRIVER_GET(x) (((x) & GPIO_PIN6_PAD_DRIVER_MASK) >> GPIO_PIN6_PAD_DRIVER_LSB)
+#define GPIO_PIN6_PAD_DRIVER_SET(x) (((x) << GPIO_PIN6_PAD_DRIVER_LSB) & GPIO_PIN6_PAD_DRIVER_MASK)
+#define GPIO_PIN6_SOURCE_MSB 0
+#define GPIO_PIN6_SOURCE_LSB 0
+#define GPIO_PIN6_SOURCE_MASK 0x00000001
+#define GPIO_PIN6_SOURCE_GET(x) (((x) & GPIO_PIN6_SOURCE_MASK) >> GPIO_PIN6_SOURCE_LSB)
+#define GPIO_PIN6_SOURCE_SET(x) (((x) << GPIO_PIN6_SOURCE_LSB) & GPIO_PIN6_SOURCE_MASK)
+
+#define GPIO_PIN7_ADDRESS 0x00000044
+#define GPIO_PIN7_OFFSET 0x00000044
+#define GPIO_PIN7_CONFIG_MSB 12
+#define GPIO_PIN7_CONFIG_LSB 11
+#define GPIO_PIN7_CONFIG_MASK 0x00001800
+#define GPIO_PIN7_CONFIG_GET(x) (((x) & GPIO_PIN7_CONFIG_MASK) >> GPIO_PIN7_CONFIG_LSB)
+#define GPIO_PIN7_CONFIG_SET(x) (((x) << GPIO_PIN7_CONFIG_LSB) & GPIO_PIN7_CONFIG_MASK)
+#define GPIO_PIN7_WAKEUP_ENABLE_MSB 10
+#define GPIO_PIN7_WAKEUP_ENABLE_LSB 10
+#define GPIO_PIN7_WAKEUP_ENABLE_MASK 0x00000400
+#define GPIO_PIN7_WAKEUP_ENABLE_GET(x) (((x) & GPIO_PIN7_WAKEUP_ENABLE_MASK) >> GPIO_PIN7_WAKEUP_ENABLE_LSB)
+#define GPIO_PIN7_WAKEUP_ENABLE_SET(x) (((x) << GPIO_PIN7_WAKEUP_ENABLE_LSB) & GPIO_PIN7_WAKEUP_ENABLE_MASK)
+#define GPIO_PIN7_INT_TYPE_MSB 9
+#define GPIO_PIN7_INT_TYPE_LSB 7
+#define GPIO_PIN7_INT_TYPE_MASK 0x00000380
+#define GPIO_PIN7_INT_TYPE_GET(x) (((x) & GPIO_PIN7_INT_TYPE_MASK) >> GPIO_PIN7_INT_TYPE_LSB)
+#define GPIO_PIN7_INT_TYPE_SET(x) (((x) << GPIO_PIN7_INT_TYPE_LSB) & GPIO_PIN7_INT_TYPE_MASK)
+#define GPIO_PIN7_PAD_DRIVER_MSB 2
+#define GPIO_PIN7_PAD_DRIVER_LSB 2
+#define GPIO_PIN7_PAD_DRIVER_MASK 0x00000004
+#define GPIO_PIN7_PAD_DRIVER_GET(x) (((x) & GPIO_PIN7_PAD_DRIVER_MASK) >> GPIO_PIN7_PAD_DRIVER_LSB)
+#define GPIO_PIN7_PAD_DRIVER_SET(x) (((x) << GPIO_PIN7_PAD_DRIVER_LSB) & GPIO_PIN7_PAD_DRIVER_MASK)
+#define GPIO_PIN7_SOURCE_MSB 0
+#define GPIO_PIN7_SOURCE_LSB 0
+#define GPIO_PIN7_SOURCE_MASK 0x00000001
+#define GPIO_PIN7_SOURCE_GET(x) (((x) & GPIO_PIN7_SOURCE_MASK) >> GPIO_PIN7_SOURCE_LSB)
+#define GPIO_PIN7_SOURCE_SET(x) (((x) << GPIO_PIN7_SOURCE_LSB) & GPIO_PIN7_SOURCE_MASK)
+
+#define GPIO_PIN8_ADDRESS 0x00000048
+#define GPIO_PIN8_OFFSET 0x00000048
+#define GPIO_PIN8_CONFIG_MSB 12
+#define GPIO_PIN8_CONFIG_LSB 11
+#define GPIO_PIN8_CONFIG_MASK 0x00001800
+#define GPIO_PIN8_CONFIG_GET(x) (((x) & GPIO_PIN8_CONFIG_MASK) >> GPIO_PIN8_CONFIG_LSB)
+#define GPIO_PIN8_CONFIG_SET(x) (((x) << GPIO_PIN8_CONFIG_LSB) & GPIO_PIN8_CONFIG_MASK)
+#define GPIO_PIN8_WAKEUP_ENABLE_MSB 10
+#define GPIO_PIN8_WAKEUP_ENABLE_LSB 10
+#define GPIO_PIN8_WAKEUP_ENABLE_MASK 0x00000400
+#define GPIO_PIN8_WAKEUP_ENABLE_GET(x) (((x) & GPIO_PIN8_WAKEUP_ENABLE_MASK) >> GPIO_PIN8_WAKEUP_ENABLE_LSB)
+#define GPIO_PIN8_WAKEUP_ENABLE_SET(x) (((x) << GPIO_PIN8_WAKEUP_ENABLE_LSB) & GPIO_PIN8_WAKEUP_ENABLE_MASK)
+#define GPIO_PIN8_INT_TYPE_MSB 9
+#define GPIO_PIN8_INT_TYPE_LSB 7
+#define GPIO_PIN8_INT_TYPE_MASK 0x00000380
+#define GPIO_PIN8_INT_TYPE_GET(x) (((x) & GPIO_PIN8_INT_TYPE_MASK) >> GPIO_PIN8_INT_TYPE_LSB)
+#define GPIO_PIN8_INT_TYPE_SET(x) (((x) << GPIO_PIN8_INT_TYPE_LSB) & GPIO_PIN8_INT_TYPE_MASK)
+#define GPIO_PIN8_PAD_DRIVER_MSB 2
+#define GPIO_PIN8_PAD_DRIVER_LSB 2
+#define GPIO_PIN8_PAD_DRIVER_MASK 0x00000004
+#define GPIO_PIN8_PAD_DRIVER_GET(x) (((x) & GPIO_PIN8_PAD_DRIVER_MASK) >> GPIO_PIN8_PAD_DRIVER_LSB)
+#define GPIO_PIN8_PAD_DRIVER_SET(x) (((x) << GPIO_PIN8_PAD_DRIVER_LSB) & GPIO_PIN8_PAD_DRIVER_MASK)
+#define GPIO_PIN8_SOURCE_MSB 0
+#define GPIO_PIN8_SOURCE_LSB 0
+#define GPIO_PIN8_SOURCE_MASK 0x00000001
+#define GPIO_PIN8_SOURCE_GET(x) (((x) & GPIO_PIN8_SOURCE_MASK) >> GPIO_PIN8_SOURCE_LSB)
+#define GPIO_PIN8_SOURCE_SET(x) (((x) << GPIO_PIN8_SOURCE_LSB) & GPIO_PIN8_SOURCE_MASK)
+
+#define GPIO_PIN9_ADDRESS 0x0000004c
+#define GPIO_PIN9_OFFSET 0x0000004c
+#define GPIO_PIN9_CONFIG_MSB 12
+#define GPIO_PIN9_CONFIG_LSB 11
+#define GPIO_PIN9_CONFIG_MASK 0x00001800
+#define GPIO_PIN9_CONFIG_GET(x) (((x) & GPIO_PIN9_CONFIG_MASK) >> GPIO_PIN9_CONFIG_LSB)
+#define GPIO_PIN9_CONFIG_SET(x) (((x) << GPIO_PIN9_CONFIG_LSB) & GPIO_PIN9_CONFIG_MASK)
+#define GPIO_PIN9_WAKEUP_ENABLE_MSB 10
+#define GPIO_PIN9_WAKEUP_ENABLE_LSB 10
+#define GPIO_PIN9_WAKEUP_ENABLE_MASK 0x00000400
+#define GPIO_PIN9_WAKEUP_ENABLE_GET(x) (((x) & GPIO_PIN9_WAKEUP_ENABLE_MASK) >> GPIO_PIN9_WAKEUP_ENABLE_LSB)
+#define GPIO_PIN9_WAKEUP_ENABLE_SET(x) (((x) << GPIO_PIN9_WAKEUP_ENABLE_LSB) & GPIO_PIN9_WAKEUP_ENABLE_MASK)
+#define GPIO_PIN9_INT_TYPE_MSB 9
+#define GPIO_PIN9_INT_TYPE_LSB 7
+#define GPIO_PIN9_INT_TYPE_MASK 0x00000380
+#define GPIO_PIN9_INT_TYPE_GET(x) (((x) & GPIO_PIN9_INT_TYPE_MASK) >> GPIO_PIN9_INT_TYPE_LSB)
+#define GPIO_PIN9_INT_TYPE_SET(x) (((x) << GPIO_PIN9_INT_TYPE_LSB) & GPIO_PIN9_INT_TYPE_MASK)
+#define GPIO_PIN9_PAD_DRIVER_MSB 2
+#define GPIO_PIN9_PAD_DRIVER_LSB 2
+#define GPIO_PIN9_PAD_DRIVER_MASK 0x00000004
+#define GPIO_PIN9_PAD_DRIVER_GET(x) (((x) & GPIO_PIN9_PAD_DRIVER_MASK) >> GPIO_PIN9_PAD_DRIVER_LSB)
+#define GPIO_PIN9_PAD_DRIVER_SET(x) (((x) << GPIO_PIN9_PAD_DRIVER_LSB) & GPIO_PIN9_PAD_DRIVER_MASK)
+#define GPIO_PIN9_SOURCE_MSB 0
+#define GPIO_PIN9_SOURCE_LSB 0
+#define GPIO_PIN9_SOURCE_MASK 0x00000001
+#define GPIO_PIN9_SOURCE_GET(x) (((x) & GPIO_PIN9_SOURCE_MASK) >> GPIO_PIN9_SOURCE_LSB)
+#define GPIO_PIN9_SOURCE_SET(x) (((x) << GPIO_PIN9_SOURCE_LSB) & GPIO_PIN9_SOURCE_MASK)
+
+#define GPIO_PIN10_ADDRESS 0x00000050
+#define GPIO_PIN10_OFFSET 0x00000050
+#define GPIO_PIN10_CONFIG_MSB 12
+#define GPIO_PIN10_CONFIG_LSB 11
+#define GPIO_PIN10_CONFIG_MASK 0x00001800
+#define GPIO_PIN10_CONFIG_GET(x) (((x) & GPIO_PIN10_CONFIG_MASK) >> GPIO_PIN10_CONFIG_LSB)
+#define GPIO_PIN10_CONFIG_SET(x) (((x) << GPIO_PIN10_CONFIG_LSB) & GPIO_PIN10_CONFIG_MASK)
+#define GPIO_PIN10_WAKEUP_ENABLE_MSB 10
+#define GPIO_PIN10_WAKEUP_ENABLE_LSB 10
+#define GPIO_PIN10_WAKEUP_ENABLE_MASK 0x00000400
+#define GPIO_PIN10_WAKEUP_ENABLE_GET(x) (((x) & GPIO_PIN10_WAKEUP_ENABLE_MASK) >> GPIO_PIN10_WAKEUP_ENABLE_LSB)
+#define GPIO_PIN10_WAKEUP_ENABLE_SET(x) (((x) << GPIO_PIN10_WAKEUP_ENABLE_LSB) & GPIO_PIN10_WAKEUP_ENABLE_MASK)
+#define GPIO_PIN10_INT_TYPE_MSB 9
+#define GPIO_PIN10_INT_TYPE_LSB 7
+#define GPIO_PIN10_INT_TYPE_MASK 0x00000380
+#define GPIO_PIN10_INT_TYPE_GET(x) (((x) & GPIO_PIN10_INT_TYPE_MASK) >> GPIO_PIN10_INT_TYPE_LSB)
+#define GPIO_PIN10_INT_TYPE_SET(x) (((x) << GPIO_PIN10_INT_TYPE_LSB) & GPIO_PIN10_INT_TYPE_MASK)
+#define GPIO_PIN10_PAD_DRIVER_MSB 2
+#define GPIO_PIN10_PAD_DRIVER_LSB 2
+#define GPIO_PIN10_PAD_DRIVER_MASK 0x00000004
+#define GPIO_PIN10_PAD_DRIVER_GET(x) (((x) & GPIO_PIN10_PAD_DRIVER_MASK) >> GPIO_PIN10_PAD_DRIVER_LSB)
+#define GPIO_PIN10_PAD_DRIVER_SET(x) (((x) << GPIO_PIN10_PAD_DRIVER_LSB) & GPIO_PIN10_PAD_DRIVER_MASK)
+#define GPIO_PIN10_SOURCE_MSB 0
+#define GPIO_PIN10_SOURCE_LSB 0
+#define GPIO_PIN10_SOURCE_MASK 0x00000001
+#define GPIO_PIN10_SOURCE_GET(x) (((x) & GPIO_PIN10_SOURCE_MASK) >> GPIO_PIN10_SOURCE_LSB)
+#define GPIO_PIN10_SOURCE_SET(x) (((x) << GPIO_PIN10_SOURCE_LSB) & GPIO_PIN10_SOURCE_MASK)
+
+#define GPIO_PIN11_ADDRESS 0x00000054
+#define GPIO_PIN11_OFFSET 0x00000054
+#define GPIO_PIN11_CONFIG_MSB 12
+#define GPIO_PIN11_CONFIG_LSB 11
+#define GPIO_PIN11_CONFIG_MASK 0x00001800
+#define GPIO_PIN11_CONFIG_GET(x) (((x) & GPIO_PIN11_CONFIG_MASK) >> GPIO_PIN11_CONFIG_LSB)
+#define GPIO_PIN11_CONFIG_SET(x) (((x) << GPIO_PIN11_CONFIG_LSB) & GPIO_PIN11_CONFIG_MASK)
+#define GPIO_PIN11_WAKEUP_ENABLE_MSB 10
+#define GPIO_PIN11_WAKEUP_ENABLE_LSB 10
+#define GPIO_PIN11_WAKEUP_ENABLE_MASK 0x00000400
+#define GPIO_PIN11_WAKEUP_ENABLE_GET(x) (((x) & GPIO_PIN11_WAKEUP_ENABLE_MASK) >> GPIO_PIN11_WAKEUP_ENABLE_LSB)
+#define GPIO_PIN11_WAKEUP_ENABLE_SET(x) (((x) << GPIO_PIN11_WAKEUP_ENABLE_LSB) & GPIO_PIN11_WAKEUP_ENABLE_MASK)
+#define GPIO_PIN11_INT_TYPE_MSB 9
+#define GPIO_PIN11_INT_TYPE_LSB 7
+#define GPIO_PIN11_INT_TYPE_MASK 0x00000380
+#define GPIO_PIN11_INT_TYPE_GET(x) (((x) & GPIO_PIN11_INT_TYPE_MASK) >> GPIO_PIN11_INT_TYPE_LSB)
+#define GPIO_PIN11_INT_TYPE_SET(x) (((x) << GPIO_PIN11_INT_TYPE_LSB) & GPIO_PIN11_INT_TYPE_MASK)
+#define GPIO_PIN11_PAD_DRIVER_MSB 2
+#define GPIO_PIN11_PAD_DRIVER_LSB 2
+#define GPIO_PIN11_PAD_DRIVER_MASK 0x00000004
+#define GPIO_PIN11_PAD_DRIVER_GET(x) (((x) & GPIO_PIN11_PAD_DRIVER_MASK) >> GPIO_PIN11_PAD_DRIVER_LSB)
+#define GPIO_PIN11_PAD_DRIVER_SET(x) (((x) << GPIO_PIN11_PAD_DRIVER_LSB) & GPIO_PIN11_PAD_DRIVER_MASK)
+#define GPIO_PIN11_SOURCE_MSB 0
+#define GPIO_PIN11_SOURCE_LSB 0
+#define GPIO_PIN11_SOURCE_MASK 0x00000001
+#define GPIO_PIN11_SOURCE_GET(x) (((x) & GPIO_PIN11_SOURCE_MASK) >> GPIO_PIN11_SOURCE_LSB)
+#define GPIO_PIN11_SOURCE_SET(x) (((x) << GPIO_PIN11_SOURCE_LSB) & GPIO_PIN11_SOURCE_MASK)
+
+#define GPIO_PIN12_ADDRESS 0x00000058
+#define GPIO_PIN12_OFFSET 0x00000058
+#define GPIO_PIN12_CONFIG_MSB 12
+#define GPIO_PIN12_CONFIG_LSB 11
+#define GPIO_PIN12_CONFIG_MASK 0x00001800
+#define GPIO_PIN12_CONFIG_GET(x) (((x) & GPIO_PIN12_CONFIG_MASK) >> GPIO_PIN12_CONFIG_LSB)
+#define GPIO_PIN12_CONFIG_SET(x) (((x) << GPIO_PIN12_CONFIG_LSB) & GPIO_PIN12_CONFIG_MASK)
+#define GPIO_PIN12_WAKEUP_ENABLE_MSB 10
+#define GPIO_PIN12_WAKEUP_ENABLE_LSB 10
+#define GPIO_PIN12_WAKEUP_ENABLE_MASK 0x00000400
+#define GPIO_PIN12_WAKEUP_ENABLE_GET(x) (((x) & GPIO_PIN12_WAKEUP_ENABLE_MASK) >> GPIO_PIN12_WAKEUP_ENABLE_LSB)
+#define GPIO_PIN12_WAKEUP_ENABLE_SET(x) (((x) << GPIO_PIN12_WAKEUP_ENABLE_LSB) & GPIO_PIN12_WAKEUP_ENABLE_MASK)
+#define GPIO_PIN12_INT_TYPE_MSB 9
+#define GPIO_PIN12_INT_TYPE_LSB 7
+#define GPIO_PIN12_INT_TYPE_MASK 0x00000380
+#define GPIO_PIN12_INT_TYPE_GET(x) (((x) & GPIO_PIN12_INT_TYPE_MASK) >> GPIO_PIN12_INT_TYPE_LSB)
+#define GPIO_PIN12_INT_TYPE_SET(x) (((x) << GPIO_PIN12_INT_TYPE_LSB) & GPIO_PIN12_INT_TYPE_MASK)
+#define GPIO_PIN12_PAD_DRIVER_MSB 2
+#define GPIO_PIN12_PAD_DRIVER_LSB 2
+#define GPIO_PIN12_PAD_DRIVER_MASK 0x00000004
+#define GPIO_PIN12_PAD_DRIVER_GET(x) (((x) & GPIO_PIN12_PAD_DRIVER_MASK) >> GPIO_PIN12_PAD_DRIVER_LSB)
+#define GPIO_PIN12_PAD_DRIVER_SET(x) (((x) << GPIO_PIN12_PAD_DRIVER_LSB) & GPIO_PIN12_PAD_DRIVER_MASK)
+#define GPIO_PIN12_SOURCE_MSB 0
+#define GPIO_PIN12_SOURCE_LSB 0
+#define GPIO_PIN12_SOURCE_MASK 0x00000001
+#define GPIO_PIN12_SOURCE_GET(x) (((x) & GPIO_PIN12_SOURCE_MASK) >> GPIO_PIN12_SOURCE_LSB)
+#define GPIO_PIN12_SOURCE_SET(x) (((x) << GPIO_PIN12_SOURCE_LSB) & GPIO_PIN12_SOURCE_MASK)
+
+#define GPIO_PIN13_ADDRESS 0x0000005c
+#define GPIO_PIN13_OFFSET 0x0000005c
+#define GPIO_PIN13_CONFIG_MSB 12
+#define GPIO_PIN13_CONFIG_LSB 11
+#define GPIO_PIN13_CONFIG_MASK 0x00001800
+#define GPIO_PIN13_CONFIG_GET(x) (((x) & GPIO_PIN13_CONFIG_MASK) >> GPIO_PIN13_CONFIG_LSB)
+#define GPIO_PIN13_CONFIG_SET(x) (((x) << GPIO_PIN13_CONFIG_LSB) & GPIO_PIN13_CONFIG_MASK)
+#define GPIO_PIN13_WAKEUP_ENABLE_MSB 10
+#define GPIO_PIN13_WAKEUP_ENABLE_LSB 10
+#define GPIO_PIN13_WAKEUP_ENABLE_MASK 0x00000400
+#define GPIO_PIN13_WAKEUP_ENABLE_GET(x) (((x) & GPIO_PIN13_WAKEUP_ENABLE_MASK) >> GPIO_PIN13_WAKEUP_ENABLE_LSB)
+#define GPIO_PIN13_WAKEUP_ENABLE_SET(x) (((x) << GPIO_PIN13_WAKEUP_ENABLE_LSB) & GPIO_PIN13_WAKEUP_ENABLE_MASK)
+#define GPIO_PIN13_INT_TYPE_MSB 9
+#define GPIO_PIN13_INT_TYPE_LSB 7
+#define GPIO_PIN13_INT_TYPE_MASK 0x00000380
+#define GPIO_PIN13_INT_TYPE_GET(x) (((x) & GPIO_PIN13_INT_TYPE_MASK) >> GPIO_PIN13_INT_TYPE_LSB)
+#define GPIO_PIN13_INT_TYPE_SET(x) (((x) << GPIO_PIN13_INT_TYPE_LSB) & GPIO_PIN13_INT_TYPE_MASK)
+#define GPIO_PIN13_PAD_DRIVER_MSB 2
+#define GPIO_PIN13_PAD_DRIVER_LSB 2
+#define GPIO_PIN13_PAD_DRIVER_MASK 0x00000004
+#define GPIO_PIN13_PAD_DRIVER_GET(x) (((x) & GPIO_PIN13_PAD_DRIVER_MASK) >> GPIO_PIN13_PAD_DRIVER_LSB)
+#define GPIO_PIN13_PAD_DRIVER_SET(x) (((x) << GPIO_PIN13_PAD_DRIVER_LSB) & GPIO_PIN13_PAD_DRIVER_MASK)
+#define GPIO_PIN13_SOURCE_MSB 0
+#define GPIO_PIN13_SOURCE_LSB 0
+#define GPIO_PIN13_SOURCE_MASK 0x00000001
+#define GPIO_PIN13_SOURCE_GET(x) (((x) & GPIO_PIN13_SOURCE_MASK) >> GPIO_PIN13_SOURCE_LSB)
+#define GPIO_PIN13_SOURCE_SET(x) (((x) << GPIO_PIN13_SOURCE_LSB) & GPIO_PIN13_SOURCE_MASK)
+
+#define GPIO_PIN14_ADDRESS 0x00000060
+#define GPIO_PIN14_OFFSET 0x00000060
+#define GPIO_PIN14_CONFIG_MSB 12
+#define GPIO_PIN14_CONFIG_LSB 11
+#define GPIO_PIN14_CONFIG_MASK 0x00001800
+#define GPIO_PIN14_CONFIG_GET(x) (((x) & GPIO_PIN14_CONFIG_MASK) >> GPIO_PIN14_CONFIG_LSB)
+#define GPIO_PIN14_CONFIG_SET(x) (((x) << GPIO_PIN14_CONFIG_LSB) & GPIO_PIN14_CONFIG_MASK)
+#define GPIO_PIN14_WAKEUP_ENABLE_MSB 10
+#define GPIO_PIN14_WAKEUP_ENABLE_LSB 10
+#define GPIO_PIN14_WAKEUP_ENABLE_MASK 0x00000400
+#define GPIO_PIN14_WAKEUP_ENABLE_GET(x) (((x) & GPIO_PIN14_WAKEUP_ENABLE_MASK) >> GPIO_PIN14_WAKEUP_ENABLE_LSB)
+#define GPIO_PIN14_WAKEUP_ENABLE_SET(x) (((x) << GPIO_PIN14_WAKEUP_ENABLE_LSB) & GPIO_PIN14_WAKEUP_ENABLE_MASK)
+#define GPIO_PIN14_INT_TYPE_MSB 9
+#define GPIO_PIN14_INT_TYPE_LSB 7
+#define GPIO_PIN14_INT_TYPE_MASK 0x00000380
+#define GPIO_PIN14_INT_TYPE_GET(x) (((x) & GPIO_PIN14_INT_TYPE_MASK) >> GPIO_PIN14_INT_TYPE_LSB)
+#define GPIO_PIN14_INT_TYPE_SET(x) (((x) << GPIO_PIN14_INT_TYPE_LSB) & GPIO_PIN14_INT_TYPE_MASK)
+#define GPIO_PIN14_PAD_DRIVER_MSB 2
+#define GPIO_PIN14_PAD_DRIVER_LSB 2
+#define GPIO_PIN14_PAD_DRIVER_MASK 0x00000004
+#define GPIO_PIN14_PAD_DRIVER_GET(x) (((x) & GPIO_PIN14_PAD_DRIVER_MASK) >> GPIO_PIN14_PAD_DRIVER_LSB)
+#define GPIO_PIN14_PAD_DRIVER_SET(x) (((x) << GPIO_PIN14_PAD_DRIVER_LSB) & GPIO_PIN14_PAD_DRIVER_MASK)
+#define GPIO_PIN14_SOURCE_MSB 0
+#define GPIO_PIN14_SOURCE_LSB 0
+#define GPIO_PIN14_SOURCE_MASK 0x00000001
+#define GPIO_PIN14_SOURCE_GET(x) (((x) & GPIO_PIN14_SOURCE_MASK) >> GPIO_PIN14_SOURCE_LSB)
+#define GPIO_PIN14_SOURCE_SET(x) (((x) << GPIO_PIN14_SOURCE_LSB) & GPIO_PIN14_SOURCE_MASK)
+
+#define GPIO_PIN15_ADDRESS 0x00000064
+#define GPIO_PIN15_OFFSET 0x00000064
+#define GPIO_PIN15_CONFIG_MSB 12
+#define GPIO_PIN15_CONFIG_LSB 11
+#define GPIO_PIN15_CONFIG_MASK 0x00001800
+#define GPIO_PIN15_CONFIG_GET(x) (((x) & GPIO_PIN15_CONFIG_MASK) >> GPIO_PIN15_CONFIG_LSB)
+#define GPIO_PIN15_CONFIG_SET(x) (((x) << GPIO_PIN15_CONFIG_LSB) & GPIO_PIN15_CONFIG_MASK)
+#define GPIO_PIN15_WAKEUP_ENABLE_MSB 10
+#define GPIO_PIN15_WAKEUP_ENABLE_LSB 10
+#define GPIO_PIN15_WAKEUP_ENABLE_MASK 0x00000400
+#define GPIO_PIN15_WAKEUP_ENABLE_GET(x) (((x) & GPIO_PIN15_WAKEUP_ENABLE_MASK) >> GPIO_PIN15_WAKEUP_ENABLE_LSB)
+#define GPIO_PIN15_WAKEUP_ENABLE_SET(x) (((x) << GPIO_PIN15_WAKEUP_ENABLE_LSB) & GPIO_PIN15_WAKEUP_ENABLE_MASK)
+#define GPIO_PIN15_INT_TYPE_MSB 9
+#define GPIO_PIN15_INT_TYPE_LSB 7
+#define GPIO_PIN15_INT_TYPE_MASK 0x00000380
+#define GPIO_PIN15_INT_TYPE_GET(x) (((x) & GPIO_PIN15_INT_TYPE_MASK) >> GPIO_PIN15_INT_TYPE_LSB)
+#define GPIO_PIN15_INT_TYPE_SET(x) (((x) << GPIO_PIN15_INT_TYPE_LSB) & GPIO_PIN15_INT_TYPE_MASK)
+#define GPIO_PIN15_PAD_DRIVER_MSB 2
+#define GPIO_PIN15_PAD_DRIVER_LSB 2
+#define GPIO_PIN15_PAD_DRIVER_MASK 0x00000004
+#define GPIO_PIN15_PAD_DRIVER_GET(x) (((x) & GPIO_PIN15_PAD_DRIVER_MASK) >> GPIO_PIN15_PAD_DRIVER_LSB)
+#define GPIO_PIN15_PAD_DRIVER_SET(x) (((x) << GPIO_PIN15_PAD_DRIVER_LSB) & GPIO_PIN15_PAD_DRIVER_MASK)
+#define GPIO_PIN15_SOURCE_MSB 0
+#define GPIO_PIN15_SOURCE_LSB 0
+#define GPIO_PIN15_SOURCE_MASK 0x00000001
+#define GPIO_PIN15_SOURCE_GET(x) (((x) & GPIO_PIN15_SOURCE_MASK) >> GPIO_PIN15_SOURCE_LSB)
+#define GPIO_PIN15_SOURCE_SET(x) (((x) << GPIO_PIN15_SOURCE_LSB) & GPIO_PIN15_SOURCE_MASK)
+
+#define GPIO_PIN16_ADDRESS 0x00000068
+#define GPIO_PIN16_OFFSET 0x00000068
+#define GPIO_PIN16_CONFIG_MSB 12
+#define GPIO_PIN16_CONFIG_LSB 11
+#define GPIO_PIN16_CONFIG_MASK 0x00001800
+#define GPIO_PIN16_CONFIG_GET(x) (((x) & GPIO_PIN16_CONFIG_MASK) >> GPIO_PIN16_CONFIG_LSB)
+#define GPIO_PIN16_CONFIG_SET(x) (((x) << GPIO_PIN16_CONFIG_LSB) & GPIO_PIN16_CONFIG_MASK)
+#define GPIO_PIN16_WAKEUP_ENABLE_MSB 10
+#define GPIO_PIN16_WAKEUP_ENABLE_LSB 10
+#define GPIO_PIN16_WAKEUP_ENABLE_MASK 0x00000400
+#define GPIO_PIN16_WAKEUP_ENABLE_GET(x) (((x) & GPIO_PIN16_WAKEUP_ENABLE_MASK) >> GPIO_PIN16_WAKEUP_ENABLE_LSB)
+#define GPIO_PIN16_WAKEUP_ENABLE_SET(x) (((x) << GPIO_PIN16_WAKEUP_ENABLE_LSB) & GPIO_PIN16_WAKEUP_ENABLE_MASK)
+#define GPIO_PIN16_INT_TYPE_MSB 9
+#define GPIO_PIN16_INT_TYPE_LSB 7
+#define GPIO_PIN16_INT_TYPE_MASK 0x00000380
+#define GPIO_PIN16_INT_TYPE_GET(x) (((x) & GPIO_PIN16_INT_TYPE_MASK) >> GPIO_PIN16_INT_TYPE_LSB)
+#define GPIO_PIN16_INT_TYPE_SET(x) (((x) << GPIO_PIN16_INT_TYPE_LSB) & GPIO_PIN16_INT_TYPE_MASK)
+#define GPIO_PIN16_PAD_DRIVER_MSB 2
+#define GPIO_PIN16_PAD_DRIVER_LSB 2
+#define GPIO_PIN16_PAD_DRIVER_MASK 0x00000004
+#define GPIO_PIN16_PAD_DRIVER_GET(x) (((x) & GPIO_PIN16_PAD_DRIVER_MASK) >> GPIO_PIN16_PAD_DRIVER_LSB)
+#define GPIO_PIN16_PAD_DRIVER_SET(x) (((x) << GPIO_PIN16_PAD_DRIVER_LSB) & GPIO_PIN16_PAD_DRIVER_MASK)
+#define GPIO_PIN16_SOURCE_MSB 0
+#define GPIO_PIN16_SOURCE_LSB 0
+#define GPIO_PIN16_SOURCE_MASK 0x00000001
+#define GPIO_PIN16_SOURCE_GET(x) (((x) & GPIO_PIN16_SOURCE_MASK) >> GPIO_PIN16_SOURCE_LSB)
+#define GPIO_PIN16_SOURCE_SET(x) (((x) << GPIO_PIN16_SOURCE_LSB) & GPIO_PIN16_SOURCE_MASK)
+
+#define GPIO_PIN17_ADDRESS 0x0000006c
+#define GPIO_PIN17_OFFSET 0x0000006c
+#define GPIO_PIN17_CONFIG_MSB 12
+#define GPIO_PIN17_CONFIG_LSB 11
+#define GPIO_PIN17_CONFIG_MASK 0x00001800
+#define GPIO_PIN17_CONFIG_GET(x) (((x) & GPIO_PIN17_CONFIG_MASK) >> GPIO_PIN17_CONFIG_LSB)
+#define GPIO_PIN17_CONFIG_SET(x) (((x) << GPIO_PIN17_CONFIG_LSB) & GPIO_PIN17_CONFIG_MASK)
+#define GPIO_PIN17_WAKEUP_ENABLE_MSB 10
+#define GPIO_PIN17_WAKEUP_ENABLE_LSB 10
+#define GPIO_PIN17_WAKEUP_ENABLE_MASK 0x00000400
+#define GPIO_PIN17_WAKEUP_ENABLE_GET(x) (((x) & GPIO_PIN17_WAKEUP_ENABLE_MASK) >> GPIO_PIN17_WAKEUP_ENABLE_LSB)
+#define GPIO_PIN17_WAKEUP_ENABLE_SET(x) (((x) << GPIO_PIN17_WAKEUP_ENABLE_LSB) & GPIO_PIN17_WAKEUP_ENABLE_MASK)
+#define GPIO_PIN17_INT_TYPE_MSB 9
+#define GPIO_PIN17_INT_TYPE_LSB 7
+#define GPIO_PIN17_INT_TYPE_MASK 0x00000380
+#define GPIO_PIN17_INT_TYPE_GET(x) (((x) & GPIO_PIN17_INT_TYPE_MASK) >> GPIO_PIN17_INT_TYPE_LSB)
+#define GPIO_PIN17_INT_TYPE_SET(x) (((x) << GPIO_PIN17_INT_TYPE_LSB) & GPIO_PIN17_INT_TYPE_MASK)
+#define GPIO_PIN17_PAD_DRIVER_MSB 2
+#define GPIO_PIN17_PAD_DRIVER_LSB 2
+#define GPIO_PIN17_PAD_DRIVER_MASK 0x00000004
+#define GPIO_PIN17_PAD_DRIVER_GET(x) (((x) & GPIO_PIN17_PAD_DRIVER_MASK) >> GPIO_PIN17_PAD_DRIVER_LSB)
+#define GPIO_PIN17_PAD_DRIVER_SET(x) (((x) << GPIO_PIN17_PAD_DRIVER_LSB) & GPIO_PIN17_PAD_DRIVER_MASK)
+#define GPIO_PIN17_SOURCE_MSB 0
+#define GPIO_PIN17_SOURCE_LSB 0
+#define GPIO_PIN17_SOURCE_MASK 0x00000001
+#define GPIO_PIN17_SOURCE_GET(x) (((x) & GPIO_PIN17_SOURCE_MASK) >> GPIO_PIN17_SOURCE_LSB)
+#define GPIO_PIN17_SOURCE_SET(x) (((x) << GPIO_PIN17_SOURCE_LSB) & GPIO_PIN17_SOURCE_MASK)
+
+#define SDIO_PIN_ADDRESS 0x00000070
+#define SDIO_PIN_OFFSET 0x00000070
+#define SDIO_PIN_PAD_PULL_MSB 3
+#define SDIO_PIN_PAD_PULL_LSB 2
+#define SDIO_PIN_PAD_PULL_MASK 0x0000000c
+#define SDIO_PIN_PAD_PULL_GET(x) (((x) & SDIO_PIN_PAD_PULL_MASK) >> SDIO_PIN_PAD_PULL_LSB)
+#define SDIO_PIN_PAD_PULL_SET(x) (((x) << SDIO_PIN_PAD_PULL_LSB) & SDIO_PIN_PAD_PULL_MASK)
+#define SDIO_PIN_PAD_STRENGTH_MSB 1
+#define SDIO_PIN_PAD_STRENGTH_LSB 0
+#define SDIO_PIN_PAD_STRENGTH_MASK 0x00000003
+#define SDIO_PIN_PAD_STRENGTH_GET(x) (((x) & SDIO_PIN_PAD_STRENGTH_MASK) >> SDIO_PIN_PAD_STRENGTH_LSB)
+#define SDIO_PIN_PAD_STRENGTH_SET(x) (((x) << SDIO_PIN_PAD_STRENGTH_LSB) & SDIO_PIN_PAD_STRENGTH_MASK)
+
+#define CLK_REQ_PIN_ADDRESS 0x00000074
+#define CLK_REQ_PIN_OFFSET 0x00000074
+#define CLK_REQ_PIN_ATE_OE_L_MSB 4
+#define CLK_REQ_PIN_ATE_OE_L_LSB 4
+#define CLK_REQ_PIN_ATE_OE_L_MASK 0x00000010
+#define CLK_REQ_PIN_ATE_OE_L_GET(x) (((x) & CLK_REQ_PIN_ATE_OE_L_MASK) >> CLK_REQ_PIN_ATE_OE_L_LSB)
+#define CLK_REQ_PIN_ATE_OE_L_SET(x) (((x) << CLK_REQ_PIN_ATE_OE_L_LSB) & CLK_REQ_PIN_ATE_OE_L_MASK)
+#define CLK_REQ_PIN_PAD_PULL_MSB 3
+#define CLK_REQ_PIN_PAD_PULL_LSB 2
+#define CLK_REQ_PIN_PAD_PULL_MASK 0x0000000c
+#define CLK_REQ_PIN_PAD_PULL_GET(x) (((x) & CLK_REQ_PIN_PAD_PULL_MASK) >> CLK_REQ_PIN_PAD_PULL_LSB)
+#define CLK_REQ_PIN_PAD_PULL_SET(x) (((x) << CLK_REQ_PIN_PAD_PULL_LSB) & CLK_REQ_PIN_PAD_PULL_MASK)
+#define CLK_REQ_PIN_PAD_STRENGTH_MSB 1
+#define CLK_REQ_PIN_PAD_STRENGTH_LSB 0
+#define CLK_REQ_PIN_PAD_STRENGTH_MASK 0x00000003
+#define CLK_REQ_PIN_PAD_STRENGTH_GET(x) (((x) & CLK_REQ_PIN_PAD_STRENGTH_MASK) >> CLK_REQ_PIN_PAD_STRENGTH_LSB)
+#define CLK_REQ_PIN_PAD_STRENGTH_SET(x) (((x) << CLK_REQ_PIN_PAD_STRENGTH_LSB) & CLK_REQ_PIN_PAD_STRENGTH_MASK)
+
+#define SIGMA_DELTA_ADDRESS 0x00000078
+#define SIGMA_DELTA_OFFSET 0x00000078
+#define SIGMA_DELTA_ENABLE_MSB 16
+#define SIGMA_DELTA_ENABLE_LSB 16
+#define SIGMA_DELTA_ENABLE_MASK 0x00010000
+#define SIGMA_DELTA_ENABLE_GET(x) (((x) & SIGMA_DELTA_ENABLE_MASK) >> SIGMA_DELTA_ENABLE_LSB)
+#define SIGMA_DELTA_ENABLE_SET(x) (((x) << SIGMA_DELTA_ENABLE_LSB) & SIGMA_DELTA_ENABLE_MASK)
+#define SIGMA_DELTA_PRESCALAR_MSB 15
+#define SIGMA_DELTA_PRESCALAR_LSB 8
+#define SIGMA_DELTA_PRESCALAR_MASK 0x0000ff00
+#define SIGMA_DELTA_PRESCALAR_GET(x) (((x) & SIGMA_DELTA_PRESCALAR_MASK) >> SIGMA_DELTA_PRESCALAR_LSB)
+#define SIGMA_DELTA_PRESCALAR_SET(x) (((x) << SIGMA_DELTA_PRESCALAR_LSB) & SIGMA_DELTA_PRESCALAR_MASK)
+#define SIGMA_DELTA_TARGET_MSB 7
+#define SIGMA_DELTA_TARGET_LSB 0
+#define SIGMA_DELTA_TARGET_MASK 0x000000ff
+#define SIGMA_DELTA_TARGET_GET(x) (((x) & SIGMA_DELTA_TARGET_MASK) >> SIGMA_DELTA_TARGET_LSB)
+#define SIGMA_DELTA_TARGET_SET(x) (((x) << SIGMA_DELTA_TARGET_LSB) & SIGMA_DELTA_TARGET_MASK)
+
+#define DEBUG_CONTROL_ADDRESS 0x0000007c
+#define DEBUG_CONTROL_OFFSET 0x0000007c
+#define DEBUG_CONTROL_OBS_OE_L_MSB 1
+#define DEBUG_CONTROL_OBS_OE_L_LSB 1
+#define DEBUG_CONTROL_OBS_OE_L_MASK 0x00000002
+#define DEBUG_CONTROL_OBS_OE_L_GET(x) (((x) & DEBUG_CONTROL_OBS_OE_L_MASK) >> DEBUG_CONTROL_OBS_OE_L_LSB)
+#define DEBUG_CONTROL_OBS_OE_L_SET(x) (((x) << DEBUG_CONTROL_OBS_OE_L_LSB) & DEBUG_CONTROL_OBS_OE_L_MASK)
+#define DEBUG_CONTROL_ENABLE_MSB 0
+#define DEBUG_CONTROL_ENABLE_LSB 0
+#define DEBUG_CONTROL_ENABLE_MASK 0x00000001
+#define DEBUG_CONTROL_ENABLE_GET(x) (((x) & DEBUG_CONTROL_ENABLE_MASK) >> DEBUG_CONTROL_ENABLE_LSB)
+#define DEBUG_CONTROL_ENABLE_SET(x) (((x) << DEBUG_CONTROL_ENABLE_LSB) & DEBUG_CONTROL_ENABLE_MASK)
+
+#define DEBUG_INPUT_SEL_ADDRESS 0x00000080
+#define DEBUG_INPUT_SEL_OFFSET 0x00000080
+#define DEBUG_INPUT_SEL_SRC_MSB 3
+#define DEBUG_INPUT_SEL_SRC_LSB 0
+#define DEBUG_INPUT_SEL_SRC_MASK 0x0000000f
+#define DEBUG_INPUT_SEL_SRC_GET(x) (((x) & DEBUG_INPUT_SEL_SRC_MASK) >> DEBUG_INPUT_SEL_SRC_LSB)
+#define DEBUG_INPUT_SEL_SRC_SET(x) (((x) << DEBUG_INPUT_SEL_SRC_LSB) & DEBUG_INPUT_SEL_SRC_MASK)
+
+#define DEBUG_OUT_ADDRESS 0x00000084
+#define DEBUG_OUT_OFFSET 0x00000084
+#define DEBUG_OUT_DATA_MSB 17
+#define DEBUG_OUT_DATA_LSB 0
+#define DEBUG_OUT_DATA_MASK 0x0003ffff
+#define DEBUG_OUT_DATA_GET(x) (((x) & DEBUG_OUT_DATA_MASK) >> DEBUG_OUT_DATA_LSB)
+#define DEBUG_OUT_DATA_SET(x) (((x) << DEBUG_OUT_DATA_LSB) & DEBUG_OUT_DATA_MASK)
+
+#define LA_CONTROL_ADDRESS 0x00000088
+#define LA_CONTROL_OFFSET 0x00000088
+#define LA_CONTROL_RUN_MSB 1
+#define LA_CONTROL_RUN_LSB 1
+#define LA_CONTROL_RUN_MASK 0x00000002
+#define LA_CONTROL_RUN_GET(x) (((x) & LA_CONTROL_RUN_MASK) >> LA_CONTROL_RUN_LSB)
+#define LA_CONTROL_RUN_SET(x) (((x) << LA_CONTROL_RUN_LSB) & LA_CONTROL_RUN_MASK)
+#define LA_CONTROL_TRIGGERED_MSB 0
+#define LA_CONTROL_TRIGGERED_LSB 0
+#define LA_CONTROL_TRIGGERED_MASK 0x00000001
+#define LA_CONTROL_TRIGGERED_GET(x) (((x) & LA_CONTROL_TRIGGERED_MASK) >> LA_CONTROL_TRIGGERED_LSB)
+#define LA_CONTROL_TRIGGERED_SET(x) (((x) << LA_CONTROL_TRIGGERED_LSB) & LA_CONTROL_TRIGGERED_MASK)
+
+#define LA_CLOCK_ADDRESS 0x0000008c
+#define LA_CLOCK_OFFSET 0x0000008c
+#define LA_CLOCK_DIV_MSB 7
+#define LA_CLOCK_DIV_LSB 0
+#define LA_CLOCK_DIV_MASK 0x000000ff
+#define LA_CLOCK_DIV_GET(x) (((x) & LA_CLOCK_DIV_MASK) >> LA_CLOCK_DIV_LSB)
+#define LA_CLOCK_DIV_SET(x) (((x) << LA_CLOCK_DIV_LSB) & LA_CLOCK_DIV_MASK)
+
+#define LA_STATUS_ADDRESS 0x00000090
+#define LA_STATUS_OFFSET 0x00000090
+#define LA_STATUS_INTERRUPT_MSB 0
+#define LA_STATUS_INTERRUPT_LSB 0
+#define LA_STATUS_INTERRUPT_MASK 0x00000001
+#define LA_STATUS_INTERRUPT_GET(x) (((x) & LA_STATUS_INTERRUPT_MASK) >> LA_STATUS_INTERRUPT_LSB)
+#define LA_STATUS_INTERRUPT_SET(x) (((x) << LA_STATUS_INTERRUPT_LSB) & LA_STATUS_INTERRUPT_MASK)
+
+#define LA_TRIGGER_SAMPLE_ADDRESS 0x00000094
+#define LA_TRIGGER_SAMPLE_OFFSET 0x00000094
+#define LA_TRIGGER_SAMPLE_COUNT_MSB 15
+#define LA_TRIGGER_SAMPLE_COUNT_LSB 0
+#define LA_TRIGGER_SAMPLE_COUNT_MASK 0x0000ffff
+#define LA_TRIGGER_SAMPLE_COUNT_GET(x) (((x) & LA_TRIGGER_SAMPLE_COUNT_MASK) >> LA_TRIGGER_SAMPLE_COUNT_LSB)
+#define LA_TRIGGER_SAMPLE_COUNT_SET(x) (((x) << LA_TRIGGER_SAMPLE_COUNT_LSB) & LA_TRIGGER_SAMPLE_COUNT_MASK)
+
+#define LA_TRIGGER_POSITION_ADDRESS 0x00000098
+#define LA_TRIGGER_POSITION_OFFSET 0x00000098
+#define LA_TRIGGER_POSITION_VALUE_MSB 15
+#define LA_TRIGGER_POSITION_VALUE_LSB 0
+#define LA_TRIGGER_POSITION_VALUE_MASK 0x0000ffff
+#define LA_TRIGGER_POSITION_VALUE_GET(x) (((x) & LA_TRIGGER_POSITION_VALUE_MASK) >> LA_TRIGGER_POSITION_VALUE_LSB)
+#define LA_TRIGGER_POSITION_VALUE_SET(x) (((x) << LA_TRIGGER_POSITION_VALUE_LSB) & LA_TRIGGER_POSITION_VALUE_MASK)
+
+#define LA_PRE_TRIGGER_ADDRESS 0x0000009c
+#define LA_PRE_TRIGGER_OFFSET 0x0000009c
+#define LA_PRE_TRIGGER_COUNT_MSB 15
+#define LA_PRE_TRIGGER_COUNT_LSB 0
+#define LA_PRE_TRIGGER_COUNT_MASK 0x0000ffff
+#define LA_PRE_TRIGGER_COUNT_GET(x) (((x) & LA_PRE_TRIGGER_COUNT_MASK) >> LA_PRE_TRIGGER_COUNT_LSB)
+#define LA_PRE_TRIGGER_COUNT_SET(x) (((x) << LA_PRE_TRIGGER_COUNT_LSB) & LA_PRE_TRIGGER_COUNT_MASK)
+
+#define LA_POST_TRIGGER_ADDRESS 0x000000a0
+#define LA_POST_TRIGGER_OFFSET 0x000000a0
+#define LA_POST_TRIGGER_COUNT_MSB 15
+#define LA_POST_TRIGGER_COUNT_LSB 0
+#define LA_POST_TRIGGER_COUNT_MASK 0x0000ffff
+#define LA_POST_TRIGGER_COUNT_GET(x) (((x) & LA_POST_TRIGGER_COUNT_MASK) >> LA_POST_TRIGGER_COUNT_LSB)
+#define LA_POST_TRIGGER_COUNT_SET(x) (((x) << LA_POST_TRIGGER_COUNT_LSB) & LA_POST_TRIGGER_COUNT_MASK)
+
+#define LA_FILTER_CONTROL_ADDRESS 0x000000a4
+#define LA_FILTER_CONTROL_OFFSET 0x000000a4
+#define LA_FILTER_CONTROL_DELTA_MSB 0
+#define LA_FILTER_CONTROL_DELTA_LSB 0
+#define LA_FILTER_CONTROL_DELTA_MASK 0x00000001
+#define LA_FILTER_CONTROL_DELTA_GET(x) (((x) & LA_FILTER_CONTROL_DELTA_MASK) >> LA_FILTER_CONTROL_DELTA_LSB)
+#define LA_FILTER_CONTROL_DELTA_SET(x) (((x) << LA_FILTER_CONTROL_DELTA_LSB) & LA_FILTER_CONTROL_DELTA_MASK)
+
+#define LA_FILTER_DATA_ADDRESS 0x000000a8
+#define LA_FILTER_DATA_OFFSET 0x000000a8
+#define LA_FILTER_DATA_MATCH_MSB 17
+#define LA_FILTER_DATA_MATCH_LSB 0
+#define LA_FILTER_DATA_MATCH_MASK 0x0003ffff
+#define LA_FILTER_DATA_MATCH_GET(x) (((x) & LA_FILTER_DATA_MATCH_MASK) >> LA_FILTER_DATA_MATCH_LSB)
+#define LA_FILTER_DATA_MATCH_SET(x) (((x) << LA_FILTER_DATA_MATCH_LSB) & LA_FILTER_DATA_MATCH_MASK)
+
+#define LA_FILTER_WILDCARD_ADDRESS 0x000000ac
+#define LA_FILTER_WILDCARD_OFFSET 0x000000ac
+#define LA_FILTER_WILDCARD_MATCH_MSB 17
+#define LA_FILTER_WILDCARD_MATCH_LSB 0
+#define LA_FILTER_WILDCARD_MATCH_MASK 0x0003ffff
+#define LA_FILTER_WILDCARD_MATCH_GET(x) (((x) & LA_FILTER_WILDCARD_MATCH_MASK) >> LA_FILTER_WILDCARD_MATCH_LSB)
+#define LA_FILTER_WILDCARD_MATCH_SET(x) (((x) << LA_FILTER_WILDCARD_MATCH_LSB) & LA_FILTER_WILDCARD_MATCH_MASK)
+
+#define LA_TRIGGERA_DATA_ADDRESS 0x000000b0
+#define LA_TRIGGERA_DATA_OFFSET 0x000000b0
+#define LA_TRIGGERA_DATA_MATCH_MSB 17
+#define LA_TRIGGERA_DATA_MATCH_LSB 0
+#define LA_TRIGGERA_DATA_MATCH_MASK 0x0003ffff
+#define LA_TRIGGERA_DATA_MATCH_GET(x) (((x) & LA_TRIGGERA_DATA_MATCH_MASK) >> LA_TRIGGERA_DATA_MATCH_LSB)
+#define LA_TRIGGERA_DATA_MATCH_SET(x) (((x) << LA_TRIGGERA_DATA_MATCH_LSB) & LA_TRIGGERA_DATA_MATCH_MASK)
+
+#define LA_TRIGGERA_WILDCARD_ADDRESS 0x000000b4
+#define LA_TRIGGERA_WILDCARD_OFFSET 0x000000b4
+#define LA_TRIGGERA_WILDCARD_MATCH_MSB 17
+#define LA_TRIGGERA_WILDCARD_MATCH_LSB 0
+#define LA_TRIGGERA_WILDCARD_MATCH_MASK 0x0003ffff
+#define LA_TRIGGERA_WILDCARD_MATCH_GET(x) (((x) & LA_TRIGGERA_WILDCARD_MATCH_MASK) >> LA_TRIGGERA_WILDCARD_MATCH_LSB)
+#define LA_TRIGGERA_WILDCARD_MATCH_SET(x) (((x) << LA_TRIGGERA_WILDCARD_MATCH_LSB) & LA_TRIGGERA_WILDCARD_MATCH_MASK)
+
+#define LA_TRIGGERB_DATA_ADDRESS 0x000000b8
+#define LA_TRIGGERB_DATA_OFFSET 0x000000b8
+#define LA_TRIGGERB_DATA_MATCH_MSB 17
+#define LA_TRIGGERB_DATA_MATCH_LSB 0
+#define LA_TRIGGERB_DATA_MATCH_MASK 0x0003ffff
+#define LA_TRIGGERB_DATA_MATCH_GET(x) (((x) & LA_TRIGGERB_DATA_MATCH_MASK) >> LA_TRIGGERB_DATA_MATCH_LSB)
+#define LA_TRIGGERB_DATA_MATCH_SET(x) (((x) << LA_TRIGGERB_DATA_MATCH_LSB) & LA_TRIGGERB_DATA_MATCH_MASK)
+
+#define LA_TRIGGERB_WILDCARD_ADDRESS 0x000000bc
+#define LA_TRIGGERB_WILDCARD_OFFSET 0x000000bc
+#define LA_TRIGGERB_WILDCARD_MATCH_MSB 17
+#define LA_TRIGGERB_WILDCARD_MATCH_LSB 0
+#define LA_TRIGGERB_WILDCARD_MATCH_MASK 0x0003ffff
+#define LA_TRIGGERB_WILDCARD_MATCH_GET(x) (((x) & LA_TRIGGERB_WILDCARD_MATCH_MASK) >> LA_TRIGGERB_WILDCARD_MATCH_LSB)
+#define LA_TRIGGERB_WILDCARD_MATCH_SET(x) (((x) << LA_TRIGGERB_WILDCARD_MATCH_LSB) & LA_TRIGGERB_WILDCARD_MATCH_MASK)
+
+#define LA_TRIGGER_ADDRESS 0x000000c0
+#define LA_TRIGGER_OFFSET 0x000000c0
+#define LA_TRIGGER_EVENT_MSB 2
+#define LA_TRIGGER_EVENT_LSB 0
+#define LA_TRIGGER_EVENT_MASK 0x00000007
+#define LA_TRIGGER_EVENT_GET(x) (((x) & LA_TRIGGER_EVENT_MASK) >> LA_TRIGGER_EVENT_LSB)
+#define LA_TRIGGER_EVENT_SET(x) (((x) << LA_TRIGGER_EVENT_LSB) & LA_TRIGGER_EVENT_MASK)
+
+#define LA_FIFO_ADDRESS 0x000000c4
+#define LA_FIFO_OFFSET 0x000000c4
+#define LA_FIFO_FULL_MSB 1
+#define LA_FIFO_FULL_LSB 1
+#define LA_FIFO_FULL_MASK 0x00000002
+#define LA_FIFO_FULL_GET(x) (((x) & LA_FIFO_FULL_MASK) >> LA_FIFO_FULL_LSB)
+#define LA_FIFO_FULL_SET(x) (((x) << LA_FIFO_FULL_LSB) & LA_FIFO_FULL_MASK)
+#define LA_FIFO_EMPTY_MSB 0
+#define LA_FIFO_EMPTY_LSB 0
+#define LA_FIFO_EMPTY_MASK 0x00000001
+#define LA_FIFO_EMPTY_GET(x) (((x) & LA_FIFO_EMPTY_MASK) >> LA_FIFO_EMPTY_LSB)
+#define LA_FIFO_EMPTY_SET(x) (((x) << LA_FIFO_EMPTY_LSB) & LA_FIFO_EMPTY_MASK)
+
+#define LA_ADDRESS 0x000000c8
+#define LA_OFFSET 0x000000c8
+#define LA_DATA_MSB 17
+#define LA_DATA_LSB 0
+#define LA_DATA_MASK 0x0003ffff
+#define LA_DATA_GET(x) (((x) & LA_DATA_MASK) >> LA_DATA_LSB)
+#define LA_DATA_SET(x) (((x) << LA_DATA_LSB) & LA_DATA_MASK)
+
+#define ANT_PIN_ADDRESS 0x000000d0
+#define ANT_PIN_OFFSET 0x000000d0
+#define ANT_PIN_PAD_PULL_MSB 3
+#define ANT_PIN_PAD_PULL_LSB 2
+#define ANT_PIN_PAD_PULL_MASK 0x0000000c
+#define ANT_PIN_PAD_PULL_GET(x) (((x) & ANT_PIN_PAD_PULL_MASK) >> ANT_PIN_PAD_PULL_LSB)
+#define ANT_PIN_PAD_PULL_SET(x) (((x) << ANT_PIN_PAD_PULL_LSB) & ANT_PIN_PAD_PULL_MASK)
+#define ANT_PIN_PAD_STRENGTH_MSB 1
+#define ANT_PIN_PAD_STRENGTH_LSB 0
+#define ANT_PIN_PAD_STRENGTH_MASK 0x00000003
+#define ANT_PIN_PAD_STRENGTH_GET(x) (((x) & ANT_PIN_PAD_STRENGTH_MASK) >> ANT_PIN_PAD_STRENGTH_LSB)
+#define ANT_PIN_PAD_STRENGTH_SET(x) (((x) << ANT_PIN_PAD_STRENGTH_LSB) & ANT_PIN_PAD_STRENGTH_MASK)
+
+#define ANTD_PIN_ADDRESS 0x000000d4
+#define ANTD_PIN_OFFSET 0x000000d4
+#define ANTD_PIN_PAD_PULL_MSB 1
+#define ANTD_PIN_PAD_PULL_LSB 0
+#define ANTD_PIN_PAD_PULL_MASK 0x00000003
+#define ANTD_PIN_PAD_PULL_GET(x) (((x) & ANTD_PIN_PAD_PULL_MASK) >> ANTD_PIN_PAD_PULL_LSB)
+#define ANTD_PIN_PAD_PULL_SET(x) (((x) << ANTD_PIN_PAD_PULL_LSB) & ANTD_PIN_PAD_PULL_MASK)
+
+#define GPIO_PIN_ADDRESS 0x000000d8
+#define GPIO_PIN_OFFSET 0x000000d8
+#define GPIO_PIN_PAD_PULL_MSB 3
+#define GPIO_PIN_PAD_PULL_LSB 2
+#define GPIO_PIN_PAD_PULL_MASK 0x0000000c
+#define GPIO_PIN_PAD_PULL_GET(x) (((x) & GPIO_PIN_PAD_PULL_MASK) >> GPIO_PIN_PAD_PULL_LSB)
+#define GPIO_PIN_PAD_PULL_SET(x) (((x) << GPIO_PIN_PAD_PULL_LSB) & GPIO_PIN_PAD_PULL_MASK)
+#define GPIO_PIN_PAD_STRENGTH_MSB 1
+#define GPIO_PIN_PAD_STRENGTH_LSB 0
+#define GPIO_PIN_PAD_STRENGTH_MASK 0x00000003
+#define GPIO_PIN_PAD_STRENGTH_GET(x) (((x) & GPIO_PIN_PAD_STRENGTH_MASK) >> GPIO_PIN_PAD_STRENGTH_LSB)
+#define GPIO_PIN_PAD_STRENGTH_SET(x) (((x) << GPIO_PIN_PAD_STRENGTH_LSB) & GPIO_PIN_PAD_STRENGTH_MASK)
+
+#define GPIO_H_PIN_ADDRESS 0x000000dc
+#define GPIO_H_PIN_OFFSET 0x000000dc
+#define GPIO_H_PIN_PAD_PULL_MSB 1
+#define GPIO_H_PIN_PAD_PULL_LSB 0
+#define GPIO_H_PIN_PAD_PULL_MASK 0x00000003
+#define GPIO_H_PIN_PAD_PULL_GET(x) (((x) & GPIO_H_PIN_PAD_PULL_MASK) >> GPIO_H_PIN_PAD_PULL_LSB)
+#define GPIO_H_PIN_PAD_PULL_SET(x) (((x) << GPIO_H_PIN_PAD_PULL_LSB) & GPIO_H_PIN_PAD_PULL_MASK)
+
+#define BT_PIN_ADDRESS 0x000000e0
+#define BT_PIN_OFFSET 0x000000e0
+#define BT_PIN_PAD_PULL_MSB 3
+#define BT_PIN_PAD_PULL_LSB 2
+#define BT_PIN_PAD_PULL_MASK 0x0000000c
+#define BT_PIN_PAD_PULL_GET(x) (((x) & BT_PIN_PAD_PULL_MASK) >> BT_PIN_PAD_PULL_LSB)
+#define BT_PIN_PAD_PULL_SET(x) (((x) << BT_PIN_PAD_PULL_LSB) & BT_PIN_PAD_PULL_MASK)
+#define BT_PIN_PAD_STRENGTH_MSB 1
+#define BT_PIN_PAD_STRENGTH_LSB 0
+#define BT_PIN_PAD_STRENGTH_MASK 0x00000003
+#define BT_PIN_PAD_STRENGTH_GET(x) (((x) & BT_PIN_PAD_STRENGTH_MASK) >> BT_PIN_PAD_STRENGTH_LSB)
+#define BT_PIN_PAD_STRENGTH_SET(x) (((x) << BT_PIN_PAD_STRENGTH_LSB) & BT_PIN_PAD_STRENGTH_MASK)
+
+#define BT_WLAN_PIN_ADDRESS 0x000000e4
+#define BT_WLAN_PIN_OFFSET 0x000000e4
+#define BT_WLAN_PIN_PAD_PULL_MSB 1
+#define BT_WLAN_PIN_PAD_PULL_LSB 0
+#define BT_WLAN_PIN_PAD_PULL_MASK 0x00000003
+#define BT_WLAN_PIN_PAD_PULL_GET(x) (((x) & BT_WLAN_PIN_PAD_PULL_MASK) >> BT_WLAN_PIN_PAD_PULL_LSB)
+#define BT_WLAN_PIN_PAD_PULL_SET(x) (((x) << BT_WLAN_PIN_PAD_PULL_LSB) & BT_WLAN_PIN_PAD_PULL_MASK)
+
+#define SI_UART_PIN_ADDRESS 0x000000e8
+#define SI_UART_PIN_OFFSET 0x000000e8
+#define SI_UART_PIN_PAD_PULL_MSB 3
+#define SI_UART_PIN_PAD_PULL_LSB 2
+#define SI_UART_PIN_PAD_PULL_MASK 0x0000000c
+#define SI_UART_PIN_PAD_PULL_GET(x) (((x) & SI_UART_PIN_PAD_PULL_MASK) >> SI_UART_PIN_PAD_PULL_LSB)
+#define SI_UART_PIN_PAD_PULL_SET(x) (((x) << SI_UART_PIN_PAD_PULL_LSB) & SI_UART_PIN_PAD_PULL_MASK)
+#define SI_UART_PIN_PAD_STRENGTH_MSB 1
+#define SI_UART_PIN_PAD_STRENGTH_LSB 0
+#define SI_UART_PIN_PAD_STRENGTH_MASK 0x00000003
+#define SI_UART_PIN_PAD_STRENGTH_GET(x) (((x) & SI_UART_PIN_PAD_STRENGTH_MASK) >> SI_UART_PIN_PAD_STRENGTH_LSB)
+#define SI_UART_PIN_PAD_STRENGTH_SET(x) (((x) << SI_UART_PIN_PAD_STRENGTH_LSB) & SI_UART_PIN_PAD_STRENGTH_MASK)
+
+#define CLK32K_PIN_ADDRESS 0x000000ec
+#define CLK32K_PIN_OFFSET 0x000000ec
+#define CLK32K_PIN_PAD_PULL_MSB 1
+#define CLK32K_PIN_PAD_PULL_LSB 0
+#define CLK32K_PIN_PAD_PULL_MASK 0x00000003
+#define CLK32K_PIN_PAD_PULL_GET(x) (((x) & CLK32K_PIN_PAD_PULL_MASK) >> CLK32K_PIN_PAD_PULL_LSB)
+#define CLK32K_PIN_PAD_PULL_SET(x) (((x) << CLK32K_PIN_PAD_PULL_LSB) & CLK32K_PIN_PAD_PULL_MASK)
+
+#define RESET_TUPLE_STATUS_ADDRESS 0x000000f0
+#define RESET_TUPLE_STATUS_OFFSET 0x000000f0
+#define RESET_TUPLE_STATUS_TEST_RESET_TUPLE_MSB 11
+#define RESET_TUPLE_STATUS_TEST_RESET_TUPLE_LSB 8
+#define RESET_TUPLE_STATUS_TEST_RESET_TUPLE_MASK 0x00000f00
+#define RESET_TUPLE_STATUS_TEST_RESET_TUPLE_GET(x) (((x) & RESET_TUPLE_STATUS_TEST_RESET_TUPLE_MASK) >> RESET_TUPLE_STATUS_TEST_RESET_TUPLE_LSB)
+#define RESET_TUPLE_STATUS_TEST_RESET_TUPLE_SET(x) (((x) << RESET_TUPLE_STATUS_TEST_RESET_TUPLE_LSB) & RESET_TUPLE_STATUS_TEST_RESET_TUPLE_MASK)
+#define RESET_TUPLE_STATUS_PIN_RESET_TUPLE_MSB 7
+#define RESET_TUPLE_STATUS_PIN_RESET_TUPLE_LSB 0
+#define RESET_TUPLE_STATUS_PIN_RESET_TUPLE_MASK 0x000000ff
+#define RESET_TUPLE_STATUS_PIN_RESET_TUPLE_GET(x) (((x) & RESET_TUPLE_STATUS_PIN_RESET_TUPLE_MASK) >> RESET_TUPLE_STATUS_PIN_RESET_TUPLE_LSB)
+#define RESET_TUPLE_STATUS_PIN_RESET_TUPLE_SET(x) (((x) << RESET_TUPLE_STATUS_PIN_RESET_TUPLE_LSB) & RESET_TUPLE_STATUS_PIN_RESET_TUPLE_MASK)
+
+
+#ifndef __ASSEMBLER__
+
+typedef struct gpio_reg_reg_s {
+ volatile unsigned int gpio_out;
+ volatile unsigned int gpio_out_w1ts;
+ volatile unsigned int gpio_out_w1tc;
+ volatile unsigned int gpio_enable;
+ volatile unsigned int gpio_enable_w1ts;
+ volatile unsigned int gpio_enable_w1tc;
+ volatile unsigned int gpio_in;
+ volatile unsigned int gpio_status;
+ volatile unsigned int gpio_status_w1ts;
+ volatile unsigned int gpio_status_w1tc;
+ volatile unsigned int gpio_pin0;
+ volatile unsigned int gpio_pin1;
+ volatile unsigned int gpio_pin2;
+ volatile unsigned int gpio_pin3;
+ volatile unsigned int gpio_pin4;
+ volatile unsigned int gpio_pin5;
+ volatile unsigned int gpio_pin6;
+ volatile unsigned int gpio_pin7;
+ volatile unsigned int gpio_pin8;
+ volatile unsigned int gpio_pin9;
+ volatile unsigned int gpio_pin10;
+ volatile unsigned int gpio_pin11;
+ volatile unsigned int gpio_pin12;
+ volatile unsigned int gpio_pin13;
+ volatile unsigned int gpio_pin14;
+ volatile unsigned int gpio_pin15;
+ volatile unsigned int gpio_pin16;
+ volatile unsigned int gpio_pin17;
+ volatile unsigned int sdio_pin;
+ volatile unsigned int clk_req_pin;
+ volatile unsigned int sigma_delta;
+ volatile unsigned int debug_control;
+ volatile unsigned int debug_input_sel;
+ volatile unsigned int debug_out;
+ volatile unsigned int la_control;
+ volatile unsigned int la_clock;
+ volatile unsigned int la_status;
+ volatile unsigned int la_trigger_sample;
+ volatile unsigned int la_trigger_position;
+ volatile unsigned int la_pre_trigger;
+ volatile unsigned int la_post_trigger;
+ volatile unsigned int la_filter_control;
+ volatile unsigned int la_filter_data;
+ volatile unsigned int la_filter_wildcard;
+ volatile unsigned int la_triggera_data;
+ volatile unsigned int la_triggera_wildcard;
+ volatile unsigned int la_triggerb_data;
+ volatile unsigned int la_triggerb_wildcard;
+ volatile unsigned int la_trigger;
+ volatile unsigned int la_fifo;
+ volatile unsigned int la[2];
+ volatile unsigned int ant_pin;
+ volatile unsigned int antd_pin;
+ volatile unsigned int gpio_pin;
+ volatile unsigned int gpio_h_pin;
+ volatile unsigned int bt_pin;
+ volatile unsigned int bt_wlan_pin;
+ volatile unsigned int si_uart_pin;
+ volatile unsigned int clk32k_pin;
+ volatile unsigned int reset_tuple_status;
+} gpio_reg_reg_t;
+
+#endif /* __ASSEMBLER__ */
+
+#endif /* _GPIO_REG_H_ */
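
The *_GET()/*_SET() pairs generated above follow the usual mask-and-shift convention: SET() positions a field value inside an otherwise zero register word, GET() extracts it back out. A minimal sketch of the read-modify-write idiom they support, using the SIGMA_DELTA fields as the example; how the 32-bit word actually reaches the chip (direct MMIO or the host-side window registers added later in this series) is left to the HIF layer and is not part of this header:

/* Illustrative only: builds a new SIGMA_DELTA register value from one
 * previously read from SIGMA_DELTA_ADDRESS; reading/writing the word
 * is HIF-specific and not shown here. Assumes gpio_reg.h is included. */
static inline u32 sigma_delta_update(u32 old, u32 prescalar, u32 target)
{
	u32 val = old;

	/* Clear the fields being changed, then merge in the new values. */
	val &= ~(SIGMA_DELTA_PRESCALAR_MASK | SIGMA_DELTA_TARGET_MASK);
	val |= SIGMA_DELTA_PRESCALAR_SET(prescalar) |
	       SIGMA_DELTA_TARGET_SET(target) |
	       SIGMA_DELTA_ENABLE_SET(1);

	return val;
}
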
diff --git a/drivers/staging/ath6kl/include/common/AR6002/hw2.0/hw/mbox_host_reg.h b/drivers/staging/ath6kl/include/common/AR6002/hw2.0/hw/mbox_host_reg.h
new file mode 100644
index 000000000000..f836ae47a303
--- /dev/null
+++ b/drivers/staging/ath6kl/include/common/AR6002/hw2.0/hw/mbox_host_reg.h
@@ -0,0 +1,386 @@
+#ifndef _MBOX_HOST_REG_REG_H_
+#define _MBOX_HOST_REG_REG_H_
+
+#define HOST_INT_STATUS_ADDRESS 0x00000400
+#define HOST_INT_STATUS_OFFSET 0x00000400
+#define HOST_INT_STATUS_ERROR_MSB 7
+#define HOST_INT_STATUS_ERROR_LSB 7
+#define HOST_INT_STATUS_ERROR_MASK 0x00000080
+#define HOST_INT_STATUS_ERROR_GET(x) (((x) & HOST_INT_STATUS_ERROR_MASK) >> HOST_INT_STATUS_ERROR_LSB)
+#define HOST_INT_STATUS_ERROR_SET(x) (((x) << HOST_INT_STATUS_ERROR_LSB) & HOST_INT_STATUS_ERROR_MASK)
+#define HOST_INT_STATUS_CPU_MSB 6
+#define HOST_INT_STATUS_CPU_LSB 6
+#define HOST_INT_STATUS_CPU_MASK 0x00000040
+#define HOST_INT_STATUS_CPU_GET(x) (((x) & HOST_INT_STATUS_CPU_MASK) >> HOST_INT_STATUS_CPU_LSB)
+#define HOST_INT_STATUS_CPU_SET(x) (((x) << HOST_INT_STATUS_CPU_LSB) & HOST_INT_STATUS_CPU_MASK)
+#define HOST_INT_STATUS_DRAGON_INT_MSB 5
+#define HOST_INT_STATUS_DRAGON_INT_LSB 5
+#define HOST_INT_STATUS_DRAGON_INT_MASK 0x00000020
+#define HOST_INT_STATUS_DRAGON_INT_GET(x) (((x) & HOST_INT_STATUS_DRAGON_INT_MASK) >> HOST_INT_STATUS_DRAGON_INT_LSB)
+#define HOST_INT_STATUS_DRAGON_INT_SET(x) (((x) << HOST_INT_STATUS_DRAGON_INT_LSB) & HOST_INT_STATUS_DRAGON_INT_MASK)
+#define HOST_INT_STATUS_COUNTER_MSB 4
+#define HOST_INT_STATUS_COUNTER_LSB 4
+#define HOST_INT_STATUS_COUNTER_MASK 0x00000010
+#define HOST_INT_STATUS_COUNTER_GET(x) (((x) & HOST_INT_STATUS_COUNTER_MASK) >> HOST_INT_STATUS_COUNTER_LSB)
+#define HOST_INT_STATUS_COUNTER_SET(x) (((x) << HOST_INT_STATUS_COUNTER_LSB) & HOST_INT_STATUS_COUNTER_MASK)
+#define HOST_INT_STATUS_MBOX_DATA_MSB 3
+#define HOST_INT_STATUS_MBOX_DATA_LSB 0
+#define HOST_INT_STATUS_MBOX_DATA_MASK 0x0000000f
+#define HOST_INT_STATUS_MBOX_DATA_GET(x) (((x) & HOST_INT_STATUS_MBOX_DATA_MASK) >> HOST_INT_STATUS_MBOX_DATA_LSB)
+#define HOST_INT_STATUS_MBOX_DATA_SET(x) (((x) << HOST_INT_STATUS_MBOX_DATA_LSB) & HOST_INT_STATUS_MBOX_DATA_MASK)
+
+#define CPU_INT_STATUS_ADDRESS 0x00000401
+#define CPU_INT_STATUS_OFFSET 0x00000401
+#define CPU_INT_STATUS_BIT_MSB 7
+#define CPU_INT_STATUS_BIT_LSB 0
+#define CPU_INT_STATUS_BIT_MASK 0x000000ff
+#define CPU_INT_STATUS_BIT_GET(x) (((x) & CPU_INT_STATUS_BIT_MASK) >> CPU_INT_STATUS_BIT_LSB)
+#define CPU_INT_STATUS_BIT_SET(x) (((x) << CPU_INT_STATUS_BIT_LSB) & CPU_INT_STATUS_BIT_MASK)
+
+#define ERROR_INT_STATUS_ADDRESS 0x00000402
+#define ERROR_INT_STATUS_OFFSET 0x00000402
+#define ERROR_INT_STATUS_SPI_MSB 3
+#define ERROR_INT_STATUS_SPI_LSB 3
+#define ERROR_INT_STATUS_SPI_MASK 0x00000008
+#define ERROR_INT_STATUS_SPI_GET(x) (((x) & ERROR_INT_STATUS_SPI_MASK) >> ERROR_INT_STATUS_SPI_LSB)
+#define ERROR_INT_STATUS_SPI_SET(x) (((x) << ERROR_INT_STATUS_SPI_LSB) & ERROR_INT_STATUS_SPI_MASK)
+#define ERROR_INT_STATUS_WAKEUP_MSB 2
+#define ERROR_INT_STATUS_WAKEUP_LSB 2
+#define ERROR_INT_STATUS_WAKEUP_MASK 0x00000004
+#define ERROR_INT_STATUS_WAKEUP_GET(x) (((x) & ERROR_INT_STATUS_WAKEUP_MASK) >> ERROR_INT_STATUS_WAKEUP_LSB)
+#define ERROR_INT_STATUS_WAKEUP_SET(x) (((x) << ERROR_INT_STATUS_WAKEUP_LSB) & ERROR_INT_STATUS_WAKEUP_MASK)
+#define ERROR_INT_STATUS_RX_UNDERFLOW_MSB 1
+#define ERROR_INT_STATUS_RX_UNDERFLOW_LSB 1
+#define ERROR_INT_STATUS_RX_UNDERFLOW_MASK 0x00000002
+#define ERROR_INT_STATUS_RX_UNDERFLOW_GET(x) (((x) & ERROR_INT_STATUS_RX_UNDERFLOW_MASK) >> ERROR_INT_STATUS_RX_UNDERFLOW_LSB)
+#define ERROR_INT_STATUS_RX_UNDERFLOW_SET(x) (((x) << ERROR_INT_STATUS_RX_UNDERFLOW_LSB) & ERROR_INT_STATUS_RX_UNDERFLOW_MASK)
+#define ERROR_INT_STATUS_TX_OVERFLOW_MSB 0
+#define ERROR_INT_STATUS_TX_OVERFLOW_LSB 0
+#define ERROR_INT_STATUS_TX_OVERFLOW_MASK 0x00000001
+#define ERROR_INT_STATUS_TX_OVERFLOW_GET(x) (((x) & ERROR_INT_STATUS_TX_OVERFLOW_MASK) >> ERROR_INT_STATUS_TX_OVERFLOW_LSB)
+#define ERROR_INT_STATUS_TX_OVERFLOW_SET(x) (((x) << ERROR_INT_STATUS_TX_OVERFLOW_LSB) & ERROR_INT_STATUS_TX_OVERFLOW_MASK)
+
+#define COUNTER_INT_STATUS_ADDRESS 0x00000403
+#define COUNTER_INT_STATUS_OFFSET 0x00000403
+#define COUNTER_INT_STATUS_COUNTER_MSB 7
+#define COUNTER_INT_STATUS_COUNTER_LSB 0
+#define COUNTER_INT_STATUS_COUNTER_MASK 0x000000ff
+#define COUNTER_INT_STATUS_COUNTER_GET(x) (((x) & COUNTER_INT_STATUS_COUNTER_MASK) >> COUNTER_INT_STATUS_COUNTER_LSB)
+#define COUNTER_INT_STATUS_COUNTER_SET(x) (((x) << COUNTER_INT_STATUS_COUNTER_LSB) & COUNTER_INT_STATUS_COUNTER_MASK)
+
+#define MBOX_FRAME_ADDRESS 0x00000404
+#define MBOX_FRAME_OFFSET 0x00000404
+#define MBOX_FRAME_RX_EOM_MSB 7
+#define MBOX_FRAME_RX_EOM_LSB 4
+#define MBOX_FRAME_RX_EOM_MASK 0x000000f0
+#define MBOX_FRAME_RX_EOM_GET(x) (((x) & MBOX_FRAME_RX_EOM_MASK) >> MBOX_FRAME_RX_EOM_LSB)
+#define MBOX_FRAME_RX_EOM_SET(x) (((x) << MBOX_FRAME_RX_EOM_LSB) & MBOX_FRAME_RX_EOM_MASK)
+#define MBOX_FRAME_RX_SOM_MSB 3
+#define MBOX_FRAME_RX_SOM_LSB 0
+#define MBOX_FRAME_RX_SOM_MASK 0x0000000f
+#define MBOX_FRAME_RX_SOM_GET(x) (((x) & MBOX_FRAME_RX_SOM_MASK) >> MBOX_FRAME_RX_SOM_LSB)
+#define MBOX_FRAME_RX_SOM_SET(x) (((x) << MBOX_FRAME_RX_SOM_LSB) & MBOX_FRAME_RX_SOM_MASK)
+
+#define RX_LOOKAHEAD_VALID_ADDRESS 0x00000405
+#define RX_LOOKAHEAD_VALID_OFFSET 0x00000405
+#define RX_LOOKAHEAD_VALID_MBOX_MSB 3
+#define RX_LOOKAHEAD_VALID_MBOX_LSB 0
+#define RX_LOOKAHEAD_VALID_MBOX_MASK 0x0000000f
+#define RX_LOOKAHEAD_VALID_MBOX_GET(x) (((x) & RX_LOOKAHEAD_VALID_MBOX_MASK) >> RX_LOOKAHEAD_VALID_MBOX_LSB)
+#define RX_LOOKAHEAD_VALID_MBOX_SET(x) (((x) << RX_LOOKAHEAD_VALID_MBOX_LSB) & RX_LOOKAHEAD_VALID_MBOX_MASK)
+
+#define RX_LOOKAHEAD0_ADDRESS 0x00000408
+#define RX_LOOKAHEAD0_OFFSET 0x00000408
+#define RX_LOOKAHEAD0_DATA_MSB 7
+#define RX_LOOKAHEAD0_DATA_LSB 0
+#define RX_LOOKAHEAD0_DATA_MASK 0x000000ff
+#define RX_LOOKAHEAD0_DATA_GET(x) (((x) & RX_LOOKAHEAD0_DATA_MASK) >> RX_LOOKAHEAD0_DATA_LSB)
+#define RX_LOOKAHEAD0_DATA_SET(x) (((x) << RX_LOOKAHEAD0_DATA_LSB) & RX_LOOKAHEAD0_DATA_MASK)
+
+#define RX_LOOKAHEAD1_ADDRESS 0x0000040c
+#define RX_LOOKAHEAD1_OFFSET 0x0000040c
+#define RX_LOOKAHEAD1_DATA_MSB 7
+#define RX_LOOKAHEAD1_DATA_LSB 0
+#define RX_LOOKAHEAD1_DATA_MASK 0x000000ff
+#define RX_LOOKAHEAD1_DATA_GET(x) (((x) & RX_LOOKAHEAD1_DATA_MASK) >> RX_LOOKAHEAD1_DATA_LSB)
+#define RX_LOOKAHEAD1_DATA_SET(x) (((x) << RX_LOOKAHEAD1_DATA_LSB) & RX_LOOKAHEAD1_DATA_MASK)
+
+#define RX_LOOKAHEAD2_ADDRESS 0x00000410
+#define RX_LOOKAHEAD2_OFFSET 0x00000410
+#define RX_LOOKAHEAD2_DATA_MSB 7
+#define RX_LOOKAHEAD2_DATA_LSB 0
+#define RX_LOOKAHEAD2_DATA_MASK 0x000000ff
+#define RX_LOOKAHEAD2_DATA_GET(x) (((x) & RX_LOOKAHEAD2_DATA_MASK) >> RX_LOOKAHEAD2_DATA_LSB)
+#define RX_LOOKAHEAD2_DATA_SET(x) (((x) << RX_LOOKAHEAD2_DATA_LSB) & RX_LOOKAHEAD2_DATA_MASK)
+
+#define RX_LOOKAHEAD3_ADDRESS 0x00000414
+#define RX_LOOKAHEAD3_OFFSET 0x00000414
+#define RX_LOOKAHEAD3_DATA_MSB 7
+#define RX_LOOKAHEAD3_DATA_LSB 0
+#define RX_LOOKAHEAD3_DATA_MASK 0x000000ff
+#define RX_LOOKAHEAD3_DATA_GET(x) (((x) & RX_LOOKAHEAD3_DATA_MASK) >> RX_LOOKAHEAD3_DATA_LSB)
+#define RX_LOOKAHEAD3_DATA_SET(x) (((x) << RX_LOOKAHEAD3_DATA_LSB) & RX_LOOKAHEAD3_DATA_MASK)
+
+#define INT_STATUS_ENABLE_ADDRESS 0x00000418
+#define INT_STATUS_ENABLE_OFFSET 0x00000418
+#define INT_STATUS_ENABLE_ERROR_MSB 7
+#define INT_STATUS_ENABLE_ERROR_LSB 7
+#define INT_STATUS_ENABLE_ERROR_MASK 0x00000080
+#define INT_STATUS_ENABLE_ERROR_GET(x) (((x) & INT_STATUS_ENABLE_ERROR_MASK) >> INT_STATUS_ENABLE_ERROR_LSB)
+#define INT_STATUS_ENABLE_ERROR_SET(x) (((x) << INT_STATUS_ENABLE_ERROR_LSB) & INT_STATUS_ENABLE_ERROR_MASK)
+#define INT_STATUS_ENABLE_CPU_MSB 6
+#define INT_STATUS_ENABLE_CPU_LSB 6
+#define INT_STATUS_ENABLE_CPU_MASK 0x00000040
+#define INT_STATUS_ENABLE_CPU_GET(x) (((x) & INT_STATUS_ENABLE_CPU_MASK) >> INT_STATUS_ENABLE_CPU_LSB)
+#define INT_STATUS_ENABLE_CPU_SET(x) (((x) << INT_STATUS_ENABLE_CPU_LSB) & INT_STATUS_ENABLE_CPU_MASK)
+#define INT_STATUS_ENABLE_DRAGON_INT_MSB 5
+#define INT_STATUS_ENABLE_DRAGON_INT_LSB 5
+#define INT_STATUS_ENABLE_DRAGON_INT_MASK 0x00000020
+#define INT_STATUS_ENABLE_DRAGON_INT_GET(x) (((x) & INT_STATUS_ENABLE_DRAGON_INT_MASK) >> INT_STATUS_ENABLE_DRAGON_INT_LSB)
+#define INT_STATUS_ENABLE_DRAGON_INT_SET(x) (((x) << INT_STATUS_ENABLE_DRAGON_INT_LSB) & INT_STATUS_ENABLE_DRAGON_INT_MASK)
+#define INT_STATUS_ENABLE_COUNTER_MSB 4
+#define INT_STATUS_ENABLE_COUNTER_LSB 4
+#define INT_STATUS_ENABLE_COUNTER_MASK 0x00000010
+#define INT_STATUS_ENABLE_COUNTER_GET(x) (((x) & INT_STATUS_ENABLE_COUNTER_MASK) >> INT_STATUS_ENABLE_COUNTER_LSB)
+#define INT_STATUS_ENABLE_COUNTER_SET(x) (((x) << INT_STATUS_ENABLE_COUNTER_LSB) & INT_STATUS_ENABLE_COUNTER_MASK)
+#define INT_STATUS_ENABLE_MBOX_DATA_MSB 3
+#define INT_STATUS_ENABLE_MBOX_DATA_LSB 0
+#define INT_STATUS_ENABLE_MBOX_DATA_MASK 0x0000000f
+#define INT_STATUS_ENABLE_MBOX_DATA_GET(x) (((x) & INT_STATUS_ENABLE_MBOX_DATA_MASK) >> INT_STATUS_ENABLE_MBOX_DATA_LSB)
+#define INT_STATUS_ENABLE_MBOX_DATA_SET(x) (((x) << INT_STATUS_ENABLE_MBOX_DATA_LSB) & INT_STATUS_ENABLE_MBOX_DATA_MASK)
+
+#define CPU_INT_STATUS_ENABLE_ADDRESS 0x00000419
+#define CPU_INT_STATUS_ENABLE_OFFSET 0x00000419
+#define CPU_INT_STATUS_ENABLE_BIT_MSB 7
+#define CPU_INT_STATUS_ENABLE_BIT_LSB 0
+#define CPU_INT_STATUS_ENABLE_BIT_MASK 0x000000ff
+#define CPU_INT_STATUS_ENABLE_BIT_GET(x) (((x) & CPU_INT_STATUS_ENABLE_BIT_MASK) >> CPU_INT_STATUS_ENABLE_BIT_LSB)
+#define CPU_INT_STATUS_ENABLE_BIT_SET(x) (((x) << CPU_INT_STATUS_ENABLE_BIT_LSB) & CPU_INT_STATUS_ENABLE_BIT_MASK)
+
+#define ERROR_STATUS_ENABLE_ADDRESS 0x0000041a
+#define ERROR_STATUS_ENABLE_OFFSET 0x0000041a
+#define ERROR_STATUS_ENABLE_WAKEUP_MSB 2
+#define ERROR_STATUS_ENABLE_WAKEUP_LSB 2
+#define ERROR_STATUS_ENABLE_WAKEUP_MASK 0x00000004
+#define ERROR_STATUS_ENABLE_WAKEUP_GET(x) (((x) & ERROR_STATUS_ENABLE_WAKEUP_MASK) >> ERROR_STATUS_ENABLE_WAKEUP_LSB)
+#define ERROR_STATUS_ENABLE_WAKEUP_SET(x) (((x) << ERROR_STATUS_ENABLE_WAKEUP_LSB) & ERROR_STATUS_ENABLE_WAKEUP_MASK)
+#define ERROR_STATUS_ENABLE_RX_UNDERFLOW_MSB 1
+#define ERROR_STATUS_ENABLE_RX_UNDERFLOW_LSB 1
+#define ERROR_STATUS_ENABLE_RX_UNDERFLOW_MASK 0x00000002
+#define ERROR_STATUS_ENABLE_RX_UNDERFLOW_GET(x) (((x) & ERROR_STATUS_ENABLE_RX_UNDERFLOW_MASK) >> ERROR_STATUS_ENABLE_RX_UNDERFLOW_LSB)
+#define ERROR_STATUS_ENABLE_RX_UNDERFLOW_SET(x) (((x) << ERROR_STATUS_ENABLE_RX_UNDERFLOW_LSB) & ERROR_STATUS_ENABLE_RX_UNDERFLOW_MASK)
+#define ERROR_STATUS_ENABLE_TX_OVERFLOW_MSB 0
+#define ERROR_STATUS_ENABLE_TX_OVERFLOW_LSB 0
+#define ERROR_STATUS_ENABLE_TX_OVERFLOW_MASK 0x00000001
+#define ERROR_STATUS_ENABLE_TX_OVERFLOW_GET(x) (((x) & ERROR_STATUS_ENABLE_TX_OVERFLOW_MASK) >> ERROR_STATUS_ENABLE_TX_OVERFLOW_LSB)
+#define ERROR_STATUS_ENABLE_TX_OVERFLOW_SET(x) (((x) << ERROR_STATUS_ENABLE_TX_OVERFLOW_LSB) & ERROR_STATUS_ENABLE_TX_OVERFLOW_MASK)
+
+#define COUNTER_INT_STATUS_ENABLE_ADDRESS 0x0000041b
+#define COUNTER_INT_STATUS_ENABLE_OFFSET 0x0000041b
+#define COUNTER_INT_STATUS_ENABLE_BIT_MSB 7
+#define COUNTER_INT_STATUS_ENABLE_BIT_LSB 0
+#define COUNTER_INT_STATUS_ENABLE_BIT_MASK 0x000000ff
+#define COUNTER_INT_STATUS_ENABLE_BIT_GET(x) (((x) & COUNTER_INT_STATUS_ENABLE_BIT_MASK) >> COUNTER_INT_STATUS_ENABLE_BIT_LSB)
+#define COUNTER_INT_STATUS_ENABLE_BIT_SET(x) (((x) << COUNTER_INT_STATUS_ENABLE_BIT_LSB) & COUNTER_INT_STATUS_ENABLE_BIT_MASK)
+
+#define COUNT_ADDRESS 0x00000420
+#define COUNT_OFFSET 0x00000420
+#define COUNT_VALUE_MSB 7
+#define COUNT_VALUE_LSB 0
+#define COUNT_VALUE_MASK 0x000000ff
+#define COUNT_VALUE_GET(x) (((x) & COUNT_VALUE_MASK) >> COUNT_VALUE_LSB)
+#define COUNT_VALUE_SET(x) (((x) << COUNT_VALUE_LSB) & COUNT_VALUE_MASK)
+
+#define COUNT_DEC_ADDRESS 0x00000440
+#define COUNT_DEC_OFFSET 0x00000440
+#define COUNT_DEC_VALUE_MSB 7
+#define COUNT_DEC_VALUE_LSB 0
+#define COUNT_DEC_VALUE_MASK 0x000000ff
+#define COUNT_DEC_VALUE_GET(x) (((x) & COUNT_DEC_VALUE_MASK) >> COUNT_DEC_VALUE_LSB)
+#define COUNT_DEC_VALUE_SET(x) (((x) << COUNT_DEC_VALUE_LSB) & COUNT_DEC_VALUE_MASK)
+
+#define SCRATCH_ADDRESS 0x00000460
+#define SCRATCH_OFFSET 0x00000460
+#define SCRATCH_VALUE_MSB 7
+#define SCRATCH_VALUE_LSB 0
+#define SCRATCH_VALUE_MASK 0x000000ff
+#define SCRATCH_VALUE_GET(x) (((x) & SCRATCH_VALUE_MASK) >> SCRATCH_VALUE_LSB)
+#define SCRATCH_VALUE_SET(x) (((x) << SCRATCH_VALUE_LSB) & SCRATCH_VALUE_MASK)
+
+#define FIFO_TIMEOUT_ADDRESS 0x00000468
+#define FIFO_TIMEOUT_OFFSET 0x00000468
+#define FIFO_TIMEOUT_VALUE_MSB 7
+#define FIFO_TIMEOUT_VALUE_LSB 0
+#define FIFO_TIMEOUT_VALUE_MASK 0x000000ff
+#define FIFO_TIMEOUT_VALUE_GET(x) (((x) & FIFO_TIMEOUT_VALUE_MASK) >> FIFO_TIMEOUT_VALUE_LSB)
+#define FIFO_TIMEOUT_VALUE_SET(x) (((x) << FIFO_TIMEOUT_VALUE_LSB) & FIFO_TIMEOUT_VALUE_MASK)
+
+#define FIFO_TIMEOUT_ENABLE_ADDRESS 0x00000469
+#define FIFO_TIMEOUT_ENABLE_OFFSET 0x00000469
+#define FIFO_TIMEOUT_ENABLE_SET_MSB 0
+#define FIFO_TIMEOUT_ENABLE_SET_LSB 0
+#define FIFO_TIMEOUT_ENABLE_SET_MASK 0x00000001
+#define FIFO_TIMEOUT_ENABLE_SET_GET(x) (((x) & FIFO_TIMEOUT_ENABLE_SET_MASK) >> FIFO_TIMEOUT_ENABLE_SET_LSB)
+#define FIFO_TIMEOUT_ENABLE_SET_SET(x) (((x) << FIFO_TIMEOUT_ENABLE_SET_LSB) & FIFO_TIMEOUT_ENABLE_SET_MASK)
+
+#define DISABLE_SLEEP_ADDRESS 0x0000046a
+#define DISABLE_SLEEP_OFFSET 0x0000046a
+#define DISABLE_SLEEP_FOR_INT_MSB 1
+#define DISABLE_SLEEP_FOR_INT_LSB 1
+#define DISABLE_SLEEP_FOR_INT_MASK 0x00000002
+#define DISABLE_SLEEP_FOR_INT_GET(x) (((x) & DISABLE_SLEEP_FOR_INT_MASK) >> DISABLE_SLEEP_FOR_INT_LSB)
+#define DISABLE_SLEEP_FOR_INT_SET(x) (((x) << DISABLE_SLEEP_FOR_INT_LSB) & DISABLE_SLEEP_FOR_INT_MASK)
+#define DISABLE_SLEEP_ON_MSB 0
+#define DISABLE_SLEEP_ON_LSB 0
+#define DISABLE_SLEEP_ON_MASK 0x00000001
+#define DISABLE_SLEEP_ON_GET(x) (((x) & DISABLE_SLEEP_ON_MASK) >> DISABLE_SLEEP_ON_LSB)
+#define DISABLE_SLEEP_ON_SET(x) (((x) << DISABLE_SLEEP_ON_LSB) & DISABLE_SLEEP_ON_MASK)
+
+#define LOCAL_BUS_ADDRESS 0x00000470
+#define LOCAL_BUS_OFFSET 0x00000470
+#define LOCAL_BUS_STATE_MSB 1
+#define LOCAL_BUS_STATE_LSB 0
+#define LOCAL_BUS_STATE_MASK 0x00000003
+#define LOCAL_BUS_STATE_GET(x) (((x) & LOCAL_BUS_STATE_MASK) >> LOCAL_BUS_STATE_LSB)
+#define LOCAL_BUS_STATE_SET(x) (((x) << LOCAL_BUS_STATE_LSB) & LOCAL_BUS_STATE_MASK)
+
+#define INT_WLAN_ADDRESS 0x00000472
+#define INT_WLAN_OFFSET 0x00000472
+#define INT_WLAN_VECTOR_MSB 7
+#define INT_WLAN_VECTOR_LSB 0
+#define INT_WLAN_VECTOR_MASK 0x000000ff
+#define INT_WLAN_VECTOR_GET(x) (((x) & INT_WLAN_VECTOR_MASK) >> INT_WLAN_VECTOR_LSB)
+#define INT_WLAN_VECTOR_SET(x) (((x) << INT_WLAN_VECTOR_LSB) & INT_WLAN_VECTOR_MASK)
+
+#define WINDOW_DATA_ADDRESS 0x00000474
+#define WINDOW_DATA_OFFSET 0x00000474
+#define WINDOW_DATA_DATA_MSB 7
+#define WINDOW_DATA_DATA_LSB 0
+#define WINDOW_DATA_DATA_MASK 0x000000ff
+#define WINDOW_DATA_DATA_GET(x) (((x) & WINDOW_DATA_DATA_MASK) >> WINDOW_DATA_DATA_LSB)
+#define WINDOW_DATA_DATA_SET(x) (((x) << WINDOW_DATA_DATA_LSB) & WINDOW_DATA_DATA_MASK)
+
+#define WINDOW_WRITE_ADDR_ADDRESS 0x00000478
+#define WINDOW_WRITE_ADDR_OFFSET 0x00000478
+#define WINDOW_WRITE_ADDR_ADDR_MSB 7
+#define WINDOW_WRITE_ADDR_ADDR_LSB 0
+#define WINDOW_WRITE_ADDR_ADDR_MASK 0x000000ff
+#define WINDOW_WRITE_ADDR_ADDR_GET(x) (((x) & WINDOW_WRITE_ADDR_ADDR_MASK) >> WINDOW_WRITE_ADDR_ADDR_LSB)
+#define WINDOW_WRITE_ADDR_ADDR_SET(x) (((x) << WINDOW_WRITE_ADDR_ADDR_LSB) & WINDOW_WRITE_ADDR_ADDR_MASK)
+
+#define WINDOW_READ_ADDR_ADDRESS 0x0000047c
+#define WINDOW_READ_ADDR_OFFSET 0x0000047c
+#define WINDOW_READ_ADDR_ADDR_MSB 7
+#define WINDOW_READ_ADDR_ADDR_LSB 0
+#define WINDOW_READ_ADDR_ADDR_MASK 0x000000ff
+#define WINDOW_READ_ADDR_ADDR_GET(x) (((x) & WINDOW_READ_ADDR_ADDR_MASK) >> WINDOW_READ_ADDR_ADDR_LSB)
+#define WINDOW_READ_ADDR_ADDR_SET(x) (((x) << WINDOW_READ_ADDR_ADDR_LSB) & WINDOW_READ_ADDR_ADDR_MASK)
+
+#define SPI_CONFIG_ADDRESS 0x00000480
+#define SPI_CONFIG_OFFSET 0x00000480
+#define SPI_CONFIG_SPI_RESET_MSB 4
+#define SPI_CONFIG_SPI_RESET_LSB 4
+#define SPI_CONFIG_SPI_RESET_MASK 0x00000010
+#define SPI_CONFIG_SPI_RESET_GET(x) (((x) & SPI_CONFIG_SPI_RESET_MASK) >> SPI_CONFIG_SPI_RESET_LSB)
+#define SPI_CONFIG_SPI_RESET_SET(x) (((x) << SPI_CONFIG_SPI_RESET_LSB) & SPI_CONFIG_SPI_RESET_MASK)
+#define SPI_CONFIG_INTERRUPT_ENABLE_MSB 3
+#define SPI_CONFIG_INTERRUPT_ENABLE_LSB 3
+#define SPI_CONFIG_INTERRUPT_ENABLE_MASK 0x00000008
+#define SPI_CONFIG_INTERRUPT_ENABLE_GET(x) (((x) & SPI_CONFIG_INTERRUPT_ENABLE_MASK) >> SPI_CONFIG_INTERRUPT_ENABLE_LSB)
+#define SPI_CONFIG_INTERRUPT_ENABLE_SET(x) (((x) << SPI_CONFIG_INTERRUPT_ENABLE_LSB) & SPI_CONFIG_INTERRUPT_ENABLE_MASK)
+#define SPI_CONFIG_TEST_MODE_MSB 2
+#define SPI_CONFIG_TEST_MODE_LSB 2
+#define SPI_CONFIG_TEST_MODE_MASK 0x00000004
+#define SPI_CONFIG_TEST_MODE_GET(x) (((x) & SPI_CONFIG_TEST_MODE_MASK) >> SPI_CONFIG_TEST_MODE_LSB)
+#define SPI_CONFIG_TEST_MODE_SET(x) (((x) << SPI_CONFIG_TEST_MODE_LSB) & SPI_CONFIG_TEST_MODE_MASK)
+#define SPI_CONFIG_DATA_SIZE_MSB 1
+#define SPI_CONFIG_DATA_SIZE_LSB 0
+#define SPI_CONFIG_DATA_SIZE_MASK 0x00000003
+#define SPI_CONFIG_DATA_SIZE_GET(x) (((x) & SPI_CONFIG_DATA_SIZE_MASK) >> SPI_CONFIG_DATA_SIZE_LSB)
+#define SPI_CONFIG_DATA_SIZE_SET(x) (((x) << SPI_CONFIG_DATA_SIZE_LSB) & SPI_CONFIG_DATA_SIZE_MASK)
+
+#define SPI_STATUS_ADDRESS 0x00000481
+#define SPI_STATUS_OFFSET 0x00000481
+#define SPI_STATUS_ADDR_ERR_MSB 3
+#define SPI_STATUS_ADDR_ERR_LSB 3
+#define SPI_STATUS_ADDR_ERR_MASK 0x00000008
+#define SPI_STATUS_ADDR_ERR_GET(x) (((x) & SPI_STATUS_ADDR_ERR_MASK) >> SPI_STATUS_ADDR_ERR_LSB)
+#define SPI_STATUS_ADDR_ERR_SET(x) (((x) << SPI_STATUS_ADDR_ERR_LSB) & SPI_STATUS_ADDR_ERR_MASK)
+#define SPI_STATUS_RD_ERR_MSB 2
+#define SPI_STATUS_RD_ERR_LSB 2
+#define SPI_STATUS_RD_ERR_MASK 0x00000004
+#define SPI_STATUS_RD_ERR_GET(x) (((x) & SPI_STATUS_RD_ERR_MASK) >> SPI_STATUS_RD_ERR_LSB)
+#define SPI_STATUS_RD_ERR_SET(x) (((x) << SPI_STATUS_RD_ERR_LSB) & SPI_STATUS_RD_ERR_MASK)
+#define SPI_STATUS_WR_ERR_MSB 1
+#define SPI_STATUS_WR_ERR_LSB 1
+#define SPI_STATUS_WR_ERR_MASK 0x00000002
+#define SPI_STATUS_WR_ERR_GET(x) (((x) & SPI_STATUS_WR_ERR_MASK) >> SPI_STATUS_WR_ERR_LSB)
+#define SPI_STATUS_WR_ERR_SET(x) (((x) << SPI_STATUS_WR_ERR_LSB) & SPI_STATUS_WR_ERR_MASK)
+#define SPI_STATUS_READY_MSB 0
+#define SPI_STATUS_READY_LSB 0
+#define SPI_STATUS_READY_MASK 0x00000001
+#define SPI_STATUS_READY_GET(x) (((x) & SPI_STATUS_READY_MASK) >> SPI_STATUS_READY_LSB)
+#define SPI_STATUS_READY_SET(x) (((x) << SPI_STATUS_READY_LSB) & SPI_STATUS_READY_MASK)
+
+#define NON_ASSOC_SLEEP_EN_ADDRESS 0x00000482
+#define NON_ASSOC_SLEEP_EN_OFFSET 0x00000482
+#define NON_ASSOC_SLEEP_EN_BIT_MSB 0
+#define NON_ASSOC_SLEEP_EN_BIT_LSB 0
+#define NON_ASSOC_SLEEP_EN_BIT_MASK 0x00000001
+#define NON_ASSOC_SLEEP_EN_BIT_GET(x) (((x) & NON_ASSOC_SLEEP_EN_BIT_MASK) >> NON_ASSOC_SLEEP_EN_BIT_LSB)
+#define NON_ASSOC_SLEEP_EN_BIT_SET(x) (((x) << NON_ASSOC_SLEEP_EN_BIT_LSB) & NON_ASSOC_SLEEP_EN_BIT_MASK)
+
+#define CIS_WINDOW_ADDRESS 0x00000600
+#define CIS_WINDOW_OFFSET 0x00000600
+#define CIS_WINDOW_DATA_MSB 7
+#define CIS_WINDOW_DATA_LSB 0
+#define CIS_WINDOW_DATA_MASK 0x000000ff
+#define CIS_WINDOW_DATA_GET(x) (((x) & CIS_WINDOW_DATA_MASK) >> CIS_WINDOW_DATA_LSB)
+#define CIS_WINDOW_DATA_SET(x) (((x) << CIS_WINDOW_DATA_LSB) & CIS_WINDOW_DATA_MASK)
+
+
+#ifndef __ASSEMBLER__
+
+typedef struct mbox_host_reg_reg_s {
+ unsigned char pad0[1024]; /* pad to 0x400 */
+ volatile unsigned char host_int_status;
+ volatile unsigned char cpu_int_status;
+ volatile unsigned char error_int_status;
+ volatile unsigned char counter_int_status;
+ volatile unsigned char mbox_frame;
+ volatile unsigned char rx_lookahead_valid;
+ unsigned char pad1[2]; /* pad to 0x408 */
+ volatile unsigned char rx_lookahead0[4];
+ volatile unsigned char rx_lookahead1[4];
+ volatile unsigned char rx_lookahead2[4];
+ volatile unsigned char rx_lookahead3[4];
+ volatile unsigned char int_status_enable;
+ volatile unsigned char cpu_int_status_enable;
+ volatile unsigned char error_status_enable;
+ volatile unsigned char counter_int_status_enable;
+ unsigned char pad2[4]; /* pad to 0x420 */
+ volatile unsigned char count[8];
+ unsigned char pad3[24]; /* pad to 0x440 */
+ volatile unsigned char count_dec[32];
+ volatile unsigned char scratch[8];
+ volatile unsigned char fifo_timeout;
+ volatile unsigned char fifo_timeout_enable;
+ volatile unsigned char disable_sleep;
+ unsigned char pad4[5]; /* pad to 0x470 */
+ volatile unsigned char local_bus;
+ unsigned char pad5[1]; /* pad to 0x472 */
+ volatile unsigned char int_wlan;
+ unsigned char pad6[1]; /* pad to 0x474 */
+ volatile unsigned char window_data[4];
+ volatile unsigned char window_write_addr[4];
+ volatile unsigned char window_read_addr[4];
+ volatile unsigned char spi_config;
+ volatile unsigned char spi_status;
+ volatile unsigned char non_assoc_sleep_en;
+ unsigned char pad7[381]; /* pad to 0x600 */
+ volatile unsigned char cis_window[512];
+} mbox_host_reg_reg_t;
+
+#endif /* __ASSEMBLER__ */
+
+#endif /* _MBOX_HOST_REG_REG_H_ */
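
The registers in this block are byte-wide and are what the host reads over the HIF to service target interrupts; the same GET macros decode the values. A short sketch under two assumptions not stated by the header itself: the status byte has already been fetched from HOST_INT_STATUS_ADDRESS (0x400) by whatever HIF read the platform provides, and the 4-bit MBOX_DATA field uses the conventional one-bit-per-mailbox layout:

/* Illustrative only: decode a HOST_INT_STATUS byte already read from
 * the target; the read itself is HIF-specific. Assumes <linux/types.h>
 * and mbox_host_reg.h are included. */
static inline int host_int_pending_error(u8 status)
{
	return HOST_INT_STATUS_ERROR_GET(status);
}

static inline int host_int_mbox0_has_data(u8 status)
{
	/* MBOX_DATA occupies bits 3:0; assume bit N flags mailbox N. */
	return HOST_INT_STATUS_MBOX_DATA_GET(status) & 0x1;
}
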
diff --git a/drivers/staging/ath6kl/include/common/AR6002/hw2.0/hw/mbox_reg.h b/drivers/staging/ath6kl/include/common/AR6002/hw2.0/hw/mbox_reg.h
new file mode 100644
index 000000000000..4e07d2286107
--- /dev/null
+++ b/drivers/staging/ath6kl/include/common/AR6002/hw2.0/hw/mbox_reg.h
@@ -0,0 +1,481 @@
+#ifndef _MBOX_REG_REG_H_
+#define _MBOX_REG_REG_H_
+
+#define MBOX_FIFO_ADDRESS 0x00000000
+#define MBOX_FIFO_OFFSET 0x00000000
+#define MBOX_FIFO_DATA_MSB 19
+#define MBOX_FIFO_DATA_LSB 0
+#define MBOX_FIFO_DATA_MASK 0x000fffff
+#define MBOX_FIFO_DATA_GET(x) (((x) & MBOX_FIFO_DATA_MASK) >> MBOX_FIFO_DATA_LSB)
+#define MBOX_FIFO_DATA_SET(x) (((x) << MBOX_FIFO_DATA_LSB) & MBOX_FIFO_DATA_MASK)
+
+#define MBOX_FIFO_STATUS_ADDRESS 0x00000010
+#define MBOX_FIFO_STATUS_OFFSET 0x00000010
+#define MBOX_FIFO_STATUS_EMPTY_MSB 19
+#define MBOX_FIFO_STATUS_EMPTY_LSB 16
+#define MBOX_FIFO_STATUS_EMPTY_MASK 0x000f0000
+#define MBOX_FIFO_STATUS_EMPTY_GET(x) (((x) & MBOX_FIFO_STATUS_EMPTY_MASK) >> MBOX_FIFO_STATUS_EMPTY_LSB)
+#define MBOX_FIFO_STATUS_EMPTY_SET(x) (((x) << MBOX_FIFO_STATUS_EMPTY_LSB) & MBOX_FIFO_STATUS_EMPTY_MASK)
+#define MBOX_FIFO_STATUS_FULL_MSB 15
+#define MBOX_FIFO_STATUS_FULL_LSB 12
+#define MBOX_FIFO_STATUS_FULL_MASK 0x0000f000
+#define MBOX_FIFO_STATUS_FULL_GET(x) (((x) & MBOX_FIFO_STATUS_FULL_MASK) >> MBOX_FIFO_STATUS_FULL_LSB)
+#define MBOX_FIFO_STATUS_FULL_SET(x) (((x) << MBOX_FIFO_STATUS_FULL_LSB) & MBOX_FIFO_STATUS_FULL_MASK)
+
+#define MBOX_DMA_POLICY_ADDRESS 0x00000014
+#define MBOX_DMA_POLICY_OFFSET 0x00000014
+#define MBOX_DMA_POLICY_TX_QUANTUM_MSB 3
+#define MBOX_DMA_POLICY_TX_QUANTUM_LSB 3
+#define MBOX_DMA_POLICY_TX_QUANTUM_MASK 0x00000008
+#define MBOX_DMA_POLICY_TX_QUANTUM_GET(x) (((x) & MBOX_DMA_POLICY_TX_QUANTUM_MASK) >> MBOX_DMA_POLICY_TX_QUANTUM_LSB)
+#define MBOX_DMA_POLICY_TX_QUANTUM_SET(x) (((x) << MBOX_DMA_POLICY_TX_QUANTUM_LSB) & MBOX_DMA_POLICY_TX_QUANTUM_MASK)
+#define MBOX_DMA_POLICY_TX_ORDER_MSB 2
+#define MBOX_DMA_POLICY_TX_ORDER_LSB 2
+#define MBOX_DMA_POLICY_TX_ORDER_MASK 0x00000004
+#define MBOX_DMA_POLICY_TX_ORDER_GET(x) (((x) & MBOX_DMA_POLICY_TX_ORDER_MASK) >> MBOX_DMA_POLICY_TX_ORDER_LSB)
+#define MBOX_DMA_POLICY_TX_ORDER_SET(x) (((x) << MBOX_DMA_POLICY_TX_ORDER_LSB) & MBOX_DMA_POLICY_TX_ORDER_MASK)
+#define MBOX_DMA_POLICY_RX_QUANTUM_MSB 1
+#define MBOX_DMA_POLICY_RX_QUANTUM_LSB 1
+#define MBOX_DMA_POLICY_RX_QUANTUM_MASK 0x00000002
+#define MBOX_DMA_POLICY_RX_QUANTUM_GET(x) (((x) & MBOX_DMA_POLICY_RX_QUANTUM_MASK) >> MBOX_DMA_POLICY_RX_QUANTUM_LSB)
+#define MBOX_DMA_POLICY_RX_QUANTUM_SET(x) (((x) << MBOX_DMA_POLICY_RX_QUANTUM_LSB) & MBOX_DMA_POLICY_RX_QUANTUM_MASK)
+#define MBOX_DMA_POLICY_RX_ORDER_MSB 0
+#define MBOX_DMA_POLICY_RX_ORDER_LSB 0
+#define MBOX_DMA_POLICY_RX_ORDER_MASK 0x00000001
+#define MBOX_DMA_POLICY_RX_ORDER_GET(x) (((x) & MBOX_DMA_POLICY_RX_ORDER_MASK) >> MBOX_DMA_POLICY_RX_ORDER_LSB)
+#define MBOX_DMA_POLICY_RX_ORDER_SET(x) (((x) << MBOX_DMA_POLICY_RX_ORDER_LSB) & MBOX_DMA_POLICY_RX_ORDER_MASK)
+
+#define MBOX0_DMA_RX_DESCRIPTOR_BASE_ADDRESS 0x00000018
+#define MBOX0_DMA_RX_DESCRIPTOR_BASE_OFFSET 0x00000018
+#define MBOX0_DMA_RX_DESCRIPTOR_BASE_ADDRESS_MSB 27
+#define MBOX0_DMA_RX_DESCRIPTOR_BASE_ADDRESS_LSB 2
+#define MBOX0_DMA_RX_DESCRIPTOR_BASE_ADDRESS_MASK 0x0ffffffc
+#define MBOX0_DMA_RX_DESCRIPTOR_BASE_ADDRESS_GET(x) (((x) & MBOX0_DMA_RX_DESCRIPTOR_BASE_ADDRESS_MASK) >> MBOX0_DMA_RX_DESCRIPTOR_BASE_ADDRESS_LSB)
+#define MBOX0_DMA_RX_DESCRIPTOR_BASE_ADDRESS_SET(x) (((x) << MBOX0_DMA_RX_DESCRIPTOR_BASE_ADDRESS_LSB) & MBOX0_DMA_RX_DESCRIPTOR_BASE_ADDRESS_MASK)
+
+#define MBOX0_DMA_RX_CONTROL_ADDRESS 0x0000001c
+#define MBOX0_DMA_RX_CONTROL_OFFSET 0x0000001c
+#define MBOX0_DMA_RX_CONTROL_RESUME_MSB 2
+#define MBOX0_DMA_RX_CONTROL_RESUME_LSB 2
+#define MBOX0_DMA_RX_CONTROL_RESUME_MASK 0x00000004
+#define MBOX0_DMA_RX_CONTROL_RESUME_GET(x) (((x) & MBOX0_DMA_RX_CONTROL_RESUME_MASK) >> MBOX0_DMA_RX_CONTROL_RESUME_LSB)
+#define MBOX0_DMA_RX_CONTROL_RESUME_SET(x) (((x) << MBOX0_DMA_RX_CONTROL_RESUME_LSB) & MBOX0_DMA_RX_CONTROL_RESUME_MASK)
+#define MBOX0_DMA_RX_CONTROL_START_MSB 1
+#define MBOX0_DMA_RX_CONTROL_START_LSB 1
+#define MBOX0_DMA_RX_CONTROL_START_MASK 0x00000002
+#define MBOX0_DMA_RX_CONTROL_START_GET(x) (((x) & MBOX0_DMA_RX_CONTROL_START_MASK) >> MBOX0_DMA_RX_CONTROL_START_LSB)
+#define MBOX0_DMA_RX_CONTROL_START_SET(x) (((x) << MBOX0_DMA_RX_CONTROL_START_LSB) & MBOX0_DMA_RX_CONTROL_START_MASK)
+#define MBOX0_DMA_RX_CONTROL_STOP_MSB 0
+#define MBOX0_DMA_RX_CONTROL_STOP_LSB 0
+#define MBOX0_DMA_RX_CONTROL_STOP_MASK 0x00000001
+#define MBOX0_DMA_RX_CONTROL_STOP_GET(x) (((x) & MBOX0_DMA_RX_CONTROL_STOP_MASK) >> MBOX0_DMA_RX_CONTROL_STOP_LSB)
+#define MBOX0_DMA_RX_CONTROL_STOP_SET(x) (((x) << MBOX0_DMA_RX_CONTROL_STOP_LSB) & MBOX0_DMA_RX_CONTROL_STOP_MASK)
+
+#define MBOX0_DMA_TX_DESCRIPTOR_BASE_ADDRESS 0x00000020
+#define MBOX0_DMA_TX_DESCRIPTOR_BASE_OFFSET 0x00000020
+#define MBOX0_DMA_TX_DESCRIPTOR_BASE_ADDRESS_MSB 27
+#define MBOX0_DMA_TX_DESCRIPTOR_BASE_ADDRESS_LSB 2
+#define MBOX0_DMA_TX_DESCRIPTOR_BASE_ADDRESS_MASK 0x0ffffffc
+#define MBOX0_DMA_TX_DESCRIPTOR_BASE_ADDRESS_GET(x) (((x) & MBOX0_DMA_TX_DESCRIPTOR_BASE_ADDRESS_MASK) >> MBOX0_DMA_TX_DESCRIPTOR_BASE_ADDRESS_LSB)
+#define MBOX0_DMA_TX_DESCRIPTOR_BASE_ADDRESS_SET(x) (((x) << MBOX0_DMA_TX_DESCRIPTOR_BASE_ADDRESS_LSB) & MBOX0_DMA_TX_DESCRIPTOR_BASE_ADDRESS_MASK)
+
+#define MBOX0_DMA_TX_CONTROL_ADDRESS 0x00000024
+#define MBOX0_DMA_TX_CONTROL_OFFSET 0x00000024
+#define MBOX0_DMA_TX_CONTROL_RESUME_MSB 2
+#define MBOX0_DMA_TX_CONTROL_RESUME_LSB 2
+#define MBOX0_DMA_TX_CONTROL_RESUME_MASK 0x00000004
+#define MBOX0_DMA_TX_CONTROL_RESUME_GET(x) (((x) & MBOX0_DMA_TX_CONTROL_RESUME_MASK) >> MBOX0_DMA_TX_CONTROL_RESUME_LSB)
+#define MBOX0_DMA_TX_CONTROL_RESUME_SET(x) (((x) << MBOX0_DMA_TX_CONTROL_RESUME_LSB) & MBOX0_DMA_TX_CONTROL_RESUME_MASK)
+#define MBOX0_DMA_TX_CONTROL_START_MSB 1
+#define MBOX0_DMA_TX_CONTROL_START_LSB 1
+#define MBOX0_DMA_TX_CONTROL_START_MASK 0x00000002
+#define MBOX0_DMA_TX_CONTROL_START_GET(x) (((x) & MBOX0_DMA_TX_CONTROL_START_MASK) >> MBOX0_DMA_TX_CONTROL_START_LSB)
+#define MBOX0_DMA_TX_CONTROL_START_SET(x) (((x) << MBOX0_DMA_TX_CONTROL_START_LSB) & MBOX0_DMA_TX_CONTROL_START_MASK)
+#define MBOX0_DMA_TX_CONTROL_STOP_MSB 0
+#define MBOX0_DMA_TX_CONTROL_STOP_LSB 0
+#define MBOX0_DMA_TX_CONTROL_STOP_MASK 0x00000001
+#define MBOX0_DMA_TX_CONTROL_STOP_GET(x) (((x) & MBOX0_DMA_TX_CONTROL_STOP_MASK) >> MBOX0_DMA_TX_CONTROL_STOP_LSB)
+#define MBOX0_DMA_TX_CONTROL_STOP_SET(x) (((x) << MBOX0_DMA_TX_CONTROL_STOP_LSB) & MBOX0_DMA_TX_CONTROL_STOP_MASK)
+
+#define MBOX1_DMA_RX_DESCRIPTOR_BASE_ADDRESS 0x00000028
+#define MBOX1_DMA_RX_DESCRIPTOR_BASE_OFFSET 0x00000028
+#define MBOX1_DMA_RX_DESCRIPTOR_BASE_ADDRESS_MSB 27
+#define MBOX1_DMA_RX_DESCRIPTOR_BASE_ADDRESS_LSB 2
+#define MBOX1_DMA_RX_DESCRIPTOR_BASE_ADDRESS_MASK 0x0ffffffc
+#define MBOX1_DMA_RX_DESCRIPTOR_BASE_ADDRESS_GET(x) (((x) & MBOX1_DMA_RX_DESCRIPTOR_BASE_ADDRESS_MASK) >> MBOX1_DMA_RX_DESCRIPTOR_BASE_ADDRESS_LSB)
+#define MBOX1_DMA_RX_DESCRIPTOR_BASE_ADDRESS_SET(x) (((x) << MBOX1_DMA_RX_DESCRIPTOR_BASE_ADDRESS_LSB) & MBOX1_DMA_RX_DESCRIPTOR_BASE_ADDRESS_MASK)
+
+#define MBOX1_DMA_RX_CONTROL_ADDRESS 0x0000002c
+#define MBOX1_DMA_RX_CONTROL_OFFSET 0x0000002c
+#define MBOX1_DMA_RX_CONTROL_RESUME_MSB 2
+#define MBOX1_DMA_RX_CONTROL_RESUME_LSB 2
+#define MBOX1_DMA_RX_CONTROL_RESUME_MASK 0x00000004
+#define MBOX1_DMA_RX_CONTROL_RESUME_GET(x) (((x) & MBOX1_DMA_RX_CONTROL_RESUME_MASK) >> MBOX1_DMA_RX_CONTROL_RESUME_LSB)
+#define MBOX1_DMA_RX_CONTROL_RESUME_SET(x) (((x) << MBOX1_DMA_RX_CONTROL_RESUME_LSB) & MBOX1_DMA_RX_CONTROL_RESUME_MASK)
+#define MBOX1_DMA_RX_CONTROL_START_MSB 1
+#define MBOX1_DMA_RX_CONTROL_START_LSB 1
+#define MBOX1_DMA_RX_CONTROL_START_MASK 0x00000002
+#define MBOX1_DMA_RX_CONTROL_START_GET(x) (((x) & MBOX1_DMA_RX_CONTROL_START_MASK) >> MBOX1_DMA_RX_CONTROL_START_LSB)
+#define MBOX1_DMA_RX_CONTROL_START_SET(x) (((x) << MBOX1_DMA_RX_CONTROL_START_LSB) & MBOX1_DMA_RX_CONTROL_START_MASK)
+#define MBOX1_DMA_RX_CONTROL_STOP_MSB 0
+#define MBOX1_DMA_RX_CONTROL_STOP_LSB 0
+#define MBOX1_DMA_RX_CONTROL_STOP_MASK 0x00000001
+#define MBOX1_DMA_RX_CONTROL_STOP_GET(x) (((x) & MBOX1_DMA_RX_CONTROL_STOP_MASK) >> MBOX1_DMA_RX_CONTROL_STOP_LSB)
+#define MBOX1_DMA_RX_CONTROL_STOP_SET(x) (((x) << MBOX1_DMA_RX_CONTROL_STOP_LSB) & MBOX1_DMA_RX_CONTROL_STOP_MASK)
+
+#define MBOX1_DMA_TX_DESCRIPTOR_BASE_ADDRESS 0x00000030
+#define MBOX1_DMA_TX_DESCRIPTOR_BASE_OFFSET 0x00000030
+#define MBOX1_DMA_TX_DESCRIPTOR_BASE_ADDRESS_MSB 27
+#define MBOX1_DMA_TX_DESCRIPTOR_BASE_ADDRESS_LSB 2
+#define MBOX1_DMA_TX_DESCRIPTOR_BASE_ADDRESS_MASK 0x0ffffffc
+#define MBOX1_DMA_TX_DESCRIPTOR_BASE_ADDRESS_GET(x) (((x) & MBOX1_DMA_TX_DESCRIPTOR_BASE_ADDRESS_MASK) >> MBOX1_DMA_TX_DESCRIPTOR_BASE_ADDRESS_LSB)
+#define MBOX1_DMA_TX_DESCRIPTOR_BASE_ADDRESS_SET(x) (((x) << MBOX1_DMA_TX_DESCRIPTOR_BASE_ADDRESS_LSB) & MBOX1_DMA_TX_DESCRIPTOR_BASE_ADDRESS_MASK)
+
+#define MBOX1_DMA_TX_CONTROL_ADDRESS 0x00000034
+#define MBOX1_DMA_TX_CONTROL_OFFSET 0x00000034
+#define MBOX1_DMA_TX_CONTROL_RESUME_MSB 2
+#define MBOX1_DMA_TX_CONTROL_RESUME_LSB 2
+#define MBOX1_DMA_TX_CONTROL_RESUME_MASK 0x00000004
+#define MBOX1_DMA_TX_CONTROL_RESUME_GET(x) (((x) & MBOX1_DMA_TX_CONTROL_RESUME_MASK) >> MBOX1_DMA_TX_CONTROL_RESUME_LSB)
+#define MBOX1_DMA_TX_CONTROL_RESUME_SET(x) (((x) << MBOX1_DMA_TX_CONTROL_RESUME_LSB) & MBOX1_DMA_TX_CONTROL_RESUME_MASK)
+#define MBOX1_DMA_TX_CONTROL_START_MSB 1
+#define MBOX1_DMA_TX_CONTROL_START_LSB 1
+#define MBOX1_DMA_TX_CONTROL_START_MASK 0x00000002
+#define MBOX1_DMA_TX_CONTROL_START_GET(x) (((x) & MBOX1_DMA_TX_CONTROL_START_MASK) >> MBOX1_DMA_TX_CONTROL_START_LSB)
+#define MBOX1_DMA_TX_CONTROL_START_SET(x) (((x) << MBOX1_DMA_TX_CONTROL_START_LSB) & MBOX1_DMA_TX_CONTROL_START_MASK)
+#define MBOX1_DMA_TX_CONTROL_STOP_MSB 0
+#define MBOX1_DMA_TX_CONTROL_STOP_LSB 0
+#define MBOX1_DMA_TX_CONTROL_STOP_MASK 0x00000001
+#define MBOX1_DMA_TX_CONTROL_STOP_GET(x) (((x) & MBOX1_DMA_TX_CONTROL_STOP_MASK) >> MBOX1_DMA_TX_CONTROL_STOP_LSB)
+#define MBOX1_DMA_TX_CONTROL_STOP_SET(x) (((x) << MBOX1_DMA_TX_CONTROL_STOP_LSB) & MBOX1_DMA_TX_CONTROL_STOP_MASK)
+
+#define MBOX2_DMA_RX_DESCRIPTOR_BASE_ADDRESS 0x00000038
+#define MBOX2_DMA_RX_DESCRIPTOR_BASE_OFFSET 0x00000038
+#define MBOX2_DMA_RX_DESCRIPTOR_BASE_ADDRESS_MSB 27
+#define MBOX2_DMA_RX_DESCRIPTOR_BASE_ADDRESS_LSB 2
+#define MBOX2_DMA_RX_DESCRIPTOR_BASE_ADDRESS_MASK 0x0ffffffc
+#define MBOX2_DMA_RX_DESCRIPTOR_BASE_ADDRESS_GET(x) (((x) & MBOX2_DMA_RX_DESCRIPTOR_BASE_ADDRESS_MASK) >> MBOX2_DMA_RX_DESCRIPTOR_BASE_ADDRESS_LSB)
+#define MBOX2_DMA_RX_DESCRIPTOR_BASE_ADDRESS_SET(x) (((x) << MBOX2_DMA_RX_DESCRIPTOR_BASE_ADDRESS_LSB) & MBOX2_DMA_RX_DESCRIPTOR_BASE_ADDRESS_MASK)
+
+#define MBOX2_DMA_RX_CONTROL_ADDRESS 0x0000003c
+#define MBOX2_DMA_RX_CONTROL_OFFSET 0x0000003c
+#define MBOX2_DMA_RX_CONTROL_RESUME_MSB 2
+#define MBOX2_DMA_RX_CONTROL_RESUME_LSB 2
+#define MBOX2_DMA_RX_CONTROL_RESUME_MASK 0x00000004
+#define MBOX2_DMA_RX_CONTROL_RESUME_GET(x) (((x) & MBOX2_DMA_RX_CONTROL_RESUME_MASK) >> MBOX2_DMA_RX_CONTROL_RESUME_LSB)
+#define MBOX2_DMA_RX_CONTROL_RESUME_SET(x) (((x) << MBOX2_DMA_RX_CONTROL_RESUME_LSB) & MBOX2_DMA_RX_CONTROL_RESUME_MASK)
+#define MBOX2_DMA_RX_CONTROL_START_MSB 1
+#define MBOX2_DMA_RX_CONTROL_START_LSB 1
+#define MBOX2_DMA_RX_CONTROL_START_MASK 0x00000002
+#define MBOX2_DMA_RX_CONTROL_START_GET(x) (((x) & MBOX2_DMA_RX_CONTROL_START_MASK) >> MBOX2_DMA_RX_CONTROL_START_LSB)
+#define MBOX2_DMA_RX_CONTROL_START_SET(x) (((x) << MBOX2_DMA_RX_CONTROL_START_LSB) & MBOX2_DMA_RX_CONTROL_START_MASK)
+#define MBOX2_DMA_RX_CONTROL_STOP_MSB 0
+#define MBOX2_DMA_RX_CONTROL_STOP_LSB 0
+#define MBOX2_DMA_RX_CONTROL_STOP_MASK 0x00000001
+#define MBOX2_DMA_RX_CONTROL_STOP_GET(x) (((x) & MBOX2_DMA_RX_CONTROL_STOP_MASK) >> MBOX2_DMA_RX_CONTROL_STOP_LSB)
+#define MBOX2_DMA_RX_CONTROL_STOP_SET(x) (((x) << MBOX2_DMA_RX_CONTROL_STOP_LSB) & MBOX2_DMA_RX_CONTROL_STOP_MASK)
+
+#define MBOX2_DMA_TX_DESCRIPTOR_BASE_ADDRESS 0x00000040
+#define MBOX2_DMA_TX_DESCRIPTOR_BASE_OFFSET 0x00000040
+#define MBOX2_DMA_TX_DESCRIPTOR_BASE_ADDRESS_MSB 27
+#define MBOX2_DMA_TX_DESCRIPTOR_BASE_ADDRESS_LSB 2
+#define MBOX2_DMA_TX_DESCRIPTOR_BASE_ADDRESS_MASK 0x0ffffffc
+#define MBOX2_DMA_TX_DESCRIPTOR_BASE_ADDRESS_GET(x) (((x) & MBOX2_DMA_TX_DESCRIPTOR_BASE_ADDRESS_MASK) >> MBOX2_DMA_TX_DESCRIPTOR_BASE_ADDRESS_LSB)
+#define MBOX2_DMA_TX_DESCRIPTOR_BASE_ADDRESS_SET(x) (((x) << MBOX2_DMA_TX_DESCRIPTOR_BASE_ADDRESS_LSB) & MBOX2_DMA_TX_DESCRIPTOR_BASE_ADDRESS_MASK)
+
+#define MBOX2_DMA_TX_CONTROL_ADDRESS 0x00000044
+#define MBOX2_DMA_TX_CONTROL_OFFSET 0x00000044
+#define MBOX2_DMA_TX_CONTROL_RESUME_MSB 2
+#define MBOX2_DMA_TX_CONTROL_RESUME_LSB 2
+#define MBOX2_DMA_TX_CONTROL_RESUME_MASK 0x00000004
+#define MBOX2_DMA_TX_CONTROL_RESUME_GET(x) (((x) & MBOX2_DMA_TX_CONTROL_RESUME_MASK) >> MBOX2_DMA_TX_CONTROL_RESUME_LSB)
+#define MBOX2_DMA_TX_CONTROL_RESUME_SET(x) (((x) << MBOX2_DMA_TX_CONTROL_RESUME_LSB) & MBOX2_DMA_TX_CONTROL_RESUME_MASK)
+#define MBOX2_DMA_TX_CONTROL_START_MSB 1
+#define MBOX2_DMA_TX_CONTROL_START_LSB 1
+#define MBOX2_DMA_TX_CONTROL_START_MASK 0x00000002
+#define MBOX2_DMA_TX_CONTROL_START_GET(x) (((x) & MBOX2_DMA_TX_CONTROL_START_MASK) >> MBOX2_DMA_TX_CONTROL_START_LSB)
+#define MBOX2_DMA_TX_CONTROL_START_SET(x) (((x) << MBOX2_DMA_TX_CONTROL_START_LSB) & MBOX2_DMA_TX_CONTROL_START_MASK)
+#define MBOX2_DMA_TX_CONTROL_STOP_MSB 0
+#define MBOX2_DMA_TX_CONTROL_STOP_LSB 0
+#define MBOX2_DMA_TX_CONTROL_STOP_MASK 0x00000001
+#define MBOX2_DMA_TX_CONTROL_STOP_GET(x) (((x) & MBOX2_DMA_TX_CONTROL_STOP_MASK) >> MBOX2_DMA_TX_CONTROL_STOP_LSB)
+#define MBOX2_DMA_TX_CONTROL_STOP_SET(x) (((x) << MBOX2_DMA_TX_CONTROL_STOP_LSB) & MBOX2_DMA_TX_CONTROL_STOP_MASK)
+
+#define MBOX3_DMA_RX_DESCRIPTOR_BASE_ADDRESS 0x00000048
+#define MBOX3_DMA_RX_DESCRIPTOR_BASE_OFFSET 0x00000048
+#define MBOX3_DMA_RX_DESCRIPTOR_BASE_ADDRESS_MSB 27
+#define MBOX3_DMA_RX_DESCRIPTOR_BASE_ADDRESS_LSB 2
+#define MBOX3_DMA_RX_DESCRIPTOR_BASE_ADDRESS_MASK 0x0ffffffc
+#define MBOX3_DMA_RX_DESCRIPTOR_BASE_ADDRESS_GET(x) (((x) & MBOX3_DMA_RX_DESCRIPTOR_BASE_ADDRESS_MASK) >> MBOX3_DMA_RX_DESCRIPTOR_BASE_ADDRESS_LSB)
+#define MBOX3_DMA_RX_DESCRIPTOR_BASE_ADDRESS_SET(x) (((x) << MBOX3_DMA_RX_DESCRIPTOR_BASE_ADDRESS_LSB) & MBOX3_DMA_RX_DESCRIPTOR_BASE_ADDRESS_MASK)
+
+#define MBOX3_DMA_RX_CONTROL_ADDRESS 0x0000004c
+#define MBOX3_DMA_RX_CONTROL_OFFSET 0x0000004c
+#define MBOX3_DMA_RX_CONTROL_RESUME_MSB 2
+#define MBOX3_DMA_RX_CONTROL_RESUME_LSB 2
+#define MBOX3_DMA_RX_CONTROL_RESUME_MASK 0x00000004
+#define MBOX3_DMA_RX_CONTROL_RESUME_GET(x) (((x) & MBOX3_DMA_RX_CONTROL_RESUME_MASK) >> MBOX3_DMA_RX_CONTROL_RESUME_LSB)
+#define MBOX3_DMA_RX_CONTROL_RESUME_SET(x) (((x) << MBOX3_DMA_RX_CONTROL_RESUME_LSB) & MBOX3_DMA_RX_CONTROL_RESUME_MASK)
+#define MBOX3_DMA_RX_CONTROL_START_MSB 1
+#define MBOX3_DMA_RX_CONTROL_START_LSB 1
+#define MBOX3_DMA_RX_CONTROL_START_MASK 0x00000002
+#define MBOX3_DMA_RX_CONTROL_START_GET(x) (((x) & MBOX3_DMA_RX_CONTROL_START_MASK) >> MBOX3_DMA_RX_CONTROL_START_LSB)
+#define MBOX3_DMA_RX_CONTROL_START_SET(x) (((x) << MBOX3_DMA_RX_CONTROL_START_LSB) & MBOX3_DMA_RX_CONTROL_START_MASK)
+#define MBOX3_DMA_RX_CONTROL_STOP_MSB 0
+#define MBOX3_DMA_RX_CONTROL_STOP_LSB 0
+#define MBOX3_DMA_RX_CONTROL_STOP_MASK 0x00000001
+#define MBOX3_DMA_RX_CONTROL_STOP_GET(x) (((x) & MBOX3_DMA_RX_CONTROL_STOP_MASK) >> MBOX3_DMA_RX_CONTROL_STOP_LSB)
+#define MBOX3_DMA_RX_CONTROL_STOP_SET(x) (((x) << MBOX3_DMA_RX_CONTROL_STOP_LSB) & MBOX3_DMA_RX_CONTROL_STOP_MASK)
+
+#define MBOX3_DMA_TX_DESCRIPTOR_BASE_ADDRESS 0x00000050
+#define MBOX3_DMA_TX_DESCRIPTOR_BASE_OFFSET 0x00000050
+#define MBOX3_DMA_TX_DESCRIPTOR_BASE_ADDRESS_MSB 27
+#define MBOX3_DMA_TX_DESCRIPTOR_BASE_ADDRESS_LSB 2
+#define MBOX3_DMA_TX_DESCRIPTOR_BASE_ADDRESS_MASK 0x0ffffffc
+#define MBOX3_DMA_TX_DESCRIPTOR_BASE_ADDRESS_GET(x) (((x) & MBOX3_DMA_TX_DESCRIPTOR_BASE_ADDRESS_MASK) >> MBOX3_DMA_TX_DESCRIPTOR_BASE_ADDRESS_LSB)
+#define MBOX3_DMA_TX_DESCRIPTOR_BASE_ADDRESS_SET(x) (((x) << MBOX3_DMA_TX_DESCRIPTOR_BASE_ADDRESS_LSB) & MBOX3_DMA_TX_DESCRIPTOR_BASE_ADDRESS_MASK)
+
+#define MBOX3_DMA_TX_CONTROL_ADDRESS 0x00000054
+#define MBOX3_DMA_TX_CONTROL_OFFSET 0x00000054
+#define MBOX3_DMA_TX_CONTROL_RESUME_MSB 2
+#define MBOX3_DMA_TX_CONTROL_RESUME_LSB 2
+#define MBOX3_DMA_TX_CONTROL_RESUME_MASK 0x00000004
+#define MBOX3_DMA_TX_CONTROL_RESUME_GET(x) (((x) & MBOX3_DMA_TX_CONTROL_RESUME_MASK) >> MBOX3_DMA_TX_CONTROL_RESUME_LSB)
+#define MBOX3_DMA_TX_CONTROL_RESUME_SET(x) (((x) << MBOX3_DMA_TX_CONTROL_RESUME_LSB) & MBOX3_DMA_TX_CONTROL_RESUME_MASK)
+#define MBOX3_DMA_TX_CONTROL_START_MSB 1
+#define MBOX3_DMA_TX_CONTROL_START_LSB 1
+#define MBOX3_DMA_TX_CONTROL_START_MASK 0x00000002
+#define MBOX3_DMA_TX_CONTROL_START_GET(x) (((x) & MBOX3_DMA_TX_CONTROL_START_MASK) >> MBOX3_DMA_TX_CONTROL_START_LSB)
+#define MBOX3_DMA_TX_CONTROL_START_SET(x) (((x) << MBOX3_DMA_TX_CONTROL_START_LSB) & MBOX3_DMA_TX_CONTROL_START_MASK)
+#define MBOX3_DMA_TX_CONTROL_STOP_MSB 0
+#define MBOX3_DMA_TX_CONTROL_STOP_LSB 0
+#define MBOX3_DMA_TX_CONTROL_STOP_MASK 0x00000001
+#define MBOX3_DMA_TX_CONTROL_STOP_GET(x) (((x) & MBOX3_DMA_TX_CONTROL_STOP_MASK) >> MBOX3_DMA_TX_CONTROL_STOP_LSB)
+#define MBOX3_DMA_TX_CONTROL_STOP_SET(x) (((x) << MBOX3_DMA_TX_CONTROL_STOP_LSB) & MBOX3_DMA_TX_CONTROL_STOP_MASK)
+
+#define MBOX_INT_STATUS_ADDRESS 0x00000058
+#define MBOX_INT_STATUS_OFFSET 0x00000058
+#define MBOX_INT_STATUS_RX_DMA_COMPLETE_MSB 31
+#define MBOX_INT_STATUS_RX_DMA_COMPLETE_LSB 28
+#define MBOX_INT_STATUS_RX_DMA_COMPLETE_MASK 0xf0000000
+#define MBOX_INT_STATUS_RX_DMA_COMPLETE_GET(x) (((x) & MBOX_INT_STATUS_RX_DMA_COMPLETE_MASK) >> MBOX_INT_STATUS_RX_DMA_COMPLETE_LSB)
+#define MBOX_INT_STATUS_RX_DMA_COMPLETE_SET(x) (((x) << MBOX_INT_STATUS_RX_DMA_COMPLETE_LSB) & MBOX_INT_STATUS_RX_DMA_COMPLETE_MASK)
+#define MBOX_INT_STATUS_TX_DMA_EOM_COMPLETE_MSB 27
+#define MBOX_INT_STATUS_TX_DMA_EOM_COMPLETE_LSB 24
+#define MBOX_INT_STATUS_TX_DMA_EOM_COMPLETE_MASK 0x0f000000
+#define MBOX_INT_STATUS_TX_DMA_EOM_COMPLETE_GET(x) (((x) & MBOX_INT_STATUS_TX_DMA_EOM_COMPLETE_MASK) >> MBOX_INT_STATUS_TX_DMA_EOM_COMPLETE_LSB)
+#define MBOX_INT_STATUS_TX_DMA_EOM_COMPLETE_SET(x) (((x) << MBOX_INT_STATUS_TX_DMA_EOM_COMPLETE_LSB) & MBOX_INT_STATUS_TX_DMA_EOM_COMPLETE_MASK)
+#define MBOX_INT_STATUS_TX_DMA_COMPLETE_MSB 23
+#define MBOX_INT_STATUS_TX_DMA_COMPLETE_LSB 20
+#define MBOX_INT_STATUS_TX_DMA_COMPLETE_MASK 0x00f00000
+#define MBOX_INT_STATUS_TX_DMA_COMPLETE_GET(x) (((x) & MBOX_INT_STATUS_TX_DMA_COMPLETE_MASK) >> MBOX_INT_STATUS_TX_DMA_COMPLETE_LSB)
+#define MBOX_INT_STATUS_TX_DMA_COMPLETE_SET(x) (((x) << MBOX_INT_STATUS_TX_DMA_COMPLETE_LSB) & MBOX_INT_STATUS_TX_DMA_COMPLETE_MASK)
+#define MBOX_INT_STATUS_TX_OVERFLOW_MSB 17
+#define MBOX_INT_STATUS_TX_OVERFLOW_LSB 17
+#define MBOX_INT_STATUS_TX_OVERFLOW_MASK 0x00020000
+#define MBOX_INT_STATUS_TX_OVERFLOW_GET(x) (((x) & MBOX_INT_STATUS_TX_OVERFLOW_MASK) >> MBOX_INT_STATUS_TX_OVERFLOW_LSB)
+#define MBOX_INT_STATUS_TX_OVERFLOW_SET(x) (((x) << MBOX_INT_STATUS_TX_OVERFLOW_LSB) & MBOX_INT_STATUS_TX_OVERFLOW_MASK)
+#define MBOX_INT_STATUS_RX_UNDERFLOW_MSB 16
+#define MBOX_INT_STATUS_RX_UNDERFLOW_LSB 16
+#define MBOX_INT_STATUS_RX_UNDERFLOW_MASK 0x00010000
+#define MBOX_INT_STATUS_RX_UNDERFLOW_GET(x) (((x) & MBOX_INT_STATUS_RX_UNDERFLOW_MASK) >> MBOX_INT_STATUS_RX_UNDERFLOW_LSB)
+#define MBOX_INT_STATUS_RX_UNDERFLOW_SET(x) (((x) << MBOX_INT_STATUS_RX_UNDERFLOW_LSB) & MBOX_INT_STATUS_RX_UNDERFLOW_MASK)
+#define MBOX_INT_STATUS_TX_NOT_EMPTY_MSB 15
+#define MBOX_INT_STATUS_TX_NOT_EMPTY_LSB 12
+#define MBOX_INT_STATUS_TX_NOT_EMPTY_MASK 0x0000f000
+#define MBOX_INT_STATUS_TX_NOT_EMPTY_GET(x) (((x) & MBOX_INT_STATUS_TX_NOT_EMPTY_MASK) >> MBOX_INT_STATUS_TX_NOT_EMPTY_LSB)
+#define MBOX_INT_STATUS_TX_NOT_EMPTY_SET(x) (((x) << MBOX_INT_STATUS_TX_NOT_EMPTY_LSB) & MBOX_INT_STATUS_TX_NOT_EMPTY_MASK)
+#define MBOX_INT_STATUS_RX_NOT_FULL_MSB 11
+#define MBOX_INT_STATUS_RX_NOT_FULL_LSB 8
+#define MBOX_INT_STATUS_RX_NOT_FULL_MASK 0x00000f00
+#define MBOX_INT_STATUS_RX_NOT_FULL_GET(x) (((x) & MBOX_INT_STATUS_RX_NOT_FULL_MASK) >> MBOX_INT_STATUS_RX_NOT_FULL_LSB)
+#define MBOX_INT_STATUS_RX_NOT_FULL_SET(x) (((x) << MBOX_INT_STATUS_RX_NOT_FULL_LSB) & MBOX_INT_STATUS_RX_NOT_FULL_MASK)
+#define MBOX_INT_STATUS_HOST_MSB 7
+#define MBOX_INT_STATUS_HOST_LSB 0
+#define MBOX_INT_STATUS_HOST_MASK 0x000000ff
+#define MBOX_INT_STATUS_HOST_GET(x) (((x) & MBOX_INT_STATUS_HOST_MASK) >> MBOX_INT_STATUS_HOST_LSB)
+#define MBOX_INT_STATUS_HOST_SET(x) (((x) << MBOX_INT_STATUS_HOST_LSB) & MBOX_INT_STATUS_HOST_MASK)
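+
+#ifndef __ASSEMBLER__
+/*
+ * Illustrative sketch only (the helper name is an example, not part of the
+ * generated register map): check whether any of the four RX DMA completion
+ * bits are set in a raw MBOX_INT_STATUS value, using the GET macro above.
+ */
+static inline int mbox_int_status_rx_dma_complete_example(unsigned int status)
+{
+	return MBOX_INT_STATUS_RX_DMA_COMPLETE_GET(status) != 0;
+}
+#endif /* __ASSEMBLER__ */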
+
+#define MBOX_INT_ENABLE_ADDRESS 0x0000005c
+#define MBOX_INT_ENABLE_OFFSET 0x0000005c
+#define MBOX_INT_ENABLE_RX_DMA_COMPLETE_MSB 31
+#define MBOX_INT_ENABLE_RX_DMA_COMPLETE_LSB 28
+#define MBOX_INT_ENABLE_RX_DMA_COMPLETE_MASK 0xf0000000
+#define MBOX_INT_ENABLE_RX_DMA_COMPLETE_GET(x) (((x) & MBOX_INT_ENABLE_RX_DMA_COMPLETE_MASK) >> MBOX_INT_ENABLE_RX_DMA_COMPLETE_LSB)
+#define MBOX_INT_ENABLE_RX_DMA_COMPLETE_SET(x) (((x) << MBOX_INT_ENABLE_RX_DMA_COMPLETE_LSB) & MBOX_INT_ENABLE_RX_DMA_COMPLETE_MASK)
+#define MBOX_INT_ENABLE_TX_DMA_EOM_COMPLETE_MSB 27
+#define MBOX_INT_ENABLE_TX_DMA_EOM_COMPLETE_LSB 24
+#define MBOX_INT_ENABLE_TX_DMA_EOM_COMPLETE_MASK 0x0f000000
+#define MBOX_INT_ENABLE_TX_DMA_EOM_COMPLETE_GET(x) (((x) & MBOX_INT_ENABLE_TX_DMA_EOM_COMPLETE_MASK) >> MBOX_INT_ENABLE_TX_DMA_EOM_COMPLETE_LSB)
+#define MBOX_INT_ENABLE_TX_DMA_EOM_COMPLETE_SET(x) (((x) << MBOX_INT_ENABLE_TX_DMA_EOM_COMPLETE_LSB) & MBOX_INT_ENABLE_TX_DMA_EOM_COMPLETE_MASK)
+#define MBOX_INT_ENABLE_TX_DMA_COMPLETE_MSB 23
+#define MBOX_INT_ENABLE_TX_DMA_COMPLETE_LSB 20
+#define MBOX_INT_ENABLE_TX_DMA_COMPLETE_MASK 0x00f00000
+#define MBOX_INT_ENABLE_TX_DMA_COMPLETE_GET(x) (((x) & MBOX_INT_ENABLE_TX_DMA_COMPLETE_MASK) >> MBOX_INT_ENABLE_TX_DMA_COMPLETE_LSB)
+#define MBOX_INT_ENABLE_TX_DMA_COMPLETE_SET(x) (((x) << MBOX_INT_ENABLE_TX_DMA_COMPLETE_LSB) & MBOX_INT_ENABLE_TX_DMA_COMPLETE_MASK)
+#define MBOX_INT_ENABLE_TX_OVERFLOW_MSB 17
+#define MBOX_INT_ENABLE_TX_OVERFLOW_LSB 17
+#define MBOX_INT_ENABLE_TX_OVERFLOW_MASK 0x00020000
+#define MBOX_INT_ENABLE_TX_OVERFLOW_GET(x) (((x) & MBOX_INT_ENABLE_TX_OVERFLOW_MASK) >> MBOX_INT_ENABLE_TX_OVERFLOW_LSB)
+#define MBOX_INT_ENABLE_TX_OVERFLOW_SET(x) (((x) << MBOX_INT_ENABLE_TX_OVERFLOW_LSB) & MBOX_INT_ENABLE_TX_OVERFLOW_MASK)
+#define MBOX_INT_ENABLE_RX_UNDERFLOW_MSB 16
+#define MBOX_INT_ENABLE_RX_UNDERFLOW_LSB 16
+#define MBOX_INT_ENABLE_RX_UNDERFLOW_MASK 0x00010000
+#define MBOX_INT_ENABLE_RX_UNDERFLOW_GET(x) (((x) & MBOX_INT_ENABLE_RX_UNDERFLOW_MASK) >> MBOX_INT_ENABLE_RX_UNDERFLOW_LSB)
+#define MBOX_INT_ENABLE_RX_UNDERFLOW_SET(x) (((x) << MBOX_INT_ENABLE_RX_UNDERFLOW_LSB) & MBOX_INT_ENABLE_RX_UNDERFLOW_MASK)
+#define MBOX_INT_ENABLE_TX_NOT_EMPTY_MSB 15
+#define MBOX_INT_ENABLE_TX_NOT_EMPTY_LSB 12
+#define MBOX_INT_ENABLE_TX_NOT_EMPTY_MASK 0x0000f000
+#define MBOX_INT_ENABLE_TX_NOT_EMPTY_GET(x) (((x) & MBOX_INT_ENABLE_TX_NOT_EMPTY_MASK) >> MBOX_INT_ENABLE_TX_NOT_EMPTY_LSB)
+#define MBOX_INT_ENABLE_TX_NOT_EMPTY_SET(x) (((x) << MBOX_INT_ENABLE_TX_NOT_EMPTY_LSB) & MBOX_INT_ENABLE_TX_NOT_EMPTY_MASK)
+#define MBOX_INT_ENABLE_RX_NOT_FULL_MSB 11
+#define MBOX_INT_ENABLE_RX_NOT_FULL_LSB 8
+#define MBOX_INT_ENABLE_RX_NOT_FULL_MASK 0x00000f00
+#define MBOX_INT_ENABLE_RX_NOT_FULL_GET(x) (((x) & MBOX_INT_ENABLE_RX_NOT_FULL_MASK) >> MBOX_INT_ENABLE_RX_NOT_FULL_LSB)
+#define MBOX_INT_ENABLE_RX_NOT_FULL_SET(x) (((x) << MBOX_INT_ENABLE_RX_NOT_FULL_LSB) & MBOX_INT_ENABLE_RX_NOT_FULL_MASK)
+#define MBOX_INT_ENABLE_HOST_MSB 7
+#define MBOX_INT_ENABLE_HOST_LSB 0
+#define MBOX_INT_ENABLE_HOST_MASK 0x000000ff
+#define MBOX_INT_ENABLE_HOST_GET(x) (((x) & MBOX_INT_ENABLE_HOST_MASK) >> MBOX_INT_ENABLE_HOST_LSB)
+#define MBOX_INT_ENABLE_HOST_SET(x) (((x) << MBOX_INT_ENABLE_HOST_LSB) & MBOX_INT_ENABLE_HOST_MASK)
+
+#define INT_HOST_ADDRESS 0x00000060
+#define INT_HOST_OFFSET 0x00000060
+#define INT_HOST_VECTOR_MSB 7
+#define INT_HOST_VECTOR_LSB 0
+#define INT_HOST_VECTOR_MASK 0x000000ff
+#define INT_HOST_VECTOR_GET(x) (((x) & INT_HOST_VECTOR_MASK) >> INT_HOST_VECTOR_LSB)
+#define INT_HOST_VECTOR_SET(x) (((x) << INT_HOST_VECTOR_LSB) & INT_HOST_VECTOR_MASK)
+
+#define LOCAL_COUNT_ADDRESS 0x00000080
+#define LOCAL_COUNT_OFFSET 0x00000080
+#define LOCAL_COUNT_VALUE_MSB 7
+#define LOCAL_COUNT_VALUE_LSB 0
+#define LOCAL_COUNT_VALUE_MASK 0x000000ff
+#define LOCAL_COUNT_VALUE_GET(x) (((x) & LOCAL_COUNT_VALUE_MASK) >> LOCAL_COUNT_VALUE_LSB)
+#define LOCAL_COUNT_VALUE_SET(x) (((x) << LOCAL_COUNT_VALUE_LSB) & LOCAL_COUNT_VALUE_MASK)
+
+#define COUNT_INC_ADDRESS 0x000000a0
+#define COUNT_INC_OFFSET 0x000000a0
+#define COUNT_INC_VALUE_MSB 7
+#define COUNT_INC_VALUE_LSB 0
+#define COUNT_INC_VALUE_MASK 0x000000ff
+#define COUNT_INC_VALUE_GET(x) (((x) & COUNT_INC_VALUE_MASK) >> COUNT_INC_VALUE_LSB)
+#define COUNT_INC_VALUE_SET(x) (((x) << COUNT_INC_VALUE_LSB) & COUNT_INC_VALUE_MASK)
+
+#define LOCAL_SCRATCH_ADDRESS 0x000000c0
+#define LOCAL_SCRATCH_OFFSET 0x000000c0
+#define LOCAL_SCRATCH_VALUE_MSB 7
+#define LOCAL_SCRATCH_VALUE_LSB 0
+#define LOCAL_SCRATCH_VALUE_MASK 0x000000ff
+#define LOCAL_SCRATCH_VALUE_GET(x) (((x) & LOCAL_SCRATCH_VALUE_MASK) >> LOCAL_SCRATCH_VALUE_LSB)
+#define LOCAL_SCRATCH_VALUE_SET(x) (((x) << LOCAL_SCRATCH_VALUE_LSB) & LOCAL_SCRATCH_VALUE_MASK)
+
+#define USE_LOCAL_BUS_ADDRESS 0x000000e0
+#define USE_LOCAL_BUS_OFFSET 0x000000e0
+#define USE_LOCAL_BUS_PIN_INIT_MSB 0
+#define USE_LOCAL_BUS_PIN_INIT_LSB 0
+#define USE_LOCAL_BUS_PIN_INIT_MASK 0x00000001
+#define USE_LOCAL_BUS_PIN_INIT_GET(x) (((x) & USE_LOCAL_BUS_PIN_INIT_MASK) >> USE_LOCAL_BUS_PIN_INIT_LSB)
+#define USE_LOCAL_BUS_PIN_INIT_SET(x) (((x) << USE_LOCAL_BUS_PIN_INIT_LSB) & USE_LOCAL_BUS_PIN_INIT_MASK)
+
+#define SDIO_CONFIG_ADDRESS 0x000000e4
+#define SDIO_CONFIG_OFFSET 0x000000e4
+#define SDIO_CONFIG_CCCR_IOR1_MSB 0
+#define SDIO_CONFIG_CCCR_IOR1_LSB 0
+#define SDIO_CONFIG_CCCR_IOR1_MASK 0x00000001
+#define SDIO_CONFIG_CCCR_IOR1_GET(x) (((x) & SDIO_CONFIG_CCCR_IOR1_MASK) >> SDIO_CONFIG_CCCR_IOR1_LSB)
+#define SDIO_CONFIG_CCCR_IOR1_SET(x) (((x) << SDIO_CONFIG_CCCR_IOR1_LSB) & SDIO_CONFIG_CCCR_IOR1_MASK)
+
+#define MBOX_DEBUG_ADDRESS 0x000000e8
+#define MBOX_DEBUG_OFFSET 0x000000e8
+#define MBOX_DEBUG_SEL_MSB 2
+#define MBOX_DEBUG_SEL_LSB 0
+#define MBOX_DEBUG_SEL_MASK 0x00000007
+#define MBOX_DEBUG_SEL_GET(x) (((x) & MBOX_DEBUG_SEL_MASK) >> MBOX_DEBUG_SEL_LSB)
+#define MBOX_DEBUG_SEL_SET(x) (((x) << MBOX_DEBUG_SEL_LSB) & MBOX_DEBUG_SEL_MASK)
+
+#define MBOX_FIFO_RESET_ADDRESS 0x000000ec
+#define MBOX_FIFO_RESET_OFFSET 0x000000ec
+#define MBOX_FIFO_RESET_INIT_MSB 0
+#define MBOX_FIFO_RESET_INIT_LSB 0
+#define MBOX_FIFO_RESET_INIT_MASK 0x00000001
+#define MBOX_FIFO_RESET_INIT_GET(x) (((x) & MBOX_FIFO_RESET_INIT_MASK) >> MBOX_FIFO_RESET_INIT_LSB)
+#define MBOX_FIFO_RESET_INIT_SET(x) (((x) << MBOX_FIFO_RESET_INIT_LSB) & MBOX_FIFO_RESET_INIT_MASK)
+
+#define MBOX_TXFIFO_POP_ADDRESS 0x000000f0
+#define MBOX_TXFIFO_POP_OFFSET 0x000000f0
+#define MBOX_TXFIFO_POP_DATA_MSB 0
+#define MBOX_TXFIFO_POP_DATA_LSB 0
+#define MBOX_TXFIFO_POP_DATA_MASK 0x00000001
+#define MBOX_TXFIFO_POP_DATA_GET(x) (((x) & MBOX_TXFIFO_POP_DATA_MASK) >> MBOX_TXFIFO_POP_DATA_LSB)
+#define MBOX_TXFIFO_POP_DATA_SET(x) (((x) << MBOX_TXFIFO_POP_DATA_LSB) & MBOX_TXFIFO_POP_DATA_MASK)
+
+#define MBOX_RXFIFO_POP_ADDRESS 0x00000100
+#define MBOX_RXFIFO_POP_OFFSET 0x00000100
+#define MBOX_RXFIFO_POP_DATA_MSB 0
+#define MBOX_RXFIFO_POP_DATA_LSB 0
+#define MBOX_RXFIFO_POP_DATA_MASK 0x00000001
+#define MBOX_RXFIFO_POP_DATA_GET(x) (((x) & MBOX_RXFIFO_POP_DATA_MASK) >> MBOX_RXFIFO_POP_DATA_LSB)
+#define MBOX_RXFIFO_POP_DATA_SET(x) (((x) << MBOX_RXFIFO_POP_DATA_LSB) & MBOX_RXFIFO_POP_DATA_MASK)
+
+#define SDIO_DEBUG_ADDRESS 0x00000110
+#define SDIO_DEBUG_OFFSET 0x00000110
+#define SDIO_DEBUG_SEL_MSB 3
+#define SDIO_DEBUG_SEL_LSB 0
+#define SDIO_DEBUG_SEL_MASK 0x0000000f
+#define SDIO_DEBUG_SEL_GET(x) (((x) & SDIO_DEBUG_SEL_MASK) >> SDIO_DEBUG_SEL_LSB)
+#define SDIO_DEBUG_SEL_SET(x) (((x) << SDIO_DEBUG_SEL_LSB) & SDIO_DEBUG_SEL_MASK)
+
+#define HOST_IF_WINDOW_ADDRESS 0x00002000
+#define HOST_IF_WINDOW_OFFSET 0x00002000
+#define HOST_IF_WINDOW_DATA_MSB 7
+#define HOST_IF_WINDOW_DATA_LSB 0
+#define HOST_IF_WINDOW_DATA_MASK 0x000000ff
+#define HOST_IF_WINDOW_DATA_GET(x) (((x) & HOST_IF_WINDOW_DATA_MASK) >> HOST_IF_WINDOW_DATA_LSB)
+#define HOST_IF_WINDOW_DATA_SET(x) (((x) << HOST_IF_WINDOW_DATA_LSB) & HOST_IF_WINDOW_DATA_MASK)
+
+
+#ifndef __ASSEMBLER__
+
+typedef struct mbox_reg_reg_s {
+	volatile unsigned int mbox_fifo[4];
+	volatile unsigned int mbox_fifo_status;
+	volatile unsigned int mbox_dma_policy;
+	volatile unsigned int mbox0_dma_rx_descriptor_base;
+	volatile unsigned int mbox0_dma_rx_control;
+	volatile unsigned int mbox0_dma_tx_descriptor_base;
+	volatile unsigned int mbox0_dma_tx_control;
+	volatile unsigned int mbox1_dma_rx_descriptor_base;
+	volatile unsigned int mbox1_dma_rx_control;
+	volatile unsigned int mbox1_dma_tx_descriptor_base;
+	volatile unsigned int mbox1_dma_tx_control;
+	volatile unsigned int mbox2_dma_rx_descriptor_base;
+	volatile unsigned int mbox2_dma_rx_control;
+	volatile unsigned int mbox2_dma_tx_descriptor_base;
+	volatile unsigned int mbox2_dma_tx_control;
+	volatile unsigned int mbox3_dma_rx_descriptor_base;
+	volatile unsigned int mbox3_dma_rx_control;
+	volatile unsigned int mbox3_dma_tx_descriptor_base;
+	volatile unsigned int mbox3_dma_tx_control;
+	volatile unsigned int mbox_int_status;
+	volatile unsigned int mbox_int_enable;
+	volatile unsigned int int_host;
+	unsigned char pad0[28]; /* pad to 0x80 */
+	volatile unsigned int local_count[8];
+	volatile unsigned int count_inc[8];
+	volatile unsigned int local_scratch[8];
+	volatile unsigned int use_local_bus;
+	volatile unsigned int sdio_config;
+	volatile unsigned int mbox_debug;
+	volatile unsigned int mbox_fifo_reset;
+	volatile unsigned int mbox_txfifo_pop[4];
+	volatile unsigned int mbox_rxfifo_pop[4];
+	volatile unsigned int sdio_debug;
+	unsigned char pad1[7916]; /* pad to 0x2000 */
+	volatile unsigned int host_if_window[2048];
+} mbox_reg_reg_t;
+
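+/*
+ * Illustrative sketch only: write the single INIT bit of MBOX_FIFO_RESET
+ * through the overlay above. The 'regs' pointer is an assumption; a caller
+ * would have to point it at the memory-mapped base of this register block.
+ */
+static inline void mbox_fifo_reset_example(mbox_reg_reg_t *regs)
+{
+	regs->mbox_fifo_reset = MBOX_FIFO_RESET_INIT_SET(1);
+}
+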
+#endif /* __ASSEMBLER__ */
+
+#endif /* _MBOX_REG_H_ */
diff --git a/drivers/staging/ath6kl/include/common/AR6002/hw2.0/hw/rtc_reg.h b/drivers/staging/ath6kl/include/common/AR6002/hw2.0/hw/rtc_reg.h
new file mode 100644
index 000000000000..8b3980afb643
--- /dev/null
+++ b/drivers/staging/ath6kl/include/common/AR6002/hw2.0/hw/rtc_reg.h
@@ -0,0 +1,1163 @@
+#ifndef _RTC_REG_REG_H_
+#define _RTC_REG_REG_H_
+
+#define RESET_CONTROL_ADDRESS 0x00000000
+#define RESET_CONTROL_OFFSET 0x00000000
+#define RESET_CONTROL_CPU_INIT_RESET_MSB 11
+#define RESET_CONTROL_CPU_INIT_RESET_LSB 11
+#define RESET_CONTROL_CPU_INIT_RESET_MASK 0x00000800
+#define RESET_CONTROL_CPU_INIT_RESET_GET(x) (((x) & RESET_CONTROL_CPU_INIT_RESET_MASK) >> RESET_CONTROL_CPU_INIT_RESET_LSB)
+#define RESET_CONTROL_CPU_INIT_RESET_SET(x) (((x) << RESET_CONTROL_CPU_INIT_RESET_LSB) & RESET_CONTROL_CPU_INIT_RESET_MASK)
+#define RESET_CONTROL_VMC_REMAP_RESET_MSB 10
+#define RESET_CONTROL_VMC_REMAP_RESET_LSB 10
+#define RESET_CONTROL_VMC_REMAP_RESET_MASK 0x00000400
+#define RESET_CONTROL_VMC_REMAP_RESET_GET(x) (((x) & RESET_CONTROL_VMC_REMAP_RESET_MASK) >> RESET_CONTROL_VMC_REMAP_RESET_LSB)
+#define RESET_CONTROL_VMC_REMAP_RESET_SET(x) (((x) << RESET_CONTROL_VMC_REMAP_RESET_LSB) & RESET_CONTROL_VMC_REMAP_RESET_MASK)
+#define RESET_CONTROL_RST_OUT_MSB 9
+#define RESET_CONTROL_RST_OUT_LSB 9
+#define RESET_CONTROL_RST_OUT_MASK 0x00000200
+#define RESET_CONTROL_RST_OUT_GET(x) (((x) & RESET_CONTROL_RST_OUT_MASK) >> RESET_CONTROL_RST_OUT_LSB)
+#define RESET_CONTROL_RST_OUT_SET(x) (((x) << RESET_CONTROL_RST_OUT_LSB) & RESET_CONTROL_RST_OUT_MASK)
+#define RESET_CONTROL_COLD_RST_MSB 8
+#define RESET_CONTROL_COLD_RST_LSB 8
+#define RESET_CONTROL_COLD_RST_MASK 0x00000100
+#define RESET_CONTROL_COLD_RST_GET(x) (((x) & RESET_CONTROL_COLD_RST_MASK) >> RESET_CONTROL_COLD_RST_LSB)
+#define RESET_CONTROL_COLD_RST_SET(x) (((x) << RESET_CONTROL_COLD_RST_LSB) & RESET_CONTROL_COLD_RST_MASK)
+#define RESET_CONTROL_WARM_RST_MSB 7
+#define RESET_CONTROL_WARM_RST_LSB 7
+#define RESET_CONTROL_WARM_RST_MASK 0x00000080
+#define RESET_CONTROL_WARM_RST_GET(x) (((x) & RESET_CONTROL_WARM_RST_MASK) >> RESET_CONTROL_WARM_RST_LSB)
+#define RESET_CONTROL_WARM_RST_SET(x) (((x) << RESET_CONTROL_WARM_RST_LSB) & RESET_CONTROL_WARM_RST_MASK)
+#define RESET_CONTROL_CPU_WARM_RST_MSB 6
+#define RESET_CONTROL_CPU_WARM_RST_LSB 6
+#define RESET_CONTROL_CPU_WARM_RST_MASK 0x00000040
+#define RESET_CONTROL_CPU_WARM_RST_GET(x) (((x) & RESET_CONTROL_CPU_WARM_RST_MASK) >> RESET_CONTROL_CPU_WARM_RST_LSB)
+#define RESET_CONTROL_CPU_WARM_RST_SET(x) (((x) << RESET_CONTROL_CPU_WARM_RST_LSB) & RESET_CONTROL_CPU_WARM_RST_MASK)
+#define RESET_CONTROL_MAC_COLD_RST_MSB 5
+#define RESET_CONTROL_MAC_COLD_RST_LSB 5
+#define RESET_CONTROL_MAC_COLD_RST_MASK 0x00000020
+#define RESET_CONTROL_MAC_COLD_RST_GET(x) (((x) & RESET_CONTROL_MAC_COLD_RST_MASK) >> RESET_CONTROL_MAC_COLD_RST_LSB)
+#define RESET_CONTROL_MAC_COLD_RST_SET(x) (((x) << RESET_CONTROL_MAC_COLD_RST_LSB) & RESET_CONTROL_MAC_COLD_RST_MASK)
+#define RESET_CONTROL_MAC_WARM_RST_MSB 4
+#define RESET_CONTROL_MAC_WARM_RST_LSB 4
+#define RESET_CONTROL_MAC_WARM_RST_MASK 0x00000010
+#define RESET_CONTROL_MAC_WARM_RST_GET(x) (((x) & RESET_CONTROL_MAC_WARM_RST_MASK) >> RESET_CONTROL_MAC_WARM_RST_LSB)
+#define RESET_CONTROL_MAC_WARM_RST_SET(x) (((x) << RESET_CONTROL_MAC_WARM_RST_LSB) & RESET_CONTROL_MAC_WARM_RST_MASK)
+#define RESET_CONTROL_MBOX_RST_MSB 2
+#define RESET_CONTROL_MBOX_RST_LSB 2
+#define RESET_CONTROL_MBOX_RST_MASK 0x00000004
+#define RESET_CONTROL_MBOX_RST_GET(x) (((x) & RESET_CONTROL_MBOX_RST_MASK) >> RESET_CONTROL_MBOX_RST_LSB)
+#define RESET_CONTROL_MBOX_RST_SET(x) (((x) << RESET_CONTROL_MBOX_RST_LSB) & RESET_CONTROL_MBOX_RST_MASK)
+#define RESET_CONTROL_UART_RST_MSB 1
+#define RESET_CONTROL_UART_RST_LSB 1
+#define RESET_CONTROL_UART_RST_MASK 0x00000002
+#define RESET_CONTROL_UART_RST_GET(x) (((x) & RESET_CONTROL_UART_RST_MASK) >> RESET_CONTROL_UART_RST_LSB)
+#define RESET_CONTROL_UART_RST_SET(x) (((x) << RESET_CONTROL_UART_RST_LSB) & RESET_CONTROL_UART_RST_MASK)
+#define RESET_CONTROL_SI0_RST_MSB 0
+#define RESET_CONTROL_SI0_RST_LSB 0
+#define RESET_CONTROL_SI0_RST_MASK 0x00000001
+#define RESET_CONTROL_SI0_RST_GET(x) (((x) & RESET_CONTROL_SI0_RST_MASK) >> RESET_CONTROL_SI0_RST_LSB)
+#define RESET_CONTROL_SI0_RST_SET(x) (((x) << RESET_CONTROL_SI0_RST_LSB) & RESET_CONTROL_SI0_RST_MASK)
+
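+#ifndef __ASSEMBLER__
+/*
+ * Illustrative sketch only: update a previously read RESET_CONTROL value so
+ * that the WARM_RST bit is requested while the other fields are preserved.
+ * The helper name and calling convention are examples, not part of this map.
+ */
+static inline unsigned int reset_control_with_warm_rst_example(unsigned int val)
+{
+	return (val & ~RESET_CONTROL_WARM_RST_MASK) | RESET_CONTROL_WARM_RST_SET(1);
+}
+#endif /* __ASSEMBLER__ */
+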
+#define XTAL_CONTROL_ADDRESS 0x00000004
+#define XTAL_CONTROL_OFFSET 0x00000004
+#define XTAL_CONTROL_TCXO_MSB 0
+#define XTAL_CONTROL_TCXO_LSB 0
+#define XTAL_CONTROL_TCXO_MASK 0x00000001
+#define XTAL_CONTROL_TCXO_GET(x) (((x) & XTAL_CONTROL_TCXO_MASK) >> XTAL_CONTROL_TCXO_LSB)
+#define XTAL_CONTROL_TCXO_SET(x) (((x) << XTAL_CONTROL_TCXO_LSB) & XTAL_CONTROL_TCXO_MASK)
+
+#define TCXO_DETECT_ADDRESS 0x00000008
+#define TCXO_DETECT_OFFSET 0x00000008
+#define TCXO_DETECT_PRESENT_MSB 0
+#define TCXO_DETECT_PRESENT_LSB 0
+#define TCXO_DETECT_PRESENT_MASK 0x00000001
+#define TCXO_DETECT_PRESENT_GET(x) (((x) & TCXO_DETECT_PRESENT_MASK) >> TCXO_DETECT_PRESENT_LSB)
+#define TCXO_DETECT_PRESENT_SET(x) (((x) << TCXO_DETECT_PRESENT_LSB) & TCXO_DETECT_PRESENT_MASK)
+
+#define XTAL_TEST_ADDRESS 0x0000000c
+#define XTAL_TEST_OFFSET 0x0000000c
+#define XTAL_TEST_NOTCXODET_MSB 0
+#define XTAL_TEST_NOTCXODET_LSB 0
+#define XTAL_TEST_NOTCXODET_MASK 0x00000001
+#define XTAL_TEST_NOTCXODET_GET(x) (((x) & XTAL_TEST_NOTCXODET_MASK) >> XTAL_TEST_NOTCXODET_LSB)
+#define XTAL_TEST_NOTCXODET_SET(x) (((x) << XTAL_TEST_NOTCXODET_LSB) & XTAL_TEST_NOTCXODET_MASK)
+
+#define QUADRATURE_ADDRESS 0x00000010
+#define QUADRATURE_OFFSET 0x00000010
+#define QUADRATURE_ADC_MSB 5
+#define QUADRATURE_ADC_LSB 4
+#define QUADRATURE_ADC_MASK 0x00000030
+#define QUADRATURE_ADC_GET(x) (((x) & QUADRATURE_ADC_MASK) >> QUADRATURE_ADC_LSB)
+#define QUADRATURE_ADC_SET(x) (((x) << QUADRATURE_ADC_LSB) & QUADRATURE_ADC_MASK)
+#define QUADRATURE_SEL_MSB 2
+#define QUADRATURE_SEL_LSB 2
+#define QUADRATURE_SEL_MASK 0x00000004
+#define QUADRATURE_SEL_GET(x) (((x) & QUADRATURE_SEL_MASK) >> QUADRATURE_SEL_LSB)
+#define QUADRATURE_SEL_SET(x) (((x) << QUADRATURE_SEL_LSB) & QUADRATURE_SEL_MASK)
+#define QUADRATURE_DAC_MSB 1
+#define QUADRATURE_DAC_LSB 0
+#define QUADRATURE_DAC_MASK 0x00000003
+#define QUADRATURE_DAC_GET(x) (((x) & QUADRATURE_DAC_MASK) >> QUADRATURE_DAC_LSB)
+#define QUADRATURE_DAC_SET(x) (((x) << QUADRATURE_DAC_LSB) & QUADRATURE_DAC_MASK)
+
+#define PLL_CONTROL_ADDRESS 0x00000014
+#define PLL_CONTROL_OFFSET 0x00000014
+#define PLL_CONTROL_DIG_TEST_CLK_MSB 20
+#define PLL_CONTROL_DIG_TEST_CLK_LSB 20
+#define PLL_CONTROL_DIG_TEST_CLK_MASK 0x00100000
+#define PLL_CONTROL_DIG_TEST_CLK_GET(x) (((x) & PLL_CONTROL_DIG_TEST_CLK_MASK) >> PLL_CONTROL_DIG_TEST_CLK_LSB)
+#define PLL_CONTROL_DIG_TEST_CLK_SET(x) (((x) << PLL_CONTROL_DIG_TEST_CLK_LSB) & PLL_CONTROL_DIG_TEST_CLK_MASK)
+#define PLL_CONTROL_MAC_OVERRIDE_MSB 19
+#define PLL_CONTROL_MAC_OVERRIDE_LSB 19
+#define PLL_CONTROL_MAC_OVERRIDE_MASK 0x00080000
+#define PLL_CONTROL_MAC_OVERRIDE_GET(x) (((x) & PLL_CONTROL_MAC_OVERRIDE_MASK) >> PLL_CONTROL_MAC_OVERRIDE_LSB)
+#define PLL_CONTROL_MAC_OVERRIDE_SET(x) (((x) << PLL_CONTROL_MAC_OVERRIDE_LSB) & PLL_CONTROL_MAC_OVERRIDE_MASK)
+#define PLL_CONTROL_NOPWD_MSB 18
+#define PLL_CONTROL_NOPWD_LSB 18
+#define PLL_CONTROL_NOPWD_MASK 0x00040000
+#define PLL_CONTROL_NOPWD_GET(x) (((x) & PLL_CONTROL_NOPWD_MASK) >> PLL_CONTROL_NOPWD_LSB)
+#define PLL_CONTROL_NOPWD_SET(x) (((x) << PLL_CONTROL_NOPWD_LSB) & PLL_CONTROL_NOPWD_MASK)
+#define PLL_CONTROL_UPDATING_MSB 17
+#define PLL_CONTROL_UPDATING_LSB 17
+#define PLL_CONTROL_UPDATING_MASK 0x00020000
+#define PLL_CONTROL_UPDATING_GET(x) (((x) & PLL_CONTROL_UPDATING_MASK) >> PLL_CONTROL_UPDATING_LSB)
+#define PLL_CONTROL_UPDATING_SET(x) (((x) << PLL_CONTROL_UPDATING_LSB) & PLL_CONTROL_UPDATING_MASK)
+#define PLL_CONTROL_BYPASS_MSB 16
+#define PLL_CONTROL_BYPASS_LSB 16
+#define PLL_CONTROL_BYPASS_MASK 0x00010000
+#define PLL_CONTROL_BYPASS_GET(x) (((x) & PLL_CONTROL_BYPASS_MASK) >> PLL_CONTROL_BYPASS_LSB)
+#define PLL_CONTROL_BYPASS_SET(x) (((x) << PLL_CONTROL_BYPASS_LSB) & PLL_CONTROL_BYPASS_MASK)
+#define PLL_CONTROL_REFDIV_MSB 15
+#define PLL_CONTROL_REFDIV_LSB 12
+#define PLL_CONTROL_REFDIV_MASK 0x0000f000
+#define PLL_CONTROL_REFDIV_GET(x) (((x) & PLL_CONTROL_REFDIV_MASK) >> PLL_CONTROL_REFDIV_LSB)
+#define PLL_CONTROL_REFDIV_SET(x) (((x) << PLL_CONTROL_REFDIV_LSB) & PLL_CONTROL_REFDIV_MASK)
+#define PLL_CONTROL_DIV_MSB 9
+#define PLL_CONTROL_DIV_LSB 0
+#define PLL_CONTROL_DIV_MASK 0x000003ff
+#define PLL_CONTROL_DIV_GET(x) (((x) & PLL_CONTROL_DIV_MASK) >> PLL_CONTROL_DIV_LSB)
+#define PLL_CONTROL_DIV_SET(x) (((x) << PLL_CONTROL_DIV_LSB) & PLL_CONTROL_DIV_MASK)
+
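+#ifndef __ASSEMBLER__
+/*
+ * Illustrative sketch only: pack REFDIV and DIV values into a PLL_CONTROL
+ * word with the SET macros above. The values a caller would choose are
+ * hardware dependent and are not suggested here; the helper is an example.
+ */
+static inline unsigned int pll_control_pack_example(unsigned int refdiv,
+						     unsigned int div)
+{
+	return PLL_CONTROL_REFDIV_SET(refdiv) | PLL_CONTROL_DIV_SET(div);
+}
+#endif /* __ASSEMBLER__ */
+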
+#define PLL_SETTLE_ADDRESS 0x00000018
+#define PLL_SETTLE_OFFSET 0x00000018
+#define PLL_SETTLE_TIME_MSB 11
+#define PLL_SETTLE_TIME_LSB 0
+#define PLL_SETTLE_TIME_MASK 0x00000fff
+#define PLL_SETTLE_TIME_GET(x) (((x) & PLL_SETTLE_TIME_MASK) >> PLL_SETTLE_TIME_LSB)
+#define PLL_SETTLE_TIME_SET(x) (((x) << PLL_SETTLE_TIME_LSB) & PLL_SETTLE_TIME_MASK)
+
+#define XTAL_SETTLE_ADDRESS 0x0000001c
+#define XTAL_SETTLE_OFFSET 0x0000001c
+#define XTAL_SETTLE_TIME_MSB 7
+#define XTAL_SETTLE_TIME_LSB 0
+#define XTAL_SETTLE_TIME_MASK 0x000000ff
+#define XTAL_SETTLE_TIME_GET(x) (((x) & XTAL_SETTLE_TIME_MASK) >> XTAL_SETTLE_TIME_LSB)
+#define XTAL_SETTLE_TIME_SET(x) (((x) << XTAL_SETTLE_TIME_LSB) & XTAL_SETTLE_TIME_MASK)
+
+#define CPU_CLOCK_ADDRESS 0x00000020
+#define CPU_CLOCK_OFFSET 0x00000020
+#define CPU_CLOCK_STANDARD_MSB 1
+#define CPU_CLOCK_STANDARD_LSB 0
+#define CPU_CLOCK_STANDARD_MASK 0x00000003
+#define CPU_CLOCK_STANDARD_GET(x) (((x) & CPU_CLOCK_STANDARD_MASK) >> CPU_CLOCK_STANDARD_LSB)
+#define CPU_CLOCK_STANDARD_SET(x) (((x) << CPU_CLOCK_STANDARD_LSB) & CPU_CLOCK_STANDARD_MASK)
+
+#define CLOCK_OUT_ADDRESS 0x00000024
+#define CLOCK_OUT_OFFSET 0x00000024
+#define CLOCK_OUT_SELECT_MSB 3
+#define CLOCK_OUT_SELECT_LSB 0
+#define CLOCK_OUT_SELECT_MASK 0x0000000f
+#define CLOCK_OUT_SELECT_GET(x) (((x) & CLOCK_OUT_SELECT_MASK) >> CLOCK_OUT_SELECT_LSB)
+#define CLOCK_OUT_SELECT_SET(x) (((x) << CLOCK_OUT_SELECT_LSB) & CLOCK_OUT_SELECT_MASK)
+
+#define CLOCK_CONTROL_ADDRESS 0x00000028
+#define CLOCK_CONTROL_OFFSET 0x00000028
+#define CLOCK_CONTROL_LF_CLK32_MSB 2
+#define CLOCK_CONTROL_LF_CLK32_LSB 2
+#define CLOCK_CONTROL_LF_CLK32_MASK 0x00000004
+#define CLOCK_CONTROL_LF_CLK32_GET(x) (((x) & CLOCK_CONTROL_LF_CLK32_MASK) >> CLOCK_CONTROL_LF_CLK32_LSB)
+#define CLOCK_CONTROL_LF_CLK32_SET(x) (((x) << CLOCK_CONTROL_LF_CLK32_LSB) & CLOCK_CONTROL_LF_CLK32_MASK)
+#define CLOCK_CONTROL_UART_CLK_MSB 1
+#define CLOCK_CONTROL_UART_CLK_LSB 1
+#define CLOCK_CONTROL_UART_CLK_MASK 0x00000002
+#define CLOCK_CONTROL_UART_CLK_GET(x) (((x) & CLOCK_CONTROL_UART_CLK_MASK) >> CLOCK_CONTROL_UART_CLK_LSB)
+#define CLOCK_CONTROL_UART_CLK_SET(x) (((x) << CLOCK_CONTROL_UART_CLK_LSB) & CLOCK_CONTROL_UART_CLK_MASK)
+#define CLOCK_CONTROL_SI0_CLK_MSB 0
+#define CLOCK_CONTROL_SI0_CLK_LSB 0
+#define CLOCK_CONTROL_SI0_CLK_MASK 0x00000001
+#define CLOCK_CONTROL_SI0_CLK_GET(x) (((x) & CLOCK_CONTROL_SI0_CLK_MASK) >> CLOCK_CONTROL_SI0_CLK_LSB)
+#define CLOCK_CONTROL_SI0_CLK_SET(x) (((x) << CLOCK_CONTROL_SI0_CLK_LSB) & CLOCK_CONTROL_SI0_CLK_MASK)
+
+#define BIAS_OVERRIDE_ADDRESS 0x0000002c
+#define BIAS_OVERRIDE_OFFSET 0x0000002c
+#define BIAS_OVERRIDE_ON_MSB 0
+#define BIAS_OVERRIDE_ON_LSB 0
+#define BIAS_OVERRIDE_ON_MASK 0x00000001
+#define BIAS_OVERRIDE_ON_GET(x) (((x) & BIAS_OVERRIDE_ON_MASK) >> BIAS_OVERRIDE_ON_LSB)
+#define BIAS_OVERRIDE_ON_SET(x) (((x) << BIAS_OVERRIDE_ON_LSB) & BIAS_OVERRIDE_ON_MASK)
+
+#define WDT_CONTROL_ADDRESS 0x00000030
+#define WDT_CONTROL_OFFSET 0x00000030
+#define WDT_CONTROL_ACTION_MSB 2
+#define WDT_CONTROL_ACTION_LSB 0
+#define WDT_CONTROL_ACTION_MASK 0x00000007
+#define WDT_CONTROL_ACTION_GET(x) (((x) & WDT_CONTROL_ACTION_MASK) >> WDT_CONTROL_ACTION_LSB)
+#define WDT_CONTROL_ACTION_SET(x) (((x) << WDT_CONTROL_ACTION_LSB) & WDT_CONTROL_ACTION_MASK)
+
+#define WDT_STATUS_ADDRESS 0x00000034
+#define WDT_STATUS_OFFSET 0x00000034
+#define WDT_STATUS_INTERRUPT_MSB 0
+#define WDT_STATUS_INTERRUPT_LSB 0
+#define WDT_STATUS_INTERRUPT_MASK 0x00000001
+#define WDT_STATUS_INTERRUPT_GET(x) (((x) & WDT_STATUS_INTERRUPT_MASK) >> WDT_STATUS_INTERRUPT_LSB)
+#define WDT_STATUS_INTERRUPT_SET(x) (((x) << WDT_STATUS_INTERRUPT_LSB) & WDT_STATUS_INTERRUPT_MASK)
+
+#define WDT_ADDRESS 0x00000038
+#define WDT_OFFSET 0x00000038
+#define WDT_TARGET_MSB 21
+#define WDT_TARGET_LSB 0
+#define WDT_TARGET_MASK 0x003fffff
+#define WDT_TARGET_GET(x) (((x) & WDT_TARGET_MASK) >> WDT_TARGET_LSB)
+#define WDT_TARGET_SET(x) (((x) << WDT_TARGET_LSB) & WDT_TARGET_MASK)
+
+#define WDT_COUNT_ADDRESS 0x0000003c
+#define WDT_COUNT_OFFSET 0x0000003c
+#define WDT_COUNT_VALUE_MSB 21
+#define WDT_COUNT_VALUE_LSB 0
+#define WDT_COUNT_VALUE_MASK 0x003fffff
+#define WDT_COUNT_VALUE_GET(x) (((x) & WDT_COUNT_VALUE_MASK) >> WDT_COUNT_VALUE_LSB)
+#define WDT_COUNT_VALUE_SET(x) (((x) << WDT_COUNT_VALUE_LSB) & WDT_COUNT_VALUE_MASK)
+
+#define WDT_RESET_ADDRESS 0x00000040
+#define WDT_RESET_OFFSET 0x00000040
+#define WDT_RESET_VALUE_MSB 0
+#define WDT_RESET_VALUE_LSB 0
+#define WDT_RESET_VALUE_MASK 0x00000001
+#define WDT_RESET_VALUE_GET(x) (((x) & WDT_RESET_VALUE_MASK) >> WDT_RESET_VALUE_LSB)
+#define WDT_RESET_VALUE_SET(x) (((x) << WDT_RESET_VALUE_LSB) & WDT_RESET_VALUE_MASK)
+
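+#ifndef __ASSEMBLER__
+/*
+ * Illustrative sketch only: build a WDT target value from a tick count with
+ * the SET macro above. The field is 22 bits wide, so larger counts are
+ * truncated by the mask; the helper name is an example.
+ */
+static inline unsigned int wdt_target_pack_example(unsigned int ticks)
+{
+	return WDT_TARGET_SET(ticks);
+}
+#endif /* __ASSEMBLER__ */
+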
+#define INT_STATUS_ADDRESS 0x00000044
+#define INT_STATUS_OFFSET 0x00000044
+#define INT_STATUS_RTC_POWER_MSB 14
+#define INT_STATUS_RTC_POWER_LSB 14
+#define INT_STATUS_RTC_POWER_MASK 0x00004000
+#define INT_STATUS_RTC_POWER_GET(x) (((x) & INT_STATUS_RTC_POWER_MASK) >> INT_STATUS_RTC_POWER_LSB)
+#define INT_STATUS_RTC_POWER_SET(x) (((x) << INT_STATUS_RTC_POWER_LSB) & INT_STATUS_RTC_POWER_MASK)
+#define INT_STATUS_MAC_MSB 13
+#define INT_STATUS_MAC_LSB 13
+#define INT_STATUS_MAC_MASK 0x00002000
+#define INT_STATUS_MAC_GET(x) (((x) & INT_STATUS_MAC_MASK) >> INT_STATUS_MAC_LSB)
+#define INT_STATUS_MAC_SET(x) (((x) << INT_STATUS_MAC_LSB) & INT_STATUS_MAC_MASK)
+#define INT_STATUS_MAILBOX_MSB 12
+#define INT_STATUS_MAILBOX_LSB 12
+#define INT_STATUS_MAILBOX_MASK 0x00001000
+#define INT_STATUS_MAILBOX_GET(x) (((x) & INT_STATUS_MAILBOX_MASK) >> INT_STATUS_MAILBOX_LSB)
+#define INT_STATUS_MAILBOX_SET(x) (((x) << INT_STATUS_MAILBOX_LSB) & INT_STATUS_MAILBOX_MASK)
+#define INT_STATUS_RTC_ALARM_MSB 11
+#define INT_STATUS_RTC_ALARM_LSB 11
+#define INT_STATUS_RTC_ALARM_MASK 0x00000800
+#define INT_STATUS_RTC_ALARM_GET(x) (((x) & INT_STATUS_RTC_ALARM_MASK) >> INT_STATUS_RTC_ALARM_LSB)
+#define INT_STATUS_RTC_ALARM_SET(x) (((x) << INT_STATUS_RTC_ALARM_LSB) & INT_STATUS_RTC_ALARM_MASK)
+#define INT_STATUS_HF_TIMER_MSB 10
+#define INT_STATUS_HF_TIMER_LSB 10
+#define INT_STATUS_HF_TIMER_MASK 0x00000400
+#define INT_STATUS_HF_TIMER_GET(x) (((x) & INT_STATUS_HF_TIMER_MASK) >> INT_STATUS_HF_TIMER_LSB)
+#define INT_STATUS_HF_TIMER_SET(x) (((x) << INT_STATUS_HF_TIMER_LSB) & INT_STATUS_HF_TIMER_MASK)
+#define INT_STATUS_LF_TIMER3_MSB 9
+#define INT_STATUS_LF_TIMER3_LSB 9
+#define INT_STATUS_LF_TIMER3_MASK 0x00000200
+#define INT_STATUS_LF_TIMER3_GET(x) (((x) & INT_STATUS_LF_TIMER3_MASK) >> INT_STATUS_LF_TIMER3_LSB)
+#define INT_STATUS_LF_TIMER3_SET(x) (((x) << INT_STATUS_LF_TIMER3_LSB) & INT_STATUS_LF_TIMER3_MASK)
+#define INT_STATUS_LF_TIMER2_MSB 8
+#define INT_STATUS_LF_TIMER2_LSB 8
+#define INT_STATUS_LF_TIMER2_MASK 0x00000100
+#define INT_STATUS_LF_TIMER2_GET(x) (((x) & INT_STATUS_LF_TIMER2_MASK) >> INT_STATUS_LF_TIMER2_LSB)
+#define INT_STATUS_LF_TIMER2_SET(x) (((x) << INT_STATUS_LF_TIMER2_LSB) & INT_STATUS_LF_TIMER2_MASK)
+#define INT_STATUS_LF_TIMER1_MSB 7
+#define INT_STATUS_LF_TIMER1_LSB 7
+#define INT_STATUS_LF_TIMER1_MASK 0x00000080
+#define INT_STATUS_LF_TIMER1_GET(x) (((x) & INT_STATUS_LF_TIMER1_MASK) >> INT_STATUS_LF_TIMER1_LSB)
+#define INT_STATUS_LF_TIMER1_SET(x) (((x) << INT_STATUS_LF_TIMER1_LSB) & INT_STATUS_LF_TIMER1_MASK)
+#define INT_STATUS_LF_TIMER0_MSB 6
+#define INT_STATUS_LF_TIMER0_LSB 6
+#define INT_STATUS_LF_TIMER0_MASK 0x00000040
+#define INT_STATUS_LF_TIMER0_GET(x) (((x) & INT_STATUS_LF_TIMER0_MASK) >> INT_STATUS_LF_TIMER0_LSB)
+#define INT_STATUS_LF_TIMER0_SET(x) (((x) << INT_STATUS_LF_TIMER0_LSB) & INT_STATUS_LF_TIMER0_MASK)
+#define INT_STATUS_KEYPAD_MSB 5
+#define INT_STATUS_KEYPAD_LSB 5
+#define INT_STATUS_KEYPAD_MASK 0x00000020
+#define INT_STATUS_KEYPAD_GET(x) (((x) & INT_STATUS_KEYPAD_MASK) >> INT_STATUS_KEYPAD_LSB)
+#define INT_STATUS_KEYPAD_SET(x) (((x) << INT_STATUS_KEYPAD_LSB) & INT_STATUS_KEYPAD_MASK)
+#define INT_STATUS_SI_MSB 4
+#define INT_STATUS_SI_LSB 4
+#define INT_STATUS_SI_MASK 0x00000010
+#define INT_STATUS_SI_GET(x) (((x) & INT_STATUS_SI_MASK) >> INT_STATUS_SI_LSB)
+#define INT_STATUS_SI_SET(x) (((x) << INT_STATUS_SI_LSB) & INT_STATUS_SI_MASK)
+#define INT_STATUS_GPIO_MSB 3
+#define INT_STATUS_GPIO_LSB 3
+#define INT_STATUS_GPIO_MASK 0x00000008
+#define INT_STATUS_GPIO_GET(x) (((x) & INT_STATUS_GPIO_MASK) >> INT_STATUS_GPIO_LSB)
+#define INT_STATUS_GPIO_SET(x) (((x) << INT_STATUS_GPIO_LSB) & INT_STATUS_GPIO_MASK)
+#define INT_STATUS_UART_MSB 2
+#define INT_STATUS_UART_LSB 2
+#define INT_STATUS_UART_MASK 0x00000004
+#define INT_STATUS_UART_GET(x) (((x) & INT_STATUS_UART_MASK) >> INT_STATUS_UART_LSB)
+#define INT_STATUS_UART_SET(x) (((x) << INT_STATUS_UART_LSB) & INT_STATUS_UART_MASK)
+#define INT_STATUS_ERROR_MSB 1
+#define INT_STATUS_ERROR_LSB 1
+#define INT_STATUS_ERROR_MASK 0x00000002
+#define INT_STATUS_ERROR_GET(x) (((x) & INT_STATUS_ERROR_MASK) >> INT_STATUS_ERROR_LSB)
+#define INT_STATUS_ERROR_SET(x) (((x) << INT_STATUS_ERROR_LSB) & INT_STATUS_ERROR_MASK)
+#define INT_STATUS_WDT_INT_MSB 0
+#define INT_STATUS_WDT_INT_LSB 0
+#define INT_STATUS_WDT_INT_MASK 0x00000001
+#define INT_STATUS_WDT_INT_GET(x) (((x) & INT_STATUS_WDT_INT_MASK) >> INT_STATUS_WDT_INT_LSB)
+#define INT_STATUS_WDT_INT_SET(x) (((x) << INT_STATUS_WDT_INT_LSB) & INT_STATUS_WDT_INT_MASK)
+
+#define LF_TIMER0_ADDRESS 0x00000048
+#define LF_TIMER0_OFFSET 0x00000048
+#define LF_TIMER0_TARGET_MSB 31
+#define LF_TIMER0_TARGET_LSB 0
+#define LF_TIMER0_TARGET_MASK 0xffffffff
+#define LF_TIMER0_TARGET_GET(x) (((x) & LF_TIMER0_TARGET_MASK) >> LF_TIMER0_TARGET_LSB)
+#define LF_TIMER0_TARGET_SET(x) (((x) << LF_TIMER0_TARGET_LSB) & LF_TIMER0_TARGET_MASK)
+
+#define LF_TIMER_COUNT0_ADDRESS 0x0000004c
+#define LF_TIMER_COUNT0_OFFSET 0x0000004c
+#define LF_TIMER_COUNT0_VALUE_MSB 31
+#define LF_TIMER_COUNT0_VALUE_LSB 0
+#define LF_TIMER_COUNT0_VALUE_MASK 0xffffffff
+#define LF_TIMER_COUNT0_VALUE_GET(x) (((x) & LF_TIMER_COUNT0_VALUE_MASK) >> LF_TIMER_COUNT0_VALUE_LSB)
+#define LF_TIMER_COUNT0_VALUE_SET(x) (((x) << LF_TIMER_COUNT0_VALUE_LSB) & LF_TIMER_COUNT0_VALUE_MASK)
+
+#define LF_TIMER_CONTROL0_ADDRESS 0x00000050
+#define LF_TIMER_CONTROL0_OFFSET 0x00000050
+#define LF_TIMER_CONTROL0_ENABLE_MSB 2
+#define LF_TIMER_CONTROL0_ENABLE_LSB 2
+#define LF_TIMER_CONTROL0_ENABLE_MASK 0x00000004
+#define LF_TIMER_CONTROL0_ENABLE_GET(x) (((x) & LF_TIMER_CONTROL0_ENABLE_MASK) >> LF_TIMER_CONTROL0_ENABLE_LSB)
+#define LF_TIMER_CONTROL0_ENABLE_SET(x) (((x) << LF_TIMER_CONTROL0_ENABLE_LSB) & LF_TIMER_CONTROL0_ENABLE_MASK)
+#define LF_TIMER_CONTROL0_AUTO_RESTART_MSB 1
+#define LF_TIMER_CONTROL0_AUTO_RESTART_LSB 1
+#define LF_TIMER_CONTROL0_AUTO_RESTART_MASK 0x00000002
+#define LF_TIMER_CONTROL0_AUTO_RESTART_GET(x) (((x) & LF_TIMER_CONTROL0_AUTO_RESTART_MASK) >> LF_TIMER_CONTROL0_AUTO_RESTART_LSB)
+#define LF_TIMER_CONTROL0_AUTO_RESTART_SET(x) (((x) << LF_TIMER_CONTROL0_AUTO_RESTART_LSB) & LF_TIMER_CONTROL0_AUTO_RESTART_MASK)
+#define LF_TIMER_CONTROL0_RESET_MSB 0
+#define LF_TIMER_CONTROL0_RESET_LSB 0
+#define LF_TIMER_CONTROL0_RESET_MASK 0x00000001
+#define LF_TIMER_CONTROL0_RESET_GET(x) (((x) & LF_TIMER_CONTROL0_RESET_MASK) >> LF_TIMER_CONTROL0_RESET_LSB)
+#define LF_TIMER_CONTROL0_RESET_SET(x) (((x) << LF_TIMER_CONTROL0_RESET_LSB) & LF_TIMER_CONTROL0_RESET_MASK)
+
+#define LF_TIMER_STATUS0_ADDRESS 0x00000054
+#define LF_TIMER_STATUS0_OFFSET 0x00000054
+#define LF_TIMER_STATUS0_INTERRUPT_MSB 0
+#define LF_TIMER_STATUS0_INTERRUPT_LSB 0
+#define LF_TIMER_STATUS0_INTERRUPT_MASK 0x00000001
+#define LF_TIMER_STATUS0_INTERRUPT_GET(x) (((x) & LF_TIMER_STATUS0_INTERRUPT_MASK) >> LF_TIMER_STATUS0_INTERRUPT_LSB)
+#define LF_TIMER_STATUS0_INTERRUPT_SET(x) (((x) << LF_TIMER_STATUS0_INTERRUPT_LSB) & LF_TIMER_STATUS0_INTERRUPT_MASK)
+
+#define LF_TIMER1_ADDRESS 0x00000058
+#define LF_TIMER1_OFFSET 0x00000058
+#define LF_TIMER1_TARGET_MSB 31
+#define LF_TIMER1_TARGET_LSB 0
+#define LF_TIMER1_TARGET_MASK 0xffffffff
+#define LF_TIMER1_TARGET_GET(x) (((x) & LF_TIMER1_TARGET_MASK) >> LF_TIMER1_TARGET_LSB)
+#define LF_TIMER1_TARGET_SET(x) (((x) << LF_TIMER1_TARGET_LSB) & LF_TIMER1_TARGET_MASK)
+
+#define LF_TIMER_COUNT1_ADDRESS 0x0000005c
+#define LF_TIMER_COUNT1_OFFSET 0x0000005c
+#define LF_TIMER_COUNT1_VALUE_MSB 31
+#define LF_TIMER_COUNT1_VALUE_LSB 0
+#define LF_TIMER_COUNT1_VALUE_MASK 0xffffffff
+#define LF_TIMER_COUNT1_VALUE_GET(x) (((x) & LF_TIMER_COUNT1_VALUE_MASK) >> LF_TIMER_COUNT1_VALUE_LSB)
+#define LF_TIMER_COUNT1_VALUE_SET(x) (((x) << LF_TIMER_COUNT1_VALUE_LSB) & LF_TIMER_COUNT1_VALUE_MASK)
+
+#define LF_TIMER_CONTROL1_ADDRESS 0x00000060
+#define LF_TIMER_CONTROL1_OFFSET 0x00000060
+#define LF_TIMER_CONTROL1_ENABLE_MSB 2
+#define LF_TIMER_CONTROL1_ENABLE_LSB 2
+#define LF_TIMER_CONTROL1_ENABLE_MASK 0x00000004
+#define LF_TIMER_CONTROL1_ENABLE_GET(x) (((x) & LF_TIMER_CONTROL1_ENABLE_MASK) >> LF_TIMER_CONTROL1_ENABLE_LSB)
+#define LF_TIMER_CONTROL1_ENABLE_SET(x) (((x) << LF_TIMER_CONTROL1_ENABLE_LSB) & LF_TIMER_CONTROL1_ENABLE_MASK)
+#define LF_TIMER_CONTROL1_AUTO_RESTART_MSB 1
+#define LF_TIMER_CONTROL1_AUTO_RESTART_LSB 1
+#define LF_TIMER_CONTROL1_AUTO_RESTART_MASK 0x00000002
+#define LF_TIMER_CONTROL1_AUTO_RESTART_GET(x) (((x) & LF_TIMER_CONTROL1_AUTO_RESTART_MASK) >> LF_TIMER_CONTROL1_AUTO_RESTART_LSB)
+#define LF_TIMER_CONTROL1_AUTO_RESTART_SET(x) (((x) << LF_TIMER_CONTROL1_AUTO_RESTART_LSB) & LF_TIMER_CONTROL1_AUTO_RESTART_MASK)
+#define LF_TIMER_CONTROL1_RESET_MSB 0
+#define LF_TIMER_CONTROL1_RESET_LSB 0
+#define LF_TIMER_CONTROL1_RESET_MASK 0x00000001
+#define LF_TIMER_CONTROL1_RESET_GET(x) (((x) & LF_TIMER_CONTROL1_RESET_MASK) >> LF_TIMER_CONTROL1_RESET_LSB)
+#define LF_TIMER_CONTROL1_RESET_SET(x) (((x) << LF_TIMER_CONTROL1_RESET_LSB) & LF_TIMER_CONTROL1_RESET_MASK)
+
+#define LF_TIMER_STATUS1_ADDRESS 0x00000064
+#define LF_TIMER_STATUS1_OFFSET 0x00000064
+#define LF_TIMER_STATUS1_INTERRUPT_MSB 0
+#define LF_TIMER_STATUS1_INTERRUPT_LSB 0
+#define LF_TIMER_STATUS1_INTERRUPT_MASK 0x00000001
+#define LF_TIMER_STATUS1_INTERRUPT_GET(x) (((x) & LF_TIMER_STATUS1_INTERRUPT_MASK) >> LF_TIMER_STATUS1_INTERRUPT_LSB)
+#define LF_TIMER_STATUS1_INTERRUPT_SET(x) (((x) << LF_TIMER_STATUS1_INTERRUPT_LSB) & LF_TIMER_STATUS1_INTERRUPT_MASK)
+
+#define LF_TIMER2_ADDRESS 0x00000068
+#define LF_TIMER2_OFFSET 0x00000068
+#define LF_TIMER2_TARGET_MSB 31
+#define LF_TIMER2_TARGET_LSB 0
+#define LF_TIMER2_TARGET_MASK 0xffffffff
+#define LF_TIMER2_TARGET_GET(x) (((x) & LF_TIMER2_TARGET_MASK) >> LF_TIMER2_TARGET_LSB)
+#define LF_TIMER2_TARGET_SET(x) (((x) << LF_TIMER2_TARGET_LSB) & LF_TIMER2_TARGET_MASK)
+
+#define LF_TIMER_COUNT2_ADDRESS 0x0000006c
+#define LF_TIMER_COUNT2_OFFSET 0x0000006c
+#define LF_TIMER_COUNT2_VALUE_MSB 31
+#define LF_TIMER_COUNT2_VALUE_LSB 0
+#define LF_TIMER_COUNT2_VALUE_MASK 0xffffffff
+#define LF_TIMER_COUNT2_VALUE_GET(x) (((x) & LF_TIMER_COUNT2_VALUE_MASK) >> LF_TIMER_COUNT2_VALUE_LSB)
+#define LF_TIMER_COUNT2_VALUE_SET(x) (((x) << LF_TIMER_COUNT2_VALUE_LSB) & LF_TIMER_COUNT2_VALUE_MASK)
+
+#define LF_TIMER_CONTROL2_ADDRESS 0x00000070
+#define LF_TIMER_CONTROL2_OFFSET 0x00000070
+#define LF_TIMER_CONTROL2_ENABLE_MSB 2
+#define LF_TIMER_CONTROL2_ENABLE_LSB 2
+#define LF_TIMER_CONTROL2_ENABLE_MASK 0x00000004
+#define LF_TIMER_CONTROL2_ENABLE_GET(x) (((x) & LF_TIMER_CONTROL2_ENABLE_MASK) >> LF_TIMER_CONTROL2_ENABLE_LSB)
+#define LF_TIMER_CONTROL2_ENABLE_SET(x) (((x) << LF_TIMER_CONTROL2_ENABLE_LSB) & LF_TIMER_CONTROL2_ENABLE_MASK)
+#define LF_TIMER_CONTROL2_AUTO_RESTART_MSB 1
+#define LF_TIMER_CONTROL2_AUTO_RESTART_LSB 1
+#define LF_TIMER_CONTROL2_AUTO_RESTART_MASK 0x00000002
+#define LF_TIMER_CONTROL2_AUTO_RESTART_GET(x) (((x) & LF_TIMER_CONTROL2_AUTO_RESTART_MASK) >> LF_TIMER_CONTROL2_AUTO_RESTART_LSB)
+#define LF_TIMER_CONTROL2_AUTO_RESTART_SET(x) (((x) << LF_TIMER_CONTROL2_AUTO_RESTART_LSB) & LF_TIMER_CONTROL2_AUTO_RESTART_MASK)
+#define LF_TIMER_CONTROL2_RESET_MSB 0
+#define LF_TIMER_CONTROL2_RESET_LSB 0
+#define LF_TIMER_CONTROL2_RESET_MASK 0x00000001
+#define LF_TIMER_CONTROL2_RESET_GET(x) (((x) & LF_TIMER_CONTROL2_RESET_MASK) >> LF_TIMER_CONTROL2_RESET_LSB)
+#define LF_TIMER_CONTROL2_RESET_SET(x) (((x) << LF_TIMER_CONTROL2_RESET_LSB) & LF_TIMER_CONTROL2_RESET_MASK)
+
+#define LF_TIMER_STATUS2_ADDRESS 0x00000074
+#define LF_TIMER_STATUS2_OFFSET 0x00000074
+#define LF_TIMER_STATUS2_INTERRUPT_MSB 0
+#define LF_TIMER_STATUS2_INTERRUPT_LSB 0
+#define LF_TIMER_STATUS2_INTERRUPT_MASK 0x00000001
+#define LF_TIMER_STATUS2_INTERRUPT_GET(x) (((x) & LF_TIMER_STATUS2_INTERRUPT_MASK) >> LF_TIMER_STATUS2_INTERRUPT_LSB)
+#define LF_TIMER_STATUS2_INTERRUPT_SET(x) (((x) << LF_TIMER_STATUS2_INTERRUPT_LSB) & LF_TIMER_STATUS2_INTERRUPT_MASK)
+
+#define LF_TIMER3_ADDRESS 0x00000078
+#define LF_TIMER3_OFFSET 0x00000078
+#define LF_TIMER3_TARGET_MSB 31
+#define LF_TIMER3_TARGET_LSB 0
+#define LF_TIMER3_TARGET_MASK 0xffffffff
+#define LF_TIMER3_TARGET_GET(x) (((x) & LF_TIMER3_TARGET_MASK) >> LF_TIMER3_TARGET_LSB)
+#define LF_TIMER3_TARGET_SET(x) (((x) << LF_TIMER3_TARGET_LSB) & LF_TIMER3_TARGET_MASK)
+
+#define LF_TIMER_COUNT3_ADDRESS 0x0000007c
+#define LF_TIMER_COUNT3_OFFSET 0x0000007c
+#define LF_TIMER_COUNT3_VALUE_MSB 31
+#define LF_TIMER_COUNT3_VALUE_LSB 0
+#define LF_TIMER_COUNT3_VALUE_MASK 0xffffffff
+#define LF_TIMER_COUNT3_VALUE_GET(x) (((x) & LF_TIMER_COUNT3_VALUE_MASK) >> LF_TIMER_COUNT3_VALUE_LSB)
+#define LF_TIMER_COUNT3_VALUE_SET(x) (((x) << LF_TIMER_COUNT3_VALUE_LSB) & LF_TIMER_COUNT3_VALUE_MASK)
+
+#define LF_TIMER_CONTROL3_ADDRESS 0x00000080
+#define LF_TIMER_CONTROL3_OFFSET 0x00000080
+#define LF_TIMER_CONTROL3_ENABLE_MSB 2
+#define LF_TIMER_CONTROL3_ENABLE_LSB 2
+#define LF_TIMER_CONTROL3_ENABLE_MASK 0x00000004
+#define LF_TIMER_CONTROL3_ENABLE_GET(x) (((x) & LF_TIMER_CONTROL3_ENABLE_MASK) >> LF_TIMER_CONTROL3_ENABLE_LSB)
+#define LF_TIMER_CONTROL3_ENABLE_SET(x) (((x) << LF_TIMER_CONTROL3_ENABLE_LSB) & LF_TIMER_CONTROL3_ENABLE_MASK)
+#define LF_TIMER_CONTROL3_AUTO_RESTART_MSB 1
+#define LF_TIMER_CONTROL3_AUTO_RESTART_LSB 1
+#define LF_TIMER_CONTROL3_AUTO_RESTART_MASK 0x00000002
+#define LF_TIMER_CONTROL3_AUTO_RESTART_GET(x) (((x) & LF_TIMER_CONTROL3_AUTO_RESTART_MASK) >> LF_TIMER_CONTROL3_AUTO_RESTART_LSB)
+#define LF_TIMER_CONTROL3_AUTO_RESTART_SET(x) (((x) << LF_TIMER_CONTROL3_AUTO_RESTART_LSB) & LF_TIMER_CONTROL3_AUTO_RESTART_MASK)
+#define LF_TIMER_CONTROL3_RESET_MSB 0
+#define LF_TIMER_CONTROL3_RESET_LSB 0
+#define LF_TIMER_CONTROL3_RESET_MASK 0x00000001
+#define LF_TIMER_CONTROL3_RESET_GET(x) (((x) & LF_TIMER_CONTROL3_RESET_MASK) >> LF_TIMER_CONTROL3_RESET_LSB)
+#define LF_TIMER_CONTROL3_RESET_SET(x) (((x) << LF_TIMER_CONTROL3_RESET_LSB) & LF_TIMER_CONTROL3_RESET_MASK)
+
+#define LF_TIMER_STATUS3_ADDRESS 0x00000084
+#define LF_TIMER_STATUS3_OFFSET 0x00000084
+#define LF_TIMER_STATUS3_INTERRUPT_MSB 0
+#define LF_TIMER_STATUS3_INTERRUPT_LSB 0
+#define LF_TIMER_STATUS3_INTERRUPT_MASK 0x00000001
+#define LF_TIMER_STATUS3_INTERRUPT_GET(x) (((x) & LF_TIMER_STATUS3_INTERRUPT_MASK) >> LF_TIMER_STATUS3_INTERRUPT_LSB)
+#define LF_TIMER_STATUS3_INTERRUPT_SET(x) (((x) << LF_TIMER_STATUS3_INTERRUPT_LSB) & LF_TIMER_STATUS3_INTERRUPT_MASK)
+
+#define HF_TIMER_ADDRESS 0x00000088
+#define HF_TIMER_OFFSET 0x00000088
+#define HF_TIMER_TARGET_MSB 31
+#define HF_TIMER_TARGET_LSB 12
+#define HF_TIMER_TARGET_MASK 0xfffff000
+#define HF_TIMER_TARGET_GET(x) (((x) & HF_TIMER_TARGET_MASK) >> HF_TIMER_TARGET_LSB)
+#define HF_TIMER_TARGET_SET(x) (((x) << HF_TIMER_TARGET_LSB) & HF_TIMER_TARGET_MASK)
+
+#define HF_TIMER_COUNT_ADDRESS 0x0000008c
+#define HF_TIMER_COUNT_OFFSET 0x0000008c
+#define HF_TIMER_COUNT_VALUE_MSB 31
+#define HF_TIMER_COUNT_VALUE_LSB 12
+#define HF_TIMER_COUNT_VALUE_MASK 0xfffff000
+#define HF_TIMER_COUNT_VALUE_GET(x) (((x) & HF_TIMER_COUNT_VALUE_MASK) >> HF_TIMER_COUNT_VALUE_LSB)
+#define HF_TIMER_COUNT_VALUE_SET(x) (((x) << HF_TIMER_COUNT_VALUE_LSB) & HF_TIMER_COUNT_VALUE_MASK)
+
+#define HF_LF_COUNT_ADDRESS 0x00000090
+#define HF_LF_COUNT_OFFSET 0x00000090
+#define HF_LF_COUNT_VALUE_MSB 31
+#define HF_LF_COUNT_VALUE_LSB 0
+#define HF_LF_COUNT_VALUE_MASK 0xffffffff
+#define HF_LF_COUNT_VALUE_GET(x) (((x) & HF_LF_COUNT_VALUE_MASK) >> HF_LF_COUNT_VALUE_LSB)
+#define HF_LF_COUNT_VALUE_SET(x) (((x) << HF_LF_COUNT_VALUE_LSB) & HF_LF_COUNT_VALUE_MASK)
+
+#define HF_TIMER_CONTROL_ADDRESS 0x00000094
+#define HF_TIMER_CONTROL_OFFSET 0x00000094
+#define HF_TIMER_CONTROL_ENABLE_MSB 3
+#define HF_TIMER_CONTROL_ENABLE_LSB 3
+#define HF_TIMER_CONTROL_ENABLE_MASK 0x00000008
+#define HF_TIMER_CONTROL_ENABLE_GET(x) (((x) & HF_TIMER_CONTROL_ENABLE_MASK) >> HF_TIMER_CONTROL_ENABLE_LSB)
+#define HF_TIMER_CONTROL_ENABLE_SET(x) (((x) << HF_TIMER_CONTROL_ENABLE_LSB) & HF_TIMER_CONTROL_ENABLE_MASK)
+#define HF_TIMER_CONTROL_ON_MSB 2
+#define HF_TIMER_CONTROL_ON_LSB 2
+#define HF_TIMER_CONTROL_ON_MASK 0x00000004
+#define HF_TIMER_CONTROL_ON_GET(x) (((x) & HF_TIMER_CONTROL_ON_MASK) >> HF_TIMER_CONTROL_ON_LSB)
+#define HF_TIMER_CONTROL_ON_SET(x) (((x) << HF_TIMER_CONTROL_ON_LSB) & HF_TIMER_CONTROL_ON_MASK)
+#define HF_TIMER_CONTROL_AUTO_RESTART_MSB 1
+#define HF_TIMER_CONTROL_AUTO_RESTART_LSB 1
+#define HF_TIMER_CONTROL_AUTO_RESTART_MASK 0x00000002
+#define HF_TIMER_CONTROL_AUTO_RESTART_GET(x) (((x) & HF_TIMER_CONTROL_AUTO_RESTART_MASK) >> HF_TIMER_CONTROL_AUTO_RESTART_LSB)
+#define HF_TIMER_CONTROL_AUTO_RESTART_SET(x) (((x) << HF_TIMER_CONTROL_AUTO_RESTART_LSB) & HF_TIMER_CONTROL_AUTO_RESTART_MASK)
+#define HF_TIMER_CONTROL_RESET_MSB 0
+#define HF_TIMER_CONTROL_RESET_LSB 0
+#define HF_TIMER_CONTROL_RESET_MASK 0x00000001
+#define HF_TIMER_CONTROL_RESET_GET(x) (((x) & HF_TIMER_CONTROL_RESET_MASK) >> HF_TIMER_CONTROL_RESET_LSB)
+#define HF_TIMER_CONTROL_RESET_SET(x) (((x) << HF_TIMER_CONTROL_RESET_LSB) & HF_TIMER_CONTROL_RESET_MASK)
+
+#define HF_TIMER_STATUS_ADDRESS 0x00000098
+#define HF_TIMER_STATUS_OFFSET 0x00000098
+#define HF_TIMER_STATUS_INTERRUPT_MSB 0
+#define HF_TIMER_STATUS_INTERRUPT_LSB 0
+#define HF_TIMER_STATUS_INTERRUPT_MASK 0x00000001
+#define HF_TIMER_STATUS_INTERRUPT_GET(x) (((x) & HF_TIMER_STATUS_INTERRUPT_MASK) >> HF_TIMER_STATUS_INTERRUPT_LSB)
+#define HF_TIMER_STATUS_INTERRUPT_SET(x) (((x) << HF_TIMER_STATUS_INTERRUPT_LSB) & HF_TIMER_STATUS_INTERRUPT_MASK)
+
+#define RTC_CONTROL_ADDRESS 0x0000009c
+#define RTC_CONTROL_OFFSET 0x0000009c
+#define RTC_CONTROL_ENABLE_MSB 2
+#define RTC_CONTROL_ENABLE_LSB 2
+#define RTC_CONTROL_ENABLE_MASK 0x00000004
+#define RTC_CONTROL_ENABLE_GET(x) (((x) & RTC_CONTROL_ENABLE_MASK) >> RTC_CONTROL_ENABLE_LSB)
+#define RTC_CONTROL_ENABLE_SET(x) (((x) << RTC_CONTROL_ENABLE_LSB) & RTC_CONTROL_ENABLE_MASK)
+#define RTC_CONTROL_LOAD_RTC_MSB 1
+#define RTC_CONTROL_LOAD_RTC_LSB 1
+#define RTC_CONTROL_LOAD_RTC_MASK 0x00000002
+#define RTC_CONTROL_LOAD_RTC_GET(x) (((x) & RTC_CONTROL_LOAD_RTC_MASK) >> RTC_CONTROL_LOAD_RTC_LSB)
+#define RTC_CONTROL_LOAD_RTC_SET(x) (((x) << RTC_CONTROL_LOAD_RTC_LSB) & RTC_CONTROL_LOAD_RTC_MASK)
+#define RTC_CONTROL_LOAD_ALARM_MSB 0
+#define RTC_CONTROL_LOAD_ALARM_LSB 0
+#define RTC_CONTROL_LOAD_ALARM_MASK 0x00000001
+#define RTC_CONTROL_LOAD_ALARM_GET(x) (((x) & RTC_CONTROL_LOAD_ALARM_MASK) >> RTC_CONTROL_LOAD_ALARM_LSB)
+#define RTC_CONTROL_LOAD_ALARM_SET(x) (((x) << RTC_CONTROL_LOAD_ALARM_LSB) & RTC_CONTROL_LOAD_ALARM_MASK)
+
+#define RTC_TIME_ADDRESS 0x000000a0
+#define RTC_TIME_OFFSET 0x000000a0
+#define RTC_TIME_WEEK_DAY_MSB 26
+#define RTC_TIME_WEEK_DAY_LSB 24
+#define RTC_TIME_WEEK_DAY_MASK 0x07000000
+#define RTC_TIME_WEEK_DAY_GET(x) (((x) & RTC_TIME_WEEK_DAY_MASK) >> RTC_TIME_WEEK_DAY_LSB)
+#define RTC_TIME_WEEK_DAY_SET(x) (((x) << RTC_TIME_WEEK_DAY_LSB) & RTC_TIME_WEEK_DAY_MASK)
+#define RTC_TIME_HOUR_MSB 21
+#define RTC_TIME_HOUR_LSB 16
+#define RTC_TIME_HOUR_MASK 0x003f0000
+#define RTC_TIME_HOUR_GET(x) (((x) & RTC_TIME_HOUR_MASK) >> RTC_TIME_HOUR_LSB)
+#define RTC_TIME_HOUR_SET(x) (((x) << RTC_TIME_HOUR_LSB) & RTC_TIME_HOUR_MASK)
+#define RTC_TIME_MINUTE_MSB 14
+#define RTC_TIME_MINUTE_LSB 8
+#define RTC_TIME_MINUTE_MASK 0x00007f00
+#define RTC_TIME_MINUTE_GET(x) (((x) & RTC_TIME_MINUTE_MASK) >> RTC_TIME_MINUTE_LSB)
+#define RTC_TIME_MINUTE_SET(x) (((x) << RTC_TIME_MINUTE_LSB) & RTC_TIME_MINUTE_MASK)
+#define RTC_TIME_SECOND_MSB 6
+#define RTC_TIME_SECOND_LSB 0
+#define RTC_TIME_SECOND_MASK 0x0000007f
+#define RTC_TIME_SECOND_GET(x) (((x) & RTC_TIME_SECOND_MASK) >> RTC_TIME_SECOND_LSB)
+#define RTC_TIME_SECOND_SET(x) (((x) << RTC_TIME_SECOND_LSB) & RTC_TIME_SECOND_MASK)
+
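+#ifndef __ASSEMBLER__
+/*
+ * Illustrative sketch only: unpack the hour, minute and second fields from a
+ * raw RTC_TIME value with the GET macros above. Whether the fields hold BCD
+ * or binary values depends on RTC_CONFIG_BCD; no conversion is done here.
+ */
+static inline void rtc_time_unpack_example(unsigned int t, unsigned int *hour,
+					    unsigned int *minute,
+					    unsigned int *second)
+{
+	*hour = RTC_TIME_HOUR_GET(t);
+	*minute = RTC_TIME_MINUTE_GET(t);
+	*second = RTC_TIME_SECOND_GET(t);
+}
+#endif /* __ASSEMBLER__ */
+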
+#define RTC_DATE_ADDRESS 0x000000a4
+#define RTC_DATE_OFFSET 0x000000a4
+#define RTC_DATE_YEAR_MSB 23
+#define RTC_DATE_YEAR_LSB 16
+#define RTC_DATE_YEAR_MASK 0x00ff0000
+#define RTC_DATE_YEAR_GET(x) (((x) & RTC_DATE_YEAR_MASK) >> RTC_DATE_YEAR_LSB)
+#define RTC_DATE_YEAR_SET(x) (((x) << RTC_DATE_YEAR_LSB) & RTC_DATE_YEAR_MASK)
+#define RTC_DATE_MONTH_MSB 12
+#define RTC_DATE_MONTH_LSB 8
+#define RTC_DATE_MONTH_MASK 0x00001f00
+#define RTC_DATE_MONTH_GET(x) (((x) & RTC_DATE_MONTH_MASK) >> RTC_DATE_MONTH_LSB)
+#define RTC_DATE_MONTH_SET(x) (((x) << RTC_DATE_MONTH_LSB) & RTC_DATE_MONTH_MASK)
+#define RTC_DATE_MONTH_DAY_MSB 5
+#define RTC_DATE_MONTH_DAY_LSB 0
+#define RTC_DATE_MONTH_DAY_MASK 0x0000003f
+#define RTC_DATE_MONTH_DAY_GET(x) (((x) & RTC_DATE_MONTH_DAY_MASK) >> RTC_DATE_MONTH_DAY_LSB)
+#define RTC_DATE_MONTH_DAY_SET(x) (((x) << RTC_DATE_MONTH_DAY_LSB) & RTC_DATE_MONTH_DAY_MASK)
+
+#define RTC_SET_TIME_ADDRESS 0x000000a8
+#define RTC_SET_TIME_OFFSET 0x000000a8
+#define RTC_SET_TIME_WEEK_DAY_MSB 26
+#define RTC_SET_TIME_WEEK_DAY_LSB 24
+#define RTC_SET_TIME_WEEK_DAY_MASK 0x07000000
+#define RTC_SET_TIME_WEEK_DAY_GET(x) (((x) & RTC_SET_TIME_WEEK_DAY_MASK) >> RTC_SET_TIME_WEEK_DAY_LSB)
+#define RTC_SET_TIME_WEEK_DAY_SET(x) (((x) << RTC_SET_TIME_WEEK_DAY_LSB) & RTC_SET_TIME_WEEK_DAY_MASK)
+#define RTC_SET_TIME_HOUR_MSB 21
+#define RTC_SET_TIME_HOUR_LSB 16
+#define RTC_SET_TIME_HOUR_MASK 0x003f0000
+#define RTC_SET_TIME_HOUR_GET(x) (((x) & RTC_SET_TIME_HOUR_MASK) >> RTC_SET_TIME_HOUR_LSB)
+#define RTC_SET_TIME_HOUR_SET(x) (((x) << RTC_SET_TIME_HOUR_LSB) & RTC_SET_TIME_HOUR_MASK)
+#define RTC_SET_TIME_MINUTE_MSB 14
+#define RTC_SET_TIME_MINUTE_LSB 8
+#define RTC_SET_TIME_MINUTE_MASK 0x00007f00
+#define RTC_SET_TIME_MINUTE_GET(x) (((x) & RTC_SET_TIME_MINUTE_MASK) >> RTC_SET_TIME_MINUTE_LSB)
+#define RTC_SET_TIME_MINUTE_SET(x) (((x) << RTC_SET_TIME_MINUTE_LSB) & RTC_SET_TIME_MINUTE_MASK)
+#define RTC_SET_TIME_SECOND_MSB 6
+#define RTC_SET_TIME_SECOND_LSB 0
+#define RTC_SET_TIME_SECOND_MASK 0x0000007f
+#define RTC_SET_TIME_SECOND_GET(x) (((x) & RTC_SET_TIME_SECOND_MASK) >> RTC_SET_TIME_SECOND_LSB)
+#define RTC_SET_TIME_SECOND_SET(x) (((x) << RTC_SET_TIME_SECOND_LSB) & RTC_SET_TIME_SECOND_MASK)
+
+#define RTC_SET_DATE_ADDRESS 0x000000ac
+#define RTC_SET_DATE_OFFSET 0x000000ac
+#define RTC_SET_DATE_YEAR_MSB 23
+#define RTC_SET_DATE_YEAR_LSB 16
+#define RTC_SET_DATE_YEAR_MASK 0x00ff0000
+#define RTC_SET_DATE_YEAR_GET(x) (((x) & RTC_SET_DATE_YEAR_MASK) >> RTC_SET_DATE_YEAR_LSB)
+#define RTC_SET_DATE_YEAR_SET(x) (((x) << RTC_SET_DATE_YEAR_LSB) & RTC_SET_DATE_YEAR_MASK)
+#define RTC_SET_DATE_MONTH_MSB 12
+#define RTC_SET_DATE_MONTH_LSB 8
+#define RTC_SET_DATE_MONTH_MASK 0x00001f00
+#define RTC_SET_DATE_MONTH_GET(x) (((x) & RTC_SET_DATE_MONTH_MASK) >> RTC_SET_DATE_MONTH_LSB)
+#define RTC_SET_DATE_MONTH_SET(x) (((x) << RTC_SET_DATE_MONTH_LSB) & RTC_SET_DATE_MONTH_MASK)
+#define RTC_SET_DATE_MONTH_DAY_MSB 5
+#define RTC_SET_DATE_MONTH_DAY_LSB 0
+#define RTC_SET_DATE_MONTH_DAY_MASK 0x0000003f
+#define RTC_SET_DATE_MONTH_DAY_GET(x) (((x) & RTC_SET_DATE_MONTH_DAY_MASK) >> RTC_SET_DATE_MONTH_DAY_LSB)
+#define RTC_SET_DATE_MONTH_DAY_SET(x) (((x) << RTC_SET_DATE_MONTH_DAY_LSB) & RTC_SET_DATE_MONTH_DAY_MASK)
+
+#define RTC_SET_ALARM_ADDRESS 0x000000b0
+#define RTC_SET_ALARM_OFFSET 0x000000b0
+#define RTC_SET_ALARM_HOUR_MSB 21
+#define RTC_SET_ALARM_HOUR_LSB 16
+#define RTC_SET_ALARM_HOUR_MASK 0x003f0000
+#define RTC_SET_ALARM_HOUR_GET(x) (((x) & RTC_SET_ALARM_HOUR_MASK) >> RTC_SET_ALARM_HOUR_LSB)
+#define RTC_SET_ALARM_HOUR_SET(x) (((x) << RTC_SET_ALARM_HOUR_LSB) & RTC_SET_ALARM_HOUR_MASK)
+#define RTC_SET_ALARM_MINUTE_MSB 14
+#define RTC_SET_ALARM_MINUTE_LSB 8
+#define RTC_SET_ALARM_MINUTE_MASK 0x00007f00
+#define RTC_SET_ALARM_MINUTE_GET(x) (((x) & RTC_SET_ALARM_MINUTE_MASK) >> RTC_SET_ALARM_MINUTE_LSB)
+#define RTC_SET_ALARM_MINUTE_SET(x) (((x) << RTC_SET_ALARM_MINUTE_LSB) & RTC_SET_ALARM_MINUTE_MASK)
+#define RTC_SET_ALARM_SECOND_MSB 6
+#define RTC_SET_ALARM_SECOND_LSB 0
+#define RTC_SET_ALARM_SECOND_MASK 0x0000007f
+#define RTC_SET_ALARM_SECOND_GET(x) (((x) & RTC_SET_ALARM_SECOND_MASK) >> RTC_SET_ALARM_SECOND_LSB)
+#define RTC_SET_ALARM_SECOND_SET(x) (((x) << RTC_SET_ALARM_SECOND_LSB) & RTC_SET_ALARM_SECOND_MASK)
+
+#define RTC_CONFIG_ADDRESS 0x000000b4
+#define RTC_CONFIG_OFFSET 0x000000b4
+#define RTC_CONFIG_BCD_MSB 2
+#define RTC_CONFIG_BCD_LSB 2
+#define RTC_CONFIG_BCD_MASK 0x00000004
+#define RTC_CONFIG_BCD_GET(x) (((x) & RTC_CONFIG_BCD_MASK) >> RTC_CONFIG_BCD_LSB)
+#define RTC_CONFIG_BCD_SET(x) (((x) << RTC_CONFIG_BCD_LSB) & RTC_CONFIG_BCD_MASK)
+#define RTC_CONFIG_TWELVE_HOUR_MSB 1
+#define RTC_CONFIG_TWELVE_HOUR_LSB 1
+#define RTC_CONFIG_TWELVE_HOUR_MASK 0x00000002
+#define RTC_CONFIG_TWELVE_HOUR_GET(x) (((x) & RTC_CONFIG_TWELVE_HOUR_MASK) >> RTC_CONFIG_TWELVE_HOUR_LSB)
+#define RTC_CONFIG_TWELVE_HOUR_SET(x) (((x) << RTC_CONFIG_TWELVE_HOUR_LSB) & RTC_CONFIG_TWELVE_HOUR_MASK)
+#define RTC_CONFIG_DSE_MSB 0
+#define RTC_CONFIG_DSE_LSB 0
+#define RTC_CONFIG_DSE_MASK 0x00000001
+#define RTC_CONFIG_DSE_GET(x) (((x) & RTC_CONFIG_DSE_MASK) >> RTC_CONFIG_DSE_LSB)
+#define RTC_CONFIG_DSE_SET(x) (((x) << RTC_CONFIG_DSE_LSB) & RTC_CONFIG_DSE_MASK)
+
+#define RTC_ALARM_STATUS_ADDRESS 0x000000b8
+#define RTC_ALARM_STATUS_OFFSET 0x000000b8
+#define RTC_ALARM_STATUS_ENABLE_MSB 1
+#define RTC_ALARM_STATUS_ENABLE_LSB 1
+#define RTC_ALARM_STATUS_ENABLE_MASK 0x00000002
+#define RTC_ALARM_STATUS_ENABLE_GET(x) (((x) & RTC_ALARM_STATUS_ENABLE_MASK) >> RTC_ALARM_STATUS_ENABLE_LSB)
+#define RTC_ALARM_STATUS_ENABLE_SET(x) (((x) << RTC_ALARM_STATUS_ENABLE_LSB) & RTC_ALARM_STATUS_ENABLE_MASK)
+#define RTC_ALARM_STATUS_INTERRUPT_MSB 0
+#define RTC_ALARM_STATUS_INTERRUPT_LSB 0
+#define RTC_ALARM_STATUS_INTERRUPT_MASK 0x00000001
+#define RTC_ALARM_STATUS_INTERRUPT_GET(x) (((x) & RTC_ALARM_STATUS_INTERRUPT_MASK) >> RTC_ALARM_STATUS_INTERRUPT_LSB)
+#define RTC_ALARM_STATUS_INTERRUPT_SET(x) (((x) << RTC_ALARM_STATUS_INTERRUPT_LSB) & RTC_ALARM_STATUS_INTERRUPT_MASK)
+
+#define UART_WAKEUP_ADDRESS 0x000000bc
+#define UART_WAKEUP_OFFSET 0x000000bc
+#define UART_WAKEUP_ENABLE_MSB 0
+#define UART_WAKEUP_ENABLE_LSB 0
+#define UART_WAKEUP_ENABLE_MASK 0x00000001
+#define UART_WAKEUP_ENABLE_GET(x) (((x) & UART_WAKEUP_ENABLE_MASK) >> UART_WAKEUP_ENABLE_LSB)
+#define UART_WAKEUP_ENABLE_SET(x) (((x) << UART_WAKEUP_ENABLE_LSB) & UART_WAKEUP_ENABLE_MASK)
+
+#define RESET_CAUSE_ADDRESS 0x000000c0
+#define RESET_CAUSE_OFFSET 0x000000c0
+#define RESET_CAUSE_LAST_MSB 2
+#define RESET_CAUSE_LAST_LSB 0
+#define RESET_CAUSE_LAST_MASK 0x00000007
+#define RESET_CAUSE_LAST_GET(x) (((x) & RESET_CAUSE_LAST_MASK) >> RESET_CAUSE_LAST_LSB)
+#define RESET_CAUSE_LAST_SET(x) (((x) << RESET_CAUSE_LAST_LSB) & RESET_CAUSE_LAST_MASK)
+
+#define SYSTEM_SLEEP_ADDRESS 0x000000c4
+#define SYSTEM_SLEEP_OFFSET 0x000000c4
+#define SYSTEM_SLEEP_HOST_IF_MSB 4
+#define SYSTEM_SLEEP_HOST_IF_LSB 4
+#define SYSTEM_SLEEP_HOST_IF_MASK 0x00000010
+#define SYSTEM_SLEEP_HOST_IF_GET(x) (((x) & SYSTEM_SLEEP_HOST_IF_MASK) >> SYSTEM_SLEEP_HOST_IF_LSB)
+#define SYSTEM_SLEEP_HOST_IF_SET(x) (((x) << SYSTEM_SLEEP_HOST_IF_LSB) & SYSTEM_SLEEP_HOST_IF_MASK)
+#define SYSTEM_SLEEP_MBOX_MSB 3
+#define SYSTEM_SLEEP_MBOX_LSB 3
+#define SYSTEM_SLEEP_MBOX_MASK 0x00000008
+#define SYSTEM_SLEEP_MBOX_GET(x) (((x) & SYSTEM_SLEEP_MBOX_MASK) >> SYSTEM_SLEEP_MBOX_LSB)
+#define SYSTEM_SLEEP_MBOX_SET(x) (((x) << SYSTEM_SLEEP_MBOX_LSB) & SYSTEM_SLEEP_MBOX_MASK)
+#define SYSTEM_SLEEP_MAC_IF_MSB 2
+#define SYSTEM_SLEEP_MAC_IF_LSB 2
+#define SYSTEM_SLEEP_MAC_IF_MASK 0x00000004
+#define SYSTEM_SLEEP_MAC_IF_GET(x) (((x) & SYSTEM_SLEEP_MAC_IF_MASK) >> SYSTEM_SLEEP_MAC_IF_LSB)
+#define SYSTEM_SLEEP_MAC_IF_SET(x) (((x) << SYSTEM_SLEEP_MAC_IF_LSB) & SYSTEM_SLEEP_MAC_IF_MASK)
+#define SYSTEM_SLEEP_LIGHT_MSB 1
+#define SYSTEM_SLEEP_LIGHT_LSB 1
+#define SYSTEM_SLEEP_LIGHT_MASK 0x00000002
+#define SYSTEM_SLEEP_LIGHT_GET(x) (((x) & SYSTEM_SLEEP_LIGHT_MASK) >> SYSTEM_SLEEP_LIGHT_LSB)
+#define SYSTEM_SLEEP_LIGHT_SET(x) (((x) << SYSTEM_SLEEP_LIGHT_LSB) & SYSTEM_SLEEP_LIGHT_MASK)
+#define SYSTEM_SLEEP_DISABLE_MSB 0
+#define SYSTEM_SLEEP_DISABLE_LSB 0
+#define SYSTEM_SLEEP_DISABLE_MASK 0x00000001
+#define SYSTEM_SLEEP_DISABLE_GET(x) (((x) & SYSTEM_SLEEP_DISABLE_MASK) >> SYSTEM_SLEEP_DISABLE_LSB)
+#define SYSTEM_SLEEP_DISABLE_SET(x) (((x) << SYSTEM_SLEEP_DISABLE_LSB) & SYSTEM_SLEEP_DISABLE_MASK)
+
+#define SDIO_WRAPPER_ADDRESS 0x000000c8
+#define SDIO_WRAPPER_OFFSET 0x000000c8
+#define SDIO_WRAPPER_SLEEP_MSB 3
+#define SDIO_WRAPPER_SLEEP_LSB 3
+#define SDIO_WRAPPER_SLEEP_MASK 0x00000008
+#define SDIO_WRAPPER_SLEEP_GET(x) (((x) & SDIO_WRAPPER_SLEEP_MASK) >> SDIO_WRAPPER_SLEEP_LSB)
+#define SDIO_WRAPPER_SLEEP_SET(x) (((x) << SDIO_WRAPPER_SLEEP_LSB) & SDIO_WRAPPER_SLEEP_MASK)
+#define SDIO_WRAPPER_WAKEUP_MSB 2
+#define SDIO_WRAPPER_WAKEUP_LSB 2
+#define SDIO_WRAPPER_WAKEUP_MASK 0x00000004
+#define SDIO_WRAPPER_WAKEUP_GET(x) (((x) & SDIO_WRAPPER_WAKEUP_MASK) >> SDIO_WRAPPER_WAKEUP_LSB)
+#define SDIO_WRAPPER_WAKEUP_SET(x) (((x) << SDIO_WRAPPER_WAKEUP_LSB) & SDIO_WRAPPER_WAKEUP_MASK)
+#define SDIO_WRAPPER_SOC_ON_MSB 1
+#define SDIO_WRAPPER_SOC_ON_LSB 1
+#define SDIO_WRAPPER_SOC_ON_MASK 0x00000002
+#define SDIO_WRAPPER_SOC_ON_GET(x) (((x) & SDIO_WRAPPER_SOC_ON_MASK) >> SDIO_WRAPPER_SOC_ON_LSB)
+#define SDIO_WRAPPER_SOC_ON_SET(x) (((x) << SDIO_WRAPPER_SOC_ON_LSB) & SDIO_WRAPPER_SOC_ON_MASK)
+#define SDIO_WRAPPER_ON_MSB 0
+#define SDIO_WRAPPER_ON_LSB 0
+#define SDIO_WRAPPER_ON_MASK 0x00000001
+#define SDIO_WRAPPER_ON_GET(x) (((x) & SDIO_WRAPPER_ON_MASK) >> SDIO_WRAPPER_ON_LSB)
+#define SDIO_WRAPPER_ON_SET(x) (((x) << SDIO_WRAPPER_ON_LSB) & SDIO_WRAPPER_ON_MASK)
+
+#define MAC_SLEEP_CONTROL_ADDRESS 0x000000cc
+#define MAC_SLEEP_CONTROL_OFFSET 0x000000cc
+#define MAC_SLEEP_CONTROL_ENABLE_MSB 1
+#define MAC_SLEEP_CONTROL_ENABLE_LSB 0
+#define MAC_SLEEP_CONTROL_ENABLE_MASK 0x00000003
+#define MAC_SLEEP_CONTROL_ENABLE_GET(x) (((x) & MAC_SLEEP_CONTROL_ENABLE_MASK) >> MAC_SLEEP_CONTROL_ENABLE_LSB)
+#define MAC_SLEEP_CONTROL_ENABLE_SET(x) (((x) << MAC_SLEEP_CONTROL_ENABLE_LSB) & MAC_SLEEP_CONTROL_ENABLE_MASK)
+
+#define KEEP_AWAKE_ADDRESS 0x000000d0
+#define KEEP_AWAKE_OFFSET 0x000000d0
+#define KEEP_AWAKE_COUNT_MSB 7
+#define KEEP_AWAKE_COUNT_LSB 0
+#define KEEP_AWAKE_COUNT_MASK 0x000000ff
+#define KEEP_AWAKE_COUNT_GET(x) (((x) & KEEP_AWAKE_COUNT_MASK) >> KEEP_AWAKE_COUNT_LSB)
+#define KEEP_AWAKE_COUNT_SET(x) (((x) << KEEP_AWAKE_COUNT_LSB) & KEEP_AWAKE_COUNT_MASK)
+
+#define LPO_CAL_TIME_ADDRESS 0x000000d4
+#define LPO_CAL_TIME_OFFSET 0x000000d4
+#define LPO_CAL_TIME_LENGTH_MSB 13
+#define LPO_CAL_TIME_LENGTH_LSB 0
+#define LPO_CAL_TIME_LENGTH_MASK 0x00003fff
+#define LPO_CAL_TIME_LENGTH_GET(x) (((x) & LPO_CAL_TIME_LENGTH_MASK) >> LPO_CAL_TIME_LENGTH_LSB)
+#define LPO_CAL_TIME_LENGTH_SET(x) (((x) << LPO_CAL_TIME_LENGTH_LSB) & LPO_CAL_TIME_LENGTH_MASK)
+
+#define LPO_INIT_DIVIDEND_INT_ADDRESS 0x000000d8
+#define LPO_INIT_DIVIDEND_INT_OFFSET 0x000000d8
+#define LPO_INIT_DIVIDEND_INT_VALUE_MSB 23
+#define LPO_INIT_DIVIDEND_INT_VALUE_LSB 0
+#define LPO_INIT_DIVIDEND_INT_VALUE_MASK 0x00ffffff
+#define LPO_INIT_DIVIDEND_INT_VALUE_GET(x) (((x) & LPO_INIT_DIVIDEND_INT_VALUE_MASK) >> LPO_INIT_DIVIDEND_INT_VALUE_LSB)
+#define LPO_INIT_DIVIDEND_INT_VALUE_SET(x) (((x) << LPO_INIT_DIVIDEND_INT_VALUE_LSB) & LPO_INIT_DIVIDEND_INT_VALUE_MASK)
+
+#define LPO_INIT_DIVIDEND_FRACTION_ADDRESS 0x000000dc
+#define LPO_INIT_DIVIDEND_FRACTION_OFFSET 0x000000dc
+#define LPO_INIT_DIVIDEND_FRACTION_VALUE_MSB 10
+#define LPO_INIT_DIVIDEND_FRACTION_VALUE_LSB 0
+#define LPO_INIT_DIVIDEND_FRACTION_VALUE_MASK 0x000007ff
+#define LPO_INIT_DIVIDEND_FRACTION_VALUE_GET(x) (((x) & LPO_INIT_DIVIDEND_FRACTION_VALUE_MASK) >> LPO_INIT_DIVIDEND_FRACTION_VALUE_LSB)
+#define LPO_INIT_DIVIDEND_FRACTION_VALUE_SET(x) (((x) << LPO_INIT_DIVIDEND_FRACTION_VALUE_LSB) & LPO_INIT_DIVIDEND_FRACTION_VALUE_MASK)
+
+#define LPO_CAL_ADDRESS 0x000000e0
+#define LPO_CAL_OFFSET 0x000000e0
+#define LPO_CAL_ENABLE_MSB 20
+#define LPO_CAL_ENABLE_LSB 20
+#define LPO_CAL_ENABLE_MASK 0x00100000
+#define LPO_CAL_ENABLE_GET(x) (((x) & LPO_CAL_ENABLE_MASK) >> LPO_CAL_ENABLE_LSB)
+#define LPO_CAL_ENABLE_SET(x) (((x) << LPO_CAL_ENABLE_LSB) & LPO_CAL_ENABLE_MASK)
+#define LPO_CAL_COUNT_MSB 19
+#define LPO_CAL_COUNT_LSB 0
+#define LPO_CAL_COUNT_MASK 0x000fffff
+#define LPO_CAL_COUNT_GET(x) (((x) & LPO_CAL_COUNT_MASK) >> LPO_CAL_COUNT_LSB)
+#define LPO_CAL_COUNT_SET(x) (((x) << LPO_CAL_COUNT_LSB) & LPO_CAL_COUNT_MASK)
+
+#define LPO_CAL_TEST_CONTROL_ADDRESS 0x000000e4
+#define LPO_CAL_TEST_CONTROL_OFFSET 0x000000e4
+#define LPO_CAL_TEST_CONTROL_ENABLE_MSB 5
+#define LPO_CAL_TEST_CONTROL_ENABLE_LSB 5
+#define LPO_CAL_TEST_CONTROL_ENABLE_MASK 0x00000020
+#define LPO_CAL_TEST_CONTROL_ENABLE_GET(x) (((x) & LPO_CAL_TEST_CONTROL_ENABLE_MASK) >> LPO_CAL_TEST_CONTROL_ENABLE_LSB)
+#define LPO_CAL_TEST_CONTROL_ENABLE_SET(x) (((x) << LPO_CAL_TEST_CONTROL_ENABLE_LSB) & LPO_CAL_TEST_CONTROL_ENABLE_MASK)
+#define LPO_CAL_TEST_CONTROL_RTC_CYCLES_MSB 4
+#define LPO_CAL_TEST_CONTROL_RTC_CYCLES_LSB 0
+#define LPO_CAL_TEST_CONTROL_RTC_CYCLES_MASK 0x0000001f
+#define LPO_CAL_TEST_CONTROL_RTC_CYCLES_GET(x) (((x) & LPO_CAL_TEST_CONTROL_RTC_CYCLES_MASK) >> LPO_CAL_TEST_CONTROL_RTC_CYCLES_LSB)
+#define LPO_CAL_TEST_CONTROL_RTC_CYCLES_SET(x) (((x) << LPO_CAL_TEST_CONTROL_RTC_CYCLES_LSB) & LPO_CAL_TEST_CONTROL_RTC_CYCLES_MASK)
+
+#define LPO_CAL_TEST_STATUS_ADDRESS 0x000000e8
+#define LPO_CAL_TEST_STATUS_OFFSET 0x000000e8
+#define LPO_CAL_TEST_STATUS_READY_MSB 16
+#define LPO_CAL_TEST_STATUS_READY_LSB 16
+#define LPO_CAL_TEST_STATUS_READY_MASK 0x00010000
+#define LPO_CAL_TEST_STATUS_READY_GET(x) (((x) & LPO_CAL_TEST_STATUS_READY_MASK) >> LPO_CAL_TEST_STATUS_READY_LSB)
+#define LPO_CAL_TEST_STATUS_READY_SET(x) (((x) << LPO_CAL_TEST_STATUS_READY_LSB) & LPO_CAL_TEST_STATUS_READY_MASK)
+#define LPO_CAL_TEST_STATUS_COUNT_MSB 15
+#define LPO_CAL_TEST_STATUS_COUNT_LSB 0
+#define LPO_CAL_TEST_STATUS_COUNT_MASK 0x0000ffff
+#define LPO_CAL_TEST_STATUS_COUNT_GET(x) (((x) & LPO_CAL_TEST_STATUS_COUNT_MASK) >> LPO_CAL_TEST_STATUS_COUNT_LSB)
+#define LPO_CAL_TEST_STATUS_COUNT_SET(x) (((x) << LPO_CAL_TEST_STATUS_COUNT_LSB) & LPO_CAL_TEST_STATUS_COUNT_MASK)
+
+#define CHIP_ID_ADDRESS 0x000000ec
+#define CHIP_ID_OFFSET 0x000000ec
+#define CHIP_ID_DEVICE_ID_MSB 31
+#define CHIP_ID_DEVICE_ID_LSB 16
+#define CHIP_ID_DEVICE_ID_MASK 0xffff0000
+#define CHIP_ID_DEVICE_ID_GET(x) (((x) & CHIP_ID_DEVICE_ID_MASK) >> CHIP_ID_DEVICE_ID_LSB)
+#define CHIP_ID_DEVICE_ID_SET(x) (((x) << CHIP_ID_DEVICE_ID_LSB) & CHIP_ID_DEVICE_ID_MASK)
+#define CHIP_ID_CONFIG_ID_MSB 15
+#define CHIP_ID_CONFIG_ID_LSB 4
+#define CHIP_ID_CONFIG_ID_MASK 0x0000fff0
+#define CHIP_ID_CONFIG_ID_GET(x) (((x) & CHIP_ID_CONFIG_ID_MASK) >> CHIP_ID_CONFIG_ID_LSB)
+#define CHIP_ID_CONFIG_ID_SET(x) (((x) << CHIP_ID_CONFIG_ID_LSB) & CHIP_ID_CONFIG_ID_MASK)
+#define CHIP_ID_VERSION_ID_MSB 3
+#define CHIP_ID_VERSION_ID_LSB 0
+#define CHIP_ID_VERSION_ID_MASK 0x0000000f
+#define CHIP_ID_VERSION_ID_GET(x) (((x) & CHIP_ID_VERSION_ID_MASK) >> CHIP_ID_VERSION_ID_LSB)
+#define CHIP_ID_VERSION_ID_SET(x) (((x) << CHIP_ID_VERSION_ID_LSB) & CHIP_ID_VERSION_ID_MASK)
+
+#define DERIVED_RTC_CLK_ADDRESS 0x000000f0
+#define DERIVED_RTC_CLK_OFFSET 0x000000f0
+#define DERIVED_RTC_CLK_EXTERNAL_DETECT_EN_MSB 20
+#define DERIVED_RTC_CLK_EXTERNAL_DETECT_EN_LSB 20
+#define DERIVED_RTC_CLK_EXTERNAL_DETECT_EN_MASK 0x00100000
+#define DERIVED_RTC_CLK_EXTERNAL_DETECT_EN_GET(x) (((x) & DERIVED_RTC_CLK_EXTERNAL_DETECT_EN_MASK) >> DERIVED_RTC_CLK_EXTERNAL_DETECT_EN_LSB)
+#define DERIVED_RTC_CLK_EXTERNAL_DETECT_EN_SET(x) (((x) << DERIVED_RTC_CLK_EXTERNAL_DETECT_EN_LSB) & DERIVED_RTC_CLK_EXTERNAL_DETECT_EN_MASK)
+#define DERIVED_RTC_CLK_EXTERNAL_DETECT_MSB 18
+#define DERIVED_RTC_CLK_EXTERNAL_DETECT_LSB 18
+#define DERIVED_RTC_CLK_EXTERNAL_DETECT_MASK 0x00040000
+#define DERIVED_RTC_CLK_EXTERNAL_DETECT_GET(x) (((x) & DERIVED_RTC_CLK_EXTERNAL_DETECT_MASK) >> DERIVED_RTC_CLK_EXTERNAL_DETECT_LSB)
+#define DERIVED_RTC_CLK_EXTERNAL_DETECT_SET(x) (((x) << DERIVED_RTC_CLK_EXTERNAL_DETECT_LSB) & DERIVED_RTC_CLK_EXTERNAL_DETECT_MASK)
+#define DERIVED_RTC_CLK_FORCE_MSB 17
+#define DERIVED_RTC_CLK_FORCE_LSB 16
+#define DERIVED_RTC_CLK_FORCE_MASK 0x00030000
+#define DERIVED_RTC_CLK_FORCE_GET(x) (((x) & DERIVED_RTC_CLK_FORCE_MASK) >> DERIVED_RTC_CLK_FORCE_LSB)
+#define DERIVED_RTC_CLK_FORCE_SET(x) (((x) << DERIVED_RTC_CLK_FORCE_LSB) & DERIVED_RTC_CLK_FORCE_MASK)
+#define DERIVED_RTC_CLK_PERIOD_MSB 15
+#define DERIVED_RTC_CLK_PERIOD_LSB 1
+#define DERIVED_RTC_CLK_PERIOD_MASK 0x0000fffe
+#define DERIVED_RTC_CLK_PERIOD_GET(x) (((x) & DERIVED_RTC_CLK_PERIOD_MASK) >> DERIVED_RTC_CLK_PERIOD_LSB)
+#define DERIVED_RTC_CLK_PERIOD_SET(x) (((x) << DERIVED_RTC_CLK_PERIOD_LSB) & DERIVED_RTC_CLK_PERIOD_MASK)
+
+#define MAC_PCU_SLP32_MODE_ADDRESS 0x000000f4
+#define MAC_PCU_SLP32_MODE_OFFSET 0x000000f4
+#define MAC_PCU_SLP32_MODE_TSF_WRITE_PENDING_MSB 21
+#define MAC_PCU_SLP32_MODE_TSF_WRITE_PENDING_LSB 21
+#define MAC_PCU_SLP32_MODE_TSF_WRITE_PENDING_MASK 0x00200000
+#define MAC_PCU_SLP32_MODE_TSF_WRITE_PENDING_GET(x) (((x) & MAC_PCU_SLP32_MODE_TSF_WRITE_PENDING_MASK) >> MAC_PCU_SLP32_MODE_TSF_WRITE_PENDING_LSB)
+#define MAC_PCU_SLP32_MODE_TSF_WRITE_PENDING_SET(x) (((x) << MAC_PCU_SLP32_MODE_TSF_WRITE_PENDING_LSB) & MAC_PCU_SLP32_MODE_TSF_WRITE_PENDING_MASK)
+#define MAC_PCU_SLP32_MODE_HALF_CLK_LATENCY_MSB 19
+#define MAC_PCU_SLP32_MODE_HALF_CLK_LATENCY_LSB 0
+#define MAC_PCU_SLP32_MODE_HALF_CLK_LATENCY_MASK 0x000fffff
+#define MAC_PCU_SLP32_MODE_HALF_CLK_LATENCY_GET(x) (((x) & MAC_PCU_SLP32_MODE_HALF_CLK_LATENCY_MASK) >> MAC_PCU_SLP32_MODE_HALF_CLK_LATENCY_LSB)
+#define MAC_PCU_SLP32_MODE_HALF_CLK_LATENCY_SET(x) (((x) << MAC_PCU_SLP32_MODE_HALF_CLK_LATENCY_LSB) & MAC_PCU_SLP32_MODE_HALF_CLK_LATENCY_MASK)
+
+#define MAC_PCU_SLP32_WAKE_ADDRESS 0x000000f8
+#define MAC_PCU_SLP32_WAKE_OFFSET 0x000000f8
+#define MAC_PCU_SLP32_WAKE_XTL_TIME_MSB 15
+#define MAC_PCU_SLP32_WAKE_XTL_TIME_LSB 0
+#define MAC_PCU_SLP32_WAKE_XTL_TIME_MASK 0x0000ffff
+#define MAC_PCU_SLP32_WAKE_XTL_TIME_GET(x) (((x) & MAC_PCU_SLP32_WAKE_XTL_TIME_MASK) >> MAC_PCU_SLP32_WAKE_XTL_TIME_LSB)
+#define MAC_PCU_SLP32_WAKE_XTL_TIME_SET(x) (((x) << MAC_PCU_SLP32_WAKE_XTL_TIME_LSB) & MAC_PCU_SLP32_WAKE_XTL_TIME_MASK)
+
+#define MAC_PCU_SLP32_INC_ADDRESS 0x000000fc
+#define MAC_PCU_SLP32_INC_OFFSET 0x000000fc
+#define MAC_PCU_SLP32_INC_TSF_INC_MSB 19
+#define MAC_PCU_SLP32_INC_TSF_INC_LSB 0
+#define MAC_PCU_SLP32_INC_TSF_INC_MASK 0x000fffff
+#define MAC_PCU_SLP32_INC_TSF_INC_GET(x) (((x) & MAC_PCU_SLP32_INC_TSF_INC_MASK) >> MAC_PCU_SLP32_INC_TSF_INC_LSB)
+#define MAC_PCU_SLP32_INC_TSF_INC_SET(x) (((x) << MAC_PCU_SLP32_INC_TSF_INC_LSB) & MAC_PCU_SLP32_INC_TSF_INC_MASK)
+
+#define MAC_PCU_SLP_MIB1_ADDRESS 0x00000100
+#define MAC_PCU_SLP_MIB1_OFFSET 0x00000100
+#define MAC_PCU_SLP_MIB1_SLEEP_CNT_MSB 31
+#define MAC_PCU_SLP_MIB1_SLEEP_CNT_LSB 0
+#define MAC_PCU_SLP_MIB1_SLEEP_CNT_MASK 0xffffffff
+#define MAC_PCU_SLP_MIB1_SLEEP_CNT_GET(x) (((x) & MAC_PCU_SLP_MIB1_SLEEP_CNT_MASK) >> MAC_PCU_SLP_MIB1_SLEEP_CNT_LSB)
+#define MAC_PCU_SLP_MIB1_SLEEP_CNT_SET(x) (((x) << MAC_PCU_SLP_MIB1_SLEEP_CNT_LSB) & MAC_PCU_SLP_MIB1_SLEEP_CNT_MASK)
+
+#define MAC_PCU_SLP_MIB2_ADDRESS 0x00000104
+#define MAC_PCU_SLP_MIB2_OFFSET 0x00000104
+#define MAC_PCU_SLP_MIB2_CYCLE_CNT_MSB 31
+#define MAC_PCU_SLP_MIB2_CYCLE_CNT_LSB 0
+#define MAC_PCU_SLP_MIB2_CYCLE_CNT_MASK 0xffffffff
+#define MAC_PCU_SLP_MIB2_CYCLE_CNT_GET(x) (((x) & MAC_PCU_SLP_MIB2_CYCLE_CNT_MASK) >> MAC_PCU_SLP_MIB2_CYCLE_CNT_LSB)
+#define MAC_PCU_SLP_MIB2_CYCLE_CNT_SET(x) (((x) << MAC_PCU_SLP_MIB2_CYCLE_CNT_LSB) & MAC_PCU_SLP_MIB2_CYCLE_CNT_MASK)
+
+#define MAC_PCU_SLP_MIB3_ADDRESS 0x00000108
+#define MAC_PCU_SLP_MIB3_OFFSET 0x00000108
+#define MAC_PCU_SLP_MIB3_PENDING_MSB 1
+#define MAC_PCU_SLP_MIB3_PENDING_LSB 1
+#define MAC_PCU_SLP_MIB3_PENDING_MASK 0x00000002
+#define MAC_PCU_SLP_MIB3_PENDING_GET(x) (((x) & MAC_PCU_SLP_MIB3_PENDING_MASK) >> MAC_PCU_SLP_MIB3_PENDING_LSB)
+#define MAC_PCU_SLP_MIB3_PENDING_SET(x) (((x) << MAC_PCU_SLP_MIB3_PENDING_LSB) & MAC_PCU_SLP_MIB3_PENDING_MASK)
+#define MAC_PCU_SLP_MIB3_CLR_CNT_MSB 0
+#define MAC_PCU_SLP_MIB3_CLR_CNT_LSB 0
+#define MAC_PCU_SLP_MIB3_CLR_CNT_MASK 0x00000001
+#define MAC_PCU_SLP_MIB3_CLR_CNT_GET(x) (((x) & MAC_PCU_SLP_MIB3_CLR_CNT_MASK) >> MAC_PCU_SLP_MIB3_CLR_CNT_LSB)
+#define MAC_PCU_SLP_MIB3_CLR_CNT_SET(x) (((x) << MAC_PCU_SLP_MIB3_CLR_CNT_LSB) & MAC_PCU_SLP_MIB3_CLR_CNT_MASK)
+
+#define MAC_PCU_SLP_BEACON_ADDRESS 0x0000010c
+#define MAC_PCU_SLP_BEACON_OFFSET 0x0000010c
+#define MAC_PCU_SLP_BEACON_BMISS_TIMEOUT_ENABLE_MSB 24
+#define MAC_PCU_SLP_BEACON_BMISS_TIMEOUT_ENABLE_LSB 24
+#define MAC_PCU_SLP_BEACON_BMISS_TIMEOUT_ENABLE_MASK 0x01000000
+#define MAC_PCU_SLP_BEACON_BMISS_TIMEOUT_ENABLE_GET(x) (((x) & MAC_PCU_SLP_BEACON_BMISS_TIMEOUT_ENABLE_MASK) >> MAC_PCU_SLP_BEACON_BMISS_TIMEOUT_ENABLE_LSB)
+#define MAC_PCU_SLP_BEACON_BMISS_TIMEOUT_ENABLE_SET(x) (((x) << MAC_PCU_SLP_BEACON_BMISS_TIMEOUT_ENABLE_LSB) & MAC_PCU_SLP_BEACON_BMISS_TIMEOUT_ENABLE_MASK)
+#define MAC_PCU_SLP_BEACON_BMISS_TIMEOUT_MSB 23
+#define MAC_PCU_SLP_BEACON_BMISS_TIMEOUT_LSB 0
+#define MAC_PCU_SLP_BEACON_BMISS_TIMEOUT_MASK 0x00ffffff
+#define MAC_PCU_SLP_BEACON_BMISS_TIMEOUT_GET(x) (((x) & MAC_PCU_SLP_BEACON_BMISS_TIMEOUT_MASK) >> MAC_PCU_SLP_BEACON_BMISS_TIMEOUT_LSB)
+#define MAC_PCU_SLP_BEACON_BMISS_TIMEOUT_SET(x) (((x) << MAC_PCU_SLP_BEACON_BMISS_TIMEOUT_LSB) & MAC_PCU_SLP_BEACON_BMISS_TIMEOUT_MASK)
+
+#define POWER_REG_ADDRESS 0x00000110
+#define POWER_REG_OFFSET 0x00000110
+#define POWER_REG_VLVL_MSB 11
+#define POWER_REG_VLVL_LSB 8
+#define POWER_REG_VLVL_MASK 0x00000f00
+#define POWER_REG_VLVL_GET(x) (((x) & POWER_REG_VLVL_MASK) >> POWER_REG_VLVL_LSB)
+#define POWER_REG_VLVL_SET(x) (((x) << POWER_REG_VLVL_LSB) & POWER_REG_VLVL_MASK)
+#define POWER_REG_CPU_INT_ENABLE_MSB 7
+#define POWER_REG_CPU_INT_ENABLE_LSB 7
+#define POWER_REG_CPU_INT_ENABLE_MASK 0x00000080
+#define POWER_REG_CPU_INT_ENABLE_GET(x) (((x) & POWER_REG_CPU_INT_ENABLE_MASK) >> POWER_REG_CPU_INT_ENABLE_LSB)
+#define POWER_REG_CPU_INT_ENABLE_SET(x) (((x) << POWER_REG_CPU_INT_ENABLE_LSB) & POWER_REG_CPU_INT_ENABLE_MASK)
+#define POWER_REG_WLAN_ISO_DIS_MSB 6
+#define POWER_REG_WLAN_ISO_DIS_LSB 6
+#define POWER_REG_WLAN_ISO_DIS_MASK 0x00000040
+#define POWER_REG_WLAN_ISO_DIS_GET(x) (((x) & POWER_REG_WLAN_ISO_DIS_MASK) >> POWER_REG_WLAN_ISO_DIS_LSB)
+#define POWER_REG_WLAN_ISO_DIS_SET(x) (((x) << POWER_REG_WLAN_ISO_DIS_LSB) & POWER_REG_WLAN_ISO_DIS_MASK)
+#define POWER_REG_WLAN_ISO_CNTL_MSB 5
+#define POWER_REG_WLAN_ISO_CNTL_LSB 5
+#define POWER_REG_WLAN_ISO_CNTL_MASK 0x00000020
+#define POWER_REG_WLAN_ISO_CNTL_GET(x) (((x) & POWER_REG_WLAN_ISO_CNTL_MASK) >> POWER_REG_WLAN_ISO_CNTL_LSB)
+#define POWER_REG_WLAN_ISO_CNTL_SET(x) (((x) << POWER_REG_WLAN_ISO_CNTL_LSB) & POWER_REG_WLAN_ISO_CNTL_MASK)
+#define POWER_REG_RADIO_PWD_EN_MSB 4
+#define POWER_REG_RADIO_PWD_EN_LSB 4
+#define POWER_REG_RADIO_PWD_EN_MASK 0x00000010
+#define POWER_REG_RADIO_PWD_EN_GET(x) (((x) & POWER_REG_RADIO_PWD_EN_MASK) >> POWER_REG_RADIO_PWD_EN_LSB)
+#define POWER_REG_RADIO_PWD_EN_SET(x) (((x) << POWER_REG_RADIO_PWD_EN_LSB) & POWER_REG_RADIO_PWD_EN_MASK)
+#define POWER_REG_SOC_SCALE_EN_MSB 3
+#define POWER_REG_SOC_SCALE_EN_LSB 3
+#define POWER_REG_SOC_SCALE_EN_MASK 0x00000008
+#define POWER_REG_SOC_SCALE_EN_GET(x) (((x) & POWER_REG_SOC_SCALE_EN_MASK) >> POWER_REG_SOC_SCALE_EN_LSB)
+#define POWER_REG_SOC_SCALE_EN_SET(x) (((x) << POWER_REG_SOC_SCALE_EN_LSB) & POWER_REG_SOC_SCALE_EN_MASK)
+#define POWER_REG_WLAN_SCALE_EN_MSB 2
+#define POWER_REG_WLAN_SCALE_EN_LSB 2
+#define POWER_REG_WLAN_SCALE_EN_MASK 0x00000004
+#define POWER_REG_WLAN_SCALE_EN_GET(x) (((x) & POWER_REG_WLAN_SCALE_EN_MASK) >> POWER_REG_WLAN_SCALE_EN_LSB)
+#define POWER_REG_WLAN_SCALE_EN_SET(x) (((x) << POWER_REG_WLAN_SCALE_EN_LSB) & POWER_REG_WLAN_SCALE_EN_MASK)
+#define POWER_REG_WLAN_PWD_EN_MSB 1
+#define POWER_REG_WLAN_PWD_EN_LSB 1
+#define POWER_REG_WLAN_PWD_EN_MASK 0x00000002
+#define POWER_REG_WLAN_PWD_EN_GET(x) (((x) & POWER_REG_WLAN_PWD_EN_MASK) >> POWER_REG_WLAN_PWD_EN_LSB)
+#define POWER_REG_WLAN_PWD_EN_SET(x) (((x) << POWER_REG_WLAN_PWD_EN_LSB) & POWER_REG_WLAN_PWD_EN_MASK)
+#define POWER_REG_POWER_EN_MSB 0
+#define POWER_REG_POWER_EN_LSB 0
+#define POWER_REG_POWER_EN_MASK 0x00000001
+#define POWER_REG_POWER_EN_GET(x) (((x) & POWER_REG_POWER_EN_MASK) >> POWER_REG_POWER_EN_LSB)
+#define POWER_REG_POWER_EN_SET(x) (((x) << POWER_REG_POWER_EN_LSB) & POWER_REG_POWER_EN_MASK)
+
+#define CORE_CLK_CTRL_ADDRESS 0x00000114
+#define CORE_CLK_CTRL_OFFSET 0x00000114
+#define CORE_CLK_CTRL_DIV_MSB 2
+#define CORE_CLK_CTRL_DIV_LSB 0
+#define CORE_CLK_CTRL_DIV_MASK 0x00000007
+#define CORE_CLK_CTRL_DIV_GET(x) (((x) & CORE_CLK_CTRL_DIV_MASK) >> CORE_CLK_CTRL_DIV_LSB)
+#define CORE_CLK_CTRL_DIV_SET(x) (((x) << CORE_CLK_CTRL_DIV_LSB) & CORE_CLK_CTRL_DIV_MASK)
+
+#define SDIO_SETUP_CIRCUIT_ADDRESS 0x00000120
+#define SDIO_SETUP_CIRCUIT_OFFSET 0x00000120
+#define SDIO_SETUP_CIRCUIT_VECTOR_MSB 7
+#define SDIO_SETUP_CIRCUIT_VECTOR_LSB 0
+#define SDIO_SETUP_CIRCUIT_VECTOR_MASK 0x000000ff
+#define SDIO_SETUP_CIRCUIT_VECTOR_GET(x) (((x) & SDIO_SETUP_CIRCUIT_VECTOR_MASK) >> SDIO_SETUP_CIRCUIT_VECTOR_LSB)
+#define SDIO_SETUP_CIRCUIT_VECTOR_SET(x) (((x) << SDIO_SETUP_CIRCUIT_VECTOR_LSB) & SDIO_SETUP_CIRCUIT_VECTOR_MASK)
+
+#define SDIO_SETUP_CONFIG_ADDRESS 0x00000140
+#define SDIO_SETUP_CONFIG_OFFSET 0x00000140
+#define SDIO_SETUP_CONFIG_ENABLE_MSB 1
+#define SDIO_SETUP_CONFIG_ENABLE_LSB 1
+#define SDIO_SETUP_CONFIG_ENABLE_MASK 0x00000002
+#define SDIO_SETUP_CONFIG_ENABLE_GET(x) (((x) & SDIO_SETUP_CONFIG_ENABLE_MASK) >> SDIO_SETUP_CONFIG_ENABLE_LSB)
+#define SDIO_SETUP_CONFIG_ENABLE_SET(x) (((x) << SDIO_SETUP_CONFIG_ENABLE_LSB) & SDIO_SETUP_CONFIG_ENABLE_MASK)
+#define SDIO_SETUP_CONFIG_CLEAR_MSB 0
+#define SDIO_SETUP_CONFIG_CLEAR_LSB 0
+#define SDIO_SETUP_CONFIG_CLEAR_MASK 0x00000001
+#define SDIO_SETUP_CONFIG_CLEAR_GET(x) (((x) & SDIO_SETUP_CONFIG_CLEAR_MASK) >> SDIO_SETUP_CONFIG_CLEAR_LSB)
+#define SDIO_SETUP_CONFIG_CLEAR_SET(x) (((x) << SDIO_SETUP_CONFIG_CLEAR_LSB) & SDIO_SETUP_CONFIG_CLEAR_MASK)
+
+#define CPU_SETUP_CONFIG_ADDRESS 0x00000144
+#define CPU_SETUP_CONFIG_OFFSET 0x00000144
+#define CPU_SETUP_CONFIG_ENABLE_MSB 1
+#define CPU_SETUP_CONFIG_ENABLE_LSB 1
+#define CPU_SETUP_CONFIG_ENABLE_MASK 0x00000002
+#define CPU_SETUP_CONFIG_ENABLE_GET(x) (((x) & CPU_SETUP_CONFIG_ENABLE_MASK) >> CPU_SETUP_CONFIG_ENABLE_LSB)
+#define CPU_SETUP_CONFIG_ENABLE_SET(x) (((x) << CPU_SETUP_CONFIG_ENABLE_LSB) & CPU_SETUP_CONFIG_ENABLE_MASK)
+#define CPU_SETUP_CONFIG_CLEAR_MSB 0
+#define CPU_SETUP_CONFIG_CLEAR_LSB 0
+#define CPU_SETUP_CONFIG_CLEAR_MASK 0x00000001
+#define CPU_SETUP_CONFIG_CLEAR_GET(x) (((x) & CPU_SETUP_CONFIG_CLEAR_MASK) >> CPU_SETUP_CONFIG_CLEAR_LSB)
+#define CPU_SETUP_CONFIG_CLEAR_SET(x) (((x) << CPU_SETUP_CONFIG_CLEAR_LSB) & CPU_SETUP_CONFIG_CLEAR_MASK)
+
+#define CPU_SETUP_CIRCUIT_ADDRESS 0x00000160
+#define CPU_SETUP_CIRCUIT_OFFSET 0x00000160
+#define CPU_SETUP_CIRCUIT_VECTOR_MSB 7
+#define CPU_SETUP_CIRCUIT_VECTOR_LSB 0
+#define CPU_SETUP_CIRCUIT_VECTOR_MASK 0x000000ff
+#define CPU_SETUP_CIRCUIT_VECTOR_GET(x) (((x) & CPU_SETUP_CIRCUIT_VECTOR_MASK) >> CPU_SETUP_CIRCUIT_VECTOR_LSB)
+#define CPU_SETUP_CIRCUIT_VECTOR_SET(x) (((x) << CPU_SETUP_CIRCUIT_VECTOR_LSB) & CPU_SETUP_CIRCUIT_VECTOR_MASK)
+
+#define BB_SETUP_CONFIG_ADDRESS 0x00000180
+#define BB_SETUP_CONFIG_OFFSET 0x00000180
+#define BB_SETUP_CONFIG_ENABLE_MSB 1
+#define BB_SETUP_CONFIG_ENABLE_LSB 1
+#define BB_SETUP_CONFIG_ENABLE_MASK 0x00000002
+#define BB_SETUP_CONFIG_ENABLE_GET(x) (((x) & BB_SETUP_CONFIG_ENABLE_MASK) >> BB_SETUP_CONFIG_ENABLE_LSB)
+#define BB_SETUP_CONFIG_ENABLE_SET(x) (((x) << BB_SETUP_CONFIG_ENABLE_LSB) & BB_SETUP_CONFIG_ENABLE_MASK)
+#define BB_SETUP_CONFIG_CLEAR_MSB 0
+#define BB_SETUP_CONFIG_CLEAR_LSB 0
+#define BB_SETUP_CONFIG_CLEAR_MASK 0x00000001
+#define BB_SETUP_CONFIG_CLEAR_GET(x) (((x) & BB_SETUP_CONFIG_CLEAR_MASK) >> BB_SETUP_CONFIG_CLEAR_LSB)
+#define BB_SETUP_CONFIG_CLEAR_SET(x) (((x) << BB_SETUP_CONFIG_CLEAR_LSB) & BB_SETUP_CONFIG_CLEAR_MASK)
+
+#define BB_SETUP_CIRCUIT_ADDRESS 0x000001a0
+#define BB_SETUP_CIRCUIT_OFFSET 0x000001a0
+#define BB_SETUP_CIRCUIT_VECTOR_MSB 7
+#define BB_SETUP_CIRCUIT_VECTOR_LSB 0
+#define BB_SETUP_CIRCUIT_VECTOR_MASK 0x000000ff
+#define BB_SETUP_CIRCUIT_VECTOR_GET(x) (((x) & BB_SETUP_CIRCUIT_VECTOR_MASK) >> BB_SETUP_CIRCUIT_VECTOR_LSB)
+#define BB_SETUP_CIRCUIT_VECTOR_SET(x) (((x) << BB_SETUP_CIRCUIT_VECTOR_LSB) & BB_SETUP_CIRCUIT_VECTOR_MASK)
+
+#define GPIO_WAKEUP_CONTROL_ADDRESS 0x000001c0
+#define GPIO_WAKEUP_CONTROL_OFFSET 0x000001c0
+#define GPIO_WAKEUP_CONTROL_ENABLE_MSB 0
+#define GPIO_WAKEUP_CONTROL_ENABLE_LSB 0
+#define GPIO_WAKEUP_CONTROL_ENABLE_MASK 0x00000001
+#define GPIO_WAKEUP_CONTROL_ENABLE_GET(x) (((x) & GPIO_WAKEUP_CONTROL_ENABLE_MASK) >> GPIO_WAKEUP_CONTROL_ENABLE_LSB)
+#define GPIO_WAKEUP_CONTROL_ENABLE_SET(x) (((x) << GPIO_WAKEUP_CONTROL_ENABLE_LSB) & GPIO_WAKEUP_CONTROL_ENABLE_MASK)
+
+
+#ifndef __ASSEMBLER__
+
+typedef struct rtc_reg_reg_s {
+ volatile unsigned int reset_control;
+ volatile unsigned int xtal_control;
+ volatile unsigned int tcxo_detect;
+ volatile unsigned int xtal_test;
+ volatile unsigned int quadrature;
+ volatile unsigned int pll_control;
+ volatile unsigned int pll_settle;
+ volatile unsigned int xtal_settle;
+ volatile unsigned int cpu_clock;
+ volatile unsigned int clock_out;
+ volatile unsigned int clock_control;
+ volatile unsigned int bias_override;
+ volatile unsigned int wdt_control;
+ volatile unsigned int wdt_status;
+ volatile unsigned int wdt;
+ volatile unsigned int wdt_count;
+ volatile unsigned int wdt_reset;
+ volatile unsigned int int_status;
+ volatile unsigned int lf_timer0;
+ volatile unsigned int lf_timer_count0;
+ volatile unsigned int lf_timer_control0;
+ volatile unsigned int lf_timer_status0;
+ volatile unsigned int lf_timer1;
+ volatile unsigned int lf_timer_count1;
+ volatile unsigned int lf_timer_control1;
+ volatile unsigned int lf_timer_status1;
+ volatile unsigned int lf_timer2;
+ volatile unsigned int lf_timer_count2;
+ volatile unsigned int lf_timer_control2;
+ volatile unsigned int lf_timer_status2;
+ volatile unsigned int lf_timer3;
+ volatile unsigned int lf_timer_count3;
+ volatile unsigned int lf_timer_control3;
+ volatile unsigned int lf_timer_status3;
+ volatile unsigned int hf_timer;
+ volatile unsigned int hf_timer_count;
+ volatile unsigned int hf_lf_count;
+ volatile unsigned int hf_timer_control;
+ volatile unsigned int hf_timer_status;
+ volatile unsigned int rtc_control;
+ volatile unsigned int rtc_time;
+ volatile unsigned int rtc_date;
+ volatile unsigned int rtc_set_time;
+ volatile unsigned int rtc_set_date;
+ volatile unsigned int rtc_set_alarm;
+ volatile unsigned int rtc_config;
+ volatile unsigned int rtc_alarm_status;
+ volatile unsigned int uart_wakeup;
+ volatile unsigned int reset_cause;
+ volatile unsigned int system_sleep;
+ volatile unsigned int sdio_wrapper;
+ volatile unsigned int mac_sleep_control;
+ volatile unsigned int keep_awake;
+ volatile unsigned int lpo_cal_time;
+ volatile unsigned int lpo_init_dividend_int;
+ volatile unsigned int lpo_init_dividend_fraction;
+ volatile unsigned int lpo_cal;
+ volatile unsigned int lpo_cal_test_control;
+ volatile unsigned int lpo_cal_test_status;
+ volatile unsigned int chip_id;
+ volatile unsigned int derived_rtc_clk;
+ volatile unsigned int mac_pcu_slp32_mode;
+ volatile unsigned int mac_pcu_slp32_wake;
+ volatile unsigned int mac_pcu_slp32_inc;
+ volatile unsigned int mac_pcu_slp_mib1;
+ volatile unsigned int mac_pcu_slp_mib2;
+ volatile unsigned int mac_pcu_slp_mib3;
+ volatile unsigned int mac_pcu_slp_beacon;
+ volatile unsigned int power_reg;
+ volatile unsigned int core_clk_ctrl;
+ unsigned char pad0[8]; /* pad to 0x120 */
+ volatile unsigned int sdio_setup_circuit[8];
+ volatile unsigned int sdio_setup_config;
+ volatile unsigned int cpu_setup_config;
+ unsigned char pad1[24]; /* pad to 0x160 */
+ volatile unsigned int cpu_setup_circuit[8];
+ volatile unsigned int bb_setup_config;
+ unsigned char pad2[28]; /* pad to 0x1a0 */
+ volatile unsigned int bb_setup_circuit[8];
+ volatile unsigned int gpio_wakeup_control;
+} rtc_reg_reg_t;
+
+#endif /* __ASSEMBLER__ */
+
+#endif /* _RTC_REG_H_ */
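Editorial note (not part of the patch): the generated *_GET/*_SET macros above all follow one pattern, mask-and-shift-down to read a field, shift-up-and-mask to build one. A minimal sketch of how such a header is typically consumed is shown below; the MMIO accessors are standard kernel ioread32()/iowrite32() and the ioremap()ed rtc_base is an assumption, since the real ath6kl driver reaches these registers over its host interface rather than direct MMIO.

#include <linux/io.h>
#include <linux/kernel.h>
#include <linux/types.h>

/* Illustrative sketch only: decode one register and read-modify-write
 * a single field in another, using the generated macros above. */
static void rtc_reg_example(void __iomem *rtc_base)
{
	u32 val;

	/* Decode the chip identification register into its three fields. */
	val = ioread32(rtc_base + CHIP_ID_ADDRESS);
	pr_info("chip: device 0x%x config 0x%x version 0x%x\n",
		CHIP_ID_DEVICE_ID_GET(val),
		CHIP_ID_CONFIG_ID_GET(val),
		CHIP_ID_VERSION_ID_GET(val));

	/* Update one field without disturbing its neighbours. */
	val = ioread32(rtc_base + MAC_SLEEP_CONTROL_ADDRESS);
	val &= ~MAC_SLEEP_CONTROL_ENABLE_MASK;
	val |= MAC_SLEEP_CONTROL_ENABLE_SET(1);
	iowrite32(val, rtc_base + MAC_SLEEP_CONTROL_ADDRESS);
}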
diff --git a/drivers/staging/ath6kl/include/common/AR6002/hw2.0/hw/si_reg.h b/drivers/staging/ath6kl/include/common/AR6002/hw2.0/hw/si_reg.h
new file mode 100644
index 000000000000..16fb99cfd0b8
--- /dev/null
+++ b/drivers/staging/ath6kl/include/common/AR6002/hw2.0/hw/si_reg.h
@@ -0,0 +1,186 @@
+#ifndef _SI_REG_REG_H_
+#define _SI_REG_REG_H_
+
+#define SI_CONFIG_ADDRESS 0x00000000
+#define SI_CONFIG_OFFSET 0x00000000
+#define SI_CONFIG_ERR_INT_MSB 19
+#define SI_CONFIG_ERR_INT_LSB 19
+#define SI_CONFIG_ERR_INT_MASK 0x00080000
+#define SI_CONFIG_ERR_INT_GET(x) (((x) & SI_CONFIG_ERR_INT_MASK) >> SI_CONFIG_ERR_INT_LSB)
+#define SI_CONFIG_ERR_INT_SET(x) (((x) << SI_CONFIG_ERR_INT_LSB) & SI_CONFIG_ERR_INT_MASK)
+#define SI_CONFIG_BIDIR_OD_DATA_MSB 18
+#define SI_CONFIG_BIDIR_OD_DATA_LSB 18
+#define SI_CONFIG_BIDIR_OD_DATA_MASK 0x00040000
+#define SI_CONFIG_BIDIR_OD_DATA_GET(x) (((x) & SI_CONFIG_BIDIR_OD_DATA_MASK) >> SI_CONFIG_BIDIR_OD_DATA_LSB)
+#define SI_CONFIG_BIDIR_OD_DATA_SET(x) (((x) << SI_CONFIG_BIDIR_OD_DATA_LSB) & SI_CONFIG_BIDIR_OD_DATA_MASK)
+#define SI_CONFIG_I2C_MSB 16
+#define SI_CONFIG_I2C_LSB 16
+#define SI_CONFIG_I2C_MASK 0x00010000
+#define SI_CONFIG_I2C_GET(x) (((x) & SI_CONFIG_I2C_MASK) >> SI_CONFIG_I2C_LSB)
+#define SI_CONFIG_I2C_SET(x) (((x) << SI_CONFIG_I2C_LSB) & SI_CONFIG_I2C_MASK)
+#define SI_CONFIG_POS_SAMPLE_MSB 7
+#define SI_CONFIG_POS_SAMPLE_LSB 7
+#define SI_CONFIG_POS_SAMPLE_MASK 0x00000080
+#define SI_CONFIG_POS_SAMPLE_GET(x) (((x) & SI_CONFIG_POS_SAMPLE_MASK) >> SI_CONFIG_POS_SAMPLE_LSB)
+#define SI_CONFIG_POS_SAMPLE_SET(x) (((x) << SI_CONFIG_POS_SAMPLE_LSB) & SI_CONFIG_POS_SAMPLE_MASK)
+#define SI_CONFIG_POS_DRIVE_MSB 6
+#define SI_CONFIG_POS_DRIVE_LSB 6
+#define SI_CONFIG_POS_DRIVE_MASK 0x00000040
+#define SI_CONFIG_POS_DRIVE_GET(x) (((x) & SI_CONFIG_POS_DRIVE_MASK) >> SI_CONFIG_POS_DRIVE_LSB)
+#define SI_CONFIG_POS_DRIVE_SET(x) (((x) << SI_CONFIG_POS_DRIVE_LSB) & SI_CONFIG_POS_DRIVE_MASK)
+#define SI_CONFIG_INACTIVE_DATA_MSB 5
+#define SI_CONFIG_INACTIVE_DATA_LSB 5
+#define SI_CONFIG_INACTIVE_DATA_MASK 0x00000020
+#define SI_CONFIG_INACTIVE_DATA_GET(x) (((x) & SI_CONFIG_INACTIVE_DATA_MASK) >> SI_CONFIG_INACTIVE_DATA_LSB)
+#define SI_CONFIG_INACTIVE_DATA_SET(x) (((x) << SI_CONFIG_INACTIVE_DATA_LSB) & SI_CONFIG_INACTIVE_DATA_MASK)
+#define SI_CONFIG_INACTIVE_CLK_MSB 4
+#define SI_CONFIG_INACTIVE_CLK_LSB 4
+#define SI_CONFIG_INACTIVE_CLK_MASK 0x00000010
+#define SI_CONFIG_INACTIVE_CLK_GET(x) (((x) & SI_CONFIG_INACTIVE_CLK_MASK) >> SI_CONFIG_INACTIVE_CLK_LSB)
+#define SI_CONFIG_INACTIVE_CLK_SET(x) (((x) << SI_CONFIG_INACTIVE_CLK_LSB) & SI_CONFIG_INACTIVE_CLK_MASK)
+#define SI_CONFIG_DIVIDER_MSB 3
+#define SI_CONFIG_DIVIDER_LSB 0
+#define SI_CONFIG_DIVIDER_MASK 0x0000000f
+#define SI_CONFIG_DIVIDER_GET(x) (((x) & SI_CONFIG_DIVIDER_MASK) >> SI_CONFIG_DIVIDER_LSB)
+#define SI_CONFIG_DIVIDER_SET(x) (((x) << SI_CONFIG_DIVIDER_LSB) & SI_CONFIG_DIVIDER_MASK)
+
+#define SI_CS_ADDRESS 0x00000004
+#define SI_CS_OFFSET 0x00000004
+#define SI_CS_BIT_CNT_IN_LAST_BYTE_MSB 13
+#define SI_CS_BIT_CNT_IN_LAST_BYTE_LSB 11
+#define SI_CS_BIT_CNT_IN_LAST_BYTE_MASK 0x00003800
+#define SI_CS_BIT_CNT_IN_LAST_BYTE_GET(x) (((x) & SI_CS_BIT_CNT_IN_LAST_BYTE_MASK) >> SI_CS_BIT_CNT_IN_LAST_BYTE_LSB)
+#define SI_CS_BIT_CNT_IN_LAST_BYTE_SET(x) (((x) << SI_CS_BIT_CNT_IN_LAST_BYTE_LSB) & SI_CS_BIT_CNT_IN_LAST_BYTE_MASK)
+#define SI_CS_DONE_ERR_MSB 10
+#define SI_CS_DONE_ERR_LSB 10
+#define SI_CS_DONE_ERR_MASK 0x00000400
+#define SI_CS_DONE_ERR_GET(x) (((x) & SI_CS_DONE_ERR_MASK) >> SI_CS_DONE_ERR_LSB)
+#define SI_CS_DONE_ERR_SET(x) (((x) << SI_CS_DONE_ERR_LSB) & SI_CS_DONE_ERR_MASK)
+#define SI_CS_DONE_INT_MSB 9
+#define SI_CS_DONE_INT_LSB 9
+#define SI_CS_DONE_INT_MASK 0x00000200
+#define SI_CS_DONE_INT_GET(x) (((x) & SI_CS_DONE_INT_MASK) >> SI_CS_DONE_INT_LSB)
+#define SI_CS_DONE_INT_SET(x) (((x) << SI_CS_DONE_INT_LSB) & SI_CS_DONE_INT_MASK)
+#define SI_CS_START_MSB 8
+#define SI_CS_START_LSB 8
+#define SI_CS_START_MASK 0x00000100
+#define SI_CS_START_GET(x) (((x) & SI_CS_START_MASK) >> SI_CS_START_LSB)
+#define SI_CS_START_SET(x) (((x) << SI_CS_START_LSB) & SI_CS_START_MASK)
+#define SI_CS_RX_CNT_MSB 7
+#define SI_CS_RX_CNT_LSB 4
+#define SI_CS_RX_CNT_MASK 0x000000f0
+#define SI_CS_RX_CNT_GET(x) (((x) & SI_CS_RX_CNT_MASK) >> SI_CS_RX_CNT_LSB)
+#define SI_CS_RX_CNT_SET(x) (((x) << SI_CS_RX_CNT_LSB) & SI_CS_RX_CNT_MASK)
+#define SI_CS_TX_CNT_MSB 3
+#define SI_CS_TX_CNT_LSB 0
+#define SI_CS_TX_CNT_MASK 0x0000000f
+#define SI_CS_TX_CNT_GET(x) (((x) & SI_CS_TX_CNT_MASK) >> SI_CS_TX_CNT_LSB)
+#define SI_CS_TX_CNT_SET(x) (((x) << SI_CS_TX_CNT_LSB) & SI_CS_TX_CNT_MASK)
+
+#define SI_TX_DATA0_ADDRESS 0x00000008
+#define SI_TX_DATA0_OFFSET 0x00000008
+#define SI_TX_DATA0_DATA3_MSB 31
+#define SI_TX_DATA0_DATA3_LSB 24
+#define SI_TX_DATA0_DATA3_MASK 0xff000000
+#define SI_TX_DATA0_DATA3_GET(x) (((x) & SI_TX_DATA0_DATA3_MASK) >> SI_TX_DATA0_DATA3_LSB)
+#define SI_TX_DATA0_DATA3_SET(x) (((x) << SI_TX_DATA0_DATA3_LSB) & SI_TX_DATA0_DATA3_MASK)
+#define SI_TX_DATA0_DATA2_MSB 23
+#define SI_TX_DATA0_DATA2_LSB 16
+#define SI_TX_DATA0_DATA2_MASK 0x00ff0000
+#define SI_TX_DATA0_DATA2_GET(x) (((x) & SI_TX_DATA0_DATA2_MASK) >> SI_TX_DATA0_DATA2_LSB)
+#define SI_TX_DATA0_DATA2_SET(x) (((x) << SI_TX_DATA0_DATA2_LSB) & SI_TX_DATA0_DATA2_MASK)
+#define SI_TX_DATA0_DATA1_MSB 15
+#define SI_TX_DATA0_DATA1_LSB 8
+#define SI_TX_DATA0_DATA1_MASK 0x0000ff00
+#define SI_TX_DATA0_DATA1_GET(x) (((x) & SI_TX_DATA0_DATA1_MASK) >> SI_TX_DATA0_DATA1_LSB)
+#define SI_TX_DATA0_DATA1_SET(x) (((x) << SI_TX_DATA0_DATA1_LSB) & SI_TX_DATA0_DATA1_MASK)
+#define SI_TX_DATA0_DATA0_MSB 7
+#define SI_TX_DATA0_DATA0_LSB 0
+#define SI_TX_DATA0_DATA0_MASK 0x000000ff
+#define SI_TX_DATA0_DATA0_GET(x) (((x) & SI_TX_DATA0_DATA0_MASK) >> SI_TX_DATA0_DATA0_LSB)
+#define SI_TX_DATA0_DATA0_SET(x) (((x) << SI_TX_DATA0_DATA0_LSB) & SI_TX_DATA0_DATA0_MASK)
+
+#define SI_TX_DATA1_ADDRESS 0x0000000c
+#define SI_TX_DATA1_OFFSET 0x0000000c
+#define SI_TX_DATA1_DATA7_MSB 31
+#define SI_TX_DATA1_DATA7_LSB 24
+#define SI_TX_DATA1_DATA7_MASK 0xff000000
+#define SI_TX_DATA1_DATA7_GET(x) (((x) & SI_TX_DATA1_DATA7_MASK) >> SI_TX_DATA1_DATA7_LSB)
+#define SI_TX_DATA1_DATA7_SET(x) (((x) << SI_TX_DATA1_DATA7_LSB) & SI_TX_DATA1_DATA7_MASK)
+#define SI_TX_DATA1_DATA6_MSB 23
+#define SI_TX_DATA1_DATA6_LSB 16
+#define SI_TX_DATA1_DATA6_MASK 0x00ff0000
+#define SI_TX_DATA1_DATA6_GET(x) (((x) & SI_TX_DATA1_DATA6_MASK) >> SI_TX_DATA1_DATA6_LSB)
+#define SI_TX_DATA1_DATA6_SET(x) (((x) << SI_TX_DATA1_DATA6_LSB) & SI_TX_DATA1_DATA6_MASK)
+#define SI_TX_DATA1_DATA5_MSB 15
+#define SI_TX_DATA1_DATA5_LSB 8
+#define SI_TX_DATA1_DATA5_MASK 0x0000ff00
+#define SI_TX_DATA1_DATA5_GET(x) (((x) & SI_TX_DATA1_DATA5_MASK) >> SI_TX_DATA1_DATA5_LSB)
+#define SI_TX_DATA1_DATA5_SET(x) (((x) << SI_TX_DATA1_DATA5_LSB) & SI_TX_DATA1_DATA5_MASK)
+#define SI_TX_DATA1_DATA4_MSB 7
+#define SI_TX_DATA1_DATA4_LSB 0
+#define SI_TX_DATA1_DATA4_MASK 0x000000ff
+#define SI_TX_DATA1_DATA4_GET(x) (((x) & SI_TX_DATA1_DATA4_MASK) >> SI_TX_DATA1_DATA4_LSB)
+#define SI_TX_DATA1_DATA4_SET(x) (((x) << SI_TX_DATA1_DATA4_LSB) & SI_TX_DATA1_DATA4_MASK)
+
+#define SI_RX_DATA0_ADDRESS 0x00000010
+#define SI_RX_DATA0_OFFSET 0x00000010
+#define SI_RX_DATA0_DATA3_MSB 31
+#define SI_RX_DATA0_DATA3_LSB 24
+#define SI_RX_DATA0_DATA3_MASK 0xff000000
+#define SI_RX_DATA0_DATA3_GET(x) (((x) & SI_RX_DATA0_DATA3_MASK) >> SI_RX_DATA0_DATA3_LSB)
+#define SI_RX_DATA0_DATA3_SET(x) (((x) << SI_RX_DATA0_DATA3_LSB) & SI_RX_DATA0_DATA3_MASK)
+#define SI_RX_DATA0_DATA2_MSB 23
+#define SI_RX_DATA0_DATA2_LSB 16
+#define SI_RX_DATA0_DATA2_MASK 0x00ff0000
+#define SI_RX_DATA0_DATA2_GET(x) (((x) & SI_RX_DATA0_DATA2_MASK) >> SI_RX_DATA0_DATA2_LSB)
+#define SI_RX_DATA0_DATA2_SET(x) (((x) << SI_RX_DATA0_DATA2_LSB) & SI_RX_DATA0_DATA2_MASK)
+#define SI_RX_DATA0_DATA1_MSB 15
+#define SI_RX_DATA0_DATA1_LSB 8
+#define SI_RX_DATA0_DATA1_MASK 0x0000ff00
+#define SI_RX_DATA0_DATA1_GET(x) (((x) & SI_RX_DATA0_DATA1_MASK) >> SI_RX_DATA0_DATA1_LSB)
+#define SI_RX_DATA0_DATA1_SET(x) (((x) << SI_RX_DATA0_DATA1_LSB) & SI_RX_DATA0_DATA1_MASK)
+#define SI_RX_DATA0_DATA0_MSB 7
+#define SI_RX_DATA0_DATA0_LSB 0
+#define SI_RX_DATA0_DATA0_MASK 0x000000ff
+#define SI_RX_DATA0_DATA0_GET(x) (((x) & SI_RX_DATA0_DATA0_MASK) >> SI_RX_DATA0_DATA0_LSB)
+#define SI_RX_DATA0_DATA0_SET(x) (((x) << SI_RX_DATA0_DATA0_LSB) & SI_RX_DATA0_DATA0_MASK)
+
+#define SI_RX_DATA1_ADDRESS 0x00000014
+#define SI_RX_DATA1_OFFSET 0x00000014
+#define SI_RX_DATA1_DATA7_MSB 31
+#define SI_RX_DATA1_DATA7_LSB 24
+#define SI_RX_DATA1_DATA7_MASK 0xff000000
+#define SI_RX_DATA1_DATA7_GET(x) (((x) & SI_RX_DATA1_DATA7_MASK) >> SI_RX_DATA1_DATA7_LSB)
+#define SI_RX_DATA1_DATA7_SET(x) (((x) << SI_RX_DATA1_DATA7_LSB) & SI_RX_DATA1_DATA7_MASK)
+#define SI_RX_DATA1_DATA6_MSB 23
+#define SI_RX_DATA1_DATA6_LSB 16
+#define SI_RX_DATA1_DATA6_MASK 0x00ff0000
+#define SI_RX_DATA1_DATA6_GET(x) (((x) & SI_RX_DATA1_DATA6_MASK) >> SI_RX_DATA1_DATA6_LSB)
+#define SI_RX_DATA1_DATA6_SET(x) (((x) << SI_RX_DATA1_DATA6_LSB) & SI_RX_DATA1_DATA6_MASK)
+#define SI_RX_DATA1_DATA5_MSB 15
+#define SI_RX_DATA1_DATA5_LSB 8
+#define SI_RX_DATA1_DATA5_MASK 0x0000ff00
+#define SI_RX_DATA1_DATA5_GET(x) (((x) & SI_RX_DATA1_DATA5_MASK) >> SI_RX_DATA1_DATA5_LSB)
+#define SI_RX_DATA1_DATA5_SET(x) (((x) << SI_RX_DATA1_DATA5_LSB) & SI_RX_DATA1_DATA5_MASK)
+#define SI_RX_DATA1_DATA4_MSB 7
+#define SI_RX_DATA1_DATA4_LSB 0
+#define SI_RX_DATA1_DATA4_MASK 0x000000ff
+#define SI_RX_DATA1_DATA4_GET(x) (((x) & SI_RX_DATA1_DATA4_MASK) >> SI_RX_DATA1_DATA4_LSB)
+#define SI_RX_DATA1_DATA4_SET(x) (((x) << SI_RX_DATA1_DATA4_LSB) & SI_RX_DATA1_DATA4_MASK)
+
+
+#ifndef __ASSEMBLER__
+
+typedef struct si_reg_reg_s {
+ volatile unsigned int si_config;
+ volatile unsigned int si_cs;
+ volatile unsigned int si_tx_data0;
+ volatile unsigned int si_tx_data1;
+ volatile unsigned int si_rx_data0;
+ volatile unsigned int si_rx_data1;
+} si_reg_reg_t;
+
+#endif /* __ASSEMBLER__ */
+
+#endif /* _SI_REG_H_ */
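Editorial note (not part of the patch): the SET macros compose with bitwise OR when a whole register value is built from scratch, and the GET macros pick status bits back out. The sketch below is purely illustrative; the field values (I2C mode, divider 0xf) and the si_base pointer are assumptions, not the driver's actual programming sequence.

#include <linux/io.h>
#include <linux/kernel.h>
#include <linux/types.h>

/* Build a serial-interface configuration word field by field, then
 * inspect the control/status register. Values are illustration only. */
static void si_config_example(void __iomem *si_base)
{
	u32 cfg, cs;

	cfg = SI_CONFIG_I2C_SET(1) |
	      SI_CONFIG_INACTIVE_DATA_SET(1) |
	      SI_CONFIG_INACTIVE_CLK_SET(1) |
	      SI_CONFIG_DIVIDER_SET(0xf);
	iowrite32(cfg, si_base + SI_CONFIG_ADDRESS);

	cs = ioread32(si_base + SI_CS_ADDRESS);
	if (SI_CS_DONE_ERR_GET(cs))
		pr_warn("si: last transfer reported an error\n");
}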
diff --git a/drivers/staging/ath6kl/include/common/AR6002/hw2.0/hw/uart_reg.h b/drivers/staging/ath6kl/include/common/AR6002/hw2.0/hw/uart_reg.h
new file mode 100644
index 000000000000..5db321b72b2c
--- /dev/null
+++ b/drivers/staging/ath6kl/include/common/AR6002/hw2.0/hw/uart_reg.h
@@ -0,0 +1,327 @@
+#ifndef _UART_REG_REG_H_
+#define _UART_REG_REG_H_
+
+#define RBR_ADDRESS 0x00000000
+#define RBR_OFFSET 0x00000000
+#define RBR_RBR_MSB 7
+#define RBR_RBR_LSB 0
+#define RBR_RBR_MASK 0x000000ff
+#define RBR_RBR_GET(x) (((x) & RBR_RBR_MASK) >> RBR_RBR_LSB)
+#define RBR_RBR_SET(x) (((x) << RBR_RBR_LSB) & RBR_RBR_MASK)
+
+#define THR_ADDRESS 0x00000000
+#define THR_OFFSET 0x00000000
+#define THR_THR_MSB 7
+#define THR_THR_LSB 0
+#define THR_THR_MASK 0x000000ff
+#define THR_THR_GET(x) (((x) & THR_THR_MASK) >> THR_THR_LSB)
+#define THR_THR_SET(x) (((x) << THR_THR_LSB) & THR_THR_MASK)
+
+#define DLL_ADDRESS 0x00000000
+#define DLL_OFFSET 0x00000000
+#define DLL_DLL_MSB 7
+#define DLL_DLL_LSB 0
+#define DLL_DLL_MASK 0x000000ff
+#define DLL_DLL_GET(x) (((x) & DLL_DLL_MASK) >> DLL_DLL_LSB)
+#define DLL_DLL_SET(x) (((x) << DLL_DLL_LSB) & DLL_DLL_MASK)
+
+#define DLH_ADDRESS 0x00000004
+#define DLH_OFFSET 0x00000004
+#define DLH_DLH_MSB 7
+#define DLH_DLH_LSB 0
+#define DLH_DLH_MASK 0x000000ff
+#define DLH_DLH_GET(x) (((x) & DLH_DLH_MASK) >> DLH_DLH_LSB)
+#define DLH_DLH_SET(x) (((x) << DLH_DLH_LSB) & DLH_DLH_MASK)
+
+#define IER_ADDRESS 0x00000004
+#define IER_OFFSET 0x00000004
+#define IER_EDDSI_MSB 3
+#define IER_EDDSI_LSB 3
+#define IER_EDDSI_MASK 0x00000008
+#define IER_EDDSI_GET(x) (((x) & IER_EDDSI_MASK) >> IER_EDDSI_LSB)
+#define IER_EDDSI_SET(x) (((x) << IER_EDDSI_LSB) & IER_EDDSI_MASK)
+#define IER_ELSI_MSB 2
+#define IER_ELSI_LSB 2
+#define IER_ELSI_MASK 0x00000004
+#define IER_ELSI_GET(x) (((x) & IER_ELSI_MASK) >> IER_ELSI_LSB)
+#define IER_ELSI_SET(x) (((x) << IER_ELSI_LSB) & IER_ELSI_MASK)
+#define IER_ETBEI_MSB 1
+#define IER_ETBEI_LSB 1
+#define IER_ETBEI_MASK 0x00000002
+#define IER_ETBEI_GET(x) (((x) & IER_ETBEI_MASK) >> IER_ETBEI_LSB)
+#define IER_ETBEI_SET(x) (((x) << IER_ETBEI_LSB) & IER_ETBEI_MASK)
+#define IER_ERBFI_MSB 0
+#define IER_ERBFI_LSB 0
+#define IER_ERBFI_MASK 0x00000001
+#define IER_ERBFI_GET(x) (((x) & IER_ERBFI_MASK) >> IER_ERBFI_LSB)
+#define IER_ERBFI_SET(x) (((x) << IER_ERBFI_LSB) & IER_ERBFI_MASK)
+
+#define IIR_ADDRESS 0x00000008
+#define IIR_OFFSET 0x00000008
+#define IIR_FIFO_STATUS_MSB 7
+#define IIR_FIFO_STATUS_LSB 6
+#define IIR_FIFO_STATUS_MASK 0x000000c0
+#define IIR_FIFO_STATUS_GET(x) (((x) & IIR_FIFO_STATUS_MASK) >> IIR_FIFO_STATUS_LSB)
+#define IIR_FIFO_STATUS_SET(x) (((x) << IIR_FIFO_STATUS_LSB) & IIR_FIFO_STATUS_MASK)
+#define IIR_IID_MSB 3
+#define IIR_IID_LSB 0
+#define IIR_IID_MASK 0x0000000f
+#define IIR_IID_GET(x) (((x) & IIR_IID_MASK) >> IIR_IID_LSB)
+#define IIR_IID_SET(x) (((x) << IIR_IID_LSB) & IIR_IID_MASK)
+
+#define FCR_ADDRESS 0x00000008
+#define FCR_OFFSET 0x00000008
+#define FCR_RCVR_TRIG_MSB 7
+#define FCR_RCVR_TRIG_LSB 6
+#define FCR_RCVR_TRIG_MASK 0x000000c0
+#define FCR_RCVR_TRIG_GET(x) (((x) & FCR_RCVR_TRIG_MASK) >> FCR_RCVR_TRIG_LSB)
+#define FCR_RCVR_TRIG_SET(x) (((x) << FCR_RCVR_TRIG_LSB) & FCR_RCVR_TRIG_MASK)
+#define FCR_DMA_MODE_MSB 3
+#define FCR_DMA_MODE_LSB 3
+#define FCR_DMA_MODE_MASK 0x00000008
+#define FCR_DMA_MODE_GET(x) (((x) & FCR_DMA_MODE_MASK) >> FCR_DMA_MODE_LSB)
+#define FCR_DMA_MODE_SET(x) (((x) << FCR_DMA_MODE_LSB) & FCR_DMA_MODE_MASK)
+#define FCR_XMIT_FIFO_RST_MSB 2
+#define FCR_XMIT_FIFO_RST_LSB 2
+#define FCR_XMIT_FIFO_RST_MASK 0x00000004
+#define FCR_XMIT_FIFO_RST_GET(x) (((x) & FCR_XMIT_FIFO_RST_MASK) >> FCR_XMIT_FIFO_RST_LSB)
+#define FCR_XMIT_FIFO_RST_SET(x) (((x) << FCR_XMIT_FIFO_RST_LSB) & FCR_XMIT_FIFO_RST_MASK)
+#define FCR_RCVR_FIFO_RST_MSB 1
+#define FCR_RCVR_FIFO_RST_LSB 1
+#define FCR_RCVR_FIFO_RST_MASK 0x00000002
+#define FCR_RCVR_FIFO_RST_GET(x) (((x) & FCR_RCVR_FIFO_RST_MASK) >> FCR_RCVR_FIFO_RST_LSB)
+#define FCR_RCVR_FIFO_RST_SET(x) (((x) << FCR_RCVR_FIFO_RST_LSB) & FCR_RCVR_FIFO_RST_MASK)
+#define FCR_FIFO_EN_MSB 0
+#define FCR_FIFO_EN_LSB 0
+#define FCR_FIFO_EN_MASK 0x00000001
+#define FCR_FIFO_EN_GET(x) (((x) & FCR_FIFO_EN_MASK) >> FCR_FIFO_EN_LSB)
+#define FCR_FIFO_EN_SET(x) (((x) << FCR_FIFO_EN_LSB) & FCR_FIFO_EN_MASK)
+
+#define LCR_ADDRESS 0x0000000c
+#define LCR_OFFSET 0x0000000c
+#define LCR_DLAB_MSB 7
+#define LCR_DLAB_LSB 7
+#define LCR_DLAB_MASK 0x00000080
+#define LCR_DLAB_GET(x) (((x) & LCR_DLAB_MASK) >> LCR_DLAB_LSB)
+#define LCR_DLAB_SET(x) (((x) << LCR_DLAB_LSB) & LCR_DLAB_MASK)
+#define LCR_BREAK_MSB 6
+#define LCR_BREAK_LSB 6
+#define LCR_BREAK_MASK 0x00000040
+#define LCR_BREAK_GET(x) (((x) & LCR_BREAK_MASK) >> LCR_BREAK_LSB)
+#define LCR_BREAK_SET(x) (((x) << LCR_BREAK_LSB) & LCR_BREAK_MASK)
+#define LCR_EPS_MSB 4
+#define LCR_EPS_LSB 4
+#define LCR_EPS_MASK 0x00000010
+#define LCR_EPS_GET(x) (((x) & LCR_EPS_MASK) >> LCR_EPS_LSB)
+#define LCR_EPS_SET(x) (((x) << LCR_EPS_LSB) & LCR_EPS_MASK)
+#define LCR_PEN_MSB 3
+#define LCR_PEN_LSB 3
+#define LCR_PEN_MASK 0x00000008
+#define LCR_PEN_GET(x) (((x) & LCR_PEN_MASK) >> LCR_PEN_LSB)
+#define LCR_PEN_SET(x) (((x) << LCR_PEN_LSB) & LCR_PEN_MASK)
+#define LCR_STOP_MSB 2
+#define LCR_STOP_LSB 2
+#define LCR_STOP_MASK 0x00000004
+#define LCR_STOP_GET(x) (((x) & LCR_STOP_MASK) >> LCR_STOP_LSB)
+#define LCR_STOP_SET(x) (((x) << LCR_STOP_LSB) & LCR_STOP_MASK)
+#define LCR_CLS_MSB 1
+#define LCR_CLS_LSB 0
+#define LCR_CLS_MASK 0x00000003
+#define LCR_CLS_GET(x) (((x) & LCR_CLS_MASK) >> LCR_CLS_LSB)
+#define LCR_CLS_SET(x) (((x) << LCR_CLS_LSB) & LCR_CLS_MASK)
+
+#define MCR_ADDRESS 0x00000010
+#define MCR_OFFSET 0x00000010
+#define MCR_LOOPBACK_MSB 5
+#define MCR_LOOPBACK_LSB 5
+#define MCR_LOOPBACK_MASK 0x00000020
+#define MCR_LOOPBACK_GET(x) (((x) & MCR_LOOPBACK_MASK) >> MCR_LOOPBACK_LSB)
+#define MCR_LOOPBACK_SET(x) (((x) << MCR_LOOPBACK_LSB) & MCR_LOOPBACK_MASK)
+#define MCR_OUT2_MSB 3
+#define MCR_OUT2_LSB 3
+#define MCR_OUT2_MASK 0x00000008
+#define MCR_OUT2_GET(x) (((x) & MCR_OUT2_MASK) >> MCR_OUT2_LSB)
+#define MCR_OUT2_SET(x) (((x) << MCR_OUT2_LSB) & MCR_OUT2_MASK)
+#define MCR_OUT1_MSB 2
+#define MCR_OUT1_LSB 2
+#define MCR_OUT1_MASK 0x00000004
+#define MCR_OUT1_GET(x) (((x) & MCR_OUT1_MASK) >> MCR_OUT1_LSB)
+#define MCR_OUT1_SET(x) (((x) << MCR_OUT1_LSB) & MCR_OUT1_MASK)
+#define MCR_RTS_MSB 1
+#define MCR_RTS_LSB 1
+#define MCR_RTS_MASK 0x00000002
+#define MCR_RTS_GET(x) (((x) & MCR_RTS_MASK) >> MCR_RTS_LSB)
+#define MCR_RTS_SET(x) (((x) << MCR_RTS_LSB) & MCR_RTS_MASK)
+#define MCR_DTR_MSB 0
+#define MCR_DTR_LSB 0
+#define MCR_DTR_MASK 0x00000001
+#define MCR_DTR_GET(x) (((x) & MCR_DTR_MASK) >> MCR_DTR_LSB)
+#define MCR_DTR_SET(x) (((x) << MCR_DTR_LSB) & MCR_DTR_MASK)
+
+#define LSR_ADDRESS 0x00000014
+#define LSR_OFFSET 0x00000014
+#define LSR_FERR_MSB 7
+#define LSR_FERR_LSB 7
+#define LSR_FERR_MASK 0x00000080
+#define LSR_FERR_GET(x) (((x) & LSR_FERR_MASK) >> LSR_FERR_LSB)
+#define LSR_FERR_SET(x) (((x) << LSR_FERR_LSB) & LSR_FERR_MASK)
+#define LSR_TEMT_MSB 6
+#define LSR_TEMT_LSB 6
+#define LSR_TEMT_MASK 0x00000040
+#define LSR_TEMT_GET(x) (((x) & LSR_TEMT_MASK) >> LSR_TEMT_LSB)
+#define LSR_TEMT_SET(x) (((x) << LSR_TEMT_LSB) & LSR_TEMT_MASK)
+#define LSR_THRE_MSB 5
+#define LSR_THRE_LSB 5
+#define LSR_THRE_MASK 0x00000020
+#define LSR_THRE_GET(x) (((x) & LSR_THRE_MASK) >> LSR_THRE_LSB)
+#define LSR_THRE_SET(x) (((x) << LSR_THRE_LSB) & LSR_THRE_MASK)
+#define LSR_BI_MSB 4
+#define LSR_BI_LSB 4
+#define LSR_BI_MASK 0x00000010
+#define LSR_BI_GET(x) (((x) & LSR_BI_MASK) >> LSR_BI_LSB)
+#define LSR_BI_SET(x) (((x) << LSR_BI_LSB) & LSR_BI_MASK)
+#define LSR_FE_MSB 3
+#define LSR_FE_LSB 3
+#define LSR_FE_MASK 0x00000008
+#define LSR_FE_GET(x) (((x) & LSR_FE_MASK) >> LSR_FE_LSB)
+#define LSR_FE_SET(x) (((x) << LSR_FE_LSB) & LSR_FE_MASK)
+#define LSR_PE_MSB 2
+#define LSR_PE_LSB 2
+#define LSR_PE_MASK 0x00000004
+#define LSR_PE_GET(x) (((x) & LSR_PE_MASK) >> LSR_PE_LSB)
+#define LSR_PE_SET(x) (((x) << LSR_PE_LSB) & LSR_PE_MASK)
+#define LSR_OE_MSB 1
+#define LSR_OE_LSB 1
+#define LSR_OE_MASK 0x00000002
+#define LSR_OE_GET(x) (((x) & LSR_OE_MASK) >> LSR_OE_LSB)
+#define LSR_OE_SET(x) (((x) << LSR_OE_LSB) & LSR_OE_MASK)
+#define LSR_DR_MSB 0
+#define LSR_DR_LSB 0
+#define LSR_DR_MASK 0x00000001
+#define LSR_DR_GET(x) (((x) & LSR_DR_MASK) >> LSR_DR_LSB)
+#define LSR_DR_SET(x) (((x) << LSR_DR_LSB) & LSR_DR_MASK)
+
+#define MSR_ADDRESS 0x00000018
+#define MSR_OFFSET 0x00000018
+#define MSR_DCD_MSB 7
+#define MSR_DCD_LSB 7
+#define MSR_DCD_MASK 0x00000080
+#define MSR_DCD_GET(x) (((x) & MSR_DCD_MASK) >> MSR_DCD_LSB)
+#define MSR_DCD_SET(x) (((x) << MSR_DCD_LSB) & MSR_DCD_MASK)
+#define MSR_RI_MSB 6
+#define MSR_RI_LSB 6
+#define MSR_RI_MASK 0x00000040
+#define MSR_RI_GET(x) (((x) & MSR_RI_MASK) >> MSR_RI_LSB)
+#define MSR_RI_SET(x) (((x) << MSR_RI_LSB) & MSR_RI_MASK)
+#define MSR_DSR_MSB 5
+#define MSR_DSR_LSB 5
+#define MSR_DSR_MASK 0x00000020
+#define MSR_DSR_GET(x) (((x) & MSR_DSR_MASK) >> MSR_DSR_LSB)
+#define MSR_DSR_SET(x) (((x) << MSR_DSR_LSB) & MSR_DSR_MASK)
+#define MSR_CTS_MSB 4
+#define MSR_CTS_LSB 4
+#define MSR_CTS_MASK 0x00000010
+#define MSR_CTS_GET(x) (((x) & MSR_CTS_MASK) >> MSR_CTS_LSB)
+#define MSR_CTS_SET(x) (((x) << MSR_CTS_LSB) & MSR_CTS_MASK)
+#define MSR_DDCD_MSB 3
+#define MSR_DDCD_LSB 3
+#define MSR_DDCD_MASK 0x00000008
+#define MSR_DDCD_GET(x) (((x) & MSR_DDCD_MASK) >> MSR_DDCD_LSB)
+#define MSR_DDCD_SET(x) (((x) << MSR_DDCD_LSB) & MSR_DDCD_MASK)
+#define MSR_TERI_MSB 2
+#define MSR_TERI_LSB 2
+#define MSR_TERI_MASK 0x00000004
+#define MSR_TERI_GET(x) (((x) & MSR_TERI_MASK) >> MSR_TERI_LSB)
+#define MSR_TERI_SET(x) (((x) << MSR_TERI_LSB) & MSR_TERI_MASK)
+#define MSR_DDSR_MSB 1
+#define MSR_DDSR_LSB 1
+#define MSR_DDSR_MASK 0x00000002
+#define MSR_DDSR_GET(x) (((x) & MSR_DDSR_MASK) >> MSR_DDSR_LSB)
+#define MSR_DDSR_SET(x) (((x) << MSR_DDSR_LSB) & MSR_DDSR_MASK)
+#define MSR_DCTS_MSB 0
+#define MSR_DCTS_LSB 0
+#define MSR_DCTS_MASK 0x00000001
+#define MSR_DCTS_GET(x) (((x) & MSR_DCTS_MASK) >> MSR_DCTS_LSB)
+#define MSR_DCTS_SET(x) (((x) << MSR_DCTS_LSB) & MSR_DCTS_MASK)
+
+#define SCR_ADDRESS 0x0000001c
+#define SCR_OFFSET 0x0000001c
+#define SCR_SCR_MSB 7
+#define SCR_SCR_LSB 0
+#define SCR_SCR_MASK 0x000000ff
+#define SCR_SCR_GET(x) (((x) & SCR_SCR_MASK) >> SCR_SCR_LSB)
+#define SCR_SCR_SET(x) (((x) << SCR_SCR_LSB) & SCR_SCR_MASK)
+
+#define SRBR_ADDRESS 0x00000020
+#define SRBR_OFFSET 0x00000020
+#define SRBR_SRBR_MSB 7
+#define SRBR_SRBR_LSB 0
+#define SRBR_SRBR_MASK 0x000000ff
+#define SRBR_SRBR_GET(x) (((x) & SRBR_SRBR_MASK) >> SRBR_SRBR_LSB)
+#define SRBR_SRBR_SET(x) (((x) << SRBR_SRBR_LSB) & SRBR_SRBR_MASK)
+
+#define SIIR_ADDRESS 0x00000028
+#define SIIR_OFFSET 0x00000028
+#define SIIR_SIIR_MSB 7
+#define SIIR_SIIR_LSB 0
+#define SIIR_SIIR_MASK 0x000000ff
+#define SIIR_SIIR_GET(x) (((x) & SIIR_SIIR_MASK) >> SIIR_SIIR_LSB)
+#define SIIR_SIIR_SET(x) (((x) << SIIR_SIIR_LSB) & SIIR_SIIR_MASK)
+
+#define MWR_ADDRESS 0x0000002c
+#define MWR_OFFSET 0x0000002c
+#define MWR_MWR_MSB 31
+#define MWR_MWR_LSB 0
+#define MWR_MWR_MASK 0xffffffff
+#define MWR_MWR_GET(x) (((x) & MWR_MWR_MASK) >> MWR_MWR_LSB)
+#define MWR_MWR_SET(x) (((x) << MWR_MWR_LSB) & MWR_MWR_MASK)
+
+#define SLSR_ADDRESS 0x00000034
+#define SLSR_OFFSET 0x00000034
+#define SLSR_SLSR_MSB 7
+#define SLSR_SLSR_LSB 0
+#define SLSR_SLSR_MASK 0x000000ff
+#define SLSR_SLSR_GET(x) (((x) & SLSR_SLSR_MASK) >> SLSR_SLSR_LSB)
+#define SLSR_SLSR_SET(x) (((x) << SLSR_SLSR_LSB) & SLSR_SLSR_MASK)
+
+#define SMSR_ADDRESS 0x00000038
+#define SMSR_OFFSET 0x00000038
+#define SMSR_SMSR_MSB 7
+#define SMSR_SMSR_LSB 0
+#define SMSR_SMSR_MASK 0x000000ff
+#define SMSR_SMSR_GET(x) (((x) & SMSR_SMSR_MASK) >> SMSR_SMSR_LSB)
+#define SMSR_SMSR_SET(x) (((x) << SMSR_SMSR_LSB) & SMSR_SMSR_MASK)
+
+#define MRR_ADDRESS 0x0000003c
+#define MRR_OFFSET 0x0000003c
+#define MRR_MRR_MSB 31
+#define MRR_MRR_LSB 0
+#define MRR_MRR_MASK 0xffffffff
+#define MRR_MRR_GET(x) (((x) & MRR_MRR_MASK) >> MRR_MRR_LSB)
+#define MRR_MRR_SET(x) (((x) << MRR_MRR_LSB) & MRR_MRR_MASK)
+
+
+#ifndef __ASSEMBLER__
+
+typedef struct uart_reg_reg_s {
+ volatile unsigned int rbr;
+ volatile unsigned int dlh;
+ volatile unsigned int iir;
+ volatile unsigned int lcr;
+ volatile unsigned int mcr;
+ volatile unsigned int lsr;
+ volatile unsigned int msr;
+ volatile unsigned int scr;
+ volatile unsigned int srbr;
+ unsigned char pad0[4]; /* pad to 0x28 */
+ volatile unsigned int siir;
+ volatile unsigned int mwr;
+ unsigned char pad1[4]; /* pad to 0x34 */
+ volatile unsigned int slsr;
+ volatile unsigned int smsr;
+ volatile unsigned int mrr;
+} uart_reg_reg_t;
+
+#endif /* __ASSEMBLER__ */
+
+#endif /* _UART_REG_H_ */
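Editorial note (not part of the patch): besides the _ADDRESS macros, the header offers a volatile struct overlay, and the register names (DLAB in LCR, DLL/DLH sharing offsets 0x00/0x04) imply the classic 16550/DesignWare divisor-latch sequence. The sketch below follows that convention; the divisor value and the ioremap()ed base are assumptions, and the real firmware or driver may program the UART differently.

#include <linux/io.h>
#include <linux/types.h>

/* Illustrative 8N1 setup through the register overlay. Divisor 0x28 is
 * an arbitrary example value. */
static void uart_init_example(void __iomem *uart_base)
{
	volatile uart_reg_reg_t *uart = (volatile uart_reg_reg_t *)uart_base;

	/* Open the divisor latch; DLL/DLH share offsets with RBR/THR and IER. */
	uart->lcr = LCR_DLAB_SET(1);
	uart->rbr = DLL_DLL_SET(0x28);	/* offset 0x00 doubles as DLL */
	uart->dlh = DLH_DLH_SET(0x00);	/* offset 0x04 doubles as DLH */

	/* Close the latch and select 8 data bits, no parity, 1 stop bit. */
	uart->lcr = LCR_CLS_SET(3);

	/* Enable and reset both FIFOs; offset 0x08 doubles as FCR on write. */
	uart->iir = FCR_FIFO_EN_SET(1) |
		    FCR_RCVR_FIFO_RST_SET(1) |
		    FCR_XMIT_FIFO_RST_SET(1);
}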
diff --git a/drivers/staging/ath6kl/include/common/AR6002/hw2.0/hw/vmc_reg.h b/drivers/staging/ath6kl/include/common/AR6002/hw2.0/hw/vmc_reg.h
new file mode 100644
index 000000000000..932ec510d26b
--- /dev/null
+++ b/drivers/staging/ath6kl/include/common/AR6002/hw2.0/hw/vmc_reg.h
@@ -0,0 +1,76 @@
+#ifndef _VMC_REG_REG_H_
+#define _VMC_REG_REG_H_
+
+#define MC_TCAM_VALID_ADDRESS 0x00000000
+#define MC_TCAM_VALID_OFFSET 0x00000000
+#define MC_TCAM_VALID_BIT_MSB 0
+#define MC_TCAM_VALID_BIT_LSB 0
+#define MC_TCAM_VALID_BIT_MASK 0x00000001
+#define MC_TCAM_VALID_BIT_GET(x) (((x) & MC_TCAM_VALID_BIT_MASK) >> MC_TCAM_VALID_BIT_LSB)
+#define MC_TCAM_VALID_BIT_SET(x) (((x) << MC_TCAM_VALID_BIT_LSB) & MC_TCAM_VALID_BIT_MASK)
+
+#define MC_TCAM_MASK_ADDRESS 0x00000080
+#define MC_TCAM_MASK_OFFSET 0x00000080
+#define MC_TCAM_MASK_SIZE_MSB 2
+#define MC_TCAM_MASK_SIZE_LSB 0
+#define MC_TCAM_MASK_SIZE_MASK 0x00000007
+#define MC_TCAM_MASK_SIZE_GET(x) (((x) & MC_TCAM_MASK_SIZE_MASK) >> MC_TCAM_MASK_SIZE_LSB)
+#define MC_TCAM_MASK_SIZE_SET(x) (((x) << MC_TCAM_MASK_SIZE_LSB) & MC_TCAM_MASK_SIZE_MASK)
+
+#define MC_TCAM_COMPARE_ADDRESS 0x00000100
+#define MC_TCAM_COMPARE_OFFSET 0x00000100
+#define MC_TCAM_COMPARE_KEY_MSB 21
+#define MC_TCAM_COMPARE_KEY_LSB 5
+#define MC_TCAM_COMPARE_KEY_MASK 0x003fffe0
+#define MC_TCAM_COMPARE_KEY_GET(x) (((x) & MC_TCAM_COMPARE_KEY_MASK) >> MC_TCAM_COMPARE_KEY_LSB)
+#define MC_TCAM_COMPARE_KEY_SET(x) (((x) << MC_TCAM_COMPARE_KEY_LSB) & MC_TCAM_COMPARE_KEY_MASK)
+
+#define MC_TCAM_TARGET_ADDRESS 0x00000180
+#define MC_TCAM_TARGET_OFFSET 0x00000180
+#define MC_TCAM_TARGET_ADDR_MSB 21
+#define MC_TCAM_TARGET_ADDR_LSB 5
+#define MC_TCAM_TARGET_ADDR_MASK 0x003fffe0
+#define MC_TCAM_TARGET_ADDR_GET(x) (((x) & MC_TCAM_TARGET_ADDR_MASK) >> MC_TCAM_TARGET_ADDR_LSB)
+#define MC_TCAM_TARGET_ADDR_SET(x) (((x) << MC_TCAM_TARGET_ADDR_LSB) & MC_TCAM_TARGET_ADDR_MASK)
+
+#define ADDR_ERROR_CONTROL_ADDRESS 0x00000200
+#define ADDR_ERROR_CONTROL_OFFSET 0x00000200
+#define ADDR_ERROR_CONTROL_QUAL_ENABLE_MSB 1
+#define ADDR_ERROR_CONTROL_QUAL_ENABLE_LSB 1
+#define ADDR_ERROR_CONTROL_QUAL_ENABLE_MASK 0x00000002
+#define ADDR_ERROR_CONTROL_QUAL_ENABLE_GET(x) (((x) & ADDR_ERROR_CONTROL_QUAL_ENABLE_MASK) >> ADDR_ERROR_CONTROL_QUAL_ENABLE_LSB)
+#define ADDR_ERROR_CONTROL_QUAL_ENABLE_SET(x) (((x) << ADDR_ERROR_CONTROL_QUAL_ENABLE_LSB) & ADDR_ERROR_CONTROL_QUAL_ENABLE_MASK)
+#define ADDR_ERROR_CONTROL_ENABLE_MSB 0
+#define ADDR_ERROR_CONTROL_ENABLE_LSB 0
+#define ADDR_ERROR_CONTROL_ENABLE_MASK 0x00000001
+#define ADDR_ERROR_CONTROL_ENABLE_GET(x) (((x) & ADDR_ERROR_CONTROL_ENABLE_MASK) >> ADDR_ERROR_CONTROL_ENABLE_LSB)
+#define ADDR_ERROR_CONTROL_ENABLE_SET(x) (((x) << ADDR_ERROR_CONTROL_ENABLE_LSB) & ADDR_ERROR_CONTROL_ENABLE_MASK)
+
+#define ADDR_ERROR_STATUS_ADDRESS 0x00000204
+#define ADDR_ERROR_STATUS_OFFSET 0x00000204
+#define ADDR_ERROR_STATUS_WRITE_MSB 25
+#define ADDR_ERROR_STATUS_WRITE_LSB 25
+#define ADDR_ERROR_STATUS_WRITE_MASK 0x02000000
+#define ADDR_ERROR_STATUS_WRITE_GET(x) (((x) & ADDR_ERROR_STATUS_WRITE_MASK) >> ADDR_ERROR_STATUS_WRITE_LSB)
+#define ADDR_ERROR_STATUS_WRITE_SET(x) (((x) << ADDR_ERROR_STATUS_WRITE_LSB) & ADDR_ERROR_STATUS_WRITE_MASK)
+#define ADDR_ERROR_STATUS_ADDRESS_MSB 24
+#define ADDR_ERROR_STATUS_ADDRESS_LSB 0
+#define ADDR_ERROR_STATUS_ADDRESS_MASK 0x01ffffff
+#define ADDR_ERROR_STATUS_ADDRESS_GET(x) (((x) & ADDR_ERROR_STATUS_ADDRESS_MASK) >> ADDR_ERROR_STATUS_ADDRESS_LSB)
+#define ADDR_ERROR_STATUS_ADDRESS_SET(x) (((x) << ADDR_ERROR_STATUS_ADDRESS_LSB) & ADDR_ERROR_STATUS_ADDRESS_MASK)
+
+
+#ifndef __ASSEMBLER__
+
+typedef struct vmc_reg_reg_s {
+ volatile unsigned int mc_tcam_valid[32];
+ volatile unsigned int mc_tcam_mask[32];
+ volatile unsigned int mc_tcam_compare[32];
+ volatile unsigned int mc_tcam_target[32];
+ volatile unsigned int addr_error_control;
+ volatile unsigned int addr_error_status;
+} vmc_reg_reg_t;
+
+#endif /* __ASSEMBLER__ */
+
+#endif /* _VMC_REG_H_ */
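Editorial note (not part of the patch): the vmc overlay models the memory-controller TCAM as four parallel 32-entry arrays. A sketch of filling one entry through that struct follows; the arguments are raw field values (the _SET macros shift them into place), the ordering and the vmc_base pointer are assumptions for illustration only.

#include <linux/io.h>
#include <linux/types.h>

/* Illustrative only: program TCAM entry 'idx' (idx < 32) with a compare
 * key, a target address field and a size code, then mark it valid. */
static void vmc_tcam_set_entry(void __iomem *vmc_base, unsigned int idx,
			       u32 key, u32 target, u32 size)
{
	volatile vmc_reg_reg_t *vmc = (volatile vmc_reg_reg_t *)vmc_base;

	vmc->mc_tcam_compare[idx] = MC_TCAM_COMPARE_KEY_SET(key);
	vmc->mc_tcam_target[idx]  = MC_TCAM_TARGET_ADDR_SET(target);
	vmc->mc_tcam_mask[idx]    = MC_TCAM_MASK_SIZE_SET(size);
	vmc->mc_tcam_valid[idx]   = MC_TCAM_VALID_BIT_SET(1);
}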
diff --git a/drivers/staging/ath6kl/include/common/AR6002/hw4.0/hw/analog_intf_ares_reg.h b/drivers/staging/ath6kl/include/common/AR6002/hw4.0/hw/analog_intf_ares_reg.h
new file mode 100644
index 000000000000..5970fa94d4d2
--- /dev/null
+++ b/drivers/staging/ath6kl/include/common/AR6002/hw4.0/hw/analog_intf_ares_reg.h
@@ -0,0 +1,3291 @@
+// ------------------------------------------------------------------
+// Copyright (c) 2004-2010 Atheros Corporation. All rights reserved.
+//
+//
+// Permission to use, copy, modify, and/or distribute this software for any
+// purpose with or without fee is hereby granted, provided that the above
+// copyright notice and this permission notice appear in all copies.
+//
+// THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+// WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+// MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+// ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+// WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+// ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+// OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+//
+//
+// ------------------------------------------------------------------
+//===================================================================
+// Author(s): ="Atheros"
+//===================================================================
+
+/* Copyright (C) 2009 Denali Software Inc. All rights reserved */
+/* THIS FILE IS AUTOMATICALLY GENERATED BY DENALI BLUEPRINT, DO NOT EDIT */
+
+
+#ifndef _ANALOG_INTF_ARES_REG_REG_H_
+#define _ANALOG_INTF_ARES_REG_REG_H_
+
+
+/* macros for RXRF_BIAS1 */
+#define PHY_ANALOG_RXRF_BIAS1_ADDRESS 0x00000000
+#define PHY_ANALOG_RXRF_BIAS1_OFFSET 0x00000000
+#define PHY_ANALOG_RXRF_BIAS1_SPARE_MSB 0
+#define PHY_ANALOG_RXRF_BIAS1_SPARE_LSB 0
+#define PHY_ANALOG_RXRF_BIAS1_SPARE_MASK 0x00000001
+#define PHY_ANALOG_RXRF_BIAS1_SPARE_GET(x) (((x) & 0x00000001) >> 0)
+#define PHY_ANALOG_RXRF_BIAS1_SPARE_SET(x) (((x) << 0) & 0x00000001)
+#define PHY_ANALOG_RXRF_BIAS1_PWD_IR25SPARE_MSB 3
+#define PHY_ANALOG_RXRF_BIAS1_PWD_IR25SPARE_LSB 1
+#define PHY_ANALOG_RXRF_BIAS1_PWD_IR25SPARE_MASK 0x0000000e
+#define PHY_ANALOG_RXRF_BIAS1_PWD_IR25SPARE_GET(x) (((x) & 0x0000000e) >> 1)
+#define PHY_ANALOG_RXRF_BIAS1_PWD_IR25SPARE_SET(x) (((x) << 1) & 0x0000000e)
+#define PHY_ANALOG_RXRF_BIAS1_PWD_IR25LO18_MSB 6
+#define PHY_ANALOG_RXRF_BIAS1_PWD_IR25LO18_LSB 4
+#define PHY_ANALOG_RXRF_BIAS1_PWD_IR25LO18_MASK 0x00000070
+#define PHY_ANALOG_RXRF_BIAS1_PWD_IR25LO18_GET(x) (((x) & 0x00000070) >> 4)
+#define PHY_ANALOG_RXRF_BIAS1_PWD_IR25LO18_SET(x) (((x) << 4) & 0x00000070)
+#define PHY_ANALOG_RXRF_BIAS1_PWD_IC25LO36_MSB 9
+#define PHY_ANALOG_RXRF_BIAS1_PWD_IC25LO36_LSB 7
+#define PHY_ANALOG_RXRF_BIAS1_PWD_IC25LO36_MASK 0x00000380
+#define PHY_ANALOG_RXRF_BIAS1_PWD_IC25LO36_GET(x) (((x) & 0x00000380) >> 7)
+#define PHY_ANALOG_RXRF_BIAS1_PWD_IC25LO36_SET(x) (((x) << 7) & 0x00000380)
+#define PHY_ANALOG_RXRF_BIAS1_PWD_IC25MXR2_5GH_MSB 12
+#define PHY_ANALOG_RXRF_BIAS1_PWD_IC25MXR2_5GH_LSB 10
+#define PHY_ANALOG_RXRF_BIAS1_PWD_IC25MXR2_5GH_MASK 0x00001c00
+#define PHY_ANALOG_RXRF_BIAS1_PWD_IC25MXR2_5GH_GET(x) (((x) & 0x00001c00) >> 10)
+#define PHY_ANALOG_RXRF_BIAS1_PWD_IC25MXR2_5GH_SET(x) (((x) << 10) & 0x00001c00)
+#define PHY_ANALOG_RXRF_BIAS1_PWD_IC25MXR5GH_MSB 15
+#define PHY_ANALOG_RXRF_BIAS1_PWD_IC25MXR5GH_LSB 13
+#define PHY_ANALOG_RXRF_BIAS1_PWD_IC25MXR5GH_MASK 0x0000e000
+#define PHY_ANALOG_RXRF_BIAS1_PWD_IC25MXR5GH_GET(x) (((x) & 0x0000e000) >> 13)
+#define PHY_ANALOG_RXRF_BIAS1_PWD_IC25MXR5GH_SET(x) (((x) << 13) & 0x0000e000)
+#define PHY_ANALOG_RXRF_BIAS1_PWD_IC25VGA5G_MSB 18
+#define PHY_ANALOG_RXRF_BIAS1_PWD_IC25VGA5G_LSB 16
+#define PHY_ANALOG_RXRF_BIAS1_PWD_IC25VGA5G_MASK 0x00070000
+#define PHY_ANALOG_RXRF_BIAS1_PWD_IC25VGA5G_GET(x) (((x) & 0x00070000) >> 16)
+#define PHY_ANALOG_RXRF_BIAS1_PWD_IC25VGA5G_SET(x) (((x) << 16) & 0x00070000)
+#define PHY_ANALOG_RXRF_BIAS1_PWD_IC75LNA5G_MSB 21
+#define PHY_ANALOG_RXRF_BIAS1_PWD_IC75LNA5G_LSB 19
+#define PHY_ANALOG_RXRF_BIAS1_PWD_IC75LNA5G_MASK 0x00380000
+#define PHY_ANALOG_RXRF_BIAS1_PWD_IC75LNA5G_GET(x) (((x) & 0x00380000) >> 19)
+#define PHY_ANALOG_RXRF_BIAS1_PWD_IC75LNA5G_SET(x) (((x) << 19) & 0x00380000)
+#define PHY_ANALOG_RXRF_BIAS1_PWD_IR25LO24_MSB 24
+#define PHY_ANALOG_RXRF_BIAS1_PWD_IR25LO24_LSB 22
+#define PHY_ANALOG_RXRF_BIAS1_PWD_IR25LO24_MASK 0x01c00000
+#define PHY_ANALOG_RXRF_BIAS1_PWD_IR25LO24_GET(x) (((x) & 0x01c00000) >> 22)
+#define PHY_ANALOG_RXRF_BIAS1_PWD_IR25LO24_SET(x) (((x) << 22) & 0x01c00000)
+#define PHY_ANALOG_RXRF_BIAS1_PWD_IC25MXR2GH_MSB 27
+#define PHY_ANALOG_RXRF_BIAS1_PWD_IC25MXR2GH_LSB 25
+#define PHY_ANALOG_RXRF_BIAS1_PWD_IC25MXR2GH_MASK 0x0e000000
+#define PHY_ANALOG_RXRF_BIAS1_PWD_IC25MXR2GH_GET(x) (((x) & 0x0e000000) >> 25)
+#define PHY_ANALOG_RXRF_BIAS1_PWD_IC25MXR2GH_SET(x) (((x) << 25) & 0x0e000000)
+#define PHY_ANALOG_RXRF_BIAS1_PWD_IC75LNA2G_MSB 30
+#define PHY_ANALOG_RXRF_BIAS1_PWD_IC75LNA2G_LSB 28
+#define PHY_ANALOG_RXRF_BIAS1_PWD_IC75LNA2G_MASK 0x70000000
+#define PHY_ANALOG_RXRF_BIAS1_PWD_IC75LNA2G_GET(x) (((x) & 0x70000000) >> 28)
+#define PHY_ANALOG_RXRF_BIAS1_PWD_IC75LNA2G_SET(x) (((x) << 28) & 0x70000000)
+#define PHY_ANALOG_RXRF_BIAS1_PWD_BIAS_MSB 31
+#define PHY_ANALOG_RXRF_BIAS1_PWD_BIAS_LSB 31
+#define PHY_ANALOG_RXRF_BIAS1_PWD_BIAS_MASK 0x80000000
+#define PHY_ANALOG_RXRF_BIAS1_PWD_BIAS_GET(x) (((x) & 0x80000000) >> 31)
+#define PHY_ANALOG_RXRF_BIAS1_PWD_BIAS_SET(x) (((x) << 31) & 0x80000000)
+
+/* macros for RXRF_BIAS2 */
+#define PHY_ANALOG_RXRF_BIAS2_ADDRESS 0x00000004
+#define PHY_ANALOG_RXRF_BIAS2_OFFSET 0x00000004
+#define PHY_ANALOG_RXRF_BIAS2_SPARE_MSB 0
+#define PHY_ANALOG_RXRF_BIAS2_SPARE_LSB 0
+#define PHY_ANALOG_RXRF_BIAS2_SPARE_MASK 0x00000001
+#define PHY_ANALOG_RXRF_BIAS2_SPARE_GET(x) (((x) & 0x00000001) >> 0)
+#define PHY_ANALOG_RXRF_BIAS2_SPARE_SET(x) (((x) << 0) & 0x00000001)
+#define PHY_ANALOG_RXRF_BIAS2_PKEN_MSB 3
+#define PHY_ANALOG_RXRF_BIAS2_PKEN_LSB 1
+#define PHY_ANALOG_RXRF_BIAS2_PKEN_MASK 0x0000000e
+#define PHY_ANALOG_RXRF_BIAS2_PKEN_GET(x) (((x) & 0x0000000e) >> 1)
+#define PHY_ANALOG_RXRF_BIAS2_PKEN_SET(x) (((x) << 1) & 0x0000000e)
+#define PHY_ANALOG_RXRF_BIAS2_VCMVALUE_MSB 6
+#define PHY_ANALOG_RXRF_BIAS2_VCMVALUE_LSB 4
+#define PHY_ANALOG_RXRF_BIAS2_VCMVALUE_MASK 0x00000070
+#define PHY_ANALOG_RXRF_BIAS2_VCMVALUE_GET(x) (((x) & 0x00000070) >> 4)
+#define PHY_ANALOG_RXRF_BIAS2_VCMVALUE_SET(x) (((x) << 4) & 0x00000070)
+#define PHY_ANALOG_RXRF_BIAS2_PWD_VCMBUF_MSB 7
+#define PHY_ANALOG_RXRF_BIAS2_PWD_VCMBUF_LSB 7
+#define PHY_ANALOG_RXRF_BIAS2_PWD_VCMBUF_MASK 0x00000080
+#define PHY_ANALOG_RXRF_BIAS2_PWD_VCMBUF_GET(x) (((x) & 0x00000080) >> 7)
+#define PHY_ANALOG_RXRF_BIAS2_PWD_VCMBUF_SET(x) (((x) << 7) & 0x00000080)
+#define PHY_ANALOG_RXRF_BIAS2_PWD_IR25AGC5GH_MSB 10
+#define PHY_ANALOG_RXRF_BIAS2_PWD_IR25AGC5GH_LSB 8
+#define PHY_ANALOG_RXRF_BIAS2_PWD_IR25AGC5GH_MASK 0x00000700
+#define PHY_ANALOG_RXRF_BIAS2_PWD_IR25AGC5GH_GET(x) (((x) & 0x00000700) >> 8)
+#define PHY_ANALOG_RXRF_BIAS2_PWD_IR25AGC5GH_SET(x) (((x) << 8) & 0x00000700)
+#define PHY_ANALOG_RXRF_BIAS2_PWD_IR25AGC5G_MSB 13
+#define PHY_ANALOG_RXRF_BIAS2_PWD_IR25AGC5G_LSB 11
+#define PHY_ANALOG_RXRF_BIAS2_PWD_IR25AGC5G_MASK 0x00003800
+#define PHY_ANALOG_RXRF_BIAS2_PWD_IR25AGC5G_GET(x) (((x) & 0x00003800) >> 11)
+#define PHY_ANALOG_RXRF_BIAS2_PWD_IR25AGC5G_SET(x) (((x) << 11) & 0x00003800)
+#define PHY_ANALOG_RXRF_BIAS2_PWD_IC25AGC5G_MSB 16
+#define PHY_ANALOG_RXRF_BIAS2_PWD_IC25AGC5G_LSB 14
+#define PHY_ANALOG_RXRF_BIAS2_PWD_IC25AGC5G_MASK 0x0001c000
+#define PHY_ANALOG_RXRF_BIAS2_PWD_IC25AGC5G_GET(x) (((x) & 0x0001c000) >> 14)
+#define PHY_ANALOG_RXRF_BIAS2_PWD_IC25AGC5G_SET(x) (((x) << 14) & 0x0001c000)
+#define PHY_ANALOG_RXRF_BIAS2_PWD_IR25AGC2GH_MSB 19
+#define PHY_ANALOG_RXRF_BIAS2_PWD_IR25AGC2GH_LSB 17
+#define PHY_ANALOG_RXRF_BIAS2_PWD_IR25AGC2GH_MASK 0x000e0000
+#define PHY_ANALOG_RXRF_BIAS2_PWD_IR25AGC2GH_GET(x) (((x) & 0x000e0000) >> 17)
+#define PHY_ANALOG_RXRF_BIAS2_PWD_IR25AGC2GH_SET(x) (((x) << 17) & 0x000e0000)
+#define PHY_ANALOG_RXRF_BIAS2_PWD_IR25AGC2G_MSB 22
+#define PHY_ANALOG_RXRF_BIAS2_PWD_IR25AGC2G_LSB 20
+#define PHY_ANALOG_RXRF_BIAS2_PWD_IR25AGC2G_MASK 0x00700000
+#define PHY_ANALOG_RXRF_BIAS2_PWD_IR25AGC2G_GET(x) (((x) & 0x00700000) >> 20)
+#define PHY_ANALOG_RXRF_BIAS2_PWD_IR25AGC2G_SET(x) (((x) << 20) & 0x00700000)
+#define PHY_ANALOG_RXRF_BIAS2_PWD_IC25AGC2G_MSB 25
+#define PHY_ANALOG_RXRF_BIAS2_PWD_IC25AGC2G_LSB 23
+#define PHY_ANALOG_RXRF_BIAS2_PWD_IC25AGC2G_MASK 0x03800000
+#define PHY_ANALOG_RXRF_BIAS2_PWD_IC25AGC2G_GET(x) (((x) & 0x03800000) >> 23)
+#define PHY_ANALOG_RXRF_BIAS2_PWD_IC25AGC2G_SET(x) (((x) << 23) & 0x03800000)
+#define PHY_ANALOG_RXRF_BIAS2_PWD_IC25VCMBUF_MSB 28
+#define PHY_ANALOG_RXRF_BIAS2_PWD_IC25VCMBUF_LSB 26
+#define PHY_ANALOG_RXRF_BIAS2_PWD_IC25VCMBUF_MASK 0x1c000000
+#define PHY_ANALOG_RXRF_BIAS2_PWD_IC25VCMBUF_GET(x) (((x) & 0x1c000000) >> 26)
+#define PHY_ANALOG_RXRF_BIAS2_PWD_IC25VCMBUF_SET(x) (((x) << 26) & 0x1c000000)
+#define PHY_ANALOG_RXRF_BIAS2_PWD_IR25VCM_MSB 31
+#define PHY_ANALOG_RXRF_BIAS2_PWD_IR25VCM_LSB 29
+#define PHY_ANALOG_RXRF_BIAS2_PWD_IR25VCM_MASK 0xe0000000
+#define PHY_ANALOG_RXRF_BIAS2_PWD_IR25VCM_GET(x) (((x) & 0xe0000000) >> 29)
+#define PHY_ANALOG_RXRF_BIAS2_PWD_IR25VCM_SET(x) (((x) << 29) & 0xe0000000)
+
+/* macros for RXRF_GAINSTAGES */
+#define PHY_ANALOG_RXRF_GAINSTAGES_ADDRESS 0x00000008
+#define PHY_ANALOG_RXRF_GAINSTAGES_OFFSET 0x00000008
+#define PHY_ANALOG_RXRF_GAINSTAGES_SPARE_MSB 0
+#define PHY_ANALOG_RXRF_GAINSTAGES_SPARE_LSB 0
+#define PHY_ANALOG_RXRF_GAINSTAGES_SPARE_MASK 0x00000001
+#define PHY_ANALOG_RXRF_GAINSTAGES_SPARE_GET(x) (((x) & 0x00000001) >> 0)
+#define PHY_ANALOG_RXRF_GAINSTAGES_SPARE_SET(x) (((x) << 0) & 0x00000001)
+#define PHY_ANALOG_RXRF_GAINSTAGES_LNAON_CALDC_MSB 1
+#define PHY_ANALOG_RXRF_GAINSTAGES_LNAON_CALDC_LSB 1
+#define PHY_ANALOG_RXRF_GAINSTAGES_LNAON_CALDC_MASK 0x00000002
+#define PHY_ANALOG_RXRF_GAINSTAGES_LNAON_CALDC_GET(x) (((x) & 0x00000002) >> 1)
+#define PHY_ANALOG_RXRF_GAINSTAGES_LNAON_CALDC_SET(x) (((x) << 1) & 0x00000002)
+#define PHY_ANALOG_RXRF_GAINSTAGES_VGA5G_CAP_MSB 3
+#define PHY_ANALOG_RXRF_GAINSTAGES_VGA5G_CAP_LSB 2
+#define PHY_ANALOG_RXRF_GAINSTAGES_VGA5G_CAP_MASK 0x0000000c
+#define PHY_ANALOG_RXRF_GAINSTAGES_VGA5G_CAP_GET(x) (((x) & 0x0000000c) >> 2)
+#define PHY_ANALOG_RXRF_GAINSTAGES_VGA5G_CAP_SET(x) (((x) << 2) & 0x0000000c)
+#define PHY_ANALOG_RXRF_GAINSTAGES_LNA5G_CAP_MSB 5
+#define PHY_ANALOG_RXRF_GAINSTAGES_LNA5G_CAP_LSB 4
+#define PHY_ANALOG_RXRF_GAINSTAGES_LNA5G_CAP_MASK 0x00000030
+#define PHY_ANALOG_RXRF_GAINSTAGES_LNA5G_CAP_GET(x) (((x) & 0x00000030) >> 4)
+#define PHY_ANALOG_RXRF_GAINSTAGES_LNA5G_CAP_SET(x) (((x) << 4) & 0x00000030)
+#define PHY_ANALOG_RXRF_GAINSTAGES_LNA5G_SHORTINP_MSB 6
+#define PHY_ANALOG_RXRF_GAINSTAGES_LNA5G_SHORTINP_LSB 6
+#define PHY_ANALOG_RXRF_GAINSTAGES_LNA5G_SHORTINP_MASK 0x00000040
+#define PHY_ANALOG_RXRF_GAINSTAGES_LNA5G_SHORTINP_GET(x) (((x) & 0x00000040) >> 6)
+#define PHY_ANALOG_RXRF_GAINSTAGES_LNA5G_SHORTINP_SET(x) (((x) << 6) & 0x00000040)
+#define PHY_ANALOG_RXRF_GAINSTAGES_PWD_LO5G_MSB 7
+#define PHY_ANALOG_RXRF_GAINSTAGES_PWD_LO5G_LSB 7
+#define PHY_ANALOG_RXRF_GAINSTAGES_PWD_LO5G_MASK 0x00000080
+#define PHY_ANALOG_RXRF_GAINSTAGES_PWD_LO5G_GET(x) (((x) & 0x00000080) >> 7)
+#define PHY_ANALOG_RXRF_GAINSTAGES_PWD_LO5G_SET(x) (((x) << 7) & 0x00000080)
+#define PHY_ANALOG_RXRF_GAINSTAGES_PWD_VGA5G_MSB 8
+#define PHY_ANALOG_RXRF_GAINSTAGES_PWD_VGA5G_LSB 8
+#define PHY_ANALOG_RXRF_GAINSTAGES_PWD_VGA5G_MASK 0x00000100
+#define PHY_ANALOG_RXRF_GAINSTAGES_PWD_VGA5G_GET(x) (((x) & 0x00000100) >> 8)
+#define PHY_ANALOG_RXRF_GAINSTAGES_PWD_VGA5G_SET(x) (((x) << 8) & 0x00000100)
+#define PHY_ANALOG_RXRF_GAINSTAGES_PWD_MXR5G_MSB 9
+#define PHY_ANALOG_RXRF_GAINSTAGES_PWD_MXR5G_LSB 9
+#define PHY_ANALOG_RXRF_GAINSTAGES_PWD_MXR5G_MASK 0x00000200
+#define PHY_ANALOG_RXRF_GAINSTAGES_PWD_MXR5G_GET(x) (((x) & 0x00000200) >> 9)
+#define PHY_ANALOG_RXRF_GAINSTAGES_PWD_MXR5G_SET(x) (((x) << 9) & 0x00000200)
+#define PHY_ANALOG_RXRF_GAINSTAGES_PWD_LNA5G_MSB 10
+#define PHY_ANALOG_RXRF_GAINSTAGES_PWD_LNA5G_LSB 10
+#define PHY_ANALOG_RXRF_GAINSTAGES_PWD_LNA5G_MASK 0x00000400
+#define PHY_ANALOG_RXRF_GAINSTAGES_PWD_LNA5G_GET(x) (((x) & 0x00000400) >> 10)
+#define PHY_ANALOG_RXRF_GAINSTAGES_PWD_LNA5G_SET(x) (((x) << 10) & 0x00000400)
+#define PHY_ANALOG_RXRF_GAINSTAGES_LNA2G_CAP_MSB 12
+#define PHY_ANALOG_RXRF_GAINSTAGES_LNA2G_CAP_LSB 11
+#define PHY_ANALOG_RXRF_GAINSTAGES_LNA2G_CAP_MASK 0x00001800
+#define PHY_ANALOG_RXRF_GAINSTAGES_LNA2G_CAP_GET(x) (((x) & 0x00001800) >> 11)
+#define PHY_ANALOG_RXRF_GAINSTAGES_LNA2G_CAP_SET(x) (((x) << 11) & 0x00001800)
+#define PHY_ANALOG_RXRF_GAINSTAGES_LNA2G_SHORTINP_MSB 13
+#define PHY_ANALOG_RXRF_GAINSTAGES_LNA2G_SHORTINP_LSB 13
+#define PHY_ANALOG_RXRF_GAINSTAGES_LNA2G_SHORTINP_MASK 0x00002000
+#define PHY_ANALOG_RXRF_GAINSTAGES_LNA2G_SHORTINP_GET(x) (((x) & 0x00002000) >> 13)
+#define PHY_ANALOG_RXRF_GAINSTAGES_LNA2G_SHORTINP_SET(x) (((x) << 13) & 0x00002000)
+#define PHY_ANALOG_RXRF_GAINSTAGES_LNA2G_LP_MSB 14
+#define PHY_ANALOG_RXRF_GAINSTAGES_LNA2G_LP_LSB 14
+#define PHY_ANALOG_RXRF_GAINSTAGES_LNA2G_LP_MASK 0x00004000
+#define PHY_ANALOG_RXRF_GAINSTAGES_LNA2G_LP_GET(x) (((x) & 0x00004000) >> 14)
+#define PHY_ANALOG_RXRF_GAINSTAGES_LNA2G_LP_SET(x) (((x) << 14) & 0x00004000)
+#define PHY_ANALOG_RXRF_GAINSTAGES_PWD_LO2G_MSB 15
+#define PHY_ANALOG_RXRF_GAINSTAGES_PWD_LO2G_LSB 15
+#define PHY_ANALOG_RXRF_GAINSTAGES_PWD_LO2G_MASK 0x00008000
+#define PHY_ANALOG_RXRF_GAINSTAGES_PWD_LO2G_GET(x) (((x) & 0x00008000) >> 15)
+#define PHY_ANALOG_RXRF_GAINSTAGES_PWD_LO2G_SET(x) (((x) << 15) & 0x00008000)
+#define PHY_ANALOG_RXRF_GAINSTAGES_PWD_MXR2G_MSB 16
+#define PHY_ANALOG_RXRF_GAINSTAGES_PWD_MXR2G_LSB 16
+#define PHY_ANALOG_RXRF_GAINSTAGES_PWD_MXR2G_MASK 0x00010000
+#define PHY_ANALOG_RXRF_GAINSTAGES_PWD_MXR2G_GET(x) (((x) & 0x00010000) >> 16)
+#define PHY_ANALOG_RXRF_GAINSTAGES_PWD_MXR2G_SET(x) (((x) << 16) & 0x00010000)
+#define PHY_ANALOG_RXRF_GAINSTAGES_PWD_LNA2G_MSB 17
+#define PHY_ANALOG_RXRF_GAINSTAGES_PWD_LNA2G_LSB 17
+#define PHY_ANALOG_RXRF_GAINSTAGES_PWD_LNA2G_MASK 0x00020000
+#define PHY_ANALOG_RXRF_GAINSTAGES_PWD_LNA2G_GET(x) (((x) & 0x00020000) >> 17)
+#define PHY_ANALOG_RXRF_GAINSTAGES_PWD_LNA2G_SET(x) (((x) << 17) & 0x00020000)
+#define PHY_ANALOG_RXRF_GAINSTAGES_MXR5G_GAIN_OVR_MSB 19
+#define PHY_ANALOG_RXRF_GAINSTAGES_MXR5G_GAIN_OVR_LSB 18
+#define PHY_ANALOG_RXRF_GAINSTAGES_MXR5G_GAIN_OVR_MASK 0x000c0000
+#define PHY_ANALOG_RXRF_GAINSTAGES_MXR5G_GAIN_OVR_GET(x) (((x) & 0x000c0000) >> 18)
+#define PHY_ANALOG_RXRF_GAINSTAGES_MXR5G_GAIN_OVR_SET(x) (((x) << 18) & 0x000c0000)
+#define PHY_ANALOG_RXRF_GAINSTAGES_VGA5G_GAIN_OVR_MSB 22
+#define PHY_ANALOG_RXRF_GAINSTAGES_VGA5G_GAIN_OVR_LSB 20
+#define PHY_ANALOG_RXRF_GAINSTAGES_VGA5G_GAIN_OVR_MASK 0x00700000
+#define PHY_ANALOG_RXRF_GAINSTAGES_VGA5G_GAIN_OVR_GET(x) (((x) & 0x00700000) >> 20)
+#define PHY_ANALOG_RXRF_GAINSTAGES_VGA5G_GAIN_OVR_SET(x) (((x) << 20) & 0x00700000)
+#define PHY_ANALOG_RXRF_GAINSTAGES_LNA5G_GAIN_OVR_MSB 25
+#define PHY_ANALOG_RXRF_GAINSTAGES_LNA5G_GAIN_OVR_LSB 23
+#define PHY_ANALOG_RXRF_GAINSTAGES_LNA5G_GAIN_OVR_MASK 0x03800000
+#define PHY_ANALOG_RXRF_GAINSTAGES_LNA5G_GAIN_OVR_GET(x) (((x) & 0x03800000) >> 23)
+#define PHY_ANALOG_RXRF_GAINSTAGES_LNA5G_GAIN_OVR_SET(x) (((x) << 23) & 0x03800000)
+#define PHY_ANALOG_RXRF_GAINSTAGES_MXR2G_GAIN_OVR_MSB 27
+#define PHY_ANALOG_RXRF_GAINSTAGES_MXR2G_GAIN_OVR_LSB 26
+#define PHY_ANALOG_RXRF_GAINSTAGES_MXR2G_GAIN_OVR_MASK 0x0c000000
+#define PHY_ANALOG_RXRF_GAINSTAGES_MXR2G_GAIN_OVR_GET(x) (((x) & 0x0c000000) >> 26)
+#define PHY_ANALOG_RXRF_GAINSTAGES_MXR2G_GAIN_OVR_SET(x) (((x) << 26) & 0x0c000000)
+#define PHY_ANALOG_RXRF_GAINSTAGES_LNA2G_GAIN_OVR_MSB 30
+#define PHY_ANALOG_RXRF_GAINSTAGES_LNA2G_GAIN_OVR_LSB 28
+#define PHY_ANALOG_RXRF_GAINSTAGES_LNA2G_GAIN_OVR_MASK 0x70000000
+#define PHY_ANALOG_RXRF_GAINSTAGES_LNA2G_GAIN_OVR_GET(x) (((x) & 0x70000000) >> 28)
+#define PHY_ANALOG_RXRF_GAINSTAGES_LNA2G_GAIN_OVR_SET(x) (((x) << 28) & 0x70000000)
+#define PHY_ANALOG_RXRF_GAINSTAGES_RX_OVERRIDE_MSB 31
+#define PHY_ANALOG_RXRF_GAINSTAGES_RX_OVERRIDE_LSB 31
+#define PHY_ANALOG_RXRF_GAINSTAGES_RX_OVERRIDE_MASK 0x80000000
+#define PHY_ANALOG_RXRF_GAINSTAGES_RX_OVERRIDE_GET(x) (((x) & 0x80000000) >> 31)
+#define PHY_ANALOG_RXRF_GAINSTAGES_RX_OVERRIDE_SET(x) (((x) << 31) & 0x80000000)
+
+/* macros for RXRF_AGC */
+#define PHY_ANALOG_RXRF_AGC_ADDRESS 0x0000000c
+#define PHY_ANALOG_RXRF_AGC_OFFSET 0x0000000c
+#define PHY_ANALOG_RXRF_AGC_SPARE_MSB 5
+#define PHY_ANALOG_RXRF_AGC_SPARE_LSB 0
+#define PHY_ANALOG_RXRF_AGC_SPARE_MASK 0x0000003f
+#define PHY_ANALOG_RXRF_AGC_SPARE_GET(x) (((x) & 0x0000003f) >> 0)
+#define PHY_ANALOG_RXRF_AGC_SPARE_SET(x) (((x) << 0) & 0x0000003f)
+#define PHY_ANALOG_RXRF_AGC_AGC_FALL_CTRL_MSB 8
+#define PHY_ANALOG_RXRF_AGC_AGC_FALL_CTRL_LSB 6
+#define PHY_ANALOG_RXRF_AGC_AGC_FALL_CTRL_MASK 0x000001c0
+#define PHY_ANALOG_RXRF_AGC_AGC_FALL_CTRL_GET(x) (((x) & 0x000001c0) >> 6)
+#define PHY_ANALOG_RXRF_AGC_AGC_FALL_CTRL_SET(x) (((x) << 6) & 0x000001c0)
+#define PHY_ANALOG_RXRF_AGC_AGC5G_CALDAC_OVR_MSB 14
+#define PHY_ANALOG_RXRF_AGC_AGC5G_CALDAC_OVR_LSB 9
+#define PHY_ANALOG_RXRF_AGC_AGC5G_CALDAC_OVR_MASK 0x00007e00
+#define PHY_ANALOG_RXRF_AGC_AGC5G_CALDAC_OVR_GET(x) (((x) & 0x00007e00) >> 9)
+#define PHY_ANALOG_RXRF_AGC_AGC5G_CALDAC_OVR_SET(x) (((x) << 9) & 0x00007e00)
+#define PHY_ANALOG_RXRF_AGC_AGC5G_DBDAC_OVR_MSB 18
+#define PHY_ANALOG_RXRF_AGC_AGC5G_DBDAC_OVR_LSB 15
+#define PHY_ANALOG_RXRF_AGC_AGC5G_DBDAC_OVR_MASK 0x00078000
+#define PHY_ANALOG_RXRF_AGC_AGC5G_DBDAC_OVR_GET(x) (((x) & 0x00078000) >> 15)
+#define PHY_ANALOG_RXRF_AGC_AGC5G_DBDAC_OVR_SET(x) (((x) << 15) & 0x00078000)
+#define PHY_ANALOG_RXRF_AGC_AGC2G_CALDAC_OVR_MSB 24
+#define PHY_ANALOG_RXRF_AGC_AGC2G_CALDAC_OVR_LSB 19
+#define PHY_ANALOG_RXRF_AGC_AGC2G_CALDAC_OVR_MASK 0x01f80000
+#define PHY_ANALOG_RXRF_AGC_AGC2G_CALDAC_OVR_GET(x) (((x) & 0x01f80000) >> 19)
+#define PHY_ANALOG_RXRF_AGC_AGC2G_CALDAC_OVR_SET(x) (((x) << 19) & 0x01f80000)
+#define PHY_ANALOG_RXRF_AGC_AGC2G_DBDAC_OVR_MSB 28
+#define PHY_ANALOG_RXRF_AGC_AGC2G_DBDAC_OVR_LSB 25
+#define PHY_ANALOG_RXRF_AGC_AGC2G_DBDAC_OVR_MASK 0x1e000000
+#define PHY_ANALOG_RXRF_AGC_AGC2G_DBDAC_OVR_GET(x) (((x) & 0x1e000000) >> 25)
+#define PHY_ANALOG_RXRF_AGC_AGC2G_DBDAC_OVR_SET(x) (((x) << 25) & 0x1e000000)
+#define PHY_ANALOG_RXRF_AGC_AGC_CAL_OVR_MSB 29
+#define PHY_ANALOG_RXRF_AGC_AGC_CAL_OVR_LSB 29
+#define PHY_ANALOG_RXRF_AGC_AGC_CAL_OVR_MASK 0x20000000
+#define PHY_ANALOG_RXRF_AGC_AGC_CAL_OVR_GET(x) (((x) & 0x20000000) >> 29)
+#define PHY_ANALOG_RXRF_AGC_AGC_CAL_OVR_SET(x) (((x) << 29) & 0x20000000)
+#define PHY_ANALOG_RXRF_AGC_AGC_ON_OVR_MSB 30
+#define PHY_ANALOG_RXRF_AGC_AGC_ON_OVR_LSB 30
+#define PHY_ANALOG_RXRF_AGC_AGC_ON_OVR_MASK 0x40000000
+#define PHY_ANALOG_RXRF_AGC_AGC_ON_OVR_GET(x) (((x) & 0x40000000) >> 30)
+#define PHY_ANALOG_RXRF_AGC_AGC_ON_OVR_SET(x) (((x) << 30) & 0x40000000)
+#define PHY_ANALOG_RXRF_AGC_AGC_OVERRIDE_MSB 31
+#define PHY_ANALOG_RXRF_AGC_AGC_OVERRIDE_LSB 31
+#define PHY_ANALOG_RXRF_AGC_AGC_OVERRIDE_MASK 0x80000000
+#define PHY_ANALOG_RXRF_AGC_AGC_OVERRIDE_GET(x) (((x) & 0x80000000) >> 31)
+#define PHY_ANALOG_RXRF_AGC_AGC_OVERRIDE_SET(x) (((x) << 31) & 0x80000000)
+
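/*
 * Editorial aside, not part of the patch above: a minimal sketch of how
 * field macros of this _MASK/_GET/_SET shape are conventionally combined
 * for a read-modify-write of a single field. The helper name and the
 * "regval" parameter are hypothetical; only the PHY_ANALOG_RXRF_AGC_*
 * macro names come from the definitions above.
 */
static inline unsigned int
rxrf_agc_with_fall_ctrl(unsigned int regval, unsigned int fall_ctrl)
{
	/* Clear the 3-bit AGC_FALL_CTRL field, then insert the new value. */
	regval &= ~PHY_ANALOG_RXRF_AGC_AGC_FALL_CTRL_MASK;
	regval |= PHY_ANALOG_RXRF_AGC_AGC_FALL_CTRL_SET(fall_ctrl);
	return regval;
}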
+/* macros for TXRF1 */
+#define PHY_ANALOG_TXRF1_ADDRESS 0x00000040
+#define PHY_ANALOG_TXRF1_OFFSET 0x00000040
+#define PHY_ANALOG_TXRF1_DCAS2G_MSB 2
+#define PHY_ANALOG_TXRF1_DCAS2G_LSB 0
+#define PHY_ANALOG_TXRF1_DCAS2G_MASK 0x00000007
+#define PHY_ANALOG_TXRF1_DCAS2G_GET(x) (((x) & 0x00000007) >> 0)
+#define PHY_ANALOG_TXRF1_DCAS2G_SET(x) (((x) << 0) & 0x00000007)
+#define PHY_ANALOG_TXRF1_OB2G_PALOFF_MSB 5
+#define PHY_ANALOG_TXRF1_OB2G_PALOFF_LSB 3
+#define PHY_ANALOG_TXRF1_OB2G_PALOFF_MASK 0x00000038
+#define PHY_ANALOG_TXRF1_OB2G_PALOFF_GET(x) (((x) & 0x00000038) >> 3)
+#define PHY_ANALOG_TXRF1_OB2G_PALOFF_SET(x) (((x) << 3) & 0x00000038)
+#define PHY_ANALOG_TXRF1_OB2G_QAM_MSB 8
+#define PHY_ANALOG_TXRF1_OB2G_QAM_LSB 6
+#define PHY_ANALOG_TXRF1_OB2G_QAM_MASK 0x000001c0
+#define PHY_ANALOG_TXRF1_OB2G_QAM_GET(x) (((x) & 0x000001c0) >> 6)
+#define PHY_ANALOG_TXRF1_OB2G_QAM_SET(x) (((x) << 6) & 0x000001c0)
+#define PHY_ANALOG_TXRF1_OB2G_PSK_MSB 11
+#define PHY_ANALOG_TXRF1_OB2G_PSK_LSB 9
+#define PHY_ANALOG_TXRF1_OB2G_PSK_MASK 0x00000e00
+#define PHY_ANALOG_TXRF1_OB2G_PSK_GET(x) (((x) & 0x00000e00) >> 9)
+#define PHY_ANALOG_TXRF1_OB2G_PSK_SET(x) (((x) << 9) & 0x00000e00)
+#define PHY_ANALOG_TXRF1_OB2G_CCK_MSB 14
+#define PHY_ANALOG_TXRF1_OB2G_CCK_LSB 12
+#define PHY_ANALOG_TXRF1_OB2G_CCK_MASK 0x00007000
+#define PHY_ANALOG_TXRF1_OB2G_CCK_GET(x) (((x) & 0x00007000) >> 12)
+#define PHY_ANALOG_TXRF1_OB2G_CCK_SET(x) (((x) << 12) & 0x00007000)
+#define PHY_ANALOG_TXRF1_DB2G_MSB 17
+#define PHY_ANALOG_TXRF1_DB2G_LSB 15
+#define PHY_ANALOG_TXRF1_DB2G_MASK 0x00038000
+#define PHY_ANALOG_TXRF1_DB2G_GET(x) (((x) & 0x00038000) >> 15)
+#define PHY_ANALOG_TXRF1_DB2G_SET(x) (((x) << 15) & 0x00038000)
+#define PHY_ANALOG_TXRF1_PDOUT2G_MSB 18
+#define PHY_ANALOG_TXRF1_PDOUT2G_LSB 18
+#define PHY_ANALOG_TXRF1_PDOUT2G_MASK 0x00040000
+#define PHY_ANALOG_TXRF1_PDOUT2G_GET(x) (((x) & 0x00040000) >> 18)
+#define PHY_ANALOG_TXRF1_PDOUT2G_SET(x) (((x) << 18) & 0x00040000)
+#define PHY_ANALOG_TXRF1_PDDR2G_MSB 19
+#define PHY_ANALOG_TXRF1_PDDR2G_LSB 19
+#define PHY_ANALOG_TXRF1_PDDR2G_MASK 0x00080000
+#define PHY_ANALOG_TXRF1_PDDR2G_GET(x) (((x) & 0x00080000) >> 19)
+#define PHY_ANALOG_TXRF1_PDDR2G_SET(x) (((x) << 19) & 0x00080000)
+#define PHY_ANALOG_TXRF1_PDMXR2G_MSB 20
+#define PHY_ANALOG_TXRF1_PDMXR2G_LSB 20
+#define PHY_ANALOG_TXRF1_PDMXR2G_MASK 0x00100000
+#define PHY_ANALOG_TXRF1_PDMXR2G_GET(x) (((x) & 0x00100000) >> 20)
+#define PHY_ANALOG_TXRF1_PDMXR2G_SET(x) (((x) << 20) & 0x00100000)
+#define PHY_ANALOG_TXRF1_PDLO2G_MSB 21
+#define PHY_ANALOG_TXRF1_PDLO2G_LSB 21
+#define PHY_ANALOG_TXRF1_PDLO2G_MASK 0x00200000
+#define PHY_ANALOG_TXRF1_PDLO2G_GET(x) (((x) & 0x00200000) >> 21)
+#define PHY_ANALOG_TXRF1_PDLO2G_SET(x) (((x) << 21) & 0x00200000)
+#define PHY_ANALOG_TXRF1_LOBUF2GFORCED_MSB 22
+#define PHY_ANALOG_TXRF1_LOBUF2GFORCED_LSB 22
+#define PHY_ANALOG_TXRF1_LOBUF2GFORCED_MASK 0x00400000
+#define PHY_ANALOG_TXRF1_LOBUF2GFORCED_GET(x) (((x) & 0x00400000) >> 22)
+#define PHY_ANALOG_TXRF1_LOBUF2GFORCED_SET(x) (((x) << 22) & 0x00400000)
+#define PHY_ANALOG_TXRF1_LODIV2GFORCED_MSB 23
+#define PHY_ANALOG_TXRF1_LODIV2GFORCED_LSB 23
+#define PHY_ANALOG_TXRF1_LODIV2GFORCED_MASK 0x00800000
+#define PHY_ANALOG_TXRF1_LODIV2GFORCED_GET(x) (((x) & 0x00800000) >> 23)
+#define PHY_ANALOG_TXRF1_LODIV2GFORCED_SET(x) (((x) << 23) & 0x00800000)
+#define PHY_ANALOG_TXRF1_PADRVGN2G_MSB 30
+#define PHY_ANALOG_TXRF1_PADRVGN2G_LSB 24
+#define PHY_ANALOG_TXRF1_PADRVGN2G_MASK 0x7f000000
+#define PHY_ANALOG_TXRF1_PADRVGN2G_GET(x) (((x) & 0x7f000000) >> 24)
+#define PHY_ANALOG_TXRF1_PADRVGN2G_SET(x) (((x) << 24) & 0x7f000000)
+#define PHY_ANALOG_TXRF1_LOCALTXGAIN2G_MSB 31
+#define PHY_ANALOG_TXRF1_LOCALTXGAIN2G_LSB 31
+#define PHY_ANALOG_TXRF1_LOCALTXGAIN2G_MASK 0x80000000
+#define PHY_ANALOG_TXRF1_LOCALTXGAIN2G_GET(x) (((x) & 0x80000000) >> 31)
+#define PHY_ANALOG_TXRF1_LOCALTXGAIN2G_SET(x) (((x) << 31) & 0x80000000)
+
+/* macros for TXRF2 */
+#define PHY_ANALOG_TXRF2_ADDRESS 0x00000044
+#define PHY_ANALOG_TXRF2_OFFSET 0x00000044
+#define PHY_ANALOG_TXRF2_SPARE2_MSB 0
+#define PHY_ANALOG_TXRF2_SPARE2_LSB 0
+#define PHY_ANALOG_TXRF2_SPARE2_MASK 0x00000001
+#define PHY_ANALOG_TXRF2_SPARE2_GET(x) (((x) & 0x00000001) >> 0)
+#define PHY_ANALOG_TXRF2_SPARE2_SET(x) (((x) << 0) & 0x00000001)
+#define PHY_ANALOG_TXRF2_D3B5G_MSB 3
+#define PHY_ANALOG_TXRF2_D3B5G_LSB 1
+#define PHY_ANALOG_TXRF2_D3B5G_MASK 0x0000000e
+#define PHY_ANALOG_TXRF2_D3B5G_GET(x) (((x) & 0x0000000e) >> 1)
+#define PHY_ANALOG_TXRF2_D3B5G_SET(x) (((x) << 1) & 0x0000000e)
+#define PHY_ANALOG_TXRF2_D4B5G_MSB 6
+#define PHY_ANALOG_TXRF2_D4B5G_LSB 4
+#define PHY_ANALOG_TXRF2_D4B5G_MASK 0x00000070
+#define PHY_ANALOG_TXRF2_D4B5G_GET(x) (((x) & 0x00000070) >> 4)
+#define PHY_ANALOG_TXRF2_D4B5G_SET(x) (((x) << 4) & 0x00000070)
+#define PHY_ANALOG_TXRF2_PDOUT5G_MSB 10
+#define PHY_ANALOG_TXRF2_PDOUT5G_LSB 7
+#define PHY_ANALOG_TXRF2_PDOUT5G_MASK 0x00000780
+#define PHY_ANALOG_TXRF2_PDOUT5G_GET(x) (((x) & 0x00000780) >> 7)
+#define PHY_ANALOG_TXRF2_PDOUT5G_SET(x) (((x) << 7) & 0x00000780)
+#define PHY_ANALOG_TXRF2_PDMXR5G_MSB 11
+#define PHY_ANALOG_TXRF2_PDMXR5G_LSB 11
+#define PHY_ANALOG_TXRF2_PDMXR5G_MASK 0x00000800
+#define PHY_ANALOG_TXRF2_PDMXR5G_GET(x) (((x) & 0x00000800) >> 11)
+#define PHY_ANALOG_TXRF2_PDMXR5G_SET(x) (((x) << 11) & 0x00000800)
+#define PHY_ANALOG_TXRF2_PDLOBUF5G_MSB 12
+#define PHY_ANALOG_TXRF2_PDLOBUF5G_LSB 12
+#define PHY_ANALOG_TXRF2_PDLOBUF5G_MASK 0x00001000
+#define PHY_ANALOG_TXRF2_PDLOBUF5G_GET(x) (((x) & 0x00001000) >> 12)
+#define PHY_ANALOG_TXRF2_PDLOBUF5G_SET(x) (((x) << 12) & 0x00001000)
+#define PHY_ANALOG_TXRF2_PDLODIV5G_MSB 13
+#define PHY_ANALOG_TXRF2_PDLODIV5G_LSB 13
+#define PHY_ANALOG_TXRF2_PDLODIV5G_MASK 0x00002000
+#define PHY_ANALOG_TXRF2_PDLODIV5G_GET(x) (((x) & 0x00002000) >> 13)
+#define PHY_ANALOG_TXRF2_PDLODIV5G_SET(x) (((x) << 13) & 0x00002000)
+#define PHY_ANALOG_TXRF2_LOBUF5GFORCED_MSB 14
+#define PHY_ANALOG_TXRF2_LOBUF5GFORCED_LSB 14
+#define PHY_ANALOG_TXRF2_LOBUF5GFORCED_MASK 0x00004000
+#define PHY_ANALOG_TXRF2_LOBUF5GFORCED_GET(x) (((x) & 0x00004000) >> 14)
+#define PHY_ANALOG_TXRF2_LOBUF5GFORCED_SET(x) (((x) << 14) & 0x00004000)
+#define PHY_ANALOG_TXRF2_LODIV5GFORCED_MSB 15
+#define PHY_ANALOG_TXRF2_LODIV5GFORCED_LSB 15
+#define PHY_ANALOG_TXRF2_LODIV5GFORCED_MASK 0x00008000
+#define PHY_ANALOG_TXRF2_LODIV5GFORCED_GET(x) (((x) & 0x00008000) >> 15)
+#define PHY_ANALOG_TXRF2_LODIV5GFORCED_SET(x) (((x) << 15) & 0x00008000)
+#define PHY_ANALOG_TXRF2_PADRV2GN5G_MSB 19
+#define PHY_ANALOG_TXRF2_PADRV2GN5G_LSB 16
+#define PHY_ANALOG_TXRF2_PADRV2GN5G_MASK 0x000f0000
+#define PHY_ANALOG_TXRF2_PADRV2GN5G_GET(x) (((x) & 0x000f0000) >> 16)
+#define PHY_ANALOG_TXRF2_PADRV2GN5G_SET(x) (((x) << 16) & 0x000f0000)
+#define PHY_ANALOG_TXRF2_PADRV3GN5G_MSB 23
+#define PHY_ANALOG_TXRF2_PADRV3GN5G_LSB 20
+#define PHY_ANALOG_TXRF2_PADRV3GN5G_MASK 0x00f00000
+#define PHY_ANALOG_TXRF2_PADRV3GN5G_GET(x) (((x) & 0x00f00000) >> 20)
+#define PHY_ANALOG_TXRF2_PADRV3GN5G_SET(x) (((x) << 20) & 0x00f00000)
+#define PHY_ANALOG_TXRF2_PADRV4GN5G_MSB 27
+#define PHY_ANALOG_TXRF2_PADRV4GN5G_LSB 24
+#define PHY_ANALOG_TXRF2_PADRV4GN5G_MASK 0x0f000000
+#define PHY_ANALOG_TXRF2_PADRV4GN5G_GET(x) (((x) & 0x0f000000) >> 24)
+#define PHY_ANALOG_TXRF2_PADRV4GN5G_SET(x) (((x) << 24) & 0x0f000000)
+#define PHY_ANALOG_TXRF2_LOCALTXGAIN5G_MSB 28
+#define PHY_ANALOG_TXRF2_LOCALTXGAIN5G_LSB 28
+#define PHY_ANALOG_TXRF2_LOCALTXGAIN5G_MASK 0x10000000
+#define PHY_ANALOG_TXRF2_LOCALTXGAIN5G_GET(x) (((x) & 0x10000000) >> 28)
+#define PHY_ANALOG_TXRF2_LOCALTXGAIN5G_SET(x) (((x) << 28) & 0x10000000)
+#define PHY_ANALOG_TXRF2_OCAS2G_MSB 31
+#define PHY_ANALOG_TXRF2_OCAS2G_LSB 29
+#define PHY_ANALOG_TXRF2_OCAS2G_MASK 0xe0000000
+#define PHY_ANALOG_TXRF2_OCAS2G_GET(x) (((x) & 0xe0000000) >> 29)
+#define PHY_ANALOG_TXRF2_OCAS2G_SET(x) (((x) << 29) & 0xe0000000)
+
+/* macros for TXRF3 */
+#define PHY_ANALOG_TXRF3_ADDRESS 0x00000048
+#define PHY_ANALOG_TXRF3_OFFSET 0x00000048
+#define PHY_ANALOG_TXRF3_SPARE3_MSB 22
+#define PHY_ANALOG_TXRF3_SPARE3_LSB 0
+#define PHY_ANALOG_TXRF3_SPARE3_MASK 0x007fffff
+#define PHY_ANALOG_TXRF3_SPARE3_GET(x) (((x) & 0x007fffff) >> 0)
+#define PHY_ANALOG_TXRF3_SPARE3_SET(x) (((x) << 0) & 0x007fffff)
+#define PHY_ANALOG_TXRF3_CAS5G_MSB 25
+#define PHY_ANALOG_TXRF3_CAS5G_LSB 23
+#define PHY_ANALOG_TXRF3_CAS5G_MASK 0x03800000
+#define PHY_ANALOG_TXRF3_CAS5G_GET(x) (((x) & 0x03800000) >> 23)
+#define PHY_ANALOG_TXRF3_CAS5G_SET(x) (((x) << 23) & 0x03800000)
+#define PHY_ANALOG_TXRF3_OB5G_MSB 28
+#define PHY_ANALOG_TXRF3_OB5G_LSB 26
+#define PHY_ANALOG_TXRF3_OB5G_MASK 0x1c000000
+#define PHY_ANALOG_TXRF3_OB5G_GET(x) (((x) & 0x1c000000) >> 26)
+#define PHY_ANALOG_TXRF3_OB5G_SET(x) (((x) << 26) & 0x1c000000)
+#define PHY_ANALOG_TXRF3_D2B5G_MSB 31
+#define PHY_ANALOG_TXRF3_D2B5G_LSB 29
+#define PHY_ANALOG_TXRF3_D2B5G_MASK 0xe0000000
+#define PHY_ANALOG_TXRF3_D2B5G_GET(x) (((x) & 0xe0000000) >> 29)
+#define PHY_ANALOG_TXRF3_D2B5G_SET(x) (((x) << 29) & 0xe0000000)
+
+/* macros for TXRF4 */
+#define PHY_ANALOG_TXRF4_ADDRESS 0x0000004c
+#define PHY_ANALOG_TXRF4_OFFSET 0x0000004c
+#define PHY_ANALOG_TXRF4_COMP2G_PSK_MSB 2
+#define PHY_ANALOG_TXRF4_COMP2G_PSK_LSB 0
+#define PHY_ANALOG_TXRF4_COMP2G_PSK_MASK 0x00000007
+#define PHY_ANALOG_TXRF4_COMP2G_PSK_GET(x) (((x) & 0x00000007) >> 0)
+#define PHY_ANALOG_TXRF4_COMP2G_PSK_SET(x) (((x) << 0) & 0x00000007)
+#define PHY_ANALOG_TXRF4_COMP2G_CCK_MSB 5
+#define PHY_ANALOG_TXRF4_COMP2G_CCK_LSB 3
+#define PHY_ANALOG_TXRF4_COMP2G_CCK_MASK 0x00000038
+#define PHY_ANALOG_TXRF4_COMP2G_CCK_GET(x) (((x) & 0x00000038) >> 3)
+#define PHY_ANALOG_TXRF4_COMP2G_CCK_SET(x) (((x) << 3) & 0x00000038)
+#define PHY_ANALOG_TXRF4_AMP2B2G_QAM_MSB 8
+#define PHY_ANALOG_TXRF4_AMP2B2G_QAM_LSB 6
+#define PHY_ANALOG_TXRF4_AMP2B2G_QAM_MASK 0x000001c0
+#define PHY_ANALOG_TXRF4_AMP2B2G_QAM_GET(x) (((x) & 0x000001c0) >> 6)
+#define PHY_ANALOG_TXRF4_AMP2B2G_QAM_SET(x) (((x) << 6) & 0x000001c0)
+#define PHY_ANALOG_TXRF4_AMP2B2G_PSK_MSB 11
+#define PHY_ANALOG_TXRF4_AMP2B2G_PSK_LSB 9
+#define PHY_ANALOG_TXRF4_AMP2B2G_PSK_MASK 0x00000e00
+#define PHY_ANALOG_TXRF4_AMP2B2G_PSK_GET(x) (((x) & 0x00000e00) >> 9)
+#define PHY_ANALOG_TXRF4_AMP2B2G_PSK_SET(x) (((x) << 9) & 0x00000e00)
+#define PHY_ANALOG_TXRF4_AMP2B2G_CCK_MSB 14
+#define PHY_ANALOG_TXRF4_AMP2B2G_CCK_LSB 12
+#define PHY_ANALOG_TXRF4_AMP2B2G_CCK_MASK 0x00007000
+#define PHY_ANALOG_TXRF4_AMP2B2G_CCK_GET(x) (((x) & 0x00007000) >> 12)
+#define PHY_ANALOG_TXRF4_AMP2B2G_CCK_SET(x) (((x) << 12) & 0x00007000)
+#define PHY_ANALOG_TXRF4_AMP2CAS2G_MSB 17
+#define PHY_ANALOG_TXRF4_AMP2CAS2G_LSB 15
+#define PHY_ANALOG_TXRF4_AMP2CAS2G_MASK 0x00038000
+#define PHY_ANALOG_TXRF4_AMP2CAS2G_GET(x) (((x) & 0x00038000) >> 15)
+#define PHY_ANALOG_TXRF4_AMP2CAS2G_SET(x) (((x) << 15) & 0x00038000)
+#define PHY_ANALOG_TXRF4_FILTR2G_MSB 19
+#define PHY_ANALOG_TXRF4_FILTR2G_LSB 18
+#define PHY_ANALOG_TXRF4_FILTR2G_MASK 0x000c0000
+#define PHY_ANALOG_TXRF4_FILTR2G_GET(x) (((x) & 0x000c0000) >> 18)
+#define PHY_ANALOG_TXRF4_FILTR2G_SET(x) (((x) << 18) & 0x000c0000)
+#define PHY_ANALOG_TXRF4_PWDFB2_2G_MSB 20
+#define PHY_ANALOG_TXRF4_PWDFB2_2G_LSB 20
+#define PHY_ANALOG_TXRF4_PWDFB2_2G_MASK 0x00100000
+#define PHY_ANALOG_TXRF4_PWDFB2_2G_GET(x) (((x) & 0x00100000) >> 20)
+#define PHY_ANALOG_TXRF4_PWDFB2_2G_SET(x) (((x) << 20) & 0x00100000)
+#define PHY_ANALOG_TXRF4_PWDFB1_2G_MSB 21
+#define PHY_ANALOG_TXRF4_PWDFB1_2G_LSB 21
+#define PHY_ANALOG_TXRF4_PWDFB1_2G_MASK 0x00200000
+#define PHY_ANALOG_TXRF4_PWDFB1_2G_GET(x) (((x) & 0x00200000) >> 21)
+#define PHY_ANALOG_TXRF4_PWDFB1_2G_SET(x) (((x) << 21) & 0x00200000)
+#define PHY_ANALOG_TXRF4_PDFB2G_MSB 22
+#define PHY_ANALOG_TXRF4_PDFB2G_LSB 22
+#define PHY_ANALOG_TXRF4_PDFB2G_MASK 0x00400000
+#define PHY_ANALOG_TXRF4_PDFB2G_GET(x) (((x) & 0x00400000) >> 22)
+#define PHY_ANALOG_TXRF4_PDFB2G_SET(x) (((x) << 22) & 0x00400000)
+#define PHY_ANALOG_TXRF4_RDIV5G_MSB 24
+#define PHY_ANALOG_TXRF4_RDIV5G_LSB 23
+#define PHY_ANALOG_TXRF4_RDIV5G_MASK 0x01800000
+#define PHY_ANALOG_TXRF4_RDIV5G_GET(x) (((x) & 0x01800000) >> 23)
+#define PHY_ANALOG_TXRF4_RDIV5G_SET(x) (((x) << 23) & 0x01800000)
+#define PHY_ANALOG_TXRF4_CAPDIV5G_MSB 27
+#define PHY_ANALOG_TXRF4_CAPDIV5G_LSB 25
+#define PHY_ANALOG_TXRF4_CAPDIV5G_MASK 0x0e000000
+#define PHY_ANALOG_TXRF4_CAPDIV5G_GET(x) (((x) & 0x0e000000) >> 25)
+#define PHY_ANALOG_TXRF4_CAPDIV5G_SET(x) (((x) << 25) & 0x0e000000)
+#define PHY_ANALOG_TXRF4_PDPREDIST5G_MSB 28
+#define PHY_ANALOG_TXRF4_PDPREDIST5G_LSB 28
+#define PHY_ANALOG_TXRF4_PDPREDIST5G_MASK 0x10000000
+#define PHY_ANALOG_TXRF4_PDPREDIST5G_GET(x) (((x) & 0x10000000) >> 28)
+#define PHY_ANALOG_TXRF4_PDPREDIST5G_SET(x) (((x) << 28) & 0x10000000)
+#define PHY_ANALOG_TXRF4_RDIV2G_MSB 30
+#define PHY_ANALOG_TXRF4_RDIV2G_LSB 29
+#define PHY_ANALOG_TXRF4_RDIV2G_MASK 0x60000000
+#define PHY_ANALOG_TXRF4_RDIV2G_GET(x) (((x) & 0x60000000) >> 29)
+#define PHY_ANALOG_TXRF4_RDIV2G_SET(x) (((x) << 29) & 0x60000000)
+#define PHY_ANALOG_TXRF4_PDPREDIST2G_MSB 31
+#define PHY_ANALOG_TXRF4_PDPREDIST2G_LSB 31
+#define PHY_ANALOG_TXRF4_PDPREDIST2G_MASK 0x80000000
+#define PHY_ANALOG_TXRF4_PDPREDIST2G_GET(x) (((x) & 0x80000000) >> 31)
+#define PHY_ANALOG_TXRF4_PDPREDIST2G_SET(x) (((x) << 31) & 0x80000000)
+
+/* macros for TXRF5 */
+#define PHY_ANALOG_TXRF5_ADDRESS 0x00000050
+#define PHY_ANALOG_TXRF5_OFFSET 0x00000050
+#define PHY_ANALOG_TXRF5_FBHI2G_MSB 0
+#define PHY_ANALOG_TXRF5_FBHI2G_LSB 0
+#define PHY_ANALOG_TXRF5_FBHI2G_MASK 0x00000001
+#define PHY_ANALOG_TXRF5_FBHI2G_GET(x) (((x) & 0x00000001) >> 0)
+#define PHY_ANALOG_TXRF5_FBLO2G_MSB 1
+#define PHY_ANALOG_TXRF5_FBLO2G_LSB 1
+#define PHY_ANALOG_TXRF5_FBLO2G_MASK 0x00000002
+#define PHY_ANALOG_TXRF5_FBLO2G_GET(x) (((x) & 0x00000002) >> 1)
+#define PHY_ANALOG_TXRF5_REFHI2G_MSB 4
+#define PHY_ANALOG_TXRF5_REFHI2G_LSB 2
+#define PHY_ANALOG_TXRF5_REFHI2G_MASK 0x0000001c
+#define PHY_ANALOG_TXRF5_REFHI2G_GET(x) (((x) & 0x0000001c) >> 2)
+#define PHY_ANALOG_TXRF5_REFHI2G_SET(x) (((x) << 2) & 0x0000001c)
+#define PHY_ANALOG_TXRF5_REFLO2G_MSB 7
+#define PHY_ANALOG_TXRF5_REFLO2G_LSB 5
+#define PHY_ANALOG_TXRF5_REFLO2G_MASK 0x000000e0
+#define PHY_ANALOG_TXRF5_REFLO2G_GET(x) (((x) & 0x000000e0) >> 5)
+#define PHY_ANALOG_TXRF5_REFLO2G_SET(x) (((x) << 5) & 0x000000e0)
+#define PHY_ANALOG_TXRF5_PK2B2G_QAM_MSB 9
+#define PHY_ANALOG_TXRF5_PK2B2G_QAM_LSB 8
+#define PHY_ANALOG_TXRF5_PK2B2G_QAM_MASK 0x00000300
+#define PHY_ANALOG_TXRF5_PK2B2G_QAM_GET(x) (((x) & 0x00000300) >> 8)
+#define PHY_ANALOG_TXRF5_PK2B2G_QAM_SET(x) (((x) << 8) & 0x00000300)
+#define PHY_ANALOG_TXRF5_PK2B2G_PSK_MSB 11
+#define PHY_ANALOG_TXRF5_PK2B2G_PSK_LSB 10
+#define PHY_ANALOG_TXRF5_PK2B2G_PSK_MASK 0x00000c00
+#define PHY_ANALOG_TXRF5_PK2B2G_PSK_GET(x) (((x) & 0x00000c00) >> 10)
+#define PHY_ANALOG_TXRF5_PK2B2G_PSK_SET(x) (((x) << 10) & 0x00000c00)
+#define PHY_ANALOG_TXRF5_PK2B2G_CCK_MSB 13
+#define PHY_ANALOG_TXRF5_PK2B2G_CCK_LSB 12
+#define PHY_ANALOG_TXRF5_PK2B2G_CCK_MASK 0x00003000
+#define PHY_ANALOG_TXRF5_PK2B2G_CCK_GET(x) (((x) & 0x00003000) >> 12)
+#define PHY_ANALOG_TXRF5_PK2B2G_CCK_SET(x) (((x) << 12) & 0x00003000)
+#define PHY_ANALOG_TXRF5_PK1B2G_QAM_MSB 15
+#define PHY_ANALOG_TXRF5_PK1B2G_QAM_LSB 14
+#define PHY_ANALOG_TXRF5_PK1B2G_QAM_MASK 0x0000c000
+#define PHY_ANALOG_TXRF5_PK1B2G_QAM_GET(x) (((x) & 0x0000c000) >> 14)
+#define PHY_ANALOG_TXRF5_PK1B2G_QAM_SET(x) (((x) << 14) & 0x0000c000)
+#define PHY_ANALOG_TXRF5_PK1B2G_PSK_MSB 17
+#define PHY_ANALOG_TXRF5_PK1B2G_PSK_LSB 16
+#define PHY_ANALOG_TXRF5_PK1B2G_PSK_MASK 0x00030000
+#define PHY_ANALOG_TXRF5_PK1B2G_PSK_GET(x) (((x) & 0x00030000) >> 16)
+#define PHY_ANALOG_TXRF5_PK1B2G_PSK_SET(x) (((x) << 16) & 0x00030000)
+#define PHY_ANALOG_TXRF5_PK1B2G_CCK_MSB 19
+#define PHY_ANALOG_TXRF5_PK1B2G_CCK_LSB 18
+#define PHY_ANALOG_TXRF5_PK1B2G_CCK_MASK 0x000c0000
+#define PHY_ANALOG_TXRF5_PK1B2G_CCK_GET(x) (((x) & 0x000c0000) >> 18)
+#define PHY_ANALOG_TXRF5_PK1B2G_CCK_SET(x) (((x) << 18) & 0x000c0000)
+#define PHY_ANALOG_TXRF5_MIOB2G_QAM_MSB 22
+#define PHY_ANALOG_TXRF5_MIOB2G_QAM_LSB 20
+#define PHY_ANALOG_TXRF5_MIOB2G_QAM_MASK 0x00700000
+#define PHY_ANALOG_TXRF5_MIOB2G_QAM_GET(x) (((x) & 0x00700000) >> 20)
+#define PHY_ANALOG_TXRF5_MIOB2G_QAM_SET(x) (((x) << 20) & 0x00700000)
+#define PHY_ANALOG_TXRF5_MIOB2G_PSK_MSB 25
+#define PHY_ANALOG_TXRF5_MIOB2G_PSK_LSB 23
+#define PHY_ANALOG_TXRF5_MIOB2G_PSK_MASK 0x03800000
+#define PHY_ANALOG_TXRF5_MIOB2G_PSK_GET(x) (((x) & 0x03800000) >> 23)
+#define PHY_ANALOG_TXRF5_MIOB2G_PSK_SET(x) (((x) << 23) & 0x03800000)
+#define PHY_ANALOG_TXRF5_MIOB2G_CCK_MSB 28
+#define PHY_ANALOG_TXRF5_MIOB2G_CCK_LSB 26
+#define PHY_ANALOG_TXRF5_MIOB2G_CCK_MASK 0x1c000000
+#define PHY_ANALOG_TXRF5_MIOB2G_CCK_GET(x) (((x) & 0x1c000000) >> 26)
+#define PHY_ANALOG_TXRF5_MIOB2G_CCK_SET(x) (((x) << 26) & 0x1c000000)
+#define PHY_ANALOG_TXRF5_COMP2G_QAM_MSB 31
+#define PHY_ANALOG_TXRF5_COMP2G_QAM_LSB 29
+#define PHY_ANALOG_TXRF5_COMP2G_QAM_MASK 0xe0000000
+#define PHY_ANALOG_TXRF5_COMP2G_QAM_GET(x) (((x) & 0xe0000000) >> 29)
+#define PHY_ANALOG_TXRF5_COMP2G_QAM_SET(x) (((x) << 29) & 0xe0000000)
+
+/* macros for TXRF6 */
+#define PHY_ANALOG_TXRF6_ADDRESS 0x00000054
+#define PHY_ANALOG_TXRF6_OFFSET 0x00000054
+#define PHY_ANALOG_TXRF6_SPARE6_MSB 0
+#define PHY_ANALOG_TXRF6_SPARE6_LSB 0
+#define PHY_ANALOG_TXRF6_SPARE6_MASK 0x00000001
+#define PHY_ANALOG_TXRF6_SPARE6_GET(x) (((x) & 0x00000001) >> 0)
+#define PHY_ANALOG_TXRF6_SPARE6_SET(x) (((x) << 0) & 0x00000001)
+#define PHY_ANALOG_TXRF6_PAL_LOCKED_MSB 1
+#define PHY_ANALOG_TXRF6_PAL_LOCKED_LSB 1
+#define PHY_ANALOG_TXRF6_PAL_LOCKED_MASK 0x00000002
+#define PHY_ANALOG_TXRF6_PAL_LOCKED_GET(x) (((x) & 0x00000002) >> 1)
+#define PHY_ANALOG_TXRF6_PADRVGN2G_SMOUT_MSB 7
+#define PHY_ANALOG_TXRF6_PADRVGN2G_SMOUT_LSB 2
+#define PHY_ANALOG_TXRF6_PADRVGN2G_SMOUT_MASK 0x000000fc
+#define PHY_ANALOG_TXRF6_PADRVGN2G_SMOUT_GET(x) (((x) & 0x000000fc) >> 2)
+#define PHY_ANALOG_TXRF6_GAINSTEP2G_MSB 10
+#define PHY_ANALOG_TXRF6_GAINSTEP2G_LSB 8
+#define PHY_ANALOG_TXRF6_GAINSTEP2G_MASK 0x00000700
+#define PHY_ANALOG_TXRF6_GAINSTEP2G_GET(x) (((x) & 0x00000700) >> 8)
+#define PHY_ANALOG_TXRF6_GAINSTEP2G_SET(x) (((x) << 8) & 0x00000700)
+#define PHY_ANALOG_TXRF6_USE_GAIN_DELTA2G_MSB 11
+#define PHY_ANALOG_TXRF6_USE_GAIN_DELTA2G_LSB 11
+#define PHY_ANALOG_TXRF6_USE_GAIN_DELTA2G_MASK 0x00000800
+#define PHY_ANALOG_TXRF6_USE_GAIN_DELTA2G_GET(x) (((x) & 0x00000800) >> 11)
+#define PHY_ANALOG_TXRF6_USE_GAIN_DELTA2G_SET(x) (((x) << 11) & 0x00000800)
+#define PHY_ANALOG_TXRF6_PADRVGN_INDEX_I2G_MSB 15
+#define PHY_ANALOG_TXRF6_PADRVGN_INDEX_I2G_LSB 12
+#define PHY_ANALOG_TXRF6_PADRVGN_INDEX_I2G_MASK 0x0000f000
+#define PHY_ANALOG_TXRF6_PADRVGN_INDEX_I2G_GET(x) (((x) & 0x0000f000) >> 12)
+#define PHY_ANALOG_TXRF6_PADRVGN_INDEX_I2G_SET(x) (((x) << 12) & 0x0000f000)
+#define PHY_ANALOG_TXRF6_VCMONDELAY2G_MSB 18
+#define PHY_ANALOG_TXRF6_VCMONDELAY2G_LSB 16
+#define PHY_ANALOG_TXRF6_VCMONDELAY2G_MASK 0x00070000
+#define PHY_ANALOG_TXRF6_VCMONDELAY2G_GET(x) (((x) & 0x00070000) >> 16)
+#define PHY_ANALOG_TXRF6_VCMONDELAY2G_SET(x) (((x) << 16) & 0x00070000)
+#define PHY_ANALOG_TXRF6_CAPDIV2G_MSB 21
+#define PHY_ANALOG_TXRF6_CAPDIV2G_LSB 19
+#define PHY_ANALOG_TXRF6_CAPDIV2G_MASK 0x00380000
+#define PHY_ANALOG_TXRF6_CAPDIV2G_GET(x) (((x) & 0x00380000) >> 19)
+#define PHY_ANALOG_TXRF6_CAPDIV2G_SET(x) (((x) << 19) & 0x00380000)
+#define PHY_ANALOG_TXRF6_CAPDIV2GOVR_MSB 22
+#define PHY_ANALOG_TXRF6_CAPDIV2GOVR_LSB 22
+#define PHY_ANALOG_TXRF6_CAPDIV2GOVR_MASK 0x00400000
+#define PHY_ANALOG_TXRF6_CAPDIV2GOVR_GET(x) (((x) & 0x00400000) >> 22)
+#define PHY_ANALOG_TXRF6_CAPDIV2GOVR_SET(x) (((x) << 22) & 0x00400000)
+#define PHY_ANALOG_TXRF6_ENPACAL2G_MSB 23
+#define PHY_ANALOG_TXRF6_ENPACAL2G_LSB 23
+#define PHY_ANALOG_TXRF6_ENPACAL2G_MASK 0x00800000
+#define PHY_ANALOG_TXRF6_ENPACAL2G_GET(x) (((x) & 0x00800000) >> 23)
+#define PHY_ANALOG_TXRF6_ENPACAL2G_SET(x) (((x) << 23) & 0x00800000)
+#define PHY_ANALOG_TXRF6_OFFSET2G_MSB 30
+#define PHY_ANALOG_TXRF6_OFFSET2G_LSB 24
+#define PHY_ANALOG_TXRF6_OFFSET2G_MASK 0x7f000000
+#define PHY_ANALOG_TXRF6_OFFSET2G_GET(x) (((x) & 0x7f000000) >> 24)
+#define PHY_ANALOG_TXRF6_OFFSET2G_SET(x) (((x) << 24) & 0x7f000000)
+#define PHY_ANALOG_TXRF6_ENOFFSETCAL2G_MSB 31
+#define PHY_ANALOG_TXRF6_ENOFFSETCAL2G_LSB 31
+#define PHY_ANALOG_TXRF6_ENOFFSETCAL2G_MASK 0x80000000
+#define PHY_ANALOG_TXRF6_ENOFFSETCAL2G_GET(x) (((x) & 0x80000000) >> 31)
+#define PHY_ANALOG_TXRF6_ENOFFSETCAL2G_SET(x) (((x) << 31) & 0x80000000)
+
+/* macros for TXRF7 */
+#define PHY_ANALOG_TXRF7_ADDRESS 0x00000058
+#define PHY_ANALOG_TXRF7_OFFSET 0x00000058
+#define PHY_ANALOG_TXRF7_SPARE7_MSB 1
+#define PHY_ANALOG_TXRF7_SPARE7_LSB 0
+#define PHY_ANALOG_TXRF7_SPARE7_MASK 0x00000003
+#define PHY_ANALOG_TXRF7_SPARE7_GET(x) (((x) & 0x00000003) >> 0)
+#define PHY_ANALOG_TXRF7_SPARE7_SET(x) (((x) << 0) & 0x00000003)
+#define PHY_ANALOG_TXRF7_PADRVGNTAB_4_MSB 7
+#define PHY_ANALOG_TXRF7_PADRVGNTAB_4_LSB 2
+#define PHY_ANALOG_TXRF7_PADRVGNTAB_4_MASK 0x000000fc
+#define PHY_ANALOG_TXRF7_PADRVGNTAB_4_GET(x) (((x) & 0x000000fc) >> 2)
+#define PHY_ANALOG_TXRF7_PADRVGNTAB_4_SET(x) (((x) << 2) & 0x000000fc)
+#define PHY_ANALOG_TXRF7_PADRVGNTAB_3_MSB 13
+#define PHY_ANALOG_TXRF7_PADRVGNTAB_3_LSB 8
+#define PHY_ANALOG_TXRF7_PADRVGNTAB_3_MASK 0x00003f00
+#define PHY_ANALOG_TXRF7_PADRVGNTAB_3_GET(x) (((x) & 0x00003f00) >> 8)
+#define PHY_ANALOG_TXRF7_PADRVGNTAB_3_SET(x) (((x) << 8) & 0x00003f00)
+#define PHY_ANALOG_TXRF7_PADRVGNTAB_2_MSB 19
+#define PHY_ANALOG_TXRF7_PADRVGNTAB_2_LSB 14
+#define PHY_ANALOG_TXRF7_PADRVGNTAB_2_MASK 0x000fc000
+#define PHY_ANALOG_TXRF7_PADRVGNTAB_2_GET(x) (((x) & 0x000fc000) >> 14)
+#define PHY_ANALOG_TXRF7_PADRVGNTAB_2_SET(x) (((x) << 14) & 0x000fc000)
+#define PHY_ANALOG_TXRF7_PADRVGNTAB_1_MSB 25
+#define PHY_ANALOG_TXRF7_PADRVGNTAB_1_LSB 20
+#define PHY_ANALOG_TXRF7_PADRVGNTAB_1_MASK 0x03f00000
+#define PHY_ANALOG_TXRF7_PADRVGNTAB_1_GET(x) (((x) & 0x03f00000) >> 20)
+#define PHY_ANALOG_TXRF7_PADRVGNTAB_1_SET(x) (((x) << 20) & 0x03f00000)
+#define PHY_ANALOG_TXRF7_PADRVGNTAB_0_MSB 31
+#define PHY_ANALOG_TXRF7_PADRVGNTAB_0_LSB 26
+#define PHY_ANALOG_TXRF7_PADRVGNTAB_0_MASK 0xfc000000
+#define PHY_ANALOG_TXRF7_PADRVGNTAB_0_GET(x) (((x) & 0xfc000000) >> 26)
+#define PHY_ANALOG_TXRF7_PADRVGNTAB_0_SET(x) (((x) << 26) & 0xfc000000)
+
+/* macros for TXRF8 */
+#define PHY_ANALOG_TXRF8_ADDRESS 0x0000005c
+#define PHY_ANALOG_TXRF8_OFFSET 0x0000005c
+#define PHY_ANALOG_TXRF8_SPARE8_MSB 1
+#define PHY_ANALOG_TXRF8_SPARE8_LSB 0
+#define PHY_ANALOG_TXRF8_SPARE8_MASK 0x00000003
+#define PHY_ANALOG_TXRF8_SPARE8_GET(x) (((x) & 0x00000003) >> 0)
+#define PHY_ANALOG_TXRF8_SPARE8_SET(x) (((x) << 0) & 0x00000003)
+#define PHY_ANALOG_TXRF8_PADRVGNTAB_9_MSB 7
+#define PHY_ANALOG_TXRF8_PADRVGNTAB_9_LSB 2
+#define PHY_ANALOG_TXRF8_PADRVGNTAB_9_MASK 0x000000fc
+#define PHY_ANALOG_TXRF8_PADRVGNTAB_9_GET(x) (((x) & 0x000000fc) >> 2)
+#define PHY_ANALOG_TXRF8_PADRVGNTAB_9_SET(x) (((x) << 2) & 0x000000fc)
+#define PHY_ANALOG_TXRF8_PADRVGNTAB_8_MSB 13
+#define PHY_ANALOG_TXRF8_PADRVGNTAB_8_LSB 8
+#define PHY_ANALOG_TXRF8_PADRVGNTAB_8_MASK 0x00003f00
+#define PHY_ANALOG_TXRF8_PADRVGNTAB_8_GET(x) (((x) & 0x00003f00) >> 8)
+#define PHY_ANALOG_TXRF8_PADRVGNTAB_8_SET(x) (((x) << 8) & 0x00003f00)
+#define PHY_ANALOG_TXRF8_PADRVGNTAB_7_MSB 19
+#define PHY_ANALOG_TXRF8_PADRVGNTAB_7_LSB 14
+#define PHY_ANALOG_TXRF8_PADRVGNTAB_7_MASK 0x000fc000
+#define PHY_ANALOG_TXRF8_PADRVGNTAB_7_GET(x) (((x) & 0x000fc000) >> 14)
+#define PHY_ANALOG_TXRF8_PADRVGNTAB_7_SET(x) (((x) << 14) & 0x000fc000)
+#define PHY_ANALOG_TXRF8_PADRVGNTAB_6_MSB 25
+#define PHY_ANALOG_TXRF8_PADRVGNTAB_6_LSB 20
+#define PHY_ANALOG_TXRF8_PADRVGNTAB_6_MASK 0x03f00000
+#define PHY_ANALOG_TXRF8_PADRVGNTAB_6_GET(x) (((x) & 0x03f00000) >> 20)
+#define PHY_ANALOG_TXRF8_PADRVGNTAB_6_SET(x) (((x) << 20) & 0x03f00000)
+#define PHY_ANALOG_TXRF8_PADRVGNTAB_5_MSB 31
+#define PHY_ANALOG_TXRF8_PADRVGNTAB_5_LSB 26
+#define PHY_ANALOG_TXRF8_PADRVGNTAB_5_MASK 0xfc000000
+#define PHY_ANALOG_TXRF8_PADRVGNTAB_5_GET(x) (((x) & 0xfc000000) >> 26)
+#define PHY_ANALOG_TXRF8_PADRVGNTAB_5_SET(x) (((x) << 26) & 0xfc000000)
+
+/* macros for TXRF9 */
+#define PHY_ANALOG_TXRF9_ADDRESS 0x00000060
+#define PHY_ANALOG_TXRF9_OFFSET 0x00000060
+#define PHY_ANALOG_TXRF9_SPARE9_MSB 1
+#define PHY_ANALOG_TXRF9_SPARE9_LSB 0
+#define PHY_ANALOG_TXRF9_SPARE9_MASK 0x00000003
+#define PHY_ANALOG_TXRF9_SPARE9_GET(x) (((x) & 0x00000003) >> 0)
+#define PHY_ANALOG_TXRF9_SPARE9_SET(x) (((x) << 0) & 0x00000003)
+#define PHY_ANALOG_TXRF9_PADRVGNTAB_14_MSB 7
+#define PHY_ANALOG_TXRF9_PADRVGNTAB_14_LSB 2
+#define PHY_ANALOG_TXRF9_PADRVGNTAB_14_MASK 0x000000fc
+#define PHY_ANALOG_TXRF9_PADRVGNTAB_14_GET(x) (((x) & 0x000000fc) >> 2)
+#define PHY_ANALOG_TXRF9_PADRVGNTAB_14_SET(x) (((x) << 2) & 0x000000fc)
+#define PHY_ANALOG_TXRF9_PADRVGNTAB_13_MSB 13
+#define PHY_ANALOG_TXRF9_PADRVGNTAB_13_LSB 8
+#define PHY_ANALOG_TXRF9_PADRVGNTAB_13_MASK 0x00003f00
+#define PHY_ANALOG_TXRF9_PADRVGNTAB_13_GET(x) (((x) & 0x00003f00) >> 8)
+#define PHY_ANALOG_TXRF9_PADRVGNTAB_13_SET(x) (((x) << 8) & 0x00003f00)
+#define PHY_ANALOG_TXRF9_PADRVGNTAB_12_MSB 19
+#define PHY_ANALOG_TXRF9_PADRVGNTAB_12_LSB 14
+#define PHY_ANALOG_TXRF9_PADRVGNTAB_12_MASK 0x000fc000
+#define PHY_ANALOG_TXRF9_PADRVGNTAB_12_GET(x) (((x) & 0x000fc000) >> 14)
+#define PHY_ANALOG_TXRF9_PADRVGNTAB_12_SET(x) (((x) << 14) & 0x000fc000)
+#define PHY_ANALOG_TXRF9_PADRVGNTAB_11_MSB 25
+#define PHY_ANALOG_TXRF9_PADRVGNTAB_11_LSB 20
+#define PHY_ANALOG_TXRF9_PADRVGNTAB_11_MASK 0x03f00000
+#define PHY_ANALOG_TXRF9_PADRVGNTAB_11_GET(x) (((x) & 0x03f00000) >> 20)
+#define PHY_ANALOG_TXRF9_PADRVGNTAB_11_SET(x) (((x) << 20) & 0x03f00000)
+#define PHY_ANALOG_TXRF9_PADRVGNTAB_10_MSB 31
+#define PHY_ANALOG_TXRF9_PADRVGNTAB_10_LSB 26
+#define PHY_ANALOG_TXRF9_PADRVGNTAB_10_MASK 0xfc000000
+#define PHY_ANALOG_TXRF9_PADRVGNTAB_10_GET(x) (((x) & 0xfc000000) >> 26)
+#define PHY_ANALOG_TXRF9_PADRVGNTAB_10_SET(x) (((x) << 26) & 0xfc000000)
+
+/* macros for TXRF10 */
+#define PHY_ANALOG_TXRF10_ADDRESS 0x00000064
+#define PHY_ANALOG_TXRF10_OFFSET 0x00000064
+#define PHY_ANALOG_TXRF10_SPARE10_MSB 12
+#define PHY_ANALOG_TXRF10_SPARE10_LSB 0
+#define PHY_ANALOG_TXRF10_SPARE10_MASK 0x00001fff
+#define PHY_ANALOG_TXRF10_SPARE10_GET(x) (((x) & 0x00001fff) >> 0)
+#define PHY_ANALOG_TXRF10_SPARE10_SET(x) (((x) << 0) & 0x00001fff)
+#define PHY_ANALOG_TXRF10_PDOUT5G_3CALTX_MSB 13
+#define PHY_ANALOG_TXRF10_PDOUT5G_3CALTX_LSB 13
+#define PHY_ANALOG_TXRF10_PDOUT5G_3CALTX_MASK 0x00002000
+#define PHY_ANALOG_TXRF10_PDOUT5G_3CALTX_GET(x) (((x) & 0x00002000) >> 13)
+#define PHY_ANALOG_TXRF10_PDOUT5G_3CALTX_SET(x) (((x) << 13) & 0x00002000)
+#define PHY_ANALOG_TXRF10_D3B5GCALTX_MSB 16
+#define PHY_ANALOG_TXRF10_D3B5GCALTX_LSB 14
+#define PHY_ANALOG_TXRF10_D3B5GCALTX_MASK 0x0001c000
+#define PHY_ANALOG_TXRF10_D3B5GCALTX_GET(x) (((x) & 0x0001c000) >> 14)
+#define PHY_ANALOG_TXRF10_D3B5GCALTX_SET(x) (((x) << 14) & 0x0001c000)
+#define PHY_ANALOG_TXRF10_D4B5GCALTX_MSB 19
+#define PHY_ANALOG_TXRF10_D4B5GCALTX_LSB 17
+#define PHY_ANALOG_TXRF10_D4B5GCALTX_MASK 0x000e0000
+#define PHY_ANALOG_TXRF10_D4B5GCALTX_GET(x) (((x) & 0x000e0000) >> 17)
+#define PHY_ANALOG_TXRF10_D4B5GCALTX_SET(x) (((x) << 17) & 0x000e0000)
+#define PHY_ANALOG_TXRF10_PADRVGN2GCALTX_MSB 26
+#define PHY_ANALOG_TXRF10_PADRVGN2GCALTX_LSB 20
+#define PHY_ANALOG_TXRF10_PADRVGN2GCALTX_MASK 0x07f00000
+#define PHY_ANALOG_TXRF10_PADRVGN2GCALTX_GET(x) (((x) & 0x07f00000) >> 20)
+#define PHY_ANALOG_TXRF10_PADRVGN2GCALTX_SET(x) (((x) << 20) & 0x07f00000)
+#define PHY_ANALOG_TXRF10_DB2GCALTX_MSB 29
+#define PHY_ANALOG_TXRF10_DB2GCALTX_LSB 27
+#define PHY_ANALOG_TXRF10_DB2GCALTX_MASK 0x38000000
+#define PHY_ANALOG_TXRF10_DB2GCALTX_GET(x) (((x) & 0x38000000) >> 27)
+#define PHY_ANALOG_TXRF10_DB2GCALTX_SET(x) (((x) << 27) & 0x38000000)
+#define PHY_ANALOG_TXRF10_CALTXSHIFT_MSB 30
+#define PHY_ANALOG_TXRF10_CALTXSHIFT_LSB 30
+#define PHY_ANALOG_TXRF10_CALTXSHIFT_MASK 0x40000000
+#define PHY_ANALOG_TXRF10_CALTXSHIFT_GET(x) (((x) & 0x40000000) >> 30)
+#define PHY_ANALOG_TXRF10_CALTXSHIFT_SET(x) (((x) << 30) & 0x40000000)
+#define PHY_ANALOG_TXRF10_CALTXSHIFTOVR_MSB 31
+#define PHY_ANALOG_TXRF10_CALTXSHIFTOVR_LSB 31
+#define PHY_ANALOG_TXRF10_CALTXSHIFTOVR_MASK 0x80000000
+#define PHY_ANALOG_TXRF10_CALTXSHIFTOVR_GET(x) (((x) & 0x80000000) >> 31)
+#define PHY_ANALOG_TXRF10_CALTXSHIFTOVR_SET(x) (((x) << 31) & 0x80000000)
+
+/* macros for TXRF11 */
+#define PHY_ANALOG_TXRF11_ADDRESS 0x00000068
+#define PHY_ANALOG_TXRF11_OFFSET 0x00000068
+#define PHY_ANALOG_TXRF11_SPARE11_MSB 1
+#define PHY_ANALOG_TXRF11_SPARE11_LSB 0
+#define PHY_ANALOG_TXRF11_SPARE11_MASK 0x00000003
+#define PHY_ANALOG_TXRF11_SPARE11_GET(x) (((x) & 0x00000003) >> 0)
+#define PHY_ANALOG_TXRF11_SPARE11_SET(x) (((x) << 0) & 0x00000003)
+#define PHY_ANALOG_TXRF11_PWD_IR25MIXBIAS5G_MSB 4
+#define PHY_ANALOG_TXRF11_PWD_IR25MIXBIAS5G_LSB 2
+#define PHY_ANALOG_TXRF11_PWD_IR25MIXBIAS5G_MASK 0x0000001c
+#define PHY_ANALOG_TXRF11_PWD_IR25MIXBIAS5G_GET(x) (((x) & 0x0000001c) >> 2)
+#define PHY_ANALOG_TXRF11_PWD_IR25MIXBIAS5G_SET(x) (((x) << 2) & 0x0000001c)
+#define PHY_ANALOG_TXRF11_PWD_IR25MIXDIV5G_MSB 7
+#define PHY_ANALOG_TXRF11_PWD_IR25MIXDIV5G_LSB 5
+#define PHY_ANALOG_TXRF11_PWD_IR25MIXDIV5G_MASK 0x000000e0
+#define PHY_ANALOG_TXRF11_PWD_IR25MIXDIV5G_GET(x) (((x) & 0x000000e0) >> 5)
+#define PHY_ANALOG_TXRF11_PWD_IR25MIXDIV5G_SET(x) (((x) << 5) & 0x000000e0)
+#define PHY_ANALOG_TXRF11_PWD_IR25PA2G_MSB 10
+#define PHY_ANALOG_TXRF11_PWD_IR25PA2G_LSB 8
+#define PHY_ANALOG_TXRF11_PWD_IR25PA2G_MASK 0x00000700
+#define PHY_ANALOG_TXRF11_PWD_IR25PA2G_GET(x) (((x) & 0x00000700) >> 8)
+#define PHY_ANALOG_TXRF11_PWD_IR25PA2G_SET(x) (((x) << 8) & 0x00000700)
+#define PHY_ANALOG_TXRF11_PWD_IR25MIXBIAS2G_MSB 13
+#define PHY_ANALOG_TXRF11_PWD_IR25MIXBIAS2G_LSB 11
+#define PHY_ANALOG_TXRF11_PWD_IR25MIXBIAS2G_MASK 0x00003800
+#define PHY_ANALOG_TXRF11_PWD_IR25MIXBIAS2G_GET(x) (((x) & 0x00003800) >> 11)
+#define PHY_ANALOG_TXRF11_PWD_IR25MIXBIAS2G_SET(x) (((x) << 11) & 0x00003800)
+#define PHY_ANALOG_TXRF11_PWD_IR25MIXDIV2G_MSB 16
+#define PHY_ANALOG_TXRF11_PWD_IR25MIXDIV2G_LSB 14
+#define PHY_ANALOG_TXRF11_PWD_IR25MIXDIV2G_MASK 0x0001c000
+#define PHY_ANALOG_TXRF11_PWD_IR25MIXDIV2G_GET(x) (((x) & 0x0001c000) >> 14)
+#define PHY_ANALOG_TXRF11_PWD_IR25MIXDIV2G_SET(x) (((x) << 14) & 0x0001c000)
+#define PHY_ANALOG_TXRF11_PWD_ICSPARE_MSB 19
+#define PHY_ANALOG_TXRF11_PWD_ICSPARE_LSB 17
+#define PHY_ANALOG_TXRF11_PWD_ICSPARE_MASK 0x000e0000
+#define PHY_ANALOG_TXRF11_PWD_ICSPARE_GET(x) (((x) & 0x000e0000) >> 17)
+#define PHY_ANALOG_TXRF11_PWD_ICSPARE_SET(x) (((x) << 17) & 0x000e0000)
+#define PHY_ANALOG_TXRF11_PWD_IC25PA5G2_MSB 22
+#define PHY_ANALOG_TXRF11_PWD_IC25PA5G2_LSB 20
+#define PHY_ANALOG_TXRF11_PWD_IC25PA5G2_MASK 0x00700000
+#define PHY_ANALOG_TXRF11_PWD_IC25PA5G2_GET(x) (((x) & 0x00700000) >> 20)
+#define PHY_ANALOG_TXRF11_PWD_IC25PA5G2_SET(x) (((x) << 20) & 0x00700000)
+#define PHY_ANALOG_TXRF11_PWD_IC25PA5G1_MSB 25
+#define PHY_ANALOG_TXRF11_PWD_IC25PA5G1_LSB 23
+#define PHY_ANALOG_TXRF11_PWD_IC25PA5G1_MASK 0x03800000
+#define PHY_ANALOG_TXRF11_PWD_IC25PA5G1_GET(x) (((x) & 0x03800000) >> 23)
+#define PHY_ANALOG_TXRF11_PWD_IC25PA5G1_SET(x) (((x) << 23) & 0x03800000)
+#define PHY_ANALOG_TXRF11_PWD_IC25MIXBUF5G_MSB 28
+#define PHY_ANALOG_TXRF11_PWD_IC25MIXBUF5G_LSB 26
+#define PHY_ANALOG_TXRF11_PWD_IC25MIXBUF5G_MASK 0x1c000000
+#define PHY_ANALOG_TXRF11_PWD_IC25MIXBUF5G_GET(x) (((x) & 0x1c000000) >> 26)
+#define PHY_ANALOG_TXRF11_PWD_IC25MIXBUF5G_SET(x) (((x) << 26) & 0x1c000000)
+#define PHY_ANALOG_TXRF11_PWD_IC25PA2G_MSB 31
+#define PHY_ANALOG_TXRF11_PWD_IC25PA2G_LSB 29
+#define PHY_ANALOG_TXRF11_PWD_IC25PA2G_MASK 0xe0000000
+#define PHY_ANALOG_TXRF11_PWD_IC25PA2G_GET(x) (((x) & 0xe0000000) >> 29)
+#define PHY_ANALOG_TXRF11_PWD_IC25PA2G_SET(x) (((x) << 29) & 0xe0000000)
+
+/* macros for TXRF12 */
+#define PHY_ANALOG_TXRF12_ADDRESS 0x0000006c
+#define PHY_ANALOG_TXRF12_OFFSET 0x0000006c
+#define PHY_ANALOG_TXRF12_SPARE12_2_MSB 7
+#define PHY_ANALOG_TXRF12_SPARE12_2_LSB 0
+#define PHY_ANALOG_TXRF12_SPARE12_2_MASK 0x000000ff
+#define PHY_ANALOG_TXRF12_SPARE12_2_GET(x) (((x) & 0x000000ff) >> 0)
+#define PHY_ANALOG_TXRF12_SPARE12_1_MSB 15
+#define PHY_ANALOG_TXRF12_SPARE12_1_LSB 8
+#define PHY_ANALOG_TXRF12_SPARE12_1_MASK 0x0000ff00
+#define PHY_ANALOG_TXRF12_SPARE12_1_GET(x) (((x) & 0x0000ff00) >> 8)
+#define PHY_ANALOG_TXRF12_SPARE12_1_SET(x) (((x) << 8) & 0x0000ff00)
+#define PHY_ANALOG_TXRF12_ATBSEL5G_MSB 19
+#define PHY_ANALOG_TXRF12_ATBSEL5G_LSB 16
+#define PHY_ANALOG_TXRF12_ATBSEL5G_MASK 0x000f0000
+#define PHY_ANALOG_TXRF12_ATBSEL5G_GET(x) (((x) & 0x000f0000) >> 16)
+#define PHY_ANALOG_TXRF12_ATBSEL5G_SET(x) (((x) << 16) & 0x000f0000)
+#define PHY_ANALOG_TXRF12_ATBSEL2G_MSB 22
+#define PHY_ANALOG_TXRF12_ATBSEL2G_LSB 20
+#define PHY_ANALOG_TXRF12_ATBSEL2G_MASK 0x00700000
+#define PHY_ANALOG_TXRF12_ATBSEL2G_GET(x) (((x) & 0x00700000) >> 20)
+#define PHY_ANALOG_TXRF12_ATBSEL2G_SET(x) (((x) << 20) & 0x00700000)
+#define PHY_ANALOG_TXRF12_PWD_IRSPARE_MSB 25
+#define PHY_ANALOG_TXRF12_PWD_IRSPARE_LSB 23
+#define PHY_ANALOG_TXRF12_PWD_IRSPARE_MASK 0x03800000
+#define PHY_ANALOG_TXRF12_PWD_IRSPARE_GET(x) (((x) & 0x03800000) >> 23)
+#define PHY_ANALOG_TXRF12_PWD_IRSPARE_SET(x) (((x) << 23) & 0x03800000)
+#define PHY_ANALOG_TXRF12_PWD_IR25PA5G2_MSB 28
+#define PHY_ANALOG_TXRF12_PWD_IR25PA5G2_LSB 26
+#define PHY_ANALOG_TXRF12_PWD_IR25PA5G2_MASK 0x1c000000
+#define PHY_ANALOG_TXRF12_PWD_IR25PA5G2_GET(x) (((x) & 0x1c000000) >> 26)
+#define PHY_ANALOG_TXRF12_PWD_IR25PA5G2_SET(x) (((x) << 26) & 0x1c000000)
+#define PHY_ANALOG_TXRF12_PWD_IR25PA5G1_MSB 31
+#define PHY_ANALOG_TXRF12_PWD_IR25PA5G1_LSB 29
+#define PHY_ANALOG_TXRF12_PWD_IR25PA5G1_MASK 0xe0000000
+#define PHY_ANALOG_TXRF12_PWD_IR25PA5G1_GET(x) (((x) & 0xe0000000) >> 29)
+#define PHY_ANALOG_TXRF12_PWD_IR25PA5G1_SET(x) (((x) << 29) & 0xe0000000)
+
+/* macros for SYNTH1 */
+#define PHY_ANALOG_SYNTH1_ADDRESS 0x00000080
+#define PHY_ANALOG_SYNTH1_OFFSET 0x00000080
+#define PHY_ANALOG_SYNTH1_SEL_VCMONABUS_MSB 2
+#define PHY_ANALOG_SYNTH1_SEL_VCMONABUS_LSB 0
+#define PHY_ANALOG_SYNTH1_SEL_VCMONABUS_MASK 0x00000007
+#define PHY_ANALOG_SYNTH1_SEL_VCMONABUS_GET(x) (((x) & 0x00000007) >> 0)
+#define PHY_ANALOG_SYNTH1_SEL_VCMONABUS_SET(x) (((x) << 0) & 0x00000007)
+#define PHY_ANALOG_SYNTH1_SEL_VCOABUS_MSB 5
+#define PHY_ANALOG_SYNTH1_SEL_VCOABUS_LSB 3
+#define PHY_ANALOG_SYNTH1_SEL_VCOABUS_MASK 0x00000038
+#define PHY_ANALOG_SYNTH1_SEL_VCOABUS_GET(x) (((x) & 0x00000038) >> 3)
+#define PHY_ANALOG_SYNTH1_SEL_VCOABUS_SET(x) (((x) << 3) & 0x00000038)
+#define PHY_ANALOG_SYNTH1_MONITOR_SYNTHLOCKVCOK_MSB 6
+#define PHY_ANALOG_SYNTH1_MONITOR_SYNTHLOCKVCOK_LSB 6
+#define PHY_ANALOG_SYNTH1_MONITOR_SYNTHLOCKVCOK_MASK 0x00000040
+#define PHY_ANALOG_SYNTH1_MONITOR_SYNTHLOCKVCOK_GET(x) (((x) & 0x00000040) >> 6)
+#define PHY_ANALOG_SYNTH1_MONITOR_SYNTHLOCKVCOK_SET(x) (((x) << 6) & 0x00000040)
+#define PHY_ANALOG_SYNTH1_MONITOR_VC2LOW_MSB 7
+#define PHY_ANALOG_SYNTH1_MONITOR_VC2LOW_LSB 7
+#define PHY_ANALOG_SYNTH1_MONITOR_VC2LOW_MASK 0x00000080
+#define PHY_ANALOG_SYNTH1_MONITOR_VC2LOW_GET(x) (((x) & 0x00000080) >> 7)
+#define PHY_ANALOG_SYNTH1_MONITOR_VC2LOW_SET(x) (((x) << 7) & 0x00000080)
+#define PHY_ANALOG_SYNTH1_MONITOR_VC2HIGH_MSB 8
+#define PHY_ANALOG_SYNTH1_MONITOR_VC2HIGH_LSB 8
+#define PHY_ANALOG_SYNTH1_MONITOR_VC2HIGH_MASK 0x00000100
+#define PHY_ANALOG_SYNTH1_MONITOR_VC2HIGH_GET(x) (((x) & 0x00000100) >> 8)
+#define PHY_ANALOG_SYNTH1_MONITOR_VC2HIGH_SET(x) (((x) << 8) & 0x00000100)
+#define PHY_ANALOG_SYNTH1_MONITOR_FB_DIV2_MSB 9
+#define PHY_ANALOG_SYNTH1_MONITOR_FB_DIV2_LSB 9
+#define PHY_ANALOG_SYNTH1_MONITOR_FB_DIV2_MASK 0x00000200
+#define PHY_ANALOG_SYNTH1_MONITOR_FB_DIV2_GET(x) (((x) & 0x00000200) >> 9)
+#define PHY_ANALOG_SYNTH1_MONITOR_FB_DIV2_SET(x) (((x) << 9) & 0x00000200)
+#define PHY_ANALOG_SYNTH1_MONITOR_REF_MSB 10
+#define PHY_ANALOG_SYNTH1_MONITOR_REF_LSB 10
+#define PHY_ANALOG_SYNTH1_MONITOR_REF_MASK 0x00000400
+#define PHY_ANALOG_SYNTH1_MONITOR_REF_GET(x) (((x) & 0x00000400) >> 10)
+#define PHY_ANALOG_SYNTH1_MONITOR_REF_SET(x) (((x) << 10) & 0x00000400)
+#define PHY_ANALOG_SYNTH1_MONITOR_FB_MSB 11
+#define PHY_ANALOG_SYNTH1_MONITOR_FB_LSB 11
+#define PHY_ANALOG_SYNTH1_MONITOR_FB_MASK 0x00000800
+#define PHY_ANALOG_SYNTH1_MONITOR_FB_GET(x) (((x) & 0x00000800) >> 11)
+#define PHY_ANALOG_SYNTH1_MONITOR_FB_SET(x) (((x) << 11) & 0x00000800)
+#define PHY_ANALOG_SYNTH1_SEVENBITVCOCAP_MSB 12
+#define PHY_ANALOG_SYNTH1_SEVENBITVCOCAP_LSB 12
+#define PHY_ANALOG_SYNTH1_SEVENBITVCOCAP_MASK 0x00001000
+#define PHY_ANALOG_SYNTH1_SEVENBITVCOCAP_GET(x) (((x) & 0x00001000) >> 12)
+#define PHY_ANALOG_SYNTH1_SEVENBITVCOCAP_SET(x) (((x) << 12) & 0x00001000)
+#define PHY_ANALOG_SYNTH1_PWUP_PD_MSB 15
+#define PHY_ANALOG_SYNTH1_PWUP_PD_LSB 13
+#define PHY_ANALOG_SYNTH1_PWUP_PD_MASK 0x0000e000
+#define PHY_ANALOG_SYNTH1_PWUP_PD_GET(x) (((x) & 0x0000e000) >> 13)
+#define PHY_ANALOG_SYNTH1_PWUP_PD_SET(x) (((x) << 13) & 0x0000e000)
+#define PHY_ANALOG_SYNTH1_PWD_VCOBUF_MSB 16
+#define PHY_ANALOG_SYNTH1_PWD_VCOBUF_LSB 16
+#define PHY_ANALOG_SYNTH1_PWD_VCOBUF_MASK 0x00010000
+#define PHY_ANALOG_SYNTH1_PWD_VCOBUF_GET(x) (((x) & 0x00010000) >> 16)
+#define PHY_ANALOG_SYNTH1_PWD_VCOBUF_SET(x) (((x) << 16) & 0x00010000)
+#define PHY_ANALOG_SYNTH1_VCOBUFGAIN_MSB 18
+#define PHY_ANALOG_SYNTH1_VCOBUFGAIN_LSB 17
+#define PHY_ANALOG_SYNTH1_VCOBUFGAIN_MASK 0x00060000
+#define PHY_ANALOG_SYNTH1_VCOBUFGAIN_GET(x) (((x) & 0x00060000) >> 17)
+#define PHY_ANALOG_SYNTH1_VCOBUFGAIN_SET(x) (((x) << 17) & 0x00060000)
+#define PHY_ANALOG_SYNTH1_VCOREGLEVEL_MSB 20
+#define PHY_ANALOG_SYNTH1_VCOREGLEVEL_LSB 19
+#define PHY_ANALOG_SYNTH1_VCOREGLEVEL_MASK 0x00180000
+#define PHY_ANALOG_SYNTH1_VCOREGLEVEL_GET(x) (((x) & 0x00180000) >> 19)
+#define PHY_ANALOG_SYNTH1_VCOREGLEVEL_SET(x) (((x) << 19) & 0x00180000)
+#define PHY_ANALOG_SYNTH1_VCOREGBYPASS_MSB 21
+#define PHY_ANALOG_SYNTH1_VCOREGBYPASS_LSB 21
+#define PHY_ANALOG_SYNTH1_VCOREGBYPASS_MASK 0x00200000
+#define PHY_ANALOG_SYNTH1_VCOREGBYPASS_GET(x) (((x) & 0x00200000) >> 21)
+#define PHY_ANALOG_SYNTH1_VCOREGBYPASS_SET(x) (((x) << 21) & 0x00200000)
+#define PHY_ANALOG_SYNTH1_PWUP_LOREF_MSB 22
+#define PHY_ANALOG_SYNTH1_PWUP_LOREF_LSB 22
+#define PHY_ANALOG_SYNTH1_PWUP_LOREF_MASK 0x00400000
+#define PHY_ANALOG_SYNTH1_PWUP_LOREF_GET(x) (((x) & 0x00400000) >> 22)
+#define PHY_ANALOG_SYNTH1_PWUP_LOREF_SET(x) (((x) << 22) & 0x00400000)
+#define PHY_ANALOG_SYNTH1_PWD_LOMIX_MSB 23
+#define PHY_ANALOG_SYNTH1_PWD_LOMIX_LSB 23
+#define PHY_ANALOG_SYNTH1_PWD_LOMIX_MASK 0x00800000
+#define PHY_ANALOG_SYNTH1_PWD_LOMIX_GET(x) (((x) & 0x00800000) >> 23)
+#define PHY_ANALOG_SYNTH1_PWD_LOMIX_SET(x) (((x) << 23) & 0x00800000)
+#define PHY_ANALOG_SYNTH1_PWD_LODIV_MSB 24
+#define PHY_ANALOG_SYNTH1_PWD_LODIV_LSB 24
+#define PHY_ANALOG_SYNTH1_PWD_LODIV_MASK 0x01000000
+#define PHY_ANALOG_SYNTH1_PWD_LODIV_GET(x) (((x) & 0x01000000) >> 24)
+#define PHY_ANALOG_SYNTH1_PWD_LODIV_SET(x) (((x) << 24) & 0x01000000)
+#define PHY_ANALOG_SYNTH1_PWD_LOBUF5G_MSB 25
+#define PHY_ANALOG_SYNTH1_PWD_LOBUF5G_LSB 25
+#define PHY_ANALOG_SYNTH1_PWD_LOBUF5G_MASK 0x02000000
+#define PHY_ANALOG_SYNTH1_PWD_LOBUF5G_GET(x) (((x) & 0x02000000) >> 25)
+#define PHY_ANALOG_SYNTH1_PWD_LOBUF5G_SET(x) (((x) << 25) & 0x02000000)
+#define PHY_ANALOG_SYNTH1_PWD_LOBUF2G_MSB 26
+#define PHY_ANALOG_SYNTH1_PWD_LOBUF2G_LSB 26
+#define PHY_ANALOG_SYNTH1_PWD_LOBUF2G_MASK 0x04000000
+#define PHY_ANALOG_SYNTH1_PWD_LOBUF2G_GET(x) (((x) & 0x04000000) >> 26)
+#define PHY_ANALOG_SYNTH1_PWD_LOBUF2G_SET(x) (((x) << 26) & 0x04000000)
+#define PHY_ANALOG_SYNTH1_PWD_PRESC_MSB 27
+#define PHY_ANALOG_SYNTH1_PWD_PRESC_LSB 27
+#define PHY_ANALOG_SYNTH1_PWD_PRESC_MASK 0x08000000
+#define PHY_ANALOG_SYNTH1_PWD_PRESC_GET(x) (((x) & 0x08000000) >> 27)
+#define PHY_ANALOG_SYNTH1_PWD_PRESC_SET(x) (((x) << 27) & 0x08000000)
+#define PHY_ANALOG_SYNTH1_PWD_VCO_MSB 28
+#define PHY_ANALOG_SYNTH1_PWD_VCO_LSB 28
+#define PHY_ANALOG_SYNTH1_PWD_VCO_MASK 0x10000000
+#define PHY_ANALOG_SYNTH1_PWD_VCO_GET(x) (((x) & 0x10000000) >> 28)
+#define PHY_ANALOG_SYNTH1_PWD_VCO_SET(x) (((x) << 28) & 0x10000000)
+#define PHY_ANALOG_SYNTH1_PWD_VCMON_MSB 29
+#define PHY_ANALOG_SYNTH1_PWD_VCMON_LSB 29
+#define PHY_ANALOG_SYNTH1_PWD_VCMON_MASK 0x20000000
+#define PHY_ANALOG_SYNTH1_PWD_VCMON_GET(x) (((x) & 0x20000000) >> 29)
+#define PHY_ANALOG_SYNTH1_PWD_VCMON_SET(x) (((x) << 29) & 0x20000000)
+#define PHY_ANALOG_SYNTH1_PWD_CP_MSB 30
+#define PHY_ANALOG_SYNTH1_PWD_CP_LSB 30
+#define PHY_ANALOG_SYNTH1_PWD_CP_MASK 0x40000000
+#define PHY_ANALOG_SYNTH1_PWD_CP_GET(x) (((x) & 0x40000000) >> 30)
+#define PHY_ANALOG_SYNTH1_PWD_CP_SET(x) (((x) << 30) & 0x40000000)
+#define PHY_ANALOG_SYNTH1_PWD_BIAS_MSB 31
+#define PHY_ANALOG_SYNTH1_PWD_BIAS_LSB 31
+#define PHY_ANALOG_SYNTH1_PWD_BIAS_MASK 0x80000000
+#define PHY_ANALOG_SYNTH1_PWD_BIAS_GET(x) (((x) & 0x80000000) >> 31)
+#define PHY_ANALOG_SYNTH1_PWD_BIAS_SET(x) (((x) << 31) & 0x80000000)
+
+/* macros for SYNTH2 */
+#define PHY_ANALOG_SYNTH2_ADDRESS 0x00000084
+#define PHY_ANALOG_SYNTH2_OFFSET 0x00000084
+#define PHY_ANALOG_SYNTH2_CAPRANGE3_MSB 3
+#define PHY_ANALOG_SYNTH2_CAPRANGE3_LSB 0
+#define PHY_ANALOG_SYNTH2_CAPRANGE3_MASK 0x0000000f
+#define PHY_ANALOG_SYNTH2_CAPRANGE3_GET(x) (((x) & 0x0000000f) >> 0)
+#define PHY_ANALOG_SYNTH2_CAPRANGE3_SET(x) (((x) << 0) & 0x0000000f)
+#define PHY_ANALOG_SYNTH2_CAPRANGE2_MSB 7
+#define PHY_ANALOG_SYNTH2_CAPRANGE2_LSB 4
+#define PHY_ANALOG_SYNTH2_CAPRANGE2_MASK 0x000000f0
+#define PHY_ANALOG_SYNTH2_CAPRANGE2_GET(x) (((x) & 0x000000f0) >> 4)
+#define PHY_ANALOG_SYNTH2_CAPRANGE2_SET(x) (((x) << 4) & 0x000000f0)
+#define PHY_ANALOG_SYNTH2_CAPRANGE1_MSB 11
+#define PHY_ANALOG_SYNTH2_CAPRANGE1_LSB 8
+#define PHY_ANALOG_SYNTH2_CAPRANGE1_MASK 0x00000f00
+#define PHY_ANALOG_SYNTH2_CAPRANGE1_GET(x) (((x) & 0x00000f00) >> 8)
+#define PHY_ANALOG_SYNTH2_CAPRANGE1_SET(x) (((x) << 8) & 0x00000f00)
+#define PHY_ANALOG_SYNTH2_LOOPLEAKCUR_MSB 15
+#define PHY_ANALOG_SYNTH2_LOOPLEAKCUR_LSB 12
+#define PHY_ANALOG_SYNTH2_LOOPLEAKCUR_MASK 0x0000f000
+#define PHY_ANALOG_SYNTH2_LOOPLEAKCUR_GET(x) (((x) & 0x0000f000) >> 12)
+#define PHY_ANALOG_SYNTH2_LOOPLEAKCUR_SET(x) (((x) << 12) & 0x0000f000)
+#define PHY_ANALOG_SYNTH2_CPLOWLK_MSB 16
+#define PHY_ANALOG_SYNTH2_CPLOWLK_LSB 16
+#define PHY_ANALOG_SYNTH2_CPLOWLK_MASK 0x00010000
+#define PHY_ANALOG_SYNTH2_CPLOWLK_GET(x) (((x) & 0x00010000) >> 16)
+#define PHY_ANALOG_SYNTH2_CPLOWLK_SET(x) (((x) << 16) & 0x00010000)
+#define PHY_ANALOG_SYNTH2_CPSTEERING_EN_INTN_MSB 17
+#define PHY_ANALOG_SYNTH2_CPSTEERING_EN_INTN_LSB 17
+#define PHY_ANALOG_SYNTH2_CPSTEERING_EN_INTN_MASK 0x00020000
+#define PHY_ANALOG_SYNTH2_CPSTEERING_EN_INTN_GET(x) (((x) & 0x00020000) >> 17)
+#define PHY_ANALOG_SYNTH2_CPSTEERING_EN_INTN_SET(x) (((x) << 17) & 0x00020000)
+#define PHY_ANALOG_SYNTH2_CPBIAS_MSB 19
+#define PHY_ANALOG_SYNTH2_CPBIAS_LSB 18
+#define PHY_ANALOG_SYNTH2_CPBIAS_MASK 0x000c0000
+#define PHY_ANALOG_SYNTH2_CPBIAS_GET(x) (((x) & 0x000c0000) >> 18)
+#define PHY_ANALOG_SYNTH2_CPBIAS_SET(x) (((x) << 18) & 0x000c0000)
+#define PHY_ANALOG_SYNTH2_VC_LOW_REF_MSB 22
+#define PHY_ANALOG_SYNTH2_VC_LOW_REF_LSB 20
+#define PHY_ANALOG_SYNTH2_VC_LOW_REF_MASK 0x00700000
+#define PHY_ANALOG_SYNTH2_VC_LOW_REF_GET(x) (((x) & 0x00700000) >> 20)
+#define PHY_ANALOG_SYNTH2_VC_LOW_REF_SET(x) (((x) << 20) & 0x00700000)
+#define PHY_ANALOG_SYNTH2_VC_MID_REF_MSB 25
+#define PHY_ANALOG_SYNTH2_VC_MID_REF_LSB 23
+#define PHY_ANALOG_SYNTH2_VC_MID_REF_MASK 0x03800000
+#define PHY_ANALOG_SYNTH2_VC_MID_REF_GET(x) (((x) & 0x03800000) >> 23)
+#define PHY_ANALOG_SYNTH2_VC_MID_REF_SET(x) (((x) << 23) & 0x03800000)
+#define PHY_ANALOG_SYNTH2_VC_HI_REF_MSB 28
+#define PHY_ANALOG_SYNTH2_VC_HI_REF_LSB 26
+#define PHY_ANALOG_SYNTH2_VC_HI_REF_MASK 0x1c000000
+#define PHY_ANALOG_SYNTH2_VC_HI_REF_GET(x) (((x) & 0x1c000000) >> 26)
+#define PHY_ANALOG_SYNTH2_VC_HI_REF_SET(x) (((x) << 26) & 0x1c000000)
+#define PHY_ANALOG_SYNTH2_VC_CAL_REF_MSB 31
+#define PHY_ANALOG_SYNTH2_VC_CAL_REF_LSB 29
+#define PHY_ANALOG_SYNTH2_VC_CAL_REF_MASK 0xe0000000
+#define PHY_ANALOG_SYNTH2_VC_CAL_REF_GET(x) (((x) & 0xe0000000) >> 29)
+#define PHY_ANALOG_SYNTH2_VC_CAL_REF_SET(x) (((x) << 29) & 0xe0000000)
+
+/* macros for SYNTH3 */
+#define PHY_ANALOG_SYNTH3_ADDRESS 0x00000088
+#define PHY_ANALOG_SYNTH3_OFFSET 0x00000088
+#define PHY_ANALOG_SYNTH3_WAIT_VC_CHECK_MSB 5
+#define PHY_ANALOG_SYNTH3_WAIT_VC_CHECK_LSB 0
+#define PHY_ANALOG_SYNTH3_WAIT_VC_CHECK_MASK 0x0000003f
+#define PHY_ANALOG_SYNTH3_WAIT_VC_CHECK_GET(x) (((x) & 0x0000003f) >> 0)
+#define PHY_ANALOG_SYNTH3_WAIT_VC_CHECK_SET(x) (((x) << 0) & 0x0000003f)
+#define PHY_ANALOG_SYNTH3_WAIT_CAL_LIN_MSB 11
+#define PHY_ANALOG_SYNTH3_WAIT_CAL_LIN_LSB 6
+#define PHY_ANALOG_SYNTH3_WAIT_CAL_LIN_MASK 0x00000fc0
+#define PHY_ANALOG_SYNTH3_WAIT_CAL_LIN_GET(x) (((x) & 0x00000fc0) >> 6)
+#define PHY_ANALOG_SYNTH3_WAIT_CAL_LIN_SET(x) (((x) << 6) & 0x00000fc0)
+#define PHY_ANALOG_SYNTH3_WAIT_CAL_BIN_MSB 17
+#define PHY_ANALOG_SYNTH3_WAIT_CAL_BIN_LSB 12
+#define PHY_ANALOG_SYNTH3_WAIT_CAL_BIN_MASK 0x0003f000
+#define PHY_ANALOG_SYNTH3_WAIT_CAL_BIN_GET(x) (((x) & 0x0003f000) >> 12)
+#define PHY_ANALOG_SYNTH3_WAIT_CAL_BIN_SET(x) (((x) << 12) & 0x0003f000)
+#define PHY_ANALOG_SYNTH3_WAIT_PWRUP_MSB 23
+#define PHY_ANALOG_SYNTH3_WAIT_PWRUP_LSB 18
+#define PHY_ANALOG_SYNTH3_WAIT_PWRUP_MASK 0x00fc0000
+#define PHY_ANALOG_SYNTH3_WAIT_PWRUP_GET(x) (((x) & 0x00fc0000) >> 18)
+#define PHY_ANALOG_SYNTH3_WAIT_PWRUP_SET(x) (((x) << 18) & 0x00fc0000)
+#define PHY_ANALOG_SYNTH3_WAIT_SHORTR_PWRUP_MSB 29
+#define PHY_ANALOG_SYNTH3_WAIT_SHORTR_PWRUP_LSB 24
+#define PHY_ANALOG_SYNTH3_WAIT_SHORTR_PWRUP_MASK 0x3f000000
+#define PHY_ANALOG_SYNTH3_WAIT_SHORTR_PWRUP_GET(x) (((x) & 0x3f000000) >> 24)
+#define PHY_ANALOG_SYNTH3_WAIT_SHORTR_PWRUP_SET(x) (((x) << 24) & 0x3f000000)
+#define PHY_ANALOG_SYNTH3_SEL_CLK_DIV2_MSB 30
+#define PHY_ANALOG_SYNTH3_SEL_CLK_DIV2_LSB 30
+#define PHY_ANALOG_SYNTH3_SEL_CLK_DIV2_MASK 0x40000000
+#define PHY_ANALOG_SYNTH3_SEL_CLK_DIV2_GET(x) (((x) & 0x40000000) >> 30)
+#define PHY_ANALOG_SYNTH3_SEL_CLK_DIV2_SET(x) (((x) << 30) & 0x40000000)
+#define PHY_ANALOG_SYNTH3_DIS_CLK_XTAL_MSB 31
+#define PHY_ANALOG_SYNTH3_DIS_CLK_XTAL_LSB 31
+#define PHY_ANALOG_SYNTH3_DIS_CLK_XTAL_MASK 0x80000000
+#define PHY_ANALOG_SYNTH3_DIS_CLK_XTAL_GET(x) (((x) & 0x80000000) >> 31)
+#define PHY_ANALOG_SYNTH3_DIS_CLK_XTAL_SET(x) (((x) << 31) & 0x80000000)
+
+/* macros for SYNTH4 */
+#define PHY_ANALOG_SYNTH4_ADDRESS 0x0000008c
+#define PHY_ANALOG_SYNTH4_OFFSET 0x0000008c
+#define PHY_ANALOG_SYNTH4_PS_SINGLE_PULSE_MSB 0
+#define PHY_ANALOG_SYNTH4_PS_SINGLE_PULSE_LSB 0
+#define PHY_ANALOG_SYNTH4_PS_SINGLE_PULSE_MASK 0x00000001
+#define PHY_ANALOG_SYNTH4_PS_SINGLE_PULSE_GET(x) (((x) & 0x00000001) >> 0)
+#define PHY_ANALOG_SYNTH4_PS_SINGLE_PULSE_SET(x) (((x) << 0) & 0x00000001)
+#define PHY_ANALOG_SYNTH4_LONGSHIFTSEL_MSB 1
+#define PHY_ANALOG_SYNTH4_LONGSHIFTSEL_LSB 1
+#define PHY_ANALOG_SYNTH4_LONGSHIFTSEL_MASK 0x00000002
+#define PHY_ANALOG_SYNTH4_LONGSHIFTSEL_GET(x) (((x) & 0x00000002) >> 1)
+#define PHY_ANALOG_SYNTH4_LONGSHIFTSEL_SET(x) (((x) << 1) & 0x00000002)
+#define PHY_ANALOG_SYNTH4_LOBUF5GTUNE_OVR_MSB 3
+#define PHY_ANALOG_SYNTH4_LOBUF5GTUNE_OVR_LSB 2
+#define PHY_ANALOG_SYNTH4_LOBUF5GTUNE_OVR_MASK 0x0000000c
+#define PHY_ANALOG_SYNTH4_LOBUF5GTUNE_OVR_GET(x) (((x) & 0x0000000c) >> 2)
+#define PHY_ANALOG_SYNTH4_LOBUF5GTUNE_OVR_SET(x) (((x) << 2) & 0x0000000c)
+#define PHY_ANALOG_SYNTH4_FORCE_LOBUF5GTUNE_MSB 4
+#define PHY_ANALOG_SYNTH4_FORCE_LOBUF5GTUNE_LSB 4
+#define PHY_ANALOG_SYNTH4_FORCE_LOBUF5GTUNE_MASK 0x00000010
+#define PHY_ANALOG_SYNTH4_FORCE_LOBUF5GTUNE_GET(x) (((x) & 0x00000010) >> 4)
+#define PHY_ANALOG_SYNTH4_FORCE_LOBUF5GTUNE_SET(x) (((x) << 4) & 0x00000010)
+#define PHY_ANALOG_SYNTH4_PSCOUNT_FBSEL_MSB 5
+#define PHY_ANALOG_SYNTH4_PSCOUNT_FBSEL_LSB 5
+#define PHY_ANALOG_SYNTH4_PSCOUNT_FBSEL_MASK 0x00000020
+#define PHY_ANALOG_SYNTH4_PSCOUNT_FBSEL_GET(x) (((x) & 0x00000020) >> 5)
+#define PHY_ANALOG_SYNTH4_PSCOUNT_FBSEL_SET(x) (((x) << 5) & 0x00000020)
+#define PHY_ANALOG_SYNTH4_SDM_DITHER_MSB 7
+#define PHY_ANALOG_SYNTH4_SDM_DITHER_LSB 6
+#define PHY_ANALOG_SYNTH4_SDM_DITHER_MASK 0x000000c0
+#define PHY_ANALOG_SYNTH4_SDM_DITHER_GET(x) (((x) & 0x000000c0) >> 6)
+#define PHY_ANALOG_SYNTH4_SDM_DITHER_SET(x) (((x) << 6) & 0x000000c0)
+#define PHY_ANALOG_SYNTH4_SDM_MODE_MSB 8
+#define PHY_ANALOG_SYNTH4_SDM_MODE_LSB 8
+#define PHY_ANALOG_SYNTH4_SDM_MODE_MASK 0x00000100
+#define PHY_ANALOG_SYNTH4_SDM_MODE_GET(x) (((x) & 0x00000100) >> 8)
+#define PHY_ANALOG_SYNTH4_SDM_MODE_SET(x) (((x) << 8) & 0x00000100)
+#define PHY_ANALOG_SYNTH4_SDM_DISABLE_MSB 9
+#define PHY_ANALOG_SYNTH4_SDM_DISABLE_LSB 9
+#define PHY_ANALOG_SYNTH4_SDM_DISABLE_MASK 0x00000200
+#define PHY_ANALOG_SYNTH4_SDM_DISABLE_GET(x) (((x) & 0x00000200) >> 9)
+#define PHY_ANALOG_SYNTH4_SDM_DISABLE_SET(x) (((x) << 9) & 0x00000200)
+#define PHY_ANALOG_SYNTH4_RESET_PRESC_MSB 10
+#define PHY_ANALOG_SYNTH4_RESET_PRESC_LSB 10
+#define PHY_ANALOG_SYNTH4_RESET_PRESC_MASK 0x00000400
+#define PHY_ANALOG_SYNTH4_RESET_PRESC_GET(x) (((x) & 0x00000400) >> 10)
+#define PHY_ANALOG_SYNTH4_RESET_PRESC_SET(x) (((x) << 10) & 0x00000400)
+#define PHY_ANALOG_SYNTH4_PRESCSEL_MSB 12
+#define PHY_ANALOG_SYNTH4_PRESCSEL_LSB 11
+#define PHY_ANALOG_SYNTH4_PRESCSEL_MASK 0x00001800
+#define PHY_ANALOG_SYNTH4_PRESCSEL_GET(x) (((x) & 0x00001800) >> 11)
+#define PHY_ANALOG_SYNTH4_PRESCSEL_SET(x) (((x) << 11) & 0x00001800)
+#define PHY_ANALOG_SYNTH4_PFD_DISABLE_MSB 13
+#define PHY_ANALOG_SYNTH4_PFD_DISABLE_LSB 13
+#define PHY_ANALOG_SYNTH4_PFD_DISABLE_MASK 0x00002000
+#define PHY_ANALOG_SYNTH4_PFD_DISABLE_GET(x) (((x) & 0x00002000) >> 13)
+#define PHY_ANALOG_SYNTH4_PFD_DISABLE_SET(x) (((x) << 13) & 0x00002000)
+#define PHY_ANALOG_SYNTH4_PFDDELAY_FRACN_MSB 14
+#define PHY_ANALOG_SYNTH4_PFDDELAY_FRACN_LSB 14
+#define PHY_ANALOG_SYNTH4_PFDDELAY_FRACN_MASK 0x00004000
+#define PHY_ANALOG_SYNTH4_PFDDELAY_FRACN_GET(x) (((x) & 0x00004000) >> 14)
+#define PHY_ANALOG_SYNTH4_PFDDELAY_FRACN_SET(x) (((x) << 14) & 0x00004000)
+#define PHY_ANALOG_SYNTH4_FORCE_LO_ON_MSB 15
+#define PHY_ANALOG_SYNTH4_FORCE_LO_ON_LSB 15
+#define PHY_ANALOG_SYNTH4_FORCE_LO_ON_MASK 0x00008000
+#define PHY_ANALOG_SYNTH4_FORCE_LO_ON_GET(x) (((x) & 0x00008000) >> 15)
+#define PHY_ANALOG_SYNTH4_FORCE_LO_ON_SET(x) (((x) << 15) & 0x00008000)
+#define PHY_ANALOG_SYNTH4_CLKXTAL_EDGE_SEL_MSB 16
+#define PHY_ANALOG_SYNTH4_CLKXTAL_EDGE_SEL_LSB 16
+#define PHY_ANALOG_SYNTH4_CLKXTAL_EDGE_SEL_MASK 0x00010000
+#define PHY_ANALOG_SYNTH4_CLKXTAL_EDGE_SEL_GET(x) (((x) & 0x00010000) >> 16)
+#define PHY_ANALOG_SYNTH4_CLKXTAL_EDGE_SEL_SET(x) (((x) << 16) & 0x00010000)
+#define PHY_ANALOG_SYNTH4_VCOCAPPULLUP_MSB 17
+#define PHY_ANALOG_SYNTH4_VCOCAPPULLUP_LSB 17
+#define PHY_ANALOG_SYNTH4_VCOCAPPULLUP_MASK 0x00020000
+#define PHY_ANALOG_SYNTH4_VCOCAPPULLUP_GET(x) (((x) & 0x00020000) >> 17)
+#define PHY_ANALOG_SYNTH4_VCOCAPPULLUP_SET(x) (((x) << 17) & 0x00020000)
+#define PHY_ANALOG_SYNTH4_VCOCAP_OVR_MSB 25
+#define PHY_ANALOG_SYNTH4_VCOCAP_OVR_LSB 18
+#define PHY_ANALOG_SYNTH4_VCOCAP_OVR_MASK 0x03fc0000
+#define PHY_ANALOG_SYNTH4_VCOCAP_OVR_GET(x) (((x) & 0x03fc0000) >> 18)
+#define PHY_ANALOG_SYNTH4_VCOCAP_OVR_SET(x) (((x) << 18) & 0x03fc0000)
+#define PHY_ANALOG_SYNTH4_FORCE_VCOCAP_MSB 26
+#define PHY_ANALOG_SYNTH4_FORCE_VCOCAP_LSB 26
+#define PHY_ANALOG_SYNTH4_FORCE_VCOCAP_MASK 0x04000000
+#define PHY_ANALOG_SYNTH4_FORCE_VCOCAP_GET(x) (((x) & 0x04000000) >> 26)
+#define PHY_ANALOG_SYNTH4_FORCE_VCOCAP_SET(x) (((x) << 26) & 0x04000000)
+#define PHY_ANALOG_SYNTH4_FORCE_PINVC_MSB 27
+#define PHY_ANALOG_SYNTH4_FORCE_PINVC_LSB 27
+#define PHY_ANALOG_SYNTH4_FORCE_PINVC_MASK 0x08000000
+#define PHY_ANALOG_SYNTH4_FORCE_PINVC_GET(x) (((x) & 0x08000000) >> 27)
+#define PHY_ANALOG_SYNTH4_FORCE_PINVC_SET(x) (((x) << 27) & 0x08000000)
+#define PHY_ANALOG_SYNTH4_SHORTR_UNTIL_LOCKED_MSB 28
+#define PHY_ANALOG_SYNTH4_SHORTR_UNTIL_LOCKED_LSB 28
+#define PHY_ANALOG_SYNTH4_SHORTR_UNTIL_LOCKED_MASK 0x10000000
+#define PHY_ANALOG_SYNTH4_SHORTR_UNTIL_LOCKED_GET(x) (((x) & 0x10000000) >> 28)
+#define PHY_ANALOG_SYNTH4_SHORTR_UNTIL_LOCKED_SET(x) (((x) << 28) & 0x10000000)
+#define PHY_ANALOG_SYNTH4_ALWAYS_SHORTR_MSB 29
+#define PHY_ANALOG_SYNTH4_ALWAYS_SHORTR_LSB 29
+#define PHY_ANALOG_SYNTH4_ALWAYS_SHORTR_MASK 0x20000000
+#define PHY_ANALOG_SYNTH4_ALWAYS_SHORTR_GET(x) (((x) & 0x20000000) >> 29)
+#define PHY_ANALOG_SYNTH4_ALWAYS_SHORTR_SET(x) (((x) << 29) & 0x20000000)
+#define PHY_ANALOG_SYNTH4_DIS_LOSTVC_MSB 30
+#define PHY_ANALOG_SYNTH4_DIS_LOSTVC_LSB 30
+#define PHY_ANALOG_SYNTH4_DIS_LOSTVC_MASK 0x40000000
+#define PHY_ANALOG_SYNTH4_DIS_LOSTVC_GET(x) (((x) & 0x40000000) >> 30)
+#define PHY_ANALOG_SYNTH4_DIS_LOSTVC_SET(x) (((x) << 30) & 0x40000000)
+#define PHY_ANALOG_SYNTH4_DIS_LIN_CAPSEARCH_MSB 31
+#define PHY_ANALOG_SYNTH4_DIS_LIN_CAPSEARCH_LSB 31
+#define PHY_ANALOG_SYNTH4_DIS_LIN_CAPSEARCH_MASK 0x80000000
+#define PHY_ANALOG_SYNTH4_DIS_LIN_CAPSEARCH_GET(x) (((x) & 0x80000000) >> 31)
+#define PHY_ANALOG_SYNTH4_DIS_LIN_CAPSEARCH_SET(x) (((x) << 31) & 0x80000000)
+
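+/*
+ * Illustrative usage sketch (not part of the generated header): the
+ * per-field _GET/_SET macros above are intended to be combined with the
+ * corresponding _MASK in a read-modify-write sequence.  reg_read32() and
+ * reg_write32() here are hypothetical stand-ins for whatever 32-bit
+ * register accessors the surrounding driver actually provides.
+ *
+ *	u32 val = reg_read32(PHY_ANALOG_SYNTH4_ADDRESS);
+ *	val &= ~(PHY_ANALOG_SYNTH4_VCOCAP_OVR_MASK |
+ *		 PHY_ANALOG_SYNTH4_FORCE_VCOCAP_MASK);
+ *	val |= PHY_ANALOG_SYNTH4_VCOCAP_OVR_SET(cap) |
+ *	       PHY_ANALOG_SYNTH4_FORCE_VCOCAP_SET(1);
+ *	reg_write32(PHY_ANALOG_SYNTH4_ADDRESS, val);
+ */
+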
+/* macros for SYNTH5 */
+#define PHY_ANALOG_SYNTH5_ADDRESS 0x00000090
+#define PHY_ANALOG_SYNTH5_OFFSET 0x00000090
+#define PHY_ANALOG_SYNTH5_VCOBIAS_MSB 1
+#define PHY_ANALOG_SYNTH5_VCOBIAS_LSB 0
+#define PHY_ANALOG_SYNTH5_VCOBIAS_MASK 0x00000003
+#define PHY_ANALOG_SYNTH5_VCOBIAS_GET(x) (((x) & 0x00000003) >> 0)
+#define PHY_ANALOG_SYNTH5_VCOBIAS_SET(x) (((x) << 0) & 0x00000003)
+#define PHY_ANALOG_SYNTH5_PWDB_ICLOBUF5G50_MSB 4
+#define PHY_ANALOG_SYNTH5_PWDB_ICLOBUF5G50_LSB 2
+#define PHY_ANALOG_SYNTH5_PWDB_ICLOBUF5G50_MASK 0x0000001c
+#define PHY_ANALOG_SYNTH5_PWDB_ICLOBUF5G50_GET(x) (((x) & 0x0000001c) >> 2)
+#define PHY_ANALOG_SYNTH5_PWDB_ICLOBUF5G50_SET(x) (((x) << 2) & 0x0000001c)
+#define PHY_ANALOG_SYNTH5_PWDB_ICLOBUF2G50_MSB 7
+#define PHY_ANALOG_SYNTH5_PWDB_ICLOBUF2G50_LSB 5
+#define PHY_ANALOG_SYNTH5_PWDB_ICLOBUF2G50_MASK 0x000000e0
+#define PHY_ANALOG_SYNTH5_PWDB_ICLOBUF2G50_GET(x) (((x) & 0x000000e0) >> 5)
+#define PHY_ANALOG_SYNTH5_PWDB_ICLOBUF2G50_SET(x) (((x) << 5) & 0x000000e0)
+#define PHY_ANALOG_SYNTH5_PWDB_ICVCO25_MSB 10
+#define PHY_ANALOG_SYNTH5_PWDB_ICVCO25_LSB 8
+#define PHY_ANALOG_SYNTH5_PWDB_ICVCO25_MASK 0x00000700
+#define PHY_ANALOG_SYNTH5_PWDB_ICVCO25_GET(x) (((x) & 0x00000700) >> 8)
+#define PHY_ANALOG_SYNTH5_PWDB_ICVCO25_SET(x) (((x) << 8) & 0x00000700)
+#define PHY_ANALOG_SYNTH5_PWDB_ICVCOREG25_MSB 13
+#define PHY_ANALOG_SYNTH5_PWDB_ICVCOREG25_LSB 11
+#define PHY_ANALOG_SYNTH5_PWDB_ICVCOREG25_MASK 0x00003800
+#define PHY_ANALOG_SYNTH5_PWDB_ICVCOREG25_GET(x) (((x) & 0x00003800) >> 11)
+#define PHY_ANALOG_SYNTH5_PWDB_ICVCOREG25_SET(x) (((x) << 11) & 0x00003800)
+#define PHY_ANALOG_SYNTH5_PWDB_IRVCOREG50_MSB 14
+#define PHY_ANALOG_SYNTH5_PWDB_IRVCOREG50_LSB 14
+#define PHY_ANALOG_SYNTH5_PWDB_IRVCOREG50_MASK 0x00004000
+#define PHY_ANALOG_SYNTH5_PWDB_IRVCOREG50_GET(x) (((x) & 0x00004000) >> 14)
+#define PHY_ANALOG_SYNTH5_PWDB_IRVCOREG50_SET(x) (((x) << 14) & 0x00004000)
+#define PHY_ANALOG_SYNTH5_PWDB_ICLOMIX_MSB 17
+#define PHY_ANALOG_SYNTH5_PWDB_ICLOMIX_LSB 15
+#define PHY_ANALOG_SYNTH5_PWDB_ICLOMIX_MASK 0x00038000
+#define PHY_ANALOG_SYNTH5_PWDB_ICLOMIX_GET(x) (((x) & 0x00038000) >> 15)
+#define PHY_ANALOG_SYNTH5_PWDB_ICLOMIX_SET(x) (((x) << 15) & 0x00038000)
+#define PHY_ANALOG_SYNTH5_PWDB_ICLODIV50_MSB 20
+#define PHY_ANALOG_SYNTH5_PWDB_ICLODIV50_LSB 18
+#define PHY_ANALOG_SYNTH5_PWDB_ICLODIV50_MASK 0x001c0000
+#define PHY_ANALOG_SYNTH5_PWDB_ICLODIV50_GET(x) (((x) & 0x001c0000) >> 18)
+#define PHY_ANALOG_SYNTH5_PWDB_ICLODIV50_SET(x) (((x) << 18) & 0x001c0000)
+#define PHY_ANALOG_SYNTH5_PWDB_ICPRESC50_MSB 23
+#define PHY_ANALOG_SYNTH5_PWDB_ICPRESC50_LSB 21
+#define PHY_ANALOG_SYNTH5_PWDB_ICPRESC50_MASK 0x00e00000
+#define PHY_ANALOG_SYNTH5_PWDB_ICPRESC50_GET(x) (((x) & 0x00e00000) >> 21)
+#define PHY_ANALOG_SYNTH5_PWDB_ICPRESC50_SET(x) (((x) << 21) & 0x00e00000)
+#define PHY_ANALOG_SYNTH5_PWDB_IRVCMON25_MSB 26
+#define PHY_ANALOG_SYNTH5_PWDB_IRVCMON25_LSB 24
+#define PHY_ANALOG_SYNTH5_PWDB_IRVCMON25_MASK 0x07000000
+#define PHY_ANALOG_SYNTH5_PWDB_IRVCMON25_GET(x) (((x) & 0x07000000) >> 24)
+#define PHY_ANALOG_SYNTH5_PWDB_IRVCMON25_SET(x) (((x) << 24) & 0x07000000)
+#define PHY_ANALOG_SYNTH5_PWDB_IRPFDCP_MSB 29
+#define PHY_ANALOG_SYNTH5_PWDB_IRPFDCP_LSB 27
+#define PHY_ANALOG_SYNTH5_PWDB_IRPFDCP_MASK 0x38000000
+#define PHY_ANALOG_SYNTH5_PWDB_IRPFDCP_GET(x) (((x) & 0x38000000) >> 27)
+#define PHY_ANALOG_SYNTH5_PWDB_IRPFDCP_SET(x) (((x) << 27) & 0x38000000)
+#define PHY_ANALOG_SYNTH5_SPARE5A_MSB 31
+#define PHY_ANALOG_SYNTH5_SPARE5A_LSB 30
+#define PHY_ANALOG_SYNTH5_SPARE5A_MASK 0xc0000000
+#define PHY_ANALOG_SYNTH5_SPARE5A_GET(x) (((x) & 0xc0000000) >> 30)
+#define PHY_ANALOG_SYNTH5_SPARE5A_SET(x) (((x) << 30) & 0xc0000000)
+
+/* macros for SYNTH6 */
+#define PHY_ANALOG_SYNTH6_ADDRESS 0x00000094
+#define PHY_ANALOG_SYNTH6_OFFSET 0x00000094
+#define PHY_ANALOG_SYNTH6_LOBUF5GTUNE_MSB 1
+#define PHY_ANALOG_SYNTH6_LOBUF5GTUNE_LSB 0
+#define PHY_ANALOG_SYNTH6_LOBUF5GTUNE_MASK 0x00000003
+#define PHY_ANALOG_SYNTH6_LOBUF5GTUNE_GET(x) (((x) & 0x00000003) >> 0)
+#define PHY_ANALOG_SYNTH6_LOOP_IP_MSB 8
+#define PHY_ANALOG_SYNTH6_LOOP_IP_LSB 2
+#define PHY_ANALOG_SYNTH6_LOOP_IP_MASK 0x000001fc
+#define PHY_ANALOG_SYNTH6_LOOP_IP_GET(x) (((x) & 0x000001fc) >> 2)
+#define PHY_ANALOG_SYNTH6_VC2LOW_MSB 9
+#define PHY_ANALOG_SYNTH6_VC2LOW_LSB 9
+#define PHY_ANALOG_SYNTH6_VC2LOW_MASK 0x00000200
+#define PHY_ANALOG_SYNTH6_VC2LOW_GET(x) (((x) & 0x00000200) >> 9)
+#define PHY_ANALOG_SYNTH6_VC2HIGH_MSB 10
+#define PHY_ANALOG_SYNTH6_VC2HIGH_LSB 10
+#define PHY_ANALOG_SYNTH6_VC2HIGH_MASK 0x00000400
+#define PHY_ANALOG_SYNTH6_VC2HIGH_GET(x) (((x) & 0x00000400) >> 10)
+#define PHY_ANALOG_SYNTH6_RESET_SDM_B_MSB 11
+#define PHY_ANALOG_SYNTH6_RESET_SDM_B_LSB 11
+#define PHY_ANALOG_SYNTH6_RESET_SDM_B_MASK 0x00000800
+#define PHY_ANALOG_SYNTH6_RESET_SDM_B_GET(x) (((x) & 0x00000800) >> 11)
+#define PHY_ANALOG_SYNTH6_RESET_PSCOUNTERS_MSB 12
+#define PHY_ANALOG_SYNTH6_RESET_PSCOUNTERS_LSB 12
+#define PHY_ANALOG_SYNTH6_RESET_PSCOUNTERS_MASK 0x00001000
+#define PHY_ANALOG_SYNTH6_RESET_PSCOUNTERS_GET(x) (((x) & 0x00001000) >> 12)
+#define PHY_ANALOG_SYNTH6_RESET_PFD_MSB 13
+#define PHY_ANALOG_SYNTH6_RESET_PFD_LSB 13
+#define PHY_ANALOG_SYNTH6_RESET_PFD_MASK 0x00002000
+#define PHY_ANALOG_SYNTH6_RESET_PFD_GET(x) (((x) & 0x00002000) >> 13)
+#define PHY_ANALOG_SYNTH6_RESET_RFD_MSB 14
+#define PHY_ANALOG_SYNTH6_RESET_RFD_LSB 14
+#define PHY_ANALOG_SYNTH6_RESET_RFD_MASK 0x00004000
+#define PHY_ANALOG_SYNTH6_RESET_RFD_GET(x) (((x) & 0x00004000) >> 14)
+#define PHY_ANALOG_SYNTH6_SHORT_R_MSB 15
+#define PHY_ANALOG_SYNTH6_SHORT_R_LSB 15
+#define PHY_ANALOG_SYNTH6_SHORT_R_MASK 0x00008000
+#define PHY_ANALOG_SYNTH6_SHORT_R_GET(x) (((x) & 0x00008000) >> 15)
+#define PHY_ANALOG_SYNTH6_VCO_CAP_ST_MSB 23
+#define PHY_ANALOG_SYNTH6_VCO_CAP_ST_LSB 16
+#define PHY_ANALOG_SYNTH6_VCO_CAP_ST_MASK 0x00ff0000
+#define PHY_ANALOG_SYNTH6_VCO_CAP_ST_GET(x) (((x) & 0x00ff0000) >> 16)
+#define PHY_ANALOG_SYNTH6_PIN_VC_MSB 24
+#define PHY_ANALOG_SYNTH6_PIN_VC_LSB 24
+#define PHY_ANALOG_SYNTH6_PIN_VC_MASK 0x01000000
+#define PHY_ANALOG_SYNTH6_PIN_VC_GET(x) (((x) & 0x01000000) >> 24)
+#define PHY_ANALOG_SYNTH6_SYNTH_LOCK_VC_OK_MSB 25
+#define PHY_ANALOG_SYNTH6_SYNTH_LOCK_VC_OK_LSB 25
+#define PHY_ANALOG_SYNTH6_SYNTH_LOCK_VC_OK_MASK 0x02000000
+#define PHY_ANALOG_SYNTH6_SYNTH_LOCK_VC_OK_GET(x) (((x) & 0x02000000) >> 25)
+#define PHY_ANALOG_SYNTH6_CAP_SEARCH_MSB 26
+#define PHY_ANALOG_SYNTH6_CAP_SEARCH_LSB 26
+#define PHY_ANALOG_SYNTH6_CAP_SEARCH_MASK 0x04000000
+#define PHY_ANALOG_SYNTH6_CAP_SEARCH_GET(x) (((x) & 0x04000000) >> 26)
+#define PHY_ANALOG_SYNTH6_SYNTH_SM_STATE_MSB 30
+#define PHY_ANALOG_SYNTH6_SYNTH_SM_STATE_LSB 27
+#define PHY_ANALOG_SYNTH6_SYNTH_SM_STATE_MASK 0x78000000
+#define PHY_ANALOG_SYNTH6_SYNTH_SM_STATE_GET(x) (((x) & 0x78000000) >> 27)
+#define PHY_ANALOG_SYNTH6_SYNTH_ON_MSB 31
+#define PHY_ANALOG_SYNTH6_SYNTH_ON_LSB 31
+#define PHY_ANALOG_SYNTH6_SYNTH_ON_MASK 0x80000000
+#define PHY_ANALOG_SYNTH6_SYNTH_ON_GET(x) (((x) & 0x80000000) >> 31)
+
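+/*
+ * Illustrative note (not part of the generated header): the SYNTH6 fields
+ * above only have _GET accessors, i.e. they appear to be read-only status
+ * bits.  A sketch of decoding them, using the same hypothetical
+ * reg_read32() helper as above:
+ *
+ *	u32 val = reg_read32(PHY_ANALOG_SYNTH6_ADDRESS);
+ *	int locked = PHY_ANALOG_SYNTH6_SYNTH_LOCK_VC_OK_GET(val);
+ *	u32 sm_state = PHY_ANALOG_SYNTH6_SYNTH_SM_STATE_GET(val);
+ */
+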
+/* macros for SYNTH7 */
+#define PHY_ANALOG_SYNTH7_ADDRESS 0x00000098
+#define PHY_ANALOG_SYNTH7_OFFSET 0x00000098
+#define PHY_ANALOG_SYNTH7_OVRCHANDECODER_MSB 0
+#define PHY_ANALOG_SYNTH7_OVRCHANDECODER_LSB 0
+#define PHY_ANALOG_SYNTH7_OVRCHANDECODER_MASK 0x00000001
+#define PHY_ANALOG_SYNTH7_OVRCHANDECODER_GET(x) (((x) & 0x00000001) >> 0)
+#define PHY_ANALOG_SYNTH7_OVRCHANDECODER_SET(x) (((x) << 0) & 0x00000001)
+#define PHY_ANALOG_SYNTH7_FORCE_FRACLSB_MSB 1
+#define PHY_ANALOG_SYNTH7_FORCE_FRACLSB_LSB 1
+#define PHY_ANALOG_SYNTH7_FORCE_FRACLSB_MASK 0x00000002
+#define PHY_ANALOG_SYNTH7_FORCE_FRACLSB_GET(x) (((x) & 0x00000002) >> 1)
+#define PHY_ANALOG_SYNTH7_FORCE_FRACLSB_SET(x) (((x) << 1) & 0x00000002)
+#define PHY_ANALOG_SYNTH7_CHANFRAC_MSB 18
+#define PHY_ANALOG_SYNTH7_CHANFRAC_LSB 2
+#define PHY_ANALOG_SYNTH7_CHANFRAC_MASK 0x0007fffc
+#define PHY_ANALOG_SYNTH7_CHANFRAC_GET(x) (((x) & 0x0007fffc) >> 2)
+#define PHY_ANALOG_SYNTH7_CHANFRAC_SET(x) (((x) << 2) & 0x0007fffc)
+#define PHY_ANALOG_SYNTH7_CHANSEL_MSB 27
+#define PHY_ANALOG_SYNTH7_CHANSEL_LSB 19
+#define PHY_ANALOG_SYNTH7_CHANSEL_MASK 0x0ff80000
+#define PHY_ANALOG_SYNTH7_CHANSEL_GET(x) (((x) & 0x0ff80000) >> 19)
+#define PHY_ANALOG_SYNTH7_CHANSEL_SET(x) (((x) << 19) & 0x0ff80000)
+#define PHY_ANALOG_SYNTH7_AMODEREFSEL_MSB 29
+#define PHY_ANALOG_SYNTH7_AMODEREFSEL_LSB 28
+#define PHY_ANALOG_SYNTH7_AMODEREFSEL_MASK 0x30000000
+#define PHY_ANALOG_SYNTH7_AMODEREFSEL_GET(x) (((x) & 0x30000000) >> 28)
+#define PHY_ANALOG_SYNTH7_AMODEREFSEL_SET(x) (((x) << 28) & 0x30000000)
+#define PHY_ANALOG_SYNTH7_FRACMODE_MSB 30
+#define PHY_ANALOG_SYNTH7_FRACMODE_LSB 30
+#define PHY_ANALOG_SYNTH7_FRACMODE_MASK 0x40000000
+#define PHY_ANALOG_SYNTH7_FRACMODE_GET(x) (((x) & 0x40000000) >> 30)
+#define PHY_ANALOG_SYNTH7_FRACMODE_SET(x) (((x) << 30) & 0x40000000)
+#define PHY_ANALOG_SYNTH7_LOADSYNTHCHANNEL_MSB 31
+#define PHY_ANALOG_SYNTH7_LOADSYNTHCHANNEL_LSB 31
+#define PHY_ANALOG_SYNTH7_LOADSYNTHCHANNEL_MASK 0x80000000
+#define PHY_ANALOG_SYNTH7_LOADSYNTHCHANNEL_GET(x) (((x) & 0x80000000) >> 31)
+#define PHY_ANALOG_SYNTH7_LOADSYNTHCHANNEL_SET(x) (((x) << 31) & 0x80000000)
+
+/* macros for SYNTH8 */
+#define PHY_ANALOG_SYNTH8_ADDRESS 0x0000009c
+#define PHY_ANALOG_SYNTH8_OFFSET 0x0000009c
+#define PHY_ANALOG_SYNTH8_CPSTEERING_EN_FRACN_MSB 0
+#define PHY_ANALOG_SYNTH8_CPSTEERING_EN_FRACN_LSB 0
+#define PHY_ANALOG_SYNTH8_CPSTEERING_EN_FRACN_MASK 0x00000001
+#define PHY_ANALOG_SYNTH8_CPSTEERING_EN_FRACN_GET(x) (((x) & 0x00000001) >> 0)
+#define PHY_ANALOG_SYNTH8_CPSTEERING_EN_FRACN_SET(x) (((x) << 0) & 0x00000001)
+#define PHY_ANALOG_SYNTH8_LOOP_ICPB_MSB 7
+#define PHY_ANALOG_SYNTH8_LOOP_ICPB_LSB 1
+#define PHY_ANALOG_SYNTH8_LOOP_ICPB_MASK 0x000000fe
+#define PHY_ANALOG_SYNTH8_LOOP_ICPB_GET(x) (((x) & 0x000000fe) >> 1)
+#define PHY_ANALOG_SYNTH8_LOOP_ICPB_SET(x) (((x) << 1) & 0x000000fe)
+#define PHY_ANALOG_SYNTH8_LOOP_CSB_MSB 11
+#define PHY_ANALOG_SYNTH8_LOOP_CSB_LSB 8
+#define PHY_ANALOG_SYNTH8_LOOP_CSB_MASK 0x00000f00
+#define PHY_ANALOG_SYNTH8_LOOP_CSB_GET(x) (((x) & 0x00000f00) >> 8)
+#define PHY_ANALOG_SYNTH8_LOOP_CSB_SET(x) (((x) << 8) & 0x00000f00)
+#define PHY_ANALOG_SYNTH8_LOOP_RSB_MSB 16
+#define PHY_ANALOG_SYNTH8_LOOP_RSB_LSB 12
+#define PHY_ANALOG_SYNTH8_LOOP_RSB_MASK 0x0001f000
+#define PHY_ANALOG_SYNTH8_LOOP_RSB_GET(x) (((x) & 0x0001f000) >> 12)
+#define PHY_ANALOG_SYNTH8_LOOP_RSB_SET(x) (((x) << 12) & 0x0001f000)
+#define PHY_ANALOG_SYNTH8_LOOP_CPB_MSB 21
+#define PHY_ANALOG_SYNTH8_LOOP_CPB_LSB 17
+#define PHY_ANALOG_SYNTH8_LOOP_CPB_MASK 0x003e0000
+#define PHY_ANALOG_SYNTH8_LOOP_CPB_GET(x) (((x) & 0x003e0000) >> 17)
+#define PHY_ANALOG_SYNTH8_LOOP_CPB_SET(x) (((x) << 17) & 0x003e0000)
+#define PHY_ANALOG_SYNTH8_LOOP_3RD_ORDER_RB_MSB 26
+#define PHY_ANALOG_SYNTH8_LOOP_3RD_ORDER_RB_LSB 22
+#define PHY_ANALOG_SYNTH8_LOOP_3RD_ORDER_RB_MASK 0x07c00000
+#define PHY_ANALOG_SYNTH8_LOOP_3RD_ORDER_RB_GET(x) (((x) & 0x07c00000) >> 22)
+#define PHY_ANALOG_SYNTH8_LOOP_3RD_ORDER_RB_SET(x) (((x) << 22) & 0x07c00000)
+#define PHY_ANALOG_SYNTH8_REFDIVB_MSB 31
+#define PHY_ANALOG_SYNTH8_REFDIVB_LSB 27
+#define PHY_ANALOG_SYNTH8_REFDIVB_MASK 0xf8000000
+#define PHY_ANALOG_SYNTH8_REFDIVB_GET(x) (((x) & 0xf8000000) >> 27)
+#define PHY_ANALOG_SYNTH8_REFDIVB_SET(x) (((x) << 27) & 0xf8000000)
+
+/* macros for SYNTH9 */
+#define PHY_ANALOG_SYNTH9_ADDRESS 0x000000a0
+#define PHY_ANALOG_SYNTH9_OFFSET 0x000000a0
+#define PHY_ANALOG_SYNTH9_PFDDELAY_INTN_MSB 0
+#define PHY_ANALOG_SYNTH9_PFDDELAY_INTN_LSB 0
+#define PHY_ANALOG_SYNTH9_PFDDELAY_INTN_MASK 0x00000001
+#define PHY_ANALOG_SYNTH9_PFDDELAY_INTN_GET(x) (((x) & 0x00000001) >> 0)
+#define PHY_ANALOG_SYNTH9_PFDDELAY_INTN_SET(x) (((x) << 0) & 0x00000001)
+#define PHY_ANALOG_SYNTH9_SLOPE_ICPA0_MSB 3
+#define PHY_ANALOG_SYNTH9_SLOPE_ICPA0_LSB 1
+#define PHY_ANALOG_SYNTH9_SLOPE_ICPA0_MASK 0x0000000e
+#define PHY_ANALOG_SYNTH9_SLOPE_ICPA0_GET(x) (((x) & 0x0000000e) >> 1)
+#define PHY_ANALOG_SYNTH9_SLOPE_ICPA0_SET(x) (((x) << 1) & 0x0000000e)
+#define PHY_ANALOG_SYNTH9_LOOP_ICPA0_MSB 7
+#define PHY_ANALOG_SYNTH9_LOOP_ICPA0_LSB 4
+#define PHY_ANALOG_SYNTH9_LOOP_ICPA0_MASK 0x000000f0
+#define PHY_ANALOG_SYNTH9_LOOP_ICPA0_GET(x) (((x) & 0x000000f0) >> 4)
+#define PHY_ANALOG_SYNTH9_LOOP_ICPA0_SET(x) (((x) << 4) & 0x000000f0)
+#define PHY_ANALOG_SYNTH9_LOOP_CSA0_MSB 11
+#define PHY_ANALOG_SYNTH9_LOOP_CSA0_LSB 8
+#define PHY_ANALOG_SYNTH9_LOOP_CSA0_MASK 0x00000f00
+#define PHY_ANALOG_SYNTH9_LOOP_CSA0_GET(x) (((x) & 0x00000f00) >> 8)
+#define PHY_ANALOG_SYNTH9_LOOP_CSA0_SET(x) (((x) << 8) & 0x00000f00)
+#define PHY_ANALOG_SYNTH9_LOOP_RSA0_MSB 16
+#define PHY_ANALOG_SYNTH9_LOOP_RSA0_LSB 12
+#define PHY_ANALOG_SYNTH9_LOOP_RSA0_MASK 0x0001f000
+#define PHY_ANALOG_SYNTH9_LOOP_RSA0_GET(x) (((x) & 0x0001f000) >> 12)
+#define PHY_ANALOG_SYNTH9_LOOP_RSA0_SET(x) (((x) << 12) & 0x0001f000)
+#define PHY_ANALOG_SYNTH9_LOOP_CPA0_MSB 21
+#define PHY_ANALOG_SYNTH9_LOOP_CPA0_LSB 17
+#define PHY_ANALOG_SYNTH9_LOOP_CPA0_MASK 0x003e0000
+#define PHY_ANALOG_SYNTH9_LOOP_CPA0_GET(x) (((x) & 0x003e0000) >> 17)
+#define PHY_ANALOG_SYNTH9_LOOP_CPA0_SET(x) (((x) << 17) & 0x003e0000)
+#define PHY_ANALOG_SYNTH9_LOOP_3RD_ORDER_RA_MSB 26
+#define PHY_ANALOG_SYNTH9_LOOP_3RD_ORDER_RA_LSB 22
+#define PHY_ANALOG_SYNTH9_LOOP_3RD_ORDER_RA_MASK 0x07c00000
+#define PHY_ANALOG_SYNTH9_LOOP_3RD_ORDER_RA_GET(x) (((x) & 0x07c00000) >> 22)
+#define PHY_ANALOG_SYNTH9_LOOP_3RD_ORDER_RA_SET(x) (((x) << 22) & 0x07c00000)
+#define PHY_ANALOG_SYNTH9_REFDIVA_MSB 31
+#define PHY_ANALOG_SYNTH9_REFDIVA_LSB 27
+#define PHY_ANALOG_SYNTH9_REFDIVA_MASK 0xf8000000
+#define PHY_ANALOG_SYNTH9_REFDIVA_GET(x) (((x) & 0xf8000000) >> 27)
+#define PHY_ANALOG_SYNTH9_REFDIVA_SET(x) (((x) << 27) & 0xf8000000)
+
+/* macros for SYNTH10 */
+#define PHY_ANALOG_SYNTH10_ADDRESS 0x000000a4
+#define PHY_ANALOG_SYNTH10_OFFSET 0x000000a4
+#define PHY_ANALOG_SYNTH10_SPARE10A_MSB 0
+#define PHY_ANALOG_SYNTH10_SPARE10A_LSB 0
+#define PHY_ANALOG_SYNTH10_SPARE10A_MASK 0x00000001
+#define PHY_ANALOG_SYNTH10_SPARE10A_GET(x) (((x) & 0x00000001) >> 0)
+#define PHY_ANALOG_SYNTH10_SPARE10A_SET(x) (((x) << 0) & 0x00000001)
+#define PHY_ANALOG_SYNTH10_PWDB_ICLOBIAS50_MSB 3
+#define PHY_ANALOG_SYNTH10_PWDB_ICLOBIAS50_LSB 1
+#define PHY_ANALOG_SYNTH10_PWDB_ICLOBIAS50_MASK 0x0000000e
+#define PHY_ANALOG_SYNTH10_PWDB_ICLOBIAS50_GET(x) (((x) & 0x0000000e) >> 1)
+#define PHY_ANALOG_SYNTH10_PWDB_ICLOBIAS50_SET(x) (((x) << 1) & 0x0000000e)
+#define PHY_ANALOG_SYNTH10_EN_2X_LOOPFILT_MSB 4
+#define PHY_ANALOG_SYNTH10_EN_2X_LOOPFILT_LSB 4
+#define PHY_ANALOG_SYNTH10_EN_2X_LOOPFILT_MASK 0x00000010
+#define PHY_ANALOG_SYNTH10_EN_2X_LOOPFILT_GET(x) (((x) & 0x00000010) >> 4)
+#define PHY_ANALOG_SYNTH10_EN_2X_LOOPFILT_SET(x) (((x) << 4) & 0x00000010)
+#define PHY_ANALOG_SYNTH10_PWDB_IRSPARE25_MSB 7
+#define PHY_ANALOG_SYNTH10_PWDB_IRSPARE25_LSB 5
+#define PHY_ANALOG_SYNTH10_PWDB_IRSPARE25_MASK 0x000000e0
+#define PHY_ANALOG_SYNTH10_PWDB_IRSPARE25_GET(x) (((x) & 0x000000e0) >> 5)
+#define PHY_ANALOG_SYNTH10_PWDB_IRSPARE25_SET(x) (((x) << 5) & 0x000000e0)
+#define PHY_ANALOG_SYNTH10_PWDB_ICSPARE25_MSB 10
+#define PHY_ANALOG_SYNTH10_PWDB_ICSPARE25_LSB 8
+#define PHY_ANALOG_SYNTH10_PWDB_ICSPARE25_MASK 0x00000700
+#define PHY_ANALOG_SYNTH10_PWDB_ICSPARE25_GET(x) (((x) & 0x00000700) >> 8)
+#define PHY_ANALOG_SYNTH10_PWDB_ICSPARE25_SET(x) (((x) << 8) & 0x00000700)
+#define PHY_ANALOG_SYNTH10_SLOPE_ICPA1_MSB 13
+#define PHY_ANALOG_SYNTH10_SLOPE_ICPA1_LSB 11
+#define PHY_ANALOG_SYNTH10_SLOPE_ICPA1_MASK 0x00003800
+#define PHY_ANALOG_SYNTH10_SLOPE_ICPA1_GET(x) (((x) & 0x00003800) >> 11)
+#define PHY_ANALOG_SYNTH10_SLOPE_ICPA1_SET(x) (((x) << 11) & 0x00003800)
+#define PHY_ANALOG_SYNTH10_LOOP_ICPA1_MSB 17
+#define PHY_ANALOG_SYNTH10_LOOP_ICPA1_LSB 14
+#define PHY_ANALOG_SYNTH10_LOOP_ICPA1_MASK 0x0003c000
+#define PHY_ANALOG_SYNTH10_LOOP_ICPA1_GET(x) (((x) & 0x0003c000) >> 14)
+#define PHY_ANALOG_SYNTH10_LOOP_ICPA1_SET(x) (((x) << 14) & 0x0003c000)
+#define PHY_ANALOG_SYNTH10_LOOP_CSA1_MSB 21
+#define PHY_ANALOG_SYNTH10_LOOP_CSA1_LSB 18
+#define PHY_ANALOG_SYNTH10_LOOP_CSA1_MASK 0x003c0000
+#define PHY_ANALOG_SYNTH10_LOOP_CSA1_GET(x) (((x) & 0x003c0000) >> 18)
+#define PHY_ANALOG_SYNTH10_LOOP_CSA1_SET(x) (((x) << 18) & 0x003c0000)
+#define PHY_ANALOG_SYNTH10_LOOP_RSA1_MSB 26
+#define PHY_ANALOG_SYNTH10_LOOP_RSA1_LSB 22
+#define PHY_ANALOG_SYNTH10_LOOP_RSA1_MASK 0x07c00000
+#define PHY_ANALOG_SYNTH10_LOOP_RSA1_GET(x) (((x) & 0x07c00000) >> 22)
+#define PHY_ANALOG_SYNTH10_LOOP_RSA1_SET(x) (((x) << 22) & 0x07c00000)
+#define PHY_ANALOG_SYNTH10_LOOP_CPA1_MSB 31
+#define PHY_ANALOG_SYNTH10_LOOP_CPA1_LSB 27
+#define PHY_ANALOG_SYNTH10_LOOP_CPA1_MASK 0xf8000000
+#define PHY_ANALOG_SYNTH10_LOOP_CPA1_GET(x) (((x) & 0xf8000000) >> 27)
+#define PHY_ANALOG_SYNTH10_LOOP_CPA1_SET(x) (((x) << 27) & 0xf8000000)
+
+/* macros for SYNTH11 */
+#define PHY_ANALOG_SYNTH11_ADDRESS 0x000000a8
+#define PHY_ANALOG_SYNTH11_OFFSET 0x000000a8
+#define PHY_ANALOG_SYNTH11_SPARE11A_MSB 4
+#define PHY_ANALOG_SYNTH11_SPARE11A_LSB 0
+#define PHY_ANALOG_SYNTH11_SPARE11A_MASK 0x0000001f
+#define PHY_ANALOG_SYNTH11_SPARE11A_GET(x) (((x) & 0x0000001f) >> 0)
+#define PHY_ANALOG_SYNTH11_SPARE11A_SET(x) (((x) << 0) & 0x0000001f)
+#define PHY_ANALOG_SYNTH11_FORCE_LOBUF5G_ON_MSB 5
+#define PHY_ANALOG_SYNTH11_FORCE_LOBUF5G_ON_LSB 5
+#define PHY_ANALOG_SYNTH11_FORCE_LOBUF5G_ON_MASK 0x00000020
+#define PHY_ANALOG_SYNTH11_FORCE_LOBUF5G_ON_GET(x) (((x) & 0x00000020) >> 5)
+#define PHY_ANALOG_SYNTH11_FORCE_LOBUF5G_ON_SET(x) (((x) << 5) & 0x00000020)
+#define PHY_ANALOG_SYNTH11_LOREFSEL_MSB 7
+#define PHY_ANALOG_SYNTH11_LOREFSEL_LSB 6
+#define PHY_ANALOG_SYNTH11_LOREFSEL_MASK 0x000000c0
+#define PHY_ANALOG_SYNTH11_LOREFSEL_GET(x) (((x) & 0x000000c0) >> 6)
+#define PHY_ANALOG_SYNTH11_LOREFSEL_SET(x) (((x) << 6) & 0x000000c0)
+#define PHY_ANALOG_SYNTH11_LOBUF2GTUNE_MSB 9
+#define PHY_ANALOG_SYNTH11_LOBUF2GTUNE_LSB 8
+#define PHY_ANALOG_SYNTH11_LOBUF2GTUNE_MASK 0x00000300
+#define PHY_ANALOG_SYNTH11_LOBUF2GTUNE_GET(x) (((x) & 0x00000300) >> 8)
+#define PHY_ANALOG_SYNTH11_LOBUF2GTUNE_SET(x) (((x) << 8) & 0x00000300)
+#define PHY_ANALOG_SYNTH11_CPSTEERING_MODE_MSB 10
+#define PHY_ANALOG_SYNTH11_CPSTEERING_MODE_LSB 10
+#define PHY_ANALOG_SYNTH11_CPSTEERING_MODE_MASK 0x00000400
+#define PHY_ANALOG_SYNTH11_CPSTEERING_MODE_GET(x) (((x) & 0x00000400) >> 10)
+#define PHY_ANALOG_SYNTH11_CPSTEERING_MODE_SET(x) (((x) << 10) & 0x00000400)
+#define PHY_ANALOG_SYNTH11_SLOPE_ICPA2_MSB 13
+#define PHY_ANALOG_SYNTH11_SLOPE_ICPA2_LSB 11
+#define PHY_ANALOG_SYNTH11_SLOPE_ICPA2_MASK 0x00003800
+#define PHY_ANALOG_SYNTH11_SLOPE_ICPA2_GET(x) (((x) & 0x00003800) >> 11)
+#define PHY_ANALOG_SYNTH11_SLOPE_ICPA2_SET(x) (((x) << 11) & 0x00003800)
+#define PHY_ANALOG_SYNTH11_LOOP_ICPA2_MSB 17
+#define PHY_ANALOG_SYNTH11_LOOP_ICPA2_LSB 14
+#define PHY_ANALOG_SYNTH11_LOOP_ICPA2_MASK 0x0003c000
+#define PHY_ANALOG_SYNTH11_LOOP_ICPA2_GET(x) (((x) & 0x0003c000) >> 14)
+#define PHY_ANALOG_SYNTH11_LOOP_ICPA2_SET(x) (((x) << 14) & 0x0003c000)
+#define PHY_ANALOG_SYNTH11_LOOP_CSA2_MSB 21
+#define PHY_ANALOG_SYNTH11_LOOP_CSA2_LSB 18
+#define PHY_ANALOG_SYNTH11_LOOP_CSA2_MASK 0x003c0000
+#define PHY_ANALOG_SYNTH11_LOOP_CSA2_GET(x) (((x) & 0x003c0000) >> 18)
+#define PHY_ANALOG_SYNTH11_LOOP_CSA2_SET(x) (((x) << 18) & 0x003c0000)
+#define PHY_ANALOG_SYNTH11_LOOP_RSA2_MSB 26
+#define PHY_ANALOG_SYNTH11_LOOP_RSA2_LSB 22
+#define PHY_ANALOG_SYNTH11_LOOP_RSA2_MASK 0x07c00000
+#define PHY_ANALOG_SYNTH11_LOOP_RSA2_GET(x) (((x) & 0x07c00000) >> 22)
+#define PHY_ANALOG_SYNTH11_LOOP_RSA2_SET(x) (((x) << 22) & 0x07c00000)
+#define PHY_ANALOG_SYNTH11_LOOP_CPA2_MSB 31
+#define PHY_ANALOG_SYNTH11_LOOP_CPA2_LSB 27
+#define PHY_ANALOG_SYNTH11_LOOP_CPA2_MASK 0xf8000000
+#define PHY_ANALOG_SYNTH11_LOOP_CPA2_GET(x) (((x) & 0xf8000000) >> 27)
+#define PHY_ANALOG_SYNTH11_LOOP_CPA2_SET(x) (((x) << 27) & 0xf8000000)
+
+/* macros for SYNTH12 */
+#define PHY_ANALOG_SYNTH12_ADDRESS 0x000000ac
+#define PHY_ANALOG_SYNTH12_OFFSET 0x000000ac
+#define PHY_ANALOG_SYNTH12_SPARE12A_MSB 17
+#define PHY_ANALOG_SYNTH12_SPARE12A_LSB 0
+#define PHY_ANALOG_SYNTH12_SPARE12A_MASK 0x0003ffff
+#define PHY_ANALOG_SYNTH12_SPARE12A_GET(x) (((x) & 0x0003ffff) >> 0)
+#define PHY_ANALOG_SYNTH12_SPARE12A_SET(x) (((x) << 0) & 0x0003ffff)
+#define PHY_ANALOG_SYNTH12_STRCONT_MSB 18
+#define PHY_ANALOG_SYNTH12_STRCONT_LSB 18
+#define PHY_ANALOG_SYNTH12_STRCONT_MASK 0x00040000
+#define PHY_ANALOG_SYNTH12_STRCONT_GET(x) (((x) & 0x00040000) >> 18)
+#define PHY_ANALOG_SYNTH12_STRCONT_SET(x) (((x) << 18) & 0x00040000)
+#define PHY_ANALOG_SYNTH12_VREFMUL3_MSB 22
+#define PHY_ANALOG_SYNTH12_VREFMUL3_LSB 19
+#define PHY_ANALOG_SYNTH12_VREFMUL3_MASK 0x00780000
+#define PHY_ANALOG_SYNTH12_VREFMUL3_GET(x) (((x) & 0x00780000) >> 19)
+#define PHY_ANALOG_SYNTH12_VREFMUL3_SET(x) (((x) << 19) & 0x00780000)
+#define PHY_ANALOG_SYNTH12_VREFMUL2_MSB 26
+#define PHY_ANALOG_SYNTH12_VREFMUL2_LSB 23
+#define PHY_ANALOG_SYNTH12_VREFMUL2_MASK 0x07800000
+#define PHY_ANALOG_SYNTH12_VREFMUL2_GET(x) (((x) & 0x07800000) >> 23)
+#define PHY_ANALOG_SYNTH12_VREFMUL2_SET(x) (((x) << 23) & 0x07800000)
+#define PHY_ANALOG_SYNTH12_VREFMUL1_MSB 30
+#define PHY_ANALOG_SYNTH12_VREFMUL1_LSB 27
+#define PHY_ANALOG_SYNTH12_VREFMUL1_MASK 0x78000000
+#define PHY_ANALOG_SYNTH12_VREFMUL1_GET(x) (((x) & 0x78000000) >> 27)
+#define PHY_ANALOG_SYNTH12_VREFMUL1_SET(x) (((x) << 27) & 0x78000000)
+#define PHY_ANALOG_SYNTH12_CLK_DOUBLER_EN_MSB 31
+#define PHY_ANALOG_SYNTH12_CLK_DOUBLER_EN_LSB 31
+#define PHY_ANALOG_SYNTH12_CLK_DOUBLER_EN_MASK 0x80000000
+#define PHY_ANALOG_SYNTH12_CLK_DOUBLER_EN_GET(x) (((x) & 0x80000000) >> 31)
+#define PHY_ANALOG_SYNTH12_CLK_DOUBLER_EN_SET(x) (((x) << 31) & 0x80000000)
+
+/* macros for BIAS1 */
+#define PHY_ANALOG_BIAS1_ADDRESS 0x000000c0
+#define PHY_ANALOG_BIAS1_OFFSET 0x000000c0
+#define PHY_ANALOG_BIAS1_SPARE1_MSB 6
+#define PHY_ANALOG_BIAS1_SPARE1_LSB 0
+#define PHY_ANALOG_BIAS1_SPARE1_MASK 0x0000007f
+#define PHY_ANALOG_BIAS1_SPARE1_GET(x) (((x) & 0x0000007f) >> 0)
+#define PHY_ANALOG_BIAS1_SPARE1_SET(x) (((x) << 0) & 0x0000007f)
+#define PHY_ANALOG_BIAS1_PWD_IC25V2IQ_MSB 9
+#define PHY_ANALOG_BIAS1_PWD_IC25V2IQ_LSB 7
+#define PHY_ANALOG_BIAS1_PWD_IC25V2IQ_MASK 0x00000380
+#define PHY_ANALOG_BIAS1_PWD_IC25V2IQ_GET(x) (((x) & 0x00000380) >> 7)
+#define PHY_ANALOG_BIAS1_PWD_IC25V2IQ_SET(x) (((x) << 7) & 0x00000380)
+#define PHY_ANALOG_BIAS1_PWD_IC25V2II_MSB 12
+#define PHY_ANALOG_BIAS1_PWD_IC25V2II_LSB 10
+#define PHY_ANALOG_BIAS1_PWD_IC25V2II_MASK 0x00001c00
+#define PHY_ANALOG_BIAS1_PWD_IC25V2II_GET(x) (((x) & 0x00001c00) >> 10)
+#define PHY_ANALOG_BIAS1_PWD_IC25V2II_SET(x) (((x) << 10) & 0x00001c00)
+#define PHY_ANALOG_BIAS1_PWD_IC25BB_MSB 15
+#define PHY_ANALOG_BIAS1_PWD_IC25BB_LSB 13
+#define PHY_ANALOG_BIAS1_PWD_IC25BB_MASK 0x0000e000
+#define PHY_ANALOG_BIAS1_PWD_IC25BB_GET(x) (((x) & 0x0000e000) >> 13)
+#define PHY_ANALOG_BIAS1_PWD_IC25BB_SET(x) (((x) << 13) & 0x0000e000)
+#define PHY_ANALOG_BIAS1_PWD_IC25DAC_MSB 18
+#define PHY_ANALOG_BIAS1_PWD_IC25DAC_LSB 16
+#define PHY_ANALOG_BIAS1_PWD_IC25DAC_MASK 0x00070000
+#define PHY_ANALOG_BIAS1_PWD_IC25DAC_GET(x) (((x) & 0x00070000) >> 16)
+#define PHY_ANALOG_BIAS1_PWD_IC25DAC_SET(x) (((x) << 16) & 0x00070000)
+#define PHY_ANALOG_BIAS1_PWD_IC25FIR_MSB 21
+#define PHY_ANALOG_BIAS1_PWD_IC25FIR_LSB 19
+#define PHY_ANALOG_BIAS1_PWD_IC25FIR_MASK 0x00380000
+#define PHY_ANALOG_BIAS1_PWD_IC25FIR_GET(x) (((x) & 0x00380000) >> 19)
+#define PHY_ANALOG_BIAS1_PWD_IC25FIR_SET(x) (((x) << 19) & 0x00380000)
+#define PHY_ANALOG_BIAS1_PWD_IC25ADC_MSB 24
+#define PHY_ANALOG_BIAS1_PWD_IC25ADC_LSB 22
+#define PHY_ANALOG_BIAS1_PWD_IC25ADC_MASK 0x01c00000
+#define PHY_ANALOG_BIAS1_PWD_IC25ADC_GET(x) (((x) & 0x01c00000) >> 22)
+#define PHY_ANALOG_BIAS1_PWD_IC25ADC_SET(x) (((x) << 22) & 0x01c00000)
+#define PHY_ANALOG_BIAS1_BIAS_SEL_MSB 31
+#define PHY_ANALOG_BIAS1_BIAS_SEL_LSB 25
+#define PHY_ANALOG_BIAS1_BIAS_SEL_MASK 0xfe000000
+#define PHY_ANALOG_BIAS1_BIAS_SEL_GET(x) (((x) & 0xfe000000) >> 25)
+#define PHY_ANALOG_BIAS1_BIAS_SEL_SET(x) (((x) << 25) & 0xfe000000)
+
+/* macros for BIAS2 */
+#define PHY_ANALOG_BIAS2_ADDRESS 0x000000c4
+#define PHY_ANALOG_BIAS2_OFFSET 0x000000c4
+#define PHY_ANALOG_BIAS2_SPARE2_MSB 4
+#define PHY_ANALOG_BIAS2_SPARE2_LSB 0
+#define PHY_ANALOG_BIAS2_SPARE2_MASK 0x0000001f
+#define PHY_ANALOG_BIAS2_SPARE2_GET(x) (((x) & 0x0000001f) >> 0)
+#define PHY_ANALOG_BIAS2_SPARE2_SET(x) (((x) << 0) & 0x0000001f)
+#define PHY_ANALOG_BIAS2_PWD_IC25XTALREG_MSB 7
+#define PHY_ANALOG_BIAS2_PWD_IC25XTALREG_LSB 5
+#define PHY_ANALOG_BIAS2_PWD_IC25XTALREG_MASK 0x000000e0
+#define PHY_ANALOG_BIAS2_PWD_IC25XTALREG_GET(x) (((x) & 0x000000e0) >> 5)
+#define PHY_ANALOG_BIAS2_PWD_IC25XTALREG_SET(x) (((x) << 5) & 0x000000e0)
+#define PHY_ANALOG_BIAS2_PWD_IC25XTAL_MSB 10
+#define PHY_ANALOG_BIAS2_PWD_IC25XTAL_LSB 8
+#define PHY_ANALOG_BIAS2_PWD_IC25XTAL_MASK 0x00000700
+#define PHY_ANALOG_BIAS2_PWD_IC25XTAL_GET(x) (((x) & 0x00000700) >> 8)
+#define PHY_ANALOG_BIAS2_PWD_IC25XTAL_SET(x) (((x) << 8) & 0x00000700)
+#define PHY_ANALOG_BIAS2_PWD_IC25TXRF_MSB 13
+#define PHY_ANALOG_BIAS2_PWD_IC25TXRF_LSB 11
+#define PHY_ANALOG_BIAS2_PWD_IC25TXRF_MASK 0x00003800
+#define PHY_ANALOG_BIAS2_PWD_IC25TXRF_GET(x) (((x) & 0x00003800) >> 11)
+#define PHY_ANALOG_BIAS2_PWD_IC25TXRF_SET(x) (((x) << 11) & 0x00003800)
+#define PHY_ANALOG_BIAS2_PWD_IC25RXRF_MSB 16
+#define PHY_ANALOG_BIAS2_PWD_IC25RXRF_LSB 14
+#define PHY_ANALOG_BIAS2_PWD_IC25RXRF_MASK 0x0001c000
+#define PHY_ANALOG_BIAS2_PWD_IC25RXRF_GET(x) (((x) & 0x0001c000) >> 14)
+#define PHY_ANALOG_BIAS2_PWD_IC25RXRF_SET(x) (((x) << 14) & 0x0001c000)
+#define PHY_ANALOG_BIAS2_PWD_IC50SYNTH_MSB 19
+#define PHY_ANALOG_BIAS2_PWD_IC50SYNTH_LSB 17
+#define PHY_ANALOG_BIAS2_PWD_IC50SYNTH_MASK 0x000e0000
+#define PHY_ANALOG_BIAS2_PWD_IC50SYNTH_GET(x) (((x) & 0x000e0000) >> 17)
+#define PHY_ANALOG_BIAS2_PWD_IC50SYNTH_SET(x) (((x) << 17) & 0x000e0000)
+#define PHY_ANALOG_BIAS2_PWD_IC25PLLREG_MSB 22
+#define PHY_ANALOG_BIAS2_PWD_IC25PLLREG_LSB 20
+#define PHY_ANALOG_BIAS2_PWD_IC25PLLREG_MASK 0x00700000
+#define PHY_ANALOG_BIAS2_PWD_IC25PLLREG_GET(x) (((x) & 0x00700000) >> 20)
+#define PHY_ANALOG_BIAS2_PWD_IC25PLLREG_SET(x) (((x) << 20) & 0x00700000)
+#define PHY_ANALOG_BIAS2_PWD_IC25PLLCP2_MSB 25
+#define PHY_ANALOG_BIAS2_PWD_IC25PLLCP2_LSB 23
+#define PHY_ANALOG_BIAS2_PWD_IC25PLLCP2_MASK 0x03800000
+#define PHY_ANALOG_BIAS2_PWD_IC25PLLCP2_GET(x) (((x) & 0x03800000) >> 23)
+#define PHY_ANALOG_BIAS2_PWD_IC25PLLCP2_SET(x) (((x) << 23) & 0x03800000)
+#define PHY_ANALOG_BIAS2_PWD_IC25PLLCP_MSB 28
+#define PHY_ANALOG_BIAS2_PWD_IC25PLLCP_LSB 26
+#define PHY_ANALOG_BIAS2_PWD_IC25PLLCP_MASK 0x1c000000
+#define PHY_ANALOG_BIAS2_PWD_IC25PLLCP_GET(x) (((x) & 0x1c000000) >> 26)
+#define PHY_ANALOG_BIAS2_PWD_IC25PLLCP_SET(x) (((x) << 26) & 0x1c000000)
+#define PHY_ANALOG_BIAS2_PWD_IC25PLLGM_MSB 31
+#define PHY_ANALOG_BIAS2_PWD_IC25PLLGM_LSB 29
+#define PHY_ANALOG_BIAS2_PWD_IC25PLLGM_MASK 0xe0000000
+#define PHY_ANALOG_BIAS2_PWD_IC25PLLGM_GET(x) (((x) & 0xe0000000) >> 29)
+#define PHY_ANALOG_BIAS2_PWD_IC25PLLGM_SET(x) (((x) << 29) & 0xe0000000)
+
+/* macros for BIAS3 */
+#define PHY_ANALOG_BIAS3_ADDRESS 0x000000c8
+#define PHY_ANALOG_BIAS3_OFFSET 0x000000c8
+#define PHY_ANALOG_BIAS3_SPARE3_MSB 1
+#define PHY_ANALOG_BIAS3_SPARE3_LSB 0
+#define PHY_ANALOG_BIAS3_SPARE3_MASK 0x00000003
+#define PHY_ANALOG_BIAS3_SPARE3_GET(x) (((x) & 0x00000003) >> 0)
+#define PHY_ANALOG_BIAS3_SPARE3_SET(x) (((x) << 0) & 0x00000003)
+#define PHY_ANALOG_BIAS3_PWD_IR25XTALREG_MSB 4
+#define PHY_ANALOG_BIAS3_PWD_IR25XTALREG_LSB 2
+#define PHY_ANALOG_BIAS3_PWD_IR25XTALREG_MASK 0x0000001c
+#define PHY_ANALOG_BIAS3_PWD_IR25XTALREG_GET(x) (((x) & 0x0000001c) >> 2)
+#define PHY_ANALOG_BIAS3_PWD_IR25XTALREG_SET(x) (((x) << 2) & 0x0000001c)
+#define PHY_ANALOG_BIAS3_PWD_IR25TXRF_MSB 7
+#define PHY_ANALOG_BIAS3_PWD_IR25TXRF_LSB 5
+#define PHY_ANALOG_BIAS3_PWD_IR25TXRF_MASK 0x000000e0
+#define PHY_ANALOG_BIAS3_PWD_IR25TXRF_GET(x) (((x) & 0x000000e0) >> 5)
+#define PHY_ANALOG_BIAS3_PWD_IR25TXRF_SET(x) (((x) << 5) & 0x000000e0)
+#define PHY_ANALOG_BIAS3_PWD_IR25RXRF_MSB 10
+#define PHY_ANALOG_BIAS3_PWD_IR25RXRF_LSB 8
+#define PHY_ANALOG_BIAS3_PWD_IR25RXRF_MASK 0x00000700
+#define PHY_ANALOG_BIAS3_PWD_IR25RXRF_GET(x) (((x) & 0x00000700) >> 8)
+#define PHY_ANALOG_BIAS3_PWD_IR25RXRF_SET(x) (((x) << 8) & 0x00000700)
+#define PHY_ANALOG_BIAS3_PWD_IR50SYNTH_MSB 13
+#define PHY_ANALOG_BIAS3_PWD_IR50SYNTH_LSB 11
+#define PHY_ANALOG_BIAS3_PWD_IR50SYNTH_MASK 0x00003800
+#define PHY_ANALOG_BIAS3_PWD_IR50SYNTH_GET(x) (((x) & 0x00003800) >> 11)
+#define PHY_ANALOG_BIAS3_PWD_IR50SYNTH_SET(x) (((x) << 11) & 0x00003800)
+#define PHY_ANALOG_BIAS3_PWD_IR25PLLREG_MSB 16
+#define PHY_ANALOG_BIAS3_PWD_IR25PLLREG_LSB 14
+#define PHY_ANALOG_BIAS3_PWD_IR25PLLREG_MASK 0x0001c000
+#define PHY_ANALOG_BIAS3_PWD_IR25PLLREG_GET(x) (((x) & 0x0001c000) >> 14)
+#define PHY_ANALOG_BIAS3_PWD_IR25PLLREG_SET(x) (((x) << 14) & 0x0001c000)
+#define PHY_ANALOG_BIAS3_PWD_IR25BB_MSB 19
+#define PHY_ANALOG_BIAS3_PWD_IR25BB_LSB 17
+#define PHY_ANALOG_BIAS3_PWD_IR25BB_MASK 0x000e0000
+#define PHY_ANALOG_BIAS3_PWD_IR25BB_GET(x) (((x) & 0x000e0000) >> 17)
+#define PHY_ANALOG_BIAS3_PWD_IR25BB_SET(x) (((x) << 17) & 0x000e0000)
+#define PHY_ANALOG_BIAS3_PWD_IR50DAC_MSB 22
+#define PHY_ANALOG_BIAS3_PWD_IR50DAC_LSB 20
+#define PHY_ANALOG_BIAS3_PWD_IR50DAC_MASK 0x00700000
+#define PHY_ANALOG_BIAS3_PWD_IR50DAC_GET(x) (((x) & 0x00700000) >> 20)
+#define PHY_ANALOG_BIAS3_PWD_IR50DAC_SET(x) (((x) << 20) & 0x00700000)
+#define PHY_ANALOG_BIAS3_PWD_IR25DAC_MSB 25
+#define PHY_ANALOG_BIAS3_PWD_IR25DAC_LSB 23
+#define PHY_ANALOG_BIAS3_PWD_IR25DAC_MASK 0x03800000
+#define PHY_ANALOG_BIAS3_PWD_IR25DAC_GET(x) (((x) & 0x03800000) >> 23)
+#define PHY_ANALOG_BIAS3_PWD_IR25DAC_SET(x) (((x) << 23) & 0x03800000)
+#define PHY_ANALOG_BIAS3_PWD_IR25FIR_MSB 28
+#define PHY_ANALOG_BIAS3_PWD_IR25FIR_LSB 26
+#define PHY_ANALOG_BIAS3_PWD_IR25FIR_MASK 0x1c000000
+#define PHY_ANALOG_BIAS3_PWD_IR25FIR_GET(x) (((x) & 0x1c000000) >> 26)
+#define PHY_ANALOG_BIAS3_PWD_IR25FIR_SET(x) (((x) << 26) & 0x1c000000)
+#define PHY_ANALOG_BIAS3_PWD_IR50ADC_MSB 31
+#define PHY_ANALOG_BIAS3_PWD_IR50ADC_LSB 29
+#define PHY_ANALOG_BIAS3_PWD_IR50ADC_MASK 0xe0000000
+#define PHY_ANALOG_BIAS3_PWD_IR50ADC_GET(x) (((x) & 0xe0000000) >> 29)
+#define PHY_ANALOG_BIAS3_PWD_IR50ADC_SET(x) (((x) << 29) & 0xe0000000)
+
+/* macros for BIAS4 */
+#define PHY_ANALOG_BIAS4_ADDRESS 0x000000cc
+#define PHY_ANALOG_BIAS4_OFFSET 0x000000cc
+#define PHY_ANALOG_BIAS4_SPARE4_MSB 13
+#define PHY_ANALOG_BIAS4_SPARE4_LSB 0
+#define PHY_ANALOG_BIAS4_SPARE4_MASK 0x00003fff
+#define PHY_ANALOG_BIAS4_SPARE4_GET(x) (((x) & 0x00003fff) >> 0)
+#define PHY_ANALOG_BIAS4_SPARE4_SET(x) (((x) << 0) & 0x00003fff)
+#define PHY_ANALOG_BIAS4_PWD_IR25SPAREC_MSB 16
+#define PHY_ANALOG_BIAS4_PWD_IR25SPAREC_LSB 14
+#define PHY_ANALOG_BIAS4_PWD_IR25SPAREC_MASK 0x0001c000
+#define PHY_ANALOG_BIAS4_PWD_IR25SPAREC_GET(x) (((x) & 0x0001c000) >> 14)
+#define PHY_ANALOG_BIAS4_PWD_IR25SPAREC_SET(x) (((x) << 14) & 0x0001c000)
+#define PHY_ANALOG_BIAS4_PWD_IR25SPAREB_MSB 19
+#define PHY_ANALOG_BIAS4_PWD_IR25SPAREB_LSB 17
+#define PHY_ANALOG_BIAS4_PWD_IR25SPAREB_MASK 0x000e0000
+#define PHY_ANALOG_BIAS4_PWD_IR25SPAREB_GET(x) (((x) & 0x000e0000) >> 17)
+#define PHY_ANALOG_BIAS4_PWD_IR25SPAREB_SET(x) (((x) << 17) & 0x000e0000)
+#define PHY_ANALOG_BIAS4_PWD_IR25SPAREA_MSB 22
+#define PHY_ANALOG_BIAS4_PWD_IR25SPAREA_LSB 20
+#define PHY_ANALOG_BIAS4_PWD_IR25SPAREA_MASK 0x00700000
+#define PHY_ANALOG_BIAS4_PWD_IR25SPAREA_GET(x) (((x) & 0x00700000) >> 20)
+#define PHY_ANALOG_BIAS4_PWD_IR25SPAREA_SET(x) (((x) << 20) & 0x00700000)
+#define PHY_ANALOG_BIAS4_PWD_IC25SPAREC_MSB 25
+#define PHY_ANALOG_BIAS4_PWD_IC25SPAREC_LSB 23
+#define PHY_ANALOG_BIAS4_PWD_IC25SPAREC_MASK 0x03800000
+#define PHY_ANALOG_BIAS4_PWD_IC25SPAREC_GET(x) (((x) & 0x03800000) >> 23)
+#define PHY_ANALOG_BIAS4_PWD_IC25SPAREC_SET(x) (((x) << 23) & 0x03800000)
+#define PHY_ANALOG_BIAS4_PWD_IC25SPAREB_MSB 28
+#define PHY_ANALOG_BIAS4_PWD_IC25SPAREB_LSB 26
+#define PHY_ANALOG_BIAS4_PWD_IC25SPAREB_MASK 0x1c000000
+#define PHY_ANALOG_BIAS4_PWD_IC25SPAREB_GET(x) (((x) & 0x1c000000) >> 26)
+#define PHY_ANALOG_BIAS4_PWD_IC25SPAREB_SET(x) (((x) << 26) & 0x1c000000)
+#define PHY_ANALOG_BIAS4_PWD_IC25SPAREA_MSB 31
+#define PHY_ANALOG_BIAS4_PWD_IC25SPAREA_LSB 29
+#define PHY_ANALOG_BIAS4_PWD_IC25SPAREA_MASK 0xe0000000
+#define PHY_ANALOG_BIAS4_PWD_IC25SPAREA_GET(x) (((x) & 0xe0000000) >> 29)
+#define PHY_ANALOG_BIAS4_PWD_IC25SPAREA_SET(x) (((x) << 29) & 0xe0000000)
+
+/* macros for RXTX1 */
+#define PHY_ANALOG_RXTX1_ADDRESS 0x00000100
+#define PHY_ANALOG_RXTX1_OFFSET 0x00000100
+#define PHY_ANALOG_RXTX1_SCFIR_GAIN_MSB 0
+#define PHY_ANALOG_RXTX1_SCFIR_GAIN_LSB 0
+#define PHY_ANALOG_RXTX1_SCFIR_GAIN_MASK 0x00000001
+#define PHY_ANALOG_RXTX1_SCFIR_GAIN_GET(x) (((x) & 0x00000001) >> 0)
+#define PHY_ANALOG_RXTX1_SCFIR_GAIN_SET(x) (((x) << 0) & 0x00000001)
+#define PHY_ANALOG_RXTX1_MANRXGAIN_MSB 1
+#define PHY_ANALOG_RXTX1_MANRXGAIN_LSB 1
+#define PHY_ANALOG_RXTX1_MANRXGAIN_MASK 0x00000002
+#define PHY_ANALOG_RXTX1_MANRXGAIN_GET(x) (((x) & 0x00000002) >> 1)
+#define PHY_ANALOG_RXTX1_MANRXGAIN_SET(x) (((x) << 1) & 0x00000002)
+#define PHY_ANALOG_RXTX1_AGC_DBDAC_MSB 5
+#define PHY_ANALOG_RXTX1_AGC_DBDAC_LSB 2
+#define PHY_ANALOG_RXTX1_AGC_DBDAC_MASK 0x0000003c
+#define PHY_ANALOG_RXTX1_AGC_DBDAC_GET(x) (((x) & 0x0000003c) >> 2)
+#define PHY_ANALOG_RXTX1_AGC_DBDAC_SET(x) (((x) << 2) & 0x0000003c)
+#define PHY_ANALOG_RXTX1_OVR_AGC_DBDAC_MSB 6
+#define PHY_ANALOG_RXTX1_OVR_AGC_DBDAC_LSB 6
+#define PHY_ANALOG_RXTX1_OVR_AGC_DBDAC_MASK 0x00000040
+#define PHY_ANALOG_RXTX1_OVR_AGC_DBDAC_GET(x) (((x) & 0x00000040) >> 6)
+#define PHY_ANALOG_RXTX1_OVR_AGC_DBDAC_SET(x) (((x) << 6) & 0x00000040)
+#define PHY_ANALOG_RXTX1_ENABLE_PAL_MSB 7
+#define PHY_ANALOG_RXTX1_ENABLE_PAL_LSB 7
+#define PHY_ANALOG_RXTX1_ENABLE_PAL_MASK 0x00000080
+#define PHY_ANALOG_RXTX1_ENABLE_PAL_GET(x) (((x) & 0x00000080) >> 7)
+#define PHY_ANALOG_RXTX1_ENABLE_PAL_SET(x) (((x) << 7) & 0x00000080)
+#define PHY_ANALOG_RXTX1_ENABLE_PAL_OVR_MSB 8
+#define PHY_ANALOG_RXTX1_ENABLE_PAL_OVR_LSB 8
+#define PHY_ANALOG_RXTX1_ENABLE_PAL_OVR_MASK 0x00000100
+#define PHY_ANALOG_RXTX1_ENABLE_PAL_OVR_GET(x) (((x) & 0x00000100) >> 8)
+#define PHY_ANALOG_RXTX1_ENABLE_PAL_OVR_SET(x) (((x) << 8) & 0x00000100)
+#define PHY_ANALOG_RXTX1_TX1DB_BIQUAD_MSB 11
+#define PHY_ANALOG_RXTX1_TX1DB_BIQUAD_LSB 9
+#define PHY_ANALOG_RXTX1_TX1DB_BIQUAD_MASK 0x00000e00
+#define PHY_ANALOG_RXTX1_TX1DB_BIQUAD_GET(x) (((x) & 0x00000e00) >> 9)
+#define PHY_ANALOG_RXTX1_TX1DB_BIQUAD_SET(x) (((x) << 9) & 0x00000e00)
+#define PHY_ANALOG_RXTX1_TX6DB_BIQUAD_MSB 13
+#define PHY_ANALOG_RXTX1_TX6DB_BIQUAD_LSB 12
+#define PHY_ANALOG_RXTX1_TX6DB_BIQUAD_MASK 0x00003000
+#define PHY_ANALOG_RXTX1_TX6DB_BIQUAD_GET(x) (((x) & 0x00003000) >> 12)
+#define PHY_ANALOG_RXTX1_TX6DB_BIQUAD_SET(x) (((x) << 12) & 0x00003000)
+#define PHY_ANALOG_RXTX1_PADRVHALFGN2G_MSB 14
+#define PHY_ANALOG_RXTX1_PADRVHALFGN2G_LSB 14
+#define PHY_ANALOG_RXTX1_PADRVHALFGN2G_MASK 0x00004000
+#define PHY_ANALOG_RXTX1_PADRVHALFGN2G_GET(x) (((x) & 0x00004000) >> 14)
+#define PHY_ANALOG_RXTX1_PADRVHALFGN2G_SET(x) (((x) << 14) & 0x00004000)
+#define PHY_ANALOG_RXTX1_PADRV2GN_MSB 18
+#define PHY_ANALOG_RXTX1_PADRV2GN_LSB 15
+#define PHY_ANALOG_RXTX1_PADRV2GN_MASK 0x00078000
+#define PHY_ANALOG_RXTX1_PADRV2GN_GET(x) (((x) & 0x00078000) >> 15)
+#define PHY_ANALOG_RXTX1_PADRV2GN_SET(x) (((x) << 15) & 0x00078000)
+#define PHY_ANALOG_RXTX1_PADRV3GN5G_MSB 22
+#define PHY_ANALOG_RXTX1_PADRV3GN5G_LSB 19
+#define PHY_ANALOG_RXTX1_PADRV3GN5G_MASK 0x00780000
+#define PHY_ANALOG_RXTX1_PADRV3GN5G_GET(x) (((x) & 0x00780000) >> 19)
+#define PHY_ANALOG_RXTX1_PADRV3GN5G_SET(x) (((x) << 19) & 0x00780000)
+#define PHY_ANALOG_RXTX1_PADRV4GN5G_MSB 26
+#define PHY_ANALOG_RXTX1_PADRV4GN5G_LSB 23
+#define PHY_ANALOG_RXTX1_PADRV4GN5G_MASK 0x07800000
+#define PHY_ANALOG_RXTX1_PADRV4GN5G_GET(x) (((x) & 0x07800000) >> 23)
+#define PHY_ANALOG_RXTX1_PADRV4GN5G_SET(x) (((x) << 23) & 0x07800000)
+#define PHY_ANALOG_RXTX1_TXBB_GC_MSB 30
+#define PHY_ANALOG_RXTX1_TXBB_GC_LSB 27
+#define PHY_ANALOG_RXTX1_TXBB_GC_MASK 0x78000000
+#define PHY_ANALOG_RXTX1_TXBB_GC_GET(x) (((x) & 0x78000000) >> 27)
+#define PHY_ANALOG_RXTX1_TXBB_GC_SET(x) (((x) << 27) & 0x78000000)
+#define PHY_ANALOG_RXTX1_MANTXGAIN_MSB 31
+#define PHY_ANALOG_RXTX1_MANTXGAIN_LSB 31
+#define PHY_ANALOG_RXTX1_MANTXGAIN_MASK 0x80000000
+#define PHY_ANALOG_RXTX1_MANTXGAIN_GET(x) (((x) & 0x80000000) >> 31)
+#define PHY_ANALOG_RXTX1_MANTXGAIN_SET(x) (((x) << 31) & 0x80000000)
+
+/* macros for RXTX2 */
+#define PHY_ANALOG_RXTX2_ADDRESS 0x00000104
+#define PHY_ANALOG_RXTX2_OFFSET 0x00000104
+#define PHY_ANALOG_RXTX2_BMODE_MSB 0
+#define PHY_ANALOG_RXTX2_BMODE_LSB 0
+#define PHY_ANALOG_RXTX2_BMODE_MASK 0x00000001
+#define PHY_ANALOG_RXTX2_BMODE_GET(x) (((x) & 0x00000001) >> 0)
+#define PHY_ANALOG_RXTX2_BMODE_SET(x) (((x) << 0) & 0x00000001)
+#define PHY_ANALOG_RXTX2_BMODE_OVR_MSB 1
+#define PHY_ANALOG_RXTX2_BMODE_OVR_LSB 1
+#define PHY_ANALOG_RXTX2_BMODE_OVR_MASK 0x00000002
+#define PHY_ANALOG_RXTX2_BMODE_OVR_GET(x) (((x) & 0x00000002) >> 1)
+#define PHY_ANALOG_RXTX2_BMODE_OVR_SET(x) (((x) << 1) & 0x00000002)
+#define PHY_ANALOG_RXTX2_SYNTHON_MSB 2
+#define PHY_ANALOG_RXTX2_SYNTHON_LSB 2
+#define PHY_ANALOG_RXTX2_SYNTHON_MASK 0x00000004
+#define PHY_ANALOG_RXTX2_SYNTHON_GET(x) (((x) & 0x00000004) >> 2)
+#define PHY_ANALOG_RXTX2_SYNTHON_SET(x) (((x) << 2) & 0x00000004)
+#define PHY_ANALOG_RXTX2_SYNTHON_OVR_MSB 3
+#define PHY_ANALOG_RXTX2_SYNTHON_OVR_LSB 3
+#define PHY_ANALOG_RXTX2_SYNTHON_OVR_MASK 0x00000008
+#define PHY_ANALOG_RXTX2_SYNTHON_OVR_GET(x) (((x) & 0x00000008) >> 3)
+#define PHY_ANALOG_RXTX2_SYNTHON_OVR_SET(x) (((x) << 3) & 0x00000008)
+#define PHY_ANALOG_RXTX2_BW_ST_MSB 5
+#define PHY_ANALOG_RXTX2_BW_ST_LSB 4
+#define PHY_ANALOG_RXTX2_BW_ST_MASK 0x00000030
+#define PHY_ANALOG_RXTX2_BW_ST_GET(x) (((x) & 0x00000030) >> 4)
+#define PHY_ANALOG_RXTX2_BW_ST_SET(x) (((x) << 4) & 0x00000030)
+#define PHY_ANALOG_RXTX2_BW_ST_OVR_MSB 6
+#define PHY_ANALOG_RXTX2_BW_ST_OVR_LSB 6
+#define PHY_ANALOG_RXTX2_BW_ST_OVR_MASK 0x00000040
+#define PHY_ANALOG_RXTX2_BW_ST_OVR_GET(x) (((x) & 0x00000040) >> 6)
+#define PHY_ANALOG_RXTX2_BW_ST_OVR_SET(x) (((x) << 6) & 0x00000040)
+#define PHY_ANALOG_RXTX2_TXON_MSB 7
+#define PHY_ANALOG_RXTX2_TXON_LSB 7
+#define PHY_ANALOG_RXTX2_TXON_MASK 0x00000080
+#define PHY_ANALOG_RXTX2_TXON_GET(x) (((x) & 0x00000080) >> 7)
+#define PHY_ANALOG_RXTX2_TXON_SET(x) (((x) << 7) & 0x00000080)
+#define PHY_ANALOG_RXTX2_TXON_OVR_MSB 8
+#define PHY_ANALOG_RXTX2_TXON_OVR_LSB 8
+#define PHY_ANALOG_RXTX2_TXON_OVR_MASK 0x00000100
+#define PHY_ANALOG_RXTX2_TXON_OVR_GET(x) (((x) & 0x00000100) >> 8)
+#define PHY_ANALOG_RXTX2_TXON_OVR_SET(x) (((x) << 8) & 0x00000100)
+#define PHY_ANALOG_RXTX2_PAON_MSB 9
+#define PHY_ANALOG_RXTX2_PAON_LSB 9
+#define PHY_ANALOG_RXTX2_PAON_MASK 0x00000200
+#define PHY_ANALOG_RXTX2_PAON_GET(x) (((x) & 0x00000200) >> 9)
+#define PHY_ANALOG_RXTX2_PAON_SET(x) (((x) << 9) & 0x00000200)
+#define PHY_ANALOG_RXTX2_PAON_OVR_MSB 10
+#define PHY_ANALOG_RXTX2_PAON_OVR_LSB 10
+#define PHY_ANALOG_RXTX2_PAON_OVR_MASK 0x00000400
+#define PHY_ANALOG_RXTX2_PAON_OVR_GET(x) (((x) & 0x00000400) >> 10)
+#define PHY_ANALOG_RXTX2_PAON_OVR_SET(x) (((x) << 10) & 0x00000400)
+#define PHY_ANALOG_RXTX2_RXON_MSB 11
+#define PHY_ANALOG_RXTX2_RXON_LSB 11
+#define PHY_ANALOG_RXTX2_RXON_MASK 0x00000800
+#define PHY_ANALOG_RXTX2_RXON_GET(x) (((x) & 0x00000800) >> 11)
+#define PHY_ANALOG_RXTX2_RXON_SET(x) (((x) << 11) & 0x00000800)
+#define PHY_ANALOG_RXTX2_RXON_OVR_MSB 12
+#define PHY_ANALOG_RXTX2_RXON_OVR_LSB 12
+#define PHY_ANALOG_RXTX2_RXON_OVR_MASK 0x00001000
+#define PHY_ANALOG_RXTX2_RXON_OVR_GET(x) (((x) & 0x00001000) >> 12)
+#define PHY_ANALOG_RXTX2_RXON_OVR_SET(x) (((x) << 12) & 0x00001000)
+#define PHY_ANALOG_RXTX2_AGCON_MSB 13
+#define PHY_ANALOG_RXTX2_AGCON_LSB 13
+#define PHY_ANALOG_RXTX2_AGCON_MASK 0x00002000
+#define PHY_ANALOG_RXTX2_AGCON_GET(x) (((x) & 0x00002000) >> 13)
+#define PHY_ANALOG_RXTX2_AGCON_SET(x) (((x) << 13) & 0x00002000)
+#define PHY_ANALOG_RXTX2_AGCON_OVR_MSB 14
+#define PHY_ANALOG_RXTX2_AGCON_OVR_LSB 14
+#define PHY_ANALOG_RXTX2_AGCON_OVR_MASK 0x00004000
+#define PHY_ANALOG_RXTX2_AGCON_OVR_GET(x) (((x) & 0x00004000) >> 14)
+#define PHY_ANALOG_RXTX2_AGCON_OVR_SET(x) (((x) << 14) & 0x00004000)
+#define PHY_ANALOG_RXTX2_TXMOD_MSB 17
+#define PHY_ANALOG_RXTX2_TXMOD_LSB 15
+#define PHY_ANALOG_RXTX2_TXMOD_MASK 0x00038000
+#define PHY_ANALOG_RXTX2_TXMOD_GET(x) (((x) & 0x00038000) >> 15)
+#define PHY_ANALOG_RXTX2_TXMOD_SET(x) (((x) << 15) & 0x00038000)
+#define PHY_ANALOG_RXTX2_TXMOD_OVR_MSB 18
+#define PHY_ANALOG_RXTX2_TXMOD_OVR_LSB 18
+#define PHY_ANALOG_RXTX2_TXMOD_OVR_MASK 0x00040000
+#define PHY_ANALOG_RXTX2_TXMOD_OVR_GET(x) (((x) & 0x00040000) >> 18)
+#define PHY_ANALOG_RXTX2_TXMOD_OVR_SET(x) (((x) << 18) & 0x00040000)
+#define PHY_ANALOG_RXTX2_RX1DB_BIQUAD_MSB 21
+#define PHY_ANALOG_RXTX2_RX1DB_BIQUAD_LSB 19
+#define PHY_ANALOG_RXTX2_RX1DB_BIQUAD_MASK 0x00380000
+#define PHY_ANALOG_RXTX2_RX1DB_BIQUAD_GET(x) (((x) & 0x00380000) >> 19)
+#define PHY_ANALOG_RXTX2_RX1DB_BIQUAD_SET(x) (((x) << 19) & 0x00380000)
+#define PHY_ANALOG_RXTX2_RX6DB_BIQUAD_MSB 23
+#define PHY_ANALOG_RXTX2_RX6DB_BIQUAD_LSB 22
+#define PHY_ANALOG_RXTX2_RX6DB_BIQUAD_MASK 0x00c00000
+#define PHY_ANALOG_RXTX2_RX6DB_BIQUAD_GET(x) (((x) & 0x00c00000) >> 22)
+#define PHY_ANALOG_RXTX2_RX6DB_BIQUAD_SET(x) (((x) << 22) & 0x00c00000)
+#define PHY_ANALOG_RXTX2_MXRGAIN_MSB 25
+#define PHY_ANALOG_RXTX2_MXRGAIN_LSB 24
+#define PHY_ANALOG_RXTX2_MXRGAIN_MASK 0x03000000
+#define PHY_ANALOG_RXTX2_MXRGAIN_GET(x) (((x) & 0x03000000) >> 24)
+#define PHY_ANALOG_RXTX2_MXRGAIN_SET(x) (((x) << 24) & 0x03000000)
+#define PHY_ANALOG_RXTX2_VGAGAIN_MSB 28
+#define PHY_ANALOG_RXTX2_VGAGAIN_LSB 26
+#define PHY_ANALOG_RXTX2_VGAGAIN_MASK 0x1c000000
+#define PHY_ANALOG_RXTX2_VGAGAIN_GET(x) (((x) & 0x1c000000) >> 26)
+#define PHY_ANALOG_RXTX2_VGAGAIN_SET(x) (((x) << 26) & 0x1c000000)
+#define PHY_ANALOG_RXTX2_LNAGAIN_MSB 31
+#define PHY_ANALOG_RXTX2_LNAGAIN_LSB 29
+#define PHY_ANALOG_RXTX2_LNAGAIN_MASK 0xe0000000
+#define PHY_ANALOG_RXTX2_LNAGAIN_GET(x) (((x) & 0xe0000000) >> 29)
+#define PHY_ANALOG_RXTX2_LNAGAIN_SET(x) (((x) << 29) & 0xe0000000)
+
+/* macros for RXTX3 */
+#define PHY_ANALOG_RXTX3_ADDRESS 0x00000108
+#define PHY_ANALOG_RXTX3_OFFSET 0x00000108
+#define PHY_ANALOG_RXTX3_SPARE3_MSB 2
+#define PHY_ANALOG_RXTX3_SPARE3_LSB 0
+#define PHY_ANALOG_RXTX3_SPARE3_MASK 0x00000007
+#define PHY_ANALOG_RXTX3_SPARE3_GET(x) (((x) & 0x00000007) >> 0)
+#define PHY_ANALOG_RXTX3_SPARE3_SET(x) (((x) << 0) & 0x00000007)
+#define PHY_ANALOG_RXTX3_DACFULLSCALE_MSB 3
+#define PHY_ANALOG_RXTX3_DACFULLSCALE_LSB 3
+#define PHY_ANALOG_RXTX3_DACFULLSCALE_MASK 0x00000008
+#define PHY_ANALOG_RXTX3_DACFULLSCALE_GET(x) (((x) & 0x00000008) >> 3)
+#define PHY_ANALOG_RXTX3_DACFULLSCALE_SET(x) (((x) << 3) & 0x00000008)
+#define PHY_ANALOG_RXTX3_DACRSTB_MSB 4
+#define PHY_ANALOG_RXTX3_DACRSTB_LSB 4
+#define PHY_ANALOG_RXTX3_DACRSTB_MASK 0x00000010
+#define PHY_ANALOG_RXTX3_DACRSTB_GET(x) (((x) & 0x00000010) >> 4)
+#define PHY_ANALOG_RXTX3_DACRSTB_SET(x) (((x) << 4) & 0x00000010)
+#define PHY_ANALOG_RXTX3_ADDACLOOPBACK_MSB 5
+#define PHY_ANALOG_RXTX3_ADDACLOOPBACK_LSB 5
+#define PHY_ANALOG_RXTX3_ADDACLOOPBACK_MASK 0x00000020
+#define PHY_ANALOG_RXTX3_ADDACLOOPBACK_GET(x) (((x) & 0x00000020) >> 5)
+#define PHY_ANALOG_RXTX3_ADDACLOOPBACK_SET(x) (((x) << 5) & 0x00000020)
+#define PHY_ANALOG_RXTX3_ADCSHORT_MSB 6
+#define PHY_ANALOG_RXTX3_ADCSHORT_LSB 6
+#define PHY_ANALOG_RXTX3_ADCSHORT_MASK 0x00000040
+#define PHY_ANALOG_RXTX3_ADCSHORT_GET(x) (((x) & 0x00000040) >> 6)
+#define PHY_ANALOG_RXTX3_ADCSHORT_SET(x) (((x) << 6) & 0x00000040)
+#define PHY_ANALOG_RXTX3_DACPWD_MSB 7
+#define PHY_ANALOG_RXTX3_DACPWD_LSB 7
+#define PHY_ANALOG_RXTX3_DACPWD_MASK 0x00000080
+#define PHY_ANALOG_RXTX3_DACPWD_GET(x) (((x) & 0x00000080) >> 7)
+#define PHY_ANALOG_RXTX3_DACPWD_SET(x) (((x) << 7) & 0x00000080)
+#define PHY_ANALOG_RXTX3_DACPWD_OVR_MSB 8
+#define PHY_ANALOG_RXTX3_DACPWD_OVR_LSB 8
+#define PHY_ANALOG_RXTX3_DACPWD_OVR_MASK 0x00000100
+#define PHY_ANALOG_RXTX3_DACPWD_OVR_GET(x) (((x) & 0x00000100) >> 8)
+#define PHY_ANALOG_RXTX3_DACPWD_OVR_SET(x) (((x) << 8) & 0x00000100)
+#define PHY_ANALOG_RXTX3_ADCPWD_MSB 9
+#define PHY_ANALOG_RXTX3_ADCPWD_LSB 9
+#define PHY_ANALOG_RXTX3_ADCPWD_MASK 0x00000200
+#define PHY_ANALOG_RXTX3_ADCPWD_GET(x) (((x) & 0x00000200) >> 9)
+#define PHY_ANALOG_RXTX3_ADCPWD_SET(x) (((x) << 9) & 0x00000200)
+#define PHY_ANALOG_RXTX3_ADCPWD_OVR_MSB 10
+#define PHY_ANALOG_RXTX3_ADCPWD_OVR_LSB 10
+#define PHY_ANALOG_RXTX3_ADCPWD_OVR_MASK 0x00000400
+#define PHY_ANALOG_RXTX3_ADCPWD_OVR_GET(x) (((x) & 0x00000400) >> 10)
+#define PHY_ANALOG_RXTX3_ADCPWD_OVR_SET(x) (((x) << 10) & 0x00000400)
+#define PHY_ANALOG_RXTX3_AGC_CALDAC_MSB 16
+#define PHY_ANALOG_RXTX3_AGC_CALDAC_LSB 11
+#define PHY_ANALOG_RXTX3_AGC_CALDAC_MASK 0x0001f800
+#define PHY_ANALOG_RXTX3_AGC_CALDAC_GET(x) (((x) & 0x0001f800) >> 11)
+#define PHY_ANALOG_RXTX3_AGC_CALDAC_SET(x) (((x) << 11) & 0x0001f800)
+#define PHY_ANALOG_RXTX3_AGC_CAL_MSB 17
+#define PHY_ANALOG_RXTX3_AGC_CAL_LSB 17
+#define PHY_ANALOG_RXTX3_AGC_CAL_MASK 0x00020000
+#define PHY_ANALOG_RXTX3_AGC_CAL_GET(x) (((x) & 0x00020000) >> 17)
+#define PHY_ANALOG_RXTX3_AGC_CAL_SET(x) (((x) << 17) & 0x00020000)
+#define PHY_ANALOG_RXTX3_AGC_CAL_OVR_MSB 18
+#define PHY_ANALOG_RXTX3_AGC_CAL_OVR_LSB 18
+#define PHY_ANALOG_RXTX3_AGC_CAL_OVR_MASK 0x00040000
+#define PHY_ANALOG_RXTX3_AGC_CAL_OVR_GET(x) (((x) & 0x00040000) >> 18)
+#define PHY_ANALOG_RXTX3_AGC_CAL_OVR_SET(x) (((x) << 18) & 0x00040000)
+#define PHY_ANALOG_RXTX3_LOFORCEDON_MSB 19
+#define PHY_ANALOG_RXTX3_LOFORCEDON_LSB 19
+#define PHY_ANALOG_RXTX3_LOFORCEDON_MASK 0x00080000
+#define PHY_ANALOG_RXTX3_LOFORCEDON_GET(x) (((x) & 0x00080000) >> 19)
+#define PHY_ANALOG_RXTX3_LOFORCEDON_SET(x) (((x) << 19) & 0x00080000)
+#define PHY_ANALOG_RXTX3_CALRESIDUE_MSB 20
+#define PHY_ANALOG_RXTX3_CALRESIDUE_LSB 20
+#define PHY_ANALOG_RXTX3_CALRESIDUE_MASK 0x00100000
+#define PHY_ANALOG_RXTX3_CALRESIDUE_GET(x) (((x) & 0x00100000) >> 20)
+#define PHY_ANALOG_RXTX3_CALRESIDUE_SET(x) (((x) << 20) & 0x00100000)
+#define PHY_ANALOG_RXTX3_CALRESIDUE_OVR_MSB 21
+#define PHY_ANALOG_RXTX3_CALRESIDUE_OVR_LSB 21
+#define PHY_ANALOG_RXTX3_CALRESIDUE_OVR_MASK 0x00200000
+#define PHY_ANALOG_RXTX3_CALRESIDUE_OVR_GET(x) (((x) & 0x00200000) >> 21)
+#define PHY_ANALOG_RXTX3_CALRESIDUE_OVR_SET(x) (((x) << 21) & 0x00200000)
+#define PHY_ANALOG_RXTX3_CALFC_MSB 22
+#define PHY_ANALOG_RXTX3_CALFC_LSB 22
+#define PHY_ANALOG_RXTX3_CALFC_MASK 0x00400000
+#define PHY_ANALOG_RXTX3_CALFC_GET(x) (((x) & 0x00400000) >> 22)
+#define PHY_ANALOG_RXTX3_CALFC_SET(x) (((x) << 22) & 0x00400000)
+#define PHY_ANALOG_RXTX3_CALFC_OVR_MSB 23
+#define PHY_ANALOG_RXTX3_CALFC_OVR_LSB 23
+#define PHY_ANALOG_RXTX3_CALFC_OVR_MASK 0x00800000
+#define PHY_ANALOG_RXTX3_CALFC_OVR_GET(x) (((x) & 0x00800000) >> 23)
+#define PHY_ANALOG_RXTX3_CALFC_OVR_SET(x) (((x) << 23) & 0x00800000)
+#define PHY_ANALOG_RXTX3_CALTX_MSB 24
+#define PHY_ANALOG_RXTX3_CALTX_LSB 24
+#define PHY_ANALOG_RXTX3_CALTX_MASK 0x01000000
+#define PHY_ANALOG_RXTX3_CALTX_GET(x) (((x) & 0x01000000) >> 24)
+#define PHY_ANALOG_RXTX3_CALTX_SET(x) (((x) << 24) & 0x01000000)
+#define PHY_ANALOG_RXTX3_CALTX_OVR_MSB 25
+#define PHY_ANALOG_RXTX3_CALTX_OVR_LSB 25
+#define PHY_ANALOG_RXTX3_CALTX_OVR_MASK 0x02000000
+#define PHY_ANALOG_RXTX3_CALTX_OVR_GET(x) (((x) & 0x02000000) >> 25)
+#define PHY_ANALOG_RXTX3_CALTX_OVR_SET(x) (((x) << 25) & 0x02000000)
+#define PHY_ANALOG_RXTX3_CALTXSHIFT_MSB 26
+#define PHY_ANALOG_RXTX3_CALTXSHIFT_LSB 26
+#define PHY_ANALOG_RXTX3_CALTXSHIFT_MASK 0x04000000
+#define PHY_ANALOG_RXTX3_CALTXSHIFT_GET(x) (((x) & 0x04000000) >> 26)
+#define PHY_ANALOG_RXTX3_CALTXSHIFT_SET(x) (((x) << 26) & 0x04000000)
+#define PHY_ANALOG_RXTX3_CALTXSHIFT_OVR_MSB 27
+#define PHY_ANALOG_RXTX3_CALTXSHIFT_OVR_LSB 27
+#define PHY_ANALOG_RXTX3_CALTXSHIFT_OVR_MASK 0x08000000
+#define PHY_ANALOG_RXTX3_CALTXSHIFT_OVR_GET(x) (((x) & 0x08000000) >> 27)
+#define PHY_ANALOG_RXTX3_CALTXSHIFT_OVR_SET(x) (((x) << 27) & 0x08000000)
+#define PHY_ANALOG_RXTX3_CALPA_MSB 28
+#define PHY_ANALOG_RXTX3_CALPA_LSB 28
+#define PHY_ANALOG_RXTX3_CALPA_MASK 0x10000000
+#define PHY_ANALOG_RXTX3_CALPA_GET(x) (((x) & 0x10000000) >> 28)
+#define PHY_ANALOG_RXTX3_CALPA_SET(x) (((x) << 28) & 0x10000000)
+#define PHY_ANALOG_RXTX3_CALPA_OVR_MSB 29
+#define PHY_ANALOG_RXTX3_CALPA_OVR_LSB 29
+#define PHY_ANALOG_RXTX3_CALPA_OVR_MASK 0x20000000
+#define PHY_ANALOG_RXTX3_CALPA_OVR_GET(x) (((x) & 0x20000000) >> 29)
+#define PHY_ANALOG_RXTX3_CALPA_OVR_SET(x) (((x) << 29) & 0x20000000)
+#define PHY_ANALOG_RXTX3_SPURON_MSB 30
+#define PHY_ANALOG_RXTX3_SPURON_LSB 30
+#define PHY_ANALOG_RXTX3_SPURON_MASK 0x40000000
+#define PHY_ANALOG_RXTX3_SPURON_GET(x) (((x) & 0x40000000) >> 30)
+#define PHY_ANALOG_RXTX3_SPURON_SET(x) (((x) << 30) & 0x40000000)
+#define PHY_ANALOG_RXTX3_SPURON_OVR_MSB 31
+#define PHY_ANALOG_RXTX3_SPURON_OVR_LSB 31
+#define PHY_ANALOG_RXTX3_SPURON_OVR_MASK 0x80000000
+#define PHY_ANALOG_RXTX3_SPURON_OVR_GET(x) (((x) & 0x80000000) >> 31)
+#define PHY_ANALOG_RXTX3_SPURON_OVR_SET(x) (((x) << 31) & 0x80000000)
+
+/* macros for BB1 */
+#define PHY_ANALOG_BB1_ADDRESS 0x00000140
+#define PHY_ANALOG_BB1_OFFSET 0x00000140
+#define PHY_ANALOG_BB1_I2V_CURR2X_MSB 0
+#define PHY_ANALOG_BB1_I2V_CURR2X_LSB 0
+#define PHY_ANALOG_BB1_I2V_CURR2X_MASK 0x00000001
+#define PHY_ANALOG_BB1_I2V_CURR2X_GET(x) (((x) & 0x00000001) >> 0)
+#define PHY_ANALOG_BB1_I2V_CURR2X_SET(x) (((x) << 0) & 0x00000001)
+#define PHY_ANALOG_BB1_ENABLE_LOQ_MSB 1
+#define PHY_ANALOG_BB1_ENABLE_LOQ_LSB 1
+#define PHY_ANALOG_BB1_ENABLE_LOQ_MASK 0x00000002
+#define PHY_ANALOG_BB1_ENABLE_LOQ_GET(x) (((x) & 0x00000002) >> 1)
+#define PHY_ANALOG_BB1_ENABLE_LOQ_SET(x) (((x) << 1) & 0x00000002)
+#define PHY_ANALOG_BB1_FORCE_LOQ_MSB 2
+#define PHY_ANALOG_BB1_FORCE_LOQ_LSB 2
+#define PHY_ANALOG_BB1_FORCE_LOQ_MASK 0x00000004
+#define PHY_ANALOG_BB1_FORCE_LOQ_GET(x) (((x) & 0x00000004) >> 2)
+#define PHY_ANALOG_BB1_FORCE_LOQ_SET(x) (((x) << 2) & 0x00000004)
+#define PHY_ANALOG_BB1_ENABLE_NOTCH_MSB 3
+#define PHY_ANALOG_BB1_ENABLE_NOTCH_LSB 3
+#define PHY_ANALOG_BB1_ENABLE_NOTCH_MASK 0x00000008
+#define PHY_ANALOG_BB1_ENABLE_NOTCH_GET(x) (((x) & 0x00000008) >> 3)
+#define PHY_ANALOG_BB1_ENABLE_NOTCH_SET(x) (((x) << 3) & 0x00000008)
+#define PHY_ANALOG_BB1_FORCE_NOTCH_MSB 4
+#define PHY_ANALOG_BB1_FORCE_NOTCH_LSB 4
+#define PHY_ANALOG_BB1_FORCE_NOTCH_MASK 0x00000010
+#define PHY_ANALOG_BB1_FORCE_NOTCH_GET(x) (((x) & 0x00000010) >> 4)
+#define PHY_ANALOG_BB1_FORCE_NOTCH_SET(x) (((x) << 4) & 0x00000010)
+#define PHY_ANALOG_BB1_ENABLE_BIQUAD_MSB 5
+#define PHY_ANALOG_BB1_ENABLE_BIQUAD_LSB 5
+#define PHY_ANALOG_BB1_ENABLE_BIQUAD_MASK 0x00000020
+#define PHY_ANALOG_BB1_ENABLE_BIQUAD_GET(x) (((x) & 0x00000020) >> 5)
+#define PHY_ANALOG_BB1_ENABLE_BIQUAD_SET(x) (((x) << 5) & 0x00000020)
+#define PHY_ANALOG_BB1_FORCE_BIQUAD_MSB 6
+#define PHY_ANALOG_BB1_FORCE_BIQUAD_LSB 6
+#define PHY_ANALOG_BB1_FORCE_BIQUAD_MASK 0x00000040
+#define PHY_ANALOG_BB1_FORCE_BIQUAD_GET(x) (((x) & 0x00000040) >> 6)
+#define PHY_ANALOG_BB1_FORCE_BIQUAD_SET(x) (((x) << 6) & 0x00000040)
+#define PHY_ANALOG_BB1_ENABLE_OSDAC_MSB 7
+#define PHY_ANALOG_BB1_ENABLE_OSDAC_LSB 7
+#define PHY_ANALOG_BB1_ENABLE_OSDAC_MASK 0x00000080
+#define PHY_ANALOG_BB1_ENABLE_OSDAC_GET(x) (((x) & 0x00000080) >> 7)
+#define PHY_ANALOG_BB1_ENABLE_OSDAC_SET(x) (((x) << 7) & 0x00000080)
+#define PHY_ANALOG_BB1_FORCE_OSDAC_MSB 8
+#define PHY_ANALOG_BB1_FORCE_OSDAC_LSB 8
+#define PHY_ANALOG_BB1_FORCE_OSDAC_MASK 0x00000100
+#define PHY_ANALOG_BB1_FORCE_OSDAC_GET(x) (((x) & 0x00000100) >> 8)
+#define PHY_ANALOG_BB1_FORCE_OSDAC_SET(x) (((x) << 8) & 0x00000100)
+#define PHY_ANALOG_BB1_ENABLE_V2I_MSB 9
+#define PHY_ANALOG_BB1_ENABLE_V2I_LSB 9
+#define PHY_ANALOG_BB1_ENABLE_V2I_MASK 0x00000200
+#define PHY_ANALOG_BB1_ENABLE_V2I_GET(x) (((x) & 0x00000200) >> 9)
+#define PHY_ANALOG_BB1_ENABLE_V2I_SET(x) (((x) << 9) & 0x00000200)
+#define PHY_ANALOG_BB1_FORCE_V2I_MSB 10
+#define PHY_ANALOG_BB1_FORCE_V2I_LSB 10
+#define PHY_ANALOG_BB1_FORCE_V2I_MASK 0x00000400
+#define PHY_ANALOG_BB1_FORCE_V2I_GET(x) (((x) & 0x00000400) >> 10)
+#define PHY_ANALOG_BB1_FORCE_V2I_SET(x) (((x) << 10) & 0x00000400)
+#define PHY_ANALOG_BB1_ENABLE_I2V_MSB 11
+#define PHY_ANALOG_BB1_ENABLE_I2V_LSB 11
+#define PHY_ANALOG_BB1_ENABLE_I2V_MASK 0x00000800
+#define PHY_ANALOG_BB1_ENABLE_I2V_GET(x) (((x) & 0x00000800) >> 11)
+#define PHY_ANALOG_BB1_ENABLE_I2V_SET(x) (((x) << 11) & 0x00000800)
+#define PHY_ANALOG_BB1_FORCE_I2V_MSB 12
+#define PHY_ANALOG_BB1_FORCE_I2V_LSB 12
+#define PHY_ANALOG_BB1_FORCE_I2V_MASK 0x00001000
+#define PHY_ANALOG_BB1_FORCE_I2V_GET(x) (((x) & 0x00001000) >> 12)
+#define PHY_ANALOG_BB1_FORCE_I2V_SET(x) (((x) << 12) & 0x00001000)
+#define PHY_ANALOG_BB1_CMSEL_MSB 15
+#define PHY_ANALOG_BB1_CMSEL_LSB 13
+#define PHY_ANALOG_BB1_CMSEL_MASK 0x0000e000
+#define PHY_ANALOG_BB1_CMSEL_GET(x) (((x) & 0x0000e000) >> 13)
+#define PHY_ANALOG_BB1_CMSEL_SET(x) (((x) << 13) & 0x0000e000)
+#define PHY_ANALOG_BB1_ATBSEL_MSB 17
+#define PHY_ANALOG_BB1_ATBSEL_LSB 16
+#define PHY_ANALOG_BB1_ATBSEL_MASK 0x00030000
+#define PHY_ANALOG_BB1_ATBSEL_GET(x) (((x) & 0x00030000) >> 16)
+#define PHY_ANALOG_BB1_ATBSEL_SET(x) (((x) << 16) & 0x00030000)
+#define PHY_ANALOG_BB1_PD_OSDAC_CALTX_CALPA_MSB 18
+#define PHY_ANALOG_BB1_PD_OSDAC_CALTX_CALPA_LSB 18
+#define PHY_ANALOG_BB1_PD_OSDAC_CALTX_CALPA_MASK 0x00040000
+#define PHY_ANALOG_BB1_PD_OSDAC_CALTX_CALPA_GET(x) (((x) & 0x00040000) >> 18)
+#define PHY_ANALOG_BB1_PD_OSDAC_CALTX_CALPA_SET(x) (((x) << 18) & 0x00040000)
+#define PHY_ANALOG_BB1_OFSTCORRI2VQ_MSB 23
+#define PHY_ANALOG_BB1_OFSTCORRI2VQ_LSB 19
+#define PHY_ANALOG_BB1_OFSTCORRI2VQ_MASK 0x00f80000
+#define PHY_ANALOG_BB1_OFSTCORRI2VQ_GET(x) (((x) & 0x00f80000) >> 19)
+#define PHY_ANALOG_BB1_OFSTCORRI2VQ_SET(x) (((x) << 19) & 0x00f80000)
+#define PHY_ANALOG_BB1_OFSTCORRI2VI_MSB 28
+#define PHY_ANALOG_BB1_OFSTCORRI2VI_LSB 24
+#define PHY_ANALOG_BB1_OFSTCORRI2VI_MASK 0x1f000000
+#define PHY_ANALOG_BB1_OFSTCORRI2VI_GET(x) (((x) & 0x1f000000) >> 24)
+#define PHY_ANALOG_BB1_OFSTCORRI2VI_SET(x) (((x) << 24) & 0x1f000000)
+#define PHY_ANALOG_BB1_LOCALOFFSET_MSB 29
+#define PHY_ANALOG_BB1_LOCALOFFSET_LSB 29
+#define PHY_ANALOG_BB1_LOCALOFFSET_MASK 0x20000000
+#define PHY_ANALOG_BB1_LOCALOFFSET_GET(x) (((x) & 0x20000000) >> 29)
+#define PHY_ANALOG_BB1_LOCALOFFSET_SET(x) (((x) << 29) & 0x20000000)
+#define PHY_ANALOG_BB1_RANGE_OSDAC_MSB 31
+#define PHY_ANALOG_BB1_RANGE_OSDAC_LSB 30
+#define PHY_ANALOG_BB1_RANGE_OSDAC_MASK 0xc0000000
+#define PHY_ANALOG_BB1_RANGE_OSDAC_GET(x) (((x) & 0xc0000000) >> 30)
+#define PHY_ANALOG_BB1_RANGE_OSDAC_SET(x) (((x) << 30) & 0xc0000000)
+
+/* macros for BB2 */
+#define PHY_ANALOG_BB2_ADDRESS 0x00000144
+#define PHY_ANALOG_BB2_OFFSET 0x00000144
+#define PHY_ANALOG_BB2_SPARE_MSB 6
+#define PHY_ANALOG_BB2_SPARE_LSB 0
+#define PHY_ANALOG_BB2_SPARE_MASK 0x0000007f
+#define PHY_ANALOG_BB2_SPARE_GET(x) (((x) & 0x0000007f) >> 0)
+#define PHY_ANALOG_BB2_SPARE_SET(x) (((x) << 0) & 0x0000007f)
+#define PHY_ANALOG_BB2_SEL_TEST_MSB 9
+#define PHY_ANALOG_BB2_SEL_TEST_LSB 7
+#define PHY_ANALOG_BB2_SEL_TEST_MASK 0x00000380
+#define PHY_ANALOG_BB2_SEL_TEST_GET(x) (((x) & 0x00000380) >> 7)
+#define PHY_ANALOG_BB2_SEL_TEST_SET(x) (((x) << 7) & 0x00000380)
+#define PHY_ANALOG_BB2_SCFIR_CAP_MSB 14
+#define PHY_ANALOG_BB2_SCFIR_CAP_LSB 10
+#define PHY_ANALOG_BB2_SCFIR_CAP_MASK 0x00007c00
+#define PHY_ANALOG_BB2_SCFIR_CAP_GET(x) (((x) & 0x00007c00) >> 10)
+#define PHY_ANALOG_BB2_SCFIR_CAP_SET(x) (((x) << 10) & 0x00007c00)
+#define PHY_ANALOG_BB2_OVERRIDE_SCFIR_CAP_MSB 15
+#define PHY_ANALOG_BB2_OVERRIDE_SCFIR_CAP_LSB 15
+#define PHY_ANALOG_BB2_OVERRIDE_SCFIR_CAP_MASK 0x00008000
+#define PHY_ANALOG_BB2_OVERRIDE_SCFIR_CAP_GET(x) (((x) & 0x00008000) >> 15)
+#define PHY_ANALOG_BB2_OVERRIDE_SCFIR_CAP_SET(x) (((x) << 15) & 0x00008000)
+#define PHY_ANALOG_BB2_FNOTCH_MSB 19
+#define PHY_ANALOG_BB2_FNOTCH_LSB 16
+#define PHY_ANALOG_BB2_FNOTCH_MASK 0x000f0000
+#define PHY_ANALOG_BB2_FNOTCH_GET(x) (((x) & 0x000f0000) >> 16)
+#define PHY_ANALOG_BB2_FNOTCH_SET(x) (((x) << 16) & 0x000f0000)
+#define PHY_ANALOG_BB2_OVERRIDE_FNOTCH_MSB 20
+#define PHY_ANALOG_BB2_OVERRIDE_FNOTCH_LSB 20
+#define PHY_ANALOG_BB2_OVERRIDE_FNOTCH_MASK 0x00100000
+#define PHY_ANALOG_BB2_OVERRIDE_FNOTCH_GET(x) (((x) & 0x00100000) >> 20)
+#define PHY_ANALOG_BB2_OVERRIDE_FNOTCH_SET(x) (((x) << 20) & 0x00100000)
+#define PHY_ANALOG_BB2_FILTERFC_MSB 25
+#define PHY_ANALOG_BB2_FILTERFC_LSB 21
+#define PHY_ANALOG_BB2_FILTERFC_MASK 0x03e00000
+#define PHY_ANALOG_BB2_FILTERFC_GET(x) (((x) & 0x03e00000) >> 21)
+#define PHY_ANALOG_BB2_FILTERFC_SET(x) (((x) << 21) & 0x03e00000)
+#define PHY_ANALOG_BB2_OVERRIDE_FILTERFC_MSB 26
+#define PHY_ANALOG_BB2_OVERRIDE_FILTERFC_LSB 26
+#define PHY_ANALOG_BB2_OVERRIDE_FILTERFC_MASK 0x04000000
+#define PHY_ANALOG_BB2_OVERRIDE_FILTERFC_GET(x) (((x) & 0x04000000) >> 26)
+#define PHY_ANALOG_BB2_OVERRIDE_FILTERFC_SET(x) (((x) << 26) & 0x04000000)
+#define PHY_ANALOG_BB2_I2V2RXOUT_EN_MSB 27
+#define PHY_ANALOG_BB2_I2V2RXOUT_EN_LSB 27
+#define PHY_ANALOG_BB2_I2V2RXOUT_EN_MASK 0x08000000
+#define PHY_ANALOG_BB2_I2V2RXOUT_EN_GET(x) (((x) & 0x08000000) >> 27)
+#define PHY_ANALOG_BB2_I2V2RXOUT_EN_SET(x) (((x) << 27) & 0x08000000)
+#define PHY_ANALOG_BB2_BQ2RXOUT_EN_MSB 28
+#define PHY_ANALOG_BB2_BQ2RXOUT_EN_LSB 28
+#define PHY_ANALOG_BB2_BQ2RXOUT_EN_MASK 0x10000000
+#define PHY_ANALOG_BB2_BQ2RXOUT_EN_GET(x) (((x) & 0x10000000) >> 28)
+#define PHY_ANALOG_BB2_BQ2RXOUT_EN_SET(x) (((x) << 28) & 0x10000000)
+#define PHY_ANALOG_BB2_RXIN2I2V_EN_MSB 29
+#define PHY_ANALOG_BB2_RXIN2I2V_EN_LSB 29
+#define PHY_ANALOG_BB2_RXIN2I2V_EN_MASK 0x20000000
+#define PHY_ANALOG_BB2_RXIN2I2V_EN_GET(x) (((x) & 0x20000000) >> 29)
+#define PHY_ANALOG_BB2_RXIN2I2V_EN_SET(x) (((x) << 29) & 0x20000000)
+#define PHY_ANALOG_BB2_RXIN2BQ_EN_MSB 30
+#define PHY_ANALOG_BB2_RXIN2BQ_EN_LSB 30
+#define PHY_ANALOG_BB2_RXIN2BQ_EN_MASK 0x40000000
+#define PHY_ANALOG_BB2_RXIN2BQ_EN_GET(x) (((x) & 0x40000000) >> 30)
+#define PHY_ANALOG_BB2_RXIN2BQ_EN_SET(x) (((x) << 30) & 0x40000000)
+#define PHY_ANALOG_BB2_SWITCH_OVERRIDE_MSB 31
+#define PHY_ANALOG_BB2_SWITCH_OVERRIDE_LSB 31
+#define PHY_ANALOG_BB2_SWITCH_OVERRIDE_MASK 0x80000000
+#define PHY_ANALOG_BB2_SWITCH_OVERRIDE_GET(x) (((x) & 0x80000000) >> 31)
+#define PHY_ANALOG_BB2_SWITCH_OVERRIDE_SET(x) (((x) << 31) & 0x80000000)
+
+/* macros for TOP1 */
+#define PHY_ANALOG_TOP1_ADDRESS 0x00000280
+#define PHY_ANALOG_TOP1_OFFSET 0x00000280
+#define PHY_ANALOG_TOP1_SEL_KVCO_MSB 1
+#define PHY_ANALOG_TOP1_SEL_KVCO_LSB 0
+#define PHY_ANALOG_TOP1_SEL_KVCO_MASK 0x00000003
+#define PHY_ANALOG_TOP1_SEL_KVCO_GET(x) (((x) & 0x00000003) >> 0)
+#define PHY_ANALOG_TOP1_SEL_KVCO_SET(x) (((x) << 0) & 0x00000003)
+#define PHY_ANALOG_TOP1_PLLATB_MSB 3
+#define PHY_ANALOG_TOP1_PLLATB_LSB 2
+#define PHY_ANALOG_TOP1_PLLATB_MASK 0x0000000c
+#define PHY_ANALOG_TOP1_PLLATB_GET(x) (((x) & 0x0000000c) >> 2)
+#define PHY_ANALOG_TOP1_PLLATB_SET(x) (((x) << 2) & 0x0000000c)
+#define PHY_ANALOG_TOP1_PLL_SVREG_MSB 4
+#define PHY_ANALOG_TOP1_PLL_SVREG_LSB 4
+#define PHY_ANALOG_TOP1_PLL_SVREG_MASK 0x00000010
+#define PHY_ANALOG_TOP1_PLL_SVREG_GET(x) (((x) & 0x00000010) >> 4)
+#define PHY_ANALOG_TOP1_PLL_SVREG_SET(x) (((x) << 4) & 0x00000010)
+#define PHY_ANALOG_TOP1_HI_FREQ_EN_MSB 5
+#define PHY_ANALOG_TOP1_HI_FREQ_EN_LSB 5
+#define PHY_ANALOG_TOP1_HI_FREQ_EN_MASK 0x00000020
+#define PHY_ANALOG_TOP1_HI_FREQ_EN_GET(x) (((x) & 0x00000020) >> 5)
+#define PHY_ANALOG_TOP1_HI_FREQ_EN_SET(x) (((x) << 5) & 0x00000020)
+#define PHY_ANALOG_TOP1_PWDPLL_MSB 6
+#define PHY_ANALOG_TOP1_PWDPLL_LSB 6
+#define PHY_ANALOG_TOP1_PWDPLL_MASK 0x00000040
+#define PHY_ANALOG_TOP1_PWDPLL_GET(x) (((x) & 0x00000040) >> 6)
+#define PHY_ANALOG_TOP1_PWDPLL_SET(x) (((x) << 6) & 0x00000040)
+#define PHY_ANALOG_TOP1_PWDEXTCLKBUF_MSB 7
+#define PHY_ANALOG_TOP1_PWDEXTCLKBUF_LSB 7
+#define PHY_ANALOG_TOP1_PWDEXTCLKBUF_MASK 0x00000080
+#define PHY_ANALOG_TOP1_PWDEXTCLKBUF_GET(x) (((x) & 0x00000080) >> 7)
+#define PHY_ANALOG_TOP1_PWDEXTCLKBUF_SET(x) (((x) << 7) & 0x00000080)
+#define PHY_ANALOG_TOP1_ADCPWD_PHASE_MSB 9
+#define PHY_ANALOG_TOP1_ADCPWD_PHASE_LSB 8
+#define PHY_ANALOG_TOP1_ADCPWD_PHASE_MASK 0x00000300
+#define PHY_ANALOG_TOP1_ADCPWD_PHASE_GET(x) (((x) & 0x00000300) >> 8)
+#define PHY_ANALOG_TOP1_ADCPWD_PHASE_SET(x) (((x) << 8) & 0x00000300)
+#define PHY_ANALOG_TOP1_ADCCLK_PHASE_MSB 11
+#define PHY_ANALOG_TOP1_ADCCLK_PHASE_LSB 10
+#define PHY_ANALOG_TOP1_ADCCLK_PHASE_MASK 0x00000c00
+#define PHY_ANALOG_TOP1_ADCCLK_PHASE_GET(x) (((x) & 0x00000c00) >> 10)
+#define PHY_ANALOG_TOP1_ADCCLK_PHASE_SET(x) (((x) << 10) & 0x00000c00)
+#define PHY_ANALOG_TOP1_DAC_CLK_SEL_MSB 13
+#define PHY_ANALOG_TOP1_DAC_CLK_SEL_LSB 12
+#define PHY_ANALOG_TOP1_DAC_CLK_SEL_MASK 0x00003000
+#define PHY_ANALOG_TOP1_DAC_CLK_SEL_GET(x) (((x) & 0x00003000) >> 12)
+#define PHY_ANALOG_TOP1_DAC_CLK_SEL_SET(x) (((x) << 12) & 0x00003000)
+#define PHY_ANALOG_TOP1_ADC_CLK_SEL_MSB 15
+#define PHY_ANALOG_TOP1_ADC_CLK_SEL_LSB 14
+#define PHY_ANALOG_TOP1_ADC_CLK_SEL_MASK 0x0000c000
+#define PHY_ANALOG_TOP1_ADC_CLK_SEL_GET(x) (((x) & 0x0000c000) >> 14)
+#define PHY_ANALOG_TOP1_ADC_CLK_SEL_SET(x) (((x) << 14) & 0x0000c000)
+#define PHY_ANALOG_TOP1_REFDIV_MSB 19
+#define PHY_ANALOG_TOP1_REFDIV_LSB 16
+#define PHY_ANALOG_TOP1_REFDIV_MASK 0x000f0000
+#define PHY_ANALOG_TOP1_REFDIV_GET(x) (((x) & 0x000f0000) >> 16)
+#define PHY_ANALOG_TOP1_REFDIV_SET(x) (((x) << 16) & 0x000f0000)
+#define PHY_ANALOG_TOP1_DIV_MSB 29
+#define PHY_ANALOG_TOP1_DIV_LSB 20
+#define PHY_ANALOG_TOP1_DIV_MASK 0x3ff00000
+#define PHY_ANALOG_TOP1_DIV_GET(x) (((x) & 0x3ff00000) >> 20)
+#define PHY_ANALOG_TOP1_DIV_SET(x) (((x) << 20) & 0x3ff00000)
+#define PHY_ANALOG_TOP1_PLLBYPASS_MSB 30
+#define PHY_ANALOG_TOP1_PLLBYPASS_LSB 30
+#define PHY_ANALOG_TOP1_PLLBYPASS_MASK 0x40000000
+#define PHY_ANALOG_TOP1_PLLBYPASS_GET(x) (((x) & 0x40000000) >> 30)
+#define PHY_ANALOG_TOP1_PLLBYPASS_SET(x) (((x) << 30) & 0x40000000)
+#define PHY_ANALOG_TOP1_CLKMOD_RSTB_MSB 31
+#define PHY_ANALOG_TOP1_CLKMOD_RSTB_LSB 31
+#define PHY_ANALOG_TOP1_CLKMOD_RSTB_MASK 0x80000000
+#define PHY_ANALOG_TOP1_CLKMOD_RSTB_GET(x) (((x) & 0x80000000) >> 31)
+#define PHY_ANALOG_TOP1_CLKMOD_RSTB_SET(x) (((x) << 31) & 0x80000000)
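+
+/*
+ * Usage sketch (illustrative only): a field such as the TOP1 reference
+ * divider is normally updated with a read-modify-write, clearing the
+ * field with its _MASK and inserting the new value with _SET, then read
+ * back with _GET.  reg_read()/reg_write() are hypothetical stand-ins for
+ * whatever 32-bit register accessors the surrounding driver provides;
+ * they are not defined in this header.
+ *
+ *   unsigned int v = reg_read(PHY_ANALOG_TOP1_ADDRESS);
+ *   v &= ~PHY_ANALOG_TOP1_REFDIV_MASK;
+ *   v |= PHY_ANALOG_TOP1_REFDIV_SET(1);
+ *   reg_write(PHY_ANALOG_TOP1_ADDRESS, v);
+ *   v = PHY_ANALOG_TOP1_REFDIV_GET(reg_read(PHY_ANALOG_TOP1_ADDRESS));
+ */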
+
+/* macros for TOP2 */
+#define PHY_ANALOG_TOP2_ADDRESS 0x00000284
+#define PHY_ANALOG_TOP2_OFFSET 0x00000284
+#define PHY_ANALOG_TOP2_PLL_LOWLEAK_MSB 0
+#define PHY_ANALOG_TOP2_PLL_LOWLEAK_LSB 0
+#define PHY_ANALOG_TOP2_PLL_LOWLEAK_MASK 0x00000001
+#define PHY_ANALOG_TOP2_PLL_LOWLEAK_GET(x) (((x) & 0x00000001) >> 0)
+#define PHY_ANALOG_TOP2_PLL_LOWLEAK_SET(x) (((x) << 0) & 0x00000001)
+#define PHY_ANALOG_TOP2_PLL_LEAK_MSB 4
+#define PHY_ANALOG_TOP2_PLL_LEAK_LSB 1
+#define PHY_ANALOG_TOP2_PLL_LEAK_MASK 0x0000001e
+#define PHY_ANALOG_TOP2_PLL_LEAK_GET(x) (((x) & 0x0000001e) >> 1)
+#define PHY_ANALOG_TOP2_PLL_LEAK_SET(x) (((x) << 1) & 0x0000001e)
+#define PHY_ANALOG_TOP2_PLLFRAC_MSB 19
+#define PHY_ANALOG_TOP2_PLLFRAC_LSB 5
+#define PHY_ANALOG_TOP2_PLLFRAC_MASK 0x000fffe0
+#define PHY_ANALOG_TOP2_PLLFRAC_GET(x) (((x) & 0x000fffe0) >> 5)
+#define PHY_ANALOG_TOP2_PLLFRAC_SET(x) (((x) << 5) & 0x000fffe0)
+#define PHY_ANALOG_TOP2_PWD_PLLSDM_MSB 20
+#define PHY_ANALOG_TOP2_PWD_PLLSDM_LSB 20
+#define PHY_ANALOG_TOP2_PWD_PLLSDM_MASK 0x00100000
+#define PHY_ANALOG_TOP2_PWD_PLLSDM_GET(x) (((x) & 0x00100000) >> 20)
+#define PHY_ANALOG_TOP2_PWD_PLLSDM_SET(x) (((x) << 20) & 0x00100000)
+#define PHY_ANALOG_TOP2_PLLICP_MSB 23
+#define PHY_ANALOG_TOP2_PLLICP_LSB 21
+#define PHY_ANALOG_TOP2_PLLICP_MASK 0x00e00000
+#define PHY_ANALOG_TOP2_PLLICP_GET(x) (((x) & 0x00e00000) >> 21)
+#define PHY_ANALOG_TOP2_PLLICP_SET(x) (((x) << 21) & 0x00e00000)
+#define PHY_ANALOG_TOP2_PLLFILTER_MSB 31
+#define PHY_ANALOG_TOP2_PLLFILTER_LSB 24
+#define PHY_ANALOG_TOP2_PLLFILTER_MASK 0xff000000
+#define PHY_ANALOG_TOP2_PLLFILTER_GET(x) (((x) & 0xff000000) >> 24)
+#define PHY_ANALOG_TOP2_PLLFILTER_SET(x) (((x) << 24) & 0xff000000)
+
+/* macros for TOP3 */
+#define PHY_ANALOG_TOP3_ADDRESS 0x00000288
+#define PHY_ANALOG_TOP3_OFFSET 0x00000288
+#define PHY_ANALOG_TOP3_INT2GND_MSB 0
+#define PHY_ANALOG_TOP3_INT2GND_LSB 0
+#define PHY_ANALOG_TOP3_INT2GND_MASK 0x00000001
+#define PHY_ANALOG_TOP3_INT2GND_GET(x) (((x) & 0x00000001) >> 0)
+#define PHY_ANALOG_TOP3_INT2GND_SET(x) (((x) << 0) & 0x00000001)
+#define PHY_ANALOG_TOP3_PWDPALCLK_MSB 1
+#define PHY_ANALOG_TOP3_PWDPALCLK_LSB 1
+#define PHY_ANALOG_TOP3_PWDPALCLK_MASK 0x00000002
+#define PHY_ANALOG_TOP3_PWDPALCLK_GET(x) (((x) & 0x00000002) >> 1)
+#define PHY_ANALOG_TOP3_PWDPALCLK_SET(x) (((x) << 1) & 0x00000002)
+#define PHY_ANALOG_TOP3_PWDAGCCLK_MSB 2
+#define PHY_ANALOG_TOP3_PWDAGCCLK_LSB 2
+#define PHY_ANALOG_TOP3_PWDAGCCLK_MASK 0x00000004
+#define PHY_ANALOG_TOP3_PWDAGCCLK_GET(x) (((x) & 0x00000004) >> 2)
+#define PHY_ANALOG_TOP3_PWDAGCCLK_SET(x) (((x) << 2) & 0x00000004)
+#define PHY_ANALOG_TOP3_PWDV2I_MSB 3
+#define PHY_ANALOG_TOP3_PWDV2I_LSB 3
+#define PHY_ANALOG_TOP3_PWDV2I_MASK 0x00000008
+#define PHY_ANALOG_TOP3_PWDV2I_GET(x) (((x) & 0x00000008) >> 3)
+#define PHY_ANALOG_TOP3_PWDV2I_SET(x) (((x) << 3) & 0x00000008)
+#define PHY_ANALOG_TOP3_PWDBIAS_MSB 4
+#define PHY_ANALOG_TOP3_PWDBIAS_LSB 4
+#define PHY_ANALOG_TOP3_PWDBIAS_MASK 0x00000010
+#define PHY_ANALOG_TOP3_PWDBIAS_GET(x) (((x) & 0x00000010) >> 4)
+#define PHY_ANALOG_TOP3_PWDBIAS_SET(x) (((x) << 4) & 0x00000010)
+#define PHY_ANALOG_TOP3_PWDBG_MSB 5
+#define PHY_ANALOG_TOP3_PWDBG_LSB 5
+#define PHY_ANALOG_TOP3_PWDBG_MASK 0x00000020
+#define PHY_ANALOG_TOP3_PWDBG_GET(x) (((x) & 0x00000020) >> 5)
+#define PHY_ANALOG_TOP3_PWDBG_SET(x) (((x) << 5) & 0x00000020)
+#define PHY_ANALOG_TOP3_XTAL_SELVREG_MSB 6
+#define PHY_ANALOG_TOP3_XTAL_SELVREG_LSB 6
+#define PHY_ANALOG_TOP3_XTAL_SELVREG_MASK 0x00000040
+#define PHY_ANALOG_TOP3_XTAL_SELVREG_GET(x) (((x) & 0x00000040) >> 6)
+#define PHY_ANALOG_TOP3_XTAL_SELVREG_SET(x) (((x) << 6) & 0x00000040)
+#define PHY_ANALOG_TOP3_XTAL_PWDREG_MSB 7
+#define PHY_ANALOG_TOP3_XTAL_PWDREG_LSB 7
+#define PHY_ANALOG_TOP3_XTAL_PWDREG_MASK 0x00000080
+#define PHY_ANALOG_TOP3_XTAL_PWDREG_GET(x) (((x) & 0x00000080) >> 7)
+#define PHY_ANALOG_TOP3_XTAL_PWDREG_SET(x) (((x) << 7) & 0x00000080)
+#define PHY_ANALOG_TOP3_XTAL_PWDCLKIN_MSB 8
+#define PHY_ANALOG_TOP3_XTAL_PWDCLKIN_LSB 8
+#define PHY_ANALOG_TOP3_XTAL_PWDCLKIN_MASK 0x00000100
+#define PHY_ANALOG_TOP3_XTAL_PWDCLKIN_GET(x) (((x) & 0x00000100) >> 8)
+#define PHY_ANALOG_TOP3_XTAL_PWDCLKIN_SET(x) (((x) << 8) & 0x00000100)
+#define PHY_ANALOG_TOP3_XTAL_PWDCLKD_MSB 9
+#define PHY_ANALOG_TOP3_XTAL_PWDCLKD_LSB 9
+#define PHY_ANALOG_TOP3_XTAL_PWDCLKD_MASK 0x00000200
+#define PHY_ANALOG_TOP3_XTAL_PWDCLKD_GET(x) (((x) & 0x00000200) >> 9)
+#define PHY_ANALOG_TOP3_XTAL_PWDCLKD_SET(x) (((x) << 9) & 0x00000200)
+#define PHY_ANALOG_TOP3_XTAL_OSCON_MSB 10
+#define PHY_ANALOG_TOP3_XTAL_OSCON_LSB 10
+#define PHY_ANALOG_TOP3_XTAL_OSCON_MASK 0x00000400
+#define PHY_ANALOG_TOP3_XTAL_OSCON_GET(x) (((x) & 0x00000400) >> 10)
+#define PHY_ANALOG_TOP3_XTAL_OSCON_SET(x) (((x) << 10) & 0x00000400)
+#define PHY_ANALOG_TOP3_XTAL_NOTCXODET_MSB 11
+#define PHY_ANALOG_TOP3_XTAL_NOTCXODET_LSB 11
+#define PHY_ANALOG_TOP3_XTAL_NOTCXODET_MASK 0x00000800
+#define PHY_ANALOG_TOP3_XTAL_NOTCXODET_GET(x) (((x) & 0x00000800) >> 11)
+#define PHY_ANALOG_TOP3_XTAL_NOTCXODET_SET(x) (((x) << 11) & 0x00000800)
+#define PHY_ANALOG_TOP3_XTAL_LOCALBIAS_MSB 12
+#define PHY_ANALOG_TOP3_XTAL_LOCALBIAS_LSB 12
+#define PHY_ANALOG_TOP3_XTAL_LOCALBIAS_MASK 0x00001000
+#define PHY_ANALOG_TOP3_XTAL_LOCALBIAS_GET(x) (((x) & 0x00001000) >> 12)
+#define PHY_ANALOG_TOP3_XTAL_LOCALBIAS_SET(x) (((x) << 12) & 0x00001000)
+#define PHY_ANALOG_TOP3_XTAL_HIGHZ_MSB 13
+#define PHY_ANALOG_TOP3_XTAL_HIGHZ_LSB 13
+#define PHY_ANALOG_TOP3_XTAL_HIGHZ_MASK 0x00002000
+#define PHY_ANALOG_TOP3_XTAL_HIGHZ_GET(x) (((x) & 0x00002000) >> 13)
+#define PHY_ANALOG_TOP3_XTAL_HIGHZ_SET(x) (((x) << 13) & 0x00002000)
+#define PHY_ANALOG_TOP3_XTAL_DRVPNR_MSB 15
+#define PHY_ANALOG_TOP3_XTAL_DRVPNR_LSB 14
+#define PHY_ANALOG_TOP3_XTAL_DRVPNR_MASK 0x0000c000
+#define PHY_ANALOG_TOP3_XTAL_DRVPNR_GET(x) (((x) & 0x0000c000) >> 14)
+#define PHY_ANALOG_TOP3_XTAL_DRVPNR_SET(x) (((x) << 14) & 0x0000c000)
+#define PHY_ANALOG_TOP3_XTALCAPOUTDAC_MSB 22
+#define PHY_ANALOG_TOP3_XTALCAPOUTDAC_LSB 16
+#define PHY_ANALOG_TOP3_XTALCAPOUTDAC_MASK 0x007f0000
+#define PHY_ANALOG_TOP3_XTALCAPOUTDAC_GET(x) (((x) & 0x007f0000) >> 16)
+#define PHY_ANALOG_TOP3_XTALCAPOUTDAC_SET(x) (((x) << 16) & 0x007f0000)
+#define PHY_ANALOG_TOP3_XTAL_CAPINDAC_MSB 29
+#define PHY_ANALOG_TOP3_XTAL_CAPINDAC_LSB 23
+#define PHY_ANALOG_TOP3_XTAL_CAPINDAC_MASK 0x3f800000
+#define PHY_ANALOG_TOP3_XTAL_CAPINDAC_GET(x) (((x) & 0x3f800000) >> 23)
+#define PHY_ANALOG_TOP3_XTAL_CAPINDAC_SET(x) (((x) << 23) & 0x3f800000)
+#define PHY_ANALOG_TOP3_XTAL_BIAS2X_MSB 30
+#define PHY_ANALOG_TOP3_XTAL_BIAS2X_LSB 30
+#define PHY_ANALOG_TOP3_XTAL_BIAS2X_MASK 0x40000000
+#define PHY_ANALOG_TOP3_XTAL_BIAS2X_GET(x) (((x) & 0x40000000) >> 30)
+#define PHY_ANALOG_TOP3_XTAL_BIAS2X_SET(x) (((x) << 30) & 0x40000000)
+#define PHY_ANALOG_TOP3_TCXODET_MSB 31
+#define PHY_ANALOG_TOP3_TCXODET_LSB 31
+#define PHY_ANALOG_TOP3_TCXODET_MASK 0x80000000
+#define PHY_ANALOG_TOP3_TCXODET_GET(x) (((x) & 0x80000000) >> 31)
+
+/* macros for TOP4 */
+#define PHY_ANALOG_TOP4_ADDRESS 0x0000028c
+#define PHY_ANALOG_TOP4_OFFSET 0x0000028c
+#define PHY_ANALOG_TOP4_SPARE4_MSB 19
+#define PHY_ANALOG_TOP4_SPARE4_LSB 0
+#define PHY_ANALOG_TOP4_SPARE4_MASK 0x000fffff
+#define PHY_ANALOG_TOP4_SPARE4_GET(x) (((x) & 0x000fffff) >> 0)
+#define PHY_ANALOG_TOP4_SPARE4_SET(x) (((x) << 0) & 0x000fffff)
+#define PHY_ANALOG_TOP4_SEL_TEMPSENSOR_MSB 20
+#define PHY_ANALOG_TOP4_SEL_TEMPSENSOR_LSB 20
+#define PHY_ANALOG_TOP4_SEL_TEMPSENSOR_MASK 0x00100000
+#define PHY_ANALOG_TOP4_SEL_TEMPSENSOR_GET(x) (((x) & 0x00100000) >> 20)
+#define PHY_ANALOG_TOP4_SEL_TEMPSENSOR_SET(x) (((x) << 20) & 0x00100000)
+#define PHY_ANALOG_TOP4_ADCPWD_OVR_MSB 21
+#define PHY_ANALOG_TOP4_ADCPWD_OVR_LSB 21
+#define PHY_ANALOG_TOP4_ADCPWD_OVR_MASK 0x00200000
+#define PHY_ANALOG_TOP4_ADCPWD_OVR_GET(x) (((x) & 0x00200000) >> 21)
+#define PHY_ANALOG_TOP4_ADCPWD_OVR_SET(x) (((x) << 21) & 0x00200000)
+#define PHY_ANALOG_TOP4_ADCPWD_INT_MSB 22
+#define PHY_ANALOG_TOP4_ADCPWD_INT_LSB 22
+#define PHY_ANALOG_TOP4_ADCPWD_INT_MASK 0x00400000
+#define PHY_ANALOG_TOP4_ADCPWD_INT_GET(x) (((x) & 0x00400000) >> 22)
+#define PHY_ANALOG_TOP4_ADCPWD_INT_SET(x) (((x) << 22) & 0x00400000)
+#define PHY_ANALOG_TOP4_TESTIQ_OFF_MSB 23
+#define PHY_ANALOG_TOP4_TESTIQ_OFF_LSB 23
+#define PHY_ANALOG_TOP4_TESTIQ_OFF_MASK 0x00800000
+#define PHY_ANALOG_TOP4_TESTIQ_OFF_GET(x) (((x) & 0x00800000) >> 23)
+#define PHY_ANALOG_TOP4_TESTIQ_OFF_SET(x) (((x) << 23) & 0x00800000)
+#define PHY_ANALOG_TOP4_TESTIQ_BUFEN_MSB 24
+#define PHY_ANALOG_TOP4_TESTIQ_BUFEN_LSB 24
+#define PHY_ANALOG_TOP4_TESTIQ_BUFEN_MASK 0x01000000
+#define PHY_ANALOG_TOP4_TESTIQ_BUFEN_GET(x) (((x) & 0x01000000) >> 24)
+#define PHY_ANALOG_TOP4_TESTIQ_BUFEN_SET(x) (((x) << 24) & 0x01000000)
+#define PHY_ANALOG_TOP4_PAL_LOCKEDEN_MSB 25
+#define PHY_ANALOG_TOP4_PAL_LOCKEDEN_LSB 25
+#define PHY_ANALOG_TOP4_PAL_LOCKEDEN_MASK 0x02000000
+#define PHY_ANALOG_TOP4_PAL_LOCKEDEN_GET(x) (((x) & 0x02000000) >> 25)
+#define PHY_ANALOG_TOP4_PAL_LOCKEDEN_SET(x) (((x) << 25) & 0x02000000)
+#define PHY_ANALOG_TOP4_SYNTHDIGOUTEN_MSB 26
+#define PHY_ANALOG_TOP4_SYNTHDIGOUTEN_LSB 26
+#define PHY_ANALOG_TOP4_SYNTHDIGOUTEN_MASK 0x04000000
+#define PHY_ANALOG_TOP4_SYNTHDIGOUTEN_GET(x) (((x) & 0x04000000) >> 26)
+#define PHY_ANALOG_TOP4_SYNTHDIGOUTEN_SET(x) (((x) << 26) & 0x04000000)
+#define PHY_ANALOG_TOP4_ENBTCLK_MSB 27
+#define PHY_ANALOG_TOP4_ENBTCLK_LSB 27
+#define PHY_ANALOG_TOP4_ENBTCLK_MASK 0x08000000
+#define PHY_ANALOG_TOP4_ENBTCLK_GET(x) (((x) & 0x08000000) >> 27)
+#define PHY_ANALOG_TOP4_ENBTCLK_SET(x) (((x) << 27) & 0x08000000)
+#define PHY_ANALOG_TOP4_PAD2GND_MSB 28
+#define PHY_ANALOG_TOP4_PAD2GND_LSB 28
+#define PHY_ANALOG_TOP4_PAD2GND_MASK 0x10000000
+#define PHY_ANALOG_TOP4_PAD2GND_GET(x) (((x) & 0x10000000) >> 28)
+#define PHY_ANALOG_TOP4_PAD2GND_SET(x) (((x) << 28) & 0x10000000)
+#define PHY_ANALOG_TOP4_INTH2PAD_MSB 29
+#define PHY_ANALOG_TOP4_INTH2PAD_LSB 29
+#define PHY_ANALOG_TOP4_INTH2PAD_MASK 0x20000000
+#define PHY_ANALOG_TOP4_INTH2PAD_GET(x) (((x) & 0x20000000) >> 29)
+#define PHY_ANALOG_TOP4_INTH2PAD_SET(x) (((x) << 29) & 0x20000000)
+#define PHY_ANALOG_TOP4_INTH2GND_MSB 30
+#define PHY_ANALOG_TOP4_INTH2GND_LSB 30
+#define PHY_ANALOG_TOP4_INTH2GND_MASK 0x40000000
+#define PHY_ANALOG_TOP4_INTH2GND_GET(x) (((x) & 0x40000000) >> 30)
+#define PHY_ANALOG_TOP4_INTH2GND_SET(x) (((x) << 30) & 0x40000000)
+#define PHY_ANALOG_TOP4_INT2PAD_MSB 31
+#define PHY_ANALOG_TOP4_INT2PAD_LSB 31
+#define PHY_ANALOG_TOP4_INT2PAD_MASK 0x80000000
+#define PHY_ANALOG_TOP4_INT2PAD_GET(x) (((x) & 0x80000000) >> 31)
+#define PHY_ANALOG_TOP4_INT2PAD_SET(x) (((x) << 31) & 0x80000000)
+
+/* macros for rbist_cntrl */
+#define PHY_ANALOG_RBIST_CNTRL_ADDRESS 0x00000380
+#define PHY_ANALOG_RBIST_CNTRL_OFFSET 0x00000380
+#define PHY_ANALOG_RBIST_CNTRL_ATE_TONEGEN_DC_ENABLE_MSB 0
+#define PHY_ANALOG_RBIST_CNTRL_ATE_TONEGEN_DC_ENABLE_LSB 0
+#define PHY_ANALOG_RBIST_CNTRL_ATE_TONEGEN_DC_ENABLE_MASK 0x00000001
+#define PHY_ANALOG_RBIST_CNTRL_ATE_TONEGEN_DC_ENABLE_GET(x) (((x) & 0x00000001) >> 0)
+#define PHY_ANALOG_RBIST_CNTRL_ATE_TONEGEN_DC_ENABLE_SET(x) (((x) << 0) & 0x00000001)
+#define PHY_ANALOG_RBIST_CNTRL_ATE_TONEGEN_TONE0_ENABLE_MSB 1
+#define PHY_ANALOG_RBIST_CNTRL_ATE_TONEGEN_TONE0_ENABLE_LSB 1
+#define PHY_ANALOG_RBIST_CNTRL_ATE_TONEGEN_TONE0_ENABLE_MASK 0x00000002
+#define PHY_ANALOG_RBIST_CNTRL_ATE_TONEGEN_TONE0_ENABLE_GET(x) (((x) & 0x00000002) >> 1)
+#define PHY_ANALOG_RBIST_CNTRL_ATE_TONEGEN_TONE0_ENABLE_SET(x) (((x) << 1) & 0x00000002)
+#define PHY_ANALOG_RBIST_CNTRL_ATE_TONEGEN_TONE1_ENABLE_MSB 2
+#define PHY_ANALOG_RBIST_CNTRL_ATE_TONEGEN_TONE1_ENABLE_LSB 2
+#define PHY_ANALOG_RBIST_CNTRL_ATE_TONEGEN_TONE1_ENABLE_MASK 0x00000004
+#define PHY_ANALOG_RBIST_CNTRL_ATE_TONEGEN_TONE1_ENABLE_GET(x) (((x) & 0x00000004) >> 2)
+#define PHY_ANALOG_RBIST_CNTRL_ATE_TONEGEN_TONE1_ENABLE_SET(x) (((x) << 2) & 0x00000004)
+#define PHY_ANALOG_RBIST_CNTRL_ATE_TONEGEN_LFTONE0_ENABLE_MSB 3
+#define PHY_ANALOG_RBIST_CNTRL_ATE_TONEGEN_LFTONE0_ENABLE_LSB 3
+#define PHY_ANALOG_RBIST_CNTRL_ATE_TONEGEN_LFTONE0_ENABLE_MASK 0x00000008
+#define PHY_ANALOG_RBIST_CNTRL_ATE_TONEGEN_LFTONE0_ENABLE_GET(x) (((x) & 0x00000008) >> 3)
+#define PHY_ANALOG_RBIST_CNTRL_ATE_TONEGEN_LFTONE0_ENABLE_SET(x) (((x) << 3) & 0x00000008)
+#define PHY_ANALOG_RBIST_CNTRL_ATE_TONEGEN_LINRAMP_ENABLE_I_MSB 4
+#define PHY_ANALOG_RBIST_CNTRL_ATE_TONEGEN_LINRAMP_ENABLE_I_LSB 4
+#define PHY_ANALOG_RBIST_CNTRL_ATE_TONEGEN_LINRAMP_ENABLE_I_MASK 0x00000010
+#define PHY_ANALOG_RBIST_CNTRL_ATE_TONEGEN_LINRAMP_ENABLE_I_GET(x) (((x) & 0x00000010) >> 4)
+#define PHY_ANALOG_RBIST_CNTRL_ATE_TONEGEN_LINRAMP_ENABLE_I_SET(x) (((x) << 4) & 0x00000010)
+#define PHY_ANALOG_RBIST_CNTRL_ATE_TONEGEN_LINRAMP_ENABLE_Q_MSB 5
+#define PHY_ANALOG_RBIST_CNTRL_ATE_TONEGEN_LINRAMP_ENABLE_Q_LSB 5
+#define PHY_ANALOG_RBIST_CNTRL_ATE_TONEGEN_LINRAMP_ENABLE_Q_MASK 0x00000020
+#define PHY_ANALOG_RBIST_CNTRL_ATE_TONEGEN_LINRAMP_ENABLE_Q_GET(x) (((x) & 0x00000020) >> 5)
+#define PHY_ANALOG_RBIST_CNTRL_ATE_TONEGEN_LINRAMP_ENABLE_Q_SET(x) (((x) << 5) & 0x00000020)
+#define PHY_ANALOG_RBIST_CNTRL_ATE_TONEGEN_PRBS_ENABLE_I_MSB 6
+#define PHY_ANALOG_RBIST_CNTRL_ATE_TONEGEN_PRBS_ENABLE_I_LSB 6
+#define PHY_ANALOG_RBIST_CNTRL_ATE_TONEGEN_PRBS_ENABLE_I_MASK 0x00000040
+#define PHY_ANALOG_RBIST_CNTRL_ATE_TONEGEN_PRBS_ENABLE_I_GET(x) (((x) & 0x00000040) >> 6)
+#define PHY_ANALOG_RBIST_CNTRL_ATE_TONEGEN_PRBS_ENABLE_I_SET(x) (((x) << 6) & 0x00000040)
+#define PHY_ANALOG_RBIST_CNTRL_ATE_TONEGEN_PRBS_ENABLE_Q_MSB 7
+#define PHY_ANALOG_RBIST_CNTRL_ATE_TONEGEN_PRBS_ENABLE_Q_LSB 7
+#define PHY_ANALOG_RBIST_CNTRL_ATE_TONEGEN_PRBS_ENABLE_Q_MASK 0x00000080
+#define PHY_ANALOG_RBIST_CNTRL_ATE_TONEGEN_PRBS_ENABLE_Q_GET(x) (((x) & 0x00000080) >> 7)
+#define PHY_ANALOG_RBIST_CNTRL_ATE_TONEGEN_PRBS_ENABLE_Q_SET(x) (((x) << 7) & 0x00000080)
+#define PHY_ANALOG_RBIST_CNTRL_ATE_CMAC_DC_WRITE_TO_CANCEL_MSB 8
+#define PHY_ANALOG_RBIST_CNTRL_ATE_CMAC_DC_WRITE_TO_CANCEL_LSB 8
+#define PHY_ANALOG_RBIST_CNTRL_ATE_CMAC_DC_WRITE_TO_CANCEL_MASK 0x00000100
+#define PHY_ANALOG_RBIST_CNTRL_ATE_CMAC_DC_WRITE_TO_CANCEL_GET(x) (((x) & 0x00000100) >> 8)
+#define PHY_ANALOG_RBIST_CNTRL_ATE_CMAC_DC_WRITE_TO_CANCEL_SET(x) (((x) << 8) & 0x00000100)
+#define PHY_ANALOG_RBIST_CNTRL_ATE_CMAC_DC_ENABLE_MSB 9
+#define PHY_ANALOG_RBIST_CNTRL_ATE_CMAC_DC_ENABLE_LSB 9
+#define PHY_ANALOG_RBIST_CNTRL_ATE_CMAC_DC_ENABLE_MASK 0x00000200
+#define PHY_ANALOG_RBIST_CNTRL_ATE_CMAC_DC_ENABLE_GET(x) (((x) & 0x00000200) >> 9)
+#define PHY_ANALOG_RBIST_CNTRL_ATE_CMAC_DC_ENABLE_SET(x) (((x) << 9) & 0x00000200)
+#define PHY_ANALOG_RBIST_CNTRL_ATE_CMAC_CORR_ENABLE_MSB 10
+#define PHY_ANALOG_RBIST_CNTRL_ATE_CMAC_CORR_ENABLE_LSB 10
+#define PHY_ANALOG_RBIST_CNTRL_ATE_CMAC_CORR_ENABLE_MASK 0x00000400
+#define PHY_ANALOG_RBIST_CNTRL_ATE_CMAC_CORR_ENABLE_GET(x) (((x) & 0x00000400) >> 10)
+#define PHY_ANALOG_RBIST_CNTRL_ATE_CMAC_CORR_ENABLE_SET(x) (((x) << 10) & 0x00000400)
+#define PHY_ANALOG_RBIST_CNTRL_ATE_CMAC_POWER_ENABLE_MSB 11
+#define PHY_ANALOG_RBIST_CNTRL_ATE_CMAC_POWER_ENABLE_LSB 11
+#define PHY_ANALOG_RBIST_CNTRL_ATE_CMAC_POWER_ENABLE_MASK 0x00000800
+#define PHY_ANALOG_RBIST_CNTRL_ATE_CMAC_POWER_ENABLE_GET(x) (((x) & 0x00000800) >> 11)
+#define PHY_ANALOG_RBIST_CNTRL_ATE_CMAC_POWER_ENABLE_SET(x) (((x) << 11) & 0x00000800)
+#define PHY_ANALOG_RBIST_CNTRL_ATE_CMAC_IQ_ENABLE_MSB 12
+#define PHY_ANALOG_RBIST_CNTRL_ATE_CMAC_IQ_ENABLE_LSB 12
+#define PHY_ANALOG_RBIST_CNTRL_ATE_CMAC_IQ_ENABLE_MASK 0x00001000
+#define PHY_ANALOG_RBIST_CNTRL_ATE_CMAC_IQ_ENABLE_GET(x) (((x) & 0x00001000) >> 12)
+#define PHY_ANALOG_RBIST_CNTRL_ATE_CMAC_IQ_ENABLE_SET(x) (((x) << 12) & 0x00001000)
+#define PHY_ANALOG_RBIST_CNTRL_ATE_CMAC_I2Q2_ENABLE_MSB 13
+#define PHY_ANALOG_RBIST_CNTRL_ATE_CMAC_I2Q2_ENABLE_LSB 13
+#define PHY_ANALOG_RBIST_CNTRL_ATE_CMAC_I2Q2_ENABLE_MASK 0x00002000
+#define PHY_ANALOG_RBIST_CNTRL_ATE_CMAC_I2Q2_ENABLE_GET(x) (((x) & 0x00002000) >> 13)
+#define PHY_ANALOG_RBIST_CNTRL_ATE_CMAC_I2Q2_ENABLE_SET(x) (((x) << 13) & 0x00002000)
+#define PHY_ANALOG_RBIST_CNTRL_ATE_CMAC_POWER_HPF_ENABLE_MSB 14
+#define PHY_ANALOG_RBIST_CNTRL_ATE_CMAC_POWER_HPF_ENABLE_LSB 14
+#define PHY_ANALOG_RBIST_CNTRL_ATE_CMAC_POWER_HPF_ENABLE_MASK 0x00004000
+#define PHY_ANALOG_RBIST_CNTRL_ATE_CMAC_POWER_HPF_ENABLE_GET(x) (((x) & 0x00004000) >> 14)
+#define PHY_ANALOG_RBIST_CNTRL_ATE_CMAC_POWER_HPF_ENABLE_SET(x) (((x) << 14) & 0x00004000)
+#define PHY_ANALOG_RBIST_CNTRL_ATE_RXDAC_CALIBRATE_MSB 15
+#define PHY_ANALOG_RBIST_CNTRL_ATE_RXDAC_CALIBRATE_LSB 15
+#define PHY_ANALOG_RBIST_CNTRL_ATE_RXDAC_CALIBRATE_MASK 0x00008000
+#define PHY_ANALOG_RBIST_CNTRL_ATE_RXDAC_CALIBRATE_GET(x) (((x) & 0x00008000) >> 15)
+#define PHY_ANALOG_RBIST_CNTRL_ATE_RXDAC_CALIBRATE_SET(x) (((x) << 15) & 0x00008000)
+#define PHY_ANALOG_RBIST_CNTRL_ATE_RBIST_ENABLE_MSB 16
+#define PHY_ANALOG_RBIST_CNTRL_ATE_RBIST_ENABLE_LSB 16
+#define PHY_ANALOG_RBIST_CNTRL_ATE_RBIST_ENABLE_MASK 0x00010000
+#define PHY_ANALOG_RBIST_CNTRL_ATE_RBIST_ENABLE_GET(x) (((x) & 0x00010000) >> 16)
+#define PHY_ANALOG_RBIST_CNTRL_ATE_RBIST_ENABLE_SET(x) (((x) << 16) & 0x00010000)
+#define PHY_ANALOG_RBIST_CNTRL_ATE_ADC_CLK_INVERT_MSB 17
+#define PHY_ANALOG_RBIST_CNTRL_ATE_ADC_CLK_INVERT_LSB 17
+#define PHY_ANALOG_RBIST_CNTRL_ATE_ADC_CLK_INVERT_MASK 0x00020000
+#define PHY_ANALOG_RBIST_CNTRL_ATE_ADC_CLK_INVERT_GET(x) (((x) & 0x00020000) >> 17)
+#define PHY_ANALOG_RBIST_CNTRL_ATE_ADC_CLK_INVERT_SET(x) (((x) << 17) & 0x00020000)
+
+/* macros for tx_dc_offset */
+#define PHY_ANALOG_TX_DC_OFFSET_ADDRESS 0x00000384
+#define PHY_ANALOG_TX_DC_OFFSET_OFFSET 0x00000384
+#define PHY_ANALOG_TX_DC_OFFSET_ATE_TONEGEN_DC_I_MSB 10
+#define PHY_ANALOG_TX_DC_OFFSET_ATE_TONEGEN_DC_I_LSB 0
+#define PHY_ANALOG_TX_DC_OFFSET_ATE_TONEGEN_DC_I_MASK 0x000007ff
+#define PHY_ANALOG_TX_DC_OFFSET_ATE_TONEGEN_DC_I_GET(x) (((x) & 0x000007ff) >> 0)
+#define PHY_ANALOG_TX_DC_OFFSET_ATE_TONEGEN_DC_I_SET(x) (((x) << 0) & 0x000007ff)
+#define PHY_ANALOG_TX_DC_OFFSET_ATE_TONEGEN_DC_Q_MSB 26
+#define PHY_ANALOG_TX_DC_OFFSET_ATE_TONEGEN_DC_Q_LSB 16
+#define PHY_ANALOG_TX_DC_OFFSET_ATE_TONEGEN_DC_Q_MASK 0x07ff0000
+#define PHY_ANALOG_TX_DC_OFFSET_ATE_TONEGEN_DC_Q_GET(x) (((x) & 0x07ff0000) >> 16)
+#define PHY_ANALOG_TX_DC_OFFSET_ATE_TONEGEN_DC_Q_SET(x) (((x) << 16) & 0x07ff0000)
+
+/* macros for tx_tonegen0 */
+#define PHY_ANALOG_TX_TONEGEN0_ADDRESS 0x00000388
+#define PHY_ANALOG_TX_TONEGEN0_OFFSET 0x00000388
+#define PHY_ANALOG_TX_TONEGEN0_ATE_TONEGEN_TONE_FREQ_MSB 6
+#define PHY_ANALOG_TX_TONEGEN0_ATE_TONEGEN_TONE_FREQ_LSB 0
+#define PHY_ANALOG_TX_TONEGEN0_ATE_TONEGEN_TONE_FREQ_MASK 0x0000007f
+#define PHY_ANALOG_TX_TONEGEN0_ATE_TONEGEN_TONE_FREQ_GET(x) (((x) & 0x0000007f) >> 0)
+#define PHY_ANALOG_TX_TONEGEN0_ATE_TONEGEN_TONE_FREQ_SET(x) (((x) << 0) & 0x0000007f)
+#define PHY_ANALOG_TX_TONEGEN0_ATE_TONEGEN_TONE_A_EXP_MSB 11
+#define PHY_ANALOG_TX_TONEGEN0_ATE_TONEGEN_TONE_A_EXP_LSB 8
+#define PHY_ANALOG_TX_TONEGEN0_ATE_TONEGEN_TONE_A_EXP_MASK 0x00000f00
+#define PHY_ANALOG_TX_TONEGEN0_ATE_TONEGEN_TONE_A_EXP_GET(x) (((x) & 0x00000f00) >> 8)
+#define PHY_ANALOG_TX_TONEGEN0_ATE_TONEGEN_TONE_A_EXP_SET(x) (((x) << 8) & 0x00000f00)
+#define PHY_ANALOG_TX_TONEGEN0_ATE_TONEGEN_TONE_A_MAN_MSB 23
+#define PHY_ANALOG_TX_TONEGEN0_ATE_TONEGEN_TONE_A_MAN_LSB 16
+#define PHY_ANALOG_TX_TONEGEN0_ATE_TONEGEN_TONE_A_MAN_MASK 0x00ff0000
+#define PHY_ANALOG_TX_TONEGEN0_ATE_TONEGEN_TONE_A_MAN_GET(x) (((x) & 0x00ff0000) >> 16)
+#define PHY_ANALOG_TX_TONEGEN0_ATE_TONEGEN_TONE_A_MAN_SET(x) (((x) << 16) & 0x00ff0000)
+#define PHY_ANALOG_TX_TONEGEN0_ATE_TONEGEN_TONE_TAU_K_MSB 30
+#define PHY_ANALOG_TX_TONEGEN0_ATE_TONEGEN_TONE_TAU_K_LSB 24
+#define PHY_ANALOG_TX_TONEGEN0_ATE_TONEGEN_TONE_TAU_K_MASK 0x7f000000
+#define PHY_ANALOG_TX_TONEGEN0_ATE_TONEGEN_TONE_TAU_K_GET(x) (((x) & 0x7f000000) >> 24)
+#define PHY_ANALOG_TX_TONEGEN0_ATE_TONEGEN_TONE_TAU_K_SET(x) (((x) << 24) & 0x7f000000)
+
+/* macros for tx_tonegen1 */
+#define PHY_ANALOG_TX_TONEGEN1_ADDRESS 0x0000038c
+#define PHY_ANALOG_TX_TONEGEN1_OFFSET 0x0000038c
+#define PHY_ANALOG_TX_TONEGEN1_ATE_TONEGEN_TONE_FREQ_MSB 6
+#define PHY_ANALOG_TX_TONEGEN1_ATE_TONEGEN_TONE_FREQ_LSB 0
+#define PHY_ANALOG_TX_TONEGEN1_ATE_TONEGEN_TONE_FREQ_MASK 0x0000007f
+#define PHY_ANALOG_TX_TONEGEN1_ATE_TONEGEN_TONE_FREQ_GET(x) (((x) & 0x0000007f) >> 0)
+#define PHY_ANALOG_TX_TONEGEN1_ATE_TONEGEN_TONE_FREQ_SET(x) (((x) << 0) & 0x0000007f)
+#define PHY_ANALOG_TX_TONEGEN1_ATE_TONEGEN_TONE_A_EXP_MSB 11
+#define PHY_ANALOG_TX_TONEGEN1_ATE_TONEGEN_TONE_A_EXP_LSB 8
+#define PHY_ANALOG_TX_TONEGEN1_ATE_TONEGEN_TONE_A_EXP_MASK 0x00000f00
+#define PHY_ANALOG_TX_TONEGEN1_ATE_TONEGEN_TONE_A_EXP_GET(x) (((x) & 0x00000f00) >> 8)
+#define PHY_ANALOG_TX_TONEGEN1_ATE_TONEGEN_TONE_A_EXP_SET(x) (((x) << 8) & 0x00000f00)
+#define PHY_ANALOG_TX_TONEGEN1_ATE_TONEGEN_TONE_A_MAN_MSB 23
+#define PHY_ANALOG_TX_TONEGEN1_ATE_TONEGEN_TONE_A_MAN_LSB 16
+#define PHY_ANALOG_TX_TONEGEN1_ATE_TONEGEN_TONE_A_MAN_MASK 0x00ff0000
+#define PHY_ANALOG_TX_TONEGEN1_ATE_TONEGEN_TONE_A_MAN_GET(x) (((x) & 0x00ff0000) >> 16)
+#define PHY_ANALOG_TX_TONEGEN1_ATE_TONEGEN_TONE_A_MAN_SET(x) (((x) << 16) & 0x00ff0000)
+#define PHY_ANALOG_TX_TONEGEN1_ATE_TONEGEN_TONE_TAU_K_MSB 30
+#define PHY_ANALOG_TX_TONEGEN1_ATE_TONEGEN_TONE_TAU_K_LSB 24
+#define PHY_ANALOG_TX_TONEGEN1_ATE_TONEGEN_TONE_TAU_K_MASK 0x7f000000
+#define PHY_ANALOG_TX_TONEGEN1_ATE_TONEGEN_TONE_TAU_K_GET(x) (((x) & 0x7f000000) >> 24)
+#define PHY_ANALOG_TX_TONEGEN1_ATE_TONEGEN_TONE_TAU_K_SET(x) (((x) << 24) & 0x7f000000)
+
+/* macros for tx_lftonegen0 */
+#define PHY_ANALOG_TX_LFTONEGEN0_ADDRESS 0x00000390
+#define PHY_ANALOG_TX_LFTONEGEN0_OFFSET 0x00000390
+#define PHY_ANALOG_TX_LFTONEGEN0_ATE_TONEGEN_TONE_FREQ_MSB 6
+#define PHY_ANALOG_TX_LFTONEGEN0_ATE_TONEGEN_TONE_FREQ_LSB 0
+#define PHY_ANALOG_TX_LFTONEGEN0_ATE_TONEGEN_TONE_FREQ_MASK 0x0000007f
+#define PHY_ANALOG_TX_LFTONEGEN0_ATE_TONEGEN_TONE_FREQ_GET(x) (((x) & 0x0000007f) >> 0)
+#define PHY_ANALOG_TX_LFTONEGEN0_ATE_TONEGEN_TONE_FREQ_SET(x) (((x) << 0) & 0x0000007f)
+#define PHY_ANALOG_TX_LFTONEGEN0_ATE_TONEGEN_TONE_A_EXP_MSB 11
+#define PHY_ANALOG_TX_LFTONEGEN0_ATE_TONEGEN_TONE_A_EXP_LSB 8
+#define PHY_ANALOG_TX_LFTONEGEN0_ATE_TONEGEN_TONE_A_EXP_MASK 0x00000f00
+#define PHY_ANALOG_TX_LFTONEGEN0_ATE_TONEGEN_TONE_A_EXP_GET(x) (((x) & 0x00000f00) >> 8)
+#define PHY_ANALOG_TX_LFTONEGEN0_ATE_TONEGEN_TONE_A_EXP_SET(x) (((x) << 8) & 0x00000f00)
+#define PHY_ANALOG_TX_LFTONEGEN0_ATE_TONEGEN_TONE_A_MAN_MSB 23
+#define PHY_ANALOG_TX_LFTONEGEN0_ATE_TONEGEN_TONE_A_MAN_LSB 16
+#define PHY_ANALOG_TX_LFTONEGEN0_ATE_TONEGEN_TONE_A_MAN_MASK 0x00ff0000
+#define PHY_ANALOG_TX_LFTONEGEN0_ATE_TONEGEN_TONE_A_MAN_GET(x) (((x) & 0x00ff0000) >> 16)
+#define PHY_ANALOG_TX_LFTONEGEN0_ATE_TONEGEN_TONE_A_MAN_SET(x) (((x) << 16) & 0x00ff0000)
+#define PHY_ANALOG_TX_LFTONEGEN0_ATE_TONEGEN_TONE_TAU_K_MSB 30
+#define PHY_ANALOG_TX_LFTONEGEN0_ATE_TONEGEN_TONE_TAU_K_LSB 24
+#define PHY_ANALOG_TX_LFTONEGEN0_ATE_TONEGEN_TONE_TAU_K_MASK 0x7f000000
+#define PHY_ANALOG_TX_LFTONEGEN0_ATE_TONEGEN_TONE_TAU_K_GET(x) (((x) & 0x7f000000) >> 24)
+#define PHY_ANALOG_TX_LFTONEGEN0_ATE_TONEGEN_TONE_TAU_K_SET(x) (((x) << 24) & 0x7f000000)
+
+/* macros for tx_linear_ramp_i */
+#define PHY_ANALOG_TX_LINEAR_RAMP_I_ADDRESS 0x00000394
+#define PHY_ANALOG_TX_LINEAR_RAMP_I_OFFSET 0x00000394
+#define PHY_ANALOG_TX_LINEAR_RAMP_I_ATE_TONEGEN_LINRAMP_INIT_MSB 10
+#define PHY_ANALOG_TX_LINEAR_RAMP_I_ATE_TONEGEN_LINRAMP_INIT_LSB 0
+#define PHY_ANALOG_TX_LINEAR_RAMP_I_ATE_TONEGEN_LINRAMP_INIT_MASK 0x000007ff
+#define PHY_ANALOG_TX_LINEAR_RAMP_I_ATE_TONEGEN_LINRAMP_INIT_GET(x) (((x) & 0x000007ff) >> 0)
+#define PHY_ANALOG_TX_LINEAR_RAMP_I_ATE_TONEGEN_LINRAMP_INIT_SET(x) (((x) << 0) & 0x000007ff)
+#define PHY_ANALOG_TX_LINEAR_RAMP_I_ATE_TONEGEN_LINRAMP_DWELL_MSB 21
+#define PHY_ANALOG_TX_LINEAR_RAMP_I_ATE_TONEGEN_LINRAMP_DWELL_LSB 12
+#define PHY_ANALOG_TX_LINEAR_RAMP_I_ATE_TONEGEN_LINRAMP_DWELL_MASK 0x003ff000
+#define PHY_ANALOG_TX_LINEAR_RAMP_I_ATE_TONEGEN_LINRAMP_DWELL_GET(x) (((x) & 0x003ff000) >> 12)
+#define PHY_ANALOG_TX_LINEAR_RAMP_I_ATE_TONEGEN_LINRAMP_DWELL_SET(x) (((x) << 12) & 0x003ff000)
+#define PHY_ANALOG_TX_LINEAR_RAMP_I_ATE_TONEGEN_LINRAMP_STEP_MSB 29
+#define PHY_ANALOG_TX_LINEAR_RAMP_I_ATE_TONEGEN_LINRAMP_STEP_LSB 24
+#define PHY_ANALOG_TX_LINEAR_RAMP_I_ATE_TONEGEN_LINRAMP_STEP_MASK 0x3f000000
+#define PHY_ANALOG_TX_LINEAR_RAMP_I_ATE_TONEGEN_LINRAMP_STEP_GET(x) (((x) & 0x3f000000) >> 24)
+#define PHY_ANALOG_TX_LINEAR_RAMP_I_ATE_TONEGEN_LINRAMP_STEP_SET(x) (((x) << 24) & 0x3f000000)
+
+/* macros for tx_linear_ramp_q */
+#define PHY_ANALOG_TX_LINEAR_RAMP_Q_ADDRESS 0x00000398
+#define PHY_ANALOG_TX_LINEAR_RAMP_Q_OFFSET 0x00000398
+#define PHY_ANALOG_TX_LINEAR_RAMP_Q_ATE_TONEGEN_LINRAMP_INIT_MSB 10
+#define PHY_ANALOG_TX_LINEAR_RAMP_Q_ATE_TONEGEN_LINRAMP_INIT_LSB 0
+#define PHY_ANALOG_TX_LINEAR_RAMP_Q_ATE_TONEGEN_LINRAMP_INIT_MASK 0x000007ff
+#define PHY_ANALOG_TX_LINEAR_RAMP_Q_ATE_TONEGEN_LINRAMP_INIT_GET(x) (((x) & 0x000007ff) >> 0)
+#define PHY_ANALOG_TX_LINEAR_RAMP_Q_ATE_TONEGEN_LINRAMP_INIT_SET(x) (((x) << 0) & 0x000007ff)
+#define PHY_ANALOG_TX_LINEAR_RAMP_Q_ATE_TONEGEN_LINRAMP_DWELL_MSB 21
+#define PHY_ANALOG_TX_LINEAR_RAMP_Q_ATE_TONEGEN_LINRAMP_DWELL_LSB 12
+#define PHY_ANALOG_TX_LINEAR_RAMP_Q_ATE_TONEGEN_LINRAMP_DWELL_MASK 0x003ff000
+#define PHY_ANALOG_TX_LINEAR_RAMP_Q_ATE_TONEGEN_LINRAMP_DWELL_GET(x) (((x) & 0x003ff000) >> 12)
+#define PHY_ANALOG_TX_LINEAR_RAMP_Q_ATE_TONEGEN_LINRAMP_DWELL_SET(x) (((x) << 12) & 0x003ff000)
+#define PHY_ANALOG_TX_LINEAR_RAMP_Q_ATE_TONEGEN_LINRAMP_STEP_MSB 29
+#define PHY_ANALOG_TX_LINEAR_RAMP_Q_ATE_TONEGEN_LINRAMP_STEP_LSB 24
+#define PHY_ANALOG_TX_LINEAR_RAMP_Q_ATE_TONEGEN_LINRAMP_STEP_MASK 0x3f000000
+#define PHY_ANALOG_TX_LINEAR_RAMP_Q_ATE_TONEGEN_LINRAMP_STEP_GET(x) (((x) & 0x3f000000) >> 24)
+#define PHY_ANALOG_TX_LINEAR_RAMP_Q_ATE_TONEGEN_LINRAMP_STEP_SET(x) (((x) << 24) & 0x3f000000)
+
+/* macros for tx_prbs_mag */
+#define PHY_ANALOG_TX_PRBS_MAG_ADDRESS 0x0000039c
+#define PHY_ANALOG_TX_PRBS_MAG_OFFSET 0x0000039c
+#define PHY_ANALOG_TX_PRBS_MAG_ATE_TONEGEN_PRBS_MAGNITUDE_I_MSB 9
+#define PHY_ANALOG_TX_PRBS_MAG_ATE_TONEGEN_PRBS_MAGNITUDE_I_LSB 0
+#define PHY_ANALOG_TX_PRBS_MAG_ATE_TONEGEN_PRBS_MAGNITUDE_I_MASK 0x000003ff
+#define PHY_ANALOG_TX_PRBS_MAG_ATE_TONEGEN_PRBS_MAGNITUDE_I_GET(x) (((x) & 0x000003ff) >> 0)
+#define PHY_ANALOG_TX_PRBS_MAG_ATE_TONEGEN_PRBS_MAGNITUDE_I_SET(x) (((x) << 0) & 0x000003ff)
+#define PHY_ANALOG_TX_PRBS_MAG_ATE_TONEGEN_PRBS_MAGNITUDE_Q_MSB 25
+#define PHY_ANALOG_TX_PRBS_MAG_ATE_TONEGEN_PRBS_MAGNITUDE_Q_LSB 16
+#define PHY_ANALOG_TX_PRBS_MAG_ATE_TONEGEN_PRBS_MAGNITUDE_Q_MASK 0x03ff0000
+#define PHY_ANALOG_TX_PRBS_MAG_ATE_TONEGEN_PRBS_MAGNITUDE_Q_GET(x) (((x) & 0x03ff0000) >> 16)
+#define PHY_ANALOG_TX_PRBS_MAG_ATE_TONEGEN_PRBS_MAGNITUDE_Q_SET(x) (((x) << 16) & 0x03ff0000)
+
+/* macros for tx_prbs_seed_i */
+#define PHY_ANALOG_TX_PRBS_SEED_I_ADDRESS 0x000003a0
+#define PHY_ANALOG_TX_PRBS_SEED_I_OFFSET 0x000003a0
+#define PHY_ANALOG_TX_PRBS_SEED_I_ATE_TONEGEN_PRBS_SEED_MSB 30
+#define PHY_ANALOG_TX_PRBS_SEED_I_ATE_TONEGEN_PRBS_SEED_LSB 0
+#define PHY_ANALOG_TX_PRBS_SEED_I_ATE_TONEGEN_PRBS_SEED_MASK 0x7fffffff
+#define PHY_ANALOG_TX_PRBS_SEED_I_ATE_TONEGEN_PRBS_SEED_GET(x) (((x) & 0x7fffffff) >> 0)
+#define PHY_ANALOG_TX_PRBS_SEED_I_ATE_TONEGEN_PRBS_SEED_SET(x) (((x) << 0) & 0x7fffffff)
+
+/* macros for tx_prbs_seed_q */
+#define PHY_ANALOG_TX_PRBS_SEED_Q_ADDRESS 0x000003a4
+#define PHY_ANALOG_TX_PRBS_SEED_Q_OFFSET 0x000003a4
+#define PHY_ANALOG_TX_PRBS_SEED_Q_ATE_TONEGEN_PRBS_SEED_MSB 30
+#define PHY_ANALOG_TX_PRBS_SEED_Q_ATE_TONEGEN_PRBS_SEED_LSB 0
+#define PHY_ANALOG_TX_PRBS_SEED_Q_ATE_TONEGEN_PRBS_SEED_MASK 0x7fffffff
+#define PHY_ANALOG_TX_PRBS_SEED_Q_ATE_TONEGEN_PRBS_SEED_GET(x) (((x) & 0x7fffffff) >> 0)
+#define PHY_ANALOG_TX_PRBS_SEED_Q_ATE_TONEGEN_PRBS_SEED_SET(x) (((x) << 0) & 0x7fffffff)
+
+/* macros for cmac_dc_cancel */
+#define PHY_ANALOG_CMAC_DC_CANCEL_ADDRESS 0x000003a8
+#define PHY_ANALOG_CMAC_DC_CANCEL_OFFSET 0x000003a8
+#define PHY_ANALOG_CMAC_DC_CANCEL_ATE_CMAC_DC_CANCEL_I_MSB 9
+#define PHY_ANALOG_CMAC_DC_CANCEL_ATE_CMAC_DC_CANCEL_I_LSB 0
+#define PHY_ANALOG_CMAC_DC_CANCEL_ATE_CMAC_DC_CANCEL_I_MASK 0x000003ff
+#define PHY_ANALOG_CMAC_DC_CANCEL_ATE_CMAC_DC_CANCEL_I_GET(x) (((x) & 0x000003ff) >> 0)
+#define PHY_ANALOG_CMAC_DC_CANCEL_ATE_CMAC_DC_CANCEL_I_SET(x) (((x) << 0) & 0x000003ff)
+#define PHY_ANALOG_CMAC_DC_CANCEL_ATE_CMAC_DC_CANCEL_Q_MSB 25
+#define PHY_ANALOG_CMAC_DC_CANCEL_ATE_CMAC_DC_CANCEL_Q_LSB 16
+#define PHY_ANALOG_CMAC_DC_CANCEL_ATE_CMAC_DC_CANCEL_Q_MASK 0x03ff0000
+#define PHY_ANALOG_CMAC_DC_CANCEL_ATE_CMAC_DC_CANCEL_Q_GET(x) (((x) & 0x03ff0000) >> 16)
+#define PHY_ANALOG_CMAC_DC_CANCEL_ATE_CMAC_DC_CANCEL_Q_SET(x) (((x) << 16) & 0x03ff0000)
+
+/* macros for cmac_dc_offset */
+#define PHY_ANALOG_CMAC_DC_OFFSET_ADDRESS 0x000003ac
+#define PHY_ANALOG_CMAC_DC_OFFSET_OFFSET 0x000003ac
+#define PHY_ANALOG_CMAC_DC_OFFSET_ATE_CMAC_DC_CYCLES_MSB 3
+#define PHY_ANALOG_CMAC_DC_OFFSET_ATE_CMAC_DC_CYCLES_LSB 0
+#define PHY_ANALOG_CMAC_DC_OFFSET_ATE_CMAC_DC_CYCLES_MASK 0x0000000f
+#define PHY_ANALOG_CMAC_DC_OFFSET_ATE_CMAC_DC_CYCLES_GET(x) (((x) & 0x0000000f) >> 0)
+#define PHY_ANALOG_CMAC_DC_OFFSET_ATE_CMAC_DC_CYCLES_SET(x) (((x) << 0) & 0x0000000f)
+
+/* macros for cmac_corr */
+#define PHY_ANALOG_CMAC_CORR_ADDRESS 0x000003b0
+#define PHY_ANALOG_CMAC_CORR_OFFSET 0x000003b0
+#define PHY_ANALOG_CMAC_CORR_ATE_CMAC_CORR_CYCLES_MSB 4
+#define PHY_ANALOG_CMAC_CORR_ATE_CMAC_CORR_CYCLES_LSB 0
+#define PHY_ANALOG_CMAC_CORR_ATE_CMAC_CORR_CYCLES_MASK 0x0000001f
+#define PHY_ANALOG_CMAC_CORR_ATE_CMAC_CORR_CYCLES_GET(x) (((x) & 0x0000001f) >> 0)
+#define PHY_ANALOG_CMAC_CORR_ATE_CMAC_CORR_CYCLES_SET(x) (((x) << 0) & 0x0000001f)
+#define PHY_ANALOG_CMAC_CORR_ATE_CMAC_CORR_FREQ_MSB 13
+#define PHY_ANALOG_CMAC_CORR_ATE_CMAC_CORR_FREQ_LSB 8
+#define PHY_ANALOG_CMAC_CORR_ATE_CMAC_CORR_FREQ_MASK 0x00003f00
+#define PHY_ANALOG_CMAC_CORR_ATE_CMAC_CORR_FREQ_GET(x) (((x) & 0x00003f00) >> 8)
+#define PHY_ANALOG_CMAC_CORR_ATE_CMAC_CORR_FREQ_SET(x) (((x) << 8) & 0x00003f00)
+
+/* macros for cmac_power */
+#define PHY_ANALOG_CMAC_POWER_ADDRESS 0x000003b4
+#define PHY_ANALOG_CMAC_POWER_OFFSET 0x000003b4
+#define PHY_ANALOG_CMAC_POWER_ATE_CMAC_POWER_CYCLES_MSB 3
+#define PHY_ANALOG_CMAC_POWER_ATE_CMAC_POWER_CYCLES_LSB 0
+#define PHY_ANALOG_CMAC_POWER_ATE_CMAC_POWER_CYCLES_MASK 0x0000000f
+#define PHY_ANALOG_CMAC_POWER_ATE_CMAC_POWER_CYCLES_GET(x) (((x) & 0x0000000f) >> 0)
+#define PHY_ANALOG_CMAC_POWER_ATE_CMAC_POWER_CYCLES_SET(x) (((x) << 0) & 0x0000000f)
+
+/* macros for cmac_cross_corr */
+#define PHY_ANALOG_CMAC_CROSS_CORR_ADDRESS 0x000003b8
+#define PHY_ANALOG_CMAC_CROSS_CORR_OFFSET 0x000003b8
+#define PHY_ANALOG_CMAC_CROSS_CORR_ATE_CMAC_IQ_CYCLES_MSB 3
+#define PHY_ANALOG_CMAC_CROSS_CORR_ATE_CMAC_IQ_CYCLES_LSB 0
+#define PHY_ANALOG_CMAC_CROSS_CORR_ATE_CMAC_IQ_CYCLES_MASK 0x0000000f
+#define PHY_ANALOG_CMAC_CROSS_CORR_ATE_CMAC_IQ_CYCLES_GET(x) (((x) & 0x0000000f) >> 0)
+#define PHY_ANALOG_CMAC_CROSS_CORR_ATE_CMAC_IQ_CYCLES_SET(x) (((x) << 0) & 0x0000000f)
+
+/* macros for cmac_i2q2 */
+#define PHY_ANALOG_CMAC_I2Q2_ADDRESS 0x000003bc
+#define PHY_ANALOG_CMAC_I2Q2_OFFSET 0x000003bc
+#define PHY_ANALOG_CMAC_I2Q2_ATE_CMAC_I2Q2_CYCLES_MSB 3
+#define PHY_ANALOG_CMAC_I2Q2_ATE_CMAC_I2Q2_CYCLES_LSB 0
+#define PHY_ANALOG_CMAC_I2Q2_ATE_CMAC_I2Q2_CYCLES_MASK 0x0000000f
+#define PHY_ANALOG_CMAC_I2Q2_ATE_CMAC_I2Q2_CYCLES_GET(x) (((x) & 0x0000000f) >> 0)
+#define PHY_ANALOG_CMAC_I2Q2_ATE_CMAC_I2Q2_CYCLES_SET(x) (((x) << 0) & 0x0000000f)
+
+/* macros for cmac_power_hpf */
+#define PHY_ANALOG_CMAC_POWER_HPF_ADDRESS 0x000003c0
+#define PHY_ANALOG_CMAC_POWER_HPF_OFFSET 0x000003c0
+#define PHY_ANALOG_CMAC_POWER_HPF_ATE_CMAC_POWER_HPF_CYCLES_MSB 3
+#define PHY_ANALOG_CMAC_POWER_HPF_ATE_CMAC_POWER_HPF_CYCLES_LSB 0
+#define PHY_ANALOG_CMAC_POWER_HPF_ATE_CMAC_POWER_HPF_CYCLES_MASK 0x0000000f
+#define PHY_ANALOG_CMAC_POWER_HPF_ATE_CMAC_POWER_HPF_CYCLES_GET(x) (((x) & 0x0000000f) >> 0)
+#define PHY_ANALOG_CMAC_POWER_HPF_ATE_CMAC_POWER_HPF_CYCLES_SET(x) (((x) << 0) & 0x0000000f)
+#define PHY_ANALOG_CMAC_POWER_HPF_ATE_CMAC_POWER_HPF_WAIT_MSB 7
+#define PHY_ANALOG_CMAC_POWER_HPF_ATE_CMAC_POWER_HPF_WAIT_LSB 4
+#define PHY_ANALOG_CMAC_POWER_HPF_ATE_CMAC_POWER_HPF_WAIT_MASK 0x000000f0
+#define PHY_ANALOG_CMAC_POWER_HPF_ATE_CMAC_POWER_HPF_WAIT_GET(x) (((x) & 0x000000f0) >> 4)
+#define PHY_ANALOG_CMAC_POWER_HPF_ATE_CMAC_POWER_HPF_WAIT_SET(x) (((x) << 4) & 0x000000f0)
+
+/* macros for rxdac_set1 */
+#define PHY_ANALOG_RXDAC_SET1_ADDRESS 0x000003c4
+#define PHY_ANALOG_RXDAC_SET1_OFFSET 0x000003c4
+#define PHY_ANALOG_RXDAC_SET1_ATE_RXDAC_MUX_MSB 1
+#define PHY_ANALOG_RXDAC_SET1_ATE_RXDAC_MUX_LSB 0
+#define PHY_ANALOG_RXDAC_SET1_ATE_RXDAC_MUX_MASK 0x00000003
+#define PHY_ANALOG_RXDAC_SET1_ATE_RXDAC_MUX_GET(x) (((x) & 0x00000003) >> 0)
+#define PHY_ANALOG_RXDAC_SET1_ATE_RXDAC_MUX_SET(x) (((x) << 0) & 0x00000003)
+#define PHY_ANALOG_RXDAC_SET1_ATE_RXDAC_HI_GAIN_MSB 4
+#define PHY_ANALOG_RXDAC_SET1_ATE_RXDAC_HI_GAIN_LSB 4
+#define PHY_ANALOG_RXDAC_SET1_ATE_RXDAC_HI_GAIN_MASK 0x00000010
+#define PHY_ANALOG_RXDAC_SET1_ATE_RXDAC_HI_GAIN_GET(x) (((x) & 0x00000010) >> 4)
+#define PHY_ANALOG_RXDAC_SET1_ATE_RXDAC_HI_GAIN_SET(x) (((x) << 4) & 0x00000010)
+#define PHY_ANALOG_RXDAC_SET1_ATE_RXDAC_CAL_WAIT_MSB 13
+#define PHY_ANALOG_RXDAC_SET1_ATE_RXDAC_CAL_WAIT_LSB 8
+#define PHY_ANALOG_RXDAC_SET1_ATE_RXDAC_CAL_WAIT_MASK 0x00003f00
+#define PHY_ANALOG_RXDAC_SET1_ATE_RXDAC_CAL_WAIT_GET(x) (((x) & 0x00003f00) >> 8)
+#define PHY_ANALOG_RXDAC_SET1_ATE_RXDAC_CAL_WAIT_SET(x) (((x) << 8) & 0x00003f00)
+#define PHY_ANALOG_RXDAC_SET1_ATE_RXDAC_CAL_MEASURE_TIME_MSB 19
+#define PHY_ANALOG_RXDAC_SET1_ATE_RXDAC_CAL_MEASURE_TIME_LSB 16
+#define PHY_ANALOG_RXDAC_SET1_ATE_RXDAC_CAL_MEASURE_TIME_MASK 0x000f0000
+#define PHY_ANALOG_RXDAC_SET1_ATE_RXDAC_CAL_MEASURE_TIME_GET(x) (((x) & 0x000f0000) >> 16)
+#define PHY_ANALOG_RXDAC_SET1_ATE_RXDAC_CAL_MEASURE_TIME_SET(x) (((x) << 16) & 0x000f0000)
+
+/* macros for rxdac_set2 */
+#define PHY_ANALOG_RXDAC_SET2_ADDRESS 0x000003c8
+#define PHY_ANALOG_RXDAC_SET2_OFFSET 0x000003c8
+#define PHY_ANALOG_RXDAC_SET2_ATE_RXDAC_I_HI_MSB 4
+#define PHY_ANALOG_RXDAC_SET2_ATE_RXDAC_I_HI_LSB 0
+#define PHY_ANALOG_RXDAC_SET2_ATE_RXDAC_I_HI_MASK 0x0000001f
+#define PHY_ANALOG_RXDAC_SET2_ATE_RXDAC_I_HI_GET(x) (((x) & 0x0000001f) >> 0)
+#define PHY_ANALOG_RXDAC_SET2_ATE_RXDAC_I_HI_SET(x) (((x) << 0) & 0x0000001f)
+#define PHY_ANALOG_RXDAC_SET2_ATE_RXDAC_Q_HI_MSB 12
+#define PHY_ANALOG_RXDAC_SET2_ATE_RXDAC_Q_HI_LSB 8
+#define PHY_ANALOG_RXDAC_SET2_ATE_RXDAC_Q_HI_MASK 0x00001f00
+#define PHY_ANALOG_RXDAC_SET2_ATE_RXDAC_Q_HI_GET(x) (((x) & 0x00001f00) >> 8)
+#define PHY_ANALOG_RXDAC_SET2_ATE_RXDAC_Q_HI_SET(x) (((x) << 8) & 0x00001f00)
+#define PHY_ANALOG_RXDAC_SET2_ATE_RXDAC_I_LOW_MSB 20
+#define PHY_ANALOG_RXDAC_SET2_ATE_RXDAC_I_LOW_LSB 16
+#define PHY_ANALOG_RXDAC_SET2_ATE_RXDAC_I_LOW_MASK 0x001f0000
+#define PHY_ANALOG_RXDAC_SET2_ATE_RXDAC_I_LOW_GET(x) (((x) & 0x001f0000) >> 16)
+#define PHY_ANALOG_RXDAC_SET2_ATE_RXDAC_I_LOW_SET(x) (((x) << 16) & 0x001f0000)
+#define PHY_ANALOG_RXDAC_SET2_ATE_RXDAC_Q_LOW_MSB 28
+#define PHY_ANALOG_RXDAC_SET2_ATE_RXDAC_Q_LOW_LSB 24
+#define PHY_ANALOG_RXDAC_SET2_ATE_RXDAC_Q_LOW_MASK 0x1f000000
+#define PHY_ANALOG_RXDAC_SET2_ATE_RXDAC_Q_LOW_GET(x) (((x) & 0x1f000000) >> 24)
+#define PHY_ANALOG_RXDAC_SET2_ATE_RXDAC_Q_LOW_SET(x) (((x) << 24) & 0x1f000000)
+
+/* macros for rxdac_long_shift */
+#define PHY_ANALOG_RXDAC_LONG_SHIFT_ADDRESS 0x000003cc
+#define PHY_ANALOG_RXDAC_LONG_SHIFT_OFFSET 0x000003cc
+#define PHY_ANALOG_RXDAC_LONG_SHIFT_ATE_RXDAC_I_STATIC_MSB 4
+#define PHY_ANALOG_RXDAC_LONG_SHIFT_ATE_RXDAC_I_STATIC_LSB 0
+#define PHY_ANALOG_RXDAC_LONG_SHIFT_ATE_RXDAC_I_STATIC_MASK 0x0000001f
+#define PHY_ANALOG_RXDAC_LONG_SHIFT_ATE_RXDAC_I_STATIC_GET(x) (((x) & 0x0000001f) >> 0)
+#define PHY_ANALOG_RXDAC_LONG_SHIFT_ATE_RXDAC_I_STATIC_SET(x) (((x) << 0) & 0x0000001f)
+#define PHY_ANALOG_RXDAC_LONG_SHIFT_ATE_RXDAC_Q_STATIC_MSB 12
+#define PHY_ANALOG_RXDAC_LONG_SHIFT_ATE_RXDAC_Q_STATIC_LSB 8
+#define PHY_ANALOG_RXDAC_LONG_SHIFT_ATE_RXDAC_Q_STATIC_MASK 0x00001f00
+#define PHY_ANALOG_RXDAC_LONG_SHIFT_ATE_RXDAC_Q_STATIC_GET(x) (((x) & 0x00001f00) >> 8)
+#define PHY_ANALOG_RXDAC_LONG_SHIFT_ATE_RXDAC_Q_STATIC_SET(x) (((x) << 8) & 0x00001f00)
+
+/* macros for cmac_results_i */
+#define PHY_ANALOG_CMAC_RESULTS_I_ADDRESS 0x000003d0
+#define PHY_ANALOG_CMAC_RESULTS_I_OFFSET 0x000003d0
+#define PHY_ANALOG_CMAC_RESULTS_I_ATE_CMAC_RESULTS_MSB 31
+#define PHY_ANALOG_CMAC_RESULTS_I_ATE_CMAC_RESULTS_LSB 0
+#define PHY_ANALOG_CMAC_RESULTS_I_ATE_CMAC_RESULTS_MASK 0xffffffff
+#define PHY_ANALOG_CMAC_RESULTS_I_ATE_CMAC_RESULTS_GET(x) (((x) & 0xffffffff) >> 0)
+#define PHY_ANALOG_CMAC_RESULTS_I_ATE_CMAC_RESULTS_SET(x) (((x) << 0) & 0xffffffff)
+
+/* macros for cmac_results_q */
+#define PHY_ANALOG_CMAC_RESULTS_Q_ADDRESS 0x000003d4
+#define PHY_ANALOG_CMAC_RESULTS_Q_OFFSET 0x000003d4
+#define PHY_ANALOG_CMAC_RESULTS_Q_ATE_CMAC_RESULTS_MSB 31
+#define PHY_ANALOG_CMAC_RESULTS_Q_ATE_CMAC_RESULTS_LSB 0
+#define PHY_ANALOG_CMAC_RESULTS_Q_ATE_CMAC_RESULTS_MASK 0xffffffff
+#define PHY_ANALOG_CMAC_RESULTS_Q_ATE_CMAC_RESULTS_GET(x) (((x) & 0xffffffff) >> 0)
+#define PHY_ANALOG_CMAC_RESULTS_Q_ATE_CMAC_RESULTS_SET(x) (((x) << 0) & 0xffffffff)
+
+/* macros for PMU1 */
+#define PHY_ANALOG_PMU1_ADDRESS 0x00000740
+#define PHY_ANALOG_PMU1_OFFSET 0x00000740
+#define PHY_ANALOG_PMU1_SPARE_MSB 10
+#define PHY_ANALOG_PMU1_SPARE_LSB 0
+#define PHY_ANALOG_PMU1_SPARE_MASK 0x000007ff
+#define PHY_ANALOG_PMU1_SPARE_GET(x) (((x) & 0x000007ff) >> 0)
+#define PHY_ANALOG_PMU1_SPARE_SET(x) (((x) << 0) & 0x000007ff)
+#define PHY_ANALOG_PMU1_OTP_V25_PWD_MSB 11
+#define PHY_ANALOG_PMU1_OTP_V25_PWD_LSB 11
+#define PHY_ANALOG_PMU1_OTP_V25_PWD_MASK 0x00000800
+#define PHY_ANALOG_PMU1_OTP_V25_PWD_GET(x) (((x) & 0x00000800) >> 11)
+#define PHY_ANALOG_PMU1_OTP_V25_PWD_SET(x) (((x) << 11) & 0x00000800)
+#define PHY_ANALOG_PMU1_PAREGON_MAN_MSB 12
+#define PHY_ANALOG_PMU1_PAREGON_MAN_LSB 12
+#define PHY_ANALOG_PMU1_PAREGON_MAN_MASK 0x00001000
+#define PHY_ANALOG_PMU1_PAREGON_MAN_GET(x) (((x) & 0x00001000) >> 12)
+#define PHY_ANALOG_PMU1_PAREGON_MAN_SET(x) (((x) << 12) & 0x00001000)
+#define PHY_ANALOG_PMU1_OTPREGON_MAN_MSB 13
+#define PHY_ANALOG_PMU1_OTPREGON_MAN_LSB 13
+#define PHY_ANALOG_PMU1_OTPREGON_MAN_MASK 0x00002000
+#define PHY_ANALOG_PMU1_OTPREGON_MAN_GET(x) (((x) & 0x00002000) >> 13)
+#define PHY_ANALOG_PMU1_OTPREGON_MAN_SET(x) (((x) << 13) & 0x00002000)
+#define PHY_ANALOG_PMU1_DREGON_MAN_MSB 14
+#define PHY_ANALOG_PMU1_DREGON_MAN_LSB 14
+#define PHY_ANALOG_PMU1_DREGON_MAN_MASK 0x00004000
+#define PHY_ANALOG_PMU1_DREGON_MAN_GET(x) (((x) & 0x00004000) >> 14)
+#define PHY_ANALOG_PMU1_DREGON_MAN_SET(x) (((x) << 14) & 0x00004000)
+#define PHY_ANALOG_PMU1_DISCONTMODEEN_MSB 15
+#define PHY_ANALOG_PMU1_DISCONTMODEEN_LSB 15
+#define PHY_ANALOG_PMU1_DISCONTMODEEN_MASK 0x00008000
+#define PHY_ANALOG_PMU1_DISCONTMODEEN_GET(x) (((x) & 0x00008000) >> 15)
+#define PHY_ANALOG_PMU1_DISCONTMODEEN_SET(x) (((x) << 15) & 0x00008000)
+#define PHY_ANALOG_PMU1_SWREGON_MAN_MSB 16
+#define PHY_ANALOG_PMU1_SWREGON_MAN_LSB 16
+#define PHY_ANALOG_PMU1_SWREGON_MAN_MASK 0x00010000
+#define PHY_ANALOG_PMU1_SWREGON_MAN_GET(x) (((x) & 0x00010000) >> 16)
+#define PHY_ANALOG_PMU1_SWREGON_MAN_SET(x) (((x) << 16) & 0x00010000)
+#define PHY_ANALOG_PMU1_SWREG_FREQCUR_MSB 18
+#define PHY_ANALOG_PMU1_SWREG_FREQCUR_LSB 17
+#define PHY_ANALOG_PMU1_SWREG_FREQCUR_MASK 0x00060000
+#define PHY_ANALOG_PMU1_SWREG_FREQCUR_GET(x) (((x) & 0x00060000) >> 17)
+#define PHY_ANALOG_PMU1_SWREG_FREQCUR_SET(x) (((x) << 17) & 0x00060000)
+#define PHY_ANALOG_PMU1_SWREG_FREQCAP_MSB 21
+#define PHY_ANALOG_PMU1_SWREG_FREQCAP_LSB 19
+#define PHY_ANALOG_PMU1_SWREG_FREQCAP_MASK 0x00380000
+#define PHY_ANALOG_PMU1_SWREG_FREQCAP_GET(x) (((x) & 0x00380000) >> 19)
+#define PHY_ANALOG_PMU1_SWREG_FREQCAP_SET(x) (((x) << 19) & 0x00380000)
+#define PHY_ANALOG_PMU1_SWREG_LVLCTR_MSB 23
+#define PHY_ANALOG_PMU1_SWREG_LVLCTR_LSB 22
+#define PHY_ANALOG_PMU1_SWREG_LVLCTR_MASK 0x00c00000
+#define PHY_ANALOG_PMU1_SWREG_LVLCTR_GET(x) (((x) & 0x00c00000) >> 22)
+#define PHY_ANALOG_PMU1_SWREG_LVLCTR_SET(x) (((x) << 22) & 0x00c00000)
+#define PHY_ANALOG_PMU1_SREG_LVLCTR_MSB 25
+#define PHY_ANALOG_PMU1_SREG_LVLCTR_LSB 24
+#define PHY_ANALOG_PMU1_SREG_LVLCTR_MASK 0x03000000
+#define PHY_ANALOG_PMU1_SREG_LVLCTR_GET(x) (((x) & 0x03000000) >> 24)
+#define PHY_ANALOG_PMU1_SREG_LVLCTR_SET(x) (((x) << 24) & 0x03000000)
+#define PHY_ANALOG_PMU1_DREG_LVLCTR_MSB 27
+#define PHY_ANALOG_PMU1_DREG_LVLCTR_LSB 26
+#define PHY_ANALOG_PMU1_DREG_LVLCTR_MASK 0x0c000000
+#define PHY_ANALOG_PMU1_DREG_LVLCTR_GET(x) (((x) & 0x0c000000) >> 26)
+#define PHY_ANALOG_PMU1_DREG_LVLCTR_SET(x) (((x) << 26) & 0x0c000000)
+#define PHY_ANALOG_PMU1_PAREG_XPNP_MSB 28
+#define PHY_ANALOG_PMU1_PAREG_XPNP_LSB 28
+#define PHY_ANALOG_PMU1_PAREG_XPNP_MASK 0x10000000
+#define PHY_ANALOG_PMU1_PAREG_XPNP_GET(x) (((x) & 0x10000000) >> 28)
+#define PHY_ANALOG_PMU1_PAREG_XPNP_SET(x) (((x) << 28) & 0x10000000)
+#define PHY_ANALOG_PMU1_PAREG_LVLCTR_MSB 31
+#define PHY_ANALOG_PMU1_PAREG_LVLCTR_LSB 29
+#define PHY_ANALOG_PMU1_PAREG_LVLCTR_MASK 0xe0000000
+#define PHY_ANALOG_PMU1_PAREG_LVLCTR_GET(x) (((x) & 0xe0000000) >> 29)
+#define PHY_ANALOG_PMU1_PAREG_LVLCTR_SET(x) (((x) << 29) & 0xe0000000)
+
+/* macros for PMU2 */
+#define PHY_ANALOG_PMU2_ADDRESS 0x00000744
+#define PHY_ANALOG_PMU2_OFFSET 0x00000744
+#define PHY_ANALOG_PMU2_SPARE_MSB 7
+#define PHY_ANALOG_PMU2_SPARE_LSB 0
+#define PHY_ANALOG_PMU2_SPARE_MASK 0x000000ff
+#define PHY_ANALOG_PMU2_SPARE_GET(x) (((x) & 0x000000ff) >> 0)
+#define PHY_ANALOG_PMU2_SPARE_SET(x) (((x) << 0) & 0x000000ff)
+#define PHY_ANALOG_PMU2_VBATT_1_3TOATB_MSB 8
+#define PHY_ANALOG_PMU2_VBATT_1_3TOATB_LSB 8
+#define PHY_ANALOG_PMU2_VBATT_1_3TOATB_MASK 0x00000100
+#define PHY_ANALOG_PMU2_VBATT_1_3TOATB_GET(x) (((x) & 0x00000100) >> 8)
+#define PHY_ANALOG_PMU2_VBATT_1_3TOATB_SET(x) (((x) << 8) & 0x00000100)
+#define PHY_ANALOG_PMU2_VBATT_1_2TOATB_MSB 9
+#define PHY_ANALOG_PMU2_VBATT_1_2TOATB_LSB 9
+#define PHY_ANALOG_PMU2_VBATT_1_2TOATB_MASK 0x00000200
+#define PHY_ANALOG_PMU2_VBATT_1_2TOATB_GET(x) (((x) & 0x00000200) >> 9)
+#define PHY_ANALOG_PMU2_VBATT_1_2TOATB_SET(x) (((x) << 9) & 0x00000200)
+#define PHY_ANALOG_PMU2_VBATT_2_3TOATB_MSB 10
+#define PHY_ANALOG_PMU2_VBATT_2_3TOATB_LSB 10
+#define PHY_ANALOG_PMU2_VBATT_2_3TOATB_MASK 0x00000400
+#define PHY_ANALOG_PMU2_VBATT_2_3TOATB_GET(x) (((x) & 0x00000400) >> 10)
+#define PHY_ANALOG_PMU2_VBATT_2_3TOATB_SET(x) (((x) << 10) & 0x00000400)
+#define PHY_ANALOG_PMU2_PWD_BANDGAP_MAN_MSB 11
+#define PHY_ANALOG_PMU2_PWD_BANDGAP_MAN_LSB 11
+#define PHY_ANALOG_PMU2_PWD_BANDGAP_MAN_MASK 0x00000800
+#define PHY_ANALOG_PMU2_PWD_BANDGAP_MAN_GET(x) (((x) & 0x00000800) >> 11)
+#define PHY_ANALOG_PMU2_PWD_BANDGAP_MAN_SET(x) (((x) << 11) & 0x00000800)
+#define PHY_ANALOG_PMU2_PWD_LFO_MAN_MSB 12
+#define PHY_ANALOG_PMU2_PWD_LFO_MAN_LSB 12
+#define PHY_ANALOG_PMU2_PWD_LFO_MAN_MASK 0x00001000
+#define PHY_ANALOG_PMU2_PWD_LFO_MAN_GET(x) (((x) & 0x00001000) >> 12)
+#define PHY_ANALOG_PMU2_PWD_LFO_MAN_SET(x) (((x) << 12) & 0x00001000)
+#define PHY_ANALOG_PMU2_VBATT_LT_3P2_MSB 13
+#define PHY_ANALOG_PMU2_VBATT_LT_3P2_LSB 13
+#define PHY_ANALOG_PMU2_VBATT_LT_3P2_MASK 0x00002000
+#define PHY_ANALOG_PMU2_VBATT_LT_3P2_GET(x) (((x) & 0x00002000) >> 13)
+#define PHY_ANALOG_PMU2_VBATT_LT_3P2_SET(x) (((x) << 13) & 0x00002000)
+#define PHY_ANALOG_PMU2_VBATT_LT_2P8_MSB 14
+#define PHY_ANALOG_PMU2_VBATT_LT_2P8_LSB 14
+#define PHY_ANALOG_PMU2_VBATT_LT_2P8_MASK 0x00004000
+#define PHY_ANALOG_PMU2_VBATT_LT_2P8_GET(x) (((x) & 0x00004000) >> 14)
+#define PHY_ANALOG_PMU2_VBATT_LT_2P8_SET(x) (((x) << 14) & 0x00004000)
+#define PHY_ANALOG_PMU2_VBATT_GT_4P2_MSB 15
+#define PHY_ANALOG_PMU2_VBATT_GT_4P2_LSB 15
+#define PHY_ANALOG_PMU2_VBATT_GT_4P2_MASK 0x00008000
+#define PHY_ANALOG_PMU2_VBATT_GT_4P2_GET(x) (((x) & 0x00008000) >> 15)
+#define PHY_ANALOG_PMU2_VBATT_GT_4P2_SET(x) (((x) << 15) & 0x00008000)
+#define PHY_ANALOG_PMU2_PMU_MAN_OVERRIDE_EN_MSB 16
+#define PHY_ANALOG_PMU2_PMU_MAN_OVERRIDE_EN_LSB 16
+#define PHY_ANALOG_PMU2_PMU_MAN_OVERRIDE_EN_MASK 0x00010000
+#define PHY_ANALOG_PMU2_PMU_MAN_OVERRIDE_EN_GET(x) (((x) & 0x00010000) >> 16)
+#define PHY_ANALOG_PMU2_PMU_MAN_OVERRIDE_EN_SET(x) (((x) << 16) & 0x00010000)
+#define PHY_ANALOG_PMU2_VBATT_GT_LVLCTR_MSB 18
+#define PHY_ANALOG_PMU2_VBATT_GT_LVLCTR_LSB 17
+#define PHY_ANALOG_PMU2_VBATT_GT_LVLCTR_MASK 0x00060000
+#define PHY_ANALOG_PMU2_VBATT_GT_LVLCTR_GET(x) (((x) & 0x00060000) >> 17)
+#define PHY_ANALOG_PMU2_VBATT_GT_LVLCTR_SET(x) (((x) << 17) & 0x00060000)
+#define PHY_ANALOG_PMU2_SWREGVSSL2ATB_MSB 19
+#define PHY_ANALOG_PMU2_SWREGVSSL2ATB_LSB 19
+#define PHY_ANALOG_PMU2_SWREGVSSL2ATB_MASK 0x00080000
+#define PHY_ANALOG_PMU2_SWREGVSSL2ATB_GET(x) (((x) & 0x00080000) >> 19)
+#define PHY_ANALOG_PMU2_SWREGVSSL2ATB_SET(x) (((x) << 19) & 0x00080000)
+#define PHY_ANALOG_PMU2_SWREGVSSL_LVLCTR_MSB 21
+#define PHY_ANALOG_PMU2_SWREGVSSL_LVLCTR_LSB 20
+#define PHY_ANALOG_PMU2_SWREGVSSL_LVLCTR_MASK 0x00300000
+#define PHY_ANALOG_PMU2_SWREGVSSL_LVLCTR_GET(x) (((x) & 0x00300000) >> 20)
+#define PHY_ANALOG_PMU2_SWREGVSSL_LVLCTR_SET(x) (((x) << 20) & 0x00300000)
+#define PHY_ANALOG_PMU2_SWREGVDDH2ATB_MSB 22
+#define PHY_ANALOG_PMU2_SWREGVDDH2ATB_LSB 22
+#define PHY_ANALOG_PMU2_SWREGVDDH2ATB_MASK 0x00400000
+#define PHY_ANALOG_PMU2_SWREGVDDH2ATB_GET(x) (((x) & 0x00400000) >> 22)
+#define PHY_ANALOG_PMU2_SWREGVDDH2ATB_SET(x) (((x) << 22) & 0x00400000)
+#define PHY_ANALOG_PMU2_SWREGVDDH_LVLCTR_MSB 24
+#define PHY_ANALOG_PMU2_SWREGVDDH_LVLCTR_LSB 23
+#define PHY_ANALOG_PMU2_SWREGVDDH_LVLCTR_MASK 0x01800000
+#define PHY_ANALOG_PMU2_SWREGVDDH_LVLCTR_GET(x) (((x) & 0x01800000) >> 23)
+#define PHY_ANALOG_PMU2_SWREGVDDH_LVLCTR_SET(x) (((x) << 23) & 0x01800000)
+#define PHY_ANALOG_PMU2_SWREG2ATB_MSB 27
+#define PHY_ANALOG_PMU2_SWREG2ATB_LSB 25
+#define PHY_ANALOG_PMU2_SWREG2ATB_MASK 0x0e000000
+#define PHY_ANALOG_PMU2_SWREG2ATB_GET(x) (((x) & 0x0e000000) >> 25)
+#define PHY_ANALOG_PMU2_SWREG2ATB_SET(x) (((x) << 25) & 0x0e000000)
+#define PHY_ANALOG_PMU2_OTPREG2ATB_MSB 28
+#define PHY_ANALOG_PMU2_OTPREG2ATB_LSB 28
+#define PHY_ANALOG_PMU2_OTPREG2ATB_MASK 0x10000000
+#define PHY_ANALOG_PMU2_OTPREG2ATB_GET(x) (((x) & 0x10000000) >> 28)
+#define PHY_ANALOG_PMU2_OTPREG2ATB_SET(x) (((x) << 28) & 0x10000000)
+#define PHY_ANALOG_PMU2_OTPREG_LVLCTR_MSB 30
+#define PHY_ANALOG_PMU2_OTPREG_LVLCTR_LSB 29
+#define PHY_ANALOG_PMU2_OTPREG_LVLCTR_MASK 0x60000000
+#define PHY_ANALOG_PMU2_OTPREG_LVLCTR_GET(x) (((x) & 0x60000000) >> 29)
+#define PHY_ANALOG_PMU2_OTPREG_LVLCTR_SET(x) (((x) << 29) & 0x60000000)
+#define PHY_ANALOG_PMU2_DREG_LVLCTR_MANOVR_EN_MSB 31
+#define PHY_ANALOG_PMU2_DREG_LVLCTR_MANOVR_EN_LSB 31
+#define PHY_ANALOG_PMU2_DREG_LVLCTR_MANOVR_EN_MASK 0x80000000
+#define PHY_ANALOG_PMU2_DREG_LVLCTR_MANOVR_EN_GET(x) (((x) & 0x80000000) >> 31)
+#define PHY_ANALOG_PMU2_DREG_LVLCTR_MANOVR_EN_SET(x) (((x) << 31) & 0x80000000)
+
+
+#ifndef __ASSEMBLER__
+
+typedef struct analog_intf_ares_reg_reg_s {
+ volatile unsigned int RXRF_BIAS1; /* 0x0 - 0x4 */
+ volatile unsigned int RXRF_BIAS2; /* 0x4 - 0x8 */
+ volatile unsigned int RXRF_GAINSTAGES; /* 0x8 - 0xc */
+ volatile unsigned int RXRF_AGC; /* 0xc - 0x10 */
+ volatile char pad__0[0x30]; /* 0x10 - 0x40 */
+ volatile unsigned int TXRF1; /* 0x40 - 0x44 */
+ volatile unsigned int TXRF2; /* 0x44 - 0x48 */
+ volatile unsigned int TXRF3; /* 0x48 - 0x4c */
+ volatile unsigned int TXRF4; /* 0x4c - 0x50 */
+ volatile unsigned int TXRF5; /* 0x50 - 0x54 */
+ volatile unsigned int TXRF6; /* 0x54 - 0x58 */
+ volatile unsigned int TXRF7; /* 0x58 - 0x5c */
+ volatile unsigned int TXRF8; /* 0x5c - 0x60 */
+ volatile unsigned int TXRF9; /* 0x60 - 0x64 */
+ volatile unsigned int TXRF10; /* 0x64 - 0x68 */
+ volatile unsigned int TXRF11; /* 0x68 - 0x6c */
+ volatile unsigned int TXRF12; /* 0x6c - 0x70 */
+ volatile char pad__1[0x10]; /* 0x70 - 0x80 */
+ volatile unsigned int SYNTH1; /* 0x80 - 0x84 */
+ volatile unsigned int SYNTH2; /* 0x84 - 0x88 */
+ volatile unsigned int SYNTH3; /* 0x88 - 0x8c */
+ volatile unsigned int SYNTH4; /* 0x8c - 0x90 */
+ volatile unsigned int SYNTH5; /* 0x90 - 0x94 */
+ volatile unsigned int SYNTH6; /* 0x94 - 0x98 */
+ volatile unsigned int SYNTH7; /* 0x98 - 0x9c */
+ volatile unsigned int SYNTH8; /* 0x9c - 0xa0 */
+ volatile unsigned int SYNTH9; /* 0xa0 - 0xa4 */
+ volatile unsigned int SYNTH10; /* 0xa4 - 0xa8 */
+ volatile unsigned int SYNTH11; /* 0xa8 - 0xac */
+ volatile unsigned int SYNTH12; /* 0xac - 0xb0 */
+ volatile char pad__2[0x10]; /* 0xb0 - 0xc0 */
+ volatile unsigned int BIAS1; /* 0xc0 - 0xc4 */
+ volatile unsigned int BIAS2; /* 0xc4 - 0xc8 */
+ volatile unsigned int BIAS3; /* 0xc8 - 0xcc */
+ volatile unsigned int BIAS4; /* 0xcc - 0xd0 */
+ volatile char pad__3[0x30]; /* 0xd0 - 0x100 */
+ volatile unsigned int RXTX1; /* 0x100 - 0x104 */
+ volatile unsigned int RXTX2; /* 0x104 - 0x108 */
+ volatile unsigned int RXTX3; /* 0x108 - 0x10c */
+ volatile char pad__4[0x34]; /* 0x10c - 0x140 */
+ volatile unsigned int BB1; /* 0x140 - 0x144 */
+ volatile unsigned int BB2; /* 0x144 - 0x148 */
+ volatile char pad__5[0x138]; /* 0x148 - 0x280 */
+ volatile unsigned int TOP1; /* 0x280 - 0x284 */
+ volatile unsigned int TOP2; /* 0x284 - 0x288 */
+ volatile unsigned int TOP3; /* 0x288 - 0x28c */
+ volatile unsigned int TOP4; /* 0x28c - 0x290 */
+ volatile char pad__6[0xf0]; /* 0x290 - 0x380 */
+ volatile unsigned int rbist_cntrl; /* 0x380 - 0x384 */
+ volatile unsigned int tx_dc_offset; /* 0x384 - 0x388 */
+ volatile unsigned int tx_tonegen0; /* 0x388 - 0x38c */
+ volatile unsigned int tx_tonegen1; /* 0x38c - 0x390 */
+ volatile unsigned int tx_lftonegen0; /* 0x390 - 0x394 */
+ volatile unsigned int tx_linear_ramp_i; /* 0x394 - 0x398 */
+ volatile unsigned int tx_linear_ramp_q; /* 0x398 - 0x39c */
+ volatile unsigned int tx_prbs_mag; /* 0x39c - 0x3a0 */
+ volatile unsigned int tx_prbs_seed_i; /* 0x3a0 - 0x3a4 */
+ volatile unsigned int tx_prbs_seed_q; /* 0x3a4 - 0x3a8 */
+ volatile unsigned int cmac_dc_cancel; /* 0x3a8 - 0x3ac */
+ volatile unsigned int cmac_dc_offset; /* 0x3ac - 0x3b0 */
+ volatile unsigned int cmac_corr; /* 0x3b0 - 0x3b4 */
+ volatile unsigned int cmac_power; /* 0x3b4 - 0x3b8 */
+ volatile unsigned int cmac_cross_corr; /* 0x3b8 - 0x3bc */
+ volatile unsigned int cmac_i2q2; /* 0x3bc - 0x3c0 */
+ volatile unsigned int cmac_power_hpf; /* 0x3c0 - 0x3c4 */
+ volatile unsigned int rxdac_set1; /* 0x3c4 - 0x3c8 */
+ volatile unsigned int rxdac_set2; /* 0x3c8 - 0x3cc */
+ volatile unsigned int rxdac_long_shift; /* 0x3cc - 0x3d0 */
+ volatile unsigned int cmac_results_i; /* 0x3d0 - 0x3d4 */
+ volatile unsigned int cmac_results_q; /* 0x3d4 - 0x3d8 */
+ volatile char pad__7[0x368]; /* 0x3d8 - 0x740 */
+ volatile unsigned int PMU1; /* 0x740 - 0x744 */
+ volatile unsigned int PMU2; /* 0x744 - 0x748 */
+} analog_intf_ares_reg_reg_t;
+
+#endif /* __ASSEMBLER__ */
+
+#endif /* _ANALOG_INTF_ARES_REG_REG_H_ */
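
[Editor's note — illustrative sketch, not part of the patch above.] The auto-generated _MASK/_GET/_SET macros in this header follow the usual read-modify-write pattern: clear the field with its _MASK, merge a new value with _SET, and recover it with _GET. A minimal self-contained example, assuming the header above is available (the include path and the chosen field are placeholders; only the macros shown in the diff are relied on):

	#include <stdio.h>
	#include "analog_intf_ares_reg.h"	/* hypothetical path to the header above */

	int main(void)
	{
		unsigned int pmu2 = 0;

		/* Read-modify-write of the 2-bit OTPREG_LVLCTR field (bits 30:29). */
		pmu2 &= ~PHY_ANALOG_PMU2_OTPREG_LVLCTR_MASK;	/* clear the field */
		pmu2 |= PHY_ANALOG_PMU2_OTPREG_LVLCTR_SET(2);	/* shift and mask new value */

		printf("PMU2 = 0x%08x, OTPREG_LVLCTR = %u\n",
		       pmu2, PHY_ANALOG_PMU2_OTPREG_LVLCTR_GET(pmu2));
		return 0;
	}

In the driver itself the modified value would of course be written back through whatever register I/O routine the code uses, rather than held in a local variable as in this sketch.
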
diff --git a/drivers/staging/ath6kl/include/common/AR6002/hw4.0/hw/analog_intf_athr_wlan_reg.h b/drivers/staging/ath6kl/include/common/AR6002/hw4.0/hw/analog_intf_athr_wlan_reg.h
new file mode 100644
index 000000000000..1c243fbbc810
--- /dev/null
+++ b/drivers/staging/ath6kl/include/common/AR6002/hw4.0/hw/analog_intf_athr_wlan_reg.h
@@ -0,0 +1,3674 @@
+// ------------------------------------------------------------------
+// Copyright (c) 2004-2010 Atheros Corporation. All rights reserved.
+//
+//
+// Permission to use, copy, modify, and/or distribute this software for any
+// purpose with or without fee is hereby granted, provided that the above
+// copyright notice and this permission notice appear in all copies.
+//
+// THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+// WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+// MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+// ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+// WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+// ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+// OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+//
+//
+// ------------------------------------------------------------------
+//===================================================================
+// Author(s): ="Atheros"
+//===================================================================
+
+/* Copyright (C) 2009 Denali Software Inc. All rights reserved */
+/* THIS FILE IS AUTOMATICALLY GENERATED BY DENALI BLUEPRINT, DO NOT EDIT */
+
+
+#ifndef _ANALOG_INTF_ATHR_WLAN_REG_REG_H_
+#define _ANALOG_INTF_ATHR_WLAN_REG_REG_H_
+
+
+/* macros for RXRF_BIAS1 */
+#define PHY_ANALOG_RXRF_BIAS1_ADDRESS 0x00000000
+#define PHY_ANALOG_RXRF_BIAS1_OFFSET 0x00000000
+#define PHY_ANALOG_RXRF_BIAS1_SPARE_MSB 0
+#define PHY_ANALOG_RXRF_BIAS1_SPARE_LSB 0
+#define PHY_ANALOG_RXRF_BIAS1_SPARE_MASK 0x00000001
+#define PHY_ANALOG_RXRF_BIAS1_SPARE_GET(x) (((x) & 0x00000001) >> 0)
+#define PHY_ANALOG_RXRF_BIAS1_SPARE_SET(x) (((x) << 0) & 0x00000001)
+#define PHY_ANALOG_RXRF_BIAS1_PWD_IR25SPARE_MSB 3
+#define PHY_ANALOG_RXRF_BIAS1_PWD_IR25SPARE_LSB 1
+#define PHY_ANALOG_RXRF_BIAS1_PWD_IR25SPARE_MASK 0x0000000e
+#define PHY_ANALOG_RXRF_BIAS1_PWD_IR25SPARE_GET(x) (((x) & 0x0000000e) >> 1)
+#define PHY_ANALOG_RXRF_BIAS1_PWD_IR25SPARE_SET(x) (((x) << 1) & 0x0000000e)
+#define PHY_ANALOG_RXRF_BIAS1_PWD_IR25LO18_MSB 6
+#define PHY_ANALOG_RXRF_BIAS1_PWD_IR25LO18_LSB 4
+#define PHY_ANALOG_RXRF_BIAS1_PWD_IR25LO18_MASK 0x00000070
+#define PHY_ANALOG_RXRF_BIAS1_PWD_IR25LO18_GET(x) (((x) & 0x00000070) >> 4)
+#define PHY_ANALOG_RXRF_BIAS1_PWD_IR25LO18_SET(x) (((x) << 4) & 0x00000070)
+#define PHY_ANALOG_RXRF_BIAS1_PWD_IC25LO36_MSB 9
+#define PHY_ANALOG_RXRF_BIAS1_PWD_IC25LO36_LSB 7
+#define PHY_ANALOG_RXRF_BIAS1_PWD_IC25LO36_MASK 0x00000380
+#define PHY_ANALOG_RXRF_BIAS1_PWD_IC25LO36_GET(x) (((x) & 0x00000380) >> 7)
+#define PHY_ANALOG_RXRF_BIAS1_PWD_IC25LO36_SET(x) (((x) << 7) & 0x00000380)
+#define PHY_ANALOG_RXRF_BIAS1_PWD_IC25MXR2_5GH_MSB 12
+#define PHY_ANALOG_RXRF_BIAS1_PWD_IC25MXR2_5GH_LSB 10
+#define PHY_ANALOG_RXRF_BIAS1_PWD_IC25MXR2_5GH_MASK 0x00001c00
+#define PHY_ANALOG_RXRF_BIAS1_PWD_IC25MXR2_5GH_GET(x) (((x) & 0x00001c00) >> 10)
+#define PHY_ANALOG_RXRF_BIAS1_PWD_IC25MXR2_5GH_SET(x) (((x) << 10) & 0x00001c00)
+#define PHY_ANALOG_RXRF_BIAS1_PWD_IC25MXR5GH_MSB 15
+#define PHY_ANALOG_RXRF_BIAS1_PWD_IC25MXR5GH_LSB 13
+#define PHY_ANALOG_RXRF_BIAS1_PWD_IC25MXR5GH_MASK 0x0000e000
+#define PHY_ANALOG_RXRF_BIAS1_PWD_IC25MXR5GH_GET(x) (((x) & 0x0000e000) >> 13)
+#define PHY_ANALOG_RXRF_BIAS1_PWD_IC25MXR5GH_SET(x) (((x) << 13) & 0x0000e000)
+#define PHY_ANALOG_RXRF_BIAS1_PWD_IC25VGA5G_MSB 18
+#define PHY_ANALOG_RXRF_BIAS1_PWD_IC25VGA5G_LSB 16
+#define PHY_ANALOG_RXRF_BIAS1_PWD_IC25VGA5G_MASK 0x00070000
+#define PHY_ANALOG_RXRF_BIAS1_PWD_IC25VGA5G_GET(x) (((x) & 0x00070000) >> 16)
+#define PHY_ANALOG_RXRF_BIAS1_PWD_IC25VGA5G_SET(x) (((x) << 16) & 0x00070000)
+#define PHY_ANALOG_RXRF_BIAS1_PWD_IC75LNA5G_MSB 21
+#define PHY_ANALOG_RXRF_BIAS1_PWD_IC75LNA5G_LSB 19
+#define PHY_ANALOG_RXRF_BIAS1_PWD_IC75LNA5G_MASK 0x00380000
+#define PHY_ANALOG_RXRF_BIAS1_PWD_IC75LNA5G_GET(x) (((x) & 0x00380000) >> 19)
+#define PHY_ANALOG_RXRF_BIAS1_PWD_IC75LNA5G_SET(x) (((x) << 19) & 0x00380000)
+#define PHY_ANALOG_RXRF_BIAS1_PWD_IR25LO24_MSB 24
+#define PHY_ANALOG_RXRF_BIAS1_PWD_IR25LO24_LSB 22
+#define PHY_ANALOG_RXRF_BIAS1_PWD_IR25LO24_MASK 0x01c00000
+#define PHY_ANALOG_RXRF_BIAS1_PWD_IR25LO24_GET(x) (((x) & 0x01c00000) >> 22)
+#define PHY_ANALOG_RXRF_BIAS1_PWD_IR25LO24_SET(x) (((x) << 22) & 0x01c00000)
+#define PHY_ANALOG_RXRF_BIAS1_PWD_IC25MXR2GH_MSB 27
+#define PHY_ANALOG_RXRF_BIAS1_PWD_IC25MXR2GH_LSB 25
+#define PHY_ANALOG_RXRF_BIAS1_PWD_IC25MXR2GH_MASK 0x0e000000
+#define PHY_ANALOG_RXRF_BIAS1_PWD_IC25MXR2GH_GET(x) (((x) & 0x0e000000) >> 25)
+#define PHY_ANALOG_RXRF_BIAS1_PWD_IC25MXR2GH_SET(x) (((x) << 25) & 0x0e000000)
+#define PHY_ANALOG_RXRF_BIAS1_PWD_IC75LNA2G_MSB 30
+#define PHY_ANALOG_RXRF_BIAS1_PWD_IC75LNA2G_LSB 28
+#define PHY_ANALOG_RXRF_BIAS1_PWD_IC75LNA2G_MASK 0x70000000
+#define PHY_ANALOG_RXRF_BIAS1_PWD_IC75LNA2G_GET(x) (((x) & 0x70000000) >> 28)
+#define PHY_ANALOG_RXRF_BIAS1_PWD_IC75LNA2G_SET(x) (((x) << 28) & 0x70000000)
+#define PHY_ANALOG_RXRF_BIAS1_PWD_BIAS_MSB 31
+#define PHY_ANALOG_RXRF_BIAS1_PWD_BIAS_LSB 31
+#define PHY_ANALOG_RXRF_BIAS1_PWD_BIAS_MASK 0x80000000
+#define PHY_ANALOG_RXRF_BIAS1_PWD_BIAS_GET(x) (((x) & 0x80000000) >> 31)
+#define PHY_ANALOG_RXRF_BIAS1_PWD_BIAS_SET(x) (((x) << 31) & 0x80000000)
+
+/* macros for RXRF_BIAS2 */
+#define PHY_ANALOG_RXRF_BIAS2_ADDRESS 0x00000004
+#define PHY_ANALOG_RXRF_BIAS2_OFFSET 0x00000004
+#define PHY_ANALOG_RXRF_BIAS2_SPARE_MSB 0
+#define PHY_ANALOG_RXRF_BIAS2_SPARE_LSB 0
+#define PHY_ANALOG_RXRF_BIAS2_SPARE_MASK 0x00000001
+#define PHY_ANALOG_RXRF_BIAS2_SPARE_GET(x) (((x) & 0x00000001) >> 0)
+#define PHY_ANALOG_RXRF_BIAS2_SPARE_SET(x) (((x) << 0) & 0x00000001)
+#define PHY_ANALOG_RXRF_BIAS2_PKEN_MSB 3
+#define PHY_ANALOG_RXRF_BIAS2_PKEN_LSB 1
+#define PHY_ANALOG_RXRF_BIAS2_PKEN_MASK 0x0000000e
+#define PHY_ANALOG_RXRF_BIAS2_PKEN_GET(x) (((x) & 0x0000000e) >> 1)
+#define PHY_ANALOG_RXRF_BIAS2_PKEN_SET(x) (((x) << 1) & 0x0000000e)
+#define PHY_ANALOG_RXRF_BIAS2_VCMVALUE_MSB 6
+#define PHY_ANALOG_RXRF_BIAS2_VCMVALUE_LSB 4
+#define PHY_ANALOG_RXRF_BIAS2_VCMVALUE_MASK 0x00000070
+#define PHY_ANALOG_RXRF_BIAS2_VCMVALUE_GET(x) (((x) & 0x00000070) >> 4)
+#define PHY_ANALOG_RXRF_BIAS2_VCMVALUE_SET(x) (((x) << 4) & 0x00000070)
+#define PHY_ANALOG_RXRF_BIAS2_PWD_VCMBUF_MSB 7
+#define PHY_ANALOG_RXRF_BIAS2_PWD_VCMBUF_LSB 7
+#define PHY_ANALOG_RXRF_BIAS2_PWD_VCMBUF_MASK 0x00000080
+#define PHY_ANALOG_RXRF_BIAS2_PWD_VCMBUF_GET(x) (((x) & 0x00000080) >> 7)
+#define PHY_ANALOG_RXRF_BIAS2_PWD_VCMBUF_SET(x) (((x) << 7) & 0x00000080)
+#define PHY_ANALOG_RXRF_BIAS2_PWD_IR25SPAREH_MSB 10
+#define PHY_ANALOG_RXRF_BIAS2_PWD_IR25SPAREH_LSB 8
+#define PHY_ANALOG_RXRF_BIAS2_PWD_IR25SPAREH_MASK 0x00000700
+#define PHY_ANALOG_RXRF_BIAS2_PWD_IR25SPAREH_GET(x) (((x) & 0x00000700) >> 8)
+#define PHY_ANALOG_RXRF_BIAS2_PWD_IR25SPAREH_SET(x) (((x) << 8) & 0x00000700)
+#define PHY_ANALOG_RXRF_BIAS2_PWD_IR25SPARE_MSB 13
+#define PHY_ANALOG_RXRF_BIAS2_PWD_IR25SPARE_LSB 11
+#define PHY_ANALOG_RXRF_BIAS2_PWD_IR25SPARE_MASK 0x00003800
+#define PHY_ANALOG_RXRF_BIAS2_PWD_IR25SPARE_GET(x) (((x) & 0x00003800) >> 11)
+#define PHY_ANALOG_RXRF_BIAS2_PWD_IR25SPARE_SET(x) (((x) << 11) & 0x00003800)
+#define PHY_ANALOG_RXRF_BIAS2_PWD_IC25LNABUF_MSB 16
+#define PHY_ANALOG_RXRF_BIAS2_PWD_IC25LNABUF_LSB 14
+#define PHY_ANALOG_RXRF_BIAS2_PWD_IC25LNABUF_MASK 0x0001c000
+#define PHY_ANALOG_RXRF_BIAS2_PWD_IC25LNABUF_GET(x) (((x) & 0x0001c000) >> 14)
+#define PHY_ANALOG_RXRF_BIAS2_PWD_IC25LNABUF_SET(x) (((x) << 14) & 0x0001c000)
+#define PHY_ANALOG_RXRF_BIAS2_PWD_IR25AGCH_MSB 19
+#define PHY_ANALOG_RXRF_BIAS2_PWD_IR25AGCH_LSB 17
+#define PHY_ANALOG_RXRF_BIAS2_PWD_IR25AGCH_MASK 0x000e0000
+#define PHY_ANALOG_RXRF_BIAS2_PWD_IR25AGCH_GET(x) (((x) & 0x000e0000) >> 17)
+#define PHY_ANALOG_RXRF_BIAS2_PWD_IR25AGCH_SET(x) (((x) << 17) & 0x000e0000)
+#define PHY_ANALOG_RXRF_BIAS2_PWD_IR25AGC_MSB 22
+#define PHY_ANALOG_RXRF_BIAS2_PWD_IR25AGC_LSB 20
+#define PHY_ANALOG_RXRF_BIAS2_PWD_IR25AGC_MASK 0x00700000
+#define PHY_ANALOG_RXRF_BIAS2_PWD_IR25AGC_GET(x) (((x) & 0x00700000) >> 20)
+#define PHY_ANALOG_RXRF_BIAS2_PWD_IR25AGC_SET(x) (((x) << 20) & 0x00700000)
+#define PHY_ANALOG_RXRF_BIAS2_PWD_IC25AGC_MSB 25
+#define PHY_ANALOG_RXRF_BIAS2_PWD_IC25AGC_LSB 23
+#define PHY_ANALOG_RXRF_BIAS2_PWD_IC25AGC_MASK 0x03800000
+#define PHY_ANALOG_RXRF_BIAS2_PWD_IC25AGC_GET(x) (((x) & 0x03800000) >> 23)
+#define PHY_ANALOG_RXRF_BIAS2_PWD_IC25AGC_SET(x) (((x) << 23) & 0x03800000)
+#define PHY_ANALOG_RXRF_BIAS2_PWD_IC25VCMBUF_MSB 28
+#define PHY_ANALOG_RXRF_BIAS2_PWD_IC25VCMBUF_LSB 26
+#define PHY_ANALOG_RXRF_BIAS2_PWD_IC25VCMBUF_MASK 0x1c000000
+#define PHY_ANALOG_RXRF_BIAS2_PWD_IC25VCMBUF_GET(x) (((x) & 0x1c000000) >> 26)
+#define PHY_ANALOG_RXRF_BIAS2_PWD_IC25VCMBUF_SET(x) (((x) << 26) & 0x1c000000)
+#define PHY_ANALOG_RXRF_BIAS2_PWD_IR25VCM_MSB 31
+#define PHY_ANALOG_RXRF_BIAS2_PWD_IR25VCM_LSB 29
+#define PHY_ANALOG_RXRF_BIAS2_PWD_IR25VCM_MASK 0xe0000000
+#define PHY_ANALOG_RXRF_BIAS2_PWD_IR25VCM_GET(x) (((x) & 0xe0000000) >> 29)
+#define PHY_ANALOG_RXRF_BIAS2_PWD_IR25VCM_SET(x) (((x) << 29) & 0xe0000000)
+
+/* macros for RXRF_GAINSTAGES */
+#define PHY_ANALOG_RXRF_GAINSTAGES_ADDRESS 0x00000008
+#define PHY_ANALOG_RXRF_GAINSTAGES_OFFSET 0x00000008
+#define PHY_ANALOG_RXRF_GAINSTAGES_SPARE_MSB 0
+#define PHY_ANALOG_RXRF_GAINSTAGES_SPARE_LSB 0
+#define PHY_ANALOG_RXRF_GAINSTAGES_SPARE_MASK 0x00000001
+#define PHY_ANALOG_RXRF_GAINSTAGES_SPARE_GET(x) (((x) & 0x00000001) >> 0)
+#define PHY_ANALOG_RXRF_GAINSTAGES_SPARE_SET(x) (((x) << 0) & 0x00000001)
+#define PHY_ANALOG_RXRF_GAINSTAGES_LNAON_CALDC_MSB 1
+#define PHY_ANALOG_RXRF_GAINSTAGES_LNAON_CALDC_LSB 1
+#define PHY_ANALOG_RXRF_GAINSTAGES_LNAON_CALDC_MASK 0x00000002
+#define PHY_ANALOG_RXRF_GAINSTAGES_LNAON_CALDC_GET(x) (((x) & 0x00000002) >> 1)
+#define PHY_ANALOG_RXRF_GAINSTAGES_LNAON_CALDC_SET(x) (((x) << 1) & 0x00000002)
+#define PHY_ANALOG_RXRF_GAINSTAGES_VGA5G_CAP_MSB 3
+#define PHY_ANALOG_RXRF_GAINSTAGES_VGA5G_CAP_LSB 2
+#define PHY_ANALOG_RXRF_GAINSTAGES_VGA5G_CAP_MASK 0x0000000c
+#define PHY_ANALOG_RXRF_GAINSTAGES_VGA5G_CAP_GET(x) (((x) & 0x0000000c) >> 2)
+#define PHY_ANALOG_RXRF_GAINSTAGES_VGA5G_CAP_SET(x) (((x) << 2) & 0x0000000c)
+#define PHY_ANALOG_RXRF_GAINSTAGES_LNA5G_CAP_MSB 5
+#define PHY_ANALOG_RXRF_GAINSTAGES_LNA5G_CAP_LSB 4
+#define PHY_ANALOG_RXRF_GAINSTAGES_LNA5G_CAP_MASK 0x00000030
+#define PHY_ANALOG_RXRF_GAINSTAGES_LNA5G_CAP_GET(x) (((x) & 0x00000030) >> 4)
+#define PHY_ANALOG_RXRF_GAINSTAGES_LNA5G_CAP_SET(x) (((x) << 4) & 0x00000030)
+#define PHY_ANALOG_RXRF_GAINSTAGES_LNA5G_SHORTINP_MSB 6
+#define PHY_ANALOG_RXRF_GAINSTAGES_LNA5G_SHORTINP_LSB 6
+#define PHY_ANALOG_RXRF_GAINSTAGES_LNA5G_SHORTINP_MASK 0x00000040
+#define PHY_ANALOG_RXRF_GAINSTAGES_LNA5G_SHORTINP_GET(x) (((x) & 0x00000040) >> 6)
+#define PHY_ANALOG_RXRF_GAINSTAGES_LNA5G_SHORTINP_SET(x) (((x) << 6) & 0x00000040)
+#define PHY_ANALOG_RXRF_GAINSTAGES_PWD_LO5G_MSB 7
+#define PHY_ANALOG_RXRF_GAINSTAGES_PWD_LO5G_LSB 7
+#define PHY_ANALOG_RXRF_GAINSTAGES_PWD_LO5G_MASK 0x00000080
+#define PHY_ANALOG_RXRF_GAINSTAGES_PWD_LO5G_GET(x) (((x) & 0x00000080) >> 7)
+#define PHY_ANALOG_RXRF_GAINSTAGES_PWD_LO5G_SET(x) (((x) << 7) & 0x00000080)
+#define PHY_ANALOG_RXRF_GAINSTAGES_PWD_VGA5G_MSB 8
+#define PHY_ANALOG_RXRF_GAINSTAGES_PWD_VGA5G_LSB 8
+#define PHY_ANALOG_RXRF_GAINSTAGES_PWD_VGA5G_MASK 0x00000100
+#define PHY_ANALOG_RXRF_GAINSTAGES_PWD_VGA5G_GET(x) (((x) & 0x00000100) >> 8)
+#define PHY_ANALOG_RXRF_GAINSTAGES_PWD_VGA5G_SET(x) (((x) << 8) & 0x00000100)
+#define PHY_ANALOG_RXRF_GAINSTAGES_PWD_MXR5G_MSB 9
+#define PHY_ANALOG_RXRF_GAINSTAGES_PWD_MXR5G_LSB 9
+#define PHY_ANALOG_RXRF_GAINSTAGES_PWD_MXR5G_MASK 0x00000200
+#define PHY_ANALOG_RXRF_GAINSTAGES_PWD_MXR5G_GET(x) (((x) & 0x00000200) >> 9)
+#define PHY_ANALOG_RXRF_GAINSTAGES_PWD_MXR5G_SET(x) (((x) << 9) & 0x00000200)
+#define PHY_ANALOG_RXRF_GAINSTAGES_PWD_LNA5G_MSB 10
+#define PHY_ANALOG_RXRF_GAINSTAGES_PWD_LNA5G_LSB 10
+#define PHY_ANALOG_RXRF_GAINSTAGES_PWD_LNA5G_MASK 0x00000400
+#define PHY_ANALOG_RXRF_GAINSTAGES_PWD_LNA5G_GET(x) (((x) & 0x00000400) >> 10)
+#define PHY_ANALOG_RXRF_GAINSTAGES_PWD_LNA5G_SET(x) (((x) << 10) & 0x00000400)
+#define PHY_ANALOG_RXRF_GAINSTAGES_LNA2G_CAP_MSB 12
+#define PHY_ANALOG_RXRF_GAINSTAGES_LNA2G_CAP_LSB 11
+#define PHY_ANALOG_RXRF_GAINSTAGES_LNA2G_CAP_MASK 0x00001800
+#define PHY_ANALOG_RXRF_GAINSTAGES_LNA2G_CAP_GET(x) (((x) & 0x00001800) >> 11)
+#define PHY_ANALOG_RXRF_GAINSTAGES_LNA2G_CAP_SET(x) (((x) << 11) & 0x00001800)
+#define PHY_ANALOG_RXRF_GAINSTAGES_LNA2G_SHORTINP_MSB 13
+#define PHY_ANALOG_RXRF_GAINSTAGES_LNA2G_SHORTINP_LSB 13
+#define PHY_ANALOG_RXRF_GAINSTAGES_LNA2G_SHORTINP_MASK 0x00002000
+#define PHY_ANALOG_RXRF_GAINSTAGES_LNA2G_SHORTINP_GET(x) (((x) & 0x00002000) >> 13)
+#define PHY_ANALOG_RXRF_GAINSTAGES_LNA2G_SHORTINP_SET(x) (((x) << 13) & 0x00002000)
+#define PHY_ANALOG_RXRF_GAINSTAGES_LNA2G_LP_MSB 14
+#define PHY_ANALOG_RXRF_GAINSTAGES_LNA2G_LP_LSB 14
+#define PHY_ANALOG_RXRF_GAINSTAGES_LNA2G_LP_MASK 0x00004000
+#define PHY_ANALOG_RXRF_GAINSTAGES_LNA2G_LP_GET(x) (((x) & 0x00004000) >> 14)
+#define PHY_ANALOG_RXRF_GAINSTAGES_LNA2G_LP_SET(x) (((x) << 14) & 0x00004000)
+#define PHY_ANALOG_RXRF_GAINSTAGES_PWD_LO2G_MSB 15
+#define PHY_ANALOG_RXRF_GAINSTAGES_PWD_LO2G_LSB 15
+#define PHY_ANALOG_RXRF_GAINSTAGES_PWD_LO2G_MASK 0x00008000
+#define PHY_ANALOG_RXRF_GAINSTAGES_PWD_LO2G_GET(x) (((x) & 0x00008000) >> 15)
+#define PHY_ANALOG_RXRF_GAINSTAGES_PWD_LO2G_SET(x) (((x) << 15) & 0x00008000)
+#define PHY_ANALOG_RXRF_GAINSTAGES_PWD_MXR2G_MSB 16
+#define PHY_ANALOG_RXRF_GAINSTAGES_PWD_MXR2G_LSB 16
+#define PHY_ANALOG_RXRF_GAINSTAGES_PWD_MXR2G_MASK 0x00010000
+#define PHY_ANALOG_RXRF_GAINSTAGES_PWD_MXR2G_GET(x) (((x) & 0x00010000) >> 16)
+#define PHY_ANALOG_RXRF_GAINSTAGES_PWD_MXR2G_SET(x) (((x) << 16) & 0x00010000)
+#define PHY_ANALOG_RXRF_GAINSTAGES_PWD_LNA2G_MSB 17
+#define PHY_ANALOG_RXRF_GAINSTAGES_PWD_LNA2G_LSB 17
+#define PHY_ANALOG_RXRF_GAINSTAGES_PWD_LNA2G_MASK 0x00020000
+#define PHY_ANALOG_RXRF_GAINSTAGES_PWD_LNA2G_GET(x) (((x) & 0x00020000) >> 17)
+#define PHY_ANALOG_RXRF_GAINSTAGES_PWD_LNA2G_SET(x) (((x) << 17) & 0x00020000)
+#define PHY_ANALOG_RXRF_GAINSTAGES_MXR5G_GAIN_OVR_MSB 19
+#define PHY_ANALOG_RXRF_GAINSTAGES_MXR5G_GAIN_OVR_LSB 18
+#define PHY_ANALOG_RXRF_GAINSTAGES_MXR5G_GAIN_OVR_MASK 0x000c0000
+#define PHY_ANALOG_RXRF_GAINSTAGES_MXR5G_GAIN_OVR_GET(x) (((x) & 0x000c0000) >> 18)
+#define PHY_ANALOG_RXRF_GAINSTAGES_MXR5G_GAIN_OVR_SET(x) (((x) << 18) & 0x000c0000)
+#define PHY_ANALOG_RXRF_GAINSTAGES_VGA5G_GAIN_OVR_MSB 22
+#define PHY_ANALOG_RXRF_GAINSTAGES_VGA5G_GAIN_OVR_LSB 20
+#define PHY_ANALOG_RXRF_GAINSTAGES_VGA5G_GAIN_OVR_MASK 0x00700000
+#define PHY_ANALOG_RXRF_GAINSTAGES_VGA5G_GAIN_OVR_GET(x) (((x) & 0x00700000) >> 20)
+#define PHY_ANALOG_RXRF_GAINSTAGES_VGA5G_GAIN_OVR_SET(x) (((x) << 20) & 0x00700000)
+#define PHY_ANALOG_RXRF_GAINSTAGES_LNA5G_GAIN_OVR_MSB 25
+#define PHY_ANALOG_RXRF_GAINSTAGES_LNA5G_GAIN_OVR_LSB 23
+#define PHY_ANALOG_RXRF_GAINSTAGES_LNA5G_GAIN_OVR_MASK 0x03800000
+#define PHY_ANALOG_RXRF_GAINSTAGES_LNA5G_GAIN_OVR_GET(x) (((x) & 0x03800000) >> 23)
+#define PHY_ANALOG_RXRF_GAINSTAGES_LNA5G_GAIN_OVR_SET(x) (((x) << 23) & 0x03800000)
+#define PHY_ANALOG_RXRF_GAINSTAGES_MXR2G_GAIN_OVR_MSB 27
+#define PHY_ANALOG_RXRF_GAINSTAGES_MXR2G_GAIN_OVR_LSB 26
+#define PHY_ANALOG_RXRF_GAINSTAGES_MXR2G_GAIN_OVR_MASK 0x0c000000
+#define PHY_ANALOG_RXRF_GAINSTAGES_MXR2G_GAIN_OVR_GET(x) (((x) & 0x0c000000) >> 26)
+#define PHY_ANALOG_RXRF_GAINSTAGES_MXR2G_GAIN_OVR_SET(x) (((x) << 26) & 0x0c000000)
+#define PHY_ANALOG_RXRF_GAINSTAGES_LNA2G_GAIN_OVR_MSB 30
+#define PHY_ANALOG_RXRF_GAINSTAGES_LNA2G_GAIN_OVR_LSB 28
+#define PHY_ANALOG_RXRF_GAINSTAGES_LNA2G_GAIN_OVR_MASK 0x70000000
+#define PHY_ANALOG_RXRF_GAINSTAGES_LNA2G_GAIN_OVR_GET(x) (((x) & 0x70000000) >> 28)
+#define PHY_ANALOG_RXRF_GAINSTAGES_LNA2G_GAIN_OVR_SET(x) (((x) << 28) & 0x70000000)
+#define PHY_ANALOG_RXRF_GAINSTAGES_RX_OVERRIDE_MSB 31
+#define PHY_ANALOG_RXRF_GAINSTAGES_RX_OVERRIDE_LSB 31
+#define PHY_ANALOG_RXRF_GAINSTAGES_RX_OVERRIDE_MASK 0x80000000
+#define PHY_ANALOG_RXRF_GAINSTAGES_RX_OVERRIDE_GET(x) (((x) & 0x80000000) >> 31)
+#define PHY_ANALOG_RXRF_GAINSTAGES_RX_OVERRIDE_SET(x) (((x) << 31) & 0x80000000)
+
+/* macros for RXRF_AGC */
+#define PHY_ANALOG_RXRF_AGC_ADDRESS 0x0000000c
+#define PHY_ANALOG_RXRF_AGC_OFFSET 0x0000000c
+#define PHY_ANALOG_RXRF_AGC_RF5G_ON_DURING_CALPA_MSB 0
+#define PHY_ANALOG_RXRF_AGC_RF5G_ON_DURING_CALPA_LSB 0
+#define PHY_ANALOG_RXRF_AGC_RF5G_ON_DURING_CALPA_MASK 0x00000001
+#define PHY_ANALOG_RXRF_AGC_RF5G_ON_DURING_CALPA_GET(x) (((x) & 0x00000001) >> 0)
+#define PHY_ANALOG_RXRF_AGC_RF5G_ON_DURING_CALPA_SET(x) (((x) << 0) & 0x00000001)
+#define PHY_ANALOG_RXRF_AGC_RF2G_ON_DURING_CALPA_MSB 1
+#define PHY_ANALOG_RXRF_AGC_RF2G_ON_DURING_CALPA_LSB 1
+#define PHY_ANALOG_RXRF_AGC_RF2G_ON_DURING_CALPA_MASK 0x00000002
+#define PHY_ANALOG_RXRF_AGC_RF2G_ON_DURING_CALPA_GET(x) (((x) & 0x00000002) >> 1)
+#define PHY_ANALOG_RXRF_AGC_RF2G_ON_DURING_CALPA_SET(x) (((x) << 1) & 0x00000002)
+#define PHY_ANALOG_RXRF_AGC_AGC_OUT_MSB 2
+#define PHY_ANALOG_RXRF_AGC_AGC_OUT_LSB 2
+#define PHY_ANALOG_RXRF_AGC_AGC_OUT_MASK 0x00000004
+#define PHY_ANALOG_RXRF_AGC_AGC_OUT_GET(x) (((x) & 0x00000004) >> 2)
+#define PHY_ANALOG_RXRF_AGC_LNABUFGAIN2X_MSB 3
+#define PHY_ANALOG_RXRF_AGC_LNABUFGAIN2X_LSB 3
+#define PHY_ANALOG_RXRF_AGC_LNABUFGAIN2X_MASK 0x00000008
+#define PHY_ANALOG_RXRF_AGC_LNABUFGAIN2X_GET(x) (((x) & 0x00000008) >> 3)
+#define PHY_ANALOG_RXRF_AGC_LNABUFGAIN2X_SET(x) (((x) << 3) & 0x00000008)
+#define PHY_ANALOG_RXRF_AGC_LNABUF_PWD_OVR_MSB 4
+#define PHY_ANALOG_RXRF_AGC_LNABUF_PWD_OVR_LSB 4
+#define PHY_ANALOG_RXRF_AGC_LNABUF_PWD_OVR_MASK 0x00000010
+#define PHY_ANALOG_RXRF_AGC_LNABUF_PWD_OVR_GET(x) (((x) & 0x00000010) >> 4)
+#define PHY_ANALOG_RXRF_AGC_LNABUF_PWD_OVR_SET(x) (((x) << 4) & 0x00000010)
+#define PHY_ANALOG_RXRF_AGC_PWD_LNABUF_MSB 5
+#define PHY_ANALOG_RXRF_AGC_PWD_LNABUF_LSB 5
+#define PHY_ANALOG_RXRF_AGC_PWD_LNABUF_MASK 0x00000020
+#define PHY_ANALOG_RXRF_AGC_PWD_LNABUF_GET(x) (((x) & 0x00000020) >> 5)
+#define PHY_ANALOG_RXRF_AGC_PWD_LNABUF_SET(x) (((x) << 5) & 0x00000020)
+#define PHY_ANALOG_RXRF_AGC_AGC_FALL_CTRL_MSB 8
+#define PHY_ANALOG_RXRF_AGC_AGC_FALL_CTRL_LSB 6
+#define PHY_ANALOG_RXRF_AGC_AGC_FALL_CTRL_MASK 0x000001c0
+#define PHY_ANALOG_RXRF_AGC_AGC_FALL_CTRL_GET(x) (((x) & 0x000001c0) >> 6)
+#define PHY_ANALOG_RXRF_AGC_AGC_FALL_CTRL_SET(x) (((x) << 6) & 0x000001c0)
+#define PHY_ANALOG_RXRF_AGC_AGC5G_CALDAC_OVR_MSB 14
+#define PHY_ANALOG_RXRF_AGC_AGC5G_CALDAC_OVR_LSB 9
+#define PHY_ANALOG_RXRF_AGC_AGC5G_CALDAC_OVR_MASK 0x00007e00
+#define PHY_ANALOG_RXRF_AGC_AGC5G_CALDAC_OVR_GET(x) (((x) & 0x00007e00) >> 9)
+#define PHY_ANALOG_RXRF_AGC_AGC5G_CALDAC_OVR_SET(x) (((x) << 9) & 0x00007e00)
+#define PHY_ANALOG_RXRF_AGC_AGC5G_DBDAC_OVR_MSB 18
+#define PHY_ANALOG_RXRF_AGC_AGC5G_DBDAC_OVR_LSB 15
+#define PHY_ANALOG_RXRF_AGC_AGC5G_DBDAC_OVR_MASK 0x00078000
+#define PHY_ANALOG_RXRF_AGC_AGC5G_DBDAC_OVR_GET(x) (((x) & 0x00078000) >> 15)
+#define PHY_ANALOG_RXRF_AGC_AGC5G_DBDAC_OVR_SET(x) (((x) << 15) & 0x00078000)
+#define PHY_ANALOG_RXRF_AGC_AGC2G_CALDAC_OVR_MSB 24
+#define PHY_ANALOG_RXRF_AGC_AGC2G_CALDAC_OVR_LSB 19
+#define PHY_ANALOG_RXRF_AGC_AGC2G_CALDAC_OVR_MASK 0x01f80000
+#define PHY_ANALOG_RXRF_AGC_AGC2G_CALDAC_OVR_GET(x) (((x) & 0x01f80000) >> 19)
+#define PHY_ANALOG_RXRF_AGC_AGC2G_CALDAC_OVR_SET(x) (((x) << 19) & 0x01f80000)
+#define PHY_ANALOG_RXRF_AGC_AGC2G_DBDAC_OVR_MSB 28
+#define PHY_ANALOG_RXRF_AGC_AGC2G_DBDAC_OVR_LSB 25
+#define PHY_ANALOG_RXRF_AGC_AGC2G_DBDAC_OVR_MASK 0x1e000000
+#define PHY_ANALOG_RXRF_AGC_AGC2G_DBDAC_OVR_GET(x) (((x) & 0x1e000000) >> 25)
+#define PHY_ANALOG_RXRF_AGC_AGC2G_DBDAC_OVR_SET(x) (((x) << 25) & 0x1e000000)
+#define PHY_ANALOG_RXRF_AGC_AGC_CAL_OVR_MSB 29
+#define PHY_ANALOG_RXRF_AGC_AGC_CAL_OVR_LSB 29
+#define PHY_ANALOG_RXRF_AGC_AGC_CAL_OVR_MASK 0x20000000
+#define PHY_ANALOG_RXRF_AGC_AGC_CAL_OVR_GET(x) (((x) & 0x20000000) >> 29)
+#define PHY_ANALOG_RXRF_AGC_AGC_CAL_OVR_SET(x) (((x) << 29) & 0x20000000)
+#define PHY_ANALOG_RXRF_AGC_AGC_ON_OVR_MSB 30
+#define PHY_ANALOG_RXRF_AGC_AGC_ON_OVR_LSB 30
+#define PHY_ANALOG_RXRF_AGC_AGC_ON_OVR_MASK 0x40000000
+#define PHY_ANALOG_RXRF_AGC_AGC_ON_OVR_GET(x) (((x) & 0x40000000) >> 30)
+#define PHY_ANALOG_RXRF_AGC_AGC_ON_OVR_SET(x) (((x) << 30) & 0x40000000)
+#define PHY_ANALOG_RXRF_AGC_AGC_OVERRIDE_MSB 31
+#define PHY_ANALOG_RXRF_AGC_AGC_OVERRIDE_LSB 31
+#define PHY_ANALOG_RXRF_AGC_AGC_OVERRIDE_MASK 0x80000000
+#define PHY_ANALOG_RXRF_AGC_AGC_OVERRIDE_GET(x) (((x) & 0x80000000) >> 31)
+#define PHY_ANALOG_RXRF_AGC_AGC_OVERRIDE_SET(x) (((x) << 31) & 0x80000000)
+
+/* macros for TXRF1 */
+#define PHY_ANALOG_TXRF1_ADDRESS 0x00000040
+#define PHY_ANALOG_TXRF1_OFFSET 0x00000040
+#define PHY_ANALOG_TXRF1_PDLOBUF5G_MSB 0
+#define PHY_ANALOG_TXRF1_PDLOBUF5G_LSB 0
+#define PHY_ANALOG_TXRF1_PDLOBUF5G_MASK 0x00000001
+#define PHY_ANALOG_TXRF1_PDLOBUF5G_GET(x) (((x) & 0x00000001) >> 0)
+#define PHY_ANALOG_TXRF1_PDLOBUF5G_SET(x) (((x) << 0) & 0x00000001)
+#define PHY_ANALOG_TXRF1_PDLODIV5G_MSB 1
+#define PHY_ANALOG_TXRF1_PDLODIV5G_LSB 1
+#define PHY_ANALOG_TXRF1_PDLODIV5G_MASK 0x00000002
+#define PHY_ANALOG_TXRF1_PDLODIV5G_GET(x) (((x) & 0x00000002) >> 1)
+#define PHY_ANALOG_TXRF1_PDLODIV5G_SET(x) (((x) << 1) & 0x00000002)
+#define PHY_ANALOG_TXRF1_LOBUF5GFORCED_MSB 2
+#define PHY_ANALOG_TXRF1_LOBUF5GFORCED_LSB 2
+#define PHY_ANALOG_TXRF1_LOBUF5GFORCED_MASK 0x00000004
+#define PHY_ANALOG_TXRF1_LOBUF5GFORCED_GET(x) (((x) & 0x00000004) >> 2)
+#define PHY_ANALOG_TXRF1_LOBUF5GFORCED_SET(x) (((x) << 2) & 0x00000004)
+#define PHY_ANALOG_TXRF1_LODIV5GFORCED_MSB 3
+#define PHY_ANALOG_TXRF1_LODIV5GFORCED_LSB 3
+#define PHY_ANALOG_TXRF1_LODIV5GFORCED_MASK 0x00000008
+#define PHY_ANALOG_TXRF1_LODIV5GFORCED_GET(x) (((x) & 0x00000008) >> 3)
+#define PHY_ANALOG_TXRF1_LODIV5GFORCED_SET(x) (((x) << 3) & 0x00000008)
+#define PHY_ANALOG_TXRF1_PADRV2GN5G_MSB 7
+#define PHY_ANALOG_TXRF1_PADRV2GN5G_LSB 4
+#define PHY_ANALOG_TXRF1_PADRV2GN5G_MASK 0x000000f0
+#define PHY_ANALOG_TXRF1_PADRV2GN5G_GET(x) (((x) & 0x000000f0) >> 4)
+#define PHY_ANALOG_TXRF1_PADRV2GN5G_SET(x) (((x) << 4) & 0x000000f0)
+#define PHY_ANALOG_TXRF1_PADRV3GN5G_MSB 11
+#define PHY_ANALOG_TXRF1_PADRV3GN5G_LSB 8
+#define PHY_ANALOG_TXRF1_PADRV3GN5G_MASK 0x00000f00
+#define PHY_ANALOG_TXRF1_PADRV3GN5G_GET(x) (((x) & 0x00000f00) >> 8)
+#define PHY_ANALOG_TXRF1_PADRV3GN5G_SET(x) (((x) << 8) & 0x00000f00)
+#define PHY_ANALOG_TXRF1_PADRV4GN5G_MSB 15
+#define PHY_ANALOG_TXRF1_PADRV4GN5G_LSB 12
+#define PHY_ANALOG_TXRF1_PADRV4GN5G_MASK 0x0000f000
+#define PHY_ANALOG_TXRF1_PADRV4GN5G_GET(x) (((x) & 0x0000f000) >> 12)
+#define PHY_ANALOG_TXRF1_PADRV4GN5G_SET(x) (((x) << 12) & 0x0000f000)
+#define PHY_ANALOG_TXRF1_LOCALTXGAIN5G_MSB 16
+#define PHY_ANALOG_TXRF1_LOCALTXGAIN5G_LSB 16
+#define PHY_ANALOG_TXRF1_LOCALTXGAIN5G_MASK 0x00010000
+#define PHY_ANALOG_TXRF1_LOCALTXGAIN5G_GET(x) (((x) & 0x00010000) >> 16)
+#define PHY_ANALOG_TXRF1_LOCALTXGAIN5G_SET(x) (((x) << 16) & 0x00010000)
+#define PHY_ANALOG_TXRF1_PDOUT2G_MSB 17
+#define PHY_ANALOG_TXRF1_PDOUT2G_LSB 17
+#define PHY_ANALOG_TXRF1_PDOUT2G_MASK 0x00020000
+#define PHY_ANALOG_TXRF1_PDOUT2G_GET(x) (((x) & 0x00020000) >> 17)
+#define PHY_ANALOG_TXRF1_PDOUT2G_SET(x) (((x) << 17) & 0x00020000)
+#define PHY_ANALOG_TXRF1_PDDR2G_MSB 18
+#define PHY_ANALOG_TXRF1_PDDR2G_LSB 18
+#define PHY_ANALOG_TXRF1_PDDR2G_MASK 0x00040000
+#define PHY_ANALOG_TXRF1_PDDR2G_GET(x) (((x) & 0x00040000) >> 18)
+#define PHY_ANALOG_TXRF1_PDDR2G_SET(x) (((x) << 18) & 0x00040000)
+#define PHY_ANALOG_TXRF1_PDMXR2G_MSB 19
+#define PHY_ANALOG_TXRF1_PDMXR2G_LSB 19
+#define PHY_ANALOG_TXRF1_PDMXR2G_MASK 0x00080000
+#define PHY_ANALOG_TXRF1_PDMXR2G_GET(x) (((x) & 0x00080000) >> 19)
+#define PHY_ANALOG_TXRF1_PDMXR2G_SET(x) (((x) << 19) & 0x00080000)
+#define PHY_ANALOG_TXRF1_PDLOBUF2G_MSB 20
+#define PHY_ANALOG_TXRF1_PDLOBUF2G_LSB 20
+#define PHY_ANALOG_TXRF1_PDLOBUF2G_MASK 0x00100000
+#define PHY_ANALOG_TXRF1_PDLOBUF2G_GET(x) (((x) & 0x00100000) >> 20)
+#define PHY_ANALOG_TXRF1_PDLOBUF2G_SET(x) (((x) << 20) & 0x00100000)
+#define PHY_ANALOG_TXRF1_PDLODIV2G_MSB 21
+#define PHY_ANALOG_TXRF1_PDLODIV2G_LSB 21
+#define PHY_ANALOG_TXRF1_PDLODIV2G_MASK 0x00200000
+#define PHY_ANALOG_TXRF1_PDLODIV2G_GET(x) (((x) & 0x00200000) >> 21)
+#define PHY_ANALOG_TXRF1_PDLODIV2G_SET(x) (((x) << 21) & 0x00200000)
+#define PHY_ANALOG_TXRF1_LOBUF2GFORCED_MSB 22
+#define PHY_ANALOG_TXRF1_LOBUF2GFORCED_LSB 22
+#define PHY_ANALOG_TXRF1_LOBUF2GFORCED_MASK 0x00400000
+#define PHY_ANALOG_TXRF1_LOBUF2GFORCED_GET(x) (((x) & 0x00400000) >> 22)
+#define PHY_ANALOG_TXRF1_LOBUF2GFORCED_SET(x) (((x) << 22) & 0x00400000)
+#define PHY_ANALOG_TXRF1_LODIV2GFORCED_MSB 23
+#define PHY_ANALOG_TXRF1_LODIV2GFORCED_LSB 23
+#define PHY_ANALOG_TXRF1_LODIV2GFORCED_MASK 0x00800000
+#define PHY_ANALOG_TXRF1_LODIV2GFORCED_GET(x) (((x) & 0x00800000) >> 23)
+#define PHY_ANALOG_TXRF1_LODIV2GFORCED_SET(x) (((x) << 23) & 0x00800000)
+#define PHY_ANALOG_TXRF1_PADRVGN2G_MSB 30
+#define PHY_ANALOG_TXRF1_PADRVGN2G_LSB 24
+#define PHY_ANALOG_TXRF1_PADRVGN2G_MASK 0x7f000000
+#define PHY_ANALOG_TXRF1_PADRVGN2G_GET(x) (((x) & 0x7f000000) >> 24)
+#define PHY_ANALOG_TXRF1_PADRVGN2G_SET(x) (((x) << 24) & 0x7f000000)
+#define PHY_ANALOG_TXRF1_LOCALTXGAIN2G_MSB 31
+#define PHY_ANALOG_TXRF1_LOCALTXGAIN2G_LSB 31
+#define PHY_ANALOG_TXRF1_LOCALTXGAIN2G_MASK 0x80000000
+#define PHY_ANALOG_TXRF1_LOCALTXGAIN2G_GET(x) (((x) & 0x80000000) >> 31)
+#define PHY_ANALOG_TXRF1_LOCALTXGAIN2G_SET(x) (((x) << 31) & 0x80000000)
+
+/* macros for TXRF2 */
+#define PHY_ANALOG_TXRF2_ADDRESS 0x00000044
+#define PHY_ANALOG_TXRF2_OFFSET 0x00000044
+#define PHY_ANALOG_TXRF2_D3B5G_MSB 2
+#define PHY_ANALOG_TXRF2_D3B5G_LSB 0
+#define PHY_ANALOG_TXRF2_D3B5G_MASK 0x00000007
+#define PHY_ANALOG_TXRF2_D3B5G_GET(x) (((x) & 0x00000007) >> 0)
+#define PHY_ANALOG_TXRF2_D3B5G_SET(x) (((x) << 0) & 0x00000007)
+#define PHY_ANALOG_TXRF2_D4B5G_MSB 5
+#define PHY_ANALOG_TXRF2_D4B5G_LSB 3
+#define PHY_ANALOG_TXRF2_D4B5G_MASK 0x00000038
+#define PHY_ANALOG_TXRF2_D4B5G_GET(x) (((x) & 0x00000038) >> 3)
+#define PHY_ANALOG_TXRF2_D4B5G_SET(x) (((x) << 3) & 0x00000038)
+#define PHY_ANALOG_TXRF2_OCAS2G_MSB 8
+#define PHY_ANALOG_TXRF2_OCAS2G_LSB 6
+#define PHY_ANALOG_TXRF2_OCAS2G_MASK 0x000001c0
+#define PHY_ANALOG_TXRF2_OCAS2G_GET(x) (((x) & 0x000001c0) >> 6)
+#define PHY_ANALOG_TXRF2_OCAS2G_SET(x) (((x) << 6) & 0x000001c0)
+#define PHY_ANALOG_TXRF2_DCAS2G_MSB 11
+#define PHY_ANALOG_TXRF2_DCAS2G_LSB 9
+#define PHY_ANALOG_TXRF2_DCAS2G_MASK 0x00000e00
+#define PHY_ANALOG_TXRF2_DCAS2G_GET(x) (((x) & 0x00000e00) >> 9)
+#define PHY_ANALOG_TXRF2_DCAS2G_SET(x) (((x) << 9) & 0x00000e00)
+#define PHY_ANALOG_TXRF2_OB2G_PALOFF_MSB 14
+#define PHY_ANALOG_TXRF2_OB2G_PALOFF_LSB 12
+#define PHY_ANALOG_TXRF2_OB2G_PALOFF_MASK 0x00007000
+#define PHY_ANALOG_TXRF2_OB2G_PALOFF_GET(x) (((x) & 0x00007000) >> 12)
+#define PHY_ANALOG_TXRF2_OB2G_PALOFF_SET(x) (((x) << 12) & 0x00007000)
+#define PHY_ANALOG_TXRF2_OB2G_QAM_MSB 17
+#define PHY_ANALOG_TXRF2_OB2G_QAM_LSB 15
+#define PHY_ANALOG_TXRF2_OB2G_QAM_MASK 0x00038000
+#define PHY_ANALOG_TXRF2_OB2G_QAM_GET(x) (((x) & 0x00038000) >> 15)
+#define PHY_ANALOG_TXRF2_OB2G_QAM_SET(x) (((x) << 15) & 0x00038000)
+#define PHY_ANALOG_TXRF2_OB2G_PSK_MSB 20
+#define PHY_ANALOG_TXRF2_OB2G_PSK_LSB 18
+#define PHY_ANALOG_TXRF2_OB2G_PSK_MASK 0x001c0000
+#define PHY_ANALOG_TXRF2_OB2G_PSK_GET(x) (((x) & 0x001c0000) >> 18)
+#define PHY_ANALOG_TXRF2_OB2G_PSK_SET(x) (((x) << 18) & 0x001c0000)
+#define PHY_ANALOG_TXRF2_OB2G_CCK_MSB 23
+#define PHY_ANALOG_TXRF2_OB2G_CCK_LSB 21
+#define PHY_ANALOG_TXRF2_OB2G_CCK_MASK 0x00e00000
+#define PHY_ANALOG_TXRF2_OB2G_CCK_GET(x) (((x) & 0x00e00000) >> 21)
+#define PHY_ANALOG_TXRF2_OB2G_CCK_SET(x) (((x) << 21) & 0x00e00000)
+#define PHY_ANALOG_TXRF2_DB2G_MSB 26
+#define PHY_ANALOG_TXRF2_DB2G_LSB 24
+#define PHY_ANALOG_TXRF2_DB2G_MASK 0x07000000
+#define PHY_ANALOG_TXRF2_DB2G_GET(x) (((x) & 0x07000000) >> 24)
+#define PHY_ANALOG_TXRF2_DB2G_SET(x) (((x) << 24) & 0x07000000)
+#define PHY_ANALOG_TXRF2_PDOUT5G_MSB 30
+#define PHY_ANALOG_TXRF2_PDOUT5G_LSB 27
+#define PHY_ANALOG_TXRF2_PDOUT5G_MASK 0x78000000
+#define PHY_ANALOG_TXRF2_PDOUT5G_GET(x) (((x) & 0x78000000) >> 27)
+#define PHY_ANALOG_TXRF2_PDOUT5G_SET(x) (((x) << 27) & 0x78000000)
+#define PHY_ANALOG_TXRF2_PDMXR5G_MSB 31
+#define PHY_ANALOG_TXRF2_PDMXR5G_LSB 31
+#define PHY_ANALOG_TXRF2_PDMXR5G_MASK 0x80000000
+#define PHY_ANALOG_TXRF2_PDMXR5G_GET(x) (((x) & 0x80000000) >> 31)
+#define PHY_ANALOG_TXRF2_PDMXR5G_SET(x) (((x) << 31) & 0x80000000)
+
+/* macros for TXRF3 */
+#define PHY_ANALOG_TXRF3_ADDRESS 0x00000048
+#define PHY_ANALOG_TXRF3_OFFSET 0x00000048
+#define PHY_ANALOG_TXRF3_FILTR2G_MSB 1
+#define PHY_ANALOG_TXRF3_FILTR2G_LSB 0
+#define PHY_ANALOG_TXRF3_FILTR2G_MASK 0x00000003
+#define PHY_ANALOG_TXRF3_FILTR2G_GET(x) (((x) & 0x00000003) >> 0)
+#define PHY_ANALOG_TXRF3_FILTR2G_SET(x) (((x) << 0) & 0x00000003)
+#define PHY_ANALOG_TXRF3_PWDFB2_2G_MSB 2
+#define PHY_ANALOG_TXRF3_PWDFB2_2G_LSB 2
+#define PHY_ANALOG_TXRF3_PWDFB2_2G_MASK 0x00000004
+#define PHY_ANALOG_TXRF3_PWDFB2_2G_GET(x) (((x) & 0x00000004) >> 2)
+#define PHY_ANALOG_TXRF3_PWDFB2_2G_SET(x) (((x) << 2) & 0x00000004)
+#define PHY_ANALOG_TXRF3_PWDFB1_2G_MSB 3
+#define PHY_ANALOG_TXRF3_PWDFB1_2G_LSB 3
+#define PHY_ANALOG_TXRF3_PWDFB1_2G_MASK 0x00000008
+#define PHY_ANALOG_TXRF3_PWDFB1_2G_GET(x) (((x) & 0x00000008) >> 3)
+#define PHY_ANALOG_TXRF3_PWDFB1_2G_SET(x) (((x) << 3) & 0x00000008)
+#define PHY_ANALOG_TXRF3_PDFB2G_MSB 4
+#define PHY_ANALOG_TXRF3_PDFB2G_LSB 4
+#define PHY_ANALOG_TXRF3_PDFB2G_MASK 0x00000010
+#define PHY_ANALOG_TXRF3_PDFB2G_GET(x) (((x) & 0x00000010) >> 4)
+#define PHY_ANALOG_TXRF3_PDFB2G_SET(x) (((x) << 4) & 0x00000010)
+#define PHY_ANALOG_TXRF3_RDIV5G_MSB 6
+#define PHY_ANALOG_TXRF3_RDIV5G_LSB 5
+#define PHY_ANALOG_TXRF3_RDIV5G_MASK 0x00000060
+#define PHY_ANALOG_TXRF3_RDIV5G_GET(x) (((x) & 0x00000060) >> 5)
+#define PHY_ANALOG_TXRF3_RDIV5G_SET(x) (((x) << 5) & 0x00000060)
+#define PHY_ANALOG_TXRF3_CAPDIV5G_MSB 9
+#define PHY_ANALOG_TXRF3_CAPDIV5G_LSB 7
+#define PHY_ANALOG_TXRF3_CAPDIV5G_MASK 0x00000380
+#define PHY_ANALOG_TXRF3_CAPDIV5G_GET(x) (((x) & 0x00000380) >> 7)
+#define PHY_ANALOG_TXRF3_CAPDIV5G_SET(x) (((x) << 7) & 0x00000380)
+#define PHY_ANALOG_TXRF3_PDPREDIST5G_MSB 10
+#define PHY_ANALOG_TXRF3_PDPREDIST5G_LSB 10
+#define PHY_ANALOG_TXRF3_PDPREDIST5G_MASK 0x00000400
+#define PHY_ANALOG_TXRF3_PDPREDIST5G_GET(x) (((x) & 0x00000400) >> 10)
+#define PHY_ANALOG_TXRF3_PDPREDIST5G_SET(x) (((x) << 10) & 0x00000400)
+#define PHY_ANALOG_TXRF3_RDIV2G_MSB 12
+#define PHY_ANALOG_TXRF3_RDIV2G_LSB 11
+#define PHY_ANALOG_TXRF3_RDIV2G_MASK 0x00001800
+#define PHY_ANALOG_TXRF3_RDIV2G_GET(x) (((x) & 0x00001800) >> 11)
+#define PHY_ANALOG_TXRF3_RDIV2G_SET(x) (((x) << 11) & 0x00001800)
+#define PHY_ANALOG_TXRF3_PDPREDIST2G_MSB 13
+#define PHY_ANALOG_TXRF3_PDPREDIST2G_LSB 13
+#define PHY_ANALOG_TXRF3_PDPREDIST2G_MASK 0x00002000
+#define PHY_ANALOG_TXRF3_PDPREDIST2G_GET(x) (((x) & 0x00002000) >> 13)
+#define PHY_ANALOG_TXRF3_PDPREDIST2G_SET(x) (((x) << 13) & 0x00002000)
+#define PHY_ANALOG_TXRF3_OCAS5G_MSB 16
+#define PHY_ANALOG_TXRF3_OCAS5G_LSB 14
+#define PHY_ANALOG_TXRF3_OCAS5G_MASK 0x0001c000
+#define PHY_ANALOG_TXRF3_OCAS5G_GET(x) (((x) & 0x0001c000) >> 14)
+#define PHY_ANALOG_TXRF3_OCAS5G_SET(x) (((x) << 14) & 0x0001c000)
+#define PHY_ANALOG_TXRF3_D2CAS5G_MSB 19
+#define PHY_ANALOG_TXRF3_D2CAS5G_LSB 17
+#define PHY_ANALOG_TXRF3_D2CAS5G_MASK 0x000e0000
+#define PHY_ANALOG_TXRF3_D2CAS5G_GET(x) (((x) & 0x000e0000) >> 17)
+#define PHY_ANALOG_TXRF3_D2CAS5G_SET(x) (((x) << 17) & 0x000e0000)
+#define PHY_ANALOG_TXRF3_D3CAS5G_MSB 22
+#define PHY_ANALOG_TXRF3_D3CAS5G_LSB 20
+#define PHY_ANALOG_TXRF3_D3CAS5G_MASK 0x00700000
+#define PHY_ANALOG_TXRF3_D3CAS5G_GET(x) (((x) & 0x00700000) >> 20)
+#define PHY_ANALOG_TXRF3_D3CAS5G_SET(x) (((x) << 20) & 0x00700000)
+#define PHY_ANALOG_TXRF3_D4CAS5G_MSB 25
+#define PHY_ANALOG_TXRF3_D4CAS5G_LSB 23
+#define PHY_ANALOG_TXRF3_D4CAS5G_MASK 0x03800000
+#define PHY_ANALOG_TXRF3_D4CAS5G_GET(x) (((x) & 0x03800000) >> 23)
+#define PHY_ANALOG_TXRF3_D4CAS5G_SET(x) (((x) << 23) & 0x03800000)
+#define PHY_ANALOG_TXRF3_OB5G_MSB 28
+#define PHY_ANALOG_TXRF3_OB5G_LSB 26
+#define PHY_ANALOG_TXRF3_OB5G_MASK 0x1c000000
+#define PHY_ANALOG_TXRF3_OB5G_GET(x) (((x) & 0x1c000000) >> 26)
+#define PHY_ANALOG_TXRF3_OB5G_SET(x) (((x) << 26) & 0x1c000000)
+#define PHY_ANALOG_TXRF3_D2B5G_MSB 31
+#define PHY_ANALOG_TXRF3_D2B5G_LSB 29
+#define PHY_ANALOG_TXRF3_D2B5G_MASK 0xe0000000
+#define PHY_ANALOG_TXRF3_D2B5G_GET(x) (((x) & 0xe0000000) >> 29)
+#define PHY_ANALOG_TXRF3_D2B5G_SET(x) (((x) << 29) & 0xe0000000)
+
+/* macros for TXRF4 */
+#define PHY_ANALOG_TXRF4_ADDRESS 0x0000004c
+#define PHY_ANALOG_TXRF4_OFFSET 0x0000004c
+#define PHY_ANALOG_TXRF4_PK1B2G_CCK_MSB 1
+#define PHY_ANALOG_TXRF4_PK1B2G_CCK_LSB 0
+#define PHY_ANALOG_TXRF4_PK1B2G_CCK_MASK 0x00000003
+#define PHY_ANALOG_TXRF4_PK1B2G_CCK_GET(x) (((x) & 0x00000003) >> 0)
+#define PHY_ANALOG_TXRF4_PK1B2G_CCK_SET(x) (((x) << 0) & 0x00000003)
+#define PHY_ANALOG_TXRF4_MIOB2G_QAM_MSB 4
+#define PHY_ANALOG_TXRF4_MIOB2G_QAM_LSB 2
+#define PHY_ANALOG_TXRF4_MIOB2G_QAM_MASK 0x0000001c
+#define PHY_ANALOG_TXRF4_MIOB2G_QAM_GET(x) (((x) & 0x0000001c) >> 2)
+#define PHY_ANALOG_TXRF4_MIOB2G_QAM_SET(x) (((x) << 2) & 0x0000001c)
+#define PHY_ANALOG_TXRF4_MIOB2G_PSK_MSB 7
+#define PHY_ANALOG_TXRF4_MIOB2G_PSK_LSB 5
+#define PHY_ANALOG_TXRF4_MIOB2G_PSK_MASK 0x000000e0
+#define PHY_ANALOG_TXRF4_MIOB2G_PSK_GET(x) (((x) & 0x000000e0) >> 5)
+#define PHY_ANALOG_TXRF4_MIOB2G_PSK_SET(x) (((x) << 5) & 0x000000e0)
+#define PHY_ANALOG_TXRF4_MIOB2G_CCK_MSB 10
+#define PHY_ANALOG_TXRF4_MIOB2G_CCK_LSB 8
+#define PHY_ANALOG_TXRF4_MIOB2G_CCK_MASK 0x00000700
+#define PHY_ANALOG_TXRF4_MIOB2G_CCK_GET(x) (((x) & 0x00000700) >> 8)
+#define PHY_ANALOG_TXRF4_MIOB2G_CCK_SET(x) (((x) << 8) & 0x00000700)
+#define PHY_ANALOG_TXRF4_COMP2G_QAM_MSB 13
+#define PHY_ANALOG_TXRF4_COMP2G_QAM_LSB 11
+#define PHY_ANALOG_TXRF4_COMP2G_QAM_MASK 0x00003800
+#define PHY_ANALOG_TXRF4_COMP2G_QAM_GET(x) (((x) & 0x00003800) >> 11)
+#define PHY_ANALOG_TXRF4_COMP2G_QAM_SET(x) (((x) << 11) & 0x00003800)
+#define PHY_ANALOG_TXRF4_COMP2G_PSK_MSB 16
+#define PHY_ANALOG_TXRF4_COMP2G_PSK_LSB 14
+#define PHY_ANALOG_TXRF4_COMP2G_PSK_MASK 0x0001c000
+#define PHY_ANALOG_TXRF4_COMP2G_PSK_GET(x) (((x) & 0x0001c000) >> 14)
+#define PHY_ANALOG_TXRF4_COMP2G_PSK_SET(x) (((x) << 14) & 0x0001c000)
+#define PHY_ANALOG_TXRF4_COMP2G_CCK_MSB 19
+#define PHY_ANALOG_TXRF4_COMP2G_CCK_LSB 17
+#define PHY_ANALOG_TXRF4_COMP2G_CCK_MASK 0x000e0000
+#define PHY_ANALOG_TXRF4_COMP2G_CCK_GET(x) (((x) & 0x000e0000) >> 17)
+#define PHY_ANALOG_TXRF4_COMP2G_CCK_SET(x) (((x) << 17) & 0x000e0000)
+#define PHY_ANALOG_TXRF4_AMP2B2G_QAM_MSB 22
+#define PHY_ANALOG_TXRF4_AMP2B2G_QAM_LSB 20
+#define PHY_ANALOG_TXRF4_AMP2B2G_QAM_MASK 0x00700000
+#define PHY_ANALOG_TXRF4_AMP2B2G_QAM_GET(x) (((x) & 0x00700000) >> 20)
+#define PHY_ANALOG_TXRF4_AMP2B2G_QAM_SET(x) (((x) << 20) & 0x00700000)
+#define PHY_ANALOG_TXRF4_AMP2B2G_PSK_MSB 25
+#define PHY_ANALOG_TXRF4_AMP2B2G_PSK_LSB 23
+#define PHY_ANALOG_TXRF4_AMP2B2G_PSK_MASK 0x03800000
+#define PHY_ANALOG_TXRF4_AMP2B2G_PSK_GET(x) (((x) & 0x03800000) >> 23)
+#define PHY_ANALOG_TXRF4_AMP2B2G_PSK_SET(x) (((x) << 23) & 0x03800000)
+#define PHY_ANALOG_TXRF4_AMP2B2G_CCK_MSB 28
+#define PHY_ANALOG_TXRF4_AMP2B2G_CCK_LSB 26
+#define PHY_ANALOG_TXRF4_AMP2B2G_CCK_MASK 0x1c000000
+#define PHY_ANALOG_TXRF4_AMP2B2G_CCK_GET(x) (((x) & 0x1c000000) >> 26)
+#define PHY_ANALOG_TXRF4_AMP2B2G_CCK_SET(x) (((x) << 26) & 0x1c000000)
+#define PHY_ANALOG_TXRF4_AMP2CAS2G_MSB 31
+#define PHY_ANALOG_TXRF4_AMP2CAS2G_LSB 29
+#define PHY_ANALOG_TXRF4_AMP2CAS2G_MASK 0xe0000000
+#define PHY_ANALOG_TXRF4_AMP2CAS2G_GET(x) (((x) & 0xe0000000) >> 29)
+#define PHY_ANALOG_TXRF4_AMP2CAS2G_SET(x) (((x) << 29) & 0xe0000000)
+
+/* macros for TXRF5 */
+#define PHY_ANALOG_TXRF5_ADDRESS 0x00000050
+#define PHY_ANALOG_TXRF5_OFFSET 0x00000050
+#define PHY_ANALOG_TXRF5_SPARE5_MSB 0
+#define PHY_ANALOG_TXRF5_SPARE5_LSB 0
+#define PHY_ANALOG_TXRF5_SPARE5_MASK 0x00000001
+#define PHY_ANALOG_TXRF5_SPARE5_GET(x) (((x) & 0x00000001) >> 0)
+#define PHY_ANALOG_TXRF5_SPARE5_SET(x) (((x) << 0) & 0x00000001)
+#define PHY_ANALOG_TXRF5_PAL_LOCKED_MSB 1
+#define PHY_ANALOG_TXRF5_PAL_LOCKED_LSB 1
+#define PHY_ANALOG_TXRF5_PAL_LOCKED_MASK 0x00000002
+#define PHY_ANALOG_TXRF5_PAL_LOCKED_GET(x) (((x) & 0x00000002) >> 1)
+#define PHY_ANALOG_TXRF5_FBHI2G_MSB 2
+#define PHY_ANALOG_TXRF5_FBHI2G_LSB 2
+#define PHY_ANALOG_TXRF5_FBHI2G_MASK 0x00000004
+#define PHY_ANALOG_TXRF5_FBHI2G_GET(x) (((x) & 0x00000004) >> 2)
+#define PHY_ANALOG_TXRF5_FBLO2G_MSB 3
+#define PHY_ANALOG_TXRF5_FBLO2G_LSB 3
+#define PHY_ANALOG_TXRF5_FBLO2G_MASK 0x00000008
+#define PHY_ANALOG_TXRF5_FBLO2G_GET(x) (((x) & 0x00000008) >> 3)
+#define PHY_ANALOG_TXRF5_NOPALGAIN2G_MSB 4
+#define PHY_ANALOG_TXRF5_NOPALGAIN2G_LSB 4
+#define PHY_ANALOG_TXRF5_NOPALGAIN2G_MASK 0x00000010
+#define PHY_ANALOG_TXRF5_NOPALGAIN2G_GET(x) (((x) & 0x00000010) >> 4)
+#define PHY_ANALOG_TXRF5_NOPALGAIN2G_SET(x) (((x) << 4) & 0x00000010)
+#define PHY_ANALOG_TXRF5_ENPACAL2G_MSB 5
+#define PHY_ANALOG_TXRF5_ENPACAL2G_LSB 5
+#define PHY_ANALOG_TXRF5_ENPACAL2G_MASK 0x00000020
+#define PHY_ANALOG_TXRF5_ENPACAL2G_GET(x) (((x) & 0x00000020) >> 5)
+#define PHY_ANALOG_TXRF5_ENPACAL2G_SET(x) (((x) << 5) & 0x00000020)
+#define PHY_ANALOG_TXRF5_OFFSET2G_MSB 12
+#define PHY_ANALOG_TXRF5_OFFSET2G_LSB 6
+#define PHY_ANALOG_TXRF5_OFFSET2G_MASK 0x00001fc0
+#define PHY_ANALOG_TXRF5_OFFSET2G_GET(x) (((x) & 0x00001fc0) >> 6)
+#define PHY_ANALOG_TXRF5_OFFSET2G_SET(x) (((x) << 6) & 0x00001fc0)
+#define PHY_ANALOG_TXRF5_ENOFFSETCAL2G_MSB 13
+#define PHY_ANALOG_TXRF5_ENOFFSETCAL2G_LSB 13
+#define PHY_ANALOG_TXRF5_ENOFFSETCAL2G_MASK 0x00002000
+#define PHY_ANALOG_TXRF5_ENOFFSETCAL2G_GET(x) (((x) & 0x00002000) >> 13)
+#define PHY_ANALOG_TXRF5_ENOFFSETCAL2G_SET(x) (((x) << 13) & 0x00002000)
+#define PHY_ANALOG_TXRF5_REFHI2G_MSB 16
+#define PHY_ANALOG_TXRF5_REFHI2G_LSB 14
+#define PHY_ANALOG_TXRF5_REFHI2G_MASK 0x0001c000
+#define PHY_ANALOG_TXRF5_REFHI2G_GET(x) (((x) & 0x0001c000) >> 14)
+#define PHY_ANALOG_TXRF5_REFHI2G_SET(x) (((x) << 14) & 0x0001c000)
+#define PHY_ANALOG_TXRF5_REFLO2G_MSB 19
+#define PHY_ANALOG_TXRF5_REFLO2G_LSB 17
+#define PHY_ANALOG_TXRF5_REFLO2G_MASK 0x000e0000
+#define PHY_ANALOG_TXRF5_REFLO2G_GET(x) (((x) & 0x000e0000) >> 17)
+#define PHY_ANALOG_TXRF5_REFLO2G_SET(x) (((x) << 17) & 0x000e0000)
+#define PHY_ANALOG_TXRF5_PALCLAMP2G_MSB 21
+#define PHY_ANALOG_TXRF5_PALCLAMP2G_LSB 20
+#define PHY_ANALOG_TXRF5_PALCLAMP2G_MASK 0x00300000
+#define PHY_ANALOG_TXRF5_PALCLAMP2G_GET(x) (((x) & 0x00300000) >> 20)
+#define PHY_ANALOG_TXRF5_PALCLAMP2G_SET(x) (((x) << 20) & 0x00300000)
+#define PHY_ANALOG_TXRF5_PK2B2G_QAM_MSB 23
+#define PHY_ANALOG_TXRF5_PK2B2G_QAM_LSB 22
+#define PHY_ANALOG_TXRF5_PK2B2G_QAM_MASK 0x00c00000
+#define PHY_ANALOG_TXRF5_PK2B2G_QAM_GET(x) (((x) & 0x00c00000) >> 22)
+#define PHY_ANALOG_TXRF5_PK2B2G_QAM_SET(x) (((x) << 22) & 0x00c00000)
+#define PHY_ANALOG_TXRF5_PK2B2G_PSK_MSB 25
+#define PHY_ANALOG_TXRF5_PK2B2G_PSK_LSB 24
+#define PHY_ANALOG_TXRF5_PK2B2G_PSK_MASK 0x03000000
+#define PHY_ANALOG_TXRF5_PK2B2G_PSK_GET(x) (((x) & 0x03000000) >> 24)
+#define PHY_ANALOG_TXRF5_PK2B2G_PSK_SET(x) (((x) << 24) & 0x03000000)
+#define PHY_ANALOG_TXRF5_PK2B2G_CCK_MSB 27
+#define PHY_ANALOG_TXRF5_PK2B2G_CCK_LSB 26
+#define PHY_ANALOG_TXRF5_PK2B2G_CCK_MASK 0x0c000000
+#define PHY_ANALOG_TXRF5_PK2B2G_CCK_GET(x) (((x) & 0x0c000000) >> 26)
+#define PHY_ANALOG_TXRF5_PK2B2G_CCK_SET(x) (((x) << 26) & 0x0c000000)
+#define PHY_ANALOG_TXRF5_PK1B2G_QAM_MSB 29
+#define PHY_ANALOG_TXRF5_PK1B2G_QAM_LSB 28
+#define PHY_ANALOG_TXRF5_PK1B2G_QAM_MASK 0x30000000
+#define PHY_ANALOG_TXRF5_PK1B2G_QAM_GET(x) (((x) & 0x30000000) >> 28)
+#define PHY_ANALOG_TXRF5_PK1B2G_QAM_SET(x) (((x) << 28) & 0x30000000)
+#define PHY_ANALOG_TXRF5_PK1B2G_PSK_MSB 31
+#define PHY_ANALOG_TXRF5_PK1B2G_PSK_LSB 30
+#define PHY_ANALOG_TXRF5_PK1B2G_PSK_MASK 0xc0000000
+#define PHY_ANALOG_TXRF5_PK1B2G_PSK_GET(x) (((x) & 0xc0000000) >> 30)
+#define PHY_ANALOG_TXRF5_PK1B2G_PSK_SET(x) (((x) << 30) & 0xc0000000)
+
+/* macros for TXRF6 */
+#define PHY_ANALOG_TXRF6_ADDRESS 0x00000054
+#define PHY_ANALOG_TXRF6_OFFSET 0x00000054
+#define PHY_ANALOG_TXRF6_PALCLKGATE2G_MSB 0
+#define PHY_ANALOG_TXRF6_PALCLKGATE2G_LSB 0
+#define PHY_ANALOG_TXRF6_PALCLKGATE2G_MASK 0x00000001
+#define PHY_ANALOG_TXRF6_PALCLKGATE2G_GET(x) (((x) & 0x00000001) >> 0)
+#define PHY_ANALOG_TXRF6_PALCLKGATE2G_SET(x) (((x) << 0) & 0x00000001)
+#define PHY_ANALOG_TXRF6_PALFLUCTCOUNT2G_MSB 8
+#define PHY_ANALOG_TXRF6_PALFLUCTCOUNT2G_LSB 1
+#define PHY_ANALOG_TXRF6_PALFLUCTCOUNT2G_MASK 0x000001fe
+#define PHY_ANALOG_TXRF6_PALFLUCTCOUNT2G_GET(x) (((x) & 0x000001fe) >> 1)
+#define PHY_ANALOG_TXRF6_PALFLUCTCOUNT2G_SET(x) (((x) << 1) & 0x000001fe)
+#define PHY_ANALOG_TXRF6_PALFLUCTGAIN2G_MSB 10
+#define PHY_ANALOG_TXRF6_PALFLUCTGAIN2G_LSB 9
+#define PHY_ANALOG_TXRF6_PALFLUCTGAIN2G_MASK 0x00000600
+#define PHY_ANALOG_TXRF6_PALFLUCTGAIN2G_GET(x) (((x) & 0x00000600) >> 9)
+#define PHY_ANALOG_TXRF6_PALFLUCTGAIN2G_SET(x) (((x) << 9) & 0x00000600)
+#define PHY_ANALOG_TXRF6_PALNOFLUCT2G_MSB 11
+#define PHY_ANALOG_TXRF6_PALNOFLUCT2G_LSB 11
+#define PHY_ANALOG_TXRF6_PALNOFLUCT2G_MASK 0x00000800
+#define PHY_ANALOG_TXRF6_PALNOFLUCT2G_GET(x) (((x) & 0x00000800) >> 11)
+#define PHY_ANALOG_TXRF6_PALNOFLUCT2G_SET(x) (((x) << 11) & 0x00000800)
+#define PHY_ANALOG_TXRF6_GAINSTEP2G_MSB 14
+#define PHY_ANALOG_TXRF6_GAINSTEP2G_LSB 12
+#define PHY_ANALOG_TXRF6_GAINSTEP2G_MASK 0x00007000
+#define PHY_ANALOG_TXRF6_GAINSTEP2G_GET(x) (((x) & 0x00007000) >> 12)
+#define PHY_ANALOG_TXRF6_GAINSTEP2G_SET(x) (((x) << 12) & 0x00007000)
+#define PHY_ANALOG_TXRF6_USE_GAIN_DELTA2G_MSB 15
+#define PHY_ANALOG_TXRF6_USE_GAIN_DELTA2G_LSB 15
+#define PHY_ANALOG_TXRF6_USE_GAIN_DELTA2G_MASK 0x00008000
+#define PHY_ANALOG_TXRF6_USE_GAIN_DELTA2G_GET(x) (((x) & 0x00008000) >> 15)
+#define PHY_ANALOG_TXRF6_USE_GAIN_DELTA2G_SET(x) (((x) << 15) & 0x00008000)
+#define PHY_ANALOG_TXRF6_CAPDIV_I2G_MSB 19
+#define PHY_ANALOG_TXRF6_CAPDIV_I2G_LSB 16
+#define PHY_ANALOG_TXRF6_CAPDIV_I2G_MASK 0x000f0000
+#define PHY_ANALOG_TXRF6_CAPDIV_I2G_GET(x) (((x) & 0x000f0000) >> 16)
+#define PHY_ANALOG_TXRF6_CAPDIV_I2G_SET(x) (((x) << 16) & 0x000f0000)
+#define PHY_ANALOG_TXRF6_PADRVGN_INDEX_I2G_MSB 23
+#define PHY_ANALOG_TXRF6_PADRVGN_INDEX_I2G_LSB 20
+#define PHY_ANALOG_TXRF6_PADRVGN_INDEX_I2G_MASK 0x00f00000
+#define PHY_ANALOG_TXRF6_PADRVGN_INDEX_I2G_GET(x) (((x) & 0x00f00000) >> 20)
+#define PHY_ANALOG_TXRF6_PADRVGN_INDEX_I2G_SET(x) (((x) << 20) & 0x00f00000)
+#define PHY_ANALOG_TXRF6_VCMONDELAY2G_MSB 26
+#define PHY_ANALOG_TXRF6_VCMONDELAY2G_LSB 24
+#define PHY_ANALOG_TXRF6_VCMONDELAY2G_MASK 0x07000000
+#define PHY_ANALOG_TXRF6_VCMONDELAY2G_GET(x) (((x) & 0x07000000) >> 24)
+#define PHY_ANALOG_TXRF6_VCMONDELAY2G_SET(x) (((x) << 24) & 0x07000000)
+#define PHY_ANALOG_TXRF6_CAPDIV2G_MSB 30
+#define PHY_ANALOG_TXRF6_CAPDIV2G_LSB 27
+#define PHY_ANALOG_TXRF6_CAPDIV2G_MASK 0x78000000
+#define PHY_ANALOG_TXRF6_CAPDIV2G_GET(x) (((x) & 0x78000000) >> 27)
+#define PHY_ANALOG_TXRF6_CAPDIV2G_SET(x) (((x) << 27) & 0x78000000)
+#define PHY_ANALOG_TXRF6_CAPDIV2GOVR_MSB 31
+#define PHY_ANALOG_TXRF6_CAPDIV2GOVR_LSB 31
+#define PHY_ANALOG_TXRF6_CAPDIV2GOVR_MASK 0x80000000
+#define PHY_ANALOG_TXRF6_CAPDIV2GOVR_GET(x) (((x) & 0x80000000) >> 31)
+#define PHY_ANALOG_TXRF6_CAPDIV2GOVR_SET(x) (((x) << 31) & 0x80000000)
+
+/* macros for TXRF7 */
+#define PHY_ANALOG_TXRF7_ADDRESS 0x00000058
+#define PHY_ANALOG_TXRF7_OFFSET 0x00000058
+#define PHY_ANALOG_TXRF7_SPARE7_MSB 1
+#define PHY_ANALOG_TXRF7_SPARE7_LSB 0
+#define PHY_ANALOG_TXRF7_SPARE7_MASK 0x00000003
+#define PHY_ANALOG_TXRF7_SPARE7_GET(x) (((x) & 0x00000003) >> 0)
+#define PHY_ANALOG_TXRF7_SPARE7_SET(x) (((x) << 0) & 0x00000003)
+#define PHY_ANALOG_TXRF7_PADRVGNTAB_4_MSB 7
+#define PHY_ANALOG_TXRF7_PADRVGNTAB_4_LSB 2
+#define PHY_ANALOG_TXRF7_PADRVGNTAB_4_MASK 0x000000fc
+#define PHY_ANALOG_TXRF7_PADRVGNTAB_4_GET(x) (((x) & 0x000000fc) >> 2)
+#define PHY_ANALOG_TXRF7_PADRVGNTAB_4_SET(x) (((x) << 2) & 0x000000fc)
+#define PHY_ANALOG_TXRF7_PADRVGNTAB_3_MSB 13
+#define PHY_ANALOG_TXRF7_PADRVGNTAB_3_LSB 8
+#define PHY_ANALOG_TXRF7_PADRVGNTAB_3_MASK 0x00003f00
+#define PHY_ANALOG_TXRF7_PADRVGNTAB_3_GET(x) (((x) & 0x00003f00) >> 8)
+#define PHY_ANALOG_TXRF7_PADRVGNTAB_3_SET(x) (((x) << 8) & 0x00003f00)
+#define PHY_ANALOG_TXRF7_PADRVGNTAB_2_MSB 19
+#define PHY_ANALOG_TXRF7_PADRVGNTAB_2_LSB 14
+#define PHY_ANALOG_TXRF7_PADRVGNTAB_2_MASK 0x000fc000
+#define PHY_ANALOG_TXRF7_PADRVGNTAB_2_GET(x) (((x) & 0x000fc000) >> 14)
+#define PHY_ANALOG_TXRF7_PADRVGNTAB_2_SET(x) (((x) << 14) & 0x000fc000)
+#define PHY_ANALOG_TXRF7_PADRVGNTAB_1_MSB 25
+#define PHY_ANALOG_TXRF7_PADRVGNTAB_1_LSB 20
+#define PHY_ANALOG_TXRF7_PADRVGNTAB_1_MASK 0x03f00000
+#define PHY_ANALOG_TXRF7_PADRVGNTAB_1_GET(x) (((x) & 0x03f00000) >> 20)
+#define PHY_ANALOG_TXRF7_PADRVGNTAB_1_SET(x) (((x) << 20) & 0x03f00000)
+#define PHY_ANALOG_TXRF7_PADRVGNTAB_0_MSB 31
+#define PHY_ANALOG_TXRF7_PADRVGNTAB_0_LSB 26
+#define PHY_ANALOG_TXRF7_PADRVGNTAB_0_MASK 0xfc000000
+#define PHY_ANALOG_TXRF7_PADRVGNTAB_0_GET(x) (((x) & 0xfc000000) >> 26)
+#define PHY_ANALOG_TXRF7_PADRVGNTAB_0_SET(x) (((x) << 26) & 0xfc000000)
+
+/* macros for TXRF8 */
+#define PHY_ANALOG_TXRF8_ADDRESS 0x0000005c
+#define PHY_ANALOG_TXRF8_OFFSET 0x0000005c
+#define PHY_ANALOG_TXRF8_SPARE8_MSB 1
+#define PHY_ANALOG_TXRF8_SPARE8_LSB 0
+#define PHY_ANALOG_TXRF8_SPARE8_MASK 0x00000003
+#define PHY_ANALOG_TXRF8_SPARE8_GET(x) (((x) & 0x00000003) >> 0)
+#define PHY_ANALOG_TXRF8_SPARE8_SET(x) (((x) << 0) & 0x00000003)
+#define PHY_ANALOG_TXRF8_PADRVGNTAB_9_MSB 7
+#define PHY_ANALOG_TXRF8_PADRVGNTAB_9_LSB 2
+#define PHY_ANALOG_TXRF8_PADRVGNTAB_9_MASK 0x000000fc
+#define PHY_ANALOG_TXRF8_PADRVGNTAB_9_GET(x) (((x) & 0x000000fc) >> 2)
+#define PHY_ANALOG_TXRF8_PADRVGNTAB_9_SET(x) (((x) << 2) & 0x000000fc)
+#define PHY_ANALOG_TXRF8_PADRVGNTAB_8_MSB 13
+#define PHY_ANALOG_TXRF8_PADRVGNTAB_8_LSB 8
+#define PHY_ANALOG_TXRF8_PADRVGNTAB_8_MASK 0x00003f00
+#define PHY_ANALOG_TXRF8_PADRVGNTAB_8_GET(x) (((x) & 0x00003f00) >> 8)
+#define PHY_ANALOG_TXRF8_PADRVGNTAB_8_SET(x) (((x) << 8) & 0x00003f00)
+#define PHY_ANALOG_TXRF8_PADRVGNTAB_7_MSB 19
+#define PHY_ANALOG_TXRF8_PADRVGNTAB_7_LSB 14
+#define PHY_ANALOG_TXRF8_PADRVGNTAB_7_MASK 0x000fc000
+#define PHY_ANALOG_TXRF8_PADRVGNTAB_7_GET(x) (((x) & 0x000fc000) >> 14)
+#define PHY_ANALOG_TXRF8_PADRVGNTAB_7_SET(x) (((x) << 14) & 0x000fc000)
+#define PHY_ANALOG_TXRF8_PADRVGNTAB_6_MSB 25
+#define PHY_ANALOG_TXRF8_PADRVGNTAB_6_LSB 20
+#define PHY_ANALOG_TXRF8_PADRVGNTAB_6_MASK 0x03f00000
+#define PHY_ANALOG_TXRF8_PADRVGNTAB_6_GET(x) (((x) & 0x03f00000) >> 20)
+#define PHY_ANALOG_TXRF8_PADRVGNTAB_6_SET(x) (((x) << 20) & 0x03f00000)
+#define PHY_ANALOG_TXRF8_PADRVGNTAB_5_MSB 31
+#define PHY_ANALOG_TXRF8_PADRVGNTAB_5_LSB 26
+#define PHY_ANALOG_TXRF8_PADRVGNTAB_5_MASK 0xfc000000
+#define PHY_ANALOG_TXRF8_PADRVGNTAB_5_GET(x) (((x) & 0xfc000000) >> 26)
+#define PHY_ANALOG_TXRF8_PADRVGNTAB_5_SET(x) (((x) << 26) & 0xfc000000)
+
+/* macros for TXRF9 */
+#define PHY_ANALOG_TXRF9_ADDRESS 0x00000060
+#define PHY_ANALOG_TXRF9_OFFSET 0x00000060
+#define PHY_ANALOG_TXRF9_SPARE9_MSB 1
+#define PHY_ANALOG_TXRF9_SPARE9_LSB 0
+#define PHY_ANALOG_TXRF9_SPARE9_MASK 0x00000003
+#define PHY_ANALOG_TXRF9_SPARE9_GET(x) (((x) & 0x00000003) >> 0)
+#define PHY_ANALOG_TXRF9_SPARE9_SET(x) (((x) << 0) & 0x00000003)
+#define PHY_ANALOG_TXRF9_PADRVGNTAB_14_MSB 7
+#define PHY_ANALOG_TXRF9_PADRVGNTAB_14_LSB 2
+#define PHY_ANALOG_TXRF9_PADRVGNTAB_14_MASK 0x000000fc
+#define PHY_ANALOG_TXRF9_PADRVGNTAB_14_GET(x) (((x) & 0x000000fc) >> 2)
+#define PHY_ANALOG_TXRF9_PADRVGNTAB_14_SET(x) (((x) << 2) & 0x000000fc)
+#define PHY_ANALOG_TXRF9_PADRVGNTAB_13_MSB 13
+#define PHY_ANALOG_TXRF9_PADRVGNTAB_13_LSB 8
+#define PHY_ANALOG_TXRF9_PADRVGNTAB_13_MASK 0x00003f00
+#define PHY_ANALOG_TXRF9_PADRVGNTAB_13_GET(x) (((x) & 0x00003f00) >> 8)
+#define PHY_ANALOG_TXRF9_PADRVGNTAB_13_SET(x) (((x) << 8) & 0x00003f00)
+#define PHY_ANALOG_TXRF9_PADRVGNTAB_12_MSB 19
+#define PHY_ANALOG_TXRF9_PADRVGNTAB_12_LSB 14
+#define PHY_ANALOG_TXRF9_PADRVGNTAB_12_MASK 0x000fc000
+#define PHY_ANALOG_TXRF9_PADRVGNTAB_12_GET(x) (((x) & 0x000fc000) >> 14)
+#define PHY_ANALOG_TXRF9_PADRVGNTAB_12_SET(x) (((x) << 14) & 0x000fc000)
+#define PHY_ANALOG_TXRF9_PADRVGNTAB_11_MSB 25
+#define PHY_ANALOG_TXRF9_PADRVGNTAB_11_LSB 20
+#define PHY_ANALOG_TXRF9_PADRVGNTAB_11_MASK 0x03f00000
+#define PHY_ANALOG_TXRF9_PADRVGNTAB_11_GET(x) (((x) & 0x03f00000) >> 20)
+#define PHY_ANALOG_TXRF9_PADRVGNTAB_11_SET(x) (((x) << 20) & 0x03f00000)
+#define PHY_ANALOG_TXRF9_PADRVGNTAB_10_MSB 31
+#define PHY_ANALOG_TXRF9_PADRVGNTAB_10_LSB 26
+#define PHY_ANALOG_TXRF9_PADRVGNTAB_10_MASK 0xfc000000
+#define PHY_ANALOG_TXRF9_PADRVGNTAB_10_GET(x) (((x) & 0xfc000000) >> 26)
+#define PHY_ANALOG_TXRF9_PADRVGNTAB_10_SET(x) (((x) << 26) & 0xfc000000)
+
+/* macros for TXRF10 */
+#define PHY_ANALOG_TXRF10_ADDRESS 0x00000064
+#define PHY_ANALOG_TXRF10_OFFSET 0x00000064
+#define PHY_ANALOG_TXRF10_SPARE10_MSB 2
+#define PHY_ANALOG_TXRF10_SPARE10_LSB 0
+#define PHY_ANALOG_TXRF10_SPARE10_MASK 0x00000007
+#define PHY_ANALOG_TXRF10_SPARE10_GET(x) (((x) & 0x00000007) >> 0)
+#define PHY_ANALOG_TXRF10_SPARE10_SET(x) (((x) << 0) & 0x00000007)
+#define PHY_ANALOG_TXRF10_PDOUT5G_3CALTX_MSB 3
+#define PHY_ANALOG_TXRF10_PDOUT5G_3CALTX_LSB 3
+#define PHY_ANALOG_TXRF10_PDOUT5G_3CALTX_MASK 0x00000008
+#define PHY_ANALOG_TXRF10_PDOUT5G_3CALTX_GET(x) (((x) & 0x00000008) >> 3)
+#define PHY_ANALOG_TXRF10_PDOUT5G_3CALTX_SET(x) (((x) << 3) & 0x00000008)
+#define PHY_ANALOG_TXRF10_D3B5GCALTX_MSB 6
+#define PHY_ANALOG_TXRF10_D3B5GCALTX_LSB 4
+#define PHY_ANALOG_TXRF10_D3B5GCALTX_MASK 0x00000070
+#define PHY_ANALOG_TXRF10_D3B5GCALTX_GET(x) (((x) & 0x00000070) >> 4)
+#define PHY_ANALOG_TXRF10_D3B5GCALTX_SET(x) (((x) << 4) & 0x00000070)
+#define PHY_ANALOG_TXRF10_D4B5GCALTX_MSB 9
+#define PHY_ANALOG_TXRF10_D4B5GCALTX_LSB 7
+#define PHY_ANALOG_TXRF10_D4B5GCALTX_MASK 0x00000380
+#define PHY_ANALOG_TXRF10_D4B5GCALTX_GET(x) (((x) & 0x00000380) >> 7)
+#define PHY_ANALOG_TXRF10_D4B5GCALTX_SET(x) (((x) << 7) & 0x00000380)
+#define PHY_ANALOG_TXRF10_PADRVGN2GCALTX_MSB 16
+#define PHY_ANALOG_TXRF10_PADRVGN2GCALTX_LSB 10
+#define PHY_ANALOG_TXRF10_PADRVGN2GCALTX_MASK 0x0001fc00
+#define PHY_ANALOG_TXRF10_PADRVGN2GCALTX_GET(x) (((x) & 0x0001fc00) >> 10)
+#define PHY_ANALOG_TXRF10_PADRVGN2GCALTX_SET(x) (((x) << 10) & 0x0001fc00)
+#define PHY_ANALOG_TXRF10_DB2GCALTX_MSB 19
+#define PHY_ANALOG_TXRF10_DB2GCALTX_LSB 17
+#define PHY_ANALOG_TXRF10_DB2GCALTX_MASK 0x000e0000
+#define PHY_ANALOG_TXRF10_DB2GCALTX_GET(x) (((x) & 0x000e0000) >> 17)
+#define PHY_ANALOG_TXRF10_DB2GCALTX_SET(x) (((x) << 17) & 0x000e0000)
+#define PHY_ANALOG_TXRF10_CALTXSHIFT_MSB 20
+#define PHY_ANALOG_TXRF10_CALTXSHIFT_LSB 20
+#define PHY_ANALOG_TXRF10_CALTXSHIFT_MASK 0x00100000
+#define PHY_ANALOG_TXRF10_CALTXSHIFT_GET(x) (((x) & 0x00100000) >> 20)
+#define PHY_ANALOG_TXRF10_CALTXSHIFT_SET(x) (((x) << 20) & 0x00100000)
+#define PHY_ANALOG_TXRF10_CALTXSHIFTOVR_MSB 21
+#define PHY_ANALOG_TXRF10_CALTXSHIFTOVR_LSB 21
+#define PHY_ANALOG_TXRF10_CALTXSHIFTOVR_MASK 0x00200000
+#define PHY_ANALOG_TXRF10_CALTXSHIFTOVR_GET(x) (((x) & 0x00200000) >> 21)
+#define PHY_ANALOG_TXRF10_CALTXSHIFTOVR_SET(x) (((x) << 21) & 0x00200000)
+#define PHY_ANALOG_TXRF10_PADRVGN2G_SMOUT_MSB 27
+#define PHY_ANALOG_TXRF10_PADRVGN2G_SMOUT_LSB 22
+#define PHY_ANALOG_TXRF10_PADRVGN2G_SMOUT_MASK 0x0fc00000
+#define PHY_ANALOG_TXRF10_PADRVGN2G_SMOUT_GET(x) (((x) & 0x0fc00000) >> 22)
+#define PHY_ANALOG_TXRF10_PADRVGN_INDEX2G_SMOUT_MSB 31
+#define PHY_ANALOG_TXRF10_PADRVGN_INDEX2G_SMOUT_LSB 28
+#define PHY_ANALOG_TXRF10_PADRVGN_INDEX2G_SMOUT_MASK 0xf0000000
+#define PHY_ANALOG_TXRF10_PADRVGN_INDEX2G_SMOUT_GET(x) (((x) & 0xf0000000) >> 28)
+
+/* macros for TXRF11 */
+#define PHY_ANALOG_TXRF11_ADDRESS 0x00000068
+#define PHY_ANALOG_TXRF11_OFFSET 0x00000068
+#define PHY_ANALOG_TXRF11_SPARE11_MSB 1
+#define PHY_ANALOG_TXRF11_SPARE11_LSB 0
+#define PHY_ANALOG_TXRF11_SPARE11_MASK 0x00000003
+#define PHY_ANALOG_TXRF11_SPARE11_GET(x) (((x) & 0x00000003) >> 0)
+#define PHY_ANALOG_TXRF11_SPARE11_SET(x) (((x) << 0) & 0x00000003)
+#define PHY_ANALOG_TXRF11_PWD_IR25MIXDIV5G_MSB 4
+#define PHY_ANALOG_TXRF11_PWD_IR25MIXDIV5G_LSB 2
+#define PHY_ANALOG_TXRF11_PWD_IR25MIXDIV5G_MASK 0x0000001c
+#define PHY_ANALOG_TXRF11_PWD_IR25MIXDIV5G_GET(x) (((x) & 0x0000001c) >> 2)
+#define PHY_ANALOG_TXRF11_PWD_IR25MIXDIV5G_SET(x) (((x) << 2) & 0x0000001c)
+#define PHY_ANALOG_TXRF11_PWD_IR25PA2G_MSB 7
+#define PHY_ANALOG_TXRF11_PWD_IR25PA2G_LSB 5
+#define PHY_ANALOG_TXRF11_PWD_IR25PA2G_MASK 0x000000e0
+#define PHY_ANALOG_TXRF11_PWD_IR25PA2G_GET(x) (((x) & 0x000000e0) >> 5)
+#define PHY_ANALOG_TXRF11_PWD_IR25PA2G_SET(x) (((x) << 5) & 0x000000e0)
+#define PHY_ANALOG_TXRF11_PWD_IR25MIXBIAS2G_MSB 10
+#define PHY_ANALOG_TXRF11_PWD_IR25MIXBIAS2G_LSB 8
+#define PHY_ANALOG_TXRF11_PWD_IR25MIXBIAS2G_MASK 0x00000700
+#define PHY_ANALOG_TXRF11_PWD_IR25MIXBIAS2G_GET(x) (((x) & 0x00000700) >> 8)
+#define PHY_ANALOG_TXRF11_PWD_IR25MIXBIAS2G_SET(x) (((x) << 8) & 0x00000700)
+#define PHY_ANALOG_TXRF11_PWD_IR25MIXDIV2G_MSB 13
+#define PHY_ANALOG_TXRF11_PWD_IR25MIXDIV2G_LSB 11
+#define PHY_ANALOG_TXRF11_PWD_IR25MIXDIV2G_MASK 0x00003800
+#define PHY_ANALOG_TXRF11_PWD_IR25MIXDIV2G_GET(x) (((x) & 0x00003800) >> 11)
+#define PHY_ANALOG_TXRF11_PWD_IR25MIXDIV2G_SET(x) (((x) << 11) & 0x00003800)
+#define PHY_ANALOG_TXRF11_PWD_ICSPARE_MSB 16
+#define PHY_ANALOG_TXRF11_PWD_ICSPARE_LSB 14
+#define PHY_ANALOG_TXRF11_PWD_ICSPARE_MASK 0x0001c000
+#define PHY_ANALOG_TXRF11_PWD_ICSPARE_GET(x) (((x) & 0x0001c000) >> 14)
+#define PHY_ANALOG_TXRF11_PWD_ICSPARE_SET(x) (((x) << 14) & 0x0001c000)
+#define PHY_ANALOG_TXRF11_PWD_IC25TEMPSEN_MSB 19
+#define PHY_ANALOG_TXRF11_PWD_IC25TEMPSEN_LSB 17
+#define PHY_ANALOG_TXRF11_PWD_IC25TEMPSEN_MASK 0x000e0000
+#define PHY_ANALOG_TXRF11_PWD_IC25TEMPSEN_GET(x) (((x) & 0x000e0000) >> 17)
+#define PHY_ANALOG_TXRF11_PWD_IC25TEMPSEN_SET(x) (((x) << 17) & 0x000e0000)
+#define PHY_ANALOG_TXRF11_PWD_IC25PA5G2_MSB 22
+#define PHY_ANALOG_TXRF11_PWD_IC25PA5G2_LSB 20
+#define PHY_ANALOG_TXRF11_PWD_IC25PA5G2_MASK 0x00700000
+#define PHY_ANALOG_TXRF11_PWD_IC25PA5G2_GET(x) (((x) & 0x00700000) >> 20)
+#define PHY_ANALOG_TXRF11_PWD_IC25PA5G2_SET(x) (((x) << 20) & 0x00700000)
+#define PHY_ANALOG_TXRF11_PWD_IC25PA5G1_MSB 25
+#define PHY_ANALOG_TXRF11_PWD_IC25PA5G1_LSB 23
+#define PHY_ANALOG_TXRF11_PWD_IC25PA5G1_MASK 0x03800000
+#define PHY_ANALOG_TXRF11_PWD_IC25PA5G1_GET(x) (((x) & 0x03800000) >> 23)
+#define PHY_ANALOG_TXRF11_PWD_IC25PA5G1_SET(x) (((x) << 23) & 0x03800000)
+#define PHY_ANALOG_TXRF11_PWD_IC25MIXBUF5G_MSB 28
+#define PHY_ANALOG_TXRF11_PWD_IC25MIXBUF5G_LSB 26
+#define PHY_ANALOG_TXRF11_PWD_IC25MIXBUF5G_MASK 0x1c000000
+#define PHY_ANALOG_TXRF11_PWD_IC25MIXBUF5G_GET(x) (((x) & 0x1c000000) >> 26)
+#define PHY_ANALOG_TXRF11_PWD_IC25MIXBUF5G_SET(x) (((x) << 26) & 0x1c000000)
+#define PHY_ANALOG_TXRF11_PWD_IC25PA2G_MSB 31
+#define PHY_ANALOG_TXRF11_PWD_IC25PA2G_LSB 29
+#define PHY_ANALOG_TXRF11_PWD_IC25PA2G_MASK 0xe0000000
+#define PHY_ANALOG_TXRF11_PWD_IC25PA2G_GET(x) (((x) & 0xe0000000) >> 29)
+#define PHY_ANALOG_TXRF11_PWD_IC25PA2G_SET(x) (((x) << 29) & 0xe0000000)
+
+/* macros for TXRF12 */
+#define PHY_ANALOG_TXRF12_ADDRESS 0x0000006c
+#define PHY_ANALOG_TXRF12_OFFSET 0x0000006c
+#define PHY_ANALOG_TXRF12_SPARE12_2_MSB 7
+#define PHY_ANALOG_TXRF12_SPARE12_2_LSB 0
+#define PHY_ANALOG_TXRF12_SPARE12_2_MASK 0x000000ff
+#define PHY_ANALOG_TXRF12_SPARE12_2_GET(x) (((x) & 0x000000ff) >> 0)
+#define PHY_ANALOG_TXRF12_SPARE12_1_MSB 9
+#define PHY_ANALOG_TXRF12_SPARE12_1_LSB 8
+#define PHY_ANALOG_TXRF12_SPARE12_1_MASK 0x00000300
+#define PHY_ANALOG_TXRF12_SPARE12_1_GET(x) (((x) & 0x00000300) >> 8)
+#define PHY_ANALOG_TXRF12_SPARE12_1_SET(x) (((x) << 8) & 0x00000300)
+#define PHY_ANALOG_TXRF12_ATBSEL5G_MSB 13
+#define PHY_ANALOG_TXRF12_ATBSEL5G_LSB 10
+#define PHY_ANALOG_TXRF12_ATBSEL5G_MASK 0x00003c00
+#define PHY_ANALOG_TXRF12_ATBSEL5G_GET(x) (((x) & 0x00003c00) >> 10)
+#define PHY_ANALOG_TXRF12_ATBSEL5G_SET(x) (((x) << 10) & 0x00003c00)
+#define PHY_ANALOG_TXRF12_ATBSEL2G_MSB 16
+#define PHY_ANALOG_TXRF12_ATBSEL2G_LSB 14
+#define PHY_ANALOG_TXRF12_ATBSEL2G_MASK 0x0001c000
+#define PHY_ANALOG_TXRF12_ATBSEL2G_GET(x) (((x) & 0x0001c000) >> 14)
+#define PHY_ANALOG_TXRF12_ATBSEL2G_SET(x) (((x) << 14) & 0x0001c000)
+#define PHY_ANALOG_TXRF12_PWD_IRSPARE_MSB 19
+#define PHY_ANALOG_TXRF12_PWD_IRSPARE_LSB 17
+#define PHY_ANALOG_TXRF12_PWD_IRSPARE_MASK 0x000e0000
+#define PHY_ANALOG_TXRF12_PWD_IRSPARE_GET(x) (((x) & 0x000e0000) >> 17)
+#define PHY_ANALOG_TXRF12_PWD_IRSPARE_SET(x) (((x) << 17) & 0x000e0000)
+#define PHY_ANALOG_TXRF12_PWD_IR25TEMPSEN_MSB 22
+#define PHY_ANALOG_TXRF12_PWD_IR25TEMPSEN_LSB 20
+#define PHY_ANALOG_TXRF12_PWD_IR25TEMPSEN_MASK 0x00700000
+#define PHY_ANALOG_TXRF12_PWD_IR25TEMPSEN_GET(x) (((x) & 0x00700000) >> 20)
+#define PHY_ANALOG_TXRF12_PWD_IR25TEMPSEN_SET(x) (((x) << 20) & 0x00700000)
+#define PHY_ANALOG_TXRF12_PWD_IR25PA5G2_MSB 25
+#define PHY_ANALOG_TXRF12_PWD_IR25PA5G2_LSB 23
+#define PHY_ANALOG_TXRF12_PWD_IR25PA5G2_MASK 0x03800000
+#define PHY_ANALOG_TXRF12_PWD_IR25PA5G2_GET(x) (((x) & 0x03800000) >> 23)
+#define PHY_ANALOG_TXRF12_PWD_IR25PA5G2_SET(x) (((x) << 23) & 0x03800000)
+#define PHY_ANALOG_TXRF12_PWD_IR25PA5G1_MSB 28
+#define PHY_ANALOG_TXRF12_PWD_IR25PA5G1_LSB 26
+#define PHY_ANALOG_TXRF12_PWD_IR25PA5G1_MASK 0x1c000000
+#define PHY_ANALOG_TXRF12_PWD_IR25PA5G1_GET(x) (((x) & 0x1c000000) >> 26)
+#define PHY_ANALOG_TXRF12_PWD_IR25PA5G1_SET(x) (((x) << 26) & 0x1c000000)
+#define PHY_ANALOG_TXRF12_PWD_IR25MIXBIAS5G_MSB 31
+#define PHY_ANALOG_TXRF12_PWD_IR25MIXBIAS5G_LSB 29
+#define PHY_ANALOG_TXRF12_PWD_IR25MIXBIAS5G_MASK 0xe0000000
+#define PHY_ANALOG_TXRF12_PWD_IR25MIXBIAS5G_GET(x) (((x) & 0xe0000000) >> 29)
+#define PHY_ANALOG_TXRF12_PWD_IR25MIXBIAS5G_SET(x) (((x) << 29) & 0xe0000000)
+
+/* macros for SYNTH1 */
+#define PHY_ANALOG_SYNTH1_ADDRESS 0x00000080
+#define PHY_ANALOG_SYNTH1_OFFSET 0x00000080
+#define PHY_ANALOG_SYNTH1_SEL_VCMONABUS_MSB 2
+#define PHY_ANALOG_SYNTH1_SEL_VCMONABUS_LSB 0
+#define PHY_ANALOG_SYNTH1_SEL_VCMONABUS_MASK 0x00000007
+#define PHY_ANALOG_SYNTH1_SEL_VCMONABUS_GET(x) (((x) & 0x00000007) >> 0)
+#define PHY_ANALOG_SYNTH1_SEL_VCMONABUS_SET(x) (((x) << 0) & 0x00000007)
+#define PHY_ANALOG_SYNTH1_SEL_VCOABUS_MSB 5
+#define PHY_ANALOG_SYNTH1_SEL_VCOABUS_LSB 3
+#define PHY_ANALOG_SYNTH1_SEL_VCOABUS_MASK 0x00000038
+#define PHY_ANALOG_SYNTH1_SEL_VCOABUS_GET(x) (((x) & 0x00000038) >> 3)
+#define PHY_ANALOG_SYNTH1_SEL_VCOABUS_SET(x) (((x) << 3) & 0x00000038)
+#define PHY_ANALOG_SYNTH1_MONITOR_SYNTHLOCKVCOK_MSB 6
+#define PHY_ANALOG_SYNTH1_MONITOR_SYNTHLOCKVCOK_LSB 6
+#define PHY_ANALOG_SYNTH1_MONITOR_SYNTHLOCKVCOK_MASK 0x00000040
+#define PHY_ANALOG_SYNTH1_MONITOR_SYNTHLOCKVCOK_GET(x) (((x) & 0x00000040) >> 6)
+#define PHY_ANALOG_SYNTH1_MONITOR_SYNTHLOCKVCOK_SET(x) (((x) << 6) & 0x00000040)
+#define PHY_ANALOG_SYNTH1_MONITOR_VC2LOW_MSB 7
+#define PHY_ANALOG_SYNTH1_MONITOR_VC2LOW_LSB 7
+#define PHY_ANALOG_SYNTH1_MONITOR_VC2LOW_MASK 0x00000080
+#define PHY_ANALOG_SYNTH1_MONITOR_VC2LOW_GET(x) (((x) & 0x00000080) >> 7)
+#define PHY_ANALOG_SYNTH1_MONITOR_VC2LOW_SET(x) (((x) << 7) & 0x00000080)
+#define PHY_ANALOG_SYNTH1_MONITOR_VC2HIGH_MSB 8
+#define PHY_ANALOG_SYNTH1_MONITOR_VC2HIGH_LSB 8
+#define PHY_ANALOG_SYNTH1_MONITOR_VC2HIGH_MASK 0x00000100
+#define PHY_ANALOG_SYNTH1_MONITOR_VC2HIGH_GET(x) (((x) & 0x00000100) >> 8)
+#define PHY_ANALOG_SYNTH1_MONITOR_VC2HIGH_SET(x) (((x) << 8) & 0x00000100)
+#define PHY_ANALOG_SYNTH1_MONITOR_FB_DIV2_MSB 9
+#define PHY_ANALOG_SYNTH1_MONITOR_FB_DIV2_LSB 9
+#define PHY_ANALOG_SYNTH1_MONITOR_FB_DIV2_MASK 0x00000200
+#define PHY_ANALOG_SYNTH1_MONITOR_FB_DIV2_GET(x) (((x) & 0x00000200) >> 9)
+#define PHY_ANALOG_SYNTH1_MONITOR_FB_DIV2_SET(x) (((x) << 9) & 0x00000200)
+#define PHY_ANALOG_SYNTH1_MONITOR_REF_MSB 10
+#define PHY_ANALOG_SYNTH1_MONITOR_REF_LSB 10
+#define PHY_ANALOG_SYNTH1_MONITOR_REF_MASK 0x00000400
+#define PHY_ANALOG_SYNTH1_MONITOR_REF_GET(x) (((x) & 0x00000400) >> 10)
+#define PHY_ANALOG_SYNTH1_MONITOR_REF_SET(x) (((x) << 10) & 0x00000400)
+#define PHY_ANALOG_SYNTH1_MONITOR_FB_MSB 11
+#define PHY_ANALOG_SYNTH1_MONITOR_FB_LSB 11
+#define PHY_ANALOG_SYNTH1_MONITOR_FB_MASK 0x00000800
+#define PHY_ANALOG_SYNTH1_MONITOR_FB_GET(x) (((x) & 0x00000800) >> 11)
+#define PHY_ANALOG_SYNTH1_MONITOR_FB_SET(x) (((x) << 11) & 0x00000800)
+#define PHY_ANALOG_SYNTH1_SEVENBITVCOCAP_MSB 12
+#define PHY_ANALOG_SYNTH1_SEVENBITVCOCAP_LSB 12
+#define PHY_ANALOG_SYNTH1_SEVENBITVCOCAP_MASK 0x00001000
+#define PHY_ANALOG_SYNTH1_SEVENBITVCOCAP_GET(x) (((x) & 0x00001000) >> 12)
+#define PHY_ANALOG_SYNTH1_SEVENBITVCOCAP_SET(x) (((x) << 12) & 0x00001000)
+#define PHY_ANALOG_SYNTH1_PWUP_PD_MSB 15
+#define PHY_ANALOG_SYNTH1_PWUP_PD_LSB 13
+#define PHY_ANALOG_SYNTH1_PWUP_PD_MASK 0x0000e000
+#define PHY_ANALOG_SYNTH1_PWUP_PD_GET(x) (((x) & 0x0000e000) >> 13)
+#define PHY_ANALOG_SYNTH1_PWUP_PD_SET(x) (((x) << 13) & 0x0000e000)
+#define PHY_ANALOG_SYNTH1_PWD_VCOBUF_MSB 16
+#define PHY_ANALOG_SYNTH1_PWD_VCOBUF_LSB 16
+#define PHY_ANALOG_SYNTH1_PWD_VCOBUF_MASK 0x00010000
+#define PHY_ANALOG_SYNTH1_PWD_VCOBUF_GET(x) (((x) & 0x00010000) >> 16)
+#define PHY_ANALOG_SYNTH1_PWD_VCOBUF_SET(x) (((x) << 16) & 0x00010000)
+#define PHY_ANALOG_SYNTH1_VCOBUFGAIN_MSB 18
+#define PHY_ANALOG_SYNTH1_VCOBUFGAIN_LSB 17
+#define PHY_ANALOG_SYNTH1_VCOBUFGAIN_MASK 0x00060000
+#define PHY_ANALOG_SYNTH1_VCOBUFGAIN_GET(x) (((x) & 0x00060000) >> 17)
+#define PHY_ANALOG_SYNTH1_VCOBUFGAIN_SET(x) (((x) << 17) & 0x00060000)
+#define PHY_ANALOG_SYNTH1_VCOREGLEVEL_MSB 20
+#define PHY_ANALOG_SYNTH1_VCOREGLEVEL_LSB 19
+#define PHY_ANALOG_SYNTH1_VCOREGLEVEL_MASK 0x00180000
+#define PHY_ANALOG_SYNTH1_VCOREGLEVEL_GET(x) (((x) & 0x00180000) >> 19)
+#define PHY_ANALOG_SYNTH1_VCOREGLEVEL_SET(x) (((x) << 19) & 0x00180000)
+#define PHY_ANALOG_SYNTH1_VCOREGBYPASS_MSB 21
+#define PHY_ANALOG_SYNTH1_VCOREGBYPASS_LSB 21
+#define PHY_ANALOG_SYNTH1_VCOREGBYPASS_MASK 0x00200000
+#define PHY_ANALOG_SYNTH1_VCOREGBYPASS_GET(x) (((x) & 0x00200000) >> 21)
+#define PHY_ANALOG_SYNTH1_VCOREGBYPASS_SET(x) (((x) << 21) & 0x00200000)
+#define PHY_ANALOG_SYNTH1_PWUP_LOREF_MSB 22
+#define PHY_ANALOG_SYNTH1_PWUP_LOREF_LSB 22
+#define PHY_ANALOG_SYNTH1_PWUP_LOREF_MASK 0x00400000
+#define PHY_ANALOG_SYNTH1_PWUP_LOREF_GET(x) (((x) & 0x00400000) >> 22)
+#define PHY_ANALOG_SYNTH1_PWUP_LOREF_SET(x) (((x) << 22) & 0x00400000)
+#define PHY_ANALOG_SYNTH1_PWD_LOMIX_MSB 23
+#define PHY_ANALOG_SYNTH1_PWD_LOMIX_LSB 23
+#define PHY_ANALOG_SYNTH1_PWD_LOMIX_MASK 0x00800000
+#define PHY_ANALOG_SYNTH1_PWD_LOMIX_GET(x) (((x) & 0x00800000) >> 23)
+#define PHY_ANALOG_SYNTH1_PWD_LOMIX_SET(x) (((x) << 23) & 0x00800000)
+#define PHY_ANALOG_SYNTH1_PWD_LODIV_MSB 24
+#define PHY_ANALOG_SYNTH1_PWD_LODIV_LSB 24
+#define PHY_ANALOG_SYNTH1_PWD_LODIV_MASK 0x01000000
+#define PHY_ANALOG_SYNTH1_PWD_LODIV_GET(x) (((x) & 0x01000000) >> 24)
+#define PHY_ANALOG_SYNTH1_PWD_LODIV_SET(x) (((x) << 24) & 0x01000000)
+#define PHY_ANALOG_SYNTH1_PWD_LOBUF5G_MSB 25
+#define PHY_ANALOG_SYNTH1_PWD_LOBUF5G_LSB 25
+#define PHY_ANALOG_SYNTH1_PWD_LOBUF5G_MASK 0x02000000
+#define PHY_ANALOG_SYNTH1_PWD_LOBUF5G_GET(x) (((x) & 0x02000000) >> 25)
+#define PHY_ANALOG_SYNTH1_PWD_LOBUF5G_SET(x) (((x) << 25) & 0x02000000)
+#define PHY_ANALOG_SYNTH1_PWD_LOBUF2G_MSB 26
+#define PHY_ANALOG_SYNTH1_PWD_LOBUF2G_LSB 26
+#define PHY_ANALOG_SYNTH1_PWD_LOBUF2G_MASK 0x04000000
+#define PHY_ANALOG_SYNTH1_PWD_LOBUF2G_GET(x) (((x) & 0x04000000) >> 26)
+#define PHY_ANALOG_SYNTH1_PWD_LOBUF2G_SET(x) (((x) << 26) & 0x04000000)
+#define PHY_ANALOG_SYNTH1_PWD_PRESC_MSB 27
+#define PHY_ANALOG_SYNTH1_PWD_PRESC_LSB 27
+#define PHY_ANALOG_SYNTH1_PWD_PRESC_MASK 0x08000000
+#define PHY_ANALOG_SYNTH1_PWD_PRESC_GET(x) (((x) & 0x08000000) >> 27)
+#define PHY_ANALOG_SYNTH1_PWD_PRESC_SET(x) (((x) << 27) & 0x08000000)
+#define PHY_ANALOG_SYNTH1_PWD_VCO_MSB 28
+#define PHY_ANALOG_SYNTH1_PWD_VCO_LSB 28
+#define PHY_ANALOG_SYNTH1_PWD_VCO_MASK 0x10000000
+#define PHY_ANALOG_SYNTH1_PWD_VCO_GET(x) (((x) & 0x10000000) >> 28)
+#define PHY_ANALOG_SYNTH1_PWD_VCO_SET(x) (((x) << 28) & 0x10000000)
+#define PHY_ANALOG_SYNTH1_PWD_VCMON_MSB 29
+#define PHY_ANALOG_SYNTH1_PWD_VCMON_LSB 29
+#define PHY_ANALOG_SYNTH1_PWD_VCMON_MASK 0x20000000
+#define PHY_ANALOG_SYNTH1_PWD_VCMON_GET(x) (((x) & 0x20000000) >> 29)
+#define PHY_ANALOG_SYNTH1_PWD_VCMON_SET(x) (((x) << 29) & 0x20000000)
+#define PHY_ANALOG_SYNTH1_PWD_CP_MSB 30
+#define PHY_ANALOG_SYNTH1_PWD_CP_LSB 30
+#define PHY_ANALOG_SYNTH1_PWD_CP_MASK 0x40000000
+#define PHY_ANALOG_SYNTH1_PWD_CP_GET(x) (((x) & 0x40000000) >> 30)
+#define PHY_ANALOG_SYNTH1_PWD_CP_SET(x) (((x) << 30) & 0x40000000)
+#define PHY_ANALOG_SYNTH1_PWD_BIAS_MSB 31
+#define PHY_ANALOG_SYNTH1_PWD_BIAS_LSB 31
+#define PHY_ANALOG_SYNTH1_PWD_BIAS_MASK 0x80000000
+#define PHY_ANALOG_SYNTH1_PWD_BIAS_GET(x) (((x) & 0x80000000) >> 31)
+#define PHY_ANALOG_SYNTH1_PWD_BIAS_SET(x) (((x) << 31) & 0x80000000)
+
+/* macros for SYNTH2 */
+#define PHY_ANALOG_SYNTH2_ADDRESS 0x00000084
+#define PHY_ANALOG_SYNTH2_OFFSET 0x00000084
+#define PHY_ANALOG_SYNTH2_CAPRANGE3_MSB 3
+#define PHY_ANALOG_SYNTH2_CAPRANGE3_LSB 0
+#define PHY_ANALOG_SYNTH2_CAPRANGE3_MASK 0x0000000f
+#define PHY_ANALOG_SYNTH2_CAPRANGE3_GET(x) (((x) & 0x0000000f) >> 0)
+#define PHY_ANALOG_SYNTH2_CAPRANGE3_SET(x) (((x) << 0) & 0x0000000f)
+#define PHY_ANALOG_SYNTH2_CAPRANGE2_MSB 7
+#define PHY_ANALOG_SYNTH2_CAPRANGE2_LSB 4
+#define PHY_ANALOG_SYNTH2_CAPRANGE2_MASK 0x000000f0
+#define PHY_ANALOG_SYNTH2_CAPRANGE2_GET(x) (((x) & 0x000000f0) >> 4)
+#define PHY_ANALOG_SYNTH2_CAPRANGE2_SET(x) (((x) << 4) & 0x000000f0)
+#define PHY_ANALOG_SYNTH2_CAPRANGE1_MSB 11
+#define PHY_ANALOG_SYNTH2_CAPRANGE1_LSB 8
+#define PHY_ANALOG_SYNTH2_CAPRANGE1_MASK 0x00000f00
+#define PHY_ANALOG_SYNTH2_CAPRANGE1_GET(x) (((x) & 0x00000f00) >> 8)
+#define PHY_ANALOG_SYNTH2_CAPRANGE1_SET(x) (((x) << 8) & 0x00000f00)
+#define PHY_ANALOG_SYNTH2_LOOPLEAKCUR_INTN_MSB 15
+#define PHY_ANALOG_SYNTH2_LOOPLEAKCUR_INTN_LSB 12
+#define PHY_ANALOG_SYNTH2_LOOPLEAKCUR_INTN_MASK 0x0000f000
+#define PHY_ANALOG_SYNTH2_LOOPLEAKCUR_INTN_GET(x) (((x) & 0x0000f000) >> 12)
+#define PHY_ANALOG_SYNTH2_LOOPLEAKCUR_INTN_SET(x) (((x) << 12) & 0x0000f000)
+#define PHY_ANALOG_SYNTH2_CPLOWLK_INTN_MSB 16
+#define PHY_ANALOG_SYNTH2_CPLOWLK_INTN_LSB 16
+#define PHY_ANALOG_SYNTH2_CPLOWLK_INTN_MASK 0x00010000
+#define PHY_ANALOG_SYNTH2_CPLOWLK_INTN_GET(x) (((x) & 0x00010000) >> 16)
+#define PHY_ANALOG_SYNTH2_CPLOWLK_INTN_SET(x) (((x) << 16) & 0x00010000)
+#define PHY_ANALOG_SYNTH2_CPSTEERING_EN_INTN_MSB 17
+#define PHY_ANALOG_SYNTH2_CPSTEERING_EN_INTN_LSB 17
+#define PHY_ANALOG_SYNTH2_CPSTEERING_EN_INTN_MASK 0x00020000
+#define PHY_ANALOG_SYNTH2_CPSTEERING_EN_INTN_GET(x) (((x) & 0x00020000) >> 17)
+#define PHY_ANALOG_SYNTH2_CPSTEERING_EN_INTN_SET(x) (((x) << 17) & 0x00020000)
+#define PHY_ANALOG_SYNTH2_CPBIAS_INTN_MSB 19
+#define PHY_ANALOG_SYNTH2_CPBIAS_INTN_LSB 18
+#define PHY_ANALOG_SYNTH2_CPBIAS_INTN_MASK 0x000c0000
+#define PHY_ANALOG_SYNTH2_CPBIAS_INTN_GET(x) (((x) & 0x000c0000) >> 18)
+#define PHY_ANALOG_SYNTH2_CPBIAS_INTN_SET(x) (((x) << 18) & 0x000c0000)
+#define PHY_ANALOG_SYNTH2_VC_LOW_REF_MSB 22
+#define PHY_ANALOG_SYNTH2_VC_LOW_REF_LSB 20
+#define PHY_ANALOG_SYNTH2_VC_LOW_REF_MASK 0x00700000
+#define PHY_ANALOG_SYNTH2_VC_LOW_REF_GET(x) (((x) & 0x00700000) >> 20)
+#define PHY_ANALOG_SYNTH2_VC_LOW_REF_SET(x) (((x) << 20) & 0x00700000)
+#define PHY_ANALOG_SYNTH2_VC_MID_REF_MSB 25
+#define PHY_ANALOG_SYNTH2_VC_MID_REF_LSB 23
+#define PHY_ANALOG_SYNTH2_VC_MID_REF_MASK 0x03800000
+#define PHY_ANALOG_SYNTH2_VC_MID_REF_GET(x) (((x) & 0x03800000) >> 23)
+#define PHY_ANALOG_SYNTH2_VC_MID_REF_SET(x) (((x) << 23) & 0x03800000)
+#define PHY_ANALOG_SYNTH2_VC_HI_REF_MSB 28
+#define PHY_ANALOG_SYNTH2_VC_HI_REF_LSB 26
+#define PHY_ANALOG_SYNTH2_VC_HI_REF_MASK 0x1c000000
+#define PHY_ANALOG_SYNTH2_VC_HI_REF_GET(x) (((x) & 0x1c000000) >> 26)
+#define PHY_ANALOG_SYNTH2_VC_HI_REF_SET(x) (((x) << 26) & 0x1c000000)
+#define PHY_ANALOG_SYNTH2_VC_CAL_REF_MSB 31
+#define PHY_ANALOG_SYNTH2_VC_CAL_REF_LSB 29
+#define PHY_ANALOG_SYNTH2_VC_CAL_REF_MASK 0xe0000000
+#define PHY_ANALOG_SYNTH2_VC_CAL_REF_GET(x) (((x) & 0xe0000000) >> 29)
+#define PHY_ANALOG_SYNTH2_VC_CAL_REF_SET(x) (((x) << 29) & 0xe0000000)
+
+/* macros for SYNTH3 */
+#define PHY_ANALOG_SYNTH3_ADDRESS 0x00000088
+#define PHY_ANALOG_SYNTH3_OFFSET 0x00000088
+#define PHY_ANALOG_SYNTH3_WAIT_VC_CHECK_MSB 5
+#define PHY_ANALOG_SYNTH3_WAIT_VC_CHECK_LSB 0
+#define PHY_ANALOG_SYNTH3_WAIT_VC_CHECK_MASK 0x0000003f
+#define PHY_ANALOG_SYNTH3_WAIT_VC_CHECK_GET(x) (((x) & 0x0000003f) >> 0)
+#define PHY_ANALOG_SYNTH3_WAIT_VC_CHECK_SET(x) (((x) << 0) & 0x0000003f)
+#define PHY_ANALOG_SYNTH3_WAIT_CAL_LIN_MSB 11
+#define PHY_ANALOG_SYNTH3_WAIT_CAL_LIN_LSB 6
+#define PHY_ANALOG_SYNTH3_WAIT_CAL_LIN_MASK 0x00000fc0
+#define PHY_ANALOG_SYNTH3_WAIT_CAL_LIN_GET(x) (((x) & 0x00000fc0) >> 6)
+#define PHY_ANALOG_SYNTH3_WAIT_CAL_LIN_SET(x) (((x) << 6) & 0x00000fc0)
+#define PHY_ANALOG_SYNTH3_WAIT_CAL_BIN_MSB 17
+#define PHY_ANALOG_SYNTH3_WAIT_CAL_BIN_LSB 12
+#define PHY_ANALOG_SYNTH3_WAIT_CAL_BIN_MASK 0x0003f000
+#define PHY_ANALOG_SYNTH3_WAIT_CAL_BIN_GET(x) (((x) & 0x0003f000) >> 12)
+#define PHY_ANALOG_SYNTH3_WAIT_CAL_BIN_SET(x) (((x) << 12) & 0x0003f000)
+#define PHY_ANALOG_SYNTH3_WAIT_PWRUP_MSB 23
+#define PHY_ANALOG_SYNTH3_WAIT_PWRUP_LSB 18
+#define PHY_ANALOG_SYNTH3_WAIT_PWRUP_MASK 0x00fc0000
+#define PHY_ANALOG_SYNTH3_WAIT_PWRUP_GET(x) (((x) & 0x00fc0000) >> 18)
+#define PHY_ANALOG_SYNTH3_WAIT_PWRUP_SET(x) (((x) << 18) & 0x00fc0000)
+#define PHY_ANALOG_SYNTH3_WAIT_SHORTR_PWRUP_MSB 29
+#define PHY_ANALOG_SYNTH3_WAIT_SHORTR_PWRUP_LSB 24
+#define PHY_ANALOG_SYNTH3_WAIT_SHORTR_PWRUP_MASK 0x3f000000
+#define PHY_ANALOG_SYNTH3_WAIT_SHORTR_PWRUP_GET(x) (((x) & 0x3f000000) >> 24)
+#define PHY_ANALOG_SYNTH3_WAIT_SHORTR_PWRUP_SET(x) (((x) << 24) & 0x3f000000)
+#define PHY_ANALOG_SYNTH3_SEL_CLK_DIV2_MSB 30
+#define PHY_ANALOG_SYNTH3_SEL_CLK_DIV2_LSB 30
+#define PHY_ANALOG_SYNTH3_SEL_CLK_DIV2_MASK 0x40000000
+#define PHY_ANALOG_SYNTH3_SEL_CLK_DIV2_GET(x) (((x) & 0x40000000) >> 30)
+#define PHY_ANALOG_SYNTH3_SEL_CLK_DIV2_SET(x) (((x) << 30) & 0x40000000)
+#define PHY_ANALOG_SYNTH3_DIS_CLK_XTAL_MSB 31
+#define PHY_ANALOG_SYNTH3_DIS_CLK_XTAL_LSB 31
+#define PHY_ANALOG_SYNTH3_DIS_CLK_XTAL_MASK 0x80000000
+#define PHY_ANALOG_SYNTH3_DIS_CLK_XTAL_GET(x) (((x) & 0x80000000) >> 31)
+#define PHY_ANALOG_SYNTH3_DIS_CLK_XTAL_SET(x) (((x) << 31) & 0x80000000)
+
+/* macros for SYNTH4 */
+#define PHY_ANALOG_SYNTH4_ADDRESS 0x0000008c
+#define PHY_ANALOG_SYNTH4_OFFSET 0x0000008c
+#define PHY_ANALOG_SYNTH4_PS_SINGLE_PULSE_MSB 0
+#define PHY_ANALOG_SYNTH4_PS_SINGLE_PULSE_LSB 0
+#define PHY_ANALOG_SYNTH4_PS_SINGLE_PULSE_MASK 0x00000001
+#define PHY_ANALOG_SYNTH4_PS_SINGLE_PULSE_GET(x) (((x) & 0x00000001) >> 0)
+#define PHY_ANALOG_SYNTH4_PS_SINGLE_PULSE_SET(x) (((x) << 0) & 0x00000001)
+#define PHY_ANALOG_SYNTH4_LONGSHIFTSEL_MSB 1
+#define PHY_ANALOG_SYNTH4_LONGSHIFTSEL_LSB 1
+#define PHY_ANALOG_SYNTH4_LONGSHIFTSEL_MASK 0x00000002
+#define PHY_ANALOG_SYNTH4_LONGSHIFTSEL_GET(x) (((x) & 0x00000002) >> 1)
+#define PHY_ANALOG_SYNTH4_LONGSHIFTSEL_SET(x) (((x) << 1) & 0x00000002)
+#define PHY_ANALOG_SYNTH4_LOBUF5GTUNE_OVR_MSB 3
+#define PHY_ANALOG_SYNTH4_LOBUF5GTUNE_OVR_LSB 2
+#define PHY_ANALOG_SYNTH4_LOBUF5GTUNE_OVR_MASK 0x0000000c
+#define PHY_ANALOG_SYNTH4_LOBUF5GTUNE_OVR_GET(x) (((x) & 0x0000000c) >> 2)
+#define PHY_ANALOG_SYNTH4_LOBUF5GTUNE_OVR_SET(x) (((x) << 2) & 0x0000000c)
+#define PHY_ANALOG_SYNTH4_FORCE_LOBUF5GTUNE_MSB 4
+#define PHY_ANALOG_SYNTH4_FORCE_LOBUF5GTUNE_LSB 4
+#define PHY_ANALOG_SYNTH4_FORCE_LOBUF5GTUNE_MASK 0x00000010
+#define PHY_ANALOG_SYNTH4_FORCE_LOBUF5GTUNE_GET(x) (((x) & 0x00000010) >> 4)
+#define PHY_ANALOG_SYNTH4_FORCE_LOBUF5GTUNE_SET(x) (((x) << 4) & 0x00000010)
+#define PHY_ANALOG_SYNTH4_PSCOUNT_FBSEL_MSB 5
+#define PHY_ANALOG_SYNTH4_PSCOUNT_FBSEL_LSB 5
+#define PHY_ANALOG_SYNTH4_PSCOUNT_FBSEL_MASK 0x00000020
+#define PHY_ANALOG_SYNTH4_PSCOUNT_FBSEL_GET(x) (((x) & 0x00000020) >> 5)
+#define PHY_ANALOG_SYNTH4_PSCOUNT_FBSEL_SET(x) (((x) << 5) & 0x00000020)
+#define PHY_ANALOG_SYNTH4_SDM_DITHER1_MSB 7
+#define PHY_ANALOG_SYNTH4_SDM_DITHER1_LSB 6
+#define PHY_ANALOG_SYNTH4_SDM_DITHER1_MASK 0x000000c0
+#define PHY_ANALOG_SYNTH4_SDM_DITHER1_GET(x) (((x) & 0x000000c0) >> 6)
+#define PHY_ANALOG_SYNTH4_SDM_DITHER1_SET(x) (((x) << 6) & 0x000000c0)
+#define PHY_ANALOG_SYNTH4_SDM_MODE_MSB 8
+#define PHY_ANALOG_SYNTH4_SDM_MODE_LSB 8
+#define PHY_ANALOG_SYNTH4_SDM_MODE_MASK 0x00000100
+#define PHY_ANALOG_SYNTH4_SDM_MODE_GET(x) (((x) & 0x00000100) >> 8)
+#define PHY_ANALOG_SYNTH4_SDM_MODE_SET(x) (((x) << 8) & 0x00000100)
+#define PHY_ANALOG_SYNTH4_SDM_DISABLE_MSB 9
+#define PHY_ANALOG_SYNTH4_SDM_DISABLE_LSB 9
+#define PHY_ANALOG_SYNTH4_SDM_DISABLE_MASK 0x00000200
+#define PHY_ANALOG_SYNTH4_SDM_DISABLE_GET(x) (((x) & 0x00000200) >> 9)
+#define PHY_ANALOG_SYNTH4_SDM_DISABLE_SET(x) (((x) << 9) & 0x00000200)
+#define PHY_ANALOG_SYNTH4_RESET_PRESC_MSB 10
+#define PHY_ANALOG_SYNTH4_RESET_PRESC_LSB 10
+#define PHY_ANALOG_SYNTH4_RESET_PRESC_MASK 0x00000400
+#define PHY_ANALOG_SYNTH4_RESET_PRESC_GET(x) (((x) & 0x00000400) >> 10)
+#define PHY_ANALOG_SYNTH4_RESET_PRESC_SET(x) (((x) << 10) & 0x00000400)
+#define PHY_ANALOG_SYNTH4_PRESCSEL_MSB 12
+#define PHY_ANALOG_SYNTH4_PRESCSEL_LSB 11
+#define PHY_ANALOG_SYNTH4_PRESCSEL_MASK 0x00001800
+#define PHY_ANALOG_SYNTH4_PRESCSEL_GET(x) (((x) & 0x00001800) >> 11)
+#define PHY_ANALOG_SYNTH4_PRESCSEL_SET(x) (((x) << 11) & 0x00001800)
+#define PHY_ANALOG_SYNTH4_PFD_DISABLE_MSB 13
+#define PHY_ANALOG_SYNTH4_PFD_DISABLE_LSB 13
+#define PHY_ANALOG_SYNTH4_PFD_DISABLE_MASK 0x00002000
+#define PHY_ANALOG_SYNTH4_PFD_DISABLE_GET(x) (((x) & 0x00002000) >> 13)
+#define PHY_ANALOG_SYNTH4_PFD_DISABLE_SET(x) (((x) << 13) & 0x00002000)
+#define PHY_ANALOG_SYNTH4_PFDDELAY_FRACN_MSB 14
+#define PHY_ANALOG_SYNTH4_PFDDELAY_FRACN_LSB 14
+#define PHY_ANALOG_SYNTH4_PFDDELAY_FRACN_MASK 0x00004000
+#define PHY_ANALOG_SYNTH4_PFDDELAY_FRACN_GET(x) (((x) & 0x00004000) >> 14)
+#define PHY_ANALOG_SYNTH4_PFDDELAY_FRACN_SET(x) (((x) << 14) & 0x00004000)
+#define PHY_ANALOG_SYNTH4_FORCE_LO_ON_MSB 15
+#define PHY_ANALOG_SYNTH4_FORCE_LO_ON_LSB 15
+#define PHY_ANALOG_SYNTH4_FORCE_LO_ON_MASK 0x00008000
+#define PHY_ANALOG_SYNTH4_FORCE_LO_ON_GET(x) (((x) & 0x00008000) >> 15)
+#define PHY_ANALOG_SYNTH4_FORCE_LO_ON_SET(x) (((x) << 15) & 0x00008000)
+#define PHY_ANALOG_SYNTH4_CLKXTAL_EDGE_SEL_MSB 16
+#define PHY_ANALOG_SYNTH4_CLKXTAL_EDGE_SEL_LSB 16
+#define PHY_ANALOG_SYNTH4_CLKXTAL_EDGE_SEL_MASK 0x00010000
+#define PHY_ANALOG_SYNTH4_CLKXTAL_EDGE_SEL_GET(x) (((x) & 0x00010000) >> 16)
+#define PHY_ANALOG_SYNTH4_CLKXTAL_EDGE_SEL_SET(x) (((x) << 16) & 0x00010000)
+#define PHY_ANALOG_SYNTH4_VCOCAPPULLUP_MSB 17
+#define PHY_ANALOG_SYNTH4_VCOCAPPULLUP_LSB 17
+#define PHY_ANALOG_SYNTH4_VCOCAPPULLUP_MASK 0x00020000
+#define PHY_ANALOG_SYNTH4_VCOCAPPULLUP_GET(x) (((x) & 0x00020000) >> 17)
+#define PHY_ANALOG_SYNTH4_VCOCAPPULLUP_SET(x) (((x) << 17) & 0x00020000)
+#define PHY_ANALOG_SYNTH4_VCOCAP_OVR_MSB 25
+#define PHY_ANALOG_SYNTH4_VCOCAP_OVR_LSB 18
+#define PHY_ANALOG_SYNTH4_VCOCAP_OVR_MASK 0x03fc0000
+#define PHY_ANALOG_SYNTH4_VCOCAP_OVR_GET(x) (((x) & 0x03fc0000) >> 18)
+#define PHY_ANALOG_SYNTH4_VCOCAP_OVR_SET(x) (((x) << 18) & 0x03fc0000)
+#define PHY_ANALOG_SYNTH4_FORCE_VCOCAP_MSB 26
+#define PHY_ANALOG_SYNTH4_FORCE_VCOCAP_LSB 26
+#define PHY_ANALOG_SYNTH4_FORCE_VCOCAP_MASK 0x04000000
+#define PHY_ANALOG_SYNTH4_FORCE_VCOCAP_GET(x) (((x) & 0x04000000) >> 26)
+#define PHY_ANALOG_SYNTH4_FORCE_VCOCAP_SET(x) (((x) << 26) & 0x04000000)
+#define PHY_ANALOG_SYNTH4_FORCE_PINVC_MSB 27
+#define PHY_ANALOG_SYNTH4_FORCE_PINVC_LSB 27
+#define PHY_ANALOG_SYNTH4_FORCE_PINVC_MASK 0x08000000
+#define PHY_ANALOG_SYNTH4_FORCE_PINVC_GET(x) (((x) & 0x08000000) >> 27)
+#define PHY_ANALOG_SYNTH4_FORCE_PINVC_SET(x) (((x) << 27) & 0x08000000)
+#define PHY_ANALOG_SYNTH4_SHORTR_UNTIL_LOCKED_MSB 28
+#define PHY_ANALOG_SYNTH4_SHORTR_UNTIL_LOCKED_LSB 28
+#define PHY_ANALOG_SYNTH4_SHORTR_UNTIL_LOCKED_MASK 0x10000000
+#define PHY_ANALOG_SYNTH4_SHORTR_UNTIL_LOCKED_GET(x) (((x) & 0x10000000) >> 28)
+#define PHY_ANALOG_SYNTH4_SHORTR_UNTIL_LOCKED_SET(x) (((x) << 28) & 0x10000000)
+#define PHY_ANALOG_SYNTH4_ALWAYS_SHORTR_MSB 29
+#define PHY_ANALOG_SYNTH4_ALWAYS_SHORTR_LSB 29
+#define PHY_ANALOG_SYNTH4_ALWAYS_SHORTR_MASK 0x20000000
+#define PHY_ANALOG_SYNTH4_ALWAYS_SHORTR_GET(x) (((x) & 0x20000000) >> 29)
+#define PHY_ANALOG_SYNTH4_ALWAYS_SHORTR_SET(x) (((x) << 29) & 0x20000000)
+#define PHY_ANALOG_SYNTH4_DIS_LOSTVC_MSB 30
+#define PHY_ANALOG_SYNTH4_DIS_LOSTVC_LSB 30
+#define PHY_ANALOG_SYNTH4_DIS_LOSTVC_MASK 0x40000000
+#define PHY_ANALOG_SYNTH4_DIS_LOSTVC_GET(x) (((x) & 0x40000000) >> 30)
+#define PHY_ANALOG_SYNTH4_DIS_LOSTVC_SET(x) (((x) << 30) & 0x40000000)
+#define PHY_ANALOG_SYNTH4_DIS_LIN_CAPSEARCH_MSB 31
+#define PHY_ANALOG_SYNTH4_DIS_LIN_CAPSEARCH_LSB 31
+#define PHY_ANALOG_SYNTH4_DIS_LIN_CAPSEARCH_MASK 0x80000000
+#define PHY_ANALOG_SYNTH4_DIS_LIN_CAPSEARCH_GET(x) (((x) & 0x80000000) >> 31)
+#define PHY_ANALOG_SYNTH4_DIS_LIN_CAPSEARCH_SET(x) (((x) << 31) & 0x80000000)
+
+/* macros for SYNTH5 */
+#define PHY_ANALOG_SYNTH5_ADDRESS 0x00000090
+#define PHY_ANALOG_SYNTH5_OFFSET 0x00000090
+#define PHY_ANALOG_SYNTH5_VCOBIAS_MSB 1
+#define PHY_ANALOG_SYNTH5_VCOBIAS_LSB 0
+#define PHY_ANALOG_SYNTH5_VCOBIAS_MASK 0x00000003
+#define PHY_ANALOG_SYNTH5_VCOBIAS_GET(x) (((x) & 0x00000003) >> 0)
+#define PHY_ANALOG_SYNTH5_VCOBIAS_SET(x) (((x) << 0) & 0x00000003)
+#define PHY_ANALOG_SYNTH5_PWDB_ICLOBUF5G50_MSB 4
+#define PHY_ANALOG_SYNTH5_PWDB_ICLOBUF5G50_LSB 2
+#define PHY_ANALOG_SYNTH5_PWDB_ICLOBUF5G50_MASK 0x0000001c
+#define PHY_ANALOG_SYNTH5_PWDB_ICLOBUF5G50_GET(x) (((x) & 0x0000001c) >> 2)
+#define PHY_ANALOG_SYNTH5_PWDB_ICLOBUF5G50_SET(x) (((x) << 2) & 0x0000001c)
+#define PHY_ANALOG_SYNTH5_PWDB_ICLOBUF2G50_MSB 7
+#define PHY_ANALOG_SYNTH5_PWDB_ICLOBUF2G50_LSB 5
+#define PHY_ANALOG_SYNTH5_PWDB_ICLOBUF2G50_MASK 0x000000e0
+#define PHY_ANALOG_SYNTH5_PWDB_ICLOBUF2G50_GET(x) (((x) & 0x000000e0) >> 5)
+#define PHY_ANALOG_SYNTH5_PWDB_ICLOBUF2G50_SET(x) (((x) << 5) & 0x000000e0)
+#define PHY_ANALOG_SYNTH5_PWDB_ICVCO25_MSB 10
+#define PHY_ANALOG_SYNTH5_PWDB_ICVCO25_LSB 8
+#define PHY_ANALOG_SYNTH5_PWDB_ICVCO25_MASK 0x00000700
+#define PHY_ANALOG_SYNTH5_PWDB_ICVCO25_GET(x) (((x) & 0x00000700) >> 8)
+#define PHY_ANALOG_SYNTH5_PWDB_ICVCO25_SET(x) (((x) << 8) & 0x00000700)
+#define PHY_ANALOG_SYNTH5_PWDB_ICVCOREG25_MSB 13
+#define PHY_ANALOG_SYNTH5_PWDB_ICVCOREG25_LSB 11
+#define PHY_ANALOG_SYNTH5_PWDB_ICVCOREG25_MASK 0x00003800
+#define PHY_ANALOG_SYNTH5_PWDB_ICVCOREG25_GET(x) (((x) & 0x00003800) >> 11)
+#define PHY_ANALOG_SYNTH5_PWDB_ICVCOREG25_SET(x) (((x) << 11) & 0x00003800)
+#define PHY_ANALOG_SYNTH5_PWDB_IRVCOREG50_MSB 14
+#define PHY_ANALOG_SYNTH5_PWDB_IRVCOREG50_LSB 14
+#define PHY_ANALOG_SYNTH5_PWDB_IRVCOREG50_MASK 0x00004000
+#define PHY_ANALOG_SYNTH5_PWDB_IRVCOREG50_GET(x) (((x) & 0x00004000) >> 14)
+#define PHY_ANALOG_SYNTH5_PWDB_IRVCOREG50_SET(x) (((x) << 14) & 0x00004000)
+#define PHY_ANALOG_SYNTH5_PWDB_ICLOMIX_MSB 17
+#define PHY_ANALOG_SYNTH5_PWDB_ICLOMIX_LSB 15
+#define PHY_ANALOG_SYNTH5_PWDB_ICLOMIX_MASK 0x00038000
+#define PHY_ANALOG_SYNTH5_PWDB_ICLOMIX_GET(x) (((x) & 0x00038000) >> 15)
+#define PHY_ANALOG_SYNTH5_PWDB_ICLOMIX_SET(x) (((x) << 15) & 0x00038000)
+#define PHY_ANALOG_SYNTH5_PWDB_ICLODIV50_MSB 20
+#define PHY_ANALOG_SYNTH5_PWDB_ICLODIV50_LSB 18
+#define PHY_ANALOG_SYNTH5_PWDB_ICLODIV50_MASK 0x001c0000
+#define PHY_ANALOG_SYNTH5_PWDB_ICLODIV50_GET(x) (((x) & 0x001c0000) >> 18)
+#define PHY_ANALOG_SYNTH5_PWDB_ICLODIV50_SET(x) (((x) << 18) & 0x001c0000)
+#define PHY_ANALOG_SYNTH5_PWDB_ICPRESC50_MSB 23
+#define PHY_ANALOG_SYNTH5_PWDB_ICPRESC50_LSB 21
+#define PHY_ANALOG_SYNTH5_PWDB_ICPRESC50_MASK 0x00e00000
+#define PHY_ANALOG_SYNTH5_PWDB_ICPRESC50_GET(x) (((x) & 0x00e00000) >> 21)
+#define PHY_ANALOG_SYNTH5_PWDB_ICPRESC50_SET(x) (((x) << 21) & 0x00e00000)
+#define PHY_ANALOG_SYNTH5_PWDB_IRVCMON25_MSB 26
+#define PHY_ANALOG_SYNTH5_PWDB_IRVCMON25_LSB 24
+#define PHY_ANALOG_SYNTH5_PWDB_IRVCMON25_MASK 0x07000000
+#define PHY_ANALOG_SYNTH5_PWDB_IRVCMON25_GET(x) (((x) & 0x07000000) >> 24)
+#define PHY_ANALOG_SYNTH5_PWDB_IRVCMON25_SET(x) (((x) << 24) & 0x07000000)
+#define PHY_ANALOG_SYNTH5_PWDB_IRPFDCP_MSB 29
+#define PHY_ANALOG_SYNTH5_PWDB_IRPFDCP_LSB 27
+#define PHY_ANALOG_SYNTH5_PWDB_IRPFDCP_MASK 0x38000000
+#define PHY_ANALOG_SYNTH5_PWDB_IRPFDCP_GET(x) (((x) & 0x38000000) >> 27)
+#define PHY_ANALOG_SYNTH5_PWDB_IRPFDCP_SET(x) (((x) << 27) & 0x38000000)
+#define PHY_ANALOG_SYNTH5_SDM_DITHER2_MSB 31
+#define PHY_ANALOG_SYNTH5_SDM_DITHER2_LSB 30
+#define PHY_ANALOG_SYNTH5_SDM_DITHER2_MASK 0xc0000000
+#define PHY_ANALOG_SYNTH5_SDM_DITHER2_GET(x) (((x) & 0xc0000000) >> 30)
+#define PHY_ANALOG_SYNTH5_SDM_DITHER2_SET(x) (((x) << 30) & 0xc0000000)
+
+/* macros for SYNTH6 */
+#define PHY_ANALOG_SYNTH6_ADDRESS 0x00000094
+#define PHY_ANALOG_SYNTH6_OFFSET 0x00000094
+#define PHY_ANALOG_SYNTH6_LOBUF5GTUNE_MSB 1
+#define PHY_ANALOG_SYNTH6_LOBUF5GTUNE_LSB 0
+#define PHY_ANALOG_SYNTH6_LOBUF5GTUNE_MASK 0x00000003
+#define PHY_ANALOG_SYNTH6_LOBUF5GTUNE_GET(x) (((x) & 0x00000003) >> 0)
+#define PHY_ANALOG_SYNTH6_LOOP_IP_MSB 8
+#define PHY_ANALOG_SYNTH6_LOOP_IP_LSB 2
+#define PHY_ANALOG_SYNTH6_LOOP_IP_MASK 0x000001fc
+#define PHY_ANALOG_SYNTH6_LOOP_IP_GET(x) (((x) & 0x000001fc) >> 2)
+#define PHY_ANALOG_SYNTH6_VC2LOW_MSB 9
+#define PHY_ANALOG_SYNTH6_VC2LOW_LSB 9
+#define PHY_ANALOG_SYNTH6_VC2LOW_MASK 0x00000200
+#define PHY_ANALOG_SYNTH6_VC2LOW_GET(x) (((x) & 0x00000200) >> 9)
+#define PHY_ANALOG_SYNTH6_VC2HIGH_MSB 10
+#define PHY_ANALOG_SYNTH6_VC2HIGH_LSB 10
+#define PHY_ANALOG_SYNTH6_VC2HIGH_MASK 0x00000400
+#define PHY_ANALOG_SYNTH6_VC2HIGH_GET(x) (((x) & 0x00000400) >> 10)
+#define PHY_ANALOG_SYNTH6_RESET_SDM_B_MSB 11
+#define PHY_ANALOG_SYNTH6_RESET_SDM_B_LSB 11
+#define PHY_ANALOG_SYNTH6_RESET_SDM_B_MASK 0x00000800
+#define PHY_ANALOG_SYNTH6_RESET_SDM_B_GET(x) (((x) & 0x00000800) >> 11)
+#define PHY_ANALOG_SYNTH6_RESET_PSCOUNTERS_MSB 12
+#define PHY_ANALOG_SYNTH6_RESET_PSCOUNTERS_LSB 12
+#define PHY_ANALOG_SYNTH6_RESET_PSCOUNTERS_MASK 0x00001000
+#define PHY_ANALOG_SYNTH6_RESET_PSCOUNTERS_GET(x) (((x) & 0x00001000) >> 12)
+#define PHY_ANALOG_SYNTH6_RESET_PFD_MSB 13
+#define PHY_ANALOG_SYNTH6_RESET_PFD_LSB 13
+#define PHY_ANALOG_SYNTH6_RESET_PFD_MASK 0x00002000
+#define PHY_ANALOG_SYNTH6_RESET_PFD_GET(x) (((x) & 0x00002000) >> 13)
+#define PHY_ANALOG_SYNTH6_RESET_RFD_MSB 14
+#define PHY_ANALOG_SYNTH6_RESET_RFD_LSB 14
+#define PHY_ANALOG_SYNTH6_RESET_RFD_MASK 0x00004000
+#define PHY_ANALOG_SYNTH6_RESET_RFD_GET(x) (((x) & 0x00004000) >> 14)
+#define PHY_ANALOG_SYNTH6_SHORT_R_MSB 15
+#define PHY_ANALOG_SYNTH6_SHORT_R_LSB 15
+#define PHY_ANALOG_SYNTH6_SHORT_R_MASK 0x00008000
+#define PHY_ANALOG_SYNTH6_SHORT_R_GET(x) (((x) & 0x00008000) >> 15)
+#define PHY_ANALOG_SYNTH6_VCO_CAP_ST_MSB 23
+#define PHY_ANALOG_SYNTH6_VCO_CAP_ST_LSB 16
+#define PHY_ANALOG_SYNTH6_VCO_CAP_ST_MASK 0x00ff0000
+#define PHY_ANALOG_SYNTH6_VCO_CAP_ST_GET(x) (((x) & 0x00ff0000) >> 16)
+#define PHY_ANALOG_SYNTH6_PIN_VC_MSB 24
+#define PHY_ANALOG_SYNTH6_PIN_VC_LSB 24
+#define PHY_ANALOG_SYNTH6_PIN_VC_MASK 0x01000000
+#define PHY_ANALOG_SYNTH6_PIN_VC_GET(x) (((x) & 0x01000000) >> 24)
+#define PHY_ANALOG_SYNTH6_SYNTH_LOCK_VC_OK_MSB 25
+#define PHY_ANALOG_SYNTH6_SYNTH_LOCK_VC_OK_LSB 25
+#define PHY_ANALOG_SYNTH6_SYNTH_LOCK_VC_OK_MASK 0x02000000
+#define PHY_ANALOG_SYNTH6_SYNTH_LOCK_VC_OK_GET(x) (((x) & 0x02000000) >> 25)
+#define PHY_ANALOG_SYNTH6_CAP_SEARCH_MSB 26
+#define PHY_ANALOG_SYNTH6_CAP_SEARCH_LSB 26
+#define PHY_ANALOG_SYNTH6_CAP_SEARCH_MASK 0x04000000
+#define PHY_ANALOG_SYNTH6_CAP_SEARCH_GET(x) (((x) & 0x04000000) >> 26)
+#define PHY_ANALOG_SYNTH6_SYNTH_SM_STATE_MSB 30
+#define PHY_ANALOG_SYNTH6_SYNTH_SM_STATE_LSB 27
+#define PHY_ANALOG_SYNTH6_SYNTH_SM_STATE_MASK 0x78000000
+#define PHY_ANALOG_SYNTH6_SYNTH_SM_STATE_GET(x) (((x) & 0x78000000) >> 27)
+#define PHY_ANALOG_SYNTH6_SYNTH_ON_MSB 31
+#define PHY_ANALOG_SYNTH6_SYNTH_ON_LSB 31
+#define PHY_ANALOG_SYNTH6_SYNTH_ON_MASK 0x80000000
+#define PHY_ANALOG_SYNTH6_SYNTH_ON_GET(x) (((x) & 0x80000000) >> 31)
+
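/*
 * Illustrative sketch: SYNTH6 is largely a status word, which is why most
 * of its fields are defined with _GET accessors only.  A lock check would
 * look roughly like the helper below; phy_reg_read is a hypothetical
 * 32-bit register read supplied by the caller, not something this header
 * defines.
 */
static inline int
phy_analog_synth6_locked_example(uint32_t (*phy_reg_read)(uint32_t addr))
{
	uint32_t synth6 = phy_reg_read(PHY_ANALOG_SYNTH6_ADDRESS);

	/* Non-zero once the synthesizer reports VC lock. */
	return PHY_ANALOG_SYNTH6_SYNTH_LOCK_VC_OK_GET(synth6);
}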
+/* macros for SYNTH7 */
+#define PHY_ANALOG_SYNTH7_ADDRESS 0x00000098
+#define PHY_ANALOG_SYNTH7_OFFSET 0x00000098
+#define PHY_ANALOG_SYNTH7_OVRCHANDECODER_MSB 0
+#define PHY_ANALOG_SYNTH7_OVRCHANDECODER_LSB 0
+#define PHY_ANALOG_SYNTH7_OVRCHANDECODER_MASK 0x00000001
+#define PHY_ANALOG_SYNTH7_OVRCHANDECODER_GET(x) (((x) & 0x00000001) >> 0)
+#define PHY_ANALOG_SYNTH7_OVRCHANDECODER_SET(x) (((x) << 0) & 0x00000001)
+#define PHY_ANALOG_SYNTH7_FORCE_FRACLSB_MSB 1
+#define PHY_ANALOG_SYNTH7_FORCE_FRACLSB_LSB 1
+#define PHY_ANALOG_SYNTH7_FORCE_FRACLSB_MASK 0x00000002
+#define PHY_ANALOG_SYNTH7_FORCE_FRACLSB_GET(x) (((x) & 0x00000002) >> 1)
+#define PHY_ANALOG_SYNTH7_FORCE_FRACLSB_SET(x) (((x) << 1) & 0x00000002)
+#define PHY_ANALOG_SYNTH7_CHANFRAC_MSB 18
+#define PHY_ANALOG_SYNTH7_CHANFRAC_LSB 2
+#define PHY_ANALOG_SYNTH7_CHANFRAC_MASK 0x0007fffc
+#define PHY_ANALOG_SYNTH7_CHANFRAC_GET(x) (((x) & 0x0007fffc) >> 2)
+#define PHY_ANALOG_SYNTH7_CHANFRAC_SET(x) (((x) << 2) & 0x0007fffc)
+#define PHY_ANALOG_SYNTH7_CHANSEL_MSB 27
+#define PHY_ANALOG_SYNTH7_CHANSEL_LSB 19
+#define PHY_ANALOG_SYNTH7_CHANSEL_MASK 0x0ff80000
+#define PHY_ANALOG_SYNTH7_CHANSEL_GET(x) (((x) & 0x0ff80000) >> 19)
+#define PHY_ANALOG_SYNTH7_CHANSEL_SET(x) (((x) << 19) & 0x0ff80000)
+#define PHY_ANALOG_SYNTH7_AMODEREFSEL_MSB 29
+#define PHY_ANALOG_SYNTH7_AMODEREFSEL_LSB 28
+#define PHY_ANALOG_SYNTH7_AMODEREFSEL_MASK 0x30000000
+#define PHY_ANALOG_SYNTH7_AMODEREFSEL_GET(x) (((x) & 0x30000000) >> 28)
+#define PHY_ANALOG_SYNTH7_AMODEREFSEL_SET(x) (((x) << 28) & 0x30000000)
+#define PHY_ANALOG_SYNTH7_FRACMODE_MSB 30
+#define PHY_ANALOG_SYNTH7_FRACMODE_LSB 30
+#define PHY_ANALOG_SYNTH7_FRACMODE_MASK 0x40000000
+#define PHY_ANALOG_SYNTH7_FRACMODE_GET(x) (((x) & 0x40000000) >> 30)
+#define PHY_ANALOG_SYNTH7_FRACMODE_SET(x) (((x) << 30) & 0x40000000)
+#define PHY_ANALOG_SYNTH7_LOADSYNTHCHANNEL_MSB 31
+#define PHY_ANALOG_SYNTH7_LOADSYNTHCHANNEL_LSB 31
+#define PHY_ANALOG_SYNTH7_LOADSYNTHCHANNEL_MASK 0x80000000
+#define PHY_ANALOG_SYNTH7_LOADSYNTHCHANNEL_GET(x) (((x) & 0x80000000) >> 31)
+#define PHY_ANALOG_SYNTH7_LOADSYNTHCHANNEL_SET(x) (((x) << 31) & 0x80000000)
+
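/*
 * Illustrative sketch: updating a multi-bit field in place is a
 * read-modify-write using the _MASK and _SET macros, e.g. rewriting the
 * SYNTH7 channel-select field.  reg_read/reg_write are hypothetical
 * helpers passed in by the caller; only the macro usage comes from the
 * definitions above.
 */
static inline void phy_analog_synth7_set_chansel_example(
	uint32_t (*reg_read)(uint32_t),
	void (*reg_write)(uint32_t, uint32_t),
	uint32_t chansel)
{
	uint32_t val = reg_read(PHY_ANALOG_SYNTH7_ADDRESS);

	/* Clear the old CHANSEL bits, then merge in the new, masked value. */
	val &= ~PHY_ANALOG_SYNTH7_CHANSEL_MASK;
	val |= PHY_ANALOG_SYNTH7_CHANSEL_SET(chansel);

	reg_write(PHY_ANALOG_SYNTH7_ADDRESS, val);
}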
+/* macros for SYNTH8 */
+#define PHY_ANALOG_SYNTH8_ADDRESS 0x0000009c
+#define PHY_ANALOG_SYNTH8_OFFSET 0x0000009c
+#define PHY_ANALOG_SYNTH8_CPSTEERING_EN_FRACN_MSB 0
+#define PHY_ANALOG_SYNTH8_CPSTEERING_EN_FRACN_LSB 0
+#define PHY_ANALOG_SYNTH8_CPSTEERING_EN_FRACN_MASK 0x00000001
+#define PHY_ANALOG_SYNTH8_CPSTEERING_EN_FRACN_GET(x) (((x) & 0x00000001) >> 0)
+#define PHY_ANALOG_SYNTH8_CPSTEERING_EN_FRACN_SET(x) (((x) << 0) & 0x00000001)
+#define PHY_ANALOG_SYNTH8_LOOP_ICPB_MSB 7
+#define PHY_ANALOG_SYNTH8_LOOP_ICPB_LSB 1
+#define PHY_ANALOG_SYNTH8_LOOP_ICPB_MASK 0x000000fe
+#define PHY_ANALOG_SYNTH8_LOOP_ICPB_GET(x) (((x) & 0x000000fe) >> 1)
+#define PHY_ANALOG_SYNTH8_LOOP_ICPB_SET(x) (((x) << 1) & 0x000000fe)
+#define PHY_ANALOG_SYNTH8_LOOP_CSB_MSB 11
+#define PHY_ANALOG_SYNTH8_LOOP_CSB_LSB 8
+#define PHY_ANALOG_SYNTH8_LOOP_CSB_MASK 0x00000f00
+#define PHY_ANALOG_SYNTH8_LOOP_CSB_GET(x) (((x) & 0x00000f00) >> 8)
+#define PHY_ANALOG_SYNTH8_LOOP_CSB_SET(x) (((x) << 8) & 0x00000f00)
+#define PHY_ANALOG_SYNTH8_LOOP_RSB_MSB 16
+#define PHY_ANALOG_SYNTH8_LOOP_RSB_LSB 12
+#define PHY_ANALOG_SYNTH8_LOOP_RSB_MASK 0x0001f000
+#define PHY_ANALOG_SYNTH8_LOOP_RSB_GET(x) (((x) & 0x0001f000) >> 12)
+#define PHY_ANALOG_SYNTH8_LOOP_RSB_SET(x) (((x) << 12) & 0x0001f000)
+#define PHY_ANALOG_SYNTH8_LOOP_CPB_MSB 21
+#define PHY_ANALOG_SYNTH8_LOOP_CPB_LSB 17
+#define PHY_ANALOG_SYNTH8_LOOP_CPB_MASK 0x003e0000
+#define PHY_ANALOG_SYNTH8_LOOP_CPB_GET(x) (((x) & 0x003e0000) >> 17)
+#define PHY_ANALOG_SYNTH8_LOOP_CPB_SET(x) (((x) << 17) & 0x003e0000)
+#define PHY_ANALOG_SYNTH8_LOOP_3RD_ORDER_RB_MSB 26
+#define PHY_ANALOG_SYNTH8_LOOP_3RD_ORDER_RB_LSB 22
+#define PHY_ANALOG_SYNTH8_LOOP_3RD_ORDER_RB_MASK 0x07c00000
+#define PHY_ANALOG_SYNTH8_LOOP_3RD_ORDER_RB_GET(x) (((x) & 0x07c00000) >> 22)
+#define PHY_ANALOG_SYNTH8_LOOP_3RD_ORDER_RB_SET(x) (((x) << 22) & 0x07c00000)
+#define PHY_ANALOG_SYNTH8_REFDIVB_MSB 31
+#define PHY_ANALOG_SYNTH8_REFDIVB_LSB 27
+#define PHY_ANALOG_SYNTH8_REFDIVB_MASK 0xf8000000
+#define PHY_ANALOG_SYNTH8_REFDIVB_GET(x) (((x) & 0xf8000000) >> 27)
+#define PHY_ANALOG_SYNTH8_REFDIVB_SET(x) (((x) << 27) & 0xf8000000)
+
+/* macros for SYNTH9 */
+#define PHY_ANALOG_SYNTH9_ADDRESS 0x000000a0
+#define PHY_ANALOG_SYNTH9_OFFSET 0x000000a0
+#define PHY_ANALOG_SYNTH9_PFDDELAY_INTN_MSB 0
+#define PHY_ANALOG_SYNTH9_PFDDELAY_INTN_LSB 0
+#define PHY_ANALOG_SYNTH9_PFDDELAY_INTN_MASK 0x00000001
+#define PHY_ANALOG_SYNTH9_PFDDELAY_INTN_GET(x) (((x) & 0x00000001) >> 0)
+#define PHY_ANALOG_SYNTH9_PFDDELAY_INTN_SET(x) (((x) << 0) & 0x00000001)
+#define PHY_ANALOG_SYNTH9_SLOPE_ICPA0_MSB 3
+#define PHY_ANALOG_SYNTH9_SLOPE_ICPA0_LSB 1
+#define PHY_ANALOG_SYNTH9_SLOPE_ICPA0_MASK 0x0000000e
+#define PHY_ANALOG_SYNTH9_SLOPE_ICPA0_GET(x) (((x) & 0x0000000e) >> 1)
+#define PHY_ANALOG_SYNTH9_SLOPE_ICPA0_SET(x) (((x) << 1) & 0x0000000e)
+#define PHY_ANALOG_SYNTH9_LOOP_ICPA0_MSB 7
+#define PHY_ANALOG_SYNTH9_LOOP_ICPA0_LSB 4
+#define PHY_ANALOG_SYNTH9_LOOP_ICPA0_MASK 0x000000f0
+#define PHY_ANALOG_SYNTH9_LOOP_ICPA0_GET(x) (((x) & 0x000000f0) >> 4)
+#define PHY_ANALOG_SYNTH9_LOOP_ICPA0_SET(x) (((x) << 4) & 0x000000f0)
+#define PHY_ANALOG_SYNTH9_LOOP_CSA0_MSB 11
+#define PHY_ANALOG_SYNTH9_LOOP_CSA0_LSB 8
+#define PHY_ANALOG_SYNTH9_LOOP_CSA0_MASK 0x00000f00
+#define PHY_ANALOG_SYNTH9_LOOP_CSA0_GET(x) (((x) & 0x00000f00) >> 8)
+#define PHY_ANALOG_SYNTH9_LOOP_CSA0_SET(x) (((x) << 8) & 0x00000f00)
+#define PHY_ANALOG_SYNTH9_LOOP_RSA0_MSB 16
+#define PHY_ANALOG_SYNTH9_LOOP_RSA0_LSB 12
+#define PHY_ANALOG_SYNTH9_LOOP_RSA0_MASK 0x0001f000
+#define PHY_ANALOG_SYNTH9_LOOP_RSA0_GET(x) (((x) & 0x0001f000) >> 12)
+#define PHY_ANALOG_SYNTH9_LOOP_RSA0_SET(x) (((x) << 12) & 0x0001f000)
+#define PHY_ANALOG_SYNTH9_LOOP_CPA0_MSB 21
+#define PHY_ANALOG_SYNTH9_LOOP_CPA0_LSB 17
+#define PHY_ANALOG_SYNTH9_LOOP_CPA0_MASK 0x003e0000
+#define PHY_ANALOG_SYNTH9_LOOP_CPA0_GET(x) (((x) & 0x003e0000) >> 17)
+#define PHY_ANALOG_SYNTH9_LOOP_CPA0_SET(x) (((x) << 17) & 0x003e0000)
+#define PHY_ANALOG_SYNTH9_LOOP_3RD_ORDER_RA_MSB 26
+#define PHY_ANALOG_SYNTH9_LOOP_3RD_ORDER_RA_LSB 22
+#define PHY_ANALOG_SYNTH9_LOOP_3RD_ORDER_RA_MASK 0x07c00000
+#define PHY_ANALOG_SYNTH9_LOOP_3RD_ORDER_RA_GET(x) (((x) & 0x07c00000) >> 22)
+#define PHY_ANALOG_SYNTH9_LOOP_3RD_ORDER_RA_SET(x) (((x) << 22) & 0x07c00000)
+#define PHY_ANALOG_SYNTH9_REFDIVA_MSB 31
+#define PHY_ANALOG_SYNTH9_REFDIVA_LSB 27
+#define PHY_ANALOG_SYNTH9_REFDIVA_MASK 0xf8000000
+#define PHY_ANALOG_SYNTH9_REFDIVA_GET(x) (((x) & 0xf8000000) >> 27)
+#define PHY_ANALOG_SYNTH9_REFDIVA_SET(x) (((x) << 27) & 0xf8000000)
+
+/* macros for SYNTH10 */
+#define PHY_ANALOG_SYNTH10_ADDRESS 0x000000a4
+#define PHY_ANALOG_SYNTH10_OFFSET 0x000000a4
+#define PHY_ANALOG_SYNTH10_SPARE10A_MSB 1
+#define PHY_ANALOG_SYNTH10_SPARE10A_LSB 0
+#define PHY_ANALOG_SYNTH10_SPARE10A_MASK 0x00000003
+#define PHY_ANALOG_SYNTH10_SPARE10A_GET(x) (((x) & 0x00000003) >> 0)
+#define PHY_ANALOG_SYNTH10_SPARE10A_SET(x) (((x) << 0) & 0x00000003)
+#define PHY_ANALOG_SYNTH10_PWDB_ICLOBIAS50_MSB 4
+#define PHY_ANALOG_SYNTH10_PWDB_ICLOBIAS50_LSB 2
+#define PHY_ANALOG_SYNTH10_PWDB_ICLOBIAS50_MASK 0x0000001c
+#define PHY_ANALOG_SYNTH10_PWDB_ICLOBIAS50_GET(x) (((x) & 0x0000001c) >> 2)
+#define PHY_ANALOG_SYNTH10_PWDB_ICLOBIAS50_SET(x) (((x) << 2) & 0x0000001c)
+#define PHY_ANALOG_SYNTH10_PWDB_IRSPARE25_MSB 7
+#define PHY_ANALOG_SYNTH10_PWDB_IRSPARE25_LSB 5
+#define PHY_ANALOG_SYNTH10_PWDB_IRSPARE25_MASK 0x000000e0
+#define PHY_ANALOG_SYNTH10_PWDB_IRSPARE25_GET(x) (((x) & 0x000000e0) >> 5)
+#define PHY_ANALOG_SYNTH10_PWDB_IRSPARE25_SET(x) (((x) << 5) & 0x000000e0)
+#define PHY_ANALOG_SYNTH10_PWDB_ICSPARE25_MSB 10
+#define PHY_ANALOG_SYNTH10_PWDB_ICSPARE25_LSB 8
+#define PHY_ANALOG_SYNTH10_PWDB_ICSPARE25_MASK 0x00000700
+#define PHY_ANALOG_SYNTH10_PWDB_ICSPARE25_GET(x) (((x) & 0x00000700) >> 8)
+#define PHY_ANALOG_SYNTH10_PWDB_ICSPARE25_SET(x) (((x) << 8) & 0x00000700)
+#define PHY_ANALOG_SYNTH10_SLOPE_ICPA1_MSB 13
+#define PHY_ANALOG_SYNTH10_SLOPE_ICPA1_LSB 11
+#define PHY_ANALOG_SYNTH10_SLOPE_ICPA1_MASK 0x00003800
+#define PHY_ANALOG_SYNTH10_SLOPE_ICPA1_GET(x) (((x) & 0x00003800) >> 11)
+#define PHY_ANALOG_SYNTH10_SLOPE_ICPA1_SET(x) (((x) << 11) & 0x00003800)
+#define PHY_ANALOG_SYNTH10_LOOP_ICPA1_MSB 17
+#define PHY_ANALOG_SYNTH10_LOOP_ICPA1_LSB 14
+#define PHY_ANALOG_SYNTH10_LOOP_ICPA1_MASK 0x0003c000
+#define PHY_ANALOG_SYNTH10_LOOP_ICPA1_GET(x) (((x) & 0x0003c000) >> 14)
+#define PHY_ANALOG_SYNTH10_LOOP_ICPA1_SET(x) (((x) << 14) & 0x0003c000)
+#define PHY_ANALOG_SYNTH10_LOOP_CSA1_MSB 21
+#define PHY_ANALOG_SYNTH10_LOOP_CSA1_LSB 18
+#define PHY_ANALOG_SYNTH10_LOOP_CSA1_MASK 0x003c0000
+#define PHY_ANALOG_SYNTH10_LOOP_CSA1_GET(x) (((x) & 0x003c0000) >> 18)
+#define PHY_ANALOG_SYNTH10_LOOP_CSA1_SET(x) (((x) << 18) & 0x003c0000)
+#define PHY_ANALOG_SYNTH10_LOOP_RSA1_MSB 26
+#define PHY_ANALOG_SYNTH10_LOOP_RSA1_LSB 22
+#define PHY_ANALOG_SYNTH10_LOOP_RSA1_MASK 0x07c00000
+#define PHY_ANALOG_SYNTH10_LOOP_RSA1_GET(x) (((x) & 0x07c00000) >> 22)
+#define PHY_ANALOG_SYNTH10_LOOP_RSA1_SET(x) (((x) << 22) & 0x07c00000)
+#define PHY_ANALOG_SYNTH10_LOOP_CPA1_MSB 31
+#define PHY_ANALOG_SYNTH10_LOOP_CPA1_LSB 27
+#define PHY_ANALOG_SYNTH10_LOOP_CPA1_MASK 0xf8000000
+#define PHY_ANALOG_SYNTH10_LOOP_CPA1_GET(x) (((x) & 0xf8000000) >> 27)
+#define PHY_ANALOG_SYNTH10_LOOP_CPA1_SET(x) (((x) << 27) & 0xf8000000)
+
+/* macros for SYNTH11 */
+#define PHY_ANALOG_SYNTH11_ADDRESS 0x000000a8
+#define PHY_ANALOG_SYNTH11_OFFSET 0x000000a8
+#define PHY_ANALOG_SYNTH11_SPARE11A_MSB 4
+#define PHY_ANALOG_SYNTH11_SPARE11A_LSB 0
+#define PHY_ANALOG_SYNTH11_SPARE11A_MASK 0x0000001f
+#define PHY_ANALOG_SYNTH11_SPARE11A_GET(x) (((x) & 0x0000001f) >> 0)
+#define PHY_ANALOG_SYNTH11_SPARE11A_SET(x) (((x) << 0) & 0x0000001f)
+#define PHY_ANALOG_SYNTH11_FORCE_LOBUF5G_ON_MSB 5
+#define PHY_ANALOG_SYNTH11_FORCE_LOBUF5G_ON_LSB 5
+#define PHY_ANALOG_SYNTH11_FORCE_LOBUF5G_ON_MASK 0x00000020
+#define PHY_ANALOG_SYNTH11_FORCE_LOBUF5G_ON_GET(x) (((x) & 0x00000020) >> 5)
+#define PHY_ANALOG_SYNTH11_FORCE_LOBUF5G_ON_SET(x) (((x) << 5) & 0x00000020)
+#define PHY_ANALOG_SYNTH11_LOREFSEL_MSB 7
+#define PHY_ANALOG_SYNTH11_LOREFSEL_LSB 6
+#define PHY_ANALOG_SYNTH11_LOREFSEL_MASK 0x000000c0
+#define PHY_ANALOG_SYNTH11_LOREFSEL_GET(x) (((x) & 0x000000c0) >> 6)
+#define PHY_ANALOG_SYNTH11_LOREFSEL_SET(x) (((x) << 6) & 0x000000c0)
+#define PHY_ANALOG_SYNTH11_LOBUF2GTUNE_MSB 9
+#define PHY_ANALOG_SYNTH11_LOBUF2GTUNE_LSB 8
+#define PHY_ANALOG_SYNTH11_LOBUF2GTUNE_MASK 0x00000300
+#define PHY_ANALOG_SYNTH11_LOBUF2GTUNE_GET(x) (((x) & 0x00000300) >> 8)
+#define PHY_ANALOG_SYNTH11_LOBUF2GTUNE_SET(x) (((x) << 8) & 0x00000300)
+#define PHY_ANALOG_SYNTH11_CPSTEERING_MODE_MSB 10
+#define PHY_ANALOG_SYNTH11_CPSTEERING_MODE_LSB 10
+#define PHY_ANALOG_SYNTH11_CPSTEERING_MODE_MASK 0x00000400
+#define PHY_ANALOG_SYNTH11_CPSTEERING_MODE_GET(x) (((x) & 0x00000400) >> 10)
+#define PHY_ANALOG_SYNTH11_CPSTEERING_MODE_SET(x) (((x) << 10) & 0x00000400)
+#define PHY_ANALOG_SYNTH11_SLOPE_ICPA2_MSB 13
+#define PHY_ANALOG_SYNTH11_SLOPE_ICPA2_LSB 11
+#define PHY_ANALOG_SYNTH11_SLOPE_ICPA2_MASK 0x00003800
+#define PHY_ANALOG_SYNTH11_SLOPE_ICPA2_GET(x) (((x) & 0x00003800) >> 11)
+#define PHY_ANALOG_SYNTH11_SLOPE_ICPA2_SET(x) (((x) << 11) & 0x00003800)
+#define PHY_ANALOG_SYNTH11_LOOP_ICPA2_MSB 17
+#define PHY_ANALOG_SYNTH11_LOOP_ICPA2_LSB 14
+#define PHY_ANALOG_SYNTH11_LOOP_ICPA2_MASK 0x0003c000
+#define PHY_ANALOG_SYNTH11_LOOP_ICPA2_GET(x) (((x) & 0x0003c000) >> 14)
+#define PHY_ANALOG_SYNTH11_LOOP_ICPA2_SET(x) (((x) << 14) & 0x0003c000)
+#define PHY_ANALOG_SYNTH11_LOOP_CSA2_MSB 21
+#define PHY_ANALOG_SYNTH11_LOOP_CSA2_LSB 18
+#define PHY_ANALOG_SYNTH11_LOOP_CSA2_MASK 0x003c0000
+#define PHY_ANALOG_SYNTH11_LOOP_CSA2_GET(x) (((x) & 0x003c0000) >> 18)
+#define PHY_ANALOG_SYNTH11_LOOP_CSA2_SET(x) (((x) << 18) & 0x003c0000)
+#define PHY_ANALOG_SYNTH11_LOOP_RSA2_MSB 26
+#define PHY_ANALOG_SYNTH11_LOOP_RSA2_LSB 22
+#define PHY_ANALOG_SYNTH11_LOOP_RSA2_MASK 0x07c00000
+#define PHY_ANALOG_SYNTH11_LOOP_RSA2_GET(x) (((x) & 0x07c00000) >> 22)
+#define PHY_ANALOG_SYNTH11_LOOP_RSA2_SET(x) (((x) << 22) & 0x07c00000)
+#define PHY_ANALOG_SYNTH11_LOOP_CPA2_MSB 31
+#define PHY_ANALOG_SYNTH11_LOOP_CPA2_LSB 27
+#define PHY_ANALOG_SYNTH11_LOOP_CPA2_MASK 0xf8000000
+#define PHY_ANALOG_SYNTH11_LOOP_CPA2_GET(x) (((x) & 0xf8000000) >> 27)
+#define PHY_ANALOG_SYNTH11_LOOP_CPA2_SET(x) (((x) << 27) & 0xf8000000)
+
+/* macros for SYNTH12 */
+#define PHY_ANALOG_SYNTH12_ADDRESS 0x000000ac
+#define PHY_ANALOG_SYNTH12_OFFSET 0x000000ac
+#define PHY_ANALOG_SYNTH12_SPARE12A_MSB 9
+#define PHY_ANALOG_SYNTH12_SPARE12A_LSB 0
+#define PHY_ANALOG_SYNTH12_SPARE12A_MASK 0x000003ff
+#define PHY_ANALOG_SYNTH12_SPARE12A_GET(x) (((x) & 0x000003ff) >> 0)
+#define PHY_ANALOG_SYNTH12_SPARE12A_SET(x) (((x) << 0) & 0x000003ff)
+#define PHY_ANALOG_SYNTH12_LOOPLEAKCUR_FRACN_MSB 13
+#define PHY_ANALOG_SYNTH12_LOOPLEAKCUR_FRACN_LSB 10
+#define PHY_ANALOG_SYNTH12_LOOPLEAKCUR_FRACN_MASK 0x00003c00
+#define PHY_ANALOG_SYNTH12_LOOPLEAKCUR_FRACN_GET(x) (((x) & 0x00003c00) >> 10)
+#define PHY_ANALOG_SYNTH12_LOOPLEAKCUR_FRACN_SET(x) (((x) << 10) & 0x00003c00)
+#define PHY_ANALOG_SYNTH12_CPLOWLK_FRACN_MSB 14
+#define PHY_ANALOG_SYNTH12_CPLOWLK_FRACN_LSB 14
+#define PHY_ANALOG_SYNTH12_CPLOWLK_FRACN_MASK 0x00004000
+#define PHY_ANALOG_SYNTH12_CPLOWLK_FRACN_GET(x) (((x) & 0x00004000) >> 14)
+#define PHY_ANALOG_SYNTH12_CPLOWLK_FRACN_SET(x) (((x) << 14) & 0x00004000)
+#define PHY_ANALOG_SYNTH12_CPBIAS_FRACN_MSB 16
+#define PHY_ANALOG_SYNTH12_CPBIAS_FRACN_LSB 15
+#define PHY_ANALOG_SYNTH12_CPBIAS_FRACN_MASK 0x00018000
+#define PHY_ANALOG_SYNTH12_CPBIAS_FRACN_GET(x) (((x) & 0x00018000) >> 15)
+#define PHY_ANALOG_SYNTH12_CPBIAS_FRACN_SET(x) (((x) << 15) & 0x00018000)
+#define PHY_ANALOG_SYNTH12_SYNTHDIGOUTEN_MSB 17
+#define PHY_ANALOG_SYNTH12_SYNTHDIGOUTEN_LSB 17
+#define PHY_ANALOG_SYNTH12_SYNTHDIGOUTEN_MASK 0x00020000
+#define PHY_ANALOG_SYNTH12_SYNTHDIGOUTEN_GET(x) (((x) & 0x00020000) >> 17)
+#define PHY_ANALOG_SYNTH12_SYNTHDIGOUTEN_SET(x) (((x) << 17) & 0x00020000)
+#define PHY_ANALOG_SYNTH12_STRCONT_MSB 18
+#define PHY_ANALOG_SYNTH12_STRCONT_LSB 18
+#define PHY_ANALOG_SYNTH12_STRCONT_MASK 0x00040000
+#define PHY_ANALOG_SYNTH12_STRCONT_GET(x) (((x) & 0x00040000) >> 18)
+#define PHY_ANALOG_SYNTH12_STRCONT_SET(x) (((x) << 18) & 0x00040000)
+#define PHY_ANALOG_SYNTH12_VREFMUL3_MSB 22
+#define PHY_ANALOG_SYNTH12_VREFMUL3_LSB 19
+#define PHY_ANALOG_SYNTH12_VREFMUL3_MASK 0x00780000
+#define PHY_ANALOG_SYNTH12_VREFMUL3_GET(x) (((x) & 0x00780000) >> 19)
+#define PHY_ANALOG_SYNTH12_VREFMUL3_SET(x) (((x) << 19) & 0x00780000)
+#define PHY_ANALOG_SYNTH12_VREFMUL2_MSB 26
+#define PHY_ANALOG_SYNTH12_VREFMUL2_LSB 23
+#define PHY_ANALOG_SYNTH12_VREFMUL2_MASK 0x07800000
+#define PHY_ANALOG_SYNTH12_VREFMUL2_GET(x) (((x) & 0x07800000) >> 23)
+#define PHY_ANALOG_SYNTH12_VREFMUL2_SET(x) (((x) << 23) & 0x07800000)
+#define PHY_ANALOG_SYNTH12_VREFMUL1_MSB 30
+#define PHY_ANALOG_SYNTH12_VREFMUL1_LSB 27
+#define PHY_ANALOG_SYNTH12_VREFMUL1_MASK 0x78000000
+#define PHY_ANALOG_SYNTH12_VREFMUL1_GET(x) (((x) & 0x78000000) >> 27)
+#define PHY_ANALOG_SYNTH12_VREFMUL1_SET(x) (((x) << 27) & 0x78000000)
+#define PHY_ANALOG_SYNTH12_CLK_DOUBLER_EN_MSB 31
+#define PHY_ANALOG_SYNTH12_CLK_DOUBLER_EN_LSB 31
+#define PHY_ANALOG_SYNTH12_CLK_DOUBLER_EN_MASK 0x80000000
+#define PHY_ANALOG_SYNTH12_CLK_DOUBLER_EN_GET(x) (((x) & 0x80000000) >> 31)
+#define PHY_ANALOG_SYNTH12_CLK_DOUBLER_EN_SET(x) (((x) << 31) & 0x80000000)
+
+/* macros for SYNTH13 */
+#define PHY_ANALOG_SYNTH13_ADDRESS 0x000000b0
+#define PHY_ANALOG_SYNTH13_OFFSET 0x000000b0
+#define PHY_ANALOG_SYNTH13_SPARE13A_MSB 0
+#define PHY_ANALOG_SYNTH13_SPARE13A_LSB 0
+#define PHY_ANALOG_SYNTH13_SPARE13A_MASK 0x00000001
+#define PHY_ANALOG_SYNTH13_SPARE13A_GET(x) (((x) & 0x00000001) >> 0)
+#define PHY_ANALOG_SYNTH13_SPARE13A_SET(x) (((x) << 0) & 0x00000001)
+#define PHY_ANALOG_SYNTH13_SLOPE_ICPA_FRACN_MSB 3
+#define PHY_ANALOG_SYNTH13_SLOPE_ICPA_FRACN_LSB 1
+#define PHY_ANALOG_SYNTH13_SLOPE_ICPA_FRACN_MASK 0x0000000e
+#define PHY_ANALOG_SYNTH13_SLOPE_ICPA_FRACN_GET(x) (((x) & 0x0000000e) >> 1)
+#define PHY_ANALOG_SYNTH13_SLOPE_ICPA_FRACN_SET(x) (((x) << 1) & 0x0000000e)
+#define PHY_ANALOG_SYNTH13_LOOP_ICPA_FRACN_MSB 7
+#define PHY_ANALOG_SYNTH13_LOOP_ICPA_FRACN_LSB 4
+#define PHY_ANALOG_SYNTH13_LOOP_ICPA_FRACN_MASK 0x000000f0
+#define PHY_ANALOG_SYNTH13_LOOP_ICPA_FRACN_GET(x) (((x) & 0x000000f0) >> 4)
+#define PHY_ANALOG_SYNTH13_LOOP_ICPA_FRACN_SET(x) (((x) << 4) & 0x000000f0)
+#define PHY_ANALOG_SYNTH13_LOOP_CSA_FRACN_MSB 11
+#define PHY_ANALOG_SYNTH13_LOOP_CSA_FRACN_LSB 8
+#define PHY_ANALOG_SYNTH13_LOOP_CSA_FRACN_MASK 0x00000f00
+#define PHY_ANALOG_SYNTH13_LOOP_CSA_FRACN_GET(x) (((x) & 0x00000f00) >> 8)
+#define PHY_ANALOG_SYNTH13_LOOP_CSA_FRACN_SET(x) (((x) << 8) & 0x00000f00)
+#define PHY_ANALOG_SYNTH13_LOOP_RSA_FRACN_MSB 16
+#define PHY_ANALOG_SYNTH13_LOOP_RSA_FRACN_LSB 12
+#define PHY_ANALOG_SYNTH13_LOOP_RSA_FRACN_MASK 0x0001f000
+#define PHY_ANALOG_SYNTH13_LOOP_RSA_FRACN_GET(x) (((x) & 0x0001f000) >> 12)
+#define PHY_ANALOG_SYNTH13_LOOP_RSA_FRACN_SET(x) (((x) << 12) & 0x0001f000)
+#define PHY_ANALOG_SYNTH13_LOOP_CPA_FRACN_MSB 21
+#define PHY_ANALOG_SYNTH13_LOOP_CPA_FRACN_LSB 17
+#define PHY_ANALOG_SYNTH13_LOOP_CPA_FRACN_MASK 0x003e0000
+#define PHY_ANALOG_SYNTH13_LOOP_CPA_FRACN_GET(x) (((x) & 0x003e0000) >> 17)
+#define PHY_ANALOG_SYNTH13_LOOP_CPA_FRACN_SET(x) (((x) << 17) & 0x003e0000)
+#define PHY_ANALOG_SYNTH13_LOOP_3RD_ORDER_RA_FRACN_MSB 26
+#define PHY_ANALOG_SYNTH13_LOOP_3RD_ORDER_RA_FRACN_LSB 22
+#define PHY_ANALOG_SYNTH13_LOOP_3RD_ORDER_RA_FRACN_MASK 0x07c00000
+#define PHY_ANALOG_SYNTH13_LOOP_3RD_ORDER_RA_FRACN_GET(x) (((x) & 0x07c00000) >> 22)
+#define PHY_ANALOG_SYNTH13_LOOP_3RD_ORDER_RA_FRACN_SET(x) (((x) << 22) & 0x07c00000)
+#define PHY_ANALOG_SYNTH13_REFDIVA_FRACN_MSB 31
+#define PHY_ANALOG_SYNTH13_REFDIVA_FRACN_LSB 27
+#define PHY_ANALOG_SYNTH13_REFDIVA_FRACN_MASK 0xf8000000
+#define PHY_ANALOG_SYNTH13_REFDIVA_FRACN_GET(x) (((x) & 0xf8000000) >> 27)
+#define PHY_ANALOG_SYNTH13_REFDIVA_FRACN_SET(x) (((x) << 27) & 0xf8000000)
+
+/* macros for SYNTH14 */
+#define PHY_ANALOG_SYNTH14_ADDRESS 0x000000b4
+#define PHY_ANALOG_SYNTH14_OFFSET 0x000000b4
+#define PHY_ANALOG_SYNTH14_SPARE14A_MSB 1
+#define PHY_ANALOG_SYNTH14_SPARE14A_LSB 0
+#define PHY_ANALOG_SYNTH14_SPARE14A_MASK 0x00000003
+#define PHY_ANALOG_SYNTH14_SPARE14A_GET(x) (((x) & 0x00000003) >> 0)
+#define PHY_ANALOG_SYNTH14_SPARE14A_SET(x) (((x) << 0) & 0x00000003)
+#define PHY_ANALOG_SYNTH14_LOBUF5GTUNE_3_MSB 3
+#define PHY_ANALOG_SYNTH14_LOBUF5GTUNE_3_LSB 2
+#define PHY_ANALOG_SYNTH14_LOBUF5GTUNE_3_MASK 0x0000000c
+#define PHY_ANALOG_SYNTH14_LOBUF5GTUNE_3_GET(x) (((x) & 0x0000000c) >> 2)
+#define PHY_ANALOG_SYNTH14_LOBUF5GTUNE_3_SET(x) (((x) << 2) & 0x0000000c)
+#define PHY_ANALOG_SYNTH14_LOBUF2GTUNE_3_MSB 5
+#define PHY_ANALOG_SYNTH14_LOBUF2GTUNE_3_LSB 4
+#define PHY_ANALOG_SYNTH14_LOBUF2GTUNE_3_MASK 0x00000030
+#define PHY_ANALOG_SYNTH14_LOBUF2GTUNE_3_GET(x) (((x) & 0x00000030) >> 4)
+#define PHY_ANALOG_SYNTH14_LOBUF2GTUNE_3_SET(x) (((x) << 4) & 0x00000030)
+#define PHY_ANALOG_SYNTH14_LOBUF5GTUNE_2_MSB 7
+#define PHY_ANALOG_SYNTH14_LOBUF5GTUNE_2_LSB 6
+#define PHY_ANALOG_SYNTH14_LOBUF5GTUNE_2_MASK 0x000000c0
+#define PHY_ANALOG_SYNTH14_LOBUF5GTUNE_2_GET(x) (((x) & 0x000000c0) >> 6)
+#define PHY_ANALOG_SYNTH14_LOBUF5GTUNE_2_SET(x) (((x) << 6) & 0x000000c0)
+#define PHY_ANALOG_SYNTH14_LOBUF2GTUNE_2_MSB 9
+#define PHY_ANALOG_SYNTH14_LOBUF2GTUNE_2_LSB 8
+#define PHY_ANALOG_SYNTH14_LOBUF2GTUNE_2_MASK 0x00000300
+#define PHY_ANALOG_SYNTH14_LOBUF2GTUNE_2_GET(x) (((x) & 0x00000300) >> 8)
+#define PHY_ANALOG_SYNTH14_LOBUF2GTUNE_2_SET(x) (((x) << 8) & 0x00000300)
+#define PHY_ANALOG_SYNTH14_PWD_LOBUF5G_3_MSB 10
+#define PHY_ANALOG_SYNTH14_PWD_LOBUF5G_3_LSB 10
+#define PHY_ANALOG_SYNTH14_PWD_LOBUF5G_3_MASK 0x00000400
+#define PHY_ANALOG_SYNTH14_PWD_LOBUF5G_3_GET(x) (((x) & 0x00000400) >> 10)
+#define PHY_ANALOG_SYNTH14_PWD_LOBUF5G_3_SET(x) (((x) << 10) & 0x00000400)
+#define PHY_ANALOG_SYNTH14_PWD_LOBUF2G_3_MSB 11
+#define PHY_ANALOG_SYNTH14_PWD_LOBUF2G_3_LSB 11
+#define PHY_ANALOG_SYNTH14_PWD_LOBUF2G_3_MASK 0x00000800
+#define PHY_ANALOG_SYNTH14_PWD_LOBUF2G_3_GET(x) (((x) & 0x00000800) >> 11)
+#define PHY_ANALOG_SYNTH14_PWD_LOBUF2G_3_SET(x) (((x) << 11) & 0x00000800)
+#define PHY_ANALOG_SYNTH14_PWD_LOBUF5G_2_MSB 12
+#define PHY_ANALOG_SYNTH14_PWD_LOBUF5G_2_LSB 12
+#define PHY_ANALOG_SYNTH14_PWD_LOBUF5G_2_MASK 0x00001000
+#define PHY_ANALOG_SYNTH14_PWD_LOBUF5G_2_GET(x) (((x) & 0x00001000) >> 12)
+#define PHY_ANALOG_SYNTH14_PWD_LOBUF5G_2_SET(x) (((x) << 12) & 0x00001000)
+#define PHY_ANALOG_SYNTH14_PWD_LOBUF2G_2_MSB 13
+#define PHY_ANALOG_SYNTH14_PWD_LOBUF2G_2_LSB 13
+#define PHY_ANALOG_SYNTH14_PWD_LOBUF2G_2_MASK 0x00002000
+#define PHY_ANALOG_SYNTH14_PWD_LOBUF2G_2_GET(x) (((x) & 0x00002000) >> 13)
+#define PHY_ANALOG_SYNTH14_PWD_LOBUF2G_2_SET(x) (((x) << 13) & 0x00002000)
+#define PHY_ANALOG_SYNTH14_PWUPLO23_PD_MSB 16
+#define PHY_ANALOG_SYNTH14_PWUPLO23_PD_LSB 14
+#define PHY_ANALOG_SYNTH14_PWUPLO23_PD_MASK 0x0001c000
+#define PHY_ANALOG_SYNTH14_PWUPLO23_PD_GET(x) (((x) & 0x0001c000) >> 14)
+#define PHY_ANALOG_SYNTH14_PWUPLO23_PD_SET(x) (((x) << 14) & 0x0001c000)
+#define PHY_ANALOG_SYNTH14_PWDB_ICLOBUF5G50_3_MSB 19
+#define PHY_ANALOG_SYNTH14_PWDB_ICLOBUF5G50_3_LSB 17
+#define PHY_ANALOG_SYNTH14_PWDB_ICLOBUF5G50_3_MASK 0x000e0000
+#define PHY_ANALOG_SYNTH14_PWDB_ICLOBUF5G50_3_GET(x) (((x) & 0x000e0000) >> 17)
+#define PHY_ANALOG_SYNTH14_PWDB_ICLOBUF5G50_3_SET(x) (((x) << 17) & 0x000e0000)
+#define PHY_ANALOG_SYNTH14_PWDB_ICLOBUF2G50_3_MSB 22
+#define PHY_ANALOG_SYNTH14_PWDB_ICLOBUF2G50_3_LSB 20
+#define PHY_ANALOG_SYNTH14_PWDB_ICLOBUF2G50_3_MASK 0x00700000
+#define PHY_ANALOG_SYNTH14_PWDB_ICLOBUF2G50_3_GET(x) (((x) & 0x00700000) >> 20)
+#define PHY_ANALOG_SYNTH14_PWDB_ICLOBUF2G50_3_SET(x) (((x) << 20) & 0x00700000)
+#define PHY_ANALOG_SYNTH14_PWDB_ICLOBUF5G50_2_MSB 25
+#define PHY_ANALOG_SYNTH14_PWDB_ICLOBUF5G50_2_LSB 23
+#define PHY_ANALOG_SYNTH14_PWDB_ICLOBUF5G50_2_MASK 0x03800000
+#define PHY_ANALOG_SYNTH14_PWDB_ICLOBUF5G50_2_GET(x) (((x) & 0x03800000) >> 23)
+#define PHY_ANALOG_SYNTH14_PWDB_ICLOBUF5G50_2_SET(x) (((x) << 23) & 0x03800000)
+#define PHY_ANALOG_SYNTH14_PWDB_ICLOBUF2G50_2_MSB 28
+#define PHY_ANALOG_SYNTH14_PWDB_ICLOBUF2G50_2_LSB 26
+#define PHY_ANALOG_SYNTH14_PWDB_ICLOBUF2G50_2_MASK 0x1c000000
+#define PHY_ANALOG_SYNTH14_PWDB_ICLOBUF2G50_2_GET(x) (((x) & 0x1c000000) >> 26)
+#define PHY_ANALOG_SYNTH14_PWDB_ICLOBUF2G50_2_SET(x) (((x) << 26) & 0x1c000000)
+#define PHY_ANALOG_SYNTH14_PWDB_ICLVLSHFT_MSB 31
+#define PHY_ANALOG_SYNTH14_PWDB_ICLVLSHFT_LSB 29
+#define PHY_ANALOG_SYNTH14_PWDB_ICLVLSHFT_MASK 0xe0000000
+#define PHY_ANALOG_SYNTH14_PWDB_ICLVLSHFT_GET(x) (((x) & 0xe0000000) >> 29)
+#define PHY_ANALOG_SYNTH14_PWDB_ICLVLSHFT_SET(x) (((x) << 29) & 0xe0000000)
+
+/* macros for BIAS1 */
+#define PHY_ANALOG_BIAS1_ADDRESS 0x000000c0
+#define PHY_ANALOG_BIAS1_OFFSET 0x000000c0
+#define PHY_ANALOG_BIAS1_SPARE1_MSB 6
+#define PHY_ANALOG_BIAS1_SPARE1_LSB 0
+#define PHY_ANALOG_BIAS1_SPARE1_MASK 0x0000007f
+#define PHY_ANALOG_BIAS1_SPARE1_GET(x) (((x) & 0x0000007f) >> 0)
+#define PHY_ANALOG_BIAS1_SPARE1_SET(x) (((x) << 0) & 0x0000007f)
+#define PHY_ANALOG_BIAS1_PWD_IC25V2IQ_MSB 9
+#define PHY_ANALOG_BIAS1_PWD_IC25V2IQ_LSB 7
+#define PHY_ANALOG_BIAS1_PWD_IC25V2IQ_MASK 0x00000380
+#define PHY_ANALOG_BIAS1_PWD_IC25V2IQ_GET(x) (((x) & 0x00000380) >> 7)
+#define PHY_ANALOG_BIAS1_PWD_IC25V2IQ_SET(x) (((x) << 7) & 0x00000380)
+#define PHY_ANALOG_BIAS1_PWD_IC25V2II_MSB 12
+#define PHY_ANALOG_BIAS1_PWD_IC25V2II_LSB 10
+#define PHY_ANALOG_BIAS1_PWD_IC25V2II_MASK 0x00001c00
+#define PHY_ANALOG_BIAS1_PWD_IC25V2II_GET(x) (((x) & 0x00001c00) >> 10)
+#define PHY_ANALOG_BIAS1_PWD_IC25V2II_SET(x) (((x) << 10) & 0x00001c00)
+#define PHY_ANALOG_BIAS1_PWD_IC25BB_MSB 15
+#define PHY_ANALOG_BIAS1_PWD_IC25BB_LSB 13
+#define PHY_ANALOG_BIAS1_PWD_IC25BB_MASK 0x0000e000
+#define PHY_ANALOG_BIAS1_PWD_IC25BB_GET(x) (((x) & 0x0000e000) >> 13)
+#define PHY_ANALOG_BIAS1_PWD_IC25BB_SET(x) (((x) << 13) & 0x0000e000)
+#define PHY_ANALOG_BIAS1_PWD_IC25DAC_MSB 18
+#define PHY_ANALOG_BIAS1_PWD_IC25DAC_LSB 16
+#define PHY_ANALOG_BIAS1_PWD_IC25DAC_MASK 0x00070000
+#define PHY_ANALOG_BIAS1_PWD_IC25DAC_GET(x) (((x) & 0x00070000) >> 16)
+#define PHY_ANALOG_BIAS1_PWD_IC25DAC_SET(x) (((x) << 16) & 0x00070000)
+#define PHY_ANALOG_BIAS1_PWD_IC25FIR_MSB 21
+#define PHY_ANALOG_BIAS1_PWD_IC25FIR_LSB 19
+#define PHY_ANALOG_BIAS1_PWD_IC25FIR_MASK 0x00380000
+#define PHY_ANALOG_BIAS1_PWD_IC25FIR_GET(x) (((x) & 0x00380000) >> 19)
+#define PHY_ANALOG_BIAS1_PWD_IC25FIR_SET(x) (((x) << 19) & 0x00380000)
+#define PHY_ANALOG_BIAS1_PWD_IC25ADC_MSB 24
+#define PHY_ANALOG_BIAS1_PWD_IC25ADC_LSB 22
+#define PHY_ANALOG_BIAS1_PWD_IC25ADC_MASK 0x01c00000
+#define PHY_ANALOG_BIAS1_PWD_IC25ADC_GET(x) (((x) & 0x01c00000) >> 22)
+#define PHY_ANALOG_BIAS1_PWD_IC25ADC_SET(x) (((x) << 22) & 0x01c00000)
+#define PHY_ANALOG_BIAS1_BIAS_SEL_MSB 31
+#define PHY_ANALOG_BIAS1_BIAS_SEL_LSB 25
+#define PHY_ANALOG_BIAS1_BIAS_SEL_MASK 0xfe000000
+#define PHY_ANALOG_BIAS1_BIAS_SEL_GET(x) (((x) & 0xfe000000) >> 25)
+#define PHY_ANALOG_BIAS1_BIAS_SEL_SET(x) (((x) << 25) & 0xfe000000)
+
+/* macros for BIAS2 */
+#define PHY_ANALOG_BIAS2_ADDRESS 0x000000c4
+#define PHY_ANALOG_BIAS2_OFFSET 0x000000c4
+#define PHY_ANALOG_BIAS2_SPARE2_MSB 4
+#define PHY_ANALOG_BIAS2_SPARE2_LSB 0
+#define PHY_ANALOG_BIAS2_SPARE2_MASK 0x0000001f
+#define PHY_ANALOG_BIAS2_SPARE2_GET(x) (((x) & 0x0000001f) >> 0)
+#define PHY_ANALOG_BIAS2_SPARE2_SET(x) (((x) << 0) & 0x0000001f)
+#define PHY_ANALOG_BIAS2_PWD_IC25XPA_MSB 7
+#define PHY_ANALOG_BIAS2_PWD_IC25XPA_LSB 5
+#define PHY_ANALOG_BIAS2_PWD_IC25XPA_MASK 0x000000e0
+#define PHY_ANALOG_BIAS2_PWD_IC25XPA_GET(x) (((x) & 0x000000e0) >> 5)
+#define PHY_ANALOG_BIAS2_PWD_IC25XPA_SET(x) (((x) << 5) & 0x000000e0)
+#define PHY_ANALOG_BIAS2_PWD_IC25XTAL_MSB 10
+#define PHY_ANALOG_BIAS2_PWD_IC25XTAL_LSB 8
+#define PHY_ANALOG_BIAS2_PWD_IC25XTAL_MASK 0x00000700
+#define PHY_ANALOG_BIAS2_PWD_IC25XTAL_GET(x) (((x) & 0x00000700) >> 8)
+#define PHY_ANALOG_BIAS2_PWD_IC25XTAL_SET(x) (((x) << 8) & 0x00000700)
+#define PHY_ANALOG_BIAS2_PWD_IC25TXRF_MSB 13
+#define PHY_ANALOG_BIAS2_PWD_IC25TXRF_LSB 11
+#define PHY_ANALOG_BIAS2_PWD_IC25TXRF_MASK 0x00003800
+#define PHY_ANALOG_BIAS2_PWD_IC25TXRF_GET(x) (((x) & 0x00003800) >> 11)
+#define PHY_ANALOG_BIAS2_PWD_IC25TXRF_SET(x) (((x) << 11) & 0x00003800)
+#define PHY_ANALOG_BIAS2_PWD_IC25RXRF_MSB 16
+#define PHY_ANALOG_BIAS2_PWD_IC25RXRF_LSB 14
+#define PHY_ANALOG_BIAS2_PWD_IC25RXRF_MASK 0x0001c000
+#define PHY_ANALOG_BIAS2_PWD_IC25RXRF_GET(x) (((x) & 0x0001c000) >> 14)
+#define PHY_ANALOG_BIAS2_PWD_IC25RXRF_SET(x) (((x) << 14) & 0x0001c000)
+#define PHY_ANALOG_BIAS2_PWD_IC25SYNTH_MSB 19
+#define PHY_ANALOG_BIAS2_PWD_IC25SYNTH_LSB 17
+#define PHY_ANALOG_BIAS2_PWD_IC25SYNTH_MASK 0x000e0000
+#define PHY_ANALOG_BIAS2_PWD_IC25SYNTH_GET(x) (((x) & 0x000e0000) >> 17)
+#define PHY_ANALOG_BIAS2_PWD_IC25SYNTH_SET(x) (((x) << 17) & 0x000e0000)
+#define PHY_ANALOG_BIAS2_PWD_IC25PLLREG_MSB 22
+#define PHY_ANALOG_BIAS2_PWD_IC25PLLREG_LSB 20
+#define PHY_ANALOG_BIAS2_PWD_IC25PLLREG_MASK 0x00700000
+#define PHY_ANALOG_BIAS2_PWD_IC25PLLREG_GET(x) (((x) & 0x00700000) >> 20)
+#define PHY_ANALOG_BIAS2_PWD_IC25PLLREG_SET(x) (((x) << 20) & 0x00700000)
+#define PHY_ANALOG_BIAS2_PWD_IC25PLLCP2_MSB 25
+#define PHY_ANALOG_BIAS2_PWD_IC25PLLCP2_LSB 23
+#define PHY_ANALOG_BIAS2_PWD_IC25PLLCP2_MASK 0x03800000
+#define PHY_ANALOG_BIAS2_PWD_IC25PLLCP2_GET(x) (((x) & 0x03800000) >> 23)
+#define PHY_ANALOG_BIAS2_PWD_IC25PLLCP2_SET(x) (((x) << 23) & 0x03800000)
+#define PHY_ANALOG_BIAS2_PWD_IC25PLLCP_MSB 28
+#define PHY_ANALOG_BIAS2_PWD_IC25PLLCP_LSB 26
+#define PHY_ANALOG_BIAS2_PWD_IC25PLLCP_MASK 0x1c000000
+#define PHY_ANALOG_BIAS2_PWD_IC25PLLCP_GET(x) (((x) & 0x1c000000) >> 26)
+#define PHY_ANALOG_BIAS2_PWD_IC25PLLCP_SET(x) (((x) << 26) & 0x1c000000)
+#define PHY_ANALOG_BIAS2_PWD_IC25PLLGM_MSB 31
+#define PHY_ANALOG_BIAS2_PWD_IC25PLLGM_LSB 29
+#define PHY_ANALOG_BIAS2_PWD_IC25PLLGM_MASK 0xe0000000
+#define PHY_ANALOG_BIAS2_PWD_IC25PLLGM_GET(x) (((x) & 0xe0000000) >> 29)
+#define PHY_ANALOG_BIAS2_PWD_IC25PLLGM_SET(x) (((x) << 29) & 0xe0000000)
+
+/* macros for BIAS3 */
+#define PHY_ANALOG_BIAS3_ADDRESS 0x000000c8
+#define PHY_ANALOG_BIAS3_OFFSET 0x000000c8
+#define PHY_ANALOG_BIAS3_SPARE3_MSB 1
+#define PHY_ANALOG_BIAS3_SPARE3_LSB 0
+#define PHY_ANALOG_BIAS3_SPARE3_MASK 0x00000003
+#define PHY_ANALOG_BIAS3_SPARE3_GET(x) (((x) & 0x00000003) >> 0)
+#define PHY_ANALOG_BIAS3_SPARE3_SET(x) (((x) << 0) & 0x00000003)
+#define PHY_ANALOG_BIAS3_PWD_IR25SAR_MSB 4
+#define PHY_ANALOG_BIAS3_PWD_IR25SAR_LSB 2
+#define PHY_ANALOG_BIAS3_PWD_IR25SAR_MASK 0x0000001c
+#define PHY_ANALOG_BIAS3_PWD_IR25SAR_GET(x) (((x) & 0x0000001c) >> 2)
+#define PHY_ANALOG_BIAS3_PWD_IR25SAR_SET(x) (((x) << 2) & 0x0000001c)
+#define PHY_ANALOG_BIAS3_PWD_IR25TXRF_MSB 7
+#define PHY_ANALOG_BIAS3_PWD_IR25TXRF_LSB 5
+#define PHY_ANALOG_BIAS3_PWD_IR25TXRF_MASK 0x000000e0
+#define PHY_ANALOG_BIAS3_PWD_IR25TXRF_GET(x) (((x) & 0x000000e0) >> 5)
+#define PHY_ANALOG_BIAS3_PWD_IR25TXRF_SET(x) (((x) << 5) & 0x000000e0)
+#define PHY_ANALOG_BIAS3_PWD_IR25RXRF_MSB 10
+#define PHY_ANALOG_BIAS3_PWD_IR25RXRF_LSB 8
+#define PHY_ANALOG_BIAS3_PWD_IR25RXRF_MASK 0x00000700
+#define PHY_ANALOG_BIAS3_PWD_IR25RXRF_GET(x) (((x) & 0x00000700) >> 8)
+#define PHY_ANALOG_BIAS3_PWD_IR25RXRF_SET(x) (((x) << 8) & 0x00000700)
+#define PHY_ANALOG_BIAS3_PWD_IR25SYNTH_MSB 13
+#define PHY_ANALOG_BIAS3_PWD_IR25SYNTH_LSB 11
+#define PHY_ANALOG_BIAS3_PWD_IR25SYNTH_MASK 0x00003800
+#define PHY_ANALOG_BIAS3_PWD_IR25SYNTH_GET(x) (((x) & 0x00003800) >> 11)
+#define PHY_ANALOG_BIAS3_PWD_IR25SYNTH_SET(x) (((x) << 11) & 0x00003800)
+#define PHY_ANALOG_BIAS3_PWD_IR25PLLREG_MSB 16
+#define PHY_ANALOG_BIAS3_PWD_IR25PLLREG_LSB 14
+#define PHY_ANALOG_BIAS3_PWD_IR25PLLREG_MASK 0x0001c000
+#define PHY_ANALOG_BIAS3_PWD_IR25PLLREG_GET(x) (((x) & 0x0001c000) >> 14)
+#define PHY_ANALOG_BIAS3_PWD_IR25PLLREG_SET(x) (((x) << 14) & 0x0001c000)
+#define PHY_ANALOG_BIAS3_PWD_IR25BB_MSB 19
+#define PHY_ANALOG_BIAS3_PWD_IR25BB_LSB 17
+#define PHY_ANALOG_BIAS3_PWD_IR25BB_MASK 0x000e0000
+#define PHY_ANALOG_BIAS3_PWD_IR25BB_GET(x) (((x) & 0x000e0000) >> 17)
+#define PHY_ANALOG_BIAS3_PWD_IR25BB_SET(x) (((x) << 17) & 0x000e0000)
+#define PHY_ANALOG_BIAS3_PWD_IR50DAC_MSB 22
+#define PHY_ANALOG_BIAS3_PWD_IR50DAC_LSB 20
+#define PHY_ANALOG_BIAS3_PWD_IR50DAC_MASK 0x00700000
+#define PHY_ANALOG_BIAS3_PWD_IR50DAC_GET(x) (((x) & 0x00700000) >> 20)
+#define PHY_ANALOG_BIAS3_PWD_IR50DAC_SET(x) (((x) << 20) & 0x00700000)
+#define PHY_ANALOG_BIAS3_PWD_IR25DAC_MSB 25
+#define PHY_ANALOG_BIAS3_PWD_IR25DAC_LSB 23
+#define PHY_ANALOG_BIAS3_PWD_IR25DAC_MASK 0x03800000
+#define PHY_ANALOG_BIAS3_PWD_IR25DAC_GET(x) (((x) & 0x03800000) >> 23)
+#define PHY_ANALOG_BIAS3_PWD_IR25DAC_SET(x) (((x) << 23) & 0x03800000)
+#define PHY_ANALOG_BIAS3_PWD_IR25FIR_MSB 28
+#define PHY_ANALOG_BIAS3_PWD_IR25FIR_LSB 26
+#define PHY_ANALOG_BIAS3_PWD_IR25FIR_MASK 0x1c000000
+#define PHY_ANALOG_BIAS3_PWD_IR25FIR_GET(x) (((x) & 0x1c000000) >> 26)
+#define PHY_ANALOG_BIAS3_PWD_IR25FIR_SET(x) (((x) << 26) & 0x1c000000)
+#define PHY_ANALOG_BIAS3_PWD_IR50ADC_MSB 31
+#define PHY_ANALOG_BIAS3_PWD_IR50ADC_LSB 29
+#define PHY_ANALOG_BIAS3_PWD_IR50ADC_MASK 0xe0000000
+#define PHY_ANALOG_BIAS3_PWD_IR50ADC_GET(x) (((x) & 0xe0000000) >> 29)
+#define PHY_ANALOG_BIAS3_PWD_IR50ADC_SET(x) (((x) << 29) & 0xe0000000)
+
+/* macros for BIAS4 */
+#define PHY_ANALOG_BIAS4_ADDRESS 0x000000cc
+#define PHY_ANALOG_BIAS4_OFFSET 0x000000cc
+#define PHY_ANALOG_BIAS4_SPARE4_MSB 10
+#define PHY_ANALOG_BIAS4_SPARE4_LSB 0
+#define PHY_ANALOG_BIAS4_SPARE4_MASK 0x000007ff
+#define PHY_ANALOG_BIAS4_SPARE4_GET(x) (((x) & 0x000007ff) >> 0)
+#define PHY_ANALOG_BIAS4_SPARE4_SET(x) (((x) << 0) & 0x000007ff)
+#define PHY_ANALOG_BIAS4_PWD_IR25SPARED_MSB 13
+#define PHY_ANALOG_BIAS4_PWD_IR25SPARED_LSB 11
+#define PHY_ANALOG_BIAS4_PWD_IR25SPARED_MASK 0x00003800
+#define PHY_ANALOG_BIAS4_PWD_IR25SPARED_GET(x) (((x) & 0x00003800) >> 11)
+#define PHY_ANALOG_BIAS4_PWD_IR25SPARED_SET(x) (((x) << 11) & 0x00003800)
+#define PHY_ANALOG_BIAS4_PWD_IR25SPAREC_MSB 16
+#define PHY_ANALOG_BIAS4_PWD_IR25SPAREC_LSB 14
+#define PHY_ANALOG_BIAS4_PWD_IR25SPAREC_MASK 0x0001c000
+#define PHY_ANALOG_BIAS4_PWD_IR25SPAREC_GET(x) (((x) & 0x0001c000) >> 14)
+#define PHY_ANALOG_BIAS4_PWD_IR25SPAREC_SET(x) (((x) << 14) & 0x0001c000)
+#define PHY_ANALOG_BIAS4_PWD_IR25SPAREB_MSB 19
+#define PHY_ANALOG_BIAS4_PWD_IR25SPAREB_LSB 17
+#define PHY_ANALOG_BIAS4_PWD_IR25SPAREB_MASK 0x000e0000
+#define PHY_ANALOG_BIAS4_PWD_IR25SPAREB_GET(x) (((x) & 0x000e0000) >> 17)
+#define PHY_ANALOG_BIAS4_PWD_IR25SPAREB_SET(x) (((x) << 17) & 0x000e0000)
+#define PHY_ANALOG_BIAS4_PWD_IR25XPA_MSB 22
+#define PHY_ANALOG_BIAS4_PWD_IR25XPA_LSB 20
+#define PHY_ANALOG_BIAS4_PWD_IR25XPA_MASK 0x00700000
+#define PHY_ANALOG_BIAS4_PWD_IR25XPA_GET(x) (((x) & 0x00700000) >> 20)
+#define PHY_ANALOG_BIAS4_PWD_IR25XPA_SET(x) (((x) << 20) & 0x00700000)
+#define PHY_ANALOG_BIAS4_PWD_IC25SPAREC_MSB 25
+#define PHY_ANALOG_BIAS4_PWD_IC25SPAREC_LSB 23
+#define PHY_ANALOG_BIAS4_PWD_IC25SPAREC_MASK 0x03800000
+#define PHY_ANALOG_BIAS4_PWD_IC25SPAREC_GET(x) (((x) & 0x03800000) >> 23)
+#define PHY_ANALOG_BIAS4_PWD_IC25SPAREC_SET(x) (((x) << 23) & 0x03800000)
+#define PHY_ANALOG_BIAS4_PWD_IC25SPAREB_MSB 28
+#define PHY_ANALOG_BIAS4_PWD_IC25SPAREB_LSB 26
+#define PHY_ANALOG_BIAS4_PWD_IC25SPAREB_MASK 0x1c000000
+#define PHY_ANALOG_BIAS4_PWD_IC25SPAREB_GET(x) (((x) & 0x1c000000) >> 26)
+#define PHY_ANALOG_BIAS4_PWD_IC25SPAREB_SET(x) (((x) << 26) & 0x1c000000)
+#define PHY_ANALOG_BIAS4_PWD_IC25SPAREA_MSB 31
+#define PHY_ANALOG_BIAS4_PWD_IC25SPAREA_LSB 29
+#define PHY_ANALOG_BIAS4_PWD_IC25SPAREA_MASK 0xe0000000
+#define PHY_ANALOG_BIAS4_PWD_IC25SPAREA_GET(x) (((x) & 0xe0000000) >> 29)
+#define PHY_ANALOG_BIAS4_PWD_IC25SPAREA_SET(x) (((x) << 29) & 0xe0000000)
+
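+/*
+ * Illustrative usage sketch (not part of the original generated header): the
+ * *_GET/*_SET macros above are plain mask-and-shift helpers, so changing one
+ * field of a register is an ordinary read-modify-write.  The
+ * phy_reg_read()/phy_reg_write() accessors and the u32 type are hypothetical
+ * placeholders; a real driver would substitute its own MMIO helpers.
+ */
+#if 0	/* example only */
+static void phy_analog_set_bias_sel(u32 sel)
+{
+	/* Fetch the current BIAS1 register image. */
+	u32 val = phy_reg_read(PHY_ANALOG_BIAS1_OFFSET);
+
+	/* Clear the BIAS_SEL field, then merge in the new value
+	 * (the _SET macro masks its argument to bits 31:25). */
+	val &= ~PHY_ANALOG_BIAS1_BIAS_SEL_MASK;
+	val |= PHY_ANALOG_BIAS1_BIAS_SEL_SET(sel);
+	phy_reg_write(PHY_ANALOG_BIAS1_OFFSET, val);
+}
+#endif
+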
+/* macros for RXTX1 */
+#define PHY_ANALOG_RXTX1_ADDRESS 0x00000100
+#define PHY_ANALOG_RXTX1_OFFSET 0x00000100
+#define PHY_ANALOG_RXTX1_SCFIR_GAIN_MSB 0
+#define PHY_ANALOG_RXTX1_SCFIR_GAIN_LSB 0
+#define PHY_ANALOG_RXTX1_SCFIR_GAIN_MASK 0x00000001
+#define PHY_ANALOG_RXTX1_SCFIR_GAIN_GET(x) (((x) & 0x00000001) >> 0)
+#define PHY_ANALOG_RXTX1_SCFIR_GAIN_SET(x) (((x) << 0) & 0x00000001)
+#define PHY_ANALOG_RXTX1_MANRXGAIN_MSB 1
+#define PHY_ANALOG_RXTX1_MANRXGAIN_LSB 1
+#define PHY_ANALOG_RXTX1_MANRXGAIN_MASK 0x00000002
+#define PHY_ANALOG_RXTX1_MANRXGAIN_GET(x) (((x) & 0x00000002) >> 1)
+#define PHY_ANALOG_RXTX1_MANRXGAIN_SET(x) (((x) << 1) & 0x00000002)
+#define PHY_ANALOG_RXTX1_AGC_DBDAC_MSB 5
+#define PHY_ANALOG_RXTX1_AGC_DBDAC_LSB 2
+#define PHY_ANALOG_RXTX1_AGC_DBDAC_MASK 0x0000003c
+#define PHY_ANALOG_RXTX1_AGC_DBDAC_GET(x) (((x) & 0x0000003c) >> 2)
+#define PHY_ANALOG_RXTX1_AGC_DBDAC_SET(x) (((x) << 2) & 0x0000003c)
+#define PHY_ANALOG_RXTX1_OVR_AGC_DBDAC_MSB 6
+#define PHY_ANALOG_RXTX1_OVR_AGC_DBDAC_LSB 6
+#define PHY_ANALOG_RXTX1_OVR_AGC_DBDAC_MASK 0x00000040
+#define PHY_ANALOG_RXTX1_OVR_AGC_DBDAC_GET(x) (((x) & 0x00000040) >> 6)
+#define PHY_ANALOG_RXTX1_OVR_AGC_DBDAC_SET(x) (((x) << 6) & 0x00000040)
+#define PHY_ANALOG_RXTX1_ENABLE_PAL_MSB 7
+#define PHY_ANALOG_RXTX1_ENABLE_PAL_LSB 7
+#define PHY_ANALOG_RXTX1_ENABLE_PAL_MASK 0x00000080
+#define PHY_ANALOG_RXTX1_ENABLE_PAL_GET(x) (((x) & 0x00000080) >> 7)
+#define PHY_ANALOG_RXTX1_ENABLE_PAL_SET(x) (((x) << 7) & 0x00000080)
+#define PHY_ANALOG_RXTX1_ENABLE_PAL_OVR_MSB 8
+#define PHY_ANALOG_RXTX1_ENABLE_PAL_OVR_LSB 8
+#define PHY_ANALOG_RXTX1_ENABLE_PAL_OVR_MASK 0x00000100
+#define PHY_ANALOG_RXTX1_ENABLE_PAL_OVR_GET(x) (((x) & 0x00000100) >> 8)
+#define PHY_ANALOG_RXTX1_ENABLE_PAL_OVR_SET(x) (((x) << 8) & 0x00000100)
+#define PHY_ANALOG_RXTX1_TX1DB_BIQUAD_MSB 11
+#define PHY_ANALOG_RXTX1_TX1DB_BIQUAD_LSB 9
+#define PHY_ANALOG_RXTX1_TX1DB_BIQUAD_MASK 0x00000e00
+#define PHY_ANALOG_RXTX1_TX1DB_BIQUAD_GET(x) (((x) & 0x00000e00) >> 9)
+#define PHY_ANALOG_RXTX1_TX1DB_BIQUAD_SET(x) (((x) << 9) & 0x00000e00)
+#define PHY_ANALOG_RXTX1_TX6DB_BIQUAD_MSB 13
+#define PHY_ANALOG_RXTX1_TX6DB_BIQUAD_LSB 12
+#define PHY_ANALOG_RXTX1_TX6DB_BIQUAD_MASK 0x00003000
+#define PHY_ANALOG_RXTX1_TX6DB_BIQUAD_GET(x) (((x) & 0x00003000) >> 12)
+#define PHY_ANALOG_RXTX1_TX6DB_BIQUAD_SET(x) (((x) << 12) & 0x00003000)
+#define PHY_ANALOG_RXTX1_PADRVHALFGN2G_MSB 14
+#define PHY_ANALOG_RXTX1_PADRVHALFGN2G_LSB 14
+#define PHY_ANALOG_RXTX1_PADRVHALFGN2G_MASK 0x00004000
+#define PHY_ANALOG_RXTX1_PADRVHALFGN2G_GET(x) (((x) & 0x00004000) >> 14)
+#define PHY_ANALOG_RXTX1_PADRVHALFGN2G_SET(x) (((x) << 14) & 0x00004000)
+#define PHY_ANALOG_RXTX1_PADRV2GN_MSB 18
+#define PHY_ANALOG_RXTX1_PADRV2GN_LSB 15
+#define PHY_ANALOG_RXTX1_PADRV2GN_MASK 0x00078000
+#define PHY_ANALOG_RXTX1_PADRV2GN_GET(x) (((x) & 0x00078000) >> 15)
+#define PHY_ANALOG_RXTX1_PADRV2GN_SET(x) (((x) << 15) & 0x00078000)
+#define PHY_ANALOG_RXTX1_PADRV3GN5G_MSB 22
+#define PHY_ANALOG_RXTX1_PADRV3GN5G_LSB 19
+#define PHY_ANALOG_RXTX1_PADRV3GN5G_MASK 0x00780000
+#define PHY_ANALOG_RXTX1_PADRV3GN5G_GET(x) (((x) & 0x00780000) >> 19)
+#define PHY_ANALOG_RXTX1_PADRV3GN5G_SET(x) (((x) << 19) & 0x00780000)
+#define PHY_ANALOG_RXTX1_PADRV4GN5G_MSB 26
+#define PHY_ANALOG_RXTX1_PADRV4GN5G_LSB 23
+#define PHY_ANALOG_RXTX1_PADRV4GN5G_MASK 0x07800000
+#define PHY_ANALOG_RXTX1_PADRV4GN5G_GET(x) (((x) & 0x07800000) >> 23)
+#define PHY_ANALOG_RXTX1_PADRV4GN5G_SET(x) (((x) << 23) & 0x07800000)
+#define PHY_ANALOG_RXTX1_TXBB_GC_MSB 30
+#define PHY_ANALOG_RXTX1_TXBB_GC_LSB 27
+#define PHY_ANALOG_RXTX1_TXBB_GC_MASK 0x78000000
+#define PHY_ANALOG_RXTX1_TXBB_GC_GET(x) (((x) & 0x78000000) >> 27)
+#define PHY_ANALOG_RXTX1_TXBB_GC_SET(x) (((x) << 27) & 0x78000000)
+#define PHY_ANALOG_RXTX1_MANTXGAIN_MSB 31
+#define PHY_ANALOG_RXTX1_MANTXGAIN_LSB 31
+#define PHY_ANALOG_RXTX1_MANTXGAIN_MASK 0x80000000
+#define PHY_ANALOG_RXTX1_MANTXGAIN_GET(x) (((x) & 0x80000000) >> 31)
+#define PHY_ANALOG_RXTX1_MANTXGAIN_SET(x) (((x) << 31) & 0x80000000)
+
+/* macros for RXTX2 */
+#define PHY_ANALOG_RXTX2_ADDRESS 0x00000104
+#define PHY_ANALOG_RXTX2_OFFSET 0x00000104
+#define PHY_ANALOG_RXTX2_BMODE_MSB 0
+#define PHY_ANALOG_RXTX2_BMODE_LSB 0
+#define PHY_ANALOG_RXTX2_BMODE_MASK 0x00000001
+#define PHY_ANALOG_RXTX2_BMODE_GET(x) (((x) & 0x00000001) >> 0)
+#define PHY_ANALOG_RXTX2_BMODE_SET(x) (((x) << 0) & 0x00000001)
+#define PHY_ANALOG_RXTX2_BMODE_OVR_MSB 1
+#define PHY_ANALOG_RXTX2_BMODE_OVR_LSB 1
+#define PHY_ANALOG_RXTX2_BMODE_OVR_MASK 0x00000002
+#define PHY_ANALOG_RXTX2_BMODE_OVR_GET(x) (((x) & 0x00000002) >> 1)
+#define PHY_ANALOG_RXTX2_BMODE_OVR_SET(x) (((x) << 1) & 0x00000002)
+#define PHY_ANALOG_RXTX2_SYNTHON_MSB 2
+#define PHY_ANALOG_RXTX2_SYNTHON_LSB 2
+#define PHY_ANALOG_RXTX2_SYNTHON_MASK 0x00000004
+#define PHY_ANALOG_RXTX2_SYNTHON_GET(x) (((x) & 0x00000004) >> 2)
+#define PHY_ANALOG_RXTX2_SYNTHON_SET(x) (((x) << 2) & 0x00000004)
+#define PHY_ANALOG_RXTX2_SYNTHON_OVR_MSB 3
+#define PHY_ANALOG_RXTX2_SYNTHON_OVR_LSB 3
+#define PHY_ANALOG_RXTX2_SYNTHON_OVR_MASK 0x00000008
+#define PHY_ANALOG_RXTX2_SYNTHON_OVR_GET(x) (((x) & 0x00000008) >> 3)
+#define PHY_ANALOG_RXTX2_SYNTHON_OVR_SET(x) (((x) << 3) & 0x00000008)
+#define PHY_ANALOG_RXTX2_BW_ST_MSB 5
+#define PHY_ANALOG_RXTX2_BW_ST_LSB 4
+#define PHY_ANALOG_RXTX2_BW_ST_MASK 0x00000030
+#define PHY_ANALOG_RXTX2_BW_ST_GET(x) (((x) & 0x00000030) >> 4)
+#define PHY_ANALOG_RXTX2_BW_ST_SET(x) (((x) << 4) & 0x00000030)
+#define PHY_ANALOG_RXTX2_BW_ST_OVR_MSB 6
+#define PHY_ANALOG_RXTX2_BW_ST_OVR_LSB 6
+#define PHY_ANALOG_RXTX2_BW_ST_OVR_MASK 0x00000040
+#define PHY_ANALOG_RXTX2_BW_ST_OVR_GET(x) (((x) & 0x00000040) >> 6)
+#define PHY_ANALOG_RXTX2_BW_ST_OVR_SET(x) (((x) << 6) & 0x00000040)
+#define PHY_ANALOG_RXTX2_TXON_MSB 7
+#define PHY_ANALOG_RXTX2_TXON_LSB 7
+#define PHY_ANALOG_RXTX2_TXON_MASK 0x00000080
+#define PHY_ANALOG_RXTX2_TXON_GET(x) (((x) & 0x00000080) >> 7)
+#define PHY_ANALOG_RXTX2_TXON_SET(x) (((x) << 7) & 0x00000080)
+#define PHY_ANALOG_RXTX2_TXON_OVR_MSB 8
+#define PHY_ANALOG_RXTX2_TXON_OVR_LSB 8
+#define PHY_ANALOG_RXTX2_TXON_OVR_MASK 0x00000100
+#define PHY_ANALOG_RXTX2_TXON_OVR_GET(x) (((x) & 0x00000100) >> 8)
+#define PHY_ANALOG_RXTX2_TXON_OVR_SET(x) (((x) << 8) & 0x00000100)
+#define PHY_ANALOG_RXTX2_PAON_MSB 9
+#define PHY_ANALOG_RXTX2_PAON_LSB 9
+#define PHY_ANALOG_RXTX2_PAON_MASK 0x00000200
+#define PHY_ANALOG_RXTX2_PAON_GET(x) (((x) & 0x00000200) >> 9)
+#define PHY_ANALOG_RXTX2_PAON_SET(x) (((x) << 9) & 0x00000200)
+#define PHY_ANALOG_RXTX2_PAON_OVR_MSB 10
+#define PHY_ANALOG_RXTX2_PAON_OVR_LSB 10
+#define PHY_ANALOG_RXTX2_PAON_OVR_MASK 0x00000400
+#define PHY_ANALOG_RXTX2_PAON_OVR_GET(x) (((x) & 0x00000400) >> 10)
+#define PHY_ANALOG_RXTX2_PAON_OVR_SET(x) (((x) << 10) & 0x00000400)
+#define PHY_ANALOG_RXTX2_RXON_MSB 11
+#define PHY_ANALOG_RXTX2_RXON_LSB 11
+#define PHY_ANALOG_RXTX2_RXON_MASK 0x00000800
+#define PHY_ANALOG_RXTX2_RXON_GET(x) (((x) & 0x00000800) >> 11)
+#define PHY_ANALOG_RXTX2_RXON_SET(x) (((x) << 11) & 0x00000800)
+#define PHY_ANALOG_RXTX2_RXON_OVR_MSB 12
+#define PHY_ANALOG_RXTX2_RXON_OVR_LSB 12
+#define PHY_ANALOG_RXTX2_RXON_OVR_MASK 0x00001000
+#define PHY_ANALOG_RXTX2_RXON_OVR_GET(x) (((x) & 0x00001000) >> 12)
+#define PHY_ANALOG_RXTX2_RXON_OVR_SET(x) (((x) << 12) & 0x00001000)
+#define PHY_ANALOG_RXTX2_AGCON_MSB 13
+#define PHY_ANALOG_RXTX2_AGCON_LSB 13
+#define PHY_ANALOG_RXTX2_AGCON_MASK 0x00002000
+#define PHY_ANALOG_RXTX2_AGCON_GET(x) (((x) & 0x00002000) >> 13)
+#define PHY_ANALOG_RXTX2_AGCON_SET(x) (((x) << 13) & 0x00002000)
+#define PHY_ANALOG_RXTX2_AGCON_OVR_MSB 14
+#define PHY_ANALOG_RXTX2_AGCON_OVR_LSB 14
+#define PHY_ANALOG_RXTX2_AGCON_OVR_MASK 0x00004000
+#define PHY_ANALOG_RXTX2_AGCON_OVR_GET(x) (((x) & 0x00004000) >> 14)
+#define PHY_ANALOG_RXTX2_AGCON_OVR_SET(x) (((x) << 14) & 0x00004000)
+#define PHY_ANALOG_RXTX2_TXMOD_MSB 17
+#define PHY_ANALOG_RXTX2_TXMOD_LSB 15
+#define PHY_ANALOG_RXTX2_TXMOD_MASK 0x00038000
+#define PHY_ANALOG_RXTX2_TXMOD_GET(x) (((x) & 0x00038000) >> 15)
+#define PHY_ANALOG_RXTX2_TXMOD_SET(x) (((x) << 15) & 0x00038000)
+#define PHY_ANALOG_RXTX2_TXMOD_OVR_MSB 18
+#define PHY_ANALOG_RXTX2_TXMOD_OVR_LSB 18
+#define PHY_ANALOG_RXTX2_TXMOD_OVR_MASK 0x00040000
+#define PHY_ANALOG_RXTX2_TXMOD_OVR_GET(x) (((x) & 0x00040000) >> 18)
+#define PHY_ANALOG_RXTX2_TXMOD_OVR_SET(x) (((x) << 18) & 0x00040000)
+#define PHY_ANALOG_RXTX2_RX1DB_BIQUAD_MSB 21
+#define PHY_ANALOG_RXTX2_RX1DB_BIQUAD_LSB 19
+#define PHY_ANALOG_RXTX2_RX1DB_BIQUAD_MASK 0x00380000
+#define PHY_ANALOG_RXTX2_RX1DB_BIQUAD_GET(x) (((x) & 0x00380000) >> 19)
+#define PHY_ANALOG_RXTX2_RX1DB_BIQUAD_SET(x) (((x) << 19) & 0x00380000)
+#define PHY_ANALOG_RXTX2_RX6DB_BIQUAD_MSB 23
+#define PHY_ANALOG_RXTX2_RX6DB_BIQUAD_LSB 22
+#define PHY_ANALOG_RXTX2_RX6DB_BIQUAD_MASK 0x00c00000
+#define PHY_ANALOG_RXTX2_RX6DB_BIQUAD_GET(x) (((x) & 0x00c00000) >> 22)
+#define PHY_ANALOG_RXTX2_RX6DB_BIQUAD_SET(x) (((x) << 22) & 0x00c00000)
+#define PHY_ANALOG_RXTX2_MXRGAIN_MSB 25
+#define PHY_ANALOG_RXTX2_MXRGAIN_LSB 24
+#define PHY_ANALOG_RXTX2_MXRGAIN_MASK 0x03000000
+#define PHY_ANALOG_RXTX2_MXRGAIN_GET(x) (((x) & 0x03000000) >> 24)
+#define PHY_ANALOG_RXTX2_MXRGAIN_SET(x) (((x) << 24) & 0x03000000)
+#define PHY_ANALOG_RXTX2_VGAGAIN_MSB 28
+#define PHY_ANALOG_RXTX2_VGAGAIN_LSB 26
+#define PHY_ANALOG_RXTX2_VGAGAIN_MASK 0x1c000000
+#define PHY_ANALOG_RXTX2_VGAGAIN_GET(x) (((x) & 0x1c000000) >> 26)
+#define PHY_ANALOG_RXTX2_VGAGAIN_SET(x) (((x) << 26) & 0x1c000000)
+#define PHY_ANALOG_RXTX2_LNAGAIN_MSB 31
+#define PHY_ANALOG_RXTX2_LNAGAIN_LSB 29
+#define PHY_ANALOG_RXTX2_LNAGAIN_MASK 0xe0000000
+#define PHY_ANALOG_RXTX2_LNAGAIN_GET(x) (((x) & 0xe0000000) >> 29)
+#define PHY_ANALOG_RXTX2_LNAGAIN_SET(x) (((x) << 29) & 0xe0000000)
+
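+/*
+ * Illustrative sketch (not part of the original generated header): most RXTX2
+ * controls come paired with an *_OVR companion bit, which by the naming
+ * convention lets software override the value the baseband would otherwise
+ * drive.  The override semantics are inferred purely from the field names,
+ * and phy_reg_read()/phy_reg_write() are hypothetical placeholders.
+ */
+#if 0	/* example only */
+static void phy_analog_force_txon(int on)
+{
+	u32 val = phy_reg_read(PHY_ANALOG_RXTX2_OFFSET);
+
+	/* Program the desired TXON value ... */
+	val &= ~PHY_ANALOG_RXTX2_TXON_MASK;
+	val |= PHY_ANALOG_RXTX2_TXON_SET(on ? 1 : 0);
+	/* ... and assert its override bit so the manual value takes effect. */
+	val |= PHY_ANALOG_RXTX2_TXON_OVR_SET(1);
+	phy_reg_write(PHY_ANALOG_RXTX2_OFFSET, val);
+}
+#endif
+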
+/* macros for RXTX3 */
+#define PHY_ANALOG_RXTX3_ADDRESS 0x00000108
+#define PHY_ANALOG_RXTX3_OFFSET 0x00000108
+#define PHY_ANALOG_RXTX3_SPARE3_MSB 2
+#define PHY_ANALOG_RXTX3_SPARE3_LSB 0
+#define PHY_ANALOG_RXTX3_SPARE3_MASK 0x00000007
+#define PHY_ANALOG_RXTX3_SPARE3_GET(x) (((x) & 0x00000007) >> 0)
+#define PHY_ANALOG_RXTX3_SPARE3_SET(x) (((x) << 0) & 0x00000007)
+#define PHY_ANALOG_RXTX3_SPURON_MSB 3
+#define PHY_ANALOG_RXTX3_SPURON_LSB 3
+#define PHY_ANALOG_RXTX3_SPURON_MASK 0x00000008
+#define PHY_ANALOG_RXTX3_SPURON_GET(x) (((x) & 0x00000008) >> 3)
+#define PHY_ANALOG_RXTX3_SPURON_SET(x) (((x) << 3) & 0x00000008)
+#define PHY_ANALOG_RXTX3_PAL_LOCKEDEN_MSB 4
+#define PHY_ANALOG_RXTX3_PAL_LOCKEDEN_LSB 4
+#define PHY_ANALOG_RXTX3_PAL_LOCKEDEN_MASK 0x00000010
+#define PHY_ANALOG_RXTX3_PAL_LOCKEDEN_GET(x) (((x) & 0x00000010) >> 4)
+#define PHY_ANALOG_RXTX3_PAL_LOCKEDEN_SET(x) (((x) << 4) & 0x00000010)
+#define PHY_ANALOG_RXTX3_DACFULLSCALE_MSB 5
+#define PHY_ANALOG_RXTX3_DACFULLSCALE_LSB 5
+#define PHY_ANALOG_RXTX3_DACFULLSCALE_MASK 0x00000020
+#define PHY_ANALOG_RXTX3_DACFULLSCALE_GET(x) (((x) & 0x00000020) >> 5)
+#define PHY_ANALOG_RXTX3_DACFULLSCALE_SET(x) (((x) << 5) & 0x00000020)
+#define PHY_ANALOG_RXTX3_ADCSHORT_MSB 6
+#define PHY_ANALOG_RXTX3_ADCSHORT_LSB 6
+#define PHY_ANALOG_RXTX3_ADCSHORT_MASK 0x00000040
+#define PHY_ANALOG_RXTX3_ADCSHORT_GET(x) (((x) & 0x00000040) >> 6)
+#define PHY_ANALOG_RXTX3_ADCSHORT_SET(x) (((x) << 6) & 0x00000040)
+#define PHY_ANALOG_RXTX3_DACPWD_MSB 7
+#define PHY_ANALOG_RXTX3_DACPWD_LSB 7
+#define PHY_ANALOG_RXTX3_DACPWD_MASK 0x00000080
+#define PHY_ANALOG_RXTX3_DACPWD_GET(x) (((x) & 0x00000080) >> 7)
+#define PHY_ANALOG_RXTX3_DACPWD_SET(x) (((x) << 7) & 0x00000080)
+#define PHY_ANALOG_RXTX3_DACPWD_OVR_MSB 8
+#define PHY_ANALOG_RXTX3_DACPWD_OVR_LSB 8
+#define PHY_ANALOG_RXTX3_DACPWD_OVR_MASK 0x00000100
+#define PHY_ANALOG_RXTX3_DACPWD_OVR_GET(x) (((x) & 0x00000100) >> 8)
+#define PHY_ANALOG_RXTX3_DACPWD_OVR_SET(x) (((x) << 8) & 0x00000100)
+#define PHY_ANALOG_RXTX3_ADCPWD_MSB 9
+#define PHY_ANALOG_RXTX3_ADCPWD_LSB 9
+#define PHY_ANALOG_RXTX3_ADCPWD_MASK 0x00000200
+#define PHY_ANALOG_RXTX3_ADCPWD_GET(x) (((x) & 0x00000200) >> 9)
+#define PHY_ANALOG_RXTX3_ADCPWD_SET(x) (((x) << 9) & 0x00000200)
+#define PHY_ANALOG_RXTX3_ADCPWD_OVR_MSB 10
+#define PHY_ANALOG_RXTX3_ADCPWD_OVR_LSB 10
+#define PHY_ANALOG_RXTX3_ADCPWD_OVR_MASK 0x00000400
+#define PHY_ANALOG_RXTX3_ADCPWD_OVR_GET(x) (((x) & 0x00000400) >> 10)
+#define PHY_ANALOG_RXTX3_ADCPWD_OVR_SET(x) (((x) << 10) & 0x00000400)
+#define PHY_ANALOG_RXTX3_AGC_CALDAC_MSB 16
+#define PHY_ANALOG_RXTX3_AGC_CALDAC_LSB 11
+#define PHY_ANALOG_RXTX3_AGC_CALDAC_MASK 0x0001f800
+#define PHY_ANALOG_RXTX3_AGC_CALDAC_GET(x) (((x) & 0x0001f800) >> 11)
+#define PHY_ANALOG_RXTX3_AGC_CALDAC_SET(x) (((x) << 11) & 0x0001f800)
+#define PHY_ANALOG_RXTX3_AGC_CAL_MSB 17
+#define PHY_ANALOG_RXTX3_AGC_CAL_LSB 17
+#define PHY_ANALOG_RXTX3_AGC_CAL_MASK 0x00020000
+#define PHY_ANALOG_RXTX3_AGC_CAL_GET(x) (((x) & 0x00020000) >> 17)
+#define PHY_ANALOG_RXTX3_AGC_CAL_SET(x) (((x) << 17) & 0x00020000)
+#define PHY_ANALOG_RXTX3_AGC_CAL_OVR_MSB 18
+#define PHY_ANALOG_RXTX3_AGC_CAL_OVR_LSB 18
+#define PHY_ANALOG_RXTX3_AGC_CAL_OVR_MASK 0x00040000
+#define PHY_ANALOG_RXTX3_AGC_CAL_OVR_GET(x) (((x) & 0x00040000) >> 18)
+#define PHY_ANALOG_RXTX3_AGC_CAL_OVR_SET(x) (((x) << 18) & 0x00040000)
+#define PHY_ANALOG_RXTX3_LOFORCEDON_MSB 19
+#define PHY_ANALOG_RXTX3_LOFORCEDON_LSB 19
+#define PHY_ANALOG_RXTX3_LOFORCEDON_MASK 0x00080000
+#define PHY_ANALOG_RXTX3_LOFORCEDON_GET(x) (((x) & 0x00080000) >> 19)
+#define PHY_ANALOG_RXTX3_LOFORCEDON_SET(x) (((x) << 19) & 0x00080000)
+#define PHY_ANALOG_RXTX3_CALRESIDUE_MSB 20
+#define PHY_ANALOG_RXTX3_CALRESIDUE_LSB 20
+#define PHY_ANALOG_RXTX3_CALRESIDUE_MASK 0x00100000
+#define PHY_ANALOG_RXTX3_CALRESIDUE_GET(x) (((x) & 0x00100000) >> 20)
+#define PHY_ANALOG_RXTX3_CALRESIDUE_SET(x) (((x) << 20) & 0x00100000)
+#define PHY_ANALOG_RXTX3_CALRESIDUE_OVR_MSB 21
+#define PHY_ANALOG_RXTX3_CALRESIDUE_OVR_LSB 21
+#define PHY_ANALOG_RXTX3_CALRESIDUE_OVR_MASK 0x00200000
+#define PHY_ANALOG_RXTX3_CALRESIDUE_OVR_GET(x) (((x) & 0x00200000) >> 21)
+#define PHY_ANALOG_RXTX3_CALRESIDUE_OVR_SET(x) (((x) << 21) & 0x00200000)
+#define PHY_ANALOG_RXTX3_CALFC_MSB 22
+#define PHY_ANALOG_RXTX3_CALFC_LSB 22
+#define PHY_ANALOG_RXTX3_CALFC_MASK 0x00400000
+#define PHY_ANALOG_RXTX3_CALFC_GET(x) (((x) & 0x00400000) >> 22)
+#define PHY_ANALOG_RXTX3_CALFC_SET(x) (((x) << 22) & 0x00400000)
+#define PHY_ANALOG_RXTX3_CALFC_OVR_MSB 23
+#define PHY_ANALOG_RXTX3_CALFC_OVR_LSB 23
+#define PHY_ANALOG_RXTX3_CALFC_OVR_MASK 0x00800000
+#define PHY_ANALOG_RXTX3_CALFC_OVR_GET(x) (((x) & 0x00800000) >> 23)
+#define PHY_ANALOG_RXTX3_CALFC_OVR_SET(x) (((x) << 23) & 0x00800000)
+#define PHY_ANALOG_RXTX3_CALTX_MSB 24
+#define PHY_ANALOG_RXTX3_CALTX_LSB 24
+#define PHY_ANALOG_RXTX3_CALTX_MASK 0x01000000
+#define PHY_ANALOG_RXTX3_CALTX_GET(x) (((x) & 0x01000000) >> 24)
+#define PHY_ANALOG_RXTX3_CALTX_SET(x) (((x) << 24) & 0x01000000)
+#define PHY_ANALOG_RXTX3_CALTX_OVR_MSB 25
+#define PHY_ANALOG_RXTX3_CALTX_OVR_LSB 25
+#define PHY_ANALOG_RXTX3_CALTX_OVR_MASK 0x02000000
+#define PHY_ANALOG_RXTX3_CALTX_OVR_GET(x) (((x) & 0x02000000) >> 25)
+#define PHY_ANALOG_RXTX3_CALTX_OVR_SET(x) (((x) << 25) & 0x02000000)
+#define PHY_ANALOG_RXTX3_CALTXSHIFT_MSB 26
+#define PHY_ANALOG_RXTX3_CALTXSHIFT_LSB 26
+#define PHY_ANALOG_RXTX3_CALTXSHIFT_MASK 0x04000000
+#define PHY_ANALOG_RXTX3_CALTXSHIFT_GET(x) (((x) & 0x04000000) >> 26)
+#define PHY_ANALOG_RXTX3_CALTXSHIFT_SET(x) (((x) << 26) & 0x04000000)
+#define PHY_ANALOG_RXTX3_CALTXSHIFT_OVR_MSB 27
+#define PHY_ANALOG_RXTX3_CALTXSHIFT_OVR_LSB 27
+#define PHY_ANALOG_RXTX3_CALTXSHIFT_OVR_MASK 0x08000000
+#define PHY_ANALOG_RXTX3_CALTXSHIFT_OVR_GET(x) (((x) & 0x08000000) >> 27)
+#define PHY_ANALOG_RXTX3_CALTXSHIFT_OVR_SET(x) (((x) << 27) & 0x08000000)
+#define PHY_ANALOG_RXTX3_CALPA_MSB 28
+#define PHY_ANALOG_RXTX3_CALPA_LSB 28
+#define PHY_ANALOG_RXTX3_CALPA_MASK 0x10000000
+#define PHY_ANALOG_RXTX3_CALPA_GET(x) (((x) & 0x10000000) >> 28)
+#define PHY_ANALOG_RXTX3_CALPA_SET(x) (((x) << 28) & 0x10000000)
+#define PHY_ANALOG_RXTX3_CALPA_OVR_MSB 29
+#define PHY_ANALOG_RXTX3_CALPA_OVR_LSB 29
+#define PHY_ANALOG_RXTX3_CALPA_OVR_MASK 0x20000000
+#define PHY_ANALOG_RXTX3_CALPA_OVR_GET(x) (((x) & 0x20000000) >> 29)
+#define PHY_ANALOG_RXTX3_CALPA_OVR_SET(x) (((x) << 29) & 0x20000000)
+#define PHY_ANALOG_RXTX3_TURBOADC_MSB 30
+#define PHY_ANALOG_RXTX3_TURBOADC_LSB 30
+#define PHY_ANALOG_RXTX3_TURBOADC_MASK 0x40000000
+#define PHY_ANALOG_RXTX3_TURBOADC_GET(x) (((x) & 0x40000000) >> 30)
+#define PHY_ANALOG_RXTX3_TURBOADC_SET(x) (((x) << 30) & 0x40000000)
+#define PHY_ANALOG_RXTX3_TURBOADC_OVR_MSB 31
+#define PHY_ANALOG_RXTX3_TURBOADC_OVR_LSB 31
+#define PHY_ANALOG_RXTX3_TURBOADC_OVR_MASK 0x80000000
+#define PHY_ANALOG_RXTX3_TURBOADC_OVR_GET(x) (((x) & 0x80000000) >> 31)
+#define PHY_ANALOG_RXTX3_TURBOADC_OVR_SET(x) (((x) << 31) & 0x80000000)
+
+/* macros for BB1 */
+#define PHY_ANALOG_BB1_ADDRESS 0x00000140
+#define PHY_ANALOG_BB1_OFFSET 0x00000140
+#define PHY_ANALOG_BB1_I2V_CURR2X_MSB 0
+#define PHY_ANALOG_BB1_I2V_CURR2X_LSB 0
+#define PHY_ANALOG_BB1_I2V_CURR2X_MASK 0x00000001
+#define PHY_ANALOG_BB1_I2V_CURR2X_GET(x) (((x) & 0x00000001) >> 0)
+#define PHY_ANALOG_BB1_I2V_CURR2X_SET(x) (((x) << 0) & 0x00000001)
+#define PHY_ANALOG_BB1_ENABLE_LOQ_MSB 1
+#define PHY_ANALOG_BB1_ENABLE_LOQ_LSB 1
+#define PHY_ANALOG_BB1_ENABLE_LOQ_MASK 0x00000002
+#define PHY_ANALOG_BB1_ENABLE_LOQ_GET(x) (((x) & 0x00000002) >> 1)
+#define PHY_ANALOG_BB1_ENABLE_LOQ_SET(x) (((x) << 1) & 0x00000002)
+#define PHY_ANALOG_BB1_FORCE_LOQ_MSB 2
+#define PHY_ANALOG_BB1_FORCE_LOQ_LSB 2
+#define PHY_ANALOG_BB1_FORCE_LOQ_MASK 0x00000004
+#define PHY_ANALOG_BB1_FORCE_LOQ_GET(x) (((x) & 0x00000004) >> 2)
+#define PHY_ANALOG_BB1_FORCE_LOQ_SET(x) (((x) << 2) & 0x00000004)
+#define PHY_ANALOG_BB1_ENABLE_NOTCH_MSB 3
+#define PHY_ANALOG_BB1_ENABLE_NOTCH_LSB 3
+#define PHY_ANALOG_BB1_ENABLE_NOTCH_MASK 0x00000008
+#define PHY_ANALOG_BB1_ENABLE_NOTCH_GET(x) (((x) & 0x00000008) >> 3)
+#define PHY_ANALOG_BB1_ENABLE_NOTCH_SET(x) (((x) << 3) & 0x00000008)
+#define PHY_ANALOG_BB1_FORCE_NOTCH_MSB 4
+#define PHY_ANALOG_BB1_FORCE_NOTCH_LSB 4
+#define PHY_ANALOG_BB1_FORCE_NOTCH_MASK 0x00000010
+#define PHY_ANALOG_BB1_FORCE_NOTCH_GET(x) (((x) & 0x00000010) >> 4)
+#define PHY_ANALOG_BB1_FORCE_NOTCH_SET(x) (((x) << 4) & 0x00000010)
+#define PHY_ANALOG_BB1_ENABLE_BIQUAD_MSB 5
+#define PHY_ANALOG_BB1_ENABLE_BIQUAD_LSB 5
+#define PHY_ANALOG_BB1_ENABLE_BIQUAD_MASK 0x00000020
+#define PHY_ANALOG_BB1_ENABLE_BIQUAD_GET(x) (((x) & 0x00000020) >> 5)
+#define PHY_ANALOG_BB1_ENABLE_BIQUAD_SET(x) (((x) << 5) & 0x00000020)
+#define PHY_ANALOG_BB1_FORCE_BIQUAD_MSB 6
+#define PHY_ANALOG_BB1_FORCE_BIQUAD_LSB 6
+#define PHY_ANALOG_BB1_FORCE_BIQUAD_MASK 0x00000040
+#define PHY_ANALOG_BB1_FORCE_BIQUAD_GET(x) (((x) & 0x00000040) >> 6)
+#define PHY_ANALOG_BB1_FORCE_BIQUAD_SET(x) (((x) << 6) & 0x00000040)
+#define PHY_ANALOG_BB1_ENABLE_OSDAC_MSB 7
+#define PHY_ANALOG_BB1_ENABLE_OSDAC_LSB 7
+#define PHY_ANALOG_BB1_ENABLE_OSDAC_MASK 0x00000080
+#define PHY_ANALOG_BB1_ENABLE_OSDAC_GET(x) (((x) & 0x00000080) >> 7)
+#define PHY_ANALOG_BB1_ENABLE_OSDAC_SET(x) (((x) << 7) & 0x00000080)
+#define PHY_ANALOG_BB1_FORCE_OSDAC_MSB 8
+#define PHY_ANALOG_BB1_FORCE_OSDAC_LSB 8
+#define PHY_ANALOG_BB1_FORCE_OSDAC_MASK 0x00000100
+#define PHY_ANALOG_BB1_FORCE_OSDAC_GET(x) (((x) & 0x00000100) >> 8)
+#define PHY_ANALOG_BB1_FORCE_OSDAC_SET(x) (((x) << 8) & 0x00000100)
+#define PHY_ANALOG_BB1_ENABLE_V2I_MSB 9
+#define PHY_ANALOG_BB1_ENABLE_V2I_LSB 9
+#define PHY_ANALOG_BB1_ENABLE_V2I_MASK 0x00000200
+#define PHY_ANALOG_BB1_ENABLE_V2I_GET(x) (((x) & 0x00000200) >> 9)
+#define PHY_ANALOG_BB1_ENABLE_V2I_SET(x) (((x) << 9) & 0x00000200)
+#define PHY_ANALOG_BB1_FORCE_V2I_MSB 10
+#define PHY_ANALOG_BB1_FORCE_V2I_LSB 10
+#define PHY_ANALOG_BB1_FORCE_V2I_MASK 0x00000400
+#define PHY_ANALOG_BB1_FORCE_V2I_GET(x) (((x) & 0x00000400) >> 10)
+#define PHY_ANALOG_BB1_FORCE_V2I_SET(x) (((x) << 10) & 0x00000400)
+#define PHY_ANALOG_BB1_ENABLE_I2V_MSB 11
+#define PHY_ANALOG_BB1_ENABLE_I2V_LSB 11
+#define PHY_ANALOG_BB1_ENABLE_I2V_MASK 0x00000800
+#define PHY_ANALOG_BB1_ENABLE_I2V_GET(x) (((x) & 0x00000800) >> 11)
+#define PHY_ANALOG_BB1_ENABLE_I2V_SET(x) (((x) << 11) & 0x00000800)
+#define PHY_ANALOG_BB1_FORCE_I2V_MSB 12
+#define PHY_ANALOG_BB1_FORCE_I2V_LSB 12
+#define PHY_ANALOG_BB1_FORCE_I2V_MASK 0x00001000
+#define PHY_ANALOG_BB1_FORCE_I2V_GET(x) (((x) & 0x00001000) >> 12)
+#define PHY_ANALOG_BB1_FORCE_I2V_SET(x) (((x) << 12) & 0x00001000)
+#define PHY_ANALOG_BB1_CMSEL_MSB 15
+#define PHY_ANALOG_BB1_CMSEL_LSB 13
+#define PHY_ANALOG_BB1_CMSEL_MASK 0x0000e000
+#define PHY_ANALOG_BB1_CMSEL_GET(x) (((x) & 0x0000e000) >> 13)
+#define PHY_ANALOG_BB1_CMSEL_SET(x) (((x) << 13) & 0x0000e000)
+#define PHY_ANALOG_BB1_ATBSEL_MSB 17
+#define PHY_ANALOG_BB1_ATBSEL_LSB 16
+#define PHY_ANALOG_BB1_ATBSEL_MASK 0x00030000
+#define PHY_ANALOG_BB1_ATBSEL_GET(x) (((x) & 0x00030000) >> 16)
+#define PHY_ANALOG_BB1_ATBSEL_SET(x) (((x) << 16) & 0x00030000)
+#define PHY_ANALOG_BB1_PD_OSDAC_CALTX_CALPA_MSB 18
+#define PHY_ANALOG_BB1_PD_OSDAC_CALTX_CALPA_LSB 18
+#define PHY_ANALOG_BB1_PD_OSDAC_CALTX_CALPA_MASK 0x00040000
+#define PHY_ANALOG_BB1_PD_OSDAC_CALTX_CALPA_GET(x) (((x) & 0x00040000) >> 18)
+#define PHY_ANALOG_BB1_PD_OSDAC_CALTX_CALPA_SET(x) (((x) << 18) & 0x00040000)
+#define PHY_ANALOG_BB1_OFSTCORRI2VQ_MSB 23
+#define PHY_ANALOG_BB1_OFSTCORRI2VQ_LSB 19
+#define PHY_ANALOG_BB1_OFSTCORRI2VQ_MASK 0x00f80000
+#define PHY_ANALOG_BB1_OFSTCORRI2VQ_GET(x) (((x) & 0x00f80000) >> 19)
+#define PHY_ANALOG_BB1_OFSTCORRI2VQ_SET(x) (((x) << 19) & 0x00f80000)
+#define PHY_ANALOG_BB1_OFSTCORRI2VI_MSB 28
+#define PHY_ANALOG_BB1_OFSTCORRI2VI_LSB 24
+#define PHY_ANALOG_BB1_OFSTCORRI2VI_MASK 0x1f000000
+#define PHY_ANALOG_BB1_OFSTCORRI2VI_GET(x) (((x) & 0x1f000000) >> 24)
+#define PHY_ANALOG_BB1_OFSTCORRI2VI_SET(x) (((x) << 24) & 0x1f000000)
+#define PHY_ANALOG_BB1_LOCALOFFSET_MSB 29
+#define PHY_ANALOG_BB1_LOCALOFFSET_LSB 29
+#define PHY_ANALOG_BB1_LOCALOFFSET_MASK 0x20000000
+#define PHY_ANALOG_BB1_LOCALOFFSET_GET(x) (((x) & 0x20000000) >> 29)
+#define PHY_ANALOG_BB1_LOCALOFFSET_SET(x) (((x) << 29) & 0x20000000)
+#define PHY_ANALOG_BB1_RANGE_OSDAC_MSB 31
+#define PHY_ANALOG_BB1_RANGE_OSDAC_LSB 30
+#define PHY_ANALOG_BB1_RANGE_OSDAC_MASK 0xc0000000
+#define PHY_ANALOG_BB1_RANGE_OSDAC_GET(x) (((x) & 0xc0000000) >> 30)
+#define PHY_ANALOG_BB1_RANGE_OSDAC_SET(x) (((x) << 30) & 0xc0000000)
+
+/* macros for BB2 */
+#define PHY_ANALOG_BB2_ADDRESS 0x00000144
+#define PHY_ANALOG_BB2_OFFSET 0x00000144
+#define PHY_ANALOG_BB2_SPARE_MSB 3
+#define PHY_ANALOG_BB2_SPARE_LSB 0
+#define PHY_ANALOG_BB2_SPARE_MASK 0x0000000f
+#define PHY_ANALOG_BB2_SPARE_GET(x) (((x) & 0x0000000f) >> 0)
+#define PHY_ANALOG_BB2_SPARE_SET(x) (((x) << 0) & 0x0000000f)
+#define PHY_ANALOG_BB2_MXR_HIGHGAINMASK_MSB 7
+#define PHY_ANALOG_BB2_MXR_HIGHGAINMASK_LSB 4
+#define PHY_ANALOG_BB2_MXR_HIGHGAINMASK_MASK 0x000000f0
+#define PHY_ANALOG_BB2_MXR_HIGHGAINMASK_GET(x) (((x) & 0x000000f0) >> 4)
+#define PHY_ANALOG_BB2_MXR_HIGHGAINMASK_SET(x) (((x) << 4) & 0x000000f0)
+#define PHY_ANALOG_BB2_SEL_TEST_MSB 9
+#define PHY_ANALOG_BB2_SEL_TEST_LSB 8
+#define PHY_ANALOG_BB2_SEL_TEST_MASK 0x00000300
+#define PHY_ANALOG_BB2_SEL_TEST_GET(x) (((x) & 0x00000300) >> 8)
+#define PHY_ANALOG_BB2_SEL_TEST_SET(x) (((x) << 8) & 0x00000300)
+#define PHY_ANALOG_BB2_RCFILTER_CAP_MSB 14
+#define PHY_ANALOG_BB2_RCFILTER_CAP_LSB 10
+#define PHY_ANALOG_BB2_RCFILTER_CAP_MASK 0x00007c00
+#define PHY_ANALOG_BB2_RCFILTER_CAP_GET(x) (((x) & 0x00007c00) >> 10)
+#define PHY_ANALOG_BB2_RCFILTER_CAP_SET(x) (((x) << 10) & 0x00007c00)
+#define PHY_ANALOG_BB2_OVERRIDE_RCFILTER_CAP_MSB 15
+#define PHY_ANALOG_BB2_OVERRIDE_RCFILTER_CAP_LSB 15
+#define PHY_ANALOG_BB2_OVERRIDE_RCFILTER_CAP_MASK 0x00008000
+#define PHY_ANALOG_BB2_OVERRIDE_RCFILTER_CAP_GET(x) (((x) & 0x00008000) >> 15)
+#define PHY_ANALOG_BB2_OVERRIDE_RCFILTER_CAP_SET(x) (((x) << 15) & 0x00008000)
+#define PHY_ANALOG_BB2_FNOTCH_MSB 19
+#define PHY_ANALOG_BB2_FNOTCH_LSB 16
+#define PHY_ANALOG_BB2_FNOTCH_MASK 0x000f0000
+#define PHY_ANALOG_BB2_FNOTCH_GET(x) (((x) & 0x000f0000) >> 16)
+#define PHY_ANALOG_BB2_FNOTCH_SET(x) (((x) << 16) & 0x000f0000)
+#define PHY_ANALOG_BB2_OVERRIDE_FNOTCH_MSB 20
+#define PHY_ANALOG_BB2_OVERRIDE_FNOTCH_LSB 20
+#define PHY_ANALOG_BB2_OVERRIDE_FNOTCH_MASK 0x00100000
+#define PHY_ANALOG_BB2_OVERRIDE_FNOTCH_GET(x) (((x) & 0x00100000) >> 20)
+#define PHY_ANALOG_BB2_OVERRIDE_FNOTCH_SET(x) (((x) << 20) & 0x00100000)
+#define PHY_ANALOG_BB2_FILTERFC_MSB 25
+#define PHY_ANALOG_BB2_FILTERFC_LSB 21
+#define PHY_ANALOG_BB2_FILTERFC_MASK 0x03e00000
+#define PHY_ANALOG_BB2_FILTERFC_GET(x) (((x) & 0x03e00000) >> 21)
+#define PHY_ANALOG_BB2_FILTERFC_SET(x) (((x) << 21) & 0x03e00000)
+#define PHY_ANALOG_BB2_OVERRIDE_FILTERFC_MSB 26
+#define PHY_ANALOG_BB2_OVERRIDE_FILTERFC_LSB 26
+#define PHY_ANALOG_BB2_OVERRIDE_FILTERFC_MASK 0x04000000
+#define PHY_ANALOG_BB2_OVERRIDE_FILTERFC_GET(x) (((x) & 0x04000000) >> 26)
+#define PHY_ANALOG_BB2_OVERRIDE_FILTERFC_SET(x) (((x) << 26) & 0x04000000)
+#define PHY_ANALOG_BB2_I2V2RXOUT_EN_MSB 27
+#define PHY_ANALOG_BB2_I2V2RXOUT_EN_LSB 27
+#define PHY_ANALOG_BB2_I2V2RXOUT_EN_MASK 0x08000000
+#define PHY_ANALOG_BB2_I2V2RXOUT_EN_GET(x) (((x) & 0x08000000) >> 27)
+#define PHY_ANALOG_BB2_I2V2RXOUT_EN_SET(x) (((x) << 27) & 0x08000000)
+#define PHY_ANALOG_BB2_BQ2RXOUT_EN_MSB 28
+#define PHY_ANALOG_BB2_BQ2RXOUT_EN_LSB 28
+#define PHY_ANALOG_BB2_BQ2RXOUT_EN_MASK 0x10000000
+#define PHY_ANALOG_BB2_BQ2RXOUT_EN_GET(x) (((x) & 0x10000000) >> 28)
+#define PHY_ANALOG_BB2_BQ2RXOUT_EN_SET(x) (((x) << 28) & 0x10000000)
+#define PHY_ANALOG_BB2_RXIN2I2V_EN_MSB 29
+#define PHY_ANALOG_BB2_RXIN2I2V_EN_LSB 29
+#define PHY_ANALOG_BB2_RXIN2I2V_EN_MASK 0x20000000
+#define PHY_ANALOG_BB2_RXIN2I2V_EN_GET(x) (((x) & 0x20000000) >> 29)
+#define PHY_ANALOG_BB2_RXIN2I2V_EN_SET(x) (((x) << 29) & 0x20000000)
+#define PHY_ANALOG_BB2_RXIN2BQ_EN_MSB 30
+#define PHY_ANALOG_BB2_RXIN2BQ_EN_LSB 30
+#define PHY_ANALOG_BB2_RXIN2BQ_EN_MASK 0x40000000
+#define PHY_ANALOG_BB2_RXIN2BQ_EN_GET(x) (((x) & 0x40000000) >> 30)
+#define PHY_ANALOG_BB2_RXIN2BQ_EN_SET(x) (((x) << 30) & 0x40000000)
+#define PHY_ANALOG_BB2_SWITCH_OVERRIDE_MSB 31
+#define PHY_ANALOG_BB2_SWITCH_OVERRIDE_LSB 31
+#define PHY_ANALOG_BB2_SWITCH_OVERRIDE_MASK 0x80000000
+#define PHY_ANALOG_BB2_SWITCH_OVERRIDE_GET(x) (((x) & 0x80000000) >> 31)
+#define PHY_ANALOG_BB2_SWITCH_OVERRIDE_SET(x) (((x) << 31) & 0x80000000)
+
+/* macros for BB3 */
+#define PHY_ANALOG_BB3_ADDRESS 0x00000148
+#define PHY_ANALOG_BB3_OFFSET 0x00000148
+#define PHY_ANALOG_BB3_SPARE_MSB 15
+#define PHY_ANALOG_BB3_SPARE_LSB 0
+#define PHY_ANALOG_BB3_SPARE_MASK 0x0000ffff
+#define PHY_ANALOG_BB3_SPARE_GET(x) (((x) & 0x0000ffff) >> 0)
+#define PHY_ANALOG_BB3_SPARE_SET(x) (((x) << 0) & 0x0000ffff)
+#define PHY_ANALOG_BB3_FILTERFC_MSB 20
+#define PHY_ANALOG_BB3_FILTERFC_LSB 16
+#define PHY_ANALOG_BB3_FILTERFC_MASK 0x001f0000
+#define PHY_ANALOG_BB3_FILTERFC_GET(x) (((x) & 0x001f0000) >> 16)
+#define PHY_ANALOG_BB3_OFSTCORRI2VQ_MSB 25
+#define PHY_ANALOG_BB3_OFSTCORRI2VQ_LSB 21
+#define PHY_ANALOG_BB3_OFSTCORRI2VQ_MASK 0x03e00000
+#define PHY_ANALOG_BB3_OFSTCORRI2VQ_GET(x) (((x) & 0x03e00000) >> 21)
+#define PHY_ANALOG_BB3_OFSTCORRI2VI_MSB 30
+#define PHY_ANALOG_BB3_OFSTCORRI2VI_LSB 26
+#define PHY_ANALOG_BB3_OFSTCORRI2VI_MASK 0x7c000000
+#define PHY_ANALOG_BB3_OFSTCORRI2VI_GET(x) (((x) & 0x7c000000) >> 26)
+#define PHY_ANALOG_BB3_EN_TXBBCONSTCUR_MSB 31
+#define PHY_ANALOG_BB3_EN_TXBBCONSTCUR_LSB 31
+#define PHY_ANALOG_BB3_EN_TXBBCONSTCUR_MASK 0x80000000
+#define PHY_ANALOG_BB3_EN_TXBBCONSTCUR_GET(x) (((x) & 0x80000000) >> 31)
+#define PHY_ANALOG_BB3_EN_TXBBCONSTCUR_SET(x) (((x) << 31) & 0x80000000)
+
+/* macros for PLLCLKMODA */
+#define PHY_ANALOG_PLLCLKMODA_ADDRESS 0x00000280
+#define PHY_ANALOG_PLLCLKMODA_OFFSET 0x00000280
+#define PHY_ANALOG_PLLCLKMODA_PWD_PLLSDM_MSB 0
+#define PHY_ANALOG_PLLCLKMODA_PWD_PLLSDM_LSB 0
+#define PHY_ANALOG_PLLCLKMODA_PWD_PLLSDM_MASK 0x00000001
+#define PHY_ANALOG_PLLCLKMODA_PWD_PLLSDM_GET(x) (((x) & 0x00000001) >> 0)
+#define PHY_ANALOG_PLLCLKMODA_PWD_PLLSDM_SET(x) (((x) << 0) & 0x00000001)
+#define PHY_ANALOG_PLLCLKMODA_PWDPLL_MSB 1
+#define PHY_ANALOG_PLLCLKMODA_PWDPLL_LSB 1
+#define PHY_ANALOG_PLLCLKMODA_PWDPLL_MASK 0x00000002
+#define PHY_ANALOG_PLLCLKMODA_PWDPLL_GET(x) (((x) & 0x00000002) >> 1)
+#define PHY_ANALOG_PLLCLKMODA_PWDPLL_SET(x) (((x) << 1) & 0x00000002)
+#define PHY_ANALOG_PLLCLKMODA_PLLFRAC_MSB 16
+#define PHY_ANALOG_PLLCLKMODA_PLLFRAC_LSB 2
+#define PHY_ANALOG_PLLCLKMODA_PLLFRAC_MASK 0x0001fffc
+#define PHY_ANALOG_PLLCLKMODA_PLLFRAC_GET(x) (((x) & 0x0001fffc) >> 2)
+#define PHY_ANALOG_PLLCLKMODA_PLLFRAC_SET(x) (((x) << 2) & 0x0001fffc)
+#define PHY_ANALOG_PLLCLKMODA_REFDIV_MSB 20
+#define PHY_ANALOG_PLLCLKMODA_REFDIV_LSB 17
+#define PHY_ANALOG_PLLCLKMODA_REFDIV_MASK 0x001e0000
+#define PHY_ANALOG_PLLCLKMODA_REFDIV_GET(x) (((x) & 0x001e0000) >> 17)
+#define PHY_ANALOG_PLLCLKMODA_REFDIV_SET(x) (((x) << 17) & 0x001e0000)
+#define PHY_ANALOG_PLLCLKMODA_DIV_MSB 30
+#define PHY_ANALOG_PLLCLKMODA_DIV_LSB 21
+#define PHY_ANALOG_PLLCLKMODA_DIV_MASK 0x7fe00000
+#define PHY_ANALOG_PLLCLKMODA_DIV_GET(x) (((x) & 0x7fe00000) >> 21)
+#define PHY_ANALOG_PLLCLKMODA_DIV_SET(x) (((x) << 21) & 0x7fe00000)
+#define PHY_ANALOG_PLLCLKMODA_LOCAL_PLL_MSB 31
+#define PHY_ANALOG_PLLCLKMODA_LOCAL_PLL_LSB 31
+#define PHY_ANALOG_PLLCLKMODA_LOCAL_PLL_MASK 0x80000000
+#define PHY_ANALOG_PLLCLKMODA_LOCAL_PLL_GET(x) (((x) & 0x80000000) >> 31)
+#define PHY_ANALOG_PLLCLKMODA_LOCAL_PLL_SET(x) (((x) << 31) & 0x80000000)
+
+/* macros for PLLCLKMODA2 */
+#define PHY_ANALOG_PLLCLKMODA2_ADDRESS 0x00000284
+#define PHY_ANALOG_PLLCLKMODA2_OFFSET 0x00000284
+#define PHY_ANALOG_PLLCLKMODA2_SPARE_MSB 3
+#define PHY_ANALOG_PLLCLKMODA2_SPARE_LSB 0
+#define PHY_ANALOG_PLLCLKMODA2_SPARE_MASK 0x0000000f
+#define PHY_ANALOG_PLLCLKMODA2_SPARE_GET(x) (((x) & 0x0000000f) >> 0)
+#define PHY_ANALOG_PLLCLKMODA2_SPARE_SET(x) (((x) << 0) & 0x0000000f)
+#define PHY_ANALOG_PLLCLKMODA2_DACPWD_MSB 4
+#define PHY_ANALOG_PLLCLKMODA2_DACPWD_LSB 4
+#define PHY_ANALOG_PLLCLKMODA2_DACPWD_MASK 0x00000010
+#define PHY_ANALOG_PLLCLKMODA2_DACPWD_GET(x) (((x) & 0x00000010) >> 4)
+#define PHY_ANALOG_PLLCLKMODA2_DACPWD_SET(x) (((x) << 4) & 0x00000010)
+#define PHY_ANALOG_PLLCLKMODA2_ADCPWD_MSB 5
+#define PHY_ANALOG_PLLCLKMODA2_ADCPWD_LSB 5
+#define PHY_ANALOG_PLLCLKMODA2_ADCPWD_MASK 0x00000020
+#define PHY_ANALOG_PLLCLKMODA2_ADCPWD_GET(x) (((x) & 0x00000020) >> 5)
+#define PHY_ANALOG_PLLCLKMODA2_ADCPWD_SET(x) (((x) << 5) & 0x00000020)
+#define PHY_ANALOG_PLLCLKMODA2_LOCAL_ADDAC_MSB 6
+#define PHY_ANALOG_PLLCLKMODA2_LOCAL_ADDAC_LSB 6
+#define PHY_ANALOG_PLLCLKMODA2_LOCAL_ADDAC_MASK 0x00000040
+#define PHY_ANALOG_PLLCLKMODA2_LOCAL_ADDAC_GET(x) (((x) & 0x00000040) >> 6)
+#define PHY_ANALOG_PLLCLKMODA2_LOCAL_ADDAC_SET(x) (((x) << 6) & 0x00000040)
+#define PHY_ANALOG_PLLCLKMODA2_DAC_CLK_SEL_MSB 8
+#define PHY_ANALOG_PLLCLKMODA2_DAC_CLK_SEL_LSB 7
+#define PHY_ANALOG_PLLCLKMODA2_DAC_CLK_SEL_MASK 0x00000180
+#define PHY_ANALOG_PLLCLKMODA2_DAC_CLK_SEL_GET(x) (((x) & 0x00000180) >> 7)
+#define PHY_ANALOG_PLLCLKMODA2_DAC_CLK_SEL_SET(x) (((x) << 7) & 0x00000180)
+#define PHY_ANALOG_PLLCLKMODA2_ADC_CLK_SEL_MSB 12
+#define PHY_ANALOG_PLLCLKMODA2_ADC_CLK_SEL_LSB 9
+#define PHY_ANALOG_PLLCLKMODA2_ADC_CLK_SEL_MASK 0x00001e00
+#define PHY_ANALOG_PLLCLKMODA2_ADC_CLK_SEL_GET(x) (((x) & 0x00001e00) >> 9)
+#define PHY_ANALOG_PLLCLKMODA2_ADC_CLK_SEL_SET(x) (((x) << 9) & 0x00001e00)
+#define PHY_ANALOG_PLLCLKMODA2_LOCAL_CLKMODA_MSB 13
+#define PHY_ANALOG_PLLCLKMODA2_LOCAL_CLKMODA_LSB 13
+#define PHY_ANALOG_PLLCLKMODA2_LOCAL_CLKMODA_MASK 0x00002000
+#define PHY_ANALOG_PLLCLKMODA2_LOCAL_CLKMODA_GET(x) (((x) & 0x00002000) >> 13)
+#define PHY_ANALOG_PLLCLKMODA2_LOCAL_CLKMODA_SET(x) (((x) << 13) & 0x00002000)
+#define PHY_ANALOG_PLLCLKMODA2_PLLBYPASS_MSB 14
+#define PHY_ANALOG_PLLCLKMODA2_PLLBYPASS_LSB 14
+#define PHY_ANALOG_PLLCLKMODA2_PLLBYPASS_MASK 0x00004000
+#define PHY_ANALOG_PLLCLKMODA2_PLLBYPASS_GET(x) (((x) & 0x00004000) >> 14)
+#define PHY_ANALOG_PLLCLKMODA2_PLLBYPASS_SET(x) (((x) << 14) & 0x00004000)
+#define PHY_ANALOG_PLLCLKMODA2_LOCAL_PLLBYPASS_MSB 15
+#define PHY_ANALOG_PLLCLKMODA2_LOCAL_PLLBYPASS_LSB 15
+#define PHY_ANALOG_PLLCLKMODA2_LOCAL_PLLBYPASS_MASK 0x00008000
+#define PHY_ANALOG_PLLCLKMODA2_LOCAL_PLLBYPASS_GET(x) (((x) & 0x00008000) >> 15)
+#define PHY_ANALOG_PLLCLKMODA2_LOCAL_PLLBYPASS_SET(x) (((x) << 15) & 0x00008000)
+#define PHY_ANALOG_PLLCLKMODA2_PLLATB_MSB 17
+#define PHY_ANALOG_PLLCLKMODA2_PLLATB_LSB 16
+#define PHY_ANALOG_PLLCLKMODA2_PLLATB_MASK 0x00030000
+#define PHY_ANALOG_PLLCLKMODA2_PLLATB_GET(x) (((x) & 0x00030000) >> 16)
+#define PHY_ANALOG_PLLCLKMODA2_PLLATB_SET(x) (((x) << 16) & 0x00030000)
+#define PHY_ANALOG_PLLCLKMODA2_PLL_SVREG_MSB 18
+#define PHY_ANALOG_PLLCLKMODA2_PLL_SVREG_LSB 18
+#define PHY_ANALOG_PLLCLKMODA2_PLL_SVREG_MASK 0x00040000
+#define PHY_ANALOG_PLLCLKMODA2_PLL_SVREG_GET(x) (((x) & 0x00040000) >> 18)
+#define PHY_ANALOG_PLLCLKMODA2_PLL_SVREG_SET(x) (((x) << 18) & 0x00040000)
+#define PHY_ANALOG_PLLCLKMODA2_HI_FREQ_EN_MSB 19
+#define PHY_ANALOG_PLLCLKMODA2_HI_FREQ_EN_LSB 19
+#define PHY_ANALOG_PLLCLKMODA2_HI_FREQ_EN_MASK 0x00080000
+#define PHY_ANALOG_PLLCLKMODA2_HI_FREQ_EN_GET(x) (((x) & 0x00080000) >> 19)
+#define PHY_ANALOG_PLLCLKMODA2_HI_FREQ_EN_SET(x) (((x) << 19) & 0x00080000)
+#define PHY_ANALOG_PLLCLKMODA2_RST_WARM_INT_L_MSB 20
+#define PHY_ANALOG_PLLCLKMODA2_RST_WARM_INT_L_LSB 20
+#define PHY_ANALOG_PLLCLKMODA2_RST_WARM_INT_L_MASK 0x00100000
+#define PHY_ANALOG_PLLCLKMODA2_RST_WARM_INT_L_GET(x) (((x) & 0x00100000) >> 20)
+#define PHY_ANALOG_PLLCLKMODA2_RST_WARM_INT_L_SET(x) (((x) << 20) & 0x00100000)
+#define PHY_ANALOG_PLLCLKMODA2_RST_WARM_OVR_MSB 21
+#define PHY_ANALOG_PLLCLKMODA2_RST_WARM_OVR_LSB 21
+#define PHY_ANALOG_PLLCLKMODA2_RST_WARM_OVR_MASK 0x00200000
+#define PHY_ANALOG_PLLCLKMODA2_RST_WARM_OVR_GET(x) (((x) & 0x00200000) >> 21)
+#define PHY_ANALOG_PLLCLKMODA2_RST_WARM_OVR_SET(x) (((x) << 21) & 0x00200000)
+#define PHY_ANALOG_PLLCLKMODA2_PLL_KVCO_MSB 23
+#define PHY_ANALOG_PLLCLKMODA2_PLL_KVCO_LSB 22
+#define PHY_ANALOG_PLLCLKMODA2_PLL_KVCO_MASK 0x00c00000
+#define PHY_ANALOG_PLLCLKMODA2_PLL_KVCO_GET(x) (((x) & 0x00c00000) >> 22)
+#define PHY_ANALOG_PLLCLKMODA2_PLL_KVCO_SET(x) (((x) << 22) & 0x00c00000)
+#define PHY_ANALOG_PLLCLKMODA2_PLLICP_MSB 26
+#define PHY_ANALOG_PLLCLKMODA2_PLLICP_LSB 24
+#define PHY_ANALOG_PLLCLKMODA2_PLLICP_MASK 0x07000000
+#define PHY_ANALOG_PLLCLKMODA2_PLLICP_GET(x) (((x) & 0x07000000) >> 24)
+#define PHY_ANALOG_PLLCLKMODA2_PLLICP_SET(x) (((x) << 24) & 0x07000000)
+#define PHY_ANALOG_PLLCLKMODA2_PLLFILTER_MSB 31
+#define PHY_ANALOG_PLLCLKMODA2_PLLFILTER_LSB 27
+#define PHY_ANALOG_PLLCLKMODA2_PLLFILTER_MASK 0xf8000000
+#define PHY_ANALOG_PLLCLKMODA2_PLLFILTER_GET(x) (((x) & 0xf8000000) >> 27)
+#define PHY_ANALOG_PLLCLKMODA2_PLLFILTER_SET(x) (((x) << 27) & 0xf8000000)
+
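+/*
+ * Illustrative sketch (not part of the original generated header): because
+ * each *_SET macro masks its argument into its own bit range, a full register
+ * image can be built by OR-ing fields together.  The field values and the
+ * phy_reg_write() helper are hypothetical; this shows only the packing, not a
+ * validated PLL configuration.
+ */
+#if 0	/* example only */
+static void phy_analog_write_pllclkmoda(u32 frac, u32 refdiv, u32 div)
+{
+	u32 val = 0;
+
+	val |= PHY_ANALOG_PLLCLKMODA_PLLFRAC_SET(frac);		/* bits 16:2  */
+	val |= PHY_ANALOG_PLLCLKMODA_REFDIV_SET(refdiv);	/* bits 20:17 */
+	val |= PHY_ANALOG_PLLCLKMODA_DIV_SET(div);		/* bits 30:21 */
+	/* PWD_PLLSDM and PWDPLL stay 0 here (by the naming, not powered down). */
+	phy_reg_write(PHY_ANALOG_PLLCLKMODA_OFFSET, val);
+}
+#endif
+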
+/* macros for TOP */
+#define PHY_ANALOG_TOP_ADDRESS 0x00000288
+#define PHY_ANALOG_TOP_OFFSET 0x00000288
+#define PHY_ANALOG_TOP_SPARE_MSB 2
+#define PHY_ANALOG_TOP_SPARE_LSB 0
+#define PHY_ANALOG_TOP_SPARE_MASK 0x00000007
+#define PHY_ANALOG_TOP_SPARE_GET(x) (((x) & 0x00000007) >> 0)
+#define PHY_ANALOG_TOP_SPARE_SET(x) (((x) << 0) & 0x00000007)
+#define PHY_ANALOG_TOP_PWDBIAS_MSB 3
+#define PHY_ANALOG_TOP_PWDBIAS_LSB 3
+#define PHY_ANALOG_TOP_PWDBIAS_MASK 0x00000008
+#define PHY_ANALOG_TOP_PWDBIAS_GET(x) (((x) & 0x00000008) >> 3)
+#define PHY_ANALOG_TOP_PWDBIAS_SET(x) (((x) << 3) & 0x00000008)
+#define PHY_ANALOG_TOP_FLIP_XPABIAS_MSB 4
+#define PHY_ANALOG_TOP_FLIP_XPABIAS_LSB 4
+#define PHY_ANALOG_TOP_FLIP_XPABIAS_MASK 0x00000010
+#define PHY_ANALOG_TOP_FLIP_XPABIAS_GET(x) (((x) & 0x00000010) >> 4)
+#define PHY_ANALOG_TOP_FLIP_XPABIAS_SET(x) (((x) << 4) & 0x00000010)
+#define PHY_ANALOG_TOP_XPAON2_MSB 5
+#define PHY_ANALOG_TOP_XPAON2_LSB 5
+#define PHY_ANALOG_TOP_XPAON2_MASK 0x00000020
+#define PHY_ANALOG_TOP_XPAON2_GET(x) (((x) & 0x00000020) >> 5)
+#define PHY_ANALOG_TOP_XPAON2_SET(x) (((x) << 5) & 0x00000020)
+#define PHY_ANALOG_TOP_XPAON5_MSB 6
+#define PHY_ANALOG_TOP_XPAON5_LSB 6
+#define PHY_ANALOG_TOP_XPAON5_MASK 0x00000040
+#define PHY_ANALOG_TOP_XPAON5_GET(x) (((x) & 0x00000040) >> 6)
+#define PHY_ANALOG_TOP_XPAON5_SET(x) (((x) << 6) & 0x00000040)
+#define PHY_ANALOG_TOP_XPASHORT2GND_MSB 7
+#define PHY_ANALOG_TOP_XPASHORT2GND_LSB 7
+#define PHY_ANALOG_TOP_XPASHORT2GND_MASK 0x00000080
+#define PHY_ANALOG_TOP_XPASHORT2GND_GET(x) (((x) & 0x00000080) >> 7)
+#define PHY_ANALOG_TOP_XPASHORT2GND_SET(x) (((x) << 7) & 0x00000080)
+#define PHY_ANALOG_TOP_XPABIASLVL_MSB 11
+#define PHY_ANALOG_TOP_XPABIASLVL_LSB 8
+#define PHY_ANALOG_TOP_XPABIASLVL_MASK 0x00000f00
+#define PHY_ANALOG_TOP_XPABIASLVL_GET(x) (((x) & 0x00000f00) >> 8)
+#define PHY_ANALOG_TOP_XPABIASLVL_SET(x) (((x) << 8) & 0x00000f00)
+#define PHY_ANALOG_TOP_XPABIAS_EN_MSB 12
+#define PHY_ANALOG_TOP_XPABIAS_EN_LSB 12
+#define PHY_ANALOG_TOP_XPABIAS_EN_MASK 0x00001000
+#define PHY_ANALOG_TOP_XPABIAS_EN_GET(x) (((x) & 0x00001000) >> 12)
+#define PHY_ANALOG_TOP_XPABIAS_EN_SET(x) (((x) << 12) & 0x00001000)
+#define PHY_ANALOG_TOP_ATBSELECT_MSB 13
+#define PHY_ANALOG_TOP_ATBSELECT_LSB 13
+#define PHY_ANALOG_TOP_ATBSELECT_MASK 0x00002000
+#define PHY_ANALOG_TOP_ATBSELECT_GET(x) (((x) & 0x00002000) >> 13)
+#define PHY_ANALOG_TOP_ATBSELECT_SET(x) (((x) << 13) & 0x00002000)
+#define PHY_ANALOG_TOP_LOCAL_XPA_MSB 14
+#define PHY_ANALOG_TOP_LOCAL_XPA_LSB 14
+#define PHY_ANALOG_TOP_LOCAL_XPA_MASK 0x00004000
+#define PHY_ANALOG_TOP_LOCAL_XPA_GET(x) (((x) & 0x00004000) >> 14)
+#define PHY_ANALOG_TOP_LOCAL_XPA_SET(x) (((x) << 14) & 0x00004000)
+#define PHY_ANALOG_TOP_XPABIAS_BYPASS_MSB 15
+#define PHY_ANALOG_TOP_XPABIAS_BYPASS_LSB 15
+#define PHY_ANALOG_TOP_XPABIAS_BYPASS_MASK 0x00008000
+#define PHY_ANALOG_TOP_XPABIAS_BYPASS_GET(x) (((x) & 0x00008000) >> 15)
+#define PHY_ANALOG_TOP_XPABIAS_BYPASS_SET(x) (((x) << 15) & 0x00008000)
+#define PHY_ANALOG_TOP_TEST_PADQ_EN_MSB 16
+#define PHY_ANALOG_TOP_TEST_PADQ_EN_LSB 16
+#define PHY_ANALOG_TOP_TEST_PADQ_EN_MASK 0x00010000
+#define PHY_ANALOG_TOP_TEST_PADQ_EN_GET(x) (((x) & 0x00010000) >> 16)
+#define PHY_ANALOG_TOP_TEST_PADQ_EN_SET(x) (((x) << 16) & 0x00010000)
+#define PHY_ANALOG_TOP_TEST_PADI_EN_MSB 17
+#define PHY_ANALOG_TOP_TEST_PADI_EN_LSB 17
+#define PHY_ANALOG_TOP_TEST_PADI_EN_MASK 0x00020000
+#define PHY_ANALOG_TOP_TEST_PADI_EN_GET(x) (((x) & 0x00020000) >> 17)
+#define PHY_ANALOG_TOP_TEST_PADI_EN_SET(x) (((x) << 17) & 0x00020000)
+#define PHY_ANALOG_TOP_TESTIQ_RSEL_MSB 18
+#define PHY_ANALOG_TOP_TESTIQ_RSEL_LSB 18
+#define PHY_ANALOG_TOP_TESTIQ_RSEL_MASK 0x00040000
+#define PHY_ANALOG_TOP_TESTIQ_RSEL_GET(x) (((x) & 0x00040000) >> 18)
+#define PHY_ANALOG_TOP_TESTIQ_RSEL_SET(x) (((x) << 18) & 0x00040000)
+#define PHY_ANALOG_TOP_TESTIQ_BUFEN_MSB 19
+#define PHY_ANALOG_TOP_TESTIQ_BUFEN_LSB 19
+#define PHY_ANALOG_TOP_TESTIQ_BUFEN_MASK 0x00080000
+#define PHY_ANALOG_TOP_TESTIQ_BUFEN_GET(x) (((x) & 0x00080000) >> 19)
+#define PHY_ANALOG_TOP_TESTIQ_BUFEN_SET(x) (((x) << 19) & 0x00080000)
+#define PHY_ANALOG_TOP_PAD2GND_MSB 20
+#define PHY_ANALOG_TOP_PAD2GND_LSB 20
+#define PHY_ANALOG_TOP_PAD2GND_MASK 0x00100000
+#define PHY_ANALOG_TOP_PAD2GND_GET(x) (((x) & 0x00100000) >> 20)
+#define PHY_ANALOG_TOP_PAD2GND_SET(x) (((x) << 20) & 0x00100000)
+#define PHY_ANALOG_TOP_INTH2PAD_MSB 21
+#define PHY_ANALOG_TOP_INTH2PAD_LSB 21
+#define PHY_ANALOG_TOP_INTH2PAD_MASK 0x00200000
+#define PHY_ANALOG_TOP_INTH2PAD_GET(x) (((x) & 0x00200000) >> 21)
+#define PHY_ANALOG_TOP_INTH2PAD_SET(x) (((x) << 21) & 0x00200000)
+#define PHY_ANALOG_TOP_INTH2GND_MSB 22
+#define PHY_ANALOG_TOP_INTH2GND_LSB 22
+#define PHY_ANALOG_TOP_INTH2GND_MASK 0x00400000
+#define PHY_ANALOG_TOP_INTH2GND_GET(x) (((x) & 0x00400000) >> 22)
+#define PHY_ANALOG_TOP_INTH2GND_SET(x) (((x) << 22) & 0x00400000)
+#define PHY_ANALOG_TOP_INT2PAD_MSB 23
+#define PHY_ANALOG_TOP_INT2PAD_LSB 23
+#define PHY_ANALOG_TOP_INT2PAD_MASK 0x00800000
+#define PHY_ANALOG_TOP_INT2PAD_GET(x) (((x) & 0x00800000) >> 23)
+#define PHY_ANALOG_TOP_INT2PAD_SET(x) (((x) << 23) & 0x00800000)
+#define PHY_ANALOG_TOP_INT2GND_MSB 24
+#define PHY_ANALOG_TOP_INT2GND_LSB 24
+#define PHY_ANALOG_TOP_INT2GND_MASK 0x01000000
+#define PHY_ANALOG_TOP_INT2GND_GET(x) (((x) & 0x01000000) >> 24)
+#define PHY_ANALOG_TOP_INT2GND_SET(x) (((x) << 24) & 0x01000000)
+#define PHY_ANALOG_TOP_PWDPALCLK_MSB 25
+#define PHY_ANALOG_TOP_PWDPALCLK_LSB 25
+#define PHY_ANALOG_TOP_PWDPALCLK_MASK 0x02000000
+#define PHY_ANALOG_TOP_PWDPALCLK_GET(x) (((x) & 0x02000000) >> 25)
+#define PHY_ANALOG_TOP_PWDPALCLK_SET(x) (((x) << 25) & 0x02000000)
+#define PHY_ANALOG_TOP_INV_CLK320_ADC_MSB 26
+#define PHY_ANALOG_TOP_INV_CLK320_ADC_LSB 26
+#define PHY_ANALOG_TOP_INV_CLK320_ADC_MASK 0x04000000
+#define PHY_ANALOG_TOP_INV_CLK320_ADC_GET(x) (((x) & 0x04000000) >> 26)
+#define PHY_ANALOG_TOP_INV_CLK320_ADC_SET(x) (((x) << 26) & 0x04000000)
+#define PHY_ANALOG_TOP_FLIP_REFCLK40_MSB 27
+#define PHY_ANALOG_TOP_FLIP_REFCLK40_LSB 27
+#define PHY_ANALOG_TOP_FLIP_REFCLK40_MASK 0x08000000
+#define PHY_ANALOG_TOP_FLIP_REFCLK40_GET(x) (((x) & 0x08000000) >> 27)
+#define PHY_ANALOG_TOP_FLIP_REFCLK40_SET(x) (((x) << 27) & 0x08000000)
+#define PHY_ANALOG_TOP_FLIP_PLLCLK320_MSB 28
+#define PHY_ANALOG_TOP_FLIP_PLLCLK320_LSB 28
+#define PHY_ANALOG_TOP_FLIP_PLLCLK320_MASK 0x10000000
+#define PHY_ANALOG_TOP_FLIP_PLLCLK320_GET(x) (((x) & 0x10000000) >> 28)
+#define PHY_ANALOG_TOP_FLIP_PLLCLK320_SET(x) (((x) << 28) & 0x10000000)
+#define PHY_ANALOG_TOP_FLIP_PLLCLK160_MSB 29
+#define PHY_ANALOG_TOP_FLIP_PLLCLK160_LSB 29
+#define PHY_ANALOG_TOP_FLIP_PLLCLK160_MASK 0x20000000
+#define PHY_ANALOG_TOP_FLIP_PLLCLK160_GET(x) (((x) & 0x20000000) >> 29)
+#define PHY_ANALOG_TOP_FLIP_PLLCLK160_SET(x) (((x) << 29) & 0x20000000)
+#define PHY_ANALOG_TOP_CLK_SEL_MSB 31
+#define PHY_ANALOG_TOP_CLK_SEL_LSB 30
+#define PHY_ANALOG_TOP_CLK_SEL_MASK 0xc0000000
+#define PHY_ANALOG_TOP_CLK_SEL_GET(x) (((x) & 0xc0000000) >> 30)
+#define PHY_ANALOG_TOP_CLK_SEL_SET(x) (((x) << 30) & 0xc0000000)
+
+/* macros for THERM */
+#define PHY_ANALOG_THERM_ADDRESS 0x0000028c
+#define PHY_ANALOG_THERM_OFFSET 0x0000028c
+#define PHY_ANALOG_THERM_LOREG_LVL_MSB 2
+#define PHY_ANALOG_THERM_LOREG_LVL_LSB 0
+#define PHY_ANALOG_THERM_LOREG_LVL_MASK 0x00000007
+#define PHY_ANALOG_THERM_LOREG_LVL_GET(x) (((x) & 0x00000007) >> 0)
+#define PHY_ANALOG_THERM_LOREG_LVL_SET(x) (((x) << 0) & 0x00000007)
+#define PHY_ANALOG_THERM_RFREG_LVL_MSB 5
+#define PHY_ANALOG_THERM_RFREG_LVL_LSB 3
+#define PHY_ANALOG_THERM_RFREG_LVL_MASK 0x00000038
+#define PHY_ANALOG_THERM_RFREG_LVL_GET(x) (((x) & 0x00000038) >> 3)
+#define PHY_ANALOG_THERM_RFREG_LVL_SET(x) (((x) << 3) & 0x00000038)
+#define PHY_ANALOG_THERM_SAR_ADC_DONE_MSB 6
+#define PHY_ANALOG_THERM_SAR_ADC_DONE_LSB 6
+#define PHY_ANALOG_THERM_SAR_ADC_DONE_MASK 0x00000040
+#define PHY_ANALOG_THERM_SAR_ADC_DONE_GET(x) (((x) & 0x00000040) >> 6)
+#define PHY_ANALOG_THERM_SAR_ADC_OUT_MSB 14
+#define PHY_ANALOG_THERM_SAR_ADC_OUT_LSB 7
+#define PHY_ANALOG_THERM_SAR_ADC_OUT_MASK 0x00007f80
+#define PHY_ANALOG_THERM_SAR_ADC_OUT_GET(x) (((x) & 0x00007f80) >> 7)
+#define PHY_ANALOG_THERM_SAR_DACTEST_CODE_MSB 22
+#define PHY_ANALOG_THERM_SAR_DACTEST_CODE_LSB 15
+#define PHY_ANALOG_THERM_SAR_DACTEST_CODE_MASK 0x007f8000
+#define PHY_ANALOG_THERM_SAR_DACTEST_CODE_GET(x) (((x) & 0x007f8000) >> 15)
+#define PHY_ANALOG_THERM_SAR_DACTEST_CODE_SET(x) (((x) << 15) & 0x007f8000)
+#define PHY_ANALOG_THERM_SAR_DACTEST_EN_MSB 23
+#define PHY_ANALOG_THERM_SAR_DACTEST_EN_LSB 23
+#define PHY_ANALOG_THERM_SAR_DACTEST_EN_MASK 0x00800000
+#define PHY_ANALOG_THERM_SAR_DACTEST_EN_GET(x) (((x) & 0x00800000) >> 23)
+#define PHY_ANALOG_THERM_SAR_DACTEST_EN_SET(x) (((x) << 23) & 0x00800000)
+#define PHY_ANALOG_THERM_SAR_ADCCAL_EN_MSB 24
+#define PHY_ANALOG_THERM_SAR_ADCCAL_EN_LSB 24
+#define PHY_ANALOG_THERM_SAR_ADCCAL_EN_MASK 0x01000000
+#define PHY_ANALOG_THERM_SAR_ADCCAL_EN_GET(x) (((x) & 0x01000000) >> 24)
+#define PHY_ANALOG_THERM_SAR_ADCCAL_EN_SET(x) (((x) << 24) & 0x01000000)
+#define PHY_ANALOG_THERM_THERMSEL_MSB 26
+#define PHY_ANALOG_THERM_THERMSEL_LSB 25
+#define PHY_ANALOG_THERM_THERMSEL_MASK 0x06000000
+#define PHY_ANALOG_THERM_THERMSEL_GET(x) (((x) & 0x06000000) >> 25)
+#define PHY_ANALOG_THERM_THERMSEL_SET(x) (((x) << 25) & 0x06000000)
+#define PHY_ANALOG_THERM_SAR_SLOW_EN_MSB 27
+#define PHY_ANALOG_THERM_SAR_SLOW_EN_LSB 27
+#define PHY_ANALOG_THERM_SAR_SLOW_EN_MASK 0x08000000
+#define PHY_ANALOG_THERM_SAR_SLOW_EN_GET(x) (((x) & 0x08000000) >> 27)
+#define PHY_ANALOG_THERM_SAR_SLOW_EN_SET(x) (((x) << 27) & 0x08000000)
+#define PHY_ANALOG_THERM_THERMSTART_MSB 28
+#define PHY_ANALOG_THERM_THERMSTART_LSB 28
+#define PHY_ANALOG_THERM_THERMSTART_MASK 0x10000000
+#define PHY_ANALOG_THERM_THERMSTART_GET(x) (((x) & 0x10000000) >> 28)
+#define PHY_ANALOG_THERM_THERMSTART_SET(x) (((x) << 28) & 0x10000000)
+#define PHY_ANALOG_THERM_SAR_AUTOPWD_EN_MSB 29
+#define PHY_ANALOG_THERM_SAR_AUTOPWD_EN_LSB 29
+#define PHY_ANALOG_THERM_SAR_AUTOPWD_EN_MASK 0x20000000
+#define PHY_ANALOG_THERM_SAR_AUTOPWD_EN_GET(x) (((x) & 0x20000000) >> 29)
+#define PHY_ANALOG_THERM_SAR_AUTOPWD_EN_SET(x) (((x) << 29) & 0x20000000)
+#define PHY_ANALOG_THERM_THERMON_MSB 30
+#define PHY_ANALOG_THERM_THERMON_LSB 30
+#define PHY_ANALOG_THERM_THERMON_MASK 0x40000000
+#define PHY_ANALOG_THERM_THERMON_GET(x) (((x) & 0x40000000) >> 30)
+#define PHY_ANALOG_THERM_THERMON_SET(x) (((x) << 30) & 0x40000000)
+#define PHY_ANALOG_THERM_LOCAL_THERM_MSB 31
+#define PHY_ANALOG_THERM_LOCAL_THERM_LSB 31
+#define PHY_ANALOG_THERM_LOCAL_THERM_MASK 0x80000000
+#define PHY_ANALOG_THERM_LOCAL_THERM_GET(x) (((x) & 0x80000000) >> 31)
+#define PHY_ANALOG_THERM_LOCAL_THERM_SET(x) (((x) << 31) & 0x80000000)
+
+/* macros for XTAL */
+#define PHY_ANALOG_XTAL_ADDRESS 0x00000290
+#define PHY_ANALOG_XTAL_OFFSET 0x00000290
+#define PHY_ANALOG_XTAL_SPARE_MSB 5
+#define PHY_ANALOG_XTAL_SPARE_LSB 0
+#define PHY_ANALOG_XTAL_SPARE_MASK 0x0000003f
+#define PHY_ANALOG_XTAL_SPARE_GET(x) (((x) & 0x0000003f) >> 0)
+#define PHY_ANALOG_XTAL_SPARE_SET(x) (((x) << 0) & 0x0000003f)
+#define PHY_ANALOG_XTAL_XTAL_NOTCXODET_MSB 6
+#define PHY_ANALOG_XTAL_XTAL_NOTCXODET_LSB 6
+#define PHY_ANALOG_XTAL_XTAL_NOTCXODET_MASK 0x00000040
+#define PHY_ANALOG_XTAL_XTAL_NOTCXODET_GET(x) (((x) & 0x00000040) >> 6)
+#define PHY_ANALOG_XTAL_XTAL_NOTCXODET_SET(x) (((x) << 6) & 0x00000040)
+#define PHY_ANALOG_XTAL_LOCALBIAS2X_MSB 7
+#define PHY_ANALOG_XTAL_LOCALBIAS2X_LSB 7
+#define PHY_ANALOG_XTAL_LOCALBIAS2X_MASK 0x00000080
+#define PHY_ANALOG_XTAL_LOCALBIAS2X_GET(x) (((x) & 0x00000080) >> 7)
+#define PHY_ANALOG_XTAL_LOCALBIAS2X_SET(x) (((x) << 7) & 0x00000080)
+#define PHY_ANALOG_XTAL_LOCAL_XTAL_MSB 8
+#define PHY_ANALOG_XTAL_LOCAL_XTAL_LSB 8
+#define PHY_ANALOG_XTAL_LOCAL_XTAL_MASK 0x00000100
+#define PHY_ANALOG_XTAL_LOCAL_XTAL_GET(x) (((x) & 0x00000100) >> 8)
+#define PHY_ANALOG_XTAL_LOCAL_XTAL_SET(x) (((x) << 8) & 0x00000100)
+#define PHY_ANALOG_XTAL_XTAL_PWDCLKIN_MSB 9
+#define PHY_ANALOG_XTAL_XTAL_PWDCLKIN_LSB 9
+#define PHY_ANALOG_XTAL_XTAL_PWDCLKIN_MASK 0x00000200
+#define PHY_ANALOG_XTAL_XTAL_PWDCLKIN_GET(x) (((x) & 0x00000200) >> 9)
+#define PHY_ANALOG_XTAL_XTAL_PWDCLKIN_SET(x) (((x) << 9) & 0x00000200)
+#define PHY_ANALOG_XTAL_XTAL_OSCON_MSB 10
+#define PHY_ANALOG_XTAL_XTAL_OSCON_LSB 10
+#define PHY_ANALOG_XTAL_XTAL_OSCON_MASK 0x00000400
+#define PHY_ANALOG_XTAL_XTAL_OSCON_GET(x) (((x) & 0x00000400) >> 10)
+#define PHY_ANALOG_XTAL_XTAL_OSCON_SET(x) (((x) << 10) & 0x00000400)
+#define PHY_ANALOG_XTAL_XTAL_PWDCLKD_MSB 11
+#define PHY_ANALOG_XTAL_XTAL_PWDCLKD_LSB 11
+#define PHY_ANALOG_XTAL_XTAL_PWDCLKD_MASK 0x00000800
+#define PHY_ANALOG_XTAL_XTAL_PWDCLKD_GET(x) (((x) & 0x00000800) >> 11)
+#define PHY_ANALOG_XTAL_XTAL_PWDCLKD_SET(x) (((x) << 11) & 0x00000800)
+#define PHY_ANALOG_XTAL_XTAL_LOCALBIAS_MSB 12
+#define PHY_ANALOG_XTAL_XTAL_LOCALBIAS_LSB 12
+#define PHY_ANALOG_XTAL_XTAL_LOCALBIAS_MASK 0x00001000
+#define PHY_ANALOG_XTAL_XTAL_LOCALBIAS_GET(x) (((x) & 0x00001000) >> 12)
+#define PHY_ANALOG_XTAL_XTAL_LOCALBIAS_SET(x) (((x) << 12) & 0x00001000)
+#define PHY_ANALOG_XTAL_XTAL_SHRTXIN_MSB 13
+#define PHY_ANALOG_XTAL_XTAL_SHRTXIN_LSB 13
+#define PHY_ANALOG_XTAL_XTAL_SHRTXIN_MASK 0x00002000
+#define PHY_ANALOG_XTAL_XTAL_SHRTXIN_GET(x) (((x) & 0x00002000) >> 13)
+#define PHY_ANALOG_XTAL_XTAL_SHRTXIN_SET(x) (((x) << 13) & 0x00002000)
+#define PHY_ANALOG_XTAL_XTAL_DRVSTR_MSB 15
+#define PHY_ANALOG_XTAL_XTAL_DRVSTR_LSB 14
+#define PHY_ANALOG_XTAL_XTAL_DRVSTR_MASK 0x0000c000
+#define PHY_ANALOG_XTAL_XTAL_DRVSTR_GET(x) (((x) & 0x0000c000) >> 14)
+#define PHY_ANALOG_XTAL_XTAL_DRVSTR_SET(x) (((x) << 14) & 0x0000c000)
+#define PHY_ANALOG_XTAL_XTAL_CAPOUTDAC_MSB 22
+#define PHY_ANALOG_XTAL_XTAL_CAPOUTDAC_LSB 16
+#define PHY_ANALOG_XTAL_XTAL_CAPOUTDAC_MASK 0x007f0000
+#define PHY_ANALOG_XTAL_XTAL_CAPOUTDAC_GET(x) (((x) & 0x007f0000) >> 16)
+#define PHY_ANALOG_XTAL_XTAL_CAPOUTDAC_SET(x) (((x) << 16) & 0x007f0000)
+#define PHY_ANALOG_XTAL_XTAL_CAPINDAC_MSB 29
+#define PHY_ANALOG_XTAL_XTAL_CAPINDAC_LSB 23
+#define PHY_ANALOG_XTAL_XTAL_CAPINDAC_MASK 0x3f800000
+#define PHY_ANALOG_XTAL_XTAL_CAPINDAC_GET(x) (((x) & 0x3f800000) >> 23)
+#define PHY_ANALOG_XTAL_XTAL_CAPINDAC_SET(x) (((x) << 23) & 0x3f800000)
+#define PHY_ANALOG_XTAL_XTAL_BIAS2X_MSB 30
+#define PHY_ANALOG_XTAL_XTAL_BIAS2X_LSB 30
+#define PHY_ANALOG_XTAL_XTAL_BIAS2X_MASK 0x40000000
+#define PHY_ANALOG_XTAL_XTAL_BIAS2X_GET(x) (((x) & 0x40000000) >> 30)
+#define PHY_ANALOG_XTAL_XTAL_BIAS2X_SET(x) (((x) << 30) & 0x40000000)
+#define PHY_ANALOG_XTAL_TCXODET_MSB 31
+#define PHY_ANALOG_XTAL_TCXODET_LSB 31
+#define PHY_ANALOG_XTAL_TCXODET_MASK 0x80000000
+#define PHY_ANALOG_XTAL_TCXODET_GET(x) (((x) & 0x80000000) >> 31)
+
+/* macros for rbist_cntrl */
+#define PHY_ANALOG_RBIST_CNTRL_ADDRESS 0x00000380
+#define PHY_ANALOG_RBIST_CNTRL_OFFSET 0x00000380
+#define PHY_ANALOG_RBIST_CNTRL_ATE_TONEGEN_DC_ENABLE_MSB 0
+#define PHY_ANALOG_RBIST_CNTRL_ATE_TONEGEN_DC_ENABLE_LSB 0
+#define PHY_ANALOG_RBIST_CNTRL_ATE_TONEGEN_DC_ENABLE_MASK 0x00000001
+#define PHY_ANALOG_RBIST_CNTRL_ATE_TONEGEN_DC_ENABLE_GET(x) (((x) & 0x00000001) >> 0)
+#define PHY_ANALOG_RBIST_CNTRL_ATE_TONEGEN_DC_ENABLE_SET(x) (((x) << 0) & 0x00000001)
+#define PHY_ANALOG_RBIST_CNTRL_ATE_TONEGEN_TONE0_ENABLE_MSB 1
+#define PHY_ANALOG_RBIST_CNTRL_ATE_TONEGEN_TONE0_ENABLE_LSB 1
+#define PHY_ANALOG_RBIST_CNTRL_ATE_TONEGEN_TONE0_ENABLE_MASK 0x00000002
+#define PHY_ANALOG_RBIST_CNTRL_ATE_TONEGEN_TONE0_ENABLE_GET(x) (((x) & 0x00000002) >> 1)
+#define PHY_ANALOG_RBIST_CNTRL_ATE_TONEGEN_TONE0_ENABLE_SET(x) (((x) << 1) & 0x00000002)
+#define PHY_ANALOG_RBIST_CNTRL_ATE_TONEGEN_TONE1_ENABLE_MSB 2
+#define PHY_ANALOG_RBIST_CNTRL_ATE_TONEGEN_TONE1_ENABLE_LSB 2
+#define PHY_ANALOG_RBIST_CNTRL_ATE_TONEGEN_TONE1_ENABLE_MASK 0x00000004
+#define PHY_ANALOG_RBIST_CNTRL_ATE_TONEGEN_TONE1_ENABLE_GET(x) (((x) & 0x00000004) >> 2)
+#define PHY_ANALOG_RBIST_CNTRL_ATE_TONEGEN_TONE1_ENABLE_SET(x) (((x) << 2) & 0x00000004)
+#define PHY_ANALOG_RBIST_CNTRL_ATE_TONEGEN_LFTONE0_ENABLE_MSB 3
+#define PHY_ANALOG_RBIST_CNTRL_ATE_TONEGEN_LFTONE0_ENABLE_LSB 3
+#define PHY_ANALOG_RBIST_CNTRL_ATE_TONEGEN_LFTONE0_ENABLE_MASK 0x00000008
+#define PHY_ANALOG_RBIST_CNTRL_ATE_TONEGEN_LFTONE0_ENABLE_GET(x) (((x) & 0x00000008) >> 3)
+#define PHY_ANALOG_RBIST_CNTRL_ATE_TONEGEN_LFTONE0_ENABLE_SET(x) (((x) << 3) & 0x00000008)
+#define PHY_ANALOG_RBIST_CNTRL_ATE_TONEGEN_LINRAMP_ENABLE_I_MSB 4
+#define PHY_ANALOG_RBIST_CNTRL_ATE_TONEGEN_LINRAMP_ENABLE_I_LSB 4
+#define PHY_ANALOG_RBIST_CNTRL_ATE_TONEGEN_LINRAMP_ENABLE_I_MASK 0x00000010
+#define PHY_ANALOG_RBIST_CNTRL_ATE_TONEGEN_LINRAMP_ENABLE_I_GET(x) (((x) & 0x00000010) >> 4)
+#define PHY_ANALOG_RBIST_CNTRL_ATE_TONEGEN_LINRAMP_ENABLE_I_SET(x) (((x) << 4) & 0x00000010)
+#define PHY_ANALOG_RBIST_CNTRL_ATE_TONEGEN_LINRAMP_ENABLE_Q_MSB 5
+#define PHY_ANALOG_RBIST_CNTRL_ATE_TONEGEN_LINRAMP_ENABLE_Q_LSB 5
+#define PHY_ANALOG_RBIST_CNTRL_ATE_TONEGEN_LINRAMP_ENABLE_Q_MASK 0x00000020
+#define PHY_ANALOG_RBIST_CNTRL_ATE_TONEGEN_LINRAMP_ENABLE_Q_GET(x) (((x) & 0x00000020) >> 5)
+#define PHY_ANALOG_RBIST_CNTRL_ATE_TONEGEN_LINRAMP_ENABLE_Q_SET(x) (((x) << 5) & 0x00000020)
+#define PHY_ANALOG_RBIST_CNTRL_ATE_TONEGEN_PRBS_ENABLE_I_MSB 6
+#define PHY_ANALOG_RBIST_CNTRL_ATE_TONEGEN_PRBS_ENABLE_I_LSB 6
+#define PHY_ANALOG_RBIST_CNTRL_ATE_TONEGEN_PRBS_ENABLE_I_MASK 0x00000040
+#define PHY_ANALOG_RBIST_CNTRL_ATE_TONEGEN_PRBS_ENABLE_I_GET(x) (((x) & 0x00000040) >> 6)
+#define PHY_ANALOG_RBIST_CNTRL_ATE_TONEGEN_PRBS_ENABLE_I_SET(x) (((x) << 6) & 0x00000040)
+#define PHY_ANALOG_RBIST_CNTRL_ATE_TONEGEN_PRBS_ENABLE_Q_MSB 7
+#define PHY_ANALOG_RBIST_CNTRL_ATE_TONEGEN_PRBS_ENABLE_Q_LSB 7
+#define PHY_ANALOG_RBIST_CNTRL_ATE_TONEGEN_PRBS_ENABLE_Q_MASK 0x00000080
+#define PHY_ANALOG_RBIST_CNTRL_ATE_TONEGEN_PRBS_ENABLE_Q_GET(x) (((x) & 0x00000080) >> 7)
+#define PHY_ANALOG_RBIST_CNTRL_ATE_TONEGEN_PRBS_ENABLE_Q_SET(x) (((x) << 7) & 0x00000080)
+#define PHY_ANALOG_RBIST_CNTRL_ATE_CMAC_DC_WRITE_TO_CANCEL_MSB 8
+#define PHY_ANALOG_RBIST_CNTRL_ATE_CMAC_DC_WRITE_TO_CANCEL_LSB 8
+#define PHY_ANALOG_RBIST_CNTRL_ATE_CMAC_DC_WRITE_TO_CANCEL_MASK 0x00000100
+#define PHY_ANALOG_RBIST_CNTRL_ATE_CMAC_DC_WRITE_TO_CANCEL_GET(x) (((x) & 0x00000100) >> 8)
+#define PHY_ANALOG_RBIST_CNTRL_ATE_CMAC_DC_WRITE_TO_CANCEL_SET(x) (((x) << 8) & 0x00000100)
+#define PHY_ANALOG_RBIST_CNTRL_ATE_CMAC_DC_ENABLE_MSB 9
+#define PHY_ANALOG_RBIST_CNTRL_ATE_CMAC_DC_ENABLE_LSB 9
+#define PHY_ANALOG_RBIST_CNTRL_ATE_CMAC_DC_ENABLE_MASK 0x00000200
+#define PHY_ANALOG_RBIST_CNTRL_ATE_CMAC_DC_ENABLE_GET(x) (((x) & 0x00000200) >> 9)
+#define PHY_ANALOG_RBIST_CNTRL_ATE_CMAC_DC_ENABLE_SET(x) (((x) << 9) & 0x00000200)
+#define PHY_ANALOG_RBIST_CNTRL_ATE_CMAC_CORR_ENABLE_MSB 10
+#define PHY_ANALOG_RBIST_CNTRL_ATE_CMAC_CORR_ENABLE_LSB 10
+#define PHY_ANALOG_RBIST_CNTRL_ATE_CMAC_CORR_ENABLE_MASK 0x00000400
+#define PHY_ANALOG_RBIST_CNTRL_ATE_CMAC_CORR_ENABLE_GET(x) (((x) & 0x00000400) >> 10)
+#define PHY_ANALOG_RBIST_CNTRL_ATE_CMAC_CORR_ENABLE_SET(x) (((x) << 10) & 0x00000400)
+#define PHY_ANALOG_RBIST_CNTRL_ATE_CMAC_POWER_ENABLE_MSB 11
+#define PHY_ANALOG_RBIST_CNTRL_ATE_CMAC_POWER_ENABLE_LSB 11
+#define PHY_ANALOG_RBIST_CNTRL_ATE_CMAC_POWER_ENABLE_MASK 0x00000800
+#define PHY_ANALOG_RBIST_CNTRL_ATE_CMAC_POWER_ENABLE_GET(x) (((x) & 0x00000800) >> 11)
+#define PHY_ANALOG_RBIST_CNTRL_ATE_CMAC_POWER_ENABLE_SET(x) (((x) << 11) & 0x00000800)
+#define PHY_ANALOG_RBIST_CNTRL_ATE_CMAC_IQ_ENABLE_MSB 12
+#define PHY_ANALOG_RBIST_CNTRL_ATE_CMAC_IQ_ENABLE_LSB 12
+#define PHY_ANALOG_RBIST_CNTRL_ATE_CMAC_IQ_ENABLE_MASK 0x00001000
+#define PHY_ANALOG_RBIST_CNTRL_ATE_CMAC_IQ_ENABLE_GET(x) (((x) & 0x00001000) >> 12)
+#define PHY_ANALOG_RBIST_CNTRL_ATE_CMAC_IQ_ENABLE_SET(x) (((x) << 12) & 0x00001000)
+#define PHY_ANALOG_RBIST_CNTRL_ATE_CMAC_I2Q2_ENABLE_MSB 13
+#define PHY_ANALOG_RBIST_CNTRL_ATE_CMAC_I2Q2_ENABLE_LSB 13
+#define PHY_ANALOG_RBIST_CNTRL_ATE_CMAC_I2Q2_ENABLE_MASK 0x00002000
+#define PHY_ANALOG_RBIST_CNTRL_ATE_CMAC_I2Q2_ENABLE_GET(x) (((x) & 0x00002000) >> 13)
+#define PHY_ANALOG_RBIST_CNTRL_ATE_CMAC_I2Q2_ENABLE_SET(x) (((x) << 13) & 0x00002000)
+#define PHY_ANALOG_RBIST_CNTRL_ATE_CMAC_POWER_HPF_ENABLE_MSB 14
+#define PHY_ANALOG_RBIST_CNTRL_ATE_CMAC_POWER_HPF_ENABLE_LSB 14
+#define PHY_ANALOG_RBIST_CNTRL_ATE_CMAC_POWER_HPF_ENABLE_MASK 0x00004000
+#define PHY_ANALOG_RBIST_CNTRL_ATE_CMAC_POWER_HPF_ENABLE_GET(x) (((x) & 0x00004000) >> 14)
+#define PHY_ANALOG_RBIST_CNTRL_ATE_CMAC_POWER_HPF_ENABLE_SET(x) (((x) << 14) & 0x00004000)
+#define PHY_ANALOG_RBIST_CNTRL_ATE_RXDAC_CALIBRATE_MSB 15
+#define PHY_ANALOG_RBIST_CNTRL_ATE_RXDAC_CALIBRATE_LSB 15
+#define PHY_ANALOG_RBIST_CNTRL_ATE_RXDAC_CALIBRATE_MASK 0x00008000
+#define PHY_ANALOG_RBIST_CNTRL_ATE_RXDAC_CALIBRATE_GET(x) (((x) & 0x00008000) >> 15)
+#define PHY_ANALOG_RBIST_CNTRL_ATE_RXDAC_CALIBRATE_SET(x) (((x) << 15) & 0x00008000)
+#define PHY_ANALOG_RBIST_CNTRL_ATE_RBIST_ENABLE_MSB 16
+#define PHY_ANALOG_RBIST_CNTRL_ATE_RBIST_ENABLE_LSB 16
+#define PHY_ANALOG_RBIST_CNTRL_ATE_RBIST_ENABLE_MASK 0x00010000
+#define PHY_ANALOG_RBIST_CNTRL_ATE_RBIST_ENABLE_GET(x) (((x) & 0x00010000) >> 16)
+#define PHY_ANALOG_RBIST_CNTRL_ATE_RBIST_ENABLE_SET(x) (((x) << 16) & 0x00010000)
+#define PHY_ANALOG_RBIST_CNTRL_ATE_ADC_CLK_INVERT_MSB 17
+#define PHY_ANALOG_RBIST_CNTRL_ATE_ADC_CLK_INVERT_LSB 17
+#define PHY_ANALOG_RBIST_CNTRL_ATE_ADC_CLK_INVERT_MASK 0x00020000
+#define PHY_ANALOG_RBIST_CNTRL_ATE_ADC_CLK_INVERT_GET(x) (((x) & 0x00020000) >> 17)
+#define PHY_ANALOG_RBIST_CNTRL_ATE_ADC_CLK_INVERT_SET(x) (((x) << 17) & 0x00020000)
+
+/* macros for tx_dc_offset */
+#define PHY_ANALOG_TX_DC_OFFSET_ADDRESS 0x00000384
+#define PHY_ANALOG_TX_DC_OFFSET_OFFSET 0x00000384
+#define PHY_ANALOG_TX_DC_OFFSET_ATE_TONEGEN_DC_I_MSB 10
+#define PHY_ANALOG_TX_DC_OFFSET_ATE_TONEGEN_DC_I_LSB 0
+#define PHY_ANALOG_TX_DC_OFFSET_ATE_TONEGEN_DC_I_MASK 0x000007ff
+#define PHY_ANALOG_TX_DC_OFFSET_ATE_TONEGEN_DC_I_GET(x) (((x) & 0x000007ff) >> 0)
+#define PHY_ANALOG_TX_DC_OFFSET_ATE_TONEGEN_DC_I_SET(x) (((x) << 0) & 0x000007ff)
+#define PHY_ANALOG_TX_DC_OFFSET_ATE_TONEGEN_DC_Q_MSB 26
+#define PHY_ANALOG_TX_DC_OFFSET_ATE_TONEGEN_DC_Q_LSB 16
+#define PHY_ANALOG_TX_DC_OFFSET_ATE_TONEGEN_DC_Q_MASK 0x07ff0000
+#define PHY_ANALOG_TX_DC_OFFSET_ATE_TONEGEN_DC_Q_GET(x) (((x) & 0x07ff0000) >> 16)
+#define PHY_ANALOG_TX_DC_OFFSET_ATE_TONEGEN_DC_Q_SET(x) (((x) << 16) & 0x07ff0000)
+
+/* macros for tx_tonegen0 */
+#define PHY_ANALOG_TX_TONEGEN0_ADDRESS 0x00000388
+#define PHY_ANALOG_TX_TONEGEN0_OFFSET 0x00000388
+#define PHY_ANALOG_TX_TONEGEN0_ATE_TONEGEN_TONE_FREQ_MSB 6
+#define PHY_ANALOG_TX_TONEGEN0_ATE_TONEGEN_TONE_FREQ_LSB 0
+#define PHY_ANALOG_TX_TONEGEN0_ATE_TONEGEN_TONE_FREQ_MASK 0x0000007f
+#define PHY_ANALOG_TX_TONEGEN0_ATE_TONEGEN_TONE_FREQ_GET(x) (((x) & 0x0000007f) >> 0)
+#define PHY_ANALOG_TX_TONEGEN0_ATE_TONEGEN_TONE_FREQ_SET(x) (((x) << 0) & 0x0000007f)
+#define PHY_ANALOG_TX_TONEGEN0_ATE_TONEGEN_TONE_A_EXP_MSB 11
+#define PHY_ANALOG_TX_TONEGEN0_ATE_TONEGEN_TONE_A_EXP_LSB 8
+#define PHY_ANALOG_TX_TONEGEN0_ATE_TONEGEN_TONE_A_EXP_MASK 0x00000f00
+#define PHY_ANALOG_TX_TONEGEN0_ATE_TONEGEN_TONE_A_EXP_GET(x) (((x) & 0x00000f00) >> 8)
+#define PHY_ANALOG_TX_TONEGEN0_ATE_TONEGEN_TONE_A_EXP_SET(x) (((x) << 8) & 0x00000f00)
+#define PHY_ANALOG_TX_TONEGEN0_ATE_TONEGEN_TONE_A_MAN_MSB 23
+#define PHY_ANALOG_TX_TONEGEN0_ATE_TONEGEN_TONE_A_MAN_LSB 16
+#define PHY_ANALOG_TX_TONEGEN0_ATE_TONEGEN_TONE_A_MAN_MASK 0x00ff0000
+#define PHY_ANALOG_TX_TONEGEN0_ATE_TONEGEN_TONE_A_MAN_GET(x) (((x) & 0x00ff0000) >> 16)
+#define PHY_ANALOG_TX_TONEGEN0_ATE_TONEGEN_TONE_A_MAN_SET(x) (((x) << 16) & 0x00ff0000)
+#define PHY_ANALOG_TX_TONEGEN0_ATE_TONEGEN_TONE_TAU_K_MSB 30
+#define PHY_ANALOG_TX_TONEGEN0_ATE_TONEGEN_TONE_TAU_K_LSB 24
+#define PHY_ANALOG_TX_TONEGEN0_ATE_TONEGEN_TONE_TAU_K_MASK 0x7f000000
+#define PHY_ANALOG_TX_TONEGEN0_ATE_TONEGEN_TONE_TAU_K_GET(x) (((x) & 0x7f000000) >> 24)
+#define PHY_ANALOG_TX_TONEGEN0_ATE_TONEGEN_TONE_TAU_K_SET(x) (((x) << 24) & 0x7f000000)
+
+/* macros for tx_tonegen1 */
+#define PHY_ANALOG_TX_TONEGEN1_ADDRESS 0x0000038c
+#define PHY_ANALOG_TX_TONEGEN1_OFFSET 0x0000038c
+#define PHY_ANALOG_TX_TONEGEN1_ATE_TONEGEN_TONE_FREQ_MSB 6
+#define PHY_ANALOG_TX_TONEGEN1_ATE_TONEGEN_TONE_FREQ_LSB 0
+#define PHY_ANALOG_TX_TONEGEN1_ATE_TONEGEN_TONE_FREQ_MASK 0x0000007f
+#define PHY_ANALOG_TX_TONEGEN1_ATE_TONEGEN_TONE_FREQ_GET(x) (((x) & 0x0000007f) >> 0)
+#define PHY_ANALOG_TX_TONEGEN1_ATE_TONEGEN_TONE_FREQ_SET(x) (((x) << 0) & 0x0000007f)
+#define PHY_ANALOG_TX_TONEGEN1_ATE_TONEGEN_TONE_A_EXP_MSB 11
+#define PHY_ANALOG_TX_TONEGEN1_ATE_TONEGEN_TONE_A_EXP_LSB 8
+#define PHY_ANALOG_TX_TONEGEN1_ATE_TONEGEN_TONE_A_EXP_MASK 0x00000f00
+#define PHY_ANALOG_TX_TONEGEN1_ATE_TONEGEN_TONE_A_EXP_GET(x) (((x) & 0x00000f00) >> 8)
+#define PHY_ANALOG_TX_TONEGEN1_ATE_TONEGEN_TONE_A_EXP_SET(x) (((x) << 8) & 0x00000f00)
+#define PHY_ANALOG_TX_TONEGEN1_ATE_TONEGEN_TONE_A_MAN_MSB 23
+#define PHY_ANALOG_TX_TONEGEN1_ATE_TONEGEN_TONE_A_MAN_LSB 16
+#define PHY_ANALOG_TX_TONEGEN1_ATE_TONEGEN_TONE_A_MAN_MASK 0x00ff0000
+#define PHY_ANALOG_TX_TONEGEN1_ATE_TONEGEN_TONE_A_MAN_GET(x) (((x) & 0x00ff0000) >> 16)
+#define PHY_ANALOG_TX_TONEGEN1_ATE_TONEGEN_TONE_A_MAN_SET(x) (((x) << 16) & 0x00ff0000)
+#define PHY_ANALOG_TX_TONEGEN1_ATE_TONEGEN_TONE_TAU_K_MSB 30
+#define PHY_ANALOG_TX_TONEGEN1_ATE_TONEGEN_TONE_TAU_K_LSB 24
+#define PHY_ANALOG_TX_TONEGEN1_ATE_TONEGEN_TONE_TAU_K_MASK 0x7f000000
+#define PHY_ANALOG_TX_TONEGEN1_ATE_TONEGEN_TONE_TAU_K_GET(x) (((x) & 0x7f000000) >> 24)
+#define PHY_ANALOG_TX_TONEGEN1_ATE_TONEGEN_TONE_TAU_K_SET(x) (((x) << 24) & 0x7f000000)
+
+/* macros for tx_lftonegen0 */
+#define PHY_ANALOG_TX_LFTONEGEN0_ADDRESS 0x00000390
+#define PHY_ANALOG_TX_LFTONEGEN0_OFFSET 0x00000390
+#define PHY_ANALOG_TX_LFTONEGEN0_ATE_TONEGEN_TONE_FREQ_MSB 6
+#define PHY_ANALOG_TX_LFTONEGEN0_ATE_TONEGEN_TONE_FREQ_LSB 0
+#define PHY_ANALOG_TX_LFTONEGEN0_ATE_TONEGEN_TONE_FREQ_MASK 0x0000007f
+#define PHY_ANALOG_TX_LFTONEGEN0_ATE_TONEGEN_TONE_FREQ_GET(x) (((x) & 0x0000007f) >> 0)
+#define PHY_ANALOG_TX_LFTONEGEN0_ATE_TONEGEN_TONE_FREQ_SET(x) (((x) << 0) & 0x0000007f)
+#define PHY_ANALOG_TX_LFTONEGEN0_ATE_TONEGEN_TONE_A_EXP_MSB 11
+#define PHY_ANALOG_TX_LFTONEGEN0_ATE_TONEGEN_TONE_A_EXP_LSB 8
+#define PHY_ANALOG_TX_LFTONEGEN0_ATE_TONEGEN_TONE_A_EXP_MASK 0x00000f00
+#define PHY_ANALOG_TX_LFTONEGEN0_ATE_TONEGEN_TONE_A_EXP_GET(x) (((x) & 0x00000f00) >> 8)
+#define PHY_ANALOG_TX_LFTONEGEN0_ATE_TONEGEN_TONE_A_EXP_SET(x) (((x) << 8) & 0x00000f00)
+#define PHY_ANALOG_TX_LFTONEGEN0_ATE_TONEGEN_TONE_A_MAN_MSB 23
+#define PHY_ANALOG_TX_LFTONEGEN0_ATE_TONEGEN_TONE_A_MAN_LSB 16
+#define PHY_ANALOG_TX_LFTONEGEN0_ATE_TONEGEN_TONE_A_MAN_MASK 0x00ff0000
+#define PHY_ANALOG_TX_LFTONEGEN0_ATE_TONEGEN_TONE_A_MAN_GET(x) (((x) & 0x00ff0000) >> 16)
+#define PHY_ANALOG_TX_LFTONEGEN0_ATE_TONEGEN_TONE_A_MAN_SET(x) (((x) << 16) & 0x00ff0000)
+#define PHY_ANALOG_TX_LFTONEGEN0_ATE_TONEGEN_TONE_TAU_K_MSB 30
+#define PHY_ANALOG_TX_LFTONEGEN0_ATE_TONEGEN_TONE_TAU_K_LSB 24
+#define PHY_ANALOG_TX_LFTONEGEN0_ATE_TONEGEN_TONE_TAU_K_MASK 0x7f000000
+#define PHY_ANALOG_TX_LFTONEGEN0_ATE_TONEGEN_TONE_TAU_K_GET(x) (((x) & 0x7f000000) >> 24)
+#define PHY_ANALOG_TX_LFTONEGEN0_ATE_TONEGEN_TONE_TAU_K_SET(x) (((x) << 24) & 0x7f000000)
+
+/* macros for tx_linear_ramp_i */
+#define PHY_ANALOG_TX_LINEAR_RAMP_I_ADDRESS 0x00000394
+#define PHY_ANALOG_TX_LINEAR_RAMP_I_OFFSET 0x00000394
+#define PHY_ANALOG_TX_LINEAR_RAMP_I_ATE_TONEGEN_LINRAMP_INIT_MSB 10
+#define PHY_ANALOG_TX_LINEAR_RAMP_I_ATE_TONEGEN_LINRAMP_INIT_LSB 0
+#define PHY_ANALOG_TX_LINEAR_RAMP_I_ATE_TONEGEN_LINRAMP_INIT_MASK 0x000007ff
+#define PHY_ANALOG_TX_LINEAR_RAMP_I_ATE_TONEGEN_LINRAMP_INIT_GET(x) (((x) & 0x000007ff) >> 0)
+#define PHY_ANALOG_TX_LINEAR_RAMP_I_ATE_TONEGEN_LINRAMP_INIT_SET(x) (((x) << 0) & 0x000007ff)
+#define PHY_ANALOG_TX_LINEAR_RAMP_I_ATE_TONEGEN_LINRAMP_DWELL_MSB 21
+#define PHY_ANALOG_TX_LINEAR_RAMP_I_ATE_TONEGEN_LINRAMP_DWELL_LSB 12
+#define PHY_ANALOG_TX_LINEAR_RAMP_I_ATE_TONEGEN_LINRAMP_DWELL_MASK 0x003ff000
+#define PHY_ANALOG_TX_LINEAR_RAMP_I_ATE_TONEGEN_LINRAMP_DWELL_GET(x) (((x) & 0x003ff000) >> 12)
+#define PHY_ANALOG_TX_LINEAR_RAMP_I_ATE_TONEGEN_LINRAMP_DWELL_SET(x) (((x) << 12) & 0x003ff000)
+#define PHY_ANALOG_TX_LINEAR_RAMP_I_ATE_TONEGEN_LINRAMP_STEP_MSB 29
+#define PHY_ANALOG_TX_LINEAR_RAMP_I_ATE_TONEGEN_LINRAMP_STEP_LSB 24
+#define PHY_ANALOG_TX_LINEAR_RAMP_I_ATE_TONEGEN_LINRAMP_STEP_MASK 0x3f000000
+#define PHY_ANALOG_TX_LINEAR_RAMP_I_ATE_TONEGEN_LINRAMP_STEP_GET(x) (((x) & 0x3f000000) >> 24)
+#define PHY_ANALOG_TX_LINEAR_RAMP_I_ATE_TONEGEN_LINRAMP_STEP_SET(x) (((x) << 24) & 0x3f000000)
+
+/* macros for tx_linear_ramp_q */
+#define PHY_ANALOG_TX_LINEAR_RAMP_Q_ADDRESS 0x00000398
+#define PHY_ANALOG_TX_LINEAR_RAMP_Q_OFFSET 0x00000398
+#define PHY_ANALOG_TX_LINEAR_RAMP_Q_ATE_TONEGEN_LINRAMP_INIT_MSB 10
+#define PHY_ANALOG_TX_LINEAR_RAMP_Q_ATE_TONEGEN_LINRAMP_INIT_LSB 0
+#define PHY_ANALOG_TX_LINEAR_RAMP_Q_ATE_TONEGEN_LINRAMP_INIT_MASK 0x000007ff
+#define PHY_ANALOG_TX_LINEAR_RAMP_Q_ATE_TONEGEN_LINRAMP_INIT_GET(x) (((x) & 0x000007ff) >> 0)
+#define PHY_ANALOG_TX_LINEAR_RAMP_Q_ATE_TONEGEN_LINRAMP_INIT_SET(x) (((x) << 0) & 0x000007ff)
+#define PHY_ANALOG_TX_LINEAR_RAMP_Q_ATE_TONEGEN_LINRAMP_DWELL_MSB 21
+#define PHY_ANALOG_TX_LINEAR_RAMP_Q_ATE_TONEGEN_LINRAMP_DWELL_LSB 12
+#define PHY_ANALOG_TX_LINEAR_RAMP_Q_ATE_TONEGEN_LINRAMP_DWELL_MASK 0x003ff000
+#define PHY_ANALOG_TX_LINEAR_RAMP_Q_ATE_TONEGEN_LINRAMP_DWELL_GET(x) (((x) & 0x003ff000) >> 12)
+#define PHY_ANALOG_TX_LINEAR_RAMP_Q_ATE_TONEGEN_LINRAMP_DWELL_SET(x) (((x) << 12) & 0x003ff000)
+#define PHY_ANALOG_TX_LINEAR_RAMP_Q_ATE_TONEGEN_LINRAMP_STEP_MSB 29
+#define PHY_ANALOG_TX_LINEAR_RAMP_Q_ATE_TONEGEN_LINRAMP_STEP_LSB 24
+#define PHY_ANALOG_TX_LINEAR_RAMP_Q_ATE_TONEGEN_LINRAMP_STEP_MASK 0x3f000000
+#define PHY_ANALOG_TX_LINEAR_RAMP_Q_ATE_TONEGEN_LINRAMP_STEP_GET(x) (((x) & 0x3f000000) >> 24)
+#define PHY_ANALOG_TX_LINEAR_RAMP_Q_ATE_TONEGEN_LINRAMP_STEP_SET(x) (((x) << 24) & 0x3f000000)
+
+/* macros for tx_prbs_mag */
+#define PHY_ANALOG_TX_PRBS_MAG_ADDRESS 0x0000039c
+#define PHY_ANALOG_TX_PRBS_MAG_OFFSET 0x0000039c
+#define PHY_ANALOG_TX_PRBS_MAG_ATE_TONEGEN_PRBS_MAGNITUDE_I_MSB 9
+#define PHY_ANALOG_TX_PRBS_MAG_ATE_TONEGEN_PRBS_MAGNITUDE_I_LSB 0
+#define PHY_ANALOG_TX_PRBS_MAG_ATE_TONEGEN_PRBS_MAGNITUDE_I_MASK 0x000003ff
+#define PHY_ANALOG_TX_PRBS_MAG_ATE_TONEGEN_PRBS_MAGNITUDE_I_GET(x) (((x) & 0x000003ff) >> 0)
+#define PHY_ANALOG_TX_PRBS_MAG_ATE_TONEGEN_PRBS_MAGNITUDE_I_SET(x) (((x) << 0) & 0x000003ff)
+#define PHY_ANALOG_TX_PRBS_MAG_ATE_TONEGEN_PRBS_MAGNITUDE_Q_MSB 25
+#define PHY_ANALOG_TX_PRBS_MAG_ATE_TONEGEN_PRBS_MAGNITUDE_Q_LSB 16
+#define PHY_ANALOG_TX_PRBS_MAG_ATE_TONEGEN_PRBS_MAGNITUDE_Q_MASK 0x03ff0000
+#define PHY_ANALOG_TX_PRBS_MAG_ATE_TONEGEN_PRBS_MAGNITUDE_Q_GET(x) (((x) & 0x03ff0000) >> 16)
+#define PHY_ANALOG_TX_PRBS_MAG_ATE_TONEGEN_PRBS_MAGNITUDE_Q_SET(x) (((x) << 16) & 0x03ff0000)
+
+/* macros for tx_prbs_seed_i */
+#define PHY_ANALOG_TX_PRBS_SEED_I_ADDRESS 0x000003a0
+#define PHY_ANALOG_TX_PRBS_SEED_I_OFFSET 0x000003a0
+#define PHY_ANALOG_TX_PRBS_SEED_I_ATE_TONEGEN_PRBS_SEED_MSB 30
+#define PHY_ANALOG_TX_PRBS_SEED_I_ATE_TONEGEN_PRBS_SEED_LSB 0
+#define PHY_ANALOG_TX_PRBS_SEED_I_ATE_TONEGEN_PRBS_SEED_MASK 0x7fffffff
+#define PHY_ANALOG_TX_PRBS_SEED_I_ATE_TONEGEN_PRBS_SEED_GET(x) (((x) & 0x7fffffff) >> 0)
+#define PHY_ANALOG_TX_PRBS_SEED_I_ATE_TONEGEN_PRBS_SEED_SET(x) (((x) << 0) & 0x7fffffff)
+
+/* macros for tx_prbs_seed_q */
+#define PHY_ANALOG_TX_PRBS_SEED_Q_ADDRESS 0x000003a4
+#define PHY_ANALOG_TX_PRBS_SEED_Q_OFFSET 0x000003a4
+#define PHY_ANALOG_TX_PRBS_SEED_Q_ATE_TONEGEN_PRBS_SEED_MSB 30
+#define PHY_ANALOG_TX_PRBS_SEED_Q_ATE_TONEGEN_PRBS_SEED_LSB 0
+#define PHY_ANALOG_TX_PRBS_SEED_Q_ATE_TONEGEN_PRBS_SEED_MASK 0x7fffffff
+#define PHY_ANALOG_TX_PRBS_SEED_Q_ATE_TONEGEN_PRBS_SEED_GET(x) (((x) & 0x7fffffff) >> 0)
+#define PHY_ANALOG_TX_PRBS_SEED_Q_ATE_TONEGEN_PRBS_SEED_SET(x) (((x) << 0) & 0x7fffffff)
+
+/* macros for cmac_dc_cancel */
+#define PHY_ANALOG_CMAC_DC_CANCEL_ADDRESS 0x000003a8
+#define PHY_ANALOG_CMAC_DC_CANCEL_OFFSET 0x000003a8
+#define PHY_ANALOG_CMAC_DC_CANCEL_ATE_CMAC_DC_CANCEL_I_MSB 9
+#define PHY_ANALOG_CMAC_DC_CANCEL_ATE_CMAC_DC_CANCEL_I_LSB 0
+#define PHY_ANALOG_CMAC_DC_CANCEL_ATE_CMAC_DC_CANCEL_I_MASK 0x000003ff
+#define PHY_ANALOG_CMAC_DC_CANCEL_ATE_CMAC_DC_CANCEL_I_GET(x) (((x) & 0x000003ff) >> 0)
+#define PHY_ANALOG_CMAC_DC_CANCEL_ATE_CMAC_DC_CANCEL_I_SET(x) (((x) << 0) & 0x000003ff)
+#define PHY_ANALOG_CMAC_DC_CANCEL_ATE_CMAC_DC_CANCEL_Q_MSB 25
+#define PHY_ANALOG_CMAC_DC_CANCEL_ATE_CMAC_DC_CANCEL_Q_LSB 16
+#define PHY_ANALOG_CMAC_DC_CANCEL_ATE_CMAC_DC_CANCEL_Q_MASK 0x03ff0000
+#define PHY_ANALOG_CMAC_DC_CANCEL_ATE_CMAC_DC_CANCEL_Q_GET(x) (((x) & 0x03ff0000) >> 16)
+#define PHY_ANALOG_CMAC_DC_CANCEL_ATE_CMAC_DC_CANCEL_Q_SET(x) (((x) << 16) & 0x03ff0000)
+
+/* macros for cmac_dc_offset */
+#define PHY_ANALOG_CMAC_DC_OFFSET_ADDRESS 0x000003ac
+#define PHY_ANALOG_CMAC_DC_OFFSET_OFFSET 0x000003ac
+#define PHY_ANALOG_CMAC_DC_OFFSET_ATE_CMAC_DC_CYCLES_MSB 3
+#define PHY_ANALOG_CMAC_DC_OFFSET_ATE_CMAC_DC_CYCLES_LSB 0
+#define PHY_ANALOG_CMAC_DC_OFFSET_ATE_CMAC_DC_CYCLES_MASK 0x0000000f
+#define PHY_ANALOG_CMAC_DC_OFFSET_ATE_CMAC_DC_CYCLES_GET(x) (((x) & 0x0000000f) >> 0)
+#define PHY_ANALOG_CMAC_DC_OFFSET_ATE_CMAC_DC_CYCLES_SET(x) (((x) << 0) & 0x0000000f)
+
+/* macros for cmac_corr */
+#define PHY_ANALOG_CMAC_CORR_ADDRESS 0x000003b0
+#define PHY_ANALOG_CMAC_CORR_OFFSET 0x000003b0
+#define PHY_ANALOG_CMAC_CORR_ATE_CMAC_CORR_CYCLES_MSB 4
+#define PHY_ANALOG_CMAC_CORR_ATE_CMAC_CORR_CYCLES_LSB 0
+#define PHY_ANALOG_CMAC_CORR_ATE_CMAC_CORR_CYCLES_MASK 0x0000001f
+#define PHY_ANALOG_CMAC_CORR_ATE_CMAC_CORR_CYCLES_GET(x) (((x) & 0x0000001f) >> 0)
+#define PHY_ANALOG_CMAC_CORR_ATE_CMAC_CORR_CYCLES_SET(x) (((x) << 0) & 0x0000001f)
+#define PHY_ANALOG_CMAC_CORR_ATE_CMAC_CORR_FREQ_MSB 13
+#define PHY_ANALOG_CMAC_CORR_ATE_CMAC_CORR_FREQ_LSB 8
+#define PHY_ANALOG_CMAC_CORR_ATE_CMAC_CORR_FREQ_MASK 0x00003f00
+#define PHY_ANALOG_CMAC_CORR_ATE_CMAC_CORR_FREQ_GET(x) (((x) & 0x00003f00) >> 8)
+#define PHY_ANALOG_CMAC_CORR_ATE_CMAC_CORR_FREQ_SET(x) (((x) << 8) & 0x00003f00)
+
+/* macros for cmac_power */
+#define PHY_ANALOG_CMAC_POWER_ADDRESS 0x000003b4
+#define PHY_ANALOG_CMAC_POWER_OFFSET 0x000003b4
+#define PHY_ANALOG_CMAC_POWER_ATE_CMAC_POWER_CYCLES_MSB 3
+#define PHY_ANALOG_CMAC_POWER_ATE_CMAC_POWER_CYCLES_LSB 0
+#define PHY_ANALOG_CMAC_POWER_ATE_CMAC_POWER_CYCLES_MASK 0x0000000f
+#define PHY_ANALOG_CMAC_POWER_ATE_CMAC_POWER_CYCLES_GET(x) (((x) & 0x0000000f) >> 0)
+#define PHY_ANALOG_CMAC_POWER_ATE_CMAC_POWER_CYCLES_SET(x) (((x) << 0) & 0x0000000f)
+
+/* macros for cmac_cross_corr */
+#define PHY_ANALOG_CMAC_CROSS_CORR_ADDRESS 0x000003b8
+#define PHY_ANALOG_CMAC_CROSS_CORR_OFFSET 0x000003b8
+#define PHY_ANALOG_CMAC_CROSS_CORR_ATE_CMAC_IQ_CYCLES_MSB 3
+#define PHY_ANALOG_CMAC_CROSS_CORR_ATE_CMAC_IQ_CYCLES_LSB 0
+#define PHY_ANALOG_CMAC_CROSS_CORR_ATE_CMAC_IQ_CYCLES_MASK 0x0000000f
+#define PHY_ANALOG_CMAC_CROSS_CORR_ATE_CMAC_IQ_CYCLES_GET(x) (((x) & 0x0000000f) >> 0)
+#define PHY_ANALOG_CMAC_CROSS_CORR_ATE_CMAC_IQ_CYCLES_SET(x) (((x) << 0) & 0x0000000f)
+
+/* macros for cmac_i2q2 */
+#define PHY_ANALOG_CMAC_I2Q2_ADDRESS 0x000003bc
+#define PHY_ANALOG_CMAC_I2Q2_OFFSET 0x000003bc
+#define PHY_ANALOG_CMAC_I2Q2_ATE_CMAC_I2Q2_CYCLES_MSB 3
+#define PHY_ANALOG_CMAC_I2Q2_ATE_CMAC_I2Q2_CYCLES_LSB 0
+#define PHY_ANALOG_CMAC_I2Q2_ATE_CMAC_I2Q2_CYCLES_MASK 0x0000000f
+#define PHY_ANALOG_CMAC_I2Q2_ATE_CMAC_I2Q2_CYCLES_GET(x) (((x) & 0x0000000f) >> 0)
+#define PHY_ANALOG_CMAC_I2Q2_ATE_CMAC_I2Q2_CYCLES_SET(x) (((x) << 0) & 0x0000000f)
+
+/* macros for cmac_power_hpf */
+#define PHY_ANALOG_CMAC_POWER_HPF_ADDRESS 0x000003c0
+#define PHY_ANALOG_CMAC_POWER_HPF_OFFSET 0x000003c0
+#define PHY_ANALOG_CMAC_POWER_HPF_ATE_CMAC_POWER_HPF_CYCLES_MSB 3
+#define PHY_ANALOG_CMAC_POWER_HPF_ATE_CMAC_POWER_HPF_CYCLES_LSB 0
+#define PHY_ANALOG_CMAC_POWER_HPF_ATE_CMAC_POWER_HPF_CYCLES_MASK 0x0000000f
+#define PHY_ANALOG_CMAC_POWER_HPF_ATE_CMAC_POWER_HPF_CYCLES_GET(x) (((x) & 0x0000000f) >> 0)
+#define PHY_ANALOG_CMAC_POWER_HPF_ATE_CMAC_POWER_HPF_CYCLES_SET(x) (((x) << 0) & 0x0000000f)
+#define PHY_ANALOG_CMAC_POWER_HPF_ATE_CMAC_POWER_HPF_WAIT_MSB 7
+#define PHY_ANALOG_CMAC_POWER_HPF_ATE_CMAC_POWER_HPF_WAIT_LSB 4
+#define PHY_ANALOG_CMAC_POWER_HPF_ATE_CMAC_POWER_HPF_WAIT_MASK 0x000000f0
+#define PHY_ANALOG_CMAC_POWER_HPF_ATE_CMAC_POWER_HPF_WAIT_GET(x) (((x) & 0x000000f0) >> 4)
+#define PHY_ANALOG_CMAC_POWER_HPF_ATE_CMAC_POWER_HPF_WAIT_SET(x) (((x) << 4) & 0x000000f0)
+
+/* macros for rxdac_set1 */
+#define PHY_ANALOG_RXDAC_SET1_ADDRESS 0x000003c4
+#define PHY_ANALOG_RXDAC_SET1_OFFSET 0x000003c4
+#define PHY_ANALOG_RXDAC_SET1_ATE_RXDAC_MUX_MSB 1
+#define PHY_ANALOG_RXDAC_SET1_ATE_RXDAC_MUX_LSB 0
+#define PHY_ANALOG_RXDAC_SET1_ATE_RXDAC_MUX_MASK 0x00000003
+#define PHY_ANALOG_RXDAC_SET1_ATE_RXDAC_MUX_GET(x) (((x) & 0x00000003) >> 0)
+#define PHY_ANALOG_RXDAC_SET1_ATE_RXDAC_MUX_SET(x) (((x) << 0) & 0x00000003)
+#define PHY_ANALOG_RXDAC_SET1_ATE_RXDAC_HI_GAIN_MSB 4
+#define PHY_ANALOG_RXDAC_SET1_ATE_RXDAC_HI_GAIN_LSB 4
+#define PHY_ANALOG_RXDAC_SET1_ATE_RXDAC_HI_GAIN_MASK 0x00000010
+#define PHY_ANALOG_RXDAC_SET1_ATE_RXDAC_HI_GAIN_GET(x) (((x) & 0x00000010) >> 4)
+#define PHY_ANALOG_RXDAC_SET1_ATE_RXDAC_HI_GAIN_SET(x) (((x) << 4) & 0x00000010)
+#define PHY_ANALOG_RXDAC_SET1_ATE_RXDAC_CAL_WAIT_MSB 13
+#define PHY_ANALOG_RXDAC_SET1_ATE_RXDAC_CAL_WAIT_LSB 8
+#define PHY_ANALOG_RXDAC_SET1_ATE_RXDAC_CAL_WAIT_MASK 0x00003f00
+#define PHY_ANALOG_RXDAC_SET1_ATE_RXDAC_CAL_WAIT_GET(x) (((x) & 0x00003f00) >> 8)
+#define PHY_ANALOG_RXDAC_SET1_ATE_RXDAC_CAL_WAIT_SET(x) (((x) << 8) & 0x00003f00)
+#define PHY_ANALOG_RXDAC_SET1_ATE_RXDAC_CAL_MEASURE_TIME_MSB 19
+#define PHY_ANALOG_RXDAC_SET1_ATE_RXDAC_CAL_MEASURE_TIME_LSB 16
+#define PHY_ANALOG_RXDAC_SET1_ATE_RXDAC_CAL_MEASURE_TIME_MASK 0x000f0000
+#define PHY_ANALOG_RXDAC_SET1_ATE_RXDAC_CAL_MEASURE_TIME_GET(x) (((x) & 0x000f0000) >> 16)
+#define PHY_ANALOG_RXDAC_SET1_ATE_RXDAC_CAL_MEASURE_TIME_SET(x) (((x) << 16) & 0x000f0000)
+
+/* macros for rxdac_set2 */
+#define PHY_ANALOG_RXDAC_SET2_ADDRESS 0x000003c8
+#define PHY_ANALOG_RXDAC_SET2_OFFSET 0x000003c8
+#define PHY_ANALOG_RXDAC_SET2_ATE_RXDAC_I_HI_MSB 4
+#define PHY_ANALOG_RXDAC_SET2_ATE_RXDAC_I_HI_LSB 0
+#define PHY_ANALOG_RXDAC_SET2_ATE_RXDAC_I_HI_MASK 0x0000001f
+#define PHY_ANALOG_RXDAC_SET2_ATE_RXDAC_I_HI_GET(x) (((x) & 0x0000001f) >> 0)
+#define PHY_ANALOG_RXDAC_SET2_ATE_RXDAC_I_HI_SET(x) (((x) << 0) & 0x0000001f)
+#define PHY_ANALOG_RXDAC_SET2_ATE_RXDAC_Q_HI_MSB 12
+#define PHY_ANALOG_RXDAC_SET2_ATE_RXDAC_Q_HI_LSB 8
+#define PHY_ANALOG_RXDAC_SET2_ATE_RXDAC_Q_HI_MASK 0x00001f00
+#define PHY_ANALOG_RXDAC_SET2_ATE_RXDAC_Q_HI_GET(x) (((x) & 0x00001f00) >> 8)
+#define PHY_ANALOG_RXDAC_SET2_ATE_RXDAC_Q_HI_SET(x) (((x) << 8) & 0x00001f00)
+#define PHY_ANALOG_RXDAC_SET2_ATE_RXDAC_I_LOW_MSB 20
+#define PHY_ANALOG_RXDAC_SET2_ATE_RXDAC_I_LOW_LSB 16
+#define PHY_ANALOG_RXDAC_SET2_ATE_RXDAC_I_LOW_MASK 0x001f0000
+#define PHY_ANALOG_RXDAC_SET2_ATE_RXDAC_I_LOW_GET(x) (((x) & 0x001f0000) >> 16)
+#define PHY_ANALOG_RXDAC_SET2_ATE_RXDAC_I_LOW_SET(x) (((x) << 16) & 0x001f0000)
+#define PHY_ANALOG_RXDAC_SET2_ATE_RXDAC_Q_LOW_MSB 28
+#define PHY_ANALOG_RXDAC_SET2_ATE_RXDAC_Q_LOW_LSB 24
+#define PHY_ANALOG_RXDAC_SET2_ATE_RXDAC_Q_LOW_MASK 0x1f000000
+#define PHY_ANALOG_RXDAC_SET2_ATE_RXDAC_Q_LOW_GET(x) (((x) & 0x1f000000) >> 24)
+#define PHY_ANALOG_RXDAC_SET2_ATE_RXDAC_Q_LOW_SET(x) (((x) << 24) & 0x1f000000)
+
+/* macros for rxdac_long_shift */
+#define PHY_ANALOG_RXDAC_LONG_SHIFT_ADDRESS 0x000003cc
+#define PHY_ANALOG_RXDAC_LONG_SHIFT_OFFSET 0x000003cc
+#define PHY_ANALOG_RXDAC_LONG_SHIFT_ATE_RXDAC_I_STATIC_MSB 4
+#define PHY_ANALOG_RXDAC_LONG_SHIFT_ATE_RXDAC_I_STATIC_LSB 0
+#define PHY_ANALOG_RXDAC_LONG_SHIFT_ATE_RXDAC_I_STATIC_MASK 0x0000001f
+#define PHY_ANALOG_RXDAC_LONG_SHIFT_ATE_RXDAC_I_STATIC_GET(x) (((x) & 0x0000001f) >> 0)
+#define PHY_ANALOG_RXDAC_LONG_SHIFT_ATE_RXDAC_I_STATIC_SET(x) (((x) << 0) & 0x0000001f)
+#define PHY_ANALOG_RXDAC_LONG_SHIFT_ATE_RXDAC_Q_STATIC_MSB 12
+#define PHY_ANALOG_RXDAC_LONG_SHIFT_ATE_RXDAC_Q_STATIC_LSB 8
+#define PHY_ANALOG_RXDAC_LONG_SHIFT_ATE_RXDAC_Q_STATIC_MASK 0x00001f00
+#define PHY_ANALOG_RXDAC_LONG_SHIFT_ATE_RXDAC_Q_STATIC_GET(x) (((x) & 0x00001f00) >> 8)
+#define PHY_ANALOG_RXDAC_LONG_SHIFT_ATE_RXDAC_Q_STATIC_SET(x) (((x) << 8) & 0x00001f00)
+
+/* macros for cmac_results_i */
+#define PHY_ANALOG_CMAC_RESULTS_I_ADDRESS 0x000003d0
+#define PHY_ANALOG_CMAC_RESULTS_I_OFFSET 0x000003d0
+#define PHY_ANALOG_CMAC_RESULTS_I_ATE_CMAC_RESULTS_MSB 31
+#define PHY_ANALOG_CMAC_RESULTS_I_ATE_CMAC_RESULTS_LSB 0
+#define PHY_ANALOG_CMAC_RESULTS_I_ATE_CMAC_RESULTS_MASK 0xffffffff
+#define PHY_ANALOG_CMAC_RESULTS_I_ATE_CMAC_RESULTS_GET(x) (((x) & 0xffffffff) >> 0)
+#define PHY_ANALOG_CMAC_RESULTS_I_ATE_CMAC_RESULTS_SET(x) (((x) << 0) & 0xffffffff)
+
+/* macros for cmac_results_q */
+#define PHY_ANALOG_CMAC_RESULTS_Q_ADDRESS 0x000003d4
+#define PHY_ANALOG_CMAC_RESULTS_Q_OFFSET 0x000003d4
+#define PHY_ANALOG_CMAC_RESULTS_Q_ATE_CMAC_RESULTS_MSB 31
+#define PHY_ANALOG_CMAC_RESULTS_Q_ATE_CMAC_RESULTS_LSB 0
+#define PHY_ANALOG_CMAC_RESULTS_Q_ATE_CMAC_RESULTS_MASK 0xffffffff
+#define PHY_ANALOG_CMAC_RESULTS_Q_ATE_CMAC_RESULTS_GET(x) (((x) & 0xffffffff) >> 0)
+#define PHY_ANALOG_CMAC_RESULTS_Q_ATE_CMAC_RESULTS_SET(x) (((x) << 0) & 0xffffffff)
+
+/* macros for PMU1 */
+#define PHY_ANALOG_PMU1_ADDRESS 0x00000740
+#define PHY_ANALOG_PMU1_OFFSET 0x00000740
+#define PHY_ANALOG_PMU1_SPARE_MSB 10
+#define PHY_ANALOG_PMU1_SPARE_LSB 0
+#define PHY_ANALOG_PMU1_SPARE_MASK 0x000007ff
+#define PHY_ANALOG_PMU1_SPARE_GET(x) (((x) & 0x000007ff) >> 0)
+#define PHY_ANALOG_PMU1_SPARE_SET(x) (((x) << 0) & 0x000007ff)
+#define PHY_ANALOG_PMU1_OTP_V25_PWD_MSB 11
+#define PHY_ANALOG_PMU1_OTP_V25_PWD_LSB 11
+#define PHY_ANALOG_PMU1_OTP_V25_PWD_MASK 0x00000800
+#define PHY_ANALOG_PMU1_OTP_V25_PWD_GET(x) (((x) & 0x00000800) >> 11)
+#define PHY_ANALOG_PMU1_OTP_V25_PWD_SET(x) (((x) << 11) & 0x00000800)
+#define PHY_ANALOG_PMU1_PAREGON_MAN_MSB 12
+#define PHY_ANALOG_PMU1_PAREGON_MAN_LSB 12
+#define PHY_ANALOG_PMU1_PAREGON_MAN_MASK 0x00001000
+#define PHY_ANALOG_PMU1_PAREGON_MAN_GET(x) (((x) & 0x00001000) >> 12)
+#define PHY_ANALOG_PMU1_PAREGON_MAN_SET(x) (((x) << 12) & 0x00001000)
+#define PHY_ANALOG_PMU1_OTPREGON_MAN_MSB 13
+#define PHY_ANALOG_PMU1_OTPREGON_MAN_LSB 13
+#define PHY_ANALOG_PMU1_OTPREGON_MAN_MASK 0x00002000
+#define PHY_ANALOG_PMU1_OTPREGON_MAN_GET(x) (((x) & 0x00002000) >> 13)
+#define PHY_ANALOG_PMU1_OTPREGON_MAN_SET(x) (((x) << 13) & 0x00002000)
+#define PHY_ANALOG_PMU1_DREGON_MAN_MSB 14
+#define PHY_ANALOG_PMU1_DREGON_MAN_LSB 14
+#define PHY_ANALOG_PMU1_DREGON_MAN_MASK 0x00004000
+#define PHY_ANALOG_PMU1_DREGON_MAN_GET(x) (((x) & 0x00004000) >> 14)
+#define PHY_ANALOG_PMU1_DREGON_MAN_SET(x) (((x) << 14) & 0x00004000)
+#define PHY_ANALOG_PMU1_DISCONTMODEEN_MSB 15
+#define PHY_ANALOG_PMU1_DISCONTMODEEN_LSB 15
+#define PHY_ANALOG_PMU1_DISCONTMODEEN_MASK 0x00008000
+#define PHY_ANALOG_PMU1_DISCONTMODEEN_GET(x) (((x) & 0x00008000) >> 15)
+#define PHY_ANALOG_PMU1_DISCONTMODEEN_SET(x) (((x) << 15) & 0x00008000)
+#define PHY_ANALOG_PMU1_SWREGON_MAN_MSB 16
+#define PHY_ANALOG_PMU1_SWREGON_MAN_LSB 16
+#define PHY_ANALOG_PMU1_SWREGON_MAN_MASK 0x00010000
+#define PHY_ANALOG_PMU1_SWREGON_MAN_GET(x) (((x) & 0x00010000) >> 16)
+#define PHY_ANALOG_PMU1_SWREGON_MAN_SET(x) (((x) << 16) & 0x00010000)
+#define PHY_ANALOG_PMU1_SWREG_FREQCUR_MSB 18
+#define PHY_ANALOG_PMU1_SWREG_FREQCUR_LSB 17
+#define PHY_ANALOG_PMU1_SWREG_FREQCUR_MASK 0x00060000
+#define PHY_ANALOG_PMU1_SWREG_FREQCUR_GET(x) (((x) & 0x00060000) >> 17)
+#define PHY_ANALOG_PMU1_SWREG_FREQCUR_SET(x) (((x) << 17) & 0x00060000)
+#define PHY_ANALOG_PMU1_SWREG_FREQCAP_MSB 21
+#define PHY_ANALOG_PMU1_SWREG_FREQCAP_LSB 19
+#define PHY_ANALOG_PMU1_SWREG_FREQCAP_MASK 0x00380000
+#define PHY_ANALOG_PMU1_SWREG_FREQCAP_GET(x) (((x) & 0x00380000) >> 19)
+#define PHY_ANALOG_PMU1_SWREG_FREQCAP_SET(x) (((x) << 19) & 0x00380000)
+#define PHY_ANALOG_PMU1_SWREG_LVLCTR_MSB 23
+#define PHY_ANALOG_PMU1_SWREG_LVLCTR_LSB 22
+#define PHY_ANALOG_PMU1_SWREG_LVLCTR_MASK 0x00c00000
+#define PHY_ANALOG_PMU1_SWREG_LVLCTR_GET(x) (((x) & 0x00c00000) >> 22)
+#define PHY_ANALOG_PMU1_SWREG_LVLCTR_SET(x) (((x) << 22) & 0x00c00000)
+#define PHY_ANALOG_PMU1_SREG_LVLCTR_MSB 25
+#define PHY_ANALOG_PMU1_SREG_LVLCTR_LSB 24
+#define PHY_ANALOG_PMU1_SREG_LVLCTR_MASK 0x03000000
+#define PHY_ANALOG_PMU1_SREG_LVLCTR_GET(x) (((x) & 0x03000000) >> 24)
+#define PHY_ANALOG_PMU1_SREG_LVLCTR_SET(x) (((x) << 24) & 0x03000000)
+#define PHY_ANALOG_PMU1_DREG_LVLCTR_MSB 27
+#define PHY_ANALOG_PMU1_DREG_LVLCTR_LSB 26
+#define PHY_ANALOG_PMU1_DREG_LVLCTR_MASK 0x0c000000
+#define PHY_ANALOG_PMU1_DREG_LVLCTR_GET(x) (((x) & 0x0c000000) >> 26)
+#define PHY_ANALOG_PMU1_DREG_LVLCTR_SET(x) (((x) << 26) & 0x0c000000)
+#define PHY_ANALOG_PMU1_PAREG_XPNP_MSB 28
+#define PHY_ANALOG_PMU1_PAREG_XPNP_LSB 28
+#define PHY_ANALOG_PMU1_PAREG_XPNP_MASK 0x10000000
+#define PHY_ANALOG_PMU1_PAREG_XPNP_GET(x) (((x) & 0x10000000) >> 28)
+#define PHY_ANALOG_PMU1_PAREG_XPNP_SET(x) (((x) << 28) & 0x10000000)
+#define PHY_ANALOG_PMU1_PAREG_LVLCTR_MSB 31
+#define PHY_ANALOG_PMU1_PAREG_LVLCTR_LSB 29
+#define PHY_ANALOG_PMU1_PAREG_LVLCTR_MASK 0xe0000000
+#define PHY_ANALOG_PMU1_PAREG_LVLCTR_GET(x) (((x) & 0xe0000000) >> 29)
+#define PHY_ANALOG_PMU1_PAREG_LVLCTR_SET(x) (((x) << 29) & 0xe0000000)
+
+/* macros for PMU2 */
+#define PHY_ANALOG_PMU2_ADDRESS 0x00000744
+#define PHY_ANALOG_PMU2_OFFSET 0x00000744
+#define PHY_ANALOG_PMU2_SPARE_MSB 7
+#define PHY_ANALOG_PMU2_SPARE_LSB 0
+#define PHY_ANALOG_PMU2_SPARE_MASK 0x000000ff
+#define PHY_ANALOG_PMU2_SPARE_GET(x) (((x) & 0x000000ff) >> 0)
+#define PHY_ANALOG_PMU2_SPARE_SET(x) (((x) << 0) & 0x000000ff)
+#define PHY_ANALOG_PMU2_VBATT_1_3TOATB_MSB 8
+#define PHY_ANALOG_PMU2_VBATT_1_3TOATB_LSB 8
+#define PHY_ANALOG_PMU2_VBATT_1_3TOATB_MASK 0x00000100
+#define PHY_ANALOG_PMU2_VBATT_1_3TOATB_GET(x) (((x) & 0x00000100) >> 8)
+#define PHY_ANALOG_PMU2_VBATT_1_3TOATB_SET(x) (((x) << 8) & 0x00000100)
+#define PHY_ANALOG_PMU2_VBATT_1_2TOATB_MSB 9
+#define PHY_ANALOG_PMU2_VBATT_1_2TOATB_LSB 9
+#define PHY_ANALOG_PMU2_VBATT_1_2TOATB_MASK 0x00000200
+#define PHY_ANALOG_PMU2_VBATT_1_2TOATB_GET(x) (((x) & 0x00000200) >> 9)
+#define PHY_ANALOG_PMU2_VBATT_1_2TOATB_SET(x) (((x) << 9) & 0x00000200)
+#define PHY_ANALOG_PMU2_VBATT_2_3TOATB_MSB 10
+#define PHY_ANALOG_PMU2_VBATT_2_3TOATB_LSB 10
+#define PHY_ANALOG_PMU2_VBATT_2_3TOATB_MASK 0x00000400
+#define PHY_ANALOG_PMU2_VBATT_2_3TOATB_GET(x) (((x) & 0x00000400) >> 10)
+#define PHY_ANALOG_PMU2_VBATT_2_3TOATB_SET(x) (((x) << 10) & 0x00000400)
+#define PHY_ANALOG_PMU2_PWD_BANDGAP_MAN_MSB 11
+#define PHY_ANALOG_PMU2_PWD_BANDGAP_MAN_LSB 11
+#define PHY_ANALOG_PMU2_PWD_BANDGAP_MAN_MASK 0x00000800
+#define PHY_ANALOG_PMU2_PWD_BANDGAP_MAN_GET(x) (((x) & 0x00000800) >> 11)
+#define PHY_ANALOG_PMU2_PWD_BANDGAP_MAN_SET(x) (((x) << 11) & 0x00000800)
+#define PHY_ANALOG_PMU2_PWD_LFO_MAN_MSB 12
+#define PHY_ANALOG_PMU2_PWD_LFO_MAN_LSB 12
+#define PHY_ANALOG_PMU2_PWD_LFO_MAN_MASK 0x00001000
+#define PHY_ANALOG_PMU2_PWD_LFO_MAN_GET(x) (((x) & 0x00001000) >> 12)
+#define PHY_ANALOG_PMU2_PWD_LFO_MAN_SET(x) (((x) << 12) & 0x00001000)
+#define PHY_ANALOG_PMU2_VBATT_LT_3P2_MSB 13
+#define PHY_ANALOG_PMU2_VBATT_LT_3P2_LSB 13
+#define PHY_ANALOG_PMU2_VBATT_LT_3P2_MASK 0x00002000
+#define PHY_ANALOG_PMU2_VBATT_LT_3P2_GET(x) (((x) & 0x00002000) >> 13)
+#define PHY_ANALOG_PMU2_VBATT_LT_3P2_SET(x) (((x) << 13) & 0x00002000)
+#define PHY_ANALOG_PMU2_VBATT_LT_2P8_MSB 14
+#define PHY_ANALOG_PMU2_VBATT_LT_2P8_LSB 14
+#define PHY_ANALOG_PMU2_VBATT_LT_2P8_MASK 0x00004000
+#define PHY_ANALOG_PMU2_VBATT_LT_2P8_GET(x) (((x) & 0x00004000) >> 14)
+#define PHY_ANALOG_PMU2_VBATT_LT_2P8_SET(x) (((x) << 14) & 0x00004000)
+#define PHY_ANALOG_PMU2_VBATT_GT_4P2_MSB 15
+#define PHY_ANALOG_PMU2_VBATT_GT_4P2_LSB 15
+#define PHY_ANALOG_PMU2_VBATT_GT_4P2_MASK 0x00008000
+#define PHY_ANALOG_PMU2_VBATT_GT_4P2_GET(x) (((x) & 0x00008000) >> 15)
+#define PHY_ANALOG_PMU2_VBATT_GT_4P2_SET(x) (((x) << 15) & 0x00008000)
+#define PHY_ANALOG_PMU2_PMU_MAN_OVERRIDE_EN_MSB 16
+#define PHY_ANALOG_PMU2_PMU_MAN_OVERRIDE_EN_LSB 16
+#define PHY_ANALOG_PMU2_PMU_MAN_OVERRIDE_EN_MASK 0x00010000
+#define PHY_ANALOG_PMU2_PMU_MAN_OVERRIDE_EN_GET(x) (((x) & 0x00010000) >> 16)
+#define PHY_ANALOG_PMU2_PMU_MAN_OVERRIDE_EN_SET(x) (((x) << 16) & 0x00010000)
+#define PHY_ANALOG_PMU2_VBATT_GT_LVLCTR_MSB 18
+#define PHY_ANALOG_PMU2_VBATT_GT_LVLCTR_LSB 17
+#define PHY_ANALOG_PMU2_VBATT_GT_LVLCTR_MASK 0x00060000
+#define PHY_ANALOG_PMU2_VBATT_GT_LVLCTR_GET(x) (((x) & 0x00060000) >> 17)
+#define PHY_ANALOG_PMU2_VBATT_GT_LVLCTR_SET(x) (((x) << 17) & 0x00060000)
+#define PHY_ANALOG_PMU2_SWREGVSSL2ATB_MSB 19
+#define PHY_ANALOG_PMU2_SWREGVSSL2ATB_LSB 19
+#define PHY_ANALOG_PMU2_SWREGVSSL2ATB_MASK 0x00080000
+#define PHY_ANALOG_PMU2_SWREGVSSL2ATB_GET(x) (((x) & 0x00080000) >> 19)
+#define PHY_ANALOG_PMU2_SWREGVSSL2ATB_SET(x) (((x) << 19) & 0x00080000)
+#define PHY_ANALOG_PMU2_SWREGVSSL_LVLCTR_MSB 21
+#define PHY_ANALOG_PMU2_SWREGVSSL_LVLCTR_LSB 20
+#define PHY_ANALOG_PMU2_SWREGVSSL_LVLCTR_MASK 0x00300000
+#define PHY_ANALOG_PMU2_SWREGVSSL_LVLCTR_GET(x) (((x) & 0x00300000) >> 20)
+#define PHY_ANALOG_PMU2_SWREGVSSL_LVLCTR_SET(x) (((x) << 20) & 0x00300000)
+#define PHY_ANALOG_PMU2_SWREGVDDH2ATB_MSB 22
+#define PHY_ANALOG_PMU2_SWREGVDDH2ATB_LSB 22
+#define PHY_ANALOG_PMU2_SWREGVDDH2ATB_MASK 0x00400000
+#define PHY_ANALOG_PMU2_SWREGVDDH2ATB_GET(x) (((x) & 0x00400000) >> 22)
+#define PHY_ANALOG_PMU2_SWREGVDDH2ATB_SET(x) (((x) << 22) & 0x00400000)
+#define PHY_ANALOG_PMU2_SWREGVDDH_LVLCTR_MSB 24
+#define PHY_ANALOG_PMU2_SWREGVDDH_LVLCTR_LSB 23
+#define PHY_ANALOG_PMU2_SWREGVDDH_LVLCTR_MASK 0x01800000
+#define PHY_ANALOG_PMU2_SWREGVDDH_LVLCTR_GET(x) (((x) & 0x01800000) >> 23)
+#define PHY_ANALOG_PMU2_SWREGVDDH_LVLCTR_SET(x) (((x) << 23) & 0x01800000)
+#define PHY_ANALOG_PMU2_SWREG2ATB_MSB 27
+#define PHY_ANALOG_PMU2_SWREG2ATB_LSB 25
+#define PHY_ANALOG_PMU2_SWREG2ATB_MASK 0x0e000000
+#define PHY_ANALOG_PMU2_SWREG2ATB_GET(x) (((x) & 0x0e000000) >> 25)
+#define PHY_ANALOG_PMU2_SWREG2ATB_SET(x) (((x) << 25) & 0x0e000000)
+#define PHY_ANALOG_PMU2_OTPREG2ATB_MSB 28
+#define PHY_ANALOG_PMU2_OTPREG2ATB_LSB 28
+#define PHY_ANALOG_PMU2_OTPREG2ATB_MASK 0x10000000
+#define PHY_ANALOG_PMU2_OTPREG2ATB_GET(x) (((x) & 0x10000000) >> 28)
+#define PHY_ANALOG_PMU2_OTPREG2ATB_SET(x) (((x) << 28) & 0x10000000)
+#define PHY_ANALOG_PMU2_OTPREG_LVLCTR_MSB 30
+#define PHY_ANALOG_PMU2_OTPREG_LVLCTR_LSB 29
+#define PHY_ANALOG_PMU2_OTPREG_LVLCTR_MASK 0x60000000
+#define PHY_ANALOG_PMU2_OTPREG_LVLCTR_GET(x) (((x) & 0x60000000) >> 29)
+#define PHY_ANALOG_PMU2_OTPREG_LVLCTR_SET(x) (((x) << 29) & 0x60000000)
+#define PHY_ANALOG_PMU2_DREG_LVLCTR_MANOVR_EN_MSB 31
+#define PHY_ANALOG_PMU2_DREG_LVLCTR_MANOVR_EN_LSB 31
+#define PHY_ANALOG_PMU2_DREG_LVLCTR_MANOVR_EN_MASK 0x80000000
+#define PHY_ANALOG_PMU2_DREG_LVLCTR_MANOVR_EN_GET(x) (((x) & 0x80000000) >> 31)
+#define PHY_ANALOG_PMU2_DREG_LVLCTR_MANOVR_EN_SET(x) (((x) << 31) & 0x80000000)
+
+
+#ifndef __ASSEMBLER__
+
+typedef struct analog_intf_athr_wlan_reg_reg_s {
+ volatile unsigned int RXRF_BIAS1; /* 0x0 - 0x4 */
+ volatile unsigned int RXRF_BIAS2; /* 0x4 - 0x8 */
+ volatile unsigned int RXRF_GAINSTAGES; /* 0x8 - 0xc */
+ volatile unsigned int RXRF_AGC; /* 0xc - 0x10 */
+ volatile char pad__0[0x30]; /* 0x10 - 0x40 */
+ volatile unsigned int TXRF1; /* 0x40 - 0x44 */
+ volatile unsigned int TXRF2; /* 0x44 - 0x48 */
+ volatile unsigned int TXRF3; /* 0x48 - 0x4c */
+ volatile unsigned int TXRF4; /* 0x4c - 0x50 */
+ volatile unsigned int TXRF5; /* 0x50 - 0x54 */
+ volatile unsigned int TXRF6; /* 0x54 - 0x58 */
+ volatile unsigned int TXRF7; /* 0x58 - 0x5c */
+ volatile unsigned int TXRF8; /* 0x5c - 0x60 */
+ volatile unsigned int TXRF9; /* 0x60 - 0x64 */
+ volatile unsigned int TXRF10; /* 0x64 - 0x68 */
+ volatile unsigned int TXRF11; /* 0x68 - 0x6c */
+ volatile unsigned int TXRF12; /* 0x6c - 0x70 */
+ volatile char pad__1[0x10]; /* 0x70 - 0x80 */
+ volatile unsigned int SYNTH1; /* 0x80 - 0x84 */
+ volatile unsigned int SYNTH2; /* 0x84 - 0x88 */
+ volatile unsigned int SYNTH3; /* 0x88 - 0x8c */
+ volatile unsigned int SYNTH4; /* 0x8c - 0x90 */
+ volatile unsigned int SYNTH5; /* 0x90 - 0x94 */
+ volatile unsigned int SYNTH6; /* 0x94 - 0x98 */
+ volatile unsigned int SYNTH7; /* 0x98 - 0x9c */
+ volatile unsigned int SYNTH8; /* 0x9c - 0xa0 */
+ volatile unsigned int SYNTH9; /* 0xa0 - 0xa4 */
+ volatile unsigned int SYNTH10; /* 0xa4 - 0xa8 */
+ volatile unsigned int SYNTH11; /* 0xa8 - 0xac */
+ volatile unsigned int SYNTH12; /* 0xac - 0xb0 */
+ volatile unsigned int SYNTH13; /* 0xb0 - 0xb4 */
+ volatile unsigned int SYNTH14; /* 0xb4 - 0xb8 */
+ volatile char pad__2[0x8]; /* 0xb8 - 0xc0 */
+ volatile unsigned int BIAS1; /* 0xc0 - 0xc4 */
+ volatile unsigned int BIAS2; /* 0xc4 - 0xc8 */
+ volatile unsigned int BIAS3; /* 0xc8 - 0xcc */
+ volatile unsigned int BIAS4; /* 0xcc - 0xd0 */
+ volatile char pad__3[0x30]; /* 0xd0 - 0x100 */
+ volatile unsigned int RXTX1; /* 0x100 - 0x104 */
+ volatile unsigned int RXTX2; /* 0x104 - 0x108 */
+ volatile unsigned int RXTX3; /* 0x108 - 0x10c */
+ volatile char pad__4[0x34]; /* 0x10c - 0x140 */
+ volatile unsigned int BB1; /* 0x140 - 0x144 */
+ volatile unsigned int BB2; /* 0x144 - 0x148 */
+ volatile unsigned int BB3; /* 0x148 - 0x14c */
+ volatile char pad__5[0x134]; /* 0x14c - 0x280 */
+ volatile unsigned int PLLCLKMODA; /* 0x280 - 0x284 */
+ volatile unsigned int PLLCLKMODA2; /* 0x284 - 0x288 */
+ volatile unsigned int TOP; /* 0x288 - 0x28c */
+ volatile unsigned int THERM; /* 0x28c - 0x290 */
+ volatile unsigned int XTAL; /* 0x290 - 0x294 */
+ volatile char pad__6[0xec]; /* 0x294 - 0x380 */
+ volatile unsigned int rbist_cntrl; /* 0x380 - 0x384 */
+ volatile unsigned int tx_dc_offset; /* 0x384 - 0x388 */
+ volatile unsigned int tx_tonegen0; /* 0x388 - 0x38c */
+ volatile unsigned int tx_tonegen1; /* 0x38c - 0x390 */
+ volatile unsigned int tx_lftonegen0; /* 0x390 - 0x394 */
+ volatile unsigned int tx_linear_ramp_i; /* 0x394 - 0x398 */
+ volatile unsigned int tx_linear_ramp_q; /* 0x398 - 0x39c */
+ volatile unsigned int tx_prbs_mag; /* 0x39c - 0x3a0 */
+ volatile unsigned int tx_prbs_seed_i; /* 0x3a0 - 0x3a4 */
+ volatile unsigned int tx_prbs_seed_q; /* 0x3a4 - 0x3a8 */
+ volatile unsigned int cmac_dc_cancel; /* 0x3a8 - 0x3ac */
+ volatile unsigned int cmac_dc_offset; /* 0x3ac - 0x3b0 */
+ volatile unsigned int cmac_corr; /* 0x3b0 - 0x3b4 */
+ volatile unsigned int cmac_power; /* 0x3b4 - 0x3b8 */
+ volatile unsigned int cmac_cross_corr; /* 0x3b8 - 0x3bc */
+ volatile unsigned int cmac_i2q2; /* 0x3bc - 0x3c0 */
+ volatile unsigned int cmac_power_hpf; /* 0x3c0 - 0x3c4 */
+ volatile unsigned int rxdac_set1; /* 0x3c4 - 0x3c8 */
+ volatile unsigned int rxdac_set2; /* 0x3c8 - 0x3cc */
+ volatile unsigned int rxdac_long_shift; /* 0x3cc - 0x3d0 */
+ volatile unsigned int cmac_results_i; /* 0x3d0 - 0x3d4 */
+ volatile unsigned int cmac_results_q; /* 0x3d4 - 0x3d8 */
+ volatile char pad__7[0x368]; /* 0x3d8 - 0x740 */
+ volatile unsigned int PMU1; /* 0x740 - 0x744 */
+ volatile unsigned int PMU2; /* 0x744 - 0x748 */
+} analog_intf_athr_wlan_reg_reg_t;
+
+#endif /* __ASSEMBLER__ */
+
+#endif /* _ANALOG_INTF_ATHR_WLAN_REG_REG_H_ */
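
The Denali-generated macros above all follow one pattern per field: _MSB/_LSB give the bit positions, _MASK the in-place mask, _GET extracts the field shifted down to bit 0, and _SET shifts a value up into place (read-only status fields such as SAR_ADC_DONE get no _SET). The sketch below is illustrative only and not part of the patch: reg_read()/reg_write() are assumed placeholder MMIO accessors, and treating PHY_ANALOG_THERM_ADDRESS as something they can consume directly (rather than a block-relative offset) is an assumption.

/*
 * Illustrative sketch only -- not part of the patch.  reg_read()/reg_write()
 * stand in for whatever MMIO accessors the driver actually uses.
 */
extern unsigned int reg_read(unsigned int addr);
extern void reg_write(unsigned int addr, unsigned int val);

static void therm_start_measurement(void)
{
	unsigned int val = reg_read(PHY_ANALOG_THERM_ADDRESS);

	/* read-modify-write: clear the field, then merge in the new value */
	val &= ~PHY_ANALOG_THERM_THERMSTART_MASK;
	val |= PHY_ANALOG_THERM_THERMSTART_SET(1);
	reg_write(PHY_ANALOG_THERM_ADDRESS, val);
}

static int therm_measurement_done(unsigned int val)
{
	/* _GET returns the field already shifted down to bit 0 */
	return PHY_ANALOG_THERM_SAR_ADC_DONE_GET(val);
}
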
diff --git a/drivers/staging/ath6kl/include/common/AR6002/hw4.0/hw/analog_intf_reg.h b/drivers/staging/ath6kl/include/common/AR6002/hw4.0/hw/analog_intf_reg.h
new file mode 100644
index 000000000000..01b9eb54a43c
--- /dev/null
+++ b/drivers/staging/ath6kl/include/common/AR6002/hw4.0/hw/analog_intf_reg.h
@@ -0,0 +1,37 @@
+// ------------------------------------------------------------------
+// Copyright (c) 2004-2010 Atheros Corporation. All rights reserved.
+//
+//
+// Permission to use, copy, modify, and/or distribute this software for any
+// purpose with or without fee is hereby granted, provided that the above
+// copyright notice and this permission notice appear in all copies.
+//
+// THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+// WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+// MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+// ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+// WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+// ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+// OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+//
+//
+// ------------------------------------------------------------------
+//===================================================================
+// Author(s): ="Atheros"
+//===================================================================
+
+
+#ifdef WLAN_HEADERS
+
+#include "analog_intf_athr_wlan_reg.h"
+
+
+#ifndef BT_HEADERS
+
+
+
+#endif
+#endif
+
+
+
diff --git a/drivers/staging/ath6kl/include/common/AR6002/hw4.0/hw/apb_athr_wlan_map.h b/drivers/staging/ath6kl/include/common/AR6002/hw4.0/hw/apb_athr_wlan_map.h
new file mode 100644
index 000000000000..609eb9841f59
--- /dev/null
+++ b/drivers/staging/ath6kl/include/common/AR6002/hw4.0/hw/apb_athr_wlan_map.h
@@ -0,0 +1,40 @@
+// ------------------------------------------------------------------
+// Copyright (c) 2004-2010 Atheros Corporation. All rights reserved.
+//
+//
+// Permission to use, copy, modify, and/or distribute this software for any
+// purpose with or without fee is hereby granted, provided that the above
+// copyright notice and this permission notice appear in all copies.
+//
+// THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+// WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+// MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+// ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+// WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+// ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+// OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+//
+//
+// ------------------------------------------------------------------
+//===================================================================
+// Author(s): ="Atheros"
+//===================================================================
+
+
+#ifndef _APB_ATHR_WLAN_MAP_H_
+#define _APB_ATHR_WLAN_MAP_H_
+
+#define WLAN_RTC_BASE_ADDRESS 0x00004000
+#define WLAN_VMC_BASE_ADDRESS 0x00008000
+#define WLAN_UART_BASE_ADDRESS 0x0000c000
+#define WLAN_DBG_UART_BASE_ADDRESS 0x0000d000
+#define WLAN_UMBOX_BASE_ADDRESS 0x0000e000
+#define WLAN_SI_BASE_ADDRESS 0x00010000
+#define WLAN_GPIO_BASE_ADDRESS 0x00014000
+#define WLAN_MBOX_BASE_ADDRESS 0x00018000
+#define WLAN_ANALOG_INTF_BASE_ADDRESS 0x0001c000
+#define WLAN_MAC_BASE_ADDRESS 0x00020000
+#define WLAN_RDMA_BASE_ADDRESS 0x00030100
+#define EFUSE_BASE_ADDRESS 0x00031000
+
+#endif /* _APB_ATHR_WLAN_MAP_H_ */
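
These base addresses locate whole APB blocks, while the per-register _ADDRESS/_OFFSET macros in headers such as analog_intf_athr_wlan_reg.h are offsets within their block. A one-line hedged sketch, assuming absolute MMIO addresses are formed by simple addition (this patch does not show the actual address arithmetic):

/* assumption: absolute MMIO address = block base + register offset */
#define EXAMPLE_WLAN_THERM_ADDRESS \
	(WLAN_ANALOG_INTF_BASE_ADDRESS + PHY_ANALOG_THERM_OFFSET)
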
diff --git a/drivers/staging/ath6kl/include/common/AR6002/hw4.0/hw/apb_map.h b/drivers/staging/ath6kl/include/common/AR6002/hw4.0/hw/apb_map.h
new file mode 100644
index 000000000000..e4d2d62f0bb4
--- /dev/null
+++ b/drivers/staging/ath6kl/include/common/AR6002/hw4.0/hw/apb_map.h
@@ -0,0 +1,48 @@
+// ------------------------------------------------------------------
+// Copyright (c) 2004-2010 Atheros Corporation. All rights reserved.
+//
+//
+// Permission to use, copy, modify, and/or distribute this software for any
+// purpose with or without fee is hereby granted, provided that the above
+// copyright notice and this permission notice appear in all copies.
+//
+// THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+// WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+// MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+// ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+// WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+// ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+// OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+//
+//
+// ------------------------------------------------------------------
+//===================================================================
+// Author(s): ="Atheros"
+//===================================================================
+
+
+#ifdef WLAN_HEADERS
+
+#include "apb_athr_wlan_map.h"
+
+
+#ifndef BT_HEADERS
+
+#define RTC_BASE_ADDRESS WLAN_RTC_BASE_ADDRESS
+#define VMC_BASE_ADDRESS WLAN_VMC_BASE_ADDRESS
+#define UART_BASE_ADDRESS WLAN_UART_BASE_ADDRESS
+#define DBG_UART_BASE_ADDRESS WLAN_DBG_UART_BASE_ADDRESS
+#define UMBOX_BASE_ADDRESS WLAN_UMBOX_BASE_ADDRESS
+#define SI_BASE_ADDRESS WLAN_SI_BASE_ADDRESS
+#define GPIO_BASE_ADDRESS WLAN_GPIO_BASE_ADDRESS
+#define MBOX_BASE_ADDRESS WLAN_MBOX_BASE_ADDRESS
+#define ANALOG_INTF_BASE_ADDRESS WLAN_ANALOG_INTF_BASE_ADDRESS
+#define MAC_BASE_ADDRESS WLAN_MAC_BASE_ADDRESS
+#define RDMA_BASE_ADDRESS WLAN_RDMA_BASE_ADDRESS
+
+
+#endif
+#endif
+
+
+
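
apb_map.h is a thin selector: it expands to nothing unless WLAN_HEADERS is defined, and it only provides the short, unprefixed *_BASE_ADDRESS names when BT_HEADERS is not defined, presumably so a Bluetooth register map could reuse those generic names. A minimal usage sketch, illustrative only and not part of the patch:

/* illustrative only: pull in the WLAN map plus the generic aliases */
#define WLAN_HEADERS
#include "apb_map.h"
/* GPIO_BASE_ADDRESS now aliases WLAN_GPIO_BASE_ADDRESS (0x00014000) */
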
diff --git a/drivers/staging/ath6kl/include/common/AR6002/hw4.0/hw/bb_lc_reg.h b/drivers/staging/ath6kl/include/common/AR6002/hw4.0/hw/bb_lc_reg.h
new file mode 100644
index 000000000000..271192953162
--- /dev/null
+++ b/drivers/staging/ath6kl/include/common/AR6002/hw4.0/hw/bb_lc_reg.h
@@ -0,0 +1,7076 @@
+// ------------------------------------------------------------------
+// Copyright (c) 2004-2010 Atheros Corporation. All rights reserved.
+//
+//
+// Permission to use, copy, modify, and/or distribute this software for any
+// purpose with or without fee is hereby granted, provided that the above
+// copyright notice and this permission notice appear in all copies.
+//
+// THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+// WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+// MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+// ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+// WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+// ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+// OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+//
+//
+// ------------------------------------------------------------------
+//===================================================================
+// Author(s): ="Atheros"
+//===================================================================
+
+/* Copyright (C) 2009 Denali Software Inc. All rights reserved */
+/* THIS FILE IS AUTOMATICALLY GENERATED BY DENALI BLUEPRINT, DO NOT EDIT */
+
+
+#ifndef _BB_LC_REG_REG_H_
+#define _BB_LC_REG_REG_H_
+
+
+/* macros for BB_test_controls */
+#define PHY_BB_TEST_CONTROLS_ADDRESS 0x00009800
+#define PHY_BB_TEST_CONTROLS_OFFSET 0x00009800
+#define PHY_BB_TEST_CONTROLS_CF_TSTTRIG_SEL_MSB 3
+#define PHY_BB_TEST_CONTROLS_CF_TSTTRIG_SEL_LSB 0
+#define PHY_BB_TEST_CONTROLS_CF_TSTTRIG_SEL_MASK 0x0000000f
+#define PHY_BB_TEST_CONTROLS_CF_TSTTRIG_SEL_GET(x) (((x) & 0x0000000f) >> 0)
+#define PHY_BB_TEST_CONTROLS_CF_TSTTRIG_SEL_SET(x) (((x) << 0) & 0x0000000f)
+#define PHY_BB_TEST_CONTROLS_CF_TSTTRIG_MSB 4
+#define PHY_BB_TEST_CONTROLS_CF_TSTTRIG_LSB 4
+#define PHY_BB_TEST_CONTROLS_CF_TSTTRIG_MASK 0x00000010
+#define PHY_BB_TEST_CONTROLS_CF_TSTTRIG_GET(x) (((x) & 0x00000010) >> 4)
+#define PHY_BB_TEST_CONTROLS_CF_TSTTRIG_SET(x) (((x) << 4) & 0x00000010)
+#define PHY_BB_TEST_CONTROLS_CF_RFSHIFT_SEL_MSB 6
+#define PHY_BB_TEST_CONTROLS_CF_RFSHIFT_SEL_LSB 5
+#define PHY_BB_TEST_CONTROLS_CF_RFSHIFT_SEL_MASK 0x00000060
+#define PHY_BB_TEST_CONTROLS_CF_RFSHIFT_SEL_GET(x) (((x) & 0x00000060) >> 5)
+#define PHY_BB_TEST_CONTROLS_CF_RFSHIFT_SEL_SET(x) (((x) << 5) & 0x00000060)
+#define PHY_BB_TEST_CONTROLS_CARDBUS_MODE_MSB 9
+#define PHY_BB_TEST_CONTROLS_CARDBUS_MODE_LSB 8
+#define PHY_BB_TEST_CONTROLS_CARDBUS_MODE_MASK 0x00000300
+#define PHY_BB_TEST_CONTROLS_CARDBUS_MODE_GET(x) (((x) & 0x00000300) >> 8)
+#define PHY_BB_TEST_CONTROLS_CARDBUS_MODE_SET(x) (((x) << 8) & 0x00000300)
+#define PHY_BB_TEST_CONTROLS_CLKOUT_IS_CLK32_MSB 10
+#define PHY_BB_TEST_CONTROLS_CLKOUT_IS_CLK32_LSB 10
+#define PHY_BB_TEST_CONTROLS_CLKOUT_IS_CLK32_MASK 0x00000400
+#define PHY_BB_TEST_CONTROLS_CLKOUT_IS_CLK32_GET(x) (((x) & 0x00000400) >> 10)
+#define PHY_BB_TEST_CONTROLS_CLKOUT_IS_CLK32_SET(x) (((x) << 10) & 0x00000400)
+#define PHY_BB_TEST_CONTROLS_ENABLE_RFSILENT_BB_MSB 13
+#define PHY_BB_TEST_CONTROLS_ENABLE_RFSILENT_BB_LSB 13
+#define PHY_BB_TEST_CONTROLS_ENABLE_RFSILENT_BB_MASK 0x00002000
+#define PHY_BB_TEST_CONTROLS_ENABLE_RFSILENT_BB_GET(x) (((x) & 0x00002000) >> 13)
+#define PHY_BB_TEST_CONTROLS_ENABLE_RFSILENT_BB_SET(x) (((x) << 13) & 0x00002000)
+#define PHY_BB_TEST_CONTROLS_ENABLE_MINI_OBS_MSB 15
+#define PHY_BB_TEST_CONTROLS_ENABLE_MINI_OBS_LSB 15
+#define PHY_BB_TEST_CONTROLS_ENABLE_MINI_OBS_MASK 0x00008000
+#define PHY_BB_TEST_CONTROLS_ENABLE_MINI_OBS_GET(x) (((x) & 0x00008000) >> 15)
+#define PHY_BB_TEST_CONTROLS_ENABLE_MINI_OBS_SET(x) (((x) << 15) & 0x00008000)
+#define PHY_BB_TEST_CONTROLS_SLOW_CLK160_MSB 17
+#define PHY_BB_TEST_CONTROLS_SLOW_CLK160_LSB 17
+#define PHY_BB_TEST_CONTROLS_SLOW_CLK160_MASK 0x00020000
+#define PHY_BB_TEST_CONTROLS_SLOW_CLK160_GET(x) (((x) & 0x00020000) >> 17)
+#define PHY_BB_TEST_CONTROLS_SLOW_CLK160_SET(x) (((x) << 17) & 0x00020000)
+#define PHY_BB_TEST_CONTROLS_AGC_OBS_SEL_3_MSB 18
+#define PHY_BB_TEST_CONTROLS_AGC_OBS_SEL_3_LSB 18
+#define PHY_BB_TEST_CONTROLS_AGC_OBS_SEL_3_MASK 0x00040000
+#define PHY_BB_TEST_CONTROLS_AGC_OBS_SEL_3_GET(x) (((x) & 0x00040000) >> 18)
+#define PHY_BB_TEST_CONTROLS_AGC_OBS_SEL_3_SET(x) (((x) << 18) & 0x00040000)
+#define PHY_BB_TEST_CONTROLS_CF_BBB_OBS_SEL_MSB 22
+#define PHY_BB_TEST_CONTROLS_CF_BBB_OBS_SEL_LSB 19
+#define PHY_BB_TEST_CONTROLS_CF_BBB_OBS_SEL_MASK 0x00780000
+#define PHY_BB_TEST_CONTROLS_CF_BBB_OBS_SEL_GET(x) (((x) & 0x00780000) >> 19)
+#define PHY_BB_TEST_CONTROLS_CF_BBB_OBS_SEL_SET(x) (((x) << 19) & 0x00780000)
+#define PHY_BB_TEST_CONTROLS_RX_OBS_SEL_5TH_BIT_MSB 23
+#define PHY_BB_TEST_CONTROLS_RX_OBS_SEL_5TH_BIT_LSB 23
+#define PHY_BB_TEST_CONTROLS_RX_OBS_SEL_5TH_BIT_MASK 0x00800000
+#define PHY_BB_TEST_CONTROLS_RX_OBS_SEL_5TH_BIT_GET(x) (((x) & 0x00800000) >> 23)
+#define PHY_BB_TEST_CONTROLS_RX_OBS_SEL_5TH_BIT_SET(x) (((x) << 23) & 0x00800000)
+#define PHY_BB_TEST_CONTROLS_AGC_OBS_SEL_4_MSB 24
+#define PHY_BB_TEST_CONTROLS_AGC_OBS_SEL_4_LSB 24
+#define PHY_BB_TEST_CONTROLS_AGC_OBS_SEL_4_MASK 0x01000000
+#define PHY_BB_TEST_CONTROLS_AGC_OBS_SEL_4_GET(x) (((x) & 0x01000000) >> 24)
+#define PHY_BB_TEST_CONTROLS_AGC_OBS_SEL_4_SET(x) (((x) << 24) & 0x01000000)
+#define PHY_BB_TEST_CONTROLS_FORCE_AGC_CLEAR_MSB 28
+#define PHY_BB_TEST_CONTROLS_FORCE_AGC_CLEAR_LSB 28
+#define PHY_BB_TEST_CONTROLS_FORCE_AGC_CLEAR_MASK 0x10000000
+#define PHY_BB_TEST_CONTROLS_FORCE_AGC_CLEAR_GET(x) (((x) & 0x10000000) >> 28)
+#define PHY_BB_TEST_CONTROLS_FORCE_AGC_CLEAR_SET(x) (((x) << 28) & 0x10000000)
+#define PHY_BB_TEST_CONTROLS_TSTDAC_OUT_SEL_MSB 31
+#define PHY_BB_TEST_CONTROLS_TSTDAC_OUT_SEL_LSB 30
+#define PHY_BB_TEST_CONTROLS_TSTDAC_OUT_SEL_MASK 0xc0000000
+#define PHY_BB_TEST_CONTROLS_TSTDAC_OUT_SEL_GET(x) (((x) & 0xc0000000) >> 30)
+#define PHY_BB_TEST_CONTROLS_TSTDAC_OUT_SEL_SET(x) (((x) << 30) & 0xc0000000)
+
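+/*
+ * Illustrative usage sketch (editorial note, not part of the generated
+ * register map): every field above follows the same _MASK/_GET/_SET
+ * pattern, so updating one field is a read-modify-write of the whole
+ * register.  REG_READ()/REG_WRITE() below are hypothetical MMIO
+ * accessors standing in for whatever the surrounding driver provides;
+ * they are not defined in this header.
+ *
+ *   u32 val = REG_READ(PHY_BB_TEST_CONTROLS_ADDRESS);
+ *   val &= ~PHY_BB_TEST_CONTROLS_CF_TSTTRIG_SEL_MASK;    clear the field
+ *   val |= PHY_BB_TEST_CONTROLS_CF_TSTTRIG_SEL_SET(2);   insert new value
+ *   REG_WRITE(PHY_BB_TEST_CONTROLS_ADDRESS, val);
+ *
+ *   u32 sel = PHY_BB_TEST_CONTROLS_CF_TSTTRIG_SEL_GET(val);
+ */
+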
+/* macros for BB_gen_controls */
+#define PHY_BB_GEN_CONTROLS_ADDRESS 0x00009804
+#define PHY_BB_GEN_CONTROLS_OFFSET 0x00009804
+#define PHY_BB_GEN_CONTROLS_TURBO_MSB 0
+#define PHY_BB_GEN_CONTROLS_TURBO_LSB 0
+#define PHY_BB_GEN_CONTROLS_TURBO_MASK 0x00000001
+#define PHY_BB_GEN_CONTROLS_TURBO_GET(x) (((x) & 0x00000001) >> 0)
+#define PHY_BB_GEN_CONTROLS_TURBO_SET(x) (((x) << 0) & 0x00000001)
+#define PHY_BB_GEN_CONTROLS_CF_SHORT20_MSB 1
+#define PHY_BB_GEN_CONTROLS_CF_SHORT20_LSB 1
+#define PHY_BB_GEN_CONTROLS_CF_SHORT20_MASK 0x00000002
+#define PHY_BB_GEN_CONTROLS_CF_SHORT20_GET(x) (((x) & 0x00000002) >> 1)
+#define PHY_BB_GEN_CONTROLS_CF_SHORT20_SET(x) (((x) << 1) & 0x00000002)
+#define PHY_BB_GEN_CONTROLS_DYN_20_40_MSB 2
+#define PHY_BB_GEN_CONTROLS_DYN_20_40_LSB 2
+#define PHY_BB_GEN_CONTROLS_DYN_20_40_MASK 0x00000004
+#define PHY_BB_GEN_CONTROLS_DYN_20_40_GET(x) (((x) & 0x00000004) >> 2)
+#define PHY_BB_GEN_CONTROLS_DYN_20_40_SET(x) (((x) << 2) & 0x00000004)
+#define PHY_BB_GEN_CONTROLS_DYN_20_40_PRI_ONLY_MSB 3
+#define PHY_BB_GEN_CONTROLS_DYN_20_40_PRI_ONLY_LSB 3
+#define PHY_BB_GEN_CONTROLS_DYN_20_40_PRI_ONLY_MASK 0x00000008
+#define PHY_BB_GEN_CONTROLS_DYN_20_40_PRI_ONLY_GET(x) (((x) & 0x00000008) >> 3)
+#define PHY_BB_GEN_CONTROLS_DYN_20_40_PRI_ONLY_SET(x) (((x) << 3) & 0x00000008)
+#define PHY_BB_GEN_CONTROLS_DYN_20_40_PRI_CHN_MSB 4
+#define PHY_BB_GEN_CONTROLS_DYN_20_40_PRI_CHN_LSB 4
+#define PHY_BB_GEN_CONTROLS_DYN_20_40_PRI_CHN_MASK 0x00000010
+#define PHY_BB_GEN_CONTROLS_DYN_20_40_PRI_CHN_GET(x) (((x) & 0x00000010) >> 4)
+#define PHY_BB_GEN_CONTROLS_DYN_20_40_PRI_CHN_SET(x) (((x) << 4) & 0x00000010)
+#define PHY_BB_GEN_CONTROLS_DYN_20_40_EXT_CHN_MSB 5
+#define PHY_BB_GEN_CONTROLS_DYN_20_40_EXT_CHN_LSB 5
+#define PHY_BB_GEN_CONTROLS_DYN_20_40_EXT_CHN_MASK 0x00000020
+#define PHY_BB_GEN_CONTROLS_DYN_20_40_EXT_CHN_GET(x) (((x) & 0x00000020) >> 5)
+#define PHY_BB_GEN_CONTROLS_DYN_20_40_EXT_CHN_SET(x) (((x) << 5) & 0x00000020)
+#define PHY_BB_GEN_CONTROLS_HT_ENABLE_MSB 6
+#define PHY_BB_GEN_CONTROLS_HT_ENABLE_LSB 6
+#define PHY_BB_GEN_CONTROLS_HT_ENABLE_MASK 0x00000040
+#define PHY_BB_GEN_CONTROLS_HT_ENABLE_GET(x) (((x) & 0x00000040) >> 6)
+#define PHY_BB_GEN_CONTROLS_HT_ENABLE_SET(x) (((x) << 6) & 0x00000040)
+#define PHY_BB_GEN_CONTROLS_ALLOW_SHORT_GI_MSB 7
+#define PHY_BB_GEN_CONTROLS_ALLOW_SHORT_GI_LSB 7
+#define PHY_BB_GEN_CONTROLS_ALLOW_SHORT_GI_MASK 0x00000080
+#define PHY_BB_GEN_CONTROLS_ALLOW_SHORT_GI_GET(x) (((x) & 0x00000080) >> 7)
+#define PHY_BB_GEN_CONTROLS_ALLOW_SHORT_GI_SET(x) (((x) << 7) & 0x00000080)
+#define PHY_BB_GEN_CONTROLS_CF_2_CHAINS_USE_WALSH_MSB 8
+#define PHY_BB_GEN_CONTROLS_CF_2_CHAINS_USE_WALSH_LSB 8
+#define PHY_BB_GEN_CONTROLS_CF_2_CHAINS_USE_WALSH_MASK 0x00000100
+#define PHY_BB_GEN_CONTROLS_CF_2_CHAINS_USE_WALSH_GET(x) (((x) & 0x00000100) >> 8)
+#define PHY_BB_GEN_CONTROLS_CF_2_CHAINS_USE_WALSH_SET(x) (((x) << 8) & 0x00000100)
+#define PHY_BB_GEN_CONTROLS_CF_SINGLE_HT_LTF1_MSB 9
+#define PHY_BB_GEN_CONTROLS_CF_SINGLE_HT_LTF1_LSB 9
+#define PHY_BB_GEN_CONTROLS_CF_SINGLE_HT_LTF1_MASK 0x00000200
+#define PHY_BB_GEN_CONTROLS_CF_SINGLE_HT_LTF1_GET(x) (((x) & 0x00000200) >> 9)
+#define PHY_BB_GEN_CONTROLS_CF_SINGLE_HT_LTF1_SET(x) (((x) << 9) & 0x00000200)
+#define PHY_BB_GEN_CONTROLS_GF_ENABLE_MSB 10
+#define PHY_BB_GEN_CONTROLS_GF_ENABLE_LSB 10
+#define PHY_BB_GEN_CONTROLS_GF_ENABLE_MASK 0x00000400
+#define PHY_BB_GEN_CONTROLS_GF_ENABLE_GET(x) (((x) & 0x00000400) >> 10)
+#define PHY_BB_GEN_CONTROLS_GF_ENABLE_SET(x) (((x) << 10) & 0x00000400)
+#define PHY_BB_GEN_CONTROLS_BYPASS_DAC_FIFO_N_MSB 11
+#define PHY_BB_GEN_CONTROLS_BYPASS_DAC_FIFO_N_LSB 11
+#define PHY_BB_GEN_CONTROLS_BYPASS_DAC_FIFO_N_MASK 0x00000800
+#define PHY_BB_GEN_CONTROLS_BYPASS_DAC_FIFO_N_GET(x) (((x) & 0x00000800) >> 11)
+#define PHY_BB_GEN_CONTROLS_BYPASS_DAC_FIFO_N_SET(x) (((x) << 11) & 0x00000800)
+
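+/*
+ * Illustrative sketch (editorial note, not part of the generated register
+ * map): because each _SET(x) macro masks its argument into the field's
+ * own bit range, a complete register value can be composed by OR-ing
+ * several fields together before a single write, e.g.:
+ *
+ *   u32 gen = PHY_BB_GEN_CONTROLS_HT_ENABLE_SET(1) |
+ *             PHY_BB_GEN_CONTROLS_DYN_20_40_SET(1) |
+ *             PHY_BB_GEN_CONTROLS_ALLOW_SHORT_GI_SET(1);
+ *
+ * The register write itself would go through whatever accessor the
+ * driver provides; none is defined in this header.
+ */
+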
+/* macros for BB_test_controls_status */
+#define PHY_BB_TEST_CONTROLS_STATUS_ADDRESS 0x00009808
+#define PHY_BB_TEST_CONTROLS_STATUS_OFFSET 0x00009808
+#define PHY_BB_TEST_CONTROLS_STATUS_CF_TSTDAC_EN_MSB 0
+#define PHY_BB_TEST_CONTROLS_STATUS_CF_TSTDAC_EN_LSB 0
+#define PHY_BB_TEST_CONTROLS_STATUS_CF_TSTDAC_EN_MASK 0x00000001
+#define PHY_BB_TEST_CONTROLS_STATUS_CF_TSTDAC_EN_GET(x) (((x) & 0x00000001) >> 0)
+#define PHY_BB_TEST_CONTROLS_STATUS_CF_TSTDAC_EN_SET(x) (((x) << 0) & 0x00000001)
+#define PHY_BB_TEST_CONTROLS_STATUS_CF_TX_SRC_IS_TSTDAC_MSB 1
+#define PHY_BB_TEST_CONTROLS_STATUS_CF_TX_SRC_IS_TSTDAC_LSB 1
+#define PHY_BB_TEST_CONTROLS_STATUS_CF_TX_SRC_IS_TSTDAC_MASK 0x00000002
+#define PHY_BB_TEST_CONTROLS_STATUS_CF_TX_SRC_IS_TSTDAC_GET(x) (((x) & 0x00000002) >> 1)
+#define PHY_BB_TEST_CONTROLS_STATUS_CF_TX_SRC_IS_TSTDAC_SET(x) (((x) << 1) & 0x00000002)
+#define PHY_BB_TEST_CONTROLS_STATUS_CF_TX_OBS_SEL_MSB 4
+#define PHY_BB_TEST_CONTROLS_STATUS_CF_TX_OBS_SEL_LSB 2
+#define PHY_BB_TEST_CONTROLS_STATUS_CF_TX_OBS_SEL_MASK 0x0000001c
+#define PHY_BB_TEST_CONTROLS_STATUS_CF_TX_OBS_SEL_GET(x) (((x) & 0x0000001c) >> 2)
+#define PHY_BB_TEST_CONTROLS_STATUS_CF_TX_OBS_SEL_SET(x) (((x) << 2) & 0x0000001c)
+#define PHY_BB_TEST_CONTROLS_STATUS_CF_TX_OBS_MUX_SEL_MSB 6
+#define PHY_BB_TEST_CONTROLS_STATUS_CF_TX_OBS_MUX_SEL_LSB 5
+#define PHY_BB_TEST_CONTROLS_STATUS_CF_TX_OBS_MUX_SEL_MASK 0x00000060
+#define PHY_BB_TEST_CONTROLS_STATUS_CF_TX_OBS_MUX_SEL_GET(x) (((x) & 0x00000060) >> 5)
+#define PHY_BB_TEST_CONTROLS_STATUS_CF_TX_OBS_MUX_SEL_SET(x) (((x) << 5) & 0x00000060)
+#define PHY_BB_TEST_CONTROLS_STATUS_CF_TX_SRC_ALTERNATE_MSB 7
+#define PHY_BB_TEST_CONTROLS_STATUS_CF_TX_SRC_ALTERNATE_LSB 7
+#define PHY_BB_TEST_CONTROLS_STATUS_CF_TX_SRC_ALTERNATE_MASK 0x00000080
+#define PHY_BB_TEST_CONTROLS_STATUS_CF_TX_SRC_ALTERNATE_GET(x) (((x) & 0x00000080) >> 7)
+#define PHY_BB_TEST_CONTROLS_STATUS_CF_TX_SRC_ALTERNATE_SET(x) (((x) << 7) & 0x00000080)
+#define PHY_BB_TEST_CONTROLS_STATUS_CF_TSTADC_EN_MSB 8
+#define PHY_BB_TEST_CONTROLS_STATUS_CF_TSTADC_EN_LSB 8
+#define PHY_BB_TEST_CONTROLS_STATUS_CF_TSTADC_EN_MASK 0x00000100
+#define PHY_BB_TEST_CONTROLS_STATUS_CF_TSTADC_EN_GET(x) (((x) & 0x00000100) >> 8)
+#define PHY_BB_TEST_CONTROLS_STATUS_CF_TSTADC_EN_SET(x) (((x) << 8) & 0x00000100)
+#define PHY_BB_TEST_CONTROLS_STATUS_CF_RX_SRC_IS_TSTADC_MSB 9
+#define PHY_BB_TEST_CONTROLS_STATUS_CF_RX_SRC_IS_TSTADC_LSB 9
+#define PHY_BB_TEST_CONTROLS_STATUS_CF_RX_SRC_IS_TSTADC_MASK 0x00000200
+#define PHY_BB_TEST_CONTROLS_STATUS_CF_RX_SRC_IS_TSTADC_GET(x) (((x) & 0x00000200) >> 9)
+#define PHY_BB_TEST_CONTROLS_STATUS_CF_RX_SRC_IS_TSTADC_SET(x) (((x) << 9) & 0x00000200)
+#define PHY_BB_TEST_CONTROLS_STATUS_RX_OBS_SEL_MSB 13
+#define PHY_BB_TEST_CONTROLS_STATUS_RX_OBS_SEL_LSB 10
+#define PHY_BB_TEST_CONTROLS_STATUS_RX_OBS_SEL_MASK 0x00003c00
+#define PHY_BB_TEST_CONTROLS_STATUS_RX_OBS_SEL_GET(x) (((x) & 0x00003c00) >> 10)
+#define PHY_BB_TEST_CONTROLS_STATUS_RX_OBS_SEL_SET(x) (((x) << 10) & 0x00003c00)
+#define PHY_BB_TEST_CONTROLS_STATUS_DISABLE_A2_WARM_RESET_MSB 14
+#define PHY_BB_TEST_CONTROLS_STATUS_DISABLE_A2_WARM_RESET_LSB 14
+#define PHY_BB_TEST_CONTROLS_STATUS_DISABLE_A2_WARM_RESET_MASK 0x00004000
+#define PHY_BB_TEST_CONTROLS_STATUS_DISABLE_A2_WARM_RESET_GET(x) (((x) & 0x00004000) >> 14)
+#define PHY_BB_TEST_CONTROLS_STATUS_DISABLE_A2_WARM_RESET_SET(x) (((x) << 14) & 0x00004000)
+#define PHY_BB_TEST_CONTROLS_STATUS_RESET_A2_MSB 15
+#define PHY_BB_TEST_CONTROLS_STATUS_RESET_A2_LSB 15
+#define PHY_BB_TEST_CONTROLS_STATUS_RESET_A2_MASK 0x00008000
+#define PHY_BB_TEST_CONTROLS_STATUS_RESET_A2_GET(x) (((x) & 0x00008000) >> 15)
+#define PHY_BB_TEST_CONTROLS_STATUS_RESET_A2_SET(x) (((x) << 15) & 0x00008000)
+#define PHY_BB_TEST_CONTROLS_STATUS_AGC_OBS_SEL_MSB 18
+#define PHY_BB_TEST_CONTROLS_STATUS_AGC_OBS_SEL_LSB 16
+#define PHY_BB_TEST_CONTROLS_STATUS_AGC_OBS_SEL_MASK 0x00070000
+#define PHY_BB_TEST_CONTROLS_STATUS_AGC_OBS_SEL_GET(x) (((x) & 0x00070000) >> 16)
+#define PHY_BB_TEST_CONTROLS_STATUS_AGC_OBS_SEL_SET(x) (((x) << 16) & 0x00070000)
+#define PHY_BB_TEST_CONTROLS_STATUS_CF_ENABLE_FFT_DUMP_MSB 19
+#define PHY_BB_TEST_CONTROLS_STATUS_CF_ENABLE_FFT_DUMP_LSB 19
+#define PHY_BB_TEST_CONTROLS_STATUS_CF_ENABLE_FFT_DUMP_MASK 0x00080000
+#define PHY_BB_TEST_CONTROLS_STATUS_CF_ENABLE_FFT_DUMP_GET(x) (((x) & 0x00080000) >> 19)
+#define PHY_BB_TEST_CONTROLS_STATUS_CF_ENABLE_FFT_DUMP_SET(x) (((x) << 19) & 0x00080000)
+#define PHY_BB_TEST_CONTROLS_STATUS_CF_DEBUGPORT_IN_MSB 23
+#define PHY_BB_TEST_CONTROLS_STATUS_CF_DEBUGPORT_IN_LSB 23
+#define PHY_BB_TEST_CONTROLS_STATUS_CF_DEBUGPORT_IN_MASK 0x00800000
+#define PHY_BB_TEST_CONTROLS_STATUS_CF_DEBUGPORT_IN_GET(x) (((x) & 0x00800000) >> 23)
+#define PHY_BB_TEST_CONTROLS_STATUS_CF_DEBUGPORT_IN_SET(x) (((x) << 23) & 0x00800000)
+#define PHY_BB_TEST_CONTROLS_STATUS_DISABLE_AGC_TO_A2_MSB 27
+#define PHY_BB_TEST_CONTROLS_STATUS_DISABLE_AGC_TO_A2_LSB 27
+#define PHY_BB_TEST_CONTROLS_STATUS_DISABLE_AGC_TO_A2_MASK 0x08000000
+#define PHY_BB_TEST_CONTROLS_STATUS_DISABLE_AGC_TO_A2_GET(x) (((x) & 0x08000000) >> 27)
+#define PHY_BB_TEST_CONTROLS_STATUS_DISABLE_AGC_TO_A2_SET(x) (((x) << 27) & 0x08000000)
+#define PHY_BB_TEST_CONTROLS_STATUS_CF_DEBUGPORT_EN_MSB 28
+#define PHY_BB_TEST_CONTROLS_STATUS_CF_DEBUGPORT_EN_LSB 28
+#define PHY_BB_TEST_CONTROLS_STATUS_CF_DEBUGPORT_EN_MASK 0x10000000
+#define PHY_BB_TEST_CONTROLS_STATUS_CF_DEBUGPORT_EN_GET(x) (((x) & 0x10000000) >> 28)
+#define PHY_BB_TEST_CONTROLS_STATUS_CF_DEBUGPORT_EN_SET(x) (((x) << 28) & 0x10000000)
+#define PHY_BB_TEST_CONTROLS_STATUS_CF_DEBUGPORT_SEL_MSB 30
+#define PHY_BB_TEST_CONTROLS_STATUS_CF_DEBUGPORT_SEL_LSB 29
+#define PHY_BB_TEST_CONTROLS_STATUS_CF_DEBUGPORT_SEL_MASK 0x60000000
+#define PHY_BB_TEST_CONTROLS_STATUS_CF_DEBUGPORT_SEL_GET(x) (((x) & 0x60000000) >> 29)
+#define PHY_BB_TEST_CONTROLS_STATUS_CF_DEBUGPORT_SEL_SET(x) (((x) << 29) & 0x60000000)
+
+/* macros for BB_timing_controls_1 */
+#define PHY_BB_TIMING_CONTROLS_1_ADDRESS 0x0000980c
+#define PHY_BB_TIMING_CONTROLS_1_OFFSET 0x0000980c
+#define PHY_BB_TIMING_CONTROLS_1_STE_THR_MSB 6
+#define PHY_BB_TIMING_CONTROLS_1_STE_THR_LSB 0
+#define PHY_BB_TIMING_CONTROLS_1_STE_THR_MASK 0x0000007f
+#define PHY_BB_TIMING_CONTROLS_1_STE_THR_GET(x) (((x) & 0x0000007f) >> 0)
+#define PHY_BB_TIMING_CONTROLS_1_STE_THR_SET(x) (((x) << 0) & 0x0000007f)
+#define PHY_BB_TIMING_CONTROLS_1_STE_TO_LONG1_MSB 12
+#define PHY_BB_TIMING_CONTROLS_1_STE_TO_LONG1_LSB 7
+#define PHY_BB_TIMING_CONTROLS_1_STE_TO_LONG1_MASK 0x00001f80
+#define PHY_BB_TIMING_CONTROLS_1_STE_TO_LONG1_GET(x) (((x) & 0x00001f80) >> 7)
+#define PHY_BB_TIMING_CONTROLS_1_STE_TO_LONG1_SET(x) (((x) << 7) & 0x00001f80)
+#define PHY_BB_TIMING_CONTROLS_1_TIMING_BACKOFF_MSB 16
+#define PHY_BB_TIMING_CONTROLS_1_TIMING_BACKOFF_LSB 13
+#define PHY_BB_TIMING_CONTROLS_1_TIMING_BACKOFF_MASK 0x0001e000
+#define PHY_BB_TIMING_CONTROLS_1_TIMING_BACKOFF_GET(x) (((x) & 0x0001e000) >> 13)
+#define PHY_BB_TIMING_CONTROLS_1_TIMING_BACKOFF_SET(x) (((x) << 13) & 0x0001e000)
+#define PHY_BB_TIMING_CONTROLS_1_ENABLE_HT_FINE_PPM_MSB 17
+#define PHY_BB_TIMING_CONTROLS_1_ENABLE_HT_FINE_PPM_LSB 17
+#define PHY_BB_TIMING_CONTROLS_1_ENABLE_HT_FINE_PPM_MASK 0x00020000
+#define PHY_BB_TIMING_CONTROLS_1_ENABLE_HT_FINE_PPM_GET(x) (((x) & 0x00020000) >> 17)
+#define PHY_BB_TIMING_CONTROLS_1_ENABLE_HT_FINE_PPM_SET(x) (((x) << 17) & 0x00020000)
+#define PHY_BB_TIMING_CONTROLS_1_HT_FINE_PPM_STREAM_MSB 19
+#define PHY_BB_TIMING_CONTROLS_1_HT_FINE_PPM_STREAM_LSB 18
+#define PHY_BB_TIMING_CONTROLS_1_HT_FINE_PPM_STREAM_MASK 0x000c0000
+#define PHY_BB_TIMING_CONTROLS_1_HT_FINE_PPM_STREAM_GET(x) (((x) & 0x000c0000) >> 18)
+#define PHY_BB_TIMING_CONTROLS_1_HT_FINE_PPM_STREAM_SET(x) (((x) << 18) & 0x000c0000)
+#define PHY_BB_TIMING_CONTROLS_1_HT_FINE_PPM_QAM_MSB 21
+#define PHY_BB_TIMING_CONTROLS_1_HT_FINE_PPM_QAM_LSB 20
+#define PHY_BB_TIMING_CONTROLS_1_HT_FINE_PPM_QAM_MASK 0x00300000
+#define PHY_BB_TIMING_CONTROLS_1_HT_FINE_PPM_QAM_GET(x) (((x) & 0x00300000) >> 20)
+#define PHY_BB_TIMING_CONTROLS_1_HT_FINE_PPM_QAM_SET(x) (((x) << 20) & 0x00300000)
+#define PHY_BB_TIMING_CONTROLS_1_ENABLE_LONG_CHANFIL_MSB 22
+#define PHY_BB_TIMING_CONTROLS_1_ENABLE_LONG_CHANFIL_LSB 22
+#define PHY_BB_TIMING_CONTROLS_1_ENABLE_LONG_CHANFIL_MASK 0x00400000
+#define PHY_BB_TIMING_CONTROLS_1_ENABLE_LONG_CHANFIL_GET(x) (((x) & 0x00400000) >> 22)
+#define PHY_BB_TIMING_CONTROLS_1_ENABLE_LONG_CHANFIL_SET(x) (((x) << 22) & 0x00400000)
+#define PHY_BB_TIMING_CONTROLS_1_ENABLE_RX_STBC_MSB 23
+#define PHY_BB_TIMING_CONTROLS_1_ENABLE_RX_STBC_LSB 23
+#define PHY_BB_TIMING_CONTROLS_1_ENABLE_RX_STBC_MASK 0x00800000
+#define PHY_BB_TIMING_CONTROLS_1_ENABLE_RX_STBC_GET(x) (((x) & 0x00800000) >> 23)
+#define PHY_BB_TIMING_CONTROLS_1_ENABLE_RX_STBC_SET(x) (((x) << 23) & 0x00800000)
+#define PHY_BB_TIMING_CONTROLS_1_ENABLE_CHANNEL_FILTER_MSB 24
+#define PHY_BB_TIMING_CONTROLS_1_ENABLE_CHANNEL_FILTER_LSB 24
+#define PHY_BB_TIMING_CONTROLS_1_ENABLE_CHANNEL_FILTER_MASK 0x01000000
+#define PHY_BB_TIMING_CONTROLS_1_ENABLE_CHANNEL_FILTER_GET(x) (((x) & 0x01000000) >> 24)
+#define PHY_BB_TIMING_CONTROLS_1_ENABLE_CHANNEL_FILTER_SET(x) (((x) << 24) & 0x01000000)
+#define PHY_BB_TIMING_CONTROLS_1_FALSE_ALARM_MSB 26
+#define PHY_BB_TIMING_CONTROLS_1_FALSE_ALARM_LSB 25
+#define PHY_BB_TIMING_CONTROLS_1_FALSE_ALARM_MASK 0x06000000
+#define PHY_BB_TIMING_CONTROLS_1_FALSE_ALARM_GET(x) (((x) & 0x06000000) >> 25)
+#define PHY_BB_TIMING_CONTROLS_1_FALSE_ALARM_SET(x) (((x) << 25) & 0x06000000)
+#define PHY_BB_TIMING_CONTROLS_1_ENABLE_LONG_RESCALE_MSB 27
+#define PHY_BB_TIMING_CONTROLS_1_ENABLE_LONG_RESCALE_LSB 27
+#define PHY_BB_TIMING_CONTROLS_1_ENABLE_LONG_RESCALE_MASK 0x08000000
+#define PHY_BB_TIMING_CONTROLS_1_ENABLE_LONG_RESCALE_GET(x) (((x) & 0x08000000) >> 27)
+#define PHY_BB_TIMING_CONTROLS_1_ENABLE_LONG_RESCALE_SET(x) (((x) << 27) & 0x08000000)
+#define PHY_BB_TIMING_CONTROLS_1_TIMING_LEAK_ENABLE_MSB 28
+#define PHY_BB_TIMING_CONTROLS_1_TIMING_LEAK_ENABLE_LSB 28
+#define PHY_BB_TIMING_CONTROLS_1_TIMING_LEAK_ENABLE_MASK 0x10000000
+#define PHY_BB_TIMING_CONTROLS_1_TIMING_LEAK_ENABLE_GET(x) (((x) & 0x10000000) >> 28)
+#define PHY_BB_TIMING_CONTROLS_1_TIMING_LEAK_ENABLE_SET(x) (((x) << 28) & 0x10000000)
+#define PHY_BB_TIMING_CONTROLS_1_COARSE_PPM_SELECT_MSB 30
+#define PHY_BB_TIMING_CONTROLS_1_COARSE_PPM_SELECT_LSB 29
+#define PHY_BB_TIMING_CONTROLS_1_COARSE_PPM_SELECT_MASK 0x60000000
+#define PHY_BB_TIMING_CONTROLS_1_COARSE_PPM_SELECT_GET(x) (((x) & 0x60000000) >> 29)
+#define PHY_BB_TIMING_CONTROLS_1_COARSE_PPM_SELECT_SET(x) (((x) << 29) & 0x60000000)
+#define PHY_BB_TIMING_CONTROLS_1_FFT_SCALING_MSB 31
+#define PHY_BB_TIMING_CONTROLS_1_FFT_SCALING_LSB 31
+#define PHY_BB_TIMING_CONTROLS_1_FFT_SCALING_MASK 0x80000000
+#define PHY_BB_TIMING_CONTROLS_1_FFT_SCALING_GET(x) (((x) & 0x80000000) >> 31)
+#define PHY_BB_TIMING_CONTROLS_1_FFT_SCALING_SET(x) (((x) << 31) & 0x80000000)
+
+/* macros for BB_timing_controls_2 */
+#define PHY_BB_TIMING_CONTROLS_2_ADDRESS 0x00009810
+#define PHY_BB_TIMING_CONTROLS_2_OFFSET 0x00009810
+#define PHY_BB_TIMING_CONTROLS_2_FORCED_DELTA_PHI_SYMBOL_MSB 11
+#define PHY_BB_TIMING_CONTROLS_2_FORCED_DELTA_PHI_SYMBOL_LSB 0
+#define PHY_BB_TIMING_CONTROLS_2_FORCED_DELTA_PHI_SYMBOL_MASK 0x00000fff
+#define PHY_BB_TIMING_CONTROLS_2_FORCED_DELTA_PHI_SYMBOL_GET(x) (((x) & 0x00000fff) >> 0)
+#define PHY_BB_TIMING_CONTROLS_2_FORCED_DELTA_PHI_SYMBOL_SET(x) (((x) << 0) & 0x00000fff)
+#define PHY_BB_TIMING_CONTROLS_2_FORCE_DELTA_PHI_SYMBOL_MSB 12
+#define PHY_BB_TIMING_CONTROLS_2_FORCE_DELTA_PHI_SYMBOL_LSB 12
+#define PHY_BB_TIMING_CONTROLS_2_FORCE_DELTA_PHI_SYMBOL_MASK 0x00001000
+#define PHY_BB_TIMING_CONTROLS_2_FORCE_DELTA_PHI_SYMBOL_GET(x) (((x) & 0x00001000) >> 12)
+#define PHY_BB_TIMING_CONTROLS_2_FORCE_DELTA_PHI_SYMBOL_SET(x) (((x) << 12) & 0x00001000)
+#define PHY_BB_TIMING_CONTROLS_2_ENABLE_MAGNITUDE_TRACK_MSB 13
+#define PHY_BB_TIMING_CONTROLS_2_ENABLE_MAGNITUDE_TRACK_LSB 13
+#define PHY_BB_TIMING_CONTROLS_2_ENABLE_MAGNITUDE_TRACK_MASK 0x00002000
+#define PHY_BB_TIMING_CONTROLS_2_ENABLE_MAGNITUDE_TRACK_GET(x) (((x) & 0x00002000) >> 13)
+#define PHY_BB_TIMING_CONTROLS_2_ENABLE_MAGNITUDE_TRACK_SET(x) (((x) << 13) & 0x00002000)
+#define PHY_BB_TIMING_CONTROLS_2_ENABLE_SLOPE_FILTER_MSB 14
+#define PHY_BB_TIMING_CONTROLS_2_ENABLE_SLOPE_FILTER_LSB 14
+#define PHY_BB_TIMING_CONTROLS_2_ENABLE_SLOPE_FILTER_MASK 0x00004000
+#define PHY_BB_TIMING_CONTROLS_2_ENABLE_SLOPE_FILTER_GET(x) (((x) & 0x00004000) >> 14)
+#define PHY_BB_TIMING_CONTROLS_2_ENABLE_SLOPE_FILTER_SET(x) (((x) << 14) & 0x00004000)
+#define PHY_BB_TIMING_CONTROLS_2_ENABLE_OFFSET_FILTER_MSB 15
+#define PHY_BB_TIMING_CONTROLS_2_ENABLE_OFFSET_FILTER_LSB 15
+#define PHY_BB_TIMING_CONTROLS_2_ENABLE_OFFSET_FILTER_MASK 0x00008000
+#define PHY_BB_TIMING_CONTROLS_2_ENABLE_OFFSET_FILTER_GET(x) (((x) & 0x00008000) >> 15)
+#define PHY_BB_TIMING_CONTROLS_2_ENABLE_OFFSET_FILTER_SET(x) (((x) << 15) & 0x00008000)
+#define PHY_BB_TIMING_CONTROLS_2_DC_OFF_DELTAF_THRES_MSB 22
+#define PHY_BB_TIMING_CONTROLS_2_DC_OFF_DELTAF_THRES_LSB 16
+#define PHY_BB_TIMING_CONTROLS_2_DC_OFF_DELTAF_THRES_MASK 0x007f0000
+#define PHY_BB_TIMING_CONTROLS_2_DC_OFF_DELTAF_THRES_GET(x) (((x) & 0x007f0000) >> 16)
+#define PHY_BB_TIMING_CONTROLS_2_DC_OFF_DELTAF_THRES_SET(x) (((x) << 16) & 0x007f0000)
+#define PHY_BB_TIMING_CONTROLS_2_DC_OFF_TIM_CONST_MSB 26
+#define PHY_BB_TIMING_CONTROLS_2_DC_OFF_TIM_CONST_LSB 24
+#define PHY_BB_TIMING_CONTROLS_2_DC_OFF_TIM_CONST_MASK 0x07000000
+#define PHY_BB_TIMING_CONTROLS_2_DC_OFF_TIM_CONST_GET(x) (((x) & 0x07000000) >> 24)
+#define PHY_BB_TIMING_CONTROLS_2_DC_OFF_TIM_CONST_SET(x) (((x) << 24) & 0x07000000)
+#define PHY_BB_TIMING_CONTROLS_2_ENABLE_DC_OFFSET_MSB 27
+#define PHY_BB_TIMING_CONTROLS_2_ENABLE_DC_OFFSET_LSB 27
+#define PHY_BB_TIMING_CONTROLS_2_ENABLE_DC_OFFSET_MASK 0x08000000
+#define PHY_BB_TIMING_CONTROLS_2_ENABLE_DC_OFFSET_GET(x) (((x) & 0x08000000) >> 27)
+#define PHY_BB_TIMING_CONTROLS_2_ENABLE_DC_OFFSET_SET(x) (((x) << 27) & 0x08000000)
+#define PHY_BB_TIMING_CONTROLS_2_ENABLE_DC_OFFSET_TRACK_MSB 28
+#define PHY_BB_TIMING_CONTROLS_2_ENABLE_DC_OFFSET_TRACK_LSB 28
+#define PHY_BB_TIMING_CONTROLS_2_ENABLE_DC_OFFSET_TRACK_MASK 0x10000000
+#define PHY_BB_TIMING_CONTROLS_2_ENABLE_DC_OFFSET_TRACK_GET(x) (((x) & 0x10000000) >> 28)
+#define PHY_BB_TIMING_CONTROLS_2_ENABLE_DC_OFFSET_TRACK_SET(x) (((x) << 28) & 0x10000000)
+#define PHY_BB_TIMING_CONTROLS_2_ENABLE_WEIGHTING_MSB 29
+#define PHY_BB_TIMING_CONTROLS_2_ENABLE_WEIGHTING_LSB 29
+#define PHY_BB_TIMING_CONTROLS_2_ENABLE_WEIGHTING_MASK 0x20000000
+#define PHY_BB_TIMING_CONTROLS_2_ENABLE_WEIGHTING_GET(x) (((x) & 0x20000000) >> 29)
+#define PHY_BB_TIMING_CONTROLS_2_ENABLE_WEIGHTING_SET(x) (((x) << 29) & 0x20000000)
+#define PHY_BB_TIMING_CONTROLS_2_TRACEBACK128_MSB 30
+#define PHY_BB_TIMING_CONTROLS_2_TRACEBACK128_LSB 30
+#define PHY_BB_TIMING_CONTROLS_2_TRACEBACK128_MASK 0x40000000
+#define PHY_BB_TIMING_CONTROLS_2_TRACEBACK128_GET(x) (((x) & 0x40000000) >> 30)
+#define PHY_BB_TIMING_CONTROLS_2_TRACEBACK128_SET(x) (((x) << 30) & 0x40000000)
+#define PHY_BB_TIMING_CONTROLS_2_ENABLE_HT_FINE_TIMING_MSB 31
+#define PHY_BB_TIMING_CONTROLS_2_ENABLE_HT_FINE_TIMING_LSB 31
+#define PHY_BB_TIMING_CONTROLS_2_ENABLE_HT_FINE_TIMING_MASK 0x80000000
+#define PHY_BB_TIMING_CONTROLS_2_ENABLE_HT_FINE_TIMING_GET(x) (((x) & 0x80000000) >> 31)
+#define PHY_BB_TIMING_CONTROLS_2_ENABLE_HT_FINE_TIMING_SET(x) (((x) << 31) & 0x80000000)
+
+/* macros for BB_timing_controls_3 */
+#define PHY_BB_TIMING_CONTROLS_3_ADDRESS 0x00009814
+#define PHY_BB_TIMING_CONTROLS_3_OFFSET 0x00009814
+#define PHY_BB_TIMING_CONTROLS_3_PPM_RESCUE_INTERVAL_MSB 7
+#define PHY_BB_TIMING_CONTROLS_3_PPM_RESCUE_INTERVAL_LSB 0
+#define PHY_BB_TIMING_CONTROLS_3_PPM_RESCUE_INTERVAL_MASK 0x000000ff
+#define PHY_BB_TIMING_CONTROLS_3_PPM_RESCUE_INTERVAL_GET(x) (((x) & 0x000000ff) >> 0)
+#define PHY_BB_TIMING_CONTROLS_3_PPM_RESCUE_INTERVAL_SET(x) (((x) << 0) & 0x000000ff)
+#define PHY_BB_TIMING_CONTROLS_3_ENABLE_PPM_RESCUE_MSB 8
+#define PHY_BB_TIMING_CONTROLS_3_ENABLE_PPM_RESCUE_LSB 8
+#define PHY_BB_TIMING_CONTROLS_3_ENABLE_PPM_RESCUE_MASK 0x00000100
+#define PHY_BB_TIMING_CONTROLS_3_ENABLE_PPM_RESCUE_GET(x) (((x) & 0x00000100) >> 8)
+#define PHY_BB_TIMING_CONTROLS_3_ENABLE_PPM_RESCUE_SET(x) (((x) << 8) & 0x00000100)
+#define PHY_BB_TIMING_CONTROLS_3_ENABLE_FINE_PPM_MSB 9
+#define PHY_BB_TIMING_CONTROLS_3_ENABLE_FINE_PPM_LSB 9
+#define PHY_BB_TIMING_CONTROLS_3_ENABLE_FINE_PPM_MASK 0x00000200
+#define PHY_BB_TIMING_CONTROLS_3_ENABLE_FINE_PPM_GET(x) (((x) & 0x00000200) >> 9)
+#define PHY_BB_TIMING_CONTROLS_3_ENABLE_FINE_PPM_SET(x) (((x) << 9) & 0x00000200)
+#define PHY_BB_TIMING_CONTROLS_3_ENABLE_FINE_INTERP_MSB 10
+#define PHY_BB_TIMING_CONTROLS_3_ENABLE_FINE_INTERP_LSB 10
+#define PHY_BB_TIMING_CONTROLS_3_ENABLE_FINE_INTERP_MASK 0x00000400
+#define PHY_BB_TIMING_CONTROLS_3_ENABLE_FINE_INTERP_GET(x) (((x) & 0x00000400) >> 10)
+#define PHY_BB_TIMING_CONTROLS_3_ENABLE_FINE_INTERP_SET(x) (((x) << 10) & 0x00000400)
+#define PHY_BB_TIMING_CONTROLS_3_CONTINUOUS_PPM_RESCUE_MSB 11
+#define PHY_BB_TIMING_CONTROLS_3_CONTINUOUS_PPM_RESCUE_LSB 11
+#define PHY_BB_TIMING_CONTROLS_3_CONTINUOUS_PPM_RESCUE_MASK 0x00000800
+#define PHY_BB_TIMING_CONTROLS_3_CONTINUOUS_PPM_RESCUE_GET(x) (((x) & 0x00000800) >> 11)
+#define PHY_BB_TIMING_CONTROLS_3_CONTINUOUS_PPM_RESCUE_SET(x) (((x) << 11) & 0x00000800)
+#define PHY_BB_TIMING_CONTROLS_3_ENABLE_DF_CHANEST_MSB 12
+#define PHY_BB_TIMING_CONTROLS_3_ENABLE_DF_CHANEST_LSB 12
+#define PHY_BB_TIMING_CONTROLS_3_ENABLE_DF_CHANEST_MASK 0x00001000
+#define PHY_BB_TIMING_CONTROLS_3_ENABLE_DF_CHANEST_GET(x) (((x) & 0x00001000) >> 12)
+#define PHY_BB_TIMING_CONTROLS_3_ENABLE_DF_CHANEST_SET(x) (((x) << 12) & 0x00001000)
+#define PHY_BB_TIMING_CONTROLS_3_DELTA_SLOPE_COEF_EXP_MSB 16
+#define PHY_BB_TIMING_CONTROLS_3_DELTA_SLOPE_COEF_EXP_LSB 13
+#define PHY_BB_TIMING_CONTROLS_3_DELTA_SLOPE_COEF_EXP_MASK 0x0001e000
+#define PHY_BB_TIMING_CONTROLS_3_DELTA_SLOPE_COEF_EXP_GET(x) (((x) & 0x0001e000) >> 13)
+#define PHY_BB_TIMING_CONTROLS_3_DELTA_SLOPE_COEF_EXP_SET(x) (((x) << 13) & 0x0001e000)
+#define PHY_BB_TIMING_CONTROLS_3_DELTA_SLOPE_COEF_MAN_MSB 31
+#define PHY_BB_TIMING_CONTROLS_3_DELTA_SLOPE_COEF_MAN_LSB 17
+#define PHY_BB_TIMING_CONTROLS_3_DELTA_SLOPE_COEF_MAN_MASK 0xfffe0000
+#define PHY_BB_TIMING_CONTROLS_3_DELTA_SLOPE_COEF_MAN_GET(x) (((x) & 0xfffe0000) >> 17)
+#define PHY_BB_TIMING_CONTROLS_3_DELTA_SLOPE_COEF_MAN_SET(x) (((x) << 17) & 0xfffe0000)
+
+/* macros for BB_D2_chip_id */
+#define PHY_BB_D2_CHIP_ID_ADDRESS 0x00009818
+#define PHY_BB_D2_CHIP_ID_OFFSET 0x00009818
+#define PHY_BB_D2_CHIP_ID_OLD_ID_MSB 7
+#define PHY_BB_D2_CHIP_ID_OLD_ID_LSB 0
+#define PHY_BB_D2_CHIP_ID_OLD_ID_MASK 0x000000ff
+#define PHY_BB_D2_CHIP_ID_OLD_ID_GET(x) (((x) & 0x000000ff) >> 0)
+#define PHY_BB_D2_CHIP_ID_ID_MSB 31
+#define PHY_BB_D2_CHIP_ID_ID_LSB 8
+#define PHY_BB_D2_CHIP_ID_ID_MASK 0xffffff00
+#define PHY_BB_D2_CHIP_ID_ID_GET(x) (((x) & 0xffffff00) >> 8)
+
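+/*
+ * Illustrative sketch (editorial note, not part of the generated register
+ * map): only _GET accessors appear for BB_D2_chip_id, presumably because
+ * its fields are read-only ID values.  Decoding a raw readback, again
+ * assuming a hypothetical REG_READ() helper:
+ *
+ *   u32 id_reg  = REG_READ(PHY_BB_D2_CHIP_ID_ADDRESS);
+ *   u32 old_id  = PHY_BB_D2_CHIP_ID_OLD_ID_GET(id_reg);
+ *   u32 chip_id = PHY_BB_D2_CHIP_ID_ID_GET(id_reg);
+ */
+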
+/* macros for BB_active */
+#define PHY_BB_ACTIVE_ADDRESS 0x0000981c
+#define PHY_BB_ACTIVE_OFFSET 0x0000981c
+#define PHY_BB_ACTIVE_CF_ACTIVE_MSB 0
+#define PHY_BB_ACTIVE_CF_ACTIVE_LSB 0
+#define PHY_BB_ACTIVE_CF_ACTIVE_MASK 0x00000001
+#define PHY_BB_ACTIVE_CF_ACTIVE_GET(x) (((x) & 0x00000001) >> 0)
+#define PHY_BB_ACTIVE_CF_ACTIVE_SET(x) (((x) << 0) & 0x00000001)
+
+/* macros for BB_tx_timing_1 */
+#define PHY_BB_TX_TIMING_1_ADDRESS 0x00009820
+#define PHY_BB_TX_TIMING_1_OFFSET 0x00009820
+#define PHY_BB_TX_TIMING_1_TX_FRAME_TO_ADC_OFF_MSB 7
+#define PHY_BB_TX_TIMING_1_TX_FRAME_TO_ADC_OFF_LSB 0
+#define PHY_BB_TX_TIMING_1_TX_FRAME_TO_ADC_OFF_MASK 0x000000ff
+#define PHY_BB_TX_TIMING_1_TX_FRAME_TO_ADC_OFF_GET(x) (((x) & 0x000000ff) >> 0)
+#define PHY_BB_TX_TIMING_1_TX_FRAME_TO_ADC_OFF_SET(x) (((x) << 0) & 0x000000ff)
+#define PHY_BB_TX_TIMING_1_TX_FRAME_TO_A2_RX_OFF_MSB 15
+#define PHY_BB_TX_TIMING_1_TX_FRAME_TO_A2_RX_OFF_LSB 8
+#define PHY_BB_TX_TIMING_1_TX_FRAME_TO_A2_RX_OFF_MASK 0x0000ff00
+#define PHY_BB_TX_TIMING_1_TX_FRAME_TO_A2_RX_OFF_GET(x) (((x) & 0x0000ff00) >> 8)
+#define PHY_BB_TX_TIMING_1_TX_FRAME_TO_A2_RX_OFF_SET(x) (((x) << 8) & 0x0000ff00)
+#define PHY_BB_TX_TIMING_1_TX_FRAME_TO_DAC_ON_MSB 23
+#define PHY_BB_TX_TIMING_1_TX_FRAME_TO_DAC_ON_LSB 16
+#define PHY_BB_TX_TIMING_1_TX_FRAME_TO_DAC_ON_MASK 0x00ff0000
+#define PHY_BB_TX_TIMING_1_TX_FRAME_TO_DAC_ON_GET(x) (((x) & 0x00ff0000) >> 16)
+#define PHY_BB_TX_TIMING_1_TX_FRAME_TO_DAC_ON_SET(x) (((x) << 16) & 0x00ff0000)
+#define PHY_BB_TX_TIMING_1_TX_FRAME_TO_A2_TX_ON_MSB 31
+#define PHY_BB_TX_TIMING_1_TX_FRAME_TO_A2_TX_ON_LSB 24
+#define PHY_BB_TX_TIMING_1_TX_FRAME_TO_A2_TX_ON_MASK 0xff000000
+#define PHY_BB_TX_TIMING_1_TX_FRAME_TO_A2_TX_ON_GET(x) (((x) & 0xff000000) >> 24)
+#define PHY_BB_TX_TIMING_1_TX_FRAME_TO_A2_TX_ON_SET(x) (((x) << 24) & 0xff000000)
+
+/* macros for BB_tx_timing_2 */
+#define PHY_BB_TX_TIMING_2_ADDRESS 0x00009824
+#define PHY_BB_TX_TIMING_2_OFFSET 0x00009824
+#define PHY_BB_TX_TIMING_2_TX_FRAME_TO_TX_D_START_MSB 7
+#define PHY_BB_TX_TIMING_2_TX_FRAME_TO_TX_D_START_LSB 0
+#define PHY_BB_TX_TIMING_2_TX_FRAME_TO_TX_D_START_MASK 0x000000ff
+#define PHY_BB_TX_TIMING_2_TX_FRAME_TO_TX_D_START_GET(x) (((x) & 0x000000ff) >> 0)
+#define PHY_BB_TX_TIMING_2_TX_FRAME_TO_TX_D_START_SET(x) (((x) << 0) & 0x000000ff)
+#define PHY_BB_TX_TIMING_2_TX_FRAME_TO_PA_ON_MSB 15
+#define PHY_BB_TX_TIMING_2_TX_FRAME_TO_PA_ON_LSB 8
+#define PHY_BB_TX_TIMING_2_TX_FRAME_TO_PA_ON_MASK 0x0000ff00
+#define PHY_BB_TX_TIMING_2_TX_FRAME_TO_PA_ON_GET(x) (((x) & 0x0000ff00) >> 8)
+#define PHY_BB_TX_TIMING_2_TX_FRAME_TO_PA_ON_SET(x) (((x) << 8) & 0x0000ff00)
+#define PHY_BB_TX_TIMING_2_TX_END_TO_PA_OFF_MSB 23
+#define PHY_BB_TX_TIMING_2_TX_END_TO_PA_OFF_LSB 16
+#define PHY_BB_TX_TIMING_2_TX_END_TO_PA_OFF_MASK 0x00ff0000
+#define PHY_BB_TX_TIMING_2_TX_END_TO_PA_OFF_GET(x) (((x) & 0x00ff0000) >> 16)
+#define PHY_BB_TX_TIMING_2_TX_END_TO_PA_OFF_SET(x) (((x) << 16) & 0x00ff0000)
+#define PHY_BB_TX_TIMING_2_TX_END_TO_A2_TX_OFF_MSB 31
+#define PHY_BB_TX_TIMING_2_TX_END_TO_A2_TX_OFF_LSB 24
+#define PHY_BB_TX_TIMING_2_TX_END_TO_A2_TX_OFF_MASK 0xff000000
+#define PHY_BB_TX_TIMING_2_TX_END_TO_A2_TX_OFF_GET(x) (((x) & 0xff000000) >> 24)
+#define PHY_BB_TX_TIMING_2_TX_END_TO_A2_TX_OFF_SET(x) (((x) << 24) & 0xff000000)
+
+/* macros for BB_tx_timing_3 */
+#define PHY_BB_TX_TIMING_3_ADDRESS 0x00009828
+#define PHY_BB_TX_TIMING_3_OFFSET 0x00009828
+#define PHY_BB_TX_TIMING_3_TX_END_TO_DAC_OFF_MSB 7
+#define PHY_BB_TX_TIMING_3_TX_END_TO_DAC_OFF_LSB 0
+#define PHY_BB_TX_TIMING_3_TX_END_TO_DAC_OFF_MASK 0x000000ff
+#define PHY_BB_TX_TIMING_3_TX_END_TO_DAC_OFF_GET(x) (((x) & 0x000000ff) >> 0)
+#define PHY_BB_TX_TIMING_3_TX_END_TO_DAC_OFF_SET(x) (((x) << 0) & 0x000000ff)
+#define PHY_BB_TX_TIMING_3_TX_FRAME_TO_THERM_CHAIN_ON_MSB 15
+#define PHY_BB_TX_TIMING_3_TX_FRAME_TO_THERM_CHAIN_ON_LSB 8
+#define PHY_BB_TX_TIMING_3_TX_FRAME_TO_THERM_CHAIN_ON_MASK 0x0000ff00
+#define PHY_BB_TX_TIMING_3_TX_FRAME_TO_THERM_CHAIN_ON_GET(x) (((x) & 0x0000ff00) >> 8)
+#define PHY_BB_TX_TIMING_3_TX_FRAME_TO_THERM_CHAIN_ON_SET(x) (((x) << 8) & 0x0000ff00)
+#define PHY_BB_TX_TIMING_3_TX_END_TO_A2_RX_ON_MSB 23
+#define PHY_BB_TX_TIMING_3_TX_END_TO_A2_RX_ON_LSB 16
+#define PHY_BB_TX_TIMING_3_TX_END_TO_A2_RX_ON_MASK 0x00ff0000
+#define PHY_BB_TX_TIMING_3_TX_END_TO_A2_RX_ON_GET(x) (((x) & 0x00ff0000) >> 16)
+#define PHY_BB_TX_TIMING_3_TX_END_TO_A2_RX_ON_SET(x) (((x) << 16) & 0x00ff0000)
+#define PHY_BB_TX_TIMING_3_TX_END_TO_ADC_ON_MSB 31
+#define PHY_BB_TX_TIMING_3_TX_END_TO_ADC_ON_LSB 24
+#define PHY_BB_TX_TIMING_3_TX_END_TO_ADC_ON_MASK 0xff000000
+#define PHY_BB_TX_TIMING_3_TX_END_TO_ADC_ON_GET(x) (((x) & 0xff000000) >> 24)
+#define PHY_BB_TX_TIMING_3_TX_END_TO_ADC_ON_SET(x) (((x) << 24) & 0xff000000)
+
+/* macros for BB_addac_parallel_control */
+#define PHY_BB_ADDAC_PARALLEL_CONTROL_ADDRESS 0x0000982c
+#define PHY_BB_ADDAC_PARALLEL_CONTROL_OFFSET 0x0000982c
+#define PHY_BB_ADDAC_PARALLEL_CONTROL_OFF_DACLPMODE_MSB 12
+#define PHY_BB_ADDAC_PARALLEL_CONTROL_OFF_DACLPMODE_LSB 12
+#define PHY_BB_ADDAC_PARALLEL_CONTROL_OFF_DACLPMODE_MASK 0x00001000
+#define PHY_BB_ADDAC_PARALLEL_CONTROL_OFF_DACLPMODE_GET(x) (((x) & 0x00001000) >> 12)
+#define PHY_BB_ADDAC_PARALLEL_CONTROL_OFF_DACLPMODE_SET(x) (((x) << 12) & 0x00001000)
+#define PHY_BB_ADDAC_PARALLEL_CONTROL_OFF_PWDDAC_MSB 13
+#define PHY_BB_ADDAC_PARALLEL_CONTROL_OFF_PWDDAC_LSB 13
+#define PHY_BB_ADDAC_PARALLEL_CONTROL_OFF_PWDDAC_MASK 0x00002000
+#define PHY_BB_ADDAC_PARALLEL_CONTROL_OFF_PWDDAC_GET(x) (((x) & 0x00002000) >> 13)
+#define PHY_BB_ADDAC_PARALLEL_CONTROL_OFF_PWDDAC_SET(x) (((x) << 13) & 0x00002000)
+#define PHY_BB_ADDAC_PARALLEL_CONTROL_OFF_PWDADC_MSB 15
+#define PHY_BB_ADDAC_PARALLEL_CONTROL_OFF_PWDADC_LSB 15
+#define PHY_BB_ADDAC_PARALLEL_CONTROL_OFF_PWDADC_MASK 0x00008000
+#define PHY_BB_ADDAC_PARALLEL_CONTROL_OFF_PWDADC_GET(x) (((x) & 0x00008000) >> 15)
+#define PHY_BB_ADDAC_PARALLEL_CONTROL_OFF_PWDADC_SET(x) (((x) << 15) & 0x00008000)
+#define PHY_BB_ADDAC_PARALLEL_CONTROL_ON_DACLPMODE_MSB 28
+#define PHY_BB_ADDAC_PARALLEL_CONTROL_ON_DACLPMODE_LSB 28
+#define PHY_BB_ADDAC_PARALLEL_CONTROL_ON_DACLPMODE_MASK 0x10000000
+#define PHY_BB_ADDAC_PARALLEL_CONTROL_ON_DACLPMODE_GET(x) (((x) & 0x10000000) >> 28)
+#define PHY_BB_ADDAC_PARALLEL_CONTROL_ON_DACLPMODE_SET(x) (((x) << 28) & 0x10000000)
+#define PHY_BB_ADDAC_PARALLEL_CONTROL_ON_PWDDAC_MSB 29
+#define PHY_BB_ADDAC_PARALLEL_CONTROL_ON_PWDDAC_LSB 29
+#define PHY_BB_ADDAC_PARALLEL_CONTROL_ON_PWDDAC_MASK 0x20000000
+#define PHY_BB_ADDAC_PARALLEL_CONTROL_ON_PWDDAC_GET(x) (((x) & 0x20000000) >> 29)
+#define PHY_BB_ADDAC_PARALLEL_CONTROL_ON_PWDDAC_SET(x) (((x) << 29) & 0x20000000)
+#define PHY_BB_ADDAC_PARALLEL_CONTROL_ON_PWDADC_MSB 31
+#define PHY_BB_ADDAC_PARALLEL_CONTROL_ON_PWDADC_LSB 31
+#define PHY_BB_ADDAC_PARALLEL_CONTROL_ON_PWDADC_MASK 0x80000000
+#define PHY_BB_ADDAC_PARALLEL_CONTROL_ON_PWDADC_GET(x) (((x) & 0x80000000) >> 31)
+#define PHY_BB_ADDAC_PARALLEL_CONTROL_ON_PWDADC_SET(x) (((x) << 31) & 0x80000000)
+
+/* macros for BB_xpa_timing_control */
+#define PHY_BB_XPA_TIMING_CONTROL_ADDRESS 0x00009834
+#define PHY_BB_XPA_TIMING_CONTROL_OFFSET 0x00009834
+#define PHY_BB_XPA_TIMING_CONTROL_TX_FRAME_TO_XPAA_ON_MSB 7
+#define PHY_BB_XPA_TIMING_CONTROL_TX_FRAME_TO_XPAA_ON_LSB 0
+#define PHY_BB_XPA_TIMING_CONTROL_TX_FRAME_TO_XPAA_ON_MASK 0x000000ff
+#define PHY_BB_XPA_TIMING_CONTROL_TX_FRAME_TO_XPAA_ON_GET(x) (((x) & 0x000000ff) >> 0)
+#define PHY_BB_XPA_TIMING_CONTROL_TX_FRAME_TO_XPAA_ON_SET(x) (((x) << 0) & 0x000000ff)
+#define PHY_BB_XPA_TIMING_CONTROL_TX_FRAME_TO_XPAB_ON_MSB 15
+#define PHY_BB_XPA_TIMING_CONTROL_TX_FRAME_TO_XPAB_ON_LSB 8
+#define PHY_BB_XPA_TIMING_CONTROL_TX_FRAME_TO_XPAB_ON_MASK 0x0000ff00
+#define PHY_BB_XPA_TIMING_CONTROL_TX_FRAME_TO_XPAB_ON_GET(x) (((x) & 0x0000ff00) >> 8)
+#define PHY_BB_XPA_TIMING_CONTROL_TX_FRAME_TO_XPAB_ON_SET(x) (((x) << 8) & 0x0000ff00)
+#define PHY_BB_XPA_TIMING_CONTROL_TX_END_TO_XPAA_OFF_MSB 23
+#define PHY_BB_XPA_TIMING_CONTROL_TX_END_TO_XPAA_OFF_LSB 16
+#define PHY_BB_XPA_TIMING_CONTROL_TX_END_TO_XPAA_OFF_MASK 0x00ff0000
+#define PHY_BB_XPA_TIMING_CONTROL_TX_END_TO_XPAA_OFF_GET(x) (((x) & 0x00ff0000) >> 16)
+#define PHY_BB_XPA_TIMING_CONTROL_TX_END_TO_XPAA_OFF_SET(x) (((x) << 16) & 0x00ff0000)
+#define PHY_BB_XPA_TIMING_CONTROL_TX_END_TO_XPAB_OFF_MSB 31
+#define PHY_BB_XPA_TIMING_CONTROL_TX_END_TO_XPAB_OFF_LSB 24
+#define PHY_BB_XPA_TIMING_CONTROL_TX_END_TO_XPAB_OFF_MASK 0xff000000
+#define PHY_BB_XPA_TIMING_CONTROL_TX_END_TO_XPAB_OFF_GET(x) (((x) & 0xff000000) >> 24)
+#define PHY_BB_XPA_TIMING_CONTROL_TX_END_TO_XPAB_OFF_SET(x) (((x) << 24) & 0xff000000)
+
+/* macros for BB_misc_pa_control */
+#define PHY_BB_MISC_PA_CONTROL_ADDRESS 0x00009838
+#define PHY_BB_MISC_PA_CONTROL_OFFSET 0x00009838
+#define PHY_BB_MISC_PA_CONTROL_XPAA_ACTIVE_HIGH_MSB 0
+#define PHY_BB_MISC_PA_CONTROL_XPAA_ACTIVE_HIGH_LSB 0
+#define PHY_BB_MISC_PA_CONTROL_XPAA_ACTIVE_HIGH_MASK 0x00000001
+#define PHY_BB_MISC_PA_CONTROL_XPAA_ACTIVE_HIGH_GET(x) (((x) & 0x00000001) >> 0)
+#define PHY_BB_MISC_PA_CONTROL_XPAA_ACTIVE_HIGH_SET(x) (((x) << 0) & 0x00000001)
+#define PHY_BB_MISC_PA_CONTROL_XPAB_ACTIVE_HIGH_MSB 1
+#define PHY_BB_MISC_PA_CONTROL_XPAB_ACTIVE_HIGH_LSB 1
+#define PHY_BB_MISC_PA_CONTROL_XPAB_ACTIVE_HIGH_MASK 0x00000002
+#define PHY_BB_MISC_PA_CONTROL_XPAB_ACTIVE_HIGH_GET(x) (((x) & 0x00000002) >> 1)
+#define PHY_BB_MISC_PA_CONTROL_XPAB_ACTIVE_HIGH_SET(x) (((x) << 1) & 0x00000002)
+#define PHY_BB_MISC_PA_CONTROL_ENABLE_XPAA_MSB 2
+#define PHY_BB_MISC_PA_CONTROL_ENABLE_XPAA_LSB 2
+#define PHY_BB_MISC_PA_CONTROL_ENABLE_XPAA_MASK 0x00000004
+#define PHY_BB_MISC_PA_CONTROL_ENABLE_XPAA_GET(x) (((x) & 0x00000004) >> 2)
+#define PHY_BB_MISC_PA_CONTROL_ENABLE_XPAA_SET(x) (((x) << 2) & 0x00000004)
+#define PHY_BB_MISC_PA_CONTROL_ENABLE_XPAB_MSB 3
+#define PHY_BB_MISC_PA_CONTROL_ENABLE_XPAB_LSB 3
+#define PHY_BB_MISC_PA_CONTROL_ENABLE_XPAB_MASK 0x00000008
+#define PHY_BB_MISC_PA_CONTROL_ENABLE_XPAB_GET(x) (((x) & 0x00000008) >> 3)
+#define PHY_BB_MISC_PA_CONTROL_ENABLE_XPAB_SET(x) (((x) << 3) & 0x00000008)
+
+/* macros for BB_tstdac_constant */
+#define PHY_BB_TSTDAC_CONSTANT_ADDRESS 0x0000983c
+#define PHY_BB_TSTDAC_CONSTANT_OFFSET 0x0000983c
+#define PHY_BB_TSTDAC_CONSTANT_CF_TSTDAC_CONSTANT_I_MSB 10
+#define PHY_BB_TSTDAC_CONSTANT_CF_TSTDAC_CONSTANT_I_LSB 0
+#define PHY_BB_TSTDAC_CONSTANT_CF_TSTDAC_CONSTANT_I_MASK 0x000007ff
+#define PHY_BB_TSTDAC_CONSTANT_CF_TSTDAC_CONSTANT_I_GET(x) (((x) & 0x000007ff) >> 0)
+#define PHY_BB_TSTDAC_CONSTANT_CF_TSTDAC_CONSTANT_I_SET(x) (((x) << 0) & 0x000007ff)
+#define PHY_BB_TSTDAC_CONSTANT_CF_TSTDAC_CONSTANT_Q_MSB 21
+#define PHY_BB_TSTDAC_CONSTANT_CF_TSTDAC_CONSTANT_Q_LSB 11
+#define PHY_BB_TSTDAC_CONSTANT_CF_TSTDAC_CONSTANT_Q_MASK 0x003ff800
+#define PHY_BB_TSTDAC_CONSTANT_CF_TSTDAC_CONSTANT_Q_GET(x) (((x) & 0x003ff800) >> 11)
+#define PHY_BB_TSTDAC_CONSTANT_CF_TSTDAC_CONSTANT_Q_SET(x) (((x) << 11) & 0x003ff800)
+
+/* macros for BB_find_signal_low */
+#define PHY_BB_FIND_SIGNAL_LOW_ADDRESS 0x00009840
+#define PHY_BB_FIND_SIGNAL_LOW_OFFSET 0x00009840
+#define PHY_BB_FIND_SIGNAL_LOW_RELSTEP_LOW_MSB 5
+#define PHY_BB_FIND_SIGNAL_LOW_RELSTEP_LOW_LSB 0
+#define PHY_BB_FIND_SIGNAL_LOW_RELSTEP_LOW_MASK 0x0000003f
+#define PHY_BB_FIND_SIGNAL_LOW_RELSTEP_LOW_GET(x) (((x) & 0x0000003f) >> 0)
+#define PHY_BB_FIND_SIGNAL_LOW_RELSTEP_LOW_SET(x) (((x) << 0) & 0x0000003f)
+#define PHY_BB_FIND_SIGNAL_LOW_FIRSTEP_LOW_MSB 11
+#define PHY_BB_FIND_SIGNAL_LOW_FIRSTEP_LOW_LSB 6
+#define PHY_BB_FIND_SIGNAL_LOW_FIRSTEP_LOW_MASK 0x00000fc0
+#define PHY_BB_FIND_SIGNAL_LOW_FIRSTEP_LOW_GET(x) (((x) & 0x00000fc0) >> 6)
+#define PHY_BB_FIND_SIGNAL_LOW_FIRSTEP_LOW_SET(x) (((x) << 6) & 0x00000fc0)
+#define PHY_BB_FIND_SIGNAL_LOW_FIRPWR_LOW_MSB 19
+#define PHY_BB_FIND_SIGNAL_LOW_FIRPWR_LOW_LSB 12
+#define PHY_BB_FIND_SIGNAL_LOW_FIRPWR_LOW_MASK 0x000ff000
+#define PHY_BB_FIND_SIGNAL_LOW_FIRPWR_LOW_GET(x) (((x) & 0x000ff000) >> 12)
+#define PHY_BB_FIND_SIGNAL_LOW_FIRPWR_LOW_SET(x) (((x) << 12) & 0x000ff000)
+#define PHY_BB_FIND_SIGNAL_LOW_YCOK_MAX_LOW_MSB 23
+#define PHY_BB_FIND_SIGNAL_LOW_YCOK_MAX_LOW_LSB 20
+#define PHY_BB_FIND_SIGNAL_LOW_YCOK_MAX_LOW_MASK 0x00f00000
+#define PHY_BB_FIND_SIGNAL_LOW_YCOK_MAX_LOW_GET(x) (((x) & 0x00f00000) >> 20)
+#define PHY_BB_FIND_SIGNAL_LOW_YCOK_MAX_LOW_SET(x) (((x) << 20) & 0x00f00000)
+#define PHY_BB_FIND_SIGNAL_LOW_LONG_SC_THRESH_MSB 30
+#define PHY_BB_FIND_SIGNAL_LOW_LONG_SC_THRESH_LSB 24
+#define PHY_BB_FIND_SIGNAL_LOW_LONG_SC_THRESH_MASK 0x7f000000
+#define PHY_BB_FIND_SIGNAL_LOW_LONG_SC_THRESH_GET(x) (((x) & 0x7f000000) >> 24)
+#define PHY_BB_FIND_SIGNAL_LOW_LONG_SC_THRESH_SET(x) (((x) << 24) & 0x7f000000)
+
+/* macros for BB_settling_time */
+#define PHY_BB_SETTLING_TIME_ADDRESS 0x00009844
+#define PHY_BB_SETTLING_TIME_OFFSET 0x00009844
+#define PHY_BB_SETTLING_TIME_AGC_SETTLING_MSB 6
+#define PHY_BB_SETTLING_TIME_AGC_SETTLING_LSB 0
+#define PHY_BB_SETTLING_TIME_AGC_SETTLING_MASK 0x0000007f
+#define PHY_BB_SETTLING_TIME_AGC_SETTLING_GET(x) (((x) & 0x0000007f) >> 0)
+#define PHY_BB_SETTLING_TIME_AGC_SETTLING_SET(x) (((x) << 0) & 0x0000007f)
+#define PHY_BB_SETTLING_TIME_SWITCH_SETTLING_MSB 13
+#define PHY_BB_SETTLING_TIME_SWITCH_SETTLING_LSB 7
+#define PHY_BB_SETTLING_TIME_SWITCH_SETTLING_MASK 0x00003f80
+#define PHY_BB_SETTLING_TIME_SWITCH_SETTLING_GET(x) (((x) & 0x00003f80) >> 7)
+#define PHY_BB_SETTLING_TIME_SWITCH_SETTLING_SET(x) (((x) << 7) & 0x00003f80)
+#define PHY_BB_SETTLING_TIME_ADCSAT_THRL_MSB 19
+#define PHY_BB_SETTLING_TIME_ADCSAT_THRL_LSB 14
+#define PHY_BB_SETTLING_TIME_ADCSAT_THRL_MASK 0x000fc000
+#define PHY_BB_SETTLING_TIME_ADCSAT_THRL_GET(x) (((x) & 0x000fc000) >> 14)
+#define PHY_BB_SETTLING_TIME_ADCSAT_THRL_SET(x) (((x) << 14) & 0x000fc000)
+#define PHY_BB_SETTLING_TIME_ADCSAT_THRH_MSB 25
+#define PHY_BB_SETTLING_TIME_ADCSAT_THRH_LSB 20
+#define PHY_BB_SETTLING_TIME_ADCSAT_THRH_MASK 0x03f00000
+#define PHY_BB_SETTLING_TIME_ADCSAT_THRH_GET(x) (((x) & 0x03f00000) >> 20)
+#define PHY_BB_SETTLING_TIME_ADCSAT_THRH_SET(x) (((x) << 20) & 0x03f00000)
+#define PHY_BB_SETTLING_TIME_LBRESET_ADVANCE_MSB 29
+#define PHY_BB_SETTLING_TIME_LBRESET_ADVANCE_LSB 26
+#define PHY_BB_SETTLING_TIME_LBRESET_ADVANCE_MASK 0x3c000000
+#define PHY_BB_SETTLING_TIME_LBRESET_ADVANCE_GET(x) (((x) & 0x3c000000) >> 26)
+#define PHY_BB_SETTLING_TIME_LBRESET_ADVANCE_SET(x) (((x) << 26) & 0x3c000000)
+
+/* macros for BB_gain_force_max_gains_b0 */
+#define PHY_BB_GAIN_FORCE_MAX_GAINS_B0_ADDRESS 0x00009848
+#define PHY_BB_GAIN_FORCE_MAX_GAINS_B0_OFFSET 0x00009848
+#define PHY_BB_GAIN_FORCE_MAX_GAINS_B0_XATTEN1_HYST_MARGIN_0_MSB 13
+#define PHY_BB_GAIN_FORCE_MAX_GAINS_B0_XATTEN1_HYST_MARGIN_0_LSB 7
+#define PHY_BB_GAIN_FORCE_MAX_GAINS_B0_XATTEN1_HYST_MARGIN_0_MASK 0x00003f80
+#define PHY_BB_GAIN_FORCE_MAX_GAINS_B0_XATTEN1_HYST_MARGIN_0_GET(x) (((x) & 0x00003f80) >> 7)
+#define PHY_BB_GAIN_FORCE_MAX_GAINS_B0_XATTEN1_HYST_MARGIN_0_SET(x) (((x) << 7) & 0x00003f80)
+#define PHY_BB_GAIN_FORCE_MAX_GAINS_B0_XATTEN2_HYST_MARGIN_0_MSB 20
+#define PHY_BB_GAIN_FORCE_MAX_GAINS_B0_XATTEN2_HYST_MARGIN_0_LSB 14
+#define PHY_BB_GAIN_FORCE_MAX_GAINS_B0_XATTEN2_HYST_MARGIN_0_MASK 0x001fc000
+#define PHY_BB_GAIN_FORCE_MAX_GAINS_B0_XATTEN2_HYST_MARGIN_0_GET(x) (((x) & 0x001fc000) >> 14)
+#define PHY_BB_GAIN_FORCE_MAX_GAINS_B0_XATTEN2_HYST_MARGIN_0_SET(x) (((x) << 14) & 0x001fc000)
+#define PHY_BB_GAIN_FORCE_MAX_GAINS_B0_GAIN_FORCE_MSB 21
+#define PHY_BB_GAIN_FORCE_MAX_GAINS_B0_GAIN_FORCE_LSB 21
+#define PHY_BB_GAIN_FORCE_MAX_GAINS_B0_GAIN_FORCE_MASK 0x00200000
+#define PHY_BB_GAIN_FORCE_MAX_GAINS_B0_GAIN_FORCE_GET(x) (((x) & 0x00200000) >> 21)
+#define PHY_BB_GAIN_FORCE_MAX_GAINS_B0_GAIN_FORCE_SET(x) (((x) << 21) & 0x00200000)
+#define PHY_BB_GAIN_FORCE_MAX_GAINS_B0_ENABLE_SHARED_RX_MSB 31
+#define PHY_BB_GAIN_FORCE_MAX_GAINS_B0_ENABLE_SHARED_RX_LSB 31
+#define PHY_BB_GAIN_FORCE_MAX_GAINS_B0_ENABLE_SHARED_RX_MASK 0x80000000
+#define PHY_BB_GAIN_FORCE_MAX_GAINS_B0_ENABLE_SHARED_RX_GET(x) (((x) & 0x80000000) >> 31)
+#define PHY_BB_GAIN_FORCE_MAX_GAINS_B0_ENABLE_SHARED_RX_SET(x) (((x) << 31) & 0x80000000)
+
+/* macros for BB_gains_min_offsets_b0 */
+#define PHY_BB_GAINS_MIN_OFFSETS_B0_ADDRESS 0x0000984c
+#define PHY_BB_GAINS_MIN_OFFSETS_B0_OFFSET 0x0000984c
+#define PHY_BB_GAINS_MIN_OFFSETS_B0_OFFSETC1_MSB 6
+#define PHY_BB_GAINS_MIN_OFFSETS_B0_OFFSETC1_LSB 0
+#define PHY_BB_GAINS_MIN_OFFSETS_B0_OFFSETC1_MASK 0x0000007f
+#define PHY_BB_GAINS_MIN_OFFSETS_B0_OFFSETC1_GET(x) (((x) & 0x0000007f) >> 0)
+#define PHY_BB_GAINS_MIN_OFFSETS_B0_OFFSETC1_SET(x) (((x) << 0) & 0x0000007f)
+#define PHY_BB_GAINS_MIN_OFFSETS_B0_OFFSETC2_MSB 11
+#define PHY_BB_GAINS_MIN_OFFSETS_B0_OFFSETC2_LSB 7
+#define PHY_BB_GAINS_MIN_OFFSETS_B0_OFFSETC2_MASK 0x00000f80
+#define PHY_BB_GAINS_MIN_OFFSETS_B0_OFFSETC2_GET(x) (((x) & 0x00000f80) >> 7)
+#define PHY_BB_GAINS_MIN_OFFSETS_B0_OFFSETC2_SET(x) (((x) << 7) & 0x00000f80)
+#define PHY_BB_GAINS_MIN_OFFSETS_B0_OFFSETC3_MSB 16
+#define PHY_BB_GAINS_MIN_OFFSETS_B0_OFFSETC3_LSB 12
+#define PHY_BB_GAINS_MIN_OFFSETS_B0_OFFSETC3_MASK 0x0001f000
+#define PHY_BB_GAINS_MIN_OFFSETS_B0_OFFSETC3_GET(x) (((x) & 0x0001f000) >> 12)
+#define PHY_BB_GAINS_MIN_OFFSETS_B0_OFFSETC3_SET(x) (((x) << 12) & 0x0001f000)
+#define PHY_BB_GAINS_MIN_OFFSETS_B0_RF_GAIN_F_0_MSB 24
+#define PHY_BB_GAINS_MIN_OFFSETS_B0_RF_GAIN_F_0_LSB 17
+#define PHY_BB_GAINS_MIN_OFFSETS_B0_RF_GAIN_F_0_MASK 0x01fe0000
+#define PHY_BB_GAINS_MIN_OFFSETS_B0_RF_GAIN_F_0_GET(x) (((x) & 0x01fe0000) >> 17)
+#define PHY_BB_GAINS_MIN_OFFSETS_B0_RF_GAIN_F_0_SET(x) (((x) << 17) & 0x01fe0000)
+#define PHY_BB_GAINS_MIN_OFFSETS_B0_XATTEN1_SW_F_0_MSB 25
+#define PHY_BB_GAINS_MIN_OFFSETS_B0_XATTEN1_SW_F_0_LSB 25
+#define PHY_BB_GAINS_MIN_OFFSETS_B0_XATTEN1_SW_F_0_MASK 0x02000000
+#define PHY_BB_GAINS_MIN_OFFSETS_B0_XATTEN1_SW_F_0_GET(x) (((x) & 0x02000000) >> 25)
+#define PHY_BB_GAINS_MIN_OFFSETS_B0_XATTEN1_SW_F_0_SET(x) (((x) << 25) & 0x02000000)
+#define PHY_BB_GAINS_MIN_OFFSETS_B0_XATTEN2_SW_F_0_MSB 26
+#define PHY_BB_GAINS_MIN_OFFSETS_B0_XATTEN2_SW_F_0_LSB 26
+#define PHY_BB_GAINS_MIN_OFFSETS_B0_XATTEN2_SW_F_0_MASK 0x04000000
+#define PHY_BB_GAINS_MIN_OFFSETS_B0_XATTEN2_SW_F_0_GET(x) (((x) & 0x04000000) >> 26)
+#define PHY_BB_GAINS_MIN_OFFSETS_B0_XATTEN2_SW_F_0_SET(x) (((x) << 26) & 0x04000000)
+
+/* macros for BB_desired_sigsize */
+#define PHY_BB_DESIRED_SIGSIZE_ADDRESS 0x00009850
+#define PHY_BB_DESIRED_SIGSIZE_OFFSET 0x00009850
+#define PHY_BB_DESIRED_SIGSIZE_ADC_DESIRED_SIZE_MSB 7
+#define PHY_BB_DESIRED_SIGSIZE_ADC_DESIRED_SIZE_LSB 0
+#define PHY_BB_DESIRED_SIGSIZE_ADC_DESIRED_SIZE_MASK 0x000000ff
+#define PHY_BB_DESIRED_SIGSIZE_ADC_DESIRED_SIZE_GET(x) (((x) & 0x000000ff) >> 0)
+#define PHY_BB_DESIRED_SIGSIZE_ADC_DESIRED_SIZE_SET(x) (((x) << 0) & 0x000000ff)
+#define PHY_BB_DESIRED_SIGSIZE_TOTAL_DESIRED_MSB 27
+#define PHY_BB_DESIRED_SIGSIZE_TOTAL_DESIRED_LSB 20
+#define PHY_BB_DESIRED_SIGSIZE_TOTAL_DESIRED_MASK 0x0ff00000
+#define PHY_BB_DESIRED_SIGSIZE_TOTAL_DESIRED_GET(x) (((x) & 0x0ff00000) >> 20)
+#define PHY_BB_DESIRED_SIGSIZE_TOTAL_DESIRED_SET(x) (((x) << 20) & 0x0ff00000)
+#define PHY_BB_DESIRED_SIGSIZE_INIT_GC_COUNT_MAX_MSB 29
+#define PHY_BB_DESIRED_SIGSIZE_INIT_GC_COUNT_MAX_LSB 28
+#define PHY_BB_DESIRED_SIGSIZE_INIT_GC_COUNT_MAX_MASK 0x30000000
+#define PHY_BB_DESIRED_SIGSIZE_INIT_GC_COUNT_MAX_GET(x) (((x) & 0x30000000) >> 28)
+#define PHY_BB_DESIRED_SIGSIZE_INIT_GC_COUNT_MAX_SET(x) (((x) << 28) & 0x30000000)
+#define PHY_BB_DESIRED_SIGSIZE_REDUCE_INIT_GC_COUNT_MSB 30
+#define PHY_BB_DESIRED_SIGSIZE_REDUCE_INIT_GC_COUNT_LSB 30
+#define PHY_BB_DESIRED_SIGSIZE_REDUCE_INIT_GC_COUNT_MASK 0x40000000
+#define PHY_BB_DESIRED_SIGSIZE_REDUCE_INIT_GC_COUNT_GET(x) (((x) & 0x40000000) >> 30)
+#define PHY_BB_DESIRED_SIGSIZE_REDUCE_INIT_GC_COUNT_SET(x) (((x) << 30) & 0x40000000)
+#define PHY_BB_DESIRED_SIGSIZE_ENA_INIT_GAIN_MSB 31
+#define PHY_BB_DESIRED_SIGSIZE_ENA_INIT_GAIN_LSB 31
+#define PHY_BB_DESIRED_SIGSIZE_ENA_INIT_GAIN_MASK 0x80000000
+#define PHY_BB_DESIRED_SIGSIZE_ENA_INIT_GAIN_GET(x) (((x) & 0x80000000) >> 31)
+#define PHY_BB_DESIRED_SIGSIZE_ENA_INIT_GAIN_SET(x) (((x) << 31) & 0x80000000)
+
+/* macros for BB_timing_control_3a */
+#define PHY_BB_TIMING_CONTROL_3A_ADDRESS 0x00009854
+#define PHY_BB_TIMING_CONTROL_3A_OFFSET 0x00009854
+#define PHY_BB_TIMING_CONTROL_3A_STE_THR_HI_RSSI_MSB 6
+#define PHY_BB_TIMING_CONTROL_3A_STE_THR_HI_RSSI_LSB 0
+#define PHY_BB_TIMING_CONTROL_3A_STE_THR_HI_RSSI_MASK 0x0000007f
+#define PHY_BB_TIMING_CONTROL_3A_STE_THR_HI_RSSI_GET(x) (((x) & 0x0000007f) >> 0)
+#define PHY_BB_TIMING_CONTROL_3A_STE_THR_HI_RSSI_SET(x) (((x) << 0) & 0x0000007f)
+
+/* macros for BB_find_signal */
+#define PHY_BB_FIND_SIGNAL_ADDRESS 0x00009858
+#define PHY_BB_FIND_SIGNAL_OFFSET 0x00009858
+#define PHY_BB_FIND_SIGNAL_RELSTEP_MSB 5
+#define PHY_BB_FIND_SIGNAL_RELSTEP_LSB 0
+#define PHY_BB_FIND_SIGNAL_RELSTEP_MASK 0x0000003f
+#define PHY_BB_FIND_SIGNAL_RELSTEP_GET(x) (((x) & 0x0000003f) >> 0)
+#define PHY_BB_FIND_SIGNAL_RELSTEP_SET(x) (((x) << 0) & 0x0000003f)
+#define PHY_BB_FIND_SIGNAL_RELPWR_MSB 11
+#define PHY_BB_FIND_SIGNAL_RELPWR_LSB 6
+#define PHY_BB_FIND_SIGNAL_RELPWR_MASK 0x00000fc0
+#define PHY_BB_FIND_SIGNAL_RELPWR_GET(x) (((x) & 0x00000fc0) >> 6)
+#define PHY_BB_FIND_SIGNAL_RELPWR_SET(x) (((x) << 6) & 0x00000fc0)
+#define PHY_BB_FIND_SIGNAL_FIRSTEP_MSB 17
+#define PHY_BB_FIND_SIGNAL_FIRSTEP_LSB 12
+#define PHY_BB_FIND_SIGNAL_FIRSTEP_MASK 0x0003f000
+#define PHY_BB_FIND_SIGNAL_FIRSTEP_GET(x) (((x) & 0x0003f000) >> 12)
+#define PHY_BB_FIND_SIGNAL_FIRSTEP_SET(x) (((x) << 12) & 0x0003f000)
+#define PHY_BB_FIND_SIGNAL_FIRPWR_MSB 25
+#define PHY_BB_FIND_SIGNAL_FIRPWR_LSB 18
+#define PHY_BB_FIND_SIGNAL_FIRPWR_MASK 0x03fc0000
+#define PHY_BB_FIND_SIGNAL_FIRPWR_GET(x) (((x) & 0x03fc0000) >> 18)
+#define PHY_BB_FIND_SIGNAL_FIRPWR_SET(x) (((x) << 18) & 0x03fc0000)
+#define PHY_BB_FIND_SIGNAL_M1COUNT_MAX_MSB 31
+#define PHY_BB_FIND_SIGNAL_M1COUNT_MAX_LSB 26
+#define PHY_BB_FIND_SIGNAL_M1COUNT_MAX_MASK 0xfc000000
+#define PHY_BB_FIND_SIGNAL_M1COUNT_MAX_GET(x) (((x) & 0xfc000000) >> 26)
+#define PHY_BB_FIND_SIGNAL_M1COUNT_MAX_SET(x) (((x) << 26) & 0xfc000000)
+
+/* macros for BB_agc */
+#define PHY_BB_AGC_ADDRESS 0x0000985c
+#define PHY_BB_AGC_OFFSET 0x0000985c
+#define PHY_BB_AGC_COARSEPWR_CONST_MSB 6
+#define PHY_BB_AGC_COARSEPWR_CONST_LSB 0
+#define PHY_BB_AGC_COARSEPWR_CONST_MASK 0x0000007f
+#define PHY_BB_AGC_COARSEPWR_CONST_GET(x) (((x) & 0x0000007f) >> 0)
+#define PHY_BB_AGC_COARSEPWR_CONST_SET(x) (((x) << 0) & 0x0000007f)
+#define PHY_BB_AGC_COARSE_LOW_MSB 14
+#define PHY_BB_AGC_COARSE_LOW_LSB 7
+#define PHY_BB_AGC_COARSE_LOW_MASK 0x00007f80
+#define PHY_BB_AGC_COARSE_LOW_GET(x) (((x) & 0x00007f80) >> 7)
+#define PHY_BB_AGC_COARSE_LOW_SET(x) (((x) << 7) & 0x00007f80)
+#define PHY_BB_AGC_COARSE_HIGH_MSB 21
+#define PHY_BB_AGC_COARSE_HIGH_LSB 15
+#define PHY_BB_AGC_COARSE_HIGH_MASK 0x003f8000
+#define PHY_BB_AGC_COARSE_HIGH_GET(x) (((x) & 0x003f8000) >> 15)
+#define PHY_BB_AGC_COARSE_HIGH_SET(x) (((x) << 15) & 0x003f8000)
+#define PHY_BB_AGC_QUICK_DROP_MSB 29
+#define PHY_BB_AGC_QUICK_DROP_LSB 22
+#define PHY_BB_AGC_QUICK_DROP_MASK 0x3fc00000
+#define PHY_BB_AGC_QUICK_DROP_GET(x) (((x) & 0x3fc00000) >> 22)
+#define PHY_BB_AGC_QUICK_DROP_SET(x) (((x) << 22) & 0x3fc00000)
+#define PHY_BB_AGC_RSSI_OUT_SELECT_MSB 31
+#define PHY_BB_AGC_RSSI_OUT_SELECT_LSB 30
+#define PHY_BB_AGC_RSSI_OUT_SELECT_MASK 0xc0000000
+#define PHY_BB_AGC_RSSI_OUT_SELECT_GET(x) (((x) & 0xc0000000) >> 30)
+#define PHY_BB_AGC_RSSI_OUT_SELECT_SET(x) (((x) << 30) & 0xc0000000)
+
+/* macros for BB_agc_control */
+#define PHY_BB_AGC_CONTROL_ADDRESS 0x00009860
+#define PHY_BB_AGC_CONTROL_OFFSET 0x00009860
+#define PHY_BB_AGC_CONTROL_DO_CALIBRATE_MSB 0
+#define PHY_BB_AGC_CONTROL_DO_CALIBRATE_LSB 0
+#define PHY_BB_AGC_CONTROL_DO_CALIBRATE_MASK 0x00000001
+#define PHY_BB_AGC_CONTROL_DO_CALIBRATE_GET(x) (((x) & 0x00000001) >> 0)
+#define PHY_BB_AGC_CONTROL_DO_CALIBRATE_SET(x) (((x) << 0) & 0x00000001)
+#define PHY_BB_AGC_CONTROL_DO_NOISEFLOOR_MSB 1
+#define PHY_BB_AGC_CONTROL_DO_NOISEFLOOR_LSB 1
+#define PHY_BB_AGC_CONTROL_DO_NOISEFLOOR_MASK 0x00000002
+#define PHY_BB_AGC_CONTROL_DO_NOISEFLOOR_GET(x) (((x) & 0x00000002) >> 1)
+#define PHY_BB_AGC_CONTROL_DO_NOISEFLOOR_SET(x) (((x) << 1) & 0x00000002)
+#define PHY_BB_AGC_CONTROL_MIN_NUM_GAIN_CHANGE_MSB 5
+#define PHY_BB_AGC_CONTROL_MIN_NUM_GAIN_CHANGE_LSB 3
+#define PHY_BB_AGC_CONTROL_MIN_NUM_GAIN_CHANGE_MASK 0x00000038
+#define PHY_BB_AGC_CONTROL_MIN_NUM_GAIN_CHANGE_GET(x) (((x) & 0x00000038) >> 3)
+#define PHY_BB_AGC_CONTROL_MIN_NUM_GAIN_CHANGE_SET(x) (((x) << 3) & 0x00000038)
+#define PHY_BB_AGC_CONTROL_YCOK_MAX_MSB 9
+#define PHY_BB_AGC_CONTROL_YCOK_MAX_LSB 6
+#define PHY_BB_AGC_CONTROL_YCOK_MAX_MASK 0x000003c0
+#define PHY_BB_AGC_CONTROL_YCOK_MAX_GET(x) (((x) & 0x000003c0) >> 6)
+#define PHY_BB_AGC_CONTROL_YCOK_MAX_SET(x) (((x) << 6) & 0x000003c0)
+#define PHY_BB_AGC_CONTROL_LEAKY_BUCKET_ENABLE_MSB 10
+#define PHY_BB_AGC_CONTROL_LEAKY_BUCKET_ENABLE_LSB 10
+#define PHY_BB_AGC_CONTROL_LEAKY_BUCKET_ENABLE_MASK 0x00000400
+#define PHY_BB_AGC_CONTROL_LEAKY_BUCKET_ENABLE_GET(x) (((x) & 0x00000400) >> 10)
+#define PHY_BB_AGC_CONTROL_LEAKY_BUCKET_ENABLE_SET(x) (((x) << 10) & 0x00000400)
+#define PHY_BB_AGC_CONTROL_CAL_ENABLE_MSB 11
+#define PHY_BB_AGC_CONTROL_CAL_ENABLE_LSB 11
+#define PHY_BB_AGC_CONTROL_CAL_ENABLE_MASK 0x00000800
+#define PHY_BB_AGC_CONTROL_CAL_ENABLE_GET(x) (((x) & 0x00000800) >> 11)
+#define PHY_BB_AGC_CONTROL_CAL_ENABLE_SET(x) (((x) << 11) & 0x00000800)
+#define PHY_BB_AGC_CONTROL_USE_TABLE_SEED_MSB 12
+#define PHY_BB_AGC_CONTROL_USE_TABLE_SEED_LSB 12
+#define PHY_BB_AGC_CONTROL_USE_TABLE_SEED_MASK 0x00001000
+#define PHY_BB_AGC_CONTROL_USE_TABLE_SEED_GET(x) (((x) & 0x00001000) >> 12)
+#define PHY_BB_AGC_CONTROL_USE_TABLE_SEED_SET(x) (((x) << 12) & 0x00001000)
+#define PHY_BB_AGC_CONTROL_AGC_UPDATE_TABLE_SEED_MSB 13
+#define PHY_BB_AGC_CONTROL_AGC_UPDATE_TABLE_SEED_LSB 13
+#define PHY_BB_AGC_CONTROL_AGC_UPDATE_TABLE_SEED_MASK 0x00002000
+#define PHY_BB_AGC_CONTROL_AGC_UPDATE_TABLE_SEED_GET(x) (((x) & 0x00002000) >> 13)
+#define PHY_BB_AGC_CONTROL_AGC_UPDATE_TABLE_SEED_SET(x) (((x) << 13) & 0x00002000)
+#define PHY_BB_AGC_CONTROL_ENABLE_NOISEFLOOR_MSB 15
+#define PHY_BB_AGC_CONTROL_ENABLE_NOISEFLOOR_LSB 15
+#define PHY_BB_AGC_CONTROL_ENABLE_NOISEFLOOR_MASK 0x00008000
+#define PHY_BB_AGC_CONTROL_ENABLE_NOISEFLOOR_GET(x) (((x) & 0x00008000) >> 15)
+#define PHY_BB_AGC_CONTROL_ENABLE_NOISEFLOOR_SET(x) (((x) << 15) & 0x00008000)
+#define PHY_BB_AGC_CONTROL_ENABLE_FLTR_CAL_MSB 16
+#define PHY_BB_AGC_CONTROL_ENABLE_FLTR_CAL_LSB 16
+#define PHY_BB_AGC_CONTROL_ENABLE_FLTR_CAL_MASK 0x00010000
+#define PHY_BB_AGC_CONTROL_ENABLE_FLTR_CAL_GET(x) (((x) & 0x00010000) >> 16)
+#define PHY_BB_AGC_CONTROL_ENABLE_FLTR_CAL_SET(x) (((x) << 16) & 0x00010000)
+#define PHY_BB_AGC_CONTROL_NO_UPDATE_NOISEFLOOR_MSB 17
+#define PHY_BB_AGC_CONTROL_NO_UPDATE_NOISEFLOOR_LSB 17
+#define PHY_BB_AGC_CONTROL_NO_UPDATE_NOISEFLOOR_MASK 0x00020000
+#define PHY_BB_AGC_CONTROL_NO_UPDATE_NOISEFLOOR_GET(x) (((x) & 0x00020000) >> 17)
+#define PHY_BB_AGC_CONTROL_NO_UPDATE_NOISEFLOOR_SET(x) (((x) << 17) & 0x00020000)
+#define PHY_BB_AGC_CONTROL_EXTEND_NF_PWR_MEAS_MSB 18
+#define PHY_BB_AGC_CONTROL_EXTEND_NF_PWR_MEAS_LSB 18
+#define PHY_BB_AGC_CONTROL_EXTEND_NF_PWR_MEAS_MASK 0x00040000
+#define PHY_BB_AGC_CONTROL_EXTEND_NF_PWR_MEAS_GET(x) (((x) & 0x00040000) >> 18)
+#define PHY_BB_AGC_CONTROL_EXTEND_NF_PWR_MEAS_SET(x) (((x) << 18) & 0x00040000)
+#define PHY_BB_AGC_CONTROL_CLC_SUCCESS_MSB 19
+#define PHY_BB_AGC_CONTROL_CLC_SUCCESS_LSB 19
+#define PHY_BB_AGC_CONTROL_CLC_SUCCESS_MASK 0x00080000
+#define PHY_BB_AGC_CONTROL_CLC_SUCCESS_GET(x) (((x) & 0x00080000) >> 19)
+#define PHY_BB_AGC_CONTROL_ENABLE_PKDET_CAL_MSB 20
+#define PHY_BB_AGC_CONTROL_ENABLE_PKDET_CAL_LSB 20
+#define PHY_BB_AGC_CONTROL_ENABLE_PKDET_CAL_MASK 0x00100000
+#define PHY_BB_AGC_CONTROL_ENABLE_PKDET_CAL_GET(x) (((x) & 0x00100000) >> 20)
+#define PHY_BB_AGC_CONTROL_ENABLE_PKDET_CAL_SET(x) (((x) << 20) & 0x00100000)
+
+/* macros for BB_cca_b0 */
+#define PHY_BB_CCA_B0_ADDRESS 0x00009864
+#define PHY_BB_CCA_B0_OFFSET 0x00009864
+#define PHY_BB_CCA_B0_CF_MAXCCAPWR_0_MSB 8
+#define PHY_BB_CCA_B0_CF_MAXCCAPWR_0_LSB 0
+#define PHY_BB_CCA_B0_CF_MAXCCAPWR_0_MASK 0x000001ff
+#define PHY_BB_CCA_B0_CF_MAXCCAPWR_0_GET(x) (((x) & 0x000001ff) >> 0)
+#define PHY_BB_CCA_B0_CF_MAXCCAPWR_0_SET(x) (((x) << 0) & 0x000001ff)
+#define PHY_BB_CCA_B0_CF_CCA_COUNT_MAXC_MSB 11
+#define PHY_BB_CCA_B0_CF_CCA_COUNT_MAXC_LSB 9
+#define PHY_BB_CCA_B0_CF_CCA_COUNT_MAXC_MASK 0x00000e00
+#define PHY_BB_CCA_B0_CF_CCA_COUNT_MAXC_GET(x) (((x) & 0x00000e00) >> 9)
+#define PHY_BB_CCA_B0_CF_CCA_COUNT_MAXC_SET(x) (((x) << 9) & 0x00000e00)
+#define PHY_BB_CCA_B0_CF_THRESH62_MSB 19
+#define PHY_BB_CCA_B0_CF_THRESH62_LSB 12
+#define PHY_BB_CCA_B0_CF_THRESH62_MASK 0x000ff000
+#define PHY_BB_CCA_B0_CF_THRESH62_GET(x) (((x) & 0x000ff000) >> 12)
+#define PHY_BB_CCA_B0_CF_THRESH62_SET(x) (((x) << 12) & 0x000ff000)
+#define PHY_BB_CCA_B0_MINCCAPWR_0_MSB 28
+#define PHY_BB_CCA_B0_MINCCAPWR_0_LSB 20
+#define PHY_BB_CCA_B0_MINCCAPWR_0_MASK 0x1ff00000
+#define PHY_BB_CCA_B0_MINCCAPWR_0_GET(x) (((x) & 0x1ff00000) >> 20)
+
+/* macros for BB_sfcorr */
+#define PHY_BB_SFCORR_ADDRESS 0x00009868
+#define PHY_BB_SFCORR_OFFSET 0x00009868
+#define PHY_BB_SFCORR_M2COUNT_THR_MSB 4
+#define PHY_BB_SFCORR_M2COUNT_THR_LSB 0
+#define PHY_BB_SFCORR_M2COUNT_THR_MASK 0x0000001f
+#define PHY_BB_SFCORR_M2COUNT_THR_GET(x) (((x) & 0x0000001f) >> 0)
+#define PHY_BB_SFCORR_M2COUNT_THR_SET(x) (((x) << 0) & 0x0000001f)
+#define PHY_BB_SFCORR_ADCSAT_THRESH_MSB 10
+#define PHY_BB_SFCORR_ADCSAT_THRESH_LSB 5
+#define PHY_BB_SFCORR_ADCSAT_THRESH_MASK 0x000007e0
+#define PHY_BB_SFCORR_ADCSAT_THRESH_GET(x) (((x) & 0x000007e0) >> 5)
+#define PHY_BB_SFCORR_ADCSAT_THRESH_SET(x) (((x) << 5) & 0x000007e0)
+#define PHY_BB_SFCORR_ADCSAT_ICOUNT_MSB 16
+#define PHY_BB_SFCORR_ADCSAT_ICOUNT_LSB 11
+#define PHY_BB_SFCORR_ADCSAT_ICOUNT_MASK 0x0001f800
+#define PHY_BB_SFCORR_ADCSAT_ICOUNT_GET(x) (((x) & 0x0001f800) >> 11)
+#define PHY_BB_SFCORR_ADCSAT_ICOUNT_SET(x) (((x) << 11) & 0x0001f800)
+#define PHY_BB_SFCORR_M1_THRES_MSB 23
+#define PHY_BB_SFCORR_M1_THRES_LSB 17
+#define PHY_BB_SFCORR_M1_THRES_MASK 0x00fe0000
+#define PHY_BB_SFCORR_M1_THRES_GET(x) (((x) & 0x00fe0000) >> 17)
+#define PHY_BB_SFCORR_M1_THRES_SET(x) (((x) << 17) & 0x00fe0000)
+#define PHY_BB_SFCORR_M2_THRES_MSB 30
+#define PHY_BB_SFCORR_M2_THRES_LSB 24
+#define PHY_BB_SFCORR_M2_THRES_MASK 0x7f000000
+#define PHY_BB_SFCORR_M2_THRES_GET(x) (((x) & 0x7f000000) >> 24)
+#define PHY_BB_SFCORR_M2_THRES_SET(x) (((x) << 24) & 0x7f000000)
+
+/* macros for BB_self_corr_low */
+#define PHY_BB_SELF_CORR_LOW_ADDRESS 0x0000986c
+#define PHY_BB_SELF_CORR_LOW_OFFSET 0x0000986c
+#define PHY_BB_SELF_CORR_LOW_USE_SELF_CORR_LOW_MSB 0
+#define PHY_BB_SELF_CORR_LOW_USE_SELF_CORR_LOW_LSB 0
+#define PHY_BB_SELF_CORR_LOW_USE_SELF_CORR_LOW_MASK 0x00000001
+#define PHY_BB_SELF_CORR_LOW_USE_SELF_CORR_LOW_GET(x) (((x) & 0x00000001) >> 0)
+#define PHY_BB_SELF_CORR_LOW_USE_SELF_CORR_LOW_SET(x) (((x) << 0) & 0x00000001)
+#define PHY_BB_SELF_CORR_LOW_M1COUNT_MAX_LOW_MSB 7
+#define PHY_BB_SELF_CORR_LOW_M1COUNT_MAX_LOW_LSB 1
+#define PHY_BB_SELF_CORR_LOW_M1COUNT_MAX_LOW_MASK 0x000000fe
+#define PHY_BB_SELF_CORR_LOW_M1COUNT_MAX_LOW_GET(x) (((x) & 0x000000fe) >> 1)
+#define PHY_BB_SELF_CORR_LOW_M1COUNT_MAX_LOW_SET(x) (((x) << 1) & 0x000000fe)
+#define PHY_BB_SELF_CORR_LOW_M2COUNT_THR_LOW_MSB 13
+#define PHY_BB_SELF_CORR_LOW_M2COUNT_THR_LOW_LSB 8
+#define PHY_BB_SELF_CORR_LOW_M2COUNT_THR_LOW_MASK 0x00003f00
+#define PHY_BB_SELF_CORR_LOW_M2COUNT_THR_LOW_GET(x) (((x) & 0x00003f00) >> 8)
+#define PHY_BB_SELF_CORR_LOW_M2COUNT_THR_LOW_SET(x) (((x) << 8) & 0x00003f00)
+#define PHY_BB_SELF_CORR_LOW_M1_THRESH_LOW_MSB 20
+#define PHY_BB_SELF_CORR_LOW_M1_THRESH_LOW_LSB 14
+#define PHY_BB_SELF_CORR_LOW_M1_THRESH_LOW_MASK 0x001fc000
+#define PHY_BB_SELF_CORR_LOW_M1_THRESH_LOW_GET(x) (((x) & 0x001fc000) >> 14)
+#define PHY_BB_SELF_CORR_LOW_M1_THRESH_LOW_SET(x) (((x) << 14) & 0x001fc000)
+#define PHY_BB_SELF_CORR_LOW_M2_THRESH_LOW_MSB 27
+#define PHY_BB_SELF_CORR_LOW_M2_THRESH_LOW_LSB 21
+#define PHY_BB_SELF_CORR_LOW_M2_THRESH_LOW_MASK 0x0fe00000
+#define PHY_BB_SELF_CORR_LOW_M2_THRESH_LOW_GET(x) (((x) & 0x0fe00000) >> 21)
+#define PHY_BB_SELF_CORR_LOW_M2_THRESH_LOW_SET(x) (((x) << 21) & 0x0fe00000)
+
+/* macros for BB_synth_control */
+#define PHY_BB_SYNTH_CONTROL_ADDRESS 0x00009874
+#define PHY_BB_SYNTH_CONTROL_OFFSET 0x00009874
+#define PHY_BB_SYNTH_CONTROL_RFCHANFRAC_MSB 16
+#define PHY_BB_SYNTH_CONTROL_RFCHANFRAC_LSB 0
+#define PHY_BB_SYNTH_CONTROL_RFCHANFRAC_MASK 0x0001ffff
+#define PHY_BB_SYNTH_CONTROL_RFCHANFRAC_GET(x) (((x) & 0x0001ffff) >> 0)
+#define PHY_BB_SYNTH_CONTROL_RFCHANFRAC_SET(x) (((x) << 0) & 0x0001ffff)
+#define PHY_BB_SYNTH_CONTROL_RFCHANNEL_MSB 25
+#define PHY_BB_SYNTH_CONTROL_RFCHANNEL_LSB 17
+#define PHY_BB_SYNTH_CONTROL_RFCHANNEL_MASK 0x03fe0000
+#define PHY_BB_SYNTH_CONTROL_RFCHANNEL_GET(x) (((x) & 0x03fe0000) >> 17)
+#define PHY_BB_SYNTH_CONTROL_RFCHANNEL_SET(x) (((x) << 17) & 0x03fe0000)
+#define PHY_BB_SYNTH_CONTROL_RFAMODEREFSEL_MSB 27
+#define PHY_BB_SYNTH_CONTROL_RFAMODEREFSEL_LSB 26
+#define PHY_BB_SYNTH_CONTROL_RFAMODEREFSEL_MASK 0x0c000000
+#define PHY_BB_SYNTH_CONTROL_RFAMODEREFSEL_GET(x) (((x) & 0x0c000000) >> 26)
+#define PHY_BB_SYNTH_CONTROL_RFAMODEREFSEL_SET(x) (((x) << 26) & 0x0c000000)
+#define PHY_BB_SYNTH_CONTROL_RFFRACMODE_MSB 28
+#define PHY_BB_SYNTH_CONTROL_RFFRACMODE_LSB 28
+#define PHY_BB_SYNTH_CONTROL_RFFRACMODE_MASK 0x10000000
+#define PHY_BB_SYNTH_CONTROL_RFFRACMODE_GET(x) (((x) & 0x10000000) >> 28)
+#define PHY_BB_SYNTH_CONTROL_RFFRACMODE_SET(x) (((x) << 28) & 0x10000000)
+#define PHY_BB_SYNTH_CONTROL_RFBMODE_MSB 29
+#define PHY_BB_SYNTH_CONTROL_RFBMODE_LSB 29
+#define PHY_BB_SYNTH_CONTROL_RFBMODE_MASK 0x20000000
+#define PHY_BB_SYNTH_CONTROL_RFBMODE_GET(x) (((x) & 0x20000000) >> 29)
+#define PHY_BB_SYNTH_CONTROL_RFBMODE_SET(x) (((x) << 29) & 0x20000000)
+#define PHY_BB_SYNTH_CONTROL_RFSYNTH_CTRL_SSHIFT_MSB 30
+#define PHY_BB_SYNTH_CONTROL_RFSYNTH_CTRL_SSHIFT_LSB 30
+#define PHY_BB_SYNTH_CONTROL_RFSYNTH_CTRL_SSHIFT_MASK 0x40000000
+#define PHY_BB_SYNTH_CONTROL_RFSYNTH_CTRL_SSHIFT_GET(x) (((x) & 0x40000000) >> 30)
+#define PHY_BB_SYNTH_CONTROL_RFSYNTH_CTRL_SSHIFT_SET(x) (((x) << 30) & 0x40000000)
+
+/* macros for BB_addac_clk_select */
+#define PHY_BB_ADDAC_CLK_SELECT_ADDRESS 0x00009878
+#define PHY_BB_ADDAC_CLK_SELECT_OFFSET 0x00009878
+#define PHY_BB_ADDAC_CLK_SELECT_BB_DAC_CLK_SELECT_MSB 3
+#define PHY_BB_ADDAC_CLK_SELECT_BB_DAC_CLK_SELECT_LSB 2
+#define PHY_BB_ADDAC_CLK_SELECT_BB_DAC_CLK_SELECT_MASK 0x0000000c
+#define PHY_BB_ADDAC_CLK_SELECT_BB_DAC_CLK_SELECT_GET(x) (((x) & 0x0000000c) >> 2)
+#define PHY_BB_ADDAC_CLK_SELECT_BB_DAC_CLK_SELECT_SET(x) (((x) << 2) & 0x0000000c)
+#define PHY_BB_ADDAC_CLK_SELECT_BB_ADC_CLK_SELECT_MSB 5
+#define PHY_BB_ADDAC_CLK_SELECT_BB_ADC_CLK_SELECT_LSB 4
+#define PHY_BB_ADDAC_CLK_SELECT_BB_ADC_CLK_SELECT_MASK 0x00000030
+#define PHY_BB_ADDAC_CLK_SELECT_BB_ADC_CLK_SELECT_GET(x) (((x) & 0x00000030) >> 4)
+#define PHY_BB_ADDAC_CLK_SELECT_BB_ADC_CLK_SELECT_SET(x) (((x) << 4) & 0x00000030)
+
+/* macros for BB_pll_cntl */
+#define PHY_BB_PLL_CNTL_ADDRESS 0x0000987c
+#define PHY_BB_PLL_CNTL_OFFSET 0x0000987c
+#define PHY_BB_PLL_CNTL_BB_PLL_DIV_MSB 9
+#define PHY_BB_PLL_CNTL_BB_PLL_DIV_LSB 0
+#define PHY_BB_PLL_CNTL_BB_PLL_DIV_MASK 0x000003ff
+#define PHY_BB_PLL_CNTL_BB_PLL_DIV_GET(x) (((x) & 0x000003ff) >> 0)
+#define PHY_BB_PLL_CNTL_BB_PLL_DIV_SET(x) (((x) << 0) & 0x000003ff)
+#define PHY_BB_PLL_CNTL_BB_PLL_REFDIV_MSB 13
+#define PHY_BB_PLL_CNTL_BB_PLL_REFDIV_LSB 10
+#define PHY_BB_PLL_CNTL_BB_PLL_REFDIV_MASK 0x00003c00
+#define PHY_BB_PLL_CNTL_BB_PLL_REFDIV_GET(x) (((x) & 0x00003c00) >> 10)
+#define PHY_BB_PLL_CNTL_BB_PLL_REFDIV_SET(x) (((x) << 10) & 0x00003c00)
+#define PHY_BB_PLL_CNTL_BB_PLL_CLK_SEL_MSB 15
+#define PHY_BB_PLL_CNTL_BB_PLL_CLK_SEL_LSB 14
+#define PHY_BB_PLL_CNTL_BB_PLL_CLK_SEL_MASK 0x0000c000
+#define PHY_BB_PLL_CNTL_BB_PLL_CLK_SEL_GET(x) (((x) & 0x0000c000) >> 14)
+#define PHY_BB_PLL_CNTL_BB_PLL_CLK_SEL_SET(x) (((x) << 14) & 0x0000c000)
+#define PHY_BB_PLL_CNTL_BB_PLLBYPASS_MSB 16
+#define PHY_BB_PLL_CNTL_BB_PLLBYPASS_LSB 16
+#define PHY_BB_PLL_CNTL_BB_PLLBYPASS_MASK 0x00010000
+#define PHY_BB_PLL_CNTL_BB_PLLBYPASS_GET(x) (((x) & 0x00010000) >> 16)
+#define PHY_BB_PLL_CNTL_BB_PLLBYPASS_SET(x) (((x) << 16) & 0x00010000)
+#define PHY_BB_PLL_CNTL_BB_PLL_SETTLE_TIME_MSB 27
+#define PHY_BB_PLL_CNTL_BB_PLL_SETTLE_TIME_LSB 17
+#define PHY_BB_PLL_CNTL_BB_PLL_SETTLE_TIME_MASK 0x0ffe0000
+#define PHY_BB_PLL_CNTL_BB_PLL_SETTLE_TIME_GET(x) (((x) & 0x0ffe0000) >> 17)
+#define PHY_BB_PLL_CNTL_BB_PLL_SETTLE_TIME_SET(x) (((x) << 17) & 0x0ffe0000)
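+/*
+ * Illustration only (editorial sketch, not from the original header):
+ * because each _SET() confines its operand to a disjoint bit range, a
+ * complete register value can be composed by OR-ing per-field _SET()
+ * results, e.g. for BB_pll_cntl with caller-supplied div/refdiv/clk_sel:
+ *
+ *	u32 pll = PHY_BB_PLL_CNTL_BB_PLL_DIV_SET(div) |
+ *		  PHY_BB_PLL_CNTL_BB_PLL_REFDIV_SET(refdiv) |
+ *		  PHY_BB_PLL_CNTL_BB_PLL_CLK_SEL_SET(clk_sel);
+ */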
+
+/* macros for BB_vit_spur_mask_A */
+#define PHY_BB_VIT_SPUR_MASK_A_ADDRESS 0x00009900
+#define PHY_BB_VIT_SPUR_MASK_A_OFFSET 0x00009900
+#define PHY_BB_VIT_SPUR_MASK_A_CF_PUNC_MASK_A_MSB 9
+#define PHY_BB_VIT_SPUR_MASK_A_CF_PUNC_MASK_A_LSB 0
+#define PHY_BB_VIT_SPUR_MASK_A_CF_PUNC_MASK_A_MASK 0x000003ff
+#define PHY_BB_VIT_SPUR_MASK_A_CF_PUNC_MASK_A_GET(x) (((x) & 0x000003ff) >> 0)
+#define PHY_BB_VIT_SPUR_MASK_A_CF_PUNC_MASK_A_SET(x) (((x) << 0) & 0x000003ff)
+#define PHY_BB_VIT_SPUR_MASK_A_CF_PUNC_MASK_IDX_A_MSB 16
+#define PHY_BB_VIT_SPUR_MASK_A_CF_PUNC_MASK_IDX_A_LSB 10
+#define PHY_BB_VIT_SPUR_MASK_A_CF_PUNC_MASK_IDX_A_MASK 0x0001fc00
+#define PHY_BB_VIT_SPUR_MASK_A_CF_PUNC_MASK_IDX_A_GET(x) (((x) & 0x0001fc00) >> 10)
+#define PHY_BB_VIT_SPUR_MASK_A_CF_PUNC_MASK_IDX_A_SET(x) (((x) << 10) & 0x0001fc00)
+
+/* macros for BB_vit_spur_mask_B */
+#define PHY_BB_VIT_SPUR_MASK_B_ADDRESS 0x00009904
+#define PHY_BB_VIT_SPUR_MASK_B_OFFSET 0x00009904
+#define PHY_BB_VIT_SPUR_MASK_B_CF_PUNC_MASK_B_MSB 9
+#define PHY_BB_VIT_SPUR_MASK_B_CF_PUNC_MASK_B_LSB 0
+#define PHY_BB_VIT_SPUR_MASK_B_CF_PUNC_MASK_B_MASK 0x000003ff
+#define PHY_BB_VIT_SPUR_MASK_B_CF_PUNC_MASK_B_GET(x) (((x) & 0x000003ff) >> 0)
+#define PHY_BB_VIT_SPUR_MASK_B_CF_PUNC_MASK_B_SET(x) (((x) << 0) & 0x000003ff)
+#define PHY_BB_VIT_SPUR_MASK_B_CF_PUNC_MASK_IDX_B_MSB 16
+#define PHY_BB_VIT_SPUR_MASK_B_CF_PUNC_MASK_IDX_B_LSB 10
+#define PHY_BB_VIT_SPUR_MASK_B_CF_PUNC_MASK_IDX_B_MASK 0x0001fc00
+#define PHY_BB_VIT_SPUR_MASK_B_CF_PUNC_MASK_IDX_B_GET(x) (((x) & 0x0001fc00) >> 10)
+#define PHY_BB_VIT_SPUR_MASK_B_CF_PUNC_MASK_IDX_B_SET(x) (((x) << 10) & 0x0001fc00)
+
+/* macros for BB_pilot_spur_mask */
+#define PHY_BB_PILOT_SPUR_MASK_ADDRESS 0x00009908
+#define PHY_BB_PILOT_SPUR_MASK_OFFSET 0x00009908
+#define PHY_BB_PILOT_SPUR_MASK_CF_PILOT_MASK_A_MSB 4
+#define PHY_BB_PILOT_SPUR_MASK_CF_PILOT_MASK_A_LSB 0
+#define PHY_BB_PILOT_SPUR_MASK_CF_PILOT_MASK_A_MASK 0x0000001f
+#define PHY_BB_PILOT_SPUR_MASK_CF_PILOT_MASK_A_GET(x) (((x) & 0x0000001f) >> 0)
+#define PHY_BB_PILOT_SPUR_MASK_CF_PILOT_MASK_A_SET(x) (((x) << 0) & 0x0000001f)
+#define PHY_BB_PILOT_SPUR_MASK_CF_PILOT_MASK_IDX_A_MSB 11
+#define PHY_BB_PILOT_SPUR_MASK_CF_PILOT_MASK_IDX_A_LSB 5
+#define PHY_BB_PILOT_SPUR_MASK_CF_PILOT_MASK_IDX_A_MASK 0x00000fe0
+#define PHY_BB_PILOT_SPUR_MASK_CF_PILOT_MASK_IDX_A_GET(x) (((x) & 0x00000fe0) >> 5)
+#define PHY_BB_PILOT_SPUR_MASK_CF_PILOT_MASK_IDX_A_SET(x) (((x) << 5) & 0x00000fe0)
+#define PHY_BB_PILOT_SPUR_MASK_CF_PILOT_MASK_B_MSB 16
+#define PHY_BB_PILOT_SPUR_MASK_CF_PILOT_MASK_B_LSB 12
+#define PHY_BB_PILOT_SPUR_MASK_CF_PILOT_MASK_B_MASK 0x0001f000
+#define PHY_BB_PILOT_SPUR_MASK_CF_PILOT_MASK_B_GET(x) (((x) & 0x0001f000) >> 12)
+#define PHY_BB_PILOT_SPUR_MASK_CF_PILOT_MASK_B_SET(x) (((x) << 12) & 0x0001f000)
+#define PHY_BB_PILOT_SPUR_MASK_CF_PILOT_MASK_IDX_B_MSB 23
+#define PHY_BB_PILOT_SPUR_MASK_CF_PILOT_MASK_IDX_B_LSB 17
+#define PHY_BB_PILOT_SPUR_MASK_CF_PILOT_MASK_IDX_B_MASK 0x00fe0000
+#define PHY_BB_PILOT_SPUR_MASK_CF_PILOT_MASK_IDX_B_GET(x) (((x) & 0x00fe0000) >> 17)
+#define PHY_BB_PILOT_SPUR_MASK_CF_PILOT_MASK_IDX_B_SET(x) (((x) << 17) & 0x00fe0000)
+
+/* macros for BB_chan_spur_mask */
+#define PHY_BB_CHAN_SPUR_MASK_ADDRESS 0x0000990c
+#define PHY_BB_CHAN_SPUR_MASK_OFFSET 0x0000990c
+#define PHY_BB_CHAN_SPUR_MASK_CF_CHAN_MASK_A_MSB 4
+#define PHY_BB_CHAN_SPUR_MASK_CF_CHAN_MASK_A_LSB 0
+#define PHY_BB_CHAN_SPUR_MASK_CF_CHAN_MASK_A_MASK 0x0000001f
+#define PHY_BB_CHAN_SPUR_MASK_CF_CHAN_MASK_A_GET(x) (((x) & 0x0000001f) >> 0)
+#define PHY_BB_CHAN_SPUR_MASK_CF_CHAN_MASK_A_SET(x) (((x) << 0) & 0x0000001f)
+#define PHY_BB_CHAN_SPUR_MASK_CF_CHAN_MASK_IDX_A_MSB 11
+#define PHY_BB_CHAN_SPUR_MASK_CF_CHAN_MASK_IDX_A_LSB 5
+#define PHY_BB_CHAN_SPUR_MASK_CF_CHAN_MASK_IDX_A_MASK 0x00000fe0
+#define PHY_BB_CHAN_SPUR_MASK_CF_CHAN_MASK_IDX_A_GET(x) (((x) & 0x00000fe0) >> 5)
+#define PHY_BB_CHAN_SPUR_MASK_CF_CHAN_MASK_IDX_A_SET(x) (((x) << 5) & 0x00000fe0)
+#define PHY_BB_CHAN_SPUR_MASK_CF_CHAN_MASK_B_MSB 16
+#define PHY_BB_CHAN_SPUR_MASK_CF_CHAN_MASK_B_LSB 12
+#define PHY_BB_CHAN_SPUR_MASK_CF_CHAN_MASK_B_MASK 0x0001f000
+#define PHY_BB_CHAN_SPUR_MASK_CF_CHAN_MASK_B_GET(x) (((x) & 0x0001f000) >> 12)
+#define PHY_BB_CHAN_SPUR_MASK_CF_CHAN_MASK_B_SET(x) (((x) << 12) & 0x0001f000)
+#define PHY_BB_CHAN_SPUR_MASK_CF_CHAN_MASK_IDX_B_MSB 23
+#define PHY_BB_CHAN_SPUR_MASK_CF_CHAN_MASK_IDX_B_LSB 17
+#define PHY_BB_CHAN_SPUR_MASK_CF_CHAN_MASK_IDX_B_MASK 0x00fe0000
+#define PHY_BB_CHAN_SPUR_MASK_CF_CHAN_MASK_IDX_B_GET(x) (((x) & 0x00fe0000) >> 17)
+#define PHY_BB_CHAN_SPUR_MASK_CF_CHAN_MASK_IDX_B_SET(x) (((x) << 17) & 0x00fe0000)
+
+/* macros for BB_spectral_scan */
+#define PHY_BB_SPECTRAL_SCAN_ADDRESS 0x00009910
+#define PHY_BB_SPECTRAL_SCAN_OFFSET 0x00009910
+#define PHY_BB_SPECTRAL_SCAN_SPECTRAL_SCAN_ENA_MSB 0
+#define PHY_BB_SPECTRAL_SCAN_SPECTRAL_SCAN_ENA_LSB 0
+#define PHY_BB_SPECTRAL_SCAN_SPECTRAL_SCAN_ENA_MASK 0x00000001
+#define PHY_BB_SPECTRAL_SCAN_SPECTRAL_SCAN_ENA_GET(x) (((x) & 0x00000001) >> 0)
+#define PHY_BB_SPECTRAL_SCAN_SPECTRAL_SCAN_ENA_SET(x) (((x) << 0) & 0x00000001)
+#define PHY_BB_SPECTRAL_SCAN_SPECTRAL_SCAN_ACTIVE_MSB 1
+#define PHY_BB_SPECTRAL_SCAN_SPECTRAL_SCAN_ACTIVE_LSB 1
+#define PHY_BB_SPECTRAL_SCAN_SPECTRAL_SCAN_ACTIVE_MASK 0x00000002
+#define PHY_BB_SPECTRAL_SCAN_SPECTRAL_SCAN_ACTIVE_GET(x) (((x) & 0x00000002) >> 1)
+#define PHY_BB_SPECTRAL_SCAN_SPECTRAL_SCAN_ACTIVE_SET(x) (((x) << 1) & 0x00000002)
+#define PHY_BB_SPECTRAL_SCAN_DISABLE_RADAR_TCTL_RST_MSB 2
+#define PHY_BB_SPECTRAL_SCAN_DISABLE_RADAR_TCTL_RST_LSB 2
+#define PHY_BB_SPECTRAL_SCAN_DISABLE_RADAR_TCTL_RST_MASK 0x00000004
+#define PHY_BB_SPECTRAL_SCAN_DISABLE_RADAR_TCTL_RST_GET(x) (((x) & 0x00000004) >> 2)
+#define PHY_BB_SPECTRAL_SCAN_DISABLE_RADAR_TCTL_RST_SET(x) (((x) << 2) & 0x00000004)
+#define PHY_BB_SPECTRAL_SCAN_DISABLE_PULSE_COARSE_LOW_MSB 3
+#define PHY_BB_SPECTRAL_SCAN_DISABLE_PULSE_COARSE_LOW_LSB 3
+#define PHY_BB_SPECTRAL_SCAN_DISABLE_PULSE_COARSE_LOW_MASK 0x00000008
+#define PHY_BB_SPECTRAL_SCAN_DISABLE_PULSE_COARSE_LOW_GET(x) (((x) & 0x00000008) >> 3)
+#define PHY_BB_SPECTRAL_SCAN_DISABLE_PULSE_COARSE_LOW_SET(x) (((x) << 3) & 0x00000008)
+#define PHY_BB_SPECTRAL_SCAN_SPECTRAL_SCAN_FFT_PERIOD_MSB 7
+#define PHY_BB_SPECTRAL_SCAN_SPECTRAL_SCAN_FFT_PERIOD_LSB 4
+#define PHY_BB_SPECTRAL_SCAN_SPECTRAL_SCAN_FFT_PERIOD_MASK 0x000000f0
+#define PHY_BB_SPECTRAL_SCAN_SPECTRAL_SCAN_FFT_PERIOD_GET(x) (((x) & 0x000000f0) >> 4)
+#define PHY_BB_SPECTRAL_SCAN_SPECTRAL_SCAN_FFT_PERIOD_SET(x) (((x) << 4) & 0x000000f0)
+#define PHY_BB_SPECTRAL_SCAN_SPECTRAL_SCAN_PERIOD_MSB 15
+#define PHY_BB_SPECTRAL_SCAN_SPECTRAL_SCAN_PERIOD_LSB 8
+#define PHY_BB_SPECTRAL_SCAN_SPECTRAL_SCAN_PERIOD_MASK 0x0000ff00
+#define PHY_BB_SPECTRAL_SCAN_SPECTRAL_SCAN_PERIOD_GET(x) (((x) & 0x0000ff00) >> 8)
+#define PHY_BB_SPECTRAL_SCAN_SPECTRAL_SCAN_PERIOD_SET(x) (((x) << 8) & 0x0000ff00)
+#define PHY_BB_SPECTRAL_SCAN_SPECTRAL_SCAN_COUNT_MSB 27
+#define PHY_BB_SPECTRAL_SCAN_SPECTRAL_SCAN_COUNT_LSB 16
+#define PHY_BB_SPECTRAL_SCAN_SPECTRAL_SCAN_COUNT_MASK 0x0fff0000
+#define PHY_BB_SPECTRAL_SCAN_SPECTRAL_SCAN_COUNT_GET(x) (((x) & 0x0fff0000) >> 16)
+#define PHY_BB_SPECTRAL_SCAN_SPECTRAL_SCAN_COUNT_SET(x) (((x) << 16) & 0x0fff0000)
+#define PHY_BB_SPECTRAL_SCAN_SPECTRAL_SCAN_SHORT_RPT_MSB 28
+#define PHY_BB_SPECTRAL_SCAN_SPECTRAL_SCAN_SHORT_RPT_LSB 28
+#define PHY_BB_SPECTRAL_SCAN_SPECTRAL_SCAN_SHORT_RPT_MASK 0x10000000
+#define PHY_BB_SPECTRAL_SCAN_SPECTRAL_SCAN_SHORT_RPT_GET(x) (((x) & 0x10000000) >> 28)
+#define PHY_BB_SPECTRAL_SCAN_SPECTRAL_SCAN_SHORT_RPT_SET(x) (((x) << 28) & 0x10000000)
+#define PHY_BB_SPECTRAL_SCAN_SPECTRAL_SCAN_PRIORITY_MSB 29
+#define PHY_BB_SPECTRAL_SCAN_SPECTRAL_SCAN_PRIORITY_LSB 29
+#define PHY_BB_SPECTRAL_SCAN_SPECTRAL_SCAN_PRIORITY_MASK 0x20000000
+#define PHY_BB_SPECTRAL_SCAN_SPECTRAL_SCAN_PRIORITY_GET(x) (((x) & 0x20000000) >> 29)
+#define PHY_BB_SPECTRAL_SCAN_SPECTRAL_SCAN_PRIORITY_SET(x) (((x) << 29) & 0x20000000)
+#define PHY_BB_SPECTRAL_SCAN_SPECTRAL_SCAN_USE_ERR5_MSB 30
+#define PHY_BB_SPECTRAL_SCAN_SPECTRAL_SCAN_USE_ERR5_LSB 30
+#define PHY_BB_SPECTRAL_SCAN_SPECTRAL_SCAN_USE_ERR5_MASK 0x40000000
+#define PHY_BB_SPECTRAL_SCAN_SPECTRAL_SCAN_USE_ERR5_GET(x) (((x) & 0x40000000) >> 30)
+#define PHY_BB_SPECTRAL_SCAN_SPECTRAL_SCAN_USE_ERR5_SET(x) (((x) << 30) & 0x40000000)
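+/*
+ * Illustration only (editorial sketch; reg_read() is a hypothetical
+ * accessor): the _GET() helpers right-justify fields read back from a
+ * register, e.g. decoding the spectral-scan configuration:
+ *
+ *	u32 ss = reg_read(PHY_BB_SPECTRAL_SCAN_ADDRESS);
+ *	unsigned int fft_period =
+ *		PHY_BB_SPECTRAL_SCAN_SPECTRAL_SCAN_FFT_PERIOD_GET(ss);
+ *	unsigned int scan_count =
+ *		PHY_BB_SPECTRAL_SCAN_SPECTRAL_SCAN_COUNT_GET(ss);
+ */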
+
+/* macros for BB_analog_power_on_time */
+#define PHY_BB_ANALOG_POWER_ON_TIME_ADDRESS 0x00009914
+#define PHY_BB_ANALOG_POWER_ON_TIME_OFFSET 0x00009914
+#define PHY_BB_ANALOG_POWER_ON_TIME_ACTIVE_TO_RECEIVE_MSB 13
+#define PHY_BB_ANALOG_POWER_ON_TIME_ACTIVE_TO_RECEIVE_LSB 0
+#define PHY_BB_ANALOG_POWER_ON_TIME_ACTIVE_TO_RECEIVE_MASK 0x00003fff
+#define PHY_BB_ANALOG_POWER_ON_TIME_ACTIVE_TO_RECEIVE_GET(x) (((x) & 0x00003fff) >> 0)
+#define PHY_BB_ANALOG_POWER_ON_TIME_ACTIVE_TO_RECEIVE_SET(x) (((x) << 0) & 0x00003fff)
+
+/* macros for BB_search_start_delay */
+#define PHY_BB_SEARCH_START_DELAY_ADDRESS 0x00009918
+#define PHY_BB_SEARCH_START_DELAY_OFFSET 0x00009918
+#define PHY_BB_SEARCH_START_DELAY_SEARCH_START_DELAY_MSB 11
+#define PHY_BB_SEARCH_START_DELAY_SEARCH_START_DELAY_LSB 0
+#define PHY_BB_SEARCH_START_DELAY_SEARCH_START_DELAY_MASK 0x00000fff
+#define PHY_BB_SEARCH_START_DELAY_SEARCH_START_DELAY_GET(x) (((x) & 0x00000fff) >> 0)
+#define PHY_BB_SEARCH_START_DELAY_SEARCH_START_DELAY_SET(x) (((x) << 0) & 0x00000fff)
+#define PHY_BB_SEARCH_START_DELAY_ENABLE_FLT_SVD_MSB 12
+#define PHY_BB_SEARCH_START_DELAY_ENABLE_FLT_SVD_LSB 12
+#define PHY_BB_SEARCH_START_DELAY_ENABLE_FLT_SVD_MASK 0x00001000
+#define PHY_BB_SEARCH_START_DELAY_ENABLE_FLT_SVD_GET(x) (((x) & 0x00001000) >> 12)
+#define PHY_BB_SEARCH_START_DELAY_ENABLE_FLT_SVD_SET(x) (((x) << 12) & 0x00001000)
+#define PHY_BB_SEARCH_START_DELAY_ENABLE_SEND_CHAN_MSB 13
+#define PHY_BB_SEARCH_START_DELAY_ENABLE_SEND_CHAN_LSB 13
+#define PHY_BB_SEARCH_START_DELAY_ENABLE_SEND_CHAN_MASK 0x00002000
+#define PHY_BB_SEARCH_START_DELAY_ENABLE_SEND_CHAN_GET(x) (((x) & 0x00002000) >> 13)
+#define PHY_BB_SEARCH_START_DELAY_ENABLE_SEND_CHAN_SET(x) (((x) << 13) & 0x00002000)
+
+/* macros for BB_max_rx_length */
+#define PHY_BB_MAX_RX_LENGTH_ADDRESS 0x0000991c
+#define PHY_BB_MAX_RX_LENGTH_OFFSET 0x0000991c
+#define PHY_BB_MAX_RX_LENGTH_MAX_RX_LENGTH_MSB 11
+#define PHY_BB_MAX_RX_LENGTH_MAX_RX_LENGTH_LSB 0
+#define PHY_BB_MAX_RX_LENGTH_MAX_RX_LENGTH_MASK 0x00000fff
+#define PHY_BB_MAX_RX_LENGTH_MAX_RX_LENGTH_GET(x) (((x) & 0x00000fff) >> 0)
+#define PHY_BB_MAX_RX_LENGTH_MAX_RX_LENGTH_SET(x) (((x) << 0) & 0x00000fff)
+#define PHY_BB_MAX_RX_LENGTH_MAX_HT_LENGTH_MSB 29
+#define PHY_BB_MAX_RX_LENGTH_MAX_HT_LENGTH_LSB 12
+#define PHY_BB_MAX_RX_LENGTH_MAX_HT_LENGTH_MASK 0x3ffff000
+#define PHY_BB_MAX_RX_LENGTH_MAX_HT_LENGTH_GET(x) (((x) & 0x3ffff000) >> 12)
+#define PHY_BB_MAX_RX_LENGTH_MAX_HT_LENGTH_SET(x) (((x) << 12) & 0x3ffff000)
+
+/* macros for BB_timing_control_4 */
+#define PHY_BB_TIMING_CONTROL_4_ADDRESS 0x00009920
+#define PHY_BB_TIMING_CONTROL_4_OFFSET 0x00009920
+#define PHY_BB_TIMING_CONTROL_4_CAL_LG_COUNT_MAX_MSB 15
+#define PHY_BB_TIMING_CONTROL_4_CAL_LG_COUNT_MAX_LSB 12
+#define PHY_BB_TIMING_CONTROL_4_CAL_LG_COUNT_MAX_MASK 0x0000f000
+#define PHY_BB_TIMING_CONTROL_4_CAL_LG_COUNT_MAX_GET(x) (((x) & 0x0000f000) >> 12)
+#define PHY_BB_TIMING_CONTROL_4_CAL_LG_COUNT_MAX_SET(x) (((x) << 12) & 0x0000f000)
+#define PHY_BB_TIMING_CONTROL_4_DO_GAIN_DC_IQ_CAL_MSB 16
+#define PHY_BB_TIMING_CONTROL_4_DO_GAIN_DC_IQ_CAL_LSB 16
+#define PHY_BB_TIMING_CONTROL_4_DO_GAIN_DC_IQ_CAL_MASK 0x00010000
+#define PHY_BB_TIMING_CONTROL_4_DO_GAIN_DC_IQ_CAL_GET(x) (((x) & 0x00010000) >> 16)
+#define PHY_BB_TIMING_CONTROL_4_DO_GAIN_DC_IQ_CAL_SET(x) (((x) << 16) & 0x00010000)
+#define PHY_BB_TIMING_CONTROL_4_USE_PILOT_TRACK_DF_MSB 20
+#define PHY_BB_TIMING_CONTROL_4_USE_PILOT_TRACK_DF_LSB 17
+#define PHY_BB_TIMING_CONTROL_4_USE_PILOT_TRACK_DF_MASK 0x001e0000
+#define PHY_BB_TIMING_CONTROL_4_USE_PILOT_TRACK_DF_GET(x) (((x) & 0x001e0000) >> 17)
+#define PHY_BB_TIMING_CONTROL_4_USE_PILOT_TRACK_DF_SET(x) (((x) << 17) & 0x001e0000)
+#define PHY_BB_TIMING_CONTROL_4_EARLY_TRIGGER_THR_MSB 27
+#define PHY_BB_TIMING_CONTROL_4_EARLY_TRIGGER_THR_LSB 21
+#define PHY_BB_TIMING_CONTROL_4_EARLY_TRIGGER_THR_MASK 0x0fe00000
+#define PHY_BB_TIMING_CONTROL_4_EARLY_TRIGGER_THR_GET(x) (((x) & 0x0fe00000) >> 21)
+#define PHY_BB_TIMING_CONTROL_4_EARLY_TRIGGER_THR_SET(x) (((x) << 21) & 0x0fe00000)
+#define PHY_BB_TIMING_CONTROL_4_ENABLE_PILOT_MASK_MSB 28
+#define PHY_BB_TIMING_CONTROL_4_ENABLE_PILOT_MASK_LSB 28
+#define PHY_BB_TIMING_CONTROL_4_ENABLE_PILOT_MASK_MASK 0x10000000
+#define PHY_BB_TIMING_CONTROL_4_ENABLE_PILOT_MASK_GET(x) (((x) & 0x10000000) >> 28)
+#define PHY_BB_TIMING_CONTROL_4_ENABLE_PILOT_MASK_SET(x) (((x) << 28) & 0x10000000)
+#define PHY_BB_TIMING_CONTROL_4_ENABLE_CHAN_MASK_MSB 29
+#define PHY_BB_TIMING_CONTROL_4_ENABLE_CHAN_MASK_LSB 29
+#define PHY_BB_TIMING_CONTROL_4_ENABLE_CHAN_MASK_MASK 0x20000000
+#define PHY_BB_TIMING_CONTROL_4_ENABLE_CHAN_MASK_GET(x) (((x) & 0x20000000) >> 29)
+#define PHY_BB_TIMING_CONTROL_4_ENABLE_CHAN_MASK_SET(x) (((x) << 29) & 0x20000000)
+#define PHY_BB_TIMING_CONTROL_4_ENABLE_SPUR_FILTER_MSB 30
+#define PHY_BB_TIMING_CONTROL_4_ENABLE_SPUR_FILTER_LSB 30
+#define PHY_BB_TIMING_CONTROL_4_ENABLE_SPUR_FILTER_MASK 0x40000000
+#define PHY_BB_TIMING_CONTROL_4_ENABLE_SPUR_FILTER_GET(x) (((x) & 0x40000000) >> 30)
+#define PHY_BB_TIMING_CONTROL_4_ENABLE_SPUR_FILTER_SET(x) (((x) << 30) & 0x40000000)
+#define PHY_BB_TIMING_CONTROL_4_ENABLE_SPUR_RSSI_MSB 31
+#define PHY_BB_TIMING_CONTROL_4_ENABLE_SPUR_RSSI_LSB 31
+#define PHY_BB_TIMING_CONTROL_4_ENABLE_SPUR_RSSI_MASK 0x80000000
+#define PHY_BB_TIMING_CONTROL_4_ENABLE_SPUR_RSSI_GET(x) (((x) & 0x80000000) >> 31)
+#define PHY_BB_TIMING_CONTROL_4_ENABLE_SPUR_RSSI_SET(x) (((x) << 31) & 0x80000000)
+
+/* macros for BB_timing_control_5 */
+#define PHY_BB_TIMING_CONTROL_5_ADDRESS 0x00009924
+#define PHY_BB_TIMING_CONTROL_5_OFFSET 0x00009924
+#define PHY_BB_TIMING_CONTROL_5_ENABLE_CYCPWR_THR1_MSB 0
+#define PHY_BB_TIMING_CONTROL_5_ENABLE_CYCPWR_THR1_LSB 0
+#define PHY_BB_TIMING_CONTROL_5_ENABLE_CYCPWR_THR1_MASK 0x00000001
+#define PHY_BB_TIMING_CONTROL_5_ENABLE_CYCPWR_THR1_GET(x) (((x) & 0x00000001) >> 0)
+#define PHY_BB_TIMING_CONTROL_5_ENABLE_CYCPWR_THR1_SET(x) (((x) << 0) & 0x00000001)
+#define PHY_BB_TIMING_CONTROL_5_CYCPWR_THR1_MSB 7
+#define PHY_BB_TIMING_CONTROL_5_CYCPWR_THR1_LSB 1
+#define PHY_BB_TIMING_CONTROL_5_CYCPWR_THR1_MASK 0x000000fe
+#define PHY_BB_TIMING_CONTROL_5_CYCPWR_THR1_GET(x) (((x) & 0x000000fe) >> 1)
+#define PHY_BB_TIMING_CONTROL_5_CYCPWR_THR1_SET(x) (((x) << 1) & 0x000000fe)
+#define PHY_BB_TIMING_CONTROL_5_ENABLE_RSSI_THR1A_MSB 15
+#define PHY_BB_TIMING_CONTROL_5_ENABLE_RSSI_THR1A_LSB 15
+#define PHY_BB_TIMING_CONTROL_5_ENABLE_RSSI_THR1A_MASK 0x00008000
+#define PHY_BB_TIMING_CONTROL_5_ENABLE_RSSI_THR1A_GET(x) (((x) & 0x00008000) >> 15)
+#define PHY_BB_TIMING_CONTROL_5_ENABLE_RSSI_THR1A_SET(x) (((x) << 15) & 0x00008000)
+#define PHY_BB_TIMING_CONTROL_5_RSSI_THR1A_MSB 22
+#define PHY_BB_TIMING_CONTROL_5_RSSI_THR1A_LSB 16
+#define PHY_BB_TIMING_CONTROL_5_RSSI_THR1A_MASK 0x007f0000
+#define PHY_BB_TIMING_CONTROL_5_RSSI_THR1A_GET(x) (((x) & 0x007f0000) >> 16)
+#define PHY_BB_TIMING_CONTROL_5_RSSI_THR1A_SET(x) (((x) << 16) & 0x007f0000)
+#define PHY_BB_TIMING_CONTROL_5_LONG_SC_THRESH_HI_RSSI_MSB 29
+#define PHY_BB_TIMING_CONTROL_5_LONG_SC_THRESH_HI_RSSI_LSB 23
+#define PHY_BB_TIMING_CONTROL_5_LONG_SC_THRESH_HI_RSSI_MASK 0x3f800000
+#define PHY_BB_TIMING_CONTROL_5_LONG_SC_THRESH_HI_RSSI_GET(x) (((x) & 0x3f800000) >> 23)
+#define PHY_BB_TIMING_CONTROL_5_LONG_SC_THRESH_HI_RSSI_SET(x) (((x) << 23) & 0x3f800000)
+#define PHY_BB_TIMING_CONTROL_5_FORCED_AGC_STR_PRI_MSB 30
+#define PHY_BB_TIMING_CONTROL_5_FORCED_AGC_STR_PRI_LSB 30
+#define PHY_BB_TIMING_CONTROL_5_FORCED_AGC_STR_PRI_MASK 0x40000000
+#define PHY_BB_TIMING_CONTROL_5_FORCED_AGC_STR_PRI_GET(x) (((x) & 0x40000000) >> 30)
+#define PHY_BB_TIMING_CONTROL_5_FORCED_AGC_STR_PRI_SET(x) (((x) << 30) & 0x40000000)
+#define PHY_BB_TIMING_CONTROL_5_FORCED_AGC_STR_PRI_EN_MSB 31
+#define PHY_BB_TIMING_CONTROL_5_FORCED_AGC_STR_PRI_EN_LSB 31
+#define PHY_BB_TIMING_CONTROL_5_FORCED_AGC_STR_PRI_EN_MASK 0x80000000
+#define PHY_BB_TIMING_CONTROL_5_FORCED_AGC_STR_PRI_EN_GET(x) (((x) & 0x80000000) >> 31)
+#define PHY_BB_TIMING_CONTROL_5_FORCED_AGC_STR_PRI_EN_SET(x) (((x) << 31) & 0x80000000)
+
+/* macros for BB_phyonly_warm_reset */
+#define PHY_BB_PHYONLY_WARM_RESET_ADDRESS 0x00009928
+#define PHY_BB_PHYONLY_WARM_RESET_OFFSET 0x00009928
+#define PHY_BB_PHYONLY_WARM_RESET_PHYONLY_RST_WARM_L_MSB 0
+#define PHY_BB_PHYONLY_WARM_RESET_PHYONLY_RST_WARM_L_LSB 0
+#define PHY_BB_PHYONLY_WARM_RESET_PHYONLY_RST_WARM_L_MASK 0x00000001
+#define PHY_BB_PHYONLY_WARM_RESET_PHYONLY_RST_WARM_L_GET(x) (((x) & 0x00000001) >> 0)
+#define PHY_BB_PHYONLY_WARM_RESET_PHYONLY_RST_WARM_L_SET(x) (((x) << 0) & 0x00000001)
+
+/* macros for BB_phyonly_control */
+#define PHY_BB_PHYONLY_CONTROL_ADDRESS 0x0000992c
+#define PHY_BB_PHYONLY_CONTROL_OFFSET 0x0000992c
+#define PHY_BB_PHYONLY_CONTROL_RX_DRAIN_RATE_MSB 0
+#define PHY_BB_PHYONLY_CONTROL_RX_DRAIN_RATE_LSB 0
+#define PHY_BB_PHYONLY_CONTROL_RX_DRAIN_RATE_MASK 0x00000001
+#define PHY_BB_PHYONLY_CONTROL_RX_DRAIN_RATE_GET(x) (((x) & 0x00000001) >> 0)
+#define PHY_BB_PHYONLY_CONTROL_RX_DRAIN_RATE_SET(x) (((x) << 0) & 0x00000001)
+#define PHY_BB_PHYONLY_CONTROL_LATE_TX_SIGNAL_SYMBOL_MSB 1
+#define PHY_BB_PHYONLY_CONTROL_LATE_TX_SIGNAL_SYMBOL_LSB 1
+#define PHY_BB_PHYONLY_CONTROL_LATE_TX_SIGNAL_SYMBOL_MASK 0x00000002
+#define PHY_BB_PHYONLY_CONTROL_LATE_TX_SIGNAL_SYMBOL_GET(x) (((x) & 0x00000002) >> 1)
+#define PHY_BB_PHYONLY_CONTROL_LATE_TX_SIGNAL_SYMBOL_SET(x) (((x) << 1) & 0x00000002)
+#define PHY_BB_PHYONLY_CONTROL_GENERATE_SCRAMBLER_MSB 2
+#define PHY_BB_PHYONLY_CONTROL_GENERATE_SCRAMBLER_LSB 2
+#define PHY_BB_PHYONLY_CONTROL_GENERATE_SCRAMBLER_MASK 0x00000004
+#define PHY_BB_PHYONLY_CONTROL_GENERATE_SCRAMBLER_GET(x) (((x) & 0x00000004) >> 2)
+#define PHY_BB_PHYONLY_CONTROL_GENERATE_SCRAMBLER_SET(x) (((x) << 2) & 0x00000004)
+#define PHY_BB_PHYONLY_CONTROL_TX_ANTENNA_SELECT_MSB 3
+#define PHY_BB_PHYONLY_CONTROL_TX_ANTENNA_SELECT_LSB 3
+#define PHY_BB_PHYONLY_CONTROL_TX_ANTENNA_SELECT_MASK 0x00000008
+#define PHY_BB_PHYONLY_CONTROL_TX_ANTENNA_SELECT_GET(x) (((x) & 0x00000008) >> 3)
+#define PHY_BB_PHYONLY_CONTROL_TX_ANTENNA_SELECT_SET(x) (((x) << 3) & 0x00000008)
+#define PHY_BB_PHYONLY_CONTROL_STATIC_TX_ANTENNA_MSB 4
+#define PHY_BB_PHYONLY_CONTROL_STATIC_TX_ANTENNA_LSB 4
+#define PHY_BB_PHYONLY_CONTROL_STATIC_TX_ANTENNA_MASK 0x00000010
+#define PHY_BB_PHYONLY_CONTROL_STATIC_TX_ANTENNA_GET(x) (((x) & 0x00000010) >> 4)
+#define PHY_BB_PHYONLY_CONTROL_STATIC_TX_ANTENNA_SET(x) (((x) << 4) & 0x00000010)
+#define PHY_BB_PHYONLY_CONTROL_RX_ANTENNA_SELECT_MSB 5
+#define PHY_BB_PHYONLY_CONTROL_RX_ANTENNA_SELECT_LSB 5
+#define PHY_BB_PHYONLY_CONTROL_RX_ANTENNA_SELECT_MASK 0x00000020
+#define PHY_BB_PHYONLY_CONTROL_RX_ANTENNA_SELECT_GET(x) (((x) & 0x00000020) >> 5)
+#define PHY_BB_PHYONLY_CONTROL_RX_ANTENNA_SELECT_SET(x) (((x) << 5) & 0x00000020)
+#define PHY_BB_PHYONLY_CONTROL_STATIC_RX_ANTENNA_MSB 6
+#define PHY_BB_PHYONLY_CONTROL_STATIC_RX_ANTENNA_LSB 6
+#define PHY_BB_PHYONLY_CONTROL_STATIC_RX_ANTENNA_MASK 0x00000040
+#define PHY_BB_PHYONLY_CONTROL_STATIC_RX_ANTENNA_GET(x) (((x) & 0x00000040) >> 6)
+#define PHY_BB_PHYONLY_CONTROL_STATIC_RX_ANTENNA_SET(x) (((x) << 6) & 0x00000040)
+#define PHY_BB_PHYONLY_CONTROL_EN_LOW_FREQ_SLEEP_MSB 7
+#define PHY_BB_PHYONLY_CONTROL_EN_LOW_FREQ_SLEEP_LSB 7
+#define PHY_BB_PHYONLY_CONTROL_EN_LOW_FREQ_SLEEP_MASK 0x00000080
+#define PHY_BB_PHYONLY_CONTROL_EN_LOW_FREQ_SLEEP_GET(x) (((x) & 0x00000080) >> 7)
+#define PHY_BB_PHYONLY_CONTROL_EN_LOW_FREQ_SLEEP_SET(x) (((x) << 7) & 0x00000080)
+
+/* macros for BB_powertx_rate1 */
+#define PHY_BB_POWERTX_RATE1_ADDRESS 0x00009934
+#define PHY_BB_POWERTX_RATE1_OFFSET 0x00009934
+#define PHY_BB_POWERTX_RATE1_POWERTX_0_MSB 5
+#define PHY_BB_POWERTX_RATE1_POWERTX_0_LSB 0
+#define PHY_BB_POWERTX_RATE1_POWERTX_0_MASK 0x0000003f
+#define PHY_BB_POWERTX_RATE1_POWERTX_0_GET(x) (((x) & 0x0000003f) >> 0)
+#define PHY_BB_POWERTX_RATE1_POWERTX_0_SET(x) (((x) << 0) & 0x0000003f)
+#define PHY_BB_POWERTX_RATE1_POWERTX_1_MSB 13
+#define PHY_BB_POWERTX_RATE1_POWERTX_1_LSB 8
+#define PHY_BB_POWERTX_RATE1_POWERTX_1_MASK 0x00003f00
+#define PHY_BB_POWERTX_RATE1_POWERTX_1_GET(x) (((x) & 0x00003f00) >> 8)
+#define PHY_BB_POWERTX_RATE1_POWERTX_1_SET(x) (((x) << 8) & 0x00003f00)
+#define PHY_BB_POWERTX_RATE1_POWERTX_2_MSB 21
+#define PHY_BB_POWERTX_RATE1_POWERTX_2_LSB 16
+#define PHY_BB_POWERTX_RATE1_POWERTX_2_MASK 0x003f0000
+#define PHY_BB_POWERTX_RATE1_POWERTX_2_GET(x) (((x) & 0x003f0000) >> 16)
+#define PHY_BB_POWERTX_RATE1_POWERTX_2_SET(x) (((x) << 16) & 0x003f0000)
+#define PHY_BB_POWERTX_RATE1_POWERTX_3_MSB 29
+#define PHY_BB_POWERTX_RATE1_POWERTX_3_LSB 24
+#define PHY_BB_POWERTX_RATE1_POWERTX_3_MASK 0x3f000000
+#define PHY_BB_POWERTX_RATE1_POWERTX_3_GET(x) (((x) & 0x3f000000) >> 24)
+#define PHY_BB_POWERTX_RATE1_POWERTX_3_SET(x) (((x) << 24) & 0x3f000000)
+
+/* macros for BB_powertx_rate2 */
+#define PHY_BB_POWERTX_RATE2_ADDRESS 0x00009938
+#define PHY_BB_POWERTX_RATE2_OFFSET 0x00009938
+#define PHY_BB_POWERTX_RATE2_POWERTX_4_MSB 5
+#define PHY_BB_POWERTX_RATE2_POWERTX_4_LSB 0
+#define PHY_BB_POWERTX_RATE2_POWERTX_4_MASK 0x0000003f
+#define PHY_BB_POWERTX_RATE2_POWERTX_4_GET(x) (((x) & 0x0000003f) >> 0)
+#define PHY_BB_POWERTX_RATE2_POWERTX_4_SET(x) (((x) << 0) & 0x0000003f)
+#define PHY_BB_POWERTX_RATE2_POWERTX_5_MSB 13
+#define PHY_BB_POWERTX_RATE2_POWERTX_5_LSB 8
+#define PHY_BB_POWERTX_RATE2_POWERTX_5_MASK 0x00003f00
+#define PHY_BB_POWERTX_RATE2_POWERTX_5_GET(x) (((x) & 0x00003f00) >> 8)
+#define PHY_BB_POWERTX_RATE2_POWERTX_5_SET(x) (((x) << 8) & 0x00003f00)
+#define PHY_BB_POWERTX_RATE2_POWERTX_6_MSB 21
+#define PHY_BB_POWERTX_RATE2_POWERTX_6_LSB 16
+#define PHY_BB_POWERTX_RATE2_POWERTX_6_MASK 0x003f0000
+#define PHY_BB_POWERTX_RATE2_POWERTX_6_GET(x) (((x) & 0x003f0000) >> 16)
+#define PHY_BB_POWERTX_RATE2_POWERTX_6_SET(x) (((x) << 16) & 0x003f0000)
+#define PHY_BB_POWERTX_RATE2_POWERTX_7_MSB 29
+#define PHY_BB_POWERTX_RATE2_POWERTX_7_LSB 24
+#define PHY_BB_POWERTX_RATE2_POWERTX_7_MASK 0x3f000000
+#define PHY_BB_POWERTX_RATE2_POWERTX_7_GET(x) (((x) & 0x3f000000) >> 24)
+#define PHY_BB_POWERTX_RATE2_POWERTX_7_SET(x) (((x) << 24) & 0x3f000000)
+
+/* macros for BB_powertx_max */
+#define PHY_BB_POWERTX_MAX_ADDRESS 0x0000993c
+#define PHY_BB_POWERTX_MAX_OFFSET 0x0000993c
+#define PHY_BB_POWERTX_MAX_USE_PER_PACKET_POWERTX_MAX_MSB 6
+#define PHY_BB_POWERTX_MAX_USE_PER_PACKET_POWERTX_MAX_LSB 6
+#define PHY_BB_POWERTX_MAX_USE_PER_PACKET_POWERTX_MAX_MASK 0x00000040
+#define PHY_BB_POWERTX_MAX_USE_PER_PACKET_POWERTX_MAX_GET(x) (((x) & 0x00000040) >> 6)
+#define PHY_BB_POWERTX_MAX_USE_PER_PACKET_POWERTX_MAX_SET(x) (((x) << 6) & 0x00000040)
+
+/* macros for BB_extension_radar */
+#define PHY_BB_EXTENSION_RADAR_ADDRESS 0x00009940
+#define PHY_BB_EXTENSION_RADAR_OFFSET 0x00009940
+#define PHY_BB_EXTENSION_RADAR_BLOCKER40_MAX_RADAR_MSB 13
+#define PHY_BB_EXTENSION_RADAR_BLOCKER40_MAX_RADAR_LSB 8
+#define PHY_BB_EXTENSION_RADAR_BLOCKER40_MAX_RADAR_MASK 0x00003f00
+#define PHY_BB_EXTENSION_RADAR_BLOCKER40_MAX_RADAR_GET(x) (((x) & 0x00003f00) >> 8)
+#define PHY_BB_EXTENSION_RADAR_BLOCKER40_MAX_RADAR_SET(x) (((x) << 8) & 0x00003f00)
+#define PHY_BB_EXTENSION_RADAR_ENABLE_EXT_RADAR_MSB 14
+#define PHY_BB_EXTENSION_RADAR_ENABLE_EXT_RADAR_LSB 14
+#define PHY_BB_EXTENSION_RADAR_ENABLE_EXT_RADAR_MASK 0x00004000
+#define PHY_BB_EXTENSION_RADAR_ENABLE_EXT_RADAR_GET(x) (((x) & 0x00004000) >> 14)
+#define PHY_BB_EXTENSION_RADAR_ENABLE_EXT_RADAR_SET(x) (((x) << 14) & 0x00004000)
+#define PHY_BB_EXTENSION_RADAR_RADAR_DC_PWR_THRESH_MSB 22
+#define PHY_BB_EXTENSION_RADAR_RADAR_DC_PWR_THRESH_LSB 15
+#define PHY_BB_EXTENSION_RADAR_RADAR_DC_PWR_THRESH_MASK 0x007f8000
+#define PHY_BB_EXTENSION_RADAR_RADAR_DC_PWR_THRESH_GET(x) (((x) & 0x007f8000) >> 15)
+#define PHY_BB_EXTENSION_RADAR_RADAR_DC_PWR_THRESH_SET(x) (((x) << 15) & 0x007f8000)
+#define PHY_BB_EXTENSION_RADAR_RADAR_LB_DC_CAP_MSB 30
+#define PHY_BB_EXTENSION_RADAR_RADAR_LB_DC_CAP_LSB 23
+#define PHY_BB_EXTENSION_RADAR_RADAR_LB_DC_CAP_MASK 0x7f800000
+#define PHY_BB_EXTENSION_RADAR_RADAR_LB_DC_CAP_GET(x) (((x) & 0x7f800000) >> 23)
+#define PHY_BB_EXTENSION_RADAR_RADAR_LB_DC_CAP_SET(x) (((x) << 23) & 0x7f800000)
+#define PHY_BB_EXTENSION_RADAR_DISABLE_ADCSAT_HOLD_MSB 31
+#define PHY_BB_EXTENSION_RADAR_DISABLE_ADCSAT_HOLD_LSB 31
+#define PHY_BB_EXTENSION_RADAR_DISABLE_ADCSAT_HOLD_MASK 0x80000000
+#define PHY_BB_EXTENSION_RADAR_DISABLE_ADCSAT_HOLD_GET(x) (((x) & 0x80000000) >> 31)
+#define PHY_BB_EXTENSION_RADAR_DISABLE_ADCSAT_HOLD_SET(x) (((x) << 31) & 0x80000000)
+
+/* macros for BB_frame_control */
+#define PHY_BB_FRAME_CONTROL_ADDRESS 0x00009944
+#define PHY_BB_FRAME_CONTROL_OFFSET 0x00009944
+#define PHY_BB_FRAME_CONTROL_CF_OVERLAP_WINDOW_MSB 1
+#define PHY_BB_FRAME_CONTROL_CF_OVERLAP_WINDOW_LSB 0
+#define PHY_BB_FRAME_CONTROL_CF_OVERLAP_WINDOW_MASK 0x00000003
+#define PHY_BB_FRAME_CONTROL_CF_OVERLAP_WINDOW_GET(x) (((x) & 0x00000003) >> 0)
+#define PHY_BB_FRAME_CONTROL_CF_OVERLAP_WINDOW_SET(x) (((x) << 0) & 0x00000003)
+#define PHY_BB_FRAME_CONTROL_CF_SCALE_SHORT_MSB 2
+#define PHY_BB_FRAME_CONTROL_CF_SCALE_SHORT_LSB 2
+#define PHY_BB_FRAME_CONTROL_CF_SCALE_SHORT_MASK 0x00000004
+#define PHY_BB_FRAME_CONTROL_CF_SCALE_SHORT_GET(x) (((x) & 0x00000004) >> 2)
+#define PHY_BB_FRAME_CONTROL_CF_SCALE_SHORT_SET(x) (((x) << 2) & 0x00000004)
+#define PHY_BB_FRAME_CONTROL_CF_TX_CLIP_MSB 5
+#define PHY_BB_FRAME_CONTROL_CF_TX_CLIP_LSB 3
+#define PHY_BB_FRAME_CONTROL_CF_TX_CLIP_MASK 0x00000038
+#define PHY_BB_FRAME_CONTROL_CF_TX_CLIP_GET(x) (((x) & 0x00000038) >> 3)
+#define PHY_BB_FRAME_CONTROL_CF_TX_CLIP_SET(x) (((x) << 3) & 0x00000038)
+#define PHY_BB_FRAME_CONTROL_CF_TX_DOUBLESAMP_DAC_MSB 7
+#define PHY_BB_FRAME_CONTROL_CF_TX_DOUBLESAMP_DAC_LSB 6
+#define PHY_BB_FRAME_CONTROL_CF_TX_DOUBLESAMP_DAC_MASK 0x000000c0
+#define PHY_BB_FRAME_CONTROL_CF_TX_DOUBLESAMP_DAC_GET(x) (((x) & 0x000000c0) >> 6)
+#define PHY_BB_FRAME_CONTROL_CF_TX_DOUBLESAMP_DAC_SET(x) (((x) << 6) & 0x000000c0)
+#define PHY_BB_FRAME_CONTROL_TX_END_ADJUST_MSB 15
+#define PHY_BB_FRAME_CONTROL_TX_END_ADJUST_LSB 8
+#define PHY_BB_FRAME_CONTROL_TX_END_ADJUST_MASK 0x0000ff00
+#define PHY_BB_FRAME_CONTROL_TX_END_ADJUST_GET(x) (((x) & 0x0000ff00) >> 8)
+#define PHY_BB_FRAME_CONTROL_TX_END_ADJUST_SET(x) (((x) << 8) & 0x0000ff00)
+#define PHY_BB_FRAME_CONTROL_PREPEND_CHAN_INFO_MSB 16
+#define PHY_BB_FRAME_CONTROL_PREPEND_CHAN_INFO_LSB 16
+#define PHY_BB_FRAME_CONTROL_PREPEND_CHAN_INFO_MASK 0x00010000
+#define PHY_BB_FRAME_CONTROL_PREPEND_CHAN_INFO_GET(x) (((x) & 0x00010000) >> 16)
+#define PHY_BB_FRAME_CONTROL_PREPEND_CHAN_INFO_SET(x) (((x) << 16) & 0x00010000)
+#define PHY_BB_FRAME_CONTROL_SHORT_HIGH_PAR_NORM_MSB 17
+#define PHY_BB_FRAME_CONTROL_SHORT_HIGH_PAR_NORM_LSB 17
+#define PHY_BB_FRAME_CONTROL_SHORT_HIGH_PAR_NORM_MASK 0x00020000
+#define PHY_BB_FRAME_CONTROL_SHORT_HIGH_PAR_NORM_GET(x) (((x) & 0x00020000) >> 17)
+#define PHY_BB_FRAME_CONTROL_SHORT_HIGH_PAR_NORM_SET(x) (((x) << 17) & 0x00020000)
+#define PHY_BB_FRAME_CONTROL_EN_ERR_GREEN_FIELD_MSB 18
+#define PHY_BB_FRAME_CONTROL_EN_ERR_GREEN_FIELD_LSB 18
+#define PHY_BB_FRAME_CONTROL_EN_ERR_GREEN_FIELD_MASK 0x00040000
+#define PHY_BB_FRAME_CONTROL_EN_ERR_GREEN_FIELD_GET(x) (((x) & 0x00040000) >> 18)
+#define PHY_BB_FRAME_CONTROL_EN_ERR_GREEN_FIELD_SET(x) (((x) << 18) & 0x00040000)
+#define PHY_BB_FRAME_CONTROL_EN_ERR_XR_POWER_RATIO_MSB 19
+#define PHY_BB_FRAME_CONTROL_EN_ERR_XR_POWER_RATIO_LSB 19
+#define PHY_BB_FRAME_CONTROL_EN_ERR_XR_POWER_RATIO_MASK 0x00080000
+#define PHY_BB_FRAME_CONTROL_EN_ERR_XR_POWER_RATIO_GET(x) (((x) & 0x00080000) >> 19)
+#define PHY_BB_FRAME_CONTROL_EN_ERR_XR_POWER_RATIO_SET(x) (((x) << 19) & 0x00080000)
+#define PHY_BB_FRAME_CONTROL_EN_ERR_OFDM_XCORR_MSB 20
+#define PHY_BB_FRAME_CONTROL_EN_ERR_OFDM_XCORR_LSB 20
+#define PHY_BB_FRAME_CONTROL_EN_ERR_OFDM_XCORR_MASK 0x00100000
+#define PHY_BB_FRAME_CONTROL_EN_ERR_OFDM_XCORR_GET(x) (((x) & 0x00100000) >> 20)
+#define PHY_BB_FRAME_CONTROL_EN_ERR_OFDM_XCORR_SET(x) (((x) << 20) & 0x00100000)
+#define PHY_BB_FRAME_CONTROL_EN_ERR_LONG_SC_THR_MSB 21
+#define PHY_BB_FRAME_CONTROL_EN_ERR_LONG_SC_THR_LSB 21
+#define PHY_BB_FRAME_CONTROL_EN_ERR_LONG_SC_THR_MASK 0x00200000
+#define PHY_BB_FRAME_CONTROL_EN_ERR_LONG_SC_THR_GET(x) (((x) & 0x00200000) >> 21)
+#define PHY_BB_FRAME_CONTROL_EN_ERR_LONG_SC_THR_SET(x) (((x) << 21) & 0x00200000)
+#define PHY_BB_FRAME_CONTROL_EN_ERR_TIM_LONG1_MSB 22
+#define PHY_BB_FRAME_CONTROL_EN_ERR_TIM_LONG1_LSB 22
+#define PHY_BB_FRAME_CONTROL_EN_ERR_TIM_LONG1_MASK 0x00400000
+#define PHY_BB_FRAME_CONTROL_EN_ERR_TIM_LONG1_GET(x) (((x) & 0x00400000) >> 22)
+#define PHY_BB_FRAME_CONTROL_EN_ERR_TIM_LONG1_SET(x) (((x) << 22) & 0x00400000)
+#define PHY_BB_FRAME_CONTROL_EN_ERR_TIM_EARLY_TRIG_MSB 23
+#define PHY_BB_FRAME_CONTROL_EN_ERR_TIM_EARLY_TRIG_LSB 23
+#define PHY_BB_FRAME_CONTROL_EN_ERR_TIM_EARLY_TRIG_MASK 0x00800000
+#define PHY_BB_FRAME_CONTROL_EN_ERR_TIM_EARLY_TRIG_GET(x) (((x) & 0x00800000) >> 23)
+#define PHY_BB_FRAME_CONTROL_EN_ERR_TIM_EARLY_TRIG_SET(x) (((x) << 23) & 0x00800000)
+#define PHY_BB_FRAME_CONTROL_EN_ERR_TIM_TIMEOUT_MSB 24
+#define PHY_BB_FRAME_CONTROL_EN_ERR_TIM_TIMEOUT_LSB 24
+#define PHY_BB_FRAME_CONTROL_EN_ERR_TIM_TIMEOUT_MASK 0x01000000
+#define PHY_BB_FRAME_CONTROL_EN_ERR_TIM_TIMEOUT_GET(x) (((x) & 0x01000000) >> 24)
+#define PHY_BB_FRAME_CONTROL_EN_ERR_TIM_TIMEOUT_SET(x) (((x) << 24) & 0x01000000)
+#define PHY_BB_FRAME_CONTROL_EN_ERR_SIGNAL_PARITY_MSB 25
+#define PHY_BB_FRAME_CONTROL_EN_ERR_SIGNAL_PARITY_LSB 25
+#define PHY_BB_FRAME_CONTROL_EN_ERR_SIGNAL_PARITY_MASK 0x02000000
+#define PHY_BB_FRAME_CONTROL_EN_ERR_SIGNAL_PARITY_GET(x) (((x) & 0x02000000) >> 25)
+#define PHY_BB_FRAME_CONTROL_EN_ERR_SIGNAL_PARITY_SET(x) (((x) << 25) & 0x02000000)
+#define PHY_BB_FRAME_CONTROL_EN_ERR_RATE_ILLEGAL_MSB 26
+#define PHY_BB_FRAME_CONTROL_EN_ERR_RATE_ILLEGAL_LSB 26
+#define PHY_BB_FRAME_CONTROL_EN_ERR_RATE_ILLEGAL_MASK 0x04000000
+#define PHY_BB_FRAME_CONTROL_EN_ERR_RATE_ILLEGAL_GET(x) (((x) & 0x04000000) >> 26)
+#define PHY_BB_FRAME_CONTROL_EN_ERR_RATE_ILLEGAL_SET(x) (((x) << 26) & 0x04000000)
+#define PHY_BB_FRAME_CONTROL_EN_ERR_LENGTH_ILLEGAL_MSB 27
+#define PHY_BB_FRAME_CONTROL_EN_ERR_LENGTH_ILLEGAL_LSB 27
+#define PHY_BB_FRAME_CONTROL_EN_ERR_LENGTH_ILLEGAL_MASK 0x08000000
+#define PHY_BB_FRAME_CONTROL_EN_ERR_LENGTH_ILLEGAL_GET(x) (((x) & 0x08000000) >> 27)
+#define PHY_BB_FRAME_CONTROL_EN_ERR_LENGTH_ILLEGAL_SET(x) (((x) << 27) & 0x08000000)
+#define PHY_BB_FRAME_CONTROL_EN_ERR_HT_SERVICE_MSB 28
+#define PHY_BB_FRAME_CONTROL_EN_ERR_HT_SERVICE_LSB 28
+#define PHY_BB_FRAME_CONTROL_EN_ERR_HT_SERVICE_MASK 0x10000000
+#define PHY_BB_FRAME_CONTROL_EN_ERR_HT_SERVICE_GET(x) (((x) & 0x10000000) >> 28)
+#define PHY_BB_FRAME_CONTROL_EN_ERR_HT_SERVICE_SET(x) (((x) << 28) & 0x10000000)
+#define PHY_BB_FRAME_CONTROL_EN_ERR_SERVICE_MSB 29
+#define PHY_BB_FRAME_CONTROL_EN_ERR_SERVICE_LSB 29
+#define PHY_BB_FRAME_CONTROL_EN_ERR_SERVICE_MASK 0x20000000
+#define PHY_BB_FRAME_CONTROL_EN_ERR_SERVICE_GET(x) (((x) & 0x20000000) >> 29)
+#define PHY_BB_FRAME_CONTROL_EN_ERR_SERVICE_SET(x) (((x) << 29) & 0x20000000)
+#define PHY_BB_FRAME_CONTROL_EN_ERR_TX_UNDERRUN_MSB 30
+#define PHY_BB_FRAME_CONTROL_EN_ERR_TX_UNDERRUN_LSB 30
+#define PHY_BB_FRAME_CONTROL_EN_ERR_TX_UNDERRUN_MASK 0x40000000
+#define PHY_BB_FRAME_CONTROL_EN_ERR_TX_UNDERRUN_GET(x) (((x) & 0x40000000) >> 30)
+#define PHY_BB_FRAME_CONTROL_EN_ERR_TX_UNDERRUN_SET(x) (((x) << 30) & 0x40000000)
+#define PHY_BB_FRAME_CONTROL_EN_ERR_RX_ABORT_MSB 31
+#define PHY_BB_FRAME_CONTROL_EN_ERR_RX_ABORT_LSB 31
+#define PHY_BB_FRAME_CONTROL_EN_ERR_RX_ABORT_MASK 0x80000000
+#define PHY_BB_FRAME_CONTROL_EN_ERR_RX_ABORT_GET(x) (((x) & 0x80000000) >> 31)
+#define PHY_BB_FRAME_CONTROL_EN_ERR_RX_ABORT_SET(x) (((x) << 31) & 0x80000000)
+
+/* macros for BB_timing_control_6 */
+#define PHY_BB_TIMING_CONTROL_6_ADDRESS 0x00009948
+#define PHY_BB_TIMING_CONTROL_6_OFFSET 0x00009948
+#define PHY_BB_TIMING_CONTROL_6_HI_RSSI_THRESH_MSB 7
+#define PHY_BB_TIMING_CONTROL_6_HI_RSSI_THRESH_LSB 0
+#define PHY_BB_TIMING_CONTROL_6_HI_RSSI_THRESH_MASK 0x000000ff
+#define PHY_BB_TIMING_CONTROL_6_HI_RSSI_THRESH_GET(x) (((x) & 0x000000ff) >> 0)
+#define PHY_BB_TIMING_CONTROL_6_HI_RSSI_THRESH_SET(x) (((x) << 0) & 0x000000ff)
+#define PHY_BB_TIMING_CONTROL_6_EARLY_TRIGGER_THR_HI_RSSI_MSB 14
+#define PHY_BB_TIMING_CONTROL_6_EARLY_TRIGGER_THR_HI_RSSI_LSB 8
+#define PHY_BB_TIMING_CONTROL_6_EARLY_TRIGGER_THR_HI_RSSI_MASK 0x00007f00
+#define PHY_BB_TIMING_CONTROL_6_EARLY_TRIGGER_THR_HI_RSSI_GET(x) (((x) & 0x00007f00) >> 8)
+#define PHY_BB_TIMING_CONTROL_6_EARLY_TRIGGER_THR_HI_RSSI_SET(x) (((x) << 8) & 0x00007f00)
+#define PHY_BB_TIMING_CONTROL_6_OFDM_XCORR_THRESH_MSB 20
+#define PHY_BB_TIMING_CONTROL_6_OFDM_XCORR_THRESH_LSB 15
+#define PHY_BB_TIMING_CONTROL_6_OFDM_XCORR_THRESH_MASK 0x001f8000
+#define PHY_BB_TIMING_CONTROL_6_OFDM_XCORR_THRESH_GET(x) (((x) & 0x001f8000) >> 15)
+#define PHY_BB_TIMING_CONTROL_6_OFDM_XCORR_THRESH_SET(x) (((x) << 15) & 0x001f8000)
+#define PHY_BB_TIMING_CONTROL_6_OFDM_XCORR_THRESH_HI_RSSI_MSB 27
+#define PHY_BB_TIMING_CONTROL_6_OFDM_XCORR_THRESH_HI_RSSI_LSB 21
+#define PHY_BB_TIMING_CONTROL_6_OFDM_XCORR_THRESH_HI_RSSI_MASK 0x0fe00000
+#define PHY_BB_TIMING_CONTROL_6_OFDM_XCORR_THRESH_HI_RSSI_GET(x) (((x) & 0x0fe00000) >> 21)
+#define PHY_BB_TIMING_CONTROL_6_OFDM_XCORR_THRESH_HI_RSSI_SET(x) (((x) << 21) & 0x0fe00000)
+#define PHY_BB_TIMING_CONTROL_6_LONG_MEDIUM_RATIO_THR_MSB 31
+#define PHY_BB_TIMING_CONTROL_6_LONG_MEDIUM_RATIO_THR_LSB 28
+#define PHY_BB_TIMING_CONTROL_6_LONG_MEDIUM_RATIO_THR_MASK 0xf0000000
+#define PHY_BB_TIMING_CONTROL_6_LONG_MEDIUM_RATIO_THR_GET(x) (((x) & 0xf0000000) >> 28)
+#define PHY_BB_TIMING_CONTROL_6_LONG_MEDIUM_RATIO_THR_SET(x) (((x) << 28) & 0xf0000000)
+
+/* macros for BB_spur_mask_controls */
+#define PHY_BB_SPUR_MASK_CONTROLS_ADDRESS 0x0000994c
+#define PHY_BB_SPUR_MASK_CONTROLS_OFFSET 0x0000994c
+#define PHY_BB_SPUR_MASK_CONTROLS_SPUR_RSSI_THRESH_MSB 7
+#define PHY_BB_SPUR_MASK_CONTROLS_SPUR_RSSI_THRESH_LSB 0
+#define PHY_BB_SPUR_MASK_CONTROLS_SPUR_RSSI_THRESH_MASK 0x000000ff
+#define PHY_BB_SPUR_MASK_CONTROLS_SPUR_RSSI_THRESH_GET(x) (((x) & 0x000000ff) >> 0)
+#define PHY_BB_SPUR_MASK_CONTROLS_SPUR_RSSI_THRESH_SET(x) (((x) << 0) & 0x000000ff)
+#define PHY_BB_SPUR_MASK_CONTROLS_EN_VIT_SPUR_RSSI_MSB 8
+#define PHY_BB_SPUR_MASK_CONTROLS_EN_VIT_SPUR_RSSI_LSB 8
+#define PHY_BB_SPUR_MASK_CONTROLS_EN_VIT_SPUR_RSSI_MASK 0x00000100
+#define PHY_BB_SPUR_MASK_CONTROLS_EN_VIT_SPUR_RSSI_GET(x) (((x) & 0x00000100) >> 8)
+#define PHY_BB_SPUR_MASK_CONTROLS_EN_VIT_SPUR_RSSI_SET(x) (((x) << 8) & 0x00000100)
+#define PHY_BB_SPUR_MASK_CONTROLS_ENABLE_MASK_PPM_MSB 17
+#define PHY_BB_SPUR_MASK_CONTROLS_ENABLE_MASK_PPM_LSB 17
+#define PHY_BB_SPUR_MASK_CONTROLS_ENABLE_MASK_PPM_MASK 0x00020000
+#define PHY_BB_SPUR_MASK_CONTROLS_ENABLE_MASK_PPM_GET(x) (((x) & 0x00020000) >> 17)
+#define PHY_BB_SPUR_MASK_CONTROLS_ENABLE_MASK_PPM_SET(x) (((x) << 17) & 0x00020000)
+#define PHY_BB_SPUR_MASK_CONTROLS_MASK_RATE_CNTL_MSB 25
+#define PHY_BB_SPUR_MASK_CONTROLS_MASK_RATE_CNTL_LSB 18
+#define PHY_BB_SPUR_MASK_CONTROLS_MASK_RATE_CNTL_MASK 0x03fc0000
+#define PHY_BB_SPUR_MASK_CONTROLS_MASK_RATE_CNTL_GET(x) (((x) & 0x03fc0000) >> 18)
+#define PHY_BB_SPUR_MASK_CONTROLS_MASK_RATE_CNTL_SET(x) (((x) << 18) & 0x03fc0000)
+
+/* macros for BB_rx_iq_corr_b0 */
+#define PHY_BB_RX_IQ_CORR_B0_ADDRESS 0x00009950
+#define PHY_BB_RX_IQ_CORR_B0_OFFSET 0x00009950
+#define PHY_BB_RX_IQ_CORR_B0_RX_IQCORR_Q_Q_COFF_0_MSB 6
+#define PHY_BB_RX_IQ_CORR_B0_RX_IQCORR_Q_Q_COFF_0_LSB 0
+#define PHY_BB_RX_IQ_CORR_B0_RX_IQCORR_Q_Q_COFF_0_MASK 0x0000007f
+#define PHY_BB_RX_IQ_CORR_B0_RX_IQCORR_Q_Q_COFF_0_GET(x) (((x) & 0x0000007f) >> 0)
+#define PHY_BB_RX_IQ_CORR_B0_RX_IQCORR_Q_Q_COFF_0_SET(x) (((x) << 0) & 0x0000007f)
+#define PHY_BB_RX_IQ_CORR_B0_RX_IQCORR_Q_I_COFF_0_MSB 13
+#define PHY_BB_RX_IQ_CORR_B0_RX_IQCORR_Q_I_COFF_0_LSB 7
+#define PHY_BB_RX_IQ_CORR_B0_RX_IQCORR_Q_I_COFF_0_MASK 0x00003f80
+#define PHY_BB_RX_IQ_CORR_B0_RX_IQCORR_Q_I_COFF_0_GET(x) (((x) & 0x00003f80) >> 7)
+#define PHY_BB_RX_IQ_CORR_B0_RX_IQCORR_Q_I_COFF_0_SET(x) (((x) << 7) & 0x00003f80)
+#define PHY_BB_RX_IQ_CORR_B0_RX_IQCORR_ENABLE_MSB 14
+#define PHY_BB_RX_IQ_CORR_B0_RX_IQCORR_ENABLE_LSB 14
+#define PHY_BB_RX_IQ_CORR_B0_RX_IQCORR_ENABLE_MASK 0x00004000
+#define PHY_BB_RX_IQ_CORR_B0_RX_IQCORR_ENABLE_GET(x) (((x) & 0x00004000) >> 14)
+#define PHY_BB_RX_IQ_CORR_B0_RX_IQCORR_ENABLE_SET(x) (((x) << 14) & 0x00004000)
+#define PHY_BB_RX_IQ_CORR_B0_LOOPBACK_IQCORR_Q_Q_COFF_0_MSB 21
+#define PHY_BB_RX_IQ_CORR_B0_LOOPBACK_IQCORR_Q_Q_COFF_0_LSB 15
+#define PHY_BB_RX_IQ_CORR_B0_LOOPBACK_IQCORR_Q_Q_COFF_0_MASK 0x003f8000
+#define PHY_BB_RX_IQ_CORR_B0_LOOPBACK_IQCORR_Q_Q_COFF_0_GET(x) (((x) & 0x003f8000) >> 15)
+#define PHY_BB_RX_IQ_CORR_B0_LOOPBACK_IQCORR_Q_Q_COFF_0_SET(x) (((x) << 15) & 0x003f8000)
+#define PHY_BB_RX_IQ_CORR_B0_LOOPBACK_IQCORR_Q_I_COFF_0_MSB 28
+#define PHY_BB_RX_IQ_CORR_B0_LOOPBACK_IQCORR_Q_I_COFF_0_LSB 22
+#define PHY_BB_RX_IQ_CORR_B0_LOOPBACK_IQCORR_Q_I_COFF_0_MASK 0x1fc00000
+#define PHY_BB_RX_IQ_CORR_B0_LOOPBACK_IQCORR_Q_I_COFF_0_GET(x) (((x) & 0x1fc00000) >> 22)
+#define PHY_BB_RX_IQ_CORR_B0_LOOPBACK_IQCORR_Q_I_COFF_0_SET(x) (((x) << 22) & 0x1fc00000)
+#define PHY_BB_RX_IQ_CORR_B0_LOOPBACK_IQCORR_ENABLE_MSB 29
+#define PHY_BB_RX_IQ_CORR_B0_LOOPBACK_IQCORR_ENABLE_LSB 29
+#define PHY_BB_RX_IQ_CORR_B0_LOOPBACK_IQCORR_ENABLE_MASK 0x20000000
+#define PHY_BB_RX_IQ_CORR_B0_LOOPBACK_IQCORR_ENABLE_GET(x) (((x) & 0x20000000) >> 29)
+#define PHY_BB_RX_IQ_CORR_B0_LOOPBACK_IQCORR_ENABLE_SET(x) (((x) << 29) & 0x20000000)
+
+/* macros for BB_radar_detection */
+#define PHY_BB_RADAR_DETECTION_ADDRESS 0x00009954
+#define PHY_BB_RADAR_DETECTION_OFFSET 0x00009954
+#define PHY_BB_RADAR_DETECTION_PULSE_DETECT_ENABLE_MSB 0
+#define PHY_BB_RADAR_DETECTION_PULSE_DETECT_ENABLE_LSB 0
+#define PHY_BB_RADAR_DETECTION_PULSE_DETECT_ENABLE_MASK 0x00000001
+#define PHY_BB_RADAR_DETECTION_PULSE_DETECT_ENABLE_GET(x) (((x) & 0x00000001) >> 0)
+#define PHY_BB_RADAR_DETECTION_PULSE_DETECT_ENABLE_SET(x) (((x) << 0) & 0x00000001)
+#define PHY_BB_RADAR_DETECTION_PULSE_IN_BAND_THRESH_MSB 5
+#define PHY_BB_RADAR_DETECTION_PULSE_IN_BAND_THRESH_LSB 1
+#define PHY_BB_RADAR_DETECTION_PULSE_IN_BAND_THRESH_MASK 0x0000003e
+#define PHY_BB_RADAR_DETECTION_PULSE_IN_BAND_THRESH_GET(x) (((x) & 0x0000003e) >> 1)
+#define PHY_BB_RADAR_DETECTION_PULSE_IN_BAND_THRESH_SET(x) (((x) << 1) & 0x0000003e)
+#define PHY_BB_RADAR_DETECTION_PULSE_RSSI_THRESH_MSB 11
+#define PHY_BB_RADAR_DETECTION_PULSE_RSSI_THRESH_LSB 6
+#define PHY_BB_RADAR_DETECTION_PULSE_RSSI_THRESH_MASK 0x00000fc0
+#define PHY_BB_RADAR_DETECTION_PULSE_RSSI_THRESH_GET(x) (((x) & 0x00000fc0) >> 6)
+#define PHY_BB_RADAR_DETECTION_PULSE_RSSI_THRESH_SET(x) (((x) << 6) & 0x00000fc0)
+#define PHY_BB_RADAR_DETECTION_PULSE_HEIGHT_THRESH_MSB 17
+#define PHY_BB_RADAR_DETECTION_PULSE_HEIGHT_THRESH_LSB 12
+#define PHY_BB_RADAR_DETECTION_PULSE_HEIGHT_THRESH_MASK 0x0003f000
+#define PHY_BB_RADAR_DETECTION_PULSE_HEIGHT_THRESH_GET(x) (((x) & 0x0003f000) >> 12)
+#define PHY_BB_RADAR_DETECTION_PULSE_HEIGHT_THRESH_SET(x) (((x) << 12) & 0x0003f000)
+#define PHY_BB_RADAR_DETECTION_RADAR_RSSI_THRESH_MSB 23
+#define PHY_BB_RADAR_DETECTION_RADAR_RSSI_THRESH_LSB 18
+#define PHY_BB_RADAR_DETECTION_RADAR_RSSI_THRESH_MASK 0x00fc0000
+#define PHY_BB_RADAR_DETECTION_RADAR_RSSI_THRESH_GET(x) (((x) & 0x00fc0000) >> 18)
+#define PHY_BB_RADAR_DETECTION_RADAR_RSSI_THRESH_SET(x) (((x) << 18) & 0x00fc0000)
+#define PHY_BB_RADAR_DETECTION_RADAR_FIRPWR_THRESH_MSB 30
+#define PHY_BB_RADAR_DETECTION_RADAR_FIRPWR_THRESH_LSB 24
+#define PHY_BB_RADAR_DETECTION_RADAR_FIRPWR_THRESH_MASK 0x7f000000
+#define PHY_BB_RADAR_DETECTION_RADAR_FIRPWR_THRESH_GET(x) (((x) & 0x7f000000) >> 24)
+#define PHY_BB_RADAR_DETECTION_RADAR_FIRPWR_THRESH_SET(x) (((x) << 24) & 0x7f000000)
+#define PHY_BB_RADAR_DETECTION_ENABLE_RADAR_FFT_MSB 31
+#define PHY_BB_RADAR_DETECTION_ENABLE_RADAR_FFT_LSB 31
+#define PHY_BB_RADAR_DETECTION_ENABLE_RADAR_FFT_MASK 0x80000000
+#define PHY_BB_RADAR_DETECTION_ENABLE_RADAR_FFT_GET(x) (((x) & 0x80000000) >> 31)
+#define PHY_BB_RADAR_DETECTION_ENABLE_RADAR_FFT_SET(x) (((x) << 31) & 0x80000000)
+
+/* macros for BB_radar_detection_2 */
+#define PHY_BB_RADAR_DETECTION_2_ADDRESS 0x00009958
+#define PHY_BB_RADAR_DETECTION_2_OFFSET 0x00009958
+#define PHY_BB_RADAR_DETECTION_2_RADAR_LENGTH_MAX_MSB 7
+#define PHY_BB_RADAR_DETECTION_2_RADAR_LENGTH_MAX_LSB 0
+#define PHY_BB_RADAR_DETECTION_2_RADAR_LENGTH_MAX_MASK 0x000000ff
+#define PHY_BB_RADAR_DETECTION_2_RADAR_LENGTH_MAX_GET(x) (((x) & 0x000000ff) >> 0)
+#define PHY_BB_RADAR_DETECTION_2_RADAR_LENGTH_MAX_SET(x) (((x) << 0) & 0x000000ff)
+#define PHY_BB_RADAR_DETECTION_2_PULSE_RELSTEP_THRESH_MSB 12
+#define PHY_BB_RADAR_DETECTION_2_PULSE_RELSTEP_THRESH_LSB 8
+#define PHY_BB_RADAR_DETECTION_2_PULSE_RELSTEP_THRESH_MASK 0x00001f00
+#define PHY_BB_RADAR_DETECTION_2_PULSE_RELSTEP_THRESH_GET(x) (((x) & 0x00001f00) >> 8)
+#define PHY_BB_RADAR_DETECTION_2_PULSE_RELSTEP_THRESH_SET(x) (((x) << 8) & 0x00001f00)
+#define PHY_BB_RADAR_DETECTION_2_ENABLE_PULSE_RELSTEP_CHECK_MSB 13
+#define PHY_BB_RADAR_DETECTION_2_ENABLE_PULSE_RELSTEP_CHECK_LSB 13
+#define PHY_BB_RADAR_DETECTION_2_ENABLE_PULSE_RELSTEP_CHECK_MASK 0x00002000
+#define PHY_BB_RADAR_DETECTION_2_ENABLE_PULSE_RELSTEP_CHECK_GET(x) (((x) & 0x00002000) >> 13)
+#define PHY_BB_RADAR_DETECTION_2_ENABLE_PULSE_RELSTEP_CHECK_SET(x) (((x) << 13) & 0x00002000)
+#define PHY_BB_RADAR_DETECTION_2_ENABLE_MAX_RADAR_RSSI_MSB 14
+#define PHY_BB_RADAR_DETECTION_2_ENABLE_MAX_RADAR_RSSI_LSB 14
+#define PHY_BB_RADAR_DETECTION_2_ENABLE_MAX_RADAR_RSSI_MASK 0x00004000
+#define PHY_BB_RADAR_DETECTION_2_ENABLE_MAX_RADAR_RSSI_GET(x) (((x) & 0x00004000) >> 14)
+#define PHY_BB_RADAR_DETECTION_2_ENABLE_MAX_RADAR_RSSI_SET(x) (((x) << 14) & 0x00004000)
+#define PHY_BB_RADAR_DETECTION_2_ENABLE_BLOCK_RADAR_CHECK_MSB 15
+#define PHY_BB_RADAR_DETECTION_2_ENABLE_BLOCK_RADAR_CHECK_LSB 15
+#define PHY_BB_RADAR_DETECTION_2_ENABLE_BLOCK_RADAR_CHECK_MASK 0x00008000
+#define PHY_BB_RADAR_DETECTION_2_ENABLE_BLOCK_RADAR_CHECK_GET(x) (((x) & 0x00008000) >> 15)
+#define PHY_BB_RADAR_DETECTION_2_ENABLE_BLOCK_RADAR_CHECK_SET(x) (((x) << 15) & 0x00008000)
+#define PHY_BB_RADAR_DETECTION_2_RADAR_RELPWR_THRESH_MSB 21
+#define PHY_BB_RADAR_DETECTION_2_RADAR_RELPWR_THRESH_LSB 16
+#define PHY_BB_RADAR_DETECTION_2_RADAR_RELPWR_THRESH_MASK 0x003f0000
+#define PHY_BB_RADAR_DETECTION_2_RADAR_RELPWR_THRESH_GET(x) (((x) & 0x003f0000) >> 16)
+#define PHY_BB_RADAR_DETECTION_2_RADAR_RELPWR_THRESH_SET(x) (((x) << 16) & 0x003f0000)
+#define PHY_BB_RADAR_DETECTION_2_RADAR_USE_FIRPWR_128_MSB 22
+#define PHY_BB_RADAR_DETECTION_2_RADAR_USE_FIRPWR_128_LSB 22
+#define PHY_BB_RADAR_DETECTION_2_RADAR_USE_FIRPWR_128_MASK 0x00400000
+#define PHY_BB_RADAR_DETECTION_2_RADAR_USE_FIRPWR_128_GET(x) (((x) & 0x00400000) >> 22)
+#define PHY_BB_RADAR_DETECTION_2_RADAR_USE_FIRPWR_128_SET(x) (((x) << 22) & 0x00400000)
+#define PHY_BB_RADAR_DETECTION_2_ENABLE_RADAR_RELPWR_CHECK_MSB 23
+#define PHY_BB_RADAR_DETECTION_2_ENABLE_RADAR_RELPWR_CHECK_LSB 23
+#define PHY_BB_RADAR_DETECTION_2_ENABLE_RADAR_RELPWR_CHECK_MASK 0x00800000
+#define PHY_BB_RADAR_DETECTION_2_ENABLE_RADAR_RELPWR_CHECK_GET(x) (((x) & 0x00800000) >> 23)
+#define PHY_BB_RADAR_DETECTION_2_ENABLE_RADAR_RELPWR_CHECK_SET(x) (((x) << 23) & 0x00800000)
+#define PHY_BB_RADAR_DETECTION_2_CF_RADAR_BIN_THRESH_SEL_MSB 26
+#define PHY_BB_RADAR_DETECTION_2_CF_RADAR_BIN_THRESH_SEL_LSB 24
+#define PHY_BB_RADAR_DETECTION_2_CF_RADAR_BIN_THRESH_SEL_MASK 0x07000000
+#define PHY_BB_RADAR_DETECTION_2_CF_RADAR_BIN_THRESH_SEL_GET(x) (((x) & 0x07000000) >> 24)
+#define PHY_BB_RADAR_DETECTION_2_CF_RADAR_BIN_THRESH_SEL_SET(x) (((x) << 24) & 0x07000000)
+#define PHY_BB_RADAR_DETECTION_2_ENABLE_PULSE_GC_COUNT_CHECK_MSB 27
+#define PHY_BB_RADAR_DETECTION_2_ENABLE_PULSE_GC_COUNT_CHECK_LSB 27
+#define PHY_BB_RADAR_DETECTION_2_ENABLE_PULSE_GC_COUNT_CHECK_MASK 0x08000000
+#define PHY_BB_RADAR_DETECTION_2_ENABLE_PULSE_GC_COUNT_CHECK_GET(x) (((x) & 0x08000000) >> 27)
+#define PHY_BB_RADAR_DETECTION_2_ENABLE_PULSE_GC_COUNT_CHECK_SET(x) (((x) << 27) & 0x08000000)
+
+/* macros for BB_tx_phase_ramp_b0 */
+#define PHY_BB_TX_PHASE_RAMP_B0_ADDRESS 0x0000995c
+#define PHY_BB_TX_PHASE_RAMP_B0_OFFSET 0x0000995c
+#define PHY_BB_TX_PHASE_RAMP_B0_CF_PHASE_RAMP_ENABLE_0_MSB 0
+#define PHY_BB_TX_PHASE_RAMP_B0_CF_PHASE_RAMP_ENABLE_0_LSB 0
+#define PHY_BB_TX_PHASE_RAMP_B0_CF_PHASE_RAMP_ENABLE_0_MASK 0x00000001
+#define PHY_BB_TX_PHASE_RAMP_B0_CF_PHASE_RAMP_ENABLE_0_GET(x) (((x) & 0x00000001) >> 0)
+#define PHY_BB_TX_PHASE_RAMP_B0_CF_PHASE_RAMP_ENABLE_0_SET(x) (((x) << 0) & 0x00000001)
+#define PHY_BB_TX_PHASE_RAMP_B0_CF_PHASE_RAMP_BIAS_0_MSB 6
+#define PHY_BB_TX_PHASE_RAMP_B0_CF_PHASE_RAMP_BIAS_0_LSB 1
+#define PHY_BB_TX_PHASE_RAMP_B0_CF_PHASE_RAMP_BIAS_0_MASK 0x0000007e
+#define PHY_BB_TX_PHASE_RAMP_B0_CF_PHASE_RAMP_BIAS_0_GET(x) (((x) & 0x0000007e) >> 1)
+#define PHY_BB_TX_PHASE_RAMP_B0_CF_PHASE_RAMP_BIAS_0_SET(x) (((x) << 1) & 0x0000007e)
+#define PHY_BB_TX_PHASE_RAMP_B0_CF_PHASE_RAMP_INIT_0_MSB 16
+#define PHY_BB_TX_PHASE_RAMP_B0_CF_PHASE_RAMP_INIT_0_LSB 7
+#define PHY_BB_TX_PHASE_RAMP_B0_CF_PHASE_RAMP_INIT_0_MASK 0x0001ff80
+#define PHY_BB_TX_PHASE_RAMP_B0_CF_PHASE_RAMP_INIT_0_GET(x) (((x) & 0x0001ff80) >> 7)
+#define PHY_BB_TX_PHASE_RAMP_B0_CF_PHASE_RAMP_INIT_0_SET(x) (((x) << 7) & 0x0001ff80)
+#define PHY_BB_TX_PHASE_RAMP_B0_CF_PHASE_RAMP_ALPHA_0_MSB 24
+#define PHY_BB_TX_PHASE_RAMP_B0_CF_PHASE_RAMP_ALPHA_0_LSB 17
+#define PHY_BB_TX_PHASE_RAMP_B0_CF_PHASE_RAMP_ALPHA_0_MASK 0x01fe0000
+#define PHY_BB_TX_PHASE_RAMP_B0_CF_PHASE_RAMP_ALPHA_0_GET(x) (((x) & 0x01fe0000) >> 17)
+#define PHY_BB_TX_PHASE_RAMP_B0_CF_PHASE_RAMP_ALPHA_0_SET(x) (((x) << 17) & 0x01fe0000)
+
+/* macros for BB_switch_table_chn_b0 */
+#define PHY_BB_SWITCH_TABLE_CHN_B0_ADDRESS 0x00009960
+#define PHY_BB_SWITCH_TABLE_CHN_B0_OFFSET 0x00009960
+#define PHY_BB_SWITCH_TABLE_CHN_B0_SWITCH_TABLE_IDLE_0_MSB 1
+#define PHY_BB_SWITCH_TABLE_CHN_B0_SWITCH_TABLE_IDLE_0_LSB 0
+#define PHY_BB_SWITCH_TABLE_CHN_B0_SWITCH_TABLE_IDLE_0_MASK 0x00000003
+#define PHY_BB_SWITCH_TABLE_CHN_B0_SWITCH_TABLE_IDLE_0_GET(x) (((x) & 0x00000003) >> 0)
+#define PHY_BB_SWITCH_TABLE_CHN_B0_SWITCH_TABLE_IDLE_0_SET(x) (((x) << 0) & 0x00000003)
+#define PHY_BB_SWITCH_TABLE_CHN_B0_SWITCH_TABLE_T_0_MSB 3
+#define PHY_BB_SWITCH_TABLE_CHN_B0_SWITCH_TABLE_T_0_LSB 2
+#define PHY_BB_SWITCH_TABLE_CHN_B0_SWITCH_TABLE_T_0_MASK 0x0000000c
+#define PHY_BB_SWITCH_TABLE_CHN_B0_SWITCH_TABLE_T_0_GET(x) (((x) & 0x0000000c) >> 2)
+#define PHY_BB_SWITCH_TABLE_CHN_B0_SWITCH_TABLE_T_0_SET(x) (((x) << 2) & 0x0000000c)
+#define PHY_BB_SWITCH_TABLE_CHN_B0_SWITCH_TABLE_R_0_MSB 5
+#define PHY_BB_SWITCH_TABLE_CHN_B0_SWITCH_TABLE_R_0_LSB 4
+#define PHY_BB_SWITCH_TABLE_CHN_B0_SWITCH_TABLE_R_0_MASK 0x00000030
+#define PHY_BB_SWITCH_TABLE_CHN_B0_SWITCH_TABLE_R_0_GET(x) (((x) & 0x00000030) >> 4)
+#define PHY_BB_SWITCH_TABLE_CHN_B0_SWITCH_TABLE_R_0_SET(x) (((x) << 4) & 0x00000030)
+#define PHY_BB_SWITCH_TABLE_CHN_B0_SWITCH_TABLE_RX1_0_MSB 7
+#define PHY_BB_SWITCH_TABLE_CHN_B0_SWITCH_TABLE_RX1_0_LSB 6
+#define PHY_BB_SWITCH_TABLE_CHN_B0_SWITCH_TABLE_RX1_0_MASK 0x000000c0
+#define PHY_BB_SWITCH_TABLE_CHN_B0_SWITCH_TABLE_RX1_0_GET(x) (((x) & 0x000000c0) >> 6)
+#define PHY_BB_SWITCH_TABLE_CHN_B0_SWITCH_TABLE_RX1_0_SET(x) (((x) << 6) & 0x000000c0)
+#define PHY_BB_SWITCH_TABLE_CHN_B0_SWITCH_TABLE_RX12_0_MSB 9
+#define PHY_BB_SWITCH_TABLE_CHN_B0_SWITCH_TABLE_RX12_0_LSB 8
+#define PHY_BB_SWITCH_TABLE_CHN_B0_SWITCH_TABLE_RX12_0_MASK 0x00000300
+#define PHY_BB_SWITCH_TABLE_CHN_B0_SWITCH_TABLE_RX12_0_GET(x) (((x) & 0x00000300) >> 8)
+#define PHY_BB_SWITCH_TABLE_CHN_B0_SWITCH_TABLE_RX12_0_SET(x) (((x) << 8) & 0x00000300)
+#define PHY_BB_SWITCH_TABLE_CHN_B0_SWITCH_TABLE_B_0_MSB 11
+#define PHY_BB_SWITCH_TABLE_CHN_B0_SWITCH_TABLE_B_0_LSB 10
+#define PHY_BB_SWITCH_TABLE_CHN_B0_SWITCH_TABLE_B_0_MASK 0x00000c00
+#define PHY_BB_SWITCH_TABLE_CHN_B0_SWITCH_TABLE_B_0_GET(x) (((x) & 0x00000c00) >> 10)
+#define PHY_BB_SWITCH_TABLE_CHN_B0_SWITCH_TABLE_B_0_SET(x) (((x) << 10) & 0x00000c00)
+
+/* macros for BB_switch_table_com1 */
+#define PHY_BB_SWITCH_TABLE_COM1_ADDRESS 0x00009964
+#define PHY_BB_SWITCH_TABLE_COM1_OFFSET 0x00009964
+#define PHY_BB_SWITCH_TABLE_COM1_SWITCH_TABLE_COM_IDLE_MSB 3
+#define PHY_BB_SWITCH_TABLE_COM1_SWITCH_TABLE_COM_IDLE_LSB 0
+#define PHY_BB_SWITCH_TABLE_COM1_SWITCH_TABLE_COM_IDLE_MASK 0x0000000f
+#define PHY_BB_SWITCH_TABLE_COM1_SWITCH_TABLE_COM_IDLE_GET(x) (((x) & 0x0000000f) >> 0)
+#define PHY_BB_SWITCH_TABLE_COM1_SWITCH_TABLE_COM_IDLE_SET(x) (((x) << 0) & 0x0000000f)
+#define PHY_BB_SWITCH_TABLE_COM1_SWITCH_TABLE_COM_T1_MSB 7
+#define PHY_BB_SWITCH_TABLE_COM1_SWITCH_TABLE_COM_T1_LSB 4
+#define PHY_BB_SWITCH_TABLE_COM1_SWITCH_TABLE_COM_T1_MASK 0x000000f0
+#define PHY_BB_SWITCH_TABLE_COM1_SWITCH_TABLE_COM_T1_GET(x) (((x) & 0x000000f0) >> 4)
+#define PHY_BB_SWITCH_TABLE_COM1_SWITCH_TABLE_COM_T1_SET(x) (((x) << 4) & 0x000000f0)
+#define PHY_BB_SWITCH_TABLE_COM1_SWITCH_TABLE_COM_T2_MSB 11
+#define PHY_BB_SWITCH_TABLE_COM1_SWITCH_TABLE_COM_T2_LSB 8
+#define PHY_BB_SWITCH_TABLE_COM1_SWITCH_TABLE_COM_T2_MASK 0x00000f00
+#define PHY_BB_SWITCH_TABLE_COM1_SWITCH_TABLE_COM_T2_GET(x) (((x) & 0x00000f00) >> 8)
+#define PHY_BB_SWITCH_TABLE_COM1_SWITCH_TABLE_COM_T2_SET(x) (((x) << 8) & 0x00000f00)
+#define PHY_BB_SWITCH_TABLE_COM1_SWITCH_TABLE_COM_B_MSB 15
+#define PHY_BB_SWITCH_TABLE_COM1_SWITCH_TABLE_COM_B_LSB 12
+#define PHY_BB_SWITCH_TABLE_COM1_SWITCH_TABLE_COM_B_MASK 0x0000f000
+#define PHY_BB_SWITCH_TABLE_COM1_SWITCH_TABLE_COM_B_GET(x) (((x) & 0x0000f000) >> 12)
+#define PHY_BB_SWITCH_TABLE_COM1_SWITCH_TABLE_COM_B_SET(x) (((x) << 12) & 0x0000f000)
+
+/* macros for BB_cca_ctrl_2_b0 */
+#define PHY_BB_CCA_CTRL_2_B0_ADDRESS 0x00009968
+#define PHY_BB_CCA_CTRL_2_B0_OFFSET 0x00009968
+#define PHY_BB_CCA_CTRL_2_B0_MINCCAPWR_THR_0_MSB 8
+#define PHY_BB_CCA_CTRL_2_B0_MINCCAPWR_THR_0_LSB 0
+#define PHY_BB_CCA_CTRL_2_B0_MINCCAPWR_THR_0_MASK 0x000001ff
+#define PHY_BB_CCA_CTRL_2_B0_MINCCAPWR_THR_0_GET(x) (((x) & 0x000001ff) >> 0)
+#define PHY_BB_CCA_CTRL_2_B0_MINCCAPWR_THR_0_SET(x) (((x) << 0) & 0x000001ff)
+#define PHY_BB_CCA_CTRL_2_B0_ENABLE_MINCCAPWR_THR_MSB 9
+#define PHY_BB_CCA_CTRL_2_B0_ENABLE_MINCCAPWR_THR_LSB 9
+#define PHY_BB_CCA_CTRL_2_B0_ENABLE_MINCCAPWR_THR_MASK 0x00000200
+#define PHY_BB_CCA_CTRL_2_B0_ENABLE_MINCCAPWR_THR_GET(x) (((x) & 0x00000200) >> 9)
+#define PHY_BB_CCA_CTRL_2_B0_ENABLE_MINCCAPWR_THR_SET(x) (((x) << 9) & 0x00000200)
+#define PHY_BB_CCA_CTRL_2_B0_NF_GAIN_COMP_0_MSB 17
+#define PHY_BB_CCA_CTRL_2_B0_NF_GAIN_COMP_0_LSB 10
+#define PHY_BB_CCA_CTRL_2_B0_NF_GAIN_COMP_0_MASK 0x0003fc00
+#define PHY_BB_CCA_CTRL_2_B0_NF_GAIN_COMP_0_GET(x) (((x) & 0x0003fc00) >> 10)
+#define PHY_BB_CCA_CTRL_2_B0_NF_GAIN_COMP_0_SET(x) (((x) << 10) & 0x0003fc00)
+#define PHY_BB_CCA_CTRL_2_B0_THRESH62_MODE_MSB 18
+#define PHY_BB_CCA_CTRL_2_B0_THRESH62_MODE_LSB 18
+#define PHY_BB_CCA_CTRL_2_B0_THRESH62_MODE_MASK 0x00040000
+#define PHY_BB_CCA_CTRL_2_B0_THRESH62_MODE_GET(x) (((x) & 0x00040000) >> 18)
+#define PHY_BB_CCA_CTRL_2_B0_THRESH62_MODE_SET(x) (((x) << 18) & 0x00040000)
+
+/* macros for BB_switch_table_com2 */
+#define PHY_BB_SWITCH_TABLE_COM2_ADDRESS 0x0000996c
+#define PHY_BB_SWITCH_TABLE_COM2_OFFSET 0x0000996c
+#define PHY_BB_SWITCH_TABLE_COM2_SWITCH_TABLE_COM_RA1NXAL1_MSB 3
+#define PHY_BB_SWITCH_TABLE_COM2_SWITCH_TABLE_COM_RA1NXAL1_LSB 0
+#define PHY_BB_SWITCH_TABLE_COM2_SWITCH_TABLE_COM_RA1NXAL1_MASK 0x0000000f
+#define PHY_BB_SWITCH_TABLE_COM2_SWITCH_TABLE_COM_RA1NXAL1_GET(x) (((x) & 0x0000000f) >> 0)
+#define PHY_BB_SWITCH_TABLE_COM2_SWITCH_TABLE_COM_RA1NXAL1_SET(x) (((x) << 0) & 0x0000000f)
+#define PHY_BB_SWITCH_TABLE_COM2_SWITCH_TABLE_COM_RA2NXAL1_MSB 7
+#define PHY_BB_SWITCH_TABLE_COM2_SWITCH_TABLE_COM_RA2NXAL1_LSB 4
+#define PHY_BB_SWITCH_TABLE_COM2_SWITCH_TABLE_COM_RA2NXAL1_MASK 0x000000f0
+#define PHY_BB_SWITCH_TABLE_COM2_SWITCH_TABLE_COM_RA2NXAL1_GET(x) (((x) & 0x000000f0) >> 4)
+#define PHY_BB_SWITCH_TABLE_COM2_SWITCH_TABLE_COM_RA2NXAL1_SET(x) (((x) << 4) & 0x000000f0)
+#define PHY_BB_SWITCH_TABLE_COM2_SWITCH_TABLE_COM_RA1XAL1_MSB 11
+#define PHY_BB_SWITCH_TABLE_COM2_SWITCH_TABLE_COM_RA1XAL1_LSB 8
+#define PHY_BB_SWITCH_TABLE_COM2_SWITCH_TABLE_COM_RA1XAL1_MASK 0x00000f00
+#define PHY_BB_SWITCH_TABLE_COM2_SWITCH_TABLE_COM_RA1XAL1_GET(x) (((x) & 0x00000f00) >> 8)
+#define PHY_BB_SWITCH_TABLE_COM2_SWITCH_TABLE_COM_RA1XAL1_SET(x) (((x) << 8) & 0x00000f00)
+#define PHY_BB_SWITCH_TABLE_COM2_SWITCH_TABLE_COM_RA2XAL1_MSB 15
+#define PHY_BB_SWITCH_TABLE_COM2_SWITCH_TABLE_COM_RA2XAL1_LSB 12
+#define PHY_BB_SWITCH_TABLE_COM2_SWITCH_TABLE_COM_RA2XAL1_MASK 0x0000f000
+#define PHY_BB_SWITCH_TABLE_COM2_SWITCH_TABLE_COM_RA2XAL1_GET(x) (((x) & 0x0000f000) >> 12)
+#define PHY_BB_SWITCH_TABLE_COM2_SWITCH_TABLE_COM_RA2XAL1_SET(x) (((x) << 12) & 0x0000f000)
+#define PHY_BB_SWITCH_TABLE_COM2_SWITCH_TABLE_COM_RA1NXAL2_MSB 19
+#define PHY_BB_SWITCH_TABLE_COM2_SWITCH_TABLE_COM_RA1NXAL2_LSB 16
+#define PHY_BB_SWITCH_TABLE_COM2_SWITCH_TABLE_COM_RA1NXAL2_MASK 0x000f0000
+#define PHY_BB_SWITCH_TABLE_COM2_SWITCH_TABLE_COM_RA1NXAL2_GET(x) (((x) & 0x000f0000) >> 16)
+#define PHY_BB_SWITCH_TABLE_COM2_SWITCH_TABLE_COM_RA1NXAL2_SET(x) (((x) << 16) & 0x000f0000)
+#define PHY_BB_SWITCH_TABLE_COM2_SWITCH_TABLE_COM_RA2NXAL2_MSB 23
+#define PHY_BB_SWITCH_TABLE_COM2_SWITCH_TABLE_COM_RA2NXAL2_LSB 20
+#define PHY_BB_SWITCH_TABLE_COM2_SWITCH_TABLE_COM_RA2NXAL2_MASK 0x00f00000
+#define PHY_BB_SWITCH_TABLE_COM2_SWITCH_TABLE_COM_RA2NXAL2_GET(x) (((x) & 0x00f00000) >> 20)
+#define PHY_BB_SWITCH_TABLE_COM2_SWITCH_TABLE_COM_RA2NXAL2_SET(x) (((x) << 20) & 0x00f00000)
+#define PHY_BB_SWITCH_TABLE_COM2_SWITCH_TABLE_COM_RA1XAL2_MSB 27
+#define PHY_BB_SWITCH_TABLE_COM2_SWITCH_TABLE_COM_RA1XAL2_LSB 24
+#define PHY_BB_SWITCH_TABLE_COM2_SWITCH_TABLE_COM_RA1XAL2_MASK 0x0f000000
+#define PHY_BB_SWITCH_TABLE_COM2_SWITCH_TABLE_COM_RA1XAL2_GET(x) (((x) & 0x0f000000) >> 24)
+#define PHY_BB_SWITCH_TABLE_COM2_SWITCH_TABLE_COM_RA1XAL2_SET(x) (((x) << 24) & 0x0f000000)
+#define PHY_BB_SWITCH_TABLE_COM2_SWITCH_TABLE_COM_RA2XAL2_MSB 31
+#define PHY_BB_SWITCH_TABLE_COM2_SWITCH_TABLE_COM_RA2XAL2_LSB 28
+#define PHY_BB_SWITCH_TABLE_COM2_SWITCH_TABLE_COM_RA2XAL2_MASK 0xf0000000
+#define PHY_BB_SWITCH_TABLE_COM2_SWITCH_TABLE_COM_RA2XAL2_GET(x) (((x) & 0xf0000000) >> 28)
+#define PHY_BB_SWITCH_TABLE_COM2_SWITCH_TABLE_COM_RA2XAL2_SET(x) (((x) << 28) & 0xf0000000)
+
+/* macros for BB_restart */
+#define PHY_BB_RESTART_ADDRESS 0x00009970
+#define PHY_BB_RESTART_OFFSET 0x00009970
+#define PHY_BB_RESTART_ENABLE_RESTART_MSB 0
+#define PHY_BB_RESTART_ENABLE_RESTART_LSB 0
+#define PHY_BB_RESTART_ENABLE_RESTART_MASK 0x00000001
+#define PHY_BB_RESTART_ENABLE_RESTART_GET(x) (((x) & 0x00000001) >> 0)
+#define PHY_BB_RESTART_ENABLE_RESTART_SET(x) (((x) << 0) & 0x00000001)
+#define PHY_BB_RESTART_RESTART_LGFIRPWR_DELTA_MSB 5
+#define PHY_BB_RESTART_RESTART_LGFIRPWR_DELTA_LSB 1
+#define PHY_BB_RESTART_RESTART_LGFIRPWR_DELTA_MASK 0x0000003e
+#define PHY_BB_RESTART_RESTART_LGFIRPWR_DELTA_GET(x) (((x) & 0x0000003e) >> 1)
+#define PHY_BB_RESTART_RESTART_LGFIRPWR_DELTA_SET(x) (((x) << 1) & 0x0000003e)
+#define PHY_BB_RESTART_ENABLE_PWR_DROP_ERR_MSB 6
+#define PHY_BB_RESTART_ENABLE_PWR_DROP_ERR_LSB 6
+#define PHY_BB_RESTART_ENABLE_PWR_DROP_ERR_MASK 0x00000040
+#define PHY_BB_RESTART_ENABLE_PWR_DROP_ERR_GET(x) (((x) & 0x00000040) >> 6)
+#define PHY_BB_RESTART_ENABLE_PWR_DROP_ERR_SET(x) (((x) << 6) & 0x00000040)
+#define PHY_BB_RESTART_PWRDROP_LGFIRPWR_DELTA_MSB 11
+#define PHY_BB_RESTART_PWRDROP_LGFIRPWR_DELTA_LSB 7
+#define PHY_BB_RESTART_PWRDROP_LGFIRPWR_DELTA_MASK 0x00000f80
+#define PHY_BB_RESTART_PWRDROP_LGFIRPWR_DELTA_GET(x) (((x) & 0x00000f80) >> 7)
+#define PHY_BB_RESTART_PWRDROP_LGFIRPWR_DELTA_SET(x) (((x) << 7) & 0x00000f80)
+#define PHY_BB_RESTART_OFDM_CCK_RSSI_BIAS_MSB 17
+#define PHY_BB_RESTART_OFDM_CCK_RSSI_BIAS_LSB 12
+#define PHY_BB_RESTART_OFDM_CCK_RSSI_BIAS_MASK 0x0003f000
+#define PHY_BB_RESTART_OFDM_CCK_RSSI_BIAS_GET(x) (((x) & 0x0003f000) >> 12)
+#define PHY_BB_RESTART_OFDM_CCK_RSSI_BIAS_SET(x) (((x) << 12) & 0x0003f000)
+#define PHY_BB_RESTART_ANT_FAST_DIV_GC_LIMIT_MSB 20
+#define PHY_BB_RESTART_ANT_FAST_DIV_GC_LIMIT_LSB 18
+#define PHY_BB_RESTART_ANT_FAST_DIV_GC_LIMIT_MASK 0x001c0000
+#define PHY_BB_RESTART_ANT_FAST_DIV_GC_LIMIT_GET(x) (((x) & 0x001c0000) >> 18)
+#define PHY_BB_RESTART_ANT_FAST_DIV_GC_LIMIT_SET(x) (((x) << 18) & 0x001c0000)
+#define PHY_BB_RESTART_ENABLE_ANT_FAST_DIV_M2FLAG_MSB 21
+#define PHY_BB_RESTART_ENABLE_ANT_FAST_DIV_M2FLAG_LSB 21
+#define PHY_BB_RESTART_ENABLE_ANT_FAST_DIV_M2FLAG_MASK 0x00200000
+#define PHY_BB_RESTART_ENABLE_ANT_FAST_DIV_M2FLAG_GET(x) (((x) & 0x00200000) >> 21)
+#define PHY_BB_RESTART_ENABLE_ANT_FAST_DIV_M2FLAG_SET(x) (((x) << 21) & 0x00200000)
+#define PHY_BB_RESTART_WEAK_RSSI_VOTE_THR_MSB 28
+#define PHY_BB_RESTART_WEAK_RSSI_VOTE_THR_LSB 22
+#define PHY_BB_RESTART_WEAK_RSSI_VOTE_THR_MASK 0x1fc00000
+#define PHY_BB_RESTART_WEAK_RSSI_VOTE_THR_GET(x) (((x) & 0x1fc00000) >> 22)
+#define PHY_BB_RESTART_WEAK_RSSI_VOTE_THR_SET(x) (((x) << 22) & 0x1fc00000)
+#define PHY_BB_RESTART_ENABLE_PWR_DROP_ERR_CCK_MSB 29
+#define PHY_BB_RESTART_ENABLE_PWR_DROP_ERR_CCK_LSB 29
+#define PHY_BB_RESTART_ENABLE_PWR_DROP_ERR_CCK_MASK 0x20000000
+#define PHY_BB_RESTART_ENABLE_PWR_DROP_ERR_CCK_GET(x) (((x) & 0x20000000) >> 29)
+#define PHY_BB_RESTART_ENABLE_PWR_DROP_ERR_CCK_SET(x) (((x) << 29) & 0x20000000)
+#define PHY_BB_RESTART_DISABLE_DC_RESTART_MSB 30
+#define PHY_BB_RESTART_DISABLE_DC_RESTART_LSB 30
+#define PHY_BB_RESTART_DISABLE_DC_RESTART_MASK 0x40000000
+#define PHY_BB_RESTART_DISABLE_DC_RESTART_GET(x) (((x) & 0x40000000) >> 30)
+#define PHY_BB_RESTART_DISABLE_DC_RESTART_SET(x) (((x) << 30) & 0x40000000)
+#define PHY_BB_RESTART_RESTART_MODE_BW40_MSB 31
+#define PHY_BB_RESTART_RESTART_MODE_BW40_LSB 31
+#define PHY_BB_RESTART_RESTART_MODE_BW40_MASK 0x80000000
+#define PHY_BB_RESTART_RESTART_MODE_BW40_GET(x) (((x) & 0x80000000) >> 31)
+#define PHY_BB_RESTART_RESTART_MODE_BW40_SET(x) (((x) << 31) & 0x80000000)
+
+/* macros for BB_scrambler_seed */
+#define PHY_BB_SCRAMBLER_SEED_ADDRESS 0x00009978
+#define PHY_BB_SCRAMBLER_SEED_OFFSET 0x00009978
+#define PHY_BB_SCRAMBLER_SEED_FIXED_SCRAMBLER_SEED_MSB 6
+#define PHY_BB_SCRAMBLER_SEED_FIXED_SCRAMBLER_SEED_LSB 0
+#define PHY_BB_SCRAMBLER_SEED_FIXED_SCRAMBLER_SEED_MASK 0x0000007f
+#define PHY_BB_SCRAMBLER_SEED_FIXED_SCRAMBLER_SEED_GET(x) (((x) & 0x0000007f) >> 0)
+#define PHY_BB_SCRAMBLER_SEED_FIXED_SCRAMBLER_SEED_SET(x) (((x) << 0) & 0x0000007f)
+
+/* macros for BB_rfbus_request */
+#define PHY_BB_RFBUS_REQUEST_ADDRESS 0x0000997c
+#define PHY_BB_RFBUS_REQUEST_OFFSET 0x0000997c
+#define PHY_BB_RFBUS_REQUEST_RFBUS_REQUEST_MSB 0
+#define PHY_BB_RFBUS_REQUEST_RFBUS_REQUEST_LSB 0
+#define PHY_BB_RFBUS_REQUEST_RFBUS_REQUEST_MASK 0x00000001
+#define PHY_BB_RFBUS_REQUEST_RFBUS_REQUEST_GET(x) (((x) & 0x00000001) >> 0)
+#define PHY_BB_RFBUS_REQUEST_RFBUS_REQUEST_SET(x) (((x) << 0) & 0x00000001)
+
+/* macros for BB_timing_control_11 */
+#define PHY_BB_TIMING_CONTROL_11_ADDRESS 0x000099a0
+#define PHY_BB_TIMING_CONTROL_11_OFFSET 0x000099a0
+#define PHY_BB_TIMING_CONTROL_11_SPUR_DELTA_PHASE_MSB 19
+#define PHY_BB_TIMING_CONTROL_11_SPUR_DELTA_PHASE_LSB 0
+#define PHY_BB_TIMING_CONTROL_11_SPUR_DELTA_PHASE_MASK 0x000fffff
+#define PHY_BB_TIMING_CONTROL_11_SPUR_DELTA_PHASE_GET(x) (((x) & 0x000fffff) >> 0)
+#define PHY_BB_TIMING_CONTROL_11_SPUR_DELTA_PHASE_SET(x) (((x) << 0) & 0x000fffff)
+#define PHY_BB_TIMING_CONTROL_11_SPUR_FREQ_SD_MSB 29
+#define PHY_BB_TIMING_CONTROL_11_SPUR_FREQ_SD_LSB 20
+#define PHY_BB_TIMING_CONTROL_11_SPUR_FREQ_SD_MASK 0x3ff00000
+#define PHY_BB_TIMING_CONTROL_11_SPUR_FREQ_SD_GET(x) (((x) & 0x3ff00000) >> 20)
+#define PHY_BB_TIMING_CONTROL_11_SPUR_FREQ_SD_SET(x) (((x) << 20) & 0x3ff00000)
+#define PHY_BB_TIMING_CONTROL_11_USE_SPUR_FILTER_IN_AGC_MSB 30
+#define PHY_BB_TIMING_CONTROL_11_USE_SPUR_FILTER_IN_AGC_LSB 30
+#define PHY_BB_TIMING_CONTROL_11_USE_SPUR_FILTER_IN_AGC_MASK 0x40000000
+#define PHY_BB_TIMING_CONTROL_11_USE_SPUR_FILTER_IN_AGC_GET(x) (((x) & 0x40000000) >> 30)
+#define PHY_BB_TIMING_CONTROL_11_USE_SPUR_FILTER_IN_AGC_SET(x) (((x) << 30) & 0x40000000)
+#define PHY_BB_TIMING_CONTROL_11_USE_SPUR_FILTER_IN_SELFCOR_MSB 31
+#define PHY_BB_TIMING_CONTROL_11_USE_SPUR_FILTER_IN_SELFCOR_LSB 31
+#define PHY_BB_TIMING_CONTROL_11_USE_SPUR_FILTER_IN_SELFCOR_MASK 0x80000000
+#define PHY_BB_TIMING_CONTROL_11_USE_SPUR_FILTER_IN_SELFCOR_GET(x) (((x) & 0x80000000) >> 31)
+#define PHY_BB_TIMING_CONTROL_11_USE_SPUR_FILTER_IN_SELFCOR_SET(x) (((x) << 31) & 0x80000000)
+
+/* macros for BB_multichain_enable */
+#define PHY_BB_MULTICHAIN_ENABLE_ADDRESS 0x000099a4
+#define PHY_BB_MULTICHAIN_ENABLE_OFFSET 0x000099a4
+#define PHY_BB_MULTICHAIN_ENABLE_RX_CHAIN_MASK_MSB 2
+#define PHY_BB_MULTICHAIN_ENABLE_RX_CHAIN_MASK_LSB 0
+#define PHY_BB_MULTICHAIN_ENABLE_RX_CHAIN_MASK_MASK 0x00000007
+#define PHY_BB_MULTICHAIN_ENABLE_RX_CHAIN_MASK_GET(x) (((x) & 0x00000007) >> 0)
+#define PHY_BB_MULTICHAIN_ENABLE_RX_CHAIN_MASK_SET(x) (((x) << 0) & 0x00000007)
+
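+/*
+ * Usage sketch (illustrative only, not part of the generated register map):
+ * each read-write field above pairs a _MASK with _GET/_SET helpers, so
+ * updating one field is a plain read-modify-write of the 32-bit register.
+ * The reg_read()/reg_write() accessors below are hypothetical placeholders
+ * for the driver's own MMIO routines.
+ *
+ *	u32 val = reg_read(PHY_BB_MULTICHAIN_ENABLE_ADDRESS);
+ *	val &= ~PHY_BB_MULTICHAIN_ENABLE_RX_CHAIN_MASK_MASK;
+ *	val |= PHY_BB_MULTICHAIN_ENABLE_RX_CHAIN_MASK_SET(0x7);
+ *	reg_write(PHY_BB_MULTICHAIN_ENABLE_ADDRESS, val);
+ */
+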
+/* macros for BB_multichain_control */
+#define PHY_BB_MULTICHAIN_CONTROL_ADDRESS 0x000099a8
+#define PHY_BB_MULTICHAIN_CONTROL_OFFSET 0x000099a8
+#define PHY_BB_MULTICHAIN_CONTROL_FORCE_ANALOG_GAIN_DIFF_MSB 0
+#define PHY_BB_MULTICHAIN_CONTROL_FORCE_ANALOG_GAIN_DIFF_LSB 0
+#define PHY_BB_MULTICHAIN_CONTROL_FORCE_ANALOG_GAIN_DIFF_MASK 0x00000001
+#define PHY_BB_MULTICHAIN_CONTROL_FORCE_ANALOG_GAIN_DIFF_GET(x) (((x) & 0x00000001) >> 0)
+#define PHY_BB_MULTICHAIN_CONTROL_FORCE_ANALOG_GAIN_DIFF_SET(x) (((x) << 0) & 0x00000001)
+#define PHY_BB_MULTICHAIN_CONTROL_FORCED_GAIN_DIFF_01_MSB 7
+#define PHY_BB_MULTICHAIN_CONTROL_FORCED_GAIN_DIFF_01_LSB 1
+#define PHY_BB_MULTICHAIN_CONTROL_FORCED_GAIN_DIFF_01_MASK 0x000000fe
+#define PHY_BB_MULTICHAIN_CONTROL_FORCED_GAIN_DIFF_01_GET(x) (((x) & 0x000000fe) >> 1)
+#define PHY_BB_MULTICHAIN_CONTROL_FORCED_GAIN_DIFF_01_SET(x) (((x) << 1) & 0x000000fe)
+#define PHY_BB_MULTICHAIN_CONTROL_SYNC_SYNTHON_MSB 8
+#define PHY_BB_MULTICHAIN_CONTROL_SYNC_SYNTHON_LSB 8
+#define PHY_BB_MULTICHAIN_CONTROL_SYNC_SYNTHON_MASK 0x00000100
+#define PHY_BB_MULTICHAIN_CONTROL_SYNC_SYNTHON_GET(x) (((x) & 0x00000100) >> 8)
+#define PHY_BB_MULTICHAIN_CONTROL_SYNC_SYNTHON_SET(x) (((x) << 8) & 0x00000100)
+#define PHY_BB_MULTICHAIN_CONTROL_USE_POSEDGE_REFCLK_MSB 9
+#define PHY_BB_MULTICHAIN_CONTROL_USE_POSEDGE_REFCLK_LSB 9
+#define PHY_BB_MULTICHAIN_CONTROL_USE_POSEDGE_REFCLK_MASK 0x00000200
+#define PHY_BB_MULTICHAIN_CONTROL_USE_POSEDGE_REFCLK_GET(x) (((x) & 0x00000200) >> 9)
+#define PHY_BB_MULTICHAIN_CONTROL_USE_POSEDGE_REFCLK_SET(x) (((x) << 9) & 0x00000200)
+#define PHY_BB_MULTICHAIN_CONTROL_CF_SHORT_SAT_MSB 20
+#define PHY_BB_MULTICHAIN_CONTROL_CF_SHORT_SAT_LSB 10
+#define PHY_BB_MULTICHAIN_CONTROL_CF_SHORT_SAT_MASK 0x001ffc00
+#define PHY_BB_MULTICHAIN_CONTROL_CF_SHORT_SAT_GET(x) (((x) & 0x001ffc00) >> 10)
+#define PHY_BB_MULTICHAIN_CONTROL_CF_SHORT_SAT_SET(x) (((x) << 10) & 0x001ffc00)
+#define PHY_BB_MULTICHAIN_CONTROL_FORCED_GAIN_DIFF_02_MSB 28
+#define PHY_BB_MULTICHAIN_CONTROL_FORCED_GAIN_DIFF_02_LSB 22
+#define PHY_BB_MULTICHAIN_CONTROL_FORCED_GAIN_DIFF_02_MASK 0x1fc00000
+#define PHY_BB_MULTICHAIN_CONTROL_FORCED_GAIN_DIFF_02_GET(x) (((x) & 0x1fc00000) >> 22)
+#define PHY_BB_MULTICHAIN_CONTROL_FORCED_GAIN_DIFF_02_SET(x) (((x) << 22) & 0x1fc00000)
+#define PHY_BB_MULTICHAIN_CONTROL_FORCE_SIGMA_ZERO_MSB 29
+#define PHY_BB_MULTICHAIN_CONTROL_FORCE_SIGMA_ZERO_LSB 29
+#define PHY_BB_MULTICHAIN_CONTROL_FORCE_SIGMA_ZERO_MASK 0x20000000
+#define PHY_BB_MULTICHAIN_CONTROL_FORCE_SIGMA_ZERO_GET(x) (((x) & 0x20000000) >> 29)
+#define PHY_BB_MULTICHAIN_CONTROL_FORCE_SIGMA_ZERO_SET(x) (((x) << 29) & 0x20000000)
+
+/* macros for BB_multichain_gain_ctrl */
+#define PHY_BB_MULTICHAIN_GAIN_CTRL_ADDRESS 0x000099ac
+#define PHY_BB_MULTICHAIN_GAIN_CTRL_OFFSET 0x000099ac
+#define PHY_BB_MULTICHAIN_GAIN_CTRL_QUICKDROP_LOW_MSB 7
+#define PHY_BB_MULTICHAIN_GAIN_CTRL_QUICKDROP_LOW_LSB 0
+#define PHY_BB_MULTICHAIN_GAIN_CTRL_QUICKDROP_LOW_MASK 0x000000ff
+#define PHY_BB_MULTICHAIN_GAIN_CTRL_QUICKDROP_LOW_GET(x) (((x) & 0x000000ff) >> 0)
+#define PHY_BB_MULTICHAIN_GAIN_CTRL_QUICKDROP_LOW_SET(x) (((x) << 0) & 0x000000ff)
+#define PHY_BB_MULTICHAIN_GAIN_CTRL_ENABLE_CHECK_STRONG_ANT_MSB 8
+#define PHY_BB_MULTICHAIN_GAIN_CTRL_ENABLE_CHECK_STRONG_ANT_LSB 8
+#define PHY_BB_MULTICHAIN_GAIN_CTRL_ENABLE_CHECK_STRONG_ANT_MASK 0x00000100
+#define PHY_BB_MULTICHAIN_GAIN_CTRL_ENABLE_CHECK_STRONG_ANT_GET(x) (((x) & 0x00000100) >> 8)
+#define PHY_BB_MULTICHAIN_GAIN_CTRL_ENABLE_CHECK_STRONG_ANT_SET(x) (((x) << 8) & 0x00000100)
+#define PHY_BB_MULTICHAIN_GAIN_CTRL_ANT_FAST_DIV_BIAS_MSB 14
+#define PHY_BB_MULTICHAIN_GAIN_CTRL_ANT_FAST_DIV_BIAS_LSB 9
+#define PHY_BB_MULTICHAIN_GAIN_CTRL_ANT_FAST_DIV_BIAS_MASK 0x00007e00
+#define PHY_BB_MULTICHAIN_GAIN_CTRL_ANT_FAST_DIV_BIAS_GET(x) (((x) & 0x00007e00) >> 9)
+#define PHY_BB_MULTICHAIN_GAIN_CTRL_ANT_FAST_DIV_BIAS_SET(x) (((x) << 9) & 0x00007e00)
+#define PHY_BB_MULTICHAIN_GAIN_CTRL_CAP_GAIN_RATIO_SNR_MSB 20
+#define PHY_BB_MULTICHAIN_GAIN_CTRL_CAP_GAIN_RATIO_SNR_LSB 15
+#define PHY_BB_MULTICHAIN_GAIN_CTRL_CAP_GAIN_RATIO_SNR_MASK 0x001f8000
+#define PHY_BB_MULTICHAIN_GAIN_CTRL_CAP_GAIN_RATIO_SNR_GET(x) (((x) & 0x001f8000) >> 15)
+#define PHY_BB_MULTICHAIN_GAIN_CTRL_CAP_GAIN_RATIO_SNR_SET(x) (((x) << 15) & 0x001f8000)
+#define PHY_BB_MULTICHAIN_GAIN_CTRL_CAP_GAIN_RATIO_ENA_MSB 21
+#define PHY_BB_MULTICHAIN_GAIN_CTRL_CAP_GAIN_RATIO_ENA_LSB 21
+#define PHY_BB_MULTICHAIN_GAIN_CTRL_CAP_GAIN_RATIO_ENA_MASK 0x00200000
+#define PHY_BB_MULTICHAIN_GAIN_CTRL_CAP_GAIN_RATIO_ENA_GET(x) (((x) & 0x00200000) >> 21)
+#define PHY_BB_MULTICHAIN_GAIN_CTRL_CAP_GAIN_RATIO_ENA_SET(x) (((x) << 21) & 0x00200000)
+#define PHY_BB_MULTICHAIN_GAIN_CTRL_CAP_GAIN_RATIO_MODE_MSB 22
+#define PHY_BB_MULTICHAIN_GAIN_CTRL_CAP_GAIN_RATIO_MODE_LSB 22
+#define PHY_BB_MULTICHAIN_GAIN_CTRL_CAP_GAIN_RATIO_MODE_MASK 0x00400000
+#define PHY_BB_MULTICHAIN_GAIN_CTRL_CAP_GAIN_RATIO_MODE_GET(x) (((x) & 0x00400000) >> 22)
+#define PHY_BB_MULTICHAIN_GAIN_CTRL_CAP_GAIN_RATIO_MODE_SET(x) (((x) << 22) & 0x00400000)
+#define PHY_BB_MULTICHAIN_GAIN_CTRL_ENABLE_ANT_SW_RX_PROT_MSB 23
+#define PHY_BB_MULTICHAIN_GAIN_CTRL_ENABLE_ANT_SW_RX_PROT_LSB 23
+#define PHY_BB_MULTICHAIN_GAIN_CTRL_ENABLE_ANT_SW_RX_PROT_MASK 0x00800000
+#define PHY_BB_MULTICHAIN_GAIN_CTRL_ENABLE_ANT_SW_RX_PROT_GET(x) (((x) & 0x00800000) >> 23)
+#define PHY_BB_MULTICHAIN_GAIN_CTRL_ENABLE_ANT_SW_RX_PROT_SET(x) (((x) << 23) & 0x00800000)
+#define PHY_BB_MULTICHAIN_GAIN_CTRL_ENABLE_ANT_DIV_LNADIV_MSB 24
+#define PHY_BB_MULTICHAIN_GAIN_CTRL_ENABLE_ANT_DIV_LNADIV_LSB 24
+#define PHY_BB_MULTICHAIN_GAIN_CTRL_ENABLE_ANT_DIV_LNADIV_MASK 0x01000000
+#define PHY_BB_MULTICHAIN_GAIN_CTRL_ENABLE_ANT_DIV_LNADIV_GET(x) (((x) & 0x01000000) >> 24)
+#define PHY_BB_MULTICHAIN_GAIN_CTRL_ENABLE_ANT_DIV_LNADIV_SET(x) (((x) << 24) & 0x01000000)
+#define PHY_BB_MULTICHAIN_GAIN_CTRL_ANT_DIV_ALT_LNACONF_MSB 26
+#define PHY_BB_MULTICHAIN_GAIN_CTRL_ANT_DIV_ALT_LNACONF_LSB 25
+#define PHY_BB_MULTICHAIN_GAIN_CTRL_ANT_DIV_ALT_LNACONF_MASK 0x06000000
+#define PHY_BB_MULTICHAIN_GAIN_CTRL_ANT_DIV_ALT_LNACONF_GET(x) (((x) & 0x06000000) >> 25)
+#define PHY_BB_MULTICHAIN_GAIN_CTRL_ANT_DIV_ALT_LNACONF_SET(x) (((x) << 25) & 0x06000000)
+#define PHY_BB_MULTICHAIN_GAIN_CTRL_ANT_DIV_MAIN_LNACONF_MSB 28
+#define PHY_BB_MULTICHAIN_GAIN_CTRL_ANT_DIV_MAIN_LNACONF_LSB 27
+#define PHY_BB_MULTICHAIN_GAIN_CTRL_ANT_DIV_MAIN_LNACONF_MASK 0x18000000
+#define PHY_BB_MULTICHAIN_GAIN_CTRL_ANT_DIV_MAIN_LNACONF_GET(x) (((x) & 0x18000000) >> 27)
+#define PHY_BB_MULTICHAIN_GAIN_CTRL_ANT_DIV_MAIN_LNACONF_SET(x) (((x) << 27) & 0x18000000)
+#define PHY_BB_MULTICHAIN_GAIN_CTRL_ANT_DIV_ALT_GAINTB_MSB 29
+#define PHY_BB_MULTICHAIN_GAIN_CTRL_ANT_DIV_ALT_GAINTB_LSB 29
+#define PHY_BB_MULTICHAIN_GAIN_CTRL_ANT_DIV_ALT_GAINTB_MASK 0x20000000
+#define PHY_BB_MULTICHAIN_GAIN_CTRL_ANT_DIV_ALT_GAINTB_GET(x) (((x) & 0x20000000) >> 29)
+#define PHY_BB_MULTICHAIN_GAIN_CTRL_ANT_DIV_ALT_GAINTB_SET(x) (((x) << 29) & 0x20000000)
+#define PHY_BB_MULTICHAIN_GAIN_CTRL_ANT_DIV_MAIN_GAINTB_MSB 30
+#define PHY_BB_MULTICHAIN_GAIN_CTRL_ANT_DIV_MAIN_GAINTB_LSB 30
+#define PHY_BB_MULTICHAIN_GAIN_CTRL_ANT_DIV_MAIN_GAINTB_MASK 0x40000000
+#define PHY_BB_MULTICHAIN_GAIN_CTRL_ANT_DIV_MAIN_GAINTB_GET(x) (((x) & 0x40000000) >> 30)
+#define PHY_BB_MULTICHAIN_GAIN_CTRL_ANT_DIV_MAIN_GAINTB_SET(x) (((x) << 30) & 0x40000000)
+
+/* macros for BB_adc_gain_dc_corr_b0 */
+#define PHY_BB_ADC_GAIN_DC_CORR_B0_ADDRESS 0x000099b4
+#define PHY_BB_ADC_GAIN_DC_CORR_B0_OFFSET 0x000099b4
+#define PHY_BB_ADC_GAIN_DC_CORR_B0_ADC_GAIN_CORR_Q_COEFF_0_MSB 5
+#define PHY_BB_ADC_GAIN_DC_CORR_B0_ADC_GAIN_CORR_Q_COEFF_0_LSB 0
+#define PHY_BB_ADC_GAIN_DC_CORR_B0_ADC_GAIN_CORR_Q_COEFF_0_MASK 0x0000003f
+#define PHY_BB_ADC_GAIN_DC_CORR_B0_ADC_GAIN_CORR_Q_COEFF_0_GET(x) (((x) & 0x0000003f) >> 0)
+#define PHY_BB_ADC_GAIN_DC_CORR_B0_ADC_GAIN_CORR_Q_COEFF_0_SET(x) (((x) << 0) & 0x0000003f)
+#define PHY_BB_ADC_GAIN_DC_CORR_B0_ADC_GAIN_CORR_I_COEFF_0_MSB 11
+#define PHY_BB_ADC_GAIN_DC_CORR_B0_ADC_GAIN_CORR_I_COEFF_0_LSB 6
+#define PHY_BB_ADC_GAIN_DC_CORR_B0_ADC_GAIN_CORR_I_COEFF_0_MASK 0x00000fc0
+#define PHY_BB_ADC_GAIN_DC_CORR_B0_ADC_GAIN_CORR_I_COEFF_0_GET(x) (((x) & 0x00000fc0) >> 6)
+#define PHY_BB_ADC_GAIN_DC_CORR_B0_ADC_GAIN_CORR_I_COEFF_0_SET(x) (((x) << 6) & 0x00000fc0)
+#define PHY_BB_ADC_GAIN_DC_CORR_B0_ADC_DC_CORR_Q_COEFF_0_MSB 20
+#define PHY_BB_ADC_GAIN_DC_CORR_B0_ADC_DC_CORR_Q_COEFF_0_LSB 12
+#define PHY_BB_ADC_GAIN_DC_CORR_B0_ADC_DC_CORR_Q_COEFF_0_MASK 0x001ff000
+#define PHY_BB_ADC_GAIN_DC_CORR_B0_ADC_DC_CORR_Q_COEFF_0_GET(x) (((x) & 0x001ff000) >> 12)
+#define PHY_BB_ADC_GAIN_DC_CORR_B0_ADC_DC_CORR_Q_COEFF_0_SET(x) (((x) << 12) & 0x001ff000)
+#define PHY_BB_ADC_GAIN_DC_CORR_B0_ADC_DC_CORR_I_COEFF_0_MSB 29
+#define PHY_BB_ADC_GAIN_DC_CORR_B0_ADC_DC_CORR_I_COEFF_0_LSB 21
+#define PHY_BB_ADC_GAIN_DC_CORR_B0_ADC_DC_CORR_I_COEFF_0_MASK 0x3fe00000
+#define PHY_BB_ADC_GAIN_DC_CORR_B0_ADC_DC_CORR_I_COEFF_0_GET(x) (((x) & 0x3fe00000) >> 21)
+#define PHY_BB_ADC_GAIN_DC_CORR_B0_ADC_DC_CORR_I_COEFF_0_SET(x) (((x) << 21) & 0x3fe00000)
+#define PHY_BB_ADC_GAIN_DC_CORR_B0_ADC_GAIN_CORR_ENABLE_MSB 30
+#define PHY_BB_ADC_GAIN_DC_CORR_B0_ADC_GAIN_CORR_ENABLE_LSB 30
+#define PHY_BB_ADC_GAIN_DC_CORR_B0_ADC_GAIN_CORR_ENABLE_MASK 0x40000000
+#define PHY_BB_ADC_GAIN_DC_CORR_B0_ADC_GAIN_CORR_ENABLE_GET(x) (((x) & 0x40000000) >> 30)
+#define PHY_BB_ADC_GAIN_DC_CORR_B0_ADC_GAIN_CORR_ENABLE_SET(x) (((x) << 30) & 0x40000000)
+#define PHY_BB_ADC_GAIN_DC_CORR_B0_ADC_DC_CORR_ENABLE_MSB 31
+#define PHY_BB_ADC_GAIN_DC_CORR_B0_ADC_DC_CORR_ENABLE_LSB 31
+#define PHY_BB_ADC_GAIN_DC_CORR_B0_ADC_DC_CORR_ENABLE_MASK 0x80000000
+#define PHY_BB_ADC_GAIN_DC_CORR_B0_ADC_DC_CORR_ENABLE_GET(x) (((x) & 0x80000000) >> 31)
+#define PHY_BB_ADC_GAIN_DC_CORR_B0_ADC_DC_CORR_ENABLE_SET(x) (((x) << 31) & 0x80000000)
+
+/* macros for BB_ext_chan_pwr_thr_1 */
+#define PHY_BB_EXT_CHAN_PWR_THR_1_ADDRESS 0x000099b8
+#define PHY_BB_EXT_CHAN_PWR_THR_1_OFFSET 0x000099b8
+#define PHY_BB_EXT_CHAN_PWR_THR_1_THRESH62_EXT_MSB 7
+#define PHY_BB_EXT_CHAN_PWR_THR_1_THRESH62_EXT_LSB 0
+#define PHY_BB_EXT_CHAN_PWR_THR_1_THRESH62_EXT_MASK 0x000000ff
+#define PHY_BB_EXT_CHAN_PWR_THR_1_THRESH62_EXT_GET(x) (((x) & 0x000000ff) >> 0)
+#define PHY_BB_EXT_CHAN_PWR_THR_1_THRESH62_EXT_SET(x) (((x) << 0) & 0x000000ff)
+#define PHY_BB_EXT_CHAN_PWR_THR_1_ANT_DIV_ALT_ANT_MINGAINIDX_MSB 15
+#define PHY_BB_EXT_CHAN_PWR_THR_1_ANT_DIV_ALT_ANT_MINGAINIDX_LSB 8
+#define PHY_BB_EXT_CHAN_PWR_THR_1_ANT_DIV_ALT_ANT_MINGAINIDX_MASK 0x0000ff00
+#define PHY_BB_EXT_CHAN_PWR_THR_1_ANT_DIV_ALT_ANT_MINGAINIDX_GET(x) (((x) & 0x0000ff00) >> 8)
+#define PHY_BB_EXT_CHAN_PWR_THR_1_ANT_DIV_ALT_ANT_MINGAINIDX_SET(x) (((x) << 8) & 0x0000ff00)
+#define PHY_BB_EXT_CHAN_PWR_THR_1_ANT_DIV_ALT_ANT_DELTAGAINIDX_MSB 20
+#define PHY_BB_EXT_CHAN_PWR_THR_1_ANT_DIV_ALT_ANT_DELTAGAINIDX_LSB 16
+#define PHY_BB_EXT_CHAN_PWR_THR_1_ANT_DIV_ALT_ANT_DELTAGAINIDX_MASK 0x001f0000
+#define PHY_BB_EXT_CHAN_PWR_THR_1_ANT_DIV_ALT_ANT_DELTAGAINIDX_GET(x) (((x) & 0x001f0000) >> 16)
+#define PHY_BB_EXT_CHAN_PWR_THR_1_ANT_DIV_ALT_ANT_DELTAGAINIDX_SET(x) (((x) << 16) & 0x001f0000)
+#define PHY_BB_EXT_CHAN_PWR_THR_1_ANT_DIV_ALT_ANT_DELTANF_MSB 26
+#define PHY_BB_EXT_CHAN_PWR_THR_1_ANT_DIV_ALT_ANT_DELTANF_LSB 21
+#define PHY_BB_EXT_CHAN_PWR_THR_1_ANT_DIV_ALT_ANT_DELTANF_MASK 0x07e00000
+#define PHY_BB_EXT_CHAN_PWR_THR_1_ANT_DIV_ALT_ANT_DELTANF_GET(x) (((x) & 0x07e00000) >> 21)
+#define PHY_BB_EXT_CHAN_PWR_THR_1_ANT_DIV_ALT_ANT_DELTANF_SET(x) (((x) << 21) & 0x07e00000)
+
+/* macros for BB_ext_chan_pwr_thr_2_b0 */
+#define PHY_BB_EXT_CHAN_PWR_THR_2_B0_ADDRESS 0x000099bc
+#define PHY_BB_EXT_CHAN_PWR_THR_2_B0_OFFSET 0x000099bc
+#define PHY_BB_EXT_CHAN_PWR_THR_2_B0_CF_MAXCCAPWR_EXT_0_MSB 8
+#define PHY_BB_EXT_CHAN_PWR_THR_2_B0_CF_MAXCCAPWR_EXT_0_LSB 0
+#define PHY_BB_EXT_CHAN_PWR_THR_2_B0_CF_MAXCCAPWR_EXT_0_MASK 0x000001ff
+#define PHY_BB_EXT_CHAN_PWR_THR_2_B0_CF_MAXCCAPWR_EXT_0_GET(x) (((x) & 0x000001ff) >> 0)
+#define PHY_BB_EXT_CHAN_PWR_THR_2_B0_CF_MAXCCAPWR_EXT_0_SET(x) (((x) << 0) & 0x000001ff)
+#define PHY_BB_EXT_CHAN_PWR_THR_2_B0_CYCPWR_THR1_EXT_MSB 15
+#define PHY_BB_EXT_CHAN_PWR_THR_2_B0_CYCPWR_THR1_EXT_LSB 9
+#define PHY_BB_EXT_CHAN_PWR_THR_2_B0_CYCPWR_THR1_EXT_MASK 0x0000fe00
+#define PHY_BB_EXT_CHAN_PWR_THR_2_B0_CYCPWR_THR1_EXT_GET(x) (((x) & 0x0000fe00) >> 9)
+#define PHY_BB_EXT_CHAN_PWR_THR_2_B0_CYCPWR_THR1_EXT_SET(x) (((x) << 9) & 0x0000fe00)
+#define PHY_BB_EXT_CHAN_PWR_THR_2_B0_MINCCAPWR_EXT_0_MSB 24
+#define PHY_BB_EXT_CHAN_PWR_THR_2_B0_MINCCAPWR_EXT_0_LSB 16
+#define PHY_BB_EXT_CHAN_PWR_THR_2_B0_MINCCAPWR_EXT_0_MASK 0x01ff0000
+#define PHY_BB_EXT_CHAN_PWR_THR_2_B0_MINCCAPWR_EXT_0_GET(x) (((x) & 0x01ff0000) >> 16)
+
+/* macros for BB_ext_chan_scorr_thr */
+#define PHY_BB_EXT_CHAN_SCORR_THR_ADDRESS 0x000099c0
+#define PHY_BB_EXT_CHAN_SCORR_THR_OFFSET 0x000099c0
+#define PHY_BB_EXT_CHAN_SCORR_THR_M1_THRES_EXT_MSB 6
+#define PHY_BB_EXT_CHAN_SCORR_THR_M1_THRES_EXT_LSB 0
+#define PHY_BB_EXT_CHAN_SCORR_THR_M1_THRES_EXT_MASK 0x0000007f
+#define PHY_BB_EXT_CHAN_SCORR_THR_M1_THRES_EXT_GET(x) (((x) & 0x0000007f) >> 0)
+#define PHY_BB_EXT_CHAN_SCORR_THR_M1_THRES_EXT_SET(x) (((x) << 0) & 0x0000007f)
+#define PHY_BB_EXT_CHAN_SCORR_THR_M2_THRES_EXT_MSB 13
+#define PHY_BB_EXT_CHAN_SCORR_THR_M2_THRES_EXT_LSB 7
+#define PHY_BB_EXT_CHAN_SCORR_THR_M2_THRES_EXT_MASK 0x00003f80
+#define PHY_BB_EXT_CHAN_SCORR_THR_M2_THRES_EXT_GET(x) (((x) & 0x00003f80) >> 7)
+#define PHY_BB_EXT_CHAN_SCORR_THR_M2_THRES_EXT_SET(x) (((x) << 7) & 0x00003f80)
+#define PHY_BB_EXT_CHAN_SCORR_THR_M1_THRES_LOW_EXT_MSB 20
+#define PHY_BB_EXT_CHAN_SCORR_THR_M1_THRES_LOW_EXT_LSB 14
+#define PHY_BB_EXT_CHAN_SCORR_THR_M1_THRES_LOW_EXT_MASK 0x001fc000
+#define PHY_BB_EXT_CHAN_SCORR_THR_M1_THRES_LOW_EXT_GET(x) (((x) & 0x001fc000) >> 14)
+#define PHY_BB_EXT_CHAN_SCORR_THR_M1_THRES_LOW_EXT_SET(x) (((x) << 14) & 0x001fc000)
+#define PHY_BB_EXT_CHAN_SCORR_THR_M2_THRES_LOW_EXT_MSB 27
+#define PHY_BB_EXT_CHAN_SCORR_THR_M2_THRES_LOW_EXT_LSB 21
+#define PHY_BB_EXT_CHAN_SCORR_THR_M2_THRES_LOW_EXT_MASK 0x0fe00000
+#define PHY_BB_EXT_CHAN_SCORR_THR_M2_THRES_LOW_EXT_GET(x) (((x) & 0x0fe00000) >> 21)
+#define PHY_BB_EXT_CHAN_SCORR_THR_M2_THRES_LOW_EXT_SET(x) (((x) << 21) & 0x0fe00000)
+#define PHY_BB_EXT_CHAN_SCORR_THR_SPUR_SUBCHANNEL_SD_MSB 28
+#define PHY_BB_EXT_CHAN_SCORR_THR_SPUR_SUBCHANNEL_SD_LSB 28
+#define PHY_BB_EXT_CHAN_SCORR_THR_SPUR_SUBCHANNEL_SD_MASK 0x10000000
+#define PHY_BB_EXT_CHAN_SCORR_THR_SPUR_SUBCHANNEL_SD_GET(x) (((x) & 0x10000000) >> 28)
+#define PHY_BB_EXT_CHAN_SCORR_THR_SPUR_SUBCHANNEL_SD_SET(x) (((x) << 28) & 0x10000000)
+
+/* macros for BB_ext_chan_detect_win */
+#define PHY_BB_EXT_CHAN_DETECT_WIN_ADDRESS 0x000099c4
+#define PHY_BB_EXT_CHAN_DETECT_WIN_OFFSET 0x000099c4
+#define PHY_BB_EXT_CHAN_DETECT_WIN_DET_DIFF_WIN_WEAK_MSB 3
+#define PHY_BB_EXT_CHAN_DETECT_WIN_DET_DIFF_WIN_WEAK_LSB 0
+#define PHY_BB_EXT_CHAN_DETECT_WIN_DET_DIFF_WIN_WEAK_MASK 0x0000000f
+#define PHY_BB_EXT_CHAN_DETECT_WIN_DET_DIFF_WIN_WEAK_GET(x) (((x) & 0x0000000f) >> 0)
+#define PHY_BB_EXT_CHAN_DETECT_WIN_DET_DIFF_WIN_WEAK_SET(x) (((x) << 0) & 0x0000000f)
+#define PHY_BB_EXT_CHAN_DETECT_WIN_DET_DIFF_WIN_WEAK_LOW_MSB 7
+#define PHY_BB_EXT_CHAN_DETECT_WIN_DET_DIFF_WIN_WEAK_LOW_LSB 4
+#define PHY_BB_EXT_CHAN_DETECT_WIN_DET_DIFF_WIN_WEAK_LOW_MASK 0x000000f0
+#define PHY_BB_EXT_CHAN_DETECT_WIN_DET_DIFF_WIN_WEAK_LOW_GET(x) (((x) & 0x000000f0) >> 4)
+#define PHY_BB_EXT_CHAN_DETECT_WIN_DET_DIFF_WIN_WEAK_LOW_SET(x) (((x) << 4) & 0x000000f0)
+#define PHY_BB_EXT_CHAN_DETECT_WIN_DET_DIFF_WIN_WEAK_CCK_MSB 12
+#define PHY_BB_EXT_CHAN_DETECT_WIN_DET_DIFF_WIN_WEAK_CCK_LSB 8
+#define PHY_BB_EXT_CHAN_DETECT_WIN_DET_DIFF_WIN_WEAK_CCK_MASK 0x00001f00
+#define PHY_BB_EXT_CHAN_DETECT_WIN_DET_DIFF_WIN_WEAK_CCK_GET(x) (((x) & 0x00001f00) >> 8)
+#define PHY_BB_EXT_CHAN_DETECT_WIN_DET_DIFF_WIN_WEAK_CCK_SET(x) (((x) << 8) & 0x00001f00)
+#define PHY_BB_EXT_CHAN_DETECT_WIN_DET_20H_COUNT_MSB 15
+#define PHY_BB_EXT_CHAN_DETECT_WIN_DET_20H_COUNT_LSB 13
+#define PHY_BB_EXT_CHAN_DETECT_WIN_DET_20H_COUNT_MASK 0x0000e000
+#define PHY_BB_EXT_CHAN_DETECT_WIN_DET_20H_COUNT_GET(x) (((x) & 0x0000e000) >> 13)
+#define PHY_BB_EXT_CHAN_DETECT_WIN_DET_20H_COUNT_SET(x) (((x) << 13) & 0x0000e000)
+#define PHY_BB_EXT_CHAN_DETECT_WIN_DET_EXT_BLK_COUNT_MSB 18
+#define PHY_BB_EXT_CHAN_DETECT_WIN_DET_EXT_BLK_COUNT_LSB 16
+#define PHY_BB_EXT_CHAN_DETECT_WIN_DET_EXT_BLK_COUNT_MASK 0x00070000
+#define PHY_BB_EXT_CHAN_DETECT_WIN_DET_EXT_BLK_COUNT_GET(x) (((x) & 0x00070000) >> 16)
+#define PHY_BB_EXT_CHAN_DETECT_WIN_DET_EXT_BLK_COUNT_SET(x) (((x) << 16) & 0x00070000)
+#define PHY_BB_EXT_CHAN_DETECT_WIN_WEAK_SIG_THR_CCK_EXT_MSB 24
+#define PHY_BB_EXT_CHAN_DETECT_WIN_WEAK_SIG_THR_CCK_EXT_LSB 19
+#define PHY_BB_EXT_CHAN_DETECT_WIN_WEAK_SIG_THR_CCK_EXT_MASK 0x01f80000
+#define PHY_BB_EXT_CHAN_DETECT_WIN_WEAK_SIG_THR_CCK_EXT_GET(x) (((x) & 0x01f80000) >> 19)
+#define PHY_BB_EXT_CHAN_DETECT_WIN_WEAK_SIG_THR_CCK_EXT_SET(x) (((x) << 19) & 0x01f80000)
+#define PHY_BB_EXT_CHAN_DETECT_WIN_DET_DIFF_WIN_THRESH_MSB 28
+#define PHY_BB_EXT_CHAN_DETECT_WIN_DET_DIFF_WIN_THRESH_LSB 25
+#define PHY_BB_EXT_CHAN_DETECT_WIN_DET_DIFF_WIN_THRESH_MASK 0x1e000000
+#define PHY_BB_EXT_CHAN_DETECT_WIN_DET_DIFF_WIN_THRESH_GET(x) (((x) & 0x1e000000) >> 25)
+#define PHY_BB_EXT_CHAN_DETECT_WIN_DET_DIFF_WIN_THRESH_SET(x) (((x) << 25) & 0x1e000000)
+
+/* macros for BB_pwr_thr_20_40_det */
+#define PHY_BB_PWR_THR_20_40_DET_ADDRESS 0x000099c8
+#define PHY_BB_PWR_THR_20_40_DET_OFFSET 0x000099c8
+#define PHY_BB_PWR_THR_20_40_DET_PWRDIFF40_THRSTR_MSB 4
+#define PHY_BB_PWR_THR_20_40_DET_PWRDIFF40_THRSTR_LSB 0
+#define PHY_BB_PWR_THR_20_40_DET_PWRDIFF40_THRSTR_MASK 0x0000001f
+#define PHY_BB_PWR_THR_20_40_DET_PWRDIFF40_THRSTR_GET(x) (((x) & 0x0000001f) >> 0)
+#define PHY_BB_PWR_THR_20_40_DET_PWRDIFF40_THRSTR_SET(x) (((x) << 0) & 0x0000001f)
+#define PHY_BB_PWR_THR_20_40_DET_BLOCKER40_MAX_MSB 10
+#define PHY_BB_PWR_THR_20_40_DET_BLOCKER40_MAX_LSB 5
+#define PHY_BB_PWR_THR_20_40_DET_BLOCKER40_MAX_MASK 0x000007e0
+#define PHY_BB_PWR_THR_20_40_DET_BLOCKER40_MAX_GET(x) (((x) & 0x000007e0) >> 5)
+#define PHY_BB_PWR_THR_20_40_DET_BLOCKER40_MAX_SET(x) (((x) << 5) & 0x000007e0)
+#define PHY_BB_PWR_THR_20_40_DET_DET40_PWRSTEP_MAX_MSB 15
+#define PHY_BB_PWR_THR_20_40_DET_DET40_PWRSTEP_MAX_LSB 11
+#define PHY_BB_PWR_THR_20_40_DET_DET40_PWRSTEP_MAX_MASK 0x0000f800
+#define PHY_BB_PWR_THR_20_40_DET_DET40_PWRSTEP_MAX_GET(x) (((x) & 0x0000f800) >> 11)
+#define PHY_BB_PWR_THR_20_40_DET_DET40_PWRSTEP_MAX_SET(x) (((x) << 11) & 0x0000f800)
+#define PHY_BB_PWR_THR_20_40_DET_DET40_THR_SNR_MSB 23
+#define PHY_BB_PWR_THR_20_40_DET_DET40_THR_SNR_LSB 16
+#define PHY_BB_PWR_THR_20_40_DET_DET40_THR_SNR_MASK 0x00ff0000
+#define PHY_BB_PWR_THR_20_40_DET_DET40_THR_SNR_GET(x) (((x) & 0x00ff0000) >> 16)
+#define PHY_BB_PWR_THR_20_40_DET_DET40_THR_SNR_SET(x) (((x) << 16) & 0x00ff0000)
+#define PHY_BB_PWR_THR_20_40_DET_DET40_PRI_BIAS_MSB 28
+#define PHY_BB_PWR_THR_20_40_DET_DET40_PRI_BIAS_LSB 24
+#define PHY_BB_PWR_THR_20_40_DET_DET40_PRI_BIAS_MASK 0x1f000000
+#define PHY_BB_PWR_THR_20_40_DET_DET40_PRI_BIAS_GET(x) (((x) & 0x1f000000) >> 24)
+#define PHY_BB_PWR_THR_20_40_DET_DET40_PRI_BIAS_SET(x) (((x) << 24) & 0x1f000000)
+#define PHY_BB_PWR_THR_20_40_DET_PWRSTEP40_ENA_MSB 29
+#define PHY_BB_PWR_THR_20_40_DET_PWRSTEP40_ENA_LSB 29
+#define PHY_BB_PWR_THR_20_40_DET_PWRSTEP40_ENA_MASK 0x20000000
+#define PHY_BB_PWR_THR_20_40_DET_PWRSTEP40_ENA_GET(x) (((x) & 0x20000000) >> 29)
+#define PHY_BB_PWR_THR_20_40_DET_PWRSTEP40_ENA_SET(x) (((x) << 29) & 0x20000000)
+#define PHY_BB_PWR_THR_20_40_DET_LOWSNR40_ENA_MSB 30
+#define PHY_BB_PWR_THR_20_40_DET_LOWSNR40_ENA_LSB 30
+#define PHY_BB_PWR_THR_20_40_DET_LOWSNR40_ENA_MASK 0x40000000
+#define PHY_BB_PWR_THR_20_40_DET_LOWSNR40_ENA_GET(x) (((x) & 0x40000000) >> 30)
+#define PHY_BB_PWR_THR_20_40_DET_LOWSNR40_ENA_SET(x) (((x) << 30) & 0x40000000)
+
+/* macros for BB_short_gi_delta_slope */
+#define PHY_BB_SHORT_GI_DELTA_SLOPE_ADDRESS 0x000099d0
+#define PHY_BB_SHORT_GI_DELTA_SLOPE_OFFSET 0x000099d0
+#define PHY_BB_SHORT_GI_DELTA_SLOPE_DELTA_SLOPE_COEF_EXP_SHORT_GI_MSB 3
+#define PHY_BB_SHORT_GI_DELTA_SLOPE_DELTA_SLOPE_COEF_EXP_SHORT_GI_LSB 0
+#define PHY_BB_SHORT_GI_DELTA_SLOPE_DELTA_SLOPE_COEF_EXP_SHORT_GI_MASK 0x0000000f
+#define PHY_BB_SHORT_GI_DELTA_SLOPE_DELTA_SLOPE_COEF_EXP_SHORT_GI_GET(x) (((x) & 0x0000000f) >> 0)
+#define PHY_BB_SHORT_GI_DELTA_SLOPE_DELTA_SLOPE_COEF_EXP_SHORT_GI_SET(x) (((x) << 0) & 0x0000000f)
+#define PHY_BB_SHORT_GI_DELTA_SLOPE_DELTA_SLOPE_COEF_MAN_SHORT_GI_MSB 18
+#define PHY_BB_SHORT_GI_DELTA_SLOPE_DELTA_SLOPE_COEF_MAN_SHORT_GI_LSB 4
+#define PHY_BB_SHORT_GI_DELTA_SLOPE_DELTA_SLOPE_COEF_MAN_SHORT_GI_MASK 0x0007fff0
+#define PHY_BB_SHORT_GI_DELTA_SLOPE_DELTA_SLOPE_COEF_MAN_SHORT_GI_GET(x) (((x) & 0x0007fff0) >> 4)
+#define PHY_BB_SHORT_GI_DELTA_SLOPE_DELTA_SLOPE_COEF_MAN_SHORT_GI_SET(x) (((x) << 4) & 0x0007fff0)
+
+/* macros for BB_chaninfo_ctrl */
+#define PHY_BB_CHANINFO_CTRL_ADDRESS 0x000099dc
+#define PHY_BB_CHANINFO_CTRL_OFFSET 0x000099dc
+#define PHY_BB_CHANINFO_CTRL_CAPTURE_CHAN_INFO_MSB 0
+#define PHY_BB_CHANINFO_CTRL_CAPTURE_CHAN_INFO_LSB 0
+#define PHY_BB_CHANINFO_CTRL_CAPTURE_CHAN_INFO_MASK 0x00000001
+#define PHY_BB_CHANINFO_CTRL_CAPTURE_CHAN_INFO_GET(x) (((x) & 0x00000001) >> 0)
+#define PHY_BB_CHANINFO_CTRL_CAPTURE_CHAN_INFO_SET(x) (((x) << 0) & 0x00000001)
+#define PHY_BB_CHANINFO_CTRL_DISABLE_CHANINFOMEM_MSB 1
+#define PHY_BB_CHANINFO_CTRL_DISABLE_CHANINFOMEM_LSB 1
+#define PHY_BB_CHANINFO_CTRL_DISABLE_CHANINFOMEM_MASK 0x00000002
+#define PHY_BB_CHANINFO_CTRL_DISABLE_CHANINFOMEM_GET(x) (((x) & 0x00000002) >> 1)
+#define PHY_BB_CHANINFO_CTRL_DISABLE_CHANINFOMEM_SET(x) (((x) << 1) & 0x00000002)
+
+/* macros for BB_heavy_clip_ctrl */
+#define PHY_BB_HEAVY_CLIP_CTRL_ADDRESS 0x000099e0
+#define PHY_BB_HEAVY_CLIP_CTRL_OFFSET 0x000099e0
+#define PHY_BB_HEAVY_CLIP_CTRL_CF_HEAVY_CLIP_ENABLE_MSB 8
+#define PHY_BB_HEAVY_CLIP_CTRL_CF_HEAVY_CLIP_ENABLE_LSB 0
+#define PHY_BB_HEAVY_CLIP_CTRL_CF_HEAVY_CLIP_ENABLE_MASK 0x000001ff
+#define PHY_BB_HEAVY_CLIP_CTRL_CF_HEAVY_CLIP_ENABLE_GET(x) (((x) & 0x000001ff) >> 0)
+#define PHY_BB_HEAVY_CLIP_CTRL_CF_HEAVY_CLIP_ENABLE_SET(x) (((x) << 0) & 0x000001ff)
+#define PHY_BB_HEAVY_CLIP_CTRL_PRE_EMP_HT40_ENABLE_MSB 9
+#define PHY_BB_HEAVY_CLIP_CTRL_PRE_EMP_HT40_ENABLE_LSB 9
+#define PHY_BB_HEAVY_CLIP_CTRL_PRE_EMP_HT40_ENABLE_MASK 0x00000200
+#define PHY_BB_HEAVY_CLIP_CTRL_PRE_EMP_HT40_ENABLE_GET(x) (((x) & 0x00000200) >> 9)
+#define PHY_BB_HEAVY_CLIP_CTRL_PRE_EMP_HT40_ENABLE_SET(x) (((x) << 9) & 0x00000200)
+
+/* macros for BB_heavy_clip_20 */
+#define PHY_BB_HEAVY_CLIP_20_ADDRESS 0x000099e4
+#define PHY_BB_HEAVY_CLIP_20_OFFSET 0x000099e4
+#define PHY_BB_HEAVY_CLIP_20_HEAVY_CLIP_FACTOR_0_MSB 7
+#define PHY_BB_HEAVY_CLIP_20_HEAVY_CLIP_FACTOR_0_LSB 0
+#define PHY_BB_HEAVY_CLIP_20_HEAVY_CLIP_FACTOR_0_MASK 0x000000ff
+#define PHY_BB_HEAVY_CLIP_20_HEAVY_CLIP_FACTOR_0_GET(x) (((x) & 0x000000ff) >> 0)
+#define PHY_BB_HEAVY_CLIP_20_HEAVY_CLIP_FACTOR_0_SET(x) (((x) << 0) & 0x000000ff)
+#define PHY_BB_HEAVY_CLIP_20_HEAVY_CLIP_FACTOR_1_MSB 15
+#define PHY_BB_HEAVY_CLIP_20_HEAVY_CLIP_FACTOR_1_LSB 8
+#define PHY_BB_HEAVY_CLIP_20_HEAVY_CLIP_FACTOR_1_MASK 0x0000ff00
+#define PHY_BB_HEAVY_CLIP_20_HEAVY_CLIP_FACTOR_1_GET(x) (((x) & 0x0000ff00) >> 8)
+#define PHY_BB_HEAVY_CLIP_20_HEAVY_CLIP_FACTOR_1_SET(x) (((x) << 8) & 0x0000ff00)
+#define PHY_BB_HEAVY_CLIP_20_HEAVY_CLIP_FACTOR_2_MSB 23
+#define PHY_BB_HEAVY_CLIP_20_HEAVY_CLIP_FACTOR_2_LSB 16
+#define PHY_BB_HEAVY_CLIP_20_HEAVY_CLIP_FACTOR_2_MASK 0x00ff0000
+#define PHY_BB_HEAVY_CLIP_20_HEAVY_CLIP_FACTOR_2_GET(x) (((x) & 0x00ff0000) >> 16)
+#define PHY_BB_HEAVY_CLIP_20_HEAVY_CLIP_FACTOR_2_SET(x) (((x) << 16) & 0x00ff0000)
+#define PHY_BB_HEAVY_CLIP_20_HEAVY_CLIP_FACTOR_3_MSB 31
+#define PHY_BB_HEAVY_CLIP_20_HEAVY_CLIP_FACTOR_3_LSB 24
+#define PHY_BB_HEAVY_CLIP_20_HEAVY_CLIP_FACTOR_3_MASK 0xff000000
+#define PHY_BB_HEAVY_CLIP_20_HEAVY_CLIP_FACTOR_3_GET(x) (((x) & 0xff000000) >> 24)
+#define PHY_BB_HEAVY_CLIP_20_HEAVY_CLIP_FACTOR_3_SET(x) (((x) << 24) & 0xff000000)
+
+/* macros for BB_heavy_clip_40 */
+#define PHY_BB_HEAVY_CLIP_40_ADDRESS 0x000099e8
+#define PHY_BB_HEAVY_CLIP_40_OFFSET 0x000099e8
+#define PHY_BB_HEAVY_CLIP_40_HEAVY_CLIP_FACTOR_4_MSB 7
+#define PHY_BB_HEAVY_CLIP_40_HEAVY_CLIP_FACTOR_4_LSB 0
+#define PHY_BB_HEAVY_CLIP_40_HEAVY_CLIP_FACTOR_4_MASK 0x000000ff
+#define PHY_BB_HEAVY_CLIP_40_HEAVY_CLIP_FACTOR_4_GET(x) (((x) & 0x000000ff) >> 0)
+#define PHY_BB_HEAVY_CLIP_40_HEAVY_CLIP_FACTOR_4_SET(x) (((x) << 0) & 0x000000ff)
+#define PHY_BB_HEAVY_CLIP_40_HEAVY_CLIP_FACTOR_5_MSB 15
+#define PHY_BB_HEAVY_CLIP_40_HEAVY_CLIP_FACTOR_5_LSB 8
+#define PHY_BB_HEAVY_CLIP_40_HEAVY_CLIP_FACTOR_5_MASK 0x0000ff00
+#define PHY_BB_HEAVY_CLIP_40_HEAVY_CLIP_FACTOR_5_GET(x) (((x) & 0x0000ff00) >> 8)
+#define PHY_BB_HEAVY_CLIP_40_HEAVY_CLIP_FACTOR_5_SET(x) (((x) << 8) & 0x0000ff00)
+#define PHY_BB_HEAVY_CLIP_40_HEAVY_CLIP_FACTOR_6_MSB 23
+#define PHY_BB_HEAVY_CLIP_40_HEAVY_CLIP_FACTOR_6_LSB 16
+#define PHY_BB_HEAVY_CLIP_40_HEAVY_CLIP_FACTOR_6_MASK 0x00ff0000
+#define PHY_BB_HEAVY_CLIP_40_HEAVY_CLIP_FACTOR_6_GET(x) (((x) & 0x00ff0000) >> 16)
+#define PHY_BB_HEAVY_CLIP_40_HEAVY_CLIP_FACTOR_6_SET(x) (((x) << 16) & 0x00ff0000)
+#define PHY_BB_HEAVY_CLIP_40_HEAVY_CLIP_FACTOR_7_MSB 31
+#define PHY_BB_HEAVY_CLIP_40_HEAVY_CLIP_FACTOR_7_LSB 24
+#define PHY_BB_HEAVY_CLIP_40_HEAVY_CLIP_FACTOR_7_MASK 0xff000000
+#define PHY_BB_HEAVY_CLIP_40_HEAVY_CLIP_FACTOR_7_GET(x) (((x) & 0xff000000) >> 24)
+#define PHY_BB_HEAVY_CLIP_40_HEAVY_CLIP_FACTOR_7_SET(x) (((x) << 24) & 0xff000000)
+
+/* macros for BB_rifs_srch */
+#define PHY_BB_RIFS_SRCH_ADDRESS 0x000099ec
+#define PHY_BB_RIFS_SRCH_OFFSET 0x000099ec
+#define PHY_BB_RIFS_SRCH_HEAVY_CLIP_FACTOR_XR_MSB 7
+#define PHY_BB_RIFS_SRCH_HEAVY_CLIP_FACTOR_XR_LSB 0
+#define PHY_BB_RIFS_SRCH_HEAVY_CLIP_FACTOR_XR_MASK 0x000000ff
+#define PHY_BB_RIFS_SRCH_HEAVY_CLIP_FACTOR_XR_GET(x) (((x) & 0x000000ff) >> 0)
+#define PHY_BB_RIFS_SRCH_HEAVY_CLIP_FACTOR_XR_SET(x) (((x) << 0) & 0x000000ff)
+#define PHY_BB_RIFS_SRCH_INIT_GAIN_DB_OFFSET_MSB 15
+#define PHY_BB_RIFS_SRCH_INIT_GAIN_DB_OFFSET_LSB 8
+#define PHY_BB_RIFS_SRCH_INIT_GAIN_DB_OFFSET_MASK 0x0000ff00
+#define PHY_BB_RIFS_SRCH_INIT_GAIN_DB_OFFSET_GET(x) (((x) & 0x0000ff00) >> 8)
+#define PHY_BB_RIFS_SRCH_INIT_GAIN_DB_OFFSET_SET(x) (((x) << 8) & 0x0000ff00)
+#define PHY_BB_RIFS_SRCH_RIFS_INIT_DELAY_MSB 25
+#define PHY_BB_RIFS_SRCH_RIFS_INIT_DELAY_LSB 16
+#define PHY_BB_RIFS_SRCH_RIFS_INIT_DELAY_MASK 0x03ff0000
+#define PHY_BB_RIFS_SRCH_RIFS_INIT_DELAY_GET(x) (((x) & 0x03ff0000) >> 16)
+#define PHY_BB_RIFS_SRCH_RIFS_INIT_DELAY_SET(x) (((x) << 16) & 0x03ff0000)
+#define PHY_BB_RIFS_SRCH_RIFS_DISABLE_PWRLOW_GC_MSB 26
+#define PHY_BB_RIFS_SRCH_RIFS_DISABLE_PWRLOW_GC_LSB 26
+#define PHY_BB_RIFS_SRCH_RIFS_DISABLE_PWRLOW_GC_MASK 0x04000000
+#define PHY_BB_RIFS_SRCH_RIFS_DISABLE_PWRLOW_GC_GET(x) (((x) & 0x04000000) >> 26)
+#define PHY_BB_RIFS_SRCH_RIFS_DISABLE_PWRLOW_GC_SET(x) (((x) << 26) & 0x04000000)
+#define PHY_BB_RIFS_SRCH_RIFS_DISABLE_CCK_DET_MSB 27
+#define PHY_BB_RIFS_SRCH_RIFS_DISABLE_CCK_DET_LSB 27
+#define PHY_BB_RIFS_SRCH_RIFS_DISABLE_CCK_DET_MASK 0x08000000
+#define PHY_BB_RIFS_SRCH_RIFS_DISABLE_CCK_DET_GET(x) (((x) & 0x08000000) >> 27)
+#define PHY_BB_RIFS_SRCH_RIFS_DISABLE_CCK_DET_SET(x) (((x) << 27) & 0x08000000)
+
+/* macros for BB_iq_adc_cal_mode */
+#define PHY_BB_IQ_ADC_CAL_MODE_ADDRESS 0x000099f0
+#define PHY_BB_IQ_ADC_CAL_MODE_OFFSET 0x000099f0
+#define PHY_BB_IQ_ADC_CAL_MODE_GAIN_DC_IQ_CAL_MODE_MSB 1
+#define PHY_BB_IQ_ADC_CAL_MODE_GAIN_DC_IQ_CAL_MODE_LSB 0
+#define PHY_BB_IQ_ADC_CAL_MODE_GAIN_DC_IQ_CAL_MODE_MASK 0x00000003
+#define PHY_BB_IQ_ADC_CAL_MODE_GAIN_DC_IQ_CAL_MODE_GET(x) (((x) & 0x00000003) >> 0)
+#define PHY_BB_IQ_ADC_CAL_MODE_GAIN_DC_IQ_CAL_MODE_SET(x) (((x) << 0) & 0x00000003)
+#define PHY_BB_IQ_ADC_CAL_MODE_TEST_CALADCOFF_MSB 2
+#define PHY_BB_IQ_ADC_CAL_MODE_TEST_CALADCOFF_LSB 2
+#define PHY_BB_IQ_ADC_CAL_MODE_TEST_CALADCOFF_MASK 0x00000004
+#define PHY_BB_IQ_ADC_CAL_MODE_TEST_CALADCOFF_GET(x) (((x) & 0x00000004) >> 2)
+#define PHY_BB_IQ_ADC_CAL_MODE_TEST_CALADCOFF_SET(x) (((x) << 2) & 0x00000004)
+
+/* macros for BB_per_chain_csd */
+#define PHY_BB_PER_CHAIN_CSD_ADDRESS 0x000099fc
+#define PHY_BB_PER_CHAIN_CSD_OFFSET 0x000099fc
+#define PHY_BB_PER_CHAIN_CSD_CSD_CHN1_2CHAINS_MSB 4
+#define PHY_BB_PER_CHAIN_CSD_CSD_CHN1_2CHAINS_LSB 0
+#define PHY_BB_PER_CHAIN_CSD_CSD_CHN1_2CHAINS_MASK 0x0000001f
+#define PHY_BB_PER_CHAIN_CSD_CSD_CHN1_2CHAINS_GET(x) (((x) & 0x0000001f) >> 0)
+#define PHY_BB_PER_CHAIN_CSD_CSD_CHN1_2CHAINS_SET(x) (((x) << 0) & 0x0000001f)
+#define PHY_BB_PER_CHAIN_CSD_CSD_CHN1_3CHAINS_MSB 9
+#define PHY_BB_PER_CHAIN_CSD_CSD_CHN1_3CHAINS_LSB 5
+#define PHY_BB_PER_CHAIN_CSD_CSD_CHN1_3CHAINS_MASK 0x000003e0
+#define PHY_BB_PER_CHAIN_CSD_CSD_CHN1_3CHAINS_GET(x) (((x) & 0x000003e0) >> 5)
+#define PHY_BB_PER_CHAIN_CSD_CSD_CHN1_3CHAINS_SET(x) (((x) << 5) & 0x000003e0)
+#define PHY_BB_PER_CHAIN_CSD_CSD_CHN2_3CHAINS_MSB 14
+#define PHY_BB_PER_CHAIN_CSD_CSD_CHN2_3CHAINS_LSB 10
+#define PHY_BB_PER_CHAIN_CSD_CSD_CHN2_3CHAINS_MASK 0x00007c00
+#define PHY_BB_PER_CHAIN_CSD_CSD_CHN2_3CHAINS_GET(x) (((x) & 0x00007c00) >> 10)
+#define PHY_BB_PER_CHAIN_CSD_CSD_CHN2_3CHAINS_SET(x) (((x) << 10) & 0x00007c00)
+
+/* macros for BB_rx_ocgain */
+#define PHY_BB_RX_OCGAIN_ADDRESS 0x00009a00
+#define PHY_BB_RX_OCGAIN_OFFSET 0x00009a00
+#define PHY_BB_RX_OCGAIN_GAIN_ENTRY_MSB 31
+#define PHY_BB_RX_OCGAIN_GAIN_ENTRY_LSB 0
+#define PHY_BB_RX_OCGAIN_GAIN_ENTRY_MASK 0xffffffff
+#define PHY_BB_RX_OCGAIN_GAIN_ENTRY_SET(x) (((x) << 0) & 0xffffffff)
+
+/* macros for BB_tx_crc */
+#define PHY_BB_TX_CRC_ADDRESS 0x00009c00
+#define PHY_BB_TX_CRC_OFFSET 0x00009c00
+#define PHY_BB_TX_CRC_TX_CRC_MSB 15
+#define PHY_BB_TX_CRC_TX_CRC_LSB 0
+#define PHY_BB_TX_CRC_TX_CRC_MASK 0x0000ffff
+#define PHY_BB_TX_CRC_TX_CRC_GET(x) (((x) & 0x0000ffff) >> 0)
+
+/* macros for BB_iq_adc_meas_0_b0 */
+#define PHY_BB_IQ_ADC_MEAS_0_B0_ADDRESS 0x00009c10
+#define PHY_BB_IQ_ADC_MEAS_0_B0_OFFSET 0x00009c10
+#define PHY_BB_IQ_ADC_MEAS_0_B0_GAIN_DC_IQ_CAL_MEAS_0_0_MSB 31
+#define PHY_BB_IQ_ADC_MEAS_0_B0_GAIN_DC_IQ_CAL_MEAS_0_0_LSB 0
+#define PHY_BB_IQ_ADC_MEAS_0_B0_GAIN_DC_IQ_CAL_MEAS_0_0_MASK 0xffffffff
+#define PHY_BB_IQ_ADC_MEAS_0_B0_GAIN_DC_IQ_CAL_MEAS_0_0_GET(x) (((x) & 0xffffffff) >> 0)
+
+/* macros for BB_iq_adc_meas_1_b0 */
+#define PHY_BB_IQ_ADC_MEAS_1_B0_ADDRESS 0x00009c14
+#define PHY_BB_IQ_ADC_MEAS_1_B0_OFFSET 0x00009c14
+#define PHY_BB_IQ_ADC_MEAS_1_B0_GAIN_DC_IQ_CAL_MEAS_1_0_MSB 31
+#define PHY_BB_IQ_ADC_MEAS_1_B0_GAIN_DC_IQ_CAL_MEAS_1_0_LSB 0
+#define PHY_BB_IQ_ADC_MEAS_1_B0_GAIN_DC_IQ_CAL_MEAS_1_0_MASK 0xffffffff
+#define PHY_BB_IQ_ADC_MEAS_1_B0_GAIN_DC_IQ_CAL_MEAS_1_0_GET(x) (((x) & 0xffffffff) >> 0)
+
+/* macros for BB_iq_adc_meas_2_b0 */
+#define PHY_BB_IQ_ADC_MEAS_2_B0_ADDRESS 0x00009c18
+#define PHY_BB_IQ_ADC_MEAS_2_B0_OFFSET 0x00009c18
+#define PHY_BB_IQ_ADC_MEAS_2_B0_GAIN_DC_IQ_CAL_MEAS_2_0_MSB 31
+#define PHY_BB_IQ_ADC_MEAS_2_B0_GAIN_DC_IQ_CAL_MEAS_2_0_LSB 0
+#define PHY_BB_IQ_ADC_MEAS_2_B0_GAIN_DC_IQ_CAL_MEAS_2_0_MASK 0xffffffff
+#define PHY_BB_IQ_ADC_MEAS_2_B0_GAIN_DC_IQ_CAL_MEAS_2_0_GET(x) (((x) & 0xffffffff) >> 0)
+
+/* macros for BB_iq_adc_meas_3_b0 */
+#define PHY_BB_IQ_ADC_MEAS_3_B0_ADDRESS 0x00009c1c
+#define PHY_BB_IQ_ADC_MEAS_3_B0_OFFSET 0x00009c1c
+#define PHY_BB_IQ_ADC_MEAS_3_B0_GAIN_DC_IQ_CAL_MEAS_3_0_MSB 31
+#define PHY_BB_IQ_ADC_MEAS_3_B0_GAIN_DC_IQ_CAL_MEAS_3_0_LSB 0
+#define PHY_BB_IQ_ADC_MEAS_3_B0_GAIN_DC_IQ_CAL_MEAS_3_0_MASK 0xffffffff
+#define PHY_BB_IQ_ADC_MEAS_3_B0_GAIN_DC_IQ_CAL_MEAS_3_0_GET(x) (((x) & 0xffffffff) >> 0)
+
+/* macros for BB_rfbus_grant */
+#define PHY_BB_RFBUS_GRANT_ADDRESS 0x00009c20
+#define PHY_BB_RFBUS_GRANT_OFFSET 0x00009c20
+#define PHY_BB_RFBUS_GRANT_RFBUS_GRANT_MSB 0
+#define PHY_BB_RFBUS_GRANT_RFBUS_GRANT_LSB 0
+#define PHY_BB_RFBUS_GRANT_RFBUS_GRANT_MASK 0x00000001
+#define PHY_BB_RFBUS_GRANT_RFBUS_GRANT_GET(x) (((x) & 0x00000001) >> 0)
+#define PHY_BB_RFBUS_GRANT_BT_ANT_MSB 1
+#define PHY_BB_RFBUS_GRANT_BT_ANT_LSB 1
+#define PHY_BB_RFBUS_GRANT_BT_ANT_MASK 0x00000002
+#define PHY_BB_RFBUS_GRANT_BT_ANT_GET(x) (((x) & 0x00000002) >> 1)
+
+/* macros for BB_tstadc */
+#define PHY_BB_TSTADC_ADDRESS 0x00009c24
+#define PHY_BB_TSTADC_OFFSET 0x00009c24
+#define PHY_BB_TSTADC_TSTADC_OUT_Q_MSB 9
+#define PHY_BB_TSTADC_TSTADC_OUT_Q_LSB 0
+#define PHY_BB_TSTADC_TSTADC_OUT_Q_MASK 0x000003ff
+#define PHY_BB_TSTADC_TSTADC_OUT_Q_GET(x) (((x) & 0x000003ff) >> 0)
+#define PHY_BB_TSTADC_TSTADC_OUT_I_MSB 19
+#define PHY_BB_TSTADC_TSTADC_OUT_I_LSB 10
+#define PHY_BB_TSTADC_TSTADC_OUT_I_MASK 0x000ffc00
+#define PHY_BB_TSTADC_TSTADC_OUT_I_GET(x) (((x) & 0x000ffc00) >> 10)
+
+/* macros for BB_tstdac */
+#define PHY_BB_TSTDAC_ADDRESS 0x00009c28
+#define PHY_BB_TSTDAC_OFFSET 0x00009c28
+#define PHY_BB_TSTDAC_TSTDAC_OUT_Q_MSB 9
+#define PHY_BB_TSTDAC_TSTDAC_OUT_Q_LSB 0
+#define PHY_BB_TSTDAC_TSTDAC_OUT_Q_MASK 0x000003ff
+#define PHY_BB_TSTDAC_TSTDAC_OUT_Q_GET(x) (((x) & 0x000003ff) >> 0)
+#define PHY_BB_TSTDAC_TSTDAC_OUT_I_MSB 19
+#define PHY_BB_TSTDAC_TSTDAC_OUT_I_LSB 10
+#define PHY_BB_TSTDAC_TSTDAC_OUT_I_MASK 0x000ffc00
+#define PHY_BB_TSTDAC_TSTDAC_OUT_I_GET(x) (((x) & 0x000ffc00) >> 10)
+
+/* macros for BB_illegal_tx_rate */
+#define PHY_BB_ILLEGAL_TX_RATE_ADDRESS 0x00009c30
+#define PHY_BB_ILLEGAL_TX_RATE_OFFSET 0x00009c30
+#define PHY_BB_ILLEGAL_TX_RATE_ILLEGAL_TX_RATE_MSB 0
+#define PHY_BB_ILLEGAL_TX_RATE_ILLEGAL_TX_RATE_LSB 0
+#define PHY_BB_ILLEGAL_TX_RATE_ILLEGAL_TX_RATE_MASK 0x00000001
+#define PHY_BB_ILLEGAL_TX_RATE_ILLEGAL_TX_RATE_GET(x) (((x) & 0x00000001) >> 0)
+
+/* macros for BB_spur_report_b0 */
+#define PHY_BB_SPUR_REPORT_B0_ADDRESS 0x00009c34
+#define PHY_BB_SPUR_REPORT_B0_OFFSET 0x00009c34
+#define PHY_BB_SPUR_REPORT_B0_SPUR_EST_I_0_MSB 7
+#define PHY_BB_SPUR_REPORT_B0_SPUR_EST_I_0_LSB 0
+#define PHY_BB_SPUR_REPORT_B0_SPUR_EST_I_0_MASK 0x000000ff
+#define PHY_BB_SPUR_REPORT_B0_SPUR_EST_I_0_GET(x) (((x) & 0x000000ff) >> 0)
+#define PHY_BB_SPUR_REPORT_B0_SPUR_EST_Q_0_MSB 15
+#define PHY_BB_SPUR_REPORT_B0_SPUR_EST_Q_0_LSB 8
+#define PHY_BB_SPUR_REPORT_B0_SPUR_EST_Q_0_MASK 0x0000ff00
+#define PHY_BB_SPUR_REPORT_B0_SPUR_EST_Q_0_GET(x) (((x) & 0x0000ff00) >> 8)
+#define PHY_BB_SPUR_REPORT_B0_POWER_WITH_SPUR_REMOVED_0_MSB 31
+#define PHY_BB_SPUR_REPORT_B0_POWER_WITH_SPUR_REMOVED_0_LSB 16
+#define PHY_BB_SPUR_REPORT_B0_POWER_WITH_SPUR_REMOVED_0_MASK 0xffff0000
+#define PHY_BB_SPUR_REPORT_B0_POWER_WITH_SPUR_REMOVED_0_GET(x) (((x) & 0xffff0000) >> 16)
+
+/* macros for BB_channel_status */
+#define PHY_BB_CHANNEL_STATUS_ADDRESS 0x00009c38
+#define PHY_BB_CHANNEL_STATUS_OFFSET 0x00009c38
+#define PHY_BB_CHANNEL_STATUS_BT_ACTIVE_MSB 0
+#define PHY_BB_CHANNEL_STATUS_BT_ACTIVE_LSB 0
+#define PHY_BB_CHANNEL_STATUS_BT_ACTIVE_MASK 0x00000001
+#define PHY_BB_CHANNEL_STATUS_BT_ACTIVE_GET(x) (((x) & 0x00000001) >> 0)
+#define PHY_BB_CHANNEL_STATUS_RX_CLEAR_RAW_MSB 1
+#define PHY_BB_CHANNEL_STATUS_RX_CLEAR_RAW_LSB 1
+#define PHY_BB_CHANNEL_STATUS_RX_CLEAR_RAW_MASK 0x00000002
+#define PHY_BB_CHANNEL_STATUS_RX_CLEAR_RAW_GET(x) (((x) & 0x00000002) >> 1)
+#define PHY_BB_CHANNEL_STATUS_RX_CLEAR_MAC_MSB 2
+#define PHY_BB_CHANNEL_STATUS_RX_CLEAR_MAC_LSB 2
+#define PHY_BB_CHANNEL_STATUS_RX_CLEAR_MAC_MASK 0x00000004
+#define PHY_BB_CHANNEL_STATUS_RX_CLEAR_MAC_GET(x) (((x) & 0x00000004) >> 2)
+#define PHY_BB_CHANNEL_STATUS_RX_CLEAR_PAD_MSB 3
+#define PHY_BB_CHANNEL_STATUS_RX_CLEAR_PAD_LSB 3
+#define PHY_BB_CHANNEL_STATUS_RX_CLEAR_PAD_MASK 0x00000008
+#define PHY_BB_CHANNEL_STATUS_RX_CLEAR_PAD_GET(x) (((x) & 0x00000008) >> 3)
+#define PHY_BB_CHANNEL_STATUS_BB_SW_OUT_0_MSB 5
+#define PHY_BB_CHANNEL_STATUS_BB_SW_OUT_0_LSB 4
+#define PHY_BB_CHANNEL_STATUS_BB_SW_OUT_0_MASK 0x00000030
+#define PHY_BB_CHANNEL_STATUS_BB_SW_OUT_0_GET(x) (((x) & 0x00000030) >> 4)
+#define PHY_BB_CHANNEL_STATUS_BB_SW_OUT_1_MSB 7
+#define PHY_BB_CHANNEL_STATUS_BB_SW_OUT_1_LSB 6
+#define PHY_BB_CHANNEL_STATUS_BB_SW_OUT_1_MASK 0x000000c0
+#define PHY_BB_CHANNEL_STATUS_BB_SW_OUT_1_GET(x) (((x) & 0x000000c0) >> 6)
+#define PHY_BB_CHANNEL_STATUS_BB_SW_OUT_2_MSB 9
+#define PHY_BB_CHANNEL_STATUS_BB_SW_OUT_2_LSB 8
+#define PHY_BB_CHANNEL_STATUS_BB_SW_OUT_2_MASK 0x00000300
+#define PHY_BB_CHANNEL_STATUS_BB_SW_OUT_2_GET(x) (((x) & 0x00000300) >> 8)
+#define PHY_BB_CHANNEL_STATUS_BB_SW_COM_OUT_MSB 13
+#define PHY_BB_CHANNEL_STATUS_BB_SW_COM_OUT_LSB 10
+#define PHY_BB_CHANNEL_STATUS_BB_SW_COM_OUT_MASK 0x00003c00
+#define PHY_BB_CHANNEL_STATUS_BB_SW_COM_OUT_GET(x) (((x) & 0x00003c00) >> 10)
+#define PHY_BB_CHANNEL_STATUS_ANT_DIV_CFG_USED_MSB 16
+#define PHY_BB_CHANNEL_STATUS_ANT_DIV_CFG_USED_LSB 14
+#define PHY_BB_CHANNEL_STATUS_ANT_DIV_CFG_USED_MASK 0x0001c000
+#define PHY_BB_CHANNEL_STATUS_ANT_DIV_CFG_USED_GET(x) (((x) & 0x0001c000) >> 14)
+
+/* macros for BB_rssi_b0 */
+#define PHY_BB_RSSI_B0_ADDRESS 0x00009c3c
+#define PHY_BB_RSSI_B0_OFFSET 0x00009c3c
+#define PHY_BB_RSSI_B0_RSSI_0_MSB 7
+#define PHY_BB_RSSI_B0_RSSI_0_LSB 0
+#define PHY_BB_RSSI_B0_RSSI_0_MASK 0x000000ff
+#define PHY_BB_RSSI_B0_RSSI_0_GET(x) (((x) & 0x000000ff) >> 0)
+#define PHY_BB_RSSI_B0_RSSI_EXT_0_MSB 15
+#define PHY_BB_RSSI_B0_RSSI_EXT_0_LSB 8
+#define PHY_BB_RSSI_B0_RSSI_EXT_0_MASK 0x0000ff00
+#define PHY_BB_RSSI_B0_RSSI_EXT_0_GET(x) (((x) & 0x0000ff00) >> 8)
+
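+/*
+ * Read-only status fields (BB_rssi_b0 and the report/measurement registers
+ * above) provide only _GET helpers; values are unpacked straight from the
+ * register word.  As before, reg_read() is a hypothetical MMIO accessor
+ * standing in for the driver's own routine.
+ *
+ *	u32 word = reg_read(PHY_BB_RSSI_B0_ADDRESS);
+ *	int rssi     = PHY_BB_RSSI_B0_RSSI_0_GET(word);
+ *	int rssi_ext = PHY_BB_RSSI_B0_RSSI_EXT_0_GET(word);
+ */
+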
+/* macros for BB_spur_est_cck_report_b0 */
+#define PHY_BB_SPUR_EST_CCK_REPORT_B0_ADDRESS 0x00009c40
+#define PHY_BB_SPUR_EST_CCK_REPORT_B0_OFFSET 0x00009c40
+#define PHY_BB_SPUR_EST_CCK_REPORT_B0_SPUR_EST_SD_I_0_CCK_MSB 7
+#define PHY_BB_SPUR_EST_CCK_REPORT_B0_SPUR_EST_SD_I_0_CCK_LSB 0
+#define PHY_BB_SPUR_EST_CCK_REPORT_B0_SPUR_EST_SD_I_0_CCK_MASK 0x000000ff
+#define PHY_BB_SPUR_EST_CCK_REPORT_B0_SPUR_EST_SD_I_0_CCK_GET(x) (((x) & 0x000000ff) >> 0)
+#define PHY_BB_SPUR_EST_CCK_REPORT_B0_SPUR_EST_SD_Q_0_CCK_MSB 15
+#define PHY_BB_SPUR_EST_CCK_REPORT_B0_SPUR_EST_SD_Q_0_CCK_LSB 8
+#define PHY_BB_SPUR_EST_CCK_REPORT_B0_SPUR_EST_SD_Q_0_CCK_MASK 0x0000ff00
+#define PHY_BB_SPUR_EST_CCK_REPORT_B0_SPUR_EST_SD_Q_0_CCK_GET(x) (((x) & 0x0000ff00) >> 8)
+#define PHY_BB_SPUR_EST_CCK_REPORT_B0_SPUR_EST_I_0_CCK_MSB 23
+#define PHY_BB_SPUR_EST_CCK_REPORT_B0_SPUR_EST_I_0_CCK_LSB 16
+#define PHY_BB_SPUR_EST_CCK_REPORT_B0_SPUR_EST_I_0_CCK_MASK 0x00ff0000
+#define PHY_BB_SPUR_EST_CCK_REPORT_B0_SPUR_EST_I_0_CCK_GET(x) (((x) & 0x00ff0000) >> 16)
+#define PHY_BB_SPUR_EST_CCK_REPORT_B0_SPUR_EST_Q_0_CCK_MSB 31
+#define PHY_BB_SPUR_EST_CCK_REPORT_B0_SPUR_EST_Q_0_CCK_LSB 24
+#define PHY_BB_SPUR_EST_CCK_REPORT_B0_SPUR_EST_Q_0_CCK_MASK 0xff000000
+#define PHY_BB_SPUR_EST_CCK_REPORT_B0_SPUR_EST_Q_0_CCK_GET(x) (((x) & 0xff000000) >> 24)
+
+/* macros for BB_chan_info_noise_pwr */
+#define PHY_BB_CHAN_INFO_NOISE_PWR_ADDRESS 0x00009cac
+#define PHY_BB_CHAN_INFO_NOISE_PWR_OFFSET 0x00009cac
+#define PHY_BB_CHAN_INFO_NOISE_PWR_NOISE_POWER_MSB 11
+#define PHY_BB_CHAN_INFO_NOISE_PWR_NOISE_POWER_LSB 0
+#define PHY_BB_CHAN_INFO_NOISE_PWR_NOISE_POWER_MASK 0x00000fff
+#define PHY_BB_CHAN_INFO_NOISE_PWR_NOISE_POWER_GET(x) (((x) & 0x00000fff) >> 0)
+
+/* macros for BB_chan_info_gain_diff */
+#define PHY_BB_CHAN_INFO_GAIN_DIFF_ADDRESS 0x00009cb0
+#define PHY_BB_CHAN_INFO_GAIN_DIFF_OFFSET 0x00009cb0
+#define PHY_BB_CHAN_INFO_GAIN_DIFF_FINE_PPM_MSB 11
+#define PHY_BB_CHAN_INFO_GAIN_DIFF_FINE_PPM_LSB 0
+#define PHY_BB_CHAN_INFO_GAIN_DIFF_FINE_PPM_MASK 0x00000fff
+#define PHY_BB_CHAN_INFO_GAIN_DIFF_FINE_PPM_GET(x) (((x) & 0x00000fff) >> 0)
+
+/* macros for BB_chan_info_fine_timing */
+#define PHY_BB_CHAN_INFO_FINE_TIMING_ADDRESS 0x00009cb4
+#define PHY_BB_CHAN_INFO_FINE_TIMING_OFFSET 0x00009cb4
+#define PHY_BB_CHAN_INFO_FINE_TIMING_COARSE_PPM_MSB 11
+#define PHY_BB_CHAN_INFO_FINE_TIMING_COARSE_PPM_LSB 0
+#define PHY_BB_CHAN_INFO_FINE_TIMING_COARSE_PPM_MASK 0x00000fff
+#define PHY_BB_CHAN_INFO_FINE_TIMING_COARSE_PPM_GET(x) (((x) & 0x00000fff) >> 0)
+#define PHY_BB_CHAN_INFO_FINE_TIMING_FINE_TIMING_MSB 21
+#define PHY_BB_CHAN_INFO_FINE_TIMING_FINE_TIMING_LSB 12
+#define PHY_BB_CHAN_INFO_FINE_TIMING_FINE_TIMING_MASK 0x003ff000
+#define PHY_BB_CHAN_INFO_FINE_TIMING_FINE_TIMING_GET(x) (((x) & 0x003ff000) >> 12)
+
+/* macros for BB_chan_info_gain_b0 */
+#define PHY_BB_CHAN_INFO_GAIN_B0_ADDRESS 0x00009cb8
+#define PHY_BB_CHAN_INFO_GAIN_B0_OFFSET 0x00009cb8
+#define PHY_BB_CHAN_INFO_GAIN_B0_CHAN_INFO_RSSI_0_MSB 7
+#define PHY_BB_CHAN_INFO_GAIN_B0_CHAN_INFO_RSSI_0_LSB 0
+#define PHY_BB_CHAN_INFO_GAIN_B0_CHAN_INFO_RSSI_0_MASK 0x000000ff
+#define PHY_BB_CHAN_INFO_GAIN_B0_CHAN_INFO_RSSI_0_GET(x) (((x) & 0x000000ff) >> 0)
+#define PHY_BB_CHAN_INFO_GAIN_B0_CHAN_INFO_RF_GAIN_0_MSB 15
+#define PHY_BB_CHAN_INFO_GAIN_B0_CHAN_INFO_RF_GAIN_0_LSB 8
+#define PHY_BB_CHAN_INFO_GAIN_B0_CHAN_INFO_RF_GAIN_0_MASK 0x0000ff00
+#define PHY_BB_CHAN_INFO_GAIN_B0_CHAN_INFO_RF_GAIN_0_GET(x) (((x) & 0x0000ff00) >> 8)
+#define PHY_BB_CHAN_INFO_GAIN_B0_CHAN_INFO_XATTEN1_SW_0_MSB 16
+#define PHY_BB_CHAN_INFO_GAIN_B0_CHAN_INFO_XATTEN1_SW_0_LSB 16
+#define PHY_BB_CHAN_INFO_GAIN_B0_CHAN_INFO_XATTEN1_SW_0_MASK 0x00010000
+#define PHY_BB_CHAN_INFO_GAIN_B0_CHAN_INFO_XATTEN1_SW_0_GET(x) (((x) & 0x00010000) >> 16)
+#define PHY_BB_CHAN_INFO_GAIN_B0_CHAN_INFO_XATTEN2_SW_0_MSB 17
+#define PHY_BB_CHAN_INFO_GAIN_B0_CHAN_INFO_XATTEN2_SW_0_LSB 17
+#define PHY_BB_CHAN_INFO_GAIN_B0_CHAN_INFO_XATTEN2_SW_0_MASK 0x00020000
+#define PHY_BB_CHAN_INFO_GAIN_B0_CHAN_INFO_XATTEN2_SW_0_GET(x) (((x) & 0x00020000) >> 17)
+
+/* macros for BB_chan_info_chan_tab_b0 */
+#define PHY_BB_CHAN_INFO_CHAN_TAB_B0_ADDRESS 0x00009cbc
+#define PHY_BB_CHAN_INFO_CHAN_TAB_B0_OFFSET 0x00009cbc
+#define PHY_BB_CHAN_INFO_CHAN_TAB_B0_MAN_Q_0_MSB 5
+#define PHY_BB_CHAN_INFO_CHAN_TAB_B0_MAN_Q_0_LSB 0
+#define PHY_BB_CHAN_INFO_CHAN_TAB_B0_MAN_Q_0_MASK 0x0000003f
+#define PHY_BB_CHAN_INFO_CHAN_TAB_B0_MAN_Q_0_GET(x) (((x) & 0x0000003f) >> 0)
+#define PHY_BB_CHAN_INFO_CHAN_TAB_B0_MAN_I_0_MSB 11
+#define PHY_BB_CHAN_INFO_CHAN_TAB_B0_MAN_I_0_LSB 6
+#define PHY_BB_CHAN_INFO_CHAN_TAB_B0_MAN_I_0_MASK 0x00000fc0
+#define PHY_BB_CHAN_INFO_CHAN_TAB_B0_MAN_I_0_GET(x) (((x) & 0x00000fc0) >> 6)
+#define PHY_BB_CHAN_INFO_CHAN_TAB_B0_EXP_0_MSB 15
+#define PHY_BB_CHAN_INFO_CHAN_TAB_B0_EXP_0_LSB 12
+#define PHY_BB_CHAN_INFO_CHAN_TAB_B0_EXP_0_MASK 0x0000f000
+#define PHY_BB_CHAN_INFO_CHAN_TAB_B0_EXP_0_GET(x) (((x) & 0x0000f000) >> 12)
+#define PHY_BB_CHAN_INFO_CHAN_TAB_B0_MAN_Q_1_MSB 21
+#define PHY_BB_CHAN_INFO_CHAN_TAB_B0_MAN_Q_1_LSB 16
+#define PHY_BB_CHAN_INFO_CHAN_TAB_B0_MAN_Q_1_MASK 0x003f0000
+#define PHY_BB_CHAN_INFO_CHAN_TAB_B0_MAN_Q_1_GET(x) (((x) & 0x003f0000) >> 16)
+#define PHY_BB_CHAN_INFO_CHAN_TAB_B0_MAN_I_1_MSB 27
+#define PHY_BB_CHAN_INFO_CHAN_TAB_B0_MAN_I_1_LSB 22
+#define PHY_BB_CHAN_INFO_CHAN_TAB_B0_MAN_I_1_MASK 0x0fc00000
+#define PHY_BB_CHAN_INFO_CHAN_TAB_B0_MAN_I_1_GET(x) (((x) & 0x0fc00000) >> 22)
+#define PHY_BB_CHAN_INFO_CHAN_TAB_B0_EXP_1_MSB 31
+#define PHY_BB_CHAN_INFO_CHAN_TAB_B0_EXP_1_LSB 28
+#define PHY_BB_CHAN_INFO_CHAN_TAB_B0_EXP_1_MASK 0xf0000000
+#define PHY_BB_CHAN_INFO_CHAN_TAB_B0_EXP_1_GET(x) (((x) & 0xf0000000) >> 28)
+
+/* macros for BB_paprd_am2am_mask */
+#define PHY_BB_PAPRD_AM2AM_MASK_ADDRESS 0x00009de4
+#define PHY_BB_PAPRD_AM2AM_MASK_OFFSET 0x00009de4
+#define PHY_BB_PAPRD_AM2AM_MASK_PAPRD_AM2AM_MASK_MSB 24
+#define PHY_BB_PAPRD_AM2AM_MASK_PAPRD_AM2AM_MASK_LSB 0
+#define PHY_BB_PAPRD_AM2AM_MASK_PAPRD_AM2AM_MASK_MASK 0x01ffffff
+#define PHY_BB_PAPRD_AM2AM_MASK_PAPRD_AM2AM_MASK_GET(x) (((x) & 0x01ffffff) >> 0)
+#define PHY_BB_PAPRD_AM2AM_MASK_PAPRD_AM2AM_MASK_SET(x) (((x) << 0) & 0x01ffffff)
+
+/* macros for BB_paprd_am2pm_mask */
+#define PHY_BB_PAPRD_AM2PM_MASK_ADDRESS 0x00009de8
+#define PHY_BB_PAPRD_AM2PM_MASK_OFFSET 0x00009de8
+#define PHY_BB_PAPRD_AM2PM_MASK_PAPRD_AM2PM_MASK_MSB 24
+#define PHY_BB_PAPRD_AM2PM_MASK_PAPRD_AM2PM_MASK_LSB 0
+#define PHY_BB_PAPRD_AM2PM_MASK_PAPRD_AM2PM_MASK_MASK 0x01ffffff
+#define PHY_BB_PAPRD_AM2PM_MASK_PAPRD_AM2PM_MASK_GET(x) (((x) & 0x01ffffff) >> 0)
+#define PHY_BB_PAPRD_AM2PM_MASK_PAPRD_AM2PM_MASK_SET(x) (((x) << 0) & 0x01ffffff)
+
+/* macros for BB_paprd_ht40_mask */
+#define PHY_BB_PAPRD_HT40_MASK_ADDRESS 0x00009dec
+#define PHY_BB_PAPRD_HT40_MASK_OFFSET 0x00009dec
+#define PHY_BB_PAPRD_HT40_MASK_PAPRD_HT40_MASK_MSB 24
+#define PHY_BB_PAPRD_HT40_MASK_PAPRD_HT40_MASK_LSB 0
+#define PHY_BB_PAPRD_HT40_MASK_PAPRD_HT40_MASK_MASK 0x01ffffff
+#define PHY_BB_PAPRD_HT40_MASK_PAPRD_HT40_MASK_GET(x) (((x) & 0x01ffffff) >> 0)
+#define PHY_BB_PAPRD_HT40_MASK_PAPRD_HT40_MASK_SET(x) (((x) << 0) & 0x01ffffff)
+
+/* macros for BB_paprd_ctrl0 */
+#define PHY_BB_PAPRD_CTRL0_ADDRESS 0x00009df0
+#define PHY_BB_PAPRD_CTRL0_OFFSET 0x00009df0
+#define PHY_BB_PAPRD_CTRL0_PAPRD_ENABLE_MSB 0
+#define PHY_BB_PAPRD_CTRL0_PAPRD_ENABLE_LSB 0
+#define PHY_BB_PAPRD_CTRL0_PAPRD_ENABLE_MASK 0x00000001
+#define PHY_BB_PAPRD_CTRL0_PAPRD_ENABLE_GET(x) (((x) & 0x00000001) >> 0)
+#define PHY_BB_PAPRD_CTRL0_PAPRD_ENABLE_SET(x) (((x) << 0) & 0x00000001)
+#define PHY_BB_PAPRD_CTRL0_PAPRD_ADAPTIVE_USE_SINGLE_TABLE_MSB 1
+#define PHY_BB_PAPRD_CTRL0_PAPRD_ADAPTIVE_USE_SINGLE_TABLE_LSB 1
+#define PHY_BB_PAPRD_CTRL0_PAPRD_ADAPTIVE_USE_SINGLE_TABLE_MASK 0x00000002
+#define PHY_BB_PAPRD_CTRL0_PAPRD_ADAPTIVE_USE_SINGLE_TABLE_GET(x) (((x) & 0x00000002) >> 1)
+#define PHY_BB_PAPRD_CTRL0_PAPRD_ADAPTIVE_USE_SINGLE_TABLE_SET(x) (((x) << 1) & 0x00000002)
+#define PHY_BB_PAPRD_CTRL0_PAPRD_VALID_GAIN_MSB 26
+#define PHY_BB_PAPRD_CTRL0_PAPRD_VALID_GAIN_LSB 2
+#define PHY_BB_PAPRD_CTRL0_PAPRD_VALID_GAIN_MASK 0x07fffffc
+#define PHY_BB_PAPRD_CTRL0_PAPRD_VALID_GAIN_GET(x) (((x) & 0x07fffffc) >> 2)
+#define PHY_BB_PAPRD_CTRL0_PAPRD_VALID_GAIN_SET(x) (((x) << 2) & 0x07fffffc)
+#define PHY_BB_PAPRD_CTRL0_PAPRD_MAG_THRSH_MSB 31
+#define PHY_BB_PAPRD_CTRL0_PAPRD_MAG_THRSH_LSB 27
+#define PHY_BB_PAPRD_CTRL0_PAPRD_MAG_THRSH_MASK 0xf8000000
+#define PHY_BB_PAPRD_CTRL0_PAPRD_MAG_THRSH_GET(x) (((x) & 0xf8000000) >> 27)
+#define PHY_BB_PAPRD_CTRL0_PAPRD_MAG_THRSH_SET(x) (((x) << 27) & 0xf8000000)
+
+/* macros for BB_paprd_ctrl1 */
+#define PHY_BB_PAPRD_CTRL1_ADDRESS 0x00009df4
+#define PHY_BB_PAPRD_CTRL1_OFFSET 0x00009df4
+#define PHY_BB_PAPRD_CTRL1_PAPRD_ADAPTIVE_SCALING_ENABLE_MSB 0
+#define PHY_BB_PAPRD_CTRL1_PAPRD_ADAPTIVE_SCALING_ENABLE_LSB 0
+#define PHY_BB_PAPRD_CTRL1_PAPRD_ADAPTIVE_SCALING_ENABLE_MASK 0x00000001
+#define PHY_BB_PAPRD_CTRL1_PAPRD_ADAPTIVE_SCALING_ENABLE_GET(x) (((x) & 0x00000001) >> 0)
+#define PHY_BB_PAPRD_CTRL1_PAPRD_ADAPTIVE_SCALING_ENABLE_SET(x) (((x) << 0) & 0x00000001)
+#define PHY_BB_PAPRD_CTRL1_PAPRD_ADAPTIVE_AM2AM_ENABLE_MSB 1
+#define PHY_BB_PAPRD_CTRL1_PAPRD_ADAPTIVE_AM2AM_ENABLE_LSB 1
+#define PHY_BB_PAPRD_CTRL1_PAPRD_ADAPTIVE_AM2AM_ENABLE_MASK 0x00000002
+#define PHY_BB_PAPRD_CTRL1_PAPRD_ADAPTIVE_AM2AM_ENABLE_GET(x) (((x) & 0x00000002) >> 1)
+#define PHY_BB_PAPRD_CTRL1_PAPRD_ADAPTIVE_AM2AM_ENABLE_SET(x) (((x) << 1) & 0x00000002)
+#define PHY_BB_PAPRD_CTRL1_PAPRD_ADAPTIVE_AM2PM_ENABLE_MSB 2
+#define PHY_BB_PAPRD_CTRL1_PAPRD_ADAPTIVE_AM2PM_ENABLE_LSB 2
+#define PHY_BB_PAPRD_CTRL1_PAPRD_ADAPTIVE_AM2PM_ENABLE_MASK 0x00000004
+#define PHY_BB_PAPRD_CTRL1_PAPRD_ADAPTIVE_AM2PM_ENABLE_GET(x) (((x) & 0x00000004) >> 2)
+#define PHY_BB_PAPRD_CTRL1_PAPRD_ADAPTIVE_AM2PM_ENABLE_SET(x) (((x) << 2) & 0x00000004)
+#define PHY_BB_PAPRD_CTRL1_PAPRD_POWER_AT_AM2AM_CAL_MSB 8
+#define PHY_BB_PAPRD_CTRL1_PAPRD_POWER_AT_AM2AM_CAL_LSB 3
+#define PHY_BB_PAPRD_CTRL1_PAPRD_POWER_AT_AM2AM_CAL_MASK 0x000001f8
+#define PHY_BB_PAPRD_CTRL1_PAPRD_POWER_AT_AM2AM_CAL_GET(x) (((x) & 0x000001f8) >> 3)
+#define PHY_BB_PAPRD_CTRL1_PAPRD_POWER_AT_AM2AM_CAL_SET(x) (((x) << 3) & 0x000001f8)
+#define PHY_BB_PAPRD_CTRL1_PA_GAIN_SCALE_FACTOR_MSB 16
+#define PHY_BB_PAPRD_CTRL1_PA_GAIN_SCALE_FACTOR_LSB 9
+#define PHY_BB_PAPRD_CTRL1_PA_GAIN_SCALE_FACTOR_MASK 0x0001fe00
+#define PHY_BB_PAPRD_CTRL1_PA_GAIN_SCALE_FACTOR_GET(x) (((x) & 0x0001fe00) >> 9)
+#define PHY_BB_PAPRD_CTRL1_PA_GAIN_SCALE_FACTOR_SET(x) (((x) << 9) & 0x0001fe00)
+#define PHY_BB_PAPRD_CTRL1_PAPRD_MAG_SCALE_FACTOR_MSB 26
+#define PHY_BB_PAPRD_CTRL1_PAPRD_MAG_SCALE_FACTOR_LSB 17
+#define PHY_BB_PAPRD_CTRL1_PAPRD_MAG_SCALE_FACTOR_MASK 0x07fe0000
+#define PHY_BB_PAPRD_CTRL1_PAPRD_MAG_SCALE_FACTOR_GET(x) (((x) & 0x07fe0000) >> 17)
+#define PHY_BB_PAPRD_CTRL1_PAPRD_MAG_SCALE_FACTOR_SET(x) (((x) << 17) & 0x07fe0000)
+#define PHY_BB_PAPRD_CTRL1_PAPRD_TRAINER_IANDQ_SEL_MSB 27
+#define PHY_BB_PAPRD_CTRL1_PAPRD_TRAINER_IANDQ_SEL_LSB 27
+#define PHY_BB_PAPRD_CTRL1_PAPRD_TRAINER_IANDQ_SEL_MASK 0x08000000
+#define PHY_BB_PAPRD_CTRL1_PAPRD_TRAINER_IANDQ_SEL_GET(x) (((x) & 0x08000000) >> 27)
+#define PHY_BB_PAPRD_CTRL1_PAPRD_TRAINER_IANDQ_SEL_SET(x) (((x) << 27) & 0x08000000)
+
+/* macros for BB_pa_gain123 */
+#define PHY_BB_PA_GAIN123_ADDRESS 0x00009df8
+#define PHY_BB_PA_GAIN123_OFFSET 0x00009df8
+#define PHY_BB_PA_GAIN123_PA_GAIN1_MSB 9
+#define PHY_BB_PA_GAIN123_PA_GAIN1_LSB 0
+#define PHY_BB_PA_GAIN123_PA_GAIN1_MASK 0x000003ff
+#define PHY_BB_PA_GAIN123_PA_GAIN1_GET(x) (((x) & 0x000003ff) >> 0)
+#define PHY_BB_PA_GAIN123_PA_GAIN1_SET(x) (((x) << 0) & 0x000003ff)
+#define PHY_BB_PA_GAIN123_PA_GAIN2_MSB 19
+#define PHY_BB_PA_GAIN123_PA_GAIN2_LSB 10
+#define PHY_BB_PA_GAIN123_PA_GAIN2_MASK 0x000ffc00
+#define PHY_BB_PA_GAIN123_PA_GAIN2_GET(x) (((x) & 0x000ffc00) >> 10)
+#define PHY_BB_PA_GAIN123_PA_GAIN2_SET(x) (((x) << 10) & 0x000ffc00)
+#define PHY_BB_PA_GAIN123_PA_GAIN3_MSB 29
+#define PHY_BB_PA_GAIN123_PA_GAIN3_LSB 20
+#define PHY_BB_PA_GAIN123_PA_GAIN3_MASK 0x3ff00000
+#define PHY_BB_PA_GAIN123_PA_GAIN3_GET(x) (((x) & 0x3ff00000) >> 20)
+#define PHY_BB_PA_GAIN123_PA_GAIN3_SET(x) (((x) << 20) & 0x3ff00000)
+
+/* macros for BB_pa_gain45 */
+#define PHY_BB_PA_GAIN45_ADDRESS 0x00009dfc
+#define PHY_BB_PA_GAIN45_OFFSET 0x00009dfc
+#define PHY_BB_PA_GAIN45_PA_GAIN4_MSB 9
+#define PHY_BB_PA_GAIN45_PA_GAIN4_LSB 0
+#define PHY_BB_PA_GAIN45_PA_GAIN4_MASK 0x000003ff
+#define PHY_BB_PA_GAIN45_PA_GAIN4_GET(x) (((x) & 0x000003ff) >> 0)
+#define PHY_BB_PA_GAIN45_PA_GAIN4_SET(x) (((x) << 0) & 0x000003ff)
+#define PHY_BB_PA_GAIN45_PA_GAIN5_MSB 19
+#define PHY_BB_PA_GAIN45_PA_GAIN5_LSB 10
+#define PHY_BB_PA_GAIN45_PA_GAIN5_MASK 0x000ffc00
+#define PHY_BB_PA_GAIN45_PA_GAIN5_GET(x) (((x) & 0x000ffc00) >> 10)
+#define PHY_BB_PA_GAIN45_PA_GAIN5_SET(x) (((x) << 10) & 0x000ffc00)
+#define PHY_BB_PA_GAIN45_PAPRD_ADAPTIVE_TABLE_VALID_MSB 24
+#define PHY_BB_PA_GAIN45_PAPRD_ADAPTIVE_TABLE_VALID_LSB 20
+#define PHY_BB_PA_GAIN45_PAPRD_ADAPTIVE_TABLE_VALID_MASK 0x01f00000
+#define PHY_BB_PA_GAIN45_PAPRD_ADAPTIVE_TABLE_VALID_GET(x) (((x) & 0x01f00000) >> 20)
+#define PHY_BB_PA_GAIN45_PAPRD_ADAPTIVE_TABLE_VALID_SET(x) (((x) << 20) & 0x01f00000)
+
+/* macros for BB_paprd_pre_post_scale_0 */
+#define PHY_BB_PAPRD_PRE_POST_SCALE_0_ADDRESS 0x00009e00
+#define PHY_BB_PAPRD_PRE_POST_SCALE_0_OFFSET 0x00009e00
+#define PHY_BB_PAPRD_PRE_POST_SCALE_0_PAPRD_PRE_POST_SCALING_0_MSB 17
+#define PHY_BB_PAPRD_PRE_POST_SCALE_0_PAPRD_PRE_POST_SCALING_0_LSB 0
+#define PHY_BB_PAPRD_PRE_POST_SCALE_0_PAPRD_PRE_POST_SCALING_0_MASK 0x0003ffff
+#define PHY_BB_PAPRD_PRE_POST_SCALE_0_PAPRD_PRE_POST_SCALING_0_GET(x) (((x) & 0x0003ffff) >> 0)
+#define PHY_BB_PAPRD_PRE_POST_SCALE_0_PAPRD_PRE_POST_SCALING_0_SET(x) (((x) << 0) & 0x0003ffff)
+
+/* macros for BB_paprd_pre_post_scale_1 */
+#define PHY_BB_PAPRD_PRE_POST_SCALE_1_ADDRESS 0x00009e04
+#define PHY_BB_PAPRD_PRE_POST_SCALE_1_OFFSET 0x00009e04
+#define PHY_BB_PAPRD_PRE_POST_SCALE_1_PAPRD_PRE_POST_SCALING_1_MSB 17
+#define PHY_BB_PAPRD_PRE_POST_SCALE_1_PAPRD_PRE_POST_SCALING_1_LSB 0
+#define PHY_BB_PAPRD_PRE_POST_SCALE_1_PAPRD_PRE_POST_SCALING_1_MASK 0x0003ffff
+#define PHY_BB_PAPRD_PRE_POST_SCALE_1_PAPRD_PRE_POST_SCALING_1_GET(x) (((x) & 0x0003ffff) >> 0)
+#define PHY_BB_PAPRD_PRE_POST_SCALE_1_PAPRD_PRE_POST_SCALING_1_SET(x) (((x) << 0) & 0x0003ffff)
+
+/* macros for BB_paprd_pre_post_scale_2 */
+#define PHY_BB_PAPRD_PRE_POST_SCALE_2_ADDRESS 0x00009e08
+#define PHY_BB_PAPRD_PRE_POST_SCALE_2_OFFSET 0x00009e08
+#define PHY_BB_PAPRD_PRE_POST_SCALE_2_PAPRD_PRE_POST_SCALING_2_MSB 17
+#define PHY_BB_PAPRD_PRE_POST_SCALE_2_PAPRD_PRE_POST_SCALING_2_LSB 0
+#define PHY_BB_PAPRD_PRE_POST_SCALE_2_PAPRD_PRE_POST_SCALING_2_MASK 0x0003ffff
+#define PHY_BB_PAPRD_PRE_POST_SCALE_2_PAPRD_PRE_POST_SCALING_2_GET(x) (((x) & 0x0003ffff) >> 0)
+#define PHY_BB_PAPRD_PRE_POST_SCALE_2_PAPRD_PRE_POST_SCALING_2_SET(x) (((x) << 0) & 0x0003ffff)
+
+/* macros for BB_paprd_pre_post_scale_3 */
+#define PHY_BB_PAPRD_PRE_POST_SCALE_3_ADDRESS 0x00009e0c
+#define PHY_BB_PAPRD_PRE_POST_SCALE_3_OFFSET 0x00009e0c
+#define PHY_BB_PAPRD_PRE_POST_SCALE_3_PAPRD_PRE_POST_SCALING_3_MSB 17
+#define PHY_BB_PAPRD_PRE_POST_SCALE_3_PAPRD_PRE_POST_SCALING_3_LSB 0
+#define PHY_BB_PAPRD_PRE_POST_SCALE_3_PAPRD_PRE_POST_SCALING_3_MASK 0x0003ffff
+#define PHY_BB_PAPRD_PRE_POST_SCALE_3_PAPRD_PRE_POST_SCALING_3_GET(x) (((x) & 0x0003ffff) >> 0)
+#define PHY_BB_PAPRD_PRE_POST_SCALE_3_PAPRD_PRE_POST_SCALING_3_SET(x) (((x) << 0) & 0x0003ffff)
+
+/* macros for BB_paprd_pre_post_scale_4 */
+#define PHY_BB_PAPRD_PRE_POST_SCALE_4_ADDRESS 0x00009e10
+#define PHY_BB_PAPRD_PRE_POST_SCALE_4_OFFSET 0x00009e10
+#define PHY_BB_PAPRD_PRE_POST_SCALE_4_PAPRD_PRE_POST_SCALING_4_MSB 17
+#define PHY_BB_PAPRD_PRE_POST_SCALE_4_PAPRD_PRE_POST_SCALING_4_LSB 0
+#define PHY_BB_PAPRD_PRE_POST_SCALE_4_PAPRD_PRE_POST_SCALING_4_MASK 0x0003ffff
+#define PHY_BB_PAPRD_PRE_POST_SCALE_4_PAPRD_PRE_POST_SCALING_4_GET(x) (((x) & 0x0003ffff) >> 0)
+#define PHY_BB_PAPRD_PRE_POST_SCALE_4_PAPRD_PRE_POST_SCALING_4_SET(x) (((x) << 0) & 0x0003ffff)
+
+/* macros for BB_paprd_pre_post_scale_5 */
+#define PHY_BB_PAPRD_PRE_POST_SCALE_5_ADDRESS 0x00009e14
+#define PHY_BB_PAPRD_PRE_POST_SCALE_5_OFFSET 0x00009e14
+#define PHY_BB_PAPRD_PRE_POST_SCALE_5_PAPRD_PRE_POST_SCALING_5_MSB 17
+#define PHY_BB_PAPRD_PRE_POST_SCALE_5_PAPRD_PRE_POST_SCALING_5_LSB 0
+#define PHY_BB_PAPRD_PRE_POST_SCALE_5_PAPRD_PRE_POST_SCALING_5_MASK 0x0003ffff
+#define PHY_BB_PAPRD_PRE_POST_SCALE_5_PAPRD_PRE_POST_SCALING_5_GET(x) (((x) & 0x0003ffff) >> 0)
+#define PHY_BB_PAPRD_PRE_POST_SCALE_5_PAPRD_PRE_POST_SCALING_5_SET(x) (((x) << 0) & 0x0003ffff)
+
+/* macros for BB_paprd_pre_post_scale_6 */
+#define PHY_BB_PAPRD_PRE_POST_SCALE_6_ADDRESS 0x00009e18
+#define PHY_BB_PAPRD_PRE_POST_SCALE_6_OFFSET 0x00009e18
+#define PHY_BB_PAPRD_PRE_POST_SCALE_6_PAPRD_PRE_POST_SCALING_6_MSB 17
+#define PHY_BB_PAPRD_PRE_POST_SCALE_6_PAPRD_PRE_POST_SCALING_6_LSB 0
+#define PHY_BB_PAPRD_PRE_POST_SCALE_6_PAPRD_PRE_POST_SCALING_6_MASK 0x0003ffff
+#define PHY_BB_PAPRD_PRE_POST_SCALE_6_PAPRD_PRE_POST_SCALING_6_GET(x) (((x) & 0x0003ffff) >> 0)
+#define PHY_BB_PAPRD_PRE_POST_SCALE_6_PAPRD_PRE_POST_SCALING_6_SET(x) (((x) << 0) & 0x0003ffff)
+
+/* macros for BB_paprd_pre_post_scale_7 */
+#define PHY_BB_PAPRD_PRE_POST_SCALE_7_ADDRESS 0x00009e1c
+#define PHY_BB_PAPRD_PRE_POST_SCALE_7_OFFSET 0x00009e1c
+#define PHY_BB_PAPRD_PRE_POST_SCALE_7_PAPRD_PRE_POST_SCALING_7_MSB 17
+#define PHY_BB_PAPRD_PRE_POST_SCALE_7_PAPRD_PRE_POST_SCALING_7_LSB 0
+#define PHY_BB_PAPRD_PRE_POST_SCALE_7_PAPRD_PRE_POST_SCALING_7_MASK 0x0003ffff
+#define PHY_BB_PAPRD_PRE_POST_SCALE_7_PAPRD_PRE_POST_SCALING_7_GET(x) (((x) & 0x0003ffff) >> 0)
+#define PHY_BB_PAPRD_PRE_POST_SCALE_7_PAPRD_PRE_POST_SCALING_7_SET(x) (((x) << 0) & 0x0003ffff)
+
+/* macros for BB_paprd_mem_tab */
+#define PHY_BB_PAPRD_MEM_TAB_ADDRESS 0x00009e20
+#define PHY_BB_PAPRD_MEM_TAB_OFFSET 0x00009e20
+#define PHY_BB_PAPRD_MEM_TAB_PAPRD_MEM_MSB 21
+#define PHY_BB_PAPRD_MEM_TAB_PAPRD_MEM_LSB 0
+#define PHY_BB_PAPRD_MEM_TAB_PAPRD_MEM_MASK 0x003fffff
+#define PHY_BB_PAPRD_MEM_TAB_PAPRD_MEM_GET(x) (((x) & 0x003fffff) >> 0)
+#define PHY_BB_PAPRD_MEM_TAB_PAPRD_MEM_SET(x) (((x) << 0) & 0x003fffff)
+
+/* macros for BB_peak_det_ctrl_1 */
+#define PHY_BB_PEAK_DET_CTRL_1_ADDRESS 0x0000a000
+#define PHY_BB_PEAK_DET_CTRL_1_OFFSET 0x0000a000
+#define PHY_BB_PEAK_DET_CTRL_1_USE_OC_GAIN_TABLE_MSB 0
+#define PHY_BB_PEAK_DET_CTRL_1_USE_OC_GAIN_TABLE_LSB 0
+#define PHY_BB_PEAK_DET_CTRL_1_USE_OC_GAIN_TABLE_MASK 0x00000001
+#define PHY_BB_PEAK_DET_CTRL_1_USE_OC_GAIN_TABLE_GET(x) (((x) & 0x00000001) >> 0)
+#define PHY_BB_PEAK_DET_CTRL_1_USE_OC_GAIN_TABLE_SET(x) (((x) << 0) & 0x00000001)
+#define PHY_BB_PEAK_DET_CTRL_1_USE_PEAK_DET_MSB 1
+#define PHY_BB_PEAK_DET_CTRL_1_USE_PEAK_DET_LSB 1
+#define PHY_BB_PEAK_DET_CTRL_1_USE_PEAK_DET_MASK 0x00000002
+#define PHY_BB_PEAK_DET_CTRL_1_USE_PEAK_DET_GET(x) (((x) & 0x00000002) >> 1)
+#define PHY_BB_PEAK_DET_CTRL_1_USE_PEAK_DET_SET(x) (((x) << 1) & 0x00000002)
+#define PHY_BB_PEAK_DET_CTRL_1_PEAK_DET_WIN_LEN_MSB 7
+#define PHY_BB_PEAK_DET_CTRL_1_PEAK_DET_WIN_LEN_LSB 2
+#define PHY_BB_PEAK_DET_CTRL_1_PEAK_DET_WIN_LEN_MASK 0x000000fc
+#define PHY_BB_PEAK_DET_CTRL_1_PEAK_DET_WIN_LEN_GET(x) (((x) & 0x000000fc) >> 2)
+#define PHY_BB_PEAK_DET_CTRL_1_PEAK_DET_WIN_LEN_SET(x) (((x) << 2) & 0x000000fc)
+#define PHY_BB_PEAK_DET_CTRL_1_PEAK_DET_TALLY_THR_LOW_MSB 12
+#define PHY_BB_PEAK_DET_CTRL_1_PEAK_DET_TALLY_THR_LOW_LSB 8
+#define PHY_BB_PEAK_DET_CTRL_1_PEAK_DET_TALLY_THR_LOW_MASK 0x00001f00
+#define PHY_BB_PEAK_DET_CTRL_1_PEAK_DET_TALLY_THR_LOW_GET(x) (((x) & 0x00001f00) >> 8)
+#define PHY_BB_PEAK_DET_CTRL_1_PEAK_DET_TALLY_THR_LOW_SET(x) (((x) << 8) & 0x00001f00)
+#define PHY_BB_PEAK_DET_CTRL_1_PEAK_DET_TALLY_THR_MED_MSB 17
+#define PHY_BB_PEAK_DET_CTRL_1_PEAK_DET_TALLY_THR_MED_LSB 13
+#define PHY_BB_PEAK_DET_CTRL_1_PEAK_DET_TALLY_THR_MED_MASK 0x0003e000
+#define PHY_BB_PEAK_DET_CTRL_1_PEAK_DET_TALLY_THR_MED_GET(x) (((x) & 0x0003e000) >> 13)
+#define PHY_BB_PEAK_DET_CTRL_1_PEAK_DET_TALLY_THR_MED_SET(x) (((x) << 13) & 0x0003e000)
+#define PHY_BB_PEAK_DET_CTRL_1_PEAK_DET_TALLY_THR_HIGH_MSB 22
+#define PHY_BB_PEAK_DET_CTRL_1_PEAK_DET_TALLY_THR_HIGH_LSB 18
+#define PHY_BB_PEAK_DET_CTRL_1_PEAK_DET_TALLY_THR_HIGH_MASK 0x007c0000
+#define PHY_BB_PEAK_DET_CTRL_1_PEAK_DET_TALLY_THR_HIGH_GET(x) (((x) & 0x007c0000) >> 18)
+#define PHY_BB_PEAK_DET_CTRL_1_PEAK_DET_TALLY_THR_HIGH_SET(x) (((x) << 18) & 0x007c0000)
+#define PHY_BB_PEAK_DET_CTRL_1_PEAK_DET_SETTLING_MSB 29
+#define PHY_BB_PEAK_DET_CTRL_1_PEAK_DET_SETTLING_LSB 23
+#define PHY_BB_PEAK_DET_CTRL_1_PEAK_DET_SETTLING_MASK 0x3f800000
+#define PHY_BB_PEAK_DET_CTRL_1_PEAK_DET_SETTLING_GET(x) (((x) & 0x3f800000) >> 23)
+#define PHY_BB_PEAK_DET_CTRL_1_PEAK_DET_SETTLING_SET(x) (((x) << 23) & 0x3f800000)
+#define PHY_BB_PEAK_DET_CTRL_1_PWD_PKDET_DURING_CAL_MSB 30
+#define PHY_BB_PEAK_DET_CTRL_1_PWD_PKDET_DURING_CAL_LSB 30
+#define PHY_BB_PEAK_DET_CTRL_1_PWD_PKDET_DURING_CAL_MASK 0x40000000
+#define PHY_BB_PEAK_DET_CTRL_1_PWD_PKDET_DURING_CAL_GET(x) (((x) & 0x40000000) >> 30)
+#define PHY_BB_PEAK_DET_CTRL_1_PWD_PKDET_DURING_CAL_SET(x) (((x) << 30) & 0x40000000)
+#define PHY_BB_PEAK_DET_CTRL_1_PWD_PKDET_DURING_RX_MSB 31
+#define PHY_BB_PEAK_DET_CTRL_1_PWD_PKDET_DURING_RX_LSB 31
+#define PHY_BB_PEAK_DET_CTRL_1_PWD_PKDET_DURING_RX_MASK 0x80000000
+#define PHY_BB_PEAK_DET_CTRL_1_PWD_PKDET_DURING_RX_GET(x) (((x) & 0x80000000) >> 31)
+#define PHY_BB_PEAK_DET_CTRL_1_PWD_PKDET_DURING_RX_SET(x) (((x) << 31) & 0x80000000)
+
+/* macros for BB_peak_det_ctrl_2 */
+#define PHY_BB_PEAK_DET_CTRL_2_ADDRESS 0x0000a004
+#define PHY_BB_PEAK_DET_CTRL_2_OFFSET 0x0000a004
+#define PHY_BB_PEAK_DET_CTRL_2_RFSAT_2_ADD_RFGAIN_DEL_MSB 9
+#define PHY_BB_PEAK_DET_CTRL_2_RFSAT_2_ADD_RFGAIN_DEL_LSB 0
+#define PHY_BB_PEAK_DET_CTRL_2_RFSAT_2_ADD_RFGAIN_DEL_MASK 0x000003ff
+#define PHY_BB_PEAK_DET_CTRL_2_RFSAT_2_ADD_RFGAIN_DEL_GET(x) (((x) & 0x000003ff) >> 0)
+#define PHY_BB_PEAK_DET_CTRL_2_RFSAT_2_ADD_RFGAIN_DEL_SET(x) (((x) << 0) & 0x000003ff)
+#define PHY_BB_PEAK_DET_CTRL_2_RF_GAIN_DROP_DB_LOW_MSB 14
+#define PHY_BB_PEAK_DET_CTRL_2_RF_GAIN_DROP_DB_LOW_LSB 10
+#define PHY_BB_PEAK_DET_CTRL_2_RF_GAIN_DROP_DB_LOW_MASK 0x00007c00
+#define PHY_BB_PEAK_DET_CTRL_2_RF_GAIN_DROP_DB_LOW_GET(x) (((x) & 0x00007c00) >> 10)
+#define PHY_BB_PEAK_DET_CTRL_2_RF_GAIN_DROP_DB_LOW_SET(x) (((x) << 10) & 0x00007c00)
+#define PHY_BB_PEAK_DET_CTRL_2_RF_GAIN_DROP_DB_MED_MSB 19
+#define PHY_BB_PEAK_DET_CTRL_2_RF_GAIN_DROP_DB_MED_LSB 15
+#define PHY_BB_PEAK_DET_CTRL_2_RF_GAIN_DROP_DB_MED_MASK 0x000f8000
+#define PHY_BB_PEAK_DET_CTRL_2_RF_GAIN_DROP_DB_MED_GET(x) (((x) & 0x000f8000) >> 15)
+#define PHY_BB_PEAK_DET_CTRL_2_RF_GAIN_DROP_DB_MED_SET(x) (((x) << 15) & 0x000f8000)
+#define PHY_BB_PEAK_DET_CTRL_2_RF_GAIN_DROP_DB_HIGH_MSB 24
+#define PHY_BB_PEAK_DET_CTRL_2_RF_GAIN_DROP_DB_HIGH_LSB 20
+#define PHY_BB_PEAK_DET_CTRL_2_RF_GAIN_DROP_DB_HIGH_MASK 0x01f00000
+#define PHY_BB_PEAK_DET_CTRL_2_RF_GAIN_DROP_DB_HIGH_GET(x) (((x) & 0x01f00000) >> 20)
+#define PHY_BB_PEAK_DET_CTRL_2_RF_GAIN_DROP_DB_HIGH_SET(x) (((x) << 20) & 0x01f00000)
+#define PHY_BB_PEAK_DET_CTRL_2_RF_GAIN_DROP_DB_NON_MSB 29
+#define PHY_BB_PEAK_DET_CTRL_2_RF_GAIN_DROP_DB_NON_LSB 25
+#define PHY_BB_PEAK_DET_CTRL_2_RF_GAIN_DROP_DB_NON_MASK 0x3e000000
+#define PHY_BB_PEAK_DET_CTRL_2_RF_GAIN_DROP_DB_NON_GET(x) (((x) & 0x3e000000) >> 25)
+#define PHY_BB_PEAK_DET_CTRL_2_RF_GAIN_DROP_DB_NON_SET(x) (((x) << 25) & 0x3e000000)
+
+/* macros for BB_rx_gain_bounds_1 */
+#define PHY_BB_RX_GAIN_BOUNDS_1_ADDRESS 0x0000a008
+#define PHY_BB_RX_GAIN_BOUNDS_1_OFFSET 0x0000a008
+#define PHY_BB_RX_GAIN_BOUNDS_1_RX_MAX_MB_GAIN_MSB 7
+#define PHY_BB_RX_GAIN_BOUNDS_1_RX_MAX_MB_GAIN_LSB 0
+#define PHY_BB_RX_GAIN_BOUNDS_1_RX_MAX_MB_GAIN_MASK 0x000000ff
+#define PHY_BB_RX_GAIN_BOUNDS_1_RX_MAX_MB_GAIN_GET(x) (((x) & 0x000000ff) >> 0)
+#define PHY_BB_RX_GAIN_BOUNDS_1_RX_MAX_MB_GAIN_SET(x) (((x) << 0) & 0x000000ff)
+#define PHY_BB_RX_GAIN_BOUNDS_1_RX_MAX_RF_GAIN_REF_MSB 15
+#define PHY_BB_RX_GAIN_BOUNDS_1_RX_MAX_RF_GAIN_REF_LSB 8
+#define PHY_BB_RX_GAIN_BOUNDS_1_RX_MAX_RF_GAIN_REF_MASK 0x0000ff00
+#define PHY_BB_RX_GAIN_BOUNDS_1_RX_MAX_RF_GAIN_REF_GET(x) (((x) & 0x0000ff00) >> 8)
+#define PHY_BB_RX_GAIN_BOUNDS_1_RX_MAX_RF_GAIN_REF_SET(x) (((x) << 8) & 0x0000ff00)
+#define PHY_BB_RX_GAIN_BOUNDS_1_RX_MAX_RF_GAIN_MSB 23
+#define PHY_BB_RX_GAIN_BOUNDS_1_RX_MAX_RF_GAIN_LSB 16
+#define PHY_BB_RX_GAIN_BOUNDS_1_RX_MAX_RF_GAIN_MASK 0x00ff0000
+#define PHY_BB_RX_GAIN_BOUNDS_1_RX_MAX_RF_GAIN_GET(x) (((x) & 0x00ff0000) >> 16)
+#define PHY_BB_RX_GAIN_BOUNDS_1_RX_MAX_RF_GAIN_SET(x) (((x) << 16) & 0x00ff0000)
+#define PHY_BB_RX_GAIN_BOUNDS_1_RX_OCGAIN_SEL_2G_MSB 24
+#define PHY_BB_RX_GAIN_BOUNDS_1_RX_OCGAIN_SEL_2G_LSB 24
+#define PHY_BB_RX_GAIN_BOUNDS_1_RX_OCGAIN_SEL_2G_MASK 0x01000000
+#define PHY_BB_RX_GAIN_BOUNDS_1_RX_OCGAIN_SEL_2G_GET(x) (((x) & 0x01000000) >> 24)
+#define PHY_BB_RX_GAIN_BOUNDS_1_RX_OCGAIN_SEL_2G_SET(x) (((x) << 24) & 0x01000000)
+#define PHY_BB_RX_GAIN_BOUNDS_1_RX_OCGAIN_SEL_5G_MSB 25
+#define PHY_BB_RX_GAIN_BOUNDS_1_RX_OCGAIN_SEL_5G_LSB 25
+#define PHY_BB_RX_GAIN_BOUNDS_1_RX_OCGAIN_SEL_5G_MASK 0x02000000
+#define PHY_BB_RX_GAIN_BOUNDS_1_RX_OCGAIN_SEL_5G_GET(x) (((x) & 0x02000000) >> 25)
+#define PHY_BB_RX_GAIN_BOUNDS_1_RX_OCGAIN_SEL_5G_SET(x) (((x) << 25) & 0x02000000)
+
+/* macros for BB_rx_gain_bounds_2 */
+#define PHY_BB_RX_GAIN_BOUNDS_2_ADDRESS 0x0000a00c
+#define PHY_BB_RX_GAIN_BOUNDS_2_OFFSET 0x0000a00c
+#define PHY_BB_RX_GAIN_BOUNDS_2_GC_RSSI_LOW_DB_MSB 7
+#define PHY_BB_RX_GAIN_BOUNDS_2_GC_RSSI_LOW_DB_LSB 0
+#define PHY_BB_RX_GAIN_BOUNDS_2_GC_RSSI_LOW_DB_MASK 0x000000ff
+#define PHY_BB_RX_GAIN_BOUNDS_2_GC_RSSI_LOW_DB_GET(x) (((x) & 0x000000ff) >> 0)
+#define PHY_BB_RX_GAIN_BOUNDS_2_GC_RSSI_LOW_DB_SET(x) (((x) << 0) & 0x000000ff)
+#define PHY_BB_RX_GAIN_BOUNDS_2_RF_GAIN_REF_BASE_ADDR_MSB 15
+#define PHY_BB_RX_GAIN_BOUNDS_2_RF_GAIN_REF_BASE_ADDR_LSB 8
+#define PHY_BB_RX_GAIN_BOUNDS_2_RF_GAIN_REF_BASE_ADDR_MASK 0x0000ff00
+#define PHY_BB_RX_GAIN_BOUNDS_2_RF_GAIN_REF_BASE_ADDR_GET(x) (((x) & 0x0000ff00) >> 8)
+#define PHY_BB_RX_GAIN_BOUNDS_2_RF_GAIN_REF_BASE_ADDR_SET(x) (((x) << 8) & 0x0000ff00)
+#define PHY_BB_RX_GAIN_BOUNDS_2_RF_GAIN_BASE_ADDR_MSB 23
+#define PHY_BB_RX_GAIN_BOUNDS_2_RF_GAIN_BASE_ADDR_LSB 16
+#define PHY_BB_RX_GAIN_BOUNDS_2_RF_GAIN_BASE_ADDR_MASK 0x00ff0000
+#define PHY_BB_RX_GAIN_BOUNDS_2_RF_GAIN_BASE_ADDR_GET(x) (((x) & 0x00ff0000) >> 16)
+#define PHY_BB_RX_GAIN_BOUNDS_2_RF_GAIN_BASE_ADDR_SET(x) (((x) << 16) & 0x00ff0000)
+#define PHY_BB_RX_GAIN_BOUNDS_2_RF_GAIN_DIV_BASE_ADDR_MSB 31
+#define PHY_BB_RX_GAIN_BOUNDS_2_RF_GAIN_DIV_BASE_ADDR_LSB 24
+#define PHY_BB_RX_GAIN_BOUNDS_2_RF_GAIN_DIV_BASE_ADDR_MASK 0xff000000
+#define PHY_BB_RX_GAIN_BOUNDS_2_RF_GAIN_DIV_BASE_ADDR_GET(x) (((x) & 0xff000000) >> 24)
+#define PHY_BB_RX_GAIN_BOUNDS_2_RF_GAIN_DIV_BASE_ADDR_SET(x) (((x) << 24) & 0xff000000)
+
+/* macros for BB_peak_det_cal_ctrl */
+#define PHY_BB_PEAK_DET_CAL_CTRL_ADDRESS 0x0000a010
+#define PHY_BB_PEAK_DET_CAL_CTRL_OFFSET 0x0000a010
+#define PHY_BB_PEAK_DET_CAL_CTRL_PKDET_CAL_WIN_THR_MSB 5
+#define PHY_BB_PEAK_DET_CAL_CTRL_PKDET_CAL_WIN_THR_LSB 0
+#define PHY_BB_PEAK_DET_CAL_CTRL_PKDET_CAL_WIN_THR_MASK 0x0000003f
+#define PHY_BB_PEAK_DET_CAL_CTRL_PKDET_CAL_WIN_THR_GET(x) (((x) & 0x0000003f) >> 0)
+#define PHY_BB_PEAK_DET_CAL_CTRL_PKDET_CAL_WIN_THR_SET(x) (((x) << 0) & 0x0000003f)
+#define PHY_BB_PEAK_DET_CAL_CTRL_PKDET_CAL_BIAS_MSB 11
+#define PHY_BB_PEAK_DET_CAL_CTRL_PKDET_CAL_BIAS_LSB 6
+#define PHY_BB_PEAK_DET_CAL_CTRL_PKDET_CAL_BIAS_MASK 0x00000fc0
+#define PHY_BB_PEAK_DET_CAL_CTRL_PKDET_CAL_BIAS_GET(x) (((x) & 0x00000fc0) >> 6)
+#define PHY_BB_PEAK_DET_CAL_CTRL_PKDET_CAL_BIAS_SET(x) (((x) << 6) & 0x00000fc0)
+#define PHY_BB_PEAK_DET_CAL_CTRL_PKDET_CAL_MEAS_TIME_SEL_MSB 13
+#define PHY_BB_PEAK_DET_CAL_CTRL_PKDET_CAL_MEAS_TIME_SEL_LSB 12
+#define PHY_BB_PEAK_DET_CAL_CTRL_PKDET_CAL_MEAS_TIME_SEL_MASK 0x00003000
+#define PHY_BB_PEAK_DET_CAL_CTRL_PKDET_CAL_MEAS_TIME_SEL_GET(x) (((x) & 0x00003000) >> 12)
+#define PHY_BB_PEAK_DET_CAL_CTRL_PKDET_CAL_MEAS_TIME_SEL_SET(x) (((x) << 12) & 0x00003000)
+
+/* macros for BB_agc_dig_dc_ctrl */
+#define PHY_BB_AGC_DIG_DC_CTRL_ADDRESS 0x0000a014
+#define PHY_BB_AGC_DIG_DC_CTRL_OFFSET 0x0000a014
+#define PHY_BB_AGC_DIG_DC_CTRL_USE_DIG_DC_MSB 0
+#define PHY_BB_AGC_DIG_DC_CTRL_USE_DIG_DC_LSB 0
+#define PHY_BB_AGC_DIG_DC_CTRL_USE_DIG_DC_MASK 0x00000001
+#define PHY_BB_AGC_DIG_DC_CTRL_USE_DIG_DC_GET(x) (((x) & 0x00000001) >> 0)
+#define PHY_BB_AGC_DIG_DC_CTRL_USE_DIG_DC_SET(x) (((x) << 0) & 0x00000001)
+#define PHY_BB_AGC_DIG_DC_CTRL_DIG_DC_SCALE_BIAS_MSB 3
+#define PHY_BB_AGC_DIG_DC_CTRL_DIG_DC_SCALE_BIAS_LSB 1
+#define PHY_BB_AGC_DIG_DC_CTRL_DIG_DC_SCALE_BIAS_MASK 0x0000000e
+#define PHY_BB_AGC_DIG_DC_CTRL_DIG_DC_SCALE_BIAS_GET(x) (((x) & 0x0000000e) >> 1)
+#define PHY_BB_AGC_DIG_DC_CTRL_DIG_DC_SCALE_BIAS_SET(x) (((x) << 1) & 0x0000000e)
+#define PHY_BB_AGC_DIG_DC_CTRL_DIG_DC_CORRECT_CAP_MSB 9
+#define PHY_BB_AGC_DIG_DC_CTRL_DIG_DC_CORRECT_CAP_LSB 4
+#define PHY_BB_AGC_DIG_DC_CTRL_DIG_DC_CORRECT_CAP_MASK 0x000003f0
+#define PHY_BB_AGC_DIG_DC_CTRL_DIG_DC_CORRECT_CAP_GET(x) (((x) & 0x000003f0) >> 4)
+#define PHY_BB_AGC_DIG_DC_CTRL_DIG_DC_CORRECT_CAP_SET(x) (((x) << 4) & 0x000003f0)
+#define PHY_BB_AGC_DIG_DC_CTRL_DIG_DC_MIXER_SEL_MASK_MSB 31
+#define PHY_BB_AGC_DIG_DC_CTRL_DIG_DC_MIXER_SEL_MASK_LSB 16
+#define PHY_BB_AGC_DIG_DC_CTRL_DIG_DC_MIXER_SEL_MASK_MASK 0xffff0000
+#define PHY_BB_AGC_DIG_DC_CTRL_DIG_DC_MIXER_SEL_MASK_GET(x) (((x) & 0xffff0000) >> 16)
+#define PHY_BB_AGC_DIG_DC_CTRL_DIG_DC_MIXER_SEL_MASK_SET(x) (((x) << 16) & 0xffff0000)
+
+/* macros for BB_agc_dig_dc_status_i_b0 */
+#define PHY_BB_AGC_DIG_DC_STATUS_I_B0_ADDRESS 0x0000a018
+#define PHY_BB_AGC_DIG_DC_STATUS_I_B0_OFFSET 0x0000a018
+#define PHY_BB_AGC_DIG_DC_STATUS_I_B0_DIG_DC_C1_RES_I_0_MSB 8
+#define PHY_BB_AGC_DIG_DC_STATUS_I_B0_DIG_DC_C1_RES_I_0_LSB 0
+#define PHY_BB_AGC_DIG_DC_STATUS_I_B0_DIG_DC_C1_RES_I_0_MASK 0x000001ff
+#define PHY_BB_AGC_DIG_DC_STATUS_I_B0_DIG_DC_C1_RES_I_0_GET(x) (((x) & 0x000001ff) >> 0)
+#define PHY_BB_AGC_DIG_DC_STATUS_I_B0_DIG_DC_C2_RES_I_0_MSB 17
+#define PHY_BB_AGC_DIG_DC_STATUS_I_B0_DIG_DC_C2_RES_I_0_LSB 9
+#define PHY_BB_AGC_DIG_DC_STATUS_I_B0_DIG_DC_C2_RES_I_0_MASK 0x0003fe00
+#define PHY_BB_AGC_DIG_DC_STATUS_I_B0_DIG_DC_C2_RES_I_0_GET(x) (((x) & 0x0003fe00) >> 9)
+#define PHY_BB_AGC_DIG_DC_STATUS_I_B0_DIG_DC_C3_RES_I_0_MSB 26
+#define PHY_BB_AGC_DIG_DC_STATUS_I_B0_DIG_DC_C3_RES_I_0_LSB 18
+#define PHY_BB_AGC_DIG_DC_STATUS_I_B0_DIG_DC_C3_RES_I_0_MASK 0x07fc0000
+#define PHY_BB_AGC_DIG_DC_STATUS_I_B0_DIG_DC_C3_RES_I_0_GET(x) (((x) & 0x07fc0000) >> 18)
+
+/* macros for BB_agc_dig_dc_status_q_b0 */
+#define PHY_BB_AGC_DIG_DC_STATUS_Q_B0_ADDRESS 0x0000a01c
+#define PHY_BB_AGC_DIG_DC_STATUS_Q_B0_OFFSET 0x0000a01c
+#define PHY_BB_AGC_DIG_DC_STATUS_Q_B0_DIG_DC_C1_RES_Q_0_MSB 8
+#define PHY_BB_AGC_DIG_DC_STATUS_Q_B0_DIG_DC_C1_RES_Q_0_LSB 0
+#define PHY_BB_AGC_DIG_DC_STATUS_Q_B0_DIG_DC_C1_RES_Q_0_MASK 0x000001ff
+#define PHY_BB_AGC_DIG_DC_STATUS_Q_B0_DIG_DC_C1_RES_Q_0_GET(x) (((x) & 0x000001ff) >> 0)
+#define PHY_BB_AGC_DIG_DC_STATUS_Q_B0_DIG_DC_C2_RES_Q_0_MSB 17
+#define PHY_BB_AGC_DIG_DC_STATUS_Q_B0_DIG_DC_C2_RES_Q_0_LSB 9
+#define PHY_BB_AGC_DIG_DC_STATUS_Q_B0_DIG_DC_C2_RES_Q_0_MASK 0x0003fe00
+#define PHY_BB_AGC_DIG_DC_STATUS_Q_B0_DIG_DC_C2_RES_Q_0_GET(x) (((x) & 0x0003fe00) >> 9)
+#define PHY_BB_AGC_DIG_DC_STATUS_Q_B0_DIG_DC_C3_RES_Q_0_MSB 26
+#define PHY_BB_AGC_DIG_DC_STATUS_Q_B0_DIG_DC_C3_RES_Q_0_LSB 18
+#define PHY_BB_AGC_DIG_DC_STATUS_Q_B0_DIG_DC_C3_RES_Q_0_MASK 0x07fc0000
+#define PHY_BB_AGC_DIG_DC_STATUS_Q_B0_DIG_DC_C3_RES_Q_0_GET(x) (((x) & 0x07fc0000) >> 18)
+
+/* macros for BB_bbb_txfir_0 */
+#define PHY_BB_BBB_TXFIR_0_ADDRESS 0x0000a1f4
+#define PHY_BB_BBB_TXFIR_0_OFFSET 0x0000a1f4
+#define PHY_BB_BBB_TXFIR_0_TXFIR_COEFF_H0_MSB 3
+#define PHY_BB_BBB_TXFIR_0_TXFIR_COEFF_H0_LSB 0
+#define PHY_BB_BBB_TXFIR_0_TXFIR_COEFF_H0_MASK 0x0000000f
+#define PHY_BB_BBB_TXFIR_0_TXFIR_COEFF_H0_GET(x) (((x) & 0x0000000f) >> 0)
+#define PHY_BB_BBB_TXFIR_0_TXFIR_COEFF_H0_SET(x) (((x) << 0) & 0x0000000f)
+#define PHY_BB_BBB_TXFIR_0_TXFIR_COEFF_H1_MSB 11
+#define PHY_BB_BBB_TXFIR_0_TXFIR_COEFF_H1_LSB 8
+#define PHY_BB_BBB_TXFIR_0_TXFIR_COEFF_H1_MASK 0x00000f00
+#define PHY_BB_BBB_TXFIR_0_TXFIR_COEFF_H1_GET(x) (((x) & 0x00000f00) >> 8)
+#define PHY_BB_BBB_TXFIR_0_TXFIR_COEFF_H1_SET(x) (((x) << 8) & 0x00000f00)
+#define PHY_BB_BBB_TXFIR_0_TXFIR_COEFF_H2_MSB 20
+#define PHY_BB_BBB_TXFIR_0_TXFIR_COEFF_H2_LSB 16
+#define PHY_BB_BBB_TXFIR_0_TXFIR_COEFF_H2_MASK 0x001f0000
+#define PHY_BB_BBB_TXFIR_0_TXFIR_COEFF_H2_GET(x) (((x) & 0x001f0000) >> 16)
+#define PHY_BB_BBB_TXFIR_0_TXFIR_COEFF_H2_SET(x) (((x) << 16) & 0x001f0000)
+#define PHY_BB_BBB_TXFIR_0_TXFIR_COEFF_H3_MSB 28
+#define PHY_BB_BBB_TXFIR_0_TXFIR_COEFF_H3_LSB 24
+#define PHY_BB_BBB_TXFIR_0_TXFIR_COEFF_H3_MASK 0x1f000000
+#define PHY_BB_BBB_TXFIR_0_TXFIR_COEFF_H3_GET(x) (((x) & 0x1f000000) >> 24)
+#define PHY_BB_BBB_TXFIR_0_TXFIR_COEFF_H3_SET(x) (((x) << 24) & 0x1f000000)
+
+/* macros for BB_bbb_txfir_1 */
+#define PHY_BB_BBB_TXFIR_1_ADDRESS 0x0000a1f8
+#define PHY_BB_BBB_TXFIR_1_OFFSET 0x0000a1f8
+#define PHY_BB_BBB_TXFIR_1_TXFIR_COEFF_H4_MSB 5
+#define PHY_BB_BBB_TXFIR_1_TXFIR_COEFF_H4_LSB 0
+#define PHY_BB_BBB_TXFIR_1_TXFIR_COEFF_H4_MASK 0x0000003f
+#define PHY_BB_BBB_TXFIR_1_TXFIR_COEFF_H4_GET(x) (((x) & 0x0000003f) >> 0)
+#define PHY_BB_BBB_TXFIR_1_TXFIR_COEFF_H4_SET(x) (((x) << 0) & 0x0000003f)
+#define PHY_BB_BBB_TXFIR_1_TXFIR_COEFF_H5_MSB 13
+#define PHY_BB_BBB_TXFIR_1_TXFIR_COEFF_H5_LSB 8
+#define PHY_BB_BBB_TXFIR_1_TXFIR_COEFF_H5_MASK 0x00003f00
+#define PHY_BB_BBB_TXFIR_1_TXFIR_COEFF_H5_GET(x) (((x) & 0x00003f00) >> 8)
+#define PHY_BB_BBB_TXFIR_1_TXFIR_COEFF_H5_SET(x) (((x) << 8) & 0x00003f00)
+#define PHY_BB_BBB_TXFIR_1_TXFIR_COEFF_H6_MSB 22
+#define PHY_BB_BBB_TXFIR_1_TXFIR_COEFF_H6_LSB 16
+#define PHY_BB_BBB_TXFIR_1_TXFIR_COEFF_H6_MASK 0x007f0000
+#define PHY_BB_BBB_TXFIR_1_TXFIR_COEFF_H6_GET(x) (((x) & 0x007f0000) >> 16)
+#define PHY_BB_BBB_TXFIR_1_TXFIR_COEFF_H6_SET(x) (((x) << 16) & 0x007f0000)
+#define PHY_BB_BBB_TXFIR_1_TXFIR_COEFF_H7_MSB 30
+#define PHY_BB_BBB_TXFIR_1_TXFIR_COEFF_H7_LSB 24
+#define PHY_BB_BBB_TXFIR_1_TXFIR_COEFF_H7_MASK 0x7f000000
+#define PHY_BB_BBB_TXFIR_1_TXFIR_COEFF_H7_GET(x) (((x) & 0x7f000000) >> 24)
+#define PHY_BB_BBB_TXFIR_1_TXFIR_COEFF_H7_SET(x) (((x) << 24) & 0x7f000000)
+
+/* macros for BB_bbb_txfir_2 */
+#define PHY_BB_BBB_TXFIR_2_ADDRESS 0x0000a1fc
+#define PHY_BB_BBB_TXFIR_2_OFFSET 0x0000a1fc
+#define PHY_BB_BBB_TXFIR_2_TXFIR_COEFF_H8_MSB 7
+#define PHY_BB_BBB_TXFIR_2_TXFIR_COEFF_H8_LSB 0
+#define PHY_BB_BBB_TXFIR_2_TXFIR_COEFF_H8_MASK 0x000000ff
+#define PHY_BB_BBB_TXFIR_2_TXFIR_COEFF_H8_GET(x) (((x) & 0x000000ff) >> 0)
+#define PHY_BB_BBB_TXFIR_2_TXFIR_COEFF_H8_SET(x) (((x) << 0) & 0x000000ff)
+#define PHY_BB_BBB_TXFIR_2_TXFIR_COEFF_H9_MSB 15
+#define PHY_BB_BBB_TXFIR_2_TXFIR_COEFF_H9_LSB 8
+#define PHY_BB_BBB_TXFIR_2_TXFIR_COEFF_H9_MASK 0x0000ff00
+#define PHY_BB_BBB_TXFIR_2_TXFIR_COEFF_H9_GET(x) (((x) & 0x0000ff00) >> 8)
+#define PHY_BB_BBB_TXFIR_2_TXFIR_COEFF_H9_SET(x) (((x) << 8) & 0x0000ff00)
+#define PHY_BB_BBB_TXFIR_2_TXFIR_COEFF_H10_MSB 23
+#define PHY_BB_BBB_TXFIR_2_TXFIR_COEFF_H10_LSB 16
+#define PHY_BB_BBB_TXFIR_2_TXFIR_COEFF_H10_MASK 0x00ff0000
+#define PHY_BB_BBB_TXFIR_2_TXFIR_COEFF_H10_GET(x) (((x) & 0x00ff0000) >> 16)
+#define PHY_BB_BBB_TXFIR_2_TXFIR_COEFF_H10_SET(x) (((x) << 16) & 0x00ff0000)
+#define PHY_BB_BBB_TXFIR_2_TXFIR_COEFF_H11_MSB 31
+#define PHY_BB_BBB_TXFIR_2_TXFIR_COEFF_H11_LSB 24
+#define PHY_BB_BBB_TXFIR_2_TXFIR_COEFF_H11_MASK 0xff000000
+#define PHY_BB_BBB_TXFIR_2_TXFIR_COEFF_H11_GET(x) (((x) & 0xff000000) >> 24)
+#define PHY_BB_BBB_TXFIR_2_TXFIR_COEFF_H11_SET(x) (((x) << 24) & 0xff000000)
+
+/* macros for BB_modes_select */
+#define PHY_BB_MODES_SELECT_ADDRESS 0x0000a200
+#define PHY_BB_MODES_SELECT_OFFSET 0x0000a200
+#define PHY_BB_MODES_SELECT_CCK_MODE_MSB 0
+#define PHY_BB_MODES_SELECT_CCK_MODE_LSB 0
+#define PHY_BB_MODES_SELECT_CCK_MODE_MASK 0x00000001
+#define PHY_BB_MODES_SELECT_CCK_MODE_GET(x) (((x) & 0x00000001) >> 0)
+#define PHY_BB_MODES_SELECT_CCK_MODE_SET(x) (((x) << 0) & 0x00000001)
+#define PHY_BB_MODES_SELECT_DYN_OFDM_CCK_MODE_MSB 2
+#define PHY_BB_MODES_SELECT_DYN_OFDM_CCK_MODE_LSB 2
+#define PHY_BB_MODES_SELECT_DYN_OFDM_CCK_MODE_MASK 0x00000004
+#define PHY_BB_MODES_SELECT_DYN_OFDM_CCK_MODE_GET(x) (((x) & 0x00000004) >> 2)
+#define PHY_BB_MODES_SELECT_DYN_OFDM_CCK_MODE_SET(x) (((x) << 2) & 0x00000004)
+#define PHY_BB_MODES_SELECT_HALF_RATE_MODE_MSB 5
+#define PHY_BB_MODES_SELECT_HALF_RATE_MODE_LSB 5
+#define PHY_BB_MODES_SELECT_HALF_RATE_MODE_MASK 0x00000020
+#define PHY_BB_MODES_SELECT_HALF_RATE_MODE_GET(x) (((x) & 0x00000020) >> 5)
+#define PHY_BB_MODES_SELECT_HALF_RATE_MODE_SET(x) (((x) << 5) & 0x00000020)
+#define PHY_BB_MODES_SELECT_QUARTER_RATE_MODE_MSB 6
+#define PHY_BB_MODES_SELECT_QUARTER_RATE_MODE_LSB 6
+#define PHY_BB_MODES_SELECT_QUARTER_RATE_MODE_MASK 0x00000040
+#define PHY_BB_MODES_SELECT_QUARTER_RATE_MODE_GET(x) (((x) & 0x00000040) >> 6)
+#define PHY_BB_MODES_SELECT_QUARTER_RATE_MODE_SET(x) (((x) << 6) & 0x00000040)
+#define PHY_BB_MODES_SELECT_MAC_CLK_MODE_MSB 7
+#define PHY_BB_MODES_SELECT_MAC_CLK_MODE_LSB 7
+#define PHY_BB_MODES_SELECT_MAC_CLK_MODE_MASK 0x00000080
+#define PHY_BB_MODES_SELECT_MAC_CLK_MODE_GET(x) (((x) & 0x00000080) >> 7)
+#define PHY_BB_MODES_SELECT_MAC_CLK_MODE_SET(x) (((x) << 7) & 0x00000080)
+#define PHY_BB_MODES_SELECT_DISABLE_DYN_CCK_DET_MSB 8
+#define PHY_BB_MODES_SELECT_DISABLE_DYN_CCK_DET_LSB 8
+#define PHY_BB_MODES_SELECT_DISABLE_DYN_CCK_DET_MASK 0x00000100
+#define PHY_BB_MODES_SELECT_DISABLE_DYN_CCK_DET_GET(x) (((x) & 0x00000100) >> 8)
+#define PHY_BB_MODES_SELECT_DISABLE_DYN_CCK_DET_SET(x) (((x) << 8) & 0x00000100)
+
+/* macros for BB_bbb_tx_ctrl */
+#define PHY_BB_BBB_TX_CTRL_ADDRESS 0x0000a204
+#define PHY_BB_BBB_TX_CTRL_OFFSET 0x0000a204
+#define PHY_BB_BBB_TX_CTRL_DISABLE_SCRAMBLER_MSB 0
+#define PHY_BB_BBB_TX_CTRL_DISABLE_SCRAMBLER_LSB 0
+#define PHY_BB_BBB_TX_CTRL_DISABLE_SCRAMBLER_MASK 0x00000001
+#define PHY_BB_BBB_TX_CTRL_DISABLE_SCRAMBLER_GET(x) (((x) & 0x00000001) >> 0)
+#define PHY_BB_BBB_TX_CTRL_DISABLE_SCRAMBLER_SET(x) (((x) << 0) & 0x00000001)
+#define PHY_BB_BBB_TX_CTRL_USE_SCRAMBLER_SEED_MSB 1
+#define PHY_BB_BBB_TX_CTRL_USE_SCRAMBLER_SEED_LSB 1
+#define PHY_BB_BBB_TX_CTRL_USE_SCRAMBLER_SEED_MASK 0x00000002
+#define PHY_BB_BBB_TX_CTRL_USE_SCRAMBLER_SEED_GET(x) (((x) & 0x00000002) >> 1)
+#define PHY_BB_BBB_TX_CTRL_USE_SCRAMBLER_SEED_SET(x) (((x) << 1) & 0x00000002)
+#define PHY_BB_BBB_TX_CTRL_TX_DAC_SCALE_CCK_MSB 3
+#define PHY_BB_BBB_TX_CTRL_TX_DAC_SCALE_CCK_LSB 2
+#define PHY_BB_BBB_TX_CTRL_TX_DAC_SCALE_CCK_MASK 0x0000000c
+#define PHY_BB_BBB_TX_CTRL_TX_DAC_SCALE_CCK_GET(x) (((x) & 0x0000000c) >> 2)
+#define PHY_BB_BBB_TX_CTRL_TX_DAC_SCALE_CCK_SET(x) (((x) << 2) & 0x0000000c)
+#define PHY_BB_BBB_TX_CTRL_TXFIR_JAPAN_CCK_MSB 4
+#define PHY_BB_BBB_TX_CTRL_TXFIR_JAPAN_CCK_LSB 4
+#define PHY_BB_BBB_TX_CTRL_TXFIR_JAPAN_CCK_MASK 0x00000010
+#define PHY_BB_BBB_TX_CTRL_TXFIR_JAPAN_CCK_GET(x) (((x) & 0x00000010) >> 4)
+#define PHY_BB_BBB_TX_CTRL_TXFIR_JAPAN_CCK_SET(x) (((x) << 4) & 0x00000010)
+#define PHY_BB_BBB_TX_CTRL_ALLOW_1MBPS_SHORT_MSB 5
+#define PHY_BB_BBB_TX_CTRL_ALLOW_1MBPS_SHORT_LSB 5
+#define PHY_BB_BBB_TX_CTRL_ALLOW_1MBPS_SHORT_MASK 0x00000020
+#define PHY_BB_BBB_TX_CTRL_ALLOW_1MBPS_SHORT_GET(x) (((x) & 0x00000020) >> 5)
+#define PHY_BB_BBB_TX_CTRL_ALLOW_1MBPS_SHORT_SET(x) (((x) << 5) & 0x00000020)
+#define PHY_BB_BBB_TX_CTRL_TX_CCK_DELAY_1_MSB 8
+#define PHY_BB_BBB_TX_CTRL_TX_CCK_DELAY_1_LSB 6
+#define PHY_BB_BBB_TX_CTRL_TX_CCK_DELAY_1_MASK 0x000001c0
+#define PHY_BB_BBB_TX_CTRL_TX_CCK_DELAY_1_GET(x) (((x) & 0x000001c0) >> 6)
+#define PHY_BB_BBB_TX_CTRL_TX_CCK_DELAY_1_SET(x) (((x) << 6) & 0x000001c0)
+#define PHY_BB_BBB_TX_CTRL_TX_CCK_DELAY_2_MSB 11
+#define PHY_BB_BBB_TX_CTRL_TX_CCK_DELAY_2_LSB 9
+#define PHY_BB_BBB_TX_CTRL_TX_CCK_DELAY_2_MASK 0x00000e00
+#define PHY_BB_BBB_TX_CTRL_TX_CCK_DELAY_2_GET(x) (((x) & 0x00000e00) >> 9)
+#define PHY_BB_BBB_TX_CTRL_TX_CCK_DELAY_2_SET(x) (((x) << 9) & 0x00000e00)
+
+/* macros for BB_bbb_sig_detect */
+#define PHY_BB_BBB_SIG_DETECT_ADDRESS 0x0000a208
+#define PHY_BB_BBB_SIG_DETECT_OFFSET 0x0000a208
+#define PHY_BB_BBB_SIG_DETECT_WEAK_SIG_THR_CCK_MSB 5
+#define PHY_BB_BBB_SIG_DETECT_WEAK_SIG_THR_CCK_LSB 0
+#define PHY_BB_BBB_SIG_DETECT_WEAK_SIG_THR_CCK_MASK 0x0000003f
+#define PHY_BB_BBB_SIG_DETECT_WEAK_SIG_THR_CCK_GET(x) (((x) & 0x0000003f) >> 0)
+#define PHY_BB_BBB_SIG_DETECT_WEAK_SIG_THR_CCK_SET(x) (((x) << 0) & 0x0000003f)
+#define PHY_BB_BBB_SIG_DETECT_ANT_SWITCH_TIME_MSB 12
+#define PHY_BB_BBB_SIG_DETECT_ANT_SWITCH_TIME_LSB 6
+#define PHY_BB_BBB_SIG_DETECT_ANT_SWITCH_TIME_MASK 0x00001fc0
+#define PHY_BB_BBB_SIG_DETECT_ANT_SWITCH_TIME_GET(x) (((x) & 0x00001fc0) >> 6)
+#define PHY_BB_BBB_SIG_DETECT_ANT_SWITCH_TIME_SET(x) (((x) << 6) & 0x00001fc0)
+#define PHY_BB_BBB_SIG_DETECT_ENABLE_ANT_FAST_DIV_MSB 13
+#define PHY_BB_BBB_SIG_DETECT_ENABLE_ANT_FAST_DIV_LSB 13
+#define PHY_BB_BBB_SIG_DETECT_ENABLE_ANT_FAST_DIV_MASK 0x00002000
+#define PHY_BB_BBB_SIG_DETECT_ENABLE_ANT_FAST_DIV_GET(x) (((x) & 0x00002000) >> 13)
+#define PHY_BB_BBB_SIG_DETECT_ENABLE_ANT_FAST_DIV_SET(x) (((x) << 13) & 0x00002000)
+#define PHY_BB_BBB_SIG_DETECT_LB_ALPHA_128_CCK_MSB 14
+#define PHY_BB_BBB_SIG_DETECT_LB_ALPHA_128_CCK_LSB 14
+#define PHY_BB_BBB_SIG_DETECT_LB_ALPHA_128_CCK_MASK 0x00004000
+#define PHY_BB_BBB_SIG_DETECT_LB_ALPHA_128_CCK_GET(x) (((x) & 0x00004000) >> 14)
+#define PHY_BB_BBB_SIG_DETECT_LB_ALPHA_128_CCK_SET(x) (((x) << 14) & 0x00004000)
+#define PHY_BB_BBB_SIG_DETECT_LB_RX_ENABLE_CCK_MSB 15
+#define PHY_BB_BBB_SIG_DETECT_LB_RX_ENABLE_CCK_LSB 15
+#define PHY_BB_BBB_SIG_DETECT_LB_RX_ENABLE_CCK_MASK 0x00008000
+#define PHY_BB_BBB_SIG_DETECT_LB_RX_ENABLE_CCK_GET(x) (((x) & 0x00008000) >> 15)
+#define PHY_BB_BBB_SIG_DETECT_LB_RX_ENABLE_CCK_SET(x) (((x) << 15) & 0x00008000)
+#define PHY_BB_BBB_SIG_DETECT_CYC32_COARSE_DC_EST_CCK_MSB 16
+#define PHY_BB_BBB_SIG_DETECT_CYC32_COARSE_DC_EST_CCK_LSB 16
+#define PHY_BB_BBB_SIG_DETECT_CYC32_COARSE_DC_EST_CCK_MASK 0x00010000
+#define PHY_BB_BBB_SIG_DETECT_CYC32_COARSE_DC_EST_CCK_GET(x) (((x) & 0x00010000) >> 16)
+#define PHY_BB_BBB_SIG_DETECT_CYC32_COARSE_DC_EST_CCK_SET(x) (((x) << 16) & 0x00010000)
+#define PHY_BB_BBB_SIG_DETECT_CYC64_COARSE_DC_EST_CCK_MSB 17
+#define PHY_BB_BBB_SIG_DETECT_CYC64_COARSE_DC_EST_CCK_LSB 17
+#define PHY_BB_BBB_SIG_DETECT_CYC64_COARSE_DC_EST_CCK_MASK 0x00020000
+#define PHY_BB_BBB_SIG_DETECT_CYC64_COARSE_DC_EST_CCK_GET(x) (((x) & 0x00020000) >> 17)
+#define PHY_BB_BBB_SIG_DETECT_CYC64_COARSE_DC_EST_CCK_SET(x) (((x) << 17) & 0x00020000)
+#define PHY_BB_BBB_SIG_DETECT_ENABLE_COARSE_DC_CCK_MSB 18
+#define PHY_BB_BBB_SIG_DETECT_ENABLE_COARSE_DC_CCK_LSB 18
+#define PHY_BB_BBB_SIG_DETECT_ENABLE_COARSE_DC_CCK_MASK 0x00040000
+#define PHY_BB_BBB_SIG_DETECT_ENABLE_COARSE_DC_CCK_GET(x) (((x) & 0x00040000) >> 18)
+#define PHY_BB_BBB_SIG_DETECT_ENABLE_COARSE_DC_CCK_SET(x) (((x) << 18) & 0x00040000)
+#define PHY_BB_BBB_SIG_DETECT_CYC256_FINE_DC_EST_CCK_MSB 19
+#define PHY_BB_BBB_SIG_DETECT_CYC256_FINE_DC_EST_CCK_LSB 19
+#define PHY_BB_BBB_SIG_DETECT_CYC256_FINE_DC_EST_CCK_MASK 0x00080000
+#define PHY_BB_BBB_SIG_DETECT_CYC256_FINE_DC_EST_CCK_GET(x) (((x) & 0x00080000) >> 19)
+#define PHY_BB_BBB_SIG_DETECT_CYC256_FINE_DC_EST_CCK_SET(x) (((x) << 19) & 0x00080000)
+#define PHY_BB_BBB_SIG_DETECT_ENABLE_FINE_DC_CCK_MSB 20
+#define PHY_BB_BBB_SIG_DETECT_ENABLE_FINE_DC_CCK_LSB 20
+#define PHY_BB_BBB_SIG_DETECT_ENABLE_FINE_DC_CCK_MASK 0x00100000
+#define PHY_BB_BBB_SIG_DETECT_ENABLE_FINE_DC_CCK_GET(x) (((x) & 0x00100000) >> 20)
+#define PHY_BB_BBB_SIG_DETECT_ENABLE_FINE_DC_CCK_SET(x) (((x) << 20) & 0x00100000)
+#define PHY_BB_BBB_SIG_DETECT_DELAY_START_SYNC_CCK_MSB 21
+#define PHY_BB_BBB_SIG_DETECT_DELAY_START_SYNC_CCK_LSB 21
+#define PHY_BB_BBB_SIG_DETECT_DELAY_START_SYNC_CCK_MASK 0x00200000
+#define PHY_BB_BBB_SIG_DETECT_DELAY_START_SYNC_CCK_GET(x) (((x) & 0x00200000) >> 21)
+#define PHY_BB_BBB_SIG_DETECT_DELAY_START_SYNC_CCK_SET(x) (((x) << 21) & 0x00200000)
+#define PHY_BB_BBB_SIG_DETECT_USE_DC_EST_DURING_SRCH_MSB 22
+#define PHY_BB_BBB_SIG_DETECT_USE_DC_EST_DURING_SRCH_LSB 22
+#define PHY_BB_BBB_SIG_DETECT_USE_DC_EST_DURING_SRCH_MASK 0x00400000
+#define PHY_BB_BBB_SIG_DETECT_USE_DC_EST_DURING_SRCH_GET(x) (((x) & 0x00400000) >> 22)
+#define PHY_BB_BBB_SIG_DETECT_USE_DC_EST_DURING_SRCH_SET(x) (((x) << 22) & 0x00400000)
+#define PHY_BB_BBB_SIG_DETECT_ENABLE_BARKER_TWO_PHASE_MSB 31
+#define PHY_BB_BBB_SIG_DETECT_ENABLE_BARKER_TWO_PHASE_LSB 31
+#define PHY_BB_BBB_SIG_DETECT_ENABLE_BARKER_TWO_PHASE_MASK 0x80000000
+#define PHY_BB_BBB_SIG_DETECT_ENABLE_BARKER_TWO_PHASE_GET(x) (((x) & 0x80000000) >> 31)
+#define PHY_BB_BBB_SIG_DETECT_ENABLE_BARKER_TWO_PHASE_SET(x) (((x) << 31) & 0x80000000)
+
+/* macros for BB_ext_atten_switch_ctl_b0 */
+#define PHY_BB_EXT_ATTEN_SWITCH_CTL_B0_ADDRESS 0x0000a20c
+#define PHY_BB_EXT_ATTEN_SWITCH_CTL_B0_OFFSET 0x0000a20c
+#define PHY_BB_EXT_ATTEN_SWITCH_CTL_B0_XATTEN1_DB_0_MSB 5
+#define PHY_BB_EXT_ATTEN_SWITCH_CTL_B0_XATTEN1_DB_0_LSB 0
+#define PHY_BB_EXT_ATTEN_SWITCH_CTL_B0_XATTEN1_DB_0_MASK 0x0000003f
+#define PHY_BB_EXT_ATTEN_SWITCH_CTL_B0_XATTEN1_DB_0_GET(x) (((x) & 0x0000003f) >> 0)
+#define PHY_BB_EXT_ATTEN_SWITCH_CTL_B0_XATTEN1_DB_0_SET(x) (((x) << 0) & 0x0000003f)
+#define PHY_BB_EXT_ATTEN_SWITCH_CTL_B0_XATTEN2_DB_0_MSB 11
+#define PHY_BB_EXT_ATTEN_SWITCH_CTL_B0_XATTEN2_DB_0_LSB 6
+#define PHY_BB_EXT_ATTEN_SWITCH_CTL_B0_XATTEN2_DB_0_MASK 0x00000fc0
+#define PHY_BB_EXT_ATTEN_SWITCH_CTL_B0_XATTEN2_DB_0_GET(x) (((x) & 0x00000fc0) >> 6)
+#define PHY_BB_EXT_ATTEN_SWITCH_CTL_B0_XATTEN2_DB_0_SET(x) (((x) << 6) & 0x00000fc0)
+#define PHY_BB_EXT_ATTEN_SWITCH_CTL_B0_XATTEN1_MARGIN_0_MSB 16
+#define PHY_BB_EXT_ATTEN_SWITCH_CTL_B0_XATTEN1_MARGIN_0_LSB 12
+#define PHY_BB_EXT_ATTEN_SWITCH_CTL_B0_XATTEN1_MARGIN_0_MASK 0x0001f000
+#define PHY_BB_EXT_ATTEN_SWITCH_CTL_B0_XATTEN1_MARGIN_0_GET(x) (((x) & 0x0001f000) >> 12)
+#define PHY_BB_EXT_ATTEN_SWITCH_CTL_B0_XATTEN1_MARGIN_0_SET(x) (((x) << 12) & 0x0001f000)
+#define PHY_BB_EXT_ATTEN_SWITCH_CTL_B0_XATTEN2_MARGIN_0_MSB 21
+#define PHY_BB_EXT_ATTEN_SWITCH_CTL_B0_XATTEN2_MARGIN_0_LSB 17
+#define PHY_BB_EXT_ATTEN_SWITCH_CTL_B0_XATTEN2_MARGIN_0_MASK 0x003e0000
+#define PHY_BB_EXT_ATTEN_SWITCH_CTL_B0_XATTEN2_MARGIN_0_GET(x) (((x) & 0x003e0000) >> 17)
+#define PHY_BB_EXT_ATTEN_SWITCH_CTL_B0_XATTEN2_MARGIN_0_SET(x) (((x) << 17) & 0x003e0000)
+
+/* macros for BB_bbb_rx_ctrl_1 */
+#define PHY_BB_BBB_RX_CTRL_1_ADDRESS 0x0000a210
+#define PHY_BB_BBB_RX_CTRL_1_OFFSET 0x0000a210
+#define PHY_BB_BBB_RX_CTRL_1_COARSE_TIM_THRESHOLD_2_MSB 2
+#define PHY_BB_BBB_RX_CTRL_1_COARSE_TIM_THRESHOLD_2_LSB 0
+#define PHY_BB_BBB_RX_CTRL_1_COARSE_TIM_THRESHOLD_2_MASK 0x00000007
+#define PHY_BB_BBB_RX_CTRL_1_COARSE_TIM_THRESHOLD_2_GET(x) (((x) & 0x00000007) >> 0)
+#define PHY_BB_BBB_RX_CTRL_1_COARSE_TIM_THRESHOLD_2_SET(x) (((x) << 0) & 0x00000007)
+#define PHY_BB_BBB_RX_CTRL_1_COARSE_TIM_THRESHOLD_MSB 7
+#define PHY_BB_BBB_RX_CTRL_1_COARSE_TIM_THRESHOLD_LSB 3
+#define PHY_BB_BBB_RX_CTRL_1_COARSE_TIM_THRESHOLD_MASK 0x000000f8
+#define PHY_BB_BBB_RX_CTRL_1_COARSE_TIM_THRESHOLD_GET(x) (((x) & 0x000000f8) >> 3)
+#define PHY_BB_BBB_RX_CTRL_1_COARSE_TIM_THRESHOLD_SET(x) (((x) << 3) & 0x000000f8)
+#define PHY_BB_BBB_RX_CTRL_1_COARSE_TIM_N_SYNC_MSB 10
+#define PHY_BB_BBB_RX_CTRL_1_COARSE_TIM_N_SYNC_LSB 8
+#define PHY_BB_BBB_RX_CTRL_1_COARSE_TIM_N_SYNC_MASK 0x00000700
+#define PHY_BB_BBB_RX_CTRL_1_COARSE_TIM_N_SYNC_GET(x) (((x) & 0x00000700) >> 8)
+#define PHY_BB_BBB_RX_CTRL_1_COARSE_TIM_N_SYNC_SET(x) (((x) << 8) & 0x00000700)
+#define PHY_BB_BBB_RX_CTRL_1_MAX_BAL_LONG_MSB 15
+#define PHY_BB_BBB_RX_CTRL_1_MAX_BAL_LONG_LSB 11
+#define PHY_BB_BBB_RX_CTRL_1_MAX_BAL_LONG_MASK 0x0000f800
+#define PHY_BB_BBB_RX_CTRL_1_MAX_BAL_LONG_GET(x) (((x) & 0x0000f800) >> 11)
+#define PHY_BB_BBB_RX_CTRL_1_MAX_BAL_LONG_SET(x) (((x) << 11) & 0x0000f800)
+#define PHY_BB_BBB_RX_CTRL_1_MAX_BAL_SHORT_MSB 20
+#define PHY_BB_BBB_RX_CTRL_1_MAX_BAL_SHORT_LSB 16
+#define PHY_BB_BBB_RX_CTRL_1_MAX_BAL_SHORT_MASK 0x001f0000
+#define PHY_BB_BBB_RX_CTRL_1_MAX_BAL_SHORT_GET(x) (((x) & 0x001f0000) >> 16)
+#define PHY_BB_BBB_RX_CTRL_1_MAX_BAL_SHORT_SET(x) (((x) << 16) & 0x001f0000)
+#define PHY_BB_BBB_RX_CTRL_1_RECON_LMS_STEP_MSB 23
+#define PHY_BB_BBB_RX_CTRL_1_RECON_LMS_STEP_LSB 21
+#define PHY_BB_BBB_RX_CTRL_1_RECON_LMS_STEP_MASK 0x00e00000
+#define PHY_BB_BBB_RX_CTRL_1_RECON_LMS_STEP_GET(x) (((x) & 0x00e00000) >> 21)
+#define PHY_BB_BBB_RX_CTRL_1_RECON_LMS_STEP_SET(x) (((x) << 21) & 0x00e00000)
+#define PHY_BB_BBB_RX_CTRL_1_SB_CHECK_WIN_MSB 30
+#define PHY_BB_BBB_RX_CTRL_1_SB_CHECK_WIN_LSB 24
+#define PHY_BB_BBB_RX_CTRL_1_SB_CHECK_WIN_MASK 0x7f000000
+#define PHY_BB_BBB_RX_CTRL_1_SB_CHECK_WIN_GET(x) (((x) & 0x7f000000) >> 24)
+#define PHY_BB_BBB_RX_CTRL_1_SB_CHECK_WIN_SET(x) (((x) << 24) & 0x7f000000)
+#define PHY_BB_BBB_RX_CTRL_1_EN_RX_ABORT_CCK_MSB 31
+#define PHY_BB_BBB_RX_CTRL_1_EN_RX_ABORT_CCK_LSB 31
+#define PHY_BB_BBB_RX_CTRL_1_EN_RX_ABORT_CCK_MASK 0x80000000
+#define PHY_BB_BBB_RX_CTRL_1_EN_RX_ABORT_CCK_GET(x) (((x) & 0x80000000) >> 31)
+#define PHY_BB_BBB_RX_CTRL_1_EN_RX_ABORT_CCK_SET(x) (((x) << 31) & 0x80000000)
+
+/* macros for BB_bbb_rx_ctrl_2 */
+#define PHY_BB_BBB_RX_CTRL_2_ADDRESS 0x0000a214
+#define PHY_BB_BBB_RX_CTRL_2_OFFSET 0x0000a214
+#define PHY_BB_BBB_RX_CTRL_2_FREQ_EST_N_AVG_LONG_MSB 5
+#define PHY_BB_BBB_RX_CTRL_2_FREQ_EST_N_AVG_LONG_LSB 0
+#define PHY_BB_BBB_RX_CTRL_2_FREQ_EST_N_AVG_LONG_MASK 0x0000003f
+#define PHY_BB_BBB_RX_CTRL_2_FREQ_EST_N_AVG_LONG_GET(x) (((x) & 0x0000003f) >> 0)
+#define PHY_BB_BBB_RX_CTRL_2_FREQ_EST_N_AVG_LONG_SET(x) (((x) << 0) & 0x0000003f)
+#define PHY_BB_BBB_RX_CTRL_2_CHAN_AVG_LONG_MSB 11
+#define PHY_BB_BBB_RX_CTRL_2_CHAN_AVG_LONG_LSB 6
+#define PHY_BB_BBB_RX_CTRL_2_CHAN_AVG_LONG_MASK 0x00000fc0
+#define PHY_BB_BBB_RX_CTRL_2_CHAN_AVG_LONG_GET(x) (((x) & 0x00000fc0) >> 6)
+#define PHY_BB_BBB_RX_CTRL_2_CHAN_AVG_LONG_SET(x) (((x) << 6) & 0x00000fc0)
+#define PHY_BB_BBB_RX_CTRL_2_COARSE_TIM_THRESHOLD_3_MSB 16
+#define PHY_BB_BBB_RX_CTRL_2_COARSE_TIM_THRESHOLD_3_LSB 12
+#define PHY_BB_BBB_RX_CTRL_2_COARSE_TIM_THRESHOLD_3_MASK 0x0001f000
+#define PHY_BB_BBB_RX_CTRL_2_COARSE_TIM_THRESHOLD_3_GET(x) (((x) & 0x0001f000) >> 12)
+#define PHY_BB_BBB_RX_CTRL_2_COARSE_TIM_THRESHOLD_3_SET(x) (((x) << 12) & 0x0001f000)
+#define PHY_BB_BBB_RX_CTRL_2_FREQ_TRACK_UPDATE_PERIOD_MSB 21
+#define PHY_BB_BBB_RX_CTRL_2_FREQ_TRACK_UPDATE_PERIOD_LSB 17
+#define PHY_BB_BBB_RX_CTRL_2_FREQ_TRACK_UPDATE_PERIOD_MASK 0x003e0000
+#define PHY_BB_BBB_RX_CTRL_2_FREQ_TRACK_UPDATE_PERIOD_GET(x) (((x) & 0x003e0000) >> 17)
+#define PHY_BB_BBB_RX_CTRL_2_FREQ_TRACK_UPDATE_PERIOD_SET(x) (((x) << 17) & 0x003e0000)
+#define PHY_BB_BBB_RX_CTRL_2_FREQ_EST_SCALING_PERIOD_MSB 25
+#define PHY_BB_BBB_RX_CTRL_2_FREQ_EST_SCALING_PERIOD_LSB 22
+#define PHY_BB_BBB_RX_CTRL_2_FREQ_EST_SCALING_PERIOD_MASK 0x03c00000
+#define PHY_BB_BBB_RX_CTRL_2_FREQ_EST_SCALING_PERIOD_GET(x) (((x) & 0x03c00000) >> 22)
+#define PHY_BB_BBB_RX_CTRL_2_FREQ_EST_SCALING_PERIOD_SET(x) (((x) << 22) & 0x03c00000)
+#define PHY_BB_BBB_RX_CTRL_2_LOOP_COEF_DPSK_C2_DATA_MSB 31
+#define PHY_BB_BBB_RX_CTRL_2_LOOP_COEF_DPSK_C2_DATA_LSB 26
+#define PHY_BB_BBB_RX_CTRL_2_LOOP_COEF_DPSK_C2_DATA_MASK 0xfc000000
+#define PHY_BB_BBB_RX_CTRL_2_LOOP_COEF_DPSK_C2_DATA_GET(x) (((x) & 0xfc000000) >> 26)
+#define PHY_BB_BBB_RX_CTRL_2_LOOP_COEF_DPSK_C2_DATA_SET(x) (((x) << 26) & 0xfc000000)
+
+/* macros for BB_bbb_rx_ctrl_3 */
+#define PHY_BB_BBB_RX_CTRL_3_ADDRESS 0x0000a218
+#define PHY_BB_BBB_RX_CTRL_3_OFFSET 0x0000a218
+#define PHY_BB_BBB_RX_CTRL_3_TIM_ADJUST_FREQ_DPSK_MSB 7
+#define PHY_BB_BBB_RX_CTRL_3_TIM_ADJUST_FREQ_DPSK_LSB 0
+#define PHY_BB_BBB_RX_CTRL_3_TIM_ADJUST_FREQ_DPSK_MASK 0x000000ff
+#define PHY_BB_BBB_RX_CTRL_3_TIM_ADJUST_FREQ_DPSK_GET(x) (((x) & 0x000000ff) >> 0)
+#define PHY_BB_BBB_RX_CTRL_3_TIM_ADJUST_FREQ_DPSK_SET(x) (((x) << 0) & 0x000000ff)
+#define PHY_BB_BBB_RX_CTRL_3_TIM_ADJUST_FREQ_CCK_MSB 15
+#define PHY_BB_BBB_RX_CTRL_3_TIM_ADJUST_FREQ_CCK_LSB 8
+#define PHY_BB_BBB_RX_CTRL_3_TIM_ADJUST_FREQ_CCK_MASK 0x0000ff00
+#define PHY_BB_BBB_RX_CTRL_3_TIM_ADJUST_FREQ_CCK_GET(x) (((x) & 0x0000ff00) >> 8)
+#define PHY_BB_BBB_RX_CTRL_3_TIM_ADJUST_FREQ_CCK_SET(x) (((x) << 8) & 0x0000ff00)
+#define PHY_BB_BBB_RX_CTRL_3_TIMER_N_SFD_MSB 23
+#define PHY_BB_BBB_RX_CTRL_3_TIMER_N_SFD_LSB 16
+#define PHY_BB_BBB_RX_CTRL_3_TIMER_N_SFD_MASK 0x00ff0000
+#define PHY_BB_BBB_RX_CTRL_3_TIMER_N_SFD_GET(x) (((x) & 0x00ff0000) >> 16)
+#define PHY_BB_BBB_RX_CTRL_3_TIMER_N_SFD_SET(x) (((x) << 16) & 0x00ff0000)
+
+/* macros for BB_bbb_rx_ctrl_4 */
+#define PHY_BB_BBB_RX_CTRL_4_ADDRESS 0x0000a21c
+#define PHY_BB_BBB_RX_CTRL_4_OFFSET 0x0000a21c
+#define PHY_BB_BBB_RX_CTRL_4_TIMER_N_SYNC_MSB 3
+#define PHY_BB_BBB_RX_CTRL_4_TIMER_N_SYNC_LSB 0
+#define PHY_BB_BBB_RX_CTRL_4_TIMER_N_SYNC_MASK 0x0000000f
+#define PHY_BB_BBB_RX_CTRL_4_TIMER_N_SYNC_GET(x) (((x) & 0x0000000f) >> 0)
+#define PHY_BB_BBB_RX_CTRL_4_TIMER_N_SYNC_SET(x) (((x) << 0) & 0x0000000f)
+#define PHY_BB_BBB_RX_CTRL_4_TIM_ADJUST_TIMER_EXP_MSB 15
+#define PHY_BB_BBB_RX_CTRL_4_TIM_ADJUST_TIMER_EXP_LSB 4
+#define PHY_BB_BBB_RX_CTRL_4_TIM_ADJUST_TIMER_EXP_MASK 0x0000fff0
+#define PHY_BB_BBB_RX_CTRL_4_TIM_ADJUST_TIMER_EXP_GET(x) (((x) & 0x0000fff0) >> 4)
+#define PHY_BB_BBB_RX_CTRL_4_TIM_ADJUST_TIMER_EXP_SET(x) (((x) << 4) & 0x0000fff0)
+#define PHY_BB_BBB_RX_CTRL_4_FORCE_UNLOCKED_CLOCKS_MSB 16
+#define PHY_BB_BBB_RX_CTRL_4_FORCE_UNLOCKED_CLOCKS_LSB 16
+#define PHY_BB_BBB_RX_CTRL_4_FORCE_UNLOCKED_CLOCKS_MASK 0x00010000
+#define PHY_BB_BBB_RX_CTRL_4_FORCE_UNLOCKED_CLOCKS_GET(x) (((x) & 0x00010000) >> 16)
+#define PHY_BB_BBB_RX_CTRL_4_FORCE_UNLOCKED_CLOCKS_SET(x) (((x) << 16) & 0x00010000)
+#define PHY_BB_BBB_RX_CTRL_4_DYNAMIC_PREAM_SEL_MSB 17
+#define PHY_BB_BBB_RX_CTRL_4_DYNAMIC_PREAM_SEL_LSB 17
+#define PHY_BB_BBB_RX_CTRL_4_DYNAMIC_PREAM_SEL_MASK 0x00020000
+#define PHY_BB_BBB_RX_CTRL_4_DYNAMIC_PREAM_SEL_GET(x) (((x) & 0x00020000) >> 17)
+#define PHY_BB_BBB_RX_CTRL_4_DYNAMIC_PREAM_SEL_SET(x) (((x) << 17) & 0x00020000)
+#define PHY_BB_BBB_RX_CTRL_4_SHORT_PREAMBLE_MSB 18
+#define PHY_BB_BBB_RX_CTRL_4_SHORT_PREAMBLE_LSB 18
+#define PHY_BB_BBB_RX_CTRL_4_SHORT_PREAMBLE_MASK 0x00040000
+#define PHY_BB_BBB_RX_CTRL_4_SHORT_PREAMBLE_GET(x) (((x) & 0x00040000) >> 18)
+#define PHY_BB_BBB_RX_CTRL_4_SHORT_PREAMBLE_SET(x) (((x) << 18) & 0x00040000)
+#define PHY_BB_BBB_RX_CTRL_4_FREQ_EST_N_AVG_SHORT_MSB 24
+#define PHY_BB_BBB_RX_CTRL_4_FREQ_EST_N_AVG_SHORT_LSB 19
+#define PHY_BB_BBB_RX_CTRL_4_FREQ_EST_N_AVG_SHORT_MASK 0x01f80000
+#define PHY_BB_BBB_RX_CTRL_4_FREQ_EST_N_AVG_SHORT_GET(x) (((x) & 0x01f80000) >> 19)
+#define PHY_BB_BBB_RX_CTRL_4_FREQ_EST_N_AVG_SHORT_SET(x) (((x) << 19) & 0x01f80000)
+#define PHY_BB_BBB_RX_CTRL_4_CHAN_AVG_SHORT_MSB 30
+#define PHY_BB_BBB_RX_CTRL_4_CHAN_AVG_SHORT_LSB 25
+#define PHY_BB_BBB_RX_CTRL_4_CHAN_AVG_SHORT_MASK 0x7e000000
+#define PHY_BB_BBB_RX_CTRL_4_CHAN_AVG_SHORT_GET(x) (((x) & 0x7e000000) >> 25)
+#define PHY_BB_BBB_RX_CTRL_4_CHAN_AVG_SHORT_SET(x) (((x) << 25) & 0x7e000000)
+
+/* macros for BB_bbb_rx_ctrl_5 */
+#define PHY_BB_BBB_RX_CTRL_5_ADDRESS 0x0000a220
+#define PHY_BB_BBB_RX_CTRL_5_OFFSET 0x0000a220
+#define PHY_BB_BBB_RX_CTRL_5_LOOP_COEF_DPSK_C1_DATA_MSB 4
+#define PHY_BB_BBB_RX_CTRL_5_LOOP_COEF_DPSK_C1_DATA_LSB 0
+#define PHY_BB_BBB_RX_CTRL_5_LOOP_COEF_DPSK_C1_DATA_MASK 0x0000001f
+#define PHY_BB_BBB_RX_CTRL_5_LOOP_COEF_DPSK_C1_DATA_GET(x) (((x) & 0x0000001f) >> 0)
+#define PHY_BB_BBB_RX_CTRL_5_LOOP_COEF_DPSK_C1_DATA_SET(x) (((x) << 0) & 0x0000001f)
+#define PHY_BB_BBB_RX_CTRL_5_LOOP_COEF_DPSK_C1_HEAD_MSB 9
+#define PHY_BB_BBB_RX_CTRL_5_LOOP_COEF_DPSK_C1_HEAD_LSB 5
+#define PHY_BB_BBB_RX_CTRL_5_LOOP_COEF_DPSK_C1_HEAD_MASK 0x000003e0
+#define PHY_BB_BBB_RX_CTRL_5_LOOP_COEF_DPSK_C1_HEAD_GET(x) (((x) & 0x000003e0) >> 5)
+#define PHY_BB_BBB_RX_CTRL_5_LOOP_COEF_DPSK_C1_HEAD_SET(x) (((x) << 5) & 0x000003e0)
+#define PHY_BB_BBB_RX_CTRL_5_LOOP_COEF_DPSK_C2_HEAD_MSB 15
+#define PHY_BB_BBB_RX_CTRL_5_LOOP_COEF_DPSK_C2_HEAD_LSB 10
+#define PHY_BB_BBB_RX_CTRL_5_LOOP_COEF_DPSK_C2_HEAD_MASK 0x0000fc00
+#define PHY_BB_BBB_RX_CTRL_5_LOOP_COEF_DPSK_C2_HEAD_GET(x) (((x) & 0x0000fc00) >> 10)
+#define PHY_BB_BBB_RX_CTRL_5_LOOP_COEF_DPSK_C2_HEAD_SET(x) (((x) << 10) & 0x0000fc00)
+#define PHY_BB_BBB_RX_CTRL_5_LOOP_COEF_CCK_C1_MSB 20
+#define PHY_BB_BBB_RX_CTRL_5_LOOP_COEF_CCK_C1_LSB 16
+#define PHY_BB_BBB_RX_CTRL_5_LOOP_COEF_CCK_C1_MASK 0x001f0000
+#define PHY_BB_BBB_RX_CTRL_5_LOOP_COEF_CCK_C1_GET(x) (((x) & 0x001f0000) >> 16)
+#define PHY_BB_BBB_RX_CTRL_5_LOOP_COEF_CCK_C1_SET(x) (((x) << 16) & 0x001f0000)
+#define PHY_BB_BBB_RX_CTRL_5_LOOP_COEF_CCK_C2_MSB 26
+#define PHY_BB_BBB_RX_CTRL_5_LOOP_COEF_CCK_C2_LSB 21
+#define PHY_BB_BBB_RX_CTRL_5_LOOP_COEF_CCK_C2_MASK 0x07e00000
+#define PHY_BB_BBB_RX_CTRL_5_LOOP_COEF_CCK_C2_GET(x) (((x) & 0x07e00000) >> 21)
+#define PHY_BB_BBB_RX_CTRL_5_LOOP_COEF_CCK_C2_SET(x) (((x) << 21) & 0x07e00000)
+
+/* macros for BB_bbb_rx_ctrl_6 */
+#define PHY_BB_BBB_RX_CTRL_6_ADDRESS 0x0000a224
+#define PHY_BB_BBB_RX_CTRL_6_OFFSET 0x0000a224
+#define PHY_BB_BBB_RX_CTRL_6_SYNC_START_DELAY_MSB 9
+#define PHY_BB_BBB_RX_CTRL_6_SYNC_START_DELAY_LSB 0
+#define PHY_BB_BBB_RX_CTRL_6_SYNC_START_DELAY_MASK 0x000003ff
+#define PHY_BB_BBB_RX_CTRL_6_SYNC_START_DELAY_GET(x) (((x) & 0x000003ff) >> 0)
+#define PHY_BB_BBB_RX_CTRL_6_SYNC_START_DELAY_SET(x) (((x) << 0) & 0x000003ff)
+#define PHY_BB_BBB_RX_CTRL_6_MAP_1S_TO_2S_MSB 10
+#define PHY_BB_BBB_RX_CTRL_6_MAP_1S_TO_2S_LSB 10
+#define PHY_BB_BBB_RX_CTRL_6_MAP_1S_TO_2S_MASK 0x00000400
+#define PHY_BB_BBB_RX_CTRL_6_MAP_1S_TO_2S_GET(x) (((x) & 0x00000400) >> 10)
+#define PHY_BB_BBB_RX_CTRL_6_MAP_1S_TO_2S_SET(x) (((x) << 10) & 0x00000400)
+#define PHY_BB_BBB_RX_CTRL_6_START_IIR_DELAY_MSB 20
+#define PHY_BB_BBB_RX_CTRL_6_START_IIR_DELAY_LSB 11
+#define PHY_BB_BBB_RX_CTRL_6_START_IIR_DELAY_MASK 0x001ff800
+#define PHY_BB_BBB_RX_CTRL_6_START_IIR_DELAY_GET(x) (((x) & 0x001ff800) >> 11)
+#define PHY_BB_BBB_RX_CTRL_6_START_IIR_DELAY_SET(x) (((x) << 11) & 0x001ff800)
+
+/* macros for BB_bbb_dagc_ctrl */
+#define PHY_BB_BBB_DAGC_CTRL_ADDRESS 0x0000a228
+#define PHY_BB_BBB_DAGC_CTRL_OFFSET 0x0000a228
+#define PHY_BB_BBB_DAGC_CTRL_ENABLE_DAGC_CCK_MSB 0
+#define PHY_BB_BBB_DAGC_CTRL_ENABLE_DAGC_CCK_LSB 0
+#define PHY_BB_BBB_DAGC_CTRL_ENABLE_DAGC_CCK_MASK 0x00000001
+#define PHY_BB_BBB_DAGC_CTRL_ENABLE_DAGC_CCK_GET(x) (((x) & 0x00000001) >> 0)
+#define PHY_BB_BBB_DAGC_CTRL_ENABLE_DAGC_CCK_SET(x) (((x) << 0) & 0x00000001)
+#define PHY_BB_BBB_DAGC_CTRL_DAGC_TARGET_PWR_CCK_MSB 8
+#define PHY_BB_BBB_DAGC_CTRL_DAGC_TARGET_PWR_CCK_LSB 1
+#define PHY_BB_BBB_DAGC_CTRL_DAGC_TARGET_PWR_CCK_MASK 0x000001fe
+#define PHY_BB_BBB_DAGC_CTRL_DAGC_TARGET_PWR_CCK_GET(x) (((x) & 0x000001fe) >> 1)
+#define PHY_BB_BBB_DAGC_CTRL_DAGC_TARGET_PWR_CCK_SET(x) (((x) << 1) & 0x000001fe)
+#define PHY_BB_BBB_DAGC_CTRL_ENABLE_BARKER_RSSI_THR_MSB 9
+#define PHY_BB_BBB_DAGC_CTRL_ENABLE_BARKER_RSSI_THR_LSB 9
+#define PHY_BB_BBB_DAGC_CTRL_ENABLE_BARKER_RSSI_THR_MASK 0x00000200
+#define PHY_BB_BBB_DAGC_CTRL_ENABLE_BARKER_RSSI_THR_GET(x) (((x) & 0x00000200) >> 9)
+#define PHY_BB_BBB_DAGC_CTRL_ENABLE_BARKER_RSSI_THR_SET(x) (((x) << 9) & 0x00000200)
+#define PHY_BB_BBB_DAGC_CTRL_BARKER_RSSI_THR_MSB 16
+#define PHY_BB_BBB_DAGC_CTRL_BARKER_RSSI_THR_LSB 10
+#define PHY_BB_BBB_DAGC_CTRL_BARKER_RSSI_THR_MASK 0x0001fc00
+#define PHY_BB_BBB_DAGC_CTRL_BARKER_RSSI_THR_GET(x) (((x) & 0x0001fc00) >> 10)
+#define PHY_BB_BBB_DAGC_CTRL_BARKER_RSSI_THR_SET(x) (((x) << 10) & 0x0001fc00)
+#define PHY_BB_BBB_DAGC_CTRL_ENABLE_FIRSTEP_SEL_MSB 17
+#define PHY_BB_BBB_DAGC_CTRL_ENABLE_FIRSTEP_SEL_LSB 17
+#define PHY_BB_BBB_DAGC_CTRL_ENABLE_FIRSTEP_SEL_MASK 0x00020000
+#define PHY_BB_BBB_DAGC_CTRL_ENABLE_FIRSTEP_SEL_GET(x) (((x) & 0x00020000) >> 17)
+#define PHY_BB_BBB_DAGC_CTRL_ENABLE_FIRSTEP_SEL_SET(x) (((x) << 17) & 0x00020000)
+#define PHY_BB_BBB_DAGC_CTRL_FIRSTEP_2_MSB 23
+#define PHY_BB_BBB_DAGC_CTRL_FIRSTEP_2_LSB 18
+#define PHY_BB_BBB_DAGC_CTRL_FIRSTEP_2_MASK 0x00fc0000
+#define PHY_BB_BBB_DAGC_CTRL_FIRSTEP_2_GET(x) (((x) & 0x00fc0000) >> 18)
+#define PHY_BB_BBB_DAGC_CTRL_FIRSTEP_2_SET(x) (((x) << 18) & 0x00fc0000)
+#define PHY_BB_BBB_DAGC_CTRL_FIRSTEP_COUNT_LGMAX_MSB 27
+#define PHY_BB_BBB_DAGC_CTRL_FIRSTEP_COUNT_LGMAX_LSB 24
+#define PHY_BB_BBB_DAGC_CTRL_FIRSTEP_COUNT_LGMAX_MASK 0x0f000000
+#define PHY_BB_BBB_DAGC_CTRL_FIRSTEP_COUNT_LGMAX_GET(x) (((x) & 0x0f000000) >> 24)
+#define PHY_BB_BBB_DAGC_CTRL_FIRSTEP_COUNT_LGMAX_SET(x) (((x) << 24) & 0x0f000000)
+
+/* macros for BB_force_clken_cck */
+#define PHY_BB_FORCE_CLKEN_CCK_ADDRESS 0x0000a22c
+#define PHY_BB_FORCE_CLKEN_CCK_OFFSET 0x0000a22c
+#define PHY_BB_FORCE_CLKEN_CCK_FORCE_RX_ENABLE0_MSB 0
+#define PHY_BB_FORCE_CLKEN_CCK_FORCE_RX_ENABLE0_LSB 0
+#define PHY_BB_FORCE_CLKEN_CCK_FORCE_RX_ENABLE0_MASK 0x00000001
+#define PHY_BB_FORCE_CLKEN_CCK_FORCE_RX_ENABLE0_GET(x) (((x) & 0x00000001) >> 0)
+#define PHY_BB_FORCE_CLKEN_CCK_FORCE_RX_ENABLE0_SET(x) (((x) << 0) & 0x00000001)
+#define PHY_BB_FORCE_CLKEN_CCK_FORCE_RX_ENABLE1_MSB 1
+#define PHY_BB_FORCE_CLKEN_CCK_FORCE_RX_ENABLE1_LSB 1
+#define PHY_BB_FORCE_CLKEN_CCK_FORCE_RX_ENABLE1_MASK 0x00000002
+#define PHY_BB_FORCE_CLKEN_CCK_FORCE_RX_ENABLE1_GET(x) (((x) & 0x00000002) >> 1)
+#define PHY_BB_FORCE_CLKEN_CCK_FORCE_RX_ENABLE1_SET(x) (((x) << 1) & 0x00000002)
+#define PHY_BB_FORCE_CLKEN_CCK_FORCE_RX_ENABLE2_MSB 2
+#define PHY_BB_FORCE_CLKEN_CCK_FORCE_RX_ENABLE2_LSB 2
+#define PHY_BB_FORCE_CLKEN_CCK_FORCE_RX_ENABLE2_MASK 0x00000004
+#define PHY_BB_FORCE_CLKEN_CCK_FORCE_RX_ENABLE2_GET(x) (((x) & 0x00000004) >> 2)
+#define PHY_BB_FORCE_CLKEN_CCK_FORCE_RX_ENABLE2_SET(x) (((x) << 2) & 0x00000004)
+#define PHY_BB_FORCE_CLKEN_CCK_FORCE_RX_ENABLE3_MSB 3
+#define PHY_BB_FORCE_CLKEN_CCK_FORCE_RX_ENABLE3_LSB 3
+#define PHY_BB_FORCE_CLKEN_CCK_FORCE_RX_ENABLE3_MASK 0x00000008
+#define PHY_BB_FORCE_CLKEN_CCK_FORCE_RX_ENABLE3_GET(x) (((x) & 0x00000008) >> 3)
+#define PHY_BB_FORCE_CLKEN_CCK_FORCE_RX_ENABLE3_SET(x) (((x) << 3) & 0x00000008)
+#define PHY_BB_FORCE_CLKEN_CCK_FORCE_RX_ALWAYS_MSB 4
+#define PHY_BB_FORCE_CLKEN_CCK_FORCE_RX_ALWAYS_LSB 4
+#define PHY_BB_FORCE_CLKEN_CCK_FORCE_RX_ALWAYS_MASK 0x00000010
+#define PHY_BB_FORCE_CLKEN_CCK_FORCE_RX_ALWAYS_GET(x) (((x) & 0x00000010) >> 4)
+#define PHY_BB_FORCE_CLKEN_CCK_FORCE_RX_ALWAYS_SET(x) (((x) << 4) & 0x00000010)
+#define PHY_BB_FORCE_CLKEN_CCK_FORCE_TXSM_CLKEN_MSB 5
+#define PHY_BB_FORCE_CLKEN_CCK_FORCE_TXSM_CLKEN_LSB 5
+#define PHY_BB_FORCE_CLKEN_CCK_FORCE_TXSM_CLKEN_MASK 0x00000020
+#define PHY_BB_FORCE_CLKEN_CCK_FORCE_TXSM_CLKEN_GET(x) (((x) & 0x00000020) >> 5)
+#define PHY_BB_FORCE_CLKEN_CCK_FORCE_TXSM_CLKEN_SET(x) (((x) << 5) & 0x00000020)
+
+/* macros for BB_rx_clear_delay */
+#define PHY_BB_RX_CLEAR_DELAY_ADDRESS 0x0000a230
+#define PHY_BB_RX_CLEAR_DELAY_OFFSET 0x0000a230
+#define PHY_BB_RX_CLEAR_DELAY_OFDM_XR_RX_CLEAR_DELAY_MSB 9
+#define PHY_BB_RX_CLEAR_DELAY_OFDM_XR_RX_CLEAR_DELAY_LSB 0
+#define PHY_BB_RX_CLEAR_DELAY_OFDM_XR_RX_CLEAR_DELAY_MASK 0x000003ff
+#define PHY_BB_RX_CLEAR_DELAY_OFDM_XR_RX_CLEAR_DELAY_GET(x) (((x) & 0x000003ff) >> 0)
+#define PHY_BB_RX_CLEAR_DELAY_OFDM_XR_RX_CLEAR_DELAY_SET(x) (((x) << 0) & 0x000003ff)
+
+/* macros for BB_powertx_rate3 */
+#define PHY_BB_POWERTX_RATE3_ADDRESS 0x0000a234
+#define PHY_BB_POWERTX_RATE3_OFFSET 0x0000a234
+#define PHY_BB_POWERTX_RATE3_POWERTX_1L_MSB 5
+#define PHY_BB_POWERTX_RATE3_POWERTX_1L_LSB 0
+#define PHY_BB_POWERTX_RATE3_POWERTX_1L_MASK 0x0000003f
+#define PHY_BB_POWERTX_RATE3_POWERTX_1L_GET(x) (((x) & 0x0000003f) >> 0)
+#define PHY_BB_POWERTX_RATE3_POWERTX_1L_SET(x) (((x) << 0) & 0x0000003f)
+#define PHY_BB_POWERTX_RATE3_POWERTX_2L_MSB 21
+#define PHY_BB_POWERTX_RATE3_POWERTX_2L_LSB 16
+#define PHY_BB_POWERTX_RATE3_POWERTX_2L_MASK 0x003f0000
+#define PHY_BB_POWERTX_RATE3_POWERTX_2L_GET(x) (((x) & 0x003f0000) >> 16)
+#define PHY_BB_POWERTX_RATE3_POWERTX_2L_SET(x) (((x) << 16) & 0x003f0000)
+#define PHY_BB_POWERTX_RATE3_POWERTX_2S_MSB 29
+#define PHY_BB_POWERTX_RATE3_POWERTX_2S_LSB 24
+#define PHY_BB_POWERTX_RATE3_POWERTX_2S_MASK 0x3f000000
+#define PHY_BB_POWERTX_RATE3_POWERTX_2S_GET(x) (((x) & 0x3f000000) >> 24)
+#define PHY_BB_POWERTX_RATE3_POWERTX_2S_SET(x) (((x) << 24) & 0x3f000000)
+
+/* macros for BB_powertx_rate4 */
+#define PHY_BB_POWERTX_RATE4_ADDRESS 0x0000a238
+#define PHY_BB_POWERTX_RATE4_OFFSET 0x0000a238
+#define PHY_BB_POWERTX_RATE4_POWERTX_55L_MSB 5
+#define PHY_BB_POWERTX_RATE4_POWERTX_55L_LSB 0
+#define PHY_BB_POWERTX_RATE4_POWERTX_55L_MASK 0x0000003f
+#define PHY_BB_POWERTX_RATE4_POWERTX_55L_GET(x) (((x) & 0x0000003f) >> 0)
+#define PHY_BB_POWERTX_RATE4_POWERTX_55L_SET(x) (((x) << 0) & 0x0000003f)
+#define PHY_BB_POWERTX_RATE4_POWERTX_55S_MSB 13
+#define PHY_BB_POWERTX_RATE4_POWERTX_55S_LSB 8
+#define PHY_BB_POWERTX_RATE4_POWERTX_55S_MASK 0x00003f00
+#define PHY_BB_POWERTX_RATE4_POWERTX_55S_GET(x) (((x) & 0x00003f00) >> 8)
+#define PHY_BB_POWERTX_RATE4_POWERTX_55S_SET(x) (((x) << 8) & 0x00003f00)
+#define PHY_BB_POWERTX_RATE4_POWERTX_11L_MSB 21
+#define PHY_BB_POWERTX_RATE4_POWERTX_11L_LSB 16
+#define PHY_BB_POWERTX_RATE4_POWERTX_11L_MASK 0x003f0000
+#define PHY_BB_POWERTX_RATE4_POWERTX_11L_GET(x) (((x) & 0x003f0000) >> 16)
+#define PHY_BB_POWERTX_RATE4_POWERTX_11L_SET(x) (((x) << 16) & 0x003f0000)
+#define PHY_BB_POWERTX_RATE4_POWERTX_11S_MSB 29
+#define PHY_BB_POWERTX_RATE4_POWERTX_11S_LSB 24
+#define PHY_BB_POWERTX_RATE4_POWERTX_11S_MASK 0x3f000000
+#define PHY_BB_POWERTX_RATE4_POWERTX_11S_GET(x) (((x) & 0x3f000000) >> 24)
+#define PHY_BB_POWERTX_RATE4_POWERTX_11S_SET(x) (((x) << 24) & 0x3f000000)
+
+/* macros for BB_cck_spur_mit */
+#define PHY_BB_CCK_SPUR_MIT_ADDRESS 0x0000a240
+#define PHY_BB_CCK_SPUR_MIT_OFFSET 0x0000a240
+#define PHY_BB_CCK_SPUR_MIT_USE_CCK_SPUR_MIT_MSB 0
+#define PHY_BB_CCK_SPUR_MIT_USE_CCK_SPUR_MIT_LSB 0
+#define PHY_BB_CCK_SPUR_MIT_USE_CCK_SPUR_MIT_MASK 0x00000001
+#define PHY_BB_CCK_SPUR_MIT_USE_CCK_SPUR_MIT_GET(x) (((x) & 0x00000001) >> 0)
+#define PHY_BB_CCK_SPUR_MIT_USE_CCK_SPUR_MIT_SET(x) (((x) << 0) & 0x00000001)
+#define PHY_BB_CCK_SPUR_MIT_SPUR_RSSI_THR_MSB 8
+#define PHY_BB_CCK_SPUR_MIT_SPUR_RSSI_THR_LSB 1
+#define PHY_BB_CCK_SPUR_MIT_SPUR_RSSI_THR_MASK 0x000001fe
+#define PHY_BB_CCK_SPUR_MIT_SPUR_RSSI_THR_GET(x) (((x) & 0x000001fe) >> 1)
+#define PHY_BB_CCK_SPUR_MIT_SPUR_RSSI_THR_SET(x) (((x) << 1) & 0x000001fe)
+#define PHY_BB_CCK_SPUR_MIT_CCK_SPUR_FREQ_MSB 28
+#define PHY_BB_CCK_SPUR_MIT_CCK_SPUR_FREQ_LSB 9
+#define PHY_BB_CCK_SPUR_MIT_CCK_SPUR_FREQ_MASK 0x1ffffe00
+#define PHY_BB_CCK_SPUR_MIT_CCK_SPUR_FREQ_GET(x) (((x) & 0x1ffffe00) >> 9)
+#define PHY_BB_CCK_SPUR_MIT_CCK_SPUR_FREQ_SET(x) (((x) << 9) & 0x1ffffe00)
+#define PHY_BB_CCK_SPUR_MIT_SPUR_FILTER_TYPE_MSB 30
+#define PHY_BB_CCK_SPUR_MIT_SPUR_FILTER_TYPE_LSB 29
+#define PHY_BB_CCK_SPUR_MIT_SPUR_FILTER_TYPE_MASK 0x60000000
+#define PHY_BB_CCK_SPUR_MIT_SPUR_FILTER_TYPE_GET(x) (((x) & 0x60000000) >> 29)
+#define PHY_BB_CCK_SPUR_MIT_SPUR_FILTER_TYPE_SET(x) (((x) << 29) & 0x60000000)
+
+/* macros for BB_panic_watchdog_status */
+#define PHY_BB_PANIC_WATCHDOG_STATUS_ADDRESS 0x0000a244
+#define PHY_BB_PANIC_WATCHDOG_STATUS_OFFSET 0x0000a244
+#define PHY_BB_PANIC_WATCHDOG_STATUS_PANIC_WATCHDOG_STATUS_1_MSB 2
+#define PHY_BB_PANIC_WATCHDOG_STATUS_PANIC_WATCHDOG_STATUS_1_LSB 0
+#define PHY_BB_PANIC_WATCHDOG_STATUS_PANIC_WATCHDOG_STATUS_1_MASK 0x00000007
+#define PHY_BB_PANIC_WATCHDOG_STATUS_PANIC_WATCHDOG_STATUS_1_GET(x) (((x) & 0x00000007) >> 0)
+#define PHY_BB_PANIC_WATCHDOG_STATUS_PANIC_WATCHDOG_STATUS_1_SET(x) (((x) << 0) & 0x00000007)
+#define PHY_BB_PANIC_WATCHDOG_STATUS_PANIC_WATCHDOG_DET_HANG_MSB 3
+#define PHY_BB_PANIC_WATCHDOG_STATUS_PANIC_WATCHDOG_DET_HANG_LSB 3
+#define PHY_BB_PANIC_WATCHDOG_STATUS_PANIC_WATCHDOG_DET_HANG_MASK 0x00000008
+#define PHY_BB_PANIC_WATCHDOG_STATUS_PANIC_WATCHDOG_DET_HANG_GET(x) (((x) & 0x00000008) >> 3)
+#define PHY_BB_PANIC_WATCHDOG_STATUS_PANIC_WATCHDOG_DET_HANG_SET(x) (((x) << 3) & 0x00000008)
+#define PHY_BB_PANIC_WATCHDOG_STATUS_PANIC_WATCHDOG_STATUS_2_MSB 7
+#define PHY_BB_PANIC_WATCHDOG_STATUS_PANIC_WATCHDOG_STATUS_2_LSB 4
+#define PHY_BB_PANIC_WATCHDOG_STATUS_PANIC_WATCHDOG_STATUS_2_MASK 0x000000f0
+#define PHY_BB_PANIC_WATCHDOG_STATUS_PANIC_WATCHDOG_STATUS_2_GET(x) (((x) & 0x000000f0) >> 4)
+#define PHY_BB_PANIC_WATCHDOG_STATUS_PANIC_WATCHDOG_STATUS_2_SET(x) (((x) << 4) & 0x000000f0)
+#define PHY_BB_PANIC_WATCHDOG_STATUS_PANIC_WATCHDOG_STATUS_3_MSB 11
+#define PHY_BB_PANIC_WATCHDOG_STATUS_PANIC_WATCHDOG_STATUS_3_LSB 8
+#define PHY_BB_PANIC_WATCHDOG_STATUS_PANIC_WATCHDOG_STATUS_3_MASK 0x00000f00
+#define PHY_BB_PANIC_WATCHDOG_STATUS_PANIC_WATCHDOG_STATUS_3_GET(x) (((x) & 0x00000f00) >> 8)
+#define PHY_BB_PANIC_WATCHDOG_STATUS_PANIC_WATCHDOG_STATUS_3_SET(x) (((x) << 8) & 0x00000f00)
+#define PHY_BB_PANIC_WATCHDOG_STATUS_PANIC_WATCHDOG_STATUS_4_MSB 15
+#define PHY_BB_PANIC_WATCHDOG_STATUS_PANIC_WATCHDOG_STATUS_4_LSB 12
+#define PHY_BB_PANIC_WATCHDOG_STATUS_PANIC_WATCHDOG_STATUS_4_MASK 0x0000f000
+#define PHY_BB_PANIC_WATCHDOG_STATUS_PANIC_WATCHDOG_STATUS_4_GET(x) (((x) & 0x0000f000) >> 12)
+#define PHY_BB_PANIC_WATCHDOG_STATUS_PANIC_WATCHDOG_STATUS_4_SET(x) (((x) << 12) & 0x0000f000)
+#define PHY_BB_PANIC_WATCHDOG_STATUS_PANIC_WATCHDOG_STATUS_5_MSB 19
+#define PHY_BB_PANIC_WATCHDOG_STATUS_PANIC_WATCHDOG_STATUS_5_LSB 16
+#define PHY_BB_PANIC_WATCHDOG_STATUS_PANIC_WATCHDOG_STATUS_5_MASK 0x000f0000
+#define PHY_BB_PANIC_WATCHDOG_STATUS_PANIC_WATCHDOG_STATUS_5_GET(x) (((x) & 0x000f0000) >> 16)
+#define PHY_BB_PANIC_WATCHDOG_STATUS_PANIC_WATCHDOG_STATUS_5_SET(x) (((x) << 16) & 0x000f0000)
+#define PHY_BB_PANIC_WATCHDOG_STATUS_PANIC_WATCHDOG_STATUS_6_MSB 23
+#define PHY_BB_PANIC_WATCHDOG_STATUS_PANIC_WATCHDOG_STATUS_6_LSB 20
+#define PHY_BB_PANIC_WATCHDOG_STATUS_PANIC_WATCHDOG_STATUS_6_MASK 0x00f00000
+#define PHY_BB_PANIC_WATCHDOG_STATUS_PANIC_WATCHDOG_STATUS_6_GET(x) (((x) & 0x00f00000) >> 20)
+#define PHY_BB_PANIC_WATCHDOG_STATUS_PANIC_WATCHDOG_STATUS_6_SET(x) (((x) << 20) & 0x00f00000)
+#define PHY_BB_PANIC_WATCHDOG_STATUS_PANIC_WATCHDOG_STATUS_7_MSB 27
+#define PHY_BB_PANIC_WATCHDOG_STATUS_PANIC_WATCHDOG_STATUS_7_LSB 24
+#define PHY_BB_PANIC_WATCHDOG_STATUS_PANIC_WATCHDOG_STATUS_7_MASK 0x0f000000
+#define PHY_BB_PANIC_WATCHDOG_STATUS_PANIC_WATCHDOG_STATUS_7_GET(x) (((x) & 0x0f000000) >> 24)
+#define PHY_BB_PANIC_WATCHDOG_STATUS_PANIC_WATCHDOG_STATUS_7_SET(x) (((x) << 24) & 0x0f000000)
+#define PHY_BB_PANIC_WATCHDOG_STATUS_PANIC_WATCHDOG_STATUS_8_MSB 31
+#define PHY_BB_PANIC_WATCHDOG_STATUS_PANIC_WATCHDOG_STATUS_8_LSB 28
+#define PHY_BB_PANIC_WATCHDOG_STATUS_PANIC_WATCHDOG_STATUS_8_MASK 0xf0000000
+#define PHY_BB_PANIC_WATCHDOG_STATUS_PANIC_WATCHDOG_STATUS_8_GET(x) (((x) & 0xf0000000) >> 28)
+#define PHY_BB_PANIC_WATCHDOG_STATUS_PANIC_WATCHDOG_STATUS_8_SET(x) (((x) << 28) & 0xf0000000)
+
+/* macros for BB_panic_watchdog_ctrl_1 */
+#define PHY_BB_PANIC_WATCHDOG_CTRL_1_ADDRESS 0x0000a248
+#define PHY_BB_PANIC_WATCHDOG_CTRL_1_OFFSET 0x0000a248
+#define PHY_BB_PANIC_WATCHDOG_CTRL_1_ENABLE_PANIC_WATCHDOG_NON_IDLE_MSB 0
+#define PHY_BB_PANIC_WATCHDOG_CTRL_1_ENABLE_PANIC_WATCHDOG_NON_IDLE_LSB 0
+#define PHY_BB_PANIC_WATCHDOG_CTRL_1_ENABLE_PANIC_WATCHDOG_NON_IDLE_MASK 0x00000001
+#define PHY_BB_PANIC_WATCHDOG_CTRL_1_ENABLE_PANIC_WATCHDOG_NON_IDLE_GET(x) (((x) & 0x00000001) >> 0)
+#define PHY_BB_PANIC_WATCHDOG_CTRL_1_ENABLE_PANIC_WATCHDOG_NON_IDLE_SET(x) (((x) << 0) & 0x00000001)
+#define PHY_BB_PANIC_WATCHDOG_CTRL_1_ENABLE_PANIC_WATCHDOG_IDLE_MSB 1
+#define PHY_BB_PANIC_WATCHDOG_CTRL_1_ENABLE_PANIC_WATCHDOG_IDLE_LSB 1
+#define PHY_BB_PANIC_WATCHDOG_CTRL_1_ENABLE_PANIC_WATCHDOG_IDLE_MASK 0x00000002
+#define PHY_BB_PANIC_WATCHDOG_CTRL_1_ENABLE_PANIC_WATCHDOG_IDLE_GET(x) (((x) & 0x00000002) >> 1)
+#define PHY_BB_PANIC_WATCHDOG_CTRL_1_ENABLE_PANIC_WATCHDOG_IDLE_SET(x) (((x) << 1) & 0x00000002)
+#define PHY_BB_PANIC_WATCHDOG_CTRL_1_PANIC_WATCHDOG_NON_IDLE_LIMIT_MSB 15
+#define PHY_BB_PANIC_WATCHDOG_CTRL_1_PANIC_WATCHDOG_NON_IDLE_LIMIT_LSB 2
+#define PHY_BB_PANIC_WATCHDOG_CTRL_1_PANIC_WATCHDOG_NON_IDLE_LIMIT_MASK 0x0000fffc
+#define PHY_BB_PANIC_WATCHDOG_CTRL_1_PANIC_WATCHDOG_NON_IDLE_LIMIT_GET(x) (((x) & 0x0000fffc) >> 2)
+#define PHY_BB_PANIC_WATCHDOG_CTRL_1_PANIC_WATCHDOG_NON_IDLE_LIMIT_SET(x) (((x) << 2) & 0x0000fffc)
+#define PHY_BB_PANIC_WATCHDOG_CTRL_1_PANIC_WATCHDOG_IDLE_LIMIT_MSB 31
+#define PHY_BB_PANIC_WATCHDOG_CTRL_1_PANIC_WATCHDOG_IDLE_LIMIT_LSB 16
+#define PHY_BB_PANIC_WATCHDOG_CTRL_1_PANIC_WATCHDOG_IDLE_LIMIT_MASK 0xffff0000
+#define PHY_BB_PANIC_WATCHDOG_CTRL_1_PANIC_WATCHDOG_IDLE_LIMIT_GET(x) (((x) & 0xffff0000) >> 16)
+#define PHY_BB_PANIC_WATCHDOG_CTRL_1_PANIC_WATCHDOG_IDLE_LIMIT_SET(x) (((x) << 16) & 0xffff0000)
+
+/* macros for BB_panic_watchdog_ctrl_2 */
+#define PHY_BB_PANIC_WATCHDOG_CTRL_2_ADDRESS 0x0000a24c
+#define PHY_BB_PANIC_WATCHDOG_CTRL_2_OFFSET 0x0000a24c
+#define PHY_BB_PANIC_WATCHDOG_CTRL_2_FORCE_FAST_ADC_CLK_MSB 0
+#define PHY_BB_PANIC_WATCHDOG_CTRL_2_FORCE_FAST_ADC_CLK_LSB 0
+#define PHY_BB_PANIC_WATCHDOG_CTRL_2_FORCE_FAST_ADC_CLK_MASK 0x00000001
+#define PHY_BB_PANIC_WATCHDOG_CTRL_2_FORCE_FAST_ADC_CLK_GET(x) (((x) & 0x00000001) >> 0)
+#define PHY_BB_PANIC_WATCHDOG_CTRL_2_FORCE_FAST_ADC_CLK_SET(x) (((x) << 0) & 0x00000001)
+#define PHY_BB_PANIC_WATCHDOG_CTRL_2_PANIC_WATCHDOG_RESET_ENA_MSB 1
+#define PHY_BB_PANIC_WATCHDOG_CTRL_2_PANIC_WATCHDOG_RESET_ENA_LSB 1
+#define PHY_BB_PANIC_WATCHDOG_CTRL_2_PANIC_WATCHDOG_RESET_ENA_MASK 0x00000002
+#define PHY_BB_PANIC_WATCHDOG_CTRL_2_PANIC_WATCHDOG_RESET_ENA_GET(x) (((x) & 0x00000002) >> 1)
+#define PHY_BB_PANIC_WATCHDOG_CTRL_2_PANIC_WATCHDOG_RESET_ENA_SET(x) (((x) << 1) & 0x00000002)
+#define PHY_BB_PANIC_WATCHDOG_CTRL_2_PANIC_WATCHDOG_IRQ_ENA_MSB 2
+#define PHY_BB_PANIC_WATCHDOG_CTRL_2_PANIC_WATCHDOG_IRQ_ENA_LSB 2
+#define PHY_BB_PANIC_WATCHDOG_CTRL_2_PANIC_WATCHDOG_IRQ_ENA_MASK 0x00000004
+#define PHY_BB_PANIC_WATCHDOG_CTRL_2_PANIC_WATCHDOG_IRQ_ENA_GET(x) (((x) & 0x00000004) >> 2)
+#define PHY_BB_PANIC_WATCHDOG_CTRL_2_PANIC_WATCHDOG_IRQ_ENA_SET(x) (((x) << 2) & 0x00000004)
+
+/* macros for BB_iqcorr_ctrl_cck */
+#define PHY_BB_IQCORR_CTRL_CCK_ADDRESS 0x0000a250
+#define PHY_BB_IQCORR_CTRL_CCK_OFFSET 0x0000a250
+#define PHY_BB_IQCORR_CTRL_CCK_IQCORR_Q_Q_COFF_CCK_MSB 4
+#define PHY_BB_IQCORR_CTRL_CCK_IQCORR_Q_Q_COFF_CCK_LSB 0
+#define PHY_BB_IQCORR_CTRL_CCK_IQCORR_Q_Q_COFF_CCK_MASK 0x0000001f
+#define PHY_BB_IQCORR_CTRL_CCK_IQCORR_Q_Q_COFF_CCK_GET(x) (((x) & 0x0000001f) >> 0)
+#define PHY_BB_IQCORR_CTRL_CCK_IQCORR_Q_Q_COFF_CCK_SET(x) (((x) << 0) & 0x0000001f)
+#define PHY_BB_IQCORR_CTRL_CCK_IQCORR_Q_I_COFF_CCK_MSB 10
+#define PHY_BB_IQCORR_CTRL_CCK_IQCORR_Q_I_COFF_CCK_LSB 5
+#define PHY_BB_IQCORR_CTRL_CCK_IQCORR_Q_I_COFF_CCK_MASK 0x000007e0
+#define PHY_BB_IQCORR_CTRL_CCK_IQCORR_Q_I_COFF_CCK_GET(x) (((x) & 0x000007e0) >> 5)
+#define PHY_BB_IQCORR_CTRL_CCK_IQCORR_Q_I_COFF_CCK_SET(x) (((x) << 5) & 0x000007e0)
+#define PHY_BB_IQCORR_CTRL_CCK_ENABLE_IQCORR_CCK_MSB 11
+#define PHY_BB_IQCORR_CTRL_CCK_ENABLE_IQCORR_CCK_LSB 11
+#define PHY_BB_IQCORR_CTRL_CCK_ENABLE_IQCORR_CCK_MASK 0x00000800
+#define PHY_BB_IQCORR_CTRL_CCK_ENABLE_IQCORR_CCK_GET(x) (((x) & 0x00000800) >> 11)
+#define PHY_BB_IQCORR_CTRL_CCK_ENABLE_IQCORR_CCK_SET(x) (((x) << 11) & 0x00000800)
+#define PHY_BB_IQCORR_CTRL_CCK_RXCAL_MEAS_TIME_SEL_MSB 13
+#define PHY_BB_IQCORR_CTRL_CCK_RXCAL_MEAS_TIME_SEL_LSB 12
+#define PHY_BB_IQCORR_CTRL_CCK_RXCAL_MEAS_TIME_SEL_MASK 0x00003000
+#define PHY_BB_IQCORR_CTRL_CCK_RXCAL_MEAS_TIME_SEL_GET(x) (((x) & 0x00003000) >> 12)
+#define PHY_BB_IQCORR_CTRL_CCK_RXCAL_MEAS_TIME_SEL_SET(x) (((x) << 12) & 0x00003000)
+#define PHY_BB_IQCORR_CTRL_CCK_CLCAL_MEAS_TIME_SEL_MSB 15
+#define PHY_BB_IQCORR_CTRL_CCK_CLCAL_MEAS_TIME_SEL_LSB 14
+#define PHY_BB_IQCORR_CTRL_CCK_CLCAL_MEAS_TIME_SEL_MASK 0x0000c000
+#define PHY_BB_IQCORR_CTRL_CCK_CLCAL_MEAS_TIME_SEL_GET(x) (((x) & 0x0000c000) >> 14)
+#define PHY_BB_IQCORR_CTRL_CCK_CLCAL_MEAS_TIME_SEL_SET(x) (((x) << 14) & 0x0000c000)
+#define PHY_BB_IQCORR_CTRL_CCK_CF_CLC_INIT_RFGAIN_MSB 20
+#define PHY_BB_IQCORR_CTRL_CCK_CF_CLC_INIT_RFGAIN_LSB 16
+#define PHY_BB_IQCORR_CTRL_CCK_CF_CLC_INIT_RFGAIN_MASK 0x001f0000
+#define PHY_BB_IQCORR_CTRL_CCK_CF_CLC_INIT_RFGAIN_GET(x) (((x) & 0x001f0000) >> 16)
+#define PHY_BB_IQCORR_CTRL_CCK_CF_CLC_INIT_RFGAIN_SET(x) (((x) << 16) & 0x001f0000)
+#define PHY_BB_IQCORR_CTRL_CCK_CF_CLC_PAL_MODE_MSB 21
+#define PHY_BB_IQCORR_CTRL_CCK_CF_CLC_PAL_MODE_LSB 21
+#define PHY_BB_IQCORR_CTRL_CCK_CF_CLC_PAL_MODE_MASK 0x00200000
+#define PHY_BB_IQCORR_CTRL_CCK_CF_CLC_PAL_MODE_GET(x) (((x) & 0x00200000) >> 21)
+#define PHY_BB_IQCORR_CTRL_CCK_CF_CLC_PAL_MODE_SET(x) (((x) << 21) & 0x00200000)
+
+/* macros for BB_bluetooth_cntl */
+#define PHY_BB_BLUETOOTH_CNTL_ADDRESS 0x0000a254
+#define PHY_BB_BLUETOOTH_CNTL_OFFSET 0x0000a254
+#define PHY_BB_BLUETOOTH_CNTL_BT_BREAK_CCK_EN_MSB 0
+#define PHY_BB_BLUETOOTH_CNTL_BT_BREAK_CCK_EN_LSB 0
+#define PHY_BB_BLUETOOTH_CNTL_BT_BREAK_CCK_EN_MASK 0x00000001
+#define PHY_BB_BLUETOOTH_CNTL_BT_BREAK_CCK_EN_GET(x) (((x) & 0x00000001) >> 0)
+#define PHY_BB_BLUETOOTH_CNTL_BT_BREAK_CCK_EN_SET(x) (((x) << 0) & 0x00000001)
+#define PHY_BB_BLUETOOTH_CNTL_BT_ANT_HALT_WLAN_MSB 1
+#define PHY_BB_BLUETOOTH_CNTL_BT_ANT_HALT_WLAN_LSB 1
+#define PHY_BB_BLUETOOTH_CNTL_BT_ANT_HALT_WLAN_MASK 0x00000002
+#define PHY_BB_BLUETOOTH_CNTL_BT_ANT_HALT_WLAN_GET(x) (((x) & 0x00000002) >> 1)
+#define PHY_BB_BLUETOOTH_CNTL_BT_ANT_HALT_WLAN_SET(x) (((x) << 1) & 0x00000002)
+
+/* macros for BB_tpc_1 */
+#define PHY_BB_TPC_1_ADDRESS 0x0000a258
+#define PHY_BB_TPC_1_OFFSET 0x0000a258
+#define PHY_BB_TPC_1_FORCE_DAC_GAIN_MSB 0
+#define PHY_BB_TPC_1_FORCE_DAC_GAIN_LSB 0
+#define PHY_BB_TPC_1_FORCE_DAC_GAIN_MASK 0x00000001
+#define PHY_BB_TPC_1_FORCE_DAC_GAIN_GET(x) (((x) & 0x00000001) >> 0)
+#define PHY_BB_TPC_1_FORCE_DAC_GAIN_SET(x) (((x) << 0) & 0x00000001)
+#define PHY_BB_TPC_1_FORCED_DAC_GAIN_MSB 5
+#define PHY_BB_TPC_1_FORCED_DAC_GAIN_LSB 1
+#define PHY_BB_TPC_1_FORCED_DAC_GAIN_MASK 0x0000003e
+#define PHY_BB_TPC_1_FORCED_DAC_GAIN_GET(x) (((x) & 0x0000003e) >> 1)
+#define PHY_BB_TPC_1_FORCED_DAC_GAIN_SET(x) (((x) << 1) & 0x0000003e)
+#define PHY_BB_TPC_1_PD_DC_OFFSET_TARGET_MSB 13
+#define PHY_BB_TPC_1_PD_DC_OFFSET_TARGET_LSB 6
+#define PHY_BB_TPC_1_PD_DC_OFFSET_TARGET_MASK 0x00003fc0
+#define PHY_BB_TPC_1_PD_DC_OFFSET_TARGET_GET(x) (((x) & 0x00003fc0) >> 6)
+#define PHY_BB_TPC_1_PD_DC_OFFSET_TARGET_SET(x) (((x) << 6) & 0x00003fc0)
+#define PHY_BB_TPC_1_NUM_PD_GAIN_MSB 15
+#define PHY_BB_TPC_1_NUM_PD_GAIN_LSB 14
+#define PHY_BB_TPC_1_NUM_PD_GAIN_MASK 0x0000c000
+#define PHY_BB_TPC_1_NUM_PD_GAIN_GET(x) (((x) & 0x0000c000) >> 14)
+#define PHY_BB_TPC_1_NUM_PD_GAIN_SET(x) (((x) << 14) & 0x0000c000)
+#define PHY_BB_TPC_1_PD_GAIN_SETTING1_MSB 17
+#define PHY_BB_TPC_1_PD_GAIN_SETTING1_LSB 16
+#define PHY_BB_TPC_1_PD_GAIN_SETTING1_MASK 0x00030000
+#define PHY_BB_TPC_1_PD_GAIN_SETTING1_GET(x) (((x) & 0x00030000) >> 16)
+#define PHY_BB_TPC_1_PD_GAIN_SETTING1_SET(x) (((x) << 16) & 0x00030000)
+#define PHY_BB_TPC_1_PD_GAIN_SETTING2_MSB 19
+#define PHY_BB_TPC_1_PD_GAIN_SETTING2_LSB 18
+#define PHY_BB_TPC_1_PD_GAIN_SETTING2_MASK 0x000c0000
+#define PHY_BB_TPC_1_PD_GAIN_SETTING2_GET(x) (((x) & 0x000c0000) >> 18)
+#define PHY_BB_TPC_1_PD_GAIN_SETTING2_SET(x) (((x) << 18) & 0x000c0000)
+#define PHY_BB_TPC_1_PD_GAIN_SETTING3_MSB 21
+#define PHY_BB_TPC_1_PD_GAIN_SETTING3_LSB 20
+#define PHY_BB_TPC_1_PD_GAIN_SETTING3_MASK 0x00300000
+#define PHY_BB_TPC_1_PD_GAIN_SETTING3_GET(x) (((x) & 0x00300000) >> 20)
+#define PHY_BB_TPC_1_PD_GAIN_SETTING3_SET(x) (((x) << 20) & 0x00300000)
+#define PHY_BB_TPC_1_ENABLE_PD_CALIBRATE_MSB 22
+#define PHY_BB_TPC_1_ENABLE_PD_CALIBRATE_LSB 22
+#define PHY_BB_TPC_1_ENABLE_PD_CALIBRATE_MASK 0x00400000
+#define PHY_BB_TPC_1_ENABLE_PD_CALIBRATE_GET(x) (((x) & 0x00400000) >> 22)
+#define PHY_BB_TPC_1_ENABLE_PD_CALIBRATE_SET(x) (((x) << 22) & 0x00400000)
+#define PHY_BB_TPC_1_PD_CALIBRATE_WAIT_MSB 28
+#define PHY_BB_TPC_1_PD_CALIBRATE_WAIT_LSB 23
+#define PHY_BB_TPC_1_PD_CALIBRATE_WAIT_MASK 0x1f800000
+#define PHY_BB_TPC_1_PD_CALIBRATE_WAIT_GET(x) (((x) & 0x1f800000) >> 23)
+#define PHY_BB_TPC_1_PD_CALIBRATE_WAIT_SET(x) (((x) << 23) & 0x1f800000)
+#define PHY_BB_TPC_1_FORCE_PDADC_GAIN_MSB 29
+#define PHY_BB_TPC_1_FORCE_PDADC_GAIN_LSB 29
+#define PHY_BB_TPC_1_FORCE_PDADC_GAIN_MASK 0x20000000
+#define PHY_BB_TPC_1_FORCE_PDADC_GAIN_GET(x) (((x) & 0x20000000) >> 29)
+#define PHY_BB_TPC_1_FORCE_PDADC_GAIN_SET(x) (((x) << 29) & 0x20000000)
+#define PHY_BB_TPC_1_FORCED_PDADC_GAIN_MSB 31
+#define PHY_BB_TPC_1_FORCED_PDADC_GAIN_LSB 30
+#define PHY_BB_TPC_1_FORCED_PDADC_GAIN_MASK 0xc0000000
+#define PHY_BB_TPC_1_FORCED_PDADC_GAIN_GET(x) (((x) & 0xc0000000) >> 30)
+#define PHY_BB_TPC_1_FORCED_PDADC_GAIN_SET(x) (((x) << 30) & 0xc0000000)
+
+/* macros for BB_tpc_2 */
+#define PHY_BB_TPC_2_ADDRESS 0x0000a25c
+#define PHY_BB_TPC_2_OFFSET 0x0000a25c
+#define PHY_BB_TPC_2_TX_FRAME_TO_PDADC_ON_MSB 7
+#define PHY_BB_TPC_2_TX_FRAME_TO_PDADC_ON_LSB 0
+#define PHY_BB_TPC_2_TX_FRAME_TO_PDADC_ON_MASK 0x000000ff
+#define PHY_BB_TPC_2_TX_FRAME_TO_PDADC_ON_GET(x) (((x) & 0x000000ff) >> 0)
+#define PHY_BB_TPC_2_TX_FRAME_TO_PDADC_ON_SET(x) (((x) << 0) & 0x000000ff)
+#define PHY_BB_TPC_2_TX_FRAME_TO_PD_ACC_OFDM_MSB 15
+#define PHY_BB_TPC_2_TX_FRAME_TO_PD_ACC_OFDM_LSB 8
+#define PHY_BB_TPC_2_TX_FRAME_TO_PD_ACC_OFDM_MASK 0x0000ff00
+#define PHY_BB_TPC_2_TX_FRAME_TO_PD_ACC_OFDM_GET(x) (((x) & 0x0000ff00) >> 8)
+#define PHY_BB_TPC_2_TX_FRAME_TO_PD_ACC_OFDM_SET(x) (((x) << 8) & 0x0000ff00)
+#define PHY_BB_TPC_2_TX_FRAME_TO_PD_ACC_CCK_MSB 23
+#define PHY_BB_TPC_2_TX_FRAME_TO_PD_ACC_CCK_LSB 16
+#define PHY_BB_TPC_2_TX_FRAME_TO_PD_ACC_CCK_MASK 0x00ff0000
+#define PHY_BB_TPC_2_TX_FRAME_TO_PD_ACC_CCK_GET(x) (((x) & 0x00ff0000) >> 16)
+#define PHY_BB_TPC_2_TX_FRAME_TO_PD_ACC_CCK_SET(x) (((x) << 16) & 0x00ff0000)
+
+/* macros for BB_tpc_3 */
+#define PHY_BB_TPC_3_ADDRESS 0x0000a260
+#define PHY_BB_TPC_3_OFFSET 0x0000a260
+#define PHY_BB_TPC_3_TX_END_TO_PDADC_ON_MSB 7
+#define PHY_BB_TPC_3_TX_END_TO_PDADC_ON_LSB 0
+#define PHY_BB_TPC_3_TX_END_TO_PDADC_ON_MASK 0x000000ff
+#define PHY_BB_TPC_3_TX_END_TO_PDADC_ON_GET(x) (((x) & 0x000000ff) >> 0)
+#define PHY_BB_TPC_3_TX_END_TO_PDADC_ON_SET(x) (((x) << 0) & 0x000000ff)
+#define PHY_BB_TPC_3_TX_END_TO_PD_ACC_ON_MSB 15
+#define PHY_BB_TPC_3_TX_END_TO_PD_ACC_ON_LSB 8
+#define PHY_BB_TPC_3_TX_END_TO_PD_ACC_ON_MASK 0x0000ff00
+#define PHY_BB_TPC_3_TX_END_TO_PD_ACC_ON_GET(x) (((x) & 0x0000ff00) >> 8)
+#define PHY_BB_TPC_3_TX_END_TO_PD_ACC_ON_SET(x) (((x) << 8) & 0x0000ff00)
+#define PHY_BB_TPC_3_PD_ACC_WINDOW_DC_OFF_MSB 18
+#define PHY_BB_TPC_3_PD_ACC_WINDOW_DC_OFF_LSB 16
+#define PHY_BB_TPC_3_PD_ACC_WINDOW_DC_OFF_MASK 0x00070000
+#define PHY_BB_TPC_3_PD_ACC_WINDOW_DC_OFF_GET(x) (((x) & 0x00070000) >> 16)
+#define PHY_BB_TPC_3_PD_ACC_WINDOW_DC_OFF_SET(x) (((x) << 16) & 0x00070000)
+#define PHY_BB_TPC_3_PD_ACC_WINDOW_CAL_MSB 21
+#define PHY_BB_TPC_3_PD_ACC_WINDOW_CAL_LSB 19
+#define PHY_BB_TPC_3_PD_ACC_WINDOW_CAL_MASK 0x00380000
+#define PHY_BB_TPC_3_PD_ACC_WINDOW_CAL_GET(x) (((x) & 0x00380000) >> 19)
+#define PHY_BB_TPC_3_PD_ACC_WINDOW_CAL_SET(x) (((x) << 19) & 0x00380000)
+#define PHY_BB_TPC_3_PD_ACC_WINDOW_OFDM_MSB 24
+#define PHY_BB_TPC_3_PD_ACC_WINDOW_OFDM_LSB 22
+#define PHY_BB_TPC_3_PD_ACC_WINDOW_OFDM_MASK 0x01c00000
+#define PHY_BB_TPC_3_PD_ACC_WINDOW_OFDM_GET(x) (((x) & 0x01c00000) >> 22)
+#define PHY_BB_TPC_3_PD_ACC_WINDOW_OFDM_SET(x) (((x) << 22) & 0x01c00000)
+#define PHY_BB_TPC_3_PD_ACC_WINDOW_CCK_MSB 27
+#define PHY_BB_TPC_3_PD_ACC_WINDOW_CCK_LSB 25
+#define PHY_BB_TPC_3_PD_ACC_WINDOW_CCK_MASK 0x0e000000
+#define PHY_BB_TPC_3_PD_ACC_WINDOW_CCK_GET(x) (((x) & 0x0e000000) >> 25)
+#define PHY_BB_TPC_3_PD_ACC_WINDOW_CCK_SET(x) (((x) << 25) & 0x0e000000)
+#define PHY_BB_TPC_3_TPC_CLK_GATE_ENABLE_MSB 31
+#define PHY_BB_TPC_3_TPC_CLK_GATE_ENABLE_LSB 31
+#define PHY_BB_TPC_3_TPC_CLK_GATE_ENABLE_MASK 0x80000000
+#define PHY_BB_TPC_3_TPC_CLK_GATE_ENABLE_GET(x) (((x) & 0x80000000) >> 31)
+#define PHY_BB_TPC_3_TPC_CLK_GATE_ENABLE_SET(x) (((x) << 31) & 0x80000000)
+
+/* macros for BB_tpc_4_b0 */
+#define PHY_BB_TPC_4_B0_ADDRESS 0x0000a264
+#define PHY_BB_TPC_4_B0_OFFSET 0x0000a264
+#define PHY_BB_TPC_4_B0_PD_AVG_VALID_0_MSB 0
+#define PHY_BB_TPC_4_B0_PD_AVG_VALID_0_LSB 0
+#define PHY_BB_TPC_4_B0_PD_AVG_VALID_0_MASK 0x00000001
+#define PHY_BB_TPC_4_B0_PD_AVG_VALID_0_GET(x) (((x) & 0x00000001) >> 0)
+#define PHY_BB_TPC_4_B0_PD_AVG_OUT_0_MSB 8
+#define PHY_BB_TPC_4_B0_PD_AVG_OUT_0_LSB 1
+#define PHY_BB_TPC_4_B0_PD_AVG_OUT_0_MASK 0x000001fe
+#define PHY_BB_TPC_4_B0_PD_AVG_OUT_0_GET(x) (((x) & 0x000001fe) >> 1)
+#define PHY_BB_TPC_4_B0_DAC_GAIN_0_MSB 13
+#define PHY_BB_TPC_4_B0_DAC_GAIN_0_LSB 9
+#define PHY_BB_TPC_4_B0_DAC_GAIN_0_MASK 0x00003e00
+#define PHY_BB_TPC_4_B0_DAC_GAIN_0_GET(x) (((x) & 0x00003e00) >> 9)
+#define PHY_BB_TPC_4_B0_TX_GAIN_SETTING_0_MSB 19
+#define PHY_BB_TPC_4_B0_TX_GAIN_SETTING_0_LSB 14
+#define PHY_BB_TPC_4_B0_TX_GAIN_SETTING_0_MASK 0x000fc000
+#define PHY_BB_TPC_4_B0_TX_GAIN_SETTING_0_GET(x) (((x) & 0x000fc000) >> 14)
+#define PHY_BB_TPC_4_B0_RATE_SENT_0_MSB 24
+#define PHY_BB_TPC_4_B0_RATE_SENT_0_LSB 20
+#define PHY_BB_TPC_4_B0_RATE_SENT_0_MASK 0x01f00000
+#define PHY_BB_TPC_4_B0_RATE_SENT_0_GET(x) (((x) & 0x01f00000) >> 20)
+#define PHY_BB_TPC_4_B0_ERROR_EST_UPDATE_POWER_THRESH_MSB 30
+#define PHY_BB_TPC_4_B0_ERROR_EST_UPDATE_POWER_THRESH_LSB 25
+#define PHY_BB_TPC_4_B0_ERROR_EST_UPDATE_POWER_THRESH_MASK 0x7e000000
+#define PHY_BB_TPC_4_B0_ERROR_EST_UPDATE_POWER_THRESH_GET(x) (((x) & 0x7e000000) >> 25)
+#define PHY_BB_TPC_4_B0_ERROR_EST_UPDATE_POWER_THRESH_SET(x) (((x) << 25) & 0x7e000000)
+
+/* macros for BB_analog_swap */
+#define PHY_BB_ANALOG_SWAP_ADDRESS 0x0000a268
+#define PHY_BB_ANALOG_SWAP_OFFSET 0x0000a268
+#define PHY_BB_ANALOG_SWAP_ANALOG_RX_SWAP_CNTL_MSB 2
+#define PHY_BB_ANALOG_SWAP_ANALOG_RX_SWAP_CNTL_LSB 0
+#define PHY_BB_ANALOG_SWAP_ANALOG_RX_SWAP_CNTL_MASK 0x00000007
+#define PHY_BB_ANALOG_SWAP_ANALOG_RX_SWAP_CNTL_GET(x) (((x) & 0x00000007) >> 0)
+#define PHY_BB_ANALOG_SWAP_ANALOG_RX_SWAP_CNTL_SET(x) (((x) << 0) & 0x00000007)
+#define PHY_BB_ANALOG_SWAP_ANALOG_TX_SWAP_CNTL_MSB 5
+#define PHY_BB_ANALOG_SWAP_ANALOG_TX_SWAP_CNTL_LSB 3
+#define PHY_BB_ANALOG_SWAP_ANALOG_TX_SWAP_CNTL_MASK 0x00000038
+#define PHY_BB_ANALOG_SWAP_ANALOG_TX_SWAP_CNTL_GET(x) (((x) & 0x00000038) >> 3)
+#define PHY_BB_ANALOG_SWAP_ANALOG_TX_SWAP_CNTL_SET(x) (((x) << 3) & 0x00000038)
+#define PHY_BB_ANALOG_SWAP_SWAP_ALT_CHN_MSB 6
+#define PHY_BB_ANALOG_SWAP_SWAP_ALT_CHN_LSB 6
+#define PHY_BB_ANALOG_SWAP_SWAP_ALT_CHN_MASK 0x00000040
+#define PHY_BB_ANALOG_SWAP_SWAP_ALT_CHN_GET(x) (((x) & 0x00000040) >> 6)
+#define PHY_BB_ANALOG_SWAP_SWAP_ALT_CHN_SET(x) (((x) << 6) & 0x00000040)
+#define PHY_BB_ANALOG_SWAP_ANALOG_DC_DAC_POLARITY_MSB 7
+#define PHY_BB_ANALOG_SWAP_ANALOG_DC_DAC_POLARITY_LSB 7
+#define PHY_BB_ANALOG_SWAP_ANALOG_DC_DAC_POLARITY_MASK 0x00000080
+#define PHY_BB_ANALOG_SWAP_ANALOG_DC_DAC_POLARITY_GET(x) (((x) & 0x00000080) >> 7)
+#define PHY_BB_ANALOG_SWAP_ANALOG_DC_DAC_POLARITY_SET(x) (((x) << 7) & 0x00000080)
+#define PHY_BB_ANALOG_SWAP_ANALOG_PKDET_DAC_POLARITY_MSB 8
+#define PHY_BB_ANALOG_SWAP_ANALOG_PKDET_DAC_POLARITY_LSB 8
+#define PHY_BB_ANALOG_SWAP_ANALOG_PKDET_DAC_POLARITY_MASK 0x00000100
+#define PHY_BB_ANALOG_SWAP_ANALOG_PKDET_DAC_POLARITY_GET(x) (((x) & 0x00000100) >> 8)
+#define PHY_BB_ANALOG_SWAP_ANALOG_PKDET_DAC_POLARITY_SET(x) (((x) << 8) & 0x00000100)
+
+/* macros for BB_tpc_5_b0 */
+#define PHY_BB_TPC_5_B0_ADDRESS 0x0000a26c
+#define PHY_BB_TPC_5_B0_OFFSET 0x0000a26c
+#define PHY_BB_TPC_5_B0_PD_GAIN_OVERLAP_MSB 3
+#define PHY_BB_TPC_5_B0_PD_GAIN_OVERLAP_LSB 0
+#define PHY_BB_TPC_5_B0_PD_GAIN_OVERLAP_MASK 0x0000000f
+#define PHY_BB_TPC_5_B0_PD_GAIN_OVERLAP_GET(x) (((x) & 0x0000000f) >> 0)
+#define PHY_BB_TPC_5_B0_PD_GAIN_OVERLAP_SET(x) (((x) << 0) & 0x0000000f)
+#define PHY_BB_TPC_5_B0_PD_GAIN_BOUNDARY_1_0_MSB 9
+#define PHY_BB_TPC_5_B0_PD_GAIN_BOUNDARY_1_0_LSB 4
+#define PHY_BB_TPC_5_B0_PD_GAIN_BOUNDARY_1_0_MASK 0x000003f0
+#define PHY_BB_TPC_5_B0_PD_GAIN_BOUNDARY_1_0_GET(x) (((x) & 0x000003f0) >> 4)
+#define PHY_BB_TPC_5_B0_PD_GAIN_BOUNDARY_1_0_SET(x) (((x) << 4) & 0x000003f0)
+#define PHY_BB_TPC_5_B0_PD_GAIN_BOUNDARY_2_0_MSB 15
+#define PHY_BB_TPC_5_B0_PD_GAIN_BOUNDARY_2_0_LSB 10
+#define PHY_BB_TPC_5_B0_PD_GAIN_BOUNDARY_2_0_MASK 0x0000fc00
+#define PHY_BB_TPC_5_B0_PD_GAIN_BOUNDARY_2_0_GET(x) (((x) & 0x0000fc00) >> 10)
+#define PHY_BB_TPC_5_B0_PD_GAIN_BOUNDARY_2_0_SET(x) (((x) << 10) & 0x0000fc00)
+#define PHY_BB_TPC_5_B0_PD_GAIN_BOUNDARY_3_0_MSB 21
+#define PHY_BB_TPC_5_B0_PD_GAIN_BOUNDARY_3_0_LSB 16
+#define PHY_BB_TPC_5_B0_PD_GAIN_BOUNDARY_3_0_MASK 0x003f0000
+#define PHY_BB_TPC_5_B0_PD_GAIN_BOUNDARY_3_0_GET(x) (((x) & 0x003f0000) >> 16)
+#define PHY_BB_TPC_5_B0_PD_GAIN_BOUNDARY_3_0_SET(x) (((x) << 16) & 0x003f0000)
+#define PHY_BB_TPC_5_B0_PD_GAIN_BOUNDARY_4_0_MSB 27
+#define PHY_BB_TPC_5_B0_PD_GAIN_BOUNDARY_4_0_LSB 22
+#define PHY_BB_TPC_5_B0_PD_GAIN_BOUNDARY_4_0_MASK 0x0fc00000
+#define PHY_BB_TPC_5_B0_PD_GAIN_BOUNDARY_4_0_GET(x) (((x) & 0x0fc00000) >> 22)
+#define PHY_BB_TPC_5_B0_PD_GAIN_BOUNDARY_4_0_SET(x) (((x) << 22) & 0x0fc00000)
+
+/* macros for BB_tpc_6_b0 */
+#define PHY_BB_TPC_6_B0_ADDRESS 0x0000a270
+#define PHY_BB_TPC_6_B0_OFFSET 0x0000a270
+#define PHY_BB_TPC_6_B0_PD_DAC_SETTING_1_0_MSB 5
+#define PHY_BB_TPC_6_B0_PD_DAC_SETTING_1_0_LSB 0
+#define PHY_BB_TPC_6_B0_PD_DAC_SETTING_1_0_MASK 0x0000003f
+#define PHY_BB_TPC_6_B0_PD_DAC_SETTING_1_0_GET(x) (((x) & 0x0000003f) >> 0)
+#define PHY_BB_TPC_6_B0_PD_DAC_SETTING_1_0_SET(x) (((x) << 0) & 0x0000003f)
+#define PHY_BB_TPC_6_B0_PD_DAC_SETTING_2_0_MSB 11
+#define PHY_BB_TPC_6_B0_PD_DAC_SETTING_2_0_LSB 6
+#define PHY_BB_TPC_6_B0_PD_DAC_SETTING_2_0_MASK 0x00000fc0
+#define PHY_BB_TPC_6_B0_PD_DAC_SETTING_2_0_GET(x) (((x) & 0x00000fc0) >> 6)
+#define PHY_BB_TPC_6_B0_PD_DAC_SETTING_2_0_SET(x) (((x) << 6) & 0x00000fc0)
+#define PHY_BB_TPC_6_B0_PD_DAC_SETTING_3_0_MSB 17
+#define PHY_BB_TPC_6_B0_PD_DAC_SETTING_3_0_LSB 12
+#define PHY_BB_TPC_6_B0_PD_DAC_SETTING_3_0_MASK 0x0003f000
+#define PHY_BB_TPC_6_B0_PD_DAC_SETTING_3_0_GET(x) (((x) & 0x0003f000) >> 12)
+#define PHY_BB_TPC_6_B0_PD_DAC_SETTING_3_0_SET(x) (((x) << 12) & 0x0003f000)
+#define PHY_BB_TPC_6_B0_PD_DAC_SETTING_4_0_MSB 23
+#define PHY_BB_TPC_6_B0_PD_DAC_SETTING_4_0_LSB 18
+#define PHY_BB_TPC_6_B0_PD_DAC_SETTING_4_0_MASK 0x00fc0000
+#define PHY_BB_TPC_6_B0_PD_DAC_SETTING_4_0_GET(x) (((x) & 0x00fc0000) >> 18)
+#define PHY_BB_TPC_6_B0_PD_DAC_SETTING_4_0_SET(x) (((x) << 18) & 0x00fc0000)
+#define PHY_BB_TPC_6_B0_ERROR_EST_MODE_MSB 25
+#define PHY_BB_TPC_6_B0_ERROR_EST_MODE_LSB 24
+#define PHY_BB_TPC_6_B0_ERROR_EST_MODE_MASK 0x03000000
+#define PHY_BB_TPC_6_B0_ERROR_EST_MODE_GET(x) (((x) & 0x03000000) >> 24)
+#define PHY_BB_TPC_6_B0_ERROR_EST_MODE_SET(x) (((x) << 24) & 0x03000000)
+#define PHY_BB_TPC_6_B0_ERROR_EST_FILTER_COEFF_MSB 28
+#define PHY_BB_TPC_6_B0_ERROR_EST_FILTER_COEFF_LSB 26
+#define PHY_BB_TPC_6_B0_ERROR_EST_FILTER_COEFF_MASK 0x1c000000
+#define PHY_BB_TPC_6_B0_ERROR_EST_FILTER_COEFF_GET(x) (((x) & 0x1c000000) >> 26)
+#define PHY_BB_TPC_6_B0_ERROR_EST_FILTER_COEFF_SET(x) (((x) << 26) & 0x1c000000)
+
+/* macros for BB_tpc_7 */
+#define PHY_BB_TPC_7_ADDRESS 0x0000a274
+#define PHY_BB_TPC_7_OFFSET 0x0000a274
+#define PHY_BB_TPC_7_TX_GAIN_TABLE_MAX_MSB 5
+#define PHY_BB_TPC_7_TX_GAIN_TABLE_MAX_LSB 0
+#define PHY_BB_TPC_7_TX_GAIN_TABLE_MAX_MASK 0x0000003f
+#define PHY_BB_TPC_7_TX_GAIN_TABLE_MAX_GET(x) (((x) & 0x0000003f) >> 0)
+#define PHY_BB_TPC_7_TX_GAIN_TABLE_MAX_SET(x) (((x) << 0) & 0x0000003f)
+#define PHY_BB_TPC_7_INIT_TX_GAIN_SETTING_MSB 11
+#define PHY_BB_TPC_7_INIT_TX_GAIN_SETTING_LSB 6
+#define PHY_BB_TPC_7_INIT_TX_GAIN_SETTING_MASK 0x00000fc0
+#define PHY_BB_TPC_7_INIT_TX_GAIN_SETTING_GET(x) (((x) & 0x00000fc0) >> 6)
+#define PHY_BB_TPC_7_INIT_TX_GAIN_SETTING_SET(x) (((x) << 6) & 0x00000fc0)
+#define PHY_BB_TPC_7_EN_CL_GAIN_MOD_MSB 12
+#define PHY_BB_TPC_7_EN_CL_GAIN_MOD_LSB 12
+#define PHY_BB_TPC_7_EN_CL_GAIN_MOD_MASK 0x00001000
+#define PHY_BB_TPC_7_EN_CL_GAIN_MOD_GET(x) (((x) & 0x00001000) >> 12)
+#define PHY_BB_TPC_7_EN_CL_GAIN_MOD_SET(x) (((x) << 12) & 0x00001000)
+#define PHY_BB_TPC_7_USE_TX_PD_IN_XPA_MSB 13
+#define PHY_BB_TPC_7_USE_TX_PD_IN_XPA_LSB 13
+#define PHY_BB_TPC_7_USE_TX_PD_IN_XPA_MASK 0x00002000
+#define PHY_BB_TPC_7_USE_TX_PD_IN_XPA_GET(x) (((x) & 0x00002000) >> 13)
+#define PHY_BB_TPC_7_USE_TX_PD_IN_XPA_SET(x) (((x) << 13) & 0x00002000)
+#define PHY_BB_TPC_7_EXTEND_TX_FRAME_FOR_TPC_MSB 14
+#define PHY_BB_TPC_7_EXTEND_TX_FRAME_FOR_TPC_LSB 14
+#define PHY_BB_TPC_7_EXTEND_TX_FRAME_FOR_TPC_MASK 0x00004000
+#define PHY_BB_TPC_7_EXTEND_TX_FRAME_FOR_TPC_GET(x) (((x) & 0x00004000) >> 14)
+#define PHY_BB_TPC_7_EXTEND_TX_FRAME_FOR_TPC_SET(x) (((x) << 14) & 0x00004000)
+#define PHY_BB_TPC_7_USE_INIT_TX_GAIN_SETTING_AFTER_WARM_RESET_MSB 15
+#define PHY_BB_TPC_7_USE_INIT_TX_GAIN_SETTING_AFTER_WARM_RESET_LSB 15
+#define PHY_BB_TPC_7_USE_INIT_TX_GAIN_SETTING_AFTER_WARM_RESET_MASK 0x00008000
+#define PHY_BB_TPC_7_USE_INIT_TX_GAIN_SETTING_AFTER_WARM_RESET_GET(x) (((x) & 0x00008000) >> 15)
+#define PHY_BB_TPC_7_USE_INIT_TX_GAIN_SETTING_AFTER_WARM_RESET_SET(x) (((x) << 15) & 0x00008000)
+
+/* macros for BB_tpc_8 */
+#define PHY_BB_TPC_8_ADDRESS 0x0000a278
+#define PHY_BB_TPC_8_OFFSET 0x0000a278
+#define PHY_BB_TPC_8_DESIRED_SCALE_0_MSB 4
+#define PHY_BB_TPC_8_DESIRED_SCALE_0_LSB 0
+#define PHY_BB_TPC_8_DESIRED_SCALE_0_MASK 0x0000001f
+#define PHY_BB_TPC_8_DESIRED_SCALE_0_GET(x) (((x) & 0x0000001f) >> 0)
+#define PHY_BB_TPC_8_DESIRED_SCALE_0_SET(x) (((x) << 0) & 0x0000001f)
+#define PHY_BB_TPC_8_DESIRED_SCALE_1_MSB 9
+#define PHY_BB_TPC_8_DESIRED_SCALE_1_LSB 5
+#define PHY_BB_TPC_8_DESIRED_SCALE_1_MASK 0x000003e0
+#define PHY_BB_TPC_8_DESIRED_SCALE_1_GET(x) (((x) & 0x000003e0) >> 5)
+#define PHY_BB_TPC_8_DESIRED_SCALE_1_SET(x) (((x) << 5) & 0x000003e0)
+#define PHY_BB_TPC_8_DESIRED_SCALE_2_MSB 14
+#define PHY_BB_TPC_8_DESIRED_SCALE_2_LSB 10
+#define PHY_BB_TPC_8_DESIRED_SCALE_2_MASK 0x00007c00
+#define PHY_BB_TPC_8_DESIRED_SCALE_2_GET(x) (((x) & 0x00007c00) >> 10)
+#define PHY_BB_TPC_8_DESIRED_SCALE_2_SET(x) (((x) << 10) & 0x00007c00)
+#define PHY_BB_TPC_8_DESIRED_SCALE_3_MSB 19
+#define PHY_BB_TPC_8_DESIRED_SCALE_3_LSB 15
+#define PHY_BB_TPC_8_DESIRED_SCALE_3_MASK 0x000f8000
+#define PHY_BB_TPC_8_DESIRED_SCALE_3_GET(x) (((x) & 0x000f8000) >> 15)
+#define PHY_BB_TPC_8_DESIRED_SCALE_3_SET(x) (((x) << 15) & 0x000f8000)
+#define PHY_BB_TPC_8_DESIRED_SCALE_4_MSB 24
+#define PHY_BB_TPC_8_DESIRED_SCALE_4_LSB 20
+#define PHY_BB_TPC_8_DESIRED_SCALE_4_MASK 0x01f00000
+#define PHY_BB_TPC_8_DESIRED_SCALE_4_GET(x) (((x) & 0x01f00000) >> 20)
+#define PHY_BB_TPC_8_DESIRED_SCALE_4_SET(x) (((x) << 20) & 0x01f00000)
+#define PHY_BB_TPC_8_DESIRED_SCALE_5_MSB 29
+#define PHY_BB_TPC_8_DESIRED_SCALE_5_LSB 25
+#define PHY_BB_TPC_8_DESIRED_SCALE_5_MASK 0x3e000000
+#define PHY_BB_TPC_8_DESIRED_SCALE_5_GET(x) (((x) & 0x3e000000) >> 25)
+#define PHY_BB_TPC_8_DESIRED_SCALE_5_SET(x) (((x) << 25) & 0x3e000000)
+
+/* macros for BB_tpc_9 */
+#define PHY_BB_TPC_9_ADDRESS 0x0000a27c
+#define PHY_BB_TPC_9_OFFSET 0x0000a27c
+#define PHY_BB_TPC_9_DESIRED_SCALE_6_MSB 4
+#define PHY_BB_TPC_9_DESIRED_SCALE_6_LSB 0
+#define PHY_BB_TPC_9_DESIRED_SCALE_6_MASK 0x0000001f
+#define PHY_BB_TPC_9_DESIRED_SCALE_6_GET(x) (((x) & 0x0000001f) >> 0)
+#define PHY_BB_TPC_9_DESIRED_SCALE_6_SET(x) (((x) << 0) & 0x0000001f)
+#define PHY_BB_TPC_9_DESIRED_SCALE_7_MSB 9
+#define PHY_BB_TPC_9_DESIRED_SCALE_7_LSB 5
+#define PHY_BB_TPC_9_DESIRED_SCALE_7_MASK 0x000003e0
+#define PHY_BB_TPC_9_DESIRED_SCALE_7_GET(x) (((x) & 0x000003e0) >> 5)
+#define PHY_BB_TPC_9_DESIRED_SCALE_7_SET(x) (((x) << 5) & 0x000003e0)
+#define PHY_BB_TPC_9_DESIRED_SCALE_CCK_MSB 14
+#define PHY_BB_TPC_9_DESIRED_SCALE_CCK_LSB 10
+#define PHY_BB_TPC_9_DESIRED_SCALE_CCK_MASK 0x00007c00
+#define PHY_BB_TPC_9_DESIRED_SCALE_CCK_GET(x) (((x) & 0x00007c00) >> 10)
+#define PHY_BB_TPC_9_DESIRED_SCALE_CCK_SET(x) (((x) << 10) & 0x00007c00)
+#define PHY_BB_TPC_9_EN_PD_DC_OFFSET_THR_MSB 20
+#define PHY_BB_TPC_9_EN_PD_DC_OFFSET_THR_LSB 20
+#define PHY_BB_TPC_9_EN_PD_DC_OFFSET_THR_MASK 0x00100000
+#define PHY_BB_TPC_9_EN_PD_DC_OFFSET_THR_GET(x) (((x) & 0x00100000) >> 20)
+#define PHY_BB_TPC_9_EN_PD_DC_OFFSET_THR_SET(x) (((x) << 20) & 0x00100000)
+#define PHY_BB_TPC_9_PD_DC_OFFSET_THR_MSB 26
+#define PHY_BB_TPC_9_PD_DC_OFFSET_THR_LSB 21
+#define PHY_BB_TPC_9_PD_DC_OFFSET_THR_MASK 0x07e00000
+#define PHY_BB_TPC_9_PD_DC_OFFSET_THR_GET(x) (((x) & 0x07e00000) >> 21)
+#define PHY_BB_TPC_9_PD_DC_OFFSET_THR_SET(x) (((x) << 21) & 0x07e00000)
+#define PHY_BB_TPC_9_WAIT_CALTX_SETTLE_MSB 30
+#define PHY_BB_TPC_9_WAIT_CALTX_SETTLE_LSB 27
+#define PHY_BB_TPC_9_WAIT_CALTX_SETTLE_MASK 0x78000000
+#define PHY_BB_TPC_9_WAIT_CALTX_SETTLE_GET(x) (((x) & 0x78000000) >> 27)
+#define PHY_BB_TPC_9_WAIT_CALTX_SETTLE_SET(x) (((x) << 27) & 0x78000000)
+#define PHY_BB_TPC_9_DISABLE_PDADC_RESIDUAL_DC_REMOVAL_MSB 31
+#define PHY_BB_TPC_9_DISABLE_PDADC_RESIDUAL_DC_REMOVAL_LSB 31
+#define PHY_BB_TPC_9_DISABLE_PDADC_RESIDUAL_DC_REMOVAL_MASK 0x80000000
+#define PHY_BB_TPC_9_DISABLE_PDADC_RESIDUAL_DC_REMOVAL_GET(x) (((x) & 0x80000000) >> 31)
+#define PHY_BB_TPC_9_DISABLE_PDADC_RESIDUAL_DC_REMOVAL_SET(x) (((x) << 31) & 0x80000000)
+
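The GET/SET pairs in these blocks all follow one pattern: MASK selects the field, SET shifts a caller-supplied value into field position, and GET extracts it back out of a register word. Below is a minimal illustrative sketch (not part of the generated header) of how such a pair is typically used in a read-modify-write of the 5-bit desired_scale_cck field in BB_tpc_9; reg_read()/reg_write() and the function name are hypothetical stand-ins for whatever MMIO accessors the surrounding driver actually provides.

#include <stdint.h>

/* Hypothetical MMIO accessors -- placeholders for the driver's real register I/O. */
extern uint32_t reg_read(uint32_t address);
extern void reg_write(uint32_t address, uint32_t value);

/* Read-modify-write of the desired_scale_cck field (bits 14:10) of BB_tpc_9. */
static void set_desired_scale_cck(uint32_t scale)
{
	uint32_t val = reg_read(PHY_BB_TPC_9_ADDRESS);

	val &= ~PHY_BB_TPC_9_DESIRED_SCALE_CCK_MASK;      /* clear the old field value  */
	val |= PHY_BB_TPC_9_DESIRED_SCALE_CCK_SET(scale); /* shift the new value in     */

	reg_write(PHY_BB_TPC_9_ADDRESS, val);
}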
+/* macros for BB_pdadc_tab_b0 */
+#define PHY_BB_PDADC_TAB_B0_ADDRESS 0x0000a280
+#define PHY_BB_PDADC_TAB_B0_OFFSET 0x0000a280
+#define PHY_BB_PDADC_TAB_B0_TAB_ENTRY_MSB 31
+#define PHY_BB_PDADC_TAB_B0_TAB_ENTRY_LSB 0
+#define PHY_BB_PDADC_TAB_B0_TAB_ENTRY_MASK 0xffffffff
+#define PHY_BB_PDADC_TAB_B0_TAB_ENTRY_SET(x) (((x) << 0) & 0xffffffff)
+
+/* macros for BB_cl_tab_b0 */
+#define PHY_BB_CL_TAB_B0_ADDRESS 0x0000a300
+#define PHY_BB_CL_TAB_B0_OFFSET 0x0000a300
+#define PHY_BB_CL_TAB_B0_CL_GAIN_MOD_MSB 4
+#define PHY_BB_CL_TAB_B0_CL_GAIN_MOD_LSB 0
+#define PHY_BB_CL_TAB_B0_CL_GAIN_MOD_MASK 0x0000001f
+#define PHY_BB_CL_TAB_B0_CL_GAIN_MOD_GET(x) (((x) & 0x0000001f) >> 0)
+#define PHY_BB_CL_TAB_B0_CL_GAIN_MOD_SET(x) (((x) << 0) & 0x0000001f)
+#define PHY_BB_CL_TAB_B0_CARR_LK_DC_ADD_Q_MSB 15
+#define PHY_BB_CL_TAB_B0_CARR_LK_DC_ADD_Q_LSB 5
+#define PHY_BB_CL_TAB_B0_CARR_LK_DC_ADD_Q_MASK 0x0000ffe0
+#define PHY_BB_CL_TAB_B0_CARR_LK_DC_ADD_Q_GET(x) (((x) & 0x0000ffe0) >> 5)
+#define PHY_BB_CL_TAB_B0_CARR_LK_DC_ADD_Q_SET(x) (((x) << 5) & 0x0000ffe0)
+#define PHY_BB_CL_TAB_B0_CARR_LK_DC_ADD_I_MSB 26
+#define PHY_BB_CL_TAB_B0_CARR_LK_DC_ADD_I_LSB 16
+#define PHY_BB_CL_TAB_B0_CARR_LK_DC_ADD_I_MASK 0x07ff0000
+#define PHY_BB_CL_TAB_B0_CARR_LK_DC_ADD_I_GET(x) (((x) & 0x07ff0000) >> 16)
+#define PHY_BB_CL_TAB_B0_CARR_LK_DC_ADD_I_SET(x) (((x) << 16) & 0x07ff0000)
+#define PHY_BB_CL_TAB_B0_BB_GAIN_MSB 30
+#define PHY_BB_CL_TAB_B0_BB_GAIN_LSB 27
+#define PHY_BB_CL_TAB_B0_BB_GAIN_MASK 0x78000000
+#define PHY_BB_CL_TAB_B0_BB_GAIN_GET(x) (((x) & 0x78000000) >> 27)
+#define PHY_BB_CL_TAB_B0_BB_GAIN_SET(x) (((x) << 27) & 0x78000000)
+
+/* macros for BB_cl_map_0_b0 */
+#define PHY_BB_CL_MAP_0_B0_ADDRESS 0x0000a340
+#define PHY_BB_CL_MAP_0_B0_OFFSET 0x0000a340
+#define PHY_BB_CL_MAP_0_B0_CL_MAP_0_MSB 31
+#define PHY_BB_CL_MAP_0_B0_CL_MAP_0_LSB 0
+#define PHY_BB_CL_MAP_0_B0_CL_MAP_0_MASK 0xffffffff
+#define PHY_BB_CL_MAP_0_B0_CL_MAP_0_GET(x) (((x) & 0xffffffff) >> 0)
+#define PHY_BB_CL_MAP_0_B0_CL_MAP_0_SET(x) (((x) << 0) & 0xffffffff)
+
+/* macros for BB_cl_map_1_b0 */
+#define PHY_BB_CL_MAP_1_B0_ADDRESS 0x0000a344
+#define PHY_BB_CL_MAP_1_B0_OFFSET 0x0000a344
+#define PHY_BB_CL_MAP_1_B0_CL_MAP_1_MSB 31
+#define PHY_BB_CL_MAP_1_B0_CL_MAP_1_LSB 0
+#define PHY_BB_CL_MAP_1_B0_CL_MAP_1_MASK 0xffffffff
+#define PHY_BB_CL_MAP_1_B0_CL_MAP_1_GET(x) (((x) & 0xffffffff) >> 0)
+#define PHY_BB_CL_MAP_1_B0_CL_MAP_1_SET(x) (((x) << 0) & 0xffffffff)
+
+/* macros for BB_cl_map_2_b0 */
+#define PHY_BB_CL_MAP_2_B0_ADDRESS 0x0000a348
+#define PHY_BB_CL_MAP_2_B0_OFFSET 0x0000a348
+#define PHY_BB_CL_MAP_2_B0_CL_MAP_2_MSB 31
+#define PHY_BB_CL_MAP_2_B0_CL_MAP_2_LSB 0
+#define PHY_BB_CL_MAP_2_B0_CL_MAP_2_MASK 0xffffffff
+#define PHY_BB_CL_MAP_2_B0_CL_MAP_2_GET(x) (((x) & 0xffffffff) >> 0)
+#define PHY_BB_CL_MAP_2_B0_CL_MAP_2_SET(x) (((x) << 0) & 0xffffffff)
+
+/* macros for BB_cl_map_3_b0 */
+#define PHY_BB_CL_MAP_3_B0_ADDRESS 0x0000a34c
+#define PHY_BB_CL_MAP_3_B0_OFFSET 0x0000a34c
+#define PHY_BB_CL_MAP_3_B0_CL_MAP_3_MSB 31
+#define PHY_BB_CL_MAP_3_B0_CL_MAP_3_LSB 0
+#define PHY_BB_CL_MAP_3_B0_CL_MAP_3_MASK 0xffffffff
+#define PHY_BB_CL_MAP_3_B0_CL_MAP_3_GET(x) (((x) & 0xffffffff) >> 0)
+#define PHY_BB_CL_MAP_3_B0_CL_MAP_3_SET(x) (((x) << 0) & 0xffffffff)
+
+/* macros for BB_cl_cal_ctrl */
+#define PHY_BB_CL_CAL_CTRL_ADDRESS 0x0000a358
+#define PHY_BB_CL_CAL_CTRL_OFFSET 0x0000a358
+#define PHY_BB_CL_CAL_CTRL_ENABLE_PARALLEL_CAL_MSB 0
+#define PHY_BB_CL_CAL_CTRL_ENABLE_PARALLEL_CAL_LSB 0
+#define PHY_BB_CL_CAL_CTRL_ENABLE_PARALLEL_CAL_MASK 0x00000001
+#define PHY_BB_CL_CAL_CTRL_ENABLE_PARALLEL_CAL_GET(x) (((x) & 0x00000001) >> 0)
+#define PHY_BB_CL_CAL_CTRL_ENABLE_PARALLEL_CAL_SET(x) (((x) << 0) & 0x00000001)
+#define PHY_BB_CL_CAL_CTRL_ENABLE_CL_CALIBRATE_MSB 1
+#define PHY_BB_CL_CAL_CTRL_ENABLE_CL_CALIBRATE_LSB 1
+#define PHY_BB_CL_CAL_CTRL_ENABLE_CL_CALIBRATE_MASK 0x00000002
+#define PHY_BB_CL_CAL_CTRL_ENABLE_CL_CALIBRATE_GET(x) (((x) & 0x00000002) >> 1)
+#define PHY_BB_CL_CAL_CTRL_ENABLE_CL_CALIBRATE_SET(x) (((x) << 1) & 0x00000002)
+#define PHY_BB_CL_CAL_CTRL_CF_CLC_TEST_POINT_MSB 3
+#define PHY_BB_CL_CAL_CTRL_CF_CLC_TEST_POINT_LSB 2
+#define PHY_BB_CL_CAL_CTRL_CF_CLC_TEST_POINT_MASK 0x0000000c
+#define PHY_BB_CL_CAL_CTRL_CF_CLC_TEST_POINT_GET(x) (((x) & 0x0000000c) >> 2)
+#define PHY_BB_CL_CAL_CTRL_CF_CLC_TEST_POINT_SET(x) (((x) << 2) & 0x0000000c)
+#define PHY_BB_CL_CAL_CTRL_CF_CLC_FORCED_PAGAIN_MSB 7
+#define PHY_BB_CL_CAL_CTRL_CF_CLC_FORCED_PAGAIN_LSB 4
+#define PHY_BB_CL_CAL_CTRL_CF_CLC_FORCED_PAGAIN_MASK 0x000000f0
+#define PHY_BB_CL_CAL_CTRL_CF_CLC_FORCED_PAGAIN_GET(x) (((x) & 0x000000f0) >> 4)
+#define PHY_BB_CL_CAL_CTRL_CF_CLC_FORCED_PAGAIN_SET(x) (((x) << 4) & 0x000000f0)
+#define PHY_BB_CL_CAL_CTRL_CARR_LEAK_MAX_OFFSET_MSB 15
+#define PHY_BB_CL_CAL_CTRL_CARR_LEAK_MAX_OFFSET_LSB 8
+#define PHY_BB_CL_CAL_CTRL_CARR_LEAK_MAX_OFFSET_MASK 0x0000ff00
+#define PHY_BB_CL_CAL_CTRL_CARR_LEAK_MAX_OFFSET_GET(x) (((x) & 0x0000ff00) >> 8)
+#define PHY_BB_CL_CAL_CTRL_CARR_LEAK_MAX_OFFSET_SET(x) (((x) << 8) & 0x0000ff00)
+#define PHY_BB_CL_CAL_CTRL_CF_CLC_INIT_BBGAIN_MSB 21
+#define PHY_BB_CL_CAL_CTRL_CF_CLC_INIT_BBGAIN_LSB 16
+#define PHY_BB_CL_CAL_CTRL_CF_CLC_INIT_BBGAIN_MASK 0x003f0000
+#define PHY_BB_CL_CAL_CTRL_CF_CLC_INIT_BBGAIN_GET(x) (((x) & 0x003f0000) >> 16)
+#define PHY_BB_CL_CAL_CTRL_CF_CLC_INIT_BBGAIN_SET(x) (((x) << 16) & 0x003f0000)
+#define PHY_BB_CL_CAL_CTRL_CF_ADC_BOUND_MSB 29
+#define PHY_BB_CL_CAL_CTRL_CF_ADC_BOUND_LSB 22
+#define PHY_BB_CL_CAL_CTRL_CF_ADC_BOUND_MASK 0x3fc00000
+#define PHY_BB_CL_CAL_CTRL_CF_ADC_BOUND_GET(x) (((x) & 0x3fc00000) >> 22)
+#define PHY_BB_CL_CAL_CTRL_CF_ADC_BOUND_SET(x) (((x) << 22) & 0x3fc00000)
+#define PHY_BB_CL_CAL_CTRL_USE_DAC_CL_CORRECTION_MSB 30
+#define PHY_BB_CL_CAL_CTRL_USE_DAC_CL_CORRECTION_LSB 30
+#define PHY_BB_CL_CAL_CTRL_USE_DAC_CL_CORRECTION_MASK 0x40000000
+#define PHY_BB_CL_CAL_CTRL_USE_DAC_CL_CORRECTION_GET(x) (((x) & 0x40000000) >> 30)
+#define PHY_BB_CL_CAL_CTRL_USE_DAC_CL_CORRECTION_SET(x) (((x) << 30) & 0x40000000)
+#define PHY_BB_CL_CAL_CTRL_CL_MAP_HW_GEN_MSB 31
+#define PHY_BB_CL_CAL_CTRL_CL_MAP_HW_GEN_LSB 31
+#define PHY_BB_CL_CAL_CTRL_CL_MAP_HW_GEN_MASK 0x80000000
+#define PHY_BB_CL_CAL_CTRL_CL_MAP_HW_GEN_GET(x) (((x) & 0x80000000) >> 31)
+#define PHY_BB_CL_CAL_CTRL_CL_MAP_HW_GEN_SET(x) (((x) << 31) & 0x80000000)
+
+/* macros for BB_cl_map_pal_0_b0 */
+#define PHY_BB_CL_MAP_PAL_0_B0_ADDRESS 0x0000a35c
+#define PHY_BB_CL_MAP_PAL_0_B0_OFFSET 0x0000a35c
+#define PHY_BB_CL_MAP_PAL_0_B0_CL_MAP_0_MSB 31
+#define PHY_BB_CL_MAP_PAL_0_B0_CL_MAP_0_LSB 0
+#define PHY_BB_CL_MAP_PAL_0_B0_CL_MAP_0_MASK 0xffffffff
+#define PHY_BB_CL_MAP_PAL_0_B0_CL_MAP_0_GET(x) (((x) & 0xffffffff) >> 0)
+#define PHY_BB_CL_MAP_PAL_0_B0_CL_MAP_0_SET(x) (((x) << 0) & 0xffffffff)
+
+/* macros for BB_cl_map_pal_1_b0 */
+#define PHY_BB_CL_MAP_PAL_1_B0_ADDRESS 0x0000a360
+#define PHY_BB_CL_MAP_PAL_1_B0_OFFSET 0x0000a360
+#define PHY_BB_CL_MAP_PAL_1_B0_CL_MAP_1_MSB 31
+#define PHY_BB_CL_MAP_PAL_1_B0_CL_MAP_1_LSB 0
+#define PHY_BB_CL_MAP_PAL_1_B0_CL_MAP_1_MASK 0xffffffff
+#define PHY_BB_CL_MAP_PAL_1_B0_CL_MAP_1_GET(x) (((x) & 0xffffffff) >> 0)
+#define PHY_BB_CL_MAP_PAL_1_B0_CL_MAP_1_SET(x) (((x) << 0) & 0xffffffff)
+
+/* macros for BB_cl_map_pal_2_b0 */
+#define PHY_BB_CL_MAP_PAL_2_B0_ADDRESS 0x0000a364
+#define PHY_BB_CL_MAP_PAL_2_B0_OFFSET 0x0000a364
+#define PHY_BB_CL_MAP_PAL_2_B0_CL_MAP_2_MSB 31
+#define PHY_BB_CL_MAP_PAL_2_B0_CL_MAP_2_LSB 0
+#define PHY_BB_CL_MAP_PAL_2_B0_CL_MAP_2_MASK 0xffffffff
+#define PHY_BB_CL_MAP_PAL_2_B0_CL_MAP_2_GET(x) (((x) & 0xffffffff) >> 0)
+#define PHY_BB_CL_MAP_PAL_2_B0_CL_MAP_2_SET(x) (((x) << 0) & 0xffffffff)
+
+/* macros for BB_cl_map_pal_3_b0 */
+#define PHY_BB_CL_MAP_PAL_3_B0_ADDRESS 0x0000a368
+#define PHY_BB_CL_MAP_PAL_3_B0_OFFSET 0x0000a368
+#define PHY_BB_CL_MAP_PAL_3_B0_CL_MAP_3_MSB 31
+#define PHY_BB_CL_MAP_PAL_3_B0_CL_MAP_3_LSB 0
+#define PHY_BB_CL_MAP_PAL_3_B0_CL_MAP_3_MASK 0xffffffff
+#define PHY_BB_CL_MAP_PAL_3_B0_CL_MAP_3_GET(x) (((x) & 0xffffffff) >> 0)
+#define PHY_BB_CL_MAP_PAL_3_B0_CL_MAP_3_SET(x) (((x) << 0) & 0xffffffff)
+
+/* macros for BB_rifs */
+#define PHY_BB_RIFS_ADDRESS 0x0000a388
+#define PHY_BB_RIFS_OFFSET 0x0000a388
+#define PHY_BB_RIFS_DISABLE_FCC_FIX_MSB 25
+#define PHY_BB_RIFS_DISABLE_FCC_FIX_LSB 25
+#define PHY_BB_RIFS_DISABLE_FCC_FIX_MASK 0x02000000
+#define PHY_BB_RIFS_DISABLE_FCC_FIX_GET(x) (((x) & 0x02000000) >> 25)
+#define PHY_BB_RIFS_DISABLE_FCC_FIX_SET(x) (((x) << 25) & 0x02000000)
+#define PHY_BB_RIFS_ENABLE_RESET_TDOMAIN_MSB 26
+#define PHY_BB_RIFS_ENABLE_RESET_TDOMAIN_LSB 26
+#define PHY_BB_RIFS_ENABLE_RESET_TDOMAIN_MASK 0x04000000
+#define PHY_BB_RIFS_ENABLE_RESET_TDOMAIN_GET(x) (((x) & 0x04000000) >> 26)
+#define PHY_BB_RIFS_ENABLE_RESET_TDOMAIN_SET(x) (((x) << 26) & 0x04000000)
+#define PHY_BB_RIFS_DISABLE_FCC_FIX2_MSB 27
+#define PHY_BB_RIFS_DISABLE_FCC_FIX2_LSB 27
+#define PHY_BB_RIFS_DISABLE_FCC_FIX2_MASK 0x08000000
+#define PHY_BB_RIFS_DISABLE_FCC_FIX2_GET(x) (((x) & 0x08000000) >> 27)
+#define PHY_BB_RIFS_DISABLE_FCC_FIX2_SET(x) (((x) << 27) & 0x08000000)
+#define PHY_BB_RIFS_DISABLE_RIFS_CCK_FIX_MSB 28
+#define PHY_BB_RIFS_DISABLE_RIFS_CCK_FIX_LSB 28
+#define PHY_BB_RIFS_DISABLE_RIFS_CCK_FIX_MASK 0x10000000
+#define PHY_BB_RIFS_DISABLE_RIFS_CCK_FIX_GET(x) (((x) & 0x10000000) >> 28)
+#define PHY_BB_RIFS_DISABLE_RIFS_CCK_FIX_SET(x) (((x) << 28) & 0x10000000)
+#define PHY_BB_RIFS_DISABLE_ERROR_RESET_FIX_MSB 29
+#define PHY_BB_RIFS_DISABLE_ERROR_RESET_FIX_LSB 29
+#define PHY_BB_RIFS_DISABLE_ERROR_RESET_FIX_MASK 0x20000000
+#define PHY_BB_RIFS_DISABLE_ERROR_RESET_FIX_GET(x) (((x) & 0x20000000) >> 29)
+#define PHY_BB_RIFS_DISABLE_ERROR_RESET_FIX_SET(x) (((x) << 29) & 0x20000000)
+#define PHY_BB_RIFS_RADAR_USE_FDOMAIN_RESET_MSB 30
+#define PHY_BB_RIFS_RADAR_USE_FDOMAIN_RESET_LSB 30
+#define PHY_BB_RIFS_RADAR_USE_FDOMAIN_RESET_MASK 0x40000000
+#define PHY_BB_RIFS_RADAR_USE_FDOMAIN_RESET_GET(x) (((x) & 0x40000000) >> 30)
+#define PHY_BB_RIFS_RADAR_USE_FDOMAIN_RESET_SET(x) (((x) << 30) & 0x40000000)
+
+/* macros for BB_powertx_rate5 */
+#define PHY_BB_POWERTX_RATE5_ADDRESS 0x0000a38c
+#define PHY_BB_POWERTX_RATE5_OFFSET 0x0000a38c
+#define PHY_BB_POWERTX_RATE5_POWERTXHT20_0_MSB 5
+#define PHY_BB_POWERTX_RATE5_POWERTXHT20_0_LSB 0
+#define PHY_BB_POWERTX_RATE5_POWERTXHT20_0_MASK 0x0000003f
+#define PHY_BB_POWERTX_RATE5_POWERTXHT20_0_GET(x) (((x) & 0x0000003f) >> 0)
+#define PHY_BB_POWERTX_RATE5_POWERTXHT20_0_SET(x) (((x) << 0) & 0x0000003f)
+#define PHY_BB_POWERTX_RATE5_POWERTXHT20_1_MSB 13
+#define PHY_BB_POWERTX_RATE5_POWERTXHT20_1_LSB 8
+#define PHY_BB_POWERTX_RATE5_POWERTXHT20_1_MASK 0x00003f00
+#define PHY_BB_POWERTX_RATE5_POWERTXHT20_1_GET(x) (((x) & 0x00003f00) >> 8)
+#define PHY_BB_POWERTX_RATE5_POWERTXHT20_1_SET(x) (((x) << 8) & 0x00003f00)
+#define PHY_BB_POWERTX_RATE5_POWERTXHT20_2_MSB 21
+#define PHY_BB_POWERTX_RATE5_POWERTXHT20_2_LSB 16
+#define PHY_BB_POWERTX_RATE5_POWERTXHT20_2_MASK 0x003f0000
+#define PHY_BB_POWERTX_RATE5_POWERTXHT20_2_GET(x) (((x) & 0x003f0000) >> 16)
+#define PHY_BB_POWERTX_RATE5_POWERTXHT20_2_SET(x) (((x) << 16) & 0x003f0000)
+#define PHY_BB_POWERTX_RATE5_POWERTXHT20_3_MSB 29
+#define PHY_BB_POWERTX_RATE5_POWERTXHT20_3_LSB 24
+#define PHY_BB_POWERTX_RATE5_POWERTXHT20_3_MASK 0x3f000000
+#define PHY_BB_POWERTX_RATE5_POWERTXHT20_3_GET(x) (((x) & 0x3f000000) >> 24)
+#define PHY_BB_POWERTX_RATE5_POWERTXHT20_3_SET(x) (((x) << 24) & 0x3f000000)
+
+/* macros for BB_powertx_rate6 */
+#define PHY_BB_POWERTX_RATE6_ADDRESS 0x0000a390
+#define PHY_BB_POWERTX_RATE6_OFFSET 0x0000a390
+#define PHY_BB_POWERTX_RATE6_POWERTXHT20_4_MSB 5
+#define PHY_BB_POWERTX_RATE6_POWERTXHT20_4_LSB 0
+#define PHY_BB_POWERTX_RATE6_POWERTXHT20_4_MASK 0x0000003f
+#define PHY_BB_POWERTX_RATE6_POWERTXHT20_4_GET(x) (((x) & 0x0000003f) >> 0)
+#define PHY_BB_POWERTX_RATE6_POWERTXHT20_4_SET(x) (((x) << 0) & 0x0000003f)
+#define PHY_BB_POWERTX_RATE6_POWERTXHT20_5_MSB 13
+#define PHY_BB_POWERTX_RATE6_POWERTXHT20_5_LSB 8
+#define PHY_BB_POWERTX_RATE6_POWERTXHT20_5_MASK 0x00003f00
+#define PHY_BB_POWERTX_RATE6_POWERTXHT20_5_GET(x) (((x) & 0x00003f00) >> 8)
+#define PHY_BB_POWERTX_RATE6_POWERTXHT20_5_SET(x) (((x) << 8) & 0x00003f00)
+#define PHY_BB_POWERTX_RATE6_POWERTXHT20_6_MSB 21
+#define PHY_BB_POWERTX_RATE6_POWERTXHT20_6_LSB 16
+#define PHY_BB_POWERTX_RATE6_POWERTXHT20_6_MASK 0x003f0000
+#define PHY_BB_POWERTX_RATE6_POWERTXHT20_6_GET(x) (((x) & 0x003f0000) >> 16)
+#define PHY_BB_POWERTX_RATE6_POWERTXHT20_6_SET(x) (((x) << 16) & 0x003f0000)
+#define PHY_BB_POWERTX_RATE6_POWERTXHT20_7_MSB 29
+#define PHY_BB_POWERTX_RATE6_POWERTXHT20_7_LSB 24
+#define PHY_BB_POWERTX_RATE6_POWERTXHT20_7_MASK 0x3f000000
+#define PHY_BB_POWERTX_RATE6_POWERTXHT20_7_GET(x) (((x) & 0x3f000000) >> 24)
+#define PHY_BB_POWERTX_RATE6_POWERTXHT20_7_SET(x) (((x) << 24) & 0x3f000000)
+
+/* macros for BB_tpc_10 */
+#define PHY_BB_TPC_10_ADDRESS 0x0000a394
+#define PHY_BB_TPC_10_OFFSET 0x0000a394
+#define PHY_BB_TPC_10_DESIRED_SCALE_HT20_0_MSB 4
+#define PHY_BB_TPC_10_DESIRED_SCALE_HT20_0_LSB 0
+#define PHY_BB_TPC_10_DESIRED_SCALE_HT20_0_MASK 0x0000001f
+#define PHY_BB_TPC_10_DESIRED_SCALE_HT20_0_GET(x) (((x) & 0x0000001f) >> 0)
+#define PHY_BB_TPC_10_DESIRED_SCALE_HT20_0_SET(x) (((x) << 0) & 0x0000001f)
+#define PHY_BB_TPC_10_DESIRED_SCALE_HT20_1_MSB 9
+#define PHY_BB_TPC_10_DESIRED_SCALE_HT20_1_LSB 5
+#define PHY_BB_TPC_10_DESIRED_SCALE_HT20_1_MASK 0x000003e0
+#define PHY_BB_TPC_10_DESIRED_SCALE_HT20_1_GET(x) (((x) & 0x000003e0) >> 5)
+#define PHY_BB_TPC_10_DESIRED_SCALE_HT20_1_SET(x) (((x) << 5) & 0x000003e0)
+#define PHY_BB_TPC_10_DESIRED_SCALE_HT20_2_MSB 14
+#define PHY_BB_TPC_10_DESIRED_SCALE_HT20_2_LSB 10
+#define PHY_BB_TPC_10_DESIRED_SCALE_HT20_2_MASK 0x00007c00
+#define PHY_BB_TPC_10_DESIRED_SCALE_HT20_2_GET(x) (((x) & 0x00007c00) >> 10)
+#define PHY_BB_TPC_10_DESIRED_SCALE_HT20_2_SET(x) (((x) << 10) & 0x00007c00)
+#define PHY_BB_TPC_10_DESIRED_SCALE_HT20_3_MSB 19
+#define PHY_BB_TPC_10_DESIRED_SCALE_HT20_3_LSB 15
+#define PHY_BB_TPC_10_DESIRED_SCALE_HT20_3_MASK 0x000f8000
+#define PHY_BB_TPC_10_DESIRED_SCALE_HT20_3_GET(x) (((x) & 0x000f8000) >> 15)
+#define PHY_BB_TPC_10_DESIRED_SCALE_HT20_3_SET(x) (((x) << 15) & 0x000f8000)
+#define PHY_BB_TPC_10_DESIRED_SCALE_HT20_4_MSB 24
+#define PHY_BB_TPC_10_DESIRED_SCALE_HT20_4_LSB 20
+#define PHY_BB_TPC_10_DESIRED_SCALE_HT20_4_MASK 0x01f00000
+#define PHY_BB_TPC_10_DESIRED_SCALE_HT20_4_GET(x) (((x) & 0x01f00000) >> 20)
+#define PHY_BB_TPC_10_DESIRED_SCALE_HT20_4_SET(x) (((x) << 20) & 0x01f00000)
+#define PHY_BB_TPC_10_DESIRED_SCALE_HT20_5_MSB 29
+#define PHY_BB_TPC_10_DESIRED_SCALE_HT20_5_LSB 25
+#define PHY_BB_TPC_10_DESIRED_SCALE_HT20_5_MASK 0x3e000000
+#define PHY_BB_TPC_10_DESIRED_SCALE_HT20_5_GET(x) (((x) & 0x3e000000) >> 25)
+#define PHY_BB_TPC_10_DESIRED_SCALE_HT20_5_SET(x) (((x) << 25) & 0x3e000000)
+
+/* macros for BB_tpc_11_b0 */
+#define PHY_BB_TPC_11_B0_ADDRESS 0x0000a398
+#define PHY_BB_TPC_11_B0_OFFSET 0x0000a398
+#define PHY_BB_TPC_11_B0_DESIRED_SCALE_HT20_6_MSB 4
+#define PHY_BB_TPC_11_B0_DESIRED_SCALE_HT20_6_LSB 0
+#define PHY_BB_TPC_11_B0_DESIRED_SCALE_HT20_6_MASK 0x0000001f
+#define PHY_BB_TPC_11_B0_DESIRED_SCALE_HT20_6_GET(x) (((x) & 0x0000001f) >> 0)
+#define PHY_BB_TPC_11_B0_DESIRED_SCALE_HT20_6_SET(x) (((x) << 0) & 0x0000001f)
+#define PHY_BB_TPC_11_B0_DESIRED_SCALE_HT20_7_MSB 9
+#define PHY_BB_TPC_11_B0_DESIRED_SCALE_HT20_7_LSB 5
+#define PHY_BB_TPC_11_B0_DESIRED_SCALE_HT20_7_MASK 0x000003e0
+#define PHY_BB_TPC_11_B0_DESIRED_SCALE_HT20_7_GET(x) (((x) & 0x000003e0) >> 5)
+#define PHY_BB_TPC_11_B0_DESIRED_SCALE_HT20_7_SET(x) (((x) << 5) & 0x000003e0)
+#define PHY_BB_TPC_11_B0_OLPC_GAIN_DELTA_0_MSB 23
+#define PHY_BB_TPC_11_B0_OLPC_GAIN_DELTA_0_LSB 16
+#define PHY_BB_TPC_11_B0_OLPC_GAIN_DELTA_0_MASK 0x00ff0000
+#define PHY_BB_TPC_11_B0_OLPC_GAIN_DELTA_0_GET(x) (((x) & 0x00ff0000) >> 16)
+#define PHY_BB_TPC_11_B0_OLPC_GAIN_DELTA_0_SET(x) (((x) << 16) & 0x00ff0000)
+#define PHY_BB_TPC_11_B0_OLPC_GAIN_DELTA_0_PAL_ON_MSB 31
+#define PHY_BB_TPC_11_B0_OLPC_GAIN_DELTA_0_PAL_ON_LSB 24
+#define PHY_BB_TPC_11_B0_OLPC_GAIN_DELTA_0_PAL_ON_MASK 0xff000000
+#define PHY_BB_TPC_11_B0_OLPC_GAIN_DELTA_0_PAL_ON_GET(x) (((x) & 0xff000000) >> 24)
+#define PHY_BB_TPC_11_B0_OLPC_GAIN_DELTA_0_PAL_ON_SET(x) (((x) << 24) & 0xff000000)
+
+/* macros for BB_cal_chain_mask */
+#define PHY_BB_CAL_CHAIN_MASK_ADDRESS 0x0000a39c
+#define PHY_BB_CAL_CHAIN_MASK_OFFSET 0x0000a39c
+#define PHY_BB_CAL_CHAIN_MASK_CAL_CHAIN_MASK_MSB 2
+#define PHY_BB_CAL_CHAIN_MASK_CAL_CHAIN_MASK_LSB 0
+#define PHY_BB_CAL_CHAIN_MASK_CAL_CHAIN_MASK_MASK 0x00000007
+#define PHY_BB_CAL_CHAIN_MASK_CAL_CHAIN_MASK_GET(x) (((x) & 0x00000007) >> 0)
+#define PHY_BB_CAL_CHAIN_MASK_CAL_CHAIN_MASK_SET(x) (((x) << 0) & 0x00000007)
+
+/* macros for BB_powertx_sub */
+#define PHY_BB_POWERTX_SUB_ADDRESS 0x0000a3bc
+#define PHY_BB_POWERTX_SUB_OFFSET 0x0000a3bc
+#define PHY_BB_POWERTX_SUB_POWERTX_SUB_FOR_2CHAIN_MSB 5
+#define PHY_BB_POWERTX_SUB_POWERTX_SUB_FOR_2CHAIN_LSB 0
+#define PHY_BB_POWERTX_SUB_POWERTX_SUB_FOR_2CHAIN_MASK 0x0000003f
+#define PHY_BB_POWERTX_SUB_POWERTX_SUB_FOR_2CHAIN_GET(x) (((x) & 0x0000003f) >> 0)
+#define PHY_BB_POWERTX_SUB_POWERTX_SUB_FOR_2CHAIN_SET(x) (((x) << 0) & 0x0000003f)
+
+/* macros for BB_powertx_rate7 */
+#define PHY_BB_POWERTX_RATE7_ADDRESS 0x0000a3c0
+#define PHY_BB_POWERTX_RATE7_OFFSET 0x0000a3c0
+#define PHY_BB_POWERTX_RATE7_POWERTXHT40_0_MSB 5
+#define PHY_BB_POWERTX_RATE7_POWERTXHT40_0_LSB 0
+#define PHY_BB_POWERTX_RATE7_POWERTXHT40_0_MASK 0x0000003f
+#define PHY_BB_POWERTX_RATE7_POWERTXHT40_0_GET(x) (((x) & 0x0000003f) >> 0)
+#define PHY_BB_POWERTX_RATE7_POWERTXHT40_0_SET(x) (((x) << 0) & 0x0000003f)
+#define PHY_BB_POWERTX_RATE7_POWERTXHT40_1_MSB 13
+#define PHY_BB_POWERTX_RATE7_POWERTXHT40_1_LSB 8
+#define PHY_BB_POWERTX_RATE7_POWERTXHT40_1_MASK 0x00003f00
+#define PHY_BB_POWERTX_RATE7_POWERTXHT40_1_GET(x) (((x) & 0x00003f00) >> 8)
+#define PHY_BB_POWERTX_RATE7_POWERTXHT40_1_SET(x) (((x) << 8) & 0x00003f00)
+#define PHY_BB_POWERTX_RATE7_POWERTXHT40_2_MSB 21
+#define PHY_BB_POWERTX_RATE7_POWERTXHT40_2_LSB 16
+#define PHY_BB_POWERTX_RATE7_POWERTXHT40_2_MASK 0x003f0000
+#define PHY_BB_POWERTX_RATE7_POWERTXHT40_2_GET(x) (((x) & 0x003f0000) >> 16)
+#define PHY_BB_POWERTX_RATE7_POWERTXHT40_2_SET(x) (((x) << 16) & 0x003f0000)
+#define PHY_BB_POWERTX_RATE7_POWERTXHT40_3_MSB 29
+#define PHY_BB_POWERTX_RATE7_POWERTXHT40_3_LSB 24
+#define PHY_BB_POWERTX_RATE7_POWERTXHT40_3_MASK 0x3f000000
+#define PHY_BB_POWERTX_RATE7_POWERTXHT40_3_GET(x) (((x) & 0x3f000000) >> 24)
+#define PHY_BB_POWERTX_RATE7_POWERTXHT40_3_SET(x) (((x) << 24) & 0x3f000000)
+
+/* macros for BB_powertx_rate8 */
+#define PHY_BB_POWERTX_RATE8_ADDRESS 0x0000a3c4
+#define PHY_BB_POWERTX_RATE8_OFFSET 0x0000a3c4
+#define PHY_BB_POWERTX_RATE8_POWERTXHT40_4_MSB 5
+#define PHY_BB_POWERTX_RATE8_POWERTXHT40_4_LSB 0
+#define PHY_BB_POWERTX_RATE8_POWERTXHT40_4_MASK 0x0000003f
+#define PHY_BB_POWERTX_RATE8_POWERTXHT40_4_GET(x) (((x) & 0x0000003f) >> 0)
+#define PHY_BB_POWERTX_RATE8_POWERTXHT40_4_SET(x) (((x) << 0) & 0x0000003f)
+#define PHY_BB_POWERTX_RATE8_POWERTXHT40_5_MSB 13
+#define PHY_BB_POWERTX_RATE8_POWERTXHT40_5_LSB 8
+#define PHY_BB_POWERTX_RATE8_POWERTXHT40_5_MASK 0x00003f00
+#define PHY_BB_POWERTX_RATE8_POWERTXHT40_5_GET(x) (((x) & 0x00003f00) >> 8)
+#define PHY_BB_POWERTX_RATE8_POWERTXHT40_5_SET(x) (((x) << 8) & 0x00003f00)
+#define PHY_BB_POWERTX_RATE8_POWERTXHT40_6_MSB 21
+#define PHY_BB_POWERTX_RATE8_POWERTXHT40_6_LSB 16
+#define PHY_BB_POWERTX_RATE8_POWERTXHT40_6_MASK 0x003f0000
+#define PHY_BB_POWERTX_RATE8_POWERTXHT40_6_GET(x) (((x) & 0x003f0000) >> 16)
+#define PHY_BB_POWERTX_RATE8_POWERTXHT40_6_SET(x) (((x) << 16) & 0x003f0000)
+#define PHY_BB_POWERTX_RATE8_POWERTXHT40_7_MSB 29
+#define PHY_BB_POWERTX_RATE8_POWERTXHT40_7_LSB 24
+#define PHY_BB_POWERTX_RATE8_POWERTXHT40_7_MASK 0x3f000000
+#define PHY_BB_POWERTX_RATE8_POWERTXHT40_7_GET(x) (((x) & 0x3f000000) >> 24)
+#define PHY_BB_POWERTX_RATE8_POWERTXHT40_7_SET(x) (((x) << 24) & 0x3f000000)
+
+/* macros for BB_powertx_rate9 */
+#define PHY_BB_POWERTX_RATE9_ADDRESS 0x0000a3c8
+#define PHY_BB_POWERTX_RATE9_OFFSET 0x0000a3c8
+#define PHY_BB_POWERTX_RATE9_POWERTX_DUP40_CCK_MSB 5
+#define PHY_BB_POWERTX_RATE9_POWERTX_DUP40_CCK_LSB 0
+#define PHY_BB_POWERTX_RATE9_POWERTX_DUP40_CCK_MASK 0x0000003f
+#define PHY_BB_POWERTX_RATE9_POWERTX_DUP40_CCK_GET(x) (((x) & 0x0000003f) >> 0)
+#define PHY_BB_POWERTX_RATE9_POWERTX_DUP40_CCK_SET(x) (((x) << 0) & 0x0000003f)
+#define PHY_BB_POWERTX_RATE9_POWERTX_DUP40_OFDM_MSB 13
+#define PHY_BB_POWERTX_RATE9_POWERTX_DUP40_OFDM_LSB 8
+#define PHY_BB_POWERTX_RATE9_POWERTX_DUP40_OFDM_MASK 0x00003f00
+#define PHY_BB_POWERTX_RATE9_POWERTX_DUP40_OFDM_GET(x) (((x) & 0x00003f00) >> 8)
+#define PHY_BB_POWERTX_RATE9_POWERTX_DUP40_OFDM_SET(x) (((x) << 8) & 0x00003f00)
+#define PHY_BB_POWERTX_RATE9_POWERTX_EXT20_CCK_MSB 21
+#define PHY_BB_POWERTX_RATE9_POWERTX_EXT20_CCK_LSB 16
+#define PHY_BB_POWERTX_RATE9_POWERTX_EXT20_CCK_MASK 0x003f0000
+#define PHY_BB_POWERTX_RATE9_POWERTX_EXT20_CCK_GET(x) (((x) & 0x003f0000) >> 16)
+#define PHY_BB_POWERTX_RATE9_POWERTX_EXT20_CCK_SET(x) (((x) << 16) & 0x003f0000)
+#define PHY_BB_POWERTX_RATE9_POWERTX_EXT20_OFDM_MSB 29
+#define PHY_BB_POWERTX_RATE9_POWERTX_EXT20_OFDM_LSB 24
+#define PHY_BB_POWERTX_RATE9_POWERTX_EXT20_OFDM_MASK 0x3f000000
+#define PHY_BB_POWERTX_RATE9_POWERTX_EXT20_OFDM_GET(x) (((x) & 0x3f000000) >> 24)
+#define PHY_BB_POWERTX_RATE9_POWERTX_EXT20_OFDM_SET(x) (((x) << 24) & 0x3f000000)
+
+/* macros for BB_powertx_rate10 */
+#define PHY_BB_POWERTX_RATE10_ADDRESS 0x0000a3cc
+#define PHY_BB_POWERTX_RATE10_OFFSET 0x0000a3cc
+#define PHY_BB_POWERTX_RATE10_POWERTXHT20_8_MSB 5
+#define PHY_BB_POWERTX_RATE10_POWERTXHT20_8_LSB 0
+#define PHY_BB_POWERTX_RATE10_POWERTXHT20_8_MASK 0x0000003f
+#define PHY_BB_POWERTX_RATE10_POWERTXHT20_8_GET(x) (((x) & 0x0000003f) >> 0)
+#define PHY_BB_POWERTX_RATE10_POWERTXHT20_8_SET(x) (((x) << 0) & 0x0000003f)
+#define PHY_BB_POWERTX_RATE10_POWERTXHT20_9_MSB 13
+#define PHY_BB_POWERTX_RATE10_POWERTXHT20_9_LSB 8
+#define PHY_BB_POWERTX_RATE10_POWERTXHT20_9_MASK 0x00003f00
+#define PHY_BB_POWERTX_RATE10_POWERTXHT20_9_GET(x) (((x) & 0x00003f00) >> 8)
+#define PHY_BB_POWERTX_RATE10_POWERTXHT20_9_SET(x) (((x) << 8) & 0x00003f00)
+#define PHY_BB_POWERTX_RATE10_POWERTXHT20_10_MSB 21
+#define PHY_BB_POWERTX_RATE10_POWERTXHT20_10_LSB 16
+#define PHY_BB_POWERTX_RATE10_POWERTXHT20_10_MASK 0x003f0000
+#define PHY_BB_POWERTX_RATE10_POWERTXHT20_10_GET(x) (((x) & 0x003f0000) >> 16)
+#define PHY_BB_POWERTX_RATE10_POWERTXHT20_10_SET(x) (((x) << 16) & 0x003f0000)
+#define PHY_BB_POWERTX_RATE10_POWERTXHT20_11_MSB 29
+#define PHY_BB_POWERTX_RATE10_POWERTXHT20_11_LSB 24
+#define PHY_BB_POWERTX_RATE10_POWERTXHT20_11_MASK 0x3f000000
+#define PHY_BB_POWERTX_RATE10_POWERTXHT20_11_GET(x) (((x) & 0x3f000000) >> 24)
+#define PHY_BB_POWERTX_RATE10_POWERTXHT20_11_SET(x) (((x) << 24) & 0x3f000000)
+
+/* macros for BB_powertx_rate11 */
+#define PHY_BB_POWERTX_RATE11_ADDRESS 0x0000a3d0
+#define PHY_BB_POWERTX_RATE11_OFFSET 0x0000a3d0
+#define PHY_BB_POWERTX_RATE11_POWERTXHT20_12_MSB 5
+#define PHY_BB_POWERTX_RATE11_POWERTXHT20_12_LSB 0
+#define PHY_BB_POWERTX_RATE11_POWERTXHT20_12_MASK 0x0000003f
+#define PHY_BB_POWERTX_RATE11_POWERTXHT20_12_GET(x) (((x) & 0x0000003f) >> 0)
+#define PHY_BB_POWERTX_RATE11_POWERTXHT20_12_SET(x) (((x) << 0) & 0x0000003f)
+#define PHY_BB_POWERTX_RATE11_POWERTXHT20_13_MSB 13
+#define PHY_BB_POWERTX_RATE11_POWERTXHT20_13_LSB 8
+#define PHY_BB_POWERTX_RATE11_POWERTXHT20_13_MASK 0x00003f00
+#define PHY_BB_POWERTX_RATE11_POWERTXHT20_13_GET(x) (((x) & 0x00003f00) >> 8)
+#define PHY_BB_POWERTX_RATE11_POWERTXHT20_13_SET(x) (((x) << 8) & 0x00003f00)
+#define PHY_BB_POWERTX_RATE11_POWERTXHT40_12_MSB 21
+#define PHY_BB_POWERTX_RATE11_POWERTXHT40_12_LSB 16
+#define PHY_BB_POWERTX_RATE11_POWERTXHT40_12_MASK 0x003f0000
+#define PHY_BB_POWERTX_RATE11_POWERTXHT40_12_GET(x) (((x) & 0x003f0000) >> 16)
+#define PHY_BB_POWERTX_RATE11_POWERTXHT40_12_SET(x) (((x) << 16) & 0x003f0000)
+#define PHY_BB_POWERTX_RATE11_POWERTXHT40_13_MSB 29
+#define PHY_BB_POWERTX_RATE11_POWERTXHT40_13_LSB 24
+#define PHY_BB_POWERTX_RATE11_POWERTXHT40_13_MASK 0x3f000000
+#define PHY_BB_POWERTX_RATE11_POWERTXHT40_13_GET(x) (((x) & 0x3f000000) >> 24)
+#define PHY_BB_POWERTX_RATE11_POWERTXHT40_13_SET(x) (((x) << 24) & 0x3f000000)
+
+/* macros for BB_powertx_rate12 */
+#define PHY_BB_POWERTX_RATE12_ADDRESS 0x0000a3d4
+#define PHY_BB_POWERTX_RATE12_OFFSET 0x0000a3d4
+#define PHY_BB_POWERTX_RATE12_POWERTXHT40_8_MSB 5
+#define PHY_BB_POWERTX_RATE12_POWERTXHT40_8_LSB 0
+#define PHY_BB_POWERTX_RATE12_POWERTXHT40_8_MASK 0x0000003f
+#define PHY_BB_POWERTX_RATE12_POWERTXHT40_8_GET(x) (((x) & 0x0000003f) >> 0)
+#define PHY_BB_POWERTX_RATE12_POWERTXHT40_8_SET(x) (((x) << 0) & 0x0000003f)
+#define PHY_BB_POWERTX_RATE12_POWERTXHT40_9_MSB 13
+#define PHY_BB_POWERTX_RATE12_POWERTXHT40_9_LSB 8
+#define PHY_BB_POWERTX_RATE12_POWERTXHT40_9_MASK 0x00003f00
+#define PHY_BB_POWERTX_RATE12_POWERTXHT40_9_GET(x) (((x) & 0x00003f00) >> 8)
+#define PHY_BB_POWERTX_RATE12_POWERTXHT40_9_SET(x) (((x) << 8) & 0x00003f00)
+#define PHY_BB_POWERTX_RATE12_POWERTXHT40_10_MSB 21
+#define PHY_BB_POWERTX_RATE12_POWERTXHT40_10_LSB 16
+#define PHY_BB_POWERTX_RATE12_POWERTXHT40_10_MASK 0x003f0000
+#define PHY_BB_POWERTX_RATE12_POWERTXHT40_10_GET(x) (((x) & 0x003f0000) >> 16)
+#define PHY_BB_POWERTX_RATE12_POWERTXHT40_10_SET(x) (((x) << 16) & 0x003f0000)
+#define PHY_BB_POWERTX_RATE12_POWERTXHT40_11_MSB 29
+#define PHY_BB_POWERTX_RATE12_POWERTXHT40_11_LSB 24
+#define PHY_BB_POWERTX_RATE12_POWERTXHT40_11_MASK 0x3f000000
+#define PHY_BB_POWERTX_RATE12_POWERTXHT40_11_GET(x) (((x) & 0x3f000000) >> 24)
+#define PHY_BB_POWERTX_RATE12_POWERTXHT40_11_SET(x) (((x) << 24) & 0x3f000000)
+
+/* macros for BB_force_analog */
+#define PHY_BB_FORCE_ANALOG_ADDRESS 0x0000a3d8
+#define PHY_BB_FORCE_ANALOG_OFFSET 0x0000a3d8
+#define PHY_BB_FORCE_ANALOG_FORCE_XPAON_MSB 0
+#define PHY_BB_FORCE_ANALOG_FORCE_XPAON_LSB 0
+#define PHY_BB_FORCE_ANALOG_FORCE_XPAON_MASK 0x00000001
+#define PHY_BB_FORCE_ANALOG_FORCE_XPAON_GET(x) (((x) & 0x00000001) >> 0)
+#define PHY_BB_FORCE_ANALOG_FORCE_XPAON_SET(x) (((x) << 0) & 0x00000001)
+#define PHY_BB_FORCE_ANALOG_FORCED_XPAON_MSB 3
+#define PHY_BB_FORCE_ANALOG_FORCED_XPAON_LSB 1
+#define PHY_BB_FORCE_ANALOG_FORCED_XPAON_MASK 0x0000000e
+#define PHY_BB_FORCE_ANALOG_FORCED_XPAON_GET(x) (((x) & 0x0000000e) >> 1)
+#define PHY_BB_FORCE_ANALOG_FORCED_XPAON_SET(x) (((x) << 1) & 0x0000000e)
+#define PHY_BB_FORCE_ANALOG_FORCE_PDADC_PWD_MSB 4
+#define PHY_BB_FORCE_ANALOG_FORCE_PDADC_PWD_LSB 4
+#define PHY_BB_FORCE_ANALOG_FORCE_PDADC_PWD_MASK 0x00000010
+#define PHY_BB_FORCE_ANALOG_FORCE_PDADC_PWD_GET(x) (((x) & 0x00000010) >> 4)
+#define PHY_BB_FORCE_ANALOG_FORCE_PDADC_PWD_SET(x) (((x) << 4) & 0x00000010)
+#define PHY_BB_FORCE_ANALOG_FORCED_PDADC_PWD_MSB 7
+#define PHY_BB_FORCE_ANALOG_FORCED_PDADC_PWD_LSB 5
+#define PHY_BB_FORCE_ANALOG_FORCED_PDADC_PWD_MASK 0x000000e0
+#define PHY_BB_FORCE_ANALOG_FORCED_PDADC_PWD_GET(x) (((x) & 0x000000e0) >> 5)
+#define PHY_BB_FORCE_ANALOG_FORCED_PDADC_PWD_SET(x) (((x) << 5) & 0x000000e0)
+
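+/*
+ * Illustrative sketch only (not part of the generated register map): the
+ * _GET/_SET macros defined in this file are plain mask-and-shift helpers,
+ * so a field update is an ordinary read-modify-write.  The accessors
+ * reg_read()/reg_write() below are hypothetical placeholders for whatever
+ * MMIO helpers the surrounding driver actually provides.
+ *
+ *   u32 val = reg_read(PHY_BB_FORCE_ANALOG_ADDRESS);
+ *   val &= ~PHY_BB_FORCE_ANALOG_FORCED_XPAON_MASK;
+ *   val |= PHY_BB_FORCE_ANALOG_FORCED_XPAON_SET(0x5);
+ *   reg_write(PHY_BB_FORCE_ANALOG_ADDRESS, val);
+ */
+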
+/* macros for BB_tpc_12 */
+#define PHY_BB_TPC_12_ADDRESS 0x0000a3dc
+#define PHY_BB_TPC_12_OFFSET 0x0000a3dc
+#define PHY_BB_TPC_12_DESIRED_SCALE_HT40_0_MSB 4
+#define PHY_BB_TPC_12_DESIRED_SCALE_HT40_0_LSB 0
+#define PHY_BB_TPC_12_DESIRED_SCALE_HT40_0_MASK 0x0000001f
+#define PHY_BB_TPC_12_DESIRED_SCALE_HT40_0_GET(x) (((x) & 0x0000001f) >> 0)
+#define PHY_BB_TPC_12_DESIRED_SCALE_HT40_0_SET(x) (((x) << 0) & 0x0000001f)
+#define PHY_BB_TPC_12_DESIRED_SCALE_HT40_1_MSB 9
+#define PHY_BB_TPC_12_DESIRED_SCALE_HT40_1_LSB 5
+#define PHY_BB_TPC_12_DESIRED_SCALE_HT40_1_MASK 0x000003e0
+#define PHY_BB_TPC_12_DESIRED_SCALE_HT40_1_GET(x) (((x) & 0x000003e0) >> 5)
+#define PHY_BB_TPC_12_DESIRED_SCALE_HT40_1_SET(x) (((x) << 5) & 0x000003e0)
+#define PHY_BB_TPC_12_DESIRED_SCALE_HT40_2_MSB 14
+#define PHY_BB_TPC_12_DESIRED_SCALE_HT40_2_LSB 10
+#define PHY_BB_TPC_12_DESIRED_SCALE_HT40_2_MASK 0x00007c00
+#define PHY_BB_TPC_12_DESIRED_SCALE_HT40_2_GET(x) (((x) & 0x00007c00) >> 10)
+#define PHY_BB_TPC_12_DESIRED_SCALE_HT40_2_SET(x) (((x) << 10) & 0x00007c00)
+#define PHY_BB_TPC_12_DESIRED_SCALE_HT40_3_MSB 19
+#define PHY_BB_TPC_12_DESIRED_SCALE_HT40_3_LSB 15
+#define PHY_BB_TPC_12_DESIRED_SCALE_HT40_3_MASK 0x000f8000
+#define PHY_BB_TPC_12_DESIRED_SCALE_HT40_3_GET(x) (((x) & 0x000f8000) >> 15)
+#define PHY_BB_TPC_12_DESIRED_SCALE_HT40_3_SET(x) (((x) << 15) & 0x000f8000)
+#define PHY_BB_TPC_12_DESIRED_SCALE_HT40_4_MSB 24
+#define PHY_BB_TPC_12_DESIRED_SCALE_HT40_4_LSB 20
+#define PHY_BB_TPC_12_DESIRED_SCALE_HT40_4_MASK 0x01f00000
+#define PHY_BB_TPC_12_DESIRED_SCALE_HT40_4_GET(x) (((x) & 0x01f00000) >> 20)
+#define PHY_BB_TPC_12_DESIRED_SCALE_HT40_4_SET(x) (((x) << 20) & 0x01f00000)
+#define PHY_BB_TPC_12_DESIRED_SCALE_HT40_5_MSB 29
+#define PHY_BB_TPC_12_DESIRED_SCALE_HT40_5_LSB 25
+#define PHY_BB_TPC_12_DESIRED_SCALE_HT40_5_MASK 0x3e000000
+#define PHY_BB_TPC_12_DESIRED_SCALE_HT40_5_GET(x) (((x) & 0x3e000000) >> 25)
+#define PHY_BB_TPC_12_DESIRED_SCALE_HT40_5_SET(x) (((x) << 25) & 0x3e000000)
+
+/* macros for BB_tpc_13 */
+#define PHY_BB_TPC_13_ADDRESS 0x0000a3e0
+#define PHY_BB_TPC_13_OFFSET 0x0000a3e0
+#define PHY_BB_TPC_13_DESIRED_SCALE_HT40_6_MSB 4
+#define PHY_BB_TPC_13_DESIRED_SCALE_HT40_6_LSB 0
+#define PHY_BB_TPC_13_DESIRED_SCALE_HT40_6_MASK 0x0000001f
+#define PHY_BB_TPC_13_DESIRED_SCALE_HT40_6_GET(x) (((x) & 0x0000001f) >> 0)
+#define PHY_BB_TPC_13_DESIRED_SCALE_HT40_6_SET(x) (((x) << 0) & 0x0000001f)
+#define PHY_BB_TPC_13_DESIRED_SCALE_HT40_7_MSB 9
+#define PHY_BB_TPC_13_DESIRED_SCALE_HT40_7_LSB 5
+#define PHY_BB_TPC_13_DESIRED_SCALE_HT40_7_MASK 0x000003e0
+#define PHY_BB_TPC_13_DESIRED_SCALE_HT40_7_GET(x) (((x) & 0x000003e0) >> 5)
+#define PHY_BB_TPC_13_DESIRED_SCALE_HT40_7_SET(x) (((x) << 5) & 0x000003e0)
+
+/* macros for BB_tpc_14 */
+#define PHY_BB_TPC_14_ADDRESS 0x0000a3e4
+#define PHY_BB_TPC_14_OFFSET 0x0000a3e4
+#define PHY_BB_TPC_14_DESIRED_SCALE_HT20_8_MSB 4
+#define PHY_BB_TPC_14_DESIRED_SCALE_HT20_8_LSB 0
+#define PHY_BB_TPC_14_DESIRED_SCALE_HT20_8_MASK 0x0000001f
+#define PHY_BB_TPC_14_DESIRED_SCALE_HT20_8_GET(x) (((x) & 0x0000001f) >> 0)
+#define PHY_BB_TPC_14_DESIRED_SCALE_HT20_8_SET(x) (((x) << 0) & 0x0000001f)
+#define PHY_BB_TPC_14_DESIRED_SCALE_HT20_9_MSB 9
+#define PHY_BB_TPC_14_DESIRED_SCALE_HT20_9_LSB 5
+#define PHY_BB_TPC_14_DESIRED_SCALE_HT20_9_MASK 0x000003e0
+#define PHY_BB_TPC_14_DESIRED_SCALE_HT20_9_GET(x) (((x) & 0x000003e0) >> 5)
+#define PHY_BB_TPC_14_DESIRED_SCALE_HT20_9_SET(x) (((x) << 5) & 0x000003e0)
+#define PHY_BB_TPC_14_DESIRED_SCALE_HT20_10_MSB 14
+#define PHY_BB_TPC_14_DESIRED_SCALE_HT20_10_LSB 10
+#define PHY_BB_TPC_14_DESIRED_SCALE_HT20_10_MASK 0x00007c00
+#define PHY_BB_TPC_14_DESIRED_SCALE_HT20_10_GET(x) (((x) & 0x00007c00) >> 10)
+#define PHY_BB_TPC_14_DESIRED_SCALE_HT20_10_SET(x) (((x) << 10) & 0x00007c00)
+#define PHY_BB_TPC_14_DESIRED_SCALE_HT20_11_MSB 19
+#define PHY_BB_TPC_14_DESIRED_SCALE_HT20_11_LSB 15
+#define PHY_BB_TPC_14_DESIRED_SCALE_HT20_11_MASK 0x000f8000
+#define PHY_BB_TPC_14_DESIRED_SCALE_HT20_11_GET(x) (((x) & 0x000f8000) >> 15)
+#define PHY_BB_TPC_14_DESIRED_SCALE_HT20_11_SET(x) (((x) << 15) & 0x000f8000)
+#define PHY_BB_TPC_14_DESIRED_SCALE_HT20_12_MSB 24
+#define PHY_BB_TPC_14_DESIRED_SCALE_HT20_12_LSB 20
+#define PHY_BB_TPC_14_DESIRED_SCALE_HT20_12_MASK 0x01f00000
+#define PHY_BB_TPC_14_DESIRED_SCALE_HT20_12_GET(x) (((x) & 0x01f00000) >> 20)
+#define PHY_BB_TPC_14_DESIRED_SCALE_HT20_12_SET(x) (((x) << 20) & 0x01f00000)
+#define PHY_BB_TPC_14_DESIRED_SCALE_HT20_13_MSB 29
+#define PHY_BB_TPC_14_DESIRED_SCALE_HT20_13_LSB 25
+#define PHY_BB_TPC_14_DESIRED_SCALE_HT20_13_MASK 0x3e000000
+#define PHY_BB_TPC_14_DESIRED_SCALE_HT20_13_GET(x) (((x) & 0x3e000000) >> 25)
+#define PHY_BB_TPC_14_DESIRED_SCALE_HT20_13_SET(x) (((x) << 25) & 0x3e000000)
+
+/* macros for BB_tpc_15 */
+#define PHY_BB_TPC_15_ADDRESS 0x0000a3e8
+#define PHY_BB_TPC_15_OFFSET 0x0000a3e8
+#define PHY_BB_TPC_15_DESIRED_SCALE_HT40_8_MSB 4
+#define PHY_BB_TPC_15_DESIRED_SCALE_HT40_8_LSB 0
+#define PHY_BB_TPC_15_DESIRED_SCALE_HT40_8_MASK 0x0000001f
+#define PHY_BB_TPC_15_DESIRED_SCALE_HT40_8_GET(x) (((x) & 0x0000001f) >> 0)
+#define PHY_BB_TPC_15_DESIRED_SCALE_HT40_8_SET(x) (((x) << 0) & 0x0000001f)
+#define PHY_BB_TPC_15_DESIRED_SCALE_HT40_9_MSB 9
+#define PHY_BB_TPC_15_DESIRED_SCALE_HT40_9_LSB 5
+#define PHY_BB_TPC_15_DESIRED_SCALE_HT40_9_MASK 0x000003e0
+#define PHY_BB_TPC_15_DESIRED_SCALE_HT40_9_GET(x) (((x) & 0x000003e0) >> 5)
+#define PHY_BB_TPC_15_DESIRED_SCALE_HT40_9_SET(x) (((x) << 5) & 0x000003e0)
+#define PHY_BB_TPC_15_DESIRED_SCALE_HT40_10_MSB 14
+#define PHY_BB_TPC_15_DESIRED_SCALE_HT40_10_LSB 10
+#define PHY_BB_TPC_15_DESIRED_SCALE_HT40_10_MASK 0x00007c00
+#define PHY_BB_TPC_15_DESIRED_SCALE_HT40_10_GET(x) (((x) & 0x00007c00) >> 10)
+#define PHY_BB_TPC_15_DESIRED_SCALE_HT40_10_SET(x) (((x) << 10) & 0x00007c00)
+#define PHY_BB_TPC_15_DESIRED_SCALE_HT40_11_MSB 19
+#define PHY_BB_TPC_15_DESIRED_SCALE_HT40_11_LSB 15
+#define PHY_BB_TPC_15_DESIRED_SCALE_HT40_11_MASK 0x000f8000
+#define PHY_BB_TPC_15_DESIRED_SCALE_HT40_11_GET(x) (((x) & 0x000f8000) >> 15)
+#define PHY_BB_TPC_15_DESIRED_SCALE_HT40_11_SET(x) (((x) << 15) & 0x000f8000)
+#define PHY_BB_TPC_15_DESIRED_SCALE_HT40_12_MSB 24
+#define PHY_BB_TPC_15_DESIRED_SCALE_HT40_12_LSB 20
+#define PHY_BB_TPC_15_DESIRED_SCALE_HT40_12_MASK 0x01f00000
+#define PHY_BB_TPC_15_DESIRED_SCALE_HT40_12_GET(x) (((x) & 0x01f00000) >> 20)
+#define PHY_BB_TPC_15_DESIRED_SCALE_HT40_12_SET(x) (((x) << 20) & 0x01f00000)
+#define PHY_BB_TPC_15_DESIRED_SCALE_HT40_13_MSB 29
+#define PHY_BB_TPC_15_DESIRED_SCALE_HT40_13_LSB 25
+#define PHY_BB_TPC_15_DESIRED_SCALE_HT40_13_MASK 0x3e000000
+#define PHY_BB_TPC_15_DESIRED_SCALE_HT40_13_GET(x) (((x) & 0x3e000000) >> 25)
+#define PHY_BB_TPC_15_DESIRED_SCALE_HT40_13_SET(x) (((x) << 25) & 0x3e000000)
+
+/* macros for BB_tpc_16 */
+#define PHY_BB_TPC_16_ADDRESS 0x0000a3ec
+#define PHY_BB_TPC_16_OFFSET 0x0000a3ec
+#define PHY_BB_TPC_16_PDADC_PAR_CORR_CCK_MSB 13
+#define PHY_BB_TPC_16_PDADC_PAR_CORR_CCK_LSB 8
+#define PHY_BB_TPC_16_PDADC_PAR_CORR_CCK_MASK 0x00003f00
+#define PHY_BB_TPC_16_PDADC_PAR_CORR_CCK_GET(x) (((x) & 0x00003f00) >> 8)
+#define PHY_BB_TPC_16_PDADC_PAR_CORR_CCK_SET(x) (((x) << 8) & 0x00003f00)
+#define PHY_BB_TPC_16_PDADC_PAR_CORR_OFDM_MSB 21
+#define PHY_BB_TPC_16_PDADC_PAR_CORR_OFDM_LSB 16
+#define PHY_BB_TPC_16_PDADC_PAR_CORR_OFDM_MASK 0x003f0000
+#define PHY_BB_TPC_16_PDADC_PAR_CORR_OFDM_GET(x) (((x) & 0x003f0000) >> 16)
+#define PHY_BB_TPC_16_PDADC_PAR_CORR_OFDM_SET(x) (((x) << 16) & 0x003f0000)
+#define PHY_BB_TPC_16_PDADC_PAR_CORR_HT40_MSB 29
+#define PHY_BB_TPC_16_PDADC_PAR_CORR_HT40_LSB 24
+#define PHY_BB_TPC_16_PDADC_PAR_CORR_HT40_MASK 0x3f000000
+#define PHY_BB_TPC_16_PDADC_PAR_CORR_HT40_GET(x) (((x) & 0x3f000000) >> 24)
+#define PHY_BB_TPC_16_PDADC_PAR_CORR_HT40_SET(x) (((x) << 24) & 0x3f000000)
+
+/* macros for BB_tpc_17 */
+#define PHY_BB_TPC_17_ADDRESS 0x0000a3f0
+#define PHY_BB_TPC_17_OFFSET 0x0000a3f0
+#define PHY_BB_TPC_17_ENABLE_PAL_MSB 0
+#define PHY_BB_TPC_17_ENABLE_PAL_LSB 0
+#define PHY_BB_TPC_17_ENABLE_PAL_MASK 0x00000001
+#define PHY_BB_TPC_17_ENABLE_PAL_GET(x) (((x) & 0x00000001) >> 0)
+#define PHY_BB_TPC_17_ENABLE_PAL_SET(x) (((x) << 0) & 0x00000001)
+#define PHY_BB_TPC_17_ENABLE_PAL_CCK_MSB 1
+#define PHY_BB_TPC_17_ENABLE_PAL_CCK_LSB 1
+#define PHY_BB_TPC_17_ENABLE_PAL_CCK_MASK 0x00000002
+#define PHY_BB_TPC_17_ENABLE_PAL_CCK_GET(x) (((x) & 0x00000002) >> 1)
+#define PHY_BB_TPC_17_ENABLE_PAL_CCK_SET(x) (((x) << 1) & 0x00000002)
+#define PHY_BB_TPC_17_ENABLE_PAL_OFDM_20_MSB 2
+#define PHY_BB_TPC_17_ENABLE_PAL_OFDM_20_LSB 2
+#define PHY_BB_TPC_17_ENABLE_PAL_OFDM_20_MASK 0x00000004
+#define PHY_BB_TPC_17_ENABLE_PAL_OFDM_20_GET(x) (((x) & 0x00000004) >> 2)
+#define PHY_BB_TPC_17_ENABLE_PAL_OFDM_20_SET(x) (((x) << 2) & 0x00000004)
+#define PHY_BB_TPC_17_ENABLE_PAL_OFDM_40_MSB 3
+#define PHY_BB_TPC_17_ENABLE_PAL_OFDM_40_LSB 3
+#define PHY_BB_TPC_17_ENABLE_PAL_OFDM_40_MASK 0x00000008
+#define PHY_BB_TPC_17_ENABLE_PAL_OFDM_40_GET(x) (((x) & 0x00000008) >> 3)
+#define PHY_BB_TPC_17_ENABLE_PAL_OFDM_40_SET(x) (((x) << 3) & 0x00000008)
+#define PHY_BB_TPC_17_PAL_POWER_THRESHOLD_MSB 9
+#define PHY_BB_TPC_17_PAL_POWER_THRESHOLD_LSB 4
+#define PHY_BB_TPC_17_PAL_POWER_THRESHOLD_MASK 0x000003f0
+#define PHY_BB_TPC_17_PAL_POWER_THRESHOLD_GET(x) (((x) & 0x000003f0) >> 4)
+#define PHY_BB_TPC_17_PAL_POWER_THRESHOLD_SET(x) (((x) << 4) & 0x000003f0)
+#define PHY_BB_TPC_17_FORCE_PAL_LOCKED_MSB 10
+#define PHY_BB_TPC_17_FORCE_PAL_LOCKED_LSB 10
+#define PHY_BB_TPC_17_FORCE_PAL_LOCKED_MASK 0x00000400
+#define PHY_BB_TPC_17_FORCE_PAL_LOCKED_GET(x) (((x) & 0x00000400) >> 10)
+#define PHY_BB_TPC_17_FORCE_PAL_LOCKED_SET(x) (((x) << 10) & 0x00000400)
+#define PHY_BB_TPC_17_INIT_TX_GAIN_SETTING_PAL_ON_MSB 16
+#define PHY_BB_TPC_17_INIT_TX_GAIN_SETTING_PAL_ON_LSB 11
+#define PHY_BB_TPC_17_INIT_TX_GAIN_SETTING_PAL_ON_MASK 0x0001f800
+#define PHY_BB_TPC_17_INIT_TX_GAIN_SETTING_PAL_ON_GET(x) (((x) & 0x0001f800) >> 11)
+#define PHY_BB_TPC_17_INIT_TX_GAIN_SETTING_PAL_ON_SET(x) (((x) << 11) & 0x0001f800)
+
+/* macros for BB_tpc_18 */
+#define PHY_BB_TPC_18_ADDRESS 0x0000a3f4
+#define PHY_BB_TPC_18_OFFSET 0x0000a3f4
+#define PHY_BB_TPC_18_THERM_CAL_VALUE_MSB 7
+#define PHY_BB_TPC_18_THERM_CAL_VALUE_LSB 0
+#define PHY_BB_TPC_18_THERM_CAL_VALUE_MASK 0x000000ff
+#define PHY_BB_TPC_18_THERM_CAL_VALUE_GET(x) (((x) & 0x000000ff) >> 0)
+#define PHY_BB_TPC_18_THERM_CAL_VALUE_SET(x) (((x) << 0) & 0x000000ff)
+#define PHY_BB_TPC_18_VOLT_CAL_VALUE_MSB 15
+#define PHY_BB_TPC_18_VOLT_CAL_VALUE_LSB 8
+#define PHY_BB_TPC_18_VOLT_CAL_VALUE_MASK 0x0000ff00
+#define PHY_BB_TPC_18_VOLT_CAL_VALUE_GET(x) (((x) & 0x0000ff00) >> 8)
+#define PHY_BB_TPC_18_VOLT_CAL_VALUE_SET(x) (((x) << 8) & 0x0000ff00)
+#define PHY_BB_TPC_18_USE_LEGACY_TPC_MSB 16
+#define PHY_BB_TPC_18_USE_LEGACY_TPC_LSB 16
+#define PHY_BB_TPC_18_USE_LEGACY_TPC_MASK 0x00010000
+#define PHY_BB_TPC_18_USE_LEGACY_TPC_GET(x) (((x) & 0x00010000) >> 16)
+#define PHY_BB_TPC_18_USE_LEGACY_TPC_SET(x) (((x) << 16) & 0x00010000)
+
+/* macros for BB_tpc_19 */
+#define PHY_BB_TPC_19_ADDRESS 0x0000a3f8
+#define PHY_BB_TPC_19_OFFSET 0x0000a3f8
+#define PHY_BB_TPC_19_ALPHA_THERM_MSB 7
+#define PHY_BB_TPC_19_ALPHA_THERM_LSB 0
+#define PHY_BB_TPC_19_ALPHA_THERM_MASK 0x000000ff
+#define PHY_BB_TPC_19_ALPHA_THERM_GET(x) (((x) & 0x000000ff) >> 0)
+#define PHY_BB_TPC_19_ALPHA_THERM_SET(x) (((x) << 0) & 0x000000ff)
+#define PHY_BB_TPC_19_ALPHA_THERM_PAL_ON_MSB 15
+#define PHY_BB_TPC_19_ALPHA_THERM_PAL_ON_LSB 8
+#define PHY_BB_TPC_19_ALPHA_THERM_PAL_ON_MASK 0x0000ff00
+#define PHY_BB_TPC_19_ALPHA_THERM_PAL_ON_GET(x) (((x) & 0x0000ff00) >> 8)
+#define PHY_BB_TPC_19_ALPHA_THERM_PAL_ON_SET(x) (((x) << 8) & 0x0000ff00)
+#define PHY_BB_TPC_19_ALPHA_VOLT_MSB 20
+#define PHY_BB_TPC_19_ALPHA_VOLT_LSB 16
+#define PHY_BB_TPC_19_ALPHA_VOLT_MASK 0x001f0000
+#define PHY_BB_TPC_19_ALPHA_VOLT_GET(x) (((x) & 0x001f0000) >> 16)
+#define PHY_BB_TPC_19_ALPHA_VOLT_SET(x) (((x) << 16) & 0x001f0000)
+#define PHY_BB_TPC_19_ALPHA_VOLT_PAL_ON_MSB 25
+#define PHY_BB_TPC_19_ALPHA_VOLT_PAL_ON_LSB 21
+#define PHY_BB_TPC_19_ALPHA_VOLT_PAL_ON_MASK 0x03e00000
+#define PHY_BB_TPC_19_ALPHA_VOLT_PAL_ON_GET(x) (((x) & 0x03e00000) >> 21)
+#define PHY_BB_TPC_19_ALPHA_VOLT_PAL_ON_SET(x) (((x) << 21) & 0x03e00000)
+
+/* macros for BB_tpc_20 */
+#define PHY_BB_TPC_20_ADDRESS 0x0000a3fc
+#define PHY_BB_TPC_20_OFFSET 0x0000a3fc
+#define PHY_BB_TPC_20_ENABLE_PAL_MCS_0_MSB 0
+#define PHY_BB_TPC_20_ENABLE_PAL_MCS_0_LSB 0
+#define PHY_BB_TPC_20_ENABLE_PAL_MCS_0_MASK 0x00000001
+#define PHY_BB_TPC_20_ENABLE_PAL_MCS_0_GET(x) (((x) & 0x00000001) >> 0)
+#define PHY_BB_TPC_20_ENABLE_PAL_MCS_0_SET(x) (((x) << 0) & 0x00000001)
+#define PHY_BB_TPC_20_ENABLE_PAL_MCS_1_MSB 1
+#define PHY_BB_TPC_20_ENABLE_PAL_MCS_1_LSB 1
+#define PHY_BB_TPC_20_ENABLE_PAL_MCS_1_MASK 0x00000002
+#define PHY_BB_TPC_20_ENABLE_PAL_MCS_1_GET(x) (((x) & 0x00000002) >> 1)
+#define PHY_BB_TPC_20_ENABLE_PAL_MCS_1_SET(x) (((x) << 1) & 0x00000002)
+#define PHY_BB_TPC_20_ENABLE_PAL_MCS_2_MSB 2
+#define PHY_BB_TPC_20_ENABLE_PAL_MCS_2_LSB 2
+#define PHY_BB_TPC_20_ENABLE_PAL_MCS_2_MASK 0x00000004
+#define PHY_BB_TPC_20_ENABLE_PAL_MCS_2_GET(x) (((x) & 0x00000004) >> 2)
+#define PHY_BB_TPC_20_ENABLE_PAL_MCS_2_SET(x) (((x) << 2) & 0x00000004)
+#define PHY_BB_TPC_20_ENABLE_PAL_MCS_3_MSB 3
+#define PHY_BB_TPC_20_ENABLE_PAL_MCS_3_LSB 3
+#define PHY_BB_TPC_20_ENABLE_PAL_MCS_3_MASK 0x00000008
+#define PHY_BB_TPC_20_ENABLE_PAL_MCS_3_GET(x) (((x) & 0x00000008) >> 3)
+#define PHY_BB_TPC_20_ENABLE_PAL_MCS_3_SET(x) (((x) << 3) & 0x00000008)
+#define PHY_BB_TPC_20_ENABLE_PAL_MCS_4_MSB 4
+#define PHY_BB_TPC_20_ENABLE_PAL_MCS_4_LSB 4
+#define PHY_BB_TPC_20_ENABLE_PAL_MCS_4_MASK 0x00000010
+#define PHY_BB_TPC_20_ENABLE_PAL_MCS_4_GET(x) (((x) & 0x00000010) >> 4)
+#define PHY_BB_TPC_20_ENABLE_PAL_MCS_4_SET(x) (((x) << 4) & 0x00000010)
+#define PHY_BB_TPC_20_ENABLE_PAL_MCS_5_MSB 5
+#define PHY_BB_TPC_20_ENABLE_PAL_MCS_5_LSB 5
+#define PHY_BB_TPC_20_ENABLE_PAL_MCS_5_MASK 0x00000020
+#define PHY_BB_TPC_20_ENABLE_PAL_MCS_5_GET(x) (((x) & 0x00000020) >> 5)
+#define PHY_BB_TPC_20_ENABLE_PAL_MCS_5_SET(x) (((x) << 5) & 0x00000020)
+#define PHY_BB_TPC_20_ENABLE_PAL_MCS_6_MSB 6
+#define PHY_BB_TPC_20_ENABLE_PAL_MCS_6_LSB 6
+#define PHY_BB_TPC_20_ENABLE_PAL_MCS_6_MASK 0x00000040
+#define PHY_BB_TPC_20_ENABLE_PAL_MCS_6_GET(x) (((x) & 0x00000040) >> 6)
+#define PHY_BB_TPC_20_ENABLE_PAL_MCS_6_SET(x) (((x) << 6) & 0x00000040)
+#define PHY_BB_TPC_20_ENABLE_PAL_MCS_7_MSB 7
+#define PHY_BB_TPC_20_ENABLE_PAL_MCS_7_LSB 7
+#define PHY_BB_TPC_20_ENABLE_PAL_MCS_7_MASK 0x00000080
+#define PHY_BB_TPC_20_ENABLE_PAL_MCS_7_GET(x) (((x) & 0x00000080) >> 7)
+#define PHY_BB_TPC_20_ENABLE_PAL_MCS_7_SET(x) (((x) << 7) & 0x00000080)
+#define PHY_BB_TPC_20_ENABLE_PAL_MCS_8_MSB 8
+#define PHY_BB_TPC_20_ENABLE_PAL_MCS_8_LSB 8
+#define PHY_BB_TPC_20_ENABLE_PAL_MCS_8_MASK 0x00000100
+#define PHY_BB_TPC_20_ENABLE_PAL_MCS_8_GET(x) (((x) & 0x00000100) >> 8)
+#define PHY_BB_TPC_20_ENABLE_PAL_MCS_8_SET(x) (((x) << 8) & 0x00000100)
+#define PHY_BB_TPC_20_ENABLE_PAL_MCS_9_MSB 9
+#define PHY_BB_TPC_20_ENABLE_PAL_MCS_9_LSB 9
+#define PHY_BB_TPC_20_ENABLE_PAL_MCS_9_MASK 0x00000200
+#define PHY_BB_TPC_20_ENABLE_PAL_MCS_9_GET(x) (((x) & 0x00000200) >> 9)
+#define PHY_BB_TPC_20_ENABLE_PAL_MCS_9_SET(x) (((x) << 9) & 0x00000200)
+#define PHY_BB_TPC_20_ENABLE_PAL_MCS_10_MSB 10
+#define PHY_BB_TPC_20_ENABLE_PAL_MCS_10_LSB 10
+#define PHY_BB_TPC_20_ENABLE_PAL_MCS_10_MASK 0x00000400
+#define PHY_BB_TPC_20_ENABLE_PAL_MCS_10_GET(x) (((x) & 0x00000400) >> 10)
+#define PHY_BB_TPC_20_ENABLE_PAL_MCS_10_SET(x) (((x) << 10) & 0x00000400)
+#define PHY_BB_TPC_20_ENABLE_PAL_MCS_11_MSB 11
+#define PHY_BB_TPC_20_ENABLE_PAL_MCS_11_LSB 11
+#define PHY_BB_TPC_20_ENABLE_PAL_MCS_11_MASK 0x00000800
+#define PHY_BB_TPC_20_ENABLE_PAL_MCS_11_GET(x) (((x) & 0x00000800) >> 11)
+#define PHY_BB_TPC_20_ENABLE_PAL_MCS_11_SET(x) (((x) << 11) & 0x00000800)
+#define PHY_BB_TPC_20_ENABLE_PAL_MCS_12_MSB 12
+#define PHY_BB_TPC_20_ENABLE_PAL_MCS_12_LSB 12
+#define PHY_BB_TPC_20_ENABLE_PAL_MCS_12_MASK 0x00001000
+#define PHY_BB_TPC_20_ENABLE_PAL_MCS_12_GET(x) (((x) & 0x00001000) >> 12)
+#define PHY_BB_TPC_20_ENABLE_PAL_MCS_12_SET(x) (((x) << 12) & 0x00001000)
+#define PHY_BB_TPC_20_ENABLE_PAL_MCS_13_MSB 13
+#define PHY_BB_TPC_20_ENABLE_PAL_MCS_13_LSB 13
+#define PHY_BB_TPC_20_ENABLE_PAL_MCS_13_MASK 0x00002000
+#define PHY_BB_TPC_20_ENABLE_PAL_MCS_13_GET(x) (((x) & 0x00002000) >> 13)
+#define PHY_BB_TPC_20_ENABLE_PAL_MCS_13_SET(x) (((x) << 13) & 0x00002000)
+#define PHY_BB_TPC_20_ENABLE_PAL_MCS_14_MSB 14
+#define PHY_BB_TPC_20_ENABLE_PAL_MCS_14_LSB 14
+#define PHY_BB_TPC_20_ENABLE_PAL_MCS_14_MASK 0x00004000
+#define PHY_BB_TPC_20_ENABLE_PAL_MCS_14_GET(x) (((x) & 0x00004000) >> 14)
+#define PHY_BB_TPC_20_ENABLE_PAL_MCS_14_SET(x) (((x) << 14) & 0x00004000)
+#define PHY_BB_TPC_20_ENABLE_PAL_MCS_15_MSB 15
+#define PHY_BB_TPC_20_ENABLE_PAL_MCS_15_LSB 15
+#define PHY_BB_TPC_20_ENABLE_PAL_MCS_15_MASK 0x00008000
+#define PHY_BB_TPC_20_ENABLE_PAL_MCS_15_GET(x) (((x) & 0x00008000) >> 15)
+#define PHY_BB_TPC_20_ENABLE_PAL_MCS_15_SET(x) (((x) << 15) & 0x00008000)
+#define PHY_BB_TPC_20_ENABLE_PAL_MCS_16_MSB 16
+#define PHY_BB_TPC_20_ENABLE_PAL_MCS_16_LSB 16
+#define PHY_BB_TPC_20_ENABLE_PAL_MCS_16_MASK 0x00010000
+#define PHY_BB_TPC_20_ENABLE_PAL_MCS_16_GET(x) (((x) & 0x00010000) >> 16)
+#define PHY_BB_TPC_20_ENABLE_PAL_MCS_16_SET(x) (((x) << 16) & 0x00010000)
+#define PHY_BB_TPC_20_ENABLE_PAL_MCS_17_MSB 17
+#define PHY_BB_TPC_20_ENABLE_PAL_MCS_17_LSB 17
+#define PHY_BB_TPC_20_ENABLE_PAL_MCS_17_MASK 0x00020000
+#define PHY_BB_TPC_20_ENABLE_PAL_MCS_17_GET(x) (((x) & 0x00020000) >> 17)
+#define PHY_BB_TPC_20_ENABLE_PAL_MCS_17_SET(x) (((x) << 17) & 0x00020000)
+#define PHY_BB_TPC_20_ENABLE_PAL_MCS_18_MSB 18
+#define PHY_BB_TPC_20_ENABLE_PAL_MCS_18_LSB 18
+#define PHY_BB_TPC_20_ENABLE_PAL_MCS_18_MASK 0x00040000
+#define PHY_BB_TPC_20_ENABLE_PAL_MCS_18_GET(x) (((x) & 0x00040000) >> 18)
+#define PHY_BB_TPC_20_ENABLE_PAL_MCS_18_SET(x) (((x) << 18) & 0x00040000)
+#define PHY_BB_TPC_20_ENABLE_PAL_MCS_19_MSB 19
+#define PHY_BB_TPC_20_ENABLE_PAL_MCS_19_LSB 19
+#define PHY_BB_TPC_20_ENABLE_PAL_MCS_19_MASK 0x00080000
+#define PHY_BB_TPC_20_ENABLE_PAL_MCS_19_GET(x) (((x) & 0x00080000) >> 19)
+#define PHY_BB_TPC_20_ENABLE_PAL_MCS_19_SET(x) (((x) << 19) & 0x00080000)
+#define PHY_BB_TPC_20_ENABLE_PAL_MCS_20_MSB 20
+#define PHY_BB_TPC_20_ENABLE_PAL_MCS_20_LSB 20
+#define PHY_BB_TPC_20_ENABLE_PAL_MCS_20_MASK 0x00100000
+#define PHY_BB_TPC_20_ENABLE_PAL_MCS_20_GET(x) (((x) & 0x00100000) >> 20)
+#define PHY_BB_TPC_20_ENABLE_PAL_MCS_20_SET(x) (((x) << 20) & 0x00100000)
+#define PHY_BB_TPC_20_ENABLE_PAL_MCS_21_MSB 21
+#define PHY_BB_TPC_20_ENABLE_PAL_MCS_21_LSB 21
+#define PHY_BB_TPC_20_ENABLE_PAL_MCS_21_MASK 0x00200000
+#define PHY_BB_TPC_20_ENABLE_PAL_MCS_21_GET(x) (((x) & 0x00200000) >> 21)
+#define PHY_BB_TPC_20_ENABLE_PAL_MCS_21_SET(x) (((x) << 21) & 0x00200000)
+#define PHY_BB_TPC_20_ENABLE_PAL_MCS_22_MSB 22
+#define PHY_BB_TPC_20_ENABLE_PAL_MCS_22_LSB 22
+#define PHY_BB_TPC_20_ENABLE_PAL_MCS_22_MASK 0x00400000
+#define PHY_BB_TPC_20_ENABLE_PAL_MCS_22_GET(x) (((x) & 0x00400000) >> 22)
+#define PHY_BB_TPC_20_ENABLE_PAL_MCS_22_SET(x) (((x) << 22) & 0x00400000)
+#define PHY_BB_TPC_20_ENABLE_PAL_MCS_23_MSB 23
+#define PHY_BB_TPC_20_ENABLE_PAL_MCS_23_LSB 23
+#define PHY_BB_TPC_20_ENABLE_PAL_MCS_23_MASK 0x00800000
+#define PHY_BB_TPC_20_ENABLE_PAL_MCS_23_GET(x) (((x) & 0x00800000) >> 23)
+#define PHY_BB_TPC_20_ENABLE_PAL_MCS_23_SET(x) (((x) << 23) & 0x00800000)
+
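+/*
+ * Illustrative usage sketch (an assumption, not generated content): the
+ * per-MCS PAL enable bits of BB_tpc_20 are single-bit fields, so a full
+ * enable word can be built by OR-ing the _SET macros and an individual
+ * bit read back with the matching _GET macro.
+ *
+ *   u32 tpc20 = PHY_BB_TPC_20_ENABLE_PAL_MCS_0_SET(1) |
+ *               PHY_BB_TPC_20_ENABLE_PAL_MCS_7_SET(1) |
+ *               PHY_BB_TPC_20_ENABLE_PAL_MCS_15_SET(1);
+ *   u32 mcs7  = PHY_BB_TPC_20_ENABLE_PAL_MCS_7_GET(tpc20);   yields 1
+ */
+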
+/* macros for BB_tx_gain_tab_1 */
+#define PHY_BB_TX_GAIN_TAB_1_ADDRESS 0x0000a400
+#define PHY_BB_TX_GAIN_TAB_1_OFFSET 0x0000a400
+#define PHY_BB_TX_GAIN_TAB_1_TG_TABLE1_MSB 31
+#define PHY_BB_TX_GAIN_TAB_1_TG_TABLE1_LSB 0
+#define PHY_BB_TX_GAIN_TAB_1_TG_TABLE1_MASK 0xffffffff
+#define PHY_BB_TX_GAIN_TAB_1_TG_TABLE1_GET(x) (((x) & 0xffffffff) >> 0)
+#define PHY_BB_TX_GAIN_TAB_1_TG_TABLE1_SET(x) (((x) << 0) & 0xffffffff)
+
+/* macros for BB_tx_gain_tab_2 */
+#define PHY_BB_TX_GAIN_TAB_2_ADDRESS 0x0000a404
+#define PHY_BB_TX_GAIN_TAB_2_OFFSET 0x0000a404
+#define PHY_BB_TX_GAIN_TAB_2_TG_TABLE2_MSB 31
+#define PHY_BB_TX_GAIN_TAB_2_TG_TABLE2_LSB 0
+#define PHY_BB_TX_GAIN_TAB_2_TG_TABLE2_MASK 0xffffffff
+#define PHY_BB_TX_GAIN_TAB_2_TG_TABLE2_GET(x) (((x) & 0xffffffff) >> 0)
+#define PHY_BB_TX_GAIN_TAB_2_TG_TABLE2_SET(x) (((x) << 0) & 0xffffffff)
+
+/* macros for BB_tx_gain_tab_3 */
+#define PHY_BB_TX_GAIN_TAB_3_ADDRESS 0x0000a408
+#define PHY_BB_TX_GAIN_TAB_3_OFFSET 0x0000a408
+#define PHY_BB_TX_GAIN_TAB_3_TG_TABLE3_MSB 31
+#define PHY_BB_TX_GAIN_TAB_3_TG_TABLE3_LSB 0
+#define PHY_BB_TX_GAIN_TAB_3_TG_TABLE3_MASK 0xffffffff
+#define PHY_BB_TX_GAIN_TAB_3_TG_TABLE3_GET(x) (((x) & 0xffffffff) >> 0)
+#define PHY_BB_TX_GAIN_TAB_3_TG_TABLE3_SET(x) (((x) << 0) & 0xffffffff)
+
+/* macros for BB_tx_gain_tab_4 */
+#define PHY_BB_TX_GAIN_TAB_4_ADDRESS 0x0000a40c
+#define PHY_BB_TX_GAIN_TAB_4_OFFSET 0x0000a40c
+#define PHY_BB_TX_GAIN_TAB_4_TG_TABLE4_MSB 31
+#define PHY_BB_TX_GAIN_TAB_4_TG_TABLE4_LSB 0
+#define PHY_BB_TX_GAIN_TAB_4_TG_TABLE4_MASK 0xffffffff
+#define PHY_BB_TX_GAIN_TAB_4_TG_TABLE4_GET(x) (((x) & 0xffffffff) >> 0)
+#define PHY_BB_TX_GAIN_TAB_4_TG_TABLE4_SET(x) (((x) << 0) & 0xffffffff)
+
+/* macros for BB_tx_gain_tab_5 */
+#define PHY_BB_TX_GAIN_TAB_5_ADDRESS 0x0000a410
+#define PHY_BB_TX_GAIN_TAB_5_OFFSET 0x0000a410
+#define PHY_BB_TX_GAIN_TAB_5_TG_TABLE5_MSB 31
+#define PHY_BB_TX_GAIN_TAB_5_TG_TABLE5_LSB 0
+#define PHY_BB_TX_GAIN_TAB_5_TG_TABLE5_MASK 0xffffffff
+#define PHY_BB_TX_GAIN_TAB_5_TG_TABLE5_GET(x) (((x) & 0xffffffff) >> 0)
+#define PHY_BB_TX_GAIN_TAB_5_TG_TABLE5_SET(x) (((x) << 0) & 0xffffffff)
+
+/* macros for BB_tx_gain_tab_6 */
+#define PHY_BB_TX_GAIN_TAB_6_ADDRESS 0x0000a414
+#define PHY_BB_TX_GAIN_TAB_6_OFFSET 0x0000a414
+#define PHY_BB_TX_GAIN_TAB_6_TG_TABLE6_MSB 31
+#define PHY_BB_TX_GAIN_TAB_6_TG_TABLE6_LSB 0
+#define PHY_BB_TX_GAIN_TAB_6_TG_TABLE6_MASK 0xffffffff
+#define PHY_BB_TX_GAIN_TAB_6_TG_TABLE6_GET(x) (((x) & 0xffffffff) >> 0)
+#define PHY_BB_TX_GAIN_TAB_6_TG_TABLE6_SET(x) (((x) << 0) & 0xffffffff)
+
+/* macros for BB_tx_gain_tab_7 */
+#define PHY_BB_TX_GAIN_TAB_7_ADDRESS 0x0000a418
+#define PHY_BB_TX_GAIN_TAB_7_OFFSET 0x0000a418
+#define PHY_BB_TX_GAIN_TAB_7_TG_TABLE7_MSB 31
+#define PHY_BB_TX_GAIN_TAB_7_TG_TABLE7_LSB 0
+#define PHY_BB_TX_GAIN_TAB_7_TG_TABLE7_MASK 0xffffffff
+#define PHY_BB_TX_GAIN_TAB_7_TG_TABLE7_GET(x) (((x) & 0xffffffff) >> 0)
+#define PHY_BB_TX_GAIN_TAB_7_TG_TABLE7_SET(x) (((x) << 0) & 0xffffffff)
+
+/* macros for BB_tx_gain_tab_8 */
+#define PHY_BB_TX_GAIN_TAB_8_ADDRESS 0x0000a41c
+#define PHY_BB_TX_GAIN_TAB_8_OFFSET 0x0000a41c
+#define PHY_BB_TX_GAIN_TAB_8_TG_TABLE8_MSB 31
+#define PHY_BB_TX_GAIN_TAB_8_TG_TABLE8_LSB 0
+#define PHY_BB_TX_GAIN_TAB_8_TG_TABLE8_MASK 0xffffffff
+#define PHY_BB_TX_GAIN_TAB_8_TG_TABLE8_GET(x) (((x) & 0xffffffff) >> 0)
+#define PHY_BB_TX_GAIN_TAB_8_TG_TABLE8_SET(x) (((x) << 0) & 0xffffffff)
+
+/* macros for BB_tx_gain_tab_9 */
+#define PHY_BB_TX_GAIN_TAB_9_ADDRESS 0x0000a420
+#define PHY_BB_TX_GAIN_TAB_9_OFFSET 0x0000a420
+#define PHY_BB_TX_GAIN_TAB_9_TG_TABLE9_MSB 31
+#define PHY_BB_TX_GAIN_TAB_9_TG_TABLE9_LSB 0
+#define PHY_BB_TX_GAIN_TAB_9_TG_TABLE9_MASK 0xffffffff
+#define PHY_BB_TX_GAIN_TAB_9_TG_TABLE9_GET(x) (((x) & 0xffffffff) >> 0)
+#define PHY_BB_TX_GAIN_TAB_9_TG_TABLE9_SET(x) (((x) << 0) & 0xffffffff)
+
+/* macros for BB_tx_gain_tab_10 */
+#define PHY_BB_TX_GAIN_TAB_10_ADDRESS 0x0000a424
+#define PHY_BB_TX_GAIN_TAB_10_OFFSET 0x0000a424
+#define PHY_BB_TX_GAIN_TAB_10_TG_TABLE10_MSB 31
+#define PHY_BB_TX_GAIN_TAB_10_TG_TABLE10_LSB 0
+#define PHY_BB_TX_GAIN_TAB_10_TG_TABLE10_MASK 0xffffffff
+#define PHY_BB_TX_GAIN_TAB_10_TG_TABLE10_GET(x) (((x) & 0xffffffff) >> 0)
+#define PHY_BB_TX_GAIN_TAB_10_TG_TABLE10_SET(x) (((x) << 0) & 0xffffffff)
+
+/* macros for BB_tx_gain_tab_11 */
+#define PHY_BB_TX_GAIN_TAB_11_ADDRESS 0x0000a428
+#define PHY_BB_TX_GAIN_TAB_11_OFFSET 0x0000a428
+#define PHY_BB_TX_GAIN_TAB_11_TG_TABLE11_MSB 31
+#define PHY_BB_TX_GAIN_TAB_11_TG_TABLE11_LSB 0
+#define PHY_BB_TX_GAIN_TAB_11_TG_TABLE11_MASK 0xffffffff
+#define PHY_BB_TX_GAIN_TAB_11_TG_TABLE11_GET(x) (((x) & 0xffffffff) >> 0)
+#define PHY_BB_TX_GAIN_TAB_11_TG_TABLE11_SET(x) (((x) << 0) & 0xffffffff)
+
+/* macros for BB_tx_gain_tab_12 */
+#define PHY_BB_TX_GAIN_TAB_12_ADDRESS 0x0000a42c
+#define PHY_BB_TX_GAIN_TAB_12_OFFSET 0x0000a42c
+#define PHY_BB_TX_GAIN_TAB_12_TG_TABLE12_MSB 31
+#define PHY_BB_TX_GAIN_TAB_12_TG_TABLE12_LSB 0
+#define PHY_BB_TX_GAIN_TAB_12_TG_TABLE12_MASK 0xffffffff
+#define PHY_BB_TX_GAIN_TAB_12_TG_TABLE12_GET(x) (((x) & 0xffffffff) >> 0)
+#define PHY_BB_TX_GAIN_TAB_12_TG_TABLE12_SET(x) (((x) << 0) & 0xffffffff)
+
+/* macros for BB_tx_gain_tab_13 */
+#define PHY_BB_TX_GAIN_TAB_13_ADDRESS 0x0000a430
+#define PHY_BB_TX_GAIN_TAB_13_OFFSET 0x0000a430
+#define PHY_BB_TX_GAIN_TAB_13_TG_TABLE13_MSB 31
+#define PHY_BB_TX_GAIN_TAB_13_TG_TABLE13_LSB 0
+#define PHY_BB_TX_GAIN_TAB_13_TG_TABLE13_MASK 0xffffffff
+#define PHY_BB_TX_GAIN_TAB_13_TG_TABLE13_GET(x) (((x) & 0xffffffff) >> 0)
+#define PHY_BB_TX_GAIN_TAB_13_TG_TABLE13_SET(x) (((x) << 0) & 0xffffffff)
+
+/* macros for BB_tx_gain_tab_14 */
+#define PHY_BB_TX_GAIN_TAB_14_ADDRESS 0x0000a434
+#define PHY_BB_TX_GAIN_TAB_14_OFFSET 0x0000a434
+#define PHY_BB_TX_GAIN_TAB_14_TG_TABLE14_MSB 31
+#define PHY_BB_TX_GAIN_TAB_14_TG_TABLE14_LSB 0
+#define PHY_BB_TX_GAIN_TAB_14_TG_TABLE14_MASK 0xffffffff
+#define PHY_BB_TX_GAIN_TAB_14_TG_TABLE14_GET(x) (((x) & 0xffffffff) >> 0)
+#define PHY_BB_TX_GAIN_TAB_14_TG_TABLE14_SET(x) (((x) << 0) & 0xffffffff)
+
+/* macros for BB_tx_gain_tab_15 */
+#define PHY_BB_TX_GAIN_TAB_15_ADDRESS 0x0000a438
+#define PHY_BB_TX_GAIN_TAB_15_OFFSET 0x0000a438
+#define PHY_BB_TX_GAIN_TAB_15_TG_TABLE15_MSB 31
+#define PHY_BB_TX_GAIN_TAB_15_TG_TABLE15_LSB 0
+#define PHY_BB_TX_GAIN_TAB_15_TG_TABLE15_MASK 0xffffffff
+#define PHY_BB_TX_GAIN_TAB_15_TG_TABLE15_GET(x) (((x) & 0xffffffff) >> 0)
+#define PHY_BB_TX_GAIN_TAB_15_TG_TABLE15_SET(x) (((x) << 0) & 0xffffffff)
+
+/* macros for BB_tx_gain_tab_16 */
+#define PHY_BB_TX_GAIN_TAB_16_ADDRESS 0x0000a43c
+#define PHY_BB_TX_GAIN_TAB_16_OFFSET 0x0000a43c
+#define PHY_BB_TX_GAIN_TAB_16_TG_TABLE16_MSB 31
+#define PHY_BB_TX_GAIN_TAB_16_TG_TABLE16_LSB 0
+#define PHY_BB_TX_GAIN_TAB_16_TG_TABLE16_MASK 0xffffffff
+#define PHY_BB_TX_GAIN_TAB_16_TG_TABLE16_GET(x) (((x) & 0xffffffff) >> 0)
+#define PHY_BB_TX_GAIN_TAB_16_TG_TABLE16_SET(x) (((x) << 0) & 0xffffffff)
+
+/* macros for BB_tx_gain_tab_17 */
+#define PHY_BB_TX_GAIN_TAB_17_ADDRESS 0x0000a440
+#define PHY_BB_TX_GAIN_TAB_17_OFFSET 0x0000a440
+#define PHY_BB_TX_GAIN_TAB_17_TG_TABLE17_MSB 31
+#define PHY_BB_TX_GAIN_TAB_17_TG_TABLE17_LSB 0
+#define PHY_BB_TX_GAIN_TAB_17_TG_TABLE17_MASK 0xffffffff
+#define PHY_BB_TX_GAIN_TAB_17_TG_TABLE17_GET(x) (((x) & 0xffffffff) >> 0)
+#define PHY_BB_TX_GAIN_TAB_17_TG_TABLE17_SET(x) (((x) << 0) & 0xffffffff)
+
+/* macros for BB_tx_gain_tab_18 */
+#define PHY_BB_TX_GAIN_TAB_18_ADDRESS 0x0000a444
+#define PHY_BB_TX_GAIN_TAB_18_OFFSET 0x0000a444
+#define PHY_BB_TX_GAIN_TAB_18_TG_TABLE18_MSB 31
+#define PHY_BB_TX_GAIN_TAB_18_TG_TABLE18_LSB 0
+#define PHY_BB_TX_GAIN_TAB_18_TG_TABLE18_MASK 0xffffffff
+#define PHY_BB_TX_GAIN_TAB_18_TG_TABLE18_GET(x) (((x) & 0xffffffff) >> 0)
+#define PHY_BB_TX_GAIN_TAB_18_TG_TABLE18_SET(x) (((x) << 0) & 0xffffffff)
+
+/* macros for BB_tx_gain_tab_19 */
+#define PHY_BB_TX_GAIN_TAB_19_ADDRESS 0x0000a448
+#define PHY_BB_TX_GAIN_TAB_19_OFFSET 0x0000a448
+#define PHY_BB_TX_GAIN_TAB_19_TG_TABLE19_MSB 31
+#define PHY_BB_TX_GAIN_TAB_19_TG_TABLE19_LSB 0
+#define PHY_BB_TX_GAIN_TAB_19_TG_TABLE19_MASK 0xffffffff
+#define PHY_BB_TX_GAIN_TAB_19_TG_TABLE19_GET(x) (((x) & 0xffffffff) >> 0)
+#define PHY_BB_TX_GAIN_TAB_19_TG_TABLE19_SET(x) (((x) << 0) & 0xffffffff)
+
+/* macros for BB_tx_gain_tab_20 */
+#define PHY_BB_TX_GAIN_TAB_20_ADDRESS 0x0000a44c
+#define PHY_BB_TX_GAIN_TAB_20_OFFSET 0x0000a44c
+#define PHY_BB_TX_GAIN_TAB_20_TG_TABLE20_MSB 31
+#define PHY_BB_TX_GAIN_TAB_20_TG_TABLE20_LSB 0
+#define PHY_BB_TX_GAIN_TAB_20_TG_TABLE20_MASK 0xffffffff
+#define PHY_BB_TX_GAIN_TAB_20_TG_TABLE20_GET(x) (((x) & 0xffffffff) >> 0)
+#define PHY_BB_TX_GAIN_TAB_20_TG_TABLE20_SET(x) (((x) << 0) & 0xffffffff)
+
+/* macros for BB_tx_gain_tab_21 */
+#define PHY_BB_TX_GAIN_TAB_21_ADDRESS 0x0000a450
+#define PHY_BB_TX_GAIN_TAB_21_OFFSET 0x0000a450
+#define PHY_BB_TX_GAIN_TAB_21_TG_TABLE21_MSB 31
+#define PHY_BB_TX_GAIN_TAB_21_TG_TABLE21_LSB 0
+#define PHY_BB_TX_GAIN_TAB_21_TG_TABLE21_MASK 0xffffffff
+#define PHY_BB_TX_GAIN_TAB_21_TG_TABLE21_GET(x) (((x) & 0xffffffff) >> 0)
+#define PHY_BB_TX_GAIN_TAB_21_TG_TABLE21_SET(x) (((x) << 0) & 0xffffffff)
+
+/* macros for BB_tx_gain_tab_22 */
+#define PHY_BB_TX_GAIN_TAB_22_ADDRESS 0x0000a454
+#define PHY_BB_TX_GAIN_TAB_22_OFFSET 0x0000a454
+#define PHY_BB_TX_GAIN_TAB_22_TG_TABLE22_MSB 31
+#define PHY_BB_TX_GAIN_TAB_22_TG_TABLE22_LSB 0
+#define PHY_BB_TX_GAIN_TAB_22_TG_TABLE22_MASK 0xffffffff
+#define PHY_BB_TX_GAIN_TAB_22_TG_TABLE22_GET(x) (((x) & 0xffffffff) >> 0)
+#define PHY_BB_TX_GAIN_TAB_22_TG_TABLE22_SET(x) (((x) << 0) & 0xffffffff)
+
+/* macros for BB_tx_gain_tab_23 */
+#define PHY_BB_TX_GAIN_TAB_23_ADDRESS 0x0000a458
+#define PHY_BB_TX_GAIN_TAB_23_OFFSET 0x0000a458
+#define PHY_BB_TX_GAIN_TAB_23_TG_TABLE23_MSB 31
+#define PHY_BB_TX_GAIN_TAB_23_TG_TABLE23_LSB 0
+#define PHY_BB_TX_GAIN_TAB_23_TG_TABLE23_MASK 0xffffffff
+#define PHY_BB_TX_GAIN_TAB_23_TG_TABLE23_GET(x) (((x) & 0xffffffff) >> 0)
+#define PHY_BB_TX_GAIN_TAB_23_TG_TABLE23_SET(x) (((x) << 0) & 0xffffffff)
+
+/* macros for BB_tx_gain_tab_24 */
+#define PHY_BB_TX_GAIN_TAB_24_ADDRESS 0x0000a45c
+#define PHY_BB_TX_GAIN_TAB_24_OFFSET 0x0000a45c
+#define PHY_BB_TX_GAIN_TAB_24_TG_TABLE24_MSB 31
+#define PHY_BB_TX_GAIN_TAB_24_TG_TABLE24_LSB 0
+#define PHY_BB_TX_GAIN_TAB_24_TG_TABLE24_MASK 0xffffffff
+#define PHY_BB_TX_GAIN_TAB_24_TG_TABLE24_GET(x) (((x) & 0xffffffff) >> 0)
+#define PHY_BB_TX_GAIN_TAB_24_TG_TABLE24_SET(x) (((x) << 0) & 0xffffffff)
+
+/* macros for BB_tx_gain_tab_25 */
+#define PHY_BB_TX_GAIN_TAB_25_ADDRESS 0x0000a460
+#define PHY_BB_TX_GAIN_TAB_25_OFFSET 0x0000a460
+#define PHY_BB_TX_GAIN_TAB_25_TG_TABLE25_MSB 31
+#define PHY_BB_TX_GAIN_TAB_25_TG_TABLE25_LSB 0
+#define PHY_BB_TX_GAIN_TAB_25_TG_TABLE25_MASK 0xffffffff
+#define PHY_BB_TX_GAIN_TAB_25_TG_TABLE25_GET(x) (((x) & 0xffffffff) >> 0)
+#define PHY_BB_TX_GAIN_TAB_25_TG_TABLE25_SET(x) (((x) << 0) & 0xffffffff)
+
+/* macros for BB_tx_gain_tab_26 */
+#define PHY_BB_TX_GAIN_TAB_26_ADDRESS 0x0000a464
+#define PHY_BB_TX_GAIN_TAB_26_OFFSET 0x0000a464
+#define PHY_BB_TX_GAIN_TAB_26_TG_TABLE26_MSB 31
+#define PHY_BB_TX_GAIN_TAB_26_TG_TABLE26_LSB 0
+#define PHY_BB_TX_GAIN_TAB_26_TG_TABLE26_MASK 0xffffffff
+#define PHY_BB_TX_GAIN_TAB_26_TG_TABLE26_GET(x) (((x) & 0xffffffff) >> 0)
+#define PHY_BB_TX_GAIN_TAB_26_TG_TABLE26_SET(x) (((x) << 0) & 0xffffffff)
+
+/* macros for BB_tx_gain_tab_27 */
+#define PHY_BB_TX_GAIN_TAB_27_ADDRESS 0x0000a468
+#define PHY_BB_TX_GAIN_TAB_27_OFFSET 0x0000a468
+#define PHY_BB_TX_GAIN_TAB_27_TG_TABLE27_MSB 31
+#define PHY_BB_TX_GAIN_TAB_27_TG_TABLE27_LSB 0
+#define PHY_BB_TX_GAIN_TAB_27_TG_TABLE27_MASK 0xffffffff
+#define PHY_BB_TX_GAIN_TAB_27_TG_TABLE27_GET(x) (((x) & 0xffffffff) >> 0)
+#define PHY_BB_TX_GAIN_TAB_27_TG_TABLE27_SET(x) (((x) << 0) & 0xffffffff)
+
+/* macros for BB_tx_gain_tab_28 */
+#define PHY_BB_TX_GAIN_TAB_28_ADDRESS 0x0000a46c
+#define PHY_BB_TX_GAIN_TAB_28_OFFSET 0x0000a46c
+#define PHY_BB_TX_GAIN_TAB_28_TG_TABLE28_MSB 31
+#define PHY_BB_TX_GAIN_TAB_28_TG_TABLE28_LSB 0
+#define PHY_BB_TX_GAIN_TAB_28_TG_TABLE28_MASK 0xffffffff
+#define PHY_BB_TX_GAIN_TAB_28_TG_TABLE28_GET(x) (((x) & 0xffffffff) >> 0)
+#define PHY_BB_TX_GAIN_TAB_28_TG_TABLE28_SET(x) (((x) << 0) & 0xffffffff)
+
+/* macros for BB_tx_gain_tab_29 */
+#define PHY_BB_TX_GAIN_TAB_29_ADDRESS 0x0000a470
+#define PHY_BB_TX_GAIN_TAB_29_OFFSET 0x0000a470
+#define PHY_BB_TX_GAIN_TAB_29_TG_TABLE29_MSB 31
+#define PHY_BB_TX_GAIN_TAB_29_TG_TABLE29_LSB 0
+#define PHY_BB_TX_GAIN_TAB_29_TG_TABLE29_MASK 0xffffffff
+#define PHY_BB_TX_GAIN_TAB_29_TG_TABLE29_GET(x) (((x) & 0xffffffff) >> 0)
+#define PHY_BB_TX_GAIN_TAB_29_TG_TABLE29_SET(x) (((x) << 0) & 0xffffffff)
+
+/* macros for BB_tx_gain_tab_30 */
+#define PHY_BB_TX_GAIN_TAB_30_ADDRESS 0x0000a474
+#define PHY_BB_TX_GAIN_TAB_30_OFFSET 0x0000a474
+#define PHY_BB_TX_GAIN_TAB_30_TG_TABLE30_MSB 31
+#define PHY_BB_TX_GAIN_TAB_30_TG_TABLE30_LSB 0
+#define PHY_BB_TX_GAIN_TAB_30_TG_TABLE30_MASK 0xffffffff
+#define PHY_BB_TX_GAIN_TAB_30_TG_TABLE30_GET(x) (((x) & 0xffffffff) >> 0)
+#define PHY_BB_TX_GAIN_TAB_30_TG_TABLE30_SET(x) (((x) << 0) & 0xffffffff)
+
+/* macros for BB_tx_gain_tab_31 */
+#define PHY_BB_TX_GAIN_TAB_31_ADDRESS 0x0000a478
+#define PHY_BB_TX_GAIN_TAB_31_OFFSET 0x0000a478
+#define PHY_BB_TX_GAIN_TAB_31_TG_TABLE31_MSB 31
+#define PHY_BB_TX_GAIN_TAB_31_TG_TABLE31_LSB 0
+#define PHY_BB_TX_GAIN_TAB_31_TG_TABLE31_MASK 0xffffffff
+#define PHY_BB_TX_GAIN_TAB_31_TG_TABLE31_GET(x) (((x) & 0xffffffff) >> 0)
+#define PHY_BB_TX_GAIN_TAB_31_TG_TABLE31_SET(x) (((x) << 0) & 0xffffffff)
+
+/* macros for BB_tx_gain_tab_32 */
+#define PHY_BB_TX_GAIN_TAB_32_ADDRESS 0x0000a47c
+#define PHY_BB_TX_GAIN_TAB_32_OFFSET 0x0000a47c
+#define PHY_BB_TX_GAIN_TAB_32_TG_TABLE32_MSB 31
+#define PHY_BB_TX_GAIN_TAB_32_TG_TABLE32_LSB 0
+#define PHY_BB_TX_GAIN_TAB_32_TG_TABLE32_MASK 0xffffffff
+#define PHY_BB_TX_GAIN_TAB_32_TG_TABLE32_GET(x) (((x) & 0xffffffff) >> 0)
+#define PHY_BB_TX_GAIN_TAB_32_TG_TABLE32_SET(x) (((x) << 0) & 0xffffffff)
+
+/* macros for BB_tx_gain_tab_pal_1 */
+#define PHY_BB_TX_GAIN_TAB_PAL_1_ADDRESS 0x0000a480
+#define PHY_BB_TX_GAIN_TAB_PAL_1_OFFSET 0x0000a480
+#define PHY_BB_TX_GAIN_TAB_PAL_1_TG_TABLE1_PAL_ON_MSB 31
+#define PHY_BB_TX_GAIN_TAB_PAL_1_TG_TABLE1_PAL_ON_LSB 0
+#define PHY_BB_TX_GAIN_TAB_PAL_1_TG_TABLE1_PAL_ON_MASK 0xffffffff
+#define PHY_BB_TX_GAIN_TAB_PAL_1_TG_TABLE1_PAL_ON_GET(x) (((x) & 0xffffffff) >> 0)
+#define PHY_BB_TX_GAIN_TAB_PAL_1_TG_TABLE1_PAL_ON_SET(x) (((x) << 0) & 0xffffffff)
+
+/* macros for BB_tx_gain_tab_pal_2 */
+#define PHY_BB_TX_GAIN_TAB_PAL_2_ADDRESS 0x0000a484
+#define PHY_BB_TX_GAIN_TAB_PAL_2_OFFSET 0x0000a484
+#define PHY_BB_TX_GAIN_TAB_PAL_2_TG_TABLE2_PAL_ON_MSB 31
+#define PHY_BB_TX_GAIN_TAB_PAL_2_TG_TABLE2_PAL_ON_LSB 0
+#define PHY_BB_TX_GAIN_TAB_PAL_2_TG_TABLE2_PAL_ON_MASK 0xffffffff
+#define PHY_BB_TX_GAIN_TAB_PAL_2_TG_TABLE2_PAL_ON_GET(x) (((x) & 0xffffffff) >> 0)
+#define PHY_BB_TX_GAIN_TAB_PAL_2_TG_TABLE2_PAL_ON_SET(x) (((x) << 0) & 0xffffffff)
+
+/* macros for BB_tx_gain_tab_pal_3 */
+#define PHY_BB_TX_GAIN_TAB_PAL_3_ADDRESS 0x0000a488
+#define PHY_BB_TX_GAIN_TAB_PAL_3_OFFSET 0x0000a488
+#define PHY_BB_TX_GAIN_TAB_PAL_3_TG_TABLE3_PAL_ON_MSB 31
+#define PHY_BB_TX_GAIN_TAB_PAL_3_TG_TABLE3_PAL_ON_LSB 0
+#define PHY_BB_TX_GAIN_TAB_PAL_3_TG_TABLE3_PAL_ON_MASK 0xffffffff
+#define PHY_BB_TX_GAIN_TAB_PAL_3_TG_TABLE3_PAL_ON_GET(x) (((x) & 0xffffffff) >> 0)
+#define PHY_BB_TX_GAIN_TAB_PAL_3_TG_TABLE3_PAL_ON_SET(x) (((x) << 0) & 0xffffffff)
+
+/* macros for BB_tx_gain_tab_pal_4 */
+#define PHY_BB_TX_GAIN_TAB_PAL_4_ADDRESS 0x0000a48c
+#define PHY_BB_TX_GAIN_TAB_PAL_4_OFFSET 0x0000a48c
+#define PHY_BB_TX_GAIN_TAB_PAL_4_TG_TABLE4_PAL_ON_MSB 31
+#define PHY_BB_TX_GAIN_TAB_PAL_4_TG_TABLE4_PAL_ON_LSB 0
+#define PHY_BB_TX_GAIN_TAB_PAL_4_TG_TABLE4_PAL_ON_MASK 0xffffffff
+#define PHY_BB_TX_GAIN_TAB_PAL_4_TG_TABLE4_PAL_ON_GET(x) (((x) & 0xffffffff) >> 0)
+#define PHY_BB_TX_GAIN_TAB_PAL_4_TG_TABLE4_PAL_ON_SET(x) (((x) << 0) & 0xffffffff)
+
+/* macros for BB_tx_gain_tab_pal_5 */
+#define PHY_BB_TX_GAIN_TAB_PAL_5_ADDRESS 0x0000a490
+#define PHY_BB_TX_GAIN_TAB_PAL_5_OFFSET 0x0000a490
+#define PHY_BB_TX_GAIN_TAB_PAL_5_TG_TABLE5_PAL_ON_MSB 31
+#define PHY_BB_TX_GAIN_TAB_PAL_5_TG_TABLE5_PAL_ON_LSB 0
+#define PHY_BB_TX_GAIN_TAB_PAL_5_TG_TABLE5_PAL_ON_MASK 0xffffffff
+#define PHY_BB_TX_GAIN_TAB_PAL_5_TG_TABLE5_PAL_ON_GET(x) (((x) & 0xffffffff) >> 0)
+#define PHY_BB_TX_GAIN_TAB_PAL_5_TG_TABLE5_PAL_ON_SET(x) (((x) << 0) & 0xffffffff)
+
+/* macros for BB_tx_gain_tab_pal_6 */
+#define PHY_BB_TX_GAIN_TAB_PAL_6_ADDRESS 0x0000a494
+#define PHY_BB_TX_GAIN_TAB_PAL_6_OFFSET 0x0000a494
+#define PHY_BB_TX_GAIN_TAB_PAL_6_TG_TABLE6_PAL_ON_MSB 31
+#define PHY_BB_TX_GAIN_TAB_PAL_6_TG_TABLE6_PAL_ON_LSB 0
+#define PHY_BB_TX_GAIN_TAB_PAL_6_TG_TABLE6_PAL_ON_MASK 0xffffffff
+#define PHY_BB_TX_GAIN_TAB_PAL_6_TG_TABLE6_PAL_ON_GET(x) (((x) & 0xffffffff) >> 0)
+#define PHY_BB_TX_GAIN_TAB_PAL_6_TG_TABLE6_PAL_ON_SET(x) (((x) << 0) & 0xffffffff)
+
+/* macros for BB_tx_gain_tab_pal_7 */
+#define PHY_BB_TX_GAIN_TAB_PAL_7_ADDRESS 0x0000a498
+#define PHY_BB_TX_GAIN_TAB_PAL_7_OFFSET 0x0000a498
+#define PHY_BB_TX_GAIN_TAB_PAL_7_TG_TABLE7_PAL_ON_MSB 31
+#define PHY_BB_TX_GAIN_TAB_PAL_7_TG_TABLE7_PAL_ON_LSB 0
+#define PHY_BB_TX_GAIN_TAB_PAL_7_TG_TABLE7_PAL_ON_MASK 0xffffffff
+#define PHY_BB_TX_GAIN_TAB_PAL_7_TG_TABLE7_PAL_ON_GET(x) (((x) & 0xffffffff) >> 0)
+#define PHY_BB_TX_GAIN_TAB_PAL_7_TG_TABLE7_PAL_ON_SET(x) (((x) << 0) & 0xffffffff)
+
+/* macros for BB_tx_gain_tab_pal_8 */
+#define PHY_BB_TX_GAIN_TAB_PAL_8_ADDRESS 0x0000a49c
+#define PHY_BB_TX_GAIN_TAB_PAL_8_OFFSET 0x0000a49c
+#define PHY_BB_TX_GAIN_TAB_PAL_8_TG_TABLE8_PAL_ON_MSB 31
+#define PHY_BB_TX_GAIN_TAB_PAL_8_TG_TABLE8_PAL_ON_LSB 0
+#define PHY_BB_TX_GAIN_TAB_PAL_8_TG_TABLE8_PAL_ON_MASK 0xffffffff
+#define PHY_BB_TX_GAIN_TAB_PAL_8_TG_TABLE8_PAL_ON_GET(x) (((x) & 0xffffffff) >> 0)
+#define PHY_BB_TX_GAIN_TAB_PAL_8_TG_TABLE8_PAL_ON_SET(x) (((x) << 0) & 0xffffffff)
+
+/* macros for BB_tx_gain_tab_pal_9 */
+#define PHY_BB_TX_GAIN_TAB_PAL_9_ADDRESS 0x0000a4a0
+#define PHY_BB_TX_GAIN_TAB_PAL_9_OFFSET 0x0000a4a0
+#define PHY_BB_TX_GAIN_TAB_PAL_9_TG_TABLE9_PAL_ON_MSB 31
+#define PHY_BB_TX_GAIN_TAB_PAL_9_TG_TABLE9_PAL_ON_LSB 0
+#define PHY_BB_TX_GAIN_TAB_PAL_9_TG_TABLE9_PAL_ON_MASK 0xffffffff
+#define PHY_BB_TX_GAIN_TAB_PAL_9_TG_TABLE9_PAL_ON_GET(x) (((x) & 0xffffffff) >> 0)
+#define PHY_BB_TX_GAIN_TAB_PAL_9_TG_TABLE9_PAL_ON_SET(x) (((x) << 0) & 0xffffffff)
+
+/* macros for BB_tx_gain_tab_pal_10 */
+#define PHY_BB_TX_GAIN_TAB_PAL_10_ADDRESS 0x0000a4a4
+#define PHY_BB_TX_GAIN_TAB_PAL_10_OFFSET 0x0000a4a4
+#define PHY_BB_TX_GAIN_TAB_PAL_10_TG_TABLE10_PAL_ON_MSB 31
+#define PHY_BB_TX_GAIN_TAB_PAL_10_TG_TABLE10_PAL_ON_LSB 0
+#define PHY_BB_TX_GAIN_TAB_PAL_10_TG_TABLE10_PAL_ON_MASK 0xffffffff
+#define PHY_BB_TX_GAIN_TAB_PAL_10_TG_TABLE10_PAL_ON_GET(x) (((x) & 0xffffffff) >> 0)
+#define PHY_BB_TX_GAIN_TAB_PAL_10_TG_TABLE10_PAL_ON_SET(x) (((x) << 0) & 0xffffffff)
+
+/* macros for BB_tx_gain_tab_pal_11 */
+#define PHY_BB_TX_GAIN_TAB_PAL_11_ADDRESS 0x0000a4a8
+#define PHY_BB_TX_GAIN_TAB_PAL_11_OFFSET 0x0000a4a8
+#define PHY_BB_TX_GAIN_TAB_PAL_11_TG_TABLE11_PAL_ON_MSB 31
+#define PHY_BB_TX_GAIN_TAB_PAL_11_TG_TABLE11_PAL_ON_LSB 0
+#define PHY_BB_TX_GAIN_TAB_PAL_11_TG_TABLE11_PAL_ON_MASK 0xffffffff
+#define PHY_BB_TX_GAIN_TAB_PAL_11_TG_TABLE11_PAL_ON_GET(x) (((x) & 0xffffffff) >> 0)
+#define PHY_BB_TX_GAIN_TAB_PAL_11_TG_TABLE11_PAL_ON_SET(x) (((x) << 0) & 0xffffffff)
+
+/* macros for BB_tx_gain_tab_pal_12 */
+#define PHY_BB_TX_GAIN_TAB_PAL_12_ADDRESS 0x0000a4ac
+#define PHY_BB_TX_GAIN_TAB_PAL_12_OFFSET 0x0000a4ac
+#define PHY_BB_TX_GAIN_TAB_PAL_12_TG_TABLE12_PAL_ON_MSB 31
+#define PHY_BB_TX_GAIN_TAB_PAL_12_TG_TABLE12_PAL_ON_LSB 0
+#define PHY_BB_TX_GAIN_TAB_PAL_12_TG_TABLE12_PAL_ON_MASK 0xffffffff
+#define PHY_BB_TX_GAIN_TAB_PAL_12_TG_TABLE12_PAL_ON_GET(x) (((x) & 0xffffffff) >> 0)
+#define PHY_BB_TX_GAIN_TAB_PAL_12_TG_TABLE12_PAL_ON_SET(x) (((x) << 0) & 0xffffffff)
+
+/* macros for BB_tx_gain_tab_pal_13 */
+#define PHY_BB_TX_GAIN_TAB_PAL_13_ADDRESS 0x0000a4b0
+#define PHY_BB_TX_GAIN_TAB_PAL_13_OFFSET 0x0000a4b0
+#define PHY_BB_TX_GAIN_TAB_PAL_13_TG_TABLE13_PAL_ON_MSB 31
+#define PHY_BB_TX_GAIN_TAB_PAL_13_TG_TABLE13_PAL_ON_LSB 0
+#define PHY_BB_TX_GAIN_TAB_PAL_13_TG_TABLE13_PAL_ON_MASK 0xffffffff
+#define PHY_BB_TX_GAIN_TAB_PAL_13_TG_TABLE13_PAL_ON_GET(x) (((x) & 0xffffffff) >> 0)
+#define PHY_BB_TX_GAIN_TAB_PAL_13_TG_TABLE13_PAL_ON_SET(x) (((x) << 0) & 0xffffffff)
+
+/* macros for BB_tx_gain_tab_pal_14 */
+#define PHY_BB_TX_GAIN_TAB_PAL_14_ADDRESS 0x0000a4b4
+#define PHY_BB_TX_GAIN_TAB_PAL_14_OFFSET 0x0000a4b4
+#define PHY_BB_TX_GAIN_TAB_PAL_14_TG_TABLE14_PAL_ON_MSB 31
+#define PHY_BB_TX_GAIN_TAB_PAL_14_TG_TABLE14_PAL_ON_LSB 0
+#define PHY_BB_TX_GAIN_TAB_PAL_14_TG_TABLE14_PAL_ON_MASK 0xffffffff
+#define PHY_BB_TX_GAIN_TAB_PAL_14_TG_TABLE14_PAL_ON_GET(x) (((x) & 0xffffffff) >> 0)
+#define PHY_BB_TX_GAIN_TAB_PAL_14_TG_TABLE14_PAL_ON_SET(x) (((x) << 0) & 0xffffffff)
+
+/* macros for BB_tx_gain_tab_pal_15 */
+#define PHY_BB_TX_GAIN_TAB_PAL_15_ADDRESS 0x0000a4b8
+#define PHY_BB_TX_GAIN_TAB_PAL_15_OFFSET 0x0000a4b8
+#define PHY_BB_TX_GAIN_TAB_PAL_15_TG_TABLE15_PAL_ON_MSB 31
+#define PHY_BB_TX_GAIN_TAB_PAL_15_TG_TABLE15_PAL_ON_LSB 0
+#define PHY_BB_TX_GAIN_TAB_PAL_15_TG_TABLE15_PAL_ON_MASK 0xffffffff
+#define PHY_BB_TX_GAIN_TAB_PAL_15_TG_TABLE15_PAL_ON_GET(x) (((x) & 0xffffffff) >> 0)
+#define PHY_BB_TX_GAIN_TAB_PAL_15_TG_TABLE15_PAL_ON_SET(x) (((x) << 0) & 0xffffffff)
+
+/* macros for BB_tx_gain_tab_pal_16 */
+#define PHY_BB_TX_GAIN_TAB_PAL_16_ADDRESS 0x0000a4bc
+#define PHY_BB_TX_GAIN_TAB_PAL_16_OFFSET 0x0000a4bc
+#define PHY_BB_TX_GAIN_TAB_PAL_16_TG_TABLE16_PAL_ON_MSB 31
+#define PHY_BB_TX_GAIN_TAB_PAL_16_TG_TABLE16_PAL_ON_LSB 0
+#define PHY_BB_TX_GAIN_TAB_PAL_16_TG_TABLE16_PAL_ON_MASK 0xffffffff
+#define PHY_BB_TX_GAIN_TAB_PAL_16_TG_TABLE16_PAL_ON_GET(x) (((x) & 0xffffffff) >> 0)
+#define PHY_BB_TX_GAIN_TAB_PAL_16_TG_TABLE16_PAL_ON_SET(x) (((x) << 0) & 0xffffffff)
+
+/* macros for BB_tx_gain_tab_pal_17 */
+#define PHY_BB_TX_GAIN_TAB_PAL_17_ADDRESS 0x0000a4c0
+#define PHY_BB_TX_GAIN_TAB_PAL_17_OFFSET 0x0000a4c0
+#define PHY_BB_TX_GAIN_TAB_PAL_17_TG_TABLE17_PAL_ON_MSB 31
+#define PHY_BB_TX_GAIN_TAB_PAL_17_TG_TABLE17_PAL_ON_LSB 0
+#define PHY_BB_TX_GAIN_TAB_PAL_17_TG_TABLE17_PAL_ON_MASK 0xffffffff
+#define PHY_BB_TX_GAIN_TAB_PAL_17_TG_TABLE17_PAL_ON_GET(x) (((x) & 0xffffffff) >> 0)
+#define PHY_BB_TX_GAIN_TAB_PAL_17_TG_TABLE17_PAL_ON_SET(x) (((x) << 0) & 0xffffffff)
+
+/* macros for BB_tx_gain_tab_pal_18 */
+#define PHY_BB_TX_GAIN_TAB_PAL_18_ADDRESS 0x0000a4c4
+#define PHY_BB_TX_GAIN_TAB_PAL_18_OFFSET 0x0000a4c4
+#define PHY_BB_TX_GAIN_TAB_PAL_18_TG_TABLE18_PAL_ON_MSB 31
+#define PHY_BB_TX_GAIN_TAB_PAL_18_TG_TABLE18_PAL_ON_LSB 0
+#define PHY_BB_TX_GAIN_TAB_PAL_18_TG_TABLE18_PAL_ON_MASK 0xffffffff
+#define PHY_BB_TX_GAIN_TAB_PAL_18_TG_TABLE18_PAL_ON_GET(x) (((x) & 0xffffffff) >> 0)
+#define PHY_BB_TX_GAIN_TAB_PAL_18_TG_TABLE18_PAL_ON_SET(x) (((x) << 0) & 0xffffffff)
+
+/* macros for BB_tx_gain_tab_pal_19 */
+#define PHY_BB_TX_GAIN_TAB_PAL_19_ADDRESS 0x0000a4c8
+#define PHY_BB_TX_GAIN_TAB_PAL_19_OFFSET 0x0000a4c8
+#define PHY_BB_TX_GAIN_TAB_PAL_19_TG_TABLE19_PAL_ON_MSB 31
+#define PHY_BB_TX_GAIN_TAB_PAL_19_TG_TABLE19_PAL_ON_LSB 0
+#define PHY_BB_TX_GAIN_TAB_PAL_19_TG_TABLE19_PAL_ON_MASK 0xffffffff
+#define PHY_BB_TX_GAIN_TAB_PAL_19_TG_TABLE19_PAL_ON_GET(x) (((x) & 0xffffffff) >> 0)
+#define PHY_BB_TX_GAIN_TAB_PAL_19_TG_TABLE19_PAL_ON_SET(x) (((x) << 0) & 0xffffffff)
+
+/* macros for BB_tx_gain_tab_pal_20 */
+#define PHY_BB_TX_GAIN_TAB_PAL_20_ADDRESS 0x0000a4cc
+#define PHY_BB_TX_GAIN_TAB_PAL_20_OFFSET 0x0000a4cc
+#define PHY_BB_TX_GAIN_TAB_PAL_20_TG_TABLE20_PAL_ON_MSB 31
+#define PHY_BB_TX_GAIN_TAB_PAL_20_TG_TABLE20_PAL_ON_LSB 0
+#define PHY_BB_TX_GAIN_TAB_PAL_20_TG_TABLE20_PAL_ON_MASK 0xffffffff
+#define PHY_BB_TX_GAIN_TAB_PAL_20_TG_TABLE20_PAL_ON_GET(x) (((x) & 0xffffffff) >> 0)
+#define PHY_BB_TX_GAIN_TAB_PAL_20_TG_TABLE20_PAL_ON_SET(x) (((x) << 0) & 0xffffffff)
+
+/* macros for BB_tx_gain_tab_pal_21 */
+#define PHY_BB_TX_GAIN_TAB_PAL_21_ADDRESS 0x0000a4d0
+#define PHY_BB_TX_GAIN_TAB_PAL_21_OFFSET 0x0000a4d0
+#define PHY_BB_TX_GAIN_TAB_PAL_21_TG_TABLE21_PAL_ON_MSB 31
+#define PHY_BB_TX_GAIN_TAB_PAL_21_TG_TABLE21_PAL_ON_LSB 0
+#define PHY_BB_TX_GAIN_TAB_PAL_21_TG_TABLE21_PAL_ON_MASK 0xffffffff
+#define PHY_BB_TX_GAIN_TAB_PAL_21_TG_TABLE21_PAL_ON_GET(x) (((x) & 0xffffffff) >> 0)
+#define PHY_BB_TX_GAIN_TAB_PAL_21_TG_TABLE21_PAL_ON_SET(x) (((x) << 0) & 0xffffffff)
+
+/* macros for BB_tx_gain_tab_pal_22 */
+#define PHY_BB_TX_GAIN_TAB_PAL_22_ADDRESS 0x0000a4d4
+#define PHY_BB_TX_GAIN_TAB_PAL_22_OFFSET 0x0000a4d4
+#define PHY_BB_TX_GAIN_TAB_PAL_22_TG_TABLE22_PAL_ON_MSB 31
+#define PHY_BB_TX_GAIN_TAB_PAL_22_TG_TABLE22_PAL_ON_LSB 0
+#define PHY_BB_TX_GAIN_TAB_PAL_22_TG_TABLE22_PAL_ON_MASK 0xffffffff
+#define PHY_BB_TX_GAIN_TAB_PAL_22_TG_TABLE22_PAL_ON_GET(x) (((x) & 0xffffffff) >> 0)
+#define PHY_BB_TX_GAIN_TAB_PAL_22_TG_TABLE22_PAL_ON_SET(x) (((x) << 0) & 0xffffffff)
+
+/* macros for BB_tx_gain_tab_pal_23 */
+#define PHY_BB_TX_GAIN_TAB_PAL_23_ADDRESS 0x0000a4d8
+#define PHY_BB_TX_GAIN_TAB_PAL_23_OFFSET 0x0000a4d8
+#define PHY_BB_TX_GAIN_TAB_PAL_23_TG_TABLE23_PAL_ON_MSB 31
+#define PHY_BB_TX_GAIN_TAB_PAL_23_TG_TABLE23_PAL_ON_LSB 0
+#define PHY_BB_TX_GAIN_TAB_PAL_23_TG_TABLE23_PAL_ON_MASK 0xffffffff
+#define PHY_BB_TX_GAIN_TAB_PAL_23_TG_TABLE23_PAL_ON_GET(x) (((x) & 0xffffffff) >> 0)
+#define PHY_BB_TX_GAIN_TAB_PAL_23_TG_TABLE23_PAL_ON_SET(x) (((x) << 0) & 0xffffffff)
+
+/* macros for BB_tx_gain_tab_pal_24 */
+#define PHY_BB_TX_GAIN_TAB_PAL_24_ADDRESS 0x0000a4dc
+#define PHY_BB_TX_GAIN_TAB_PAL_24_OFFSET 0x0000a4dc
+#define PHY_BB_TX_GAIN_TAB_PAL_24_TG_TABLE24_PAL_ON_MSB 31
+#define PHY_BB_TX_GAIN_TAB_PAL_24_TG_TABLE24_PAL_ON_LSB 0
+#define PHY_BB_TX_GAIN_TAB_PAL_24_TG_TABLE24_PAL_ON_MASK 0xffffffff
+#define PHY_BB_TX_GAIN_TAB_PAL_24_TG_TABLE24_PAL_ON_GET(x) (((x) & 0xffffffff) >> 0)
+#define PHY_BB_TX_GAIN_TAB_PAL_24_TG_TABLE24_PAL_ON_SET(x) (((x) << 0) & 0xffffffff)
+
+/* macros for BB_tx_gain_tab_pal_25 */
+#define PHY_BB_TX_GAIN_TAB_PAL_25_ADDRESS 0x0000a4e0
+#define PHY_BB_TX_GAIN_TAB_PAL_25_OFFSET 0x0000a4e0
+#define PHY_BB_TX_GAIN_TAB_PAL_25_TG_TABLE25_PAL_ON_MSB 31
+#define PHY_BB_TX_GAIN_TAB_PAL_25_TG_TABLE25_PAL_ON_LSB 0
+#define PHY_BB_TX_GAIN_TAB_PAL_25_TG_TABLE25_PAL_ON_MASK 0xffffffff
+#define PHY_BB_TX_GAIN_TAB_PAL_25_TG_TABLE25_PAL_ON_GET(x) (((x) & 0xffffffff) >> 0)
+#define PHY_BB_TX_GAIN_TAB_PAL_25_TG_TABLE25_PAL_ON_SET(x) (((x) << 0) & 0xffffffff)
+
+/* macros for BB_tx_gain_tab_pal_26 */
+#define PHY_BB_TX_GAIN_TAB_PAL_26_ADDRESS 0x0000a4e4
+#define PHY_BB_TX_GAIN_TAB_PAL_26_OFFSET 0x0000a4e4
+#define PHY_BB_TX_GAIN_TAB_PAL_26_TG_TABLE26_PAL_ON_MSB 31
+#define PHY_BB_TX_GAIN_TAB_PAL_26_TG_TABLE26_PAL_ON_LSB 0
+#define PHY_BB_TX_GAIN_TAB_PAL_26_TG_TABLE26_PAL_ON_MASK 0xffffffff
+#define PHY_BB_TX_GAIN_TAB_PAL_26_TG_TABLE26_PAL_ON_GET(x) (((x) & 0xffffffff) >> 0)
+#define PHY_BB_TX_GAIN_TAB_PAL_26_TG_TABLE26_PAL_ON_SET(x) (((x) << 0) & 0xffffffff)
+
+/* macros for BB_tx_gain_tab_pal_27 */
+#define PHY_BB_TX_GAIN_TAB_PAL_27_ADDRESS 0x0000a4e8
+#define PHY_BB_TX_GAIN_TAB_PAL_27_OFFSET 0x0000a4e8
+#define PHY_BB_TX_GAIN_TAB_PAL_27_TG_TABLE27_PAL_ON_MSB 31
+#define PHY_BB_TX_GAIN_TAB_PAL_27_TG_TABLE27_PAL_ON_LSB 0
+#define PHY_BB_TX_GAIN_TAB_PAL_27_TG_TABLE27_PAL_ON_MASK 0xffffffff
+#define PHY_BB_TX_GAIN_TAB_PAL_27_TG_TABLE27_PAL_ON_GET(x) (((x) & 0xffffffff) >> 0)
+#define PHY_BB_TX_GAIN_TAB_PAL_27_TG_TABLE27_PAL_ON_SET(x) (((x) << 0) & 0xffffffff)
+
+/* macros for BB_tx_gain_tab_pal_28 */
+#define PHY_BB_TX_GAIN_TAB_PAL_28_ADDRESS 0x0000a4ec
+#define PHY_BB_TX_GAIN_TAB_PAL_28_OFFSET 0x0000a4ec
+#define PHY_BB_TX_GAIN_TAB_PAL_28_TG_TABLE28_PAL_ON_MSB 31
+#define PHY_BB_TX_GAIN_TAB_PAL_28_TG_TABLE28_PAL_ON_LSB 0
+#define PHY_BB_TX_GAIN_TAB_PAL_28_TG_TABLE28_PAL_ON_MASK 0xffffffff
+#define PHY_BB_TX_GAIN_TAB_PAL_28_TG_TABLE28_PAL_ON_GET(x) (((x) & 0xffffffff) >> 0)
+#define PHY_BB_TX_GAIN_TAB_PAL_28_TG_TABLE28_PAL_ON_SET(x) (((x) << 0) & 0xffffffff)
+
+/* macros for BB_tx_gain_tab_pal_29 */
+#define PHY_BB_TX_GAIN_TAB_PAL_29_ADDRESS 0x0000a4f0
+#define PHY_BB_TX_GAIN_TAB_PAL_29_OFFSET 0x0000a4f0
+#define PHY_BB_TX_GAIN_TAB_PAL_29_TG_TABLE29_PAL_ON_MSB 31
+#define PHY_BB_TX_GAIN_TAB_PAL_29_TG_TABLE29_PAL_ON_LSB 0
+#define PHY_BB_TX_GAIN_TAB_PAL_29_TG_TABLE29_PAL_ON_MASK 0xffffffff
+#define PHY_BB_TX_GAIN_TAB_PAL_29_TG_TABLE29_PAL_ON_GET(x) (((x) & 0xffffffff) >> 0)
+#define PHY_BB_TX_GAIN_TAB_PAL_29_TG_TABLE29_PAL_ON_SET(x) (((x) << 0) & 0xffffffff)
+
+/* macros for BB_tx_gain_tab_pal_30 */
+#define PHY_BB_TX_GAIN_TAB_PAL_30_ADDRESS 0x0000a4f4
+#define PHY_BB_TX_GAIN_TAB_PAL_30_OFFSET 0x0000a4f4
+#define PHY_BB_TX_GAIN_TAB_PAL_30_TG_TABLE30_PAL_ON_MSB 31
+#define PHY_BB_TX_GAIN_TAB_PAL_30_TG_TABLE30_PAL_ON_LSB 0
+#define PHY_BB_TX_GAIN_TAB_PAL_30_TG_TABLE30_PAL_ON_MASK 0xffffffff
+#define PHY_BB_TX_GAIN_TAB_PAL_30_TG_TABLE30_PAL_ON_GET(x) (((x) & 0xffffffff) >> 0)
+#define PHY_BB_TX_GAIN_TAB_PAL_30_TG_TABLE30_PAL_ON_SET(x) (((x) << 0) & 0xffffffff)
+
+/* macros for BB_tx_gain_tab_pal_31 */
+#define PHY_BB_TX_GAIN_TAB_PAL_31_ADDRESS 0x0000a4f8
+#define PHY_BB_TX_GAIN_TAB_PAL_31_OFFSET 0x0000a4f8
+#define PHY_BB_TX_GAIN_TAB_PAL_31_TG_TABLE31_PAL_ON_MSB 31
+#define PHY_BB_TX_GAIN_TAB_PAL_31_TG_TABLE31_PAL_ON_LSB 0
+#define PHY_BB_TX_GAIN_TAB_PAL_31_TG_TABLE31_PAL_ON_MASK 0xffffffff
+#define PHY_BB_TX_GAIN_TAB_PAL_31_TG_TABLE31_PAL_ON_GET(x) (((x) & 0xffffffff) >> 0)
+#define PHY_BB_TX_GAIN_TAB_PAL_31_TG_TABLE31_PAL_ON_SET(x) (((x) << 0) & 0xffffffff)
+
+/* macros for BB_tx_gain_tab_pal_32 */
+#define PHY_BB_TX_GAIN_TAB_PAL_32_ADDRESS 0x0000a4fc
+#define PHY_BB_TX_GAIN_TAB_PAL_32_OFFSET 0x0000a4fc
+#define PHY_BB_TX_GAIN_TAB_PAL_32_TG_TABLE32_PAL_ON_MSB 31
+#define PHY_BB_TX_GAIN_TAB_PAL_32_TG_TABLE32_PAL_ON_LSB 0
+#define PHY_BB_TX_GAIN_TAB_PAL_32_TG_TABLE32_PAL_ON_MASK 0xffffffff
+#define PHY_BB_TX_GAIN_TAB_PAL_32_TG_TABLE32_PAL_ON_GET(x) (((x) & 0xffffffff) >> 0)
+#define PHY_BB_TX_GAIN_TAB_PAL_32_TG_TABLE32_PAL_ON_SET(x) (((x) << 0) & 0xffffffff)
+
+/* macros for BB_caltx_gain_set_0 */
+#define PHY_BB_CALTX_GAIN_SET_0_ADDRESS 0x0000a518
+#define PHY_BB_CALTX_GAIN_SET_0_OFFSET 0x0000a518
+#define PHY_BB_CALTX_GAIN_SET_0_CALTX_GAIN_SET_0_MSB 13
+#define PHY_BB_CALTX_GAIN_SET_0_CALTX_GAIN_SET_0_LSB 0
+#define PHY_BB_CALTX_GAIN_SET_0_CALTX_GAIN_SET_0_MASK 0x00003fff
+#define PHY_BB_CALTX_GAIN_SET_0_CALTX_GAIN_SET_0_GET(x) (((x) & 0x00003fff) >> 0)
+#define PHY_BB_CALTX_GAIN_SET_0_CALTX_GAIN_SET_0_SET(x) (((x) << 0) & 0x00003fff)
+#define PHY_BB_CALTX_GAIN_SET_0_CALTX_GAIN_SET_1_MSB 27
+#define PHY_BB_CALTX_GAIN_SET_0_CALTX_GAIN_SET_1_LSB 14
+#define PHY_BB_CALTX_GAIN_SET_0_CALTX_GAIN_SET_1_MASK 0x0fffc000
+#define PHY_BB_CALTX_GAIN_SET_0_CALTX_GAIN_SET_1_GET(x) (((x) & 0x0fffc000) >> 14)
+#define PHY_BB_CALTX_GAIN_SET_0_CALTX_GAIN_SET_1_SET(x) (((x) << 14) & 0x0fffc000)
+
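+/*
+ * Illustrative sketch (hypothetical, not generated content): each
+ * BB_caltx_gain_set_N register packs two 14-bit entries, so consecutive
+ * calibration gains are combined into one 32-bit word before writing.
+ * gain0/gain1 and reg_write() are assumed placeholder names.
+ *
+ *   u32 word = PHY_BB_CALTX_GAIN_SET_0_CALTX_GAIN_SET_0_SET(gain0) |
+ *              PHY_BB_CALTX_GAIN_SET_0_CALTX_GAIN_SET_1_SET(gain1);
+ *   reg_write(PHY_BB_CALTX_GAIN_SET_0_ADDRESS, word);
+ */
+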
+/* macros for BB_caltx_gain_set_2 */
+#define PHY_BB_CALTX_GAIN_SET_2_ADDRESS 0x0000a51c
+#define PHY_BB_CALTX_GAIN_SET_2_OFFSET 0x0000a51c
+#define PHY_BB_CALTX_GAIN_SET_2_CALTX_GAIN_SET_2_MSB 13
+#define PHY_BB_CALTX_GAIN_SET_2_CALTX_GAIN_SET_2_LSB 0
+#define PHY_BB_CALTX_GAIN_SET_2_CALTX_GAIN_SET_2_MASK 0x00003fff
+#define PHY_BB_CALTX_GAIN_SET_2_CALTX_GAIN_SET_2_GET(x) (((x) & 0x00003fff) >> 0)
+#define PHY_BB_CALTX_GAIN_SET_2_CALTX_GAIN_SET_2_SET(x) (((x) << 0) & 0x00003fff)
+#define PHY_BB_CALTX_GAIN_SET_2_CALTX_GAIN_SET_3_MSB 27
+#define PHY_BB_CALTX_GAIN_SET_2_CALTX_GAIN_SET_3_LSB 14
+#define PHY_BB_CALTX_GAIN_SET_2_CALTX_GAIN_SET_3_MASK 0x0fffc000
+#define PHY_BB_CALTX_GAIN_SET_2_CALTX_GAIN_SET_3_GET(x) (((x) & 0x0fffc000) >> 14)
+#define PHY_BB_CALTX_GAIN_SET_2_CALTX_GAIN_SET_3_SET(x) (((x) << 14) & 0x0fffc000)
+
+/* macros for BB_caltx_gain_set_4 */
+#define PHY_BB_CALTX_GAIN_SET_4_ADDRESS 0x0000a520
+#define PHY_BB_CALTX_GAIN_SET_4_OFFSET 0x0000a520
+#define PHY_BB_CALTX_GAIN_SET_4_CALTX_GAIN_SET_4_MSB 13
+#define PHY_BB_CALTX_GAIN_SET_4_CALTX_GAIN_SET_4_LSB 0
+#define PHY_BB_CALTX_GAIN_SET_4_CALTX_GAIN_SET_4_MASK 0x00003fff
+#define PHY_BB_CALTX_GAIN_SET_4_CALTX_GAIN_SET_4_GET(x) (((x) & 0x00003fff) >> 0)
+#define PHY_BB_CALTX_GAIN_SET_4_CALTX_GAIN_SET_4_SET(x) (((x) << 0) & 0x00003fff)
+#define PHY_BB_CALTX_GAIN_SET_4_CALTX_GAIN_SET_5_MSB 27
+#define PHY_BB_CALTX_GAIN_SET_4_CALTX_GAIN_SET_5_LSB 14
+#define PHY_BB_CALTX_GAIN_SET_4_CALTX_GAIN_SET_5_MASK 0x0fffc000
+#define PHY_BB_CALTX_GAIN_SET_4_CALTX_GAIN_SET_5_GET(x) (((x) & 0x0fffc000) >> 14)
+#define PHY_BB_CALTX_GAIN_SET_4_CALTX_GAIN_SET_5_SET(x) (((x) << 14) & 0x0fffc000)
+
+/* macros for BB_caltx_gain_set_6 */
+#define PHY_BB_CALTX_GAIN_SET_6_ADDRESS 0x0000a524
+#define PHY_BB_CALTX_GAIN_SET_6_OFFSET 0x0000a524
+#define PHY_BB_CALTX_GAIN_SET_6_CALTX_GAIN_SET_6_MSB 13
+#define PHY_BB_CALTX_GAIN_SET_6_CALTX_GAIN_SET_6_LSB 0
+#define PHY_BB_CALTX_GAIN_SET_6_CALTX_GAIN_SET_6_MASK 0x00003fff
+#define PHY_BB_CALTX_GAIN_SET_6_CALTX_GAIN_SET_6_GET(x) (((x) & 0x00003fff) >> 0)
+#define PHY_BB_CALTX_GAIN_SET_6_CALTX_GAIN_SET_6_SET(x) (((x) << 0) & 0x00003fff)
+#define PHY_BB_CALTX_GAIN_SET_6_CALTX_GAIN_SET_7_MSB 27
+#define PHY_BB_CALTX_GAIN_SET_6_CALTX_GAIN_SET_7_LSB 14
+#define PHY_BB_CALTX_GAIN_SET_6_CALTX_GAIN_SET_7_MASK 0x0fffc000
+#define PHY_BB_CALTX_GAIN_SET_6_CALTX_GAIN_SET_7_GET(x) (((x) & 0x0fffc000) >> 14)
+#define PHY_BB_CALTX_GAIN_SET_6_CALTX_GAIN_SET_7_SET(x) (((x) << 14) & 0x0fffc000)
+
+/* macros for BB_caltx_gain_set_8 */
+#define PHY_BB_CALTX_GAIN_SET_8_ADDRESS 0x0000a528
+#define PHY_BB_CALTX_GAIN_SET_8_OFFSET 0x0000a528
+#define PHY_BB_CALTX_GAIN_SET_8_CALTX_GAIN_SET_8_MSB 13
+#define PHY_BB_CALTX_GAIN_SET_8_CALTX_GAIN_SET_8_LSB 0
+#define PHY_BB_CALTX_GAIN_SET_8_CALTX_GAIN_SET_8_MASK 0x00003fff
+#define PHY_BB_CALTX_GAIN_SET_8_CALTX_GAIN_SET_8_GET(x) (((x) & 0x00003fff) >> 0)
+#define PHY_BB_CALTX_GAIN_SET_8_CALTX_GAIN_SET_8_SET(x) (((x) << 0) & 0x00003fff)
+#define PHY_BB_CALTX_GAIN_SET_8_CALTX_GAIN_SET_9_MSB 27
+#define PHY_BB_CALTX_GAIN_SET_8_CALTX_GAIN_SET_9_LSB 14
+#define PHY_BB_CALTX_GAIN_SET_8_CALTX_GAIN_SET_9_MASK 0x0fffc000
+#define PHY_BB_CALTX_GAIN_SET_8_CALTX_GAIN_SET_9_GET(x) (((x) & 0x0fffc000) >> 14)
+#define PHY_BB_CALTX_GAIN_SET_8_CALTX_GAIN_SET_9_SET(x) (((x) << 14) & 0x0fffc000)
+
+/* macros for BB_caltx_gain_set_10 */
+#define PHY_BB_CALTX_GAIN_SET_10_ADDRESS 0x0000a52c
+#define PHY_BB_CALTX_GAIN_SET_10_OFFSET 0x0000a52c
+#define PHY_BB_CALTX_GAIN_SET_10_CALTX_GAIN_SET_10_MSB 13
+#define PHY_BB_CALTX_GAIN_SET_10_CALTX_GAIN_SET_10_LSB 0
+#define PHY_BB_CALTX_GAIN_SET_10_CALTX_GAIN_SET_10_MASK 0x00003fff
+#define PHY_BB_CALTX_GAIN_SET_10_CALTX_GAIN_SET_10_GET(x) (((x) & 0x00003fff) >> 0)
+#define PHY_BB_CALTX_GAIN_SET_10_CALTX_GAIN_SET_10_SET(x) (((x) << 0) & 0x00003fff)
+#define PHY_BB_CALTX_GAIN_SET_10_CALTX_GAIN_SET_11_MSB 27
+#define PHY_BB_CALTX_GAIN_SET_10_CALTX_GAIN_SET_11_LSB 14
+#define PHY_BB_CALTX_GAIN_SET_10_CALTX_GAIN_SET_11_MASK 0x0fffc000
+#define PHY_BB_CALTX_GAIN_SET_10_CALTX_GAIN_SET_11_GET(x) (((x) & 0x0fffc000) >> 14)
+#define PHY_BB_CALTX_GAIN_SET_10_CALTX_GAIN_SET_11_SET(x) (((x) << 14) & 0x0fffc000)
+
+/* macros for BB_caltx_gain_set_12 */
+#define PHY_BB_CALTX_GAIN_SET_12_ADDRESS 0x0000a530
+#define PHY_BB_CALTX_GAIN_SET_12_OFFSET 0x0000a530
+#define PHY_BB_CALTX_GAIN_SET_12_CALTX_GAIN_SET_12_MSB 13
+#define PHY_BB_CALTX_GAIN_SET_12_CALTX_GAIN_SET_12_LSB 0
+#define PHY_BB_CALTX_GAIN_SET_12_CALTX_GAIN_SET_12_MASK 0x00003fff
+#define PHY_BB_CALTX_GAIN_SET_12_CALTX_GAIN_SET_12_GET(x) (((x) & 0x00003fff) >> 0)
+#define PHY_BB_CALTX_GAIN_SET_12_CALTX_GAIN_SET_12_SET(x) (((x) << 0) & 0x00003fff)
+#define PHY_BB_CALTX_GAIN_SET_12_CALTX_GAIN_SET_13_MSB 27
+#define PHY_BB_CALTX_GAIN_SET_12_CALTX_GAIN_SET_13_LSB 14
+#define PHY_BB_CALTX_GAIN_SET_12_CALTX_GAIN_SET_13_MASK 0x0fffc000
+#define PHY_BB_CALTX_GAIN_SET_12_CALTX_GAIN_SET_13_GET(x) (((x) & 0x0fffc000) >> 14)
+#define PHY_BB_CALTX_GAIN_SET_12_CALTX_GAIN_SET_13_SET(x) (((x) << 14) & 0x0fffc000)
+
+/* macros for BB_caltx_gain_set_14 */
+#define PHY_BB_CALTX_GAIN_SET_14_ADDRESS 0x0000a534
+#define PHY_BB_CALTX_GAIN_SET_14_OFFSET 0x0000a534
+#define PHY_BB_CALTX_GAIN_SET_14_CALTX_GAIN_SET_14_MSB 13
+#define PHY_BB_CALTX_GAIN_SET_14_CALTX_GAIN_SET_14_LSB 0
+#define PHY_BB_CALTX_GAIN_SET_14_CALTX_GAIN_SET_14_MASK 0x00003fff
+#define PHY_BB_CALTX_GAIN_SET_14_CALTX_GAIN_SET_14_GET(x) (((x) & 0x00003fff) >> 0)
+#define PHY_BB_CALTX_GAIN_SET_14_CALTX_GAIN_SET_14_SET(x) (((x) << 0) & 0x00003fff)
+#define PHY_BB_CALTX_GAIN_SET_14_CALTX_GAIN_SET_15_MSB 27
+#define PHY_BB_CALTX_GAIN_SET_14_CALTX_GAIN_SET_15_LSB 14
+#define PHY_BB_CALTX_GAIN_SET_14_CALTX_GAIN_SET_15_MASK 0x0fffc000
+#define PHY_BB_CALTX_GAIN_SET_14_CALTX_GAIN_SET_15_GET(x) (((x) & 0x0fffc000) >> 14)
+#define PHY_BB_CALTX_GAIN_SET_14_CALTX_GAIN_SET_15_SET(x) (((x) << 14) & 0x0fffc000)
+
+/* macros for BB_caltx_gain_set_16 */
+#define PHY_BB_CALTX_GAIN_SET_16_ADDRESS 0x0000a538
+#define PHY_BB_CALTX_GAIN_SET_16_OFFSET 0x0000a538
+#define PHY_BB_CALTX_GAIN_SET_16_CALTX_GAIN_SET_16_MSB 13
+#define PHY_BB_CALTX_GAIN_SET_16_CALTX_GAIN_SET_16_LSB 0
+#define PHY_BB_CALTX_GAIN_SET_16_CALTX_GAIN_SET_16_MASK 0x00003fff
+#define PHY_BB_CALTX_GAIN_SET_16_CALTX_GAIN_SET_16_GET(x) (((x) & 0x00003fff) >> 0)
+#define PHY_BB_CALTX_GAIN_SET_16_CALTX_GAIN_SET_16_SET(x) (((x) << 0) & 0x00003fff)
+#define PHY_BB_CALTX_GAIN_SET_16_CALTX_GAIN_SET_17_MSB 27
+#define PHY_BB_CALTX_GAIN_SET_16_CALTX_GAIN_SET_17_LSB 14
+#define PHY_BB_CALTX_GAIN_SET_16_CALTX_GAIN_SET_17_MASK 0x0fffc000
+#define PHY_BB_CALTX_GAIN_SET_16_CALTX_GAIN_SET_17_GET(x) (((x) & 0x0fffc000) >> 14)
+#define PHY_BB_CALTX_GAIN_SET_16_CALTX_GAIN_SET_17_SET(x) (((x) << 14) & 0x0fffc000)
+
+/* macros for BB_caltx_gain_set_18 */
+#define PHY_BB_CALTX_GAIN_SET_18_ADDRESS 0x0000a53c
+#define PHY_BB_CALTX_GAIN_SET_18_OFFSET 0x0000a53c
+#define PHY_BB_CALTX_GAIN_SET_18_CALTX_GAIN_SET_18_MSB 13
+#define PHY_BB_CALTX_GAIN_SET_18_CALTX_GAIN_SET_18_LSB 0
+#define PHY_BB_CALTX_GAIN_SET_18_CALTX_GAIN_SET_18_MASK 0x00003fff
+#define PHY_BB_CALTX_GAIN_SET_18_CALTX_GAIN_SET_18_GET(x) (((x) & 0x00003fff) >> 0)
+#define PHY_BB_CALTX_GAIN_SET_18_CALTX_GAIN_SET_18_SET(x) (((x) << 0) & 0x00003fff)
+#define PHY_BB_CALTX_GAIN_SET_18_CALTX_GAIN_SET_19_MSB 27
+#define PHY_BB_CALTX_GAIN_SET_18_CALTX_GAIN_SET_19_LSB 14
+#define PHY_BB_CALTX_GAIN_SET_18_CALTX_GAIN_SET_19_MASK 0x0fffc000
+#define PHY_BB_CALTX_GAIN_SET_18_CALTX_GAIN_SET_19_GET(x) (((x) & 0x0fffc000) >> 14)
+#define PHY_BB_CALTX_GAIN_SET_18_CALTX_GAIN_SET_19_SET(x) (((x) << 14) & 0x0fffc000)
+
+/* macros for BB_caltx_gain_set_20 */
+#define PHY_BB_CALTX_GAIN_SET_20_ADDRESS 0x0000a540
+#define PHY_BB_CALTX_GAIN_SET_20_OFFSET 0x0000a540
+#define PHY_BB_CALTX_GAIN_SET_20_CALTX_GAIN_SET_20_MSB 13
+#define PHY_BB_CALTX_GAIN_SET_20_CALTX_GAIN_SET_20_LSB 0
+#define PHY_BB_CALTX_GAIN_SET_20_CALTX_GAIN_SET_20_MASK 0x00003fff
+#define PHY_BB_CALTX_GAIN_SET_20_CALTX_GAIN_SET_20_GET(x) (((x) & 0x00003fff) >> 0)
+#define PHY_BB_CALTX_GAIN_SET_20_CALTX_GAIN_SET_20_SET(x) (((x) << 0) & 0x00003fff)
+#define PHY_BB_CALTX_GAIN_SET_20_CALTX_GAIN_SET_21_MSB 27
+#define PHY_BB_CALTX_GAIN_SET_20_CALTX_GAIN_SET_21_LSB 14
+#define PHY_BB_CALTX_GAIN_SET_20_CALTX_GAIN_SET_21_MASK 0x0fffc000
+#define PHY_BB_CALTX_GAIN_SET_20_CALTX_GAIN_SET_21_GET(x) (((x) & 0x0fffc000) >> 14)
+#define PHY_BB_CALTX_GAIN_SET_20_CALTX_GAIN_SET_21_SET(x) (((x) << 14) & 0x0fffc000)
+
+/* macros for BB_caltx_gain_set_22 */
+#define PHY_BB_CALTX_GAIN_SET_22_ADDRESS 0x0000a544
+#define PHY_BB_CALTX_GAIN_SET_22_OFFSET 0x0000a544
+#define PHY_BB_CALTX_GAIN_SET_22_CALTX_GAIN_SET_22_MSB 13
+#define PHY_BB_CALTX_GAIN_SET_22_CALTX_GAIN_SET_22_LSB 0
+#define PHY_BB_CALTX_GAIN_SET_22_CALTX_GAIN_SET_22_MASK 0x00003fff
+#define PHY_BB_CALTX_GAIN_SET_22_CALTX_GAIN_SET_22_GET(x) (((x) & 0x00003fff) >> 0)
+#define PHY_BB_CALTX_GAIN_SET_22_CALTX_GAIN_SET_22_SET(x) (((x) << 0) & 0x00003fff)
+#define PHY_BB_CALTX_GAIN_SET_22_CALTX_GAIN_SET_23_MSB 27
+#define PHY_BB_CALTX_GAIN_SET_22_CALTX_GAIN_SET_23_LSB 14
+#define PHY_BB_CALTX_GAIN_SET_22_CALTX_GAIN_SET_23_MASK 0x0fffc000
+#define PHY_BB_CALTX_GAIN_SET_22_CALTX_GAIN_SET_23_GET(x) (((x) & 0x0fffc000) >> 14)
+#define PHY_BB_CALTX_GAIN_SET_22_CALTX_GAIN_SET_23_SET(x) (((x) << 14) & 0x0fffc000)
+
+/* macros for BB_caltx_gain_set_24 */
+#define PHY_BB_CALTX_GAIN_SET_24_ADDRESS 0x0000a548
+#define PHY_BB_CALTX_GAIN_SET_24_OFFSET 0x0000a548
+#define PHY_BB_CALTX_GAIN_SET_24_CALTX_GAIN_SET_24_MSB 13
+#define PHY_BB_CALTX_GAIN_SET_24_CALTX_GAIN_SET_24_LSB 0
+#define PHY_BB_CALTX_GAIN_SET_24_CALTX_GAIN_SET_24_MASK 0x00003fff
+#define PHY_BB_CALTX_GAIN_SET_24_CALTX_GAIN_SET_24_GET(x) (((x) & 0x00003fff) >> 0)
+#define PHY_BB_CALTX_GAIN_SET_24_CALTX_GAIN_SET_24_SET(x) (((x) << 0) & 0x00003fff)
+#define PHY_BB_CALTX_GAIN_SET_24_CALTX_GAIN_SET_25_MSB 27
+#define PHY_BB_CALTX_GAIN_SET_24_CALTX_GAIN_SET_25_LSB 14
+#define PHY_BB_CALTX_GAIN_SET_24_CALTX_GAIN_SET_25_MASK 0x0fffc000
+#define PHY_BB_CALTX_GAIN_SET_24_CALTX_GAIN_SET_25_GET(x) (((x) & 0x0fffc000) >> 14)
+#define PHY_BB_CALTX_GAIN_SET_24_CALTX_GAIN_SET_25_SET(x) (((x) << 14) & 0x0fffc000)
+
+/* macros for BB_caltx_gain_set_26 */
+#define PHY_BB_CALTX_GAIN_SET_26_ADDRESS 0x0000a54c
+#define PHY_BB_CALTX_GAIN_SET_26_OFFSET 0x0000a54c
+#define PHY_BB_CALTX_GAIN_SET_26_CALTX_GAIN_SET_26_MSB 13
+#define PHY_BB_CALTX_GAIN_SET_26_CALTX_GAIN_SET_26_LSB 0
+#define PHY_BB_CALTX_GAIN_SET_26_CALTX_GAIN_SET_26_MASK 0x00003fff
+#define PHY_BB_CALTX_GAIN_SET_26_CALTX_GAIN_SET_26_GET(x) (((x) & 0x00003fff) >> 0)
+#define PHY_BB_CALTX_GAIN_SET_26_CALTX_GAIN_SET_26_SET(x) (((x) << 0) & 0x00003fff)
+#define PHY_BB_CALTX_GAIN_SET_26_CALTX_GAIN_SET_27_MSB 27
+#define PHY_BB_CALTX_GAIN_SET_26_CALTX_GAIN_SET_27_LSB 14
+#define PHY_BB_CALTX_GAIN_SET_26_CALTX_GAIN_SET_27_MASK 0x0fffc000
+#define PHY_BB_CALTX_GAIN_SET_26_CALTX_GAIN_SET_27_GET(x) (((x) & 0x0fffc000) >> 14)
+#define PHY_BB_CALTX_GAIN_SET_26_CALTX_GAIN_SET_27_SET(x) (((x) << 14) & 0x0fffc000)
+
+/* macros for BB_caltx_gain_set_28 */
+#define PHY_BB_CALTX_GAIN_SET_28_ADDRESS 0x0000a550
+#define PHY_BB_CALTX_GAIN_SET_28_OFFSET 0x0000a550
+#define PHY_BB_CALTX_GAIN_SET_28_CALTX_GAIN_SET_28_MSB 13
+#define PHY_BB_CALTX_GAIN_SET_28_CALTX_GAIN_SET_28_LSB 0
+#define PHY_BB_CALTX_GAIN_SET_28_CALTX_GAIN_SET_28_MASK 0x00003fff
+#define PHY_BB_CALTX_GAIN_SET_28_CALTX_GAIN_SET_28_GET(x) (((x) & 0x00003fff) >> 0)
+#define PHY_BB_CALTX_GAIN_SET_28_CALTX_GAIN_SET_28_SET(x) (((x) << 0) & 0x00003fff)
+#define PHY_BB_CALTX_GAIN_SET_28_CALTX_GAIN_SET_29_MSB 27
+#define PHY_BB_CALTX_GAIN_SET_28_CALTX_GAIN_SET_29_LSB 14
+#define PHY_BB_CALTX_GAIN_SET_28_CALTX_GAIN_SET_29_MASK 0x0fffc000
+#define PHY_BB_CALTX_GAIN_SET_28_CALTX_GAIN_SET_29_GET(x) (((x) & 0x0fffc000) >> 14)
+#define PHY_BB_CALTX_GAIN_SET_28_CALTX_GAIN_SET_29_SET(x) (((x) << 14) & 0x0fffc000)
+
+/* macros for BB_caltx_gain_set_30 */
+#define PHY_BB_CALTX_GAIN_SET_30_ADDRESS 0x0000a554
+#define PHY_BB_CALTX_GAIN_SET_30_OFFSET 0x0000a554
+#define PHY_BB_CALTX_GAIN_SET_30_CALTX_GAIN_SET_30_MSB 13
+#define PHY_BB_CALTX_GAIN_SET_30_CALTX_GAIN_SET_30_LSB 0
+#define PHY_BB_CALTX_GAIN_SET_30_CALTX_GAIN_SET_30_MASK 0x00003fff
+#define PHY_BB_CALTX_GAIN_SET_30_CALTX_GAIN_SET_30_GET(x) (((x) & 0x00003fff) >> 0)
+#define PHY_BB_CALTX_GAIN_SET_30_CALTX_GAIN_SET_30_SET(x) (((x) << 0) & 0x00003fff)
+#define PHY_BB_CALTX_GAIN_SET_30_CALTX_GAIN_SET_31_MSB 27
+#define PHY_BB_CALTX_GAIN_SET_30_CALTX_GAIN_SET_31_LSB 14
+#define PHY_BB_CALTX_GAIN_SET_30_CALTX_GAIN_SET_31_MASK 0x0fffc000
+#define PHY_BB_CALTX_GAIN_SET_30_CALTX_GAIN_SET_31_GET(x) (((x) & 0x0fffc000) >> 14)
+#define PHY_BB_CALTX_GAIN_SET_30_CALTX_GAIN_SET_31_SET(x) (((x) << 14) & 0x0fffc000)
+
+/* macros for BB_txiqcal_meas_b0 */
+#define PHY_BB_TXIQCAL_MEAS_B0_ADDRESS 0x0000a558
+#define PHY_BB_TXIQCAL_MEAS_B0_OFFSET 0x0000a558
+#define PHY_BB_TXIQCAL_MEAS_B0_TXIQC_MEAS_DATA0_0_MSB 11
+#define PHY_BB_TXIQCAL_MEAS_B0_TXIQC_MEAS_DATA0_0_LSB 0
+#define PHY_BB_TXIQCAL_MEAS_B0_TXIQC_MEAS_DATA0_0_MASK 0x00000fff
+#define PHY_BB_TXIQCAL_MEAS_B0_TXIQC_MEAS_DATA0_0_GET(x) (((x) & 0x00000fff) >> 0)
+#define PHY_BB_TXIQCAL_MEAS_B0_TXIQC_MEAS_DATA1_0_MSB 23
+#define PHY_BB_TXIQCAL_MEAS_B0_TXIQC_MEAS_DATA1_0_LSB 12
+#define PHY_BB_TXIQCAL_MEAS_B0_TXIQC_MEAS_DATA1_0_MASK 0x00fff000
+#define PHY_BB_TXIQCAL_MEAS_B0_TXIQC_MEAS_DATA1_0_GET(x) (((x) & 0x00fff000) >> 12)
+
+/* macros for BB_txiqcal_start */
+#define PHY_BB_TXIQCAL_START_ADDRESS 0x0000a6d8
+#define PHY_BB_TXIQCAL_START_OFFSET 0x0000a6d8
+#define PHY_BB_TXIQCAL_START_DO_TX_IQCAL_MSB 0
+#define PHY_BB_TXIQCAL_START_DO_TX_IQCAL_LSB 0
+#define PHY_BB_TXIQCAL_START_DO_TX_IQCAL_MASK 0x00000001
+#define PHY_BB_TXIQCAL_START_DO_TX_IQCAL_GET(x) (((x) & 0x00000001) >> 0)
+#define PHY_BB_TXIQCAL_START_DO_TX_IQCAL_SET(x) (((x) << 0) & 0x00000001)
+
+/* macros for BB_txiqcal_control_0 */
+#define PHY_BB_TXIQCAL_CONTROL_0_ADDRESS 0x0000a6dc
+#define PHY_BB_TXIQCAL_CONTROL_0_OFFSET 0x0000a6dc
+#define PHY_BB_TXIQCAL_CONTROL_0_IQC_TX_TABLE_SEL_MSB 0
+#define PHY_BB_TXIQCAL_CONTROL_0_IQC_TX_TABLE_SEL_LSB 0
+#define PHY_BB_TXIQCAL_CONTROL_0_IQC_TX_TABLE_SEL_MASK 0x00000001
+#define PHY_BB_TXIQCAL_CONTROL_0_IQC_TX_TABLE_SEL_GET(x) (((x) & 0x00000001) >> 0)
+#define PHY_BB_TXIQCAL_CONTROL_0_IQC_TX_TABLE_SEL_SET(x) (((x) << 0) & 0x00000001)
+#define PHY_BB_TXIQCAL_CONTROL_0_BASE_TX_TONE_DB_MSB 6
+#define PHY_BB_TXIQCAL_CONTROL_0_BASE_TX_TONE_DB_LSB 1
+#define PHY_BB_TXIQCAL_CONTROL_0_BASE_TX_TONE_DB_MASK 0x0000007e
+#define PHY_BB_TXIQCAL_CONTROL_0_BASE_TX_TONE_DB_GET(x) (((x) & 0x0000007e) >> 1)
+#define PHY_BB_TXIQCAL_CONTROL_0_BASE_TX_TONE_DB_SET(x) (((x) << 1) & 0x0000007e)
+#define PHY_BB_TXIQCAL_CONTROL_0_MAX_TX_TONE_GAIN_MSB 12
+#define PHY_BB_TXIQCAL_CONTROL_0_MAX_TX_TONE_GAIN_LSB 7
+#define PHY_BB_TXIQCAL_CONTROL_0_MAX_TX_TONE_GAIN_MASK 0x00001f80
+#define PHY_BB_TXIQCAL_CONTROL_0_MAX_TX_TONE_GAIN_GET(x) (((x) & 0x00001f80) >> 7)
+#define PHY_BB_TXIQCAL_CONTROL_0_MAX_TX_TONE_GAIN_SET(x) (((x) << 7) & 0x00001f80)
+#define PHY_BB_TXIQCAL_CONTROL_0_MIN_TX_TONE_GAIN_MSB 18
+#define PHY_BB_TXIQCAL_CONTROL_0_MIN_TX_TONE_GAIN_LSB 13
+#define PHY_BB_TXIQCAL_CONTROL_0_MIN_TX_TONE_GAIN_MASK 0x0007e000
+#define PHY_BB_TXIQCAL_CONTROL_0_MIN_TX_TONE_GAIN_GET(x) (((x) & 0x0007e000) >> 13)
+#define PHY_BB_TXIQCAL_CONTROL_0_MIN_TX_TONE_GAIN_SET(x) (((x) << 13) & 0x0007e000)
+#define PHY_BB_TXIQCAL_CONTROL_0_CALTXSHIFT_DELAY_MSB 22
+#define PHY_BB_TXIQCAL_CONTROL_0_CALTXSHIFT_DELAY_LSB 19
+#define PHY_BB_TXIQCAL_CONTROL_0_CALTXSHIFT_DELAY_MASK 0x00780000
+#define PHY_BB_TXIQCAL_CONTROL_0_CALTXSHIFT_DELAY_GET(x) (((x) & 0x00780000) >> 19)
+#define PHY_BB_TXIQCAL_CONTROL_0_CALTXSHIFT_DELAY_SET(x) (((x) << 19) & 0x00780000)
+#define PHY_BB_TXIQCAL_CONTROL_0_LOOPBACK_DELAY_MSB 29
+#define PHY_BB_TXIQCAL_CONTROL_0_LOOPBACK_DELAY_LSB 23
+#define PHY_BB_TXIQCAL_CONTROL_0_LOOPBACK_DELAY_MASK 0x3f800000
+#define PHY_BB_TXIQCAL_CONTROL_0_LOOPBACK_DELAY_GET(x) (((x) & 0x3f800000) >> 23)
+#define PHY_BB_TXIQCAL_CONTROL_0_LOOPBACK_DELAY_SET(x) (((x) << 23) & 0x3f800000)
+
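+/*
+ * Illustrative sketch only (not part of the generated register map): one way
+ * the _SET and _ADDRESS macros above might be combined to program
+ * BB_txiqcal_control_0 and then trigger a Tx IQ calibration through
+ * BB_txiqcal_start.  example_reg_write() is a hypothetical MMIO accessor
+ * assumed purely for this example; the surrounding driver would supply its
+ * own register I/O helpers.
+ */
+#if 0	/* example usage, not compiled */
+extern void example_reg_write(unsigned int address, unsigned int value);
+
+static void example_start_tx_iqcal(void)
+{
+	unsigned int ctrl0;
+
+	/* Each _SET() masks its argument into the field's bit range, so the
+	 * per-field results can simply be OR'ed into one register word. */
+	ctrl0  = PHY_BB_TXIQCAL_CONTROL_0_IQC_TX_TABLE_SEL_SET(1);
+	ctrl0 |= PHY_BB_TXIQCAL_CONTROL_0_BASE_TX_TONE_DB_SET(10);
+	ctrl0 |= PHY_BB_TXIQCAL_CONTROL_0_MAX_TX_TONE_GAIN_SET(30);
+	ctrl0 |= PHY_BB_TXIQCAL_CONTROL_0_MIN_TX_TONE_GAIN_SET(5);
+	example_reg_write(PHY_BB_TXIQCAL_CONTROL_0_ADDRESS, ctrl0);
+
+	/* Kick off the calibration via the single-bit do_tx_IQcal field. */
+	example_reg_write(PHY_BB_TXIQCAL_START_ADDRESS,
+			  PHY_BB_TXIQCAL_START_DO_TX_IQCAL_SET(1));
+}
+#endif
+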
+/* macros for BB_txiqcal_control_1 */
+#define PHY_BB_TXIQCAL_CONTROL_1_ADDRESS 0x0000a6e0
+#define PHY_BB_TXIQCAL_CONTROL_1_OFFSET 0x0000a6e0
+#define PHY_BB_TXIQCAL_CONTROL_1_RX_INIT_GAIN_DB_MSB 5
+#define PHY_BB_TXIQCAL_CONTROL_1_RX_INIT_GAIN_DB_LSB 0
+#define PHY_BB_TXIQCAL_CONTROL_1_RX_INIT_GAIN_DB_MASK 0x0000003f
+#define PHY_BB_TXIQCAL_CONTROL_1_RX_INIT_GAIN_DB_GET(x) (((x) & 0x0000003f) >> 0)
+#define PHY_BB_TXIQCAL_CONTROL_1_RX_INIT_GAIN_DB_SET(x) (((x) << 0) & 0x0000003f)
+#define PHY_BB_TXIQCAL_CONTROL_1_MAX_RX_GAIN_DB_MSB 11
+#define PHY_BB_TXIQCAL_CONTROL_1_MAX_RX_GAIN_DB_LSB 6
+#define PHY_BB_TXIQCAL_CONTROL_1_MAX_RX_GAIN_DB_MASK 0x00000fc0
+#define PHY_BB_TXIQCAL_CONTROL_1_MAX_RX_GAIN_DB_GET(x) (((x) & 0x00000fc0) >> 6)
+#define PHY_BB_TXIQCAL_CONTROL_1_MAX_RX_GAIN_DB_SET(x) (((x) << 6) & 0x00000fc0)
+#define PHY_BB_TXIQCAL_CONTROL_1_MIN_RX_GAIN_DB_MSB 17
+#define PHY_BB_TXIQCAL_CONTROL_1_MIN_RX_GAIN_DB_LSB 12
+#define PHY_BB_TXIQCAL_CONTROL_1_MIN_RX_GAIN_DB_MASK 0x0003f000
+#define PHY_BB_TXIQCAL_CONTROL_1_MIN_RX_GAIN_DB_GET(x) (((x) & 0x0003f000) >> 12)
+#define PHY_BB_TXIQCAL_CONTROL_1_MIN_RX_GAIN_DB_SET(x) (((x) << 12) & 0x0003f000)
+#define PHY_BB_TXIQCAL_CONTROL_1_IQCORR_I_Q_COFF_DELPT_MSB 24
+#define PHY_BB_TXIQCAL_CONTROL_1_IQCORR_I_Q_COFF_DELPT_LSB 18
+#define PHY_BB_TXIQCAL_CONTROL_1_IQCORR_I_Q_COFF_DELPT_MASK 0x01fc0000
+#define PHY_BB_TXIQCAL_CONTROL_1_IQCORR_I_Q_COFF_DELPT_GET(x) (((x) & 0x01fc0000) >> 18)
+#define PHY_BB_TXIQCAL_CONTROL_1_IQCORR_I_Q_COFF_DELPT_SET(x) (((x) << 18) & 0x01fc0000)
+
+/* macros for BB_txiqcal_control_2 */
+#define PHY_BB_TXIQCAL_CONTROL_2_ADDRESS 0x0000a6e4
+#define PHY_BB_TXIQCAL_CONTROL_2_OFFSET 0x0000a6e4
+#define PHY_BB_TXIQCAL_CONTROL_2_IQC_FORCED_PAGAIN_MSB 3
+#define PHY_BB_TXIQCAL_CONTROL_2_IQC_FORCED_PAGAIN_LSB 0
+#define PHY_BB_TXIQCAL_CONTROL_2_IQC_FORCED_PAGAIN_MASK 0x0000000f
+#define PHY_BB_TXIQCAL_CONTROL_2_IQC_FORCED_PAGAIN_GET(x) (((x) & 0x0000000f) >> 0)
+#define PHY_BB_TXIQCAL_CONTROL_2_IQC_FORCED_PAGAIN_SET(x) (((x) << 0) & 0x0000000f)
+#define PHY_BB_TXIQCAL_CONTROL_2_IQCAL_MIN_TX_GAIN_MSB 8
+#define PHY_BB_TXIQCAL_CONTROL_2_IQCAL_MIN_TX_GAIN_LSB 4
+#define PHY_BB_TXIQCAL_CONTROL_2_IQCAL_MIN_TX_GAIN_MASK 0x000001f0
+#define PHY_BB_TXIQCAL_CONTROL_2_IQCAL_MIN_TX_GAIN_GET(x) (((x) & 0x000001f0) >> 4)
+#define PHY_BB_TXIQCAL_CONTROL_2_IQCAL_MIN_TX_GAIN_SET(x) (((x) << 4) & 0x000001f0)
+#define PHY_BB_TXIQCAL_CONTROL_2_IQCAL_MAX_TX_GAIN_MSB 13
+#define PHY_BB_TXIQCAL_CONTROL_2_IQCAL_MAX_TX_GAIN_LSB 9
+#define PHY_BB_TXIQCAL_CONTROL_2_IQCAL_MAX_TX_GAIN_MASK 0x00003e00
+#define PHY_BB_TXIQCAL_CONTROL_2_IQCAL_MAX_TX_GAIN_GET(x) (((x) & 0x00003e00) >> 9)
+#define PHY_BB_TXIQCAL_CONTROL_2_IQCAL_MAX_TX_GAIN_SET(x) (((x) << 9) & 0x00003e00)
+
+/* macros for BB_txiqcal_control_3 */
+#define PHY_BB_TXIQCAL_CONTROL_3_ADDRESS 0x0000a6e8
+#define PHY_BB_TXIQCAL_CONTROL_3_OFFSET 0x0000a6e8
+#define PHY_BB_TXIQCAL_CONTROL_3_PWR_HIGH_DB_MSB 5
+#define PHY_BB_TXIQCAL_CONTROL_3_PWR_HIGH_DB_LSB 0
+#define PHY_BB_TXIQCAL_CONTROL_3_PWR_HIGH_DB_MASK 0x0000003f
+#define PHY_BB_TXIQCAL_CONTROL_3_PWR_HIGH_DB_GET(x) (((x) & 0x0000003f) >> 0)
+#define PHY_BB_TXIQCAL_CONTROL_3_PWR_HIGH_DB_SET(x) (((x) << 0) & 0x0000003f)
+#define PHY_BB_TXIQCAL_CONTROL_3_PWR_LOW_DB_MSB 11
+#define PHY_BB_TXIQCAL_CONTROL_3_PWR_LOW_DB_LSB 6
+#define PHY_BB_TXIQCAL_CONTROL_3_PWR_LOW_DB_MASK 0x00000fc0
+#define PHY_BB_TXIQCAL_CONTROL_3_PWR_LOW_DB_GET(x) (((x) & 0x00000fc0) >> 6)
+#define PHY_BB_TXIQCAL_CONTROL_3_PWR_LOW_DB_SET(x) (((x) << 6) & 0x00000fc0)
+#define PHY_BB_TXIQCAL_CONTROL_3_IQCAL_TONE_PHS_STEP_MSB 21
+#define PHY_BB_TXIQCAL_CONTROL_3_IQCAL_TONE_PHS_STEP_LSB 12
+#define PHY_BB_TXIQCAL_CONTROL_3_IQCAL_TONE_PHS_STEP_MASK 0x003ff000
+#define PHY_BB_TXIQCAL_CONTROL_3_IQCAL_TONE_PHS_STEP_GET(x) (((x) & 0x003ff000) >> 12)
+#define PHY_BB_TXIQCAL_CONTROL_3_IQCAL_TONE_PHS_STEP_SET(x) (((x) << 12) & 0x003ff000)
+#define PHY_BB_TXIQCAL_CONTROL_3_DC_EST_LEN_MSB 23
+#define PHY_BB_TXIQCAL_CONTROL_3_DC_EST_LEN_LSB 22
+#define PHY_BB_TXIQCAL_CONTROL_3_DC_EST_LEN_MASK 0x00c00000
+#define PHY_BB_TXIQCAL_CONTROL_3_DC_EST_LEN_GET(x) (((x) & 0x00c00000) >> 22)
+#define PHY_BB_TXIQCAL_CONTROL_3_DC_EST_LEN_SET(x) (((x) << 22) & 0x00c00000)
+#define PHY_BB_TXIQCAL_CONTROL_3_ADC_SAT_LEN_MSB 24
+#define PHY_BB_TXIQCAL_CONTROL_3_ADC_SAT_LEN_LSB 24
+#define PHY_BB_TXIQCAL_CONTROL_3_ADC_SAT_LEN_MASK 0x01000000
+#define PHY_BB_TXIQCAL_CONTROL_3_ADC_SAT_LEN_GET(x) (((x) & 0x01000000) >> 24)
+#define PHY_BB_TXIQCAL_CONTROL_3_ADC_SAT_LEN_SET(x) (((x) << 24) & 0x01000000)
+#define PHY_BB_TXIQCAL_CONTROL_3_ADC_SAT_SEL_MSB 26
+#define PHY_BB_TXIQCAL_CONTROL_3_ADC_SAT_SEL_LSB 25
+#define PHY_BB_TXIQCAL_CONTROL_3_ADC_SAT_SEL_MASK 0x06000000
+#define PHY_BB_TXIQCAL_CONTROL_3_ADC_SAT_SEL_GET(x) (((x) & 0x06000000) >> 25)
+#define PHY_BB_TXIQCAL_CONTROL_3_ADC_SAT_SEL_SET(x) (((x) << 25) & 0x06000000)
+#define PHY_BB_TXIQCAL_CONTROL_3_IQCAL_MEAS_LEN_MSB 28
+#define PHY_BB_TXIQCAL_CONTROL_3_IQCAL_MEAS_LEN_LSB 27
+#define PHY_BB_TXIQCAL_CONTROL_3_IQCAL_MEAS_LEN_MASK 0x18000000
+#define PHY_BB_TXIQCAL_CONTROL_3_IQCAL_MEAS_LEN_GET(x) (((x) & 0x18000000) >> 27)
+#define PHY_BB_TXIQCAL_CONTROL_3_IQCAL_MEAS_LEN_SET(x) (((x) << 27) & 0x18000000)
+#define PHY_BB_TXIQCAL_CONTROL_3_DESIRED_SIZE_DB_MSB 30
+#define PHY_BB_TXIQCAL_CONTROL_3_DESIRED_SIZE_DB_LSB 29
+#define PHY_BB_TXIQCAL_CONTROL_3_DESIRED_SIZE_DB_MASK 0x60000000
+#define PHY_BB_TXIQCAL_CONTROL_3_DESIRED_SIZE_DB_GET(x) (((x) & 0x60000000) >> 29)
+#define PHY_BB_TXIQCAL_CONTROL_3_DESIRED_SIZE_DB_SET(x) (((x) << 29) & 0x60000000)
+#define PHY_BB_TXIQCAL_CONTROL_3_TX_IQCORR_EN_MSB 31
+#define PHY_BB_TXIQCAL_CONTROL_3_TX_IQCORR_EN_LSB 31
+#define PHY_BB_TXIQCAL_CONTROL_3_TX_IQCORR_EN_MASK 0x80000000
+#define PHY_BB_TXIQCAL_CONTROL_3_TX_IQCORR_EN_GET(x) (((x) & 0x80000000) >> 31)
+#define PHY_BB_TXIQCAL_CONTROL_3_TX_IQCORR_EN_SET(x) (((x) << 31) & 0x80000000)
+
+/* macros for BB_txiq_corr_coeff_01_b0 */
+#define PHY_BB_TXIQ_CORR_COEFF_01_B0_ADDRESS 0x0000a6ec
+#define PHY_BB_TXIQ_CORR_COEFF_01_B0_OFFSET 0x0000a6ec
+#define PHY_BB_TXIQ_CORR_COEFF_01_B0_IQC_COEFF_TABLE_0_0_MSB 13
+#define PHY_BB_TXIQ_CORR_COEFF_01_B0_IQC_COEFF_TABLE_0_0_LSB 0
+#define PHY_BB_TXIQ_CORR_COEFF_01_B0_IQC_COEFF_TABLE_0_0_MASK 0x00003fff
+#define PHY_BB_TXIQ_CORR_COEFF_01_B0_IQC_COEFF_TABLE_0_0_GET(x) (((x) & 0x00003fff) >> 0)
+#define PHY_BB_TXIQ_CORR_COEFF_01_B0_IQC_COEFF_TABLE_0_0_SET(x) (((x) << 0) & 0x00003fff)
+#define PHY_BB_TXIQ_CORR_COEFF_01_B0_IQC_COEFF_TABLE_1_0_MSB 27
+#define PHY_BB_TXIQ_CORR_COEFF_01_B0_IQC_COEFF_TABLE_1_0_LSB 14
+#define PHY_BB_TXIQ_CORR_COEFF_01_B0_IQC_COEFF_TABLE_1_0_MASK 0x0fffc000
+#define PHY_BB_TXIQ_CORR_COEFF_01_B0_IQC_COEFF_TABLE_1_0_GET(x) (((x) & 0x0fffc000) >> 14)
+#define PHY_BB_TXIQ_CORR_COEFF_01_B0_IQC_COEFF_TABLE_1_0_SET(x) (((x) << 14) & 0x0fffc000)
+
+/* macros for BB_txiq_corr_coeff_23_b0 */
+#define PHY_BB_TXIQ_CORR_COEFF_23_B0_ADDRESS 0x0000a6f0
+#define PHY_BB_TXIQ_CORR_COEFF_23_B0_OFFSET 0x0000a6f0
+#define PHY_BB_TXIQ_CORR_COEFF_23_B0_IQC_COEFF_TABLE_2_0_MSB 13
+#define PHY_BB_TXIQ_CORR_COEFF_23_B0_IQC_COEFF_TABLE_2_0_LSB 0
+#define PHY_BB_TXIQ_CORR_COEFF_23_B0_IQC_COEFF_TABLE_2_0_MASK 0x00003fff
+#define PHY_BB_TXIQ_CORR_COEFF_23_B0_IQC_COEFF_TABLE_2_0_GET(x) (((x) & 0x00003fff) >> 0)
+#define PHY_BB_TXIQ_CORR_COEFF_23_B0_IQC_COEFF_TABLE_2_0_SET(x) (((x) << 0) & 0x00003fff)
+#define PHY_BB_TXIQ_CORR_COEFF_23_B0_IQC_COEFF_TABLE_3_0_MSB 27
+#define PHY_BB_TXIQ_CORR_COEFF_23_B0_IQC_COEFF_TABLE_3_0_LSB 14
+#define PHY_BB_TXIQ_CORR_COEFF_23_B0_IQC_COEFF_TABLE_3_0_MASK 0x0fffc000
+#define PHY_BB_TXIQ_CORR_COEFF_23_B0_IQC_COEFF_TABLE_3_0_GET(x) (((x) & 0x0fffc000) >> 14)
+#define PHY_BB_TXIQ_CORR_COEFF_23_B0_IQC_COEFF_TABLE_3_0_SET(x) (((x) << 14) & 0x0fffc000)
+
+/* macros for BB_txiq_corr_coeff_45_b0 */
+#define PHY_BB_TXIQ_CORR_COEFF_45_B0_ADDRESS 0x0000a6f4
+#define PHY_BB_TXIQ_CORR_COEFF_45_B0_OFFSET 0x0000a6f4
+#define PHY_BB_TXIQ_CORR_COEFF_45_B0_IQC_COEFF_TABLE_4_0_MSB 13
+#define PHY_BB_TXIQ_CORR_COEFF_45_B0_IQC_COEFF_TABLE_4_0_LSB 0
+#define PHY_BB_TXIQ_CORR_COEFF_45_B0_IQC_COEFF_TABLE_4_0_MASK 0x00003fff
+#define PHY_BB_TXIQ_CORR_COEFF_45_B0_IQC_COEFF_TABLE_4_0_GET(x) (((x) & 0x00003fff) >> 0)
+#define PHY_BB_TXIQ_CORR_COEFF_45_B0_IQC_COEFF_TABLE_4_0_SET(x) (((x) << 0) & 0x00003fff)
+#define PHY_BB_TXIQ_CORR_COEFF_45_B0_IQC_COEFF_TABLE_5_0_MSB 27
+#define PHY_BB_TXIQ_CORR_COEFF_45_B0_IQC_COEFF_TABLE_5_0_LSB 14
+#define PHY_BB_TXIQ_CORR_COEFF_45_B0_IQC_COEFF_TABLE_5_0_MASK 0x0fffc000
+#define PHY_BB_TXIQ_CORR_COEFF_45_B0_IQC_COEFF_TABLE_5_0_GET(x) (((x) & 0x0fffc000) >> 14)
+#define PHY_BB_TXIQ_CORR_COEFF_45_B0_IQC_COEFF_TABLE_5_0_SET(x) (((x) << 14) & 0x0fffc000)
+
+/* macros for BB_txiq_corr_coeff_67_b0 */
+#define PHY_BB_TXIQ_CORR_COEFF_67_B0_ADDRESS 0x0000a6f8
+#define PHY_BB_TXIQ_CORR_COEFF_67_B0_OFFSET 0x0000a6f8
+#define PHY_BB_TXIQ_CORR_COEFF_67_B0_IQC_COEFF_TABLE_6_0_MSB 13
+#define PHY_BB_TXIQ_CORR_COEFF_67_B0_IQC_COEFF_TABLE_6_0_LSB 0
+#define PHY_BB_TXIQ_CORR_COEFF_67_B0_IQC_COEFF_TABLE_6_0_MASK 0x00003fff
+#define PHY_BB_TXIQ_CORR_COEFF_67_B0_IQC_COEFF_TABLE_6_0_GET(x) (((x) & 0x00003fff) >> 0)
+#define PHY_BB_TXIQ_CORR_COEFF_67_B0_IQC_COEFF_TABLE_6_0_SET(x) (((x) << 0) & 0x00003fff)
+#define PHY_BB_TXIQ_CORR_COEFF_67_B0_IQC_COEFF_TABLE_7_0_MSB 27
+#define PHY_BB_TXIQ_CORR_COEFF_67_B0_IQC_COEFF_TABLE_7_0_LSB 14
+#define PHY_BB_TXIQ_CORR_COEFF_67_B0_IQC_COEFF_TABLE_7_0_MASK 0x0fffc000
+#define PHY_BB_TXIQ_CORR_COEFF_67_B0_IQC_COEFF_TABLE_7_0_GET(x) (((x) & 0x0fffc000) >> 14)
+#define PHY_BB_TXIQ_CORR_COEFF_67_B0_IQC_COEFF_TABLE_7_0_SET(x) (((x) << 14) & 0x0fffc000)
+
+/* macros for BB_txiq_corr_coeff_89_b0 */
+#define PHY_BB_TXIQ_CORR_COEFF_89_B0_ADDRESS 0x0000a6fc
+#define PHY_BB_TXIQ_CORR_COEFF_89_B0_OFFSET 0x0000a6fc
+#define PHY_BB_TXIQ_CORR_COEFF_89_B0_IQC_COEFF_TABLE_8_0_MSB 13
+#define PHY_BB_TXIQ_CORR_COEFF_89_B0_IQC_COEFF_TABLE_8_0_LSB 0
+#define PHY_BB_TXIQ_CORR_COEFF_89_B0_IQC_COEFF_TABLE_8_0_MASK 0x00003fff
+#define PHY_BB_TXIQ_CORR_COEFF_89_B0_IQC_COEFF_TABLE_8_0_GET(x) (((x) & 0x00003fff) >> 0)
+#define PHY_BB_TXIQ_CORR_COEFF_89_B0_IQC_COEFF_TABLE_8_0_SET(x) (((x) << 0) & 0x00003fff)
+#define PHY_BB_TXIQ_CORR_COEFF_89_B0_IQC_COEFF_TABLE_9_0_MSB 27
+#define PHY_BB_TXIQ_CORR_COEFF_89_B0_IQC_COEFF_TABLE_9_0_LSB 14
+#define PHY_BB_TXIQ_CORR_COEFF_89_B0_IQC_COEFF_TABLE_9_0_MASK 0x0fffc000
+#define PHY_BB_TXIQ_CORR_COEFF_89_B0_IQC_COEFF_TABLE_9_0_GET(x) (((x) & 0x0fffc000) >> 14)
+#define PHY_BB_TXIQ_CORR_COEFF_89_B0_IQC_COEFF_TABLE_9_0_SET(x) (((x) << 14) & 0x0fffc000)
+
+/* macros for BB_txiq_corr_coeff_ab_b0 */
+#define PHY_BB_TXIQ_CORR_COEFF_AB_B0_ADDRESS 0x0000a700
+#define PHY_BB_TXIQ_CORR_COEFF_AB_B0_OFFSET 0x0000a700
+#define PHY_BB_TXIQ_CORR_COEFF_AB_B0_IQC_COEFF_TABLE_A_0_MSB 13
+#define PHY_BB_TXIQ_CORR_COEFF_AB_B0_IQC_COEFF_TABLE_A_0_LSB 0
+#define PHY_BB_TXIQ_CORR_COEFF_AB_B0_IQC_COEFF_TABLE_A_0_MASK 0x00003fff
+#define PHY_BB_TXIQ_CORR_COEFF_AB_B0_IQC_COEFF_TABLE_A_0_GET(x) (((x) & 0x00003fff) >> 0)
+#define PHY_BB_TXIQ_CORR_COEFF_AB_B0_IQC_COEFF_TABLE_A_0_SET(x) (((x) << 0) & 0x00003fff)
+#define PHY_BB_TXIQ_CORR_COEFF_AB_B0_IQC_COEFF_TABLE_B_0_MSB 27
+#define PHY_BB_TXIQ_CORR_COEFF_AB_B0_IQC_COEFF_TABLE_B_0_LSB 14
+#define PHY_BB_TXIQ_CORR_COEFF_AB_B0_IQC_COEFF_TABLE_B_0_MASK 0x0fffc000
+#define PHY_BB_TXIQ_CORR_COEFF_AB_B0_IQC_COEFF_TABLE_B_0_GET(x) (((x) & 0x0fffc000) >> 14)
+#define PHY_BB_TXIQ_CORR_COEFF_AB_B0_IQC_COEFF_TABLE_B_0_SET(x) (((x) << 14) & 0x0fffc000)
+
+/* macros for BB_txiq_corr_coeff_cd_b0 */
+#define PHY_BB_TXIQ_CORR_COEFF_CD_B0_ADDRESS 0x0000a704
+#define PHY_BB_TXIQ_CORR_COEFF_CD_B0_OFFSET 0x0000a704
+#define PHY_BB_TXIQ_CORR_COEFF_CD_B0_IQC_COEFF_TABLE_C_0_MSB 13
+#define PHY_BB_TXIQ_CORR_COEFF_CD_B0_IQC_COEFF_TABLE_C_0_LSB 0
+#define PHY_BB_TXIQ_CORR_COEFF_CD_B0_IQC_COEFF_TABLE_C_0_MASK 0x00003fff
+#define PHY_BB_TXIQ_CORR_COEFF_CD_B0_IQC_COEFF_TABLE_C_0_GET(x) (((x) & 0x00003fff) >> 0)
+#define PHY_BB_TXIQ_CORR_COEFF_CD_B0_IQC_COEFF_TABLE_C_0_SET(x) (((x) << 0) & 0x00003fff)
+#define PHY_BB_TXIQ_CORR_COEFF_CD_B0_IQC_COEFF_TABLE_D_0_MSB 27
+#define PHY_BB_TXIQ_CORR_COEFF_CD_B0_IQC_COEFF_TABLE_D_0_LSB 14
+#define PHY_BB_TXIQ_CORR_COEFF_CD_B0_IQC_COEFF_TABLE_D_0_MASK 0x0fffc000
+#define PHY_BB_TXIQ_CORR_COEFF_CD_B0_IQC_COEFF_TABLE_D_0_GET(x) (((x) & 0x0fffc000) >> 14)
+#define PHY_BB_TXIQ_CORR_COEFF_CD_B0_IQC_COEFF_TABLE_D_0_SET(x) (((x) << 14) & 0x0fffc000)
+
+/* macros for BB_txiq_corr_coeff_ef_b0 */
+#define PHY_BB_TXIQ_CORR_COEFF_EF_B0_ADDRESS 0x0000a708
+#define PHY_BB_TXIQ_CORR_COEFF_EF_B0_OFFSET 0x0000a708
+#define PHY_BB_TXIQ_CORR_COEFF_EF_B0_IQC_COEFF_TABLE_E_0_MSB 13
+#define PHY_BB_TXIQ_CORR_COEFF_EF_B0_IQC_COEFF_TABLE_E_0_LSB 0
+#define PHY_BB_TXIQ_CORR_COEFF_EF_B0_IQC_COEFF_TABLE_E_0_MASK 0x00003fff
+#define PHY_BB_TXIQ_CORR_COEFF_EF_B0_IQC_COEFF_TABLE_E_0_GET(x) (((x) & 0x00003fff) >> 0)
+#define PHY_BB_TXIQ_CORR_COEFF_EF_B0_IQC_COEFF_TABLE_E_0_SET(x) (((x) << 0) & 0x00003fff)
+#define PHY_BB_TXIQ_CORR_COEFF_EF_B0_IQC_COEFF_TABLE_F_0_MSB 27
+#define PHY_BB_TXIQ_CORR_COEFF_EF_B0_IQC_COEFF_TABLE_F_0_LSB 14
+#define PHY_BB_TXIQ_CORR_COEFF_EF_B0_IQC_COEFF_TABLE_F_0_MASK 0x0fffc000
+#define PHY_BB_TXIQ_CORR_COEFF_EF_B0_IQC_COEFF_TABLE_F_0_GET(x) (((x) & 0x0fffc000) >> 14)
+#define PHY_BB_TXIQ_CORR_COEFF_EF_B0_IQC_COEFF_TABLE_F_0_SET(x) (((x) << 14) & 0x0fffc000)
+
+/* macros for BB_cal_rxbb_gain_tbl_0 */
+#define PHY_BB_CAL_RXBB_GAIN_TBL_0_ADDRESS 0x0000a70c
+#define PHY_BB_CAL_RXBB_GAIN_TBL_0_OFFSET 0x0000a70c
+#define PHY_BB_CAL_RXBB_GAIN_TBL_0_TXCAL_RX_BB_GAIN_TABLE_0_MSB 5
+#define PHY_BB_CAL_RXBB_GAIN_TBL_0_TXCAL_RX_BB_GAIN_TABLE_0_LSB 0
+#define PHY_BB_CAL_RXBB_GAIN_TBL_0_TXCAL_RX_BB_GAIN_TABLE_0_MASK 0x0000003f
+#define PHY_BB_CAL_RXBB_GAIN_TBL_0_TXCAL_RX_BB_GAIN_TABLE_0_GET(x) (((x) & 0x0000003f) >> 0)
+#define PHY_BB_CAL_RXBB_GAIN_TBL_0_TXCAL_RX_BB_GAIN_TABLE_0_SET(x) (((x) << 0) & 0x0000003f)
+#define PHY_BB_CAL_RXBB_GAIN_TBL_0_TXCAL_RX_BB_GAIN_TABLE_1_MSB 11
+#define PHY_BB_CAL_RXBB_GAIN_TBL_0_TXCAL_RX_BB_GAIN_TABLE_1_LSB 6
+#define PHY_BB_CAL_RXBB_GAIN_TBL_0_TXCAL_RX_BB_GAIN_TABLE_1_MASK 0x00000fc0
+#define PHY_BB_CAL_RXBB_GAIN_TBL_0_TXCAL_RX_BB_GAIN_TABLE_1_GET(x) (((x) & 0x00000fc0) >> 6)
+#define PHY_BB_CAL_RXBB_GAIN_TBL_0_TXCAL_RX_BB_GAIN_TABLE_1_SET(x) (((x) << 6) & 0x00000fc0)
+#define PHY_BB_CAL_RXBB_GAIN_TBL_0_TXCAL_RX_BB_GAIN_TABLE_2_MSB 17
+#define PHY_BB_CAL_RXBB_GAIN_TBL_0_TXCAL_RX_BB_GAIN_TABLE_2_LSB 12
+#define PHY_BB_CAL_RXBB_GAIN_TBL_0_TXCAL_RX_BB_GAIN_TABLE_2_MASK 0x0003f000
+#define PHY_BB_CAL_RXBB_GAIN_TBL_0_TXCAL_RX_BB_GAIN_TABLE_2_GET(x) (((x) & 0x0003f000) >> 12)
+#define PHY_BB_CAL_RXBB_GAIN_TBL_0_TXCAL_RX_BB_GAIN_TABLE_2_SET(x) (((x) << 12) & 0x0003f000)
+#define PHY_BB_CAL_RXBB_GAIN_TBL_0_TXCAL_RX_BB_GAIN_TABLE_3_MSB 23
+#define PHY_BB_CAL_RXBB_GAIN_TBL_0_TXCAL_RX_BB_GAIN_TABLE_3_LSB 18
+#define PHY_BB_CAL_RXBB_GAIN_TBL_0_TXCAL_RX_BB_GAIN_TABLE_3_MASK 0x00fc0000
+#define PHY_BB_CAL_RXBB_GAIN_TBL_0_TXCAL_RX_BB_GAIN_TABLE_3_GET(x) (((x) & 0x00fc0000) >> 18)
+#define PHY_BB_CAL_RXBB_GAIN_TBL_0_TXCAL_RX_BB_GAIN_TABLE_3_SET(x) (((x) << 18) & 0x00fc0000)
+
+/* macros for BB_cal_rxbb_gain_tbl_4 */
+#define PHY_BB_CAL_RXBB_GAIN_TBL_4_ADDRESS 0x0000a710
+#define PHY_BB_CAL_RXBB_GAIN_TBL_4_OFFSET 0x0000a710
+#define PHY_BB_CAL_RXBB_GAIN_TBL_4_TXCAL_RX_BB_GAIN_TABLE_4_MSB 5
+#define PHY_BB_CAL_RXBB_GAIN_TBL_4_TXCAL_RX_BB_GAIN_TABLE_4_LSB 0
+#define PHY_BB_CAL_RXBB_GAIN_TBL_4_TXCAL_RX_BB_GAIN_TABLE_4_MASK 0x0000003f
+#define PHY_BB_CAL_RXBB_GAIN_TBL_4_TXCAL_RX_BB_GAIN_TABLE_4_GET(x) (((x) & 0x0000003f) >> 0)
+#define PHY_BB_CAL_RXBB_GAIN_TBL_4_TXCAL_RX_BB_GAIN_TABLE_4_SET(x) (((x) << 0) & 0x0000003f)
+#define PHY_BB_CAL_RXBB_GAIN_TBL_4_TXCAL_RX_BB_GAIN_TABLE_5_MSB 11
+#define PHY_BB_CAL_RXBB_GAIN_TBL_4_TXCAL_RX_BB_GAIN_TABLE_5_LSB 6
+#define PHY_BB_CAL_RXBB_GAIN_TBL_4_TXCAL_RX_BB_GAIN_TABLE_5_MASK 0x00000fc0
+#define PHY_BB_CAL_RXBB_GAIN_TBL_4_TXCAL_RX_BB_GAIN_TABLE_5_GET(x) (((x) & 0x00000fc0) >> 6)
+#define PHY_BB_CAL_RXBB_GAIN_TBL_4_TXCAL_RX_BB_GAIN_TABLE_5_SET(x) (((x) << 6) & 0x00000fc0)
+#define PHY_BB_CAL_RXBB_GAIN_TBL_4_TXCAL_RX_BB_GAIN_TABLE_6_MSB 17
+#define PHY_BB_CAL_RXBB_GAIN_TBL_4_TXCAL_RX_BB_GAIN_TABLE_6_LSB 12
+#define PHY_BB_CAL_RXBB_GAIN_TBL_4_TXCAL_RX_BB_GAIN_TABLE_6_MASK 0x0003f000
+#define PHY_BB_CAL_RXBB_GAIN_TBL_4_TXCAL_RX_BB_GAIN_TABLE_6_GET(x) (((x) & 0x0003f000) >> 12)
+#define PHY_BB_CAL_RXBB_GAIN_TBL_4_TXCAL_RX_BB_GAIN_TABLE_6_SET(x) (((x) << 12) & 0x0003f000)
+#define PHY_BB_CAL_RXBB_GAIN_TBL_4_TXCAL_RX_BB_GAIN_TABLE_7_MSB 23
+#define PHY_BB_CAL_RXBB_GAIN_TBL_4_TXCAL_RX_BB_GAIN_TABLE_7_LSB 18
+#define PHY_BB_CAL_RXBB_GAIN_TBL_4_TXCAL_RX_BB_GAIN_TABLE_7_MASK 0x00fc0000
+#define PHY_BB_CAL_RXBB_GAIN_TBL_4_TXCAL_RX_BB_GAIN_TABLE_7_GET(x) (((x) & 0x00fc0000) >> 18)
+#define PHY_BB_CAL_RXBB_GAIN_TBL_4_TXCAL_RX_BB_GAIN_TABLE_7_SET(x) (((x) << 18) & 0x00fc0000)
+
+/* macros for BB_cal_rxbb_gain_tbl_8 */
+#define PHY_BB_CAL_RXBB_GAIN_TBL_8_ADDRESS 0x0000a714
+#define PHY_BB_CAL_RXBB_GAIN_TBL_8_OFFSET 0x0000a714
+#define PHY_BB_CAL_RXBB_GAIN_TBL_8_TXCAL_RX_BB_GAIN_TABLE_8_MSB 5
+#define PHY_BB_CAL_RXBB_GAIN_TBL_8_TXCAL_RX_BB_GAIN_TABLE_8_LSB 0
+#define PHY_BB_CAL_RXBB_GAIN_TBL_8_TXCAL_RX_BB_GAIN_TABLE_8_MASK 0x0000003f
+#define PHY_BB_CAL_RXBB_GAIN_TBL_8_TXCAL_RX_BB_GAIN_TABLE_8_GET(x) (((x) & 0x0000003f) >> 0)
+#define PHY_BB_CAL_RXBB_GAIN_TBL_8_TXCAL_RX_BB_GAIN_TABLE_8_SET(x) (((x) << 0) & 0x0000003f)
+#define PHY_BB_CAL_RXBB_GAIN_TBL_8_TXCAL_RX_BB_GAIN_TABLE_9_MSB 11
+#define PHY_BB_CAL_RXBB_GAIN_TBL_8_TXCAL_RX_BB_GAIN_TABLE_9_LSB 6
+#define PHY_BB_CAL_RXBB_GAIN_TBL_8_TXCAL_RX_BB_GAIN_TABLE_9_MASK 0x00000fc0
+#define PHY_BB_CAL_RXBB_GAIN_TBL_8_TXCAL_RX_BB_GAIN_TABLE_9_GET(x) (((x) & 0x00000fc0) >> 6)
+#define PHY_BB_CAL_RXBB_GAIN_TBL_8_TXCAL_RX_BB_GAIN_TABLE_9_SET(x) (((x) << 6) & 0x00000fc0)
+#define PHY_BB_CAL_RXBB_GAIN_TBL_8_TXCAL_RX_BB_GAIN_TABLE_10_MSB 17
+#define PHY_BB_CAL_RXBB_GAIN_TBL_8_TXCAL_RX_BB_GAIN_TABLE_10_LSB 12
+#define PHY_BB_CAL_RXBB_GAIN_TBL_8_TXCAL_RX_BB_GAIN_TABLE_10_MASK 0x0003f000
+#define PHY_BB_CAL_RXBB_GAIN_TBL_8_TXCAL_RX_BB_GAIN_TABLE_10_GET(x) (((x) & 0x0003f000) >> 12)
+#define PHY_BB_CAL_RXBB_GAIN_TBL_8_TXCAL_RX_BB_GAIN_TABLE_10_SET(x) (((x) << 12) & 0x0003f000)
+#define PHY_BB_CAL_RXBB_GAIN_TBL_8_TXCAL_RX_BB_GAIN_TABLE_11_MSB 23
+#define PHY_BB_CAL_RXBB_GAIN_TBL_8_TXCAL_RX_BB_GAIN_TABLE_11_LSB 18
+#define PHY_BB_CAL_RXBB_GAIN_TBL_8_TXCAL_RX_BB_GAIN_TABLE_11_MASK 0x00fc0000
+#define PHY_BB_CAL_RXBB_GAIN_TBL_8_TXCAL_RX_BB_GAIN_TABLE_11_GET(x) (((x) & 0x00fc0000) >> 18)
+#define PHY_BB_CAL_RXBB_GAIN_TBL_8_TXCAL_RX_BB_GAIN_TABLE_11_SET(x) (((x) << 18) & 0x00fc0000)
+
+/* macros for BB_cal_rxbb_gain_tbl_12 */
+#define PHY_BB_CAL_RXBB_GAIN_TBL_12_ADDRESS 0x0000a718
+#define PHY_BB_CAL_RXBB_GAIN_TBL_12_OFFSET 0x0000a718
+#define PHY_BB_CAL_RXBB_GAIN_TBL_12_TXCAL_RX_BB_GAIN_TABLE_12_MSB 5
+#define PHY_BB_CAL_RXBB_GAIN_TBL_12_TXCAL_RX_BB_GAIN_TABLE_12_LSB 0
+#define PHY_BB_CAL_RXBB_GAIN_TBL_12_TXCAL_RX_BB_GAIN_TABLE_12_MASK 0x0000003f
+#define PHY_BB_CAL_RXBB_GAIN_TBL_12_TXCAL_RX_BB_GAIN_TABLE_12_GET(x) (((x) & 0x0000003f) >> 0)
+#define PHY_BB_CAL_RXBB_GAIN_TBL_12_TXCAL_RX_BB_GAIN_TABLE_12_SET(x) (((x) << 0) & 0x0000003f)
+#define PHY_BB_CAL_RXBB_GAIN_TBL_12_TXCAL_RX_BB_GAIN_TABLE_13_MSB 11
+#define PHY_BB_CAL_RXBB_GAIN_TBL_12_TXCAL_RX_BB_GAIN_TABLE_13_LSB 6
+#define PHY_BB_CAL_RXBB_GAIN_TBL_12_TXCAL_RX_BB_GAIN_TABLE_13_MASK 0x00000fc0
+#define PHY_BB_CAL_RXBB_GAIN_TBL_12_TXCAL_RX_BB_GAIN_TABLE_13_GET(x) (((x) & 0x00000fc0) >> 6)
+#define PHY_BB_CAL_RXBB_GAIN_TBL_12_TXCAL_RX_BB_GAIN_TABLE_13_SET(x) (((x) << 6) & 0x00000fc0)
+#define PHY_BB_CAL_RXBB_GAIN_TBL_12_TXCAL_RX_BB_GAIN_TABLE_14_MSB 17
+#define PHY_BB_CAL_RXBB_GAIN_TBL_12_TXCAL_RX_BB_GAIN_TABLE_14_LSB 12
+#define PHY_BB_CAL_RXBB_GAIN_TBL_12_TXCAL_RX_BB_GAIN_TABLE_14_MASK 0x0003f000
+#define PHY_BB_CAL_RXBB_GAIN_TBL_12_TXCAL_RX_BB_GAIN_TABLE_14_GET(x) (((x) & 0x0003f000) >> 12)
+#define PHY_BB_CAL_RXBB_GAIN_TBL_12_TXCAL_RX_BB_GAIN_TABLE_14_SET(x) (((x) << 12) & 0x0003f000)
+#define PHY_BB_CAL_RXBB_GAIN_TBL_12_TXCAL_RX_BB_GAIN_TABLE_15_MSB 23
+#define PHY_BB_CAL_RXBB_GAIN_TBL_12_TXCAL_RX_BB_GAIN_TABLE_15_LSB 18
+#define PHY_BB_CAL_RXBB_GAIN_TBL_12_TXCAL_RX_BB_GAIN_TABLE_15_MASK 0x00fc0000
+#define PHY_BB_CAL_RXBB_GAIN_TBL_12_TXCAL_RX_BB_GAIN_TABLE_15_GET(x) (((x) & 0x00fc0000) >> 18)
+#define PHY_BB_CAL_RXBB_GAIN_TBL_12_TXCAL_RX_BB_GAIN_TABLE_15_SET(x) (((x) << 18) & 0x00fc0000)
+
+/* macros for BB_cal_rxbb_gain_tbl_16 */
+#define PHY_BB_CAL_RXBB_GAIN_TBL_16_ADDRESS 0x0000a71c
+#define PHY_BB_CAL_RXBB_GAIN_TBL_16_OFFSET 0x0000a71c
+#define PHY_BB_CAL_RXBB_GAIN_TBL_16_TXCAL_RX_BB_GAIN_TABLE_16_MSB 5
+#define PHY_BB_CAL_RXBB_GAIN_TBL_16_TXCAL_RX_BB_GAIN_TABLE_16_LSB 0
+#define PHY_BB_CAL_RXBB_GAIN_TBL_16_TXCAL_RX_BB_GAIN_TABLE_16_MASK 0x0000003f
+#define PHY_BB_CAL_RXBB_GAIN_TBL_16_TXCAL_RX_BB_GAIN_TABLE_16_GET(x) (((x) & 0x0000003f) >> 0)
+#define PHY_BB_CAL_RXBB_GAIN_TBL_16_TXCAL_RX_BB_GAIN_TABLE_16_SET(x) (((x) << 0) & 0x0000003f)
+#define PHY_BB_CAL_RXBB_GAIN_TBL_16_TXCAL_RX_BB_GAIN_TABLE_17_MSB 11
+#define PHY_BB_CAL_RXBB_GAIN_TBL_16_TXCAL_RX_BB_GAIN_TABLE_17_LSB 6
+#define PHY_BB_CAL_RXBB_GAIN_TBL_16_TXCAL_RX_BB_GAIN_TABLE_17_MASK 0x00000fc0
+#define PHY_BB_CAL_RXBB_GAIN_TBL_16_TXCAL_RX_BB_GAIN_TABLE_17_GET(x) (((x) & 0x00000fc0) >> 6)
+#define PHY_BB_CAL_RXBB_GAIN_TBL_16_TXCAL_RX_BB_GAIN_TABLE_17_SET(x) (((x) << 6) & 0x00000fc0)
+#define PHY_BB_CAL_RXBB_GAIN_TBL_16_TXCAL_RX_BB_GAIN_TABLE_18_MSB 17
+#define PHY_BB_CAL_RXBB_GAIN_TBL_16_TXCAL_RX_BB_GAIN_TABLE_18_LSB 12
+#define PHY_BB_CAL_RXBB_GAIN_TBL_16_TXCAL_RX_BB_GAIN_TABLE_18_MASK 0x0003f000
+#define PHY_BB_CAL_RXBB_GAIN_TBL_16_TXCAL_RX_BB_GAIN_TABLE_18_GET(x) (((x) & 0x0003f000) >> 12)
+#define PHY_BB_CAL_RXBB_GAIN_TBL_16_TXCAL_RX_BB_GAIN_TABLE_18_SET(x) (((x) << 12) & 0x0003f000)
+#define PHY_BB_CAL_RXBB_GAIN_TBL_16_TXCAL_RX_BB_GAIN_TABLE_19_MSB 23
+#define PHY_BB_CAL_RXBB_GAIN_TBL_16_TXCAL_RX_BB_GAIN_TABLE_19_LSB 18
+#define PHY_BB_CAL_RXBB_GAIN_TBL_16_TXCAL_RX_BB_GAIN_TABLE_19_MASK 0x00fc0000
+#define PHY_BB_CAL_RXBB_GAIN_TBL_16_TXCAL_RX_BB_GAIN_TABLE_19_GET(x) (((x) & 0x00fc0000) >> 18)
+#define PHY_BB_CAL_RXBB_GAIN_TBL_16_TXCAL_RX_BB_GAIN_TABLE_19_SET(x) (((x) << 18) & 0x00fc0000)
+
+/* macros for BB_cal_rxbb_gain_tbl_20 */
+#define PHY_BB_CAL_RXBB_GAIN_TBL_20_ADDRESS 0x0000a720
+#define PHY_BB_CAL_RXBB_GAIN_TBL_20_OFFSET 0x0000a720
+#define PHY_BB_CAL_RXBB_GAIN_TBL_20_TXCAL_RX_BB_GAIN_TABLE_20_MSB 5
+#define PHY_BB_CAL_RXBB_GAIN_TBL_20_TXCAL_RX_BB_GAIN_TABLE_20_LSB 0
+#define PHY_BB_CAL_RXBB_GAIN_TBL_20_TXCAL_RX_BB_GAIN_TABLE_20_MASK 0x0000003f
+#define PHY_BB_CAL_RXBB_GAIN_TBL_20_TXCAL_RX_BB_GAIN_TABLE_20_GET(x) (((x) & 0x0000003f) >> 0)
+#define PHY_BB_CAL_RXBB_GAIN_TBL_20_TXCAL_RX_BB_GAIN_TABLE_20_SET(x) (((x) << 0) & 0x0000003f)
+#define PHY_BB_CAL_RXBB_GAIN_TBL_20_TXCAL_RX_BB_GAIN_TABLE_21_MSB 11
+#define PHY_BB_CAL_RXBB_GAIN_TBL_20_TXCAL_RX_BB_GAIN_TABLE_21_LSB 6
+#define PHY_BB_CAL_RXBB_GAIN_TBL_20_TXCAL_RX_BB_GAIN_TABLE_21_MASK 0x00000fc0
+#define PHY_BB_CAL_RXBB_GAIN_TBL_20_TXCAL_RX_BB_GAIN_TABLE_21_GET(x) (((x) & 0x00000fc0) >> 6)
+#define PHY_BB_CAL_RXBB_GAIN_TBL_20_TXCAL_RX_BB_GAIN_TABLE_21_SET(x) (((x) << 6) & 0x00000fc0)
+#define PHY_BB_CAL_RXBB_GAIN_TBL_20_TXCAL_RX_BB_GAIN_TABLE_22_MSB 17
+#define PHY_BB_CAL_RXBB_GAIN_TBL_20_TXCAL_RX_BB_GAIN_TABLE_22_LSB 12
+#define PHY_BB_CAL_RXBB_GAIN_TBL_20_TXCAL_RX_BB_GAIN_TABLE_22_MASK 0x0003f000
+#define PHY_BB_CAL_RXBB_GAIN_TBL_20_TXCAL_RX_BB_GAIN_TABLE_22_GET(x) (((x) & 0x0003f000) >> 12)
+#define PHY_BB_CAL_RXBB_GAIN_TBL_20_TXCAL_RX_BB_GAIN_TABLE_22_SET(x) (((x) << 12) & 0x0003f000)
+#define PHY_BB_CAL_RXBB_GAIN_TBL_20_TXCAL_RX_BB_GAIN_TABLE_23_MSB 23
+#define PHY_BB_CAL_RXBB_GAIN_TBL_20_TXCAL_RX_BB_GAIN_TABLE_23_LSB 18
+#define PHY_BB_CAL_RXBB_GAIN_TBL_20_TXCAL_RX_BB_GAIN_TABLE_23_MASK 0x00fc0000
+#define PHY_BB_CAL_RXBB_GAIN_TBL_20_TXCAL_RX_BB_GAIN_TABLE_23_GET(x) (((x) & 0x00fc0000) >> 18)
+#define PHY_BB_CAL_RXBB_GAIN_TBL_20_TXCAL_RX_BB_GAIN_TABLE_23_SET(x) (((x) << 18) & 0x00fc0000)
+
+/* macros for BB_cal_rxbb_gain_tbl_24 */
+#define PHY_BB_CAL_RXBB_GAIN_TBL_24_ADDRESS 0x0000a724
+#define PHY_BB_CAL_RXBB_GAIN_TBL_24_OFFSET 0x0000a724
+#define PHY_BB_CAL_RXBB_GAIN_TBL_24_TXCAL_RX_BB_GAIN_TABLE_24_MSB 5
+#define PHY_BB_CAL_RXBB_GAIN_TBL_24_TXCAL_RX_BB_GAIN_TABLE_24_LSB 0
+#define PHY_BB_CAL_RXBB_GAIN_TBL_24_TXCAL_RX_BB_GAIN_TABLE_24_MASK 0x0000003f
+#define PHY_BB_CAL_RXBB_GAIN_TBL_24_TXCAL_RX_BB_GAIN_TABLE_24_GET(x) (((x) & 0x0000003f) >> 0)
+#define PHY_BB_CAL_RXBB_GAIN_TBL_24_TXCAL_RX_BB_GAIN_TABLE_24_SET(x) (((x) << 0) & 0x0000003f)
+
+/* macros for BB_txiqcal_status_b0 */
+#define PHY_BB_TXIQCAL_STATUS_B0_ADDRESS 0x0000a728
+#define PHY_BB_TXIQCAL_STATUS_B0_OFFSET 0x0000a728
+#define PHY_BB_TXIQCAL_STATUS_B0_TXIQCAL_FAILED_0_MSB 0
+#define PHY_BB_TXIQCAL_STATUS_B0_TXIQCAL_FAILED_0_LSB 0
+#define PHY_BB_TXIQCAL_STATUS_B0_TXIQCAL_FAILED_0_MASK 0x00000001
+#define PHY_BB_TXIQCAL_STATUS_B0_TXIQCAL_FAILED_0_GET(x) (((x) & 0x00000001) >> 0)
+#define PHY_BB_TXIQCAL_STATUS_B0_CALIBRATED_GAINS_0_MSB 5
+#define PHY_BB_TXIQCAL_STATUS_B0_CALIBRATED_GAINS_0_LSB 1
+#define PHY_BB_TXIQCAL_STATUS_B0_CALIBRATED_GAINS_0_MASK 0x0000003e
+#define PHY_BB_TXIQCAL_STATUS_B0_CALIBRATED_GAINS_0_GET(x) (((x) & 0x0000003e) >> 1)
+#define PHY_BB_TXIQCAL_STATUS_B0_TONE_GAIN_USED_0_MSB 11
+#define PHY_BB_TXIQCAL_STATUS_B0_TONE_GAIN_USED_0_LSB 6
+#define PHY_BB_TXIQCAL_STATUS_B0_TONE_GAIN_USED_0_MASK 0x00000fc0
+#define PHY_BB_TXIQCAL_STATUS_B0_TONE_GAIN_USED_0_GET(x) (((x) & 0x00000fc0) >> 6)
+#define PHY_BB_TXIQCAL_STATUS_B0_RX_GAIN_USED_0_MSB 17
+#define PHY_BB_TXIQCAL_STATUS_B0_RX_GAIN_USED_0_LSB 12
+#define PHY_BB_TXIQCAL_STATUS_B0_RX_GAIN_USED_0_MASK 0x0003f000
+#define PHY_BB_TXIQCAL_STATUS_B0_RX_GAIN_USED_0_GET(x) (((x) & 0x0003f000) >> 12)
+#define PHY_BB_TXIQCAL_STATUS_B0_LAST_MEAS_ADDR_0_MSB 24
+#define PHY_BB_TXIQCAL_STATUS_B0_LAST_MEAS_ADDR_0_LSB 18
+#define PHY_BB_TXIQCAL_STATUS_B0_LAST_MEAS_ADDR_0_MASK 0x01fc0000
+#define PHY_BB_TXIQCAL_STATUS_B0_LAST_MEAS_ADDR_0_GET(x) (((x) & 0x01fc0000) >> 18)
+
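+/*
+ * Illustrative sketch only: reading back the BB_txiqcal_status_b0 register
+ * above and decoding its read-only fields with the _GET macros.
+ * example_reg_read() is a hypothetical MMIO accessor assumed just for this
+ * example.
+ */
+#if 0	/* example usage, not compiled */
+extern unsigned int example_reg_read(unsigned int address);
+
+static int example_tx_iqcal_succeeded(void)
+{
+	unsigned int status = example_reg_read(PHY_BB_TXIQCAL_STATUS_B0_ADDRESS);
+
+	/* _GET() masks out the field and shifts it down to bit 0. */
+	if (PHY_BB_TXIQCAL_STATUS_B0_TXIQCAL_FAILED_0_GET(status))
+		return 0;	/* hardware reported a failed calibration */
+
+	/* Number of gain settings the hardware reports as calibrated. */
+	return PHY_BB_TXIQCAL_STATUS_B0_CALIBRATED_GAINS_0_GET(status);
+}
+#endif
+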
+/* macros for BB_paprd_trainer_cntl1 */
+#define PHY_BB_PAPRD_TRAINER_CNTL1_ADDRESS 0x0000a72c
+#define PHY_BB_PAPRD_TRAINER_CNTL1_OFFSET 0x0000a72c
+#define PHY_BB_PAPRD_TRAINER_CNTL1_CF_PAPRD_TRAIN_ENABLE_MSB 0
+#define PHY_BB_PAPRD_TRAINER_CNTL1_CF_PAPRD_TRAIN_ENABLE_LSB 0
+#define PHY_BB_PAPRD_TRAINER_CNTL1_CF_PAPRD_TRAIN_ENABLE_MASK 0x00000001
+#define PHY_BB_PAPRD_TRAINER_CNTL1_CF_PAPRD_TRAIN_ENABLE_GET(x) (((x) & 0x00000001) >> 0)
+#define PHY_BB_PAPRD_TRAINER_CNTL1_CF_PAPRD_TRAIN_ENABLE_SET(x) (((x) << 0) & 0x00000001)
+#define PHY_BB_PAPRD_TRAINER_CNTL1_CF_PAPRD_AGC2_SETTLING_MSB 7
+#define PHY_BB_PAPRD_TRAINER_CNTL1_CF_PAPRD_AGC2_SETTLING_LSB 1
+#define PHY_BB_PAPRD_TRAINER_CNTL1_CF_PAPRD_AGC2_SETTLING_MASK 0x000000fe
+#define PHY_BB_PAPRD_TRAINER_CNTL1_CF_PAPRD_AGC2_SETTLING_GET(x) (((x) & 0x000000fe) >> 1)
+#define PHY_BB_PAPRD_TRAINER_CNTL1_CF_PAPRD_AGC2_SETTLING_SET(x) (((x) << 1) & 0x000000fe)
+#define PHY_BB_PAPRD_TRAINER_CNTL1_CF_PAPRD_IQCORR_ENABLE_MSB 8
+#define PHY_BB_PAPRD_TRAINER_CNTL1_CF_PAPRD_IQCORR_ENABLE_LSB 8
+#define PHY_BB_PAPRD_TRAINER_CNTL1_CF_PAPRD_IQCORR_ENABLE_MASK 0x00000100
+#define PHY_BB_PAPRD_TRAINER_CNTL1_CF_PAPRD_IQCORR_ENABLE_GET(x) (((x) & 0x00000100) >> 8)
+#define PHY_BB_PAPRD_TRAINER_CNTL1_CF_PAPRD_IQCORR_ENABLE_SET(x) (((x) << 8) & 0x00000100)
+#define PHY_BB_PAPRD_TRAINER_CNTL1_CF_PAPRD_RX_BB_GAIN_FORCE_MSB 9
+#define PHY_BB_PAPRD_TRAINER_CNTL1_CF_PAPRD_RX_BB_GAIN_FORCE_LSB 9
+#define PHY_BB_PAPRD_TRAINER_CNTL1_CF_PAPRD_RX_BB_GAIN_FORCE_MASK 0x00000200
+#define PHY_BB_PAPRD_TRAINER_CNTL1_CF_PAPRD_RX_BB_GAIN_FORCE_GET(x) (((x) & 0x00000200) >> 9)
+#define PHY_BB_PAPRD_TRAINER_CNTL1_CF_PAPRD_RX_BB_GAIN_FORCE_SET(x) (((x) << 9) & 0x00000200)
+#define PHY_BB_PAPRD_TRAINER_CNTL1_CF_PAPRD_TX_GAIN_FORCE_MSB 10
+#define PHY_BB_PAPRD_TRAINER_CNTL1_CF_PAPRD_TX_GAIN_FORCE_LSB 10
+#define PHY_BB_PAPRD_TRAINER_CNTL1_CF_PAPRD_TX_GAIN_FORCE_MASK 0x00000400
+#define PHY_BB_PAPRD_TRAINER_CNTL1_CF_PAPRD_TX_GAIN_FORCE_GET(x) (((x) & 0x00000400) >> 10)
+#define PHY_BB_PAPRD_TRAINER_CNTL1_CF_PAPRD_TX_GAIN_FORCE_SET(x) (((x) << 10) & 0x00000400)
+#define PHY_BB_PAPRD_TRAINER_CNTL1_CF_PAPRD_LB_ENABLE_MSB 11
+#define PHY_BB_PAPRD_TRAINER_CNTL1_CF_PAPRD_LB_ENABLE_LSB 11
+#define PHY_BB_PAPRD_TRAINER_CNTL1_CF_PAPRD_LB_ENABLE_MASK 0x00000800
+#define PHY_BB_PAPRD_TRAINER_CNTL1_CF_PAPRD_LB_ENABLE_GET(x) (((x) & 0x00000800) >> 11)
+#define PHY_BB_PAPRD_TRAINER_CNTL1_CF_PAPRD_LB_ENABLE_SET(x) (((x) << 11) & 0x00000800)
+#define PHY_BB_PAPRD_TRAINER_CNTL1_CF_PAPRD_LB_SKIP_MSB 18
+#define PHY_BB_PAPRD_TRAINER_CNTL1_CF_PAPRD_LB_SKIP_LSB 12
+#define PHY_BB_PAPRD_TRAINER_CNTL1_CF_PAPRD_LB_SKIP_MASK 0x0007f000
+#define PHY_BB_PAPRD_TRAINER_CNTL1_CF_PAPRD_LB_SKIP_GET(x) (((x) & 0x0007f000) >> 12)
+#define PHY_BB_PAPRD_TRAINER_CNTL1_CF_PAPRD_LB_SKIP_SET(x) (((x) << 12) & 0x0007f000)
+
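+/*
+ * Illustrative sketch only: a read-modify-write of a single field in
+ * BB_paprd_trainer_cntl1 using the _MASK and _SET macros above.
+ * example_reg_read()/example_reg_write() are hypothetical MMIO accessors
+ * assumed just for this example.
+ */
+#if 0	/* example usage, not compiled */
+extern unsigned int example_reg_read(unsigned int address);
+extern void example_reg_write(unsigned int address, unsigned int value);
+
+static void example_set_paprd_agc2_settling(unsigned int settling)
+{
+	unsigned int val = example_reg_read(PHY_BB_PAPRD_TRAINER_CNTL1_ADDRESS);
+
+	/* Clear the old field with _MASK, then merge the new value with _SET. */
+	val &= ~PHY_BB_PAPRD_TRAINER_CNTL1_CF_PAPRD_AGC2_SETTLING_MASK;
+	val |= PHY_BB_PAPRD_TRAINER_CNTL1_CF_PAPRD_AGC2_SETTLING_SET(settling);
+	example_reg_write(PHY_BB_PAPRD_TRAINER_CNTL1_ADDRESS, val);
+}
+#endif
+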
+/* macros for BB_paprd_trainer_cntl2 */
+#define PHY_BB_PAPRD_TRAINER_CNTL2_ADDRESS 0x0000a730
+#define PHY_BB_PAPRD_TRAINER_CNTL2_OFFSET 0x0000a730
+#define PHY_BB_PAPRD_TRAINER_CNTL2_CF_PAPRD_INIT_RX_BB_GAIN_MSB 31
+#define PHY_BB_PAPRD_TRAINER_CNTL2_CF_PAPRD_INIT_RX_BB_GAIN_LSB 0
+#define PHY_BB_PAPRD_TRAINER_CNTL2_CF_PAPRD_INIT_RX_BB_GAIN_MASK 0xffffffff
+#define PHY_BB_PAPRD_TRAINER_CNTL2_CF_PAPRD_INIT_RX_BB_GAIN_GET(x) (((x) & 0xffffffff) >> 0)
+#define PHY_BB_PAPRD_TRAINER_CNTL2_CF_PAPRD_INIT_RX_BB_GAIN_SET(x) (((x) << 0) & 0xffffffff)
+
+/* macros for BB_paprd_trainer_cntl3 */
+#define PHY_BB_PAPRD_TRAINER_CNTL3_ADDRESS 0x0000a734
+#define PHY_BB_PAPRD_TRAINER_CNTL3_OFFSET 0x0000a734
+#define PHY_BB_PAPRD_TRAINER_CNTL3_CF_PAPRD_ADC_DESIRED_SIZE_MSB 5
+#define PHY_BB_PAPRD_TRAINER_CNTL3_CF_PAPRD_ADC_DESIRED_SIZE_LSB 0
+#define PHY_BB_PAPRD_TRAINER_CNTL3_CF_PAPRD_ADC_DESIRED_SIZE_MASK 0x0000003f
+#define PHY_BB_PAPRD_TRAINER_CNTL3_CF_PAPRD_ADC_DESIRED_SIZE_GET(x) (((x) & 0x0000003f) >> 0)
+#define PHY_BB_PAPRD_TRAINER_CNTL3_CF_PAPRD_ADC_DESIRED_SIZE_SET(x) (((x) << 0) & 0x0000003f)
+#define PHY_BB_PAPRD_TRAINER_CNTL3_CF_PAPRD_QUICK_DROP_MSB 11
+#define PHY_BB_PAPRD_TRAINER_CNTL3_CF_PAPRD_QUICK_DROP_LSB 6
+#define PHY_BB_PAPRD_TRAINER_CNTL3_CF_PAPRD_QUICK_DROP_MASK 0x00000fc0
+#define PHY_BB_PAPRD_TRAINER_CNTL3_CF_PAPRD_QUICK_DROP_GET(x) (((x) & 0x00000fc0) >> 6)
+#define PHY_BB_PAPRD_TRAINER_CNTL3_CF_PAPRD_QUICK_DROP_SET(x) (((x) << 6) & 0x00000fc0)
+#define PHY_BB_PAPRD_TRAINER_CNTL3_CF_PAPRD_MIN_LOOPBACK_DEL_MSB 16
+#define PHY_BB_PAPRD_TRAINER_CNTL3_CF_PAPRD_MIN_LOOPBACK_DEL_LSB 12
+#define PHY_BB_PAPRD_TRAINER_CNTL3_CF_PAPRD_MIN_LOOPBACK_DEL_MASK 0x0001f000
+#define PHY_BB_PAPRD_TRAINER_CNTL3_CF_PAPRD_MIN_LOOPBACK_DEL_GET(x) (((x) & 0x0001f000) >> 12)
+#define PHY_BB_PAPRD_TRAINER_CNTL3_CF_PAPRD_MIN_LOOPBACK_DEL_SET(x) (((x) << 12) & 0x0001f000)
+#define PHY_BB_PAPRD_TRAINER_CNTL3_CF_PAPRD_NUM_CORR_STAGES_MSB 19
+#define PHY_BB_PAPRD_TRAINER_CNTL3_CF_PAPRD_NUM_CORR_STAGES_LSB 17
+#define PHY_BB_PAPRD_TRAINER_CNTL3_CF_PAPRD_NUM_CORR_STAGES_MASK 0x000e0000
+#define PHY_BB_PAPRD_TRAINER_CNTL3_CF_PAPRD_NUM_CORR_STAGES_GET(x) (((x) & 0x000e0000) >> 17)
+#define PHY_BB_PAPRD_TRAINER_CNTL3_CF_PAPRD_NUM_CORR_STAGES_SET(x) (((x) << 17) & 0x000e0000)
+#define PHY_BB_PAPRD_TRAINER_CNTL3_CF_PAPRD_COARSE_CORR_LEN_MSB 23
+#define PHY_BB_PAPRD_TRAINER_CNTL3_CF_PAPRD_COARSE_CORR_LEN_LSB 20
+#define PHY_BB_PAPRD_TRAINER_CNTL3_CF_PAPRD_COARSE_CORR_LEN_MASK 0x00f00000
+#define PHY_BB_PAPRD_TRAINER_CNTL3_CF_PAPRD_COARSE_CORR_LEN_GET(x) (((x) & 0x00f00000) >> 20)
+#define PHY_BB_PAPRD_TRAINER_CNTL3_CF_PAPRD_COARSE_CORR_LEN_SET(x) (((x) << 20) & 0x00f00000)
+#define PHY_BB_PAPRD_TRAINER_CNTL3_CF_PAPRD_FINE_CORR_LEN_MSB 27
+#define PHY_BB_PAPRD_TRAINER_CNTL3_CF_PAPRD_FINE_CORR_LEN_LSB 24
+#define PHY_BB_PAPRD_TRAINER_CNTL3_CF_PAPRD_FINE_CORR_LEN_MASK 0x0f000000
+#define PHY_BB_PAPRD_TRAINER_CNTL3_CF_PAPRD_FINE_CORR_LEN_GET(x) (((x) & 0x0f000000) >> 24)
+#define PHY_BB_PAPRD_TRAINER_CNTL3_CF_PAPRD_FINE_CORR_LEN_SET(x) (((x) << 24) & 0x0f000000)
+#define PHY_BB_PAPRD_TRAINER_CNTL3_CF_PAPRD_BBTXMIX_DISABLE_MSB 28
+#define PHY_BB_PAPRD_TRAINER_CNTL3_CF_PAPRD_BBTXMIX_DISABLE_LSB 28
+#define PHY_BB_PAPRD_TRAINER_CNTL3_CF_PAPRD_BBTXMIX_DISABLE_MASK 0x10000000
+#define PHY_BB_PAPRD_TRAINER_CNTL3_CF_PAPRD_BBTXMIX_DISABLE_GET(x) (((x) & 0x10000000) >> 28)
+#define PHY_BB_PAPRD_TRAINER_CNTL3_CF_PAPRD_BBTXMIX_DISABLE_SET(x) (((x) << 28) & 0x10000000)
+
+/* macros for BB_paprd_trainer_cntl4 */
+#define PHY_BB_PAPRD_TRAINER_CNTL4_ADDRESS 0x0000a738
+#define PHY_BB_PAPRD_TRAINER_CNTL4_OFFSET 0x0000a738
+#define PHY_BB_PAPRD_TRAINER_CNTL4_CF_PAPRD_MIN_CORR_MSB 11
+#define PHY_BB_PAPRD_TRAINER_CNTL4_CF_PAPRD_MIN_CORR_LSB 0
+#define PHY_BB_PAPRD_TRAINER_CNTL4_CF_PAPRD_MIN_CORR_MASK 0x00000fff
+#define PHY_BB_PAPRD_TRAINER_CNTL4_CF_PAPRD_MIN_CORR_GET(x) (((x) & 0x00000fff) >> 0)
+#define PHY_BB_PAPRD_TRAINER_CNTL4_CF_PAPRD_MIN_CORR_SET(x) (((x) << 0) & 0x00000fff)
+#define PHY_BB_PAPRD_TRAINER_CNTL4_CF_PAPRD_SAFETY_DELTA_MSB 15
+#define PHY_BB_PAPRD_TRAINER_CNTL4_CF_PAPRD_SAFETY_DELTA_LSB 12
+#define PHY_BB_PAPRD_TRAINER_CNTL4_CF_PAPRD_SAFETY_DELTA_MASK 0x0000f000
+#define PHY_BB_PAPRD_TRAINER_CNTL4_CF_PAPRD_SAFETY_DELTA_GET(x) (((x) & 0x0000f000) >> 12)
+#define PHY_BB_PAPRD_TRAINER_CNTL4_CF_PAPRD_SAFETY_DELTA_SET(x) (((x) << 12) & 0x0000f000)
+#define PHY_BB_PAPRD_TRAINER_CNTL4_CF_PAPRD_NUM_TRAIN_SAMPLES_MSB 25
+#define PHY_BB_PAPRD_TRAINER_CNTL4_CF_PAPRD_NUM_TRAIN_SAMPLES_LSB 16
+#define PHY_BB_PAPRD_TRAINER_CNTL4_CF_PAPRD_NUM_TRAIN_SAMPLES_MASK 0x03ff0000
+#define PHY_BB_PAPRD_TRAINER_CNTL4_CF_PAPRD_NUM_TRAIN_SAMPLES_GET(x) (((x) & 0x03ff0000) >> 16)
+#define PHY_BB_PAPRD_TRAINER_CNTL4_CF_PAPRD_NUM_TRAIN_SAMPLES_SET(x) (((x) << 16) & 0x03ff0000)
+
+/* macros for BB_paprd_trainer_stat1 */
+#define PHY_BB_PAPRD_TRAINER_STAT1_ADDRESS 0x0000a73c
+#define PHY_BB_PAPRD_TRAINER_STAT1_OFFSET 0x0000a73c
+#define PHY_BB_PAPRD_TRAINER_STAT1_PAPRD_TRAIN_DONE_MSB 0
+#define PHY_BB_PAPRD_TRAINER_STAT1_PAPRD_TRAIN_DONE_LSB 0
+#define PHY_BB_PAPRD_TRAINER_STAT1_PAPRD_TRAIN_DONE_MASK 0x00000001
+#define PHY_BB_PAPRD_TRAINER_STAT1_PAPRD_TRAIN_DONE_GET(x) (((x) & 0x00000001) >> 0)
+#define PHY_BB_PAPRD_TRAINER_STAT1_PAPRD_TRAIN_DONE_SET(x) (((x) << 0) & 0x00000001)
+#define PHY_BB_PAPRD_TRAINER_STAT1_PAPRD_TRAIN_INCOMPLETE_MSB 1
+#define PHY_BB_PAPRD_TRAINER_STAT1_PAPRD_TRAIN_INCOMPLETE_LSB 1
+#define PHY_BB_PAPRD_TRAINER_STAT1_PAPRD_TRAIN_INCOMPLETE_MASK 0x00000002
+#define PHY_BB_PAPRD_TRAINER_STAT1_PAPRD_TRAIN_INCOMPLETE_GET(x) (((x) & 0x00000002) >> 1)
+#define PHY_BB_PAPRD_TRAINER_STAT1_PAPRD_CORR_ERR_MSB 2
+#define PHY_BB_PAPRD_TRAINER_STAT1_PAPRD_CORR_ERR_LSB 2
+#define PHY_BB_PAPRD_TRAINER_STAT1_PAPRD_CORR_ERR_MASK 0x00000004
+#define PHY_BB_PAPRD_TRAINER_STAT1_PAPRD_CORR_ERR_GET(x) (((x) & 0x00000004) >> 2)
+#define PHY_BB_PAPRD_TRAINER_STAT1_PAPRD_TRAIN_ACTIVE_MSB 3
+#define PHY_BB_PAPRD_TRAINER_STAT1_PAPRD_TRAIN_ACTIVE_LSB 3
+#define PHY_BB_PAPRD_TRAINER_STAT1_PAPRD_TRAIN_ACTIVE_MASK 0x00000008
+#define PHY_BB_PAPRD_TRAINER_STAT1_PAPRD_TRAIN_ACTIVE_GET(x) (((x) & 0x00000008) >> 3)
+#define PHY_BB_PAPRD_TRAINER_STAT1_PAPRD_RX_GAIN_IDX_MSB 8
+#define PHY_BB_PAPRD_TRAINER_STAT1_PAPRD_RX_GAIN_IDX_LSB 4
+#define PHY_BB_PAPRD_TRAINER_STAT1_PAPRD_RX_GAIN_IDX_MASK 0x000001f0
+#define PHY_BB_PAPRD_TRAINER_STAT1_PAPRD_RX_GAIN_IDX_GET(x) (((x) & 0x000001f0) >> 4)
+#define PHY_BB_PAPRD_TRAINER_STAT1_PAPRD_AGC2_PWR_MSB 16
+#define PHY_BB_PAPRD_TRAINER_STAT1_PAPRD_AGC2_PWR_LSB 9
+#define PHY_BB_PAPRD_TRAINER_STAT1_PAPRD_AGC2_PWR_MASK 0x0001fe00
+#define PHY_BB_PAPRD_TRAINER_STAT1_PAPRD_AGC2_PWR_GET(x) (((x) & 0x0001fe00) >> 9)
+
+/* macros for BB_paprd_trainer_stat2 */
+#define PHY_BB_PAPRD_TRAINER_STAT2_ADDRESS 0x0000a740
+#define PHY_BB_PAPRD_TRAINER_STAT2_OFFSET 0x0000a740
+#define PHY_BB_PAPRD_TRAINER_STAT2_PAPRD_FINE_VAL_MSB 15
+#define PHY_BB_PAPRD_TRAINER_STAT2_PAPRD_FINE_VAL_LSB 0
+#define PHY_BB_PAPRD_TRAINER_STAT2_PAPRD_FINE_VAL_MASK 0x0000ffff
+#define PHY_BB_PAPRD_TRAINER_STAT2_PAPRD_FINE_VAL_GET(x) (((x) & 0x0000ffff) >> 0)
+#define PHY_BB_PAPRD_TRAINER_STAT2_PAPRD_COARSE_IDX_MSB 20
+#define PHY_BB_PAPRD_TRAINER_STAT2_PAPRD_COARSE_IDX_LSB 16
+#define PHY_BB_PAPRD_TRAINER_STAT2_PAPRD_COARSE_IDX_MASK 0x001f0000
+#define PHY_BB_PAPRD_TRAINER_STAT2_PAPRD_COARSE_IDX_GET(x) (((x) & 0x001f0000) >> 16)
+#define PHY_BB_PAPRD_TRAINER_STAT2_PAPRD_FINE_IDX_MSB 22
+#define PHY_BB_PAPRD_TRAINER_STAT2_PAPRD_FINE_IDX_LSB 21
+#define PHY_BB_PAPRD_TRAINER_STAT2_PAPRD_FINE_IDX_MASK 0x00600000
+#define PHY_BB_PAPRD_TRAINER_STAT2_PAPRD_FINE_IDX_GET(x) (((x) & 0x00600000) >> 21)
+
+/* macros for BB_paprd_trainer_stat3 */
+#define PHY_BB_PAPRD_TRAINER_STAT3_ADDRESS 0x0000a744
+#define PHY_BB_PAPRD_TRAINER_STAT3_OFFSET 0x0000a744
+#define PHY_BB_PAPRD_TRAINER_STAT3_PAPRD_TRAIN_SAMPLES_CNT_MSB 19
+#define PHY_BB_PAPRD_TRAINER_STAT3_PAPRD_TRAIN_SAMPLES_CNT_LSB 0
+#define PHY_BB_PAPRD_TRAINER_STAT3_PAPRD_TRAIN_SAMPLES_CNT_MASK 0x000fffff
+#define PHY_BB_PAPRD_TRAINER_STAT3_PAPRD_TRAIN_SAMPLES_CNT_GET(x) (((x) & 0x000fffff) >> 0)
+
+/* macros for BB_fcal_1 */
+#define PHY_BB_FCAL_1_ADDRESS 0x0000a7d8
+#define PHY_BB_FCAL_1_OFFSET 0x0000a7d8
+#define PHY_BB_FCAL_1_FLC_PB_FSTEP_MSB 9
+#define PHY_BB_FCAL_1_FLC_PB_FSTEP_LSB 0
+#define PHY_BB_FCAL_1_FLC_PB_FSTEP_MASK 0x000003ff
+#define PHY_BB_FCAL_1_FLC_PB_FSTEP_GET(x) (((x) & 0x000003ff) >> 0)
+#define PHY_BB_FCAL_1_FLC_PB_FSTEP_SET(x) (((x) << 0) & 0x000003ff)
+#define PHY_BB_FCAL_1_FLC_SB_FSTEP_MSB 19
+#define PHY_BB_FCAL_1_FLC_SB_FSTEP_LSB 10
+#define PHY_BB_FCAL_1_FLC_SB_FSTEP_MASK 0x000ffc00
+#define PHY_BB_FCAL_1_FLC_SB_FSTEP_GET(x) (((x) & 0x000ffc00) >> 10)
+#define PHY_BB_FCAL_1_FLC_SB_FSTEP_SET(x) (((x) << 10) & 0x000ffc00)
+#define PHY_BB_FCAL_1_FLC_PB_ATTEN_MSB 24
+#define PHY_BB_FCAL_1_FLC_PB_ATTEN_LSB 20
+#define PHY_BB_FCAL_1_FLC_PB_ATTEN_MASK 0x01f00000
+#define PHY_BB_FCAL_1_FLC_PB_ATTEN_GET(x) (((x) & 0x01f00000) >> 20)
+#define PHY_BB_FCAL_1_FLC_PB_ATTEN_SET(x) (((x) << 20) & 0x01f00000)
+#define PHY_BB_FCAL_1_FLC_SB_ATTEN_MSB 29
+#define PHY_BB_FCAL_1_FLC_SB_ATTEN_LSB 25
+#define PHY_BB_FCAL_1_FLC_SB_ATTEN_MASK 0x3e000000
+#define PHY_BB_FCAL_1_FLC_SB_ATTEN_GET(x) (((x) & 0x3e000000) >> 25)
+#define PHY_BB_FCAL_1_FLC_SB_ATTEN_SET(x) (((x) << 25) & 0x3e000000)
+
+/* macros for BB_fcal_2_b0 */
+#define PHY_BB_FCAL_2_B0_ADDRESS 0x0000a7dc
+#define PHY_BB_FCAL_2_B0_OFFSET 0x0000a7dc
+#define PHY_BB_FCAL_2_B0_FLC_PWR_THRESH_MSB 2
+#define PHY_BB_FCAL_2_B0_FLC_PWR_THRESH_LSB 0
+#define PHY_BB_FCAL_2_B0_FLC_PWR_THRESH_MASK 0x00000007
+#define PHY_BB_FCAL_2_B0_FLC_PWR_THRESH_GET(x) (((x) & 0x00000007) >> 0)
+#define PHY_BB_FCAL_2_B0_FLC_PWR_THRESH_SET(x) (((x) << 0) & 0x00000007)
+#define PHY_BB_FCAL_2_B0_FLC_SW_CAP_VAL_0_MSB 7
+#define PHY_BB_FCAL_2_B0_FLC_SW_CAP_VAL_0_LSB 3
+#define PHY_BB_FCAL_2_B0_FLC_SW_CAP_VAL_0_MASK 0x000000f8
+#define PHY_BB_FCAL_2_B0_FLC_SW_CAP_VAL_0_GET(x) (((x) & 0x000000f8) >> 3)
+#define PHY_BB_FCAL_2_B0_FLC_SW_CAP_VAL_0_SET(x) (((x) << 3) & 0x000000f8)
+#define PHY_BB_FCAL_2_B0_FLC_BBMISCGAIN_MSB 9
+#define PHY_BB_FCAL_2_B0_FLC_BBMISCGAIN_LSB 8
+#define PHY_BB_FCAL_2_B0_FLC_BBMISCGAIN_MASK 0x00000300
+#define PHY_BB_FCAL_2_B0_FLC_BBMISCGAIN_GET(x) (((x) & 0x00000300) >> 8)
+#define PHY_BB_FCAL_2_B0_FLC_BBMISCGAIN_SET(x) (((x) << 8) & 0x00000300)
+#define PHY_BB_FCAL_2_B0_FLC_BB1DBGAIN_MSB 12
+#define PHY_BB_FCAL_2_B0_FLC_BB1DBGAIN_LSB 10
+#define PHY_BB_FCAL_2_B0_FLC_BB1DBGAIN_MASK 0x00001c00
+#define PHY_BB_FCAL_2_B0_FLC_BB1DBGAIN_GET(x) (((x) & 0x00001c00) >> 10)
+#define PHY_BB_FCAL_2_B0_FLC_BB1DBGAIN_SET(x) (((x) << 10) & 0x00001c00)
+#define PHY_BB_FCAL_2_B0_FLC_BB6DBGAIN_MSB 14
+#define PHY_BB_FCAL_2_B0_FLC_BB6DBGAIN_LSB 13
+#define PHY_BB_FCAL_2_B0_FLC_BB6DBGAIN_MASK 0x00006000
+#define PHY_BB_FCAL_2_B0_FLC_BB6DBGAIN_GET(x) (((x) & 0x00006000) >> 13)
+#define PHY_BB_FCAL_2_B0_FLC_BB6DBGAIN_SET(x) (((x) << 13) & 0x00006000)
+#define PHY_BB_FCAL_2_B0_FLC_SW_CAP_SET_MSB 15
+#define PHY_BB_FCAL_2_B0_FLC_SW_CAP_SET_LSB 15
+#define PHY_BB_FCAL_2_B0_FLC_SW_CAP_SET_MASK 0x00008000
+#define PHY_BB_FCAL_2_B0_FLC_SW_CAP_SET_GET(x) (((x) & 0x00008000) >> 15)
+#define PHY_BB_FCAL_2_B0_FLC_SW_CAP_SET_SET(x) (((x) << 15) & 0x00008000)
+#define PHY_BB_FCAL_2_B0_FLC_MEAS_WIN_MSB 18
+#define PHY_BB_FCAL_2_B0_FLC_MEAS_WIN_LSB 16
+#define PHY_BB_FCAL_2_B0_FLC_MEAS_WIN_MASK 0x00070000
+#define PHY_BB_FCAL_2_B0_FLC_MEAS_WIN_GET(x) (((x) & 0x00070000) >> 16)
+#define PHY_BB_FCAL_2_B0_FLC_MEAS_WIN_SET(x) (((x) << 16) & 0x00070000)
+#define PHY_BB_FCAL_2_B0_FLC_CAP_VAL_STATUS_0_MSB 24
+#define PHY_BB_FCAL_2_B0_FLC_CAP_VAL_STATUS_0_LSB 20
+#define PHY_BB_FCAL_2_B0_FLC_CAP_VAL_STATUS_0_MASK 0x01f00000
+#define PHY_BB_FCAL_2_B0_FLC_CAP_VAL_STATUS_0_GET(x) (((x) & 0x01f00000) >> 20)
+
+/* macros for BB_radar_bw_filter */
+#define PHY_BB_RADAR_BW_FILTER_ADDRESS 0x0000a7e0
+#define PHY_BB_RADAR_BW_FILTER_OFFSET 0x0000a7e0
+#define PHY_BB_RADAR_BW_FILTER_RADAR_AVG_BW_CHECK_MSB 0
+#define PHY_BB_RADAR_BW_FILTER_RADAR_AVG_BW_CHECK_LSB 0
+#define PHY_BB_RADAR_BW_FILTER_RADAR_AVG_BW_CHECK_MASK 0x00000001
+#define PHY_BB_RADAR_BW_FILTER_RADAR_AVG_BW_CHECK_GET(x) (((x) & 0x00000001) >> 0)
+#define PHY_BB_RADAR_BW_FILTER_RADAR_AVG_BW_CHECK_SET(x) (((x) << 0) & 0x00000001)
+#define PHY_BB_RADAR_BW_FILTER_RADAR_DC_SRC_SEL_MSB 1
+#define PHY_BB_RADAR_BW_FILTER_RADAR_DC_SRC_SEL_LSB 1
+#define PHY_BB_RADAR_BW_FILTER_RADAR_DC_SRC_SEL_MASK 0x00000002
+#define PHY_BB_RADAR_BW_FILTER_RADAR_DC_SRC_SEL_GET(x) (((x) & 0x00000002) >> 1)
+#define PHY_BB_RADAR_BW_FILTER_RADAR_DC_SRC_SEL_SET(x) (((x) << 1) & 0x00000002)
+#define PHY_BB_RADAR_BW_FILTER_RADAR_FIRPWR_SEL_MSB 3
+#define PHY_BB_RADAR_BW_FILTER_RADAR_FIRPWR_SEL_LSB 2
+#define PHY_BB_RADAR_BW_FILTER_RADAR_FIRPWR_SEL_MASK 0x0000000c
+#define PHY_BB_RADAR_BW_FILTER_RADAR_FIRPWR_SEL_GET(x) (((x) & 0x0000000c) >> 2)
+#define PHY_BB_RADAR_BW_FILTER_RADAR_FIRPWR_SEL_SET(x) (((x) << 2) & 0x0000000c)
+#define PHY_BB_RADAR_BW_FILTER_RADAR_PULSE_WIDTH_SEL_MSB 5
+#define PHY_BB_RADAR_BW_FILTER_RADAR_PULSE_WIDTH_SEL_LSB 4
+#define PHY_BB_RADAR_BW_FILTER_RADAR_PULSE_WIDTH_SEL_MASK 0x00000030
+#define PHY_BB_RADAR_BW_FILTER_RADAR_PULSE_WIDTH_SEL_GET(x) (((x) & 0x00000030) >> 4)
+#define PHY_BB_RADAR_BW_FILTER_RADAR_PULSE_WIDTH_SEL_SET(x) (((x) << 4) & 0x00000030)
+#define PHY_BB_RADAR_BW_FILTER_RADAR_DC_FIRPWR_THRESH_MSB 14
+#define PHY_BB_RADAR_BW_FILTER_RADAR_DC_FIRPWR_THRESH_LSB 8
+#define PHY_BB_RADAR_BW_FILTER_RADAR_DC_FIRPWR_THRESH_MASK 0x00007f00
+#define PHY_BB_RADAR_BW_FILTER_RADAR_DC_FIRPWR_THRESH_GET(x) (((x) & 0x00007f00) >> 8)
+#define PHY_BB_RADAR_BW_FILTER_RADAR_DC_FIRPWR_THRESH_SET(x) (((x) << 8) & 0x00007f00)
+#define PHY_BB_RADAR_BW_FILTER_RADAR_DC_PWR_BIAS_MSB 20
+#define PHY_BB_RADAR_BW_FILTER_RADAR_DC_PWR_BIAS_LSB 15
+#define PHY_BB_RADAR_BW_FILTER_RADAR_DC_PWR_BIAS_MASK 0x001f8000
+#define PHY_BB_RADAR_BW_FILTER_RADAR_DC_PWR_BIAS_GET(x) (((x) & 0x001f8000) >> 15)
+#define PHY_BB_RADAR_BW_FILTER_RADAR_DC_PWR_BIAS_SET(x) (((x) << 15) & 0x001f8000)
+#define PHY_BB_RADAR_BW_FILTER_RADAR_BIN_MAX_BW_MSB 26
+#define PHY_BB_RADAR_BW_FILTER_RADAR_BIN_MAX_BW_LSB 21
+#define PHY_BB_RADAR_BW_FILTER_RADAR_BIN_MAX_BW_MASK 0x07e00000
+#define PHY_BB_RADAR_BW_FILTER_RADAR_BIN_MAX_BW_GET(x) (((x) & 0x07e00000) >> 21)
+#define PHY_BB_RADAR_BW_FILTER_RADAR_BIN_MAX_BW_SET(x) (((x) << 21) & 0x07e00000)
+
+/* macros for BB_dft_tone_ctrl_b0 */
+#define PHY_BB_DFT_TONE_CTRL_B0_ADDRESS 0x0000a7e4
+#define PHY_BB_DFT_TONE_CTRL_B0_OFFSET 0x0000a7e4
+#define PHY_BB_DFT_TONE_CTRL_B0_DFT_TONE_EN_0_MSB 0
+#define PHY_BB_DFT_TONE_CTRL_B0_DFT_TONE_EN_0_LSB 0
+#define PHY_BB_DFT_TONE_CTRL_B0_DFT_TONE_EN_0_MASK 0x00000001
+#define PHY_BB_DFT_TONE_CTRL_B0_DFT_TONE_EN_0_GET(x) (((x) & 0x00000001) >> 0)
+#define PHY_BB_DFT_TONE_CTRL_B0_DFT_TONE_EN_0_SET(x) (((x) << 0) & 0x00000001)
+#define PHY_BB_DFT_TONE_CTRL_B0_DFT_TONE_AMP_SEL_0_MSB 3
+#define PHY_BB_DFT_TONE_CTRL_B0_DFT_TONE_AMP_SEL_0_LSB 2
+#define PHY_BB_DFT_TONE_CTRL_B0_DFT_TONE_AMP_SEL_0_MASK 0x0000000c
+#define PHY_BB_DFT_TONE_CTRL_B0_DFT_TONE_AMP_SEL_0_GET(x) (((x) & 0x0000000c) >> 2)
+#define PHY_BB_DFT_TONE_CTRL_B0_DFT_TONE_AMP_SEL_0_SET(x) (((x) << 2) & 0x0000000c)
+#define PHY_BB_DFT_TONE_CTRL_B0_DFT_TONE_FREQ_ANG_0_MSB 12
+#define PHY_BB_DFT_TONE_CTRL_B0_DFT_TONE_FREQ_ANG_0_LSB 4
+#define PHY_BB_DFT_TONE_CTRL_B0_DFT_TONE_FREQ_ANG_0_MASK 0x00001ff0
+#define PHY_BB_DFT_TONE_CTRL_B0_DFT_TONE_FREQ_ANG_0_GET(x) (((x) & 0x00001ff0) >> 4)
+#define PHY_BB_DFT_TONE_CTRL_B0_DFT_TONE_FREQ_ANG_0_SET(x) (((x) << 4) & 0x00001ff0)
+
+/* macros for BB_therm_adc_1 */
+#define PHY_BB_THERM_ADC_1_ADDRESS 0x0000a7e8
+#define PHY_BB_THERM_ADC_1_OFFSET 0x0000a7e8
+#define PHY_BB_THERM_ADC_1_INIT_THERM_SETTING_MSB 7
+#define PHY_BB_THERM_ADC_1_INIT_THERM_SETTING_LSB 0
+#define PHY_BB_THERM_ADC_1_INIT_THERM_SETTING_MASK 0x000000ff
+#define PHY_BB_THERM_ADC_1_INIT_THERM_SETTING_GET(x) (((x) & 0x000000ff) >> 0)
+#define PHY_BB_THERM_ADC_1_INIT_THERM_SETTING_SET(x) (((x) << 0) & 0x000000ff)
+#define PHY_BB_THERM_ADC_1_INIT_VOLT_SETTING_MSB 15
+#define PHY_BB_THERM_ADC_1_INIT_VOLT_SETTING_LSB 8
+#define PHY_BB_THERM_ADC_1_INIT_VOLT_SETTING_MASK 0x0000ff00
+#define PHY_BB_THERM_ADC_1_INIT_VOLT_SETTING_GET(x) (((x) & 0x0000ff00) >> 8)
+#define PHY_BB_THERM_ADC_1_INIT_VOLT_SETTING_SET(x) (((x) << 8) & 0x0000ff00)
+#define PHY_BB_THERM_ADC_1_INIT_ATB_SETTING_MSB 23
+#define PHY_BB_THERM_ADC_1_INIT_ATB_SETTING_LSB 16
+#define PHY_BB_THERM_ADC_1_INIT_ATB_SETTING_MASK 0x00ff0000
+#define PHY_BB_THERM_ADC_1_INIT_ATB_SETTING_GET(x) (((x) & 0x00ff0000) >> 16)
+#define PHY_BB_THERM_ADC_1_INIT_ATB_SETTING_SET(x) (((x) << 16) & 0x00ff0000)
+#define PHY_BB_THERM_ADC_1_SAMPLES_CNT_CODING_MSB 25
+#define PHY_BB_THERM_ADC_1_SAMPLES_CNT_CODING_LSB 24
+#define PHY_BB_THERM_ADC_1_SAMPLES_CNT_CODING_MASK 0x03000000
+#define PHY_BB_THERM_ADC_1_SAMPLES_CNT_CODING_GET(x) (((x) & 0x03000000) >> 24)
+#define PHY_BB_THERM_ADC_1_SAMPLES_CNT_CODING_SET(x) (((x) << 24) & 0x03000000)
+#define PHY_BB_THERM_ADC_1_USE_INIT_THERM_VOLT_ATB_AFTER_WARM_RESET_MSB 26
+#define PHY_BB_THERM_ADC_1_USE_INIT_THERM_VOLT_ATB_AFTER_WARM_RESET_LSB 26
+#define PHY_BB_THERM_ADC_1_USE_INIT_THERM_VOLT_ATB_AFTER_WARM_RESET_MASK 0x04000000
+#define PHY_BB_THERM_ADC_1_USE_INIT_THERM_VOLT_ATB_AFTER_WARM_RESET_GET(x) (((x) & 0x04000000) >> 26)
+#define PHY_BB_THERM_ADC_1_USE_INIT_THERM_VOLT_ATB_AFTER_WARM_RESET_SET(x) (((x) << 26) & 0x04000000)
+#define PHY_BB_THERM_ADC_1_FORCE_THERM_VOLT_ATB_TO_INIT_SETTINGS_MSB 27
+#define PHY_BB_THERM_ADC_1_FORCE_THERM_VOLT_ATB_TO_INIT_SETTINGS_LSB 27
+#define PHY_BB_THERM_ADC_1_FORCE_THERM_VOLT_ATB_TO_INIT_SETTINGS_MASK 0x08000000
+#define PHY_BB_THERM_ADC_1_FORCE_THERM_VOLT_ATB_TO_INIT_SETTINGS_GET(x) (((x) & 0x08000000) >> 27)
+#define PHY_BB_THERM_ADC_1_FORCE_THERM_VOLT_ATB_TO_INIT_SETTINGS_SET(x) (((x) << 27) & 0x08000000)
+
+/* macros for BB_therm_adc_2 */
+#define PHY_BB_THERM_ADC_2_ADDRESS 0x0000a7ec
+#define PHY_BB_THERM_ADC_2_OFFSET 0x0000a7ec
+#define PHY_BB_THERM_ADC_2_MEASURE_THERM_FREQ_MSB 11
+#define PHY_BB_THERM_ADC_2_MEASURE_THERM_FREQ_LSB 0
+#define PHY_BB_THERM_ADC_2_MEASURE_THERM_FREQ_MASK 0x00000fff
+#define PHY_BB_THERM_ADC_2_MEASURE_THERM_FREQ_GET(x) (((x) & 0x00000fff) >> 0)
+#define PHY_BB_THERM_ADC_2_MEASURE_THERM_FREQ_SET(x) (((x) << 0) & 0x00000fff)
+#define PHY_BB_THERM_ADC_2_MEASURE_VOLT_FREQ_MSB 21
+#define PHY_BB_THERM_ADC_2_MEASURE_VOLT_FREQ_LSB 12
+#define PHY_BB_THERM_ADC_2_MEASURE_VOLT_FREQ_MASK 0x003ff000
+#define PHY_BB_THERM_ADC_2_MEASURE_VOLT_FREQ_GET(x) (((x) & 0x003ff000) >> 12)
+#define PHY_BB_THERM_ADC_2_MEASURE_VOLT_FREQ_SET(x) (((x) << 12) & 0x003ff000)
+#define PHY_BB_THERM_ADC_2_MEASURE_ATB_FREQ_MSB 31
+#define PHY_BB_THERM_ADC_2_MEASURE_ATB_FREQ_LSB 22
+#define PHY_BB_THERM_ADC_2_MEASURE_ATB_FREQ_MASK 0xffc00000
+#define PHY_BB_THERM_ADC_2_MEASURE_ATB_FREQ_GET(x) (((x) & 0xffc00000) >> 22)
+#define PHY_BB_THERM_ADC_2_MEASURE_ATB_FREQ_SET(x) (((x) << 22) & 0xffc00000)
+
+/* macros for BB_therm_adc_3 */
+#define PHY_BB_THERM_ADC_3_ADDRESS 0x0000a7f0
+#define PHY_BB_THERM_ADC_3_OFFSET 0x0000a7f0
+#define PHY_BB_THERM_ADC_3_THERM_ADC_OFFSET_MSB 7
+#define PHY_BB_THERM_ADC_3_THERM_ADC_OFFSET_LSB 0
+#define PHY_BB_THERM_ADC_3_THERM_ADC_OFFSET_MASK 0x000000ff
+#define PHY_BB_THERM_ADC_3_THERM_ADC_OFFSET_GET(x) (((x) & 0x000000ff) >> 0)
+#define PHY_BB_THERM_ADC_3_THERM_ADC_OFFSET_SET(x) (((x) << 0) & 0x000000ff)
+#define PHY_BB_THERM_ADC_3_THERM_ADC_SCALED_GAIN_MSB 16
+#define PHY_BB_THERM_ADC_3_THERM_ADC_SCALED_GAIN_LSB 8
+#define PHY_BB_THERM_ADC_3_THERM_ADC_SCALED_GAIN_MASK 0x0001ff00
+#define PHY_BB_THERM_ADC_3_THERM_ADC_SCALED_GAIN_GET(x) (((x) & 0x0001ff00) >> 8)
+#define PHY_BB_THERM_ADC_3_THERM_ADC_SCALED_GAIN_SET(x) (((x) << 8) & 0x0001ff00)
+#define PHY_BB_THERM_ADC_3_ADC_INTERVAL_MSB 29
+#define PHY_BB_THERM_ADC_3_ADC_INTERVAL_LSB 17
+#define PHY_BB_THERM_ADC_3_ADC_INTERVAL_MASK 0x3ffe0000
+#define PHY_BB_THERM_ADC_3_ADC_INTERVAL_GET(x) (((x) & 0x3ffe0000) >> 17)
+#define PHY_BB_THERM_ADC_3_ADC_INTERVAL_SET(x) (((x) << 17) & 0x3ffe0000)
+
+/* macros for BB_therm_adc_4 */
+#define PHY_BB_THERM_ADC_4_ADDRESS 0x0000a7f4
+#define PHY_BB_THERM_ADC_4_OFFSET 0x0000a7f4
+#define PHY_BB_THERM_ADC_4_LATEST_THERM_VALUE_MSB 7
+#define PHY_BB_THERM_ADC_4_LATEST_THERM_VALUE_LSB 0
+#define PHY_BB_THERM_ADC_4_LATEST_THERM_VALUE_MASK 0x000000ff
+#define PHY_BB_THERM_ADC_4_LATEST_THERM_VALUE_GET(x) (((x) & 0x000000ff) >> 0)
+#define PHY_BB_THERM_ADC_4_LATEST_VOLT_VALUE_MSB 15
+#define PHY_BB_THERM_ADC_4_LATEST_VOLT_VALUE_LSB 8
+#define PHY_BB_THERM_ADC_4_LATEST_VOLT_VALUE_MASK 0x0000ff00
+#define PHY_BB_THERM_ADC_4_LATEST_VOLT_VALUE_GET(x) (((x) & 0x0000ff00) >> 8)
+#define PHY_BB_THERM_ADC_4_LATEST_ATB_VALUE_MSB 23
+#define PHY_BB_THERM_ADC_4_LATEST_ATB_VALUE_LSB 16
+#define PHY_BB_THERM_ADC_4_LATEST_ATB_VALUE_MASK 0x00ff0000
+#define PHY_BB_THERM_ADC_4_LATEST_ATB_VALUE_GET(x) (((x) & 0x00ff0000) >> 16)
+
+/* macros for BB_tx_forced_gain */
+#define PHY_BB_TX_FORCED_GAIN_ADDRESS 0x0000a7f8
+#define PHY_BB_TX_FORCED_GAIN_OFFSET 0x0000a7f8
+#define PHY_BB_TX_FORCED_GAIN_FORCE_TX_GAIN_MSB 0
+#define PHY_BB_TX_FORCED_GAIN_FORCE_TX_GAIN_LSB 0
+#define PHY_BB_TX_FORCED_GAIN_FORCE_TX_GAIN_MASK 0x00000001
+#define PHY_BB_TX_FORCED_GAIN_FORCE_TX_GAIN_GET(x) (((x) & 0x00000001) >> 0)
+#define PHY_BB_TX_FORCED_GAIN_FORCE_TX_GAIN_SET(x) (((x) << 0) & 0x00000001)
+#define PHY_BB_TX_FORCED_GAIN_FORCED_TXBB1DBGAIN_MSB 3
+#define PHY_BB_TX_FORCED_GAIN_FORCED_TXBB1DBGAIN_LSB 1
+#define PHY_BB_TX_FORCED_GAIN_FORCED_TXBB1DBGAIN_MASK 0x0000000e
+#define PHY_BB_TX_FORCED_GAIN_FORCED_TXBB1DBGAIN_GET(x) (((x) & 0x0000000e) >> 1)
+#define PHY_BB_TX_FORCED_GAIN_FORCED_TXBB1DBGAIN_SET(x) (((x) << 1) & 0x0000000e)
+#define PHY_BB_TX_FORCED_GAIN_FORCED_TXBB6DBGAIN_MSB 5
+#define PHY_BB_TX_FORCED_GAIN_FORCED_TXBB6DBGAIN_LSB 4
+#define PHY_BB_TX_FORCED_GAIN_FORCED_TXBB6DBGAIN_MASK 0x00000030
+#define PHY_BB_TX_FORCED_GAIN_FORCED_TXBB6DBGAIN_GET(x) (((x) & 0x00000030) >> 4)
+#define PHY_BB_TX_FORCED_GAIN_FORCED_TXBB6DBGAIN_SET(x) (((x) << 4) & 0x00000030)
+#define PHY_BB_TX_FORCED_GAIN_FORCED_TXMXRGAIN_MSB 9
+#define PHY_BB_TX_FORCED_GAIN_FORCED_TXMXRGAIN_LSB 6
+#define PHY_BB_TX_FORCED_GAIN_FORCED_TXMXRGAIN_MASK 0x000003c0
+#define PHY_BB_TX_FORCED_GAIN_FORCED_TXMXRGAIN_GET(x) (((x) & 0x000003c0) >> 6)
+#define PHY_BB_TX_FORCED_GAIN_FORCED_TXMXRGAIN_SET(x) (((x) << 6) & 0x000003c0)
+#define PHY_BB_TX_FORCED_GAIN_FORCED_PADRVGNA_MSB 13
+#define PHY_BB_TX_FORCED_GAIN_FORCED_PADRVGNA_LSB 10
+#define PHY_BB_TX_FORCED_GAIN_FORCED_PADRVGNA_MASK 0x00003c00
+#define PHY_BB_TX_FORCED_GAIN_FORCED_PADRVGNA_GET(x) (((x) & 0x00003c00) >> 10)
+#define PHY_BB_TX_FORCED_GAIN_FORCED_PADRVGNA_SET(x) (((x) << 10) & 0x00003c00)
+#define PHY_BB_TX_FORCED_GAIN_FORCED_PADRVGNB_MSB 17
+#define PHY_BB_TX_FORCED_GAIN_FORCED_PADRVGNB_LSB 14
+#define PHY_BB_TX_FORCED_GAIN_FORCED_PADRVGNB_MASK 0x0003c000
+#define PHY_BB_TX_FORCED_GAIN_FORCED_PADRVGNB_GET(x) (((x) & 0x0003c000) >> 14)
+#define PHY_BB_TX_FORCED_GAIN_FORCED_PADRVGNB_SET(x) (((x) << 14) & 0x0003c000)
+#define PHY_BB_TX_FORCED_GAIN_FORCED_PADRVGNC_MSB 21
+#define PHY_BB_TX_FORCED_GAIN_FORCED_PADRVGNC_LSB 18
+#define PHY_BB_TX_FORCED_GAIN_FORCED_PADRVGNC_MASK 0x003c0000
+#define PHY_BB_TX_FORCED_GAIN_FORCED_PADRVGNC_GET(x) (((x) & 0x003c0000) >> 18)
+#define PHY_BB_TX_FORCED_GAIN_FORCED_PADRVGNC_SET(x) (((x) << 18) & 0x003c0000)
+#define PHY_BB_TX_FORCED_GAIN_FORCED_PADRVGND_MSB 23
+#define PHY_BB_TX_FORCED_GAIN_FORCED_PADRVGND_LSB 22
+#define PHY_BB_TX_FORCED_GAIN_FORCED_PADRVGND_MASK 0x00c00000
+#define PHY_BB_TX_FORCED_GAIN_FORCED_PADRVGND_GET(x) (((x) & 0x00c00000) >> 22)
+#define PHY_BB_TX_FORCED_GAIN_FORCED_PADRVGND_SET(x) (((x) << 22) & 0x00c00000)
+#define PHY_BB_TX_FORCED_GAIN_FORCED_ENABLE_PAL_MSB 24
+#define PHY_BB_TX_FORCED_GAIN_FORCED_ENABLE_PAL_LSB 24
+#define PHY_BB_TX_FORCED_GAIN_FORCED_ENABLE_PAL_MASK 0x01000000
+#define PHY_BB_TX_FORCED_GAIN_FORCED_ENABLE_PAL_GET(x) (((x) & 0x01000000) >> 24)
+#define PHY_BB_TX_FORCED_GAIN_FORCED_ENABLE_PAL_SET(x) (((x) << 24) & 0x01000000)
+
+/* macros for BB_eco_ctrl */
+#define PHY_BB_ECO_CTRL_ADDRESS 0x0000a7fc
+#define PHY_BB_ECO_CTRL_OFFSET 0x0000a7fc
+#define PHY_BB_ECO_CTRL_ECO_CTRL_MSB 7
+#define PHY_BB_ECO_CTRL_ECO_CTRL_LSB 0
+#define PHY_BB_ECO_CTRL_ECO_CTRL_MASK 0x000000ff
+#define PHY_BB_ECO_CTRL_ECO_CTRL_GET(x) (((x) & 0x000000ff) >> 0)
+#define PHY_BB_ECO_CTRL_ECO_CTRL_SET(x) (((x) << 0) & 0x000000ff)
+
+/* macros for BB_gain_force_max_gains_b1 */
+#define PHY_BB_GAIN_FORCE_MAX_GAINS_B1_ADDRESS 0x0000a848
+#define PHY_BB_GAIN_FORCE_MAX_GAINS_B1_OFFSET 0x0000a848
+#define PHY_BB_GAIN_FORCE_MAX_GAINS_B1_XATTEN1_HYST_MARGIN_1_MSB 13
+#define PHY_BB_GAIN_FORCE_MAX_GAINS_B1_XATTEN1_HYST_MARGIN_1_LSB 7
+#define PHY_BB_GAIN_FORCE_MAX_GAINS_B1_XATTEN1_HYST_MARGIN_1_MASK 0x00003f80
+#define PHY_BB_GAIN_FORCE_MAX_GAINS_B1_XATTEN1_HYST_MARGIN_1_GET(x) (((x) & 0x00003f80) >> 7)
+#define PHY_BB_GAIN_FORCE_MAX_GAINS_B1_XATTEN1_HYST_MARGIN_1_SET(x) (((x) << 7) & 0x00003f80)
+#define PHY_BB_GAIN_FORCE_MAX_GAINS_B1_XATTEN2_HYST_MARGIN_1_MSB 20
+#define PHY_BB_GAIN_FORCE_MAX_GAINS_B1_XATTEN2_HYST_MARGIN_1_LSB 14
+#define PHY_BB_GAIN_FORCE_MAX_GAINS_B1_XATTEN2_HYST_MARGIN_1_MASK 0x001fc000
+#define PHY_BB_GAIN_FORCE_MAX_GAINS_B1_XATTEN2_HYST_MARGIN_1_GET(x) (((x) & 0x001fc000) >> 14)
+#define PHY_BB_GAIN_FORCE_MAX_GAINS_B1_XATTEN2_HYST_MARGIN_1_SET(x) (((x) << 14) & 0x001fc000)
+
+/* macros for BB_gains_min_offsets_b1 */
+#define PHY_BB_GAINS_MIN_OFFSETS_B1_ADDRESS 0x0000a84c
+#define PHY_BB_GAINS_MIN_OFFSETS_B1_OFFSET 0x0000a84c
+#define PHY_BB_GAINS_MIN_OFFSETS_B1_RF_GAIN_F_1_MSB 24
+#define PHY_BB_GAINS_MIN_OFFSETS_B1_RF_GAIN_F_1_LSB 17
+#define PHY_BB_GAINS_MIN_OFFSETS_B1_RF_GAIN_F_1_MASK 0x01fe0000
+#define PHY_BB_GAINS_MIN_OFFSETS_B1_RF_GAIN_F_1_GET(x) (((x) & 0x01fe0000) >> 17)
+#define PHY_BB_GAINS_MIN_OFFSETS_B1_RF_GAIN_F_1_SET(x) (((x) << 17) & 0x01fe0000)
+#define PHY_BB_GAINS_MIN_OFFSETS_B1_XATTEN1_SW_F_1_MSB 25
+#define PHY_BB_GAINS_MIN_OFFSETS_B1_XATTEN1_SW_F_1_LSB 25
+#define PHY_BB_GAINS_MIN_OFFSETS_B1_XATTEN1_SW_F_1_MASK 0x02000000
+#define PHY_BB_GAINS_MIN_OFFSETS_B1_XATTEN1_SW_F_1_GET(x) (((x) & 0x02000000) >> 25)
+#define PHY_BB_GAINS_MIN_OFFSETS_B1_XATTEN1_SW_F_1_SET(x) (((x) << 25) & 0x02000000)
+#define PHY_BB_GAINS_MIN_OFFSETS_B1_XATTEN2_SW_F_1_MSB 26
+#define PHY_BB_GAINS_MIN_OFFSETS_B1_XATTEN2_SW_F_1_LSB 26
+#define PHY_BB_GAINS_MIN_OFFSETS_B1_XATTEN2_SW_F_1_MASK 0x04000000
+#define PHY_BB_GAINS_MIN_OFFSETS_B1_XATTEN2_SW_F_1_GET(x) (((x) & 0x04000000) >> 26)
+#define PHY_BB_GAINS_MIN_OFFSETS_B1_XATTEN2_SW_F_1_SET(x) (((x) << 26) & 0x04000000)
+
+/* macros for BB_rx_ocgain2 */
+#define PHY_BB_RX_OCGAIN2_ADDRESS 0x0000aa00
+#define PHY_BB_RX_OCGAIN2_OFFSET 0x0000aa00
+#define PHY_BB_RX_OCGAIN2_GAIN_ENTRY2_MSB 31
+#define PHY_BB_RX_OCGAIN2_GAIN_ENTRY2_LSB 0
+#define PHY_BB_RX_OCGAIN2_GAIN_ENTRY2_MASK 0xffffffff
+#define PHY_BB_RX_OCGAIN2_GAIN_ENTRY2_SET(x) (((x) << 0) & 0xffffffff)
+
+/* macros for BB_ext_atten_switch_ctl_b1 */
+#define PHY_BB_EXT_ATTEN_SWITCH_CTL_B1_ADDRESS 0x0000b20c
+#define PHY_BB_EXT_ATTEN_SWITCH_CTL_B1_OFFSET 0x0000b20c
+#define PHY_BB_EXT_ATTEN_SWITCH_CTL_B1_XATTEN1_DB_1_MSB 5
+#define PHY_BB_EXT_ATTEN_SWITCH_CTL_B1_XATTEN1_DB_1_LSB 0
+#define PHY_BB_EXT_ATTEN_SWITCH_CTL_B1_XATTEN1_DB_1_MASK 0x0000003f
+#define PHY_BB_EXT_ATTEN_SWITCH_CTL_B1_XATTEN1_DB_1_GET(x) (((x) & 0x0000003f) >> 0)
+#define PHY_BB_EXT_ATTEN_SWITCH_CTL_B1_XATTEN1_DB_1_SET(x) (((x) << 0) & 0x0000003f)
+#define PHY_BB_EXT_ATTEN_SWITCH_CTL_B1_XATTEN2_DB_1_MSB 11
+#define PHY_BB_EXT_ATTEN_SWITCH_CTL_B1_XATTEN2_DB_1_LSB 6
+#define PHY_BB_EXT_ATTEN_SWITCH_CTL_B1_XATTEN2_DB_1_MASK 0x00000fc0
+#define PHY_BB_EXT_ATTEN_SWITCH_CTL_B1_XATTEN2_DB_1_GET(x) (((x) & 0x00000fc0) >> 6)
+#define PHY_BB_EXT_ATTEN_SWITCH_CTL_B1_XATTEN2_DB_1_SET(x) (((x) << 6) & 0x00000fc0)
+#define PHY_BB_EXT_ATTEN_SWITCH_CTL_B1_XATTEN1_MARGIN_1_MSB 16
+#define PHY_BB_EXT_ATTEN_SWITCH_CTL_B1_XATTEN1_MARGIN_1_LSB 12
+#define PHY_BB_EXT_ATTEN_SWITCH_CTL_B1_XATTEN1_MARGIN_1_MASK 0x0001f000
+#define PHY_BB_EXT_ATTEN_SWITCH_CTL_B1_XATTEN1_MARGIN_1_GET(x) (((x) & 0x0001f000) >> 12)
+#define PHY_BB_EXT_ATTEN_SWITCH_CTL_B1_XATTEN1_MARGIN_1_SET(x) (((x) << 12) & 0x0001f000)
+#define PHY_BB_EXT_ATTEN_SWITCH_CTL_B1_XATTEN2_MARGIN_1_MSB 21
+#define PHY_BB_EXT_ATTEN_SWITCH_CTL_B1_XATTEN2_MARGIN_1_LSB 17
+#define PHY_BB_EXT_ATTEN_SWITCH_CTL_B1_XATTEN2_MARGIN_1_MASK 0x003e0000
+#define PHY_BB_EXT_ATTEN_SWITCH_CTL_B1_XATTEN2_MARGIN_1_GET(x) (((x) & 0x003e0000) >> 17)
+#define PHY_BB_EXT_ATTEN_SWITCH_CTL_B1_XATTEN2_MARGIN_1_SET(x) (((x) << 17) & 0x003e0000)
+
+
+#ifndef __ASSEMBLER__
+
+typedef struct bb_lc_reg_reg_s {
+ volatile char pad__0[0x9800]; /* 0x0 - 0x9800 */
+ volatile unsigned int BB_test_controls; /* 0x9800 - 0x9804 */
+ volatile unsigned int BB_gen_controls; /* 0x9804 - 0x9808 */
+ volatile unsigned int BB_test_controls_status; /* 0x9808 - 0x980c */
+ volatile unsigned int BB_timing_controls_1; /* 0x980c - 0x9810 */
+ volatile unsigned int BB_timing_controls_2; /* 0x9810 - 0x9814 */
+ volatile unsigned int BB_timing_controls_3; /* 0x9814 - 0x9818 */
+ volatile unsigned int BB_D2_chip_id; /* 0x9818 - 0x981c */
+ volatile unsigned int BB_active; /* 0x981c - 0x9820 */
+ volatile unsigned int BB_tx_timing_1; /* 0x9820 - 0x9824 */
+ volatile unsigned int BB_tx_timing_2; /* 0x9824 - 0x9828 */
+ volatile unsigned int BB_tx_timing_3; /* 0x9828 - 0x982c */
+ volatile unsigned int BB_addac_parallel_control; /* 0x982c - 0x9830 */
+ volatile char pad__1[0x4]; /* 0x9830 - 0x9834 */
+ volatile unsigned int BB_xpa_timing_control; /* 0x9834 - 0x9838 */
+ volatile unsigned int BB_misc_pa_control; /* 0x9838 - 0x983c */
+ volatile unsigned int BB_tstdac_constant; /* 0x983c - 0x9840 */
+ volatile unsigned int BB_find_signal_low; /* 0x9840 - 0x9844 */
+ volatile unsigned int BB_settling_time; /* 0x9844 - 0x9848 */
+ volatile unsigned int BB_gain_force_max_gains_b0; /* 0x9848 - 0x984c */
+ volatile unsigned int BB_gains_min_offsets_b0; /* 0x984c - 0x9850 */
+ volatile unsigned int BB_desired_sigsize; /* 0x9850 - 0x9854 */
+ volatile unsigned int BB_timing_control_3a; /* 0x9854 - 0x9858 */
+ volatile unsigned int BB_find_signal; /* 0x9858 - 0x985c */
+ volatile unsigned int BB_agc; /* 0x985c - 0x9860 */
+ volatile unsigned int BB_agc_control; /* 0x9860 - 0x9864 */
+ volatile unsigned int BB_cca_b0; /* 0x9864 - 0x9868 */
+ volatile unsigned int BB_sfcorr; /* 0x9868 - 0x986c */
+ volatile unsigned int BB_self_corr_low; /* 0x986c - 0x9870 */
+ volatile char pad__2[0x4]; /* 0x9870 - 0x9874 */
+ volatile unsigned int BB_synth_control; /* 0x9874 - 0x9878 */
+ volatile unsigned int BB_addac_clk_select; /* 0x9878 - 0x987c */
+ volatile unsigned int BB_pll_cntl; /* 0x987c - 0x9880 */
+ volatile char pad__3[0x80]; /* 0x9880 - 0x9900 */
+ volatile unsigned int BB_vit_spur_mask_A; /* 0x9900 - 0x9904 */
+ volatile unsigned int BB_vit_spur_mask_B; /* 0x9904 - 0x9908 */
+ volatile unsigned int BB_pilot_spur_mask; /* 0x9908 - 0x990c */
+ volatile unsigned int BB_chan_spur_mask; /* 0x990c - 0x9910 */
+ volatile unsigned int BB_spectral_scan; /* 0x9910 - 0x9914 */
+ volatile unsigned int BB_analog_power_on_time; /* 0x9914 - 0x9918 */
+ volatile unsigned int BB_search_start_delay; /* 0x9918 - 0x991c */
+ volatile unsigned int BB_max_rx_length; /* 0x991c - 0x9920 */
+ volatile unsigned int BB_timing_control_4; /* 0x9920 - 0x9924 */
+ volatile unsigned int BB_timing_control_5; /* 0x9924 - 0x9928 */
+ volatile unsigned int BB_phyonly_warm_reset; /* 0x9928 - 0x992c */
+ volatile unsigned int BB_phyonly_control; /* 0x992c - 0x9930 */
+ volatile char pad__4[0x4]; /* 0x9930 - 0x9934 */
+ volatile unsigned int BB_powertx_rate1; /* 0x9934 - 0x9938 */
+ volatile unsigned int BB_powertx_rate2; /* 0x9938 - 0x993c */
+ volatile unsigned int BB_powertx_max; /* 0x993c - 0x9940 */
+ volatile unsigned int BB_extension_radar; /* 0x9940 - 0x9944 */
+ volatile unsigned int BB_frame_control; /* 0x9944 - 0x9948 */
+ volatile unsigned int BB_timing_control_6; /* 0x9948 - 0x994c */
+ volatile unsigned int BB_spur_mask_controls; /* 0x994c - 0x9950 */
+ volatile unsigned int BB_rx_iq_corr_b0; /* 0x9950 - 0x9954 */
+ volatile unsigned int BB_radar_detection; /* 0x9954 - 0x9958 */
+ volatile unsigned int BB_radar_detection_2; /* 0x9958 - 0x995c */
+ volatile unsigned int BB_tx_phase_ramp_b0; /* 0x995c - 0x9960 */
+ volatile unsigned int BB_switch_table_chn_b0; /* 0x9960 - 0x9964 */
+ volatile unsigned int BB_switch_table_com1; /* 0x9964 - 0x9968 */
+ volatile unsigned int BB_cca_ctrl_2_b0; /* 0x9968 - 0x996c */
+ volatile unsigned int BB_switch_table_com2; /* 0x996c - 0x9970 */
+ volatile unsigned int BB_restart; /* 0x9970 - 0x9974 */
+ volatile char pad__5[0x4]; /* 0x9974 - 0x9978 */
+ volatile unsigned int BB_scrambler_seed; /* 0x9978 - 0x997c */
+ volatile unsigned int BB_rfbus_request; /* 0x997c - 0x9980 */
+ volatile char pad__6[0x20]; /* 0x9980 - 0x99a0 */
+ volatile unsigned int BB_timing_control_11; /* 0x99a0 - 0x99a4 */
+ volatile unsigned int BB_multichain_enable; /* 0x99a4 - 0x99a8 */
+ volatile unsigned int BB_multichain_control; /* 0x99a8 - 0x99ac */
+ volatile unsigned int BB_multichain_gain_ctrl; /* 0x99ac - 0x99b0 */
+ volatile char pad__7[0x4]; /* 0x99b0 - 0x99b4 */
+ volatile unsigned int BB_adc_gain_dc_corr_b0; /* 0x99b4 - 0x99b8 */
+ volatile unsigned int BB_ext_chan_pwr_thr_1; /* 0x99b8 - 0x99bc */
+ volatile unsigned int BB_ext_chan_pwr_thr_2_b0; /* 0x99bc - 0x99c0 */
+ volatile unsigned int BB_ext_chan_scorr_thr; /* 0x99c0 - 0x99c4 */
+ volatile unsigned int BB_ext_chan_detect_win; /* 0x99c4 - 0x99c8 */
+ volatile unsigned int BB_pwr_thr_20_40_det; /* 0x99c8 - 0x99cc */
+ volatile char pad__8[0x4]; /* 0x99cc - 0x99d0 */
+ volatile unsigned int BB_short_gi_delta_slope; /* 0x99d0 - 0x99d4 */
+ volatile char pad__9[0x8]; /* 0x99d4 - 0x99dc */
+ volatile unsigned int BB_chaninfo_ctrl; /* 0x99dc - 0x99e0 */
+ volatile unsigned int BB_heavy_clip_ctrl; /* 0x99e0 - 0x99e4 */
+ volatile unsigned int BB_heavy_clip_20; /* 0x99e4 - 0x99e8 */
+ volatile unsigned int BB_heavy_clip_40; /* 0x99e8 - 0x99ec */
+ volatile unsigned int BB_rifs_srch; /* 0x99ec - 0x99f0 */
+ volatile unsigned int BB_iq_adc_cal_mode; /* 0x99f0 - 0x99f4 */
+ volatile char pad__10[0x8]; /* 0x99f4 - 0x99fc */
+ volatile unsigned int BB_per_chain_csd; /* 0x99fc - 0x9a00 */
+ volatile unsigned int BB_rx_ocgain[128]; /* 0x9a00 - 0x9c00 */
+ volatile unsigned int BB_tx_crc; /* 0x9c00 - 0x9c04 */
+ volatile char pad__11[0xc]; /* 0x9c04 - 0x9c10 */
+ volatile unsigned int BB_iq_adc_meas_0_b0; /* 0x9c10 - 0x9c14 */
+ volatile unsigned int BB_iq_adc_meas_1_b0; /* 0x9c14 - 0x9c18 */
+ volatile unsigned int BB_iq_adc_meas_2_b0; /* 0x9c18 - 0x9c1c */
+ volatile unsigned int BB_iq_adc_meas_3_b0; /* 0x9c1c - 0x9c20 */
+ volatile unsigned int BB_rfbus_grant; /* 0x9c20 - 0x9c24 */
+ volatile unsigned int BB_tstadc; /* 0x9c24 - 0x9c28 */
+ volatile unsigned int BB_tstdac; /* 0x9c28 - 0x9c2c */
+ volatile char pad__12[0x4]; /* 0x9c2c - 0x9c30 */
+ volatile unsigned int BB_illegal_tx_rate; /* 0x9c30 - 0x9c34 */
+ volatile unsigned int BB_spur_report_b0; /* 0x9c34 - 0x9c38 */
+ volatile unsigned int BB_channel_status; /* 0x9c38 - 0x9c3c */
+ volatile unsigned int BB_rssi_b0; /* 0x9c3c - 0x9c40 */
+ volatile unsigned int BB_spur_est_cck_report_b0; /* 0x9c40 - 0x9c44 */
+ volatile char pad__13[0x68]; /* 0x9c44 - 0x9cac */
+ volatile unsigned int BB_chan_info_noise_pwr; /* 0x9cac - 0x9cb0 */
+ volatile unsigned int BB_chan_info_gain_diff; /* 0x9cb0 - 0x9cb4 */
+ volatile unsigned int BB_chan_info_fine_timing; /* 0x9cb4 - 0x9cb8 */
+ volatile unsigned int BB_chan_info_gain_b0; /* 0x9cb8 - 0x9cbc */
+ volatile unsigned int BB_chan_info_chan_tab_b0[60]; /* 0x9cbc - 0x9dac */
+ volatile char pad__14[0x38]; /* 0x9dac - 0x9de4 */
+ volatile unsigned int BB_paprd_am2am_mask; /* 0x9de4 - 0x9de8 */
+ volatile unsigned int BB_paprd_am2pm_mask; /* 0x9de8 - 0x9dec */
+ volatile unsigned int BB_paprd_ht40_mask; /* 0x9dec - 0x9df0 */
+ volatile unsigned int BB_paprd_ctrl0; /* 0x9df0 - 0x9df4 */
+ volatile unsigned int BB_paprd_ctrl1; /* 0x9df4 - 0x9df8 */
+ volatile unsigned int BB_pa_gain123; /* 0x9df8 - 0x9dfc */
+ volatile unsigned int BB_pa_gain45; /* 0x9dfc - 0x9e00 */
+ volatile unsigned int BB_paprd_pre_post_scale_0; /* 0x9e00 - 0x9e04 */
+ volatile unsigned int BB_paprd_pre_post_scale_1; /* 0x9e04 - 0x9e08 */
+ volatile unsigned int BB_paprd_pre_post_scale_2; /* 0x9e08 - 0x9e0c */
+ volatile unsigned int BB_paprd_pre_post_scale_3; /* 0x9e0c - 0x9e10 */
+ volatile unsigned int BB_paprd_pre_post_scale_4; /* 0x9e10 - 0x9e14 */
+ volatile unsigned int BB_paprd_pre_post_scale_5; /* 0x9e14 - 0x9e18 */
+ volatile unsigned int BB_paprd_pre_post_scale_6; /* 0x9e18 - 0x9e1c */
+ volatile unsigned int BB_paprd_pre_post_scale_7; /* 0x9e1c - 0x9e20 */
+ volatile unsigned int BB_paprd_mem_tab[120]; /* 0x9e20 - 0xa000 */
+ volatile unsigned int BB_peak_det_ctrl_1; /* 0xa000 - 0xa004 */
+ volatile unsigned int BB_peak_det_ctrl_2; /* 0xa004 - 0xa008 */
+ volatile unsigned int BB_rx_gain_bounds_1; /* 0xa008 - 0xa00c */
+ volatile unsigned int BB_rx_gain_bounds_2; /* 0xa00c - 0xa010 */
+ volatile unsigned int BB_peak_det_cal_ctrl; /* 0xa010 - 0xa014 */
+ volatile unsigned int BB_agc_dig_dc_ctrl; /* 0xa014 - 0xa018 */
+ volatile unsigned int BB_agc_dig_dc_status_i_b0; /* 0xa018 - 0xa01c */
+ volatile unsigned int BB_agc_dig_dc_status_q_b0; /* 0xa01c - 0xa020 */
+ volatile char pad__15[0x1d4]; /* 0xa020 - 0xa1f4 */
+ volatile unsigned int BB_bbb_txfir_0; /* 0xa1f4 - 0xa1f8 */
+ volatile unsigned int BB_bbb_txfir_1; /* 0xa1f8 - 0xa1fc */
+ volatile unsigned int BB_bbb_txfir_2; /* 0xa1fc - 0xa200 */
+ volatile unsigned int BB_modes_select; /* 0xa200 - 0xa204 */
+ volatile unsigned int BB_bbb_tx_ctrl; /* 0xa204 - 0xa208 */
+ volatile unsigned int BB_bbb_sig_detect; /* 0xa208 - 0xa20c */
+ volatile unsigned int BB_ext_atten_switch_ctl_b0; /* 0xa20c - 0xa210 */
+ volatile unsigned int BB_bbb_rx_ctrl_1; /* 0xa210 - 0xa214 */
+ volatile unsigned int BB_bbb_rx_ctrl_2; /* 0xa214 - 0xa218 */
+ volatile unsigned int BB_bbb_rx_ctrl_3; /* 0xa218 - 0xa21c */
+ volatile unsigned int BB_bbb_rx_ctrl_4; /* 0xa21c - 0xa220 */
+ volatile unsigned int BB_bbb_rx_ctrl_5; /* 0xa220 - 0xa224 */
+ volatile unsigned int BB_bbb_rx_ctrl_6; /* 0xa224 - 0xa228 */
+ volatile unsigned int BB_bbb_dagc_ctrl; /* 0xa228 - 0xa22c */
+ volatile unsigned int BB_force_clken_cck; /* 0xa22c - 0xa230 */
+ volatile unsigned int BB_rx_clear_delay; /* 0xa230 - 0xa234 */
+ volatile unsigned int BB_powertx_rate3; /* 0xa234 - 0xa238 */
+ volatile unsigned int BB_powertx_rate4; /* 0xa238 - 0xa23c */
+ volatile char pad__16[0x4]; /* 0xa23c - 0xa240 */
+ volatile unsigned int BB_cck_spur_mit; /* 0xa240 - 0xa244 */
+ volatile unsigned int BB_panic_watchdog_status; /* 0xa244 - 0xa248 */
+ volatile unsigned int BB_panic_watchdog_ctrl_1; /* 0xa248 - 0xa24c */
+ volatile unsigned int BB_panic_watchdog_ctrl_2; /* 0xa24c - 0xa250 */
+ volatile unsigned int BB_iqcorr_ctrl_cck; /* 0xa250 - 0xa254 */
+ volatile unsigned int BB_bluetooth_cntl; /* 0xa254 - 0xa258 */
+ volatile unsigned int BB_tpc_1; /* 0xa258 - 0xa25c */
+ volatile unsigned int BB_tpc_2; /* 0xa25c - 0xa260 */
+ volatile unsigned int BB_tpc_3; /* 0xa260 - 0xa264 */
+ volatile unsigned int BB_tpc_4_b0; /* 0xa264 - 0xa268 */
+ volatile unsigned int BB_analog_swap; /* 0xa268 - 0xa26c */
+ volatile unsigned int BB_tpc_5_b0; /* 0xa26c - 0xa270 */
+ volatile unsigned int BB_tpc_6_b0; /* 0xa270 - 0xa274 */
+ volatile unsigned int BB_tpc_7; /* 0xa274 - 0xa278 */
+ volatile unsigned int BB_tpc_8; /* 0xa278 - 0xa27c */
+ volatile unsigned int BB_tpc_9; /* 0xa27c - 0xa280 */
+ volatile unsigned int BB_pdadc_tab_b0[32]; /* 0xa280 - 0xa300 */
+ volatile unsigned int BB_cl_tab_b0[16]; /* 0xa300 - 0xa340 */
+ volatile unsigned int BB_cl_map_0_b0; /* 0xa340 - 0xa344 */
+ volatile unsigned int BB_cl_map_1_b0; /* 0xa344 - 0xa348 */
+ volatile unsigned int BB_cl_map_2_b0; /* 0xa348 - 0xa34c */
+ volatile unsigned int BB_cl_map_3_b0; /* 0xa34c - 0xa350 */
+ volatile char pad__17[0x8]; /* 0xa350 - 0xa358 */
+ volatile unsigned int BB_cl_cal_ctrl; /* 0xa358 - 0xa35c */
+ volatile unsigned int BB_cl_map_pal_0_b0; /* 0xa35c - 0xa360 */
+ volatile unsigned int BB_cl_map_pal_1_b0; /* 0xa360 - 0xa364 */
+ volatile unsigned int BB_cl_map_pal_2_b0; /* 0xa364 - 0xa368 */
+ volatile unsigned int BB_cl_map_pal_3_b0; /* 0xa368 - 0xa36c */
+ volatile char pad__18[0x1c]; /* 0xa36c - 0xa388 */
+ volatile unsigned int BB_rifs; /* 0xa388 - 0xa38c */
+ volatile unsigned int BB_powertx_rate5; /* 0xa38c - 0xa390 */
+ volatile unsigned int BB_powertx_rate6; /* 0xa390 - 0xa394 */
+ volatile unsigned int BB_tpc_10; /* 0xa394 - 0xa398 */
+ volatile unsigned int BB_tpc_11_b0; /* 0xa398 - 0xa39c */
+ volatile unsigned int BB_cal_chain_mask; /* 0xa39c - 0xa3a0 */
+ volatile char pad__19[0x1c]; /* 0xa3a0 - 0xa3bc */
+ volatile unsigned int BB_powertx_sub; /* 0xa3bc - 0xa3c0 */
+ volatile unsigned int BB_powertx_rate7; /* 0xa3c0 - 0xa3c4 */
+ volatile unsigned int BB_powertx_rate8; /* 0xa3c4 - 0xa3c8 */
+ volatile unsigned int BB_powertx_rate9; /* 0xa3c8 - 0xa3cc */
+ volatile unsigned int BB_powertx_rate10; /* 0xa3cc - 0xa3d0 */
+ volatile unsigned int BB_powertx_rate11; /* 0xa3d0 - 0xa3d4 */
+ volatile unsigned int BB_powertx_rate12; /* 0xa3d4 - 0xa3d8 */
+ volatile unsigned int BB_force_analog; /* 0xa3d8 - 0xa3dc */
+ volatile unsigned int BB_tpc_12; /* 0xa3dc - 0xa3e0 */
+ volatile unsigned int BB_tpc_13; /* 0xa3e0 - 0xa3e4 */
+ volatile unsigned int BB_tpc_14; /* 0xa3e4 - 0xa3e8 */
+ volatile unsigned int BB_tpc_15; /* 0xa3e8 - 0xa3ec */
+ volatile unsigned int BB_tpc_16; /* 0xa3ec - 0xa3f0 */
+ volatile unsigned int BB_tpc_17; /* 0xa3f0 - 0xa3f4 */
+ volatile unsigned int BB_tpc_18; /* 0xa3f4 - 0xa3f8 */
+ volatile unsigned int BB_tpc_19; /* 0xa3f8 - 0xa3fc */
+ volatile unsigned int BB_tpc_20; /* 0xa3fc - 0xa400 */
+ volatile unsigned int BB_tx_gain_tab_1; /* 0xa400 - 0xa404 */
+ volatile unsigned int BB_tx_gain_tab_2; /* 0xa404 - 0xa408 */
+ volatile unsigned int BB_tx_gain_tab_3; /* 0xa408 - 0xa40c */
+ volatile unsigned int BB_tx_gain_tab_4; /* 0xa40c - 0xa410 */
+ volatile unsigned int BB_tx_gain_tab_5; /* 0xa410 - 0xa414 */
+ volatile unsigned int BB_tx_gain_tab_6; /* 0xa414 - 0xa418 */
+ volatile unsigned int BB_tx_gain_tab_7; /* 0xa418 - 0xa41c */
+ volatile unsigned int BB_tx_gain_tab_8; /* 0xa41c - 0xa420 */
+ volatile unsigned int BB_tx_gain_tab_9; /* 0xa420 - 0xa424 */
+ volatile unsigned int BB_tx_gain_tab_10; /* 0xa424 - 0xa428 */
+ volatile unsigned int BB_tx_gain_tab_11; /* 0xa428 - 0xa42c */
+ volatile unsigned int BB_tx_gain_tab_12; /* 0xa42c - 0xa430 */
+ volatile unsigned int BB_tx_gain_tab_13; /* 0xa430 - 0xa434 */
+ volatile unsigned int BB_tx_gain_tab_14; /* 0xa434 - 0xa438 */
+ volatile unsigned int BB_tx_gain_tab_15; /* 0xa438 - 0xa43c */
+ volatile unsigned int BB_tx_gain_tab_16; /* 0xa43c - 0xa440 */
+ volatile unsigned int BB_tx_gain_tab_17; /* 0xa440 - 0xa444 */
+ volatile unsigned int BB_tx_gain_tab_18; /* 0xa444 - 0xa448 */
+ volatile unsigned int BB_tx_gain_tab_19; /* 0xa448 - 0xa44c */
+ volatile unsigned int BB_tx_gain_tab_20; /* 0xa44c - 0xa450 */
+ volatile unsigned int BB_tx_gain_tab_21; /* 0xa450 - 0xa454 */
+ volatile unsigned int BB_tx_gain_tab_22; /* 0xa454 - 0xa458 */
+ volatile unsigned int BB_tx_gain_tab_23; /* 0xa458 - 0xa45c */
+ volatile unsigned int BB_tx_gain_tab_24; /* 0xa45c - 0xa460 */
+ volatile unsigned int BB_tx_gain_tab_25; /* 0xa460 - 0xa464 */
+ volatile unsigned int BB_tx_gain_tab_26; /* 0xa464 - 0xa468 */
+ volatile unsigned int BB_tx_gain_tab_27; /* 0xa468 - 0xa46c */
+ volatile unsigned int BB_tx_gain_tab_28; /* 0xa46c - 0xa470 */
+ volatile unsigned int BB_tx_gain_tab_29; /* 0xa470 - 0xa474 */
+ volatile unsigned int BB_tx_gain_tab_30; /* 0xa474 - 0xa478 */
+ volatile unsigned int BB_tx_gain_tab_31; /* 0xa478 - 0xa47c */
+ volatile unsigned int BB_tx_gain_tab_32; /* 0xa47c - 0xa480 */
+ volatile unsigned int BB_tx_gain_tab_pal_1; /* 0xa480 - 0xa484 */
+ volatile unsigned int BB_tx_gain_tab_pal_2; /* 0xa484 - 0xa488 */
+ volatile unsigned int BB_tx_gain_tab_pal_3; /* 0xa488 - 0xa48c */
+ volatile unsigned int BB_tx_gain_tab_pal_4; /* 0xa48c - 0xa490 */
+ volatile unsigned int BB_tx_gain_tab_pal_5; /* 0xa490 - 0xa494 */
+ volatile unsigned int BB_tx_gain_tab_pal_6; /* 0xa494 - 0xa498 */
+ volatile unsigned int BB_tx_gain_tab_pal_7; /* 0xa498 - 0xa49c */
+ volatile unsigned int BB_tx_gain_tab_pal_8; /* 0xa49c - 0xa4a0 */
+ volatile unsigned int BB_tx_gain_tab_pal_9; /* 0xa4a0 - 0xa4a4 */
+ volatile unsigned int BB_tx_gain_tab_pal_10; /* 0xa4a4 - 0xa4a8 */
+ volatile unsigned int BB_tx_gain_tab_pal_11; /* 0xa4a8 - 0xa4ac */
+ volatile unsigned int BB_tx_gain_tab_pal_12; /* 0xa4ac - 0xa4b0 */
+ volatile unsigned int BB_tx_gain_tab_pal_13; /* 0xa4b0 - 0xa4b4 */
+ volatile unsigned int BB_tx_gain_tab_pal_14; /* 0xa4b4 - 0xa4b8 */
+ volatile unsigned int BB_tx_gain_tab_pal_15; /* 0xa4b8 - 0xa4bc */
+ volatile unsigned int BB_tx_gain_tab_pal_16; /* 0xa4bc - 0xa4c0 */
+ volatile unsigned int BB_tx_gain_tab_pal_17; /* 0xa4c0 - 0xa4c4 */
+ volatile unsigned int BB_tx_gain_tab_pal_18; /* 0xa4c4 - 0xa4c8 */
+ volatile unsigned int BB_tx_gain_tab_pal_19; /* 0xa4c8 - 0xa4cc */
+ volatile unsigned int BB_tx_gain_tab_pal_20; /* 0xa4cc - 0xa4d0 */
+ volatile unsigned int BB_tx_gain_tab_pal_21; /* 0xa4d0 - 0xa4d4 */
+ volatile unsigned int BB_tx_gain_tab_pal_22; /* 0xa4d4 - 0xa4d8 */
+ volatile unsigned int BB_tx_gain_tab_pal_23; /* 0xa4d8 - 0xa4dc */
+ volatile unsigned int BB_tx_gain_tab_pal_24; /* 0xa4dc - 0xa4e0 */
+ volatile unsigned int BB_tx_gain_tab_pal_25; /* 0xa4e0 - 0xa4e4 */
+ volatile unsigned int BB_tx_gain_tab_pal_26; /* 0xa4e4 - 0xa4e8 */
+ volatile unsigned int BB_tx_gain_tab_pal_27; /* 0xa4e8 - 0xa4ec */
+ volatile unsigned int BB_tx_gain_tab_pal_28; /* 0xa4ec - 0xa4f0 */
+ volatile unsigned int BB_tx_gain_tab_pal_29; /* 0xa4f0 - 0xa4f4 */
+ volatile unsigned int BB_tx_gain_tab_pal_30; /* 0xa4f4 - 0xa4f8 */
+ volatile unsigned int BB_tx_gain_tab_pal_31; /* 0xa4f8 - 0xa4fc */
+ volatile unsigned int BB_tx_gain_tab_pal_32; /* 0xa4fc - 0xa500 */
+ volatile char pad__20[0x18]; /* 0xa500 - 0xa518 */
+ volatile unsigned int BB_caltx_gain_set_0; /* 0xa518 - 0xa51c */
+ volatile unsigned int BB_caltx_gain_set_2; /* 0xa51c - 0xa520 */
+ volatile unsigned int BB_caltx_gain_set_4; /* 0xa520 - 0xa524 */
+ volatile unsigned int BB_caltx_gain_set_6; /* 0xa524 - 0xa528 */
+ volatile unsigned int BB_caltx_gain_set_8; /* 0xa528 - 0xa52c */
+ volatile unsigned int BB_caltx_gain_set_10; /* 0xa52c - 0xa530 */
+ volatile unsigned int BB_caltx_gain_set_12; /* 0xa530 - 0xa534 */
+ volatile unsigned int BB_caltx_gain_set_14; /* 0xa534 - 0xa538 */
+ volatile unsigned int BB_caltx_gain_set_16; /* 0xa538 - 0xa53c */
+ volatile unsigned int BB_caltx_gain_set_18; /* 0xa53c - 0xa540 */
+ volatile unsigned int BB_caltx_gain_set_20; /* 0xa540 - 0xa544 */
+ volatile unsigned int BB_caltx_gain_set_22; /* 0xa544 - 0xa548 */
+ volatile unsigned int BB_caltx_gain_set_24; /* 0xa548 - 0xa54c */
+ volatile unsigned int BB_caltx_gain_set_26; /* 0xa54c - 0xa550 */
+ volatile unsigned int BB_caltx_gain_set_28; /* 0xa550 - 0xa554 */
+ volatile unsigned int BB_caltx_gain_set_30; /* 0xa554 - 0xa558 */
+ volatile unsigned int BB_txiqcal_meas_b0[96]; /* 0xa558 - 0xa6d8 */
+ volatile unsigned int BB_txiqcal_start; /* 0xa6d8 - 0xa6dc */
+ volatile unsigned int BB_txiqcal_control_0; /* 0xa6dc - 0xa6e0 */
+ volatile unsigned int BB_txiqcal_control_1; /* 0xa6e0 - 0xa6e4 */
+ volatile unsigned int BB_txiqcal_control_2; /* 0xa6e4 - 0xa6e8 */
+ volatile unsigned int BB_txiqcal_control_3; /* 0xa6e8 - 0xa6ec */
+ volatile unsigned int BB_txiq_corr_coeff_01_b0; /* 0xa6ec - 0xa6f0 */
+ volatile unsigned int BB_txiq_corr_coeff_23_b0; /* 0xa6f0 - 0xa6f4 */
+ volatile unsigned int BB_txiq_corr_coeff_45_b0; /* 0xa6f4 - 0xa6f8 */
+ volatile unsigned int BB_txiq_corr_coeff_67_b0; /* 0xa6f8 - 0xa6fc */
+ volatile unsigned int BB_txiq_corr_coeff_89_b0; /* 0xa6fc - 0xa700 */
+ volatile unsigned int BB_txiq_corr_coeff_ab_b0; /* 0xa700 - 0xa704 */
+ volatile unsigned int BB_txiq_corr_coeff_cd_b0; /* 0xa704 - 0xa708 */
+ volatile unsigned int BB_txiq_corr_coeff_ef_b0; /* 0xa708 - 0xa70c */
+ volatile unsigned int BB_cal_rxbb_gain_tbl_0; /* 0xa70c - 0xa710 */
+ volatile unsigned int BB_cal_rxbb_gain_tbl_4; /* 0xa710 - 0xa714 */
+ volatile unsigned int BB_cal_rxbb_gain_tbl_8; /* 0xa714 - 0xa718 */
+ volatile unsigned int BB_cal_rxbb_gain_tbl_12; /* 0xa718 - 0xa71c */
+ volatile unsigned int BB_cal_rxbb_gain_tbl_16; /* 0xa71c - 0xa720 */
+ volatile unsigned int BB_cal_rxbb_gain_tbl_20; /* 0xa720 - 0xa724 */
+ volatile unsigned int BB_cal_rxbb_gain_tbl_24; /* 0xa724 - 0xa728 */
+ volatile unsigned int BB_txiqcal_status_b0; /* 0xa728 - 0xa72c */
+ volatile unsigned int BB_paprd_trainer_cntl1; /* 0xa72c - 0xa730 */
+ volatile unsigned int BB_paprd_trainer_cntl2; /* 0xa730 - 0xa734 */
+ volatile unsigned int BB_paprd_trainer_cntl3; /* 0xa734 - 0xa738 */
+ volatile unsigned int BB_paprd_trainer_cntl4; /* 0xa738 - 0xa73c */
+ volatile unsigned int BB_paprd_trainer_stat1; /* 0xa73c - 0xa740 */
+ volatile unsigned int BB_paprd_trainer_stat2; /* 0xa740 - 0xa744 */
+ volatile unsigned int BB_paprd_trainer_stat3; /* 0xa744 - 0xa748 */
+ volatile char pad__21[0x90]; /* 0xa748 - 0xa7d8 */
+ volatile unsigned int BB_fcal_1; /* 0xa7d8 - 0xa7dc */
+ volatile unsigned int BB_fcal_2_b0; /* 0xa7dc - 0xa7e0 */
+ volatile unsigned int BB_radar_bw_filter; /* 0xa7e0 - 0xa7e4 */
+ volatile unsigned int BB_dft_tone_ctrl_b0; /* 0xa7e4 - 0xa7e8 */
+ volatile unsigned int BB_therm_adc_1; /* 0xa7e8 - 0xa7ec */
+ volatile unsigned int BB_therm_adc_2; /* 0xa7ec - 0xa7f0 */
+ volatile unsigned int BB_therm_adc_3; /* 0xa7f0 - 0xa7f4 */
+ volatile unsigned int BB_therm_adc_4; /* 0xa7f4 - 0xa7f8 */
+ volatile unsigned int BB_tx_forced_gain; /* 0xa7f8 - 0xa7fc */
+ volatile unsigned int BB_eco_ctrl; /* 0xa7fc - 0xa800 */
+ volatile char pad__22[0x48]; /* 0xa800 - 0xa848 */
+ volatile unsigned int BB_gain_force_max_gains_b1; /* 0xa848 - 0xa84c */
+ volatile unsigned int BB_gains_min_offsets_b1; /* 0xa84c - 0xa850 */
+ volatile char pad__23[0x1b0]; /* 0xa850 - 0xaa00 */
+ volatile unsigned int BB_rx_ocgain2[128]; /* 0xaa00 - 0xac00 */
+ volatile char pad__24[0x60c]; /* 0xac00 - 0xb20c */
+ volatile unsigned int BB_ext_atten_switch_ctl_b1; /* 0xb20c - 0xb210 */
+} bb_lc_reg_reg_t;
+
+#endif /* __ASSEMBLER__ */
+
+#endif /* _BB_LC_REG_REG_H_ */
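The generated accessors above are never shown in use, so here is a minimal usage sketch: the conventional read-modify-write idiom built from a register's *_OFFSET, *_MASK and *_SET macros, taking the INIT_THERM_SETTING field of BB_therm_adc_1 as the example. The raw 32-bit helpers, the base pointer and the include filename are assumptions made only for this sketch; the driver itself goes through its own register I/O layer rather than bare pointers.

#include "bb_lc_reg.h"	/* the baseband register header added above; filename assumed */

/* Hypothetical raw 32-bit MMIO helpers, stand-ins for the driver's own accessors. */
static inline unsigned int bb_read32(void *base, unsigned int offset)
{
	return *(volatile unsigned int *)((char *)base + offset);
}

static inline void bb_write32(void *base, unsigned int offset, unsigned int val)
{
	*(volatile unsigned int *)((char *)base + offset) = val;
}

/* Program INIT_THERM_SETTING (bits 7:0 of BB_therm_adc_1) without disturbing
 * the other fields of the register. */
static void bb_set_init_therm(void *bb_base, unsigned int therm_code)
{
	unsigned int val;

	val = bb_read32(bb_base, PHY_BB_THERM_ADC_1_OFFSET);		/* read */
	val &= ~PHY_BB_THERM_ADC_1_INIT_THERM_SETTING_MASK;		/* clear the field */
	val |= PHY_BB_THERM_ADC_1_INIT_THERM_SETTING_SET(therm_code);	/* merge new value */
	bb_write32(bb_base, PHY_BB_THERM_ADC_1_OFFSET, val);		/* write back */
}

The *_GET macros invert the last step; for instance, PHY_BB_THERM_ADC_4_LATEST_THERM_VALUE_GET(bb_read32(bb_base, PHY_BB_THERM_ADC_4_OFFSET)) would extract the most recent thermal reading.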
diff --git a/drivers/staging/ath6kl/include/common/AR6002/hw4.0/hw/efuse_reg.h b/drivers/staging/ath6kl/include/common/AR6002/hw4.0/hw/efuse_reg.h
new file mode 100644
index 000000000000..12cadb337482
--- /dev/null
+++ b/drivers/staging/ath6kl/include/common/AR6002/hw4.0/hw/efuse_reg.h
@@ -0,0 +1,108 @@
+// ------------------------------------------------------------------
+// Copyright (c) 2004-2010 Atheros Corporation. All rights reserved.
+//
+//
+// Permission to use, copy, modify, and/or distribute this software for any
+// purpose with or without fee is hereby granted, provided that the above
+// copyright notice and this permission notice appear in all copies.
+//
+// THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+// WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+// MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+// ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+// WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+// ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+// OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+//
+//
+// ------------------------------------------------------------------
+//===================================================================
+// Author(s): ="Atheros"
+//===================================================================
+
+
+#ifndef _EFUSE_REG_REG_H_
+#define _EFUSE_REG_REG_H_
+
+#define EFUSE_WR_ENABLE_REG_ADDRESS 0x00000000
+#define EFUSE_WR_ENABLE_REG_OFFSET 0x00000000
+#define EFUSE_WR_ENABLE_REG_V_MSB 0
+#define EFUSE_WR_ENABLE_REG_V_LSB 0
+#define EFUSE_WR_ENABLE_REG_V_MASK 0x00000001
+#define EFUSE_WR_ENABLE_REG_V_GET(x) (((x) & EFUSE_WR_ENABLE_REG_V_MASK) >> EFUSE_WR_ENABLE_REG_V_LSB)
+#define EFUSE_WR_ENABLE_REG_V_SET(x) (((x) << EFUSE_WR_ENABLE_REG_V_LSB) & EFUSE_WR_ENABLE_REG_V_MASK)
+
+#define EFUSE_INT_ENABLE_REG_ADDRESS 0x00000004
+#define EFUSE_INT_ENABLE_REG_OFFSET 0x00000004
+#define EFUSE_INT_ENABLE_REG_V_MSB 0
+#define EFUSE_INT_ENABLE_REG_V_LSB 0
+#define EFUSE_INT_ENABLE_REG_V_MASK 0x00000001
+#define EFUSE_INT_ENABLE_REG_V_GET(x) (((x) & EFUSE_INT_ENABLE_REG_V_MASK) >> EFUSE_INT_ENABLE_REG_V_LSB)
+#define EFUSE_INT_ENABLE_REG_V_SET(x) (((x) << EFUSE_INT_ENABLE_REG_V_LSB) & EFUSE_INT_ENABLE_REG_V_MASK)
+
+#define EFUSE_INT_STATUS_REG_ADDRESS 0x00000008
+#define EFUSE_INT_STATUS_REG_OFFSET 0x00000008
+#define EFUSE_INT_STATUS_REG_V_MSB 0
+#define EFUSE_INT_STATUS_REG_V_LSB 0
+#define EFUSE_INT_STATUS_REG_V_MASK 0x00000001
+#define EFUSE_INT_STATUS_REG_V_GET(x) (((x) & EFUSE_INT_STATUS_REG_V_MASK) >> EFUSE_INT_STATUS_REG_V_LSB)
+#define EFUSE_INT_STATUS_REG_V_SET(x) (((x) << EFUSE_INT_STATUS_REG_V_LSB) & EFUSE_INT_STATUS_REG_V_MASK)
+
+#define BITMASK_WR_REG_ADDRESS 0x0000000c
+#define BITMASK_WR_REG_OFFSET 0x0000000c
+#define BITMASK_WR_REG_V_MSB 31
+#define BITMASK_WR_REG_V_LSB 0
+#define BITMASK_WR_REG_V_MASK 0xffffffff
+#define BITMASK_WR_REG_V_GET(x) (((x) & BITMASK_WR_REG_V_MASK) >> BITMASK_WR_REG_V_LSB)
+#define BITMASK_WR_REG_V_SET(x) (((x) << BITMASK_WR_REG_V_LSB) & BITMASK_WR_REG_V_MASK)
+
+#define VDDQ_SETTLE_TIME_REG_ADDRESS 0x00000010
+#define VDDQ_SETTLE_TIME_REG_OFFSET 0x00000010
+#define VDDQ_SETTLE_TIME_REG_V_MSB 31
+#define VDDQ_SETTLE_TIME_REG_V_LSB 0
+#define VDDQ_SETTLE_TIME_REG_V_MASK 0xffffffff
+#define VDDQ_SETTLE_TIME_REG_V_GET(x) (((x) & VDDQ_SETTLE_TIME_REG_V_MASK) >> VDDQ_SETTLE_TIME_REG_V_LSB)
+#define VDDQ_SETTLE_TIME_REG_V_SET(x) (((x) << VDDQ_SETTLE_TIME_REG_V_LSB) & VDDQ_SETTLE_TIME_REG_V_MASK)
+
+#define RD_STROBE_PW_REG_ADDRESS 0x00000014
+#define RD_STROBE_PW_REG_OFFSET 0x00000014
+#define RD_STROBE_PW_REG_V_MSB 31
+#define RD_STROBE_PW_REG_V_LSB 0
+#define RD_STROBE_PW_REG_V_MASK 0xffffffff
+#define RD_STROBE_PW_REG_V_GET(x) (((x) & RD_STROBE_PW_REG_V_MASK) >> RD_STROBE_PW_REG_V_LSB)
+#define RD_STROBE_PW_REG_V_SET(x) (((x) << RD_STROBE_PW_REG_V_LSB) & RD_STROBE_PW_REG_V_MASK)
+
+#define PG_STROBE_PW_REG_ADDRESS 0x00000018
+#define PG_STROBE_PW_REG_OFFSET 0x00000018
+#define PG_STROBE_PW_REG_V_MSB 31
+#define PG_STROBE_PW_REG_V_LSB 0
+#define PG_STROBE_PW_REG_V_MASK 0xffffffff
+#define PG_STROBE_PW_REG_V_GET(x) (((x) & PG_STROBE_PW_REG_V_MASK) >> PG_STROBE_PW_REG_V_LSB)
+#define PG_STROBE_PW_REG_V_SET(x) (((x) << PG_STROBE_PW_REG_V_LSB) & PG_STROBE_PW_REG_V_MASK)
+
+#define EFUSE_INTF_ADDRESS 0x00000800
+#define EFUSE_INTF_OFFSET 0x00000800
+#define EFUSE_INTF_R_MSB 31
+#define EFUSE_INTF_R_LSB 0
+#define EFUSE_INTF_R_MASK 0xffffffff
+#define EFUSE_INTF_R_GET(x) (((x) & EFUSE_INTF_R_MASK) >> EFUSE_INTF_R_LSB)
+#define EFUSE_INTF_R_SET(x) (((x) << EFUSE_INTF_R_LSB) & EFUSE_INTF_R_MASK)
+
+
+#ifndef __ASSEMBLER__
+
+typedef struct efuse_reg_reg_s {
+ volatile unsigned int efuse_wr_enable_reg;
+ volatile unsigned int efuse_int_enable_reg;
+ volatile unsigned int efuse_int_status_reg;
+ volatile unsigned int bitmask_wr_reg;
+ volatile unsigned int vddq_settle_time_reg;
+ volatile unsigned int rd_strobe_pw_reg;
+ volatile unsigned int pg_strobe_pw_reg;
+ unsigned char pad0[2020]; /* pad to 0x800 */
+ volatile unsigned int efuse_intf[512];
+} efuse_reg_reg_t;
+
+#endif /* __ASSEMBLER__ */
+
+#endif /* _EFUSE_REG_REG_H_ */
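Besides the flat *_ADDRESS macros, this header also emits a struct overlay, efuse_reg_reg_t, whose pad member keeps efuse_intf[] at its 0x800 offset; casting a pointer to the mapped block therefore gives typed, volatile access to every register. A minimal sketch, assuming the block is already mapped and again leaving the driver's real I/O helpers aside:

#include "efuse_reg.h"	/* the header added above */

/* Read one 32-bit word from the eFuse read interface.  'efuse_base' is a
 * placeholder for wherever the eFuse block is mapped in this sketch. */
static unsigned int efuse_read_word(void *efuse_base, unsigned int idx)
{
	efuse_reg_reg_t *efuse = (efuse_reg_reg_t *)efuse_base;

	/* efuse_intf[] sits at offset 0x800 (EFUSE_INTF_ADDRESS), so indexing
	 * the array is equivalent to reading EFUSE_INTF_ADDRESS + 4 * idx;
	 * the volatile member type forces an actual bus access. */
	return EFUSE_INTF_R_GET(efuse->efuse_intf[idx]);
}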
diff --git a/drivers/staging/ath6kl/include/common/AR6002/hw4.0/hw/gpio_athr_wlan_reg.h b/drivers/staging/ath6kl/include/common/AR6002/hw4.0/hw/gpio_athr_wlan_reg.h
new file mode 100644
index 000000000000..1adee707de7c
--- /dev/null
+++ b/drivers/staging/ath6kl/include/common/AR6002/hw4.0/hw/gpio_athr_wlan_reg.h
@@ -0,0 +1,1253 @@
+// ------------------------------------------------------------------
+// Copyright (c) 2004-2010 Atheros Corporation. All rights reserved.
+//
+//
+// Permission to use, copy, modify, and/or distribute this software for any
+// purpose with or without fee is hereby granted, provided that the above
+// copyright notice and this permission notice appear in all copies.
+//
+// THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+// WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+// MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+// ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+// WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+// ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+// OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+//
+//
+// ------------------------------------------------------------------
+//===================================================================
+// Author(s): ="Atheros"
+//===================================================================
+
+
+#ifndef _GPIO_ATHR_WLAN_REG_REG_H_
+#define _GPIO_ATHR_WLAN_REG_REG_H_
+
+#define WLAN_GPIO_OUT_ADDRESS 0x00000000
+#define WLAN_GPIO_OUT_OFFSET 0x00000000
+#define WLAN_GPIO_OUT_DATA_MSB 25
+#define WLAN_GPIO_OUT_DATA_LSB 0
+#define WLAN_GPIO_OUT_DATA_MASK 0x03ffffff
+#define WLAN_GPIO_OUT_DATA_GET(x) (((x) & WLAN_GPIO_OUT_DATA_MASK) >> WLAN_GPIO_OUT_DATA_LSB)
+#define WLAN_GPIO_OUT_DATA_SET(x) (((x) << WLAN_GPIO_OUT_DATA_LSB) & WLAN_GPIO_OUT_DATA_MASK)
+
+#define WLAN_GPIO_OUT_W1TS_ADDRESS 0x00000004
+#define WLAN_GPIO_OUT_W1TS_OFFSET 0x00000004
+#define WLAN_GPIO_OUT_W1TS_DATA_MSB 25
+#define WLAN_GPIO_OUT_W1TS_DATA_LSB 0
+#define WLAN_GPIO_OUT_W1TS_DATA_MASK 0x03ffffff
+#define WLAN_GPIO_OUT_W1TS_DATA_GET(x) (((x) & WLAN_GPIO_OUT_W1TS_DATA_MASK) >> WLAN_GPIO_OUT_W1TS_DATA_LSB)
+#define WLAN_GPIO_OUT_W1TS_DATA_SET(x) (((x) << WLAN_GPIO_OUT_W1TS_DATA_LSB) & WLAN_GPIO_OUT_W1TS_DATA_MASK)
+
+#define WLAN_GPIO_OUT_W1TC_ADDRESS 0x00000008
+#define WLAN_GPIO_OUT_W1TC_OFFSET 0x00000008
+#define WLAN_GPIO_OUT_W1TC_DATA_MSB 25
+#define WLAN_GPIO_OUT_W1TC_DATA_LSB 0
+#define WLAN_GPIO_OUT_W1TC_DATA_MASK 0x03ffffff
+#define WLAN_GPIO_OUT_W1TC_DATA_GET(x) (((x) & WLAN_GPIO_OUT_W1TC_DATA_MASK) >> WLAN_GPIO_OUT_W1TC_DATA_LSB)
+#define WLAN_GPIO_OUT_W1TC_DATA_SET(x) (((x) << WLAN_GPIO_OUT_W1TC_DATA_LSB) & WLAN_GPIO_OUT_W1TC_DATA_MASK)
+
+#define WLAN_GPIO_ENABLE_ADDRESS 0x0000000c
+#define WLAN_GPIO_ENABLE_OFFSET 0x0000000c
+#define WLAN_GPIO_ENABLE_DATA_MSB 25
+#define WLAN_GPIO_ENABLE_DATA_LSB 0
+#define WLAN_GPIO_ENABLE_DATA_MASK 0x03ffffff
+#define WLAN_GPIO_ENABLE_DATA_GET(x) (((x) & WLAN_GPIO_ENABLE_DATA_MASK) >> WLAN_GPIO_ENABLE_DATA_LSB)
+#define WLAN_GPIO_ENABLE_DATA_SET(x) (((x) << WLAN_GPIO_ENABLE_DATA_LSB) & WLAN_GPIO_ENABLE_DATA_MASK)
+
+#define WLAN_GPIO_ENABLE_W1TS_ADDRESS 0x00000010
+#define WLAN_GPIO_ENABLE_W1TS_OFFSET 0x00000010
+#define WLAN_GPIO_ENABLE_W1TS_DATA_MSB 25
+#define WLAN_GPIO_ENABLE_W1TS_DATA_LSB 0
+#define WLAN_GPIO_ENABLE_W1TS_DATA_MASK 0x03ffffff
+#define WLAN_GPIO_ENABLE_W1TS_DATA_GET(x) (((x) & WLAN_GPIO_ENABLE_W1TS_DATA_MASK) >> WLAN_GPIO_ENABLE_W1TS_DATA_LSB)
+#define WLAN_GPIO_ENABLE_W1TS_DATA_SET(x) (((x) << WLAN_GPIO_ENABLE_W1TS_DATA_LSB) & WLAN_GPIO_ENABLE_W1TS_DATA_MASK)
+
+#define WLAN_GPIO_ENABLE_W1TC_ADDRESS 0x00000014
+#define WLAN_GPIO_ENABLE_W1TC_OFFSET 0x00000014
+#define WLAN_GPIO_ENABLE_W1TC_DATA_MSB 25
+#define WLAN_GPIO_ENABLE_W1TC_DATA_LSB 0
+#define WLAN_GPIO_ENABLE_W1TC_DATA_MASK 0x03ffffff
+#define WLAN_GPIO_ENABLE_W1TC_DATA_GET(x) (((x) & WLAN_GPIO_ENABLE_W1TC_DATA_MASK) >> WLAN_GPIO_ENABLE_W1TC_DATA_LSB)
+#define WLAN_GPIO_ENABLE_W1TC_DATA_SET(x) (((x) << WLAN_GPIO_ENABLE_W1TC_DATA_LSB) & WLAN_GPIO_ENABLE_W1TC_DATA_MASK)
+
+#define WLAN_GPIO_IN_ADDRESS 0x00000018
+#define WLAN_GPIO_IN_OFFSET 0x00000018
+#define WLAN_GPIO_IN_DATA_MSB 25
+#define WLAN_GPIO_IN_DATA_LSB 0
+#define WLAN_GPIO_IN_DATA_MASK 0x03ffffff
+#define WLAN_GPIO_IN_DATA_GET(x) (((x) & WLAN_GPIO_IN_DATA_MASK) >> WLAN_GPIO_IN_DATA_LSB)
+#define WLAN_GPIO_IN_DATA_SET(x) (((x) << WLAN_GPIO_IN_DATA_LSB) & WLAN_GPIO_IN_DATA_MASK)
+
+#define WLAN_GPIO_STATUS_ADDRESS 0x0000001c
+#define WLAN_GPIO_STATUS_OFFSET 0x0000001c
+#define WLAN_GPIO_STATUS_INTERRUPT_MSB 25
+#define WLAN_GPIO_STATUS_INTERRUPT_LSB 0
+#define WLAN_GPIO_STATUS_INTERRUPT_MASK 0x03ffffff
+#define WLAN_GPIO_STATUS_INTERRUPT_GET(x) (((x) & WLAN_GPIO_STATUS_INTERRUPT_MASK) >> WLAN_GPIO_STATUS_INTERRUPT_LSB)
+#define WLAN_GPIO_STATUS_INTERRUPT_SET(x) (((x) << WLAN_GPIO_STATUS_INTERRUPT_LSB) & WLAN_GPIO_STATUS_INTERRUPT_MASK)
+
+#define WLAN_GPIO_STATUS_W1TS_ADDRESS 0x00000020
+#define WLAN_GPIO_STATUS_W1TS_OFFSET 0x00000020
+#define WLAN_GPIO_STATUS_W1TS_INTERRUPT_MSB 25
+#define WLAN_GPIO_STATUS_W1TS_INTERRUPT_LSB 0
+#define WLAN_GPIO_STATUS_W1TS_INTERRUPT_MASK 0x03ffffff
+#define WLAN_GPIO_STATUS_W1TS_INTERRUPT_GET(x) (((x) & WLAN_GPIO_STATUS_W1TS_INTERRUPT_MASK) >> WLAN_GPIO_STATUS_W1TS_INTERRUPT_LSB)
+#define WLAN_GPIO_STATUS_W1TS_INTERRUPT_SET(x) (((x) << WLAN_GPIO_STATUS_W1TS_INTERRUPT_LSB) & WLAN_GPIO_STATUS_W1TS_INTERRUPT_MASK)
+
+#define WLAN_GPIO_STATUS_W1TC_ADDRESS 0x00000024
+#define WLAN_GPIO_STATUS_W1TC_OFFSET 0x00000024
+#define WLAN_GPIO_STATUS_W1TC_INTERRUPT_MSB 25
+#define WLAN_GPIO_STATUS_W1TC_INTERRUPT_LSB 0
+#define WLAN_GPIO_STATUS_W1TC_INTERRUPT_MASK 0x03ffffff
+#define WLAN_GPIO_STATUS_W1TC_INTERRUPT_GET(x) (((x) & WLAN_GPIO_STATUS_W1TC_INTERRUPT_MASK) >> WLAN_GPIO_STATUS_W1TC_INTERRUPT_LSB)
+#define WLAN_GPIO_STATUS_W1TC_INTERRUPT_SET(x) (((x) << WLAN_GPIO_STATUS_W1TC_INTERRUPT_LSB) & WLAN_GPIO_STATUS_W1TC_INTERRUPT_MASK)
+
+#define WLAN_GPIO_PIN0_ADDRESS 0x00000028
+#define WLAN_GPIO_PIN0_OFFSET 0x00000028
+#define WLAN_GPIO_PIN0_CONFIG_MSB 13
+#define WLAN_GPIO_PIN0_CONFIG_LSB 11
+#define WLAN_GPIO_PIN0_CONFIG_MASK 0x00003800
+#define WLAN_GPIO_PIN0_CONFIG_GET(x) (((x) & WLAN_GPIO_PIN0_CONFIG_MASK) >> WLAN_GPIO_PIN0_CONFIG_LSB)
+#define WLAN_GPIO_PIN0_CONFIG_SET(x) (((x) << WLAN_GPIO_PIN0_CONFIG_LSB) & WLAN_GPIO_PIN0_CONFIG_MASK)
+#define WLAN_GPIO_PIN0_WAKEUP_ENABLE_MSB 10
+#define WLAN_GPIO_PIN0_WAKEUP_ENABLE_LSB 10
+#define WLAN_GPIO_PIN0_WAKEUP_ENABLE_MASK 0x00000400
+#define WLAN_GPIO_PIN0_WAKEUP_ENABLE_GET(x) (((x) & WLAN_GPIO_PIN0_WAKEUP_ENABLE_MASK) >> WLAN_GPIO_PIN0_WAKEUP_ENABLE_LSB)
+#define WLAN_GPIO_PIN0_WAKEUP_ENABLE_SET(x) (((x) << WLAN_GPIO_PIN0_WAKEUP_ENABLE_LSB) & WLAN_GPIO_PIN0_WAKEUP_ENABLE_MASK)
+#define WLAN_GPIO_PIN0_INT_TYPE_MSB 9
+#define WLAN_GPIO_PIN0_INT_TYPE_LSB 7
+#define WLAN_GPIO_PIN0_INT_TYPE_MASK 0x00000380
+#define WLAN_GPIO_PIN0_INT_TYPE_GET(x) (((x) & WLAN_GPIO_PIN0_INT_TYPE_MASK) >> WLAN_GPIO_PIN0_INT_TYPE_LSB)
+#define WLAN_GPIO_PIN0_INT_TYPE_SET(x) (((x) << WLAN_GPIO_PIN0_INT_TYPE_LSB) & WLAN_GPIO_PIN0_INT_TYPE_MASK)
+#define WLAN_GPIO_PIN0_PAD_PULL_MSB 6
+#define WLAN_GPIO_PIN0_PAD_PULL_LSB 5
+#define WLAN_GPIO_PIN0_PAD_PULL_MASK 0x00000060
+#define WLAN_GPIO_PIN0_PAD_PULL_GET(x) (((x) & WLAN_GPIO_PIN0_PAD_PULL_MASK) >> WLAN_GPIO_PIN0_PAD_PULL_LSB)
+#define WLAN_GPIO_PIN0_PAD_PULL_SET(x) (((x) << WLAN_GPIO_PIN0_PAD_PULL_LSB) & WLAN_GPIO_PIN0_PAD_PULL_MASK)
+#define WLAN_GPIO_PIN0_PAD_STRENGTH_MSB 4
+#define WLAN_GPIO_PIN0_PAD_STRENGTH_LSB 3
+#define WLAN_GPIO_PIN0_PAD_STRENGTH_MASK 0x00000018
+#define WLAN_GPIO_PIN0_PAD_STRENGTH_GET(x) (((x) & WLAN_GPIO_PIN0_PAD_STRENGTH_MASK) >> WLAN_GPIO_PIN0_PAD_STRENGTH_LSB)
+#define WLAN_GPIO_PIN0_PAD_STRENGTH_SET(x) (((x) << WLAN_GPIO_PIN0_PAD_STRENGTH_LSB) & WLAN_GPIO_PIN0_PAD_STRENGTH_MASK)
+#define WLAN_GPIO_PIN0_PAD_DRIVER_MSB 2
+#define WLAN_GPIO_PIN0_PAD_DRIVER_LSB 2
+#define WLAN_GPIO_PIN0_PAD_DRIVER_MASK 0x00000004
+#define WLAN_GPIO_PIN0_PAD_DRIVER_GET(x) (((x) & WLAN_GPIO_PIN0_PAD_DRIVER_MASK) >> WLAN_GPIO_PIN0_PAD_DRIVER_LSB)
+#define WLAN_GPIO_PIN0_PAD_DRIVER_SET(x) (((x) << WLAN_GPIO_PIN0_PAD_DRIVER_LSB) & WLAN_GPIO_PIN0_PAD_DRIVER_MASK)
+#define WLAN_GPIO_PIN0_SOURCE_MSB 0
+#define WLAN_GPIO_PIN0_SOURCE_LSB 0
+#define WLAN_GPIO_PIN0_SOURCE_MASK 0x00000001
+#define WLAN_GPIO_PIN0_SOURCE_GET(x) (((x) & WLAN_GPIO_PIN0_SOURCE_MASK) >> WLAN_GPIO_PIN0_SOURCE_LSB)
+#define WLAN_GPIO_PIN0_SOURCE_SET(x) (((x) << WLAN_GPIO_PIN0_SOURCE_LSB) & WLAN_GPIO_PIN0_SOURCE_MASK)
+
+#define WLAN_GPIO_PIN1_ADDRESS 0x0000002c
+#define WLAN_GPIO_PIN1_OFFSET 0x0000002c
+#define WLAN_GPIO_PIN1_CONFIG_MSB 13
+#define WLAN_GPIO_PIN1_CONFIG_LSB 11
+#define WLAN_GPIO_PIN1_CONFIG_MASK 0x00003800
+#define WLAN_GPIO_PIN1_CONFIG_GET(x) (((x) & WLAN_GPIO_PIN1_CONFIG_MASK) >> WLAN_GPIO_PIN1_CONFIG_LSB)
+#define WLAN_GPIO_PIN1_CONFIG_SET(x) (((x) << WLAN_GPIO_PIN1_CONFIG_LSB) & WLAN_GPIO_PIN1_CONFIG_MASK)
+#define WLAN_GPIO_PIN1_WAKEUP_ENABLE_MSB 10
+#define WLAN_GPIO_PIN1_WAKEUP_ENABLE_LSB 10
+#define WLAN_GPIO_PIN1_WAKEUP_ENABLE_MASK 0x00000400
+#define WLAN_GPIO_PIN1_WAKEUP_ENABLE_GET(x) (((x) & WLAN_GPIO_PIN1_WAKEUP_ENABLE_MASK) >> WLAN_GPIO_PIN1_WAKEUP_ENABLE_LSB)
+#define WLAN_GPIO_PIN1_WAKEUP_ENABLE_SET(x) (((x) << WLAN_GPIO_PIN1_WAKEUP_ENABLE_LSB) & WLAN_GPIO_PIN1_WAKEUP_ENABLE_MASK)
+#define WLAN_GPIO_PIN1_INT_TYPE_MSB 9
+#define WLAN_GPIO_PIN1_INT_TYPE_LSB 7
+#define WLAN_GPIO_PIN1_INT_TYPE_MASK 0x00000380
+#define WLAN_GPIO_PIN1_INT_TYPE_GET(x) (((x) & WLAN_GPIO_PIN1_INT_TYPE_MASK) >> WLAN_GPIO_PIN1_INT_TYPE_LSB)
+#define WLAN_GPIO_PIN1_INT_TYPE_SET(x) (((x) << WLAN_GPIO_PIN1_INT_TYPE_LSB) & WLAN_GPIO_PIN1_INT_TYPE_MASK)
+#define WLAN_GPIO_PIN1_PAD_PULL_MSB 6
+#define WLAN_GPIO_PIN1_PAD_PULL_LSB 5
+#define WLAN_GPIO_PIN1_PAD_PULL_MASK 0x00000060
+#define WLAN_GPIO_PIN1_PAD_PULL_GET(x) (((x) & WLAN_GPIO_PIN1_PAD_PULL_MASK) >> WLAN_GPIO_PIN1_PAD_PULL_LSB)
+#define WLAN_GPIO_PIN1_PAD_PULL_SET(x) (((x) << WLAN_GPIO_PIN1_PAD_PULL_LSB) & WLAN_GPIO_PIN1_PAD_PULL_MASK)
+#define WLAN_GPIO_PIN1_PAD_STRENGTH_MSB 4
+#define WLAN_GPIO_PIN1_PAD_STRENGTH_LSB 3
+#define WLAN_GPIO_PIN1_PAD_STRENGTH_MASK 0x00000018
+#define WLAN_GPIO_PIN1_PAD_STRENGTH_GET(x) (((x) & WLAN_GPIO_PIN1_PAD_STRENGTH_MASK) >> WLAN_GPIO_PIN1_PAD_STRENGTH_LSB)
+#define WLAN_GPIO_PIN1_PAD_STRENGTH_SET(x) (((x) << WLAN_GPIO_PIN1_PAD_STRENGTH_LSB) & WLAN_GPIO_PIN1_PAD_STRENGTH_MASK)
+#define WLAN_GPIO_PIN1_PAD_DRIVER_MSB 2
+#define WLAN_GPIO_PIN1_PAD_DRIVER_LSB 2
+#define WLAN_GPIO_PIN1_PAD_DRIVER_MASK 0x00000004
+#define WLAN_GPIO_PIN1_PAD_DRIVER_GET(x) (((x) & WLAN_GPIO_PIN1_PAD_DRIVER_MASK) >> WLAN_GPIO_PIN1_PAD_DRIVER_LSB)
+#define WLAN_GPIO_PIN1_PAD_DRIVER_SET(x) (((x) << WLAN_GPIO_PIN1_PAD_DRIVER_LSB) & WLAN_GPIO_PIN1_PAD_DRIVER_MASK)
+#define WLAN_GPIO_PIN1_SOURCE_MSB 0
+#define WLAN_GPIO_PIN1_SOURCE_LSB 0
+#define WLAN_GPIO_PIN1_SOURCE_MASK 0x00000001
+#define WLAN_GPIO_PIN1_SOURCE_GET(x) (((x) & WLAN_GPIO_PIN1_SOURCE_MASK) >> WLAN_GPIO_PIN1_SOURCE_LSB)
+#define WLAN_GPIO_PIN1_SOURCE_SET(x) (((x) << WLAN_GPIO_PIN1_SOURCE_LSB) & WLAN_GPIO_PIN1_SOURCE_MASK)
+
+#define WLAN_GPIO_PIN2_ADDRESS 0x00000030
+#define WLAN_GPIO_PIN2_OFFSET 0x00000030
+#define WLAN_GPIO_PIN2_CONFIG_MSB 13
+#define WLAN_GPIO_PIN2_CONFIG_LSB 11
+#define WLAN_GPIO_PIN2_CONFIG_MASK 0x00003800
+#define WLAN_GPIO_PIN2_CONFIG_GET(x) (((x) & WLAN_GPIO_PIN2_CONFIG_MASK) >> WLAN_GPIO_PIN2_CONFIG_LSB)
+#define WLAN_GPIO_PIN2_CONFIG_SET(x) (((x) << WLAN_GPIO_PIN2_CONFIG_LSB) & WLAN_GPIO_PIN2_CONFIG_MASK)
+#define WLAN_GPIO_PIN2_WAKEUP_ENABLE_MSB 10
+#define WLAN_GPIO_PIN2_WAKEUP_ENABLE_LSB 10
+#define WLAN_GPIO_PIN2_WAKEUP_ENABLE_MASK 0x00000400
+#define WLAN_GPIO_PIN2_WAKEUP_ENABLE_GET(x) (((x) & WLAN_GPIO_PIN2_WAKEUP_ENABLE_MASK) >> WLAN_GPIO_PIN2_WAKEUP_ENABLE_LSB)
+#define WLAN_GPIO_PIN2_WAKEUP_ENABLE_SET(x) (((x) << WLAN_GPIO_PIN2_WAKEUP_ENABLE_LSB) & WLAN_GPIO_PIN2_WAKEUP_ENABLE_MASK)
+#define WLAN_GPIO_PIN2_INT_TYPE_MSB 9
+#define WLAN_GPIO_PIN2_INT_TYPE_LSB 7
+#define WLAN_GPIO_PIN2_INT_TYPE_MASK 0x00000380
+#define WLAN_GPIO_PIN2_INT_TYPE_GET(x) (((x) & WLAN_GPIO_PIN2_INT_TYPE_MASK) >> WLAN_GPIO_PIN2_INT_TYPE_LSB)
+#define WLAN_GPIO_PIN2_INT_TYPE_SET(x) (((x) << WLAN_GPIO_PIN2_INT_TYPE_LSB) & WLAN_GPIO_PIN2_INT_TYPE_MASK)
+#define WLAN_GPIO_PIN2_PAD_PULL_MSB 6
+#define WLAN_GPIO_PIN2_PAD_PULL_LSB 5
+#define WLAN_GPIO_PIN2_PAD_PULL_MASK 0x00000060
+#define WLAN_GPIO_PIN2_PAD_PULL_GET(x) (((x) & WLAN_GPIO_PIN2_PAD_PULL_MASK) >> WLAN_GPIO_PIN2_PAD_PULL_LSB)
+#define WLAN_GPIO_PIN2_PAD_PULL_SET(x) (((x) << WLAN_GPIO_PIN2_PAD_PULL_LSB) & WLAN_GPIO_PIN2_PAD_PULL_MASK)
+#define WLAN_GPIO_PIN2_PAD_STRENGTH_MSB 4
+#define WLAN_GPIO_PIN2_PAD_STRENGTH_LSB 3
+#define WLAN_GPIO_PIN2_PAD_STRENGTH_MASK 0x00000018
+#define WLAN_GPIO_PIN2_PAD_STRENGTH_GET(x) (((x) & WLAN_GPIO_PIN2_PAD_STRENGTH_MASK) >> WLAN_GPIO_PIN2_PAD_STRENGTH_LSB)
+#define WLAN_GPIO_PIN2_PAD_STRENGTH_SET(x) (((x) << WLAN_GPIO_PIN2_PAD_STRENGTH_LSB) & WLAN_GPIO_PIN2_PAD_STRENGTH_MASK)
+#define WLAN_GPIO_PIN2_PAD_DRIVER_MSB 2
+#define WLAN_GPIO_PIN2_PAD_DRIVER_LSB 2
+#define WLAN_GPIO_PIN2_PAD_DRIVER_MASK 0x00000004
+#define WLAN_GPIO_PIN2_PAD_DRIVER_GET(x) (((x) & WLAN_GPIO_PIN2_PAD_DRIVER_MASK) >> WLAN_GPIO_PIN2_PAD_DRIVER_LSB)
+#define WLAN_GPIO_PIN2_PAD_DRIVER_SET(x) (((x) << WLAN_GPIO_PIN2_PAD_DRIVER_LSB) & WLAN_GPIO_PIN2_PAD_DRIVER_MASK)
+#define WLAN_GPIO_PIN2_SOURCE_MSB 0
+#define WLAN_GPIO_PIN2_SOURCE_LSB 0
+#define WLAN_GPIO_PIN2_SOURCE_MASK 0x00000001
+#define WLAN_GPIO_PIN2_SOURCE_GET(x) (((x) & WLAN_GPIO_PIN2_SOURCE_MASK) >> WLAN_GPIO_PIN2_SOURCE_LSB)
+#define WLAN_GPIO_PIN2_SOURCE_SET(x) (((x) << WLAN_GPIO_PIN2_SOURCE_LSB) & WLAN_GPIO_PIN2_SOURCE_MASK)
+
+#define WLAN_GPIO_PIN3_ADDRESS 0x00000034
+#define WLAN_GPIO_PIN3_OFFSET 0x00000034
+#define WLAN_GPIO_PIN3_CONFIG_MSB 13
+#define WLAN_GPIO_PIN3_CONFIG_LSB 11
+#define WLAN_GPIO_PIN3_CONFIG_MASK 0x00003800
+#define WLAN_GPIO_PIN3_CONFIG_GET(x) (((x) & WLAN_GPIO_PIN3_CONFIG_MASK) >> WLAN_GPIO_PIN3_CONFIG_LSB)
+#define WLAN_GPIO_PIN3_CONFIG_SET(x) (((x) << WLAN_GPIO_PIN3_CONFIG_LSB) & WLAN_GPIO_PIN3_CONFIG_MASK)
+#define WLAN_GPIO_PIN3_WAKEUP_ENABLE_MSB 10
+#define WLAN_GPIO_PIN3_WAKEUP_ENABLE_LSB 10
+#define WLAN_GPIO_PIN3_WAKEUP_ENABLE_MASK 0x00000400
+#define WLAN_GPIO_PIN3_WAKEUP_ENABLE_GET(x) (((x) & WLAN_GPIO_PIN3_WAKEUP_ENABLE_MASK) >> WLAN_GPIO_PIN3_WAKEUP_ENABLE_LSB)
+#define WLAN_GPIO_PIN3_WAKEUP_ENABLE_SET(x) (((x) << WLAN_GPIO_PIN3_WAKEUP_ENABLE_LSB) & WLAN_GPIO_PIN3_WAKEUP_ENABLE_MASK)
+#define WLAN_GPIO_PIN3_INT_TYPE_MSB 9
+#define WLAN_GPIO_PIN3_INT_TYPE_LSB 7
+#define WLAN_GPIO_PIN3_INT_TYPE_MASK 0x00000380
+#define WLAN_GPIO_PIN3_INT_TYPE_GET(x) (((x) & WLAN_GPIO_PIN3_INT_TYPE_MASK) >> WLAN_GPIO_PIN3_INT_TYPE_LSB)
+#define WLAN_GPIO_PIN3_INT_TYPE_SET(x) (((x) << WLAN_GPIO_PIN3_INT_TYPE_LSB) & WLAN_GPIO_PIN3_INT_TYPE_MASK)
+#define WLAN_GPIO_PIN3_PAD_PULL_MSB 6
+#define WLAN_GPIO_PIN3_PAD_PULL_LSB 5
+#define WLAN_GPIO_PIN3_PAD_PULL_MASK 0x00000060
+#define WLAN_GPIO_PIN3_PAD_PULL_GET(x) (((x) & WLAN_GPIO_PIN3_PAD_PULL_MASK) >> WLAN_GPIO_PIN3_PAD_PULL_LSB)
+#define WLAN_GPIO_PIN3_PAD_PULL_SET(x) (((x) << WLAN_GPIO_PIN3_PAD_PULL_LSB) & WLAN_GPIO_PIN3_PAD_PULL_MASK)
+#define WLAN_GPIO_PIN3_PAD_STRENGTH_MSB 4
+#define WLAN_GPIO_PIN3_PAD_STRENGTH_LSB 3
+#define WLAN_GPIO_PIN3_PAD_STRENGTH_MASK 0x00000018
+#define WLAN_GPIO_PIN3_PAD_STRENGTH_GET(x) (((x) & WLAN_GPIO_PIN3_PAD_STRENGTH_MASK) >> WLAN_GPIO_PIN3_PAD_STRENGTH_LSB)
+#define WLAN_GPIO_PIN3_PAD_STRENGTH_SET(x) (((x) << WLAN_GPIO_PIN3_PAD_STRENGTH_LSB) & WLAN_GPIO_PIN3_PAD_STRENGTH_MASK)
+#define WLAN_GPIO_PIN3_PAD_DRIVER_MSB 2
+#define WLAN_GPIO_PIN3_PAD_DRIVER_LSB 2
+#define WLAN_GPIO_PIN3_PAD_DRIVER_MASK 0x00000004
+#define WLAN_GPIO_PIN3_PAD_DRIVER_GET(x) (((x) & WLAN_GPIO_PIN3_PAD_DRIVER_MASK) >> WLAN_GPIO_PIN3_PAD_DRIVER_LSB)
+#define WLAN_GPIO_PIN3_PAD_DRIVER_SET(x) (((x) << WLAN_GPIO_PIN3_PAD_DRIVER_LSB) & WLAN_GPIO_PIN3_PAD_DRIVER_MASK)
+#define WLAN_GPIO_PIN3_SOURCE_MSB 0
+#define WLAN_GPIO_PIN3_SOURCE_LSB 0
+#define WLAN_GPIO_PIN3_SOURCE_MASK 0x00000001
+#define WLAN_GPIO_PIN3_SOURCE_GET(x) (((x) & WLAN_GPIO_PIN3_SOURCE_MASK) >> WLAN_GPIO_PIN3_SOURCE_LSB)
+#define WLAN_GPIO_PIN3_SOURCE_SET(x) (((x) << WLAN_GPIO_PIN3_SOURCE_LSB) & WLAN_GPIO_PIN3_SOURCE_MASK)
+
+#define WLAN_GPIO_PIN4_ADDRESS 0x00000038
+#define WLAN_GPIO_PIN4_OFFSET 0x00000038
+#define WLAN_GPIO_PIN4_CONFIG_MSB 13
+#define WLAN_GPIO_PIN4_CONFIG_LSB 11
+#define WLAN_GPIO_PIN4_CONFIG_MASK 0x00003800
+#define WLAN_GPIO_PIN4_CONFIG_GET(x) (((x) & WLAN_GPIO_PIN4_CONFIG_MASK) >> WLAN_GPIO_PIN4_CONFIG_LSB)
+#define WLAN_GPIO_PIN4_CONFIG_SET(x) (((x) << WLAN_GPIO_PIN4_CONFIG_LSB) & WLAN_GPIO_PIN4_CONFIG_MASK)
+#define WLAN_GPIO_PIN4_WAKEUP_ENABLE_MSB 10
+#define WLAN_GPIO_PIN4_WAKEUP_ENABLE_LSB 10
+#define WLAN_GPIO_PIN4_WAKEUP_ENABLE_MASK 0x00000400
+#define WLAN_GPIO_PIN4_WAKEUP_ENABLE_GET(x) (((x) & WLAN_GPIO_PIN4_WAKEUP_ENABLE_MASK) >> WLAN_GPIO_PIN4_WAKEUP_ENABLE_LSB)
+#define WLAN_GPIO_PIN4_WAKEUP_ENABLE_SET(x) (((x) << WLAN_GPIO_PIN4_WAKEUP_ENABLE_LSB) & WLAN_GPIO_PIN4_WAKEUP_ENABLE_MASK)
+#define WLAN_GPIO_PIN4_INT_TYPE_MSB 9
+#define WLAN_GPIO_PIN4_INT_TYPE_LSB 7
+#define WLAN_GPIO_PIN4_INT_TYPE_MASK 0x00000380
+#define WLAN_GPIO_PIN4_INT_TYPE_GET(x) (((x) & WLAN_GPIO_PIN4_INT_TYPE_MASK) >> WLAN_GPIO_PIN4_INT_TYPE_LSB)
+#define WLAN_GPIO_PIN4_INT_TYPE_SET(x) (((x) << WLAN_GPIO_PIN4_INT_TYPE_LSB) & WLAN_GPIO_PIN4_INT_TYPE_MASK)
+#define WLAN_GPIO_PIN4_PAD_PULL_MSB 6
+#define WLAN_GPIO_PIN4_PAD_PULL_LSB 5
+#define WLAN_GPIO_PIN4_PAD_PULL_MASK 0x00000060
+#define WLAN_GPIO_PIN4_PAD_PULL_GET(x) (((x) & WLAN_GPIO_PIN4_PAD_PULL_MASK) >> WLAN_GPIO_PIN4_PAD_PULL_LSB)
+#define WLAN_GPIO_PIN4_PAD_PULL_SET(x) (((x) << WLAN_GPIO_PIN4_PAD_PULL_LSB) & WLAN_GPIO_PIN4_PAD_PULL_MASK)
+#define WLAN_GPIO_PIN4_PAD_STRENGTH_MSB 4
+#define WLAN_GPIO_PIN4_PAD_STRENGTH_LSB 3
+#define WLAN_GPIO_PIN4_PAD_STRENGTH_MASK 0x00000018
+#define WLAN_GPIO_PIN4_PAD_STRENGTH_GET(x) (((x) & WLAN_GPIO_PIN4_PAD_STRENGTH_MASK) >> WLAN_GPIO_PIN4_PAD_STRENGTH_LSB)
+#define WLAN_GPIO_PIN4_PAD_STRENGTH_SET(x) (((x) << WLAN_GPIO_PIN4_PAD_STRENGTH_LSB) & WLAN_GPIO_PIN4_PAD_STRENGTH_MASK)
+#define WLAN_GPIO_PIN4_PAD_DRIVER_MSB 2
+#define WLAN_GPIO_PIN4_PAD_DRIVER_LSB 2
+#define WLAN_GPIO_PIN4_PAD_DRIVER_MASK 0x00000004
+#define WLAN_GPIO_PIN4_PAD_DRIVER_GET(x) (((x) & WLAN_GPIO_PIN4_PAD_DRIVER_MASK) >> WLAN_GPIO_PIN4_PAD_DRIVER_LSB)
+#define WLAN_GPIO_PIN4_PAD_DRIVER_SET(x) (((x) << WLAN_GPIO_PIN4_PAD_DRIVER_LSB) & WLAN_GPIO_PIN4_PAD_DRIVER_MASK)
+#define WLAN_GPIO_PIN4_SOURCE_MSB 0
+#define WLAN_GPIO_PIN4_SOURCE_LSB 0
+#define WLAN_GPIO_PIN4_SOURCE_MASK 0x00000001
+#define WLAN_GPIO_PIN4_SOURCE_GET(x) (((x) & WLAN_GPIO_PIN4_SOURCE_MASK) >> WLAN_GPIO_PIN4_SOURCE_LSB)
+#define WLAN_GPIO_PIN4_SOURCE_SET(x) (((x) << WLAN_GPIO_PIN4_SOURCE_LSB) & WLAN_GPIO_PIN4_SOURCE_MASK)
+
+#define WLAN_GPIO_PIN5_ADDRESS 0x0000003c
+#define WLAN_GPIO_PIN5_OFFSET 0x0000003c
+#define WLAN_GPIO_PIN5_CONFIG_MSB 13
+#define WLAN_GPIO_PIN5_CONFIG_LSB 11
+#define WLAN_GPIO_PIN5_CONFIG_MASK 0x00003800
+#define WLAN_GPIO_PIN5_CONFIG_GET(x) (((x) & WLAN_GPIO_PIN5_CONFIG_MASK) >> WLAN_GPIO_PIN5_CONFIG_LSB)
+#define WLAN_GPIO_PIN5_CONFIG_SET(x) (((x) << WLAN_GPIO_PIN5_CONFIG_LSB) & WLAN_GPIO_PIN5_CONFIG_MASK)
+#define WLAN_GPIO_PIN5_WAKEUP_ENABLE_MSB 10
+#define WLAN_GPIO_PIN5_WAKEUP_ENABLE_LSB 10
+#define WLAN_GPIO_PIN5_WAKEUP_ENABLE_MASK 0x00000400
+#define WLAN_GPIO_PIN5_WAKEUP_ENABLE_GET(x) (((x) & WLAN_GPIO_PIN5_WAKEUP_ENABLE_MASK) >> WLAN_GPIO_PIN5_WAKEUP_ENABLE_LSB)
+#define WLAN_GPIO_PIN5_WAKEUP_ENABLE_SET(x) (((x) << WLAN_GPIO_PIN5_WAKEUP_ENABLE_LSB) & WLAN_GPIO_PIN5_WAKEUP_ENABLE_MASK)
+#define WLAN_GPIO_PIN5_INT_TYPE_MSB 9
+#define WLAN_GPIO_PIN5_INT_TYPE_LSB 7
+#define WLAN_GPIO_PIN5_INT_TYPE_MASK 0x00000380
+#define WLAN_GPIO_PIN5_INT_TYPE_GET(x) (((x) & WLAN_GPIO_PIN5_INT_TYPE_MASK) >> WLAN_GPIO_PIN5_INT_TYPE_LSB)
+#define WLAN_GPIO_PIN5_INT_TYPE_SET(x) (((x) << WLAN_GPIO_PIN5_INT_TYPE_LSB) & WLAN_GPIO_PIN5_INT_TYPE_MASK)
+#define WLAN_GPIO_PIN5_PAD_PULL_MSB 6
+#define WLAN_GPIO_PIN5_PAD_PULL_LSB 5
+#define WLAN_GPIO_PIN5_PAD_PULL_MASK 0x00000060
+#define WLAN_GPIO_PIN5_PAD_PULL_GET(x) (((x) & WLAN_GPIO_PIN5_PAD_PULL_MASK) >> WLAN_GPIO_PIN5_PAD_PULL_LSB)
+#define WLAN_GPIO_PIN5_PAD_PULL_SET(x) (((x) << WLAN_GPIO_PIN5_PAD_PULL_LSB) & WLAN_GPIO_PIN5_PAD_PULL_MASK)
+#define WLAN_GPIO_PIN5_PAD_STRENGTH_MSB 4
+#define WLAN_GPIO_PIN5_PAD_STRENGTH_LSB 3
+#define WLAN_GPIO_PIN5_PAD_STRENGTH_MASK 0x00000018
+#define WLAN_GPIO_PIN5_PAD_STRENGTH_GET(x) (((x) & WLAN_GPIO_PIN5_PAD_STRENGTH_MASK) >> WLAN_GPIO_PIN5_PAD_STRENGTH_LSB)
+#define WLAN_GPIO_PIN5_PAD_STRENGTH_SET(x) (((x) << WLAN_GPIO_PIN5_PAD_STRENGTH_LSB) & WLAN_GPIO_PIN5_PAD_STRENGTH_MASK)
+#define WLAN_GPIO_PIN5_PAD_DRIVER_MSB 2
+#define WLAN_GPIO_PIN5_PAD_DRIVER_LSB 2
+#define WLAN_GPIO_PIN5_PAD_DRIVER_MASK 0x00000004
+#define WLAN_GPIO_PIN5_PAD_DRIVER_GET(x) (((x) & WLAN_GPIO_PIN5_PAD_DRIVER_MASK) >> WLAN_GPIO_PIN5_PAD_DRIVER_LSB)
+#define WLAN_GPIO_PIN5_PAD_DRIVER_SET(x) (((x) << WLAN_GPIO_PIN5_PAD_DRIVER_LSB) & WLAN_GPIO_PIN5_PAD_DRIVER_MASK)
+#define WLAN_GPIO_PIN5_SOURCE_MSB 0
+#define WLAN_GPIO_PIN5_SOURCE_LSB 0
+#define WLAN_GPIO_PIN5_SOURCE_MASK 0x00000001
+#define WLAN_GPIO_PIN5_SOURCE_GET(x) (((x) & WLAN_GPIO_PIN5_SOURCE_MASK) >> WLAN_GPIO_PIN5_SOURCE_LSB)
+#define WLAN_GPIO_PIN5_SOURCE_SET(x) (((x) << WLAN_GPIO_PIN5_SOURCE_LSB) & WLAN_GPIO_PIN5_SOURCE_MASK)
+
+#define WLAN_GPIO_PIN6_ADDRESS 0x00000040
+#define WLAN_GPIO_PIN6_OFFSET 0x00000040
+#define WLAN_GPIO_PIN6_CONFIG_MSB 13
+#define WLAN_GPIO_PIN6_CONFIG_LSB 11
+#define WLAN_GPIO_PIN6_CONFIG_MASK 0x00003800
+#define WLAN_GPIO_PIN6_CONFIG_GET(x) (((x) & WLAN_GPIO_PIN6_CONFIG_MASK) >> WLAN_GPIO_PIN6_CONFIG_LSB)
+#define WLAN_GPIO_PIN6_CONFIG_SET(x) (((x) << WLAN_GPIO_PIN6_CONFIG_LSB) & WLAN_GPIO_PIN6_CONFIG_MASK)
+#define WLAN_GPIO_PIN6_WAKEUP_ENABLE_MSB 10
+#define WLAN_GPIO_PIN6_WAKEUP_ENABLE_LSB 10
+#define WLAN_GPIO_PIN6_WAKEUP_ENABLE_MASK 0x00000400
+#define WLAN_GPIO_PIN6_WAKEUP_ENABLE_GET(x) (((x) & WLAN_GPIO_PIN6_WAKEUP_ENABLE_MASK) >> WLAN_GPIO_PIN6_WAKEUP_ENABLE_LSB)
+#define WLAN_GPIO_PIN6_WAKEUP_ENABLE_SET(x) (((x) << WLAN_GPIO_PIN6_WAKEUP_ENABLE_LSB) & WLAN_GPIO_PIN6_WAKEUP_ENABLE_MASK)
+#define WLAN_GPIO_PIN6_INT_TYPE_MSB 9
+#define WLAN_GPIO_PIN6_INT_TYPE_LSB 7
+#define WLAN_GPIO_PIN6_INT_TYPE_MASK 0x00000380
+#define WLAN_GPIO_PIN6_INT_TYPE_GET(x) (((x) & WLAN_GPIO_PIN6_INT_TYPE_MASK) >> WLAN_GPIO_PIN6_INT_TYPE_LSB)
+#define WLAN_GPIO_PIN6_INT_TYPE_SET(x) (((x) << WLAN_GPIO_PIN6_INT_TYPE_LSB) & WLAN_GPIO_PIN6_INT_TYPE_MASK)
+#define WLAN_GPIO_PIN6_PAD_PULL_MSB 6
+#define WLAN_GPIO_PIN6_PAD_PULL_LSB 5
+#define WLAN_GPIO_PIN6_PAD_PULL_MASK 0x00000060
+#define WLAN_GPIO_PIN6_PAD_PULL_GET(x) (((x) & WLAN_GPIO_PIN6_PAD_PULL_MASK) >> WLAN_GPIO_PIN6_PAD_PULL_LSB)
+#define WLAN_GPIO_PIN6_PAD_PULL_SET(x) (((x) << WLAN_GPIO_PIN6_PAD_PULL_LSB) & WLAN_GPIO_PIN6_PAD_PULL_MASK)
+#define WLAN_GPIO_PIN6_PAD_STRENGTH_MSB 4
+#define WLAN_GPIO_PIN6_PAD_STRENGTH_LSB 3
+#define WLAN_GPIO_PIN6_PAD_STRENGTH_MASK 0x00000018
+#define WLAN_GPIO_PIN6_PAD_STRENGTH_GET(x) (((x) & WLAN_GPIO_PIN6_PAD_STRENGTH_MASK) >> WLAN_GPIO_PIN6_PAD_STRENGTH_LSB)
+#define WLAN_GPIO_PIN6_PAD_STRENGTH_SET(x) (((x) << WLAN_GPIO_PIN6_PAD_STRENGTH_LSB) & WLAN_GPIO_PIN6_PAD_STRENGTH_MASK)
+#define WLAN_GPIO_PIN6_PAD_DRIVER_MSB 2
+#define WLAN_GPIO_PIN6_PAD_DRIVER_LSB 2
+#define WLAN_GPIO_PIN6_PAD_DRIVER_MASK 0x00000004
+#define WLAN_GPIO_PIN6_PAD_DRIVER_GET(x) (((x) & WLAN_GPIO_PIN6_PAD_DRIVER_MASK) >> WLAN_GPIO_PIN6_PAD_DRIVER_LSB)
+#define WLAN_GPIO_PIN6_PAD_DRIVER_SET(x) (((x) << WLAN_GPIO_PIN6_PAD_DRIVER_LSB) & WLAN_GPIO_PIN6_PAD_DRIVER_MASK)
+#define WLAN_GPIO_PIN6_SOURCE_MSB 0
+#define WLAN_GPIO_PIN6_SOURCE_LSB 0
+#define WLAN_GPIO_PIN6_SOURCE_MASK 0x00000001
+#define WLAN_GPIO_PIN6_SOURCE_GET(x) (((x) & WLAN_GPIO_PIN6_SOURCE_MASK) >> WLAN_GPIO_PIN6_SOURCE_LSB)
+#define WLAN_GPIO_PIN6_SOURCE_SET(x) (((x) << WLAN_GPIO_PIN6_SOURCE_LSB) & WLAN_GPIO_PIN6_SOURCE_MASK)
+
+#define WLAN_GPIO_PIN7_ADDRESS 0x00000044
+#define WLAN_GPIO_PIN7_OFFSET 0x00000044
+#define WLAN_GPIO_PIN7_CONFIG_MSB 13
+#define WLAN_GPIO_PIN7_CONFIG_LSB 11
+#define WLAN_GPIO_PIN7_CONFIG_MASK 0x00003800
+#define WLAN_GPIO_PIN7_CONFIG_GET(x) (((x) & WLAN_GPIO_PIN7_CONFIG_MASK) >> WLAN_GPIO_PIN7_CONFIG_LSB)
+#define WLAN_GPIO_PIN7_CONFIG_SET(x) (((x) << WLAN_GPIO_PIN7_CONFIG_LSB) & WLAN_GPIO_PIN7_CONFIG_MASK)
+#define WLAN_GPIO_PIN7_WAKEUP_ENABLE_MSB 10
+#define WLAN_GPIO_PIN7_WAKEUP_ENABLE_LSB 10
+#define WLAN_GPIO_PIN7_WAKEUP_ENABLE_MASK 0x00000400
+#define WLAN_GPIO_PIN7_WAKEUP_ENABLE_GET(x) (((x) & WLAN_GPIO_PIN7_WAKEUP_ENABLE_MASK) >> WLAN_GPIO_PIN7_WAKEUP_ENABLE_LSB)
+#define WLAN_GPIO_PIN7_WAKEUP_ENABLE_SET(x) (((x) << WLAN_GPIO_PIN7_WAKEUP_ENABLE_LSB) & WLAN_GPIO_PIN7_WAKEUP_ENABLE_MASK)
+#define WLAN_GPIO_PIN7_INT_TYPE_MSB 9
+#define WLAN_GPIO_PIN7_INT_TYPE_LSB 7
+#define WLAN_GPIO_PIN7_INT_TYPE_MASK 0x00000380
+#define WLAN_GPIO_PIN7_INT_TYPE_GET(x) (((x) & WLAN_GPIO_PIN7_INT_TYPE_MASK) >> WLAN_GPIO_PIN7_INT_TYPE_LSB)
+#define WLAN_GPIO_PIN7_INT_TYPE_SET(x) (((x) << WLAN_GPIO_PIN7_INT_TYPE_LSB) & WLAN_GPIO_PIN7_INT_TYPE_MASK)
+#define WLAN_GPIO_PIN7_PAD_PULL_MSB 6
+#define WLAN_GPIO_PIN7_PAD_PULL_LSB 5
+#define WLAN_GPIO_PIN7_PAD_PULL_MASK 0x00000060
+#define WLAN_GPIO_PIN7_PAD_PULL_GET(x) (((x) & WLAN_GPIO_PIN7_PAD_PULL_MASK) >> WLAN_GPIO_PIN7_PAD_PULL_LSB)
+#define WLAN_GPIO_PIN7_PAD_PULL_SET(x) (((x) << WLAN_GPIO_PIN7_PAD_PULL_LSB) & WLAN_GPIO_PIN7_PAD_PULL_MASK)
+#define WLAN_GPIO_PIN7_PAD_STRENGTH_MSB 4
+#define WLAN_GPIO_PIN7_PAD_STRENGTH_LSB 3
+#define WLAN_GPIO_PIN7_PAD_STRENGTH_MASK 0x00000018
+#define WLAN_GPIO_PIN7_PAD_STRENGTH_GET(x) (((x) & WLAN_GPIO_PIN7_PAD_STRENGTH_MASK) >> WLAN_GPIO_PIN7_PAD_STRENGTH_LSB)
+#define WLAN_GPIO_PIN7_PAD_STRENGTH_SET(x) (((x) << WLAN_GPIO_PIN7_PAD_STRENGTH_LSB) & WLAN_GPIO_PIN7_PAD_STRENGTH_MASK)
+#define WLAN_GPIO_PIN7_PAD_DRIVER_MSB 2
+#define WLAN_GPIO_PIN7_PAD_DRIVER_LSB 2
+#define WLAN_GPIO_PIN7_PAD_DRIVER_MASK 0x00000004
+#define WLAN_GPIO_PIN7_PAD_DRIVER_GET(x) (((x) & WLAN_GPIO_PIN7_PAD_DRIVER_MASK) >> WLAN_GPIO_PIN7_PAD_DRIVER_LSB)
+#define WLAN_GPIO_PIN7_PAD_DRIVER_SET(x) (((x) << WLAN_GPIO_PIN7_PAD_DRIVER_LSB) & WLAN_GPIO_PIN7_PAD_DRIVER_MASK)
+#define WLAN_GPIO_PIN7_SOURCE_MSB 0
+#define WLAN_GPIO_PIN7_SOURCE_LSB 0
+#define WLAN_GPIO_PIN7_SOURCE_MASK 0x00000001
+#define WLAN_GPIO_PIN7_SOURCE_GET(x) (((x) & WLAN_GPIO_PIN7_SOURCE_MASK) >> WLAN_GPIO_PIN7_SOURCE_LSB)
+#define WLAN_GPIO_PIN7_SOURCE_SET(x) (((x) << WLAN_GPIO_PIN7_SOURCE_LSB) & WLAN_GPIO_PIN7_SOURCE_MASK)
+
+#define WLAN_GPIO_PIN8_ADDRESS 0x00000048
+#define WLAN_GPIO_PIN8_OFFSET 0x00000048
+#define WLAN_GPIO_PIN8_CONFIG_MSB 13
+#define WLAN_GPIO_PIN8_CONFIG_LSB 11
+#define WLAN_GPIO_PIN8_CONFIG_MASK 0x00003800
+#define WLAN_GPIO_PIN8_CONFIG_GET(x) (((x) & WLAN_GPIO_PIN8_CONFIG_MASK) >> WLAN_GPIO_PIN8_CONFIG_LSB)
+#define WLAN_GPIO_PIN8_CONFIG_SET(x) (((x) << WLAN_GPIO_PIN8_CONFIG_LSB) & WLAN_GPIO_PIN8_CONFIG_MASK)
+#define WLAN_GPIO_PIN8_WAKEUP_ENABLE_MSB 10
+#define WLAN_GPIO_PIN8_WAKEUP_ENABLE_LSB 10
+#define WLAN_GPIO_PIN8_WAKEUP_ENABLE_MASK 0x00000400
+#define WLAN_GPIO_PIN8_WAKEUP_ENABLE_GET(x) (((x) & WLAN_GPIO_PIN8_WAKEUP_ENABLE_MASK) >> WLAN_GPIO_PIN8_WAKEUP_ENABLE_LSB)
+#define WLAN_GPIO_PIN8_WAKEUP_ENABLE_SET(x) (((x) << WLAN_GPIO_PIN8_WAKEUP_ENABLE_LSB) & WLAN_GPIO_PIN8_WAKEUP_ENABLE_MASK)
+#define WLAN_GPIO_PIN8_INT_TYPE_MSB 9
+#define WLAN_GPIO_PIN8_INT_TYPE_LSB 7
+#define WLAN_GPIO_PIN8_INT_TYPE_MASK 0x00000380
+#define WLAN_GPIO_PIN8_INT_TYPE_GET(x) (((x) & WLAN_GPIO_PIN8_INT_TYPE_MASK) >> WLAN_GPIO_PIN8_INT_TYPE_LSB)
+#define WLAN_GPIO_PIN8_INT_TYPE_SET(x) (((x) << WLAN_GPIO_PIN8_INT_TYPE_LSB) & WLAN_GPIO_PIN8_INT_TYPE_MASK)
+#define WLAN_GPIO_PIN8_PAD_PULL_MSB 6
+#define WLAN_GPIO_PIN8_PAD_PULL_LSB 5
+#define WLAN_GPIO_PIN8_PAD_PULL_MASK 0x00000060
+#define WLAN_GPIO_PIN8_PAD_PULL_GET(x) (((x) & WLAN_GPIO_PIN8_PAD_PULL_MASK) >> WLAN_GPIO_PIN8_PAD_PULL_LSB)
+#define WLAN_GPIO_PIN8_PAD_PULL_SET(x) (((x) << WLAN_GPIO_PIN8_PAD_PULL_LSB) & WLAN_GPIO_PIN8_PAD_PULL_MASK)
+#define WLAN_GPIO_PIN8_PAD_STRENGTH_MSB 4
+#define WLAN_GPIO_PIN8_PAD_STRENGTH_LSB 3
+#define WLAN_GPIO_PIN8_PAD_STRENGTH_MASK 0x00000018
+#define WLAN_GPIO_PIN8_PAD_STRENGTH_GET(x) (((x) & WLAN_GPIO_PIN8_PAD_STRENGTH_MASK) >> WLAN_GPIO_PIN8_PAD_STRENGTH_LSB)
+#define WLAN_GPIO_PIN8_PAD_STRENGTH_SET(x) (((x) << WLAN_GPIO_PIN8_PAD_STRENGTH_LSB) & WLAN_GPIO_PIN8_PAD_STRENGTH_MASK)
+#define WLAN_GPIO_PIN8_PAD_DRIVER_MSB 2
+#define WLAN_GPIO_PIN8_PAD_DRIVER_LSB 2
+#define WLAN_GPIO_PIN8_PAD_DRIVER_MASK 0x00000004
+#define WLAN_GPIO_PIN8_PAD_DRIVER_GET(x) (((x) & WLAN_GPIO_PIN8_PAD_DRIVER_MASK) >> WLAN_GPIO_PIN8_PAD_DRIVER_LSB)
+#define WLAN_GPIO_PIN8_PAD_DRIVER_SET(x) (((x) << WLAN_GPIO_PIN8_PAD_DRIVER_LSB) & WLAN_GPIO_PIN8_PAD_DRIVER_MASK)
+#define WLAN_GPIO_PIN8_SOURCE_MSB 0
+#define WLAN_GPIO_PIN8_SOURCE_LSB 0
+#define WLAN_GPIO_PIN8_SOURCE_MASK 0x00000001
+#define WLAN_GPIO_PIN8_SOURCE_GET(x) (((x) & WLAN_GPIO_PIN8_SOURCE_MASK) >> WLAN_GPIO_PIN8_SOURCE_LSB)
+#define WLAN_GPIO_PIN8_SOURCE_SET(x) (((x) << WLAN_GPIO_PIN8_SOURCE_LSB) & WLAN_GPIO_PIN8_SOURCE_MASK)
+
+#define WLAN_GPIO_PIN9_ADDRESS 0x0000004c
+#define WLAN_GPIO_PIN9_OFFSET 0x0000004c
+#define WLAN_GPIO_PIN9_CONFIG_MSB 13
+#define WLAN_GPIO_PIN9_CONFIG_LSB 11
+#define WLAN_GPIO_PIN9_CONFIG_MASK 0x00003800
+#define WLAN_GPIO_PIN9_CONFIG_GET(x) (((x) & WLAN_GPIO_PIN9_CONFIG_MASK) >> WLAN_GPIO_PIN9_CONFIG_LSB)
+#define WLAN_GPIO_PIN9_CONFIG_SET(x) (((x) << WLAN_GPIO_PIN9_CONFIG_LSB) & WLAN_GPIO_PIN9_CONFIG_MASK)
+#define WLAN_GPIO_PIN9_WAKEUP_ENABLE_MSB 10
+#define WLAN_GPIO_PIN9_WAKEUP_ENABLE_LSB 10
+#define WLAN_GPIO_PIN9_WAKEUP_ENABLE_MASK 0x00000400
+#define WLAN_GPIO_PIN9_WAKEUP_ENABLE_GET(x) (((x) & WLAN_GPIO_PIN9_WAKEUP_ENABLE_MASK) >> WLAN_GPIO_PIN9_WAKEUP_ENABLE_LSB)
+#define WLAN_GPIO_PIN9_WAKEUP_ENABLE_SET(x) (((x) << WLAN_GPIO_PIN9_WAKEUP_ENABLE_LSB) & WLAN_GPIO_PIN9_WAKEUP_ENABLE_MASK)
+#define WLAN_GPIO_PIN9_INT_TYPE_MSB 9
+#define WLAN_GPIO_PIN9_INT_TYPE_LSB 7
+#define WLAN_GPIO_PIN9_INT_TYPE_MASK 0x00000380
+#define WLAN_GPIO_PIN9_INT_TYPE_GET(x) (((x) & WLAN_GPIO_PIN9_INT_TYPE_MASK) >> WLAN_GPIO_PIN9_INT_TYPE_LSB)
+#define WLAN_GPIO_PIN9_INT_TYPE_SET(x) (((x) << WLAN_GPIO_PIN9_INT_TYPE_LSB) & WLAN_GPIO_PIN9_INT_TYPE_MASK)
+#define WLAN_GPIO_PIN9_PAD_PULL_MSB 6
+#define WLAN_GPIO_PIN9_PAD_PULL_LSB 5
+#define WLAN_GPIO_PIN9_PAD_PULL_MASK 0x00000060
+#define WLAN_GPIO_PIN9_PAD_PULL_GET(x) (((x) & WLAN_GPIO_PIN9_PAD_PULL_MASK) >> WLAN_GPIO_PIN9_PAD_PULL_LSB)
+#define WLAN_GPIO_PIN9_PAD_PULL_SET(x) (((x) << WLAN_GPIO_PIN9_PAD_PULL_LSB) & WLAN_GPIO_PIN9_PAD_PULL_MASK)
+#define WLAN_GPIO_PIN9_PAD_STRENGTH_MSB 4
+#define WLAN_GPIO_PIN9_PAD_STRENGTH_LSB 3
+#define WLAN_GPIO_PIN9_PAD_STRENGTH_MASK 0x00000018
+#define WLAN_GPIO_PIN9_PAD_STRENGTH_GET(x) (((x) & WLAN_GPIO_PIN9_PAD_STRENGTH_MASK) >> WLAN_GPIO_PIN9_PAD_STRENGTH_LSB)
+#define WLAN_GPIO_PIN9_PAD_STRENGTH_SET(x) (((x) << WLAN_GPIO_PIN9_PAD_STRENGTH_LSB) & WLAN_GPIO_PIN9_PAD_STRENGTH_MASK)
+#define WLAN_GPIO_PIN9_PAD_DRIVER_MSB 2
+#define WLAN_GPIO_PIN9_PAD_DRIVER_LSB 2
+#define WLAN_GPIO_PIN9_PAD_DRIVER_MASK 0x00000004
+#define WLAN_GPIO_PIN9_PAD_DRIVER_GET(x) (((x) & WLAN_GPIO_PIN9_PAD_DRIVER_MASK) >> WLAN_GPIO_PIN9_PAD_DRIVER_LSB)
+#define WLAN_GPIO_PIN9_PAD_DRIVER_SET(x) (((x) << WLAN_GPIO_PIN9_PAD_DRIVER_LSB) & WLAN_GPIO_PIN9_PAD_DRIVER_MASK)
+#define WLAN_GPIO_PIN9_SOURCE_MSB 0
+#define WLAN_GPIO_PIN9_SOURCE_LSB 0
+#define WLAN_GPIO_PIN9_SOURCE_MASK 0x00000001
+#define WLAN_GPIO_PIN9_SOURCE_GET(x) (((x) & WLAN_GPIO_PIN9_SOURCE_MASK) >> WLAN_GPIO_PIN9_SOURCE_LSB)
+#define WLAN_GPIO_PIN9_SOURCE_SET(x) (((x) << WLAN_GPIO_PIN9_SOURCE_LSB) & WLAN_GPIO_PIN9_SOURCE_MASK)
+
+#define WLAN_GPIO_PIN10_ADDRESS 0x00000050
+#define WLAN_GPIO_PIN10_OFFSET 0x00000050
+#define WLAN_GPIO_PIN10_CONFIG_MSB 13
+#define WLAN_GPIO_PIN10_CONFIG_LSB 11
+#define WLAN_GPIO_PIN10_CONFIG_MASK 0x00003800
+#define WLAN_GPIO_PIN10_CONFIG_GET(x) (((x) & WLAN_GPIO_PIN10_CONFIG_MASK) >> WLAN_GPIO_PIN10_CONFIG_LSB)
+#define WLAN_GPIO_PIN10_CONFIG_SET(x) (((x) << WLAN_GPIO_PIN10_CONFIG_LSB) & WLAN_GPIO_PIN10_CONFIG_MASK)
+#define WLAN_GPIO_PIN10_WAKEUP_ENABLE_MSB 10
+#define WLAN_GPIO_PIN10_WAKEUP_ENABLE_LSB 10
+#define WLAN_GPIO_PIN10_WAKEUP_ENABLE_MASK 0x00000400
+#define WLAN_GPIO_PIN10_WAKEUP_ENABLE_GET(x) (((x) & WLAN_GPIO_PIN10_WAKEUP_ENABLE_MASK) >> WLAN_GPIO_PIN10_WAKEUP_ENABLE_LSB)
+#define WLAN_GPIO_PIN10_WAKEUP_ENABLE_SET(x) (((x) << WLAN_GPIO_PIN10_WAKEUP_ENABLE_LSB) & WLAN_GPIO_PIN10_WAKEUP_ENABLE_MASK)
+#define WLAN_GPIO_PIN10_INT_TYPE_MSB 9
+#define WLAN_GPIO_PIN10_INT_TYPE_LSB 7
+#define WLAN_GPIO_PIN10_INT_TYPE_MASK 0x00000380
+#define WLAN_GPIO_PIN10_INT_TYPE_GET(x) (((x) & WLAN_GPIO_PIN10_INT_TYPE_MASK) >> WLAN_GPIO_PIN10_INT_TYPE_LSB)
+#define WLAN_GPIO_PIN10_INT_TYPE_SET(x) (((x) << WLAN_GPIO_PIN10_INT_TYPE_LSB) & WLAN_GPIO_PIN10_INT_TYPE_MASK)
+#define WLAN_GPIO_PIN10_PAD_PULL_MSB 6
+#define WLAN_GPIO_PIN10_PAD_PULL_LSB 5
+#define WLAN_GPIO_PIN10_PAD_PULL_MASK 0x00000060
+#define WLAN_GPIO_PIN10_PAD_PULL_GET(x) (((x) & WLAN_GPIO_PIN10_PAD_PULL_MASK) >> WLAN_GPIO_PIN10_PAD_PULL_LSB)
+#define WLAN_GPIO_PIN10_PAD_PULL_SET(x) (((x) << WLAN_GPIO_PIN10_PAD_PULL_LSB) & WLAN_GPIO_PIN10_PAD_PULL_MASK)
+#define WLAN_GPIO_PIN10_PAD_STRENGTH_MSB 4
+#define WLAN_GPIO_PIN10_PAD_STRENGTH_LSB 3
+#define WLAN_GPIO_PIN10_PAD_STRENGTH_MASK 0x00000018
+#define WLAN_GPIO_PIN10_PAD_STRENGTH_GET(x) (((x) & WLAN_GPIO_PIN10_PAD_STRENGTH_MASK) >> WLAN_GPIO_PIN10_PAD_STRENGTH_LSB)
+#define WLAN_GPIO_PIN10_PAD_STRENGTH_SET(x) (((x) << WLAN_GPIO_PIN10_PAD_STRENGTH_LSB) & WLAN_GPIO_PIN10_PAD_STRENGTH_MASK)
+#define WLAN_GPIO_PIN10_PAD_DRIVER_MSB 2
+#define WLAN_GPIO_PIN10_PAD_DRIVER_LSB 2
+#define WLAN_GPIO_PIN10_PAD_DRIVER_MASK 0x00000004
+#define WLAN_GPIO_PIN10_PAD_DRIVER_GET(x) (((x) & WLAN_GPIO_PIN10_PAD_DRIVER_MASK) >> WLAN_GPIO_PIN10_PAD_DRIVER_LSB)
+#define WLAN_GPIO_PIN10_PAD_DRIVER_SET(x) (((x) << WLAN_GPIO_PIN10_PAD_DRIVER_LSB) & WLAN_GPIO_PIN10_PAD_DRIVER_MASK)
+#define WLAN_GPIO_PIN10_SOURCE_MSB 0
+#define WLAN_GPIO_PIN10_SOURCE_LSB 0
+#define WLAN_GPIO_PIN10_SOURCE_MASK 0x00000001
+#define WLAN_GPIO_PIN10_SOURCE_GET(x) (((x) & WLAN_GPIO_PIN10_SOURCE_MASK) >> WLAN_GPIO_PIN10_SOURCE_LSB)
+#define WLAN_GPIO_PIN10_SOURCE_SET(x) (((x) << WLAN_GPIO_PIN10_SOURCE_LSB) & WLAN_GPIO_PIN10_SOURCE_MASK)
+
+#define WLAN_GPIO_PIN11_ADDRESS 0x00000054
+#define WLAN_GPIO_PIN11_OFFSET 0x00000054
+#define WLAN_GPIO_PIN11_CONFIG_MSB 13
+#define WLAN_GPIO_PIN11_CONFIG_LSB 11
+#define WLAN_GPIO_PIN11_CONFIG_MASK 0x00003800
+#define WLAN_GPIO_PIN11_CONFIG_GET(x) (((x) & WLAN_GPIO_PIN11_CONFIG_MASK) >> WLAN_GPIO_PIN11_CONFIG_LSB)
+#define WLAN_GPIO_PIN11_CONFIG_SET(x) (((x) << WLAN_GPIO_PIN11_CONFIG_LSB) & WLAN_GPIO_PIN11_CONFIG_MASK)
+#define WLAN_GPIO_PIN11_WAKEUP_ENABLE_MSB 10
+#define WLAN_GPIO_PIN11_WAKEUP_ENABLE_LSB 10
+#define WLAN_GPIO_PIN11_WAKEUP_ENABLE_MASK 0x00000400
+#define WLAN_GPIO_PIN11_WAKEUP_ENABLE_GET(x) (((x) & WLAN_GPIO_PIN11_WAKEUP_ENABLE_MASK) >> WLAN_GPIO_PIN11_WAKEUP_ENABLE_LSB)
+#define WLAN_GPIO_PIN11_WAKEUP_ENABLE_SET(x) (((x) << WLAN_GPIO_PIN11_WAKEUP_ENABLE_LSB) & WLAN_GPIO_PIN11_WAKEUP_ENABLE_MASK)
+#define WLAN_GPIO_PIN11_INT_TYPE_MSB 9
+#define WLAN_GPIO_PIN11_INT_TYPE_LSB 7
+#define WLAN_GPIO_PIN11_INT_TYPE_MASK 0x00000380
+#define WLAN_GPIO_PIN11_INT_TYPE_GET(x) (((x) & WLAN_GPIO_PIN11_INT_TYPE_MASK) >> WLAN_GPIO_PIN11_INT_TYPE_LSB)
+#define WLAN_GPIO_PIN11_INT_TYPE_SET(x) (((x) << WLAN_GPIO_PIN11_INT_TYPE_LSB) & WLAN_GPIO_PIN11_INT_TYPE_MASK)
+#define WLAN_GPIO_PIN11_PAD_PULL_MSB 6
+#define WLAN_GPIO_PIN11_PAD_PULL_LSB 5
+#define WLAN_GPIO_PIN11_PAD_PULL_MASK 0x00000060
+#define WLAN_GPIO_PIN11_PAD_PULL_GET(x) (((x) & WLAN_GPIO_PIN11_PAD_PULL_MASK) >> WLAN_GPIO_PIN11_PAD_PULL_LSB)
+#define WLAN_GPIO_PIN11_PAD_PULL_SET(x) (((x) << WLAN_GPIO_PIN11_PAD_PULL_LSB) & WLAN_GPIO_PIN11_PAD_PULL_MASK)
+#define WLAN_GPIO_PIN11_PAD_STRENGTH_MSB 4
+#define WLAN_GPIO_PIN11_PAD_STRENGTH_LSB 3
+#define WLAN_GPIO_PIN11_PAD_STRENGTH_MASK 0x00000018
+#define WLAN_GPIO_PIN11_PAD_STRENGTH_GET(x) (((x) & WLAN_GPIO_PIN11_PAD_STRENGTH_MASK) >> WLAN_GPIO_PIN11_PAD_STRENGTH_LSB)
+#define WLAN_GPIO_PIN11_PAD_STRENGTH_SET(x) (((x) << WLAN_GPIO_PIN11_PAD_STRENGTH_LSB) & WLAN_GPIO_PIN11_PAD_STRENGTH_MASK)
+#define WLAN_GPIO_PIN11_PAD_DRIVER_MSB 2
+#define WLAN_GPIO_PIN11_PAD_DRIVER_LSB 2
+#define WLAN_GPIO_PIN11_PAD_DRIVER_MASK 0x00000004
+#define WLAN_GPIO_PIN11_PAD_DRIVER_GET(x) (((x) & WLAN_GPIO_PIN11_PAD_DRIVER_MASK) >> WLAN_GPIO_PIN11_PAD_DRIVER_LSB)
+#define WLAN_GPIO_PIN11_PAD_DRIVER_SET(x) (((x) << WLAN_GPIO_PIN11_PAD_DRIVER_LSB) & WLAN_GPIO_PIN11_PAD_DRIVER_MASK)
+#define WLAN_GPIO_PIN11_SOURCE_MSB 0
+#define WLAN_GPIO_PIN11_SOURCE_LSB 0
+#define WLAN_GPIO_PIN11_SOURCE_MASK 0x00000001
+#define WLAN_GPIO_PIN11_SOURCE_GET(x) (((x) & WLAN_GPIO_PIN11_SOURCE_MASK) >> WLAN_GPIO_PIN11_SOURCE_LSB)
+#define WLAN_GPIO_PIN11_SOURCE_SET(x) (((x) << WLAN_GPIO_PIN11_SOURCE_LSB) & WLAN_GPIO_PIN11_SOURCE_MASK)
+
+#define WLAN_GPIO_PIN12_ADDRESS 0x00000058
+#define WLAN_GPIO_PIN12_OFFSET 0x00000058
+#define WLAN_GPIO_PIN12_CONFIG_MSB 13
+#define WLAN_GPIO_PIN12_CONFIG_LSB 11
+#define WLAN_GPIO_PIN12_CONFIG_MASK 0x00003800
+#define WLAN_GPIO_PIN12_CONFIG_GET(x) (((x) & WLAN_GPIO_PIN12_CONFIG_MASK) >> WLAN_GPIO_PIN12_CONFIG_LSB)
+#define WLAN_GPIO_PIN12_CONFIG_SET(x) (((x) << WLAN_GPIO_PIN12_CONFIG_LSB) & WLAN_GPIO_PIN12_CONFIG_MASK)
+#define WLAN_GPIO_PIN12_WAKEUP_ENABLE_MSB 10
+#define WLAN_GPIO_PIN12_WAKEUP_ENABLE_LSB 10
+#define WLAN_GPIO_PIN12_WAKEUP_ENABLE_MASK 0x00000400
+#define WLAN_GPIO_PIN12_WAKEUP_ENABLE_GET(x) (((x) & WLAN_GPIO_PIN12_WAKEUP_ENABLE_MASK) >> WLAN_GPIO_PIN12_WAKEUP_ENABLE_LSB)
+#define WLAN_GPIO_PIN12_WAKEUP_ENABLE_SET(x) (((x) << WLAN_GPIO_PIN12_WAKEUP_ENABLE_LSB) & WLAN_GPIO_PIN12_WAKEUP_ENABLE_MASK)
+#define WLAN_GPIO_PIN12_INT_TYPE_MSB 9
+#define WLAN_GPIO_PIN12_INT_TYPE_LSB 7
+#define WLAN_GPIO_PIN12_INT_TYPE_MASK 0x00000380
+#define WLAN_GPIO_PIN12_INT_TYPE_GET(x) (((x) & WLAN_GPIO_PIN12_INT_TYPE_MASK) >> WLAN_GPIO_PIN12_INT_TYPE_LSB)
+#define WLAN_GPIO_PIN12_INT_TYPE_SET(x) (((x) << WLAN_GPIO_PIN12_INT_TYPE_LSB) & WLAN_GPIO_PIN12_INT_TYPE_MASK)
+#define WLAN_GPIO_PIN12_PAD_PULL_MSB 6
+#define WLAN_GPIO_PIN12_PAD_PULL_LSB 5
+#define WLAN_GPIO_PIN12_PAD_PULL_MASK 0x00000060
+#define WLAN_GPIO_PIN12_PAD_PULL_GET(x) (((x) & WLAN_GPIO_PIN12_PAD_PULL_MASK) >> WLAN_GPIO_PIN12_PAD_PULL_LSB)
+#define WLAN_GPIO_PIN12_PAD_PULL_SET(x) (((x) << WLAN_GPIO_PIN12_PAD_PULL_LSB) & WLAN_GPIO_PIN12_PAD_PULL_MASK)
+#define WLAN_GPIO_PIN12_PAD_STRENGTH_MSB 4
+#define WLAN_GPIO_PIN12_PAD_STRENGTH_LSB 3
+#define WLAN_GPIO_PIN12_PAD_STRENGTH_MASK 0x00000018
+#define WLAN_GPIO_PIN12_PAD_STRENGTH_GET(x) (((x) & WLAN_GPIO_PIN12_PAD_STRENGTH_MASK) >> WLAN_GPIO_PIN12_PAD_STRENGTH_LSB)
+#define WLAN_GPIO_PIN12_PAD_STRENGTH_SET(x) (((x) << WLAN_GPIO_PIN12_PAD_STRENGTH_LSB) & WLAN_GPIO_PIN12_PAD_STRENGTH_MASK)
+#define WLAN_GPIO_PIN12_PAD_DRIVER_MSB 2
+#define WLAN_GPIO_PIN12_PAD_DRIVER_LSB 2
+#define WLAN_GPIO_PIN12_PAD_DRIVER_MASK 0x00000004
+#define WLAN_GPIO_PIN12_PAD_DRIVER_GET(x) (((x) & WLAN_GPIO_PIN12_PAD_DRIVER_MASK) >> WLAN_GPIO_PIN12_PAD_DRIVER_LSB)
+#define WLAN_GPIO_PIN12_PAD_DRIVER_SET(x) (((x) << WLAN_GPIO_PIN12_PAD_DRIVER_LSB) & WLAN_GPIO_PIN12_PAD_DRIVER_MASK)
+#define WLAN_GPIO_PIN12_SOURCE_MSB 0
+#define WLAN_GPIO_PIN12_SOURCE_LSB 0
+#define WLAN_GPIO_PIN12_SOURCE_MASK 0x00000001
+#define WLAN_GPIO_PIN12_SOURCE_GET(x) (((x) & WLAN_GPIO_PIN12_SOURCE_MASK) >> WLAN_GPIO_PIN12_SOURCE_LSB)
+#define WLAN_GPIO_PIN12_SOURCE_SET(x) (((x) << WLAN_GPIO_PIN12_SOURCE_LSB) & WLAN_GPIO_PIN12_SOURCE_MASK)
+
+#define WLAN_GPIO_PIN13_ADDRESS 0x0000005c
+#define WLAN_GPIO_PIN13_OFFSET 0x0000005c
+#define WLAN_GPIO_PIN13_CONFIG_MSB 13
+#define WLAN_GPIO_PIN13_CONFIG_LSB 11
+#define WLAN_GPIO_PIN13_CONFIG_MASK 0x00003800
+#define WLAN_GPIO_PIN13_CONFIG_GET(x) (((x) & WLAN_GPIO_PIN13_CONFIG_MASK) >> WLAN_GPIO_PIN13_CONFIG_LSB)
+#define WLAN_GPIO_PIN13_CONFIG_SET(x) (((x) << WLAN_GPIO_PIN13_CONFIG_LSB) & WLAN_GPIO_PIN13_CONFIG_MASK)
+#define WLAN_GPIO_PIN13_WAKEUP_ENABLE_MSB 10
+#define WLAN_GPIO_PIN13_WAKEUP_ENABLE_LSB 10
+#define WLAN_GPIO_PIN13_WAKEUP_ENABLE_MASK 0x00000400
+#define WLAN_GPIO_PIN13_WAKEUP_ENABLE_GET(x) (((x) & WLAN_GPIO_PIN13_WAKEUP_ENABLE_MASK) >> WLAN_GPIO_PIN13_WAKEUP_ENABLE_LSB)
+#define WLAN_GPIO_PIN13_WAKEUP_ENABLE_SET(x) (((x) << WLAN_GPIO_PIN13_WAKEUP_ENABLE_LSB) & WLAN_GPIO_PIN13_WAKEUP_ENABLE_MASK)
+#define WLAN_GPIO_PIN13_INT_TYPE_MSB 9
+#define WLAN_GPIO_PIN13_INT_TYPE_LSB 7
+#define WLAN_GPIO_PIN13_INT_TYPE_MASK 0x00000380
+#define WLAN_GPIO_PIN13_INT_TYPE_GET(x) (((x) & WLAN_GPIO_PIN13_INT_TYPE_MASK) >> WLAN_GPIO_PIN13_INT_TYPE_LSB)
+#define WLAN_GPIO_PIN13_INT_TYPE_SET(x) (((x) << WLAN_GPIO_PIN13_INT_TYPE_LSB) & WLAN_GPIO_PIN13_INT_TYPE_MASK)
+#define WLAN_GPIO_PIN13_PAD_PULL_MSB 6
+#define WLAN_GPIO_PIN13_PAD_PULL_LSB 5
+#define WLAN_GPIO_PIN13_PAD_PULL_MASK 0x00000060
+#define WLAN_GPIO_PIN13_PAD_PULL_GET(x) (((x) & WLAN_GPIO_PIN13_PAD_PULL_MASK) >> WLAN_GPIO_PIN13_PAD_PULL_LSB)
+#define WLAN_GPIO_PIN13_PAD_PULL_SET(x) (((x) << WLAN_GPIO_PIN13_PAD_PULL_LSB) & WLAN_GPIO_PIN13_PAD_PULL_MASK)
+#define WLAN_GPIO_PIN13_PAD_STRENGTH_MSB 4
+#define WLAN_GPIO_PIN13_PAD_STRENGTH_LSB 3
+#define WLAN_GPIO_PIN13_PAD_STRENGTH_MASK 0x00000018
+#define WLAN_GPIO_PIN13_PAD_STRENGTH_GET(x) (((x) & WLAN_GPIO_PIN13_PAD_STRENGTH_MASK) >> WLAN_GPIO_PIN13_PAD_STRENGTH_LSB)
+#define WLAN_GPIO_PIN13_PAD_STRENGTH_SET(x) (((x) << WLAN_GPIO_PIN13_PAD_STRENGTH_LSB) & WLAN_GPIO_PIN13_PAD_STRENGTH_MASK)
+#define WLAN_GPIO_PIN13_PAD_DRIVER_MSB 2
+#define WLAN_GPIO_PIN13_PAD_DRIVER_LSB 2
+#define WLAN_GPIO_PIN13_PAD_DRIVER_MASK 0x00000004
+#define WLAN_GPIO_PIN13_PAD_DRIVER_GET(x) (((x) & WLAN_GPIO_PIN13_PAD_DRIVER_MASK) >> WLAN_GPIO_PIN13_PAD_DRIVER_LSB)
+#define WLAN_GPIO_PIN13_PAD_DRIVER_SET(x) (((x) << WLAN_GPIO_PIN13_PAD_DRIVER_LSB) & WLAN_GPIO_PIN13_PAD_DRIVER_MASK)
+#define WLAN_GPIO_PIN13_SOURCE_MSB 0
+#define WLAN_GPIO_PIN13_SOURCE_LSB 0
+#define WLAN_GPIO_PIN13_SOURCE_MASK 0x00000001
+#define WLAN_GPIO_PIN13_SOURCE_GET(x) (((x) & WLAN_GPIO_PIN13_SOURCE_MASK) >> WLAN_GPIO_PIN13_SOURCE_LSB)
+#define WLAN_GPIO_PIN13_SOURCE_SET(x) (((x) << WLAN_GPIO_PIN13_SOURCE_LSB) & WLAN_GPIO_PIN13_SOURCE_MASK)
+
+#define WLAN_GPIO_PIN14_ADDRESS 0x00000060
+#define WLAN_GPIO_PIN14_OFFSET 0x00000060
+#define WLAN_GPIO_PIN14_CONFIG_MSB 13
+#define WLAN_GPIO_PIN14_CONFIG_LSB 11
+#define WLAN_GPIO_PIN14_CONFIG_MASK 0x00003800
+#define WLAN_GPIO_PIN14_CONFIG_GET(x) (((x) & WLAN_GPIO_PIN14_CONFIG_MASK) >> WLAN_GPIO_PIN14_CONFIG_LSB)
+#define WLAN_GPIO_PIN14_CONFIG_SET(x) (((x) << WLAN_GPIO_PIN14_CONFIG_LSB) & WLAN_GPIO_PIN14_CONFIG_MASK)
+#define WLAN_GPIO_PIN14_WAKEUP_ENABLE_MSB 10
+#define WLAN_GPIO_PIN14_WAKEUP_ENABLE_LSB 10
+#define WLAN_GPIO_PIN14_WAKEUP_ENABLE_MASK 0x00000400
+#define WLAN_GPIO_PIN14_WAKEUP_ENABLE_GET(x) (((x) & WLAN_GPIO_PIN14_WAKEUP_ENABLE_MASK) >> WLAN_GPIO_PIN14_WAKEUP_ENABLE_LSB)
+#define WLAN_GPIO_PIN14_WAKEUP_ENABLE_SET(x) (((x) << WLAN_GPIO_PIN14_WAKEUP_ENABLE_LSB) & WLAN_GPIO_PIN14_WAKEUP_ENABLE_MASK)
+#define WLAN_GPIO_PIN14_INT_TYPE_MSB 9
+#define WLAN_GPIO_PIN14_INT_TYPE_LSB 7
+#define WLAN_GPIO_PIN14_INT_TYPE_MASK 0x00000380
+#define WLAN_GPIO_PIN14_INT_TYPE_GET(x) (((x) & WLAN_GPIO_PIN14_INT_TYPE_MASK) >> WLAN_GPIO_PIN14_INT_TYPE_LSB)
+#define WLAN_GPIO_PIN14_INT_TYPE_SET(x) (((x) << WLAN_GPIO_PIN14_INT_TYPE_LSB) & WLAN_GPIO_PIN14_INT_TYPE_MASK)
+#define WLAN_GPIO_PIN14_PAD_PULL_MSB 6
+#define WLAN_GPIO_PIN14_PAD_PULL_LSB 5
+#define WLAN_GPIO_PIN14_PAD_PULL_MASK 0x00000060
+#define WLAN_GPIO_PIN14_PAD_PULL_GET(x) (((x) & WLAN_GPIO_PIN14_PAD_PULL_MASK) >> WLAN_GPIO_PIN14_PAD_PULL_LSB)
+#define WLAN_GPIO_PIN14_PAD_PULL_SET(x) (((x) << WLAN_GPIO_PIN14_PAD_PULL_LSB) & WLAN_GPIO_PIN14_PAD_PULL_MASK)
+#define WLAN_GPIO_PIN14_PAD_STRENGTH_MSB 4
+#define WLAN_GPIO_PIN14_PAD_STRENGTH_LSB 3
+#define WLAN_GPIO_PIN14_PAD_STRENGTH_MASK 0x00000018
+#define WLAN_GPIO_PIN14_PAD_STRENGTH_GET(x) (((x) & WLAN_GPIO_PIN14_PAD_STRENGTH_MASK) >> WLAN_GPIO_PIN14_PAD_STRENGTH_LSB)
+#define WLAN_GPIO_PIN14_PAD_STRENGTH_SET(x) (((x) << WLAN_GPIO_PIN14_PAD_STRENGTH_LSB) & WLAN_GPIO_PIN14_PAD_STRENGTH_MASK)
+#define WLAN_GPIO_PIN14_PAD_DRIVER_MSB 2
+#define WLAN_GPIO_PIN14_PAD_DRIVER_LSB 2
+#define WLAN_GPIO_PIN14_PAD_DRIVER_MASK 0x00000004
+#define WLAN_GPIO_PIN14_PAD_DRIVER_GET(x) (((x) & WLAN_GPIO_PIN14_PAD_DRIVER_MASK) >> WLAN_GPIO_PIN14_PAD_DRIVER_LSB)
+#define WLAN_GPIO_PIN14_PAD_DRIVER_SET(x) (((x) << WLAN_GPIO_PIN14_PAD_DRIVER_LSB) & WLAN_GPIO_PIN14_PAD_DRIVER_MASK)
+#define WLAN_GPIO_PIN14_SOURCE_MSB 0
+#define WLAN_GPIO_PIN14_SOURCE_LSB 0
+#define WLAN_GPIO_PIN14_SOURCE_MASK 0x00000001
+#define WLAN_GPIO_PIN14_SOURCE_GET(x) (((x) & WLAN_GPIO_PIN14_SOURCE_MASK) >> WLAN_GPIO_PIN14_SOURCE_LSB)
+#define WLAN_GPIO_PIN14_SOURCE_SET(x) (((x) << WLAN_GPIO_PIN14_SOURCE_LSB) & WLAN_GPIO_PIN14_SOURCE_MASK)
+
+#define WLAN_GPIO_PIN15_ADDRESS 0x00000064
+#define WLAN_GPIO_PIN15_OFFSET 0x00000064
+#define WLAN_GPIO_PIN15_CONFIG_MSB 13
+#define WLAN_GPIO_PIN15_CONFIG_LSB 11
+#define WLAN_GPIO_PIN15_CONFIG_MASK 0x00003800
+#define WLAN_GPIO_PIN15_CONFIG_GET(x) (((x) & WLAN_GPIO_PIN15_CONFIG_MASK) >> WLAN_GPIO_PIN15_CONFIG_LSB)
+#define WLAN_GPIO_PIN15_CONFIG_SET(x) (((x) << WLAN_GPIO_PIN15_CONFIG_LSB) & WLAN_GPIO_PIN15_CONFIG_MASK)
+#define WLAN_GPIO_PIN15_WAKEUP_ENABLE_MSB 10
+#define WLAN_GPIO_PIN15_WAKEUP_ENABLE_LSB 10
+#define WLAN_GPIO_PIN15_WAKEUP_ENABLE_MASK 0x00000400
+#define WLAN_GPIO_PIN15_WAKEUP_ENABLE_GET(x) (((x) & WLAN_GPIO_PIN15_WAKEUP_ENABLE_MASK) >> WLAN_GPIO_PIN15_WAKEUP_ENABLE_LSB)
+#define WLAN_GPIO_PIN15_WAKEUP_ENABLE_SET(x) (((x) << WLAN_GPIO_PIN15_WAKEUP_ENABLE_LSB) & WLAN_GPIO_PIN15_WAKEUP_ENABLE_MASK)
+#define WLAN_GPIO_PIN15_INT_TYPE_MSB 9
+#define WLAN_GPIO_PIN15_INT_TYPE_LSB 7
+#define WLAN_GPIO_PIN15_INT_TYPE_MASK 0x00000380
+#define WLAN_GPIO_PIN15_INT_TYPE_GET(x) (((x) & WLAN_GPIO_PIN15_INT_TYPE_MASK) >> WLAN_GPIO_PIN15_INT_TYPE_LSB)
+#define WLAN_GPIO_PIN15_INT_TYPE_SET(x) (((x) << WLAN_GPIO_PIN15_INT_TYPE_LSB) & WLAN_GPIO_PIN15_INT_TYPE_MASK)
+#define WLAN_GPIO_PIN15_PAD_PULL_MSB 6
+#define WLAN_GPIO_PIN15_PAD_PULL_LSB 5
+#define WLAN_GPIO_PIN15_PAD_PULL_MASK 0x00000060
+#define WLAN_GPIO_PIN15_PAD_PULL_GET(x) (((x) & WLAN_GPIO_PIN15_PAD_PULL_MASK) >> WLAN_GPIO_PIN15_PAD_PULL_LSB)
+#define WLAN_GPIO_PIN15_PAD_PULL_SET(x) (((x) << WLAN_GPIO_PIN15_PAD_PULL_LSB) & WLAN_GPIO_PIN15_PAD_PULL_MASK)
+#define WLAN_GPIO_PIN15_PAD_STRENGTH_MSB 4
+#define WLAN_GPIO_PIN15_PAD_STRENGTH_LSB 3
+#define WLAN_GPIO_PIN15_PAD_STRENGTH_MASK 0x00000018
+#define WLAN_GPIO_PIN15_PAD_STRENGTH_GET(x) (((x) & WLAN_GPIO_PIN15_PAD_STRENGTH_MASK) >> WLAN_GPIO_PIN15_PAD_STRENGTH_LSB)
+#define WLAN_GPIO_PIN15_PAD_STRENGTH_SET(x) (((x) << WLAN_GPIO_PIN15_PAD_STRENGTH_LSB) & WLAN_GPIO_PIN15_PAD_STRENGTH_MASK)
+#define WLAN_GPIO_PIN15_PAD_DRIVER_MSB 2
+#define WLAN_GPIO_PIN15_PAD_DRIVER_LSB 2
+#define WLAN_GPIO_PIN15_PAD_DRIVER_MASK 0x00000004
+#define WLAN_GPIO_PIN15_PAD_DRIVER_GET(x) (((x) & WLAN_GPIO_PIN15_PAD_DRIVER_MASK) >> WLAN_GPIO_PIN15_PAD_DRIVER_LSB)
+#define WLAN_GPIO_PIN15_PAD_DRIVER_SET(x) (((x) << WLAN_GPIO_PIN15_PAD_DRIVER_LSB) & WLAN_GPIO_PIN15_PAD_DRIVER_MASK)
+#define WLAN_GPIO_PIN15_SOURCE_MSB 0
+#define WLAN_GPIO_PIN15_SOURCE_LSB 0
+#define WLAN_GPIO_PIN15_SOURCE_MASK 0x00000001
+#define WLAN_GPIO_PIN15_SOURCE_GET(x) (((x) & WLAN_GPIO_PIN15_SOURCE_MASK) >> WLAN_GPIO_PIN15_SOURCE_LSB)
+#define WLAN_GPIO_PIN15_SOURCE_SET(x) (((x) << WLAN_GPIO_PIN15_SOURCE_LSB) & WLAN_GPIO_PIN15_SOURCE_MASK)
+
+#define WLAN_GPIO_PIN16_ADDRESS 0x00000068
+#define WLAN_GPIO_PIN16_OFFSET 0x00000068
+#define WLAN_GPIO_PIN16_CONFIG_MSB 13
+#define WLAN_GPIO_PIN16_CONFIG_LSB 11
+#define WLAN_GPIO_PIN16_CONFIG_MASK 0x00003800
+#define WLAN_GPIO_PIN16_CONFIG_GET(x) (((x) & WLAN_GPIO_PIN16_CONFIG_MASK) >> WLAN_GPIO_PIN16_CONFIG_LSB)
+#define WLAN_GPIO_PIN16_CONFIG_SET(x) (((x) << WLAN_GPIO_PIN16_CONFIG_LSB) & WLAN_GPIO_PIN16_CONFIG_MASK)
+#define WLAN_GPIO_PIN16_WAKEUP_ENABLE_MSB 10
+#define WLAN_GPIO_PIN16_WAKEUP_ENABLE_LSB 10
+#define WLAN_GPIO_PIN16_WAKEUP_ENABLE_MASK 0x00000400
+#define WLAN_GPIO_PIN16_WAKEUP_ENABLE_GET(x) (((x) & WLAN_GPIO_PIN16_WAKEUP_ENABLE_MASK) >> WLAN_GPIO_PIN16_WAKEUP_ENABLE_LSB)
+#define WLAN_GPIO_PIN16_WAKEUP_ENABLE_SET(x) (((x) << WLAN_GPIO_PIN16_WAKEUP_ENABLE_LSB) & WLAN_GPIO_PIN16_WAKEUP_ENABLE_MASK)
+#define WLAN_GPIO_PIN16_INT_TYPE_MSB 9
+#define WLAN_GPIO_PIN16_INT_TYPE_LSB 7
+#define WLAN_GPIO_PIN16_INT_TYPE_MASK 0x00000380
+#define WLAN_GPIO_PIN16_INT_TYPE_GET(x) (((x) & WLAN_GPIO_PIN16_INT_TYPE_MASK) >> WLAN_GPIO_PIN16_INT_TYPE_LSB)
+#define WLAN_GPIO_PIN16_INT_TYPE_SET(x) (((x) << WLAN_GPIO_PIN16_INT_TYPE_LSB) & WLAN_GPIO_PIN16_INT_TYPE_MASK)
+#define WLAN_GPIO_PIN16_PAD_PULL_MSB 6
+#define WLAN_GPIO_PIN16_PAD_PULL_LSB 5
+#define WLAN_GPIO_PIN16_PAD_PULL_MASK 0x00000060
+#define WLAN_GPIO_PIN16_PAD_PULL_GET(x) (((x) & WLAN_GPIO_PIN16_PAD_PULL_MASK) >> WLAN_GPIO_PIN16_PAD_PULL_LSB)
+#define WLAN_GPIO_PIN16_PAD_PULL_SET(x) (((x) << WLAN_GPIO_PIN16_PAD_PULL_LSB) & WLAN_GPIO_PIN16_PAD_PULL_MASK)
+#define WLAN_GPIO_PIN16_PAD_STRENGTH_MSB 4
+#define WLAN_GPIO_PIN16_PAD_STRENGTH_LSB 3
+#define WLAN_GPIO_PIN16_PAD_STRENGTH_MASK 0x00000018
+#define WLAN_GPIO_PIN16_PAD_STRENGTH_GET(x) (((x) & WLAN_GPIO_PIN16_PAD_STRENGTH_MASK) >> WLAN_GPIO_PIN16_PAD_STRENGTH_LSB)
+#define WLAN_GPIO_PIN16_PAD_STRENGTH_SET(x) (((x) << WLAN_GPIO_PIN16_PAD_STRENGTH_LSB) & WLAN_GPIO_PIN16_PAD_STRENGTH_MASK)
+#define WLAN_GPIO_PIN16_PAD_DRIVER_MSB 2
+#define WLAN_GPIO_PIN16_PAD_DRIVER_LSB 2
+#define WLAN_GPIO_PIN16_PAD_DRIVER_MASK 0x00000004
+#define WLAN_GPIO_PIN16_PAD_DRIVER_GET(x) (((x) & WLAN_GPIO_PIN16_PAD_DRIVER_MASK) >> WLAN_GPIO_PIN16_PAD_DRIVER_LSB)
+#define WLAN_GPIO_PIN16_PAD_DRIVER_SET(x) (((x) << WLAN_GPIO_PIN16_PAD_DRIVER_LSB) & WLAN_GPIO_PIN16_PAD_DRIVER_MASK)
+#define WLAN_GPIO_PIN16_SOURCE_MSB 0
+#define WLAN_GPIO_PIN16_SOURCE_LSB 0
+#define WLAN_GPIO_PIN16_SOURCE_MASK 0x00000001
+#define WLAN_GPIO_PIN16_SOURCE_GET(x) (((x) & WLAN_GPIO_PIN16_SOURCE_MASK) >> WLAN_GPIO_PIN16_SOURCE_LSB)
+#define WLAN_GPIO_PIN16_SOURCE_SET(x) (((x) << WLAN_GPIO_PIN16_SOURCE_LSB) & WLAN_GPIO_PIN16_SOURCE_MASK)
+
+#define WLAN_GPIO_PIN17_ADDRESS 0x0000006c
+#define WLAN_GPIO_PIN17_OFFSET 0x0000006c
+#define WLAN_GPIO_PIN17_CONFIG_MSB 13
+#define WLAN_GPIO_PIN17_CONFIG_LSB 11
+#define WLAN_GPIO_PIN17_CONFIG_MASK 0x00003800
+#define WLAN_GPIO_PIN17_CONFIG_GET(x) (((x) & WLAN_GPIO_PIN17_CONFIG_MASK) >> WLAN_GPIO_PIN17_CONFIG_LSB)
+#define WLAN_GPIO_PIN17_CONFIG_SET(x) (((x) << WLAN_GPIO_PIN17_CONFIG_LSB) & WLAN_GPIO_PIN17_CONFIG_MASK)
+#define WLAN_GPIO_PIN17_WAKEUP_ENABLE_MSB 10
+#define WLAN_GPIO_PIN17_WAKEUP_ENABLE_LSB 10
+#define WLAN_GPIO_PIN17_WAKEUP_ENABLE_MASK 0x00000400
+#define WLAN_GPIO_PIN17_WAKEUP_ENABLE_GET(x) (((x) & WLAN_GPIO_PIN17_WAKEUP_ENABLE_MASK) >> WLAN_GPIO_PIN17_WAKEUP_ENABLE_LSB)
+#define WLAN_GPIO_PIN17_WAKEUP_ENABLE_SET(x) (((x) << WLAN_GPIO_PIN17_WAKEUP_ENABLE_LSB) & WLAN_GPIO_PIN17_WAKEUP_ENABLE_MASK)
+#define WLAN_GPIO_PIN17_INT_TYPE_MSB 9
+#define WLAN_GPIO_PIN17_INT_TYPE_LSB 7
+#define WLAN_GPIO_PIN17_INT_TYPE_MASK 0x00000380
+#define WLAN_GPIO_PIN17_INT_TYPE_GET(x) (((x) & WLAN_GPIO_PIN17_INT_TYPE_MASK) >> WLAN_GPIO_PIN17_INT_TYPE_LSB)
+#define WLAN_GPIO_PIN17_INT_TYPE_SET(x) (((x) << WLAN_GPIO_PIN17_INT_TYPE_LSB) & WLAN_GPIO_PIN17_INT_TYPE_MASK)
+#define WLAN_GPIO_PIN17_PAD_PULL_MSB 6
+#define WLAN_GPIO_PIN17_PAD_PULL_LSB 5
+#define WLAN_GPIO_PIN17_PAD_PULL_MASK 0x00000060
+#define WLAN_GPIO_PIN17_PAD_PULL_GET(x) (((x) & WLAN_GPIO_PIN17_PAD_PULL_MASK) >> WLAN_GPIO_PIN17_PAD_PULL_LSB)
+#define WLAN_GPIO_PIN17_PAD_PULL_SET(x) (((x) << WLAN_GPIO_PIN17_PAD_PULL_LSB) & WLAN_GPIO_PIN17_PAD_PULL_MASK)
+#define WLAN_GPIO_PIN17_PAD_STRENGTH_MSB 4
+#define WLAN_GPIO_PIN17_PAD_STRENGTH_LSB 3
+#define WLAN_GPIO_PIN17_PAD_STRENGTH_MASK 0x00000018
+#define WLAN_GPIO_PIN17_PAD_STRENGTH_GET(x) (((x) & WLAN_GPIO_PIN17_PAD_STRENGTH_MASK) >> WLAN_GPIO_PIN17_PAD_STRENGTH_LSB)
+#define WLAN_GPIO_PIN17_PAD_STRENGTH_SET(x) (((x) << WLAN_GPIO_PIN17_PAD_STRENGTH_LSB) & WLAN_GPIO_PIN17_PAD_STRENGTH_MASK)
+#define WLAN_GPIO_PIN17_PAD_DRIVER_MSB 2
+#define WLAN_GPIO_PIN17_PAD_DRIVER_LSB 2
+#define WLAN_GPIO_PIN17_PAD_DRIVER_MASK 0x00000004
+#define WLAN_GPIO_PIN17_PAD_DRIVER_GET(x) (((x) & WLAN_GPIO_PIN17_PAD_DRIVER_MASK) >> WLAN_GPIO_PIN17_PAD_DRIVER_LSB)
+#define WLAN_GPIO_PIN17_PAD_DRIVER_SET(x) (((x) << WLAN_GPIO_PIN17_PAD_DRIVER_LSB) & WLAN_GPIO_PIN17_PAD_DRIVER_MASK)
+#define WLAN_GPIO_PIN17_SOURCE_MSB 0
+#define WLAN_GPIO_PIN17_SOURCE_LSB 0
+#define WLAN_GPIO_PIN17_SOURCE_MASK 0x00000001
+#define WLAN_GPIO_PIN17_SOURCE_GET(x) (((x) & WLAN_GPIO_PIN17_SOURCE_MASK) >> WLAN_GPIO_PIN17_SOURCE_LSB)
+#define WLAN_GPIO_PIN17_SOURCE_SET(x) (((x) << WLAN_GPIO_PIN17_SOURCE_LSB) & WLAN_GPIO_PIN17_SOURCE_MASK)
+
+#define WLAN_GPIO_PIN18_ADDRESS 0x00000070
+#define WLAN_GPIO_PIN18_OFFSET 0x00000070
+#define WLAN_GPIO_PIN18_CONFIG_MSB 13
+#define WLAN_GPIO_PIN18_CONFIG_LSB 11
+#define WLAN_GPIO_PIN18_CONFIG_MASK 0x00003800
+#define WLAN_GPIO_PIN18_CONFIG_GET(x) (((x) & WLAN_GPIO_PIN18_CONFIG_MASK) >> WLAN_GPIO_PIN18_CONFIG_LSB)
+#define WLAN_GPIO_PIN18_CONFIG_SET(x) (((x) << WLAN_GPIO_PIN18_CONFIG_LSB) & WLAN_GPIO_PIN18_CONFIG_MASK)
+#define WLAN_GPIO_PIN18_WAKEUP_ENABLE_MSB 10
+#define WLAN_GPIO_PIN18_WAKEUP_ENABLE_LSB 10
+#define WLAN_GPIO_PIN18_WAKEUP_ENABLE_MASK 0x00000400
+#define WLAN_GPIO_PIN18_WAKEUP_ENABLE_GET(x) (((x) & WLAN_GPIO_PIN18_WAKEUP_ENABLE_MASK) >> WLAN_GPIO_PIN18_WAKEUP_ENABLE_LSB)
+#define WLAN_GPIO_PIN18_WAKEUP_ENABLE_SET(x) (((x) << WLAN_GPIO_PIN18_WAKEUP_ENABLE_LSB) & WLAN_GPIO_PIN18_WAKEUP_ENABLE_MASK)
+#define WLAN_GPIO_PIN18_INT_TYPE_MSB 9
+#define WLAN_GPIO_PIN18_INT_TYPE_LSB 7
+#define WLAN_GPIO_PIN18_INT_TYPE_MASK 0x00000380
+#define WLAN_GPIO_PIN18_INT_TYPE_GET(x) (((x) & WLAN_GPIO_PIN18_INT_TYPE_MASK) >> WLAN_GPIO_PIN18_INT_TYPE_LSB)
+#define WLAN_GPIO_PIN18_INT_TYPE_SET(x) (((x) << WLAN_GPIO_PIN18_INT_TYPE_LSB) & WLAN_GPIO_PIN18_INT_TYPE_MASK)
+#define WLAN_GPIO_PIN18_PAD_PULL_MSB 6
+#define WLAN_GPIO_PIN18_PAD_PULL_LSB 5
+#define WLAN_GPIO_PIN18_PAD_PULL_MASK 0x00000060
+#define WLAN_GPIO_PIN18_PAD_PULL_GET(x) (((x) & WLAN_GPIO_PIN18_PAD_PULL_MASK) >> WLAN_GPIO_PIN18_PAD_PULL_LSB)
+#define WLAN_GPIO_PIN18_PAD_PULL_SET(x) (((x) << WLAN_GPIO_PIN18_PAD_PULL_LSB) & WLAN_GPIO_PIN18_PAD_PULL_MASK)
+#define WLAN_GPIO_PIN18_PAD_STRENGTH_MSB 4
+#define WLAN_GPIO_PIN18_PAD_STRENGTH_LSB 3
+#define WLAN_GPIO_PIN18_PAD_STRENGTH_MASK 0x00000018
+#define WLAN_GPIO_PIN18_PAD_STRENGTH_GET(x) (((x) & WLAN_GPIO_PIN18_PAD_STRENGTH_MASK) >> WLAN_GPIO_PIN18_PAD_STRENGTH_LSB)
+#define WLAN_GPIO_PIN18_PAD_STRENGTH_SET(x) (((x) << WLAN_GPIO_PIN18_PAD_STRENGTH_LSB) & WLAN_GPIO_PIN18_PAD_STRENGTH_MASK)
+#define WLAN_GPIO_PIN18_PAD_DRIVER_MSB 2
+#define WLAN_GPIO_PIN18_PAD_DRIVER_LSB 2
+#define WLAN_GPIO_PIN18_PAD_DRIVER_MASK 0x00000004
+#define WLAN_GPIO_PIN18_PAD_DRIVER_GET(x) (((x) & WLAN_GPIO_PIN18_PAD_DRIVER_MASK) >> WLAN_GPIO_PIN18_PAD_DRIVER_LSB)
+#define WLAN_GPIO_PIN18_PAD_DRIVER_SET(x) (((x) << WLAN_GPIO_PIN18_PAD_DRIVER_LSB) & WLAN_GPIO_PIN18_PAD_DRIVER_MASK)
+#define WLAN_GPIO_PIN18_SOURCE_MSB 0
+#define WLAN_GPIO_PIN18_SOURCE_LSB 0
+#define WLAN_GPIO_PIN18_SOURCE_MASK 0x00000001
+#define WLAN_GPIO_PIN18_SOURCE_GET(x) (((x) & WLAN_GPIO_PIN18_SOURCE_MASK) >> WLAN_GPIO_PIN18_SOURCE_LSB)
+#define WLAN_GPIO_PIN18_SOURCE_SET(x) (((x) << WLAN_GPIO_PIN18_SOURCE_LSB) & WLAN_GPIO_PIN18_SOURCE_MASK)
+
+#define WLAN_GPIO_PIN19_ADDRESS 0x00000074
+#define WLAN_GPIO_PIN19_OFFSET 0x00000074
+#define WLAN_GPIO_PIN19_CONFIG_MSB 13
+#define WLAN_GPIO_PIN19_CONFIG_LSB 11
+#define WLAN_GPIO_PIN19_CONFIG_MASK 0x00003800
+#define WLAN_GPIO_PIN19_CONFIG_GET(x) (((x) & WLAN_GPIO_PIN19_CONFIG_MASK) >> WLAN_GPIO_PIN19_CONFIG_LSB)
+#define WLAN_GPIO_PIN19_CONFIG_SET(x) (((x) << WLAN_GPIO_PIN19_CONFIG_LSB) & WLAN_GPIO_PIN19_CONFIG_MASK)
+#define WLAN_GPIO_PIN19_WAKEUP_ENABLE_MSB 10
+#define WLAN_GPIO_PIN19_WAKEUP_ENABLE_LSB 10
+#define WLAN_GPIO_PIN19_WAKEUP_ENABLE_MASK 0x00000400
+#define WLAN_GPIO_PIN19_WAKEUP_ENABLE_GET(x) (((x) & WLAN_GPIO_PIN19_WAKEUP_ENABLE_MASK) >> WLAN_GPIO_PIN19_WAKEUP_ENABLE_LSB)
+#define WLAN_GPIO_PIN19_WAKEUP_ENABLE_SET(x) (((x) << WLAN_GPIO_PIN19_WAKEUP_ENABLE_LSB) & WLAN_GPIO_PIN19_WAKEUP_ENABLE_MASK)
+#define WLAN_GPIO_PIN19_INT_TYPE_MSB 9
+#define WLAN_GPIO_PIN19_INT_TYPE_LSB 7
+#define WLAN_GPIO_PIN19_INT_TYPE_MASK 0x00000380
+#define WLAN_GPIO_PIN19_INT_TYPE_GET(x) (((x) & WLAN_GPIO_PIN19_INT_TYPE_MASK) >> WLAN_GPIO_PIN19_INT_TYPE_LSB)
+#define WLAN_GPIO_PIN19_INT_TYPE_SET(x) (((x) << WLAN_GPIO_PIN19_INT_TYPE_LSB) & WLAN_GPIO_PIN19_INT_TYPE_MASK)
+#define WLAN_GPIO_PIN19_PAD_PULL_MSB 6
+#define WLAN_GPIO_PIN19_PAD_PULL_LSB 5
+#define WLAN_GPIO_PIN19_PAD_PULL_MASK 0x00000060
+#define WLAN_GPIO_PIN19_PAD_PULL_GET(x) (((x) & WLAN_GPIO_PIN19_PAD_PULL_MASK) >> WLAN_GPIO_PIN19_PAD_PULL_LSB)
+#define WLAN_GPIO_PIN19_PAD_PULL_SET(x) (((x) << WLAN_GPIO_PIN19_PAD_PULL_LSB) & WLAN_GPIO_PIN19_PAD_PULL_MASK)
+#define WLAN_GPIO_PIN19_PAD_STRENGTH_MSB 4
+#define WLAN_GPIO_PIN19_PAD_STRENGTH_LSB 3
+#define WLAN_GPIO_PIN19_PAD_STRENGTH_MASK 0x00000018
+#define WLAN_GPIO_PIN19_PAD_STRENGTH_GET(x) (((x) & WLAN_GPIO_PIN19_PAD_STRENGTH_MASK) >> WLAN_GPIO_PIN19_PAD_STRENGTH_LSB)
+#define WLAN_GPIO_PIN19_PAD_STRENGTH_SET(x) (((x) << WLAN_GPIO_PIN19_PAD_STRENGTH_LSB) & WLAN_GPIO_PIN19_PAD_STRENGTH_MASK)
+#define WLAN_GPIO_PIN19_PAD_DRIVER_MSB 2
+#define WLAN_GPIO_PIN19_PAD_DRIVER_LSB 2
+#define WLAN_GPIO_PIN19_PAD_DRIVER_MASK 0x00000004
+#define WLAN_GPIO_PIN19_PAD_DRIVER_GET(x) (((x) & WLAN_GPIO_PIN19_PAD_DRIVER_MASK) >> WLAN_GPIO_PIN19_PAD_DRIVER_LSB)
+#define WLAN_GPIO_PIN19_PAD_DRIVER_SET(x) (((x) << WLAN_GPIO_PIN19_PAD_DRIVER_LSB) & WLAN_GPIO_PIN19_PAD_DRIVER_MASK)
+#define WLAN_GPIO_PIN19_SOURCE_MSB 0
+#define WLAN_GPIO_PIN19_SOURCE_LSB 0
+#define WLAN_GPIO_PIN19_SOURCE_MASK 0x00000001
+#define WLAN_GPIO_PIN19_SOURCE_GET(x) (((x) & WLAN_GPIO_PIN19_SOURCE_MASK) >> WLAN_GPIO_PIN19_SOURCE_LSB)
+#define WLAN_GPIO_PIN19_SOURCE_SET(x) (((x) << WLAN_GPIO_PIN19_SOURCE_LSB) & WLAN_GPIO_PIN19_SOURCE_MASK)
+
+#define WLAN_GPIO_PIN20_ADDRESS 0x00000078
+#define WLAN_GPIO_PIN20_OFFSET 0x00000078
+#define WLAN_GPIO_PIN20_CONFIG_MSB 13
+#define WLAN_GPIO_PIN20_CONFIG_LSB 11
+#define WLAN_GPIO_PIN20_CONFIG_MASK 0x00003800
+#define WLAN_GPIO_PIN20_CONFIG_GET(x) (((x) & WLAN_GPIO_PIN20_CONFIG_MASK) >> WLAN_GPIO_PIN20_CONFIG_LSB)
+#define WLAN_GPIO_PIN20_CONFIG_SET(x) (((x) << WLAN_GPIO_PIN20_CONFIG_LSB) & WLAN_GPIO_PIN20_CONFIG_MASK)
+#define WLAN_GPIO_PIN20_WAKEUP_ENABLE_MSB 10
+#define WLAN_GPIO_PIN20_WAKEUP_ENABLE_LSB 10
+#define WLAN_GPIO_PIN20_WAKEUP_ENABLE_MASK 0x00000400
+#define WLAN_GPIO_PIN20_WAKEUP_ENABLE_GET(x) (((x) & WLAN_GPIO_PIN20_WAKEUP_ENABLE_MASK) >> WLAN_GPIO_PIN20_WAKEUP_ENABLE_LSB)
+#define WLAN_GPIO_PIN20_WAKEUP_ENABLE_SET(x) (((x) << WLAN_GPIO_PIN20_WAKEUP_ENABLE_LSB) & WLAN_GPIO_PIN20_WAKEUP_ENABLE_MASK)
+#define WLAN_GPIO_PIN20_INT_TYPE_MSB 9
+#define WLAN_GPIO_PIN20_INT_TYPE_LSB 7
+#define WLAN_GPIO_PIN20_INT_TYPE_MASK 0x00000380
+#define WLAN_GPIO_PIN20_INT_TYPE_GET(x) (((x) & WLAN_GPIO_PIN20_INT_TYPE_MASK) >> WLAN_GPIO_PIN20_INT_TYPE_LSB)
+#define WLAN_GPIO_PIN20_INT_TYPE_SET(x) (((x) << WLAN_GPIO_PIN20_INT_TYPE_LSB) & WLAN_GPIO_PIN20_INT_TYPE_MASK)
+#define WLAN_GPIO_PIN20_PAD_PULL_MSB 6
+#define WLAN_GPIO_PIN20_PAD_PULL_LSB 5
+#define WLAN_GPIO_PIN20_PAD_PULL_MASK 0x00000060
+#define WLAN_GPIO_PIN20_PAD_PULL_GET(x) (((x) & WLAN_GPIO_PIN20_PAD_PULL_MASK) >> WLAN_GPIO_PIN20_PAD_PULL_LSB)
+#define WLAN_GPIO_PIN20_PAD_PULL_SET(x) (((x) << WLAN_GPIO_PIN20_PAD_PULL_LSB) & WLAN_GPIO_PIN20_PAD_PULL_MASK)
+#define WLAN_GPIO_PIN20_PAD_STRENGTH_MSB 4
+#define WLAN_GPIO_PIN20_PAD_STRENGTH_LSB 3
+#define WLAN_GPIO_PIN20_PAD_STRENGTH_MASK 0x00000018
+#define WLAN_GPIO_PIN20_PAD_STRENGTH_GET(x) (((x) & WLAN_GPIO_PIN20_PAD_STRENGTH_MASK) >> WLAN_GPIO_PIN20_PAD_STRENGTH_LSB)
+#define WLAN_GPIO_PIN20_PAD_STRENGTH_SET(x) (((x) << WLAN_GPIO_PIN20_PAD_STRENGTH_LSB) & WLAN_GPIO_PIN20_PAD_STRENGTH_MASK)
+#define WLAN_GPIO_PIN20_PAD_DRIVER_MSB 2
+#define WLAN_GPIO_PIN20_PAD_DRIVER_LSB 2
+#define WLAN_GPIO_PIN20_PAD_DRIVER_MASK 0x00000004
+#define WLAN_GPIO_PIN20_PAD_DRIVER_GET(x) (((x) & WLAN_GPIO_PIN20_PAD_DRIVER_MASK) >> WLAN_GPIO_PIN20_PAD_DRIVER_LSB)
+#define WLAN_GPIO_PIN20_PAD_DRIVER_SET(x) (((x) << WLAN_GPIO_PIN20_PAD_DRIVER_LSB) & WLAN_GPIO_PIN20_PAD_DRIVER_MASK)
+#define WLAN_GPIO_PIN20_SOURCE_MSB 0
+#define WLAN_GPIO_PIN20_SOURCE_LSB 0
+#define WLAN_GPIO_PIN20_SOURCE_MASK 0x00000001
+#define WLAN_GPIO_PIN20_SOURCE_GET(x) (((x) & WLAN_GPIO_PIN20_SOURCE_MASK) >> WLAN_GPIO_PIN20_SOURCE_LSB)
+#define WLAN_GPIO_PIN20_SOURCE_SET(x) (((x) << WLAN_GPIO_PIN20_SOURCE_LSB) & WLAN_GPIO_PIN20_SOURCE_MASK)
+
+#define WLAN_GPIO_PIN21_ADDRESS 0x0000007c
+#define WLAN_GPIO_PIN21_OFFSET 0x0000007c
+#define WLAN_GPIO_PIN21_CONFIG_MSB 13
+#define WLAN_GPIO_PIN21_CONFIG_LSB 11
+#define WLAN_GPIO_PIN21_CONFIG_MASK 0x00003800
+#define WLAN_GPIO_PIN21_CONFIG_GET(x) (((x) & WLAN_GPIO_PIN21_CONFIG_MASK) >> WLAN_GPIO_PIN21_CONFIG_LSB)
+#define WLAN_GPIO_PIN21_CONFIG_SET(x) (((x) << WLAN_GPIO_PIN21_CONFIG_LSB) & WLAN_GPIO_PIN21_CONFIG_MASK)
+#define WLAN_GPIO_PIN21_WAKEUP_ENABLE_MSB 10
+#define WLAN_GPIO_PIN21_WAKEUP_ENABLE_LSB 10
+#define WLAN_GPIO_PIN21_WAKEUP_ENABLE_MASK 0x00000400
+#define WLAN_GPIO_PIN21_WAKEUP_ENABLE_GET(x) (((x) & WLAN_GPIO_PIN21_WAKEUP_ENABLE_MASK) >> WLAN_GPIO_PIN21_WAKEUP_ENABLE_LSB)
+#define WLAN_GPIO_PIN21_WAKEUP_ENABLE_SET(x) (((x) << WLAN_GPIO_PIN21_WAKEUP_ENABLE_LSB) & WLAN_GPIO_PIN21_WAKEUP_ENABLE_MASK)
+#define WLAN_GPIO_PIN21_INT_TYPE_MSB 9
+#define WLAN_GPIO_PIN21_INT_TYPE_LSB 7
+#define WLAN_GPIO_PIN21_INT_TYPE_MASK 0x00000380
+#define WLAN_GPIO_PIN21_INT_TYPE_GET(x) (((x) & WLAN_GPIO_PIN21_INT_TYPE_MASK) >> WLAN_GPIO_PIN21_INT_TYPE_LSB)
+#define WLAN_GPIO_PIN21_INT_TYPE_SET(x) (((x) << WLAN_GPIO_PIN21_INT_TYPE_LSB) & WLAN_GPIO_PIN21_INT_TYPE_MASK)
+#define WLAN_GPIO_PIN21_PAD_PULL_MSB 6
+#define WLAN_GPIO_PIN21_PAD_PULL_LSB 5
+#define WLAN_GPIO_PIN21_PAD_PULL_MASK 0x00000060
+#define WLAN_GPIO_PIN21_PAD_PULL_GET(x) (((x) & WLAN_GPIO_PIN21_PAD_PULL_MASK) >> WLAN_GPIO_PIN21_PAD_PULL_LSB)
+#define WLAN_GPIO_PIN21_PAD_PULL_SET(x) (((x) << WLAN_GPIO_PIN21_PAD_PULL_LSB) & WLAN_GPIO_PIN21_PAD_PULL_MASK)
+#define WLAN_GPIO_PIN21_PAD_STRENGTH_MSB 4
+#define WLAN_GPIO_PIN21_PAD_STRENGTH_LSB 3
+#define WLAN_GPIO_PIN21_PAD_STRENGTH_MASK 0x00000018
+#define WLAN_GPIO_PIN21_PAD_STRENGTH_GET(x) (((x) & WLAN_GPIO_PIN21_PAD_STRENGTH_MASK) >> WLAN_GPIO_PIN21_PAD_STRENGTH_LSB)
+#define WLAN_GPIO_PIN21_PAD_STRENGTH_SET(x) (((x) << WLAN_GPIO_PIN21_PAD_STRENGTH_LSB) & WLAN_GPIO_PIN21_PAD_STRENGTH_MASK)
+#define WLAN_GPIO_PIN21_PAD_DRIVER_MSB 2
+#define WLAN_GPIO_PIN21_PAD_DRIVER_LSB 2
+#define WLAN_GPIO_PIN21_PAD_DRIVER_MASK 0x00000004
+#define WLAN_GPIO_PIN21_PAD_DRIVER_GET(x) (((x) & WLAN_GPIO_PIN21_PAD_DRIVER_MASK) >> WLAN_GPIO_PIN21_PAD_DRIVER_LSB)
+#define WLAN_GPIO_PIN21_PAD_DRIVER_SET(x) (((x) << WLAN_GPIO_PIN21_PAD_DRIVER_LSB) & WLAN_GPIO_PIN21_PAD_DRIVER_MASK)
+#define WLAN_GPIO_PIN21_SOURCE_MSB 0
+#define WLAN_GPIO_PIN21_SOURCE_LSB 0
+#define WLAN_GPIO_PIN21_SOURCE_MASK 0x00000001
+#define WLAN_GPIO_PIN21_SOURCE_GET(x) (((x) & WLAN_GPIO_PIN21_SOURCE_MASK) >> WLAN_GPIO_PIN21_SOURCE_LSB)
+#define WLAN_GPIO_PIN21_SOURCE_SET(x) (((x) << WLAN_GPIO_PIN21_SOURCE_LSB) & WLAN_GPIO_PIN21_SOURCE_MASK)
+
+#define WLAN_GPIO_PIN22_ADDRESS 0x00000080
+#define WLAN_GPIO_PIN22_OFFSET 0x00000080
+#define WLAN_GPIO_PIN22_CONFIG_MSB 13
+#define WLAN_GPIO_PIN22_CONFIG_LSB 11
+#define WLAN_GPIO_PIN22_CONFIG_MASK 0x00003800
+#define WLAN_GPIO_PIN22_CONFIG_GET(x) (((x) & WLAN_GPIO_PIN22_CONFIG_MASK) >> WLAN_GPIO_PIN22_CONFIG_LSB)
+#define WLAN_GPIO_PIN22_CONFIG_SET(x) (((x) << WLAN_GPIO_PIN22_CONFIG_LSB) & WLAN_GPIO_PIN22_CONFIG_MASK)
+#define WLAN_GPIO_PIN22_WAKEUP_ENABLE_MSB 10
+#define WLAN_GPIO_PIN22_WAKEUP_ENABLE_LSB 10
+#define WLAN_GPIO_PIN22_WAKEUP_ENABLE_MASK 0x00000400
+#define WLAN_GPIO_PIN22_WAKEUP_ENABLE_GET(x) (((x) & WLAN_GPIO_PIN22_WAKEUP_ENABLE_MASK) >> WLAN_GPIO_PIN22_WAKEUP_ENABLE_LSB)
+#define WLAN_GPIO_PIN22_WAKEUP_ENABLE_SET(x) (((x) << WLAN_GPIO_PIN22_WAKEUP_ENABLE_LSB) & WLAN_GPIO_PIN22_WAKEUP_ENABLE_MASK)
+#define WLAN_GPIO_PIN22_INT_TYPE_MSB 9
+#define WLAN_GPIO_PIN22_INT_TYPE_LSB 7
+#define WLAN_GPIO_PIN22_INT_TYPE_MASK 0x00000380
+#define WLAN_GPIO_PIN22_INT_TYPE_GET(x) (((x) & WLAN_GPIO_PIN22_INT_TYPE_MASK) >> WLAN_GPIO_PIN22_INT_TYPE_LSB)
+#define WLAN_GPIO_PIN22_INT_TYPE_SET(x) (((x) << WLAN_GPIO_PIN22_INT_TYPE_LSB) & WLAN_GPIO_PIN22_INT_TYPE_MASK)
+#define WLAN_GPIO_PIN22_PAD_PULL_MSB 6
+#define WLAN_GPIO_PIN22_PAD_PULL_LSB 5
+#define WLAN_GPIO_PIN22_PAD_PULL_MASK 0x00000060
+#define WLAN_GPIO_PIN22_PAD_PULL_GET(x) (((x) & WLAN_GPIO_PIN22_PAD_PULL_MASK) >> WLAN_GPIO_PIN22_PAD_PULL_LSB)
+#define WLAN_GPIO_PIN22_PAD_PULL_SET(x) (((x) << WLAN_GPIO_PIN22_PAD_PULL_LSB) & WLAN_GPIO_PIN22_PAD_PULL_MASK)
+#define WLAN_GPIO_PIN22_PAD_STRENGTH_MSB 4
+#define WLAN_GPIO_PIN22_PAD_STRENGTH_LSB 3
+#define WLAN_GPIO_PIN22_PAD_STRENGTH_MASK 0x00000018
+#define WLAN_GPIO_PIN22_PAD_STRENGTH_GET(x) (((x) & WLAN_GPIO_PIN22_PAD_STRENGTH_MASK) >> WLAN_GPIO_PIN22_PAD_STRENGTH_LSB)
+#define WLAN_GPIO_PIN22_PAD_STRENGTH_SET(x) (((x) << WLAN_GPIO_PIN22_PAD_STRENGTH_LSB) & WLAN_GPIO_PIN22_PAD_STRENGTH_MASK)
+#define WLAN_GPIO_PIN22_PAD_DRIVER_MSB 2
+#define WLAN_GPIO_PIN22_PAD_DRIVER_LSB 2
+#define WLAN_GPIO_PIN22_PAD_DRIVER_MASK 0x00000004
+#define WLAN_GPIO_PIN22_PAD_DRIVER_GET(x) (((x) & WLAN_GPIO_PIN22_PAD_DRIVER_MASK) >> WLAN_GPIO_PIN22_PAD_DRIVER_LSB)
+#define WLAN_GPIO_PIN22_PAD_DRIVER_SET(x) (((x) << WLAN_GPIO_PIN22_PAD_DRIVER_LSB) & WLAN_GPIO_PIN22_PAD_DRIVER_MASK)
+#define WLAN_GPIO_PIN22_SOURCE_MSB 0
+#define WLAN_GPIO_PIN22_SOURCE_LSB 0
+#define WLAN_GPIO_PIN22_SOURCE_MASK 0x00000001
+#define WLAN_GPIO_PIN22_SOURCE_GET(x) (((x) & WLAN_GPIO_PIN22_SOURCE_MASK) >> WLAN_GPIO_PIN22_SOURCE_LSB)
+#define WLAN_GPIO_PIN22_SOURCE_SET(x) (((x) << WLAN_GPIO_PIN22_SOURCE_LSB) & WLAN_GPIO_PIN22_SOURCE_MASK)
+
+#define WLAN_GPIO_PIN23_ADDRESS 0x00000084
+#define WLAN_GPIO_PIN23_OFFSET 0x00000084
+#define WLAN_GPIO_PIN23_CONFIG_MSB 13
+#define WLAN_GPIO_PIN23_CONFIG_LSB 11
+#define WLAN_GPIO_PIN23_CONFIG_MASK 0x00003800
+#define WLAN_GPIO_PIN23_CONFIG_GET(x) (((x) & WLAN_GPIO_PIN23_CONFIG_MASK) >> WLAN_GPIO_PIN23_CONFIG_LSB)
+#define WLAN_GPIO_PIN23_CONFIG_SET(x) (((x) << WLAN_GPIO_PIN23_CONFIG_LSB) & WLAN_GPIO_PIN23_CONFIG_MASK)
+#define WLAN_GPIO_PIN23_WAKEUP_ENABLE_MSB 10
+#define WLAN_GPIO_PIN23_WAKEUP_ENABLE_LSB 10
+#define WLAN_GPIO_PIN23_WAKEUP_ENABLE_MASK 0x00000400
+#define WLAN_GPIO_PIN23_WAKEUP_ENABLE_GET(x) (((x) & WLAN_GPIO_PIN23_WAKEUP_ENABLE_MASK) >> WLAN_GPIO_PIN23_WAKEUP_ENABLE_LSB)
+#define WLAN_GPIO_PIN23_WAKEUP_ENABLE_SET(x) (((x) << WLAN_GPIO_PIN23_WAKEUP_ENABLE_LSB) & WLAN_GPIO_PIN23_WAKEUP_ENABLE_MASK)
+#define WLAN_GPIO_PIN23_INT_TYPE_MSB 9
+#define WLAN_GPIO_PIN23_INT_TYPE_LSB 7
+#define WLAN_GPIO_PIN23_INT_TYPE_MASK 0x00000380
+#define WLAN_GPIO_PIN23_INT_TYPE_GET(x) (((x) & WLAN_GPIO_PIN23_INT_TYPE_MASK) >> WLAN_GPIO_PIN23_INT_TYPE_LSB)
+#define WLAN_GPIO_PIN23_INT_TYPE_SET(x) (((x) << WLAN_GPIO_PIN23_INT_TYPE_LSB) & WLAN_GPIO_PIN23_INT_TYPE_MASK)
+#define WLAN_GPIO_PIN23_PAD_DRIVER_MSB 2
+#define WLAN_GPIO_PIN23_PAD_DRIVER_LSB 2
+#define WLAN_GPIO_PIN23_PAD_DRIVER_MASK 0x00000004
+#define WLAN_GPIO_PIN23_PAD_DRIVER_GET(x) (((x) & WLAN_GPIO_PIN23_PAD_DRIVER_MASK) >> WLAN_GPIO_PIN23_PAD_DRIVER_LSB)
+#define WLAN_GPIO_PIN23_PAD_DRIVER_SET(x) (((x) << WLAN_GPIO_PIN23_PAD_DRIVER_LSB) & WLAN_GPIO_PIN23_PAD_DRIVER_MASK)
+#define WLAN_GPIO_PIN23_SOURCE_MSB 0
+#define WLAN_GPIO_PIN23_SOURCE_LSB 0
+#define WLAN_GPIO_PIN23_SOURCE_MASK 0x00000001
+#define WLAN_GPIO_PIN23_SOURCE_GET(x) (((x) & WLAN_GPIO_PIN23_SOURCE_MASK) >> WLAN_GPIO_PIN23_SOURCE_LSB)
+#define WLAN_GPIO_PIN23_SOURCE_SET(x) (((x) << WLAN_GPIO_PIN23_SOURCE_LSB) & WLAN_GPIO_PIN23_SOURCE_MASK)
+
+#define WLAN_GPIO_PIN24_ADDRESS 0x00000088
+#define WLAN_GPIO_PIN24_OFFSET 0x00000088
+#define WLAN_GPIO_PIN24_CONFIG_MSB 13
+#define WLAN_GPIO_PIN24_CONFIG_LSB 11
+#define WLAN_GPIO_PIN24_CONFIG_MASK 0x00003800
+#define WLAN_GPIO_PIN24_CONFIG_GET(x) (((x) & WLAN_GPIO_PIN24_CONFIG_MASK) >> WLAN_GPIO_PIN24_CONFIG_LSB)
+#define WLAN_GPIO_PIN24_CONFIG_SET(x) (((x) << WLAN_GPIO_PIN24_CONFIG_LSB) & WLAN_GPIO_PIN24_CONFIG_MASK)
+#define WLAN_GPIO_PIN24_WAKEUP_ENABLE_MSB 10
+#define WLAN_GPIO_PIN24_WAKEUP_ENABLE_LSB 10
+#define WLAN_GPIO_PIN24_WAKEUP_ENABLE_MASK 0x00000400
+#define WLAN_GPIO_PIN24_WAKEUP_ENABLE_GET(x) (((x) & WLAN_GPIO_PIN24_WAKEUP_ENABLE_MASK) >> WLAN_GPIO_PIN24_WAKEUP_ENABLE_LSB)
+#define WLAN_GPIO_PIN24_WAKEUP_ENABLE_SET(x) (((x) << WLAN_GPIO_PIN24_WAKEUP_ENABLE_LSB) & WLAN_GPIO_PIN24_WAKEUP_ENABLE_MASK)
+#define WLAN_GPIO_PIN24_INT_TYPE_MSB 9
+#define WLAN_GPIO_PIN24_INT_TYPE_LSB 7
+#define WLAN_GPIO_PIN24_INT_TYPE_MASK 0x00000380
+#define WLAN_GPIO_PIN24_INT_TYPE_GET(x) (((x) & WLAN_GPIO_PIN24_INT_TYPE_MASK) >> WLAN_GPIO_PIN24_INT_TYPE_LSB)
+#define WLAN_GPIO_PIN24_INT_TYPE_SET(x) (((x) << WLAN_GPIO_PIN24_INT_TYPE_LSB) & WLAN_GPIO_PIN24_INT_TYPE_MASK)
+#define WLAN_GPIO_PIN24_PAD_DRIVER_MSB 2
+#define WLAN_GPIO_PIN24_PAD_DRIVER_LSB 2
+#define WLAN_GPIO_PIN24_PAD_DRIVER_MASK 0x00000004
+#define WLAN_GPIO_PIN24_PAD_DRIVER_GET(x) (((x) & WLAN_GPIO_PIN24_PAD_DRIVER_MASK) >> WLAN_GPIO_PIN24_PAD_DRIVER_LSB)
+#define WLAN_GPIO_PIN24_PAD_DRIVER_SET(x) (((x) << WLAN_GPIO_PIN24_PAD_DRIVER_LSB) & WLAN_GPIO_PIN24_PAD_DRIVER_MASK)
+#define WLAN_GPIO_PIN24_SOURCE_MSB 0
+#define WLAN_GPIO_PIN24_SOURCE_LSB 0
+#define WLAN_GPIO_PIN24_SOURCE_MASK 0x00000001
+#define WLAN_GPIO_PIN24_SOURCE_GET(x) (((x) & WLAN_GPIO_PIN24_SOURCE_MASK) >> WLAN_GPIO_PIN24_SOURCE_LSB)
+#define WLAN_GPIO_PIN24_SOURCE_SET(x) (((x) << WLAN_GPIO_PIN24_SOURCE_LSB) & WLAN_GPIO_PIN24_SOURCE_MASK)
+
+#define WLAN_GPIO_PIN25_ADDRESS 0x0000008c
+#define WLAN_GPIO_PIN25_OFFSET 0x0000008c
+#define WLAN_GPIO_PIN25_CONFIG_MSB 13
+#define WLAN_GPIO_PIN25_CONFIG_LSB 11
+#define WLAN_GPIO_PIN25_CONFIG_MASK 0x00003800
+#define WLAN_GPIO_PIN25_CONFIG_GET(x) (((x) & WLAN_GPIO_PIN25_CONFIG_MASK) >> WLAN_GPIO_PIN25_CONFIG_LSB)
+#define WLAN_GPIO_PIN25_CONFIG_SET(x) (((x) << WLAN_GPIO_PIN25_CONFIG_LSB) & WLAN_GPIO_PIN25_CONFIG_MASK)
+#define WLAN_GPIO_PIN25_WAKEUP_ENABLE_MSB 10
+#define WLAN_GPIO_PIN25_WAKEUP_ENABLE_LSB 10
+#define WLAN_GPIO_PIN25_WAKEUP_ENABLE_MASK 0x00000400
+#define WLAN_GPIO_PIN25_WAKEUP_ENABLE_GET(x) (((x) & WLAN_GPIO_PIN25_WAKEUP_ENABLE_MASK) >> WLAN_GPIO_PIN25_WAKEUP_ENABLE_LSB)
+#define WLAN_GPIO_PIN25_WAKEUP_ENABLE_SET(x) (((x) << WLAN_GPIO_PIN25_WAKEUP_ENABLE_LSB) & WLAN_GPIO_PIN25_WAKEUP_ENABLE_MASK)
+#define WLAN_GPIO_PIN25_INT_TYPE_MSB 9
+#define WLAN_GPIO_PIN25_INT_TYPE_LSB 7
+#define WLAN_GPIO_PIN25_INT_TYPE_MASK 0x00000380
+#define WLAN_GPIO_PIN25_INT_TYPE_GET(x) (((x) & WLAN_GPIO_PIN25_INT_TYPE_MASK) >> WLAN_GPIO_PIN25_INT_TYPE_LSB)
+#define WLAN_GPIO_PIN25_INT_TYPE_SET(x) (((x) << WLAN_GPIO_PIN25_INT_TYPE_LSB) & WLAN_GPIO_PIN25_INT_TYPE_MASK)
+#define WLAN_GPIO_PIN25_PAD_DRIVER_MSB 2
+#define WLAN_GPIO_PIN25_PAD_DRIVER_LSB 2
+#define WLAN_GPIO_PIN25_PAD_DRIVER_MASK 0x00000004
+#define WLAN_GPIO_PIN25_PAD_DRIVER_GET(x) (((x) & WLAN_GPIO_PIN25_PAD_DRIVER_MASK) >> WLAN_GPIO_PIN25_PAD_DRIVER_LSB)
+#define WLAN_GPIO_PIN25_PAD_DRIVER_SET(x) (((x) << WLAN_GPIO_PIN25_PAD_DRIVER_LSB) & WLAN_GPIO_PIN25_PAD_DRIVER_MASK)
+#define WLAN_GPIO_PIN25_SOURCE_MSB 0
+#define WLAN_GPIO_PIN25_SOURCE_LSB 0
+#define WLAN_GPIO_PIN25_SOURCE_MASK 0x00000001
+#define WLAN_GPIO_PIN25_SOURCE_GET(x) (((x) & WLAN_GPIO_PIN25_SOURCE_MASK) >> WLAN_GPIO_PIN25_SOURCE_LSB)
+#define WLAN_GPIO_PIN25_SOURCE_SET(x) (((x) << WLAN_GPIO_PIN25_SOURCE_LSB) & WLAN_GPIO_PIN25_SOURCE_MASK)
+
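
The *_GET/*_SET pairs above all encode the same packed-field idiom: mask the field, shift it to or from bit 0. Purely as an illustration of how such accessors are normally combined in a read-modify-write sequence, a minimal sketch follows; the mapped register base pointer and the ioread32()/iowrite32() helpers are assumptions made for the example only and are not provided by this header or this patch.

/*
 * Illustration only (not part of the patch): update the PIN4 pad pull
 * field while preserving the other bits of the register.  gpio_base is
 * an assumed ioremap()'d base address for this register block.
 */
#include <linux/io.h>
#include <linux/types.h>

static void wlan_gpio_pin4_set_pad_pull(void __iomem *gpio_base, u32 pull)
{
	/* Read the current register value. */
	u32 val = ioread32(gpio_base + WLAN_GPIO_PIN4_OFFSET);

	val &= ~WLAN_GPIO_PIN4_PAD_PULL_MASK;     /* clear the old field   */
	val |= WLAN_GPIO_PIN4_PAD_PULL_SET(pull); /* insert the new value  */

	/* Write the modified value back. */
	iowrite32(val, gpio_base + WLAN_GPIO_PIN4_OFFSET);
}
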
+#define SDIO_ADDRESS 0x00000090
+#define SDIO_OFFSET 0x00000090
+#define SDIO_PINS_EN_MSB 0
+#define SDIO_PINS_EN_LSB 0
+#define SDIO_PINS_EN_MASK 0x00000001
+#define SDIO_PINS_EN_GET(x) (((x) & SDIO_PINS_EN_MASK) >> SDIO_PINS_EN_LSB)
+#define SDIO_PINS_EN_SET(x) (((x) << SDIO_PINS_EN_LSB) & SDIO_PINS_EN_MASK)
+
+#define FUNC_BUS_ADDRESS 0x00000094
+#define FUNC_BUS_OFFSET 0x00000094
+#define FUNC_BUS_GPIO_MODE_MSB 22
+#define FUNC_BUS_GPIO_MODE_LSB 22
+#define FUNC_BUS_GPIO_MODE_MASK 0x00400000
+#define FUNC_BUS_GPIO_MODE_GET(x) (((x) & FUNC_BUS_GPIO_MODE_MASK) >> FUNC_BUS_GPIO_MODE_LSB)
+#define FUNC_BUS_GPIO_MODE_SET(x) (((x) << FUNC_BUS_GPIO_MODE_LSB) & FUNC_BUS_GPIO_MODE_MASK)
+#define FUNC_BUS_OE_L_MSB 21
+#define FUNC_BUS_OE_L_LSB 0
+#define FUNC_BUS_OE_L_MASK 0x003fffff
+#define FUNC_BUS_OE_L_GET(x) (((x) & FUNC_BUS_OE_L_MASK) >> FUNC_BUS_OE_L_LSB)
+#define FUNC_BUS_OE_L_SET(x) (((x) << FUNC_BUS_OE_L_LSB) & FUNC_BUS_OE_L_MASK)
+
+#define WL_SOC_APB_ADDRESS 0x00000098
+#define WL_SOC_APB_OFFSET 0x00000098
+#define WL_SOC_APB_TOGGLE_MSB 0
+#define WL_SOC_APB_TOGGLE_LSB 0
+#define WL_SOC_APB_TOGGLE_MASK 0x00000001
+#define WL_SOC_APB_TOGGLE_GET(x) (((x) & WL_SOC_APB_TOGGLE_MASK) >> WL_SOC_APB_TOGGLE_LSB)
+#define WL_SOC_APB_TOGGLE_SET(x) (((x) << WL_SOC_APB_TOGGLE_LSB) & WL_SOC_APB_TOGGLE_MASK)
+
+#define WLAN_SIGMA_DELTA_ADDRESS 0x0000009c
+#define WLAN_SIGMA_DELTA_OFFSET 0x0000009c
+#define WLAN_SIGMA_DELTA_ENABLE_MSB 16
+#define WLAN_SIGMA_DELTA_ENABLE_LSB 16
+#define WLAN_SIGMA_DELTA_ENABLE_MASK 0x00010000
+#define WLAN_SIGMA_DELTA_ENABLE_GET(x) (((x) & WLAN_SIGMA_DELTA_ENABLE_MASK) >> WLAN_SIGMA_DELTA_ENABLE_LSB)
+#define WLAN_SIGMA_DELTA_ENABLE_SET(x) (((x) << WLAN_SIGMA_DELTA_ENABLE_LSB) & WLAN_SIGMA_DELTA_ENABLE_MASK)
+#define WLAN_SIGMA_DELTA_PRESCALAR_MSB 15
+#define WLAN_SIGMA_DELTA_PRESCALAR_LSB 8
+#define WLAN_SIGMA_DELTA_PRESCALAR_MASK 0x0000ff00
+#define WLAN_SIGMA_DELTA_PRESCALAR_GET(x) (((x) & WLAN_SIGMA_DELTA_PRESCALAR_MASK) >> WLAN_SIGMA_DELTA_PRESCALAR_LSB)
+#define WLAN_SIGMA_DELTA_PRESCALAR_SET(x) (((x) << WLAN_SIGMA_DELTA_PRESCALAR_LSB) & WLAN_SIGMA_DELTA_PRESCALAR_MASK)
+#define WLAN_SIGMA_DELTA_TARGET_MSB 7
+#define WLAN_SIGMA_DELTA_TARGET_LSB 0
+#define WLAN_SIGMA_DELTA_TARGET_MASK 0x000000ff
+#define WLAN_SIGMA_DELTA_TARGET_GET(x) (((x) & WLAN_SIGMA_DELTA_TARGET_MASK) >> WLAN_SIGMA_DELTA_TARGET_LSB)
+#define WLAN_SIGMA_DELTA_TARGET_SET(x) (((x) << WLAN_SIGMA_DELTA_TARGET_LSB) & WLAN_SIGMA_DELTA_TARGET_MASK)
+
+#define WL_BOOTSTRAP_ADDRESS 0x000000a0
+#define WL_BOOTSTRAP_OFFSET 0x000000a0
+#define WL_BOOTSTRAP_STATUS_MSB 22
+#define WL_BOOTSTRAP_STATUS_LSB 0
+#define WL_BOOTSTRAP_STATUS_MASK 0x007fffff
+#define WL_BOOTSTRAP_STATUS_GET(x) (((x) & WL_BOOTSTRAP_STATUS_MASK) >> WL_BOOTSTRAP_STATUS_LSB)
+#define WL_BOOTSTRAP_STATUS_SET(x) (((x) << WL_BOOTSTRAP_STATUS_LSB) & WL_BOOTSTRAP_STATUS_MASK)
+
+#define CLOCK_GPIO_ADDRESS 0x000000a4
+#define CLOCK_GPIO_OFFSET 0x000000a4
+#define CLOCK_GPIO_CLK_REQ_OUT_EN_MSB 2
+#define CLOCK_GPIO_CLK_REQ_OUT_EN_LSB 2
+#define CLOCK_GPIO_CLK_REQ_OUT_EN_MASK 0x00000004
+#define CLOCK_GPIO_CLK_REQ_OUT_EN_GET(x) (((x) & CLOCK_GPIO_CLK_REQ_OUT_EN_MASK) >> CLOCK_GPIO_CLK_REQ_OUT_EN_LSB)
+#define CLOCK_GPIO_CLK_REQ_OUT_EN_SET(x) (((x) << CLOCK_GPIO_CLK_REQ_OUT_EN_LSB) & CLOCK_GPIO_CLK_REQ_OUT_EN_MASK)
+#define CLOCK_GPIO_BT_CLK_REQ_EN_MSB 1
+#define CLOCK_GPIO_BT_CLK_REQ_EN_LSB 1
+#define CLOCK_GPIO_BT_CLK_REQ_EN_MASK 0x00000002
+#define CLOCK_GPIO_BT_CLK_REQ_EN_GET(x) (((x) & CLOCK_GPIO_BT_CLK_REQ_EN_MASK) >> CLOCK_GPIO_BT_CLK_REQ_EN_LSB)
+#define CLOCK_GPIO_BT_CLK_REQ_EN_SET(x) (((x) << CLOCK_GPIO_BT_CLK_REQ_EN_LSB) & CLOCK_GPIO_BT_CLK_REQ_EN_MASK)
+#define CLOCK_GPIO_BT_CLK_OUT_EN_MSB 0
+#define CLOCK_GPIO_BT_CLK_OUT_EN_LSB 0
+#define CLOCK_GPIO_BT_CLK_OUT_EN_MASK 0x00000001
+#define CLOCK_GPIO_BT_CLK_OUT_EN_GET(x) (((x) & CLOCK_GPIO_BT_CLK_OUT_EN_MASK) >> CLOCK_GPIO_BT_CLK_OUT_EN_LSB)
+#define CLOCK_GPIO_BT_CLK_OUT_EN_SET(x) (((x) << CLOCK_GPIO_BT_CLK_OUT_EN_LSB) & CLOCK_GPIO_BT_CLK_OUT_EN_MASK)
+
+#define WLAN_DEBUG_CONTROL_ADDRESS 0x000000a8
+#define WLAN_DEBUG_CONTROL_OFFSET 0x000000a8
+#define WLAN_DEBUG_CONTROL_ENABLE_MSB 0
+#define WLAN_DEBUG_CONTROL_ENABLE_LSB 0
+#define WLAN_DEBUG_CONTROL_ENABLE_MASK 0x00000001
+#define WLAN_DEBUG_CONTROL_ENABLE_GET(x) (((x) & WLAN_DEBUG_CONTROL_ENABLE_MASK) >> WLAN_DEBUG_CONTROL_ENABLE_LSB)
+#define WLAN_DEBUG_CONTROL_ENABLE_SET(x) (((x) << WLAN_DEBUG_CONTROL_ENABLE_LSB) & WLAN_DEBUG_CONTROL_ENABLE_MASK)
+
+#define WLAN_DEBUG_INPUT_SEL_ADDRESS 0x000000ac
+#define WLAN_DEBUG_INPUT_SEL_OFFSET 0x000000ac
+#define WLAN_DEBUG_INPUT_SEL_SHIFT_MSB 5
+#define WLAN_DEBUG_INPUT_SEL_SHIFT_LSB 4
+#define WLAN_DEBUG_INPUT_SEL_SHIFT_MASK 0x00000030
+#define WLAN_DEBUG_INPUT_SEL_SHIFT_GET(x) (((x) & WLAN_DEBUG_INPUT_SEL_SHIFT_MASK) >> WLAN_DEBUG_INPUT_SEL_SHIFT_LSB)
+#define WLAN_DEBUG_INPUT_SEL_SHIFT_SET(x) (((x) << WLAN_DEBUG_INPUT_SEL_SHIFT_LSB) & WLAN_DEBUG_INPUT_SEL_SHIFT_MASK)
+#define WLAN_DEBUG_INPUT_SEL_SRC_MSB 3
+#define WLAN_DEBUG_INPUT_SEL_SRC_LSB 0
+#define WLAN_DEBUG_INPUT_SEL_SRC_MASK 0x0000000f
+#define WLAN_DEBUG_INPUT_SEL_SRC_GET(x) (((x) & WLAN_DEBUG_INPUT_SEL_SRC_MASK) >> WLAN_DEBUG_INPUT_SEL_SRC_LSB)
+#define WLAN_DEBUG_INPUT_SEL_SRC_SET(x) (((x) << WLAN_DEBUG_INPUT_SEL_SRC_LSB) & WLAN_DEBUG_INPUT_SEL_SRC_MASK)
+
+#define WLAN_DEBUG_OUT_ADDRESS 0x000000b0
+#define WLAN_DEBUG_OUT_OFFSET 0x000000b0
+#define WLAN_DEBUG_OUT_DATA_MSB 17
+#define WLAN_DEBUG_OUT_DATA_LSB 0
+#define WLAN_DEBUG_OUT_DATA_MASK 0x0003ffff
+#define WLAN_DEBUG_OUT_DATA_GET(x) (((x) & WLAN_DEBUG_OUT_DATA_MASK) >> WLAN_DEBUG_OUT_DATA_LSB)
+#define WLAN_DEBUG_OUT_DATA_SET(x) (((x) << WLAN_DEBUG_OUT_DATA_LSB) & WLAN_DEBUG_OUT_DATA_MASK)
+
+#define WLAN_RESET_TUPLE_STATUS_ADDRESS 0x000000b4
+#define WLAN_RESET_TUPLE_STATUS_OFFSET 0x000000b4
+#define WLAN_RESET_TUPLE_STATUS_TEST_RESET_TUPLE_MSB 11
+#define WLAN_RESET_TUPLE_STATUS_TEST_RESET_TUPLE_LSB 8
+#define WLAN_RESET_TUPLE_STATUS_TEST_RESET_TUPLE_MASK 0x00000f00
+#define WLAN_RESET_TUPLE_STATUS_TEST_RESET_TUPLE_GET(x) (((x) & WLAN_RESET_TUPLE_STATUS_TEST_RESET_TUPLE_MASK) >> WLAN_RESET_TUPLE_STATUS_TEST_RESET_TUPLE_LSB)
+#define WLAN_RESET_TUPLE_STATUS_TEST_RESET_TUPLE_SET(x) (((x) << WLAN_RESET_TUPLE_STATUS_TEST_RESET_TUPLE_LSB) & WLAN_RESET_TUPLE_STATUS_TEST_RESET_TUPLE_MASK)
+#define WLAN_RESET_TUPLE_STATUS_PIN_RESET_TUPLE_MSB 7
+#define WLAN_RESET_TUPLE_STATUS_PIN_RESET_TUPLE_LSB 0
+#define WLAN_RESET_TUPLE_STATUS_PIN_RESET_TUPLE_MASK 0x000000ff
+#define WLAN_RESET_TUPLE_STATUS_PIN_RESET_TUPLE_GET(x) (((x) & WLAN_RESET_TUPLE_STATUS_PIN_RESET_TUPLE_MASK) >> WLAN_RESET_TUPLE_STATUS_PIN_RESET_TUPLE_LSB)
+#define WLAN_RESET_TUPLE_STATUS_PIN_RESET_TUPLE_SET(x) (((x) << WLAN_RESET_TUPLE_STATUS_PIN_RESET_TUPLE_LSB) & WLAN_RESET_TUPLE_STATUS_PIN_RESET_TUPLE_MASK)
+
+#define ANTENNA_SLEEP_CONTROL_ADDRESS 0x000000b8
+#define ANTENNA_SLEEP_CONTROL_OFFSET 0x000000b8
+#define ANTENNA_SLEEP_CONTROL_OVERRIDE_MSB 14
+#define ANTENNA_SLEEP_CONTROL_OVERRIDE_LSB 10
+#define ANTENNA_SLEEP_CONTROL_OVERRIDE_MASK 0x00007c00
+#define ANTENNA_SLEEP_CONTROL_OVERRIDE_GET(x) (((x) & ANTENNA_SLEEP_CONTROL_OVERRIDE_MASK) >> ANTENNA_SLEEP_CONTROL_OVERRIDE_LSB)
+#define ANTENNA_SLEEP_CONTROL_OVERRIDE_SET(x) (((x) << ANTENNA_SLEEP_CONTROL_OVERRIDE_LSB) & ANTENNA_SLEEP_CONTROL_OVERRIDE_MASK)
+#define ANTENNA_SLEEP_CONTROL_VALUE_MSB 9
+#define ANTENNA_SLEEP_CONTROL_VALUE_LSB 5
+#define ANTENNA_SLEEP_CONTROL_VALUE_MASK 0x000003e0
+#define ANTENNA_SLEEP_CONTROL_VALUE_GET(x) (((x) & ANTENNA_SLEEP_CONTROL_VALUE_MASK) >> ANTENNA_SLEEP_CONTROL_VALUE_LSB)
+#define ANTENNA_SLEEP_CONTROL_VALUE_SET(x) (((x) << ANTENNA_SLEEP_CONTROL_VALUE_LSB) & ANTENNA_SLEEP_CONTROL_VALUE_MASK)
+#define ANTENNA_SLEEP_CONTROL_ENABLE_MSB 4
+#define ANTENNA_SLEEP_CONTROL_ENABLE_LSB 0
+#define ANTENNA_SLEEP_CONTROL_ENABLE_MASK 0x0000001f
+#define ANTENNA_SLEEP_CONTROL_ENABLE_GET(x) (((x) & ANTENNA_SLEEP_CONTROL_ENABLE_MASK) >> ANTENNA_SLEEP_CONTROL_ENABLE_LSB)
+#define ANTENNA_SLEEP_CONTROL_ENABLE_SET(x) (((x) << ANTENNA_SLEEP_CONTROL_ENABLE_LSB) & ANTENNA_SLEEP_CONTROL_ENABLE_MASK)
+
+
+#ifndef __ASSEMBLER__
+
+typedef struct gpio_athr_wlan_reg_reg_s {
+ volatile unsigned int wlan_gpio_out;
+ volatile unsigned int wlan_gpio_out_w1ts;
+ volatile unsigned int wlan_gpio_out_w1tc;
+ volatile unsigned int wlan_gpio_enable;
+ volatile unsigned int wlan_gpio_enable_w1ts;
+ volatile unsigned int wlan_gpio_enable_w1tc;
+ volatile unsigned int wlan_gpio_in;
+ volatile unsigned int wlan_gpio_status;
+ volatile unsigned int wlan_gpio_status_w1ts;
+ volatile unsigned int wlan_gpio_status_w1tc;
+ volatile unsigned int wlan_gpio_pin0;
+ volatile unsigned int wlan_gpio_pin1;
+ volatile unsigned int wlan_gpio_pin2;
+ volatile unsigned int wlan_gpio_pin3;
+ volatile unsigned int wlan_gpio_pin4;
+ volatile unsigned int wlan_gpio_pin5;
+ volatile unsigned int wlan_gpio_pin6;
+ volatile unsigned int wlan_gpio_pin7;
+ volatile unsigned int wlan_gpio_pin8;
+ volatile unsigned int wlan_gpio_pin9;
+ volatile unsigned int wlan_gpio_pin10;
+ volatile unsigned int wlan_gpio_pin11;
+ volatile unsigned int wlan_gpio_pin12;
+ volatile unsigned int wlan_gpio_pin13;
+ volatile unsigned int wlan_gpio_pin14;
+ volatile unsigned int wlan_gpio_pin15;
+ volatile unsigned int wlan_gpio_pin16;
+ volatile unsigned int wlan_gpio_pin17;
+ volatile unsigned int wlan_gpio_pin18;
+ volatile unsigned int wlan_gpio_pin19;
+ volatile unsigned int wlan_gpio_pin20;
+ volatile unsigned int wlan_gpio_pin21;
+ volatile unsigned int wlan_gpio_pin22;
+ volatile unsigned int wlan_gpio_pin23;
+ volatile unsigned int wlan_gpio_pin24;
+ volatile unsigned int wlan_gpio_pin25;
+ volatile unsigned int sdio;
+ volatile unsigned int func_bus;
+ volatile unsigned int wl_soc_apb;
+ volatile unsigned int wlan_sigma_delta;
+ volatile unsigned int wl_bootstrap;
+ volatile unsigned int clock_gpio;
+ volatile unsigned int wlan_debug_control;
+ volatile unsigned int wlan_debug_input_sel;
+ volatile unsigned int wlan_debug_out;
+ volatile unsigned int wlan_reset_tuple_status;
+ volatile unsigned int antenna_sleep_control;
+} gpio_athr_wlan_reg_reg_t;
+
+#endif /* __ASSEMBLER__ */
+
+#endif /* _GPIO_ATHR_WLAN_REG_H_ */
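
For context only (not part of the patch): the *_GET/*_SET macros defined above all follow the same shift-and-mask pattern, and are normally used in a read-modify-write sequence when updating a single field of a register. The minimal standalone sketch below illustrates that idiom with the WLAN_SIGMA_DELTA fields; the shadow variable and sample values are illustrative assumptions, and the two macros are repeated here only so the sketch compiles on its own rather than against the header being added.

#include <stdint.h>
#include <stdio.h>

/* Copies of two field macros from gpio_athr_wlan_reg.h above, repeated
 * only so this example is self-contained. */
#define WLAN_SIGMA_DELTA_TARGET_LSB 0
#define WLAN_SIGMA_DELTA_TARGET_MASK 0x000000ff
#define WLAN_SIGMA_DELTA_TARGET_SET(x) (((x) << WLAN_SIGMA_DELTA_TARGET_LSB) & WLAN_SIGMA_DELTA_TARGET_MASK)
#define WLAN_SIGMA_DELTA_PRESCALAR_LSB 8
#define WLAN_SIGMA_DELTA_PRESCALAR_MASK 0x0000ff00
#define WLAN_SIGMA_DELTA_PRESCALAR_GET(x) (((x) & WLAN_SIGMA_DELTA_PRESCALAR_MASK) >> WLAN_SIGMA_DELTA_PRESCALAR_LSB)

/* Stand-in for the memory-mapped register; real driver code would go
 * through its own register access routines instead. */
static uint32_t sigma_delta_shadow = 0x00011205; /* ENABLE=1, PRESCALAR=0x12, TARGET=0x05 */

int main(void)
{
	/* Read-modify-write: replace only the 8-bit TARGET field. */
	uint32_t val = sigma_delta_shadow;

	val &= ~WLAN_SIGMA_DELTA_TARGET_MASK;     /* clear the old field value   */
	val |= WLAN_SIGMA_DELTA_TARGET_SET(0x7f); /* shift and mask the new one  */
	sigma_delta_shadow = val;

	/* The neighbouring PRESCALAR field is untouched; this prints 0x12. */
	printf("prescalar = 0x%02x\n",
	       WLAN_SIGMA_DELTA_PRESCALAR_GET(sigma_delta_shadow));
	return 0;
}
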
diff --git a/drivers/staging/ath6kl/include/common/AR6002/hw4.0/hw/gpio_reg.h b/drivers/staging/ath6kl/include/common/AR6002/hw4.0/hw/gpio_reg.h
new file mode 100644
index 000000000000..b3e7126e26a2
--- /dev/null
+++ b/drivers/staging/ath6kl/include/common/AR6002/hw4.0/hw/gpio_reg.h
@@ -0,0 +1,1094 @@
+// ------------------------------------------------------------------
+// Copyright (c) 2004-2010 Atheros Corporation. All rights reserved.
+//
+//
+// Permission to use, copy, modify, and/or distribute this software for any
+// purpose with or without fee is hereby granted, provided that the above
+// copyright notice and this permission notice appear in all copies.
+//
+// THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+// WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+// MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+// ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+// WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+// ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+// OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+//
+//
+// ------------------------------------------------------------------
+//===================================================================
+// Author(s): ="Atheros"
+//===================================================================
+
+
+#ifdef WLAN_HEADERS
+
+#include "gpio_athr_wlan_reg.h"
+
+
+#ifndef BT_HEADERS
+
+#define GPIO_OUT_ADDRESS WLAN_GPIO_OUT_ADDRESS
+#define GPIO_OUT_OFFSET WLAN_GPIO_OUT_OFFSET
+#define GPIO_OUT_DATA_MSB WLAN_GPIO_OUT_DATA_MSB
+#define GPIO_OUT_DATA_LSB WLAN_GPIO_OUT_DATA_LSB
+#define GPIO_OUT_DATA_MASK WLAN_GPIO_OUT_DATA_MASK
+#define GPIO_OUT_DATA_GET(x) WLAN_GPIO_OUT_DATA_GET(x)
+#define GPIO_OUT_DATA_SET(x) WLAN_GPIO_OUT_DATA_SET(x)
+#define GPIO_OUT_W1TS_ADDRESS WLAN_GPIO_OUT_W1TS_ADDRESS
+#define GPIO_OUT_W1TS_OFFSET WLAN_GPIO_OUT_W1TS_OFFSET
+#define GPIO_OUT_W1TS_DATA_MSB WLAN_GPIO_OUT_W1TS_DATA_MSB
+#define GPIO_OUT_W1TS_DATA_LSB WLAN_GPIO_OUT_W1TS_DATA_LSB
+#define GPIO_OUT_W1TS_DATA_MASK WLAN_GPIO_OUT_W1TS_DATA_MASK
+#define GPIO_OUT_W1TS_DATA_GET(x) WLAN_GPIO_OUT_W1TS_DATA_GET(x)
+#define GPIO_OUT_W1TS_DATA_SET(x) WLAN_GPIO_OUT_W1TS_DATA_SET(x)
+#define GPIO_OUT_W1TC_ADDRESS WLAN_GPIO_OUT_W1TC_ADDRESS
+#define GPIO_OUT_W1TC_OFFSET WLAN_GPIO_OUT_W1TC_OFFSET
+#define GPIO_OUT_W1TC_DATA_MSB WLAN_GPIO_OUT_W1TC_DATA_MSB
+#define GPIO_OUT_W1TC_DATA_LSB WLAN_GPIO_OUT_W1TC_DATA_LSB
+#define GPIO_OUT_W1TC_DATA_MASK WLAN_GPIO_OUT_W1TC_DATA_MASK
+#define GPIO_OUT_W1TC_DATA_GET(x) WLAN_GPIO_OUT_W1TC_DATA_GET(x)
+#define GPIO_OUT_W1TC_DATA_SET(x) WLAN_GPIO_OUT_W1TC_DATA_SET(x)
+#define GPIO_ENABLE_ADDRESS WLAN_GPIO_ENABLE_ADDRESS
+#define GPIO_ENABLE_OFFSET WLAN_GPIO_ENABLE_OFFSET
+#define GPIO_ENABLE_DATA_MSB WLAN_GPIO_ENABLE_DATA_MSB
+#define GPIO_ENABLE_DATA_LSB WLAN_GPIO_ENABLE_DATA_LSB
+#define GPIO_ENABLE_DATA_MASK WLAN_GPIO_ENABLE_DATA_MASK
+#define GPIO_ENABLE_DATA_GET(x) WLAN_GPIO_ENABLE_DATA_GET(x)
+#define GPIO_ENABLE_DATA_SET(x) WLAN_GPIO_ENABLE_DATA_SET(x)
+#define GPIO_ENABLE_W1TS_ADDRESS WLAN_GPIO_ENABLE_W1TS_ADDRESS
+#define GPIO_ENABLE_W1TS_OFFSET WLAN_GPIO_ENABLE_W1TS_OFFSET
+#define GPIO_ENABLE_W1TS_DATA_MSB WLAN_GPIO_ENABLE_W1TS_DATA_MSB
+#define GPIO_ENABLE_W1TS_DATA_LSB WLAN_GPIO_ENABLE_W1TS_DATA_LSB
+#define GPIO_ENABLE_W1TS_DATA_MASK WLAN_GPIO_ENABLE_W1TS_DATA_MASK
+#define GPIO_ENABLE_W1TS_DATA_GET(x) WLAN_GPIO_ENABLE_W1TS_DATA_GET(x)
+#define GPIO_ENABLE_W1TS_DATA_SET(x) WLAN_GPIO_ENABLE_W1TS_DATA_SET(x)
+#define GPIO_ENABLE_W1TC_ADDRESS WLAN_GPIO_ENABLE_W1TC_ADDRESS
+#define GPIO_ENABLE_W1TC_OFFSET WLAN_GPIO_ENABLE_W1TC_OFFSET
+#define GPIO_ENABLE_W1TC_DATA_MSB WLAN_GPIO_ENABLE_W1TC_DATA_MSB
+#define GPIO_ENABLE_W1TC_DATA_LSB WLAN_GPIO_ENABLE_W1TC_DATA_LSB
+#define GPIO_ENABLE_W1TC_DATA_MASK WLAN_GPIO_ENABLE_W1TC_DATA_MASK
+#define GPIO_ENABLE_W1TC_DATA_GET(x) WLAN_GPIO_ENABLE_W1TC_DATA_GET(x)
+#define GPIO_ENABLE_W1TC_DATA_SET(x) WLAN_GPIO_ENABLE_W1TC_DATA_SET(x)
+#define GPIO_IN_ADDRESS WLAN_GPIO_IN_ADDRESS
+#define GPIO_IN_OFFSET WLAN_GPIO_IN_OFFSET
+#define GPIO_IN_DATA_MSB WLAN_GPIO_IN_DATA_MSB
+#define GPIO_IN_DATA_LSB WLAN_GPIO_IN_DATA_LSB
+#define GPIO_IN_DATA_MASK WLAN_GPIO_IN_DATA_MASK
+#define GPIO_IN_DATA_GET(x) WLAN_GPIO_IN_DATA_GET(x)
+#define GPIO_IN_DATA_SET(x) WLAN_GPIO_IN_DATA_SET(x)
+#define GPIO_STATUS_ADDRESS WLAN_GPIO_STATUS_ADDRESS
+#define GPIO_STATUS_OFFSET WLAN_GPIO_STATUS_OFFSET
+#define GPIO_STATUS_INTERRUPT_MSB WLAN_GPIO_STATUS_INTERRUPT_MSB
+#define GPIO_STATUS_INTERRUPT_LSB WLAN_GPIO_STATUS_INTERRUPT_LSB
+#define GPIO_STATUS_INTERRUPT_MASK WLAN_GPIO_STATUS_INTERRUPT_MASK
+#define GPIO_STATUS_INTERRUPT_GET(x) WLAN_GPIO_STATUS_INTERRUPT_GET(x)
+#define GPIO_STATUS_INTERRUPT_SET(x) WLAN_GPIO_STATUS_INTERRUPT_SET(x)
+#define GPIO_STATUS_W1TS_ADDRESS WLAN_GPIO_STATUS_W1TS_ADDRESS
+#define GPIO_STATUS_W1TS_OFFSET WLAN_GPIO_STATUS_W1TS_OFFSET
+#define GPIO_STATUS_W1TS_INTERRUPT_MSB WLAN_GPIO_STATUS_W1TS_INTERRUPT_MSB
+#define GPIO_STATUS_W1TS_INTERRUPT_LSB WLAN_GPIO_STATUS_W1TS_INTERRUPT_LSB
+#define GPIO_STATUS_W1TS_INTERRUPT_MASK WLAN_GPIO_STATUS_W1TS_INTERRUPT_MASK
+#define GPIO_STATUS_W1TS_INTERRUPT_GET(x) WLAN_GPIO_STATUS_W1TS_INTERRUPT_GET(x)
+#define GPIO_STATUS_W1TS_INTERRUPT_SET(x) WLAN_GPIO_STATUS_W1TS_INTERRUPT_SET(x)
+#define GPIO_STATUS_W1TC_ADDRESS WLAN_GPIO_STATUS_W1TC_ADDRESS
+#define GPIO_STATUS_W1TC_OFFSET WLAN_GPIO_STATUS_W1TC_OFFSET
+#define GPIO_STATUS_W1TC_INTERRUPT_MSB WLAN_GPIO_STATUS_W1TC_INTERRUPT_MSB
+#define GPIO_STATUS_W1TC_INTERRUPT_LSB WLAN_GPIO_STATUS_W1TC_INTERRUPT_LSB
+#define GPIO_STATUS_W1TC_INTERRUPT_MASK WLAN_GPIO_STATUS_W1TC_INTERRUPT_MASK
+#define GPIO_STATUS_W1TC_INTERRUPT_GET(x) WLAN_GPIO_STATUS_W1TC_INTERRUPT_GET(x)
+#define GPIO_STATUS_W1TC_INTERRUPT_SET(x) WLAN_GPIO_STATUS_W1TC_INTERRUPT_SET(x)
+#define GPIO_PIN0_ADDRESS WLAN_GPIO_PIN0_ADDRESS
+#define GPIO_PIN0_OFFSET WLAN_GPIO_PIN0_OFFSET
+#define GPIO_PIN0_CONFIG_MSB WLAN_GPIO_PIN0_CONFIG_MSB
+#define GPIO_PIN0_CONFIG_LSB WLAN_GPIO_PIN0_CONFIG_LSB
+#define GPIO_PIN0_CONFIG_MASK WLAN_GPIO_PIN0_CONFIG_MASK
+#define GPIO_PIN0_CONFIG_GET(x) WLAN_GPIO_PIN0_CONFIG_GET(x)
+#define GPIO_PIN0_CONFIG_SET(x) WLAN_GPIO_PIN0_CONFIG_SET(x)
+#define GPIO_PIN0_WAKEUP_ENABLE_MSB WLAN_GPIO_PIN0_WAKEUP_ENABLE_MSB
+#define GPIO_PIN0_WAKEUP_ENABLE_LSB WLAN_GPIO_PIN0_WAKEUP_ENABLE_LSB
+#define GPIO_PIN0_WAKEUP_ENABLE_MASK WLAN_GPIO_PIN0_WAKEUP_ENABLE_MASK
+#define GPIO_PIN0_WAKEUP_ENABLE_GET(x) WLAN_GPIO_PIN0_WAKEUP_ENABLE_GET(x)
+#define GPIO_PIN0_WAKEUP_ENABLE_SET(x) WLAN_GPIO_PIN0_WAKEUP_ENABLE_SET(x)
+#define GPIO_PIN0_INT_TYPE_MSB WLAN_GPIO_PIN0_INT_TYPE_MSB
+#define GPIO_PIN0_INT_TYPE_LSB WLAN_GPIO_PIN0_INT_TYPE_LSB
+#define GPIO_PIN0_INT_TYPE_MASK WLAN_GPIO_PIN0_INT_TYPE_MASK
+#define GPIO_PIN0_INT_TYPE_GET(x) WLAN_GPIO_PIN0_INT_TYPE_GET(x)
+#define GPIO_PIN0_INT_TYPE_SET(x) WLAN_GPIO_PIN0_INT_TYPE_SET(x)
+#define GPIO_PIN0_PAD_PULL_MSB WLAN_GPIO_PIN0_PAD_PULL_MSB
+#define GPIO_PIN0_PAD_PULL_LSB WLAN_GPIO_PIN0_PAD_PULL_LSB
+#define GPIO_PIN0_PAD_PULL_MASK WLAN_GPIO_PIN0_PAD_PULL_MASK
+#define GPIO_PIN0_PAD_PULL_GET(x) WLAN_GPIO_PIN0_PAD_PULL_GET(x)
+#define GPIO_PIN0_PAD_PULL_SET(x) WLAN_GPIO_PIN0_PAD_PULL_SET(x)
+#define GPIO_PIN0_PAD_STRENGTH_MSB WLAN_GPIO_PIN0_PAD_STRENGTH_MSB
+#define GPIO_PIN0_PAD_STRENGTH_LSB WLAN_GPIO_PIN0_PAD_STRENGTH_LSB
+#define GPIO_PIN0_PAD_STRENGTH_MASK WLAN_GPIO_PIN0_PAD_STRENGTH_MASK
+#define GPIO_PIN0_PAD_STRENGTH_GET(x) WLAN_GPIO_PIN0_PAD_STRENGTH_GET(x)
+#define GPIO_PIN0_PAD_STRENGTH_SET(x) WLAN_GPIO_PIN0_PAD_STRENGTH_SET(x)
+#define GPIO_PIN0_PAD_DRIVER_MSB WLAN_GPIO_PIN0_PAD_DRIVER_MSB
+#define GPIO_PIN0_PAD_DRIVER_LSB WLAN_GPIO_PIN0_PAD_DRIVER_LSB
+#define GPIO_PIN0_PAD_DRIVER_MASK WLAN_GPIO_PIN0_PAD_DRIVER_MASK
+#define GPIO_PIN0_PAD_DRIVER_GET(x) WLAN_GPIO_PIN0_PAD_DRIVER_GET(x)
+#define GPIO_PIN0_PAD_DRIVER_SET(x) WLAN_GPIO_PIN0_PAD_DRIVER_SET(x)
+#define GPIO_PIN0_SOURCE_MSB WLAN_GPIO_PIN0_SOURCE_MSB
+#define GPIO_PIN0_SOURCE_LSB WLAN_GPIO_PIN0_SOURCE_LSB
+#define GPIO_PIN0_SOURCE_MASK WLAN_GPIO_PIN0_SOURCE_MASK
+#define GPIO_PIN0_SOURCE_GET(x) WLAN_GPIO_PIN0_SOURCE_GET(x)
+#define GPIO_PIN0_SOURCE_SET(x) WLAN_GPIO_PIN0_SOURCE_SET(x)
+#define GPIO_PIN1_ADDRESS WLAN_GPIO_PIN1_ADDRESS
+#define GPIO_PIN1_OFFSET WLAN_GPIO_PIN1_OFFSET
+#define GPIO_PIN1_CONFIG_MSB WLAN_GPIO_PIN1_CONFIG_MSB
+#define GPIO_PIN1_CONFIG_LSB WLAN_GPIO_PIN1_CONFIG_LSB
+#define GPIO_PIN1_CONFIG_MASK WLAN_GPIO_PIN1_CONFIG_MASK
+#define GPIO_PIN1_CONFIG_GET(x) WLAN_GPIO_PIN1_CONFIG_GET(x)
+#define GPIO_PIN1_CONFIG_SET(x) WLAN_GPIO_PIN1_CONFIG_SET(x)
+#define GPIO_PIN1_WAKEUP_ENABLE_MSB WLAN_GPIO_PIN1_WAKEUP_ENABLE_MSB
+#define GPIO_PIN1_WAKEUP_ENABLE_LSB WLAN_GPIO_PIN1_WAKEUP_ENABLE_LSB
+#define GPIO_PIN1_WAKEUP_ENABLE_MASK WLAN_GPIO_PIN1_WAKEUP_ENABLE_MASK
+#define GPIO_PIN1_WAKEUP_ENABLE_GET(x) WLAN_GPIO_PIN1_WAKEUP_ENABLE_GET(x)
+#define GPIO_PIN1_WAKEUP_ENABLE_SET(x) WLAN_GPIO_PIN1_WAKEUP_ENABLE_SET(x)
+#define GPIO_PIN1_INT_TYPE_MSB WLAN_GPIO_PIN1_INT_TYPE_MSB
+#define GPIO_PIN1_INT_TYPE_LSB WLAN_GPIO_PIN1_INT_TYPE_LSB
+#define GPIO_PIN1_INT_TYPE_MASK WLAN_GPIO_PIN1_INT_TYPE_MASK
+#define GPIO_PIN1_INT_TYPE_GET(x) WLAN_GPIO_PIN1_INT_TYPE_GET(x)
+#define GPIO_PIN1_INT_TYPE_SET(x) WLAN_GPIO_PIN1_INT_TYPE_SET(x)
+#define GPIO_PIN1_PAD_PULL_MSB WLAN_GPIO_PIN1_PAD_PULL_MSB
+#define GPIO_PIN1_PAD_PULL_LSB WLAN_GPIO_PIN1_PAD_PULL_LSB
+#define GPIO_PIN1_PAD_PULL_MASK WLAN_GPIO_PIN1_PAD_PULL_MASK
+#define GPIO_PIN1_PAD_PULL_GET(x) WLAN_GPIO_PIN1_PAD_PULL_GET(x)
+#define GPIO_PIN1_PAD_PULL_SET(x) WLAN_GPIO_PIN1_PAD_PULL_SET(x)
+#define GPIO_PIN1_PAD_STRENGTH_MSB WLAN_GPIO_PIN1_PAD_STRENGTH_MSB
+#define GPIO_PIN1_PAD_STRENGTH_LSB WLAN_GPIO_PIN1_PAD_STRENGTH_LSB
+#define GPIO_PIN1_PAD_STRENGTH_MASK WLAN_GPIO_PIN1_PAD_STRENGTH_MASK
+#define GPIO_PIN1_PAD_STRENGTH_GET(x) WLAN_GPIO_PIN1_PAD_STRENGTH_GET(x)
+#define GPIO_PIN1_PAD_STRENGTH_SET(x) WLAN_GPIO_PIN1_PAD_STRENGTH_SET(x)
+#define GPIO_PIN1_PAD_DRIVER_MSB WLAN_GPIO_PIN1_PAD_DRIVER_MSB
+#define GPIO_PIN1_PAD_DRIVER_LSB WLAN_GPIO_PIN1_PAD_DRIVER_LSB
+#define GPIO_PIN1_PAD_DRIVER_MASK WLAN_GPIO_PIN1_PAD_DRIVER_MASK
+#define GPIO_PIN1_PAD_DRIVER_GET(x) WLAN_GPIO_PIN1_PAD_DRIVER_GET(x)
+#define GPIO_PIN1_PAD_DRIVER_SET(x) WLAN_GPIO_PIN1_PAD_DRIVER_SET(x)
+#define GPIO_PIN1_SOURCE_MSB WLAN_GPIO_PIN1_SOURCE_MSB
+#define GPIO_PIN1_SOURCE_LSB WLAN_GPIO_PIN1_SOURCE_LSB
+#define GPIO_PIN1_SOURCE_MASK WLAN_GPIO_PIN1_SOURCE_MASK
+#define GPIO_PIN1_SOURCE_GET(x) WLAN_GPIO_PIN1_SOURCE_GET(x)
+#define GPIO_PIN1_SOURCE_SET(x) WLAN_GPIO_PIN1_SOURCE_SET(x)
+#define GPIO_PIN2_ADDRESS WLAN_GPIO_PIN2_ADDRESS
+#define GPIO_PIN2_OFFSET WLAN_GPIO_PIN2_OFFSET
+#define GPIO_PIN2_CONFIG_MSB WLAN_GPIO_PIN2_CONFIG_MSB
+#define GPIO_PIN2_CONFIG_LSB WLAN_GPIO_PIN2_CONFIG_LSB
+#define GPIO_PIN2_CONFIG_MASK WLAN_GPIO_PIN2_CONFIG_MASK
+#define GPIO_PIN2_CONFIG_GET(x) WLAN_GPIO_PIN2_CONFIG_GET(x)
+#define GPIO_PIN2_CONFIG_SET(x) WLAN_GPIO_PIN2_CONFIG_SET(x)
+#define GPIO_PIN2_WAKEUP_ENABLE_MSB WLAN_GPIO_PIN2_WAKEUP_ENABLE_MSB
+#define GPIO_PIN2_WAKEUP_ENABLE_LSB WLAN_GPIO_PIN2_WAKEUP_ENABLE_LSB
+#define GPIO_PIN2_WAKEUP_ENABLE_MASK WLAN_GPIO_PIN2_WAKEUP_ENABLE_MASK
+#define GPIO_PIN2_WAKEUP_ENABLE_GET(x) WLAN_GPIO_PIN2_WAKEUP_ENABLE_GET(x)
+#define GPIO_PIN2_WAKEUP_ENABLE_SET(x) WLAN_GPIO_PIN2_WAKEUP_ENABLE_SET(x)
+#define GPIO_PIN2_INT_TYPE_MSB WLAN_GPIO_PIN2_INT_TYPE_MSB
+#define GPIO_PIN2_INT_TYPE_LSB WLAN_GPIO_PIN2_INT_TYPE_LSB
+#define GPIO_PIN2_INT_TYPE_MASK WLAN_GPIO_PIN2_INT_TYPE_MASK
+#define GPIO_PIN2_INT_TYPE_GET(x) WLAN_GPIO_PIN2_INT_TYPE_GET(x)
+#define GPIO_PIN2_INT_TYPE_SET(x) WLAN_GPIO_PIN2_INT_TYPE_SET(x)
+#define GPIO_PIN2_PAD_PULL_MSB WLAN_GPIO_PIN2_PAD_PULL_MSB
+#define GPIO_PIN2_PAD_PULL_LSB WLAN_GPIO_PIN2_PAD_PULL_LSB
+#define GPIO_PIN2_PAD_PULL_MASK WLAN_GPIO_PIN2_PAD_PULL_MASK
+#define GPIO_PIN2_PAD_PULL_GET(x) WLAN_GPIO_PIN2_PAD_PULL_GET(x)
+#define GPIO_PIN2_PAD_PULL_SET(x) WLAN_GPIO_PIN2_PAD_PULL_SET(x)
+#define GPIO_PIN2_PAD_STRENGTH_MSB WLAN_GPIO_PIN2_PAD_STRENGTH_MSB
+#define GPIO_PIN2_PAD_STRENGTH_LSB WLAN_GPIO_PIN2_PAD_STRENGTH_LSB
+#define GPIO_PIN2_PAD_STRENGTH_MASK WLAN_GPIO_PIN2_PAD_STRENGTH_MASK
+#define GPIO_PIN2_PAD_STRENGTH_GET(x) WLAN_GPIO_PIN2_PAD_STRENGTH_GET(x)
+#define GPIO_PIN2_PAD_STRENGTH_SET(x) WLAN_GPIO_PIN2_PAD_STRENGTH_SET(x)
+#define GPIO_PIN2_PAD_DRIVER_MSB WLAN_GPIO_PIN2_PAD_DRIVER_MSB
+#define GPIO_PIN2_PAD_DRIVER_LSB WLAN_GPIO_PIN2_PAD_DRIVER_LSB
+#define GPIO_PIN2_PAD_DRIVER_MASK WLAN_GPIO_PIN2_PAD_DRIVER_MASK
+#define GPIO_PIN2_PAD_DRIVER_GET(x) WLAN_GPIO_PIN2_PAD_DRIVER_GET(x)
+#define GPIO_PIN2_PAD_DRIVER_SET(x) WLAN_GPIO_PIN2_PAD_DRIVER_SET(x)
+#define GPIO_PIN2_SOURCE_MSB WLAN_GPIO_PIN2_SOURCE_MSB
+#define GPIO_PIN2_SOURCE_LSB WLAN_GPIO_PIN2_SOURCE_LSB
+#define GPIO_PIN2_SOURCE_MASK WLAN_GPIO_PIN2_SOURCE_MASK
+#define GPIO_PIN2_SOURCE_GET(x) WLAN_GPIO_PIN2_SOURCE_GET(x)
+#define GPIO_PIN2_SOURCE_SET(x) WLAN_GPIO_PIN2_SOURCE_SET(x)
+#define GPIO_PIN3_ADDRESS WLAN_GPIO_PIN3_ADDRESS
+#define GPIO_PIN3_OFFSET WLAN_GPIO_PIN3_OFFSET
+#define GPIO_PIN3_CONFIG_MSB WLAN_GPIO_PIN3_CONFIG_MSB
+#define GPIO_PIN3_CONFIG_LSB WLAN_GPIO_PIN3_CONFIG_LSB
+#define GPIO_PIN3_CONFIG_MASK WLAN_GPIO_PIN3_CONFIG_MASK
+#define GPIO_PIN3_CONFIG_GET(x) WLAN_GPIO_PIN3_CONFIG_GET(x)
+#define GPIO_PIN3_CONFIG_SET(x) WLAN_GPIO_PIN3_CONFIG_SET(x)
+#define GPIO_PIN3_WAKEUP_ENABLE_MSB WLAN_GPIO_PIN3_WAKEUP_ENABLE_MSB
+#define GPIO_PIN3_WAKEUP_ENABLE_LSB WLAN_GPIO_PIN3_WAKEUP_ENABLE_LSB
+#define GPIO_PIN3_WAKEUP_ENABLE_MASK WLAN_GPIO_PIN3_WAKEUP_ENABLE_MASK
+#define GPIO_PIN3_WAKEUP_ENABLE_GET(x) WLAN_GPIO_PIN3_WAKEUP_ENABLE_GET(x)
+#define GPIO_PIN3_WAKEUP_ENABLE_SET(x) WLAN_GPIO_PIN3_WAKEUP_ENABLE_SET(x)
+#define GPIO_PIN3_INT_TYPE_MSB WLAN_GPIO_PIN3_INT_TYPE_MSB
+#define GPIO_PIN3_INT_TYPE_LSB WLAN_GPIO_PIN3_INT_TYPE_LSB
+#define GPIO_PIN3_INT_TYPE_MASK WLAN_GPIO_PIN3_INT_TYPE_MASK
+#define GPIO_PIN3_INT_TYPE_GET(x) WLAN_GPIO_PIN3_INT_TYPE_GET(x)
+#define GPIO_PIN3_INT_TYPE_SET(x) WLAN_GPIO_PIN3_INT_TYPE_SET(x)
+#define GPIO_PIN3_PAD_PULL_MSB WLAN_GPIO_PIN3_PAD_PULL_MSB
+#define GPIO_PIN3_PAD_PULL_LSB WLAN_GPIO_PIN3_PAD_PULL_LSB
+#define GPIO_PIN3_PAD_PULL_MASK WLAN_GPIO_PIN3_PAD_PULL_MASK
+#define GPIO_PIN3_PAD_PULL_GET(x) WLAN_GPIO_PIN3_PAD_PULL_GET(x)
+#define GPIO_PIN3_PAD_PULL_SET(x) WLAN_GPIO_PIN3_PAD_PULL_SET(x)
+#define GPIO_PIN3_PAD_STRENGTH_MSB WLAN_GPIO_PIN3_PAD_STRENGTH_MSB
+#define GPIO_PIN3_PAD_STRENGTH_LSB WLAN_GPIO_PIN3_PAD_STRENGTH_LSB
+#define GPIO_PIN3_PAD_STRENGTH_MASK WLAN_GPIO_PIN3_PAD_STRENGTH_MASK
+#define GPIO_PIN3_PAD_STRENGTH_GET(x) WLAN_GPIO_PIN3_PAD_STRENGTH_GET(x)
+#define GPIO_PIN3_PAD_STRENGTH_SET(x) WLAN_GPIO_PIN3_PAD_STRENGTH_SET(x)
+#define GPIO_PIN3_PAD_DRIVER_MSB WLAN_GPIO_PIN3_PAD_DRIVER_MSB
+#define GPIO_PIN3_PAD_DRIVER_LSB WLAN_GPIO_PIN3_PAD_DRIVER_LSB
+#define GPIO_PIN3_PAD_DRIVER_MASK WLAN_GPIO_PIN3_PAD_DRIVER_MASK
+#define GPIO_PIN3_PAD_DRIVER_GET(x) WLAN_GPIO_PIN3_PAD_DRIVER_GET(x)
+#define GPIO_PIN3_PAD_DRIVER_SET(x) WLAN_GPIO_PIN3_PAD_DRIVER_SET(x)
+#define GPIO_PIN3_SOURCE_MSB WLAN_GPIO_PIN3_SOURCE_MSB
+#define GPIO_PIN3_SOURCE_LSB WLAN_GPIO_PIN3_SOURCE_LSB
+#define GPIO_PIN3_SOURCE_MASK WLAN_GPIO_PIN3_SOURCE_MASK
+#define GPIO_PIN3_SOURCE_GET(x) WLAN_GPIO_PIN3_SOURCE_GET(x)
+#define GPIO_PIN3_SOURCE_SET(x) WLAN_GPIO_PIN3_SOURCE_SET(x)
+#define GPIO_PIN4_ADDRESS WLAN_GPIO_PIN4_ADDRESS
+#define GPIO_PIN4_OFFSET WLAN_GPIO_PIN4_OFFSET
+#define GPIO_PIN4_CONFIG_MSB WLAN_GPIO_PIN4_CONFIG_MSB
+#define GPIO_PIN4_CONFIG_LSB WLAN_GPIO_PIN4_CONFIG_LSB
+#define GPIO_PIN4_CONFIG_MASK WLAN_GPIO_PIN4_CONFIG_MASK
+#define GPIO_PIN4_CONFIG_GET(x) WLAN_GPIO_PIN4_CONFIG_GET(x)
+#define GPIO_PIN4_CONFIG_SET(x) WLAN_GPIO_PIN4_CONFIG_SET(x)
+#define GPIO_PIN4_WAKEUP_ENABLE_MSB WLAN_GPIO_PIN4_WAKEUP_ENABLE_MSB
+#define GPIO_PIN4_WAKEUP_ENABLE_LSB WLAN_GPIO_PIN4_WAKEUP_ENABLE_LSB
+#define GPIO_PIN4_WAKEUP_ENABLE_MASK WLAN_GPIO_PIN4_WAKEUP_ENABLE_MASK
+#define GPIO_PIN4_WAKEUP_ENABLE_GET(x) WLAN_GPIO_PIN4_WAKEUP_ENABLE_GET(x)
+#define GPIO_PIN4_WAKEUP_ENABLE_SET(x) WLAN_GPIO_PIN4_WAKEUP_ENABLE_SET(x)
+#define GPIO_PIN4_INT_TYPE_MSB WLAN_GPIO_PIN4_INT_TYPE_MSB
+#define GPIO_PIN4_INT_TYPE_LSB WLAN_GPIO_PIN4_INT_TYPE_LSB
+#define GPIO_PIN4_INT_TYPE_MASK WLAN_GPIO_PIN4_INT_TYPE_MASK
+#define GPIO_PIN4_INT_TYPE_GET(x) WLAN_GPIO_PIN4_INT_TYPE_GET(x)
+#define GPIO_PIN4_INT_TYPE_SET(x) WLAN_GPIO_PIN4_INT_TYPE_SET(x)
+#define GPIO_PIN4_PAD_PULL_MSB WLAN_GPIO_PIN4_PAD_PULL_MSB
+#define GPIO_PIN4_PAD_PULL_LSB WLAN_GPIO_PIN4_PAD_PULL_LSB
+#define GPIO_PIN4_PAD_PULL_MASK WLAN_GPIO_PIN4_PAD_PULL_MASK
+#define GPIO_PIN4_PAD_PULL_GET(x) WLAN_GPIO_PIN4_PAD_PULL_GET(x)
+#define GPIO_PIN4_PAD_PULL_SET(x) WLAN_GPIO_PIN4_PAD_PULL_SET(x)
+#define GPIO_PIN4_PAD_STRENGTH_MSB WLAN_GPIO_PIN4_PAD_STRENGTH_MSB
+#define GPIO_PIN4_PAD_STRENGTH_LSB WLAN_GPIO_PIN4_PAD_STRENGTH_LSB
+#define GPIO_PIN4_PAD_STRENGTH_MASK WLAN_GPIO_PIN4_PAD_STRENGTH_MASK
+#define GPIO_PIN4_PAD_STRENGTH_GET(x) WLAN_GPIO_PIN4_PAD_STRENGTH_GET(x)
+#define GPIO_PIN4_PAD_STRENGTH_SET(x) WLAN_GPIO_PIN4_PAD_STRENGTH_SET(x)
+#define GPIO_PIN4_PAD_DRIVER_MSB WLAN_GPIO_PIN4_PAD_DRIVER_MSB
+#define GPIO_PIN4_PAD_DRIVER_LSB WLAN_GPIO_PIN4_PAD_DRIVER_LSB
+#define GPIO_PIN4_PAD_DRIVER_MASK WLAN_GPIO_PIN4_PAD_DRIVER_MASK
+#define GPIO_PIN4_PAD_DRIVER_GET(x) WLAN_GPIO_PIN4_PAD_DRIVER_GET(x)
+#define GPIO_PIN4_PAD_DRIVER_SET(x) WLAN_GPIO_PIN4_PAD_DRIVER_SET(x)
+#define GPIO_PIN4_SOURCE_MSB WLAN_GPIO_PIN4_SOURCE_MSB
+#define GPIO_PIN4_SOURCE_LSB WLAN_GPIO_PIN4_SOURCE_LSB
+#define GPIO_PIN4_SOURCE_MASK WLAN_GPIO_PIN4_SOURCE_MASK
+#define GPIO_PIN4_SOURCE_GET(x) WLAN_GPIO_PIN4_SOURCE_GET(x)
+#define GPIO_PIN4_SOURCE_SET(x) WLAN_GPIO_PIN4_SOURCE_SET(x)
+#define GPIO_PIN5_ADDRESS WLAN_GPIO_PIN5_ADDRESS
+#define GPIO_PIN5_OFFSET WLAN_GPIO_PIN5_OFFSET
+#define GPIO_PIN5_CONFIG_MSB WLAN_GPIO_PIN5_CONFIG_MSB
+#define GPIO_PIN5_CONFIG_LSB WLAN_GPIO_PIN5_CONFIG_LSB
+#define GPIO_PIN5_CONFIG_MASK WLAN_GPIO_PIN5_CONFIG_MASK
+#define GPIO_PIN5_CONFIG_GET(x) WLAN_GPIO_PIN5_CONFIG_GET(x)
+#define GPIO_PIN5_CONFIG_SET(x) WLAN_GPIO_PIN5_CONFIG_SET(x)
+#define GPIO_PIN5_WAKEUP_ENABLE_MSB WLAN_GPIO_PIN5_WAKEUP_ENABLE_MSB
+#define GPIO_PIN5_WAKEUP_ENABLE_LSB WLAN_GPIO_PIN5_WAKEUP_ENABLE_LSB
+#define GPIO_PIN5_WAKEUP_ENABLE_MASK WLAN_GPIO_PIN5_WAKEUP_ENABLE_MASK
+#define GPIO_PIN5_WAKEUP_ENABLE_GET(x) WLAN_GPIO_PIN5_WAKEUP_ENABLE_GET(x)
+#define GPIO_PIN5_WAKEUP_ENABLE_SET(x) WLAN_GPIO_PIN5_WAKEUP_ENABLE_SET(x)
+#define GPIO_PIN5_INT_TYPE_MSB WLAN_GPIO_PIN5_INT_TYPE_MSB
+#define GPIO_PIN5_INT_TYPE_LSB WLAN_GPIO_PIN5_INT_TYPE_LSB
+#define GPIO_PIN5_INT_TYPE_MASK WLAN_GPIO_PIN5_INT_TYPE_MASK
+#define GPIO_PIN5_INT_TYPE_GET(x) WLAN_GPIO_PIN5_INT_TYPE_GET(x)
+#define GPIO_PIN5_INT_TYPE_SET(x) WLAN_GPIO_PIN5_INT_TYPE_SET(x)
+#define GPIO_PIN5_PAD_PULL_MSB WLAN_GPIO_PIN5_PAD_PULL_MSB
+#define GPIO_PIN5_PAD_PULL_LSB WLAN_GPIO_PIN5_PAD_PULL_LSB
+#define GPIO_PIN5_PAD_PULL_MASK WLAN_GPIO_PIN5_PAD_PULL_MASK
+#define GPIO_PIN5_PAD_PULL_GET(x) WLAN_GPIO_PIN5_PAD_PULL_GET(x)
+#define GPIO_PIN5_PAD_PULL_SET(x) WLAN_GPIO_PIN5_PAD_PULL_SET(x)
+#define GPIO_PIN5_PAD_STRENGTH_MSB WLAN_GPIO_PIN5_PAD_STRENGTH_MSB
+#define GPIO_PIN5_PAD_STRENGTH_LSB WLAN_GPIO_PIN5_PAD_STRENGTH_LSB
+#define GPIO_PIN5_PAD_STRENGTH_MASK WLAN_GPIO_PIN5_PAD_STRENGTH_MASK
+#define GPIO_PIN5_PAD_STRENGTH_GET(x) WLAN_GPIO_PIN5_PAD_STRENGTH_GET(x)
+#define GPIO_PIN5_PAD_STRENGTH_SET(x) WLAN_GPIO_PIN5_PAD_STRENGTH_SET(x)
+#define GPIO_PIN5_PAD_DRIVER_MSB WLAN_GPIO_PIN5_PAD_DRIVER_MSB
+#define GPIO_PIN5_PAD_DRIVER_LSB WLAN_GPIO_PIN5_PAD_DRIVER_LSB
+#define GPIO_PIN5_PAD_DRIVER_MASK WLAN_GPIO_PIN5_PAD_DRIVER_MASK
+#define GPIO_PIN5_PAD_DRIVER_GET(x) WLAN_GPIO_PIN5_PAD_DRIVER_GET(x)
+#define GPIO_PIN5_PAD_DRIVER_SET(x) WLAN_GPIO_PIN5_PAD_DRIVER_SET(x)
+#define GPIO_PIN5_SOURCE_MSB WLAN_GPIO_PIN5_SOURCE_MSB
+#define GPIO_PIN5_SOURCE_LSB WLAN_GPIO_PIN5_SOURCE_LSB
+#define GPIO_PIN5_SOURCE_MASK WLAN_GPIO_PIN5_SOURCE_MASK
+#define GPIO_PIN5_SOURCE_GET(x) WLAN_GPIO_PIN5_SOURCE_GET(x)
+#define GPIO_PIN5_SOURCE_SET(x) WLAN_GPIO_PIN5_SOURCE_SET(x)
+#define GPIO_PIN6_ADDRESS WLAN_GPIO_PIN6_ADDRESS
+#define GPIO_PIN6_OFFSET WLAN_GPIO_PIN6_OFFSET
+#define GPIO_PIN6_CONFIG_MSB WLAN_GPIO_PIN6_CONFIG_MSB
+#define GPIO_PIN6_CONFIG_LSB WLAN_GPIO_PIN6_CONFIG_LSB
+#define GPIO_PIN6_CONFIG_MASK WLAN_GPIO_PIN6_CONFIG_MASK
+#define GPIO_PIN6_CONFIG_GET(x) WLAN_GPIO_PIN6_CONFIG_GET(x)
+#define GPIO_PIN6_CONFIG_SET(x) WLAN_GPIO_PIN6_CONFIG_SET(x)
+#define GPIO_PIN6_WAKEUP_ENABLE_MSB WLAN_GPIO_PIN6_WAKEUP_ENABLE_MSB
+#define GPIO_PIN6_WAKEUP_ENABLE_LSB WLAN_GPIO_PIN6_WAKEUP_ENABLE_LSB
+#define GPIO_PIN6_WAKEUP_ENABLE_MASK WLAN_GPIO_PIN6_WAKEUP_ENABLE_MASK
+#define GPIO_PIN6_WAKEUP_ENABLE_GET(x) WLAN_GPIO_PIN6_WAKEUP_ENABLE_GET(x)
+#define GPIO_PIN6_WAKEUP_ENABLE_SET(x) WLAN_GPIO_PIN6_WAKEUP_ENABLE_SET(x)
+#define GPIO_PIN6_INT_TYPE_MSB WLAN_GPIO_PIN6_INT_TYPE_MSB
+#define GPIO_PIN6_INT_TYPE_LSB WLAN_GPIO_PIN6_INT_TYPE_LSB
+#define GPIO_PIN6_INT_TYPE_MASK WLAN_GPIO_PIN6_INT_TYPE_MASK
+#define GPIO_PIN6_INT_TYPE_GET(x) WLAN_GPIO_PIN6_INT_TYPE_GET(x)
+#define GPIO_PIN6_INT_TYPE_SET(x) WLAN_GPIO_PIN6_INT_TYPE_SET(x)
+#define GPIO_PIN6_PAD_PULL_MSB WLAN_GPIO_PIN6_PAD_PULL_MSB
+#define GPIO_PIN6_PAD_PULL_LSB WLAN_GPIO_PIN6_PAD_PULL_LSB
+#define GPIO_PIN6_PAD_PULL_MASK WLAN_GPIO_PIN6_PAD_PULL_MASK
+#define GPIO_PIN6_PAD_PULL_GET(x) WLAN_GPIO_PIN6_PAD_PULL_GET(x)
+#define GPIO_PIN6_PAD_PULL_SET(x) WLAN_GPIO_PIN6_PAD_PULL_SET(x)
+#define GPIO_PIN6_PAD_STRENGTH_MSB WLAN_GPIO_PIN6_PAD_STRENGTH_MSB
+#define GPIO_PIN6_PAD_STRENGTH_LSB WLAN_GPIO_PIN6_PAD_STRENGTH_LSB
+#define GPIO_PIN6_PAD_STRENGTH_MASK WLAN_GPIO_PIN6_PAD_STRENGTH_MASK
+#define GPIO_PIN6_PAD_STRENGTH_GET(x) WLAN_GPIO_PIN6_PAD_STRENGTH_GET(x)
+#define GPIO_PIN6_PAD_STRENGTH_SET(x) WLAN_GPIO_PIN6_PAD_STRENGTH_SET(x)
+#define GPIO_PIN6_PAD_DRIVER_MSB WLAN_GPIO_PIN6_PAD_DRIVER_MSB
+#define GPIO_PIN6_PAD_DRIVER_LSB WLAN_GPIO_PIN6_PAD_DRIVER_LSB
+#define GPIO_PIN6_PAD_DRIVER_MASK WLAN_GPIO_PIN6_PAD_DRIVER_MASK
+#define GPIO_PIN6_PAD_DRIVER_GET(x) WLAN_GPIO_PIN6_PAD_DRIVER_GET(x)
+#define GPIO_PIN6_PAD_DRIVER_SET(x) WLAN_GPIO_PIN6_PAD_DRIVER_SET(x)
+#define GPIO_PIN6_SOURCE_MSB WLAN_GPIO_PIN6_SOURCE_MSB
+#define GPIO_PIN6_SOURCE_LSB WLAN_GPIO_PIN6_SOURCE_LSB
+#define GPIO_PIN6_SOURCE_MASK WLAN_GPIO_PIN6_SOURCE_MASK
+#define GPIO_PIN6_SOURCE_GET(x) WLAN_GPIO_PIN6_SOURCE_GET(x)
+#define GPIO_PIN6_SOURCE_SET(x) WLAN_GPIO_PIN6_SOURCE_SET(x)
+#define GPIO_PIN7_ADDRESS WLAN_GPIO_PIN7_ADDRESS
+#define GPIO_PIN7_OFFSET WLAN_GPIO_PIN7_OFFSET
+#define GPIO_PIN7_CONFIG_MSB WLAN_GPIO_PIN7_CONFIG_MSB
+#define GPIO_PIN7_CONFIG_LSB WLAN_GPIO_PIN7_CONFIG_LSB
+#define GPIO_PIN7_CONFIG_MASK WLAN_GPIO_PIN7_CONFIG_MASK
+#define GPIO_PIN7_CONFIG_GET(x) WLAN_GPIO_PIN7_CONFIG_GET(x)
+#define GPIO_PIN7_CONFIG_SET(x) WLAN_GPIO_PIN7_CONFIG_SET(x)
+#define GPIO_PIN7_WAKEUP_ENABLE_MSB WLAN_GPIO_PIN7_WAKEUP_ENABLE_MSB
+#define GPIO_PIN7_WAKEUP_ENABLE_LSB WLAN_GPIO_PIN7_WAKEUP_ENABLE_LSB
+#define GPIO_PIN7_WAKEUP_ENABLE_MASK WLAN_GPIO_PIN7_WAKEUP_ENABLE_MASK
+#define GPIO_PIN7_WAKEUP_ENABLE_GET(x) WLAN_GPIO_PIN7_WAKEUP_ENABLE_GET(x)
+#define GPIO_PIN7_WAKEUP_ENABLE_SET(x) WLAN_GPIO_PIN7_WAKEUP_ENABLE_SET(x)
+#define GPIO_PIN7_INT_TYPE_MSB WLAN_GPIO_PIN7_INT_TYPE_MSB
+#define GPIO_PIN7_INT_TYPE_LSB WLAN_GPIO_PIN7_INT_TYPE_LSB
+#define GPIO_PIN7_INT_TYPE_MASK WLAN_GPIO_PIN7_INT_TYPE_MASK
+#define GPIO_PIN7_INT_TYPE_GET(x) WLAN_GPIO_PIN7_INT_TYPE_GET(x)
+#define GPIO_PIN7_INT_TYPE_SET(x) WLAN_GPIO_PIN7_INT_TYPE_SET(x)
+#define GPIO_PIN7_PAD_PULL_MSB WLAN_GPIO_PIN7_PAD_PULL_MSB
+#define GPIO_PIN7_PAD_PULL_LSB WLAN_GPIO_PIN7_PAD_PULL_LSB
+#define GPIO_PIN7_PAD_PULL_MASK WLAN_GPIO_PIN7_PAD_PULL_MASK
+#define GPIO_PIN7_PAD_PULL_GET(x) WLAN_GPIO_PIN7_PAD_PULL_GET(x)
+#define GPIO_PIN7_PAD_PULL_SET(x) WLAN_GPIO_PIN7_PAD_PULL_SET(x)
+#define GPIO_PIN7_PAD_STRENGTH_MSB WLAN_GPIO_PIN7_PAD_STRENGTH_MSB
+#define GPIO_PIN7_PAD_STRENGTH_LSB WLAN_GPIO_PIN7_PAD_STRENGTH_LSB
+#define GPIO_PIN7_PAD_STRENGTH_MASK WLAN_GPIO_PIN7_PAD_STRENGTH_MASK
+#define GPIO_PIN7_PAD_STRENGTH_GET(x) WLAN_GPIO_PIN7_PAD_STRENGTH_GET(x)
+#define GPIO_PIN7_PAD_STRENGTH_SET(x) WLAN_GPIO_PIN7_PAD_STRENGTH_SET(x)
+#define GPIO_PIN7_PAD_DRIVER_MSB WLAN_GPIO_PIN7_PAD_DRIVER_MSB
+#define GPIO_PIN7_PAD_DRIVER_LSB WLAN_GPIO_PIN7_PAD_DRIVER_LSB
+#define GPIO_PIN7_PAD_DRIVER_MASK WLAN_GPIO_PIN7_PAD_DRIVER_MASK
+#define GPIO_PIN7_PAD_DRIVER_GET(x) WLAN_GPIO_PIN7_PAD_DRIVER_GET(x)
+#define GPIO_PIN7_PAD_DRIVER_SET(x) WLAN_GPIO_PIN7_PAD_DRIVER_SET(x)
+#define GPIO_PIN7_SOURCE_MSB WLAN_GPIO_PIN7_SOURCE_MSB
+#define GPIO_PIN7_SOURCE_LSB WLAN_GPIO_PIN7_SOURCE_LSB
+#define GPIO_PIN7_SOURCE_MASK WLAN_GPIO_PIN7_SOURCE_MASK
+#define GPIO_PIN7_SOURCE_GET(x) WLAN_GPIO_PIN7_SOURCE_GET(x)
+#define GPIO_PIN7_SOURCE_SET(x) WLAN_GPIO_PIN7_SOURCE_SET(x)
+#define GPIO_PIN8_ADDRESS WLAN_GPIO_PIN8_ADDRESS
+#define GPIO_PIN8_OFFSET WLAN_GPIO_PIN8_OFFSET
+#define GPIO_PIN8_CONFIG_MSB WLAN_GPIO_PIN8_CONFIG_MSB
+#define GPIO_PIN8_CONFIG_LSB WLAN_GPIO_PIN8_CONFIG_LSB
+#define GPIO_PIN8_CONFIG_MASK WLAN_GPIO_PIN8_CONFIG_MASK
+#define GPIO_PIN8_CONFIG_GET(x) WLAN_GPIO_PIN8_CONFIG_GET(x)
+#define GPIO_PIN8_CONFIG_SET(x) WLAN_GPIO_PIN8_CONFIG_SET(x)
+#define GPIO_PIN8_WAKEUP_ENABLE_MSB WLAN_GPIO_PIN8_WAKEUP_ENABLE_MSB
+#define GPIO_PIN8_WAKEUP_ENABLE_LSB WLAN_GPIO_PIN8_WAKEUP_ENABLE_LSB
+#define GPIO_PIN8_WAKEUP_ENABLE_MASK WLAN_GPIO_PIN8_WAKEUP_ENABLE_MASK
+#define GPIO_PIN8_WAKEUP_ENABLE_GET(x) WLAN_GPIO_PIN8_WAKEUP_ENABLE_GET(x)
+#define GPIO_PIN8_WAKEUP_ENABLE_SET(x) WLAN_GPIO_PIN8_WAKEUP_ENABLE_SET(x)
+#define GPIO_PIN8_INT_TYPE_MSB WLAN_GPIO_PIN8_INT_TYPE_MSB
+#define GPIO_PIN8_INT_TYPE_LSB WLAN_GPIO_PIN8_INT_TYPE_LSB
+#define GPIO_PIN8_INT_TYPE_MASK WLAN_GPIO_PIN8_INT_TYPE_MASK
+#define GPIO_PIN8_INT_TYPE_GET(x) WLAN_GPIO_PIN8_INT_TYPE_GET(x)
+#define GPIO_PIN8_INT_TYPE_SET(x) WLAN_GPIO_PIN8_INT_TYPE_SET(x)
+#define GPIO_PIN8_PAD_PULL_MSB WLAN_GPIO_PIN8_PAD_PULL_MSB
+#define GPIO_PIN8_PAD_PULL_LSB WLAN_GPIO_PIN8_PAD_PULL_LSB
+#define GPIO_PIN8_PAD_PULL_MASK WLAN_GPIO_PIN8_PAD_PULL_MASK
+#define GPIO_PIN8_PAD_PULL_GET(x) WLAN_GPIO_PIN8_PAD_PULL_GET(x)
+#define GPIO_PIN8_PAD_PULL_SET(x) WLAN_GPIO_PIN8_PAD_PULL_SET(x)
+#define GPIO_PIN8_PAD_STRENGTH_MSB WLAN_GPIO_PIN8_PAD_STRENGTH_MSB
+#define GPIO_PIN8_PAD_STRENGTH_LSB WLAN_GPIO_PIN8_PAD_STRENGTH_LSB
+#define GPIO_PIN8_PAD_STRENGTH_MASK WLAN_GPIO_PIN8_PAD_STRENGTH_MASK
+#define GPIO_PIN8_PAD_STRENGTH_GET(x) WLAN_GPIO_PIN8_PAD_STRENGTH_GET(x)
+#define GPIO_PIN8_PAD_STRENGTH_SET(x) WLAN_GPIO_PIN8_PAD_STRENGTH_SET(x)
+#define GPIO_PIN8_PAD_DRIVER_MSB WLAN_GPIO_PIN8_PAD_DRIVER_MSB
+#define GPIO_PIN8_PAD_DRIVER_LSB WLAN_GPIO_PIN8_PAD_DRIVER_LSB
+#define GPIO_PIN8_PAD_DRIVER_MASK WLAN_GPIO_PIN8_PAD_DRIVER_MASK
+#define GPIO_PIN8_PAD_DRIVER_GET(x) WLAN_GPIO_PIN8_PAD_DRIVER_GET(x)
+#define GPIO_PIN8_PAD_DRIVER_SET(x) WLAN_GPIO_PIN8_PAD_DRIVER_SET(x)
+#define GPIO_PIN8_SOURCE_MSB WLAN_GPIO_PIN8_SOURCE_MSB
+#define GPIO_PIN8_SOURCE_LSB WLAN_GPIO_PIN8_SOURCE_LSB
+#define GPIO_PIN8_SOURCE_MASK WLAN_GPIO_PIN8_SOURCE_MASK
+#define GPIO_PIN8_SOURCE_GET(x) WLAN_GPIO_PIN8_SOURCE_GET(x)
+#define GPIO_PIN8_SOURCE_SET(x) WLAN_GPIO_PIN8_SOURCE_SET(x)
+#define GPIO_PIN9_ADDRESS WLAN_GPIO_PIN9_ADDRESS
+#define GPIO_PIN9_OFFSET WLAN_GPIO_PIN9_OFFSET
+#define GPIO_PIN9_CONFIG_MSB WLAN_GPIO_PIN9_CONFIG_MSB
+#define GPIO_PIN9_CONFIG_LSB WLAN_GPIO_PIN9_CONFIG_LSB
+#define GPIO_PIN9_CONFIG_MASK WLAN_GPIO_PIN9_CONFIG_MASK
+#define GPIO_PIN9_CONFIG_GET(x) WLAN_GPIO_PIN9_CONFIG_GET(x)
+#define GPIO_PIN9_CONFIG_SET(x) WLAN_GPIO_PIN9_CONFIG_SET(x)
+#define GPIO_PIN9_WAKEUP_ENABLE_MSB WLAN_GPIO_PIN9_WAKEUP_ENABLE_MSB
+#define GPIO_PIN9_WAKEUP_ENABLE_LSB WLAN_GPIO_PIN9_WAKEUP_ENABLE_LSB
+#define GPIO_PIN9_WAKEUP_ENABLE_MASK WLAN_GPIO_PIN9_WAKEUP_ENABLE_MASK
+#define GPIO_PIN9_WAKEUP_ENABLE_GET(x) WLAN_GPIO_PIN9_WAKEUP_ENABLE_GET(x)
+#define GPIO_PIN9_WAKEUP_ENABLE_SET(x) WLAN_GPIO_PIN9_WAKEUP_ENABLE_SET(x)
+#define GPIO_PIN9_INT_TYPE_MSB WLAN_GPIO_PIN9_INT_TYPE_MSB
+#define GPIO_PIN9_INT_TYPE_LSB WLAN_GPIO_PIN9_INT_TYPE_LSB
+#define GPIO_PIN9_INT_TYPE_MASK WLAN_GPIO_PIN9_INT_TYPE_MASK
+#define GPIO_PIN9_INT_TYPE_GET(x) WLAN_GPIO_PIN9_INT_TYPE_GET(x)
+#define GPIO_PIN9_INT_TYPE_SET(x) WLAN_GPIO_PIN9_INT_TYPE_SET(x)
+#define GPIO_PIN9_PAD_PULL_MSB WLAN_GPIO_PIN9_PAD_PULL_MSB
+#define GPIO_PIN9_PAD_PULL_LSB WLAN_GPIO_PIN9_PAD_PULL_LSB
+#define GPIO_PIN9_PAD_PULL_MASK WLAN_GPIO_PIN9_PAD_PULL_MASK
+#define GPIO_PIN9_PAD_PULL_GET(x) WLAN_GPIO_PIN9_PAD_PULL_GET(x)
+#define GPIO_PIN9_PAD_PULL_SET(x) WLAN_GPIO_PIN9_PAD_PULL_SET(x)
+#define GPIO_PIN9_PAD_STRENGTH_MSB WLAN_GPIO_PIN9_PAD_STRENGTH_MSB
+#define GPIO_PIN9_PAD_STRENGTH_LSB WLAN_GPIO_PIN9_PAD_STRENGTH_LSB
+#define GPIO_PIN9_PAD_STRENGTH_MASK WLAN_GPIO_PIN9_PAD_STRENGTH_MASK
+#define GPIO_PIN9_PAD_STRENGTH_GET(x) WLAN_GPIO_PIN9_PAD_STRENGTH_GET(x)
+#define GPIO_PIN9_PAD_STRENGTH_SET(x) WLAN_GPIO_PIN9_PAD_STRENGTH_SET(x)
+#define GPIO_PIN9_PAD_DRIVER_MSB WLAN_GPIO_PIN9_PAD_DRIVER_MSB
+#define GPIO_PIN9_PAD_DRIVER_LSB WLAN_GPIO_PIN9_PAD_DRIVER_LSB
+#define GPIO_PIN9_PAD_DRIVER_MASK WLAN_GPIO_PIN9_PAD_DRIVER_MASK
+#define GPIO_PIN9_PAD_DRIVER_GET(x) WLAN_GPIO_PIN9_PAD_DRIVER_GET(x)
+#define GPIO_PIN9_PAD_DRIVER_SET(x) WLAN_GPIO_PIN9_PAD_DRIVER_SET(x)
+#define GPIO_PIN9_SOURCE_MSB WLAN_GPIO_PIN9_SOURCE_MSB
+#define GPIO_PIN9_SOURCE_LSB WLAN_GPIO_PIN9_SOURCE_LSB
+#define GPIO_PIN9_SOURCE_MASK WLAN_GPIO_PIN9_SOURCE_MASK
+#define GPIO_PIN9_SOURCE_GET(x) WLAN_GPIO_PIN9_SOURCE_GET(x)
+#define GPIO_PIN9_SOURCE_SET(x) WLAN_GPIO_PIN9_SOURCE_SET(x)
+#define GPIO_PIN10_ADDRESS WLAN_GPIO_PIN10_ADDRESS
+#define GPIO_PIN10_OFFSET WLAN_GPIO_PIN10_OFFSET
+#define GPIO_PIN10_CONFIG_MSB WLAN_GPIO_PIN10_CONFIG_MSB
+#define GPIO_PIN10_CONFIG_LSB WLAN_GPIO_PIN10_CONFIG_LSB
+#define GPIO_PIN10_CONFIG_MASK WLAN_GPIO_PIN10_CONFIG_MASK
+#define GPIO_PIN10_CONFIG_GET(x) WLAN_GPIO_PIN10_CONFIG_GET(x)
+#define GPIO_PIN10_CONFIG_SET(x) WLAN_GPIO_PIN10_CONFIG_SET(x)
+#define GPIO_PIN10_WAKEUP_ENABLE_MSB WLAN_GPIO_PIN10_WAKEUP_ENABLE_MSB
+#define GPIO_PIN10_WAKEUP_ENABLE_LSB WLAN_GPIO_PIN10_WAKEUP_ENABLE_LSB
+#define GPIO_PIN10_WAKEUP_ENABLE_MASK WLAN_GPIO_PIN10_WAKEUP_ENABLE_MASK
+#define GPIO_PIN10_WAKEUP_ENABLE_GET(x) WLAN_GPIO_PIN10_WAKEUP_ENABLE_GET(x)
+#define GPIO_PIN10_WAKEUP_ENABLE_SET(x) WLAN_GPIO_PIN10_WAKEUP_ENABLE_SET(x)
+#define GPIO_PIN10_INT_TYPE_MSB WLAN_GPIO_PIN10_INT_TYPE_MSB
+#define GPIO_PIN10_INT_TYPE_LSB WLAN_GPIO_PIN10_INT_TYPE_LSB
+#define GPIO_PIN10_INT_TYPE_MASK WLAN_GPIO_PIN10_INT_TYPE_MASK
+#define GPIO_PIN10_INT_TYPE_GET(x) WLAN_GPIO_PIN10_INT_TYPE_GET(x)
+#define GPIO_PIN10_INT_TYPE_SET(x) WLAN_GPIO_PIN10_INT_TYPE_SET(x)
+#define GPIO_PIN10_PAD_PULL_MSB WLAN_GPIO_PIN10_PAD_PULL_MSB
+#define GPIO_PIN10_PAD_PULL_LSB WLAN_GPIO_PIN10_PAD_PULL_LSB
+#define GPIO_PIN10_PAD_PULL_MASK WLAN_GPIO_PIN10_PAD_PULL_MASK
+#define GPIO_PIN10_PAD_PULL_GET(x) WLAN_GPIO_PIN10_PAD_PULL_GET(x)
+#define GPIO_PIN10_PAD_PULL_SET(x) WLAN_GPIO_PIN10_PAD_PULL_SET(x)
+#define GPIO_PIN10_PAD_STRENGTH_MSB WLAN_GPIO_PIN10_PAD_STRENGTH_MSB
+#define GPIO_PIN10_PAD_STRENGTH_LSB WLAN_GPIO_PIN10_PAD_STRENGTH_LSB
+#define GPIO_PIN10_PAD_STRENGTH_MASK WLAN_GPIO_PIN10_PAD_STRENGTH_MASK
+#define GPIO_PIN10_PAD_STRENGTH_GET(x) WLAN_GPIO_PIN10_PAD_STRENGTH_GET(x)
+#define GPIO_PIN10_PAD_STRENGTH_SET(x) WLAN_GPIO_PIN10_PAD_STRENGTH_SET(x)
+#define GPIO_PIN10_PAD_DRIVER_MSB WLAN_GPIO_PIN10_PAD_DRIVER_MSB
+#define GPIO_PIN10_PAD_DRIVER_LSB WLAN_GPIO_PIN10_PAD_DRIVER_LSB
+#define GPIO_PIN10_PAD_DRIVER_MASK WLAN_GPIO_PIN10_PAD_DRIVER_MASK
+#define GPIO_PIN10_PAD_DRIVER_GET(x) WLAN_GPIO_PIN10_PAD_DRIVER_GET(x)
+#define GPIO_PIN10_PAD_DRIVER_SET(x) WLAN_GPIO_PIN10_PAD_DRIVER_SET(x)
+#define GPIO_PIN10_SOURCE_MSB WLAN_GPIO_PIN10_SOURCE_MSB
+#define GPIO_PIN10_SOURCE_LSB WLAN_GPIO_PIN10_SOURCE_LSB
+#define GPIO_PIN10_SOURCE_MASK WLAN_GPIO_PIN10_SOURCE_MASK
+#define GPIO_PIN10_SOURCE_GET(x) WLAN_GPIO_PIN10_SOURCE_GET(x)
+#define GPIO_PIN10_SOURCE_SET(x) WLAN_GPIO_PIN10_SOURCE_SET(x)
+#define GPIO_PIN11_ADDRESS WLAN_GPIO_PIN11_ADDRESS
+#define GPIO_PIN11_OFFSET WLAN_GPIO_PIN11_OFFSET
+#define GPIO_PIN11_CONFIG_MSB WLAN_GPIO_PIN11_CONFIG_MSB
+#define GPIO_PIN11_CONFIG_LSB WLAN_GPIO_PIN11_CONFIG_LSB
+#define GPIO_PIN11_CONFIG_MASK WLAN_GPIO_PIN11_CONFIG_MASK
+#define GPIO_PIN11_CONFIG_GET(x) WLAN_GPIO_PIN11_CONFIG_GET(x)
+#define GPIO_PIN11_CONFIG_SET(x) WLAN_GPIO_PIN11_CONFIG_SET(x)
+#define GPIO_PIN11_WAKEUP_ENABLE_MSB WLAN_GPIO_PIN11_WAKEUP_ENABLE_MSB
+#define GPIO_PIN11_WAKEUP_ENABLE_LSB WLAN_GPIO_PIN11_WAKEUP_ENABLE_LSB
+#define GPIO_PIN11_WAKEUP_ENABLE_MASK WLAN_GPIO_PIN11_WAKEUP_ENABLE_MASK
+#define GPIO_PIN11_WAKEUP_ENABLE_GET(x) WLAN_GPIO_PIN11_WAKEUP_ENABLE_GET(x)
+#define GPIO_PIN11_WAKEUP_ENABLE_SET(x) WLAN_GPIO_PIN11_WAKEUP_ENABLE_SET(x)
+#define GPIO_PIN11_INT_TYPE_MSB WLAN_GPIO_PIN11_INT_TYPE_MSB
+#define GPIO_PIN11_INT_TYPE_LSB WLAN_GPIO_PIN11_INT_TYPE_LSB
+#define GPIO_PIN11_INT_TYPE_MASK WLAN_GPIO_PIN11_INT_TYPE_MASK
+#define GPIO_PIN11_INT_TYPE_GET(x) WLAN_GPIO_PIN11_INT_TYPE_GET(x)
+#define GPIO_PIN11_INT_TYPE_SET(x) WLAN_GPIO_PIN11_INT_TYPE_SET(x)
+#define GPIO_PIN11_PAD_PULL_MSB WLAN_GPIO_PIN11_PAD_PULL_MSB
+#define GPIO_PIN11_PAD_PULL_LSB WLAN_GPIO_PIN11_PAD_PULL_LSB
+#define GPIO_PIN11_PAD_PULL_MASK WLAN_GPIO_PIN11_PAD_PULL_MASK
+#define GPIO_PIN11_PAD_PULL_GET(x) WLAN_GPIO_PIN11_PAD_PULL_GET(x)
+#define GPIO_PIN11_PAD_PULL_SET(x) WLAN_GPIO_PIN11_PAD_PULL_SET(x)
+#define GPIO_PIN11_PAD_STRENGTH_MSB WLAN_GPIO_PIN11_PAD_STRENGTH_MSB
+#define GPIO_PIN11_PAD_STRENGTH_LSB WLAN_GPIO_PIN11_PAD_STRENGTH_LSB
+#define GPIO_PIN11_PAD_STRENGTH_MASK WLAN_GPIO_PIN11_PAD_STRENGTH_MASK
+#define GPIO_PIN11_PAD_STRENGTH_GET(x) WLAN_GPIO_PIN11_PAD_STRENGTH_GET(x)
+#define GPIO_PIN11_PAD_STRENGTH_SET(x) WLAN_GPIO_PIN11_PAD_STRENGTH_SET(x)
+#define GPIO_PIN11_PAD_DRIVER_MSB WLAN_GPIO_PIN11_PAD_DRIVER_MSB
+#define GPIO_PIN11_PAD_DRIVER_LSB WLAN_GPIO_PIN11_PAD_DRIVER_LSB
+#define GPIO_PIN11_PAD_DRIVER_MASK WLAN_GPIO_PIN11_PAD_DRIVER_MASK
+#define GPIO_PIN11_PAD_DRIVER_GET(x) WLAN_GPIO_PIN11_PAD_DRIVER_GET(x)
+#define GPIO_PIN11_PAD_DRIVER_SET(x) WLAN_GPIO_PIN11_PAD_DRIVER_SET(x)
+#define GPIO_PIN11_SOURCE_MSB WLAN_GPIO_PIN11_SOURCE_MSB
+#define GPIO_PIN11_SOURCE_LSB WLAN_GPIO_PIN11_SOURCE_LSB
+#define GPIO_PIN11_SOURCE_MASK WLAN_GPIO_PIN11_SOURCE_MASK
+#define GPIO_PIN11_SOURCE_GET(x) WLAN_GPIO_PIN11_SOURCE_GET(x)
+#define GPIO_PIN11_SOURCE_SET(x) WLAN_GPIO_PIN11_SOURCE_SET(x)
+#define GPIO_PIN12_ADDRESS WLAN_GPIO_PIN12_ADDRESS
+#define GPIO_PIN12_OFFSET WLAN_GPIO_PIN12_OFFSET
+#define GPIO_PIN12_CONFIG_MSB WLAN_GPIO_PIN12_CONFIG_MSB
+#define GPIO_PIN12_CONFIG_LSB WLAN_GPIO_PIN12_CONFIG_LSB
+#define GPIO_PIN12_CONFIG_MASK WLAN_GPIO_PIN12_CONFIG_MASK
+#define GPIO_PIN12_CONFIG_GET(x) WLAN_GPIO_PIN12_CONFIG_GET(x)
+#define GPIO_PIN12_CONFIG_SET(x) WLAN_GPIO_PIN12_CONFIG_SET(x)
+#define GPIO_PIN12_WAKEUP_ENABLE_MSB WLAN_GPIO_PIN12_WAKEUP_ENABLE_MSB
+#define GPIO_PIN12_WAKEUP_ENABLE_LSB WLAN_GPIO_PIN12_WAKEUP_ENABLE_LSB
+#define GPIO_PIN12_WAKEUP_ENABLE_MASK WLAN_GPIO_PIN12_WAKEUP_ENABLE_MASK
+#define GPIO_PIN12_WAKEUP_ENABLE_GET(x) WLAN_GPIO_PIN12_WAKEUP_ENABLE_GET(x)
+#define GPIO_PIN12_WAKEUP_ENABLE_SET(x) WLAN_GPIO_PIN12_WAKEUP_ENABLE_SET(x)
+#define GPIO_PIN12_INT_TYPE_MSB WLAN_GPIO_PIN12_INT_TYPE_MSB
+#define GPIO_PIN12_INT_TYPE_LSB WLAN_GPIO_PIN12_INT_TYPE_LSB
+#define GPIO_PIN12_INT_TYPE_MASK WLAN_GPIO_PIN12_INT_TYPE_MASK
+#define GPIO_PIN12_INT_TYPE_GET(x) WLAN_GPIO_PIN12_INT_TYPE_GET(x)
+#define GPIO_PIN12_INT_TYPE_SET(x) WLAN_GPIO_PIN12_INT_TYPE_SET(x)
+#define GPIO_PIN12_PAD_PULL_MSB WLAN_GPIO_PIN12_PAD_PULL_MSB
+#define GPIO_PIN12_PAD_PULL_LSB WLAN_GPIO_PIN12_PAD_PULL_LSB
+#define GPIO_PIN12_PAD_PULL_MASK WLAN_GPIO_PIN12_PAD_PULL_MASK
+#define GPIO_PIN12_PAD_PULL_GET(x) WLAN_GPIO_PIN12_PAD_PULL_GET(x)
+#define GPIO_PIN12_PAD_PULL_SET(x) WLAN_GPIO_PIN12_PAD_PULL_SET(x)
+#define GPIO_PIN12_PAD_STRENGTH_MSB WLAN_GPIO_PIN12_PAD_STRENGTH_MSB
+#define GPIO_PIN12_PAD_STRENGTH_LSB WLAN_GPIO_PIN12_PAD_STRENGTH_LSB
+#define GPIO_PIN12_PAD_STRENGTH_MASK WLAN_GPIO_PIN12_PAD_STRENGTH_MASK
+#define GPIO_PIN12_PAD_STRENGTH_GET(x) WLAN_GPIO_PIN12_PAD_STRENGTH_GET(x)
+#define GPIO_PIN12_PAD_STRENGTH_SET(x) WLAN_GPIO_PIN12_PAD_STRENGTH_SET(x)
+#define GPIO_PIN12_PAD_DRIVER_MSB WLAN_GPIO_PIN12_PAD_DRIVER_MSB
+#define GPIO_PIN12_PAD_DRIVER_LSB WLAN_GPIO_PIN12_PAD_DRIVER_LSB
+#define GPIO_PIN12_PAD_DRIVER_MASK WLAN_GPIO_PIN12_PAD_DRIVER_MASK
+#define GPIO_PIN12_PAD_DRIVER_GET(x) WLAN_GPIO_PIN12_PAD_DRIVER_GET(x)
+#define GPIO_PIN12_PAD_DRIVER_SET(x) WLAN_GPIO_PIN12_PAD_DRIVER_SET(x)
+#define GPIO_PIN12_SOURCE_MSB WLAN_GPIO_PIN12_SOURCE_MSB
+#define GPIO_PIN12_SOURCE_LSB WLAN_GPIO_PIN12_SOURCE_LSB
+#define GPIO_PIN12_SOURCE_MASK WLAN_GPIO_PIN12_SOURCE_MASK
+#define GPIO_PIN12_SOURCE_GET(x) WLAN_GPIO_PIN12_SOURCE_GET(x)
+#define GPIO_PIN12_SOURCE_SET(x) WLAN_GPIO_PIN12_SOURCE_SET(x)
+#define GPIO_PIN13_ADDRESS WLAN_GPIO_PIN13_ADDRESS
+#define GPIO_PIN13_OFFSET WLAN_GPIO_PIN13_OFFSET
+#define GPIO_PIN13_CONFIG_MSB WLAN_GPIO_PIN13_CONFIG_MSB
+#define GPIO_PIN13_CONFIG_LSB WLAN_GPIO_PIN13_CONFIG_LSB
+#define GPIO_PIN13_CONFIG_MASK WLAN_GPIO_PIN13_CONFIG_MASK
+#define GPIO_PIN13_CONFIG_GET(x) WLAN_GPIO_PIN13_CONFIG_GET(x)
+#define GPIO_PIN13_CONFIG_SET(x) WLAN_GPIO_PIN13_CONFIG_SET(x)
+#define GPIO_PIN13_WAKEUP_ENABLE_MSB WLAN_GPIO_PIN13_WAKEUP_ENABLE_MSB
+#define GPIO_PIN13_WAKEUP_ENABLE_LSB WLAN_GPIO_PIN13_WAKEUP_ENABLE_LSB
+#define GPIO_PIN13_WAKEUP_ENABLE_MASK WLAN_GPIO_PIN13_WAKEUP_ENABLE_MASK
+#define GPIO_PIN13_WAKEUP_ENABLE_GET(x) WLAN_GPIO_PIN13_WAKEUP_ENABLE_GET(x)
+#define GPIO_PIN13_WAKEUP_ENABLE_SET(x) WLAN_GPIO_PIN13_WAKEUP_ENABLE_SET(x)
+#define GPIO_PIN13_INT_TYPE_MSB WLAN_GPIO_PIN13_INT_TYPE_MSB
+#define GPIO_PIN13_INT_TYPE_LSB WLAN_GPIO_PIN13_INT_TYPE_LSB
+#define GPIO_PIN13_INT_TYPE_MASK WLAN_GPIO_PIN13_INT_TYPE_MASK
+#define GPIO_PIN13_INT_TYPE_GET(x) WLAN_GPIO_PIN13_INT_TYPE_GET(x)
+#define GPIO_PIN13_INT_TYPE_SET(x) WLAN_GPIO_PIN13_INT_TYPE_SET(x)
+#define GPIO_PIN13_PAD_PULL_MSB WLAN_GPIO_PIN13_PAD_PULL_MSB
+#define GPIO_PIN13_PAD_PULL_LSB WLAN_GPIO_PIN13_PAD_PULL_LSB
+#define GPIO_PIN13_PAD_PULL_MASK WLAN_GPIO_PIN13_PAD_PULL_MASK
+#define GPIO_PIN13_PAD_PULL_GET(x) WLAN_GPIO_PIN13_PAD_PULL_GET(x)
+#define GPIO_PIN13_PAD_PULL_SET(x) WLAN_GPIO_PIN13_PAD_PULL_SET(x)
+#define GPIO_PIN13_PAD_STRENGTH_MSB WLAN_GPIO_PIN13_PAD_STRENGTH_MSB
+#define GPIO_PIN13_PAD_STRENGTH_LSB WLAN_GPIO_PIN13_PAD_STRENGTH_LSB
+#define GPIO_PIN13_PAD_STRENGTH_MASK WLAN_GPIO_PIN13_PAD_STRENGTH_MASK
+#define GPIO_PIN13_PAD_STRENGTH_GET(x) WLAN_GPIO_PIN13_PAD_STRENGTH_GET(x)
+#define GPIO_PIN13_PAD_STRENGTH_SET(x) WLAN_GPIO_PIN13_PAD_STRENGTH_SET(x)
+#define GPIO_PIN13_PAD_DRIVER_MSB WLAN_GPIO_PIN13_PAD_DRIVER_MSB
+#define GPIO_PIN13_PAD_DRIVER_LSB WLAN_GPIO_PIN13_PAD_DRIVER_LSB
+#define GPIO_PIN13_PAD_DRIVER_MASK WLAN_GPIO_PIN13_PAD_DRIVER_MASK
+#define GPIO_PIN13_PAD_DRIVER_GET(x) WLAN_GPIO_PIN13_PAD_DRIVER_GET(x)
+#define GPIO_PIN13_PAD_DRIVER_SET(x) WLAN_GPIO_PIN13_PAD_DRIVER_SET(x)
+#define GPIO_PIN13_SOURCE_MSB WLAN_GPIO_PIN13_SOURCE_MSB
+#define GPIO_PIN13_SOURCE_LSB WLAN_GPIO_PIN13_SOURCE_LSB
+#define GPIO_PIN13_SOURCE_MASK WLAN_GPIO_PIN13_SOURCE_MASK
+#define GPIO_PIN13_SOURCE_GET(x) WLAN_GPIO_PIN13_SOURCE_GET(x)
+#define GPIO_PIN13_SOURCE_SET(x) WLAN_GPIO_PIN13_SOURCE_SET(x)
+#define GPIO_PIN14_ADDRESS WLAN_GPIO_PIN14_ADDRESS
+#define GPIO_PIN14_OFFSET WLAN_GPIO_PIN14_OFFSET
+#define GPIO_PIN14_CONFIG_MSB WLAN_GPIO_PIN14_CONFIG_MSB
+#define GPIO_PIN14_CONFIG_LSB WLAN_GPIO_PIN14_CONFIG_LSB
+#define GPIO_PIN14_CONFIG_MASK WLAN_GPIO_PIN14_CONFIG_MASK
+#define GPIO_PIN14_CONFIG_GET(x) WLAN_GPIO_PIN14_CONFIG_GET(x)
+#define GPIO_PIN14_CONFIG_SET(x) WLAN_GPIO_PIN14_CONFIG_SET(x)
+#define GPIO_PIN14_WAKEUP_ENABLE_MSB WLAN_GPIO_PIN14_WAKEUP_ENABLE_MSB
+#define GPIO_PIN14_WAKEUP_ENABLE_LSB WLAN_GPIO_PIN14_WAKEUP_ENABLE_LSB
+#define GPIO_PIN14_WAKEUP_ENABLE_MASK WLAN_GPIO_PIN14_WAKEUP_ENABLE_MASK
+#define GPIO_PIN14_WAKEUP_ENABLE_GET(x) WLAN_GPIO_PIN14_WAKEUP_ENABLE_GET(x)
+#define GPIO_PIN14_WAKEUP_ENABLE_SET(x) WLAN_GPIO_PIN14_WAKEUP_ENABLE_SET(x)
+#define GPIO_PIN14_INT_TYPE_MSB WLAN_GPIO_PIN14_INT_TYPE_MSB
+#define GPIO_PIN14_INT_TYPE_LSB WLAN_GPIO_PIN14_INT_TYPE_LSB
+#define GPIO_PIN14_INT_TYPE_MASK WLAN_GPIO_PIN14_INT_TYPE_MASK
+#define GPIO_PIN14_INT_TYPE_GET(x) WLAN_GPIO_PIN14_INT_TYPE_GET(x)
+#define GPIO_PIN14_INT_TYPE_SET(x) WLAN_GPIO_PIN14_INT_TYPE_SET(x)
+#define GPIO_PIN14_PAD_PULL_MSB WLAN_GPIO_PIN14_PAD_PULL_MSB
+#define GPIO_PIN14_PAD_PULL_LSB WLAN_GPIO_PIN14_PAD_PULL_LSB
+#define GPIO_PIN14_PAD_PULL_MASK WLAN_GPIO_PIN14_PAD_PULL_MASK
+#define GPIO_PIN14_PAD_PULL_GET(x) WLAN_GPIO_PIN14_PAD_PULL_GET(x)
+#define GPIO_PIN14_PAD_PULL_SET(x) WLAN_GPIO_PIN14_PAD_PULL_SET(x)
+#define GPIO_PIN14_PAD_STRENGTH_MSB WLAN_GPIO_PIN14_PAD_STRENGTH_MSB
+#define GPIO_PIN14_PAD_STRENGTH_LSB WLAN_GPIO_PIN14_PAD_STRENGTH_LSB
+#define GPIO_PIN14_PAD_STRENGTH_MASK WLAN_GPIO_PIN14_PAD_STRENGTH_MASK
+#define GPIO_PIN14_PAD_STRENGTH_GET(x) WLAN_GPIO_PIN14_PAD_STRENGTH_GET(x)
+#define GPIO_PIN14_PAD_STRENGTH_SET(x) WLAN_GPIO_PIN14_PAD_STRENGTH_SET(x)
+#define GPIO_PIN14_PAD_DRIVER_MSB WLAN_GPIO_PIN14_PAD_DRIVER_MSB
+#define GPIO_PIN14_PAD_DRIVER_LSB WLAN_GPIO_PIN14_PAD_DRIVER_LSB
+#define GPIO_PIN14_PAD_DRIVER_MASK WLAN_GPIO_PIN14_PAD_DRIVER_MASK
+#define GPIO_PIN14_PAD_DRIVER_GET(x) WLAN_GPIO_PIN14_PAD_DRIVER_GET(x)
+#define GPIO_PIN14_PAD_DRIVER_SET(x) WLAN_GPIO_PIN14_PAD_DRIVER_SET(x)
+#define GPIO_PIN14_SOURCE_MSB WLAN_GPIO_PIN14_SOURCE_MSB
+#define GPIO_PIN14_SOURCE_LSB WLAN_GPIO_PIN14_SOURCE_LSB
+#define GPIO_PIN14_SOURCE_MASK WLAN_GPIO_PIN14_SOURCE_MASK
+#define GPIO_PIN14_SOURCE_GET(x) WLAN_GPIO_PIN14_SOURCE_GET(x)
+#define GPIO_PIN14_SOURCE_SET(x) WLAN_GPIO_PIN14_SOURCE_SET(x)
+#define GPIO_PIN15_ADDRESS WLAN_GPIO_PIN15_ADDRESS
+#define GPIO_PIN15_OFFSET WLAN_GPIO_PIN15_OFFSET
+#define GPIO_PIN15_CONFIG_MSB WLAN_GPIO_PIN15_CONFIG_MSB
+#define GPIO_PIN15_CONFIG_LSB WLAN_GPIO_PIN15_CONFIG_LSB
+#define GPIO_PIN15_CONFIG_MASK WLAN_GPIO_PIN15_CONFIG_MASK
+#define GPIO_PIN15_CONFIG_GET(x) WLAN_GPIO_PIN15_CONFIG_GET(x)
+#define GPIO_PIN15_CONFIG_SET(x) WLAN_GPIO_PIN15_CONFIG_SET(x)
+#define GPIO_PIN15_WAKEUP_ENABLE_MSB WLAN_GPIO_PIN15_WAKEUP_ENABLE_MSB
+#define GPIO_PIN15_WAKEUP_ENABLE_LSB WLAN_GPIO_PIN15_WAKEUP_ENABLE_LSB
+#define GPIO_PIN15_WAKEUP_ENABLE_MASK WLAN_GPIO_PIN15_WAKEUP_ENABLE_MASK
+#define GPIO_PIN15_WAKEUP_ENABLE_GET(x) WLAN_GPIO_PIN15_WAKEUP_ENABLE_GET(x)
+#define GPIO_PIN15_WAKEUP_ENABLE_SET(x) WLAN_GPIO_PIN15_WAKEUP_ENABLE_SET(x)
+#define GPIO_PIN15_INT_TYPE_MSB WLAN_GPIO_PIN15_INT_TYPE_MSB
+#define GPIO_PIN15_INT_TYPE_LSB WLAN_GPIO_PIN15_INT_TYPE_LSB
+#define GPIO_PIN15_INT_TYPE_MASK WLAN_GPIO_PIN15_INT_TYPE_MASK
+#define GPIO_PIN15_INT_TYPE_GET(x) WLAN_GPIO_PIN15_INT_TYPE_GET(x)
+#define GPIO_PIN15_INT_TYPE_SET(x) WLAN_GPIO_PIN15_INT_TYPE_SET(x)
+#define GPIO_PIN15_PAD_PULL_MSB WLAN_GPIO_PIN15_PAD_PULL_MSB
+#define GPIO_PIN15_PAD_PULL_LSB WLAN_GPIO_PIN15_PAD_PULL_LSB
+#define GPIO_PIN15_PAD_PULL_MASK WLAN_GPIO_PIN15_PAD_PULL_MASK
+#define GPIO_PIN15_PAD_PULL_GET(x) WLAN_GPIO_PIN15_PAD_PULL_GET(x)
+#define GPIO_PIN15_PAD_PULL_SET(x) WLAN_GPIO_PIN15_PAD_PULL_SET(x)
+#define GPIO_PIN15_PAD_STRENGTH_MSB WLAN_GPIO_PIN15_PAD_STRENGTH_MSB
+#define GPIO_PIN15_PAD_STRENGTH_LSB WLAN_GPIO_PIN15_PAD_STRENGTH_LSB
+#define GPIO_PIN15_PAD_STRENGTH_MASK WLAN_GPIO_PIN15_PAD_STRENGTH_MASK
+#define GPIO_PIN15_PAD_STRENGTH_GET(x) WLAN_GPIO_PIN15_PAD_STRENGTH_GET(x)
+#define GPIO_PIN15_PAD_STRENGTH_SET(x) WLAN_GPIO_PIN15_PAD_STRENGTH_SET(x)
+#define GPIO_PIN15_PAD_DRIVER_MSB WLAN_GPIO_PIN15_PAD_DRIVER_MSB
+#define GPIO_PIN15_PAD_DRIVER_LSB WLAN_GPIO_PIN15_PAD_DRIVER_LSB
+#define GPIO_PIN15_PAD_DRIVER_MASK WLAN_GPIO_PIN15_PAD_DRIVER_MASK
+#define GPIO_PIN15_PAD_DRIVER_GET(x) WLAN_GPIO_PIN15_PAD_DRIVER_GET(x)
+#define GPIO_PIN15_PAD_DRIVER_SET(x) WLAN_GPIO_PIN15_PAD_DRIVER_SET(x)
+#define GPIO_PIN15_SOURCE_MSB WLAN_GPIO_PIN15_SOURCE_MSB
+#define GPIO_PIN15_SOURCE_LSB WLAN_GPIO_PIN15_SOURCE_LSB
+#define GPIO_PIN15_SOURCE_MASK WLAN_GPIO_PIN15_SOURCE_MASK
+#define GPIO_PIN15_SOURCE_GET(x) WLAN_GPIO_PIN15_SOURCE_GET(x)
+#define GPIO_PIN15_SOURCE_SET(x) WLAN_GPIO_PIN15_SOURCE_SET(x)
+#define GPIO_PIN16_ADDRESS WLAN_GPIO_PIN16_ADDRESS
+#define GPIO_PIN16_OFFSET WLAN_GPIO_PIN16_OFFSET
+#define GPIO_PIN16_CONFIG_MSB WLAN_GPIO_PIN16_CONFIG_MSB
+#define GPIO_PIN16_CONFIG_LSB WLAN_GPIO_PIN16_CONFIG_LSB
+#define GPIO_PIN16_CONFIG_MASK WLAN_GPIO_PIN16_CONFIG_MASK
+#define GPIO_PIN16_CONFIG_GET(x) WLAN_GPIO_PIN16_CONFIG_GET(x)
+#define GPIO_PIN16_CONFIG_SET(x) WLAN_GPIO_PIN16_CONFIG_SET(x)
+#define GPIO_PIN16_WAKEUP_ENABLE_MSB WLAN_GPIO_PIN16_WAKEUP_ENABLE_MSB
+#define GPIO_PIN16_WAKEUP_ENABLE_LSB WLAN_GPIO_PIN16_WAKEUP_ENABLE_LSB
+#define GPIO_PIN16_WAKEUP_ENABLE_MASK WLAN_GPIO_PIN16_WAKEUP_ENABLE_MASK
+#define GPIO_PIN16_WAKEUP_ENABLE_GET(x) WLAN_GPIO_PIN16_WAKEUP_ENABLE_GET(x)
+#define GPIO_PIN16_WAKEUP_ENABLE_SET(x) WLAN_GPIO_PIN16_WAKEUP_ENABLE_SET(x)
+#define GPIO_PIN16_INT_TYPE_MSB WLAN_GPIO_PIN16_INT_TYPE_MSB
+#define GPIO_PIN16_INT_TYPE_LSB WLAN_GPIO_PIN16_INT_TYPE_LSB
+#define GPIO_PIN16_INT_TYPE_MASK WLAN_GPIO_PIN16_INT_TYPE_MASK
+#define GPIO_PIN16_INT_TYPE_GET(x) WLAN_GPIO_PIN16_INT_TYPE_GET(x)
+#define GPIO_PIN16_INT_TYPE_SET(x) WLAN_GPIO_PIN16_INT_TYPE_SET(x)
+#define GPIO_PIN16_PAD_PULL_MSB WLAN_GPIO_PIN16_PAD_PULL_MSB
+#define GPIO_PIN16_PAD_PULL_LSB WLAN_GPIO_PIN16_PAD_PULL_LSB
+#define GPIO_PIN16_PAD_PULL_MASK WLAN_GPIO_PIN16_PAD_PULL_MASK
+#define GPIO_PIN16_PAD_PULL_GET(x) WLAN_GPIO_PIN16_PAD_PULL_GET(x)
+#define GPIO_PIN16_PAD_PULL_SET(x) WLAN_GPIO_PIN16_PAD_PULL_SET(x)
+#define GPIO_PIN16_PAD_STRENGTH_MSB WLAN_GPIO_PIN16_PAD_STRENGTH_MSB
+#define GPIO_PIN16_PAD_STRENGTH_LSB WLAN_GPIO_PIN16_PAD_STRENGTH_LSB
+#define GPIO_PIN16_PAD_STRENGTH_MASK WLAN_GPIO_PIN16_PAD_STRENGTH_MASK
+#define GPIO_PIN16_PAD_STRENGTH_GET(x) WLAN_GPIO_PIN16_PAD_STRENGTH_GET(x)
+#define GPIO_PIN16_PAD_STRENGTH_SET(x) WLAN_GPIO_PIN16_PAD_STRENGTH_SET(x)
+#define GPIO_PIN16_PAD_DRIVER_MSB WLAN_GPIO_PIN16_PAD_DRIVER_MSB
+#define GPIO_PIN16_PAD_DRIVER_LSB WLAN_GPIO_PIN16_PAD_DRIVER_LSB
+#define GPIO_PIN16_PAD_DRIVER_MASK WLAN_GPIO_PIN16_PAD_DRIVER_MASK
+#define GPIO_PIN16_PAD_DRIVER_GET(x) WLAN_GPIO_PIN16_PAD_DRIVER_GET(x)
+#define GPIO_PIN16_PAD_DRIVER_SET(x) WLAN_GPIO_PIN16_PAD_DRIVER_SET(x)
+#define GPIO_PIN16_SOURCE_MSB WLAN_GPIO_PIN16_SOURCE_MSB
+#define GPIO_PIN16_SOURCE_LSB WLAN_GPIO_PIN16_SOURCE_LSB
+#define GPIO_PIN16_SOURCE_MASK WLAN_GPIO_PIN16_SOURCE_MASK
+#define GPIO_PIN16_SOURCE_GET(x) WLAN_GPIO_PIN16_SOURCE_GET(x)
+#define GPIO_PIN16_SOURCE_SET(x) WLAN_GPIO_PIN16_SOURCE_SET(x)
+#define GPIO_PIN17_ADDRESS WLAN_GPIO_PIN17_ADDRESS
+#define GPIO_PIN17_OFFSET WLAN_GPIO_PIN17_OFFSET
+#define GPIO_PIN17_CONFIG_MSB WLAN_GPIO_PIN17_CONFIG_MSB
+#define GPIO_PIN17_CONFIG_LSB WLAN_GPIO_PIN17_CONFIG_LSB
+#define GPIO_PIN17_CONFIG_MASK WLAN_GPIO_PIN17_CONFIG_MASK
+#define GPIO_PIN17_CONFIG_GET(x) WLAN_GPIO_PIN17_CONFIG_GET(x)
+#define GPIO_PIN17_CONFIG_SET(x) WLAN_GPIO_PIN17_CONFIG_SET(x)
+#define GPIO_PIN17_WAKEUP_ENABLE_MSB WLAN_GPIO_PIN17_WAKEUP_ENABLE_MSB
+#define GPIO_PIN17_WAKEUP_ENABLE_LSB WLAN_GPIO_PIN17_WAKEUP_ENABLE_LSB
+#define GPIO_PIN17_WAKEUP_ENABLE_MASK WLAN_GPIO_PIN17_WAKEUP_ENABLE_MASK
+#define GPIO_PIN17_WAKEUP_ENABLE_GET(x) WLAN_GPIO_PIN17_WAKEUP_ENABLE_GET(x)
+#define GPIO_PIN17_WAKEUP_ENABLE_SET(x) WLAN_GPIO_PIN17_WAKEUP_ENABLE_SET(x)
+#define GPIO_PIN17_INT_TYPE_MSB WLAN_GPIO_PIN17_INT_TYPE_MSB
+#define GPIO_PIN17_INT_TYPE_LSB WLAN_GPIO_PIN17_INT_TYPE_LSB
+#define GPIO_PIN17_INT_TYPE_MASK WLAN_GPIO_PIN17_INT_TYPE_MASK
+#define GPIO_PIN17_INT_TYPE_GET(x) WLAN_GPIO_PIN17_INT_TYPE_GET(x)
+#define GPIO_PIN17_INT_TYPE_SET(x) WLAN_GPIO_PIN17_INT_TYPE_SET(x)
+#define GPIO_PIN17_PAD_PULL_MSB WLAN_GPIO_PIN17_PAD_PULL_MSB
+#define GPIO_PIN17_PAD_PULL_LSB WLAN_GPIO_PIN17_PAD_PULL_LSB
+#define GPIO_PIN17_PAD_PULL_MASK WLAN_GPIO_PIN17_PAD_PULL_MASK
+#define GPIO_PIN17_PAD_PULL_GET(x) WLAN_GPIO_PIN17_PAD_PULL_GET(x)
+#define GPIO_PIN17_PAD_PULL_SET(x) WLAN_GPIO_PIN17_PAD_PULL_SET(x)
+#define GPIO_PIN17_PAD_STRENGTH_MSB WLAN_GPIO_PIN17_PAD_STRENGTH_MSB
+#define GPIO_PIN17_PAD_STRENGTH_LSB WLAN_GPIO_PIN17_PAD_STRENGTH_LSB
+#define GPIO_PIN17_PAD_STRENGTH_MASK WLAN_GPIO_PIN17_PAD_STRENGTH_MASK
+#define GPIO_PIN17_PAD_STRENGTH_GET(x) WLAN_GPIO_PIN17_PAD_STRENGTH_GET(x)
+#define GPIO_PIN17_PAD_STRENGTH_SET(x) WLAN_GPIO_PIN17_PAD_STRENGTH_SET(x)
+#define GPIO_PIN17_PAD_DRIVER_MSB WLAN_GPIO_PIN17_PAD_DRIVER_MSB
+#define GPIO_PIN17_PAD_DRIVER_LSB WLAN_GPIO_PIN17_PAD_DRIVER_LSB
+#define GPIO_PIN17_PAD_DRIVER_MASK WLAN_GPIO_PIN17_PAD_DRIVER_MASK
+#define GPIO_PIN17_PAD_DRIVER_GET(x) WLAN_GPIO_PIN17_PAD_DRIVER_GET(x)
+#define GPIO_PIN17_PAD_DRIVER_SET(x) WLAN_GPIO_PIN17_PAD_DRIVER_SET(x)
+#define GPIO_PIN17_SOURCE_MSB WLAN_GPIO_PIN17_SOURCE_MSB
+#define GPIO_PIN17_SOURCE_LSB WLAN_GPIO_PIN17_SOURCE_LSB
+#define GPIO_PIN17_SOURCE_MASK WLAN_GPIO_PIN17_SOURCE_MASK
+#define GPIO_PIN17_SOURCE_GET(x) WLAN_GPIO_PIN17_SOURCE_GET(x)
+#define GPIO_PIN17_SOURCE_SET(x) WLAN_GPIO_PIN17_SOURCE_SET(x)
+#define GPIO_PIN18_ADDRESS WLAN_GPIO_PIN18_ADDRESS
+#define GPIO_PIN18_OFFSET WLAN_GPIO_PIN18_OFFSET
+#define GPIO_PIN18_CONFIG_MSB WLAN_GPIO_PIN18_CONFIG_MSB
+#define GPIO_PIN18_CONFIG_LSB WLAN_GPIO_PIN18_CONFIG_LSB
+#define GPIO_PIN18_CONFIG_MASK WLAN_GPIO_PIN18_CONFIG_MASK
+#define GPIO_PIN18_CONFIG_GET(x) WLAN_GPIO_PIN18_CONFIG_GET(x)
+#define GPIO_PIN18_CONFIG_SET(x) WLAN_GPIO_PIN18_CONFIG_SET(x)
+#define GPIO_PIN18_WAKEUP_ENABLE_MSB WLAN_GPIO_PIN18_WAKEUP_ENABLE_MSB
+#define GPIO_PIN18_WAKEUP_ENABLE_LSB WLAN_GPIO_PIN18_WAKEUP_ENABLE_LSB
+#define GPIO_PIN18_WAKEUP_ENABLE_MASK WLAN_GPIO_PIN18_WAKEUP_ENABLE_MASK
+#define GPIO_PIN18_WAKEUP_ENABLE_GET(x) WLAN_GPIO_PIN18_WAKEUP_ENABLE_GET(x)
+#define GPIO_PIN18_WAKEUP_ENABLE_SET(x) WLAN_GPIO_PIN18_WAKEUP_ENABLE_SET(x)
+#define GPIO_PIN18_INT_TYPE_MSB WLAN_GPIO_PIN18_INT_TYPE_MSB
+#define GPIO_PIN18_INT_TYPE_LSB WLAN_GPIO_PIN18_INT_TYPE_LSB
+#define GPIO_PIN18_INT_TYPE_MASK WLAN_GPIO_PIN18_INT_TYPE_MASK
+#define GPIO_PIN18_INT_TYPE_GET(x) WLAN_GPIO_PIN18_INT_TYPE_GET(x)
+#define GPIO_PIN18_INT_TYPE_SET(x) WLAN_GPIO_PIN18_INT_TYPE_SET(x)
+#define GPIO_PIN18_PAD_PULL_MSB WLAN_GPIO_PIN18_PAD_PULL_MSB
+#define GPIO_PIN18_PAD_PULL_LSB WLAN_GPIO_PIN18_PAD_PULL_LSB
+#define GPIO_PIN18_PAD_PULL_MASK WLAN_GPIO_PIN18_PAD_PULL_MASK
+#define GPIO_PIN18_PAD_PULL_GET(x) WLAN_GPIO_PIN18_PAD_PULL_GET(x)
+#define GPIO_PIN18_PAD_PULL_SET(x) WLAN_GPIO_PIN18_PAD_PULL_SET(x)
+#define GPIO_PIN18_PAD_STRENGTH_MSB WLAN_GPIO_PIN18_PAD_STRENGTH_MSB
+#define GPIO_PIN18_PAD_STRENGTH_LSB WLAN_GPIO_PIN18_PAD_STRENGTH_LSB
+#define GPIO_PIN18_PAD_STRENGTH_MASK WLAN_GPIO_PIN18_PAD_STRENGTH_MASK
+#define GPIO_PIN18_PAD_STRENGTH_GET(x) WLAN_GPIO_PIN18_PAD_STRENGTH_GET(x)
+#define GPIO_PIN18_PAD_STRENGTH_SET(x) WLAN_GPIO_PIN18_PAD_STRENGTH_SET(x)
+#define GPIO_PIN18_PAD_DRIVER_MSB WLAN_GPIO_PIN18_PAD_DRIVER_MSB
+#define GPIO_PIN18_PAD_DRIVER_LSB WLAN_GPIO_PIN18_PAD_DRIVER_LSB
+#define GPIO_PIN18_PAD_DRIVER_MASK WLAN_GPIO_PIN18_PAD_DRIVER_MASK
+#define GPIO_PIN18_PAD_DRIVER_GET(x) WLAN_GPIO_PIN18_PAD_DRIVER_GET(x)
+#define GPIO_PIN18_PAD_DRIVER_SET(x) WLAN_GPIO_PIN18_PAD_DRIVER_SET(x)
+#define GPIO_PIN18_SOURCE_MSB WLAN_GPIO_PIN18_SOURCE_MSB
+#define GPIO_PIN18_SOURCE_LSB WLAN_GPIO_PIN18_SOURCE_LSB
+#define GPIO_PIN18_SOURCE_MASK WLAN_GPIO_PIN18_SOURCE_MASK
+#define GPIO_PIN18_SOURCE_GET(x) WLAN_GPIO_PIN18_SOURCE_GET(x)
+#define GPIO_PIN18_SOURCE_SET(x) WLAN_GPIO_PIN18_SOURCE_SET(x)
+#define GPIO_PIN19_ADDRESS WLAN_GPIO_PIN19_ADDRESS
+#define GPIO_PIN19_OFFSET WLAN_GPIO_PIN19_OFFSET
+#define GPIO_PIN19_CONFIG_MSB WLAN_GPIO_PIN19_CONFIG_MSB
+#define GPIO_PIN19_CONFIG_LSB WLAN_GPIO_PIN19_CONFIG_LSB
+#define GPIO_PIN19_CONFIG_MASK WLAN_GPIO_PIN19_CONFIG_MASK
+#define GPIO_PIN19_CONFIG_GET(x) WLAN_GPIO_PIN19_CONFIG_GET(x)
+#define GPIO_PIN19_CONFIG_SET(x) WLAN_GPIO_PIN19_CONFIG_SET(x)
+#define GPIO_PIN19_WAKEUP_ENABLE_MSB WLAN_GPIO_PIN19_WAKEUP_ENABLE_MSB
+#define GPIO_PIN19_WAKEUP_ENABLE_LSB WLAN_GPIO_PIN19_WAKEUP_ENABLE_LSB
+#define GPIO_PIN19_WAKEUP_ENABLE_MASK WLAN_GPIO_PIN19_WAKEUP_ENABLE_MASK
+#define GPIO_PIN19_WAKEUP_ENABLE_GET(x) WLAN_GPIO_PIN19_WAKEUP_ENABLE_GET(x)
+#define GPIO_PIN19_WAKEUP_ENABLE_SET(x) WLAN_GPIO_PIN19_WAKEUP_ENABLE_SET(x)
+#define GPIO_PIN19_INT_TYPE_MSB WLAN_GPIO_PIN19_INT_TYPE_MSB
+#define GPIO_PIN19_INT_TYPE_LSB WLAN_GPIO_PIN19_INT_TYPE_LSB
+#define GPIO_PIN19_INT_TYPE_MASK WLAN_GPIO_PIN19_INT_TYPE_MASK
+#define GPIO_PIN19_INT_TYPE_GET(x) WLAN_GPIO_PIN19_INT_TYPE_GET(x)
+#define GPIO_PIN19_INT_TYPE_SET(x) WLAN_GPIO_PIN19_INT_TYPE_SET(x)
+#define GPIO_PIN19_PAD_PULL_MSB WLAN_GPIO_PIN19_PAD_PULL_MSB
+#define GPIO_PIN19_PAD_PULL_LSB WLAN_GPIO_PIN19_PAD_PULL_LSB
+#define GPIO_PIN19_PAD_PULL_MASK WLAN_GPIO_PIN19_PAD_PULL_MASK
+#define GPIO_PIN19_PAD_PULL_GET(x) WLAN_GPIO_PIN19_PAD_PULL_GET(x)
+#define GPIO_PIN19_PAD_PULL_SET(x) WLAN_GPIO_PIN19_PAD_PULL_SET(x)
+#define GPIO_PIN19_PAD_STRENGTH_MSB WLAN_GPIO_PIN19_PAD_STRENGTH_MSB
+#define GPIO_PIN19_PAD_STRENGTH_LSB WLAN_GPIO_PIN19_PAD_STRENGTH_LSB
+#define GPIO_PIN19_PAD_STRENGTH_MASK WLAN_GPIO_PIN19_PAD_STRENGTH_MASK
+#define GPIO_PIN19_PAD_STRENGTH_GET(x) WLAN_GPIO_PIN19_PAD_STRENGTH_GET(x)
+#define GPIO_PIN19_PAD_STRENGTH_SET(x) WLAN_GPIO_PIN19_PAD_STRENGTH_SET(x)
+#define GPIO_PIN19_PAD_DRIVER_MSB WLAN_GPIO_PIN19_PAD_DRIVER_MSB
+#define GPIO_PIN19_PAD_DRIVER_LSB WLAN_GPIO_PIN19_PAD_DRIVER_LSB
+#define GPIO_PIN19_PAD_DRIVER_MASK WLAN_GPIO_PIN19_PAD_DRIVER_MASK
+#define GPIO_PIN19_PAD_DRIVER_GET(x) WLAN_GPIO_PIN19_PAD_DRIVER_GET(x)
+#define GPIO_PIN19_PAD_DRIVER_SET(x) WLAN_GPIO_PIN19_PAD_DRIVER_SET(x)
+#define GPIO_PIN19_SOURCE_MSB WLAN_GPIO_PIN19_SOURCE_MSB
+#define GPIO_PIN19_SOURCE_LSB WLAN_GPIO_PIN19_SOURCE_LSB
+#define GPIO_PIN19_SOURCE_MASK WLAN_GPIO_PIN19_SOURCE_MASK
+#define GPIO_PIN19_SOURCE_GET(x) WLAN_GPIO_PIN19_SOURCE_GET(x)
+#define GPIO_PIN19_SOURCE_SET(x) WLAN_GPIO_PIN19_SOURCE_SET(x)
+#define GPIO_PIN20_ADDRESS WLAN_GPIO_PIN20_ADDRESS
+#define GPIO_PIN20_OFFSET WLAN_GPIO_PIN20_OFFSET
+#define GPIO_PIN20_CONFIG_MSB WLAN_GPIO_PIN20_CONFIG_MSB
+#define GPIO_PIN20_CONFIG_LSB WLAN_GPIO_PIN20_CONFIG_LSB
+#define GPIO_PIN20_CONFIG_MASK WLAN_GPIO_PIN20_CONFIG_MASK
+#define GPIO_PIN20_CONFIG_GET(x) WLAN_GPIO_PIN20_CONFIG_GET(x)
+#define GPIO_PIN20_CONFIG_SET(x) WLAN_GPIO_PIN20_CONFIG_SET(x)
+#define GPIO_PIN20_WAKEUP_ENABLE_MSB WLAN_GPIO_PIN20_WAKEUP_ENABLE_MSB
+#define GPIO_PIN20_WAKEUP_ENABLE_LSB WLAN_GPIO_PIN20_WAKEUP_ENABLE_LSB
+#define GPIO_PIN20_WAKEUP_ENABLE_MASK WLAN_GPIO_PIN20_WAKEUP_ENABLE_MASK
+#define GPIO_PIN20_WAKEUP_ENABLE_GET(x) WLAN_GPIO_PIN20_WAKEUP_ENABLE_GET(x)
+#define GPIO_PIN20_WAKEUP_ENABLE_SET(x) WLAN_GPIO_PIN20_WAKEUP_ENABLE_SET(x)
+#define GPIO_PIN20_INT_TYPE_MSB WLAN_GPIO_PIN20_INT_TYPE_MSB
+#define GPIO_PIN20_INT_TYPE_LSB WLAN_GPIO_PIN20_INT_TYPE_LSB
+#define GPIO_PIN20_INT_TYPE_MASK WLAN_GPIO_PIN20_INT_TYPE_MASK
+#define GPIO_PIN20_INT_TYPE_GET(x) WLAN_GPIO_PIN20_INT_TYPE_GET(x)
+#define GPIO_PIN20_INT_TYPE_SET(x) WLAN_GPIO_PIN20_INT_TYPE_SET(x)
+#define GPIO_PIN20_PAD_PULL_MSB WLAN_GPIO_PIN20_PAD_PULL_MSB
+#define GPIO_PIN20_PAD_PULL_LSB WLAN_GPIO_PIN20_PAD_PULL_LSB
+#define GPIO_PIN20_PAD_PULL_MASK WLAN_GPIO_PIN20_PAD_PULL_MASK
+#define GPIO_PIN20_PAD_PULL_GET(x) WLAN_GPIO_PIN20_PAD_PULL_GET(x)
+#define GPIO_PIN20_PAD_PULL_SET(x) WLAN_GPIO_PIN20_PAD_PULL_SET(x)
+#define GPIO_PIN20_PAD_STRENGTH_MSB WLAN_GPIO_PIN20_PAD_STRENGTH_MSB
+#define GPIO_PIN20_PAD_STRENGTH_LSB WLAN_GPIO_PIN20_PAD_STRENGTH_LSB
+#define GPIO_PIN20_PAD_STRENGTH_MASK WLAN_GPIO_PIN20_PAD_STRENGTH_MASK
+#define GPIO_PIN20_PAD_STRENGTH_GET(x) WLAN_GPIO_PIN20_PAD_STRENGTH_GET(x)
+#define GPIO_PIN20_PAD_STRENGTH_SET(x) WLAN_GPIO_PIN20_PAD_STRENGTH_SET(x)
+#define GPIO_PIN20_PAD_DRIVER_MSB WLAN_GPIO_PIN20_PAD_DRIVER_MSB
+#define GPIO_PIN20_PAD_DRIVER_LSB WLAN_GPIO_PIN20_PAD_DRIVER_LSB
+#define GPIO_PIN20_PAD_DRIVER_MASK WLAN_GPIO_PIN20_PAD_DRIVER_MASK
+#define GPIO_PIN20_PAD_DRIVER_GET(x) WLAN_GPIO_PIN20_PAD_DRIVER_GET(x)
+#define GPIO_PIN20_PAD_DRIVER_SET(x) WLAN_GPIO_PIN20_PAD_DRIVER_SET(x)
+#define GPIO_PIN20_SOURCE_MSB WLAN_GPIO_PIN20_SOURCE_MSB
+#define GPIO_PIN20_SOURCE_LSB WLAN_GPIO_PIN20_SOURCE_LSB
+#define GPIO_PIN20_SOURCE_MASK WLAN_GPIO_PIN20_SOURCE_MASK
+#define GPIO_PIN20_SOURCE_GET(x) WLAN_GPIO_PIN20_SOURCE_GET(x)
+#define GPIO_PIN20_SOURCE_SET(x) WLAN_GPIO_PIN20_SOURCE_SET(x)
+#define GPIO_PIN21_ADDRESS WLAN_GPIO_PIN21_ADDRESS
+#define GPIO_PIN21_OFFSET WLAN_GPIO_PIN21_OFFSET
+#define GPIO_PIN21_CONFIG_MSB WLAN_GPIO_PIN21_CONFIG_MSB
+#define GPIO_PIN21_CONFIG_LSB WLAN_GPIO_PIN21_CONFIG_LSB
+#define GPIO_PIN21_CONFIG_MASK WLAN_GPIO_PIN21_CONFIG_MASK
+#define GPIO_PIN21_CONFIG_GET(x) WLAN_GPIO_PIN21_CONFIG_GET(x)
+#define GPIO_PIN21_CONFIG_SET(x) WLAN_GPIO_PIN21_CONFIG_SET(x)
+#define GPIO_PIN21_WAKEUP_ENABLE_MSB WLAN_GPIO_PIN21_WAKEUP_ENABLE_MSB
+#define GPIO_PIN21_WAKEUP_ENABLE_LSB WLAN_GPIO_PIN21_WAKEUP_ENABLE_LSB
+#define GPIO_PIN21_WAKEUP_ENABLE_MASK WLAN_GPIO_PIN21_WAKEUP_ENABLE_MASK
+#define GPIO_PIN21_WAKEUP_ENABLE_GET(x) WLAN_GPIO_PIN21_WAKEUP_ENABLE_GET(x)
+#define GPIO_PIN21_WAKEUP_ENABLE_SET(x) WLAN_GPIO_PIN21_WAKEUP_ENABLE_SET(x)
+#define GPIO_PIN21_INT_TYPE_MSB WLAN_GPIO_PIN21_INT_TYPE_MSB
+#define GPIO_PIN21_INT_TYPE_LSB WLAN_GPIO_PIN21_INT_TYPE_LSB
+#define GPIO_PIN21_INT_TYPE_MASK WLAN_GPIO_PIN21_INT_TYPE_MASK
+#define GPIO_PIN21_INT_TYPE_GET(x) WLAN_GPIO_PIN21_INT_TYPE_GET(x)
+#define GPIO_PIN21_INT_TYPE_SET(x) WLAN_GPIO_PIN21_INT_TYPE_SET(x)
+#define GPIO_PIN21_PAD_PULL_MSB WLAN_GPIO_PIN21_PAD_PULL_MSB
+#define GPIO_PIN21_PAD_PULL_LSB WLAN_GPIO_PIN21_PAD_PULL_LSB
+#define GPIO_PIN21_PAD_PULL_MASK WLAN_GPIO_PIN21_PAD_PULL_MASK
+#define GPIO_PIN21_PAD_PULL_GET(x) WLAN_GPIO_PIN21_PAD_PULL_GET(x)
+#define GPIO_PIN21_PAD_PULL_SET(x) WLAN_GPIO_PIN21_PAD_PULL_SET(x)
+#define GPIO_PIN21_PAD_STRENGTH_MSB WLAN_GPIO_PIN21_PAD_STRENGTH_MSB
+#define GPIO_PIN21_PAD_STRENGTH_LSB WLAN_GPIO_PIN21_PAD_STRENGTH_LSB
+#define GPIO_PIN21_PAD_STRENGTH_MASK WLAN_GPIO_PIN21_PAD_STRENGTH_MASK
+#define GPIO_PIN21_PAD_STRENGTH_GET(x) WLAN_GPIO_PIN21_PAD_STRENGTH_GET(x)
+#define GPIO_PIN21_PAD_STRENGTH_SET(x) WLAN_GPIO_PIN21_PAD_STRENGTH_SET(x)
+#define GPIO_PIN21_PAD_DRIVER_MSB WLAN_GPIO_PIN21_PAD_DRIVER_MSB
+#define GPIO_PIN21_PAD_DRIVER_LSB WLAN_GPIO_PIN21_PAD_DRIVER_LSB
+#define GPIO_PIN21_PAD_DRIVER_MASK WLAN_GPIO_PIN21_PAD_DRIVER_MASK
+#define GPIO_PIN21_PAD_DRIVER_GET(x) WLAN_GPIO_PIN21_PAD_DRIVER_GET(x)
+#define GPIO_PIN21_PAD_DRIVER_SET(x) WLAN_GPIO_PIN21_PAD_DRIVER_SET(x)
+#define GPIO_PIN21_SOURCE_MSB WLAN_GPIO_PIN21_SOURCE_MSB
+#define GPIO_PIN21_SOURCE_LSB WLAN_GPIO_PIN21_SOURCE_LSB
+#define GPIO_PIN21_SOURCE_MASK WLAN_GPIO_PIN21_SOURCE_MASK
+#define GPIO_PIN21_SOURCE_GET(x) WLAN_GPIO_PIN21_SOURCE_GET(x)
+#define GPIO_PIN21_SOURCE_SET(x) WLAN_GPIO_PIN21_SOURCE_SET(x)
+#define GPIO_PIN22_ADDRESS WLAN_GPIO_PIN22_ADDRESS
+#define GPIO_PIN22_OFFSET WLAN_GPIO_PIN22_OFFSET
+#define GPIO_PIN22_CONFIG_MSB WLAN_GPIO_PIN22_CONFIG_MSB
+#define GPIO_PIN22_CONFIG_LSB WLAN_GPIO_PIN22_CONFIG_LSB
+#define GPIO_PIN22_CONFIG_MASK WLAN_GPIO_PIN22_CONFIG_MASK
+#define GPIO_PIN22_CONFIG_GET(x) WLAN_GPIO_PIN22_CONFIG_GET(x)
+#define GPIO_PIN22_CONFIG_SET(x) WLAN_GPIO_PIN22_CONFIG_SET(x)
+#define GPIO_PIN22_WAKEUP_ENABLE_MSB WLAN_GPIO_PIN22_WAKEUP_ENABLE_MSB
+#define GPIO_PIN22_WAKEUP_ENABLE_LSB WLAN_GPIO_PIN22_WAKEUP_ENABLE_LSB
+#define GPIO_PIN22_WAKEUP_ENABLE_MASK WLAN_GPIO_PIN22_WAKEUP_ENABLE_MASK
+#define GPIO_PIN22_WAKEUP_ENABLE_GET(x) WLAN_GPIO_PIN22_WAKEUP_ENABLE_GET(x)
+#define GPIO_PIN22_WAKEUP_ENABLE_SET(x) WLAN_GPIO_PIN22_WAKEUP_ENABLE_SET(x)
+#define GPIO_PIN22_INT_TYPE_MSB WLAN_GPIO_PIN22_INT_TYPE_MSB
+#define GPIO_PIN22_INT_TYPE_LSB WLAN_GPIO_PIN22_INT_TYPE_LSB
+#define GPIO_PIN22_INT_TYPE_MASK WLAN_GPIO_PIN22_INT_TYPE_MASK
+#define GPIO_PIN22_INT_TYPE_GET(x) WLAN_GPIO_PIN22_INT_TYPE_GET(x)
+#define GPIO_PIN22_INT_TYPE_SET(x) WLAN_GPIO_PIN22_INT_TYPE_SET(x)
+#define GPIO_PIN22_PAD_PULL_MSB WLAN_GPIO_PIN22_PAD_PULL_MSB
+#define GPIO_PIN22_PAD_PULL_LSB WLAN_GPIO_PIN22_PAD_PULL_LSB
+#define GPIO_PIN22_PAD_PULL_MASK WLAN_GPIO_PIN22_PAD_PULL_MASK
+#define GPIO_PIN22_PAD_PULL_GET(x) WLAN_GPIO_PIN22_PAD_PULL_GET(x)
+#define GPIO_PIN22_PAD_PULL_SET(x) WLAN_GPIO_PIN22_PAD_PULL_SET(x)
+#define GPIO_PIN22_PAD_STRENGTH_MSB WLAN_GPIO_PIN22_PAD_STRENGTH_MSB
+#define GPIO_PIN22_PAD_STRENGTH_LSB WLAN_GPIO_PIN22_PAD_STRENGTH_LSB
+#define GPIO_PIN22_PAD_STRENGTH_MASK WLAN_GPIO_PIN22_PAD_STRENGTH_MASK
+#define GPIO_PIN22_PAD_STRENGTH_GET(x) WLAN_GPIO_PIN22_PAD_STRENGTH_GET(x)
+#define GPIO_PIN22_PAD_STRENGTH_SET(x) WLAN_GPIO_PIN22_PAD_STRENGTH_SET(x)
+#define GPIO_PIN22_PAD_DRIVER_MSB WLAN_GPIO_PIN22_PAD_DRIVER_MSB
+#define GPIO_PIN22_PAD_DRIVER_LSB WLAN_GPIO_PIN22_PAD_DRIVER_LSB
+#define GPIO_PIN22_PAD_DRIVER_MASK WLAN_GPIO_PIN22_PAD_DRIVER_MASK
+#define GPIO_PIN22_PAD_DRIVER_GET(x) WLAN_GPIO_PIN22_PAD_DRIVER_GET(x)
+#define GPIO_PIN22_PAD_DRIVER_SET(x) WLAN_GPIO_PIN22_PAD_DRIVER_SET(x)
+#define GPIO_PIN22_SOURCE_MSB WLAN_GPIO_PIN22_SOURCE_MSB
+#define GPIO_PIN22_SOURCE_LSB WLAN_GPIO_PIN22_SOURCE_LSB
+#define GPIO_PIN22_SOURCE_MASK WLAN_GPIO_PIN22_SOURCE_MASK
+#define GPIO_PIN22_SOURCE_GET(x) WLAN_GPIO_PIN22_SOURCE_GET(x)
+#define GPIO_PIN22_SOURCE_SET(x) WLAN_GPIO_PIN22_SOURCE_SET(x)
+#define GPIO_PIN23_ADDRESS WLAN_GPIO_PIN23_ADDRESS
+#define GPIO_PIN23_OFFSET WLAN_GPIO_PIN23_OFFSET
+#define GPIO_PIN23_CONFIG_MSB WLAN_GPIO_PIN23_CONFIG_MSB
+#define GPIO_PIN23_CONFIG_LSB WLAN_GPIO_PIN23_CONFIG_LSB
+#define GPIO_PIN23_CONFIG_MASK WLAN_GPIO_PIN23_CONFIG_MASK
+#define GPIO_PIN23_CONFIG_GET(x) WLAN_GPIO_PIN23_CONFIG_GET(x)
+#define GPIO_PIN23_CONFIG_SET(x) WLAN_GPIO_PIN23_CONFIG_SET(x)
+#define GPIO_PIN23_WAKEUP_ENABLE_MSB WLAN_GPIO_PIN23_WAKEUP_ENABLE_MSB
+#define GPIO_PIN23_WAKEUP_ENABLE_LSB WLAN_GPIO_PIN23_WAKEUP_ENABLE_LSB
+#define GPIO_PIN23_WAKEUP_ENABLE_MASK WLAN_GPIO_PIN23_WAKEUP_ENABLE_MASK
+#define GPIO_PIN23_WAKEUP_ENABLE_GET(x) WLAN_GPIO_PIN23_WAKEUP_ENABLE_GET(x)
+#define GPIO_PIN23_WAKEUP_ENABLE_SET(x) WLAN_GPIO_PIN23_WAKEUP_ENABLE_SET(x)
+#define GPIO_PIN23_INT_TYPE_MSB WLAN_GPIO_PIN23_INT_TYPE_MSB
+#define GPIO_PIN23_INT_TYPE_LSB WLAN_GPIO_PIN23_INT_TYPE_LSB
+#define GPIO_PIN23_INT_TYPE_MASK WLAN_GPIO_PIN23_INT_TYPE_MASK
+#define GPIO_PIN23_INT_TYPE_GET(x) WLAN_GPIO_PIN23_INT_TYPE_GET(x)
+#define GPIO_PIN23_INT_TYPE_SET(x) WLAN_GPIO_PIN23_INT_TYPE_SET(x)
+#define GPIO_PIN23_PAD_DRIVER_MSB WLAN_GPIO_PIN23_PAD_DRIVER_MSB
+#define GPIO_PIN23_PAD_DRIVER_LSB WLAN_GPIO_PIN23_PAD_DRIVER_LSB
+#define GPIO_PIN23_PAD_DRIVER_MASK WLAN_GPIO_PIN23_PAD_DRIVER_MASK
+#define GPIO_PIN23_PAD_DRIVER_GET(x) WLAN_GPIO_PIN23_PAD_DRIVER_GET(x)
+#define GPIO_PIN23_PAD_DRIVER_SET(x) WLAN_GPIO_PIN23_PAD_DRIVER_SET(x)
+#define GPIO_PIN23_SOURCE_MSB WLAN_GPIO_PIN23_SOURCE_MSB
+#define GPIO_PIN23_SOURCE_LSB WLAN_GPIO_PIN23_SOURCE_LSB
+#define GPIO_PIN23_SOURCE_MASK WLAN_GPIO_PIN23_SOURCE_MASK
+#define GPIO_PIN23_SOURCE_GET(x) WLAN_GPIO_PIN23_SOURCE_GET(x)
+#define GPIO_PIN23_SOURCE_SET(x) WLAN_GPIO_PIN23_SOURCE_SET(x)
+#define GPIO_PIN24_ADDRESS WLAN_GPIO_PIN24_ADDRESS
+#define GPIO_PIN24_OFFSET WLAN_GPIO_PIN24_OFFSET
+#define GPIO_PIN24_CONFIG_MSB WLAN_GPIO_PIN24_CONFIG_MSB
+#define GPIO_PIN24_CONFIG_LSB WLAN_GPIO_PIN24_CONFIG_LSB
+#define GPIO_PIN24_CONFIG_MASK WLAN_GPIO_PIN24_CONFIG_MASK
+#define GPIO_PIN24_CONFIG_GET(x) WLAN_GPIO_PIN24_CONFIG_GET(x)
+#define GPIO_PIN24_CONFIG_SET(x) WLAN_GPIO_PIN24_CONFIG_SET(x)
+#define GPIO_PIN24_WAKEUP_ENABLE_MSB WLAN_GPIO_PIN24_WAKEUP_ENABLE_MSB
+#define GPIO_PIN24_WAKEUP_ENABLE_LSB WLAN_GPIO_PIN24_WAKEUP_ENABLE_LSB
+#define GPIO_PIN24_WAKEUP_ENABLE_MASK WLAN_GPIO_PIN24_WAKEUP_ENABLE_MASK
+#define GPIO_PIN24_WAKEUP_ENABLE_GET(x) WLAN_GPIO_PIN24_WAKEUP_ENABLE_GET(x)
+#define GPIO_PIN24_WAKEUP_ENABLE_SET(x) WLAN_GPIO_PIN24_WAKEUP_ENABLE_SET(x)
+#define GPIO_PIN24_INT_TYPE_MSB WLAN_GPIO_PIN24_INT_TYPE_MSB
+#define GPIO_PIN24_INT_TYPE_LSB WLAN_GPIO_PIN24_INT_TYPE_LSB
+#define GPIO_PIN24_INT_TYPE_MASK WLAN_GPIO_PIN24_INT_TYPE_MASK
+#define GPIO_PIN24_INT_TYPE_GET(x) WLAN_GPIO_PIN24_INT_TYPE_GET(x)
+#define GPIO_PIN24_INT_TYPE_SET(x) WLAN_GPIO_PIN24_INT_TYPE_SET(x)
+#define GPIO_PIN24_PAD_DRIVER_MSB WLAN_GPIO_PIN24_PAD_DRIVER_MSB
+#define GPIO_PIN24_PAD_DRIVER_LSB WLAN_GPIO_PIN24_PAD_DRIVER_LSB
+#define GPIO_PIN24_PAD_DRIVER_MASK WLAN_GPIO_PIN24_PAD_DRIVER_MASK
+#define GPIO_PIN24_PAD_DRIVER_GET(x) WLAN_GPIO_PIN24_PAD_DRIVER_GET(x)
+#define GPIO_PIN24_PAD_DRIVER_SET(x) WLAN_GPIO_PIN24_PAD_DRIVER_SET(x)
+#define GPIO_PIN24_SOURCE_MSB WLAN_GPIO_PIN24_SOURCE_MSB
+#define GPIO_PIN24_SOURCE_LSB WLAN_GPIO_PIN24_SOURCE_LSB
+#define GPIO_PIN24_SOURCE_MASK WLAN_GPIO_PIN24_SOURCE_MASK
+#define GPIO_PIN24_SOURCE_GET(x) WLAN_GPIO_PIN24_SOURCE_GET(x)
+#define GPIO_PIN24_SOURCE_SET(x) WLAN_GPIO_PIN24_SOURCE_SET(x)
+#define GPIO_PIN25_ADDRESS WLAN_GPIO_PIN25_ADDRESS
+#define GPIO_PIN25_OFFSET WLAN_GPIO_PIN25_OFFSET
+#define GPIO_PIN25_CONFIG_MSB WLAN_GPIO_PIN25_CONFIG_MSB
+#define GPIO_PIN25_CONFIG_LSB WLAN_GPIO_PIN25_CONFIG_LSB
+#define GPIO_PIN25_CONFIG_MASK WLAN_GPIO_PIN25_CONFIG_MASK
+#define GPIO_PIN25_CONFIG_GET(x) WLAN_GPIO_PIN25_CONFIG_GET(x)
+#define GPIO_PIN25_CONFIG_SET(x) WLAN_GPIO_PIN25_CONFIG_SET(x)
+#define GPIO_PIN25_WAKEUP_ENABLE_MSB WLAN_GPIO_PIN25_WAKEUP_ENABLE_MSB
+#define GPIO_PIN25_WAKEUP_ENABLE_LSB WLAN_GPIO_PIN25_WAKEUP_ENABLE_LSB
+#define GPIO_PIN25_WAKEUP_ENABLE_MASK WLAN_GPIO_PIN25_WAKEUP_ENABLE_MASK
+#define GPIO_PIN25_WAKEUP_ENABLE_GET(x) WLAN_GPIO_PIN25_WAKEUP_ENABLE_GET(x)
+#define GPIO_PIN25_WAKEUP_ENABLE_SET(x) WLAN_GPIO_PIN25_WAKEUP_ENABLE_SET(x)
+#define GPIO_PIN25_INT_TYPE_MSB WLAN_GPIO_PIN25_INT_TYPE_MSB
+#define GPIO_PIN25_INT_TYPE_LSB WLAN_GPIO_PIN25_INT_TYPE_LSB
+#define GPIO_PIN25_INT_TYPE_MASK WLAN_GPIO_PIN25_INT_TYPE_MASK
+#define GPIO_PIN25_INT_TYPE_GET(x) WLAN_GPIO_PIN25_INT_TYPE_GET(x)
+#define GPIO_PIN25_INT_TYPE_SET(x) WLAN_GPIO_PIN25_INT_TYPE_SET(x)
+#define GPIO_PIN25_PAD_DRIVER_MSB WLAN_GPIO_PIN25_PAD_DRIVER_MSB
+#define GPIO_PIN25_PAD_DRIVER_LSB WLAN_GPIO_PIN25_PAD_DRIVER_LSB
+#define GPIO_PIN25_PAD_DRIVER_MASK WLAN_GPIO_PIN25_PAD_DRIVER_MASK
+#define GPIO_PIN25_PAD_DRIVER_GET(x) WLAN_GPIO_PIN25_PAD_DRIVER_GET(x)
+#define GPIO_PIN25_PAD_DRIVER_SET(x) WLAN_GPIO_PIN25_PAD_DRIVER_SET(x)
+#define GPIO_PIN25_SOURCE_MSB WLAN_GPIO_PIN25_SOURCE_MSB
+#define GPIO_PIN25_SOURCE_LSB WLAN_GPIO_PIN25_SOURCE_LSB
+#define GPIO_PIN25_SOURCE_MASK WLAN_GPIO_PIN25_SOURCE_MASK
+#define GPIO_PIN25_SOURCE_GET(x) WLAN_GPIO_PIN25_SOURCE_GET(x)
+#define GPIO_PIN25_SOURCE_SET(x) WLAN_GPIO_PIN25_SOURCE_SET(x)
+#define SIGMA_DELTA_ADDRESS WLAN_SIGMA_DELTA_ADDRESS
+#define SIGMA_DELTA_OFFSET WLAN_SIGMA_DELTA_OFFSET
+#define SIGMA_DELTA_ENABLE_MSB WLAN_SIGMA_DELTA_ENABLE_MSB
+#define SIGMA_DELTA_ENABLE_LSB WLAN_SIGMA_DELTA_ENABLE_LSB
+#define SIGMA_DELTA_ENABLE_MASK WLAN_SIGMA_DELTA_ENABLE_MASK
+#define SIGMA_DELTA_ENABLE_GET(x) WLAN_SIGMA_DELTA_ENABLE_GET(x)
+#define SIGMA_DELTA_ENABLE_SET(x) WLAN_SIGMA_DELTA_ENABLE_SET(x)
+#define SIGMA_DELTA_PRESCALAR_MSB WLAN_SIGMA_DELTA_PRESCALAR_MSB
+#define SIGMA_DELTA_PRESCALAR_LSB WLAN_SIGMA_DELTA_PRESCALAR_LSB
+#define SIGMA_DELTA_PRESCALAR_MASK WLAN_SIGMA_DELTA_PRESCALAR_MASK
+#define SIGMA_DELTA_PRESCALAR_GET(x) WLAN_SIGMA_DELTA_PRESCALAR_GET(x)
+#define SIGMA_DELTA_PRESCALAR_SET(x) WLAN_SIGMA_DELTA_PRESCALAR_SET(x)
+#define SIGMA_DELTA_TARGET_MSB WLAN_SIGMA_DELTA_TARGET_MSB
+#define SIGMA_DELTA_TARGET_LSB WLAN_SIGMA_DELTA_TARGET_LSB
+#define SIGMA_DELTA_TARGET_MASK WLAN_SIGMA_DELTA_TARGET_MASK
+#define SIGMA_DELTA_TARGET_GET(x) WLAN_SIGMA_DELTA_TARGET_GET(x)
+#define SIGMA_DELTA_TARGET_SET(x) WLAN_SIGMA_DELTA_TARGET_SET(x)
+#define DEBUG_CONTROL_ADDRESS WLAN_DEBUG_CONTROL_ADDRESS
+#define DEBUG_CONTROL_OFFSET WLAN_DEBUG_CONTROL_OFFSET
+#define DEBUG_CONTROL_ENABLE_MSB WLAN_DEBUG_CONTROL_ENABLE_MSB
+#define DEBUG_CONTROL_ENABLE_LSB WLAN_DEBUG_CONTROL_ENABLE_LSB
+#define DEBUG_CONTROL_ENABLE_MASK WLAN_DEBUG_CONTROL_ENABLE_MASK
+#define DEBUG_CONTROL_ENABLE_GET(x) WLAN_DEBUG_CONTROL_ENABLE_GET(x)
+#define DEBUG_CONTROL_ENABLE_SET(x) WLAN_DEBUG_CONTROL_ENABLE_SET(x)
+#define DEBUG_INPUT_SEL_ADDRESS WLAN_DEBUG_INPUT_SEL_ADDRESS
+#define DEBUG_INPUT_SEL_OFFSET WLAN_DEBUG_INPUT_SEL_OFFSET
+#define DEBUG_INPUT_SEL_SHIFT_MSB WLAN_DEBUG_INPUT_SEL_SHIFT_MSB
+#define DEBUG_INPUT_SEL_SHIFT_LSB WLAN_DEBUG_INPUT_SEL_SHIFT_LSB
+#define DEBUG_INPUT_SEL_SHIFT_MASK WLAN_DEBUG_INPUT_SEL_SHIFT_MASK
+#define DEBUG_INPUT_SEL_SHIFT_GET(x) WLAN_DEBUG_INPUT_SEL_SHIFT_GET(x)
+#define DEBUG_INPUT_SEL_SHIFT_SET(x) WLAN_DEBUG_INPUT_SEL_SHIFT_SET(x)
+#define DEBUG_INPUT_SEL_SRC_MSB WLAN_DEBUG_INPUT_SEL_SRC_MSB
+#define DEBUG_INPUT_SEL_SRC_LSB WLAN_DEBUG_INPUT_SEL_SRC_LSB
+#define DEBUG_INPUT_SEL_SRC_MASK WLAN_DEBUG_INPUT_SEL_SRC_MASK
+#define DEBUG_INPUT_SEL_SRC_GET(x) WLAN_DEBUG_INPUT_SEL_SRC_GET(x)
+#define DEBUG_INPUT_SEL_SRC_SET(x) WLAN_DEBUG_INPUT_SEL_SRC_SET(x)
+#define DEBUG_OUT_ADDRESS WLAN_DEBUG_OUT_ADDRESS
+#define DEBUG_OUT_OFFSET WLAN_DEBUG_OUT_OFFSET
+#define DEBUG_OUT_DATA_MSB WLAN_DEBUG_OUT_DATA_MSB
+#define DEBUG_OUT_DATA_LSB WLAN_DEBUG_OUT_DATA_LSB
+#define DEBUG_OUT_DATA_MASK WLAN_DEBUG_OUT_DATA_MASK
+#define DEBUG_OUT_DATA_GET(x) WLAN_DEBUG_OUT_DATA_GET(x)
+#define DEBUG_OUT_DATA_SET(x) WLAN_DEBUG_OUT_DATA_SET(x)
+#define RESET_TUPLE_STATUS_ADDRESS WLAN_RESET_TUPLE_STATUS_ADDRESS
+#define RESET_TUPLE_STATUS_OFFSET WLAN_RESET_TUPLE_STATUS_OFFSET
+#define RESET_TUPLE_STATUS_TEST_RESET_TUPLE_MSB WLAN_RESET_TUPLE_STATUS_TEST_RESET_TUPLE_MSB
+#define RESET_TUPLE_STATUS_TEST_RESET_TUPLE_LSB WLAN_RESET_TUPLE_STATUS_TEST_RESET_TUPLE_LSB
+#define RESET_TUPLE_STATUS_TEST_RESET_TUPLE_MASK WLAN_RESET_TUPLE_STATUS_TEST_RESET_TUPLE_MASK
+#define RESET_TUPLE_STATUS_TEST_RESET_TUPLE_GET(x) WLAN_RESET_TUPLE_STATUS_TEST_RESET_TUPLE_GET(x)
+#define RESET_TUPLE_STATUS_TEST_RESET_TUPLE_SET(x) WLAN_RESET_TUPLE_STATUS_TEST_RESET_TUPLE_SET(x)
+#define RESET_TUPLE_STATUS_PIN_RESET_TUPLE_MSB WLAN_RESET_TUPLE_STATUS_PIN_RESET_TUPLE_MSB
+#define RESET_TUPLE_STATUS_PIN_RESET_TUPLE_LSB WLAN_RESET_TUPLE_STATUS_PIN_RESET_TUPLE_LSB
+#define RESET_TUPLE_STATUS_PIN_RESET_TUPLE_MASK WLAN_RESET_TUPLE_STATUS_PIN_RESET_TUPLE_MASK
+#define RESET_TUPLE_STATUS_PIN_RESET_TUPLE_GET(x) WLAN_RESET_TUPLE_STATUS_PIN_RESET_TUPLE_GET(x)
+#define RESET_TUPLE_STATUS_PIN_RESET_TUPLE_SET(x) WLAN_RESET_TUPLE_STATUS_PIN_RESET_TUPLE_SET(x)
+
+
+#endif
+#endif
+
+
+
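For orientation, a minimal usage sketch (not taken from the patch): the un-prefixed GPIO_PIN* names above are straight aliases for the WLAN_-prefixed definitions, and each *_GET/*_SET pair applies the usual mask-and-shift pattern. The u32 type and the REG_READ32/REG_WRITE32 helpers below are hypothetical stand-ins for whatever register I/O the driver really provides.

/* Hypothetical accessors; the real driver supplies its own register I/O. */
extern u32 REG_READ32(void *dev, u32 addr);
extern void REG_WRITE32(void *dev, u32 addr, u32 val);

/* Sketch: update only the PAD_PULL field of the GPIO_PIN17 register. */
static void gpio_pin17_set_pull(void *dev, u32 pull)
{
	u32 val = REG_READ32(dev, GPIO_PIN17_ADDRESS);

	val &= ~GPIO_PIN17_PAD_PULL_MASK;      /* clear the old field value */
	val |= GPIO_PIN17_PAD_PULL_SET(pull);  /* position the new value */
	REG_WRITE32(dev, GPIO_PIN17_ADDRESS, val);
}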
diff --git a/drivers/staging/ath6kl/include/common/AR6002/hw4.0/hw/mac_dma_reg.h b/drivers/staging/ath6kl/include/common/AR6002/hw4.0/hw/mac_dma_reg.h
new file mode 100644
index 000000000000..f82f809171a0
--- /dev/null
+++ b/drivers/staging/ath6kl/include/common/AR6002/hw4.0/hw/mac_dma_reg.h
@@ -0,0 +1,605 @@
+// ------------------------------------------------------------------
+// Copyright (c) 2002-2010 Atheros Communications Inc.
+// All rights reserved.
+//
+//
+// Permission to use, copy, modify, and/or distribute this software for any
+// purpose with or without fee is hereby granted, provided that the above
+// copyright notice and this permission notice appear in all copies.
+//
+// THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+// WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+// MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+// ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+// WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+// ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+// OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+//
+//
+// ------------------------------------------------------------------
+//===================================================================
+// Author(s): ="Atheros"
+//===================================================================
+
+
+/*****************************************************************************/
+/* AR6003 WLAN MAC DMA register definitions */
+/*****************************************************************************/
+
+#ifndef _AR6000_DMAREG_H_
+#define _AR6000_DMAREG_H_
+
+/*
+ * Definitions for the Atheros AR6003 chipset.
+ */
+
+/* DMA Control and Interrupt Registers */
+#define MAC_DMA_CR_ADDRESS 0x00000008 /* MAC control register */
+#define MAC_DMA_CR_RXE_MASK 0x00000004 /* Receive enable */
+#define MAC_DMA_CR_RXD_MASK 0x00000020 /* Receive disable */
+#define MAC_DMA_CR_SWI_MASK 0x00000040 /* One-shot software interrupt */
+
+#define MAC_DMA_RXDP_ADDRESS 0x0000000C /* MAC receive queue descriptor pointer */
+
+#define MAC_DMA_CFG_ADDRESS 0x00000014 /* MAC configuration and status register */
+#define MAC_DMA_CFG_SWTD_MASK 0x00000001 /* byteswap tx descriptor words */
+#define MAC_DMA_CFG_SWTB_MASK 0x00000002 /* byteswap tx data buffer words */
+#define MAC_DMA_CFG_SWRD_MASK 0x00000004 /* byteswap rx descriptor words */
+#define MAC_DMA_CFG_SWRB_MASK 0x00000008 /* byteswap rx data buffer words */
+#define MAC_DMA_CFG_SWRG_MASK 0x00000010 /* byteswap register access data words */
+#define MAC_DMA_CFG_AP_ADHOC_INDICATION_MASK 0x00000020 /* AP/adhoc indication (0-AP, 1-Adhoc) */
+#define MAC_DMA_CFG_PHOK_MASK 0x00000100 /* PHY OK status */
+#define MAC_DMA_CFG_CLK_GATE_DIS_MASK 0x00000400 /* Clock gating disable */
+
+#define MAC_DMA_MIRT_ADDRESS 0x00000020 /* Maximum rate threshold register */
+#define MAC_DMA_MIRT_THRESH_MASK 0x0000FFFF
+
+#define MAC_DMA_IER_ADDRESS 0x00000024 /* MAC Interrupt enable register */
+#define MAC_DMA_IER_ENABLE_MASK 0x00000001 /* Global interrupt enable */
+#define MAC_DMA_IER_DISABLE_MASK 0x00000000 /* Global interrupt disable */
+
+#define MAC_DMA_TIMT_ADDRESS 0x00000028 /* Transmit Interrupt Mitigation Threshold */
+#define MAC_DMA_TIMT_LAST_PACKER_THRESH_MASK 0x0000FFFF /* Last packet threshold mask */
+#define MAC_DMA_TIMT_FIRST_PACKER_THRESH_MASK 0xFFFF0000 /* First packet threshold mask */
+
+#define MAC_DMA_RIMT_ADDRESS 0x0000002C /* Receive Interrupt Mitigation Threshold */
+#define MAC_DMA_RIMT_LAST_PACKER_THRESH_MASK 0x0000FFFF /* Last packet threshold mask */
+#define MAC_DMA_RIMT_FIRST_PACKER_THRESH_MASK 0xFFFF0000 /* First packet threshold mask */
+
+#define MAC_DMA_TXCFG_ADDRESS 0x00000030 /* MAC tx DMA size config register */
+#define MAC_DMA_FTRIG_MASK 0x000003F0 /* Mask for Frame trigger level */
+#define MAC_DMA_FTRIG_LSB 4 /* Shift for Frame trigger level */
+#define MAC_DMA_FTRIG_IMMED 0x00000000 /* bytes in PCU TX FIFO before air */
+#define MAC_DMA_FTRIG_64B 0x00000010 /* default */
+#define MAC_DMA_FTRIG_128B 0x00000020
+#define MAC_DMA_FTRIG_192B 0x00000030
+#define MAC_DMA_FTRIG_256B 0x00000040 /* 5 bits total */
+#define MAC_DMA_TXCFG_ADHOC_BEACON_ATIM_TX_POLICY_MASK 0x00000800
+
+#define MAC_DMA_RXCFG_ADDRESS 0x00000034 /* MAC rx DMA size config register */
+#define MAC_DMA_RXCFG_ZLFDMA_MASK 0x00000010 /* Enable DMA of zero-length frame */
+#define MAC_DMA_RXCFG_DMASIZE_4B 0x00000000 /* DMA size 4 bytes (TXCFG + RXCFG) */
+#define MAC_DMA_RXCFG_DMASIZE_8B 0x00000001 /* DMA size 8 bytes */
+#define MAC_DMA_RXCFG_DMASIZE_16B 0x00000002 /* DMA size 16 bytes */
+#define MAC_DMA_RXCFG_DMASIZE_32B 0x00000003 /* DMA size 32 bytes */
+#define MAC_DMA_RXCFG_DMASIZE_64B 0x00000004 /* DMA size 64 bytes */
+#define MAC_DMA_RXCFG_DMASIZE_128B 0x00000005 /* DMA size 128 bytes */
+#define MAC_DMA_RXCFG_DMASIZE_256B 0x00000006 /* DMA size 256 bytes */
+#define MAC_DMA_RXCFG_DMASIZE_512B 0x00000007 /* DMA size 512 bytes */
+
+#define MAC_DMA_MIBC_ADDRESS 0x00000040 /* MAC MIB control register */
+#define MAC_DMA_MIBC_COW_MASK 0x00000001 /* counter overflow warning */
+#define MAC_DMA_MIBC_FMC_MASK 0x00000002 /* freeze MIB counters */
+#define MAC_DMA_MIBC_CMC_MASK 0x00000004 /* clear MIB counters */
+#define MAC_DMA_MIBC_MCS_MASK 0x00000008 /* MIB counter strobe, increment all */
+
+#define MAC_DMA_TOPS_ADDRESS 0x00000044 /* MAC timeout prescale count */
+#define MAC_DMA_TOPS_MASK 0x0000FFFF /* Mask for timeout prescale */
+
+#define MAC_DMA_RXNPTO_ADDRESS 0x00000048 /* MAC no frame received timeout */
+#define MAC_DMA_RXNPTO_MASK 0x000003FF /* Mask for no frame received timeout */
+
+#define MAC_DMA_TXNPTO_ADDRESS 0x0000004C /* MAC no frame transmitted timeout */
+#define MAC_DMA_TXNPTO_MASK 0x000003FF /* Mask for no frame transmitted timeout */
+#define MAC_DMA_TXNPTO_QCU_MASK 0x000FFC00 /* Mask indicating the set of QCUs */
+ /* for which frame completions will cause */
+ /* a reset of the no frame xmit'd timeout */
+
+#define MAC_DMA_RPGTO_ADDRESS 0x00000050 /* MAC receive frame gap timeout */
+#define MAC_DMA_RPGTO_MASK 0x000003FF /* Mask for receive frame gap timeout */
+
+#define MAC_DMA_RPCNT_ADDRESS 0x00000054 /* MAC receive frame count limit */
+#define MAC_DMA_RPCNT_MASK 0x0000001F /* Mask for receive frame count limit */
+
+#define MAC_DMA_MACMISC_ADDRESS 0x00000058 /* MAC miscellaneous control/status register */
+#define MAC_DMA_MACMISC_DMA_OBS_MASK 0x000001E0 /* Mask for DMA observation bus mux select */
+#define MAC_DMA_MACMISC_DMA_OBS_LSB 5 /* Shift for DMA observation bus mux select */
+#define MAC_DMA_MACMISC_MISC_OBS 0x00000E00 /* Mask for MISC observation bus mux select */
+#define MAC_DMA_MACMISC_MISC_OBS_LSB 9 /* Shift for MISC observation bus mux select */
+#define MAC_DMA_MACMISC_MAC_OBS_BUS_LSB 0x00007000 /* Mask for MAC observation bus mux select (lsb) */
+#define MAC_DMA_MACMISC_MAC_OBS_BUS_LSB_LSB 12 /* Shift for MAC observation bus mux select (lsb) */
+#define MAC_DMA_MACMISC_MAC_OBS_BUS_MSB 0x00038000 /* Mask for MAC observation bus mux select (msb) */
+#define MAC_DMA_MACMISC_MAC_OBS_BUS_MSB_LSB 15 /* Shift for MAC observation bus mux select (msb) */
+
+
+#define MAC_DMA_ISR_ADDRESS 0x00000080 /* MAC Primary interrupt status register */
+/*
+ * Interrupt Status Registers
+ *
+ * Only the bits in the ISR_P register and the IMR_P registers
+ * control whether the MAC's INTA# output is asserted. The bits in
+ * the secondary interrupt status/mask registers control what bits
+ * are set in the primary interrupt status register; however, the
+ * IMR_S* registers DO NOT determine whether INTA# is asserted.
+ * That is, INTA# is asserted only when the logical AND of ISR_P
+ * and IMR_P is non-zero. The secondary interrupt mask/status
+ * registers affect what bits are set in ISR_P but they do not
+ * directly affect whether INTA# is asserted.
+ */
+#define MAC_DMA_ISR_RXOK_MASK 0x00000001 /* At least one frame received sans errors */
+#define MAC_DMA_ISR_RXDESC_MASK 0x00000002 /* Receive interrupt request */
+#define MAC_DMA_ISR_RXERR_MASK 0x00000004 /* Receive error interrupt */
+#define MAC_DMA_ISR_RXNOPKT_MASK 0x00000008 /* No frame received within timeout clock */
+#define MAC_DMA_ISR_RXEOL_MASK 0x00000010 /* Received descriptor empty interrupt */
+#define MAC_DMA_ISR_RXORN_MASK 0x00000020 /* Receive FIFO overrun interrupt */
+#define MAC_DMA_ISR_TXOK_MASK 0x00000040 /* Transmit okay interrupt */
+#define MAC_DMA_ISR_TXDESC_MASK 0x00000080 /* Transmit interrupt request */
+#define MAC_DMA_ISR_TXERR_MASK 0x00000100 /* Transmit error interrupt */
+#define MAC_DMA_ISR_TXNOPKT_MASK 0x00000200 /* No frame transmitted interrupt */
+#define MAC_DMA_ISR_TXEOL_MASK 0x00000400 /* Transmit descriptor empty interrupt */
+#define MAC_DMA_ISR_TXURN_MASK 0x00000800 /* Transmit FIFO underrun interrupt */
+#define MAC_DMA_ISR_MIB_MASK 0x00001000 /* MIB interrupt - see MIBC */
+#define MAC_DMA_ISR_SWI_MASK 0x00002000 /* Software interrupt */
+#define MAC_DMA_ISR_RXPHY_MASK 0x00004000 /* PHY receive error interrupt */
+#define MAC_DMA_ISR_RXKCM_MASK 0x00008000 /* Key-cache miss interrupt */
+#define MAC_DMA_ISR_BRSSI_HI_MASK 0x00010000 /* Beacon rssi high threshold interrupt */
+#define MAC_DMA_ISR_BRSSI_LO_MASK 0x00020000 /* Beacon rssi low threshold interrupt */
+#define MAC_DMA_ISR_BMISS_MASK 0x00040000 /* Beacon missed interrupt */
+#define MAC_DMA_ISR_TXMINTR_MASK 0x00080000 /* Maximum transmit interrupt rate */
+#define MAC_DMA_ISR_BNR_MASK 0x00100000 /* Beacon not ready interrupt */
+#define MAC_DMA_ISR_HIUERR_MASK 0x00200000 /* An unexpected bus error has occurred */
+#define MAC_DMA_ISR_BCNMISC_MASK 0x00800000 /* 'or' of TIM, CABEND, DTIMSYNC, BCNTO */
+#define MAC_DMA_ISR_RXMINTR_MASK 0x01000000 /* Maximum receive interrupt rate */
+#define MAC_DMA_ISR_QCBROVF_MASK 0x02000000 /* QCU CBR overflow interrupt */
+#define MAC_DMA_ISR_QCBRURN_MASK 0x04000000 /* QCU CBR underrun interrupt */
+#define MAC_DMA_ISR_QTRIG_MASK 0x08000000 /* QCU scheduling trigger interrupt */
+#define MAC_DMA_ISR_TIMER_MASK 0x10000000 /* GENTMR interrupt */
+#define MAC_DMA_ISR_HCFTO_MASK 0x20000000 /* HCFTO interrupt */
+#define MAC_DMA_ISR_TXINTM_MASK 0x40000000 /* Transmit completion mitigation interrupt */
+#define MAC_DMA_ISR_RXINTM_MASK 0x80000000 /* Receive completion mitigation interrupt */
+
+#define MAC_DMA_ISR_S0_ADDRESS 0x00000084 /* MAC Secondary interrupt status register 0 */
+#define MAC_DMA_ISR_S0_QCU_TXOK_MASK 0x000003FF /* Mask for TXOK (QCU 0-9) */
+#define MAC_DMA_ISR_S0_QCU_TXOK_LSB 0
+#define MAC_DMA_ISR_S0_QCU_TXDESC_MASK 0x03FF0000 /* Mask for TXDESC (QCU 0-9) */
+#define MAC_DMA_ISR_S0_QCU_TXDESC_LSB 16
+
+#define MAC_DMA_ISR_S1_ADDRESS 0x00000088 /* MAC Secondary interrupt status register 1 */
+#define MAC_DMA_ISR_S1_QCU_TXERR_MASK 0x000003FF /* Mask for TXERR (QCU 0-9) */
+#define MAC_DMA_ISR_S1_QCU_TXERR_LSB 0
+#define MAC_DMA_ISR_S1_QCU_TXEOL_MASK 0x03FF0000 /* Mask for TXEOL (QCU 0-9) */
+#define MAC_DMA_ISR_S1_QCU_TXEOL_LSB 16
+
+#define MAC_DMA_ISR_S2_ADDRESS 0x0000008c /* MAC Secondary interrupt status register 2 */
+#define MAC_DMA_ISR_S2_QCU_TXURN_MASK 0x000003FF /* Mask for TXURN (QCU 0-9) */
+#define MAC_DMA_ISR_S2_QCU_TXURN_LSB 0 /* Shift for TXURN (QCU 0-9) */
+#define MAC_DMA_ISR_S2_RX_INT_MASK 0x00000800
+#define MAC_DMA_ISR_S2_WL_STOMPED_MASK 0x00001000
+#define MAC_DMA_ISR_S2_RX_PTR_BAD_MASK 0x00002000
+#define MAC_DMA_ISR_S2_BT_LOW_PRIORITY_RISING_MASK 0x00004000
+#define MAC_DMA_ISR_S2_BT_LOW_PRIORITY_FALLING_MASK 0x00008000
+#define MAC_DMA_ISR_S2_BB_PANIC_IRQ_MASK 0x00010000
+#define MAC_DMA_ISR_S2_BT_STOMPED_MASK 0x00020000
+#define MAC_DMA_ISR_S2_BT_ACTIVE_RISING_MASK 0x00040000
+#define MAC_DMA_ISR_S2_BT_ACTIVE_FALLING_MASK 0x00080000
+#define MAC_DMA_ISR_S2_BT_PRIORITY_RISING_MASK 0x00100000
+#define MAC_DMA_ISR_S2_BT_PRIORITY_FALLING_MASK 0x00200000
+#define MAC_DMA_ISR_S2_CST_MASK 0x00400000
+#define MAC_DMA_ISR_S2_GTT_MASK 0x00800000
+#define MAC_DMA_ISR_S2_TIM_MASK 0x01000000 /* TIM */
+#define MAC_DMA_ISR_S2_CABEND_MASK 0x02000000 /* CABEND */
+#define MAC_DMA_ISR_S2_DTIMSYNC_MASK 0x04000000 /* DTIMSYNC */
+#define MAC_DMA_ISR_S2_BCNTO_MASK 0x08000000 /* BCNTO */
+#define MAC_DMA_ISR_S2_CABTO_MASK 0x10000000 /* CABTO */
+#define MAC_DMA_ISR_S2_DTIM_MASK 0x20000000 /* DTIM */
+#define MAC_DMA_ISR_S2_TSFOOR_MASK 0x40000000 /* TSFOOR */
+
+#define MAC_DMA_ISR_S3_ADDRESS 0x00000090 /* MAC Secondary interrupt status register 3 */
+#define MAC_DMA_ISR_S3_QCU_QCBROVF_MASK 0x000003FF /* Mask for QCBROVF (QCU 0-9) */
+#define MAC_DMA_ISR_S3_QCU_QCBRURN_MASK 0x03FF0000 /* Mask for QCBRURN (QCU 0-9) */
+
+#define MAC_DMA_ISR_S4_ADDRESS 0x00000094 /* MAC Secondary interrupt status register 4 */
+#define MAC_DMA_ISR_S4_QCU_QTRIG_MASK 0x000003FF /* Mask for QTRIG (QCU 0-9) */
+
+#define MAC_DMA_ISR_S5_ADDRESS 0x00000098 /* MAC Secondary interrupt status register 5 */
+#define MAC_DMA_ISR_S5_TBTT_TIMER_TRIGGER_MASK 0x00000001
+#define MAC_DMA_ISR_S5_DBA_TIMER_TRIGGER_MASK 0x00000002
+#define MAC_DMA_ISR_S5_SBA_TIMER_TRIGGER_MASK 0x00000004
+#define MAC_DMA_ISR_S5_HCF_TIMER_TRIGGER_MASK 0x00000008
+#define MAC_DMA_ISR_S5_TIM_TIMER_TRIGGER_MASK 0x00000010
+#define MAC_DMA_ISR_S5_DTIM_TIMER_TRIGGER_MASK 0x00000020
+#define MAC_DMA_ISR_S5_QUIET_TIMER_TRIGGER_MASK 0x00000040
+#define MAC_DMA_ISR_S5_NDP_TIMER_TRIGGER_MASK 0x00000080
+#define MAC_DMA_ISR_S5_GENERIC_TIMER2_TRIGGER_MASK 0x0000FF00
+#define MAC_DMA_ISR_S5_GENERIC_TIMER2_TRIGGER_LSB 8
+#define MAC_DMA_ISR_S5_GENERIC_TIMER2_TRIGGER(_i) (0x00000100 << (_i))
+#define MAC_DMA_ISR_S5_TIMER_OVERFLOW_MASK 0x00010000
+#define MAC_DMA_ISR_S5_DBA_TIMER_THRESHOLD_MASK 0x00020000
+#define MAC_DMA_ISR_S5_SBA_TIMER_THRESHOLD_MASK 0x00040000
+#define MAC_DMA_ISR_S5_HCF_TIMER_THRESHOLD_MASK 0x00080000
+#define MAC_DMA_ISR_S5_TIM_TIMER_THRESHOLD_MASK 0x00100000
+#define MAC_DMA_ISR_S5_DTIM_TIMER_THRESHOLD_MASK 0x00200000
+#define MAC_DMA_ISR_S5_QUIET_TIMER_THRESHOLD_MASK 0x00400000
+#define MAC_DMA_ISR_S5_NDP_TIMER_THRESHOLD_MASK 0x00800000
+#define MAC_DMA_IMR_S5_GENERIC_TIMER2_THRESHOLD_MASK 0xFF000000
+#define MAC_DMA_IMR_S5_GENERIC_TIMER2_THRESHOLD_LSB 24
+#define MAC_DMA_IMR_S5_GENERIC_TIMER2_THRESHOLD(_i) (0x01000000 << (_i))
+
+#define MAC_DMA_IMR_ADDRESS 0x000000A0 /* MAC Primary interrupt mask register */
+/*
+ * Interrupt Mask Registers
+ *
+ * Only the bits in the IMR control whether the MAC's INTA#
+ * output will be asserted. The bits in the secondary interrupt
+ * mask registers control what bits get set in the primary
+ * interrupt status register; however, the IMR_S* registers
+ * DO NOT determine whether INTA# is asserted.
+ */
+#define MAC_DMA_IMR_RXOK_MASK 0x00000001 /* At least one frame received sans errors */
+#define MAC_DMA_IMR_RXDESC_MASK 0x00000002 /* Receive interrupt request */
+#define MAC_DMA_IMR_RXERR_MASK 0x00000004 /* Receive error interrupt */
+#define MAC_DMA_IMR_RXNOPKT_MASK 0x00000008 /* No frame received within timeout clock */
+#define MAC_DMA_IMR_RXEOL_MASK 0x00000010 /* Received descriptor empty interrupt */
+#define MAC_DMA_IMR_RXORN_MASK 0x00000020 /* Receive FIFO overrun interrupt */
+#define MAC_DMA_IMR_TXOK_MASK 0x00000040 /* Transmit okay interrupt */
+#define MAC_DMA_IMR_TXDESC_MASK 0x00000080 /* Transmit interrupt request */
+#define MAC_DMA_IMR_TXERR_MASK 0x00000100 /* Transmit error interrupt */
+#define MAC_DMA_IMR_TXNOPKT_MASK 0x00000200 /* No frame transmitted interrupt */
+#define MAC_DMA_IMR_TXEOL_MASK 0x00000400 /* Transmit descriptor empty interrupt */
+#define MAC_DMA_IMR_TXURN_MASK 0x00000800 /* Transmit FIFO underrun interrupt */
+#define MAC_DMA_IMR_MIB_MASK 0x00001000 /* MIB interrupt - see MIBC */
+#define MAC_DMA_IMR_SWI_MASK 0x00002000 /* Software interrupt */
+#define MAC_DMA_IMR_RXPHY_MASK 0x00004000 /* PHY receive error interrupt */
+#define MAC_DMA_IMR_RXKCM_MASK 0x00008000 /* Key-cache miss interrupt */
+#define MAC_DMA_IMR_BRSSI_HI_MASK 0x00010000 /* Beacon rssi hi threshold interrupt */
+#define MAC_DMA_IMR_BRSSI_LO_MASK 0x00020000 /* Beacon rssi lo threshold interrupt */
+#define MAC_DMA_IMR_BMISS_MASK 0x00040000 /* Beacon missed interrupt */
+#define MAC_DMA_IMR_TXMINTR_MASK 0x00080000 /* Maximum transmit interrupt rate */
+#define MAC_DMA_IMR_BNR_MASK 0x00100000 /* BNR interrupt */
+#define MAC_DMA_IMR_HIUERR_MASK 0x00200000 /* An unexpected bus error has occurred */
+#define MAC_DMA_IMR_BCNMISC_MASK 0x00800000 /* Beacon Misc */
+#define MAC_DMA_IMR_RXMINTR_MASK 0x01000000 /* Maximum receive interrupt rate */
+#define MAC_DMA_IMR_QCBROVF_MASK 0x02000000 /* QCU CBR overflow interrupt */
+#define MAC_DMA_IMR_QCBRURN_MASK 0x04000000 /* QCU CBR underrun interrupt */
+#define MAC_DMA_IMR_QTRIG_MASK 0x08000000 /* QCU scheduling trigger interrupt */
+#define MAC_DMA_IMR_TIMER_MASK 0x10000000 /* GENTMR interrupt */
+#define MAC_DMA_IMR_HCFTO_MASK 0x20000000 /* HCFTO interrupt */
+#define MAC_DMA_IMR_TXINTM_MASK 0x40000000 /* Transmit completion mitigation interrupt */
+#define MAC_DMA_IMR_RXINTM_MASK 0x80000000 /* Receive completion mitigation interrupt */
+
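As both comment blocks above state, INTA# is asserted only when the logical AND of ISR_P and IMR_P is non-zero; a hedged sketch of that test, reusing the hypothetical REG_READ32 accessor from the earlier example:

/* Sketch: an interrupt is pending and enabled iff (ISR_P & IMR_P) != 0. */
static int mac_dma_interrupt_pending(void *dev)
{
	u32 isr_p = REG_READ32(dev, MAC_DMA_ISR_ADDRESS);
	u32 imr_p = REG_READ32(dev, MAC_DMA_IMR_ADDRESS);

	return (isr_p & imr_p) != 0;
}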
+#define MAC_DMA_IMR_S0_ADDRESS 0x000000A4 /* MAC Secondary interrupt mask register 0 */
+#define MAC_DMA_IMR_S0_QCU_TXOK_MASK 0x000003FF /* TXOK (QCU 0-9) */
+#define MAC_DMA_IMR_S0_QCU_TXOK_LSB 0
+#define MAC_DMA_IMR_S0_QCU_TXDESC_MASK 0x03FF0000 /* TXDESC (QCU 0-9) */
+#define MAC_DMA_IMR_S0_QCU_TXDESC_LSB 16
+
+#define MAC_DMA_IMR_S1_ADDRESS 0x000000A8 /* MAC Secondary interrupt mask register 1 */
+#define MAC_DMA_IMR_S1_QCU_TXERR_MASK 0x000003FF /* TXERR (QCU 0-9) */
+#define MAC_DMA_IMR_S1_QCU_TXERR_LSB 0
+#define MAC_DMA_IMR_S1_QCU_TXEOL_MASK 0x03FF0000 /* TXEOL (QCU 0-9) */
+#define MAC_DMA_IMR_S1_QCU_TXEOL_LSB 16
+
+#define MAC_DMA_IMR_S2_ADDRESS 0x000000AC /* MAC Secondary interrupt mask register 2 */
+#define MAC_DMA_IMR_S2_QCU_TXURN_MASK 0x000003FF /* Mask for TXURN (QCU 0-9) */
+#define MAC_DMA_IMR_S2_QCU_TXURN_LSB 0
+#define MAC_DMA_IMR_S2_RX_INT_MASK 0x00000800
+#define MAC_DMA_IMR_S2_WL_STOMPED_MASK 0x00001000
+#define MAC_DMA_IMR_S2_RX_PTR_BAD_MASK 0x00002000
+#define MAC_DMA_IMR_S2_BT_LOW_PRIORITY_RISING_MASK 0x00004000
+#define MAC_DMA_IMR_S2_BT_LOW_PRIORITY_FALLING_MASK 0x00008000
+#define MAC_DMA_IMR_S2_BB_PANIC_IRQ_MASK 0x00010000
+#define MAC_DMA_IMR_S2_BT_STOMPED_MASK 0x00020000
+#define MAC_DMA_IMR_S2_BT_ACTIVE_RISING_MASK 0x00040000
+#define MAC_DMA_IMR_S2_BT_ACTIVE_FALLING_MASK 0x00080000
+#define MAC_DMA_IMR_S2_BT_PRIORITY_RISING_MASK 0x00100000
+#define MAC_DMA_IMR_S2_BT_PRIORITY_FALLING_MASK 0x00200000
+#define MAC_DMA_IMR_S2_CST_MASK 0x00400000
+#define MAC_DMA_IMR_S2_GTT_MASK 0x00800000
+#define MAC_DMA_IMR_S2_TIM_MASK 0x01000000 /* TIM */
+#define MAC_DMA_IMR_S2_CABEND_MASK 0x02000000 /* CABEND */
+#define MAC_DMA_IMR_S2_DTIMSYNC_MASK 0x04000000 /* DTIMSYNC */
+#define MAC_DMA_IMR_S2_BCNTO_MASK 0x08000000 /* BCNTO */
+#define MAC_DMA_IMR_S2_CABTO_MASK 0x10000000 /* CABTO */
+#define MAC_DMA_IMR_S2_DTIM_MASK 0x20000000 /* DTIM */
+#define MAC_DMA_IMR_S2_TSFOOR_MASK 0x40000000 /* TSFOOR */
+
+#define MAC_DMA_IMR_S3_ADDRESS 0x000000B0 /* MAC Secondary interrupt mask register 3 */
+#define MAC_DMA_IMR_S3_QCU_QCBROVF_MASK 0x000003FF /* Mask for QCBROVF (QCU 0-9) */
+#define MAC_DMA_IMR_S3_QCU_QCBRURN_MASK 0x03FF0000 /* Mask for QCBRURN (QCU 0-9) */
+#define MAC_DMA_IMR_S3_QCU_QCBRURN_LSB 16
+
+#define MAC_DMA_IMR_S4_ADDRESS 0x000000B4 /* MAC Secondary interrupt mask register 4 */
+#define MAC_DMA_IMR_S4_QCU_QTRIG_MASK 0x000003FF /* Mask for QTRIG (QCU 0-9) */
+
+#define MAC_DMA_IMR_S5_ADDRESS 0x000000B8 /* MAC Secondary interrupt mask register 5 */
+#define MAC_DMA_IMR_S5_TBTT_TIMER_TRIGGER_MASK 0x00000001
+#define MAC_DMA_IMR_S5_DBA_TIMER_TRIGGER_MASK 0x00000002
+#define MAC_DMA_IMR_S5_SBA_TIMER_TRIGGER_MASK 0x00000004
+#define MAC_DMA_IMR_S5_HCF_TIMER_TRIGGER_MASK 0x00000008
+#define MAC_DMA_IMR_S5_TIM_TIMER_TRIGGER_MASK 0x00000010
+#define MAC_DMA_IMR_S5_DTIM_TIMER_TRIGGER_MASK 0x00000020
+#define MAC_DMA_IMR_S5_QUIET_TIMER_TRIGGER_MASK 0x00000040
+#define MAC_DMA_IMR_S5_NDP_TIMER_TRIGGER_MASK 0x00000080
+#define MAC_DMA_IMR_S5_GENERIC_TIMER2_TRIGGER_MASK 0x0000FF00
+#define MAC_DMA_IMR_S5_GENERIC_TIMER2_TRIGGER_LSB 8
+#define MAC_DMA_IMR_S5_GENERIC_TIMER2_TRIGGER(_i) (0x100 << (_i))
+#define MAC_DMA_IMR_S5_TIMER_OVERFLOW_MASK 0x00010000
+#define MAC_DMA_IMR_S5_DBA_TIMER_THRESHOLD_MASK 0x00020000
+#define MAC_DMA_IMR_S5_SBA_TIMER_THRESHOLD_MASK 0x00040000
+#define MAC_DMA_IMR_S5_HCF_TIMER_THRESHOLD_MASK 0x00080000
+#define MAC_DMA_IMR_S5_TIM_TIMER_THRESHOLD_MASK 0x00100000
+#define MAC_DMA_IMR_S5_DTIM_TIMER_THRESHOLD_MASK 0x00200000
+#define MAC_DMA_IMR_S5_QUIET_TIMER_THRESHOLD_MASK 0x00400000
+#define MAC_DMA_IMR_S5_NDP_TIMER_THRESHOLD_MASK 0x00800000
+#define MAC_DMA_IMR_S5_GENERIC_TIMER2_THRESHOLD_MASK 0xFF000000
+#define MAC_DMA_IMR_S5_GENERIC_TIMER2_THRESHOLD_LSB 24
+#define MAC_DMA_IMR_S5_GENERIC_TIMER2_THRESHOLD(_i) (0x01000000 << (_i))
+
+#define MAC_DMA_ISR_RAC_ADDRESS 0x000000C0 /* ISR read-and-clear access */
+
+/* Shadow copies with read-and-clear access */
+#define MAC_DMA_ISR_S0_S_ADDRESS 0x000000C4 /* ISR_S0 shadow copy */
+#define MAC_DMA_ISR_S1_S_ADDRESS 0x000000C8 /* ISR_S1 shadow copy */
+#define MAC_DMA_ISR_S2_S_ADDRESS 0x000000CC /* ISR_S2 shadow copy */
+#define MAC_DMA_ISR_S3_S_ADDRESS 0x000000D0 /* ISR_S3 shadow copy */
+#define MAC_DMA_ISR_S4_S_ADDRESS 0x000000D4 /* ISR_S4 shadow copy */
+#define MAC_DMA_ISR_S5_S_ADDRESS 0x000000D8 /* ISR_S5 shadow copy */
+
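Assuming the usual Atheros read-and-clear semantics (a read of the RAC address returns the primary status and clears it, while the _S registers hold latched copies of the secondary status), a hedged sketch with the same hypothetical accessor:

/* Sketch: one read-and-clear of ISR_P, then a latched secondary copy. */
static void mac_dma_sample_isr(void *dev, u32 *isr_p, u32 *isr_s0)
{
	*isr_p = REG_READ32(dev, MAC_DMA_ISR_RAC_ADDRESS);   /* clears ISR_P */
	*isr_s0 = REG_READ32(dev, MAC_DMA_ISR_S0_S_ADDRESS); /* shadow of ISR_S0 */
}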
+#define MAC_DMA_Q0_TXDP_ADDRESS 0x00000800 /* MAC Transmit Queue descriptor pointer */
+#define MAC_DMA_Q1_TXDP_ADDRESS 0x00000804 /* MAC Transmit Queue descriptor pointer */
+#define MAC_DMA_Q2_TXDP_ADDRESS 0x00000808 /* MAC Transmit Queue descriptor pointer */
+#define MAC_DMA_Q3_TXDP_ADDRESS 0x0000080C /* MAC Transmit Queue descriptor pointer */
+#define MAC_DMA_Q4_TXDP_ADDRESS 0x00000810 /* MAC Transmit Queue descriptor pointer */
+#define MAC_DMA_Q5_TXDP_ADDRESS 0x00000814 /* MAC Transmit Queue descriptor pointer */
+#define MAC_DMA_Q6_TXDP_ADDRESS 0x00000818 /* MAC Transmit Queue descriptor pointer */
+#define MAC_DMA_Q7_TXDP_ADDRESS 0x0000081C /* MAC Transmit Queue descriptor pointer */
+#define MAC_DMA_Q8_TXDP_ADDRESS 0x00000820 /* MAC Transmit Queue descriptor pointer */
+#define MAC_DMA_Q9_TXDP_ADDRESS 0x00000824 /* MAC Transmit Queue descriptor pointer */
+#define MAC_DMA_QTXDP_ADDRESS(_i) (MAC_DMA_Q0_TXDP_ADDRESS + ((_i)<<2))
+
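The Q0..Q9 descriptor-pointer registers sit at consecutive 4-byte offsets, which is exactly what the indexed MAC_DMA_QTXDP_ADDRESS(_i) helper encodes; a hedged example with the hypothetical REG_WRITE32 accessor:

/* Sketch: MAC_DMA_QTXDP_ADDRESS(3) == MAC_DMA_Q3_TXDP_ADDRESS (0x0000080C). */
static void mac_dma_set_txdp(void *dev, int qnum, u32 desc_paddr)
{
	REG_WRITE32(dev, MAC_DMA_QTXDP_ADDRESS(qnum), desc_paddr);
}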
+#define MAC_DMA_Q_TXE_ADDRESS 0x00000840 /* MAC Transmit Queue enable */
+#define MAC_DMA_Q_TXD_ADDRESS 0x00000880 /* MAC Transmit Queue disable */
+/* QCU registers */
+
+#define MAC_DMA_Q0_CBRCFG_ADDRESS 0x000008C0 /* MAC CBR configuration */
+#define MAC_DMA_Q1_CBRCFG_ADDRESS 0x000008C4 /* MAC CBR configuration */
+#define MAC_DMA_Q2_CBRCFG_ADDRESS 0x000008C8 /* MAC CBR configuration */
+#define MAC_DMA_Q3_CBRCFG_ADDRESS 0x000008CC /* MAC CBR configuration */
+#define MAC_DMA_Q4_CBRCFG_ADDRESS 0x000008D0 /* MAC CBR configuration */
+#define MAC_DMA_Q5_CBRCFG_ADDRESS 0x000008D4 /* MAC CBR configuration */
+#define MAC_DMA_Q6_CBRCFG_ADDRESS 0x000008D8 /* MAC CBR configuration */
+#define MAC_DMA_Q7_CBRCFG_ADDRESS 0x000008DC /* MAC CBR configuration */
+#define MAC_DMA_Q8_CBRCFG_ADDRESS 0x000008E0 /* MAC CBR configuration */
+#define MAC_DMA_Q9_CBRCFG_ADDRESS 0x000008E4 /* MAC CBR configuration */
+#define MAC_DMA_QCBRCFG_ADDRESS(_i) (MAC_DMA_Q0_CBRCFG_ADDRESS + ((_i)<<2))
+
+#define MAC_DMA_Q_CBRCFG_CBR_INTERVAL_MASK 0x00FFFFFF /* Mask for CBR interval (us) */
+#define MAC_DMA_Q_CBRCFG_CBR_INTERVAL_LSB 0 /* Shift for CBR interval */
+#define MAC_DMA_Q_CBRCFG_CBR_OVF_THRESH_MASK 0xFF000000 /* Mask for CBR overflow threshold */
+#define MAC_DMA_Q_CBRCFG_CBR_OVF_THRESH_LSB 24 /* Shift for CBR overflow thresh */
+
+
+#define MAC_DMA_Q0_RDYTIMECFG_ADDRESS 0x00000900 /* MAC ReadyTime configuration */
+#define MAC_DMA_Q1_RDYTIMECFG_ADDRESS 0x00000904 /* MAC ReadyTime configuration */
+#define MAC_DMA_Q2_RDYTIMECFG_ADDRESS 0x00000908 /* MAC ReadyTime configuration */
+#define MAC_DMA_Q3_RDYTIMECFG_ADDRESS 0x0000090C /* MAC ReadyTime configuration */
+#define MAC_DMA_Q4_RDYTIMECFG_ADDRESS 0x00000910 /* MAC ReadyTime configuration */
+#define MAC_DMA_Q5_RDYTIMECFG_ADDRESS 0x00000914 /* MAC ReadyTime configuration */
+#define MAC_DMA_Q6_RDYTIMECFG_ADDRESS 0x00000918 /* MAC ReadyTime configuration */
+#define MAC_DMA_Q7_RDYTIMECFG_ADDRESS 0x0000091C /* MAC ReadyTime configuration */
+#define MAC_DMA_Q8_RDYTIMECFG_ADDRESS 0x00000920 /* MAC ReadyTime configuration */
+#define MAC_DMA_Q9_RDYTIMECFG_ADDRESS 0x00000924 /* MAC ReadyTime configuration */
+#define MAC_DMA_QRDYTIMECFG_ADDRESS(_i) (MAC_DMA_Q0_RDYTIMECFG_ADDRESS + ((_i)<<2))
+
+#define MAC_DMA_Q_RDYTIMECFG_INT_MASK 0x00FFFFFF /* ReadyTime interval (us) */
+#define MAC_DMA_Q_RDYTIMECFG_INT_LSB 0 /* Shift for ReadyTime interval (us) */
+#define MAC_DMA_Q_RDYTIMECFG_ENA_MASK 0x01000000 /* ReadyTime enable */
+
+#define MAC_DMA_Q_ONESHOTMAC_DMAM_SC_ADDRESS 0x00000940 /* MAC OneShotArm set control */
+#define MAC_DMA_Q_ONESHOTMAC_DMAM_CC_ADDRESS 0x00000980 /* MAC OneShotArm clear control */
+
+#define MAC_DMA_Q0_MISC_ADDRESS 0x000009C0 /* MAC Miscellaneous QCU settings */
+#define MAC_DMA_Q1_MISC_ADDRESS 0x000009C4 /* MAC Miscellaneous QCU settings */
+#define MAC_DMA_Q2_MISC_ADDRESS 0x000009C8 /* MAC Miscellaneous QCU settings */
+#define MAC_DMA_Q3_MISC_ADDRESS 0x000009CC /* MAC Miscellaneous QCU settings */
+#define MAC_DMA_Q4_MISC_ADDRESS 0x000009D0 /* MAC Miscellaneous QCU settings */
+#define MAC_DMA_Q5_MISC_ADDRESS 0x000009D4 /* MAC Miscellaneous QCU settings */
+#define MAC_DMA_Q6_MISC_ADDRESS 0x000009D8 /* MAC Miscellaneous QCU settings */
+#define MAC_DMA_Q7_MISC_ADDRESS 0x000009DC /* MAC Miscellaneous QCU settings */
+#define MAC_DMA_Q8_MISC_ADDRESS 0x000009E0 /* MAC Miscellaneous QCU settings */
+#define MAC_DMA_Q9_MISC_ADDRESS 0x000009E4 /* MAC Miscellaneous QCU settings */
+#define MAC_DMA_QMISC_ADDRESS(_i) (MAC_DMA_Q0_MISC_ADDRESS + ((_i)<<2))
+
+#define MAC_DMA_Q_MISC_FSP_MASK 0x0000000F /* Frame Scheduling Policy mask */
+#define MAC_DMA_Q_MISC_FSP_ASAP 0 /* ASAP */
+#define MAC_DMA_Q_MISC_FSP_CBR 1 /* CBR */
+#define MAC_DMA_Q_MISC_FSP_DBA_GATED 2 /* DMA Beacon Alert gated */
+#define MAC_DMA_Q_MISC_FSP_TIM_GATED 3 /* TIM gated */
+#define MAC_DMA_Q_MISC_FSP_BEACON_SENT_GATED 4 /* Beacon-sent-gated */
+#define MAC_DMA_Q_MISC_ONE_SHOT_EN_MASK 0x00000010 /* OneShot enable */
+#define MAC_DMA_Q_MISC_CBR_INCR_DIS1_MASK 0x00000020 /* Disable CBR expired counter incr
+ (empty q) */
+#define MAC_DMA_Q_MISC_CBR_INCR_DIS0_MASK 0x00000040 /* Disable CBR expired counter incr
+ (empty beacon q) */
+#define MAC_DMA_Q_MISC_BEACON_USE_MASK 0x00000080 /* Beacon use indication */
+#define MAC_DMA_Q_MISC_CBR_EXP_CNTR_LIMIT_MASK 0x00000100 /* CBR expired counter limit enable */
+#define MAC_DMA_Q_MISC_RDYTIME_EXP_POLICY_MASK 0x00000200 /* Enable TXE cleared on ReadyTime expired or VEOL */
+#define MAC_DMA_Q_MISC_RESET_CBR_EXP_CTR_MASK 0x00000400 /* Reset CBR expired counter */
+#define MAC_DMA_Q_MISC_DCU_EARLY_TERM_REQ_MASK 0x00000800 /* DCU frame early termination request control */
+
+#define MAC_DMA_Q0_STS_ADDRESS 0x00000A00 /* MAC Miscellaneous QCU status */
+#define MAC_DMA_Q1_STS_ADDRESS 0x00000A04 /* MAC Miscellaneous QCU status */
+#define MAC_DMA_Q2_STS_ADDRESS 0x00000A08 /* MAC Miscellaneous QCU status */
+#define MAC_DMA_Q3_STS_ADDRESS 0x00000A0C /* MAC Miscellaneous QCU status */
+#define MAC_DMA_Q4_STS_ADDRESS 0x00000A10 /* MAC Miscellaneous QCU status */
+#define MAC_DMA_Q5_STS_ADDRESS 0x00000A14 /* MAC Miscellaneous QCU status */
+#define MAC_DMA_Q6_STS_ADDRESS 0x00000A18 /* MAC Miscellaneous QCU status */
+#define MAC_DMA_Q7_STS_ADDRESS 0x00000A1C /* MAC Miscellaneous QCU status */
+#define MAC_DMA_Q8_STS_ADDRESS 0x00000A20 /* MAC Miscellaneous QCU status */
+#define MAC_DMA_Q9_STS_ADDRESS 0x00000A24 /* MAC Miscellaneous QCU status */
+#define MAC_DMA_QSTS_ADDRESS(_i) (MAC_DMA_Q0_STS_ADDRESS + ((_i)<<2))
+
+#define MAC_DMA_Q_STS_PEND_FR_CNT_MASK 0x00000003 /* Mask for Pending Frame Count */
+#define MAC_DMA_Q_STS_CBR_EXP_CNT_MASK 0x0000FF00 /* Mask for CBR expired counter */
+
+#define MAC_DMA_Q_RDYTIMESHDN_ADDRESS 0x00000A40 /* MAC ReadyTimeShutdown status */
+
+/* DCU registers */
+
+#define MAC_DMA_D0_QCUMASK_ADDRESS 0x00001000 /* MAC QCU Mask */
+#define MAC_DMA_D1_QCUMASK_ADDRESS 0x00001004 /* MAC QCU Mask */
+#define MAC_DMA_D2_QCUMASK_ADDRESS 0x00001008 /* MAC QCU Mask */
+#define MAC_DMA_D3_QCUMASK_ADDRESS 0x0000100C /* MAC QCU Mask */
+#define MAC_DMA_D4_QCUMASK_ADDRESS 0x00001010 /* MAC QCU Mask */
+#define MAC_DMA_D5_QCUMASK_ADDRESS 0x00001014 /* MAC QCU Mask */
+#define MAC_DMA_D6_QCUMASK_ADDRESS 0x00001018 /* MAC QCU Mask */
+#define MAC_DMA_D7_QCUMASK_ADDRESS 0x0000101C /* MAC QCU Mask */
+#define MAC_DMA_D8_QCUMASK_ADDRESS 0x00001020 /* MAC QCU Mask */
+#define MAC_DMA_D9_QCUMASK_ADDRESS 0x00001024 /* MAC QCU Mask */
+#define MAC_DMA_DQCUMASK_ADDRESS(_i) (MAC_DMA_D0_QCUMASK_ADDRESS + ((_i)<<2))
+
+#define MAC_DMA_D_QCUMASK_MASK 0x000003FF /* Mask for QCU Mask (QCU 0-9) */
+
+#define MAC_DMA_D_GBL_IFS_SIFS_ADDRESS 0x00001030 /* DCU global SIFS settings */
+
+
+#define MAC_DMA_D0_LCL_IFS_ADDRESS 0x00001040 /* MAC DCU-specific IFS settings */
+#define MAC_DMA_D1_LCL_IFS_ADDRESS 0x00001044 /* MAC DCU-specific IFS settings */
+#define MAC_DMA_D2_LCL_IFS_ADDRESS 0x00001048 /* MAC DCU-specific IFS settings */
+#define MAC_DMA_D3_LCL_IFS_ADDRESS 0x0000104C /* MAC DCU-specific IFS settings */
+#define MAC_DMA_D4_LCL_IFS_ADDRESS 0x00001050 /* MAC DCU-specific IFS settings */
+#define MAC_DMA_D5_LCL_IFS_ADDRESS 0x00001054 /* MAC DCU-specific IFS settings */
+#define MAC_DMA_D6_LCL_IFS_ADDRESS 0x00001058 /* MAC DCU-specific IFS settings */
+#define MAC_DMA_D7_LCL_IFS_ADDRESS 0x0000105C /* MAC DCU-specific IFS settings */
+#define MAC_DMA_D8_LCL_IFS_ADDRESS 0x00001060 /* MAC DCU-specific IFS settings */
+#define MAC_DMA_D9_LCL_IFS_ADDRESS 0x00001064 /* MAC DCU-specific IFS settings */
+#define MAC_DMA_DLCL_IFS_ADDRESS(_i) (MAC_DMA_D0_LCL_IFS_ADDRESS + ((_i)<<2))
+#define MAC_DMA_D_LCL_IFS_CWMIN_MASK 0x000003FF /* Mask for CW_MIN */
+#define MAC_DMA_D_LCL_IFS_CWMIN_LSB 0
+#define MAC_DMA_D_LCL_IFS_CWMAX_MASK 0x000FFC00 /* Mask for CW_MAX */
+#define MAC_DMA_D_LCL_IFS_CWMAX_LSB 10
+#define MAC_DMA_D_LCL_IFS_AIFS_MASK 0x0FF00000 /* Mask for AIFS */
+#define MAC_DMA_D_LCL_IFS_AIFS_LSB 20
+/*
+ * Note: even though this field is 8 bits wide, the
+ * maximum supported AIFS value is 0xFC. Setting the AIFS value
+ * to 0xFD, 0xFE, or 0xFF will not work correctly and will cause
+ * the DCU to hang.
+ */
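Given the restriction in the note above, a defensive way to program the AIFS field is to clamp the value at 0xFC before inserting it; a hedged sketch, again with the hypothetical accessors:

/* Sketch: never program AIFS above 0xFC, per the DCU-hang note above. */
static void mac_dma_set_aifs(void *dev, int dcu, u32 aifs)
{
	u32 val = REG_READ32(dev, MAC_DMA_DLCL_IFS_ADDRESS(dcu));

	if (aifs > 0xFC)
		aifs = 0xFC;
	val &= ~MAC_DMA_D_LCL_IFS_AIFS_MASK;
	val |= (aifs << MAC_DMA_D_LCL_IFS_AIFS_LSB) & MAC_DMA_D_LCL_IFS_AIFS_MASK;
	REG_WRITE32(dev, MAC_DMA_DLCL_IFS_ADDRESS(dcu), val);
}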
+#define MAC_DMA_D_GBL_IFS_SLOT_ADDRESS 0x00001070 /* DCU global slot interval */
+
+#define MAC_DMA_D0_RETRY_LIMIT_ADDRESS 0x00001080 /* MAC Retry limits */
+#define MAC_DMA_D1_RETRY_LIMIT_ADDRESS 0x00001084 /* MAC Retry limits */
+#define MAC_DMA_D2_RETRY_LIMIT_ADDRESS 0x00001088 /* MAC Retry limits */
+#define MAC_DMA_D3_RETRY_LIMIT_ADDRESS 0x0000108C /* MAC Retry limits */
+#define MAC_DMA_D4_RETRY_LIMIT_ADDRESS 0x00001090 /* MAC Retry limits */
+#define MAC_DMA_D5_RETRY_LIMIT_ADDRESS 0x00001094 /* MAC Retry limits */
+#define MAC_DMA_D6_RETRY_LIMIT_ADDRESS 0x00001098 /* MAC Retry limits */
+#define MAC_DMA_D7_RETRY_LIMIT_ADDRESS 0x0000109C /* MAC Retry limits */
+#define MAC_DMA_D8_RETRY_LIMIT_ADDRESS 0x000010A0 /* MAC Retry limits */
+#define MAC_DMA_D9_RETRY_LIMIT_ADDRESS 0x000010A4 /* MAC Retry limits */
+#define MAC_DMA_DRETRY_LIMIT_ADDRESS(_i) (MAC_DMA_D0_RETRY_LIMIT_ADDRESS + ((_i)<<2))
+
+#define MAC_DMA_D_RETRY_LIMIT_FR_RTS_MASK 0x0000000F /* frame RTS failure limit */
+#define MAC_DMA_D_RETRY_LIMIT_FR_RTS_LSB 0
+#define MAC_DMA_D_RETRY_LIMIT_STA_RTS_MASK 0x00003F00 /* station RTS failure limit */
+#define MAC_DMA_D_RETRY_LIMIT_STA_RTS_LSB 8
+#define MAC_DMA_D_RETRY_LIMIT_STA_DATA_MASK 0x000FC000 /* station short retry limit */
+#define MAC_DMA_D_RETRY_LIMIT_STA_DATA_LSB 14
+
+#define MAC_DMA_D_GBL_IFS_EIFS_ADDRESS 0x000010B0 /* DCU global EIFS setting */
+
+#define MAC_DMA_D0_CHNTIME_ADDRESS 0x000010C0 /* MAC ChannelTime settings */
+#define MAC_DMA_D1_CHNTIME_ADDRESS 0x000010C4 /* MAC ChannelTime settings */
+#define MAC_DMA_D2_CHNTIME_ADDRESS 0x000010C8 /* MAC ChannelTime settings */
+#define MAC_DMA_D3_CHNTIME_ADDRESS 0x000010CC /* MAC ChannelTime settings */
+#define MAC_DMA_D4_CHNTIME_ADDRESS 0x000010D0 /* MAC ChannelTime settings */
+#define MAC_DMA_D5_CHNTIME_ADDRESS 0x000010D4 /* MAC ChannelTime settings */
+#define MAC_DMA_D6_CHNTIME_ADDRESS 0x000010D8 /* MAC ChannelTime settings */
+#define MAC_DMA_D7_CHNTIME_ADDRESS 0x000010DC /* MAC ChannelTime settings */
+#define MAC_DMA_D8_CHNTIME_ADDRESS 0x000010E0 /* MAC ChannelTime settings */
+#define MAC_DMA_D9_CHNTIME_ADDRESS 0x000010E4 /* MAC ChannelTime settings */
+#define MAC_DMA_DCHNTIME_ADDRESS(_i) (MAC_DMA_D0_CHNTIME_ADDRESS + ((_i)<<2))
+
+#define MAC_DMA_D_CHNTIME_DUR_MASK 0x000FFFFF /* ChannelTime duration (us) */
+#define MAC_DMA_D_CHNTIME_DUR_LSB 0 /* Shift for ChannelTime duration */
+#define MAC_DMA_D_CHNTIME_EN_MASK 0x00100000 /* ChannelTime enable */
+
+#define MAC_DMA_D_GBL_IFS_MISC_ADDRESS 0x000010f0 /* DCU global misc. IFS settings */
+#define MAC_DMA_D_GBL_IFS_MISC_LFSR_SLICE_SEL_MASK 0x00000007 /* LFSR slice select */
+#define MAC_DMA_D_GBL_IFS_MISC_TURBO_MODE_MASK 0x00000008 /* Turbo mode indication */
+#define MAC_DMA_D_GBL_IFS_MISC_DCU_ARBITER_DLY_MASK 0x00300000 /* DCU arbiter delay */
+#define MAC_DMA_D_GBL_IFS_IGNORE_BACKOFF_MASK 0x10000000
+
+#define MAC_DMA_D0_MISC_ADDRESS 0x00001100 /* MAC Miscellaneous DCU-specific settings */
+#define MAC_DMA_D1_MISC_ADDRESS 0x00001104 /* MAC Miscellaneous DCU-specific settings */
+#define MAC_DMA_D2_MISC_ADDRESS 0x00001108 /* MAC Miscellaneous DCU-specific settings */
+#define MAC_DMA_D3_MISC_ADDRESS 0x0000110C /* MAC Miscellaneous DCU-specific settings */
+#define MAC_DMA_D4_MISC_ADDRESS 0x00001110 /* MAC Miscellaneous DCU-specific settings */
+#define MAC_DMA_D5_MISC_ADDRESS 0x00001114 /* MAC Miscellaneous DCU-specific settings */
+#define MAC_DMA_D6_MISC_ADDRESS 0x00001118 /* MAC Miscellaneous DCU-specific settings */
+#define MAC_DMA_D7_MISC_ADDRESS 0x0000111C /* MAC Miscellaneous DCU-specific settings */
+#define MAC_DMA_D8_MISC_ADDRESS 0x00001120 /* MAC Miscellaneous DCU-specific settings */
+#define MAC_DMA_D9_MISC_ADDRESS 0x00001124 /* MAC Miscellaneous DCU-specific settings */
+#define MAC_DMA_DMISC_ADDRESS(_i) (MAC_DMA_D0_MISC_ADDRESS + ((_i)<<2))
+
+#define MAC_DMA_D0_EOL_ADDRESS 0x00001180
+#define MAC_DMA_D1_EOL_ADDRESS 0x00001184
+#define MAC_DMA_D2_EOL_ADDRESS 0x00001188
+#define MAC_DMA_D3_EOL_ADDRESS 0x0000118C
+#define MAC_DMA_D4_EOL_ADDRESS 0x00001190
+#define MAC_DMA_D5_EOL_ADDRESS 0x00001194
+#define MAC_DMA_D6_EOL_ADDRESS 0x00001198
+#define MAC_DMA_D7_EOL_ADDRESS 0x0000119C
+#define MAC_DMA_D8_EOL_ADDRESS 0x00001200
+#define MAC_DMA_D9_EOL_ADDRESS 0x00001204
+#define MAC_DMA_DEOL_ADDRESS(_i) (MAC_DMA_D0_EOL_ADDRESS + ((_i)<<2))
+
+#define MAC_DMA_D_MISC_BKOFF_THRESH_MASK 0x0000003F /* Backoff threshold */
+#define MAC_DMA_D_MISC_BACK_OFF_THRESH_LSB 0
+#define MAC_DMA_D_MISC_ETS_RTS_MASK 0x00000040 /* End of transmission series
+ station RTS/data failure
+ count reset policy */
+#define MAC_DMA_D_MISC_ETS_CW_MASK 0x00000080 /* End of transmission series
+ CW reset policy */
+#define MAC_DMA_D_MISC_FRAG_WAIT_EN_MASK 0x00000100 /* Fragment Starvation Policy */
+
+#define MAC_DMA_D_MISC_FRAG_BKOFF_EN_MASK 0x00000200 /* Backoff during a frag burst */
+#define MAC_DMA_D_MISC_HCF_POLL_EN_MASK 0x00000800 /* HCF poll enable */
+#define MAC_DMA_D_MISC_BKOFF_PERSISTENCE_MASK 0x00001000 /* Backoff persistence factor
+ setting */
+#define MAC_DMA_D_MISC_VIR_COL_HANDLING_MASK 0x0000C000 /* Mask for Virtual collision
+ handling policy */
+#define MAC_DMA_D_MISC_VIR_COL_HANDLING_LSB 14
+#define MAC_DMA_D_MISC_VIR_COL_HANDLING_DEFAULT 0 /* Normal */
+#define MAC_DMA_D_MISC_VIR_COL_HANDLING_IGNORE 1 /* Ignore */
+#define MAC_DMA_D_MISC_BEACON_USE_MASK 0x00010000 /* Beacon use indication */
+#define MAC_DMA_D_MISC_ARB_LOCKOUT_CNTRL_MASK 0x00060000 /* Mask for DCU arbiter lockout control */
+#define MAC_DMA_D_MISC_ARB_LOCKOUT_CNTRL_LSB 17
+#define MAC_DMA_D_MISC_ARB_LOCKOUT_CNTRL_NONE 0 /* No lockout */
+#define MAC_DMA_D_MISC_ARB_LOCKOUT_CNTRL_INTRA_FR 1 /* Intra-frame */
+#define MAC_DMA_D_MISC_ARB_LOCKOUT_CNTRL_GLOBAL 2 /* Global */
+#define MAC_DMA_D_MISC_ARB_LOCKOUT_IGNORE_MASK 0x00080000 /* DCU arbiter lockout ignore control */
+#define MAC_DMA_D_MISC_SEQ_NUM_INCR_DIS_MASK 0x00100000 /* Sequence number increment disable */
+#define MAC_DMA_D_MISC_POST_FR_BKOFF_DIS_MASK 0x00200000 /* Post-frame backoff disable */
+#define MAC_DMA_D_MISC_VIRT_COLL_POLICY_MASK 0x00400000 /* Virtual coll. handling policy */
+#define MAC_DMA_D_MISC_BLOWN_IFS_POLICY_MASK 0x00800000 /* Blown IFS handling policy */
+
+#define MAC_DMA_D_SEQNUM_ADDRESS 0x00001140 /* MAC Frame sequence number */
+
+#define MAC_DMA_D_FPCTL_ADDRESS 0x00001230 /* DCU frame prefetch settings */
+#define MAC_DMA_D_TXPSE_ADDRESS 0x00001270 /* DCU transmit pause control/status */
+
+#endif /* _AR6000_DMAREG_H_ */
diff --git a/drivers/staging/ath6kl/include/common/AR6002/hw4.0/hw/mac_pcu_reg.h b/drivers/staging/ath6kl/include/common/AR6002/hw4.0/hw/mac_pcu_reg.h
new file mode 100644
index 000000000000..6ccb08c5dab2
--- /dev/null
+++ b/drivers/staging/ath6kl/include/common/AR6002/hw4.0/hw/mac_pcu_reg.h
@@ -0,0 +1,3065 @@
+// ------------------------------------------------------------------
+// Copyright (c) 2004-2010 Atheros Corporation. All rights reserved.
+//
+//
+// Permission to use, copy, modify, and/or distribute this software for any
+// purpose with or without fee is hereby granted, provided that the above
+// copyright notice and this permission notice appear in all copies.
+//
+// THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+// WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+// MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+// ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+// WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+// ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+// OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+//
+//
+// ------------------------------------------------------------------
+//===================================================================
+// Author(s): ="Atheros"
+//===================================================================
+
+
+#ifndef _MAC_PCU_REG_H_
+#define _MAC_PCU_REG_H_
+
+#define MAC_PCU_STA_ADDR_L32_ADDRESS 0x00008000
+#define MAC_PCU_STA_ADDR_L32_OFFSET 0x00000000
+#define MAC_PCU_STA_ADDR_L32_ADDR_31_0_MSB 31
+#define MAC_PCU_STA_ADDR_L32_ADDR_31_0_LSB 0
+#define MAC_PCU_STA_ADDR_L32_ADDR_31_0_MASK 0xffffffff
+#define MAC_PCU_STA_ADDR_L32_ADDR_31_0_GET(x) (((x) & MAC_PCU_STA_ADDR_L32_ADDR_31_0_MASK) >> MAC_PCU_STA_ADDR_L32_ADDR_31_0_LSB)
+#define MAC_PCU_STA_ADDR_L32_ADDR_31_0_SET(x) (((x) << MAC_PCU_STA_ADDR_L32_ADDR_31_0_LSB) & MAC_PCU_STA_ADDR_L32_ADDR_31_0_MASK)
+
+#define MAC_PCU_STA_ADDR_U16_ADDRESS 0x00008004
+#define MAC_PCU_STA_ADDR_U16_OFFSET 0x00000004
+#define MAC_PCU_STA_ADDR_U16_ADHOC_MCAST_SEARCH_MSB 31
+#define MAC_PCU_STA_ADDR_U16_ADHOC_MCAST_SEARCH_LSB 31
+#define MAC_PCU_STA_ADDR_U16_ADHOC_MCAST_SEARCH_MASK 0x80000000
+#define MAC_PCU_STA_ADDR_U16_ADHOC_MCAST_SEARCH_GET(x) (((x) & MAC_PCU_STA_ADDR_U16_ADHOC_MCAST_SEARCH_MASK) >> MAC_PCU_STA_ADDR_U16_ADHOC_MCAST_SEARCH_LSB)
+#define MAC_PCU_STA_ADDR_U16_ADHOC_MCAST_SEARCH_SET(x) (((x) << MAC_PCU_STA_ADDR_U16_ADHOC_MCAST_SEARCH_LSB) & MAC_PCU_STA_ADDR_U16_ADHOC_MCAST_SEARCH_MASK)
+#define MAC_PCU_STA_ADDR_U16_CBCIV_ENDIAN_MSB 30
+#define MAC_PCU_STA_ADDR_U16_CBCIV_ENDIAN_LSB 30
+#define MAC_PCU_STA_ADDR_U16_CBCIV_ENDIAN_MASK 0x40000000
+#define MAC_PCU_STA_ADDR_U16_CBCIV_ENDIAN_GET(x) (((x) & MAC_PCU_STA_ADDR_U16_CBCIV_ENDIAN_MASK) >> MAC_PCU_STA_ADDR_U16_CBCIV_ENDIAN_LSB)
+#define MAC_PCU_STA_ADDR_U16_CBCIV_ENDIAN_SET(x) (((x) << MAC_PCU_STA_ADDR_U16_CBCIV_ENDIAN_LSB) & MAC_PCU_STA_ADDR_U16_CBCIV_ENDIAN_MASK)
+#define MAC_PCU_STA_ADDR_U16_PRESERVE_SEQNUM_MSB 29
+#define MAC_PCU_STA_ADDR_U16_PRESERVE_SEQNUM_LSB 29
+#define MAC_PCU_STA_ADDR_U16_PRESERVE_SEQNUM_MASK 0x20000000
+#define MAC_PCU_STA_ADDR_U16_PRESERVE_SEQNUM_GET(x) (((x) & MAC_PCU_STA_ADDR_U16_PRESERVE_SEQNUM_MASK) >> MAC_PCU_STA_ADDR_U16_PRESERVE_SEQNUM_LSB)
+#define MAC_PCU_STA_ADDR_U16_PRESERVE_SEQNUM_SET(x) (((x) << MAC_PCU_STA_ADDR_U16_PRESERVE_SEQNUM_LSB) & MAC_PCU_STA_ADDR_U16_PRESERVE_SEQNUM_MASK)
+#define MAC_PCU_STA_ADDR_U16_KSRCH_MODE_MSB 28
+#define MAC_PCU_STA_ADDR_U16_KSRCH_MODE_LSB 28
+#define MAC_PCU_STA_ADDR_U16_KSRCH_MODE_MASK 0x10000000
+#define MAC_PCU_STA_ADDR_U16_KSRCH_MODE_GET(x) (((x) & MAC_PCU_STA_ADDR_U16_KSRCH_MODE_MASK) >> MAC_PCU_STA_ADDR_U16_KSRCH_MODE_LSB)
+#define MAC_PCU_STA_ADDR_U16_KSRCH_MODE_SET(x) (((x) << MAC_PCU_STA_ADDR_U16_KSRCH_MODE_LSB) & MAC_PCU_STA_ADDR_U16_KSRCH_MODE_MASK)
+#define MAC_PCU_STA_ADDR_U16_CRPT_MIC_ENABLE_MSB 27
+#define MAC_PCU_STA_ADDR_U16_CRPT_MIC_ENABLE_LSB 27
+#define MAC_PCU_STA_ADDR_U16_CRPT_MIC_ENABLE_MASK 0x08000000
+#define MAC_PCU_STA_ADDR_U16_CRPT_MIC_ENABLE_GET(x) (((x) & MAC_PCU_STA_ADDR_U16_CRPT_MIC_ENABLE_MASK) >> MAC_PCU_STA_ADDR_U16_CRPT_MIC_ENABLE_LSB)
+#define MAC_PCU_STA_ADDR_U16_CRPT_MIC_ENABLE_SET(x) (((x) << MAC_PCU_STA_ADDR_U16_CRPT_MIC_ENABLE_LSB) & MAC_PCU_STA_ADDR_U16_CRPT_MIC_ENABLE_MASK)
+#define MAC_PCU_STA_ADDR_U16_SECTOR_SELF_GEN_MSB 26
+#define MAC_PCU_STA_ADDR_U16_SECTOR_SELF_GEN_LSB 26
+#define MAC_PCU_STA_ADDR_U16_SECTOR_SELF_GEN_MASK 0x04000000
+#define MAC_PCU_STA_ADDR_U16_SECTOR_SELF_GEN_GET(x) (((x) & MAC_PCU_STA_ADDR_U16_SECTOR_SELF_GEN_MASK) >> MAC_PCU_STA_ADDR_U16_SECTOR_SELF_GEN_LSB)
+#define MAC_PCU_STA_ADDR_U16_SECTOR_SELF_GEN_SET(x) (((x) << MAC_PCU_STA_ADDR_U16_SECTOR_SELF_GEN_LSB) & MAC_PCU_STA_ADDR_U16_SECTOR_SELF_GEN_MASK)
+#define MAC_PCU_STA_ADDR_U16_BASE_RATE_11B_MSB 25
+#define MAC_PCU_STA_ADDR_U16_BASE_RATE_11B_LSB 25
+#define MAC_PCU_STA_ADDR_U16_BASE_RATE_11B_MASK 0x02000000
+#define MAC_PCU_STA_ADDR_U16_BASE_RATE_11B_GET(x) (((x) & MAC_PCU_STA_ADDR_U16_BASE_RATE_11B_MASK) >> MAC_PCU_STA_ADDR_U16_BASE_RATE_11B_LSB)
+#define MAC_PCU_STA_ADDR_U16_BASE_RATE_11B_SET(x) (((x) << MAC_PCU_STA_ADDR_U16_BASE_RATE_11B_LSB) & MAC_PCU_STA_ADDR_U16_BASE_RATE_11B_MASK)
+#define MAC_PCU_STA_ADDR_U16_ACKCTS_6MB_MSB 24
+#define MAC_PCU_STA_ADDR_U16_ACKCTS_6MB_LSB 24
+#define MAC_PCU_STA_ADDR_U16_ACKCTS_6MB_MASK 0x01000000
+#define MAC_PCU_STA_ADDR_U16_ACKCTS_6MB_GET(x) (((x) & MAC_PCU_STA_ADDR_U16_ACKCTS_6MB_MASK) >> MAC_PCU_STA_ADDR_U16_ACKCTS_6MB_LSB)
+#define MAC_PCU_STA_ADDR_U16_ACKCTS_6MB_SET(x) (((x) << MAC_PCU_STA_ADDR_U16_ACKCTS_6MB_LSB) & MAC_PCU_STA_ADDR_U16_ACKCTS_6MB_MASK)
+#define MAC_PCU_STA_ADDR_U16_RTS_USE_DEF_MSB 23
+#define MAC_PCU_STA_ADDR_U16_RTS_USE_DEF_LSB 23
+#define MAC_PCU_STA_ADDR_U16_RTS_USE_DEF_MASK 0x00800000
+#define MAC_PCU_STA_ADDR_U16_RTS_USE_DEF_GET(x) (((x) & MAC_PCU_STA_ADDR_U16_RTS_USE_DEF_MASK) >> MAC_PCU_STA_ADDR_U16_RTS_USE_DEF_LSB)
+#define MAC_PCU_STA_ADDR_U16_RTS_USE_DEF_SET(x) (((x) << MAC_PCU_STA_ADDR_U16_RTS_USE_DEF_LSB) & MAC_PCU_STA_ADDR_U16_RTS_USE_DEF_MASK)
+#define MAC_PCU_STA_ADDR_U16_DEFANT_UPDATE_MSB 22
+#define MAC_PCU_STA_ADDR_U16_DEFANT_UPDATE_LSB 22
+#define MAC_PCU_STA_ADDR_U16_DEFANT_UPDATE_MASK 0x00400000
+#define MAC_PCU_STA_ADDR_U16_DEFANT_UPDATE_GET(x) (((x) & MAC_PCU_STA_ADDR_U16_DEFANT_UPDATE_MASK) >> MAC_PCU_STA_ADDR_U16_DEFANT_UPDATE_LSB)
+#define MAC_PCU_STA_ADDR_U16_DEFANT_UPDATE_SET(x) (((x) << MAC_PCU_STA_ADDR_U16_DEFANT_UPDATE_LSB) & MAC_PCU_STA_ADDR_U16_DEFANT_UPDATE_MASK)
+#define MAC_PCU_STA_ADDR_U16_USE_DEFANT_MSB 21
+#define MAC_PCU_STA_ADDR_U16_USE_DEFANT_LSB 21
+#define MAC_PCU_STA_ADDR_U16_USE_DEFANT_MASK 0x00200000
+#define MAC_PCU_STA_ADDR_U16_USE_DEFANT_GET(x) (((x) & MAC_PCU_STA_ADDR_U16_USE_DEFANT_MASK) >> MAC_PCU_STA_ADDR_U16_USE_DEFANT_LSB)
+#define MAC_PCU_STA_ADDR_U16_USE_DEFANT_SET(x) (((x) << MAC_PCU_STA_ADDR_U16_USE_DEFANT_LSB) & MAC_PCU_STA_ADDR_U16_USE_DEFANT_MASK)
+#define MAC_PCU_STA_ADDR_U16_PCF_MSB 20
+#define MAC_PCU_STA_ADDR_U16_PCF_LSB 20
+#define MAC_PCU_STA_ADDR_U16_PCF_MASK 0x00100000
+#define MAC_PCU_STA_ADDR_U16_PCF_GET(x) (((x) & MAC_PCU_STA_ADDR_U16_PCF_MASK) >> MAC_PCU_STA_ADDR_U16_PCF_LSB)
+#define MAC_PCU_STA_ADDR_U16_PCF_SET(x) (((x) << MAC_PCU_STA_ADDR_U16_PCF_LSB) & MAC_PCU_STA_ADDR_U16_PCF_MASK)
+#define MAC_PCU_STA_ADDR_U16_KEYSRCH_DIS_MSB 19
+#define MAC_PCU_STA_ADDR_U16_KEYSRCH_DIS_LSB 19
+#define MAC_PCU_STA_ADDR_U16_KEYSRCH_DIS_MASK 0x00080000
+#define MAC_PCU_STA_ADDR_U16_KEYSRCH_DIS_GET(x) (((x) & MAC_PCU_STA_ADDR_U16_KEYSRCH_DIS_MASK) >> MAC_PCU_STA_ADDR_U16_KEYSRCH_DIS_LSB)
+#define MAC_PCU_STA_ADDR_U16_KEYSRCH_DIS_SET(x) (((x) << MAC_PCU_STA_ADDR_U16_KEYSRCH_DIS_LSB) & MAC_PCU_STA_ADDR_U16_KEYSRCH_DIS_MASK)
+#define MAC_PCU_STA_ADDR_U16_PW_SAVE_MSB 18
+#define MAC_PCU_STA_ADDR_U16_PW_SAVE_LSB 18
+#define MAC_PCU_STA_ADDR_U16_PW_SAVE_MASK 0x00040000
+#define MAC_PCU_STA_ADDR_U16_PW_SAVE_GET(x) (((x) & MAC_PCU_STA_ADDR_U16_PW_SAVE_MASK) >> MAC_PCU_STA_ADDR_U16_PW_SAVE_LSB)
+#define MAC_PCU_STA_ADDR_U16_PW_SAVE_SET(x) (((x) << MAC_PCU_STA_ADDR_U16_PW_SAVE_LSB) & MAC_PCU_STA_ADDR_U16_PW_SAVE_MASK)
+#define MAC_PCU_STA_ADDR_U16_ADHOC_MSB 17
+#define MAC_PCU_STA_ADDR_U16_ADHOC_LSB 17
+#define MAC_PCU_STA_ADDR_U16_ADHOC_MASK 0x00020000
+#define MAC_PCU_STA_ADDR_U16_ADHOC_GET(x) (((x) & MAC_PCU_STA_ADDR_U16_ADHOC_MASK) >> MAC_PCU_STA_ADDR_U16_ADHOC_LSB)
+#define MAC_PCU_STA_ADDR_U16_ADHOC_SET(x) (((x) << MAC_PCU_STA_ADDR_U16_ADHOC_LSB) & MAC_PCU_STA_ADDR_U16_ADHOC_MASK)
+#define MAC_PCU_STA_ADDR_U16_STA_AP_MSB 16
+#define MAC_PCU_STA_ADDR_U16_STA_AP_LSB 16
+#define MAC_PCU_STA_ADDR_U16_STA_AP_MASK 0x00010000
+#define MAC_PCU_STA_ADDR_U16_STA_AP_GET(x) (((x) & MAC_PCU_STA_ADDR_U16_STA_AP_MASK) >> MAC_PCU_STA_ADDR_U16_STA_AP_LSB)
+#define MAC_PCU_STA_ADDR_U16_STA_AP_SET(x) (((x) << MAC_PCU_STA_ADDR_U16_STA_AP_LSB) & MAC_PCU_STA_ADDR_U16_STA_AP_MASK)
+#define MAC_PCU_STA_ADDR_U16_ADDR_47_32_MSB 15
+#define MAC_PCU_STA_ADDR_U16_ADDR_47_32_LSB 0
+#define MAC_PCU_STA_ADDR_U16_ADDR_47_32_MASK 0x0000ffff
+#define MAC_PCU_STA_ADDR_U16_ADDR_47_32_GET(x) (((x) & MAC_PCU_STA_ADDR_U16_ADDR_47_32_MASK) >> MAC_PCU_STA_ADDR_U16_ADDR_47_32_LSB)
+#define MAC_PCU_STA_ADDR_U16_ADDR_47_32_SET(x) (((x) << MAC_PCU_STA_ADDR_U16_ADDR_47_32_LSB) & MAC_PCU_STA_ADDR_U16_ADDR_47_32_MASK)
+
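/*
 * Illustrative aside, not part of this patch: every field above follows the
 * same _MSB/_LSB/_MASK/_GET/_SET pattern, so a single-field update is a plain
 * read-modify-write.  reg_read32()/reg_write32() are hypothetical MMIO
 * accessors standing in for the driver's own.
 */
extern unsigned int reg_read32(unsigned int addr);		/* assumed accessor */
extern void reg_write32(unsigned int addr, unsigned int val);	/* assumed accessor */

static void set_adhoc_mcast_search_example(int enable)
{
	unsigned int val = reg_read32(MAC_PCU_STA_ADDR_U16_ADDRESS);

	/* Clear the one-bit field, then OR in the new value via _SET(). */
	val &= ~MAC_PCU_STA_ADDR_U16_ADHOC_MCAST_SEARCH_MASK;
	val |= MAC_PCU_STA_ADDR_U16_ADHOC_MCAST_SEARCH_SET(enable ? 1u : 0u);

	reg_write32(MAC_PCU_STA_ADDR_U16_ADDRESS, val);
}
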
+#define MAC_PCU_BSSID_L32_ADDRESS 0x00008008
+#define MAC_PCU_BSSID_L32_OFFSET 0x00000008
+#define MAC_PCU_BSSID_L32_ADDR_MSB 31
+#define MAC_PCU_BSSID_L32_ADDR_LSB 0
+#define MAC_PCU_BSSID_L32_ADDR_MASK 0xffffffff
+#define MAC_PCU_BSSID_L32_ADDR_GET(x) (((x) & MAC_PCU_BSSID_L32_ADDR_MASK) >> MAC_PCU_BSSID_L32_ADDR_LSB)
+#define MAC_PCU_BSSID_L32_ADDR_SET(x) (((x) << MAC_PCU_BSSID_L32_ADDR_LSB) & MAC_PCU_BSSID_L32_ADDR_MASK)
+
+#define MAC_PCU_BSSID_U16_ADDRESS 0x0000800c
+#define MAC_PCU_BSSID_U16_OFFSET 0x0000000c
+#define MAC_PCU_BSSID_U16_AID_MSB 26
+#define MAC_PCU_BSSID_U16_AID_LSB 16
+#define MAC_PCU_BSSID_U16_AID_MASK 0x07ff0000
+#define MAC_PCU_BSSID_U16_AID_GET(x) (((x) & MAC_PCU_BSSID_U16_AID_MASK) >> MAC_PCU_BSSID_U16_AID_LSB)
+#define MAC_PCU_BSSID_U16_AID_SET(x) (((x) << MAC_PCU_BSSID_U16_AID_LSB) & MAC_PCU_BSSID_U16_AID_MASK)
+#define MAC_PCU_BSSID_U16_ADDR_MSB 15
+#define MAC_PCU_BSSID_U16_ADDR_LSB 0
+#define MAC_PCU_BSSID_U16_ADDR_MASK 0x0000ffff
+#define MAC_PCU_BSSID_U16_ADDR_GET(x) (((x) & MAC_PCU_BSSID_U16_ADDR_MASK) >> MAC_PCU_BSSID_U16_ADDR_LSB)
+#define MAC_PCU_BSSID_U16_ADDR_SET(x) (((x) << MAC_PCU_BSSID_U16_ADDR_LSB) & MAC_PCU_BSSID_U16_ADDR_MASK)
+
+#define MAC_PCU_BCN_RSSI_AVE_ADDRESS 0x00008010
+#define MAC_PCU_BCN_RSSI_AVE_OFFSET 0x00000010
+#define MAC_PCU_BCN_RSSI_AVE_VALUE_MSB 11
+#define MAC_PCU_BCN_RSSI_AVE_VALUE_LSB 0
+#define MAC_PCU_BCN_RSSI_AVE_VALUE_MASK 0x00000fff
+#define MAC_PCU_BCN_RSSI_AVE_VALUE_GET(x) (((x) & MAC_PCU_BCN_RSSI_AVE_VALUE_MASK) >> MAC_PCU_BCN_RSSI_AVE_VALUE_LSB)
+#define MAC_PCU_BCN_RSSI_AVE_VALUE_SET(x) (((x) << MAC_PCU_BCN_RSSI_AVE_VALUE_LSB) & MAC_PCU_BCN_RSSI_AVE_VALUE_MASK)
+
+#define MAC_PCU_ACK_CTS_TIMEOUT_ADDRESS 0x00008014
+#define MAC_PCU_ACK_CTS_TIMEOUT_OFFSET 0x00000014
+#define MAC_PCU_ACK_CTS_TIMEOUT_CTS_TIMEOUT_MSB 29
+#define MAC_PCU_ACK_CTS_TIMEOUT_CTS_TIMEOUT_LSB 16
+#define MAC_PCU_ACK_CTS_TIMEOUT_CTS_TIMEOUT_MASK 0x3fff0000
+#define MAC_PCU_ACK_CTS_TIMEOUT_CTS_TIMEOUT_GET(x) (((x) & MAC_PCU_ACK_CTS_TIMEOUT_CTS_TIMEOUT_MASK) >> MAC_PCU_ACK_CTS_TIMEOUT_CTS_TIMEOUT_LSB)
+#define MAC_PCU_ACK_CTS_TIMEOUT_CTS_TIMEOUT_SET(x) (((x) << MAC_PCU_ACK_CTS_TIMEOUT_CTS_TIMEOUT_LSB) & MAC_PCU_ACK_CTS_TIMEOUT_CTS_TIMEOUT_MASK)
+#define MAC_PCU_ACK_CTS_TIMEOUT_ACK_TIMEOUT_MSB 13
+#define MAC_PCU_ACK_CTS_TIMEOUT_ACK_TIMEOUT_LSB 0
+#define MAC_PCU_ACK_CTS_TIMEOUT_ACK_TIMEOUT_MASK 0x00003fff
+#define MAC_PCU_ACK_CTS_TIMEOUT_ACK_TIMEOUT_GET(x) (((x) & MAC_PCU_ACK_CTS_TIMEOUT_ACK_TIMEOUT_MASK) >> MAC_PCU_ACK_CTS_TIMEOUT_ACK_TIMEOUT_LSB)
+#define MAC_PCU_ACK_CTS_TIMEOUT_ACK_TIMEOUT_SET(x) (((x) << MAC_PCU_ACK_CTS_TIMEOUT_ACK_TIMEOUT_LSB) & MAC_PCU_ACK_CTS_TIMEOUT_ACK_TIMEOUT_MASK)
+
+#define MAC_PCU_BCN_RSSI_CTL_ADDRESS 0x00008018
+#define MAC_PCU_BCN_RSSI_CTL_OFFSET 0x00000018
+#define MAC_PCU_BCN_RSSI_CTL_RESET_MSB 29
+#define MAC_PCU_BCN_RSSI_CTL_RESET_LSB 29
+#define MAC_PCU_BCN_RSSI_CTL_RESET_MASK 0x20000000
+#define MAC_PCU_BCN_RSSI_CTL_RESET_GET(x) (((x) & MAC_PCU_BCN_RSSI_CTL_RESET_MASK) >> MAC_PCU_BCN_RSSI_CTL_RESET_LSB)
+#define MAC_PCU_BCN_RSSI_CTL_RESET_SET(x) (((x) << MAC_PCU_BCN_RSSI_CTL_RESET_LSB) & MAC_PCU_BCN_RSSI_CTL_RESET_MASK)
+#define MAC_PCU_BCN_RSSI_CTL_WEIGHT_MSB 28
+#define MAC_PCU_BCN_RSSI_CTL_WEIGHT_LSB 24
+#define MAC_PCU_BCN_RSSI_CTL_WEIGHT_MASK 0x1f000000
+#define MAC_PCU_BCN_RSSI_CTL_WEIGHT_GET(x) (((x) & MAC_PCU_BCN_RSSI_CTL_WEIGHT_MASK) >> MAC_PCU_BCN_RSSI_CTL_WEIGHT_LSB)
+#define MAC_PCU_BCN_RSSI_CTL_WEIGHT_SET(x) (((x) << MAC_PCU_BCN_RSSI_CTL_WEIGHT_LSB) & MAC_PCU_BCN_RSSI_CTL_WEIGHT_MASK)
+#define MAC_PCU_BCN_RSSI_CTL_RSSI_HIGH_THRESH_MSB 23
+#define MAC_PCU_BCN_RSSI_CTL_RSSI_HIGH_THRESH_LSB 16
+#define MAC_PCU_BCN_RSSI_CTL_RSSI_HIGH_THRESH_MASK 0x00ff0000
+#define MAC_PCU_BCN_RSSI_CTL_RSSI_HIGH_THRESH_GET(x) (((x) & MAC_PCU_BCN_RSSI_CTL_RSSI_HIGH_THRESH_MASK) >> MAC_PCU_BCN_RSSI_CTL_RSSI_HIGH_THRESH_LSB)
+#define MAC_PCU_BCN_RSSI_CTL_RSSI_HIGH_THRESH_SET(x) (((x) << MAC_PCU_BCN_RSSI_CTL_RSSI_HIGH_THRESH_LSB) & MAC_PCU_BCN_RSSI_CTL_RSSI_HIGH_THRESH_MASK)
+#define MAC_PCU_BCN_RSSI_CTL_MISS_THRESH_MSB 15
+#define MAC_PCU_BCN_RSSI_CTL_MISS_THRESH_LSB 8
+#define MAC_PCU_BCN_RSSI_CTL_MISS_THRESH_MASK 0x0000ff00
+#define MAC_PCU_BCN_RSSI_CTL_MISS_THRESH_GET(x) (((x) & MAC_PCU_BCN_RSSI_CTL_MISS_THRESH_MASK) >> MAC_PCU_BCN_RSSI_CTL_MISS_THRESH_LSB)
+#define MAC_PCU_BCN_RSSI_CTL_MISS_THRESH_SET(x) (((x) << MAC_PCU_BCN_RSSI_CTL_MISS_THRESH_LSB) & MAC_PCU_BCN_RSSI_CTL_MISS_THRESH_MASK)
+#define MAC_PCU_BCN_RSSI_CTL_RSSI_LOW_THRESH_MSB 7
+#define MAC_PCU_BCN_RSSI_CTL_RSSI_LOW_THRESH_LSB 0
+#define MAC_PCU_BCN_RSSI_CTL_RSSI_LOW_THRESH_MASK 0x000000ff
+#define MAC_PCU_BCN_RSSI_CTL_RSSI_LOW_THRESH_GET(x) (((x) & MAC_PCU_BCN_RSSI_CTL_RSSI_LOW_THRESH_MASK) >> MAC_PCU_BCN_RSSI_CTL_RSSI_LOW_THRESH_LSB)
+#define MAC_PCU_BCN_RSSI_CTL_RSSI_LOW_THRESH_SET(x) (((x) << MAC_PCU_BCN_RSSI_CTL_RSSI_LOW_THRESH_LSB) & MAC_PCU_BCN_RSSI_CTL_RSSI_LOW_THRESH_MASK)
+
+#define MAC_PCU_USEC_LATENCY_ADDRESS 0x0000801c
+#define MAC_PCU_USEC_LATENCY_OFFSET 0x0000001c
+#define MAC_PCU_USEC_LATENCY_RX_LATENCY_MSB 28
+#define MAC_PCU_USEC_LATENCY_RX_LATENCY_LSB 23
+#define MAC_PCU_USEC_LATENCY_RX_LATENCY_MASK 0x1f800000
+#define MAC_PCU_USEC_LATENCY_RX_LATENCY_GET(x) (((x) & MAC_PCU_USEC_LATENCY_RX_LATENCY_MASK) >> MAC_PCU_USEC_LATENCY_RX_LATENCY_LSB)
+#define MAC_PCU_USEC_LATENCY_RX_LATENCY_SET(x) (((x) << MAC_PCU_USEC_LATENCY_RX_LATENCY_LSB) & MAC_PCU_USEC_LATENCY_RX_LATENCY_MASK)
+#define MAC_PCU_USEC_LATENCY_TX_LATENCY_MSB 22
+#define MAC_PCU_USEC_LATENCY_TX_LATENCY_LSB 14
+#define MAC_PCU_USEC_LATENCY_TX_LATENCY_MASK 0x007fc000
+#define MAC_PCU_USEC_LATENCY_TX_LATENCY_GET(x) (((x) & MAC_PCU_USEC_LATENCY_TX_LATENCY_MASK) >> MAC_PCU_USEC_LATENCY_TX_LATENCY_LSB)
+#define MAC_PCU_USEC_LATENCY_TX_LATENCY_SET(x) (((x) << MAC_PCU_USEC_LATENCY_TX_LATENCY_LSB) & MAC_PCU_USEC_LATENCY_TX_LATENCY_MASK)
+#define MAC_PCU_USEC_LATENCY_USEC_MSB 7
+#define MAC_PCU_USEC_LATENCY_USEC_LSB 0
+#define MAC_PCU_USEC_LATENCY_USEC_MASK 0x000000ff
+#define MAC_PCU_USEC_LATENCY_USEC_GET(x) (((x) & MAC_PCU_USEC_LATENCY_USEC_MASK) >> MAC_PCU_USEC_LATENCY_USEC_LSB)
+#define MAC_PCU_USEC_LATENCY_USEC_SET(x) (((x) << MAC_PCU_USEC_LATENCY_USEC_LSB) & MAC_PCU_USEC_LATENCY_USEC_MASK)
+
+#define PCU_MAX_CFP_DUR_ADDRESS 0x00008020
+#define PCU_MAX_CFP_DUR_OFFSET 0x00000020
+#define PCU_MAX_CFP_DUR_VALUE_MSB 15
+#define PCU_MAX_CFP_DUR_VALUE_LSB 0
+#define PCU_MAX_CFP_DUR_VALUE_MASK 0x0000ffff
+#define PCU_MAX_CFP_DUR_VALUE_GET(x) (((x) & PCU_MAX_CFP_DUR_VALUE_MASK) >> PCU_MAX_CFP_DUR_VALUE_LSB)
+#define PCU_MAX_CFP_DUR_VALUE_SET(x) (((x) << PCU_MAX_CFP_DUR_VALUE_LSB) & PCU_MAX_CFP_DUR_VALUE_MASK)
+
+#define MAC_PCU_RX_FILTER_ADDRESS 0x00008024
+#define MAC_PCU_RX_FILTER_OFFSET 0x00000024
+#define MAC_PCU_RX_FILTER_GENERIC_FILTER_MSB 25
+#define MAC_PCU_RX_FILTER_GENERIC_FILTER_LSB 24
+#define MAC_PCU_RX_FILTER_GENERIC_FILTER_MASK 0x03000000
+#define MAC_PCU_RX_FILTER_GENERIC_FILTER_GET(x) (((x) & MAC_PCU_RX_FILTER_GENERIC_FILTER_MASK) >> MAC_PCU_RX_FILTER_GENERIC_FILTER_LSB)
+#define MAC_PCU_RX_FILTER_GENERIC_FILTER_SET(x) (((x) << MAC_PCU_RX_FILTER_GENERIC_FILTER_LSB) & MAC_PCU_RX_FILTER_GENERIC_FILTER_MASK)
+#define MAC_PCU_RX_FILTER_GENERIC_FTYPE_MSB 23
+#define MAC_PCU_RX_FILTER_GENERIC_FTYPE_LSB 18
+#define MAC_PCU_RX_FILTER_GENERIC_FTYPE_MASK 0x00fc0000
+#define MAC_PCU_RX_FILTER_GENERIC_FTYPE_GET(x) (((x) & MAC_PCU_RX_FILTER_GENERIC_FTYPE_MASK) >> MAC_PCU_RX_FILTER_GENERIC_FTYPE_LSB)
+#define MAC_PCU_RX_FILTER_GENERIC_FTYPE_SET(x) (((x) << MAC_PCU_RX_FILTER_GENERIC_FTYPE_LSB) & MAC_PCU_RX_FILTER_GENERIC_FTYPE_MASK)
+#define MAC_PCU_RX_FILTER_FROM_TO_DS_MSB 17
+#define MAC_PCU_RX_FILTER_FROM_TO_DS_LSB 17
+#define MAC_PCU_RX_FILTER_FROM_TO_DS_MASK 0x00020000
+#define MAC_PCU_RX_FILTER_FROM_TO_DS_GET(x) (((x) & MAC_PCU_RX_FILTER_FROM_TO_DS_MASK) >> MAC_PCU_RX_FILTER_FROM_TO_DS_LSB)
+#define MAC_PCU_RX_FILTER_FROM_TO_DS_SET(x) (((x) << MAC_PCU_RX_FILTER_FROM_TO_DS_LSB) & MAC_PCU_RX_FILTER_FROM_TO_DS_MASK)
+#define MAC_PCU_RX_FILTER_RST_DLMTR_CNT_DISABLE_MSB 16
+#define MAC_PCU_RX_FILTER_RST_DLMTR_CNT_DISABLE_LSB 16
+#define MAC_PCU_RX_FILTER_RST_DLMTR_CNT_DISABLE_MASK 0x00010000
+#define MAC_PCU_RX_FILTER_RST_DLMTR_CNT_DISABLE_GET(x) (((x) & MAC_PCU_RX_FILTER_RST_DLMTR_CNT_DISABLE_MASK) >> MAC_PCU_RX_FILTER_RST_DLMTR_CNT_DISABLE_LSB)
+#define MAC_PCU_RX_FILTER_RST_DLMTR_CNT_DISABLE_SET(x) (((x) << MAC_PCU_RX_FILTER_RST_DLMTR_CNT_DISABLE_LSB) & MAC_PCU_RX_FILTER_RST_DLMTR_CNT_DISABLE_MASK)
+#define MAC_PCU_RX_FILTER_MCAST_BCAST_ALL_MSB 15
+#define MAC_PCU_RX_FILTER_MCAST_BCAST_ALL_LSB 15
+#define MAC_PCU_RX_FILTER_MCAST_BCAST_ALL_MASK 0x00008000
+#define MAC_PCU_RX_FILTER_MCAST_BCAST_ALL_GET(x) (((x) & MAC_PCU_RX_FILTER_MCAST_BCAST_ALL_MASK) >> MAC_PCU_RX_FILTER_MCAST_BCAST_ALL_LSB)
+#define MAC_PCU_RX_FILTER_MCAST_BCAST_ALL_SET(x) (((x) << MAC_PCU_RX_FILTER_MCAST_BCAST_ALL_LSB) & MAC_PCU_RX_FILTER_MCAST_BCAST_ALL_MASK)
+#define MAC_PCU_RX_FILTER_PS_POLL_MSB 14
+#define MAC_PCU_RX_FILTER_PS_POLL_LSB 14
+#define MAC_PCU_RX_FILTER_PS_POLL_MASK 0x00004000
+#define MAC_PCU_RX_FILTER_PS_POLL_GET(x) (((x) & MAC_PCU_RX_FILTER_PS_POLL_MASK) >> MAC_PCU_RX_FILTER_PS_POLL_LSB)
+#define MAC_PCU_RX_FILTER_PS_POLL_SET(x) (((x) << MAC_PCU_RX_FILTER_PS_POLL_LSB) & MAC_PCU_RX_FILTER_PS_POLL_MASK)
+#define MAC_PCU_RX_FILTER_ASSUME_RADAR_MSB 13
+#define MAC_PCU_RX_FILTER_ASSUME_RADAR_LSB 13
+#define MAC_PCU_RX_FILTER_ASSUME_RADAR_MASK 0x00002000
+#define MAC_PCU_RX_FILTER_ASSUME_RADAR_GET(x) (((x) & MAC_PCU_RX_FILTER_ASSUME_RADAR_MASK) >> MAC_PCU_RX_FILTER_ASSUME_RADAR_LSB)
+#define MAC_PCU_RX_FILTER_ASSUME_RADAR_SET(x) (((x) << MAC_PCU_RX_FILTER_ASSUME_RADAR_LSB) & MAC_PCU_RX_FILTER_ASSUME_RADAR_MASK)
+#define MAC_PCU_RX_FILTER_UNCOMPRESSED_BA_BAR_MSB 12
+#define MAC_PCU_RX_FILTER_UNCOMPRESSED_BA_BAR_LSB 12
+#define MAC_PCU_RX_FILTER_UNCOMPRESSED_BA_BAR_MASK 0x00001000
+#define MAC_PCU_RX_FILTER_UNCOMPRESSED_BA_BAR_GET(x) (((x) & MAC_PCU_RX_FILTER_UNCOMPRESSED_BA_BAR_MASK) >> MAC_PCU_RX_FILTER_UNCOMPRESSED_BA_BAR_LSB)
+#define MAC_PCU_RX_FILTER_UNCOMPRESSED_BA_BAR_SET(x) (((x) << MAC_PCU_RX_FILTER_UNCOMPRESSED_BA_BAR_LSB) & MAC_PCU_RX_FILTER_UNCOMPRESSED_BA_BAR_MASK)
+#define MAC_PCU_RX_FILTER_COMPRESSED_BA_MSB 11
+#define MAC_PCU_RX_FILTER_COMPRESSED_BA_LSB 11
+#define MAC_PCU_RX_FILTER_COMPRESSED_BA_MASK 0x00000800
+#define MAC_PCU_RX_FILTER_COMPRESSED_BA_GET(x) (((x) & MAC_PCU_RX_FILTER_COMPRESSED_BA_MASK) >> MAC_PCU_RX_FILTER_COMPRESSED_BA_LSB)
+#define MAC_PCU_RX_FILTER_COMPRESSED_BA_SET(x) (((x) << MAC_PCU_RX_FILTER_COMPRESSED_BA_LSB) & MAC_PCU_RX_FILTER_COMPRESSED_BA_MASK)
+#define MAC_PCU_RX_FILTER_COMPRESSED_BAR_MSB 10
+#define MAC_PCU_RX_FILTER_COMPRESSED_BAR_LSB 10
+#define MAC_PCU_RX_FILTER_COMPRESSED_BAR_MASK 0x00000400
+#define MAC_PCU_RX_FILTER_COMPRESSED_BAR_GET(x) (((x) & MAC_PCU_RX_FILTER_COMPRESSED_BAR_MASK) >> MAC_PCU_RX_FILTER_COMPRESSED_BAR_LSB)
+#define MAC_PCU_RX_FILTER_COMPRESSED_BAR_SET(x) (((x) << MAC_PCU_RX_FILTER_COMPRESSED_BAR_LSB) & MAC_PCU_RX_FILTER_COMPRESSED_BAR_MASK)
+#define MAC_PCU_RX_FILTER_MY_BEACON_MSB 9
+#define MAC_PCU_RX_FILTER_MY_BEACON_LSB 9
+#define MAC_PCU_RX_FILTER_MY_BEACON_MASK 0x00000200
+#define MAC_PCU_RX_FILTER_MY_BEACON_GET(x) (((x) & MAC_PCU_RX_FILTER_MY_BEACON_MASK) >> MAC_PCU_RX_FILTER_MY_BEACON_LSB)
+#define MAC_PCU_RX_FILTER_MY_BEACON_SET(x) (((x) << MAC_PCU_RX_FILTER_MY_BEACON_LSB) & MAC_PCU_RX_FILTER_MY_BEACON_MASK)
+#define MAC_PCU_RX_FILTER_SYNC_FRAME_MSB 8
+#define MAC_PCU_RX_FILTER_SYNC_FRAME_LSB 8
+#define MAC_PCU_RX_FILTER_SYNC_FRAME_MASK 0x00000100
+#define MAC_PCU_RX_FILTER_SYNC_FRAME_GET(x) (((x) & MAC_PCU_RX_FILTER_SYNC_FRAME_MASK) >> MAC_PCU_RX_FILTER_SYNC_FRAME_LSB)
+#define MAC_PCU_RX_FILTER_SYNC_FRAME_SET(x) (((x) << MAC_PCU_RX_FILTER_SYNC_FRAME_LSB) & MAC_PCU_RX_FILTER_SYNC_FRAME_MASK)
+#define MAC_PCU_RX_FILTER_PROBE_REQ_MSB 7
+#define MAC_PCU_RX_FILTER_PROBE_REQ_LSB 7
+#define MAC_PCU_RX_FILTER_PROBE_REQ_MASK 0x00000080
+#define MAC_PCU_RX_FILTER_PROBE_REQ_GET(x) (((x) & MAC_PCU_RX_FILTER_PROBE_REQ_MASK) >> MAC_PCU_RX_FILTER_PROBE_REQ_LSB)
+#define MAC_PCU_RX_FILTER_PROBE_REQ_SET(x) (((x) << MAC_PCU_RX_FILTER_PROBE_REQ_LSB) & MAC_PCU_RX_FILTER_PROBE_REQ_MASK)
+#define MAC_PCU_RX_FILTER_XR_POLL_MSB 6
+#define MAC_PCU_RX_FILTER_XR_POLL_LSB 6
+#define MAC_PCU_RX_FILTER_XR_POLL_MASK 0x00000040
+#define MAC_PCU_RX_FILTER_XR_POLL_GET(x) (((x) & MAC_PCU_RX_FILTER_XR_POLL_MASK) >> MAC_PCU_RX_FILTER_XR_POLL_LSB)
+#define MAC_PCU_RX_FILTER_XR_POLL_SET(x) (((x) << MAC_PCU_RX_FILTER_XR_POLL_LSB) & MAC_PCU_RX_FILTER_XR_POLL_MASK)
+#define MAC_PCU_RX_FILTER_PROMISCUOUS_MSB 5
+#define MAC_PCU_RX_FILTER_PROMISCUOUS_LSB 5
+#define MAC_PCU_RX_FILTER_PROMISCUOUS_MASK 0x00000020
+#define MAC_PCU_RX_FILTER_PROMISCUOUS_GET(x) (((x) & MAC_PCU_RX_FILTER_PROMISCUOUS_MASK) >> MAC_PCU_RX_FILTER_PROMISCUOUS_LSB)
+#define MAC_PCU_RX_FILTER_PROMISCUOUS_SET(x) (((x) << MAC_PCU_RX_FILTER_PROMISCUOUS_LSB) & MAC_PCU_RX_FILTER_PROMISCUOUS_MASK)
+#define MAC_PCU_RX_FILTER_BEACON_MSB 4
+#define MAC_PCU_RX_FILTER_BEACON_LSB 4
+#define MAC_PCU_RX_FILTER_BEACON_MASK 0x00000010
+#define MAC_PCU_RX_FILTER_BEACON_GET(x) (((x) & MAC_PCU_RX_FILTER_BEACON_MASK) >> MAC_PCU_RX_FILTER_BEACON_LSB)
+#define MAC_PCU_RX_FILTER_BEACON_SET(x) (((x) << MAC_PCU_RX_FILTER_BEACON_LSB) & MAC_PCU_RX_FILTER_BEACON_MASK)
+#define MAC_PCU_RX_FILTER_CONTROL_MSB 3
+#define MAC_PCU_RX_FILTER_CONTROL_LSB 3
+#define MAC_PCU_RX_FILTER_CONTROL_MASK 0x00000008
+#define MAC_PCU_RX_FILTER_CONTROL_GET(x) (((x) & MAC_PCU_RX_FILTER_CONTROL_MASK) >> MAC_PCU_RX_FILTER_CONTROL_LSB)
+#define MAC_PCU_RX_FILTER_CONTROL_SET(x) (((x) << MAC_PCU_RX_FILTER_CONTROL_LSB) & MAC_PCU_RX_FILTER_CONTROL_MASK)
+#define MAC_PCU_RX_FILTER_BROADCAST_MSB 2
+#define MAC_PCU_RX_FILTER_BROADCAST_LSB 2
+#define MAC_PCU_RX_FILTER_BROADCAST_MASK 0x00000004
+#define MAC_PCU_RX_FILTER_BROADCAST_GET(x) (((x) & MAC_PCU_RX_FILTER_BROADCAST_MASK) >> MAC_PCU_RX_FILTER_BROADCAST_LSB)
+#define MAC_PCU_RX_FILTER_BROADCAST_SET(x) (((x) << MAC_PCU_RX_FILTER_BROADCAST_LSB) & MAC_PCU_RX_FILTER_BROADCAST_MASK)
+#define MAC_PCU_RX_FILTER_MULTICAST_MSB 1
+#define MAC_PCU_RX_FILTER_MULTICAST_LSB 1
+#define MAC_PCU_RX_FILTER_MULTICAST_MASK 0x00000002
+#define MAC_PCU_RX_FILTER_MULTICAST_GET(x) (((x) & MAC_PCU_RX_FILTER_MULTICAST_MASK) >> MAC_PCU_RX_FILTER_MULTICAST_LSB)
+#define MAC_PCU_RX_FILTER_MULTICAST_SET(x) (((x) << MAC_PCU_RX_FILTER_MULTICAST_LSB) & MAC_PCU_RX_FILTER_MULTICAST_MASK)
+#define MAC_PCU_RX_FILTER_UNICAST_MSB 0
+#define MAC_PCU_RX_FILTER_UNICAST_LSB 0
+#define MAC_PCU_RX_FILTER_UNICAST_MASK 0x00000001
+#define MAC_PCU_RX_FILTER_UNICAST_GET(x) (((x) & MAC_PCU_RX_FILTER_UNICAST_MASK) >> MAC_PCU_RX_FILTER_UNICAST_LSB)
+#define MAC_PCU_RX_FILTER_UNICAST_SET(x) (((x) << MAC_PCU_RX_FILTER_UNICAST_LSB) & MAC_PCU_RX_FILTER_UNICAST_MASK)
+
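/*
 * Illustrative aside, not part of this patch: a typical RX filter value is
 * built by OR-ing the single-bit _SET() helpers above and writing the result
 * in one register access.  reg_write32() is a hypothetical MMIO write helper.
 */
extern void reg_write32(unsigned int addr, unsigned int val);	/* assumed accessor */

static void rx_filter_example(void)
{
	/* Accept unicast, multicast, broadcast and beacon frames. */
	unsigned int filter = MAC_PCU_RX_FILTER_UNICAST_SET(1u) |
			      MAC_PCU_RX_FILTER_MULTICAST_SET(1u) |
			      MAC_PCU_RX_FILTER_BROADCAST_SET(1u) |
			      MAC_PCU_RX_FILTER_BEACON_SET(1u);

	reg_write32(MAC_PCU_RX_FILTER_ADDRESS, filter);
}
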
+#define MAC_PCU_MCAST_FILTER_L32_ADDRESS 0x00008028
+#define MAC_PCU_MCAST_FILTER_L32_OFFSET 0x00000028
+#define MAC_PCU_MCAST_FILTER_L32_VALUE_MSB 31
+#define MAC_PCU_MCAST_FILTER_L32_VALUE_LSB 0
+#define MAC_PCU_MCAST_FILTER_L32_VALUE_MASK 0xffffffff
+#define MAC_PCU_MCAST_FILTER_L32_VALUE_GET(x) (((x) & MAC_PCU_MCAST_FILTER_L32_VALUE_MASK) >> MAC_PCU_MCAST_FILTER_L32_VALUE_LSB)
+#define MAC_PCU_MCAST_FILTER_L32_VALUE_SET(x) (((x) << MAC_PCU_MCAST_FILTER_L32_VALUE_LSB) & MAC_PCU_MCAST_FILTER_L32_VALUE_MASK)
+
+#define MAC_PCU_MCAST_FILTER_U32_ADDRESS 0x0000802c
+#define MAC_PCU_MCAST_FILTER_U32_OFFSET 0x0000002c
+#define MAC_PCU_MCAST_FILTER_U32_VALUE_MSB 31
+#define MAC_PCU_MCAST_FILTER_U32_VALUE_LSB 0
+#define MAC_PCU_MCAST_FILTER_U32_VALUE_MASK 0xffffffff
+#define MAC_PCU_MCAST_FILTER_U32_VALUE_GET(x) (((x) & MAC_PCU_MCAST_FILTER_U32_VALUE_MASK) >> MAC_PCU_MCAST_FILTER_U32_VALUE_LSB)
+#define MAC_PCU_MCAST_FILTER_U32_VALUE_SET(x) (((x) << MAC_PCU_MCAST_FILTER_U32_VALUE_LSB) & MAC_PCU_MCAST_FILTER_U32_VALUE_MASK)
+
+#define MAC_PCU_DIAG_SW_ADDRESS 0x00008030
+#define MAC_PCU_DIAG_SW_OFFSET 0x00000030
+#define MAC_PCU_DIAG_SW_DEBUG_MODE_MSB 31
+#define MAC_PCU_DIAG_SW_DEBUG_MODE_LSB 30
+#define MAC_PCU_DIAG_SW_DEBUG_MODE_MASK 0xc0000000
+#define MAC_PCU_DIAG_SW_DEBUG_MODE_GET(x) (((x) & MAC_PCU_DIAG_SW_DEBUG_MODE_MASK) >> MAC_PCU_DIAG_SW_DEBUG_MODE_LSB)
+#define MAC_PCU_DIAG_SW_DEBUG_MODE_SET(x) (((x) << MAC_PCU_DIAG_SW_DEBUG_MODE_LSB) & MAC_PCU_DIAG_SW_DEBUG_MODE_MASK)
+#define MAC_PCU_DIAG_SW_RX_CLEAR_EXT_LOW_MSB 29
+#define MAC_PCU_DIAG_SW_RX_CLEAR_EXT_LOW_LSB 29
+#define MAC_PCU_DIAG_SW_RX_CLEAR_EXT_LOW_MASK 0x20000000
+#define MAC_PCU_DIAG_SW_RX_CLEAR_EXT_LOW_GET(x) (((x) & MAC_PCU_DIAG_SW_RX_CLEAR_EXT_LOW_MASK) >> MAC_PCU_DIAG_SW_RX_CLEAR_EXT_LOW_LSB)
+#define MAC_PCU_DIAG_SW_RX_CLEAR_EXT_LOW_SET(x) (((x) << MAC_PCU_DIAG_SW_RX_CLEAR_EXT_LOW_LSB) & MAC_PCU_DIAG_SW_RX_CLEAR_EXT_LOW_MASK)
+#define MAC_PCU_DIAG_SW_RX_CLEAR_CTL_LOW_MSB 28
+#define MAC_PCU_DIAG_SW_RX_CLEAR_CTL_LOW_LSB 28
+#define MAC_PCU_DIAG_SW_RX_CLEAR_CTL_LOW_MASK 0x10000000
+#define MAC_PCU_DIAG_SW_RX_CLEAR_CTL_LOW_GET(x) (((x) & MAC_PCU_DIAG_SW_RX_CLEAR_CTL_LOW_MASK) >> MAC_PCU_DIAG_SW_RX_CLEAR_CTL_LOW_LSB)
+#define MAC_PCU_DIAG_SW_RX_CLEAR_CTL_LOW_SET(x) (((x) << MAC_PCU_DIAG_SW_RX_CLEAR_CTL_LOW_LSB) & MAC_PCU_DIAG_SW_RX_CLEAR_CTL_LOW_MASK)
+#define MAC_PCU_DIAG_SW_OBS_SEL_2_MSB 27
+#define MAC_PCU_DIAG_SW_OBS_SEL_2_LSB 27
+#define MAC_PCU_DIAG_SW_OBS_SEL_2_MASK 0x08000000
+#define MAC_PCU_DIAG_SW_OBS_SEL_2_GET(x) (((x) & MAC_PCU_DIAG_SW_OBS_SEL_2_MASK) >> MAC_PCU_DIAG_SW_OBS_SEL_2_LSB)
+#define MAC_PCU_DIAG_SW_OBS_SEL_2_SET(x) (((x) << MAC_PCU_DIAG_SW_OBS_SEL_2_LSB) & MAC_PCU_DIAG_SW_OBS_SEL_2_MASK)
+#define MAC_PCU_DIAG_SW_SATURATE_CYCLE_CNT_MSB 26
+#define MAC_PCU_DIAG_SW_SATURATE_CYCLE_CNT_LSB 26
+#define MAC_PCU_DIAG_SW_SATURATE_CYCLE_CNT_MASK 0x04000000
+#define MAC_PCU_DIAG_SW_SATURATE_CYCLE_CNT_GET(x) (((x) & MAC_PCU_DIAG_SW_SATURATE_CYCLE_CNT_MASK) >> MAC_PCU_DIAG_SW_SATURATE_CYCLE_CNT_LSB)
+#define MAC_PCU_DIAG_SW_SATURATE_CYCLE_CNT_SET(x) (((x) << MAC_PCU_DIAG_SW_SATURATE_CYCLE_CNT_LSB) & MAC_PCU_DIAG_SW_SATURATE_CYCLE_CNT_MASK)
+#define MAC_PCU_DIAG_SW_FORCE_RX_ABORT_MSB 25
+#define MAC_PCU_DIAG_SW_FORCE_RX_ABORT_LSB 25
+#define MAC_PCU_DIAG_SW_FORCE_RX_ABORT_MASK 0x02000000
+#define MAC_PCU_DIAG_SW_FORCE_RX_ABORT_GET(x) (((x) & MAC_PCU_DIAG_SW_FORCE_RX_ABORT_MASK) >> MAC_PCU_DIAG_SW_FORCE_RX_ABORT_LSB)
+#define MAC_PCU_DIAG_SW_FORCE_RX_ABORT_SET(x) (((x) << MAC_PCU_DIAG_SW_FORCE_RX_ABORT_LSB) & MAC_PCU_DIAG_SW_FORCE_RX_ABORT_MASK)
+#define MAC_PCU_DIAG_SW_DUAL_CHAIN_CHAN_INFO_MSB 24
+#define MAC_PCU_DIAG_SW_DUAL_CHAIN_CHAN_INFO_LSB 24
+#define MAC_PCU_DIAG_SW_DUAL_CHAIN_CHAN_INFO_MASK 0x01000000
+#define MAC_PCU_DIAG_SW_DUAL_CHAIN_CHAN_INFO_GET(x) (((x) & MAC_PCU_DIAG_SW_DUAL_CHAIN_CHAN_INFO_MASK) >> MAC_PCU_DIAG_SW_DUAL_CHAIN_CHAN_INFO_LSB)
+#define MAC_PCU_DIAG_SW_DUAL_CHAIN_CHAN_INFO_SET(x) (((x) << MAC_PCU_DIAG_SW_DUAL_CHAIN_CHAN_INFO_LSB) & MAC_PCU_DIAG_SW_DUAL_CHAIN_CHAN_INFO_MASK)
+#define MAC_PCU_DIAG_SW_PHYERR_ENABLE_EIFS_CTL_MSB 23
+#define MAC_PCU_DIAG_SW_PHYERR_ENABLE_EIFS_CTL_LSB 23
+#define MAC_PCU_DIAG_SW_PHYERR_ENABLE_EIFS_CTL_MASK 0x00800000
+#define MAC_PCU_DIAG_SW_PHYERR_ENABLE_EIFS_CTL_GET(x) (((x) & MAC_PCU_DIAG_SW_PHYERR_ENABLE_EIFS_CTL_MASK) >> MAC_PCU_DIAG_SW_PHYERR_ENABLE_EIFS_CTL_LSB)
+#define MAC_PCU_DIAG_SW_PHYERR_ENABLE_EIFS_CTL_SET(x) (((x) << MAC_PCU_DIAG_SW_PHYERR_ENABLE_EIFS_CTL_LSB) & MAC_PCU_DIAG_SW_PHYERR_ENABLE_EIFS_CTL_MASK)
+#define MAC_PCU_DIAG_SW_CHAN_IDLE_HIGH_MSB 22
+#define MAC_PCU_DIAG_SW_CHAN_IDLE_HIGH_LSB 22
+#define MAC_PCU_DIAG_SW_CHAN_IDLE_HIGH_MASK 0x00400000
+#define MAC_PCU_DIAG_SW_CHAN_IDLE_HIGH_GET(x) (((x) & MAC_PCU_DIAG_SW_CHAN_IDLE_HIGH_MASK) >> MAC_PCU_DIAG_SW_CHAN_IDLE_HIGH_LSB)
+#define MAC_PCU_DIAG_SW_CHAN_IDLE_HIGH_SET(x) (((x) << MAC_PCU_DIAG_SW_CHAN_IDLE_HIGH_LSB) & MAC_PCU_DIAG_SW_CHAN_IDLE_HIGH_MASK)
+#define MAC_PCU_DIAG_SW_IGNORE_NAV_MSB 21
+#define MAC_PCU_DIAG_SW_IGNORE_NAV_LSB 21
+#define MAC_PCU_DIAG_SW_IGNORE_NAV_MASK 0x00200000
+#define MAC_PCU_DIAG_SW_IGNORE_NAV_GET(x) (((x) & MAC_PCU_DIAG_SW_IGNORE_NAV_MASK) >> MAC_PCU_DIAG_SW_IGNORE_NAV_LSB)
+#define MAC_PCU_DIAG_SW_IGNORE_NAV_SET(x) (((x) << MAC_PCU_DIAG_SW_IGNORE_NAV_LSB) & MAC_PCU_DIAG_SW_IGNORE_NAV_MASK)
+#define MAC_PCU_DIAG_SW_RX_CLEAR_HIGH_MSB 20
+#define MAC_PCU_DIAG_SW_RX_CLEAR_HIGH_LSB 20
+#define MAC_PCU_DIAG_SW_RX_CLEAR_HIGH_MASK 0x00100000
+#define MAC_PCU_DIAG_SW_RX_CLEAR_HIGH_GET(x) (((x) & MAC_PCU_DIAG_SW_RX_CLEAR_HIGH_MASK) >> MAC_PCU_DIAG_SW_RX_CLEAR_HIGH_LSB)
+#define MAC_PCU_DIAG_SW_RX_CLEAR_HIGH_SET(x) (((x) << MAC_PCU_DIAG_SW_RX_CLEAR_HIGH_LSB) & MAC_PCU_DIAG_SW_RX_CLEAR_HIGH_MASK)
+#define MAC_PCU_DIAG_SW_OBS_SEL_1_0_MSB 19
+#define MAC_PCU_DIAG_SW_OBS_SEL_1_0_LSB 18
+#define MAC_PCU_DIAG_SW_OBS_SEL_1_0_MASK 0x000c0000
+#define MAC_PCU_DIAG_SW_OBS_SEL_1_0_GET(x) (((x) & MAC_PCU_DIAG_SW_OBS_SEL_1_0_MASK) >> MAC_PCU_DIAG_SW_OBS_SEL_1_0_LSB)
+#define MAC_PCU_DIAG_SW_OBS_SEL_1_0_SET(x) (((x) << MAC_PCU_DIAG_SW_OBS_SEL_1_0_LSB) & MAC_PCU_DIAG_SW_OBS_SEL_1_0_MASK)
+#define MAC_PCU_DIAG_SW_ACCEPT_NON_V0_MSB 17
+#define MAC_PCU_DIAG_SW_ACCEPT_NON_V0_LSB 17
+#define MAC_PCU_DIAG_SW_ACCEPT_NON_V0_MASK 0x00020000
+#define MAC_PCU_DIAG_SW_ACCEPT_NON_V0_GET(x) (((x) & MAC_PCU_DIAG_SW_ACCEPT_NON_V0_MASK) >> MAC_PCU_DIAG_SW_ACCEPT_NON_V0_LSB)
+#define MAC_PCU_DIAG_SW_ACCEPT_NON_V0_SET(x) (((x) << MAC_PCU_DIAG_SW_ACCEPT_NON_V0_LSB) & MAC_PCU_DIAG_SW_ACCEPT_NON_V0_MASK)
+#define MAC_PCU_DIAG_SW_DUMP_CHAN_INFO_MSB 8
+#define MAC_PCU_DIAG_SW_DUMP_CHAN_INFO_LSB 8
+#define MAC_PCU_DIAG_SW_DUMP_CHAN_INFO_MASK 0x00000100
+#define MAC_PCU_DIAG_SW_DUMP_CHAN_INFO_GET(x) (((x) & MAC_PCU_DIAG_SW_DUMP_CHAN_INFO_MASK) >> MAC_PCU_DIAG_SW_DUMP_CHAN_INFO_LSB)
+#define MAC_PCU_DIAG_SW_DUMP_CHAN_INFO_SET(x) (((x) << MAC_PCU_DIAG_SW_DUMP_CHAN_INFO_LSB) & MAC_PCU_DIAG_SW_DUMP_CHAN_INFO_MASK)
+#define MAC_PCU_DIAG_SW_CORRUPT_FCS_MSB 7
+#define MAC_PCU_DIAG_SW_CORRUPT_FCS_LSB 7
+#define MAC_PCU_DIAG_SW_CORRUPT_FCS_MASK 0x00000080
+#define MAC_PCU_DIAG_SW_CORRUPT_FCS_GET(x) (((x) & MAC_PCU_DIAG_SW_CORRUPT_FCS_MASK) >> MAC_PCU_DIAG_SW_CORRUPT_FCS_LSB)
+#define MAC_PCU_DIAG_SW_CORRUPT_FCS_SET(x) (((x) << MAC_PCU_DIAG_SW_CORRUPT_FCS_LSB) & MAC_PCU_DIAG_SW_CORRUPT_FCS_MASK)
+#define MAC_PCU_DIAG_SW_LOOP_BACK_MSB 6
+#define MAC_PCU_DIAG_SW_LOOP_BACK_LSB 6
+#define MAC_PCU_DIAG_SW_LOOP_BACK_MASK 0x00000040
+#define MAC_PCU_DIAG_SW_LOOP_BACK_GET(x) (((x) & MAC_PCU_DIAG_SW_LOOP_BACK_MASK) >> MAC_PCU_DIAG_SW_LOOP_BACK_LSB)
+#define MAC_PCU_DIAG_SW_LOOP_BACK_SET(x) (((x) << MAC_PCU_DIAG_SW_LOOP_BACK_LSB) & MAC_PCU_DIAG_SW_LOOP_BACK_MASK)
+#define MAC_PCU_DIAG_SW_HALT_RX_MSB 5
+#define MAC_PCU_DIAG_SW_HALT_RX_LSB 5
+#define MAC_PCU_DIAG_SW_HALT_RX_MASK 0x00000020
+#define MAC_PCU_DIAG_SW_HALT_RX_GET(x) (((x) & MAC_PCU_DIAG_SW_HALT_RX_MASK) >> MAC_PCU_DIAG_SW_HALT_RX_LSB)
+#define MAC_PCU_DIAG_SW_HALT_RX_SET(x) (((x) << MAC_PCU_DIAG_SW_HALT_RX_LSB) & MAC_PCU_DIAG_SW_HALT_RX_MASK)
+#define MAC_PCU_DIAG_SW_NO_DECRYPT_MSB 4
+#define MAC_PCU_DIAG_SW_NO_DECRYPT_LSB 4
+#define MAC_PCU_DIAG_SW_NO_DECRYPT_MASK 0x00000010
+#define MAC_PCU_DIAG_SW_NO_DECRYPT_GET(x) (((x) & MAC_PCU_DIAG_SW_NO_DECRYPT_MASK) >> MAC_PCU_DIAG_SW_NO_DECRYPT_LSB)
+#define MAC_PCU_DIAG_SW_NO_DECRYPT_SET(x) (((x) << MAC_PCU_DIAG_SW_NO_DECRYPT_LSB) & MAC_PCU_DIAG_SW_NO_DECRYPT_MASK)
+#define MAC_PCU_DIAG_SW_NO_ENCRYPT_MSB 3
+#define MAC_PCU_DIAG_SW_NO_ENCRYPT_LSB 3
+#define MAC_PCU_DIAG_SW_NO_ENCRYPT_MASK 0x00000008
+#define MAC_PCU_DIAG_SW_NO_ENCRYPT_GET(x) (((x) & MAC_PCU_DIAG_SW_NO_ENCRYPT_MASK) >> MAC_PCU_DIAG_SW_NO_ENCRYPT_LSB)
+#define MAC_PCU_DIAG_SW_NO_ENCRYPT_SET(x) (((x) << MAC_PCU_DIAG_SW_NO_ENCRYPT_LSB) & MAC_PCU_DIAG_SW_NO_ENCRYPT_MASK)
+#define MAC_PCU_DIAG_SW_NO_CTS_MSB 2
+#define MAC_PCU_DIAG_SW_NO_CTS_LSB 2
+#define MAC_PCU_DIAG_SW_NO_CTS_MASK 0x00000004
+#define MAC_PCU_DIAG_SW_NO_CTS_GET(x) (((x) & MAC_PCU_DIAG_SW_NO_CTS_MASK) >> MAC_PCU_DIAG_SW_NO_CTS_LSB)
+#define MAC_PCU_DIAG_SW_NO_CTS_SET(x) (((x) << MAC_PCU_DIAG_SW_NO_CTS_LSB) & MAC_PCU_DIAG_SW_NO_CTS_MASK)
+#define MAC_PCU_DIAG_SW_NO_ACK_MSB 1
+#define MAC_PCU_DIAG_SW_NO_ACK_LSB 1
+#define MAC_PCU_DIAG_SW_NO_ACK_MASK 0x00000002
+#define MAC_PCU_DIAG_SW_NO_ACK_GET(x) (((x) & MAC_PCU_DIAG_SW_NO_ACK_MASK) >> MAC_PCU_DIAG_SW_NO_ACK_LSB)
+#define MAC_PCU_DIAG_SW_NO_ACK_SET(x) (((x) << MAC_PCU_DIAG_SW_NO_ACK_LSB) & MAC_PCU_DIAG_SW_NO_ACK_MASK)
+#define MAC_PCU_DIAG_SW_INVALID_KEY_NO_ACK_MSB 0
+#define MAC_PCU_DIAG_SW_INVALID_KEY_NO_ACK_LSB 0
+#define MAC_PCU_DIAG_SW_INVALID_KEY_NO_ACK_MASK 0x00000001
+#define MAC_PCU_DIAG_SW_INVALID_KEY_NO_ACK_GET(x) (((x) & MAC_PCU_DIAG_SW_INVALID_KEY_NO_ACK_MASK) >> MAC_PCU_DIAG_SW_INVALID_KEY_NO_ACK_LSB)
+#define MAC_PCU_DIAG_SW_INVALID_KEY_NO_ACK_SET(x) (((x) << MAC_PCU_DIAG_SW_INVALID_KEY_NO_ACK_LSB) & MAC_PCU_DIAG_SW_INVALID_KEY_NO_ACK_MASK)
+
+#define MAC_PCU_TST_ADDAC_ADDRESS 0x00008034
+#define MAC_PCU_TST_ADDAC_OFFSET 0x00000034
+#define MAC_PCU_TST_ADDAC_TEST_ARM_MSB 20
+#define MAC_PCU_TST_ADDAC_TEST_ARM_LSB 20
+#define MAC_PCU_TST_ADDAC_TEST_ARM_MASK 0x00100000
+#define MAC_PCU_TST_ADDAC_TEST_ARM_GET(x) (((x) & MAC_PCU_TST_ADDAC_TEST_ARM_MASK) >> MAC_PCU_TST_ADDAC_TEST_ARM_LSB)
+#define MAC_PCU_TST_ADDAC_TEST_ARM_SET(x) (((x) << MAC_PCU_TST_ADDAC_TEST_ARM_LSB) & MAC_PCU_TST_ADDAC_TEST_ARM_MASK)
+#define MAC_PCU_TST_ADDAC_TEST_CAPTURE_MSB 19
+#define MAC_PCU_TST_ADDAC_TEST_CAPTURE_LSB 19
+#define MAC_PCU_TST_ADDAC_TEST_CAPTURE_MASK 0x00080000
+#define MAC_PCU_TST_ADDAC_TEST_CAPTURE_GET(x) (((x) & MAC_PCU_TST_ADDAC_TEST_CAPTURE_MASK) >> MAC_PCU_TST_ADDAC_TEST_CAPTURE_LSB)
+#define MAC_PCU_TST_ADDAC_TEST_CAPTURE_SET(x) (((x) << MAC_PCU_TST_ADDAC_TEST_CAPTURE_LSB) & MAC_PCU_TST_ADDAC_TEST_CAPTURE_MASK)
+#define MAC_PCU_TST_ADDAC_CONT_TEST_MSB 18
+#define MAC_PCU_TST_ADDAC_CONT_TEST_LSB 18
+#define MAC_PCU_TST_ADDAC_CONT_TEST_MASK 0x00040000
+#define MAC_PCU_TST_ADDAC_CONT_TEST_GET(x) (((x) & MAC_PCU_TST_ADDAC_CONT_TEST_MASK) >> MAC_PCU_TST_ADDAC_CONT_TEST_LSB)
+#define MAC_PCU_TST_ADDAC_CONT_TEST_SET(x) (((x) << MAC_PCU_TST_ADDAC_CONT_TEST_LSB) & MAC_PCU_TST_ADDAC_CONT_TEST_MASK)
+#define MAC_PCU_TST_ADDAC_TRIG_POLARITY_MSB 17
+#define MAC_PCU_TST_ADDAC_TRIG_POLARITY_LSB 17
+#define MAC_PCU_TST_ADDAC_TRIG_POLARITY_MASK 0x00020000
+#define MAC_PCU_TST_ADDAC_TRIG_POLARITY_GET(x) (((x) & MAC_PCU_TST_ADDAC_TRIG_POLARITY_MASK) >> MAC_PCU_TST_ADDAC_TRIG_POLARITY_LSB)
+#define MAC_PCU_TST_ADDAC_TRIG_POLARITY_SET(x) (((x) << MAC_PCU_TST_ADDAC_TRIG_POLARITY_LSB) & MAC_PCU_TST_ADDAC_TRIG_POLARITY_MASK)
+#define MAC_PCU_TST_ADDAC_TRIG_SEL_MSB 16
+#define MAC_PCU_TST_ADDAC_TRIG_SEL_LSB 16
+#define MAC_PCU_TST_ADDAC_TRIG_SEL_MASK 0x00010000
+#define MAC_PCU_TST_ADDAC_TRIG_SEL_GET(x) (((x) & MAC_PCU_TST_ADDAC_TRIG_SEL_MASK) >> MAC_PCU_TST_ADDAC_TRIG_SEL_LSB)
+#define MAC_PCU_TST_ADDAC_TRIG_SEL_SET(x) (((x) << MAC_PCU_TST_ADDAC_TRIG_SEL_LSB) & MAC_PCU_TST_ADDAC_TRIG_SEL_MASK)
+#define MAC_PCU_TST_ADDAC_UPPER_8B_MSB 14
+#define MAC_PCU_TST_ADDAC_UPPER_8B_LSB 14
+#define MAC_PCU_TST_ADDAC_UPPER_8B_MASK 0x00004000
+#define MAC_PCU_TST_ADDAC_UPPER_8B_GET(x) (((x) & MAC_PCU_TST_ADDAC_UPPER_8B_MASK) >> MAC_PCU_TST_ADDAC_UPPER_8B_LSB)
+#define MAC_PCU_TST_ADDAC_UPPER_8B_SET(x) (((x) << MAC_PCU_TST_ADDAC_UPPER_8B_LSB) & MAC_PCU_TST_ADDAC_UPPER_8B_MASK)
+#define MAC_PCU_TST_ADDAC_LOOP_LEN_MSB 13
+#define MAC_PCU_TST_ADDAC_LOOP_LEN_LSB 3
+#define MAC_PCU_TST_ADDAC_LOOP_LEN_MASK 0x00003ff8
+#define MAC_PCU_TST_ADDAC_LOOP_LEN_GET(x) (((x) & MAC_PCU_TST_ADDAC_LOOP_LEN_MASK) >> MAC_PCU_TST_ADDAC_LOOP_LEN_LSB)
+#define MAC_PCU_TST_ADDAC_LOOP_LEN_SET(x) (((x) << MAC_PCU_TST_ADDAC_LOOP_LEN_LSB) & MAC_PCU_TST_ADDAC_LOOP_LEN_MASK)
+#define MAC_PCU_TST_ADDAC_LOOP_MSB 2
+#define MAC_PCU_TST_ADDAC_LOOP_LSB 2
+#define MAC_PCU_TST_ADDAC_LOOP_MASK 0x00000004
+#define MAC_PCU_TST_ADDAC_LOOP_GET(x) (((x) & MAC_PCU_TST_ADDAC_LOOP_MASK) >> MAC_PCU_TST_ADDAC_LOOP_LSB)
+#define MAC_PCU_TST_ADDAC_LOOP_SET(x) (((x) << MAC_PCU_TST_ADDAC_LOOP_LSB) & MAC_PCU_TST_ADDAC_LOOP_MASK)
+#define MAC_PCU_TST_ADDAC_TESTMODE_MSB 1
+#define MAC_PCU_TST_ADDAC_TESTMODE_LSB 1
+#define MAC_PCU_TST_ADDAC_TESTMODE_MASK 0x00000002
+#define MAC_PCU_TST_ADDAC_TESTMODE_GET(x) (((x) & MAC_PCU_TST_ADDAC_TESTMODE_MASK) >> MAC_PCU_TST_ADDAC_TESTMODE_LSB)
+#define MAC_PCU_TST_ADDAC_TESTMODE_SET(x) (((x) << MAC_PCU_TST_ADDAC_TESTMODE_LSB) & MAC_PCU_TST_ADDAC_TESTMODE_MASK)
+#define MAC_PCU_TST_ADDAC_CONT_TX_MSB 0
+#define MAC_PCU_TST_ADDAC_CONT_TX_LSB 0
+#define MAC_PCU_TST_ADDAC_CONT_TX_MASK 0x00000001
+#define MAC_PCU_TST_ADDAC_CONT_TX_GET(x) (((x) & MAC_PCU_TST_ADDAC_CONT_TX_MASK) >> MAC_PCU_TST_ADDAC_CONT_TX_LSB)
+#define MAC_PCU_TST_ADDAC_CONT_TX_SET(x) (((x) << MAC_PCU_TST_ADDAC_CONT_TX_LSB) & MAC_PCU_TST_ADDAC_CONT_TX_MASK)
+
+#define MAC_PCU_DEF_ANTENNA_ADDRESS 0x00008038
+#define MAC_PCU_DEF_ANTENNA_OFFSET 0x00000038
+#define MAC_PCU_DEF_ANTENNA_RX_LNA_CONFIG_SEL_MSB 28
+#define MAC_PCU_DEF_ANTENNA_RX_LNA_CONFIG_SEL_LSB 28
+#define MAC_PCU_DEF_ANTENNA_RX_LNA_CONFIG_SEL_MASK 0x10000000
+#define MAC_PCU_DEF_ANTENNA_RX_LNA_CONFIG_SEL_GET(x) (((x) & MAC_PCU_DEF_ANTENNA_RX_LNA_CONFIG_SEL_MASK) >> MAC_PCU_DEF_ANTENNA_RX_LNA_CONFIG_SEL_LSB)
+#define MAC_PCU_DEF_ANTENNA_RX_LNA_CONFIG_SEL_SET(x) (((x) << MAC_PCU_DEF_ANTENNA_RX_LNA_CONFIG_SEL_LSB) & MAC_PCU_DEF_ANTENNA_RX_LNA_CONFIG_SEL_MASK)
+#define MAC_PCU_DEF_ANTENNA_TX_DEF_ANT_SEL_MSB 24
+#define MAC_PCU_DEF_ANTENNA_TX_DEF_ANT_SEL_LSB 24
+#define MAC_PCU_DEF_ANTENNA_TX_DEF_ANT_SEL_MASK 0x01000000
+#define MAC_PCU_DEF_ANTENNA_TX_DEF_ANT_SEL_GET(x) (((x) & MAC_PCU_DEF_ANTENNA_TX_DEF_ANT_SEL_MASK) >> MAC_PCU_DEF_ANTENNA_TX_DEF_ANT_SEL_LSB)
+#define MAC_PCU_DEF_ANTENNA_TX_DEF_ANT_SEL_SET(x) (((x) << MAC_PCU_DEF_ANTENNA_TX_DEF_ANT_SEL_LSB) & MAC_PCU_DEF_ANTENNA_TX_DEF_ANT_SEL_MASK)
+#define MAC_PCU_DEF_ANTENNA_VALUE_MSB 23
+#define MAC_PCU_DEF_ANTENNA_VALUE_LSB 0
+#define MAC_PCU_DEF_ANTENNA_VALUE_MASK 0x00ffffff
+#define MAC_PCU_DEF_ANTENNA_VALUE_GET(x) (((x) & MAC_PCU_DEF_ANTENNA_VALUE_MASK) >> MAC_PCU_DEF_ANTENNA_VALUE_LSB)
+#define MAC_PCU_DEF_ANTENNA_VALUE_SET(x) (((x) << MAC_PCU_DEF_ANTENNA_VALUE_LSB) & MAC_PCU_DEF_ANTENNA_VALUE_MASK)
+
+#define MAC_PCU_AES_MUTE_MASK_0_ADDRESS 0x0000803c
+#define MAC_PCU_AES_MUTE_MASK_0_OFFSET 0x0000003c
+#define MAC_PCU_AES_MUTE_MASK_0_QOS_MSB 31
+#define MAC_PCU_AES_MUTE_MASK_0_QOS_LSB 16
+#define MAC_PCU_AES_MUTE_MASK_0_QOS_MASK 0xffff0000
+#define MAC_PCU_AES_MUTE_MASK_0_QOS_GET(x) (((x) & MAC_PCU_AES_MUTE_MASK_0_QOS_MASK) >> MAC_PCU_AES_MUTE_MASK_0_QOS_LSB)
+#define MAC_PCU_AES_MUTE_MASK_0_QOS_SET(x) (((x) << MAC_PCU_AES_MUTE_MASK_0_QOS_LSB) & MAC_PCU_AES_MUTE_MASK_0_QOS_MASK)
+#define MAC_PCU_AES_MUTE_MASK_0_FC_MSB 15
+#define MAC_PCU_AES_MUTE_MASK_0_FC_LSB 0
+#define MAC_PCU_AES_MUTE_MASK_0_FC_MASK 0x0000ffff
+#define MAC_PCU_AES_MUTE_MASK_0_FC_GET(x) (((x) & MAC_PCU_AES_MUTE_MASK_0_FC_MASK) >> MAC_PCU_AES_MUTE_MASK_0_FC_LSB)
+#define MAC_PCU_AES_MUTE_MASK_0_FC_SET(x) (((x) << MAC_PCU_AES_MUTE_MASK_0_FC_LSB) & MAC_PCU_AES_MUTE_MASK_0_FC_MASK)
+
+#define MAC_PCU_AES_MUTE_MASK_1_ADDRESS 0x00008040
+#define MAC_PCU_AES_MUTE_MASK_1_OFFSET 0x00000040
+#define MAC_PCU_AES_MUTE_MASK_1_FC_MGMT_MSB 31
+#define MAC_PCU_AES_MUTE_MASK_1_FC_MGMT_LSB 16
+#define MAC_PCU_AES_MUTE_MASK_1_FC_MGMT_MASK 0xffff0000
+#define MAC_PCU_AES_MUTE_MASK_1_FC_MGMT_GET(x) (((x) & MAC_PCU_AES_MUTE_MASK_1_FC_MGMT_MASK) >> MAC_PCU_AES_MUTE_MASK_1_FC_MGMT_LSB)
+#define MAC_PCU_AES_MUTE_MASK_1_FC_MGMT_SET(x) (((x) << MAC_PCU_AES_MUTE_MASK_1_FC_MGMT_LSB) & MAC_PCU_AES_MUTE_MASK_1_FC_MGMT_MASK)
+#define MAC_PCU_AES_MUTE_MASK_1_SEQ_MSB 15
+#define MAC_PCU_AES_MUTE_MASK_1_SEQ_LSB 0
+#define MAC_PCU_AES_MUTE_MASK_1_SEQ_MASK 0x0000ffff
+#define MAC_PCU_AES_MUTE_MASK_1_SEQ_GET(x) (((x) & MAC_PCU_AES_MUTE_MASK_1_SEQ_MASK) >> MAC_PCU_AES_MUTE_MASK_1_SEQ_LSB)
+#define MAC_PCU_AES_MUTE_MASK_1_SEQ_SET(x) (((x) << MAC_PCU_AES_MUTE_MASK_1_SEQ_LSB) & MAC_PCU_AES_MUTE_MASK_1_SEQ_MASK)
+
+#define MAC_PCU_GATED_CLKS_ADDRESS 0x00008044
+#define MAC_PCU_GATED_CLKS_OFFSET 0x00000044
+#define MAC_PCU_GATED_CLKS_GATED_REG_MSB 3
+#define MAC_PCU_GATED_CLKS_GATED_REG_LSB 3
+#define MAC_PCU_GATED_CLKS_GATED_REG_MASK 0x00000008
+#define MAC_PCU_GATED_CLKS_GATED_REG_GET(x) (((x) & MAC_PCU_GATED_CLKS_GATED_REG_MASK) >> MAC_PCU_GATED_CLKS_GATED_REG_LSB)
+#define MAC_PCU_GATED_CLKS_GATED_REG_SET(x) (((x) << MAC_PCU_GATED_CLKS_GATED_REG_LSB) & MAC_PCU_GATED_CLKS_GATED_REG_MASK)
+#define MAC_PCU_GATED_CLKS_GATED_RX_MSB 2
+#define MAC_PCU_GATED_CLKS_GATED_RX_LSB 2
+#define MAC_PCU_GATED_CLKS_GATED_RX_MASK 0x00000004
+#define MAC_PCU_GATED_CLKS_GATED_RX_GET(x) (((x) & MAC_PCU_GATED_CLKS_GATED_RX_MASK) >> MAC_PCU_GATED_CLKS_GATED_RX_LSB)
+#define MAC_PCU_GATED_CLKS_GATED_RX_SET(x) (((x) << MAC_PCU_GATED_CLKS_GATED_RX_LSB) & MAC_PCU_GATED_CLKS_GATED_RX_MASK)
+#define MAC_PCU_GATED_CLKS_GATED_TX_MSB 1
+#define MAC_PCU_GATED_CLKS_GATED_TX_LSB 1
+#define MAC_PCU_GATED_CLKS_GATED_TX_MASK 0x00000002
+#define MAC_PCU_GATED_CLKS_GATED_TX_GET(x) (((x) & MAC_PCU_GATED_CLKS_GATED_TX_MASK) >> MAC_PCU_GATED_CLKS_GATED_TX_LSB)
+#define MAC_PCU_GATED_CLKS_GATED_TX_SET(x) (((x) << MAC_PCU_GATED_CLKS_GATED_TX_LSB) & MAC_PCU_GATED_CLKS_GATED_TX_MASK)
+
+#define MAC_PCU_OBS_BUS_2_ADDRESS 0x00008048
+#define MAC_PCU_OBS_BUS_2_OFFSET 0x00000048
+#define MAC_PCU_OBS_BUS_2_VALUE_MSB 17
+#define MAC_PCU_OBS_BUS_2_VALUE_LSB 0
+#define MAC_PCU_OBS_BUS_2_VALUE_MASK 0x0003ffff
+#define MAC_PCU_OBS_BUS_2_VALUE_GET(x) (((x) & MAC_PCU_OBS_BUS_2_VALUE_MASK) >> MAC_PCU_OBS_BUS_2_VALUE_LSB)
+#define MAC_PCU_OBS_BUS_2_VALUE_SET(x) (((x) << MAC_PCU_OBS_BUS_2_VALUE_LSB) & MAC_PCU_OBS_BUS_2_VALUE_MASK)
+
+#define MAC_PCU_OBS_BUS_1_ADDRESS 0x0000804c
+#define MAC_PCU_OBS_BUS_1_OFFSET 0x0000004c
+#define MAC_PCU_OBS_BUS_1_TX_STATE_MSB 30
+#define MAC_PCU_OBS_BUS_1_TX_STATE_LSB 25
+#define MAC_PCU_OBS_BUS_1_TX_STATE_MASK 0x7e000000
+#define MAC_PCU_OBS_BUS_1_TX_STATE_GET(x) (((x) & MAC_PCU_OBS_BUS_1_TX_STATE_MASK) >> MAC_PCU_OBS_BUS_1_TX_STATE_LSB)
+#define MAC_PCU_OBS_BUS_1_TX_STATE_SET(x) (((x) << MAC_PCU_OBS_BUS_1_TX_STATE_LSB) & MAC_PCU_OBS_BUS_1_TX_STATE_MASK)
+#define MAC_PCU_OBS_BUS_1_RX_STATE_MSB 24
+#define MAC_PCU_OBS_BUS_1_RX_STATE_LSB 20
+#define MAC_PCU_OBS_BUS_1_RX_STATE_MASK 0x01f00000
+#define MAC_PCU_OBS_BUS_1_RX_STATE_GET(x) (((x) & MAC_PCU_OBS_BUS_1_RX_STATE_MASK) >> MAC_PCU_OBS_BUS_1_RX_STATE_LSB)
+#define MAC_PCU_OBS_BUS_1_RX_STATE_SET(x) (((x) << MAC_PCU_OBS_BUS_1_RX_STATE_LSB) & MAC_PCU_OBS_BUS_1_RX_STATE_MASK)
+#define MAC_PCU_OBS_BUS_1_WEP_STATE_MSB 17
+#define MAC_PCU_OBS_BUS_1_WEP_STATE_LSB 12
+#define MAC_PCU_OBS_BUS_1_WEP_STATE_MASK 0x0003f000
+#define MAC_PCU_OBS_BUS_1_WEP_STATE_GET(x) (((x) & MAC_PCU_OBS_BUS_1_WEP_STATE_MASK) >> MAC_PCU_OBS_BUS_1_WEP_STATE_LSB)
+#define MAC_PCU_OBS_BUS_1_WEP_STATE_SET(x) (((x) << MAC_PCU_OBS_BUS_1_WEP_STATE_LSB) & MAC_PCU_OBS_BUS_1_WEP_STATE_MASK)
+#define MAC_PCU_OBS_BUS_1_RX_CLEAR_MSB 11
+#define MAC_PCU_OBS_BUS_1_RX_CLEAR_LSB 11
+#define MAC_PCU_OBS_BUS_1_RX_CLEAR_MASK 0x00000800
+#define MAC_PCU_OBS_BUS_1_RX_CLEAR_GET(x) (((x) & MAC_PCU_OBS_BUS_1_RX_CLEAR_MASK) >> MAC_PCU_OBS_BUS_1_RX_CLEAR_LSB)
+#define MAC_PCU_OBS_BUS_1_RX_CLEAR_SET(x) (((x) << MAC_PCU_OBS_BUS_1_RX_CLEAR_LSB) & MAC_PCU_OBS_BUS_1_RX_CLEAR_MASK)
+#define MAC_PCU_OBS_BUS_1_RX_FRAME_MSB 10
+#define MAC_PCU_OBS_BUS_1_RX_FRAME_LSB 10
+#define MAC_PCU_OBS_BUS_1_RX_FRAME_MASK 0x00000400
+#define MAC_PCU_OBS_BUS_1_RX_FRAME_GET(x) (((x) & MAC_PCU_OBS_BUS_1_RX_FRAME_MASK) >> MAC_PCU_OBS_BUS_1_RX_FRAME_LSB)
+#define MAC_PCU_OBS_BUS_1_RX_FRAME_SET(x) (((x) << MAC_PCU_OBS_BUS_1_RX_FRAME_LSB) & MAC_PCU_OBS_BUS_1_RX_FRAME_MASK)
+#define MAC_PCU_OBS_BUS_1_TX_FRAME_MSB 9
+#define MAC_PCU_OBS_BUS_1_TX_FRAME_LSB 9
+#define MAC_PCU_OBS_BUS_1_TX_FRAME_MASK 0x00000200
+#define MAC_PCU_OBS_BUS_1_TX_FRAME_GET(x) (((x) & MAC_PCU_OBS_BUS_1_TX_FRAME_MASK) >> MAC_PCU_OBS_BUS_1_TX_FRAME_LSB)
+#define MAC_PCU_OBS_BUS_1_TX_FRAME_SET(x) (((x) << MAC_PCU_OBS_BUS_1_TX_FRAME_LSB) & MAC_PCU_OBS_BUS_1_TX_FRAME_MASK)
+#define MAC_PCU_OBS_BUS_1_TX_HOLD_MSB 8
+#define MAC_PCU_OBS_BUS_1_TX_HOLD_LSB 8
+#define MAC_PCU_OBS_BUS_1_TX_HOLD_MASK 0x00000100
+#define MAC_PCU_OBS_BUS_1_TX_HOLD_GET(x) (((x) & MAC_PCU_OBS_BUS_1_TX_HOLD_MASK) >> MAC_PCU_OBS_BUS_1_TX_HOLD_LSB)
+#define MAC_PCU_OBS_BUS_1_TX_HOLD_SET(x) (((x) << MAC_PCU_OBS_BUS_1_TX_HOLD_LSB) & MAC_PCU_OBS_BUS_1_TX_HOLD_MASK)
+#define MAC_PCU_OBS_BUS_1_PCU_CHANNEL_IDLE_MSB 7
+#define MAC_PCU_OBS_BUS_1_PCU_CHANNEL_IDLE_LSB 7
+#define MAC_PCU_OBS_BUS_1_PCU_CHANNEL_IDLE_MASK 0x00000080
+#define MAC_PCU_OBS_BUS_1_PCU_CHANNEL_IDLE_GET(x) (((x) & MAC_PCU_OBS_BUS_1_PCU_CHANNEL_IDLE_MASK) >> MAC_PCU_OBS_BUS_1_PCU_CHANNEL_IDLE_LSB)
+#define MAC_PCU_OBS_BUS_1_PCU_CHANNEL_IDLE_SET(x) (((x) << MAC_PCU_OBS_BUS_1_PCU_CHANNEL_IDLE_LSB) & MAC_PCU_OBS_BUS_1_PCU_CHANNEL_IDLE_MASK)
+#define MAC_PCU_OBS_BUS_1_TM_QUIET_TIME_MSB 6
+#define MAC_PCU_OBS_BUS_1_TM_QUIET_TIME_LSB 6
+#define MAC_PCU_OBS_BUS_1_TM_QUIET_TIME_MASK 0x00000040
+#define MAC_PCU_OBS_BUS_1_TM_QUIET_TIME_GET(x) (((x) & MAC_PCU_OBS_BUS_1_TM_QUIET_TIME_MASK) >> MAC_PCU_OBS_BUS_1_TM_QUIET_TIME_LSB)
+#define MAC_PCU_OBS_BUS_1_TM_QUIET_TIME_SET(x) (((x) << MAC_PCU_OBS_BUS_1_TM_QUIET_TIME_LSB) & MAC_PCU_OBS_BUS_1_TM_QUIET_TIME_MASK)
+#define MAC_PCU_OBS_BUS_1_TX_HCF_MSB 5
+#define MAC_PCU_OBS_BUS_1_TX_HCF_LSB 5
+#define MAC_PCU_OBS_BUS_1_TX_HCF_MASK 0x00000020
+#define MAC_PCU_OBS_BUS_1_TX_HCF_GET(x) (((x) & MAC_PCU_OBS_BUS_1_TX_HCF_MASK) >> MAC_PCU_OBS_BUS_1_TX_HCF_LSB)
+#define MAC_PCU_OBS_BUS_1_TX_HCF_SET(x) (((x) << MAC_PCU_OBS_BUS_1_TX_HCF_LSB) & MAC_PCU_OBS_BUS_1_TX_HCF_MASK)
+#define MAC_PCU_OBS_BUS_1_FILTER_PASS_MSB 4
+#define MAC_PCU_OBS_BUS_1_FILTER_PASS_LSB 4
+#define MAC_PCU_OBS_BUS_1_FILTER_PASS_MASK 0x00000010
+#define MAC_PCU_OBS_BUS_1_FILTER_PASS_GET(x) (((x) & MAC_PCU_OBS_BUS_1_FILTER_PASS_MASK) >> MAC_PCU_OBS_BUS_1_FILTER_PASS_LSB)
+#define MAC_PCU_OBS_BUS_1_FILTER_PASS_SET(x) (((x) << MAC_PCU_OBS_BUS_1_FILTER_PASS_LSB) & MAC_PCU_OBS_BUS_1_FILTER_PASS_MASK)
+#define MAC_PCU_OBS_BUS_1_RX_MY_BEACON_MSB 3
+#define MAC_PCU_OBS_BUS_1_RX_MY_BEACON_LSB 3
+#define MAC_PCU_OBS_BUS_1_RX_MY_BEACON_MASK 0x00000008
+#define MAC_PCU_OBS_BUS_1_RX_MY_BEACON_GET(x) (((x) & MAC_PCU_OBS_BUS_1_RX_MY_BEACON_MASK) >> MAC_PCU_OBS_BUS_1_RX_MY_BEACON_LSB)
+#define MAC_PCU_OBS_BUS_1_RX_MY_BEACON_SET(x) (((x) << MAC_PCU_OBS_BUS_1_RX_MY_BEACON_LSB) & MAC_PCU_OBS_BUS_1_RX_MY_BEACON_MASK)
+#define MAC_PCU_OBS_BUS_1_RX_WEP_MSB 2
+#define MAC_PCU_OBS_BUS_1_RX_WEP_LSB 2
+#define MAC_PCU_OBS_BUS_1_RX_WEP_MASK 0x00000004
+#define MAC_PCU_OBS_BUS_1_RX_WEP_GET(x) (((x) & MAC_PCU_OBS_BUS_1_RX_WEP_MASK) >> MAC_PCU_OBS_BUS_1_RX_WEP_LSB)
+#define MAC_PCU_OBS_BUS_1_RX_WEP_SET(x) (((x) << MAC_PCU_OBS_BUS_1_RX_WEP_LSB) & MAC_PCU_OBS_BUS_1_RX_WEP_MASK)
+#define MAC_PCU_OBS_BUS_1_PCU_RX_END_MSB 1
+#define MAC_PCU_OBS_BUS_1_PCU_RX_END_LSB 1
+#define MAC_PCU_OBS_BUS_1_PCU_RX_END_MASK 0x00000002
+#define MAC_PCU_OBS_BUS_1_PCU_RX_END_GET(x) (((x) & MAC_PCU_OBS_BUS_1_PCU_RX_END_MASK) >> MAC_PCU_OBS_BUS_1_PCU_RX_END_LSB)
+#define MAC_PCU_OBS_BUS_1_PCU_RX_END_SET(x) (((x) << MAC_PCU_OBS_BUS_1_PCU_RX_END_LSB) & MAC_PCU_OBS_BUS_1_PCU_RX_END_MASK)
+#define MAC_PCU_OBS_BUS_1_PCU_DIRECTED_MSB 0
+#define MAC_PCU_OBS_BUS_1_PCU_DIRECTED_LSB 0
+#define MAC_PCU_OBS_BUS_1_PCU_DIRECTED_MASK 0x00000001
+#define MAC_PCU_OBS_BUS_1_PCU_DIRECTED_GET(x) (((x) & MAC_PCU_OBS_BUS_1_PCU_DIRECTED_MASK) >> MAC_PCU_OBS_BUS_1_PCU_DIRECTED_LSB)
+#define MAC_PCU_OBS_BUS_1_PCU_DIRECTED_SET(x) (((x) << MAC_PCU_OBS_BUS_1_PCU_DIRECTED_LSB) & MAC_PCU_OBS_BUS_1_PCU_DIRECTED_MASK)
+
+#define MAC_PCU_DYM_MIMO_PWR_SAVE_ADDRESS 0x00008050
+#define MAC_PCU_DYM_MIMO_PWR_SAVE_OFFSET 0x00000050
+#define MAC_PCU_DYM_MIMO_PWR_SAVE_HI_PWR_CHAIN_MASK_MSB 10
+#define MAC_PCU_DYM_MIMO_PWR_SAVE_HI_PWR_CHAIN_MASK_LSB 8
+#define MAC_PCU_DYM_MIMO_PWR_SAVE_HI_PWR_CHAIN_MASK_MASK 0x00000700
+#define MAC_PCU_DYM_MIMO_PWR_SAVE_HI_PWR_CHAIN_MASK_GET(x) (((x) & MAC_PCU_DYM_MIMO_PWR_SAVE_HI_PWR_CHAIN_MASK_MASK) >> MAC_PCU_DYM_MIMO_PWR_SAVE_HI_PWR_CHAIN_MASK_LSB)
+#define MAC_PCU_DYM_MIMO_PWR_SAVE_HI_PWR_CHAIN_MASK_SET(x) (((x) << MAC_PCU_DYM_MIMO_PWR_SAVE_HI_PWR_CHAIN_MASK_LSB) & MAC_PCU_DYM_MIMO_PWR_SAVE_HI_PWR_CHAIN_MASK_MASK)
+#define MAC_PCU_DYM_MIMO_PWR_SAVE_LOW_PWR_CHAIN_MASK_MSB 6
+#define MAC_PCU_DYM_MIMO_PWR_SAVE_LOW_PWR_CHAIN_MASK_LSB 4
+#define MAC_PCU_DYM_MIMO_PWR_SAVE_LOW_PWR_CHAIN_MASK_MASK 0x00000070
+#define MAC_PCU_DYM_MIMO_PWR_SAVE_LOW_PWR_CHAIN_MASK_GET(x) (((x) & MAC_PCU_DYM_MIMO_PWR_SAVE_LOW_PWR_CHAIN_MASK_MASK) >> MAC_PCU_DYM_MIMO_PWR_SAVE_LOW_PWR_CHAIN_MASK_LSB)
+#define MAC_PCU_DYM_MIMO_PWR_SAVE_LOW_PWR_CHAIN_MASK_SET(x) (((x) << MAC_PCU_DYM_MIMO_PWR_SAVE_LOW_PWR_CHAIN_MASK_LSB) & MAC_PCU_DYM_MIMO_PWR_SAVE_LOW_PWR_CHAIN_MASK_MASK)
+#define MAC_PCU_DYM_MIMO_PWR_SAVE_SW_CHAIN_MASK_SEL_MSB 2
+#define MAC_PCU_DYM_MIMO_PWR_SAVE_SW_CHAIN_MASK_SEL_LSB 2
+#define MAC_PCU_DYM_MIMO_PWR_SAVE_SW_CHAIN_MASK_SEL_MASK 0x00000004
+#define MAC_PCU_DYM_MIMO_PWR_SAVE_SW_CHAIN_MASK_SEL_GET(x) (((x) & MAC_PCU_DYM_MIMO_PWR_SAVE_SW_CHAIN_MASK_SEL_MASK) >> MAC_PCU_DYM_MIMO_PWR_SAVE_SW_CHAIN_MASK_SEL_LSB)
+#define MAC_PCU_DYM_MIMO_PWR_SAVE_SW_CHAIN_MASK_SEL_SET(x) (((x) << MAC_PCU_DYM_MIMO_PWR_SAVE_SW_CHAIN_MASK_SEL_LSB) & MAC_PCU_DYM_MIMO_PWR_SAVE_SW_CHAIN_MASK_SEL_MASK)
+#define MAC_PCU_DYM_MIMO_PWR_SAVE_HW_CTRL_EN_MSB 1
+#define MAC_PCU_DYM_MIMO_PWR_SAVE_HW_CTRL_EN_LSB 1
+#define MAC_PCU_DYM_MIMO_PWR_SAVE_HW_CTRL_EN_MASK 0x00000002
+#define MAC_PCU_DYM_MIMO_PWR_SAVE_HW_CTRL_EN_GET(x) (((x) & MAC_PCU_DYM_MIMO_PWR_SAVE_HW_CTRL_EN_MASK) >> MAC_PCU_DYM_MIMO_PWR_SAVE_HW_CTRL_EN_LSB)
+#define MAC_PCU_DYM_MIMO_PWR_SAVE_HW_CTRL_EN_SET(x) (((x) << MAC_PCU_DYM_MIMO_PWR_SAVE_HW_CTRL_EN_LSB) & MAC_PCU_DYM_MIMO_PWR_SAVE_HW_CTRL_EN_MASK)
+#define MAC_PCU_DYM_MIMO_PWR_SAVE_USE_MAC_CTRL_MSB 0
+#define MAC_PCU_DYM_MIMO_PWR_SAVE_USE_MAC_CTRL_LSB 0
+#define MAC_PCU_DYM_MIMO_PWR_SAVE_USE_MAC_CTRL_MASK 0x00000001
+#define MAC_PCU_DYM_MIMO_PWR_SAVE_USE_MAC_CTRL_GET(x) (((x) & MAC_PCU_DYM_MIMO_PWR_SAVE_USE_MAC_CTRL_MASK) >> MAC_PCU_DYM_MIMO_PWR_SAVE_USE_MAC_CTRL_LSB)
+#define MAC_PCU_DYM_MIMO_PWR_SAVE_USE_MAC_CTRL_SET(x) (((x) << MAC_PCU_DYM_MIMO_PWR_SAVE_USE_MAC_CTRL_LSB) & MAC_PCU_DYM_MIMO_PWR_SAVE_USE_MAC_CTRL_MASK)
+
+#define MAC_PCU_LAST_BEACON_TSF_ADDRESS 0x00008054
+#define MAC_PCU_LAST_BEACON_TSF_OFFSET 0x00000054
+#define MAC_PCU_LAST_BEACON_TSF_VALUE_MSB 31
+#define MAC_PCU_LAST_BEACON_TSF_VALUE_LSB 0
+#define MAC_PCU_LAST_BEACON_TSF_VALUE_MASK 0xffffffff
+#define MAC_PCU_LAST_BEACON_TSF_VALUE_GET(x) (((x) & MAC_PCU_LAST_BEACON_TSF_VALUE_MASK) >> MAC_PCU_LAST_BEACON_TSF_VALUE_LSB)
+#define MAC_PCU_LAST_BEACON_TSF_VALUE_SET(x) (((x) << MAC_PCU_LAST_BEACON_TSF_VALUE_LSB) & MAC_PCU_LAST_BEACON_TSF_VALUE_MASK)
+
+#define MAC_PCU_NAV_ADDRESS 0x00008058
+#define MAC_PCU_NAV_OFFSET 0x00000058
+#define MAC_PCU_NAV_VALUE_MSB 25
+#define MAC_PCU_NAV_VALUE_LSB 0
+#define MAC_PCU_NAV_VALUE_MASK 0x03ffffff
+#define MAC_PCU_NAV_VALUE_GET(x) (((x) & MAC_PCU_NAV_VALUE_MASK) >> MAC_PCU_NAV_VALUE_LSB)
+#define MAC_PCU_NAV_VALUE_SET(x) (((x) << MAC_PCU_NAV_VALUE_LSB) & MAC_PCU_NAV_VALUE_MASK)
+
+#define MAC_PCU_RTS_SUCCESS_CNT_ADDRESS 0x0000805c
+#define MAC_PCU_RTS_SUCCESS_CNT_OFFSET 0x0000005c
+#define MAC_PCU_RTS_SUCCESS_CNT_VALUE_MSB 15
+#define MAC_PCU_RTS_SUCCESS_CNT_VALUE_LSB 0
+#define MAC_PCU_RTS_SUCCESS_CNT_VALUE_MASK 0x0000ffff
+#define MAC_PCU_RTS_SUCCESS_CNT_VALUE_GET(x) (((x) & MAC_PCU_RTS_SUCCESS_CNT_VALUE_MASK) >> MAC_PCU_RTS_SUCCESS_CNT_VALUE_LSB)
+#define MAC_PCU_RTS_SUCCESS_CNT_VALUE_SET(x) (((x) << MAC_PCU_RTS_SUCCESS_CNT_VALUE_LSB) & MAC_PCU_RTS_SUCCESS_CNT_VALUE_MASK)
+
+#define MAC_PCU_RTS_FAIL_CNT_ADDRESS 0x00008060
+#define MAC_PCU_RTS_FAIL_CNT_OFFSET 0x00000060
+#define MAC_PCU_RTS_FAIL_CNT_VALUE_MSB 15
+#define MAC_PCU_RTS_FAIL_CNT_VALUE_LSB 0
+#define MAC_PCU_RTS_FAIL_CNT_VALUE_MASK 0x0000ffff
+#define MAC_PCU_RTS_FAIL_CNT_VALUE_GET(x) (((x) & MAC_PCU_RTS_FAIL_CNT_VALUE_MASK) >> MAC_PCU_RTS_FAIL_CNT_VALUE_LSB)
+#define MAC_PCU_RTS_FAIL_CNT_VALUE_SET(x) (((x) << MAC_PCU_RTS_FAIL_CNT_VALUE_LSB) & MAC_PCU_RTS_FAIL_CNT_VALUE_MASK)
+
+#define MAC_PCU_ACK_FAIL_CNT_ADDRESS 0x00008064
+#define MAC_PCU_ACK_FAIL_CNT_OFFSET 0x00000064
+#define MAC_PCU_ACK_FAIL_CNT_VALUE_MSB 15
+#define MAC_PCU_ACK_FAIL_CNT_VALUE_LSB 0
+#define MAC_PCU_ACK_FAIL_CNT_VALUE_MASK 0x0000ffff
+#define MAC_PCU_ACK_FAIL_CNT_VALUE_GET(x) (((x) & MAC_PCU_ACK_FAIL_CNT_VALUE_MASK) >> MAC_PCU_ACK_FAIL_CNT_VALUE_LSB)
+#define MAC_PCU_ACK_FAIL_CNT_VALUE_SET(x) (((x) << MAC_PCU_ACK_FAIL_CNT_VALUE_LSB) & MAC_PCU_ACK_FAIL_CNT_VALUE_MASK)
+
+#define MAC_PCU_FCS_FAIL_CNT_ADDRESS 0x00008068
+#define MAC_PCU_FCS_FAIL_CNT_OFFSET 0x00000068
+#define MAC_PCU_FCS_FAIL_CNT_VALUE_MSB 15
+#define MAC_PCU_FCS_FAIL_CNT_VALUE_LSB 0
+#define MAC_PCU_FCS_FAIL_CNT_VALUE_MASK 0x0000ffff
+#define MAC_PCU_FCS_FAIL_CNT_VALUE_GET(x) (((x) & MAC_PCU_FCS_FAIL_CNT_VALUE_MASK) >> MAC_PCU_FCS_FAIL_CNT_VALUE_LSB)
+#define MAC_PCU_FCS_FAIL_CNT_VALUE_SET(x) (((x) << MAC_PCU_FCS_FAIL_CNT_VALUE_LSB) & MAC_PCU_FCS_FAIL_CNT_VALUE_MASK)
+
+#define MAC_PCU_BEACON_CNT_ADDRESS 0x0000806c
+#define MAC_PCU_BEACON_CNT_OFFSET 0x0000006c
+#define MAC_PCU_BEACON_CNT_VALUE_MSB 15
+#define MAC_PCU_BEACON_CNT_VALUE_LSB 0
+#define MAC_PCU_BEACON_CNT_VALUE_MASK 0x0000ffff
+#define MAC_PCU_BEACON_CNT_VALUE_GET(x) (((x) & MAC_PCU_BEACON_CNT_VALUE_MASK) >> MAC_PCU_BEACON_CNT_VALUE_LSB)
+#define MAC_PCU_BEACON_CNT_VALUE_SET(x) (((x) << MAC_PCU_BEACON_CNT_VALUE_LSB) & MAC_PCU_BEACON_CNT_VALUE_MASK)
+
+#define MAC_PCU_XRMODE_ADDRESS 0x00008070
+#define MAC_PCU_XRMODE_OFFSET 0x00000070
+#define MAC_PCU_XRMODE_FRAME_HOLD_MSB 31
+#define MAC_PCU_XRMODE_FRAME_HOLD_LSB 20
+#define MAC_PCU_XRMODE_FRAME_HOLD_MASK 0xfff00000
+#define MAC_PCU_XRMODE_FRAME_HOLD_GET(x) (((x) & MAC_PCU_XRMODE_FRAME_HOLD_MASK) >> MAC_PCU_XRMODE_FRAME_HOLD_LSB)
+#define MAC_PCU_XRMODE_FRAME_HOLD_SET(x) (((x) << MAC_PCU_XRMODE_FRAME_HOLD_LSB) & MAC_PCU_XRMODE_FRAME_HOLD_MASK)
+#define MAC_PCU_XRMODE_WAIT_FOR_POLL_MSB 7
+#define MAC_PCU_XRMODE_WAIT_FOR_POLL_LSB 7
+#define MAC_PCU_XRMODE_WAIT_FOR_POLL_MASK 0x00000080
+#define MAC_PCU_XRMODE_WAIT_FOR_POLL_GET(x) (((x) & MAC_PCU_XRMODE_WAIT_FOR_POLL_MASK) >> MAC_PCU_XRMODE_WAIT_FOR_POLL_LSB)
+#define MAC_PCU_XRMODE_WAIT_FOR_POLL_SET(x) (((x) << MAC_PCU_XRMODE_WAIT_FOR_POLL_LSB) & MAC_PCU_XRMODE_WAIT_FOR_POLL_MASK)
+#define MAC_PCU_XRMODE_POLL_TYPE_MSB 5
+#define MAC_PCU_XRMODE_POLL_TYPE_LSB 0
+#define MAC_PCU_XRMODE_POLL_TYPE_MASK 0x0000003f
+#define MAC_PCU_XRMODE_POLL_TYPE_GET(x) (((x) & MAC_PCU_XRMODE_POLL_TYPE_MASK) >> MAC_PCU_XRMODE_POLL_TYPE_LSB)
+#define MAC_PCU_XRMODE_POLL_TYPE_SET(x) (((x) << MAC_PCU_XRMODE_POLL_TYPE_LSB) & MAC_PCU_XRMODE_POLL_TYPE_MASK)
+
+#define MAC_PCU_XRDEL_ADDRESS 0x00008074
+#define MAC_PCU_XRDEL_OFFSET 0x00000074
+#define MAC_PCU_XRDEL_CHIRP_DATA_DELAY_MSB 31
+#define MAC_PCU_XRDEL_CHIRP_DATA_DELAY_LSB 16
+#define MAC_PCU_XRDEL_CHIRP_DATA_DELAY_MASK 0xffff0000
+#define MAC_PCU_XRDEL_CHIRP_DATA_DELAY_GET(x) (((x) & MAC_PCU_XRDEL_CHIRP_DATA_DELAY_MASK) >> MAC_PCU_XRDEL_CHIRP_DATA_DELAY_LSB)
+#define MAC_PCU_XRDEL_CHIRP_DATA_DELAY_SET(x) (((x) << MAC_PCU_XRDEL_CHIRP_DATA_DELAY_LSB) & MAC_PCU_XRDEL_CHIRP_DATA_DELAY_MASK)
+#define MAC_PCU_XRDEL_SLOT_DELAY_MSB 15
+#define MAC_PCU_XRDEL_SLOT_DELAY_LSB 0
+#define MAC_PCU_XRDEL_SLOT_DELAY_MASK 0x0000ffff
+#define MAC_PCU_XRDEL_SLOT_DELAY_GET(x) (((x) & MAC_PCU_XRDEL_SLOT_DELAY_MASK) >> MAC_PCU_XRDEL_SLOT_DELAY_LSB)
+#define MAC_PCU_XRDEL_SLOT_DELAY_SET(x) (((x) << MAC_PCU_XRDEL_SLOT_DELAY_LSB) & MAC_PCU_XRDEL_SLOT_DELAY_MASK)
+
+#define MAC_PCU_XRTO_ADDRESS 0x00008078
+#define MAC_PCU_XRTO_OFFSET 0x00000078
+#define MAC_PCU_XRTO_POLL_TIMEOUT_MSB 31
+#define MAC_PCU_XRTO_POLL_TIMEOUT_LSB 16
+#define MAC_PCU_XRTO_POLL_TIMEOUT_MASK 0xffff0000
+#define MAC_PCU_XRTO_POLL_TIMEOUT_GET(x) (((x) & MAC_PCU_XRTO_POLL_TIMEOUT_MASK) >> MAC_PCU_XRTO_POLL_TIMEOUT_LSB)
+#define MAC_PCU_XRTO_POLL_TIMEOUT_SET(x) (((x) << MAC_PCU_XRTO_POLL_TIMEOUT_LSB) & MAC_PCU_XRTO_POLL_TIMEOUT_MASK)
+#define MAC_PCU_XRTO_CHIRP_TIMEOUT_MSB 15
+#define MAC_PCU_XRTO_CHIRP_TIMEOUT_LSB 0
+#define MAC_PCU_XRTO_CHIRP_TIMEOUT_MASK 0x0000ffff
+#define MAC_PCU_XRTO_CHIRP_TIMEOUT_GET(x) (((x) & MAC_PCU_XRTO_CHIRP_TIMEOUT_MASK) >> MAC_PCU_XRTO_CHIRP_TIMEOUT_LSB)
+#define MAC_PCU_XRTO_CHIRP_TIMEOUT_SET(x) (((x) << MAC_PCU_XRTO_CHIRP_TIMEOUT_LSB) & MAC_PCU_XRTO_CHIRP_TIMEOUT_MASK)
+
+#define MAC_PCU_XRCRP_ADDRESS 0x0000807c
+#define MAC_PCU_XRCRP_OFFSET 0x0000007c
+#define MAC_PCU_XRCRP_CHIRP_GAP_MSB 31
+#define MAC_PCU_XRCRP_CHIRP_GAP_LSB 16
+#define MAC_PCU_XRCRP_CHIRP_GAP_MASK 0xffff0000
+#define MAC_PCU_XRCRP_CHIRP_GAP_GET(x) (((x) & MAC_PCU_XRCRP_CHIRP_GAP_MASK) >> MAC_PCU_XRCRP_CHIRP_GAP_LSB)
+#define MAC_PCU_XRCRP_CHIRP_GAP_SET(x) (((x) << MAC_PCU_XRCRP_CHIRP_GAP_LSB) & MAC_PCU_XRCRP_CHIRP_GAP_MASK)
+#define MAC_PCU_XRCRP_SEND_CHIRP_MSB 0
+#define MAC_PCU_XRCRP_SEND_CHIRP_LSB 0
+#define MAC_PCU_XRCRP_SEND_CHIRP_MASK 0x00000001
+#define MAC_PCU_XRCRP_SEND_CHIRP_GET(x) (((x) & MAC_PCU_XRCRP_SEND_CHIRP_MASK) >> MAC_PCU_XRCRP_SEND_CHIRP_LSB)
+#define MAC_PCU_XRCRP_SEND_CHIRP_SET(x) (((x) << MAC_PCU_XRCRP_SEND_CHIRP_LSB) & MAC_PCU_XRCRP_SEND_CHIRP_MASK)
+
+#define MAC_PCU_XRSTMP_ADDRESS 0x00008080
+#define MAC_PCU_XRSTMP_OFFSET 0x00000080
+#define MAC_PCU_XRSTMP_RX_ABORT_RSSI_THRESH_MSB 23
+#define MAC_PCU_XRSTMP_RX_ABORT_RSSI_THRESH_LSB 16
+#define MAC_PCU_XRSTMP_RX_ABORT_RSSI_THRESH_MASK 0x00ff0000
+#define MAC_PCU_XRSTMP_RX_ABORT_RSSI_THRESH_GET(x) (((x) & MAC_PCU_XRSTMP_RX_ABORT_RSSI_THRESH_MASK) >> MAC_PCU_XRSTMP_RX_ABORT_RSSI_THRESH_LSB)
+#define MAC_PCU_XRSTMP_RX_ABORT_RSSI_THRESH_SET(x) (((x) << MAC_PCU_XRSTMP_RX_ABORT_RSSI_THRESH_LSB) & MAC_PCU_XRSTMP_RX_ABORT_RSSI_THRESH_MASK)
+#define MAC_PCU_XRSTMP_TX_STOMP_RSSI_THRESH_MSB 15
+#define MAC_PCU_XRSTMP_TX_STOMP_RSSI_THRESH_LSB 8
+#define MAC_PCU_XRSTMP_TX_STOMP_RSSI_THRESH_MASK 0x0000ff00
+#define MAC_PCU_XRSTMP_TX_STOMP_RSSI_THRESH_GET(x) (((x) & MAC_PCU_XRSTMP_TX_STOMP_RSSI_THRESH_MASK) >> MAC_PCU_XRSTMP_TX_STOMP_RSSI_THRESH_LSB)
+#define MAC_PCU_XRSTMP_TX_STOMP_RSSI_THRESH_SET(x) (((x) << MAC_PCU_XRSTMP_TX_STOMP_RSSI_THRESH_LSB) & MAC_PCU_XRSTMP_TX_STOMP_RSSI_THRESH_MASK)
+#define MAC_PCU_XRSTMP_RX_ABORT_DATA_MSB 5
+#define MAC_PCU_XRSTMP_RX_ABORT_DATA_LSB 5
+#define MAC_PCU_XRSTMP_RX_ABORT_DATA_MASK 0x00000020
+#define MAC_PCU_XRSTMP_RX_ABORT_DATA_GET(x) (((x) & MAC_PCU_XRSTMP_RX_ABORT_DATA_MASK) >> MAC_PCU_XRSTMP_RX_ABORT_DATA_LSB)
+#define MAC_PCU_XRSTMP_RX_ABORT_DATA_SET(x) (((x) << MAC_PCU_XRSTMP_RX_ABORT_DATA_LSB) & MAC_PCU_XRSTMP_RX_ABORT_DATA_MASK)
+#define MAC_PCU_XRSTMP_TX_STOMP_DATA_MSB 4
+#define MAC_PCU_XRSTMP_TX_STOMP_DATA_LSB 4
+#define MAC_PCU_XRSTMP_TX_STOMP_DATA_MASK 0x00000010
+#define MAC_PCU_XRSTMP_TX_STOMP_DATA_GET(x) (((x) & MAC_PCU_XRSTMP_TX_STOMP_DATA_MASK) >> MAC_PCU_XRSTMP_TX_STOMP_DATA_LSB)
+#define MAC_PCU_XRSTMP_TX_STOMP_DATA_SET(x) (((x) << MAC_PCU_XRSTMP_TX_STOMP_DATA_LSB) & MAC_PCU_XRSTMP_TX_STOMP_DATA_MASK)
+#define MAC_PCU_XRSTMP_TX_STOMP_BSSID_MSB 3
+#define MAC_PCU_XRSTMP_TX_STOMP_BSSID_LSB 3
+#define MAC_PCU_XRSTMP_TX_STOMP_BSSID_MASK 0x00000008
+#define MAC_PCU_XRSTMP_TX_STOMP_BSSID_GET(x) (((x) & MAC_PCU_XRSTMP_TX_STOMP_BSSID_MASK) >> MAC_PCU_XRSTMP_TX_STOMP_BSSID_LSB)
+#define MAC_PCU_XRSTMP_TX_STOMP_BSSID_SET(x) (((x) << MAC_PCU_XRSTMP_TX_STOMP_BSSID_LSB) & MAC_PCU_XRSTMP_TX_STOMP_BSSID_MASK)
+#define MAC_PCU_XRSTMP_TX_STOMP_RSSI_MSB 2
+#define MAC_PCU_XRSTMP_TX_STOMP_RSSI_LSB 2
+#define MAC_PCU_XRSTMP_TX_STOMP_RSSI_MASK 0x00000004
+#define MAC_PCU_XRSTMP_TX_STOMP_RSSI_GET(x) (((x) & MAC_PCU_XRSTMP_TX_STOMP_RSSI_MASK) >> MAC_PCU_XRSTMP_TX_STOMP_RSSI_LSB)
+#define MAC_PCU_XRSTMP_TX_STOMP_RSSI_SET(x) (((x) << MAC_PCU_XRSTMP_TX_STOMP_RSSI_LSB) & MAC_PCU_XRSTMP_TX_STOMP_RSSI_MASK)
+#define MAC_PCU_XRSTMP_RX_ABORT_BSSID_MSB 1
+#define MAC_PCU_XRSTMP_RX_ABORT_BSSID_LSB 1
+#define MAC_PCU_XRSTMP_RX_ABORT_BSSID_MASK 0x00000002
+#define MAC_PCU_XRSTMP_RX_ABORT_BSSID_GET(x) (((x) & MAC_PCU_XRSTMP_RX_ABORT_BSSID_MASK) >> MAC_PCU_XRSTMP_RX_ABORT_BSSID_LSB)
+#define MAC_PCU_XRSTMP_RX_ABORT_BSSID_SET(x) (((x) << MAC_PCU_XRSTMP_RX_ABORT_BSSID_LSB) & MAC_PCU_XRSTMP_RX_ABORT_BSSID_MASK)
+#define MAC_PCU_XRSTMP_RX_ABORT_RSSI_MSB 0
+#define MAC_PCU_XRSTMP_RX_ABORT_RSSI_LSB 0
+#define MAC_PCU_XRSTMP_RX_ABORT_RSSI_MASK 0x00000001
+#define MAC_PCU_XRSTMP_RX_ABORT_RSSI_GET(x) (((x) & MAC_PCU_XRSTMP_RX_ABORT_RSSI_MASK) >> MAC_PCU_XRSTMP_RX_ABORT_RSSI_LSB)
+#define MAC_PCU_XRSTMP_RX_ABORT_RSSI_SET(x) (((x) << MAC_PCU_XRSTMP_RX_ABORT_RSSI_LSB) & MAC_PCU_XRSTMP_RX_ABORT_RSSI_MASK)
+
+#define MAC_PCU_ADDR1_MASK_L32_ADDRESS 0x00008084
+#define MAC_PCU_ADDR1_MASK_L32_OFFSET 0x00000084
+#define MAC_PCU_ADDR1_MASK_L32_VALUE_MSB 31
+#define MAC_PCU_ADDR1_MASK_L32_VALUE_LSB 0
+#define MAC_PCU_ADDR1_MASK_L32_VALUE_MASK 0xffffffff
+#define MAC_PCU_ADDR1_MASK_L32_VALUE_GET(x) (((x) & MAC_PCU_ADDR1_MASK_L32_VALUE_MASK) >> MAC_PCU_ADDR1_MASK_L32_VALUE_LSB)
+#define MAC_PCU_ADDR1_MASK_L32_VALUE_SET(x) (((x) << MAC_PCU_ADDR1_MASK_L32_VALUE_LSB) & MAC_PCU_ADDR1_MASK_L32_VALUE_MASK)
+
+#define MAC_PCU_ADDR1_MASK_U16_ADDRESS 0x00008088
+#define MAC_PCU_ADDR1_MASK_U16_OFFSET 0x00000088
+#define MAC_PCU_ADDR1_MASK_U16_VALUE_MSB 15
+#define MAC_PCU_ADDR1_MASK_U16_VALUE_LSB 0
+#define MAC_PCU_ADDR1_MASK_U16_VALUE_MASK 0x0000ffff
+#define MAC_PCU_ADDR1_MASK_U16_VALUE_GET(x) (((x) & MAC_PCU_ADDR1_MASK_U16_VALUE_MASK) >> MAC_PCU_ADDR1_MASK_U16_VALUE_LSB)
+#define MAC_PCU_ADDR1_MASK_U16_VALUE_SET(x) (((x) << MAC_PCU_ADDR1_MASK_U16_VALUE_LSB) & MAC_PCU_ADDR1_MASK_U16_VALUE_MASK)
+
+#define MAC_PCU_TPC_ADDRESS 0x0000808c
+#define MAC_PCU_TPC_OFFSET 0x0000008c
+#define MAC_PCU_TPC_CHIRP_PWR_MSB 21
+#define MAC_PCU_TPC_CHIRP_PWR_LSB 16
+#define MAC_PCU_TPC_CHIRP_PWR_MASK 0x003f0000
+#define MAC_PCU_TPC_CHIRP_PWR_GET(x) (((x) & MAC_PCU_TPC_CHIRP_PWR_MASK) >> MAC_PCU_TPC_CHIRP_PWR_LSB)
+#define MAC_PCU_TPC_CHIRP_PWR_SET(x) (((x) << MAC_PCU_TPC_CHIRP_PWR_LSB) & MAC_PCU_TPC_CHIRP_PWR_MASK)
+#define MAC_PCU_TPC_CTS_PWR_MSB 13
+#define MAC_PCU_TPC_CTS_PWR_LSB 8
+#define MAC_PCU_TPC_CTS_PWR_MASK 0x00003f00
+#define MAC_PCU_TPC_CTS_PWR_GET(x) (((x) & MAC_PCU_TPC_CTS_PWR_MASK) >> MAC_PCU_TPC_CTS_PWR_LSB)
+#define MAC_PCU_TPC_CTS_PWR_SET(x) (((x) << MAC_PCU_TPC_CTS_PWR_LSB) & MAC_PCU_TPC_CTS_PWR_MASK)
+#define MAC_PCU_TPC_ACK_PWR_MSB 5
+#define MAC_PCU_TPC_ACK_PWR_LSB 0
+#define MAC_PCU_TPC_ACK_PWR_MASK 0x0000003f
+#define MAC_PCU_TPC_ACK_PWR_GET(x) (((x) & MAC_PCU_TPC_ACK_PWR_MASK) >> MAC_PCU_TPC_ACK_PWR_LSB)
+#define MAC_PCU_TPC_ACK_PWR_SET(x) (((x) << MAC_PCU_TPC_ACK_PWR_LSB) & MAC_PCU_TPC_ACK_PWR_MASK)
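+/*
+ * Illustrative note (not part of the original register dump): each field's
+ * generated _SET() macro shifts a value into position and masks it, and the
+ * matching _GET() macro extracts it again, so a full register image is
+ * typically built by OR-ing the _SET() results together.  The TPC power
+ * values below are arbitrary placeholders, not hardware defaults:
+ *
+ *   u32 tpc = MAC_PCU_TPC_ACK_PWR_SET(0x10) |
+ *             MAC_PCU_TPC_CTS_PWR_SET(0x10) |
+ *             MAC_PCU_TPC_CHIRP_PWR_SET(0x10);
+ *   u32 ack_pwr = MAC_PCU_TPC_ACK_PWR_GET(tpc);   (ack_pwr == 0x10)
+ */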
+
+#define MAC_PCU_TX_FRAME_CNT_ADDRESS 0x00008090
+#define MAC_PCU_TX_FRAME_CNT_OFFSET 0x00000090
+#define MAC_PCU_TX_FRAME_CNT_VALUE_MSB 31
+#define MAC_PCU_TX_FRAME_CNT_VALUE_LSB 0
+#define MAC_PCU_TX_FRAME_CNT_VALUE_MASK 0xffffffff
+#define MAC_PCU_TX_FRAME_CNT_VALUE_GET(x) (((x) & MAC_PCU_TX_FRAME_CNT_VALUE_MASK) >> MAC_PCU_TX_FRAME_CNT_VALUE_LSB)
+#define MAC_PCU_TX_FRAME_CNT_VALUE_SET(x) (((x) << MAC_PCU_TX_FRAME_CNT_VALUE_LSB) & MAC_PCU_TX_FRAME_CNT_VALUE_MASK)
+
+#define MAC_PCU_RX_FRAME_CNT_ADDRESS 0x00008094
+#define MAC_PCU_RX_FRAME_CNT_OFFSET 0x00000094
+#define MAC_PCU_RX_FRAME_CNT_VALUE_MSB 31
+#define MAC_PCU_RX_FRAME_CNT_VALUE_LSB 0
+#define MAC_PCU_RX_FRAME_CNT_VALUE_MASK 0xffffffff
+#define MAC_PCU_RX_FRAME_CNT_VALUE_GET(x) (((x) & MAC_PCU_RX_FRAME_CNT_VALUE_MASK) >> MAC_PCU_RX_FRAME_CNT_VALUE_LSB)
+#define MAC_PCU_RX_FRAME_CNT_VALUE_SET(x) (((x) << MAC_PCU_RX_FRAME_CNT_VALUE_LSB) & MAC_PCU_RX_FRAME_CNT_VALUE_MASK)
+
+#define MAC_PCU_RX_CLEAR_CNT_ADDRESS 0x00008098
+#define MAC_PCU_RX_CLEAR_CNT_OFFSET 0x00000098
+#define MAC_PCU_RX_CLEAR_CNT_VALUE_MSB 31
+#define MAC_PCU_RX_CLEAR_CNT_VALUE_LSB 0
+#define MAC_PCU_RX_CLEAR_CNT_VALUE_MASK 0xffffffff
+#define MAC_PCU_RX_CLEAR_CNT_VALUE_GET(x) (((x) & MAC_PCU_RX_CLEAR_CNT_VALUE_MASK) >> MAC_PCU_RX_CLEAR_CNT_VALUE_LSB)
+#define MAC_PCU_RX_CLEAR_CNT_VALUE_SET(x) (((x) << MAC_PCU_RX_CLEAR_CNT_VALUE_LSB) & MAC_PCU_RX_CLEAR_CNT_VALUE_MASK)
+
+#define MAC_PCU_CYCLE_CNT_ADDRESS 0x0000809c
+#define MAC_PCU_CYCLE_CNT_OFFSET 0x0000009c
+#define MAC_PCU_CYCLE_CNT_VALUE_MSB 31
+#define MAC_PCU_CYCLE_CNT_VALUE_LSB 0
+#define MAC_PCU_CYCLE_CNT_VALUE_MASK 0xffffffff
+#define MAC_PCU_CYCLE_CNT_VALUE_GET(x) (((x) & MAC_PCU_CYCLE_CNT_VALUE_MASK) >> MAC_PCU_CYCLE_CNT_VALUE_LSB)
+#define MAC_PCU_CYCLE_CNT_VALUE_SET(x) (((x) << MAC_PCU_CYCLE_CNT_VALUE_LSB) & MAC_PCU_CYCLE_CNT_VALUE_MASK)
+
+#define MAC_PCU_QUIET_TIME_1_ADDRESS 0x000080a0
+#define MAC_PCU_QUIET_TIME_1_OFFSET 0x000000a0
+#define MAC_PCU_QUIET_TIME_1_ACK_CTS_ENABLE_MSB 17
+#define MAC_PCU_QUIET_TIME_1_ACK_CTS_ENABLE_LSB 17
+#define MAC_PCU_QUIET_TIME_1_ACK_CTS_ENABLE_MASK 0x00020000
+#define MAC_PCU_QUIET_TIME_1_ACK_CTS_ENABLE_GET(x) (((x) & MAC_PCU_QUIET_TIME_1_ACK_CTS_ENABLE_MASK) >> MAC_PCU_QUIET_TIME_1_ACK_CTS_ENABLE_LSB)
+#define MAC_PCU_QUIET_TIME_1_ACK_CTS_ENABLE_SET(x) (((x) << MAC_PCU_QUIET_TIME_1_ACK_CTS_ENABLE_LSB) & MAC_PCU_QUIET_TIME_1_ACK_CTS_ENABLE_MASK)
+
+#define MAC_PCU_QUIET_TIME_2_ADDRESS 0x000080a4
+#define MAC_PCU_QUIET_TIME_2_OFFSET 0x000000a4
+#define MAC_PCU_QUIET_TIME_2_DURATION_MSB 31
+#define MAC_PCU_QUIET_TIME_2_DURATION_LSB 16
+#define MAC_PCU_QUIET_TIME_2_DURATION_MASK 0xffff0000
+#define MAC_PCU_QUIET_TIME_2_DURATION_GET(x) (((x) & MAC_PCU_QUIET_TIME_2_DURATION_MASK) >> MAC_PCU_QUIET_TIME_2_DURATION_LSB)
+#define MAC_PCU_QUIET_TIME_2_DURATION_SET(x) (((x) << MAC_PCU_QUIET_TIME_2_DURATION_LSB) & MAC_PCU_QUIET_TIME_2_DURATION_MASK)
+
+#define MAC_PCU_QOS_NO_ACK_ADDRESS 0x000080a8
+#define MAC_PCU_QOS_NO_ACK_OFFSET 0x000000a8
+#define MAC_PCU_QOS_NO_ACK_BYTE_OFFSET_MSB 8
+#define MAC_PCU_QOS_NO_ACK_BYTE_OFFSET_LSB 7
+#define MAC_PCU_QOS_NO_ACK_BYTE_OFFSET_MASK 0x00000180
+#define MAC_PCU_QOS_NO_ACK_BYTE_OFFSET_GET(x) (((x) & MAC_PCU_QOS_NO_ACK_BYTE_OFFSET_MASK) >> MAC_PCU_QOS_NO_ACK_BYTE_OFFSET_LSB)
+#define MAC_PCU_QOS_NO_ACK_BYTE_OFFSET_SET(x) (((x) << MAC_PCU_QOS_NO_ACK_BYTE_OFFSET_LSB) & MAC_PCU_QOS_NO_ACK_BYTE_OFFSET_MASK)
+#define MAC_PCU_QOS_NO_ACK_BIT_OFFSET_MSB 6
+#define MAC_PCU_QOS_NO_ACK_BIT_OFFSET_LSB 4
+#define MAC_PCU_QOS_NO_ACK_BIT_OFFSET_MASK 0x00000070
+#define MAC_PCU_QOS_NO_ACK_BIT_OFFSET_GET(x) (((x) & MAC_PCU_QOS_NO_ACK_BIT_OFFSET_MASK) >> MAC_PCU_QOS_NO_ACK_BIT_OFFSET_LSB)
+#define MAC_PCU_QOS_NO_ACK_BIT_OFFSET_SET(x) (((x) << MAC_PCU_QOS_NO_ACK_BIT_OFFSET_LSB) & MAC_PCU_QOS_NO_ACK_BIT_OFFSET_MASK)
+#define MAC_PCU_QOS_NO_ACK_TWO_BIT_VALUES_MSB 3
+#define MAC_PCU_QOS_NO_ACK_TWO_BIT_VALUES_LSB 0
+#define MAC_PCU_QOS_NO_ACK_TWO_BIT_VALUES_MASK 0x0000000f
+#define MAC_PCU_QOS_NO_ACK_TWO_BIT_VALUES_GET(x) (((x) & MAC_PCU_QOS_NO_ACK_TWO_BIT_VALUES_MASK) >> MAC_PCU_QOS_NO_ACK_TWO_BIT_VALUES_LSB)
+#define MAC_PCU_QOS_NO_ACK_TWO_BIT_VALUES_SET(x) (((x) << MAC_PCU_QOS_NO_ACK_TWO_BIT_VALUES_LSB) & MAC_PCU_QOS_NO_ACK_TWO_BIT_VALUES_MASK)
+
+#define MAC_PCU_PHY_ERROR_MASK_ADDRESS 0x000080ac
+#define MAC_PCU_PHY_ERROR_MASK_OFFSET 0x000000ac
+#define MAC_PCU_PHY_ERROR_MASK_VALUE_MSB 31
+#define MAC_PCU_PHY_ERROR_MASK_VALUE_LSB 0
+#define MAC_PCU_PHY_ERROR_MASK_VALUE_MASK 0xffffffff
+#define MAC_PCU_PHY_ERROR_MASK_VALUE_GET(x) (((x) & MAC_PCU_PHY_ERROR_MASK_VALUE_MASK) >> MAC_PCU_PHY_ERROR_MASK_VALUE_LSB)
+#define MAC_PCU_PHY_ERROR_MASK_VALUE_SET(x) (((x) << MAC_PCU_PHY_ERROR_MASK_VALUE_LSB) & MAC_PCU_PHY_ERROR_MASK_VALUE_MASK)
+
+#define MAC_PCU_XRLAT_ADDRESS 0x000080b0
+#define MAC_PCU_XRLAT_OFFSET 0x000000b0
+#define MAC_PCU_XRLAT_VALUE_MSB 11
+#define MAC_PCU_XRLAT_VALUE_LSB 0
+#define MAC_PCU_XRLAT_VALUE_MASK 0x00000fff
+#define MAC_PCU_XRLAT_VALUE_GET(x) (((x) & MAC_PCU_XRLAT_VALUE_MASK) >> MAC_PCU_XRLAT_VALUE_LSB)
+#define MAC_PCU_XRLAT_VALUE_SET(x) (((x) << MAC_PCU_XRLAT_VALUE_LSB) & MAC_PCU_XRLAT_VALUE_MASK)
+
+#define MAC_PCU_RXBUF_ADDRESS 0x000080b4
+#define MAC_PCU_RXBUF_OFFSET 0x000000b4
+#define MAC_PCU_RXBUF_REG_RD_ENABLE_MSB 11
+#define MAC_PCU_RXBUF_REG_RD_ENABLE_LSB 11
+#define MAC_PCU_RXBUF_REG_RD_ENABLE_MASK 0x00000800
+#define MAC_PCU_RXBUF_REG_RD_ENABLE_GET(x) (((x) & MAC_PCU_RXBUF_REG_RD_ENABLE_MASK) >> MAC_PCU_RXBUF_REG_RD_ENABLE_LSB)
+#define MAC_PCU_RXBUF_REG_RD_ENABLE_SET(x) (((x) << MAC_PCU_RXBUF_REG_RD_ENABLE_LSB) & MAC_PCU_RXBUF_REG_RD_ENABLE_MASK)
+#define MAC_PCU_RXBUF_HIGH_PRIORITY_THRSHD_MSB 10
+#define MAC_PCU_RXBUF_HIGH_PRIORITY_THRSHD_LSB 0
+#define MAC_PCU_RXBUF_HIGH_PRIORITY_THRSHD_MASK 0x000007ff
+#define MAC_PCU_RXBUF_HIGH_PRIORITY_THRSHD_GET(x) (((x) & MAC_PCU_RXBUF_HIGH_PRIORITY_THRSHD_MASK) >> MAC_PCU_RXBUF_HIGH_PRIORITY_THRSHD_LSB)
+#define MAC_PCU_RXBUF_HIGH_PRIORITY_THRSHD_SET(x) (((x) << MAC_PCU_RXBUF_HIGH_PRIORITY_THRSHD_LSB) & MAC_PCU_RXBUF_HIGH_PRIORITY_THRSHD_MASK)
+
+#define MAC_PCU_MIC_QOS_CONTROL_ADDRESS 0x000080b8
+#define MAC_PCU_MIC_QOS_CONTROL_OFFSET 0x000000b8
+#define MAC_PCU_MIC_QOS_CONTROL_ENABLE_MSB 16
+#define MAC_PCU_MIC_QOS_CONTROL_ENABLE_LSB 16
+#define MAC_PCU_MIC_QOS_CONTROL_ENABLE_MASK 0x00010000
+#define MAC_PCU_MIC_QOS_CONTROL_ENABLE_GET(x) (((x) & MAC_PCU_MIC_QOS_CONTROL_ENABLE_MASK) >> MAC_PCU_MIC_QOS_CONTROL_ENABLE_LSB)
+#define MAC_PCU_MIC_QOS_CONTROL_ENABLE_SET(x) (((x) << MAC_PCU_MIC_QOS_CONTROL_ENABLE_LSB) & MAC_PCU_MIC_QOS_CONTROL_ENABLE_MASK)
+#define MAC_PCU_MIC_QOS_CONTROL_VALUE_7_MSB 15
+#define MAC_PCU_MIC_QOS_CONTROL_VALUE_7_LSB 14
+#define MAC_PCU_MIC_QOS_CONTROL_VALUE_7_MASK 0x0000c000
+#define MAC_PCU_MIC_QOS_CONTROL_VALUE_7_GET(x) (((x) & MAC_PCU_MIC_QOS_CONTROL_VALUE_7_MASK) >> MAC_PCU_MIC_QOS_CONTROL_VALUE_7_LSB)
+#define MAC_PCU_MIC_QOS_CONTROL_VALUE_7_SET(x) (((x) << MAC_PCU_MIC_QOS_CONTROL_VALUE_7_LSB) & MAC_PCU_MIC_QOS_CONTROL_VALUE_7_MASK)
+#define MAC_PCU_MIC_QOS_CONTROL_VALUE_6_MSB 13
+#define MAC_PCU_MIC_QOS_CONTROL_VALUE_6_LSB 12
+#define MAC_PCU_MIC_QOS_CONTROL_VALUE_6_MASK 0x00003000
+#define MAC_PCU_MIC_QOS_CONTROL_VALUE_6_GET(x) (((x) & MAC_PCU_MIC_QOS_CONTROL_VALUE_6_MASK) >> MAC_PCU_MIC_QOS_CONTROL_VALUE_6_LSB)
+#define MAC_PCU_MIC_QOS_CONTROL_VALUE_6_SET(x) (((x) << MAC_PCU_MIC_QOS_CONTROL_VALUE_6_LSB) & MAC_PCU_MIC_QOS_CONTROL_VALUE_6_MASK)
+#define MAC_PCU_MIC_QOS_CONTROL_VALUE_5_MSB 11
+#define MAC_PCU_MIC_QOS_CONTROL_VALUE_5_LSB 10
+#define MAC_PCU_MIC_QOS_CONTROL_VALUE_5_MASK 0x00000c00
+#define MAC_PCU_MIC_QOS_CONTROL_VALUE_5_GET(x) (((x) & MAC_PCU_MIC_QOS_CONTROL_VALUE_5_MASK) >> MAC_PCU_MIC_QOS_CONTROL_VALUE_5_LSB)
+#define MAC_PCU_MIC_QOS_CONTROL_VALUE_5_SET(x) (((x) << MAC_PCU_MIC_QOS_CONTROL_VALUE_5_LSB) & MAC_PCU_MIC_QOS_CONTROL_VALUE_5_MASK)
+#define MAC_PCU_MIC_QOS_CONTROL_VALUE_4_MSB 9
+#define MAC_PCU_MIC_QOS_CONTROL_VALUE_4_LSB 8
+#define MAC_PCU_MIC_QOS_CONTROL_VALUE_4_MASK 0x00000300
+#define MAC_PCU_MIC_QOS_CONTROL_VALUE_4_GET(x) (((x) & MAC_PCU_MIC_QOS_CONTROL_VALUE_4_MASK) >> MAC_PCU_MIC_QOS_CONTROL_VALUE_4_LSB)
+#define MAC_PCU_MIC_QOS_CONTROL_VALUE_4_SET(x) (((x) << MAC_PCU_MIC_QOS_CONTROL_VALUE_4_LSB) & MAC_PCU_MIC_QOS_CONTROL_VALUE_4_MASK)
+#define MAC_PCU_MIC_QOS_CONTROL_VALUE_3_MSB 7
+#define MAC_PCU_MIC_QOS_CONTROL_VALUE_3_LSB 6
+#define MAC_PCU_MIC_QOS_CONTROL_VALUE_3_MASK 0x000000c0
+#define MAC_PCU_MIC_QOS_CONTROL_VALUE_3_GET(x) (((x) & MAC_PCU_MIC_QOS_CONTROL_VALUE_3_MASK) >> MAC_PCU_MIC_QOS_CONTROL_VALUE_3_LSB)
+#define MAC_PCU_MIC_QOS_CONTROL_VALUE_3_SET(x) (((x) << MAC_PCU_MIC_QOS_CONTROL_VALUE_3_LSB) & MAC_PCU_MIC_QOS_CONTROL_VALUE_3_MASK)
+#define MAC_PCU_MIC_QOS_CONTROL_VALUE_2_MSB 5
+#define MAC_PCU_MIC_QOS_CONTROL_VALUE_2_LSB 4
+#define MAC_PCU_MIC_QOS_CONTROL_VALUE_2_MASK 0x00000030
+#define MAC_PCU_MIC_QOS_CONTROL_VALUE_2_GET(x) (((x) & MAC_PCU_MIC_QOS_CONTROL_VALUE_2_MASK) >> MAC_PCU_MIC_QOS_CONTROL_VALUE_2_LSB)
+#define MAC_PCU_MIC_QOS_CONTROL_VALUE_2_SET(x) (((x) << MAC_PCU_MIC_QOS_CONTROL_VALUE_2_LSB) & MAC_PCU_MIC_QOS_CONTROL_VALUE_2_MASK)
+#define MAC_PCU_MIC_QOS_CONTROL_VALUE_1_MSB 3
+#define MAC_PCU_MIC_QOS_CONTROL_VALUE_1_LSB 2
+#define MAC_PCU_MIC_QOS_CONTROL_VALUE_1_MASK 0x0000000c
+#define MAC_PCU_MIC_QOS_CONTROL_VALUE_1_GET(x) (((x) & MAC_PCU_MIC_QOS_CONTROL_VALUE_1_MASK) >> MAC_PCU_MIC_QOS_CONTROL_VALUE_1_LSB)
+#define MAC_PCU_MIC_QOS_CONTROL_VALUE_1_SET(x) (((x) << MAC_PCU_MIC_QOS_CONTROL_VALUE_1_LSB) & MAC_PCU_MIC_QOS_CONTROL_VALUE_1_MASK)
+#define MAC_PCU_MIC_QOS_CONTROL_VALUE_0_MSB 1
+#define MAC_PCU_MIC_QOS_CONTROL_VALUE_0_LSB 0
+#define MAC_PCU_MIC_QOS_CONTROL_VALUE_0_MASK 0x00000003
+#define MAC_PCU_MIC_QOS_CONTROL_VALUE_0_GET(x) (((x) & MAC_PCU_MIC_QOS_CONTROL_VALUE_0_MASK) >> MAC_PCU_MIC_QOS_CONTROL_VALUE_0_LSB)
+#define MAC_PCU_MIC_QOS_CONTROL_VALUE_0_SET(x) (((x) << MAC_PCU_MIC_QOS_CONTROL_VALUE_0_LSB) & MAC_PCU_MIC_QOS_CONTROL_VALUE_0_MASK)
+
+#define MAC_PCU_MIC_QOS_SELECT_ADDRESS 0x000080bc
+#define MAC_PCU_MIC_QOS_SELECT_OFFSET 0x000000bc
+#define MAC_PCU_MIC_QOS_SELECT_VALUE_7_MSB 31
+#define MAC_PCU_MIC_QOS_SELECT_VALUE_7_LSB 28
+#define MAC_PCU_MIC_QOS_SELECT_VALUE_7_MASK 0xf0000000
+#define MAC_PCU_MIC_QOS_SELECT_VALUE_7_GET(x) (((x) & MAC_PCU_MIC_QOS_SELECT_VALUE_7_MASK) >> MAC_PCU_MIC_QOS_SELECT_VALUE_7_LSB)
+#define MAC_PCU_MIC_QOS_SELECT_VALUE_7_SET(x) (((x) << MAC_PCU_MIC_QOS_SELECT_VALUE_7_LSB) & MAC_PCU_MIC_QOS_SELECT_VALUE_7_MASK)
+#define MAC_PCU_MIC_QOS_SELECT_VALUE_6_MSB 27
+#define MAC_PCU_MIC_QOS_SELECT_VALUE_6_LSB 24
+#define MAC_PCU_MIC_QOS_SELECT_VALUE_6_MASK 0x0f000000
+#define MAC_PCU_MIC_QOS_SELECT_VALUE_6_GET(x) (((x) & MAC_PCU_MIC_QOS_SELECT_VALUE_6_MASK) >> MAC_PCU_MIC_QOS_SELECT_VALUE_6_LSB)
+#define MAC_PCU_MIC_QOS_SELECT_VALUE_6_SET(x) (((x) << MAC_PCU_MIC_QOS_SELECT_VALUE_6_LSB) & MAC_PCU_MIC_QOS_SELECT_VALUE_6_MASK)
+#define MAC_PCU_MIC_QOS_SELECT_VALUE_5_MSB 23
+#define MAC_PCU_MIC_QOS_SELECT_VALUE_5_LSB 20
+#define MAC_PCU_MIC_QOS_SELECT_VALUE_5_MASK 0x00f00000
+#define MAC_PCU_MIC_QOS_SELECT_VALUE_5_GET(x) (((x) & MAC_PCU_MIC_QOS_SELECT_VALUE_5_MASK) >> MAC_PCU_MIC_QOS_SELECT_VALUE_5_LSB)
+#define MAC_PCU_MIC_QOS_SELECT_VALUE_5_SET(x) (((x) << MAC_PCU_MIC_QOS_SELECT_VALUE_5_LSB) & MAC_PCU_MIC_QOS_SELECT_VALUE_5_MASK)
+#define MAC_PCU_MIC_QOS_SELECT_VALUE_4_MSB 19
+#define MAC_PCU_MIC_QOS_SELECT_VALUE_4_LSB 16
+#define MAC_PCU_MIC_QOS_SELECT_VALUE_4_MASK 0x000f0000
+#define MAC_PCU_MIC_QOS_SELECT_VALUE_4_GET(x) (((x) & MAC_PCU_MIC_QOS_SELECT_VALUE_4_MASK) >> MAC_PCU_MIC_QOS_SELECT_VALUE_4_LSB)
+#define MAC_PCU_MIC_QOS_SELECT_VALUE_4_SET(x) (((x) << MAC_PCU_MIC_QOS_SELECT_VALUE_4_LSB) & MAC_PCU_MIC_QOS_SELECT_VALUE_4_MASK)
+#define MAC_PCU_MIC_QOS_SELECT_VALUE_3_MSB 15
+#define MAC_PCU_MIC_QOS_SELECT_VALUE_3_LSB 12
+#define MAC_PCU_MIC_QOS_SELECT_VALUE_3_MASK 0x0000f000
+#define MAC_PCU_MIC_QOS_SELECT_VALUE_3_GET(x) (((x) & MAC_PCU_MIC_QOS_SELECT_VALUE_3_MASK) >> MAC_PCU_MIC_QOS_SELECT_VALUE_3_LSB)
+#define MAC_PCU_MIC_QOS_SELECT_VALUE_3_SET(x) (((x) << MAC_PCU_MIC_QOS_SELECT_VALUE_3_LSB) & MAC_PCU_MIC_QOS_SELECT_VALUE_3_MASK)
+#define MAC_PCU_MIC_QOS_SELECT_VALUE_2_MSB 11
+#define MAC_PCU_MIC_QOS_SELECT_VALUE_2_LSB 8
+#define MAC_PCU_MIC_QOS_SELECT_VALUE_2_MASK 0x00000f00
+#define MAC_PCU_MIC_QOS_SELECT_VALUE_2_GET(x) (((x) & MAC_PCU_MIC_QOS_SELECT_VALUE_2_MASK) >> MAC_PCU_MIC_QOS_SELECT_VALUE_2_LSB)
+#define MAC_PCU_MIC_QOS_SELECT_VALUE_2_SET(x) (((x) << MAC_PCU_MIC_QOS_SELECT_VALUE_2_LSB) & MAC_PCU_MIC_QOS_SELECT_VALUE_2_MASK)
+#define MAC_PCU_MIC_QOS_SELECT_VALUE_1_MSB 7
+#define MAC_PCU_MIC_QOS_SELECT_VALUE_1_LSB 4
+#define MAC_PCU_MIC_QOS_SELECT_VALUE_1_MASK 0x000000f0
+#define MAC_PCU_MIC_QOS_SELECT_VALUE_1_GET(x) (((x) & MAC_PCU_MIC_QOS_SELECT_VALUE_1_MASK) >> MAC_PCU_MIC_QOS_SELECT_VALUE_1_LSB)
+#define MAC_PCU_MIC_QOS_SELECT_VALUE_1_SET(x) (((x) << MAC_PCU_MIC_QOS_SELECT_VALUE_1_LSB) & MAC_PCU_MIC_QOS_SELECT_VALUE_1_MASK)
+#define MAC_PCU_MIC_QOS_SELECT_VALUE_0_MSB 3
+#define MAC_PCU_MIC_QOS_SELECT_VALUE_0_LSB 0
+#define MAC_PCU_MIC_QOS_SELECT_VALUE_0_MASK 0x0000000f
+#define MAC_PCU_MIC_QOS_SELECT_VALUE_0_GET(x) (((x) & MAC_PCU_MIC_QOS_SELECT_VALUE_0_MASK) >> MAC_PCU_MIC_QOS_SELECT_VALUE_0_LSB)
+#define MAC_PCU_MIC_QOS_SELECT_VALUE_0_SET(x) (((x) << MAC_PCU_MIC_QOS_SELECT_VALUE_0_LSB) & MAC_PCU_MIC_QOS_SELECT_VALUE_0_MASK)
+
+#define MAC_PCU_MISC_MODE_ADDRESS 0x000080c0
+#define MAC_PCU_MISC_MODE_OFFSET 0x000000c0
+#define MAC_PCU_MISC_MODE_DEBUG_MODE_MSB 31
+#define MAC_PCU_MISC_MODE_DEBUG_MODE_LSB 30
+#define MAC_PCU_MISC_MODE_DEBUG_MODE_MASK 0xc0000000
+#define MAC_PCU_MISC_MODE_DEBUG_MODE_GET(x) (((x) & MAC_PCU_MISC_MODE_DEBUG_MODE_MASK) >> MAC_PCU_MISC_MODE_DEBUG_MODE_LSB)
+#define MAC_PCU_MISC_MODE_DEBUG_MODE_SET(x) (((x) << MAC_PCU_MISC_MODE_DEBUG_MODE_LSB) & MAC_PCU_MISC_MODE_DEBUG_MODE_MASK)
+#define MAC_PCU_MISC_MODE_USE_EOP_PTR_FOR_DMA_WR_MSB 29
+#define MAC_PCU_MISC_MODE_USE_EOP_PTR_FOR_DMA_WR_LSB 29
+#define MAC_PCU_MISC_MODE_USE_EOP_PTR_FOR_DMA_WR_MASK 0x20000000
+#define MAC_PCU_MISC_MODE_USE_EOP_PTR_FOR_DMA_WR_GET(x) (((x) & MAC_PCU_MISC_MODE_USE_EOP_PTR_FOR_DMA_WR_MASK) >> MAC_PCU_MISC_MODE_USE_EOP_PTR_FOR_DMA_WR_LSB)
+#define MAC_PCU_MISC_MODE_USE_EOP_PTR_FOR_DMA_WR_SET(x) (((x) << MAC_PCU_MISC_MODE_USE_EOP_PTR_FOR_DMA_WR_LSB) & MAC_PCU_MISC_MODE_USE_EOP_PTR_FOR_DMA_WR_MASK)
+#define MAC_PCU_MISC_MODE_ALWAYS_PERFORM_KEY_SEARCH_MSB 28
+#define MAC_PCU_MISC_MODE_ALWAYS_PERFORM_KEY_SEARCH_LSB 28
+#define MAC_PCU_MISC_MODE_ALWAYS_PERFORM_KEY_SEARCH_MASK 0x10000000
+#define MAC_PCU_MISC_MODE_ALWAYS_PERFORM_KEY_SEARCH_GET(x) (((x) & MAC_PCU_MISC_MODE_ALWAYS_PERFORM_KEY_SEARCH_MASK) >> MAC_PCU_MISC_MODE_ALWAYS_PERFORM_KEY_SEARCH_LSB)
+#define MAC_PCU_MISC_MODE_ALWAYS_PERFORM_KEY_SEARCH_SET(x) (((x) << MAC_PCU_MISC_MODE_ALWAYS_PERFORM_KEY_SEARCH_LSB) & MAC_PCU_MISC_MODE_ALWAYS_PERFORM_KEY_SEARCH_MASK)
+#define MAC_PCU_MISC_MODE_SEL_EVM_MSB 27
+#define MAC_PCU_MISC_MODE_SEL_EVM_LSB 27
+#define MAC_PCU_MISC_MODE_SEL_EVM_MASK 0x08000000
+#define MAC_PCU_MISC_MODE_SEL_EVM_GET(x) (((x) & MAC_PCU_MISC_MODE_SEL_EVM_MASK) >> MAC_PCU_MISC_MODE_SEL_EVM_LSB)
+#define MAC_PCU_MISC_MODE_SEL_EVM_SET(x) (((x) << MAC_PCU_MISC_MODE_SEL_EVM_LSB) & MAC_PCU_MISC_MODE_SEL_EVM_MASK)
+#define MAC_PCU_MISC_MODE_CLEAR_BA_VALID_MSB 26
+#define MAC_PCU_MISC_MODE_CLEAR_BA_VALID_LSB 26
+#define MAC_PCU_MISC_MODE_CLEAR_BA_VALID_MASK 0x04000000
+#define MAC_PCU_MISC_MODE_CLEAR_BA_VALID_GET(x) (((x) & MAC_PCU_MISC_MODE_CLEAR_BA_VALID_MASK) >> MAC_PCU_MISC_MODE_CLEAR_BA_VALID_LSB)
+#define MAC_PCU_MISC_MODE_CLEAR_BA_VALID_SET(x) (((x) << MAC_PCU_MISC_MODE_CLEAR_BA_VALID_LSB) & MAC_PCU_MISC_MODE_CLEAR_BA_VALID_MASK)
+#define MAC_PCU_MISC_MODE_CLEAR_FIRST_HCF_MSB 25
+#define MAC_PCU_MISC_MODE_CLEAR_FIRST_HCF_LSB 25
+#define MAC_PCU_MISC_MODE_CLEAR_FIRST_HCF_MASK 0x02000000
+#define MAC_PCU_MISC_MODE_CLEAR_FIRST_HCF_GET(x) (((x) & MAC_PCU_MISC_MODE_CLEAR_FIRST_HCF_MASK) >> MAC_PCU_MISC_MODE_CLEAR_FIRST_HCF_LSB)
+#define MAC_PCU_MISC_MODE_CLEAR_FIRST_HCF_SET(x) (((x) << MAC_PCU_MISC_MODE_CLEAR_FIRST_HCF_LSB) & MAC_PCU_MISC_MODE_CLEAR_FIRST_HCF_MASK)
+#define MAC_PCU_MISC_MODE_CLEAR_VMF_MSB 24
+#define MAC_PCU_MISC_MODE_CLEAR_VMF_LSB 24
+#define MAC_PCU_MISC_MODE_CLEAR_VMF_MASK 0x01000000
+#define MAC_PCU_MISC_MODE_CLEAR_VMF_GET(x) (((x) & MAC_PCU_MISC_MODE_CLEAR_VMF_MASK) >> MAC_PCU_MISC_MODE_CLEAR_VMF_LSB)
+#define MAC_PCU_MISC_MODE_CLEAR_VMF_SET(x) (((x) << MAC_PCU_MISC_MODE_CLEAR_VMF_LSB) & MAC_PCU_MISC_MODE_CLEAR_VMF_MASK)
+#define MAC_PCU_MISC_MODE_RX_HCF_POLL_ENABLE_MSB 23
+#define MAC_PCU_MISC_MODE_RX_HCF_POLL_ENABLE_LSB 23
+#define MAC_PCU_MISC_MODE_RX_HCF_POLL_ENABLE_MASK 0x00800000
+#define MAC_PCU_MISC_MODE_RX_HCF_POLL_ENABLE_GET(x) (((x) & MAC_PCU_MISC_MODE_RX_HCF_POLL_ENABLE_MASK) >> MAC_PCU_MISC_MODE_RX_HCF_POLL_ENABLE_LSB)
+#define MAC_PCU_MISC_MODE_RX_HCF_POLL_ENABLE_SET(x) (((x) << MAC_PCU_MISC_MODE_RX_HCF_POLL_ENABLE_LSB) & MAC_PCU_MISC_MODE_RX_HCF_POLL_ENABLE_MASK)
+#define MAC_PCU_MISC_MODE_HCF_POLL_CANCELS_NAV_MSB 22
+#define MAC_PCU_MISC_MODE_HCF_POLL_CANCELS_NAV_LSB 22
+#define MAC_PCU_MISC_MODE_HCF_POLL_CANCELS_NAV_MASK 0x00400000
+#define MAC_PCU_MISC_MODE_HCF_POLL_CANCELS_NAV_GET(x) (((x) & MAC_PCU_MISC_MODE_HCF_POLL_CANCELS_NAV_MASK) >> MAC_PCU_MISC_MODE_HCF_POLL_CANCELS_NAV_LSB)
+#define MAC_PCU_MISC_MODE_HCF_POLL_CANCELS_NAV_SET(x) (((x) << MAC_PCU_MISC_MODE_HCF_POLL_CANCELS_NAV_LSB) & MAC_PCU_MISC_MODE_HCF_POLL_CANCELS_NAV_MASK)
+#define MAC_PCU_MISC_MODE_TBTT_PROTECT_MSB 21
+#define MAC_PCU_MISC_MODE_TBTT_PROTECT_LSB 21
+#define MAC_PCU_MISC_MODE_TBTT_PROTECT_MASK 0x00200000
+#define MAC_PCU_MISC_MODE_TBTT_PROTECT_GET(x) (((x) & MAC_PCU_MISC_MODE_TBTT_PROTECT_MASK) >> MAC_PCU_MISC_MODE_TBTT_PROTECT_LSB)
+#define MAC_PCU_MISC_MODE_TBTT_PROTECT_SET(x) (((x) << MAC_PCU_MISC_MODE_TBTT_PROTECT_LSB) & MAC_PCU_MISC_MODE_TBTT_PROTECT_MASK)
+#define MAC_PCU_MISC_MODE_BT_ANT_PREVENTS_RX_MSB 20
+#define MAC_PCU_MISC_MODE_BT_ANT_PREVENTS_RX_LSB 20
+#define MAC_PCU_MISC_MODE_BT_ANT_PREVENTS_RX_MASK 0x00100000
+#define MAC_PCU_MISC_MODE_BT_ANT_PREVENTS_RX_GET(x) (((x) & MAC_PCU_MISC_MODE_BT_ANT_PREVENTS_RX_MASK) >> MAC_PCU_MISC_MODE_BT_ANT_PREVENTS_RX_LSB)
+#define MAC_PCU_MISC_MODE_BT_ANT_PREVENTS_RX_SET(x) (((x) << MAC_PCU_MISC_MODE_BT_ANT_PREVENTS_RX_LSB) & MAC_PCU_MISC_MODE_BT_ANT_PREVENTS_RX_MASK)
+#define MAC_PCU_MISC_MODE_FORCE_QUIET_COLLISION_MSB 18
+#define MAC_PCU_MISC_MODE_FORCE_QUIET_COLLISION_LSB 18
+#define MAC_PCU_MISC_MODE_FORCE_QUIET_COLLISION_MASK 0x00040000
+#define MAC_PCU_MISC_MODE_FORCE_QUIET_COLLISION_GET(x) (((x) & MAC_PCU_MISC_MODE_FORCE_QUIET_COLLISION_MASK) >> MAC_PCU_MISC_MODE_FORCE_QUIET_COLLISION_LSB)
+#define MAC_PCU_MISC_MODE_FORCE_QUIET_COLLISION_SET(x) (((x) << MAC_PCU_MISC_MODE_FORCE_QUIET_COLLISION_LSB) & MAC_PCU_MISC_MODE_FORCE_QUIET_COLLISION_MASK)
+#define MAC_PCU_MISC_MODE_MISS_BEACON_IN_SLEEP_MSB 14
+#define MAC_PCU_MISC_MODE_MISS_BEACON_IN_SLEEP_LSB 14
+#define MAC_PCU_MISC_MODE_MISS_BEACON_IN_SLEEP_MASK 0x00004000
+#define MAC_PCU_MISC_MODE_MISS_BEACON_IN_SLEEP_GET(x) (((x) & MAC_PCU_MISC_MODE_MISS_BEACON_IN_SLEEP_MASK) >> MAC_PCU_MISC_MODE_MISS_BEACON_IN_SLEEP_LSB)
+#define MAC_PCU_MISC_MODE_MISS_BEACON_IN_SLEEP_SET(x) (((x) << MAC_PCU_MISC_MODE_MISS_BEACON_IN_SLEEP_LSB) & MAC_PCU_MISC_MODE_MISS_BEACON_IN_SLEEP_MASK)
+#define MAC_PCU_MISC_MODE_TXOP_TBTT_LIMIT_ENABLE_MSB 12
+#define MAC_PCU_MISC_MODE_TXOP_TBTT_LIMIT_ENABLE_LSB 12
+#define MAC_PCU_MISC_MODE_TXOP_TBTT_LIMIT_ENABLE_MASK 0x00001000
+#define MAC_PCU_MISC_MODE_TXOP_TBTT_LIMIT_ENABLE_GET(x) (((x) & MAC_PCU_MISC_MODE_TXOP_TBTT_LIMIT_ENABLE_MASK) >> MAC_PCU_MISC_MODE_TXOP_TBTT_LIMIT_ENABLE_LSB)
+#define MAC_PCU_MISC_MODE_TXOP_TBTT_LIMIT_ENABLE_SET(x) (((x) << MAC_PCU_MISC_MODE_TXOP_TBTT_LIMIT_ENABLE_LSB) & MAC_PCU_MISC_MODE_TXOP_TBTT_LIMIT_ENABLE_MASK)
+#define MAC_PCU_MISC_MODE_KC_RX_ANT_UPDATE_MSB 11
+#define MAC_PCU_MISC_MODE_KC_RX_ANT_UPDATE_LSB 11
+#define MAC_PCU_MISC_MODE_KC_RX_ANT_UPDATE_MASK 0x00000800
+#define MAC_PCU_MISC_MODE_KC_RX_ANT_UPDATE_GET(x) (((x) & MAC_PCU_MISC_MODE_KC_RX_ANT_UPDATE_MASK) >> MAC_PCU_MISC_MODE_KC_RX_ANT_UPDATE_LSB)
+#define MAC_PCU_MISC_MODE_KC_RX_ANT_UPDATE_SET(x) (((x) << MAC_PCU_MISC_MODE_KC_RX_ANT_UPDATE_LSB) & MAC_PCU_MISC_MODE_KC_RX_ANT_UPDATE_MASK)
+#define MAC_PCU_MISC_MODE_DEBUG_MODE_SIFS_MSB 10
+#define MAC_PCU_MISC_MODE_DEBUG_MODE_SIFS_LSB 10
+#define MAC_PCU_MISC_MODE_DEBUG_MODE_SIFS_MASK 0x00000400
+#define MAC_PCU_MISC_MODE_DEBUG_MODE_SIFS_GET(x) (((x) & MAC_PCU_MISC_MODE_DEBUG_MODE_SIFS_MASK) >> MAC_PCU_MISC_MODE_DEBUG_MODE_SIFS_LSB)
+#define MAC_PCU_MISC_MODE_DEBUG_MODE_SIFS_SET(x) (((x) << MAC_PCU_MISC_MODE_DEBUG_MODE_SIFS_LSB) & MAC_PCU_MISC_MODE_DEBUG_MODE_SIFS_MASK)
+#define MAC_PCU_MISC_MODE_DEBUG_MODE_BA_BITMAP_MSB 9
+#define MAC_PCU_MISC_MODE_DEBUG_MODE_BA_BITMAP_LSB 9
+#define MAC_PCU_MISC_MODE_DEBUG_MODE_BA_BITMAP_MASK 0x00000200
+#define MAC_PCU_MISC_MODE_DEBUG_MODE_BA_BITMAP_GET(x) (((x) & MAC_PCU_MISC_MODE_DEBUG_MODE_BA_BITMAP_MASK) >> MAC_PCU_MISC_MODE_DEBUG_MODE_BA_BITMAP_LSB)
+#define MAC_PCU_MISC_MODE_DEBUG_MODE_BA_BITMAP_SET(x) (((x) << MAC_PCU_MISC_MODE_DEBUG_MODE_BA_BITMAP_LSB) & MAC_PCU_MISC_MODE_DEBUG_MODE_BA_BITMAP_MASK)
+#define MAC_PCU_MISC_MODE_CCK_SIFS_MODE_MSB 4
+#define MAC_PCU_MISC_MODE_CCK_SIFS_MODE_LSB 4
+#define MAC_PCU_MISC_MODE_CCK_SIFS_MODE_MASK 0x00000010
+#define MAC_PCU_MISC_MODE_CCK_SIFS_MODE_GET(x) (((x) & MAC_PCU_MISC_MODE_CCK_SIFS_MODE_MASK) >> MAC_PCU_MISC_MODE_CCK_SIFS_MODE_LSB)
+#define MAC_PCU_MISC_MODE_CCK_SIFS_MODE_SET(x) (((x) << MAC_PCU_MISC_MODE_CCK_SIFS_MODE_LSB) & MAC_PCU_MISC_MODE_CCK_SIFS_MODE_MASK)
+#define MAC_PCU_MISC_MODE_TX_ADD_TSF_MSB 3
+#define MAC_PCU_MISC_MODE_TX_ADD_TSF_LSB 3
+#define MAC_PCU_MISC_MODE_TX_ADD_TSF_MASK 0x00000008
+#define MAC_PCU_MISC_MODE_TX_ADD_TSF_GET(x) (((x) & MAC_PCU_MISC_MODE_TX_ADD_TSF_MASK) >> MAC_PCU_MISC_MODE_TX_ADD_TSF_LSB)
+#define MAC_PCU_MISC_MODE_TX_ADD_TSF_SET(x) (((x) << MAC_PCU_MISC_MODE_TX_ADD_TSF_LSB) & MAC_PCU_MISC_MODE_TX_ADD_TSF_MASK)
+#define MAC_PCU_MISC_MODE_MIC_NEW_LOCATION_ENABLE_MSB 2
+#define MAC_PCU_MISC_MODE_MIC_NEW_LOCATION_ENABLE_LSB 2
+#define MAC_PCU_MISC_MODE_MIC_NEW_LOCATION_ENABLE_MASK 0x00000004
+#define MAC_PCU_MISC_MODE_MIC_NEW_LOCATION_ENABLE_GET(x) (((x) & MAC_PCU_MISC_MODE_MIC_NEW_LOCATION_ENABLE_MASK) >> MAC_PCU_MISC_MODE_MIC_NEW_LOCATION_ENABLE_LSB)
+#define MAC_PCU_MISC_MODE_MIC_NEW_LOCATION_ENABLE_SET(x) (((x) << MAC_PCU_MISC_MODE_MIC_NEW_LOCATION_ENABLE_LSB) & MAC_PCU_MISC_MODE_MIC_NEW_LOCATION_ENABLE_MASK)
+#define MAC_PCU_MISC_MODE_DEBUG_MODE_AD_MSB 1
+#define MAC_PCU_MISC_MODE_DEBUG_MODE_AD_LSB 1
+#define MAC_PCU_MISC_MODE_DEBUG_MODE_AD_MASK 0x00000002
+#define MAC_PCU_MISC_MODE_DEBUG_MODE_AD_GET(x) (((x) & MAC_PCU_MISC_MODE_DEBUG_MODE_AD_MASK) >> MAC_PCU_MISC_MODE_DEBUG_MODE_AD_LSB)
+#define MAC_PCU_MISC_MODE_DEBUG_MODE_AD_SET(x) (((x) << MAC_PCU_MISC_MODE_DEBUG_MODE_AD_LSB) & MAC_PCU_MISC_MODE_DEBUG_MODE_AD_MASK)
+#define MAC_PCU_MISC_MODE_BSSID_MATCH_FORCE_MSB 0
+#define MAC_PCU_MISC_MODE_BSSID_MATCH_FORCE_LSB 0
+#define MAC_PCU_MISC_MODE_BSSID_MATCH_FORCE_MASK 0x00000001
+#define MAC_PCU_MISC_MODE_BSSID_MATCH_FORCE_GET(x) (((x) & MAC_PCU_MISC_MODE_BSSID_MATCH_FORCE_MASK) >> MAC_PCU_MISC_MODE_BSSID_MATCH_FORCE_LSB)
+#define MAC_PCU_MISC_MODE_BSSID_MATCH_FORCE_SET(x) (((x) << MAC_PCU_MISC_MODE_BSSID_MATCH_FORCE_LSB) & MAC_PCU_MISC_MODE_BSSID_MATCH_FORCE_MASK)
+
+#define MAC_PCU_FILTER_OFDM_CNT_ADDRESS 0x000080c4
+#define MAC_PCU_FILTER_OFDM_CNT_OFFSET 0x000000c4
+#define MAC_PCU_FILTER_OFDM_CNT_VALUE_MSB 23
+#define MAC_PCU_FILTER_OFDM_CNT_VALUE_LSB 0
+#define MAC_PCU_FILTER_OFDM_CNT_VALUE_MASK 0x00ffffff
+#define MAC_PCU_FILTER_OFDM_CNT_VALUE_GET(x) (((x) & MAC_PCU_FILTER_OFDM_CNT_VALUE_MASK) >> MAC_PCU_FILTER_OFDM_CNT_VALUE_LSB)
+#define MAC_PCU_FILTER_OFDM_CNT_VALUE_SET(x) (((x) << MAC_PCU_FILTER_OFDM_CNT_VALUE_LSB) & MAC_PCU_FILTER_OFDM_CNT_VALUE_MASK)
+
+#define MAC_PCU_FILTER_CCK_CNT_ADDRESS 0x000080c8
+#define MAC_PCU_FILTER_CCK_CNT_OFFSET 0x000000c8
+#define MAC_PCU_FILTER_CCK_CNT_VALUE_MSB 23
+#define MAC_PCU_FILTER_CCK_CNT_VALUE_LSB 0
+#define MAC_PCU_FILTER_CCK_CNT_VALUE_MASK 0x00ffffff
+#define MAC_PCU_FILTER_CCK_CNT_VALUE_GET(x) (((x) & MAC_PCU_FILTER_CCK_CNT_VALUE_MASK) >> MAC_PCU_FILTER_CCK_CNT_VALUE_LSB)
+#define MAC_PCU_FILTER_CCK_CNT_VALUE_SET(x) (((x) << MAC_PCU_FILTER_CCK_CNT_VALUE_LSB) & MAC_PCU_FILTER_CCK_CNT_VALUE_MASK)
+
+#define MAC_PCU_PHY_ERR_CNT_1_ADDRESS 0x000080cc
+#define MAC_PCU_PHY_ERR_CNT_1_OFFSET 0x000000cc
+#define MAC_PCU_PHY_ERR_CNT_1_VALUE_MSB 23
+#define MAC_PCU_PHY_ERR_CNT_1_VALUE_LSB 0
+#define MAC_PCU_PHY_ERR_CNT_1_VALUE_MASK 0x00ffffff
+#define MAC_PCU_PHY_ERR_CNT_1_VALUE_GET(x) (((x) & MAC_PCU_PHY_ERR_CNT_1_VALUE_MASK) >> MAC_PCU_PHY_ERR_CNT_1_VALUE_LSB)
+#define MAC_PCU_PHY_ERR_CNT_1_VALUE_SET(x) (((x) << MAC_PCU_PHY_ERR_CNT_1_VALUE_LSB) & MAC_PCU_PHY_ERR_CNT_1_VALUE_MASK)
+
+#define MAC_PCU_PHY_ERR_CNT_1_MASK_ADDRESS 0x000080d0
+#define MAC_PCU_PHY_ERR_CNT_1_MASK_OFFSET 0x000000d0
+#define MAC_PCU_PHY_ERR_CNT_1_MASK_VALUE_MSB 31
+#define MAC_PCU_PHY_ERR_CNT_1_MASK_VALUE_LSB 0
+#define MAC_PCU_PHY_ERR_CNT_1_MASK_VALUE_MASK 0xffffffff
+#define MAC_PCU_PHY_ERR_CNT_1_MASK_VALUE_GET(x) (((x) & MAC_PCU_PHY_ERR_CNT_1_MASK_VALUE_MASK) >> MAC_PCU_PHY_ERR_CNT_1_MASK_VALUE_LSB)
+#define MAC_PCU_PHY_ERR_CNT_1_MASK_VALUE_SET(x) (((x) << MAC_PCU_PHY_ERR_CNT_1_MASK_VALUE_LSB) & MAC_PCU_PHY_ERR_CNT_1_MASK_VALUE_MASK)
+
+#define MAC_PCU_PHY_ERR_CNT_2_ADDRESS 0x000080d4
+#define MAC_PCU_PHY_ERR_CNT_2_OFFSET 0x000000d4
+#define MAC_PCU_PHY_ERR_CNT_2_VALUE_MSB 23
+#define MAC_PCU_PHY_ERR_CNT_2_VALUE_LSB 0
+#define MAC_PCU_PHY_ERR_CNT_2_VALUE_MASK 0x00ffffff
+#define MAC_PCU_PHY_ERR_CNT_2_VALUE_GET(x) (((x) & MAC_PCU_PHY_ERR_CNT_2_VALUE_MASK) >> MAC_PCU_PHY_ERR_CNT_2_VALUE_LSB)
+#define MAC_PCU_PHY_ERR_CNT_2_VALUE_SET(x) (((x) << MAC_PCU_PHY_ERR_CNT_2_VALUE_LSB) & MAC_PCU_PHY_ERR_CNT_2_VALUE_MASK)
+
+#define MAC_PCU_PHY_ERR_CNT_2_MASK_ADDRESS 0x000080d8
+#define MAC_PCU_PHY_ERR_CNT_2_MASK_OFFSET 0x000000d8
+#define MAC_PCU_PHY_ERR_CNT_2_MASK_VALUE_MSB 31
+#define MAC_PCU_PHY_ERR_CNT_2_MASK_VALUE_LSB 0
+#define MAC_PCU_PHY_ERR_CNT_2_MASK_VALUE_MASK 0xffffffff
+#define MAC_PCU_PHY_ERR_CNT_2_MASK_VALUE_GET(x) (((x) & MAC_PCU_PHY_ERR_CNT_2_MASK_VALUE_MASK) >> MAC_PCU_PHY_ERR_CNT_2_MASK_VALUE_LSB)
+#define MAC_PCU_PHY_ERR_CNT_2_MASK_VALUE_SET(x) (((x) << MAC_PCU_PHY_ERR_CNT_2_MASK_VALUE_LSB) & MAC_PCU_PHY_ERR_CNT_2_MASK_VALUE_MASK)
+
+#define MAC_PCU_TSF_THRESHOLD_ADDRESS 0x000080dc
+#define MAC_PCU_TSF_THRESHOLD_OFFSET 0x000000dc
+#define MAC_PCU_TSF_THRESHOLD_VALUE_MSB 15
+#define MAC_PCU_TSF_THRESHOLD_VALUE_LSB 0
+#define MAC_PCU_TSF_THRESHOLD_VALUE_MASK 0x0000ffff
+#define MAC_PCU_TSF_THRESHOLD_VALUE_GET(x) (((x) & MAC_PCU_TSF_THRESHOLD_VALUE_MASK) >> MAC_PCU_TSF_THRESHOLD_VALUE_LSB)
+#define MAC_PCU_TSF_THRESHOLD_VALUE_SET(x) (((x) << MAC_PCU_TSF_THRESHOLD_VALUE_LSB) & MAC_PCU_TSF_THRESHOLD_VALUE_MASK)
+
+#define MAC_PCU_PHY_ERROR_EIFS_MASK_ADDRESS 0x000080e0
+#define MAC_PCU_PHY_ERROR_EIFS_MASK_OFFSET 0x000000e0
+#define MAC_PCU_PHY_ERROR_EIFS_MASK_VALUE_MSB 31
+#define MAC_PCU_PHY_ERROR_EIFS_MASK_VALUE_LSB 0
+#define MAC_PCU_PHY_ERROR_EIFS_MASK_VALUE_MASK 0xffffffff
+#define MAC_PCU_PHY_ERROR_EIFS_MASK_VALUE_GET(x) (((x) & MAC_PCU_PHY_ERROR_EIFS_MASK_VALUE_MASK) >> MAC_PCU_PHY_ERROR_EIFS_MASK_VALUE_LSB)
+#define MAC_PCU_PHY_ERROR_EIFS_MASK_VALUE_SET(x) (((x) << MAC_PCU_PHY_ERROR_EIFS_MASK_VALUE_LSB) & MAC_PCU_PHY_ERROR_EIFS_MASK_VALUE_MASK)
+
+#define MAC_PCU_PHY_ERR_CNT_3_ADDRESS 0x000080e4
+#define MAC_PCU_PHY_ERR_CNT_3_OFFSET 0x000000e4
+#define MAC_PCU_PHY_ERR_CNT_3_VALUE_MSB 23
+#define MAC_PCU_PHY_ERR_CNT_3_VALUE_LSB 0
+#define MAC_PCU_PHY_ERR_CNT_3_VALUE_MASK 0x00ffffff
+#define MAC_PCU_PHY_ERR_CNT_3_VALUE_GET(x) (((x) & MAC_PCU_PHY_ERR_CNT_3_VALUE_MASK) >> MAC_PCU_PHY_ERR_CNT_3_VALUE_LSB)
+#define MAC_PCU_PHY_ERR_CNT_3_VALUE_SET(x) (((x) << MAC_PCU_PHY_ERR_CNT_3_VALUE_LSB) & MAC_PCU_PHY_ERR_CNT_3_VALUE_MASK)
+
+#define MAC_PCU_PHY_ERR_CNT_3_MASK_ADDRESS 0x000080e8
+#define MAC_PCU_PHY_ERR_CNT_3_MASK_OFFSET 0x000000e8
+#define MAC_PCU_PHY_ERR_CNT_3_MASK_VALUE_MSB 31
+#define MAC_PCU_PHY_ERR_CNT_3_MASK_VALUE_LSB 0
+#define MAC_PCU_PHY_ERR_CNT_3_MASK_VALUE_MASK 0xffffffff
+#define MAC_PCU_PHY_ERR_CNT_3_MASK_VALUE_GET(x) (((x) & MAC_PCU_PHY_ERR_CNT_3_MASK_VALUE_MASK) >> MAC_PCU_PHY_ERR_CNT_3_MASK_VALUE_LSB)
+#define MAC_PCU_PHY_ERR_CNT_3_MASK_VALUE_SET(x) (((x) << MAC_PCU_PHY_ERR_CNT_3_MASK_VALUE_LSB) & MAC_PCU_PHY_ERR_CNT_3_MASK_VALUE_MASK)
+
+#define MAC_PCU_BLUETOOTH_MODE_ADDRESS 0x000080ec
+#define MAC_PCU_BLUETOOTH_MODE_OFFSET 0x000000ec
+#define MAC_PCU_BLUETOOTH_MODE_FIRST_SLOT_TIME_MSB 31
+#define MAC_PCU_BLUETOOTH_MODE_FIRST_SLOT_TIME_LSB 24
+#define MAC_PCU_BLUETOOTH_MODE_FIRST_SLOT_TIME_MASK 0xff000000
+#define MAC_PCU_BLUETOOTH_MODE_FIRST_SLOT_TIME_GET(x) (((x) & MAC_PCU_BLUETOOTH_MODE_FIRST_SLOT_TIME_MASK) >> MAC_PCU_BLUETOOTH_MODE_FIRST_SLOT_TIME_LSB)
+#define MAC_PCU_BLUETOOTH_MODE_FIRST_SLOT_TIME_SET(x) (((x) << MAC_PCU_BLUETOOTH_MODE_FIRST_SLOT_TIME_LSB) & MAC_PCU_BLUETOOTH_MODE_FIRST_SLOT_TIME_MASK)
+#define MAC_PCU_BLUETOOTH_MODE_PRIORITY_TIME_MSB 23
+#define MAC_PCU_BLUETOOTH_MODE_PRIORITY_TIME_LSB 18
+#define MAC_PCU_BLUETOOTH_MODE_PRIORITY_TIME_MASK 0x00fc0000
+#define MAC_PCU_BLUETOOTH_MODE_PRIORITY_TIME_GET(x) (((x) & MAC_PCU_BLUETOOTH_MODE_PRIORITY_TIME_MASK) >> MAC_PCU_BLUETOOTH_MODE_PRIORITY_TIME_LSB)
+#define MAC_PCU_BLUETOOTH_MODE_PRIORITY_TIME_SET(x) (((x) << MAC_PCU_BLUETOOTH_MODE_PRIORITY_TIME_LSB) & MAC_PCU_BLUETOOTH_MODE_PRIORITY_TIME_MASK)
+#define MAC_PCU_BLUETOOTH_MODE_RX_CLEAR_POLARITY_MSB 17
+#define MAC_PCU_BLUETOOTH_MODE_RX_CLEAR_POLARITY_LSB 17
+#define MAC_PCU_BLUETOOTH_MODE_RX_CLEAR_POLARITY_MASK 0x00020000
+#define MAC_PCU_BLUETOOTH_MODE_RX_CLEAR_POLARITY_GET(x) (((x) & MAC_PCU_BLUETOOTH_MODE_RX_CLEAR_POLARITY_MASK) >> MAC_PCU_BLUETOOTH_MODE_RX_CLEAR_POLARITY_LSB)
+#define MAC_PCU_BLUETOOTH_MODE_RX_CLEAR_POLARITY_SET(x) (((x) << MAC_PCU_BLUETOOTH_MODE_RX_CLEAR_POLARITY_LSB) & MAC_PCU_BLUETOOTH_MODE_RX_CLEAR_POLARITY_MASK)
+#define MAC_PCU_BLUETOOTH_MODE_QCU_THRESH_MSB 16
+#define MAC_PCU_BLUETOOTH_MODE_QCU_THRESH_LSB 13
+#define MAC_PCU_BLUETOOTH_MODE_QCU_THRESH_MASK 0x0001e000
+#define MAC_PCU_BLUETOOTH_MODE_QCU_THRESH_GET(x) (((x) & MAC_PCU_BLUETOOTH_MODE_QCU_THRESH_MASK) >> MAC_PCU_BLUETOOTH_MODE_QCU_THRESH_LSB)
+#define MAC_PCU_BLUETOOTH_MODE_QCU_THRESH_SET(x) (((x) << MAC_PCU_BLUETOOTH_MODE_QCU_THRESH_LSB) & MAC_PCU_BLUETOOTH_MODE_QCU_THRESH_MASK)
+#define MAC_PCU_BLUETOOTH_MODE_QUIET_MSB 12
+#define MAC_PCU_BLUETOOTH_MODE_QUIET_LSB 12
+#define MAC_PCU_BLUETOOTH_MODE_QUIET_MASK 0x00001000
+#define MAC_PCU_BLUETOOTH_MODE_QUIET_GET(x) (((x) & MAC_PCU_BLUETOOTH_MODE_QUIET_MASK) >> MAC_PCU_BLUETOOTH_MODE_QUIET_LSB)
+#define MAC_PCU_BLUETOOTH_MODE_QUIET_SET(x) (((x) << MAC_PCU_BLUETOOTH_MODE_QUIET_LSB) & MAC_PCU_BLUETOOTH_MODE_QUIET_MASK)
+#define MAC_PCU_BLUETOOTH_MODE_MODE_MSB 11
+#define MAC_PCU_BLUETOOTH_MODE_MODE_LSB 10
+#define MAC_PCU_BLUETOOTH_MODE_MODE_MASK 0x00000c00
+#define MAC_PCU_BLUETOOTH_MODE_MODE_GET(x) (((x) & MAC_PCU_BLUETOOTH_MODE_MODE_MASK) >> MAC_PCU_BLUETOOTH_MODE_MODE_LSB)
+#define MAC_PCU_BLUETOOTH_MODE_MODE_SET(x) (((x) << MAC_PCU_BLUETOOTH_MODE_MODE_LSB) & MAC_PCU_BLUETOOTH_MODE_MODE_MASK)
+#define MAC_PCU_BLUETOOTH_MODE_TX_FRAME_EXTEND_MSB 9
+#define MAC_PCU_BLUETOOTH_MODE_TX_FRAME_EXTEND_LSB 9
+#define MAC_PCU_BLUETOOTH_MODE_TX_FRAME_EXTEND_MASK 0x00000200
+#define MAC_PCU_BLUETOOTH_MODE_TX_FRAME_EXTEND_GET(x) (((x) & MAC_PCU_BLUETOOTH_MODE_TX_FRAME_EXTEND_MASK) >> MAC_PCU_BLUETOOTH_MODE_TX_FRAME_EXTEND_LSB)
+#define MAC_PCU_BLUETOOTH_MODE_TX_FRAME_EXTEND_SET(x) (((x) << MAC_PCU_BLUETOOTH_MODE_TX_FRAME_EXTEND_LSB) & MAC_PCU_BLUETOOTH_MODE_TX_FRAME_EXTEND_MASK)
+#define MAC_PCU_BLUETOOTH_MODE_TX_STATE_EXTEND_MSB 8
+#define MAC_PCU_BLUETOOTH_MODE_TX_STATE_EXTEND_LSB 8
+#define MAC_PCU_BLUETOOTH_MODE_TX_STATE_EXTEND_MASK 0x00000100
+#define MAC_PCU_BLUETOOTH_MODE_TX_STATE_EXTEND_GET(x) (((x) & MAC_PCU_BLUETOOTH_MODE_TX_STATE_EXTEND_MASK) >> MAC_PCU_BLUETOOTH_MODE_TX_STATE_EXTEND_LSB)
+#define MAC_PCU_BLUETOOTH_MODE_TX_STATE_EXTEND_SET(x) (((x) << MAC_PCU_BLUETOOTH_MODE_TX_STATE_EXTEND_LSB) & MAC_PCU_BLUETOOTH_MODE_TX_STATE_EXTEND_MASK)
+#define MAC_PCU_BLUETOOTH_MODE_TIME_EXTEND_MSB 7
+#define MAC_PCU_BLUETOOTH_MODE_TIME_EXTEND_LSB 0
+#define MAC_PCU_BLUETOOTH_MODE_TIME_EXTEND_MASK 0x000000ff
+#define MAC_PCU_BLUETOOTH_MODE_TIME_EXTEND_GET(x) (((x) & MAC_PCU_BLUETOOTH_MODE_TIME_EXTEND_MASK) >> MAC_PCU_BLUETOOTH_MODE_TIME_EXTEND_LSB)
+#define MAC_PCU_BLUETOOTH_MODE_TIME_EXTEND_SET(x) (((x) << MAC_PCU_BLUETOOTH_MODE_TIME_EXTEND_LSB) & MAC_PCU_BLUETOOTH_MODE_TIME_EXTEND_MASK)
+
+#define MAC_PCU_BLUETOOTH_WEIGHTS_ADDRESS 0x000080f0
+#define MAC_PCU_BLUETOOTH_WEIGHTS_OFFSET 0x000000f0
+#define MAC_PCU_BLUETOOTH_WEIGHTS_WL_WEIGHT_MSB 31
+#define MAC_PCU_BLUETOOTH_WEIGHTS_WL_WEIGHT_LSB 16
+#define MAC_PCU_BLUETOOTH_WEIGHTS_WL_WEIGHT_MASK 0xffff0000
+#define MAC_PCU_BLUETOOTH_WEIGHTS_WL_WEIGHT_GET(x) (((x) & MAC_PCU_BLUETOOTH_WEIGHTS_WL_WEIGHT_MASK) >> MAC_PCU_BLUETOOTH_WEIGHTS_WL_WEIGHT_LSB)
+#define MAC_PCU_BLUETOOTH_WEIGHTS_WL_WEIGHT_SET(x) (((x) << MAC_PCU_BLUETOOTH_WEIGHTS_WL_WEIGHT_LSB) & MAC_PCU_BLUETOOTH_WEIGHTS_WL_WEIGHT_MASK)
+#define MAC_PCU_BLUETOOTH_WEIGHTS_BT_WEIGHT_MSB 15
+#define MAC_PCU_BLUETOOTH_WEIGHTS_BT_WEIGHT_LSB 0
+#define MAC_PCU_BLUETOOTH_WEIGHTS_BT_WEIGHT_MASK 0x0000ffff
+#define MAC_PCU_BLUETOOTH_WEIGHTS_BT_WEIGHT_GET(x) (((x) & MAC_PCU_BLUETOOTH_WEIGHTS_BT_WEIGHT_MASK) >> MAC_PCU_BLUETOOTH_WEIGHTS_BT_WEIGHT_LSB)
+#define MAC_PCU_BLUETOOTH_WEIGHTS_BT_WEIGHT_SET(x) (((x) << MAC_PCU_BLUETOOTH_WEIGHTS_BT_WEIGHT_LSB) & MAC_PCU_BLUETOOTH_WEIGHTS_BT_WEIGHT_MASK)
+
+#define MAC_PCU_BLUETOOTH_MODE2_ADDRESS 0x000080f4
+#define MAC_PCU_BLUETOOTH_MODE2_OFFSET 0x000000f4
+#define MAC_PCU_BLUETOOTH_MODE2_PHY_ERR_BT_COLL_ENABLE_MSB 31
+#define MAC_PCU_BLUETOOTH_MODE2_PHY_ERR_BT_COLL_ENABLE_LSB 31
+#define MAC_PCU_BLUETOOTH_MODE2_PHY_ERR_BT_COLL_ENABLE_MASK 0x80000000
+#define MAC_PCU_BLUETOOTH_MODE2_PHY_ERR_BT_COLL_ENABLE_GET(x) (((x) & MAC_PCU_BLUETOOTH_MODE2_PHY_ERR_BT_COLL_ENABLE_MASK) >> MAC_PCU_BLUETOOTH_MODE2_PHY_ERR_BT_COLL_ENABLE_LSB)
+#define MAC_PCU_BLUETOOTH_MODE2_PHY_ERR_BT_COLL_ENABLE_SET(x) (((x) << MAC_PCU_BLUETOOTH_MODE2_PHY_ERR_BT_COLL_ENABLE_LSB) & MAC_PCU_BLUETOOTH_MODE2_PHY_ERR_BT_COLL_ENABLE_MASK)
+#define MAC_PCU_BLUETOOTH_MODE2_INTERRUPT_ENABLE_MSB 30
+#define MAC_PCU_BLUETOOTH_MODE2_INTERRUPT_ENABLE_LSB 30
+#define MAC_PCU_BLUETOOTH_MODE2_INTERRUPT_ENABLE_MASK 0x40000000
+#define MAC_PCU_BLUETOOTH_MODE2_INTERRUPT_ENABLE_GET(x) (((x) & MAC_PCU_BLUETOOTH_MODE2_INTERRUPT_ENABLE_MASK) >> MAC_PCU_BLUETOOTH_MODE2_INTERRUPT_ENABLE_LSB)
+#define MAC_PCU_BLUETOOTH_MODE2_INTERRUPT_ENABLE_SET(x) (((x) << MAC_PCU_BLUETOOTH_MODE2_INTERRUPT_ENABLE_LSB) & MAC_PCU_BLUETOOTH_MODE2_INTERRUPT_ENABLE_MASK)
+#define MAC_PCU_BLUETOOTH_MODE2_TSF_BT_PRIORITY_CTRL_MSB 29
+#define MAC_PCU_BLUETOOTH_MODE2_TSF_BT_PRIORITY_CTRL_LSB 28
+#define MAC_PCU_BLUETOOTH_MODE2_TSF_BT_PRIORITY_CTRL_MASK 0x30000000
+#define MAC_PCU_BLUETOOTH_MODE2_TSF_BT_PRIORITY_CTRL_GET(x) (((x) & MAC_PCU_BLUETOOTH_MODE2_TSF_BT_PRIORITY_CTRL_MASK) >> MAC_PCU_BLUETOOTH_MODE2_TSF_BT_PRIORITY_CTRL_LSB)
+#define MAC_PCU_BLUETOOTH_MODE2_TSF_BT_PRIORITY_CTRL_SET(x) (((x) << MAC_PCU_BLUETOOTH_MODE2_TSF_BT_PRIORITY_CTRL_LSB) & MAC_PCU_BLUETOOTH_MODE2_TSF_BT_PRIORITY_CTRL_MASK)
+#define MAC_PCU_BLUETOOTH_MODE2_TSF_BT_ACTIVE_CTRL_MSB 27
+#define MAC_PCU_BLUETOOTH_MODE2_TSF_BT_ACTIVE_CTRL_LSB 26
+#define MAC_PCU_BLUETOOTH_MODE2_TSF_BT_ACTIVE_CTRL_MASK 0x0c000000
+#define MAC_PCU_BLUETOOTH_MODE2_TSF_BT_ACTIVE_CTRL_GET(x) (((x) & MAC_PCU_BLUETOOTH_MODE2_TSF_BT_ACTIVE_CTRL_MASK) >> MAC_PCU_BLUETOOTH_MODE2_TSF_BT_ACTIVE_CTRL_LSB)
+#define MAC_PCU_BLUETOOTH_MODE2_TSF_BT_ACTIVE_CTRL_SET(x) (((x) << MAC_PCU_BLUETOOTH_MODE2_TSF_BT_ACTIVE_CTRL_LSB) & MAC_PCU_BLUETOOTH_MODE2_TSF_BT_ACTIVE_CTRL_MASK)
+#define MAC_PCU_BLUETOOTH_MODE2_RS_DISCARD_EXTEND_MSB 25
+#define MAC_PCU_BLUETOOTH_MODE2_RS_DISCARD_EXTEND_LSB 25
+#define MAC_PCU_BLUETOOTH_MODE2_RS_DISCARD_EXTEND_MASK 0x02000000
+#define MAC_PCU_BLUETOOTH_MODE2_RS_DISCARD_EXTEND_GET(x) (((x) & MAC_PCU_BLUETOOTH_MODE2_RS_DISCARD_EXTEND_MASK) >> MAC_PCU_BLUETOOTH_MODE2_RS_DISCARD_EXTEND_LSB)
+#define MAC_PCU_BLUETOOTH_MODE2_RS_DISCARD_EXTEND_SET(x) (((x) << MAC_PCU_BLUETOOTH_MODE2_RS_DISCARD_EXTEND_LSB) & MAC_PCU_BLUETOOTH_MODE2_RS_DISCARD_EXTEND_MASK)
+#define MAC_PCU_BLUETOOTH_MODE2_WL_TXRX_SEPARATE_MSB 24
+#define MAC_PCU_BLUETOOTH_MODE2_WL_TXRX_SEPARATE_LSB 24
+#define MAC_PCU_BLUETOOTH_MODE2_WL_TXRX_SEPARATE_MASK 0x01000000
+#define MAC_PCU_BLUETOOTH_MODE2_WL_TXRX_SEPARATE_GET(x) (((x) & MAC_PCU_BLUETOOTH_MODE2_WL_TXRX_SEPARATE_MASK) >> MAC_PCU_BLUETOOTH_MODE2_WL_TXRX_SEPARATE_LSB)
+#define MAC_PCU_BLUETOOTH_MODE2_WL_TXRX_SEPARATE_SET(x) (((x) << MAC_PCU_BLUETOOTH_MODE2_WL_TXRX_SEPARATE_LSB) & MAC_PCU_BLUETOOTH_MODE2_WL_TXRX_SEPARATE_MASK)
+#define MAC_PCU_BLUETOOTH_MODE2_WL_ACTIVE_MODE_MSB 23
+#define MAC_PCU_BLUETOOTH_MODE2_WL_ACTIVE_MODE_LSB 22
+#define MAC_PCU_BLUETOOTH_MODE2_WL_ACTIVE_MODE_MASK 0x00c00000
+#define MAC_PCU_BLUETOOTH_MODE2_WL_ACTIVE_MODE_GET(x) (((x) & MAC_PCU_BLUETOOTH_MODE2_WL_ACTIVE_MODE_MASK) >> MAC_PCU_BLUETOOTH_MODE2_WL_ACTIVE_MODE_LSB)
+#define MAC_PCU_BLUETOOTH_MODE2_WL_ACTIVE_MODE_SET(x) (((x) << MAC_PCU_BLUETOOTH_MODE2_WL_ACTIVE_MODE_LSB) & MAC_PCU_BLUETOOTH_MODE2_WL_ACTIVE_MODE_MASK)
+#define MAC_PCU_BLUETOOTH_MODE2_QUIET_2_WIRE_MSB 21
+#define MAC_PCU_BLUETOOTH_MODE2_QUIET_2_WIRE_LSB 21
+#define MAC_PCU_BLUETOOTH_MODE2_QUIET_2_WIRE_MASK 0x00200000
+#define MAC_PCU_BLUETOOTH_MODE2_QUIET_2_WIRE_GET(x) (((x) & MAC_PCU_BLUETOOTH_MODE2_QUIET_2_WIRE_MASK) >> MAC_PCU_BLUETOOTH_MODE2_QUIET_2_WIRE_LSB)
+#define MAC_PCU_BLUETOOTH_MODE2_QUIET_2_WIRE_SET(x) (((x) << MAC_PCU_BLUETOOTH_MODE2_QUIET_2_WIRE_LSB) & MAC_PCU_BLUETOOTH_MODE2_QUIET_2_WIRE_MASK)
+#define MAC_PCU_BLUETOOTH_MODE2_DISABLE_BT_ANT_MSB 20
+#define MAC_PCU_BLUETOOTH_MODE2_DISABLE_BT_ANT_LSB 20
+#define MAC_PCU_BLUETOOTH_MODE2_DISABLE_BT_ANT_MASK 0x00100000
+#define MAC_PCU_BLUETOOTH_MODE2_DISABLE_BT_ANT_GET(x) (((x) & MAC_PCU_BLUETOOTH_MODE2_DISABLE_BT_ANT_MASK) >> MAC_PCU_BLUETOOTH_MODE2_DISABLE_BT_ANT_LSB)
+#define MAC_PCU_BLUETOOTH_MODE2_DISABLE_BT_ANT_SET(x) (((x) << MAC_PCU_BLUETOOTH_MODE2_DISABLE_BT_ANT_LSB) & MAC_PCU_BLUETOOTH_MODE2_DISABLE_BT_ANT_MASK)
+#define MAC_PCU_BLUETOOTH_MODE2_PROTECT_BT_AFTER_WAKEUP_MSB 19
+#define MAC_PCU_BLUETOOTH_MODE2_PROTECT_BT_AFTER_WAKEUP_LSB 19
+#define MAC_PCU_BLUETOOTH_MODE2_PROTECT_BT_AFTER_WAKEUP_MASK 0x00080000
+#define MAC_PCU_BLUETOOTH_MODE2_PROTECT_BT_AFTER_WAKEUP_GET(x) (((x) & MAC_PCU_BLUETOOTH_MODE2_PROTECT_BT_AFTER_WAKEUP_MASK) >> MAC_PCU_BLUETOOTH_MODE2_PROTECT_BT_AFTER_WAKEUP_LSB)
+#define MAC_PCU_BLUETOOTH_MODE2_PROTECT_BT_AFTER_WAKEUP_SET(x) (((x) << MAC_PCU_BLUETOOTH_MODE2_PROTECT_BT_AFTER_WAKEUP_LSB) & MAC_PCU_BLUETOOTH_MODE2_PROTECT_BT_AFTER_WAKEUP_MASK)
+#define MAC_PCU_BLUETOOTH_MODE2_SLEEP_ALLOW_BT_ACCESS_MSB 17
+#define MAC_PCU_BLUETOOTH_MODE2_SLEEP_ALLOW_BT_ACCESS_LSB 17
+#define MAC_PCU_BLUETOOTH_MODE2_SLEEP_ALLOW_BT_ACCESS_MASK 0x00020000
+#define MAC_PCU_BLUETOOTH_MODE2_SLEEP_ALLOW_BT_ACCESS_GET(x) (((x) & MAC_PCU_BLUETOOTH_MODE2_SLEEP_ALLOW_BT_ACCESS_MASK) >> MAC_PCU_BLUETOOTH_MODE2_SLEEP_ALLOW_BT_ACCESS_LSB)
+#define MAC_PCU_BLUETOOTH_MODE2_SLEEP_ALLOW_BT_ACCESS_SET(x) (((x) << MAC_PCU_BLUETOOTH_MODE2_SLEEP_ALLOW_BT_ACCESS_LSB) & MAC_PCU_BLUETOOTH_MODE2_SLEEP_ALLOW_BT_ACCESS_MASK)
+#define MAC_PCU_BLUETOOTH_MODE2_HOLD_RX_CLEAR_MSB 16
+#define MAC_PCU_BLUETOOTH_MODE2_HOLD_RX_CLEAR_LSB 16
+#define MAC_PCU_BLUETOOTH_MODE2_HOLD_RX_CLEAR_MASK 0x00010000
+#define MAC_PCU_BLUETOOTH_MODE2_HOLD_RX_CLEAR_GET(x) (((x) & MAC_PCU_BLUETOOTH_MODE2_HOLD_RX_CLEAR_MASK) >> MAC_PCU_BLUETOOTH_MODE2_HOLD_RX_CLEAR_LSB)
+#define MAC_PCU_BLUETOOTH_MODE2_HOLD_RX_CLEAR_SET(x) (((x) << MAC_PCU_BLUETOOTH_MODE2_HOLD_RX_CLEAR_LSB) & MAC_PCU_BLUETOOTH_MODE2_HOLD_RX_CLEAR_MASK)
+#define MAC_PCU_BLUETOOTH_MODE2_BCN_MISS_CNT_MSB 15
+#define MAC_PCU_BLUETOOTH_MODE2_BCN_MISS_CNT_LSB 8
+#define MAC_PCU_BLUETOOTH_MODE2_BCN_MISS_CNT_MASK 0x0000ff00
+#define MAC_PCU_BLUETOOTH_MODE2_BCN_MISS_CNT_GET(x) (((x) & MAC_PCU_BLUETOOTH_MODE2_BCN_MISS_CNT_MASK) >> MAC_PCU_BLUETOOTH_MODE2_BCN_MISS_CNT_LSB)
+#define MAC_PCU_BLUETOOTH_MODE2_BCN_MISS_CNT_SET(x) (((x) << MAC_PCU_BLUETOOTH_MODE2_BCN_MISS_CNT_LSB) & MAC_PCU_BLUETOOTH_MODE2_BCN_MISS_CNT_MASK)
+#define MAC_PCU_BLUETOOTH_MODE2_BCN_MISS_THRESH_MSB 7
+#define MAC_PCU_BLUETOOTH_MODE2_BCN_MISS_THRESH_LSB 0
+#define MAC_PCU_BLUETOOTH_MODE2_BCN_MISS_THRESH_MASK 0x000000ff
+#define MAC_PCU_BLUETOOTH_MODE2_BCN_MISS_THRESH_GET(x) (((x) & MAC_PCU_BLUETOOTH_MODE2_BCN_MISS_THRESH_MASK) >> MAC_PCU_BLUETOOTH_MODE2_BCN_MISS_THRESH_LSB)
+#define MAC_PCU_BLUETOOTH_MODE2_BCN_MISS_THRESH_SET(x) (((x) << MAC_PCU_BLUETOOTH_MODE2_BCN_MISS_THRESH_LSB) & MAC_PCU_BLUETOOTH_MODE2_BCN_MISS_THRESH_MASK)
+
+#define MAC_PCU_TXSIFS_ADDRESS 0x000080f8
+#define MAC_PCU_TXSIFS_OFFSET 0x000000f8
+#define MAC_PCU_TXSIFS_ACK_SHIFT_MSB 14
+#define MAC_PCU_TXSIFS_ACK_SHIFT_LSB 12
+#define MAC_PCU_TXSIFS_ACK_SHIFT_MASK 0x00007000
+#define MAC_PCU_TXSIFS_ACK_SHIFT_GET(x) (((x) & MAC_PCU_TXSIFS_ACK_SHIFT_MASK) >> MAC_PCU_TXSIFS_ACK_SHIFT_LSB)
+#define MAC_PCU_TXSIFS_ACK_SHIFT_SET(x) (((x) << MAC_PCU_TXSIFS_ACK_SHIFT_LSB) & MAC_PCU_TXSIFS_ACK_SHIFT_MASK)
+#define MAC_PCU_TXSIFS_TX_LATENCY_MSB 11
+#define MAC_PCU_TXSIFS_TX_LATENCY_LSB 8
+#define MAC_PCU_TXSIFS_TX_LATENCY_MASK 0x00000f00
+#define MAC_PCU_TXSIFS_TX_LATENCY_GET(x) (((x) & MAC_PCU_TXSIFS_TX_LATENCY_MASK) >> MAC_PCU_TXSIFS_TX_LATENCY_LSB)
+#define MAC_PCU_TXSIFS_TX_LATENCY_SET(x) (((x) << MAC_PCU_TXSIFS_TX_LATENCY_LSB) & MAC_PCU_TXSIFS_TX_LATENCY_MASK)
+#define MAC_PCU_TXSIFS_SIFS_TIME_MSB 7
+#define MAC_PCU_TXSIFS_SIFS_TIME_LSB 0
+#define MAC_PCU_TXSIFS_SIFS_TIME_MASK 0x000000ff
+#define MAC_PCU_TXSIFS_SIFS_TIME_GET(x) (((x) & MAC_PCU_TXSIFS_SIFS_TIME_MASK) >> MAC_PCU_TXSIFS_SIFS_TIME_LSB)
+#define MAC_PCU_TXSIFS_SIFS_TIME_SET(x) (((x) << MAC_PCU_TXSIFS_SIFS_TIME_LSB) & MAC_PCU_TXSIFS_SIFS_TIME_MASK)
+
+#define MAC_PCU_TXOP_X_ADDRESS 0x000080fc
+#define MAC_PCU_TXOP_X_OFFSET 0x000000fc
+#define MAC_PCU_TXOP_X_VALUE_MSB 7
+#define MAC_PCU_TXOP_X_VALUE_LSB 0
+#define MAC_PCU_TXOP_X_VALUE_MASK 0x000000ff
+#define MAC_PCU_TXOP_X_VALUE_GET(x) (((x) & MAC_PCU_TXOP_X_VALUE_MASK) >> MAC_PCU_TXOP_X_VALUE_LSB)
+#define MAC_PCU_TXOP_X_VALUE_SET(x) (((x) << MAC_PCU_TXOP_X_VALUE_LSB) & MAC_PCU_TXOP_X_VALUE_MASK)
+
+#define MAC_PCU_TXOP_0_3_ADDRESS 0x00008100
+#define MAC_PCU_TXOP_0_3_OFFSET 0x00000100
+#define MAC_PCU_TXOP_0_3_VALUE_3_MSB 31
+#define MAC_PCU_TXOP_0_3_VALUE_3_LSB 24
+#define MAC_PCU_TXOP_0_3_VALUE_3_MASK 0xff000000
+#define MAC_PCU_TXOP_0_3_VALUE_3_GET(x) (((x) & MAC_PCU_TXOP_0_3_VALUE_3_MASK) >> MAC_PCU_TXOP_0_3_VALUE_3_LSB)
+#define MAC_PCU_TXOP_0_3_VALUE_3_SET(x) (((x) << MAC_PCU_TXOP_0_3_VALUE_3_LSB) & MAC_PCU_TXOP_0_3_VALUE_3_MASK)
+#define MAC_PCU_TXOP_0_3_VALUE_2_MSB 23
+#define MAC_PCU_TXOP_0_3_VALUE_2_LSB 16
+#define MAC_PCU_TXOP_0_3_VALUE_2_MASK 0x00ff0000
+#define MAC_PCU_TXOP_0_3_VALUE_2_GET(x) (((x) & MAC_PCU_TXOP_0_3_VALUE_2_MASK) >> MAC_PCU_TXOP_0_3_VALUE_2_LSB)
+#define MAC_PCU_TXOP_0_3_VALUE_2_SET(x) (((x) << MAC_PCU_TXOP_0_3_VALUE_2_LSB) & MAC_PCU_TXOP_0_3_VALUE_2_MASK)
+#define MAC_PCU_TXOP_0_3_VALUE_1_MSB 15
+#define MAC_PCU_TXOP_0_3_VALUE_1_LSB 8
+#define MAC_PCU_TXOP_0_3_VALUE_1_MASK 0x0000ff00
+#define MAC_PCU_TXOP_0_3_VALUE_1_GET(x) (((x) & MAC_PCU_TXOP_0_3_VALUE_1_MASK) >> MAC_PCU_TXOP_0_3_VALUE_1_LSB)
+#define MAC_PCU_TXOP_0_3_VALUE_1_SET(x) (((x) << MAC_PCU_TXOP_0_3_VALUE_1_LSB) & MAC_PCU_TXOP_0_3_VALUE_1_MASK)
+#define MAC_PCU_TXOP_0_3_VALUE_0_MSB 7
+#define MAC_PCU_TXOP_0_3_VALUE_0_LSB 0
+#define MAC_PCU_TXOP_0_3_VALUE_0_MASK 0x000000ff
+#define MAC_PCU_TXOP_0_3_VALUE_0_GET(x) (((x) & MAC_PCU_TXOP_0_3_VALUE_0_MASK) >> MAC_PCU_TXOP_0_3_VALUE_0_LSB)
+#define MAC_PCU_TXOP_0_3_VALUE_0_SET(x) (((x) << MAC_PCU_TXOP_0_3_VALUE_0_LSB) & MAC_PCU_TXOP_0_3_VALUE_0_MASK)
+
+#define MAC_PCU_TXOP_4_7_ADDRESS 0x00008104
+#define MAC_PCU_TXOP_4_7_OFFSET 0x00000104
+#define MAC_PCU_TXOP_4_7_VALUE_7_MSB 31
+#define MAC_PCU_TXOP_4_7_VALUE_7_LSB 24
+#define MAC_PCU_TXOP_4_7_VALUE_7_MASK 0xff000000
+#define MAC_PCU_TXOP_4_7_VALUE_7_GET(x) (((x) & MAC_PCU_TXOP_4_7_VALUE_7_MASK) >> MAC_PCU_TXOP_4_7_VALUE_7_LSB)
+#define MAC_PCU_TXOP_4_7_VALUE_7_SET(x) (((x) << MAC_PCU_TXOP_4_7_VALUE_7_LSB) & MAC_PCU_TXOP_4_7_VALUE_7_MASK)
+#define MAC_PCU_TXOP_4_7_VALUE_6_MSB 23
+#define MAC_PCU_TXOP_4_7_VALUE_6_LSB 16
+#define MAC_PCU_TXOP_4_7_VALUE_6_MASK 0x00ff0000
+#define MAC_PCU_TXOP_4_7_VALUE_6_GET(x) (((x) & MAC_PCU_TXOP_4_7_VALUE_6_MASK) >> MAC_PCU_TXOP_4_7_VALUE_6_LSB)
+#define MAC_PCU_TXOP_4_7_VALUE_6_SET(x) (((x) << MAC_PCU_TXOP_4_7_VALUE_6_LSB) & MAC_PCU_TXOP_4_7_VALUE_6_MASK)
+#define MAC_PCU_TXOP_4_7_VALUE_5_MSB 15
+#define MAC_PCU_TXOP_4_7_VALUE_5_LSB 8
+#define MAC_PCU_TXOP_4_7_VALUE_5_MASK 0x0000ff00
+#define MAC_PCU_TXOP_4_7_VALUE_5_GET(x) (((x) & MAC_PCU_TXOP_4_7_VALUE_5_MASK) >> MAC_PCU_TXOP_4_7_VALUE_5_LSB)
+#define MAC_PCU_TXOP_4_7_VALUE_5_SET(x) (((x) << MAC_PCU_TXOP_4_7_VALUE_5_LSB) & MAC_PCU_TXOP_4_7_VALUE_5_MASK)
+#define MAC_PCU_TXOP_4_7_VALUE_4_MSB 7
+#define MAC_PCU_TXOP_4_7_VALUE_4_LSB 0
+#define MAC_PCU_TXOP_4_7_VALUE_4_MASK 0x000000ff
+#define MAC_PCU_TXOP_4_7_VALUE_4_GET(x) (((x) & MAC_PCU_TXOP_4_7_VALUE_4_MASK) >> MAC_PCU_TXOP_4_7_VALUE_4_LSB)
+#define MAC_PCU_TXOP_4_7_VALUE_4_SET(x) (((x) << MAC_PCU_TXOP_4_7_VALUE_4_LSB) & MAC_PCU_TXOP_4_7_VALUE_4_MASK)
+
+#define MAC_PCU_TXOP_8_11_ADDRESS 0x00008108
+#define MAC_PCU_TXOP_8_11_OFFSET 0x00000108
+#define MAC_PCU_TXOP_8_11_VALUE_11_MSB 31
+#define MAC_PCU_TXOP_8_11_VALUE_11_LSB 24
+#define MAC_PCU_TXOP_8_11_VALUE_11_MASK 0xff000000
+#define MAC_PCU_TXOP_8_11_VALUE_11_GET(x) (((x) & MAC_PCU_TXOP_8_11_VALUE_11_MASK) >> MAC_PCU_TXOP_8_11_VALUE_11_LSB)
+#define MAC_PCU_TXOP_8_11_VALUE_11_SET(x) (((x) << MAC_PCU_TXOP_8_11_VALUE_11_LSB) & MAC_PCU_TXOP_8_11_VALUE_11_MASK)
+#define MAC_PCU_TXOP_8_11_VALUE_10_MSB 23
+#define MAC_PCU_TXOP_8_11_VALUE_10_LSB 16
+#define MAC_PCU_TXOP_8_11_VALUE_10_MASK 0x00ff0000
+#define MAC_PCU_TXOP_8_11_VALUE_10_GET(x) (((x) & MAC_PCU_TXOP_8_11_VALUE_10_MASK) >> MAC_PCU_TXOP_8_11_VALUE_10_LSB)
+#define MAC_PCU_TXOP_8_11_VALUE_10_SET(x) (((x) << MAC_PCU_TXOP_8_11_VALUE_10_LSB) & MAC_PCU_TXOP_8_11_VALUE_10_MASK)
+#define MAC_PCU_TXOP_8_11_VALUE_9_MSB 15
+#define MAC_PCU_TXOP_8_11_VALUE_9_LSB 8
+#define MAC_PCU_TXOP_8_11_VALUE_9_MASK 0x0000ff00
+#define MAC_PCU_TXOP_8_11_VALUE_9_GET(x) (((x) & MAC_PCU_TXOP_8_11_VALUE_9_MASK) >> MAC_PCU_TXOP_8_11_VALUE_9_LSB)
+#define MAC_PCU_TXOP_8_11_VALUE_9_SET(x) (((x) << MAC_PCU_TXOP_8_11_VALUE_9_LSB) & MAC_PCU_TXOP_8_11_VALUE_9_MASK)
+#define MAC_PCU_TXOP_8_11_VALUE_8_MSB 7
+#define MAC_PCU_TXOP_8_11_VALUE_8_LSB 0
+#define MAC_PCU_TXOP_8_11_VALUE_8_MASK 0x000000ff
+#define MAC_PCU_TXOP_8_11_VALUE_8_GET(x) (((x) & MAC_PCU_TXOP_8_11_VALUE_8_MASK) >> MAC_PCU_TXOP_8_11_VALUE_8_LSB)
+#define MAC_PCU_TXOP_8_11_VALUE_8_SET(x) (((x) << MAC_PCU_TXOP_8_11_VALUE_8_LSB) & MAC_PCU_TXOP_8_11_VALUE_8_MASK)
+
+#define MAC_PCU_TXOP_12_15_ADDRESS 0x0000810c
+#define MAC_PCU_TXOP_12_15_OFFSET 0x0000010c
+#define MAC_PCU_TXOP_12_15_VALUE_15_MSB 31
+#define MAC_PCU_TXOP_12_15_VALUE_15_LSB 24
+#define MAC_PCU_TXOP_12_15_VALUE_15_MASK 0xff000000
+#define MAC_PCU_TXOP_12_15_VALUE_15_GET(x) (((x) & MAC_PCU_TXOP_12_15_VALUE_15_MASK) >> MAC_PCU_TXOP_12_15_VALUE_15_LSB)
+#define MAC_PCU_TXOP_12_15_VALUE_15_SET(x) (((x) << MAC_PCU_TXOP_12_15_VALUE_15_LSB) & MAC_PCU_TXOP_12_15_VALUE_15_MASK)
+#define MAC_PCU_TXOP_12_15_VALUE_14_MSB 23
+#define MAC_PCU_TXOP_12_15_VALUE_14_LSB 16
+#define MAC_PCU_TXOP_12_15_VALUE_14_MASK 0x00ff0000
+#define MAC_PCU_TXOP_12_15_VALUE_14_GET(x) (((x) & MAC_PCU_TXOP_12_15_VALUE_14_MASK) >> MAC_PCU_TXOP_12_15_VALUE_14_LSB)
+#define MAC_PCU_TXOP_12_15_VALUE_14_SET(x) (((x) << MAC_PCU_TXOP_12_15_VALUE_14_LSB) & MAC_PCU_TXOP_12_15_VALUE_14_MASK)
+#define MAC_PCU_TXOP_12_15_VALUE_13_MSB 15
+#define MAC_PCU_TXOP_12_15_VALUE_13_LSB 8
+#define MAC_PCU_TXOP_12_15_VALUE_13_MASK 0x0000ff00
+#define MAC_PCU_TXOP_12_15_VALUE_13_GET(x) (((x) & MAC_PCU_TXOP_12_15_VALUE_13_MASK) >> MAC_PCU_TXOP_12_15_VALUE_13_LSB)
+#define MAC_PCU_TXOP_12_15_VALUE_13_SET(x) (((x) << MAC_PCU_TXOP_12_15_VALUE_13_LSB) & MAC_PCU_TXOP_12_15_VALUE_13_MASK)
+#define MAC_PCU_TXOP_12_15_VALUE_12_MSB 7
+#define MAC_PCU_TXOP_12_15_VALUE_12_LSB 0
+#define MAC_PCU_TXOP_12_15_VALUE_12_MASK 0x000000ff
+#define MAC_PCU_TXOP_12_15_VALUE_12_GET(x) (((x) & MAC_PCU_TXOP_12_15_VALUE_12_MASK) >> MAC_PCU_TXOP_12_15_VALUE_12_LSB)
+#define MAC_PCU_TXOP_12_15_VALUE_12_SET(x) (((x) << MAC_PCU_TXOP_12_15_VALUE_12_LSB) & MAC_PCU_TXOP_12_15_VALUE_12_MASK)
+
+#define MAC_PCU_LOGIC_ANALYZER_ADDRESS 0x00008110
+#define MAC_PCU_LOGIC_ANALYZER_OFFSET 0x00000110
+#define MAC_PCU_LOGIC_ANALYZER_DIAG_MODE_MSB 31
+#define MAC_PCU_LOGIC_ANALYZER_DIAG_MODE_LSB 18
+#define MAC_PCU_LOGIC_ANALYZER_DIAG_MODE_MASK 0xfffc0000
+#define MAC_PCU_LOGIC_ANALYZER_DIAG_MODE_GET(x) (((x) & MAC_PCU_LOGIC_ANALYZER_DIAG_MODE_MASK) >> MAC_PCU_LOGIC_ANALYZER_DIAG_MODE_LSB)
+#define MAC_PCU_LOGIC_ANALYZER_DIAG_MODE_SET(x) (((x) << MAC_PCU_LOGIC_ANALYZER_DIAG_MODE_LSB) & MAC_PCU_LOGIC_ANALYZER_DIAG_MODE_MASK)
+#define MAC_PCU_LOGIC_ANALYZER_INT_ADDR_MSB 17
+#define MAC_PCU_LOGIC_ANALYZER_INT_ADDR_LSB 8
+#define MAC_PCU_LOGIC_ANALYZER_INT_ADDR_MASK 0x0003ff00
+#define MAC_PCU_LOGIC_ANALYZER_INT_ADDR_GET(x) (((x) & MAC_PCU_LOGIC_ANALYZER_INT_ADDR_MASK) >> MAC_PCU_LOGIC_ANALYZER_INT_ADDR_LSB)
+#define MAC_PCU_LOGIC_ANALYZER_INT_ADDR_SET(x) (((x) << MAC_PCU_LOGIC_ANALYZER_INT_ADDR_LSB) & MAC_PCU_LOGIC_ANALYZER_INT_ADDR_MASK)
+#define MAC_PCU_LOGIC_ANALYZER_QCU_SEL_MSB 7
+#define MAC_PCU_LOGIC_ANALYZER_QCU_SEL_LSB 4
+#define MAC_PCU_LOGIC_ANALYZER_QCU_SEL_MASK 0x000000f0
+#define MAC_PCU_LOGIC_ANALYZER_QCU_SEL_GET(x) (((x) & MAC_PCU_LOGIC_ANALYZER_QCU_SEL_MASK) >> MAC_PCU_LOGIC_ANALYZER_QCU_SEL_LSB)
+#define MAC_PCU_LOGIC_ANALYZER_QCU_SEL_SET(x) (((x) << MAC_PCU_LOGIC_ANALYZER_QCU_SEL_LSB) & MAC_PCU_LOGIC_ANALYZER_QCU_SEL_MASK)
+#define MAC_PCU_LOGIC_ANALYZER_ENABLE_MSB 3
+#define MAC_PCU_LOGIC_ANALYZER_ENABLE_LSB 3
+#define MAC_PCU_LOGIC_ANALYZER_ENABLE_MASK 0x00000008
+#define MAC_PCU_LOGIC_ANALYZER_ENABLE_GET(x) (((x) & MAC_PCU_LOGIC_ANALYZER_ENABLE_MASK) >> MAC_PCU_LOGIC_ANALYZER_ENABLE_LSB)
+#define MAC_PCU_LOGIC_ANALYZER_ENABLE_SET(x) (((x) << MAC_PCU_LOGIC_ANALYZER_ENABLE_LSB) & MAC_PCU_LOGIC_ANALYZER_ENABLE_MASK)
+#define MAC_PCU_LOGIC_ANALYZER_STATE_MSB 2
+#define MAC_PCU_LOGIC_ANALYZER_STATE_LSB 2
+#define MAC_PCU_LOGIC_ANALYZER_STATE_MASK 0x00000004
+#define MAC_PCU_LOGIC_ANALYZER_STATE_GET(x) (((x) & MAC_PCU_LOGIC_ANALYZER_STATE_MASK) >> MAC_PCU_LOGIC_ANALYZER_STATE_LSB)
+#define MAC_PCU_LOGIC_ANALYZER_STATE_SET(x) (((x) << MAC_PCU_LOGIC_ANALYZER_STATE_LSB) & MAC_PCU_LOGIC_ANALYZER_STATE_MASK)
+#define MAC_PCU_LOGIC_ANALYZER_CLEAR_MSB 1
+#define MAC_PCU_LOGIC_ANALYZER_CLEAR_LSB 1
+#define MAC_PCU_LOGIC_ANALYZER_CLEAR_MASK 0x00000002
+#define MAC_PCU_LOGIC_ANALYZER_CLEAR_GET(x) (((x) & MAC_PCU_LOGIC_ANALYZER_CLEAR_MASK) >> MAC_PCU_LOGIC_ANALYZER_CLEAR_LSB)
+#define MAC_PCU_LOGIC_ANALYZER_CLEAR_SET(x) (((x) << MAC_PCU_LOGIC_ANALYZER_CLEAR_LSB) & MAC_PCU_LOGIC_ANALYZER_CLEAR_MASK)
+#define MAC_PCU_LOGIC_ANALYZER_HOLD_MSB 0
+#define MAC_PCU_LOGIC_ANALYZER_HOLD_LSB 0
+#define MAC_PCU_LOGIC_ANALYZER_HOLD_MASK 0x00000001
+#define MAC_PCU_LOGIC_ANALYZER_HOLD_GET(x) (((x) & MAC_PCU_LOGIC_ANALYZER_HOLD_MASK) >> MAC_PCU_LOGIC_ANALYZER_HOLD_LSB)
+#define MAC_PCU_LOGIC_ANALYZER_HOLD_SET(x) (((x) << MAC_PCU_LOGIC_ANALYZER_HOLD_LSB) & MAC_PCU_LOGIC_ANALYZER_HOLD_MASK)
+
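+/*
+ * Illustrative note (not part of the generated register list): every field
+ * in this file follows the same convention -- <FIELD>_SET(x) shifts a value
+ * up by <FIELD>_LSB and clamps it with <FIELD>_MASK, while <FIELD>_GET(x)
+ * masks a register word and shifts the field back down.  A minimal usage
+ * sketch, assuming hypothetical reg_read()/reg_write() MMIO helpers that are
+ * not defined in this header:
+ *
+ *     u32 val = 0;
+ *     val |= MAC_PCU_LOGIC_ANALYZER_ENABLE_SET(1);      // enable capture
+ *     val |= MAC_PCU_LOGIC_ANALYZER_QCU_SEL_SET(3);     // select QCU 3
+ *     reg_write(MAC_PCU_LOGIC_ANALYZER_ADDRESS, val);
+ *
+ *     u32 state = MAC_PCU_LOGIC_ANALYZER_STATE_GET(
+ *                     reg_read(MAC_PCU_LOGIC_ANALYZER_ADDRESS));
+ */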
+#define MAC_PCU_LOGIC_ANALYZER_32L_ADDRESS 0x00008114
+#define MAC_PCU_LOGIC_ANALYZER_32L_OFFSET 0x00000114
+#define MAC_PCU_LOGIC_ANALYZER_32L_MASK_MSB 31
+#define MAC_PCU_LOGIC_ANALYZER_32L_MASK_LSB 0
+#define MAC_PCU_LOGIC_ANALYZER_32L_MASK_MASK 0xffffffff
+#define MAC_PCU_LOGIC_ANALYZER_32L_MASK_GET(x) (((x) & MAC_PCU_LOGIC_ANALYZER_32L_MASK_MASK) >> MAC_PCU_LOGIC_ANALYZER_32L_MASK_LSB)
+#define MAC_PCU_LOGIC_ANALYZER_32L_MASK_SET(x) (((x) << MAC_PCU_LOGIC_ANALYZER_32L_MASK_LSB) & MAC_PCU_LOGIC_ANALYZER_32L_MASK_MASK)
+
+#define MAC_PCU_LOGIC_ANALYZER_16U_ADDRESS 0x00008118
+#define MAC_PCU_LOGIC_ANALYZER_16U_OFFSET 0x00000118
+#define MAC_PCU_LOGIC_ANALYZER_16U_MASK_MSB 15
+#define MAC_PCU_LOGIC_ANALYZER_16U_MASK_LSB 0
+#define MAC_PCU_LOGIC_ANALYZER_16U_MASK_MASK 0x0000ffff
+#define MAC_PCU_LOGIC_ANALYZER_16U_MASK_GET(x) (((x) & MAC_PCU_LOGIC_ANALYZER_16U_MASK_MASK) >> MAC_PCU_LOGIC_ANALYZER_16U_MASK_LSB)
+#define MAC_PCU_LOGIC_ANALYZER_16U_MASK_SET(x) (((x) << MAC_PCU_LOGIC_ANALYZER_16U_MASK_LSB) & MAC_PCU_LOGIC_ANALYZER_16U_MASK_MASK)
+
+#define MAC_PCU_PHY_ERR_CNT_MASK_CONT_ADDRESS 0x0000811c
+#define MAC_PCU_PHY_ERR_CNT_MASK_CONT_OFFSET 0x0000011c
+#define MAC_PCU_PHY_ERR_CNT_MASK_CONT_MASK3_MSB 23
+#define MAC_PCU_PHY_ERR_CNT_MASK_CONT_MASK3_LSB 16
+#define MAC_PCU_PHY_ERR_CNT_MASK_CONT_MASK3_MASK 0x00ff0000
+#define MAC_PCU_PHY_ERR_CNT_MASK_CONT_MASK3_GET(x) (((x) & MAC_PCU_PHY_ERR_CNT_MASK_CONT_MASK3_MASK) >> MAC_PCU_PHY_ERR_CNT_MASK_CONT_MASK3_LSB)
+#define MAC_PCU_PHY_ERR_CNT_MASK_CONT_MASK3_SET(x) (((x) << MAC_PCU_PHY_ERR_CNT_MASK_CONT_MASK3_LSB) & MAC_PCU_PHY_ERR_CNT_MASK_CONT_MASK3_MASK)
+#define MAC_PCU_PHY_ERR_CNT_MASK_CONT_MASK2_MSB 15
+#define MAC_PCU_PHY_ERR_CNT_MASK_CONT_MASK2_LSB 8
+#define MAC_PCU_PHY_ERR_CNT_MASK_CONT_MASK2_MASK 0x0000ff00
+#define MAC_PCU_PHY_ERR_CNT_MASK_CONT_MASK2_GET(x) (((x) & MAC_PCU_PHY_ERR_CNT_MASK_CONT_MASK2_MASK) >> MAC_PCU_PHY_ERR_CNT_MASK_CONT_MASK2_LSB)
+#define MAC_PCU_PHY_ERR_CNT_MASK_CONT_MASK2_SET(x) (((x) << MAC_PCU_PHY_ERR_CNT_MASK_CONT_MASK2_LSB) & MAC_PCU_PHY_ERR_CNT_MASK_CONT_MASK2_MASK)
+#define MAC_PCU_PHY_ERR_CNT_MASK_CONT_MASK1_MSB 7
+#define MAC_PCU_PHY_ERR_CNT_MASK_CONT_MASK1_LSB 0
+#define MAC_PCU_PHY_ERR_CNT_MASK_CONT_MASK1_MASK 0x000000ff
+#define MAC_PCU_PHY_ERR_CNT_MASK_CONT_MASK1_GET(x) (((x) & MAC_PCU_PHY_ERR_CNT_MASK_CONT_MASK1_MASK) >> MAC_PCU_PHY_ERR_CNT_MASK_CONT_MASK1_LSB)
+#define MAC_PCU_PHY_ERR_CNT_MASK_CONT_MASK1_SET(x) (((x) << MAC_PCU_PHY_ERR_CNT_MASK_CONT_MASK1_LSB) & MAC_PCU_PHY_ERR_CNT_MASK_CONT_MASK1_MASK)
+
+#define MAC_PCU_AZIMUTH_MODE_ADDRESS 0x00008120
+#define MAC_PCU_AZIMUTH_MODE_OFFSET 0x00000120
+#define MAC_PCU_AZIMUTH_MODE_BA_USES_AD1_MSB 7
+#define MAC_PCU_AZIMUTH_MODE_BA_USES_AD1_LSB 7
+#define MAC_PCU_AZIMUTH_MODE_BA_USES_AD1_MASK 0x00000080
+#define MAC_PCU_AZIMUTH_MODE_BA_USES_AD1_GET(x) (((x) & MAC_PCU_AZIMUTH_MODE_BA_USES_AD1_MASK) >> MAC_PCU_AZIMUTH_MODE_BA_USES_AD1_LSB)
+#define MAC_PCU_AZIMUTH_MODE_BA_USES_AD1_SET(x) (((x) << MAC_PCU_AZIMUTH_MODE_BA_USES_AD1_LSB) & MAC_PCU_AZIMUTH_MODE_BA_USES_AD1_MASK)
+#define MAC_PCU_AZIMUTH_MODE_ACK_CTS_MATCH_TX_AD2_MSB 6
+#define MAC_PCU_AZIMUTH_MODE_ACK_CTS_MATCH_TX_AD2_LSB 6
+#define MAC_PCU_AZIMUTH_MODE_ACK_CTS_MATCH_TX_AD2_MASK 0x00000040
+#define MAC_PCU_AZIMUTH_MODE_ACK_CTS_MATCH_TX_AD2_GET(x) (((x) & MAC_PCU_AZIMUTH_MODE_ACK_CTS_MATCH_TX_AD2_MASK) >> MAC_PCU_AZIMUTH_MODE_ACK_CTS_MATCH_TX_AD2_LSB)
+#define MAC_PCU_AZIMUTH_MODE_ACK_CTS_MATCH_TX_AD2_SET(x) (((x) << MAC_PCU_AZIMUTH_MODE_ACK_CTS_MATCH_TX_AD2_LSB) & MAC_PCU_AZIMUTH_MODE_ACK_CTS_MATCH_TX_AD2_MASK)
+#define MAC_PCU_AZIMUTH_MODE_TX_DESC_EN_MSB 5
+#define MAC_PCU_AZIMUTH_MODE_TX_DESC_EN_LSB 5
+#define MAC_PCU_AZIMUTH_MODE_TX_DESC_EN_MASK 0x00000020
+#define MAC_PCU_AZIMUTH_MODE_TX_DESC_EN_GET(x) (((x) & MAC_PCU_AZIMUTH_MODE_TX_DESC_EN_MASK) >> MAC_PCU_AZIMUTH_MODE_TX_DESC_EN_LSB)
+#define MAC_PCU_AZIMUTH_MODE_TX_DESC_EN_SET(x) (((x) << MAC_PCU_AZIMUTH_MODE_TX_DESC_EN_LSB) & MAC_PCU_AZIMUTH_MODE_TX_DESC_EN_MASK)
+#define MAC_PCU_AZIMUTH_MODE_CLK_EN_MSB 4
+#define MAC_PCU_AZIMUTH_MODE_CLK_EN_LSB 4
+#define MAC_PCU_AZIMUTH_MODE_CLK_EN_MASK 0x00000010
+#define MAC_PCU_AZIMUTH_MODE_CLK_EN_GET(x) (((x) & MAC_PCU_AZIMUTH_MODE_CLK_EN_MASK) >> MAC_PCU_AZIMUTH_MODE_CLK_EN_LSB)
+#define MAC_PCU_AZIMUTH_MODE_CLK_EN_SET(x) (((x) << MAC_PCU_AZIMUTH_MODE_CLK_EN_LSB) & MAC_PCU_AZIMUTH_MODE_CLK_EN_MASK)
+#define MAC_PCU_AZIMUTH_MODE_RX_TSF_STATUS_SEL_MSB 3
+#define MAC_PCU_AZIMUTH_MODE_RX_TSF_STATUS_SEL_LSB 3
+#define MAC_PCU_AZIMUTH_MODE_RX_TSF_STATUS_SEL_MASK 0x00000008
+#define MAC_PCU_AZIMUTH_MODE_RX_TSF_STATUS_SEL_GET(x) (((x) & MAC_PCU_AZIMUTH_MODE_RX_TSF_STATUS_SEL_MASK) >> MAC_PCU_AZIMUTH_MODE_RX_TSF_STATUS_SEL_LSB)
+#define MAC_PCU_AZIMUTH_MODE_RX_TSF_STATUS_SEL_SET(x) (((x) << MAC_PCU_AZIMUTH_MODE_RX_TSF_STATUS_SEL_LSB) & MAC_PCU_AZIMUTH_MODE_RX_TSF_STATUS_SEL_MASK)
+#define MAC_PCU_AZIMUTH_MODE_TX_TSF_STATUS_SEL_MSB 2
+#define MAC_PCU_AZIMUTH_MODE_TX_TSF_STATUS_SEL_LSB 2
+#define MAC_PCU_AZIMUTH_MODE_TX_TSF_STATUS_SEL_MASK 0x00000004
+#define MAC_PCU_AZIMUTH_MODE_TX_TSF_STATUS_SEL_GET(x) (((x) & MAC_PCU_AZIMUTH_MODE_TX_TSF_STATUS_SEL_MASK) >> MAC_PCU_AZIMUTH_MODE_TX_TSF_STATUS_SEL_LSB)
+#define MAC_PCU_AZIMUTH_MODE_TX_TSF_STATUS_SEL_SET(x) (((x) << MAC_PCU_AZIMUTH_MODE_TX_TSF_STATUS_SEL_LSB) & MAC_PCU_AZIMUTH_MODE_TX_TSF_STATUS_SEL_MASK)
+#define MAC_PCU_AZIMUTH_MODE_KEY_SEARCH_AD1_MSB 1
+#define MAC_PCU_AZIMUTH_MODE_KEY_SEARCH_AD1_LSB 1
+#define MAC_PCU_AZIMUTH_MODE_KEY_SEARCH_AD1_MASK 0x00000002
+#define MAC_PCU_AZIMUTH_MODE_KEY_SEARCH_AD1_GET(x) (((x) & MAC_PCU_AZIMUTH_MODE_KEY_SEARCH_AD1_MASK) >> MAC_PCU_AZIMUTH_MODE_KEY_SEARCH_AD1_LSB)
+#define MAC_PCU_AZIMUTH_MODE_KEY_SEARCH_AD1_SET(x) (((x) << MAC_PCU_AZIMUTH_MODE_KEY_SEARCH_AD1_LSB) & MAC_PCU_AZIMUTH_MODE_KEY_SEARCH_AD1_MASK)
+#define MAC_PCU_AZIMUTH_MODE_DISABLE_TSF_UPDATE_MSB 0
+#define MAC_PCU_AZIMUTH_MODE_DISABLE_TSF_UPDATE_LSB 0
+#define MAC_PCU_AZIMUTH_MODE_DISABLE_TSF_UPDATE_MASK 0x00000001
+#define MAC_PCU_AZIMUTH_MODE_DISABLE_TSF_UPDATE_GET(x) (((x) & MAC_PCU_AZIMUTH_MODE_DISABLE_TSF_UPDATE_MASK) >> MAC_PCU_AZIMUTH_MODE_DISABLE_TSF_UPDATE_LSB)
+#define MAC_PCU_AZIMUTH_MODE_DISABLE_TSF_UPDATE_SET(x) (((x) << MAC_PCU_AZIMUTH_MODE_DISABLE_TSF_UPDATE_LSB) & MAC_PCU_AZIMUTH_MODE_DISABLE_TSF_UPDATE_MASK)
+
+#define MAC_PCU_20_40_MODE_ADDRESS 0x00008124
+#define MAC_PCU_20_40_MODE_OFFSET 0x00000124
+#define MAC_PCU_20_40_MODE_PIFS_CYCLES_MSB 15
+#define MAC_PCU_20_40_MODE_PIFS_CYCLES_LSB 4
+#define MAC_PCU_20_40_MODE_PIFS_CYCLES_MASK 0x0000fff0
+#define MAC_PCU_20_40_MODE_PIFS_CYCLES_GET(x) (((x) & MAC_PCU_20_40_MODE_PIFS_CYCLES_MASK) >> MAC_PCU_20_40_MODE_PIFS_CYCLES_LSB)
+#define MAC_PCU_20_40_MODE_PIFS_CYCLES_SET(x) (((x) << MAC_PCU_20_40_MODE_PIFS_CYCLES_LSB) & MAC_PCU_20_40_MODE_PIFS_CYCLES_MASK)
+#define MAC_PCU_20_40_MODE_SWAMPED_FORCES_RX_CLEAR_CTL_IDLE_MSB 3
+#define MAC_PCU_20_40_MODE_SWAMPED_FORCES_RX_CLEAR_CTL_IDLE_LSB 3
+#define MAC_PCU_20_40_MODE_SWAMPED_FORCES_RX_CLEAR_CTL_IDLE_MASK 0x00000008
+#define MAC_PCU_20_40_MODE_SWAMPED_FORCES_RX_CLEAR_CTL_IDLE_GET(x) (((x) & MAC_PCU_20_40_MODE_SWAMPED_FORCES_RX_CLEAR_CTL_IDLE_MASK) >> MAC_PCU_20_40_MODE_SWAMPED_FORCES_RX_CLEAR_CTL_IDLE_LSB)
+#define MAC_PCU_20_40_MODE_SWAMPED_FORCES_RX_CLEAR_CTL_IDLE_SET(x) (((x) << MAC_PCU_20_40_MODE_SWAMPED_FORCES_RX_CLEAR_CTL_IDLE_LSB) & MAC_PCU_20_40_MODE_SWAMPED_FORCES_RX_CLEAR_CTL_IDLE_MASK)
+#define MAC_PCU_20_40_MODE_TX_HT20_ON_EXT_BUSY_MSB 2
+#define MAC_PCU_20_40_MODE_TX_HT20_ON_EXT_BUSY_LSB 2
+#define MAC_PCU_20_40_MODE_TX_HT20_ON_EXT_BUSY_MASK 0x00000004
+#define MAC_PCU_20_40_MODE_TX_HT20_ON_EXT_BUSY_GET(x) (((x) & MAC_PCU_20_40_MODE_TX_HT20_ON_EXT_BUSY_MASK) >> MAC_PCU_20_40_MODE_TX_HT20_ON_EXT_BUSY_LSB)
+#define MAC_PCU_20_40_MODE_TX_HT20_ON_EXT_BUSY_SET(x) (((x) << MAC_PCU_20_40_MODE_TX_HT20_ON_EXT_BUSY_LSB) & MAC_PCU_20_40_MODE_TX_HT20_ON_EXT_BUSY_MASK)
+#define MAC_PCU_20_40_MODE_EXT_PIFS_ENABLE_MSB 1
+#define MAC_PCU_20_40_MODE_EXT_PIFS_ENABLE_LSB 1
+#define MAC_PCU_20_40_MODE_EXT_PIFS_ENABLE_MASK 0x00000002
+#define MAC_PCU_20_40_MODE_EXT_PIFS_ENABLE_GET(x) (((x) & MAC_PCU_20_40_MODE_EXT_PIFS_ENABLE_MASK) >> MAC_PCU_20_40_MODE_EXT_PIFS_ENABLE_LSB)
+#define MAC_PCU_20_40_MODE_EXT_PIFS_ENABLE_SET(x) (((x) << MAC_PCU_20_40_MODE_EXT_PIFS_ENABLE_LSB) & MAC_PCU_20_40_MODE_EXT_PIFS_ENABLE_MASK)
+#define MAC_PCU_20_40_MODE_JOINED_RX_CLEAR_MSB 0
+#define MAC_PCU_20_40_MODE_JOINED_RX_CLEAR_LSB 0
+#define MAC_PCU_20_40_MODE_JOINED_RX_CLEAR_MASK 0x00000001
+#define MAC_PCU_20_40_MODE_JOINED_RX_CLEAR_GET(x) (((x) & MAC_PCU_20_40_MODE_JOINED_RX_CLEAR_MASK) >> MAC_PCU_20_40_MODE_JOINED_RX_CLEAR_LSB)
+#define MAC_PCU_20_40_MODE_JOINED_RX_CLEAR_SET(x) (((x) << MAC_PCU_20_40_MODE_JOINED_RX_CLEAR_LSB) & MAC_PCU_20_40_MODE_JOINED_RX_CLEAR_MASK)
+
+#define MAC_PCU_RX_CLEAR_DIFF_CNT_ADDRESS 0x00008128
+#define MAC_PCU_RX_CLEAR_DIFF_CNT_OFFSET 0x00000128
+#define MAC_PCU_RX_CLEAR_DIFF_CNT_VALUE_MSB 31
+#define MAC_PCU_RX_CLEAR_DIFF_CNT_VALUE_LSB 0
+#define MAC_PCU_RX_CLEAR_DIFF_CNT_VALUE_MASK 0xffffffff
+#define MAC_PCU_RX_CLEAR_DIFF_CNT_VALUE_GET(x) (((x) & MAC_PCU_RX_CLEAR_DIFF_CNT_VALUE_MASK) >> MAC_PCU_RX_CLEAR_DIFF_CNT_VALUE_LSB)
+#define MAC_PCU_RX_CLEAR_DIFF_CNT_VALUE_SET(x) (((x) << MAC_PCU_RX_CLEAR_DIFF_CNT_VALUE_LSB) & MAC_PCU_RX_CLEAR_DIFF_CNT_VALUE_MASK)
+
+#define MAC_PCU_SELF_GEN_ANTENNA_MASK_ADDRESS 0x0000812c
+#define MAC_PCU_SELF_GEN_ANTENNA_MASK_OFFSET 0x0000012c
+#define MAC_PCU_SELF_GEN_ANTENNA_MASK_VALUE_MSB 2
+#define MAC_PCU_SELF_GEN_ANTENNA_MASK_VALUE_LSB 0
+#define MAC_PCU_SELF_GEN_ANTENNA_MASK_VALUE_MASK 0x00000007
+#define MAC_PCU_SELF_GEN_ANTENNA_MASK_VALUE_GET(x) (((x) & MAC_PCU_SELF_GEN_ANTENNA_MASK_VALUE_MASK) >> MAC_PCU_SELF_GEN_ANTENNA_MASK_VALUE_LSB)
+#define MAC_PCU_SELF_GEN_ANTENNA_MASK_VALUE_SET(x) (((x) << MAC_PCU_SELF_GEN_ANTENNA_MASK_VALUE_LSB) & MAC_PCU_SELF_GEN_ANTENNA_MASK_VALUE_MASK)
+
+#define MAC_PCU_BA_BAR_CONTROL_ADDRESS 0x00008130
+#define MAC_PCU_BA_BAR_CONTROL_OFFSET 0x00000130
+#define MAC_PCU_BA_BAR_CONTROL_UPDATE_BA_BITMAP_QOS_NULL_MSB 12
+#define MAC_PCU_BA_BAR_CONTROL_UPDATE_BA_BITMAP_QOS_NULL_LSB 12
+#define MAC_PCU_BA_BAR_CONTROL_UPDATE_BA_BITMAP_QOS_NULL_MASK 0x00001000
+#define MAC_PCU_BA_BAR_CONTROL_UPDATE_BA_BITMAP_QOS_NULL_GET(x) (((x) & MAC_PCU_BA_BAR_CONTROL_UPDATE_BA_BITMAP_QOS_NULL_MASK) >> MAC_PCU_BA_BAR_CONTROL_UPDATE_BA_BITMAP_QOS_NULL_LSB)
+#define MAC_PCU_BA_BAR_CONTROL_UPDATE_BA_BITMAP_QOS_NULL_SET(x) (((x) << MAC_PCU_BA_BAR_CONTROL_UPDATE_BA_BITMAP_QOS_NULL_LSB) & MAC_PCU_BA_BAR_CONTROL_UPDATE_BA_BITMAP_QOS_NULL_MASK)
+#define MAC_PCU_BA_BAR_CONTROL_TX_BA_CLEAR_BA_VALID_MSB 11
+#define MAC_PCU_BA_BAR_CONTROL_TX_BA_CLEAR_BA_VALID_LSB 11
+#define MAC_PCU_BA_BAR_CONTROL_TX_BA_CLEAR_BA_VALID_MASK 0x00000800
+#define MAC_PCU_BA_BAR_CONTROL_TX_BA_CLEAR_BA_VALID_GET(x) (((x) & MAC_PCU_BA_BAR_CONTROL_TX_BA_CLEAR_BA_VALID_MASK) >> MAC_PCU_BA_BAR_CONTROL_TX_BA_CLEAR_BA_VALID_LSB)
+#define MAC_PCU_BA_BAR_CONTROL_TX_BA_CLEAR_BA_VALID_SET(x) (((x) << MAC_PCU_BA_BAR_CONTROL_TX_BA_CLEAR_BA_VALID_LSB) & MAC_PCU_BA_BAR_CONTROL_TX_BA_CLEAR_BA_VALID_MASK)
+#define MAC_PCU_BA_BAR_CONTROL_FORCE_NO_MATCH_MSB 10
+#define MAC_PCU_BA_BAR_CONTROL_FORCE_NO_MATCH_LSB 10
+#define MAC_PCU_BA_BAR_CONTROL_FORCE_NO_MATCH_MASK 0x00000400
+#define MAC_PCU_BA_BAR_CONTROL_FORCE_NO_MATCH_GET(x) (((x) & MAC_PCU_BA_BAR_CONTROL_FORCE_NO_MATCH_MASK) >> MAC_PCU_BA_BAR_CONTROL_FORCE_NO_MATCH_LSB)
+#define MAC_PCU_BA_BAR_CONTROL_FORCE_NO_MATCH_SET(x) (((x) << MAC_PCU_BA_BAR_CONTROL_FORCE_NO_MATCH_LSB) & MAC_PCU_BA_BAR_CONTROL_FORCE_NO_MATCH_MASK)
+#define MAC_PCU_BA_BAR_CONTROL_ACK_POLICY_VALUE_MSB 9
+#define MAC_PCU_BA_BAR_CONTROL_ACK_POLICY_VALUE_LSB 9
+#define MAC_PCU_BA_BAR_CONTROL_ACK_POLICY_VALUE_MASK 0x00000200
+#define MAC_PCU_BA_BAR_CONTROL_ACK_POLICY_VALUE_GET(x) (((x) & MAC_PCU_BA_BAR_CONTROL_ACK_POLICY_VALUE_MASK) >> MAC_PCU_BA_BAR_CONTROL_ACK_POLICY_VALUE_LSB)
+#define MAC_PCU_BA_BAR_CONTROL_ACK_POLICY_VALUE_SET(x) (((x) << MAC_PCU_BA_BAR_CONTROL_ACK_POLICY_VALUE_LSB) & MAC_PCU_BA_BAR_CONTROL_ACK_POLICY_VALUE_MASK)
+#define MAC_PCU_BA_BAR_CONTROL_COMPRESSED_VALUE_MSB 8
+#define MAC_PCU_BA_BAR_CONTROL_COMPRESSED_VALUE_LSB 8
+#define MAC_PCU_BA_BAR_CONTROL_COMPRESSED_VALUE_MASK 0x00000100
+#define MAC_PCU_BA_BAR_CONTROL_COMPRESSED_VALUE_GET(x) (((x) & MAC_PCU_BA_BAR_CONTROL_COMPRESSED_VALUE_MASK) >> MAC_PCU_BA_BAR_CONTROL_COMPRESSED_VALUE_LSB)
+#define MAC_PCU_BA_BAR_CONTROL_COMPRESSED_VALUE_SET(x) (((x) << MAC_PCU_BA_BAR_CONTROL_COMPRESSED_VALUE_LSB) & MAC_PCU_BA_BAR_CONTROL_COMPRESSED_VALUE_MASK)
+#define MAC_PCU_BA_BAR_CONTROL_ACK_POLICY_OFFSET_MSB 7
+#define MAC_PCU_BA_BAR_CONTROL_ACK_POLICY_OFFSET_LSB 4
+#define MAC_PCU_BA_BAR_CONTROL_ACK_POLICY_OFFSET_MASK 0x000000f0
+#define MAC_PCU_BA_BAR_CONTROL_ACK_POLICY_OFFSET_GET(x) (((x) & MAC_PCU_BA_BAR_CONTROL_ACK_POLICY_OFFSET_MASK) >> MAC_PCU_BA_BAR_CONTROL_ACK_POLICY_OFFSET_LSB)
+#define MAC_PCU_BA_BAR_CONTROL_ACK_POLICY_OFFSET_SET(x) (((x) << MAC_PCU_BA_BAR_CONTROL_ACK_POLICY_OFFSET_LSB) & MAC_PCU_BA_BAR_CONTROL_ACK_POLICY_OFFSET_MASK)
+#define MAC_PCU_BA_BAR_CONTROL_COMPRESSED_OFFSET_MSB 3
+#define MAC_PCU_BA_BAR_CONTROL_COMPRESSED_OFFSET_LSB 0
+#define MAC_PCU_BA_BAR_CONTROL_COMPRESSED_OFFSET_MASK 0x0000000f
+#define MAC_PCU_BA_BAR_CONTROL_COMPRESSED_OFFSET_GET(x) (((x) & MAC_PCU_BA_BAR_CONTROL_COMPRESSED_OFFSET_MASK) >> MAC_PCU_BA_BAR_CONTROL_COMPRESSED_OFFSET_LSB)
+#define MAC_PCU_BA_BAR_CONTROL_COMPRESSED_OFFSET_SET(x) (((x) << MAC_PCU_BA_BAR_CONTROL_COMPRESSED_OFFSET_LSB) & MAC_PCU_BA_BAR_CONTROL_COMPRESSED_OFFSET_MASK)
+
+#define MAC_PCU_LEGACY_PLCP_SPOOF_ADDRESS 0x00008134
+#define MAC_PCU_LEGACY_PLCP_SPOOF_OFFSET 0x00000134
+#define MAC_PCU_LEGACY_PLCP_SPOOF_MIN_LENGTH_MSB 12
+#define MAC_PCU_LEGACY_PLCP_SPOOF_MIN_LENGTH_LSB 8
+#define MAC_PCU_LEGACY_PLCP_SPOOF_MIN_LENGTH_MASK 0x00001f00
+#define MAC_PCU_LEGACY_PLCP_SPOOF_MIN_LENGTH_GET(x) (((x) & MAC_PCU_LEGACY_PLCP_SPOOF_MIN_LENGTH_MASK) >> MAC_PCU_LEGACY_PLCP_SPOOF_MIN_LENGTH_LSB)
+#define MAC_PCU_LEGACY_PLCP_SPOOF_MIN_LENGTH_SET(x) (((x) << MAC_PCU_LEGACY_PLCP_SPOOF_MIN_LENGTH_LSB) & MAC_PCU_LEGACY_PLCP_SPOOF_MIN_LENGTH_MASK)
+#define MAC_PCU_LEGACY_PLCP_SPOOF_EIFS_MINUS_DIFS_MSB 7
+#define MAC_PCU_LEGACY_PLCP_SPOOF_EIFS_MINUS_DIFS_LSB 0
+#define MAC_PCU_LEGACY_PLCP_SPOOF_EIFS_MINUS_DIFS_MASK 0x000000ff
+#define MAC_PCU_LEGACY_PLCP_SPOOF_EIFS_MINUS_DIFS_GET(x) (((x) & MAC_PCU_LEGACY_PLCP_SPOOF_EIFS_MINUS_DIFS_MASK) >> MAC_PCU_LEGACY_PLCP_SPOOF_EIFS_MINUS_DIFS_LSB)
+#define MAC_PCU_LEGACY_PLCP_SPOOF_EIFS_MINUS_DIFS_SET(x) (((x) << MAC_PCU_LEGACY_PLCP_SPOOF_EIFS_MINUS_DIFS_LSB) & MAC_PCU_LEGACY_PLCP_SPOOF_EIFS_MINUS_DIFS_MASK)
+
+#define MAC_PCU_PHY_ERROR_MASK_CONT_ADDRESS 0x00008138
+#define MAC_PCU_PHY_ERROR_MASK_CONT_OFFSET 0x00000138
+#define MAC_PCU_PHY_ERROR_MASK_CONT_EIFS_VALUE_MSB 23
+#define MAC_PCU_PHY_ERROR_MASK_CONT_EIFS_VALUE_LSB 16
+#define MAC_PCU_PHY_ERROR_MASK_CONT_EIFS_VALUE_MASK 0x00ff0000
+#define MAC_PCU_PHY_ERROR_MASK_CONT_EIFS_VALUE_GET(x) (((x) & MAC_PCU_PHY_ERROR_MASK_CONT_EIFS_VALUE_MASK) >> MAC_PCU_PHY_ERROR_MASK_CONT_EIFS_VALUE_LSB)
+#define MAC_PCU_PHY_ERROR_MASK_CONT_EIFS_VALUE_SET(x) (((x) << MAC_PCU_PHY_ERROR_MASK_CONT_EIFS_VALUE_LSB) & MAC_PCU_PHY_ERROR_MASK_CONT_EIFS_VALUE_MASK)
+#define MAC_PCU_PHY_ERROR_MASK_CONT_MASK_VALUE_MSB 7
+#define MAC_PCU_PHY_ERROR_MASK_CONT_MASK_VALUE_LSB 0
+#define MAC_PCU_PHY_ERROR_MASK_CONT_MASK_VALUE_MASK 0x000000ff
+#define MAC_PCU_PHY_ERROR_MASK_CONT_MASK_VALUE_GET(x) (((x) & MAC_PCU_PHY_ERROR_MASK_CONT_MASK_VALUE_MASK) >> MAC_PCU_PHY_ERROR_MASK_CONT_MASK_VALUE_LSB)
+#define MAC_PCU_PHY_ERROR_MASK_CONT_MASK_VALUE_SET(x) (((x) << MAC_PCU_PHY_ERROR_MASK_CONT_MASK_VALUE_LSB) & MAC_PCU_PHY_ERROR_MASK_CONT_MASK_VALUE_MASK)
+
+#define MAC_PCU_TX_TIMER_ADDRESS 0x0000813c
+#define MAC_PCU_TX_TIMER_OFFSET 0x0000013c
+#define MAC_PCU_TX_TIMER_QUIET_TIMER_ENABLE_MSB 25
+#define MAC_PCU_TX_TIMER_QUIET_TIMER_ENABLE_LSB 25
+#define MAC_PCU_TX_TIMER_QUIET_TIMER_ENABLE_MASK 0x02000000
+#define MAC_PCU_TX_TIMER_QUIET_TIMER_ENABLE_GET(x) (((x) & MAC_PCU_TX_TIMER_QUIET_TIMER_ENABLE_MASK) >> MAC_PCU_TX_TIMER_QUIET_TIMER_ENABLE_LSB)
+#define MAC_PCU_TX_TIMER_QUIET_TIMER_ENABLE_SET(x) (((x) << MAC_PCU_TX_TIMER_QUIET_TIMER_ENABLE_LSB) & MAC_PCU_TX_TIMER_QUIET_TIMER_ENABLE_MASK)
+#define MAC_PCU_TX_TIMER_QUIET_TIMER_MSB 24
+#define MAC_PCU_TX_TIMER_QUIET_TIMER_LSB 20
+#define MAC_PCU_TX_TIMER_QUIET_TIMER_MASK 0x01f00000
+#define MAC_PCU_TX_TIMER_QUIET_TIMER_GET(x) (((x) & MAC_PCU_TX_TIMER_QUIET_TIMER_MASK) >> MAC_PCU_TX_TIMER_QUIET_TIMER_LSB)
+#define MAC_PCU_TX_TIMER_QUIET_TIMER_SET(x) (((x) << MAC_PCU_TX_TIMER_QUIET_TIMER_LSB) & MAC_PCU_TX_TIMER_QUIET_TIMER_MASK)
+#define MAC_PCU_TX_TIMER_RIFS_TIMER_MSB 19
+#define MAC_PCU_TX_TIMER_RIFS_TIMER_LSB 16
+#define MAC_PCU_TX_TIMER_RIFS_TIMER_MASK 0x000f0000
+#define MAC_PCU_TX_TIMER_RIFS_TIMER_GET(x) (((x) & MAC_PCU_TX_TIMER_RIFS_TIMER_MASK) >> MAC_PCU_TX_TIMER_RIFS_TIMER_LSB)
+#define MAC_PCU_TX_TIMER_RIFS_TIMER_SET(x) (((x) << MAC_PCU_TX_TIMER_RIFS_TIMER_LSB) & MAC_PCU_TX_TIMER_RIFS_TIMER_MASK)
+#define MAC_PCU_TX_TIMER_TX_TIMER_ENABLE_MSB 15
+#define MAC_PCU_TX_TIMER_TX_TIMER_ENABLE_LSB 15
+#define MAC_PCU_TX_TIMER_TX_TIMER_ENABLE_MASK 0x00008000
+#define MAC_PCU_TX_TIMER_TX_TIMER_ENABLE_GET(x) (((x) & MAC_PCU_TX_TIMER_TX_TIMER_ENABLE_MASK) >> MAC_PCU_TX_TIMER_TX_TIMER_ENABLE_LSB)
+#define MAC_PCU_TX_TIMER_TX_TIMER_ENABLE_SET(x) (((x) << MAC_PCU_TX_TIMER_TX_TIMER_ENABLE_LSB) & MAC_PCU_TX_TIMER_TX_TIMER_ENABLE_MASK)
+#define MAC_PCU_TX_TIMER_TX_TIMER_MSB 14
+#define MAC_PCU_TX_TIMER_TX_TIMER_LSB 0
+#define MAC_PCU_TX_TIMER_TX_TIMER_MASK 0x00007fff
+#define MAC_PCU_TX_TIMER_TX_TIMER_GET(x) (((x) & MAC_PCU_TX_TIMER_TX_TIMER_MASK) >> MAC_PCU_TX_TIMER_TX_TIMER_LSB)
+#define MAC_PCU_TX_TIMER_TX_TIMER_SET(x) (((x) << MAC_PCU_TX_TIMER_TX_TIMER_LSB) & MAC_PCU_TX_TIMER_TX_TIMER_MASK)
+
+#define MAC_PCU_TXBUF_CTRL_ADDRESS 0x00008140
+#define MAC_PCU_TXBUF_CTRL_OFFSET 0x00000140
+#define MAC_PCU_TXBUF_CTRL_TX_FIFO_WRAP_ENABLE_MSB 16
+#define MAC_PCU_TXBUF_CTRL_TX_FIFO_WRAP_ENABLE_LSB 16
+#define MAC_PCU_TXBUF_CTRL_TX_FIFO_WRAP_ENABLE_MASK 0x00010000
+#define MAC_PCU_TXBUF_CTRL_TX_FIFO_WRAP_ENABLE_GET(x) (((x) & MAC_PCU_TXBUF_CTRL_TX_FIFO_WRAP_ENABLE_MASK) >> MAC_PCU_TXBUF_CTRL_TX_FIFO_WRAP_ENABLE_LSB)
+#define MAC_PCU_TXBUF_CTRL_TX_FIFO_WRAP_ENABLE_SET(x) (((x) << MAC_PCU_TXBUF_CTRL_TX_FIFO_WRAP_ENABLE_LSB) & MAC_PCU_TXBUF_CTRL_TX_FIFO_WRAP_ENABLE_MASK)
+#define MAC_PCU_TXBUF_CTRL_USABLE_ENTRIES_MSB 11
+#define MAC_PCU_TXBUF_CTRL_USABLE_ENTRIES_LSB 0
+#define MAC_PCU_TXBUF_CTRL_USABLE_ENTRIES_MASK 0x00000fff
+#define MAC_PCU_TXBUF_CTRL_USABLE_ENTRIES_GET(x) (((x) & MAC_PCU_TXBUF_CTRL_USABLE_ENTRIES_MASK) >> MAC_PCU_TXBUF_CTRL_USABLE_ENTRIES_LSB)
+#define MAC_PCU_TXBUF_CTRL_USABLE_ENTRIES_SET(x) (((x) << MAC_PCU_TXBUF_CTRL_USABLE_ENTRIES_LSB) & MAC_PCU_TXBUF_CTRL_USABLE_ENTRIES_MASK)
+
+#define MAC_PCU_MISC_MODE2_ADDRESS 0x00008144
+#define MAC_PCU_MISC_MODE2_OFFSET 0x00000144
+#define MAC_PCU_MISC_MODE2_RESERVED_1_MSB 31
+#define MAC_PCU_MISC_MODE2_RESERVED_1_LSB 28
+#define MAC_PCU_MISC_MODE2_RESERVED_1_MASK 0xf0000000
+#define MAC_PCU_MISC_MODE2_RESERVED_1_GET(x) (((x) & MAC_PCU_MISC_MODE2_RESERVED_1_MASK) >> MAC_PCU_MISC_MODE2_RESERVED_1_LSB)
+#define MAC_PCU_MISC_MODE2_RESERVED_1_SET(x) (((x) << MAC_PCU_MISC_MODE2_RESERVED_1_LSB) & MAC_PCU_MISC_MODE2_RESERVED_1_MASK)
+#define MAC_PCU_MISC_MODE2_RCV_TIMESTAMP_FIX_MSB 27
+#define MAC_PCU_MISC_MODE2_RCV_TIMESTAMP_FIX_LSB 27
+#define MAC_PCU_MISC_MODE2_RCV_TIMESTAMP_FIX_MASK 0x08000000
+#define MAC_PCU_MISC_MODE2_RCV_TIMESTAMP_FIX_GET(x) (((x) & MAC_PCU_MISC_MODE2_RCV_TIMESTAMP_FIX_MASK) >> MAC_PCU_MISC_MODE2_RCV_TIMESTAMP_FIX_LSB)
+#define MAC_PCU_MISC_MODE2_RCV_TIMESTAMP_FIX_SET(x) (((x) << MAC_PCU_MISC_MODE2_RCV_TIMESTAMP_FIX_LSB) & MAC_PCU_MISC_MODE2_RCV_TIMESTAMP_FIX_MASK)
+#define MAC_PCU_MISC_MODE2_BEACON_FROM_TO_DS_MSB 26
+#define MAC_PCU_MISC_MODE2_BEACON_FROM_TO_DS_LSB 26
+#define MAC_PCU_MISC_MODE2_BEACON_FROM_TO_DS_MASK 0x04000000
+#define MAC_PCU_MISC_MODE2_BEACON_FROM_TO_DS_GET(x) (((x) & MAC_PCU_MISC_MODE2_BEACON_FROM_TO_DS_MASK) >> MAC_PCU_MISC_MODE2_BEACON_FROM_TO_DS_LSB)
+#define MAC_PCU_MISC_MODE2_BEACON_FROM_TO_DS_SET(x) (((x) << MAC_PCU_MISC_MODE2_BEACON_FROM_TO_DS_LSB) & MAC_PCU_MISC_MODE2_BEACON_FROM_TO_DS_MASK)
+#define MAC_PCU_MISC_MODE2_PM_FIELD_FOR_MGMT_MSB 25
+#define MAC_PCU_MISC_MODE2_PM_FIELD_FOR_MGMT_LSB 25
+#define MAC_PCU_MISC_MODE2_PM_FIELD_FOR_MGMT_MASK 0x02000000
+#define MAC_PCU_MISC_MODE2_PM_FIELD_FOR_MGMT_GET(x) (((x) & MAC_PCU_MISC_MODE2_PM_FIELD_FOR_MGMT_MASK) >> MAC_PCU_MISC_MODE2_PM_FIELD_FOR_MGMT_LSB)
+#define MAC_PCU_MISC_MODE2_PM_FIELD_FOR_MGMT_SET(x) (((x) << MAC_PCU_MISC_MODE2_PM_FIELD_FOR_MGMT_LSB) & MAC_PCU_MISC_MODE2_PM_FIELD_FOR_MGMT_MASK)
+#define MAC_PCU_MISC_MODE2_PM_FIELD_FOR_DAT_MSB 24
+#define MAC_PCU_MISC_MODE2_PM_FIELD_FOR_DAT_LSB 24
+#define MAC_PCU_MISC_MODE2_PM_FIELD_FOR_DAT_MASK 0x01000000
+#define MAC_PCU_MISC_MODE2_PM_FIELD_FOR_DAT_GET(x) (((x) & MAC_PCU_MISC_MODE2_PM_FIELD_FOR_DAT_MASK) >> MAC_PCU_MISC_MODE2_PM_FIELD_FOR_DAT_LSB)
+#define MAC_PCU_MISC_MODE2_PM_FIELD_FOR_DAT_SET(x) (((x) << MAC_PCU_MISC_MODE2_PM_FIELD_FOR_DAT_LSB) & MAC_PCU_MISC_MODE2_PM_FIELD_FOR_DAT_MASK)
+#define MAC_PCU_MISC_MODE2_IGNORE_TXOP_IF_ZERO_MSB 23
+#define MAC_PCU_MISC_MODE2_IGNORE_TXOP_IF_ZERO_LSB 23
+#define MAC_PCU_MISC_MODE2_IGNORE_TXOP_IF_ZERO_MASK 0x00800000
+#define MAC_PCU_MISC_MODE2_IGNORE_TXOP_IF_ZERO_GET(x) (((x) & MAC_PCU_MISC_MODE2_IGNORE_TXOP_IF_ZERO_MASK) >> MAC_PCU_MISC_MODE2_IGNORE_TXOP_IF_ZERO_LSB)
+#define MAC_PCU_MISC_MODE2_IGNORE_TXOP_IF_ZERO_SET(x) (((x) << MAC_PCU_MISC_MODE2_IGNORE_TXOP_IF_ZERO_LSB) & MAC_PCU_MISC_MODE2_IGNORE_TXOP_IF_ZERO_MASK)
+#define MAC_PCU_MISC_MODE2_IGNORE_TXOP_1ST_PKT_MSB 22
+#define MAC_PCU_MISC_MODE2_IGNORE_TXOP_1ST_PKT_LSB 22
+#define MAC_PCU_MISC_MODE2_IGNORE_TXOP_1ST_PKT_MASK 0x00400000
+#define MAC_PCU_MISC_MODE2_IGNORE_TXOP_1ST_PKT_GET(x) (((x) & MAC_PCU_MISC_MODE2_IGNORE_TXOP_1ST_PKT_MASK) >> MAC_PCU_MISC_MODE2_IGNORE_TXOP_1ST_PKT_LSB)
+#define MAC_PCU_MISC_MODE2_IGNORE_TXOP_1ST_PKT_SET(x) (((x) << MAC_PCU_MISC_MODE2_IGNORE_TXOP_1ST_PKT_LSB) & MAC_PCU_MISC_MODE2_IGNORE_TXOP_1ST_PKT_MASK)
+#define MAC_PCU_MISC_MODE2_CLEAR_MORE_FRAG_MSB 21
+#define MAC_PCU_MISC_MODE2_CLEAR_MORE_FRAG_LSB 21
+#define MAC_PCU_MISC_MODE2_CLEAR_MORE_FRAG_MASK 0x00200000
+#define MAC_PCU_MISC_MODE2_CLEAR_MORE_FRAG_GET(x) (((x) & MAC_PCU_MISC_MODE2_CLEAR_MORE_FRAG_MASK) >> MAC_PCU_MISC_MODE2_CLEAR_MORE_FRAG_LSB)
+#define MAC_PCU_MISC_MODE2_CLEAR_MORE_FRAG_SET(x) (((x) << MAC_PCU_MISC_MODE2_CLEAR_MORE_FRAG_LSB) & MAC_PCU_MISC_MODE2_CLEAR_MORE_FRAG_MASK)
+#define MAC_PCU_MISC_MODE2_BUG_28676_MSB 20
+#define MAC_PCU_MISC_MODE2_BUG_28676_LSB 20
+#define MAC_PCU_MISC_MODE2_BUG_28676_MASK 0x00100000
+#define MAC_PCU_MISC_MODE2_BUG_28676_GET(x) (((x) & MAC_PCU_MISC_MODE2_BUG_28676_MASK) >> MAC_PCU_MISC_MODE2_BUG_28676_LSB)
+#define MAC_PCU_MISC_MODE2_BUG_28676_SET(x) (((x) << MAC_PCU_MISC_MODE2_BUG_28676_LSB) & MAC_PCU_MISC_MODE2_BUG_28676_MASK)
+#define MAC_PCU_MISC_MODE2_DUR_ACCOUNT_BY_BA_MSB 19
+#define MAC_PCU_MISC_MODE2_DUR_ACCOUNT_BY_BA_LSB 19
+#define MAC_PCU_MISC_MODE2_DUR_ACCOUNT_BY_BA_MASK 0x00080000
+#define MAC_PCU_MISC_MODE2_DUR_ACCOUNT_BY_BA_GET(x) (((x) & MAC_PCU_MISC_MODE2_DUR_ACCOUNT_BY_BA_MASK) >> MAC_PCU_MISC_MODE2_DUR_ACCOUNT_BY_BA_LSB)
+#define MAC_PCU_MISC_MODE2_DUR_ACCOUNT_BY_BA_SET(x) (((x) << MAC_PCU_MISC_MODE2_DUR_ACCOUNT_BY_BA_LSB) & MAC_PCU_MISC_MODE2_DUR_ACCOUNT_BY_BA_MASK)
+#define MAC_PCU_MISC_MODE2_BC_MC_WAPI_MODE_MSB 18
+#define MAC_PCU_MISC_MODE2_BC_MC_WAPI_MODE_LSB 18
+#define MAC_PCU_MISC_MODE2_BC_MC_WAPI_MODE_MASK 0x00040000
+#define MAC_PCU_MISC_MODE2_BC_MC_WAPI_MODE_GET(x) (((x) & MAC_PCU_MISC_MODE2_BC_MC_WAPI_MODE_MASK) >> MAC_PCU_MISC_MODE2_BC_MC_WAPI_MODE_LSB)
+#define MAC_PCU_MISC_MODE2_BC_MC_WAPI_MODE_SET(x) (((x) << MAC_PCU_MISC_MODE2_BC_MC_WAPI_MODE_LSB) & MAC_PCU_MISC_MODE2_BC_MC_WAPI_MODE_MASK)
+#define MAC_PCU_MISC_MODE2_AGG_WEP_MSB 17
+#define MAC_PCU_MISC_MODE2_AGG_WEP_LSB 17
+#define MAC_PCU_MISC_MODE2_AGG_WEP_MASK 0x00020000
+#define MAC_PCU_MISC_MODE2_AGG_WEP_GET(x) (((x) & MAC_PCU_MISC_MODE2_AGG_WEP_MASK) >> MAC_PCU_MISC_MODE2_AGG_WEP_LSB)
+#define MAC_PCU_MISC_MODE2_AGG_WEP_SET(x) (((x) << MAC_PCU_MISC_MODE2_AGG_WEP_LSB) & MAC_PCU_MISC_MODE2_AGG_WEP_MASK)
+#define MAC_PCU_MISC_MODE2_ENABLE_LOAD_NAV_BEACON_DURATION_MSB 16
+#define MAC_PCU_MISC_MODE2_ENABLE_LOAD_NAV_BEACON_DURATION_LSB 16
+#define MAC_PCU_MISC_MODE2_ENABLE_LOAD_NAV_BEACON_DURATION_MASK 0x00010000
+#define MAC_PCU_MISC_MODE2_ENABLE_LOAD_NAV_BEACON_DURATION_GET(x) (((x) & MAC_PCU_MISC_MODE2_ENABLE_LOAD_NAV_BEACON_DURATION_MASK) >> MAC_PCU_MISC_MODE2_ENABLE_LOAD_NAV_BEACON_DURATION_LSB)
+#define MAC_PCU_MISC_MODE2_ENABLE_LOAD_NAV_BEACON_DURATION_SET(x) (((x) << MAC_PCU_MISC_MODE2_ENABLE_LOAD_NAV_BEACON_DURATION_LSB) & MAC_PCU_MISC_MODE2_ENABLE_LOAD_NAV_BEACON_DURATION_MASK)
+#define MAC_PCU_MISC_MODE2_MGMT_QOS_MSB 15
+#define MAC_PCU_MISC_MODE2_MGMT_QOS_LSB 8
+#define MAC_PCU_MISC_MODE2_MGMT_QOS_MASK 0x0000ff00
+#define MAC_PCU_MISC_MODE2_MGMT_QOS_GET(x) (((x) & MAC_PCU_MISC_MODE2_MGMT_QOS_MASK) >> MAC_PCU_MISC_MODE2_MGMT_QOS_LSB)
+#define MAC_PCU_MISC_MODE2_MGMT_QOS_SET(x) (((x) << MAC_PCU_MISC_MODE2_MGMT_QOS_LSB) & MAC_PCU_MISC_MODE2_MGMT_QOS_MASK)
+#define MAC_PCU_MISC_MODE2_CFP_IGNORE_MSB 7
+#define MAC_PCU_MISC_MODE2_CFP_IGNORE_LSB 7
+#define MAC_PCU_MISC_MODE2_CFP_IGNORE_MASK 0x00000080
+#define MAC_PCU_MISC_MODE2_CFP_IGNORE_GET(x) (((x) & MAC_PCU_MISC_MODE2_CFP_IGNORE_MASK) >> MAC_PCU_MISC_MODE2_CFP_IGNORE_LSB)
+#define MAC_PCU_MISC_MODE2_CFP_IGNORE_SET(x) (((x) << MAC_PCU_MISC_MODE2_CFP_IGNORE_LSB) & MAC_PCU_MISC_MODE2_CFP_IGNORE_MASK)
+#define MAC_PCU_MISC_MODE2_ADHOC_MCAST_KEYID_ENABLE_MSB 6
+#define MAC_PCU_MISC_MODE2_ADHOC_MCAST_KEYID_ENABLE_LSB 6
+#define MAC_PCU_MISC_MODE2_ADHOC_MCAST_KEYID_ENABLE_MASK 0x00000040
+#define MAC_PCU_MISC_MODE2_ADHOC_MCAST_KEYID_ENABLE_GET(x) (((x) & MAC_PCU_MISC_MODE2_ADHOC_MCAST_KEYID_ENABLE_MASK) >> MAC_PCU_MISC_MODE2_ADHOC_MCAST_KEYID_ENABLE_LSB)
+#define MAC_PCU_MISC_MODE2_ADHOC_MCAST_KEYID_ENABLE_SET(x) (((x) << MAC_PCU_MISC_MODE2_ADHOC_MCAST_KEYID_ENABLE_LSB) & MAC_PCU_MISC_MODE2_ADHOC_MCAST_KEYID_ENABLE_MASK)
+#define MAC_PCU_MISC_MODE2_RESERVED_2_MSB 5
+#define MAC_PCU_MISC_MODE2_RESERVED_2_LSB 5
+#define MAC_PCU_MISC_MODE2_RESERVED_2_MASK 0x00000020
+#define MAC_PCU_MISC_MODE2_RESERVED_2_GET(x) (((x) & MAC_PCU_MISC_MODE2_RESERVED_2_MASK) >> MAC_PCU_MISC_MODE2_RESERVED_2_LSB)
+#define MAC_PCU_MISC_MODE2_RESERVED_2_SET(x) (((x) << MAC_PCU_MISC_MODE2_RESERVED_2_LSB) & MAC_PCU_MISC_MODE2_RESERVED_2_MASK)
+#define MAC_PCU_MISC_MODE2_BUG_58057_FIX_ENABLE_MSB 4
+#define MAC_PCU_MISC_MODE2_BUG_58057_FIX_ENABLE_LSB 4
+#define MAC_PCU_MISC_MODE2_BUG_58057_FIX_ENABLE_MASK 0x00000010
+#define MAC_PCU_MISC_MODE2_BUG_58057_FIX_ENABLE_GET(x) (((x) & MAC_PCU_MISC_MODE2_BUG_58057_FIX_ENABLE_MASK) >> MAC_PCU_MISC_MODE2_BUG_58057_FIX_ENABLE_LSB)
+#define MAC_PCU_MISC_MODE2_BUG_58057_FIX_ENABLE_SET(x) (((x) << MAC_PCU_MISC_MODE2_BUG_58057_FIX_ENABLE_LSB) & MAC_PCU_MISC_MODE2_BUG_58057_FIX_ENABLE_MASK)
+#define MAC_PCU_MISC_MODE2_RESERVED_0_MSB 3
+#define MAC_PCU_MISC_MODE2_RESERVED_0_LSB 3
+#define MAC_PCU_MISC_MODE2_RESERVED_0_MASK 0x00000008
+#define MAC_PCU_MISC_MODE2_RESERVED_0_GET(x) (((x) & MAC_PCU_MISC_MODE2_RESERVED_0_MASK) >> MAC_PCU_MISC_MODE2_RESERVED_0_LSB)
+#define MAC_PCU_MISC_MODE2_RESERVED_0_SET(x) (((x) << MAC_PCU_MISC_MODE2_RESERVED_0_LSB) & MAC_PCU_MISC_MODE2_RESERVED_0_MASK)
+#define MAC_PCU_MISC_MODE2_NO_CRYPTO_FOR_NON_DATA_PKT_MSB 2
+#define MAC_PCU_MISC_MODE2_NO_CRYPTO_FOR_NON_DATA_PKT_LSB 2
+#define MAC_PCU_MISC_MODE2_NO_CRYPTO_FOR_NON_DATA_PKT_MASK 0x00000004
+#define MAC_PCU_MISC_MODE2_NO_CRYPTO_FOR_NON_DATA_PKT_GET(x) (((x) & MAC_PCU_MISC_MODE2_NO_CRYPTO_FOR_NON_DATA_PKT_MASK) >> MAC_PCU_MISC_MODE2_NO_CRYPTO_FOR_NON_DATA_PKT_LSB)
+#define MAC_PCU_MISC_MODE2_NO_CRYPTO_FOR_NON_DATA_PKT_SET(x) (((x) << MAC_PCU_MISC_MODE2_NO_CRYPTO_FOR_NON_DATA_PKT_LSB) & MAC_PCU_MISC_MODE2_NO_CRYPTO_FOR_NON_DATA_PKT_MASK)
+#define MAC_PCU_MISC_MODE2_MGMT_CRYPTO_ENABLE_MSB 1
+#define MAC_PCU_MISC_MODE2_MGMT_CRYPTO_ENABLE_LSB 1
+#define MAC_PCU_MISC_MODE2_MGMT_CRYPTO_ENABLE_MASK 0x00000002
+#define MAC_PCU_MISC_MODE2_MGMT_CRYPTO_ENABLE_GET(x) (((x) & MAC_PCU_MISC_MODE2_MGMT_CRYPTO_ENABLE_MASK) >> MAC_PCU_MISC_MODE2_MGMT_CRYPTO_ENABLE_LSB)
+#define MAC_PCU_MISC_MODE2_MGMT_CRYPTO_ENABLE_SET(x) (((x) << MAC_PCU_MISC_MODE2_MGMT_CRYPTO_ENABLE_LSB) & MAC_PCU_MISC_MODE2_MGMT_CRYPTO_ENABLE_MASK)
+#define MAC_PCU_MISC_MODE2_BUG_21532_FIX_ENABLE_MSB 0
+#define MAC_PCU_MISC_MODE2_BUG_21532_FIX_ENABLE_LSB 0
+#define MAC_PCU_MISC_MODE2_BUG_21532_FIX_ENABLE_MASK 0x00000001
+#define MAC_PCU_MISC_MODE2_BUG_21532_FIX_ENABLE_GET(x) (((x) & MAC_PCU_MISC_MODE2_BUG_21532_FIX_ENABLE_MASK) >> MAC_PCU_MISC_MODE2_BUG_21532_FIX_ENABLE_LSB)
+#define MAC_PCU_MISC_MODE2_BUG_21532_FIX_ENABLE_SET(x) (((x) << MAC_PCU_MISC_MODE2_BUG_21532_FIX_ENABLE_LSB) & MAC_PCU_MISC_MODE2_BUG_21532_FIX_ENABLE_MASK)
+
+#define MAC_PCU_ALT_AES_MUTE_MASK_ADDRESS 0x00008148
+#define MAC_PCU_ALT_AES_MUTE_MASK_OFFSET 0x00000148
+#define MAC_PCU_ALT_AES_MUTE_MASK_QOS_MSB 31
+#define MAC_PCU_ALT_AES_MUTE_MASK_QOS_LSB 16
+#define MAC_PCU_ALT_AES_MUTE_MASK_QOS_MASK 0xffff0000
+#define MAC_PCU_ALT_AES_MUTE_MASK_QOS_GET(x) (((x) & MAC_PCU_ALT_AES_MUTE_MASK_QOS_MASK) >> MAC_PCU_ALT_AES_MUTE_MASK_QOS_LSB)
+#define MAC_PCU_ALT_AES_MUTE_MASK_QOS_SET(x) (((x) << MAC_PCU_ALT_AES_MUTE_MASK_QOS_LSB) & MAC_PCU_ALT_AES_MUTE_MASK_QOS_MASK)
+
+#define MAC_PCU_AZIMUTH_TIME_STAMP_ADDRESS 0x0000814c
+#define MAC_PCU_AZIMUTH_TIME_STAMP_OFFSET 0x0000014c
+#define MAC_PCU_AZIMUTH_TIME_STAMP_VALUE_MSB 31
+#define MAC_PCU_AZIMUTH_TIME_STAMP_VALUE_LSB 0
+#define MAC_PCU_AZIMUTH_TIME_STAMP_VALUE_MASK 0xffffffff
+#define MAC_PCU_AZIMUTH_TIME_STAMP_VALUE_GET(x) (((x) & MAC_PCU_AZIMUTH_TIME_STAMP_VALUE_MASK) >> MAC_PCU_AZIMUTH_TIME_STAMP_VALUE_LSB)
+#define MAC_PCU_AZIMUTH_TIME_STAMP_VALUE_SET(x) (((x) << MAC_PCU_AZIMUTH_TIME_STAMP_VALUE_LSB) & MAC_PCU_AZIMUTH_TIME_STAMP_VALUE_MASK)
+
+#define MAC_PCU_MAX_CFP_DUR_ADDRESS 0x00008150
+#define MAC_PCU_MAX_CFP_DUR_OFFSET 0x00000150
+#define MAC_PCU_MAX_CFP_DUR_USEC_FRAC_DENOMINATOR_MSB 7
+#define MAC_PCU_MAX_CFP_DUR_USEC_FRAC_DENOMINATOR_LSB 4
+#define MAC_PCU_MAX_CFP_DUR_USEC_FRAC_DENOMINATOR_MASK 0x000000f0
+#define MAC_PCU_MAX_CFP_DUR_USEC_FRAC_DENOMINATOR_GET(x) (((x) & MAC_PCU_MAX_CFP_DUR_USEC_FRAC_DENOMINATOR_MASK) >> MAC_PCU_MAX_CFP_DUR_USEC_FRAC_DENOMINATOR_LSB)
+#define MAC_PCU_MAX_CFP_DUR_USEC_FRAC_DENOMINATOR_SET(x) (((x) << MAC_PCU_MAX_CFP_DUR_USEC_FRAC_DENOMINATOR_LSB) & MAC_PCU_MAX_CFP_DUR_USEC_FRAC_DENOMINATOR_MASK)
+#define MAC_PCU_MAX_CFP_DUR_USEC_FRAC_NUMERATOR_MSB 3
+#define MAC_PCU_MAX_CFP_DUR_USEC_FRAC_NUMERATOR_LSB 0
+#define MAC_PCU_MAX_CFP_DUR_USEC_FRAC_NUMERATOR_MASK 0x0000000f
+#define MAC_PCU_MAX_CFP_DUR_USEC_FRAC_NUMERATOR_GET(x) (((x) & MAC_PCU_MAX_CFP_DUR_USEC_FRAC_NUMERATOR_MASK) >> MAC_PCU_MAX_CFP_DUR_USEC_FRAC_NUMERATOR_LSB)
+#define MAC_PCU_MAX_CFP_DUR_USEC_FRAC_NUMERATOR_SET(x) (((x) << MAC_PCU_MAX_CFP_DUR_USEC_FRAC_NUMERATOR_LSB) & MAC_PCU_MAX_CFP_DUR_USEC_FRAC_NUMERATOR_MASK)
+
+#define MAC_PCU_HCF_TIMEOUT_ADDRESS 0x00008154
+#define MAC_PCU_HCF_TIMEOUT_OFFSET 0x00000154
+#define MAC_PCU_HCF_TIMEOUT_VALUE_MSB 15
+#define MAC_PCU_HCF_TIMEOUT_VALUE_LSB 0
+#define MAC_PCU_HCF_TIMEOUT_VALUE_MASK 0x0000ffff
+#define MAC_PCU_HCF_TIMEOUT_VALUE_GET(x) (((x) & MAC_PCU_HCF_TIMEOUT_VALUE_MASK) >> MAC_PCU_HCF_TIMEOUT_VALUE_LSB)
+#define MAC_PCU_HCF_TIMEOUT_VALUE_SET(x) (((x) << MAC_PCU_HCF_TIMEOUT_VALUE_LSB) & MAC_PCU_HCF_TIMEOUT_VALUE_MASK)
+
+#define MAC_PCU_BLUETOOTH_WEIGHTS2_ADDRESS 0x00008158
+#define MAC_PCU_BLUETOOTH_WEIGHTS2_OFFSET 0x00000158
+#define MAC_PCU_BLUETOOTH_WEIGHTS2_WL_WEIGHT_CONTD_MSB 31
+#define MAC_PCU_BLUETOOTH_WEIGHTS2_WL_WEIGHT_CONTD_LSB 16
+#define MAC_PCU_BLUETOOTH_WEIGHTS2_WL_WEIGHT_CONTD_MASK 0xffff0000
+#define MAC_PCU_BLUETOOTH_WEIGHTS2_WL_WEIGHT_CONTD_GET(x) (((x) & MAC_PCU_BLUETOOTH_WEIGHTS2_WL_WEIGHT_CONTD_MASK) >> MAC_PCU_BLUETOOTH_WEIGHTS2_WL_WEIGHT_CONTD_LSB)
+#define MAC_PCU_BLUETOOTH_WEIGHTS2_WL_WEIGHT_CONTD_SET(x) (((x) << MAC_PCU_BLUETOOTH_WEIGHTS2_WL_WEIGHT_CONTD_LSB) & MAC_PCU_BLUETOOTH_WEIGHTS2_WL_WEIGHT_CONTD_MASK)
+
+#define MAC_PCU_BLUETOOTH_TSF_BT_ACTIVE_ADDRESS 0x0000815c
+#define MAC_PCU_BLUETOOTH_TSF_BT_ACTIVE_OFFSET 0x0000015c
+#define MAC_PCU_BLUETOOTH_TSF_BT_ACTIVE_VALUE_MSB 31
+#define MAC_PCU_BLUETOOTH_TSF_BT_ACTIVE_VALUE_LSB 0
+#define MAC_PCU_BLUETOOTH_TSF_BT_ACTIVE_VALUE_MASK 0xffffffff
+#define MAC_PCU_BLUETOOTH_TSF_BT_ACTIVE_VALUE_GET(x) (((x) & MAC_PCU_BLUETOOTH_TSF_BT_ACTIVE_VALUE_MASK) >> MAC_PCU_BLUETOOTH_TSF_BT_ACTIVE_VALUE_LSB)
+#define MAC_PCU_BLUETOOTH_TSF_BT_ACTIVE_VALUE_SET(x) (((x) << MAC_PCU_BLUETOOTH_TSF_BT_ACTIVE_VALUE_LSB) & MAC_PCU_BLUETOOTH_TSF_BT_ACTIVE_VALUE_MASK)
+
+#define MAC_PCU_BLUETOOTH_TSF_BT_PRIORITY_ADDRESS 0x00008160
+#define MAC_PCU_BLUETOOTH_TSF_BT_PRIORITY_OFFSET 0x00000160
+#define MAC_PCU_BLUETOOTH_TSF_BT_PRIORITY_VALUE_MSB 31
+#define MAC_PCU_BLUETOOTH_TSF_BT_PRIORITY_VALUE_LSB 0
+#define MAC_PCU_BLUETOOTH_TSF_BT_PRIORITY_VALUE_MASK 0xffffffff
+#define MAC_PCU_BLUETOOTH_TSF_BT_PRIORITY_VALUE_GET(x) (((x) & MAC_PCU_BLUETOOTH_TSF_BT_PRIORITY_VALUE_MASK) >> MAC_PCU_BLUETOOTH_TSF_BT_PRIORITY_VALUE_LSB)
+#define MAC_PCU_BLUETOOTH_TSF_BT_PRIORITY_VALUE_SET(x) (((x) << MAC_PCU_BLUETOOTH_TSF_BT_PRIORITY_VALUE_LSB) & MAC_PCU_BLUETOOTH_TSF_BT_PRIORITY_VALUE_MASK)
+
+#define MAC_PCU_BLUETOOTH_MODE3_ADDRESS 0x00008164
+#define MAC_PCU_BLUETOOTH_MODE3_OFFSET 0x00000164
+#define MAC_PCU_BLUETOOTH_MODE3_BT_PRIORITY_EXTEND_THRES_MSB 31
+#define MAC_PCU_BLUETOOTH_MODE3_BT_PRIORITY_EXTEND_THRES_LSB 28
+#define MAC_PCU_BLUETOOTH_MODE3_BT_PRIORITY_EXTEND_THRES_MASK 0xf0000000
+#define MAC_PCU_BLUETOOTH_MODE3_BT_PRIORITY_EXTEND_THRES_GET(x) (((x) & MAC_PCU_BLUETOOTH_MODE3_BT_PRIORITY_EXTEND_THRES_MASK) >> MAC_PCU_BLUETOOTH_MODE3_BT_PRIORITY_EXTEND_THRES_LSB)
+#define MAC_PCU_BLUETOOTH_MODE3_BT_PRIORITY_EXTEND_THRES_SET(x) (((x) << MAC_PCU_BLUETOOTH_MODE3_BT_PRIORITY_EXTEND_THRES_LSB) & MAC_PCU_BLUETOOTH_MODE3_BT_PRIORITY_EXTEND_THRES_MASK)
+#define MAC_PCU_BLUETOOTH_MODE3_BT_TX_ON_EN_MSB 27
+#define MAC_PCU_BLUETOOTH_MODE3_BT_TX_ON_EN_LSB 27
+#define MAC_PCU_BLUETOOTH_MODE3_BT_TX_ON_EN_MASK 0x08000000
+#define MAC_PCU_BLUETOOTH_MODE3_BT_TX_ON_EN_GET(x) (((x) & MAC_PCU_BLUETOOTH_MODE3_BT_TX_ON_EN_MASK) >> MAC_PCU_BLUETOOTH_MODE3_BT_TX_ON_EN_LSB)
+#define MAC_PCU_BLUETOOTH_MODE3_BT_TX_ON_EN_SET(x) (((x) << MAC_PCU_BLUETOOTH_MODE3_BT_TX_ON_EN_LSB) & MAC_PCU_BLUETOOTH_MODE3_BT_TX_ON_EN_MASK)
+#define MAC_PCU_BLUETOOTH_MODE3_SLOT_SLOP_MSB 26
+#define MAC_PCU_BLUETOOTH_MODE3_SLOT_SLOP_LSB 25
+#define MAC_PCU_BLUETOOTH_MODE3_SLOT_SLOP_MASK 0x06000000
+#define MAC_PCU_BLUETOOTH_MODE3_SLOT_SLOP_GET(x) (((x) & MAC_PCU_BLUETOOTH_MODE3_SLOT_SLOP_MASK) >> MAC_PCU_BLUETOOTH_MODE3_SLOT_SLOP_LSB)
+#define MAC_PCU_BLUETOOTH_MODE3_SLOT_SLOP_SET(x) (((x) << MAC_PCU_BLUETOOTH_MODE3_SLOT_SLOP_LSB) & MAC_PCU_BLUETOOTH_MODE3_SLOT_SLOP_MASK)
+#define MAC_PCU_BLUETOOTH_MODE3_DYNAMIC_TOGGLE_WLA_EN_MSB 24
+#define MAC_PCU_BLUETOOTH_MODE3_DYNAMIC_TOGGLE_WLA_EN_LSB 24
+#define MAC_PCU_BLUETOOTH_MODE3_DYNAMIC_TOGGLE_WLA_EN_MASK 0x01000000
+#define MAC_PCU_BLUETOOTH_MODE3_DYNAMIC_TOGGLE_WLA_EN_GET(x) (((x) & MAC_PCU_BLUETOOTH_MODE3_DYNAMIC_TOGGLE_WLA_EN_MASK) >> MAC_PCU_BLUETOOTH_MODE3_DYNAMIC_TOGGLE_WLA_EN_LSB)
+#define MAC_PCU_BLUETOOTH_MODE3_DYNAMIC_TOGGLE_WLA_EN_SET(x) (((x) << MAC_PCU_BLUETOOTH_MODE3_DYNAMIC_TOGGLE_WLA_EN_LSB) & MAC_PCU_BLUETOOTH_MODE3_DYNAMIC_TOGGLE_WLA_EN_MASK)
+#define MAC_PCU_BLUETOOTH_MODE3_DYNAMIC_PRI_EN_MSB 23
+#define MAC_PCU_BLUETOOTH_MODE3_DYNAMIC_PRI_EN_LSB 23
+#define MAC_PCU_BLUETOOTH_MODE3_DYNAMIC_PRI_EN_MASK 0x00800000
+#define MAC_PCU_BLUETOOTH_MODE3_DYNAMIC_PRI_EN_GET(x) (((x) & MAC_PCU_BLUETOOTH_MODE3_DYNAMIC_PRI_EN_MASK) >> MAC_PCU_BLUETOOTH_MODE3_DYNAMIC_PRI_EN_LSB)
+#define MAC_PCU_BLUETOOTH_MODE3_DYNAMIC_PRI_EN_SET(x) (((x) << MAC_PCU_BLUETOOTH_MODE3_DYNAMIC_PRI_EN_LSB) & MAC_PCU_BLUETOOTH_MODE3_DYNAMIC_PRI_EN_MASK)
+#define MAC_PCU_BLUETOOTH_MODE3_RFGAIN_LOCK_SRC_MSB 22
+#define MAC_PCU_BLUETOOTH_MODE3_RFGAIN_LOCK_SRC_LSB 22
+#define MAC_PCU_BLUETOOTH_MODE3_RFGAIN_LOCK_SRC_MASK 0x00400000
+#define MAC_PCU_BLUETOOTH_MODE3_RFGAIN_LOCK_SRC_GET(x) (((x) & MAC_PCU_BLUETOOTH_MODE3_RFGAIN_LOCK_SRC_MASK) >> MAC_PCU_BLUETOOTH_MODE3_RFGAIN_LOCK_SRC_LSB)
+#define MAC_PCU_BLUETOOTH_MODE3_RFGAIN_LOCK_SRC_SET(x) (((x) << MAC_PCU_BLUETOOTH_MODE3_RFGAIN_LOCK_SRC_LSB) & MAC_PCU_BLUETOOTH_MODE3_RFGAIN_LOCK_SRC_MASK)
+#define MAC_PCU_BLUETOOTH_MODE3_WL_PRIORITY_OFFSET_EN_MSB 21
+#define MAC_PCU_BLUETOOTH_MODE3_WL_PRIORITY_OFFSET_EN_LSB 21
+#define MAC_PCU_BLUETOOTH_MODE3_WL_PRIORITY_OFFSET_EN_MASK 0x00200000
+#define MAC_PCU_BLUETOOTH_MODE3_WL_PRIORITY_OFFSET_EN_GET(x) (((x) & MAC_PCU_BLUETOOTH_MODE3_WL_PRIORITY_OFFSET_EN_MASK) >> MAC_PCU_BLUETOOTH_MODE3_WL_PRIORITY_OFFSET_EN_LSB)
+#define MAC_PCU_BLUETOOTH_MODE3_WL_PRIORITY_OFFSET_EN_SET(x) (((x) << MAC_PCU_BLUETOOTH_MODE3_WL_PRIORITY_OFFSET_EN_LSB) & MAC_PCU_BLUETOOTH_MODE3_WL_PRIORITY_OFFSET_EN_MASK)
+#define MAC_PCU_BLUETOOTH_MODE3_SHARED_RX_MSB 20
+#define MAC_PCU_BLUETOOTH_MODE3_SHARED_RX_LSB 20
+#define MAC_PCU_BLUETOOTH_MODE3_SHARED_RX_MASK 0x00100000
+#define MAC_PCU_BLUETOOTH_MODE3_SHARED_RX_GET(x) (((x) & MAC_PCU_BLUETOOTH_MODE3_SHARED_RX_MASK) >> MAC_PCU_BLUETOOTH_MODE3_SHARED_RX_LSB)
+#define MAC_PCU_BLUETOOTH_MODE3_SHARED_RX_SET(x) (((x) << MAC_PCU_BLUETOOTH_MODE3_SHARED_RX_LSB) & MAC_PCU_BLUETOOTH_MODE3_SHARED_RX_MASK)
+#define MAC_PCU_BLUETOOTH_MODE3_ALLOW_CONCURRENT_ACCESS_MSB 19
+#define MAC_PCU_BLUETOOTH_MODE3_ALLOW_CONCURRENT_ACCESS_LSB 16
+#define MAC_PCU_BLUETOOTH_MODE3_ALLOW_CONCURRENT_ACCESS_MASK 0x000f0000
+#define MAC_PCU_BLUETOOTH_MODE3_ALLOW_CONCURRENT_ACCESS_GET(x) (((x) & MAC_PCU_BLUETOOTH_MODE3_ALLOW_CONCURRENT_ACCESS_MASK) >> MAC_PCU_BLUETOOTH_MODE3_ALLOW_CONCURRENT_ACCESS_LSB)
+#define MAC_PCU_BLUETOOTH_MODE3_ALLOW_CONCURRENT_ACCESS_SET(x) (((x) << MAC_PCU_BLUETOOTH_MODE3_ALLOW_CONCURRENT_ACCESS_LSB) & MAC_PCU_BLUETOOTH_MODE3_ALLOW_CONCURRENT_ACCESS_MASK)
+#define MAC_PCU_BLUETOOTH_MODE3_WL_QC_TIME_MSB 15
+#define MAC_PCU_BLUETOOTH_MODE3_WL_QC_TIME_LSB 8
+#define MAC_PCU_BLUETOOTH_MODE3_WL_QC_TIME_MASK 0x0000ff00
+#define MAC_PCU_BLUETOOTH_MODE3_WL_QC_TIME_GET(x) (((x) & MAC_PCU_BLUETOOTH_MODE3_WL_QC_TIME_MASK) >> MAC_PCU_BLUETOOTH_MODE3_WL_QC_TIME_LSB)
+#define MAC_PCU_BLUETOOTH_MODE3_WL_QC_TIME_SET(x) (((x) << MAC_PCU_BLUETOOTH_MODE3_WL_QC_TIME_LSB) & MAC_PCU_BLUETOOTH_MODE3_WL_QC_TIME_MASK)
+#define MAC_PCU_BLUETOOTH_MODE3_WL_ACTIVE_TIME_MSB 7
+#define MAC_PCU_BLUETOOTH_MODE3_WL_ACTIVE_TIME_LSB 0
+#define MAC_PCU_BLUETOOTH_MODE3_WL_ACTIVE_TIME_MASK 0x000000ff
+#define MAC_PCU_BLUETOOTH_MODE3_WL_ACTIVE_TIME_GET(x) (((x) & MAC_PCU_BLUETOOTH_MODE3_WL_ACTIVE_TIME_MASK) >> MAC_PCU_BLUETOOTH_MODE3_WL_ACTIVE_TIME_LSB)
+#define MAC_PCU_BLUETOOTH_MODE3_WL_ACTIVE_TIME_SET(x) (((x) << MAC_PCU_BLUETOOTH_MODE3_WL_ACTIVE_TIME_LSB) & MAC_PCU_BLUETOOTH_MODE3_WL_ACTIVE_TIME_MASK)
+
+#define MAC_PCU_BLUETOOTH_MODE4_ADDRESS 0x00008168
+#define MAC_PCU_BLUETOOTH_MODE4_OFFSET 0x00000168
+#define MAC_PCU_BLUETOOTH_MODE4_BT_PRIORITY_EXTEND_MSB 31
+#define MAC_PCU_BLUETOOTH_MODE4_BT_PRIORITY_EXTEND_LSB 16
+#define MAC_PCU_BLUETOOTH_MODE4_BT_PRIORITY_EXTEND_MASK 0xffff0000
+#define MAC_PCU_BLUETOOTH_MODE4_BT_PRIORITY_EXTEND_GET(x) (((x) & MAC_PCU_BLUETOOTH_MODE4_BT_PRIORITY_EXTEND_MASK) >> MAC_PCU_BLUETOOTH_MODE4_BT_PRIORITY_EXTEND_LSB)
+#define MAC_PCU_BLUETOOTH_MODE4_BT_PRIORITY_EXTEND_SET(x) (((x) << MAC_PCU_BLUETOOTH_MODE4_BT_PRIORITY_EXTEND_LSB) & MAC_PCU_BLUETOOTH_MODE4_BT_PRIORITY_EXTEND_MASK)
+#define MAC_PCU_BLUETOOTH_MODE4_BT_ACTIVE_EXTEND_MSB 15
+#define MAC_PCU_BLUETOOTH_MODE4_BT_ACTIVE_EXTEND_LSB 0
+#define MAC_PCU_BLUETOOTH_MODE4_BT_ACTIVE_EXTEND_MASK 0x0000ffff
+#define MAC_PCU_BLUETOOTH_MODE4_BT_ACTIVE_EXTEND_GET(x) (((x) & MAC_PCU_BLUETOOTH_MODE4_BT_ACTIVE_EXTEND_MASK) >> MAC_PCU_BLUETOOTH_MODE4_BT_ACTIVE_EXTEND_LSB)
+#define MAC_PCU_BLUETOOTH_MODE4_BT_ACTIVE_EXTEND_SET(x) (((x) << MAC_PCU_BLUETOOTH_MODE4_BT_ACTIVE_EXTEND_LSB) & MAC_PCU_BLUETOOTH_MODE4_BT_ACTIVE_EXTEND_MASK)
+
+#define MAC_PCU_BT_BT_ADDRESS 0x00008200
+#define MAC_PCU_BT_BT_OFFSET 0x00000200
+#define MAC_PCU_BT_BT_WEIGHT_MSB 31
+#define MAC_PCU_BT_BT_WEIGHT_LSB 0
+#define MAC_PCU_BT_BT_WEIGHT_MASK 0xffffffff
+#define MAC_PCU_BT_BT_WEIGHT_GET(x) (((x) & MAC_PCU_BT_BT_WEIGHT_MASK) >> MAC_PCU_BT_BT_WEIGHT_LSB)
+#define MAC_PCU_BT_BT_WEIGHT_SET(x) (((x) << MAC_PCU_BT_BT_WEIGHT_LSB) & MAC_PCU_BT_BT_WEIGHT_MASK)
+
+#define MAC_PCU_BT_BT_ASYNC_ADDRESS 0x00008300
+#define MAC_PCU_BT_BT_ASYNC_OFFSET 0x00000300
+#define MAC_PCU_BT_BT_ASYNC_RXLP_WEIGHT_MSB 15
+#define MAC_PCU_BT_BT_ASYNC_RXLP_WEIGHT_LSB 12
+#define MAC_PCU_BT_BT_ASYNC_RXLP_WEIGHT_MASK 0x0000f000
+#define MAC_PCU_BT_BT_ASYNC_RXLP_WEIGHT_GET(x) (((x) & MAC_PCU_BT_BT_ASYNC_RXLP_WEIGHT_MASK) >> MAC_PCU_BT_BT_ASYNC_RXLP_WEIGHT_LSB)
+#define MAC_PCU_BT_BT_ASYNC_RXLP_WEIGHT_SET(x) (((x) << MAC_PCU_BT_BT_ASYNC_RXLP_WEIGHT_LSB) & MAC_PCU_BT_BT_ASYNC_RXLP_WEIGHT_MASK)
+#define MAC_PCU_BT_BT_ASYNC_RXHP_WEIGHT_MSB 11
+#define MAC_PCU_BT_BT_ASYNC_RXHP_WEIGHT_LSB 8
+#define MAC_PCU_BT_BT_ASYNC_RXHP_WEIGHT_MASK 0x00000f00
+#define MAC_PCU_BT_BT_ASYNC_RXHP_WEIGHT_GET(x) (((x) & MAC_PCU_BT_BT_ASYNC_RXHP_WEIGHT_MASK) >> MAC_PCU_BT_BT_ASYNC_RXHP_WEIGHT_LSB)
+#define MAC_PCU_BT_BT_ASYNC_RXHP_WEIGHT_SET(x) (((x) << MAC_PCU_BT_BT_ASYNC_RXHP_WEIGHT_LSB) & MAC_PCU_BT_BT_ASYNC_RXHP_WEIGHT_MASK)
+#define MAC_PCU_BT_BT_ASYNC_TXLP_WEIGHT_MSB 7
+#define MAC_PCU_BT_BT_ASYNC_TXLP_WEIGHT_LSB 4
+#define MAC_PCU_BT_BT_ASYNC_TXLP_WEIGHT_MASK 0x000000f0
+#define MAC_PCU_BT_BT_ASYNC_TXLP_WEIGHT_GET(x) (((x) & MAC_PCU_BT_BT_ASYNC_TXLP_WEIGHT_MASK) >> MAC_PCU_BT_BT_ASYNC_TXLP_WEIGHT_LSB)
+#define MAC_PCU_BT_BT_ASYNC_TXLP_WEIGHT_SET(x) (((x) << MAC_PCU_BT_BT_ASYNC_TXLP_WEIGHT_LSB) & MAC_PCU_BT_BT_ASYNC_TXLP_WEIGHT_MASK)
+#define MAC_PCU_BT_BT_ASYNC_TXHP_WEIGHT_MSB 3
+#define MAC_PCU_BT_BT_ASYNC_TXHP_WEIGHT_LSB 0
+#define MAC_PCU_BT_BT_ASYNC_TXHP_WEIGHT_MASK 0x0000000f
+#define MAC_PCU_BT_BT_ASYNC_TXHP_WEIGHT_GET(x) (((x) & MAC_PCU_BT_BT_ASYNC_TXHP_WEIGHT_MASK) >> MAC_PCU_BT_BT_ASYNC_TXHP_WEIGHT_LSB)
+#define MAC_PCU_BT_BT_ASYNC_TXHP_WEIGHT_SET(x) (((x) << MAC_PCU_BT_BT_ASYNC_TXHP_WEIGHT_LSB) & MAC_PCU_BT_BT_ASYNC_TXHP_WEIGHT_MASK)
+
+#define MAC_PCU_BT_WL_1_ADDRESS 0x00008304
+#define MAC_PCU_BT_WL_1_OFFSET 0x00000304
+#define MAC_PCU_BT_WL_1_WEIGHT_MSB 31
+#define MAC_PCU_BT_WL_1_WEIGHT_LSB 0
+#define MAC_PCU_BT_WL_1_WEIGHT_MASK 0xffffffff
+#define MAC_PCU_BT_WL_1_WEIGHT_GET(x) (((x) & MAC_PCU_BT_WL_1_WEIGHT_MASK) >> MAC_PCU_BT_WL_1_WEIGHT_LSB)
+#define MAC_PCU_BT_WL_1_WEIGHT_SET(x) (((x) << MAC_PCU_BT_WL_1_WEIGHT_LSB) & MAC_PCU_BT_WL_1_WEIGHT_MASK)
+
+#define MAC_PCU_BT_WL_2_ADDRESS 0x00008308
+#define MAC_PCU_BT_WL_2_OFFSET 0x00000308
+#define MAC_PCU_BT_WL_2_WEIGHT_MSB 31
+#define MAC_PCU_BT_WL_2_WEIGHT_LSB 0
+#define MAC_PCU_BT_WL_2_WEIGHT_MASK 0xffffffff
+#define MAC_PCU_BT_WL_2_WEIGHT_GET(x) (((x) & MAC_PCU_BT_WL_2_WEIGHT_MASK) >> MAC_PCU_BT_WL_2_WEIGHT_LSB)
+#define MAC_PCU_BT_WL_2_WEIGHT_SET(x) (((x) << MAC_PCU_BT_WL_2_WEIGHT_LSB) & MAC_PCU_BT_WL_2_WEIGHT_MASK)
+
+#define MAC_PCU_BT_WL_3_ADDRESS 0x0000830c
+#define MAC_PCU_BT_WL_3_OFFSET 0x0000030c
+#define MAC_PCU_BT_WL_3_WEIGHT_MSB 31
+#define MAC_PCU_BT_WL_3_WEIGHT_LSB 0
+#define MAC_PCU_BT_WL_3_WEIGHT_MASK 0xffffffff
+#define MAC_PCU_BT_WL_3_WEIGHT_GET(x) (((x) & MAC_PCU_BT_WL_3_WEIGHT_MASK) >> MAC_PCU_BT_WL_3_WEIGHT_LSB)
+#define MAC_PCU_BT_WL_3_WEIGHT_SET(x) (((x) << MAC_PCU_BT_WL_3_WEIGHT_LSB) & MAC_PCU_BT_WL_3_WEIGHT_MASK)
+
+#define MAC_PCU_BT_WL_4_ADDRESS 0x00008310
+#define MAC_PCU_BT_WL_4_OFFSET 0x00000310
+#define MAC_PCU_BT_WL_4_WEIGHT_MSB 31
+#define MAC_PCU_BT_WL_4_WEIGHT_LSB 0
+#define MAC_PCU_BT_WL_4_WEIGHT_MASK 0xffffffff
+#define MAC_PCU_BT_WL_4_WEIGHT_GET(x) (((x) & MAC_PCU_BT_WL_4_WEIGHT_MASK) >> MAC_PCU_BT_WL_4_WEIGHT_LSB)
+#define MAC_PCU_BT_WL_4_WEIGHT_SET(x) (((x) << MAC_PCU_BT_WL_4_WEIGHT_LSB) & MAC_PCU_BT_WL_4_WEIGHT_MASK)
+
+#define MAC_PCU_COEX_EPTA_ADDRESS 0x00008314
+#define MAC_PCU_COEX_EPTA_OFFSET 0x00000314
+#define MAC_PCU_COEX_EPTA_WT_IDX_MSB 12
+#define MAC_PCU_COEX_EPTA_WT_IDX_LSB 6
+#define MAC_PCU_COEX_EPTA_WT_IDX_MASK 0x00001fc0
+#define MAC_PCU_COEX_EPTA_WT_IDX_GET(x) (((x) & MAC_PCU_COEX_EPTA_WT_IDX_MASK) >> MAC_PCU_COEX_EPTA_WT_IDX_LSB)
+#define MAC_PCU_COEX_EPTA_WT_IDX_SET(x) (((x) << MAC_PCU_COEX_EPTA_WT_IDX_LSB) & MAC_PCU_COEX_EPTA_WT_IDX_MASK)
+#define MAC_PCU_COEX_EPTA_LINKID_MSB 5
+#define MAC_PCU_COEX_EPTA_LINKID_LSB 0
+#define MAC_PCU_COEX_EPTA_LINKID_MASK 0x0000003f
+#define MAC_PCU_COEX_EPTA_LINKID_GET(x) (((x) & MAC_PCU_COEX_EPTA_LINKID_MASK) >> MAC_PCU_COEX_EPTA_LINKID_LSB)
+#define MAC_PCU_COEX_EPTA_LINKID_SET(x) (((x) << MAC_PCU_COEX_EPTA_LINKID_LSB) & MAC_PCU_COEX_EPTA_LINKID_MASK)
+
+#define MAC_PCU_COEX_LNAMAXGAIN1_ADDRESS 0x00008318
+#define MAC_PCU_COEX_LNAMAXGAIN1_OFFSET 0x00000318
+#define MAC_PCU_COEX_LNAMAXGAIN1_MAXGAIN4_MSB 31
+#define MAC_PCU_COEX_LNAMAXGAIN1_MAXGAIN4_LSB 24
+#define MAC_PCU_COEX_LNAMAXGAIN1_MAXGAIN4_MASK 0xff000000
+#define MAC_PCU_COEX_LNAMAXGAIN1_MAXGAIN4_GET(x) (((x) & MAC_PCU_COEX_LNAMAXGAIN1_MAXGAIN4_MASK) >> MAC_PCU_COEX_LNAMAXGAIN1_MAXGAIN4_LSB)
+#define MAC_PCU_COEX_LNAMAXGAIN1_MAXGAIN4_SET(x) (((x) << MAC_PCU_COEX_LNAMAXGAIN1_MAXGAIN4_LSB) & MAC_PCU_COEX_LNAMAXGAIN1_MAXGAIN4_MASK)
+#define MAC_PCU_COEX_LNAMAXGAIN1_MAXGAIN3_MSB 23
+#define MAC_PCU_COEX_LNAMAXGAIN1_MAXGAIN3_LSB 16
+#define MAC_PCU_COEX_LNAMAXGAIN1_MAXGAIN3_MASK 0x00ff0000
+#define MAC_PCU_COEX_LNAMAXGAIN1_MAXGAIN3_GET(x) (((x) & MAC_PCU_COEX_LNAMAXGAIN1_MAXGAIN3_MASK) >> MAC_PCU_COEX_LNAMAXGAIN1_MAXGAIN3_LSB)
+#define MAC_PCU_COEX_LNAMAXGAIN1_MAXGAIN3_SET(x) (((x) << MAC_PCU_COEX_LNAMAXGAIN1_MAXGAIN3_LSB) & MAC_PCU_COEX_LNAMAXGAIN1_MAXGAIN3_MASK)
+#define MAC_PCU_COEX_LNAMAXGAIN1_MAXGAIN2_MSB 15
+#define MAC_PCU_COEX_LNAMAXGAIN1_MAXGAIN2_LSB 8
+#define MAC_PCU_COEX_LNAMAXGAIN1_MAXGAIN2_MASK 0x0000ff00
+#define MAC_PCU_COEX_LNAMAXGAIN1_MAXGAIN2_GET(x) (((x) & MAC_PCU_COEX_LNAMAXGAIN1_MAXGAIN2_MASK) >> MAC_PCU_COEX_LNAMAXGAIN1_MAXGAIN2_LSB)
+#define MAC_PCU_COEX_LNAMAXGAIN1_MAXGAIN2_SET(x) (((x) << MAC_PCU_COEX_LNAMAXGAIN1_MAXGAIN2_LSB) & MAC_PCU_COEX_LNAMAXGAIN1_MAXGAIN2_MASK)
+#define MAC_PCU_COEX_LNAMAXGAIN1_MAXGAIN1_MSB 7
+#define MAC_PCU_COEX_LNAMAXGAIN1_MAXGAIN1_LSB 0
+#define MAC_PCU_COEX_LNAMAXGAIN1_MAXGAIN1_MASK 0x000000ff
+#define MAC_PCU_COEX_LNAMAXGAIN1_MAXGAIN1_GET(x) (((x) & MAC_PCU_COEX_LNAMAXGAIN1_MAXGAIN1_MASK) >> MAC_PCU_COEX_LNAMAXGAIN1_MAXGAIN1_LSB)
+#define MAC_PCU_COEX_LNAMAXGAIN1_MAXGAIN1_SET(x) (((x) << MAC_PCU_COEX_LNAMAXGAIN1_MAXGAIN1_LSB) & MAC_PCU_COEX_LNAMAXGAIN1_MAXGAIN1_MASK)
+
+#define MAC_PCU_COEX_LNAMAXGAIN2_ADDRESS 0x0000831c
+#define MAC_PCU_COEX_LNAMAXGAIN2_OFFSET 0x0000031c
+#define MAC_PCU_COEX_LNAMAXGAIN2_MAXGAIN4_MSB 31
+#define MAC_PCU_COEX_LNAMAXGAIN2_MAXGAIN4_LSB 24
+#define MAC_PCU_COEX_LNAMAXGAIN2_MAXGAIN4_MASK 0xff000000
+#define MAC_PCU_COEX_LNAMAXGAIN2_MAXGAIN4_GET(x) (((x) & MAC_PCU_COEX_LNAMAXGAIN2_MAXGAIN4_MASK) >> MAC_PCU_COEX_LNAMAXGAIN2_MAXGAIN4_LSB)
+#define MAC_PCU_COEX_LNAMAXGAIN2_MAXGAIN4_SET(x) (((x) << MAC_PCU_COEX_LNAMAXGAIN2_MAXGAIN4_LSB) & MAC_PCU_COEX_LNAMAXGAIN2_MAXGAIN4_MASK)
+#define MAC_PCU_COEX_LNAMAXGAIN2_MAXGAIN3_MSB 23
+#define MAC_PCU_COEX_LNAMAXGAIN2_MAXGAIN3_LSB 16
+#define MAC_PCU_COEX_LNAMAXGAIN2_MAXGAIN3_MASK 0x00ff0000
+#define MAC_PCU_COEX_LNAMAXGAIN2_MAXGAIN3_GET(x) (((x) & MAC_PCU_COEX_LNAMAXGAIN2_MAXGAIN3_MASK) >> MAC_PCU_COEX_LNAMAXGAIN2_MAXGAIN3_LSB)
+#define MAC_PCU_COEX_LNAMAXGAIN2_MAXGAIN3_SET(x) (((x) << MAC_PCU_COEX_LNAMAXGAIN2_MAXGAIN3_LSB) & MAC_PCU_COEX_LNAMAXGAIN2_MAXGAIN3_MASK)
+#define MAC_PCU_COEX_LNAMAXGAIN2_MAXGAIN2_MSB 15
+#define MAC_PCU_COEX_LNAMAXGAIN2_MAXGAIN2_LSB 8
+#define MAC_PCU_COEX_LNAMAXGAIN2_MAXGAIN2_MASK 0x0000ff00
+#define MAC_PCU_COEX_LNAMAXGAIN2_MAXGAIN2_GET(x) (((x) & MAC_PCU_COEX_LNAMAXGAIN2_MAXGAIN2_MASK) >> MAC_PCU_COEX_LNAMAXGAIN2_MAXGAIN2_LSB)
+#define MAC_PCU_COEX_LNAMAXGAIN2_MAXGAIN2_SET(x) (((x) << MAC_PCU_COEX_LNAMAXGAIN2_MAXGAIN2_LSB) & MAC_PCU_COEX_LNAMAXGAIN2_MAXGAIN2_MASK)
+#define MAC_PCU_COEX_LNAMAXGAIN2_MAXGAIN1_MSB 7
+#define MAC_PCU_COEX_LNAMAXGAIN2_MAXGAIN1_LSB 0
+#define MAC_PCU_COEX_LNAMAXGAIN2_MAXGAIN1_MASK 0x000000ff
+#define MAC_PCU_COEX_LNAMAXGAIN2_MAXGAIN1_GET(x) (((x) & MAC_PCU_COEX_LNAMAXGAIN2_MAXGAIN1_MASK) >> MAC_PCU_COEX_LNAMAXGAIN2_MAXGAIN1_LSB)
+#define MAC_PCU_COEX_LNAMAXGAIN2_MAXGAIN1_SET(x) (((x) << MAC_PCU_COEX_LNAMAXGAIN2_MAXGAIN1_LSB) & MAC_PCU_COEX_LNAMAXGAIN2_MAXGAIN1_MASK)
+
+#define MAC_PCU_COEX_LNAMAXGAIN3_ADDRESS 0x00008320
+#define MAC_PCU_COEX_LNAMAXGAIN3_OFFSET 0x00000320
+#define MAC_PCU_COEX_LNAMAXGAIN3_MAXGAIN4_MSB 31
+#define MAC_PCU_COEX_LNAMAXGAIN3_MAXGAIN4_LSB 24
+#define MAC_PCU_COEX_LNAMAXGAIN3_MAXGAIN4_MASK 0xff000000
+#define MAC_PCU_COEX_LNAMAXGAIN3_MAXGAIN4_GET(x) (((x) & MAC_PCU_COEX_LNAMAXGAIN3_MAXGAIN4_MASK) >> MAC_PCU_COEX_LNAMAXGAIN3_MAXGAIN4_LSB)
+#define MAC_PCU_COEX_LNAMAXGAIN3_MAXGAIN4_SET(x) (((x) << MAC_PCU_COEX_LNAMAXGAIN3_MAXGAIN4_LSB) & MAC_PCU_COEX_LNAMAXGAIN3_MAXGAIN4_MASK)
+#define MAC_PCU_COEX_LNAMAXGAIN3_MAXGAIN3_MSB 23
+#define MAC_PCU_COEX_LNAMAXGAIN3_MAXGAIN3_LSB 16
+#define MAC_PCU_COEX_LNAMAXGAIN3_MAXGAIN3_MASK 0x00ff0000
+#define MAC_PCU_COEX_LNAMAXGAIN3_MAXGAIN3_GET(x) (((x) & MAC_PCU_COEX_LNAMAXGAIN3_MAXGAIN3_MASK) >> MAC_PCU_COEX_LNAMAXGAIN3_MAXGAIN3_LSB)
+#define MAC_PCU_COEX_LNAMAXGAIN3_MAXGAIN3_SET(x) (((x) << MAC_PCU_COEX_LNAMAXGAIN3_MAXGAIN3_LSB) & MAC_PCU_COEX_LNAMAXGAIN3_MAXGAIN3_MASK)
+#define MAC_PCU_COEX_LNAMAXGAIN3_MAXGAIN2_MSB 15
+#define MAC_PCU_COEX_LNAMAXGAIN3_MAXGAIN2_LSB 8
+#define MAC_PCU_COEX_LNAMAXGAIN3_MAXGAIN2_MASK 0x0000ff00
+#define MAC_PCU_COEX_LNAMAXGAIN3_MAXGAIN2_GET(x) (((x) & MAC_PCU_COEX_LNAMAXGAIN3_MAXGAIN2_MASK) >> MAC_PCU_COEX_LNAMAXGAIN3_MAXGAIN2_LSB)
+#define MAC_PCU_COEX_LNAMAXGAIN3_MAXGAIN2_SET(x) (((x) << MAC_PCU_COEX_LNAMAXGAIN3_MAXGAIN2_LSB) & MAC_PCU_COEX_LNAMAXGAIN3_MAXGAIN2_MASK)
+#define MAC_PCU_COEX_LNAMAXGAIN3_MAXGAIN1_MSB 7
+#define MAC_PCU_COEX_LNAMAXGAIN3_MAXGAIN1_LSB 0
+#define MAC_PCU_COEX_LNAMAXGAIN3_MAXGAIN1_MASK 0x000000ff
+#define MAC_PCU_COEX_LNAMAXGAIN3_MAXGAIN1_GET(x) (((x) & MAC_PCU_COEX_LNAMAXGAIN3_MAXGAIN1_MASK) >> MAC_PCU_COEX_LNAMAXGAIN3_MAXGAIN1_LSB)
+#define MAC_PCU_COEX_LNAMAXGAIN3_MAXGAIN1_SET(x) (((x) << MAC_PCU_COEX_LNAMAXGAIN3_MAXGAIN1_LSB) & MAC_PCU_COEX_LNAMAXGAIN3_MAXGAIN1_MASK)
+
+#define MAC_PCU_COEX_LNAMAXGAIN4_ADDRESS 0x00008324
+#define MAC_PCU_COEX_LNAMAXGAIN4_OFFSET 0x00000324
+#define MAC_PCU_COEX_LNAMAXGAIN4_MAXGAIN4_MSB 31
+#define MAC_PCU_COEX_LNAMAXGAIN4_MAXGAIN4_LSB 24
+#define MAC_PCU_COEX_LNAMAXGAIN4_MAXGAIN4_MASK 0xff000000
+#define MAC_PCU_COEX_LNAMAXGAIN4_MAXGAIN4_GET(x) (((x) & MAC_PCU_COEX_LNAMAXGAIN4_MAXGAIN4_MASK) >> MAC_PCU_COEX_LNAMAXGAIN4_MAXGAIN4_LSB)
+#define MAC_PCU_COEX_LNAMAXGAIN4_MAXGAIN4_SET(x) (((x) << MAC_PCU_COEX_LNAMAXGAIN4_MAXGAIN4_LSB) & MAC_PCU_COEX_LNAMAXGAIN4_MAXGAIN4_MASK)
+#define MAC_PCU_COEX_LNAMAXGAIN4_MAXGAIN3_MSB 23
+#define MAC_PCU_COEX_LNAMAXGAIN4_MAXGAIN3_LSB 16
+#define MAC_PCU_COEX_LNAMAXGAIN4_MAXGAIN3_MASK 0x00ff0000
+#define MAC_PCU_COEX_LNAMAXGAIN4_MAXGAIN3_GET(x) (((x) & MAC_PCU_COEX_LNAMAXGAIN4_MAXGAIN3_MASK) >> MAC_PCU_COEX_LNAMAXGAIN4_MAXGAIN3_LSB)
+#define MAC_PCU_COEX_LNAMAXGAIN4_MAXGAIN3_SET(x) (((x) << MAC_PCU_COEX_LNAMAXGAIN4_MAXGAIN3_LSB) & MAC_PCU_COEX_LNAMAXGAIN4_MAXGAIN3_MASK)
+#define MAC_PCU_COEX_LNAMAXGAIN4_MAXGAIN2_MSB 15
+#define MAC_PCU_COEX_LNAMAXGAIN4_MAXGAIN2_LSB 8
+#define MAC_PCU_COEX_LNAMAXGAIN4_MAXGAIN2_MASK 0x0000ff00
+#define MAC_PCU_COEX_LNAMAXGAIN4_MAXGAIN2_GET(x) (((x) & MAC_PCU_COEX_LNAMAXGAIN4_MAXGAIN2_MASK) >> MAC_PCU_COEX_LNAMAXGAIN4_MAXGAIN2_LSB)
+#define MAC_PCU_COEX_LNAMAXGAIN4_MAXGAIN2_SET(x) (((x) << MAC_PCU_COEX_LNAMAXGAIN4_MAXGAIN2_LSB) & MAC_PCU_COEX_LNAMAXGAIN4_MAXGAIN2_MASK)
+#define MAC_PCU_COEX_LNAMAXGAIN4_MAXGAIN1_MSB 7
+#define MAC_PCU_COEX_LNAMAXGAIN4_MAXGAIN1_LSB 0
+#define MAC_PCU_COEX_LNAMAXGAIN4_MAXGAIN1_MASK 0x000000ff
+#define MAC_PCU_COEX_LNAMAXGAIN4_MAXGAIN1_GET(x) (((x) & MAC_PCU_COEX_LNAMAXGAIN4_MAXGAIN1_MASK) >> MAC_PCU_COEX_LNAMAXGAIN4_MAXGAIN1_LSB)
+#define MAC_PCU_COEX_LNAMAXGAIN4_MAXGAIN1_SET(x) (((x) << MAC_PCU_COEX_LNAMAXGAIN4_MAXGAIN1_LSB) & MAC_PCU_COEX_LNAMAXGAIN4_MAXGAIN1_MASK)
+
+#define MAC_PCU_BASIC_RATE_SET0_ADDRESS 0x00008328
+#define MAC_PCU_BASIC_RATE_SET0_OFFSET 0x00000328
+#define MAC_PCU_BASIC_RATE_SET0_VALUE_MSB 29
+#define MAC_PCU_BASIC_RATE_SET0_VALUE_LSB 0
+#define MAC_PCU_BASIC_RATE_SET0_VALUE_MASK 0x3fffffff
+#define MAC_PCU_BASIC_RATE_SET0_VALUE_GET(x) (((x) & MAC_PCU_BASIC_RATE_SET0_VALUE_MASK) >> MAC_PCU_BASIC_RATE_SET0_VALUE_LSB)
+#define MAC_PCU_BASIC_RATE_SET0_VALUE_SET(x) (((x) << MAC_PCU_BASIC_RATE_SET0_VALUE_LSB) & MAC_PCU_BASIC_RATE_SET0_VALUE_MASK)
+
+#define MAC_PCU_BASIC_RATE_SET1_ADDRESS 0x0000832c
+#define MAC_PCU_BASIC_RATE_SET1_OFFSET 0x0000032c
+#define MAC_PCU_BASIC_RATE_SET1_VALUE_MSB 29
+#define MAC_PCU_BASIC_RATE_SET1_VALUE_LSB 0
+#define MAC_PCU_BASIC_RATE_SET1_VALUE_MASK 0x3fffffff
+#define MAC_PCU_BASIC_RATE_SET1_VALUE_GET(x) (((x) & MAC_PCU_BASIC_RATE_SET1_VALUE_MASK) >> MAC_PCU_BASIC_RATE_SET1_VALUE_LSB)
+#define MAC_PCU_BASIC_RATE_SET1_VALUE_SET(x) (((x) << MAC_PCU_BASIC_RATE_SET1_VALUE_LSB) & MAC_PCU_BASIC_RATE_SET1_VALUE_MASK)
+
+#define MAC_PCU_BASIC_RATE_SET2_ADDRESS 0x00008330
+#define MAC_PCU_BASIC_RATE_SET2_OFFSET 0x00000330
+#define MAC_PCU_BASIC_RATE_SET2_VALUE_MSB 29
+#define MAC_PCU_BASIC_RATE_SET2_VALUE_LSB 0
+#define MAC_PCU_BASIC_RATE_SET2_VALUE_MASK 0x3fffffff
+#define MAC_PCU_BASIC_RATE_SET2_VALUE_GET(x) (((x) & MAC_PCU_BASIC_RATE_SET2_VALUE_MASK) >> MAC_PCU_BASIC_RATE_SET2_VALUE_LSB)
+#define MAC_PCU_BASIC_RATE_SET2_VALUE_SET(x) (((x) << MAC_PCU_BASIC_RATE_SET2_VALUE_LSB) & MAC_PCU_BASIC_RATE_SET2_VALUE_MASK)
+
+#define MAC_PCU_BASIC_RATE_SET3_ADDRESS 0x00008334
+#define MAC_PCU_BASIC_RATE_SET3_OFFSET 0x00000334
+#define MAC_PCU_BASIC_RATE_SET3_VALUE_MSB 24
+#define MAC_PCU_BASIC_RATE_SET3_VALUE_LSB 0
+#define MAC_PCU_BASIC_RATE_SET3_VALUE_MASK 0x01ffffff
+#define MAC_PCU_BASIC_RATE_SET3_VALUE_GET(x) (((x) & MAC_PCU_BASIC_RATE_SET3_VALUE_MASK) >> MAC_PCU_BASIC_RATE_SET3_VALUE_LSB)
+#define MAC_PCU_BASIC_RATE_SET3_VALUE_SET(x) (((x) << MAC_PCU_BASIC_RATE_SET3_VALUE_LSB) & MAC_PCU_BASIC_RATE_SET3_VALUE_MASK)
+
+#define MAC_PCU_RX_INT_STATUS0_ADDRESS 0x00008338
+#define MAC_PCU_RX_INT_STATUS0_OFFSET 0x00000338
+#define MAC_PCU_RX_INT_STATUS0_DURATION_H_MSB 31
+#define MAC_PCU_RX_INT_STATUS0_DURATION_H_LSB 24
+#define MAC_PCU_RX_INT_STATUS0_DURATION_H_MASK 0xff000000
+#define MAC_PCU_RX_INT_STATUS0_DURATION_H_GET(x) (((x) & MAC_PCU_RX_INT_STATUS0_DURATION_H_MASK) >> MAC_PCU_RX_INT_STATUS0_DURATION_H_LSB)
+#define MAC_PCU_RX_INT_STATUS0_DURATION_H_SET(x) (((x) << MAC_PCU_RX_INT_STATUS0_DURATION_H_LSB) & MAC_PCU_RX_INT_STATUS0_DURATION_H_MASK)
+#define MAC_PCU_RX_INT_STATUS0_DURATION_L_MSB 23
+#define MAC_PCU_RX_INT_STATUS0_DURATION_L_LSB 16
+#define MAC_PCU_RX_INT_STATUS0_DURATION_L_MASK 0x00ff0000
+#define MAC_PCU_RX_INT_STATUS0_DURATION_L_GET(x) (((x) & MAC_PCU_RX_INT_STATUS0_DURATION_L_MASK) >> MAC_PCU_RX_INT_STATUS0_DURATION_L_LSB)
+#define MAC_PCU_RX_INT_STATUS0_DURATION_L_SET(x) (((x) << MAC_PCU_RX_INT_STATUS0_DURATION_L_LSB) & MAC_PCU_RX_INT_STATUS0_DURATION_L_MASK)
+#define MAC_PCU_RX_INT_STATUS0_FRAME_CONTROL_H_MSB 15
+#define MAC_PCU_RX_INT_STATUS0_FRAME_CONTROL_H_LSB 8
+#define MAC_PCU_RX_INT_STATUS0_FRAME_CONTROL_H_MASK 0x0000ff00
+#define MAC_PCU_RX_INT_STATUS0_FRAME_CONTROL_H_GET(x) (((x) & MAC_PCU_RX_INT_STATUS0_FRAME_CONTROL_H_MASK) >> MAC_PCU_RX_INT_STATUS0_FRAME_CONTROL_H_LSB)
+#define MAC_PCU_RX_INT_STATUS0_FRAME_CONTROL_H_SET(x) (((x) << MAC_PCU_RX_INT_STATUS0_FRAME_CONTROL_H_LSB) & MAC_PCU_RX_INT_STATUS0_FRAME_CONTROL_H_MASK)
+#define MAC_PCU_RX_INT_STATUS0_FRAME_CONTROL_L_MSB 7
+#define MAC_PCU_RX_INT_STATUS0_FRAME_CONTROL_L_LSB 0
+#define MAC_PCU_RX_INT_STATUS0_FRAME_CONTROL_L_MASK 0x000000ff
+#define MAC_PCU_RX_INT_STATUS0_FRAME_CONTROL_L_GET(x) (((x) & MAC_PCU_RX_INT_STATUS0_FRAME_CONTROL_L_MASK) >> MAC_PCU_RX_INT_STATUS0_FRAME_CONTROL_L_LSB)
+#define MAC_PCU_RX_INT_STATUS0_FRAME_CONTROL_L_SET(x) (((x) << MAC_PCU_RX_INT_STATUS0_FRAME_CONTROL_L_LSB) & MAC_PCU_RX_INT_STATUS0_FRAME_CONTROL_L_MASK)
+
+#define MAC_PCU_RX_INT_STATUS1_ADDRESS 0x0000833c
+#define MAC_PCU_RX_INT_STATUS1_OFFSET 0x0000033c
+#define MAC_PCU_RX_INT_STATUS1_VALUE_MSB 17
+#define MAC_PCU_RX_INT_STATUS1_VALUE_LSB 0
+#define MAC_PCU_RX_INT_STATUS1_VALUE_MASK 0x0003ffff
+#define MAC_PCU_RX_INT_STATUS1_VALUE_GET(x) (((x) & MAC_PCU_RX_INT_STATUS1_VALUE_MASK) >> MAC_PCU_RX_INT_STATUS1_VALUE_LSB)
+#define MAC_PCU_RX_INT_STATUS1_VALUE_SET(x) (((x) << MAC_PCU_RX_INT_STATUS1_VALUE_LSB) & MAC_PCU_RX_INT_STATUS1_VALUE_MASK)
+
+#define MAC_PCU_RX_INT_STATUS2_ADDRESS 0x00008340
+#define MAC_PCU_RX_INT_STATUS2_OFFSET 0x00000340
+#define MAC_PCU_RX_INT_STATUS2_VALUE_MSB 26
+#define MAC_PCU_RX_INT_STATUS2_VALUE_LSB 0
+#define MAC_PCU_RX_INT_STATUS2_VALUE_MASK 0x07ffffff
+#define MAC_PCU_RX_INT_STATUS2_VALUE_GET(x) (((x) & MAC_PCU_RX_INT_STATUS2_VALUE_MASK) >> MAC_PCU_RX_INT_STATUS2_VALUE_LSB)
+#define MAC_PCU_RX_INT_STATUS2_VALUE_SET(x) (((x) << MAC_PCU_RX_INT_STATUS2_VALUE_LSB) & MAC_PCU_RX_INT_STATUS2_VALUE_MASK)
+
+#define MAC_PCU_RX_INT_STATUS3_ADDRESS 0x00008344
+#define MAC_PCU_RX_INT_STATUS3_OFFSET 0x00000344
+#define MAC_PCU_RX_INT_STATUS3_VALUE_MSB 23
+#define MAC_PCU_RX_INT_STATUS3_VALUE_LSB 0
+#define MAC_PCU_RX_INT_STATUS3_VALUE_MASK 0x00ffffff
+#define MAC_PCU_RX_INT_STATUS3_VALUE_GET(x) (((x) & MAC_PCU_RX_INT_STATUS3_VALUE_MASK) >> MAC_PCU_RX_INT_STATUS3_VALUE_LSB)
+#define MAC_PCU_RX_INT_STATUS3_VALUE_SET(x) (((x) << MAC_PCU_RX_INT_STATUS3_VALUE_LSB) & MAC_PCU_RX_INT_STATUS3_VALUE_MASK)
+
+#define HT_HALF_GI_RATE1_ADDRESS 0x00008348
+#define HT_HALF_GI_RATE1_OFFSET 0x00000348
+#define HT_HALF_GI_RATE1_MCS3_MSB 31
+#define HT_HALF_GI_RATE1_MCS3_LSB 24
+#define HT_HALF_GI_RATE1_MCS3_MASK 0xff000000
+#define HT_HALF_GI_RATE1_MCS3_GET(x) (((x) & HT_HALF_GI_RATE1_MCS3_MASK) >> HT_HALF_GI_RATE1_MCS3_LSB)
+#define HT_HALF_GI_RATE1_MCS3_SET(x) (((x) << HT_HALF_GI_RATE1_MCS3_LSB) & HT_HALF_GI_RATE1_MCS3_MASK)
+#define HT_HALF_GI_RATE1_MCS2_MSB 23
+#define HT_HALF_GI_RATE1_MCS2_LSB 16
+#define HT_HALF_GI_RATE1_MCS2_MASK 0x00ff0000
+#define HT_HALF_GI_RATE1_MCS2_GET(x) (((x) & HT_HALF_GI_RATE1_MCS2_MASK) >> HT_HALF_GI_RATE1_MCS2_LSB)
+#define HT_HALF_GI_RATE1_MCS2_SET(x) (((x) << HT_HALF_GI_RATE1_MCS2_LSB) & HT_HALF_GI_RATE1_MCS2_MASK)
+#define HT_HALF_GI_RATE1_MCS1_MSB 15
+#define HT_HALF_GI_RATE1_MCS1_LSB 8
+#define HT_HALF_GI_RATE1_MCS1_MASK 0x0000ff00
+#define HT_HALF_GI_RATE1_MCS1_GET(x) (((x) & HT_HALF_GI_RATE1_MCS1_MASK) >> HT_HALF_GI_RATE1_MCS1_LSB)
+#define HT_HALF_GI_RATE1_MCS1_SET(x) (((x) << HT_HALF_GI_RATE1_MCS1_LSB) & HT_HALF_GI_RATE1_MCS1_MASK)
+#define HT_HALF_GI_RATE1_MCS0_MSB 7
+#define HT_HALF_GI_RATE1_MCS0_LSB 0
+#define HT_HALF_GI_RATE1_MCS0_MASK 0x000000ff
+#define HT_HALF_GI_RATE1_MCS0_GET(x) (((x) & HT_HALF_GI_RATE1_MCS0_MASK) >> HT_HALF_GI_RATE1_MCS0_LSB)
+#define HT_HALF_GI_RATE1_MCS0_SET(x) (((x) << HT_HALF_GI_RATE1_MCS0_LSB) & HT_HALF_GI_RATE1_MCS0_MASK)
+
+#define HT_HALF_GI_RATE2_ADDRESS 0x0000834c
+#define HT_HALF_GI_RATE2_OFFSET 0x0000034c
+#define HT_HALF_GI_RATE2_MCS7_MSB 31
+#define HT_HALF_GI_RATE2_MCS7_LSB 24
+#define HT_HALF_GI_RATE2_MCS7_MASK 0xff000000
+#define HT_HALF_GI_RATE2_MCS7_GET(x) (((x) & HT_HALF_GI_RATE2_MCS7_MASK) >> HT_HALF_GI_RATE2_MCS7_LSB)
+#define HT_HALF_GI_RATE2_MCS7_SET(x) (((x) << HT_HALF_GI_RATE2_MCS7_LSB) & HT_HALF_GI_RATE2_MCS7_MASK)
+#define HT_HALF_GI_RATE2_MCS6_MSB 23
+#define HT_HALF_GI_RATE2_MCS6_LSB 16
+#define HT_HALF_GI_RATE2_MCS6_MASK 0x00ff0000
+#define HT_HALF_GI_RATE2_MCS6_GET(x) (((x) & HT_HALF_GI_RATE2_MCS6_MASK) >> HT_HALF_GI_RATE2_MCS6_LSB)
+#define HT_HALF_GI_RATE2_MCS6_SET(x) (((x) << HT_HALF_GI_RATE2_MCS6_LSB) & HT_HALF_GI_RATE2_MCS6_MASK)
+#define HT_HALF_GI_RATE2_MCS5_MSB 15
+#define HT_HALF_GI_RATE2_MCS5_LSB 8
+#define HT_HALF_GI_RATE2_MCS5_MASK 0x0000ff00
+#define HT_HALF_GI_RATE2_MCS5_GET(x) (((x) & HT_HALF_GI_RATE2_MCS5_MASK) >> HT_HALF_GI_RATE2_MCS5_LSB)
+#define HT_HALF_GI_RATE2_MCS5_SET(x) (((x) << HT_HALF_GI_RATE2_MCS5_LSB) & HT_HALF_GI_RATE2_MCS5_MASK)
+#define HT_HALF_GI_RATE2_MCS4_MSB 7
+#define HT_HALF_GI_RATE2_MCS4_LSB 0
+#define HT_HALF_GI_RATE2_MCS4_MASK 0x000000ff
+#define HT_HALF_GI_RATE2_MCS4_GET(x) (((x) & HT_HALF_GI_RATE2_MCS4_MASK) >> HT_HALF_GI_RATE2_MCS4_LSB)
+#define HT_HALF_GI_RATE2_MCS4_SET(x) (((x) << HT_HALF_GI_RATE2_MCS4_LSB) & HT_HALF_GI_RATE2_MCS4_MASK)
+
+#define HT_FULL_GI_RATE1_ADDRESS 0x00008350
+#define HT_FULL_GI_RATE1_OFFSET 0x00000350
+#define HT_FULL_GI_RATE1_MCS3_MSB 31
+#define HT_FULL_GI_RATE1_MCS3_LSB 24
+#define HT_FULL_GI_RATE1_MCS3_MASK 0xff000000
+#define HT_FULL_GI_RATE1_MCS3_GET(x) (((x) & HT_FULL_GI_RATE1_MCS3_MASK) >> HT_FULL_GI_RATE1_MCS3_LSB)
+#define HT_FULL_GI_RATE1_MCS3_SET(x) (((x) << HT_FULL_GI_RATE1_MCS3_LSB) & HT_FULL_GI_RATE1_MCS3_MASK)
+#define HT_FULL_GI_RATE1_MCS2_MSB 23
+#define HT_FULL_GI_RATE1_MCS2_LSB 16
+#define HT_FULL_GI_RATE1_MCS2_MASK 0x00ff0000
+#define HT_FULL_GI_RATE1_MCS2_GET(x) (((x) & HT_FULL_GI_RATE1_MCS2_MASK) >> HT_FULL_GI_RATE1_MCS2_LSB)
+#define HT_FULL_GI_RATE1_MCS2_SET(x) (((x) << HT_FULL_GI_RATE1_MCS2_LSB) & HT_FULL_GI_RATE1_MCS2_MASK)
+#define HT_FULL_GI_RATE1_MCS1_MSB 15
+#define HT_FULL_GI_RATE1_MCS1_LSB 8
+#define HT_FULL_GI_RATE1_MCS1_MASK 0x0000ff00
+#define HT_FULL_GI_RATE1_MCS1_GET(x) (((x) & HT_FULL_GI_RATE1_MCS1_MASK) >> HT_FULL_GI_RATE1_MCS1_LSB)
+#define HT_FULL_GI_RATE1_MCS1_SET(x) (((x) << HT_FULL_GI_RATE1_MCS1_LSB) & HT_FULL_GI_RATE1_MCS1_MASK)
+#define HT_FULL_GI_RATE1_MCS0_MSB 7
+#define HT_FULL_GI_RATE1_MCS0_LSB 0
+#define HT_FULL_GI_RATE1_MCS0_MASK 0x000000ff
+#define HT_FULL_GI_RATE1_MCS0_GET(x) (((x) & HT_FULL_GI_RATE1_MCS0_MASK) >> HT_FULL_GI_RATE1_MCS0_LSB)
+#define HT_FULL_GI_RATE1_MCS0_SET(x) (((x) << HT_FULL_GI_RATE1_MCS0_LSB) & HT_FULL_GI_RATE1_MCS0_MASK)
+
+#define HT_FULL_GI_RATE2_ADDRESS 0x00008354
+#define HT_FULL_GI_RATE2_OFFSET 0x00000354
+#define HT_FULL_GI_RATE2_MCS7_MSB 31
+#define HT_FULL_GI_RATE2_MCS7_LSB 24
+#define HT_FULL_GI_RATE2_MCS7_MASK 0xff000000
+#define HT_FULL_GI_RATE2_MCS7_GET(x) (((x) & HT_FULL_GI_RATE2_MCS7_MASK) >> HT_FULL_GI_RATE2_MCS7_LSB)
+#define HT_FULL_GI_RATE2_MCS7_SET(x) (((x) << HT_FULL_GI_RATE2_MCS7_LSB) & HT_FULL_GI_RATE2_MCS7_MASK)
+#define HT_FULL_GI_RATE2_MCS6_MSB 23
+#define HT_FULL_GI_RATE2_MCS6_LSB 16
+#define HT_FULL_GI_RATE2_MCS6_MASK 0x00ff0000
+#define HT_FULL_GI_RATE2_MCS6_GET(x) (((x) & HT_FULL_GI_RATE2_MCS6_MASK) >> HT_FULL_GI_RATE2_MCS6_LSB)
+#define HT_FULL_GI_RATE2_MCS6_SET(x) (((x) << HT_FULL_GI_RATE2_MCS6_LSB) & HT_FULL_GI_RATE2_MCS6_MASK)
+#define HT_FULL_GI_RATE2_MCS5_MSB 15
+#define HT_FULL_GI_RATE2_MCS5_LSB 8
+#define HT_FULL_GI_RATE2_MCS5_MASK 0x0000ff00
+#define HT_FULL_GI_RATE2_MCS5_GET(x) (((x) & HT_FULL_GI_RATE2_MCS5_MASK) >> HT_FULL_GI_RATE2_MCS5_LSB)
+#define HT_FULL_GI_RATE2_MCS5_SET(x) (((x) << HT_FULL_GI_RATE2_MCS5_LSB) & HT_FULL_GI_RATE2_MCS5_MASK)
+#define HT_FULL_GI_RATE2_MCS4_MSB 7
+#define HT_FULL_GI_RATE2_MCS4_LSB 0
+#define HT_FULL_GI_RATE2_MCS4_MASK 0x000000ff
+#define HT_FULL_GI_RATE2_MCS4_GET(x) (((x) & HT_FULL_GI_RATE2_MCS4_MASK) >> HT_FULL_GI_RATE2_MCS4_LSB)
+#define HT_FULL_GI_RATE2_MCS4_SET(x) (((x) << HT_FULL_GI_RATE2_MCS4_LSB) & HT_FULL_GI_RATE2_MCS4_MASK)
+
+#define LEGACY_RATE1_ADDRESS 0x00008358
+#define LEGACY_RATE1_OFFSET 0x00000358
+#define LEGACY_RATE1_RATE12_MSB 29
+#define LEGACY_RATE1_RATE12_LSB 24
+#define LEGACY_RATE1_RATE12_MASK 0x3f000000
+#define LEGACY_RATE1_RATE12_GET(x) (((x) & LEGACY_RATE1_RATE12_MASK) >> LEGACY_RATE1_RATE12_LSB)
+#define LEGACY_RATE1_RATE12_SET(x) (((x) << LEGACY_RATE1_RATE12_LSB) & LEGACY_RATE1_RATE12_MASK)
+#define LEGACY_RATE1_RATE11_MSB 23
+#define LEGACY_RATE1_RATE11_LSB 18
+#define LEGACY_RATE1_RATE11_MASK 0x00fc0000
+#define LEGACY_RATE1_RATE11_GET(x) (((x) & LEGACY_RATE1_RATE11_MASK) >> LEGACY_RATE1_RATE11_LSB)
+#define LEGACY_RATE1_RATE11_SET(x) (((x) << LEGACY_RATE1_RATE11_LSB) & LEGACY_RATE1_RATE11_MASK)
+#define LEGACY_RATE1_RATE10_MSB 17
+#define LEGACY_RATE1_RATE10_LSB 12
+#define LEGACY_RATE1_RATE10_MASK 0x0003f000
+#define LEGACY_RATE1_RATE10_GET(x) (((x) & LEGACY_RATE1_RATE10_MASK) >> LEGACY_RATE1_RATE10_LSB)
+#define LEGACY_RATE1_RATE10_SET(x) (((x) << LEGACY_RATE1_RATE10_LSB) & LEGACY_RATE1_RATE10_MASK)
+#define LEGACY_RATE1_RATE9_MSB 11
+#define LEGACY_RATE1_RATE9_LSB 6
+#define LEGACY_RATE1_RATE9_MASK 0x00000fc0
+#define LEGACY_RATE1_RATE9_GET(x) (((x) & LEGACY_RATE1_RATE9_MASK) >> LEGACY_RATE1_RATE9_LSB)
+#define LEGACY_RATE1_RATE9_SET(x) (((x) << LEGACY_RATE1_RATE9_LSB) & LEGACY_RATE1_RATE9_MASK)
+#define LEGACY_RATE1_RATE8_MSB 5
+#define LEGACY_RATE1_RATE8_LSB 0
+#define LEGACY_RATE1_RATE8_MASK 0x0000003f
+#define LEGACY_RATE1_RATE8_GET(x) (((x) & LEGACY_RATE1_RATE8_MASK) >> LEGACY_RATE1_RATE8_LSB)
+#define LEGACY_RATE1_RATE8_SET(x) (((x) << LEGACY_RATE1_RATE8_LSB) & LEGACY_RATE1_RATE8_MASK)
+
+#define LEGACY_RATE2_ADDRESS 0x0000835c
+#define LEGACY_RATE2_OFFSET 0x0000035c
+#define LEGACY_RATE2_RATE25_MSB 29
+#define LEGACY_RATE2_RATE25_LSB 24
+#define LEGACY_RATE2_RATE25_MASK 0x3f000000
+#define LEGACY_RATE2_RATE25_GET(x) (((x) & LEGACY_RATE2_RATE25_MASK) >> LEGACY_RATE2_RATE25_LSB)
+#define LEGACY_RATE2_RATE25_SET(x) (((x) << LEGACY_RATE2_RATE25_LSB) & LEGACY_RATE2_RATE25_MASK)
+#define LEGACY_RATE2_RATE24_MSB 23
+#define LEGACY_RATE2_RATE24_LSB 18
+#define LEGACY_RATE2_RATE24_MASK 0x00fc0000
+#define LEGACY_RATE2_RATE24_GET(x) (((x) & LEGACY_RATE2_RATE24_MASK) >> LEGACY_RATE2_RATE24_LSB)
+#define LEGACY_RATE2_RATE24_SET(x) (((x) << LEGACY_RATE2_RATE24_LSB) & LEGACY_RATE2_RATE24_MASK)
+#define LEGACY_RATE2_RATE15_MSB 17
+#define LEGACY_RATE2_RATE15_LSB 12
+#define LEGACY_RATE2_RATE15_MASK 0x0003f000
+#define LEGACY_RATE2_RATE15_GET(x) (((x) & LEGACY_RATE2_RATE15_MASK) >> LEGACY_RATE2_RATE15_LSB)
+#define LEGACY_RATE2_RATE15_SET(x) (((x) << LEGACY_RATE2_RATE15_LSB) & LEGACY_RATE2_RATE15_MASK)
+#define LEGACY_RATE2_RATE14_MSB 11
+#define LEGACY_RATE2_RATE14_LSB 6
+#define LEGACY_RATE2_RATE14_MASK 0x00000fc0
+#define LEGACY_RATE2_RATE14_GET(x) (((x) & LEGACY_RATE2_RATE14_MASK) >> LEGACY_RATE2_RATE14_LSB)
+#define LEGACY_RATE2_RATE14_SET(x) (((x) << LEGACY_RATE2_RATE14_LSB) & LEGACY_RATE2_RATE14_MASK)
+#define LEGACY_RATE2_RATE13_MSB 5
+#define LEGACY_RATE2_RATE13_LSB 0
+#define LEGACY_RATE2_RATE13_MASK 0x0000003f
+#define LEGACY_RATE2_RATE13_GET(x) (((x) & LEGACY_RATE2_RATE13_MASK) >> LEGACY_RATE2_RATE13_LSB)
+#define LEGACY_RATE2_RATE13_SET(x) (((x) << LEGACY_RATE2_RATE13_LSB) & LEGACY_RATE2_RATE13_MASK)
+
+#define LEGACY_RATE3_ADDRESS 0x00008360
+#define LEGACY_RATE3_OFFSET 0x00000360
+#define LEGACY_RATE3_RATE30_MSB 29
+#define LEGACY_RATE3_RATE30_LSB 24
+#define LEGACY_RATE3_RATE30_MASK 0x3f000000
+#define LEGACY_RATE3_RATE30_GET(x) (((x) & LEGACY_RATE3_RATE30_MASK) >> LEGACY_RATE3_RATE30_LSB)
+#define LEGACY_RATE3_RATE30_SET(x) (((x) << LEGACY_RATE3_RATE30_LSB) & LEGACY_RATE3_RATE30_MASK)
+#define LEGACY_RATE3_RATE29_MSB 23
+#define LEGACY_RATE3_RATE29_LSB 18
+#define LEGACY_RATE3_RATE29_MASK 0x00fc0000
+#define LEGACY_RATE3_RATE29_GET(x) (((x) & LEGACY_RATE3_RATE29_MASK) >> LEGACY_RATE3_RATE29_LSB)
+#define LEGACY_RATE3_RATE29_SET(x) (((x) << LEGACY_RATE3_RATE29_LSB) & LEGACY_RATE3_RATE29_MASK)
+#define LEGACY_RATE3_RATE28_MSB 17
+#define LEGACY_RATE3_RATE28_LSB 12
+#define LEGACY_RATE3_RATE28_MASK 0x0003f000
+#define LEGACY_RATE3_RATE28_GET(x) (((x) & LEGACY_RATE3_RATE28_MASK) >> LEGACY_RATE3_RATE28_LSB)
+#define LEGACY_RATE3_RATE28_SET(x) (((x) << LEGACY_RATE3_RATE28_LSB) & LEGACY_RATE3_RATE28_MASK)
+#define LEGACY_RATE3_RATE27_MSB 11
+#define LEGACY_RATE3_RATE27_LSB 6
+#define LEGACY_RATE3_RATE27_MASK 0x00000fc0
+#define LEGACY_RATE3_RATE27_GET(x) (((x) & LEGACY_RATE3_RATE27_MASK) >> LEGACY_RATE3_RATE27_LSB)
+#define LEGACY_RATE3_RATE27_SET(x) (((x) << LEGACY_RATE3_RATE27_LSB) & LEGACY_RATE3_RATE27_MASK)
+#define LEGACY_RATE3_RATE26_MSB 5
+#define LEGACY_RATE3_RATE26_LSB 0
+#define LEGACY_RATE3_RATE26_MASK 0x0000003f
+#define LEGACY_RATE3_RATE26_GET(x) (((x) & LEGACY_RATE3_RATE26_MASK) >> LEGACY_RATE3_RATE26_LSB)
+#define LEGACY_RATE3_RATE26_SET(x) (((x) << LEGACY_RATE3_RATE26_LSB) & LEGACY_RATE3_RATE26_MASK)
+
+#define RX_INT_FILTER_ADDRESS 0x00008364
+#define RX_INT_FILTER_OFFSET 0x00000364
+#define RX_INT_FILTER_BEACON_MSB 17
+#define RX_INT_FILTER_BEACON_LSB 17
+#define RX_INT_FILTER_BEACON_MASK 0x00020000
+#define RX_INT_FILTER_BEACON_GET(x) (((x) & RX_INT_FILTER_BEACON_MASK) >> RX_INT_FILTER_BEACON_LSB)
+#define RX_INT_FILTER_BEACON_SET(x) (((x) << RX_INT_FILTER_BEACON_LSB) & RX_INT_FILTER_BEACON_MASK)
+#define RX_INT_FILTER_AMPDU_MSB 16
+#define RX_INT_FILTER_AMPDU_LSB 16
+#define RX_INT_FILTER_AMPDU_MASK 0x00010000
+#define RX_INT_FILTER_AMPDU_GET(x) (((x) & RX_INT_FILTER_AMPDU_MASK) >> RX_INT_FILTER_AMPDU_LSB)
+#define RX_INT_FILTER_AMPDU_SET(x) (((x) << RX_INT_FILTER_AMPDU_LSB) & RX_INT_FILTER_AMPDU_MASK)
+#define RX_INT_FILTER_EOSP_MSB 15
+#define RX_INT_FILTER_EOSP_LSB 15
+#define RX_INT_FILTER_EOSP_MASK 0x00008000
+#define RX_INT_FILTER_EOSP_GET(x) (((x) & RX_INT_FILTER_EOSP_MASK) >> RX_INT_FILTER_EOSP_LSB)
+#define RX_INT_FILTER_EOSP_SET(x) (((x) << RX_INT_FILTER_EOSP_LSB) & RX_INT_FILTER_EOSP_MASK)
+#define RX_INT_FILTER_LENGTH_LOW_MSB 14
+#define RX_INT_FILTER_LENGTH_LOW_LSB 14
+#define RX_INT_FILTER_LENGTH_LOW_MASK 0x00004000
+#define RX_INT_FILTER_LENGTH_LOW_GET(x) (((x) & RX_INT_FILTER_LENGTH_LOW_MASK) >> RX_INT_FILTER_LENGTH_LOW_LSB)
+#define RX_INT_FILTER_LENGTH_LOW_SET(x) (((x) << RX_INT_FILTER_LENGTH_LOW_LSB) & RX_INT_FILTER_LENGTH_LOW_MASK)
+#define RX_INT_FILTER_LENGTH_HIGH_MSB 13
+#define RX_INT_FILTER_LENGTH_HIGH_LSB 13
+#define RX_INT_FILTER_LENGTH_HIGH_MASK 0x00002000
+#define RX_INT_FILTER_LENGTH_HIGH_GET(x) (((x) & RX_INT_FILTER_LENGTH_HIGH_MASK) >> RX_INT_FILTER_LENGTH_HIGH_LSB)
+#define RX_INT_FILTER_LENGTH_HIGH_SET(x) (((x) << RX_INT_FILTER_LENGTH_HIGH_LSB) & RX_INT_FILTER_LENGTH_HIGH_MASK)
+#define RX_INT_FILTER_RSSI_MSB 12
+#define RX_INT_FILTER_RSSI_LSB 12
+#define RX_INT_FILTER_RSSI_MASK 0x00001000
+#define RX_INT_FILTER_RSSI_GET(x) (((x) & RX_INT_FILTER_RSSI_MASK) >> RX_INT_FILTER_RSSI_LSB)
+#define RX_INT_FILTER_RSSI_SET(x) (((x) << RX_INT_FILTER_RSSI_LSB) & RX_INT_FILTER_RSSI_MASK)
+#define RX_INT_FILTER_RATE_LOW_MSB 11
+#define RX_INT_FILTER_RATE_LOW_LSB 11
+#define RX_INT_FILTER_RATE_LOW_MASK 0x00000800
+#define RX_INT_FILTER_RATE_LOW_GET(x) (((x) & RX_INT_FILTER_RATE_LOW_MASK) >> RX_INT_FILTER_RATE_LOW_LSB)
+#define RX_INT_FILTER_RATE_LOW_SET(x) (((x) << RX_INT_FILTER_RATE_LOW_LSB) & RX_INT_FILTER_RATE_LOW_MASK)
+#define RX_INT_FILTER_RATE_HIGH_MSB 10
+#define RX_INT_FILTER_RATE_HIGH_LSB 10
+#define RX_INT_FILTER_RATE_HIGH_MASK 0x00000400
+#define RX_INT_FILTER_RATE_HIGH_GET(x) (((x) & RX_INT_FILTER_RATE_HIGH_MASK) >> RX_INT_FILTER_RATE_HIGH_LSB)
+#define RX_INT_FILTER_RATE_HIGH_SET(x) (((x) << RX_INT_FILTER_RATE_HIGH_LSB) & RX_INT_FILTER_RATE_HIGH_MASK)
+#define RX_INT_FILTER_MORE_FRAG_MSB 9
+#define RX_INT_FILTER_MORE_FRAG_LSB 9
+#define RX_INT_FILTER_MORE_FRAG_MASK 0x00000200
+#define RX_INT_FILTER_MORE_FRAG_GET(x) (((x) & RX_INT_FILTER_MORE_FRAG_MASK) >> RX_INT_FILTER_MORE_FRAG_LSB)
+#define RX_INT_FILTER_MORE_FRAG_SET(x) (((x) << RX_INT_FILTER_MORE_FRAG_LSB) & RX_INT_FILTER_MORE_FRAG_MASK)
+#define RX_INT_FILTER_MORE_DATA_MSB 8
+#define RX_INT_FILTER_MORE_DATA_LSB 8
+#define RX_INT_FILTER_MORE_DATA_MASK 0x00000100
+#define RX_INT_FILTER_MORE_DATA_GET(x) (((x) & RX_INT_FILTER_MORE_DATA_MASK) >> RX_INT_FILTER_MORE_DATA_LSB)
+#define RX_INT_FILTER_MORE_DATA_SET(x) (((x) << RX_INT_FILTER_MORE_DATA_LSB) & RX_INT_FILTER_MORE_DATA_MASK)
+#define RX_INT_FILTER_RETRY_MSB 7
+#define RX_INT_FILTER_RETRY_LSB 7
+#define RX_INT_FILTER_RETRY_MASK 0x00000080
+#define RX_INT_FILTER_RETRY_GET(x) (((x) & RX_INT_FILTER_RETRY_MASK) >> RX_INT_FILTER_RETRY_LSB)
+#define RX_INT_FILTER_RETRY_SET(x) (((x) << RX_INT_FILTER_RETRY_LSB) & RX_INT_FILTER_RETRY_MASK)
+#define RX_INT_FILTER_CTS_MSB 6
+#define RX_INT_FILTER_CTS_LSB 6
+#define RX_INT_FILTER_CTS_MASK 0x00000040
+#define RX_INT_FILTER_CTS_GET(x) (((x) & RX_INT_FILTER_CTS_MASK) >> RX_INT_FILTER_CTS_LSB)
+#define RX_INT_FILTER_CTS_SET(x) (((x) << RX_INT_FILTER_CTS_LSB) & RX_INT_FILTER_CTS_MASK)
+#define RX_INT_FILTER_ACK_MSB 5
+#define RX_INT_FILTER_ACK_LSB 5
+#define RX_INT_FILTER_ACK_MASK 0x00000020
+#define RX_INT_FILTER_ACK_GET(x) (((x) & RX_INT_FILTER_ACK_MASK) >> RX_INT_FILTER_ACK_LSB)
+#define RX_INT_FILTER_ACK_SET(x) (((x) << RX_INT_FILTER_ACK_LSB) & RX_INT_FILTER_ACK_MASK)
+#define RX_INT_FILTER_RTS_MSB 4
+#define RX_INT_FILTER_RTS_LSB 4
+#define RX_INT_FILTER_RTS_MASK 0x00000010
+#define RX_INT_FILTER_RTS_GET(x) (((x) & RX_INT_FILTER_RTS_MASK) >> RX_INT_FILTER_RTS_LSB)
+#define RX_INT_FILTER_RTS_SET(x) (((x) << RX_INT_FILTER_RTS_LSB) & RX_INT_FILTER_RTS_MASK)
+#define RX_INT_FILTER_MCAST_MSB 3
+#define RX_INT_FILTER_MCAST_LSB 3
+#define RX_INT_FILTER_MCAST_MASK 0x00000008
+#define RX_INT_FILTER_MCAST_GET(x) (((x) & RX_INT_FILTER_MCAST_MASK) >> RX_INT_FILTER_MCAST_LSB)
+#define RX_INT_FILTER_MCAST_SET(x) (((x) << RX_INT_FILTER_MCAST_LSB) & RX_INT_FILTER_MCAST_MASK)
+#define RX_INT_FILTER_BCAST_MSB 2
+#define RX_INT_FILTER_BCAST_LSB 2
+#define RX_INT_FILTER_BCAST_MASK 0x00000004
+#define RX_INT_FILTER_BCAST_GET(x) (((x) & RX_INT_FILTER_BCAST_MASK) >> RX_INT_FILTER_BCAST_LSB)
+#define RX_INT_FILTER_BCAST_SET(x) (((x) << RX_INT_FILTER_BCAST_LSB) & RX_INT_FILTER_BCAST_MASK)
+#define RX_INT_FILTER_DIRECTED_MSB 1
+#define RX_INT_FILTER_DIRECTED_LSB 1
+#define RX_INT_FILTER_DIRECTED_MASK 0x00000002
+#define RX_INT_FILTER_DIRECTED_GET(x) (((x) & RX_INT_FILTER_DIRECTED_MASK) >> RX_INT_FILTER_DIRECTED_LSB)
+#define RX_INT_FILTER_DIRECTED_SET(x) (((x) << RX_INT_FILTER_DIRECTED_LSB) & RX_INT_FILTER_DIRECTED_MASK)
+#define RX_INT_FILTER_ENABLE_MSB 0
+#define RX_INT_FILTER_ENABLE_LSB 0
+#define RX_INT_FILTER_ENABLE_MASK 0x00000001
+#define RX_INT_FILTER_ENABLE_GET(x) (((x) & RX_INT_FILTER_ENABLE_MASK) >> RX_INT_FILTER_ENABLE_LSB)
+#define RX_INT_FILTER_ENABLE_SET(x) (((x) << RX_INT_FILTER_ENABLE_LSB) & RX_INT_FILTER_ENABLE_MASK)
+
+#define RX_INT_OVERFLOW_ADDRESS 0x00008368
+#define RX_INT_OVERFLOW_OFFSET 0x00000368
+#define RX_INT_OVERFLOW_STATUS_MSB 0
+#define RX_INT_OVERFLOW_STATUS_LSB 0
+#define RX_INT_OVERFLOW_STATUS_MASK 0x00000001
+#define RX_INT_OVERFLOW_STATUS_GET(x) (((x) & RX_INT_OVERFLOW_STATUS_MASK) >> RX_INT_OVERFLOW_STATUS_LSB)
+#define RX_INT_OVERFLOW_STATUS_SET(x) (((x) << RX_INT_OVERFLOW_STATUS_LSB) & RX_INT_OVERFLOW_STATUS_MASK)
+
+#define RX_FILTER_THRESH_ADDRESS 0x0000836c
+#define RX_FILTER_THRESH_OFFSET 0x0000036c
+#define RX_FILTER_THRESH_RSSI_LOW_MSB 23
+#define RX_FILTER_THRESH_RSSI_LOW_LSB 16
+#define RX_FILTER_THRESH_RSSI_LOW_MASK 0x00ff0000
+#define RX_FILTER_THRESH_RSSI_LOW_GET(x) (((x) & RX_FILTER_THRESH_RSSI_LOW_MASK) >> RX_FILTER_THRESH_RSSI_LOW_LSB)
+#define RX_FILTER_THRESH_RSSI_LOW_SET(x) (((x) << RX_FILTER_THRESH_RSSI_LOW_LSB) & RX_FILTER_THRESH_RSSI_LOW_MASK)
+#define RX_FILTER_THRESH_RATE_LOW_MSB 15
+#define RX_FILTER_THRESH_RATE_LOW_LSB 8
+#define RX_FILTER_THRESH_RATE_LOW_MASK 0x0000ff00
+#define RX_FILTER_THRESH_RATE_LOW_GET(x) (((x) & RX_FILTER_THRESH_RATE_LOW_MASK) >> RX_FILTER_THRESH_RATE_LOW_LSB)
+#define RX_FILTER_THRESH_RATE_LOW_SET(x) (((x) << RX_FILTER_THRESH_RATE_LOW_LSB) & RX_FILTER_THRESH_RATE_LOW_MASK)
+#define RX_FILTER_THRESH_RATE_HIGH_MSB 7
+#define RX_FILTER_THRESH_RATE_HIGH_LSB 0
+#define RX_FILTER_THRESH_RATE_HIGH_MASK 0x000000ff
+#define RX_FILTER_THRESH_RATE_HIGH_GET(x) (((x) & RX_FILTER_THRESH_RATE_HIGH_MASK) >> RX_FILTER_THRESH_RATE_HIGH_LSB)
+#define RX_FILTER_THRESH_RATE_HIGH_SET(x) (((x) << RX_FILTER_THRESH_RATE_HIGH_LSB) & RX_FILTER_THRESH_RATE_HIGH_MASK)
+
+#define RX_FILTER_THRESH1_ADDRESS 0x00008370
+#define RX_FILTER_THRESH1_OFFSET 0x00000370
+#define RX_FILTER_THRESH1_LENGTH_LOW_MSB 23
+#define RX_FILTER_THRESH1_LENGTH_LOW_LSB 12
+#define RX_FILTER_THRESH1_LENGTH_LOW_MASK 0x00fff000
+#define RX_FILTER_THRESH1_LENGTH_LOW_GET(x) (((x) & RX_FILTER_THRESH1_LENGTH_LOW_MASK) >> RX_FILTER_THRESH1_LENGTH_LOW_LSB)
+#define RX_FILTER_THRESH1_LENGTH_LOW_SET(x) (((x) << RX_FILTER_THRESH1_LENGTH_LOW_LSB) & RX_FILTER_THRESH1_LENGTH_LOW_MASK)
+#define RX_FILTER_THRESH1_LENGTH_HIGH_MSB 11
+#define RX_FILTER_THRESH1_LENGTH_HIGH_LSB 0
+#define RX_FILTER_THRESH1_LENGTH_HIGH_MASK 0x00000fff
+#define RX_FILTER_THRESH1_LENGTH_HIGH_GET(x) (((x) & RX_FILTER_THRESH1_LENGTH_HIGH_MASK) >> RX_FILTER_THRESH1_LENGTH_HIGH_LSB)
+#define RX_FILTER_THRESH1_LENGTH_HIGH_SET(x) (((x) << RX_FILTER_THRESH1_LENGTH_HIGH_LSB) & RX_FILTER_THRESH1_LENGTH_HIGH_MASK)
+
+#define RX_PRIORITY_THRESH0_ADDRESS 0x00008374
+#define RX_PRIORITY_THRESH0_OFFSET 0x00000374
+#define RX_PRIORITY_THRESH0_RSSI_LOW_MSB 31
+#define RX_PRIORITY_THRESH0_RSSI_LOW_LSB 24
+#define RX_PRIORITY_THRESH0_RSSI_LOW_MASK 0xff000000
+#define RX_PRIORITY_THRESH0_RSSI_LOW_GET(x) (((x) & RX_PRIORITY_THRESH0_RSSI_LOW_MASK) >> RX_PRIORITY_THRESH0_RSSI_LOW_LSB)
+#define RX_PRIORITY_THRESH0_RSSI_LOW_SET(x) (((x) << RX_PRIORITY_THRESH0_RSSI_LOW_LSB) & RX_PRIORITY_THRESH0_RSSI_LOW_MASK)
+#define RX_PRIORITY_THRESH0_RSSI_HIGH_MSB 23
+#define RX_PRIORITY_THRESH0_RSSI_HIGH_LSB 16
+#define RX_PRIORITY_THRESH0_RSSI_HIGH_MASK 0x00ff0000
+#define RX_PRIORITY_THRESH0_RSSI_HIGH_GET(x) (((x) & RX_PRIORITY_THRESH0_RSSI_HIGH_MASK) >> RX_PRIORITY_THRESH0_RSSI_HIGH_LSB)
+#define RX_PRIORITY_THRESH0_RSSI_HIGH_SET(x) (((x) << RX_PRIORITY_THRESH0_RSSI_HIGH_LSB) & RX_PRIORITY_THRESH0_RSSI_HIGH_MASK)
+#define RX_PRIORITY_THRESH0_RATE_LOW_MSB 15
+#define RX_PRIORITY_THRESH0_RATE_LOW_LSB 8
+#define RX_PRIORITY_THRESH0_RATE_LOW_MASK 0x0000ff00
+#define RX_PRIORITY_THRESH0_RATE_LOW_GET(x) (((x) & RX_PRIORITY_THRESH0_RATE_LOW_MASK) >> RX_PRIORITY_THRESH0_RATE_LOW_LSB)
+#define RX_PRIORITY_THRESH0_RATE_LOW_SET(x) (((x) << RX_PRIORITY_THRESH0_RATE_LOW_LSB) & RX_PRIORITY_THRESH0_RATE_LOW_MASK)
+#define RX_PRIORITY_THRESH0_RATE_HIGH_MSB 7
+#define RX_PRIORITY_THRESH0_RATE_HIGH_LSB 0
+#define RX_PRIORITY_THRESH0_RATE_HIGH_MASK 0x000000ff
+#define RX_PRIORITY_THRESH0_RATE_HIGH_GET(x) (((x) & RX_PRIORITY_THRESH0_RATE_HIGH_MASK) >> RX_PRIORITY_THRESH0_RATE_HIGH_LSB)
+#define RX_PRIORITY_THRESH0_RATE_HIGH_SET(x) (((x) << RX_PRIORITY_THRESH0_RATE_HIGH_LSB) & RX_PRIORITY_THRESH0_RATE_HIGH_MASK)
+
+#define RX_PRIORITY_THRESH1_ADDRESS 0x00008378
+#define RX_PRIORITY_THRESH1_OFFSET 0x00000378
+#define RX_PRIORITY_THRESH1_XCAST_RSSI_HIGH_MSB 31
+#define RX_PRIORITY_THRESH1_XCAST_RSSI_HIGH_LSB 24
+#define RX_PRIORITY_THRESH1_XCAST_RSSI_HIGH_MASK 0xff000000
+#define RX_PRIORITY_THRESH1_XCAST_RSSI_HIGH_GET(x) (((x) & RX_PRIORITY_THRESH1_XCAST_RSSI_HIGH_MASK) >> RX_PRIORITY_THRESH1_XCAST_RSSI_HIGH_LSB)
+#define RX_PRIORITY_THRESH1_XCAST_RSSI_HIGH_SET(x) (((x) << RX_PRIORITY_THRESH1_XCAST_RSSI_HIGH_LSB) & RX_PRIORITY_THRESH1_XCAST_RSSI_HIGH_MASK)
+#define RX_PRIORITY_THRESH1_LENGTH_LOW_MSB 23
+#define RX_PRIORITY_THRESH1_LENGTH_LOW_LSB 12
+#define RX_PRIORITY_THRESH1_LENGTH_LOW_MASK 0x00fff000
+#define RX_PRIORITY_THRESH1_LENGTH_LOW_GET(x) (((x) & RX_PRIORITY_THRESH1_LENGTH_LOW_MASK) >> RX_PRIORITY_THRESH1_LENGTH_LOW_LSB)
+#define RX_PRIORITY_THRESH1_LENGTH_LOW_SET(x) (((x) << RX_PRIORITY_THRESH1_LENGTH_LOW_LSB) & RX_PRIORITY_THRESH1_LENGTH_LOW_MASK)
+#define RX_PRIORITY_THRESH1_LENGTH_HIGH_MSB 11
+#define RX_PRIORITY_THRESH1_LENGTH_HIGH_LSB 0
+#define RX_PRIORITY_THRESH1_LENGTH_HIGH_MASK 0x00000fff
+#define RX_PRIORITY_THRESH1_LENGTH_HIGH_GET(x) (((x) & RX_PRIORITY_THRESH1_LENGTH_HIGH_MASK) >> RX_PRIORITY_THRESH1_LENGTH_HIGH_LSB)
+#define RX_PRIORITY_THRESH1_LENGTH_HIGH_SET(x) (((x) << RX_PRIORITY_THRESH1_LENGTH_HIGH_LSB) & RX_PRIORITY_THRESH1_LENGTH_HIGH_MASK)
+
+#define RX_PRIORITY_THRESH2_ADDRESS 0x0000837c
+#define RX_PRIORITY_THRESH2_OFFSET 0x0000037c
+#define RX_PRIORITY_THRESH2_NULL_RSSI_HIGH_MSB 31
+#define RX_PRIORITY_THRESH2_NULL_RSSI_HIGH_LSB 24
+#define RX_PRIORITY_THRESH2_NULL_RSSI_HIGH_MASK 0xff000000
+#define RX_PRIORITY_THRESH2_NULL_RSSI_HIGH_GET(x) (((x) & RX_PRIORITY_THRESH2_NULL_RSSI_HIGH_MASK) >> RX_PRIORITY_THRESH2_NULL_RSSI_HIGH_LSB)
+#define RX_PRIORITY_THRESH2_NULL_RSSI_HIGH_SET(x) (((x) << RX_PRIORITY_THRESH2_NULL_RSSI_HIGH_LSB) & RX_PRIORITY_THRESH2_NULL_RSSI_HIGH_MASK)
+#define RX_PRIORITY_THRESH2_BEACON_RSSI_HIGH_MSB 23
+#define RX_PRIORITY_THRESH2_BEACON_RSSI_HIGH_LSB 16
+#define RX_PRIORITY_THRESH2_BEACON_RSSI_HIGH_MASK 0x00ff0000
+#define RX_PRIORITY_THRESH2_BEACON_RSSI_HIGH_GET(x) (((x) & RX_PRIORITY_THRESH2_BEACON_RSSI_HIGH_MASK) >> RX_PRIORITY_THRESH2_BEACON_RSSI_HIGH_LSB)
+#define RX_PRIORITY_THRESH2_BEACON_RSSI_HIGH_SET(x) (((x) << RX_PRIORITY_THRESH2_BEACON_RSSI_HIGH_LSB) & RX_PRIORITY_THRESH2_BEACON_RSSI_HIGH_MASK)
+#define RX_PRIORITY_THRESH2_MGMT_RSSI_HIGH_MSB 15
+#define RX_PRIORITY_THRESH2_MGMT_RSSI_HIGH_LSB 8
+#define RX_PRIORITY_THRESH2_MGMT_RSSI_HIGH_MASK 0x0000ff00
+#define RX_PRIORITY_THRESH2_MGMT_RSSI_HIGH_GET(x) (((x) & RX_PRIORITY_THRESH2_MGMT_RSSI_HIGH_MASK) >> RX_PRIORITY_THRESH2_MGMT_RSSI_HIGH_LSB)
+#define RX_PRIORITY_THRESH2_MGMT_RSSI_HIGH_SET(x) (((x) << RX_PRIORITY_THRESH2_MGMT_RSSI_HIGH_LSB) & RX_PRIORITY_THRESH2_MGMT_RSSI_HIGH_MASK)
+#define RX_PRIORITY_THRESH2_PRESP_RSSI_HIGH_MSB 7
+#define RX_PRIORITY_THRESH2_PRESP_RSSI_HIGH_LSB 0
+#define RX_PRIORITY_THRESH2_PRESP_RSSI_HIGH_MASK 0x000000ff
+#define RX_PRIORITY_THRESH2_PRESP_RSSI_HIGH_GET(x) (((x) & RX_PRIORITY_THRESH2_PRESP_RSSI_HIGH_MASK) >> RX_PRIORITY_THRESH2_PRESP_RSSI_HIGH_LSB)
+#define RX_PRIORITY_THRESH2_PRESP_RSSI_HIGH_SET(x) (((x) << RX_PRIORITY_THRESH2_PRESP_RSSI_HIGH_LSB) & RX_PRIORITY_THRESH2_PRESP_RSSI_HIGH_MASK)
+
+#define RX_PRIORITY_THRESH3_ADDRESS 0x00008380
+#define RX_PRIORITY_THRESH3_OFFSET 0x00000380
+#define RX_PRIORITY_THRESH3_PS_POLL_RSSI_HIGH_MSB 15
+#define RX_PRIORITY_THRESH3_PS_POLL_RSSI_HIGH_LSB 8
+#define RX_PRIORITY_THRESH3_PS_POLL_RSSI_HIGH_MASK 0x0000ff00
+#define RX_PRIORITY_THRESH3_PS_POLL_RSSI_HIGH_GET(x) (((x) & RX_PRIORITY_THRESH3_PS_POLL_RSSI_HIGH_MASK) >> RX_PRIORITY_THRESH3_PS_POLL_RSSI_HIGH_LSB)
+#define RX_PRIORITY_THRESH3_PS_POLL_RSSI_HIGH_SET(x) (((x) << RX_PRIORITY_THRESH3_PS_POLL_RSSI_HIGH_LSB) & RX_PRIORITY_THRESH3_PS_POLL_RSSI_HIGH_MASK)
+#define RX_PRIORITY_THRESH3_PREQ_RSSI_HIGH_MSB 7
+#define RX_PRIORITY_THRESH3_PREQ_RSSI_HIGH_LSB 0
+#define RX_PRIORITY_THRESH3_PREQ_RSSI_HIGH_MASK 0x000000ff
+#define RX_PRIORITY_THRESH3_PREQ_RSSI_HIGH_GET(x) (((x) & RX_PRIORITY_THRESH3_PREQ_RSSI_HIGH_MASK) >> RX_PRIORITY_THRESH3_PREQ_RSSI_HIGH_LSB)
+#define RX_PRIORITY_THRESH3_PREQ_RSSI_HIGH_SET(x) (((x) << RX_PRIORITY_THRESH3_PREQ_RSSI_HIGH_LSB) & RX_PRIORITY_THRESH3_PREQ_RSSI_HIGH_MASK)
+
+#define RX_PRIORITY_OFFSET0_ADDRESS 0x00008384
+#define RX_PRIORITY_OFFSET0_OFFSET 0x00000384
+#define RX_PRIORITY_OFFSET0_XCAST_RSSI_HIGH_MSB 29
+#define RX_PRIORITY_OFFSET0_XCAST_RSSI_HIGH_LSB 24
+#define RX_PRIORITY_OFFSET0_XCAST_RSSI_HIGH_MASK 0x3f000000
+#define RX_PRIORITY_OFFSET0_XCAST_RSSI_HIGH_GET(x) (((x) & RX_PRIORITY_OFFSET0_XCAST_RSSI_HIGH_MASK) >> RX_PRIORITY_OFFSET0_XCAST_RSSI_HIGH_LSB)
+#define RX_PRIORITY_OFFSET0_XCAST_RSSI_HIGH_SET(x) (((x) << RX_PRIORITY_OFFSET0_XCAST_RSSI_HIGH_LSB) & RX_PRIORITY_OFFSET0_XCAST_RSSI_HIGH_MASK)
+#define RX_PRIORITY_OFFSET0_RSSI_LOW_MSB 23
+#define RX_PRIORITY_OFFSET0_RSSI_LOW_LSB 18
+#define RX_PRIORITY_OFFSET0_RSSI_LOW_MASK 0x00fc0000
+#define RX_PRIORITY_OFFSET0_RSSI_LOW_GET(x) (((x) & RX_PRIORITY_OFFSET0_RSSI_LOW_MASK) >> RX_PRIORITY_OFFSET0_RSSI_LOW_LSB)
+#define RX_PRIORITY_OFFSET0_RSSI_LOW_SET(x) (((x) << RX_PRIORITY_OFFSET0_RSSI_LOW_LSB) & RX_PRIORITY_OFFSET0_RSSI_LOW_MASK)
+#define RX_PRIORITY_OFFSET0_RSSI_HIGH_MSB 17
+#define RX_PRIORITY_OFFSET0_RSSI_HIGH_LSB 12
+#define RX_PRIORITY_OFFSET0_RSSI_HIGH_MASK 0x0003f000
+#define RX_PRIORITY_OFFSET0_RSSI_HIGH_GET(x) (((x) & RX_PRIORITY_OFFSET0_RSSI_HIGH_MASK) >> RX_PRIORITY_OFFSET0_RSSI_HIGH_LSB)
+#define RX_PRIORITY_OFFSET0_RSSI_HIGH_SET(x) (((x) << RX_PRIORITY_OFFSET0_RSSI_HIGH_LSB) & RX_PRIORITY_OFFSET0_RSSI_HIGH_MASK)
+#define RX_PRIORITY_OFFSET0_PHY_RATE_LOW_MSB 11
+#define RX_PRIORITY_OFFSET0_PHY_RATE_LOW_LSB 6
+#define RX_PRIORITY_OFFSET0_PHY_RATE_LOW_MASK 0x00000fc0
+#define RX_PRIORITY_OFFSET0_PHY_RATE_LOW_GET(x) (((x) & RX_PRIORITY_OFFSET0_PHY_RATE_LOW_MASK) >> RX_PRIORITY_OFFSET0_PHY_RATE_LOW_LSB)
+#define RX_PRIORITY_OFFSET0_PHY_RATE_LOW_SET(x) (((x) << RX_PRIORITY_OFFSET0_PHY_RATE_LOW_LSB) & RX_PRIORITY_OFFSET0_PHY_RATE_LOW_MASK)
+#define RX_PRIORITY_OFFSET0_PHY_RATE_HIGH_MSB 5
+#define RX_PRIORITY_OFFSET0_PHY_RATE_HIGH_LSB 0
+#define RX_PRIORITY_OFFSET0_PHY_RATE_HIGH_MASK 0x0000003f
+#define RX_PRIORITY_OFFSET0_PHY_RATE_HIGH_GET(x) (((x) & RX_PRIORITY_OFFSET0_PHY_RATE_HIGH_MASK) >> RX_PRIORITY_OFFSET0_PHY_RATE_HIGH_LSB)
+#define RX_PRIORITY_OFFSET0_PHY_RATE_HIGH_SET(x) (((x) << RX_PRIORITY_OFFSET0_PHY_RATE_HIGH_LSB) & RX_PRIORITY_OFFSET0_PHY_RATE_HIGH_MASK)
+
+#define RX_PRIORITY_OFFSET1_ADDRESS 0x00008388
+#define RX_PRIORITY_OFFSET1_OFFSET 0x00000388
+#define RX_PRIORITY_OFFSET1_RTS_MSB 29
+#define RX_PRIORITY_OFFSET1_RTS_LSB 24
+#define RX_PRIORITY_OFFSET1_RTS_MASK 0x3f000000
+#define RX_PRIORITY_OFFSET1_RTS_GET(x) (((x) & RX_PRIORITY_OFFSET1_RTS_MASK) >> RX_PRIORITY_OFFSET1_RTS_LSB)
+#define RX_PRIORITY_OFFSET1_RTS_SET(x) (((x) << RX_PRIORITY_OFFSET1_RTS_LSB) & RX_PRIORITY_OFFSET1_RTS_MASK)
+#define RX_PRIORITY_OFFSET1_RETX_MSB 23
+#define RX_PRIORITY_OFFSET1_RETX_LSB 18
+#define RX_PRIORITY_OFFSET1_RETX_MASK 0x00fc0000
+#define RX_PRIORITY_OFFSET1_RETX_GET(x) (((x) & RX_PRIORITY_OFFSET1_RETX_MASK) >> RX_PRIORITY_OFFSET1_RETX_LSB)
+#define RX_PRIORITY_OFFSET1_RETX_SET(x) (((x) << RX_PRIORITY_OFFSET1_RETX_LSB) & RX_PRIORITY_OFFSET1_RETX_MASK)
+#define RX_PRIORITY_OFFSET1_PRESP_RSSI_HIGH_MSB 17
+#define RX_PRIORITY_OFFSET1_PRESP_RSSI_HIGH_LSB 12
+#define RX_PRIORITY_OFFSET1_PRESP_RSSI_HIGH_MASK 0x0003f000
+#define RX_PRIORITY_OFFSET1_PRESP_RSSI_HIGH_GET(x) (((x) & RX_PRIORITY_OFFSET1_PRESP_RSSI_HIGH_MASK) >> RX_PRIORITY_OFFSET1_PRESP_RSSI_HIGH_LSB)
+#define RX_PRIORITY_OFFSET1_PRESP_RSSI_HIGH_SET(x) (((x) << RX_PRIORITY_OFFSET1_PRESP_RSSI_HIGH_LSB) & RX_PRIORITY_OFFSET1_PRESP_RSSI_HIGH_MASK)
+#define RX_PRIORITY_OFFSET1_LENGTH_LOW_MSB 11
+#define RX_PRIORITY_OFFSET1_LENGTH_LOW_LSB 6
+#define RX_PRIORITY_OFFSET1_LENGTH_LOW_MASK 0x00000fc0
+#define RX_PRIORITY_OFFSET1_LENGTH_LOW_GET(x) (((x) & RX_PRIORITY_OFFSET1_LENGTH_LOW_MASK) >> RX_PRIORITY_OFFSET1_LENGTH_LOW_LSB)
+#define RX_PRIORITY_OFFSET1_LENGTH_LOW_SET(x) (((x) << RX_PRIORITY_OFFSET1_LENGTH_LOW_LSB) & RX_PRIORITY_OFFSET1_LENGTH_LOW_MASK)
+#define RX_PRIORITY_OFFSET1_LENGTH_HIGH_MSB 5
+#define RX_PRIORITY_OFFSET1_LENGTH_HIGH_LSB 0
+#define RX_PRIORITY_OFFSET1_LENGTH_HIGH_MASK 0x0000003f
+#define RX_PRIORITY_OFFSET1_LENGTH_HIGH_GET(x) (((x) & RX_PRIORITY_OFFSET1_LENGTH_HIGH_MASK) >> RX_PRIORITY_OFFSET1_LENGTH_HIGH_LSB)
+#define RX_PRIORITY_OFFSET1_LENGTH_HIGH_SET(x) (((x) << RX_PRIORITY_OFFSET1_LENGTH_HIGH_LSB) & RX_PRIORITY_OFFSET1_LENGTH_HIGH_MASK)
+
+#define RX_PRIORITY_OFFSET2_ADDRESS 0x0000838c
+#define RX_PRIORITY_OFFSET2_OFFSET 0x0000038c
+#define RX_PRIORITY_OFFSET2_BEACON_MSB 29
+#define RX_PRIORITY_OFFSET2_BEACON_LSB 24
+#define RX_PRIORITY_OFFSET2_BEACON_MASK 0x3f000000
+#define RX_PRIORITY_OFFSET2_BEACON_GET(x) (((x) & RX_PRIORITY_OFFSET2_BEACON_MASK) >> RX_PRIORITY_OFFSET2_BEACON_LSB)
+#define RX_PRIORITY_OFFSET2_BEACON_SET(x) (((x) << RX_PRIORITY_OFFSET2_BEACON_LSB) & RX_PRIORITY_OFFSET2_BEACON_MASK)
+#define RX_PRIORITY_OFFSET2_MGMT_MSB 23
+#define RX_PRIORITY_OFFSET2_MGMT_LSB 18
+#define RX_PRIORITY_OFFSET2_MGMT_MASK 0x00fc0000
+#define RX_PRIORITY_OFFSET2_MGMT_GET(x) (((x) & RX_PRIORITY_OFFSET2_MGMT_MASK) >> RX_PRIORITY_OFFSET2_MGMT_LSB)
+#define RX_PRIORITY_OFFSET2_MGMT_SET(x) (((x) << RX_PRIORITY_OFFSET2_MGMT_LSB) & RX_PRIORITY_OFFSET2_MGMT_MASK)
+#define RX_PRIORITY_OFFSET2_ATIM_MSB 17
+#define RX_PRIORITY_OFFSET2_ATIM_LSB 12
+#define RX_PRIORITY_OFFSET2_ATIM_MASK 0x0003f000
+#define RX_PRIORITY_OFFSET2_ATIM_GET(x) (((x) & RX_PRIORITY_OFFSET2_ATIM_MASK) >> RX_PRIORITY_OFFSET2_ATIM_LSB)
+#define RX_PRIORITY_OFFSET2_ATIM_SET(x) (((x) << RX_PRIORITY_OFFSET2_ATIM_LSB) & RX_PRIORITY_OFFSET2_ATIM_MASK)
+#define RX_PRIORITY_OFFSET2_PRESP_MSB 11
+#define RX_PRIORITY_OFFSET2_PRESP_LSB 6
+#define RX_PRIORITY_OFFSET2_PRESP_MASK 0x00000fc0
+#define RX_PRIORITY_OFFSET2_PRESP_GET(x) (((x) & RX_PRIORITY_OFFSET2_PRESP_MASK) >> RX_PRIORITY_OFFSET2_PRESP_LSB)
+#define RX_PRIORITY_OFFSET2_PRESP_SET(x) (((x) << RX_PRIORITY_OFFSET2_PRESP_LSB) & RX_PRIORITY_OFFSET2_PRESP_MASK)
+#define RX_PRIORITY_OFFSET2_XCAST_MSB 5
+#define RX_PRIORITY_OFFSET2_XCAST_LSB 0
+#define RX_PRIORITY_OFFSET2_XCAST_MASK 0x0000003f
+#define RX_PRIORITY_OFFSET2_XCAST_GET(x) (((x) & RX_PRIORITY_OFFSET2_XCAST_MASK) >> RX_PRIORITY_OFFSET2_XCAST_LSB)
+#define RX_PRIORITY_OFFSET2_XCAST_SET(x) (((x) << RX_PRIORITY_OFFSET2_XCAST_LSB) & RX_PRIORITY_OFFSET2_XCAST_MASK)
+
+#define RX_PRIORITY_OFFSET3_ADDRESS 0x00008390
+#define RX_PRIORITY_OFFSET3_OFFSET 0x00000390
+#define RX_PRIORITY_OFFSET3_PS_POLL_MSB 29
+#define RX_PRIORITY_OFFSET3_PS_POLL_LSB 24
+#define RX_PRIORITY_OFFSET3_PS_POLL_MASK 0x3f000000
+#define RX_PRIORITY_OFFSET3_PS_POLL_GET(x) (((x) & RX_PRIORITY_OFFSET3_PS_POLL_MASK) >> RX_PRIORITY_OFFSET3_PS_POLL_LSB)
+#define RX_PRIORITY_OFFSET3_PS_POLL_SET(x) (((x) << RX_PRIORITY_OFFSET3_PS_POLL_LSB) & RX_PRIORITY_OFFSET3_PS_POLL_MASK)
+#define RX_PRIORITY_OFFSET3_AMSDU_MSB 23
+#define RX_PRIORITY_OFFSET3_AMSDU_LSB 18
+#define RX_PRIORITY_OFFSET3_AMSDU_MASK 0x00fc0000
+#define RX_PRIORITY_OFFSET3_AMSDU_GET(x) (((x) & RX_PRIORITY_OFFSET3_AMSDU_MASK) >> RX_PRIORITY_OFFSET3_AMSDU_LSB)
+#define RX_PRIORITY_OFFSET3_AMSDU_SET(x) (((x) << RX_PRIORITY_OFFSET3_AMSDU_LSB) & RX_PRIORITY_OFFSET3_AMSDU_MASK)
+#define RX_PRIORITY_OFFSET3_AMPDU_MSB 17
+#define RX_PRIORITY_OFFSET3_AMPDU_LSB 12
+#define RX_PRIORITY_OFFSET3_AMPDU_MASK 0x0003f000
+#define RX_PRIORITY_OFFSET3_AMPDU_GET(x) (((x) & RX_PRIORITY_OFFSET3_AMPDU_MASK) >> RX_PRIORITY_OFFSET3_AMPDU_LSB)
+#define RX_PRIORITY_OFFSET3_AMPDU_SET(x) (((x) << RX_PRIORITY_OFFSET3_AMPDU_LSB) & RX_PRIORITY_OFFSET3_AMPDU_MASK)
+#define RX_PRIORITY_OFFSET3_EOSP_MSB 11
+#define RX_PRIORITY_OFFSET3_EOSP_LSB 6
+#define RX_PRIORITY_OFFSET3_EOSP_MASK 0x00000fc0
+#define RX_PRIORITY_OFFSET3_EOSP_GET(x) (((x) & RX_PRIORITY_OFFSET3_EOSP_MASK) >> RX_PRIORITY_OFFSET3_EOSP_LSB)
+#define RX_PRIORITY_OFFSET3_EOSP_SET(x) (((x) << RX_PRIORITY_OFFSET3_EOSP_LSB) & RX_PRIORITY_OFFSET3_EOSP_MASK)
+#define RX_PRIORITY_OFFSET3_MORE_MSB 5
+#define RX_PRIORITY_OFFSET3_MORE_LSB 0
+#define RX_PRIORITY_OFFSET3_MORE_MASK 0x0000003f
+#define RX_PRIORITY_OFFSET3_MORE_GET(x) (((x) & RX_PRIORITY_OFFSET3_MORE_MASK) >> RX_PRIORITY_OFFSET3_MORE_LSB)
+#define RX_PRIORITY_OFFSET3_MORE_SET(x) (((x) << RX_PRIORITY_OFFSET3_MORE_LSB) & RX_PRIORITY_OFFSET3_MORE_MASK)
+
+#define RX_PRIORITY_OFFSET4_ADDRESS 0x00008394
+#define RX_PRIORITY_OFFSET4_OFFSET 0x00000394
+#define RX_PRIORITY_OFFSET4_BEACON_RSSI_HIGH_MSB 29
+#define RX_PRIORITY_OFFSET4_BEACON_RSSI_HIGH_LSB 24
+#define RX_PRIORITY_OFFSET4_BEACON_RSSI_HIGH_MASK 0x3f000000
+#define RX_PRIORITY_OFFSET4_BEACON_RSSI_HIGH_GET(x) (((x) & RX_PRIORITY_OFFSET4_BEACON_RSSI_HIGH_MASK) >> RX_PRIORITY_OFFSET4_BEACON_RSSI_HIGH_LSB)
+#define RX_PRIORITY_OFFSET4_BEACON_RSSI_HIGH_SET(x) (((x) << RX_PRIORITY_OFFSET4_BEACON_RSSI_HIGH_LSB) & RX_PRIORITY_OFFSET4_BEACON_RSSI_HIGH_MASK)
+#define RX_PRIORITY_OFFSET4_MGMT_RSSI_HIGH_MSB 23
+#define RX_PRIORITY_OFFSET4_MGMT_RSSI_HIGH_LSB 18
+#define RX_PRIORITY_OFFSET4_MGMT_RSSI_HIGH_MASK 0x00fc0000
+#define RX_PRIORITY_OFFSET4_MGMT_RSSI_HIGH_GET(x) (((x) & RX_PRIORITY_OFFSET4_MGMT_RSSI_HIGH_MASK) >> RX_PRIORITY_OFFSET4_MGMT_RSSI_HIGH_LSB)
+#define RX_PRIORITY_OFFSET4_MGMT_RSSI_HIGH_SET(x) (((x) << RX_PRIORITY_OFFSET4_MGMT_RSSI_HIGH_LSB) & RX_PRIORITY_OFFSET4_MGMT_RSSI_HIGH_MASK)
+#define RX_PRIORITY_OFFSET4_BEACON_SSID_MSB 17
+#define RX_PRIORITY_OFFSET4_BEACON_SSID_LSB 12
+#define RX_PRIORITY_OFFSET4_BEACON_SSID_MASK 0x0003f000
+#define RX_PRIORITY_OFFSET4_BEACON_SSID_GET(x) (((x) & RX_PRIORITY_OFFSET4_BEACON_SSID_MASK) >> RX_PRIORITY_OFFSET4_BEACON_SSID_LSB)
+#define RX_PRIORITY_OFFSET4_BEACON_SSID_SET(x) (((x) << RX_PRIORITY_OFFSET4_BEACON_SSID_LSB) & RX_PRIORITY_OFFSET4_BEACON_SSID_MASK)
+#define RX_PRIORITY_OFFSET4_NULL_MSB 11
+#define RX_PRIORITY_OFFSET4_NULL_LSB 6
+#define RX_PRIORITY_OFFSET4_NULL_MASK 0x00000fc0
+#define RX_PRIORITY_OFFSET4_NULL_GET(x) (((x) & RX_PRIORITY_OFFSET4_NULL_MASK) >> RX_PRIORITY_OFFSET4_NULL_LSB)
+#define RX_PRIORITY_OFFSET4_NULL_SET(x) (((x) << RX_PRIORITY_OFFSET4_NULL_LSB) & RX_PRIORITY_OFFSET4_NULL_MASK)
+#define RX_PRIORITY_OFFSET4_PREQ_MSB 5
+#define RX_PRIORITY_OFFSET4_PREQ_LSB 0
+#define RX_PRIORITY_OFFSET4_PREQ_MASK 0x0000003f
+#define RX_PRIORITY_OFFSET4_PREQ_GET(x) (((x) & RX_PRIORITY_OFFSET4_PREQ_MASK) >> RX_PRIORITY_OFFSET4_PREQ_LSB)
+#define RX_PRIORITY_OFFSET4_PREQ_SET(x) (((x) << RX_PRIORITY_OFFSET4_PREQ_LSB) & RX_PRIORITY_OFFSET4_PREQ_MASK)
+
+#define RX_PRIORITY_OFFSET5_ADDRESS 0x00008398
+#define RX_PRIORITY_OFFSET5_OFFSET 0x00000398
+#define RX_PRIORITY_OFFSET5_PS_POLL_RSSI_HIGH_MSB 17
+#define RX_PRIORITY_OFFSET5_PS_POLL_RSSI_HIGH_LSB 12
+#define RX_PRIORITY_OFFSET5_PS_POLL_RSSI_HIGH_MASK 0x0003f000
+#define RX_PRIORITY_OFFSET5_PS_POLL_RSSI_HIGH_GET(x) (((x) & RX_PRIORITY_OFFSET5_PS_POLL_RSSI_HIGH_MASK) >> RX_PRIORITY_OFFSET5_PS_POLL_RSSI_HIGH_LSB)
+#define RX_PRIORITY_OFFSET5_PS_POLL_RSSI_HIGH_SET(x) (((x) << RX_PRIORITY_OFFSET5_PS_POLL_RSSI_HIGH_LSB) & RX_PRIORITY_OFFSET5_PS_POLL_RSSI_HIGH_MASK)
+#define RX_PRIORITY_OFFSET5_PREQ_RSSI_HIGH_MSB 11
+#define RX_PRIORITY_OFFSET5_PREQ_RSSI_HIGH_LSB 6
+#define RX_PRIORITY_OFFSET5_PREQ_RSSI_HIGH_MASK 0x00000fc0
+#define RX_PRIORITY_OFFSET5_PREQ_RSSI_HIGH_GET(x) (((x) & RX_PRIORITY_OFFSET5_PREQ_RSSI_HIGH_MASK) >> RX_PRIORITY_OFFSET5_PREQ_RSSI_HIGH_LSB)
+#define RX_PRIORITY_OFFSET5_PREQ_RSSI_HIGH_SET(x) (((x) << RX_PRIORITY_OFFSET5_PREQ_RSSI_HIGH_LSB) & RX_PRIORITY_OFFSET5_PREQ_RSSI_HIGH_MASK)
+#define RX_PRIORITY_OFFSET5_NULL_RSSI_HIGH_MSB 5
+#define RX_PRIORITY_OFFSET5_NULL_RSSI_HIGH_LSB 0
+#define RX_PRIORITY_OFFSET5_NULL_RSSI_HIGH_MASK 0x0000003f
+#define RX_PRIORITY_OFFSET5_NULL_RSSI_HIGH_GET(x) (((x) & RX_PRIORITY_OFFSET5_NULL_RSSI_HIGH_MASK) >> RX_PRIORITY_OFFSET5_NULL_RSSI_HIGH_LSB)
+#define RX_PRIORITY_OFFSET5_NULL_RSSI_HIGH_SET(x) (((x) << RX_PRIORITY_OFFSET5_NULL_RSSI_HIGH_LSB) & RX_PRIORITY_OFFSET5_NULL_RSSI_HIGH_MASK)
+
+#define MAC_PCU_BSSID2_L32_ADDRESS 0x0000839c
+#define MAC_PCU_BSSID2_L32_OFFSET 0x0000039c
+#define MAC_PCU_BSSID2_L32_ADDR_MSB 31
+#define MAC_PCU_BSSID2_L32_ADDR_LSB 0
+#define MAC_PCU_BSSID2_L32_ADDR_MASK 0xffffffff
+#define MAC_PCU_BSSID2_L32_ADDR_GET(x) (((x) & MAC_PCU_BSSID2_L32_ADDR_MASK) >> MAC_PCU_BSSID2_L32_ADDR_LSB)
+#define MAC_PCU_BSSID2_L32_ADDR_SET(x) (((x) << MAC_PCU_BSSID2_L32_ADDR_LSB) & MAC_PCU_BSSID2_L32_ADDR_MASK)
+
+#define MAC_PCU_BSSID2_U16_ADDRESS 0x000083a0
+#define MAC_PCU_BSSID2_U16_OFFSET 0x000003a0
+#define MAC_PCU_BSSID2_U16_ENABLE_MSB 16
+#define MAC_PCU_BSSID2_U16_ENABLE_LSB 16
+#define MAC_PCU_BSSID2_U16_ENABLE_MASK 0x00010000
+#define MAC_PCU_BSSID2_U16_ENABLE_GET(x) (((x) & MAC_PCU_BSSID2_U16_ENABLE_MASK) >> MAC_PCU_BSSID2_U16_ENABLE_LSB)
+#define MAC_PCU_BSSID2_U16_ENABLE_SET(x) (((x) << MAC_PCU_BSSID2_U16_ENABLE_LSB) & MAC_PCU_BSSID2_U16_ENABLE_MASK)
+#define MAC_PCU_BSSID2_U16_ADDR_MSB 15
+#define MAC_PCU_BSSID2_U16_ADDR_LSB 0
+#define MAC_PCU_BSSID2_U16_ADDR_MASK 0x0000ffff
+#define MAC_PCU_BSSID2_U16_ADDR_GET(x) (((x) & MAC_PCU_BSSID2_U16_ADDR_MASK) >> MAC_PCU_BSSID2_U16_ADDR_LSB)
+#define MAC_PCU_BSSID2_U16_ADDR_SET(x) (((x) << MAC_PCU_BSSID2_U16_ADDR_LSB) & MAC_PCU_BSSID2_U16_ADDR_MASK)
+
+#define MAC_PCU_TSF1_STATUS_L32_ADDRESS 0x000083a4
+#define MAC_PCU_TSF1_STATUS_L32_OFFSET 0x000003a4
+#define MAC_PCU_TSF1_STATUS_L32_VALUE_MSB 31
+#define MAC_PCU_TSF1_STATUS_L32_VALUE_LSB 0
+#define MAC_PCU_TSF1_STATUS_L32_VALUE_MASK 0xffffffff
+#define MAC_PCU_TSF1_STATUS_L32_VALUE_GET(x) (((x) & MAC_PCU_TSF1_STATUS_L32_VALUE_MASK) >> MAC_PCU_TSF1_STATUS_L32_VALUE_LSB)
+#define MAC_PCU_TSF1_STATUS_L32_VALUE_SET(x) (((x) << MAC_PCU_TSF1_STATUS_L32_VALUE_LSB) & MAC_PCU_TSF1_STATUS_L32_VALUE_MASK)
+
+#define MAC_PCU_TSF1_STATUS_U32_ADDRESS 0x000083a8
+#define MAC_PCU_TSF1_STATUS_U32_OFFSET 0x000003a8
+#define MAC_PCU_TSF1_STATUS_U32_VALUE_MSB 31
+#define MAC_PCU_TSF1_STATUS_U32_VALUE_LSB 0
+#define MAC_PCU_TSF1_STATUS_U32_VALUE_MASK 0xffffffff
+#define MAC_PCU_TSF1_STATUS_U32_VALUE_GET(x) (((x) & MAC_PCU_TSF1_STATUS_U32_VALUE_MASK) >> MAC_PCU_TSF1_STATUS_U32_VALUE_LSB)
+#define MAC_PCU_TSF1_STATUS_U32_VALUE_SET(x) (((x) << MAC_PCU_TSF1_STATUS_U32_VALUE_LSB) & MAC_PCU_TSF1_STATUS_U32_VALUE_MASK)
+
+#define MAC_PCU_TSF2_STATUS_L32_ADDRESS 0x000083ac
+#define MAC_PCU_TSF2_STATUS_L32_OFFSET 0x000003ac
+#define MAC_PCU_TSF2_STATUS_L32_VALUE_MSB 31
+#define MAC_PCU_TSF2_STATUS_L32_VALUE_LSB 0
+#define MAC_PCU_TSF2_STATUS_L32_VALUE_MASK 0xffffffff
+#define MAC_PCU_TSF2_STATUS_L32_VALUE_GET(x) (((x) & MAC_PCU_TSF2_STATUS_L32_VALUE_MASK) >> MAC_PCU_TSF2_STATUS_L32_VALUE_LSB)
+#define MAC_PCU_TSF2_STATUS_L32_VALUE_SET(x) (((x) << MAC_PCU_TSF2_STATUS_L32_VALUE_LSB) & MAC_PCU_TSF2_STATUS_L32_VALUE_MASK)
+
+#define MAC_PCU_TSF2_STATUS_U32_ADDRESS 0x000083b0
+#define MAC_PCU_TSF2_STATUS_U32_OFFSET 0x000003b0
+#define MAC_PCU_TSF2_STATUS_U32_VALUE_MSB 31
+#define MAC_PCU_TSF2_STATUS_U32_VALUE_LSB 0
+#define MAC_PCU_TSF2_STATUS_U32_VALUE_MASK 0xffffffff
+#define MAC_PCU_TSF2_STATUS_U32_VALUE_GET(x) (((x) & MAC_PCU_TSF2_STATUS_U32_VALUE_MASK) >> MAC_PCU_TSF2_STATUS_U32_VALUE_LSB)
+#define MAC_PCU_TSF2_STATUS_U32_VALUE_SET(x) (((x) << MAC_PCU_TSF2_STATUS_U32_VALUE_LSB) & MAC_PCU_TSF2_STATUS_U32_VALUE_MASK)
+
+#define MAC_PCU_TXBUF_BA_ADDRESS 0x00008400
+#define MAC_PCU_TXBUF_BA_OFFSET 0x00000400
+#define MAC_PCU_TXBUF_BA_DATA_MSB 31
+#define MAC_PCU_TXBUF_BA_DATA_LSB 0
+#define MAC_PCU_TXBUF_BA_DATA_MASK 0xffffffff
+#define MAC_PCU_TXBUF_BA_DATA_GET(x) (((x) & MAC_PCU_TXBUF_BA_DATA_MASK) >> MAC_PCU_TXBUF_BA_DATA_LSB)
+#define MAC_PCU_TXBUF_BA_DATA_SET(x) (((x) << MAC_PCU_TXBUF_BA_DATA_LSB) & MAC_PCU_TXBUF_BA_DATA_MASK)
+
+#define MAC_PCU_KEY_CACHE_1_ADDRESS 0x00008800
+#define MAC_PCU_KEY_CACHE_1_OFFSET 0x00000800
+#define MAC_PCU_KEY_CACHE_1_DATA_MSB 31
+#define MAC_PCU_KEY_CACHE_1_DATA_LSB 0
+#define MAC_PCU_KEY_CACHE_1_DATA_MASK 0xffffffff
+#define MAC_PCU_KEY_CACHE_1_DATA_GET(x) (((x) & MAC_PCU_KEY_CACHE_1_DATA_MASK) >> MAC_PCU_KEY_CACHE_1_DATA_LSB)
+#define MAC_PCU_KEY_CACHE_1_DATA_SET(x) (((x) << MAC_PCU_KEY_CACHE_1_DATA_LSB) & MAC_PCU_KEY_CACHE_1_DATA_MASK)
+
+#define MAC_PCU_BASEBAND_0_ADDRESS 0x00009800
+#define MAC_PCU_BASEBAND_0_OFFSET 0x00001800
+#define MAC_PCU_BASEBAND_0_DATA_MSB 31
+#define MAC_PCU_BASEBAND_0_DATA_LSB 0
+#define MAC_PCU_BASEBAND_0_DATA_MASK 0xffffffff
+#define MAC_PCU_BASEBAND_0_DATA_GET(x) (((x) & MAC_PCU_BASEBAND_0_DATA_MASK) >> MAC_PCU_BASEBAND_0_DATA_LSB)
+#define MAC_PCU_BASEBAND_0_DATA_SET(x) (((x) << MAC_PCU_BASEBAND_0_DATA_LSB) & MAC_PCU_BASEBAND_0_DATA_MASK)
+
+#define MAC_PCU_BASEBAND_1_ADDRESS 0x0000a000
+#define MAC_PCU_BASEBAND_1_OFFSET 0x00002000
+#define MAC_PCU_BASEBAND_1_DATA_MSB 31
+#define MAC_PCU_BASEBAND_1_DATA_LSB 0
+#define MAC_PCU_BASEBAND_1_DATA_MASK 0xffffffff
+#define MAC_PCU_BASEBAND_1_DATA_GET(x) (((x) & MAC_PCU_BASEBAND_1_DATA_MASK) >> MAC_PCU_BASEBAND_1_DATA_LSB)
+#define MAC_PCU_BASEBAND_1_DATA_SET(x) (((x) << MAC_PCU_BASEBAND_1_DATA_LSB) & MAC_PCU_BASEBAND_1_DATA_MASK)
+
+#define MAC_PCU_BASEBAND_2_ADDRESS 0x0000c000
+#define MAC_PCU_BASEBAND_2_OFFSET 0x00004000
+#define MAC_PCU_BASEBAND_2_DATA_MSB 31
+#define MAC_PCU_BASEBAND_2_DATA_LSB 0
+#define MAC_PCU_BASEBAND_2_DATA_MASK 0xffffffff
+#define MAC_PCU_BASEBAND_2_DATA_GET(x) (((x) & MAC_PCU_BASEBAND_2_DATA_MASK) >> MAC_PCU_BASEBAND_2_DATA_LSB)
+#define MAC_PCU_BASEBAND_2_DATA_SET(x) (((x) << MAC_PCU_BASEBAND_2_DATA_LSB) & MAC_PCU_BASEBAND_2_DATA_MASK)
+
+#define MAC_PCU_BASEBAND_3_ADDRESS 0x0000d000
+#define MAC_PCU_BASEBAND_3_OFFSET 0x00005000
+#define MAC_PCU_BASEBAND_3_DATA_MSB 31
+#define MAC_PCU_BASEBAND_3_DATA_LSB 0
+#define MAC_PCU_BASEBAND_3_DATA_MASK 0xffffffff
+#define MAC_PCU_BASEBAND_3_DATA_GET(x) (((x) & MAC_PCU_BASEBAND_3_DATA_MASK) >> MAC_PCU_BASEBAND_3_DATA_LSB)
+#define MAC_PCU_BASEBAND_3_DATA_SET(x) (((x) << MAC_PCU_BASEBAND_3_DATA_LSB) & MAC_PCU_BASEBAND_3_DATA_MASK)
+
+#define MAC_PCU_BUF_ADDRESS 0x0000e000
+#define MAC_PCU_BUF_OFFSET 0x00006000
+#define MAC_PCU_BUF_DATA_MSB 31
+#define MAC_PCU_BUF_DATA_LSB 0
+#define MAC_PCU_BUF_DATA_MASK 0xffffffff
+#define MAC_PCU_BUF_DATA_GET(x) (((x) & MAC_PCU_BUF_DATA_MASK) >> MAC_PCU_BUF_DATA_LSB)
+#define MAC_PCU_BUF_DATA_SET(x) (((x) << MAC_PCU_BUF_DATA_LSB) & MAC_PCU_BUF_DATA_MASK)
+
+
+#ifndef __ASSEMBLER__
+
+typedef struct mac_pcu_reg_s {
+ volatile unsigned int mac_pcu_sta_addr_l32;
+ volatile unsigned int mac_pcu_sta_addr_u16;
+ volatile unsigned int mac_pcu_bssid_l32;
+ volatile unsigned int mac_pcu_bssid_u16;
+ volatile unsigned int mac_pcu_bcn_rssi_ave;
+ volatile unsigned int mac_pcu_ack_cts_timeout;
+ volatile unsigned int mac_pcu_bcn_rssi_ctl;
+ volatile unsigned int mac_pcu_usec_latency;
+ volatile unsigned int pcu_max_cfp_dur;
+ volatile unsigned int mac_pcu_rx_filter;
+ volatile unsigned int mac_pcu_mcast_filter_l32;
+ volatile unsigned int mac_pcu_mcast_filter_u32;
+ volatile unsigned int mac_pcu_diag_sw;
+ volatile unsigned int mac_pcu_tst_addac;
+ volatile unsigned int mac_pcu_def_antenna;
+ volatile unsigned int mac_pcu_aes_mute_mask_0;
+ volatile unsigned int mac_pcu_aes_mute_mask_1;
+ volatile unsigned int mac_pcu_gated_clks;
+ volatile unsigned int mac_pcu_obs_bus_2;
+ volatile unsigned int mac_pcu_obs_bus_1;
+ volatile unsigned int mac_pcu_dym_mimo_pwr_save;
+ volatile unsigned int mac_pcu_last_beacon_tsf;
+ volatile unsigned int mac_pcu_nav;
+ volatile unsigned int mac_pcu_rts_success_cnt;
+ volatile unsigned int mac_pcu_rts_fail_cnt;
+ volatile unsigned int mac_pcu_ack_fail_cnt;
+ volatile unsigned int mac_pcu_fcs_fail_cnt;
+ volatile unsigned int mac_pcu_beacon_cnt;
+ volatile unsigned int mac_pcu_xrmode;
+ volatile unsigned int mac_pcu_xrdel;
+ volatile unsigned int mac_pcu_xrto;
+ volatile unsigned int mac_pcu_xrcrp;
+ volatile unsigned int mac_pcu_xrstmp;
+ volatile unsigned int mac_pcu_addr1_mask_l32;
+ volatile unsigned int mac_pcu_addr1_mask_u16;
+ volatile unsigned int mac_pcu_tpc;
+ volatile unsigned int mac_pcu_tx_frame_cnt;
+ volatile unsigned int mac_pcu_rx_frame_cnt;
+ volatile unsigned int mac_pcu_rx_clear_cnt;
+ volatile unsigned int mac_pcu_cycle_cnt;
+ volatile unsigned int mac_pcu_quiet_time_1;
+ volatile unsigned int mac_pcu_quiet_time_2;
+ volatile unsigned int mac_pcu_qos_no_ack;
+ volatile unsigned int mac_pcu_phy_error_mask;
+ volatile unsigned int mac_pcu_xrlat;
+ volatile unsigned int mac_pcu_rxbuf;
+ volatile unsigned int mac_pcu_mic_qos_control;
+ volatile unsigned int mac_pcu_mic_qos_select;
+ volatile unsigned int mac_pcu_misc_mode;
+ volatile unsigned int mac_pcu_filter_ofdm_cnt;
+ volatile unsigned int mac_pcu_filter_cck_cnt;
+ volatile unsigned int mac_pcu_phy_err_cnt_1;
+ volatile unsigned int mac_pcu_phy_err_cnt_1_mask;
+ volatile unsigned int mac_pcu_phy_err_cnt_2;
+ volatile unsigned int mac_pcu_phy_err_cnt_2_mask;
+ volatile unsigned int mac_pcu_tsf_threshold;
+ volatile unsigned int mac_pcu_phy_error_eifs_mask;
+ volatile unsigned int mac_pcu_phy_err_cnt_3;
+ volatile unsigned int mac_pcu_phy_err_cnt_3_mask;
+ volatile unsigned int mac_pcu_bluetooth_mode;
+ volatile unsigned int mac_pcu_bluetooth_weights;
+ volatile unsigned int mac_pcu_bluetooth_mode2;
+ volatile unsigned int mac_pcu_txsifs;
+ volatile unsigned int mac_pcu_txop_x;
+ volatile unsigned int mac_pcu_txop_0_3;
+ volatile unsigned int mac_pcu_txop_4_7;
+ volatile unsigned int mac_pcu_txop_8_11;
+ volatile unsigned int mac_pcu_txop_12_15;
+ volatile unsigned int mac_pcu_logic_analyzer;
+ volatile unsigned int mac_pcu_logic_analyzer_32l;
+ volatile unsigned int mac_pcu_logic_analyzer_16u;
+ volatile unsigned int mac_pcu_phy_err_cnt_mask_cont;
+ volatile unsigned int mac_pcu_azimuth_mode;
+ volatile unsigned int mac_pcu_20_40_mode;
+ volatile unsigned int mac_pcu_rx_clear_diff_cnt;
+ volatile unsigned int mac_pcu_self_gen_antenna_mask;
+ volatile unsigned int mac_pcu_ba_bar_control;
+ volatile unsigned int mac_pcu_legacy_plcp_spoof;
+ volatile unsigned int mac_pcu_phy_error_mask_cont;
+ volatile unsigned int mac_pcu_tx_timer;
+ volatile unsigned int mac_pcu_txbuf_ctrl;
+ volatile unsigned int mac_pcu_misc_mode2;
+ volatile unsigned int mac_pcu_alt_aes_mute_mask;
+ volatile unsigned int mac_pcu_azimuth_time_stamp;
+ volatile unsigned int mac_pcu_max_cfp_dur;
+ volatile unsigned int mac_pcu_hcf_timeout;
+ volatile unsigned int mac_pcu_bluetooth_weights2;
+ volatile unsigned int mac_pcu_bluetooth_tsf_bt_active;
+ volatile unsigned int mac_pcu_bluetooth_tsf_bt_priority;
+ volatile unsigned int mac_pcu_bluetooth_mode3;
+ volatile unsigned int mac_pcu_bluetooth_mode4;
+ unsigned char pad0[148]; /* pad to 0x200 */
+ volatile unsigned int mac_pcu_bt_bt[64];
+ volatile unsigned int mac_pcu_bt_bt_async;
+ volatile unsigned int mac_pcu_bt_wl_1;
+ volatile unsigned int mac_pcu_bt_wl_2;
+ volatile unsigned int mac_pcu_bt_wl_3;
+ volatile unsigned int mac_pcu_bt_wl_4;
+ volatile unsigned int mac_pcu_coex_epta;
+ volatile unsigned int mac_pcu_coex_lnamaxgain1;
+ volatile unsigned int mac_pcu_coex_lnamaxgain2;
+ volatile unsigned int mac_pcu_coex_lnamaxgain3;
+ volatile unsigned int mac_pcu_coex_lnamaxgain4;
+ volatile unsigned int mac_pcu_basic_rate_set0;
+ volatile unsigned int mac_pcu_basic_rate_set1;
+ volatile unsigned int mac_pcu_basic_rate_set2;
+ volatile unsigned int mac_pcu_basic_rate_set3;
+ volatile unsigned int mac_pcu_rx_int_status0;
+ volatile unsigned int mac_pcu_rx_int_status1;
+ volatile unsigned int mac_pcu_rx_int_status2;
+ volatile unsigned int mac_pcu_rx_int_status3;
+ volatile unsigned int ht_half_gi_rate1;
+ volatile unsigned int ht_half_gi_rate2;
+ volatile unsigned int ht_full_gi_rate1;
+ volatile unsigned int ht_full_gi_rate2;
+ volatile unsigned int legacy_rate1;
+ volatile unsigned int legacy_rate2;
+ volatile unsigned int legacy_rate3;
+ volatile unsigned int rx_int_filter;
+ volatile unsigned int rx_int_overflow;
+ volatile unsigned int rx_filter_thresh;
+ volatile unsigned int rx_filter_thresh1;
+ volatile unsigned int rx_priority_thresh0;
+ volatile unsigned int rx_priority_thresh1;
+ volatile unsigned int rx_priority_thresh2;
+ volatile unsigned int rx_priority_thresh3;
+ volatile unsigned int rx_priority_offset0;
+ volatile unsigned int rx_priority_offset1;
+ volatile unsigned int rx_priority_offset2;
+ volatile unsigned int rx_priority_offset3;
+ volatile unsigned int rx_priority_offset4;
+ volatile unsigned int rx_priority_offset5;
+ volatile unsigned int mac_pcu_bssid2_l32;
+ volatile unsigned int mac_pcu_bssid2_u16;
+ volatile unsigned int mac_pcu_tsf1_status_l32;
+ volatile unsigned int mac_pcu_tsf1_status_u32;
+ volatile unsigned int mac_pcu_tsf2_status_l32;
+ volatile unsigned int mac_pcu_tsf2_status_u32;
+ unsigned char pad1[76]; /* pad to 0x400 */
+ volatile unsigned int mac_pcu_txbuf_ba[64];
+ unsigned char pad2[768]; /* pad to 0x800 */
+ volatile unsigned int mac_pcu_key_cache_1[256];
+ unsigned char pad3[3072]; /* pad to 0x1800 */
+ volatile unsigned int mac_pcu_baseband_0[512];
+ volatile unsigned int mac_pcu_baseband_1[2048];
+ volatile unsigned int mac_pcu_baseband_2[1024];
+ volatile unsigned int mac_pcu_baseband_3[1024];
+ volatile unsigned int mac_pcu_buf[512];
+} mac_pcu_reg_t;
+
+#endif /* __ASSEMBLER__ */
+
+#endif /* _MAC_PCU_H_ */
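
The *_MSB/_LSB/_MASK/_GET/_SET macros in the header above follow the usual Atheros register-header pattern: _SET() shifts a field value into position and masks it, _GET() extracts the field back out, and a read-modify-write of one field is a mask-clear followed by an OR of the _SET() result. The sketch below is illustrative only and is not part of the patch; the fake_reg backing word and the reg_read()/reg_write() helpers are stand-ins invented so the example compiles and runs on its own, where real driver code would go through the chip's register I/O layer. The field macros are repeated verbatim from the header so the sketch is self-contained.

/* Stand-alone sketch of the generated field-macro usage (not driver code).
 * fake_reg, reg_read() and reg_write() are hypothetical stand-ins for MMIO.
 */
#include <stdio.h>

#define RX_FILTER_THRESH_RSSI_LOW_LSB 16
#define RX_FILTER_THRESH_RSSI_LOW_MASK 0x00ff0000
#define RX_FILTER_THRESH_RSSI_LOW_GET(x) (((x) & RX_FILTER_THRESH_RSSI_LOW_MASK) >> RX_FILTER_THRESH_RSSI_LOW_LSB)
#define RX_FILTER_THRESH_RSSI_LOW_SET(x) (((x) << RX_FILTER_THRESH_RSSI_LOW_LSB) & RX_FILTER_THRESH_RSSI_LOW_MASK)

static unsigned int fake_reg;                    /* stands in for the MMIO word */

static unsigned int reg_read(void)  { return fake_reg; }
static void reg_write(unsigned int v) { fake_reg = v; }

int main(void)
{
	unsigned int val;

	/* Read-modify-write of one field: clear it with _MASK, insert with _SET(). */
	val = reg_read();
	val &= ~RX_FILTER_THRESH_RSSI_LOW_MASK;
	val |= RX_FILTER_THRESH_RSSI_LOW_SET(0x2a);
	reg_write(val);

	/* _GET() undoes _SET(): prints "rssi_low = 2a". */
	printf("rssi_low = %x\n", RX_FILTER_THRESH_RSSI_LOW_GET(reg_read()));
	return 0;
}

The mac_pcu_reg_t typedef at the end of the header serves the same register map as a C overlay: the pad0..pad3 arrays keep each member at its *_OFFSET value, so code holding a pointer to the block base could refer to, for example, ((mac_pcu_reg_t *)base)->rx_filter_thresh instead of the raw 0x36c offset. Whether a given driver uses the struct overlay or the address macros is a style choice outside this patch.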
diff --git a/drivers/staging/ath6kl/include/common/AR6002/hw4.0/hw/mbox_host_reg.h b/drivers/staging/ath6kl/include/common/AR6002/hw4.0/hw/mbox_host_reg.h
new file mode 100644
index 000000000000..3af562156f6e
--- /dev/null
+++ b/drivers/staging/ath6kl/include/common/AR6002/hw4.0/hw/mbox_host_reg.h
@@ -0,0 +1,37 @@
+// ------------------------------------------------------------------
+// Copyright (c) 2004-2010 Atheros Corporation. All rights reserved.
+//
+//
+// Permission to use, copy, modify, and/or distribute this software for any
+// purpose with or without fee is hereby granted, provided that the above
+// copyright notice and this permission notice appear in all copies.
+//
+// THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+// WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+// MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+// ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+// WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+// ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+// OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+//
+//
+// ------------------------------------------------------------------
+//===================================================================
+// Author(s): ="Atheros"
+//===================================================================
+
+
+#ifdef WLAN_HEADERS
+
+#include "mbox_wlan_host_reg.h"
+
+
+#ifndef BT_HEADERS
+
+
+
+#endif
+#endif
+
+
+
diff --git a/drivers/staging/ath6kl/include/common/AR6002/hw4.0/hw/mbox_reg.h b/drivers/staging/ath6kl/include/common/AR6002/hw4.0/hw/mbox_reg.h
new file mode 100644
index 000000000000..cc67585e2e8b
--- /dev/null
+++ b/drivers/staging/ath6kl/include/common/AR6002/hw4.0/hw/mbox_reg.h
@@ -0,0 +1,560 @@
+// ------------------------------------------------------------------
+// Copyright (c) 2004-2010 Atheros Corporation. All rights reserved.
+//
+//
+// Permission to use, copy, modify, and/or distribute this software for any
+// purpose with or without fee is hereby granted, provided that the above
+// copyright notice and this permission notice appear in all copies.
+//
+// THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+// WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+// MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+// ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+// WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+// ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+// OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+//
+//
+// ------------------------------------------------------------------
+//===================================================================
+// Author(s): ="Atheros"
+//===================================================================
+
+
+#ifdef WLAN_HEADERS
+
+#include "mbox_wlan_reg.h"
+
+
+#ifndef BT_HEADERS
+
+#define MBOX_FIFO_ADDRESS WLAN_MBOX_FIFO_ADDRESS
+#define MBOX_FIFO_OFFSET WLAN_MBOX_FIFO_OFFSET
+#define MBOX_FIFO_DATA_MSB WLAN_MBOX_FIFO_DATA_MSB
+#define MBOX_FIFO_DATA_LSB WLAN_MBOX_FIFO_DATA_LSB
+#define MBOX_FIFO_DATA_MASK WLAN_MBOX_FIFO_DATA_MASK
+#define MBOX_FIFO_DATA_GET(x) WLAN_MBOX_FIFO_DATA_GET(x)
+#define MBOX_FIFO_DATA_SET(x) WLAN_MBOX_FIFO_DATA_SET(x)
+#define MBOX_FIFO_STATUS_ADDRESS WLAN_MBOX_FIFO_STATUS_ADDRESS
+#define MBOX_FIFO_STATUS_OFFSET WLAN_MBOX_FIFO_STATUS_OFFSET
+#define MBOX_FIFO_STATUS_EMPTY_MSB WLAN_MBOX_FIFO_STATUS_EMPTY_MSB
+#define MBOX_FIFO_STATUS_EMPTY_LSB WLAN_MBOX_FIFO_STATUS_EMPTY_LSB
+#define MBOX_FIFO_STATUS_EMPTY_MASK WLAN_MBOX_FIFO_STATUS_EMPTY_MASK
+#define MBOX_FIFO_STATUS_EMPTY_GET(x) WLAN_MBOX_FIFO_STATUS_EMPTY_GET(x)
+#define MBOX_FIFO_STATUS_EMPTY_SET(x) WLAN_MBOX_FIFO_STATUS_EMPTY_SET(x)
+#define MBOX_FIFO_STATUS_FULL_MSB WLAN_MBOX_FIFO_STATUS_FULL_MSB
+#define MBOX_FIFO_STATUS_FULL_LSB WLAN_MBOX_FIFO_STATUS_FULL_LSB
+#define MBOX_FIFO_STATUS_FULL_MASK WLAN_MBOX_FIFO_STATUS_FULL_MASK
+#define MBOX_FIFO_STATUS_FULL_GET(x) WLAN_MBOX_FIFO_STATUS_FULL_GET(x)
+#define MBOX_FIFO_STATUS_FULL_SET(x) WLAN_MBOX_FIFO_STATUS_FULL_SET(x)
+#define MBOX_DMA_POLICY_ADDRESS WLAN_MBOX_DMA_POLICY_ADDRESS
+#define MBOX_DMA_POLICY_OFFSET WLAN_MBOX_DMA_POLICY_OFFSET
+#define MBOX_DMA_POLICY_TX_QUANTUM_MSB WLAN_MBOX_DMA_POLICY_TX_QUANTUM_MSB
+#define MBOX_DMA_POLICY_TX_QUANTUM_LSB WLAN_MBOX_DMA_POLICY_TX_QUANTUM_LSB
+#define MBOX_DMA_POLICY_TX_QUANTUM_MASK WLAN_MBOX_DMA_POLICY_TX_QUANTUM_MASK
+#define MBOX_DMA_POLICY_TX_QUANTUM_GET(x) WLAN_MBOX_DMA_POLICY_TX_QUANTUM_GET(x)
+#define MBOX_DMA_POLICY_TX_QUANTUM_SET(x) WLAN_MBOX_DMA_POLICY_TX_QUANTUM_SET(x)
+#define MBOX_DMA_POLICY_TX_ORDER_MSB WLAN_MBOX_DMA_POLICY_TX_ORDER_MSB
+#define MBOX_DMA_POLICY_TX_ORDER_LSB WLAN_MBOX_DMA_POLICY_TX_ORDER_LSB
+#define MBOX_DMA_POLICY_TX_ORDER_MASK WLAN_MBOX_DMA_POLICY_TX_ORDER_MASK
+#define MBOX_DMA_POLICY_TX_ORDER_GET(x) WLAN_MBOX_DMA_POLICY_TX_ORDER_GET(x)
+#define MBOX_DMA_POLICY_TX_ORDER_SET(x) WLAN_MBOX_DMA_POLICY_TX_ORDER_SET(x)
+#define MBOX_DMA_POLICY_RX_QUANTUM_MSB WLAN_MBOX_DMA_POLICY_RX_QUANTUM_MSB
+#define MBOX_DMA_POLICY_RX_QUANTUM_LSB WLAN_MBOX_DMA_POLICY_RX_QUANTUM_LSB
+#define MBOX_DMA_POLICY_RX_QUANTUM_MASK WLAN_MBOX_DMA_POLICY_RX_QUANTUM_MASK
+#define MBOX_DMA_POLICY_RX_QUANTUM_GET(x) WLAN_MBOX_DMA_POLICY_RX_QUANTUM_GET(x)
+#define MBOX_DMA_POLICY_RX_QUANTUM_SET(x) WLAN_MBOX_DMA_POLICY_RX_QUANTUM_SET(x)
+#define MBOX_DMA_POLICY_RX_ORDER_MSB WLAN_MBOX_DMA_POLICY_RX_ORDER_MSB
+#define MBOX_DMA_POLICY_RX_ORDER_LSB WLAN_MBOX_DMA_POLICY_RX_ORDER_LSB
+#define MBOX_DMA_POLICY_RX_ORDER_MASK WLAN_MBOX_DMA_POLICY_RX_ORDER_MASK
+#define MBOX_DMA_POLICY_RX_ORDER_GET(x) WLAN_MBOX_DMA_POLICY_RX_ORDER_GET(x)
+#define MBOX_DMA_POLICY_RX_ORDER_SET(x) WLAN_MBOX_DMA_POLICY_RX_ORDER_SET(x)
+#define MBOX0_DMA_RX_DESCRIPTOR_BASE_ADDRESS WLAN_MBOX0_DMA_RX_DESCRIPTOR_BASE_ADDRESS
+#define MBOX0_DMA_RX_DESCRIPTOR_BASE_OFFSET WLAN_MBOX0_DMA_RX_DESCRIPTOR_BASE_OFFSET
+#define MBOX0_DMA_RX_DESCRIPTOR_BASE_ADDRESS_MSB WLAN_MBOX0_DMA_RX_DESCRIPTOR_BASE_ADDRESS_MSB
+#define MBOX0_DMA_RX_DESCRIPTOR_BASE_ADDRESS_LSB WLAN_MBOX0_DMA_RX_DESCRIPTOR_BASE_ADDRESS_LSB
+#define MBOX0_DMA_RX_DESCRIPTOR_BASE_ADDRESS_MASK WLAN_MBOX0_DMA_RX_DESCRIPTOR_BASE_ADDRESS_MASK
+#define MBOX0_DMA_RX_DESCRIPTOR_BASE_ADDRESS_GET(x) WLAN_MBOX0_DMA_RX_DESCRIPTOR_BASE_ADDRESS_GET(x)
+#define MBOX0_DMA_RX_DESCRIPTOR_BASE_ADDRESS_SET(x) WLAN_MBOX0_DMA_RX_DESCRIPTOR_BASE_ADDRESS_SET(x)
+#define MBOX0_DMA_RX_CONTROL_ADDRESS WLAN_MBOX0_DMA_RX_CONTROL_ADDRESS
+#define MBOX0_DMA_RX_CONTROL_OFFSET WLAN_MBOX0_DMA_RX_CONTROL_OFFSET
+#define MBOX0_DMA_RX_CONTROL_RESUME_MSB WLAN_MBOX0_DMA_RX_CONTROL_RESUME_MSB
+#define MBOX0_DMA_RX_CONTROL_RESUME_LSB WLAN_MBOX0_DMA_RX_CONTROL_RESUME_LSB
+#define MBOX0_DMA_RX_CONTROL_RESUME_MASK WLAN_MBOX0_DMA_RX_CONTROL_RESUME_MASK
+#define MBOX0_DMA_RX_CONTROL_RESUME_GET(x) WLAN_MBOX0_DMA_RX_CONTROL_RESUME_GET(x)
+#define MBOX0_DMA_RX_CONTROL_RESUME_SET(x) WLAN_MBOX0_DMA_RX_CONTROL_RESUME_SET(x)
+#define MBOX0_DMA_RX_CONTROL_START_MSB WLAN_MBOX0_DMA_RX_CONTROL_START_MSB
+#define MBOX0_DMA_RX_CONTROL_START_LSB WLAN_MBOX0_DMA_RX_CONTROL_START_LSB
+#define MBOX0_DMA_RX_CONTROL_START_MASK WLAN_MBOX0_DMA_RX_CONTROL_START_MASK
+#define MBOX0_DMA_RX_CONTROL_START_GET(x) WLAN_MBOX0_DMA_RX_CONTROL_START_GET(x)
+#define MBOX0_DMA_RX_CONTROL_START_SET(x) WLAN_MBOX0_DMA_RX_CONTROL_START_SET(x)
+#define MBOX0_DMA_RX_CONTROL_STOP_MSB WLAN_MBOX0_DMA_RX_CONTROL_STOP_MSB
+#define MBOX0_DMA_RX_CONTROL_STOP_LSB WLAN_MBOX0_DMA_RX_CONTROL_STOP_LSB
+#define MBOX0_DMA_RX_CONTROL_STOP_MASK WLAN_MBOX0_DMA_RX_CONTROL_STOP_MASK
+#define MBOX0_DMA_RX_CONTROL_STOP_GET(x) WLAN_MBOX0_DMA_RX_CONTROL_STOP_GET(x)
+#define MBOX0_DMA_RX_CONTROL_STOP_SET(x) WLAN_MBOX0_DMA_RX_CONTROL_STOP_SET(x)
+#define MBOX0_DMA_TX_DESCRIPTOR_BASE_ADDRESS WLAN_MBOX0_DMA_TX_DESCRIPTOR_BASE_ADDRESS
+#define MBOX0_DMA_TX_DESCRIPTOR_BASE_OFFSET WLAN_MBOX0_DMA_TX_DESCRIPTOR_BASE_OFFSET
+#define MBOX0_DMA_TX_DESCRIPTOR_BASE_ADDRESS_MSB WLAN_MBOX0_DMA_TX_DESCRIPTOR_BASE_ADDRESS_MSB
+#define MBOX0_DMA_TX_DESCRIPTOR_BASE_ADDRESS_LSB WLAN_MBOX0_DMA_TX_DESCRIPTOR_BASE_ADDRESS_LSB
+#define MBOX0_DMA_TX_DESCRIPTOR_BASE_ADDRESS_MASK WLAN_MBOX0_DMA_TX_DESCRIPTOR_BASE_ADDRESS_MASK
+#define MBOX0_DMA_TX_DESCRIPTOR_BASE_ADDRESS_GET(x) WLAN_MBOX0_DMA_TX_DESCRIPTOR_BASE_ADDRESS_GET(x)
+#define MBOX0_DMA_TX_DESCRIPTOR_BASE_ADDRESS_SET(x) WLAN_MBOX0_DMA_TX_DESCRIPTOR_BASE_ADDRESS_SET(x)
+#define MBOX0_DMA_TX_CONTROL_ADDRESS WLAN_MBOX0_DMA_TX_CONTROL_ADDRESS
+#define MBOX0_DMA_TX_CONTROL_OFFSET WLAN_MBOX0_DMA_TX_CONTROL_OFFSET
+#define MBOX0_DMA_TX_CONTROL_RESUME_MSB WLAN_MBOX0_DMA_TX_CONTROL_RESUME_MSB
+#define MBOX0_DMA_TX_CONTROL_RESUME_LSB WLAN_MBOX0_DMA_TX_CONTROL_RESUME_LSB
+#define MBOX0_DMA_TX_CONTROL_RESUME_MASK WLAN_MBOX0_DMA_TX_CONTROL_RESUME_MASK
+#define MBOX0_DMA_TX_CONTROL_RESUME_GET(x) WLAN_MBOX0_DMA_TX_CONTROL_RESUME_GET(x)
+#define MBOX0_DMA_TX_CONTROL_RESUME_SET(x) WLAN_MBOX0_DMA_TX_CONTROL_RESUME_SET(x)
+#define MBOX0_DMA_TX_CONTROL_START_MSB WLAN_MBOX0_DMA_TX_CONTROL_START_MSB
+#define MBOX0_DMA_TX_CONTROL_START_LSB WLAN_MBOX0_DMA_TX_CONTROL_START_LSB
+#define MBOX0_DMA_TX_CONTROL_START_MASK WLAN_MBOX0_DMA_TX_CONTROL_START_MASK
+#define MBOX0_DMA_TX_CONTROL_START_GET(x) WLAN_MBOX0_DMA_TX_CONTROL_START_GET(x)
+#define MBOX0_DMA_TX_CONTROL_START_SET(x) WLAN_MBOX0_DMA_TX_CONTROL_START_SET(x)
+#define MBOX0_DMA_TX_CONTROL_STOP_MSB WLAN_MBOX0_DMA_TX_CONTROL_STOP_MSB
+#define MBOX0_DMA_TX_CONTROL_STOP_LSB WLAN_MBOX0_DMA_TX_CONTROL_STOP_LSB
+#define MBOX0_DMA_TX_CONTROL_STOP_MASK WLAN_MBOX0_DMA_TX_CONTROL_STOP_MASK
+#define MBOX0_DMA_TX_CONTROL_STOP_GET(x) WLAN_MBOX0_DMA_TX_CONTROL_STOP_GET(x)
+#define MBOX0_DMA_TX_CONTROL_STOP_SET(x) WLAN_MBOX0_DMA_TX_CONTROL_STOP_SET(x)
+#define MBOX1_DMA_RX_DESCRIPTOR_BASE_ADDRESS WLAN_MBOX1_DMA_RX_DESCRIPTOR_BASE_ADDRESS
+#define MBOX1_DMA_RX_DESCRIPTOR_BASE_OFFSET WLAN_MBOX1_DMA_RX_DESCRIPTOR_BASE_OFFSET
+#define MBOX1_DMA_RX_DESCRIPTOR_BASE_ADDRESS_MSB WLAN_MBOX1_DMA_RX_DESCRIPTOR_BASE_ADDRESS_MSB
+#define MBOX1_DMA_RX_DESCRIPTOR_BASE_ADDRESS_LSB WLAN_MBOX1_DMA_RX_DESCRIPTOR_BASE_ADDRESS_LSB
+#define MBOX1_DMA_RX_DESCRIPTOR_BASE_ADDRESS_MASK WLAN_MBOX1_DMA_RX_DESCRIPTOR_BASE_ADDRESS_MASK
+#define MBOX1_DMA_RX_DESCRIPTOR_BASE_ADDRESS_GET(x) WLAN_MBOX1_DMA_RX_DESCRIPTOR_BASE_ADDRESS_GET(x)
+#define MBOX1_DMA_RX_DESCRIPTOR_BASE_ADDRESS_SET(x) WLAN_MBOX1_DMA_RX_DESCRIPTOR_BASE_ADDRESS_SET(x)
+#define MBOX1_DMA_RX_CONTROL_ADDRESS WLAN_MBOX1_DMA_RX_CONTROL_ADDRESS
+#define MBOX1_DMA_RX_CONTROL_OFFSET WLAN_MBOX1_DMA_RX_CONTROL_OFFSET
+#define MBOX1_DMA_RX_CONTROL_RESUME_MSB WLAN_MBOX1_DMA_RX_CONTROL_RESUME_MSB
+#define MBOX1_DMA_RX_CONTROL_RESUME_LSB WLAN_MBOX1_DMA_RX_CONTROL_RESUME_LSB
+#define MBOX1_DMA_RX_CONTROL_RESUME_MASK WLAN_MBOX1_DMA_RX_CONTROL_RESUME_MASK
+#define MBOX1_DMA_RX_CONTROL_RESUME_GET(x) WLAN_MBOX1_DMA_RX_CONTROL_RESUME_GET(x)
+#define MBOX1_DMA_RX_CONTROL_RESUME_SET(x) WLAN_MBOX1_DMA_RX_CONTROL_RESUME_SET(x)
+#define MBOX1_DMA_RX_CONTROL_START_MSB WLAN_MBOX1_DMA_RX_CONTROL_START_MSB
+#define MBOX1_DMA_RX_CONTROL_START_LSB WLAN_MBOX1_DMA_RX_CONTROL_START_LSB
+#define MBOX1_DMA_RX_CONTROL_START_MASK WLAN_MBOX1_DMA_RX_CONTROL_START_MASK
+#define MBOX1_DMA_RX_CONTROL_START_GET(x) WLAN_MBOX1_DMA_RX_CONTROL_START_GET(x)
+#define MBOX1_DMA_RX_CONTROL_START_SET(x) WLAN_MBOX1_DMA_RX_CONTROL_START_SET(x)
+#define MBOX1_DMA_RX_CONTROL_STOP_MSB WLAN_MBOX1_DMA_RX_CONTROL_STOP_MSB
+#define MBOX1_DMA_RX_CONTROL_STOP_LSB WLAN_MBOX1_DMA_RX_CONTROL_STOP_LSB
+#define MBOX1_DMA_RX_CONTROL_STOP_MASK WLAN_MBOX1_DMA_RX_CONTROL_STOP_MASK
+#define MBOX1_DMA_RX_CONTROL_STOP_GET(x) WLAN_MBOX1_DMA_RX_CONTROL_STOP_GET(x)
+#define MBOX1_DMA_RX_CONTROL_STOP_SET(x) WLAN_MBOX1_DMA_RX_CONTROL_STOP_SET(x)
+#define MBOX1_DMA_TX_DESCRIPTOR_BASE_ADDRESS WLAN_MBOX1_DMA_TX_DESCRIPTOR_BASE_ADDRESS
+#define MBOX1_DMA_TX_DESCRIPTOR_BASE_OFFSET WLAN_MBOX1_DMA_TX_DESCRIPTOR_BASE_OFFSET
+#define MBOX1_DMA_TX_DESCRIPTOR_BASE_ADDRESS_MSB WLAN_MBOX1_DMA_TX_DESCRIPTOR_BASE_ADDRESS_MSB
+#define MBOX1_DMA_TX_DESCRIPTOR_BASE_ADDRESS_LSB WLAN_MBOX1_DMA_TX_DESCRIPTOR_BASE_ADDRESS_LSB
+#define MBOX1_DMA_TX_DESCRIPTOR_BASE_ADDRESS_MASK WLAN_MBOX1_DMA_TX_DESCRIPTOR_BASE_ADDRESS_MASK
+#define MBOX1_DMA_TX_DESCRIPTOR_BASE_ADDRESS_GET(x) WLAN_MBOX1_DMA_TX_DESCRIPTOR_BASE_ADDRESS_GET(x)
+#define MBOX1_DMA_TX_DESCRIPTOR_BASE_ADDRESS_SET(x) WLAN_MBOX1_DMA_TX_DESCRIPTOR_BASE_ADDRESS_SET(x)
+#define MBOX1_DMA_TX_CONTROL_ADDRESS WLAN_MBOX1_DMA_TX_CONTROL_ADDRESS
+#define MBOX1_DMA_TX_CONTROL_OFFSET WLAN_MBOX1_DMA_TX_CONTROL_OFFSET
+#define MBOX1_DMA_TX_CONTROL_RESUME_MSB WLAN_MBOX1_DMA_TX_CONTROL_RESUME_MSB
+#define MBOX1_DMA_TX_CONTROL_RESUME_LSB WLAN_MBOX1_DMA_TX_CONTROL_RESUME_LSB
+#define MBOX1_DMA_TX_CONTROL_RESUME_MASK WLAN_MBOX1_DMA_TX_CONTROL_RESUME_MASK
+#define MBOX1_DMA_TX_CONTROL_RESUME_GET(x) WLAN_MBOX1_DMA_TX_CONTROL_RESUME_GET(x)
+#define MBOX1_DMA_TX_CONTROL_RESUME_SET(x) WLAN_MBOX1_DMA_TX_CONTROL_RESUME_SET(x)
+#define MBOX1_DMA_TX_CONTROL_START_MSB WLAN_MBOX1_DMA_TX_CONTROL_START_MSB
+#define MBOX1_DMA_TX_CONTROL_START_LSB WLAN_MBOX1_DMA_TX_CONTROL_START_LSB
+#define MBOX1_DMA_TX_CONTROL_START_MASK WLAN_MBOX1_DMA_TX_CONTROL_START_MASK
+#define MBOX1_DMA_TX_CONTROL_START_GET(x) WLAN_MBOX1_DMA_TX_CONTROL_START_GET(x)
+#define MBOX1_DMA_TX_CONTROL_START_SET(x) WLAN_MBOX1_DMA_TX_CONTROL_START_SET(x)
+#define MBOX1_DMA_TX_CONTROL_STOP_MSB WLAN_MBOX1_DMA_TX_CONTROL_STOP_MSB
+#define MBOX1_DMA_TX_CONTROL_STOP_LSB WLAN_MBOX1_DMA_TX_CONTROL_STOP_LSB
+#define MBOX1_DMA_TX_CONTROL_STOP_MASK WLAN_MBOX1_DMA_TX_CONTROL_STOP_MASK
+#define MBOX1_DMA_TX_CONTROL_STOP_GET(x) WLAN_MBOX1_DMA_TX_CONTROL_STOP_GET(x)
+#define MBOX1_DMA_TX_CONTROL_STOP_SET(x) WLAN_MBOX1_DMA_TX_CONTROL_STOP_SET(x)
+#define MBOX2_DMA_RX_DESCRIPTOR_BASE_ADDRESS WLAN_MBOX2_DMA_RX_DESCRIPTOR_BASE_ADDRESS
+#define MBOX2_DMA_RX_DESCRIPTOR_BASE_OFFSET WLAN_MBOX2_DMA_RX_DESCRIPTOR_BASE_OFFSET
+#define MBOX2_DMA_RX_DESCRIPTOR_BASE_ADDRESS_MSB WLAN_MBOX2_DMA_RX_DESCRIPTOR_BASE_ADDRESS_MSB
+#define MBOX2_DMA_RX_DESCRIPTOR_BASE_ADDRESS_LSB WLAN_MBOX2_DMA_RX_DESCRIPTOR_BASE_ADDRESS_LSB
+#define MBOX2_DMA_RX_DESCRIPTOR_BASE_ADDRESS_MASK WLAN_MBOX2_DMA_RX_DESCRIPTOR_BASE_ADDRESS_MASK
+#define MBOX2_DMA_RX_DESCRIPTOR_BASE_ADDRESS_GET(x) WLAN_MBOX2_DMA_RX_DESCRIPTOR_BASE_ADDRESS_GET(x)
+#define MBOX2_DMA_RX_DESCRIPTOR_BASE_ADDRESS_SET(x) WLAN_MBOX2_DMA_RX_DESCRIPTOR_BASE_ADDRESS_SET(x)
+#define MBOX2_DMA_RX_CONTROL_ADDRESS WLAN_MBOX2_DMA_RX_CONTROL_ADDRESS
+#define MBOX2_DMA_RX_CONTROL_OFFSET WLAN_MBOX2_DMA_RX_CONTROL_OFFSET
+#define MBOX2_DMA_RX_CONTROL_RESUME_MSB WLAN_MBOX2_DMA_RX_CONTROL_RESUME_MSB
+#define MBOX2_DMA_RX_CONTROL_RESUME_LSB WLAN_MBOX2_DMA_RX_CONTROL_RESUME_LSB
+#define MBOX2_DMA_RX_CONTROL_RESUME_MASK WLAN_MBOX2_DMA_RX_CONTROL_RESUME_MASK
+#define MBOX2_DMA_RX_CONTROL_RESUME_GET(x) WLAN_MBOX2_DMA_RX_CONTROL_RESUME_GET(x)
+#define MBOX2_DMA_RX_CONTROL_RESUME_SET(x) WLAN_MBOX2_DMA_RX_CONTROL_RESUME_SET(x)
+#define MBOX2_DMA_RX_CONTROL_START_MSB WLAN_MBOX2_DMA_RX_CONTROL_START_MSB
+#define MBOX2_DMA_RX_CONTROL_START_LSB WLAN_MBOX2_DMA_RX_CONTROL_START_LSB
+#define MBOX2_DMA_RX_CONTROL_START_MASK WLAN_MBOX2_DMA_RX_CONTROL_START_MASK
+#define MBOX2_DMA_RX_CONTROL_START_GET(x) WLAN_MBOX2_DMA_RX_CONTROL_START_GET(x)
+#define MBOX2_DMA_RX_CONTROL_START_SET(x) WLAN_MBOX2_DMA_RX_CONTROL_START_SET(x)
+#define MBOX2_DMA_RX_CONTROL_STOP_MSB WLAN_MBOX2_DMA_RX_CONTROL_STOP_MSB
+#define MBOX2_DMA_RX_CONTROL_STOP_LSB WLAN_MBOX2_DMA_RX_CONTROL_STOP_LSB
+#define MBOX2_DMA_RX_CONTROL_STOP_MASK WLAN_MBOX2_DMA_RX_CONTROL_STOP_MASK
+#define MBOX2_DMA_RX_CONTROL_STOP_GET(x) WLAN_MBOX2_DMA_RX_CONTROL_STOP_GET(x)
+#define MBOX2_DMA_RX_CONTROL_STOP_SET(x) WLAN_MBOX2_DMA_RX_CONTROL_STOP_SET(x)
+#define MBOX2_DMA_TX_DESCRIPTOR_BASE_ADDRESS WLAN_MBOX2_DMA_TX_DESCRIPTOR_BASE_ADDRESS
+#define MBOX2_DMA_TX_DESCRIPTOR_BASE_OFFSET WLAN_MBOX2_DMA_TX_DESCRIPTOR_BASE_OFFSET
+#define MBOX2_DMA_TX_DESCRIPTOR_BASE_ADDRESS_MSB WLAN_MBOX2_DMA_TX_DESCRIPTOR_BASE_ADDRESS_MSB
+#define MBOX2_DMA_TX_DESCRIPTOR_BASE_ADDRESS_LSB WLAN_MBOX2_DMA_TX_DESCRIPTOR_BASE_ADDRESS_LSB
+#define MBOX2_DMA_TX_DESCRIPTOR_BASE_ADDRESS_MASK WLAN_MBOX2_DMA_TX_DESCRIPTOR_BASE_ADDRESS_MASK
+#define MBOX2_DMA_TX_DESCRIPTOR_BASE_ADDRESS_GET(x) WLAN_MBOX2_DMA_TX_DESCRIPTOR_BASE_ADDRESS_GET(x)
+#define MBOX2_DMA_TX_DESCRIPTOR_BASE_ADDRESS_SET(x) WLAN_MBOX2_DMA_TX_DESCRIPTOR_BASE_ADDRESS_SET(x)
+#define MBOX2_DMA_TX_CONTROL_ADDRESS WLAN_MBOX2_DMA_TX_CONTROL_ADDRESS
+#define MBOX2_DMA_TX_CONTROL_OFFSET WLAN_MBOX2_DMA_TX_CONTROL_OFFSET
+#define MBOX2_DMA_TX_CONTROL_RESUME_MSB WLAN_MBOX2_DMA_TX_CONTROL_RESUME_MSB
+#define MBOX2_DMA_TX_CONTROL_RESUME_LSB WLAN_MBOX2_DMA_TX_CONTROL_RESUME_LSB
+#define MBOX2_DMA_TX_CONTROL_RESUME_MASK WLAN_MBOX2_DMA_TX_CONTROL_RESUME_MASK
+#define MBOX2_DMA_TX_CONTROL_RESUME_GET(x) WLAN_MBOX2_DMA_TX_CONTROL_RESUME_GET(x)
+#define MBOX2_DMA_TX_CONTROL_RESUME_SET(x) WLAN_MBOX2_DMA_TX_CONTROL_RESUME_SET(x)
+#define MBOX2_DMA_TX_CONTROL_START_MSB WLAN_MBOX2_DMA_TX_CONTROL_START_MSB
+#define MBOX2_DMA_TX_CONTROL_START_LSB WLAN_MBOX2_DMA_TX_CONTROL_START_LSB
+#define MBOX2_DMA_TX_CONTROL_START_MASK WLAN_MBOX2_DMA_TX_CONTROL_START_MASK
+#define MBOX2_DMA_TX_CONTROL_START_GET(x) WLAN_MBOX2_DMA_TX_CONTROL_START_GET(x)
+#define MBOX2_DMA_TX_CONTROL_START_SET(x) WLAN_MBOX2_DMA_TX_CONTROL_START_SET(x)
+#define MBOX2_DMA_TX_CONTROL_STOP_MSB WLAN_MBOX2_DMA_TX_CONTROL_STOP_MSB
+#define MBOX2_DMA_TX_CONTROL_STOP_LSB WLAN_MBOX2_DMA_TX_CONTROL_STOP_LSB
+#define MBOX2_DMA_TX_CONTROL_STOP_MASK WLAN_MBOX2_DMA_TX_CONTROL_STOP_MASK
+#define MBOX2_DMA_TX_CONTROL_STOP_GET(x) WLAN_MBOX2_DMA_TX_CONTROL_STOP_GET(x)
+#define MBOX2_DMA_TX_CONTROL_STOP_SET(x) WLAN_MBOX2_DMA_TX_CONTROL_STOP_SET(x)
+#define MBOX3_DMA_RX_DESCRIPTOR_BASE_ADDRESS WLAN_MBOX3_DMA_RX_DESCRIPTOR_BASE_ADDRESS
+#define MBOX3_DMA_RX_DESCRIPTOR_BASE_OFFSET WLAN_MBOX3_DMA_RX_DESCRIPTOR_BASE_OFFSET
+#define MBOX3_DMA_RX_DESCRIPTOR_BASE_ADDRESS_MSB WLAN_MBOX3_DMA_RX_DESCRIPTOR_BASE_ADDRESS_MSB
+#define MBOX3_DMA_RX_DESCRIPTOR_BASE_ADDRESS_LSB WLAN_MBOX3_DMA_RX_DESCRIPTOR_BASE_ADDRESS_LSB
+#define MBOX3_DMA_RX_DESCRIPTOR_BASE_ADDRESS_MASK WLAN_MBOX3_DMA_RX_DESCRIPTOR_BASE_ADDRESS_MASK
+#define MBOX3_DMA_RX_DESCRIPTOR_BASE_ADDRESS_GET(x) WLAN_MBOX3_DMA_RX_DESCRIPTOR_BASE_ADDRESS_GET(x)
+#define MBOX3_DMA_RX_DESCRIPTOR_BASE_ADDRESS_SET(x) WLAN_MBOX3_DMA_RX_DESCRIPTOR_BASE_ADDRESS_SET(x)
+#define MBOX3_DMA_RX_CONTROL_ADDRESS WLAN_MBOX3_DMA_RX_CONTROL_ADDRESS
+#define MBOX3_DMA_RX_CONTROL_OFFSET WLAN_MBOX3_DMA_RX_CONTROL_OFFSET
+#define MBOX3_DMA_RX_CONTROL_RESUME_MSB WLAN_MBOX3_DMA_RX_CONTROL_RESUME_MSB
+#define MBOX3_DMA_RX_CONTROL_RESUME_LSB WLAN_MBOX3_DMA_RX_CONTROL_RESUME_LSB
+#define MBOX3_DMA_RX_CONTROL_RESUME_MASK WLAN_MBOX3_DMA_RX_CONTROL_RESUME_MASK
+#define MBOX3_DMA_RX_CONTROL_RESUME_GET(x) WLAN_MBOX3_DMA_RX_CONTROL_RESUME_GET(x)
+#define MBOX3_DMA_RX_CONTROL_RESUME_SET(x) WLAN_MBOX3_DMA_RX_CONTROL_RESUME_SET(x)
+#define MBOX3_DMA_RX_CONTROL_START_MSB WLAN_MBOX3_DMA_RX_CONTROL_START_MSB
+#define MBOX3_DMA_RX_CONTROL_START_LSB WLAN_MBOX3_DMA_RX_CONTROL_START_LSB
+#define MBOX3_DMA_RX_CONTROL_START_MASK WLAN_MBOX3_DMA_RX_CONTROL_START_MASK
+#define MBOX3_DMA_RX_CONTROL_START_GET(x) WLAN_MBOX3_DMA_RX_CONTROL_START_GET(x)
+#define MBOX3_DMA_RX_CONTROL_START_SET(x) WLAN_MBOX3_DMA_RX_CONTROL_START_SET(x)
+#define MBOX3_DMA_RX_CONTROL_STOP_MSB WLAN_MBOX3_DMA_RX_CONTROL_STOP_MSB
+#define MBOX3_DMA_RX_CONTROL_STOP_LSB WLAN_MBOX3_DMA_RX_CONTROL_STOP_LSB
+#define MBOX3_DMA_RX_CONTROL_STOP_MASK WLAN_MBOX3_DMA_RX_CONTROL_STOP_MASK
+#define MBOX3_DMA_RX_CONTROL_STOP_GET(x) WLAN_MBOX3_DMA_RX_CONTROL_STOP_GET(x)
+#define MBOX3_DMA_RX_CONTROL_STOP_SET(x) WLAN_MBOX3_DMA_RX_CONTROL_STOP_SET(x)
+#define MBOX3_DMA_TX_DESCRIPTOR_BASE_ADDRESS WLAN_MBOX3_DMA_TX_DESCRIPTOR_BASE_ADDRESS
+#define MBOX3_DMA_TX_DESCRIPTOR_BASE_OFFSET WLAN_MBOX3_DMA_TX_DESCRIPTOR_BASE_OFFSET
+#define MBOX3_DMA_TX_DESCRIPTOR_BASE_ADDRESS_MSB WLAN_MBOX3_DMA_TX_DESCRIPTOR_BASE_ADDRESS_MSB
+#define MBOX3_DMA_TX_DESCRIPTOR_BASE_ADDRESS_LSB WLAN_MBOX3_DMA_TX_DESCRIPTOR_BASE_ADDRESS_LSB
+#define MBOX3_DMA_TX_DESCRIPTOR_BASE_ADDRESS_MASK WLAN_MBOX3_DMA_TX_DESCRIPTOR_BASE_ADDRESS_MASK
+#define MBOX3_DMA_TX_DESCRIPTOR_BASE_ADDRESS_GET(x) WLAN_MBOX3_DMA_TX_DESCRIPTOR_BASE_ADDRESS_GET(x)
+#define MBOX3_DMA_TX_DESCRIPTOR_BASE_ADDRESS_SET(x) WLAN_MBOX3_DMA_TX_DESCRIPTOR_BASE_ADDRESS_SET(x)
+#define MBOX3_DMA_TX_CONTROL_ADDRESS WLAN_MBOX3_DMA_TX_CONTROL_ADDRESS
+#define MBOX3_DMA_TX_CONTROL_OFFSET WLAN_MBOX3_DMA_TX_CONTROL_OFFSET
+#define MBOX3_DMA_TX_CONTROL_RESUME_MSB WLAN_MBOX3_DMA_TX_CONTROL_RESUME_MSB
+#define MBOX3_DMA_TX_CONTROL_RESUME_LSB WLAN_MBOX3_DMA_TX_CONTROL_RESUME_LSB
+#define MBOX3_DMA_TX_CONTROL_RESUME_MASK WLAN_MBOX3_DMA_TX_CONTROL_RESUME_MASK
+#define MBOX3_DMA_TX_CONTROL_RESUME_GET(x) WLAN_MBOX3_DMA_TX_CONTROL_RESUME_GET(x)
+#define MBOX3_DMA_TX_CONTROL_RESUME_SET(x) WLAN_MBOX3_DMA_TX_CONTROL_RESUME_SET(x)
+#define MBOX3_DMA_TX_CONTROL_START_MSB WLAN_MBOX3_DMA_TX_CONTROL_START_MSB
+#define MBOX3_DMA_TX_CONTROL_START_LSB WLAN_MBOX3_DMA_TX_CONTROL_START_LSB
+#define MBOX3_DMA_TX_CONTROL_START_MASK WLAN_MBOX3_DMA_TX_CONTROL_START_MASK
+#define MBOX3_DMA_TX_CONTROL_START_GET(x) WLAN_MBOX3_DMA_TX_CONTROL_START_GET(x)
+#define MBOX3_DMA_TX_CONTROL_START_SET(x) WLAN_MBOX3_DMA_TX_CONTROL_START_SET(x)
+#define MBOX3_DMA_TX_CONTROL_STOP_MSB WLAN_MBOX3_DMA_TX_CONTROL_STOP_MSB
+#define MBOX3_DMA_TX_CONTROL_STOP_LSB WLAN_MBOX3_DMA_TX_CONTROL_STOP_LSB
+#define MBOX3_DMA_TX_CONTROL_STOP_MASK WLAN_MBOX3_DMA_TX_CONTROL_STOP_MASK
+#define MBOX3_DMA_TX_CONTROL_STOP_GET(x) WLAN_MBOX3_DMA_TX_CONTROL_STOP_GET(x)
+#define MBOX3_DMA_TX_CONTROL_STOP_SET(x) WLAN_MBOX3_DMA_TX_CONTROL_STOP_SET(x)
+#define MBOX_INT_STATUS_ADDRESS WLAN_MBOX_INT_STATUS_ADDRESS
+#define MBOX_INT_STATUS_OFFSET WLAN_MBOX_INT_STATUS_OFFSET
+#define MBOX_INT_STATUS_RX_DMA_COMPLETE_MSB WLAN_MBOX_INT_STATUS_RX_DMA_COMPLETE_MSB
+#define MBOX_INT_STATUS_RX_DMA_COMPLETE_LSB WLAN_MBOX_INT_STATUS_RX_DMA_COMPLETE_LSB
+#define MBOX_INT_STATUS_RX_DMA_COMPLETE_MASK WLAN_MBOX_INT_STATUS_RX_DMA_COMPLETE_MASK
+#define MBOX_INT_STATUS_RX_DMA_COMPLETE_GET(x) WLAN_MBOX_INT_STATUS_RX_DMA_COMPLETE_GET(x)
+#define MBOX_INT_STATUS_RX_DMA_COMPLETE_SET(x) WLAN_MBOX_INT_STATUS_RX_DMA_COMPLETE_SET(x)
+#define MBOX_INT_STATUS_TX_DMA_EOM_COMPLETE_MSB WLAN_MBOX_INT_STATUS_TX_DMA_EOM_COMPLETE_MSB
+#define MBOX_INT_STATUS_TX_DMA_EOM_COMPLETE_LSB WLAN_MBOX_INT_STATUS_TX_DMA_EOM_COMPLETE_LSB
+#define MBOX_INT_STATUS_TX_DMA_EOM_COMPLETE_MASK WLAN_MBOX_INT_STATUS_TX_DMA_EOM_COMPLETE_MASK
+#define MBOX_INT_STATUS_TX_DMA_EOM_COMPLETE_GET(x) WLAN_MBOX_INT_STATUS_TX_DMA_EOM_COMPLETE_GET(x)
+#define MBOX_INT_STATUS_TX_DMA_EOM_COMPLETE_SET(x) WLAN_MBOX_INT_STATUS_TX_DMA_EOM_COMPLETE_SET(x)
+#define MBOX_INT_STATUS_TX_DMA_COMPLETE_MSB WLAN_MBOX_INT_STATUS_TX_DMA_COMPLETE_MSB
+#define MBOX_INT_STATUS_TX_DMA_COMPLETE_LSB WLAN_MBOX_INT_STATUS_TX_DMA_COMPLETE_LSB
+#define MBOX_INT_STATUS_TX_DMA_COMPLETE_MASK WLAN_MBOX_INT_STATUS_TX_DMA_COMPLETE_MASK
+#define MBOX_INT_STATUS_TX_DMA_COMPLETE_GET(x) WLAN_MBOX_INT_STATUS_TX_DMA_COMPLETE_GET(x)
+#define MBOX_INT_STATUS_TX_DMA_COMPLETE_SET(x) WLAN_MBOX_INT_STATUS_TX_DMA_COMPLETE_SET(x)
+#define MBOX_INT_STATUS_TX_OVERFLOW_MSB WLAN_MBOX_INT_STATUS_TX_OVERFLOW_MSB
+#define MBOX_INT_STATUS_TX_OVERFLOW_LSB WLAN_MBOX_INT_STATUS_TX_OVERFLOW_LSB
+#define MBOX_INT_STATUS_TX_OVERFLOW_MASK WLAN_MBOX_INT_STATUS_TX_OVERFLOW_MASK
+#define MBOX_INT_STATUS_TX_OVERFLOW_GET(x) WLAN_MBOX_INT_STATUS_TX_OVERFLOW_GET(x)
+#define MBOX_INT_STATUS_TX_OVERFLOW_SET(x) WLAN_MBOX_INT_STATUS_TX_OVERFLOW_SET(x)
+#define MBOX_INT_STATUS_RX_UNDERFLOW_MSB WLAN_MBOX_INT_STATUS_RX_UNDERFLOW_MSB
+#define MBOX_INT_STATUS_RX_UNDERFLOW_LSB WLAN_MBOX_INT_STATUS_RX_UNDERFLOW_LSB
+#define MBOX_INT_STATUS_RX_UNDERFLOW_MASK WLAN_MBOX_INT_STATUS_RX_UNDERFLOW_MASK
+#define MBOX_INT_STATUS_RX_UNDERFLOW_GET(x) WLAN_MBOX_INT_STATUS_RX_UNDERFLOW_GET(x)
+#define MBOX_INT_STATUS_RX_UNDERFLOW_SET(x) WLAN_MBOX_INT_STATUS_RX_UNDERFLOW_SET(x)
+#define MBOX_INT_STATUS_TX_NOT_EMPTY_MSB WLAN_MBOX_INT_STATUS_TX_NOT_EMPTY_MSB
+#define MBOX_INT_STATUS_TX_NOT_EMPTY_LSB WLAN_MBOX_INT_STATUS_TX_NOT_EMPTY_LSB
+#define MBOX_INT_STATUS_TX_NOT_EMPTY_MASK WLAN_MBOX_INT_STATUS_TX_NOT_EMPTY_MASK
+#define MBOX_INT_STATUS_TX_NOT_EMPTY_GET(x) WLAN_MBOX_INT_STATUS_TX_NOT_EMPTY_GET(x)
+#define MBOX_INT_STATUS_TX_NOT_EMPTY_SET(x) WLAN_MBOX_INT_STATUS_TX_NOT_EMPTY_SET(x)
+#define MBOX_INT_STATUS_RX_NOT_FULL_MSB WLAN_MBOX_INT_STATUS_RX_NOT_FULL_MSB
+#define MBOX_INT_STATUS_RX_NOT_FULL_LSB WLAN_MBOX_INT_STATUS_RX_NOT_FULL_LSB
+#define MBOX_INT_STATUS_RX_NOT_FULL_MASK WLAN_MBOX_INT_STATUS_RX_NOT_FULL_MASK
+#define MBOX_INT_STATUS_RX_NOT_FULL_GET(x) WLAN_MBOX_INT_STATUS_RX_NOT_FULL_GET(x)
+#define MBOX_INT_STATUS_RX_NOT_FULL_SET(x) WLAN_MBOX_INT_STATUS_RX_NOT_FULL_SET(x)
+#define MBOX_INT_STATUS_HOST_MSB WLAN_MBOX_INT_STATUS_HOST_MSB
+#define MBOX_INT_STATUS_HOST_LSB WLAN_MBOX_INT_STATUS_HOST_LSB
+#define MBOX_INT_STATUS_HOST_MASK WLAN_MBOX_INT_STATUS_HOST_MASK
+#define MBOX_INT_STATUS_HOST_GET(x) WLAN_MBOX_INT_STATUS_HOST_GET(x)
+#define MBOX_INT_STATUS_HOST_SET(x) WLAN_MBOX_INT_STATUS_HOST_SET(x)
+#define MBOX_INT_ENABLE_ADDRESS WLAN_MBOX_INT_ENABLE_ADDRESS
+#define MBOX_INT_ENABLE_OFFSET WLAN_MBOX_INT_ENABLE_OFFSET
+#define MBOX_INT_ENABLE_RX_DMA_COMPLETE_MSB WLAN_MBOX_INT_ENABLE_RX_DMA_COMPLETE_MSB
+#define MBOX_INT_ENABLE_RX_DMA_COMPLETE_LSB WLAN_MBOX_INT_ENABLE_RX_DMA_COMPLETE_LSB
+#define MBOX_INT_ENABLE_RX_DMA_COMPLETE_MASK WLAN_MBOX_INT_ENABLE_RX_DMA_COMPLETE_MASK
+#define MBOX_INT_ENABLE_RX_DMA_COMPLETE_GET(x) WLAN_MBOX_INT_ENABLE_RX_DMA_COMPLETE_GET(x)
+#define MBOX_INT_ENABLE_RX_DMA_COMPLETE_SET(x) WLAN_MBOX_INT_ENABLE_RX_DMA_COMPLETE_SET(x)
+#define MBOX_INT_ENABLE_TX_DMA_EOM_COMPLETE_MSB WLAN_MBOX_INT_ENABLE_TX_DMA_EOM_COMPLETE_MSB
+#define MBOX_INT_ENABLE_TX_DMA_EOM_COMPLETE_LSB WLAN_MBOX_INT_ENABLE_TX_DMA_EOM_COMPLETE_LSB
+#define MBOX_INT_ENABLE_TX_DMA_EOM_COMPLETE_MASK WLAN_MBOX_INT_ENABLE_TX_DMA_EOM_COMPLETE_MASK
+#define MBOX_INT_ENABLE_TX_DMA_EOM_COMPLETE_GET(x) WLAN_MBOX_INT_ENABLE_TX_DMA_EOM_COMPLETE_GET(x)
+#define MBOX_INT_ENABLE_TX_DMA_EOM_COMPLETE_SET(x) WLAN_MBOX_INT_ENABLE_TX_DMA_EOM_COMPLETE_SET(x)
+#define MBOX_INT_ENABLE_TX_DMA_COMPLETE_MSB WLAN_MBOX_INT_ENABLE_TX_DMA_COMPLETE_MSB
+#define MBOX_INT_ENABLE_TX_DMA_COMPLETE_LSB WLAN_MBOX_INT_ENABLE_TX_DMA_COMPLETE_LSB
+#define MBOX_INT_ENABLE_TX_DMA_COMPLETE_MASK WLAN_MBOX_INT_ENABLE_TX_DMA_COMPLETE_MASK
+#define MBOX_INT_ENABLE_TX_DMA_COMPLETE_GET(x) WLAN_MBOX_INT_ENABLE_TX_DMA_COMPLETE_GET(x)
+#define MBOX_INT_ENABLE_TX_DMA_COMPLETE_SET(x) WLAN_MBOX_INT_ENABLE_TX_DMA_COMPLETE_SET(x)
+#define MBOX_INT_ENABLE_TX_OVERFLOW_MSB WLAN_MBOX_INT_ENABLE_TX_OVERFLOW_MSB
+#define MBOX_INT_ENABLE_TX_OVERFLOW_LSB WLAN_MBOX_INT_ENABLE_TX_OVERFLOW_LSB
+#define MBOX_INT_ENABLE_TX_OVERFLOW_MASK WLAN_MBOX_INT_ENABLE_TX_OVERFLOW_MASK
+#define MBOX_INT_ENABLE_TX_OVERFLOW_GET(x) WLAN_MBOX_INT_ENABLE_TX_OVERFLOW_GET(x)
+#define MBOX_INT_ENABLE_TX_OVERFLOW_SET(x) WLAN_MBOX_INT_ENABLE_TX_OVERFLOW_SET(x)
+#define MBOX_INT_ENABLE_RX_UNDERFLOW_MSB WLAN_MBOX_INT_ENABLE_RX_UNDERFLOW_MSB
+#define MBOX_INT_ENABLE_RX_UNDERFLOW_LSB WLAN_MBOX_INT_ENABLE_RX_UNDERFLOW_LSB
+#define MBOX_INT_ENABLE_RX_UNDERFLOW_MASK WLAN_MBOX_INT_ENABLE_RX_UNDERFLOW_MASK
+#define MBOX_INT_ENABLE_RX_UNDERFLOW_GET(x) WLAN_MBOX_INT_ENABLE_RX_UNDERFLOW_GET(x)
+#define MBOX_INT_ENABLE_RX_UNDERFLOW_SET(x) WLAN_MBOX_INT_ENABLE_RX_UNDERFLOW_SET(x)
+#define MBOX_INT_ENABLE_TX_NOT_EMPTY_MSB WLAN_MBOX_INT_ENABLE_TX_NOT_EMPTY_MSB
+#define MBOX_INT_ENABLE_TX_NOT_EMPTY_LSB WLAN_MBOX_INT_ENABLE_TX_NOT_EMPTY_LSB
+#define MBOX_INT_ENABLE_TX_NOT_EMPTY_MASK WLAN_MBOX_INT_ENABLE_TX_NOT_EMPTY_MASK
+#define MBOX_INT_ENABLE_TX_NOT_EMPTY_GET(x) WLAN_MBOX_INT_ENABLE_TX_NOT_EMPTY_GET(x)
+#define MBOX_INT_ENABLE_TX_NOT_EMPTY_SET(x) WLAN_MBOX_INT_ENABLE_TX_NOT_EMPTY_SET(x)
+#define MBOX_INT_ENABLE_RX_NOT_FULL_MSB WLAN_MBOX_INT_ENABLE_RX_NOT_FULL_MSB
+#define MBOX_INT_ENABLE_RX_NOT_FULL_LSB WLAN_MBOX_INT_ENABLE_RX_NOT_FULL_LSB
+#define MBOX_INT_ENABLE_RX_NOT_FULL_MASK WLAN_MBOX_INT_ENABLE_RX_NOT_FULL_MASK
+#define MBOX_INT_ENABLE_RX_NOT_FULL_GET(x) WLAN_MBOX_INT_ENABLE_RX_NOT_FULL_GET(x)
+#define MBOX_INT_ENABLE_RX_NOT_FULL_SET(x) WLAN_MBOX_INT_ENABLE_RX_NOT_FULL_SET(x)
+#define MBOX_INT_ENABLE_HOST_MSB WLAN_MBOX_INT_ENABLE_HOST_MSB
+#define MBOX_INT_ENABLE_HOST_LSB WLAN_MBOX_INT_ENABLE_HOST_LSB
+#define MBOX_INT_ENABLE_HOST_MASK WLAN_MBOX_INT_ENABLE_HOST_MASK
+#define MBOX_INT_ENABLE_HOST_GET(x) WLAN_MBOX_INT_ENABLE_HOST_GET(x)
+#define MBOX_INT_ENABLE_HOST_SET(x) WLAN_MBOX_INT_ENABLE_HOST_SET(x)
+#define INT_HOST_ADDRESS WLAN_INT_HOST_ADDRESS
+#define INT_HOST_OFFSET WLAN_INT_HOST_OFFSET
+#define INT_HOST_VECTOR_MSB WLAN_INT_HOST_VECTOR_MSB
+#define INT_HOST_VECTOR_LSB WLAN_INT_HOST_VECTOR_LSB
+#define INT_HOST_VECTOR_MASK WLAN_INT_HOST_VECTOR_MASK
+#define INT_HOST_VECTOR_GET(x) WLAN_INT_HOST_VECTOR_GET(x)
+#define INT_HOST_VECTOR_SET(x) WLAN_INT_HOST_VECTOR_SET(x)
+#define LOCAL_COUNT_ADDRESS WLAN_LOCAL_COUNT_ADDRESS
+#define LOCAL_COUNT_OFFSET WLAN_LOCAL_COUNT_OFFSET
+#define LOCAL_COUNT_VALUE_MSB WLAN_LOCAL_COUNT_VALUE_MSB
+#define LOCAL_COUNT_VALUE_LSB WLAN_LOCAL_COUNT_VALUE_LSB
+#define LOCAL_COUNT_VALUE_MASK WLAN_LOCAL_COUNT_VALUE_MASK
+#define LOCAL_COUNT_VALUE_GET(x) WLAN_LOCAL_COUNT_VALUE_GET(x)
+#define LOCAL_COUNT_VALUE_SET(x) WLAN_LOCAL_COUNT_VALUE_SET(x)
+#define COUNT_INC_ADDRESS WLAN_COUNT_INC_ADDRESS
+#define COUNT_INC_OFFSET WLAN_COUNT_INC_OFFSET
+#define COUNT_INC_VALUE_MSB WLAN_COUNT_INC_VALUE_MSB
+#define COUNT_INC_VALUE_LSB WLAN_COUNT_INC_VALUE_LSB
+#define COUNT_INC_VALUE_MASK WLAN_COUNT_INC_VALUE_MASK
+#define COUNT_INC_VALUE_GET(x) WLAN_COUNT_INC_VALUE_GET(x)
+#define COUNT_INC_VALUE_SET(x) WLAN_COUNT_INC_VALUE_SET(x)
+#define LOCAL_SCRATCH_ADDRESS WLAN_LOCAL_SCRATCH_ADDRESS
+#define LOCAL_SCRATCH_OFFSET WLAN_LOCAL_SCRATCH_OFFSET
+#define LOCAL_SCRATCH_VALUE_MSB WLAN_LOCAL_SCRATCH_VALUE_MSB
+#define LOCAL_SCRATCH_VALUE_LSB WLAN_LOCAL_SCRATCH_VALUE_LSB
+#define LOCAL_SCRATCH_VALUE_MASK WLAN_LOCAL_SCRATCH_VALUE_MASK
+#define LOCAL_SCRATCH_VALUE_GET(x) WLAN_LOCAL_SCRATCH_VALUE_GET(x)
+#define LOCAL_SCRATCH_VALUE_SET(x) WLAN_LOCAL_SCRATCH_VALUE_SET(x)
+#define USE_LOCAL_BUS_ADDRESS WLAN_USE_LOCAL_BUS_ADDRESS
+#define USE_LOCAL_BUS_OFFSET WLAN_USE_LOCAL_BUS_OFFSET
+#define USE_LOCAL_BUS_PIN_INIT_MSB WLAN_USE_LOCAL_BUS_PIN_INIT_MSB
+#define USE_LOCAL_BUS_PIN_INIT_LSB WLAN_USE_LOCAL_BUS_PIN_INIT_LSB
+#define USE_LOCAL_BUS_PIN_INIT_MASK WLAN_USE_LOCAL_BUS_PIN_INIT_MASK
+#define USE_LOCAL_BUS_PIN_INIT_GET(x) WLAN_USE_LOCAL_BUS_PIN_INIT_GET(x)
+#define USE_LOCAL_BUS_PIN_INIT_SET(x) WLAN_USE_LOCAL_BUS_PIN_INIT_SET(x)
+#define SDIO_CONFIG_ADDRESS WLAN_SDIO_CONFIG_ADDRESS
+#define SDIO_CONFIG_OFFSET WLAN_SDIO_CONFIG_OFFSET
+#define SDIO_CONFIG_CCCR_IOR1_MSB WLAN_SDIO_CONFIG_CCCR_IOR1_MSB
+#define SDIO_CONFIG_CCCR_IOR1_LSB WLAN_SDIO_CONFIG_CCCR_IOR1_LSB
+#define SDIO_CONFIG_CCCR_IOR1_MASK WLAN_SDIO_CONFIG_CCCR_IOR1_MASK
+#define SDIO_CONFIG_CCCR_IOR1_GET(x) WLAN_SDIO_CONFIG_CCCR_IOR1_GET(x)
+#define SDIO_CONFIG_CCCR_IOR1_SET(x) WLAN_SDIO_CONFIG_CCCR_IOR1_SET(x)
+#define MBOX_DEBUG_ADDRESS WLAN_MBOX_DEBUG_ADDRESS
+#define MBOX_DEBUG_OFFSET WLAN_MBOX_DEBUG_OFFSET
+#define MBOX_DEBUG_SEL_MSB WLAN_MBOX_DEBUG_SEL_MSB
+#define MBOX_DEBUG_SEL_LSB WLAN_MBOX_DEBUG_SEL_LSB
+#define MBOX_DEBUG_SEL_MASK WLAN_MBOX_DEBUG_SEL_MASK
+#define MBOX_DEBUG_SEL_GET(x) WLAN_MBOX_DEBUG_SEL_GET(x)
+#define MBOX_DEBUG_SEL_SET(x) WLAN_MBOX_DEBUG_SEL_SET(x)
+#define MBOX_FIFO_RESET_ADDRESS WLAN_MBOX_FIFO_RESET_ADDRESS
+#define MBOX_FIFO_RESET_OFFSET WLAN_MBOX_FIFO_RESET_OFFSET
+#define MBOX_FIFO_RESET_INIT_MSB WLAN_MBOX_FIFO_RESET_INIT_MSB
+#define MBOX_FIFO_RESET_INIT_LSB WLAN_MBOX_FIFO_RESET_INIT_LSB
+#define MBOX_FIFO_RESET_INIT_MASK WLAN_MBOX_FIFO_RESET_INIT_MASK
+#define MBOX_FIFO_RESET_INIT_GET(x) WLAN_MBOX_FIFO_RESET_INIT_GET(x)
+#define MBOX_FIFO_RESET_INIT_SET(x) WLAN_MBOX_FIFO_RESET_INIT_SET(x)
+#define MBOX_TXFIFO_POP_ADDRESS WLAN_MBOX_TXFIFO_POP_ADDRESS
+#define MBOX_TXFIFO_POP_OFFSET WLAN_MBOX_TXFIFO_POP_OFFSET
+#define MBOX_TXFIFO_POP_DATA_MSB WLAN_MBOX_TXFIFO_POP_DATA_MSB
+#define MBOX_TXFIFO_POP_DATA_LSB WLAN_MBOX_TXFIFO_POP_DATA_LSB
+#define MBOX_TXFIFO_POP_DATA_MASK WLAN_MBOX_TXFIFO_POP_DATA_MASK
+#define MBOX_TXFIFO_POP_DATA_GET(x) WLAN_MBOX_TXFIFO_POP_DATA_GET(x)
+#define MBOX_TXFIFO_POP_DATA_SET(x) WLAN_MBOX_TXFIFO_POP_DATA_SET(x)
+#define MBOX_RXFIFO_POP_ADDRESS WLAN_MBOX_RXFIFO_POP_ADDRESS
+#define MBOX_RXFIFO_POP_OFFSET WLAN_MBOX_RXFIFO_POP_OFFSET
+#define MBOX_RXFIFO_POP_DATA_MSB WLAN_MBOX_RXFIFO_POP_DATA_MSB
+#define MBOX_RXFIFO_POP_DATA_LSB WLAN_MBOX_RXFIFO_POP_DATA_LSB
+#define MBOX_RXFIFO_POP_DATA_MASK WLAN_MBOX_RXFIFO_POP_DATA_MASK
+#define MBOX_RXFIFO_POP_DATA_GET(x) WLAN_MBOX_RXFIFO_POP_DATA_GET(x)
+#define MBOX_RXFIFO_POP_DATA_SET(x) WLAN_MBOX_RXFIFO_POP_DATA_SET(x)
+#define SDIO_DEBUG_ADDRESS WLAN_SDIO_DEBUG_ADDRESS
+#define SDIO_DEBUG_OFFSET WLAN_SDIO_DEBUG_OFFSET
+#define SDIO_DEBUG_SEL_MSB WLAN_SDIO_DEBUG_SEL_MSB
+#define SDIO_DEBUG_SEL_LSB WLAN_SDIO_DEBUG_SEL_LSB
+#define SDIO_DEBUG_SEL_MASK WLAN_SDIO_DEBUG_SEL_MASK
+#define SDIO_DEBUG_SEL_GET(x) WLAN_SDIO_DEBUG_SEL_GET(x)
+#define SDIO_DEBUG_SEL_SET(x) WLAN_SDIO_DEBUG_SEL_SET(x)
+#define GMBOX0_DMA_RX_DESCRIPTOR_BASE_ADDRESS WLAN_GMBOX0_DMA_RX_DESCRIPTOR_BASE_ADDRESS
+#define GMBOX0_DMA_RX_DESCRIPTOR_BASE_OFFSET WLAN_GMBOX0_DMA_RX_DESCRIPTOR_BASE_OFFSET
+#define GMBOX0_DMA_RX_DESCRIPTOR_BASE_ADDRESS_MSB WLAN_GMBOX0_DMA_RX_DESCRIPTOR_BASE_ADDRESS_MSB
+#define GMBOX0_DMA_RX_DESCRIPTOR_BASE_ADDRESS_LSB WLAN_GMBOX0_DMA_RX_DESCRIPTOR_BASE_ADDRESS_LSB
+#define GMBOX0_DMA_RX_DESCRIPTOR_BASE_ADDRESS_MASK WLAN_GMBOX0_DMA_RX_DESCRIPTOR_BASE_ADDRESS_MASK
+#define GMBOX0_DMA_RX_DESCRIPTOR_BASE_ADDRESS_GET(x) WLAN_GMBOX0_DMA_RX_DESCRIPTOR_BASE_ADDRESS_GET(x)
+#define GMBOX0_DMA_RX_DESCRIPTOR_BASE_ADDRESS_SET(x) WLAN_GMBOX0_DMA_RX_DESCRIPTOR_BASE_ADDRESS_SET(x)
+#define GMBOX0_DMA_RX_CONTROL_ADDRESS WLAN_GMBOX0_DMA_RX_CONTROL_ADDRESS
+#define GMBOX0_DMA_RX_CONTROL_OFFSET WLAN_GMBOX0_DMA_RX_CONTROL_OFFSET
+#define GMBOX0_DMA_RX_CONTROL_RESUME_MSB WLAN_GMBOX0_DMA_RX_CONTROL_RESUME_MSB
+#define GMBOX0_DMA_RX_CONTROL_RESUME_LSB WLAN_GMBOX0_DMA_RX_CONTROL_RESUME_LSB
+#define GMBOX0_DMA_RX_CONTROL_RESUME_MASK WLAN_GMBOX0_DMA_RX_CONTROL_RESUME_MASK
+#define GMBOX0_DMA_RX_CONTROL_RESUME_GET(x) WLAN_GMBOX0_DMA_RX_CONTROL_RESUME_GET(x)
+#define GMBOX0_DMA_RX_CONTROL_RESUME_SET(x) WLAN_GMBOX0_DMA_RX_CONTROL_RESUME_SET(x)
+#define GMBOX0_DMA_RX_CONTROL_START_MSB WLAN_GMBOX0_DMA_RX_CONTROL_START_MSB
+#define GMBOX0_DMA_RX_CONTROL_START_LSB WLAN_GMBOX0_DMA_RX_CONTROL_START_LSB
+#define GMBOX0_DMA_RX_CONTROL_START_MASK WLAN_GMBOX0_DMA_RX_CONTROL_START_MASK
+#define GMBOX0_DMA_RX_CONTROL_START_GET(x) WLAN_GMBOX0_DMA_RX_CONTROL_START_GET(x)
+#define GMBOX0_DMA_RX_CONTROL_START_SET(x) WLAN_GMBOX0_DMA_RX_CONTROL_START_SET(x)
+#define GMBOX0_DMA_RX_CONTROL_STOP_MSB WLAN_GMBOX0_DMA_RX_CONTROL_STOP_MSB
+#define GMBOX0_DMA_RX_CONTROL_STOP_LSB WLAN_GMBOX0_DMA_RX_CONTROL_STOP_LSB
+#define GMBOX0_DMA_RX_CONTROL_STOP_MASK WLAN_GMBOX0_DMA_RX_CONTROL_STOP_MASK
+#define GMBOX0_DMA_RX_CONTROL_STOP_GET(x) WLAN_GMBOX0_DMA_RX_CONTROL_STOP_GET(x)
+#define GMBOX0_DMA_RX_CONTROL_STOP_SET(x) WLAN_GMBOX0_DMA_RX_CONTROL_STOP_SET(x)
+#define GMBOX0_DMA_TX_DESCRIPTOR_BASE_ADDRESS WLAN_GMBOX0_DMA_TX_DESCRIPTOR_BASE_ADDRESS
+#define GMBOX0_DMA_TX_DESCRIPTOR_BASE_OFFSET WLAN_GMBOX0_DMA_TX_DESCRIPTOR_BASE_OFFSET
+#define GMBOX0_DMA_TX_DESCRIPTOR_BASE_ADDRESS_MSB WLAN_GMBOX0_DMA_TX_DESCRIPTOR_BASE_ADDRESS_MSB
+#define GMBOX0_DMA_TX_DESCRIPTOR_BASE_ADDRESS_LSB WLAN_GMBOX0_DMA_TX_DESCRIPTOR_BASE_ADDRESS_LSB
+#define GMBOX0_DMA_TX_DESCRIPTOR_BASE_ADDRESS_MASK WLAN_GMBOX0_DMA_TX_DESCRIPTOR_BASE_ADDRESS_MASK
+#define GMBOX0_DMA_TX_DESCRIPTOR_BASE_ADDRESS_GET(x) WLAN_GMBOX0_DMA_TX_DESCRIPTOR_BASE_ADDRESS_GET(x)
+#define GMBOX0_DMA_TX_DESCRIPTOR_BASE_ADDRESS_SET(x) WLAN_GMBOX0_DMA_TX_DESCRIPTOR_BASE_ADDRESS_SET(x)
+#define GMBOX0_DMA_TX_CONTROL_ADDRESS WLAN_GMBOX0_DMA_TX_CONTROL_ADDRESS
+#define GMBOX0_DMA_TX_CONTROL_OFFSET WLAN_GMBOX0_DMA_TX_CONTROL_OFFSET
+#define GMBOX0_DMA_TX_CONTROL_RESUME_MSB WLAN_GMBOX0_DMA_TX_CONTROL_RESUME_MSB
+#define GMBOX0_DMA_TX_CONTROL_RESUME_LSB WLAN_GMBOX0_DMA_TX_CONTROL_RESUME_LSB
+#define GMBOX0_DMA_TX_CONTROL_RESUME_MASK WLAN_GMBOX0_DMA_TX_CONTROL_RESUME_MASK
+#define GMBOX0_DMA_TX_CONTROL_RESUME_GET(x) WLAN_GMBOX0_DMA_TX_CONTROL_RESUME_GET(x)
+#define GMBOX0_DMA_TX_CONTROL_RESUME_SET(x) WLAN_GMBOX0_DMA_TX_CONTROL_RESUME_SET(x)
+#define GMBOX0_DMA_TX_CONTROL_START_MSB WLAN_GMBOX0_DMA_TX_CONTROL_START_MSB
+#define GMBOX0_DMA_TX_CONTROL_START_LSB WLAN_GMBOX0_DMA_TX_CONTROL_START_LSB
+#define GMBOX0_DMA_TX_CONTROL_START_MASK WLAN_GMBOX0_DMA_TX_CONTROL_START_MASK
+#define GMBOX0_DMA_TX_CONTROL_START_GET(x) WLAN_GMBOX0_DMA_TX_CONTROL_START_GET(x)
+#define GMBOX0_DMA_TX_CONTROL_START_SET(x) WLAN_GMBOX0_DMA_TX_CONTROL_START_SET(x)
+#define GMBOX0_DMA_TX_CONTROL_STOP_MSB WLAN_GMBOX0_DMA_TX_CONTROL_STOP_MSB
+#define GMBOX0_DMA_TX_CONTROL_STOP_LSB WLAN_GMBOX0_DMA_TX_CONTROL_STOP_LSB
+#define GMBOX0_DMA_TX_CONTROL_STOP_MASK WLAN_GMBOX0_DMA_TX_CONTROL_STOP_MASK
+#define GMBOX0_DMA_TX_CONTROL_STOP_GET(x) WLAN_GMBOX0_DMA_TX_CONTROL_STOP_GET(x)
+#define GMBOX0_DMA_TX_CONTROL_STOP_SET(x) WLAN_GMBOX0_DMA_TX_CONTROL_STOP_SET(x)
+#define GMBOX_INT_STATUS_ADDRESS WLAN_GMBOX_INT_STATUS_ADDRESS
+#define GMBOX_INT_STATUS_OFFSET WLAN_GMBOX_INT_STATUS_OFFSET
+#define GMBOX_INT_STATUS_TX_OVERFLOW_MSB WLAN_GMBOX_INT_STATUS_TX_OVERFLOW_MSB
+#define GMBOX_INT_STATUS_TX_OVERFLOW_LSB WLAN_GMBOX_INT_STATUS_TX_OVERFLOW_LSB
+#define GMBOX_INT_STATUS_TX_OVERFLOW_MASK WLAN_GMBOX_INT_STATUS_TX_OVERFLOW_MASK
+#define GMBOX_INT_STATUS_TX_OVERFLOW_GET(x) WLAN_GMBOX_INT_STATUS_TX_OVERFLOW_GET(x)
+#define GMBOX_INT_STATUS_TX_OVERFLOW_SET(x) WLAN_GMBOX_INT_STATUS_TX_OVERFLOW_SET(x)
+#define GMBOX_INT_STATUS_RX_UNDERFLOW_MSB WLAN_GMBOX_INT_STATUS_RX_UNDERFLOW_MSB
+#define GMBOX_INT_STATUS_RX_UNDERFLOW_LSB WLAN_GMBOX_INT_STATUS_RX_UNDERFLOW_LSB
+#define GMBOX_INT_STATUS_RX_UNDERFLOW_MASK WLAN_GMBOX_INT_STATUS_RX_UNDERFLOW_MASK
+#define GMBOX_INT_STATUS_RX_UNDERFLOW_GET(x) WLAN_GMBOX_INT_STATUS_RX_UNDERFLOW_GET(x)
+#define GMBOX_INT_STATUS_RX_UNDERFLOW_SET(x) WLAN_GMBOX_INT_STATUS_RX_UNDERFLOW_SET(x)
+#define GMBOX_INT_STATUS_RX_DMA_COMPLETE_MSB WLAN_GMBOX_INT_STATUS_RX_DMA_COMPLETE_MSB
+#define GMBOX_INT_STATUS_RX_DMA_COMPLETE_LSB WLAN_GMBOX_INT_STATUS_RX_DMA_COMPLETE_LSB
+#define GMBOX_INT_STATUS_RX_DMA_COMPLETE_MASK WLAN_GMBOX_INT_STATUS_RX_DMA_COMPLETE_MASK
+#define GMBOX_INT_STATUS_RX_DMA_COMPLETE_GET(x) WLAN_GMBOX_INT_STATUS_RX_DMA_COMPLETE_GET(x)
+#define GMBOX_INT_STATUS_RX_DMA_COMPLETE_SET(x) WLAN_GMBOX_INT_STATUS_RX_DMA_COMPLETE_SET(x)
+#define GMBOX_INT_STATUS_TX_DMA_EOM_COMPLETE_MSB WLAN_GMBOX_INT_STATUS_TX_DMA_EOM_COMPLETE_MSB
+#define GMBOX_INT_STATUS_TX_DMA_EOM_COMPLETE_LSB WLAN_GMBOX_INT_STATUS_TX_DMA_EOM_COMPLETE_LSB
+#define GMBOX_INT_STATUS_TX_DMA_EOM_COMPLETE_MASK WLAN_GMBOX_INT_STATUS_TX_DMA_EOM_COMPLETE_MASK
+#define GMBOX_INT_STATUS_TX_DMA_EOM_COMPLETE_GET(x) WLAN_GMBOX_INT_STATUS_TX_DMA_EOM_COMPLETE_GET(x)
+#define GMBOX_INT_STATUS_TX_DMA_EOM_COMPLETE_SET(x) WLAN_GMBOX_INT_STATUS_TX_DMA_EOM_COMPLETE_SET(x)
+#define GMBOX_INT_STATUS_TX_DMA_COMPLETE_MSB WLAN_GMBOX_INT_STATUS_TX_DMA_COMPLETE_MSB
+#define GMBOX_INT_STATUS_TX_DMA_COMPLETE_LSB WLAN_GMBOX_INT_STATUS_TX_DMA_COMPLETE_LSB
+#define GMBOX_INT_STATUS_TX_DMA_COMPLETE_MASK WLAN_GMBOX_INT_STATUS_TX_DMA_COMPLETE_MASK
+#define GMBOX_INT_STATUS_TX_DMA_COMPLETE_GET(x) WLAN_GMBOX_INT_STATUS_TX_DMA_COMPLETE_GET(x)
+#define GMBOX_INT_STATUS_TX_DMA_COMPLETE_SET(x) WLAN_GMBOX_INT_STATUS_TX_DMA_COMPLETE_SET(x)
+#define GMBOX_INT_STATUS_TX_NOT_EMPTY_MSB WLAN_GMBOX_INT_STATUS_TX_NOT_EMPTY_MSB
+#define GMBOX_INT_STATUS_TX_NOT_EMPTY_LSB WLAN_GMBOX_INT_STATUS_TX_NOT_EMPTY_LSB
+#define GMBOX_INT_STATUS_TX_NOT_EMPTY_MASK WLAN_GMBOX_INT_STATUS_TX_NOT_EMPTY_MASK
+#define GMBOX_INT_STATUS_TX_NOT_EMPTY_GET(x) WLAN_GMBOX_INT_STATUS_TX_NOT_EMPTY_GET(x)
+#define GMBOX_INT_STATUS_TX_NOT_EMPTY_SET(x) WLAN_GMBOX_INT_STATUS_TX_NOT_EMPTY_SET(x)
+#define GMBOX_INT_STATUS_RX_NOT_FULL_MSB WLAN_GMBOX_INT_STATUS_RX_NOT_FULL_MSB
+#define GMBOX_INT_STATUS_RX_NOT_FULL_LSB WLAN_GMBOX_INT_STATUS_RX_NOT_FULL_LSB
+#define GMBOX_INT_STATUS_RX_NOT_FULL_MASK WLAN_GMBOX_INT_STATUS_RX_NOT_FULL_MASK
+#define GMBOX_INT_STATUS_RX_NOT_FULL_GET(x) WLAN_GMBOX_INT_STATUS_RX_NOT_FULL_GET(x)
+#define GMBOX_INT_STATUS_RX_NOT_FULL_SET(x) WLAN_GMBOX_INT_STATUS_RX_NOT_FULL_SET(x)
+#define GMBOX_INT_ENABLE_ADDRESS WLAN_GMBOX_INT_ENABLE_ADDRESS
+#define GMBOX_INT_ENABLE_OFFSET WLAN_GMBOX_INT_ENABLE_OFFSET
+#define GMBOX_INT_ENABLE_TX_OVERFLOW_MSB WLAN_GMBOX_INT_ENABLE_TX_OVERFLOW_MSB
+#define GMBOX_INT_ENABLE_TX_OVERFLOW_LSB WLAN_GMBOX_INT_ENABLE_TX_OVERFLOW_LSB
+#define GMBOX_INT_ENABLE_TX_OVERFLOW_MASK WLAN_GMBOX_INT_ENABLE_TX_OVERFLOW_MASK
+#define GMBOX_INT_ENABLE_TX_OVERFLOW_GET(x) WLAN_GMBOX_INT_ENABLE_TX_OVERFLOW_GET(x)
+#define GMBOX_INT_ENABLE_TX_OVERFLOW_SET(x) WLAN_GMBOX_INT_ENABLE_TX_OVERFLOW_SET(x)
+#define GMBOX_INT_ENABLE_RX_UNDERFLOW_MSB WLAN_GMBOX_INT_ENABLE_RX_UNDERFLOW_MSB
+#define GMBOX_INT_ENABLE_RX_UNDERFLOW_LSB WLAN_GMBOX_INT_ENABLE_RX_UNDERFLOW_LSB
+#define GMBOX_INT_ENABLE_RX_UNDERFLOW_MASK WLAN_GMBOX_INT_ENABLE_RX_UNDERFLOW_MASK
+#define GMBOX_INT_ENABLE_RX_UNDERFLOW_GET(x) WLAN_GMBOX_INT_ENABLE_RX_UNDERFLOW_GET(x)
+#define GMBOX_INT_ENABLE_RX_UNDERFLOW_SET(x) WLAN_GMBOX_INT_ENABLE_RX_UNDERFLOW_SET(x)
+#define GMBOX_INT_ENABLE_RX_DMA_COMPLETE_MSB WLAN_GMBOX_INT_ENABLE_RX_DMA_COMPLETE_MSB
+#define GMBOX_INT_ENABLE_RX_DMA_COMPLETE_LSB WLAN_GMBOX_INT_ENABLE_RX_DMA_COMPLETE_LSB
+#define GMBOX_INT_ENABLE_RX_DMA_COMPLETE_MASK WLAN_GMBOX_INT_ENABLE_RX_DMA_COMPLETE_MASK
+#define GMBOX_INT_ENABLE_RX_DMA_COMPLETE_GET(x) WLAN_GMBOX_INT_ENABLE_RX_DMA_COMPLETE_GET(x)
+#define GMBOX_INT_ENABLE_RX_DMA_COMPLETE_SET(x) WLAN_GMBOX_INT_ENABLE_RX_DMA_COMPLETE_SET(x)
+#define GMBOX_INT_ENABLE_TX_DMA_EOM_COMPLETE_MSB WLAN_GMBOX_INT_ENABLE_TX_DMA_EOM_COMPLETE_MSB
+#define GMBOX_INT_ENABLE_TX_DMA_EOM_COMPLETE_LSB WLAN_GMBOX_INT_ENABLE_TX_DMA_EOM_COMPLETE_LSB
+#define GMBOX_INT_ENABLE_TX_DMA_EOM_COMPLETE_MASK WLAN_GMBOX_INT_ENABLE_TX_DMA_EOM_COMPLETE_MASK
+#define GMBOX_INT_ENABLE_TX_DMA_EOM_COMPLETE_GET(x) WLAN_GMBOX_INT_ENABLE_TX_DMA_EOM_COMPLETE_GET(x)
+#define GMBOX_INT_ENABLE_TX_DMA_EOM_COMPLETE_SET(x) WLAN_GMBOX_INT_ENABLE_TX_DMA_EOM_COMPLETE_SET(x)
+#define GMBOX_INT_ENABLE_TX_DMA_COMPLETE_MSB WLAN_GMBOX_INT_ENABLE_TX_DMA_COMPLETE_MSB
+#define GMBOX_INT_ENABLE_TX_DMA_COMPLETE_LSB WLAN_GMBOX_INT_ENABLE_TX_DMA_COMPLETE_LSB
+#define GMBOX_INT_ENABLE_TX_DMA_COMPLETE_MASK WLAN_GMBOX_INT_ENABLE_TX_DMA_COMPLETE_MASK
+#define GMBOX_INT_ENABLE_TX_DMA_COMPLETE_GET(x) WLAN_GMBOX_INT_ENABLE_TX_DMA_COMPLETE_GET(x)
+#define GMBOX_INT_ENABLE_TX_DMA_COMPLETE_SET(x) WLAN_GMBOX_INT_ENABLE_TX_DMA_COMPLETE_SET(x)
+#define GMBOX_INT_ENABLE_TX_NOT_EMPTY_MSB WLAN_GMBOX_INT_ENABLE_TX_NOT_EMPTY_MSB
+#define GMBOX_INT_ENABLE_TX_NOT_EMPTY_LSB WLAN_GMBOX_INT_ENABLE_TX_NOT_EMPTY_LSB
+#define GMBOX_INT_ENABLE_TX_NOT_EMPTY_MASK WLAN_GMBOX_INT_ENABLE_TX_NOT_EMPTY_MASK
+#define GMBOX_INT_ENABLE_TX_NOT_EMPTY_GET(x) WLAN_GMBOX_INT_ENABLE_TX_NOT_EMPTY_GET(x)
+#define GMBOX_INT_ENABLE_TX_NOT_EMPTY_SET(x) WLAN_GMBOX_INT_ENABLE_TX_NOT_EMPTY_SET(x)
+#define GMBOX_INT_ENABLE_RX_NOT_FULL_MSB WLAN_GMBOX_INT_ENABLE_RX_NOT_FULL_MSB
+#define GMBOX_INT_ENABLE_RX_NOT_FULL_LSB WLAN_GMBOX_INT_ENABLE_RX_NOT_FULL_LSB
+#define GMBOX_INT_ENABLE_RX_NOT_FULL_MASK WLAN_GMBOX_INT_ENABLE_RX_NOT_FULL_MASK
+#define GMBOX_INT_ENABLE_RX_NOT_FULL_GET(x) WLAN_GMBOX_INT_ENABLE_RX_NOT_FULL_GET(x)
+#define GMBOX_INT_ENABLE_RX_NOT_FULL_SET(x) WLAN_GMBOX_INT_ENABLE_RX_NOT_FULL_SET(x)
+#define HOST_IF_WINDOW_ADDRESS WLAN_HOST_IF_WINDOW_ADDRESS
+#define HOST_IF_WINDOW_OFFSET WLAN_HOST_IF_WINDOW_OFFSET
+#define HOST_IF_WINDOW_DATA_MSB WLAN_HOST_IF_WINDOW_DATA_MSB
+#define HOST_IF_WINDOW_DATA_LSB WLAN_HOST_IF_WINDOW_DATA_LSB
+#define HOST_IF_WINDOW_DATA_MASK WLAN_HOST_IF_WINDOW_DATA_MASK
+#define HOST_IF_WINDOW_DATA_GET(x) WLAN_HOST_IF_WINDOW_DATA_GET(x)
+#define HOST_IF_WINDOW_DATA_SET(x) WLAN_HOST_IF_WINDOW_DATA_SET(x)
+
+
+#endif
+#endif
+
+
+
diff --git a/drivers/staging/ath6kl/include/common/AR6002/hw4.0/hw/mbox_wlan_host_reg.h b/drivers/staging/ath6kl/include/common/AR6002/hw4.0/hw/mbox_wlan_host_reg.h
new file mode 100644
index 000000000000..60855021c2b0
--- /dev/null
+++ b/drivers/staging/ath6kl/include/common/AR6002/hw4.0/hw/mbox_wlan_host_reg.h
@@ -0,0 +1,522 @@
+// ------------------------------------------------------------------
+// Copyright (c) 2004-2010 Atheros Corporation. All rights reserved.
+//
+//
+// Permission to use, copy, modify, and/or distribute this software for any
+// purpose with or without fee is hereby granted, provided that the above
+// copyright notice and this permission notice appear in all copies.
+//
+// THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+// WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+// MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+// ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+// WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+// ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+// OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+//
+//
+// ------------------------------------------------------------------
+//===================================================================
+// Author(s): ="Atheros"
+//===================================================================
+
+
+#ifndef _MBOX_WLAN_HOST_REG_REG_H_
+#define _MBOX_WLAN_HOST_REG_REG_H_
+
+#define HOST_INT_STATUS_ADDRESS 0x00000400
+#define HOST_INT_STATUS_OFFSET 0x00000400
+#define HOST_INT_STATUS_ERROR_MSB 7
+#define HOST_INT_STATUS_ERROR_LSB 7
+#define HOST_INT_STATUS_ERROR_MASK 0x00000080
+#define HOST_INT_STATUS_ERROR_GET(x) (((x) & HOST_INT_STATUS_ERROR_MASK) >> HOST_INT_STATUS_ERROR_LSB)
+#define HOST_INT_STATUS_ERROR_SET(x) (((x) << HOST_INT_STATUS_ERROR_LSB) & HOST_INT_STATUS_ERROR_MASK)
+#define HOST_INT_STATUS_CPU_MSB 6
+#define HOST_INT_STATUS_CPU_LSB 6
+#define HOST_INT_STATUS_CPU_MASK 0x00000040
+#define HOST_INT_STATUS_CPU_GET(x) (((x) & HOST_INT_STATUS_CPU_MASK) >> HOST_INT_STATUS_CPU_LSB)
+#define HOST_INT_STATUS_CPU_SET(x) (((x) << HOST_INT_STATUS_CPU_LSB) & HOST_INT_STATUS_CPU_MASK)
+#define HOST_INT_STATUS_INT_MSB 5
+#define HOST_INT_STATUS_INT_LSB 5
+#define HOST_INT_STATUS_INT_MASK 0x00000020
+#define HOST_INT_STATUS_INT_GET(x) (((x) & HOST_INT_STATUS_INT_MASK) >> HOST_INT_STATUS_INT_LSB)
+#define HOST_INT_STATUS_INT_SET(x) (((x) << HOST_INT_STATUS_INT_LSB) & HOST_INT_STATUS_INT_MASK)
+#define HOST_INT_STATUS_COUNTER_MSB 4
+#define HOST_INT_STATUS_COUNTER_LSB 4
+#define HOST_INT_STATUS_COUNTER_MASK 0x00000010
+#define HOST_INT_STATUS_COUNTER_GET(x) (((x) & HOST_INT_STATUS_COUNTER_MASK) >> HOST_INT_STATUS_COUNTER_LSB)
+#define HOST_INT_STATUS_COUNTER_SET(x) (((x) << HOST_INT_STATUS_COUNTER_LSB) & HOST_INT_STATUS_COUNTER_MASK)
+#define HOST_INT_STATUS_MBOX_DATA_MSB 3
+#define HOST_INT_STATUS_MBOX_DATA_LSB 0
+#define HOST_INT_STATUS_MBOX_DATA_MASK 0x0000000f
+#define HOST_INT_STATUS_MBOX_DATA_GET(x) (((x) & HOST_INT_STATUS_MBOX_DATA_MASK) >> HOST_INT_STATUS_MBOX_DATA_LSB)
+#define HOST_INT_STATUS_MBOX_DATA_SET(x) (((x) << HOST_INT_STATUS_MBOX_DATA_LSB) & HOST_INT_STATUS_MBOX_DATA_MASK)
+
+#define CPU_INT_STATUS_ADDRESS 0x00000401
+#define CPU_INT_STATUS_OFFSET 0x00000401
+#define CPU_INT_STATUS_BIT_MSB 7
+#define CPU_INT_STATUS_BIT_LSB 0
+#define CPU_INT_STATUS_BIT_MASK 0x000000ff
+#define CPU_INT_STATUS_BIT_GET(x) (((x) & CPU_INT_STATUS_BIT_MASK) >> CPU_INT_STATUS_BIT_LSB)
+#define CPU_INT_STATUS_BIT_SET(x) (((x) << CPU_INT_STATUS_BIT_LSB) & CPU_INT_STATUS_BIT_MASK)
+
+#define ERROR_INT_STATUS_ADDRESS 0x00000402
+#define ERROR_INT_STATUS_OFFSET 0x00000402
+#define ERROR_INT_STATUS_UART_HCI_FRAMER_SYNC_ERROR_MSB 6
+#define ERROR_INT_STATUS_UART_HCI_FRAMER_SYNC_ERROR_LSB 6
+#define ERROR_INT_STATUS_UART_HCI_FRAMER_SYNC_ERROR_MASK 0x00000040
+#define ERROR_INT_STATUS_UART_HCI_FRAMER_SYNC_ERROR_GET(x) (((x) & ERROR_INT_STATUS_UART_HCI_FRAMER_SYNC_ERROR_MASK) >> ERROR_INT_STATUS_UART_HCI_FRAMER_SYNC_ERROR_LSB)
+#define ERROR_INT_STATUS_UART_HCI_FRAMER_SYNC_ERROR_SET(x) (((x) << ERROR_INT_STATUS_UART_HCI_FRAMER_SYNC_ERROR_LSB) & ERROR_INT_STATUS_UART_HCI_FRAMER_SYNC_ERROR_MASK)
+#define ERROR_INT_STATUS_UART_HCI_FRAMER_OVERFLOW_MSB 5
+#define ERROR_INT_STATUS_UART_HCI_FRAMER_OVERFLOW_LSB 5
+#define ERROR_INT_STATUS_UART_HCI_FRAMER_OVERFLOW_MASK 0x00000020
+#define ERROR_INT_STATUS_UART_HCI_FRAMER_OVERFLOW_GET(x) (((x) & ERROR_INT_STATUS_UART_HCI_FRAMER_OVERFLOW_MASK) >> ERROR_INT_STATUS_UART_HCI_FRAMER_OVERFLOW_LSB)
+#define ERROR_INT_STATUS_UART_HCI_FRAMER_OVERFLOW_SET(x) (((x) << ERROR_INT_STATUS_UART_HCI_FRAMER_OVERFLOW_LSB) & ERROR_INT_STATUS_UART_HCI_FRAMER_OVERFLOW_MASK)
+#define ERROR_INT_STATUS_UART_HCI_FRAMER_UNDERFLOW_MSB 4
+#define ERROR_INT_STATUS_UART_HCI_FRAMER_UNDERFLOW_LSB 4
+#define ERROR_INT_STATUS_UART_HCI_FRAMER_UNDERFLOW_MASK 0x00000010
+#define ERROR_INT_STATUS_UART_HCI_FRAMER_UNDERFLOW_GET(x) (((x) & ERROR_INT_STATUS_UART_HCI_FRAMER_UNDERFLOW_MASK) >> ERROR_INT_STATUS_UART_HCI_FRAMER_UNDERFLOW_LSB)
+#define ERROR_INT_STATUS_UART_HCI_FRAMER_UNDERFLOW_SET(x) (((x) << ERROR_INT_STATUS_UART_HCI_FRAMER_UNDERFLOW_LSB) & ERROR_INT_STATUS_UART_HCI_FRAMER_UNDERFLOW_MASK)
+#define ERROR_INT_STATUS_SPI_MSB 3
+#define ERROR_INT_STATUS_SPI_LSB 3
+#define ERROR_INT_STATUS_SPI_MASK 0x00000008
+#define ERROR_INT_STATUS_SPI_GET(x) (((x) & ERROR_INT_STATUS_SPI_MASK) >> ERROR_INT_STATUS_SPI_LSB)
+#define ERROR_INT_STATUS_SPI_SET(x) (((x) << ERROR_INT_STATUS_SPI_LSB) & ERROR_INT_STATUS_SPI_MASK)
+#define ERROR_INT_STATUS_WAKEUP_MSB 2
+#define ERROR_INT_STATUS_WAKEUP_LSB 2
+#define ERROR_INT_STATUS_WAKEUP_MASK 0x00000004
+#define ERROR_INT_STATUS_WAKEUP_GET(x) (((x) & ERROR_INT_STATUS_WAKEUP_MASK) >> ERROR_INT_STATUS_WAKEUP_LSB)
+#define ERROR_INT_STATUS_WAKEUP_SET(x) (((x) << ERROR_INT_STATUS_WAKEUP_LSB) & ERROR_INT_STATUS_WAKEUP_MASK)
+#define ERROR_INT_STATUS_RX_UNDERFLOW_MSB 1
+#define ERROR_INT_STATUS_RX_UNDERFLOW_LSB 1
+#define ERROR_INT_STATUS_RX_UNDERFLOW_MASK 0x00000002
+#define ERROR_INT_STATUS_RX_UNDERFLOW_GET(x) (((x) & ERROR_INT_STATUS_RX_UNDERFLOW_MASK) >> ERROR_INT_STATUS_RX_UNDERFLOW_LSB)
+#define ERROR_INT_STATUS_RX_UNDERFLOW_SET(x) (((x) << ERROR_INT_STATUS_RX_UNDERFLOW_LSB) & ERROR_INT_STATUS_RX_UNDERFLOW_MASK)
+#define ERROR_INT_STATUS_TX_OVERFLOW_MSB 0
+#define ERROR_INT_STATUS_TX_OVERFLOW_LSB 0
+#define ERROR_INT_STATUS_TX_OVERFLOW_MASK 0x00000001
+#define ERROR_INT_STATUS_TX_OVERFLOW_GET(x) (((x) & ERROR_INT_STATUS_TX_OVERFLOW_MASK) >> ERROR_INT_STATUS_TX_OVERFLOW_LSB)
+#define ERROR_INT_STATUS_TX_OVERFLOW_SET(x) (((x) << ERROR_INT_STATUS_TX_OVERFLOW_LSB) & ERROR_INT_STATUS_TX_OVERFLOW_MASK)
+
+#define COUNTER_INT_STATUS_ADDRESS 0x00000403
+#define COUNTER_INT_STATUS_OFFSET 0x00000403
+#define COUNTER_INT_STATUS_COUNTER_MSB 7
+#define COUNTER_INT_STATUS_COUNTER_LSB 0
+#define COUNTER_INT_STATUS_COUNTER_MASK 0x000000ff
+#define COUNTER_INT_STATUS_COUNTER_GET(x) (((x) & COUNTER_INT_STATUS_COUNTER_MASK) >> COUNTER_INT_STATUS_COUNTER_LSB)
+#define COUNTER_INT_STATUS_COUNTER_SET(x) (((x) << COUNTER_INT_STATUS_COUNTER_LSB) & COUNTER_INT_STATUS_COUNTER_MASK)
+
+#define MBOX_FRAME_ADDRESS 0x00000404
+#define MBOX_FRAME_OFFSET 0x00000404
+#define MBOX_FRAME_RX_EOM_MSB 7
+#define MBOX_FRAME_RX_EOM_LSB 4
+#define MBOX_FRAME_RX_EOM_MASK 0x000000f0
+#define MBOX_FRAME_RX_EOM_GET(x) (((x) & MBOX_FRAME_RX_EOM_MASK) >> MBOX_FRAME_RX_EOM_LSB)
+#define MBOX_FRAME_RX_EOM_SET(x) (((x) << MBOX_FRAME_RX_EOM_LSB) & MBOX_FRAME_RX_EOM_MASK)
+#define MBOX_FRAME_RX_SOM_MSB 3
+#define MBOX_FRAME_RX_SOM_LSB 0
+#define MBOX_FRAME_RX_SOM_MASK 0x0000000f
+#define MBOX_FRAME_RX_SOM_GET(x) (((x) & MBOX_FRAME_RX_SOM_MASK) >> MBOX_FRAME_RX_SOM_LSB)
+#define MBOX_FRAME_RX_SOM_SET(x) (((x) << MBOX_FRAME_RX_SOM_LSB) & MBOX_FRAME_RX_SOM_MASK)
+
+#define RX_LOOKAHEAD_VALID_ADDRESS 0x00000405
+#define RX_LOOKAHEAD_VALID_OFFSET 0x00000405
+#define RX_LOOKAHEAD_VALID_MBOX_MSB 3
+#define RX_LOOKAHEAD_VALID_MBOX_LSB 0
+#define RX_LOOKAHEAD_VALID_MBOX_MASK 0x0000000f
+#define RX_LOOKAHEAD_VALID_MBOX_GET(x) (((x) & RX_LOOKAHEAD_VALID_MBOX_MASK) >> RX_LOOKAHEAD_VALID_MBOX_LSB)
+#define RX_LOOKAHEAD_VALID_MBOX_SET(x) (((x) << RX_LOOKAHEAD_VALID_MBOX_LSB) & RX_LOOKAHEAD_VALID_MBOX_MASK)
+
+#define HOST_INT_STATUS2_ADDRESS 0x00000406
+#define HOST_INT_STATUS2_OFFSET 0x00000406
+#define HOST_INT_STATUS2_GMBOX_RX_UNDERFLOW_MSB 2
+#define HOST_INT_STATUS2_GMBOX_RX_UNDERFLOW_LSB 2
+#define HOST_INT_STATUS2_GMBOX_RX_UNDERFLOW_MASK 0x00000004
+#define HOST_INT_STATUS2_GMBOX_RX_UNDERFLOW_GET(x) (((x) & HOST_INT_STATUS2_GMBOX_RX_UNDERFLOW_MASK) >> HOST_INT_STATUS2_GMBOX_RX_UNDERFLOW_LSB)
+#define HOST_INT_STATUS2_GMBOX_RX_UNDERFLOW_SET(x) (((x) << HOST_INT_STATUS2_GMBOX_RX_UNDERFLOW_LSB) & HOST_INT_STATUS2_GMBOX_RX_UNDERFLOW_MASK)
+#define HOST_INT_STATUS2_GMBOX_TX_OVERFLOW_MSB 1
+#define HOST_INT_STATUS2_GMBOX_TX_OVERFLOW_LSB 1
+#define HOST_INT_STATUS2_GMBOX_TX_OVERFLOW_MASK 0x00000002
+#define HOST_INT_STATUS2_GMBOX_TX_OVERFLOW_GET(x) (((x) & HOST_INT_STATUS2_GMBOX_TX_OVERFLOW_MASK) >> HOST_INT_STATUS2_GMBOX_TX_OVERFLOW_LSB)
+#define HOST_INT_STATUS2_GMBOX_TX_OVERFLOW_SET(x) (((x) << HOST_INT_STATUS2_GMBOX_TX_OVERFLOW_LSB) & HOST_INT_STATUS2_GMBOX_TX_OVERFLOW_MASK)
+#define HOST_INT_STATUS2_GMBOX_DATA_MSB 0
+#define HOST_INT_STATUS2_GMBOX_DATA_LSB 0
+#define HOST_INT_STATUS2_GMBOX_DATA_MASK 0x00000001
+#define HOST_INT_STATUS2_GMBOX_DATA_GET(x) (((x) & HOST_INT_STATUS2_GMBOX_DATA_MASK) >> HOST_INT_STATUS2_GMBOX_DATA_LSB)
+#define HOST_INT_STATUS2_GMBOX_DATA_SET(x) (((x) << HOST_INT_STATUS2_GMBOX_DATA_LSB) & HOST_INT_STATUS2_GMBOX_DATA_MASK)
+
+#define GMBOX_RX_AVAIL_ADDRESS 0x00000407
+#define GMBOX_RX_AVAIL_OFFSET 0x00000407
+#define GMBOX_RX_AVAIL_BYTE_MSB 6
+#define GMBOX_RX_AVAIL_BYTE_LSB 0
+#define GMBOX_RX_AVAIL_BYTE_MASK 0x0000007f
+#define GMBOX_RX_AVAIL_BYTE_GET(x) (((x) & GMBOX_RX_AVAIL_BYTE_MASK) >> GMBOX_RX_AVAIL_BYTE_LSB)
+#define GMBOX_RX_AVAIL_BYTE_SET(x) (((x) << GMBOX_RX_AVAIL_BYTE_LSB) & GMBOX_RX_AVAIL_BYTE_MASK)
+
+#define RX_LOOKAHEAD0_ADDRESS 0x00000408
+#define RX_LOOKAHEAD0_OFFSET 0x00000408
+#define RX_LOOKAHEAD0_DATA_MSB 7
+#define RX_LOOKAHEAD0_DATA_LSB 0
+#define RX_LOOKAHEAD0_DATA_MASK 0x000000ff
+#define RX_LOOKAHEAD0_DATA_GET(x) (((x) & RX_LOOKAHEAD0_DATA_MASK) >> RX_LOOKAHEAD0_DATA_LSB)
+#define RX_LOOKAHEAD0_DATA_SET(x) (((x) << RX_LOOKAHEAD0_DATA_LSB) & RX_LOOKAHEAD0_DATA_MASK)
+
+#define RX_LOOKAHEAD1_ADDRESS 0x0000040c
+#define RX_LOOKAHEAD1_OFFSET 0x0000040c
+#define RX_LOOKAHEAD1_DATA_MSB 7
+#define RX_LOOKAHEAD1_DATA_LSB 0
+#define RX_LOOKAHEAD1_DATA_MASK 0x000000ff
+#define RX_LOOKAHEAD1_DATA_GET(x) (((x) & RX_LOOKAHEAD1_DATA_MASK) >> RX_LOOKAHEAD1_DATA_LSB)
+#define RX_LOOKAHEAD1_DATA_SET(x) (((x) << RX_LOOKAHEAD1_DATA_LSB) & RX_LOOKAHEAD1_DATA_MASK)
+
+#define RX_LOOKAHEAD2_ADDRESS 0x00000410
+#define RX_LOOKAHEAD2_OFFSET 0x00000410
+#define RX_LOOKAHEAD2_DATA_MSB 7
+#define RX_LOOKAHEAD2_DATA_LSB 0
+#define RX_LOOKAHEAD2_DATA_MASK 0x000000ff
+#define RX_LOOKAHEAD2_DATA_GET(x) (((x) & RX_LOOKAHEAD2_DATA_MASK) >> RX_LOOKAHEAD2_DATA_LSB)
+#define RX_LOOKAHEAD2_DATA_SET(x) (((x) << RX_LOOKAHEAD2_DATA_LSB) & RX_LOOKAHEAD2_DATA_MASK)
+
+#define RX_LOOKAHEAD3_ADDRESS 0x00000414
+#define RX_LOOKAHEAD3_OFFSET 0x00000414
+#define RX_LOOKAHEAD3_DATA_MSB 7
+#define RX_LOOKAHEAD3_DATA_LSB 0
+#define RX_LOOKAHEAD3_DATA_MASK 0x000000ff
+#define RX_LOOKAHEAD3_DATA_GET(x) (((x) & RX_LOOKAHEAD3_DATA_MASK) >> RX_LOOKAHEAD3_DATA_LSB)
+#define RX_LOOKAHEAD3_DATA_SET(x) (((x) << RX_LOOKAHEAD3_DATA_LSB) & RX_LOOKAHEAD3_DATA_MASK)
+
+#define INT_STATUS_ENABLE_ADDRESS 0x00000418
+#define INT_STATUS_ENABLE_OFFSET 0x00000418
+#define INT_STATUS_ENABLE_ERROR_MSB 7
+#define INT_STATUS_ENABLE_ERROR_LSB 7
+#define INT_STATUS_ENABLE_ERROR_MASK 0x00000080
+#define INT_STATUS_ENABLE_ERROR_GET(x) (((x) & INT_STATUS_ENABLE_ERROR_MASK) >> INT_STATUS_ENABLE_ERROR_LSB)
+#define INT_STATUS_ENABLE_ERROR_SET(x) (((x) << INT_STATUS_ENABLE_ERROR_LSB) & INT_STATUS_ENABLE_ERROR_MASK)
+#define INT_STATUS_ENABLE_CPU_MSB 6
+#define INT_STATUS_ENABLE_CPU_LSB 6
+#define INT_STATUS_ENABLE_CPU_MASK 0x00000040
+#define INT_STATUS_ENABLE_CPU_GET(x) (((x) & INT_STATUS_ENABLE_CPU_MASK) >> INT_STATUS_ENABLE_CPU_LSB)
+#define INT_STATUS_ENABLE_CPU_SET(x) (((x) << INT_STATUS_ENABLE_CPU_LSB) & INT_STATUS_ENABLE_CPU_MASK)
+#define INT_STATUS_ENABLE_INT_MSB 5
+#define INT_STATUS_ENABLE_INT_LSB 5
+#define INT_STATUS_ENABLE_INT_MASK 0x00000020
+#define INT_STATUS_ENABLE_INT_GET(x) (((x) & INT_STATUS_ENABLE_INT_MASK) >> INT_STATUS_ENABLE_INT_LSB)
+#define INT_STATUS_ENABLE_INT_SET(x) (((x) << INT_STATUS_ENABLE_INT_LSB) & INT_STATUS_ENABLE_INT_MASK)
+#define INT_STATUS_ENABLE_COUNTER_MSB 4
+#define INT_STATUS_ENABLE_COUNTER_LSB 4
+#define INT_STATUS_ENABLE_COUNTER_MASK 0x00000010
+#define INT_STATUS_ENABLE_COUNTER_GET(x) (((x) & INT_STATUS_ENABLE_COUNTER_MASK) >> INT_STATUS_ENABLE_COUNTER_LSB)
+#define INT_STATUS_ENABLE_COUNTER_SET(x) (((x) << INT_STATUS_ENABLE_COUNTER_LSB) & INT_STATUS_ENABLE_COUNTER_MASK)
+#define INT_STATUS_ENABLE_MBOX_DATA_MSB 3
+#define INT_STATUS_ENABLE_MBOX_DATA_LSB 0
+#define INT_STATUS_ENABLE_MBOX_DATA_MASK 0x0000000f
+#define INT_STATUS_ENABLE_MBOX_DATA_GET(x) (((x) & INT_STATUS_ENABLE_MBOX_DATA_MASK) >> INT_STATUS_ENABLE_MBOX_DATA_LSB)
+#define INT_STATUS_ENABLE_MBOX_DATA_SET(x) (((x) << INT_STATUS_ENABLE_MBOX_DATA_LSB) & INT_STATUS_ENABLE_MBOX_DATA_MASK)
+
+#define CPU_INT_STATUS_ENABLE_ADDRESS 0x00000419
+#define CPU_INT_STATUS_ENABLE_OFFSET 0x00000419
+#define CPU_INT_STATUS_ENABLE_BIT_MSB 7
+#define CPU_INT_STATUS_ENABLE_BIT_LSB 0
+#define CPU_INT_STATUS_ENABLE_BIT_MASK 0x000000ff
+#define CPU_INT_STATUS_ENABLE_BIT_GET(x) (((x) & CPU_INT_STATUS_ENABLE_BIT_MASK) >> CPU_INT_STATUS_ENABLE_BIT_LSB)
+#define CPU_INT_STATUS_ENABLE_BIT_SET(x) (((x) << CPU_INT_STATUS_ENABLE_BIT_LSB) & CPU_INT_STATUS_ENABLE_BIT_MASK)
+
+#define ERROR_STATUS_ENABLE_ADDRESS 0x0000041a
+#define ERROR_STATUS_ENABLE_OFFSET 0x0000041a
+#define ERROR_STATUS_ENABLE_UART_HCI_FRAMER_SYNC_ERROR_MSB 6
+#define ERROR_STATUS_ENABLE_UART_HCI_FRAMER_SYNC_ERROR_LSB 6
+#define ERROR_STATUS_ENABLE_UART_HCI_FRAMER_SYNC_ERROR_MASK 0x00000040
+#define ERROR_STATUS_ENABLE_UART_HCI_FRAMER_SYNC_ERROR_GET(x) (((x) & ERROR_STATUS_ENABLE_UART_HCI_FRAMER_SYNC_ERROR_MASK) >> ERROR_STATUS_ENABLE_UART_HCI_FRAMER_SYNC_ERROR_LSB)
+#define ERROR_STATUS_ENABLE_UART_HCI_FRAMER_SYNC_ERROR_SET(x) (((x) << ERROR_STATUS_ENABLE_UART_HCI_FRAMER_SYNC_ERROR_LSB) & ERROR_STATUS_ENABLE_UART_HCI_FRAMER_SYNC_ERROR_MASK)
+#define ERROR_STATUS_ENABLE_UART_HCI_FRAMER_OVERFLOW_MSB 5
+#define ERROR_STATUS_ENABLE_UART_HCI_FRAMER_OVERFLOW_LSB 5
+#define ERROR_STATUS_ENABLE_UART_HCI_FRAMER_OVERFLOW_MASK 0x00000020
+#define ERROR_STATUS_ENABLE_UART_HCI_FRAMER_OVERFLOW_GET(x) (((x) & ERROR_STATUS_ENABLE_UART_HCI_FRAMER_OVERFLOW_MASK) >> ERROR_STATUS_ENABLE_UART_HCI_FRAMER_OVERFLOW_LSB)
+#define ERROR_STATUS_ENABLE_UART_HCI_FRAMER_OVERFLOW_SET(x) (((x) << ERROR_STATUS_ENABLE_UART_HCI_FRAMER_OVERFLOW_LSB) & ERROR_STATUS_ENABLE_UART_HCI_FRAMER_OVERFLOW_MASK)
+#define ERROR_STATUS_ENABLE_UART_HCI_FRAMER_UNDERFLOW_MSB 4
+#define ERROR_STATUS_ENABLE_UART_HCI_FRAMER_UNDERFLOW_LSB 4
+#define ERROR_STATUS_ENABLE_UART_HCI_FRAMER_UNDERFLOW_MASK 0x00000010
+#define ERROR_STATUS_ENABLE_UART_HCI_FRAMER_UNDERFLOW_GET(x) (((x) & ERROR_STATUS_ENABLE_UART_HCI_FRAMER_UNDERFLOW_MASK) >> ERROR_STATUS_ENABLE_UART_HCI_FRAMER_UNDERFLOW_LSB)
+#define ERROR_STATUS_ENABLE_UART_HCI_FRAMER_UNDERFLOW_SET(x) (((x) << ERROR_STATUS_ENABLE_UART_HCI_FRAMER_UNDERFLOW_LSB) & ERROR_STATUS_ENABLE_UART_HCI_FRAMER_UNDERFLOW_MASK)
+#define ERROR_STATUS_ENABLE_WAKEUP_MSB 2
+#define ERROR_STATUS_ENABLE_WAKEUP_LSB 2
+#define ERROR_STATUS_ENABLE_WAKEUP_MASK 0x00000004
+#define ERROR_STATUS_ENABLE_WAKEUP_GET(x) (((x) & ERROR_STATUS_ENABLE_WAKEUP_MASK) >> ERROR_STATUS_ENABLE_WAKEUP_LSB)
+#define ERROR_STATUS_ENABLE_WAKEUP_SET(x) (((x) << ERROR_STATUS_ENABLE_WAKEUP_LSB) & ERROR_STATUS_ENABLE_WAKEUP_MASK)
+#define ERROR_STATUS_ENABLE_RX_UNDERFLOW_MSB 1
+#define ERROR_STATUS_ENABLE_RX_UNDERFLOW_LSB 1
+#define ERROR_STATUS_ENABLE_RX_UNDERFLOW_MASK 0x00000002
+#define ERROR_STATUS_ENABLE_RX_UNDERFLOW_GET(x) (((x) & ERROR_STATUS_ENABLE_RX_UNDERFLOW_MASK) >> ERROR_STATUS_ENABLE_RX_UNDERFLOW_LSB)
+#define ERROR_STATUS_ENABLE_RX_UNDERFLOW_SET(x) (((x) << ERROR_STATUS_ENABLE_RX_UNDERFLOW_LSB) & ERROR_STATUS_ENABLE_RX_UNDERFLOW_MASK)
+#define ERROR_STATUS_ENABLE_TX_OVERFLOW_MSB 0
+#define ERROR_STATUS_ENABLE_TX_OVERFLOW_LSB 0
+#define ERROR_STATUS_ENABLE_TX_OVERFLOW_MASK 0x00000001
+#define ERROR_STATUS_ENABLE_TX_OVERFLOW_GET(x) (((x) & ERROR_STATUS_ENABLE_TX_OVERFLOW_MASK) >> ERROR_STATUS_ENABLE_TX_OVERFLOW_LSB)
+#define ERROR_STATUS_ENABLE_TX_OVERFLOW_SET(x) (((x) << ERROR_STATUS_ENABLE_TX_OVERFLOW_LSB) & ERROR_STATUS_ENABLE_TX_OVERFLOW_MASK)
+
+#define COUNTER_INT_STATUS_ENABLE_ADDRESS 0x0000041b
+#define COUNTER_INT_STATUS_ENABLE_OFFSET 0x0000041b
+#define COUNTER_INT_STATUS_ENABLE_BIT_MSB 7
+#define COUNTER_INT_STATUS_ENABLE_BIT_LSB 0
+#define COUNTER_INT_STATUS_ENABLE_BIT_MASK 0x000000ff
+#define COUNTER_INT_STATUS_ENABLE_BIT_GET(x) (((x) & COUNTER_INT_STATUS_ENABLE_BIT_MASK) >> COUNTER_INT_STATUS_ENABLE_BIT_LSB)
+#define COUNTER_INT_STATUS_ENABLE_BIT_SET(x) (((x) << COUNTER_INT_STATUS_ENABLE_BIT_LSB) & COUNTER_INT_STATUS_ENABLE_BIT_MASK)
+
+#define COUNT_ADDRESS 0x00000420
+#define COUNT_OFFSET 0x00000420
+#define COUNT_VALUE_MSB 7
+#define COUNT_VALUE_LSB 0
+#define COUNT_VALUE_MASK 0x000000ff
+#define COUNT_VALUE_GET(x) (((x) & COUNT_VALUE_MASK) >> COUNT_VALUE_LSB)
+#define COUNT_VALUE_SET(x) (((x) << COUNT_VALUE_LSB) & COUNT_VALUE_MASK)
+
+#define COUNT_DEC_ADDRESS 0x00000440
+#define COUNT_DEC_OFFSET 0x00000440
+#define COUNT_DEC_VALUE_MSB 7
+#define COUNT_DEC_VALUE_LSB 0
+#define COUNT_DEC_VALUE_MASK 0x000000ff
+#define COUNT_DEC_VALUE_GET(x) (((x) & COUNT_DEC_VALUE_MASK) >> COUNT_DEC_VALUE_LSB)
+#define COUNT_DEC_VALUE_SET(x) (((x) << COUNT_DEC_VALUE_LSB) & COUNT_DEC_VALUE_MASK)
+
+#define SCRATCH_ADDRESS 0x00000460
+#define SCRATCH_OFFSET 0x00000460
+#define SCRATCH_VALUE_MSB 7
+#define SCRATCH_VALUE_LSB 0
+#define SCRATCH_VALUE_MASK 0x000000ff
+#define SCRATCH_VALUE_GET(x) (((x) & SCRATCH_VALUE_MASK) >> SCRATCH_VALUE_LSB)
+#define SCRATCH_VALUE_SET(x) (((x) << SCRATCH_VALUE_LSB) & SCRATCH_VALUE_MASK)
+
+#define FIFO_TIMEOUT_ADDRESS 0x00000468
+#define FIFO_TIMEOUT_OFFSET 0x00000468
+#define FIFO_TIMEOUT_VALUE_MSB 7
+#define FIFO_TIMEOUT_VALUE_LSB 0
+#define FIFO_TIMEOUT_VALUE_MASK 0x000000ff
+#define FIFO_TIMEOUT_VALUE_GET(x) (((x) & FIFO_TIMEOUT_VALUE_MASK) >> FIFO_TIMEOUT_VALUE_LSB)
+#define FIFO_TIMEOUT_VALUE_SET(x) (((x) << FIFO_TIMEOUT_VALUE_LSB) & FIFO_TIMEOUT_VALUE_MASK)
+
+#define FIFO_TIMEOUT_ENABLE_ADDRESS 0x00000469
+#define FIFO_TIMEOUT_ENABLE_OFFSET 0x00000469
+#define FIFO_TIMEOUT_ENABLE_SET_MSB 0
+#define FIFO_TIMEOUT_ENABLE_SET_LSB 0
+#define FIFO_TIMEOUT_ENABLE_SET_MASK 0x00000001
+#define FIFO_TIMEOUT_ENABLE_SET_GET(x) (((x) & FIFO_TIMEOUT_ENABLE_SET_MASK) >> FIFO_TIMEOUT_ENABLE_SET_LSB)
+#define FIFO_TIMEOUT_ENABLE_SET_SET(x) (((x) << FIFO_TIMEOUT_ENABLE_SET_LSB) & FIFO_TIMEOUT_ENABLE_SET_MASK)
+
+#define DISABLE_SLEEP_ADDRESS 0x0000046a
+#define DISABLE_SLEEP_OFFSET 0x0000046a
+#define DISABLE_SLEEP_FOR_INT_MSB 1
+#define DISABLE_SLEEP_FOR_INT_LSB 1
+#define DISABLE_SLEEP_FOR_INT_MASK 0x00000002
+#define DISABLE_SLEEP_FOR_INT_GET(x) (((x) & DISABLE_SLEEP_FOR_INT_MASK) >> DISABLE_SLEEP_FOR_INT_LSB)
+#define DISABLE_SLEEP_FOR_INT_SET(x) (((x) << DISABLE_SLEEP_FOR_INT_LSB) & DISABLE_SLEEP_FOR_INT_MASK)
+#define DISABLE_SLEEP_ON_MSB 0
+#define DISABLE_SLEEP_ON_LSB 0
+#define DISABLE_SLEEP_ON_MASK 0x00000001
+#define DISABLE_SLEEP_ON_GET(x) (((x) & DISABLE_SLEEP_ON_MASK) >> DISABLE_SLEEP_ON_LSB)
+#define DISABLE_SLEEP_ON_SET(x) (((x) << DISABLE_SLEEP_ON_LSB) & DISABLE_SLEEP_ON_MASK)
+
+#define LOCAL_BUS_ADDRESS 0x00000470
+#define LOCAL_BUS_OFFSET 0x00000470
+#define LOCAL_BUS_STATE_MSB 1
+#define LOCAL_BUS_STATE_LSB 0
+#define LOCAL_BUS_STATE_MASK 0x00000003
+#define LOCAL_BUS_STATE_GET(x) (((x) & LOCAL_BUS_STATE_MASK) >> LOCAL_BUS_STATE_LSB)
+#define LOCAL_BUS_STATE_SET(x) (((x) << LOCAL_BUS_STATE_LSB) & LOCAL_BUS_STATE_MASK)
+
+#define INT_WLAN_ADDRESS 0x00000472
+#define INT_WLAN_OFFSET 0x00000472
+#define INT_WLAN_VECTOR_MSB 7
+#define INT_WLAN_VECTOR_LSB 0
+#define INT_WLAN_VECTOR_MASK 0x000000ff
+#define INT_WLAN_VECTOR_GET(x) (((x) & INT_WLAN_VECTOR_MASK) >> INT_WLAN_VECTOR_LSB)
+#define INT_WLAN_VECTOR_SET(x) (((x) << INT_WLAN_VECTOR_LSB) & INT_WLAN_VECTOR_MASK)
+
+#define WINDOW_DATA_ADDRESS 0x00000474
+#define WINDOW_DATA_OFFSET 0x00000474
+#define WINDOW_DATA_DATA_MSB 7
+#define WINDOW_DATA_DATA_LSB 0
+#define WINDOW_DATA_DATA_MASK 0x000000ff
+#define WINDOW_DATA_DATA_GET(x) (((x) & WINDOW_DATA_DATA_MASK) >> WINDOW_DATA_DATA_LSB)
+#define WINDOW_DATA_DATA_SET(x) (((x) << WINDOW_DATA_DATA_LSB) & WINDOW_DATA_DATA_MASK)
+
+#define WINDOW_WRITE_ADDR_ADDRESS 0x00000478
+#define WINDOW_WRITE_ADDR_OFFSET 0x00000478
+#define WINDOW_WRITE_ADDR_ADDR_MSB 7
+#define WINDOW_WRITE_ADDR_ADDR_LSB 0
+#define WINDOW_WRITE_ADDR_ADDR_MASK 0x000000ff
+#define WINDOW_WRITE_ADDR_ADDR_GET(x) (((x) & WINDOW_WRITE_ADDR_ADDR_MASK) >> WINDOW_WRITE_ADDR_ADDR_LSB)
+#define WINDOW_WRITE_ADDR_ADDR_SET(x) (((x) << WINDOW_WRITE_ADDR_ADDR_LSB) & WINDOW_WRITE_ADDR_ADDR_MASK)
+
+#define WINDOW_READ_ADDR_ADDRESS 0x0000047c
+#define WINDOW_READ_ADDR_OFFSET 0x0000047c
+#define WINDOW_READ_ADDR_ADDR_MSB 7
+#define WINDOW_READ_ADDR_ADDR_LSB 0
+#define WINDOW_READ_ADDR_ADDR_MASK 0x000000ff
+#define WINDOW_READ_ADDR_ADDR_GET(x) (((x) & WINDOW_READ_ADDR_ADDR_MASK) >> WINDOW_READ_ADDR_ADDR_LSB)
+#define WINDOW_READ_ADDR_ADDR_SET(x) (((x) << WINDOW_READ_ADDR_ADDR_LSB) & WINDOW_READ_ADDR_ADDR_MASK)
+
+#define HOST_CTRL_SPI_CONFIG_ADDRESS 0x00000480
+#define HOST_CTRL_SPI_CONFIG_OFFSET 0x00000480
+#define HOST_CTRL_SPI_CONFIG_SPI_RESET_MSB 4
+#define HOST_CTRL_SPI_CONFIG_SPI_RESET_LSB 4
+#define HOST_CTRL_SPI_CONFIG_SPI_RESET_MASK 0x00000010
+#define HOST_CTRL_SPI_CONFIG_SPI_RESET_GET(x) (((x) & HOST_CTRL_SPI_CONFIG_SPI_RESET_MASK) >> HOST_CTRL_SPI_CONFIG_SPI_RESET_LSB)
+#define HOST_CTRL_SPI_CONFIG_SPI_RESET_SET(x) (((x) << HOST_CTRL_SPI_CONFIG_SPI_RESET_LSB) & HOST_CTRL_SPI_CONFIG_SPI_RESET_MASK)
+#define HOST_CTRL_SPI_CONFIG_INTERRUPT_ENABLE_MSB 3
+#define HOST_CTRL_SPI_CONFIG_INTERRUPT_ENABLE_LSB 3
+#define HOST_CTRL_SPI_CONFIG_INTERRUPT_ENABLE_MASK 0x00000008
+#define HOST_CTRL_SPI_CONFIG_INTERRUPT_ENABLE_GET(x) (((x) & HOST_CTRL_SPI_CONFIG_INTERRUPT_ENABLE_MASK) >> HOST_CTRL_SPI_CONFIG_INTERRUPT_ENABLE_LSB)
+#define HOST_CTRL_SPI_CONFIG_INTERRUPT_ENABLE_SET(x) (((x) << HOST_CTRL_SPI_CONFIG_INTERRUPT_ENABLE_LSB) & HOST_CTRL_SPI_CONFIG_INTERRUPT_ENABLE_MASK)
+#define HOST_CTRL_SPI_CONFIG_TEST_MODE_MSB 2
+#define HOST_CTRL_SPI_CONFIG_TEST_MODE_LSB 2
+#define HOST_CTRL_SPI_CONFIG_TEST_MODE_MASK 0x00000004
+#define HOST_CTRL_SPI_CONFIG_TEST_MODE_GET(x) (((x) & HOST_CTRL_SPI_CONFIG_TEST_MODE_MASK) >> HOST_CTRL_SPI_CONFIG_TEST_MODE_LSB)
+#define HOST_CTRL_SPI_CONFIG_TEST_MODE_SET(x) (((x) << HOST_CTRL_SPI_CONFIG_TEST_MODE_LSB) & HOST_CTRL_SPI_CONFIG_TEST_MODE_MASK)
+#define HOST_CTRL_SPI_CONFIG_DATA_SIZE_MSB 1
+#define HOST_CTRL_SPI_CONFIG_DATA_SIZE_LSB 0
+#define HOST_CTRL_SPI_CONFIG_DATA_SIZE_MASK 0x00000003
+#define HOST_CTRL_SPI_CONFIG_DATA_SIZE_GET(x) (((x) & HOST_CTRL_SPI_CONFIG_DATA_SIZE_MASK) >> HOST_CTRL_SPI_CONFIG_DATA_SIZE_LSB)
+#define HOST_CTRL_SPI_CONFIG_DATA_SIZE_SET(x) (((x) << HOST_CTRL_SPI_CONFIG_DATA_SIZE_LSB) & HOST_CTRL_SPI_CONFIG_DATA_SIZE_MASK)
+
+#define HOST_CTRL_SPI_STATUS_ADDRESS 0x00000481
+#define HOST_CTRL_SPI_STATUS_OFFSET 0x00000481
+#define HOST_CTRL_SPI_STATUS_ADDR_ERR_MSB 3
+#define HOST_CTRL_SPI_STATUS_ADDR_ERR_LSB 3
+#define HOST_CTRL_SPI_STATUS_ADDR_ERR_MASK 0x00000008
+#define HOST_CTRL_SPI_STATUS_ADDR_ERR_GET(x) (((x) & HOST_CTRL_SPI_STATUS_ADDR_ERR_MASK) >> HOST_CTRL_SPI_STATUS_ADDR_ERR_LSB)
+#define HOST_CTRL_SPI_STATUS_ADDR_ERR_SET(x) (((x) << HOST_CTRL_SPI_STATUS_ADDR_ERR_LSB) & HOST_CTRL_SPI_STATUS_ADDR_ERR_MASK)
+#define HOST_CTRL_SPI_STATUS_RD_ERR_MSB 2
+#define HOST_CTRL_SPI_STATUS_RD_ERR_LSB 2
+#define HOST_CTRL_SPI_STATUS_RD_ERR_MASK 0x00000004
+#define HOST_CTRL_SPI_STATUS_RD_ERR_GET(x) (((x) & HOST_CTRL_SPI_STATUS_RD_ERR_MASK) >> HOST_CTRL_SPI_STATUS_RD_ERR_LSB)
+#define HOST_CTRL_SPI_STATUS_RD_ERR_SET(x) (((x) << HOST_CTRL_SPI_STATUS_RD_ERR_LSB) & HOST_CTRL_SPI_STATUS_RD_ERR_MASK)
+#define HOST_CTRL_SPI_STATUS_WR_ERR_MSB 1
+#define HOST_CTRL_SPI_STATUS_WR_ERR_LSB 1
+#define HOST_CTRL_SPI_STATUS_WR_ERR_MASK 0x00000002
+#define HOST_CTRL_SPI_STATUS_WR_ERR_GET(x) (((x) & HOST_CTRL_SPI_STATUS_WR_ERR_MASK) >> HOST_CTRL_SPI_STATUS_WR_ERR_LSB)
+#define HOST_CTRL_SPI_STATUS_WR_ERR_SET(x) (((x) << HOST_CTRL_SPI_STATUS_WR_ERR_LSB) & HOST_CTRL_SPI_STATUS_WR_ERR_MASK)
+#define HOST_CTRL_SPI_STATUS_READY_MSB 0
+#define HOST_CTRL_SPI_STATUS_READY_LSB 0
+#define HOST_CTRL_SPI_STATUS_READY_MASK 0x00000001
+#define HOST_CTRL_SPI_STATUS_READY_GET(x) (((x) & HOST_CTRL_SPI_STATUS_READY_MASK) >> HOST_CTRL_SPI_STATUS_READY_LSB)
+#define HOST_CTRL_SPI_STATUS_READY_SET(x) (((x) << HOST_CTRL_SPI_STATUS_READY_LSB) & HOST_CTRL_SPI_STATUS_READY_MASK)
+
+#define NON_ASSOC_SLEEP_EN_ADDRESS 0x00000482
+#define NON_ASSOC_SLEEP_EN_OFFSET 0x00000482
+#define NON_ASSOC_SLEEP_EN_BIT_MSB 0
+#define NON_ASSOC_SLEEP_EN_BIT_LSB 0
+#define NON_ASSOC_SLEEP_EN_BIT_MASK 0x00000001
+#define NON_ASSOC_SLEEP_EN_BIT_GET(x) (((x) & NON_ASSOC_SLEEP_EN_BIT_MASK) >> NON_ASSOC_SLEEP_EN_BIT_LSB)
+#define NON_ASSOC_SLEEP_EN_BIT_SET(x) (((x) << NON_ASSOC_SLEEP_EN_BIT_LSB) & NON_ASSOC_SLEEP_EN_BIT_MASK)
+
+#define CPU_DBG_SEL_ADDRESS 0x00000483
+#define CPU_DBG_SEL_OFFSET 0x00000483
+#define CPU_DBG_SEL_BIT_MSB 5
+#define CPU_DBG_SEL_BIT_LSB 0
+#define CPU_DBG_SEL_BIT_MASK 0x0000003f
+#define CPU_DBG_SEL_BIT_GET(x) (((x) & CPU_DBG_SEL_BIT_MASK) >> CPU_DBG_SEL_BIT_LSB)
+#define CPU_DBG_SEL_BIT_SET(x) (((x) << CPU_DBG_SEL_BIT_LSB) & CPU_DBG_SEL_BIT_MASK)
+
+#define CPU_DBG_ADDRESS 0x00000484
+#define CPU_DBG_OFFSET 0x00000484
+#define CPU_DBG_DATA_MSB 7
+#define CPU_DBG_DATA_LSB 0
+#define CPU_DBG_DATA_MASK 0x000000ff
+#define CPU_DBG_DATA_GET(x) (((x) & CPU_DBG_DATA_MASK) >> CPU_DBG_DATA_LSB)
+#define CPU_DBG_DATA_SET(x) (((x) << CPU_DBG_DATA_LSB) & CPU_DBG_DATA_MASK)
+
+#define INT_STATUS2_ENABLE_ADDRESS 0x00000488
+#define INT_STATUS2_ENABLE_OFFSET 0x00000488
+#define INT_STATUS2_ENABLE_GMBOX_RX_UNDERFLOW_MSB 2
+#define INT_STATUS2_ENABLE_GMBOX_RX_UNDERFLOW_LSB 2
+#define INT_STATUS2_ENABLE_GMBOX_RX_UNDERFLOW_MASK 0x00000004
+#define INT_STATUS2_ENABLE_GMBOX_RX_UNDERFLOW_GET(x) (((x) & INT_STATUS2_ENABLE_GMBOX_RX_UNDERFLOW_MASK) >> INT_STATUS2_ENABLE_GMBOX_RX_UNDERFLOW_LSB)
+#define INT_STATUS2_ENABLE_GMBOX_RX_UNDERFLOW_SET(x) (((x) << INT_STATUS2_ENABLE_GMBOX_RX_UNDERFLOW_LSB) & INT_STATUS2_ENABLE_GMBOX_RX_UNDERFLOW_MASK)
+#define INT_STATUS2_ENABLE_GMBOX_TX_OVERFLOW_MSB 1
+#define INT_STATUS2_ENABLE_GMBOX_TX_OVERFLOW_LSB 1
+#define INT_STATUS2_ENABLE_GMBOX_TX_OVERFLOW_MASK 0x00000002
+#define INT_STATUS2_ENABLE_GMBOX_TX_OVERFLOW_GET(x) (((x) & INT_STATUS2_ENABLE_GMBOX_TX_OVERFLOW_MASK) >> INT_STATUS2_ENABLE_GMBOX_TX_OVERFLOW_LSB)
+#define INT_STATUS2_ENABLE_GMBOX_TX_OVERFLOW_SET(x) (((x) << INT_STATUS2_ENABLE_GMBOX_TX_OVERFLOW_LSB) & INT_STATUS2_ENABLE_GMBOX_TX_OVERFLOW_MASK)
+#define INT_STATUS2_ENABLE_GMBOX_DATA_MSB 0
+#define INT_STATUS2_ENABLE_GMBOX_DATA_LSB 0
+#define INT_STATUS2_ENABLE_GMBOX_DATA_MASK 0x00000001
+#define INT_STATUS2_ENABLE_GMBOX_DATA_GET(x) (((x) & INT_STATUS2_ENABLE_GMBOX_DATA_MASK) >> INT_STATUS2_ENABLE_GMBOX_DATA_LSB)
+#define INT_STATUS2_ENABLE_GMBOX_DATA_SET(x) (((x) << INT_STATUS2_ENABLE_GMBOX_DATA_LSB) & INT_STATUS2_ENABLE_GMBOX_DATA_MASK)
+
+#define GMBOX_RX_LOOKAHEAD_ADDRESS 0x00000490
+#define GMBOX_RX_LOOKAHEAD_OFFSET 0x00000490
+#define GMBOX_RX_LOOKAHEAD_DATA_MSB 7
+#define GMBOX_RX_LOOKAHEAD_DATA_LSB 0
+#define GMBOX_RX_LOOKAHEAD_DATA_MASK 0x000000ff
+#define GMBOX_RX_LOOKAHEAD_DATA_GET(x) (((x) & GMBOX_RX_LOOKAHEAD_DATA_MASK) >> GMBOX_RX_LOOKAHEAD_DATA_LSB)
+#define GMBOX_RX_LOOKAHEAD_DATA_SET(x) (((x) << GMBOX_RX_LOOKAHEAD_DATA_LSB) & GMBOX_RX_LOOKAHEAD_DATA_MASK)
+
+#define GMBOX_RX_LOOKAHEAD_MUX_ADDRESS 0x00000498
+#define GMBOX_RX_LOOKAHEAD_MUX_OFFSET 0x00000498
+#define GMBOX_RX_LOOKAHEAD_MUX_SEL_MSB 0
+#define GMBOX_RX_LOOKAHEAD_MUX_SEL_LSB 0
+#define GMBOX_RX_LOOKAHEAD_MUX_SEL_MASK 0x00000001
+#define GMBOX_RX_LOOKAHEAD_MUX_SEL_GET(x) (((x) & GMBOX_RX_LOOKAHEAD_MUX_SEL_MASK) >> GMBOX_RX_LOOKAHEAD_MUX_SEL_LSB)
+#define GMBOX_RX_LOOKAHEAD_MUX_SEL_SET(x) (((x) << GMBOX_RX_LOOKAHEAD_MUX_SEL_LSB) & GMBOX_RX_LOOKAHEAD_MUX_SEL_MASK)
+
+#define CIS_WINDOW_ADDRESS 0x00000600
+#define CIS_WINDOW_OFFSET 0x00000600
+#define CIS_WINDOW_DATA_MSB 7
+#define CIS_WINDOW_DATA_LSB 0
+#define CIS_WINDOW_DATA_MASK 0x000000ff
+#define CIS_WINDOW_DATA_GET(x) (((x) & CIS_WINDOW_DATA_MASK) >> CIS_WINDOW_DATA_LSB)
+#define CIS_WINDOW_DATA_SET(x) (((x) << CIS_WINDOW_DATA_LSB) & CIS_WINDOW_DATA_MASK)
+
+
+#ifndef __ASSEMBLER__
+
+typedef struct mbox_wlan_host_reg_reg_s {
+ unsigned char pad0[1024]; /* pad to 0x400 */
+ volatile unsigned char host_int_status;
+ volatile unsigned char cpu_int_status;
+ volatile unsigned char error_int_status;
+ volatile unsigned char counter_int_status;
+ volatile unsigned char mbox_frame;
+ volatile unsigned char rx_lookahead_valid;
+ volatile unsigned char host_int_status2;
+ volatile unsigned char gmbox_rx_avail;
+ volatile unsigned char rx_lookahead0[4];
+ volatile unsigned char rx_lookahead1[4];
+ volatile unsigned char rx_lookahead2[4];
+ volatile unsigned char rx_lookahead3[4];
+ volatile unsigned char int_status_enable;
+ volatile unsigned char cpu_int_status_enable;
+ volatile unsigned char error_status_enable;
+ volatile unsigned char counter_int_status_enable;
+ unsigned char pad1[4]; /* pad to 0x420 */
+ volatile unsigned char count[8];
+ unsigned char pad2[24]; /* pad to 0x440 */
+ volatile unsigned char count_dec[32];
+ volatile unsigned char scratch[8];
+ volatile unsigned char fifo_timeout;
+ volatile unsigned char fifo_timeout_enable;
+ volatile unsigned char disable_sleep;
+ unsigned char pad3[5]; /* pad to 0x470 */
+ volatile unsigned char local_bus;
+ unsigned char pad4[1]; /* pad to 0x472 */
+ volatile unsigned char int_wlan;
+ unsigned char pad5[1]; /* pad to 0x474 */
+ volatile unsigned char window_data[4];
+ volatile unsigned char window_write_addr[4];
+ volatile unsigned char window_read_addr[4];
+ volatile unsigned char host_ctrl_spi_config;
+ volatile unsigned char host_ctrl_spi_status;
+ volatile unsigned char non_assoc_sleep_en;
+ volatile unsigned char cpu_dbg_sel;
+ volatile unsigned char cpu_dbg[4];
+ volatile unsigned char int_status2_enable;
+ unsigned char pad6[7]; /* pad to 0x490 */
+ volatile unsigned char gmbox_rx_lookahead[8];
+ volatile unsigned char gmbox_rx_lookahead_mux;
+ unsigned char pad7[359]; /* pad to 0x600 */
+ volatile unsigned char cis_window[512];
+} mbox_wlan_host_reg_reg_t;
+
+#endif /* __ASSEMBLER__ */
+
+#endif /* _MBOX_WLAN_HOST_REG_H_ */
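The generated accessors above all follow one pattern: each register gets an _ADDRESS/_OFFSET pair plus per-field _MSB/_LSB/_MASK values and _GET/_SET helpers. The standalone sketch below is editorial, not part of the patch: the macro definitions are copied verbatim from the header above, while the plain variable and printf merely stand in for whatever bus accessor a real driver would use.

#include <stdio.h>

/* Field accessors copied verbatim from mbox_wlan_host_reg.h above. */
#define ERROR_STATUS_ENABLE_RX_UNDERFLOW_LSB 1
#define ERROR_STATUS_ENABLE_RX_UNDERFLOW_MASK 0x00000002
#define ERROR_STATUS_ENABLE_RX_UNDERFLOW_SET(x) (((x) << ERROR_STATUS_ENABLE_RX_UNDERFLOW_LSB) & ERROR_STATUS_ENABLE_RX_UNDERFLOW_MASK)
#define ERROR_STATUS_ENABLE_TX_OVERFLOW_LSB 0
#define ERROR_STATUS_ENABLE_TX_OVERFLOW_MASK 0x00000001
#define ERROR_STATUS_ENABLE_TX_OVERFLOW_SET(x) (((x) << ERROR_STATUS_ENABLE_TX_OVERFLOW_LSB) & ERROR_STATUS_ENABLE_TX_OVERFLOW_MASK)
#define ERROR_STATUS_ENABLE_TX_OVERFLOW_GET(x) (((x) & ERROR_STATUS_ENABLE_TX_OVERFLOW_MASK) >> ERROR_STATUS_ENABLE_TX_OVERFLOW_LSB)

int main(void)
{
	unsigned int reg = 0;

	/* Compose the 8-bit ERROR_STATUS_ENABLE value field by field. */
	reg |= ERROR_STATUS_ENABLE_RX_UNDERFLOW_SET(1);
	reg |= ERROR_STATUS_ENABLE_TX_OVERFLOW_SET(1);
	printf("ERROR_STATUS_ENABLE = 0x%02x\n", reg);	/* prints 0x03 */

	/* Decode a single field back out of the raw register value. */
	printf("tx_overflow enabled = %u\n",
	       ERROR_STATUS_ENABLE_TX_OVERFLOW_GET(reg));
	return 0;
}

The _SET macros take the field value (not a pre-shifted one) and return it already positioned and masked, so several of them can be OR-ed together to build a full register image before a single bus write, and _GET reverses the operation on a raw read-back value.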
diff --git a/drivers/staging/ath6kl/include/common/AR6002/hw4.0/hw/mbox_wlan_reg.h b/drivers/staging/ath6kl/include/common/AR6002/hw4.0/hw/mbox_wlan_reg.h
new file mode 100644
index 000000000000..e00270fc1450
--- /dev/null
+++ b/drivers/staging/ath6kl/include/common/AR6002/hw4.0/hw/mbox_wlan_reg.h
@@ -0,0 +1,638 @@
+// ------------------------------------------------------------------
+// Copyright (c) 2004-2010 Atheros Corporation. All rights reserved.
+//
+//
+// Permission to use, copy, modify, and/or distribute this software for any
+// purpose with or without fee is hereby granted, provided that the above
+// copyright notice and this permission notice appear in all copies.
+//
+// THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+// WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+// MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+// ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+// WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+// ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+// OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+//
+//
+// ------------------------------------------------------------------
+//===================================================================
+// Author(s): Atheros
+//===================================================================
+
+
+#ifndef _MBOX_WLAN_REG_REG_H_
+#define _MBOX_WLAN_REG_REG_H_
+
+#define WLAN_MBOX_FIFO_ADDRESS 0x00000000
+#define WLAN_MBOX_FIFO_OFFSET 0x00000000
+#define WLAN_MBOX_FIFO_DATA_MSB 19
+#define WLAN_MBOX_FIFO_DATA_LSB 0
+#define WLAN_MBOX_FIFO_DATA_MASK 0x000fffff
+#define WLAN_MBOX_FIFO_DATA_GET(x) (((x) & WLAN_MBOX_FIFO_DATA_MASK) >> WLAN_MBOX_FIFO_DATA_LSB)
+#define WLAN_MBOX_FIFO_DATA_SET(x) (((x) << WLAN_MBOX_FIFO_DATA_LSB) & WLAN_MBOX_FIFO_DATA_MASK)
+
+#define WLAN_MBOX_FIFO_STATUS_ADDRESS 0x00000010
+#define WLAN_MBOX_FIFO_STATUS_OFFSET 0x00000010
+#define WLAN_MBOX_FIFO_STATUS_EMPTY_MSB 19
+#define WLAN_MBOX_FIFO_STATUS_EMPTY_LSB 16
+#define WLAN_MBOX_FIFO_STATUS_EMPTY_MASK 0x000f0000
+#define WLAN_MBOX_FIFO_STATUS_EMPTY_GET(x) (((x) & WLAN_MBOX_FIFO_STATUS_EMPTY_MASK) >> WLAN_MBOX_FIFO_STATUS_EMPTY_LSB)
+#define WLAN_MBOX_FIFO_STATUS_EMPTY_SET(x) (((x) << WLAN_MBOX_FIFO_STATUS_EMPTY_LSB) & WLAN_MBOX_FIFO_STATUS_EMPTY_MASK)
+#define WLAN_MBOX_FIFO_STATUS_FULL_MSB 15
+#define WLAN_MBOX_FIFO_STATUS_FULL_LSB 12
+#define WLAN_MBOX_FIFO_STATUS_FULL_MASK 0x0000f000
+#define WLAN_MBOX_FIFO_STATUS_FULL_GET(x) (((x) & WLAN_MBOX_FIFO_STATUS_FULL_MASK) >> WLAN_MBOX_FIFO_STATUS_FULL_LSB)
+#define WLAN_MBOX_FIFO_STATUS_FULL_SET(x) (((x) << WLAN_MBOX_FIFO_STATUS_FULL_LSB) & WLAN_MBOX_FIFO_STATUS_FULL_MASK)
+
+#define WLAN_MBOX_DMA_POLICY_ADDRESS 0x00000014
+#define WLAN_MBOX_DMA_POLICY_OFFSET 0x00000014
+#define WLAN_MBOX_DMA_POLICY_TX_QUANTUM_MSB 3
+#define WLAN_MBOX_DMA_POLICY_TX_QUANTUM_LSB 3
+#define WLAN_MBOX_DMA_POLICY_TX_QUANTUM_MASK 0x00000008
+#define WLAN_MBOX_DMA_POLICY_TX_QUANTUM_GET(x) (((x) & WLAN_MBOX_DMA_POLICY_TX_QUANTUM_MASK) >> WLAN_MBOX_DMA_POLICY_TX_QUANTUM_LSB)
+#define WLAN_MBOX_DMA_POLICY_TX_QUANTUM_SET(x) (((x) << WLAN_MBOX_DMA_POLICY_TX_QUANTUM_LSB) & WLAN_MBOX_DMA_POLICY_TX_QUANTUM_MASK)
+#define WLAN_MBOX_DMA_POLICY_TX_ORDER_MSB 2
+#define WLAN_MBOX_DMA_POLICY_TX_ORDER_LSB 2
+#define WLAN_MBOX_DMA_POLICY_TX_ORDER_MASK 0x00000004
+#define WLAN_MBOX_DMA_POLICY_TX_ORDER_GET(x) (((x) & WLAN_MBOX_DMA_POLICY_TX_ORDER_MASK) >> WLAN_MBOX_DMA_POLICY_TX_ORDER_LSB)
+#define WLAN_MBOX_DMA_POLICY_TX_ORDER_SET(x) (((x) << WLAN_MBOX_DMA_POLICY_TX_ORDER_LSB) & WLAN_MBOX_DMA_POLICY_TX_ORDER_MASK)
+#define WLAN_MBOX_DMA_POLICY_RX_QUANTUM_MSB 1
+#define WLAN_MBOX_DMA_POLICY_RX_QUANTUM_LSB 1
+#define WLAN_MBOX_DMA_POLICY_RX_QUANTUM_MASK 0x00000002
+#define WLAN_MBOX_DMA_POLICY_RX_QUANTUM_GET(x) (((x) & WLAN_MBOX_DMA_POLICY_RX_QUANTUM_MASK) >> WLAN_MBOX_DMA_POLICY_RX_QUANTUM_LSB)
+#define WLAN_MBOX_DMA_POLICY_RX_QUANTUM_SET(x) (((x) << WLAN_MBOX_DMA_POLICY_RX_QUANTUM_LSB) & WLAN_MBOX_DMA_POLICY_RX_QUANTUM_MASK)
+#define WLAN_MBOX_DMA_POLICY_RX_ORDER_MSB 0
+#define WLAN_MBOX_DMA_POLICY_RX_ORDER_LSB 0
+#define WLAN_MBOX_DMA_POLICY_RX_ORDER_MASK 0x00000001
+#define WLAN_MBOX_DMA_POLICY_RX_ORDER_GET(x) (((x) & WLAN_MBOX_DMA_POLICY_RX_ORDER_MASK) >> WLAN_MBOX_DMA_POLICY_RX_ORDER_LSB)
+#define WLAN_MBOX_DMA_POLICY_RX_ORDER_SET(x) (((x) << WLAN_MBOX_DMA_POLICY_RX_ORDER_LSB) & WLAN_MBOX_DMA_POLICY_RX_ORDER_MASK)
+
+#define WLAN_MBOX0_DMA_RX_DESCRIPTOR_BASE_ADDRESS 0x00000018
+#define WLAN_MBOX0_DMA_RX_DESCRIPTOR_BASE_OFFSET 0x00000018
+#define WLAN_MBOX0_DMA_RX_DESCRIPTOR_BASE_ADDRESS_MSB 27
+#define WLAN_MBOX0_DMA_RX_DESCRIPTOR_BASE_ADDRESS_LSB 2
+#define WLAN_MBOX0_DMA_RX_DESCRIPTOR_BASE_ADDRESS_MASK 0x0ffffffc
+#define WLAN_MBOX0_DMA_RX_DESCRIPTOR_BASE_ADDRESS_GET(x) (((x) & WLAN_MBOX0_DMA_RX_DESCRIPTOR_BASE_ADDRESS_MASK) >> WLAN_MBOX0_DMA_RX_DESCRIPTOR_BASE_ADDRESS_LSB)
+#define WLAN_MBOX0_DMA_RX_DESCRIPTOR_BASE_ADDRESS_SET(x) (((x) << WLAN_MBOX0_DMA_RX_DESCRIPTOR_BASE_ADDRESS_LSB) & WLAN_MBOX0_DMA_RX_DESCRIPTOR_BASE_ADDRESS_MASK)
+
+#define WLAN_MBOX0_DMA_RX_CONTROL_ADDRESS 0x0000001c
+#define WLAN_MBOX0_DMA_RX_CONTROL_OFFSET 0x0000001c
+#define WLAN_MBOX0_DMA_RX_CONTROL_RESUME_MSB 2
+#define WLAN_MBOX0_DMA_RX_CONTROL_RESUME_LSB 2
+#define WLAN_MBOX0_DMA_RX_CONTROL_RESUME_MASK 0x00000004
+#define WLAN_MBOX0_DMA_RX_CONTROL_RESUME_GET(x) (((x) & WLAN_MBOX0_DMA_RX_CONTROL_RESUME_MASK) >> WLAN_MBOX0_DMA_RX_CONTROL_RESUME_LSB)
+#define WLAN_MBOX0_DMA_RX_CONTROL_RESUME_SET(x) (((x) << WLAN_MBOX0_DMA_RX_CONTROL_RESUME_LSB) & WLAN_MBOX0_DMA_RX_CONTROL_RESUME_MASK)
+#define WLAN_MBOX0_DMA_RX_CONTROL_START_MSB 1
+#define WLAN_MBOX0_DMA_RX_CONTROL_START_LSB 1
+#define WLAN_MBOX0_DMA_RX_CONTROL_START_MASK 0x00000002
+#define WLAN_MBOX0_DMA_RX_CONTROL_START_GET(x) (((x) & WLAN_MBOX0_DMA_RX_CONTROL_START_MASK) >> WLAN_MBOX0_DMA_RX_CONTROL_START_LSB)
+#define WLAN_MBOX0_DMA_RX_CONTROL_START_SET(x) (((x) << WLAN_MBOX0_DMA_RX_CONTROL_START_LSB) & WLAN_MBOX0_DMA_RX_CONTROL_START_MASK)
+#define WLAN_MBOX0_DMA_RX_CONTROL_STOP_MSB 0
+#define WLAN_MBOX0_DMA_RX_CONTROL_STOP_LSB 0
+#define WLAN_MBOX0_DMA_RX_CONTROL_STOP_MASK 0x00000001
+#define WLAN_MBOX0_DMA_RX_CONTROL_STOP_GET(x) (((x) & WLAN_MBOX0_DMA_RX_CONTROL_STOP_MASK) >> WLAN_MBOX0_DMA_RX_CONTROL_STOP_LSB)
+#define WLAN_MBOX0_DMA_RX_CONTROL_STOP_SET(x) (((x) << WLAN_MBOX0_DMA_RX_CONTROL_STOP_LSB) & WLAN_MBOX0_DMA_RX_CONTROL_STOP_MASK)
+
+#define WLAN_MBOX0_DMA_TX_DESCRIPTOR_BASE_ADDRESS 0x00000020
+#define WLAN_MBOX0_DMA_TX_DESCRIPTOR_BASE_OFFSET 0x00000020
+#define WLAN_MBOX0_DMA_TX_DESCRIPTOR_BASE_ADDRESS_MSB 27
+#define WLAN_MBOX0_DMA_TX_DESCRIPTOR_BASE_ADDRESS_LSB 2
+#define WLAN_MBOX0_DMA_TX_DESCRIPTOR_BASE_ADDRESS_MASK 0x0ffffffc
+#define WLAN_MBOX0_DMA_TX_DESCRIPTOR_BASE_ADDRESS_GET(x) (((x) & WLAN_MBOX0_DMA_TX_DESCRIPTOR_BASE_ADDRESS_MASK) >> WLAN_MBOX0_DMA_TX_DESCRIPTOR_BASE_ADDRESS_LSB)
+#define WLAN_MBOX0_DMA_TX_DESCRIPTOR_BASE_ADDRESS_SET(x) (((x) << WLAN_MBOX0_DMA_TX_DESCRIPTOR_BASE_ADDRESS_LSB) & WLAN_MBOX0_DMA_TX_DESCRIPTOR_BASE_ADDRESS_MASK)
+
+#define WLAN_MBOX0_DMA_TX_CONTROL_ADDRESS 0x00000024
+#define WLAN_MBOX0_DMA_TX_CONTROL_OFFSET 0x00000024
+#define WLAN_MBOX0_DMA_TX_CONTROL_RESUME_MSB 2
+#define WLAN_MBOX0_DMA_TX_CONTROL_RESUME_LSB 2
+#define WLAN_MBOX0_DMA_TX_CONTROL_RESUME_MASK 0x00000004
+#define WLAN_MBOX0_DMA_TX_CONTROL_RESUME_GET(x) (((x) & WLAN_MBOX0_DMA_TX_CONTROL_RESUME_MASK) >> WLAN_MBOX0_DMA_TX_CONTROL_RESUME_LSB)
+#define WLAN_MBOX0_DMA_TX_CONTROL_RESUME_SET(x) (((x) << WLAN_MBOX0_DMA_TX_CONTROL_RESUME_LSB) & WLAN_MBOX0_DMA_TX_CONTROL_RESUME_MASK)
+#define WLAN_MBOX0_DMA_TX_CONTROL_START_MSB 1
+#define WLAN_MBOX0_DMA_TX_CONTROL_START_LSB 1
+#define WLAN_MBOX0_DMA_TX_CONTROL_START_MASK 0x00000002
+#define WLAN_MBOX0_DMA_TX_CONTROL_START_GET(x) (((x) & WLAN_MBOX0_DMA_TX_CONTROL_START_MASK) >> WLAN_MBOX0_DMA_TX_CONTROL_START_LSB)
+#define WLAN_MBOX0_DMA_TX_CONTROL_START_SET(x) (((x) << WLAN_MBOX0_DMA_TX_CONTROL_START_LSB) & WLAN_MBOX0_DMA_TX_CONTROL_START_MASK)
+#define WLAN_MBOX0_DMA_TX_CONTROL_STOP_MSB 0
+#define WLAN_MBOX0_DMA_TX_CONTROL_STOP_LSB 0
+#define WLAN_MBOX0_DMA_TX_CONTROL_STOP_MASK 0x00000001
+#define WLAN_MBOX0_DMA_TX_CONTROL_STOP_GET(x) (((x) & WLAN_MBOX0_DMA_TX_CONTROL_STOP_MASK) >> WLAN_MBOX0_DMA_TX_CONTROL_STOP_LSB)
+#define WLAN_MBOX0_DMA_TX_CONTROL_STOP_SET(x) (((x) << WLAN_MBOX0_DMA_TX_CONTROL_STOP_LSB) & WLAN_MBOX0_DMA_TX_CONTROL_STOP_MASK)
+
+#define WLAN_MBOX1_DMA_RX_DESCRIPTOR_BASE_ADDRESS 0x00000028
+#define WLAN_MBOX1_DMA_RX_DESCRIPTOR_BASE_OFFSET 0x00000028
+#define WLAN_MBOX1_DMA_RX_DESCRIPTOR_BASE_ADDRESS_MSB 27
+#define WLAN_MBOX1_DMA_RX_DESCRIPTOR_BASE_ADDRESS_LSB 2
+#define WLAN_MBOX1_DMA_RX_DESCRIPTOR_BASE_ADDRESS_MASK 0x0ffffffc
+#define WLAN_MBOX1_DMA_RX_DESCRIPTOR_BASE_ADDRESS_GET(x) (((x) & WLAN_MBOX1_DMA_RX_DESCRIPTOR_BASE_ADDRESS_MASK) >> WLAN_MBOX1_DMA_RX_DESCRIPTOR_BASE_ADDRESS_LSB)
+#define WLAN_MBOX1_DMA_RX_DESCRIPTOR_BASE_ADDRESS_SET(x) (((x) << WLAN_MBOX1_DMA_RX_DESCRIPTOR_BASE_ADDRESS_LSB) & WLAN_MBOX1_DMA_RX_DESCRIPTOR_BASE_ADDRESS_MASK)
+
+#define WLAN_MBOX1_DMA_RX_CONTROL_ADDRESS 0x0000002c
+#define WLAN_MBOX1_DMA_RX_CONTROL_OFFSET 0x0000002c
+#define WLAN_MBOX1_DMA_RX_CONTROL_RESUME_MSB 2
+#define WLAN_MBOX1_DMA_RX_CONTROL_RESUME_LSB 2
+#define WLAN_MBOX1_DMA_RX_CONTROL_RESUME_MASK 0x00000004
+#define WLAN_MBOX1_DMA_RX_CONTROL_RESUME_GET(x) (((x) & WLAN_MBOX1_DMA_RX_CONTROL_RESUME_MASK) >> WLAN_MBOX1_DMA_RX_CONTROL_RESUME_LSB)
+#define WLAN_MBOX1_DMA_RX_CONTROL_RESUME_SET(x) (((x) << WLAN_MBOX1_DMA_RX_CONTROL_RESUME_LSB) & WLAN_MBOX1_DMA_RX_CONTROL_RESUME_MASK)
+#define WLAN_MBOX1_DMA_RX_CONTROL_START_MSB 1
+#define WLAN_MBOX1_DMA_RX_CONTROL_START_LSB 1
+#define WLAN_MBOX1_DMA_RX_CONTROL_START_MASK 0x00000002
+#define WLAN_MBOX1_DMA_RX_CONTROL_START_GET(x) (((x) & WLAN_MBOX1_DMA_RX_CONTROL_START_MASK) >> WLAN_MBOX1_DMA_RX_CONTROL_START_LSB)
+#define WLAN_MBOX1_DMA_RX_CONTROL_START_SET(x) (((x) << WLAN_MBOX1_DMA_RX_CONTROL_START_LSB) & WLAN_MBOX1_DMA_RX_CONTROL_START_MASK)
+#define WLAN_MBOX1_DMA_RX_CONTROL_STOP_MSB 0
+#define WLAN_MBOX1_DMA_RX_CONTROL_STOP_LSB 0
+#define WLAN_MBOX1_DMA_RX_CONTROL_STOP_MASK 0x00000001
+#define WLAN_MBOX1_DMA_RX_CONTROL_STOP_GET(x) (((x) & WLAN_MBOX1_DMA_RX_CONTROL_STOP_MASK) >> WLAN_MBOX1_DMA_RX_CONTROL_STOP_LSB)
+#define WLAN_MBOX1_DMA_RX_CONTROL_STOP_SET(x) (((x) << WLAN_MBOX1_DMA_RX_CONTROL_STOP_LSB) & WLAN_MBOX1_DMA_RX_CONTROL_STOP_MASK)
+
+#define WLAN_MBOX1_DMA_TX_DESCRIPTOR_BASE_ADDRESS 0x00000030
+#define WLAN_MBOX1_DMA_TX_DESCRIPTOR_BASE_OFFSET 0x00000030
+#define WLAN_MBOX1_DMA_TX_DESCRIPTOR_BASE_ADDRESS_MSB 27
+#define WLAN_MBOX1_DMA_TX_DESCRIPTOR_BASE_ADDRESS_LSB 2
+#define WLAN_MBOX1_DMA_TX_DESCRIPTOR_BASE_ADDRESS_MASK 0x0ffffffc
+#define WLAN_MBOX1_DMA_TX_DESCRIPTOR_BASE_ADDRESS_GET(x) (((x) & WLAN_MBOX1_DMA_TX_DESCRIPTOR_BASE_ADDRESS_MASK) >> WLAN_MBOX1_DMA_TX_DESCRIPTOR_BASE_ADDRESS_LSB)
+#define WLAN_MBOX1_DMA_TX_DESCRIPTOR_BASE_ADDRESS_SET(x) (((x) << WLAN_MBOX1_DMA_TX_DESCRIPTOR_BASE_ADDRESS_LSB) & WLAN_MBOX1_DMA_TX_DESCRIPTOR_BASE_ADDRESS_MASK)
+
+#define WLAN_MBOX1_DMA_TX_CONTROL_ADDRESS 0x00000034
+#define WLAN_MBOX1_DMA_TX_CONTROL_OFFSET 0x00000034
+#define WLAN_MBOX1_DMA_TX_CONTROL_RESUME_MSB 2
+#define WLAN_MBOX1_DMA_TX_CONTROL_RESUME_LSB 2
+#define WLAN_MBOX1_DMA_TX_CONTROL_RESUME_MASK 0x00000004
+#define WLAN_MBOX1_DMA_TX_CONTROL_RESUME_GET(x) (((x) & WLAN_MBOX1_DMA_TX_CONTROL_RESUME_MASK) >> WLAN_MBOX1_DMA_TX_CONTROL_RESUME_LSB)
+#define WLAN_MBOX1_DMA_TX_CONTROL_RESUME_SET(x) (((x) << WLAN_MBOX1_DMA_TX_CONTROL_RESUME_LSB) & WLAN_MBOX1_DMA_TX_CONTROL_RESUME_MASK)
+#define WLAN_MBOX1_DMA_TX_CONTROL_START_MSB 1
+#define WLAN_MBOX1_DMA_TX_CONTROL_START_LSB 1
+#define WLAN_MBOX1_DMA_TX_CONTROL_START_MASK 0x00000002
+#define WLAN_MBOX1_DMA_TX_CONTROL_START_GET(x) (((x) & WLAN_MBOX1_DMA_TX_CONTROL_START_MASK) >> WLAN_MBOX1_DMA_TX_CONTROL_START_LSB)
+#define WLAN_MBOX1_DMA_TX_CONTROL_START_SET(x) (((x) << WLAN_MBOX1_DMA_TX_CONTROL_START_LSB) & WLAN_MBOX1_DMA_TX_CONTROL_START_MASK)
+#define WLAN_MBOX1_DMA_TX_CONTROL_STOP_MSB 0
+#define WLAN_MBOX1_DMA_TX_CONTROL_STOP_LSB 0
+#define WLAN_MBOX1_DMA_TX_CONTROL_STOP_MASK 0x00000001
+#define WLAN_MBOX1_DMA_TX_CONTROL_STOP_GET(x) (((x) & WLAN_MBOX1_DMA_TX_CONTROL_STOP_MASK) >> WLAN_MBOX1_DMA_TX_CONTROL_STOP_LSB)
+#define WLAN_MBOX1_DMA_TX_CONTROL_STOP_SET(x) (((x) << WLAN_MBOX1_DMA_TX_CONTROL_STOP_LSB) & WLAN_MBOX1_DMA_TX_CONTROL_STOP_MASK)
+
+#define WLAN_MBOX2_DMA_RX_DESCRIPTOR_BASE_ADDRESS 0x00000038
+#define WLAN_MBOX2_DMA_RX_DESCRIPTOR_BASE_OFFSET 0x00000038
+#define WLAN_MBOX2_DMA_RX_DESCRIPTOR_BASE_ADDRESS_MSB 27
+#define WLAN_MBOX2_DMA_RX_DESCRIPTOR_BASE_ADDRESS_LSB 2
+#define WLAN_MBOX2_DMA_RX_DESCRIPTOR_BASE_ADDRESS_MASK 0x0ffffffc
+#define WLAN_MBOX2_DMA_RX_DESCRIPTOR_BASE_ADDRESS_GET(x) (((x) & WLAN_MBOX2_DMA_RX_DESCRIPTOR_BASE_ADDRESS_MASK) >> WLAN_MBOX2_DMA_RX_DESCRIPTOR_BASE_ADDRESS_LSB)
+#define WLAN_MBOX2_DMA_RX_DESCRIPTOR_BASE_ADDRESS_SET(x) (((x) << WLAN_MBOX2_DMA_RX_DESCRIPTOR_BASE_ADDRESS_LSB) & WLAN_MBOX2_DMA_RX_DESCRIPTOR_BASE_ADDRESS_MASK)
+
+#define WLAN_MBOX2_DMA_RX_CONTROL_ADDRESS 0x0000003c
+#define WLAN_MBOX2_DMA_RX_CONTROL_OFFSET 0x0000003c
+#define WLAN_MBOX2_DMA_RX_CONTROL_RESUME_MSB 2
+#define WLAN_MBOX2_DMA_RX_CONTROL_RESUME_LSB 2
+#define WLAN_MBOX2_DMA_RX_CONTROL_RESUME_MASK 0x00000004
+#define WLAN_MBOX2_DMA_RX_CONTROL_RESUME_GET(x) (((x) & WLAN_MBOX2_DMA_RX_CONTROL_RESUME_MASK) >> WLAN_MBOX2_DMA_RX_CONTROL_RESUME_LSB)
+#define WLAN_MBOX2_DMA_RX_CONTROL_RESUME_SET(x) (((x) << WLAN_MBOX2_DMA_RX_CONTROL_RESUME_LSB) & WLAN_MBOX2_DMA_RX_CONTROL_RESUME_MASK)
+#define WLAN_MBOX2_DMA_RX_CONTROL_START_MSB 1
+#define WLAN_MBOX2_DMA_RX_CONTROL_START_LSB 1
+#define WLAN_MBOX2_DMA_RX_CONTROL_START_MASK 0x00000002
+#define WLAN_MBOX2_DMA_RX_CONTROL_START_GET(x) (((x) & WLAN_MBOX2_DMA_RX_CONTROL_START_MASK) >> WLAN_MBOX2_DMA_RX_CONTROL_START_LSB)
+#define WLAN_MBOX2_DMA_RX_CONTROL_START_SET(x) (((x) << WLAN_MBOX2_DMA_RX_CONTROL_START_LSB) & WLAN_MBOX2_DMA_RX_CONTROL_START_MASK)
+#define WLAN_MBOX2_DMA_RX_CONTROL_STOP_MSB 0
+#define WLAN_MBOX2_DMA_RX_CONTROL_STOP_LSB 0
+#define WLAN_MBOX2_DMA_RX_CONTROL_STOP_MASK 0x00000001
+#define WLAN_MBOX2_DMA_RX_CONTROL_STOP_GET(x) (((x) & WLAN_MBOX2_DMA_RX_CONTROL_STOP_MASK) >> WLAN_MBOX2_DMA_RX_CONTROL_STOP_LSB)
+#define WLAN_MBOX2_DMA_RX_CONTROL_STOP_SET(x) (((x) << WLAN_MBOX2_DMA_RX_CONTROL_STOP_LSB) & WLAN_MBOX2_DMA_RX_CONTROL_STOP_MASK)
+
+#define WLAN_MBOX2_DMA_TX_DESCRIPTOR_BASE_ADDRESS 0x00000040
+#define WLAN_MBOX2_DMA_TX_DESCRIPTOR_BASE_OFFSET 0x00000040
+#define WLAN_MBOX2_DMA_TX_DESCRIPTOR_BASE_ADDRESS_MSB 27
+#define WLAN_MBOX2_DMA_TX_DESCRIPTOR_BASE_ADDRESS_LSB 2
+#define WLAN_MBOX2_DMA_TX_DESCRIPTOR_BASE_ADDRESS_MASK 0x0ffffffc
+#define WLAN_MBOX2_DMA_TX_DESCRIPTOR_BASE_ADDRESS_GET(x) (((x) & WLAN_MBOX2_DMA_TX_DESCRIPTOR_BASE_ADDRESS_MASK) >> WLAN_MBOX2_DMA_TX_DESCRIPTOR_BASE_ADDRESS_LSB)
+#define WLAN_MBOX2_DMA_TX_DESCRIPTOR_BASE_ADDRESS_SET(x) (((x) << WLAN_MBOX2_DMA_TX_DESCRIPTOR_BASE_ADDRESS_LSB) & WLAN_MBOX2_DMA_TX_DESCRIPTOR_BASE_ADDRESS_MASK)
+
+#define WLAN_MBOX2_DMA_TX_CONTROL_ADDRESS 0x00000044
+#define WLAN_MBOX2_DMA_TX_CONTROL_OFFSET 0x00000044
+#define WLAN_MBOX2_DMA_TX_CONTROL_RESUME_MSB 2
+#define WLAN_MBOX2_DMA_TX_CONTROL_RESUME_LSB 2
+#define WLAN_MBOX2_DMA_TX_CONTROL_RESUME_MASK 0x00000004
+#define WLAN_MBOX2_DMA_TX_CONTROL_RESUME_GET(x) (((x) & WLAN_MBOX2_DMA_TX_CONTROL_RESUME_MASK) >> WLAN_MBOX2_DMA_TX_CONTROL_RESUME_LSB)
+#define WLAN_MBOX2_DMA_TX_CONTROL_RESUME_SET(x) (((x) << WLAN_MBOX2_DMA_TX_CONTROL_RESUME_LSB) & WLAN_MBOX2_DMA_TX_CONTROL_RESUME_MASK)
+#define WLAN_MBOX2_DMA_TX_CONTROL_START_MSB 1
+#define WLAN_MBOX2_DMA_TX_CONTROL_START_LSB 1
+#define WLAN_MBOX2_DMA_TX_CONTROL_START_MASK 0x00000002
+#define WLAN_MBOX2_DMA_TX_CONTROL_START_GET(x) (((x) & WLAN_MBOX2_DMA_TX_CONTROL_START_MASK) >> WLAN_MBOX2_DMA_TX_CONTROL_START_LSB)
+#define WLAN_MBOX2_DMA_TX_CONTROL_START_SET(x) (((x) << WLAN_MBOX2_DMA_TX_CONTROL_START_LSB) & WLAN_MBOX2_DMA_TX_CONTROL_START_MASK)
+#define WLAN_MBOX2_DMA_TX_CONTROL_STOP_MSB 0
+#define WLAN_MBOX2_DMA_TX_CONTROL_STOP_LSB 0
+#define WLAN_MBOX2_DMA_TX_CONTROL_STOP_MASK 0x00000001
+#define WLAN_MBOX2_DMA_TX_CONTROL_STOP_GET(x) (((x) & WLAN_MBOX2_DMA_TX_CONTROL_STOP_MASK) >> WLAN_MBOX2_DMA_TX_CONTROL_STOP_LSB)
+#define WLAN_MBOX2_DMA_TX_CONTROL_STOP_SET(x) (((x) << WLAN_MBOX2_DMA_TX_CONTROL_STOP_LSB) & WLAN_MBOX2_DMA_TX_CONTROL_STOP_MASK)
+
+#define WLAN_MBOX3_DMA_RX_DESCRIPTOR_BASE_ADDRESS 0x00000048
+#define WLAN_MBOX3_DMA_RX_DESCRIPTOR_BASE_OFFSET 0x00000048
+#define WLAN_MBOX3_DMA_RX_DESCRIPTOR_BASE_ADDRESS_MSB 27
+#define WLAN_MBOX3_DMA_RX_DESCRIPTOR_BASE_ADDRESS_LSB 2
+#define WLAN_MBOX3_DMA_RX_DESCRIPTOR_BASE_ADDRESS_MASK 0x0ffffffc
+#define WLAN_MBOX3_DMA_RX_DESCRIPTOR_BASE_ADDRESS_GET(x) (((x) & WLAN_MBOX3_DMA_RX_DESCRIPTOR_BASE_ADDRESS_MASK) >> WLAN_MBOX3_DMA_RX_DESCRIPTOR_BASE_ADDRESS_LSB)
+#define WLAN_MBOX3_DMA_RX_DESCRIPTOR_BASE_ADDRESS_SET(x) (((x) << WLAN_MBOX3_DMA_RX_DESCRIPTOR_BASE_ADDRESS_LSB) & WLAN_MBOX3_DMA_RX_DESCRIPTOR_BASE_ADDRESS_MASK)
+
+#define WLAN_MBOX3_DMA_RX_CONTROL_ADDRESS 0x0000004c
+#define WLAN_MBOX3_DMA_RX_CONTROL_OFFSET 0x0000004c
+#define WLAN_MBOX3_DMA_RX_CONTROL_RESUME_MSB 2
+#define WLAN_MBOX3_DMA_RX_CONTROL_RESUME_LSB 2
+#define WLAN_MBOX3_DMA_RX_CONTROL_RESUME_MASK 0x00000004
+#define WLAN_MBOX3_DMA_RX_CONTROL_RESUME_GET(x) (((x) & WLAN_MBOX3_DMA_RX_CONTROL_RESUME_MASK) >> WLAN_MBOX3_DMA_RX_CONTROL_RESUME_LSB)
+#define WLAN_MBOX3_DMA_RX_CONTROL_RESUME_SET(x) (((x) << WLAN_MBOX3_DMA_RX_CONTROL_RESUME_LSB) & WLAN_MBOX3_DMA_RX_CONTROL_RESUME_MASK)
+#define WLAN_MBOX3_DMA_RX_CONTROL_START_MSB 1
+#define WLAN_MBOX3_DMA_RX_CONTROL_START_LSB 1
+#define WLAN_MBOX3_DMA_RX_CONTROL_START_MASK 0x00000002
+#define WLAN_MBOX3_DMA_RX_CONTROL_START_GET(x) (((x) & WLAN_MBOX3_DMA_RX_CONTROL_START_MASK) >> WLAN_MBOX3_DMA_RX_CONTROL_START_LSB)
+#define WLAN_MBOX3_DMA_RX_CONTROL_START_SET(x) (((x) << WLAN_MBOX3_DMA_RX_CONTROL_START_LSB) & WLAN_MBOX3_DMA_RX_CONTROL_START_MASK)
+#define WLAN_MBOX3_DMA_RX_CONTROL_STOP_MSB 0
+#define WLAN_MBOX3_DMA_RX_CONTROL_STOP_LSB 0
+#define WLAN_MBOX3_DMA_RX_CONTROL_STOP_MASK 0x00000001
+#define WLAN_MBOX3_DMA_RX_CONTROL_STOP_GET(x) (((x) & WLAN_MBOX3_DMA_RX_CONTROL_STOP_MASK) >> WLAN_MBOX3_DMA_RX_CONTROL_STOP_LSB)
+#define WLAN_MBOX3_DMA_RX_CONTROL_STOP_SET(x) (((x) << WLAN_MBOX3_DMA_RX_CONTROL_STOP_LSB) & WLAN_MBOX3_DMA_RX_CONTROL_STOP_MASK)
+
+#define WLAN_MBOX3_DMA_TX_DESCRIPTOR_BASE_ADDRESS 0x00000050
+#define WLAN_MBOX3_DMA_TX_DESCRIPTOR_BASE_OFFSET 0x00000050
+#define WLAN_MBOX3_DMA_TX_DESCRIPTOR_BASE_ADDRESS_MSB 27
+#define WLAN_MBOX3_DMA_TX_DESCRIPTOR_BASE_ADDRESS_LSB 2
+#define WLAN_MBOX3_DMA_TX_DESCRIPTOR_BASE_ADDRESS_MASK 0x0ffffffc
+#define WLAN_MBOX3_DMA_TX_DESCRIPTOR_BASE_ADDRESS_GET(x) (((x) & WLAN_MBOX3_DMA_TX_DESCRIPTOR_BASE_ADDRESS_MASK) >> WLAN_MBOX3_DMA_TX_DESCRIPTOR_BASE_ADDRESS_LSB)
+#define WLAN_MBOX3_DMA_TX_DESCRIPTOR_BASE_ADDRESS_SET(x) (((x) << WLAN_MBOX3_DMA_TX_DESCRIPTOR_BASE_ADDRESS_LSB) & WLAN_MBOX3_DMA_TX_DESCRIPTOR_BASE_ADDRESS_MASK)
+
+#define WLAN_MBOX3_DMA_TX_CONTROL_ADDRESS 0x00000054
+#define WLAN_MBOX3_DMA_TX_CONTROL_OFFSET 0x00000054
+#define WLAN_MBOX3_DMA_TX_CONTROL_RESUME_MSB 2
+#define WLAN_MBOX3_DMA_TX_CONTROL_RESUME_LSB 2
+#define WLAN_MBOX3_DMA_TX_CONTROL_RESUME_MASK 0x00000004
+#define WLAN_MBOX3_DMA_TX_CONTROL_RESUME_GET(x) (((x) & WLAN_MBOX3_DMA_TX_CONTROL_RESUME_MASK) >> WLAN_MBOX3_DMA_TX_CONTROL_RESUME_LSB)
+#define WLAN_MBOX3_DMA_TX_CONTROL_RESUME_SET(x) (((x) << WLAN_MBOX3_DMA_TX_CONTROL_RESUME_LSB) & WLAN_MBOX3_DMA_TX_CONTROL_RESUME_MASK)
+#define WLAN_MBOX3_DMA_TX_CONTROL_START_MSB 1
+#define WLAN_MBOX3_DMA_TX_CONTROL_START_LSB 1
+#define WLAN_MBOX3_DMA_TX_CONTROL_START_MASK 0x00000002
+#define WLAN_MBOX3_DMA_TX_CONTROL_START_GET(x) (((x) & WLAN_MBOX3_DMA_TX_CONTROL_START_MASK) >> WLAN_MBOX3_DMA_TX_CONTROL_START_LSB)
+#define WLAN_MBOX3_DMA_TX_CONTROL_START_SET(x) (((x) << WLAN_MBOX3_DMA_TX_CONTROL_START_LSB) & WLAN_MBOX3_DMA_TX_CONTROL_START_MASK)
+#define WLAN_MBOX3_DMA_TX_CONTROL_STOP_MSB 0
+#define WLAN_MBOX3_DMA_TX_CONTROL_STOP_LSB 0
+#define WLAN_MBOX3_DMA_TX_CONTROL_STOP_MASK 0x00000001
+#define WLAN_MBOX3_DMA_TX_CONTROL_STOP_GET(x) (((x) & WLAN_MBOX3_DMA_TX_CONTROL_STOP_MASK) >> WLAN_MBOX3_DMA_TX_CONTROL_STOP_LSB)
+#define WLAN_MBOX3_DMA_TX_CONTROL_STOP_SET(x) (((x) << WLAN_MBOX3_DMA_TX_CONTROL_STOP_LSB) & WLAN_MBOX3_DMA_TX_CONTROL_STOP_MASK)
+
+#define WLAN_MBOX_INT_STATUS_ADDRESS 0x00000058
+#define WLAN_MBOX_INT_STATUS_OFFSET 0x00000058
+#define WLAN_MBOX_INT_STATUS_RX_DMA_COMPLETE_MSB 31
+#define WLAN_MBOX_INT_STATUS_RX_DMA_COMPLETE_LSB 28
+#define WLAN_MBOX_INT_STATUS_RX_DMA_COMPLETE_MASK 0xf0000000
+#define WLAN_MBOX_INT_STATUS_RX_DMA_COMPLETE_GET(x) (((x) & WLAN_MBOX_INT_STATUS_RX_DMA_COMPLETE_MASK) >> WLAN_MBOX_INT_STATUS_RX_DMA_COMPLETE_LSB)
+#define WLAN_MBOX_INT_STATUS_RX_DMA_COMPLETE_SET(x) (((x) << WLAN_MBOX_INT_STATUS_RX_DMA_COMPLETE_LSB) & WLAN_MBOX_INT_STATUS_RX_DMA_COMPLETE_MASK)
+#define WLAN_MBOX_INT_STATUS_TX_DMA_EOM_COMPLETE_MSB 27
+#define WLAN_MBOX_INT_STATUS_TX_DMA_EOM_COMPLETE_LSB 24
+#define WLAN_MBOX_INT_STATUS_TX_DMA_EOM_COMPLETE_MASK 0x0f000000
+#define WLAN_MBOX_INT_STATUS_TX_DMA_EOM_COMPLETE_GET(x) (((x) & WLAN_MBOX_INT_STATUS_TX_DMA_EOM_COMPLETE_MASK) >> WLAN_MBOX_INT_STATUS_TX_DMA_EOM_COMPLETE_LSB)
+#define WLAN_MBOX_INT_STATUS_TX_DMA_EOM_COMPLETE_SET(x) (((x) << WLAN_MBOX_INT_STATUS_TX_DMA_EOM_COMPLETE_LSB) & WLAN_MBOX_INT_STATUS_TX_DMA_EOM_COMPLETE_MASK)
+#define WLAN_MBOX_INT_STATUS_TX_DMA_COMPLETE_MSB 23
+#define WLAN_MBOX_INT_STATUS_TX_DMA_COMPLETE_LSB 20
+#define WLAN_MBOX_INT_STATUS_TX_DMA_COMPLETE_MASK 0x00f00000
+#define WLAN_MBOX_INT_STATUS_TX_DMA_COMPLETE_GET(x) (((x) & WLAN_MBOX_INT_STATUS_TX_DMA_COMPLETE_MASK) >> WLAN_MBOX_INT_STATUS_TX_DMA_COMPLETE_LSB)
+#define WLAN_MBOX_INT_STATUS_TX_DMA_COMPLETE_SET(x) (((x) << WLAN_MBOX_INT_STATUS_TX_DMA_COMPLETE_LSB) & WLAN_MBOX_INT_STATUS_TX_DMA_COMPLETE_MASK)
+#define WLAN_MBOX_INT_STATUS_TX_OVERFLOW_MSB 17
+#define WLAN_MBOX_INT_STATUS_TX_OVERFLOW_LSB 17
+#define WLAN_MBOX_INT_STATUS_TX_OVERFLOW_MASK 0x00020000
+#define WLAN_MBOX_INT_STATUS_TX_OVERFLOW_GET(x) (((x) & WLAN_MBOX_INT_STATUS_TX_OVERFLOW_MASK) >> WLAN_MBOX_INT_STATUS_TX_OVERFLOW_LSB)
+#define WLAN_MBOX_INT_STATUS_TX_OVERFLOW_SET(x) (((x) << WLAN_MBOX_INT_STATUS_TX_OVERFLOW_LSB) & WLAN_MBOX_INT_STATUS_TX_OVERFLOW_MASK)
+#define WLAN_MBOX_INT_STATUS_RX_UNDERFLOW_MSB 16
+#define WLAN_MBOX_INT_STATUS_RX_UNDERFLOW_LSB 16
+#define WLAN_MBOX_INT_STATUS_RX_UNDERFLOW_MASK 0x00010000
+#define WLAN_MBOX_INT_STATUS_RX_UNDERFLOW_GET(x) (((x) & WLAN_MBOX_INT_STATUS_RX_UNDERFLOW_MASK) >> WLAN_MBOX_INT_STATUS_RX_UNDERFLOW_LSB)
+#define WLAN_MBOX_INT_STATUS_RX_UNDERFLOW_SET(x) (((x) << WLAN_MBOX_INT_STATUS_RX_UNDERFLOW_LSB) & WLAN_MBOX_INT_STATUS_RX_UNDERFLOW_MASK)
+#define WLAN_MBOX_INT_STATUS_TX_NOT_EMPTY_MSB 15
+#define WLAN_MBOX_INT_STATUS_TX_NOT_EMPTY_LSB 12
+#define WLAN_MBOX_INT_STATUS_TX_NOT_EMPTY_MASK 0x0000f000
+#define WLAN_MBOX_INT_STATUS_TX_NOT_EMPTY_GET(x) (((x) & WLAN_MBOX_INT_STATUS_TX_NOT_EMPTY_MASK) >> WLAN_MBOX_INT_STATUS_TX_NOT_EMPTY_LSB)
+#define WLAN_MBOX_INT_STATUS_TX_NOT_EMPTY_SET(x) (((x) << WLAN_MBOX_INT_STATUS_TX_NOT_EMPTY_LSB) & WLAN_MBOX_INT_STATUS_TX_NOT_EMPTY_MASK)
+#define WLAN_MBOX_INT_STATUS_RX_NOT_FULL_MSB 11
+#define WLAN_MBOX_INT_STATUS_RX_NOT_FULL_LSB 8
+#define WLAN_MBOX_INT_STATUS_RX_NOT_FULL_MASK 0x00000f00
+#define WLAN_MBOX_INT_STATUS_RX_NOT_FULL_GET(x) (((x) & WLAN_MBOX_INT_STATUS_RX_NOT_FULL_MASK) >> WLAN_MBOX_INT_STATUS_RX_NOT_FULL_LSB)
+#define WLAN_MBOX_INT_STATUS_RX_NOT_FULL_SET(x) (((x) << WLAN_MBOX_INT_STATUS_RX_NOT_FULL_LSB) & WLAN_MBOX_INT_STATUS_RX_NOT_FULL_MASK)
+#define WLAN_MBOX_INT_STATUS_HOST_MSB 7
+#define WLAN_MBOX_INT_STATUS_HOST_LSB 0
+#define WLAN_MBOX_INT_STATUS_HOST_MASK 0x000000ff
+#define WLAN_MBOX_INT_STATUS_HOST_GET(x) (((x) & WLAN_MBOX_INT_STATUS_HOST_MASK) >> WLAN_MBOX_INT_STATUS_HOST_LSB)
+#define WLAN_MBOX_INT_STATUS_HOST_SET(x) (((x) << WLAN_MBOX_INT_STATUS_HOST_LSB) & WLAN_MBOX_INT_STATUS_HOST_MASK)
+
+#define WLAN_MBOX_INT_ENABLE_ADDRESS 0x0000005c
+#define WLAN_MBOX_INT_ENABLE_OFFSET 0x0000005c
+#define WLAN_MBOX_INT_ENABLE_RX_DMA_COMPLETE_MSB 31
+#define WLAN_MBOX_INT_ENABLE_RX_DMA_COMPLETE_LSB 28
+#define WLAN_MBOX_INT_ENABLE_RX_DMA_COMPLETE_MASK 0xf0000000
+#define WLAN_MBOX_INT_ENABLE_RX_DMA_COMPLETE_GET(x) (((x) & WLAN_MBOX_INT_ENABLE_RX_DMA_COMPLETE_MASK) >> WLAN_MBOX_INT_ENABLE_RX_DMA_COMPLETE_LSB)
+#define WLAN_MBOX_INT_ENABLE_RX_DMA_COMPLETE_SET(x) (((x) << WLAN_MBOX_INT_ENABLE_RX_DMA_COMPLETE_LSB) & WLAN_MBOX_INT_ENABLE_RX_DMA_COMPLETE_MASK)
+#define WLAN_MBOX_INT_ENABLE_TX_DMA_EOM_COMPLETE_MSB 27
+#define WLAN_MBOX_INT_ENABLE_TX_DMA_EOM_COMPLETE_LSB 24
+#define WLAN_MBOX_INT_ENABLE_TX_DMA_EOM_COMPLETE_MASK 0x0f000000
+#define WLAN_MBOX_INT_ENABLE_TX_DMA_EOM_COMPLETE_GET(x) (((x) & WLAN_MBOX_INT_ENABLE_TX_DMA_EOM_COMPLETE_MASK) >> WLAN_MBOX_INT_ENABLE_TX_DMA_EOM_COMPLETE_LSB)
+#define WLAN_MBOX_INT_ENABLE_TX_DMA_EOM_COMPLETE_SET(x) (((x) << WLAN_MBOX_INT_ENABLE_TX_DMA_EOM_COMPLETE_LSB) & WLAN_MBOX_INT_ENABLE_TX_DMA_EOM_COMPLETE_MASK)
+#define WLAN_MBOX_INT_ENABLE_TX_DMA_COMPLETE_MSB 23
+#define WLAN_MBOX_INT_ENABLE_TX_DMA_COMPLETE_LSB 20
+#define WLAN_MBOX_INT_ENABLE_TX_DMA_COMPLETE_MASK 0x00f00000
+#define WLAN_MBOX_INT_ENABLE_TX_DMA_COMPLETE_GET(x) (((x) & WLAN_MBOX_INT_ENABLE_TX_DMA_COMPLETE_MASK) >> WLAN_MBOX_INT_ENABLE_TX_DMA_COMPLETE_LSB)
+#define WLAN_MBOX_INT_ENABLE_TX_DMA_COMPLETE_SET(x) (((x) << WLAN_MBOX_INT_ENABLE_TX_DMA_COMPLETE_LSB) & WLAN_MBOX_INT_ENABLE_TX_DMA_COMPLETE_MASK)
+#define WLAN_MBOX_INT_ENABLE_TX_OVERFLOW_MSB 17
+#define WLAN_MBOX_INT_ENABLE_TX_OVERFLOW_LSB 17
+#define WLAN_MBOX_INT_ENABLE_TX_OVERFLOW_MASK 0x00020000
+#define WLAN_MBOX_INT_ENABLE_TX_OVERFLOW_GET(x) (((x) & WLAN_MBOX_INT_ENABLE_TX_OVERFLOW_MASK) >> WLAN_MBOX_INT_ENABLE_TX_OVERFLOW_LSB)
+#define WLAN_MBOX_INT_ENABLE_TX_OVERFLOW_SET(x) (((x) << WLAN_MBOX_INT_ENABLE_TX_OVERFLOW_LSB) & WLAN_MBOX_INT_ENABLE_TX_OVERFLOW_MASK)
+#define WLAN_MBOX_INT_ENABLE_RX_UNDERFLOW_MSB 16
+#define WLAN_MBOX_INT_ENABLE_RX_UNDERFLOW_LSB 16
+#define WLAN_MBOX_INT_ENABLE_RX_UNDERFLOW_MASK 0x00010000
+#define WLAN_MBOX_INT_ENABLE_RX_UNDERFLOW_GET(x) (((x) & WLAN_MBOX_INT_ENABLE_RX_UNDERFLOW_MASK) >> WLAN_MBOX_INT_ENABLE_RX_UNDERFLOW_LSB)
+#define WLAN_MBOX_INT_ENABLE_RX_UNDERFLOW_SET(x) (((x) << WLAN_MBOX_INT_ENABLE_RX_UNDERFLOW_LSB) & WLAN_MBOX_INT_ENABLE_RX_UNDERFLOW_MASK)
+#define WLAN_MBOX_INT_ENABLE_TX_NOT_EMPTY_MSB 15
+#define WLAN_MBOX_INT_ENABLE_TX_NOT_EMPTY_LSB 12
+#define WLAN_MBOX_INT_ENABLE_TX_NOT_EMPTY_MASK 0x0000f000
+#define WLAN_MBOX_INT_ENABLE_TX_NOT_EMPTY_GET(x) (((x) & WLAN_MBOX_INT_ENABLE_TX_NOT_EMPTY_MASK) >> WLAN_MBOX_INT_ENABLE_TX_NOT_EMPTY_LSB)
+#define WLAN_MBOX_INT_ENABLE_TX_NOT_EMPTY_SET(x) (((x) << WLAN_MBOX_INT_ENABLE_TX_NOT_EMPTY_LSB) & WLAN_MBOX_INT_ENABLE_TX_NOT_EMPTY_MASK)
+#define WLAN_MBOX_INT_ENABLE_RX_NOT_FULL_MSB 11
+#define WLAN_MBOX_INT_ENABLE_RX_NOT_FULL_LSB 8
+#define WLAN_MBOX_INT_ENABLE_RX_NOT_FULL_MASK 0x00000f00
+#define WLAN_MBOX_INT_ENABLE_RX_NOT_FULL_GET(x) (((x) & WLAN_MBOX_INT_ENABLE_RX_NOT_FULL_MASK) >> WLAN_MBOX_INT_ENABLE_RX_NOT_FULL_LSB)
+#define WLAN_MBOX_INT_ENABLE_RX_NOT_FULL_SET(x) (((x) << WLAN_MBOX_INT_ENABLE_RX_NOT_FULL_LSB) & WLAN_MBOX_INT_ENABLE_RX_NOT_FULL_MASK)
+#define WLAN_MBOX_INT_ENABLE_HOST_MSB 7
+#define WLAN_MBOX_INT_ENABLE_HOST_LSB 0
+#define WLAN_MBOX_INT_ENABLE_HOST_MASK 0x000000ff
+#define WLAN_MBOX_INT_ENABLE_HOST_GET(x) (((x) & WLAN_MBOX_INT_ENABLE_HOST_MASK) >> WLAN_MBOX_INT_ENABLE_HOST_LSB)
+#define WLAN_MBOX_INT_ENABLE_HOST_SET(x) (((x) << WLAN_MBOX_INT_ENABLE_HOST_LSB) & WLAN_MBOX_INT_ENABLE_HOST_MASK)
+
+#define WLAN_INT_HOST_ADDRESS 0x00000060
+#define WLAN_INT_HOST_OFFSET 0x00000060
+#define WLAN_INT_HOST_VECTOR_MSB 7
+#define WLAN_INT_HOST_VECTOR_LSB 0
+#define WLAN_INT_HOST_VECTOR_MASK 0x000000ff
+#define WLAN_INT_HOST_VECTOR_GET(x) (((x) & WLAN_INT_HOST_VECTOR_MASK) >> WLAN_INT_HOST_VECTOR_LSB)
+#define WLAN_INT_HOST_VECTOR_SET(x) (((x) << WLAN_INT_HOST_VECTOR_LSB) & WLAN_INT_HOST_VECTOR_MASK)
+
+#define WLAN_LOCAL_COUNT_ADDRESS 0x00000080
+#define WLAN_LOCAL_COUNT_OFFSET 0x00000080
+#define WLAN_LOCAL_COUNT_VALUE_MSB 7
+#define WLAN_LOCAL_COUNT_VALUE_LSB 0
+#define WLAN_LOCAL_COUNT_VALUE_MASK 0x000000ff
+#define WLAN_LOCAL_COUNT_VALUE_GET(x) (((x) & WLAN_LOCAL_COUNT_VALUE_MASK) >> WLAN_LOCAL_COUNT_VALUE_LSB)
+#define WLAN_LOCAL_COUNT_VALUE_SET(x) (((x) << WLAN_LOCAL_COUNT_VALUE_LSB) & WLAN_LOCAL_COUNT_VALUE_MASK)
+
+#define WLAN_COUNT_INC_ADDRESS 0x000000a0
+#define WLAN_COUNT_INC_OFFSET 0x000000a0
+#define WLAN_COUNT_INC_VALUE_MSB 7
+#define WLAN_COUNT_INC_VALUE_LSB 0
+#define WLAN_COUNT_INC_VALUE_MASK 0x000000ff
+#define WLAN_COUNT_INC_VALUE_GET(x) (((x) & WLAN_COUNT_INC_VALUE_MASK) >> WLAN_COUNT_INC_VALUE_LSB)
+#define WLAN_COUNT_INC_VALUE_SET(x) (((x) << WLAN_COUNT_INC_VALUE_LSB) & WLAN_COUNT_INC_VALUE_MASK)
+
+#define WLAN_LOCAL_SCRATCH_ADDRESS 0x000000c0
+#define WLAN_LOCAL_SCRATCH_OFFSET 0x000000c0
+#define WLAN_LOCAL_SCRATCH_VALUE_MSB 7
+#define WLAN_LOCAL_SCRATCH_VALUE_LSB 0
+#define WLAN_LOCAL_SCRATCH_VALUE_MASK 0x000000ff
+#define WLAN_LOCAL_SCRATCH_VALUE_GET(x) (((x) & WLAN_LOCAL_SCRATCH_VALUE_MASK) >> WLAN_LOCAL_SCRATCH_VALUE_LSB)
+#define WLAN_LOCAL_SCRATCH_VALUE_SET(x) (((x) << WLAN_LOCAL_SCRATCH_VALUE_LSB) & WLAN_LOCAL_SCRATCH_VALUE_MASK)
+
+#define WLAN_USE_LOCAL_BUS_ADDRESS 0x000000e0
+#define WLAN_USE_LOCAL_BUS_OFFSET 0x000000e0
+#define WLAN_USE_LOCAL_BUS_PIN_INIT_MSB 0
+#define WLAN_USE_LOCAL_BUS_PIN_INIT_LSB 0
+#define WLAN_USE_LOCAL_BUS_PIN_INIT_MASK 0x00000001
+#define WLAN_USE_LOCAL_BUS_PIN_INIT_GET(x) (((x) & WLAN_USE_LOCAL_BUS_PIN_INIT_MASK) >> WLAN_USE_LOCAL_BUS_PIN_INIT_LSB)
+#define WLAN_USE_LOCAL_BUS_PIN_INIT_SET(x) (((x) << WLAN_USE_LOCAL_BUS_PIN_INIT_LSB) & WLAN_USE_LOCAL_BUS_PIN_INIT_MASK)
+
+#define WLAN_SDIO_CONFIG_ADDRESS 0x000000e4
+#define WLAN_SDIO_CONFIG_OFFSET 0x000000e4
+#define WLAN_SDIO_CONFIG_CCCR_IOR1_MSB 0
+#define WLAN_SDIO_CONFIG_CCCR_IOR1_LSB 0
+#define WLAN_SDIO_CONFIG_CCCR_IOR1_MASK 0x00000001
+#define WLAN_SDIO_CONFIG_CCCR_IOR1_GET(x) (((x) & WLAN_SDIO_CONFIG_CCCR_IOR1_MASK) >> WLAN_SDIO_CONFIG_CCCR_IOR1_LSB)
+#define WLAN_SDIO_CONFIG_CCCR_IOR1_SET(x) (((x) << WLAN_SDIO_CONFIG_CCCR_IOR1_LSB) & WLAN_SDIO_CONFIG_CCCR_IOR1_MASK)
+
+#define WLAN_MBOX_DEBUG_ADDRESS 0x000000e8
+#define WLAN_MBOX_DEBUG_OFFSET 0x000000e8
+#define WLAN_MBOX_DEBUG_SEL_MSB 2
+#define WLAN_MBOX_DEBUG_SEL_LSB 0
+#define WLAN_MBOX_DEBUG_SEL_MASK 0x00000007
+#define WLAN_MBOX_DEBUG_SEL_GET(x) (((x) & WLAN_MBOX_DEBUG_SEL_MASK) >> WLAN_MBOX_DEBUG_SEL_LSB)
+#define WLAN_MBOX_DEBUG_SEL_SET(x) (((x) << WLAN_MBOX_DEBUG_SEL_LSB) & WLAN_MBOX_DEBUG_SEL_MASK)
+
+#define WLAN_MBOX_FIFO_RESET_ADDRESS 0x000000ec
+#define WLAN_MBOX_FIFO_RESET_OFFSET 0x000000ec
+#define WLAN_MBOX_FIFO_RESET_INIT_MSB 0
+#define WLAN_MBOX_FIFO_RESET_INIT_LSB 0
+#define WLAN_MBOX_FIFO_RESET_INIT_MASK 0x00000001
+#define WLAN_MBOX_FIFO_RESET_INIT_GET(x) (((x) & WLAN_MBOX_FIFO_RESET_INIT_MASK) >> WLAN_MBOX_FIFO_RESET_INIT_LSB)
+#define WLAN_MBOX_FIFO_RESET_INIT_SET(x) (((x) << WLAN_MBOX_FIFO_RESET_INIT_LSB) & WLAN_MBOX_FIFO_RESET_INIT_MASK)
+
+#define WLAN_MBOX_TXFIFO_POP_ADDRESS 0x000000f0
+#define WLAN_MBOX_TXFIFO_POP_OFFSET 0x000000f0
+#define WLAN_MBOX_TXFIFO_POP_DATA_MSB 0
+#define WLAN_MBOX_TXFIFO_POP_DATA_LSB 0
+#define WLAN_MBOX_TXFIFO_POP_DATA_MASK 0x00000001
+#define WLAN_MBOX_TXFIFO_POP_DATA_GET(x) (((x) & WLAN_MBOX_TXFIFO_POP_DATA_MASK) >> WLAN_MBOX_TXFIFO_POP_DATA_LSB)
+#define WLAN_MBOX_TXFIFO_POP_DATA_SET(x) (((x) << WLAN_MBOX_TXFIFO_POP_DATA_LSB) & WLAN_MBOX_TXFIFO_POP_DATA_MASK)
+
+#define WLAN_MBOX_RXFIFO_POP_ADDRESS 0x00000100
+#define WLAN_MBOX_RXFIFO_POP_OFFSET 0x00000100
+#define WLAN_MBOX_RXFIFO_POP_DATA_MSB 0
+#define WLAN_MBOX_RXFIFO_POP_DATA_LSB 0
+#define WLAN_MBOX_RXFIFO_POP_DATA_MASK 0x00000001
+#define WLAN_MBOX_RXFIFO_POP_DATA_GET(x) (((x) & WLAN_MBOX_RXFIFO_POP_DATA_MASK) >> WLAN_MBOX_RXFIFO_POP_DATA_LSB)
+#define WLAN_MBOX_RXFIFO_POP_DATA_SET(x) (((x) << WLAN_MBOX_RXFIFO_POP_DATA_LSB) & WLAN_MBOX_RXFIFO_POP_DATA_MASK)
+
+#define WLAN_SDIO_DEBUG_ADDRESS 0x00000110
+#define WLAN_SDIO_DEBUG_OFFSET 0x00000110
+#define WLAN_SDIO_DEBUG_SEL_MSB 3
+#define WLAN_SDIO_DEBUG_SEL_LSB 0
+#define WLAN_SDIO_DEBUG_SEL_MASK 0x0000000f
+#define WLAN_SDIO_DEBUG_SEL_GET(x) (((x) & WLAN_SDIO_DEBUG_SEL_MASK) >> WLAN_SDIO_DEBUG_SEL_LSB)
+#define WLAN_SDIO_DEBUG_SEL_SET(x) (((x) << WLAN_SDIO_DEBUG_SEL_LSB) & WLAN_SDIO_DEBUG_SEL_MASK)
+
+#define WLAN_GMBOX0_DMA_RX_DESCRIPTOR_BASE_ADDRESS 0x00000114
+#define WLAN_GMBOX0_DMA_RX_DESCRIPTOR_BASE_OFFSET 0x00000114
+#define WLAN_GMBOX0_DMA_RX_DESCRIPTOR_BASE_ADDRESS_MSB 27
+#define WLAN_GMBOX0_DMA_RX_DESCRIPTOR_BASE_ADDRESS_LSB 2
+#define WLAN_GMBOX0_DMA_RX_DESCRIPTOR_BASE_ADDRESS_MASK 0x0ffffffc
+#define WLAN_GMBOX0_DMA_RX_DESCRIPTOR_BASE_ADDRESS_GET(x) (((x) & WLAN_GMBOX0_DMA_RX_DESCRIPTOR_BASE_ADDRESS_MASK) >> WLAN_GMBOX0_DMA_RX_DESCRIPTOR_BASE_ADDRESS_LSB)
+#define WLAN_GMBOX0_DMA_RX_DESCRIPTOR_BASE_ADDRESS_SET(x) (((x) << WLAN_GMBOX0_DMA_RX_DESCRIPTOR_BASE_ADDRESS_LSB) & WLAN_GMBOX0_DMA_RX_DESCRIPTOR_BASE_ADDRESS_MASK)
+
+#define WLAN_GMBOX0_DMA_RX_CONTROL_ADDRESS 0x00000118
+#define WLAN_GMBOX0_DMA_RX_CONTROL_OFFSET 0x00000118
+#define WLAN_GMBOX0_DMA_RX_CONTROL_RESUME_MSB 2
+#define WLAN_GMBOX0_DMA_RX_CONTROL_RESUME_LSB 2
+#define WLAN_GMBOX0_DMA_RX_CONTROL_RESUME_MASK 0x00000004
+#define WLAN_GMBOX0_DMA_RX_CONTROL_RESUME_GET(x) (((x) & WLAN_GMBOX0_DMA_RX_CONTROL_RESUME_MASK) >> WLAN_GMBOX0_DMA_RX_CONTROL_RESUME_LSB)
+#define WLAN_GMBOX0_DMA_RX_CONTROL_RESUME_SET(x) (((x) << WLAN_GMBOX0_DMA_RX_CONTROL_RESUME_LSB) & WLAN_GMBOX0_DMA_RX_CONTROL_RESUME_MASK)
+#define WLAN_GMBOX0_DMA_RX_CONTROL_START_MSB 1
+#define WLAN_GMBOX0_DMA_RX_CONTROL_START_LSB 1
+#define WLAN_GMBOX0_DMA_RX_CONTROL_START_MASK 0x00000002
+#define WLAN_GMBOX0_DMA_RX_CONTROL_START_GET(x) (((x) & WLAN_GMBOX0_DMA_RX_CONTROL_START_MASK) >> WLAN_GMBOX0_DMA_RX_CONTROL_START_LSB)
+#define WLAN_GMBOX0_DMA_RX_CONTROL_START_SET(x) (((x) << WLAN_GMBOX0_DMA_RX_CONTROL_START_LSB) & WLAN_GMBOX0_DMA_RX_CONTROL_START_MASK)
+#define WLAN_GMBOX0_DMA_RX_CONTROL_STOP_MSB 0
+#define WLAN_GMBOX0_DMA_RX_CONTROL_STOP_LSB 0
+#define WLAN_GMBOX0_DMA_RX_CONTROL_STOP_MASK 0x00000001
+#define WLAN_GMBOX0_DMA_RX_CONTROL_STOP_GET(x) (((x) & WLAN_GMBOX0_DMA_RX_CONTROL_STOP_MASK) >> WLAN_GMBOX0_DMA_RX_CONTROL_STOP_LSB)
+#define WLAN_GMBOX0_DMA_RX_CONTROL_STOP_SET(x) (((x) << WLAN_GMBOX0_DMA_RX_CONTROL_STOP_LSB) & WLAN_GMBOX0_DMA_RX_CONTROL_STOP_MASK)
+
+#define WLAN_GMBOX0_DMA_TX_DESCRIPTOR_BASE_ADDRESS 0x0000011c
+#define WLAN_GMBOX0_DMA_TX_DESCRIPTOR_BASE_OFFSET 0x0000011c
+#define WLAN_GMBOX0_DMA_TX_DESCRIPTOR_BASE_ADDRESS_MSB 27
+#define WLAN_GMBOX0_DMA_TX_DESCRIPTOR_BASE_ADDRESS_LSB 2
+#define WLAN_GMBOX0_DMA_TX_DESCRIPTOR_BASE_ADDRESS_MASK 0x0ffffffc
+#define WLAN_GMBOX0_DMA_TX_DESCRIPTOR_BASE_ADDRESS_GET(x) (((x) & WLAN_GMBOX0_DMA_TX_DESCRIPTOR_BASE_ADDRESS_MASK) >> WLAN_GMBOX0_DMA_TX_DESCRIPTOR_BASE_ADDRESS_LSB)
+#define WLAN_GMBOX0_DMA_TX_DESCRIPTOR_BASE_ADDRESS_SET(x) (((x) << WLAN_GMBOX0_DMA_TX_DESCRIPTOR_BASE_ADDRESS_LSB) & WLAN_GMBOX0_DMA_TX_DESCRIPTOR_BASE_ADDRESS_MASK)
+
+#define WLAN_GMBOX0_DMA_TX_CONTROL_ADDRESS 0x00000120
+#define WLAN_GMBOX0_DMA_TX_CONTROL_OFFSET 0x00000120
+#define WLAN_GMBOX0_DMA_TX_CONTROL_RESUME_MSB 2
+#define WLAN_GMBOX0_DMA_TX_CONTROL_RESUME_LSB 2
+#define WLAN_GMBOX0_DMA_TX_CONTROL_RESUME_MASK 0x00000004
+#define WLAN_GMBOX0_DMA_TX_CONTROL_RESUME_GET(x) (((x) & WLAN_GMBOX0_DMA_TX_CONTROL_RESUME_MASK) >> WLAN_GMBOX0_DMA_TX_CONTROL_RESUME_LSB)
+#define WLAN_GMBOX0_DMA_TX_CONTROL_RESUME_SET(x) (((x) << WLAN_GMBOX0_DMA_TX_CONTROL_RESUME_LSB) & WLAN_GMBOX0_DMA_TX_CONTROL_RESUME_MASK)
+#define WLAN_GMBOX0_DMA_TX_CONTROL_START_MSB 1
+#define WLAN_GMBOX0_DMA_TX_CONTROL_START_LSB 1
+#define WLAN_GMBOX0_DMA_TX_CONTROL_START_MASK 0x00000002
+#define WLAN_GMBOX0_DMA_TX_CONTROL_START_GET(x) (((x) & WLAN_GMBOX0_DMA_TX_CONTROL_START_MASK) >> WLAN_GMBOX0_DMA_TX_CONTROL_START_LSB)
+#define WLAN_GMBOX0_DMA_TX_CONTROL_START_SET(x) (((x) << WLAN_GMBOX0_DMA_TX_CONTROL_START_LSB) & WLAN_GMBOX0_DMA_TX_CONTROL_START_MASK)
+#define WLAN_GMBOX0_DMA_TX_CONTROL_STOP_MSB 0
+#define WLAN_GMBOX0_DMA_TX_CONTROL_STOP_LSB 0
+#define WLAN_GMBOX0_DMA_TX_CONTROL_STOP_MASK 0x00000001
+#define WLAN_GMBOX0_DMA_TX_CONTROL_STOP_GET(x) (((x) & WLAN_GMBOX0_DMA_TX_CONTROL_STOP_MASK) >> WLAN_GMBOX0_DMA_TX_CONTROL_STOP_LSB)
+#define WLAN_GMBOX0_DMA_TX_CONTROL_STOP_SET(x) (((x) << WLAN_GMBOX0_DMA_TX_CONTROL_STOP_LSB) & WLAN_GMBOX0_DMA_TX_CONTROL_STOP_MASK)
+
+#define WLAN_GMBOX_INT_STATUS_ADDRESS 0x00000124
+#define WLAN_GMBOX_INT_STATUS_OFFSET 0x00000124
+#define WLAN_GMBOX_INT_STATUS_TX_OVERFLOW_MSB 6
+#define WLAN_GMBOX_INT_STATUS_TX_OVERFLOW_LSB 6
+#define WLAN_GMBOX_INT_STATUS_TX_OVERFLOW_MASK 0x00000040
+#define WLAN_GMBOX_INT_STATUS_TX_OVERFLOW_GET(x) (((x) & WLAN_GMBOX_INT_STATUS_TX_OVERFLOW_MASK) >> WLAN_GMBOX_INT_STATUS_TX_OVERFLOW_LSB)
+#define WLAN_GMBOX_INT_STATUS_TX_OVERFLOW_SET(x) (((x) << WLAN_GMBOX_INT_STATUS_TX_OVERFLOW_LSB) & WLAN_GMBOX_INT_STATUS_TX_OVERFLOW_MASK)
+#define WLAN_GMBOX_INT_STATUS_RX_UNDERFLOW_MSB 5
+#define WLAN_GMBOX_INT_STATUS_RX_UNDERFLOW_LSB 5
+#define WLAN_GMBOX_INT_STATUS_RX_UNDERFLOW_MASK 0x00000020
+#define WLAN_GMBOX_INT_STATUS_RX_UNDERFLOW_GET(x) (((x) & WLAN_GMBOX_INT_STATUS_RX_UNDERFLOW_MASK) >> WLAN_GMBOX_INT_STATUS_RX_UNDERFLOW_LSB)
+#define WLAN_GMBOX_INT_STATUS_RX_UNDERFLOW_SET(x) (((x) << WLAN_GMBOX_INT_STATUS_RX_UNDERFLOW_LSB) & WLAN_GMBOX_INT_STATUS_RX_UNDERFLOW_MASK)
+#define WLAN_GMBOX_INT_STATUS_RX_DMA_COMPLETE_MSB 4
+#define WLAN_GMBOX_INT_STATUS_RX_DMA_COMPLETE_LSB 4
+#define WLAN_GMBOX_INT_STATUS_RX_DMA_COMPLETE_MASK 0x00000010
+#define WLAN_GMBOX_INT_STATUS_RX_DMA_COMPLETE_GET(x) (((x) & WLAN_GMBOX_INT_STATUS_RX_DMA_COMPLETE_MASK) >> WLAN_GMBOX_INT_STATUS_RX_DMA_COMPLETE_LSB)
+#define WLAN_GMBOX_INT_STATUS_RX_DMA_COMPLETE_SET(x) (((x) << WLAN_GMBOX_INT_STATUS_RX_DMA_COMPLETE_LSB) & WLAN_GMBOX_INT_STATUS_RX_DMA_COMPLETE_MASK)
+#define WLAN_GMBOX_INT_STATUS_TX_DMA_EOM_COMPLETE_MSB 3
+#define WLAN_GMBOX_INT_STATUS_TX_DMA_EOM_COMPLETE_LSB 3
+#define WLAN_GMBOX_INT_STATUS_TX_DMA_EOM_COMPLETE_MASK 0x00000008
+#define WLAN_GMBOX_INT_STATUS_TX_DMA_EOM_COMPLETE_GET(x) (((x) & WLAN_GMBOX_INT_STATUS_TX_DMA_EOM_COMPLETE_MASK) >> WLAN_GMBOX_INT_STATUS_TX_DMA_EOM_COMPLETE_LSB)
+#define WLAN_GMBOX_INT_STATUS_TX_DMA_EOM_COMPLETE_SET(x) (((x) << WLAN_GMBOX_INT_STATUS_TX_DMA_EOM_COMPLETE_LSB) & WLAN_GMBOX_INT_STATUS_TX_DMA_EOM_COMPLETE_MASK)
+#define WLAN_GMBOX_INT_STATUS_TX_DMA_COMPLETE_MSB 2
+#define WLAN_GMBOX_INT_STATUS_TX_DMA_COMPLETE_LSB 2
+#define WLAN_GMBOX_INT_STATUS_TX_DMA_COMPLETE_MASK 0x00000004
+#define WLAN_GMBOX_INT_STATUS_TX_DMA_COMPLETE_GET(x) (((x) & WLAN_GMBOX_INT_STATUS_TX_DMA_COMPLETE_MASK) >> WLAN_GMBOX_INT_STATUS_TX_DMA_COMPLETE_LSB)
+#define WLAN_GMBOX_INT_STATUS_TX_DMA_COMPLETE_SET(x) (((x) << WLAN_GMBOX_INT_STATUS_TX_DMA_COMPLETE_LSB) & WLAN_GMBOX_INT_STATUS_TX_DMA_COMPLETE_MASK)
+#define WLAN_GMBOX_INT_STATUS_TX_NOT_EMPTY_MSB 1
+#define WLAN_GMBOX_INT_STATUS_TX_NOT_EMPTY_LSB 1
+#define WLAN_GMBOX_INT_STATUS_TX_NOT_EMPTY_MASK 0x00000002
+#define WLAN_GMBOX_INT_STATUS_TX_NOT_EMPTY_GET(x) (((x) & WLAN_GMBOX_INT_STATUS_TX_NOT_EMPTY_MASK) >> WLAN_GMBOX_INT_STATUS_TX_NOT_EMPTY_LSB)
+#define WLAN_GMBOX_INT_STATUS_TX_NOT_EMPTY_SET(x) (((x) << WLAN_GMBOX_INT_STATUS_TX_NOT_EMPTY_LSB) & WLAN_GMBOX_INT_STATUS_TX_NOT_EMPTY_MASK)
+#define WLAN_GMBOX_INT_STATUS_RX_NOT_FULL_MSB 0
+#define WLAN_GMBOX_INT_STATUS_RX_NOT_FULL_LSB 0
+#define WLAN_GMBOX_INT_STATUS_RX_NOT_FULL_MASK 0x00000001
+#define WLAN_GMBOX_INT_STATUS_RX_NOT_FULL_GET(x) (((x) & WLAN_GMBOX_INT_STATUS_RX_NOT_FULL_MASK) >> WLAN_GMBOX_INT_STATUS_RX_NOT_FULL_LSB)
+#define WLAN_GMBOX_INT_STATUS_RX_NOT_FULL_SET(x) (((x) << WLAN_GMBOX_INT_STATUS_RX_NOT_FULL_LSB) & WLAN_GMBOX_INT_STATUS_RX_NOT_FULL_MASK)
+
+#define WLAN_GMBOX_INT_ENABLE_ADDRESS 0x00000128
+#define WLAN_GMBOX_INT_ENABLE_OFFSET 0x00000128
+#define WLAN_GMBOX_INT_ENABLE_TX_OVERFLOW_MSB 6
+#define WLAN_GMBOX_INT_ENABLE_TX_OVERFLOW_LSB 6
+#define WLAN_GMBOX_INT_ENABLE_TX_OVERFLOW_MASK 0x00000040
+#define WLAN_GMBOX_INT_ENABLE_TX_OVERFLOW_GET(x) (((x) & WLAN_GMBOX_INT_ENABLE_TX_OVERFLOW_MASK) >> WLAN_GMBOX_INT_ENABLE_TX_OVERFLOW_LSB)
+#define WLAN_GMBOX_INT_ENABLE_TX_OVERFLOW_SET(x) (((x) << WLAN_GMBOX_INT_ENABLE_TX_OVERFLOW_LSB) & WLAN_GMBOX_INT_ENABLE_TX_OVERFLOW_MASK)
+#define WLAN_GMBOX_INT_ENABLE_RX_UNDERFLOW_MSB 5
+#define WLAN_GMBOX_INT_ENABLE_RX_UNDERFLOW_LSB 5
+#define WLAN_GMBOX_INT_ENABLE_RX_UNDERFLOW_MASK 0x00000020
+#define WLAN_GMBOX_INT_ENABLE_RX_UNDERFLOW_GET(x) (((x) & WLAN_GMBOX_INT_ENABLE_RX_UNDERFLOW_MASK) >> WLAN_GMBOX_INT_ENABLE_RX_UNDERFLOW_LSB)
+#define WLAN_GMBOX_INT_ENABLE_RX_UNDERFLOW_SET(x) (((x) << WLAN_GMBOX_INT_ENABLE_RX_UNDERFLOW_LSB) & WLAN_GMBOX_INT_ENABLE_RX_UNDERFLOW_MASK)
+#define WLAN_GMBOX_INT_ENABLE_RX_DMA_COMPLETE_MSB 4
+#define WLAN_GMBOX_INT_ENABLE_RX_DMA_COMPLETE_LSB 4
+#define WLAN_GMBOX_INT_ENABLE_RX_DMA_COMPLETE_MASK 0x00000010
+#define WLAN_GMBOX_INT_ENABLE_RX_DMA_COMPLETE_GET(x) (((x) & WLAN_GMBOX_INT_ENABLE_RX_DMA_COMPLETE_MASK) >> WLAN_GMBOX_INT_ENABLE_RX_DMA_COMPLETE_LSB)
+#define WLAN_GMBOX_INT_ENABLE_RX_DMA_COMPLETE_SET(x) (((x) << WLAN_GMBOX_INT_ENABLE_RX_DMA_COMPLETE_LSB) & WLAN_GMBOX_INT_ENABLE_RX_DMA_COMPLETE_MASK)
+#define WLAN_GMBOX_INT_ENABLE_TX_DMA_EOM_COMPLETE_MSB 3
+#define WLAN_GMBOX_INT_ENABLE_TX_DMA_EOM_COMPLETE_LSB 3
+#define WLAN_GMBOX_INT_ENABLE_TX_DMA_EOM_COMPLETE_MASK 0x00000008
+#define WLAN_GMBOX_INT_ENABLE_TX_DMA_EOM_COMPLETE_GET(x) (((x) & WLAN_GMBOX_INT_ENABLE_TX_DMA_EOM_COMPLETE_MASK) >> WLAN_GMBOX_INT_ENABLE_TX_DMA_EOM_COMPLETE_LSB)
+#define WLAN_GMBOX_INT_ENABLE_TX_DMA_EOM_COMPLETE_SET(x) (((x) << WLAN_GMBOX_INT_ENABLE_TX_DMA_EOM_COMPLETE_LSB) & WLAN_GMBOX_INT_ENABLE_TX_DMA_EOM_COMPLETE_MASK)
+#define WLAN_GMBOX_INT_ENABLE_TX_DMA_COMPLETE_MSB 2
+#define WLAN_GMBOX_INT_ENABLE_TX_DMA_COMPLETE_LSB 2
+#define WLAN_GMBOX_INT_ENABLE_TX_DMA_COMPLETE_MASK 0x00000004
+#define WLAN_GMBOX_INT_ENABLE_TX_DMA_COMPLETE_GET(x) (((x) & WLAN_GMBOX_INT_ENABLE_TX_DMA_COMPLETE_MASK) >> WLAN_GMBOX_INT_ENABLE_TX_DMA_COMPLETE_LSB)
+#define WLAN_GMBOX_INT_ENABLE_TX_DMA_COMPLETE_SET(x) (((x) << WLAN_GMBOX_INT_ENABLE_TX_DMA_COMPLETE_LSB) & WLAN_GMBOX_INT_ENABLE_TX_DMA_COMPLETE_MASK)
+#define WLAN_GMBOX_INT_ENABLE_TX_NOT_EMPTY_MSB 1
+#define WLAN_GMBOX_INT_ENABLE_TX_NOT_EMPTY_LSB 1
+#define WLAN_GMBOX_INT_ENABLE_TX_NOT_EMPTY_MASK 0x00000002
+#define WLAN_GMBOX_INT_ENABLE_TX_NOT_EMPTY_GET(x) (((x) & WLAN_GMBOX_INT_ENABLE_TX_NOT_EMPTY_MASK) >> WLAN_GMBOX_INT_ENABLE_TX_NOT_EMPTY_LSB)
+#define WLAN_GMBOX_INT_ENABLE_TX_NOT_EMPTY_SET(x) (((x) << WLAN_GMBOX_INT_ENABLE_TX_NOT_EMPTY_LSB) & WLAN_GMBOX_INT_ENABLE_TX_NOT_EMPTY_MASK)
+#define WLAN_GMBOX_INT_ENABLE_RX_NOT_FULL_MSB 0
+#define WLAN_GMBOX_INT_ENABLE_RX_NOT_FULL_LSB 0
+#define WLAN_GMBOX_INT_ENABLE_RX_NOT_FULL_MASK 0x00000001
+#define WLAN_GMBOX_INT_ENABLE_RX_NOT_FULL_GET(x) (((x) & WLAN_GMBOX_INT_ENABLE_RX_NOT_FULL_MASK) >> WLAN_GMBOX_INT_ENABLE_RX_NOT_FULL_LSB)
+#define WLAN_GMBOX_INT_ENABLE_RX_NOT_FULL_SET(x) (((x) << WLAN_GMBOX_INT_ENABLE_RX_NOT_FULL_LSB) & WLAN_GMBOX_INT_ENABLE_RX_NOT_FULL_MASK)
+
+#define WLAN_HOST_IF_WINDOW_ADDRESS 0x00002000
+#define WLAN_HOST_IF_WINDOW_OFFSET 0x00002000
+#define WLAN_HOST_IF_WINDOW_DATA_MSB 7
+#define WLAN_HOST_IF_WINDOW_DATA_LSB 0
+#define WLAN_HOST_IF_WINDOW_DATA_MASK 0x000000ff
+#define WLAN_HOST_IF_WINDOW_DATA_GET(x) (((x) & WLAN_HOST_IF_WINDOW_DATA_MASK) >> WLAN_HOST_IF_WINDOW_DATA_LSB)
+#define WLAN_HOST_IF_WINDOW_DATA_SET(x) (((x) << WLAN_HOST_IF_WINDOW_DATA_LSB) & WLAN_HOST_IF_WINDOW_DATA_MASK)
+
+
+#ifndef __ASSEMBLER__
+
+typedef struct mbox_wlan_reg_reg_s {
+ volatile unsigned int wlan_mbox_fifo[4];
+ volatile unsigned int wlan_mbox_fifo_status;
+ volatile unsigned int wlan_mbox_dma_policy;
+ volatile unsigned int wlan_mbox0_dma_rx_descriptor_base;
+ volatile unsigned int wlan_mbox0_dma_rx_control;
+ volatile unsigned int wlan_mbox0_dma_tx_descriptor_base;
+ volatile unsigned int wlan_mbox0_dma_tx_control;
+ volatile unsigned int wlan_mbox1_dma_rx_descriptor_base;
+ volatile unsigned int wlan_mbox1_dma_rx_control;
+ volatile unsigned int wlan_mbox1_dma_tx_descriptor_base;
+ volatile unsigned int wlan_mbox1_dma_tx_control;
+ volatile unsigned int wlan_mbox2_dma_rx_descriptor_base;
+ volatile unsigned int wlan_mbox2_dma_rx_control;
+ volatile unsigned int wlan_mbox2_dma_tx_descriptor_base;
+ volatile unsigned int wlan_mbox2_dma_tx_control;
+ volatile unsigned int wlan_mbox3_dma_rx_descriptor_base;
+ volatile unsigned int wlan_mbox3_dma_rx_control;
+ volatile unsigned int wlan_mbox3_dma_tx_descriptor_base;
+ volatile unsigned int wlan_mbox3_dma_tx_control;
+ volatile unsigned int wlan_mbox_int_status;
+ volatile unsigned int wlan_mbox_int_enable;
+ volatile unsigned int wlan_int_host;
+ unsigned char pad0[28]; /* pad to 0x80 */
+ volatile unsigned int wlan_local_count[8];
+ volatile unsigned int wlan_count_inc[8];
+ volatile unsigned int wlan_local_scratch[8];
+ volatile unsigned int wlan_use_local_bus;
+ volatile unsigned int wlan_sdio_config;
+ volatile unsigned int wlan_mbox_debug;
+ volatile unsigned int wlan_mbox_fifo_reset;
+ volatile unsigned int wlan_mbox_txfifo_pop[4];
+ volatile unsigned int wlan_mbox_rxfifo_pop[4];
+ volatile unsigned int wlan_sdio_debug;
+ volatile unsigned int wlan_gmbox0_dma_rx_descriptor_base;
+ volatile unsigned int wlan_gmbox0_dma_rx_control;
+ volatile unsigned int wlan_gmbox0_dma_tx_descriptor_base;
+ volatile unsigned int wlan_gmbox0_dma_tx_control;
+ volatile unsigned int wlan_gmbox_int_status;
+ volatile unsigned int wlan_gmbox_int_enable;
+ unsigned char pad1[7892]; /* pad to 0x2000 */
+ volatile unsigned int wlan_host_if_window[2048];
+} mbox_wlan_reg_reg_t;
+
+#endif /* __ASSEMBLER__ */
+
+#endif /* _MBOX_WLAN_REG_H_ */
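The _GET/_SET macros above follow the usual mask-and-shift idiom: _SET(x) places a field value into its bit position within the register word, _GET(x) extracts it again. A minimal sketch of that idiom using only the names defined in this header (the mbox_base mapping is a hypothetical placeholder for however the real code reaches the register block):

	volatile mbox_wlan_reg_reg_t *mbox = (volatile mbox_wlan_reg_reg_t *)mbox_base; /* hypothetical mapping */

	/* compose an interrupt-enable word from individual fields */
	mbox->wlan_gmbox_int_enable =
		WLAN_GMBOX_INT_ENABLE_RX_DMA_COMPLETE_SET(1) |
		WLAN_GMBOX_INT_ENABLE_TX_OVERFLOW_SET(1);

	/* decode individual bits from the status word */
	unsigned int status = mbox->wlan_gmbox_int_status;
	if (WLAN_GMBOX_INT_STATUS_TX_DMA_COMPLETE_GET(status))
		; /* TX DMA finished */
	if (WLAN_GMBOX_INT_STATUS_RX_NOT_FULL_GET(status))
		; /* room for more RX data */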
diff --git a/drivers/staging/ath6kl/include/common/AR6002/hw4.0/hw/rdma_reg.h b/drivers/staging/ath6kl/include/common/AR6002/hw4.0/hw/rdma_reg.h
new file mode 100644
index 000000000000..56ffda5b1a30
--- /dev/null
+++ b/drivers/staging/ath6kl/include/common/AR6002/hw4.0/hw/rdma_reg.h
@@ -0,0 +1,564 @@
+// ------------------------------------------------------------------
+// Copyright (c) 2004-2010 Atheros Corporation. All rights reserved.
+//
+//
+// Permission to use, copy, modify, and/or distribute this software for any
+// purpose with or without fee is hereby granted, provided that the above
+// copyright notice and this permission notice appear in all copies.
+//
+// THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+// WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+// MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+// ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+// WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+// ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+// OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+//
+//
+// ------------------------------------------------------------------
+//===================================================================
+// Author(s): ="Atheros"
+//===================================================================
+
+
+#ifndef _RDMA_REG_REG_H_
+#define _RDMA_REG_REG_H_
+
+#define DMA_CONFIG_ADDRESS 0x00000000
+#define DMA_CONFIG_OFFSET 0x00000000
+#define DMA_CONFIG_WLBB_PWD_EN_MSB 4
+#define DMA_CONFIG_WLBB_PWD_EN_LSB 4
+#define DMA_CONFIG_WLBB_PWD_EN_MASK 0x00000010
+#define DMA_CONFIG_WLBB_PWD_EN_GET(x) (((x) & DMA_CONFIG_WLBB_PWD_EN_MASK) >> DMA_CONFIG_WLBB_PWD_EN_LSB)
+#define DMA_CONFIG_WLBB_PWD_EN_SET(x) (((x) << DMA_CONFIG_WLBB_PWD_EN_LSB) & DMA_CONFIG_WLBB_PWD_EN_MASK)
+#define DMA_CONFIG_WLMAC_PWD_EN_MSB 3
+#define DMA_CONFIG_WLMAC_PWD_EN_LSB 3
+#define DMA_CONFIG_WLMAC_PWD_EN_MASK 0x00000008
+#define DMA_CONFIG_WLMAC_PWD_EN_GET(x) (((x) & DMA_CONFIG_WLMAC_PWD_EN_MASK) >> DMA_CONFIG_WLMAC_PWD_EN_LSB)
+#define DMA_CONFIG_WLMAC_PWD_EN_SET(x) (((x) << DMA_CONFIG_WLMAC_PWD_EN_LSB) & DMA_CONFIG_WLMAC_PWD_EN_MASK)
+#define DMA_CONFIG_ENABLE_RETENTION_MSB 2
+#define DMA_CONFIG_ENABLE_RETENTION_LSB 2
+#define DMA_CONFIG_ENABLE_RETENTION_MASK 0x00000004
+#define DMA_CONFIG_ENABLE_RETENTION_GET(x) (((x) & DMA_CONFIG_ENABLE_RETENTION_MASK) >> DMA_CONFIG_ENABLE_RETENTION_LSB)
+#define DMA_CONFIG_ENABLE_RETENTION_SET(x) (((x) << DMA_CONFIG_ENABLE_RETENTION_LSB) & DMA_CONFIG_ENABLE_RETENTION_MASK)
+#define DMA_CONFIG_RTC_PRIORITY_MSB 1
+#define DMA_CONFIG_RTC_PRIORITY_LSB 1
+#define DMA_CONFIG_RTC_PRIORITY_MASK 0x00000002
+#define DMA_CONFIG_RTC_PRIORITY_GET(x) (((x) & DMA_CONFIG_RTC_PRIORITY_MASK) >> DMA_CONFIG_RTC_PRIORITY_LSB)
+#define DMA_CONFIG_RTC_PRIORITY_SET(x) (((x) << DMA_CONFIG_RTC_PRIORITY_LSB) & DMA_CONFIG_RTC_PRIORITY_MASK)
+#define DMA_CONFIG_DMA_TYPE_MSB 0
+#define DMA_CONFIG_DMA_TYPE_LSB 0
+#define DMA_CONFIG_DMA_TYPE_MASK 0x00000001
+#define DMA_CONFIG_DMA_TYPE_GET(x) (((x) & DMA_CONFIG_DMA_TYPE_MASK) >> DMA_CONFIG_DMA_TYPE_LSB)
+#define DMA_CONFIG_DMA_TYPE_SET(x) (((x) << DMA_CONFIG_DMA_TYPE_LSB) & DMA_CONFIG_DMA_TYPE_MASK)
+
+#define DMA_CONTROL_ADDRESS 0x00000004
+#define DMA_CONTROL_OFFSET 0x00000004
+#define DMA_CONTROL_START_MSB 1
+#define DMA_CONTROL_START_LSB 1
+#define DMA_CONTROL_START_MASK 0x00000002
+#define DMA_CONTROL_START_GET(x) (((x) & DMA_CONTROL_START_MASK) >> DMA_CONTROL_START_LSB)
+#define DMA_CONTROL_START_SET(x) (((x) << DMA_CONTROL_START_LSB) & DMA_CONTROL_START_MASK)
+#define DMA_CONTROL_STOP_MSB 0
+#define DMA_CONTROL_STOP_LSB 0
+#define DMA_CONTROL_STOP_MASK 0x00000001
+#define DMA_CONTROL_STOP_GET(x) (((x) & DMA_CONTROL_STOP_MASK) >> DMA_CONTROL_STOP_LSB)
+#define DMA_CONTROL_STOP_SET(x) (((x) << DMA_CONTROL_STOP_LSB) & DMA_CONTROL_STOP_MASK)
+
+#define DMA_SRC_ADDRESS 0x00000008
+#define DMA_SRC_OFFSET 0x00000008
+#define DMA_SRC_ADDR_MSB 31
+#define DMA_SRC_ADDR_LSB 2
+#define DMA_SRC_ADDR_MASK 0xfffffffc
+#define DMA_SRC_ADDR_GET(x) (((x) & DMA_SRC_ADDR_MASK) >> DMA_SRC_ADDR_LSB)
+#define DMA_SRC_ADDR_SET(x) (((x) << DMA_SRC_ADDR_LSB) & DMA_SRC_ADDR_MASK)
+
+#define DMA_DEST_ADDRESS 0x0000000c
+#define DMA_DEST_OFFSET 0x0000000c
+#define DMA_DEST_ADDR_MSB 31
+#define DMA_DEST_ADDR_LSB 2
+#define DMA_DEST_ADDR_MASK 0xfffffffc
+#define DMA_DEST_ADDR_GET(x) (((x) & DMA_DEST_ADDR_MASK) >> DMA_DEST_ADDR_LSB)
+#define DMA_DEST_ADDR_SET(x) (((x) << DMA_DEST_ADDR_LSB) & DMA_DEST_ADDR_MASK)
+
+#define DMA_LENGTH_ADDRESS 0x00000010
+#define DMA_LENGTH_OFFSET 0x00000010
+#define DMA_LENGTH_WORDS_MSB 11
+#define DMA_LENGTH_WORDS_LSB 0
+#define DMA_LENGTH_WORDS_MASK 0x00000fff
+#define DMA_LENGTH_WORDS_GET(x) (((x) & DMA_LENGTH_WORDS_MASK) >> DMA_LENGTH_WORDS_LSB)
+#define DMA_LENGTH_WORDS_SET(x) (((x) << DMA_LENGTH_WORDS_LSB) & DMA_LENGTH_WORDS_MASK)
+
+#define VMC_BASE_ADDRESS 0x00000014
+#define VMC_BASE_OFFSET 0x00000014
+#define VMC_BASE_ADDR_MSB 31
+#define VMC_BASE_ADDR_LSB 2
+#define VMC_BASE_ADDR_MASK 0xfffffffc
+#define VMC_BASE_ADDR_GET(x) (((x) & VMC_BASE_ADDR_MASK) >> VMC_BASE_ADDR_LSB)
+#define VMC_BASE_ADDR_SET(x) (((x) << VMC_BASE_ADDR_LSB) & VMC_BASE_ADDR_MASK)
+
+#define INDIRECT_REG_ADDRESS 0x00000018
+#define INDIRECT_REG_OFFSET 0x00000018
+#define INDIRECT_REG_ID_MSB 31
+#define INDIRECT_REG_ID_LSB 2
+#define INDIRECT_REG_ID_MASK 0xfffffffc
+#define INDIRECT_REG_ID_GET(x) (((x) & INDIRECT_REG_ID_MASK) >> INDIRECT_REG_ID_LSB)
+#define INDIRECT_REG_ID_SET(x) (((x) << INDIRECT_REG_ID_LSB) & INDIRECT_REG_ID_MASK)
+
+#define INDIRECT_RETURN_ADDRESS 0x0000001c
+#define INDIRECT_RETURN_OFFSET 0x0000001c
+#define INDIRECT_RETURN_ADDR_MSB 31
+#define INDIRECT_RETURN_ADDR_LSB 2
+#define INDIRECT_RETURN_ADDR_MASK 0xfffffffc
+#define INDIRECT_RETURN_ADDR_GET(x) (((x) & INDIRECT_RETURN_ADDR_MASK) >> INDIRECT_RETURN_ADDR_LSB)
+#define INDIRECT_RETURN_ADDR_SET(x) (((x) << INDIRECT_RETURN_ADDR_LSB) & INDIRECT_RETURN_ADDR_MASK)
+
+#define RDMA_REGION_0__ADDRESS 0x00000020
+#define RDMA_REGION_0__OFFSET 0x00000020
+#define RDMA_REGION_0__ADDR_MSB 31
+#define RDMA_REGION_0__ADDR_LSB 13
+#define RDMA_REGION_0__ADDR_MASK 0xffffe000
+#define RDMA_REGION_0__ADDR_GET(x) (((x) & RDMA_REGION_0__ADDR_MASK) >> RDMA_REGION_0__ADDR_LSB)
+#define RDMA_REGION_0__ADDR_SET(x) (((x) << RDMA_REGION_0__ADDR_LSB) & RDMA_REGION_0__ADDR_MASK)
+#define RDMA_REGION_0__LENGTH_MSB 12
+#define RDMA_REGION_0__LENGTH_LSB 2
+#define RDMA_REGION_0__LENGTH_MASK 0x00001ffc
+#define RDMA_REGION_0__LENGTH_GET(x) (((x) & RDMA_REGION_0__LENGTH_MASK) >> RDMA_REGION_0__LENGTH_LSB)
+#define RDMA_REGION_0__LENGTH_SET(x) (((x) << RDMA_REGION_0__LENGTH_LSB) & RDMA_REGION_0__LENGTH_MASK)
+#define RDMA_REGION_0__INDI_MSB 1
+#define RDMA_REGION_0__INDI_LSB 1
+#define RDMA_REGION_0__INDI_MASK 0x00000002
+#define RDMA_REGION_0__INDI_GET(x) (((x) & RDMA_REGION_0__INDI_MASK) >> RDMA_REGION_0__INDI_LSB)
+#define RDMA_REGION_0__INDI_SET(x) (((x) << RDMA_REGION_0__INDI_LSB) & RDMA_REGION_0__INDI_MASK)
+#define RDMA_REGION_0__NEXT_MSB 0
+#define RDMA_REGION_0__NEXT_LSB 0
+#define RDMA_REGION_0__NEXT_MASK 0x00000001
+#define RDMA_REGION_0__NEXT_GET(x) (((x) & RDMA_REGION_0__NEXT_MASK) >> RDMA_REGION_0__NEXT_LSB)
+#define RDMA_REGION_0__NEXT_SET(x) (((x) << RDMA_REGION_0__NEXT_LSB) & RDMA_REGION_0__NEXT_MASK)
+
+#define RDMA_REGION_1__ADDRESS 0x00000024
+#define RDMA_REGION_1__OFFSET 0x00000024
+#define RDMA_REGION_1__ADDR_MSB 31
+#define RDMA_REGION_1__ADDR_LSB 13
+#define RDMA_REGION_1__ADDR_MASK 0xffffe000
+#define RDMA_REGION_1__ADDR_GET(x) (((x) & RDMA_REGION_1__ADDR_MASK) >> RDMA_REGION_1__ADDR_LSB)
+#define RDMA_REGION_1__ADDR_SET(x) (((x) << RDMA_REGION_1__ADDR_LSB) & RDMA_REGION_1__ADDR_MASK)
+#define RDMA_REGION_1__LENGTH_MSB 12
+#define RDMA_REGION_1__LENGTH_LSB 2
+#define RDMA_REGION_1__LENGTH_MASK 0x00001ffc
+#define RDMA_REGION_1__LENGTH_GET(x) (((x) & RDMA_REGION_1__LENGTH_MASK) >> RDMA_REGION_1__LENGTH_LSB)
+#define RDMA_REGION_1__LENGTH_SET(x) (((x) << RDMA_REGION_1__LENGTH_LSB) & RDMA_REGION_1__LENGTH_MASK)
+#define RDMA_REGION_1__INDI_MSB 1
+#define RDMA_REGION_1__INDI_LSB 1
+#define RDMA_REGION_1__INDI_MASK 0x00000002
+#define RDMA_REGION_1__INDI_GET(x) (((x) & RDMA_REGION_1__INDI_MASK) >> RDMA_REGION_1__INDI_LSB)
+#define RDMA_REGION_1__INDI_SET(x) (((x) << RDMA_REGION_1__INDI_LSB) & RDMA_REGION_1__INDI_MASK)
+#define RDMA_REGION_1__NEXT_MSB 0
+#define RDMA_REGION_1__NEXT_LSB 0
+#define RDMA_REGION_1__NEXT_MASK 0x00000001
+#define RDMA_REGION_1__NEXT_GET(x) (((x) & RDMA_REGION_1__NEXT_MASK) >> RDMA_REGION_1__NEXT_LSB)
+#define RDMA_REGION_1__NEXT_SET(x) (((x) << RDMA_REGION_1__NEXT_LSB) & RDMA_REGION_1__NEXT_MASK)
+
+#define RDMA_REGION_2__ADDRESS 0x00000028
+#define RDMA_REGION_2__OFFSET 0x00000028
+#define RDMA_REGION_2__ADDR_MSB 31
+#define RDMA_REGION_2__ADDR_LSB 13
+#define RDMA_REGION_2__ADDR_MASK 0xffffe000
+#define RDMA_REGION_2__ADDR_GET(x) (((x) & RDMA_REGION_2__ADDR_MASK) >> RDMA_REGION_2__ADDR_LSB)
+#define RDMA_REGION_2__ADDR_SET(x) (((x) << RDMA_REGION_2__ADDR_LSB) & RDMA_REGION_2__ADDR_MASK)
+#define RDMA_REGION_2__LENGTH_MSB 12
+#define RDMA_REGION_2__LENGTH_LSB 2
+#define RDMA_REGION_2__LENGTH_MASK 0x00001ffc
+#define RDMA_REGION_2__LENGTH_GET(x) (((x) & RDMA_REGION_2__LENGTH_MASK) >> RDMA_REGION_2__LENGTH_LSB)
+#define RDMA_REGION_2__LENGTH_SET(x) (((x) << RDMA_REGION_2__LENGTH_LSB) & RDMA_REGION_2__LENGTH_MASK)
+#define RDMA_REGION_2__INDI_MSB 1
+#define RDMA_REGION_2__INDI_LSB 1
+#define RDMA_REGION_2__INDI_MASK 0x00000002
+#define RDMA_REGION_2__INDI_GET(x) (((x) & RDMA_REGION_2__INDI_MASK) >> RDMA_REGION_2__INDI_LSB)
+#define RDMA_REGION_2__INDI_SET(x) (((x) << RDMA_REGION_2__INDI_LSB) & RDMA_REGION_2__INDI_MASK)
+#define RDMA_REGION_2__NEXT_MSB 0
+#define RDMA_REGION_2__NEXT_LSB 0
+#define RDMA_REGION_2__NEXT_MASK 0x00000001
+#define RDMA_REGION_2__NEXT_GET(x) (((x) & RDMA_REGION_2__NEXT_MASK) >> RDMA_REGION_2__NEXT_LSB)
+#define RDMA_REGION_2__NEXT_SET(x) (((x) << RDMA_REGION_2__NEXT_LSB) & RDMA_REGION_2__NEXT_MASK)
+
+#define RDMA_REGION_3__ADDRESS 0x0000002c
+#define RDMA_REGION_3__OFFSET 0x0000002c
+#define RDMA_REGION_3__ADDR_MSB 31
+#define RDMA_REGION_3__ADDR_LSB 13
+#define RDMA_REGION_3__ADDR_MASK 0xffffe000
+#define RDMA_REGION_3__ADDR_GET(x) (((x) & RDMA_REGION_3__ADDR_MASK) >> RDMA_REGION_3__ADDR_LSB)
+#define RDMA_REGION_3__ADDR_SET(x) (((x) << RDMA_REGION_3__ADDR_LSB) & RDMA_REGION_3__ADDR_MASK)
+#define RDMA_REGION_3__LENGTH_MSB 12
+#define RDMA_REGION_3__LENGTH_LSB 2
+#define RDMA_REGION_3__LENGTH_MASK 0x00001ffc
+#define RDMA_REGION_3__LENGTH_GET(x) (((x) & RDMA_REGION_3__LENGTH_MASK) >> RDMA_REGION_3__LENGTH_LSB)
+#define RDMA_REGION_3__LENGTH_SET(x) (((x) << RDMA_REGION_3__LENGTH_LSB) & RDMA_REGION_3__LENGTH_MASK)
+#define RDMA_REGION_3__INDI_MSB 1
+#define RDMA_REGION_3__INDI_LSB 1
+#define RDMA_REGION_3__INDI_MASK 0x00000002
+#define RDMA_REGION_3__INDI_GET(x) (((x) & RDMA_REGION_3__INDI_MASK) >> RDMA_REGION_3__INDI_LSB)
+#define RDMA_REGION_3__INDI_SET(x) (((x) << RDMA_REGION_3__INDI_LSB) & RDMA_REGION_3__INDI_MASK)
+#define RDMA_REGION_3__NEXT_MSB 0
+#define RDMA_REGION_3__NEXT_LSB 0
+#define RDMA_REGION_3__NEXT_MASK 0x00000001
+#define RDMA_REGION_3__NEXT_GET(x) (((x) & RDMA_REGION_3__NEXT_MASK) >> RDMA_REGION_3__NEXT_LSB)
+#define RDMA_REGION_3__NEXT_SET(x) (((x) << RDMA_REGION_3__NEXT_LSB) & RDMA_REGION_3__NEXT_MASK)
+
+#define RDMA_REGION_4__ADDRESS 0x00000030
+#define RDMA_REGION_4__OFFSET 0x00000030
+#define RDMA_REGION_4__ADDR_MSB 31
+#define RDMA_REGION_4__ADDR_LSB 13
+#define RDMA_REGION_4__ADDR_MASK 0xffffe000
+#define RDMA_REGION_4__ADDR_GET(x) (((x) & RDMA_REGION_4__ADDR_MASK) >> RDMA_REGION_4__ADDR_LSB)
+#define RDMA_REGION_4__ADDR_SET(x) (((x) << RDMA_REGION_4__ADDR_LSB) & RDMA_REGION_4__ADDR_MASK)
+#define RDMA_REGION_4__LENGTH_MSB 12
+#define RDMA_REGION_4__LENGTH_LSB 2
+#define RDMA_REGION_4__LENGTH_MASK 0x00001ffc
+#define RDMA_REGION_4__LENGTH_GET(x) (((x) & RDMA_REGION_4__LENGTH_MASK) >> RDMA_REGION_4__LENGTH_LSB)
+#define RDMA_REGION_4__LENGTH_SET(x) (((x) << RDMA_REGION_4__LENGTH_LSB) & RDMA_REGION_4__LENGTH_MASK)
+#define RDMA_REGION_4__INDI_MSB 1
+#define RDMA_REGION_4__INDI_LSB 1
+#define RDMA_REGION_4__INDI_MASK 0x00000002
+#define RDMA_REGION_4__INDI_GET(x) (((x) & RDMA_REGION_4__INDI_MASK) >> RDMA_REGION_4__INDI_LSB)
+#define RDMA_REGION_4__INDI_SET(x) (((x) << RDMA_REGION_4__INDI_LSB) & RDMA_REGION_4__INDI_MASK)
+#define RDMA_REGION_4__NEXT_MSB 0
+#define RDMA_REGION_4__NEXT_LSB 0
+#define RDMA_REGION_4__NEXT_MASK 0x00000001
+#define RDMA_REGION_4__NEXT_GET(x) (((x) & RDMA_REGION_4__NEXT_MASK) >> RDMA_REGION_4__NEXT_LSB)
+#define RDMA_REGION_4__NEXT_SET(x) (((x) << RDMA_REGION_4__NEXT_LSB) & RDMA_REGION_4__NEXT_MASK)
+
+#define RDMA_REGION_5__ADDRESS 0x00000034
+#define RDMA_REGION_5__OFFSET 0x00000034
+#define RDMA_REGION_5__ADDR_MSB 31
+#define RDMA_REGION_5__ADDR_LSB 13
+#define RDMA_REGION_5__ADDR_MASK 0xffffe000
+#define RDMA_REGION_5__ADDR_GET(x) (((x) & RDMA_REGION_5__ADDR_MASK) >> RDMA_REGION_5__ADDR_LSB)
+#define RDMA_REGION_5__ADDR_SET(x) (((x) << RDMA_REGION_5__ADDR_LSB) & RDMA_REGION_5__ADDR_MASK)
+#define RDMA_REGION_5__LENGTH_MSB 12
+#define RDMA_REGION_5__LENGTH_LSB 2
+#define RDMA_REGION_5__LENGTH_MASK 0x00001ffc
+#define RDMA_REGION_5__LENGTH_GET(x) (((x) & RDMA_REGION_5__LENGTH_MASK) >> RDMA_REGION_5__LENGTH_LSB)
+#define RDMA_REGION_5__LENGTH_SET(x) (((x) << RDMA_REGION_5__LENGTH_LSB) & RDMA_REGION_5__LENGTH_MASK)
+#define RDMA_REGION_5__INDI_MSB 1
+#define RDMA_REGION_5__INDI_LSB 1
+#define RDMA_REGION_5__INDI_MASK 0x00000002
+#define RDMA_REGION_5__INDI_GET(x) (((x) & RDMA_REGION_5__INDI_MASK) >> RDMA_REGION_5__INDI_LSB)
+#define RDMA_REGION_5__INDI_SET(x) (((x) << RDMA_REGION_5__INDI_LSB) & RDMA_REGION_5__INDI_MASK)
+#define RDMA_REGION_5__NEXT_MSB 0
+#define RDMA_REGION_5__NEXT_LSB 0
+#define RDMA_REGION_5__NEXT_MASK 0x00000001
+#define RDMA_REGION_5__NEXT_GET(x) (((x) & RDMA_REGION_5__NEXT_MASK) >> RDMA_REGION_5__NEXT_LSB)
+#define RDMA_REGION_5__NEXT_SET(x) (((x) << RDMA_REGION_5__NEXT_LSB) & RDMA_REGION_5__NEXT_MASK)
+
+#define RDMA_REGION_6__ADDRESS 0x00000038
+#define RDMA_REGION_6__OFFSET 0x00000038
+#define RDMA_REGION_6__ADDR_MSB 31
+#define RDMA_REGION_6__ADDR_LSB 13
+#define RDMA_REGION_6__ADDR_MASK 0xffffe000
+#define RDMA_REGION_6__ADDR_GET(x) (((x) & RDMA_REGION_6__ADDR_MASK) >> RDMA_REGION_6__ADDR_LSB)
+#define RDMA_REGION_6__ADDR_SET(x) (((x) << RDMA_REGION_6__ADDR_LSB) & RDMA_REGION_6__ADDR_MASK)
+#define RDMA_REGION_6__LENGTH_MSB 12
+#define RDMA_REGION_6__LENGTH_LSB 2
+#define RDMA_REGION_6__LENGTH_MASK 0x00001ffc
+#define RDMA_REGION_6__LENGTH_GET(x) (((x) & RDMA_REGION_6__LENGTH_MASK) >> RDMA_REGION_6__LENGTH_LSB)
+#define RDMA_REGION_6__LENGTH_SET(x) (((x) << RDMA_REGION_6__LENGTH_LSB) & RDMA_REGION_6__LENGTH_MASK)
+#define RDMA_REGION_6__INDI_MSB 1
+#define RDMA_REGION_6__INDI_LSB 1
+#define RDMA_REGION_6__INDI_MASK 0x00000002
+#define RDMA_REGION_6__INDI_GET(x) (((x) & RDMA_REGION_6__INDI_MASK) >> RDMA_REGION_6__INDI_LSB)
+#define RDMA_REGION_6__INDI_SET(x) (((x) << RDMA_REGION_6__INDI_LSB) & RDMA_REGION_6__INDI_MASK)
+#define RDMA_REGION_6__NEXT_MSB 0
+#define RDMA_REGION_6__NEXT_LSB 0
+#define RDMA_REGION_6__NEXT_MASK 0x00000001
+#define RDMA_REGION_6__NEXT_GET(x) (((x) & RDMA_REGION_6__NEXT_MASK) >> RDMA_REGION_6__NEXT_LSB)
+#define RDMA_REGION_6__NEXT_SET(x) (((x) << RDMA_REGION_6__NEXT_LSB) & RDMA_REGION_6__NEXT_MASK)
+
+#define RDMA_REGION_7__ADDRESS 0x0000003c
+#define RDMA_REGION_7__OFFSET 0x0000003c
+#define RDMA_REGION_7__ADDR_MSB 31
+#define RDMA_REGION_7__ADDR_LSB 13
+#define RDMA_REGION_7__ADDR_MASK 0xffffe000
+#define RDMA_REGION_7__ADDR_GET(x) (((x) & RDMA_REGION_7__ADDR_MASK) >> RDMA_REGION_7__ADDR_LSB)
+#define RDMA_REGION_7__ADDR_SET(x) (((x) << RDMA_REGION_7__ADDR_LSB) & RDMA_REGION_7__ADDR_MASK)
+#define RDMA_REGION_7__LENGTH_MSB 12
+#define RDMA_REGION_7__LENGTH_LSB 2
+#define RDMA_REGION_7__LENGTH_MASK 0x00001ffc
+#define RDMA_REGION_7__LENGTH_GET(x) (((x) & RDMA_REGION_7__LENGTH_MASK) >> RDMA_REGION_7__LENGTH_LSB)
+#define RDMA_REGION_7__LENGTH_SET(x) (((x) << RDMA_REGION_7__LENGTH_LSB) & RDMA_REGION_7__LENGTH_MASK)
+#define RDMA_REGION_7__INDI_MSB 1
+#define RDMA_REGION_7__INDI_LSB 1
+#define RDMA_REGION_7__INDI_MASK 0x00000002
+#define RDMA_REGION_7__INDI_GET(x) (((x) & RDMA_REGION_7__INDI_MASK) >> RDMA_REGION_7__INDI_LSB)
+#define RDMA_REGION_7__INDI_SET(x) (((x) << RDMA_REGION_7__INDI_LSB) & RDMA_REGION_7__INDI_MASK)
+#define RDMA_REGION_7__NEXT_MSB 0
+#define RDMA_REGION_7__NEXT_LSB 0
+#define RDMA_REGION_7__NEXT_MASK 0x00000001
+#define RDMA_REGION_7__NEXT_GET(x) (((x) & RDMA_REGION_7__NEXT_MASK) >> RDMA_REGION_7__NEXT_LSB)
+#define RDMA_REGION_7__NEXT_SET(x) (((x) << RDMA_REGION_7__NEXT_LSB) & RDMA_REGION_7__NEXT_MASK)
+
+#define RDMA_REGION_8__ADDRESS 0x00000040
+#define RDMA_REGION_8__OFFSET 0x00000040
+#define RDMA_REGION_8__ADDR_MSB 31
+#define RDMA_REGION_8__ADDR_LSB 13
+#define RDMA_REGION_8__ADDR_MASK 0xffffe000
+#define RDMA_REGION_8__ADDR_GET(x) (((x) & RDMA_REGION_8__ADDR_MASK) >> RDMA_REGION_8__ADDR_LSB)
+#define RDMA_REGION_8__ADDR_SET(x) (((x) << RDMA_REGION_8__ADDR_LSB) & RDMA_REGION_8__ADDR_MASK)
+#define RDMA_REGION_8__LENGTH_MSB 12
+#define RDMA_REGION_8__LENGTH_LSB 2
+#define RDMA_REGION_8__LENGTH_MASK 0x00001ffc
+#define RDMA_REGION_8__LENGTH_GET(x) (((x) & RDMA_REGION_8__LENGTH_MASK) >> RDMA_REGION_8__LENGTH_LSB)
+#define RDMA_REGION_8__LENGTH_SET(x) (((x) << RDMA_REGION_8__LENGTH_LSB) & RDMA_REGION_8__LENGTH_MASK)
+#define RDMA_REGION_8__INDI_MSB 1
+#define RDMA_REGION_8__INDI_LSB 1
+#define RDMA_REGION_8__INDI_MASK 0x00000002
+#define RDMA_REGION_8__INDI_GET(x) (((x) & RDMA_REGION_8__INDI_MASK) >> RDMA_REGION_8__INDI_LSB)
+#define RDMA_REGION_8__INDI_SET(x) (((x) << RDMA_REGION_8__INDI_LSB) & RDMA_REGION_8__INDI_MASK)
+#define RDMA_REGION_8__NEXT_MSB 0
+#define RDMA_REGION_8__NEXT_LSB 0
+#define RDMA_REGION_8__NEXT_MASK 0x00000001
+#define RDMA_REGION_8__NEXT_GET(x) (((x) & RDMA_REGION_8__NEXT_MASK) >> RDMA_REGION_8__NEXT_LSB)
+#define RDMA_REGION_8__NEXT_SET(x) (((x) << RDMA_REGION_8__NEXT_LSB) & RDMA_REGION_8__NEXT_MASK)
+
+#define RDMA_REGION_9__ADDRESS 0x00000044
+#define RDMA_REGION_9__OFFSET 0x00000044
+#define RDMA_REGION_9__ADDR_MSB 31
+#define RDMA_REGION_9__ADDR_LSB 13
+#define RDMA_REGION_9__ADDR_MASK 0xffffe000
+#define RDMA_REGION_9__ADDR_GET(x) (((x) & RDMA_REGION_9__ADDR_MASK) >> RDMA_REGION_9__ADDR_LSB)
+#define RDMA_REGION_9__ADDR_SET(x) (((x) << RDMA_REGION_9__ADDR_LSB) & RDMA_REGION_9__ADDR_MASK)
+#define RDMA_REGION_9__LENGTH_MSB 12
+#define RDMA_REGION_9__LENGTH_LSB 2
+#define RDMA_REGION_9__LENGTH_MASK 0x00001ffc
+#define RDMA_REGION_9__LENGTH_GET(x) (((x) & RDMA_REGION_9__LENGTH_MASK) >> RDMA_REGION_9__LENGTH_LSB)
+#define RDMA_REGION_9__LENGTH_SET(x) (((x) << RDMA_REGION_9__LENGTH_LSB) & RDMA_REGION_9__LENGTH_MASK)
+#define RDMA_REGION_9__INDI_MSB 1
+#define RDMA_REGION_9__INDI_LSB 1
+#define RDMA_REGION_9__INDI_MASK 0x00000002
+#define RDMA_REGION_9__INDI_GET(x) (((x) & RDMA_REGION_9__INDI_MASK) >> RDMA_REGION_9__INDI_LSB)
+#define RDMA_REGION_9__INDI_SET(x) (((x) << RDMA_REGION_9__INDI_LSB) & RDMA_REGION_9__INDI_MASK)
+#define RDMA_REGION_9__NEXT_MSB 0
+#define RDMA_REGION_9__NEXT_LSB 0
+#define RDMA_REGION_9__NEXT_MASK 0x00000001
+#define RDMA_REGION_9__NEXT_GET(x) (((x) & RDMA_REGION_9__NEXT_MASK) >> RDMA_REGION_9__NEXT_LSB)
+#define RDMA_REGION_9__NEXT_SET(x) (((x) << RDMA_REGION_9__NEXT_LSB) & RDMA_REGION_9__NEXT_MASK)
+
+#define RDMA_REGION_10__ADDRESS 0x00000048
+#define RDMA_REGION_10__OFFSET 0x00000048
+#define RDMA_REGION_10__ADDR_MSB 31
+#define RDMA_REGION_10__ADDR_LSB 13
+#define RDMA_REGION_10__ADDR_MASK 0xffffe000
+#define RDMA_REGION_10__ADDR_GET(x) (((x) & RDMA_REGION_10__ADDR_MASK) >> RDMA_REGION_10__ADDR_LSB)
+#define RDMA_REGION_10__ADDR_SET(x) (((x) << RDMA_REGION_10__ADDR_LSB) & RDMA_REGION_10__ADDR_MASK)
+#define RDMA_REGION_10__LENGTH_MSB 12
+#define RDMA_REGION_10__LENGTH_LSB 2
+#define RDMA_REGION_10__LENGTH_MASK 0x00001ffc
+#define RDMA_REGION_10__LENGTH_GET(x) (((x) & RDMA_REGION_10__LENGTH_MASK) >> RDMA_REGION_10__LENGTH_LSB)
+#define RDMA_REGION_10__LENGTH_SET(x) (((x) << RDMA_REGION_10__LENGTH_LSB) & RDMA_REGION_10__LENGTH_MASK)
+#define RDMA_REGION_10__INDI_MSB 1
+#define RDMA_REGION_10__INDI_LSB 1
+#define RDMA_REGION_10__INDI_MASK 0x00000002
+#define RDMA_REGION_10__INDI_GET(x) (((x) & RDMA_REGION_10__INDI_MASK) >> RDMA_REGION_10__INDI_LSB)
+#define RDMA_REGION_10__INDI_SET(x) (((x) << RDMA_REGION_10__INDI_LSB) & RDMA_REGION_10__INDI_MASK)
+#define RDMA_REGION_10__NEXT_MSB 0
+#define RDMA_REGION_10__NEXT_LSB 0
+#define RDMA_REGION_10__NEXT_MASK 0x00000001
+#define RDMA_REGION_10__NEXT_GET(x) (((x) & RDMA_REGION_10__NEXT_MASK) >> RDMA_REGION_10__NEXT_LSB)
+#define RDMA_REGION_10__NEXT_SET(x) (((x) << RDMA_REGION_10__NEXT_LSB) & RDMA_REGION_10__NEXT_MASK)
+
+#define RDMA_REGION_11__ADDRESS 0x0000004c
+#define RDMA_REGION_11__OFFSET 0x0000004c
+#define RDMA_REGION_11__ADDR_MSB 31
+#define RDMA_REGION_11__ADDR_LSB 13
+#define RDMA_REGION_11__ADDR_MASK 0xffffe000
+#define RDMA_REGION_11__ADDR_GET(x) (((x) & RDMA_REGION_11__ADDR_MASK) >> RDMA_REGION_11__ADDR_LSB)
+#define RDMA_REGION_11__ADDR_SET(x) (((x) << RDMA_REGION_11__ADDR_LSB) & RDMA_REGION_11__ADDR_MASK)
+#define RDMA_REGION_11__LENGTH_MSB 12
+#define RDMA_REGION_11__LENGTH_LSB 2
+#define RDMA_REGION_11__LENGTH_MASK 0x00001ffc
+#define RDMA_REGION_11__LENGTH_GET(x) (((x) & RDMA_REGION_11__LENGTH_MASK) >> RDMA_REGION_11__LENGTH_LSB)
+#define RDMA_REGION_11__LENGTH_SET(x) (((x) << RDMA_REGION_11__LENGTH_LSB) & RDMA_REGION_11__LENGTH_MASK)
+#define RDMA_REGION_11__INDI_MSB 1
+#define RDMA_REGION_11__INDI_LSB 1
+#define RDMA_REGION_11__INDI_MASK 0x00000002
+#define RDMA_REGION_11__INDI_GET(x) (((x) & RDMA_REGION_11__INDI_MASK) >> RDMA_REGION_11__INDI_LSB)
+#define RDMA_REGION_11__INDI_SET(x) (((x) << RDMA_REGION_11__INDI_LSB) & RDMA_REGION_11__INDI_MASK)
+#define RDMA_REGION_11__NEXT_MSB 0
+#define RDMA_REGION_11__NEXT_LSB 0
+#define RDMA_REGION_11__NEXT_MASK 0x00000001
+#define RDMA_REGION_11__NEXT_GET(x) (((x) & RDMA_REGION_11__NEXT_MASK) >> RDMA_REGION_11__NEXT_LSB)
+#define RDMA_REGION_11__NEXT_SET(x) (((x) << RDMA_REGION_11__NEXT_LSB) & RDMA_REGION_11__NEXT_MASK)
+
+#define RDMA_REGION_12__ADDRESS 0x00000050
+#define RDMA_REGION_12__OFFSET 0x00000050
+#define RDMA_REGION_12__ADDR_MSB 31
+#define RDMA_REGION_12__ADDR_LSB 13
+#define RDMA_REGION_12__ADDR_MASK 0xffffe000
+#define RDMA_REGION_12__ADDR_GET(x) (((x) & RDMA_REGION_12__ADDR_MASK) >> RDMA_REGION_12__ADDR_LSB)
+#define RDMA_REGION_12__ADDR_SET(x) (((x) << RDMA_REGION_12__ADDR_LSB) & RDMA_REGION_12__ADDR_MASK)
+#define RDMA_REGION_12__LENGTH_MSB 12
+#define RDMA_REGION_12__LENGTH_LSB 2
+#define RDMA_REGION_12__LENGTH_MASK 0x00001ffc
+#define RDMA_REGION_12__LENGTH_GET(x) (((x) & RDMA_REGION_12__LENGTH_MASK) >> RDMA_REGION_12__LENGTH_LSB)
+#define RDMA_REGION_12__LENGTH_SET(x) (((x) << RDMA_REGION_12__LENGTH_LSB) & RDMA_REGION_12__LENGTH_MASK)
+#define RDMA_REGION_12__INDI_MSB 1
+#define RDMA_REGION_12__INDI_LSB 1
+#define RDMA_REGION_12__INDI_MASK 0x00000002
+#define RDMA_REGION_12__INDI_GET(x) (((x) & RDMA_REGION_12__INDI_MASK) >> RDMA_REGION_12__INDI_LSB)
+#define RDMA_REGION_12__INDI_SET(x) (((x) << RDMA_REGION_12__INDI_LSB) & RDMA_REGION_12__INDI_MASK)
+#define RDMA_REGION_12__NEXT_MSB 0
+#define RDMA_REGION_12__NEXT_LSB 0
+#define RDMA_REGION_12__NEXT_MASK 0x00000001
+#define RDMA_REGION_12__NEXT_GET(x) (((x) & RDMA_REGION_12__NEXT_MASK) >> RDMA_REGION_12__NEXT_LSB)
+#define RDMA_REGION_12__NEXT_SET(x) (((x) << RDMA_REGION_12__NEXT_LSB) & RDMA_REGION_12__NEXT_MASK)
+
+#define RDMA_REGION_13__ADDRESS 0x00000054
+#define RDMA_REGION_13__OFFSET 0x00000054
+#define RDMA_REGION_13__ADDR_MSB 31
+#define RDMA_REGION_13__ADDR_LSB 13
+#define RDMA_REGION_13__ADDR_MASK 0xffffe000
+#define RDMA_REGION_13__ADDR_GET(x) (((x) & RDMA_REGION_13__ADDR_MASK) >> RDMA_REGION_13__ADDR_LSB)
+#define RDMA_REGION_13__ADDR_SET(x) (((x) << RDMA_REGION_13__ADDR_LSB) & RDMA_REGION_13__ADDR_MASK)
+#define RDMA_REGION_13__LENGTH_MSB 12
+#define RDMA_REGION_13__LENGTH_LSB 2
+#define RDMA_REGION_13__LENGTH_MASK 0x00001ffc
+#define RDMA_REGION_13__LENGTH_GET(x) (((x) & RDMA_REGION_13__LENGTH_MASK) >> RDMA_REGION_13__LENGTH_LSB)
+#define RDMA_REGION_13__LENGTH_SET(x) (((x) << RDMA_REGION_13__LENGTH_LSB) & RDMA_REGION_13__LENGTH_MASK)
+#define RDMA_REGION_13__INDI_MSB 1
+#define RDMA_REGION_13__INDI_LSB 1
+#define RDMA_REGION_13__INDI_MASK 0x00000002
+#define RDMA_REGION_13__INDI_GET(x) (((x) & RDMA_REGION_13__INDI_MASK) >> RDMA_REGION_13__INDI_LSB)
+#define RDMA_REGION_13__INDI_SET(x) (((x) << RDMA_REGION_13__INDI_LSB) & RDMA_REGION_13__INDI_MASK)
+#define RDMA_REGION_13__NEXT_MSB 0
+#define RDMA_REGION_13__NEXT_LSB 0
+#define RDMA_REGION_13__NEXT_MASK 0x00000001
+#define RDMA_REGION_13__NEXT_GET(x) (((x) & RDMA_REGION_13__NEXT_MASK) >> RDMA_REGION_13__NEXT_LSB)
+#define RDMA_REGION_13__NEXT_SET(x) (((x) << RDMA_REGION_13__NEXT_LSB) & RDMA_REGION_13__NEXT_MASK)
+
+#define RDMA_REGION_14__ADDRESS 0x00000058
+#define RDMA_REGION_14__OFFSET 0x00000058
+#define RDMA_REGION_14__ADDR_MSB 31
+#define RDMA_REGION_14__ADDR_LSB 13
+#define RDMA_REGION_14__ADDR_MASK 0xffffe000
+#define RDMA_REGION_14__ADDR_GET(x) (((x) & RDMA_REGION_14__ADDR_MASK) >> RDMA_REGION_14__ADDR_LSB)
+#define RDMA_REGION_14__ADDR_SET(x) (((x) << RDMA_REGION_14__ADDR_LSB) & RDMA_REGION_14__ADDR_MASK)
+#define RDMA_REGION_14__LENGTH_MSB 12
+#define RDMA_REGION_14__LENGTH_LSB 2
+#define RDMA_REGION_14__LENGTH_MASK 0x00001ffc
+#define RDMA_REGION_14__LENGTH_GET(x) (((x) & RDMA_REGION_14__LENGTH_MASK) >> RDMA_REGION_14__LENGTH_LSB)
+#define RDMA_REGION_14__LENGTH_SET(x) (((x) << RDMA_REGION_14__LENGTH_LSB) & RDMA_REGION_14__LENGTH_MASK)
+#define RDMA_REGION_14__INDI_MSB 1
+#define RDMA_REGION_14__INDI_LSB 1
+#define RDMA_REGION_14__INDI_MASK 0x00000002
+#define RDMA_REGION_14__INDI_GET(x) (((x) & RDMA_REGION_14__INDI_MASK) >> RDMA_REGION_14__INDI_LSB)
+#define RDMA_REGION_14__INDI_SET(x) (((x) << RDMA_REGION_14__INDI_LSB) & RDMA_REGION_14__INDI_MASK)
+#define RDMA_REGION_14__NEXT_MSB 0
+#define RDMA_REGION_14__NEXT_LSB 0
+#define RDMA_REGION_14__NEXT_MASK 0x00000001
+#define RDMA_REGION_14__NEXT_GET(x) (((x) & RDMA_REGION_14__NEXT_MASK) >> RDMA_REGION_14__NEXT_LSB)
+#define RDMA_REGION_14__NEXT_SET(x) (((x) << RDMA_REGION_14__NEXT_LSB) & RDMA_REGION_14__NEXT_MASK)
+
+#define RDMA_REGION_15__ADDRESS 0x0000005c
+#define RDMA_REGION_15__OFFSET 0x0000005c
+#define RDMA_REGION_15__ADDR_MSB 31
+#define RDMA_REGION_15__ADDR_LSB 13
+#define RDMA_REGION_15__ADDR_MASK 0xffffe000
+#define RDMA_REGION_15__ADDR_GET(x) (((x) & RDMA_REGION_15__ADDR_MASK) >> RDMA_REGION_15__ADDR_LSB)
+#define RDMA_REGION_15__ADDR_SET(x) (((x) << RDMA_REGION_15__ADDR_LSB) & RDMA_REGION_15__ADDR_MASK)
+#define RDMA_REGION_15__LENGTH_MSB 12
+#define RDMA_REGION_15__LENGTH_LSB 2
+#define RDMA_REGION_15__LENGTH_MASK 0x00001ffc
+#define RDMA_REGION_15__LENGTH_GET(x) (((x) & RDMA_REGION_15__LENGTH_MASK) >> RDMA_REGION_15__LENGTH_LSB)
+#define RDMA_REGION_15__LENGTH_SET(x) (((x) << RDMA_REGION_15__LENGTH_LSB) & RDMA_REGION_15__LENGTH_MASK)
+#define RDMA_REGION_15__INDI_MSB 1
+#define RDMA_REGION_15__INDI_LSB 1
+#define RDMA_REGION_15__INDI_MASK 0x00000002
+#define RDMA_REGION_15__INDI_GET(x) (((x) & RDMA_REGION_15__INDI_MASK) >> RDMA_REGION_15__INDI_LSB)
+#define RDMA_REGION_15__INDI_SET(x) (((x) << RDMA_REGION_15__INDI_LSB) & RDMA_REGION_15__INDI_MASK)
+#define RDMA_REGION_15__NEXT_MSB 0
+#define RDMA_REGION_15__NEXT_LSB 0
+#define RDMA_REGION_15__NEXT_MASK 0x00000001
+#define RDMA_REGION_15__NEXT_GET(x) (((x) & RDMA_REGION_15__NEXT_MASK) >> RDMA_REGION_15__NEXT_LSB)
+#define RDMA_REGION_15__NEXT_SET(x) (((x) << RDMA_REGION_15__NEXT_LSB) & RDMA_REGION_15__NEXT_MASK)
+
+#define DMA_STATUS_ADDRESS 0x00000060
+#define DMA_STATUS_OFFSET 0x00000060
+#define DMA_STATUS_ERROR_CODE_MSB 14
+#define DMA_STATUS_ERROR_CODE_LSB 4
+#define DMA_STATUS_ERROR_CODE_MASK 0x00007ff0
+#define DMA_STATUS_ERROR_CODE_GET(x) (((x) & DMA_STATUS_ERROR_CODE_MASK) >> DMA_STATUS_ERROR_CODE_LSB)
+#define DMA_STATUS_ERROR_CODE_SET(x) (((x) << DMA_STATUS_ERROR_CODE_LSB) & DMA_STATUS_ERROR_CODE_MASK)
+#define DMA_STATUS_ERROR_MSB 3
+#define DMA_STATUS_ERROR_LSB 3
+#define DMA_STATUS_ERROR_MASK 0x00000008
+#define DMA_STATUS_ERROR_GET(x) (((x) & DMA_STATUS_ERROR_MASK) >> DMA_STATUS_ERROR_LSB)
+#define DMA_STATUS_ERROR_SET(x) (((x) << DMA_STATUS_ERROR_LSB) & DMA_STATUS_ERROR_MASK)
+#define DMA_STATUS_DONE_MSB 2
+#define DMA_STATUS_DONE_LSB 2
+#define DMA_STATUS_DONE_MASK 0x00000004
+#define DMA_STATUS_DONE_GET(x) (((x) & DMA_STATUS_DONE_MASK) >> DMA_STATUS_DONE_LSB)
+#define DMA_STATUS_DONE_SET(x) (((x) << DMA_STATUS_DONE_LSB) & DMA_STATUS_DONE_MASK)
+#define DMA_STATUS_STOPPED_MSB 1
+#define DMA_STATUS_STOPPED_LSB 1
+#define DMA_STATUS_STOPPED_MASK 0x00000002
+#define DMA_STATUS_STOPPED_GET(x) (((x) & DMA_STATUS_STOPPED_MASK) >> DMA_STATUS_STOPPED_LSB)
+#define DMA_STATUS_STOPPED_SET(x) (((x) << DMA_STATUS_STOPPED_LSB) & DMA_STATUS_STOPPED_MASK)
+#define DMA_STATUS_RUNNING_MSB 0
+#define DMA_STATUS_RUNNING_LSB 0
+#define DMA_STATUS_RUNNING_MASK 0x00000001
+#define DMA_STATUS_RUNNING_GET(x) (((x) & DMA_STATUS_RUNNING_MASK) >> DMA_STATUS_RUNNING_LSB)
+#define DMA_STATUS_RUNNING_SET(x) (((x) << DMA_STATUS_RUNNING_LSB) & DMA_STATUS_RUNNING_MASK)
+
+#define DMA_INT_EN_ADDRESS 0x00000064
+#define DMA_INT_EN_OFFSET 0x00000064
+#define DMA_INT_EN_ERROR_ENA_MSB 3
+#define DMA_INT_EN_ERROR_ENA_LSB 3
+#define DMA_INT_EN_ERROR_ENA_MASK 0x00000008
+#define DMA_INT_EN_ERROR_ENA_GET(x) (((x) & DMA_INT_EN_ERROR_ENA_MASK) >> DMA_INT_EN_ERROR_ENA_LSB)
+#define DMA_INT_EN_ERROR_ENA_SET(x) (((x) << DMA_INT_EN_ERROR_ENA_LSB) & DMA_INT_EN_ERROR_ENA_MASK)
+#define DMA_INT_EN_DONE_ENA_MSB 2
+#define DMA_INT_EN_DONE_ENA_LSB 2
+#define DMA_INT_EN_DONE_ENA_MASK 0x00000004
+#define DMA_INT_EN_DONE_ENA_GET(x) (((x) & DMA_INT_EN_DONE_ENA_MASK) >> DMA_INT_EN_DONE_ENA_LSB)
+#define DMA_INT_EN_DONE_ENA_SET(x) (((x) << DMA_INT_EN_DONE_ENA_LSB) & DMA_INT_EN_DONE_ENA_MASK)
+#define DMA_INT_EN_STOPPED_ENA_MSB 1
+#define DMA_INT_EN_STOPPED_ENA_LSB 1
+#define DMA_INT_EN_STOPPED_ENA_MASK 0x00000002
+#define DMA_INT_EN_STOPPED_ENA_GET(x) (((x) & DMA_INT_EN_STOPPED_ENA_MASK) >> DMA_INT_EN_STOPPED_ENA_LSB)
+#define DMA_INT_EN_STOPPED_ENA_SET(x) (((x) << DMA_INT_EN_STOPPED_ENA_LSB) & DMA_INT_EN_STOPPED_ENA_MASK)
+
+
+#ifndef __ASSEMBLER__
+
+typedef struct rdma_reg_reg_s {
+ volatile unsigned int dma_config;
+ volatile unsigned int dma_control;
+ volatile unsigned int dma_src;
+ volatile unsigned int dma_dest;
+ volatile unsigned int dma_length;
+ volatile unsigned int vmc_base;
+ volatile unsigned int indirect_reg;
+ volatile unsigned int indirect_return;
+ volatile unsigned int rdma_region_0_;
+ volatile unsigned int rdma_region_1_;
+ volatile unsigned int rdma_region_2_;
+ volatile unsigned int rdma_region_3_;
+ volatile unsigned int rdma_region_4_;
+ volatile unsigned int rdma_region_5_;
+ volatile unsigned int rdma_region_6_;
+ volatile unsigned int rdma_region_7_;
+ volatile unsigned int rdma_region_8_;
+ volatile unsigned int rdma_region_9_;
+ volatile unsigned int rdma_region_10_;
+ volatile unsigned int rdma_region_11_;
+ volatile unsigned int rdma_region_12_;
+ volatile unsigned int rdma_region_13_;
+ volatile unsigned int rdma_region_14_;
+ volatile unsigned int rdma_region_15_;
+ volatile unsigned int dma_status;
+ volatile unsigned int dma_int_en;
+} rdma_reg_reg_t;
+
+#endif /* __ASSEMBLER__ */
+
+#endif /* _RDMA_REG_REG_H_ */
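Taken together, the register layout (rdma_reg_reg_t) and the field macros above suggest the basic programming sequence for a copy: load source, destination and length, start the engine through DMA_CONTROL, then watch DMA_STATUS. A minimal sketch under those assumptions (the helper name and the polling loop are illustrative; real code would normally use the DMA_INT_EN interrupts instead of spinning):

	/* illustrative helper built only from the definitions above */
	static int rdma_copy_words(volatile rdma_reg_reg_t *rdma,
				   unsigned int src_addr, unsigned int dst_addr,
				   unsigned int nwords)
	{
		rdma->dma_src     = src_addr & DMA_SRC_ADDR_MASK;   /* word-aligned source */
		rdma->dma_dest    = dst_addr & DMA_DEST_ADDR_MASK;  /* word-aligned destination */
		rdma->dma_length  = DMA_LENGTH_WORDS_SET(nwords);   /* length in 32-bit words */
		rdma->dma_control = DMA_CONTROL_START_SET(1);       /* kick off the transfer */

		while (!DMA_STATUS_DONE_GET(rdma->dma_status)) {
			if (DMA_STATUS_ERROR_GET(rdma->dma_status))
				return DMA_STATUS_ERROR_CODE_GET(rdma->dma_status);
		}
		return 0;
	}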
diff --git a/drivers/staging/ath6kl/include/common/AR6002/hw4.0/hw/rtc_reg.h b/drivers/staging/ath6kl/include/common/AR6002/hw4.0/hw/rtc_reg.h
new file mode 100644
index 000000000000..0855de5f1400
--- /dev/null
+++ b/drivers/staging/ath6kl/include/common/AR6002/hw4.0/hw/rtc_reg.h
@@ -0,0 +1,975 @@
+// ------------------------------------------------------------------
+// Copyright (c) 2004-2010 Atheros Corporation. All rights reserved.
+//
+//
+// Permission to use, copy, modify, and/or distribute this software for any
+// purpose with or without fee is hereby granted, provided that the above
+// copyright notice and this permission notice appear in all copies.
+//
+// THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+// WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+// MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+// ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+// WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+// ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+// OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+//
+//
+// ------------------------------------------------------------------
+//===================================================================
+// Author(s): ="Atheros"
+//===================================================================
+
+
+#ifdef WLAN_HEADERS
+
+#include "rtc_wlan_reg.h"
+
+
+#ifndef BT_HEADERS
+
+#define RESET_CONTROL_ADDRESS WLAN_RESET_CONTROL_ADDRESS
+#define RESET_CONTROL_OFFSET WLAN_RESET_CONTROL_OFFSET
+#define RESET_CONTROL_DEBUG_UART_RST_MSB WLAN_RESET_CONTROL_DEBUG_UART_RST_MSB
+#define RESET_CONTROL_DEBUG_UART_RST_LSB WLAN_RESET_CONTROL_DEBUG_UART_RST_LSB
+#define RESET_CONTROL_DEBUG_UART_RST_MASK WLAN_RESET_CONTROL_DEBUG_UART_RST_MASK
+#define RESET_CONTROL_DEBUG_UART_RST_GET(x) WLAN_RESET_CONTROL_DEBUG_UART_RST_GET(x)
+#define RESET_CONTROL_DEBUG_UART_RST_SET(x) WLAN_RESET_CONTROL_DEBUG_UART_RST_SET(x)
+#define RESET_CONTROL_BB_COLD_RST_MSB WLAN_RESET_CONTROL_BB_COLD_RST_MSB
+#define RESET_CONTROL_BB_COLD_RST_LSB WLAN_RESET_CONTROL_BB_COLD_RST_LSB
+#define RESET_CONTROL_BB_COLD_RST_MASK WLAN_RESET_CONTROL_BB_COLD_RST_MASK
+#define RESET_CONTROL_BB_COLD_RST_GET(x) WLAN_RESET_CONTROL_BB_COLD_RST_GET(x)
+#define RESET_CONTROL_BB_COLD_RST_SET(x) WLAN_RESET_CONTROL_BB_COLD_RST_SET(x)
+#define RESET_CONTROL_BB_WARM_RST_MSB WLAN_RESET_CONTROL_BB_WARM_RST_MSB
+#define RESET_CONTROL_BB_WARM_RST_LSB WLAN_RESET_CONTROL_BB_WARM_RST_LSB
+#define RESET_CONTROL_BB_WARM_RST_MASK WLAN_RESET_CONTROL_BB_WARM_RST_MASK
+#define RESET_CONTROL_BB_WARM_RST_GET(x) WLAN_RESET_CONTROL_BB_WARM_RST_GET(x)
+#define RESET_CONTROL_BB_WARM_RST_SET(x) WLAN_RESET_CONTROL_BB_WARM_RST_SET(x)
+#define RESET_CONTROL_CPU_INIT_RESET_MSB WLAN_RESET_CONTROL_CPU_INIT_RESET_MSB
+#define RESET_CONTROL_CPU_INIT_RESET_LSB WLAN_RESET_CONTROL_CPU_INIT_RESET_LSB
+#define RESET_CONTROL_CPU_INIT_RESET_MASK WLAN_RESET_CONTROL_CPU_INIT_RESET_MASK
+#define RESET_CONTROL_CPU_INIT_RESET_GET(x) WLAN_RESET_CONTROL_CPU_INIT_RESET_GET(x)
+#define RESET_CONTROL_CPU_INIT_RESET_SET(x) WLAN_RESET_CONTROL_CPU_INIT_RESET_SET(x)
+#define RESET_CONTROL_VMC_REMAP_RESET_MSB WLAN_RESET_CONTROL_VMC_REMAP_RESET_MSB
+#define RESET_CONTROL_VMC_REMAP_RESET_LSB WLAN_RESET_CONTROL_VMC_REMAP_RESET_LSB
+#define RESET_CONTROL_VMC_REMAP_RESET_MASK WLAN_RESET_CONTROL_VMC_REMAP_RESET_MASK
+#define RESET_CONTROL_VMC_REMAP_RESET_GET(x) WLAN_RESET_CONTROL_VMC_REMAP_RESET_GET(x)
+#define RESET_CONTROL_VMC_REMAP_RESET_SET(x) WLAN_RESET_CONTROL_VMC_REMAP_RESET_SET(x)
+#define RESET_CONTROL_RST_OUT_MSB WLAN_RESET_CONTROL_RST_OUT_MSB
+#define RESET_CONTROL_RST_OUT_LSB WLAN_RESET_CONTROL_RST_OUT_LSB
+#define RESET_CONTROL_RST_OUT_MASK WLAN_RESET_CONTROL_RST_OUT_MASK
+#define RESET_CONTROL_RST_OUT_GET(x) WLAN_RESET_CONTROL_RST_OUT_GET(x)
+#define RESET_CONTROL_RST_OUT_SET(x) WLAN_RESET_CONTROL_RST_OUT_SET(x)
+#define RESET_CONTROL_COLD_RST_MSB WLAN_RESET_CONTROL_COLD_RST_MSB
+#define RESET_CONTROL_COLD_RST_LSB WLAN_RESET_CONTROL_COLD_RST_LSB
+#define RESET_CONTROL_COLD_RST_MASK WLAN_RESET_CONTROL_COLD_RST_MASK
+#define RESET_CONTROL_COLD_RST_GET(x) WLAN_RESET_CONTROL_COLD_RST_GET(x)
+#define RESET_CONTROL_COLD_RST_SET(x) WLAN_RESET_CONTROL_COLD_RST_SET(x)
+#define RESET_CONTROL_WARM_RST_MSB WLAN_RESET_CONTROL_WARM_RST_MSB
+#define RESET_CONTROL_WARM_RST_LSB WLAN_RESET_CONTROL_WARM_RST_LSB
+#define RESET_CONTROL_WARM_RST_MASK WLAN_RESET_CONTROL_WARM_RST_MASK
+#define RESET_CONTROL_WARM_RST_GET(x) WLAN_RESET_CONTROL_WARM_RST_GET(x)
+#define RESET_CONTROL_WARM_RST_SET(x) WLAN_RESET_CONTROL_WARM_RST_SET(x)
+#define RESET_CONTROL_CPU_WARM_RST_MSB WLAN_RESET_CONTROL_CPU_WARM_RST_MSB
+#define RESET_CONTROL_CPU_WARM_RST_LSB WLAN_RESET_CONTROL_CPU_WARM_RST_LSB
+#define RESET_CONTROL_CPU_WARM_RST_MASK WLAN_RESET_CONTROL_CPU_WARM_RST_MASK
+#define RESET_CONTROL_CPU_WARM_RST_GET(x) WLAN_RESET_CONTROL_CPU_WARM_RST_GET(x)
+#define RESET_CONTROL_CPU_WARM_RST_SET(x) WLAN_RESET_CONTROL_CPU_WARM_RST_SET(x)
+#define RESET_CONTROL_MAC_COLD_RST_MSB WLAN_RESET_CONTROL_MAC_COLD_RST_MSB
+#define RESET_CONTROL_MAC_COLD_RST_LSB WLAN_RESET_CONTROL_MAC_COLD_RST_LSB
+#define RESET_CONTROL_MAC_COLD_RST_MASK WLAN_RESET_CONTROL_MAC_COLD_RST_MASK
+#define RESET_CONTROL_MAC_COLD_RST_GET(x) WLAN_RESET_CONTROL_MAC_COLD_RST_GET(x)
+#define RESET_CONTROL_MAC_COLD_RST_SET(x) WLAN_RESET_CONTROL_MAC_COLD_RST_SET(x)
+#define RESET_CONTROL_MAC_WARM_RST_MSB WLAN_RESET_CONTROL_MAC_WARM_RST_MSB
+#define RESET_CONTROL_MAC_WARM_RST_LSB WLAN_RESET_CONTROL_MAC_WARM_RST_LSB
+#define RESET_CONTROL_MAC_WARM_RST_MASK WLAN_RESET_CONTROL_MAC_WARM_RST_MASK
+#define RESET_CONTROL_MAC_WARM_RST_GET(x) WLAN_RESET_CONTROL_MAC_WARM_RST_GET(x)
+#define RESET_CONTROL_MAC_WARM_RST_SET(x) WLAN_RESET_CONTROL_MAC_WARM_RST_SET(x)
+#define RESET_CONTROL_MBOX_RST_MSB WLAN_RESET_CONTROL_MBOX_RST_MSB
+#define RESET_CONTROL_MBOX_RST_LSB WLAN_RESET_CONTROL_MBOX_RST_LSB
+#define RESET_CONTROL_MBOX_RST_MASK WLAN_RESET_CONTROL_MBOX_RST_MASK
+#define RESET_CONTROL_MBOX_RST_GET(x) WLAN_RESET_CONTROL_MBOX_RST_GET(x)
+#define RESET_CONTROL_MBOX_RST_SET(x) WLAN_RESET_CONTROL_MBOX_RST_SET(x)
+#define RESET_CONTROL_UART_RST_MSB WLAN_RESET_CONTROL_UART_RST_MSB
+#define RESET_CONTROL_UART_RST_LSB WLAN_RESET_CONTROL_UART_RST_LSB
+#define RESET_CONTROL_UART_RST_MASK WLAN_RESET_CONTROL_UART_RST_MASK
+#define RESET_CONTROL_UART_RST_GET(x) WLAN_RESET_CONTROL_UART_RST_GET(x)
+#define RESET_CONTROL_UART_RST_SET(x) WLAN_RESET_CONTROL_UART_RST_SET(x)
+#define RESET_CONTROL_SI0_RST_MSB WLAN_RESET_CONTROL_SI0_RST_MSB
+#define RESET_CONTROL_SI0_RST_LSB WLAN_RESET_CONTROL_SI0_RST_LSB
+#define RESET_CONTROL_SI0_RST_MASK WLAN_RESET_CONTROL_SI0_RST_MASK
+#define RESET_CONTROL_SI0_RST_GET(x) WLAN_RESET_CONTROL_SI0_RST_GET(x)
+#define RESET_CONTROL_SI0_RST_SET(x) WLAN_RESET_CONTROL_SI0_RST_SET(x)
+#define XTAL_CONTROL_ADDRESS WLAN_XTAL_CONTROL_ADDRESS
+#define XTAL_CONTROL_OFFSET WLAN_XTAL_CONTROL_OFFSET
+#define XTAL_CONTROL_TCXO_MSB WLAN_XTAL_CONTROL_TCXO_MSB
+#define XTAL_CONTROL_TCXO_LSB WLAN_XTAL_CONTROL_TCXO_LSB
+#define XTAL_CONTROL_TCXO_MASK WLAN_XTAL_CONTROL_TCXO_MASK
+#define XTAL_CONTROL_TCXO_GET(x) WLAN_XTAL_CONTROL_TCXO_GET(x)
+#define XTAL_CONTROL_TCXO_SET(x) WLAN_XTAL_CONTROL_TCXO_SET(x)
+#define TCXO_DETECT_ADDRESS WLAN_TCXO_DETECT_ADDRESS
+#define TCXO_DETECT_OFFSET WLAN_TCXO_DETECT_OFFSET
+#define TCXO_DETECT_PRESENT_MSB WLAN_TCXO_DETECT_PRESENT_MSB
+#define TCXO_DETECT_PRESENT_LSB WLAN_TCXO_DETECT_PRESENT_LSB
+#define TCXO_DETECT_PRESENT_MASK WLAN_TCXO_DETECT_PRESENT_MASK
+#define TCXO_DETECT_PRESENT_GET(x) WLAN_TCXO_DETECT_PRESENT_GET(x)
+#define TCXO_DETECT_PRESENT_SET(x) WLAN_TCXO_DETECT_PRESENT_SET(x)
+#define XTAL_TEST_ADDRESS WLAN_XTAL_TEST_ADDRESS
+#define XTAL_TEST_OFFSET WLAN_XTAL_TEST_OFFSET
+#define XTAL_TEST_NOTCXODET_MSB WLAN_XTAL_TEST_NOTCXODET_MSB
+#define XTAL_TEST_NOTCXODET_LSB WLAN_XTAL_TEST_NOTCXODET_LSB
+#define XTAL_TEST_NOTCXODET_MASK WLAN_XTAL_TEST_NOTCXODET_MASK
+#define XTAL_TEST_NOTCXODET_GET(x) WLAN_XTAL_TEST_NOTCXODET_GET(x)
+#define XTAL_TEST_NOTCXODET_SET(x) WLAN_XTAL_TEST_NOTCXODET_SET(x)
+#define QUADRATURE_ADDRESS WLAN_QUADRATURE_ADDRESS
+#define QUADRATURE_OFFSET WLAN_QUADRATURE_OFFSET
+#define QUADRATURE_ADC_MSB WLAN_QUADRATURE_ADC_MSB
+#define QUADRATURE_ADC_LSB WLAN_QUADRATURE_ADC_LSB
+#define QUADRATURE_ADC_MASK WLAN_QUADRATURE_ADC_MASK
+#define QUADRATURE_ADC_GET(x) WLAN_QUADRATURE_ADC_GET(x)
+#define QUADRATURE_ADC_SET(x) WLAN_QUADRATURE_ADC_SET(x)
+#define QUADRATURE_SEL_MSB WLAN_QUADRATURE_SEL_MSB
+#define QUADRATURE_SEL_LSB WLAN_QUADRATURE_SEL_LSB
+#define QUADRATURE_SEL_MASK WLAN_QUADRATURE_SEL_MASK
+#define QUADRATURE_SEL_GET(x) WLAN_QUADRATURE_SEL_GET(x)
+#define QUADRATURE_SEL_SET(x) WLAN_QUADRATURE_SEL_SET(x)
+#define QUADRATURE_DAC_MSB WLAN_QUADRATURE_DAC_MSB
+#define QUADRATURE_DAC_LSB WLAN_QUADRATURE_DAC_LSB
+#define QUADRATURE_DAC_MASK WLAN_QUADRATURE_DAC_MASK
+#define QUADRATURE_DAC_GET(x) WLAN_QUADRATURE_DAC_GET(x)
+#define QUADRATURE_DAC_SET(x) WLAN_QUADRATURE_DAC_SET(x)
+#define PLL_CONTROL_ADDRESS WLAN_PLL_CONTROL_ADDRESS
+#define PLL_CONTROL_OFFSET WLAN_PLL_CONTROL_OFFSET
+#define PLL_CONTROL_DIG_TEST_CLK_MSB WLAN_PLL_CONTROL_DIG_TEST_CLK_MSB
+#define PLL_CONTROL_DIG_TEST_CLK_LSB WLAN_PLL_CONTROL_DIG_TEST_CLK_LSB
+#define PLL_CONTROL_DIG_TEST_CLK_MASK WLAN_PLL_CONTROL_DIG_TEST_CLK_MASK
+#define PLL_CONTROL_DIG_TEST_CLK_GET(x) WLAN_PLL_CONTROL_DIG_TEST_CLK_GET(x)
+#define PLL_CONTROL_DIG_TEST_CLK_SET(x) WLAN_PLL_CONTROL_DIG_TEST_CLK_SET(x)
+#define PLL_CONTROL_MAC_OVERRIDE_MSB WLAN_PLL_CONTROL_MAC_OVERRIDE_MSB
+#define PLL_CONTROL_MAC_OVERRIDE_LSB WLAN_PLL_CONTROL_MAC_OVERRIDE_LSB
+#define PLL_CONTROL_MAC_OVERRIDE_MASK WLAN_PLL_CONTROL_MAC_OVERRIDE_MASK
+#define PLL_CONTROL_MAC_OVERRIDE_GET(x) WLAN_PLL_CONTROL_MAC_OVERRIDE_GET(x)
+#define PLL_CONTROL_MAC_OVERRIDE_SET(x) WLAN_PLL_CONTROL_MAC_OVERRIDE_SET(x)
+#define PLL_CONTROL_NOPWD_MSB WLAN_PLL_CONTROL_NOPWD_MSB
+#define PLL_CONTROL_NOPWD_LSB WLAN_PLL_CONTROL_NOPWD_LSB
+#define PLL_CONTROL_NOPWD_MASK WLAN_PLL_CONTROL_NOPWD_MASK
+#define PLL_CONTROL_NOPWD_GET(x) WLAN_PLL_CONTROL_NOPWD_GET(x)
+#define PLL_CONTROL_NOPWD_SET(x) WLAN_PLL_CONTROL_NOPWD_SET(x)
+#define PLL_CONTROL_UPDATING_MSB WLAN_PLL_CONTROL_UPDATING_MSB
+#define PLL_CONTROL_UPDATING_LSB WLAN_PLL_CONTROL_UPDATING_LSB
+#define PLL_CONTROL_UPDATING_MASK WLAN_PLL_CONTROL_UPDATING_MASK
+#define PLL_CONTROL_UPDATING_GET(x) WLAN_PLL_CONTROL_UPDATING_GET(x)
+#define PLL_CONTROL_UPDATING_SET(x) WLAN_PLL_CONTROL_UPDATING_SET(x)
+#define PLL_CONTROL_BYPASS_MSB WLAN_PLL_CONTROL_BYPASS_MSB
+#define PLL_CONTROL_BYPASS_LSB WLAN_PLL_CONTROL_BYPASS_LSB
+#define PLL_CONTROL_BYPASS_MASK WLAN_PLL_CONTROL_BYPASS_MASK
+#define PLL_CONTROL_BYPASS_GET(x) WLAN_PLL_CONTROL_BYPASS_GET(x)
+#define PLL_CONTROL_BYPASS_SET(x) WLAN_PLL_CONTROL_BYPASS_SET(x)
+#define PLL_CONTROL_REFDIV_MSB WLAN_PLL_CONTROL_REFDIV_MSB
+#define PLL_CONTROL_REFDIV_LSB WLAN_PLL_CONTROL_REFDIV_LSB
+#define PLL_CONTROL_REFDIV_MASK WLAN_PLL_CONTROL_REFDIV_MASK
+#define PLL_CONTROL_REFDIV_GET(x) WLAN_PLL_CONTROL_REFDIV_GET(x)
+#define PLL_CONTROL_REFDIV_SET(x) WLAN_PLL_CONTROL_REFDIV_SET(x)
+#define PLL_CONTROL_DIV_MSB WLAN_PLL_CONTROL_DIV_MSB
+#define PLL_CONTROL_DIV_LSB WLAN_PLL_CONTROL_DIV_LSB
+#define PLL_CONTROL_DIV_MASK WLAN_PLL_CONTROL_DIV_MASK
+#define PLL_CONTROL_DIV_GET(x) WLAN_PLL_CONTROL_DIV_GET(x)
+#define PLL_CONTROL_DIV_SET(x) WLAN_PLL_CONTROL_DIV_SET(x)
+#define PLL_SETTLE_ADDRESS WLAN_PLL_SETTLE_ADDRESS
+#define PLL_SETTLE_OFFSET WLAN_PLL_SETTLE_OFFSET
+#define PLL_SETTLE_TIME_MSB WLAN_PLL_SETTLE_TIME_MSB
+#define PLL_SETTLE_TIME_LSB WLAN_PLL_SETTLE_TIME_LSB
+#define PLL_SETTLE_TIME_MASK WLAN_PLL_SETTLE_TIME_MASK
+#define PLL_SETTLE_TIME_GET(x) WLAN_PLL_SETTLE_TIME_GET(x)
+#define PLL_SETTLE_TIME_SET(x) WLAN_PLL_SETTLE_TIME_SET(x)
+#define XTAL_SETTLE_ADDRESS WLAN_XTAL_SETTLE_ADDRESS
+#define XTAL_SETTLE_OFFSET WLAN_XTAL_SETTLE_OFFSET
+#define XTAL_SETTLE_TIME_MSB WLAN_XTAL_SETTLE_TIME_MSB
+#define XTAL_SETTLE_TIME_LSB WLAN_XTAL_SETTLE_TIME_LSB
+#define XTAL_SETTLE_TIME_MASK WLAN_XTAL_SETTLE_TIME_MASK
+#define XTAL_SETTLE_TIME_GET(x) WLAN_XTAL_SETTLE_TIME_GET(x)
+#define XTAL_SETTLE_TIME_SET(x) WLAN_XTAL_SETTLE_TIME_SET(x)
+#define CPU_CLOCK_ADDRESS WLAN_CPU_CLOCK_ADDRESS
+#define CPU_CLOCK_OFFSET WLAN_CPU_CLOCK_OFFSET
+#define CPU_CLOCK_STANDARD_MSB WLAN_CPU_CLOCK_STANDARD_MSB
+#define CPU_CLOCK_STANDARD_LSB WLAN_CPU_CLOCK_STANDARD_LSB
+#define CPU_CLOCK_STANDARD_MASK WLAN_CPU_CLOCK_STANDARD_MASK
+#define CPU_CLOCK_STANDARD_GET(x) WLAN_CPU_CLOCK_STANDARD_GET(x)
+#define CPU_CLOCK_STANDARD_SET(x) WLAN_CPU_CLOCK_STANDARD_SET(x)
+#define CLOCK_OUT_ADDRESS WLAN_CLOCK_OUT_ADDRESS
+#define CLOCK_OUT_OFFSET WLAN_CLOCK_OUT_OFFSET
+#define CLOCK_OUT_SELECT_MSB WLAN_CLOCK_OUT_SELECT_MSB
+#define CLOCK_OUT_SELECT_LSB WLAN_CLOCK_OUT_SELECT_LSB
+#define CLOCK_OUT_SELECT_MASK WLAN_CLOCK_OUT_SELECT_MASK
+#define CLOCK_OUT_SELECT_GET(x) WLAN_CLOCK_OUT_SELECT_GET(x)
+#define CLOCK_OUT_SELECT_SET(x) WLAN_CLOCK_OUT_SELECT_SET(x)
+#define CLOCK_CONTROL_ADDRESS WLAN_CLOCK_CONTROL_ADDRESS
+#define CLOCK_CONTROL_OFFSET WLAN_CLOCK_CONTROL_OFFSET
+#define CLOCK_CONTROL_LF_CLK32_MSB WLAN_CLOCK_CONTROL_LF_CLK32_MSB
+#define CLOCK_CONTROL_LF_CLK32_LSB WLAN_CLOCK_CONTROL_LF_CLK32_LSB
+#define CLOCK_CONTROL_LF_CLK32_MASK WLAN_CLOCK_CONTROL_LF_CLK32_MASK
+#define CLOCK_CONTROL_LF_CLK32_GET(x) WLAN_CLOCK_CONTROL_LF_CLK32_GET(x)
+#define CLOCK_CONTROL_LF_CLK32_SET(x) WLAN_CLOCK_CONTROL_LF_CLK32_SET(x)
+#define CLOCK_CONTROL_SI0_CLK_MSB WLAN_CLOCK_CONTROL_SI0_CLK_MSB
+#define CLOCK_CONTROL_SI0_CLK_LSB WLAN_CLOCK_CONTROL_SI0_CLK_LSB
+#define CLOCK_CONTROL_SI0_CLK_MASK WLAN_CLOCK_CONTROL_SI0_CLK_MASK
+#define CLOCK_CONTROL_SI0_CLK_GET(x) WLAN_CLOCK_CONTROL_SI0_CLK_GET(x)
+#define CLOCK_CONTROL_SI0_CLK_SET(x) WLAN_CLOCK_CONTROL_SI0_CLK_SET(x)
+#define BIAS_OVERRIDE_ADDRESS WLAN_BIAS_OVERRIDE_ADDRESS
+#define BIAS_OVERRIDE_OFFSET WLAN_BIAS_OVERRIDE_OFFSET
+#define BIAS_OVERRIDE_ON_MSB WLAN_BIAS_OVERRIDE_ON_MSB
+#define BIAS_OVERRIDE_ON_LSB WLAN_BIAS_OVERRIDE_ON_LSB
+#define BIAS_OVERRIDE_ON_MASK WLAN_BIAS_OVERRIDE_ON_MASK
+#define BIAS_OVERRIDE_ON_GET(x) WLAN_BIAS_OVERRIDE_ON_GET(x)
+#define BIAS_OVERRIDE_ON_SET(x) WLAN_BIAS_OVERRIDE_ON_SET(x)
+#define WDT_CONTROL_ADDRESS WLAN_WDT_CONTROL_ADDRESS
+#define WDT_CONTROL_OFFSET WLAN_WDT_CONTROL_OFFSET
+#define WDT_CONTROL_ACTION_MSB WLAN_WDT_CONTROL_ACTION_MSB
+#define WDT_CONTROL_ACTION_LSB WLAN_WDT_CONTROL_ACTION_LSB
+#define WDT_CONTROL_ACTION_MASK WLAN_WDT_CONTROL_ACTION_MASK
+#define WDT_CONTROL_ACTION_GET(x) WLAN_WDT_CONTROL_ACTION_GET(x)
+#define WDT_CONTROL_ACTION_SET(x) WLAN_WDT_CONTROL_ACTION_SET(x)
+#define WDT_STATUS_ADDRESS WLAN_WDT_STATUS_ADDRESS
+#define WDT_STATUS_OFFSET WLAN_WDT_STATUS_OFFSET
+#define WDT_STATUS_INTERRUPT_MSB WLAN_WDT_STATUS_INTERRUPT_MSB
+#define WDT_STATUS_INTERRUPT_LSB WLAN_WDT_STATUS_INTERRUPT_LSB
+#define WDT_STATUS_INTERRUPT_MASK WLAN_WDT_STATUS_INTERRUPT_MASK
+#define WDT_STATUS_INTERRUPT_GET(x) WLAN_WDT_STATUS_INTERRUPT_GET(x)
+#define WDT_STATUS_INTERRUPT_SET(x) WLAN_WDT_STATUS_INTERRUPT_SET(x)
+#define WDT_ADDRESS WLAN_WDT_ADDRESS
+#define WDT_OFFSET WLAN_WDT_OFFSET
+#define WDT_TARGET_MSB WLAN_WDT_TARGET_MSB
+#define WDT_TARGET_LSB WLAN_WDT_TARGET_LSB
+#define WDT_TARGET_MASK WLAN_WDT_TARGET_MASK
+#define WDT_TARGET_GET(x) WLAN_WDT_TARGET_GET(x)
+#define WDT_TARGET_SET(x) WLAN_WDT_TARGET_SET(x)
+#define WDT_COUNT_ADDRESS WLAN_WDT_COUNT_ADDRESS
+#define WDT_COUNT_OFFSET WLAN_WDT_COUNT_OFFSET
+#define WDT_COUNT_VALUE_MSB WLAN_WDT_COUNT_VALUE_MSB
+#define WDT_COUNT_VALUE_LSB WLAN_WDT_COUNT_VALUE_LSB
+#define WDT_COUNT_VALUE_MASK WLAN_WDT_COUNT_VALUE_MASK
+#define WDT_COUNT_VALUE_GET(x) WLAN_WDT_COUNT_VALUE_GET(x)
+#define WDT_COUNT_VALUE_SET(x) WLAN_WDT_COUNT_VALUE_SET(x)
+#define WDT_RESET_ADDRESS WLAN_WDT_RESET_ADDRESS
+#define WDT_RESET_OFFSET WLAN_WDT_RESET_OFFSET
+#define WDT_RESET_VALUE_MSB WLAN_WDT_RESET_VALUE_MSB
+#define WDT_RESET_VALUE_LSB WLAN_WDT_RESET_VALUE_LSB
+#define WDT_RESET_VALUE_MASK WLAN_WDT_RESET_VALUE_MASK
+#define WDT_RESET_VALUE_GET(x) WLAN_WDT_RESET_VALUE_GET(x)
+#define WDT_RESET_VALUE_SET(x) WLAN_WDT_RESET_VALUE_SET(x)
+#define INT_STATUS_ADDRESS WLAN_INT_STATUS_ADDRESS
+#define INT_STATUS_OFFSET WLAN_INT_STATUS_OFFSET
+#define INT_STATUS_HCI_UART_MSB WLAN_INT_STATUS_HCI_UART_MSB
+#define INT_STATUS_HCI_UART_LSB WLAN_INT_STATUS_HCI_UART_LSB
+#define INT_STATUS_HCI_UART_MASK WLAN_INT_STATUS_HCI_UART_MASK
+#define INT_STATUS_HCI_UART_GET(x) WLAN_INT_STATUS_HCI_UART_GET(x)
+#define INT_STATUS_HCI_UART_SET(x) WLAN_INT_STATUS_HCI_UART_SET(x)
+#define INT_STATUS_THERM_MSB WLAN_INT_STATUS_THERM_MSB
+#define INT_STATUS_THERM_LSB WLAN_INT_STATUS_THERM_LSB
+#define INT_STATUS_THERM_MASK WLAN_INT_STATUS_THERM_MASK
+#define INT_STATUS_THERM_GET(x) WLAN_INT_STATUS_THERM_GET(x)
+#define INT_STATUS_THERM_SET(x) WLAN_INT_STATUS_THERM_SET(x)
+#define INT_STATUS_EFUSE_OVERWRITE_MSB WLAN_INT_STATUS_EFUSE_OVERWRITE_MSB
+#define INT_STATUS_EFUSE_OVERWRITE_LSB WLAN_INT_STATUS_EFUSE_OVERWRITE_LSB
+#define INT_STATUS_EFUSE_OVERWRITE_MASK WLAN_INT_STATUS_EFUSE_OVERWRITE_MASK
+#define INT_STATUS_EFUSE_OVERWRITE_GET(x) WLAN_INT_STATUS_EFUSE_OVERWRITE_GET(x)
+#define INT_STATUS_EFUSE_OVERWRITE_SET(x) WLAN_INT_STATUS_EFUSE_OVERWRITE_SET(x)
+#define INT_STATUS_UART_MBOX_MSB WLAN_INT_STATUS_UART_MBOX_MSB
+#define INT_STATUS_UART_MBOX_LSB WLAN_INT_STATUS_UART_MBOX_LSB
+#define INT_STATUS_UART_MBOX_MASK WLAN_INT_STATUS_UART_MBOX_MASK
+#define INT_STATUS_UART_MBOX_GET(x) WLAN_INT_STATUS_UART_MBOX_GET(x)
+#define INT_STATUS_UART_MBOX_SET(x) WLAN_INT_STATUS_UART_MBOX_SET(x)
+#define INT_STATUS_GENERIC_MBOX_MSB WLAN_INT_STATUS_GENERIC_MBOX_MSB
+#define INT_STATUS_GENERIC_MBOX_LSB WLAN_INT_STATUS_GENERIC_MBOX_LSB
+#define INT_STATUS_GENERIC_MBOX_MASK WLAN_INT_STATUS_GENERIC_MBOX_MASK
+#define INT_STATUS_GENERIC_MBOX_GET(x) WLAN_INT_STATUS_GENERIC_MBOX_GET(x)
+#define INT_STATUS_GENERIC_MBOX_SET(x) WLAN_INT_STATUS_GENERIC_MBOX_SET(x)
+#define INT_STATUS_RDMA_MSB WLAN_INT_STATUS_RDMA_MSB
+#define INT_STATUS_RDMA_LSB WLAN_INT_STATUS_RDMA_LSB
+#define INT_STATUS_RDMA_MASK WLAN_INT_STATUS_RDMA_MASK
+#define INT_STATUS_RDMA_GET(x) WLAN_INT_STATUS_RDMA_GET(x)
+#define INT_STATUS_RDMA_SET(x) WLAN_INT_STATUS_RDMA_SET(x)
+#define INT_STATUS_BTCOEX_MSB WLAN_INT_STATUS_BTCOEX_MSB
+#define INT_STATUS_BTCOEX_LSB WLAN_INT_STATUS_BTCOEX_LSB
+#define INT_STATUS_BTCOEX_MASK WLAN_INT_STATUS_BTCOEX_MASK
+#define INT_STATUS_BTCOEX_GET(x) WLAN_INT_STATUS_BTCOEX_GET(x)
+#define INT_STATUS_BTCOEX_SET(x) WLAN_INT_STATUS_BTCOEX_SET(x)
+#define INT_STATUS_RTC_POWER_MSB WLAN_INT_STATUS_RTC_POWER_MSB
+#define INT_STATUS_RTC_POWER_LSB WLAN_INT_STATUS_RTC_POWER_LSB
+#define INT_STATUS_RTC_POWER_MASK WLAN_INT_STATUS_RTC_POWER_MASK
+#define INT_STATUS_RTC_POWER_GET(x) WLAN_INT_STATUS_RTC_POWER_GET(x)
+#define INT_STATUS_RTC_POWER_SET(x) WLAN_INT_STATUS_RTC_POWER_SET(x)
+#define INT_STATUS_MAC_MSB WLAN_INT_STATUS_MAC_MSB
+#define INT_STATUS_MAC_LSB WLAN_INT_STATUS_MAC_LSB
+#define INT_STATUS_MAC_MASK WLAN_INT_STATUS_MAC_MASK
+#define INT_STATUS_MAC_GET(x) WLAN_INT_STATUS_MAC_GET(x)
+#define INT_STATUS_MAC_SET(x) WLAN_INT_STATUS_MAC_SET(x)
+#define INT_STATUS_MAILBOX_MSB WLAN_INT_STATUS_MAILBOX_MSB
+#define INT_STATUS_MAILBOX_LSB WLAN_INT_STATUS_MAILBOX_LSB
+#define INT_STATUS_MAILBOX_MASK WLAN_INT_STATUS_MAILBOX_MASK
+#define INT_STATUS_MAILBOX_GET(x) WLAN_INT_STATUS_MAILBOX_GET(x)
+#define INT_STATUS_MAILBOX_SET(x) WLAN_INT_STATUS_MAILBOX_SET(x)
+#define INT_STATUS_RTC_ALARM_MSB WLAN_INT_STATUS_RTC_ALARM_MSB
+#define INT_STATUS_RTC_ALARM_LSB WLAN_INT_STATUS_RTC_ALARM_LSB
+#define INT_STATUS_RTC_ALARM_MASK WLAN_INT_STATUS_RTC_ALARM_MASK
+#define INT_STATUS_RTC_ALARM_GET(x) WLAN_INT_STATUS_RTC_ALARM_GET(x)
+#define INT_STATUS_RTC_ALARM_SET(x) WLAN_INT_STATUS_RTC_ALARM_SET(x)
+#define INT_STATUS_HF_TIMER_MSB WLAN_INT_STATUS_HF_TIMER_MSB
+#define INT_STATUS_HF_TIMER_LSB WLAN_INT_STATUS_HF_TIMER_LSB
+#define INT_STATUS_HF_TIMER_MASK WLAN_INT_STATUS_HF_TIMER_MASK
+#define INT_STATUS_HF_TIMER_GET(x) WLAN_INT_STATUS_HF_TIMER_GET(x)
+#define INT_STATUS_HF_TIMER_SET(x) WLAN_INT_STATUS_HF_TIMER_SET(x)
+#define INT_STATUS_LF_TIMER3_MSB WLAN_INT_STATUS_LF_TIMER3_MSB
+#define INT_STATUS_LF_TIMER3_LSB WLAN_INT_STATUS_LF_TIMER3_LSB
+#define INT_STATUS_LF_TIMER3_MASK WLAN_INT_STATUS_LF_TIMER3_MASK
+#define INT_STATUS_LF_TIMER3_GET(x) WLAN_INT_STATUS_LF_TIMER3_GET(x)
+#define INT_STATUS_LF_TIMER3_SET(x) WLAN_INT_STATUS_LF_TIMER3_SET(x)
+#define INT_STATUS_LF_TIMER2_MSB WLAN_INT_STATUS_LF_TIMER2_MSB
+#define INT_STATUS_LF_TIMER2_LSB WLAN_INT_STATUS_LF_TIMER2_LSB
+#define INT_STATUS_LF_TIMER2_MASK WLAN_INT_STATUS_LF_TIMER2_MASK
+#define INT_STATUS_LF_TIMER2_GET(x) WLAN_INT_STATUS_LF_TIMER2_GET(x)
+#define INT_STATUS_LF_TIMER2_SET(x) WLAN_INT_STATUS_LF_TIMER2_SET(x)
+#define INT_STATUS_LF_TIMER1_MSB WLAN_INT_STATUS_LF_TIMER1_MSB
+#define INT_STATUS_LF_TIMER1_LSB WLAN_INT_STATUS_LF_TIMER1_LSB
+#define INT_STATUS_LF_TIMER1_MASK WLAN_INT_STATUS_LF_TIMER1_MASK
+#define INT_STATUS_LF_TIMER1_GET(x) WLAN_INT_STATUS_LF_TIMER1_GET(x)
+#define INT_STATUS_LF_TIMER1_SET(x) WLAN_INT_STATUS_LF_TIMER1_SET(x)
+#define INT_STATUS_LF_TIMER0_MSB WLAN_INT_STATUS_LF_TIMER0_MSB
+#define INT_STATUS_LF_TIMER0_LSB WLAN_INT_STATUS_LF_TIMER0_LSB
+#define INT_STATUS_LF_TIMER0_MASK WLAN_INT_STATUS_LF_TIMER0_MASK
+#define INT_STATUS_LF_TIMER0_GET(x) WLAN_INT_STATUS_LF_TIMER0_GET(x)
+#define INT_STATUS_LF_TIMER0_SET(x) WLAN_INT_STATUS_LF_TIMER0_SET(x)
+#define INT_STATUS_KEYPAD_MSB WLAN_INT_STATUS_KEYPAD_MSB
+#define INT_STATUS_KEYPAD_LSB WLAN_INT_STATUS_KEYPAD_LSB
+#define INT_STATUS_KEYPAD_MASK WLAN_INT_STATUS_KEYPAD_MASK
+#define INT_STATUS_KEYPAD_GET(x) WLAN_INT_STATUS_KEYPAD_GET(x)
+#define INT_STATUS_KEYPAD_SET(x) WLAN_INT_STATUS_KEYPAD_SET(x)
+#define INT_STATUS_SI_MSB WLAN_INT_STATUS_SI_MSB
+#define INT_STATUS_SI_LSB WLAN_INT_STATUS_SI_LSB
+#define INT_STATUS_SI_MASK WLAN_INT_STATUS_SI_MASK
+#define INT_STATUS_SI_GET(x) WLAN_INT_STATUS_SI_GET(x)
+#define INT_STATUS_SI_SET(x) WLAN_INT_STATUS_SI_SET(x)
+#define INT_STATUS_GPIO_MSB WLAN_INT_STATUS_GPIO_MSB
+#define INT_STATUS_GPIO_LSB WLAN_INT_STATUS_GPIO_LSB
+#define INT_STATUS_GPIO_MASK WLAN_INT_STATUS_GPIO_MASK
+#define INT_STATUS_GPIO_GET(x) WLAN_INT_STATUS_GPIO_GET(x)
+#define INT_STATUS_GPIO_SET(x) WLAN_INT_STATUS_GPIO_SET(x)
+#define INT_STATUS_UART_MSB WLAN_INT_STATUS_UART_MSB
+#define INT_STATUS_UART_LSB WLAN_INT_STATUS_UART_LSB
+#define INT_STATUS_UART_MASK WLAN_INT_STATUS_UART_MASK
+#define INT_STATUS_UART_GET(x) WLAN_INT_STATUS_UART_GET(x)
+#define INT_STATUS_UART_SET(x) WLAN_INT_STATUS_UART_SET(x)
+#define INT_STATUS_ERROR_MSB WLAN_INT_STATUS_ERROR_MSB
+#define INT_STATUS_ERROR_LSB WLAN_INT_STATUS_ERROR_LSB
+#define INT_STATUS_ERROR_MASK WLAN_INT_STATUS_ERROR_MASK
+#define INT_STATUS_ERROR_GET(x) WLAN_INT_STATUS_ERROR_GET(x)
+#define INT_STATUS_ERROR_SET(x) WLAN_INT_STATUS_ERROR_SET(x)
+#define INT_STATUS_WDT_INT_MSB WLAN_INT_STATUS_WDT_INT_MSB
+#define INT_STATUS_WDT_INT_LSB WLAN_INT_STATUS_WDT_INT_LSB
+#define INT_STATUS_WDT_INT_MASK WLAN_INT_STATUS_WDT_INT_MASK
+#define INT_STATUS_WDT_INT_GET(x) WLAN_INT_STATUS_WDT_INT_GET(x)
+#define INT_STATUS_WDT_INT_SET(x) WLAN_INT_STATUS_WDT_INT_SET(x)
+#define LF_TIMER0_ADDRESS WLAN_LF_TIMER0_ADDRESS
+#define LF_TIMER0_OFFSET WLAN_LF_TIMER0_OFFSET
+#define LF_TIMER0_TARGET_MSB WLAN_LF_TIMER0_TARGET_MSB
+#define LF_TIMER0_TARGET_LSB WLAN_LF_TIMER0_TARGET_LSB
+#define LF_TIMER0_TARGET_MASK WLAN_LF_TIMER0_TARGET_MASK
+#define LF_TIMER0_TARGET_GET(x) WLAN_LF_TIMER0_TARGET_GET(x)
+#define LF_TIMER0_TARGET_SET(x) WLAN_LF_TIMER0_TARGET_SET(x)
+#define LF_TIMER_COUNT0_ADDRESS WLAN_LF_TIMER_COUNT0_ADDRESS
+#define LF_TIMER_COUNT0_OFFSET WLAN_LF_TIMER_COUNT0_OFFSET
+#define LF_TIMER_COUNT0_VALUE_MSB WLAN_LF_TIMER_COUNT0_VALUE_MSB
+#define LF_TIMER_COUNT0_VALUE_LSB WLAN_LF_TIMER_COUNT0_VALUE_LSB
+#define LF_TIMER_COUNT0_VALUE_MASK WLAN_LF_TIMER_COUNT0_VALUE_MASK
+#define LF_TIMER_COUNT0_VALUE_GET(x) WLAN_LF_TIMER_COUNT0_VALUE_GET(x)
+#define LF_TIMER_COUNT0_VALUE_SET(x) WLAN_LF_TIMER_COUNT0_VALUE_SET(x)
+#define LF_TIMER_CONTROL0_ADDRESS WLAN_LF_TIMER_CONTROL0_ADDRESS
+#define LF_TIMER_CONTROL0_OFFSET WLAN_LF_TIMER_CONTROL0_OFFSET
+#define LF_TIMER_CONTROL0_ENABLE_MSB WLAN_LF_TIMER_CONTROL0_ENABLE_MSB
+#define LF_TIMER_CONTROL0_ENABLE_LSB WLAN_LF_TIMER_CONTROL0_ENABLE_LSB
+#define LF_TIMER_CONTROL0_ENABLE_MASK WLAN_LF_TIMER_CONTROL0_ENABLE_MASK
+#define LF_TIMER_CONTROL0_ENABLE_GET(x) WLAN_LF_TIMER_CONTROL0_ENABLE_GET(x)
+#define LF_TIMER_CONTROL0_ENABLE_SET(x) WLAN_LF_TIMER_CONTROL0_ENABLE_SET(x)
+#define LF_TIMER_CONTROL0_AUTO_RESTART_MSB WLAN_LF_TIMER_CONTROL0_AUTO_RESTART_MSB
+#define LF_TIMER_CONTROL0_AUTO_RESTART_LSB WLAN_LF_TIMER_CONTROL0_AUTO_RESTART_LSB
+#define LF_TIMER_CONTROL0_AUTO_RESTART_MASK WLAN_LF_TIMER_CONTROL0_AUTO_RESTART_MASK
+#define LF_TIMER_CONTROL0_AUTO_RESTART_GET(x) WLAN_LF_TIMER_CONTROL0_AUTO_RESTART_GET(x)
+#define LF_TIMER_CONTROL0_AUTO_RESTART_SET(x) WLAN_LF_TIMER_CONTROL0_AUTO_RESTART_SET(x)
+#define LF_TIMER_CONTROL0_RESET_MSB WLAN_LF_TIMER_CONTROL0_RESET_MSB
+#define LF_TIMER_CONTROL0_RESET_LSB WLAN_LF_TIMER_CONTROL0_RESET_LSB
+#define LF_TIMER_CONTROL0_RESET_MASK WLAN_LF_TIMER_CONTROL0_RESET_MASK
+#define LF_TIMER_CONTROL0_RESET_GET(x) WLAN_LF_TIMER_CONTROL0_RESET_GET(x)
+#define LF_TIMER_CONTROL0_RESET_SET(x) WLAN_LF_TIMER_CONTROL0_RESET_SET(x)
+#define LF_TIMER_STATUS0_ADDRESS WLAN_LF_TIMER_STATUS0_ADDRESS
+#define LF_TIMER_STATUS0_OFFSET WLAN_LF_TIMER_STATUS0_OFFSET
+#define LF_TIMER_STATUS0_INTERRUPT_MSB WLAN_LF_TIMER_STATUS0_INTERRUPT_MSB
+#define LF_TIMER_STATUS0_INTERRUPT_LSB WLAN_LF_TIMER_STATUS0_INTERRUPT_LSB
+#define LF_TIMER_STATUS0_INTERRUPT_MASK WLAN_LF_TIMER_STATUS0_INTERRUPT_MASK
+#define LF_TIMER_STATUS0_INTERRUPT_GET(x) WLAN_LF_TIMER_STATUS0_INTERRUPT_GET(x)
+#define LF_TIMER_STATUS0_INTERRUPT_SET(x) WLAN_LF_TIMER_STATUS0_INTERRUPT_SET(x)
+#define LF_TIMER1_ADDRESS WLAN_LF_TIMER1_ADDRESS
+#define LF_TIMER1_OFFSET WLAN_LF_TIMER1_OFFSET
+#define LF_TIMER1_TARGET_MSB WLAN_LF_TIMER1_TARGET_MSB
+#define LF_TIMER1_TARGET_LSB WLAN_LF_TIMER1_TARGET_LSB
+#define LF_TIMER1_TARGET_MASK WLAN_LF_TIMER1_TARGET_MASK
+#define LF_TIMER1_TARGET_GET(x) WLAN_LF_TIMER1_TARGET_GET(x)
+#define LF_TIMER1_TARGET_SET(x) WLAN_LF_TIMER1_TARGET_SET(x)
+#define LF_TIMER_COUNT1_ADDRESS WLAN_LF_TIMER_COUNT1_ADDRESS
+#define LF_TIMER_COUNT1_OFFSET WLAN_LF_TIMER_COUNT1_OFFSET
+#define LF_TIMER_COUNT1_VALUE_MSB WLAN_LF_TIMER_COUNT1_VALUE_MSB
+#define LF_TIMER_COUNT1_VALUE_LSB WLAN_LF_TIMER_COUNT1_VALUE_LSB
+#define LF_TIMER_COUNT1_VALUE_MASK WLAN_LF_TIMER_COUNT1_VALUE_MASK
+#define LF_TIMER_COUNT1_VALUE_GET(x) WLAN_LF_TIMER_COUNT1_VALUE_GET(x)
+#define LF_TIMER_COUNT1_VALUE_SET(x) WLAN_LF_TIMER_COUNT1_VALUE_SET(x)
+#define LF_TIMER_CONTROL1_ADDRESS WLAN_LF_TIMER_CONTROL1_ADDRESS
+#define LF_TIMER_CONTROL1_OFFSET WLAN_LF_TIMER_CONTROL1_OFFSET
+#define LF_TIMER_CONTROL1_ENABLE_MSB WLAN_LF_TIMER_CONTROL1_ENABLE_MSB
+#define LF_TIMER_CONTROL1_ENABLE_LSB WLAN_LF_TIMER_CONTROL1_ENABLE_LSB
+#define LF_TIMER_CONTROL1_ENABLE_MASK WLAN_LF_TIMER_CONTROL1_ENABLE_MASK
+#define LF_TIMER_CONTROL1_ENABLE_GET(x) WLAN_LF_TIMER_CONTROL1_ENABLE_GET(x)
+#define LF_TIMER_CONTROL1_ENABLE_SET(x) WLAN_LF_TIMER_CONTROL1_ENABLE_SET(x)
+#define LF_TIMER_CONTROL1_AUTO_RESTART_MSB WLAN_LF_TIMER_CONTROL1_AUTO_RESTART_MSB
+#define LF_TIMER_CONTROL1_AUTO_RESTART_LSB WLAN_LF_TIMER_CONTROL1_AUTO_RESTART_LSB
+#define LF_TIMER_CONTROL1_AUTO_RESTART_MASK WLAN_LF_TIMER_CONTROL1_AUTO_RESTART_MASK
+#define LF_TIMER_CONTROL1_AUTO_RESTART_GET(x) WLAN_LF_TIMER_CONTROL1_AUTO_RESTART_GET(x)
+#define LF_TIMER_CONTROL1_AUTO_RESTART_SET(x) WLAN_LF_TIMER_CONTROL1_AUTO_RESTART_SET(x)
+#define LF_TIMER_CONTROL1_RESET_MSB WLAN_LF_TIMER_CONTROL1_RESET_MSB
+#define LF_TIMER_CONTROL1_RESET_LSB WLAN_LF_TIMER_CONTROL1_RESET_LSB
+#define LF_TIMER_CONTROL1_RESET_MASK WLAN_LF_TIMER_CONTROL1_RESET_MASK
+#define LF_TIMER_CONTROL1_RESET_GET(x) WLAN_LF_TIMER_CONTROL1_RESET_GET(x)
+#define LF_TIMER_CONTROL1_RESET_SET(x) WLAN_LF_TIMER_CONTROL1_RESET_SET(x)
+#define LF_TIMER_STATUS1_ADDRESS WLAN_LF_TIMER_STATUS1_ADDRESS
+#define LF_TIMER_STATUS1_OFFSET WLAN_LF_TIMER_STATUS1_OFFSET
+#define LF_TIMER_STATUS1_INTERRUPT_MSB WLAN_LF_TIMER_STATUS1_INTERRUPT_MSB
+#define LF_TIMER_STATUS1_INTERRUPT_LSB WLAN_LF_TIMER_STATUS1_INTERRUPT_LSB
+#define LF_TIMER_STATUS1_INTERRUPT_MASK WLAN_LF_TIMER_STATUS1_INTERRUPT_MASK
+#define LF_TIMER_STATUS1_INTERRUPT_GET(x) WLAN_LF_TIMER_STATUS1_INTERRUPT_GET(x)
+#define LF_TIMER_STATUS1_INTERRUPT_SET(x) WLAN_LF_TIMER_STATUS1_INTERRUPT_SET(x)
+#define LF_TIMER2_ADDRESS WLAN_LF_TIMER2_ADDRESS
+#define LF_TIMER2_OFFSET WLAN_LF_TIMER2_OFFSET
+#define LF_TIMER2_TARGET_MSB WLAN_LF_TIMER2_TARGET_MSB
+#define LF_TIMER2_TARGET_LSB WLAN_LF_TIMER2_TARGET_LSB
+#define LF_TIMER2_TARGET_MASK WLAN_LF_TIMER2_TARGET_MASK
+#define LF_TIMER2_TARGET_GET(x) WLAN_LF_TIMER2_TARGET_GET(x)
+#define LF_TIMER2_TARGET_SET(x) WLAN_LF_TIMER2_TARGET_SET(x)
+#define LF_TIMER_COUNT2_ADDRESS WLAN_LF_TIMER_COUNT2_ADDRESS
+#define LF_TIMER_COUNT2_OFFSET WLAN_LF_TIMER_COUNT2_OFFSET
+#define LF_TIMER_COUNT2_VALUE_MSB WLAN_LF_TIMER_COUNT2_VALUE_MSB
+#define LF_TIMER_COUNT2_VALUE_LSB WLAN_LF_TIMER_COUNT2_VALUE_LSB
+#define LF_TIMER_COUNT2_VALUE_MASK WLAN_LF_TIMER_COUNT2_VALUE_MASK
+#define LF_TIMER_COUNT2_VALUE_GET(x) WLAN_LF_TIMER_COUNT2_VALUE_GET(x)
+#define LF_TIMER_COUNT2_VALUE_SET(x) WLAN_LF_TIMER_COUNT2_VALUE_SET(x)
+#define LF_TIMER_CONTROL2_ADDRESS WLAN_LF_TIMER_CONTROL2_ADDRESS
+#define LF_TIMER_CONTROL2_OFFSET WLAN_LF_TIMER_CONTROL2_OFFSET
+#define LF_TIMER_CONTROL2_ENABLE_MSB WLAN_LF_TIMER_CONTROL2_ENABLE_MSB
+#define LF_TIMER_CONTROL2_ENABLE_LSB WLAN_LF_TIMER_CONTROL2_ENABLE_LSB
+#define LF_TIMER_CONTROL2_ENABLE_MASK WLAN_LF_TIMER_CONTROL2_ENABLE_MASK
+#define LF_TIMER_CONTROL2_ENABLE_GET(x) WLAN_LF_TIMER_CONTROL2_ENABLE_GET(x)
+#define LF_TIMER_CONTROL2_ENABLE_SET(x) WLAN_LF_TIMER_CONTROL2_ENABLE_SET(x)
+#define LF_TIMER_CONTROL2_AUTO_RESTART_MSB WLAN_LF_TIMER_CONTROL2_AUTO_RESTART_MSB
+#define LF_TIMER_CONTROL2_AUTO_RESTART_LSB WLAN_LF_TIMER_CONTROL2_AUTO_RESTART_LSB
+#define LF_TIMER_CONTROL2_AUTO_RESTART_MASK WLAN_LF_TIMER_CONTROL2_AUTO_RESTART_MASK
+#define LF_TIMER_CONTROL2_AUTO_RESTART_GET(x) WLAN_LF_TIMER_CONTROL2_AUTO_RESTART_GET(x)
+#define LF_TIMER_CONTROL2_AUTO_RESTART_SET(x) WLAN_LF_TIMER_CONTROL2_AUTO_RESTART_SET(x)
+#define LF_TIMER_CONTROL2_RESET_MSB WLAN_LF_TIMER_CONTROL2_RESET_MSB
+#define LF_TIMER_CONTROL2_RESET_LSB WLAN_LF_TIMER_CONTROL2_RESET_LSB
+#define LF_TIMER_CONTROL2_RESET_MASK WLAN_LF_TIMER_CONTROL2_RESET_MASK
+#define LF_TIMER_CONTROL2_RESET_GET(x) WLAN_LF_TIMER_CONTROL2_RESET_GET(x)
+#define LF_TIMER_CONTROL2_RESET_SET(x) WLAN_LF_TIMER_CONTROL2_RESET_SET(x)
+#define LF_TIMER_STATUS2_ADDRESS WLAN_LF_TIMER_STATUS2_ADDRESS
+#define LF_TIMER_STATUS2_OFFSET WLAN_LF_TIMER_STATUS2_OFFSET
+#define LF_TIMER_STATUS2_INTERRUPT_MSB WLAN_LF_TIMER_STATUS2_INTERRUPT_MSB
+#define LF_TIMER_STATUS2_INTERRUPT_LSB WLAN_LF_TIMER_STATUS2_INTERRUPT_LSB
+#define LF_TIMER_STATUS2_INTERRUPT_MASK WLAN_LF_TIMER_STATUS2_INTERRUPT_MASK
+#define LF_TIMER_STATUS2_INTERRUPT_GET(x) WLAN_LF_TIMER_STATUS2_INTERRUPT_GET(x)
+#define LF_TIMER_STATUS2_INTERRUPT_SET(x) WLAN_LF_TIMER_STATUS2_INTERRUPT_SET(x)
+#define LF_TIMER3_ADDRESS WLAN_LF_TIMER3_ADDRESS
+#define LF_TIMER3_OFFSET WLAN_LF_TIMER3_OFFSET
+#define LF_TIMER3_TARGET_MSB WLAN_LF_TIMER3_TARGET_MSB
+#define LF_TIMER3_TARGET_LSB WLAN_LF_TIMER3_TARGET_LSB
+#define LF_TIMER3_TARGET_MASK WLAN_LF_TIMER3_TARGET_MASK
+#define LF_TIMER3_TARGET_GET(x) WLAN_LF_TIMER3_TARGET_GET(x)
+#define LF_TIMER3_TARGET_SET(x) WLAN_LF_TIMER3_TARGET_SET(x)
+#define LF_TIMER_COUNT3_ADDRESS WLAN_LF_TIMER_COUNT3_ADDRESS
+#define LF_TIMER_COUNT3_OFFSET WLAN_LF_TIMER_COUNT3_OFFSET
+#define LF_TIMER_COUNT3_VALUE_MSB WLAN_LF_TIMER_COUNT3_VALUE_MSB
+#define LF_TIMER_COUNT3_VALUE_LSB WLAN_LF_TIMER_COUNT3_VALUE_LSB
+#define LF_TIMER_COUNT3_VALUE_MASK WLAN_LF_TIMER_COUNT3_VALUE_MASK
+#define LF_TIMER_COUNT3_VALUE_GET(x) WLAN_LF_TIMER_COUNT3_VALUE_GET(x)
+#define LF_TIMER_COUNT3_VALUE_SET(x) WLAN_LF_TIMER_COUNT3_VALUE_SET(x)
+#define LF_TIMER_CONTROL3_ADDRESS WLAN_LF_TIMER_CONTROL3_ADDRESS
+#define LF_TIMER_CONTROL3_OFFSET WLAN_LF_TIMER_CONTROL3_OFFSET
+#define LF_TIMER_CONTROL3_ENABLE_MSB WLAN_LF_TIMER_CONTROL3_ENABLE_MSB
+#define LF_TIMER_CONTROL3_ENABLE_LSB WLAN_LF_TIMER_CONTROL3_ENABLE_LSB
+#define LF_TIMER_CONTROL3_ENABLE_MASK WLAN_LF_TIMER_CONTROL3_ENABLE_MASK
+#define LF_TIMER_CONTROL3_ENABLE_GET(x) WLAN_LF_TIMER_CONTROL3_ENABLE_GET(x)
+#define LF_TIMER_CONTROL3_ENABLE_SET(x) WLAN_LF_TIMER_CONTROL3_ENABLE_SET(x)
+#define LF_TIMER_CONTROL3_AUTO_RESTART_MSB WLAN_LF_TIMER_CONTROL3_AUTO_RESTART_MSB
+#define LF_TIMER_CONTROL3_AUTO_RESTART_LSB WLAN_LF_TIMER_CONTROL3_AUTO_RESTART_LSB
+#define LF_TIMER_CONTROL3_AUTO_RESTART_MASK WLAN_LF_TIMER_CONTROL3_AUTO_RESTART_MASK
+#define LF_TIMER_CONTROL3_AUTO_RESTART_GET(x) WLAN_LF_TIMER_CONTROL3_AUTO_RESTART_GET(x)
+#define LF_TIMER_CONTROL3_AUTO_RESTART_SET(x) WLAN_LF_TIMER_CONTROL3_AUTO_RESTART_SET(x)
+#define LF_TIMER_CONTROL3_RESET_MSB WLAN_LF_TIMER_CONTROL3_RESET_MSB
+#define LF_TIMER_CONTROL3_RESET_LSB WLAN_LF_TIMER_CONTROL3_RESET_LSB
+#define LF_TIMER_CONTROL3_RESET_MASK WLAN_LF_TIMER_CONTROL3_RESET_MASK
+#define LF_TIMER_CONTROL3_RESET_GET(x) WLAN_LF_TIMER_CONTROL3_RESET_GET(x)
+#define LF_TIMER_CONTROL3_RESET_SET(x) WLAN_LF_TIMER_CONTROL3_RESET_SET(x)
+#define LF_TIMER_STATUS3_ADDRESS WLAN_LF_TIMER_STATUS3_ADDRESS
+#define LF_TIMER_STATUS3_OFFSET WLAN_LF_TIMER_STATUS3_OFFSET
+#define LF_TIMER_STATUS3_INTERRUPT_MSB WLAN_LF_TIMER_STATUS3_INTERRUPT_MSB
+#define LF_TIMER_STATUS3_INTERRUPT_LSB WLAN_LF_TIMER_STATUS3_INTERRUPT_LSB
+#define LF_TIMER_STATUS3_INTERRUPT_MASK WLAN_LF_TIMER_STATUS3_INTERRUPT_MASK
+#define LF_TIMER_STATUS3_INTERRUPT_GET(x) WLAN_LF_TIMER_STATUS3_INTERRUPT_GET(x)
+#define LF_TIMER_STATUS3_INTERRUPT_SET(x) WLAN_LF_TIMER_STATUS3_INTERRUPT_SET(x)
+#define HF_TIMER_ADDRESS WLAN_HF_TIMER_ADDRESS
+#define HF_TIMER_OFFSET WLAN_HF_TIMER_OFFSET
+#define HF_TIMER_TARGET_MSB WLAN_HF_TIMER_TARGET_MSB
+#define HF_TIMER_TARGET_LSB WLAN_HF_TIMER_TARGET_LSB
+#define HF_TIMER_TARGET_MASK WLAN_HF_TIMER_TARGET_MASK
+#define HF_TIMER_TARGET_GET(x) WLAN_HF_TIMER_TARGET_GET(x)
+#define HF_TIMER_TARGET_SET(x) WLAN_HF_TIMER_TARGET_SET(x)
+#define HF_TIMER_COUNT_ADDRESS WLAN_HF_TIMER_COUNT_ADDRESS
+#define HF_TIMER_COUNT_OFFSET WLAN_HF_TIMER_COUNT_OFFSET
+#define HF_TIMER_COUNT_VALUE_MSB WLAN_HF_TIMER_COUNT_VALUE_MSB
+#define HF_TIMER_COUNT_VALUE_LSB WLAN_HF_TIMER_COUNT_VALUE_LSB
+#define HF_TIMER_COUNT_VALUE_MASK WLAN_HF_TIMER_COUNT_VALUE_MASK
+#define HF_TIMER_COUNT_VALUE_GET(x) WLAN_HF_TIMER_COUNT_VALUE_GET(x)
+#define HF_TIMER_COUNT_VALUE_SET(x) WLAN_HF_TIMER_COUNT_VALUE_SET(x)
+#define HF_LF_COUNT_ADDRESS WLAN_HF_LF_COUNT_ADDRESS
+#define HF_LF_COUNT_OFFSET WLAN_HF_LF_COUNT_OFFSET
+#define HF_LF_COUNT_VALUE_MSB WLAN_HF_LF_COUNT_VALUE_MSB
+#define HF_LF_COUNT_VALUE_LSB WLAN_HF_LF_COUNT_VALUE_LSB
+#define HF_LF_COUNT_VALUE_MASK WLAN_HF_LF_COUNT_VALUE_MASK
+#define HF_LF_COUNT_VALUE_GET(x) WLAN_HF_LF_COUNT_VALUE_GET(x)
+#define HF_LF_COUNT_VALUE_SET(x) WLAN_HF_LF_COUNT_VALUE_SET(x)
+#define HF_TIMER_CONTROL_ADDRESS WLAN_HF_TIMER_CONTROL_ADDRESS
+#define HF_TIMER_CONTROL_OFFSET WLAN_HF_TIMER_CONTROL_OFFSET
+#define HF_TIMER_CONTROL_ENABLE_MSB WLAN_HF_TIMER_CONTROL_ENABLE_MSB
+#define HF_TIMER_CONTROL_ENABLE_LSB WLAN_HF_TIMER_CONTROL_ENABLE_LSB
+#define HF_TIMER_CONTROL_ENABLE_MASK WLAN_HF_TIMER_CONTROL_ENABLE_MASK
+#define HF_TIMER_CONTROL_ENABLE_GET(x) WLAN_HF_TIMER_CONTROL_ENABLE_GET(x)
+#define HF_TIMER_CONTROL_ENABLE_SET(x) WLAN_HF_TIMER_CONTROL_ENABLE_SET(x)
+#define HF_TIMER_CONTROL_ON_MSB WLAN_HF_TIMER_CONTROL_ON_MSB
+#define HF_TIMER_CONTROL_ON_LSB WLAN_HF_TIMER_CONTROL_ON_LSB
+#define HF_TIMER_CONTROL_ON_MASK WLAN_HF_TIMER_CONTROL_ON_MASK
+#define HF_TIMER_CONTROL_ON_GET(x) WLAN_HF_TIMER_CONTROL_ON_GET(x)
+#define HF_TIMER_CONTROL_ON_SET(x) WLAN_HF_TIMER_CONTROL_ON_SET(x)
+#define HF_TIMER_CONTROL_AUTO_RESTART_MSB WLAN_HF_TIMER_CONTROL_AUTO_RESTART_MSB
+#define HF_TIMER_CONTROL_AUTO_RESTART_LSB WLAN_HF_TIMER_CONTROL_AUTO_RESTART_LSB
+#define HF_TIMER_CONTROL_AUTO_RESTART_MASK WLAN_HF_TIMER_CONTROL_AUTO_RESTART_MASK
+#define HF_TIMER_CONTROL_AUTO_RESTART_GET(x) WLAN_HF_TIMER_CONTROL_AUTO_RESTART_GET(x)
+#define HF_TIMER_CONTROL_AUTO_RESTART_SET(x) WLAN_HF_TIMER_CONTROL_AUTO_RESTART_SET(x)
+#define HF_TIMER_CONTROL_RESET_MSB WLAN_HF_TIMER_CONTROL_RESET_MSB
+#define HF_TIMER_CONTROL_RESET_LSB WLAN_HF_TIMER_CONTROL_RESET_LSB
+#define HF_TIMER_CONTROL_RESET_MASK WLAN_HF_TIMER_CONTROL_RESET_MASK
+#define HF_TIMER_CONTROL_RESET_GET(x) WLAN_HF_TIMER_CONTROL_RESET_GET(x)
+#define HF_TIMER_CONTROL_RESET_SET(x) WLAN_HF_TIMER_CONTROL_RESET_SET(x)
+#define HF_TIMER_STATUS_ADDRESS WLAN_HF_TIMER_STATUS_ADDRESS
+#define HF_TIMER_STATUS_OFFSET WLAN_HF_TIMER_STATUS_OFFSET
+#define HF_TIMER_STATUS_INTERRUPT_MSB WLAN_HF_TIMER_STATUS_INTERRUPT_MSB
+#define HF_TIMER_STATUS_INTERRUPT_LSB WLAN_HF_TIMER_STATUS_INTERRUPT_LSB
+#define HF_TIMER_STATUS_INTERRUPT_MASK WLAN_HF_TIMER_STATUS_INTERRUPT_MASK
+#define HF_TIMER_STATUS_INTERRUPT_GET(x) WLAN_HF_TIMER_STATUS_INTERRUPT_GET(x)
+#define HF_TIMER_STATUS_INTERRUPT_SET(x) WLAN_HF_TIMER_STATUS_INTERRUPT_SET(x)
+#define RTC_CONTROL_ADDRESS WLAN_RTC_CONTROL_ADDRESS
+#define RTC_CONTROL_OFFSET WLAN_RTC_CONTROL_OFFSET
+#define RTC_CONTROL_ENABLE_MSB WLAN_RTC_CONTROL_ENABLE_MSB
+#define RTC_CONTROL_ENABLE_LSB WLAN_RTC_CONTROL_ENABLE_LSB
+#define RTC_CONTROL_ENABLE_MASK WLAN_RTC_CONTROL_ENABLE_MASK
+#define RTC_CONTROL_ENABLE_GET(x) WLAN_RTC_CONTROL_ENABLE_GET(x)
+#define RTC_CONTROL_ENABLE_SET(x) WLAN_RTC_CONTROL_ENABLE_SET(x)
+#define RTC_CONTROL_LOAD_RTC_MSB WLAN_RTC_CONTROL_LOAD_RTC_MSB
+#define RTC_CONTROL_LOAD_RTC_LSB WLAN_RTC_CONTROL_LOAD_RTC_LSB
+#define RTC_CONTROL_LOAD_RTC_MASK WLAN_RTC_CONTROL_LOAD_RTC_MASK
+#define RTC_CONTROL_LOAD_RTC_GET(x) WLAN_RTC_CONTROL_LOAD_RTC_GET(x)
+#define RTC_CONTROL_LOAD_RTC_SET(x) WLAN_RTC_CONTROL_LOAD_RTC_SET(x)
+#define RTC_CONTROL_LOAD_ALARM_MSB WLAN_RTC_CONTROL_LOAD_ALARM_MSB
+#define RTC_CONTROL_LOAD_ALARM_LSB WLAN_RTC_CONTROL_LOAD_ALARM_LSB
+#define RTC_CONTROL_LOAD_ALARM_MASK WLAN_RTC_CONTROL_LOAD_ALARM_MASK
+#define RTC_CONTROL_LOAD_ALARM_GET(x) WLAN_RTC_CONTROL_LOAD_ALARM_GET(x)
+#define RTC_CONTROL_LOAD_ALARM_SET(x) WLAN_RTC_CONTROL_LOAD_ALARM_SET(x)
+#define RTC_TIME_ADDRESS WLAN_RTC_TIME_ADDRESS
+#define RTC_TIME_OFFSET WLAN_RTC_TIME_OFFSET
+#define RTC_TIME_WEEK_DAY_MSB WLAN_RTC_TIME_WEEK_DAY_MSB
+#define RTC_TIME_WEEK_DAY_LSB WLAN_RTC_TIME_WEEK_DAY_LSB
+#define RTC_TIME_WEEK_DAY_MASK WLAN_RTC_TIME_WEEK_DAY_MASK
+#define RTC_TIME_WEEK_DAY_GET(x) WLAN_RTC_TIME_WEEK_DAY_GET(x)
+#define RTC_TIME_WEEK_DAY_SET(x) WLAN_RTC_TIME_WEEK_DAY_SET(x)
+#define RTC_TIME_HOUR_MSB WLAN_RTC_TIME_HOUR_MSB
+#define RTC_TIME_HOUR_LSB WLAN_RTC_TIME_HOUR_LSB
+#define RTC_TIME_HOUR_MASK WLAN_RTC_TIME_HOUR_MASK
+#define RTC_TIME_HOUR_GET(x) WLAN_RTC_TIME_HOUR_GET(x)
+#define RTC_TIME_HOUR_SET(x) WLAN_RTC_TIME_HOUR_SET(x)
+#define RTC_TIME_MINUTE_MSB WLAN_RTC_TIME_MINUTE_MSB
+#define RTC_TIME_MINUTE_LSB WLAN_RTC_TIME_MINUTE_LSB
+#define RTC_TIME_MINUTE_MASK WLAN_RTC_TIME_MINUTE_MASK
+#define RTC_TIME_MINUTE_GET(x) WLAN_RTC_TIME_MINUTE_GET(x)
+#define RTC_TIME_MINUTE_SET(x) WLAN_RTC_TIME_MINUTE_SET(x)
+#define RTC_TIME_SECOND_MSB WLAN_RTC_TIME_SECOND_MSB
+#define RTC_TIME_SECOND_LSB WLAN_RTC_TIME_SECOND_LSB
+#define RTC_TIME_SECOND_MASK WLAN_RTC_TIME_SECOND_MASK
+#define RTC_TIME_SECOND_GET(x) WLAN_RTC_TIME_SECOND_GET(x)
+#define RTC_TIME_SECOND_SET(x) WLAN_RTC_TIME_SECOND_SET(x)
+#define RTC_DATE_ADDRESS WLAN_RTC_DATE_ADDRESS
+#define RTC_DATE_OFFSET WLAN_RTC_DATE_OFFSET
+#define RTC_DATE_YEAR_MSB WLAN_RTC_DATE_YEAR_MSB
+#define RTC_DATE_YEAR_LSB WLAN_RTC_DATE_YEAR_LSB
+#define RTC_DATE_YEAR_MASK WLAN_RTC_DATE_YEAR_MASK
+#define RTC_DATE_YEAR_GET(x) WLAN_RTC_DATE_YEAR_GET(x)
+#define RTC_DATE_YEAR_SET(x) WLAN_RTC_DATE_YEAR_SET(x)
+#define RTC_DATE_MONTH_MSB WLAN_RTC_DATE_MONTH_MSB
+#define RTC_DATE_MONTH_LSB WLAN_RTC_DATE_MONTH_LSB
+#define RTC_DATE_MONTH_MASK WLAN_RTC_DATE_MONTH_MASK
+#define RTC_DATE_MONTH_GET(x) WLAN_RTC_DATE_MONTH_GET(x)
+#define RTC_DATE_MONTH_SET(x) WLAN_RTC_DATE_MONTH_SET(x)
+#define RTC_DATE_MONTH_DAY_MSB WLAN_RTC_DATE_MONTH_DAY_MSB
+#define RTC_DATE_MONTH_DAY_LSB WLAN_RTC_DATE_MONTH_DAY_LSB
+#define RTC_DATE_MONTH_DAY_MASK WLAN_RTC_DATE_MONTH_DAY_MASK
+#define RTC_DATE_MONTH_DAY_GET(x) WLAN_RTC_DATE_MONTH_DAY_GET(x)
+#define RTC_DATE_MONTH_DAY_SET(x) WLAN_RTC_DATE_MONTH_DAY_SET(x)
+#define RTC_SET_TIME_ADDRESS WLAN_RTC_SET_TIME_ADDRESS
+#define RTC_SET_TIME_OFFSET WLAN_RTC_SET_TIME_OFFSET
+#define RTC_SET_TIME_WEEK_DAY_MSB WLAN_RTC_SET_TIME_WEEK_DAY_MSB
+#define RTC_SET_TIME_WEEK_DAY_LSB WLAN_RTC_SET_TIME_WEEK_DAY_LSB
+#define RTC_SET_TIME_WEEK_DAY_MASK WLAN_RTC_SET_TIME_WEEK_DAY_MASK
+#define RTC_SET_TIME_WEEK_DAY_GET(x) WLAN_RTC_SET_TIME_WEEK_DAY_GET(x)
+#define RTC_SET_TIME_WEEK_DAY_SET(x) WLAN_RTC_SET_TIME_WEEK_DAY_SET(x)
+#define RTC_SET_TIME_HOUR_MSB WLAN_RTC_SET_TIME_HOUR_MSB
+#define RTC_SET_TIME_HOUR_LSB WLAN_RTC_SET_TIME_HOUR_LSB
+#define RTC_SET_TIME_HOUR_MASK WLAN_RTC_SET_TIME_HOUR_MASK
+#define RTC_SET_TIME_HOUR_GET(x) WLAN_RTC_SET_TIME_HOUR_GET(x)
+#define RTC_SET_TIME_HOUR_SET(x) WLAN_RTC_SET_TIME_HOUR_SET(x)
+#define RTC_SET_TIME_MINUTE_MSB WLAN_RTC_SET_TIME_MINUTE_MSB
+#define RTC_SET_TIME_MINUTE_LSB WLAN_RTC_SET_TIME_MINUTE_LSB
+#define RTC_SET_TIME_MINUTE_MASK WLAN_RTC_SET_TIME_MINUTE_MASK
+#define RTC_SET_TIME_MINUTE_GET(x) WLAN_RTC_SET_TIME_MINUTE_GET(x)
+#define RTC_SET_TIME_MINUTE_SET(x) WLAN_RTC_SET_TIME_MINUTE_SET(x)
+#define RTC_SET_TIME_SECOND_MSB WLAN_RTC_SET_TIME_SECOND_MSB
+#define RTC_SET_TIME_SECOND_LSB WLAN_RTC_SET_TIME_SECOND_LSB
+#define RTC_SET_TIME_SECOND_MASK WLAN_RTC_SET_TIME_SECOND_MASK
+#define RTC_SET_TIME_SECOND_GET(x) WLAN_RTC_SET_TIME_SECOND_GET(x)
+#define RTC_SET_TIME_SECOND_SET(x) WLAN_RTC_SET_TIME_SECOND_SET(x)
+#define RTC_SET_DATE_ADDRESS WLAN_RTC_SET_DATE_ADDRESS
+#define RTC_SET_DATE_OFFSET WLAN_RTC_SET_DATE_OFFSET
+#define RTC_SET_DATE_YEAR_MSB WLAN_RTC_SET_DATE_YEAR_MSB
+#define RTC_SET_DATE_YEAR_LSB WLAN_RTC_SET_DATE_YEAR_LSB
+#define RTC_SET_DATE_YEAR_MASK WLAN_RTC_SET_DATE_YEAR_MASK
+#define RTC_SET_DATE_YEAR_GET(x) WLAN_RTC_SET_DATE_YEAR_GET(x)
+#define RTC_SET_DATE_YEAR_SET(x) WLAN_RTC_SET_DATE_YEAR_SET(x)
+#define RTC_SET_DATE_MONTH_MSB WLAN_RTC_SET_DATE_MONTH_MSB
+#define RTC_SET_DATE_MONTH_LSB WLAN_RTC_SET_DATE_MONTH_LSB
+#define RTC_SET_DATE_MONTH_MASK WLAN_RTC_SET_DATE_MONTH_MASK
+#define RTC_SET_DATE_MONTH_GET(x) WLAN_RTC_SET_DATE_MONTH_GET(x)
+#define RTC_SET_DATE_MONTH_SET(x) WLAN_RTC_SET_DATE_MONTH_SET(x)
+#define RTC_SET_DATE_MONTH_DAY_MSB WLAN_RTC_SET_DATE_MONTH_DAY_MSB
+#define RTC_SET_DATE_MONTH_DAY_LSB WLAN_RTC_SET_DATE_MONTH_DAY_LSB
+#define RTC_SET_DATE_MONTH_DAY_MASK WLAN_RTC_SET_DATE_MONTH_DAY_MASK
+#define RTC_SET_DATE_MONTH_DAY_GET(x) WLAN_RTC_SET_DATE_MONTH_DAY_GET(x)
+#define RTC_SET_DATE_MONTH_DAY_SET(x) WLAN_RTC_SET_DATE_MONTH_DAY_SET(x)
+#define RTC_SET_ALARM_ADDRESS WLAN_RTC_SET_ALARM_ADDRESS
+#define RTC_SET_ALARM_OFFSET WLAN_RTC_SET_ALARM_OFFSET
+#define RTC_SET_ALARM_HOUR_MSB WLAN_RTC_SET_ALARM_HOUR_MSB
+#define RTC_SET_ALARM_HOUR_LSB WLAN_RTC_SET_ALARM_HOUR_LSB
+#define RTC_SET_ALARM_HOUR_MASK WLAN_RTC_SET_ALARM_HOUR_MASK
+#define RTC_SET_ALARM_HOUR_GET(x) WLAN_RTC_SET_ALARM_HOUR_GET(x)
+#define RTC_SET_ALARM_HOUR_SET(x) WLAN_RTC_SET_ALARM_HOUR_SET(x)
+#define RTC_SET_ALARM_MINUTE_MSB WLAN_RTC_SET_ALARM_MINUTE_MSB
+#define RTC_SET_ALARM_MINUTE_LSB WLAN_RTC_SET_ALARM_MINUTE_LSB
+#define RTC_SET_ALARM_MINUTE_MASK WLAN_RTC_SET_ALARM_MINUTE_MASK
+#define RTC_SET_ALARM_MINUTE_GET(x) WLAN_RTC_SET_ALARM_MINUTE_GET(x)
+#define RTC_SET_ALARM_MINUTE_SET(x) WLAN_RTC_SET_ALARM_MINUTE_SET(x)
+#define RTC_SET_ALARM_SECOND_MSB WLAN_RTC_SET_ALARM_SECOND_MSB
+#define RTC_SET_ALARM_SECOND_LSB WLAN_RTC_SET_ALARM_SECOND_LSB
+#define RTC_SET_ALARM_SECOND_MASK WLAN_RTC_SET_ALARM_SECOND_MASK
+#define RTC_SET_ALARM_SECOND_GET(x) WLAN_RTC_SET_ALARM_SECOND_GET(x)
+#define RTC_SET_ALARM_SECOND_SET(x) WLAN_RTC_SET_ALARM_SECOND_SET(x)
+#define RTC_CONFIG_ADDRESS WLAN_RTC_CONFIG_ADDRESS
+#define RTC_CONFIG_OFFSET WLAN_RTC_CONFIG_OFFSET
+#define RTC_CONFIG_BCD_MSB WLAN_RTC_CONFIG_BCD_MSB
+#define RTC_CONFIG_BCD_LSB WLAN_RTC_CONFIG_BCD_LSB
+#define RTC_CONFIG_BCD_MASK WLAN_RTC_CONFIG_BCD_MASK
+#define RTC_CONFIG_BCD_GET(x) WLAN_RTC_CONFIG_BCD_GET(x)
+#define RTC_CONFIG_BCD_SET(x) WLAN_RTC_CONFIG_BCD_SET(x)
+#define RTC_CONFIG_TWELVE_HOUR_MSB WLAN_RTC_CONFIG_TWELVE_HOUR_MSB
+#define RTC_CONFIG_TWELVE_HOUR_LSB WLAN_RTC_CONFIG_TWELVE_HOUR_LSB
+#define RTC_CONFIG_TWELVE_HOUR_MASK WLAN_RTC_CONFIG_TWELVE_HOUR_MASK
+#define RTC_CONFIG_TWELVE_HOUR_GET(x) WLAN_RTC_CONFIG_TWELVE_HOUR_GET(x)
+#define RTC_CONFIG_TWELVE_HOUR_SET(x) WLAN_RTC_CONFIG_TWELVE_HOUR_SET(x)
+#define RTC_CONFIG_DSE_MSB WLAN_RTC_CONFIG_DSE_MSB
+#define RTC_CONFIG_DSE_LSB WLAN_RTC_CONFIG_DSE_LSB
+#define RTC_CONFIG_DSE_MASK WLAN_RTC_CONFIG_DSE_MASK
+#define RTC_CONFIG_DSE_GET(x) WLAN_RTC_CONFIG_DSE_GET(x)
+#define RTC_CONFIG_DSE_SET(x) WLAN_RTC_CONFIG_DSE_SET(x)
+#define RTC_ALARM_STATUS_ADDRESS WLAN_RTC_ALARM_STATUS_ADDRESS
+#define RTC_ALARM_STATUS_OFFSET WLAN_RTC_ALARM_STATUS_OFFSET
+#define RTC_ALARM_STATUS_ENABLE_MSB WLAN_RTC_ALARM_STATUS_ENABLE_MSB
+#define RTC_ALARM_STATUS_ENABLE_LSB WLAN_RTC_ALARM_STATUS_ENABLE_LSB
+#define RTC_ALARM_STATUS_ENABLE_MASK WLAN_RTC_ALARM_STATUS_ENABLE_MASK
+#define RTC_ALARM_STATUS_ENABLE_GET(x) WLAN_RTC_ALARM_STATUS_ENABLE_GET(x)
+#define RTC_ALARM_STATUS_ENABLE_SET(x) WLAN_RTC_ALARM_STATUS_ENABLE_SET(x)
+#define RTC_ALARM_STATUS_INTERRUPT_MSB WLAN_RTC_ALARM_STATUS_INTERRUPT_MSB
+#define RTC_ALARM_STATUS_INTERRUPT_LSB WLAN_RTC_ALARM_STATUS_INTERRUPT_LSB
+#define RTC_ALARM_STATUS_INTERRUPT_MASK WLAN_RTC_ALARM_STATUS_INTERRUPT_MASK
+#define RTC_ALARM_STATUS_INTERRUPT_GET(x) WLAN_RTC_ALARM_STATUS_INTERRUPT_GET(x)
+#define RTC_ALARM_STATUS_INTERRUPT_SET(x) WLAN_RTC_ALARM_STATUS_INTERRUPT_SET(x)
+#define UART_WAKEUP_ADDRESS WLAN_UART_WAKEUP_ADDRESS
+#define UART_WAKEUP_OFFSET WLAN_UART_WAKEUP_OFFSET
+#define UART_WAKEUP_ENABLE_MSB WLAN_UART_WAKEUP_ENABLE_MSB
+#define UART_WAKEUP_ENABLE_LSB WLAN_UART_WAKEUP_ENABLE_LSB
+#define UART_WAKEUP_ENABLE_MASK WLAN_UART_WAKEUP_ENABLE_MASK
+#define UART_WAKEUP_ENABLE_GET(x) WLAN_UART_WAKEUP_ENABLE_GET(x)
+#define UART_WAKEUP_ENABLE_SET(x) WLAN_UART_WAKEUP_ENABLE_SET(x)
+#define RESET_CAUSE_ADDRESS WLAN_RESET_CAUSE_ADDRESS
+#define RESET_CAUSE_OFFSET WLAN_RESET_CAUSE_OFFSET
+#define RESET_CAUSE_LAST_MSB WLAN_RESET_CAUSE_LAST_MSB
+#define RESET_CAUSE_LAST_LSB WLAN_RESET_CAUSE_LAST_LSB
+#define RESET_CAUSE_LAST_MASK WLAN_RESET_CAUSE_LAST_MASK
+#define RESET_CAUSE_LAST_GET(x) WLAN_RESET_CAUSE_LAST_GET(x)
+#define RESET_CAUSE_LAST_SET(x) WLAN_RESET_CAUSE_LAST_SET(x)
+#define SYSTEM_SLEEP_ADDRESS WLAN_SYSTEM_SLEEP_ADDRESS
+#define SYSTEM_SLEEP_OFFSET WLAN_SYSTEM_SLEEP_OFFSET
+#define SYSTEM_SLEEP_HOST_IF_MSB WLAN_SYSTEM_SLEEP_HOST_IF_MSB
+#define SYSTEM_SLEEP_HOST_IF_LSB WLAN_SYSTEM_SLEEP_HOST_IF_LSB
+#define SYSTEM_SLEEP_HOST_IF_MASK WLAN_SYSTEM_SLEEP_HOST_IF_MASK
+#define SYSTEM_SLEEP_HOST_IF_GET(x) WLAN_SYSTEM_SLEEP_HOST_IF_GET(x)
+#define SYSTEM_SLEEP_HOST_IF_SET(x) WLAN_SYSTEM_SLEEP_HOST_IF_SET(x)
+#define SYSTEM_SLEEP_MBOX_MSB WLAN_SYSTEM_SLEEP_MBOX_MSB
+#define SYSTEM_SLEEP_MBOX_LSB WLAN_SYSTEM_SLEEP_MBOX_LSB
+#define SYSTEM_SLEEP_MBOX_MASK WLAN_SYSTEM_SLEEP_MBOX_MASK
+#define SYSTEM_SLEEP_MBOX_GET(x) WLAN_SYSTEM_SLEEP_MBOX_GET(x)
+#define SYSTEM_SLEEP_MBOX_SET(x) WLAN_SYSTEM_SLEEP_MBOX_SET(x)
+#define SYSTEM_SLEEP_MAC_IF_MSB WLAN_SYSTEM_SLEEP_MAC_IF_MSB
+#define SYSTEM_SLEEP_MAC_IF_LSB WLAN_SYSTEM_SLEEP_MAC_IF_LSB
+#define SYSTEM_SLEEP_MAC_IF_MASK WLAN_SYSTEM_SLEEP_MAC_IF_MASK
+#define SYSTEM_SLEEP_MAC_IF_GET(x) WLAN_SYSTEM_SLEEP_MAC_IF_GET(x)
+#define SYSTEM_SLEEP_MAC_IF_SET(x) WLAN_SYSTEM_SLEEP_MAC_IF_SET(x)
+#define SYSTEM_SLEEP_LIGHT_MSB WLAN_SYSTEM_SLEEP_LIGHT_MSB
+#define SYSTEM_SLEEP_LIGHT_LSB WLAN_SYSTEM_SLEEP_LIGHT_LSB
+#define SYSTEM_SLEEP_LIGHT_MASK WLAN_SYSTEM_SLEEP_LIGHT_MASK
+#define SYSTEM_SLEEP_LIGHT_GET(x) WLAN_SYSTEM_SLEEP_LIGHT_GET(x)
+#define SYSTEM_SLEEP_LIGHT_SET(x) WLAN_SYSTEM_SLEEP_LIGHT_SET(x)
+#define SYSTEM_SLEEP_DISABLE_MSB WLAN_SYSTEM_SLEEP_DISABLE_MSB
+#define SYSTEM_SLEEP_DISABLE_LSB WLAN_SYSTEM_SLEEP_DISABLE_LSB
+#define SYSTEM_SLEEP_DISABLE_MASK WLAN_SYSTEM_SLEEP_DISABLE_MASK
+#define SYSTEM_SLEEP_DISABLE_GET(x) WLAN_SYSTEM_SLEEP_DISABLE_GET(x)
+#define SYSTEM_SLEEP_DISABLE_SET(x) WLAN_SYSTEM_SLEEP_DISABLE_SET(x)
+#define SDIO_WRAPPER_ADDRESS WLAN_SDIO_WRAPPER_ADDRESS
+#define SDIO_WRAPPER_OFFSET WLAN_SDIO_WRAPPER_OFFSET
+#define SDIO_WRAPPER_SLEEP_MSB WLAN_SDIO_WRAPPER_SLEEP_MSB
+#define SDIO_WRAPPER_SLEEP_LSB WLAN_SDIO_WRAPPER_SLEEP_LSB
+#define SDIO_WRAPPER_SLEEP_MASK WLAN_SDIO_WRAPPER_SLEEP_MASK
+#define SDIO_WRAPPER_SLEEP_GET(x) WLAN_SDIO_WRAPPER_SLEEP_GET(x)
+#define SDIO_WRAPPER_SLEEP_SET(x) WLAN_SDIO_WRAPPER_SLEEP_SET(x)
+#define SDIO_WRAPPER_WAKEUP_MSB WLAN_SDIO_WRAPPER_WAKEUP_MSB
+#define SDIO_WRAPPER_WAKEUP_LSB WLAN_SDIO_WRAPPER_WAKEUP_LSB
+#define SDIO_WRAPPER_WAKEUP_MASK WLAN_SDIO_WRAPPER_WAKEUP_MASK
+#define SDIO_WRAPPER_WAKEUP_GET(x) WLAN_SDIO_WRAPPER_WAKEUP_GET(x)
+#define SDIO_WRAPPER_WAKEUP_SET(x) WLAN_SDIO_WRAPPER_WAKEUP_SET(x)
+#define SDIO_WRAPPER_SOC_ON_MSB WLAN_SDIO_WRAPPER_SOC_ON_MSB
+#define SDIO_WRAPPER_SOC_ON_LSB WLAN_SDIO_WRAPPER_SOC_ON_LSB
+#define SDIO_WRAPPER_SOC_ON_MASK WLAN_SDIO_WRAPPER_SOC_ON_MASK
+#define SDIO_WRAPPER_SOC_ON_GET(x) WLAN_SDIO_WRAPPER_SOC_ON_GET(x)
+#define SDIO_WRAPPER_SOC_ON_SET(x) WLAN_SDIO_WRAPPER_SOC_ON_SET(x)
+#define SDIO_WRAPPER_ON_MSB WLAN_SDIO_WRAPPER_ON_MSB
+#define SDIO_WRAPPER_ON_LSB WLAN_SDIO_WRAPPER_ON_LSB
+#define SDIO_WRAPPER_ON_MASK WLAN_SDIO_WRAPPER_ON_MASK
+#define SDIO_WRAPPER_ON_GET(x) WLAN_SDIO_WRAPPER_ON_GET(x)
+#define SDIO_WRAPPER_ON_SET(x) WLAN_SDIO_WRAPPER_ON_SET(x)
+#define MAC_SLEEP_CONTROL_ADDRESS WLAN_MAC_SLEEP_CONTROL_ADDRESS
+#define MAC_SLEEP_CONTROL_OFFSET WLAN_MAC_SLEEP_CONTROL_OFFSET
+#define MAC_SLEEP_CONTROL_ENABLE_MSB WLAN_MAC_SLEEP_CONTROL_ENABLE_MSB
+#define MAC_SLEEP_CONTROL_ENABLE_LSB WLAN_MAC_SLEEP_CONTROL_ENABLE_LSB
+#define MAC_SLEEP_CONTROL_ENABLE_MASK WLAN_MAC_SLEEP_CONTROL_ENABLE_MASK
+#define MAC_SLEEP_CONTROL_ENABLE_GET(x) WLAN_MAC_SLEEP_CONTROL_ENABLE_GET(x)
+#define MAC_SLEEP_CONTROL_ENABLE_SET(x) WLAN_MAC_SLEEP_CONTROL_ENABLE_SET(x)
+#define KEEP_AWAKE_ADDRESS WLAN_KEEP_AWAKE_ADDRESS
+#define KEEP_AWAKE_OFFSET WLAN_KEEP_AWAKE_OFFSET
+#define KEEP_AWAKE_COUNT_MSB WLAN_KEEP_AWAKE_COUNT_MSB
+#define KEEP_AWAKE_COUNT_LSB WLAN_KEEP_AWAKE_COUNT_LSB
+#define KEEP_AWAKE_COUNT_MASK WLAN_KEEP_AWAKE_COUNT_MASK
+#define KEEP_AWAKE_COUNT_GET(x) WLAN_KEEP_AWAKE_COUNT_GET(x)
+#define KEEP_AWAKE_COUNT_SET(x) WLAN_KEEP_AWAKE_COUNT_SET(x)
+#define LPO_CAL_TIME_ADDRESS WLAN_LPO_CAL_TIME_ADDRESS
+#define LPO_CAL_TIME_OFFSET WLAN_LPO_CAL_TIME_OFFSET
+#define LPO_CAL_TIME_LENGTH_MSB WLAN_LPO_CAL_TIME_LENGTH_MSB
+#define LPO_CAL_TIME_LENGTH_LSB WLAN_LPO_CAL_TIME_LENGTH_LSB
+#define LPO_CAL_TIME_LENGTH_MASK WLAN_LPO_CAL_TIME_LENGTH_MASK
+#define LPO_CAL_TIME_LENGTH_GET(x) WLAN_LPO_CAL_TIME_LENGTH_GET(x)
+#define LPO_CAL_TIME_LENGTH_SET(x) WLAN_LPO_CAL_TIME_LENGTH_SET(x)
+#define LPO_INIT_DIVIDEND_INT_ADDRESS WLAN_LPO_INIT_DIVIDEND_INT_ADDRESS
+#define LPO_INIT_DIVIDEND_INT_OFFSET WLAN_LPO_INIT_DIVIDEND_INT_OFFSET
+#define LPO_INIT_DIVIDEND_INT_VALUE_MSB WLAN_LPO_INIT_DIVIDEND_INT_VALUE_MSB
+#define LPO_INIT_DIVIDEND_INT_VALUE_LSB WLAN_LPO_INIT_DIVIDEND_INT_VALUE_LSB
+#define LPO_INIT_DIVIDEND_INT_VALUE_MASK WLAN_LPO_INIT_DIVIDEND_INT_VALUE_MASK
+#define LPO_INIT_DIVIDEND_INT_VALUE_GET(x) WLAN_LPO_INIT_DIVIDEND_INT_VALUE_GET(x)
+#define LPO_INIT_DIVIDEND_INT_VALUE_SET(x) WLAN_LPO_INIT_DIVIDEND_INT_VALUE_SET(x)
+#define LPO_INIT_DIVIDEND_FRACTION_ADDRESS WLAN_LPO_INIT_DIVIDEND_FRACTION_ADDRESS
+#define LPO_INIT_DIVIDEND_FRACTION_OFFSET WLAN_LPO_INIT_DIVIDEND_FRACTION_OFFSET
+#define LPO_INIT_DIVIDEND_FRACTION_VALUE_MSB WLAN_LPO_INIT_DIVIDEND_FRACTION_VALUE_MSB
+#define LPO_INIT_DIVIDEND_FRACTION_VALUE_LSB WLAN_LPO_INIT_DIVIDEND_FRACTION_VALUE_LSB
+#define LPO_INIT_DIVIDEND_FRACTION_VALUE_MASK WLAN_LPO_INIT_DIVIDEND_FRACTION_VALUE_MASK
+#define LPO_INIT_DIVIDEND_FRACTION_VALUE_GET(x) WLAN_LPO_INIT_DIVIDEND_FRACTION_VALUE_GET(x)
+#define LPO_INIT_DIVIDEND_FRACTION_VALUE_SET(x) WLAN_LPO_INIT_DIVIDEND_FRACTION_VALUE_SET(x)
+#define LPO_CAL_ADDRESS WLAN_LPO_CAL_ADDRESS
+#define LPO_CAL_OFFSET WLAN_LPO_CAL_OFFSET
+#define LPO_CAL_ENABLE_MSB WLAN_LPO_CAL_ENABLE_MSB
+#define LPO_CAL_ENABLE_LSB WLAN_LPO_CAL_ENABLE_LSB
+#define LPO_CAL_ENABLE_MASK WLAN_LPO_CAL_ENABLE_MASK
+#define LPO_CAL_ENABLE_GET(x) WLAN_LPO_CAL_ENABLE_GET(x)
+#define LPO_CAL_ENABLE_SET(x) WLAN_LPO_CAL_ENABLE_SET(x)
+#define LPO_CAL_COUNT_MSB WLAN_LPO_CAL_COUNT_MSB
+#define LPO_CAL_COUNT_LSB WLAN_LPO_CAL_COUNT_LSB
+#define LPO_CAL_COUNT_MASK WLAN_LPO_CAL_COUNT_MASK
+#define LPO_CAL_COUNT_GET(x) WLAN_LPO_CAL_COUNT_GET(x)
+#define LPO_CAL_COUNT_SET(x) WLAN_LPO_CAL_COUNT_SET(x)
+#define LPO_CAL_TEST_CONTROL_ADDRESS WLAN_LPO_CAL_TEST_CONTROL_ADDRESS
+#define LPO_CAL_TEST_CONTROL_OFFSET WLAN_LPO_CAL_TEST_CONTROL_OFFSET
+#define LPO_CAL_TEST_CONTROL_ENABLE_MSB WLAN_LPO_CAL_TEST_CONTROL_ENABLE_MSB
+#define LPO_CAL_TEST_CONTROL_ENABLE_LSB WLAN_LPO_CAL_TEST_CONTROL_ENABLE_LSB
+#define LPO_CAL_TEST_CONTROL_ENABLE_MASK WLAN_LPO_CAL_TEST_CONTROL_ENABLE_MASK
+#define LPO_CAL_TEST_CONTROL_ENABLE_GET(x) WLAN_LPO_CAL_TEST_CONTROL_ENABLE_GET(x)
+#define LPO_CAL_TEST_CONTROL_ENABLE_SET(x) WLAN_LPO_CAL_TEST_CONTROL_ENABLE_SET(x)
+#define LPO_CAL_TEST_CONTROL_RTC_CYCLES_MSB WLAN_LPO_CAL_TEST_CONTROL_RTC_CYCLES_MSB
+#define LPO_CAL_TEST_CONTROL_RTC_CYCLES_LSB WLAN_LPO_CAL_TEST_CONTROL_RTC_CYCLES_LSB
+#define LPO_CAL_TEST_CONTROL_RTC_CYCLES_MASK WLAN_LPO_CAL_TEST_CONTROL_RTC_CYCLES_MASK
+#define LPO_CAL_TEST_CONTROL_RTC_CYCLES_GET(x) WLAN_LPO_CAL_TEST_CONTROL_RTC_CYCLES_GET(x)
+#define LPO_CAL_TEST_CONTROL_RTC_CYCLES_SET(x) WLAN_LPO_CAL_TEST_CONTROL_RTC_CYCLES_SET(x)
+#define LPO_CAL_TEST_STATUS_ADDRESS WLAN_LPO_CAL_TEST_STATUS_ADDRESS
+#define LPO_CAL_TEST_STATUS_OFFSET WLAN_LPO_CAL_TEST_STATUS_OFFSET
+#define LPO_CAL_TEST_STATUS_READY_MSB WLAN_LPO_CAL_TEST_STATUS_READY_MSB
+#define LPO_CAL_TEST_STATUS_READY_LSB WLAN_LPO_CAL_TEST_STATUS_READY_LSB
+#define LPO_CAL_TEST_STATUS_READY_MASK WLAN_LPO_CAL_TEST_STATUS_READY_MASK
+#define LPO_CAL_TEST_STATUS_READY_GET(x) WLAN_LPO_CAL_TEST_STATUS_READY_GET(x)
+#define LPO_CAL_TEST_STATUS_READY_SET(x) WLAN_LPO_CAL_TEST_STATUS_READY_SET(x)
+#define LPO_CAL_TEST_STATUS_COUNT_MSB WLAN_LPO_CAL_TEST_STATUS_COUNT_MSB
+#define LPO_CAL_TEST_STATUS_COUNT_LSB WLAN_LPO_CAL_TEST_STATUS_COUNT_LSB
+#define LPO_CAL_TEST_STATUS_COUNT_MASK WLAN_LPO_CAL_TEST_STATUS_COUNT_MASK
+#define LPO_CAL_TEST_STATUS_COUNT_GET(x) WLAN_LPO_CAL_TEST_STATUS_COUNT_GET(x)
+#define LPO_CAL_TEST_STATUS_COUNT_SET(x) WLAN_LPO_CAL_TEST_STATUS_COUNT_SET(x)
+#define CHIP_ID_ADDRESS WLAN_CHIP_ID_ADDRESS
+#define CHIP_ID_OFFSET WLAN_CHIP_ID_OFFSET
+#define CHIP_ID_DEVICE_ID_MSB WLAN_CHIP_ID_DEVICE_ID_MSB
+#define CHIP_ID_DEVICE_ID_LSB WLAN_CHIP_ID_DEVICE_ID_LSB
+#define CHIP_ID_DEVICE_ID_MASK WLAN_CHIP_ID_DEVICE_ID_MASK
+#define CHIP_ID_DEVICE_ID_GET(x) WLAN_CHIP_ID_DEVICE_ID_GET(x)
+#define CHIP_ID_DEVICE_ID_SET(x) WLAN_CHIP_ID_DEVICE_ID_SET(x)
+#define CHIP_ID_CONFIG_ID_MSB WLAN_CHIP_ID_CONFIG_ID_MSB
+#define CHIP_ID_CONFIG_ID_LSB WLAN_CHIP_ID_CONFIG_ID_LSB
+#define CHIP_ID_CONFIG_ID_MASK WLAN_CHIP_ID_CONFIG_ID_MASK
+#define CHIP_ID_CONFIG_ID_GET(x) WLAN_CHIP_ID_CONFIG_ID_GET(x)
+#define CHIP_ID_CONFIG_ID_SET(x) WLAN_CHIP_ID_CONFIG_ID_SET(x)
+#define CHIP_ID_VERSION_ID_MSB WLAN_CHIP_ID_VERSION_ID_MSB
+#define CHIP_ID_VERSION_ID_LSB WLAN_CHIP_ID_VERSION_ID_LSB
+#define CHIP_ID_VERSION_ID_MASK WLAN_CHIP_ID_VERSION_ID_MASK
+#define CHIP_ID_VERSION_ID_GET(x) WLAN_CHIP_ID_VERSION_ID_GET(x)
+#define CHIP_ID_VERSION_ID_SET(x) WLAN_CHIP_ID_VERSION_ID_SET(x)
+#define DERIVED_RTC_CLK_ADDRESS WLAN_DERIVED_RTC_CLK_ADDRESS
+#define DERIVED_RTC_CLK_OFFSET WLAN_DERIVED_RTC_CLK_OFFSET
+#define DERIVED_RTC_CLK_EXTERNAL_DETECT_EN_MSB WLAN_DERIVED_RTC_CLK_EXTERNAL_DETECT_EN_MSB
+#define DERIVED_RTC_CLK_EXTERNAL_DETECT_EN_LSB WLAN_DERIVED_RTC_CLK_EXTERNAL_DETECT_EN_LSB
+#define DERIVED_RTC_CLK_EXTERNAL_DETECT_EN_MASK WLAN_DERIVED_RTC_CLK_EXTERNAL_DETECT_EN_MASK
+#define DERIVED_RTC_CLK_EXTERNAL_DETECT_EN_GET(x) WLAN_DERIVED_RTC_CLK_EXTERNAL_DETECT_EN_GET(x)
+#define DERIVED_RTC_CLK_EXTERNAL_DETECT_EN_SET(x) WLAN_DERIVED_RTC_CLK_EXTERNAL_DETECT_EN_SET(x)
+#define DERIVED_RTC_CLK_EXTERNAL_DETECT_MSB WLAN_DERIVED_RTC_CLK_EXTERNAL_DETECT_MSB
+#define DERIVED_RTC_CLK_EXTERNAL_DETECT_LSB WLAN_DERIVED_RTC_CLK_EXTERNAL_DETECT_LSB
+#define DERIVED_RTC_CLK_EXTERNAL_DETECT_MASK WLAN_DERIVED_RTC_CLK_EXTERNAL_DETECT_MASK
+#define DERIVED_RTC_CLK_EXTERNAL_DETECT_GET(x) WLAN_DERIVED_RTC_CLK_EXTERNAL_DETECT_GET(x)
+#define DERIVED_RTC_CLK_EXTERNAL_DETECT_SET(x) WLAN_DERIVED_RTC_CLK_EXTERNAL_DETECT_SET(x)
+#define DERIVED_RTC_CLK_FORCE_MSB WLAN_DERIVED_RTC_CLK_FORCE_MSB
+#define DERIVED_RTC_CLK_FORCE_LSB WLAN_DERIVED_RTC_CLK_FORCE_LSB
+#define DERIVED_RTC_CLK_FORCE_MASK WLAN_DERIVED_RTC_CLK_FORCE_MASK
+#define DERIVED_RTC_CLK_FORCE_GET(x) WLAN_DERIVED_RTC_CLK_FORCE_GET(x)
+#define DERIVED_RTC_CLK_FORCE_SET(x) WLAN_DERIVED_RTC_CLK_FORCE_SET(x)
+#define DERIVED_RTC_CLK_PERIOD_MSB WLAN_DERIVED_RTC_CLK_PERIOD_MSB
+#define DERIVED_RTC_CLK_PERIOD_LSB WLAN_DERIVED_RTC_CLK_PERIOD_LSB
+#define DERIVED_RTC_CLK_PERIOD_MASK WLAN_DERIVED_RTC_CLK_PERIOD_MASK
+#define DERIVED_RTC_CLK_PERIOD_GET(x) WLAN_DERIVED_RTC_CLK_PERIOD_GET(x)
+#define DERIVED_RTC_CLK_PERIOD_SET(x) WLAN_DERIVED_RTC_CLK_PERIOD_SET(x)
+#define POWER_REG_ADDRESS WLAN_POWER_REG_ADDRESS
+#define POWER_REG_OFFSET WLAN_POWER_REG_OFFSET
+#define POWER_REG_SLEEP_MAKE_N_BREAK_EN_MSB WLAN_POWER_REG_SLEEP_MAKE_N_BREAK_EN_MSB
+#define POWER_REG_SLEEP_MAKE_N_BREAK_EN_LSB WLAN_POWER_REG_SLEEP_MAKE_N_BREAK_EN_LSB
+#define POWER_REG_SLEEP_MAKE_N_BREAK_EN_MASK WLAN_POWER_REG_SLEEP_MAKE_N_BREAK_EN_MASK
+#define POWER_REG_SLEEP_MAKE_N_BREAK_EN_GET(x) WLAN_POWER_REG_SLEEP_MAKE_N_BREAK_EN_GET(x)
+#define POWER_REG_SLEEP_MAKE_N_BREAK_EN_SET(x) WLAN_POWER_REG_SLEEP_MAKE_N_BREAK_EN_SET(x)
+#define POWER_REG_DEBUG_EN_MSB WLAN_POWER_REG_DEBUG_EN_MSB
+#define POWER_REG_DEBUG_EN_LSB WLAN_POWER_REG_DEBUG_EN_LSB
+#define POWER_REG_DEBUG_EN_MASK WLAN_POWER_REG_DEBUG_EN_MASK
+#define POWER_REG_DEBUG_EN_GET(x) WLAN_POWER_REG_DEBUG_EN_GET(x)
+#define POWER_REG_DEBUG_EN_SET(x) WLAN_POWER_REG_DEBUG_EN_SET(x)
+#define POWER_REG_WLAN_BB_PWD_EN_MSB WLAN_POWER_REG_WLAN_BB_PWD_EN_MSB
+#define POWER_REG_WLAN_BB_PWD_EN_LSB WLAN_POWER_REG_WLAN_BB_PWD_EN_LSB
+#define POWER_REG_WLAN_BB_PWD_EN_MASK WLAN_POWER_REG_WLAN_BB_PWD_EN_MASK
+#define POWER_REG_WLAN_BB_PWD_EN_GET(x) WLAN_POWER_REG_WLAN_BB_PWD_EN_GET(x)
+#define POWER_REG_WLAN_BB_PWD_EN_SET(x) WLAN_POWER_REG_WLAN_BB_PWD_EN_SET(x)
+#define POWER_REG_WLAN_MAC_PWD_EN_MSB WLAN_POWER_REG_WLAN_MAC_PWD_EN_MSB
+#define POWER_REG_WLAN_MAC_PWD_EN_LSB WLAN_POWER_REG_WLAN_MAC_PWD_EN_LSB
+#define POWER_REG_WLAN_MAC_PWD_EN_MASK WLAN_POWER_REG_WLAN_MAC_PWD_EN_MASK
+#define POWER_REG_WLAN_MAC_PWD_EN_GET(x) WLAN_POWER_REG_WLAN_MAC_PWD_EN_GET(x)
+#define POWER_REG_WLAN_MAC_PWD_EN_SET(x) WLAN_POWER_REG_WLAN_MAC_PWD_EN_SET(x)
+#define POWER_REG_VLVL_MSB WLAN_POWER_REG_VLVL_MSB
+#define POWER_REG_VLVL_LSB WLAN_POWER_REG_VLVL_LSB
+#define POWER_REG_VLVL_MASK WLAN_POWER_REG_VLVL_MASK
+#define POWER_REG_VLVL_GET(x) WLAN_POWER_REG_VLVL_GET(x)
+#define POWER_REG_VLVL_SET(x) WLAN_POWER_REG_VLVL_SET(x)
+#define POWER_REG_CPU_INT_ENABLE_MSB WLAN_POWER_REG_CPU_INT_ENABLE_MSB
+#define POWER_REG_CPU_INT_ENABLE_LSB WLAN_POWER_REG_CPU_INT_ENABLE_LSB
+#define POWER_REG_CPU_INT_ENABLE_MASK WLAN_POWER_REG_CPU_INT_ENABLE_MASK
+#define POWER_REG_CPU_INT_ENABLE_GET(x) WLAN_POWER_REG_CPU_INT_ENABLE_GET(x)
+#define POWER_REG_CPU_INT_ENABLE_SET(x) WLAN_POWER_REG_CPU_INT_ENABLE_SET(x)
+#define POWER_REG_WLAN_ISO_DIS_MSB WLAN_POWER_REG_WLAN_ISO_DIS_MSB
+#define POWER_REG_WLAN_ISO_DIS_LSB WLAN_POWER_REG_WLAN_ISO_DIS_LSB
+#define POWER_REG_WLAN_ISO_DIS_MASK WLAN_POWER_REG_WLAN_ISO_DIS_MASK
+#define POWER_REG_WLAN_ISO_DIS_GET(x) WLAN_POWER_REG_WLAN_ISO_DIS_GET(x)
+#define POWER_REG_WLAN_ISO_DIS_SET(x) WLAN_POWER_REG_WLAN_ISO_DIS_SET(x)
+#define POWER_REG_WLAN_ISO_CNTL_MSB WLAN_POWER_REG_WLAN_ISO_CNTL_MSB
+#define POWER_REG_WLAN_ISO_CNTL_LSB WLAN_POWER_REG_WLAN_ISO_CNTL_LSB
+#define POWER_REG_WLAN_ISO_CNTL_MASK WLAN_POWER_REG_WLAN_ISO_CNTL_MASK
+#define POWER_REG_WLAN_ISO_CNTL_GET(x) WLAN_POWER_REG_WLAN_ISO_CNTL_GET(x)
+#define POWER_REG_WLAN_ISO_CNTL_SET(x) WLAN_POWER_REG_WLAN_ISO_CNTL_SET(x)
+#define POWER_REG_RADIO_PWD_EN_MSB WLAN_POWER_REG_RADIO_PWD_EN_MSB
+#define POWER_REG_RADIO_PWD_EN_LSB WLAN_POWER_REG_RADIO_PWD_EN_LSB
+#define POWER_REG_RADIO_PWD_EN_MASK WLAN_POWER_REG_RADIO_PWD_EN_MASK
+#define POWER_REG_RADIO_PWD_EN_GET(x) WLAN_POWER_REG_RADIO_PWD_EN_GET(x)
+#define POWER_REG_RADIO_PWD_EN_SET(x) WLAN_POWER_REG_RADIO_PWD_EN_SET(x)
+#define POWER_REG_SOC_ISO_EN_MSB WLAN_POWER_REG_SOC_ISO_EN_MSB
+#define POWER_REG_SOC_ISO_EN_LSB WLAN_POWER_REG_SOC_ISO_EN_LSB
+#define POWER_REG_SOC_ISO_EN_MASK WLAN_POWER_REG_SOC_ISO_EN_MASK
+#define POWER_REG_SOC_ISO_EN_GET(x) WLAN_POWER_REG_SOC_ISO_EN_GET(x)
+#define POWER_REG_SOC_ISO_EN_SET(x) WLAN_POWER_REG_SOC_ISO_EN_SET(x)
+#define POWER_REG_WLAN_ISO_EN_MSB WLAN_POWER_REG_WLAN_ISO_EN_MSB
+#define POWER_REG_WLAN_ISO_EN_LSB WLAN_POWER_REG_WLAN_ISO_EN_LSB
+#define POWER_REG_WLAN_ISO_EN_MASK WLAN_POWER_REG_WLAN_ISO_EN_MASK
+#define POWER_REG_WLAN_ISO_EN_GET(x) WLAN_POWER_REG_WLAN_ISO_EN_GET(x)
+#define POWER_REG_WLAN_ISO_EN_SET(x) WLAN_POWER_REG_WLAN_ISO_EN_SET(x)
+#define POWER_REG_WLAN_PWD_EN_MSB WLAN_POWER_REG_WLAN_PWD_EN_MSB
+#define POWER_REG_WLAN_PWD_EN_LSB WLAN_POWER_REG_WLAN_PWD_EN_LSB
+#define POWER_REG_WLAN_PWD_EN_MASK WLAN_POWER_REG_WLAN_PWD_EN_MASK
+#define POWER_REG_WLAN_PWD_EN_GET(x) WLAN_POWER_REG_WLAN_PWD_EN_GET(x)
+#define POWER_REG_WLAN_PWD_EN_SET(x) WLAN_POWER_REG_WLAN_PWD_EN_SET(x)
+#define POWER_REG_POWER_EN_MSB WLAN_POWER_REG_POWER_EN_MSB
+#define POWER_REG_POWER_EN_LSB WLAN_POWER_REG_POWER_EN_LSB
+#define POWER_REG_POWER_EN_MASK WLAN_POWER_REG_POWER_EN_MASK
+#define POWER_REG_POWER_EN_GET(x) WLAN_POWER_REG_POWER_EN_GET(x)
+#define POWER_REG_POWER_EN_SET(x) WLAN_POWER_REG_POWER_EN_SET(x)
+#define CORE_CLK_CTRL_ADDRESS WLAN_CORE_CLK_CTRL_ADDRESS
+#define CORE_CLK_CTRL_OFFSET WLAN_CORE_CLK_CTRL_OFFSET
+#define CORE_CLK_CTRL_DIV_MSB WLAN_CORE_CLK_CTRL_DIV_MSB
+#define CORE_CLK_CTRL_DIV_LSB WLAN_CORE_CLK_CTRL_DIV_LSB
+#define CORE_CLK_CTRL_DIV_MASK WLAN_CORE_CLK_CTRL_DIV_MASK
+#define CORE_CLK_CTRL_DIV_GET(x) WLAN_CORE_CLK_CTRL_DIV_GET(x)
+#define CORE_CLK_CTRL_DIV_SET(x) WLAN_CORE_CLK_CTRL_DIV_SET(x)
+#define GPIO_WAKEUP_CONTROL_ADDRESS WLAN_GPIO_WAKEUP_CONTROL_ADDRESS
+#define GPIO_WAKEUP_CONTROL_OFFSET WLAN_GPIO_WAKEUP_CONTROL_OFFSET
+#define GPIO_WAKEUP_CONTROL_ENABLE_MSB WLAN_GPIO_WAKEUP_CONTROL_ENABLE_MSB
+#define GPIO_WAKEUP_CONTROL_ENABLE_LSB WLAN_GPIO_WAKEUP_CONTROL_ENABLE_LSB
+#define GPIO_WAKEUP_CONTROL_ENABLE_MASK WLAN_GPIO_WAKEUP_CONTROL_ENABLE_MASK
+#define GPIO_WAKEUP_CONTROL_ENABLE_GET(x) WLAN_GPIO_WAKEUP_CONTROL_ENABLE_GET(x)
+#define GPIO_WAKEUP_CONTROL_ENABLE_SET(x) WLAN_GPIO_WAKEUP_CONTROL_ENABLE_SET(x)
+
+
+#endif
+#endif
+
+
+
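The defines above are the tail of a compatibility block: each unprefixed name (INT_STATUS_*, LF_TIMER_*, RTC_*, SYSTEM_SLEEP_*, POWER_REG_*, and so on) is a thin alias for the corresponding WLAN_-prefixed definition, so code written against either spelling resolves to the same address, mask and shift. A minimal sketch of the idea follows; the prefixed value is made up purely for illustration, since only the alias layer is visible in this hunk:

#include <assert.h>

/* Prefixed definition (value hypothetical, for illustration only). */
#define WLAN_INT_STATUS_MAC_MASK 0x00001000
/* Unprefixed alias, spelled exactly as in the block above. */
#define INT_STATUS_MAC_MASK WLAN_INT_STATUS_MAC_MASK

int main(void)
{
	/* Both spellings expand to the same constant. */
	assert(INT_STATUS_MAC_MASK == WLAN_INT_STATUS_MAC_MASK);
	return 0;
}
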
diff --git a/drivers/staging/ath6kl/include/common/AR6002/hw4.0/hw/rtc_wlan_reg.h b/drivers/staging/ath6kl/include/common/AR6002/hw4.0/hw/rtc_wlan_reg.h
new file mode 100644
index 000000000000..abf872650054
--- /dev/null
+++ b/drivers/staging/ath6kl/include/common/AR6002/hw4.0/hw/rtc_wlan_reg.h
@@ -0,0 +1,2065 @@
+// ------------------------------------------------------------------
+// Copyright (c) 2004-2010 Atheros Corporation. All rights reserved.
+//
+//
+// Permission to use, copy, modify, and/or distribute this software for any
+// purpose with or without fee is hereby granted, provided that the above
+// copyright notice and this permission notice appear in all copies.
+//
+// THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+// WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+// MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+// ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+// WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+// ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+// OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+//
+//
+// ------------------------------------------------------------------
+//===================================================================
+// Author(s): ="Atheros"
+//===================================================================
+
+
+#ifndef _RTC_WLAN_REG_REG_H_
+#define _RTC_WLAN_REG_REG_H_
+
+#define WLAN_RESET_CONTROL_ADDRESS 0x00000000
+#define WLAN_RESET_CONTROL_OFFSET 0x00000000
+#define WLAN_RESET_CONTROL_DEBUG_UART_RST_MSB 14
+#define WLAN_RESET_CONTROL_DEBUG_UART_RST_LSB 14
+#define WLAN_RESET_CONTROL_DEBUG_UART_RST_MASK 0x00004000
+#define WLAN_RESET_CONTROL_DEBUG_UART_RST_GET(x) (((x) & WLAN_RESET_CONTROL_DEBUG_UART_RST_MASK) >> WLAN_RESET_CONTROL_DEBUG_UART_RST_LSB)
+#define WLAN_RESET_CONTROL_DEBUG_UART_RST_SET(x) (((x) << WLAN_RESET_CONTROL_DEBUG_UART_RST_LSB) & WLAN_RESET_CONTROL_DEBUG_UART_RST_MASK)
+#define WLAN_RESET_CONTROL_BB_COLD_RST_MSB 13
+#define WLAN_RESET_CONTROL_BB_COLD_RST_LSB 13
+#define WLAN_RESET_CONTROL_BB_COLD_RST_MASK 0x00002000
+#define WLAN_RESET_CONTROL_BB_COLD_RST_GET(x) (((x) & WLAN_RESET_CONTROL_BB_COLD_RST_MASK) >> WLAN_RESET_CONTROL_BB_COLD_RST_LSB)
+#define WLAN_RESET_CONTROL_BB_COLD_RST_SET(x) (((x) << WLAN_RESET_CONTROL_BB_COLD_RST_LSB) & WLAN_RESET_CONTROL_BB_COLD_RST_MASK)
+#define WLAN_RESET_CONTROL_BB_WARM_RST_MSB 12
+#define WLAN_RESET_CONTROL_BB_WARM_RST_LSB 12
+#define WLAN_RESET_CONTROL_BB_WARM_RST_MASK 0x00001000
+#define WLAN_RESET_CONTROL_BB_WARM_RST_GET(x) (((x) & WLAN_RESET_CONTROL_BB_WARM_RST_MASK) >> WLAN_RESET_CONTROL_BB_WARM_RST_LSB)
+#define WLAN_RESET_CONTROL_BB_WARM_RST_SET(x) (((x) << WLAN_RESET_CONTROL_BB_WARM_RST_LSB) & WLAN_RESET_CONTROL_BB_WARM_RST_MASK)
+#define WLAN_RESET_CONTROL_CPU_INIT_RESET_MSB 11
+#define WLAN_RESET_CONTROL_CPU_INIT_RESET_LSB 11
+#define WLAN_RESET_CONTROL_CPU_INIT_RESET_MASK 0x00000800
+#define WLAN_RESET_CONTROL_CPU_INIT_RESET_GET(x) (((x) & WLAN_RESET_CONTROL_CPU_INIT_RESET_MASK) >> WLAN_RESET_CONTROL_CPU_INIT_RESET_LSB)
+#define WLAN_RESET_CONTROL_CPU_INIT_RESET_SET(x) (((x) << WLAN_RESET_CONTROL_CPU_INIT_RESET_LSB) & WLAN_RESET_CONTROL_CPU_INIT_RESET_MASK)
+#define WLAN_RESET_CONTROL_VMC_REMAP_RESET_MSB 10
+#define WLAN_RESET_CONTROL_VMC_REMAP_RESET_LSB 10
+#define WLAN_RESET_CONTROL_VMC_REMAP_RESET_MASK 0x00000400
+#define WLAN_RESET_CONTROL_VMC_REMAP_RESET_GET(x) (((x) & WLAN_RESET_CONTROL_VMC_REMAP_RESET_MASK) >> WLAN_RESET_CONTROL_VMC_REMAP_RESET_LSB)
+#define WLAN_RESET_CONTROL_VMC_REMAP_RESET_SET(x) (((x) << WLAN_RESET_CONTROL_VMC_REMAP_RESET_LSB) & WLAN_RESET_CONTROL_VMC_REMAP_RESET_MASK)
+#define WLAN_RESET_CONTROL_RST_OUT_MSB 9
+#define WLAN_RESET_CONTROL_RST_OUT_LSB 9
+#define WLAN_RESET_CONTROL_RST_OUT_MASK 0x00000200
+#define WLAN_RESET_CONTROL_RST_OUT_GET(x) (((x) & WLAN_RESET_CONTROL_RST_OUT_MASK) >> WLAN_RESET_CONTROL_RST_OUT_LSB)
+#define WLAN_RESET_CONTROL_RST_OUT_SET(x) (((x) << WLAN_RESET_CONTROL_RST_OUT_LSB) & WLAN_RESET_CONTROL_RST_OUT_MASK)
+#define WLAN_RESET_CONTROL_COLD_RST_MSB 8
+#define WLAN_RESET_CONTROL_COLD_RST_LSB 8
+#define WLAN_RESET_CONTROL_COLD_RST_MASK 0x00000100
+#define WLAN_RESET_CONTROL_COLD_RST_GET(x) (((x) & WLAN_RESET_CONTROL_COLD_RST_MASK) >> WLAN_RESET_CONTROL_COLD_RST_LSB)
+#define WLAN_RESET_CONTROL_COLD_RST_SET(x) (((x) << WLAN_RESET_CONTROL_COLD_RST_LSB) & WLAN_RESET_CONTROL_COLD_RST_MASK)
+#define WLAN_RESET_CONTROL_WARM_RST_MSB 7
+#define WLAN_RESET_CONTROL_WARM_RST_LSB 7
+#define WLAN_RESET_CONTROL_WARM_RST_MASK 0x00000080
+#define WLAN_RESET_CONTROL_WARM_RST_GET(x) (((x) & WLAN_RESET_CONTROL_WARM_RST_MASK) >> WLAN_RESET_CONTROL_WARM_RST_LSB)
+#define WLAN_RESET_CONTROL_WARM_RST_SET(x) (((x) << WLAN_RESET_CONTROL_WARM_RST_LSB) & WLAN_RESET_CONTROL_WARM_RST_MASK)
+#define WLAN_RESET_CONTROL_CPU_WARM_RST_MSB 6
+#define WLAN_RESET_CONTROL_CPU_WARM_RST_LSB 6
+#define WLAN_RESET_CONTROL_CPU_WARM_RST_MASK 0x00000040
+#define WLAN_RESET_CONTROL_CPU_WARM_RST_GET(x) (((x) & WLAN_RESET_CONTROL_CPU_WARM_RST_MASK) >> WLAN_RESET_CONTROL_CPU_WARM_RST_LSB)
+#define WLAN_RESET_CONTROL_CPU_WARM_RST_SET(x) (((x) << WLAN_RESET_CONTROL_CPU_WARM_RST_LSB) & WLAN_RESET_CONTROL_CPU_WARM_RST_MASK)
+#define WLAN_RESET_CONTROL_MAC_COLD_RST_MSB 5
+#define WLAN_RESET_CONTROL_MAC_COLD_RST_LSB 5
+#define WLAN_RESET_CONTROL_MAC_COLD_RST_MASK 0x00000020
+#define WLAN_RESET_CONTROL_MAC_COLD_RST_GET(x) (((x) & WLAN_RESET_CONTROL_MAC_COLD_RST_MASK) >> WLAN_RESET_CONTROL_MAC_COLD_RST_LSB)
+#define WLAN_RESET_CONTROL_MAC_COLD_RST_SET(x) (((x) << WLAN_RESET_CONTROL_MAC_COLD_RST_LSB) & WLAN_RESET_CONTROL_MAC_COLD_RST_MASK)
+#define WLAN_RESET_CONTROL_MAC_WARM_RST_MSB 4
+#define WLAN_RESET_CONTROL_MAC_WARM_RST_LSB 4
+#define WLAN_RESET_CONTROL_MAC_WARM_RST_MASK 0x00000010
+#define WLAN_RESET_CONTROL_MAC_WARM_RST_GET(x) (((x) & WLAN_RESET_CONTROL_MAC_WARM_RST_MASK) >> WLAN_RESET_CONTROL_MAC_WARM_RST_LSB)
+#define WLAN_RESET_CONTROL_MAC_WARM_RST_SET(x) (((x) << WLAN_RESET_CONTROL_MAC_WARM_RST_LSB) & WLAN_RESET_CONTROL_MAC_WARM_RST_MASK)
+#define WLAN_RESET_CONTROL_MBOX_RST_MSB 2
+#define WLAN_RESET_CONTROL_MBOX_RST_LSB 2
+#define WLAN_RESET_CONTROL_MBOX_RST_MASK 0x00000004
+#define WLAN_RESET_CONTROL_MBOX_RST_GET(x) (((x) & WLAN_RESET_CONTROL_MBOX_RST_MASK) >> WLAN_RESET_CONTROL_MBOX_RST_LSB)
+#define WLAN_RESET_CONTROL_MBOX_RST_SET(x) (((x) << WLAN_RESET_CONTROL_MBOX_RST_LSB) & WLAN_RESET_CONTROL_MBOX_RST_MASK)
+#define WLAN_RESET_CONTROL_UART_RST_MSB 1
+#define WLAN_RESET_CONTROL_UART_RST_LSB 1
+#define WLAN_RESET_CONTROL_UART_RST_MASK 0x00000002
+#define WLAN_RESET_CONTROL_UART_RST_GET(x) (((x) & WLAN_RESET_CONTROL_UART_RST_MASK) >> WLAN_RESET_CONTROL_UART_RST_LSB)
+#define WLAN_RESET_CONTROL_UART_RST_SET(x) (((x) << WLAN_RESET_CONTROL_UART_RST_LSB) & WLAN_RESET_CONTROL_UART_RST_MASK)
+#define WLAN_RESET_CONTROL_SI0_RST_MSB 0
+#define WLAN_RESET_CONTROL_SI0_RST_LSB 0
+#define WLAN_RESET_CONTROL_SI0_RST_MASK 0x00000001
+#define WLAN_RESET_CONTROL_SI0_RST_GET(x) (((x) & WLAN_RESET_CONTROL_SI0_RST_MASK) >> WLAN_RESET_CONTROL_SI0_RST_LSB)
+#define WLAN_RESET_CONTROL_SI0_RST_SET(x) (((x) << WLAN_RESET_CONTROL_SI0_RST_LSB) & WLAN_RESET_CONTROL_SI0_RST_MASK)
+
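As a quick illustration of how these MASK/LSB/GET/SET groups are meant to be used, here is a minimal, self-contained sketch. The register read value and the read-modify-write flow are hypothetical; only the WLAN_RESET_CONTROL_WARM_RST field encoding is taken from the definitions above:

#include <stdio.h>
#include <stdint.h>

/* Field encoding copied from the WLAN_RESET_CONTROL definitions above. */
#define WLAN_RESET_CONTROL_WARM_RST_LSB    7
#define WLAN_RESET_CONTROL_WARM_RST_MASK   0x00000080
#define WLAN_RESET_CONTROL_WARM_RST_GET(x) (((x) & WLAN_RESET_CONTROL_WARM_RST_MASK) >> WLAN_RESET_CONTROL_WARM_RST_LSB)
#define WLAN_RESET_CONTROL_WARM_RST_SET(x) (((x) << WLAN_RESET_CONTROL_WARM_RST_LSB) & WLAN_RESET_CONTROL_WARM_RST_MASK)

int main(void)
{
	uint32_t reg = 0x00000020;	/* hypothetical current register value */

	/* Read-modify-write: clear the field, then OR in the new value. */
	reg &= ~WLAN_RESET_CONTROL_WARM_RST_MASK;
	reg |= WLAN_RESET_CONTROL_WARM_RST_SET(1);

	printf("WARM_RST = %u, reg = 0x%08x\n",
	       (unsigned)WLAN_RESET_CONTROL_WARM_RST_GET(reg), (unsigned)reg);
	return 0;
}

Note that the SET macros mask after shifting, so an out-of-range argument cannot spill into neighbouring fields of the same register.
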
+#define WLAN_XTAL_CONTROL_ADDRESS 0x00000004
+#define WLAN_XTAL_CONTROL_OFFSET 0x00000004
+#define WLAN_XTAL_CONTROL_TCXO_MSB 0
+#define WLAN_XTAL_CONTROL_TCXO_LSB 0
+#define WLAN_XTAL_CONTROL_TCXO_MASK 0x00000001
+#define WLAN_XTAL_CONTROL_TCXO_GET(x) (((x) & WLAN_XTAL_CONTROL_TCXO_MASK) >> WLAN_XTAL_CONTROL_TCXO_LSB)
+#define WLAN_XTAL_CONTROL_TCXO_SET(x) (((x) << WLAN_XTAL_CONTROL_TCXO_LSB) & WLAN_XTAL_CONTROL_TCXO_MASK)
+
+#define WLAN_TCXO_DETECT_ADDRESS 0x00000008
+#define WLAN_TCXO_DETECT_OFFSET 0x00000008
+#define WLAN_TCXO_DETECT_PRESENT_MSB 0
+#define WLAN_TCXO_DETECT_PRESENT_LSB 0
+#define WLAN_TCXO_DETECT_PRESENT_MASK 0x00000001
+#define WLAN_TCXO_DETECT_PRESENT_GET(x) (((x) & WLAN_TCXO_DETECT_PRESENT_MASK) >> WLAN_TCXO_DETECT_PRESENT_LSB)
+#define WLAN_TCXO_DETECT_PRESENT_SET(x) (((x) << WLAN_TCXO_DETECT_PRESENT_LSB) & WLAN_TCXO_DETECT_PRESENT_MASK)
+
+#define WLAN_XTAL_TEST_ADDRESS 0x0000000c
+#define WLAN_XTAL_TEST_OFFSET 0x0000000c
+#define WLAN_XTAL_TEST_NOTCXODET_MSB 0
+#define WLAN_XTAL_TEST_NOTCXODET_LSB 0
+#define WLAN_XTAL_TEST_NOTCXODET_MASK 0x00000001
+#define WLAN_XTAL_TEST_NOTCXODET_GET(x) (((x) & WLAN_XTAL_TEST_NOTCXODET_MASK) >> WLAN_XTAL_TEST_NOTCXODET_LSB)
+#define WLAN_XTAL_TEST_NOTCXODET_SET(x) (((x) << WLAN_XTAL_TEST_NOTCXODET_LSB) & WLAN_XTAL_TEST_NOTCXODET_MASK)
+
+#define WLAN_QUADRATURE_ADDRESS 0x00000010
+#define WLAN_QUADRATURE_OFFSET 0x00000010
+#define WLAN_QUADRATURE_ADC_MSB 7
+#define WLAN_QUADRATURE_ADC_LSB 4
+#define WLAN_QUADRATURE_ADC_MASK 0x000000f0
+#define WLAN_QUADRATURE_ADC_GET(x) (((x) & WLAN_QUADRATURE_ADC_MASK) >> WLAN_QUADRATURE_ADC_LSB)
+#define WLAN_QUADRATURE_ADC_SET(x) (((x) << WLAN_QUADRATURE_ADC_LSB) & WLAN_QUADRATURE_ADC_MASK)
+#define WLAN_QUADRATURE_SEL_MSB 2
+#define WLAN_QUADRATURE_SEL_LSB 2
+#define WLAN_QUADRATURE_SEL_MASK 0x00000004
+#define WLAN_QUADRATURE_SEL_GET(x) (((x) & WLAN_QUADRATURE_SEL_MASK) >> WLAN_QUADRATURE_SEL_LSB)
+#define WLAN_QUADRATURE_SEL_SET(x) (((x) << WLAN_QUADRATURE_SEL_LSB) & WLAN_QUADRATURE_SEL_MASK)
+#define WLAN_QUADRATURE_DAC_MSB 1
+#define WLAN_QUADRATURE_DAC_LSB 0
+#define WLAN_QUADRATURE_DAC_MASK 0x00000003
+#define WLAN_QUADRATURE_DAC_GET(x) (((x) & WLAN_QUADRATURE_DAC_MASK) >> WLAN_QUADRATURE_DAC_LSB)
+#define WLAN_QUADRATURE_DAC_SET(x) (((x) << WLAN_QUADRATURE_DAC_LSB) & WLAN_QUADRATURE_DAC_MASK)
+
+#define WLAN_PLL_CONTROL_ADDRESS 0x00000014
+#define WLAN_PLL_CONTROL_OFFSET 0x00000014
+#define WLAN_PLL_CONTROL_DIG_TEST_CLK_MSB 20
+#define WLAN_PLL_CONTROL_DIG_TEST_CLK_LSB 20
+#define WLAN_PLL_CONTROL_DIG_TEST_CLK_MASK 0x00100000
+#define WLAN_PLL_CONTROL_DIG_TEST_CLK_GET(x) (((x) & WLAN_PLL_CONTROL_DIG_TEST_CLK_MASK) >> WLAN_PLL_CONTROL_DIG_TEST_CLK_LSB)
+#define WLAN_PLL_CONTROL_DIG_TEST_CLK_SET(x) (((x) << WLAN_PLL_CONTROL_DIG_TEST_CLK_LSB) & WLAN_PLL_CONTROL_DIG_TEST_CLK_MASK)
+#define WLAN_PLL_CONTROL_MAC_OVERRIDE_MSB 19
+#define WLAN_PLL_CONTROL_MAC_OVERRIDE_LSB 19
+#define WLAN_PLL_CONTROL_MAC_OVERRIDE_MASK 0x00080000
+#define WLAN_PLL_CONTROL_MAC_OVERRIDE_GET(x) (((x) & WLAN_PLL_CONTROL_MAC_OVERRIDE_MASK) >> WLAN_PLL_CONTROL_MAC_OVERRIDE_LSB)
+#define WLAN_PLL_CONTROL_MAC_OVERRIDE_SET(x) (((x) << WLAN_PLL_CONTROL_MAC_OVERRIDE_LSB) & WLAN_PLL_CONTROL_MAC_OVERRIDE_MASK)
+#define WLAN_PLL_CONTROL_NOPWD_MSB 18
+#define WLAN_PLL_CONTROL_NOPWD_LSB 18
+#define WLAN_PLL_CONTROL_NOPWD_MASK 0x00040000
+#define WLAN_PLL_CONTROL_NOPWD_GET(x) (((x) & WLAN_PLL_CONTROL_NOPWD_MASK) >> WLAN_PLL_CONTROL_NOPWD_LSB)
+#define WLAN_PLL_CONTROL_NOPWD_SET(x) (((x) << WLAN_PLL_CONTROL_NOPWD_LSB) & WLAN_PLL_CONTROL_NOPWD_MASK)
+#define WLAN_PLL_CONTROL_UPDATING_MSB 17
+#define WLAN_PLL_CONTROL_UPDATING_LSB 17
+#define WLAN_PLL_CONTROL_UPDATING_MASK 0x00020000
+#define WLAN_PLL_CONTROL_UPDATING_GET(x) (((x) & WLAN_PLL_CONTROL_UPDATING_MASK) >> WLAN_PLL_CONTROL_UPDATING_LSB)
+#define WLAN_PLL_CONTROL_UPDATING_SET(x) (((x) << WLAN_PLL_CONTROL_UPDATING_LSB) & WLAN_PLL_CONTROL_UPDATING_MASK)
+#define WLAN_PLL_CONTROL_BYPASS_MSB 16
+#define WLAN_PLL_CONTROL_BYPASS_LSB 16
+#define WLAN_PLL_CONTROL_BYPASS_MASK 0x00010000
+#define WLAN_PLL_CONTROL_BYPASS_GET(x) (((x) & WLAN_PLL_CONTROL_BYPASS_MASK) >> WLAN_PLL_CONTROL_BYPASS_LSB)
+#define WLAN_PLL_CONTROL_BYPASS_SET(x) (((x) << WLAN_PLL_CONTROL_BYPASS_LSB) & WLAN_PLL_CONTROL_BYPASS_MASK)
+#define WLAN_PLL_CONTROL_REFDIV_MSB 15
+#define WLAN_PLL_CONTROL_REFDIV_LSB 12
+#define WLAN_PLL_CONTROL_REFDIV_MASK 0x0000f000
+#define WLAN_PLL_CONTROL_REFDIV_GET(x) (((x) & WLAN_PLL_CONTROL_REFDIV_MASK) >> WLAN_PLL_CONTROL_REFDIV_LSB)
+#define WLAN_PLL_CONTROL_REFDIV_SET(x) (((x) << WLAN_PLL_CONTROL_REFDIV_LSB) & WLAN_PLL_CONTROL_REFDIV_MASK)
+#define WLAN_PLL_CONTROL_DIV_MSB 9
+#define WLAN_PLL_CONTROL_DIV_LSB 0
+#define WLAN_PLL_CONTROL_DIV_MASK 0x000003ff
+#define WLAN_PLL_CONTROL_DIV_GET(x) (((x) & WLAN_PLL_CONTROL_DIV_MASK) >> WLAN_PLL_CONTROL_DIV_LSB)
+#define WLAN_PLL_CONTROL_DIV_SET(x) (((x) << WLAN_PLL_CONTROL_DIV_LSB) & WLAN_PLL_CONTROL_DIV_MASK)
+
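Multi-bit fields such as the PLL divider follow the same pattern, and a full register value is typically composed by ORing several SET results together. A small sketch under that assumption; the divider values chosen are made up for illustration, while the REFDIV/DIV encodings come from the WLAN_PLL_CONTROL definitions above:

#include <stdio.h>
#include <stdint.h>

/* Field encodings copied from the WLAN_PLL_CONTROL definitions above. */
#define WLAN_PLL_CONTROL_REFDIV_LSB     12
#define WLAN_PLL_CONTROL_REFDIV_MASK    0x0000f000
#define WLAN_PLL_CONTROL_REFDIV_SET(x)  (((x) << WLAN_PLL_CONTROL_REFDIV_LSB) & WLAN_PLL_CONTROL_REFDIV_MASK)
#define WLAN_PLL_CONTROL_DIV_LSB        0
#define WLAN_PLL_CONTROL_DIV_MASK       0x000003ff
#define WLAN_PLL_CONTROL_DIV_SET(x)     (((x) << WLAN_PLL_CONTROL_DIV_LSB) & WLAN_PLL_CONTROL_DIV_MASK)

int main(void)
{
	/* Compose one register value from several fields (divider values illustrative only). */
	uint32_t val = WLAN_PLL_CONTROL_REFDIV_SET(5) | WLAN_PLL_CONTROL_DIV_SET(88);

	printf("PLL_CONTROL = 0x%08x\n", (unsigned)val);	/* prints 0x00005058 */
	return 0;
}
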
+#define WLAN_PLL_SETTLE_ADDRESS 0x00000018
+#define WLAN_PLL_SETTLE_OFFSET 0x00000018
+#define WLAN_PLL_SETTLE_TIME_MSB 11
+#define WLAN_PLL_SETTLE_TIME_LSB 0
+#define WLAN_PLL_SETTLE_TIME_MASK 0x00000fff
+#define WLAN_PLL_SETTLE_TIME_GET(x) (((x) & WLAN_PLL_SETTLE_TIME_MASK) >> WLAN_PLL_SETTLE_TIME_LSB)
+#define WLAN_PLL_SETTLE_TIME_SET(x) (((x) << WLAN_PLL_SETTLE_TIME_LSB) & WLAN_PLL_SETTLE_TIME_MASK)
+
+#define WLAN_XTAL_SETTLE_ADDRESS 0x0000001c
+#define WLAN_XTAL_SETTLE_OFFSET 0x0000001c
+#define WLAN_XTAL_SETTLE_TIME_MSB 7
+#define WLAN_XTAL_SETTLE_TIME_LSB 0
+#define WLAN_XTAL_SETTLE_TIME_MASK 0x000000ff
+#define WLAN_XTAL_SETTLE_TIME_GET(x) (((x) & WLAN_XTAL_SETTLE_TIME_MASK) >> WLAN_XTAL_SETTLE_TIME_LSB)
+#define WLAN_XTAL_SETTLE_TIME_SET(x) (((x) << WLAN_XTAL_SETTLE_TIME_LSB) & WLAN_XTAL_SETTLE_TIME_MASK)
+
+#define WLAN_CPU_CLOCK_ADDRESS 0x00000020
+#define WLAN_CPU_CLOCK_OFFSET 0x00000020
+#define WLAN_CPU_CLOCK_STANDARD_MSB 1
+#define WLAN_CPU_CLOCK_STANDARD_LSB 0
+#define WLAN_CPU_CLOCK_STANDARD_MASK 0x00000003
+#define WLAN_CPU_CLOCK_STANDARD_GET(x) (((x) & WLAN_CPU_CLOCK_STANDARD_MASK) >> WLAN_CPU_CLOCK_STANDARD_LSB)
+#define WLAN_CPU_CLOCK_STANDARD_SET(x) (((x) << WLAN_CPU_CLOCK_STANDARD_LSB) & WLAN_CPU_CLOCK_STANDARD_MASK)
+
+#define WLAN_CLOCK_OUT_ADDRESS 0x00000024
+#define WLAN_CLOCK_OUT_OFFSET 0x00000024
+#define WLAN_CLOCK_OUT_SELECT_MSB 3
+#define WLAN_CLOCK_OUT_SELECT_LSB 0
+#define WLAN_CLOCK_OUT_SELECT_MASK 0x0000000f
+#define WLAN_CLOCK_OUT_SELECT_GET(x) (((x) & WLAN_CLOCK_OUT_SELECT_MASK) >> WLAN_CLOCK_OUT_SELECT_LSB)
+#define WLAN_CLOCK_OUT_SELECT_SET(x) (((x) << WLAN_CLOCK_OUT_SELECT_LSB) & WLAN_CLOCK_OUT_SELECT_MASK)
+
+#define WLAN_CLOCK_CONTROL_ADDRESS 0x00000028
+#define WLAN_CLOCK_CONTROL_OFFSET 0x00000028
+#define WLAN_CLOCK_CONTROL_LF_CLK32_MSB 2
+#define WLAN_CLOCK_CONTROL_LF_CLK32_LSB 2
+#define WLAN_CLOCK_CONTROL_LF_CLK32_MASK 0x00000004
+#define WLAN_CLOCK_CONTROL_LF_CLK32_GET(x) (((x) & WLAN_CLOCK_CONTROL_LF_CLK32_MASK) >> WLAN_CLOCK_CONTROL_LF_CLK32_LSB)
+#define WLAN_CLOCK_CONTROL_LF_CLK32_SET(x) (((x) << WLAN_CLOCK_CONTROL_LF_CLK32_LSB) & WLAN_CLOCK_CONTROL_LF_CLK32_MASK)
+#define WLAN_CLOCK_CONTROL_SI0_CLK_MSB 0
+#define WLAN_CLOCK_CONTROL_SI0_CLK_LSB 0
+#define WLAN_CLOCK_CONTROL_SI0_CLK_MASK 0x00000001
+#define WLAN_CLOCK_CONTROL_SI0_CLK_GET(x) (((x) & WLAN_CLOCK_CONTROL_SI0_CLK_MASK) >> WLAN_CLOCK_CONTROL_SI0_CLK_LSB)
+#define WLAN_CLOCK_CONTROL_SI0_CLK_SET(x) (((x) << WLAN_CLOCK_CONTROL_SI0_CLK_LSB) & WLAN_CLOCK_CONTROL_SI0_CLK_MASK)
+
+#define WLAN_BIAS_OVERRIDE_ADDRESS 0x0000002c
+#define WLAN_BIAS_OVERRIDE_OFFSET 0x0000002c
+#define WLAN_BIAS_OVERRIDE_ON_MSB 0
+#define WLAN_BIAS_OVERRIDE_ON_LSB 0
+#define WLAN_BIAS_OVERRIDE_ON_MASK 0x00000001
+#define WLAN_BIAS_OVERRIDE_ON_GET(x) (((x) & WLAN_BIAS_OVERRIDE_ON_MASK) >> WLAN_BIAS_OVERRIDE_ON_LSB)
+#define WLAN_BIAS_OVERRIDE_ON_SET(x) (((x) << WLAN_BIAS_OVERRIDE_ON_LSB) & WLAN_BIAS_OVERRIDE_ON_MASK)
+
+#define WLAN_WDT_CONTROL_ADDRESS 0x00000030
+#define WLAN_WDT_CONTROL_OFFSET 0x00000030
+#define WLAN_WDT_CONTROL_ACTION_MSB 2
+#define WLAN_WDT_CONTROL_ACTION_LSB 0
+#define WLAN_WDT_CONTROL_ACTION_MASK 0x00000007
+#define WLAN_WDT_CONTROL_ACTION_GET(x) (((x) & WLAN_WDT_CONTROL_ACTION_MASK) >> WLAN_WDT_CONTROL_ACTION_LSB)
+#define WLAN_WDT_CONTROL_ACTION_SET(x) (((x) << WLAN_WDT_CONTROL_ACTION_LSB) & WLAN_WDT_CONTROL_ACTION_MASK)
+
+#define WLAN_WDT_STATUS_ADDRESS 0x00000034
+#define WLAN_WDT_STATUS_OFFSET 0x00000034
+#define WLAN_WDT_STATUS_INTERRUPT_MSB 0
+#define WLAN_WDT_STATUS_INTERRUPT_LSB 0
+#define WLAN_WDT_STATUS_INTERRUPT_MASK 0x00000001
+#define WLAN_WDT_STATUS_INTERRUPT_GET(x) (((x) & WLAN_WDT_STATUS_INTERRUPT_MASK) >> WLAN_WDT_STATUS_INTERRUPT_LSB)
+#define WLAN_WDT_STATUS_INTERRUPT_SET(x) (((x) << WLAN_WDT_STATUS_INTERRUPT_LSB) & WLAN_WDT_STATUS_INTERRUPT_MASK)
+
+#define WLAN_WDT_ADDRESS 0x00000038
+#define WLAN_WDT_OFFSET 0x00000038
+#define WLAN_WDT_TARGET_MSB 21
+#define WLAN_WDT_TARGET_LSB 0
+#define WLAN_WDT_TARGET_MASK 0x003fffff
+#define WLAN_WDT_TARGET_GET(x) (((x) & WLAN_WDT_TARGET_MASK) >> WLAN_WDT_TARGET_LSB)
+#define WLAN_WDT_TARGET_SET(x) (((x) << WLAN_WDT_TARGET_LSB) & WLAN_WDT_TARGET_MASK)
+
+#define WLAN_WDT_COUNT_ADDRESS 0x0000003c
+#define WLAN_WDT_COUNT_OFFSET 0x0000003c
+#define WLAN_WDT_COUNT_VALUE_MSB 21
+#define WLAN_WDT_COUNT_VALUE_LSB 0
+#define WLAN_WDT_COUNT_VALUE_MASK 0x003fffff
+#define WLAN_WDT_COUNT_VALUE_GET(x) (((x) & WLAN_WDT_COUNT_VALUE_MASK) >> WLAN_WDT_COUNT_VALUE_LSB)
+#define WLAN_WDT_COUNT_VALUE_SET(x) (((x) << WLAN_WDT_COUNT_VALUE_LSB) & WLAN_WDT_COUNT_VALUE_MASK)
+
+#define WLAN_WDT_RESET_ADDRESS 0x00000040
+#define WLAN_WDT_RESET_OFFSET 0x00000040
+#define WLAN_WDT_RESET_VALUE_MSB 0
+#define WLAN_WDT_RESET_VALUE_LSB 0
+#define WLAN_WDT_RESET_VALUE_MASK 0x00000001
+#define WLAN_WDT_RESET_VALUE_GET(x) (((x) & WLAN_WDT_RESET_VALUE_MASK) >> WLAN_WDT_RESET_VALUE_LSB)
+#define WLAN_WDT_RESET_VALUE_SET(x) (((x) << WLAN_WDT_RESET_VALUE_LSB) & WLAN_WDT_RESET_VALUE_MASK)
+
+#define WLAN_INT_STATUS_ADDRESS 0x00000044
+#define WLAN_INT_STATUS_OFFSET 0x00000044
+#define WLAN_INT_STATUS_HCI_UART_MSB 21
+#define WLAN_INT_STATUS_HCI_UART_LSB 21
+#define WLAN_INT_STATUS_HCI_UART_MASK 0x00200000
+#define WLAN_INT_STATUS_HCI_UART_GET(x) (((x) & WLAN_INT_STATUS_HCI_UART_MASK) >> WLAN_INT_STATUS_HCI_UART_LSB)
+#define WLAN_INT_STATUS_HCI_UART_SET(x) (((x) << WLAN_INT_STATUS_HCI_UART_LSB) & WLAN_INT_STATUS_HCI_UART_MASK)
+#define WLAN_INT_STATUS_THERM_MSB 20
+#define WLAN_INT_STATUS_THERM_LSB 20
+#define WLAN_INT_STATUS_THERM_MASK 0x00100000
+#define WLAN_INT_STATUS_THERM_GET(x) (((x) & WLAN_INT_STATUS_THERM_MASK) >> WLAN_INT_STATUS_THERM_LSB)
+#define WLAN_INT_STATUS_THERM_SET(x) (((x) << WLAN_INT_STATUS_THERM_LSB) & WLAN_INT_STATUS_THERM_MASK)
+#define WLAN_INT_STATUS_EFUSE_OVERWRITE_MSB 19
+#define WLAN_INT_STATUS_EFUSE_OVERWRITE_LSB 19
+#define WLAN_INT_STATUS_EFUSE_OVERWRITE_MASK 0x00080000
+#define WLAN_INT_STATUS_EFUSE_OVERWRITE_GET(x) (((x) & WLAN_INT_STATUS_EFUSE_OVERWRITE_MASK) >> WLAN_INT_STATUS_EFUSE_OVERWRITE_LSB)
+#define WLAN_INT_STATUS_EFUSE_OVERWRITE_SET(x) (((x) << WLAN_INT_STATUS_EFUSE_OVERWRITE_LSB) & WLAN_INT_STATUS_EFUSE_OVERWRITE_MASK)
+#define WLAN_INT_STATUS_UART_MBOX_MSB 18
+#define WLAN_INT_STATUS_UART_MBOX_LSB 18
+#define WLAN_INT_STATUS_UART_MBOX_MASK 0x00040000
+#define WLAN_INT_STATUS_UART_MBOX_GET(x) (((x) & WLAN_INT_STATUS_UART_MBOX_MASK) >> WLAN_INT_STATUS_UART_MBOX_LSB)
+#define WLAN_INT_STATUS_UART_MBOX_SET(x) (((x) << WLAN_INT_STATUS_UART_MBOX_LSB) & WLAN_INT_STATUS_UART_MBOX_MASK)
+#define WLAN_INT_STATUS_GENERIC_MBOX_MSB 17
+#define WLAN_INT_STATUS_GENERIC_MBOX_LSB 17
+#define WLAN_INT_STATUS_GENERIC_MBOX_MASK 0x00020000
+#define WLAN_INT_STATUS_GENERIC_MBOX_GET(x) (((x) & WLAN_INT_STATUS_GENERIC_MBOX_MASK) >> WLAN_INT_STATUS_GENERIC_MBOX_LSB)
+#define WLAN_INT_STATUS_GENERIC_MBOX_SET(x) (((x) << WLAN_INT_STATUS_GENERIC_MBOX_LSB) & WLAN_INT_STATUS_GENERIC_MBOX_MASK)
+#define WLAN_INT_STATUS_RDMA_MSB 16
+#define WLAN_INT_STATUS_RDMA_LSB 16
+#define WLAN_INT_STATUS_RDMA_MASK 0x00010000
+#define WLAN_INT_STATUS_RDMA_GET(x) (((x) & WLAN_INT_STATUS_RDMA_MASK) >> WLAN_INT_STATUS_RDMA_LSB)
+#define WLAN_INT_STATUS_RDMA_SET(x) (((x) << WLAN_INT_STATUS_RDMA_LSB) & WLAN_INT_STATUS_RDMA_MASK)
+#define WLAN_INT_STATUS_BTCOEX_MSB 15
+#define WLAN_INT_STATUS_BTCOEX_LSB 15
+#define WLAN_INT_STATUS_BTCOEX_MASK 0x00008000
+#define WLAN_INT_STATUS_BTCOEX_GET(x) (((x) & WLAN_INT_STATUS_BTCOEX_MASK) >> WLAN_INT_STATUS_BTCOEX_LSB)
+#define WLAN_INT_STATUS_BTCOEX_SET(x) (((x) << WLAN_INT_STATUS_BTCOEX_LSB) & WLAN_INT_STATUS_BTCOEX_MASK)
+#define WLAN_INT_STATUS_RTC_POWER_MSB 14
+#define WLAN_INT_STATUS_RTC_POWER_LSB 14
+#define WLAN_INT_STATUS_RTC_POWER_MASK 0x00004000
+#define WLAN_INT_STATUS_RTC_POWER_GET(x) (((x) & WLAN_INT_STATUS_RTC_POWER_MASK) >> WLAN_INT_STATUS_RTC_POWER_LSB)
+#define WLAN_INT_STATUS_RTC_POWER_SET(x) (((x) << WLAN_INT_STATUS_RTC_POWER_LSB) & WLAN_INT_STATUS_RTC_POWER_MASK)
+#define WLAN_INT_STATUS_MAC_MSB 13
+#define WLAN_INT_STATUS_MAC_LSB 13
+#define WLAN_INT_STATUS_MAC_MASK 0x00002000
+#define WLAN_INT_STATUS_MAC_GET(x) (((x) & WLAN_INT_STATUS_MAC_MASK) >> WLAN_INT_STATUS_MAC_LSB)
+#define WLAN_INT_STATUS_MAC_SET(x) (((x) << WLAN_INT_STATUS_MAC_LSB) & WLAN_INT_STATUS_MAC_MASK)
+#define WLAN_INT_STATUS_MAILBOX_MSB 12
+#define WLAN_INT_STATUS_MAILBOX_LSB 12
+#define WLAN_INT_STATUS_MAILBOX_MASK 0x00001000
+#define WLAN_INT_STATUS_MAILBOX_GET(x) (((x) & WLAN_INT_STATUS_MAILBOX_MASK) >> WLAN_INT_STATUS_MAILBOX_LSB)
+#define WLAN_INT_STATUS_MAILBOX_SET(x) (((x) << WLAN_INT_STATUS_MAILBOX_LSB) & WLAN_INT_STATUS_MAILBOX_MASK)
+#define WLAN_INT_STATUS_RTC_ALARM_MSB 11
+#define WLAN_INT_STATUS_RTC_ALARM_LSB 11
+#define WLAN_INT_STATUS_RTC_ALARM_MASK 0x00000800
+#define WLAN_INT_STATUS_RTC_ALARM_GET(x) (((x) & WLAN_INT_STATUS_RTC_ALARM_MASK) >> WLAN_INT_STATUS_RTC_ALARM_LSB)
+#define WLAN_INT_STATUS_RTC_ALARM_SET(x) (((x) << WLAN_INT_STATUS_RTC_ALARM_LSB) & WLAN_INT_STATUS_RTC_ALARM_MASK)
+#define WLAN_INT_STATUS_HF_TIMER_MSB 10
+#define WLAN_INT_STATUS_HF_TIMER_LSB 10
+#define WLAN_INT_STATUS_HF_TIMER_MASK 0x00000400
+#define WLAN_INT_STATUS_HF_TIMER_GET(x) (((x) & WLAN_INT_STATUS_HF_TIMER_MASK) >> WLAN_INT_STATUS_HF_TIMER_LSB)
+#define WLAN_INT_STATUS_HF_TIMER_SET(x) (((x) << WLAN_INT_STATUS_HF_TIMER_LSB) & WLAN_INT_STATUS_HF_TIMER_MASK)
+#define WLAN_INT_STATUS_LF_TIMER3_MSB 9
+#define WLAN_INT_STATUS_LF_TIMER3_LSB 9
+#define WLAN_INT_STATUS_LF_TIMER3_MASK 0x00000200
+#define WLAN_INT_STATUS_LF_TIMER3_GET(x) (((x) & WLAN_INT_STATUS_LF_TIMER3_MASK) >> WLAN_INT_STATUS_LF_TIMER3_LSB)
+#define WLAN_INT_STATUS_LF_TIMER3_SET(x) (((x) << WLAN_INT_STATUS_LF_TIMER3_LSB) & WLAN_INT_STATUS_LF_TIMER3_MASK)
+#define WLAN_INT_STATUS_LF_TIMER2_MSB 8
+#define WLAN_INT_STATUS_LF_TIMER2_LSB 8
+#define WLAN_INT_STATUS_LF_TIMER2_MASK 0x00000100
+#define WLAN_INT_STATUS_LF_TIMER2_GET(x) (((x) & WLAN_INT_STATUS_LF_TIMER2_MASK) >> WLAN_INT_STATUS_LF_TIMER2_LSB)
+#define WLAN_INT_STATUS_LF_TIMER2_SET(x) (((x) << WLAN_INT_STATUS_LF_TIMER2_LSB) & WLAN_INT_STATUS_LF_TIMER2_MASK)
+#define WLAN_INT_STATUS_LF_TIMER1_MSB 7
+#define WLAN_INT_STATUS_LF_TIMER1_LSB 7
+#define WLAN_INT_STATUS_LF_TIMER1_MASK 0x00000080
+#define WLAN_INT_STATUS_LF_TIMER1_GET(x) (((x) & WLAN_INT_STATUS_LF_TIMER1_MASK) >> WLAN_INT_STATUS_LF_TIMER1_LSB)
+#define WLAN_INT_STATUS_LF_TIMER1_SET(x) (((x) << WLAN_INT_STATUS_LF_TIMER1_LSB) & WLAN_INT_STATUS_LF_TIMER1_MASK)
+#define WLAN_INT_STATUS_LF_TIMER0_MSB 6
+#define WLAN_INT_STATUS_LF_TIMER0_LSB 6
+#define WLAN_INT_STATUS_LF_TIMER0_MASK 0x00000040
+#define WLAN_INT_STATUS_LF_TIMER0_GET(x) (((x) & WLAN_INT_STATUS_LF_TIMER0_MASK) >> WLAN_INT_STATUS_LF_TIMER0_LSB)
+#define WLAN_INT_STATUS_LF_TIMER0_SET(x) (((x) << WLAN_INT_STATUS_LF_TIMER0_LSB) & WLAN_INT_STATUS_LF_TIMER0_MASK)
+#define WLAN_INT_STATUS_KEYPAD_MSB 5
+#define WLAN_INT_STATUS_KEYPAD_LSB 5
+#define WLAN_INT_STATUS_KEYPAD_MASK 0x00000020
+#define WLAN_INT_STATUS_KEYPAD_GET(x) (((x) & WLAN_INT_STATUS_KEYPAD_MASK) >> WLAN_INT_STATUS_KEYPAD_LSB)
+#define WLAN_INT_STATUS_KEYPAD_SET(x) (((x) << WLAN_INT_STATUS_KEYPAD_LSB) & WLAN_INT_STATUS_KEYPAD_MASK)
+#define WLAN_INT_STATUS_SI_MSB 4
+#define WLAN_INT_STATUS_SI_LSB 4
+#define WLAN_INT_STATUS_SI_MASK 0x00000010
+#define WLAN_INT_STATUS_SI_GET(x) (((x) & WLAN_INT_STATUS_SI_MASK) >> WLAN_INT_STATUS_SI_LSB)
+#define WLAN_INT_STATUS_SI_SET(x) (((x) << WLAN_INT_STATUS_SI_LSB) & WLAN_INT_STATUS_SI_MASK)
+#define WLAN_INT_STATUS_GPIO_MSB 3
+#define WLAN_INT_STATUS_GPIO_LSB 3
+#define WLAN_INT_STATUS_GPIO_MASK 0x00000008
+#define WLAN_INT_STATUS_GPIO_GET(x) (((x) & WLAN_INT_STATUS_GPIO_MASK) >> WLAN_INT_STATUS_GPIO_LSB)
+#define WLAN_INT_STATUS_GPIO_SET(x) (((x) << WLAN_INT_STATUS_GPIO_LSB) & WLAN_INT_STATUS_GPIO_MASK)
+#define WLAN_INT_STATUS_UART_MSB 2
+#define WLAN_INT_STATUS_UART_LSB 2
+#define WLAN_INT_STATUS_UART_MASK 0x00000004
+#define WLAN_INT_STATUS_UART_GET(x) (((x) & WLAN_INT_STATUS_UART_MASK) >> WLAN_INT_STATUS_UART_LSB)
+#define WLAN_INT_STATUS_UART_SET(x) (((x) << WLAN_INT_STATUS_UART_LSB) & WLAN_INT_STATUS_UART_MASK)
+#define WLAN_INT_STATUS_ERROR_MSB 1
+#define WLAN_INT_STATUS_ERROR_LSB 1
+#define WLAN_INT_STATUS_ERROR_MASK 0x00000002
+#define WLAN_INT_STATUS_ERROR_GET(x) (((x) & WLAN_INT_STATUS_ERROR_MASK) >> WLAN_INT_STATUS_ERROR_LSB)
+#define WLAN_INT_STATUS_ERROR_SET(x) (((x) << WLAN_INT_STATUS_ERROR_LSB) & WLAN_INT_STATUS_ERROR_MASK)
+#define WLAN_INT_STATUS_WDT_INT_MSB 0
+#define WLAN_INT_STATUS_WDT_INT_LSB 0
+#define WLAN_INT_STATUS_WDT_INT_MASK 0x00000001
+#define WLAN_INT_STATUS_WDT_INT_GET(x) (((x) & WLAN_INT_STATUS_WDT_INT_MASK) >> WLAN_INT_STATUS_WDT_INT_LSB)
+#define WLAN_INT_STATUS_WDT_INT_SET(x) (((x) << WLAN_INT_STATUS_WDT_INT_LSB) & WLAN_INT_STATUS_WDT_INT_MASK)
+
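/*
 * Illustrative sketch only (not part of this header): the single-bit _GET()
 * helpers above reduce a raw WLAN_INT_STATUS word to 0/1 per source.
 * "status" is assumed to be whatever the caller read from
 * WLAN_INT_STATUS_ADDRESS; the prints are purely for the example.
 */
static void wlan_dump_int_status(u32 status)
{
	if (WLAN_INT_STATUS_WDT_INT_GET(status))
		pr_info("watchdog interrupt pending\n");
	if (WLAN_INT_STATUS_RTC_ALARM_GET(status))
		pr_info("RTC alarm interrupt pending\n");
	if (WLAN_INT_STATUS_MAILBOX_GET(status))
		pr_info("mailbox interrupt pending\n");
}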
+#define WLAN_LF_TIMER0_ADDRESS 0x00000048
+#define WLAN_LF_TIMER0_OFFSET 0x00000048
+#define WLAN_LF_TIMER0_TARGET_MSB 31
+#define WLAN_LF_TIMER0_TARGET_LSB 0
+#define WLAN_LF_TIMER0_TARGET_MASK 0xffffffff
+#define WLAN_LF_TIMER0_TARGET_GET(x) (((x) & WLAN_LF_TIMER0_TARGET_MASK) >> WLAN_LF_TIMER0_TARGET_LSB)
+#define WLAN_LF_TIMER0_TARGET_SET(x) (((x) << WLAN_LF_TIMER0_TARGET_LSB) & WLAN_LF_TIMER0_TARGET_MASK)
+
+#define WLAN_LF_TIMER_COUNT0_ADDRESS 0x0000004c
+#define WLAN_LF_TIMER_COUNT0_OFFSET 0x0000004c
+#define WLAN_LF_TIMER_COUNT0_VALUE_MSB 31
+#define WLAN_LF_TIMER_COUNT0_VALUE_LSB 0
+#define WLAN_LF_TIMER_COUNT0_VALUE_MASK 0xffffffff
+#define WLAN_LF_TIMER_COUNT0_VALUE_GET(x) (((x) & WLAN_LF_TIMER_COUNT0_VALUE_MASK) >> WLAN_LF_TIMER_COUNT0_VALUE_LSB)
+#define WLAN_LF_TIMER_COUNT0_VALUE_SET(x) (((x) << WLAN_LF_TIMER_COUNT0_VALUE_LSB) & WLAN_LF_TIMER_COUNT0_VALUE_MASK)
+
+#define WLAN_LF_TIMER_CONTROL0_ADDRESS 0x00000050
+#define WLAN_LF_TIMER_CONTROL0_OFFSET 0x00000050
+#define WLAN_LF_TIMER_CONTROL0_ENABLE_MSB 2
+#define WLAN_LF_TIMER_CONTROL0_ENABLE_LSB 2
+#define WLAN_LF_TIMER_CONTROL0_ENABLE_MASK 0x00000004
+#define WLAN_LF_TIMER_CONTROL0_ENABLE_GET(x) (((x) & WLAN_LF_TIMER_CONTROL0_ENABLE_MASK) >> WLAN_LF_TIMER_CONTROL0_ENABLE_LSB)
+#define WLAN_LF_TIMER_CONTROL0_ENABLE_SET(x) (((x) << WLAN_LF_TIMER_CONTROL0_ENABLE_LSB) & WLAN_LF_TIMER_CONTROL0_ENABLE_MASK)
+#define WLAN_LF_TIMER_CONTROL0_AUTO_RESTART_MSB 1
+#define WLAN_LF_TIMER_CONTROL0_AUTO_RESTART_LSB 1
+#define WLAN_LF_TIMER_CONTROL0_AUTO_RESTART_MASK 0x00000002
+#define WLAN_LF_TIMER_CONTROL0_AUTO_RESTART_GET(x) (((x) & WLAN_LF_TIMER_CONTROL0_AUTO_RESTART_MASK) >> WLAN_LF_TIMER_CONTROL0_AUTO_RESTART_LSB)
+#define WLAN_LF_TIMER_CONTROL0_AUTO_RESTART_SET(x) (((x) << WLAN_LF_TIMER_CONTROL0_AUTO_RESTART_LSB) & WLAN_LF_TIMER_CONTROL0_AUTO_RESTART_MASK)
+#define WLAN_LF_TIMER_CONTROL0_RESET_MSB 0
+#define WLAN_LF_TIMER_CONTROL0_RESET_LSB 0
+#define WLAN_LF_TIMER_CONTROL0_RESET_MASK 0x00000001
+#define WLAN_LF_TIMER_CONTROL0_RESET_GET(x) (((x) & WLAN_LF_TIMER_CONTROL0_RESET_MASK) >> WLAN_LF_TIMER_CONTROL0_RESET_LSB)
+#define WLAN_LF_TIMER_CONTROL0_RESET_SET(x) (((x) << WLAN_LF_TIMER_CONTROL0_RESET_LSB) & WLAN_LF_TIMER_CONTROL0_RESET_MASK)
+
+#define WLAN_LF_TIMER_STATUS0_ADDRESS 0x00000054
+#define WLAN_LF_TIMER_STATUS0_OFFSET 0x00000054
+#define WLAN_LF_TIMER_STATUS0_INTERRUPT_MSB 0
+#define WLAN_LF_TIMER_STATUS0_INTERRUPT_LSB 0
+#define WLAN_LF_TIMER_STATUS0_INTERRUPT_MASK 0x00000001
+#define WLAN_LF_TIMER_STATUS0_INTERRUPT_GET(x) (((x) & WLAN_LF_TIMER_STATUS0_INTERRUPT_MASK) >> WLAN_LF_TIMER_STATUS0_INTERRUPT_LSB)
+#define WLAN_LF_TIMER_STATUS0_INTERRUPT_SET(x) (((x) << WLAN_LF_TIMER_STATUS0_INTERRUPT_LSB) & WLAN_LF_TIMER_STATUS0_INTERRUPT_MASK)
+
+#define WLAN_LF_TIMER1_ADDRESS 0x00000058
+#define WLAN_LF_TIMER1_OFFSET 0x00000058
+#define WLAN_LF_TIMER1_TARGET_MSB 31
+#define WLAN_LF_TIMER1_TARGET_LSB 0
+#define WLAN_LF_TIMER1_TARGET_MASK 0xffffffff
+#define WLAN_LF_TIMER1_TARGET_GET(x) (((x) & WLAN_LF_TIMER1_TARGET_MASK) >> WLAN_LF_TIMER1_TARGET_LSB)
+#define WLAN_LF_TIMER1_TARGET_SET(x) (((x) << WLAN_LF_TIMER1_TARGET_LSB) & WLAN_LF_TIMER1_TARGET_MASK)
+
+#define WLAN_LF_TIMER_COUNT1_ADDRESS 0x0000005c
+#define WLAN_LF_TIMER_COUNT1_OFFSET 0x0000005c
+#define WLAN_LF_TIMER_COUNT1_VALUE_MSB 31
+#define WLAN_LF_TIMER_COUNT1_VALUE_LSB 0
+#define WLAN_LF_TIMER_COUNT1_VALUE_MASK 0xffffffff
+#define WLAN_LF_TIMER_COUNT1_VALUE_GET(x) (((x) & WLAN_LF_TIMER_COUNT1_VALUE_MASK) >> WLAN_LF_TIMER_COUNT1_VALUE_LSB)
+#define WLAN_LF_TIMER_COUNT1_VALUE_SET(x) (((x) << WLAN_LF_TIMER_COUNT1_VALUE_LSB) & WLAN_LF_TIMER_COUNT1_VALUE_MASK)
+
+#define WLAN_LF_TIMER_CONTROL1_ADDRESS 0x00000060
+#define WLAN_LF_TIMER_CONTROL1_OFFSET 0x00000060
+#define WLAN_LF_TIMER_CONTROL1_ENABLE_MSB 2
+#define WLAN_LF_TIMER_CONTROL1_ENABLE_LSB 2
+#define WLAN_LF_TIMER_CONTROL1_ENABLE_MASK 0x00000004
+#define WLAN_LF_TIMER_CONTROL1_ENABLE_GET(x) (((x) & WLAN_LF_TIMER_CONTROL1_ENABLE_MASK) >> WLAN_LF_TIMER_CONTROL1_ENABLE_LSB)
+#define WLAN_LF_TIMER_CONTROL1_ENABLE_SET(x) (((x) << WLAN_LF_TIMER_CONTROL1_ENABLE_LSB) & WLAN_LF_TIMER_CONTROL1_ENABLE_MASK)
+#define WLAN_LF_TIMER_CONTROL1_AUTO_RESTART_MSB 1
+#define WLAN_LF_TIMER_CONTROL1_AUTO_RESTART_LSB 1
+#define WLAN_LF_TIMER_CONTROL1_AUTO_RESTART_MASK 0x00000002
+#define WLAN_LF_TIMER_CONTROL1_AUTO_RESTART_GET(x) (((x) & WLAN_LF_TIMER_CONTROL1_AUTO_RESTART_MASK) >> WLAN_LF_TIMER_CONTROL1_AUTO_RESTART_LSB)
+#define WLAN_LF_TIMER_CONTROL1_AUTO_RESTART_SET(x) (((x) << WLAN_LF_TIMER_CONTROL1_AUTO_RESTART_LSB) & WLAN_LF_TIMER_CONTROL1_AUTO_RESTART_MASK)
+#define WLAN_LF_TIMER_CONTROL1_RESET_MSB 0
+#define WLAN_LF_TIMER_CONTROL1_RESET_LSB 0
+#define WLAN_LF_TIMER_CONTROL1_RESET_MASK 0x00000001
+#define WLAN_LF_TIMER_CONTROL1_RESET_GET(x) (((x) & WLAN_LF_TIMER_CONTROL1_RESET_MASK) >> WLAN_LF_TIMER_CONTROL1_RESET_LSB)
+#define WLAN_LF_TIMER_CONTROL1_RESET_SET(x) (((x) << WLAN_LF_TIMER_CONTROL1_RESET_LSB) & WLAN_LF_TIMER_CONTROL1_RESET_MASK)
+
+#define WLAN_LF_TIMER_STATUS1_ADDRESS 0x00000064
+#define WLAN_LF_TIMER_STATUS1_OFFSET 0x00000064
+#define WLAN_LF_TIMER_STATUS1_INTERRUPT_MSB 0
+#define WLAN_LF_TIMER_STATUS1_INTERRUPT_LSB 0
+#define WLAN_LF_TIMER_STATUS1_INTERRUPT_MASK 0x00000001
+#define WLAN_LF_TIMER_STATUS1_INTERRUPT_GET(x) (((x) & WLAN_LF_TIMER_STATUS1_INTERRUPT_MASK) >> WLAN_LF_TIMER_STATUS1_INTERRUPT_LSB)
+#define WLAN_LF_TIMER_STATUS1_INTERRUPT_SET(x) (((x) << WLAN_LF_TIMER_STATUS1_INTERRUPT_LSB) & WLAN_LF_TIMER_STATUS1_INTERRUPT_MASK)
+
+#define WLAN_LF_TIMER2_ADDRESS 0x00000068
+#define WLAN_LF_TIMER2_OFFSET 0x00000068
+#define WLAN_LF_TIMER2_TARGET_MSB 31
+#define WLAN_LF_TIMER2_TARGET_LSB 0
+#define WLAN_LF_TIMER2_TARGET_MASK 0xffffffff
+#define WLAN_LF_TIMER2_TARGET_GET(x) (((x) & WLAN_LF_TIMER2_TARGET_MASK) >> WLAN_LF_TIMER2_TARGET_LSB)
+#define WLAN_LF_TIMER2_TARGET_SET(x) (((x) << WLAN_LF_TIMER2_TARGET_LSB) & WLAN_LF_TIMER2_TARGET_MASK)
+
+#define WLAN_LF_TIMER_COUNT2_ADDRESS 0x0000006c
+#define WLAN_LF_TIMER_COUNT2_OFFSET 0x0000006c
+#define WLAN_LF_TIMER_COUNT2_VALUE_MSB 31
+#define WLAN_LF_TIMER_COUNT2_VALUE_LSB 0
+#define WLAN_LF_TIMER_COUNT2_VALUE_MASK 0xffffffff
+#define WLAN_LF_TIMER_COUNT2_VALUE_GET(x) (((x) & WLAN_LF_TIMER_COUNT2_VALUE_MASK) >> WLAN_LF_TIMER_COUNT2_VALUE_LSB)
+#define WLAN_LF_TIMER_COUNT2_VALUE_SET(x) (((x) << WLAN_LF_TIMER_COUNT2_VALUE_LSB) & WLAN_LF_TIMER_COUNT2_VALUE_MASK)
+
+#define WLAN_LF_TIMER_CONTROL2_ADDRESS 0x00000070
+#define WLAN_LF_TIMER_CONTROL2_OFFSET 0x00000070
+#define WLAN_LF_TIMER_CONTROL2_ENABLE_MSB 2
+#define WLAN_LF_TIMER_CONTROL2_ENABLE_LSB 2
+#define WLAN_LF_TIMER_CONTROL2_ENABLE_MASK 0x00000004
+#define WLAN_LF_TIMER_CONTROL2_ENABLE_GET(x) (((x) & WLAN_LF_TIMER_CONTROL2_ENABLE_MASK) >> WLAN_LF_TIMER_CONTROL2_ENABLE_LSB)
+#define WLAN_LF_TIMER_CONTROL2_ENABLE_SET(x) (((x) << WLAN_LF_TIMER_CONTROL2_ENABLE_LSB) & WLAN_LF_TIMER_CONTROL2_ENABLE_MASK)
+#define WLAN_LF_TIMER_CONTROL2_AUTO_RESTART_MSB 1
+#define WLAN_LF_TIMER_CONTROL2_AUTO_RESTART_LSB 1
+#define WLAN_LF_TIMER_CONTROL2_AUTO_RESTART_MASK 0x00000002
+#define WLAN_LF_TIMER_CONTROL2_AUTO_RESTART_GET(x) (((x) & WLAN_LF_TIMER_CONTROL2_AUTO_RESTART_MASK) >> WLAN_LF_TIMER_CONTROL2_AUTO_RESTART_LSB)
+#define WLAN_LF_TIMER_CONTROL2_AUTO_RESTART_SET(x) (((x) << WLAN_LF_TIMER_CONTROL2_AUTO_RESTART_LSB) & WLAN_LF_TIMER_CONTROL2_AUTO_RESTART_MASK)
+#define WLAN_LF_TIMER_CONTROL2_RESET_MSB 0
+#define WLAN_LF_TIMER_CONTROL2_RESET_LSB 0
+#define WLAN_LF_TIMER_CONTROL2_RESET_MASK 0x00000001
+#define WLAN_LF_TIMER_CONTROL2_RESET_GET(x) (((x) & WLAN_LF_TIMER_CONTROL2_RESET_MASK) >> WLAN_LF_TIMER_CONTROL2_RESET_LSB)
+#define WLAN_LF_TIMER_CONTROL2_RESET_SET(x) (((x) << WLAN_LF_TIMER_CONTROL2_RESET_LSB) & WLAN_LF_TIMER_CONTROL2_RESET_MASK)
+
+#define WLAN_LF_TIMER_STATUS2_ADDRESS 0x00000074
+#define WLAN_LF_TIMER_STATUS2_OFFSET 0x00000074
+#define WLAN_LF_TIMER_STATUS2_INTERRUPT_MSB 0
+#define WLAN_LF_TIMER_STATUS2_INTERRUPT_LSB 0
+#define WLAN_LF_TIMER_STATUS2_INTERRUPT_MASK 0x00000001
+#define WLAN_LF_TIMER_STATUS2_INTERRUPT_GET(x) (((x) & WLAN_LF_TIMER_STATUS2_INTERRUPT_MASK) >> WLAN_LF_TIMER_STATUS2_INTERRUPT_LSB)
+#define WLAN_LF_TIMER_STATUS2_INTERRUPT_SET(x) (((x) << WLAN_LF_TIMER_STATUS2_INTERRUPT_LSB) & WLAN_LF_TIMER_STATUS2_INTERRUPT_MASK)
+
+#define WLAN_LF_TIMER3_ADDRESS 0x00000078
+#define WLAN_LF_TIMER3_OFFSET 0x00000078
+#define WLAN_LF_TIMER3_TARGET_MSB 31
+#define WLAN_LF_TIMER3_TARGET_LSB 0
+#define WLAN_LF_TIMER3_TARGET_MASK 0xffffffff
+#define WLAN_LF_TIMER3_TARGET_GET(x) (((x) & WLAN_LF_TIMER3_TARGET_MASK) >> WLAN_LF_TIMER3_TARGET_LSB)
+#define WLAN_LF_TIMER3_TARGET_SET(x) (((x) << WLAN_LF_TIMER3_TARGET_LSB) & WLAN_LF_TIMER3_TARGET_MASK)
+
+#define WLAN_LF_TIMER_COUNT3_ADDRESS 0x0000007c
+#define WLAN_LF_TIMER_COUNT3_OFFSET 0x0000007c
+#define WLAN_LF_TIMER_COUNT3_VALUE_MSB 31
+#define WLAN_LF_TIMER_COUNT3_VALUE_LSB 0
+#define WLAN_LF_TIMER_COUNT3_VALUE_MASK 0xffffffff
+#define WLAN_LF_TIMER_COUNT3_VALUE_GET(x) (((x) & WLAN_LF_TIMER_COUNT3_VALUE_MASK) >> WLAN_LF_TIMER_COUNT3_VALUE_LSB)
+#define WLAN_LF_TIMER_COUNT3_VALUE_SET(x) (((x) << WLAN_LF_TIMER_COUNT3_VALUE_LSB) & WLAN_LF_TIMER_COUNT3_VALUE_MASK)
+
+#define WLAN_LF_TIMER_CONTROL3_ADDRESS 0x00000080
+#define WLAN_LF_TIMER_CONTROL3_OFFSET 0x00000080
+#define WLAN_LF_TIMER_CONTROL3_ENABLE_MSB 2
+#define WLAN_LF_TIMER_CONTROL3_ENABLE_LSB 2
+#define WLAN_LF_TIMER_CONTROL3_ENABLE_MASK 0x00000004
+#define WLAN_LF_TIMER_CONTROL3_ENABLE_GET(x) (((x) & WLAN_LF_TIMER_CONTROL3_ENABLE_MASK) >> WLAN_LF_TIMER_CONTROL3_ENABLE_LSB)
+#define WLAN_LF_TIMER_CONTROL3_ENABLE_SET(x) (((x) << WLAN_LF_TIMER_CONTROL3_ENABLE_LSB) & WLAN_LF_TIMER_CONTROL3_ENABLE_MASK)
+#define WLAN_LF_TIMER_CONTROL3_AUTO_RESTART_MSB 1
+#define WLAN_LF_TIMER_CONTROL3_AUTO_RESTART_LSB 1
+#define WLAN_LF_TIMER_CONTROL3_AUTO_RESTART_MASK 0x00000002
+#define WLAN_LF_TIMER_CONTROL3_AUTO_RESTART_GET(x) (((x) & WLAN_LF_TIMER_CONTROL3_AUTO_RESTART_MASK) >> WLAN_LF_TIMER_CONTROL3_AUTO_RESTART_LSB)
+#define WLAN_LF_TIMER_CONTROL3_AUTO_RESTART_SET(x) (((x) << WLAN_LF_TIMER_CONTROL3_AUTO_RESTART_LSB) & WLAN_LF_TIMER_CONTROL3_AUTO_RESTART_MASK)
+#define WLAN_LF_TIMER_CONTROL3_RESET_MSB 0
+#define WLAN_LF_TIMER_CONTROL3_RESET_LSB 0
+#define WLAN_LF_TIMER_CONTROL3_RESET_MASK 0x00000001
+#define WLAN_LF_TIMER_CONTROL3_RESET_GET(x) (((x) & WLAN_LF_TIMER_CONTROL3_RESET_MASK) >> WLAN_LF_TIMER_CONTROL3_RESET_LSB)
+#define WLAN_LF_TIMER_CONTROL3_RESET_SET(x) (((x) << WLAN_LF_TIMER_CONTROL3_RESET_LSB) & WLAN_LF_TIMER_CONTROL3_RESET_MASK)
+
+#define WLAN_LF_TIMER_STATUS3_ADDRESS 0x00000084
+#define WLAN_LF_TIMER_STATUS3_OFFSET 0x00000084
+#define WLAN_LF_TIMER_STATUS3_INTERRUPT_MSB 0
+#define WLAN_LF_TIMER_STATUS3_INTERRUPT_LSB 0
+#define WLAN_LF_TIMER_STATUS3_INTERRUPT_MASK 0x00000001
+#define WLAN_LF_TIMER_STATUS3_INTERRUPT_GET(x) (((x) & WLAN_LF_TIMER_STATUS3_INTERRUPT_MASK) >> WLAN_LF_TIMER_STATUS3_INTERRUPT_LSB)
+#define WLAN_LF_TIMER_STATUS3_INTERRUPT_SET(x) (((x) << WLAN_LF_TIMER_STATUS3_INTERRUPT_LSB) & WLAN_LF_TIMER_STATUS3_INTERRUPT_MASK)
+
+#define WLAN_HF_TIMER_ADDRESS 0x00000088
+#define WLAN_HF_TIMER_OFFSET 0x00000088
+#define WLAN_HF_TIMER_TARGET_MSB 31
+#define WLAN_HF_TIMER_TARGET_LSB 12
+#define WLAN_HF_TIMER_TARGET_MASK 0xfffff000
+#define WLAN_HF_TIMER_TARGET_GET(x) (((x) & WLAN_HF_TIMER_TARGET_MASK) >> WLAN_HF_TIMER_TARGET_LSB)
+#define WLAN_HF_TIMER_TARGET_SET(x) (((x) << WLAN_HF_TIMER_TARGET_LSB) & WLAN_HF_TIMER_TARGET_MASK)
+
+#define WLAN_HF_TIMER_COUNT_ADDRESS 0x0000008c
+#define WLAN_HF_TIMER_COUNT_OFFSET 0x0000008c
+#define WLAN_HF_TIMER_COUNT_VALUE_MSB 31
+#define WLAN_HF_TIMER_COUNT_VALUE_LSB 12
+#define WLAN_HF_TIMER_COUNT_VALUE_MASK 0xfffff000
+#define WLAN_HF_TIMER_COUNT_VALUE_GET(x) (((x) & WLAN_HF_TIMER_COUNT_VALUE_MASK) >> WLAN_HF_TIMER_COUNT_VALUE_LSB)
+#define WLAN_HF_TIMER_COUNT_VALUE_SET(x) (((x) << WLAN_HF_TIMER_COUNT_VALUE_LSB) & WLAN_HF_TIMER_COUNT_VALUE_MASK)
+
+#define WLAN_HF_LF_COUNT_ADDRESS 0x00000090
+#define WLAN_HF_LF_COUNT_OFFSET 0x00000090
+#define WLAN_HF_LF_COUNT_VALUE_MSB 31
+#define WLAN_HF_LF_COUNT_VALUE_LSB 0
+#define WLAN_HF_LF_COUNT_VALUE_MASK 0xffffffff
+#define WLAN_HF_LF_COUNT_VALUE_GET(x) (((x) & WLAN_HF_LF_COUNT_VALUE_MASK) >> WLAN_HF_LF_COUNT_VALUE_LSB)
+#define WLAN_HF_LF_COUNT_VALUE_SET(x) (((x) << WLAN_HF_LF_COUNT_VALUE_LSB) & WLAN_HF_LF_COUNT_VALUE_MASK)
+
+#define WLAN_HF_TIMER_CONTROL_ADDRESS 0x00000094
+#define WLAN_HF_TIMER_CONTROL_OFFSET 0x00000094
+#define WLAN_HF_TIMER_CONTROL_ENABLE_MSB 3
+#define WLAN_HF_TIMER_CONTROL_ENABLE_LSB 3
+#define WLAN_HF_TIMER_CONTROL_ENABLE_MASK 0x00000008
+#define WLAN_HF_TIMER_CONTROL_ENABLE_GET(x) (((x) & WLAN_HF_TIMER_CONTROL_ENABLE_MASK) >> WLAN_HF_TIMER_CONTROL_ENABLE_LSB)
+#define WLAN_HF_TIMER_CONTROL_ENABLE_SET(x) (((x) << WLAN_HF_TIMER_CONTROL_ENABLE_LSB) & WLAN_HF_TIMER_CONTROL_ENABLE_MASK)
+#define WLAN_HF_TIMER_CONTROL_ON_MSB 2
+#define WLAN_HF_TIMER_CONTROL_ON_LSB 2
+#define WLAN_HF_TIMER_CONTROL_ON_MASK 0x00000004
+#define WLAN_HF_TIMER_CONTROL_ON_GET(x) (((x) & WLAN_HF_TIMER_CONTROL_ON_MASK) >> WLAN_HF_TIMER_CONTROL_ON_LSB)
+#define WLAN_HF_TIMER_CONTROL_ON_SET(x) (((x) << WLAN_HF_TIMER_CONTROL_ON_LSB) & WLAN_HF_TIMER_CONTROL_ON_MASK)
+#define WLAN_HF_TIMER_CONTROL_AUTO_RESTART_MSB 1
+#define WLAN_HF_TIMER_CONTROL_AUTO_RESTART_LSB 1
+#define WLAN_HF_TIMER_CONTROL_AUTO_RESTART_MASK 0x00000002
+#define WLAN_HF_TIMER_CONTROL_AUTO_RESTART_GET(x) (((x) & WLAN_HF_TIMER_CONTROL_AUTO_RESTART_MASK) >> WLAN_HF_TIMER_CONTROL_AUTO_RESTART_LSB)
+#define WLAN_HF_TIMER_CONTROL_AUTO_RESTART_SET(x) (((x) << WLAN_HF_TIMER_CONTROL_AUTO_RESTART_LSB) & WLAN_HF_TIMER_CONTROL_AUTO_RESTART_MASK)
+#define WLAN_HF_TIMER_CONTROL_RESET_MSB 0
+#define WLAN_HF_TIMER_CONTROL_RESET_LSB 0
+#define WLAN_HF_TIMER_CONTROL_RESET_MASK 0x00000001
+#define WLAN_HF_TIMER_CONTROL_RESET_GET(x) (((x) & WLAN_HF_TIMER_CONTROL_RESET_MASK) >> WLAN_HF_TIMER_CONTROL_RESET_LSB)
+#define WLAN_HF_TIMER_CONTROL_RESET_SET(x) (((x) << WLAN_HF_TIMER_CONTROL_RESET_LSB) & WLAN_HF_TIMER_CONTROL_RESET_MASK)
+
+#define WLAN_HF_TIMER_STATUS_ADDRESS 0x00000098
+#define WLAN_HF_TIMER_STATUS_OFFSET 0x00000098
+#define WLAN_HF_TIMER_STATUS_INTERRUPT_MSB 0
+#define WLAN_HF_TIMER_STATUS_INTERRUPT_LSB 0
+#define WLAN_HF_TIMER_STATUS_INTERRUPT_MASK 0x00000001
+#define WLAN_HF_TIMER_STATUS_INTERRUPT_GET(x) (((x) & WLAN_HF_TIMER_STATUS_INTERRUPT_MASK) >> WLAN_HF_TIMER_STATUS_INTERRUPT_LSB)
+#define WLAN_HF_TIMER_STATUS_INTERRUPT_SET(x) (((x) << WLAN_HF_TIMER_STATUS_INTERRUPT_LSB) & WLAN_HF_TIMER_STATUS_INTERRUPT_MASK)
+
+#define WLAN_RTC_CONTROL_ADDRESS 0x0000009c
+#define WLAN_RTC_CONTROL_OFFSET 0x0000009c
+#define WLAN_RTC_CONTROL_ENABLE_MSB 2
+#define WLAN_RTC_CONTROL_ENABLE_LSB 2
+#define WLAN_RTC_CONTROL_ENABLE_MASK 0x00000004
+#define WLAN_RTC_CONTROL_ENABLE_GET(x) (((x) & WLAN_RTC_CONTROL_ENABLE_MASK) >> WLAN_RTC_CONTROL_ENABLE_LSB)
+#define WLAN_RTC_CONTROL_ENABLE_SET(x) (((x) << WLAN_RTC_CONTROL_ENABLE_LSB) & WLAN_RTC_CONTROL_ENABLE_MASK)
+#define WLAN_RTC_CONTROL_LOAD_RTC_MSB 1
+#define WLAN_RTC_CONTROL_LOAD_RTC_LSB 1
+#define WLAN_RTC_CONTROL_LOAD_RTC_MASK 0x00000002
+#define WLAN_RTC_CONTROL_LOAD_RTC_GET(x) (((x) & WLAN_RTC_CONTROL_LOAD_RTC_MASK) >> WLAN_RTC_CONTROL_LOAD_RTC_LSB)
+#define WLAN_RTC_CONTROL_LOAD_RTC_SET(x) (((x) << WLAN_RTC_CONTROL_LOAD_RTC_LSB) & WLAN_RTC_CONTROL_LOAD_RTC_MASK)
+#define WLAN_RTC_CONTROL_LOAD_ALARM_MSB 0
+#define WLAN_RTC_CONTROL_LOAD_ALARM_LSB 0
+#define WLAN_RTC_CONTROL_LOAD_ALARM_MASK 0x00000001
+#define WLAN_RTC_CONTROL_LOAD_ALARM_GET(x) (((x) & WLAN_RTC_CONTROL_LOAD_ALARM_MASK) >> WLAN_RTC_CONTROL_LOAD_ALARM_LSB)
+#define WLAN_RTC_CONTROL_LOAD_ALARM_SET(x) (((x) << WLAN_RTC_CONTROL_LOAD_ALARM_LSB) & WLAN_RTC_CONTROL_LOAD_ALARM_MASK)
+
+#define WLAN_RTC_TIME_ADDRESS 0x000000a0
+#define WLAN_RTC_TIME_OFFSET 0x000000a0
+#define WLAN_RTC_TIME_WEEK_DAY_MSB 26
+#define WLAN_RTC_TIME_WEEK_DAY_LSB 24
+#define WLAN_RTC_TIME_WEEK_DAY_MASK 0x07000000
+#define WLAN_RTC_TIME_WEEK_DAY_GET(x) (((x) & WLAN_RTC_TIME_WEEK_DAY_MASK) >> WLAN_RTC_TIME_WEEK_DAY_LSB)
+#define WLAN_RTC_TIME_WEEK_DAY_SET(x) (((x) << WLAN_RTC_TIME_WEEK_DAY_LSB) & WLAN_RTC_TIME_WEEK_DAY_MASK)
+#define WLAN_RTC_TIME_HOUR_MSB 21
+#define WLAN_RTC_TIME_HOUR_LSB 16
+#define WLAN_RTC_TIME_HOUR_MASK 0x003f0000
+#define WLAN_RTC_TIME_HOUR_GET(x) (((x) & WLAN_RTC_TIME_HOUR_MASK) >> WLAN_RTC_TIME_HOUR_LSB)
+#define WLAN_RTC_TIME_HOUR_SET(x) (((x) << WLAN_RTC_TIME_HOUR_LSB) & WLAN_RTC_TIME_HOUR_MASK)
+#define WLAN_RTC_TIME_MINUTE_MSB 14
+#define WLAN_RTC_TIME_MINUTE_LSB 8
+#define WLAN_RTC_TIME_MINUTE_MASK 0x00007f00
+#define WLAN_RTC_TIME_MINUTE_GET(x) (((x) & WLAN_RTC_TIME_MINUTE_MASK) >> WLAN_RTC_TIME_MINUTE_LSB)
+#define WLAN_RTC_TIME_MINUTE_SET(x) (((x) << WLAN_RTC_TIME_MINUTE_LSB) & WLAN_RTC_TIME_MINUTE_MASK)
+#define WLAN_RTC_TIME_SECOND_MSB 6
+#define WLAN_RTC_TIME_SECOND_LSB 0
+#define WLAN_RTC_TIME_SECOND_MASK 0x0000007f
+#define WLAN_RTC_TIME_SECOND_GET(x) (((x) & WLAN_RTC_TIME_SECOND_MASK) >> WLAN_RTC_TIME_SECOND_LSB)
+#define WLAN_RTC_TIME_SECOND_SET(x) (((x) << WLAN_RTC_TIME_SECOND_LSB) & WLAN_RTC_TIME_SECOND_MASK)
+
+#define WLAN_RTC_DATE_ADDRESS 0x000000a4
+#define WLAN_RTC_DATE_OFFSET 0x000000a4
+#define WLAN_RTC_DATE_YEAR_MSB 23
+#define WLAN_RTC_DATE_YEAR_LSB 16
+#define WLAN_RTC_DATE_YEAR_MASK 0x00ff0000
+#define WLAN_RTC_DATE_YEAR_GET(x) (((x) & WLAN_RTC_DATE_YEAR_MASK) >> WLAN_RTC_DATE_YEAR_LSB)
+#define WLAN_RTC_DATE_YEAR_SET(x) (((x) << WLAN_RTC_DATE_YEAR_LSB) & WLAN_RTC_DATE_YEAR_MASK)
+#define WLAN_RTC_DATE_MONTH_MSB 12
+#define WLAN_RTC_DATE_MONTH_LSB 8
+#define WLAN_RTC_DATE_MONTH_MASK 0x00001f00
+#define WLAN_RTC_DATE_MONTH_GET(x) (((x) & WLAN_RTC_DATE_MONTH_MASK) >> WLAN_RTC_DATE_MONTH_LSB)
+#define WLAN_RTC_DATE_MONTH_SET(x) (((x) << WLAN_RTC_DATE_MONTH_LSB) & WLAN_RTC_DATE_MONTH_MASK)
+#define WLAN_RTC_DATE_MONTH_DAY_MSB 5
+#define WLAN_RTC_DATE_MONTH_DAY_LSB 0
+#define WLAN_RTC_DATE_MONTH_DAY_MASK 0x0000003f
+#define WLAN_RTC_DATE_MONTH_DAY_GET(x) (((x) & WLAN_RTC_DATE_MONTH_DAY_MASK) >> WLAN_RTC_DATE_MONTH_DAY_LSB)
+#define WLAN_RTC_DATE_MONTH_DAY_SET(x) (((x) << WLAN_RTC_DATE_MONTH_DAY_LSB) & WLAN_RTC_DATE_MONTH_DAY_MASK)
+
+#define WLAN_RTC_SET_TIME_ADDRESS 0x000000a8
+#define WLAN_RTC_SET_TIME_OFFSET 0x000000a8
+#define WLAN_RTC_SET_TIME_WEEK_DAY_MSB 26
+#define WLAN_RTC_SET_TIME_WEEK_DAY_LSB 24
+#define WLAN_RTC_SET_TIME_WEEK_DAY_MASK 0x07000000
+#define WLAN_RTC_SET_TIME_WEEK_DAY_GET(x) (((x) & WLAN_RTC_SET_TIME_WEEK_DAY_MASK) >> WLAN_RTC_SET_TIME_WEEK_DAY_LSB)
+#define WLAN_RTC_SET_TIME_WEEK_DAY_SET(x) (((x) << WLAN_RTC_SET_TIME_WEEK_DAY_LSB) & WLAN_RTC_SET_TIME_WEEK_DAY_MASK)
+#define WLAN_RTC_SET_TIME_HOUR_MSB 21
+#define WLAN_RTC_SET_TIME_HOUR_LSB 16
+#define WLAN_RTC_SET_TIME_HOUR_MASK 0x003f0000
+#define WLAN_RTC_SET_TIME_HOUR_GET(x) (((x) & WLAN_RTC_SET_TIME_HOUR_MASK) >> WLAN_RTC_SET_TIME_HOUR_LSB)
+#define WLAN_RTC_SET_TIME_HOUR_SET(x) (((x) << WLAN_RTC_SET_TIME_HOUR_LSB) & WLAN_RTC_SET_TIME_HOUR_MASK)
+#define WLAN_RTC_SET_TIME_MINUTE_MSB 14
+#define WLAN_RTC_SET_TIME_MINUTE_LSB 8
+#define WLAN_RTC_SET_TIME_MINUTE_MASK 0x00007f00
+#define WLAN_RTC_SET_TIME_MINUTE_GET(x) (((x) & WLAN_RTC_SET_TIME_MINUTE_MASK) >> WLAN_RTC_SET_TIME_MINUTE_LSB)
+#define WLAN_RTC_SET_TIME_MINUTE_SET(x) (((x) << WLAN_RTC_SET_TIME_MINUTE_LSB) & WLAN_RTC_SET_TIME_MINUTE_MASK)
+#define WLAN_RTC_SET_TIME_SECOND_MSB 6
+#define WLAN_RTC_SET_TIME_SECOND_LSB 0
+#define WLAN_RTC_SET_TIME_SECOND_MASK 0x0000007f
+#define WLAN_RTC_SET_TIME_SECOND_GET(x) (((x) & WLAN_RTC_SET_TIME_SECOND_MASK) >> WLAN_RTC_SET_TIME_SECOND_LSB)
+#define WLAN_RTC_SET_TIME_SECOND_SET(x) (((x) << WLAN_RTC_SET_TIME_SECOND_LSB) & WLAN_RTC_SET_TIME_SECOND_MASK)
+
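/*
 * Illustrative sketch only: the multi-bit _SET() helpers compose a full
 * register word by OR-ing the shifted fields.  This builds a WLAN_RTC_SET_TIME
 * value for 14:30:00 on week day 2, assuming binary (non-BCD) encoding per
 * WLAN_RTC_CONFIG_BCD; writing the word out is left to the caller.
 */
static inline u32 wlan_rtc_set_time_value(void)
{
	return WLAN_RTC_SET_TIME_WEEK_DAY_SET(2) |
	       WLAN_RTC_SET_TIME_HOUR_SET(14) |
	       WLAN_RTC_SET_TIME_MINUTE_SET(30) |
	       WLAN_RTC_SET_TIME_SECOND_SET(0);
}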
+#define WLAN_RTC_SET_DATE_ADDRESS 0x000000ac
+#define WLAN_RTC_SET_DATE_OFFSET 0x000000ac
+#define WLAN_RTC_SET_DATE_YEAR_MSB 23
+#define WLAN_RTC_SET_DATE_YEAR_LSB 16
+#define WLAN_RTC_SET_DATE_YEAR_MASK 0x00ff0000
+#define WLAN_RTC_SET_DATE_YEAR_GET(x) (((x) & WLAN_RTC_SET_DATE_YEAR_MASK) >> WLAN_RTC_SET_DATE_YEAR_LSB)
+#define WLAN_RTC_SET_DATE_YEAR_SET(x) (((x) << WLAN_RTC_SET_DATE_YEAR_LSB) & WLAN_RTC_SET_DATE_YEAR_MASK)
+#define WLAN_RTC_SET_DATE_MONTH_MSB 12
+#define WLAN_RTC_SET_DATE_MONTH_LSB 8
+#define WLAN_RTC_SET_DATE_MONTH_MASK 0x00001f00
+#define WLAN_RTC_SET_DATE_MONTH_GET(x) (((x) & WLAN_RTC_SET_DATE_MONTH_MASK) >> WLAN_RTC_SET_DATE_MONTH_LSB)
+#define WLAN_RTC_SET_DATE_MONTH_SET(x) (((x) << WLAN_RTC_SET_DATE_MONTH_LSB) & WLAN_RTC_SET_DATE_MONTH_MASK)
+#define WLAN_RTC_SET_DATE_MONTH_DAY_MSB 5
+#define WLAN_RTC_SET_DATE_MONTH_DAY_LSB 0
+#define WLAN_RTC_SET_DATE_MONTH_DAY_MASK 0x0000003f
+#define WLAN_RTC_SET_DATE_MONTH_DAY_GET(x) (((x) & WLAN_RTC_SET_DATE_MONTH_DAY_MASK) >> WLAN_RTC_SET_DATE_MONTH_DAY_LSB)
+#define WLAN_RTC_SET_DATE_MONTH_DAY_SET(x) (((x) << WLAN_RTC_SET_DATE_MONTH_DAY_LSB) & WLAN_RTC_SET_DATE_MONTH_DAY_MASK)
+
+#define WLAN_RTC_SET_ALARM_ADDRESS 0x000000b0
+#define WLAN_RTC_SET_ALARM_OFFSET 0x000000b0
+#define WLAN_RTC_SET_ALARM_HOUR_MSB 21
+#define WLAN_RTC_SET_ALARM_HOUR_LSB 16
+#define WLAN_RTC_SET_ALARM_HOUR_MASK 0x003f0000
+#define WLAN_RTC_SET_ALARM_HOUR_GET(x) (((x) & WLAN_RTC_SET_ALARM_HOUR_MASK) >> WLAN_RTC_SET_ALARM_HOUR_LSB)
+#define WLAN_RTC_SET_ALARM_HOUR_SET(x) (((x) << WLAN_RTC_SET_ALARM_HOUR_LSB) & WLAN_RTC_SET_ALARM_HOUR_MASK)
+#define WLAN_RTC_SET_ALARM_MINUTE_MSB 14
+#define WLAN_RTC_SET_ALARM_MINUTE_LSB 8
+#define WLAN_RTC_SET_ALARM_MINUTE_MASK 0x00007f00
+#define WLAN_RTC_SET_ALARM_MINUTE_GET(x) (((x) & WLAN_RTC_SET_ALARM_MINUTE_MASK) >> WLAN_RTC_SET_ALARM_MINUTE_LSB)
+#define WLAN_RTC_SET_ALARM_MINUTE_SET(x) (((x) << WLAN_RTC_SET_ALARM_MINUTE_LSB) & WLAN_RTC_SET_ALARM_MINUTE_MASK)
+#define WLAN_RTC_SET_ALARM_SECOND_MSB 6
+#define WLAN_RTC_SET_ALARM_SECOND_LSB 0
+#define WLAN_RTC_SET_ALARM_SECOND_MASK 0x0000007f
+#define WLAN_RTC_SET_ALARM_SECOND_GET(x) (((x) & WLAN_RTC_SET_ALARM_SECOND_MASK) >> WLAN_RTC_SET_ALARM_SECOND_LSB)
+#define WLAN_RTC_SET_ALARM_SECOND_SET(x) (((x) << WLAN_RTC_SET_ALARM_SECOND_LSB) & WLAN_RTC_SET_ALARM_SECOND_MASK)
+
+#define WLAN_RTC_CONFIG_ADDRESS 0x000000b4
+#define WLAN_RTC_CONFIG_OFFSET 0x000000b4
+#define WLAN_RTC_CONFIG_BCD_MSB 2
+#define WLAN_RTC_CONFIG_BCD_LSB 2
+#define WLAN_RTC_CONFIG_BCD_MASK 0x00000004
+#define WLAN_RTC_CONFIG_BCD_GET(x) (((x) & WLAN_RTC_CONFIG_BCD_MASK) >> WLAN_RTC_CONFIG_BCD_LSB)
+#define WLAN_RTC_CONFIG_BCD_SET(x) (((x) << WLAN_RTC_CONFIG_BCD_LSB) & WLAN_RTC_CONFIG_BCD_MASK)
+#define WLAN_RTC_CONFIG_TWELVE_HOUR_MSB 1
+#define WLAN_RTC_CONFIG_TWELVE_HOUR_LSB 1
+#define WLAN_RTC_CONFIG_TWELVE_HOUR_MASK 0x00000002
+#define WLAN_RTC_CONFIG_TWELVE_HOUR_GET(x) (((x) & WLAN_RTC_CONFIG_TWELVE_HOUR_MASK) >> WLAN_RTC_CONFIG_TWELVE_HOUR_LSB)
+#define WLAN_RTC_CONFIG_TWELVE_HOUR_SET(x) (((x) << WLAN_RTC_CONFIG_TWELVE_HOUR_LSB) & WLAN_RTC_CONFIG_TWELVE_HOUR_MASK)
+#define WLAN_RTC_CONFIG_DSE_MSB 0
+#define WLAN_RTC_CONFIG_DSE_LSB 0
+#define WLAN_RTC_CONFIG_DSE_MASK 0x00000001
+#define WLAN_RTC_CONFIG_DSE_GET(x) (((x) & WLAN_RTC_CONFIG_DSE_MASK) >> WLAN_RTC_CONFIG_DSE_LSB)
+#define WLAN_RTC_CONFIG_DSE_SET(x) (((x) << WLAN_RTC_CONFIG_DSE_LSB) & WLAN_RTC_CONFIG_DSE_MASK)
+
+#define WLAN_RTC_ALARM_STATUS_ADDRESS 0x000000b8
+#define WLAN_RTC_ALARM_STATUS_OFFSET 0x000000b8
+#define WLAN_RTC_ALARM_STATUS_ENABLE_MSB 1
+#define WLAN_RTC_ALARM_STATUS_ENABLE_LSB 1
+#define WLAN_RTC_ALARM_STATUS_ENABLE_MASK 0x00000002
+#define WLAN_RTC_ALARM_STATUS_ENABLE_GET(x) (((x) & WLAN_RTC_ALARM_STATUS_ENABLE_MASK) >> WLAN_RTC_ALARM_STATUS_ENABLE_LSB)
+#define WLAN_RTC_ALARM_STATUS_ENABLE_SET(x) (((x) << WLAN_RTC_ALARM_STATUS_ENABLE_LSB) & WLAN_RTC_ALARM_STATUS_ENABLE_MASK)
+#define WLAN_RTC_ALARM_STATUS_INTERRUPT_MSB 0
+#define WLAN_RTC_ALARM_STATUS_INTERRUPT_LSB 0
+#define WLAN_RTC_ALARM_STATUS_INTERRUPT_MASK 0x00000001
+#define WLAN_RTC_ALARM_STATUS_INTERRUPT_GET(x) (((x) & WLAN_RTC_ALARM_STATUS_INTERRUPT_MASK) >> WLAN_RTC_ALARM_STATUS_INTERRUPT_LSB)
+#define WLAN_RTC_ALARM_STATUS_INTERRUPT_SET(x) (((x) << WLAN_RTC_ALARM_STATUS_INTERRUPT_LSB) & WLAN_RTC_ALARM_STATUS_INTERRUPT_MASK)
+
+#define WLAN_UART_WAKEUP_ADDRESS 0x000000bc
+#define WLAN_UART_WAKEUP_OFFSET 0x000000bc
+#define WLAN_UART_WAKEUP_ENABLE_MSB 0
+#define WLAN_UART_WAKEUP_ENABLE_LSB 0
+#define WLAN_UART_WAKEUP_ENABLE_MASK 0x00000001
+#define WLAN_UART_WAKEUP_ENABLE_GET(x) (((x) & WLAN_UART_WAKEUP_ENABLE_MASK) >> WLAN_UART_WAKEUP_ENABLE_LSB)
+#define WLAN_UART_WAKEUP_ENABLE_SET(x) (((x) << WLAN_UART_WAKEUP_ENABLE_LSB) & WLAN_UART_WAKEUP_ENABLE_MASK)
+
+#define WLAN_RESET_CAUSE_ADDRESS 0x000000c0
+#define WLAN_RESET_CAUSE_OFFSET 0x000000c0
+#define WLAN_RESET_CAUSE_LAST_MSB 2
+#define WLAN_RESET_CAUSE_LAST_LSB 0
+#define WLAN_RESET_CAUSE_LAST_MASK 0x00000007
+#define WLAN_RESET_CAUSE_LAST_GET(x) (((x) & WLAN_RESET_CAUSE_LAST_MASK) >> WLAN_RESET_CAUSE_LAST_LSB)
+#define WLAN_RESET_CAUSE_LAST_SET(x) (((x) << WLAN_RESET_CAUSE_LAST_LSB) & WLAN_RESET_CAUSE_LAST_MASK)
+
+#define WLAN_SYSTEM_SLEEP_ADDRESS 0x000000c4
+#define WLAN_SYSTEM_SLEEP_OFFSET 0x000000c4
+#define WLAN_SYSTEM_SLEEP_HOST_IF_MSB 4
+#define WLAN_SYSTEM_SLEEP_HOST_IF_LSB 4
+#define WLAN_SYSTEM_SLEEP_HOST_IF_MASK 0x00000010
+#define WLAN_SYSTEM_SLEEP_HOST_IF_GET(x) (((x) & WLAN_SYSTEM_SLEEP_HOST_IF_MASK) >> WLAN_SYSTEM_SLEEP_HOST_IF_LSB)
+#define WLAN_SYSTEM_SLEEP_HOST_IF_SET(x) (((x) << WLAN_SYSTEM_SLEEP_HOST_IF_LSB) & WLAN_SYSTEM_SLEEP_HOST_IF_MASK)
+#define WLAN_SYSTEM_SLEEP_MBOX_MSB 3
+#define WLAN_SYSTEM_SLEEP_MBOX_LSB 3
+#define WLAN_SYSTEM_SLEEP_MBOX_MASK 0x00000008
+#define WLAN_SYSTEM_SLEEP_MBOX_GET(x) (((x) & WLAN_SYSTEM_SLEEP_MBOX_MASK) >> WLAN_SYSTEM_SLEEP_MBOX_LSB)
+#define WLAN_SYSTEM_SLEEP_MBOX_SET(x) (((x) << WLAN_SYSTEM_SLEEP_MBOX_LSB) & WLAN_SYSTEM_SLEEP_MBOX_MASK)
+#define WLAN_SYSTEM_SLEEP_MAC_IF_MSB 2
+#define WLAN_SYSTEM_SLEEP_MAC_IF_LSB 2
+#define WLAN_SYSTEM_SLEEP_MAC_IF_MASK 0x00000004
+#define WLAN_SYSTEM_SLEEP_MAC_IF_GET(x) (((x) & WLAN_SYSTEM_SLEEP_MAC_IF_MASK) >> WLAN_SYSTEM_SLEEP_MAC_IF_LSB)
+#define WLAN_SYSTEM_SLEEP_MAC_IF_SET(x) (((x) << WLAN_SYSTEM_SLEEP_MAC_IF_LSB) & WLAN_SYSTEM_SLEEP_MAC_IF_MASK)
+#define WLAN_SYSTEM_SLEEP_LIGHT_MSB 1
+#define WLAN_SYSTEM_SLEEP_LIGHT_LSB 1
+#define WLAN_SYSTEM_SLEEP_LIGHT_MASK 0x00000002
+#define WLAN_SYSTEM_SLEEP_LIGHT_GET(x) (((x) & WLAN_SYSTEM_SLEEP_LIGHT_MASK) >> WLAN_SYSTEM_SLEEP_LIGHT_LSB)
+#define WLAN_SYSTEM_SLEEP_LIGHT_SET(x) (((x) << WLAN_SYSTEM_SLEEP_LIGHT_LSB) & WLAN_SYSTEM_SLEEP_LIGHT_MASK)
+#define WLAN_SYSTEM_SLEEP_DISABLE_MSB 0
+#define WLAN_SYSTEM_SLEEP_DISABLE_LSB 0
+#define WLAN_SYSTEM_SLEEP_DISABLE_MASK 0x00000001
+#define WLAN_SYSTEM_SLEEP_DISABLE_GET(x) (((x) & WLAN_SYSTEM_SLEEP_DISABLE_MASK) >> WLAN_SYSTEM_SLEEP_DISABLE_LSB)
+#define WLAN_SYSTEM_SLEEP_DISABLE_SET(x) (((x) << WLAN_SYSTEM_SLEEP_DISABLE_LSB) & WLAN_SYSTEM_SLEEP_DISABLE_MASK)
+
+#define WLAN_SDIO_WRAPPER_ADDRESS 0x000000c8
+#define WLAN_SDIO_WRAPPER_OFFSET 0x000000c8
+#define WLAN_SDIO_WRAPPER_SLEEP_MSB 3
+#define WLAN_SDIO_WRAPPER_SLEEP_LSB 3
+#define WLAN_SDIO_WRAPPER_SLEEP_MASK 0x00000008
+#define WLAN_SDIO_WRAPPER_SLEEP_GET(x) (((x) & WLAN_SDIO_WRAPPER_SLEEP_MASK) >> WLAN_SDIO_WRAPPER_SLEEP_LSB)
+#define WLAN_SDIO_WRAPPER_SLEEP_SET(x) (((x) << WLAN_SDIO_WRAPPER_SLEEP_LSB) & WLAN_SDIO_WRAPPER_SLEEP_MASK)
+#define WLAN_SDIO_WRAPPER_WAKEUP_MSB 2
+#define WLAN_SDIO_WRAPPER_WAKEUP_LSB 2
+#define WLAN_SDIO_WRAPPER_WAKEUP_MASK 0x00000004
+#define WLAN_SDIO_WRAPPER_WAKEUP_GET(x) (((x) & WLAN_SDIO_WRAPPER_WAKEUP_MASK) >> WLAN_SDIO_WRAPPER_WAKEUP_LSB)
+#define WLAN_SDIO_WRAPPER_WAKEUP_SET(x) (((x) << WLAN_SDIO_WRAPPER_WAKEUP_LSB) & WLAN_SDIO_WRAPPER_WAKEUP_MASK)
+#define WLAN_SDIO_WRAPPER_SOC_ON_MSB 1
+#define WLAN_SDIO_WRAPPER_SOC_ON_LSB 1
+#define WLAN_SDIO_WRAPPER_SOC_ON_MASK 0x00000002
+#define WLAN_SDIO_WRAPPER_SOC_ON_GET(x) (((x) & WLAN_SDIO_WRAPPER_SOC_ON_MASK) >> WLAN_SDIO_WRAPPER_SOC_ON_LSB)
+#define WLAN_SDIO_WRAPPER_SOC_ON_SET(x) (((x) << WLAN_SDIO_WRAPPER_SOC_ON_LSB) & WLAN_SDIO_WRAPPER_SOC_ON_MASK)
+#define WLAN_SDIO_WRAPPER_ON_MSB 0
+#define WLAN_SDIO_WRAPPER_ON_LSB 0
+#define WLAN_SDIO_WRAPPER_ON_MASK 0x00000001
+#define WLAN_SDIO_WRAPPER_ON_GET(x) (((x) & WLAN_SDIO_WRAPPER_ON_MASK) >> WLAN_SDIO_WRAPPER_ON_LSB)
+#define WLAN_SDIO_WRAPPER_ON_SET(x) (((x) << WLAN_SDIO_WRAPPER_ON_LSB) & WLAN_SDIO_WRAPPER_ON_MASK)
+
+#define WLAN_MAC_SLEEP_CONTROL_ADDRESS 0x000000cc
+#define WLAN_MAC_SLEEP_CONTROL_OFFSET 0x000000cc
+#define WLAN_MAC_SLEEP_CONTROL_ENABLE_MSB 1
+#define WLAN_MAC_SLEEP_CONTROL_ENABLE_LSB 0
+#define WLAN_MAC_SLEEP_CONTROL_ENABLE_MASK 0x00000003
+#define WLAN_MAC_SLEEP_CONTROL_ENABLE_GET(x) (((x) & WLAN_MAC_SLEEP_CONTROL_ENABLE_MASK) >> WLAN_MAC_SLEEP_CONTROL_ENABLE_LSB)
+#define WLAN_MAC_SLEEP_CONTROL_ENABLE_SET(x) (((x) << WLAN_MAC_SLEEP_CONTROL_ENABLE_LSB) & WLAN_MAC_SLEEP_CONTROL_ENABLE_MASK)
+
+#define WLAN_KEEP_AWAKE_ADDRESS 0x000000d0
+#define WLAN_KEEP_AWAKE_OFFSET 0x000000d0
+#define WLAN_KEEP_AWAKE_COUNT_MSB 7
+#define WLAN_KEEP_AWAKE_COUNT_LSB 0
+#define WLAN_KEEP_AWAKE_COUNT_MASK 0x000000ff
+#define WLAN_KEEP_AWAKE_COUNT_GET(x) (((x) & WLAN_KEEP_AWAKE_COUNT_MASK) >> WLAN_KEEP_AWAKE_COUNT_LSB)
+#define WLAN_KEEP_AWAKE_COUNT_SET(x) (((x) << WLAN_KEEP_AWAKE_COUNT_LSB) & WLAN_KEEP_AWAKE_COUNT_MASK)
+
+#define WLAN_LPO_CAL_TIME_ADDRESS 0x000000d4
+#define WLAN_LPO_CAL_TIME_OFFSET 0x000000d4
+#define WLAN_LPO_CAL_TIME_LENGTH_MSB 13
+#define WLAN_LPO_CAL_TIME_LENGTH_LSB 0
+#define WLAN_LPO_CAL_TIME_LENGTH_MASK 0x00003fff
+#define WLAN_LPO_CAL_TIME_LENGTH_GET(x) (((x) & WLAN_LPO_CAL_TIME_LENGTH_MASK) >> WLAN_LPO_CAL_TIME_LENGTH_LSB)
+#define WLAN_LPO_CAL_TIME_LENGTH_SET(x) (((x) << WLAN_LPO_CAL_TIME_LENGTH_LSB) & WLAN_LPO_CAL_TIME_LENGTH_MASK)
+
+#define WLAN_LPO_INIT_DIVIDEND_INT_ADDRESS 0x000000d8
+#define WLAN_LPO_INIT_DIVIDEND_INT_OFFSET 0x000000d8
+#define WLAN_LPO_INIT_DIVIDEND_INT_VALUE_MSB 23
+#define WLAN_LPO_INIT_DIVIDEND_INT_VALUE_LSB 0
+#define WLAN_LPO_INIT_DIVIDEND_INT_VALUE_MASK 0x00ffffff
+#define WLAN_LPO_INIT_DIVIDEND_INT_VALUE_GET(x) (((x) & WLAN_LPO_INIT_DIVIDEND_INT_VALUE_MASK) >> WLAN_LPO_INIT_DIVIDEND_INT_VALUE_LSB)
+#define WLAN_LPO_INIT_DIVIDEND_INT_VALUE_SET(x) (((x) << WLAN_LPO_INIT_DIVIDEND_INT_VALUE_LSB) & WLAN_LPO_INIT_DIVIDEND_INT_VALUE_MASK)
+
+#define WLAN_LPO_INIT_DIVIDEND_FRACTION_ADDRESS 0x000000dc
+#define WLAN_LPO_INIT_DIVIDEND_FRACTION_OFFSET 0x000000dc
+#define WLAN_LPO_INIT_DIVIDEND_FRACTION_VALUE_MSB 10
+#define WLAN_LPO_INIT_DIVIDEND_FRACTION_VALUE_LSB 0
+#define WLAN_LPO_INIT_DIVIDEND_FRACTION_VALUE_MASK 0x000007ff
+#define WLAN_LPO_INIT_DIVIDEND_FRACTION_VALUE_GET(x) (((x) & WLAN_LPO_INIT_DIVIDEND_FRACTION_VALUE_MASK) >> WLAN_LPO_INIT_DIVIDEND_FRACTION_VALUE_LSB)
+#define WLAN_LPO_INIT_DIVIDEND_FRACTION_VALUE_SET(x) (((x) << WLAN_LPO_INIT_DIVIDEND_FRACTION_VALUE_LSB) & WLAN_LPO_INIT_DIVIDEND_FRACTION_VALUE_MASK)
+
+#define WLAN_LPO_CAL_ADDRESS 0x000000e0
+#define WLAN_LPO_CAL_OFFSET 0x000000e0
+#define WLAN_LPO_CAL_ENABLE_MSB 20
+#define WLAN_LPO_CAL_ENABLE_LSB 20
+#define WLAN_LPO_CAL_ENABLE_MASK 0x00100000
+#define WLAN_LPO_CAL_ENABLE_GET(x) (((x) & WLAN_LPO_CAL_ENABLE_MASK) >> WLAN_LPO_CAL_ENABLE_LSB)
+#define WLAN_LPO_CAL_ENABLE_SET(x) (((x) << WLAN_LPO_CAL_ENABLE_LSB) & WLAN_LPO_CAL_ENABLE_MASK)
+#define WLAN_LPO_CAL_COUNT_MSB 19
+#define WLAN_LPO_CAL_COUNT_LSB 0
+#define WLAN_LPO_CAL_COUNT_MASK 0x000fffff
+#define WLAN_LPO_CAL_COUNT_GET(x) (((x) & WLAN_LPO_CAL_COUNT_MASK) >> WLAN_LPO_CAL_COUNT_LSB)
+#define WLAN_LPO_CAL_COUNT_SET(x) (((x) << WLAN_LPO_CAL_COUNT_LSB) & WLAN_LPO_CAL_COUNT_MASK)
+
+#define WLAN_LPO_CAL_TEST_CONTROL_ADDRESS 0x000000e4
+#define WLAN_LPO_CAL_TEST_CONTROL_OFFSET 0x000000e4
+#define WLAN_LPO_CAL_TEST_CONTROL_ENABLE_MSB 5
+#define WLAN_LPO_CAL_TEST_CONTROL_ENABLE_LSB 5
+#define WLAN_LPO_CAL_TEST_CONTROL_ENABLE_MASK 0x00000020
+#define WLAN_LPO_CAL_TEST_CONTROL_ENABLE_GET(x) (((x) & WLAN_LPO_CAL_TEST_CONTROL_ENABLE_MASK) >> WLAN_LPO_CAL_TEST_CONTROL_ENABLE_LSB)
+#define WLAN_LPO_CAL_TEST_CONTROL_ENABLE_SET(x) (((x) << WLAN_LPO_CAL_TEST_CONTROL_ENABLE_LSB) & WLAN_LPO_CAL_TEST_CONTROL_ENABLE_MASK)
+#define WLAN_LPO_CAL_TEST_CONTROL_RTC_CYCLES_MSB 4
+#define WLAN_LPO_CAL_TEST_CONTROL_RTC_CYCLES_LSB 0
+#define WLAN_LPO_CAL_TEST_CONTROL_RTC_CYCLES_MASK 0x0000001f
+#define WLAN_LPO_CAL_TEST_CONTROL_RTC_CYCLES_GET(x) (((x) & WLAN_LPO_CAL_TEST_CONTROL_RTC_CYCLES_MASK) >> WLAN_LPO_CAL_TEST_CONTROL_RTC_CYCLES_LSB)
+#define WLAN_LPO_CAL_TEST_CONTROL_RTC_CYCLES_SET(x) (((x) << WLAN_LPO_CAL_TEST_CONTROL_RTC_CYCLES_LSB) & WLAN_LPO_CAL_TEST_CONTROL_RTC_CYCLES_MASK)
+
+#define WLAN_LPO_CAL_TEST_STATUS_ADDRESS 0x000000e8
+#define WLAN_LPO_CAL_TEST_STATUS_OFFSET 0x000000e8
+#define WLAN_LPO_CAL_TEST_STATUS_READY_MSB 16
+#define WLAN_LPO_CAL_TEST_STATUS_READY_LSB 16
+#define WLAN_LPO_CAL_TEST_STATUS_READY_MASK 0x00010000
+#define WLAN_LPO_CAL_TEST_STATUS_READY_GET(x) (((x) & WLAN_LPO_CAL_TEST_STATUS_READY_MASK) >> WLAN_LPO_CAL_TEST_STATUS_READY_LSB)
+#define WLAN_LPO_CAL_TEST_STATUS_READY_SET(x) (((x) << WLAN_LPO_CAL_TEST_STATUS_READY_LSB) & WLAN_LPO_CAL_TEST_STATUS_READY_MASK)
+#define WLAN_LPO_CAL_TEST_STATUS_COUNT_MSB 15
+#define WLAN_LPO_CAL_TEST_STATUS_COUNT_LSB 0
+#define WLAN_LPO_CAL_TEST_STATUS_COUNT_MASK 0x0000ffff
+#define WLAN_LPO_CAL_TEST_STATUS_COUNT_GET(x) (((x) & WLAN_LPO_CAL_TEST_STATUS_COUNT_MASK) >> WLAN_LPO_CAL_TEST_STATUS_COUNT_LSB)
+#define WLAN_LPO_CAL_TEST_STATUS_COUNT_SET(x) (((x) << WLAN_LPO_CAL_TEST_STATUS_COUNT_LSB) & WLAN_LPO_CAL_TEST_STATUS_COUNT_MASK)
+
+#define WLAN_CHIP_ID_ADDRESS 0x000000ec
+#define WLAN_CHIP_ID_OFFSET 0x000000ec
+#define WLAN_CHIP_ID_DEVICE_ID_MSB 31
+#define WLAN_CHIP_ID_DEVICE_ID_LSB 16
+#define WLAN_CHIP_ID_DEVICE_ID_MASK 0xffff0000
+#define WLAN_CHIP_ID_DEVICE_ID_GET(x) (((x) & WLAN_CHIP_ID_DEVICE_ID_MASK) >> WLAN_CHIP_ID_DEVICE_ID_LSB)
+#define WLAN_CHIP_ID_DEVICE_ID_SET(x) (((x) << WLAN_CHIP_ID_DEVICE_ID_LSB) & WLAN_CHIP_ID_DEVICE_ID_MASK)
+#define WLAN_CHIP_ID_CONFIG_ID_MSB 15
+#define WLAN_CHIP_ID_CONFIG_ID_LSB 4
+#define WLAN_CHIP_ID_CONFIG_ID_MASK 0x0000fff0
+#define WLAN_CHIP_ID_CONFIG_ID_GET(x) (((x) & WLAN_CHIP_ID_CONFIG_ID_MASK) >> WLAN_CHIP_ID_CONFIG_ID_LSB)
+#define WLAN_CHIP_ID_CONFIG_ID_SET(x) (((x) << WLAN_CHIP_ID_CONFIG_ID_LSB) & WLAN_CHIP_ID_CONFIG_ID_MASK)
+#define WLAN_CHIP_ID_VERSION_ID_MSB 3
+#define WLAN_CHIP_ID_VERSION_ID_LSB 0
+#define WLAN_CHIP_ID_VERSION_ID_MASK 0x0000000f
+#define WLAN_CHIP_ID_VERSION_ID_GET(x) (((x) & WLAN_CHIP_ID_VERSION_ID_MASK) >> WLAN_CHIP_ID_VERSION_ID_LSB)
+#define WLAN_CHIP_ID_VERSION_ID_SET(x) (((x) << WLAN_CHIP_ID_VERSION_ID_LSB) & WLAN_CHIP_ID_VERSION_ID_MASK)
+
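/*
 * Illustrative sketch only: splitting a raw WLAN_CHIP_ID readback into its
 * sub-fields with the _GET() helpers.  "chip_id" is assumed to be whatever
 * the caller read from WLAN_CHIP_ID_ADDRESS.
 */
static void wlan_decode_chip_id(u32 chip_id)
{
	u32 device  = WLAN_CHIP_ID_DEVICE_ID_GET(chip_id);	/* bits 31:16 */
	u32 config  = WLAN_CHIP_ID_CONFIG_ID_GET(chip_id);	/* bits 15:4 */
	u32 version = WLAN_CHIP_ID_VERSION_ID_GET(chip_id);	/* bits 3:0 */

	pr_info("chip id: device 0x%04x config 0x%03x version %u\n",
		device, config, version);
}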
+#define WLAN_DERIVED_RTC_CLK_ADDRESS 0x000000f0
+#define WLAN_DERIVED_RTC_CLK_OFFSET 0x000000f0
+#define WLAN_DERIVED_RTC_CLK_EXTERNAL_DETECT_EN_MSB 20
+#define WLAN_DERIVED_RTC_CLK_EXTERNAL_DETECT_EN_LSB 20
+#define WLAN_DERIVED_RTC_CLK_EXTERNAL_DETECT_EN_MASK 0x00100000
+#define WLAN_DERIVED_RTC_CLK_EXTERNAL_DETECT_EN_GET(x) (((x) & WLAN_DERIVED_RTC_CLK_EXTERNAL_DETECT_EN_MASK) >> WLAN_DERIVED_RTC_CLK_EXTERNAL_DETECT_EN_LSB)
+#define WLAN_DERIVED_RTC_CLK_EXTERNAL_DETECT_EN_SET(x) (((x) << WLAN_DERIVED_RTC_CLK_EXTERNAL_DETECT_EN_LSB) & WLAN_DERIVED_RTC_CLK_EXTERNAL_DETECT_EN_MASK)
+#define WLAN_DERIVED_RTC_CLK_EXTERNAL_DETECT_MSB 18
+#define WLAN_DERIVED_RTC_CLK_EXTERNAL_DETECT_LSB 18
+#define WLAN_DERIVED_RTC_CLK_EXTERNAL_DETECT_MASK 0x00040000
+#define WLAN_DERIVED_RTC_CLK_EXTERNAL_DETECT_GET(x) (((x) & WLAN_DERIVED_RTC_CLK_EXTERNAL_DETECT_MASK) >> WLAN_DERIVED_RTC_CLK_EXTERNAL_DETECT_LSB)
+#define WLAN_DERIVED_RTC_CLK_EXTERNAL_DETECT_SET(x) (((x) << WLAN_DERIVED_RTC_CLK_EXTERNAL_DETECT_LSB) & WLAN_DERIVED_RTC_CLK_EXTERNAL_DETECT_MASK)
+#define WLAN_DERIVED_RTC_CLK_FORCE_MSB 17
+#define WLAN_DERIVED_RTC_CLK_FORCE_LSB 16
+#define WLAN_DERIVED_RTC_CLK_FORCE_MASK 0x00030000
+#define WLAN_DERIVED_RTC_CLK_FORCE_GET(x) (((x) & WLAN_DERIVED_RTC_CLK_FORCE_MASK) >> WLAN_DERIVED_RTC_CLK_FORCE_LSB)
+#define WLAN_DERIVED_RTC_CLK_FORCE_SET(x) (((x) << WLAN_DERIVED_RTC_CLK_FORCE_LSB) & WLAN_DERIVED_RTC_CLK_FORCE_MASK)
+#define WLAN_DERIVED_RTC_CLK_PERIOD_MSB 15
+#define WLAN_DERIVED_RTC_CLK_PERIOD_LSB 1
+#define WLAN_DERIVED_RTC_CLK_PERIOD_MASK 0x0000fffe
+#define WLAN_DERIVED_RTC_CLK_PERIOD_GET(x) (((x) & WLAN_DERIVED_RTC_CLK_PERIOD_MASK) >> WLAN_DERIVED_RTC_CLK_PERIOD_LSB)
+#define WLAN_DERIVED_RTC_CLK_PERIOD_SET(x) (((x) << WLAN_DERIVED_RTC_CLK_PERIOD_LSB) & WLAN_DERIVED_RTC_CLK_PERIOD_MASK)
+
+#define MAC_PCU_SLP32_MODE_ADDRESS 0x000000f4
+#define MAC_PCU_SLP32_MODE_OFFSET 0x000000f4
+#define MAC_PCU_SLP32_MODE_TSF2_WRITE_STATUS_MSB 24
+#define MAC_PCU_SLP32_MODE_TSF2_WRITE_STATUS_LSB 24
+#define MAC_PCU_SLP32_MODE_TSF2_WRITE_STATUS_MASK 0x01000000
+#define MAC_PCU_SLP32_MODE_TSF2_WRITE_STATUS_GET(x) (((x) & MAC_PCU_SLP32_MODE_TSF2_WRITE_STATUS_MASK) >> MAC_PCU_SLP32_MODE_TSF2_WRITE_STATUS_LSB)
+#define MAC_PCU_SLP32_MODE_TSF2_WRITE_STATUS_SET(x) (((x) << MAC_PCU_SLP32_MODE_TSF2_WRITE_STATUS_LSB) & MAC_PCU_SLP32_MODE_TSF2_WRITE_STATUS_MASK)
+#define MAC_PCU_SLP32_MODE_FORCE_BIAS_BLOCK_ON_MSB 23
+#define MAC_PCU_SLP32_MODE_FORCE_BIAS_BLOCK_ON_LSB 23
+#define MAC_PCU_SLP32_MODE_FORCE_BIAS_BLOCK_ON_MASK 0x00800000
+#define MAC_PCU_SLP32_MODE_FORCE_BIAS_BLOCK_ON_GET(x) (((x) & MAC_PCU_SLP32_MODE_FORCE_BIAS_BLOCK_ON_MASK) >> MAC_PCU_SLP32_MODE_FORCE_BIAS_BLOCK_ON_LSB)
+#define MAC_PCU_SLP32_MODE_FORCE_BIAS_BLOCK_ON_SET(x) (((x) << MAC_PCU_SLP32_MODE_FORCE_BIAS_BLOCK_ON_LSB) & MAC_PCU_SLP32_MODE_FORCE_BIAS_BLOCK_ON_MASK)
+#define MAC_PCU_SLP32_MODE_DISABLE_32KHZ_MSB 22
+#define MAC_PCU_SLP32_MODE_DISABLE_32KHZ_LSB 22
+#define MAC_PCU_SLP32_MODE_DISABLE_32KHZ_MASK 0x00400000
+#define MAC_PCU_SLP32_MODE_DISABLE_32KHZ_GET(x) (((x) & MAC_PCU_SLP32_MODE_DISABLE_32KHZ_MASK) >> MAC_PCU_SLP32_MODE_DISABLE_32KHZ_LSB)
+#define MAC_PCU_SLP32_MODE_DISABLE_32KHZ_SET(x) (((x) << MAC_PCU_SLP32_MODE_DISABLE_32KHZ_LSB) & MAC_PCU_SLP32_MODE_DISABLE_32KHZ_MASK)
+#define MAC_PCU_SLP32_MODE_TSF_WRITE_STATUS_MSB 21
+#define MAC_PCU_SLP32_MODE_TSF_WRITE_STATUS_LSB 21
+#define MAC_PCU_SLP32_MODE_TSF_WRITE_STATUS_MASK 0x00200000
+#define MAC_PCU_SLP32_MODE_TSF_WRITE_STATUS_GET(x) (((x) & MAC_PCU_SLP32_MODE_TSF_WRITE_STATUS_MASK) >> MAC_PCU_SLP32_MODE_TSF_WRITE_STATUS_LSB)
+#define MAC_PCU_SLP32_MODE_TSF_WRITE_STATUS_SET(x) (((x) << MAC_PCU_SLP32_MODE_TSF_WRITE_STATUS_LSB) & MAC_PCU_SLP32_MODE_TSF_WRITE_STATUS_MASK)
+#define MAC_PCU_SLP32_MODE_ENABLE_MSB 20
+#define MAC_PCU_SLP32_MODE_ENABLE_LSB 20
+#define MAC_PCU_SLP32_MODE_ENABLE_MASK 0x00100000
+#define MAC_PCU_SLP32_MODE_ENABLE_GET(x) (((x) & MAC_PCU_SLP32_MODE_ENABLE_MASK) >> MAC_PCU_SLP32_MODE_ENABLE_LSB)
+#define MAC_PCU_SLP32_MODE_ENABLE_SET(x) (((x) << MAC_PCU_SLP32_MODE_ENABLE_LSB) & MAC_PCU_SLP32_MODE_ENABLE_MASK)
+#define MAC_PCU_SLP32_MODE_HALF_CLK_LATENCY_MSB 19
+#define MAC_PCU_SLP32_MODE_HALF_CLK_LATENCY_LSB 0
+#define MAC_PCU_SLP32_MODE_HALF_CLK_LATENCY_MASK 0x000fffff
+#define MAC_PCU_SLP32_MODE_HALF_CLK_LATENCY_GET(x) (((x) & MAC_PCU_SLP32_MODE_HALF_CLK_LATENCY_MASK) >> MAC_PCU_SLP32_MODE_HALF_CLK_LATENCY_LSB)
+#define MAC_PCU_SLP32_MODE_HALF_CLK_LATENCY_SET(x) (((x) << MAC_PCU_SLP32_MODE_HALF_CLK_LATENCY_LSB) & MAC_PCU_SLP32_MODE_HALF_CLK_LATENCY_MASK)
+
+#define MAC_PCU_SLP32_WAKE_ADDRESS 0x000000f8
+#define MAC_PCU_SLP32_WAKE_OFFSET 0x000000f8
+#define MAC_PCU_SLP32_WAKE_XTL_TIME_MSB 15
+#define MAC_PCU_SLP32_WAKE_XTL_TIME_LSB 0
+#define MAC_PCU_SLP32_WAKE_XTL_TIME_MASK 0x0000ffff
+#define MAC_PCU_SLP32_WAKE_XTL_TIME_GET(x) (((x) & MAC_PCU_SLP32_WAKE_XTL_TIME_MASK) >> MAC_PCU_SLP32_WAKE_XTL_TIME_LSB)
+#define MAC_PCU_SLP32_WAKE_XTL_TIME_SET(x) (((x) << MAC_PCU_SLP32_WAKE_XTL_TIME_LSB) & MAC_PCU_SLP32_WAKE_XTL_TIME_MASK)
+
+#define MAC_PCU_SLP32_INC_ADDRESS 0x000000fc
+#define MAC_PCU_SLP32_INC_OFFSET 0x000000fc
+#define MAC_PCU_SLP32_INC_TSF_INC_MSB 19
+#define MAC_PCU_SLP32_INC_TSF_INC_LSB 0
+#define MAC_PCU_SLP32_INC_TSF_INC_MASK 0x000fffff
+#define MAC_PCU_SLP32_INC_TSF_INC_GET(x) (((x) & MAC_PCU_SLP32_INC_TSF_INC_MASK) >> MAC_PCU_SLP32_INC_TSF_INC_LSB)
+#define MAC_PCU_SLP32_INC_TSF_INC_SET(x) (((x) << MAC_PCU_SLP32_INC_TSF_INC_LSB) & MAC_PCU_SLP32_INC_TSF_INC_MASK)
+
+#define MAC_PCU_SLP_MIB1_ADDRESS 0x00000100
+#define MAC_PCU_SLP_MIB1_OFFSET 0x00000100
+#define MAC_PCU_SLP_MIB1_SLEEP_CNT_MSB 31
+#define MAC_PCU_SLP_MIB1_SLEEP_CNT_LSB 0
+#define MAC_PCU_SLP_MIB1_SLEEP_CNT_MASK 0xffffffff
+#define MAC_PCU_SLP_MIB1_SLEEP_CNT_GET(x) (((x) & MAC_PCU_SLP_MIB1_SLEEP_CNT_MASK) >> MAC_PCU_SLP_MIB1_SLEEP_CNT_LSB)
+#define MAC_PCU_SLP_MIB1_SLEEP_CNT_SET(x) (((x) << MAC_PCU_SLP_MIB1_SLEEP_CNT_LSB) & MAC_PCU_SLP_MIB1_SLEEP_CNT_MASK)
+
+#define MAC_PCU_SLP_MIB2_ADDRESS 0x00000104
+#define MAC_PCU_SLP_MIB2_OFFSET 0x00000104
+#define MAC_PCU_SLP_MIB2_CYCLE_CNT_MSB 31
+#define MAC_PCU_SLP_MIB2_CYCLE_CNT_LSB 0
+#define MAC_PCU_SLP_MIB2_CYCLE_CNT_MASK 0xffffffff
+#define MAC_PCU_SLP_MIB2_CYCLE_CNT_GET(x) (((x) & MAC_PCU_SLP_MIB2_CYCLE_CNT_MASK) >> MAC_PCU_SLP_MIB2_CYCLE_CNT_LSB)
+#define MAC_PCU_SLP_MIB2_CYCLE_CNT_SET(x) (((x) << MAC_PCU_SLP_MIB2_CYCLE_CNT_LSB) & MAC_PCU_SLP_MIB2_CYCLE_CNT_MASK)
+
+#define MAC_PCU_SLP_MIB3_ADDRESS 0x00000108
+#define MAC_PCU_SLP_MIB3_OFFSET 0x00000108
+#define MAC_PCU_SLP_MIB3_PENDING_MSB 1
+#define MAC_PCU_SLP_MIB3_PENDING_LSB 1
+#define MAC_PCU_SLP_MIB3_PENDING_MASK 0x00000002
+#define MAC_PCU_SLP_MIB3_PENDING_GET(x) (((x) & MAC_PCU_SLP_MIB3_PENDING_MASK) >> MAC_PCU_SLP_MIB3_PENDING_LSB)
+#define MAC_PCU_SLP_MIB3_PENDING_SET(x) (((x) << MAC_PCU_SLP_MIB3_PENDING_LSB) & MAC_PCU_SLP_MIB3_PENDING_MASK)
+#define MAC_PCU_SLP_MIB3_CLR_CNT_MSB 0
+#define MAC_PCU_SLP_MIB3_CLR_CNT_LSB 0
+#define MAC_PCU_SLP_MIB3_CLR_CNT_MASK 0x00000001
+#define MAC_PCU_SLP_MIB3_CLR_CNT_GET(x) (((x) & MAC_PCU_SLP_MIB3_CLR_CNT_MASK) >> MAC_PCU_SLP_MIB3_CLR_CNT_LSB)
+#define MAC_PCU_SLP_MIB3_CLR_CNT_SET(x) (((x) << MAC_PCU_SLP_MIB3_CLR_CNT_LSB) & MAC_PCU_SLP_MIB3_CLR_CNT_MASK)
+
+#define WLAN_POWER_REG_ADDRESS 0x0000010c
+#define WLAN_POWER_REG_OFFSET 0x0000010c
+#define WLAN_POWER_REG_SLEEP_MAKE_N_BREAK_EN_MSB 15
+#define WLAN_POWER_REG_SLEEP_MAKE_N_BREAK_EN_LSB 15
+#define WLAN_POWER_REG_SLEEP_MAKE_N_BREAK_EN_MASK 0x00008000
+#define WLAN_POWER_REG_SLEEP_MAKE_N_BREAK_EN_GET(x) (((x) & WLAN_POWER_REG_SLEEP_MAKE_N_BREAK_EN_MASK) >> WLAN_POWER_REG_SLEEP_MAKE_N_BREAK_EN_LSB)
+#define WLAN_POWER_REG_SLEEP_MAKE_N_BREAK_EN_SET(x) (((x) << WLAN_POWER_REG_SLEEP_MAKE_N_BREAK_EN_LSB) & WLAN_POWER_REG_SLEEP_MAKE_N_BREAK_EN_MASK)
+#define WLAN_POWER_REG_DEBUG_EN_MSB 14
+#define WLAN_POWER_REG_DEBUG_EN_LSB 14
+#define WLAN_POWER_REG_DEBUG_EN_MASK 0x00004000
+#define WLAN_POWER_REG_DEBUG_EN_GET(x) (((x) & WLAN_POWER_REG_DEBUG_EN_MASK) >> WLAN_POWER_REG_DEBUG_EN_LSB)
+#define WLAN_POWER_REG_DEBUG_EN_SET(x) (((x) << WLAN_POWER_REG_DEBUG_EN_LSB) & WLAN_POWER_REG_DEBUG_EN_MASK)
+#define WLAN_POWER_REG_WLAN_BB_PWD_EN_MSB 13
+#define WLAN_POWER_REG_WLAN_BB_PWD_EN_LSB 13
+#define WLAN_POWER_REG_WLAN_BB_PWD_EN_MASK 0x00002000
+#define WLAN_POWER_REG_WLAN_BB_PWD_EN_GET(x) (((x) & WLAN_POWER_REG_WLAN_BB_PWD_EN_MASK) >> WLAN_POWER_REG_WLAN_BB_PWD_EN_LSB)
+#define WLAN_POWER_REG_WLAN_BB_PWD_EN_SET(x) (((x) << WLAN_POWER_REG_WLAN_BB_PWD_EN_LSB) & WLAN_POWER_REG_WLAN_BB_PWD_EN_MASK)
+#define WLAN_POWER_REG_WLAN_MAC_PWD_EN_MSB 12
+#define WLAN_POWER_REG_WLAN_MAC_PWD_EN_LSB 12
+#define WLAN_POWER_REG_WLAN_MAC_PWD_EN_MASK 0x00001000
+#define WLAN_POWER_REG_WLAN_MAC_PWD_EN_GET(x) (((x) & WLAN_POWER_REG_WLAN_MAC_PWD_EN_MASK) >> WLAN_POWER_REG_WLAN_MAC_PWD_EN_LSB)
+#define WLAN_POWER_REG_WLAN_MAC_PWD_EN_SET(x) (((x) << WLAN_POWER_REG_WLAN_MAC_PWD_EN_LSB) & WLAN_POWER_REG_WLAN_MAC_PWD_EN_MASK)
+#define WLAN_POWER_REG_VLVL_MSB 11
+#define WLAN_POWER_REG_VLVL_LSB 8
+#define WLAN_POWER_REG_VLVL_MASK 0x00000f00
+#define WLAN_POWER_REG_VLVL_GET(x) (((x) & WLAN_POWER_REG_VLVL_MASK) >> WLAN_POWER_REG_VLVL_LSB)
+#define WLAN_POWER_REG_VLVL_SET(x) (((x) << WLAN_POWER_REG_VLVL_LSB) & WLAN_POWER_REG_VLVL_MASK)
+#define WLAN_POWER_REG_CPU_INT_ENABLE_MSB 7
+#define WLAN_POWER_REG_CPU_INT_ENABLE_LSB 7
+#define WLAN_POWER_REG_CPU_INT_ENABLE_MASK 0x00000080
+#define WLAN_POWER_REG_CPU_INT_ENABLE_GET(x) (((x) & WLAN_POWER_REG_CPU_INT_ENABLE_MASK) >> WLAN_POWER_REG_CPU_INT_ENABLE_LSB)
+#define WLAN_POWER_REG_CPU_INT_ENABLE_SET(x) (((x) << WLAN_POWER_REG_CPU_INT_ENABLE_LSB) & WLAN_POWER_REG_CPU_INT_ENABLE_MASK)
+#define WLAN_POWER_REG_WLAN_ISO_DIS_MSB 6
+#define WLAN_POWER_REG_WLAN_ISO_DIS_LSB 6
+#define WLAN_POWER_REG_WLAN_ISO_DIS_MASK 0x00000040
+#define WLAN_POWER_REG_WLAN_ISO_DIS_GET(x) (((x) & WLAN_POWER_REG_WLAN_ISO_DIS_MASK) >> WLAN_POWER_REG_WLAN_ISO_DIS_LSB)
+#define WLAN_POWER_REG_WLAN_ISO_DIS_SET(x) (((x) << WLAN_POWER_REG_WLAN_ISO_DIS_LSB) & WLAN_POWER_REG_WLAN_ISO_DIS_MASK)
+#define WLAN_POWER_REG_WLAN_ISO_CNTL_MSB 5
+#define WLAN_POWER_REG_WLAN_ISO_CNTL_LSB 5
+#define WLAN_POWER_REG_WLAN_ISO_CNTL_MASK 0x00000020
+#define WLAN_POWER_REG_WLAN_ISO_CNTL_GET(x) (((x) & WLAN_POWER_REG_WLAN_ISO_CNTL_MASK) >> WLAN_POWER_REG_WLAN_ISO_CNTL_LSB)
+#define WLAN_POWER_REG_WLAN_ISO_CNTL_SET(x) (((x) << WLAN_POWER_REG_WLAN_ISO_CNTL_LSB) & WLAN_POWER_REG_WLAN_ISO_CNTL_MASK)
+#define WLAN_POWER_REG_RADIO_PWD_EN_MSB 4
+#define WLAN_POWER_REG_RADIO_PWD_EN_LSB 4
+#define WLAN_POWER_REG_RADIO_PWD_EN_MASK 0x00000010
+#define WLAN_POWER_REG_RADIO_PWD_EN_GET(x) (((x) & WLAN_POWER_REG_RADIO_PWD_EN_MASK) >> WLAN_POWER_REG_RADIO_PWD_EN_LSB)
+#define WLAN_POWER_REG_RADIO_PWD_EN_SET(x) (((x) << WLAN_POWER_REG_RADIO_PWD_EN_LSB) & WLAN_POWER_REG_RADIO_PWD_EN_MASK)
+#define WLAN_POWER_REG_SOC_ISO_EN_MSB 3
+#define WLAN_POWER_REG_SOC_ISO_EN_LSB 3
+#define WLAN_POWER_REG_SOC_ISO_EN_MASK 0x00000008
+#define WLAN_POWER_REG_SOC_ISO_EN_GET(x) (((x) & WLAN_POWER_REG_SOC_ISO_EN_MASK) >> WLAN_POWER_REG_SOC_ISO_EN_LSB)
+#define WLAN_POWER_REG_SOC_ISO_EN_SET(x) (((x) << WLAN_POWER_REG_SOC_ISO_EN_LSB) & WLAN_POWER_REG_SOC_ISO_EN_MASK)
+#define WLAN_POWER_REG_WLAN_ISO_EN_MSB 2
+#define WLAN_POWER_REG_WLAN_ISO_EN_LSB 2
+#define WLAN_POWER_REG_WLAN_ISO_EN_MASK 0x00000004
+#define WLAN_POWER_REG_WLAN_ISO_EN_GET(x) (((x) & WLAN_POWER_REG_WLAN_ISO_EN_MASK) >> WLAN_POWER_REG_WLAN_ISO_EN_LSB)
+#define WLAN_POWER_REG_WLAN_ISO_EN_SET(x) (((x) << WLAN_POWER_REG_WLAN_ISO_EN_LSB) & WLAN_POWER_REG_WLAN_ISO_EN_MASK)
+#define WLAN_POWER_REG_WLAN_PWD_EN_MSB 1
+#define WLAN_POWER_REG_WLAN_PWD_EN_LSB 1
+#define WLAN_POWER_REG_WLAN_PWD_EN_MASK 0x00000002
+#define WLAN_POWER_REG_WLAN_PWD_EN_GET(x) (((x) & WLAN_POWER_REG_WLAN_PWD_EN_MASK) >> WLAN_POWER_REG_WLAN_PWD_EN_LSB)
+#define WLAN_POWER_REG_WLAN_PWD_EN_SET(x) (((x) << WLAN_POWER_REG_WLAN_PWD_EN_LSB) & WLAN_POWER_REG_WLAN_PWD_EN_MASK)
+#define WLAN_POWER_REG_POWER_EN_MSB 0
+#define WLAN_POWER_REG_POWER_EN_LSB 0
+#define WLAN_POWER_REG_POWER_EN_MASK 0x00000001
+#define WLAN_POWER_REG_POWER_EN_GET(x) (((x) & WLAN_POWER_REG_POWER_EN_MASK) >> WLAN_POWER_REG_POWER_EN_LSB)
+#define WLAN_POWER_REG_POWER_EN_SET(x) (((x) << WLAN_POWER_REG_POWER_EN_LSB) & WLAN_POWER_REG_POWER_EN_MASK)
+
+#define WLAN_CORE_CLK_CTRL_ADDRESS 0x00000110
+#define WLAN_CORE_CLK_CTRL_OFFSET 0x00000110
+#define WLAN_CORE_CLK_CTRL_DIV_MSB 2
+#define WLAN_CORE_CLK_CTRL_DIV_LSB 0
+#define WLAN_CORE_CLK_CTRL_DIV_MASK 0x00000007
+#define WLAN_CORE_CLK_CTRL_DIV_GET(x) (((x) & WLAN_CORE_CLK_CTRL_DIV_MASK) >> WLAN_CORE_CLK_CTRL_DIV_LSB)
+#define WLAN_CORE_CLK_CTRL_DIV_SET(x) (((x) << WLAN_CORE_CLK_CTRL_DIV_LSB) & WLAN_CORE_CLK_CTRL_DIV_MASK)
+
+#define WLAN_GPIO_WAKEUP_CONTROL_ADDRESS 0x00000114
+#define WLAN_GPIO_WAKEUP_CONTROL_OFFSET 0x00000114
+#define WLAN_GPIO_WAKEUP_CONTROL_ENABLE_MSB 0
+#define WLAN_GPIO_WAKEUP_CONTROL_ENABLE_LSB 0
+#define WLAN_GPIO_WAKEUP_CONTROL_ENABLE_MASK 0x00000001
+#define WLAN_GPIO_WAKEUP_CONTROL_ENABLE_GET(x) (((x) & WLAN_GPIO_WAKEUP_CONTROL_ENABLE_MASK) >> WLAN_GPIO_WAKEUP_CONTROL_ENABLE_LSB)
+#define WLAN_GPIO_WAKEUP_CONTROL_ENABLE_SET(x) (((x) << WLAN_GPIO_WAKEUP_CONTROL_ENABLE_LSB) & WLAN_GPIO_WAKEUP_CONTROL_ENABLE_MASK)
+
+#define HT_ADDRESS 0x00000118
+#define HT_OFFSET 0x00000118
+#define HT_MODE_MSB 0
+#define HT_MODE_LSB 0
+#define HT_MODE_MASK 0x00000001
+#define HT_MODE_GET(x) (((x) & HT_MODE_MASK) >> HT_MODE_LSB)
+#define HT_MODE_SET(x) (((x) << HT_MODE_LSB) & HT_MODE_MASK)
+
+#define MAC_PCU_TSF_L32_ADDRESS 0x0000011c
+#define MAC_PCU_TSF_L32_OFFSET 0x0000011c
+#define MAC_PCU_TSF_L32_VALUE_MSB 31
+#define MAC_PCU_TSF_L32_VALUE_LSB 0
+#define MAC_PCU_TSF_L32_VALUE_MASK 0xffffffff
+#define MAC_PCU_TSF_L32_VALUE_GET(x) (((x) & MAC_PCU_TSF_L32_VALUE_MASK) >> MAC_PCU_TSF_L32_VALUE_LSB)
+#define MAC_PCU_TSF_L32_VALUE_SET(x) (((x) << MAC_PCU_TSF_L32_VALUE_LSB) & MAC_PCU_TSF_L32_VALUE_MASK)
+
+#define MAC_PCU_TSF_U32_ADDRESS 0x00000120
+#define MAC_PCU_TSF_U32_OFFSET 0x00000120
+#define MAC_PCU_TSF_U32_VALUE_MSB 31
+#define MAC_PCU_TSF_U32_VALUE_LSB 0
+#define MAC_PCU_TSF_U32_VALUE_MASK 0xffffffff
+#define MAC_PCU_TSF_U32_VALUE_GET(x) (((x) & MAC_PCU_TSF_U32_VALUE_MASK) >> MAC_PCU_TSF_U32_VALUE_LSB)
+#define MAC_PCU_TSF_U32_VALUE_SET(x) (((x) << MAC_PCU_TSF_U32_VALUE_LSB) & MAC_PCU_TSF_U32_VALUE_MASK)
+
+#define MAC_PCU_WBTIMER_ADDRESS 0x00000124
+#define MAC_PCU_WBTIMER_OFFSET 0x00000124
+#define MAC_PCU_WBTIMER_VALUE_MSB 31
+#define MAC_PCU_WBTIMER_VALUE_LSB 0
+#define MAC_PCU_WBTIMER_VALUE_MASK 0xffffffff
+#define MAC_PCU_WBTIMER_VALUE_GET(x) (((x) & MAC_PCU_WBTIMER_VALUE_MASK) >> MAC_PCU_WBTIMER_VALUE_LSB)
+#define MAC_PCU_WBTIMER_VALUE_SET(x) (((x) << MAC_PCU_WBTIMER_VALUE_LSB) & MAC_PCU_WBTIMER_VALUE_MASK)
+
+#define MAC_PCU_GENERIC_TIMERS_ADDRESS 0x00000140
+#define MAC_PCU_GENERIC_TIMERS_OFFSET 0x00000140
+#define MAC_PCU_GENERIC_TIMERS_DATA_MSB 31
+#define MAC_PCU_GENERIC_TIMERS_DATA_LSB 0
+#define MAC_PCU_GENERIC_TIMERS_DATA_MASK 0xffffffff
+#define MAC_PCU_GENERIC_TIMERS_DATA_GET(x) (((x) & MAC_PCU_GENERIC_TIMERS_DATA_MASK) >> MAC_PCU_GENERIC_TIMERS_DATA_LSB)
+#define MAC_PCU_GENERIC_TIMERS_DATA_SET(x) (((x) << MAC_PCU_GENERIC_TIMERS_DATA_LSB) & MAC_PCU_GENERIC_TIMERS_DATA_MASK)
+
+#define MAC_PCU_GENERIC_TIMERS_MODE_ADDRESS 0x00000180
+#define MAC_PCU_GENERIC_TIMERS_MODE_OFFSET 0x00000180
+#define MAC_PCU_GENERIC_TIMERS_MODE_ENABLE_MSB 15
+#define MAC_PCU_GENERIC_TIMERS_MODE_ENABLE_LSB 0
+#define MAC_PCU_GENERIC_TIMERS_MODE_ENABLE_MASK 0x0000ffff
+#define MAC_PCU_GENERIC_TIMERS_MODE_ENABLE_GET(x) (((x) & MAC_PCU_GENERIC_TIMERS_MODE_ENABLE_MASK) >> MAC_PCU_GENERIC_TIMERS_MODE_ENABLE_LSB)
+#define MAC_PCU_GENERIC_TIMERS_MODE_ENABLE_SET(x) (((x) << MAC_PCU_GENERIC_TIMERS_MODE_ENABLE_LSB) & MAC_PCU_GENERIC_TIMERS_MODE_ENABLE_MASK)
+
+#define MAC_PCU_GENERIC_TIMERS2_ADDRESS 0x000001c0
+#define MAC_PCU_GENERIC_TIMERS2_OFFSET 0x000001c0
+#define MAC_PCU_GENERIC_TIMERS2_DATA_MSB 31
+#define MAC_PCU_GENERIC_TIMERS2_DATA_LSB 0
+#define MAC_PCU_GENERIC_TIMERS2_DATA_MASK 0xffffffff
+#define MAC_PCU_GENERIC_TIMERS2_DATA_GET(x) (((x) & MAC_PCU_GENERIC_TIMERS2_DATA_MASK) >> MAC_PCU_GENERIC_TIMERS2_DATA_LSB)
+#define MAC_PCU_GENERIC_TIMERS2_DATA_SET(x) (((x) << MAC_PCU_GENERIC_TIMERS2_DATA_LSB) & MAC_PCU_GENERIC_TIMERS2_DATA_MASK)
+
+#define MAC_PCU_GENERIC_TIMERS_MODE2_ADDRESS 0x00000200
+#define MAC_PCU_GENERIC_TIMERS_MODE2_OFFSET 0x00000200
+#define MAC_PCU_GENERIC_TIMERS_MODE2_ENABLE_MSB 15
+#define MAC_PCU_GENERIC_TIMERS_MODE2_ENABLE_LSB 0
+#define MAC_PCU_GENERIC_TIMERS_MODE2_ENABLE_MASK 0x0000ffff
+#define MAC_PCU_GENERIC_TIMERS_MODE2_ENABLE_GET(x) (((x) & MAC_PCU_GENERIC_TIMERS_MODE2_ENABLE_MASK) >> MAC_PCU_GENERIC_TIMERS_MODE2_ENABLE_LSB)
+#define MAC_PCU_GENERIC_TIMERS_MODE2_ENABLE_SET(x) (((x) << MAC_PCU_GENERIC_TIMERS_MODE2_ENABLE_LSB) & MAC_PCU_GENERIC_TIMERS_MODE2_ENABLE_MASK)
+
+#define MAC_PCU_SLP1_ADDRESS 0x00000204
+#define MAC_PCU_SLP1_OFFSET 0x00000204
+#define MAC_PCU_SLP1_ASSUME_DTIM_MSB 19
+#define MAC_PCU_SLP1_ASSUME_DTIM_LSB 19
+#define MAC_PCU_SLP1_ASSUME_DTIM_MASK 0x00080000
+#define MAC_PCU_SLP1_ASSUME_DTIM_GET(x) (((x) & MAC_PCU_SLP1_ASSUME_DTIM_MASK) >> MAC_PCU_SLP1_ASSUME_DTIM_LSB)
+#define MAC_PCU_SLP1_ASSUME_DTIM_SET(x) (((x) << MAC_PCU_SLP1_ASSUME_DTIM_LSB) & MAC_PCU_SLP1_ASSUME_DTIM_MASK)
+#define MAC_PCU_SLP1_CAB_TIMEOUT_MSB 15
+#define MAC_PCU_SLP1_CAB_TIMEOUT_LSB 0
+#define MAC_PCU_SLP1_CAB_TIMEOUT_MASK 0x0000ffff
+#define MAC_PCU_SLP1_CAB_TIMEOUT_GET(x) (((x) & MAC_PCU_SLP1_CAB_TIMEOUT_MASK) >> MAC_PCU_SLP1_CAB_TIMEOUT_LSB)
+#define MAC_PCU_SLP1_CAB_TIMEOUT_SET(x) (((x) << MAC_PCU_SLP1_CAB_TIMEOUT_LSB) & MAC_PCU_SLP1_CAB_TIMEOUT_MASK)
+
+#define MAC_PCU_SLP2_ADDRESS 0x00000208
+#define MAC_PCU_SLP2_OFFSET 0x00000208
+#define MAC_PCU_SLP2_BEACON_TIMEOUT_MSB 15
+#define MAC_PCU_SLP2_BEACON_TIMEOUT_LSB 0
+#define MAC_PCU_SLP2_BEACON_TIMEOUT_MASK 0x0000ffff
+#define MAC_PCU_SLP2_BEACON_TIMEOUT_GET(x) (((x) & MAC_PCU_SLP2_BEACON_TIMEOUT_MASK) >> MAC_PCU_SLP2_BEACON_TIMEOUT_LSB)
+#define MAC_PCU_SLP2_BEACON_TIMEOUT_SET(x) (((x) << MAC_PCU_SLP2_BEACON_TIMEOUT_LSB) & MAC_PCU_SLP2_BEACON_TIMEOUT_MASK)
+
+#define MAC_PCU_RESET_TSF_ADDRESS 0x0000020c
+#define MAC_PCU_RESET_TSF_OFFSET 0x0000020c
+#define MAC_PCU_RESET_TSF_ONE_SHOT2_MSB 25
+#define MAC_PCU_RESET_TSF_ONE_SHOT2_LSB 25
+#define MAC_PCU_RESET_TSF_ONE_SHOT2_MASK 0x02000000
+#define MAC_PCU_RESET_TSF_ONE_SHOT2_GET(x) (((x) & MAC_PCU_RESET_TSF_ONE_SHOT2_MASK) >> MAC_PCU_RESET_TSF_ONE_SHOT2_LSB)
+#define MAC_PCU_RESET_TSF_ONE_SHOT2_SET(x) (((x) << MAC_PCU_RESET_TSF_ONE_SHOT2_LSB) & MAC_PCU_RESET_TSF_ONE_SHOT2_MASK)
+#define MAC_PCU_RESET_TSF_ONE_SHOT_MSB 24
+#define MAC_PCU_RESET_TSF_ONE_SHOT_LSB 24
+#define MAC_PCU_RESET_TSF_ONE_SHOT_MASK 0x01000000
+#define MAC_PCU_RESET_TSF_ONE_SHOT_GET(x) (((x) & MAC_PCU_RESET_TSF_ONE_SHOT_MASK) >> MAC_PCU_RESET_TSF_ONE_SHOT_LSB)
+#define MAC_PCU_RESET_TSF_ONE_SHOT_SET(x) (((x) << MAC_PCU_RESET_TSF_ONE_SHOT_LSB) & MAC_PCU_RESET_TSF_ONE_SHOT_MASK)
+
+#define MAC_PCU_TSF_ADD_PLL_ADDRESS 0x00000210
+#define MAC_PCU_TSF_ADD_PLL_OFFSET 0x00000210
+#define MAC_PCU_TSF_ADD_PLL_VALUE_MSB 7
+#define MAC_PCU_TSF_ADD_PLL_VALUE_LSB 0
+#define MAC_PCU_TSF_ADD_PLL_VALUE_MASK 0x000000ff
+#define MAC_PCU_TSF_ADD_PLL_VALUE_GET(x) (((x) & MAC_PCU_TSF_ADD_PLL_VALUE_MASK) >> MAC_PCU_TSF_ADD_PLL_VALUE_LSB)
+#define MAC_PCU_TSF_ADD_PLL_VALUE_SET(x) (((x) << MAC_PCU_TSF_ADD_PLL_VALUE_LSB) & MAC_PCU_TSF_ADD_PLL_VALUE_MASK)
+
+#define SLEEP_RETENTION_ADDRESS 0x00000214
+#define SLEEP_RETENTION_OFFSET 0x00000214
+#define SLEEP_RETENTION_TIME_MSB 9
+#define SLEEP_RETENTION_TIME_LSB 2
+#define SLEEP_RETENTION_TIME_MASK 0x000003fc
+#define SLEEP_RETENTION_TIME_GET(x) (((x) & SLEEP_RETENTION_TIME_MASK) >> SLEEP_RETENTION_TIME_LSB)
+#define SLEEP_RETENTION_TIME_SET(x) (((x) << SLEEP_RETENTION_TIME_LSB) & SLEEP_RETENTION_TIME_MASK)
+#define SLEEP_RETENTION_MODE_MSB 1
+#define SLEEP_RETENTION_MODE_LSB 1
+#define SLEEP_RETENTION_MODE_MASK 0x00000002
+#define SLEEP_RETENTION_MODE_GET(x) (((x) & SLEEP_RETENTION_MODE_MASK) >> SLEEP_RETENTION_MODE_LSB)
+#define SLEEP_RETENTION_MODE_SET(x) (((x) << SLEEP_RETENTION_MODE_LSB) & SLEEP_RETENTION_MODE_MASK)
+#define SLEEP_RETENTION_ENABLE_MSB 0
+#define SLEEP_RETENTION_ENABLE_LSB 0
+#define SLEEP_RETENTION_ENABLE_MASK 0x00000001
+#define SLEEP_RETENTION_ENABLE_GET(x) (((x) & SLEEP_RETENTION_ENABLE_MASK) >> SLEEP_RETENTION_ENABLE_LSB)
+#define SLEEP_RETENTION_ENABLE_SET(x) (((x) << SLEEP_RETENTION_ENABLE_LSB) & SLEEP_RETENTION_ENABLE_MASK)
+
+#define BTCOEXCTRL_ADDRESS 0x00000218
+#define BTCOEXCTRL_OFFSET 0x00000218
+#define BTCOEXCTRL_WBTIMER_ENABLE_MSB 26
+#define BTCOEXCTRL_WBTIMER_ENABLE_LSB 26
+#define BTCOEXCTRL_WBTIMER_ENABLE_MASK 0x04000000
+#define BTCOEXCTRL_WBTIMER_ENABLE_GET(x) (((x) & BTCOEXCTRL_WBTIMER_ENABLE_MASK) >> BTCOEXCTRL_WBTIMER_ENABLE_LSB)
+#define BTCOEXCTRL_WBTIMER_ENABLE_SET(x) (((x) << BTCOEXCTRL_WBTIMER_ENABLE_LSB) & BTCOEXCTRL_WBTIMER_ENABLE_MASK)
+#define BTCOEXCTRL_WBSYNC_ON_BEACON_MSB 25
+#define BTCOEXCTRL_WBSYNC_ON_BEACON_LSB 25
+#define BTCOEXCTRL_WBSYNC_ON_BEACON_MASK 0x02000000
+#define BTCOEXCTRL_WBSYNC_ON_BEACON_GET(x) (((x) & BTCOEXCTRL_WBSYNC_ON_BEACON_MASK) >> BTCOEXCTRL_WBSYNC_ON_BEACON_LSB)
+#define BTCOEXCTRL_WBSYNC_ON_BEACON_SET(x) (((x) << BTCOEXCTRL_WBSYNC_ON_BEACON_LSB) & BTCOEXCTRL_WBSYNC_ON_BEACON_MASK)
+#define BTCOEXCTRL_PTA_MODE_MSB 24
+#define BTCOEXCTRL_PTA_MODE_LSB 23
+#define BTCOEXCTRL_PTA_MODE_MASK 0x01800000
+#define BTCOEXCTRL_PTA_MODE_GET(x) (((x) & BTCOEXCTRL_PTA_MODE_MASK) >> BTCOEXCTRL_PTA_MODE_LSB)
+#define BTCOEXCTRL_PTA_MODE_SET(x) (((x) << BTCOEXCTRL_PTA_MODE_LSB) & BTCOEXCTRL_PTA_MODE_MASK)
+#define BTCOEXCTRL_FREQ_TIME_MSB 22
+#define BTCOEXCTRL_FREQ_TIME_LSB 18
+#define BTCOEXCTRL_FREQ_TIME_MASK 0x007c0000
+#define BTCOEXCTRL_FREQ_TIME_GET(x) (((x) & BTCOEXCTRL_FREQ_TIME_MASK) >> BTCOEXCTRL_FREQ_TIME_LSB)
+#define BTCOEXCTRL_FREQ_TIME_SET(x) (((x) << BTCOEXCTRL_FREQ_TIME_LSB) & BTCOEXCTRL_FREQ_TIME_MASK)
+#define BTCOEXCTRL_PRIORITY_TIME_MSB 17
+#define BTCOEXCTRL_PRIORITY_TIME_LSB 12
+#define BTCOEXCTRL_PRIORITY_TIME_MASK 0x0003f000
+#define BTCOEXCTRL_PRIORITY_TIME_GET(x) (((x) & BTCOEXCTRL_PRIORITY_TIME_MASK) >> BTCOEXCTRL_PRIORITY_TIME_LSB)
+#define BTCOEXCTRL_PRIORITY_TIME_SET(x) (((x) << BTCOEXCTRL_PRIORITY_TIME_LSB) & BTCOEXCTRL_PRIORITY_TIME_MASK)
+#define BTCOEXCTRL_SYNC_DET_EN_MSB 11
+#define BTCOEXCTRL_SYNC_DET_EN_LSB 11
+#define BTCOEXCTRL_SYNC_DET_EN_MASK 0x00000800
+#define BTCOEXCTRL_SYNC_DET_EN_GET(x) (((x) & BTCOEXCTRL_SYNC_DET_EN_MASK) >> BTCOEXCTRL_SYNC_DET_EN_LSB)
+#define BTCOEXCTRL_SYNC_DET_EN_SET(x) (((x) << BTCOEXCTRL_SYNC_DET_EN_LSB) & BTCOEXCTRL_SYNC_DET_EN_MASK)
+#define BTCOEXCTRL_IDLE_CNT_EN_MSB 10
+#define BTCOEXCTRL_IDLE_CNT_EN_LSB 10
+#define BTCOEXCTRL_IDLE_CNT_EN_MASK 0x00000400
+#define BTCOEXCTRL_IDLE_CNT_EN_GET(x) (((x) & BTCOEXCTRL_IDLE_CNT_EN_MASK) >> BTCOEXCTRL_IDLE_CNT_EN_LSB)
+#define BTCOEXCTRL_IDLE_CNT_EN_SET(x) (((x) << BTCOEXCTRL_IDLE_CNT_EN_LSB) & BTCOEXCTRL_IDLE_CNT_EN_MASK)
+#define BTCOEXCTRL_FRAME_CNT_EN_MSB 9
+#define BTCOEXCTRL_FRAME_CNT_EN_LSB 9
+#define BTCOEXCTRL_FRAME_CNT_EN_MASK 0x00000200
+#define BTCOEXCTRL_FRAME_CNT_EN_GET(x) (((x) & BTCOEXCTRL_FRAME_CNT_EN_MASK) >> BTCOEXCTRL_FRAME_CNT_EN_LSB)
+#define BTCOEXCTRL_FRAME_CNT_EN_SET(x) (((x) << BTCOEXCTRL_FRAME_CNT_EN_LSB) & BTCOEXCTRL_FRAME_CNT_EN_MASK)
+#define BTCOEXCTRL_CLK_CNT_EN_MSB 8
+#define BTCOEXCTRL_CLK_CNT_EN_LSB 8
+#define BTCOEXCTRL_CLK_CNT_EN_MASK 0x00000100
+#define BTCOEXCTRL_CLK_CNT_EN_GET(x) (((x) & BTCOEXCTRL_CLK_CNT_EN_MASK) >> BTCOEXCTRL_CLK_CNT_EN_LSB)
+#define BTCOEXCTRL_CLK_CNT_EN_SET(x) (((x) << BTCOEXCTRL_CLK_CNT_EN_LSB) & BTCOEXCTRL_CLK_CNT_EN_MASK)
+#define BTCOEXCTRL_GAP_MSB 7
+#define BTCOEXCTRL_GAP_LSB 0
+#define BTCOEXCTRL_GAP_MASK 0x000000ff
+#define BTCOEXCTRL_GAP_GET(x) (((x) & BTCOEXCTRL_GAP_MASK) >> BTCOEXCTRL_GAP_LSB)
+#define BTCOEXCTRL_GAP_SET(x) (((x) << BTCOEXCTRL_GAP_LSB) & BTCOEXCTRL_GAP_MASK)
+
+#define WBSYNC_PRIORITY1_ADDRESS 0x0000021c
+#define WBSYNC_PRIORITY1_OFFSET 0x0000021c
+#define WBSYNC_PRIORITY1_BITMAP_MSB 31
+#define WBSYNC_PRIORITY1_BITMAP_LSB 0
+#define WBSYNC_PRIORITY1_BITMAP_MASK 0xffffffff
+#define WBSYNC_PRIORITY1_BITMAP_GET(x) (((x) & WBSYNC_PRIORITY1_BITMAP_MASK) >> WBSYNC_PRIORITY1_BITMAP_LSB)
+#define WBSYNC_PRIORITY1_BITMAP_SET(x) (((x) << WBSYNC_PRIORITY1_BITMAP_LSB) & WBSYNC_PRIORITY1_BITMAP_MASK)
+
+#define WBSYNC_PRIORITY2_ADDRESS 0x00000220
+#define WBSYNC_PRIORITY2_OFFSET 0x00000220
+#define WBSYNC_PRIORITY2_BITMAP_MSB 31
+#define WBSYNC_PRIORITY2_BITMAP_LSB 0
+#define WBSYNC_PRIORITY2_BITMAP_MASK 0xffffffff
+#define WBSYNC_PRIORITY2_BITMAP_GET(x) (((x) & WBSYNC_PRIORITY2_BITMAP_MASK) >> WBSYNC_PRIORITY2_BITMAP_LSB)
+#define WBSYNC_PRIORITY2_BITMAP_SET(x) (((x) << WBSYNC_PRIORITY2_BITMAP_LSB) & WBSYNC_PRIORITY2_BITMAP_MASK)
+
+#define WBSYNC_PRIORITY3_ADDRESS 0x00000224
+#define WBSYNC_PRIORITY3_OFFSET 0x00000224
+#define WBSYNC_PRIORITY3_BITMAP_MSB 31
+#define WBSYNC_PRIORITY3_BITMAP_LSB 0
+#define WBSYNC_PRIORITY3_BITMAP_MASK 0xffffffff
+#define WBSYNC_PRIORITY3_BITMAP_GET(x) (((x) & WBSYNC_PRIORITY3_BITMAP_MASK) >> WBSYNC_PRIORITY3_BITMAP_LSB)
+#define WBSYNC_PRIORITY3_BITMAP_SET(x) (((x) << WBSYNC_PRIORITY3_BITMAP_LSB) & WBSYNC_PRIORITY3_BITMAP_MASK)
+
+#define BTCOEX0_ADDRESS 0x00000228
+#define BTCOEX0_OFFSET 0x00000228
+#define BTCOEX0_SYNC_DUR_MSB 7
+#define BTCOEX0_SYNC_DUR_LSB 0
+#define BTCOEX0_SYNC_DUR_MASK 0x000000ff
+#define BTCOEX0_SYNC_DUR_GET(x) (((x) & BTCOEX0_SYNC_DUR_MASK) >> BTCOEX0_SYNC_DUR_LSB)
+#define BTCOEX0_SYNC_DUR_SET(x) (((x) << BTCOEX0_SYNC_DUR_LSB) & BTCOEX0_SYNC_DUR_MASK)
+
+#define BTCOEX1_ADDRESS 0x0000022c
+#define BTCOEX1_OFFSET 0x0000022c
+#define BTCOEX1_CLK_THRES_MSB 20
+#define BTCOEX1_CLK_THRES_LSB 0
+#define BTCOEX1_CLK_THRES_MASK 0x001fffff
+#define BTCOEX1_CLK_THRES_GET(x) (((x) & BTCOEX1_CLK_THRES_MASK) >> BTCOEX1_CLK_THRES_LSB)
+#define BTCOEX1_CLK_THRES_SET(x) (((x) << BTCOEX1_CLK_THRES_LSB) & BTCOEX1_CLK_THRES_MASK)
+
+#define BTCOEX2_ADDRESS 0x00000230
+#define BTCOEX2_OFFSET 0x00000230
+#define BTCOEX2_FRAME_THRES_MSB 7
+#define BTCOEX2_FRAME_THRES_LSB 0
+#define BTCOEX2_FRAME_THRES_MASK 0x000000ff
+#define BTCOEX2_FRAME_THRES_GET(x) (((x) & BTCOEX2_FRAME_THRES_MASK) >> BTCOEX2_FRAME_THRES_LSB)
+#define BTCOEX2_FRAME_THRES_SET(x) (((x) << BTCOEX2_FRAME_THRES_LSB) & BTCOEX2_FRAME_THRES_MASK)
+
+#define BTCOEX3_ADDRESS 0x00000234
+#define BTCOEX3_OFFSET 0x00000234
+#define BTCOEX3_CLK_CNT_MSB 20
+#define BTCOEX3_CLK_CNT_LSB 0
+#define BTCOEX3_CLK_CNT_MASK 0x001fffff
+#define BTCOEX3_CLK_CNT_GET(x) (((x) & BTCOEX3_CLK_CNT_MASK) >> BTCOEX3_CLK_CNT_LSB)
+#define BTCOEX3_CLK_CNT_SET(x) (((x) << BTCOEX3_CLK_CNT_LSB) & BTCOEX3_CLK_CNT_MASK)
+
+#define BTCOEX4_ADDRESS 0x00000238
+#define BTCOEX4_OFFSET 0x00000238
+#define BTCOEX4_FRAME_CNT_MSB 7
+#define BTCOEX4_FRAME_CNT_LSB 0
+#define BTCOEX4_FRAME_CNT_MASK 0x000000ff
+#define BTCOEX4_FRAME_CNT_GET(x) (((x) & BTCOEX4_FRAME_CNT_MASK) >> BTCOEX4_FRAME_CNT_LSB)
+#define BTCOEX4_FRAME_CNT_SET(x) (((x) << BTCOEX4_FRAME_CNT_LSB) & BTCOEX4_FRAME_CNT_MASK)
+
+#define BTCOEX5_ADDRESS 0x0000023c
+#define BTCOEX5_OFFSET 0x0000023c
+#define BTCOEX5_IDLE_CNT_MSB 15
+#define BTCOEX5_IDLE_CNT_LSB 0
+#define BTCOEX5_IDLE_CNT_MASK 0x0000ffff
+#define BTCOEX5_IDLE_CNT_GET(x) (((x) & BTCOEX5_IDLE_CNT_MASK) >> BTCOEX5_IDLE_CNT_LSB)
+#define BTCOEX5_IDLE_CNT_SET(x) (((x) << BTCOEX5_IDLE_CNT_LSB) & BTCOEX5_IDLE_CNT_MASK)
+
+#define BTCOEX6_ADDRESS 0x00000240
+#define BTCOEX6_OFFSET 0x00000240
+#define BTCOEX6_IDLE_RESET_LVL_BITMAP_MSB 31
+#define BTCOEX6_IDLE_RESET_LVL_BITMAP_LSB 0
+#define BTCOEX6_IDLE_RESET_LVL_BITMAP_MASK 0xffffffff
+#define BTCOEX6_IDLE_RESET_LVL_BITMAP_GET(x) (((x) & BTCOEX6_IDLE_RESET_LVL_BITMAP_MASK) >> BTCOEX6_IDLE_RESET_LVL_BITMAP_LSB)
+#define BTCOEX6_IDLE_RESET_LVL_BITMAP_SET(x) (((x) << BTCOEX6_IDLE_RESET_LVL_BITMAP_LSB) & BTCOEX6_IDLE_RESET_LVL_BITMAP_MASK)
+
+#define LOCK_ADDRESS 0x00000244
+#define LOCK_OFFSET 0x00000244
+#define LOCK_TLOCK_SLAVE_MSB 31
+#define LOCK_TLOCK_SLAVE_LSB 24
+#define LOCK_TLOCK_SLAVE_MASK 0xff000000
+#define LOCK_TLOCK_SLAVE_GET(x) (((x) & LOCK_TLOCK_SLAVE_MASK) >> LOCK_TLOCK_SLAVE_LSB)
+#define LOCK_TLOCK_SLAVE_SET(x) (((x) << LOCK_TLOCK_SLAVE_LSB) & LOCK_TLOCK_SLAVE_MASK)
+#define LOCK_TUNLOCK_SLAVE_MSB 23
+#define LOCK_TUNLOCK_SLAVE_LSB 16
+#define LOCK_TUNLOCK_SLAVE_MASK 0x00ff0000
+#define LOCK_TUNLOCK_SLAVE_GET(x) (((x) & LOCK_TUNLOCK_SLAVE_MASK) >> LOCK_TUNLOCK_SLAVE_LSB)
+#define LOCK_TUNLOCK_SLAVE_SET(x) (((x) << LOCK_TUNLOCK_SLAVE_LSB) & LOCK_TUNLOCK_SLAVE_MASK)
+#define LOCK_TLOCK_MASTER_MSB 15
+#define LOCK_TLOCK_MASTER_LSB 8
+#define LOCK_TLOCK_MASTER_MASK 0x0000ff00
+#define LOCK_TLOCK_MASTER_GET(x) (((x) & LOCK_TLOCK_MASTER_MASK) >> LOCK_TLOCK_MASTER_LSB)
+#define LOCK_TLOCK_MASTER_SET(x) (((x) << LOCK_TLOCK_MASTER_LSB) & LOCK_TLOCK_MASTER_MASK)
+#define LOCK_TUNLOCK_MASTER_MSB 7
+#define LOCK_TUNLOCK_MASTER_LSB 0
+#define LOCK_TUNLOCK_MASTER_MASK 0x000000ff
+#define LOCK_TUNLOCK_MASTER_GET(x) (((x) & LOCK_TUNLOCK_MASTER_MASK) >> LOCK_TUNLOCK_MASTER_LSB)
+#define LOCK_TUNLOCK_MASTER_SET(x) (((x) << LOCK_TUNLOCK_MASTER_LSB) & LOCK_TUNLOCK_MASTER_MASK)
+
+#define NOLOCK_PRIORITY_ADDRESS 0x00000248
+#define NOLOCK_PRIORITY_OFFSET 0x00000248
+#define NOLOCK_PRIORITY_BITMAP_MSB 31
+#define NOLOCK_PRIORITY_BITMAP_LSB 0
+#define NOLOCK_PRIORITY_BITMAP_MASK 0xffffffff
+#define NOLOCK_PRIORITY_BITMAP_GET(x) (((x) & NOLOCK_PRIORITY_BITMAP_MASK) >> NOLOCK_PRIORITY_BITMAP_LSB)
+#define NOLOCK_PRIORITY_BITMAP_SET(x) (((x) << NOLOCK_PRIORITY_BITMAP_LSB) & NOLOCK_PRIORITY_BITMAP_MASK)
+
+#define WBSYNC_ADDRESS 0x0000024c
+#define WBSYNC_OFFSET 0x0000024c
+#define WBSYNC_BTCLOCK_MSB 31
+#define WBSYNC_BTCLOCK_LSB 0
+#define WBSYNC_BTCLOCK_MASK 0xffffffff
+#define WBSYNC_BTCLOCK_GET(x) (((x) & WBSYNC_BTCLOCK_MASK) >> WBSYNC_BTCLOCK_LSB)
+#define WBSYNC_BTCLOCK_SET(x) (((x) << WBSYNC_BTCLOCK_LSB) & WBSYNC_BTCLOCK_MASK)
+
+#define WBSYNC1_ADDRESS 0x00000250
+#define WBSYNC1_OFFSET 0x00000250
+#define WBSYNC1_BTCLOCK_MSB 31
+#define WBSYNC1_BTCLOCK_LSB 0
+#define WBSYNC1_BTCLOCK_MASK 0xffffffff
+#define WBSYNC1_BTCLOCK_GET(x) (((x) & WBSYNC1_BTCLOCK_MASK) >> WBSYNC1_BTCLOCK_LSB)
+#define WBSYNC1_BTCLOCK_SET(x) (((x) << WBSYNC1_BTCLOCK_LSB) & WBSYNC1_BTCLOCK_MASK)
+
+#define WBSYNC2_ADDRESS 0x00000254
+#define WBSYNC2_OFFSET 0x00000254
+#define WBSYNC2_BTCLOCK_MSB 31
+#define WBSYNC2_BTCLOCK_LSB 0
+#define WBSYNC2_BTCLOCK_MASK 0xffffffff
+#define WBSYNC2_BTCLOCK_GET(x) (((x) & WBSYNC2_BTCLOCK_MASK) >> WBSYNC2_BTCLOCK_LSB)
+#define WBSYNC2_BTCLOCK_SET(x) (((x) << WBSYNC2_BTCLOCK_LSB) & WBSYNC2_BTCLOCK_MASK)
+
+#define WBSYNC3_ADDRESS 0x00000258
+#define WBSYNC3_OFFSET 0x00000258
+#define WBSYNC3_BTCLOCK_MSB 31
+#define WBSYNC3_BTCLOCK_LSB 0
+#define WBSYNC3_BTCLOCK_MASK 0xffffffff
+#define WBSYNC3_BTCLOCK_GET(x) (((x) & WBSYNC3_BTCLOCK_MASK) >> WBSYNC3_BTCLOCK_LSB)
+#define WBSYNC3_BTCLOCK_SET(x) (((x) << WBSYNC3_BTCLOCK_LSB) & WBSYNC3_BTCLOCK_MASK)
+
+#define WB_TIMER_TARGET_ADDRESS 0x0000025c
+#define WB_TIMER_TARGET_OFFSET 0x0000025c
+#define WB_TIMER_TARGET_VALUE_MSB 31
+#define WB_TIMER_TARGET_VALUE_LSB 0
+#define WB_TIMER_TARGET_VALUE_MASK 0xffffffff
+#define WB_TIMER_TARGET_VALUE_GET(x) (((x) & WB_TIMER_TARGET_VALUE_MASK) >> WB_TIMER_TARGET_VALUE_LSB)
+#define WB_TIMER_TARGET_VALUE_SET(x) (((x) << WB_TIMER_TARGET_VALUE_LSB) & WB_TIMER_TARGET_VALUE_MASK)
+
+#define WB_TIMER_SLOP_ADDRESS 0x00000260
+#define WB_TIMER_SLOP_OFFSET 0x00000260
+#define WB_TIMER_SLOP_VALUE_MSB 9
+#define WB_TIMER_SLOP_VALUE_LSB 0
+#define WB_TIMER_SLOP_VALUE_MASK 0x000003ff
+#define WB_TIMER_SLOP_VALUE_GET(x) (((x) & WB_TIMER_SLOP_VALUE_MASK) >> WB_TIMER_SLOP_VALUE_LSB)
+#define WB_TIMER_SLOP_VALUE_SET(x) (((x) << WB_TIMER_SLOP_VALUE_LSB) & WB_TIMER_SLOP_VALUE_MASK)
+
+#define BTCOEX_INT_EN_ADDRESS 0x00000264
+#define BTCOEX_INT_EN_OFFSET 0x00000264
+#define BTCOEX_INT_EN_I2C_RECV_OVERFLOW_MSB 11
+#define BTCOEX_INT_EN_I2C_RECV_OVERFLOW_LSB 11
+#define BTCOEX_INT_EN_I2C_RECV_OVERFLOW_MASK 0x00000800
+#define BTCOEX_INT_EN_I2C_RECV_OVERFLOW_GET(x) (((x) & BTCOEX_INT_EN_I2C_RECV_OVERFLOW_MASK) >> BTCOEX_INT_EN_I2C_RECV_OVERFLOW_LSB)
+#define BTCOEX_INT_EN_I2C_RECV_OVERFLOW_SET(x) (((x) << BTCOEX_INT_EN_I2C_RECV_OVERFLOW_LSB) & BTCOEX_INT_EN_I2C_RECV_OVERFLOW_MASK)
+#define BTCOEX_INT_EN_I2C_TX_FAILED_MSB 10
+#define BTCOEX_INT_EN_I2C_TX_FAILED_LSB 10
+#define BTCOEX_INT_EN_I2C_TX_FAILED_MASK 0x00000400
+#define BTCOEX_INT_EN_I2C_TX_FAILED_GET(x) (((x) & BTCOEX_INT_EN_I2C_TX_FAILED_MASK) >> BTCOEX_INT_EN_I2C_TX_FAILED_LSB)
+#define BTCOEX_INT_EN_I2C_TX_FAILED_SET(x) (((x) << BTCOEX_INT_EN_I2C_TX_FAILED_LSB) & BTCOEX_INT_EN_I2C_TX_FAILED_MASK)
+#define BTCOEX_INT_EN_I2C_MESG_SENT_MSB 9
+#define BTCOEX_INT_EN_I2C_MESG_SENT_LSB 9
+#define BTCOEX_INT_EN_I2C_MESG_SENT_MASK 0x00000200
+#define BTCOEX_INT_EN_I2C_MESG_SENT_GET(x) (((x) & BTCOEX_INT_EN_I2C_MESG_SENT_MASK) >> BTCOEX_INT_EN_I2C_MESG_SENT_LSB)
+#define BTCOEX_INT_EN_I2C_MESG_SENT_SET(x) (((x) << BTCOEX_INT_EN_I2C_MESG_SENT_LSB) & BTCOEX_INT_EN_I2C_MESG_SENT_MASK)
+#define BTCOEX_INT_EN_ST_MESG_RECV_MSB 8
+#define BTCOEX_INT_EN_ST_MESG_RECV_LSB 8
+#define BTCOEX_INT_EN_ST_MESG_RECV_MASK 0x00000100
+#define BTCOEX_INT_EN_ST_MESG_RECV_GET(x) (((x) & BTCOEX_INT_EN_ST_MESG_RECV_MASK) >> BTCOEX_INT_EN_ST_MESG_RECV_LSB)
+#define BTCOEX_INT_EN_ST_MESG_RECV_SET(x) (((x) << BTCOEX_INT_EN_ST_MESG_RECV_LSB) & BTCOEX_INT_EN_ST_MESG_RECV_MASK)
+#define BTCOEX_INT_EN_WB_TIMER_MSB 7
+#define BTCOEX_INT_EN_WB_TIMER_LSB 7
+#define BTCOEX_INT_EN_WB_TIMER_MASK 0x00000080
+#define BTCOEX_INT_EN_WB_TIMER_GET(x) (((x) & BTCOEX_INT_EN_WB_TIMER_MASK) >> BTCOEX_INT_EN_WB_TIMER_LSB)
+#define BTCOEX_INT_EN_WB_TIMER_SET(x) (((x) << BTCOEX_INT_EN_WB_TIMER_LSB) & BTCOEX_INT_EN_WB_TIMER_MASK)
+#define BTCOEX_INT_EN_NOSYNC_MSB 4
+#define BTCOEX_INT_EN_NOSYNC_LSB 4
+#define BTCOEX_INT_EN_NOSYNC_MASK 0x00000010
+#define BTCOEX_INT_EN_NOSYNC_GET(x) (((x) & BTCOEX_INT_EN_NOSYNC_MASK) >> BTCOEX_INT_EN_NOSYNC_LSB)
+#define BTCOEX_INT_EN_NOSYNC_SET(x) (((x) << BTCOEX_INT_EN_NOSYNC_LSB) & BTCOEX_INT_EN_NOSYNC_MASK)
+#define BTCOEX_INT_EN_SYNC_MSB 3
+#define BTCOEX_INT_EN_SYNC_LSB 3
+#define BTCOEX_INT_EN_SYNC_MASK 0x00000008
+#define BTCOEX_INT_EN_SYNC_GET(x) (((x) & BTCOEX_INT_EN_SYNC_MASK) >> BTCOEX_INT_EN_SYNC_LSB)
+#define BTCOEX_INT_EN_SYNC_SET(x) (((x) << BTCOEX_INT_EN_SYNC_LSB) & BTCOEX_INT_EN_SYNC_MASK)
+#define BTCOEX_INT_EN_END_MSB 2
+#define BTCOEX_INT_EN_END_LSB 2
+#define BTCOEX_INT_EN_END_MASK 0x00000004
+#define BTCOEX_INT_EN_END_GET(x) (((x) & BTCOEX_INT_EN_END_MASK) >> BTCOEX_INT_EN_END_LSB)
+#define BTCOEX_INT_EN_END_SET(x) (((x) << BTCOEX_INT_EN_END_LSB) & BTCOEX_INT_EN_END_MASK)
+#define BTCOEX_INT_EN_FRAME_CNT_MSB 1
+#define BTCOEX_INT_EN_FRAME_CNT_LSB 1
+#define BTCOEX_INT_EN_FRAME_CNT_MASK 0x00000002
+#define BTCOEX_INT_EN_FRAME_CNT_GET(x) (((x) & BTCOEX_INT_EN_FRAME_CNT_MASK) >> BTCOEX_INT_EN_FRAME_CNT_LSB)
+#define BTCOEX_INT_EN_FRAME_CNT_SET(x) (((x) << BTCOEX_INT_EN_FRAME_CNT_LSB) & BTCOEX_INT_EN_FRAME_CNT_MASK)
+#define BTCOEX_INT_EN_CLK_CNT_MSB 0
+#define BTCOEX_INT_EN_CLK_CNT_LSB 0
+#define BTCOEX_INT_EN_CLK_CNT_MASK 0x00000001
+#define BTCOEX_INT_EN_CLK_CNT_GET(x) (((x) & BTCOEX_INT_EN_CLK_CNT_MASK) >> BTCOEX_INT_EN_CLK_CNT_LSB)
+#define BTCOEX_INT_EN_CLK_CNT_SET(x) (((x) << BTCOEX_INT_EN_CLK_CNT_LSB) & BTCOEX_INT_EN_CLK_CNT_MASK)
+
+#define BTCOEX_INT_STAT_ADDRESS 0x00000268
+#define BTCOEX_INT_STAT_OFFSET 0x00000268
+#define BTCOEX_INT_STAT_I2C_RECV_OVERFLOW_MSB 11
+#define BTCOEX_INT_STAT_I2C_RECV_OVERFLOW_LSB 11
+#define BTCOEX_INT_STAT_I2C_RECV_OVERFLOW_MASK 0x00000800
+#define BTCOEX_INT_STAT_I2C_RECV_OVERFLOW_GET(x) (((x) & BTCOEX_INT_STAT_I2C_RECV_OVERFLOW_MASK) >> BTCOEX_INT_STAT_I2C_RECV_OVERFLOW_LSB)
+#define BTCOEX_INT_STAT_I2C_RECV_OVERFLOW_SET(x) (((x) << BTCOEX_INT_STAT_I2C_RECV_OVERFLOW_LSB) & BTCOEX_INT_STAT_I2C_RECV_OVERFLOW_MASK)
+#define BTCOEX_INT_STAT_I2C_TX_FAILED_MSB 10
+#define BTCOEX_INT_STAT_I2C_TX_FAILED_LSB 10
+#define BTCOEX_INT_STAT_I2C_TX_FAILED_MASK 0x00000400
+#define BTCOEX_INT_STAT_I2C_TX_FAILED_GET(x) (((x) & BTCOEX_INT_STAT_I2C_TX_FAILED_MASK) >> BTCOEX_INT_STAT_I2C_TX_FAILED_LSB)
+#define BTCOEX_INT_STAT_I2C_TX_FAILED_SET(x) (((x) << BTCOEX_INT_STAT_I2C_TX_FAILED_LSB) & BTCOEX_INT_STAT_I2C_TX_FAILED_MASK)
+#define BTCOEX_INT_STAT_I2C_MESG_SENT_MSB 9
+#define BTCOEX_INT_STAT_I2C_MESG_SENT_LSB 9
+#define BTCOEX_INT_STAT_I2C_MESG_SENT_MASK 0x00000200
+#define BTCOEX_INT_STAT_I2C_MESG_SENT_GET(x) (((x) & BTCOEX_INT_STAT_I2C_MESG_SENT_MASK) >> BTCOEX_INT_STAT_I2C_MESG_SENT_LSB)
+#define BTCOEX_INT_STAT_I2C_MESG_SENT_SET(x) (((x) << BTCOEX_INT_STAT_I2C_MESG_SENT_LSB) & BTCOEX_INT_STAT_I2C_MESG_SENT_MASK)
+#define BTCOEX_INT_STAT_I2C_MESG_RECV_MSB 8
+#define BTCOEX_INT_STAT_I2C_MESG_RECV_LSB 8
+#define BTCOEX_INT_STAT_I2C_MESG_RECV_MASK 0x00000100
+#define BTCOEX_INT_STAT_I2C_MESG_RECV_GET(x) (((x) & BTCOEX_INT_STAT_I2C_MESG_RECV_MASK) >> BTCOEX_INT_STAT_I2C_MESG_RECV_LSB)
+#define BTCOEX_INT_STAT_I2C_MESG_RECV_SET(x) (((x) << BTCOEX_INT_STAT_I2C_MESG_RECV_LSB) & BTCOEX_INT_STAT_I2C_MESG_RECV_MASK)
+#define BTCOEX_INT_STAT_WB_TIMER_MSB 7
+#define BTCOEX_INT_STAT_WB_TIMER_LSB 7
+#define BTCOEX_INT_STAT_WB_TIMER_MASK 0x00000080
+#define BTCOEX_INT_STAT_WB_TIMER_GET(x) (((x) & BTCOEX_INT_STAT_WB_TIMER_MASK) >> BTCOEX_INT_STAT_WB_TIMER_LSB)
+#define BTCOEX_INT_STAT_WB_TIMER_SET(x) (((x) << BTCOEX_INT_STAT_WB_TIMER_LSB) & BTCOEX_INT_STAT_WB_TIMER_MASK)
+#define BTCOEX_INT_STAT_BTPRIORITY_STOMP_MSB 6
+#define BTCOEX_INT_STAT_BTPRIORITY_STOMP_LSB 6
+#define BTCOEX_INT_STAT_BTPRIORITY_STOMP_MASK 0x00000040
+#define BTCOEX_INT_STAT_BTPRIORITY_STOMP_GET(x) (((x) & BTCOEX_INT_STAT_BTPRIORITY_STOMP_MASK) >> BTCOEX_INT_STAT_BTPRIORITY_STOMP_LSB)
+#define BTCOEX_INT_STAT_BTPRIORITY_STOMP_SET(x) (((x) << BTCOEX_INT_STAT_BTPRIORITY_STOMP_LSB) & BTCOEX_INT_STAT_BTPRIORITY_STOMP_MASK)
+#define BTCOEX_INT_STAT_BTPRIORITY_MSB 5
+#define BTCOEX_INT_STAT_BTPRIORITY_LSB 5
+#define BTCOEX_INT_STAT_BTPRIORITY_MASK 0x00000020
+#define BTCOEX_INT_STAT_BTPRIORITY_GET(x) (((x) & BTCOEX_INT_STAT_BTPRIORITY_MASK) >> BTCOEX_INT_STAT_BTPRIORITY_LSB)
+#define BTCOEX_INT_STAT_BTPRIORITY_SET(x) (((x) << BTCOEX_INT_STAT_BTPRIORITY_LSB) & BTCOEX_INT_STAT_BTPRIORITY_MASK)
+#define BTCOEX_INT_STAT_NOSYNC_MSB 4
+#define BTCOEX_INT_STAT_NOSYNC_LSB 4
+#define BTCOEX_INT_STAT_NOSYNC_MASK 0x00000010
+#define BTCOEX_INT_STAT_NOSYNC_GET(x) (((x) & BTCOEX_INT_STAT_NOSYNC_MASK) >> BTCOEX_INT_STAT_NOSYNC_LSB)
+#define BTCOEX_INT_STAT_NOSYNC_SET(x) (((x) << BTCOEX_INT_STAT_NOSYNC_LSB) & BTCOEX_INT_STAT_NOSYNC_MASK)
+#define BTCOEX_INT_STAT_SYNC_MSB 3
+#define BTCOEX_INT_STAT_SYNC_LSB 3
+#define BTCOEX_INT_STAT_SYNC_MASK 0x00000008
+#define BTCOEX_INT_STAT_SYNC_GET(x) (((x) & BTCOEX_INT_STAT_SYNC_MASK) >> BTCOEX_INT_STAT_SYNC_LSB)
+#define BTCOEX_INT_STAT_SYNC_SET(x) (((x) << BTCOEX_INT_STAT_SYNC_LSB) & BTCOEX_INT_STAT_SYNC_MASK)
+#define BTCOEX_INT_STAT_END_MSB 2
+#define BTCOEX_INT_STAT_END_LSB 2
+#define BTCOEX_INT_STAT_END_MASK 0x00000004
+#define BTCOEX_INT_STAT_END_GET(x) (((x) & BTCOEX_INT_STAT_END_MASK) >> BTCOEX_INT_STAT_END_LSB)
+#define BTCOEX_INT_STAT_END_SET(x) (((x) << BTCOEX_INT_STAT_END_LSB) & BTCOEX_INT_STAT_END_MASK)
+#define BTCOEX_INT_STAT_FRAME_CNT_MSB 1
+#define BTCOEX_INT_STAT_FRAME_CNT_LSB 1
+#define BTCOEX_INT_STAT_FRAME_CNT_MASK 0x00000002
+#define BTCOEX_INT_STAT_FRAME_CNT_GET(x) (((x) & BTCOEX_INT_STAT_FRAME_CNT_MASK) >> BTCOEX_INT_STAT_FRAME_CNT_LSB)
+#define BTCOEX_INT_STAT_FRAME_CNT_SET(x) (((x) << BTCOEX_INT_STAT_FRAME_CNT_LSB) & BTCOEX_INT_STAT_FRAME_CNT_MASK)
+#define BTCOEX_INT_STAT_CLK_CNT_MSB 0
+#define BTCOEX_INT_STAT_CLK_CNT_LSB 0
+#define BTCOEX_INT_STAT_CLK_CNT_MASK 0x00000001
+#define BTCOEX_INT_STAT_CLK_CNT_GET(x) (((x) & BTCOEX_INT_STAT_CLK_CNT_MASK) >> BTCOEX_INT_STAT_CLK_CNT_LSB)
+#define BTCOEX_INT_STAT_CLK_CNT_SET(x) (((x) << BTCOEX_INT_STAT_CLK_CNT_LSB) & BTCOEX_INT_STAT_CLK_CNT_MASK)
+
+#define BTPRIORITY_INT_EN_ADDRESS 0x0000026c
+#define BTPRIORITY_INT_EN_OFFSET 0x0000026c
+#define BTPRIORITY_INT_EN_BITMAP_MSB 31
+#define BTPRIORITY_INT_EN_BITMAP_LSB 0
+#define BTPRIORITY_INT_EN_BITMAP_MASK 0xffffffff
+#define BTPRIORITY_INT_EN_BITMAP_GET(x) (((x) & BTPRIORITY_INT_EN_BITMAP_MASK) >> BTPRIORITY_INT_EN_BITMAP_LSB)
+#define BTPRIORITY_INT_EN_BITMAP_SET(x) (((x) << BTPRIORITY_INT_EN_BITMAP_LSB) & BTPRIORITY_INT_EN_BITMAP_MASK)
+
+#define BTPRIORITY_INT_STAT_ADDRESS 0x00000270
+#define BTPRIORITY_INT_STAT_OFFSET 0x00000270
+#define BTPRIORITY_INT_STAT_BITMAP_MSB 31
+#define BTPRIORITY_INT_STAT_BITMAP_LSB 0
+#define BTPRIORITY_INT_STAT_BITMAP_MASK 0xffffffff
+#define BTPRIORITY_INT_STAT_BITMAP_GET(x) (((x) & BTPRIORITY_INT_STAT_BITMAP_MASK) >> BTPRIORITY_INT_STAT_BITMAP_LSB)
+#define BTPRIORITY_INT_STAT_BITMAP_SET(x) (((x) << BTPRIORITY_INT_STAT_BITMAP_LSB) & BTPRIORITY_INT_STAT_BITMAP_MASK)
+
+#define BTPRIORITY_STOMP_INT_EN_ADDRESS 0x00000274
+#define BTPRIORITY_STOMP_INT_EN_OFFSET 0x00000274
+#define BTPRIORITY_STOMP_INT_EN_BITMAP_MSB 31
+#define BTPRIORITY_STOMP_INT_EN_BITMAP_LSB 0
+#define BTPRIORITY_STOMP_INT_EN_BITMAP_MASK 0xffffffff
+#define BTPRIORITY_STOMP_INT_EN_BITMAP_GET(x) (((x) & BTPRIORITY_STOMP_INT_EN_BITMAP_MASK) >> BTPRIORITY_STOMP_INT_EN_BITMAP_LSB)
+#define BTPRIORITY_STOMP_INT_EN_BITMAP_SET(x) (((x) << BTPRIORITY_STOMP_INT_EN_BITMAP_LSB) & BTPRIORITY_STOMP_INT_EN_BITMAP_MASK)
+
+#define BTPRIORITY_STOMP_INT_STAT_ADDRESS 0x00000278
+#define BTPRIORITY_STOMP_INT_STAT_OFFSET 0x00000278
+#define BTPRIORITY_STOMP_INT_STAT_BITMAP_MSB 31
+#define BTPRIORITY_STOMP_INT_STAT_BITMAP_LSB 0
+#define BTPRIORITY_STOMP_INT_STAT_BITMAP_MASK 0xffffffff
+#define BTPRIORITY_STOMP_INT_STAT_BITMAP_GET(x) (((x) & BTPRIORITY_STOMP_INT_STAT_BITMAP_MASK) >> BTPRIORITY_STOMP_INT_STAT_BITMAP_LSB)
+#define BTPRIORITY_STOMP_INT_STAT_BITMAP_SET(x) (((x) << BTPRIORITY_STOMP_INT_STAT_BITMAP_LSB) & BTPRIORITY_STOMP_INT_STAT_BITMAP_MASK)
+
+#define MAC_PCU_BMISS_TIMEOUT_ADDRESS 0x0000027c
+#define MAC_PCU_BMISS_TIMEOUT_OFFSET 0x0000027c
+#define MAC_PCU_BMISS_TIMEOUT_ENABLE_MSB 24
+#define MAC_PCU_BMISS_TIMEOUT_ENABLE_LSB 24
+#define MAC_PCU_BMISS_TIMEOUT_ENABLE_MASK 0x01000000
+#define MAC_PCU_BMISS_TIMEOUT_ENABLE_GET(x) (((x) & MAC_PCU_BMISS_TIMEOUT_ENABLE_MASK) >> MAC_PCU_BMISS_TIMEOUT_ENABLE_LSB)
+#define MAC_PCU_BMISS_TIMEOUT_ENABLE_SET(x) (((x) << MAC_PCU_BMISS_TIMEOUT_ENABLE_LSB) & MAC_PCU_BMISS_TIMEOUT_ENABLE_MASK)
+#define MAC_PCU_BMISS_TIMEOUT_VALUE_MSB 23
+#define MAC_PCU_BMISS_TIMEOUT_VALUE_LSB 0
+#define MAC_PCU_BMISS_TIMEOUT_VALUE_MASK 0x00ffffff
+#define MAC_PCU_BMISS_TIMEOUT_VALUE_GET(x) (((x) & MAC_PCU_BMISS_TIMEOUT_VALUE_MASK) >> MAC_PCU_BMISS_TIMEOUT_VALUE_LSB)
+#define MAC_PCU_BMISS_TIMEOUT_VALUE_SET(x) (((x) << MAC_PCU_BMISS_TIMEOUT_VALUE_LSB) & MAC_PCU_BMISS_TIMEOUT_VALUE_MASK)
+
+#define MAC_PCU_CAB_AWAKE_ADDRESS 0x00000280
+#define MAC_PCU_CAB_AWAKE_OFFSET 0x00000280
+#define MAC_PCU_CAB_AWAKE_ENABLE_MSB 16
+#define MAC_PCU_CAB_AWAKE_ENABLE_LSB 16
+#define MAC_PCU_CAB_AWAKE_ENABLE_MASK 0x00010000
+#define MAC_PCU_CAB_AWAKE_ENABLE_GET(x) (((x) & MAC_PCU_CAB_AWAKE_ENABLE_MASK) >> MAC_PCU_CAB_AWAKE_ENABLE_LSB)
+#define MAC_PCU_CAB_AWAKE_ENABLE_SET(x) (((x) << MAC_PCU_CAB_AWAKE_ENABLE_LSB) & MAC_PCU_CAB_AWAKE_ENABLE_MASK)
+#define MAC_PCU_CAB_AWAKE_DURATION_MSB 15
+#define MAC_PCU_CAB_AWAKE_DURATION_LSB 0
+#define MAC_PCU_CAB_AWAKE_DURATION_MASK 0x0000ffff
+#define MAC_PCU_CAB_AWAKE_DURATION_GET(x) (((x) & MAC_PCU_CAB_AWAKE_DURATION_MASK) >> MAC_PCU_CAB_AWAKE_DURATION_LSB)
+#define MAC_PCU_CAB_AWAKE_DURATION_SET(x) (((x) << MAC_PCU_CAB_AWAKE_DURATION_LSB) & MAC_PCU_CAB_AWAKE_DURATION_MASK)
+
+#define LP_PERF_COUNTER_ADDRESS 0x00000284
+#define LP_PERF_COUNTER_OFFSET 0x00000284
+#define LP_PERF_COUNTER_EN_MSB 0
+#define LP_PERF_COUNTER_EN_LSB 0
+#define LP_PERF_COUNTER_EN_MASK 0x00000001
+#define LP_PERF_COUNTER_EN_GET(x) (((x) & LP_PERF_COUNTER_EN_MASK) >> LP_PERF_COUNTER_EN_LSB)
+#define LP_PERF_COUNTER_EN_SET(x) (((x) << LP_PERF_COUNTER_EN_LSB) & LP_PERF_COUNTER_EN_MASK)
+
+#define LP_PERF_LIGHT_SLEEP_ADDRESS 0x00000288
+#define LP_PERF_LIGHT_SLEEP_OFFSET 0x00000288
+#define LP_PERF_LIGHT_SLEEP_CNT_MSB 31
+#define LP_PERF_LIGHT_SLEEP_CNT_LSB 0
+#define LP_PERF_LIGHT_SLEEP_CNT_MASK 0xffffffff
+#define LP_PERF_LIGHT_SLEEP_CNT_GET(x) (((x) & LP_PERF_LIGHT_SLEEP_CNT_MASK) >> LP_PERF_LIGHT_SLEEP_CNT_LSB)
+#define LP_PERF_LIGHT_SLEEP_CNT_SET(x) (((x) << LP_PERF_LIGHT_SLEEP_CNT_LSB) & LP_PERF_LIGHT_SLEEP_CNT_MASK)
+
+#define LP_PERF_DEEP_SLEEP_ADDRESS 0x0000028c
+#define LP_PERF_DEEP_SLEEP_OFFSET 0x0000028c
+#define LP_PERF_DEEP_SLEEP_CNT_MSB 31
+#define LP_PERF_DEEP_SLEEP_CNT_LSB 0
+#define LP_PERF_DEEP_SLEEP_CNT_MASK 0xffffffff
+#define LP_PERF_DEEP_SLEEP_CNT_GET(x) (((x) & LP_PERF_DEEP_SLEEP_CNT_MASK) >> LP_PERF_DEEP_SLEEP_CNT_LSB)
+#define LP_PERF_DEEP_SLEEP_CNT_SET(x) (((x) << LP_PERF_DEEP_SLEEP_CNT_LSB) & LP_PERF_DEEP_SLEEP_CNT_MASK)
+
+#define LP_PERF_ON_ADDRESS 0x00000290
+#define LP_PERF_ON_OFFSET 0x00000290
+#define LP_PERF_ON_CNT_MSB 31
+#define LP_PERF_ON_CNT_LSB 0
+#define LP_PERF_ON_CNT_MASK 0xffffffff
+#define LP_PERF_ON_CNT_GET(x) (((x) & LP_PERF_ON_CNT_MASK) >> LP_PERF_ON_CNT_LSB)
+#define LP_PERF_ON_CNT_SET(x) (((x) << LP_PERF_ON_CNT_LSB) & LP_PERF_ON_CNT_MASK)
+
+#define ST_64_BIT_ADDRESS 0x00000294
+#define ST_64_BIT_OFFSET 0x00000294
+#define ST_64_BIT_TIMEOUT_MSB 26
+#define ST_64_BIT_TIMEOUT_LSB 9
+#define ST_64_BIT_TIMEOUT_MASK 0x07fffe00
+#define ST_64_BIT_TIMEOUT_GET(x) (((x) & ST_64_BIT_TIMEOUT_MASK) >> ST_64_BIT_TIMEOUT_LSB)
+#define ST_64_BIT_TIMEOUT_SET(x) (((x) << ST_64_BIT_TIMEOUT_LSB) & ST_64_BIT_TIMEOUT_MASK)
+#define ST_64_BIT_REQ_ACK_NOT_PULLED_DOWN_MSB 8
+#define ST_64_BIT_REQ_ACK_NOT_PULLED_DOWN_LSB 8
+#define ST_64_BIT_REQ_ACK_NOT_PULLED_DOWN_MASK 0x00000100
+#define ST_64_BIT_REQ_ACK_NOT_PULLED_DOWN_GET(x) (((x) & ST_64_BIT_REQ_ACK_NOT_PULLED_DOWN_MASK) >> ST_64_BIT_REQ_ACK_NOT_PULLED_DOWN_LSB)
+#define ST_64_BIT_REQ_ACK_NOT_PULLED_DOWN_SET(x) (((x) << ST_64_BIT_REQ_ACK_NOT_PULLED_DOWN_LSB) & ST_64_BIT_REQ_ACK_NOT_PULLED_DOWN_MASK)
+#define ST_64_BIT_DRIVE_MODE_MSB 7
+#define ST_64_BIT_DRIVE_MODE_LSB 7
+#define ST_64_BIT_DRIVE_MODE_MASK 0x00000080
+#define ST_64_BIT_DRIVE_MODE_GET(x) (((x) & ST_64_BIT_DRIVE_MODE_MASK) >> ST_64_BIT_DRIVE_MODE_LSB)
+#define ST_64_BIT_DRIVE_MODE_SET(x) (((x) << ST_64_BIT_DRIVE_MODE_LSB) & ST_64_BIT_DRIVE_MODE_MASK)
+#define ST_64_BIT_CLOCK_GATE_MSB 6
+#define ST_64_BIT_CLOCK_GATE_LSB 6
+#define ST_64_BIT_CLOCK_GATE_MASK 0x00000040
+#define ST_64_BIT_CLOCK_GATE_GET(x) (((x) & ST_64_BIT_CLOCK_GATE_MASK) >> ST_64_BIT_CLOCK_GATE_LSB)
+#define ST_64_BIT_CLOCK_GATE_SET(x) (((x) << ST_64_BIT_CLOCK_GATE_LSB) & ST_64_BIT_CLOCK_GATE_MASK)
+#define ST_64_BIT_SOC_CLK_DIVIDE_RATIO_MSB 5
+#define ST_64_BIT_SOC_CLK_DIVIDE_RATIO_LSB 1
+#define ST_64_BIT_SOC_CLK_DIVIDE_RATIO_MASK 0x0000003e
+#define ST_64_BIT_SOC_CLK_DIVIDE_RATIO_GET(x) (((x) & ST_64_BIT_SOC_CLK_DIVIDE_RATIO_MASK) >> ST_64_BIT_SOC_CLK_DIVIDE_RATIO_LSB)
+#define ST_64_BIT_SOC_CLK_DIVIDE_RATIO_SET(x) (((x) << ST_64_BIT_SOC_CLK_DIVIDE_RATIO_LSB) & ST_64_BIT_SOC_CLK_DIVIDE_RATIO_MASK)
+#define ST_64_BIT_MODE_MSB 0
+#define ST_64_BIT_MODE_LSB 0
+#define ST_64_BIT_MODE_MASK 0x00000001
+#define ST_64_BIT_MODE_GET(x) (((x) & ST_64_BIT_MODE_MASK) >> ST_64_BIT_MODE_LSB)
+#define ST_64_BIT_MODE_SET(x) (((x) << ST_64_BIT_MODE_LSB) & ST_64_BIT_MODE_MASK)
+
+#define MESSAGE_WR_ADDRESS 0x00000298
+#define MESSAGE_WR_OFFSET 0x00000298
+#define MESSAGE_WR_TYPE_MSB 31
+#define MESSAGE_WR_TYPE_LSB 0
+#define MESSAGE_WR_TYPE_MASK 0xffffffff
+#define MESSAGE_WR_TYPE_GET(x) (((x) & MESSAGE_WR_TYPE_MASK) >> MESSAGE_WR_TYPE_LSB)
+#define MESSAGE_WR_TYPE_SET(x) (((x) << MESSAGE_WR_TYPE_LSB) & MESSAGE_WR_TYPE_MASK)
+
+#define MESSAGE_WR_P_ADDRESS 0x0000029c
+#define MESSAGE_WR_P_OFFSET 0x0000029c
+#define MESSAGE_WR_P_PARAMETER_MSB 31
+#define MESSAGE_WR_P_PARAMETER_LSB 0
+#define MESSAGE_WR_P_PARAMETER_MASK 0xffffffff
+#define MESSAGE_WR_P_PARAMETER_GET(x) (((x) & MESSAGE_WR_P_PARAMETER_MASK) >> MESSAGE_WR_P_PARAMETER_LSB)
+#define MESSAGE_WR_P_PARAMETER_SET(x) (((x) << MESSAGE_WR_P_PARAMETER_LSB) & MESSAGE_WR_P_PARAMETER_MASK)
+
+#define MESSAGE_RD_ADDRESS 0x000002a0
+#define MESSAGE_RD_OFFSET 0x000002a0
+#define MESSAGE_RD_TYPE_MSB 31
+#define MESSAGE_RD_TYPE_LSB 0
+#define MESSAGE_RD_TYPE_MASK 0xffffffff
+#define MESSAGE_RD_TYPE_GET(x) (((x) & MESSAGE_RD_TYPE_MASK) >> MESSAGE_RD_TYPE_LSB)
+#define MESSAGE_RD_TYPE_SET(x) (((x) << MESSAGE_RD_TYPE_LSB) & MESSAGE_RD_TYPE_MASK)
+
+#define MESSAGE_RD_P_ADDRESS 0x000002a4
+#define MESSAGE_RD_P_OFFSET 0x000002a4
+#define MESSAGE_RD_P_PARAMETER_MSB 31
+#define MESSAGE_RD_P_PARAMETER_LSB 0
+#define MESSAGE_RD_P_PARAMETER_MASK 0xffffffff
+#define MESSAGE_RD_P_PARAMETER_GET(x) (((x) & MESSAGE_RD_P_PARAMETER_MASK) >> MESSAGE_RD_P_PARAMETER_LSB)
+#define MESSAGE_RD_P_PARAMETER_SET(x) (((x) << MESSAGE_RD_P_PARAMETER_LSB) & MESSAGE_RD_P_PARAMETER_MASK)
+
+#define CHIP_MODE_ADDRESS 0x000002a8
+#define CHIP_MODE_OFFSET 0x000002a8
+#define CHIP_MODE_BIT_MSB 1
+#define CHIP_MODE_BIT_LSB 0
+#define CHIP_MODE_BIT_MASK 0x00000003
+#define CHIP_MODE_BIT_GET(x) (((x) & CHIP_MODE_BIT_MASK) >> CHIP_MODE_BIT_LSB)
+#define CHIP_MODE_BIT_SET(x) (((x) << CHIP_MODE_BIT_LSB) & CHIP_MODE_BIT_MASK)
+
+#define CLK_REQ_FALL_EDGE_ADDRESS 0x000002ac
+#define CLK_REQ_FALL_EDGE_OFFSET 0x000002ac
+#define CLK_REQ_FALL_EDGE_EN_MSB 31
+#define CLK_REQ_FALL_EDGE_EN_LSB 31
+#define CLK_REQ_FALL_EDGE_EN_MASK 0x80000000
+#define CLK_REQ_FALL_EDGE_EN_GET(x) (((x) & CLK_REQ_FALL_EDGE_EN_MASK) >> CLK_REQ_FALL_EDGE_EN_LSB)
+#define CLK_REQ_FALL_EDGE_EN_SET(x) (((x) << CLK_REQ_FALL_EDGE_EN_LSB) & CLK_REQ_FALL_EDGE_EN_MASK)
+#define CLK_REQ_FALL_EDGE_DELAY_MSB 7
+#define CLK_REQ_FALL_EDGE_DELAY_LSB 0
+#define CLK_REQ_FALL_EDGE_DELAY_MASK 0x000000ff
+#define CLK_REQ_FALL_EDGE_DELAY_GET(x) (((x) & CLK_REQ_FALL_EDGE_DELAY_MASK) >> CLK_REQ_FALL_EDGE_DELAY_LSB)
+#define CLK_REQ_FALL_EDGE_DELAY_SET(x) (((x) << CLK_REQ_FALL_EDGE_DELAY_LSB) & CLK_REQ_FALL_EDGE_DELAY_MASK)
+
+#define OTP_ADDRESS 0x000002b0
+#define OTP_OFFSET 0x000002b0
+#define OTP_LDO25_EN_MSB 1
+#define OTP_LDO25_EN_LSB 1
+#define OTP_LDO25_EN_MASK 0x00000002
+#define OTP_LDO25_EN_GET(x) (((x) & OTP_LDO25_EN_MASK) >> OTP_LDO25_EN_LSB)
+#define OTP_LDO25_EN_SET(x) (((x) << OTP_LDO25_EN_LSB) & OTP_LDO25_EN_MASK)
+#define OTP_VDD12_EN_MSB 0
+#define OTP_VDD12_EN_LSB 0
+#define OTP_VDD12_EN_MASK 0x00000001
+#define OTP_VDD12_EN_GET(x) (((x) & OTP_VDD12_EN_MASK) >> OTP_VDD12_EN_LSB)
+#define OTP_VDD12_EN_SET(x) (((x) << OTP_VDD12_EN_LSB) & OTP_VDD12_EN_MASK)
+
+#define OTP_STATUS_ADDRESS 0x000002b4
+#define OTP_STATUS_OFFSET 0x000002b4
+#define OTP_STATUS_LDO25_EN_READY_MSB 1
+#define OTP_STATUS_LDO25_EN_READY_LSB 1
+#define OTP_STATUS_LDO25_EN_READY_MASK 0x00000002
+#define OTP_STATUS_LDO25_EN_READY_GET(x) (((x) & OTP_STATUS_LDO25_EN_READY_MASK) >> OTP_STATUS_LDO25_EN_READY_LSB)
+#define OTP_STATUS_LDO25_EN_READY_SET(x) (((x) << OTP_STATUS_LDO25_EN_READY_LSB) & OTP_STATUS_LDO25_EN_READY_MASK)
+#define OTP_STATUS_VDD12_EN_READY_MSB 0
+#define OTP_STATUS_VDD12_EN_READY_LSB 0
+#define OTP_STATUS_VDD12_EN_READY_MASK 0x00000001
+#define OTP_STATUS_VDD12_EN_READY_GET(x) (((x) & OTP_STATUS_VDD12_EN_READY_MASK) >> OTP_STATUS_VDD12_EN_READY_LSB)
+#define OTP_STATUS_VDD12_EN_READY_SET(x) (((x) << OTP_STATUS_VDD12_EN_READY_LSB) & OTP_STATUS_VDD12_EN_READY_MASK)
+
+#define PMU_ADDRESS 0x000002b8
+#define PMU_OFFSET 0x000002b8
+#define PMU_REG_WAKEUP_TIME_SEL_MSB 1
+#define PMU_REG_WAKEUP_TIME_SEL_LSB 0
+#define PMU_REG_WAKEUP_TIME_SEL_MASK 0x00000003
+#define PMU_REG_WAKEUP_TIME_SEL_GET(x) (((x) & PMU_REG_WAKEUP_TIME_SEL_MASK) >> PMU_REG_WAKEUP_TIME_SEL_LSB)
+#define PMU_REG_WAKEUP_TIME_SEL_SET(x) (((x) << PMU_REG_WAKEUP_TIME_SEL_LSB) & PMU_REG_WAKEUP_TIME_SEL_MASK)
+
+#define PMU_CONFIG_ADDRESS 0x000002c0
+#define PMU_CONFIG_OFFSET 0x000002c0
+#define PMU_CONFIG_VALUE_MSB 15
+#define PMU_CONFIG_VALUE_LSB 0
+#define PMU_CONFIG_VALUE_MASK 0x0000ffff
+#define PMU_CONFIG_VALUE_GET(x) (((x) & PMU_CONFIG_VALUE_MASK) >> PMU_CONFIG_VALUE_LSB)
+#define PMU_CONFIG_VALUE_SET(x) (((x) << PMU_CONFIG_VALUE_LSB) & PMU_CONFIG_VALUE_MASK)
+
+#define PMU_BYPASS_ADDRESS 0x000002c8
+#define PMU_BYPASS_OFFSET 0x000002c8
+#define PMU_BYPASS_SWREG_MSB 2
+#define PMU_BYPASS_SWREG_LSB 2
+#define PMU_BYPASS_SWREG_MASK 0x00000004
+#define PMU_BYPASS_SWREG_GET(x) (((x) & PMU_BYPASS_SWREG_MASK) >> PMU_BYPASS_SWREG_LSB)
+#define PMU_BYPASS_SWREG_SET(x) (((x) << PMU_BYPASS_SWREG_LSB) & PMU_BYPASS_SWREG_MASK)
+#define PMU_BYPASS_DREG_MSB 1
+#define PMU_BYPASS_DREG_LSB 1
+#define PMU_BYPASS_DREG_MASK 0x00000002
+#define PMU_BYPASS_DREG_GET(x) (((x) & PMU_BYPASS_DREG_MASK) >> PMU_BYPASS_DREG_LSB)
+#define PMU_BYPASS_DREG_SET(x) (((x) << PMU_BYPASS_DREG_LSB) & PMU_BYPASS_DREG_MASK)
+#define PMU_BYPASS_PAREG_MSB 0
+#define PMU_BYPASS_PAREG_LSB 0
+#define PMU_BYPASS_PAREG_MASK 0x00000001
+#define PMU_BYPASS_PAREG_GET(x) (((x) & PMU_BYPASS_PAREG_MASK) >> PMU_BYPASS_PAREG_LSB)
+#define PMU_BYPASS_PAREG_SET(x) (((x) << PMU_BYPASS_PAREG_LSB) & PMU_BYPASS_PAREG_MASK)
+
+#define MAC_PCU_TSF2_L32_ADDRESS 0x000002cc
+#define MAC_PCU_TSF2_L32_OFFSET 0x000002cc
+#define MAC_PCU_TSF2_L32_VALUE_MSB 31
+#define MAC_PCU_TSF2_L32_VALUE_LSB 0
+#define MAC_PCU_TSF2_L32_VALUE_MASK 0xffffffff
+#define MAC_PCU_TSF2_L32_VALUE_GET(x) (((x) & MAC_PCU_TSF2_L32_VALUE_MASK) >> MAC_PCU_TSF2_L32_VALUE_LSB)
+#define MAC_PCU_TSF2_L32_VALUE_SET(x) (((x) << MAC_PCU_TSF2_L32_VALUE_LSB) & MAC_PCU_TSF2_L32_VALUE_MASK)
+
+#define MAC_PCU_TSF2_U32_ADDRESS 0x000002d0
+#define MAC_PCU_TSF2_U32_OFFSET 0x000002d0
+#define MAC_PCU_TSF2_U32_VALUE_MSB 31
+#define MAC_PCU_TSF2_U32_VALUE_LSB 0
+#define MAC_PCU_TSF2_U32_VALUE_MASK 0xffffffff
+#define MAC_PCU_TSF2_U32_VALUE_GET(x) (((x) & MAC_PCU_TSF2_U32_VALUE_MASK) >> MAC_PCU_TSF2_U32_VALUE_LSB)
+#define MAC_PCU_TSF2_U32_VALUE_SET(x) (((x) << MAC_PCU_TSF2_U32_VALUE_LSB) & MAC_PCU_TSF2_U32_VALUE_MASK)
+
+#define MAC_PCU_GENERIC_TIMERS_MODE3_ADDRESS 0x000002d4
+#define MAC_PCU_GENERIC_TIMERS_MODE3_OFFSET 0x000002d4
+#define MAC_PCU_GENERIC_TIMERS_MODE3_OVERFLOW_INDEX_MSB 27
+#define MAC_PCU_GENERIC_TIMERS_MODE3_OVERFLOW_INDEX_LSB 24
+#define MAC_PCU_GENERIC_TIMERS_MODE3_OVERFLOW_INDEX_MASK 0x0f000000
+#define MAC_PCU_GENERIC_TIMERS_MODE3_OVERFLOW_INDEX_GET(x) (((x) & MAC_PCU_GENERIC_TIMERS_MODE3_OVERFLOW_INDEX_MASK) >> MAC_PCU_GENERIC_TIMERS_MODE3_OVERFLOW_INDEX_LSB)
+#define MAC_PCU_GENERIC_TIMERS_MODE3_OVERFLOW_INDEX_SET(x) (((x) << MAC_PCU_GENERIC_TIMERS_MODE3_OVERFLOW_INDEX_LSB) & MAC_PCU_GENERIC_TIMERS_MODE3_OVERFLOW_INDEX_MASK)
+#define MAC_PCU_GENERIC_TIMERS_MODE3_THRESH_MSB 19
+#define MAC_PCU_GENERIC_TIMERS_MODE3_THRESH_LSB 0
+#define MAC_PCU_GENERIC_TIMERS_MODE3_THRESH_MASK 0x000fffff
+#define MAC_PCU_GENERIC_TIMERS_MODE3_THRESH_GET(x) (((x) & MAC_PCU_GENERIC_TIMERS_MODE3_THRESH_MASK) >> MAC_PCU_GENERIC_TIMERS_MODE3_THRESH_LSB)
+#define MAC_PCU_GENERIC_TIMERS_MODE3_THRESH_SET(x) (((x) << MAC_PCU_GENERIC_TIMERS_MODE3_THRESH_LSB) & MAC_PCU_GENERIC_TIMERS_MODE3_THRESH_MASK)
+
+#define MAC_PCU_DIRECT_CONNECT_ADDRESS 0x000002d8
+#define MAC_PCU_DIRECT_CONNECT_OFFSET 0x000002d8
+#define MAC_PCU_DIRECT_CONNECT_STA_TSF_1_2_SEL_MSB 2
+#define MAC_PCU_DIRECT_CONNECT_STA_TSF_1_2_SEL_LSB 2
+#define MAC_PCU_DIRECT_CONNECT_STA_TSF_1_2_SEL_MASK 0x00000004
+#define MAC_PCU_DIRECT_CONNECT_STA_TSF_1_2_SEL_GET(x) (((x) & MAC_PCU_DIRECT_CONNECT_STA_TSF_1_2_SEL_MASK) >> MAC_PCU_DIRECT_CONNECT_STA_TSF_1_2_SEL_LSB)
+#define MAC_PCU_DIRECT_CONNECT_STA_TSF_1_2_SEL_SET(x) (((x) << MAC_PCU_DIRECT_CONNECT_STA_TSF_1_2_SEL_LSB) & MAC_PCU_DIRECT_CONNECT_STA_TSF_1_2_SEL_MASK)
+#define MAC_PCU_DIRECT_CONNECT_AP_TSF_1_2_SEL_MSB 1
+#define MAC_PCU_DIRECT_CONNECT_AP_TSF_1_2_SEL_LSB 1
+#define MAC_PCU_DIRECT_CONNECT_AP_TSF_1_2_SEL_MASK 0x00000002
+#define MAC_PCU_DIRECT_CONNECT_AP_TSF_1_2_SEL_GET(x) (((x) & MAC_PCU_DIRECT_CONNECT_AP_TSF_1_2_SEL_MASK) >> MAC_PCU_DIRECT_CONNECT_AP_TSF_1_2_SEL_LSB)
+#define MAC_PCU_DIRECT_CONNECT_AP_TSF_1_2_SEL_SET(x) (((x) << MAC_PCU_DIRECT_CONNECT_AP_TSF_1_2_SEL_LSB) & MAC_PCU_DIRECT_CONNECT_AP_TSF_1_2_SEL_MASK)
+#define MAC_PCU_DIRECT_CONNECT_AP_STA_ENABLE_MSB 0
+#define MAC_PCU_DIRECT_CONNECT_AP_STA_ENABLE_LSB 0
+#define MAC_PCU_DIRECT_CONNECT_AP_STA_ENABLE_MASK 0x00000001
+#define MAC_PCU_DIRECT_CONNECT_AP_STA_ENABLE_GET(x) (((x) & MAC_PCU_DIRECT_CONNECT_AP_STA_ENABLE_MASK) >> MAC_PCU_DIRECT_CONNECT_AP_STA_ENABLE_LSB)
+#define MAC_PCU_DIRECT_CONNECT_AP_STA_ENABLE_SET(x) (((x) << MAC_PCU_DIRECT_CONNECT_AP_STA_ENABLE_LSB) & MAC_PCU_DIRECT_CONNECT_AP_STA_ENABLE_MASK)
+
+#define THERM_CTRL1_ADDRESS 0x000002dc
+#define THERM_CTRL1_OFFSET 0x000002dc
+#define THERM_CTRL1_BYPASS_MSB 16
+#define THERM_CTRL1_BYPASS_LSB 16
+#define THERM_CTRL1_BYPASS_MASK 0x00010000
+#define THERM_CTRL1_BYPASS_GET(x) (((x) & THERM_CTRL1_BYPASS_MASK) >> THERM_CTRL1_BYPASS_LSB)
+#define THERM_CTRL1_BYPASS_SET(x) (((x) << THERM_CTRL1_BYPASS_LSB) & THERM_CTRL1_BYPASS_MASK)
+#define THERM_CTRL1_WIDTH_ARBITOR_MSB 15
+#define THERM_CTRL1_WIDTH_ARBITOR_LSB 12
+#define THERM_CTRL1_WIDTH_ARBITOR_MASK 0x0000f000
+#define THERM_CTRL1_WIDTH_ARBITOR_GET(x) (((x) & THERM_CTRL1_WIDTH_ARBITOR_MASK) >> THERM_CTRL1_WIDTH_ARBITOR_LSB)
+#define THERM_CTRL1_WIDTH_ARBITOR_SET(x) (((x) << THERM_CTRL1_WIDTH_ARBITOR_LSB) & THERM_CTRL1_WIDTH_ARBITOR_MASK)
+#define THERM_CTRL1_WIDTH_MSB 11
+#define THERM_CTRL1_WIDTH_LSB 5
+#define THERM_CTRL1_WIDTH_MASK 0x00000fe0
+#define THERM_CTRL1_WIDTH_GET(x) (((x) & THERM_CTRL1_WIDTH_MASK) >> THERM_CTRL1_WIDTH_LSB)
+#define THERM_CTRL1_WIDTH_SET(x) (((x) << THERM_CTRL1_WIDTH_LSB) & THERM_CTRL1_WIDTH_MASK)
+#define THERM_CTRL1_TYPE_MSB 4
+#define THERM_CTRL1_TYPE_LSB 3
+#define THERM_CTRL1_TYPE_MASK 0x00000018
+#define THERM_CTRL1_TYPE_GET(x) (((x) & THERM_CTRL1_TYPE_MASK) >> THERM_CTRL1_TYPE_LSB)
+#define THERM_CTRL1_TYPE_SET(x) (((x) << THERM_CTRL1_TYPE_LSB) & THERM_CTRL1_TYPE_MASK)
+#define THERM_CTRL1_MEASURE_MSB 2
+#define THERM_CTRL1_MEASURE_LSB 2
+#define THERM_CTRL1_MEASURE_MASK 0x00000004
+#define THERM_CTRL1_MEASURE_GET(x) (((x) & THERM_CTRL1_MEASURE_MASK) >> THERM_CTRL1_MEASURE_LSB)
+#define THERM_CTRL1_MEASURE_SET(x) (((x) << THERM_CTRL1_MEASURE_LSB) & THERM_CTRL1_MEASURE_MASK)
+#define THERM_CTRL1_INT_EN_MSB 1
+#define THERM_CTRL1_INT_EN_LSB 1
+#define THERM_CTRL1_INT_EN_MASK 0x00000002
+#define THERM_CTRL1_INT_EN_GET(x) (((x) & THERM_CTRL1_INT_EN_MASK) >> THERM_CTRL1_INT_EN_LSB)
+#define THERM_CTRL1_INT_EN_SET(x) (((x) << THERM_CTRL1_INT_EN_LSB) & THERM_CTRL1_INT_EN_MASK)
+#define THERM_CTRL1_INT_STATUS_MSB 0
+#define THERM_CTRL1_INT_STATUS_LSB 0
+#define THERM_CTRL1_INT_STATUS_MASK 0x00000001
+#define THERM_CTRL1_INT_STATUS_GET(x) (((x) & THERM_CTRL1_INT_STATUS_MASK) >> THERM_CTRL1_INT_STATUS_LSB)
+#define THERM_CTRL1_INT_STATUS_SET(x) (((x) << THERM_CTRL1_INT_STATUS_LSB) & THERM_CTRL1_INT_STATUS_MASK)
+
+#define THERM_CTRL2_ADDRESS 0x000002e0
+#define THERM_CTRL2_OFFSET 0x000002e0
+#define THERM_CTRL2_ADC_OFF_MSB 25
+#define THERM_CTRL2_ADC_OFF_LSB 25
+#define THERM_CTRL2_ADC_OFF_MASK 0x02000000
+#define THERM_CTRL2_ADC_OFF_GET(x) (((x) & THERM_CTRL2_ADC_OFF_MASK) >> THERM_CTRL2_ADC_OFF_LSB)
+#define THERM_CTRL2_ADC_OFF_SET(x) (((x) << THERM_CTRL2_ADC_OFF_LSB) & THERM_CTRL2_ADC_OFF_MASK)
+#define THERM_CTRL2_ADC_ON_MSB 24
+#define THERM_CTRL2_ADC_ON_LSB 24
+#define THERM_CTRL2_ADC_ON_MASK 0x01000000
+#define THERM_CTRL2_ADC_ON_GET(x) (((x) & THERM_CTRL2_ADC_ON_MASK) >> THERM_CTRL2_ADC_ON_LSB)
+#define THERM_CTRL2_ADC_ON_SET(x) (((x) << THERM_CTRL2_ADC_ON_LSB) & THERM_CTRL2_ADC_ON_MASK)
+#define THERM_CTRL2_SAMPLE_MSB 23
+#define THERM_CTRL2_SAMPLE_LSB 16
+#define THERM_CTRL2_SAMPLE_MASK 0x00ff0000
+#define THERM_CTRL2_SAMPLE_GET(x) (((x) & THERM_CTRL2_SAMPLE_MASK) >> THERM_CTRL2_SAMPLE_LSB)
+#define THERM_CTRL2_SAMPLE_SET(x) (((x) << THERM_CTRL2_SAMPLE_LSB) & THERM_CTRL2_SAMPLE_MASK)
+#define THERM_CTRL2_HIGH_MSB 15
+#define THERM_CTRL2_HIGH_LSB 8
+#define THERM_CTRL2_HIGH_MASK 0x0000ff00
+#define THERM_CTRL2_HIGH_GET(x) (((x) & THERM_CTRL2_HIGH_MASK) >> THERM_CTRL2_HIGH_LSB)
+#define THERM_CTRL2_HIGH_SET(x) (((x) << THERM_CTRL2_HIGH_LSB) & THERM_CTRL2_HIGH_MASK)
+#define THERM_CTRL2_LOW_MSB 7
+#define THERM_CTRL2_LOW_LSB 0
+#define THERM_CTRL2_LOW_MASK 0x000000ff
+#define THERM_CTRL2_LOW_GET(x) (((x) & THERM_CTRL2_LOW_MASK) >> THERM_CTRL2_LOW_LSB)
+#define THERM_CTRL2_LOW_SET(x) (((x) << THERM_CTRL2_LOW_LSB) & THERM_CTRL2_LOW_MASK)
+
+#define THERM_CTRL3_ADDRESS 0x000002e4
+#define THERM_CTRL3_OFFSET 0x000002e4
+#define THERM_CTRL3_ADC_GAIN_MSB 16
+#define THERM_CTRL3_ADC_GAIN_LSB 8
+#define THERM_CTRL3_ADC_GAIN_MASK 0x0001ff00
+#define THERM_CTRL3_ADC_GAIN_GET(x) (((x) & THERM_CTRL3_ADC_GAIN_MASK) >> THERM_CTRL3_ADC_GAIN_LSB)
+#define THERM_CTRL3_ADC_GAIN_SET(x) (((x) << THERM_CTRL3_ADC_GAIN_LSB) & THERM_CTRL3_ADC_GAIN_MASK)
+#define THERM_CTRL3_ADC_OFFSET_MSB 7
+#define THERM_CTRL3_ADC_OFFSET_LSB 0
+#define THERM_CTRL3_ADC_OFFSET_MASK 0x000000ff
+#define THERM_CTRL3_ADC_OFFSET_GET(x) (((x) & THERM_CTRL3_ADC_OFFSET_MASK) >> THERM_CTRL3_ADC_OFFSET_LSB)
+#define THERM_CTRL3_ADC_OFFSET_SET(x) (((x) << THERM_CTRL3_ADC_OFFSET_LSB) & THERM_CTRL3_ADC_OFFSET_MASK)
+
+
+#ifndef __ASSEMBLER__
+
+typedef struct rtc_wlan_reg_reg_s {
+ volatile unsigned int wlan_reset_control;
+ volatile unsigned int wlan_xtal_control;
+ volatile unsigned int wlan_tcxo_detect;
+ volatile unsigned int wlan_xtal_test;
+ volatile unsigned int wlan_quadrature;
+ volatile unsigned int wlan_pll_control;
+ volatile unsigned int wlan_pll_settle;
+ volatile unsigned int wlan_xtal_settle;
+ volatile unsigned int wlan_cpu_clock;
+ volatile unsigned int wlan_clock_out;
+ volatile unsigned int wlan_clock_control;
+ volatile unsigned int wlan_bias_override;
+ volatile unsigned int wlan_wdt_control;
+ volatile unsigned int wlan_wdt_status;
+ volatile unsigned int wlan_wdt;
+ volatile unsigned int wlan_wdt_count;
+ volatile unsigned int wlan_wdt_reset;
+ volatile unsigned int wlan_int_status;
+ volatile unsigned int wlan_lf_timer0;
+ volatile unsigned int wlan_lf_timer_count0;
+ volatile unsigned int wlan_lf_timer_control0;
+ volatile unsigned int wlan_lf_timer_status0;
+ volatile unsigned int wlan_lf_timer1;
+ volatile unsigned int wlan_lf_timer_count1;
+ volatile unsigned int wlan_lf_timer_control1;
+ volatile unsigned int wlan_lf_timer_status1;
+ volatile unsigned int wlan_lf_timer2;
+ volatile unsigned int wlan_lf_timer_count2;
+ volatile unsigned int wlan_lf_timer_control2;
+ volatile unsigned int wlan_lf_timer_status2;
+ volatile unsigned int wlan_lf_timer3;
+ volatile unsigned int wlan_lf_timer_count3;
+ volatile unsigned int wlan_lf_timer_control3;
+ volatile unsigned int wlan_lf_timer_status3;
+ volatile unsigned int wlan_hf_timer;
+ volatile unsigned int wlan_hf_timer_count;
+ volatile unsigned int wlan_hf_lf_count;
+ volatile unsigned int wlan_hf_timer_control;
+ volatile unsigned int wlan_hf_timer_status;
+ volatile unsigned int wlan_rtc_control;
+ volatile unsigned int wlan_rtc_time;
+ volatile unsigned int wlan_rtc_date;
+ volatile unsigned int wlan_rtc_set_time;
+ volatile unsigned int wlan_rtc_set_date;
+ volatile unsigned int wlan_rtc_set_alarm;
+ volatile unsigned int wlan_rtc_config;
+ volatile unsigned int wlan_rtc_alarm_status;
+ volatile unsigned int wlan_uart_wakeup;
+ volatile unsigned int wlan_reset_cause;
+ volatile unsigned int wlan_system_sleep;
+ volatile unsigned int wlan_sdio_wrapper;
+ volatile unsigned int wlan_mac_sleep_control;
+ volatile unsigned int wlan_keep_awake;
+ volatile unsigned int wlan_lpo_cal_time;
+ volatile unsigned int wlan_lpo_init_dividend_int;
+ volatile unsigned int wlan_lpo_init_dividend_fraction;
+ volatile unsigned int wlan_lpo_cal;
+ volatile unsigned int wlan_lpo_cal_test_control;
+ volatile unsigned int wlan_lpo_cal_test_status;
+ volatile unsigned int wlan_chip_id;
+ volatile unsigned int wlan_derived_rtc_clk;
+ volatile unsigned int mac_pcu_slp32_mode;
+ volatile unsigned int mac_pcu_slp32_wake;
+ volatile unsigned int mac_pcu_slp32_inc;
+ volatile unsigned int mac_pcu_slp_mib1;
+ volatile unsigned int mac_pcu_slp_mib2;
+ volatile unsigned int mac_pcu_slp_mib3;
+ volatile unsigned int wlan_power_reg;
+ volatile unsigned int wlan_core_clk_ctrl;
+ volatile unsigned int wlan_gpio_wakeup_control;
+ volatile unsigned int ht;
+ volatile unsigned int mac_pcu_tsf_l32;
+ volatile unsigned int mac_pcu_tsf_u32;
+ volatile unsigned int mac_pcu_wbtimer;
+ unsigned char pad0[24]; /* pad to 0x140 */
+ volatile unsigned int mac_pcu_generic_timers[16];
+ volatile unsigned int mac_pcu_generic_timers_mode;
+ unsigned char pad1[60]; /* pad to 0x1c0 */
+ volatile unsigned int mac_pcu_generic_timers2[16];
+ volatile unsigned int mac_pcu_generic_timers_mode2;
+ volatile unsigned int mac_pcu_slp1;
+ volatile unsigned int mac_pcu_slp2;
+ volatile unsigned int mac_pcu_reset_tsf;
+ volatile unsigned int mac_pcu_tsf_add_pll;
+ volatile unsigned int sleep_retention;
+ volatile unsigned int btcoexctrl;
+ volatile unsigned int wbsync_priority1;
+ volatile unsigned int wbsync_priority2;
+ volatile unsigned int wbsync_priority3;
+ volatile unsigned int btcoex0;
+ volatile unsigned int btcoex1;
+ volatile unsigned int btcoex2;
+ volatile unsigned int btcoex3;
+ volatile unsigned int btcoex4;
+ volatile unsigned int btcoex5;
+ volatile unsigned int btcoex6;
+ volatile unsigned int lock;
+ volatile unsigned int nolock_priority;
+ volatile unsigned int wbsync;
+ volatile unsigned int wbsync1;
+ volatile unsigned int wbsync2;
+ volatile unsigned int wbsync3;
+ volatile unsigned int wb_timer_target;
+ volatile unsigned int wb_timer_slop;
+ volatile unsigned int btcoex_int_en;
+ volatile unsigned int btcoex_int_stat;
+ volatile unsigned int btpriority_int_en;
+ volatile unsigned int btpriority_int_stat;
+ volatile unsigned int btpriority_stomp_int_en;
+ volatile unsigned int btpriority_stomp_int_stat;
+ volatile unsigned int mac_pcu_bmiss_timeout;
+ volatile unsigned int mac_pcu_cab_awake;
+ volatile unsigned int lp_perf_counter;
+ volatile unsigned int lp_perf_light_sleep;
+ volatile unsigned int lp_perf_deep_sleep;
+ volatile unsigned int lp_perf_on;
+ volatile unsigned int st_64_bit;
+ volatile unsigned int message_wr;
+ volatile unsigned int message_wr_p;
+ volatile unsigned int message_rd;
+ volatile unsigned int message_rd_p;
+ volatile unsigned int chip_mode;
+ volatile unsigned int clk_req_fall_edge;
+ volatile unsigned int otp;
+ volatile unsigned int otp_status;
+ volatile unsigned int pmu;
+ unsigned char pad2[4]; /* pad to 0x2c0 */
+ volatile unsigned int pmu_config[2];
+ volatile unsigned int pmu_bypass;
+ volatile unsigned int mac_pcu_tsf2_l32;
+ volatile unsigned int mac_pcu_tsf2_u32;
+ volatile unsigned int mac_pcu_generic_timers_mode3;
+ volatile unsigned int mac_pcu_direct_connect;
+ volatile unsigned int therm_ctrl1;
+ volatile unsigned int therm_ctrl2;
+ volatile unsigned int therm_ctrl3;
+} rtc_wlan_reg_reg_t;
+
+#endif /* __ASSEMBLER__ */
+
+#endif /* _RTC_WLAN_REG_H_ */
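
The field accessors above all follow one pattern: _SET(x) shifts a value up to the field's LSB and masks it into place, while _GET(x) masks a register word and shifts the field back down. As a minimal, illustrative sketch (not part of this patch), a driver could combine them with the rtc_wlan_reg_reg_t overlay for a read-modify-write of a single field; the RTC_WLAN_BASE address and the helper names below are assumptions made purely for the example:

/*
 * Illustrative sketch only -- assumes rtc_wlan_reg.h (above) is included
 * and that the register block is memory-mapped at a hypothetical
 * RTC_WLAN_BASE address.
 */
#define RTC_WLAN_BASE 0x00004000 /* hypothetical base address */

static volatile rtc_wlan_reg_reg_t *const rtc_wlan =
	(volatile rtc_wlan_reg_reg_t *)RTC_WLAN_BASE;

/* Read-modify-write: update the VLVL field of WLAN_POWER_REG without
 * disturbing the other bits in the register. */
static void wlan_power_set_vlvl(unsigned int level)
{
	unsigned int val = rtc_wlan->wlan_power_reg;

	val &= ~WLAN_POWER_REG_VLVL_MASK;       /* clear the old field value */
	val |= WLAN_POWER_REG_VLVL_SET(level);  /* shift and mask in the new one */
	rtc_wlan->wlan_power_reg = val;
}

/* Decode the same field from the live register value. */
static unsigned int wlan_power_get_vlvl(void)
{
	return WLAN_POWER_REG_VLVL_GET(rtc_wlan->wlan_power_reg);
}

The same shape applies to every register defined in this header and in si_reg.h below: clear with the _MASK, insert with _SET, extract with _GET.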
diff --git a/drivers/staging/ath6kl/include/common/AR6002/hw4.0/hw/si_reg.h b/drivers/staging/ath6kl/include/common/AR6002/hw4.0/hw/si_reg.h
new file mode 100644
index 000000000000..2cd2e3cadbbc
--- /dev/null
+++ b/drivers/staging/ath6kl/include/common/AR6002/hw4.0/hw/si_reg.h
@@ -0,0 +1,209 @@
+// ------------------------------------------------------------------
+// Copyright (c) 2004-2010 Atheros Corporation. All rights reserved.
+//
+//
+// Permission to use, copy, modify, and/or distribute this software for any
+// purpose with or without fee is hereby granted, provided that the above
+// copyright notice and this permission notice appear in all copies.
+//
+// THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+// WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+// MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+// ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+// WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+// ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+// OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+//
+//
+// ------------------------------------------------------------------
+//===================================================================
+// Author(s): ="Atheros"
+//===================================================================
+
+
+#ifndef _SI_REG_REG_H_
+#define _SI_REG_REG_H_
+
+#define SI_CONFIG_ADDRESS 0x00000000
+#define SI_CONFIG_OFFSET 0x00000000
+#define SI_CONFIG_ERR_INT_MSB 19
+#define SI_CONFIG_ERR_INT_LSB 19
+#define SI_CONFIG_ERR_INT_MASK 0x00080000
+#define SI_CONFIG_ERR_INT_GET(x) (((x) & SI_CONFIG_ERR_INT_MASK) >> SI_CONFIG_ERR_INT_LSB)
+#define SI_CONFIG_ERR_INT_SET(x) (((x) << SI_CONFIG_ERR_INT_LSB) & SI_CONFIG_ERR_INT_MASK)
+#define SI_CONFIG_BIDIR_OD_DATA_MSB 18
+#define SI_CONFIG_BIDIR_OD_DATA_LSB 18
+#define SI_CONFIG_BIDIR_OD_DATA_MASK 0x00040000
+#define SI_CONFIG_BIDIR_OD_DATA_GET(x) (((x) & SI_CONFIG_BIDIR_OD_DATA_MASK) >> SI_CONFIG_BIDIR_OD_DATA_LSB)
+#define SI_CONFIG_BIDIR_OD_DATA_SET(x) (((x) << SI_CONFIG_BIDIR_OD_DATA_LSB) & SI_CONFIG_BIDIR_OD_DATA_MASK)
+#define SI_CONFIG_I2C_MSB 16
+#define SI_CONFIG_I2C_LSB 16
+#define SI_CONFIG_I2C_MASK 0x00010000
+#define SI_CONFIG_I2C_GET(x) (((x) & SI_CONFIG_I2C_MASK) >> SI_CONFIG_I2C_LSB)
+#define SI_CONFIG_I2C_SET(x) (((x) << SI_CONFIG_I2C_LSB) & SI_CONFIG_I2C_MASK)
+#define SI_CONFIG_POS_SAMPLE_MSB 7
+#define SI_CONFIG_POS_SAMPLE_LSB 7
+#define SI_CONFIG_POS_SAMPLE_MASK 0x00000080
+#define SI_CONFIG_POS_SAMPLE_GET(x) (((x) & SI_CONFIG_POS_SAMPLE_MASK) >> SI_CONFIG_POS_SAMPLE_LSB)
+#define SI_CONFIG_POS_SAMPLE_SET(x) (((x) << SI_CONFIG_POS_SAMPLE_LSB) & SI_CONFIG_POS_SAMPLE_MASK)
+#define SI_CONFIG_POS_DRIVE_MSB 6
+#define SI_CONFIG_POS_DRIVE_LSB 6
+#define SI_CONFIG_POS_DRIVE_MASK 0x00000040
+#define SI_CONFIG_POS_DRIVE_GET(x) (((x) & SI_CONFIG_POS_DRIVE_MASK) >> SI_CONFIG_POS_DRIVE_LSB)
+#define SI_CONFIG_POS_DRIVE_SET(x) (((x) << SI_CONFIG_POS_DRIVE_LSB) & SI_CONFIG_POS_DRIVE_MASK)
+#define SI_CONFIG_INACTIVE_DATA_MSB 5
+#define SI_CONFIG_INACTIVE_DATA_LSB 5
+#define SI_CONFIG_INACTIVE_DATA_MASK 0x00000020
+#define SI_CONFIG_INACTIVE_DATA_GET(x) (((x) & SI_CONFIG_INACTIVE_DATA_MASK) >> SI_CONFIG_INACTIVE_DATA_LSB)
+#define SI_CONFIG_INACTIVE_DATA_SET(x) (((x) << SI_CONFIG_INACTIVE_DATA_LSB) & SI_CONFIG_INACTIVE_DATA_MASK)
+#define SI_CONFIG_INACTIVE_CLK_MSB 4
+#define SI_CONFIG_INACTIVE_CLK_LSB 4
+#define SI_CONFIG_INACTIVE_CLK_MASK 0x00000010
+#define SI_CONFIG_INACTIVE_CLK_GET(x) (((x) & SI_CONFIG_INACTIVE_CLK_MASK) >> SI_CONFIG_INACTIVE_CLK_LSB)
+#define SI_CONFIG_INACTIVE_CLK_SET(x) (((x) << SI_CONFIG_INACTIVE_CLK_LSB) & SI_CONFIG_INACTIVE_CLK_MASK)
+#define SI_CONFIG_DIVIDER_MSB 3
+#define SI_CONFIG_DIVIDER_LSB 0
+#define SI_CONFIG_DIVIDER_MASK 0x0000000f
+#define SI_CONFIG_DIVIDER_GET(x) (((x) & SI_CONFIG_DIVIDER_MASK) >> SI_CONFIG_DIVIDER_LSB)
+#define SI_CONFIG_DIVIDER_SET(x) (((x) << SI_CONFIG_DIVIDER_LSB) & SI_CONFIG_DIVIDER_MASK)
+
+#define SI_CS_ADDRESS 0x00000004
+#define SI_CS_OFFSET 0x00000004
+#define SI_CS_BIT_CNT_IN_LAST_BYTE_MSB 13
+#define SI_CS_BIT_CNT_IN_LAST_BYTE_LSB 11
+#define SI_CS_BIT_CNT_IN_LAST_BYTE_MASK 0x00003800
+#define SI_CS_BIT_CNT_IN_LAST_BYTE_GET(x) (((x) & SI_CS_BIT_CNT_IN_LAST_BYTE_MASK) >> SI_CS_BIT_CNT_IN_LAST_BYTE_LSB)
+#define SI_CS_BIT_CNT_IN_LAST_BYTE_SET(x) (((x) << SI_CS_BIT_CNT_IN_LAST_BYTE_LSB) & SI_CS_BIT_CNT_IN_LAST_BYTE_MASK)
+#define SI_CS_DONE_ERR_MSB 10
+#define SI_CS_DONE_ERR_LSB 10
+#define SI_CS_DONE_ERR_MASK 0x00000400
+#define SI_CS_DONE_ERR_GET(x) (((x) & SI_CS_DONE_ERR_MASK) >> SI_CS_DONE_ERR_LSB)
+#define SI_CS_DONE_ERR_SET(x) (((x) << SI_CS_DONE_ERR_LSB) & SI_CS_DONE_ERR_MASK)
+#define SI_CS_DONE_INT_MSB 9
+#define SI_CS_DONE_INT_LSB 9
+#define SI_CS_DONE_INT_MASK 0x00000200
+#define SI_CS_DONE_INT_GET(x) (((x) & SI_CS_DONE_INT_MASK) >> SI_CS_DONE_INT_LSB)
+#define SI_CS_DONE_INT_SET(x) (((x) << SI_CS_DONE_INT_LSB) & SI_CS_DONE_INT_MASK)
+#define SI_CS_START_MSB 8
+#define SI_CS_START_LSB 8
+#define SI_CS_START_MASK 0x00000100
+#define SI_CS_START_GET(x) (((x) & SI_CS_START_MASK) >> SI_CS_START_LSB)
+#define SI_CS_START_SET(x) (((x) << SI_CS_START_LSB) & SI_CS_START_MASK)
+#define SI_CS_RX_CNT_MSB 7
+#define SI_CS_RX_CNT_LSB 4
+#define SI_CS_RX_CNT_MASK 0x000000f0
+#define SI_CS_RX_CNT_GET(x) (((x) & SI_CS_RX_CNT_MASK) >> SI_CS_RX_CNT_LSB)
+#define SI_CS_RX_CNT_SET(x) (((x) << SI_CS_RX_CNT_LSB) & SI_CS_RX_CNT_MASK)
+#define SI_CS_TX_CNT_MSB 3
+#define SI_CS_TX_CNT_LSB 0
+#define SI_CS_TX_CNT_MASK 0x0000000f
+#define SI_CS_TX_CNT_GET(x) (((x) & SI_CS_TX_CNT_MASK) >> SI_CS_TX_CNT_LSB)
+#define SI_CS_TX_CNT_SET(x) (((x) << SI_CS_TX_CNT_LSB) & SI_CS_TX_CNT_MASK)
+
+#define SI_TX_DATA0_ADDRESS 0x00000008
+#define SI_TX_DATA0_OFFSET 0x00000008
+#define SI_TX_DATA0_DATA3_MSB 31
+#define SI_TX_DATA0_DATA3_LSB 24
+#define SI_TX_DATA0_DATA3_MASK 0xff000000
+#define SI_TX_DATA0_DATA3_GET(x) (((x) & SI_TX_DATA0_DATA3_MASK) >> SI_TX_DATA0_DATA3_LSB)
+#define SI_TX_DATA0_DATA3_SET(x) (((x) << SI_TX_DATA0_DATA3_LSB) & SI_TX_DATA0_DATA3_MASK)
+#define SI_TX_DATA0_DATA2_MSB 23
+#define SI_TX_DATA0_DATA2_LSB 16
+#define SI_TX_DATA0_DATA2_MASK 0x00ff0000
+#define SI_TX_DATA0_DATA2_GET(x) (((x) & SI_TX_DATA0_DATA2_MASK) >> SI_TX_DATA0_DATA2_LSB)
+#define SI_TX_DATA0_DATA2_SET(x) (((x) << SI_TX_DATA0_DATA2_LSB) & SI_TX_DATA0_DATA2_MASK)
+#define SI_TX_DATA0_DATA1_MSB 15
+#define SI_TX_DATA0_DATA1_LSB 8
+#define SI_TX_DATA0_DATA1_MASK 0x0000ff00
+#define SI_TX_DATA0_DATA1_GET(x) (((x) & SI_TX_DATA0_DATA1_MASK) >> SI_TX_DATA0_DATA1_LSB)
+#define SI_TX_DATA0_DATA1_SET(x) (((x) << SI_TX_DATA0_DATA1_LSB) & SI_TX_DATA0_DATA1_MASK)
+#define SI_TX_DATA0_DATA0_MSB 7
+#define SI_TX_DATA0_DATA0_LSB 0
+#define SI_TX_DATA0_DATA0_MASK 0x000000ff
+#define SI_TX_DATA0_DATA0_GET(x) (((x) & SI_TX_DATA0_DATA0_MASK) >> SI_TX_DATA0_DATA0_LSB)
+#define SI_TX_DATA0_DATA0_SET(x) (((x) << SI_TX_DATA0_DATA0_LSB) & SI_TX_DATA0_DATA0_MASK)
+
+#define SI_TX_DATA1_ADDRESS 0x0000000c
+#define SI_TX_DATA1_OFFSET 0x0000000c
+#define SI_TX_DATA1_DATA7_MSB 31
+#define SI_TX_DATA1_DATA7_LSB 24
+#define SI_TX_DATA1_DATA7_MASK 0xff000000
+#define SI_TX_DATA1_DATA7_GET(x) (((x) & SI_TX_DATA1_DATA7_MASK) >> SI_TX_DATA1_DATA7_LSB)
+#define SI_TX_DATA1_DATA7_SET(x) (((x) << SI_TX_DATA1_DATA7_LSB) & SI_TX_DATA1_DATA7_MASK)
+#define SI_TX_DATA1_DATA6_MSB 23
+#define SI_TX_DATA1_DATA6_LSB 16
+#define SI_TX_DATA1_DATA6_MASK 0x00ff0000
+#define SI_TX_DATA1_DATA6_GET(x) (((x) & SI_TX_DATA1_DATA6_MASK) >> SI_TX_DATA1_DATA6_LSB)
+#define SI_TX_DATA1_DATA6_SET(x) (((x) << SI_TX_DATA1_DATA6_LSB) & SI_TX_DATA1_DATA6_MASK)
+#define SI_TX_DATA1_DATA5_MSB 15
+#define SI_TX_DATA1_DATA5_LSB 8
+#define SI_TX_DATA1_DATA5_MASK 0x0000ff00
+#define SI_TX_DATA1_DATA5_GET(x) (((x) & SI_TX_DATA1_DATA5_MASK) >> SI_TX_DATA1_DATA5_LSB)
+#define SI_TX_DATA1_DATA5_SET(x) (((x) << SI_TX_DATA1_DATA5_LSB) & SI_TX_DATA1_DATA5_MASK)
+#define SI_TX_DATA1_DATA4_MSB 7
+#define SI_TX_DATA1_DATA4_LSB 0
+#define SI_TX_DATA1_DATA4_MASK 0x000000ff
+#define SI_TX_DATA1_DATA4_GET(x) (((x) & SI_TX_DATA1_DATA4_MASK) >> SI_TX_DATA1_DATA4_LSB)
+#define SI_TX_DATA1_DATA4_SET(x) (((x) << SI_TX_DATA1_DATA4_LSB) & SI_TX_DATA1_DATA4_MASK)
+
+#define SI_RX_DATA0_ADDRESS 0x00000010
+#define SI_RX_DATA0_OFFSET 0x00000010
+#define SI_RX_DATA0_DATA3_MSB 31
+#define SI_RX_DATA0_DATA3_LSB 24
+#define SI_RX_DATA0_DATA3_MASK 0xff000000
+#define SI_RX_DATA0_DATA3_GET(x) (((x) & SI_RX_DATA0_DATA3_MASK) >> SI_RX_DATA0_DATA3_LSB)
+#define SI_RX_DATA0_DATA3_SET(x) (((x) << SI_RX_DATA0_DATA3_LSB) & SI_RX_DATA0_DATA3_MASK)
+#define SI_RX_DATA0_DATA2_MSB 23
+#define SI_RX_DATA0_DATA2_LSB 16
+#define SI_RX_DATA0_DATA2_MASK 0x00ff0000
+#define SI_RX_DATA0_DATA2_GET(x) (((x) & SI_RX_DATA0_DATA2_MASK) >> SI_RX_DATA0_DATA2_LSB)
+#define SI_RX_DATA0_DATA2_SET(x) (((x) << SI_RX_DATA0_DATA2_LSB) & SI_RX_DATA0_DATA2_MASK)
+#define SI_RX_DATA0_DATA1_MSB 15
+#define SI_RX_DATA0_DATA1_LSB 8
+#define SI_RX_DATA0_DATA1_MASK 0x0000ff00
+#define SI_RX_DATA0_DATA1_GET(x) (((x) & SI_RX_DATA0_DATA1_MASK) >> SI_RX_DATA0_DATA1_LSB)
+#define SI_RX_DATA0_DATA1_SET(x) (((x) << SI_RX_DATA0_DATA1_LSB) & SI_RX_DATA0_DATA1_MASK)
+#define SI_RX_DATA0_DATA0_MSB 7
+#define SI_RX_DATA0_DATA0_LSB 0
+#define SI_RX_DATA0_DATA0_MASK 0x000000ff
+#define SI_RX_DATA0_DATA0_GET(x) (((x) & SI_RX_DATA0_DATA0_MASK) >> SI_RX_DATA0_DATA0_LSB)
+#define SI_RX_DATA0_DATA0_SET(x) (((x) << SI_RX_DATA0_DATA0_LSB) & SI_RX_DATA0_DATA0_MASK)
+
+#define SI_RX_DATA1_ADDRESS 0x00000014
+#define SI_RX_DATA1_OFFSET 0x00000014
+#define SI_RX_DATA1_DATA7_MSB 31
+#define SI_RX_DATA1_DATA7_LSB 24
+#define SI_RX_DATA1_DATA7_MASK 0xff000000
+#define SI_RX_DATA1_DATA7_GET(x) (((x) & SI_RX_DATA1_DATA7_MASK) >> SI_RX_DATA1_DATA7_LSB)
+#define SI_RX_DATA1_DATA7_SET(x) (((x) << SI_RX_DATA1_DATA7_LSB) & SI_RX_DATA1_DATA7_MASK)
+#define SI_RX_DATA1_DATA6_MSB 23
+#define SI_RX_DATA1_DATA6_LSB 16
+#define SI_RX_DATA1_DATA6_MASK 0x00ff0000
+#define SI_RX_DATA1_DATA6_GET(x) (((x) & SI_RX_DATA1_DATA6_MASK) >> SI_RX_DATA1_DATA6_LSB)
+#define SI_RX_DATA1_DATA6_SET(x) (((x) << SI_RX_DATA1_DATA6_LSB) & SI_RX_DATA1_DATA6_MASK)
+#define SI_RX_DATA1_DATA5_MSB 15
+#define SI_RX_DATA1_DATA5_LSB 8
+#define SI_RX_DATA1_DATA5_MASK 0x0000ff00
+#define SI_RX_DATA1_DATA5_GET(x) (((x) & SI_RX_DATA1_DATA5_MASK) >> SI_RX_DATA1_DATA5_LSB)
+#define SI_RX_DATA1_DATA5_SET(x) (((x) << SI_RX_DATA1_DATA5_LSB) & SI_RX_DATA1_DATA5_MASK)
+#define SI_RX_DATA1_DATA4_MSB 7
+#define SI_RX_DATA1_DATA4_LSB 0
+#define SI_RX_DATA1_DATA4_MASK 0x000000ff
+#define SI_RX_DATA1_DATA4_GET(x) (((x) & SI_RX_DATA1_DATA4_MASK) >> SI_RX_DATA1_DATA4_LSB)
+#define SI_RX_DATA1_DATA4_SET(x) (((x) << SI_RX_DATA1_DATA4_LSB) & SI_RX_DATA1_DATA4_MASK)
+
+
+#ifndef __ASSEMBLER__
+
+typedef struct si_reg_reg_s {
+ volatile unsigned int si_config;
+ volatile unsigned int si_cs;
+ volatile unsigned int si_tx_data0;
+ volatile unsigned int si_tx_data1;
+ volatile unsigned int si_rx_data0;
+ volatile unsigned int si_rx_data1;
+} si_reg_reg_t;
+
+#endif /* __ASSEMBLER__ */
+
+#endif /* _SI_REG_H_ */
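
Editor's note (not part of the patch): every field in these generated headers follows the same pattern: _SET(x) shifts a value into position and masks it, _GET(x) masks a register word and shifts the field back down, so independent fields can be OR-ed into one register value. A short sketch using the si_reg.h macros above; the function names are made up for illustration and the field semantics are inferred from the names only.

/* Illustrative sketch only; assumes si_reg.h (above) is included. */
static unsigned int si_build_config(void)
{
	/* I2C mode, sample and drive on the positive edge, divider = 8. */
	return SI_CONFIG_I2C_SET(1) |
	       SI_CONFIG_POS_SAMPLE_SET(1) |
	       SI_CONFIG_POS_DRIVE_SET(1) |
	       SI_CONFIG_DIVIDER_SET(8);
}

static int si_transfer_ok(unsigned int cs_reg)
{
	/* DONE_INT without DONE_ERR is taken here to mean a clean completion;
	 * the header only defines the bit layout, not the protocol. */
	return SI_CS_DONE_INT_GET(cs_reg) && !SI_CS_DONE_ERR_GET(cs_reg);
}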
diff --git a/drivers/staging/ath6kl/include/common/AR6002/hw4.0/hw/uart_reg.h b/drivers/staging/ath6kl/include/common/AR6002/hw4.0/hw/uart_reg.h
new file mode 100644
index 000000000000..a8eccaf6d745
--- /dev/null
+++ b/drivers/staging/ath6kl/include/common/AR6002/hw4.0/hw/uart_reg.h
@@ -0,0 +1,260 @@
+// ------------------------------------------------------------------
+// Copyright (c) 2004-2010 Atheros Corporation. All rights reserved.
+//
+//
+// Permission to use, copy, modify, and/or distribute this software for any
+// purpose with or without fee is hereby granted, provided that the above
+// copyright notice and this permission notice appear in all copies.
+//
+// THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+// WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+// MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+// ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+// WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+// ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+// OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+//
+//
+// ------------------------------------------------------------------
+//===================================================================
+// Author(s): ="Atheros"
+//===================================================================
+
+
+#ifndef _UART_REG_REG_H_
+#define _UART_REG_REG_H_
+
+#define UART_DATA_ADDRESS 0x00000000
+#define UART_DATA_OFFSET 0x00000000
+#define UART_DATA_TX_CSR_MSB 9
+#define UART_DATA_TX_CSR_LSB 9
+#define UART_DATA_TX_CSR_MASK 0x00000200
+#define UART_DATA_TX_CSR_GET(x) (((x) & UART_DATA_TX_CSR_MASK) >> UART_DATA_TX_CSR_LSB)
+#define UART_DATA_TX_CSR_SET(x) (((x) << UART_DATA_TX_CSR_LSB) & UART_DATA_TX_CSR_MASK)
+#define UART_DATA_RX_CSR_MSB 8
+#define UART_DATA_RX_CSR_LSB 8
+#define UART_DATA_RX_CSR_MASK 0x00000100
+#define UART_DATA_RX_CSR_GET(x) (((x) & UART_DATA_RX_CSR_MASK) >> UART_DATA_RX_CSR_LSB)
+#define UART_DATA_RX_CSR_SET(x) (((x) << UART_DATA_RX_CSR_LSB) & UART_DATA_RX_CSR_MASK)
+#define UART_DATA_TXRX_DATA_MSB 7
+#define UART_DATA_TXRX_DATA_LSB 0
+#define UART_DATA_TXRX_DATA_MASK 0x000000ff
+#define UART_DATA_TXRX_DATA_GET(x) (((x) & UART_DATA_TXRX_DATA_MASK) >> UART_DATA_TXRX_DATA_LSB)
+#define UART_DATA_TXRX_DATA_SET(x) (((x) << UART_DATA_TXRX_DATA_LSB) & UART_DATA_TXRX_DATA_MASK)
+
+#define UART_CONTROL_ADDRESS 0x00000004
+#define UART_CONTROL_OFFSET 0x00000004
+#define UART_CONTROL_RX_BUSY_MSB 15
+#define UART_CONTROL_RX_BUSY_LSB 15
+#define UART_CONTROL_RX_BUSY_MASK 0x00008000
+#define UART_CONTROL_RX_BUSY_GET(x) (((x) & UART_CONTROL_RX_BUSY_MASK) >> UART_CONTROL_RX_BUSY_LSB)
+#define UART_CONTROL_RX_BUSY_SET(x) (((x) << UART_CONTROL_RX_BUSY_LSB) & UART_CONTROL_RX_BUSY_MASK)
+#define UART_CONTROL_TX_BUSY_MSB 14
+#define UART_CONTROL_TX_BUSY_LSB 14
+#define UART_CONTROL_TX_BUSY_MASK 0x00004000
+#define UART_CONTROL_TX_BUSY_GET(x) (((x) & UART_CONTROL_TX_BUSY_MASK) >> UART_CONTROL_TX_BUSY_LSB)
+#define UART_CONTROL_TX_BUSY_SET(x) (((x) << UART_CONTROL_TX_BUSY_LSB) & UART_CONTROL_TX_BUSY_MASK)
+#define UART_CONTROL_HOST_INT_ENABLE_MSB 13
+#define UART_CONTROL_HOST_INT_ENABLE_LSB 13
+#define UART_CONTROL_HOST_INT_ENABLE_MASK 0x00002000
+#define UART_CONTROL_HOST_INT_ENABLE_GET(x) (((x) & UART_CONTROL_HOST_INT_ENABLE_MASK) >> UART_CONTROL_HOST_INT_ENABLE_LSB)
+#define UART_CONTROL_HOST_INT_ENABLE_SET(x) (((x) << UART_CONTROL_HOST_INT_ENABLE_LSB) & UART_CONTROL_HOST_INT_ENABLE_MASK)
+#define UART_CONTROL_HOST_INT_MSB 12
+#define UART_CONTROL_HOST_INT_LSB 12
+#define UART_CONTROL_HOST_INT_MASK 0x00001000
+#define UART_CONTROL_HOST_INT_GET(x) (((x) & UART_CONTROL_HOST_INT_MASK) >> UART_CONTROL_HOST_INT_LSB)
+#define UART_CONTROL_HOST_INT_SET(x) (((x) << UART_CONTROL_HOST_INT_LSB) & UART_CONTROL_HOST_INT_MASK)
+#define UART_CONTROL_TX_BREAK_MSB 11
+#define UART_CONTROL_TX_BREAK_LSB 11
+#define UART_CONTROL_TX_BREAK_MASK 0x00000800
+#define UART_CONTROL_TX_BREAK_GET(x) (((x) & UART_CONTROL_TX_BREAK_MASK) >> UART_CONTROL_TX_BREAK_LSB)
+#define UART_CONTROL_TX_BREAK_SET(x) (((x) << UART_CONTROL_TX_BREAK_LSB) & UART_CONTROL_TX_BREAK_MASK)
+#define UART_CONTROL_RX_BREAK_MSB 10
+#define UART_CONTROL_RX_BREAK_LSB 10
+#define UART_CONTROL_RX_BREAK_MASK 0x00000400
+#define UART_CONTROL_RX_BREAK_GET(x) (((x) & UART_CONTROL_RX_BREAK_MASK) >> UART_CONTROL_RX_BREAK_LSB)
+#define UART_CONTROL_RX_BREAK_SET(x) (((x) << UART_CONTROL_RX_BREAK_LSB) & UART_CONTROL_RX_BREAK_MASK)
+#define UART_CONTROL_SERIAL_TX_READY_MSB 9
+#define UART_CONTROL_SERIAL_TX_READY_LSB 9
+#define UART_CONTROL_SERIAL_TX_READY_MASK 0x00000200
+#define UART_CONTROL_SERIAL_TX_READY_GET(x) (((x) & UART_CONTROL_SERIAL_TX_READY_MASK) >> UART_CONTROL_SERIAL_TX_READY_LSB)
+#define UART_CONTROL_SERIAL_TX_READY_SET(x) (((x) << UART_CONTROL_SERIAL_TX_READY_LSB) & UART_CONTROL_SERIAL_TX_READY_MASK)
+#define UART_CONTROL_TX_READY_ORIDE_MSB 8
+#define UART_CONTROL_TX_READY_ORIDE_LSB 8
+#define UART_CONTROL_TX_READY_ORIDE_MASK 0x00000100
+#define UART_CONTROL_TX_READY_ORIDE_GET(x) (((x) & UART_CONTROL_TX_READY_ORIDE_MASK) >> UART_CONTROL_TX_READY_ORIDE_LSB)
+#define UART_CONTROL_TX_READY_ORIDE_SET(x) (((x) << UART_CONTROL_TX_READY_ORIDE_LSB) & UART_CONTROL_TX_READY_ORIDE_MASK)
+#define UART_CONTROL_RX_READY_ORIDE_MSB 7
+#define UART_CONTROL_RX_READY_ORIDE_LSB 7
+#define UART_CONTROL_RX_READY_ORIDE_MASK 0x00000080
+#define UART_CONTROL_RX_READY_ORIDE_GET(x) (((x) & UART_CONTROL_RX_READY_ORIDE_MASK) >> UART_CONTROL_RX_READY_ORIDE_LSB)
+#define UART_CONTROL_RX_READY_ORIDE_SET(x) (((x) << UART_CONTROL_RX_READY_ORIDE_LSB) & UART_CONTROL_RX_READY_ORIDE_MASK)
+#define UART_CONTROL_DMA_ENABLE_MSB 6
+#define UART_CONTROL_DMA_ENABLE_LSB 6
+#define UART_CONTROL_DMA_ENABLE_MASK 0x00000040
+#define UART_CONTROL_DMA_ENABLE_GET(x) (((x) & UART_CONTROL_DMA_ENABLE_MASK) >> UART_CONTROL_DMA_ENABLE_LSB)
+#define UART_CONTROL_DMA_ENABLE_SET(x) (((x) << UART_CONTROL_DMA_ENABLE_LSB) & UART_CONTROL_DMA_ENABLE_MASK)
+#define UART_CONTROL_FLOW_ENABLE_MSB 5
+#define UART_CONTROL_FLOW_ENABLE_LSB 5
+#define UART_CONTROL_FLOW_ENABLE_MASK 0x00000020
+#define UART_CONTROL_FLOW_ENABLE_GET(x) (((x) & UART_CONTROL_FLOW_ENABLE_MASK) >> UART_CONTROL_FLOW_ENABLE_LSB)
+#define UART_CONTROL_FLOW_ENABLE_SET(x) (((x) << UART_CONTROL_FLOW_ENABLE_LSB) & UART_CONTROL_FLOW_ENABLE_MASK)
+#define UART_CONTROL_FLOW_INVERT_MSB 4
+#define UART_CONTROL_FLOW_INVERT_LSB 4
+#define UART_CONTROL_FLOW_INVERT_MASK 0x00000010
+#define UART_CONTROL_FLOW_INVERT_GET(x) (((x) & UART_CONTROL_FLOW_INVERT_MASK) >> UART_CONTROL_FLOW_INVERT_LSB)
+#define UART_CONTROL_FLOW_INVERT_SET(x) (((x) << UART_CONTROL_FLOW_INVERT_LSB) & UART_CONTROL_FLOW_INVERT_MASK)
+#define UART_CONTROL_IFC_ENABLE_MSB 3
+#define UART_CONTROL_IFC_ENABLE_LSB 3
+#define UART_CONTROL_IFC_ENABLE_MASK 0x00000008
+#define UART_CONTROL_IFC_ENABLE_GET(x) (((x) & UART_CONTROL_IFC_ENABLE_MASK) >> UART_CONTROL_IFC_ENABLE_LSB)
+#define UART_CONTROL_IFC_ENABLE_SET(x) (((x) << UART_CONTROL_IFC_ENABLE_LSB) & UART_CONTROL_IFC_ENABLE_MASK)
+#define UART_CONTROL_IFC_DCE_MSB 2
+#define UART_CONTROL_IFC_DCE_LSB 2
+#define UART_CONTROL_IFC_DCE_MASK 0x00000004
+#define UART_CONTROL_IFC_DCE_GET(x) (((x) & UART_CONTROL_IFC_DCE_MASK) >> UART_CONTROL_IFC_DCE_LSB)
+#define UART_CONTROL_IFC_DCE_SET(x) (((x) << UART_CONTROL_IFC_DCE_LSB) & UART_CONTROL_IFC_DCE_MASK)
+#define UART_CONTROL_PARITY_ENABLE_MSB 1
+#define UART_CONTROL_PARITY_ENABLE_LSB 1
+#define UART_CONTROL_PARITY_ENABLE_MASK 0x00000002
+#define UART_CONTROL_PARITY_ENABLE_GET(x) (((x) & UART_CONTROL_PARITY_ENABLE_MASK) >> UART_CONTROL_PARITY_ENABLE_LSB)
+#define UART_CONTROL_PARITY_ENABLE_SET(x) (((x) << UART_CONTROL_PARITY_ENABLE_LSB) & UART_CONTROL_PARITY_ENABLE_MASK)
+#define UART_CONTROL_PARITY_EVEN_MSB 0
+#define UART_CONTROL_PARITY_EVEN_LSB 0
+#define UART_CONTROL_PARITY_EVEN_MASK 0x00000001
+#define UART_CONTROL_PARITY_EVEN_GET(x) (((x) & UART_CONTROL_PARITY_EVEN_MASK) >> UART_CONTROL_PARITY_EVEN_LSB)
+#define UART_CONTROL_PARITY_EVEN_SET(x) (((x) << UART_CONTROL_PARITY_EVEN_LSB) & UART_CONTROL_PARITY_EVEN_MASK)
+
+#define UART_CLKDIV_ADDRESS 0x00000008
+#define UART_CLKDIV_OFFSET 0x00000008
+#define UART_CLKDIV_CLK_SCALE_MSB 23
+#define UART_CLKDIV_CLK_SCALE_LSB 16
+#define UART_CLKDIV_CLK_SCALE_MASK 0x00ff0000
+#define UART_CLKDIV_CLK_SCALE_GET(x) (((x) & UART_CLKDIV_CLK_SCALE_MASK) >> UART_CLKDIV_CLK_SCALE_LSB)
+#define UART_CLKDIV_CLK_SCALE_SET(x) (((x) << UART_CLKDIV_CLK_SCALE_LSB) & UART_CLKDIV_CLK_SCALE_MASK)
+#define UART_CLKDIV_CLK_STEP_MSB 15
+#define UART_CLKDIV_CLK_STEP_LSB 0
+#define UART_CLKDIV_CLK_STEP_MASK 0x0000ffff
+#define UART_CLKDIV_CLK_STEP_GET(x) (((x) & UART_CLKDIV_CLK_STEP_MASK) >> UART_CLKDIV_CLK_STEP_LSB)
+#define UART_CLKDIV_CLK_STEP_SET(x) (((x) << UART_CLKDIV_CLK_STEP_LSB) & UART_CLKDIV_CLK_STEP_MASK)
+
+#define UART_INT_ADDRESS 0x0000000c
+#define UART_INT_OFFSET 0x0000000c
+#define UART_INT_TX_EMPTY_INT_MSB 9
+#define UART_INT_TX_EMPTY_INT_LSB 9
+#define UART_INT_TX_EMPTY_INT_MASK 0x00000200
+#define UART_INT_TX_EMPTY_INT_GET(x) (((x) & UART_INT_TX_EMPTY_INT_MASK) >> UART_INT_TX_EMPTY_INT_LSB)
+#define UART_INT_TX_EMPTY_INT_SET(x) (((x) << UART_INT_TX_EMPTY_INT_LSB) & UART_INT_TX_EMPTY_INT_MASK)
+#define UART_INT_RX_FULL_INT_MSB 8
+#define UART_INT_RX_FULL_INT_LSB 8
+#define UART_INT_RX_FULL_INT_MASK 0x00000100
+#define UART_INT_RX_FULL_INT_GET(x) (((x) & UART_INT_RX_FULL_INT_MASK) >> UART_INT_RX_FULL_INT_LSB)
+#define UART_INT_RX_FULL_INT_SET(x) (((x) << UART_INT_RX_FULL_INT_LSB) & UART_INT_RX_FULL_INT_MASK)
+#define UART_INT_RX_BREAK_OFF_INT_MSB 7
+#define UART_INT_RX_BREAK_OFF_INT_LSB 7
+#define UART_INT_RX_BREAK_OFF_INT_MASK 0x00000080
+#define UART_INT_RX_BREAK_OFF_INT_GET(x) (((x) & UART_INT_RX_BREAK_OFF_INT_MASK) >> UART_INT_RX_BREAK_OFF_INT_LSB)
+#define UART_INT_RX_BREAK_OFF_INT_SET(x) (((x) << UART_INT_RX_BREAK_OFF_INT_LSB) & UART_INT_RX_BREAK_OFF_INT_MASK)
+#define UART_INT_RX_BREAK_ON_INT_MSB 6
+#define UART_INT_RX_BREAK_ON_INT_LSB 6
+#define UART_INT_RX_BREAK_ON_INT_MASK 0x00000040
+#define UART_INT_RX_BREAK_ON_INT_GET(x) (((x) & UART_INT_RX_BREAK_ON_INT_MASK) >> UART_INT_RX_BREAK_ON_INT_LSB)
+#define UART_INT_RX_BREAK_ON_INT_SET(x) (((x) << UART_INT_RX_BREAK_ON_INT_LSB) & UART_INT_RX_BREAK_ON_INT_MASK)
+#define UART_INT_RX_PARITY_ERR_INT_MSB 5
+#define UART_INT_RX_PARITY_ERR_INT_LSB 5
+#define UART_INT_RX_PARITY_ERR_INT_MASK 0x00000020
+#define UART_INT_RX_PARITY_ERR_INT_GET(x) (((x) & UART_INT_RX_PARITY_ERR_INT_MASK) >> UART_INT_RX_PARITY_ERR_INT_LSB)
+#define UART_INT_RX_PARITY_ERR_INT_SET(x) (((x) << UART_INT_RX_PARITY_ERR_INT_LSB) & UART_INT_RX_PARITY_ERR_INT_MASK)
+#define UART_INT_TX_OFLOW_ERR_INT_MSB 4
+#define UART_INT_TX_OFLOW_ERR_INT_LSB 4
+#define UART_INT_TX_OFLOW_ERR_INT_MASK 0x00000010
+#define UART_INT_TX_OFLOW_ERR_INT_GET(x) (((x) & UART_INT_TX_OFLOW_ERR_INT_MASK) >> UART_INT_TX_OFLOW_ERR_INT_LSB)
+#define UART_INT_TX_OFLOW_ERR_INT_SET(x) (((x) << UART_INT_TX_OFLOW_ERR_INT_LSB) & UART_INT_TX_OFLOW_ERR_INT_MASK)
+#define UART_INT_RX_OFLOW_ERR_INT_MSB 3
+#define UART_INT_RX_OFLOW_ERR_INT_LSB 3
+#define UART_INT_RX_OFLOW_ERR_INT_MASK 0x00000008
+#define UART_INT_RX_OFLOW_ERR_INT_GET(x) (((x) & UART_INT_RX_OFLOW_ERR_INT_MASK) >> UART_INT_RX_OFLOW_ERR_INT_LSB)
+#define UART_INT_RX_OFLOW_ERR_INT_SET(x) (((x) << UART_INT_RX_OFLOW_ERR_INT_LSB) & UART_INT_RX_OFLOW_ERR_INT_MASK)
+#define UART_INT_RX_FRAMING_ERR_INT_MSB 2
+#define UART_INT_RX_FRAMING_ERR_INT_LSB 2
+#define UART_INT_RX_FRAMING_ERR_INT_MASK 0x00000004
+#define UART_INT_RX_FRAMING_ERR_INT_GET(x) (((x) & UART_INT_RX_FRAMING_ERR_INT_MASK) >> UART_INT_RX_FRAMING_ERR_INT_LSB)
+#define UART_INT_RX_FRAMING_ERR_INT_SET(x) (((x) << UART_INT_RX_FRAMING_ERR_INT_LSB) & UART_INT_RX_FRAMING_ERR_INT_MASK)
+#define UART_INT_TX_READY_INT_MSB 1
+#define UART_INT_TX_READY_INT_LSB 1
+#define UART_INT_TX_READY_INT_MASK 0x00000002
+#define UART_INT_TX_READY_INT_GET(x) (((x) & UART_INT_TX_READY_INT_MASK) >> UART_INT_TX_READY_INT_LSB)
+#define UART_INT_TX_READY_INT_SET(x) (((x) << UART_INT_TX_READY_INT_LSB) & UART_INT_TX_READY_INT_MASK)
+#define UART_INT_RX_VALID_INT_MSB 0
+#define UART_INT_RX_VALID_INT_LSB 0
+#define UART_INT_RX_VALID_INT_MASK 0x00000001
+#define UART_INT_RX_VALID_INT_GET(x) (((x) & UART_INT_RX_VALID_INT_MASK) >> UART_INT_RX_VALID_INT_LSB)
+#define UART_INT_RX_VALID_INT_SET(x) (((x) << UART_INT_RX_VALID_INT_LSB) & UART_INT_RX_VALID_INT_MASK)
+
+#define UART_INT_EN_ADDRESS 0x00000010
+#define UART_INT_EN_OFFSET 0x00000010
+#define UART_INT_EN_TX_EMPTY_INT_EN_MSB 9
+#define UART_INT_EN_TX_EMPTY_INT_EN_LSB 9
+#define UART_INT_EN_TX_EMPTY_INT_EN_MASK 0x00000200
+#define UART_INT_EN_TX_EMPTY_INT_EN_GET(x) (((x) & UART_INT_EN_TX_EMPTY_INT_EN_MASK) >> UART_INT_EN_TX_EMPTY_INT_EN_LSB)
+#define UART_INT_EN_TX_EMPTY_INT_EN_SET(x) (((x) << UART_INT_EN_TX_EMPTY_INT_EN_LSB) & UART_INT_EN_TX_EMPTY_INT_EN_MASK)
+#define UART_INT_EN_RX_FULL_INT_EN_MSB 8
+#define UART_INT_EN_RX_FULL_INT_EN_LSB 8
+#define UART_INT_EN_RX_FULL_INT_EN_MASK 0x00000100
+#define UART_INT_EN_RX_FULL_INT_EN_GET(x) (((x) & UART_INT_EN_RX_FULL_INT_EN_MASK) >> UART_INT_EN_RX_FULL_INT_EN_LSB)
+#define UART_INT_EN_RX_FULL_INT_EN_SET(x) (((x) << UART_INT_EN_RX_FULL_INT_EN_LSB) & UART_INT_EN_RX_FULL_INT_EN_MASK)
+#define UART_INT_EN_RX_BREAK_OFF_INT_EN_MSB 7
+#define UART_INT_EN_RX_BREAK_OFF_INT_EN_LSB 7
+#define UART_INT_EN_RX_BREAK_OFF_INT_EN_MASK 0x00000080
+#define UART_INT_EN_RX_BREAK_OFF_INT_EN_GET(x) (((x) & UART_INT_EN_RX_BREAK_OFF_INT_EN_MASK) >> UART_INT_EN_RX_BREAK_OFF_INT_EN_LSB)
+#define UART_INT_EN_RX_BREAK_OFF_INT_EN_SET(x) (((x) << UART_INT_EN_RX_BREAK_OFF_INT_EN_LSB) & UART_INT_EN_RX_BREAK_OFF_INT_EN_MASK)
+#define UART_INT_EN_RX_BREAK_ON_INT_EN_MSB 6
+#define UART_INT_EN_RX_BREAK_ON_INT_EN_LSB 6
+#define UART_INT_EN_RX_BREAK_ON_INT_EN_MASK 0x00000040
+#define UART_INT_EN_RX_BREAK_ON_INT_EN_GET(x) (((x) & UART_INT_EN_RX_BREAK_ON_INT_EN_MASK) >> UART_INT_EN_RX_BREAK_ON_INT_EN_LSB)
+#define UART_INT_EN_RX_BREAK_ON_INT_EN_SET(x) (((x) << UART_INT_EN_RX_BREAK_ON_INT_EN_LSB) & UART_INT_EN_RX_BREAK_ON_INT_EN_MASK)
+#define UART_INT_EN_RX_PARITY_ERR_INT_EN_MSB 5
+#define UART_INT_EN_RX_PARITY_ERR_INT_EN_LSB 5
+#define UART_INT_EN_RX_PARITY_ERR_INT_EN_MASK 0x00000020
+#define UART_INT_EN_RX_PARITY_ERR_INT_EN_GET(x) (((x) & UART_INT_EN_RX_PARITY_ERR_INT_EN_MASK) >> UART_INT_EN_RX_PARITY_ERR_INT_EN_LSB)
+#define UART_INT_EN_RX_PARITY_ERR_INT_EN_SET(x) (((x) << UART_INT_EN_RX_PARITY_ERR_INT_EN_LSB) & UART_INT_EN_RX_PARITY_ERR_INT_EN_MASK)
+#define UART_INT_EN_TX_OFLOW_ERR_INT_EN_MSB 4
+#define UART_INT_EN_TX_OFLOW_ERR_INT_EN_LSB 4
+#define UART_INT_EN_TX_OFLOW_ERR_INT_EN_MASK 0x00000010
+#define UART_INT_EN_TX_OFLOW_ERR_INT_EN_GET(x) (((x) & UART_INT_EN_TX_OFLOW_ERR_INT_EN_MASK) >> UART_INT_EN_TX_OFLOW_ERR_INT_EN_LSB)
+#define UART_INT_EN_TX_OFLOW_ERR_INT_EN_SET(x) (((x) << UART_INT_EN_TX_OFLOW_ERR_INT_EN_LSB) & UART_INT_EN_TX_OFLOW_ERR_INT_EN_MASK)
+#define UART_INT_EN_RX_OFLOW_ERR_INT_EN_MSB 3
+#define UART_INT_EN_RX_OFLOW_ERR_INT_EN_LSB 3
+#define UART_INT_EN_RX_OFLOW_ERR_INT_EN_MASK 0x00000008
+#define UART_INT_EN_RX_OFLOW_ERR_INT_EN_GET(x) (((x) & UART_INT_EN_RX_OFLOW_ERR_INT_EN_MASK) >> UART_INT_EN_RX_OFLOW_ERR_INT_EN_LSB)
+#define UART_INT_EN_RX_OFLOW_ERR_INT_EN_SET(x) (((x) << UART_INT_EN_RX_OFLOW_ERR_INT_EN_LSB) & UART_INT_EN_RX_OFLOW_ERR_INT_EN_MASK)
+#define UART_INT_EN_RX_FRAMING_ERR_INT_EN_MSB 2
+#define UART_INT_EN_RX_FRAMING_ERR_INT_EN_LSB 2
+#define UART_INT_EN_RX_FRAMING_ERR_INT_EN_MASK 0x00000004
+#define UART_INT_EN_RX_FRAMING_ERR_INT_EN_GET(x) (((x) & UART_INT_EN_RX_FRAMING_ERR_INT_EN_MASK) >> UART_INT_EN_RX_FRAMING_ERR_INT_EN_LSB)
+#define UART_INT_EN_RX_FRAMING_ERR_INT_EN_SET(x) (((x) << UART_INT_EN_RX_FRAMING_ERR_INT_EN_LSB) & UART_INT_EN_RX_FRAMING_ERR_INT_EN_MASK)
+#define UART_INT_EN_TX_READY_INT_EN_MSB 1
+#define UART_INT_EN_TX_READY_INT_EN_LSB 1
+#define UART_INT_EN_TX_READY_INT_EN_MASK 0x00000002
+#define UART_INT_EN_TX_READY_INT_EN_GET(x) (((x) & UART_INT_EN_TX_READY_INT_EN_MASK) >> UART_INT_EN_TX_READY_INT_EN_LSB)
+#define UART_INT_EN_TX_READY_INT_EN_SET(x) (((x) << UART_INT_EN_TX_READY_INT_EN_LSB) & UART_INT_EN_TX_READY_INT_EN_MASK)
+#define UART_INT_EN_RX_VALID_INT_EN_MSB 0
+#define UART_INT_EN_RX_VALID_INT_EN_LSB 0
+#define UART_INT_EN_RX_VALID_INT_EN_MASK 0x00000001
+#define UART_INT_EN_RX_VALID_INT_EN_GET(x) (((x) & UART_INT_EN_RX_VALID_INT_EN_MASK) >> UART_INT_EN_RX_VALID_INT_EN_LSB)
+#define UART_INT_EN_RX_VALID_INT_EN_SET(x) (((x) << UART_INT_EN_RX_VALID_INT_EN_LSB) & UART_INT_EN_RX_VALID_INT_EN_MASK)
+
+
+#ifndef __ASSEMBLER__
+
+typedef struct uart_reg_reg_s {
+ volatile unsigned int uart_data;
+ volatile unsigned int uart_control;
+ volatile unsigned int uart_clkdiv;
+ volatile unsigned int uart_int;
+ volatile unsigned int uart_int_en;
+} uart_reg_reg_t;
+
+#endif /* __ASSEMBLER__ */
+
+#endif /* _UART_REG_H_ */
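
Editor's note (not part of the patch): a brief sketch of the uart_reg.h helpers above. UART_DATA packs the payload byte together with the TX/RX handshake bits, and a raw UART_INT word is decoded field by field with the _GET() helpers. The helper names below are invented for illustration, and the exact handshake the hardware expects is not described by the header, only the bit layout is.

/* Illustrative sketch only; assumes uart_reg.h (above) is included. */
static unsigned int uart_pack_tx_byte(unsigned char c)
{
	/* Payload in bits 7:0, TX_CSR flag in bit 9. */
	return UART_DATA_TXRX_DATA_SET(c) | UART_DATA_TX_CSR_SET(1);
}

static void uart_decode_int(unsigned int status,
			    int *rx_valid, int *tx_ready, int *framing_err)
{
	*rx_valid    = UART_INT_RX_VALID_INT_GET(status);
	*tx_ready    = UART_INT_TX_READY_INT_GET(status);
	*framing_err = UART_INT_RX_FRAMING_ERR_INT_GET(status);
}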
diff --git a/drivers/staging/ath6kl/include/common/AR6002/hw4.0/hw/umbox_reg.h b/drivers/staging/ath6kl/include/common/AR6002/hw4.0/hw/umbox_reg.h
new file mode 100644
index 000000000000..b233cbc513bc
--- /dev/null
+++ b/drivers/staging/ath6kl/include/common/AR6002/hw4.0/hw/umbox_reg.h
@@ -0,0 +1,37 @@
+// ------------------------------------------------------------------
+// Copyright (c) 2004-2010 Atheros Corporation. All rights reserved.
+//
+//
+// Permission to use, copy, modify, and/or distribute this software for any
+// purpose with or without fee is hereby granted, provided that the above
+// copyright notice and this permission notice appear in all copies.
+//
+// THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+// WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+// MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+// ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+// WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+// ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+// OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+//
+//
+// ------------------------------------------------------------------
+//===================================================================
+// Author(s): ="Atheros"
+//===================================================================
+
+
+#ifdef WLAN_HEADERS
+
+#include "umbox_wlan_reg.h"
+
+
+#ifndef BT_HEADERS
+
+
+
+#endif
+#endif
+
+
+
diff --git a/drivers/staging/ath6kl/include/common/AR6002/hw4.0/hw/umbox_wlan_reg.h b/drivers/staging/ath6kl/include/common/AR6002/hw4.0/hw/umbox_wlan_reg.h
new file mode 100644
index 000000000000..4737a2805b2f
--- /dev/null
+++ b/drivers/staging/ath6kl/include/common/AR6002/hw4.0/hw/umbox_wlan_reg.h
@@ -0,0 +1,322 @@
+// ------------------------------------------------------------------
+// Copyright (c) 2004-2010 Atheros Corporation. All rights reserved.
+//
+//
+// Permission to use, copy, modify, and/or distribute this software for any
+// purpose with or without fee is hereby granted, provided that the above
+// copyright notice and this permission notice appear in all copies.
+//
+// THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+// WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+// MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+// ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+// WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+// ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+// OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+//
+//
+// ------------------------------------------------------------------
+//===================================================================
+// Author(s): ="Atheros"
+//===================================================================
+
+
+#ifndef _UMBOX_WLAN_REG_REG_H_
+#define _UMBOX_WLAN_REG_REG_H_
+
+#define UMBOX_FIFO_ADDRESS 0x00000000
+#define UMBOX_FIFO_OFFSET 0x00000000
+#define UMBOX_FIFO_DATA_MSB 8
+#define UMBOX_FIFO_DATA_LSB 0
+#define UMBOX_FIFO_DATA_MASK 0x000001ff
+#define UMBOX_FIFO_DATA_GET(x) (((x) & UMBOX_FIFO_DATA_MASK) >> UMBOX_FIFO_DATA_LSB)
+#define UMBOX_FIFO_DATA_SET(x) (((x) << UMBOX_FIFO_DATA_LSB) & UMBOX_FIFO_DATA_MASK)
+
+#define UMBOX_FIFO_STATUS_ADDRESS 0x00000008
+#define UMBOX_FIFO_STATUS_OFFSET 0x00000008
+#define UMBOX_FIFO_STATUS_TX_EMPTY_MSB 3
+#define UMBOX_FIFO_STATUS_TX_EMPTY_LSB 3
+#define UMBOX_FIFO_STATUS_TX_EMPTY_MASK 0x00000008
+#define UMBOX_FIFO_STATUS_TX_EMPTY_GET(x) (((x) & UMBOX_FIFO_STATUS_TX_EMPTY_MASK) >> UMBOX_FIFO_STATUS_TX_EMPTY_LSB)
+#define UMBOX_FIFO_STATUS_TX_EMPTY_SET(x) (((x) << UMBOX_FIFO_STATUS_TX_EMPTY_LSB) & UMBOX_FIFO_STATUS_TX_EMPTY_MASK)
+#define UMBOX_FIFO_STATUS_TX_FULL_MSB 2
+#define UMBOX_FIFO_STATUS_TX_FULL_LSB 2
+#define UMBOX_FIFO_STATUS_TX_FULL_MASK 0x00000004
+#define UMBOX_FIFO_STATUS_TX_FULL_GET(x) (((x) & UMBOX_FIFO_STATUS_TX_FULL_MASK) >> UMBOX_FIFO_STATUS_TX_FULL_LSB)
+#define UMBOX_FIFO_STATUS_TX_FULL_SET(x) (((x) << UMBOX_FIFO_STATUS_TX_FULL_LSB) & UMBOX_FIFO_STATUS_TX_FULL_MASK)
+#define UMBOX_FIFO_STATUS_RX_EMPTY_MSB 1
+#define UMBOX_FIFO_STATUS_RX_EMPTY_LSB 1
+#define UMBOX_FIFO_STATUS_RX_EMPTY_MASK 0x00000002
+#define UMBOX_FIFO_STATUS_RX_EMPTY_GET(x) (((x) & UMBOX_FIFO_STATUS_RX_EMPTY_MASK) >> UMBOX_FIFO_STATUS_RX_EMPTY_LSB)
+#define UMBOX_FIFO_STATUS_RX_EMPTY_SET(x) (((x) << UMBOX_FIFO_STATUS_RX_EMPTY_LSB) & UMBOX_FIFO_STATUS_RX_EMPTY_MASK)
+#define UMBOX_FIFO_STATUS_RX_FULL_MSB 0
+#define UMBOX_FIFO_STATUS_RX_FULL_LSB 0
+#define UMBOX_FIFO_STATUS_RX_FULL_MASK 0x00000001
+#define UMBOX_FIFO_STATUS_RX_FULL_GET(x) (((x) & UMBOX_FIFO_STATUS_RX_FULL_MASK) >> UMBOX_FIFO_STATUS_RX_FULL_LSB)
+#define UMBOX_FIFO_STATUS_RX_FULL_SET(x) (((x) << UMBOX_FIFO_STATUS_RX_FULL_LSB) & UMBOX_FIFO_STATUS_RX_FULL_MASK)
+
+#define UMBOX_DMA_POLICY_ADDRESS 0x0000000c
+#define UMBOX_DMA_POLICY_OFFSET 0x0000000c
+#define UMBOX_DMA_POLICY_TX_QUANTUM_MSB 3
+#define UMBOX_DMA_POLICY_TX_QUANTUM_LSB 3
+#define UMBOX_DMA_POLICY_TX_QUANTUM_MASK 0x00000008
+#define UMBOX_DMA_POLICY_TX_QUANTUM_GET(x) (((x) & UMBOX_DMA_POLICY_TX_QUANTUM_MASK) >> UMBOX_DMA_POLICY_TX_QUANTUM_LSB)
+#define UMBOX_DMA_POLICY_TX_QUANTUM_SET(x) (((x) << UMBOX_DMA_POLICY_TX_QUANTUM_LSB) & UMBOX_DMA_POLICY_TX_QUANTUM_MASK)
+#define UMBOX_DMA_POLICY_TX_ORDER_MSB 2
+#define UMBOX_DMA_POLICY_TX_ORDER_LSB 2
+#define UMBOX_DMA_POLICY_TX_ORDER_MASK 0x00000004
+#define UMBOX_DMA_POLICY_TX_ORDER_GET(x) (((x) & UMBOX_DMA_POLICY_TX_ORDER_MASK) >> UMBOX_DMA_POLICY_TX_ORDER_LSB)
+#define UMBOX_DMA_POLICY_TX_ORDER_SET(x) (((x) << UMBOX_DMA_POLICY_TX_ORDER_LSB) & UMBOX_DMA_POLICY_TX_ORDER_MASK)
+#define UMBOX_DMA_POLICY_RX_QUANTUM_MSB 1
+#define UMBOX_DMA_POLICY_RX_QUANTUM_LSB 1
+#define UMBOX_DMA_POLICY_RX_QUANTUM_MASK 0x00000002
+#define UMBOX_DMA_POLICY_RX_QUANTUM_GET(x) (((x) & UMBOX_DMA_POLICY_RX_QUANTUM_MASK) >> UMBOX_DMA_POLICY_RX_QUANTUM_LSB)
+#define UMBOX_DMA_POLICY_RX_QUANTUM_SET(x) (((x) << UMBOX_DMA_POLICY_RX_QUANTUM_LSB) & UMBOX_DMA_POLICY_RX_QUANTUM_MASK)
+#define UMBOX_DMA_POLICY_RX_ORDER_MSB 0
+#define UMBOX_DMA_POLICY_RX_ORDER_LSB 0
+#define UMBOX_DMA_POLICY_RX_ORDER_MASK 0x00000001
+#define UMBOX_DMA_POLICY_RX_ORDER_GET(x) (((x) & UMBOX_DMA_POLICY_RX_ORDER_MASK) >> UMBOX_DMA_POLICY_RX_ORDER_LSB)
+#define UMBOX_DMA_POLICY_RX_ORDER_SET(x) (((x) << UMBOX_DMA_POLICY_RX_ORDER_LSB) & UMBOX_DMA_POLICY_RX_ORDER_MASK)
+
+#define UMBOX0_DMA_RX_DESCRIPTOR_BASE_ADDRESS 0x00000010
+#define UMBOX0_DMA_RX_DESCRIPTOR_BASE_OFFSET 0x00000010
+#define UMBOX0_DMA_RX_DESCRIPTOR_BASE_ADDRESS_MSB 27
+#define UMBOX0_DMA_RX_DESCRIPTOR_BASE_ADDRESS_LSB 2
+#define UMBOX0_DMA_RX_DESCRIPTOR_BASE_ADDRESS_MASK 0x0ffffffc
+#define UMBOX0_DMA_RX_DESCRIPTOR_BASE_ADDRESS_GET(x) (((x) & UMBOX0_DMA_RX_DESCRIPTOR_BASE_ADDRESS_MASK) >> UMBOX0_DMA_RX_DESCRIPTOR_BASE_ADDRESS_LSB)
+#define UMBOX0_DMA_RX_DESCRIPTOR_BASE_ADDRESS_SET(x) (((x) << UMBOX0_DMA_RX_DESCRIPTOR_BASE_ADDRESS_LSB) & UMBOX0_DMA_RX_DESCRIPTOR_BASE_ADDRESS_MASK)
+
+#define UMBOX0_DMA_RX_CONTROL_ADDRESS 0x00000014
+#define UMBOX0_DMA_RX_CONTROL_OFFSET 0x00000014
+#define UMBOX0_DMA_RX_CONTROL_RESUME_MSB 2
+#define UMBOX0_DMA_RX_CONTROL_RESUME_LSB 2
+#define UMBOX0_DMA_RX_CONTROL_RESUME_MASK 0x00000004
+#define UMBOX0_DMA_RX_CONTROL_RESUME_GET(x) (((x) & UMBOX0_DMA_RX_CONTROL_RESUME_MASK) >> UMBOX0_DMA_RX_CONTROL_RESUME_LSB)
+#define UMBOX0_DMA_RX_CONTROL_RESUME_SET(x) (((x) << UMBOX0_DMA_RX_CONTROL_RESUME_LSB) & UMBOX0_DMA_RX_CONTROL_RESUME_MASK)
+#define UMBOX0_DMA_RX_CONTROL_START_MSB 1
+#define UMBOX0_DMA_RX_CONTROL_START_LSB 1
+#define UMBOX0_DMA_RX_CONTROL_START_MASK 0x00000002
+#define UMBOX0_DMA_RX_CONTROL_START_GET(x) (((x) & UMBOX0_DMA_RX_CONTROL_START_MASK) >> UMBOX0_DMA_RX_CONTROL_START_LSB)
+#define UMBOX0_DMA_RX_CONTROL_START_SET(x) (((x) << UMBOX0_DMA_RX_CONTROL_START_LSB) & UMBOX0_DMA_RX_CONTROL_START_MASK)
+#define UMBOX0_DMA_RX_CONTROL_STOP_MSB 0
+#define UMBOX0_DMA_RX_CONTROL_STOP_LSB 0
+#define UMBOX0_DMA_RX_CONTROL_STOP_MASK 0x00000001
+#define UMBOX0_DMA_RX_CONTROL_STOP_GET(x) (((x) & UMBOX0_DMA_RX_CONTROL_STOP_MASK) >> UMBOX0_DMA_RX_CONTROL_STOP_LSB)
+#define UMBOX0_DMA_RX_CONTROL_STOP_SET(x) (((x) << UMBOX0_DMA_RX_CONTROL_STOP_LSB) & UMBOX0_DMA_RX_CONTROL_STOP_MASK)
+
+#define UMBOX0_DMA_TX_DESCRIPTOR_BASE_ADDRESS 0x00000018
+#define UMBOX0_DMA_TX_DESCRIPTOR_BASE_OFFSET 0x00000018
+#define UMBOX0_DMA_TX_DESCRIPTOR_BASE_ADDRESS_MSB 27
+#define UMBOX0_DMA_TX_DESCRIPTOR_BASE_ADDRESS_LSB 2
+#define UMBOX0_DMA_TX_DESCRIPTOR_BASE_ADDRESS_MASK 0x0ffffffc
+#define UMBOX0_DMA_TX_DESCRIPTOR_BASE_ADDRESS_GET(x) (((x) & UMBOX0_DMA_TX_DESCRIPTOR_BASE_ADDRESS_MASK) >> UMBOX0_DMA_TX_DESCRIPTOR_BASE_ADDRESS_LSB)
+#define UMBOX0_DMA_TX_DESCRIPTOR_BASE_ADDRESS_SET(x) (((x) << UMBOX0_DMA_TX_DESCRIPTOR_BASE_ADDRESS_LSB) & UMBOX0_DMA_TX_DESCRIPTOR_BASE_ADDRESS_MASK)
+
+#define UMBOX0_DMA_TX_CONTROL_ADDRESS 0x0000001c
+#define UMBOX0_DMA_TX_CONTROL_OFFSET 0x0000001c
+#define UMBOX0_DMA_TX_CONTROL_RESUME_MSB 2
+#define UMBOX0_DMA_TX_CONTROL_RESUME_LSB 2
+#define UMBOX0_DMA_TX_CONTROL_RESUME_MASK 0x00000004
+#define UMBOX0_DMA_TX_CONTROL_RESUME_GET(x) (((x) & UMBOX0_DMA_TX_CONTROL_RESUME_MASK) >> UMBOX0_DMA_TX_CONTROL_RESUME_LSB)
+#define UMBOX0_DMA_TX_CONTROL_RESUME_SET(x) (((x) << UMBOX0_DMA_TX_CONTROL_RESUME_LSB) & UMBOX0_DMA_TX_CONTROL_RESUME_MASK)
+#define UMBOX0_DMA_TX_CONTROL_START_MSB 1
+#define UMBOX0_DMA_TX_CONTROL_START_LSB 1
+#define UMBOX0_DMA_TX_CONTROL_START_MASK 0x00000002
+#define UMBOX0_DMA_TX_CONTROL_START_GET(x) (((x) & UMBOX0_DMA_TX_CONTROL_START_MASK) >> UMBOX0_DMA_TX_CONTROL_START_LSB)
+#define UMBOX0_DMA_TX_CONTROL_START_SET(x) (((x) << UMBOX0_DMA_TX_CONTROL_START_LSB) & UMBOX0_DMA_TX_CONTROL_START_MASK)
+#define UMBOX0_DMA_TX_CONTROL_STOP_MSB 0
+#define UMBOX0_DMA_TX_CONTROL_STOP_LSB 0
+#define UMBOX0_DMA_TX_CONTROL_STOP_MASK 0x00000001
+#define UMBOX0_DMA_TX_CONTROL_STOP_GET(x) (((x) & UMBOX0_DMA_TX_CONTROL_STOP_MASK) >> UMBOX0_DMA_TX_CONTROL_STOP_LSB)
+#define UMBOX0_DMA_TX_CONTROL_STOP_SET(x) (((x) << UMBOX0_DMA_TX_CONTROL_STOP_LSB) & UMBOX0_DMA_TX_CONTROL_STOP_MASK)
+
+#define UMBOX_FIFO_TIMEOUT_ADDRESS 0x00000020
+#define UMBOX_FIFO_TIMEOUT_OFFSET 0x00000020
+#define UMBOX_FIFO_TIMEOUT_ENABLE_SET_MSB 8
+#define UMBOX_FIFO_TIMEOUT_ENABLE_SET_LSB 8
+#define UMBOX_FIFO_TIMEOUT_ENABLE_SET_MASK 0x00000100
+#define UMBOX_FIFO_TIMEOUT_ENABLE_SET_GET(x) (((x) & UMBOX_FIFO_TIMEOUT_ENABLE_SET_MASK) >> UMBOX_FIFO_TIMEOUT_ENABLE_SET_LSB)
+#define UMBOX_FIFO_TIMEOUT_ENABLE_SET_SET(x) (((x) << UMBOX_FIFO_TIMEOUT_ENABLE_SET_LSB) & UMBOX_FIFO_TIMEOUT_ENABLE_SET_MASK)
+#define UMBOX_FIFO_TIMEOUT_VALUE_MSB 7
+#define UMBOX_FIFO_TIMEOUT_VALUE_LSB 0
+#define UMBOX_FIFO_TIMEOUT_VALUE_MASK 0x000000ff
+#define UMBOX_FIFO_TIMEOUT_VALUE_GET(x) (((x) & UMBOX_FIFO_TIMEOUT_VALUE_MASK) >> UMBOX_FIFO_TIMEOUT_VALUE_LSB)
+#define UMBOX_FIFO_TIMEOUT_VALUE_SET(x) (((x) << UMBOX_FIFO_TIMEOUT_VALUE_LSB) & UMBOX_FIFO_TIMEOUT_VALUE_MASK)
+
+#define UMBOX_INT_STATUS_ADDRESS 0x00000024
+#define UMBOX_INT_STATUS_OFFSET 0x00000024
+#define UMBOX_INT_STATUS_HCI_FRAMER_UNDERFLOW_MSB 9
+#define UMBOX_INT_STATUS_HCI_FRAMER_UNDERFLOW_LSB 9
+#define UMBOX_INT_STATUS_HCI_FRAMER_UNDERFLOW_MASK 0x00000200
+#define UMBOX_INT_STATUS_HCI_FRAMER_UNDERFLOW_GET(x) (((x) & UMBOX_INT_STATUS_HCI_FRAMER_UNDERFLOW_MASK) >> UMBOX_INT_STATUS_HCI_FRAMER_UNDERFLOW_LSB)
+#define UMBOX_INT_STATUS_HCI_FRAMER_UNDERFLOW_SET(x) (((x) << UMBOX_INT_STATUS_HCI_FRAMER_UNDERFLOW_LSB) & UMBOX_INT_STATUS_HCI_FRAMER_UNDERFLOW_MASK)
+#define UMBOX_INT_STATUS_HCI_FRAMER_OVERFLOW_MSB 8
+#define UMBOX_INT_STATUS_HCI_FRAMER_OVERFLOW_LSB 8
+#define UMBOX_INT_STATUS_HCI_FRAMER_OVERFLOW_MASK 0x00000100
+#define UMBOX_INT_STATUS_HCI_FRAMER_OVERFLOW_GET(x) (((x) & UMBOX_INT_STATUS_HCI_FRAMER_OVERFLOW_MASK) >> UMBOX_INT_STATUS_HCI_FRAMER_OVERFLOW_LSB)
+#define UMBOX_INT_STATUS_HCI_FRAMER_OVERFLOW_SET(x) (((x) << UMBOX_INT_STATUS_HCI_FRAMER_OVERFLOW_LSB) & UMBOX_INT_STATUS_HCI_FRAMER_OVERFLOW_MASK)
+#define UMBOX_INT_STATUS_RX_DMA_COMPLETE_MSB 7
+#define UMBOX_INT_STATUS_RX_DMA_COMPLETE_LSB 7
+#define UMBOX_INT_STATUS_RX_DMA_COMPLETE_MASK 0x00000080
+#define UMBOX_INT_STATUS_RX_DMA_COMPLETE_GET(x) (((x) & UMBOX_INT_STATUS_RX_DMA_COMPLETE_MASK) >> UMBOX_INT_STATUS_RX_DMA_COMPLETE_LSB)
+#define UMBOX_INT_STATUS_RX_DMA_COMPLETE_SET(x) (((x) << UMBOX_INT_STATUS_RX_DMA_COMPLETE_LSB) & UMBOX_INT_STATUS_RX_DMA_COMPLETE_MASK)
+#define UMBOX_INT_STATUS_TX_DMA_EOM_COMPLETE_MSB 6
+#define UMBOX_INT_STATUS_TX_DMA_EOM_COMPLETE_LSB 6
+#define UMBOX_INT_STATUS_TX_DMA_EOM_COMPLETE_MASK 0x00000040
+#define UMBOX_INT_STATUS_TX_DMA_EOM_COMPLETE_GET(x) (((x) & UMBOX_INT_STATUS_TX_DMA_EOM_COMPLETE_MASK) >> UMBOX_INT_STATUS_TX_DMA_EOM_COMPLETE_LSB)
+#define UMBOX_INT_STATUS_TX_DMA_EOM_COMPLETE_SET(x) (((x) << UMBOX_INT_STATUS_TX_DMA_EOM_COMPLETE_LSB) & UMBOX_INT_STATUS_TX_DMA_EOM_COMPLETE_MASK)
+#define UMBOX_INT_STATUS_TX_DMA_COMPLETE_MSB 5
+#define UMBOX_INT_STATUS_TX_DMA_COMPLETE_LSB 5
+#define UMBOX_INT_STATUS_TX_DMA_COMPLETE_MASK 0x00000020
+#define UMBOX_INT_STATUS_TX_DMA_COMPLETE_GET(x) (((x) & UMBOX_INT_STATUS_TX_DMA_COMPLETE_MASK) >> UMBOX_INT_STATUS_TX_DMA_COMPLETE_LSB)
+#define UMBOX_INT_STATUS_TX_DMA_COMPLETE_SET(x) (((x) << UMBOX_INT_STATUS_TX_DMA_COMPLETE_LSB) & UMBOX_INT_STATUS_TX_DMA_COMPLETE_MASK)
+#define UMBOX_INT_STATUS_HCI_SYNC_ERROR_MSB 4
+#define UMBOX_INT_STATUS_HCI_SYNC_ERROR_LSB 4
+#define UMBOX_INT_STATUS_HCI_SYNC_ERROR_MASK 0x00000010
+#define UMBOX_INT_STATUS_HCI_SYNC_ERROR_GET(x) (((x) & UMBOX_INT_STATUS_HCI_SYNC_ERROR_MASK) >> UMBOX_INT_STATUS_HCI_SYNC_ERROR_LSB)
+#define UMBOX_INT_STATUS_HCI_SYNC_ERROR_SET(x) (((x) << UMBOX_INT_STATUS_HCI_SYNC_ERROR_LSB) & UMBOX_INT_STATUS_HCI_SYNC_ERROR_MASK)
+#define UMBOX_INT_STATUS_TX_OVERFLOW_MSB 3
+#define UMBOX_INT_STATUS_TX_OVERFLOW_LSB 3
+#define UMBOX_INT_STATUS_TX_OVERFLOW_MASK 0x00000008
+#define UMBOX_INT_STATUS_TX_OVERFLOW_GET(x) (((x) & UMBOX_INT_STATUS_TX_OVERFLOW_MASK) >> UMBOX_INT_STATUS_TX_OVERFLOW_LSB)
+#define UMBOX_INT_STATUS_TX_OVERFLOW_SET(x) (((x) << UMBOX_INT_STATUS_TX_OVERFLOW_LSB) & UMBOX_INT_STATUS_TX_OVERFLOW_MASK)
+#define UMBOX_INT_STATUS_RX_UNDERFLOW_MSB 2
+#define UMBOX_INT_STATUS_RX_UNDERFLOW_LSB 2
+#define UMBOX_INT_STATUS_RX_UNDERFLOW_MASK 0x00000004
+#define UMBOX_INT_STATUS_RX_UNDERFLOW_GET(x) (((x) & UMBOX_INT_STATUS_RX_UNDERFLOW_MASK) >> UMBOX_INT_STATUS_RX_UNDERFLOW_LSB)
+#define UMBOX_INT_STATUS_RX_UNDERFLOW_SET(x) (((x) << UMBOX_INT_STATUS_RX_UNDERFLOW_LSB) & UMBOX_INT_STATUS_RX_UNDERFLOW_MASK)
+#define UMBOX_INT_STATUS_TX_NOT_EMPTY_MSB 1
+#define UMBOX_INT_STATUS_TX_NOT_EMPTY_LSB 1
+#define UMBOX_INT_STATUS_TX_NOT_EMPTY_MASK 0x00000002
+#define UMBOX_INT_STATUS_TX_NOT_EMPTY_GET(x) (((x) & UMBOX_INT_STATUS_TX_NOT_EMPTY_MASK) >> UMBOX_INT_STATUS_TX_NOT_EMPTY_LSB)
+#define UMBOX_INT_STATUS_TX_NOT_EMPTY_SET(x) (((x) << UMBOX_INT_STATUS_TX_NOT_EMPTY_LSB) & UMBOX_INT_STATUS_TX_NOT_EMPTY_MASK)
+#define UMBOX_INT_STATUS_RX_NOT_FULL_MSB 0
+#define UMBOX_INT_STATUS_RX_NOT_FULL_LSB 0
+#define UMBOX_INT_STATUS_RX_NOT_FULL_MASK 0x00000001
+#define UMBOX_INT_STATUS_RX_NOT_FULL_GET(x) (((x) & UMBOX_INT_STATUS_RX_NOT_FULL_MASK) >> UMBOX_INT_STATUS_RX_NOT_FULL_LSB)
+#define UMBOX_INT_STATUS_RX_NOT_FULL_SET(x) (((x) << UMBOX_INT_STATUS_RX_NOT_FULL_LSB) & UMBOX_INT_STATUS_RX_NOT_FULL_MASK)
+
+#define UMBOX_INT_ENABLE_ADDRESS 0x00000028
+#define UMBOX_INT_ENABLE_OFFSET 0x00000028
+#define UMBOX_INT_ENABLE_HCI_FRAMER_UNDERFLOW_MSB 9
+#define UMBOX_INT_ENABLE_HCI_FRAMER_UNDERFLOW_LSB 9
+#define UMBOX_INT_ENABLE_HCI_FRAMER_UNDERFLOW_MASK 0x00000200
+#define UMBOX_INT_ENABLE_HCI_FRAMER_UNDERFLOW_GET(x) (((x) & UMBOX_INT_ENABLE_HCI_FRAMER_UNDERFLOW_MASK) >> UMBOX_INT_ENABLE_HCI_FRAMER_UNDERFLOW_LSB)
+#define UMBOX_INT_ENABLE_HCI_FRAMER_UNDERFLOW_SET(x) (((x) << UMBOX_INT_ENABLE_HCI_FRAMER_UNDERFLOW_LSB) & UMBOX_INT_ENABLE_HCI_FRAMER_UNDERFLOW_MASK)
+#define UMBOX_INT_ENABLE_HCI_FRAMER_OVERFLOW_MSB 8
+#define UMBOX_INT_ENABLE_HCI_FRAMER_OVERFLOW_LSB 8
+#define UMBOX_INT_ENABLE_HCI_FRAMER_OVERFLOW_MASK 0x00000100
+#define UMBOX_INT_ENABLE_HCI_FRAMER_OVERFLOW_GET(x) (((x) & UMBOX_INT_ENABLE_HCI_FRAMER_OVERFLOW_MASK) >> UMBOX_INT_ENABLE_HCI_FRAMER_OVERFLOW_LSB)
+#define UMBOX_INT_ENABLE_HCI_FRAMER_OVERFLOW_SET(x) (((x) << UMBOX_INT_ENABLE_HCI_FRAMER_OVERFLOW_LSB) & UMBOX_INT_ENABLE_HCI_FRAMER_OVERFLOW_MASK)
+#define UMBOX_INT_ENABLE_RX_DMA_COMPLETE_MSB 7
+#define UMBOX_INT_ENABLE_RX_DMA_COMPLETE_LSB 7
+#define UMBOX_INT_ENABLE_RX_DMA_COMPLETE_MASK 0x00000080
+#define UMBOX_INT_ENABLE_RX_DMA_COMPLETE_GET(x) (((x) & UMBOX_INT_ENABLE_RX_DMA_COMPLETE_MASK) >> UMBOX_INT_ENABLE_RX_DMA_COMPLETE_LSB)
+#define UMBOX_INT_ENABLE_RX_DMA_COMPLETE_SET(x) (((x) << UMBOX_INT_ENABLE_RX_DMA_COMPLETE_LSB) & UMBOX_INT_ENABLE_RX_DMA_COMPLETE_MASK)
+#define UMBOX_INT_ENABLE_TX_DMA_EOM_COMPLETE_MSB 6
+#define UMBOX_INT_ENABLE_TX_DMA_EOM_COMPLETE_LSB 6
+#define UMBOX_INT_ENABLE_TX_DMA_EOM_COMPLETE_MASK 0x00000040
+#define UMBOX_INT_ENABLE_TX_DMA_EOM_COMPLETE_GET(x) (((x) & UMBOX_INT_ENABLE_TX_DMA_EOM_COMPLETE_MASK) >> UMBOX_INT_ENABLE_TX_DMA_EOM_COMPLETE_LSB)
+#define UMBOX_INT_ENABLE_TX_DMA_EOM_COMPLETE_SET(x) (((x) << UMBOX_INT_ENABLE_TX_DMA_EOM_COMPLETE_LSB) & UMBOX_INT_ENABLE_TX_DMA_EOM_COMPLETE_MASK)
+#define UMBOX_INT_ENABLE_TX_DMA_COMPLETE_MSB 5
+#define UMBOX_INT_ENABLE_TX_DMA_COMPLETE_LSB 5
+#define UMBOX_INT_ENABLE_TX_DMA_COMPLETE_MASK 0x00000020
+#define UMBOX_INT_ENABLE_TX_DMA_COMPLETE_GET(x) (((x) & UMBOX_INT_ENABLE_TX_DMA_COMPLETE_MASK) >> UMBOX_INT_ENABLE_TX_DMA_COMPLETE_LSB)
+#define UMBOX_INT_ENABLE_TX_DMA_COMPLETE_SET(x) (((x) << UMBOX_INT_ENABLE_TX_DMA_COMPLETE_LSB) & UMBOX_INT_ENABLE_TX_DMA_COMPLETE_MASK)
+#define UMBOX_INT_ENABLE_HCI_SYNC_ERROR_MSB 4
+#define UMBOX_INT_ENABLE_HCI_SYNC_ERROR_LSB 4
+#define UMBOX_INT_ENABLE_HCI_SYNC_ERROR_MASK 0x00000010
+#define UMBOX_INT_ENABLE_HCI_SYNC_ERROR_GET(x) (((x) & UMBOX_INT_ENABLE_HCI_SYNC_ERROR_MASK) >> UMBOX_INT_ENABLE_HCI_SYNC_ERROR_LSB)
+#define UMBOX_INT_ENABLE_HCI_SYNC_ERROR_SET(x) (((x) << UMBOX_INT_ENABLE_HCI_SYNC_ERROR_LSB) & UMBOX_INT_ENABLE_HCI_SYNC_ERROR_MASK)
+#define UMBOX_INT_ENABLE_TX_OVERFLOW_MSB 3
+#define UMBOX_INT_ENABLE_TX_OVERFLOW_LSB 3
+#define UMBOX_INT_ENABLE_TX_OVERFLOW_MASK 0x00000008
+#define UMBOX_INT_ENABLE_TX_OVERFLOW_GET(x) (((x) & UMBOX_INT_ENABLE_TX_OVERFLOW_MASK) >> UMBOX_INT_ENABLE_TX_OVERFLOW_LSB)
+#define UMBOX_INT_ENABLE_TX_OVERFLOW_SET(x) (((x) << UMBOX_INT_ENABLE_TX_OVERFLOW_LSB) & UMBOX_INT_ENABLE_TX_OVERFLOW_MASK)
+#define UMBOX_INT_ENABLE_RX_UNDERFLOW_MSB 2
+#define UMBOX_INT_ENABLE_RX_UNDERFLOW_LSB 2
+#define UMBOX_INT_ENABLE_RX_UNDERFLOW_MASK 0x00000004
+#define UMBOX_INT_ENABLE_RX_UNDERFLOW_GET(x) (((x) & UMBOX_INT_ENABLE_RX_UNDERFLOW_MASK) >> UMBOX_INT_ENABLE_RX_UNDERFLOW_LSB)
+#define UMBOX_INT_ENABLE_RX_UNDERFLOW_SET(x) (((x) << UMBOX_INT_ENABLE_RX_UNDERFLOW_LSB) & UMBOX_INT_ENABLE_RX_UNDERFLOW_MASK)
+#define UMBOX_INT_ENABLE_TX_NOT_EMPTY_MSB 1
+#define UMBOX_INT_ENABLE_TX_NOT_EMPTY_LSB 1
+#define UMBOX_INT_ENABLE_TX_NOT_EMPTY_MASK 0x00000002
+#define UMBOX_INT_ENABLE_TX_NOT_EMPTY_GET(x) (((x) & UMBOX_INT_ENABLE_TX_NOT_EMPTY_MASK) >> UMBOX_INT_ENABLE_TX_NOT_EMPTY_LSB)
+#define UMBOX_INT_ENABLE_TX_NOT_EMPTY_SET(x) (((x) << UMBOX_INT_ENABLE_TX_NOT_EMPTY_LSB) & UMBOX_INT_ENABLE_TX_NOT_EMPTY_MASK)
+#define UMBOX_INT_ENABLE_RX_NOT_FULL_MSB 0
+#define UMBOX_INT_ENABLE_RX_NOT_FULL_LSB 0
+#define UMBOX_INT_ENABLE_RX_NOT_FULL_MASK 0x00000001
+#define UMBOX_INT_ENABLE_RX_NOT_FULL_GET(x) (((x) & UMBOX_INT_ENABLE_RX_NOT_FULL_MASK) >> UMBOX_INT_ENABLE_RX_NOT_FULL_LSB)
+#define UMBOX_INT_ENABLE_RX_NOT_FULL_SET(x) (((x) << UMBOX_INT_ENABLE_RX_NOT_FULL_LSB) & UMBOX_INT_ENABLE_RX_NOT_FULL_MASK)
+
+#define UMBOX_DEBUG_ADDRESS 0x0000002c
+#define UMBOX_DEBUG_OFFSET 0x0000002c
+#define UMBOX_DEBUG_SEL_MSB 2
+#define UMBOX_DEBUG_SEL_LSB 0
+#define UMBOX_DEBUG_SEL_MASK 0x00000007
+#define UMBOX_DEBUG_SEL_GET(x) (((x) & UMBOX_DEBUG_SEL_MASK) >> UMBOX_DEBUG_SEL_LSB)
+#define UMBOX_DEBUG_SEL_SET(x) (((x) << UMBOX_DEBUG_SEL_LSB) & UMBOX_DEBUG_SEL_MASK)
+
+#define UMBOX_FIFO_RESET_ADDRESS 0x00000030
+#define UMBOX_FIFO_RESET_OFFSET 0x00000030
+#define UMBOX_FIFO_RESET_INIT_MSB 0
+#define UMBOX_FIFO_RESET_INIT_LSB 0
+#define UMBOX_FIFO_RESET_INIT_MASK 0x00000001
+#define UMBOX_FIFO_RESET_INIT_GET(x) (((x) & UMBOX_FIFO_RESET_INIT_MASK) >> UMBOX_FIFO_RESET_INIT_LSB)
+#define UMBOX_FIFO_RESET_INIT_SET(x) (((x) << UMBOX_FIFO_RESET_INIT_LSB) & UMBOX_FIFO_RESET_INIT_MASK)
+
+#define UMBOX_HCI_FRAMER_ADDRESS 0x00000034
+#define UMBOX_HCI_FRAMER_OFFSET 0x00000034
+#define UMBOX_HCI_FRAMER_CRC_OVERRIDE_MSB 6
+#define UMBOX_HCI_FRAMER_CRC_OVERRIDE_LSB 6
+#define UMBOX_HCI_FRAMER_CRC_OVERRIDE_MASK 0x00000040
+#define UMBOX_HCI_FRAMER_CRC_OVERRIDE_GET(x) (((x) & UMBOX_HCI_FRAMER_CRC_OVERRIDE_MASK) >> UMBOX_HCI_FRAMER_CRC_OVERRIDE_LSB)
+#define UMBOX_HCI_FRAMER_CRC_OVERRIDE_SET(x) (((x) << UMBOX_HCI_FRAMER_CRC_OVERRIDE_LSB) & UMBOX_HCI_FRAMER_CRC_OVERRIDE_MASK)
+#define UMBOX_HCI_FRAMER_ENABLE_MSB 5
+#define UMBOX_HCI_FRAMER_ENABLE_LSB 5
+#define UMBOX_HCI_FRAMER_ENABLE_MASK 0x00000020
+#define UMBOX_HCI_FRAMER_ENABLE_GET(x) (((x) & UMBOX_HCI_FRAMER_ENABLE_MASK) >> UMBOX_HCI_FRAMER_ENABLE_LSB)
+#define UMBOX_HCI_FRAMER_ENABLE_SET(x) (((x) << UMBOX_HCI_FRAMER_ENABLE_LSB) & UMBOX_HCI_FRAMER_ENABLE_MASK)
+#define UMBOX_HCI_FRAMER_SYNC_ERROR_MSB 4
+#define UMBOX_HCI_FRAMER_SYNC_ERROR_LSB 4
+#define UMBOX_HCI_FRAMER_SYNC_ERROR_MASK 0x00000010
+#define UMBOX_HCI_FRAMER_SYNC_ERROR_GET(x) (((x) & UMBOX_HCI_FRAMER_SYNC_ERROR_MASK) >> UMBOX_HCI_FRAMER_SYNC_ERROR_LSB)
+#define UMBOX_HCI_FRAMER_SYNC_ERROR_SET(x) (((x) << UMBOX_HCI_FRAMER_SYNC_ERROR_LSB) & UMBOX_HCI_FRAMER_SYNC_ERROR_MASK)
+#define UMBOX_HCI_FRAMER_UNDERFLOW_MSB 3
+#define UMBOX_HCI_FRAMER_UNDERFLOW_LSB 3
+#define UMBOX_HCI_FRAMER_UNDERFLOW_MASK 0x00000008
+#define UMBOX_HCI_FRAMER_UNDERFLOW_GET(x) (((x) & UMBOX_HCI_FRAMER_UNDERFLOW_MASK) >> UMBOX_HCI_FRAMER_UNDERFLOW_LSB)
+#define UMBOX_HCI_FRAMER_UNDERFLOW_SET(x) (((x) << UMBOX_HCI_FRAMER_UNDERFLOW_LSB) & UMBOX_HCI_FRAMER_UNDERFLOW_MASK)
+#define UMBOX_HCI_FRAMER_OVERFLOW_MSB 2
+#define UMBOX_HCI_FRAMER_OVERFLOW_LSB 2
+#define UMBOX_HCI_FRAMER_OVERFLOW_MASK 0x00000004
+#define UMBOX_HCI_FRAMER_OVERFLOW_GET(x) (((x) & UMBOX_HCI_FRAMER_OVERFLOW_MASK) >> UMBOX_HCI_FRAMER_OVERFLOW_LSB)
+#define UMBOX_HCI_FRAMER_OVERFLOW_SET(x) (((x) << UMBOX_HCI_FRAMER_OVERFLOW_LSB) & UMBOX_HCI_FRAMER_OVERFLOW_MASK)
+#define UMBOX_HCI_FRAMER_CONFIG_MODE_MSB 1
+#define UMBOX_HCI_FRAMER_CONFIG_MODE_LSB 0
+#define UMBOX_HCI_FRAMER_CONFIG_MODE_MASK 0x00000003
+#define UMBOX_HCI_FRAMER_CONFIG_MODE_GET(x) (((x) & UMBOX_HCI_FRAMER_CONFIG_MODE_MASK) >> UMBOX_HCI_FRAMER_CONFIG_MODE_LSB)
+#define UMBOX_HCI_FRAMER_CONFIG_MODE_SET(x) (((x) << UMBOX_HCI_FRAMER_CONFIG_MODE_LSB) & UMBOX_HCI_FRAMER_CONFIG_MODE_MASK)
+
+
+#ifndef __ASSEMBLER__
+
+typedef struct umbox_wlan_reg_reg_s {
+ volatile unsigned int umbox_fifo[2];
+ volatile unsigned int umbox_fifo_status;
+ volatile unsigned int umbox_dma_policy;
+ volatile unsigned int umbox0_dma_rx_descriptor_base;
+ volatile unsigned int umbox0_dma_rx_control;
+ volatile unsigned int umbox0_dma_tx_descriptor_base;
+ volatile unsigned int umbox0_dma_tx_control;
+ volatile unsigned int umbox_fifo_timeout;
+ volatile unsigned int umbox_int_status;
+ volatile unsigned int umbox_int_enable;
+ volatile unsigned int umbox_debug;
+ volatile unsigned int umbox_fifo_reset;
+ volatile unsigned int umbox_hci_framer;
+} umbox_wlan_reg_reg_t;
+
+#endif /* __ASSEMBLER__ */
+
+#endif /* _UMBOX_WLAN_REG_H_ */
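
Editor's note (not part of the patch): the UMBOX_FIFO_STATUS bits defined above are the natural guards for pushing to or popping from the mailbox FIFO. A minimal sketch follows; it assumes mb already points at a mapped umbox_wlan_reg_reg_t instance (how the block is mapped is outside this header), and the helper names are illustrative only.

/* Illustrative sketch only; assumes umbox_wlan_reg.h (above) is included. */
static int umbox_can_tx(volatile umbox_wlan_reg_reg_t *mb)
{
	/* TX_FULL set means there is no room for another word in the TX FIFO. */
	return !UMBOX_FIFO_STATUS_TX_FULL_GET(mb->umbox_fifo_status);
}

static int umbox_can_rx(volatile umbox_wlan_reg_reg_t *mb)
{
	/* RX_EMPTY clear means at least one word is waiting to be read. */
	return !UMBOX_FIFO_STATUS_RX_EMPTY_GET(mb->umbox_fifo_status);
}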
diff --git a/drivers/staging/ath6kl/include/common/AR6002/hw4.0/hw/vmc_reg.h b/drivers/staging/ath6kl/include/common/AR6002/hw4.0/hw/vmc_reg.h
new file mode 100644
index 000000000000..c3d8088a5554
--- /dev/null
+++ b/drivers/staging/ath6kl/include/common/AR6002/hw4.0/hw/vmc_reg.h
@@ -0,0 +1,167 @@
+// ------------------------------------------------------------------
+// Copyright (c) 2004-2010 Atheros Corporation. All rights reserved.
+//
+//
+// Permission to use, copy, modify, and/or distribute this software for any
+// purpose with or without fee is hereby granted, provided that the above
+// copyright notice and this permission notice appear in all copies.
+//
+// THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+// WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+// MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+// ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+// WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+// ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+// OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+//
+//
+// ------------------------------------------------------------------
+//===================================================================
+// Author(s): ="Atheros"
+//===================================================================
+
+
+#ifdef WLAN_HEADERS
+
+#include "vmc_wlan_reg.h"
+
+
+#ifndef BT_HEADERS
+
+#define MC_BCAM_VALID_ADDRESS WLAN_MC_BCAM_VALID_ADDRESS
+#define MC_BCAM_VALID_OFFSET WLAN_MC_BCAM_VALID_OFFSET
+#define MC_BCAM_VALID_BIT_MSB WLAN_MC_BCAM_VALID_BIT_MSB
+#define MC_BCAM_VALID_BIT_LSB WLAN_MC_BCAM_VALID_BIT_LSB
+#define MC_BCAM_VALID_BIT_MASK WLAN_MC_BCAM_VALID_BIT_MASK
+#define MC_BCAM_VALID_BIT_GET(x) WLAN_MC_BCAM_VALID_BIT_GET(x)
+#define MC_BCAM_VALID_BIT_SET(x) WLAN_MC_BCAM_VALID_BIT_SET(x)
+#define MC_BCAM_COMPARE_ADDRESS WLAN_MC_BCAM_COMPARE_ADDRESS
+#define MC_BCAM_COMPARE_OFFSET WLAN_MC_BCAM_COMPARE_OFFSET
+#define MC_BCAM_COMPARE_KEY_MSB WLAN_MC_BCAM_COMPARE_KEY_MSB
+#define MC_BCAM_COMPARE_KEY_LSB WLAN_MC_BCAM_COMPARE_KEY_LSB
+#define MC_BCAM_COMPARE_KEY_MASK WLAN_MC_BCAM_COMPARE_KEY_MASK
+#define MC_BCAM_COMPARE_KEY_GET(x) WLAN_MC_BCAM_COMPARE_KEY_GET(x)
+#define MC_BCAM_COMPARE_KEY_SET(x) WLAN_MC_BCAM_COMPARE_KEY_SET(x)
+#define MC_BCAM_TARGET_ADDRESS WLAN_MC_BCAM_TARGET_ADDRESS
+#define MC_BCAM_TARGET_OFFSET WLAN_MC_BCAM_TARGET_OFFSET
+#define MC_BCAM_TARGET_INST_MSB WLAN_MC_BCAM_TARGET_INST_MSB
+#define MC_BCAM_TARGET_INST_LSB WLAN_MC_BCAM_TARGET_INST_LSB
+#define MC_BCAM_TARGET_INST_MASK WLAN_MC_BCAM_TARGET_INST_MASK
+#define MC_BCAM_TARGET_INST_GET(x) WLAN_MC_BCAM_TARGET_INST_GET(x)
+#define MC_BCAM_TARGET_INST_SET(x) WLAN_MC_BCAM_TARGET_INST_SET(x)
+#define APB_ADDR_ERROR_CONTROL_ADDRESS WLAN_APB_ADDR_ERROR_CONTROL_ADDRESS
+#define APB_ADDR_ERROR_CONTROL_OFFSET WLAN_APB_ADDR_ERROR_CONTROL_OFFSET
+#define APB_ADDR_ERROR_CONTROL_QUAL_ENABLE_MSB WLAN_APB_ADDR_ERROR_CONTROL_QUAL_ENABLE_MSB
+#define APB_ADDR_ERROR_CONTROL_QUAL_ENABLE_LSB WLAN_APB_ADDR_ERROR_CONTROL_QUAL_ENABLE_LSB
+#define APB_ADDR_ERROR_CONTROL_QUAL_ENABLE_MASK WLAN_APB_ADDR_ERROR_CONTROL_QUAL_ENABLE_MASK
+#define APB_ADDR_ERROR_CONTROL_QUAL_ENABLE_GET(x) WLAN_APB_ADDR_ERROR_CONTROL_QUAL_ENABLE_GET(x)
+#define APB_ADDR_ERROR_CONTROL_QUAL_ENABLE_SET(x) WLAN_APB_ADDR_ERROR_CONTROL_QUAL_ENABLE_SET(x)
+#define APB_ADDR_ERROR_CONTROL_ENABLE_MSB WLAN_APB_ADDR_ERROR_CONTROL_ENABLE_MSB
+#define APB_ADDR_ERROR_CONTROL_ENABLE_LSB WLAN_APB_ADDR_ERROR_CONTROL_ENABLE_LSB
+#define APB_ADDR_ERROR_CONTROL_ENABLE_MASK WLAN_APB_ADDR_ERROR_CONTROL_ENABLE_MASK
+#define APB_ADDR_ERROR_CONTROL_ENABLE_GET(x) WLAN_APB_ADDR_ERROR_CONTROL_ENABLE_GET(x)
+#define APB_ADDR_ERROR_CONTROL_ENABLE_SET(x) WLAN_APB_ADDR_ERROR_CONTROL_ENABLE_SET(x)
+#define APB_ADDR_ERROR_STATUS_ADDRESS WLAN_APB_ADDR_ERROR_STATUS_ADDRESS
+#define APB_ADDR_ERROR_STATUS_OFFSET WLAN_APB_ADDR_ERROR_STATUS_OFFSET
+#define APB_ADDR_ERROR_STATUS_WRITE_MSB WLAN_APB_ADDR_ERROR_STATUS_WRITE_MSB
+#define APB_ADDR_ERROR_STATUS_WRITE_LSB WLAN_APB_ADDR_ERROR_STATUS_WRITE_LSB
+#define APB_ADDR_ERROR_STATUS_WRITE_MASK WLAN_APB_ADDR_ERROR_STATUS_WRITE_MASK
+#define APB_ADDR_ERROR_STATUS_WRITE_GET(x) WLAN_APB_ADDR_ERROR_STATUS_WRITE_GET(x)
+#define APB_ADDR_ERROR_STATUS_WRITE_SET(x) WLAN_APB_ADDR_ERROR_STATUS_WRITE_SET(x)
+#define APB_ADDR_ERROR_STATUS_ADDRESS_MSB WLAN_APB_ADDR_ERROR_STATUS_ADDRESS_MSB
+#define APB_ADDR_ERROR_STATUS_ADDRESS_LSB WLAN_APB_ADDR_ERROR_STATUS_ADDRESS_LSB
+#define APB_ADDR_ERROR_STATUS_ADDRESS_MASK WLAN_APB_ADDR_ERROR_STATUS_ADDRESS_MASK
+#define APB_ADDR_ERROR_STATUS_ADDRESS_GET(x) WLAN_APB_ADDR_ERROR_STATUS_ADDRESS_GET(x)
+#define APB_ADDR_ERROR_STATUS_ADDRESS_SET(x) WLAN_APB_ADDR_ERROR_STATUS_ADDRESS_SET(x)
+#define AHB_ADDR_ERROR_CONTROL_ADDRESS WLAN_AHB_ADDR_ERROR_CONTROL_ADDRESS
+#define AHB_ADDR_ERROR_CONTROL_OFFSET WLAN_AHB_ADDR_ERROR_CONTROL_OFFSET
+#define AHB_ADDR_ERROR_CONTROL_ENABLE_MSB WLAN_AHB_ADDR_ERROR_CONTROL_ENABLE_MSB
+#define AHB_ADDR_ERROR_CONTROL_ENABLE_LSB WLAN_AHB_ADDR_ERROR_CONTROL_ENABLE_LSB
+#define AHB_ADDR_ERROR_CONTROL_ENABLE_MASK WLAN_AHB_ADDR_ERROR_CONTROL_ENABLE_MASK
+#define AHB_ADDR_ERROR_CONTROL_ENABLE_GET(x) WLAN_AHB_ADDR_ERROR_CONTROL_ENABLE_GET(x)
+#define AHB_ADDR_ERROR_CONTROL_ENABLE_SET(x) WLAN_AHB_ADDR_ERROR_CONTROL_ENABLE_SET(x)
+#define AHB_ADDR_ERROR_STATUS_ADDRESS WLAN_AHB_ADDR_ERROR_STATUS_ADDRESS
+#define AHB_ADDR_ERROR_STATUS_OFFSET WLAN_AHB_ADDR_ERROR_STATUS_OFFSET
+#define AHB_ADDR_ERROR_STATUS_MAC_MSB WLAN_AHB_ADDR_ERROR_STATUS_MAC_MSB
+#define AHB_ADDR_ERROR_STATUS_MAC_LSB WLAN_AHB_ADDR_ERROR_STATUS_MAC_LSB
+#define AHB_ADDR_ERROR_STATUS_MAC_MASK WLAN_AHB_ADDR_ERROR_STATUS_MAC_MASK
+#define AHB_ADDR_ERROR_STATUS_MAC_GET(x) WLAN_AHB_ADDR_ERROR_STATUS_MAC_GET(x)
+#define AHB_ADDR_ERROR_STATUS_MAC_SET(x) WLAN_AHB_ADDR_ERROR_STATUS_MAC_SET(x)
+#define AHB_ADDR_ERROR_STATUS_MBOX_MSB WLAN_AHB_ADDR_ERROR_STATUS_MBOX_MSB
+#define AHB_ADDR_ERROR_STATUS_MBOX_LSB WLAN_AHB_ADDR_ERROR_STATUS_MBOX_LSB
+#define AHB_ADDR_ERROR_STATUS_MBOX_MASK WLAN_AHB_ADDR_ERROR_STATUS_MBOX_MASK
+#define AHB_ADDR_ERROR_STATUS_MBOX_GET(x) WLAN_AHB_ADDR_ERROR_STATUS_MBOX_GET(x)
+#define AHB_ADDR_ERROR_STATUS_MBOX_SET(x) WLAN_AHB_ADDR_ERROR_STATUS_MBOX_SET(x)
+#define AHB_ADDR_ERROR_STATUS_ADDRESS_MSB WLAN_AHB_ADDR_ERROR_STATUS_ADDRESS_MSB
+#define AHB_ADDR_ERROR_STATUS_ADDRESS_LSB WLAN_AHB_ADDR_ERROR_STATUS_ADDRESS_LSB
+#define AHB_ADDR_ERROR_STATUS_ADDRESS_MASK WLAN_AHB_ADDR_ERROR_STATUS_ADDRESS_MASK
+#define AHB_ADDR_ERROR_STATUS_ADDRESS_GET(x) WLAN_AHB_ADDR_ERROR_STATUS_ADDRESS_GET(x)
+#define AHB_ADDR_ERROR_STATUS_ADDRESS_SET(x) WLAN_AHB_ADDR_ERROR_STATUS_ADDRESS_SET(x)
+#define BCAM_CONFLICT_ERROR_ADDRESS WLAN_BCAM_CONFLICT_ERROR_ADDRESS
+#define BCAM_CONFLICT_ERROR_OFFSET WLAN_BCAM_CONFLICT_ERROR_OFFSET
+#define BCAM_CONFLICT_ERROR_IPORT_FLAG_MSB WLAN_BCAM_CONFLICT_ERROR_IPORT_FLAG_MSB
+#define BCAM_CONFLICT_ERROR_IPORT_FLAG_LSB WLAN_BCAM_CONFLICT_ERROR_IPORT_FLAG_LSB
+#define BCAM_CONFLICT_ERROR_IPORT_FLAG_MASK WLAN_BCAM_CONFLICT_ERROR_IPORT_FLAG_MASK
+#define BCAM_CONFLICT_ERROR_IPORT_FLAG_GET(x) WLAN_BCAM_CONFLICT_ERROR_IPORT_FLAG_GET(x)
+#define BCAM_CONFLICT_ERROR_IPORT_FLAG_SET(x) WLAN_BCAM_CONFLICT_ERROR_IPORT_FLAG_SET(x)
+#define BCAM_CONFLICT_ERROR_DPORT_FLAG_MSB WLAN_BCAM_CONFLICT_ERROR_DPORT_FLAG_MSB
+#define BCAM_CONFLICT_ERROR_DPORT_FLAG_LSB WLAN_BCAM_CONFLICT_ERROR_DPORT_FLAG_LSB
+#define BCAM_CONFLICT_ERROR_DPORT_FLAG_MASK WLAN_BCAM_CONFLICT_ERROR_DPORT_FLAG_MASK
+#define BCAM_CONFLICT_ERROR_DPORT_FLAG_GET(x) WLAN_BCAM_CONFLICT_ERROR_DPORT_FLAG_GET(x)
+#define BCAM_CONFLICT_ERROR_DPORT_FLAG_SET(x) WLAN_BCAM_CONFLICT_ERROR_DPORT_FLAG_SET(x)
+#define CPU_PERF_CNT_ADDRESS WLAN_CPU_PERF_CNT_ADDRESS
+#define CPU_PERF_CNT_OFFSET WLAN_CPU_PERF_CNT_OFFSET
+#define CPU_PERF_CNT_EN_MSB WLAN_CPU_PERF_CNT_EN_MSB
+#define CPU_PERF_CNT_EN_LSB WLAN_CPU_PERF_CNT_EN_LSB
+#define CPU_PERF_CNT_EN_MASK WLAN_CPU_PERF_CNT_EN_MASK
+#define CPU_PERF_CNT_EN_GET(x) WLAN_CPU_PERF_CNT_EN_GET(x)
+#define CPU_PERF_CNT_EN_SET(x) WLAN_CPU_PERF_CNT_EN_SET(x)
+#define CPU_INST_FETCH_ADDRESS WLAN_CPU_INST_FETCH_ADDRESS
+#define CPU_INST_FETCH_OFFSET WLAN_CPU_INST_FETCH_OFFSET
+#define CPU_INST_FETCH_CNT_MSB WLAN_CPU_INST_FETCH_CNT_MSB
+#define CPU_INST_FETCH_CNT_LSB WLAN_CPU_INST_FETCH_CNT_LSB
+#define CPU_INST_FETCH_CNT_MASK WLAN_CPU_INST_FETCH_CNT_MASK
+#define CPU_INST_FETCH_CNT_GET(x) WLAN_CPU_INST_FETCH_CNT_GET(x)
+#define CPU_INST_FETCH_CNT_SET(x) WLAN_CPU_INST_FETCH_CNT_SET(x)
+#define CPU_DATA_FETCH_ADDRESS WLAN_CPU_DATA_FETCH_ADDRESS
+#define CPU_DATA_FETCH_OFFSET WLAN_CPU_DATA_FETCH_OFFSET
+#define CPU_DATA_FETCH_CNT_MSB WLAN_CPU_DATA_FETCH_CNT_MSB
+#define CPU_DATA_FETCH_CNT_LSB WLAN_CPU_DATA_FETCH_CNT_LSB
+#define CPU_DATA_FETCH_CNT_MASK WLAN_CPU_DATA_FETCH_CNT_MASK
+#define CPU_DATA_FETCH_CNT_GET(x) WLAN_CPU_DATA_FETCH_CNT_GET(x)
+#define CPU_DATA_FETCH_CNT_SET(x) WLAN_CPU_DATA_FETCH_CNT_SET(x)
+#define CPU_RAM1_CONFLICT_ADDRESS WLAN_CPU_RAM1_CONFLICT_ADDRESS
+#define CPU_RAM1_CONFLICT_OFFSET WLAN_CPU_RAM1_CONFLICT_OFFSET
+#define CPU_RAM1_CONFLICT_CNT_MSB WLAN_CPU_RAM1_CONFLICT_CNT_MSB
+#define CPU_RAM1_CONFLICT_CNT_LSB WLAN_CPU_RAM1_CONFLICT_CNT_LSB
+#define CPU_RAM1_CONFLICT_CNT_MASK WLAN_CPU_RAM1_CONFLICT_CNT_MASK
+#define CPU_RAM1_CONFLICT_CNT_GET(x) WLAN_CPU_RAM1_CONFLICT_CNT_GET(x)
+#define CPU_RAM1_CONFLICT_CNT_SET(x) WLAN_CPU_RAM1_CONFLICT_CNT_SET(x)
+#define CPU_RAM2_CONFLICT_ADDRESS WLAN_CPU_RAM2_CONFLICT_ADDRESS
+#define CPU_RAM2_CONFLICT_OFFSET WLAN_CPU_RAM2_CONFLICT_OFFSET
+#define CPU_RAM2_CONFLICT_CNT_MSB WLAN_CPU_RAM2_CONFLICT_CNT_MSB
+#define CPU_RAM2_CONFLICT_CNT_LSB WLAN_CPU_RAM2_CONFLICT_CNT_LSB
+#define CPU_RAM2_CONFLICT_CNT_MASK WLAN_CPU_RAM2_CONFLICT_CNT_MASK
+#define CPU_RAM2_CONFLICT_CNT_GET(x) WLAN_CPU_RAM2_CONFLICT_CNT_GET(x)
+#define CPU_RAM2_CONFLICT_CNT_SET(x) WLAN_CPU_RAM2_CONFLICT_CNT_SET(x)
+#define CPU_RAM3_CONFLICT_ADDRESS WLAN_CPU_RAM3_CONFLICT_ADDRESS
+#define CPU_RAM3_CONFLICT_OFFSET WLAN_CPU_RAM3_CONFLICT_OFFSET
+#define CPU_RAM3_CONFLICT_CNT_MSB WLAN_CPU_RAM3_CONFLICT_CNT_MSB
+#define CPU_RAM3_CONFLICT_CNT_LSB WLAN_CPU_RAM3_CONFLICT_CNT_LSB
+#define CPU_RAM3_CONFLICT_CNT_MASK WLAN_CPU_RAM3_CONFLICT_CNT_MASK
+#define CPU_RAM3_CONFLICT_CNT_GET(x) WLAN_CPU_RAM3_CONFLICT_CNT_GET(x)
+#define CPU_RAM3_CONFLICT_CNT_SET(x) WLAN_CPU_RAM3_CONFLICT_CNT_SET(x)
+#define CPU_RAM4_CONFLICT_ADDRESS WLAN_CPU_RAM4_CONFLICT_ADDRESS
+#define CPU_RAM4_CONFLICT_OFFSET WLAN_CPU_RAM4_CONFLICT_OFFSET
+#define CPU_RAM4_CONFLICT_CNT_MSB WLAN_CPU_RAM4_CONFLICT_CNT_MSB
+#define CPU_RAM4_CONFLICT_CNT_LSB WLAN_CPU_RAM4_CONFLICT_CNT_LSB
+#define CPU_RAM4_CONFLICT_CNT_MASK WLAN_CPU_RAM4_CONFLICT_CNT_MASK
+#define CPU_RAM4_CONFLICT_CNT_GET(x) WLAN_CPU_RAM4_CONFLICT_CNT_GET(x)
+#define CPU_RAM4_CONFLICT_CNT_SET(x) WLAN_CPU_RAM4_CONFLICT_CNT_SET(x)
+
+
+#endif
+#endif
+
+
+
diff --git a/drivers/staging/ath6kl/include/common/AR6002/hw4.0/hw/vmc_wlan_reg.h b/drivers/staging/ath6kl/include/common/AR6002/hw4.0/hw/vmc_wlan_reg.h
new file mode 100644
index 000000000000..d28de3938b2e
--- /dev/null
+++ b/drivers/staging/ath6kl/include/common/AR6002/hw4.0/hw/vmc_wlan_reg.h
@@ -0,0 +1,195 @@
+// ------------------------------------------------------------------
+// Copyright (c) 2004-2010 Atheros Corporation. All rights reserved.
+//
+//
+// Permission to use, copy, modify, and/or distribute this software for any
+// purpose with or without fee is hereby granted, provided that the above
+// copyright notice and this permission notice appear in all copies.
+//
+// THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+// WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+// MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+// ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+// WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+// ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+// OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+//
+//
+// ------------------------------------------------------------------
+//===================================================================
+// Author(s): ="Atheros"
+//===================================================================
+
+
+#ifndef _VMC_WLAN_REG_REG_H_
+#define _VMC_WLAN_REG_REG_H_
+
+#define WLAN_MC_BCAM_VALID_ADDRESS 0x00000000
+#define WLAN_MC_BCAM_VALID_OFFSET 0x00000000
+#define WLAN_MC_BCAM_VALID_BIT_MSB 0
+#define WLAN_MC_BCAM_VALID_BIT_LSB 0
+#define WLAN_MC_BCAM_VALID_BIT_MASK 0x00000001
+#define WLAN_MC_BCAM_VALID_BIT_GET(x) (((x) & WLAN_MC_BCAM_VALID_BIT_MASK) >> WLAN_MC_BCAM_VALID_BIT_LSB)
+#define WLAN_MC_BCAM_VALID_BIT_SET(x) (((x) << WLAN_MC_BCAM_VALID_BIT_LSB) & WLAN_MC_BCAM_VALID_BIT_MASK)
+
+#define WLAN_MC_BCAM_COMPARE_ADDRESS 0x00000200
+#define WLAN_MC_BCAM_COMPARE_OFFSET 0x00000200
+#define WLAN_MC_BCAM_COMPARE_KEY_MSB 19
+#define WLAN_MC_BCAM_COMPARE_KEY_LSB 2
+#define WLAN_MC_BCAM_COMPARE_KEY_MASK 0x000ffffc
+#define WLAN_MC_BCAM_COMPARE_KEY_GET(x) (((x) & WLAN_MC_BCAM_COMPARE_KEY_MASK) >> WLAN_MC_BCAM_COMPARE_KEY_LSB)
+#define WLAN_MC_BCAM_COMPARE_KEY_SET(x) (((x) << WLAN_MC_BCAM_COMPARE_KEY_LSB) & WLAN_MC_BCAM_COMPARE_KEY_MASK)
+
+#define WLAN_MC_BCAM_TARGET_ADDRESS 0x00000400
+#define WLAN_MC_BCAM_TARGET_OFFSET 0x00000400
+#define WLAN_MC_BCAM_TARGET_INST_MSB 31
+#define WLAN_MC_BCAM_TARGET_INST_LSB 0
+#define WLAN_MC_BCAM_TARGET_INST_MASK 0xffffffff
+#define WLAN_MC_BCAM_TARGET_INST_GET(x) (((x) & WLAN_MC_BCAM_TARGET_INST_MASK) >> WLAN_MC_BCAM_TARGET_INST_LSB)
+#define WLAN_MC_BCAM_TARGET_INST_SET(x) (((x) << WLAN_MC_BCAM_TARGET_INST_LSB) & WLAN_MC_BCAM_TARGET_INST_MASK)
+
+#define WLAN_APB_ADDR_ERROR_CONTROL_ADDRESS 0x00000600
+#define WLAN_APB_ADDR_ERROR_CONTROL_OFFSET 0x00000600
+#define WLAN_APB_ADDR_ERROR_CONTROL_QUAL_ENABLE_MSB 1
+#define WLAN_APB_ADDR_ERROR_CONTROL_QUAL_ENABLE_LSB 1
+#define WLAN_APB_ADDR_ERROR_CONTROL_QUAL_ENABLE_MASK 0x00000002
+#define WLAN_APB_ADDR_ERROR_CONTROL_QUAL_ENABLE_GET(x) (((x) & WLAN_APB_ADDR_ERROR_CONTROL_QUAL_ENABLE_MASK) >> WLAN_APB_ADDR_ERROR_CONTROL_QUAL_ENABLE_LSB)
+#define WLAN_APB_ADDR_ERROR_CONTROL_QUAL_ENABLE_SET(x) (((x) << WLAN_APB_ADDR_ERROR_CONTROL_QUAL_ENABLE_LSB) & WLAN_APB_ADDR_ERROR_CONTROL_QUAL_ENABLE_MASK)
+#define WLAN_APB_ADDR_ERROR_CONTROL_ENABLE_MSB 0
+#define WLAN_APB_ADDR_ERROR_CONTROL_ENABLE_LSB 0
+#define WLAN_APB_ADDR_ERROR_CONTROL_ENABLE_MASK 0x00000001
+#define WLAN_APB_ADDR_ERROR_CONTROL_ENABLE_GET(x) (((x) & WLAN_APB_ADDR_ERROR_CONTROL_ENABLE_MASK) >> WLAN_APB_ADDR_ERROR_CONTROL_ENABLE_LSB)
+#define WLAN_APB_ADDR_ERROR_CONTROL_ENABLE_SET(x) (((x) << WLAN_APB_ADDR_ERROR_CONTROL_ENABLE_LSB) & WLAN_APB_ADDR_ERROR_CONTROL_ENABLE_MASK)
+
+#define WLAN_APB_ADDR_ERROR_STATUS_ADDRESS 0x00000604
+#define WLAN_APB_ADDR_ERROR_STATUS_OFFSET 0x00000604
+#define WLAN_APB_ADDR_ERROR_STATUS_WRITE_MSB 25
+#define WLAN_APB_ADDR_ERROR_STATUS_WRITE_LSB 25
+#define WLAN_APB_ADDR_ERROR_STATUS_WRITE_MASK 0x02000000
+#define WLAN_APB_ADDR_ERROR_STATUS_WRITE_GET(x) (((x) & WLAN_APB_ADDR_ERROR_STATUS_WRITE_MASK) >> WLAN_APB_ADDR_ERROR_STATUS_WRITE_LSB)
+#define WLAN_APB_ADDR_ERROR_STATUS_WRITE_SET(x) (((x) << WLAN_APB_ADDR_ERROR_STATUS_WRITE_LSB) & WLAN_APB_ADDR_ERROR_STATUS_WRITE_MASK)
+#define WLAN_APB_ADDR_ERROR_STATUS_ADDRESS_MSB 24
+#define WLAN_APB_ADDR_ERROR_STATUS_ADDRESS_LSB 0
+#define WLAN_APB_ADDR_ERROR_STATUS_ADDRESS_MASK 0x01ffffff
+#define WLAN_APB_ADDR_ERROR_STATUS_ADDRESS_GET(x) (((x) & WLAN_APB_ADDR_ERROR_STATUS_ADDRESS_MASK) >> WLAN_APB_ADDR_ERROR_STATUS_ADDRESS_LSB)
+#define WLAN_APB_ADDR_ERROR_STATUS_ADDRESS_SET(x) (((x) << WLAN_APB_ADDR_ERROR_STATUS_ADDRESS_LSB) & WLAN_APB_ADDR_ERROR_STATUS_ADDRESS_MASK)
+
+#define WLAN_AHB_ADDR_ERROR_CONTROL_ADDRESS 0x00000608
+#define WLAN_AHB_ADDR_ERROR_CONTROL_OFFSET 0x00000608
+#define WLAN_AHB_ADDR_ERROR_CONTROL_ENABLE_MSB 0
+#define WLAN_AHB_ADDR_ERROR_CONTROL_ENABLE_LSB 0
+#define WLAN_AHB_ADDR_ERROR_CONTROL_ENABLE_MASK 0x00000001
+#define WLAN_AHB_ADDR_ERROR_CONTROL_ENABLE_GET(x) (((x) & WLAN_AHB_ADDR_ERROR_CONTROL_ENABLE_MASK) >> WLAN_AHB_ADDR_ERROR_CONTROL_ENABLE_LSB)
+#define WLAN_AHB_ADDR_ERROR_CONTROL_ENABLE_SET(x) (((x) << WLAN_AHB_ADDR_ERROR_CONTROL_ENABLE_LSB) & WLAN_AHB_ADDR_ERROR_CONTROL_ENABLE_MASK)
+
+#define WLAN_AHB_ADDR_ERROR_STATUS_ADDRESS 0x0000060c
+#define WLAN_AHB_ADDR_ERROR_STATUS_OFFSET 0x0000060c
+#define WLAN_AHB_ADDR_ERROR_STATUS_MAC_MSB 31
+#define WLAN_AHB_ADDR_ERROR_STATUS_MAC_LSB 31
+#define WLAN_AHB_ADDR_ERROR_STATUS_MAC_MASK 0x80000000
+#define WLAN_AHB_ADDR_ERROR_STATUS_MAC_GET(x) (((x) & WLAN_AHB_ADDR_ERROR_STATUS_MAC_MASK) >> WLAN_AHB_ADDR_ERROR_STATUS_MAC_LSB)
+#define WLAN_AHB_ADDR_ERROR_STATUS_MAC_SET(x) (((x) << WLAN_AHB_ADDR_ERROR_STATUS_MAC_LSB) & WLAN_AHB_ADDR_ERROR_STATUS_MAC_MASK)
+#define WLAN_AHB_ADDR_ERROR_STATUS_MBOX_MSB 30
+#define WLAN_AHB_ADDR_ERROR_STATUS_MBOX_LSB 30
+#define WLAN_AHB_ADDR_ERROR_STATUS_MBOX_MASK 0x40000000
+#define WLAN_AHB_ADDR_ERROR_STATUS_MBOX_GET(x) (((x) & WLAN_AHB_ADDR_ERROR_STATUS_MBOX_MASK) >> WLAN_AHB_ADDR_ERROR_STATUS_MBOX_LSB)
+#define WLAN_AHB_ADDR_ERROR_STATUS_MBOX_SET(x) (((x) << WLAN_AHB_ADDR_ERROR_STATUS_MBOX_LSB) & WLAN_AHB_ADDR_ERROR_STATUS_MBOX_MASK)
+#define WLAN_AHB_ADDR_ERROR_STATUS_ADDRESS_MSB 23
+#define WLAN_AHB_ADDR_ERROR_STATUS_ADDRESS_LSB 0
+#define WLAN_AHB_ADDR_ERROR_STATUS_ADDRESS_MASK 0x00ffffff
+#define WLAN_AHB_ADDR_ERROR_STATUS_ADDRESS_GET(x) (((x) & WLAN_AHB_ADDR_ERROR_STATUS_ADDRESS_MASK) >> WLAN_AHB_ADDR_ERROR_STATUS_ADDRESS_LSB)
+#define WLAN_AHB_ADDR_ERROR_STATUS_ADDRESS_SET(x) (((x) << WLAN_AHB_ADDR_ERROR_STATUS_ADDRESS_LSB) & WLAN_AHB_ADDR_ERROR_STATUS_ADDRESS_MASK)
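The _MSB/_LSB/_MASK/_GET/_SET groups above follow the usual Atheros register-header pattern: _GET masks a raw register word and shifts the field down to bit 0, while _SET shifts a field value back into position. As a minimal sketch (not part of the patch, and assuming this header is included), decoding a raw AHB address-error status word looks like this; the sample value is made up:

#include <stdio.h>

int main(void)
{
	unsigned int raw = 0x80001234;	/* hypothetical WLAN_AHB_ADDR_ERROR_STATUS readback */

	/* Each field is extracted by masking and shifting with the macros above. */
	unsigned int mac  = WLAN_AHB_ADDR_ERROR_STATUS_MAC_GET(raw);     /* bit 31 */
	unsigned int mbox = WLAN_AHB_ADDR_ERROR_STATUS_MBOX_GET(raw);    /* bit 30 */
	unsigned int addr = WLAN_AHB_ADDR_ERROR_STATUS_ADDRESS_GET(raw); /* bits 23:0 */

	printf("mac=%u mbox=%u addr=0x%06x\n", mac, mbox, addr);	/* mac=1 mbox=0 addr=0x001234 */
	return 0;
}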
+
+#define WLAN_BCAM_CONFLICT_ERROR_ADDRESS 0x00000610
+#define WLAN_BCAM_CONFLICT_ERROR_OFFSET 0x00000610
+#define WLAN_BCAM_CONFLICT_ERROR_IPORT_FLAG_MSB 1
+#define WLAN_BCAM_CONFLICT_ERROR_IPORT_FLAG_LSB 1
+#define WLAN_BCAM_CONFLICT_ERROR_IPORT_FLAG_MASK 0x00000002
+#define WLAN_BCAM_CONFLICT_ERROR_IPORT_FLAG_GET(x) (((x) & WLAN_BCAM_CONFLICT_ERROR_IPORT_FLAG_MASK) >> WLAN_BCAM_CONFLICT_ERROR_IPORT_FLAG_LSB)
+#define WLAN_BCAM_CONFLICT_ERROR_IPORT_FLAG_SET(x) (((x) << WLAN_BCAM_CONFLICT_ERROR_IPORT_FLAG_LSB) & WLAN_BCAM_CONFLICT_ERROR_IPORT_FLAG_MASK)
+#define WLAN_BCAM_CONFLICT_ERROR_DPORT_FLAG_MSB 0
+#define WLAN_BCAM_CONFLICT_ERROR_DPORT_FLAG_LSB 0
+#define WLAN_BCAM_CONFLICT_ERROR_DPORT_FLAG_MASK 0x00000001
+#define WLAN_BCAM_CONFLICT_ERROR_DPORT_FLAG_GET(x) (((x) & WLAN_BCAM_CONFLICT_ERROR_DPORT_FLAG_MASK) >> WLAN_BCAM_CONFLICT_ERROR_DPORT_FLAG_LSB)
+#define WLAN_BCAM_CONFLICT_ERROR_DPORT_FLAG_SET(x) (((x) << WLAN_BCAM_CONFLICT_ERROR_DPORT_FLAG_LSB) & WLAN_BCAM_CONFLICT_ERROR_DPORT_FLAG_MASK)
+
+#define WLAN_CPU_PERF_CNT_ADDRESS 0x00000614
+#define WLAN_CPU_PERF_CNT_OFFSET 0x00000614
+#define WLAN_CPU_PERF_CNT_EN_MSB 0
+#define WLAN_CPU_PERF_CNT_EN_LSB 0
+#define WLAN_CPU_PERF_CNT_EN_MASK 0x00000001
+#define WLAN_CPU_PERF_CNT_EN_GET(x) (((x) & WLAN_CPU_PERF_CNT_EN_MASK) >> WLAN_CPU_PERF_CNT_EN_LSB)
+#define WLAN_CPU_PERF_CNT_EN_SET(x) (((x) << WLAN_CPU_PERF_CNT_EN_LSB) & WLAN_CPU_PERF_CNT_EN_MASK)
+
+#define WLAN_CPU_INST_FETCH_ADDRESS 0x00000618
+#define WLAN_CPU_INST_FETCH_OFFSET 0x00000618
+#define WLAN_CPU_INST_FETCH_CNT_MSB 31
+#define WLAN_CPU_INST_FETCH_CNT_LSB 0
+#define WLAN_CPU_INST_FETCH_CNT_MASK 0xffffffff
+#define WLAN_CPU_INST_FETCH_CNT_GET(x) (((x) & WLAN_CPU_INST_FETCH_CNT_MASK) >> WLAN_CPU_INST_FETCH_CNT_LSB)
+#define WLAN_CPU_INST_FETCH_CNT_SET(x) (((x) << WLAN_CPU_INST_FETCH_CNT_LSB) & WLAN_CPU_INST_FETCH_CNT_MASK)
+
+#define WLAN_CPU_DATA_FETCH_ADDRESS 0x0000061c
+#define WLAN_CPU_DATA_FETCH_OFFSET 0x0000061c
+#define WLAN_CPU_DATA_FETCH_CNT_MSB 31
+#define WLAN_CPU_DATA_FETCH_CNT_LSB 0
+#define WLAN_CPU_DATA_FETCH_CNT_MASK 0xffffffff
+#define WLAN_CPU_DATA_FETCH_CNT_GET(x) (((x) & WLAN_CPU_DATA_FETCH_CNT_MASK) >> WLAN_CPU_DATA_FETCH_CNT_LSB)
+#define WLAN_CPU_DATA_FETCH_CNT_SET(x) (((x) << WLAN_CPU_DATA_FETCH_CNT_LSB) & WLAN_CPU_DATA_FETCH_CNT_MASK)
+
+#define WLAN_CPU_RAM1_CONFLICT_ADDRESS 0x00000620
+#define WLAN_CPU_RAM1_CONFLICT_OFFSET 0x00000620
+#define WLAN_CPU_RAM1_CONFLICT_CNT_MSB 11
+#define WLAN_CPU_RAM1_CONFLICT_CNT_LSB 0
+#define WLAN_CPU_RAM1_CONFLICT_CNT_MASK 0x00000fff
+#define WLAN_CPU_RAM1_CONFLICT_CNT_GET(x) (((x) & WLAN_CPU_RAM1_CONFLICT_CNT_MASK) >> WLAN_CPU_RAM1_CONFLICT_CNT_LSB)
+#define WLAN_CPU_RAM1_CONFLICT_CNT_SET(x) (((x) << WLAN_CPU_RAM1_CONFLICT_CNT_LSB) & WLAN_CPU_RAM1_CONFLICT_CNT_MASK)
+
+#define WLAN_CPU_RAM2_CONFLICT_ADDRESS 0x00000624
+#define WLAN_CPU_RAM2_CONFLICT_OFFSET 0x00000624
+#define WLAN_CPU_RAM2_CONFLICT_CNT_MSB 11
+#define WLAN_CPU_RAM2_CONFLICT_CNT_LSB 0
+#define WLAN_CPU_RAM2_CONFLICT_CNT_MASK 0x00000fff
+#define WLAN_CPU_RAM2_CONFLICT_CNT_GET(x) (((x) & WLAN_CPU_RAM2_CONFLICT_CNT_MASK) >> WLAN_CPU_RAM2_CONFLICT_CNT_LSB)
+#define WLAN_CPU_RAM2_CONFLICT_CNT_SET(x) (((x) << WLAN_CPU_RAM2_CONFLICT_CNT_LSB) & WLAN_CPU_RAM2_CONFLICT_CNT_MASK)
+
+#define WLAN_CPU_RAM3_CONFLICT_ADDRESS 0x00000628
+#define WLAN_CPU_RAM3_CONFLICT_OFFSET 0x00000628
+#define WLAN_CPU_RAM3_CONFLICT_CNT_MSB 11
+#define WLAN_CPU_RAM3_CONFLICT_CNT_LSB 0
+#define WLAN_CPU_RAM3_CONFLICT_CNT_MASK 0x00000fff
+#define WLAN_CPU_RAM3_CONFLICT_CNT_GET(x) (((x) & WLAN_CPU_RAM3_CONFLICT_CNT_MASK) >> WLAN_CPU_RAM3_CONFLICT_CNT_LSB)
+#define WLAN_CPU_RAM3_CONFLICT_CNT_SET(x) (((x) << WLAN_CPU_RAM3_CONFLICT_CNT_LSB) & WLAN_CPU_RAM3_CONFLICT_CNT_MASK)
+
+#define WLAN_CPU_RAM4_CONFLICT_ADDRESS 0x0000062c
+#define WLAN_CPU_RAM4_CONFLICT_OFFSET 0x0000062c
+#define WLAN_CPU_RAM4_CONFLICT_CNT_MSB 11
+#define WLAN_CPU_RAM4_CONFLICT_CNT_LSB 0
+#define WLAN_CPU_RAM4_CONFLICT_CNT_MASK 0x00000fff
+#define WLAN_CPU_RAM4_CONFLICT_CNT_GET(x) (((x) & WLAN_CPU_RAM4_CONFLICT_CNT_MASK) >> WLAN_CPU_RAM4_CONFLICT_CNT_LSB)
+#define WLAN_CPU_RAM4_CONFLICT_CNT_SET(x) (((x) << WLAN_CPU_RAM4_CONFLICT_CNT_LSB) & WLAN_CPU_RAM4_CONFLICT_CNT_MASK)
+
+
+#ifndef __ASSEMBLER__
+
+typedef struct vmc_wlan_reg_reg_s {
+ volatile unsigned int wlan_mc_bcam_valid[128];
+ volatile unsigned int wlan_mc_bcam_compare[128];
+ volatile unsigned int wlan_mc_bcam_target[128];
+ volatile unsigned int wlan_apb_addr_error_control;
+ volatile unsigned int wlan_apb_addr_error_status;
+ volatile unsigned int wlan_ahb_addr_error_control;
+ volatile unsigned int wlan_ahb_addr_error_status;
+ volatile unsigned int wlan_bcam_conflict_error;
+ volatile unsigned int wlan_cpu_perf_cnt;
+ volatile unsigned int wlan_cpu_inst_fetch;
+ volatile unsigned int wlan_cpu_data_fetch;
+ volatile unsigned int wlan_cpu_ram1_conflict;
+ volatile unsigned int wlan_cpu_ram2_conflict;
+ volatile unsigned int wlan_cpu_ram3_conflict;
+ volatile unsigned int wlan_cpu_ram4_conflict;
+} vmc_wlan_reg_reg_t;
+
+#endif /* __ASSEMBLER__ */
+
+#endif /* _VMC_WLAN_REG_REG_H_ */
diff --git a/drivers/staging/ath6kl/include/common/a_hci.h b/drivers/staging/ath6kl/include/common/a_hci.h
new file mode 100644
index 000000000000..f2943466339f
--- /dev/null
+++ b/drivers/staging/ath6kl/include/common/a_hci.h
@@ -0,0 +1,682 @@
+//-
+// Copyright (c) 2009-2010 Atheros Communications Inc.
+// All rights reserved.
+//
+//
+// Permission to use, copy, modify, and/or distribute this software for any
+// purpose with or without fee is hereby granted, provided that the above
+// copyright notice and this permission notice appear in all copies.
+//
+// THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+// WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+// MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+// ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+// WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+// ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+// OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+//
+//
+//
+//
+
+
+#ifndef __A_HCI_H__
+#define __A_HCI_H__
+
+#define HCI_CMD_OGF_MASK 0x3F
+#define HCI_CMD_OGF_SHIFT 10
+#define HCI_CMD_GET_OGF(opcode) (((opcode) >> HCI_CMD_OGF_SHIFT) & HCI_CMD_OGF_MASK)
+
+#define HCI_CMD_OCF_MASK 0x3FF
+#define HCI_CMD_OCF_SHIFT 0
+#define HCI_CMD_GET_OCF(opcode) (((opcode) >> HCI_CMD_OCF_SHIFT) & HCI_CMD_OCF_MASK)
+
+#define HCI_FORM_OPCODE(ocf, ogf) ((ocf & HCI_CMD_OCF_MASK) << HCI_CMD_OCF_SHIFT | \
+ (ogf & HCI_CMD_OGF_MASK) << HCI_CMD_OGF_SHIFT)
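These three macros pack a 10-bit OCF and a 6-bit OGF into one 16-bit HCI opcode and split it back apart. A short illustrative sketch, not part of the patch; the numeric values correspond to OCF_HCI_Create_Physical_Link and OGF_LINK_CONTROL defined later in this header:

#include <stdio.h>

int main(void)
{
	unsigned short opcode = HCI_FORM_OPCODE(0x35, 0x01);	/* OCF 0x35, OGF 0x01 */

	/* Split the opcode back into its group and command fields. */
	printf("opcode=0x%04x ogf=0x%02x ocf=0x%03x\n",
	       opcode, HCI_CMD_GET_OGF(opcode), HCI_CMD_GET_OCF(opcode));
	/* Prints: opcode=0x0435 ogf=0x01 ocf=0x035 */
	return 0;
}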
+
+
+/*======== HCI Opcode groups ===============*/
+#define OGF_NOP 0x00
+#define OGF_LINK_CONTROL 0x01
+#define OGF_LINK_POLICY 0x03
+#define OGF_INFO_PARAMS 0x04
+#define OGF_STATUS 0x05
+#define OGF_TESTING 0x06
+#define OGF_BLUETOOTH 0x3E
+#define OGF_VENDOR_DEBUG 0x3F
+
+
+
+#define OCF_NOP 0x00
+
+
+/*===== Link Control Commands Opcode===================*/
+#define OCF_HCI_Create_Physical_Link 0x35
+#define OCF_HCI_Accept_Physical_Link_Req 0x36
+#define OCF_HCI_Disconnect_Physical_Link 0x37
+#define OCF_HCI_Create_Logical_Link 0x38
+#define OCF_HCI_Accept_Logical_Link 0x39
+#define OCF_HCI_Disconnect_Logical_Link 0x3A
+#define OCF_HCI_Logical_Link_Cancel 0x3B
+#define OCF_HCI_Flow_Spec_Modify 0x3C
+
+
+
+/*===== Link Policy Commands Opcode====================*/
+#define OCF_HCI_Set_Event_Mask 0x01
+#define OCF_HCI_Reset 0x03
+#define OCF_HCI_Read_Conn_Accept_Timeout 0x15
+#define OCF_HCI_Write_Conn_Accept_Timeout 0x16
+#define OCF_HCI_Read_Link_Supervision_Timeout 0x36
+#define OCF_HCI_Write_Link_Supervision_Timeout 0x37
+#define OCF_HCI_Enhanced_Flush 0x5F
+#define OCF_HCI_Read_Logical_Link_Accept_Timeout 0x61
+#define OCF_HCI_Write_Logical_Link_Accept_Timeout 0x62
+#define OCF_HCI_Set_Event_Mask_Page_2 0x63
+#define OCF_HCI_Read_Location_Data 0x64
+#define OCF_HCI_Write_Location_Data 0x65
+#define OCF_HCI_Read_Flow_Control_Mode 0x66
+#define OCF_HCI_Write_Flow_Control_Mode 0x67
+#define OCF_HCI_Read_BE_Flush_Timeout 0x69
+#define OCF_HCI_Write_BE_Flush_Timeout 0x6A
+#define OCF_HCI_Short_Range_Mode 0x6B
+
+
+/*======== Info Commands Opcode========================*/
+#define OCF_HCI_Read_Local_Ver_Info 0x01
+#define OCF_HCI_Read_Local_Supported_Cmds 0x02
+#define OCF_HCI_Read_Data_Block_Size 0x0A
+/*======== Status Commands Opcode======================*/
+#define OCF_HCI_Read_Failed_Contact_Counter 0x01
+#define OCF_HCI_Reset_Failed_Contact_Counter 0x02
+#define OCF_HCI_Read_Link_Quality 0x03
+#define OCF_HCI_Read_RSSI 0x05
+#define OCF_HCI_Read_Local_AMP_Info 0x09
+#define OCF_HCI_Read_Local_AMP_ASSOC 0x0A
+#define OCF_HCI_Write_Remote_AMP_ASSOC 0x0B
+
+
+/*======= AMP_ASSOC Specific TLV tags =================*/
+#define AMP_ASSOC_MAC_ADDRESS_INFO_TYPE 0x1
+#define AMP_ASSOC_PREF_CHAN_LIST 0x2
+#define AMP_ASSOC_CONNECTED_CHAN 0x3
+#define AMP_ASSOC_PAL_CAPABILITIES 0x4
+#define AMP_ASSOC_PAL_VERSION 0x5
+
+
+/*========= PAL Events =================================*/
+#define PAL_COMMAND_COMPLETE_EVENT 0x0E
+#define PAL_COMMAND_STATUS_EVENT 0x0F
+#define PAL_HARDWARE_ERROR_EVENT 0x10
+#define PAL_FLUSH_OCCURRED_EVENT 0x11
+#define PAL_LOOPBACK_EVENT 0x19
+#define PAL_BUFFER_OVERFLOW_EVENT 0x1A
+#define PAL_QOS_VIOLATION_EVENT 0x1E
+#define PAL_ENHANCED_FLUSH_COMPLT_EVENT 0x39
+#define PAL_PHYSICAL_LINK_COMPL_EVENT 0x40
+#define PAL_CHANNEL_SELECT_EVENT 0x41
+#define PAL_DISCONNECT_PHYSICAL_LINK_EVENT 0x42
+#define PAL_PHY_LINK_EARLY_LOSS_WARNING_EVENT 0x43
+#define PAL_PHY_LINK_RECOVERY_EVENT 0x44
+#define PAL_LOGICAL_LINK_COMPL_EVENT 0x45
+#define PAL_DISCONNECT_LOGICAL_LINK_COMPL_EVENT 0x46
+#define PAL_FLOW_SPEC_MODIFY_COMPL_EVENT 0x47
+#define PAL_NUM_COMPL_DATA_BLOCK_EVENT 0x48
+#define PAL_SHORT_RANGE_MODE_CHANGE_COMPL_EVENT 0x4C
+#define PAL_AMP_STATUS_CHANGE_EVENT 0x4D
+/*======== End of PAL events definition =================*/
+
+
+/*======== Timeouts (not part of HCI cmd, but input to PAL engine) =========*/
+#define Timer_Conn_Accept_TO 0x01
+#define Timer_Link_Supervision_TO 0x02
+
+#define NUM_HCI_COMMAND_PKTS 0x1
+
+
+/*====== NOP Cmd ============================*/
+#define HCI_CMD_NOP HCI_FORM_OPCODE(OCF_NOP, OGF_NOP)
+
+
+/*===== Link Control Commands================*/
+#define HCI_Create_Physical_Link HCI_FORM_OPCODE(OCF_HCI_Create_Physical_Link, OGF_LINK_CONTROL)
+#define HCI_Accept_Physical_Link_Req HCI_FORM_OPCODE(OCF_HCI_Accept_Physical_Link_Req, OGF_LINK_CONTROL)
+#define HCI_Disconnect_Physical_Link HCI_FORM_OPCODE(OCF_HCI_Disconnect_Physical_Link, OGF_LINK_CONTROL)
+#define HCI_Create_Logical_Link HCI_FORM_OPCODE(OCF_HCI_Create_Logical_Link, OGF_LINK_CONTROL)
+#define HCI_Accept_Logical_Link HCI_FORM_OPCODE(OCF_HCI_Accept_Logical_Link, OGF_LINK_CONTROL)
+#define HCI_Disconnect_Logical_Link HCI_FORM_OPCODE(OCF_HCI_Disconnect_Logical_Link, OGF_LINK_CONTROL)
+#define HCI_Logical_Link_Cancel HCI_FORM_OPCODE(OCF_HCI_Logical_Link_Cancel, OGF_LINK_CONTROL)
+#define HCI_Flow_Spec_Modify HCI_FORM_OPCODE(OCF_HCI_Flow_Spec_Modify, OGF_LINK_CONTROL)
+
+
+/*===== Link Policy Commands ================*/
+#define HCI_Set_Event_Mask HCI_FORM_OPCODE(OCF_HCI_Set_Event_Mask, OGF_LINK_POLICY)
+#define HCI_Reset HCI_FORM_OPCODE(OCF_HCI_Reset, OGF_LINK_POLICY)
+#define HCI_Enhanced_Flush HCI_FORM_OPCODE(OCF_HCI_Enhanced_Flush, OGF_LINK_POLICY)
+#define HCI_Read_Conn_Accept_Timeout HCI_FORM_OPCODE(OCF_HCI_Read_Conn_Accept_Timeout, OGF_LINK_POLICY)
+#define HCI_Write_Conn_Accept_Timeout HCI_FORM_OPCODE(OCF_HCI_Write_Conn_Accept_Timeout, OGF_LINK_POLICY)
+#define HCI_Read_Logical_Link_Accept_Timeout HCI_FORM_OPCODE(OCF_HCI_Read_Logical_Link_Accept_Timeout, OGF_LINK_POLICY)
+#define HCI_Write_Logical_Link_Accept_Timeout HCI_FORM_OPCODE(OCF_HCI_Write_Logical_Link_Accept_Timeout, OGF_LINK_POLICY)
+#define HCI_Read_Link_Supervision_Timeout HCI_FORM_OPCODE(OCF_HCI_Read_Link_Supervision_Timeout, OGF_LINK_POLICY)
+#define HCI_Write_Link_Supervision_Timeout HCI_FORM_OPCODE(OCF_HCI_Write_Link_Supervision_Timeout, OGF_LINK_POLICY)
+#define HCI_Read_Location_Data HCI_FORM_OPCODE(OCF_HCI_Read_Location_Data, OGF_LINK_POLICY)
+#define HCI_Write_Location_Data HCI_FORM_OPCODE(OCF_HCI_Write_Location_Data, OGF_LINK_POLICY)
+#define HCI_Set_Event_Mask_Page_2 HCI_FORM_OPCODE(OCF_HCI_Set_Event_Mask_Page_2, OGF_LINK_POLICY)
+#define HCI_Read_Flow_Control_Mode HCI_FORM_OPCODE(OCF_HCI_Read_Flow_Control_Mode, OGF_LINK_POLICY)
+#define HCI_Write_Flow_Control_Mode HCI_FORM_OPCODE(OCF_HCI_Write_Flow_Control_Mode, OGF_LINK_POLICY)
+#define HCI_Write_BE_Flush_Timeout HCI_FORM_OPCODE(OCF_HCI_Write_BE_Flush_Timeout, OGF_LINK_POLICY)
+#define HCI_Read_BE_Flush_Timeout HCI_FORM_OPCODE(OCF_HCI_Read_BE_Flush_Timeout, OGF_LINK_POLICY)
+#define HCI_Short_Range_Mode HCI_FORM_OPCODE(OCF_HCI_Short_Range_Mode, OGF_LINK_POLICY)
+
+
+/*===== Info Commands =====================*/
+#define HCI_Read_Local_Ver_Info HCI_FORM_OPCODE(OCF_HCI_Read_Local_Ver_Info, OGF_INFO_PARAMS)
+#define HCI_Read_Local_Supported_Cmds HCI_FORM_OPCODE(OCF_HCI_Read_Local_Supported_Cmds, OGF_INFO_PARAMS)
+#define HCI_Read_Data_Block_Size HCI_FORM_OPCODE(OCF_HCI_Read_Data_Block_Size, OGF_INFO_PARAMS)
+
+/*===== Status Commands =====================*/
+#define HCI_Read_Link_Quality HCI_FORM_OPCODE(OCF_HCI_Read_Link_Quality, OGF_STATUS)
+#define HCI_Read_RSSI HCI_FORM_OPCODE(OCF_HCI_Read_RSSI, OGF_STATUS)
+#define HCI_Read_Local_AMP_Info HCI_FORM_OPCODE(OCF_HCI_Read_Local_AMP_Info, OGF_STATUS)
+#define HCI_Read_Local_AMP_ASSOC HCI_FORM_OPCODE(OCF_HCI_Read_Local_AMP_ASSOC, OGF_STATUS)
+#define HCI_Write_Remote_AMP_ASSOC HCI_FORM_OPCODE(OCF_HCI_Write_Remote_AMP_ASSOC, OGF_STATUS)
+
+/*====== End of cmd definitions =============*/
+
+
+
+/*===== Timeouts(private - can't come from HCI)=================*/
+#define Conn_Accept_TO HCI_FORM_OPCODE(Timer_Conn_Accept_TO, OGF_VENDOR_DEBUG)
+#define Link_Supervision_TO HCI_FORM_OPCODE(Timer_Link_Supervision_TO, OGF_VENDOR_DEBUG)
+
+/*----- PAL Constants (Sec 6 of Doc)------------------------*/
+#define Max80211_PAL_PDU_Size 1492
+#define Max80211_AMP_ASSOC_Len 672
+#define MinGUserPrio 4
+#define MaxGUserPrio 7
+#define BEUserPrio0 0
+#define BEUserPrio1 3
+#define Max80211BeaconPeriod 2000 /* in millisec */
+#define ShortRangeModePowerMax 4 /* dBm */
+
+/*------ PAL Protocol Identifiers (Sec5.1) ------------------*/
+typedef enum {
+ ACL_DATA = 0x01,
+ ACTIVITY_REPORT,
+ SECURED_FRAMES,
+ LINK_SUPERVISION_REQ,
+ LINK_SUPERVISION_RESP,
+}PAL_PROTOCOL_IDENTIFIERS;
+
+#define HCI_CMD_HDR_SZ 3
+#define HCI_EVENT_HDR_SIZE 2
+#define MAX_EVT_PKT_SZ 255
+#define AMP_ASSOC_MAX_FRAG_SZ 248
+#define AMP_MAX_GUARANTEED_BW 20000
+
+#define DEFAULT_CONN_ACCPT_TO 5000
+#define DEFAULT_LL_ACCPT_TO 5000
+#define DEFAULT_LSTO 10000
+
+#define PACKET_BASED_FLOW_CONTROL_MODE 0x00
+#define DATA_BLK_BASED_FLOW_CONTROL_MODE 0x01
+
+#define SERVICE_TYPE_BEST_EFFORT 0x01
+#define SERVICE_TYPE_GUARANTEED 0x02
+
+#define MAC_ADDR_LEN 6
+#define LINK_KEY_LEN 32
+
+typedef enum {
+ ACL_DATA_PB_1ST_NON_AUTOMATICALLY_FLUSHABLE = 0x00,
+ ACL_DATA_PB_CONTINUING_FRAGMENT = 0x01,
+ ACL_DATA_PB_1ST_AUTOMATICALLY_FLUSHABLE = 0x02,
+ ACL_DATA_PB_COMPLETE_PDU = 0x03,
+} ACL_DATA_PB_FLAGS;
+#define ACL_DATA_PB_FLAGS_SHIFT 12
+
+typedef enum {
+ ACL_DATA_BC_POINT_TO_POINT = 0x00,
+} ACL_DATA_BC_FLAGS;
+#define ACL_DATA_BC_FLAGS_SHIFT 14
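The hdl_and_flags word of an ACL data packet carries the 12-bit connection handle in its low bits, the packet-boundary flag at ACL_DATA_PB_FLAGS_SHIFT, and the broadcast flag at ACL_DATA_BC_FLAGS_SHIFT. A minimal sketch of packing and unpacking that word (the handle value is arbitrary, and the snippet assumes this header is included):

#include <stdio.h>

int main(void)
{
	unsigned short handle = 0x02A;	/* arbitrary 12-bit logical link handle */

	/* Pack: handle in bits 11:0, PB flag in bits 13:12, BC flag in bits 15:14. */
	unsigned short hdl_and_flags = handle |
		(ACL_DATA_PB_COMPLETE_PDU << ACL_DATA_PB_FLAGS_SHIFT) |
		(ACL_DATA_BC_POINT_TO_POINT << ACL_DATA_BC_FLAGS_SHIFT);

	/* Unpack the same fields again. */
	printf("handle=0x%03x pb=%u bc=%u\n",
	       hdl_and_flags & 0x0fff,
	       (hdl_and_flags >> ACL_DATA_PB_FLAGS_SHIFT) & 0x3,
	       (hdl_and_flags >> ACL_DATA_BC_FLAGS_SHIFT) & 0x3);	/* handle=0x02a pb=3 bc=0 */
	return 0;
}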
+
+/* Command pkt */
+typedef struct hci_cmd_pkt_t {
+ A_UINT16 opcode;
+ A_UINT8 param_length;
+ A_UINT8 params[255];
+} POSTPACK HCI_CMD_PKT;
+
+#define ACL_DATA_HDR_SIZE 4 /* hdl_and flags + data_len */
+/* Data pkt */
+typedef struct hci_acl_data_pkt_t {
+ A_UINT16 hdl_and_flags;
+ A_UINT16 data_len;
+ A_UINT8 data[Max80211_PAL_PDU_Size];
+} POSTPACK HCI_ACL_DATA_PKT;
+
+/* Event pkt */
+typedef struct hci_event_pkt_t {
+ A_UINT8 event_code;
+ A_UINT8 param_len;
+ A_UINT8 params[256];
+} POSTPACK HCI_EVENT_PKT;
+
+
+/*============== HCI Command definitions ======================= */
+typedef struct hci_cmd_phy_link_t {
+ A_UINT16 opcode;
+ A_UINT8 param_length;
+ A_UINT8 phy_link_hdl;
+ A_UINT8 link_key_len;
+ A_UINT8 link_key_type;
+ A_UINT8 link_key[LINK_KEY_LEN];
+} POSTPACK HCI_CMD_PHY_LINK;
+
+typedef struct hci_cmd_write_rem_amp_assoc_t {
+ A_UINT16 opcode;
+ A_UINT8 param_length;
+ A_UINT8 phy_link_hdl;
+ A_UINT16 len_so_far;
+ A_UINT16 amp_assoc_remaining_len;
+ A_UINT8 amp_assoc_frag[AMP_ASSOC_MAX_FRAG_SZ];
+} POSTPACK HCI_CMD_WRITE_REM_AMP_ASSOC;
+
+
+typedef struct hci_cmd_opcode_hdl_t {
+ A_UINT16 opcode;
+ A_UINT8 param_length;
+ A_UINT16 hdl;
+} POSTPACK HCI_CMD_READ_LINK_QUAL,
+ HCI_CMD_FLUSH,
+ HCI_CMD_READ_LINK_SUPERVISION_TIMEOUT;
+
+typedef struct hci_cmd_read_local_amp_assoc_t {
+ A_UINT16 opcode;
+ A_UINT8 param_length;
+ A_UINT8 phy_link_hdl;
+ A_UINT16 len_so_far;
+ A_UINT16 max_rem_amp_assoc_len;
+} POSTPACK HCI_CMD_READ_LOCAL_AMP_ASSOC;
+
+
+typedef struct hci_cmd_set_event_mask_t {
+ A_UINT16 opcode;
+ A_UINT8 param_length;
+ A_UINT64 mask;
+}POSTPACK HCI_CMD_SET_EVT_MASK, HCI_CMD_SET_EVT_MASK_PG_2;
+
+
+typedef struct hci_cmd_enhanced_flush_t{
+ A_UINT16 opcode;
+ A_UINT8 param_length;
+ A_UINT16 hdl;
+ A_UINT8 type;
+} POSTPACK HCI_CMD_ENHANCED_FLUSH;
+
+
+typedef struct hci_cmd_write_timeout_t {
+ A_UINT16 opcode;
+ A_UINT8 param_length;
+ A_UINT16 timeout;
+} POSTPACK HCI_CMD_WRITE_TIMEOUT;
+
+typedef struct hci_cmd_write_link_supervision_timeout_t {
+ A_UINT16 opcode;
+ A_UINT8 param_length;
+ A_UINT16 hdl;
+ A_UINT16 timeout;
+} POSTPACK HCI_CMD_WRITE_LINK_SUPERVISION_TIMEOUT;
+
+typedef struct hci_cmd_write_flow_control_t {
+ A_UINT16 opcode;
+ A_UINT8 param_length;
+ A_UINT8 mode;
+} POSTPACK HCI_CMD_WRITE_FLOW_CONTROL;
+
+typedef struct location_data_cfg_t {
+ A_UINT8 reg_domain_aware;
+ A_UINT8 reg_domain[3];
+ A_UINT8 reg_options;
+} POSTPACK LOCATION_DATA_CFG;
+
+typedef struct hci_cmd_write_location_data_t {
+ A_UINT16 opcode;
+ A_UINT8 param_length;
+ LOCATION_DATA_CFG cfg;
+} POSTPACK HCI_CMD_WRITE_LOCATION_DATA;
+
+
+typedef struct flow_spec_t {
+ A_UINT8 id;
+ A_UINT8 service_type;
+ A_UINT16 max_sdu;
+ A_UINT32 sdu_inter_arrival_time;
+ A_UINT32 access_latency;
+ A_UINT32 flush_timeout;
+} POSTPACK FLOW_SPEC;
+
+
+typedef struct hci_cmd_create_logical_link_t {
+ A_UINT16 opcode;
+ A_UINT8 param_length;
+ A_UINT8 phy_link_hdl;
+ FLOW_SPEC tx_flow_spec;
+ FLOW_SPEC rx_flow_spec;
+} POSTPACK HCI_CMD_CREATE_LOGICAL_LINK;
+
+typedef struct hci_cmd_flow_spec_modify_t {
+ A_UINT16 opcode;
+ A_UINT8 param_length;
+ A_UINT16 hdl;
+ FLOW_SPEC tx_flow_spec;
+ FLOW_SPEC rx_flow_spec;
+} POSTPACK HCI_CMD_FLOW_SPEC_MODIFY;
+
+typedef struct hci_cmd_logical_link_cancel_t {
+ A_UINT16 opcode;
+ A_UINT8 param_length;
+ A_UINT8 phy_link_hdl;
+ A_UINT8 tx_flow_spec_id;
+} POSTPACK HCI_CMD_LOGICAL_LINK_CANCEL;
+
+typedef struct hci_cmd_disconnect_logical_link_t {
+ A_UINT16 opcode;
+ A_UINT8 param_length;
+ A_UINT16 logical_link_hdl;
+} POSTPACK HCI_CMD_DISCONNECT_LOGICAL_LINK;
+
+typedef struct hci_cmd_disconnect_phy_link_t {
+ A_UINT16 opcode;
+ A_UINT8 param_length;
+ A_UINT8 phy_link_hdl;
+} POSTPACK HCI_CMD_DISCONNECT_PHY_LINK;
+
+typedef struct hci_cmd_srm_t {
+ A_UINT16 opcode;
+ A_UINT8 param_length;
+ A_UINT8 phy_link_hdl;
+ A_UINT8 mode;
+} POSTPACK HCI_CMD_SHORT_RANGE_MODE;
+/*============== HCI Command definitions end ======================= */
+
+
+
+/*============== HCI Event definitions ============================= */
+
+/* Command complete event */
+typedef struct hci_event_cmd_complete_t {
+ A_UINT8 event_code;
+ A_UINT8 param_len;
+ A_UINT8 num_hci_cmd_pkts;
+ A_UINT16 opcode;
+ A_UINT8 params[255];
+} POSTPACK HCI_EVENT_CMD_COMPLETE;
+
+
+/* Command status event */
+typedef struct hci_event_cmd_status_t {
+ A_UINT8 event_code;
+ A_UINT8 param_len;
+ A_UINT8 status;
+ A_UINT8 num_hci_cmd_pkts;
+ A_UINT16 opcode;
+} POSTPACK HCI_EVENT_CMD_STATUS;
+
+/* Hardware Error event */
+typedef struct hci_event_hw_err_t {
+ A_UINT8 event_code;
+ A_UINT8 param_len;
+ A_UINT8 hw_err_code;
+} POSTPACK HCI_EVENT_HW_ERR;
+
+/* Flush occurred event */
+/* Qos Violation event */
+typedef struct hci_event_handle_t {
+ A_UINT8 event_code;
+ A_UINT8 param_len;
+ A_UINT16 handle;
+} POSTPACK HCI_EVENT_FLUSH_OCCRD,
+ HCI_EVENT_QOS_VIOLATION;
+
+/* Loopback command event */
+typedef struct hci_loopback_cmd_t {
+ A_UINT8 event_code;
+ A_UINT8 param_len;
+ A_UINT8 params[252];
+} POSTPACK HCI_EVENT_LOOPBACK_CMD;
+
+/* Data buffer overflow event */
+typedef struct hci_data_buf_overflow_t {
+ A_UINT8 event_code;
+ A_UINT8 param_len;
+ A_UINT8 link_type;
+} POSTPACK HCI_EVENT_DATA_BUF_OVERFLOW;
+
+/* Enhanced Flush complete event */
+typedef struct hci_enhanced_flush_complt_t{
+ A_UINT8 event_code;
+ A_UINT8 param_len;
+ A_UINT16 hdl;
+} POSTPACK HCI_EVENT_ENHANCED_FLUSH_COMPLT;
+
+/* Channel select event */
+typedef struct hci_event_chan_select_t {
+ A_UINT8 event_code;
+ A_UINT8 param_len;
+ A_UINT8 phy_link_hdl;
+} POSTPACK HCI_EVENT_CHAN_SELECT;
+
+/* Physical Link Complete event */
+typedef struct hci_event_phy_link_complete_event_t {
+ A_UINT8 event_code;
+ A_UINT8 param_len;
+ A_UINT8 status;
+ A_UINT8 phy_link_hdl;
+} POSTPACK HCI_EVENT_PHY_LINK_COMPLETE;
+
+/* Logical Link complete event */
+typedef struct hci_event_logical_link_complete_event_t {
+ A_UINT8 event_code;
+ A_UINT8 param_len;
+ A_UINT8 status;
+ A_UINT16 logical_link_hdl;
+ A_UINT8 phy_hdl;
+ A_UINT8 tx_flow_id;
+} POSTPACK HCI_EVENT_LOGICAL_LINK_COMPLETE_EVENT;
+
+/* Disconnect Logical Link complete event */
+typedef struct hci_event_disconnect_logical_link_event_t {
+ A_UINT8 event_code;
+ A_UINT8 param_len;
+ A_UINT8 status;
+ A_UINT16 logical_link_hdl;
+ A_UINT8 reason;
+} POSTPACK HCI_EVENT_DISCONNECT_LOGICAL_LINK_EVENT;
+
+/* Disconnect Physical Link complete event */
+typedef struct hci_event_disconnect_phy_link_complete_t {
+ A_UINT8 event_code;
+ A_UINT8 param_len;
+ A_UINT8 status;
+ A_UINT8 phy_link_hdl;
+ A_UINT8 reason;
+} POSTPACK HCI_EVENT_DISCONNECT_PHY_LINK_COMPLETE;
+
+typedef struct hci_event_physical_link_loss_early_warning_t{
+ A_UINT8 event_code;
+ A_UINT8 param_len;
+ A_UINT8 phy_hdl;
+ A_UINT8 reason;
+} POSTPACK HCI_EVENT_PHY_LINK_LOSS_EARLY_WARNING;
+
+typedef struct hci_event_physical_link_recovery_t{
+ A_UINT8 event_code;
+ A_UINT8 param_len;
+ A_UINT8 phy_hdl;
+} POSTPACK HCI_EVENT_PHY_LINK_RECOVERY;
+
+
+/* Flow spec modify complete event */
+/* Flush event */
+typedef struct hci_event_status_handle_t {
+ A_UINT8 event_code;
+ A_UINT8 param_len;
+ A_UINT8 status;
+ A_UINT16 handle;
+} POSTPACK HCI_EVENT_FLOW_SPEC_MODIFY,
+ HCI_EVENT_FLUSH;
+
+
+/* Num of completed data blocks event */
+typedef struct hci_event_num_of_compl_data_blks_t {
+ A_UINT8 event_code;
+ A_UINT8 param_len;
+ A_UINT16 num_data_blks;
+ A_UINT8 num_handles;
+ A_UINT8 params[255];
+} POSTPACK HCI_EVENT_NUM_COMPL_DATA_BLKS;
+
+/* Short range mode change complete event */
+typedef struct hci_srm_cmpl_t {
+ A_UINT8 event_code;
+ A_UINT8 param_len;
+ A_UINT8 status;
+ A_UINT8 phy_link;
+ A_UINT8 state;
+} POSTPACK HCI_EVENT_SRM_COMPL;
+
+typedef struct hci_event_amp_status_change_t{
+ A_UINT8 event_code;
+ A_UINT8 param_len;
+ A_UINT8 status;
+ A_UINT8 amp_status;
+} POSTPACK HCI_EVENT_AMP_STATUS_CHANGE;
+
+/*============== Event definitions end =========================== */
+
+
+typedef struct local_amp_info_resp_t {
+ A_UINT8 status;
+ A_UINT8 amp_status;
+ A_UINT32 total_bw; /* kbps */
+ A_UINT32 max_guranteed_bw; /* kbps */
+ A_UINT32 min_latency;
+ A_UINT32 max_pdu_size;
+ A_UINT8 amp_type;
+ A_UINT16 pal_capabilities;
+ A_UINT16 amp_assoc_len;
+ A_UINT32 max_flush_timeout; /* in ms */
+ A_UINT32 be_flush_timeout; /* in ms */
+} POSTPACK LOCAL_AMP_INFO;
+
+typedef struct amp_assoc_cmd_resp_t{
+ A_UINT8 status;
+ A_UINT8 phy_hdl;
+ A_UINT16 amp_assoc_len;
+ A_UINT8 amp_assoc_frag[AMP_ASSOC_MAX_FRAG_SZ];
+}POSTPACK AMP_ASSOC_CMD_RESP;
+
+
+enum PAL_HCI_CMD_STATUS {
+ PAL_HCI_CMD_PROCESSED,
+ PAL_HCI_CMD_IGNORED
+};
+
+
+/*============= HCI Error Codes =======================*/
+#define HCI_SUCCESS 0x00
+#define HCI_ERR_UNKNOW_CMD 0x01
+#define HCI_ERR_UNKNOWN_CONN_ID 0x02
+#define HCI_ERR_HW_FAILURE 0x03
+#define HCI_ERR_PAGE_TIMEOUT 0x04
+#define HCI_ERR_AUTH_FAILURE 0x05
+#define HCI_ERR_KEY_MISSING 0x06
+#define HCI_ERR_MEM_CAP_EXECED 0x07
+#define HCI_ERR_CON_TIMEOUT 0x08
+#define HCI_ERR_CON_LIMIT_EXECED 0x09
+#define HCI_ERR_ACL_CONN_ALRDY_EXISTS 0x0B
+#define HCI_ERR_COMMAND_DISALLOWED 0x0C
+#define HCI_ERR_CONN_REJ_BY_LIMIT_RES 0x0D
+#define HCI_ERR_CONN_REJ_BY_SEC 0x0E
+#define HCI_ERR_CONN_REJ_BY_BAD_ADDR 0x0F
+#define HCI_ERR_CONN_ACCPT_TIMEOUT 0x10
+#define HCI_ERR_UNSUPPORT_FEATURE 0x11
+#define HCI_ERR_INVALID_HCI_CMD_PARAMS 0x12
+#define HCI_ERR_REMOTE_USER_TERMINATE_CONN 0x13
+#define HCI_ERR_CON_TERM_BY_HOST 0x16
+#define HCI_ERR_UNSPECIFIED_ERROR 0x1F
+#define HCI_ERR_ENCRYPTION_MODE_NOT_SUPPORT 0x25
+#define HCI_ERR_REQUESTED_QOS_NOT_SUPPORT 0x27
+#define HCI_ERR_QOS_UNACCEPTABLE_PARM 0x2C
+#define HCI_ERR_QOS_REJECTED 0x2D
+#define HCI_ERR_CONN_REJ_NO_SUITABLE_CHAN 0x39
+
+/*============= HCI Error Codes End =======================*/
+
+
+/* Following are event return parameters, part of HCI events
+ */
+typedef struct timeout_read_t {
+ A_UINT8 status;
+ A_UINT16 timeout;
+}POSTPACK TIMEOUT_INFO;
+
+typedef struct link_supervision_timeout_read_t {
+ A_UINT8 status;
+ A_UINT16 hdl;
+ A_UINT16 timeout;
+}POSTPACK LINK_SUPERVISION_TIMEOUT_INFO;
+
+typedef struct status_hdl_t {
+ A_UINT8 status;
+ A_UINT16 hdl;
+}POSTPACK INFO_STATUS_HDL;
+
+typedef struct write_remote_amp_assoc_t{
+ A_UINT8 status;
+ A_UINT8 hdl;
+}POSTPACK WRITE_REMOTE_AMP_ASSOC_INFO;
+
+typedef struct read_loc_info_t {
+ A_UINT8 status;
+ LOCATION_DATA_CFG loc;
+}POSTPACK READ_LOC_INFO;
+
+typedef struct read_flow_ctrl_mode_t {
+ A_UINT8 status;
+ A_UINT8 mode;
+}POSTPACK READ_FLWCTRL_INFO;
+
+typedef struct read_data_blk_size_t {
+ A_UINT8 status;
+ A_UINT16 max_acl_data_pkt_len;
+ A_UINT16 data_block_len;
+ A_UINT16 total_num_data_blks;
+}POSTPACK READ_DATA_BLK_SIZE_INFO;
+
+/* Read Link quality info */
+typedef struct link_qual_t {
+ A_UINT8 status;
+ A_UINT16 hdl;
+ A_UINT8 link_qual;
+} POSTPACK READ_LINK_QUAL_INFO,
+ READ_RSSI_INFO;
+
+typedef struct ll_cancel_resp_t {
+ A_UINT8 status;
+ A_UINT8 phy_link_hdl;
+ A_UINT8 tx_flow_spec_id;
+} POSTPACK LL_CANCEL_RESP;
+
+typedef struct read_local_ver_info_t {
+ A_UINT8 status;
+ A_UINT8 hci_version;
+ A_UINT16 hci_revision;
+ A_UINT8 pal_version;
+ A_UINT16 manf_name;
+ A_UINT16 pal_sub_ver;
+} POSTPACK READ_LOCAL_VER_INFO;
+
+
+#endif /* __A_HCI_H__ */
diff --git a/drivers/staging/ath6kl/include/common/athdefs.h b/drivers/staging/ath6kl/include/common/athdefs.h
new file mode 100644
index 000000000000..b59bfd3af0a5
--- /dev/null
+++ b/drivers/staging/ath6kl/include/common/athdefs.h
@@ -0,0 +1,84 @@
+//------------------------------------------------------------------------------
+// <copyright file="athdefs.h" company="Atheros">
+// Copyright (c) 2004-2010 Atheros Corporation. All rights reserved.
+//
+//
+// Permission to use, copy, modify, and/or distribute this software for any
+// purpose with or without fee is hereby granted, provided that the above
+// copyright notice and this permission notice appear in all copies.
+//
+// THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+// WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+// MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+// ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+// WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+// ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+// OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+//
+//
+//------------------------------------------------------------------------------
+//==============================================================================
+// Author(s): ="Atheros"
+//==============================================================================
+#ifndef __ATHDEFS_H__
+#define __ATHDEFS_H__
+
+/*
+ * This file contains definitions that may be used across both
+ * Host and Target software. Nothing here is module-dependent
+ * or platform-dependent.
+ */
+
+/*
+ * Generic error codes that can be used by hw, sta, ap, sim, dk
+ * and any other environments. Since these are enums, feel free to
+ * add any more codes that you need.
+ */
+
+typedef enum {
+ A_ERROR = -1, /* Generic error return */
+ A_OK = 0, /* success */
+ /* Following values start at 1 */
+ A_DEVICE_NOT_FOUND, /* not able to find PCI device */
+ A_NO_MEMORY, /* not able to allocate memory, not available */
+ A_MEMORY_NOT_AVAIL, /* memory region is not free for mapping */
+ A_NO_FREE_DESC, /* no free descriptors available */
+ A_BAD_ADDRESS, /* address does not match descriptor */
+ A_WIN_DRIVER_ERROR, /* used in NT_HW version, if problem at init */
+ A_REGS_NOT_MAPPED, /* registers not correctly mapped */
+ A_EPERM, /* Not superuser */
+ A_EACCES, /* Access denied */
+ A_ENOENT, /* No such entry, search failed, etc. */
+ A_EEXIST, /* The object already exists (can't create) */
+ A_EFAULT, /* Bad address fault */
+ A_EBUSY, /* Object is busy */
+ A_EINVAL, /* Invalid parameter */
+ A_EMSGSIZE, /* Inappropriate message buffer length */
+ A_ECANCELED, /* Operation canceled */
+ A_ENOTSUP, /* Operation not supported */
+ A_ECOMM, /* Communication error on send */
+ A_EPROTO, /* Protocol error */
+ A_ENODEV, /* No such device */
+ A_EDEVNOTUP, /* device is not UP */
+ A_NO_RESOURCE, /* No resources for requested operation */
+ A_HARDWARE, /* Hardware failure */
+ A_PENDING, /* Asynchronous routine; will send up results later (typically in callback) */
+ A_EBADCHANNEL, /* The channel cannot be used */
+ A_DECRYPT_ERROR, /* Decryption error */
+ A_PHY_ERROR, /* RX PHY error */
+ A_CONSUMED /* Object was consumed */
+} A_STATUS;
+
+#define A_SUCCESS(x) (x == A_OK)
+#define A_FAILED(x) (!A_SUCCESS(x))
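A_SUCCESS()/A_FAILED() are how callers are expected to test an A_STATUS return instead of comparing against A_OK directly. A hedged sketch using a hypothetical helper (check_len is purely illustrative, not part of the driver):

#include <stdio.h>

/* Hypothetical helper that validates a buffer length and returns an A_STATUS. */
static A_STATUS check_len(int len)
{
	if (len <= 0)
		return A_EINVAL;
	if (len > 1500)
		return A_EMSGSIZE;
	return A_OK;
}

int main(void)
{
	A_STATUS status = check_len(2000);

	if (A_FAILED(status))
		printf("rejected, status=%d\n", status);	/* A_EMSGSIZE in this case */
	return 0;
}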
+
+#ifndef TRUE
+#define TRUE 1
+#endif
+
+#ifndef FALSE
+#define FALSE 0
+#endif
+
+#endif /* __ATHDEFS_H__ */
diff --git a/drivers/staging/ath6kl/include/common/bmi_msg.h b/drivers/staging/ath6kl/include/common/bmi_msg.h
new file mode 100644
index 000000000000..f9687d325b2f
--- /dev/null
+++ b/drivers/staging/ath6kl/include/common/bmi_msg.h
@@ -0,0 +1,241 @@
+//------------------------------------------------------------------------------
+// Copyright (c) 2004-2010 Atheros Corporation. All rights reserved.
+//
+//
+// Permission to use, copy, modify, and/or distribute this software for any
+// purpose with or without fee is hereby granted, provided that the above
+// copyright notice and this permission notice appear in all copies.
+//
+// THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+// WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+// MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+// ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+// WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+// ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+// OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+//
+//
+//
+// Author(s): ="Atheros"
+//------------------------------------------------------------------------------
+
+#ifndef __BMI_MSG_H__
+#define __BMI_MSG_H__
+
+#ifndef ATH_TARGET
+#include "athstartpack.h"
+#endif
+
+/*
+ * Bootloader Messaging Interface (BMI)
+ *
+ * BMI is a very simple messaging interface used during initialization
+ * to read memory, write memory, execute code, and to define an
+ * application entry PC.
+ *
+ * It is used to download an application to AR6K, to provide
+ * patches to code that is already resident on AR6K, and generally
+ * to examine and modify state. The Host has an opportunity to use
+ * BMI only once during bootup. Once the Host issues a BMI_DONE
+ * command, this opportunity ends.
+ *
+ * The Host writes BMI requests to mailbox0, and reads BMI responses
+ * from mailbox0. BMI requests all begin with a command
+ * (see below for specific commands), and are followed by
+ * command-specific data.
+ *
+ * Flow control:
+ * The Host can only issue a command once the Target gives it a
+ * "BMI Command Credit", using AR6K Counter #4. As soon as the
+ * Target has completed a command, it issues another BMI Command
+ * Credit (so the Host can issue the next command).
+ *
+ * BMI handles all required Target-side cache flushing.
+ */
+
+
+/* Maximum data size used for BMI transfers */
+#define BMI_DATASZ_MAX 256
+
+/* BMI Commands */
+
+#define BMI_NO_COMMAND 0
+
+#define BMI_DONE 1
+ /*
+ * Semantics: Host is done using BMI
+ * Request format:
+ * A_UINT32 command (BMI_DONE)
+ * Response format: none
+ */
+
+#define BMI_READ_MEMORY 2
+ /*
+ * Semantics: Host reads AR6K memory
+ * Request format:
+ * A_UINT32 command (BMI_READ_MEMORY)
+ * A_UINT32 address
+ * A_UINT32 length, at most BMI_DATASZ_MAX
+ * Response format:
+ * A_UINT8 data[length]
+ */
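Each BMI request is the 32-bit command word followed by the command-specific fields listed in these comments. As a rough sketch only (the struct layout and the buffer-handing convention are illustrative, not the driver's actual marshalling code, and the A_UINT32/A_UINT8 typedefs are assumed to be in scope), a BMI_READ_MEMORY request could be laid out like this before being written to mailbox0:

#include <string.h>

/* Illustrative request layout for BMI_READ_MEMORY: command, address, length. */
struct bmi_read_memory_req {
	A_UINT32 command;	/* BMI_READ_MEMORY */
	A_UINT32 address;	/* target address to read from */
	A_UINT32 length;	/* clamped to BMI_DATASZ_MAX */
};

static void build_read_req(A_UINT8 *buf, A_UINT32 address, A_UINT32 length)
{
	struct bmi_read_memory_req req;

	req.command = BMI_READ_MEMORY;
	req.address = address;
	req.length  = length <= BMI_DATASZ_MAX ? length : BMI_DATASZ_MAX;
	memcpy(buf, &req, sizeof(req));	/* caller then sends buf over mailbox0 */
}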
+
+#define BMI_WRITE_MEMORY 3
+ /*
+ * Semantics: Host writes AR6K memory
+ * Request format:
+ * A_UINT32 command (BMI_WRITE_MEMORY)
+ * A_UINT32 address
+ * A_UINT32 length, at most BMI_DATASZ_MAX
+ * A_UINT8 data[length]
+ * Response format: none
+ */
+
+#define BMI_EXECUTE 4
+ /*
+ * Semantics: Causes AR6K to execute code
+ * Request format:
+ * A_UINT32 command (BMI_EXECUTE)
+ * A_UINT32 address
+ * A_UINT32 parameter
+ * Response format:
+ * A_UINT32 return value
+ */
+
+#define BMI_SET_APP_START 5
+ /*
+ * Semantics: Set Target application starting address
+ * Request format:
+ * A_UINT32 command (BMI_SET_APP_START)
+ * A_UINT32 address
+ * Response format: none
+ */
+
+#define BMI_READ_SOC_REGISTER 6
+ /*
+ * Semantics: Read a 32-bit Target SOC register.
+ * Request format:
+ * A_UINT32 command (BMI_READ_REGISTER)
+ * A_UINT32 address
+ * Response format:
+ * A_UINT32 value
+ */
+
+#define BMI_WRITE_SOC_REGISTER 7
+ /*
+ * Semantics: Write a 32-bit Target SOC register.
+ * Request format:
+ * A_UINT32 command (BMI_WRITE_REGISTER)
+ * A_UINT32 address
+ * A_UINT32 value
+ *
+ * Response format: none
+ */
+
+#define BMI_GET_TARGET_ID 8
+#define BMI_GET_TARGET_INFO 8
+ /*
+ * Semantics: Fetch the 4-byte Target information
+ * Request format:
+ * A_UINT32 command (BMI_GET_TARGET_ID/INFO)
+ * Response format1 (old firmware):
+ * A_UINT32 TargetVersionID
+ * Response format2 (newer firmware):
+ * A_UINT32 TARGET_VERSION_SENTINAL
+ * struct bmi_target_info;
+ */
+
+PREPACK struct bmi_target_info {
+ A_UINT32 target_info_byte_count; /* size of this structure */
+ A_UINT32 target_ver; /* Target Version ID */
+ A_UINT32 target_type; /* Target type */
+} POSTPACK;
+#define TARGET_VERSION_SENTINAL 0xffffffff
+#define TARGET_TYPE_AR6001 1
+#define TARGET_TYPE_AR6002 2
+#define TARGET_TYPE_AR6003 3
+
+
+#define BMI_ROMPATCH_INSTALL 9
+ /*
+ * Semantics: Install a ROM Patch.
+ * Request format:
+ * A_UINT32 command (BMI_ROMPATCH_INSTALL)
+ * A_UINT32 Target ROM Address
+ * A_UINT32 Target RAM Address or Value (depending on Target Type)
+ * A_UINT32 Size, in bytes
+ * A_UINT32 Activate? 1-->activate;
+ * 0-->install but do not activate
+ * Response format:
+ * A_UINT32 PatchID
+ */
+
+#define BMI_ROMPATCH_UNINSTALL 10
+ /*
+ * Semantics: Uninstall a previously-installed ROM Patch,
+ * automatically deactivating, if necessary.
+ * Request format:
+ * A_UINT32 command (BMI_ROMPATCH_UNINSTALL)
+ * A_UINT32 PatchID
+ *
+ * Response format: none
+ */
+
+#define BMI_ROMPATCH_ACTIVATE 11
+ /*
+ * Semantics: Activate a list of previously-installed ROM Patches.
+ * Request format:
+ * A_UINT32 command (BMI_ROMPATCH_ACTIVATE)
+ * A_UINT32 rompatch_count
+ * A_UINT32 PatchID[rompatch_count]
+ *
+ * Response format: none
+ */
+
+#define BMI_ROMPATCH_DEACTIVATE 12
+ /*
+ * Semantics: Deactivate a list of active ROM Patches.
+ * Request format:
+ * A_UINT32 command (BMI_ROMPATCH_DEACTIVATE)
+ * A_UINT32 rompatch_count
+ * A_UINT32 PatchID[rompatch_count]
+ *
+ * Response format: none
+ */
+
+
+#define BMI_LZ_STREAM_START 13
+ /*
+ * Semantics: Begin an LZ-compressed stream of input
+ * which is to be uncompressed by the Target to an
+ * output buffer at address. The output buffer must
+ * be sufficiently large to hold the uncompressed
+ * output from the compressed input stream. This BMI
+ * command should be followed by a series of 1 or more
+ * BMI_LZ_DATA commands.
+ * A_UINT32 command (BMI_LZ_STREAM_START)
+ * A_UINT32 address
+ * Note: Not supported on all versions of ROM firmware.
+ */
+
+#define BMI_LZ_DATA 14
+ /*
+ * Semantics: Host writes AR6K memory with LZ-compressed
+ * data which is uncompressed by the Target. This command
+ * must be preceded by a BMI_LZ_STREAM_START command. A series
+ * of BMI_LZ_DATA commands are considered part of a single
+ * input stream until another BMI_LZ_STREAM_START is issued.
+ * Request format:
+ * A_UINT32 command (BMI_LZ_DATA)
+ * A_UINT32 length (of compressed data),
+ * at most BMI_DATASZ_MAX
+ * A_UINT8 CompressedData[length]
+ * Response format: none
+ * Note: Not supported on all versions of ROM firmware.
+ */
+
+#ifndef ATH_TARGET
+#include "athendpack.h"
+#endif
+
+#endif /* __BMI_MSG_H__ */
diff --git a/drivers/staging/ath6kl/include/common/btcoexGpio.h b/drivers/staging/ath6kl/include/common/btcoexGpio.h
new file mode 100644
index 000000000000..bc067f557eaa
--- /dev/null
+++ b/drivers/staging/ath6kl/include/common/btcoexGpio.h
@@ -0,0 +1,86 @@
+// Copyright (c) 2010 Atheros Communications Inc.
+// All rights reserved.
+//
+//
+// Permission to use, copy, modify, and/or distribute this software for any
+// purpose with or without fee is hereby granted, provided that the above
+// copyright notice and this permission notice appear in all copies.
+//
+// THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+// WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+// MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+// ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+// WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+// ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+// OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+//
+//
+
+#ifndef BTCOEX_GPIO_H_
+#define BTCOEX_GPIO_H_
+
+
+
+#ifdef FPGA
+#define GPIO_A (15)
+#define GPIO_B (16)
+#define GPIO_C (17)
+#define GPIO_D (18)
+#define GPIO_E (19)
+#define GPIO_F (21)
+#define GPIO_G (21)
+#else
+#define GPIO_A (0)
+#define GPIO_B (5)
+#define GPIO_C (6)
+#define GPIO_D (7)
+#define GPIO_E (7)
+#define GPIO_F (7)
+#define GPIO_G (7)
+#endif
+
+
+
+
+
+#define GPIO_DEBUG_WORD_1 (1<<GPIO_A)
+#define GPIO_DEBUG_WORD_2 (1<<GPIO_B)
+#define GPIO_DEBUG_WORD_3 ((1<<GPIO_B) | (1<<GPIO_A))
+#define GPIO_DEBUG_WORD_4 (1<<GPIO_C)
+#define GPIO_DEBUG_WORD_5 ((1<<GPIO_C) | (1<<GPIO_A))
+#define GPIO_DEBUG_WORD_6 ((1<<GPIO_C) | (1<<GPIO_B))
+#define GPIO_DEBUG_WORD_7 ((1<<GPIO_C) | (1<<GPIO_B) | (1<<GPIO_A))
+
+#define GPIO_DEBUG_WORD_8 (1<<GPIO_D)
+#define GPIO_DEBUG_WORD_9 ((1<<GPIO_D) | GPIO_DEBUG_WORD_1)
+#define GPIO_DEBUG_WORD_10 ((1<<GPIO_D) | GPIO_DEBUG_WORD_2)
+#define GPIO_DEBUG_WORD_11 ((1<<GPIO_D) | GPIO_DEBUG_WORD_3)
+#define GPIO_DEBUG_WORD_12 ((1<<GPIO_D) | GPIO_DEBUG_WORD_4)
+#define GPIO_DEBUG_WORD_13 ((1<<GPIO_D) | GPIO_DEBUG_WORD_5)
+#define GPIO_DEBUG_WORD_14 ((1<<GPIO_D) | GPIO_DEBUG_WORD_6)
+#define GPIO_DEBUG_WORD_15 ((1<<GPIO_D) | GPIO_DEBUG_WORD_7)
+
+#define GPIO_DEBUG_WORD_16 (1<<GPIO_E)
+#define GPIO_DEBUG_WORD_17 ((1<<GPIO_E) | GPIO_DEBUG_WORD_1)
+#define GPIO_DEBUG_WORD_18 ((1<<GPIO_E) | GPIO_DEBUG_WORD_2)
+#define GPIO_DEBUG_WORD_19 ((1<<GPIO_E) | GPIO_DEBUG_WORD_3)
+#define GPIO_DEBUG_WORD_20 ((1<<GPIO_E) | GPIO_DEBUG_WORD_4)
+#define GPIO_DEBUG_WORD_21 ((1<<GPIO_E) | GPIO_DEBUG_WORD_5)
+#define GPIO_DEBUG_WORD_22 ((1<<GPIO_E) | GPIO_DEBUG_WORD_6)
+#define GPIO_DEBUG_WORD_23 ((1<<GPIO_E) | GPIO_DEBUG_WORD_7)
+
+
+
+extern void btcoexDbgPulseWord(A_UINT32 gpioPinMask);
+extern void btcoexDbgPulse(A_UINT32 pin);
+
+#ifdef CONFIG_BTCOEX_ENABLE_GPIO_DEBUG
+#define BTCOEX_DBG_PULSE_WORD(gpioPinMask) (btcoexDbgPulseWord(gpioPinMask))
+#define BTCOEX_DBG_PULSE(pin) (btcoexDbgPulse(pin))
+#else
+#define BTCOEX_DBG_PULSE_WORD(gpioPinMask)
+#define BTCOEX_DBG_PULSE(pin)
+
+#endif
+#endif
+
diff --git a/drivers/staging/ath6kl/include/common/cnxmgmt.h b/drivers/staging/ath6kl/include/common/cnxmgmt.h
new file mode 100644
index 000000000000..7a902cb54831
--- /dev/null
+++ b/drivers/staging/ath6kl/include/common/cnxmgmt.h
@@ -0,0 +1,36 @@
+//------------------------------------------------------------------------------
+// <copyright file="cnxmgmt.h" company="Atheros">
+// Copyright (c) 2004-2010 Atheros Corporation. All rights reserved.
+//
+//
+// Permission to use, copy, modify, and/or distribute this software for any
+// purpose with or without fee is hereby granted, provided that the above
+// copyright notice and this permission notice appear in all copies.
+//
+// THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+// WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+// MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+// ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+// WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+// ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+// OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+//
+//
+//------------------------------------------------------------------------------
+//==============================================================================
+// Author(s): ="Atheros"
+//==============================================================================
+
+#ifndef _CNXMGMT_H_
+#define _CNXMGMT_H_
+
+typedef enum {
+ CM_CONNECT_WITHOUT_SCAN = 0x0001,
+ CM_CONNECT_ASSOC_POLICY_USER = 0x0002,
+ CM_CONNECT_SEND_REASSOC = 0x0004,
+ CM_CONNECT_WITHOUT_ROAMTABLE_UPDATE = 0x0008,
+ CM_CONNECT_DO_WPA_OFFLOAD = 0x0010,
+ CM_CONNECT_DO_NOT_DEAUTH = 0x0020,
+} CM_CONNECT_TYPE;
+
+#endif /* _CNXMGMT_H_ */
diff --git a/drivers/staging/ath6kl/include/common/dbglog.h b/drivers/staging/ath6kl/include/common/dbglog.h
new file mode 100644
index 000000000000..382d9a2dd4eb
--- /dev/null
+++ b/drivers/staging/ath6kl/include/common/dbglog.h
@@ -0,0 +1,134 @@
+//------------------------------------------------------------------------------
+// <copyright file="dbglog.h" company="Atheros">
+// Copyright (c) 2004-2010 Atheros Corporation. All rights reserved.
+//
+//
+// Permission to use, copy, modify, and/or distribute this software for any
+// purpose with or without fee is hereby granted, provided that the above
+// copyright notice and this permission notice appear in all copies.
+//
+// THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+// WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+// MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+// ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+// WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+// ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+// OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+//
+//
+//------------------------------------------------------------------------------
+//==============================================================================
+// Author(s): ="Atheros"
+//==============================================================================
+
+#ifndef _DBGLOG_H_
+#define _DBGLOG_H_
+
+#ifndef ATH_TARGET
+#include "athstartpack.h"
+#endif
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#define DBGLOG_TIMESTAMP_OFFSET 0
+#define DBGLOG_TIMESTAMP_MASK 0x0000FFFF /* Bit 0-15. Contains bit
+ 8-23 of the LF0 timer */
+#define DBGLOG_DBGID_OFFSET 16
+#define DBGLOG_DBGID_MASK 0x03FF0000 /* Bit 16-25 */
+#define DBGLOG_DBGID_NUM_MAX 256 /* Upper limit is width of mask */
+
+#define DBGLOG_MODULEID_OFFSET 26
+#define DBGLOG_MODULEID_MASK 0x3C000000 /* Bit 26-29 */
+#define DBGLOG_MODULEID_NUM_MAX 16 /* Upper limit is width of mask */
+
+/*
+ * Please ensure that the definition of any new module introduced is captured
+ * between the DBGLOG_MODULEID_START and DBGLOG_MODULEID_END defines. The
+ * structure is required for the parser to correctly pick up the values for
+ * different modules.
+ */
+#define DBGLOG_MODULEID_START
+#define DBGLOG_MODULEID_INF 0
+#define DBGLOG_MODULEID_WMI 1
+#define DBGLOG_MODULEID_MISC 2
+#define DBGLOG_MODULEID_PM 3
+#define DBGLOG_MODULEID_TXRX_MGMTBUF 4
+#define DBGLOG_MODULEID_TXRX_TXBUF 5
+#define DBGLOG_MODULEID_TXRX_RXBUF 6
+#define DBGLOG_MODULEID_WOW 7
+#define DBGLOG_MODULEID_WHAL 8
+#define DBGLOG_MODULEID_DC 9
+#define DBGLOG_MODULEID_CO 10
+#define DBGLOG_MODULEID_RO 11
+#define DBGLOG_MODULEID_CM 12
+#define DBGLOG_MODULEID_MGMT 13
+#define DBGLOG_MODULEID_TMR 14
+#define DBGLOG_MODULEID_BTCOEX 15
+#define DBGLOG_MODULEID_END
+
+#define DBGLOG_NUM_ARGS_OFFSET 30
+#define DBGLOG_NUM_ARGS_MASK 0xC0000000 /* Bit 30-31 */
+#define DBGLOG_NUM_ARGS_MAX 2 /* Upper limit is width of mask */
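Every debug-log entry begins with one 32-bit header word: timestamp in bits 15:0, debug ID in bits 25:16, module ID in bits 29:26, and the argument count in bits 31:30, per the OFFSET/MASK pairs above. A small sketch of decoding such a word; the sample value is invented, and unsigned int can stand in if the driver's A_UINT32 typedef is not in scope:

#include <stdio.h>

int main(void)
{
	A_UINT32 hdr = 0x4802abcd;	/* made-up header word for illustration */

	A_UINT32 ts     = (hdr & DBGLOG_TIMESTAMP_MASK) >> DBGLOG_TIMESTAMP_OFFSET;
	A_UINT32 dbgid  = (hdr & DBGLOG_DBGID_MASK)     >> DBGLOG_DBGID_OFFSET;
	A_UINT32 module = (hdr & DBGLOG_MODULEID_MASK)  >> DBGLOG_MODULEID_OFFSET;
	A_UINT32 nargs  = (hdr & DBGLOG_NUM_ARGS_MASK)  >> DBGLOG_NUM_ARGS_OFFSET;

	/* For 0x4802abcd: module=2 (MISC), dbgid=2, nargs=1, ts=0xabcd */
	printf("module=%u dbgid=%u nargs=%u ts=0x%04x\n", module, dbgid, nargs, ts);
	return 0;
}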
+
+#define DBGLOG_MODULE_LOG_ENABLE_OFFSET 0
+#define DBGLOG_MODULE_LOG_ENABLE_MASK 0x0000FFFF
+
+#define DBGLOG_REPORTING_ENABLED_OFFSET 16
+#define DBGLOG_REPORTING_ENABLED_MASK 0x00010000
+
+#define DBGLOG_TIMESTAMP_RESOLUTION_OFFSET 17
+#define DBGLOG_TIMESTAMP_RESOLUTION_MASK 0x000E0000
+
+#define DBGLOG_REPORT_SIZE_OFFSET 20
+#define DBGLOG_REPORT_SIZE_MASK 0x3FF00000
+
+#define DBGLOG_LOG_BUFFER_SIZE 1500
+#define DBGLOG_DBGID_DEFINITION_LEN_MAX 90
+
+PREPACK struct dbglog_buf_s {
+ struct dbglog_buf_s *next;
+ A_UINT8 *buffer;
+ A_UINT32 bufsize;
+ A_UINT32 length;
+ A_UINT32 count;
+ A_UINT32 free;
+} POSTPACK;
+
+PREPACK struct dbglog_hdr_s {
+ struct dbglog_buf_s *dbuf;
+ A_UINT32 dropped;
+} POSTPACK;
+
+PREPACK struct dbglog_config_s {
+ A_UINT32 cfgvalid; /* Mask with valid config bits */
+ union {
+ /* TODO: Take care of endianness */
+ struct {
+ A_UINT32 mmask:16; /* Mask of modules with logging on */
+ A_UINT32 rep:1; /* Reporting enabled or not */
+ A_UINT32 tsr:3; /* Time stamp resolution. Def: 1 ms */
+ A_UINT32 size:10; /* Report size in number of messages */
+ A_UINT32 reserved:2;
+ } dbglog_config;
+
+ A_UINT32 value;
+ } u;
+} POSTPACK;
+
+#define cfgmmask u.dbglog_config.mmask
+#define cfgrep u.dbglog_config.rep
+#define cfgtsr u.dbglog_config.tsr
+#define cfgsize u.dbglog_config.size
+#define cfgvalue u.value
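
A short usage sketch for the accessor shortcuts above; the field values are hypothetical and the endianness TODO in the struct comment still applies:

struct dbglog_config_s cfg = { 0 };
cfg.cfgmmask = (1 << DBGLOG_MODULEID_WMI) | (1 << DBGLOG_MODULEID_PM); /* log WMI + PM */
cfg.cfgrep   = 1;   /* enable reporting */
cfg.cfgtsr   = 1;   /* timestamp resolution (default 1 ms per the field comment) */
cfg.cfgsize  = 64;  /* report size, in messages */
/* cfg.cfgvalue now holds the packed 32-bit configuration word */
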
+
+#ifdef __cplusplus
+}
+#endif
+
+#ifndef ATH_TARGET
+#include "athendpack.h"
+#endif
+
+#endif /* _DBGLOG_H_ */
diff --git a/drivers/staging/ath6kl/include/common/dbglog_id.h b/drivers/staging/ath6kl/include/common/dbglog_id.h
new file mode 100644
index 000000000000..15ef829cab20
--- /dev/null
+++ b/drivers/staging/ath6kl/include/common/dbglog_id.h
@@ -0,0 +1,558 @@
+//------------------------------------------------------------------------------
+// <copyright file="dbglog_id.h" company="Atheros">
+// Copyright (c) 2004-2010 Atheros Corporation. All rights reserved.
+//
+//
+// Permission to use, copy, modify, and/or distribute this software for any
+// purpose with or without fee is hereby granted, provided that the above
+// copyright notice and this permission notice appear in all copies.
+//
+// THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+// WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+// MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+// ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+// WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+// ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+// OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+//
+//
+//------------------------------------------------------------------------------
+//==============================================================================
+// Author(s): ="Atheros"
+//==============================================================================
+
+#ifndef _DBGLOG_ID_H_
+#define _DBGLOG_ID_H_
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+/*
+ * The nomenclature for the debug identifiers is MODULE_DESCRIPTION.
+ * Please ensure that the definition of any new debugid introduced is captured
+ * between the <MODULE>_DBGID_DEFINITION_START and
+ * <MODULE>_DBGID_DEFINITION_END defines. The structure is required for the
+ * parser to correctly pick up the values for different debug identifiers.
+ */
+
+/* INF debug identifier definitions */
+#define INF_DBGID_DEFINITION_START
+#define INF_ASSERTION_FAILED 1
+#define INF_TARGET_ID 2
+#define INF_DBGID_DEFINITION_END
+
+/* WMI debug identifier definitions */
+#define WMI_DBGID_DEFINITION_START
+#define WMI_CMD_RX_XTND_PKT_TOO_SHORT 1
+#define WMI_EXTENDED_CMD_NOT_HANDLED 2
+#define WMI_CMD_RX_PKT_TOO_SHORT 3
+#define WMI_CALLING_WMI_EXTENSION_FN 4
+#define WMI_CMD_NOT_HANDLED 5
+#define WMI_IN_SYNC 6
+#define WMI_TARGET_WMI_SYNC_CMD 7
+#define WMI_SET_SNR_THRESHOLD_PARAMS 8
+#define WMI_SET_RSSI_THRESHOLD_PARAMS 9
+#define WMI_SET_LQ_TRESHOLD_PARAMS 10
+#define WMI_TARGET_CREATE_PSTREAM_CMD 11
+#define WMI_WI_DTM_INUSE 12
+#define WMI_TARGET_DELETE_PSTREAM_CMD 13
+#define WMI_TARGET_IMPLICIT_DELETE_PSTREAM_CMD 14
+#define WMI_TARGET_GET_BIT_RATE_CMD 15
+#define WMI_GET_RATE_MASK_CMD_FIX_RATE_MASK_IS 16
+#define WMI_TARGET_GET_AVAILABLE_CHANNELS_CMD 17
+#define WMI_TARGET_GET_TX_PWR_CMD 18
+#define WMI_FREE_EVBUF_WMIBUF 19
+#define WMI_FREE_EVBUF_DATABUF 20
+#define WMI_FREE_EVBUF_BADFLAG 21
+#define WMI_HTC_RX_ERROR_DATA_PACKET 22
+#define WMI_HTC_RX_SYNC_PAUSING_FOR_MBOX 23
+#define WMI_INCORRECT_WMI_DATA_HDR_DROPPING_PKT 24
+#define WMI_SENDING_READY_EVENT 25
+#define WMI_SETPOWER_MDOE_TO_MAXPERF 26
+#define WMI_SETPOWER_MDOE_TO_REC 27
+#define WMI_BSSINFO_EVENT_FROM 28
+#define WMI_TARGET_GET_STATS_CMD 29
+#define WMI_SENDING_SCAN_COMPLETE_EVENT 30
+#define WMI_SENDING_RSSI_INDB_THRESHOLD_EVENT 31
+#define WMI_SENDING_RSSI_INDBM_THRESHOLD_EVENT 32
+#define WMI_SENDING_LINK_QUALITY_THRESHOLD_EVENT 33
+#define WMI_SENDING_ERROR_REPORT_EVENT 34
+#define WMI_SENDING_CAC_EVENT 35
+#define WMI_TARGET_GET_ROAM_TABLE_CMD 36
+#define WMI_TARGET_GET_ROAM_DATA_CMD 37
+#define WMI_SENDING_GPIO_INTR_EVENT 38
+#define WMI_SENDING_GPIO_ACK_EVENT 39
+#define WMI_SENDING_GPIO_DATA_EVENT 40
+#define WMI_CMD_RX 41
+#define WMI_CMD_RX_XTND 42
+#define WMI_EVENT_SEND 43
+#define WMI_EVENT_SEND_XTND 44
+#define WMI_CMD_PARAMS_DUMP_START 45
+#define WMI_CMD_PARAMS_DUMP_END 46
+#define WMI_CMD_PARAMS 47
+#define WMI_DBGID_DEFINITION_END
+
+/* MISC debug identifier definitions */
+#define MISC_DBGID_DEFINITION_START
+#define MISC_WLAN_SCHEDULER_EVENT_REGISTER_ERROR 1
+#define TLPM_INIT 2
+#define TLPM_FILTER_POWER_STATE 3
+#define TLPM_NOTIFY_NOT_IDLE 4
+#define TLPM_TIMEOUT_IDLE_HANDLER 5
+#define TLPM_TIMEOUT_WAKEUP_HANDLER 6
+#define TLPM_WAKEUP_SIGNAL_HANDLER 7
+#define TLPM_UNEXPECTED_GPIO_INTR_ERROR 8
+#define TLPM_BREAK_ON_NOT_RECEIVED_ERROR 9
+#define TLPM_BREAK_OFF_NOT_RECIVED_ERROR 10
+#define TLPM_ACK_GPIO_INTR 11
+#define TLPM_ON 12
+#define TLPM_OFF 13
+#define TLPM_WAKEUP_FROM_HOST 14
+#define TLPM_WAKEUP_FROM_BT 15
+#define TLPM_TX_BREAK_RECIVED 16
+#define TLPM_IDLE_TIMER_NOT_RUNNING 17
+#define MISC_DBGID_DEFINITION_END
+
+/* TXRX debug identifier definitions */
+#define TXRX_TXBUF_DBGID_DEFINITION_START
+#define TXRX_TXBUF_ALLOCATE_BUF 1
+#define TXRX_TXBUF_QUEUE_BUF_TO_MBOX 2
+#define TXRX_TXBUF_QUEUE_BUF_TO_TXQ 3
+#define TXRX_TXBUF_TXQ_DEPTH 4
+#define TXRX_TXBUF_IBSS_QUEUE_TO_SFQ 5
+#define TXRX_TXBUF_IBSS_QUEUE_TO_TXQ_FRM_SFQ 6
+#define TXRX_TXBUF_INITIALIZE_TIMER 7
+#define TXRX_TXBUF_ARM_TIMER 8
+#define TXRX_TXBUF_DISARM_TIMER 9
+#define TXRX_TXBUF_UNINITIALIZE_TIMER 10
+#define TXRX_TXBUF_DBGID_DEFINITION_END
+
+#define TXRX_RXBUF_DBGID_DEFINITION_START
+#define TXRX_RXBUF_ALLOCATE_BUF 1
+#define TXRX_RXBUF_QUEUE_TO_HOST 2
+#define TXRX_RXBUF_QUEUE_TO_WLAN 3
+#define TXRX_RXBUF_ZERO_LEN_BUF 4
+#define TXRX_RXBUF_QUEUE_TO_HOST_LASTBUF_IN_RXCHAIN 5
+#define TXRX_RXBUF_LASTBUF_IN_RXCHAIN_ZEROBUF 6
+#define TXRX_RXBUF_QUEUE_EMPTY_QUEUE_TO_WLAN 7
+#define TXRX_RXBUF_SEND_TO_RECV_MGMT 8
+#define TXRX_RXBUF_SEND_TO_IEEE_LAYER 9
+#define TXRX_RXBUF_REQUEUE_ERROR 10
+#define TXRX_RXBUF_DBGID_DEFINITION_END
+
+#define TXRX_MGMTBUF_DBGID_DEFINITION_START
+#define TXRX_MGMTBUF_ALLOCATE_BUF 1
+#define TXRX_MGMTBUF_ALLOCATE_SM_BUF 2
+#define TXRX_MGMTBUF_ALLOCATE_RMBUF 3
+#define TXRX_MGMTBUF_GET_BUF 4
+#define TXRX_MGMTBUF_GET_SM_BUF 5
+#define TXRX_MGMTBUF_QUEUE_BUF_TO_TXQ 6
+#define TXRX_MGMTBUF_REAPED_BUF 7
+#define TXRX_MGMTBUF_REAPED_SM_BUF 8
+#define TXRX_MGMTBUF_WAIT_FOR_TXQ_DRAIN 9
+#define TXRX_MGMTBUF_WAIT_FOR_TXQ_SFQ_DRAIN 10
+#define TXRX_MGMTBUF_ENQUEUE_INTO_DATA_SFQ 11
+#define TXRX_MGMTBUF_DEQUEUE_FROM_DATA_SFQ 12
+#define TXRX_MGMTBUF_PAUSE_DATA_TXQ 13
+#define TXRX_MGMTBUF_RESUME_DATA_TXQ 14
+#define TXRX_MGMTBUF_WAIT_FORTXQ_DRAIN_TIMEOUT 15
+#define TXRX_MGMTBUF_DRAINQ 16
+#define TXRX_MGMTBUF_INDICATE_Q_DRAINED 17
+#define TXRX_MGMTBUF_ENQUEUE_INTO_HW_SFQ 18
+#define TXRX_MGMTBUF_DEQUEUE_FROM_HW_SFQ 19
+#define TXRX_MGMTBUF_PAUSE_HW_TXQ 20
+#define TXRX_MGMTBUF_RESUME_HW_TXQ 21
+#define TXRX_MGMTBUF_TEAR_DOWN_BA 22
+#define TXRX_MGMTBUF_PROCESS_ADDBA_REQ 23
+#define TXRX_MGMTBUF_PROCESS_DELBA 24
+#define TXRX_MGMTBUF_PERFORM_BA 25
+#define TXRX_MGMTBUF_WLAN_RESET_ON_ERROR 26
+#define TXRX_MGMTBUF_DBGID_DEFINITION_END
+
+/* PM (Power Module) debug identifier definitions */
+#define PM_DBGID_DEFINITION_START
+#define PM_INIT 1
+#define PM_ENABLE 2
+#define PM_SET_STATE 3
+#define PM_SET_POWERMODE 4
+#define PM_CONN_NOTIFY 5
+#define PM_REF_COUNT_NEGATIVE 6
+#define PM_INFRA_STA_APSD_ENABLE 7
+#define PM_INFRA_STA_UPDATE_APSD_STATE 8
+#define PM_CHAN_OP_REQ 9
+#define PM_SET_MY_BEACON_POLICY 10
+#define PM_SET_ALL_BEACON_POLICY 11
+#define PM_INFRA_STA_SET_PM_PARAMS1 12
+#define PM_INFRA_STA_SET_PM_PARAMS2 13
+#define PM_ADHOC_SET_PM_CAPS_FAIL 14
+#define PM_ADHOC_UNKNOWN_IBSS_ATTRIB_ID 15
+#define PM_ADHOC_SET_PM_PARAMS 16
+#define PM_ADHOC_STATE1 18
+#define PM_ADHOC_STATE2 19
+#define PM_ADHOC_CONN_MAP 20
+#define PM_FAKE_SLEEP 21
+#define PM_AP_STATE1 22
+#define PM_AP_SET_PM_PARAMS 23
+#define PM_DBGID_DEFINITION_END
+
+/* Wake on Wireless debug identifier definitions */
+#define WOW_DBGID_DEFINITION_START
+#define WOW_INIT 1
+#define WOW_GET_CONFIG_DSET 2
+#define WOW_NO_CONFIG_DSET 3
+#define WOW_INVALID_CONFIG_DSET 4
+#define WOW_USE_DEFAULT_CONFIG 5
+#define WOW_SETUP_GPIO 6
+#define WOW_INIT_DONE 7
+#define WOW_SET_GPIO_PIN 8
+#define WOW_CLEAR_GPIO_PIN 9
+#define WOW_SET_WOW_MODE_CMD 10
+#define WOW_SET_HOST_MODE_CMD 11
+#define WOW_ADD_WOW_PATTERN_CMD 12
+#define WOW_NEW_WOW_PATTERN_AT_INDEX 13
+#define WOW_DEL_WOW_PATTERN_CMD 14
+#define WOW_LIST_CONTAINS_PATTERNS 15
+#define WOW_GET_WOW_LIST_CMD 16
+#define WOW_INVALID_FILTER_ID 17
+#define WOW_INVALID_FILTER_LISTID 18
+#define WOW_NO_VALID_FILTER_AT_ID 19
+#define WOW_NO_VALID_LIST_AT_ID 20
+#define WOW_NUM_PATTERNS_EXCEEDED 21
+#define WOW_NUM_LISTS_EXCEEDED 22
+#define WOW_GET_WOW_STATS 23
+#define WOW_CLEAR_WOW_STATS 24
+#define WOW_WAKEUP_HOST 25
+#define WOW_EVENT_WAKEUP_HOST 26
+#define WOW_EVENT_DISCARD 27
+#define WOW_PATTERN_MATCH 28
+#define WOW_PATTERN_NOT_MATCH 29
+#define WOW_PATTERN_NOT_MATCH_OFFSET 30
+#define WOW_DISABLED_HOST_ASLEEP 31
+#define WOW_ENABLED_HOST_ASLEEP_NO_PATTERNS 32
+#define WOW_ENABLED_HOST_ASLEEP_NO_MATCH_FOUND 33
+#define WOW_DBGID_DEFINITION_END
+
+/* WHAL debug identifier definitions */
+#define WHAL_DBGID_DEFINITION_START
+#define WHAL_ERROR_ANI_CONTROL 1
+#define WHAL_ERROR_CHIP_TEST1 2
+#define WHAL_ERROR_CHIP_TEST2 3
+#define WHAL_ERROR_EEPROM_CHECKSUM 4
+#define WHAL_ERROR_EEPROM_MACADDR 5
+#define WHAL_ERROR_INTERRUPT_HIU 6
+#define WHAL_ERROR_KEYCACHE_RESET 7
+#define WHAL_ERROR_KEYCACHE_SET 8
+#define WHAL_ERROR_KEYCACHE_TYPE 9
+#define WHAL_ERROR_KEYCACHE_TKIPENTRY 10
+#define WHAL_ERROR_KEYCACHE_WEPLENGTH 11
+#define WHAL_ERROR_PHY_INVALID_CHANNEL 12
+#define WHAL_ERROR_POWER_AWAKE 13
+#define WHAL_ERROR_POWER_SET 14
+#define WHAL_ERROR_RECV_STOPDMA 15
+#define WHAL_ERROR_RECV_STOPPCU 16
+#define WHAL_ERROR_RESET_CHANNF1 17
+#define WHAL_ERROR_RESET_CHANNF2 18
+#define WHAL_ERROR_RESET_PM 19
+#define WHAL_ERROR_RESET_OFFSETCAL 20
+#define WHAL_ERROR_RESET_RFGRANT 21
+#define WHAL_ERROR_RESET_RXFRAME 22
+#define WHAL_ERROR_RESET_STOPDMA 23
+#define WHAL_ERROR_RESET_RECOVER 24
+#define WHAL_ERROR_XMIT_COMPUTE 25
+#define WHAL_ERROR_XMIT_NOQUEUE 26
+#define WHAL_ERROR_XMIT_ACTIVEQUEUE 27
+#define WHAL_ERROR_XMIT_BADTYPE 28
+#define WHAL_ERROR_XMIT_STOPDMA 29
+#define WHAL_ERROR_INTERRUPT_BB_PANIC 30
+#define WHAL_ERROR_RESET_TXIQCAL 31
+#define WHAL_ERROR_PAPRD_MAXGAIN_ABOVE_WINDOW 32
+#define WHAL_DBGID_DEFINITION_END
+
+/* DC debug identifier definitions */
+#define DC_DBGID_DEFINITION_START
+#define DC_SCAN_CHAN_START 1
+#define DC_SCAN_CHAN_FINISH 2
+#define DC_BEACON_RECEIVE7 3
+#define DC_SSID_PROBE_CB 4
+#define DC_SEND_NEXT_SSID_PROBE 5
+#define DC_START_SEARCH 6
+#define DC_CANCEL_SEARCH_CB 7
+#define DC_STOP_SEARCH 8
+#define DC_END_SEARCH 9
+#define DC_MIN_CHDWELL_TIMEOUT 10
+#define DC_START_SEARCH_CANCELED 11
+#define DC_SET_POWER_MODE 12
+#define DC_INIT 13
+#define DC_SEARCH_OPPORTUNITY 14
+#define DC_RECEIVED_ANY_BEACON 15
+#define DC_RECEIVED_MY_BEACON 16
+#define DC_PROFILE_IS_ADHOC_BUT_BSS_IS_INFRA 17
+#define DC_PS_ENABLED_BUT_ATHEROS_IE_ABSENT 18
+#define DC_BSS_ADHOC_CHANNEL_NOT_ALLOWED 19
+#define DC_SET_BEACON_UPDATE 20
+#define DC_BEACON_UPDATE_COMPLETE 21
+#define DC_END_SEARCH_BEACON_UPDATE_COMP_CB 22
+#define DC_BSSINFO_EVENT_DROPPED 23
+#define DC_IEEEPS_ENABLED_BUT_ATIM_ABSENT 24
+#define DC_DBGID_DEFINITION_END
+
+/* CO debug identifier definitions */
+#define CO_DBGID_DEFINITION_START
+#define CO_INIT 1
+#define CO_ACQUIRE_LOCK 2
+#define CO_START_OP1 3
+#define CO_START_OP2 4
+#define CO_DRAIN_TX_COMPLETE_CB 5
+#define CO_CHANGE_CHANNEL_CB 6
+#define CO_RETURN_TO_HOME_CHANNEL 7
+#define CO_FINISH_OP_TIMEOUT 8
+#define CO_OP_END 9
+#define CO_CANCEL_OP 10
+#define CO_CHANGE_CHANNEL 11
+#define CO_RELEASE_LOCK 12
+#define CO_CHANGE_STATE 13
+#define CO_DBGID_DEFINITION_END
+
+/* RO debug identifier definitions */
+#define RO_DBGID_DEFINITION_START
+#define RO_REFRESH_ROAM_TABLE 1
+#define RO_UPDATE_ROAM_CANDIDATE 2
+#define RO_UPDATE_ROAM_CANDIDATE_CB 3
+#define RO_UPDATE_ROAM_CANDIDATE_FINISH 4
+#define RO_REFRESH_ROAM_TABLE_DONE 5
+#define RO_PERIODIC_SEARCH_CB 6
+#define RO_PERIODIC_SEARCH_TIMEOUT 7
+#define RO_INIT 8
+#define RO_BMISS_STATE1 9
+#define RO_BMISS_STATE2 10
+#define RO_SET_PERIODIC_SEARCH_ENABLE 11
+#define RO_SET_PERIODIC_SEARCH_DISABLE 12
+#define RO_ENABLE_SQ_THRESHOLD 13
+#define RO_DISABLE_SQ_THRESHOLD 14
+#define RO_ADD_BSS_TO_ROAM_TABLE 15
+#define RO_SET_PERIODIC_SEARCH_MODE 16
+#define RO_CONFIGURE_SQ_THRESHOLD1 17
+#define RO_CONFIGURE_SQ_THRESHOLD2 18
+#define RO_CONFIGURE_SQ_PARAMS 19
+#define RO_LOW_SIGNAL_QUALITY_EVENT 20
+#define RO_HIGH_SIGNAL_QUALITY_EVENT 21
+#define RO_REMOVE_BSS_FROM_ROAM_TABLE 22
+#define RO_UPDATE_CONNECTION_STATE_METRIC 23
+#define RO_DBGID_DEFINITION_END
+
+/* CM debug identifier definitions */
+#define CM_DBGID_DEFINITION_START
+#define CM_INITIATE_HANDOFF 1
+#define CM_INITIATE_HANDOFF_CB 2
+#define CM_CONNECT_EVENT 3
+#define CM_DISCONNECT_EVENT 4
+#define CM_INIT 5
+#define CM_HANDOFF_SOURCE 6
+#define CM_SET_HANDOFF_TRIGGERS 7
+#define CM_CONNECT_REQUEST 8
+#define CM_CONNECT_REQUEST_CB 9
+#define CM_CONTINUE_SCAN_CB 10
+#define CM_DBGID_DEFINITION_END
+
+
+/* mgmt debug identifier definitions */
+#define MGMT_DBGID_DEFINITION_START
+#define KEYMGMT_CONNECTION_INIT 1
+#define KEYMGMT_CONNECTION_COMPLETE 2
+#define KEYMGMT_CONNECTION_CLOSE 3
+#define KEYMGMT_ADD_KEY 4
+#define MLME_NEW_STATE 5
+#define MLME_CONN_INIT 6
+#define MLME_CONN_COMPLETE 7
+#define MLME_CONN_CLOSE 8
+#define MGMT_DBGID_DEFINITION_END
+
+/* TMR debug identifier definitions */
+#define TMR_DBGID_DEFINITION_START
+#define TMR_HANG_DETECTED 1
+#define TMR_WDT_TRIGGERED 2
+#define TMR_WDT_RESET 3
+#define TMR_HANDLER_ENTRY 4
+#define TMR_HANDLER_EXIT 5
+#define TMR_SAVED_START 6
+#define TMR_SAVED_END 7
+#define TMR_DBGID_DEFINITION_END
+
+/* BTCOEX debug identifier definitions */
+#define BTCOEX_DBGID_DEFINITION_START
+#define BTCOEX_STATUS_CMD 1
+#define BTCOEX_PARAMS_CMD 2
+#define BTCOEX_ANT_CONFIG 3
+#define BTCOEX_COLOCATED_BT_DEVICE 4
+#define BTCOEX_CLOSE_RANGE_SCO_ON 5
+#define BTCOEX_CLOSE_RANGE_SCO_OFF 6
+#define BTCOEX_CLOSE_RANGE_A2DP_ON 7
+#define BTCOEX_CLOSE_RANGE_A2DP_OFF 8
+#define BTCOEX_A2DP_PROTECT_ON 9
+#define BTCOEX_A2DP_PROTECT_OFF 10
+#define BTCOEX_SCO_PROTECT_ON 11
+#define BTCOEX_SCO_PROTECT_OFF 12
+#define BTCOEX_CLOSE_RANGE_DETECTOR_START 13
+#define BTCOEX_CLOSE_RANGE_DETECTOR_STOP 14
+#define BTCOEX_CLOSE_RANGE_TOGGLE 15
+#define BTCOEX_CLOSE_RANGE_TOGGLE_RSSI_LRCNT 16
+#define BTCOEX_CLOSE_RANGE_RSSI_THRESH 17
+#define BTCOEX_CLOSE_RANGE_LOW_RATE_THRESH 18
+#define BTCOEX_PTA_PRI_INTR_HANDLER 19
+#define BTCOEX_PSPOLL_QUEUED 20
+#define BTCOEX_PSPOLL_COMPLETE 21
+#define BTCOEX_DBG_PM_AWAKE 22
+#define BTCOEX_DBG_PM_SLEEP 23
+#define BTCOEX_DBG_SCO_COEX_ON 24
+#define BTCOEX_SCO_DATARECEIVE 25
+#define BTCOEX_INTR_INIT 26
+#define BTCOEX_PTA_PRI_DIFF 27
+#define BTCOEX_TIM_NOTIFICATION 28
+#define BTCOEX_SCO_WAKEUP_ON_DATA 29
+#define BTCOEX_SCO_SLEEP 30
+#define BTCOEX_SET_WEIGHTS 31
+#define BTCOEX_SCO_DATARECEIVE_LATENCY_VAL 32
+#define BTCOEX_SCO_MEASURE_TIME_DIFF 33
+#define BTCOEX_SET_EOL_VAL 34
+#define BTCOEX_OPT_DETECT_HANDLER 35
+#define BTCOEX_SCO_TOGGLE_STATE 36
+#define BTCOEX_SCO_STOMP 37
+#define BTCOEX_NULL_COMP_CALLBACK 38
+#define BTCOEX_RX_INCOMING 39
+#define BTCOEX_RX_INCOMING_CTL 40
+#define BTCOEX_RX_INCOMING_MGMT 41
+#define BTCOEX_RX_INCOMING_DATA 42
+#define BTCOEX_RTS_RECEPTION 43
+#define BTCOEX_FRAME_PRI_LOW_RATE_THRES 44
+#define BTCOEX_PM_FAKE_SLEEP 45
+#define BTCOEX_ACL_COEX_STATUS 46
+#define BTCOEX_ACL_COEX_DETECTION 47
+#define BTCOEX_A2DP_COEX_STATUS 48
+#define BTCOEX_SCO_STATUS 49
+#define BTCOEX_WAKEUP_ON_DATA 50
+#define BTCOEX_DATARECEIVE 51
+#define BTCOEX_GET_MAX_AGGR_SIZE 53
+#define BTCOEX_MAX_AGGR_AVAIL_TIME 54
+#define BTCOEX_DBG_WBTIMER_INTR 55
+#define BTCOEX_DBG_SCO_SYNC 57
+#define BTCOEX_UPLINK_QUEUED_RATE 59
+#define BTCOEX_DBG_UPLINK_ENABLE_EOL 60
+#define BTCOEX_UPLINK_FRAME_DURATION 61
+#define BTCOEX_UPLINK_SET_EOL 62
+#define BTCOEX_DBG_EOL_EXPIRED 63
+#define BTCOEX_DBG_DATA_COMPLETE 64
+#define BTCOEX_UPLINK_QUEUED_TIMESTAMP 65
+#define BTCOEX_DBG_DATA_COMPLETE_TIME 66
+#define BTCOEX_DBG_A2DP_ROLE_IS_SLAVE 67
+#define BTCOEX_DBG_A2DP_ROLE_IS_MASTER 68
+#define BTCOEX_DBG_UPLINK_SEQ_NUM 69
+#define BTCOEX_UPLINK_AGGR_SEQ 70
+#define BTCOEX_DBG_TX_COMP_SEQ_NO 71
+#define BTCOEX_DBG_MAX_AGGR_PAUSE_STATE 72
+#define BTCOEX_DBG_ACL_TRAFFIC 73
+#define BTCOEX_CURR_AGGR_PROP 74
+#define BTCOEX_DBG_SCO_GET_PER_TIME_DIFF 75
+#define BTCOEX_PSPOLL_PROCESS 76
+#define BTCOEX_RETURN_FROM_MAC 77
+#define BTCOEX_FREED_REQUEUED_CNT 78
+#define BTCOEX_DBG_TOGGLE_LOW_RATES 79
+#define BTCOEX_MAC_GOES_TO_SLEEP 80
+#define BTCOEX_DBG_A2DP_NO_SYNC 81
+#define BTCOEX_RETURN_FROM_MAC_HOLD_Q_INFO 82
+#define BTCOEX_RETURN_FROM_MAC_AC 83
+#define BTCOEX_DBG_DTIM_RECV 84
+#define BTCOEX_IS_PRE_UPDATE 86
+#define BTCOEX_ENQUEUED_BIT_MAP 87
+#define BTCOEX_TX_COMPLETE_FIRST_DESC_STATS 88
+#define BTCOEX_UPLINK_DESC 89
+#define BTCOEX_SCO_GET_PER_FIRST_FRM_TIMESTAMP 90
+#define BTCOEX_DBG_RECV_ACK 94
+#define BTCOEX_DBG_ADDBA_INDICATION 95
+#define BTCOEX_TX_COMPLETE_EOL_FAILED 96
+#define BTCOEX_DBG_A2DP_USAGE_COMPLETE 97
+#define BTCOEX_DBG_A2DP_STOMP_FOR_BCN_HANDLER 98
+#define BTCOEX_DBG_A2DP_SYNC_INTR 99
+#define BTCOEX_DBG_A2DP_STOMP_FOR_BCN_RECEPTION 100
+#define BTCOEX_FORM_AGGR_CURR_AGGR 101
+#define BTCOEX_DBG_TOGGLE_A2DP_BURST_CNT 102
+#define BTCOEX_DBG_BT_TRAFFIC 103
+#define BTCOEX_DBG_STOMP_BT_TRAFFIC 104
+#define BTCOEX_RECV_NULL 105
+#define BTCOEX_DBG_A2DP_MASTER_BT_END 106
+#define BTCOEX_DBG_A2DP_BT_START 107
+#define BTCOEX_DBG_A2DP_SLAVE_BT_END 108
+#define BTCOEX_DBG_A2DP_STOMP_BT 109
+#define BTCOEX_DBG_GO_TO_SLEEP 110
+#define BTCOEX_DBG_A2DP_PKT 111
+#define BTCOEX_DBG_A2DP_PSPOLL_DATA_RECV 112
+#define BTCOEX_DBG_A2DP_NULL 113
+#define BTCOEX_DBG_UPLINK_DATA 114
+#define BTCOEX_DBG_A2DP_STOMP_LOW_PRIO_NULL 115
+#define BTCOEX_DBG_ADD_BA_RESP_TIMEOUT 116
+#define BTCOEX_DBG_TXQ_STATE 117
+#define BTCOEX_DBG_ALLOW_SCAN 118
+#define BTCOEX_DBG_SCAN_REQUEST 119
+#define BTCOEX_A2DP_SLEEP 127
+#define BTCOEX_DBG_DATA_ACTIV_TIMEOUT 128
+#define BTCOEX_DBG_SWITCH_TO_PSPOLL_ON_MODE 129
+#define BTCOEX_DBG_SWITCH_TO_PSPOLL_OFF_MODE 130
+#define BTCOEX_DATARECEIVE_AGGR 131
+#define BTCOEX_DBG_DATA_RECV_SLEEPING_PENDING 132
+#define BTCOEX_DBG_DATARESP_TIMEOUT 133
+#define BTCOEX_BDG_BMISS 134
+#define BTCOEX_DBG_DATA_RECV_WAKEUP_TIM 135
+#define BTCOEX_DBG_SECOND_BMISS 136
+#define BTCOEX_DBG_SET_WLAN_STATE 138
+#define BTCOEX_BDG_FIRST_BMISS 139
+#define BTCOEX_DBG_A2DP_CHAN_OP 140
+#define BTCOEX_DBG_A2DP_INTR 141
+#define BTCOEX_DBG_BT_INQUIRY 142
+#define BTCOEX_DBG_BT_INQUIRY_DATA_FETCH 143
+#define BTCOEX_DBG_POST_INQUIRY_FINISH 144
+#define BTCOEX_DBG_SCO_OPT_MODE_TIMER_HANDLER 145
+#define BTCOEX_DBG_NULL_FRAME_SLEEP 146
+#define BTCOEX_DBG_NULL_FRAME_AWAKE 147
+#define BTCOEX_DBG_SET_AGGR_SIZE 152
+#define BTCOEX_DBG_TEAR_BA_TIMEOUT 153
+#define BTCOEX_DBG_MGMT_FRAME_SEQ_NO 154
+#define BTCOEX_DBG_SCO_STOMP_HIGH_PRI 155
+#define BTCOEX_DBG_COLOCATED_BT_DEV 156
+#define BTCOEX_DBG_FE_ANT_TYPE 157
+#define BTCOEX_DBG_BT_INQUIRY_CMD 158
+#define BTCOEX_DBG_SCO_CONFIG 159
+#define BTCOEX_DBG_SCO_PSPOLL_CONFIG 160
+#define BTCOEX_DBG_SCO_OPTMODE_CONFIG 161
+#define BTCOEX_DBG_A2DP_CONFIG 162
+#define BTCOEX_DBG_A2DP_PSPOLL_CONFIG 163
+#define BTCOEX_DBG_A2DP_OPTMODE_CONFIG 164
+#define BTCOEX_DBG_ACLCOEX_CONFIG 165
+#define BTCOEX_DBG_ACLCOEX_PSPOLL_CONFIG 166
+#define BTCOEX_DBG_ACLCOEX_OPTMODE_CONFIG 167
+#define BTCOEX_DBG_DEBUG_CMD 168
+#define BTCOEX_DBG_SET_BT_OPERATING_STATUS 169
+#define BTCOEX_DBG_GET_CONFIG 170
+#define BTCOEX_DBG_GET_STATS 171
+#define BTCOEX_DBG_BT_OPERATING_STATUS 172
+#define BTCOEX_DBG_PERFORM_RECONNECT 173
+#define BTCOEX_DBG_ACL_WLAN_MED 175
+#define BTCOEX_DBG_ACL_BT_MED 176
+#define BTCOEX_DBG_WLAN_CONNECT 177
+#define BTCOEX_DBG_A2DP_DUAL_START 178
+#define BTCOEX_DBG_PMAWAKE_NOTIFY 179
+#define BTCOEX_DBG_BEACON_SCAN_ENABLE 180
+#define BTCOEX_DBG_BEACON_SCAN_DISABLE 181
+#define BTCOEX_DBG_RX_NOTIFY 182
+#define BTCOEX_SCO_GET_PER_SECOND_FRM_TIMESTAMP 183
+#define BTCOEX_DBG_TXQ_DETAILS 184
+#define BTCOEX_DBG_SCO_STOMP_LOW_PRI 185
+#define BTCOEX_DBG_A2DP_FORCE_SCAN 186
+#define BTCOEX_DBG_DTIM_STOMP_COMP 187
+#define BTCOEX_ACL_PRESENCE_TIMER 188
+#define BTCOEX_DBGID_DEFINITION_END
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* _DBGLOG_ID_H_ */
diff --git a/drivers/staging/ath6kl/include/common/discovery.h b/drivers/staging/ath6kl/include/common/discovery.h
new file mode 100644
index 000000000000..da1b33245069
--- /dev/null
+++ b/drivers/staging/ath6kl/include/common/discovery.h
@@ -0,0 +1,75 @@
+//------------------------------------------------------------------------------
+// <copyright file="discovery.h" company="Atheros">
+// Copyright (c) 2004-2010 Atheros Corporation. All rights reserved.
+//
+//
+// Permission to use, copy, modify, and/or distribute this software for any
+// purpose with or without fee is hereby granted, provided that the above
+// copyright notice and this permission notice appear in all copies.
+//
+// THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+// WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+// MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+// ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+// WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+// ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+// OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+//
+//
+//------------------------------------------------------------------------------
+//==============================================================================
+// Author(s): ="Atheros"
+//==============================================================================
+
+#ifndef _DISCOVERY_H_
+#define _DISCOVERY_H_
+
+/*
+ * DC_SCAN_PRIORITY is an 8-bit bitmap of the scan priority of a channel
+ */
+typedef enum {
+ DEFAULT_SCPRI = 0x01,
+ POPULAR_SCPRI = 0x02,
+ SSIDS_SCPRI = 0x04,
+ PROF_SCPRI = 0x08,
+} DC_SCAN_PRIORITY;
+
+/* The following search type construct can be used to manipulate the behavior of the search module based on different bits set */
+typedef enum {
+ SCAN_RESET = 0,
+ SCAN_ALL = (DEFAULT_SCPRI | POPULAR_SCPRI | \
+ SSIDS_SCPRI | PROF_SCPRI),
+
+ SCAN_POPULAR = (POPULAR_SCPRI | SSIDS_SCPRI | PROF_SCPRI),
+ SCAN_SSIDS = (SSIDS_SCPRI | PROF_SCPRI),
+ SCAN_PROF_MASK = (PROF_SCPRI),
+ SCAN_MULTI_CHANNEL = 0x000100,
+ SCAN_DETERMINISTIC = 0x000200,
+ SCAN_PROFILE_MATCH_TERMINATED = 0x000400,
+ SCAN_HOME_CHANNEL_SKIP = 0x000800,
+ SCAN_CHANNEL_LIST_CONTINUE = 0x001000,
+ SCAN_CURRENT_SSID_SKIP = 0x002000,
+ SCAN_ACTIVE_PROBE_DISABLE = 0x004000,
+ SCAN_CHANNEL_HINT_ONLY = 0x008000,
+ SCAN_ACTIVE_CHANNELS_ONLY = 0x010000,
+ SCAN_UNUSED1 = 0x020000, /* unused */
+ SCAN_PERIODIC = 0x040000,
+ SCAN_FIXED_DURATION = 0x080000,
+ SCAN_AP_ASSISTED = 0x100000,
+} DC_SCAN_TYPE;
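
The values above are bit flags that can be OR-ed together; a purely hypothetical combination requesting an SSID scan across multiple channels, restricted to active channels, might look like:

DC_SCAN_TYPE scan_type =
	(DC_SCAN_TYPE)(SCAN_SSIDS | SCAN_MULTI_CHANNEL | SCAN_ACTIVE_CHANNELS_ONLY);
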
+
+typedef enum {
+ BSS_REPORTING_DEFAULT = 0x0,
+ EXCLUDE_NON_SCAN_RESULTS = 0x1, /* Exclude results outside of scan */
+} DC_BSS_REPORTING_POLICY;
+
+typedef enum {
+ DC_IGNORE_WPAx_GROUP_CIPHER = 0x01,
+ DC_PROFILE_MATCH_DONE = 0x02,
+ DC_IGNORE_AAC_BEACON = 0x04,
+ DC_CSA_FOLLOW_BSS = 0x08,
+} DC_PROFILE_FILTER;
+
+#define DEFAULT_DC_PROFILE_FILTER (DC_CSA_FOLLOW_BSS)
+
+#endif /* _DISCOVERY_H_ */
diff --git a/drivers/staging/ath6kl/include/common/dset_internal.h b/drivers/staging/ath6kl/include/common/dset_internal.h
new file mode 100644
index 000000000000..2460f0ecf12b
--- /dev/null
+++ b/drivers/staging/ath6kl/include/common/dset_internal.h
@@ -0,0 +1,63 @@
+//------------------------------------------------------------------------------
+// <copyright file="dset_internal.h" company="Atheros">
+// Copyright (c) 2004-2010 Atheros Corporation. All rights reserved.
+//
+//
+// Permission to use, copy, modify, and/or distribute this software for any
+// purpose with or without fee is hereby granted, provided that the above
+// copyright notice and this permission notice appear in all copies.
+//
+// THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+// WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+// MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+// ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+// WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+// ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+// OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+//
+//
+//------------------------------------------------------------------------------
+//==============================================================================
+// Author(s): ="Atheros"
+//==============================================================================
+
+
+#ifndef __DSET_INTERNAL_H__
+#define __DSET_INTERNAL_H__
+
+#ifndef ATH_TARGET
+#include "athstartpack.h"
+#endif
+
+/*
+ * Internal dset definitions, common for DataSet layer.
+ */
+
+#define DSET_TYPE_STANDARD 0
+#define DSET_TYPE_BPATCHED 1
+#define DSET_TYPE_COMPRESSED 2
+
+/* Dataset descriptor */
+
+typedef PREPACK struct dset_descriptor_s {
+ struct dset_descriptor_s *next; /* List link. NULL only at the last
+ descriptor */
+ A_UINT16 id; /* Dset ID */
+ A_UINT16 size; /* Dset size. */
+ void *DataPtr; /* Pointer to raw data for standard
+ DataSet or pointer to original
+ dset_descriptor for patched
+ DataSet */
+ A_UINT32 data_type; /* DSET_TYPE_*, above */
+
+ void *AuxPtr; /* Additional data that might be
+ needed for data_type. For
+ example, pointer to patch
+ Dataset descriptor for BPatch. */
+} POSTPACK dset_descriptor_t;
+
+#ifndef ATH_TARGET
+#include "athendpack.h"
+#endif
+
+#endif /* __DSET_INTERNAL_H__ */
diff --git a/drivers/staging/ath6kl/include/common/dsetid.h b/drivers/staging/ath6kl/include/common/dsetid.h
new file mode 100644
index 000000000000..d08fdeb39ec3
--- /dev/null
+++ b/drivers/staging/ath6kl/include/common/dsetid.h
@@ -0,0 +1,134 @@
+//------------------------------------------------------------------------------
+// <copyright file="dsetid.h" company="Atheros">
+// Copyright (c) 2004-2010 Atheros Corporation. All rights reserved.
+//
+//
+// Permission to use, copy, modify, and/or distribute this software for any
+// purpose with or without fee is hereby granted, provided that the above
+// copyright notice and this permission notice appear in all copies.
+//
+// THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+// WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+// MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+// ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+// WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+// ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+// OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+//
+//
+//------------------------------------------------------------------------------
+//==============================================================================
+// Author(s): ="Atheros"
+//==============================================================================
+
+
+#ifndef __DSETID_H__
+#define __DSETID_H__
+
+#ifndef ATH_TARGET
+#include "athstartpack.h"
+#endif
+
+/* Well-known DataSet IDs */
+#define DSETID_UNUSED 0x00000000
+#define DSETID_BOARD_DATA 0x00000001 /* Cal and board data */
+#define DSETID_REGDB 0x00000002 /* Regulatory Database */
+#define DSETID_POWER_CONTROL 0x00000003 /* TX Pwr Lim & Ant Gain */
+#define DSETID_USER_CONFIG 0x00000004 /* User Configuration */
+
+#define DSETID_ANALOG_CONTROL_DATA_START 0x00000005
+#define DSETID_ANALOG_CONTROL_DATA_END 0x00000025
+/*
+ * Get DSETID for various reference clock speeds.
+ * For each speed there are three DataSets that correspond
+ * to the three columns of bank6 data (addr, 11a, 11b/g).
+ * This macro returns the dsetid of the first of those
+ * three DataSets.
+ */
+#define ANALOG_CONTROL_DATA_DSETID(refclk) \
+ (DSETID_ANALOG_CONTROL_DATA_START + 3*refclk)
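
For example, under the layout described above, reference-clock index 2 would use DataSets 0x0B, 0x0C and 0x0D (worked arithmetic only, assuming refclk indices start at 0):

A_UINT32 addr_column_dsetid = ANALOG_CONTROL_DATA_DSETID(2); /* 0x05 + 3*2 == 0x0B */
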
+
+/*
+ * There are TWO STARTUP_PATCH DataSets.
+ * DSETID_STARTUP_PATCH is historical, and was applied before BMI on
+ * earlier systems. On AR6002, it is applied after BMI, just like
+ * DSETID_STARTUP_PATCH2.
+ */
+#define DSETID_STARTUP_PATCH 0x00000026
+#define DSETID_GPIO_CONFIG_PATCH 0x00000027
+#define DSETID_WLANREGS 0x00000028 /* override wlan regs */
+#define DSETID_STARTUP_PATCH2 0x00000029
+
+#define DSETID_WOW_CONFIG 0x00000090 /* WoW Configuration */
+
+/* Add WHAL_INI_DATA_ID to DSETID_INI_DATA for a specific WHAL INI table. */
+#define DSETID_INI_DATA 0x00000100
+/* Reserved for WHAL INI Tables: 0x100..0x11f */
+#define DSETID_INI_DATA_END 0x0000011f
+
+#define DSETID_VENDOR_START 0x00010000 /* Vendor-defined DataSets */
+
+#define DSETID_INDEX_END 0xfffffffe /* Reserved to indicate the
+ end of a memory-based
+ DataSet Index */
+#define DSETID_INDEX_FREE 0xffffffff /* An unused index entry */
+
+/*
+ * PATCH DataSet format:
+ * A list of patches, terminated by a patch with
+ * address=PATCH_END.
+ *
+ * This allows for patches to be stored in flash.
+ */
+PREPACK struct patch_s {
+ A_UINT32 *address;
+ A_UINT32 data;
+} POSTPACK ;
+
+/*
+ * Skip some patches. Can be used to erase a single patch in a
+ * patch DataSet without having to re-write the DataSet. May
+ * also be used to embed information for use by subsequent
+ * patch code. The "data" in a PATCH_SKIP tells how many
+ * bytes of length "patch_s" to skip.
+ */
+#define PATCH_SKIP ((A_UINT32 *)0x00000000)
+
+/*
+ * Execute code at the address specified by "data".
+ * The address of the patch structure is passed as
+ * the one parameter.
+ */
+#define PATCH_CODE_ABS ((A_UINT32 *)0x00000001)
+
+/*
+ * Same as PATCH_CODE_ABS, but treat "data" as an
+ * offset from the start of the patch word.
+ */
+#define PATCH_CODE_REL ((A_UINT32 *)0x00000002)
+
+/* Mark the end of this patch DataSet. */
+#define PATCH_END ((A_UINT32 *)0xffffffff)
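
A minimal sketch of the patch-list format described above; the address/data pairs are hypothetical and only the PATCH_END terminator is mandated by the format:

static struct patch_s example_patch_list[] = {
	{ (A_UINT32 *)0x00540000, 0x12345678 },  /* hypothetical write: *0x00540000 = 0x12345678 */
	{ PATCH_CODE_REL,         0x00000020 },  /* run code 0x20 bytes past the start of this entry */
	{ PATCH_END,              0 }            /* terminates the patch DataSet */
};
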
+
+/*
+ * A DataSet which contains a Binary Patch to some other DataSet
+ * uses the original dsetid with the DSETID_BPATCH_FLAG bit set.
+ * Such a BPatch DataSet consists of BPatch metadata followed by
+ * the bdiff bytes. BPatch metadata consists of a single 32-bit
+ * word that contains the size of the BPatched final image.
+ *
+ * To create a suitable bdiff DataSet, use bdiff in host/tools/bdiff
+ * to create "diffs":
+ * bdiff -q -O -nooldmd5 -nonewmd5 -d ORIGfile NEWfile diffs
+ * Then add BPatch metadata to the start of "diffs".
+ *
+ * NB: There are some implementation-induced restrictions
+ * on which DataSets can be BPatched.
+ */
+#define DSETID_BPATCH_FLAG 0x80000000
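
So, for instance, a BPatch of the board-data DataSet would be published under the ID sketched below (the macro name is illustrative, not part of the header):

#define EXAMPLE_DSETID_BOARD_DATA_BPATCH (DSETID_BOARD_DATA | DSETID_BPATCH_FLAG) /* 0x80000001 */
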
+
+#ifndef ATH_TARGET
+#include "athendpack.h"
+#endif
+
+#endif /* __DSETID_H__ */
diff --git a/drivers/staging/ath6kl/include/common/epping_test.h b/drivers/staging/ath6kl/include/common/epping_test.h
new file mode 100644
index 000000000000..f8aeb3f657ea
--- /dev/null
+++ b/drivers/staging/ath6kl/include/common/epping_test.h
@@ -0,0 +1,120 @@
+//------------------------------------------------------------------------------
+// Copyright (c) 2009-2010 Atheros Corporation. All rights reserved.
+//
+//
+// Permission to use, copy, modify, and/or distribute this software for any
+// purpose with or without fee is hereby granted, provided that the above
+// copyright notice and this permission notice appear in all copies.
+//
+// THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+// WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+// MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+// ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+// WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+// ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+// OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+//
+//
+//------------------------------------------------------------------------------
+//==============================================================================
+// Author(s): ="Atheros"
+//
+
+/* This file contains shared definitions for the host/target endpoint ping test */
+
+#ifndef EPPING_TEST_H_
+#define EPPING_TEST_H_
+
+#ifndef ATH_TARGET
+#include "athstartpack.h"
+#endif
+
+ /* alignment to 4-bytes */
+#define EPPING_ALIGNMENT_PAD (((sizeof(HTC_FRAME_HDR) + 3) & (~0x3)) - sizeof(HTC_FRAME_HDR))
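
With the 6-byte HTC_FRAME_HDR defined in htc.h, the expression above rounds the header up to the next 4-byte boundary, so the pad works out to 2 bytes (worked example, assuming that header layout):

/* ((6 + 3) & ~0x3) - 6 == 8 - 6 == 2 */
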
+
+#ifndef A_OFFSETOF
+#define A_OFFSETOF(type,field) (int)(&(((type *)NULL)->field))
+#endif
+
+#define EPPING_RSVD_FILL 0xCC
+
+#define HCI_RSVD_EXPECTED_PKT_TYPE_RECV_OFFSET 7
+
+typedef PREPACK struct {
+ A_UINT8 _HCIRsvd[8]; /* reserved for HCI packet header (GMBOX) testing */
+ A_UINT8 StreamEcho_h; /* stream no. to echo this packet on (filled by host) */
+ A_UINT8 StreamEchoSent_t; /* stream no. packet was echoed to (filled by target)
+ When echoed: StreamEchoSent_t == StreamEcho_h */
+ A_UINT8 StreamRecv_t; /* stream no. that target received this packet on (filled by target) */
+ A_UINT8 StreamNo_h; /* stream number to send on (filled by host) */
+ A_UINT8 Magic_h[4]; /* magic number to filter for this packet on the host*/
+ A_UINT8 _rsvd[6]; /* reserved fields that must be set to a "reserved" value;
+ since this packet maps to a 14-byte ethernet frame, we want
+ to make sure the ethertype field is set to something unknown */
+
+ A_UINT8 _pad[2]; /* padding for alignment */
+ A_UINT8 TimeStamp[8]; /* timestamp of packet (host or target) */
+ A_UINT32 HostContext_h; /* 4 byte host context, target echoes this back */
+ A_UINT32 SeqNo; /* sequence number (set by host or target) */
+ A_UINT16 Cmd_h; /* ping command (filled by host) */
+ A_UINT16 CmdFlags_h; /* optional flags */
+ A_UINT8 CmdBuffer_h[8]; /* buffer for command (host -> target) */
+ A_UINT8 CmdBuffer_t[8]; /* buffer for command (target -> host) */
+ A_UINT16 DataLength; /* length of data */
+ A_UINT16 DataCRC; /* 16 bit CRC of data */
+ A_UINT16 HeaderCRC; /* header CRC (fields : StreamNo_h to end, minus HeaderCRC) */
+} POSTPACK EPPING_HEADER;
+
+#define EPPING_PING_MAGIC_0 0xAA
+#define EPPING_PING_MAGIC_1 0x55
+#define EPPING_PING_MAGIC_2 0xCE
+#define EPPING_PING_MAGIC_3 0xEC
+
+
+
+#define IS_EPPING_PACKET(pPkt) (((pPkt)->Magic_h[0] == EPPING_PING_MAGIC_0) && \
+ ((pPkt)->Magic_h[1] == EPPING_PING_MAGIC_1) && \
+ ((pPkt)->Magic_h[2] == EPPING_PING_MAGIC_2) && \
+ ((pPkt)->Magic_h[3] == EPPING_PING_MAGIC_3))
+
+#define SET_EPPING_PACKET_MAGIC(pPkt) { (pPkt)->Magic_h[0] = EPPING_PING_MAGIC_0; \
+ (pPkt)->Magic_h[1] = EPPING_PING_MAGIC_1; \
+ (pPkt)->Magic_h[2] = EPPING_PING_MAGIC_2; \
+ (pPkt)->Magic_h[3] = EPPING_PING_MAGIC_3;}
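
A hedged host-side sketch of stamping and checking the magic bytes with the macros above (the variable is illustrative, not part of the header):

EPPING_HEADER ping = { { 0 } };              /* zero-initialized for this sketch */
SET_EPPING_PACKET_MAGIC(&ping);
if (IS_EPPING_PACKET(&ping))
	ping.Cmd_h = EPPING_CMD_ECHO_PACKET;     /* command IDs are defined further below */
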
+
+#define CMD_FLAGS_DATA_CRC (1 << 0) /* DataCRC field is valid */
+#define CMD_FLAGS_DELAY_ECHO (1 << 1) /* delay the echo of the packet */
+#define CMD_FLAGS_NO_DROP (1 << 2) /* do not drop at HTC layer no matter what the stream is */
+
+#define IS_EPING_PACKET_NO_DROP(pPkt) ((pPkt)->CmdFlags_h & CMD_FLAGS_NO_DROP)
+
+#define EPPING_CMD_ECHO_PACKET 1 /* echo packet test */
+#define EPPING_CMD_RESET_RECV_CNT 2 /* reset recv count */
+#define EPPING_CMD_CAPTURE_RECV_CNT 3 /* fetch recv count, 4-byte count returned in CmdBuffer_t */
+#define EPPING_CMD_NO_ECHO 4 /* non-echo packet test (tx-only) */
+#define EPPING_CMD_CONT_RX_START 5 /* continuous RX packets, parameters are in CmdBuffer_h */
+#define EPPING_CMD_CONT_RX_STOP 6 /* stop continuous RX packet transmission */
+
+ /* test command parameters may be no more than 8 bytes */
+typedef PREPACK struct {
+ A_UINT16 BurstCnt; /* number of packets to burst together (for HTC 2.1 testing) */
+ A_UINT16 PacketLength; /* length of packet to generate including header */
+ A_UINT16 Flags; /* flags */
+
+#define EPPING_CONT_RX_DATA_CRC (1 << 0) /* Add CRC to all data */
+#define EPPING_CONT_RX_RANDOM_DATA (1 << 1) /* randomize the data pattern */
+#define EPPING_CONT_RX_RANDOM_LEN (1 << 2) /* randomize the packet lengths */
+} POSTPACK EPPING_CONT_RX_PARAMS;
+
+#define EPPING_HDR_CRC_OFFSET A_OFFSETOF(EPPING_HEADER,StreamNo_h)
+#define EPPING_HDR_BYTES_CRC (sizeof(EPPING_HEADER) - EPPING_HDR_CRC_OFFSET - (sizeof(A_UINT16)))
+
+#define HCI_TRANSPORT_STREAM_NUM 16 /* this number is higher than the defined WMM AC classes so we
+ can use this to distinguish packets */
+
+#ifndef ATH_TARGET
+#include "athendpack.h"
+#endif
+
+
+#endif /*EPPING_TEST_H_*/
diff --git a/drivers/staging/ath6kl/include/common/gmboxif.h b/drivers/staging/ath6kl/include/common/gmboxif.h
new file mode 100644
index 000000000000..4d8d85fd2e7c
--- /dev/null
+++ b/drivers/staging/ath6kl/include/common/gmboxif.h
@@ -0,0 +1,78 @@
+//------------------------------------------------------------------------------
+// Copyright (c) 2009-2010 Atheros Corporation. All rights reserved.
+//
+//
+// Permission to use, copy, modify, and/or distribute this software for any
+// purpose with or without fee is hereby granted, provided that the above
+// copyright notice and this permission notice appear in all copies.
+//
+// THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+// WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+// MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+// ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+// WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+// ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+// OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+//
+//
+//------------------------------------------------------------------------------
+//==============================================================================
+// Author(s): ="Atheros"
+//==============================================================================
+
+#ifndef __GMBOXIF_H__
+#define __GMBOXIF_H__
+
+#ifndef ATH_TARGET
+#include "athstartpack.h"
+#endif
+
+/* GMBOX interface definitions */
+
+#define AR6K_GMBOX_CREDIT_COUNTER 1 /* we use credit counter 1 to track credits */
+#define AR6K_GMBOX_CREDIT_SIZE_COUNTER 2 /* credit counter 2 is used to pass the size of each credit */
+
+
+ /* HCI UART transport definitions when used over GMBOX interface */
+#define HCI_UART_COMMAND_PKT 0x01
+#define HCI_UART_ACL_PKT 0x02
+#define HCI_UART_SCO_PKT 0x03
+#define HCI_UART_EVENT_PKT 0x04
+
+ /* definitions for BT HCI packets */
+typedef PREPACK struct {
+ A_UINT16 Flags_ConnHandle;
+ A_UINT16 Length;
+} POSTPACK BT_HCI_ACL_HEADER;
+
+typedef PREPACK struct {
+ A_UINT16 Flags_ConnHandle;
+ A_UINT8 Length;
+} POSTPACK BT_HCI_SCO_HEADER;
+
+typedef PREPACK struct {
+ A_UINT16 OpCode;
+ A_UINT8 ParamLength;
+} POSTPACK BT_HCI_COMMAND_HEADER;
+
+typedef PREPACK struct {
+ A_UINT8 EventCode;
+ A_UINT8 ParamLength;
+} POSTPACK BT_HCI_EVENT_HEADER;
+
+/* MBOX host interrupt signal assignments */
+
+#define MBOX_SIG_HCI_BRIDGE_MAX 8
+#define MBOX_SIG_HCI_BRIDGE_BT_ON 0
+#define MBOX_SIG_HCI_BRIDGE_BT_OFF 1
+#define MBOX_SIG_HCI_BRIDGE_BAUD_SET 2
+#define MBOX_SIG_HCI_BRIDGE_PWR_SAV_ON 3
+#define MBOX_SIG_HCI_BRIDGE_PWR_SAV_OFF 4
+
+
+#ifndef ATH_TARGET
+#include "athendpack.h"
+#endif
+
+#endif /* __GMBOXIF_H__ */
+
diff --git a/drivers/staging/ath6kl/include/common/gpio.h b/drivers/staging/ath6kl/include/common/gpio.h
new file mode 100644
index 000000000000..f7230667dd66
--- /dev/null
+++ b/drivers/staging/ath6kl/include/common/gpio.h
@@ -0,0 +1,45 @@
+//------------------------------------------------------------------------------
+// Copyright (c) 2005-2010 Atheros Corporation. All rights reserved.
+//
+//
+// Permission to use, copy, modify, and/or distribute this software for any
+// purpose with or without fee is hereby granted, provided that the above
+// copyright notice and this permission notice appear in all copies.
+//
+// THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+// WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+// MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+// ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+// WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+// ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+// OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+//
+//
+//
+// Author(s): ="Atheros"
+//------------------------------------------------------------------------------
+
+#define AR6001_GPIO_PIN_COUNT 18
+#define AR6002_GPIO_PIN_COUNT 18
+#define AR6003_GPIO_PIN_COUNT 28
+
+/*
+ * Possible values for WMIX_GPIO_SET_REGISTER_CMDID.
+ * NB: These match hardware order, so that addresses can
+ * easily be computed.
+ */
+#define GPIO_ID_OUT 0x00000000
+#define GPIO_ID_OUT_W1TS 0x00000001
+#define GPIO_ID_OUT_W1TC 0x00000002
+#define GPIO_ID_ENABLE 0x00000003
+#define GPIO_ID_ENABLE_W1TS 0x00000004
+#define GPIO_ID_ENABLE_W1TC 0x00000005
+#define GPIO_ID_IN 0x00000006
+#define GPIO_ID_STATUS 0x00000007
+#define GPIO_ID_STATUS_W1TS 0x00000008
+#define GPIO_ID_STATUS_W1TC 0x00000009
+#define GPIO_ID_PIN0 0x0000000a
+#define GPIO_ID_PIN(n) (GPIO_ID_PIN0+(n))
+
+#define GPIO_LAST_REGISTER_ID GPIO_ID_PIN(17)
+#define GPIO_ID_NONE 0xffffffff
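
Because the IDs track hardware register order, per-pin configuration registers are reachable by simple arithmetic; for example (illustrative):

/* e.g. GPIO_ID_PIN(17) == 0x0a + 17 == 0x1b, which is GPIO_LAST_REGISTER_ID above */
A_UINT32 pin5_reg_id = GPIO_ID_PIN(5);   /* 0x0000000f */
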
diff --git a/drivers/staging/ath6kl/include/common/htc.h b/drivers/staging/ath6kl/include/common/htc.h
new file mode 100644
index 000000000000..f96cf7db7e06
--- /dev/null
+++ b/drivers/staging/ath6kl/include/common/htc.h
@@ -0,0 +1,236 @@
+//------------------------------------------------------------------------------
+// <copyright file="htc.h" company="Atheros">
+// Copyright (c) 2004-2010 Atheros Corporation. All rights reserved.
+//
+//
+// Permission to use, copy, modify, and/or distribute this software for any
+// purpose with or without fee is hereby granted, provided that the above
+// copyright notice and this permission notice appear in all copies.
+//
+// THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+// WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+// MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+// ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+// WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+// ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+// OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+//
+//
+//------------------------------------------------------------------------------
+//==============================================================================
+// Author(s): ="Atheros"
+//==============================================================================
+
+#ifndef __HTC_H__
+#define __HTC_H__
+
+#ifndef ATH_TARGET
+#include "athstartpack.h"
+#endif
+
+#define A_OFFSETOF(type,field) (unsigned long)(&(((type *)NULL)->field))
+
+#define ASSEMBLE_UNALIGNED_UINT16(p,highbyte,lowbyte) \
+ (((A_UINT16)(((A_UINT8 *)(p))[(highbyte)])) << 8 | (A_UINT16)(((A_UINT8 *)(p))[(lowbyte)]))
+
+/* alignment independent macros (little-endian) to fetch UINT16s or UINT8s from a
+ * structure using only the type and field name.
+ * Use these macros if there is the potential for unaligned buffer accesses. */
+#define A_GET_UINT16_FIELD(p,type,field) \
+ ASSEMBLE_UNALIGNED_UINT16(p,\
+ A_OFFSETOF(type,field) + 1, \
+ A_OFFSETOF(type,field))
+
+#define A_SET_UINT16_FIELD(p,type,field,value) \
+{ \
+ ((A_UINT8 *)(p))[A_OFFSETOF(type,field)] = (A_UINT8)(value); \
+ ((A_UINT8 *)(p))[A_OFFSETOF(type,field) + 1] = (A_UINT8)((value) >> 8); \
+}
+
+#define A_GET_UINT8_FIELD(p,type,field) \
+ ((A_UINT8 *)(p))[A_OFFSETOF(type,field)]
+
+#define A_SET_UINT8_FIELD(p,type,field,value) \
+ ((A_UINT8 *)(p))[A_OFFSETOF(type,field)] = (value)
+
+/****** DANGER DANGER ***************
+ *
+ * The frame header length and message formats defined herein were
+ * selected to accommodate optimal alignment for target processing. This reduces code
+ * size and improves performance.
+ *
+ * Any changes to the header length may alter the alignment and cause exceptions
+ * on the target. When adding to the message structures, ensure that fields are
+ * properly aligned.
+ *
+ */
+
+/* HTC frame header */
+typedef PREPACK struct _HTC_FRAME_HDR{
+ /* do not remove or re-arrange these fields; these are minimally required
+ * to take advantage of 4-byte lookaheads in some hardware implementations */
+ A_UINT8 EndpointID;
+ A_UINT8 Flags;
+ A_UINT16 PayloadLen; /* length of data (including trailer) that follows the header */
+
+ /***** end of 4-byte lookahead ****/
+
+ A_UINT8 ControlBytes[2];
+
+ /* message payload starts after the header */
+
+} POSTPACK HTC_FRAME_HDR;
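
As a hedged illustration of the alignment-safe accessors defined earlier in this header, a receive path that cannot guarantee buffer alignment might read the lookahead fields like this (the helper name is made up, not part of the patch):

static void htc_read_lookahead_fields_example(A_UINT8 *pBuf)
{
	/* pBuf points at a received HTC frame whose alignment is not guaranteed,
	 * so the field macros are used instead of casting to HTC_FRAME_HDR *. */
	A_UINT8  ep   = A_GET_UINT8_FIELD(pBuf, HTC_FRAME_HDR, EndpointID);
	A_UINT16 plen = A_GET_UINT16_FIELD(pBuf, HTC_FRAME_HDR, PayloadLen);
	(void)ep; (void)plen;
}
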
+
+/* frame header flags */
+
+ /* send direction */
+#define HTC_FLAGS_NEED_CREDIT_UPDATE (1 << 0)
+#define HTC_FLAGS_SEND_BUNDLE (1 << 1) /* start or part of bundle */
+ /* receive direction */
+#define HTC_FLAGS_RECV_UNUSED_0 (1 << 0) /* bit 0 unused */
+#define HTC_FLAGS_RECV_TRAILER (1 << 1) /* bit 1 trailer data present */
+#define HTC_FLAGS_RECV_UNUSED_2 (1 << 2) /* bit 2 unused */
+#define HTC_FLAGS_RECV_UNUSED_3 (1 << 3) /* bit 3 unused */
+#define HTC_FLAGS_RECV_BUNDLE_CNT_MASK (0xF0) /* bits 7..4 */
+#define HTC_FLAGS_RECV_BUNDLE_CNT_SHIFT 4
+
+#define HTC_HDR_LENGTH (sizeof(HTC_FRAME_HDR))
+#define HTC_MAX_TRAILER_LENGTH 255
+#define HTC_MAX_PAYLOAD_LENGTH (4096 - sizeof(HTC_FRAME_HDR))
+
+/* HTC control message IDs */
+
+#define HTC_MSG_READY_ID 1
+#define HTC_MSG_CONNECT_SERVICE_ID 2
+#define HTC_MSG_CONNECT_SERVICE_RESPONSE_ID 3
+#define HTC_MSG_SETUP_COMPLETE_ID 4
+#define HTC_MSG_SETUP_COMPLETE_EX_ID 5
+
+#define HTC_MAX_CONTROL_MESSAGE_LENGTH 256
+
+/* base message ID header */
+typedef PREPACK struct {
+ A_UINT16 MessageID;
+} POSTPACK HTC_UNKNOWN_MSG;
+
+/* HTC ready message
+ * direction : target-to-host */
+typedef PREPACK struct {
+ A_UINT16 MessageID; /* ID */
+ A_UINT16 CreditCount; /* number of credits the target can offer */
+ A_UINT16 CreditSize; /* size of each credit */
+ A_UINT8 MaxEndpoints; /* maximum number of endpoints the target has resources for */
+ A_UINT8 _Pad1;
+} POSTPACK HTC_READY_MSG;
+
+ /* extended HTC ready message */
+typedef PREPACK struct {
+ HTC_READY_MSG Version2_0_Info; /* legacy version 2.0 information at the front... */
+ /* extended information */
+ A_UINT8 HTCVersion;
+ A_UINT8 MaxMsgsPerHTCBundle;
+} POSTPACK HTC_READY_EX_MSG;
+
+#define HTC_VERSION_2P0 0x00
+#define HTC_VERSION_2P1 0x01 /* HTC 2.1 */
+
+#define HTC_SERVICE_META_DATA_MAX_LENGTH 128
+
+/* connect service
+ * direction : host-to-target */
+typedef PREPACK struct {
+ A_UINT16 MessageID;
+ A_UINT16 ServiceID; /* service ID of the service to connect to */
+ A_UINT16 ConnectionFlags; /* connection flags */
+
+#define HTC_CONNECT_FLAGS_REDUCE_CREDIT_DRIBBLE (1 << 2) /* reduce credit dribbling when
+ the host needs credits */
+#define HTC_CONNECT_FLAGS_THRESHOLD_LEVEL_MASK (0x3)
+#define HTC_CONNECT_FLAGS_THRESHOLD_LEVEL_ONE_FOURTH 0x0
+#define HTC_CONNECT_FLAGS_THRESHOLD_LEVEL_ONE_HALF 0x1
+#define HTC_CONNECT_FLAGS_THRESHOLD_LEVEL_THREE_FOURTHS 0x2
+#define HTC_CONNECT_FLAGS_THRESHOLD_LEVEL_UNITY 0x3
+
+ A_UINT8 ServiceMetaLength; /* length of meta data that follows */
+ A_UINT8 _Pad1;
+
+ /* service-specific meta data starts after the header */
+
+} POSTPACK HTC_CONNECT_SERVICE_MSG;
+
+/* connect response
+ * direction : target-to-host */
+typedef PREPACK struct {
+ A_UINT16 MessageID;
+ A_UINT16 ServiceID; /* service ID that the connection request was made for */
+ A_UINT8 Status; /* service connection status */
+ A_UINT8 EndpointID; /* assigned endpoint ID */
+ A_UINT16 MaxMsgSize; /* maximum expected message size on this endpoint */
+ A_UINT8 ServiceMetaLength; /* length of meta data that follows */
+ A_UINT8 _Pad1;
+
+ /* service-specific meta data starts after the header */
+
+} POSTPACK HTC_CONNECT_SERVICE_RESPONSE_MSG;
+
+typedef PREPACK struct {
+ A_UINT16 MessageID;
+ /* currently, no other fields */
+} POSTPACK HTC_SETUP_COMPLETE_MSG;
+
+ /* extended setup completion message */
+typedef PREPACK struct {
+ A_UINT16 MessageID;
+ A_UINT32 SetupFlags;
+ A_UINT8 MaxMsgsPerBundledRecv;
+ A_UINT8 Rsvd[3];
+} POSTPACK HTC_SETUP_COMPLETE_EX_MSG;
+
+#define HTC_SETUP_COMPLETE_FLAGS_ENABLE_BUNDLE_RECV (1 << 0)
+
+/* connect response status codes */
+#define HTC_SERVICE_SUCCESS 0 /* success */
+#define HTC_SERVICE_NOT_FOUND 1 /* service could not be found */
+#define HTC_SERVICE_FAILED 2 /* specific service failed the connect */
+#define HTC_SERVICE_NO_RESOURCES 3 /* no resources (i.e. no more endpoints) */
+#define HTC_SERVICE_NO_MORE_EP 4 /* specific service is not allowing any more
+ endpoints */
+
+/* report record IDs */
+
+#define HTC_RECORD_NULL 0
+#define HTC_RECORD_CREDITS 1
+#define HTC_RECORD_LOOKAHEAD 2
+#define HTC_RECORD_LOOKAHEAD_BUNDLE 3
+
+typedef PREPACK struct {
+ A_UINT8 RecordID; /* Record ID */
+ A_UINT8 Length; /* Length of record */
+} POSTPACK HTC_RECORD_HDR;
+
+typedef PREPACK struct {
+ A_UINT8 EndpointID; /* Endpoint that owns these credits */
+ A_UINT8 Credits; /* credits to report since last report */
+} POSTPACK HTC_CREDIT_REPORT;
+
+typedef PREPACK struct {
+ A_UINT8 PreValid; /* pre valid guard */
+ A_UINT8 LookAhead[4]; /* 4 byte lookahead */
+ A_UINT8 PostValid; /* post valid guard */
+
+ /* NOTE: the LookAhead array is guarded by PreValid and PostValid guard bytes.
+ * The PreValid byte must equal the inverse of the PostValid byte */
+
+} POSTPACK HTC_LOOKAHEAD_REPORT;
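
Per the guard-byte note above, a receiver would typically treat the lookahead as valid only when the guards are bitwise inverses of each other; a minimal check (illustrative helper, not part of the patch):

static int htc_lookahead_report_is_valid_example(const HTC_LOOKAHEAD_REPORT *rep)
{
	return rep->PreValid == (A_UINT8)(~rep->PostValid);
}
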
+
+typedef PREPACK struct {
+ A_UINT8 LookAhead[4]; /* 4 byte lookahead */
+} POSTPACK HTC_BUNDLED_LOOKAHEAD_REPORT;
+
+#ifndef ATH_TARGET
+#include "athendpack.h"
+#endif
+
+
+#endif /* __HTC_H__ */
+
diff --git a/drivers/staging/ath6kl/include/common/htc_services.h b/drivers/staging/ath6kl/include/common/htc_services.h
new file mode 100644
index 000000000000..fb22268a8d84
--- /dev/null
+++ b/drivers/staging/ath6kl/include/common/htc_services.h
@@ -0,0 +1,52 @@
+//------------------------------------------------------------------------------
+// <copyright file="htc_services.h" company="Atheros">
+// Copyright (c) 2007 Atheros Corporation. All rights reserved.
+//
+//
+// Permission to use, copy, modify, and/or distribute this software for any
+// purpose with or without fee is hereby granted, provided that the above
+// copyright notice and this permission notice appear in all copies.
+//
+// THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+// WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+// MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+// ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+// WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+// ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+// OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+//
+//
+//------------------------------------------------------------------------------
+//==============================================================================
+// Author(s): ="Atheros"
+//==============================================================================
+
+#ifndef __HTC_SERVICES_H__
+#define __HTC_SERVICES_H__
+
+/* Current service IDs */
+
+typedef enum {
+ RSVD_SERVICE_GROUP = 0,
+ WMI_SERVICE_GROUP = 1,
+
+ HTC_TEST_GROUP = 254,
+ HTC_SERVICE_GROUP_LAST = 255
+}HTC_SERVICE_GROUP_IDS;
+
+#define MAKE_SERVICE_ID(group,index) \
+ (int)(((int)group << 8) | (int)(index))
+
+/* NOTE: service ID of 0x0000 is reserved and should never be used */
+#define HTC_CTRL_RSVD_SVC MAKE_SERVICE_ID(RSVD_SERVICE_GROUP,1)
+#define WMI_CONTROL_SVC MAKE_SERVICE_ID(WMI_SERVICE_GROUP,0)
+#define WMI_DATA_BE_SVC MAKE_SERVICE_ID(WMI_SERVICE_GROUP,1)
+#define WMI_DATA_BK_SVC MAKE_SERVICE_ID(WMI_SERVICE_GROUP,2)
+#define WMI_DATA_VI_SVC MAKE_SERVICE_ID(WMI_SERVICE_GROUP,3)
+#define WMI_DATA_VO_SVC MAKE_SERVICE_ID(WMI_SERVICE_GROUP,4)
+#define WMI_MAX_SERVICES 5
+
+/* raw stream service (i.e. flash, tcmd, calibration apps) */
+#define HTC_RAW_STREAMS_SVC MAKE_SERVICE_ID(HTC_TEST_GROUP,0)
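
For reference, the packing performed by MAKE_SERVICE_ID means the IDs above expand as follows (illustrative arithmetic only):

/* HTC_CTRL_RSVD_SVC   == (0 << 8)   | 1 == 0x0001
 * WMI_CONTROL_SVC     == (1 << 8)   | 0 == 0x0100
 * WMI_DATA_BE_SVC     == 0x0101 ... WMI_DATA_VO_SVC == 0x0104
 * HTC_RAW_STREAMS_SVC == (254 << 8) | 0 == 0xFE00 */
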
+
+#endif /*HTC_SERVICES_H_*/
diff --git a/drivers/staging/ath6kl/include/common/ini_dset.h b/drivers/staging/ath6kl/include/common/ini_dset.h
new file mode 100644
index 000000000000..8cf1af834bd0
--- /dev/null
+++ b/drivers/staging/ath6kl/include/common/ini_dset.h
@@ -0,0 +1,82 @@
+//------------------------------------------------------------------------------
+// Copyright (c) 2004-2010 Atheros Corporation. All rights reserved.
+//
+//
+// Permission to use, copy, modify, and/or distribute this software for any
+// purpose with or without fee is hereby granted, provided that the above
+// copyright notice and this permission notice appear in all copies.
+//
+// THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+// WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+// MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+// ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+// WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+// ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+// OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+//
+//
+//
+// Author(s): ="Atheros"
+//------------------------------------------------------------------------------
+
+#ifndef _INI_DSET_H_
+#define _INI_DSET_H_
+
+/*
+ * Each of these represents a WHAL INI table, which consists
+ * of an "address column" followed by 1 or more "value columns".
+ *
+ * Software uses the base WHAL_INI_DATA_ID+column to access a
+ * DataSet that holds a particular column of data.
+ */
+typedef enum {
+#if defined(AR6002_REV4) || defined(AR6003)
+/* Add these definitions for compatibility */
+#define WHAL_INI_DATA_ID_BB_RFGAIN_LNA1 WHAL_INI_DATA_ID_BB_RFGAIN
+#define WHAL_INI_DATA_ID_BB_RFGAIN_LNA2 WHAL_INI_DATA_ID_BB_RFGAIN
+ WHAL_INI_DATA_ID_NULL =0,
+ WHAL_INI_DATA_ID_MODE_SPECIFIC =1, /* 2,3,4,5 */
+ WHAL_INI_DATA_ID_COMMON =6, /* 7 */
+ WHAL_INI_DATA_ID_BB_RFGAIN =8, /* 9,10 */
+#ifdef FPGA
+ WHAL_INI_DATA_ID_ANALOG_BANK0 =11, /* 12 */
+ WHAL_INI_DATA_ID_ANALOG_BANK1 =13, /* 14 */
+ WHAL_INI_DATA_ID_ANALOG_BANK2 =15, /* 16 */
+ WHAL_INI_DATA_ID_ANALOG_BANK3 =17, /* 18, 19 */
+ WHAL_INI_DATA_ID_ANALOG_BANK6 =20, /* 21,22 */
+ WHAL_INI_DATA_ID_ANALOG_BANK7 =23, /* 24 */
+ WHAL_INI_DATA_ID_ADDAC =25, /* 26 */
+#else
+ WHAL_INI_DATA_ID_ANALOG_COMMON =11, /* 12 */
+ WHAL_INI_DATA_ID_ANALOG_MODE_SPECIFIC=13, /* 14,15 */
+ WHAL_INI_DATA_ID_ANALOG_BANK6 =16, /* 17,18 */
+ WHAL_INI_DATA_ID_MODE_OVERRIDES =19, /* 20,21,22,23 */
+ WHAL_INI_DATA_ID_COMMON_OVERRIDES =24, /* 25 */
+ WHAL_INI_DATA_ID_ANALOG_OVERRIDES =26, /* 27,28 */
+#endif /* FPGA */
+#else
+ WHAL_INI_DATA_ID_NULL =0,
+ WHAL_INI_DATA_ID_MODE_SPECIFIC =1, /* 2,3 */
+ WHAL_INI_DATA_ID_COMMON =4, /* 5 */
+ WHAL_INI_DATA_ID_BB_RFGAIN =6, /* 7,8 */
+#define WHAL_INI_DATA_ID_BB_RFGAIN_LNA1 WHAL_INI_DATA_ID_BB_RFGAIN
+ WHAL_INI_DATA_ID_ANALOG_BANK1 =9, /* 10 */
+ WHAL_INI_DATA_ID_ANALOG_BANK2 =11, /* 12 */
+ WHAL_INI_DATA_ID_ANALOG_BANK3 =13, /* 14, 15 */
+ WHAL_INI_DATA_ID_ANALOG_BANK6 =16, /* 17, 18 */
+ WHAL_INI_DATA_ID_ANALOG_BANK7 =19, /* 20 */
+ WHAL_INI_DATA_ID_MODE_OVERRIDES =21, /* 22,23 */
+ WHAL_INI_DATA_ID_COMMON_OVERRIDES =24, /* 25 */
+ WHAL_INI_DATA_ID_ANALOG_OVERRIDES =26, /* 27,28 */
+ WHAL_INI_DATA_ID_BB_RFGAIN_LNA2 =29, /* 30,31 */
+#endif
+ WHAL_INI_DATA_ID_MAX =31
+} WHAL_INI_DATA_ID;
+
+typedef PREPACK struct {
+ A_UINT16 freqIndex; // 1 - A mode, 2 - B or G mode, 0 - common
+ A_UINT16 offset;
+ A_UINT32 newValue;
+} POSTPACK INI_DSET_REG_OVERRIDE;
+
+#endif
diff --git a/drivers/staging/ath6kl/include/common/pkt_log.h b/drivers/staging/ath6kl/include/common/pkt_log.h
new file mode 100644
index 000000000000..331cc04edada
--- /dev/null
+++ b/drivers/staging/ath6kl/include/common/pkt_log.h
@@ -0,0 +1,45 @@
+//------------------------------------------------------------------------------
+// Copyright (c) 2005-2010 Atheros Corporation. All rights reserved.
+//
+//
+// Permission to use, copy, modify, and/or distribute this software for any
+// purpose with or without fee is hereby granted, provided that the above
+// copyright notice and this permission notice appear in all copies.
+//
+// THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+// WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+// MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+// ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+// WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+// ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+// OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+//
+//
+//------------------------------------------------------------------------------
+//==============================================================================
+// Author(s): ="Atheros"
+//==============================================================================
+
+#ifndef __PKT_LOG_H__
+#define __PKT_LOG_H__
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+
+/* Pkt log info */
+typedef PREPACK struct pkt_log_t {
+ struct info_t {
+ A_UINT16 st;
+ A_UINT16 end;
+ A_UINT16 cur;
+ }info[4096];
+ A_UINT16 last_idx;
+}POSTPACK PACKET_LOG;
+
+
+#ifdef __cplusplus
+}
+#endif
+#endif /* __PKT_LOG_H__ */
diff --git a/drivers/staging/ath6kl/include/common/regDb.h b/drivers/staging/ath6kl/include/common/regDb.h
new file mode 100644
index 000000000000..f8245f104528
--- /dev/null
+++ b/drivers/staging/ath6kl/include/common/regDb.h
@@ -0,0 +1,29 @@
+//------------------------------------------------------------------------------
+// Copyright (c) 2005-2010 Atheros Corporation. All rights reserved.
+//
+//
+// Permission to use, copy, modify, and/or distribute this software for any
+// purpose with or without fee is hereby granted, provided that the above
+// copyright notice and this permission notice appear in all copies.
+//
+// THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+// WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+// MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+// ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+// WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+// ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+// OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+//
+//
+//------------------------------------------------------------------------------
+//==============================================================================
+// Author(s): ="Atheros"
+//==============================================================================
+
+#ifndef __REG_DB_H__
+#define __REG_DB_H__
+
+#include "./regulatory/reg_dbschema.h"
+#include "./regulatory/reg_dbvalues.h"
+
+#endif /* __REG_DB_H__ */
diff --git a/drivers/staging/ath6kl/include/common/regdump.h b/drivers/staging/ath6kl/include/common/regdump.h
new file mode 100644
index 000000000000..ff79b4846e69
--- /dev/null
+++ b/drivers/staging/ath6kl/include/common/regdump.h
@@ -0,0 +1,59 @@
+//------------------------------------------------------------------------------
+// <copyright file="regdump.h" company="Atheros">
+// Copyright (c) 2004-2010 Atheros Corporation. All rights reserved.
+//
+//
+// Permission to use, copy, modify, and/or distribute this software for any
+// purpose with or without fee is hereby granted, provided that the above
+// copyright notice and this permission notice appear in all copies.
+//
+// THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+// WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+// MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+// ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+// WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+// ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+// OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+//
+//
+//------------------------------------------------------------------------------
+//==============================================================================
+// Author(s): ="Atheros"
+//==============================================================================
+
+#ifndef __REGDUMP_H__
+#define __REGDUMP_H__
+
+#ifndef ATH_TARGET
+#include "athstartpack.h"
+#endif
+
+#if defined(AR6001)
+#include "AR6001/AR6001_regdump.h"
+#endif
+#if defined(AR6002)
+#include "AR6002/AR6002_regdump.h"
+#endif
+
+#if !defined(__ASSEMBLER__)
+/*
+ * Target CPU state at the time of failure is reflected
+ * in a register dump, which the Host can fetch through
+ * the diagnostic window.
+ */
+PREPACK struct register_dump_s {
+ A_UINT32 target_id; /* Target ID */
+ A_UINT32 assline; /* Line number (if assertion failure) */
+ A_UINT32 pc; /* Program Counter at time of exception */
+ A_UINT32 badvaddr; /* Virtual address causing exception */
+ CPU_exception_frame_t exc_frame; /* CPU-specific exception info */
+
+ /* Could copy top of stack here, too.... */
+} POSTPACK;
+#endif /* __ASSEMBLER__ */
+
+#ifndef ATH_TARGET
+#include "athendpack.h"
+#endif
+
+#endif /* __REGDUMP_H__ */
diff --git a/drivers/staging/ath6kl/include/common/regulatory/reg_dbschema.h b/drivers/staging/ath6kl/include/common/regulatory/reg_dbschema.h
new file mode 100644
index 000000000000..c6844d69fe47
--- /dev/null
+++ b/drivers/staging/ath6kl/include/common/regulatory/reg_dbschema.h
@@ -0,0 +1,237 @@
+//------------------------------------------------------------------------------
+// Copyright (c) 2005-2010 Atheros Corporation. All rights reserved.
+//
+//
+// Permission to use, copy, modify, and/or distribute this software for any
+// purpose with or without fee is hereby granted, provided that the above
+// copyright notice and this permission notice appear in all copies.
+//
+// THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+// WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+// MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+// ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+// WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+// ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+// OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+//
+//
+//------------------------------------------------------------------------------
+//==============================================================================
+// Author(s): ="Atheros"
+//==============================================================================
+
+#ifndef __REG_DBSCHEMA_H__
+#define __REG_DBSCHEMA_H__
+
+/*
+ * This file describes the regulatory DB schema, which is common between the
+ * 'generator' and the 'parser'. The 'generator' runs on a host (typically an
+ * x86 Linux machine) and spits out two binary files that follow the DB file
+ * format (described below). The resultant output "regulatoryData_AG.bin" is
+ * a binary file that holds both A and G regulatory information, while
+ * "regulatoryData_G.bin" consists of G-ONLY regulatory information. These
+ * binary files are parsed on the target to extract regulatory
+ * information.
+ *
+ * The DB values used to populate the regulatory DB are defined in
+ * reg_dbvalues.h
+ *
+ */
+
+/* Binary data file - Representation of Regulatory DB*/
+#define REG_DATA_FILE_AG "./regulatoryData_AG.bin"
+#define REG_DATA_FILE_G "./regulatoryData_G.bin"
+
+
+/* Table tags used to encode different tables in the database */
+enum data_tags_t{
+ REG_DMN_PAIR_MAPPING_TAG = 0,
+ REG_COUNTRY_CODE_TO_ENUM_RD_TAG,
+ REG_DMN_FREQ_BAND_regDmn5GhzFreq_TAG,
+ REG_DMN_FREQ_BAND_regDmn2Ghz11_BG_Freq_TAG,
+ REG_DOMAIN_TAG,
+ MAX_DB_TABLE_TAGS
+ };
+
+
+
+/*
+ ****************************************************************************
+ * Regulatory DB file format :
+ * 4-bytes : "RGDB" (Magic Key)
+ * 4-bytes : version (Default is 5379(my extn))
+ * 4-bytes : length of file
+ * dbType(4)
+ * TAG(4)
+ * Entries(1)entrySize(1)searchType(1)reserved[3]tableSize(2)"0xdeadbeef"(4)struct_data....
+ * TAG(4)
+ * Entries(1)entrySize(1)searchType(1)reserved[3]tableSize(2)"0xdeadbeef"(4)struct_data....
+ * TAG(4)
+ * Entries(1)entrySize(1)searchType(1)reserved[3]tableSize(2)"0xdeadbeef"(4)struct_data....
+ * ...
+ * ...
+ ****************************************************************************
+ *
+ */
+
+/*
+ * Length of the file would be filled in when the file is created and
+ * it would include the header size.
+ */
+
+#define REG_DB_KEY "RGDB" /* Should be EXACTLY 4-bytes */
+#define REG_DB_VER 7802 /* Between 0-9999 */
+/* REG_DB_VER history in reverse chronological order:
+ * 7802: 78 (ASCII code of N) + 02 (minor version number) - updated 10/21/09
+ * 7801: 78 (ASCII code of N) + 01 (minor version number, increment on further changes)
+ * 1178: '11N' = 11 + ASCII code of N(78)
+ * 5379: initial version, no 11N support
+ */
+#define MAGIC_KEY_OFFSET 0
+#define VERSION_OFFSET 4
+#define FILE_SZ_OFFSET 8
+#define DB_TYPE_OFFSET 12
+
+#define MAGIC_KEY_SZ 4
+#define VERSION_SZ 4
+#define FILE_SZ_SZ 4
+#define DB_TYPE_SZ 4
+#define DB_TAG_SZ 4
+
+#define REGDB_GET_MAGICKEY(x) ((char *)x + MAGIC_KEY_OFFSET)
+#define REGDB_GET_VERSION(x) ((char *)x + VERSION_OFFSET)
+#define REGDB_GET_FILESIZE(x) *((unsigned int *)((char *)x + FILE_SZ_OFFSET))
+#define REGDB_GET_DBTYPE(x) *((char *)x + DB_TYPE_OFFSET)
+
+#define REGDB_SET_FILESIZE(x, sz_) *((unsigned int *)((char *)x + FILE_SZ_OFFSET)) = (sz_)
+#define REGDB_IS_EOF(cur, begin) ( REGDB_GET_FILESIZE(begin) > ((cur) - (begin)) )
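+
+/*
+ * Illustrative sketch (not part of the original Atheros header): a parser
+ * might use the offsets/accessors above to sanity-check a DB image before
+ * walking its tables.  memcmp (from <string.h> or the platform equivalent)
+ * and the helper name are assumptions of this example; it is compiled out.
+ */
+#if 0 /* example only */
+static int regdb_header_ok(void *db, unsigned int len)
+{
+    /* the magic key occupies the first MAGIC_KEY_SZ bytes and is not NUL terminated */
+    if (memcmp(REGDB_GET_MAGICKEY(db), REG_DB_KEY, MAGIC_KEY_SZ) != 0)
+        return 0;
+    /* the file size recorded in the header must not exceed the buffer we were given */
+    if (REGDB_GET_FILESIZE(db) > len)
+        return 0;
+    return 1;
+}
+#endif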
+
+
+/* A table can be searched by key, passed as a parameter, or accessed directly
+ * by giving its index into the table.
+ */
+enum searchType {
+ KEY_BASED_TABLE_SEARCH = 1,
+ INDEX_BASED_TABLE_ACCESS
+ };
+
+
+/* Data is organised as different tables. There is a master table, which
+ * holds information regarding all the tables. It has no knowledge of the
+ * attributes of the tables it holds, only an external view of each (for
+ * example, how many entries, the record size, how to search the table, the
+ * total table size, and a reference to the table's data).
+ */
+typedef PREPACK struct dbMasterTable_t { /* Hold ptrs to Table data structures */
+ A_UCHAR numOfEntries;
+ A_CHAR entrySize; /* Entry size per table row */
+ A_CHAR searchType; /* Index based access or key based */
+ A_CHAR reserved[3]; /* for alignment */
+ A_UINT16 tableSize; /* Size of this table */
+ A_CHAR *dataPtr; /* Ptr to the actual Table */
+} POSTPACK dbMasterTable; /* Master table - table of tables */
+
+
+/* used to get the number of rows in a table */
+#define REGDB_NUM_OF_ROWS(a) (sizeof (a) / sizeof (a[0]))
+
+/*
+ * Used to set the RegDomain bitmask which chooses which frequency
+ * band specs are used.
+ */
+
+#define BMLEN 2 /* Use 2 32-bit uint for channel bitmask */
+#define BMZERO {0,0} /* BMLEN zeros */
+
+#define BM(_fa, _fb, _fc, _fd, _fe, _ff, _fg, _fh) \
+ {((((_fa >= 0) && (_fa < 32)) ? (((A_UINT32) 1) << _fa) : 0) | \
+ (((_fb >= 0) && (_fb < 32)) ? (((A_UINT32) 1) << _fb) : 0) | \
+ (((_fc >= 0) && (_fc < 32)) ? (((A_UINT32) 1) << _fc) : 0) | \
+ (((_fd >= 0) && (_fd < 32)) ? (((A_UINT32) 1) << _fd) : 0) | \
+ (((_fe >= 0) && (_fe < 32)) ? (((A_UINT32) 1) << _fe) : 0) | \
+ (((_ff >= 0) && (_ff < 32)) ? (((A_UINT32) 1) << _ff) : 0) | \
+ (((_fg >= 0) && (_fg < 32)) ? (((A_UINT32) 1) << _fg) : 0) | \
+ (((_fh >= 0) && (_fh < 32)) ? (((A_UINT32) 1) << _fh) : 0)), \
+ ((((_fa > 31) && (_fa < 64)) ? (((A_UINT32) 1) << (_fa - 32)) : 0) | \
+ (((_fb > 31) && (_fb < 64)) ? (((A_UINT32) 1) << (_fb - 32)) : 0) | \
+ (((_fc > 31) && (_fc < 64)) ? (((A_UINT32) 1) << (_fc - 32)) : 0) | \
+ (((_fd > 31) && (_fd < 64)) ? (((A_UINT32) 1) << (_fd - 32)) : 0) | \
+ (((_fe > 31) && (_fe < 64)) ? (((A_UINT32) 1) << (_fe - 32)) : 0) | \
+ (((_ff > 31) && (_ff < 64)) ? (((A_UINT32) 1) << (_ff - 32)) : 0) | \
+ (((_fg > 31) && (_fg < 64)) ? (((A_UINT32) 1) << (_fg - 32)) : 0) | \
+ (((_fh > 31) && (_fh < 64)) ? (((A_UINT32) 1) << (_fh - 32)) : 0))}
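+
+/*
+ * Illustrative usage sketch (not part of the original Atheros header): BM()
+ * takes up to eight frequency-band indices (pass -1 for unused slots) and
+ * packs them into a BMLEN-word bitmask, with indices 0-31 landing in word 0
+ * and 32-63 in word 1.  The band names come from reg_dbvalues.h elsewhere in
+ * this patch and the initializer below is hypothetical; it is compiled out.
+ */
+#if 0 /* example only */
+static const A_UINT32 example_chan11a[BMLEN] =
+    BM(F1_5180_5240, F1_5260_5320, F1_5500_5700, F1_5745_5825, -1, -1, -1, -1);
+#endif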
+
+
+/*
+ * THE following table is the mapping of regdomain pairs specified by
+ * a regdomain value to the individual unitary reg domains
+ */
+
+typedef PREPACK struct reg_dmn_pair_mapping {
+ A_UINT16 regDmnEnum; /* 16 bit reg domain pair */
+ A_UINT16 regDmn5GHz; /* 5GHz reg domain */
+ A_UINT16 regDmn2GHz; /* 2GHz reg domain */
+ A_UINT8 flags5GHz; /* Requirements flags (AdHoc disallow etc) */
+ A_UINT8 flags2GHz; /* Requirements flags (AdHoc disallow etc) */
+ A_UINT32 pscanMask; /* Passive Scan flags which can override unitary domain passive scan
+ flags. This value is used as a mask on the unitary flags*/
+} POSTPACK REG_DMN_PAIR_MAPPING;
+
+#define OFDM_YES (1 << 0)
+#define OFDM_NO (0 << 0)
+#define MCS_HT20_YES (1 << 1)
+#define MCS_HT20_NO (0 << 1)
+#define MCS_HT40_A_YES (1 << 2)
+#define MCS_HT40_A_NO (0 << 2)
+#define MCS_HT40_G_YES (1 << 3)
+#define MCS_HT40_G_NO (0 << 3)
+
+typedef PREPACK struct {
+ A_UINT16 countryCode;
+ A_UINT16 regDmnEnum;
+ A_CHAR isoName[3];
+ A_CHAR allowMode; /* what mode is allowed - bit 0: OFDM; bit 1: MCS_HT20; bit 2: MCS_HT40_A; bit 3: MCS_HT40_G */
+} POSTPACK COUNTRY_CODE_TO_ENUM_RD;
+
+/* lower 16 bits of ht40ChanMask */
+#define NO_FREQ_HT40 0x0 /* no freq is HT40 capable */
+#define F1_TO_F4_HT40 0xF /* freqs 1 to 4 in the block are ht40 capable */
+#define F2_TO_F3_HT40 0x6 /* freqs 2 to 3 in the block are ht40 capable */
+#define F1_TO_F10_HT40 0x3FF /* freqs 1 to 10 in the block are ht40 capable */
+#define F3_TO_F11_HT40 0x7FC /* freqs 3 to 11 in the block are ht40 capable */
+#define F3_TO_F9_HT40 0x1FC /* freqs 3 to 9 in the block are ht40 capable */
+#define F1_TO_F8_HT40 0xFF /* freqs 1 to 8 in the block are ht40 capable */
+#define F1_TO_F4_F9_TO_F10_HT40 0x30F /* freqs 1 to 4 and 9 to 10 in the block are ht40 capable */
+
+/* upper 16 bits of ht40ChanMask */
+#define FREQ_HALF_RATE 0x10000
+#define FREQ_QUARTER_RATE 0x20000
+
+typedef PREPACK struct RegDmnFreqBand {
+ A_UINT16 lowChannel; /* Low channel center in MHz */
+ A_UINT16 highChannel; /* High Channel center in MHz */
+ A_UINT8 power; /* Max power (dBm) for channel range */
+ A_UINT8 channelSep; /* Channel separation within the band */
+ A_UINT8 useDfs; /* Use DFS in the RegDomain if corresponding bit is set */
+ A_UINT8 mode; /* Mode of operation */
+ A_UINT32 usePassScan; /* Use Passive Scan in the RegDomain if corresponding bit is set */
+ A_UINT32 ht40ChanMask; /* lower 16 bits: which frequencies in the block are HT40 capable
+ upper 16 bits: whether the channel is half or quarter rate */
+} POSTPACK REG_DMN_FREQ_BAND;
+
+
+
+typedef PREPACK struct regDomain {
+ A_UINT16 regDmnEnum; /* value from EnumRd table */
+ A_UINT8 rdCTL;
+ A_UINT8 maxAntGain;
+ A_UINT8 dfsMask; /* DFS bitmask for 5Ghz tables */
+ A_UINT8 flags; /* Requirement flags (AdHoc disallow etc) */
+ A_UINT16 reserved; /* for alignment */
+ A_UINT32 pscan; /* Bitmask for passive scan */
+ A_UINT32 chan11a[BMLEN]; /* 64 bit bitmask for channel/band selection */
+ A_UINT32 chan11bg[BMLEN];/* 64 bit bitmask for channel/band selection */
+} POSTPACK REG_DOMAIN;
+
+#endif /* __REG_DBSCHEMA_H__ */
diff --git a/drivers/staging/ath6kl/include/common/regulatory/reg_dbvalues.h b/drivers/staging/ath6kl/include/common/regulatory/reg_dbvalues.h
new file mode 100644
index 000000000000..278f90346b5a
--- /dev/null
+++ b/drivers/staging/ath6kl/include/common/regulatory/reg_dbvalues.h
@@ -0,0 +1,504 @@
+//------------------------------------------------------------------------------
+// Copyright (c) 2005-2010 Atheros Corporation. All rights reserved.
+//
+//
+// Permission to use, copy, modify, and/or distribute this software for any
+// purpose with or without fee is hereby granted, provided that the above
+// copyright notice and this permission notice appear in all copies.
+//
+// THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+// WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+// MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+// ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+// WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+// ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+// OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+//
+//
+//------------------------------------------------------------------------------
+//==============================================================================
+// Author(s): ="Atheros"
+//==============================================================================
+
+
+#ifndef __REG_DBVALUE_H__
+#define __REG_DBVALUE_H__
+
+/*
+ * Numbering from ISO 3166
+ */
+enum CountryCode {
+ CTRY_ALBANIA = 8, /* Albania */
+ CTRY_ALGERIA = 12, /* Algeria */
+ CTRY_ARGENTINA = 32, /* Argentina */
+ CTRY_ARMENIA = 51, /* Armenia */
+ CTRY_ARUBA = 533, /* Aruba */
+ CTRY_AUSTRALIA = 36, /* Australia (for STA) */
+ CTRY_AUSTRALIA_AP = 5000, /* Australia (for AP) */
+ CTRY_AUSTRIA = 40, /* Austria */
+ CTRY_AZERBAIJAN = 31, /* Azerbaijan */
+ CTRY_BAHRAIN = 48, /* Bahrain */
+ CTRY_BANGLADESH = 50, /* Bangladesh */
+ CTRY_BARBADOS = 52, /* Barbados */
+ CTRY_BELARUS = 112, /* Belarus */
+ CTRY_BELGIUM = 56, /* Belgium */
+ CTRY_BELIZE = 84, /* Belize */
+ CTRY_BOLIVIA = 68, /* Bolivia */
+ CTRY_BOSNIA_HERZEGOWANIA = 70, /* Bosnia & Herzegowania */
+ CTRY_BRAZIL = 76, /* Brazil */
+ CTRY_BRUNEI_DARUSSALAM = 96, /* Brunei Darussalam */
+ CTRY_BULGARIA = 100, /* Bulgaria */
+ CTRY_CAMBODIA = 116, /* Cambodia */
+ CTRY_CANADA = 124, /* Canada (for STA) */
+ CTRY_CANADA_AP = 5001, /* Canada (for AP) */
+ CTRY_CHILE = 152, /* Chile */
+ CTRY_CHINA = 156, /* People's Republic of China */
+ CTRY_COLOMBIA = 170, /* Colombia */
+ CTRY_COSTA_RICA = 188, /* Costa Rica */
+ CTRY_CROATIA = 191, /* Croatia */
+ CTRY_CYPRUS = 196,
+ CTRY_CZECH = 203, /* Czech Republic */
+ CTRY_DENMARK = 208, /* Denmark */
+ CTRY_DOMINICAN_REPUBLIC = 214, /* Dominican Republic */
+ CTRY_ECUADOR = 218, /* Ecuador */
+ CTRY_EGYPT = 818, /* Egypt */
+ CTRY_EL_SALVADOR = 222, /* El Salvador */
+ CTRY_ESTONIA = 233, /* Estonia */
+ CTRY_FAEROE_ISLANDS = 234, /* Faeroe Islands */
+ CTRY_FINLAND = 246, /* Finland */
+ CTRY_FRANCE = 250, /* France */
+ CTRY_FRANCE2 = 255, /* France2 */
+ CTRY_GEORGIA = 268, /* Georgia */
+ CTRY_GERMANY = 276, /* Germany */
+ CTRY_GREECE = 300, /* Greece */
+ CTRY_GREENLAND = 304, /* Greenland */
+ CTRY_GRENADA = 308, /* Grenada */
+ CTRY_GUAM = 316, /* Guam */
+ CTRY_GUATEMALA = 320, /* Guatemala */
+ CTRY_HAITI = 332, /* Haiti */
+ CTRY_HONDURAS = 340, /* Honduras */
+ CTRY_HONG_KONG = 344, /* Hong Kong S.A.R., P.R.C. */
+ CTRY_HUNGARY = 348, /* Hungary */
+ CTRY_ICELAND = 352, /* Iceland */
+ CTRY_INDIA = 356, /* India */
+ CTRY_INDONESIA = 360, /* Indonesia */
+ CTRY_IRAN = 364, /* Iran */
+ CTRY_IRAQ = 368, /* Iraq */
+ CTRY_IRELAND = 372, /* Ireland */
+ CTRY_ISRAEL = 376, /* Israel */
+ CTRY_ITALY = 380, /* Italy */
+ CTRY_JAMAICA = 388, /* Jamaica */
+ CTRY_JAPAN = 392, /* Japan */
+ CTRY_JAPAN1 = 393, /* Japan (JP1) */
+ CTRY_JAPAN2 = 394, /* Japan (JP0) */
+ CTRY_JAPAN3 = 395, /* Japan (JP1-1) */
+ CTRY_JAPAN4 = 396, /* Japan (JE1) */
+ CTRY_JAPAN5 = 397, /* Japan (JE2) */
+ CTRY_JAPAN6 = 399, /* Japan (JP6) */
+ CTRY_JORDAN = 400, /* Jordan */
+ CTRY_KAZAKHSTAN = 398, /* Kazakhstan */
+ CTRY_KENYA = 404, /* Kenya */
+ CTRY_KOREA_NORTH = 408, /* North Korea */
+ CTRY_KOREA_ROC = 410, /* South Korea (for STA) */
+ CTRY_KOREA_ROC2 = 411, /* South Korea */
+ CTRY_KOREA_ROC3 = 412, /* South Korea (for AP) */
+ CTRY_KUWAIT = 414, /* Kuwait */
+ CTRY_LATVIA = 428, /* Latvia */
+ CTRY_LEBANON = 422, /* Lebanon */
+ CTRY_LIBYA = 434, /* Libya */
+ CTRY_LIECHTENSTEIN = 438, /* Liechtenstein */
+ CTRY_LITHUANIA = 440, /* Lithuania */
+ CTRY_LUXEMBOURG = 442, /* Luxembourg */
+ CTRY_MACAU = 446, /* Macau */
+ CTRY_MACEDONIA = 807, /* the Former Yugoslav Republic of Macedonia */
+ CTRY_MALAYSIA = 458, /* Malaysia */
+ CTRY_MALTA = 470, /* Malta */
+ CTRY_MEXICO = 484, /* Mexico */
+ CTRY_MONACO = 492, /* Principality of Monaco */
+ CTRY_MOROCCO = 504, /* Morocco */
+ CTRY_NEPAL = 524, /* Nepal */
+ CTRY_NETHERLANDS = 528, /* Netherlands */
+ CTRY_NETHERLAND_ANTILLES = 530, /* Netherlands-Antilles */
+ CTRY_NEW_ZEALAND = 554, /* New Zealand */
+ CTRY_NICARAGUA = 558, /* Nicaragua */
+ CTRY_NORWAY = 578, /* Norway */
+ CTRY_OMAN = 512, /* Oman */
+ CTRY_PAKISTAN = 586, /* Islamic Republic of Pakistan */
+ CTRY_PANAMA = 591, /* Panama */
+ CTRY_PARAGUAY = 600, /* Paraguay */
+ CTRY_PERU = 604, /* Peru */
+ CTRY_PHILIPPINES = 608, /* Republic of the Philippines */
+ CTRY_POLAND = 616, /* Poland */
+ CTRY_PORTUGAL = 620, /* Portugal */
+ CTRY_PUERTO_RICO = 630, /* Puerto Rico */
+ CTRY_QATAR = 634, /* Qatar */
+ CTRY_ROMANIA = 642, /* Romania */
+ CTRY_RUSSIA = 643, /* Russia */
+ CTRY_SAUDI_ARABIA = 682, /* Saudi Arabia */
+ CTRY_MONTENEGRO = 891, /* Montenegro */
+ CTRY_SINGAPORE = 702, /* Singapore */
+ CTRY_SLOVAKIA = 703, /* Slovak Republic */
+ CTRY_SLOVENIA = 705, /* Slovenia */
+ CTRY_SOUTH_AFRICA = 710, /* South Africa */
+ CTRY_SPAIN = 724, /* Spain */
+ CTRY_SRILANKA = 144, /* Sri Lanka */
+ CTRY_SWEDEN = 752, /* Sweden */
+ CTRY_SWITZERLAND = 756, /* Switzerland */
+ CTRY_SYRIA = 760, /* Syria */
+ CTRY_TAIWAN = 158, /* Taiwan */
+ CTRY_THAILAND = 764, /* Thailand */
+ CTRY_TRINIDAD_Y_TOBAGO = 780, /* Trinidad y Tobago */
+ CTRY_TUNISIA = 788, /* Tunisia */
+ CTRY_TURKEY = 792, /* Turkey */
+ CTRY_UAE = 784, /* U.A.E. */
+ CTRY_UKRAINE = 804, /* Ukraine */
+ CTRY_UNITED_KINGDOM = 826, /* United Kingdom */
+ CTRY_UNITED_STATES = 840, /* United States (for STA) */
+ CTRY_UNITED_STATES_AP = 841, /* United States (for AP) */
+ CTRY_UNITED_STATES_PS = 842, /* United States - public safety */
+ CTRY_URUGUAY = 858, /* Uruguay */
+ CTRY_UZBEKISTAN = 860, /* Uzbekistan */
+ CTRY_VENEZUELA = 862, /* Venezuela */
+ CTRY_VIET_NAM = 704, /* Viet Nam */
+ CTRY_YEMEN = 887, /* Yemen */
+ CTRY_ZIMBABWE = 716 /* Zimbabwe */
+};
+
+#define CTRY_DEBUG 0
+#define CTRY_DEFAULT 0x1ff
+
+/*
+ * The following regulatory domain definitions are
+ * found in the EEPROM. Each regulatory domain
+ * can operate in either a 5GHz or 2.4GHz wireless mode or
+ * both 5GHz and 2.4GHz wireless modes.
+ * In general, the value holds no special
+ * meaning and is used to decode into either specific
+ * 2.4GHz or 5GHz wireless mode for that particular
+ * regulatory domain.
+ *
+ * Enumerated Regulatory Domain Information: 8-bit values indicate that
+ * the regdomain is really a pair of unitary regdomains. 12-bit values
+ * are the real unitary regdomains and are the only ones which have the
+ * frequency bitmasks and flags set.
+ */
+
+enum EnumRd {
+ NO_ENUMRD = 0x00,
+ NULL1_WORLD = 0x03, /* For 11b-only countries (no 11a allowed) */
+ NULL1_ETSIB = 0x07, /* Israel */
+ NULL1_ETSIC = 0x08,
+
+ FCC1_FCCA = 0x10, /* USA */
+ FCC1_WORLD = 0x11, /* Hong Kong */
+ FCC2_FCCA = 0x20, /* Canada */
+ FCC2_WORLD = 0x21, /* Australia & HK */
+ FCC2_ETSIC = 0x22,
+ FCC3_FCCA = 0x3A, /* USA & Canada w/5470 band, 11h, DFS enabled */
+ FCC3_WORLD = 0x3B, /* USA & Canada w/5470 band, 11h, DFS enabled */
+ FCC4_FCCA = 0x12, /* FCC public safety plus UNII bands */
+ FCC5_FCCA = 0x13, /* US with no DFS */
+ FCC5_WORLD = 0x16, /* US with no DFS */
+ FCC6_FCCA = 0x14, /* Same as FCC2_FCCA but with 5600-5650MHz channels disabled for US & Canada APs */
+ FCC6_WORLD = 0x23, /* Same as FCC2_FCCA but with 5600-5650MHz channels disabled for Australia APs */
+
+ ETSI1_WORLD = 0x37,
+
+ ETSI2_WORLD = 0x35, /* Hungary & others */
+ ETSI3_WORLD = 0x36, /* France & others */
+ ETSI4_WORLD = 0x30,
+ ETSI4_ETSIC = 0x38,
+ ETSI5_WORLD = 0x39,
+ ETSI6_WORLD = 0x34, /* Bulgaria */
+ ETSI_RESERVED = 0x33, /* Reserved (Do not use) */
+ FRANCE_RES = 0x31, /* Legacy France for OEM */
+
+ APL6_WORLD = 0x5B, /* Singapore */
+ APL4_WORLD = 0x42, /* Singapore */
+ APL3_FCCA = 0x50,
+ APL_RESERVED = 0x44, /* Reserved (Do not use) */
+ APL2_WORLD = 0x45, /* Korea */
+ APL2_APLC = 0x46,
+ APL3_WORLD = 0x47,
+ APL2_APLD = 0x49, /* Korea with 2.3G channels */
+ APL2_FCCA = 0x4D, /* Specific Mobile Customer */
+ APL1_WORLD = 0x52, /* Latin America */
+ APL1_FCCA = 0x53,
+ APL1_ETSIC = 0x55,
+ APL2_ETSIC = 0x56, /* Venezuela */
+ APL5_WORLD = 0x58, /* Chile */
+ APL7_FCCA = 0x5C,
+ APL8_WORLD = 0x5D,
+ APL9_WORLD = 0x5E,
+ APL10_WORLD = 0x5F, /* Korea 5GHz for STA */
+
+
+ MKK5_MKKA = 0x99, /* This is a temporary value. MG and DQ have to give the official one. */
+ MKK5_FCCA = 0x9A, /* This is a temporary value. MG and DQ have to give the official one. */
+ MKK5_MKKC = 0x88,
+ MKK11_MKKA = 0xD4,
+ MKK11_FCCA = 0xD5,
+ MKK11_MKKC = 0xD7,
+
+ /*
+ * World mode SKUs
+ */
+ WOR0_WORLD = 0x60, /* World0 (WO0 SKU) */
+ WOR1_WORLD = 0x61, /* World1 (WO1 SKU) */
+ WOR2_WORLD = 0x62, /* World2 (WO2 SKU) */
+ WOR3_WORLD = 0x63, /* World3 (WO3 SKU) */
+ WOR4_WORLD = 0x64, /* World4 (WO4 SKU) */
+ WOR5_ETSIC = 0x65, /* World5 (WO5 SKU) */
+
+ WOR01_WORLD = 0x66, /* World0-1 (WW0-1 SKU) */
+ WOR02_WORLD = 0x67, /* World0-2 (WW0-2 SKU) */
+ EU1_WORLD = 0x68, /* Same as World0-2 (WW0-2 SKU), except active scan ch1-13. No ch14 */
+
+ WOR9_WORLD = 0x69, /* World9 (WO9 SKU) */
+ WORA_WORLD = 0x6A, /* WorldA (WOA SKU) */
+ WORB_WORLD = 0x6B, /* WorldB (WOA SKU) */
+ WORC_WORLD = 0x6C, /* WorldC (WOA SKU) */
+
+ /*
+ * Regulatory domains ending in a number (e.g. APL1,
+ * MK1, ETSI4, etc) apply to 5GHz channel and power
+ * information. Regulatory domains ending in a letter
+ * (e.g. APLA, FCCA, etc) apply to 2.4GHz channel and
+ * power information.
+ */
+ APL1 = 0x0150, /* LAT & Asia */
+ APL2 = 0x0250, /* LAT & Asia */
+ APL3 = 0x0350, /* Taiwan */
+ APL4 = 0x0450, /* Jordan */
+ APL5 = 0x0550, /* Chile */
+ APL6 = 0x0650, /* Singapore */
+ APL7 = 0x0750, /* Taiwan */
+ APL8 = 0x0850, /* Malaysia */
+ APL9 = 0x0950, /* Korea */
+ APL10 = 0x1050, /* Korea 5GHz */
+
+ ETSI1 = 0x0130, /* Europe & others */
+ ETSI2 = 0x0230, /* Europe & others */
+ ETSI3 = 0x0330, /* Europe & others */
+ ETSI4 = 0x0430, /* Europe & others */
+ ETSI5 = 0x0530, /* Europe & others */
+ ETSI6 = 0x0630, /* Europe & others */
+ ETSIB = 0x0B30, /* Israel */
+ ETSIC = 0x0C30, /* Latin America */
+
+ FCC1 = 0x0110, /* US & others */
+ FCC2 = 0x0120, /* Canada, Australia & New Zealand */
+ FCC3 = 0x0160, /* US w/new middle band & DFS */
+ FCC4 = 0x0165,
+ FCC5 = 0x0180,
+ FCC6 = 0x0610,
+ FCCA = 0x0A10,
+
+ APLD = 0x0D50, /* South Korea */
+
+ MKK1 = 0x0140, /* Japan */
+ MKK2 = 0x0240, /* Japan Extended */
+ MKK3 = 0x0340, /* Japan new 5GHz */
+ MKK4 = 0x0440, /* Japan new 5GHz */
+ MKK5 = 0x0540, /* Japan new 5GHz */
+ MKK6 = 0x0640, /* Japan new 5GHz */
+ MKK7 = 0x0740, /* Japan new 5GHz */
+ MKK8 = 0x0840, /* Japan new 5GHz */
+ MKK9 = 0x0940, /* Japan new 5GHz */
+ MKK10 = 0x1040, /* Japan new 5GHz */
+ MKK11 = 0x1140, /* Japan new 5GHz */
+ MKK12 = 0x1240, /* Japan new 5GHz */
+
+ MKKA = 0x0A40, /* Japan */
+ MKKC = 0x0A50,
+
+ NULL1 = 0x0198,
+ WORLD = 0x0199,
+ DEBUG_REG_DMN = 0x01ff,
+ UNINIT_REG_DMN = 0x0fff,
+};
+
+enum { /* conformance test limits */
+ FCC = 0x10,
+ MKK = 0x40,
+ ETSI = 0x30,
+ NO_CTL = 0xff,
+ CTL_11B = 1,
+ CTL_11G = 2
+};
+
+
+/*
+ * The following are flags for different requirements per reg domain.
+ * These requirements are either inherited from the reg domain pair or
+ * from the unitary reg domain if the reg domain pair flags value is
+ * 0
+ */
+
+enum {
+ NO_REQ = 0x00,
+ DISALLOW_ADHOC_11A = 0x01,
+ ADHOC_PER_11D = 0x02,
+ ADHOC_NO_11A = 0x04,
+ DISALLOW_ADHOC_11G = 0x08
+};
+
+
+
+
+/*
+ * The following describe the bit masks for different passive scan
+ * capability/requirements per regdomain.
+ */
+#define NO_PSCAN 0x00000000
+#define PSCAN_FCC 0x00000001
+#define PSCAN_ETSI 0x00000002
+#define PSCAN_MKK 0x00000004
+#define PSCAN_ETSIB 0x00000008
+#define PSCAN_ETSIC 0x00000010
+#define PSCAN_WWR 0x00000020
+#define PSCAN_DEFER 0xFFFFFFFF
+
+/* Bit masks for DFS per regdomain */
+
+enum {
+ NO_DFS = 0x00,
+ DFS_FCC3 = 0x01,
+ DFS_ETSI = 0x02,
+ DFS_MKK = 0x04
+};
+
+
+#define DEF_REGDMN FCC1_FCCA
+
+/*
+ * The following table is the master list for all different frequency
+ * bands with the complete matrix of all possible flags and settings
+ * for each band if it is used in ANY reg domain.
+ *
+ * The table of frequency bands is indexed by a bitmask. The ordering
+ * must be consistent with the enum below. When adding a new
+ * frequency band, be sure to match the location in the enum with the
+ * comments
+ */
+
+/*
+ * These frequency values are as per channel tags and regulatory domain
+ * info. Please update them as the database is updated.
+ */
+#define A_FREQ_MIN 4920
+#define A_FREQ_MAX 5825
+
+#define A_CHAN0_FREQ 5000
+#define A_CHAN_MAX ((A_FREQ_MAX - A_CHAN0_FREQ)/5)
+
+#define BG_FREQ_MIN 2412
+#define BG_FREQ_MAX 2484
+
+#define BG_CHAN0_FREQ 2407
+#define BG_CHAN_MIN ((BG_FREQ_MIN - BG_CHAN0_FREQ)/5)
+#define BG_CHAN_MAX 14 /* corresponding to 2484 MHz */
+
+#define A_20MHZ_BAND_FREQ_MAX 5000
+
+
+/*
+ * 5GHz 11A channel tags
+ */
+
+enum {
+ F1_4920_4980,
+ F1_5040_5080,
+
+ F1_5120_5240,
+
+ F1_5180_5240,
+ F2_5180_5240,
+ F3_5180_5240,
+ F4_5180_5240,
+ F5_5180_5240,
+ F6_5180_5240,
+ F7_5180_5240,
+
+ F1_5260_5280,
+
+ F1_5260_5320,
+ F2_5260_5320,
+ F3_5260_5320,
+ F4_5260_5320,
+ F5_5260_5320,
+ F6_5260_5320,
+
+ F1_5260_5700,
+
+ F1_5280_5320,
+
+ F1_5500_5620,
+
+ F1_5500_5700,
+ F2_5500_5700,
+ F3_5500_5700,
+ F4_5500_5700,
+ F5_5500_5700,
+ F6_5500_5700,
+ F7_5500_5700,
+
+ F1_5745_5805,
+ F2_5745_5805,
+
+ F1_5745_5825,
+ F2_5745_5825,
+ F3_5745_5825,
+ F4_5745_5825,
+ F5_5745_5825,
+ F6_5745_5825,
+
+ W1_4920_4980,
+ W1_5040_5080,
+ W1_5170_5230,
+ W1_5180_5240,
+ W1_5260_5320,
+ W1_5745_5825,
+ W1_5500_5700,
+};
+
+
+/* 2.4 GHz table - for 11b and 11g info */
+enum {
+ BG1_2312_2372,
+ BG2_2312_2372,
+
+ BG1_2412_2472,
+ BG2_2412_2472,
+ BG3_2412_2472,
+ BG4_2412_2472,
+
+ BG1_2412_2462,
+ BG2_2412_2462,
+
+ BG1_2432_2442,
+
+ BG1_2457_2472,
+
+ BG1_2467_2472,
+
+ BG1_2484_2484, /* No G */
+ BG2_2484_2484, /* No G */
+
+ BG1_2512_2732,
+
+ WBG1_2312_2372,
+ WBG1_2412_2412,
+ WBG1_2417_2432,
+ WBG1_2437_2442,
+ WBG1_2447_2457,
+ WBG1_2462_2462,
+ WBG1_2467_2467,
+ WBG2_2467_2467,
+ WBG1_2472_2472,
+ WBG2_2472_2472,
+ WBG1_2484_2484, /* No G */
+ WBG2_2484_2484, /* No G */
+};
+
+#endif /* __REG_DBVALUE_H__ */
diff --git a/drivers/staging/ath6kl/include/common/roaming.h b/drivers/staging/ath6kl/include/common/roaming.h
new file mode 100644
index 000000000000..8019850a0571
--- /dev/null
+++ b/drivers/staging/ath6kl/include/common/roaming.h
@@ -0,0 +1,41 @@
+//------------------------------------------------------------------------------
+// <copyright file="roaming.h" company="Atheros">
+// Copyright (c) 2004-2010 Atheros Corporation. All rights reserved.
+//
+//
+// Permission to use, copy, modify, and/or distribute this software for any
+// purpose with or without fee is hereby granted, provided that the above
+// copyright notice and this permission notice appear in all copies.
+//
+// THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+// WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+// MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+// ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+// WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+// ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+// OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+//
+//
+//------------------------------------------------------------------------------
+//==============================================================================
+// Author(s): ="Atheros"
+//==============================================================================
+
+#ifndef _ROAMING_H_
+#define _ROAMING_H_
+
+/*
+ * The signal quality could be in terms of either SNR or RSSI. We should
+ * have an enum for both of them. For the time being, we are going to move
+ * it to wmi.h, which is shared by both the host and the target, since we are
+ * repartitioning the code to the host.
+ */
+#define SIGNAL_QUALITY_NOISE_FLOOR -96
+#define SIGNAL_QUALITY_METRICS_NUM_MAX 2
+typedef enum {
+ SIGNAL_QUALITY_METRICS_SNR = 0,
+ SIGNAL_QUALITY_METRICS_RSSI,
+ SIGNAL_QUALITY_METRICS_ALL,
+} SIGNAL_QUALITY_METRICS_TYPE;
+
+#endif /* _ROAMING_H_ */
diff --git a/drivers/staging/ath6kl/include/common/targaddrs.h b/drivers/staging/ath6kl/include/common/targaddrs.h
new file mode 100644
index 000000000000..e8cf70354d21
--- /dev/null
+++ b/drivers/staging/ath6kl/include/common/targaddrs.h
@@ -0,0 +1,245 @@
+//------------------------------------------------------------------------------
+// Copyright (c) 2010 Atheros Corporation. All rights reserved.
+//
+//
+// Permission to use, copy, modify, and/or distribute this software for any
+// purpose with or without fee is hereby granted, provided that the above
+// copyright notice and this permission notice appear in all copies.
+//
+// THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+// WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+// MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+// ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+// WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+// ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+// OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+//
+//
+//
+// Author(s): ="Atheros"
+//------------------------------------------------------------------------------
+
+#ifndef __TARGADDRS_H__
+#define __TARGADDRS_H__
+
+#ifndef ATH_TARGET
+#include "athstartpack.h"
+#endif
+
+#if defined(AR6002)
+#include "AR6002/addrs.h"
+#endif
+
+/*
+ * AR6K option bits, to enable/disable various features.
+ * By default, all option bits are 0.
+ * These bits can be set in LOCAL_SCRATCH register 0.
+ */
+#define AR6K_OPTION_BMI_DISABLE 0x01 /* Disable BMI comm with Host */
+#define AR6K_OPTION_SERIAL_ENABLE 0x02 /* Enable serial port msgs */
+#define AR6K_OPTION_WDT_DISABLE 0x04 /* WatchDog Timer override */
+#define AR6K_OPTION_SLEEP_DISABLE 0x08 /* Disable system sleep */
+#define AR6K_OPTION_STOP_BOOT 0x10 /* Stop boot processes (for ATE) */
+#define AR6K_OPTION_ENABLE_NOANI 0x20 /* Operate without ANI */
+#define AR6K_OPTION_DSET_DISABLE 0x40 /* Ignore DataSets */
+#define AR6K_OPTION_IGNORE_FLASH 0x80 /* Ignore flash during bootup */
+
+/*
+ * xxx_HOST_INTEREST_ADDRESS is the address in Target RAM of the
+ * host_interest structure. It must match the address of the _host_interest
+ * symbol (see linker script).
+ *
+ * Host Interest is shared between Host and Target in order to coordinate
+ * between the two, and is intended to remain constant (with additions only
+ * at the end) across software releases.
+ *
+ * All addresses are available here so that it's possible to
+ * write a single binary that works with all Target Types.
+ * May be used in assembler code as well as C.
+ */
+#define AR6002_HOST_INTEREST_ADDRESS 0x00500400
+#define AR6003_HOST_INTEREST_ADDRESS 0x00540600
+
+
+#define HOST_INTEREST_MAX_SIZE 0x100
+
+#if !defined(__ASSEMBLER__)
+struct register_dump_s;
+struct dbglog_hdr_s;
+
+/*
+ * These are items that the Host may need to access
+ * via BMI or via the Diagnostic Window. The position
+ * of items in this structure must remain constant
+ * across firmware revisions!
+ *
+ * Types for each item must be fixed size across
+ * target and host platforms.
+ *
+ * More items may be added at the end.
+ */
+PREPACK struct host_interest_s {
+ /*
+ * Pointer to application-defined area, if any.
+ * Set by Target application during startup.
+ */
+ A_UINT32 hi_app_host_interest; /* 0x00 */
+
+ /* Pointer to register dump area, valid after Target crash. */
+ A_UINT32 hi_failure_state; /* 0x04 */
+
+ /* Pointer to debug logging header */
+ A_UINT32 hi_dbglog_hdr; /* 0x08 */
+
+ /* Indicates whether or not flash is present on Target.
+ * NB: flash_is_present indicator is here not just
+ * because it might be of interest to the Host, but
+ * also because it's set early on by the Target's startup
+ * asm code and we need it to have a special RAM address
+ * so that it doesn't get reinitialized with the rest
+ * of the data.
+ */
+ A_UINT32 hi_flash_is_present; /* 0x0c */
+
+ /*
+ * General-purpose flag bits, similar to AR6000_OPTION_* flags.
+ * Can be used by application rather than by OS.
+ */
+ A_UINT32 hi_option_flag; /* 0x10 */
+
+ /*
+ * Boolean that determines whether or not to
+ * display messages on the serial port.
+ */
+ A_UINT32 hi_serial_enable; /* 0x14 */
+
+ /* Start address of Flash DataSet index, if any */
+ A_UINT32 hi_dset_list_head; /* 0x18 */
+
+ /* Override Target application start address */
+ A_UINT32 hi_app_start; /* 0x1c */
+
+ /* Clock and voltage tuning */
+ A_UINT32 hi_skip_clock_init; /* 0x20 */
+ A_UINT32 hi_core_clock_setting; /* 0x24 */
+ A_UINT32 hi_cpu_clock_setting; /* 0x28 */
+ A_UINT32 hi_system_sleep_setting; /* 0x2c */
+ A_UINT32 hi_xtal_control_setting; /* 0x30 */
+ A_UINT32 hi_pll_ctrl_setting_24ghz; /* 0x34 */
+ A_UINT32 hi_pll_ctrl_setting_5ghz; /* 0x38 */
+ A_UINT32 hi_ref_voltage_trim_setting; /* 0x3c */
+ A_UINT32 hi_clock_info; /* 0x40 */
+
+ /*
+ * Flash configuration overrides, used only
+ * when firmware is not executing from flash.
+ * (When using flash, modify the global variables
+ * with equivalent names.)
+ */
+ A_UINT32 hi_bank0_addr_value; /* 0x44 */
+ A_UINT32 hi_bank0_read_value; /* 0x48 */
+ A_UINT32 hi_bank0_write_value; /* 0x4c */
+ A_UINT32 hi_bank0_config_value; /* 0x50 */
+
+ /* Pointer to Board Data */
+ A_UINT32 hi_board_data; /* 0x54 */
+ A_UINT32 hi_board_data_initialized; /* 0x58 */
+
+ A_UINT32 hi_dset_RAM_index_table; /* 0x5c */
+
+ A_UINT32 hi_desired_baud_rate; /* 0x60 */
+ A_UINT32 hi_dbglog_config; /* 0x64 */
+ A_UINT32 hi_end_RAM_reserve_sz; /* 0x68 */
+ A_UINT32 hi_mbox_io_block_sz; /* 0x6c */
+
+ A_UINT32 hi_num_bpatch_streams; /* 0x70 -- unused */
+ A_UINT32 hi_mbox_isr_yield_limit; /* 0x74 */
+
+ A_UINT32 hi_refclk_hz; /* 0x78 */
+ A_UINT32 hi_ext_clk_detected; /* 0x7c */
+ A_UINT32 hi_dbg_uart_txpin; /* 0x80 */
+ A_UINT32 hi_dbg_uart_rxpin; /* 0x84 */
+ A_UINT32 hi_hci_uart_baud; /* 0x88 */
+ A_UINT32 hi_hci_uart_pin_assignments; /* 0x8C */
+ /* NOTE: byte [0] = tx pin, [1] = rx pin, [2] = rts pin, [3] = cts pin */
+ A_UINT32 hi_hci_uart_baud_scale_val; /* 0x90 */
+ A_UINT32 hi_hci_uart_baud_step_val; /* 0x94 */
+
+ A_UINT32 hi_allocram_start; /* 0x98 */
+ A_UINT32 hi_allocram_sz; /* 0x9c */
+ A_UINT32 hi_hci_bridge_flags; /* 0xa0 */
+ A_UINT32 hi_hci_uart_support_pins; /* 0xa4 */
+ /* NOTE: byte [0] = RESET pin (bit 7 is polarity), bytes[1]..bytes[3] are for future use */
+ A_UINT32 hi_hci_uart_pwr_mgmt_params; /* 0xa8 */
+ /* 0xa8 - [0]: 1 = enable, 0 = disable
+ * [1]: 0 = UART FC active low, 1 = UART FC active high
+ * 0xa9 - [7:0]: wakeup timeout in ms
+ * 0xaa, 0xab - [15:0]: idle timeout in ms
+ */
+ /* Pointer to extended board Data */
+ A_UINT32 hi_board_ext_data; /* 0xac */
+ A_UINT32 hi_board_ext_data_initialized; /* 0xb0 */
+} POSTPACK;
+
+/* Bits defined in hi_option_flag */
+#define HI_OPTION_TIMER_WAR 0x01 /* Enable timer workaround */
+#define HI_OPTION_BMI_CRED_LIMIT 0x02 /* Limit BMI command credits */
+#define HI_OPTION_RELAY_DOT11_HDR 0x04 /* Relay Dot11 hdr to/from host */
+#define HI_OPTION_FW_MODE_LSB 0x08 /* low bit of MODE (see below) */
+#define HI_OPTION_FW_MODE_MSB 0x10 /* high bit of MODE (see below) */
+#define HI_OPTION_ENABLE_PROFILE 0x20 /* Enable CPU profiling */
+#define HI_OPTION_DISABLE_DBGLOG 0x40 /* Disable debug logging */
+#define HI_OPTION_SKIP_ERA_TRACKING 0x80 /* Skip Era Tracking */
+#define HI_OPTION_PAPRD_DISABLE 0x100 /* Disable PAPRD (debug) */
+
+/* 2 bits of hi_option_flag are used to represent 3 modes */
+#define HI_OPTION_FW_MODE_IBSS 0x0 /* IBSS Mode */
+#define HI_OPTION_FW_MODE_BSS_STA 0x1 /* STA Mode */
+#define HI_OPTION_FW_MODE_AP 0x2 /* AP Mode */
+
+/* Fw Mode Mask */
+#define HI_OPTION_FW_MODE_MASK 0x3
+#define HI_OPTION_FW_MODE_SHIFT 0x3
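+
+/*
+ * Illustrative sketch (not part of the original Atheros header): the firmware
+ * mode occupies bits 3-4 of hi_option_flag (HI_OPTION_FW_MODE_LSB/MSB), so it
+ * can be extracted with the mask/shift pair above.  The helper name is
+ * hypothetical; it is compiled out.
+ */
+#if 0 /* example only */
+static A_UINT32 hi_get_fw_mode(A_UINT32 option_flag)
+{
+    /* yields HI_OPTION_FW_MODE_IBSS, HI_OPTION_FW_MODE_BSS_STA or HI_OPTION_FW_MODE_AP */
+    return (option_flag >> HI_OPTION_FW_MODE_SHIFT) & HI_OPTION_FW_MODE_MASK;
+}
+#endif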
+
+/*
+ * Intended for use by Host software, this macro returns the Target RAM
+ * address of any item in the host_interest structure.
+ * Example: target_addr = AR6002_HOST_INTEREST_ITEM_ADDRESS(hi_board_data);
+ */
+#define AR6002_HOST_INTEREST_ITEM_ADDRESS(item) \
+ (A_UINT32)((unsigned long)&((((struct host_interest_s *)(AR6002_HOST_INTEREST_ADDRESS))->item)))
+
+#define AR6003_HOST_INTEREST_ITEM_ADDRESS(item) \
+ (A_UINT32)((unsigned long)&((((struct host_interest_s *)(AR6003_HOST_INTEREST_ADDRESS))->item)))
+
+#define HOST_INTEREST_DBGLOG_IS_ENABLED() \
+ (!(HOST_INTEREST->hi_option_flag & HI_OPTION_DISABLE_DBGLOG))
+
+#define HOST_INTEREST_PROFILE_IS_ENABLED() \
+ (HOST_INTEREST->hi_option_flag & HI_OPTION_ENABLE_PROFILE)
+
+/* Convert a Target virtual address into a Target physical address */
+#define AR6002_VTOP(vaddr) ((vaddr) & 0x001fffff)
+#define AR6003_VTOP(vaddr) ((vaddr) & 0x001fffff)
+#define TARG_VTOP(TargetType, vaddr) \
+ (((TargetType) == TARGET_TYPE_AR6002) ? AR6002_VTOP(vaddr) : AR6003_VTOP(vaddr))
+
+/* override REV2 ROM's app start address */
+#define AR6002_REV2_APP_START_OVERRIDE 0x911A00
+#define AR6003_REV1_APP_START_OVERRIDE 0x944c00
+#define AR6003_REV1_OTP_DATA_ADDRESS 0x542800
+#define AR6003_REV2_APP_START_OVERRIDE 0x945000
+#define AR6003_REV2_OTP_DATA_ADDRESS 0x543800
+#define AR6003_BOARD_EXT_DATA_ADDRESS 0x57E600
+
+
+/* # of A_UINT32 entries in targregs, used by DIAG_FETCH_TARG_REGS */
+#define AR6003_FETCH_TARG_REGS_COUNT 64
+
+#endif /* !__ASSEMBLER__ */
+
+#ifndef ATH_TARGET
+#include "athendpack.h"
+#endif
+
+#endif /* __TARGADDRS_H__ */
diff --git a/drivers/staging/ath6kl/include/common/testcmd.h b/drivers/staging/ath6kl/include/common/testcmd.h
new file mode 100644
index 000000000000..d6616f0fab7d
--- /dev/null
+++ b/drivers/staging/ath6kl/include/common/testcmd.h
@@ -0,0 +1,185 @@
+//------------------------------------------------------------------------------
+// <copyright file="testcmd.h" company="Atheros">
+// Copyright (c) 2004-2010 Atheros Corporation. All rights reserved.
+//
+//
+// Permission to use, copy, modify, and/or distribute this software for any
+// purpose with or without fee is hereby granted, provided that the above
+// copyright notice and this permission notice appear in all copies.
+//
+// THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+// WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+// MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+// ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+// WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+// ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+// OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+//
+//
+//------------------------------------------------------------------------------
+//==============================================================================
+// Author(s): ="Atheros"
+//==============================================================================
+
+#ifndef TESTCMD_H_
+#define TESTCMD_H_
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#ifdef AR6002_REV2
+#define TCMD_MAX_RATES 12
+#else
+#define TCMD_MAX_RATES 28
+#endif
+
+typedef enum {
+ ZEROES_PATTERN = 0,
+ ONES_PATTERN,
+ REPEATING_10,
+ PN7_PATTERN,
+ PN9_PATTERN,
+ PN15_PATTERN
+}TX_DATA_PATTERN;
+
+/* Continuous tx
+ mode : TCMD_CONT_TX_OFF - Disable continuous tx
+ TCMD_CONT_TX_SINE - Enable continuous unmodulated tx
+ TCMD_CONT_TX_FRAME - Enable continuous modulated tx
+ freq : Channel freq in MHz (e.g. 2412 for channel 1 in 11g)
+dataRate: 0 - 1 Mbps
+ 1 - 2 Mbps
+ 2 - 5.5 Mbps
+ 3 - 11 Mbps
+ 4 - 6 Mbps
+ 5 - 9 Mbps
+ 6 - 12 Mbps
+ 7 - 18 Mbps
+ 8 - 24 Mbps
+ 9 - 36 Mbps
+ 10 - 48 Mbps
+ 11 - 54 Mbps
+ txPwr: Tx power in dBm, [5-11] for unmodulated tx, [5-14] for modulated tx
+antenna: 1 - one antenna
+ 2 - two antennas
+Note : The enable/disable continuous tx test cmd works only when the target is awake.
+*/
+
+typedef enum {
+ TCMD_CONT_TX_OFF = 0,
+ TCMD_CONT_TX_SINE,
+ TCMD_CONT_TX_FRAME,
+ TCMD_CONT_TX_TX99,
+ TCMD_CONT_TX_TX100
+} TCMD_CONT_TX_MODE;
+
+typedef enum {
+ TCMD_WLAN_MODE_NOHT = 0,
+ TCMD_WLAN_MODE_HT20 = 1,
+ TCMD_WLAN_MODE_HT40PLUS = 2,
+ TCMD_WLAN_MODE_HT40MINUS = 3,
+} TCMD_WLAN_MODE;
+
+typedef PREPACK struct {
+ A_UINT32 testCmdId;
+ A_UINT32 mode;
+ A_UINT32 freq;
+ A_UINT32 dataRate;
+ A_INT32 txPwr;
+ A_UINT32 antenna;
+ A_UINT32 enANI;
+ A_UINT32 scramblerOff;
+ A_UINT32 aifsn;
+ A_UINT16 pktSz;
+ A_UINT16 txPattern;
+ A_UINT32 shortGuard;
+ A_UINT32 numPackets;
+ A_UINT32 wlanMode;
+} POSTPACK TCMD_CONT_TX;
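+
+/*
+ * Illustrative sketch (not part of the original Atheros header): filling in a
+ * continuous-tx command per the legend above (channel 1, modulated tx, 6 Mbps,
+ * 10 dBm, one antenna).  memset, the variable name and the field values are
+ * assumptions of this example, and TCMD_CONT_TX_ID is defined further down in
+ * this header; the block is compiled out.
+ */
+#if 0 /* example only */
+TCMD_CONT_TX tx_cmd;
+memset(&tx_cmd, 0, sizeof(tx_cmd));
+tx_cmd.testCmdId = TCMD_CONT_TX_ID;
+tx_cmd.mode      = TCMD_CONT_TX_FRAME; /* continuous modulated tx */
+tx_cmd.freq      = 2412;               /* channel 1, in MHz */
+tx_cmd.dataRate  = 4;                  /* 6 Mbps per the rate table above */
+tx_cmd.txPwr     = 10;                 /* dBm, within the modulated-tx range */
+tx_cmd.antenna   = 1;
+#endif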
+
+#define TCMD_TXPATTERN_ZERONE 0x1
+#define TCMD_TXPATTERN_ZERONE_DIS_SCRAMBLE 0x2
+
+/* Continuous Rx
+ act: TCMD_CONT_RX_PROMIS - promiscuous mode (accept all incoming frames)
+ TCMD_CONT_RX_FILTER - filter mode (accept only frames whose destination
+ address equals the MAC address
+ specified via TCMD_CONT_RX_SETMAC)
+ TCMD_CONT_RX_REPORT - report mode (disable cont rx mode and get the
+ report from the last cont
+ rx test)
+
+ TCMD_CONT_RX_SETMAC - set MacAddr mode (sets the MAC address for the
+ target. This overrides
+ the default MAC address.)
+
+*/
+typedef enum {
+ TCMD_CONT_RX_PROMIS =0,
+ TCMD_CONT_RX_FILTER,
+ TCMD_CONT_RX_REPORT,
+ TCMD_CONT_RX_SETMAC,
+ TCMD_CONT_RX_SET_ANT_SWITCH_TABLE
+} TCMD_CONT_RX_ACT;
+
+typedef PREPACK struct {
+ A_UINT32 testCmdId;
+ A_UINT32 act;
+ A_UINT32 enANI;
+ PREPACK union {
+ struct PREPACK TCMD_CONT_RX_PARA {
+ A_UINT32 freq;
+ A_UINT32 antenna;
+ A_UINT32 wlanMode;
+ } POSTPACK para;
+ struct PREPACK TCMD_CONT_RX_REPORT {
+ A_UINT32 totalPkt;
+ A_INT32 rssiInDBm;
+ A_UINT32 crcErrPkt;
+ A_UINT32 secErrPkt;
+ A_UINT16 rateCnt[TCMD_MAX_RATES];
+ A_UINT16 rateCntShortGuard[TCMD_MAX_RATES];
+ } POSTPACK report;
+ struct PREPACK TCMD_CONT_RX_MAC {
+ A_UCHAR addr[ATH_MAC_LEN];
+ } POSTPACK mac;
+ struct PREPACK TCMD_CONT_RX_ANT_SWITCH_TABLE {
+ A_UINT32 antswitch1;
+ A_UINT32 antswitch2;
+ }POSTPACK antswitchtable;
+ } POSTPACK u;
+} POSTPACK TCMD_CONT_RX;
+
+/* Force sleep/wake test cmd
+ mode: TCMD_PM_WAKEUP - Wakeup the target
+ TCMD_PM_SLEEP - Force the target to sleep.
+ */
+typedef enum {
+ TCMD_PM_WAKEUP = 1, /* be consistent with target */
+ TCMD_PM_SLEEP,
+ TCMD_PM_DEEPSLEEP
+} TCMD_PM_MODE;
+
+typedef PREPACK struct {
+ A_UINT32 testCmdId;
+ A_UINT32 mode;
+} POSTPACK TCMD_PM;
+
+typedef enum {
+ TCMD_CONT_TX_ID,
+ TCMD_CONT_RX_ID,
+ TCMD_PM_ID
+} TCMD_ID;
+
+typedef PREPACK union {
+ TCMD_CONT_TX contTx;
+ TCMD_CONT_RX contRx;
+ TCMD_PM pm;
+} POSTPACK TEST_CMD;
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* TESTCMD_H_ */
diff --git a/drivers/staging/ath6kl/include/common/tlpm.h b/drivers/staging/ath6kl/include/common/tlpm.h
new file mode 100644
index 000000000000..659b1c07ba90
--- /dev/null
+++ b/drivers/staging/ath6kl/include/common/tlpm.h
@@ -0,0 +1,38 @@
+//------------------------------------------------------------------------------
+// Copyright (c) 2010 Atheros Corporation. All rights reserved.
+//
+//
+// Permission to use, copy, modify, and/or distribute this software for any
+// purpose with or without fee is hereby granted, provided that the above
+// copyright notice and this permission notice appear in all copies.
+//
+// THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+// WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+// MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+// ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+// WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+// ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+// OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+//
+//
+//------------------------------------------------------------------------------
+//==============================================================================
+// Author(s): ="Atheros"
+//==============================================================================
+
+#ifndef __TLPM_H__
+#define __TLPM_H__
+
+/* idle timeout in 16-bit value as in HOST_INTEREST hi_hci_uart_pwr_mgmt_params */
+#define TLPM_DEFAULT_IDLE_TIMEOUT_MS 1000
+/* hex in LSB and MSB for HCI command */
+#define TLPM_DEFAULT_IDLE_TIMEOUT_LSB 0xE8
+#define TLPM_DEFAULT_IDLE_TIMEOUT_MSB 0x3
+
+/* wakeup timeout in 8-bit value as in HOST_INTEREST hi_hci_uart_pwr_mgmt_params */
+#define TLPM_DEFAULT_WAKEUP_TIMEOUT_MS 10
+
+/* default UART FC polarity is low */
+#define TLPM_DEFAULT_UART_FC_POLARITY 0
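+
+/*
+ * Illustrative sketch (not part of the original Atheros header): assuming the
+ * byte layout documented for hi_hci_uart_pwr_mgmt_params in targaddrs.h (byte
+ * 0: enable and FC-polarity bits, byte 1: wakeup timeout in ms, bytes 2-3:
+ * idle timeout in ms) and a little-endian packing into an A_UINT32, the
+ * defaults above could be combined as below.  The helper name is hypothetical;
+ * it is compiled out.
+ */
+#if 0 /* example only */
+static A_UINT32 tlpm_default_pwr_mgmt_params(void)
+{
+    return (A_UINT32)1                                     /* byte 0, bit 0: enable      */
+         | ((A_UINT32)TLPM_DEFAULT_UART_FC_POLARITY << 1)  /* byte 0, bit 1: FC polarity */
+         | ((A_UINT32)TLPM_DEFAULT_WAKEUP_TIMEOUT_MS << 8) /* byte 1: wakeup timeout, ms */
+         | ((A_UINT32)TLPM_DEFAULT_IDLE_TIMEOUT_MS << 16); /* bytes 2-3: idle timeout, ms */
+}
+#endif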
+
+#endif
diff --git a/drivers/staging/ath6kl/include/common/wlan_defs.h b/drivers/staging/ath6kl/include/common/wlan_defs.h
new file mode 100644
index 000000000000..03e4d23788ce
--- /dev/null
+++ b/drivers/staging/ath6kl/include/common/wlan_defs.h
@@ -0,0 +1,79 @@
+//------------------------------------------------------------------------------
+// <copyright file="wlan_defs.h" company="Atheros">
+// Copyright (c) 2004-2010 Atheros Corporation. All rights reserved.
+//
+//
+// Permission to use, copy, modify, and/or distribute this software for any
+// purpose with or without fee is hereby granted, provided that the above
+// copyright notice and this permission notice appear in all copies.
+//
+// THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+// WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+// MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+// ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+// WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+// ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+// OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+//
+//
+//------------------------------------------------------------------------------
+//==============================================================================
+// Author(s): ="Atheros"
+//==============================================================================
+#ifndef __WLAN_DEFS_H__
+#define __WLAN_DEFS_H__
+
+/*
+ * This file contains WLAN definitions that may be used across both
+ * Host and Target software.
+ */
+
+typedef enum {
+ MODE_11A = 0, /* 11a Mode */
+ MODE_11G = 1, /* 11b/g Mode */
+ MODE_11B = 2, /* 11b Mode */
+ MODE_11GONLY = 3, /* 11g only Mode */
+#ifdef SUPPORT_11N
+ MODE_11NA_HT20 = 4, /* 11a HT20 mode */
+ MODE_11NG_HT20 = 5, /* 11g HT20 mode */
+ MODE_11NA_HT40 = 6, /* 11a HT40 mode */
+ MODE_11NG_HT40 = 7, /* 11g HT40 mode */
+ MODE_UNKNOWN = 8,
+ MODE_MAX = 8
+#else
+ MODE_UNKNOWN = 4,
+ MODE_MAX = 4
+#endif
+} WLAN_PHY_MODE;
+
+typedef enum {
+ WLAN_11A_CAPABILITY = 1,
+ WLAN_11G_CAPABILITY = 2,
+ WLAN_11AG_CAPABILITY = 3,
+}WLAN_CAPABILITY;
+
+#ifdef SUPPORT_11N
+typedef unsigned long A_RATEMASK;
+#else
+typedef unsigned short A_RATEMASK;
+#endif
+
+#ifdef SUPPORT_11N
+#define IS_MODE_11A(mode) (((mode) == MODE_11A) || \
+ ((mode) == MODE_11NA_HT20) || \
+ ((mode) == MODE_11NA_HT40))
+#define IS_MODE_11B(mode) ((mode) == MODE_11B)
+#define IS_MODE_11G(mode) (((mode) == MODE_11G) || \
+ ((mode) == MODE_11GONLY) || \
+ ((mode) == MODE_11NG_HT20) || \
+ ((mode) == MODE_11NG_HT40))
+#define IS_MODE_11GONLY(mode) ((mode) == MODE_11GONLY)
+#else
+#define IS_MODE_11A(mode) ((mode) == MODE_11A)
+#define IS_MODE_11B(mode) ((mode) == MODE_11B)
+#define IS_MODE_11G(mode) (((mode) == MODE_11G) || \
+ ((mode) == MODE_11GONLY))
+#define IS_MODE_11GONLY(mode) ((mode) == MODE_11GONLY)
+#endif /* SUPPORT_11N */
+
+#endif /* __WLAN_DEFS_H__ */
diff --git a/drivers/staging/ath6kl/include/common/wlan_dset.h b/drivers/staging/ath6kl/include/common/wlan_dset.h
new file mode 100644
index 000000000000..864a60cedf10
--- /dev/null
+++ b/drivers/staging/ath6kl/include/common/wlan_dset.h
@@ -0,0 +1,33 @@
+//------------------------------------------------------------------------------
+// Copyright (c) 2007-2010 Atheros Corporation. All rights reserved.
+//
+//
+// Permission to use, copy, modify, and/or distribute this software for any
+// purpose with or without fee is hereby granted, provided that the above
+// copyright notice and this permission notice appear in all copies.
+//
+// THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+// WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+// MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+// ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+// WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+// ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+// OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+//
+//
+//------------------------------------------------------------------------------
+//==============================================================================
+// Author(s): ="Atheros"
+//==============================================================================
+
+#ifndef __WLAN_DSET_H__
+#define __WLAN_DSET_H__
+
+typedef PREPACK struct wow_config_dset {
+
+ A_UINT8 valid_dset;
+ A_UINT8 gpio_enable;
+ A_UINT16 gpio_pin;
+} POSTPACK WOW_CONFIG_DSET;
+
+#endif
diff --git a/drivers/staging/ath6kl/include/common/wmi.h b/drivers/staging/ath6kl/include/common/wmi.h
new file mode 100644
index 000000000000..c75d310c37a7
--- /dev/null
+++ b/drivers/staging/ath6kl/include/common/wmi.h
@@ -0,0 +1,3119 @@
+//------------------------------------------------------------------------------
+// Copyright (c) 2004-2010 Atheros Corporation. All rights reserved.
+//
+//
+// Permission to use, copy, modify, and/or distribute this software for any
+// purpose with or without fee is hereby granted, provided that the above
+// copyright notice and this permission notice appear in all copies.
+//
+// THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+// WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+// MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+// ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+// WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+// ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+// OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+//
+//
+//
+// Author(s): ="Atheros"
+//------------------------------------------------------------------------------
+
+/*
+ * This file contains the definitions of the WMI protocol specified in the
+ * Wireless Module Interface (WMI). It includes definitions of all the
+ * commands and events. Commands are messages from the host to the WM.
+ * Events and Replies are messages from the WM to the host.
+ *
+ * Ownership of correctness with regard to commands
+ * belongs to the host driver and the WMI is not required to validate
+ * parameters for value, proper range, or any other checking.
+ *
+ */
+
+#ifndef _WMI_H_
+#define _WMI_H_
+
+#ifndef ATH_TARGET
+#include "athstartpack.h"
+#endif
+
+#include "wmix.h"
+#include "wlan_defs.h"
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#define HTC_PROTOCOL_VERSION 0x0002
+#define HTC_PROTOCOL_REVISION 0x0000
+
+#define WMI_PROTOCOL_VERSION 0x0002
+#define WMI_PROTOCOL_REVISION 0x0000
+
+#define ATH_MAC_LEN 6 /* length of mac in bytes */
+#define WMI_CMD_MAX_LEN 100
+#define WMI_CONTROL_MSG_MAX_LEN 256
+#define WMI_OPT_CONTROL_MSG_MAX_LEN 1536
+#define IS_ETHERTYPE(_typeOrLen) ((_typeOrLen) >= 0x0600)
+#define RFC1042OUI {0x00, 0x00, 0x00}
+
+#define IP_ETHERTYPE 0x0800
+
+#define WMI_IMPLICIT_PSTREAM 0xFF
+#define WMI_MAX_THINSTREAM 15
+
+#ifdef AR6002_REV2
+#define IBSS_MAX_NUM_STA 4
+#else
+#define IBSS_MAX_NUM_STA 8
+#endif
+
+PREPACK struct host_app_area_s {
+ A_UINT32 wmi_protocol_ver;
+} POSTPACK;
+
+/*
+ * Data Path
+ */
+typedef PREPACK struct {
+ A_UINT8 dstMac[ATH_MAC_LEN];
+ A_UINT8 srcMac[ATH_MAC_LEN];
+ A_UINT16 typeOrLen;
+} POSTPACK ATH_MAC_HDR;
+
+typedef PREPACK struct {
+ A_UINT8 dsap;
+ A_UINT8 ssap;
+ A_UINT8 cntl;
+ A_UINT8 orgCode[3];
+ A_UINT16 etherType;
+} POSTPACK ATH_LLC_SNAP_HDR;
+
+typedef enum {
+ DATA_MSGTYPE = 0x0,
+ CNTL_MSGTYPE,
+ SYNC_MSGTYPE,
+ OPT_MSGTYPE,
+} WMI_MSG_TYPE;
+
+
+/*
+ * Macros for operating on WMI_DATA_HDR (info) field
+ */
+
+#define WMI_DATA_HDR_MSG_TYPE_MASK 0x03
+#define WMI_DATA_HDR_MSG_TYPE_SHIFT 0
+#define WMI_DATA_HDR_UP_MASK 0x07
+#define WMI_DATA_HDR_UP_SHIFT 2
+/* In AP mode, the same bit (b5) is used to indicate the power-save state in
+ * the Rx direction and the More-data bit state in the Tx direction.
+ */
+#define WMI_DATA_HDR_PS_MASK 0x1
+#define WMI_DATA_HDR_PS_SHIFT 5
+
+#define WMI_DATA_HDR_MORE_MASK 0x1
+#define WMI_DATA_HDR_MORE_SHIFT 5
+
+typedef enum {
+ WMI_DATA_HDR_DATA_TYPE_802_3 = 0,
+ WMI_DATA_HDR_DATA_TYPE_802_11,
+ WMI_DATA_HDR_DATA_TYPE_ACL,
+} WMI_DATA_HDR_DATA_TYPE;
+
+#define WMI_DATA_HDR_DATA_TYPE_MASK 0x3
+#define WMI_DATA_HDR_DATA_TYPE_SHIFT 6
+
+#define WMI_DATA_HDR_SET_MORE_BIT(h) ((h)->info |= (WMI_DATA_HDR_MORE_MASK << WMI_DATA_HDR_MORE_SHIFT))
+
+#define WMI_DATA_HDR_IS_MSG_TYPE(h, t) (((h)->info & (WMI_DATA_HDR_MSG_TYPE_MASK)) == (t))
+#define WMI_DATA_HDR_SET_MSG_TYPE(h, t) (h)->info = (((h)->info & ~(WMI_DATA_HDR_MSG_TYPE_MASK << WMI_DATA_HDR_MSG_TYPE_SHIFT)) | (t << WMI_DATA_HDR_MSG_TYPE_SHIFT))
+#define WMI_DATA_HDR_GET_UP(h) (((h)->info >> WMI_DATA_HDR_UP_SHIFT) & WMI_DATA_HDR_UP_MASK)
+#define WMI_DATA_HDR_SET_UP(h, p) (h)->info = (((h)->info & ~(WMI_DATA_HDR_UP_MASK << WMI_DATA_HDR_UP_SHIFT)) | (p << WMI_DATA_HDR_UP_SHIFT))
+
+#define WMI_DATA_HDR_GET_DATA_TYPE(h) (((h)->info >> WMI_DATA_HDR_DATA_TYPE_SHIFT) & WMI_DATA_HDR_DATA_TYPE_MASK)
+#define WMI_DATA_HDR_SET_DATA_TYPE(h, p) (h)->info = (((h)->info & ~(WMI_DATA_HDR_DATA_TYPE_MASK << WMI_DATA_HDR_DATA_TYPE_SHIFT)) | ((p) << WMI_DATA_HDR_DATA_TYPE_SHIFT))
+
+#define WMI_DATA_HDR_GET_DOT11(h) (WMI_DATA_HDR_GET_DATA_TYPE((h)) == WMI_DATA_HDR_DATA_TYPE_802_11)
+#define WMI_DATA_HDR_SET_DOT11(h, p) WMI_DATA_HDR_SET_DATA_TYPE((h), (p))
+
+/* Macros for operating on WMI_DATA_HDR (info2) field */
+#define WMI_DATA_HDR_SEQNO_MASK 0xFFF
+#define WMI_DATA_HDR_SEQNO_SHIFT 0
+
+#define WMI_DATA_HDR_AMSDU_MASK 0x1
+#define WMI_DATA_HDR_AMSDU_SHIFT 12
+
+#define WMI_DATA_HDR_META_MASK 0x7
+#define WMI_DATA_HDR_META_SHIFT 13
+
+#define GET_SEQ_NO(_v) ((_v) & WMI_DATA_HDR_SEQNO_MASK)
+#define GET_ISMSDU(_v) ((_v) & WMI_DATA_HDR_AMSDU_MASK)
+
+#define WMI_DATA_HDR_GET_SEQNO(h) GET_SEQ_NO((h)->info2 >> WMI_DATA_HDR_SEQNO_SHIFT)
+#define WMI_DATA_HDR_SET_SEQNO(h, _v) ((h)->info2 = ((h)->info2 & ~(WMI_DATA_HDR_SEQNO_MASK << WMI_DATA_HDR_SEQNO_SHIFT)) | (GET_SEQ_NO(_v) << WMI_DATA_HDR_SEQNO_SHIFT))
+
+#define WMI_DATA_HDR_IS_AMSDU(h) GET_ISMSDU((h)->info2 >> WMI_DATA_HDR_AMSDU_SHIFT)
+#define WMI_DATA_HDR_SET_AMSDU(h, _v) ((h)->info2 = ((h)->info2 & ~(WMI_DATA_HDR_AMSDU_MASK << WMI_DATA_HDR_AMSDU_SHIFT)) | (GET_ISMSDU(_v) << WMI_DATA_HDR_AMSDU_SHIFT))
+
+#define WMI_DATA_HDR_GET_META(h) (((h)->info2 >> WMI_DATA_HDR_META_SHIFT) & WMI_DATA_HDR_META_MASK)
+#define WMI_DATA_HDR_SET_META(h, _v) ((h)->info2 = ((h)->info2 & ~(WMI_DATA_HDR_META_MASK << WMI_DATA_HDR_META_SHIFT)) | ((_v) << WMI_DATA_HDR_META_SHIFT))
+
+typedef PREPACK struct {
+ A_INT8 rssi;
+ A_UINT8 info; /* usage of 'info' field(8-bit):
+ * b1:b0 - WMI_MSG_TYPE
+ * b4:b3:b2 - UP(tid)
+ * b5 - Used in AP mode. More-data in tx dir, PS in rx.
+ * b7:b6 - Dot3 header(0),
+ * Dot11 Header(1),
+ * ACL data(2)
+ */
+
+ A_UINT16 info2; /* usage of 'info2' field(16-bit):
+ * b11:b0 - seq_no
+ * b12 - A-MSDU?
+ * b15:b13 - META_DATA_VERSION 0 - 7
+ */
+ A_UINT16 reserved;
+} POSTPACK WMI_DATA_HDR;
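The accessor macros above pack the bit fields described in these comments into info and info2. As a minimal host-side sketch (the helper name is hypothetical and not part of this patch; it assumes the A_UINT* types and the macros above are in scope), a data header for an 802.3-framed frame could be built like this:

/* Illustrative helper only, not part of the patch. */
static void example_fill_data_hdr(WMI_DATA_HDR *hdr, A_UINT8 tid, A_UINT16 seq)
{
    hdr->rssi = 0;
    hdr->info = 0;
    hdr->info2 = 0;
    hdr->reserved = 0;

    WMI_DATA_HDR_SET_MSG_TYPE(hdr, DATA_MSGTYPE);                   /* b1:b0 */
    WMI_DATA_HDR_SET_UP(hdr, tid);                                  /* b4:b2 */
    WMI_DATA_HDR_SET_DATA_TYPE(hdr, WMI_DATA_HDR_DATA_TYPE_802_3);  /* b7:b6 */
    WMI_DATA_HDR_SET_SEQNO(hdr, seq);                               /* info2 b11:b0 */
}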
+
+/*
+ * TX META VERSION DEFINITIONS
+ */
+#define WMI_MAX_TX_META_SZ (12)
+#define WMI_MAX_TX_META_VERSION (7)
+#define WMI_META_VERSION_1 (0x01)
+#define WMI_META_VERSION_2 (0X02)
+
+#define WMI_ACL_TO_DOT11_HEADROOM 36
+
+#if 0 /* removed to prevent compile errors for WM.. */
+typedef PREPACK struct {
+/* intentionally empty. Default version is no meta data. */
+} POSTPACK WMI_TX_META_V0;
+#endif
+
+typedef PREPACK struct {
+ A_UINT8 pktID; /* The packet ID to identify the tx request */
+ A_UINT8 ratePolicyID; /* The rate policy to be used for the tx of this frame */
+} POSTPACK WMI_TX_META_V1;
+
+
+#define WMI_CSUM_DIR_TX (0x1)
+#define TX_CSUM_CALC_FILL (0x1)
+typedef PREPACK struct {
+ A_UINT8 csumStart; /*Offset from start of the WMI header for csum calculation to begin */
+ A_UINT8 csumDest; /*Offset from start of WMI header where final csum goes*/
+ A_UINT8 csumFlags; /*number of bytes over which csum is calculated*/
+} POSTPACK WMI_TX_META_V2;
+
+
+/*
+ * RX META VERSION DEFINITIONS
+ */
+/* if RX meta data is present at all then the meta data field
+ * will consume WMI_MAX_RX_META_SZ bytes of space between the
+ * WMI_DATA_HDR and the payload. How much of the available
+ * Meta data is actually used depends on which meta data
+ * version is active. */
+#define WMI_MAX_RX_META_SZ (12)
+#define WMI_MAX_RX_META_VERSION (7)
+
+#define WMI_RX_STATUS_OK 0 /* success */
+#define WMI_RX_STATUS_DECRYPT_ERR 1 /* decrypt error */
+#define WMI_RX_STATUS_MIC_ERR 2 /* tkip MIC error */
+#define WMI_RX_STATUS_ERR 3 /* undefined error */
+
+#define WMI_RX_FLAGS_AGGR 0x0001 /* part of AGGR */
+#define WMI_RX_FlAGS_STBC 0x0002 /* used STBC */
+#define WMI_RX_FLAGS_SGI 0x0004 /* used SGI */
+#define WMI_RX_FLAGS_HT 0x0008 /* is HT packet */
+/* the flags field is also used to store the CRYPTO_TYPE of the frame
+ * that value is shifted by WMI_RX_FLAGS_CRYPTO_SHIFT */
+#define WMI_RX_FLAGS_CRYPTO_SHIFT 4
+#define WMI_RX_FLAGS_CRYPTO_MASK 0x1f
+#define WMI_RX_META_GET_CRYPTO(flags) (((flags) >> WMI_RX_FLAGS_CRYPTO_SHIFT) & WMI_RX_FLAGS_CRYPTO_MASK)
+
+#if 0 /* removed to prevent compile errors for WM.. */
+typedef PREPACK struct {
+/* intentionally empty. Default version is no meta data. */
+} POSTPACK WMI_RX_META_VERSION_0;
+#endif
+
+typedef PREPACK struct {
+ A_UINT8 status; /* one of WMI_RX_STATUS_... */
+ A_UINT8 rix; /* rate index mapped to rate at which this packet was received. */
+ A_UINT8 rssi; /* rssi of packet */
+ A_UINT8 channel;/* rf channel during packet reception */
+ A_UINT16 flags; /* a combination of WMI_RX_FLAGS_... */
+} POSTPACK WMI_RX_META_V1;
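As a quick illustration of how the flags field is consumed, a hypothetical host-side check (not part of the patch) might look like the following sketch:

/* Hypothetical helper: returns non-zero for an HT frame received as part of
 * an aggregate, and extracts the CRYPTO_TYPE value carried in the flags. */
static int example_rx_is_ht_aggr(const WMI_RX_META_V1 *meta, A_UINT8 *crypto)
{
    *crypto = (A_UINT8)WMI_RX_META_GET_CRYPTO(meta->flags);
    return (meta->flags & WMI_RX_FLAGS_HT) && (meta->flags & WMI_RX_FLAGS_AGGR);
}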
+
+#define RX_CSUM_VALID_FLAG (0x1)
+typedef PREPACK struct {
+ A_UINT16 csum;
+ A_UINT8 csumFlags;/* bit 0 set -partial csum valid
+ bit 1 set -test mode */
+} POSTPACK WMI_RX_META_V2;
+
+
+
+#define WMI_GET_DEVICE_ID(info1) ((info1) & 0xF)
+
+/*
+ * Control Path
+ */
+typedef PREPACK struct {
+ A_UINT16 commandId;
+/*
+ * info1 - 16 bits
+ * b03:b00 - id
+ * b15:b04 - unused
+ */
+ A_UINT16 info1;
+
+ A_UINT16 reserved; /* For alignment */
+} POSTPACK WMI_CMD_HDR; /* used for commands and events */
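Every control message is framed as a WMI_CMD_HDR followed immediately by the command payload. A minimal sketch of that framing (the helper name is an assumption; a real driver would also handle endianness and transport headroom):

#include <string.h>

/* Sketch only: place a WMI_CMD_HDR in front of a command payload. */
static int example_frame_wmi_cmd(A_UINT8 *buf, int buflen,
                                 A_UINT16 cmd_id, const void *payload, int len)
{
    WMI_CMD_HDR hdr;

    if (buflen < (int)sizeof(hdr) + len)
        return -1;

    hdr.commandId = cmd_id;   /* one of the WMI_*_CMDID values below */
    hdr.info1 = 0;            /* b3:b0 carry the device id */
    hdr.reserved = 0;

    memcpy(buf, &hdr, sizeof(hdr));
    memcpy(buf + sizeof(hdr), payload, len);
    return (int)sizeof(hdr) + len;
}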
+
+/*
+ * List of Commands
+ */
+typedef enum {
+ WMI_CONNECT_CMDID = 0x0001,
+ WMI_RECONNECT_CMDID,
+ WMI_DISCONNECT_CMDID,
+ WMI_SYNCHRONIZE_CMDID,
+ WMI_CREATE_PSTREAM_CMDID,
+ WMI_DELETE_PSTREAM_CMDID,
+ WMI_START_SCAN_CMDID,
+ WMI_SET_SCAN_PARAMS_CMDID,
+ WMI_SET_BSS_FILTER_CMDID,
+ WMI_SET_PROBED_SSID_CMDID, /* 10 */
+ WMI_SET_LISTEN_INT_CMDID,
+ WMI_SET_BMISS_TIME_CMDID,
+ WMI_SET_DISC_TIMEOUT_CMDID,
+ WMI_GET_CHANNEL_LIST_CMDID,
+ WMI_SET_BEACON_INT_CMDID,
+ WMI_GET_STATISTICS_CMDID,
+ WMI_SET_CHANNEL_PARAMS_CMDID,
+ WMI_SET_POWER_MODE_CMDID,
+ WMI_SET_IBSS_PM_CAPS_CMDID,
+ WMI_SET_POWER_PARAMS_CMDID, /* 20 */
+ WMI_SET_POWERSAVE_TIMERS_POLICY_CMDID,
+ WMI_ADD_CIPHER_KEY_CMDID,
+ WMI_DELETE_CIPHER_KEY_CMDID,
+ WMI_ADD_KRK_CMDID,
+ WMI_DELETE_KRK_CMDID,
+ WMI_SET_PMKID_CMDID,
+ WMI_SET_TX_PWR_CMDID,
+ WMI_GET_TX_PWR_CMDID,
+ WMI_SET_ASSOC_INFO_CMDID,
+ WMI_ADD_BAD_AP_CMDID, /* 30 */
+ WMI_DELETE_BAD_AP_CMDID,
+ WMI_SET_TKIP_COUNTERMEASURES_CMDID,
+ WMI_RSSI_THRESHOLD_PARAMS_CMDID,
+ WMI_TARGET_ERROR_REPORT_BITMASK_CMDID,
+ WMI_SET_ACCESS_PARAMS_CMDID,
+ WMI_SET_RETRY_LIMITS_CMDID,
+ WMI_SET_OPT_MODE_CMDID,
+ WMI_OPT_TX_FRAME_CMDID,
+ WMI_SET_VOICE_PKT_SIZE_CMDID,
+ WMI_SET_MAX_SP_LEN_CMDID, /* 40 */
+ WMI_SET_ROAM_CTRL_CMDID,
+ WMI_GET_ROAM_TBL_CMDID,
+ WMI_GET_ROAM_DATA_CMDID,
+ WMI_ENABLE_RM_CMDID,
+ WMI_SET_MAX_OFFHOME_DURATION_CMDID,
+ WMI_EXTENSION_CMDID, /* Non-wireless extensions */
+ WMI_SNR_THRESHOLD_PARAMS_CMDID,
+ WMI_LQ_THRESHOLD_PARAMS_CMDID,
+ WMI_SET_LPREAMBLE_CMDID,
+ WMI_SET_RTS_CMDID, /* 50 */
+ WMI_CLR_RSSI_SNR_CMDID,
+ WMI_SET_FIXRATES_CMDID,
+ WMI_GET_FIXRATES_CMDID,
+ WMI_SET_AUTH_MODE_CMDID,
+ WMI_SET_REASSOC_MODE_CMDID,
+ WMI_SET_WMM_CMDID,
+ WMI_SET_WMM_TXOP_CMDID,
+ WMI_TEST_CMDID,
+ /* COEX AR6002 only*/
+ WMI_SET_BT_STATUS_CMDID,
+ WMI_SET_BT_PARAMS_CMDID, /* 60 */
+
+ WMI_SET_KEEPALIVE_CMDID,
+ WMI_GET_KEEPALIVE_CMDID,
+ WMI_SET_APPIE_CMDID,
+ WMI_GET_APPIE_CMDID,
+ WMI_SET_WSC_STATUS_CMDID,
+
+ /* Wake on Wireless */
+ WMI_SET_HOST_SLEEP_MODE_CMDID,
+ WMI_SET_WOW_MODE_CMDID,
+ WMI_GET_WOW_LIST_CMDID,
+ WMI_ADD_WOW_PATTERN_CMDID,
+ WMI_DEL_WOW_PATTERN_CMDID, /* 70 */
+
+ WMI_SET_FRAMERATES_CMDID,
+ WMI_SET_AP_PS_CMDID,
+ WMI_SET_QOS_SUPP_CMDID,
+ /* WMI_THIN_RESERVED_... mark the start and end
+ * values for WMI_THIN_RESERVED command IDs. These
+ * command IDs can be found in wmi_thin.h */
+ WMI_THIN_RESERVED_START = 0x8000,
+ WMI_THIN_RESERVED_END = 0x8fff,
+ /*
+ * Developer commands starts at 0xF000
+ */
+ WMI_SET_BITRATE_CMDID = 0xF000,
+ WMI_GET_BITRATE_CMDID,
+ WMI_SET_WHALPARAM_CMDID,
+
+
+ /* New commands should be added at the tail to remain compatible with
+ * etna.
+ */
+ WMI_SET_MAC_ADDRESS_CMDID,
+ WMI_SET_AKMP_PARAMS_CMDID,
+ WMI_SET_PMKID_LIST_CMDID,
+ WMI_GET_PMKID_LIST_CMDID,
+ WMI_ABORT_SCAN_CMDID,
+ WMI_SET_TARGET_EVENT_REPORT_CMDID,
+
+ // Unused
+ WMI_UNUSED1,
+ WMI_UNUSED2,
+
+ /*
+ * AP mode commands
+ */
+ WMI_AP_HIDDEN_SSID_CMDID,
+ WMI_AP_SET_NUM_STA_CMDID,
+ WMI_AP_ACL_POLICY_CMDID,
+ WMI_AP_ACL_MAC_LIST_CMDID,
+ WMI_AP_CONFIG_COMMIT_CMDID,
+ WMI_AP_SET_MLME_CMDID,
+ WMI_AP_SET_PVB_CMDID,
+ WMI_AP_CONN_INACT_CMDID,
+ WMI_AP_PROT_SCAN_TIME_CMDID,
+ WMI_AP_SET_COUNTRY_CMDID,
+ WMI_AP_SET_DTIM_CMDID,
+ WMI_AP_MODE_STAT_CMDID,
+
+ WMI_SET_IP_CMDID,
+ WMI_SET_PARAMS_CMDID,
+ WMI_SET_MCAST_FILTER_CMDID,
+ WMI_DEL_MCAST_FILTER_CMDID,
+
+ WMI_ALLOW_AGGR_CMDID,
+ WMI_ADDBA_REQ_CMDID,
+ WMI_DELBA_REQ_CMDID,
+ WMI_SET_HT_CAP_CMDID,
+ WMI_SET_HT_OP_CMDID,
+ WMI_SET_TX_SELECT_RATES_CMDID,
+ WMI_SET_TX_SGI_PARAM_CMDID,
+ WMI_SET_RATE_POLICY_CMDID,
+
+ WMI_HCI_CMD_CMDID,
+ WMI_RX_FRAME_FORMAT_CMDID,
+ WMI_SET_THIN_MODE_CMDID,
+ WMI_SET_BT_WLAN_CONN_PRECEDENCE_CMDID,
+
+ WMI_AP_SET_11BG_RATESET_CMDID,
+ WMI_SET_PMK_CMDID,
+ WMI_MCAST_FILTER_CMDID,
+ /* COEX CMDID AR6003*/
+ WMI_SET_BTCOEX_FE_ANT_CMDID,
+ WMI_SET_BTCOEX_COLOCATED_BT_DEV_CMDID,
+ WMI_SET_BTCOEX_SCO_CONFIG_CMDID,
+ WMI_SET_BTCOEX_A2DP_CONFIG_CMDID,
+ WMI_SET_BTCOEX_ACLCOEX_CONFIG_CMDID,
+ WMI_SET_BTCOEX_BTINQUIRY_PAGE_CONFIG_CMDID,
+ WMI_SET_BTCOEX_DEBUG_CMDID,
+ WMI_SET_BTCOEX_BT_OPERATING_STATUS_CMDID,
+ WMI_GET_BTCOEX_STATS_CMDID,
+ WMI_GET_BTCOEX_CONFIG_CMDID,
+} WMI_COMMAND_ID;
+
+/*
+ * Frame Types
+ */
+typedef enum {
+ WMI_FRAME_BEACON = 0,
+ WMI_FRAME_PROBE_REQ,
+ WMI_FRAME_PROBE_RESP,
+ WMI_FRAME_ASSOC_REQ,
+ WMI_FRAME_ASSOC_RESP,
+ WMI_NUM_MGMT_FRAME
+} WMI_MGMT_FRAME_TYPE;
+
+/*
+ * Connect Command
+ */
+typedef enum {
+ INFRA_NETWORK = 0x01,
+ ADHOC_NETWORK = 0x02,
+ ADHOC_CREATOR = 0x04,
+ AP_NETWORK = 0x10,
+} NETWORK_TYPE;
+
+typedef enum {
+ OPEN_AUTH = 0x01,
+ SHARED_AUTH = 0x02,
+ LEAP_AUTH = 0x04, /* different from IEEE_AUTH_MODE definitions */
+} DOT11_AUTH_MODE;
+
+typedef enum {
+ NONE_AUTH = 0x01,
+ WPA_AUTH = 0x02,
+ WPA2_AUTH = 0x04,
+ WPA_PSK_AUTH = 0x08,
+ WPA2_PSK_AUTH = 0x10,
+ WPA_AUTH_CCKM = 0x20,
+ WPA2_AUTH_CCKM = 0x40,
+} AUTH_MODE;
+
+typedef enum {
+ NONE_CRYPT = 0x01,
+ WEP_CRYPT = 0x02,
+ TKIP_CRYPT = 0x04,
+ AES_CRYPT = 0x08,
+#ifdef WAPI_ENABLE
+ WAPI_CRYPT = 0x10,
+#endif /*WAPI_ENABLE*/
+} CRYPTO_TYPE;
+
+#define WMI_MIN_CRYPTO_TYPE NONE_CRYPT
+#define WMI_MAX_CRYPTO_TYPE (AES_CRYPT + 1)
+
+#ifdef WAPI_ENABLE
+#undef WMI_MAX_CRYPTO_TYPE
+#define WMI_MAX_CRYPTO_TYPE (WAPI_CRYPT + 1)
+#endif /* WAPI_ENABLE */
+
+#ifdef WAPI_ENABLE
+#define IW_ENCODE_ALG_SM4 0x20
+#define IW_AUTH_WAPI_ENABLED 0x20
+#endif
+
+#define WMI_MIN_KEY_INDEX 0
+#define WMI_MAX_KEY_INDEX 3
+
+#ifdef WAPI_ENABLE
+#undef WMI_MAX_KEY_INDEX
+#define WMI_MAX_KEY_INDEX 7 /* wapi grpKey 0-3, prwKey 4-7 */
+#endif /* WAPI_ENABLE */
+
+#define WMI_MAX_KEY_LEN 32
+
+#define WMI_MAX_SSID_LEN 32
+
+typedef enum {
+ CONNECT_ASSOC_POLICY_USER = 0x0001,
+ CONNECT_SEND_REASSOC = 0x0002,
+ CONNECT_IGNORE_WPAx_GROUP_CIPHER = 0x0004,
+ CONNECT_PROFILE_MATCH_DONE = 0x0008,
+ CONNECT_IGNORE_AAC_BEACON = 0x0010,
+ CONNECT_CSA_FOLLOW_BSS = 0x0020,
+ CONNECT_DO_WPA_OFFLOAD = 0x0040,
+ CONNECT_DO_NOT_DEAUTH = 0x0080,
+} WMI_CONNECT_CTRL_FLAGS_BITS;
+
+#define DEFAULT_CONNECT_CTRL_FLAGS (CONNECT_CSA_FOLLOW_BSS)
+
+typedef PREPACK struct {
+ A_UINT8 networkType;
+ A_UINT8 dot11AuthMode;
+ A_UINT8 authMode;
+ A_UINT8 pairwiseCryptoType;
+ A_UINT8 pairwiseCryptoLen;
+ A_UINT8 groupCryptoType;
+ A_UINT8 groupCryptoLen;
+ A_UINT8 ssidLength;
+ A_UCHAR ssid[WMI_MAX_SSID_LEN];
+ A_UINT16 channel;
+ A_UINT8 bssid[ATH_MAC_LEN];
+ A_UINT32 ctrl_flags;
+} POSTPACK WMI_CONNECT_CMD;
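A minimal sketch of filling this command for an open, unencrypted infrastructure network (the helper name and the use of 0 as "no channel hint" are assumptions, not taken from the patch):

#include <string.h>

static void example_fill_connect(WMI_CONNECT_CMD *cc,
                                 const A_UCHAR *ssid, A_UINT8 ssid_len)
{
    memset(cc, 0, sizeof(*cc));
    cc->networkType = INFRA_NETWORK;
    cc->dot11AuthMode = OPEN_AUTH;
    cc->authMode = NONE_AUTH;
    cc->pairwiseCryptoType = NONE_CRYPT;
    cc->groupCryptoType = NONE_CRYPT;
    cc->ssidLength = ssid_len;                 /* <= WMI_MAX_SSID_LEN */
    memcpy(cc->ssid, ssid, ssid_len);
    cc->channel = 0;                           /* assumed: 0 = no channel hint */
    cc->ctrl_flags = DEFAULT_CONNECT_CTRL_FLAGS;
}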
+
+/*
+ * WMI_RECONNECT_CMDID
+ */
+typedef PREPACK struct {
+ A_UINT16 channel; /* hint */
+ A_UINT8 bssid[ATH_MAC_LEN]; /* mandatory if set */
+} POSTPACK WMI_RECONNECT_CMD;
+
+#define WMI_PMK_LEN 32
+typedef PREPACK struct {
+ A_UINT8 pmk[WMI_PMK_LEN];
+} POSTPACK WMI_SET_PMK_CMD;
+
+/*
+ * WMI_ADD_CIPHER_KEY_CMDID
+ */
+typedef enum {
+ PAIRWISE_USAGE = 0x00,
+ GROUP_USAGE = 0x01,
+ TX_USAGE = 0x02, /* default Tx Key - Static WEP only */
+} KEY_USAGE;
+
+/*
+ * Bit Flag
+ * Bit 0 - Initialise TSC - default is Initialize
+ */
+#define KEY_OP_INIT_TSC 0x01
+#define KEY_OP_INIT_RSC 0x02
+#ifdef WAPI_ENABLE
+#define KEY_OP_INIT_WAPIPN 0x10
+#endif /* WAPI_ENABLE */
+
+#define KEY_OP_INIT_VAL 0x03 /* Default Initialise the TSC & RSC */
+#define KEY_OP_VALID_MASK 0x03
+
+typedef PREPACK struct {
+ A_UINT8 keyIndex;
+ A_UINT8 keyType;
+ A_UINT8 keyUsage; /* KEY_USAGE */
+ A_UINT8 keyLength;
+ A_UINT8 keyRSC[8]; /* key replay sequence counter */
+ A_UINT8 key[WMI_MAX_KEY_LEN];
+ A_UINT8 key_op_ctrl; /* Additional Key Control information */
+ A_UINT8 key_macaddr[ATH_MAC_LEN];
+} POSTPACK WMI_ADD_CIPHER_KEY_CMD;
+
+/*
+ * WMI_DELETE_CIPHER_KEY_CMDID
+ */
+typedef PREPACK struct {
+ A_UINT8 keyIndex;
+} POSTPACK WMI_DELETE_CIPHER_KEY_CMD;
+
+#define WMI_KRK_LEN 16
+/*
+ * WMI_ADD_KRK_CMDID
+ */
+typedef PREPACK struct {
+ A_UINT8 krk[WMI_KRK_LEN];
+} POSTPACK WMI_ADD_KRK_CMD;
+
+/*
+ * WMI_SET_TKIP_COUNTERMEASURES_CMDID
+ */
+typedef enum {
+ WMI_TKIP_CM_DISABLE = 0x0,
+ WMI_TKIP_CM_ENABLE = 0x1,
+} WMI_TKIP_CM_CONTROL;
+
+typedef PREPACK struct {
+ A_UINT8 cm_en; /* WMI_TKIP_CM_CONTROL */
+} POSTPACK WMI_SET_TKIP_COUNTERMEASURES_CMD;
+
+/*
+ * WMI_SET_PMKID_CMDID
+ */
+
+#define WMI_PMKID_LEN 16
+
+typedef enum {
+ PMKID_DISABLE = 0,
+ PMKID_ENABLE = 1,
+} PMKID_ENABLE_FLG;
+
+typedef PREPACK struct {
+ A_UINT8 bssid[ATH_MAC_LEN];
+ A_UINT8 enable; /* PMKID_ENABLE_FLG */
+ A_UINT8 pmkid[WMI_PMKID_LEN];
+} POSTPACK WMI_SET_PMKID_CMD;
+
+/*
+ * WMI_START_SCAN_CMD
+ */
+typedef enum {
+ WMI_LONG_SCAN = 0,
+ WMI_SHORT_SCAN = 1,
+} WMI_SCAN_TYPE;
+
+typedef PREPACK struct {
+ A_BOOL forceFgScan;
+ A_BOOL isLegacy; /* For Legacy Cisco AP compatibility */
+ A_UINT32 homeDwellTime; /* Maximum duration in the home channel(milliseconds) */
+ A_UINT32 forceScanInterval; /* Time interval between scans (milliseconds)*/
+ A_UINT8 scanType; /* WMI_SCAN_TYPE */
+ A_UINT8 numChannels; /* how many channels follow */
+ A_UINT16 channelList[1]; /* channels in Mhz */
+} POSTPACK WMI_START_SCAN_CMD;
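Because the command ends in a channelList[1] placeholder, it is sized for the actual number of channels at allocation time. A hedged sketch (helper name and error handling are illustrative only):

#include <stdlib.h>
#include <string.h>

static WMI_START_SCAN_CMD *example_alloc_scan_cmd(const A_UINT16 *mhz, A_UINT8 n)
{
    size_t sz = sizeof(WMI_START_SCAN_CMD) +
                (n ? (size_t)(n - 1) * sizeof(A_UINT16) : 0);
    WMI_START_SCAN_CMD *sc = calloc(1, sz);

    if (!sc)
        return NULL;
    sc->scanType = WMI_LONG_SCAN;
    sc->numChannels = n;
    memcpy(sc->channelList, mhz, (size_t)n * sizeof(A_UINT16));
    return sc;
}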
+
+/*
+ * WMI_SET_SCAN_PARAMS_CMDID
+ */
+#define WMI_SHORTSCANRATIO_DEFAULT 3
+/*
+ * Warning: a scanCtrlFlags value of 0xFF is used to disable all flags in WMI_SCAN_PARAMS_CMD.
+ * Do not add any more flags to WMI_SCAN_CTRL_FLAGS_BITS.
+ */
+typedef enum {
+ CONNECT_SCAN_CTRL_FLAGS = 0x01, /* set if can scan in the Connect cmd */
+ SCAN_CONNECTED_CTRL_FLAGS = 0x02, /* set if scan for the SSID it is */
+ /* already connected to */
+ ACTIVE_SCAN_CTRL_FLAGS = 0x04, /* set if enable active scan */
+ ROAM_SCAN_CTRL_FLAGS = 0x08, /* set if enable roam scan when bmiss and lowrssi */
+ REPORT_BSSINFO_CTRL_FLAGS = 0x10, /* set if follows customer BSSINFO reporting rule */
+ ENABLE_AUTO_CTRL_FLAGS = 0x20, /* if disabled, target doesn't
+ scan after a disconnect event */
+ ENABLE_SCAN_ABORT_EVENT = 0x40 /* A scan-complete event with canceled status will be generated when a scan is preempted before it completes */
+} WMI_SCAN_CTRL_FLAGS_BITS;
+
+#define CAN_SCAN_IN_CONNECT(flags) (flags & CONNECT_SCAN_CTRL_FLAGS)
+#define CAN_SCAN_CONNECTED(flags) (flags & SCAN_CONNECTED_CTRL_FLAGS)
+#define ENABLE_ACTIVE_SCAN(flags) (flags & ACTIVE_SCAN_CTRL_FLAGS)
+#define ENABLE_ROAM_SCAN(flags) (flags & ROAM_SCAN_CTRL_FLAGS)
+#define CONFIG_REPORT_BSSINFO(flags) (flags & REPORT_BSSINFO_CTRL_FLAGS)
+#define IS_AUTO_SCAN_ENABLED(flags) (flags & ENABLE_AUTO_CTRL_FLAGS)
+#define SCAN_ABORT_EVENT_ENABLED(flags) (flags & ENABLE_SCAN_ABORT_EVENT)
+
+#define DEFAULT_SCAN_CTRL_FLAGS (CONNECT_SCAN_CTRL_FLAGS| SCAN_CONNECTED_CTRL_FLAGS| ACTIVE_SCAN_CTRL_FLAGS| ROAM_SCAN_CTRL_FLAGS | ENABLE_AUTO_CTRL_FLAGS)
+
+
+typedef PREPACK struct {
+ A_UINT16 fg_start_period; /* seconds */
+ A_UINT16 fg_end_period; /* seconds */
+ A_UINT16 bg_period; /* seconds */
+ A_UINT16 maxact_chdwell_time; /* msec */
+ A_UINT16 pas_chdwell_time; /* msec */
+ A_UINT8 shortScanRatio; /* how many short scans for one long scan */
+ A_UINT8 scanCtrlFlags;
+ A_UINT16 minact_chdwell_time; /* msec */
+ A_UINT16 maxact_scan_per_ssid; /* max active scans per ssid */
+ A_UINT32 max_dfsch_act_time; /* msecs */
+} POSTPACK WMI_SCAN_PARAMS_CMD;
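The helper macros above test individual scanCtrlFlags bits. A small sketch (all values are illustrative only) that starts from DEFAULT_SCAN_CTRL_FLAGS and disables auto-scan after disconnect:

#include <string.h>

static void example_fill_scan_params(WMI_SCAN_PARAMS_CMD *sp)
{
    memset(sp, 0, sizeof(*sp));
    sp->fg_start_period = 1;     /* seconds, illustrative */
    sp->fg_end_period = 60;      /* seconds, illustrative */
    sp->shortScanRatio = WMI_SHORTSCANRATIO_DEFAULT;
    sp->scanCtrlFlags = DEFAULT_SCAN_CTRL_FLAGS & ~ENABLE_AUTO_CTRL_FLAGS;
    /* IS_AUTO_SCAN_ENABLED(sp->scanCtrlFlags) now evaluates to 0 */
}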
+
+/*
+ * WMI_SET_BSS_FILTER_CMDID
+ */
+typedef enum {
+ NONE_BSS_FILTER = 0x0, /* no beacons forwarded */
+ ALL_BSS_FILTER, /* all beacons forwarded */
+ PROFILE_FILTER, /* only beacons matching profile */
+ ALL_BUT_PROFILE_FILTER, /* all but beacons matching profile */
+ CURRENT_BSS_FILTER, /* only beacons matching current BSS */
+ ALL_BUT_BSS_FILTER, /* all but beacons matching BSS */
+ PROBED_SSID_FILTER, /* beacons matching probed ssid */
+ LAST_BSS_FILTER, /* marker only */
+} WMI_BSS_FILTER;
+
+typedef PREPACK struct {
+ A_UINT8 bssFilter; /* see WMI_BSS_FILTER */
+ A_UINT8 reserved1; /* For alignment */
+ A_UINT16 reserved2; /* For alignment */
+ A_UINT32 ieMask;
+} POSTPACK WMI_BSS_FILTER_CMD;
+
+/*
+ * WMI_SET_PROBED_SSID_CMDID
+ */
+#define MAX_PROBED_SSID_INDEX 9
+
+typedef enum {
+ DISABLE_SSID_FLAG = 0, /* disables entry */
+ SPECIFIC_SSID_FLAG = 0x01, /* probes specified ssid */
+ ANY_SSID_FLAG = 0x02, /* probes for any ssid */
+} WMI_SSID_FLAG;
+
+typedef PREPACK struct {
+ A_UINT8 entryIndex; /* 0 to MAX_PROBED_SSID_INDEX */
+ A_UINT8 flag; /* WMI_SSID_FLG */
+ A_UINT8 ssidLength;
+ A_UINT8 ssid[32];
+} POSTPACK WMI_PROBED_SSID_CMD;
+
+/*
+ * WMI_SET_LISTEN_INT_CMDID
+ * The listen interval is bounded by MIN_LISTEN_INTERVAL and MAX_LISTEN_INTERVAL (in TUs)
+ */
+#define MIN_LISTEN_INTERVAL 15
+#define MAX_LISTEN_INTERVAL 5000
+#define MIN_LISTEN_BEACONS 1
+#define MAX_LISTEN_BEACONS 50
+
+typedef PREPACK struct {
+ A_UINT16 listenInterval;
+ A_UINT16 numBeacons;
+} POSTPACK WMI_LISTEN_INT_CMD;
+
+/*
+ * WMI_SET_BEACON_INT_CMDID
+ */
+typedef PREPACK struct {
+ A_UINT16 beaconInterval;
+} POSTPACK WMI_BEACON_INT_CMD;
+
+/*
+ * WMI_SET_BMISS_TIME_CMDID
+ * valid values are between 1000 and 5000 TUs
+ */
+
+#define MIN_BMISS_TIME 1000
+#define MAX_BMISS_TIME 5000
+#define MIN_BMISS_BEACONS 1
+#define MAX_BMISS_BEACONS 50
+
+typedef PREPACK struct {
+ A_UINT16 bmissTime;
+ A_UINT16 numBeacons;
+} POSTPACK WMI_BMISS_TIME_CMD;
+
+/*
+ * WMI_SET_POWER_MODE_CMDID
+ */
+typedef enum {
+ REC_POWER = 0x01,
+ MAX_PERF_POWER,
+} WMI_POWER_MODE;
+
+typedef PREPACK struct {
+ A_UINT8 powerMode; /* WMI_POWER_MODE */
+} POSTPACK WMI_POWER_MODE_CMD;
+
+typedef PREPACK struct {
+ A_INT8 status; /* WMI_SET_PARAMS_REPLY */
+} POSTPACK WMI_SET_PARAMS_REPLY;
+
+typedef PREPACK struct {
+ A_UINT32 opcode;
+ A_UINT32 length;
+ A_CHAR buffer[1]; /* WMI_SET_PARAMS */
+} POSTPACK WMI_SET_PARAMS_CMD;
+
+typedef PREPACK struct {
+ A_UINT8 multicast_mac[ATH_MAC_LEN]; /* WMI_SET_MCAST_FILTER */
+} POSTPACK WMI_SET_MCAST_FILTER_CMD;
+
+typedef PREPACK struct {
+ A_UINT8 enable; /* WMI_MCAST_FILTER */
+} POSTPACK WMI_MCAST_FILTER_CMD;
+
+/*
+ * WMI_SET_POWER_PARAMS_CMDID
+ */
+typedef enum {
+ IGNORE_DTIM = 0x01,
+ NORMAL_DTIM = 0x02,
+ STICK_DTIM = 0x03,
+ AUTO_DTIM = 0x04,
+} WMI_DTIM_POLICY;
+
+/* Policy to determine whether TX should wake up WLAN if sleeping */
+typedef enum {
+ TX_WAKEUP_UPON_SLEEP = 1,
+ TX_DONT_WAKEUP_UPON_SLEEP = 2
+} WMI_TX_WAKEUP_POLICY_UPON_SLEEP;
+
+/*
+ * Policy to determine whether a power-save failure event should be sent to
+ * the host during scanning
+ */
+typedef enum {
+ SEND_POWER_SAVE_FAIL_EVENT_ALWAYS = 1,
+ IGNORE_POWER_SAVE_FAIL_EVENT_DURING_SCAN = 2,
+} POWER_SAVE_FAIL_EVENT_POLICY;
+
+typedef PREPACK struct {
+ A_UINT16 idle_period; /* msec */
+ A_UINT16 pspoll_number;
+ A_UINT16 dtim_policy;
+ A_UINT16 tx_wakeup_policy;
+ A_UINT16 num_tx_to_wakeup;
+ A_UINT16 ps_fail_event_policy;
+} POSTPACK WMI_POWER_PARAMS_CMD;
+
+/* Adhoc power save types */
+typedef enum {
+ ADHOC_PS_DISABLE=1,
+ ADHOC_PS_ATH=2,
+ ADHOC_PS_IEEE=3,
+ ADHOC_PS_OTHER=4,
+} WMI_ADHOC_PS_TYPE;
+
+typedef PREPACK struct {
+ A_UINT8 power_saving;
+ A_UINT8 ttl; /* number of beacon periods */
+ A_UINT16 atim_windows; /* msec */
+ A_UINT16 timeout_value; /* msec */
+} POSTPACK WMI_IBSS_PM_CAPS_CMD;
+
+/* AP power save types */
+typedef enum {
+ AP_PS_DISABLE=1,
+ AP_PS_ATH=2,
+} WMI_AP_PS_TYPE;
+
+typedef PREPACK struct {
+ A_UINT32 idle_time; /* in msec */
+ A_UINT32 ps_period; /* in usec */
+ A_UINT8 sleep_period; /* in ps periods */
+ A_UINT8 psType;
+} POSTPACK WMI_AP_PS_CMD;
+
+/*
+ * WMI_SET_POWERSAVE_TIMERS_POLICY_CMDID
+ */
+typedef enum {
+ IGNORE_TIM_ALL_QUEUES_APSD = 0,
+ PROCESS_TIM_ALL_QUEUES_APSD = 1,
+ IGNORE_TIM_SIMULATED_APSD = 2,
+ PROCESS_TIM_SIMULATED_APSD = 3,
+} APSD_TIM_POLICY;
+
+typedef PREPACK struct {
+ A_UINT16 psPollTimeout; /* msec */
+ A_UINT16 triggerTimeout; /* msec */
+ A_UINT32 apsdTimPolicy; /* TIM behavior when APSD is enabled on the queues. Default is IGNORE_TIM_ALL_QUEUES_APSD */
+ A_UINT32 simulatedAPSDTimPolicy; /* TIM behavior with simulated APSD enabled. Default is PROCESS_TIM_SIMULATED_APSD */
+} POSTPACK WMI_POWERSAVE_TIMERS_POLICY_CMD;
+
+/*
+ * WMI_SET_VOICE_PKT_SIZE_CMDID
+ */
+typedef PREPACK struct {
+ A_UINT16 voicePktSize;
+} POSTPACK WMI_SET_VOICE_PKT_SIZE_CMD;
+
+/*
+ * WMI_SET_MAX_SP_LEN_CMDID
+ */
+typedef enum {
+ DELIVER_ALL_PKT = 0x0,
+ DELIVER_2_PKT = 0x1,
+ DELIVER_4_PKT = 0x2,
+ DELIVER_6_PKT = 0x3,
+} APSD_SP_LEN_TYPE;
+
+typedef PREPACK struct {
+ A_UINT8 maxSPLen;
+} POSTPACK WMI_SET_MAX_SP_LEN_CMD;
+
+/*
+ * WMI_SET_DISC_TIMEOUT_CMDID
+ */
+typedef PREPACK struct {
+ A_UINT8 disconnectTimeout; /* seconds */
+} POSTPACK WMI_DISC_TIMEOUT_CMD;
+
+typedef enum {
+ UPLINK_TRAFFIC = 0,
+ DNLINK_TRAFFIC = 1,
+ BIDIR_TRAFFIC = 2,
+} DIR_TYPE;
+
+typedef enum {
+ DISABLE_FOR_THIS_AC = 0,
+ ENABLE_FOR_THIS_AC = 1,
+ ENABLE_FOR_ALL_AC = 2,
+} VOICEPS_CAP_TYPE;
+
+typedef enum {
+ TRAFFIC_TYPE_APERIODIC = 0,
+ TRAFFIC_TYPE_PERIODIC = 1,
+}TRAFFIC_TYPE;
+
+/*
+ * WMI_SYNCHRONIZE_CMDID
+ */
+typedef PREPACK struct {
+ A_UINT8 dataSyncMap;
+} POSTPACK WMI_SYNC_CMD;
+
+/*
+ * WMI_CREATE_PSTREAM_CMDID
+ */
+typedef PREPACK struct {
+ A_UINT32 minServiceInt; /* in milli-sec */
+ A_UINT32 maxServiceInt; /* in milli-sec */
+ A_UINT32 inactivityInt; /* in milli-sec */
+ A_UINT32 suspensionInt; /* in milli-sec */
+ A_UINT32 serviceStartTime;
+ A_UINT32 minDataRate; /* in bps */
+ A_UINT32 meanDataRate; /* in bps */
+ A_UINT32 peakDataRate; /* in bps */
+ A_UINT32 maxBurstSize;
+ A_UINT32 delayBound;
+ A_UINT32 minPhyRate; /* in bps */
+ A_UINT32 sba;
+ A_UINT32 mediumTime;
+ A_UINT16 nominalMSDU; /* in octets */
+ A_UINT16 maxMSDU; /* in octets */
+ A_UINT8 trafficClass;
+ A_UINT8 trafficDirection; /* DIR_TYPE */
+ A_UINT8 rxQueueNum;
+ A_UINT8 trafficType; /* TRAFFIC_TYPE */
+ A_UINT8 voicePSCapability; /* VOICEPS_CAP_TYPE */
+ A_UINT8 tsid;
+ A_UINT8 userPriority; /* 802.1D user priority */
+ A_UINT8 nominalPHY; /* nominal phy rate */
+} POSTPACK WMI_CREATE_PSTREAM_CMD;
+
+/*
+ * WMI_DELETE_PSTREAM_CMDID
+ */
+typedef PREPACK struct {
+ A_UINT8 txQueueNumber;
+ A_UINT8 rxQueueNumber;
+ A_UINT8 trafficDirection;
+ A_UINT8 trafficClass;
+ A_UINT8 tsid;
+} POSTPACK WMI_DELETE_PSTREAM_CMD;
+
+/*
+ * WMI_SET_CHANNEL_PARAMS_CMDID
+ */
+typedef enum {
+ WMI_11A_MODE = 0x1,
+ WMI_11G_MODE = 0x2,
+ WMI_11AG_MODE = 0x3,
+ WMI_11B_MODE = 0x4,
+ WMI_11GONLY_MODE = 0x5,
+} WMI_PHY_MODE;
+
+#define WMI_MAX_CHANNELS 32
+
+typedef PREPACK struct {
+ A_UINT8 reserved1;
+ A_UINT8 scanParam; /* set if enable scan */
+ A_UINT8 phyMode; /* see WMI_PHY_MODE */
+ A_UINT8 numChannels; /* how many channels follow */
+ A_UINT16 channelList[1]; /* channels in Mhz */
+} POSTPACK WMI_CHANNEL_PARAMS_CMD;
+
+
+/*
+ * WMI_RSSI_THRESHOLD_PARAMS_CMDID
+ * Setting pollTime to 0 disables polling.
+ * Threshold values must be in ascending order, i.e. they should satisfy:
+ * (lowThreshold_lowerVal < lowThreshold_upperVal < highThreshold_lowerVal
+ * < highThreshold_upperVal)
+ */
+
+typedef PREPACK struct WMI_RSSI_THRESHOLD_PARAMS{
+ A_UINT32 pollTime; /* Polling time as a factor of LI */
+ A_INT16 thresholdAbove1_Val; /* lowest of upper */
+ A_INT16 thresholdAbove2_Val;
+ A_INT16 thresholdAbove3_Val;
+ A_INT16 thresholdAbove4_Val;
+ A_INT16 thresholdAbove5_Val;
+ A_INT16 thresholdAbove6_Val; /* highest of upper */
+ A_INT16 thresholdBelow1_Val; /* lowest of below */
+ A_INT16 thresholdBelow2_Val;
+ A_INT16 thresholdBelow3_Val;
+ A_INT16 thresholdBelow4_Val;
+ A_INT16 thresholdBelow5_Val;
+ A_INT16 thresholdBelow6_Val; /* highest of below */
+ A_UINT8 weight; /* "alpha" */
+ A_UINT8 reserved[3];
+} POSTPACK WMI_RSSI_THRESHOLD_PARAMS_CMD;
+
+/*
+ * WMI_SNR_THRESHOLD_PARAMS_CMDID
+ * Setting the polltime to 0 would disable polling.
+ */
+
+typedef PREPACK struct WMI_SNR_THRESHOLD_PARAMS{
+ A_UINT32 pollTime; /* Polling time as a factor of LI */
+ A_UINT8 weight; /* "alpha" */
+ A_UINT8 thresholdAbove1_Val; /* lowest of upper */
+ A_UINT8 thresholdAbove2_Val;
+ A_UINT8 thresholdAbove3_Val;
+ A_UINT8 thresholdAbove4_Val; /* highest of upper */
+ A_UINT8 thresholdBelow1_Val; /* lowest of below */
+ A_UINT8 thresholdBelow2_Val;
+ A_UINT8 thresholdBelow3_Val;
+ A_UINT8 thresholdBelow4_Val; /* highest of below */
+ A_UINT8 reserved[3];
+} POSTPACK WMI_SNR_THRESHOLD_PARAMS_CMD;
+
+/*
+ * WMI_LQ_THRESHOLD_PARAMS_CMDID
+ */
+typedef PREPACK struct WMI_LQ_THRESHOLD_PARAMS {
+ A_UINT8 enable;
+ A_UINT8 thresholdAbove1_Val;
+ A_UINT8 thresholdAbove2_Val;
+ A_UINT8 thresholdAbove3_Val;
+ A_UINT8 thresholdAbove4_Val;
+ A_UINT8 thresholdBelow1_Val;
+ A_UINT8 thresholdBelow2_Val;
+ A_UINT8 thresholdBelow3_Val;
+ A_UINT8 thresholdBelow4_Val;
+ A_UINT8 reserved[3];
+} POSTPACK WMI_LQ_THRESHOLD_PARAMS_CMD;
+
+typedef enum {
+ WMI_LPREAMBLE_DISABLED = 0,
+ WMI_LPREAMBLE_ENABLED
+} WMI_LPREAMBLE_STATUS;
+
+typedef enum {
+ WMI_IGNORE_BARKER_IN_ERP = 0,
+ WMI_DONOT_IGNORE_BARKER_IN_ERP
+} WMI_PREAMBLE_POLICY;
+
+typedef PREPACK struct {
+ A_UINT8 status;
+ A_UINT8 preamblePolicy;
+}POSTPACK WMI_SET_LPREAMBLE_CMD;
+
+typedef PREPACK struct {
+ A_UINT16 threshold;
+}POSTPACK WMI_SET_RTS_CMD;
+
+/*
+ * WMI_TARGET_ERROR_REPORT_BITMASK_CMDID
+ * Sets the error reporting event bitmask in target. Target clears it
+ * upon an error. Subsequent errors are counted, but not reported
+ * via event, unless the bitmask is set again.
+ */
+typedef PREPACK struct {
+ A_UINT32 bitmask;
+} POSTPACK WMI_TARGET_ERROR_REPORT_BITMASK;
+
+/*
+ * WMI_SET_TX_PWR_CMDID
+ */
+typedef PREPACK struct {
+ A_UINT8 dbM; /* in dBm units */
+} POSTPACK WMI_SET_TX_PWR_CMD, WMI_TX_PWR_REPLY;
+
+/*
+ * WMI_SET_ASSOC_INFO_CMDID
+ *
+ * A maximum of 2 private IEs can be sent in the [Re]Assoc request.
+ * A 3rd one, the CCX version IE can also be set from the host.
+ */
+#define WMI_MAX_ASSOC_INFO_TYPE 2
+#define WMI_CCX_VER_IE 2 /* ieType to set CCX Version IE */
+
+#define WMI_MAX_ASSOC_INFO_LEN 240
+
+typedef PREPACK struct {
+ A_UINT8 ieType;
+ A_UINT8 bufferSize;
+ A_UINT8 assocInfo[1]; /* up to WMI_MAX_ASSOC_INFO_LEN */
+} POSTPACK WMI_SET_ASSOC_INFO_CMD;
+
+
+/*
+ * WMI_GET_TX_PWR_CMDID does not take any parameters
+ */
+
+/*
+ * WMI_ADD_BAD_AP_CMDID
+ */
+#define WMI_MAX_BAD_AP_INDEX 1
+
+typedef PREPACK struct {
+ A_UINT8 badApIndex; /* 0 to WMI_MAX_BAD_AP_INDEX */
+ A_UINT8 bssid[ATH_MAC_LEN];
+} POSTPACK WMI_ADD_BAD_AP_CMD;
+
+/*
+ * WMI_DELETE_BAD_AP_CMDID
+ */
+typedef PREPACK struct {
+ A_UINT8 badApIndex; /* 0 to WMI_MAX_BAD_AP_INDEX */
+} POSTPACK WMI_DELETE_BAD_AP_CMD;
+
+/*
+ * WMI_SET_ACCESS_PARAMS_CMDID
+ */
+#define WMI_DEFAULT_TXOP_ACPARAM 0 /* implies one MSDU */
+#define WMI_DEFAULT_ECWMIN_ACPARAM 4 /* corresponds to CWmin of 15 */
+#define WMI_DEFAULT_ECWMAX_ACPARAM 10 /* corresponds to CWmax of 1023 */
+#define WMI_MAX_CW_ACPARAM 15 /* maximum eCWmin or eCWmax */
+#define WMI_DEFAULT_AIFSN_ACPARAM 2
+#define WMI_MAX_AIFSN_ACPARAM 15
+typedef PREPACK struct {
+ A_UINT16 txop; /* in units of 32 usec */
+ A_UINT8 eCWmin;
+ A_UINT8 eCWmax;
+ A_UINT8 aifsn;
+ A_UINT8 ac;
+} POSTPACK WMI_SET_ACCESS_PARAMS_CMD;
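The defaults above map directly onto the command fields; a short sketch of programming one access category with them (the helper name is hypothetical):

static void example_default_access_params(WMI_SET_ACCESS_PARAMS_CMD *ap, A_UINT8 ac)
{
    ap->txop = WMI_DEFAULT_TXOP_ACPARAM;       /* units of 32 usec, one MSDU */
    ap->eCWmin = WMI_DEFAULT_ECWMIN_ACPARAM;   /* CWmin = 2^4 - 1 = 15 */
    ap->eCWmax = WMI_DEFAULT_ECWMAX_ACPARAM;   /* CWmax = 2^10 - 1 = 1023 */
    ap->aifsn = WMI_DEFAULT_AIFSN_ACPARAM;
    ap->ac = ac;
}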
+
+
+/*
+ * WMI_SET_RETRY_LIMITS_CMDID
+ *
+ * This command is used to customize the number of retries the
+ * wlan device will perform on a given frame.
+ */
+#define WMI_MIN_RETRIES 2
+#define WMI_MAX_RETRIES 13
+typedef enum {
+ MGMT_FRAMETYPE = 0,
+ CONTROL_FRAMETYPE = 1,
+ DATA_FRAMETYPE = 2
+} WMI_FRAMETYPE;
+
+typedef PREPACK struct {
+ A_UINT8 frameType; /* WMI_FRAMETYPE */
+ A_UINT8 trafficClass; /* applies only to DATA_FRAMETYPE */
+ A_UINT8 maxRetries;
+ A_UINT8 enableNotify;
+} POSTPACK WMI_SET_RETRY_LIMITS_CMD;
+
+/*
+ * WMI_SET_ROAM_CTRL_CMDID
+ *
+ * This command is used to influence the Roaming behaviour
+ * Set the host biases of the BSSs before setting the roam mode as bias
+ * based.
+ */
+
+/*
+ * Different types of Roam Control
+ */
+
+typedef enum {
+ WMI_FORCE_ROAM = 1, /* Roam to the specified BSSID */
+ WMI_SET_ROAM_MODE = 2, /* default, programmed bias, no roam */
+ WMI_SET_HOST_BIAS = 3, /* Set the Host Bias */
+ WMI_SET_LOWRSSI_SCAN_PARAMS = 4, /* Set lowrssi Scan parameters */
+} WMI_ROAM_CTRL_TYPE;
+
+#define WMI_MIN_ROAM_CTRL_TYPE WMI_FORCE_ROAM
+#define WMI_MAX_ROAM_CTRL_TYPE WMI_SET_LOWRSSI_SCAN_PARAMS
+
+/*
+ * ROAM MODES
+ */
+
+typedef enum {
+ WMI_DEFAULT_ROAM_MODE = 1, /* RSSI based ROAM */
+ WMI_HOST_BIAS_ROAM_MODE = 2, /* HOST BIAS based ROAM */
+ WMI_LOCK_BSS_MODE = 3 /* Lock to the Current BSS - no Roam */
+} WMI_ROAM_MODE;
+
+/*
+ * BSS HOST BIAS INFO
+ */
+
+typedef PREPACK struct {
+ A_UINT8 bssid[ATH_MAC_LEN];
+ A_INT8 bias;
+} POSTPACK WMI_BSS_BIAS;
+
+typedef PREPACK struct {
+ A_UINT8 numBss;
+ WMI_BSS_BIAS bssBias[1];
+} POSTPACK WMI_BSS_BIAS_INFO;
+
+typedef PREPACK struct WMI_LOWRSSI_SCAN_PARAMS {
+ A_UINT16 lowrssi_scan_period;
+ A_INT16 lowrssi_scan_threshold;
+ A_INT16 lowrssi_roam_threshold;
+ A_UINT8 roam_rssi_floor;
+ A_UINT8 reserved[1]; /* For alignment */
+} POSTPACK WMI_LOWRSSI_SCAN_PARAMS;
+
+typedef PREPACK struct {
+ PREPACK union {
+ A_UINT8 bssid[ATH_MAC_LEN]; /* WMI_FORCE_ROAM */
+ A_UINT8 roamMode; /* WMI_SET_ROAM_MODE */
+ WMI_BSS_BIAS_INFO bssBiasInfo; /* WMI_SET_HOST_BIAS */
+ WMI_LOWRSSI_SCAN_PARAMS lrScanParams;
+ } POSTPACK info;
+ A_UINT8 roamCtrlType ;
+} POSTPACK WMI_SET_ROAM_CTRL_CMD;
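The union is interpreted according to roamCtrlType; for example, WMI_FORCE_ROAM uses info.bssid. A minimal sketch (the helper is hypothetical, not part of this patch):

#include <string.h>

static void example_force_roam(WMI_SET_ROAM_CTRL_CMD *rc, const A_UINT8 *bssid)
{
    memset(rc, 0, sizeof(*rc));
    rc->roamCtrlType = WMI_FORCE_ROAM;
    memcpy(rc->info.bssid, bssid, ATH_MAC_LEN);
}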
+
+/*
+ * WMI_SET_BT_WLAN_CONN_PRECEDENCE_CMDID
+ */
+typedef enum {
+ BT_WLAN_CONN_PRECDENCE_WLAN=0, /* Default */
+ BT_WLAN_CONN_PRECDENCE_PAL,
+} BT_WLAN_CONN_PRECEDENCE;
+
+typedef PREPACK struct {
+ A_UINT8 precedence;
+} POSTPACK WMI_SET_BT_WLAN_CONN_PRECEDENCE;
+
+/*
+ * WMI_ENABLE_RM_CMDID
+ */
+typedef PREPACK struct {
+ A_BOOL enable_radio_measurements;
+} POSTPACK WMI_ENABLE_RM_CMD;
+
+/*
+ * WMI_SET_MAX_OFFHOME_DURATION_CMDID
+ */
+typedef PREPACK struct {
+ A_UINT8 max_offhome_duration;
+} POSTPACK WMI_SET_MAX_OFFHOME_DURATION_CMD;
+
+typedef PREPACK struct {
+ A_UINT32 frequency;
+ A_UINT8 threshold;
+} POSTPACK WMI_SET_HB_CHALLENGE_RESP_PARAMS_CMD;
+/*---------------------- BTCOEX RELATED -------------------------------------*/
+/*----------------------COMMON to AR6002 and AR6003 -------------------------*/
+typedef enum {
+ BT_STREAM_UNDEF = 0,
+ BT_STREAM_SCO, /* SCO stream */
+ BT_STREAM_A2DP, /* A2DP stream */
+ BT_STREAM_SCAN, /* BT Discovery or Page */
+ BT_STREAM_ESCO,
+ BT_STREAM_MAX
+} BT_STREAM_TYPE;
+
+typedef enum {
+ BT_PARAM_SCO_PSPOLL_LATENCY_ONE_FOURTH =1,
+ BT_PARAM_SCO_PSPOLL_LATENCY_HALF,
+ BT_PARAM_SCO_PSPOLL_LATENCY_THREE_FOURTH,
+} BT_PARAMS_SCO_PSPOLL_LATENCY;
+
+typedef enum {
+ BT_PARAMS_SCO_STOMP_SCO_NEVER =1,
+ BT_PARAMS_SCO_STOMP_SCO_ALWAYS,
+ BT_PARAMS_SCO_STOMP_SCO_IN_LOWRSSI,
+} BT_PARAMS_SCO_STOMP_RULES;
+
+typedef enum {
+ BT_STATUS_UNDEF = 0,
+ BT_STATUS_ON,
+ BT_STATUS_OFF,
+ BT_STATUS_MAX
+} BT_STREAM_STATUS;
+
+typedef PREPACK struct {
+ A_UINT8 streamType;
+ A_UINT8 status;
+} POSTPACK WMI_SET_BT_STATUS_CMD;
+
+typedef enum {
+ BT_ANT_TYPE_UNDEF=0,
+ BT_ANT_TYPE_DUAL,
+ BT_ANT_TYPE_SPLITTER,
+ BT_ANT_TYPE_SWITCH,
+ BT_ANT_TYPE_HIGH_ISO_DUAL
+} BT_ANT_FRONTEND_CONFIG;
+
+typedef enum {
+ BT_COLOCATED_DEV_BTS4020=0,
+ BT_COLCATED_DEV_CSR ,
+ BT_COLOCATED_DEV_VALKYRIE
+} BT_COLOCATED_DEV_TYPE;
+
+/*********************** Applicable to AR6002 ONLY ******************************/
+
+typedef enum {
+ BT_PARAM_SCO = 1, /* SCO stream parameters */
+ BT_PARAM_A2DP ,
+ BT_PARAM_ANTENNA_CONFIG,
+ BT_PARAM_COLOCATED_BT_DEVICE,
+ BT_PARAM_ACLCOEX,
+ BT_PARAM_11A_SEPARATE_ANT,
+ BT_PARAM_MAX
+} BT_PARAM_TYPE;
+
+
+#define BT_SCO_ALLOW_CLOSE_RANGE_OPT (1 << 0)
+#define BT_SCO_FORCE_AWAKE_OPT (1 << 1)
+#define BT_SCO_SET_RSSI_OVERRIDE(flags) ((flags) |= (1 << 2))
+#define BT_SCO_GET_RSSI_OVERRIDE(flags) (((flags) >> 2) & 0x1)
+#define BT_SCO_SET_RTS_OVERRIDE(flags) ((flags) |= (1 << 3))
+#define BT_SCO_GET_RTS_OVERRIDE(flags) (((flags) >> 3) & 0x1)
+#define BT_SCO_GET_MIN_LOW_RATE_CNT(flags) (((flags) >> 8) & 0xFF)
+#define BT_SCO_GET_MAX_LOW_RATE_CNT(flags) (((flags) >> 16) & 0xFF)
+#define BT_SCO_SET_MIN_LOW_RATE_CNT(flags,val) (flags) |= (((val) & 0xFF) << 8)
+#define BT_SCO_SET_MAX_LOW_RATE_CNT(flags,val) (flags) |= (((val) & 0xFF) << 16)
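These macros pack the bit fields described in the scoOptFlags comment of BT_PARAMS_SCO below (bits 8..15 hold the minimum low-rate count, bits 16..23 the maximum). A sketch with illustrative counts:

static A_UINT32 example_sco_opt_flags(void)
{
    A_UINT32 flags = BT_SCO_ALLOW_CLOSE_RANGE_OPT | BT_SCO_FORCE_AWAKE_OPT;

    BT_SCO_SET_RSSI_OVERRIDE(flags);
    BT_SCO_SET_MIN_LOW_RATE_CNT(flags, 10);   /* illustrative value */
    BT_SCO_SET_MAX_LOW_RATE_CNT(flags, 36);   /* illustrative value */
    return flags;
}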
+
+typedef PREPACK struct {
+ A_UINT32 numScoCyclesForceTrigger; /* Number of SCO cycles after which
+ to force a ps-poll. default = 10 */
+ A_UINT32 dataResponseTimeout; /* Timeout Waiting for Downlink pkt
+ in response for ps-poll,
+ default = 10 msecs */
+ A_UINT32 stompScoRules;
+ A_UINT32 scoOptFlags; /* SCO Options Flags :
+ bits: meaning:
+ 0 Allow Close Range Optimization
+ 1 Force awake during close range
+ 2 If set use host supplied RSSI for OPT
+ 3 If set use host supplied RTS COUNT for OPT
+ 4..7 Unused
+ 8..15 Low Data Rate Min Cnt
+ 16..23 Low Data Rate Max Cnt
+ */
+
+ A_UINT8 stompDutyCyleVal; /* Sco cycles to limit ps-poll queuing
+ if stomped */
+ A_UINT8 stompDutyCyleMaxVal; /* firmware increases the stomp duty cycle
+ gradually up to this value as needed */
+ A_UINT8 psPollLatencyFraction; /* Fraction of idle
+ period, within which
+ additional ps-polls
+ can be queued */
+ A_UINT8 noSCOSlots; /* Number of SCO Tx/Rx slots.
+ HVx, EV3, 2EV3 = 2 */
+ A_UINT8 noIdleSlots; /* Number of Bluetooth idle slots between
+ consecutive SCO Tx/Rx slots
+ HVx, EV3 = 4
+ 2EV3 = 10 */
+ A_UINT8 scoOptOffRssi;/*RSSI value below which we go to ps poll*/
+ A_UINT8 scoOptOnRssi; /*RSSI value above which we reenter opt mode*/
+ A_UINT8 scoOptRtsCount;
+} POSTPACK BT_PARAMS_SCO;
+
+#define BT_A2DP_ALLOW_CLOSE_RANGE_OPT (1 << 0)
+#define BT_A2DP_FORCE_AWAKE_OPT (1 << 1)
+#define BT_A2DP_SET_RSSI_OVERRIDE(flags) ((flags) |= (1 << 2))
+#define BT_A2DP_GET_RSSI_OVERRIDE(flags) (((flags) >> 2) & 0x1)
+#define BT_A2DP_SET_RTS_OVERRIDE(flags) ((flags) |= (1 << 3))
+#define BT_A2DP_GET_RTS_OVERRIDE(flags) (((flags) >> 3) & 0x1)
+#define BT_A2DP_GET_MIN_LOW_RATE_CNT(flags) (((flags) >> 8) & 0xFF)
+#define BT_A2DP_GET_MAX_LOW_RATE_CNT(flags) (((flags) >> 16) & 0xFF)
+#define BT_A2DP_SET_MIN_LOW_RATE_CNT(flags,val) (flags) |= (((val) & 0xFF) << 8)
+#define BT_A2DP_SET_MAX_LOW_RATE_CNT(flags,val) (flags) |= (((val) & 0xFF) << 16)
+
+typedef PREPACK struct {
+ A_UINT32 a2dpWlanUsageLimit; /* MAX time firmware uses the medium for
+ wlan, after it identifies the idle time
+ default (30 msecs) */
+ A_UINT32 a2dpBurstCntMin; /* Minimum number of bluetooth data frames
+ to replenish Wlan Usage limit (default 3) */
+ A_UINT32 a2dpDataRespTimeout;
+ A_UINT32 a2dpOptFlags; /* A2DP Option flags:
+ bits: meaning:
+ 0 Allow Close Range Optimization
+ 1 Force awake during close range
+ 2 If set use host supplied RSSI for OPT
+ 3 If set use host supplied RTS COUNT for OPT
+ 4..7 Unused
+ 8..15 Low Data Rate Min Cnt
+ 16..23 Low Data Rate Max Cnt
+ */
+ A_UINT8 isCoLocatedBtRoleMaster;
+ A_UINT8 a2dpOptOffRssi;/*RSSI value below which we go to ps poll*/
+ A_UINT8 a2dpOptOnRssi; /*RSSI value above which we reenter opt mode*/
+ A_UINT8 a2dpOptRtsCount;
+}POSTPACK BT_PARAMS_A2DP;
+
+/* Used during BT FTP, BT OPP, or any other data-based ACL profile on Bluetooth
+ (non-A2DP). */
+typedef PREPACK struct {
+ A_UINT32 aclWlanMediumUsageTime; /* Wlan usage time during Acl (non-a2dp)
+ coexistence (default 30 msecs) */
+ A_UINT32 aclBtMediumUsageTime; /* Bt usage time during acl coexistence
+ (default 30 msecs)*/
+ A_UINT32 aclDataRespTimeout;
+ A_UINT32 aclDetectTimeout; /* ACL coexistence enabled if we get
+ 10 Pkts in X msec(default 100 msecs) */
+ A_UINT32 aclmaxPktCnt; /* No of ACL pkts to receive before
+ enabling ACL coex */
+
+}POSTPACK BT_PARAMS_ACLCOEX;
+
+typedef PREPACK struct {
+ PREPACK union {
+ BT_PARAMS_SCO scoParams;
+ BT_PARAMS_A2DP a2dpParams;
+ BT_PARAMS_ACLCOEX aclCoexParams;
+ A_UINT8 antType; /* 0 -Disabled (default)
+ 1 - BT_ANT_TYPE_DUAL
+ 2 - BT_ANT_TYPE_SPLITTER
+ 3 - BT_ANT_TYPE_SWITCH */
+ A_UINT8 coLocatedBtDev; /* 0 - BT_COLOCATED_DEV_BTS4020 (default)
+ 1 - BT_COLCATED_DEV_CSR
+ 2 - BT_COLOCATED_DEV_VALKYRIE
+ */
+ } POSTPACK info;
+ A_UINT8 paramType ;
+} POSTPACK WMI_SET_BT_PARAMS_CMD;
+
+/************************ END AR6002 BTCOEX *******************************/
+/*-----------------------AR6003 BTCOEX -----------------------------------*/
+
+/* ---------------WMI_SET_BTCOEX_FE_ANT_CMDID --------------------------*/
+/* Indicates front end antenna configuration. This command needs to be issued
+ * right after initialization and after WMI_SET_BTCOEX_COLOCATED_BT_DEV_CMDID.
+ * AR6003 enables coexistence and antenna switching based on the configuration.
+ */
+typedef enum {
+ WMI_BTCOEX_NOT_ENABLED = 0,
+ WMI_BTCOEX_FE_ANT_SINGLE =1,
+ WMI_BTCOEX_FE_ANT_DUAL=2,
+ WMI_BTCOEX_FE_ANT_DUAL_HIGH_ISO=3,
+ WMI_BTCOEX_FE_ANT_TYPE_MAX
+}WMI_BTCOEX_FE_ANT_TYPE;
+
+typedef PREPACK struct {
+ A_UINT8 btcoexFeAntType; /* 1 - WMI_BTCOEX_FE_ANT_SINGLE for single antenna front end
+ 2 - WMI_BTCOEX_FE_ANT_DUAL for dual antenna front end
+ (for isolation less than 35 dB; for higher isolation there
+ is no need to pass this command).
+ (not implemented)
+ */
+}POSTPACK WMI_SET_BTCOEX_FE_ANT_CMD;
+
+/* -------------WMI_SET_BTCOEX_COLOCATED_BT_DEV_CMDID ----------------*/
+/* Indicates the Bluetooth chip to the firmware. The firmware may use a different algorithm based
+ * on the Bluetooth chip type. Based on the Bluetooth device, a different coexistence protocol is used.
+ */
+typedef PREPACK struct {
+ A_UINT8 btcoexCoLocatedBTdev; /*1 - Qcom BT (3 -wire PTA)
+ 2 - CSR BT (3 wire PTA)
+ 3 - Atheros 3001 BT (3 wire PTA)
+ 4 - STE bluetooth (4-wire ePTA)
+ 5 - Atheros 3002 BT (4-wire MCI)
+ defaults= 3 (Atheros 3001 BT )
+ */
+}POSTPACK WMI_SET_BTCOEX_COLOCATED_BT_DEV_CMD;
+
+/* -------------WMI_SET_BTCOEX_BTINQUIRY_PAGE_CONFIG_CMDID ------------*/
+/* Configuration parameters during bluetooth inquiry and page. Page configuration
+ * is applicable only on interfaces which can distinguish page (applicable only for ePTA -
+ * STE bluetooth).
+ * Bluetooth inquiry start and end is indicated via WMI_SET_BTCOEX_BT_OPERATING_STATUS_CMDID.
+ * During this period the station will be in power-save mode.
+ */
+typedef PREPACK struct {
+ A_UINT32 btInquiryDataFetchFrequency;/* The frequency of querying the AP for data
+ (via pspoll) is configured by this parameter.
+ "default = 10 ms" */
+
+ A_UINT32 protectBmissDurPostBtInquiry;/* The firmware will continue to be in the inquiry state
+ for the configured duration after inquiry completion.
+ This is to ensure other Bluetooth transactions
+ (RDP, SDP profiles, link key exchange, etc.)
+ go through smoothly without wifi stomping.
+ default = 10 secs */
+
+ A_UINT32 maxpageStomp; /*Applicable only for STE-BT interface. Currently not
+ used */
+ A_UINT32 btInquiryPageFlag; /* Not used */
+}POSTPACK WMI_SET_BTCOEX_BTINQUIRY_PAGE_CONFIG_CMD;
+
+/*---------------------WMI_SET_BTCOEX_SCO_CONFIG_CMDID ---------------*/
+/* Configure SCO parameters. These parameters are used whenever the firmware is notified
+ * of an (e)SCO profile on Bluetooth (via WMI_SET_BTCOEX_BT_OPERATING_STATUS_CMDID).
+ * The BTCOEX_SCO_CONFIG data structure holds common configuration and applies to both
+ * ps-poll mode and opt mode.
+ * Ps-poll Mode - Station is in power-save and retrieves downlink data between sco gaps.
+ * Opt Mode - station is in awake state and access point can send data to station any time.
+ * BTCOEX_PSPOLLMODE_SCO_CONFIG - Configuration applied only during ps-poll mode.
+ * BTCOEX_OPTMODE_SCO_CONFIG - Configuration applied only during opt mode.
+ */
+#define WMI_SCO_CONFIG_FLAG_ALLOW_OPTIMIZATION (1 << 0)
+#define WMI_SCO_CONFIG_FLAG_IS_EDR_CAPABLE (1 << 1)
+#define WMI_SCO_CONFIG_FLAG_IS_BT_MASTER (1 << 2)
+#define WMI_SCO_CONFIG_FLAG_FW_DETECT_OF_PER (1 << 3)
+typedef PREPACK struct {
+ A_UINT32 scoSlots; /* Number of SCO Tx/Rx slots.
+ HVx, EV3, 2EV3 = 2 */
+ A_UINT32 scoIdleSlots; /* Number of Bluetooth idle slots between
+ consecutive SCO Tx/Rx slots
+ HVx, EV3 = 4
+ 2EV3 = 10
+ */
+ A_UINT32 scoFlags; /* SCO Options Flags :
+ bits: meaning:
+ 0 Allow Close Range Optimization
+ 1 Is EDR capable or Not
+ 2 IS Co-located Bt role Master
+ 3 Firmware determines the periodicity of SCO.
+ */
+
+ A_UINT32 linkId; /* applicable to STE-BT - not used */
+}POSTPACK BTCOEX_SCO_CONFIG;
+
+typedef PREPACK struct {
+ A_UINT32 scoCyclesForceTrigger; /* Number of SCO cycles after which
+ to force a ps-poll. default = 10 */
+ A_UINT32 scoDataResponseTimeout; /* Timeout Waiting for Downlink pkt
+ in response for ps-poll,
+ default = 20 msecs */
+
+ A_UINT32 scoStompDutyCyleVal; /* not implemented */
+
+ A_UINT32 scoStompDutyCyleMaxVal; /*Not implemented */
+
+ A_UINT32 scoPsPollLatencyFraction; /* Fraction of idle
+ period, within which
+ additional ps-polls can be queued
+ 1 - 1/4 of idle duration
+ 2 - 1/2 of idle duration
+ 3 - 3/4 of idle duration
+ default =2 (1/2)
+ */
+}POSTPACK BTCOEX_PSPOLLMODE_SCO_CONFIG;
+
+typedef PREPACK struct {
+ A_UINT32 scoStompCntIn100ms;/*max number of SCO stomp in 100ms allowed in
+ opt mode. If exceeds the configured value,
+ switch to ps-poll mode
+ default = 3 */
+
+ A_UINT32 scoContStompMax; /* max number of continuous stomps allowed in opt mode.
+ if exceeded, switch to ps-poll mode
+ default = 3 */
+
+ A_UINT32 scoMinlowRateMbps; /* Low rate threshold */
+
+ A_UINT32 scoLowRateCnt; /* number of low rate pkts (< scoMinlowRateMbps) allowed in 100 ms.
+ If exceeded, switch to/stay in ps-poll mode; if lower, stay in opt mode.
+ default = 36
+ */
+
+ A_UINT32 scoHighPktRatio; /*(Total Rx pkts in 100 ms + 1)/
+ ((Total tx pkts in 100 ms - No of high rate pkts in 100 ms) + 1) in 100 ms,
+ if exceeded switch/stay in opt mode and if lower switch/stay in pspoll mode.
+ default = 5 (80% of high rates)
+ */
+
+ A_UINT32 scoMaxAggrSize; /* Max number of Rx subframes allowed in this mode. (Firmware re-negotiates
+ the max number of aggregates if it was negotiated to a higher value
+ default = 1
+ Recommended value Basic rate headsets = 1, EDR (2-EV3) =4.
+ */
+}POSTPACK BTCOEX_OPTMODE_SCO_CONFIG;
+
+typedef PREPACK struct {
+ A_UINT32 scanInterval;
+ A_UINT32 maxScanStompCnt;
+}POSTPACK BTCOEX_WLANSCAN_SCO_CONFIG;
+
+typedef PREPACK struct {
+ BTCOEX_SCO_CONFIG scoConfig;
+ BTCOEX_PSPOLLMODE_SCO_CONFIG scoPspollConfig;
+ BTCOEX_OPTMODE_SCO_CONFIG scoOptModeConfig;
+ BTCOEX_WLANSCAN_SCO_CONFIG scoWlanScanConfig;
+}POSTPACK WMI_SET_BTCOEX_SCO_CONFIG_CMD;
+
+/* ------------------WMI_SET_BTCOEX_A2DP_CONFIG_CMDID -------------------*/
+/* Configure A2DP profile parameters. These parameters are used whenever the firmware is notified
+ * of an A2DP profile on Bluetooth (via WMI_SET_BTCOEX_BT_OPERATING_STATUS_CMDID).
+ * The BTCOEX_A2DP_CONFIG data structure holds common configuration and applies to
+ * ps-poll mode and opt mode.
+ * Ps-poll Mode - Station is in power-save and retrieves downlink data between a2dp data bursts.
+ * Opt Mode - station is in power save during a2dp bursts and awake in the gaps.
+ * BTCOEX_PSPOLLMODE_A2DP_CONFIG - Configuration applied only during ps-poll mode.
+ * BTCOEX_OPTMODE_A2DP_CONFIG - Configuration applied only during opt mode.
+ */
+
+#define WMI_A2DP_CONFIG_FLAG_ALLOW_OPTIMIZATION (1 << 0)
+#define WMI_A2DP_CONFIG_FLAG_IS_EDR_CAPABLE (1 << 1)
+#define WMI_A2DP_CONFIG_FLAG_IS_BT_ROLE_MASTER (1 << 2)
+#define WMI_A2DP_CONFIG_FLAG_IS_A2DP_HIGH_PRI (1 << 3)
+#define WMI_A2DP_CONFIG_FLAG_FIND_BT_ROLE (1 << 4)
+
+typedef PREPACK struct {
+ A_UINT32 a2dpFlags; /* A2DP Option flags:
+ bits: meaning:
+ 0 Allow Close Range Optimization
+ 1 IS EDR capable
+ 2 IS Co-located Bt role Master
+ 3 a2dp traffic is high priority
+ 4 Fw detect the role of bluetooth.
+ */
+ A_UINT32 linkId; /* Applicable only to STE-BT - not used */
+
+}POSTPACK BTCOEX_A2DP_CONFIG;
+
+typedef PREPACK struct {
+ A_UINT32 a2dpWlanMaxDur; /* MAX time firmware uses the medium for
+ wlan, after it identifies the idle time
+ default (30 msecs) */
+
+ A_UINT32 a2dpMinBurstCnt; /* Minimum number of bluetooth data frames
+ to replenish Wlan Usage limit (default 3) */
+
+ A_UINT32 a2dpDataRespTimeout; /* Max duration firmware waits for downlink
+ by stomping on bluetooth
+ after ps-poll is acknowledged.
+ default = 20 ms
+ */
+}POSTPACK BTCOEX_PSPOLLMODE_A2DP_CONFIG;
+
+typedef PREPACK struct {
+ A_UINT32 a2dpMinlowRateMbps; /* Low rate threshold */
+
+ A_UINT32 a2dpLowRateCnt; /* number of low rate pkts (< a2dpMinlowRateMbps) allowed in 100 ms.
+ If exceeded, switch to/stay in ps-poll mode; if lower, stay in opt mode.
+ default = 36
+ */
+
+ A_UINT32 a2dpHighPktRatio; /*(Total Rx pkts in 100 ms + 1)/
+ ((Total tx pkts in 100 ms - No of high rate pkts in 100 ms) + 1) in 100 ms,
+ if exceeded switch/stay in opt mode and if lower switch/stay in pspoll mode.
+ default = 5 (80% of high rates)
+ */
+
+ A_UINT32 a2dpMaxAggrSize; /* Max number of Rx subframes allowed in this mode. (Firmware re-negotiates
+ the max number of aggregates if it was negotiated to a higher value
+ default = 1
+ Recommended value Basic rate headsets = 1, EDR (2-EV3) =8.
+ */
+ A_UINT32 a2dpPktStompCnt; /*number of a2dp pkts that can be stomped per burst.
+ default = 6*/
+
+}POSTPACK BTCOEX_OPTMODE_A2DP_CONFIG;
+
+typedef PREPACK struct {
+ BTCOEX_A2DP_CONFIG a2dpConfig;
+ BTCOEX_PSPOLLMODE_A2DP_CONFIG a2dppspollConfig;
+ BTCOEX_OPTMODE_A2DP_CONFIG a2dpOptConfig;
+}POSTPACK WMI_SET_BTCOEX_A2DP_CONFIG_CMD;
+
+/*------------ WMI_SET_BTCOEX_ACLCOEX_CONFIG_CMDID---------------------*/
+/* Configure non-A2DP ACL profile parameters. The start of the ACL profile can either be
+ * indicated via WMI_SET_BTCOEX_BT_OPERATING_STATUS_CMDID or enabled via firmware detection,
+ * which is configured via "aclCoexFlags".
+ * The BTCOEX_ACLCOEX_CONFIG data structure holds common configuration and applies to both
+ * ps-poll mode and opt mode.
+ * Ps-poll Mode - Station is in power-save and retrieves downlink data during wlan medium.
+ * Opt Mode - station is in power save during bluetooth medium time and awake during wlan duration.
+ * (Not implemented yet)
+ *
+ * BTCOEX_PSPOLLMODE_ACLCOEX_CONFIG - Configuration applied only during ps-poll mode.
+ * BTCOEX_OPTMODE_ACLCOEX_CONFIG - Configuration applied only during opt mode.
+ */
+
+#define WMI_ACLCOEX_FLAGS_ALLOW_OPTIMIZATION (1 << 0)
+#define WMI_ACLCOEX_FLAGS_DISABLE_FW_DETECTION (1 << 1)
+
+typedef PREPACK struct {
+ A_UINT32 aclWlanMediumDur; /* Wlan usage time during Acl (non-a2dp)
+ coexistence (default 30 msecs)
+ */
+
+ A_UINT32 aclBtMediumDur; /* Bt usage time during acl coexistence
+ (default 30 msecs)
+ */
+
+ A_UINT32 aclDetectTimeout; /* BT activity observation time limit.
+ In this time duration, number of bt pkts are counted.
+ If the Cnt reaches "aclPktCntLowerLimit" value
+ for "aclIterToEnableCoex" iteration continuously,
+ firmware gets into ACL coexistence mode.
+ Similarly, if bt traffic count during ACL coexistence
+ has not reached "aclPktCntLowerLimit" continuously
+ for "aclIterToEnableCoex", then ACL coexistence is
+ disabled.
+ -default 100 msecs
+ */
+
+ A_UINT32 aclPktCntLowerLimit; /* Acl Pkt Cnt to be received in duration of
+ "aclDetectTimeout" for
+ "aclIterForEnDis" times to enabling ACL coex.
+ Similar logic is used to disable acl coexistence.
+ (If "aclPktCntLowerLimit" cnt of acl pkts
+ are not seen for "aclIterForEnDis" iterations,
+ then acl coexistence is disabled).
+ default = 10
+ */
+
+ A_UINT32 aclIterForEnDis; /* number of Iteration of "aclPktCntLowerLimit" for Enabling and
+ Disabling Acl Coexistence.
+ default = 3
+ */
+
+ A_UINT32 aclPktCntUpperLimit; /* This is the upper-bound limit: if more than
+ "aclPktCntUpperLimit" pkts are seen in "aclDetectTimeout",
+ ACL coexistence is enabled right away.
+ - default 15*/
+
+ A_UINT32 aclCoexFlags; /* A2DP Option flags:
+ bits: meaning:
+ 0 Allow Close Range Optimization
+ 1 disable Firmware detection
+ (Currently supported configuration is aclCoexFlags =0)
+ */
+ A_UINT32 linkId; /* Applicable only for STE-BT - not used */
+
+}POSTPACK BTCOEX_ACLCOEX_CONFIG;
+
+typedef PREPACK struct {
+ A_UINT32 aclDataRespTimeout; /* Max duration firmware waits for downlink
+ by stomping on bluetooth
+ after ps-poll is acknowledged.
+ default = 20 ms */
+
+}POSTPACK BTCOEX_PSPOLLMODE_ACLCOEX_CONFIG;
+
+
+/* Not implemented yet*/
+typedef PREPACK struct {
+ A_UINT32 aclCoexMinlowRateMbps;
+ A_UINT32 aclCoexLowRateCnt;
+ A_UINT32 aclCoexHighPktRatio;
+ A_UINT32 aclCoexMaxAggrSize;
+ A_UINT32 aclPktStompCnt;
+}POSTPACK BTCOEX_OPTMODE_ACLCOEX_CONFIG;
+
+typedef PREPACK struct {
+ BTCOEX_ACLCOEX_CONFIG aclCoexConfig;
+ BTCOEX_PSPOLLMODE_ACLCOEX_CONFIG aclCoexPspollConfig;
+ BTCOEX_OPTMODE_ACLCOEX_CONFIG aclCoexOptConfig;
+}POSTPACK WMI_SET_BTCOEX_ACLCOEX_CONFIG_CMD;
+
+/* -----------WMI_SET_BTCOEX_BT_OPERATING_STATUS_CMDID ------------------*/
+typedef enum {
+ WMI_BTCOEX_BT_PROFILE_SCO =1,
+ WMI_BTCOEX_BT_PROFILE_A2DP,
+ WMI_BTCOEX_BT_PROFILE_INQUIRY_PAGE,
+ WMI_BTCOEX_BT_PROFILE_ACLCOEX,
+}WMI_BTCOEX_BT_PROFILE;
+
+typedef PREPACK struct {
+ A_UINT32 btProfileType;
+ A_UINT32 btOperatingStatus;
+ A_UINT32 btLinkId;
+}WMI_SET_BTCOEX_BT_OPERATING_STATUS_CMD;
+
+/*--------------------- WMI_SET_BTCOEX_DEBUG_CMDID ---------------------*/
+/* Used for firmware development and debugging */
+typedef PREPACK struct {
+ A_UINT32 btcoexDbgParam1;
+ A_UINT32 btcoexDbgParam2;
+ A_UINT32 btcoexDbgParam3;
+ A_UINT32 btcoexDbgParam4;
+ A_UINT32 btcoexDbgParam5;
+}WMI_SET_BTCOEX_DEBUG_CMD;
+
+/*---------------------WMI_GET_BTCOEX_CONFIG_CMDID --------------------- */
+/* Command to firmware to get configuration parameters of the bt profile
+ * reported via WMI_BTCOEX_CONFIG_EVENTID */
+typedef PREPACK struct {
+ A_UINT32 btProfileType; /* 1 - SCO
+ 2 - A2DP
+ 3 - INQUIRY_PAGE
+ 4 - ACLCOEX
+ */
+ A_UINT32 linkId; /* not used */
+}WMI_GET_BTCOEX_CONFIG_CMD;
+
+/*------------------WMI_REPORT_BTCOEX_CONFIG_EVENTID------------------- */
+/* Event from firmware to host, sent in response to WMI_GET_BTCOEX_CONFIG_CMDID
+ * */
+typedef PREPACK struct {
+ A_UINT32 btProfileType;
+ A_UINT32 linkId; /* not used */
+ PREPACK union {
+ WMI_SET_BTCOEX_SCO_CONFIG_CMD scoConfigCmd;
+ WMI_SET_BTCOEX_A2DP_CONFIG_CMD a2dpConfigCmd;
+ WMI_SET_BTCOEX_ACLCOEX_CONFIG_CMD aclcoexConfig;
+ WMI_SET_BTCOEX_BTINQUIRY_PAGE_CONFIG_CMD btinquiryPageConfigCmd;
+ } POSTPACK info;
+} POSTPACK WMI_BTCOEX_CONFIG_EVENT;
+
+/*------------- WMI_REPORT_BTCOEX_BTCOEX_STATS_EVENTID--------------------*/
+/* Used for firmware development and debugging*/
+typedef PREPACK struct {
+ A_UINT32 highRatePktCnt;
+ A_UINT32 firstBmissCnt;
+ A_UINT32 psPollFailureCnt;
+ A_UINT32 nullFrameFailureCnt;
+ A_UINT32 optModeTransitionCnt;
+}BTCOEX_GENERAL_STATS;
+
+typedef PREPACK struct {
+ A_UINT32 scoStompCntAvg;
+ A_UINT32 scoStompIn100ms;
+ A_UINT32 scoMaxContStomp;
+ A_UINT32 scoAvgNoRetries;
+ A_UINT32 scoMaxNoRetriesIn100ms;
+}BTCOEX_SCO_STATS;
+
+typedef PREPACK struct {
+ A_UINT32 a2dpBurstCnt;
+ A_UINT32 a2dpMaxBurstCnt;
+ A_UINT32 a2dpAvgIdletimeIn100ms;
+ A_UINT32 a2dpAvgStompCnt;
+}BTCOEX_A2DP_STATS;
+
+typedef PREPACK struct {
+ A_UINT32 aclPktCntInBtTime;
+ A_UINT32 aclStompCntInWlanTime;
+ A_UINT32 aclPktCntIn100ms;
+}BTCOEX_ACLCOEX_STATS;
+
+typedef PREPACK struct {
+ BTCOEX_GENERAL_STATS coexStats;
+ BTCOEX_SCO_STATS scoStats;
+ BTCOEX_A2DP_STATS a2dpStats;
+ BTCOEX_ACLCOEX_STATS aclCoexStats;
+}WMI_BTCOEX_STATS_EVENT;
+
+
+/*--------------------------END OF BTCOEX -------------------------------------*/
+typedef PREPACK struct {
+ A_UINT32 sleepState;
+}WMI_REPORT_SLEEP_STATE_EVENT;
+
+typedef enum {
+ WMI_REPORT_SLEEP_STATUS_IS_DEEP_SLEEP =0,
+ WMI_REPORT_SLEEP_STATUS_IS_AWAKE
+} WMI_REPORT_SLEEP_STATUS;
+typedef enum {
+ DISCONN_EVT_IN_RECONN = 0, /* default */
+ NO_DISCONN_EVT_IN_RECONN
+} TARGET_EVENT_REPORT_CONFIG;
+
+typedef PREPACK struct {
+ A_UINT32 evtConfig;
+} POSTPACK WMI_SET_TARGET_EVENT_REPORT_CMD;
+
+
+typedef PREPACK struct {
+ A_UINT16 cmd_buf_sz; /* HCI cmd buffer size */
+ A_UINT8 buf[1]; /* Absolute HCI cmd */
+} POSTPACK WMI_HCI_CMD;
+
+/*
+ * Command Replies
+ */
+
+/*
+ * WMI_GET_CHANNEL_LIST_CMDID reply
+ */
+typedef PREPACK struct {
+ A_UINT8 reserved1;
+ A_UINT8 numChannels; /* number of channels in reply */
+ A_UINT16 channelList[1]; /* channel in Mhz */
+} POSTPACK WMI_CHANNEL_LIST_REPLY;
+
+typedef enum {
+ A_SUCCEEDED = A_OK,
+ A_FAILED_DELETE_STREAM_DOESNOT_EXIST=250,
+ A_SUCCEEDED_MODIFY_STREAM=251,
+ A_FAILED_INVALID_STREAM = 252,
+ A_FAILED_MAX_THINSTREAMS = 253,
+ A_FAILED_CREATE_REMOVE_PSTREAM_FIRST = 254,
+} PSTREAM_REPLY_STATUS;
+
+typedef PREPACK struct {
+ A_UINT8 status; /* PSTREAM_REPLY_STATUS */
+ A_UINT8 txQueueNumber;
+ A_UINT8 rxQueueNumber;
+ A_UINT8 trafficClass;
+ A_UINT8 trafficDirection; /* DIR_TYPE */
+} POSTPACK WMI_CRE_PRIORITY_STREAM_REPLY;
+
+typedef PREPACK struct {
+ A_UINT8 status; /* PSTREAM_REPLY_STATUS */
+ A_UINT8 txQueueNumber;
+ A_UINT8 rxQueueNumber;
+ A_UINT8 trafficDirection; /* DIR_TYPE */
+ A_UINT8 trafficClass;
+} POSTPACK WMI_DEL_PRIORITY_STREAM_REPLY;
+
+/*
+ * List of Events (target to host)
+ */
+typedef enum {
+ WMI_READY_EVENTID = 0x1001,
+ WMI_CONNECT_EVENTID,
+ WMI_DISCONNECT_EVENTID,
+ WMI_BSSINFO_EVENTID,
+ WMI_CMDERROR_EVENTID,
+ WMI_REGDOMAIN_EVENTID,
+ WMI_PSTREAM_TIMEOUT_EVENTID,
+ WMI_NEIGHBOR_REPORT_EVENTID,
+ WMI_TKIP_MICERR_EVENTID,
+ WMI_SCAN_COMPLETE_EVENTID, /* 0x100a */
+ WMI_REPORT_STATISTICS_EVENTID,
+ WMI_RSSI_THRESHOLD_EVENTID,
+ WMI_ERROR_REPORT_EVENTID,
+ WMI_OPT_RX_FRAME_EVENTID,
+ WMI_REPORT_ROAM_TBL_EVENTID,
+ WMI_EXTENSION_EVENTID,
+ WMI_CAC_EVENTID,
+ WMI_SNR_THRESHOLD_EVENTID,
+ WMI_LQ_THRESHOLD_EVENTID,
+ WMI_TX_RETRY_ERR_EVENTID, /* 0x1014 */
+ WMI_REPORT_ROAM_DATA_EVENTID,
+ WMI_TEST_EVENTID,
+ WMI_APLIST_EVENTID,
+ WMI_GET_WOW_LIST_EVENTID,
+ WMI_GET_PMKID_LIST_EVENTID,
+ WMI_CHANNEL_CHANGE_EVENTID,
+ WMI_PEER_NODE_EVENTID,
+ WMI_PSPOLL_EVENTID,
+ WMI_DTIMEXPIRY_EVENTID,
+ WMI_WLAN_VERSION_EVENTID,
+ WMI_SET_PARAMS_REPLY_EVENTID,
+ WMI_ADDBA_REQ_EVENTID, /*0x1020 */
+ WMI_ADDBA_RESP_EVENTID,
+ WMI_DELBA_REQ_EVENTID,
+ WMI_TX_COMPLETE_EVENTID,
+ WMI_HCI_EVENT_EVENTID,
+ WMI_ACL_DATA_EVENTID,
+ WMI_REPORT_SLEEP_STATE_EVENTID,
+#ifdef WAPI_ENABLE
+ WMI_WAPI_REKEY_EVENTID,
+#endif
+ WMI_REPORT_BTCOEX_STATS_EVENTID,
+ WMI_REPORT_BTCOEX_CONFIG_EVENTID,
+ WMI_ACM_REJECT_EVENTID,
+ WMI_THIN_RESERVED_START_EVENTID = 0x8000,
+ /* Events in this range are reserved for thinmode
+ * See wmi_thin.h for actual definitions */
+ WMI_THIN_RESERVED_END_EVENTID = 0x8fff,
+
+} WMI_EVENT_ID;
+
+
+typedef enum {
+ WMI_11A_CAPABILITY = 1,
+ WMI_11G_CAPABILITY = 2,
+ WMI_11AG_CAPABILITY = 3,
+ WMI_11NA_CAPABILITY = 4,
+ WMI_11NG_CAPABILITY = 5,
+ WMI_11NAG_CAPABILITY = 6,
+ // END CAPABILITY
+ WMI_11N_CAPABILITY_OFFSET = (WMI_11NA_CAPABILITY - WMI_11A_CAPABILITY),
+} WMI_PHY_CAPABILITY;
+
+typedef PREPACK struct {
+ A_UINT8 macaddr[ATH_MAC_LEN];
+ A_UINT8 phyCapability; /* WMI_PHY_CAPABILITY */
+} POSTPACK WMI_READY_EVENT_1;
+
+typedef PREPACK struct {
+ A_UINT32 sw_version;
+ A_UINT32 abi_version;
+ A_UINT8 macaddr[ATH_MAC_LEN];
+ A_UINT8 phyCapability; /* WMI_PHY_CAPABILITY */
+} POSTPACK WMI_READY_EVENT_2;
+
+#if defined(ATH_TARGET)
+#ifdef AR6002_REV2
+#define WMI_READY_EVENT WMI_READY_EVENT_1 /* AR6002_REV2 target code */
+#else
+#define WMI_READY_EVENT WMI_READY_EVENT_2 /* AR6001, AR6002_REV4, AR6002_REV5 */
+#endif
+#else
+#define WMI_READY_EVENT WMI_READY_EVENT_2 /* host code */
+#endif
+
+
+/*
+ * Connect Event
+ */
+typedef PREPACK struct {
+ A_UINT16 channel;
+ A_UINT8 bssid[ATH_MAC_LEN];
+ A_UINT16 listenInterval;
+ A_UINT16 beaconInterval;
+ A_UINT32 networkType;
+ A_UINT8 beaconIeLen;
+ A_UINT8 assocReqLen;
+ A_UINT8 assocRespLen;
+ A_UINT8 assocInfo[1];
+} POSTPACK WMI_CONNECT_EVENT;
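+
+/*
+ * Host-side sketch (illustrative, not part of the WMI definitions): the
+ * three length fields above describe consecutive sections of the
+ * variable-length assocInfo blob, and a parser can slice it like this.
+ * The helper name and out-parameters are assumptions.
+ */
+static void wmi_connect_event_ies(const WMI_CONNECT_EVENT *ev,
+ const A_UINT8 **beaconIes,
+ const A_UINT8 **assocReqIes,
+ const A_UINT8 **assocRespIes)
+{
+ *beaconIes = ev->assocInfo; /* beaconIeLen bytes */
+ *assocReqIes = *beaconIes + ev->beaconIeLen; /* assocReqLen bytes */
+ *assocRespIes = *assocReqIes + ev->assocReqLen; /* assocRespLen bytes */
+}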
+
+/*
+ * Disconnect Event
+ */
+typedef enum {
+ NO_NETWORK_AVAIL = 0x01,
+ LOST_LINK = 0x02, /* bmiss */
+ DISCONNECT_CMD = 0x03,
+ BSS_DISCONNECTED = 0x04,
+ AUTH_FAILED = 0x05,
+ ASSOC_FAILED = 0x06,
+ NO_RESOURCES_AVAIL = 0x07,
+ CSERV_DISCONNECT = 0x08,
+ INVALID_PROFILE = 0x0a,
+ DOT11H_CHANNEL_SWITCH = 0x0b,
+ PROFILE_MISMATCH = 0x0c,
+ CONNECTION_EVICTED = 0x0d,
+ IBSS_MERGE = 0xe,
+} WMI_DISCONNECT_REASON;
+
+typedef PREPACK struct {
+ A_UINT16 protocolReasonStatus; /* reason code, see 802.11 spec. */
+ A_UINT8 bssid[ATH_MAC_LEN]; /* set if known */
+ A_UINT8 disconnectReason; /* see WMI_DISCONNECT_REASON */
+ A_UINT8 assocRespLen;
+ A_UINT8 assocInfo[1];
+} POSTPACK WMI_DISCONNECT_EVENT;
+
+/*
+ * BSS Info Event.
+ * Mechanism used to inform the host of the presence and characteristics of
+ * wireless networks. Consists of a bss info header followed by
+ * the beacon or probe-response frame body. The 802.11 header is not included.
+ */
+typedef enum {
+ BEACON_FTYPE = 0x1,
+ PROBERESP_FTYPE,
+ ACTION_MGMT_FTYPE,
+ PROBEREQ_FTYPE,
+} WMI_BI_FTYPE;
+
+enum {
+ BSS_ELEMID_CHANSWITCH = 0x01,
+ BSS_ELEMID_ATHEROS = 0x02,
+};
+
+typedef PREPACK struct {
+ A_UINT16 channel;
+ A_UINT8 frameType; /* see WMI_BI_FTYPE */
+ A_UINT8 snr;
+ A_INT16 rssi;
+ A_UINT8 bssid[ATH_MAC_LEN];
+ A_UINT32 ieMask;
+} POSTPACK WMI_BSS_INFO_HDR;
+
+/*
+ * BSS INFO HDR version 2.0
+ * With 6 bytes HTC header and 6 bytes of WMI header
+ * WMI_BSS_INFO_HDR cannot be accommodated in the removed 802.11 management
+ * header space.
+ * - Reduce the ieMask to 2 bytes as only two bit flags are used
+ * - Remove rssi and compute it on the host. rssi = snr - 95
+ */
+typedef PREPACK struct {
+ A_UINT16 channel;
+ A_UINT8 frameType; /* see WMI_BI_FTYPE */
+ A_UINT8 snr;
+ A_UINT8 bssid[ATH_MAC_LEN];
+ A_UINT16 ieMask;
+} POSTPACK WMI_BSS_INFO_HDR2;
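+
+/*
+ * Sketch (illustrative): per the comment above, the v2 header drops rssi
+ * and the host recomputes it from snr with an assumed -95 dBm noise floor.
+ */
+static A_INT16 wmi_bss_info_hdr2_rssi(const WMI_BSS_INFO_HDR2 *hdr2)
+{
+ return (A_INT16)(hdr2->snr - 95);
+}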
+
+/*
+ * Command Error Event
+ */
+typedef enum {
+ INVALID_PARAM = 0x01,
+ ILLEGAL_STATE = 0x02,
+ INTERNAL_ERROR = 0x03,
+} WMI_ERROR_CODE;
+
+typedef PREPACK struct {
+ A_UINT16 commandId;
+ A_UINT8 errorCode;
+} POSTPACK WMI_CMD_ERROR_EVENT;
+
+/*
+ * New Regulatory Domain Event
+ */
+typedef PREPACK struct {
+ A_UINT32 regDomain;
+} POSTPACK WMI_REG_DOMAIN_EVENT;
+
+typedef PREPACK struct {
+ A_UINT8 txQueueNumber;
+ A_UINT8 rxQueueNumber;
+ A_UINT8 trafficDirection;
+ A_UINT8 trafficClass;
+} POSTPACK WMI_PSTREAM_TIMEOUT_EVENT;
+
+typedef PREPACK struct {
+ A_UINT8 reserve1;
+ A_UINT8 reserve2;
+ A_UINT8 reserve3;
+ A_UINT8 trafficClass;
+} POSTPACK WMI_ACM_REJECT_EVENT;
+
+/*
+ * The WMI_NEIGHBOR_REPORT Event is generated by the target to inform
+ * the host of BSSs it has found that match the current profile.
+ * It can be used by the host to cache PMKs and/or to initiate pre-authentication
+ * if the BSS supports it. The first bssid is always the current associated
+ * BSS.
+ * The bssid and bssFlags information repeats according to the number
+ * of APs reported.
+ */
+typedef enum {
+ WMI_DEFAULT_BSS_FLAGS = 0x00,
+ WMI_PREAUTH_CAPABLE_BSS = 0x01,
+ WMI_PMKID_VALID_BSS = 0x02,
+} WMI_BSS_FLAGS;
+
+typedef PREPACK struct {
+ A_UINT8 bssid[ATH_MAC_LEN];
+ A_UINT8 bssFlags; /* see WMI_BSS_FLAGS */
+} POSTPACK WMI_NEIGHBOR_INFO;
+
+typedef PREPACK struct {
+ A_INT8 numberOfAps;
+ WMI_NEIGHBOR_INFO neighbor[1];
+} POSTPACK WMI_NEIGHBOR_REPORT_EVENT;
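+
+/*
+ * Sketch (illustrative) of walking a neighbor report on the host. The
+ * handler name is an assumption; the flag tests use the WMI_BSS_FLAGS
+ * values defined above.
+ */
+static void wmi_walk_neighbor_report(const WMI_NEIGHBOR_REPORT_EVENT *ev)
+{
+ A_INT8 i;
+
+ for (i = 0; i < ev->numberOfAps; i++) {
+ const WMI_NEIGHBOR_INFO *ni = &ev->neighbor[i];
+
+ if (ni->bssFlags & WMI_PREAUTH_CAPABLE_BSS) {
+ /* candidate for pre-authentication with ni->bssid */
+ }
+ if (ni->bssFlags & WMI_PMKID_VALID_BSS) {
+ /* a cached PMKID may be reused for ni->bssid */
+ }
+ }
+}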
+
+/*
+ * TKIP MIC Error Event
+ */
+typedef PREPACK struct {
+ A_UINT8 keyid;
+ A_UINT8 ismcast;
+} POSTPACK WMI_TKIP_MICERR_EVENT;
+
+/*
+ * WMI_SCAN_COMPLETE_EVENTID - no parameters (old), status parameter (new)
+ */
+typedef PREPACK struct {
+ A_INT32 status;
+} POSTPACK WMI_SCAN_COMPLETE_EVENT;
+
+#define MAX_OPT_DATA_LEN 1400
+
+/*
+ * WMI_SET_ADHOC_BSSID_CMDID
+ */
+typedef PREPACK struct {
+ A_UINT8 bssid[ATH_MAC_LEN];
+} POSTPACK WMI_SET_ADHOC_BSSID_CMD;
+
+/*
+ * WMI_SET_OPT_MODE_CMDID
+ */
+typedef enum {
+ SPECIAL_OFF,
+ SPECIAL_ON,
+} OPT_MODE_TYPE;
+
+typedef PREPACK struct {
+ A_UINT8 optMode;
+} POSTPACK WMI_SET_OPT_MODE_CMD;
+
+/*
+ * WMI_TX_OPT_FRAME_CMDID
+ */
+typedef enum {
+ OPT_PROBE_REQ = 0x01,
+ OPT_PROBE_RESP = 0x02,
+ OPT_CPPP_START = 0x03,
+ OPT_CPPP_STOP = 0x04,
+} WMI_OPT_FTYPE;
+
+typedef PREPACK struct {
+ A_UINT16 optIEDataLen;
+ A_UINT8 frmType;
+ A_UINT8 dstAddr[ATH_MAC_LEN];
+ A_UINT8 bssid[ATH_MAC_LEN];
+ A_UINT8 reserved; /* For alignment */
+ A_UINT8 optIEData[1];
+} POSTPACK WMI_OPT_TX_FRAME_CMD;
+
+/*
+ * Special frame receive Event.
+ * Mechanism used to inform the host of the reception of the special frames.
+ * Consists of special frame info header followed by special frame body.
+ * The 802.11 header is not included.
+ */
+typedef PREPACK struct {
+ A_UINT16 channel;
+ A_UINT8 frameType; /* see WMI_OPT_FTYPE */
+ A_INT8 snr;
+ A_UINT8 srcAddr[ATH_MAC_LEN];
+ A_UINT8 bssid[ATH_MAC_LEN];
+} POSTPACK WMI_OPT_RX_INFO_HDR;
+
+/*
+ * Reporting statistics.
+ */
+typedef PREPACK struct {
+ A_UINT32 tx_packets;
+ A_UINT32 tx_bytes;
+ A_UINT32 tx_unicast_pkts;
+ A_UINT32 tx_unicast_bytes;
+ A_UINT32 tx_multicast_pkts;
+ A_UINT32 tx_multicast_bytes;
+ A_UINT32 tx_broadcast_pkts;
+ A_UINT32 tx_broadcast_bytes;
+ A_UINT32 tx_rts_success_cnt;
+ A_UINT32 tx_packet_per_ac[4];
+ A_UINT32 tx_errors_per_ac[4];
+
+ A_UINT32 tx_errors;
+ A_UINT32 tx_failed_cnt;
+ A_UINT32 tx_retry_cnt;
+ A_UINT32 tx_mult_retry_cnt;
+ A_UINT32 tx_rts_fail_cnt;
+ A_INT32 tx_unicast_rate;
+}POSTPACK tx_stats_t;
+
+typedef PREPACK struct {
+ A_UINT32 rx_packets;
+ A_UINT32 rx_bytes;
+ A_UINT32 rx_unicast_pkts;
+ A_UINT32 rx_unicast_bytes;
+ A_UINT32 rx_multicast_pkts;
+ A_UINT32 rx_multicast_bytes;
+ A_UINT32 rx_broadcast_pkts;
+ A_UINT32 rx_broadcast_bytes;
+ A_UINT32 rx_fragment_pkt;
+
+ A_UINT32 rx_errors;
+ A_UINT32 rx_crcerr;
+ A_UINT32 rx_key_cache_miss;
+ A_UINT32 rx_decrypt_err;
+ A_UINT32 rx_duplicate_frames;
+ A_INT32 rx_unicast_rate;
+}POSTPACK rx_stats_t;
+
+typedef PREPACK struct {
+ A_UINT32 tkip_local_mic_failure;
+ A_UINT32 tkip_counter_measures_invoked;
+ A_UINT32 tkip_replays;
+ A_UINT32 tkip_format_errors;
+ A_UINT32 ccmp_format_errors;
+ A_UINT32 ccmp_replays;
+}POSTPACK tkip_ccmp_stats_t;
+
+typedef PREPACK struct {
+ A_UINT32 power_save_failure_cnt;
+ A_UINT16 stop_tx_failure_cnt;
+ A_UINT16 atim_tx_failure_cnt;
+ A_UINT16 atim_rx_failure_cnt;
+ A_UINT16 bcn_rx_failure_cnt;
+}POSTPACK pm_stats_t;
+
+typedef PREPACK struct {
+ A_UINT32 cs_bmiss_cnt;
+ A_UINT32 cs_lowRssi_cnt;
+ A_UINT16 cs_connect_cnt;
+ A_UINT16 cs_disconnect_cnt;
+ A_INT16 cs_aveBeacon_rssi;
+ A_UINT16 cs_roam_count;
+ A_INT16 cs_rssi;
+ A_UINT8 cs_snr;
+ A_UINT8 cs_aveBeacon_snr;
+ A_UINT8 cs_lastRoam_msec;
+} POSTPACK cserv_stats_t;
+
+typedef PREPACK struct {
+ tx_stats_t tx_stats;
+ rx_stats_t rx_stats;
+ tkip_ccmp_stats_t tkipCcmpStats;
+}POSTPACK wlan_net_stats_t;
+
+typedef PREPACK struct {
+ A_UINT32 arp_received;
+ A_UINT32 arp_matched;
+ A_UINT32 arp_replied;
+} POSTPACK arp_stats_t;
+
+typedef PREPACK struct {
+ A_UINT32 wow_num_pkts_dropped;
+ A_UINT16 wow_num_events_discarded;
+ A_UINT8 wow_num_host_pkt_wakeups;
+ A_UINT8 wow_num_host_event_wakeups;
+} POSTPACK wlan_wow_stats_t;
+
+typedef PREPACK struct {
+ A_UINT32 lqVal;
+ A_INT32 noise_floor_calibation;
+ pm_stats_t pmStats;
+ wlan_net_stats_t txrxStats;
+ wlan_wow_stats_t wowStats;
+ arp_stats_t arpStats;
+ cserv_stats_t cservStats;
+} POSTPACK WMI_TARGET_STATS;
+
+/*
+ * WMI_RSSI_THRESHOLD_EVENTID.
+ * Indicate RSSI events to the host. Events are indicated when we breach a
+ * threshold value.
+ */
+typedef enum{
+ WMI_RSSI_THRESHOLD1_ABOVE = 0,
+ WMI_RSSI_THRESHOLD2_ABOVE,
+ WMI_RSSI_THRESHOLD3_ABOVE,
+ WMI_RSSI_THRESHOLD4_ABOVE,
+ WMI_RSSI_THRESHOLD5_ABOVE,
+ WMI_RSSI_THRESHOLD6_ABOVE,
+ WMI_RSSI_THRESHOLD1_BELOW,
+ WMI_RSSI_THRESHOLD2_BELOW,
+ WMI_RSSI_THRESHOLD3_BELOW,
+ WMI_RSSI_THRESHOLD4_BELOW,
+ WMI_RSSI_THRESHOLD5_BELOW,
+ WMI_RSSI_THRESHOLD6_BELOW
+}WMI_RSSI_THRESHOLD_VAL;
+
+typedef PREPACK struct {
+ A_INT16 rssi;
+ A_UINT8 range;
+}POSTPACK WMI_RSSI_THRESHOLD_EVENT;
+
+/*
+ * WMI_ERROR_REPORT_EVENTID
+ */
+typedef enum{
+ WMI_TARGET_PM_ERR_FAIL = 0x00000001,
+ WMI_TARGET_KEY_NOT_FOUND = 0x00000002,
+ WMI_TARGET_DECRYPTION_ERR = 0x00000004,
+ WMI_TARGET_BMISS = 0x00000008,
+ WMI_PSDISABLE_NODE_JOIN = 0x00000010,
+ WMI_TARGET_COM_ERR = 0x00000020,
+ WMI_TARGET_FATAL_ERR = 0x00000040
+} WMI_TARGET_ERROR_VAL;
+
+typedef PREPACK struct {
+ A_UINT32 errorVal;
+}POSTPACK WMI_TARGET_ERROR_REPORT_EVENT;
+
+typedef PREPACK struct {
+ A_UINT8 retrys;
+}POSTPACK WMI_TX_RETRY_ERR_EVENT;
+
+typedef enum{
+ WMI_SNR_THRESHOLD1_ABOVE = 1,
+ WMI_SNR_THRESHOLD1_BELOW,
+ WMI_SNR_THRESHOLD2_ABOVE,
+ WMI_SNR_THRESHOLD2_BELOW,
+ WMI_SNR_THRESHOLD3_ABOVE,
+ WMI_SNR_THRESHOLD3_BELOW,
+ WMI_SNR_THRESHOLD4_ABOVE,
+ WMI_SNR_THRESHOLD4_BELOW
+} WMI_SNR_THRESHOLD_VAL;
+
+typedef PREPACK struct {
+ A_UINT8 range; /* WMI_SNR_THRESHOLD_VAL */
+ A_UINT8 snr;
+}POSTPACK WMI_SNR_THRESHOLD_EVENT;
+
+typedef enum{
+ WMI_LQ_THRESHOLD1_ABOVE = 1,
+ WMI_LQ_THRESHOLD1_BELOW,
+ WMI_LQ_THRESHOLD2_ABOVE,
+ WMI_LQ_THRESHOLD2_BELOW,
+ WMI_LQ_THRESHOLD3_ABOVE,
+ WMI_LQ_THRESHOLD3_BELOW,
+ WMI_LQ_THRESHOLD4_ABOVE,
+ WMI_LQ_THRESHOLD4_BELOW
+} WMI_LQ_THRESHOLD_VAL;
+
+typedef PREPACK struct {
+ A_INT32 lq;
+ A_UINT8 range; /* WMI_LQ_THRESHOLD_VAL */
+}POSTPACK WMI_LQ_THRESHOLD_EVENT;
+/*
+ * WMI_REPORT_ROAM_TBL_EVENTID
+ */
+#define MAX_ROAM_TBL_CAND 5
+
+typedef PREPACK struct {
+ A_INT32 roam_util;
+ A_UINT8 bssid[ATH_MAC_LEN];
+ A_INT8 rssi;
+ A_INT8 rssidt;
+ A_INT8 last_rssi;
+ A_INT8 util;
+ A_INT8 bias;
+ A_UINT8 reserved; /* For alignment */
+} POSTPACK WMI_BSS_ROAM_INFO;
+
+
+typedef PREPACK struct {
+ A_UINT16 roamMode;
+ A_UINT16 numEntries;
+ WMI_BSS_ROAM_INFO bssRoamInfo[1];
+} POSTPACK WMI_TARGET_ROAM_TBL;
+
+/*
+ * WMI_HCI_EVENT_EVENTID
+ */
+typedef PREPACK struct {
+ A_UINT16 evt_buf_sz; /* HCI event buffer size */
+ A_UINT8 buf[1]; /* HCI event */
+} POSTPACK WMI_HCI_EVENT;
+
+/*
+ * WMI_CAC_EVENTID
+ */
+typedef enum {
+ CAC_INDICATION_ADMISSION = 0x00,
+ CAC_INDICATION_ADMISSION_RESP = 0x01,
+ CAC_INDICATION_DELETE = 0x02,
+ CAC_INDICATION_NO_RESP = 0x03,
+}CAC_INDICATION;
+
+#define WMM_TSPEC_IE_LEN 63
+
+typedef PREPACK struct {
+ A_UINT8 ac;
+ A_UINT8 cac_indication;
+ A_UINT8 statusCode;
+ A_UINT8 tspecSuggestion[WMM_TSPEC_IE_LEN];
+}POSTPACK WMI_CAC_EVENT;
+
+/*
+ * WMI_APLIST_EVENTID
+ */
+
+typedef enum {
+ APLIST_VER1 = 1,
+} APLIST_VER;
+
+typedef PREPACK struct {
+ A_UINT8 bssid[ATH_MAC_LEN];
+ A_UINT16 channel;
+} POSTPACK WMI_AP_INFO_V1;
+
+typedef PREPACK union {
+ WMI_AP_INFO_V1 apInfoV1;
+} POSTPACK WMI_AP_INFO;
+
+typedef PREPACK struct {
+ A_UINT8 apListVer;
+ A_UINT8 numAP;
+ WMI_AP_INFO apList[1];
+} POSTPACK WMI_APLIST_EVENT;
+
+/*
+ * developer commands
+ */
+
+/*
+ * WMI_SET_BITRATE_CMDID
+ *
+ * Get bit rate cmd uses same definition as set bit rate cmd
+ */
+typedef enum {
+ RATE_AUTO = -1,
+ RATE_1Mb = 0,
+ RATE_2Mb = 1,
+ RATE_5_5Mb = 2,
+ RATE_11Mb = 3,
+ RATE_6Mb = 4,
+ RATE_9Mb = 5,
+ RATE_12Mb = 6,
+ RATE_18Mb = 7,
+ RATE_24Mb = 8,
+ RATE_36Mb = 9,
+ RATE_48Mb = 10,
+ RATE_54Mb = 11,
+ RATE_MCS_0_20 = 12,
+ RATE_MCS_1_20 = 13,
+ RATE_MCS_2_20 = 14,
+ RATE_MCS_3_20 = 15,
+ RATE_MCS_4_20 = 16,
+ RATE_MCS_5_20 = 17,
+ RATE_MCS_6_20 = 18,
+ RATE_MCS_7_20 = 19,
+ RATE_MCS_0_40 = 20,
+ RATE_MCS_1_40 = 21,
+ RATE_MCS_2_40 = 22,
+ RATE_MCS_3_40 = 23,
+ RATE_MCS_4_40 = 24,
+ RATE_MCS_5_40 = 25,
+ RATE_MCS_6_40 = 26,
+ RATE_MCS_7_40 = 27,
+} WMI_BIT_RATE;
+
+typedef PREPACK struct {
+ A_INT8 rateIndex; /* see WMI_BIT_RATE */
+ A_INT8 mgmtRateIndex;
+ A_INT8 ctlRateIndex;
+} POSTPACK WMI_BIT_RATE_CMD;
+
+
+typedef PREPACK struct {
+ A_INT8 rateIndex; /* see WMI_BIT_RATE */
+} POSTPACK WMI_BIT_RATE_REPLY;
+
+
+/*
+ * WMI_SET_FIXRATES_CMDID
+ *
+ * Get fix rates cmd uses same definition as set fix rates cmd
+ */
+#define FIX_RATE_1Mb ((A_UINT32)0x1)
+#define FIX_RATE_2Mb ((A_UINT32)0x2)
+#define FIX_RATE_5_5Mb ((A_UINT32)0x4)
+#define FIX_RATE_11Mb ((A_UINT32)0x8)
+#define FIX_RATE_6Mb ((A_UINT32)0x10)
+#define FIX_RATE_9Mb ((A_UINT32)0x20)
+#define FIX_RATE_12Mb ((A_UINT32)0x40)
+#define FIX_RATE_18Mb ((A_UINT32)0x80)
+#define FIX_RATE_24Mb ((A_UINT32)0x100)
+#define FIX_RATE_36Mb ((A_UINT32)0x200)
+#define FIX_RATE_48Mb ((A_UINT32)0x400)
+#define FIX_RATE_54Mb ((A_UINT32)0x800)
+#define FIX_RATE_MCS_0_20 ((A_UINT32)0x1000)
+#define FIX_RATE_MCS_1_20 ((A_UINT32)0x2000)
+#define FIX_RATE_MCS_2_20 ((A_UINT32)0x4000)
+#define FIX_RATE_MCS_3_20 ((A_UINT32)0x8000)
+#define FIX_RATE_MCS_4_20 ((A_UINT32)0x10000)
+#define FIX_RATE_MCS_5_20 ((A_UINT32)0x20000)
+#define FIX_RATE_MCS_6_20 ((A_UINT32)0x40000)
+#define FIX_RATE_MCS_7_20 ((A_UINT32)0x80000)
+#define FIX_RATE_MCS_0_40 ((A_UINT32)0x100000)
+#define FIX_RATE_MCS_1_40 ((A_UINT32)0x200000)
+#define FIX_RATE_MCS_2_40 ((A_UINT32)0x400000)
+#define FIX_RATE_MCS_3_40 ((A_UINT32)0x800000)
+#define FIX_RATE_MCS_4_40 ((A_UINT32)0x1000000)
+#define FIX_RATE_MCS_5_40 ((A_UINT32)0x2000000)
+#define FIX_RATE_MCS_6_40 ((A_UINT32)0x4000000)
+#define FIX_RATE_MCS_7_40 ((A_UINT32)0x8000000)
+
+typedef PREPACK struct {
+ A_UINT32 fixRateMask; /* see WMI_BIT_RATE */
+} POSTPACK WMI_FIX_RATES_CMD, WMI_FIX_RATES_REPLY;
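+
+/*
+ * Example (illustrative, not a WMI definition): a fixRateMask built from
+ * the FIX_RATE_... bits above that limits transmission to the mandatory
+ * 11g OFDM rates.
+ */
+#define WMI_FIX_RATES_EXAMPLE_11G_BASIC \
+ (FIX_RATE_6Mb | FIX_RATE_12Mb | FIX_RATE_24Mb)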
+
+typedef PREPACK struct {
+ A_UINT8 bEnableMask;
+ A_UINT8 frameType; /*type and subtype*/
+ A_UINT32 frameRateMask; /* see WMI_BIT_RATE */
+} POSTPACK WMI_FRAME_RATES_CMD, WMI_FRAME_RATES_REPLY;
+
+/*
+ * WMI_SET_RECONNECT_AUTH_MODE_CMDID
+ *
+ * Set authentication mode
+ */
+typedef enum {
+ RECONN_DO_AUTH = 0x00,
+ RECONN_NOT_AUTH = 0x01
+} WMI_AUTH_MODE;
+
+typedef PREPACK struct {
+ A_UINT8 mode;
+} POSTPACK WMI_SET_AUTH_MODE_CMD;
+
+/*
+ * WMI_SET_REASSOC_MODE_CMDID
+ *
+ * Set reassociation mode
+ */
+typedef enum {
+ REASSOC_DO_DISASSOC = 0x00,
+ REASSOC_DONOT_DISASSOC = 0x01
+} WMI_REASSOC_MODE;
+
+typedef PREPACK struct {
+ A_UINT8 mode;
+}POSTPACK WMI_SET_REASSOC_MODE_CMD;
+
+typedef enum {
+ ROAM_DATA_TIME = 1, /* Get The Roam Time Data */
+} ROAM_DATA_TYPE;
+
+typedef PREPACK struct {
+ A_UINT32 disassoc_time;
+ A_UINT32 no_txrx_time;
+ A_UINT32 assoc_time;
+ A_UINT32 allow_txrx_time;
+ A_UINT8 disassoc_bssid[ATH_MAC_LEN];
+ A_INT8 disassoc_bss_rssi;
+ A_UINT8 assoc_bssid[ATH_MAC_LEN];
+ A_INT8 assoc_bss_rssi;
+} POSTPACK WMI_TARGET_ROAM_TIME;
+
+typedef PREPACK struct {
+ PREPACK union {
+ WMI_TARGET_ROAM_TIME roamTime;
+ } POSTPACK u;
+ A_UINT8 roamDataType;
+} POSTPACK WMI_TARGET_ROAM_DATA;
+
+typedef enum {
+ WMI_WMM_DISABLED = 0,
+ WMI_WMM_ENABLED
+} WMI_WMM_STATUS;
+
+typedef PREPACK struct {
+ A_UINT8 status;
+}POSTPACK WMI_SET_WMM_CMD;
+
+typedef PREPACK struct {
+ A_UINT8 status;
+}POSTPACK WMI_SET_QOS_SUPP_CMD;
+
+typedef enum {
+ WMI_TXOP_DISABLED = 0,
+ WMI_TXOP_ENABLED
+} WMI_TXOP_CFG;
+
+typedef PREPACK struct {
+ A_UINT8 txopEnable;
+}POSTPACK WMI_SET_WMM_TXOP_CMD;
+
+typedef PREPACK struct {
+ A_UINT8 keepaliveInterval;
+} POSTPACK WMI_SET_KEEPALIVE_CMD;
+
+typedef PREPACK struct {
+ A_BOOL configured;
+ A_UINT8 keepaliveInterval;
+} POSTPACK WMI_GET_KEEPALIVE_CMD;
+
+/*
+ * Add Application specified IE to a management frame
+ */
+#define WMI_MAX_IE_LEN 255
+
+typedef PREPACK struct {
+ A_UINT8 mgmtFrmType; /* one of WMI_MGMT_FRAME_TYPE */
+ A_UINT8 ieLen; /* Length of the IE that should be added to the MGMT frame */
+ A_UINT8 ieInfo[1];
+} POSTPACK WMI_SET_APPIE_CMD;
+
+/*
+ * Notify the WSC registration status to the target
+ */
+#define WSC_REG_ACTIVE 1
+#define WSC_REG_INACTIVE 0
+/* Generic HAL interface for setting HAL parameters. */
+/* Add new Set HAL Param cmdIds here for newer params */
+typedef enum {
+ WHAL_SETCABTO_CMDID = 1,
+}WHAL_CMDID;
+
+typedef PREPACK struct {
+ A_UINT8 cabTimeOut;
+} POSTPACK WHAL_SETCABTO_PARAM;
+
+typedef PREPACK struct {
+ A_UINT8 whalCmdId;
+ A_UINT8 data[1];
+} POSTPACK WHAL_PARAMCMD;
+
+
+#define WOW_MAX_FILTER_LISTS 1 /*4*/
+#define WOW_MAX_FILTERS_PER_LIST 4
+#define WOW_PATTERN_SIZE 64
+#define WOW_MASK_SIZE 64
+
+#define MAC_MAX_FILTERS_PER_LIST 4
+
+typedef PREPACK struct {
+ A_UINT8 wow_valid_filter;
+ A_UINT8 wow_filter_id;
+ A_UINT8 wow_filter_size;
+ A_UINT8 wow_filter_offset;
+ A_UINT8 wow_filter_mask[WOW_MASK_SIZE];
+ A_UINT8 wow_filter_pattern[WOW_PATTERN_SIZE];
+} POSTPACK WOW_FILTER;
+
+
+typedef PREPACK struct {
+ A_UINT8 wow_valid_list;
+ A_UINT8 wow_list_id;
+ A_UINT8 wow_num_filters;
+ A_UINT8 wow_total_list_size;
+ WOW_FILTER list[WOW_MAX_FILTERS_PER_LIST];
+} POSTPACK WOW_FILTER_LIST;
+
+typedef PREPACK struct {
+ A_UINT8 valid_filter;
+ A_UINT8 mac_addr[ATH_MAC_LEN];
+} POSTPACK MAC_FILTER;
+
+
+typedef PREPACK struct {
+ A_UINT8 total_list_size;
+ A_UINT8 enable;
+ MAC_FILTER list[MAC_MAX_FILTERS_PER_LIST];
+} POSTPACK MAC_FILTER_LIST;
+
+#define MAX_IP_ADDRS 2
+typedef PREPACK struct {
+ A_UINT32 ips[MAX_IP_ADDRS]; /* IP in Network Byte Order */
+} POSTPACK WMI_SET_IP_CMD;
+
+typedef PREPACK struct {
+ A_BOOL awake;
+ A_BOOL asleep;
+} POSTPACK WMI_SET_HOST_SLEEP_MODE_CMD;
+
+typedef enum {
+ WOW_FILTER_SSID = 0x1
+} WMI_WOW_FILTER;
+
+typedef PREPACK struct {
+ A_BOOL enable_wow;
+ WMI_WOW_FILTER filter;
+ A_UINT16 hostReqDelay;
+} POSTPACK WMI_SET_WOW_MODE_CMD;
+
+typedef PREPACK struct {
+ A_UINT8 filter_list_id;
+} POSTPACK WMI_GET_WOW_LIST_CMD;
+
+/*
+ * WMI_GET_WOW_LIST_CMD reply
+ */
+typedef PREPACK struct {
+ A_UINT8 num_filters; /* number of patterns in reply */
+ A_UINT8 this_filter_num; /* this is filter # x of total num_filters */
+ A_UINT8 wow_mode;
+ A_UINT8 host_mode;
+ WOW_FILTER wow_filters[1];
+} POSTPACK WMI_GET_WOW_LIST_REPLY;
+
+typedef PREPACK struct {
+ A_UINT8 filter_list_id;
+ A_UINT8 filter_size;
+ A_UINT8 filter_offset;
+ A_UINT8 filter[1];
+} POSTPACK WMI_ADD_WOW_PATTERN_CMD;
+
+typedef PREPACK struct {
+ A_UINT16 filter_list_id;
+ A_UINT16 filter_id;
+} POSTPACK WMI_DEL_WOW_PATTERN_CMD;
+
+typedef PREPACK struct {
+ A_UINT8 macaddr[ATH_MAC_LEN];
+} POSTPACK WMI_SET_MAC_ADDRESS_CMD;
+
+/*
+ * WMI_SET_AKMP_PARAMS_CMD
+ */
+
+#define WMI_AKMP_MULTI_PMKID_EN 0x000001
+
+typedef PREPACK struct {
+ A_UINT32 akmpInfo;
+} POSTPACK WMI_SET_AKMP_PARAMS_CMD;
+
+typedef PREPACK struct {
+ A_UINT8 pmkid[WMI_PMKID_LEN];
+} POSTPACK WMI_PMKID;
+
+/*
+ * WMI_SET_PMKID_LIST_CMD
+ */
+#define WMI_MAX_PMKID_CACHE 8
+
+typedef PREPACK struct {
+ A_UINT32 numPMKID;
+ WMI_PMKID pmkidList[WMI_MAX_PMKID_CACHE];
+} POSTPACK WMI_SET_PMKID_LIST_CMD;
+
+/*
+ * WMI_GET_PMKID_LIST_CMD Reply
+ * Following the Number of PMKIDs is the list of PMKIDs
+ */
+typedef PREPACK struct {
+ A_UINT32 numPMKID;
+ A_UINT8 bssidList[ATH_MAC_LEN][1];
+ WMI_PMKID pmkidList[1];
+} POSTPACK WMI_PMKID_LIST_REPLY;
+
+typedef PREPACK struct {
+ A_UINT16 oldChannel;
+ A_UINT32 newChannel;
+} POSTPACK WMI_CHANNEL_CHANGE_EVENT;
+
+typedef PREPACK struct {
+ A_UINT32 version;
+} POSTPACK WMI_WLAN_VERSION_EVENT;
+
+
+/* WMI_ADDBA_REQ_EVENTID */
+typedef PREPACK struct {
+ A_UINT8 tid;
+ A_UINT8 win_sz;
+ A_UINT16 st_seq_no;
+ A_UINT8 status; /* f/w response for ADDBA Req; OK(0) or failure(!=0) */
+} POSTPACK WMI_ADDBA_REQ_EVENT;
+
+/* WMI_ADDBA_RESP_EVENTID */
+typedef PREPACK struct {
+ A_UINT8 tid;
+ A_UINT8 status; /* OK(0), failure (!=0) */
+ A_UINT16 amsdu_sz; /* Three values: Not supported(0), 3839, 8k */
+} POSTPACK WMI_ADDBA_RESP_EVENT;
+
+/* WMI_DELBA_EVENTID
+ * f/w received a DELBA for peer and processed it.
+ * Host is notified of this
+ */
+typedef PREPACK struct {
+ A_UINT8 tid;
+ A_UINT8 is_peer_initiator;
+ A_UINT16 reason_code;
+} POSTPACK WMI_DELBA_EVENT;
+
+
+#ifdef WAPI_ENABLE
+#define WAPI_REKEY_UCAST 1
+#define WAPI_REKEY_MCAST 2
+typedef PREPACK struct {
+ A_UINT8 type;
+ A_UINT8 macAddr[ATH_MAC_LEN];
+} POSTPACK WMI_WAPIREKEY_EVENT;
+#endif
+
+
+/* WMI_ALLOW_AGGR_CMDID
+ * Configures TIDs to allow ADDBA negotiations
+ * on each tid, in each direction
+ */
+typedef PREPACK struct {
+ A_UINT16 tx_allow_aggr; /* 16-bit mask to allow uplink ADDBA negotiation - bit position indicates tid*/
+ A_UINT16 rx_allow_aggr; /* 16-bit mask to allow downlink ADDBA negotiation - bit position indicates tid*/
+} POSTPACK WMI_ALLOW_AGGR_CMD;
+
+/* WMI_ADDBA_REQ_CMDID
+ * f/w starts performing ADDBA negotiations with peer
+ * on the given tid
+ */
+typedef PREPACK struct {
+ A_UINT8 tid;
+} POSTPACK WMI_ADDBA_REQ_CMD;
+
+/* WMI_DELBA_REQ_CMDID
+ * f/w tears down the BA agreement with the peer.
+ * is_sender_initiator indicates whether it is the tx or the rx side
+ */
+typedef PREPACK struct {
+ A_UINT8 tid;
+ A_UINT8 is_sender_initiator;
+
+} POSTPACK WMI_DELBA_REQ_CMD;
+
+#define PEER_NODE_JOIN_EVENT 0x00
+#define PEER_NODE_LEAVE_EVENT 0x01
+#define PEER_FIRST_NODE_JOIN_EVENT 0x10
+#define PEER_LAST_NODE_LEAVE_EVENT 0x11
+typedef PREPACK struct {
+ A_UINT8 eventCode;
+ A_UINT8 peerMacAddr[ATH_MAC_LEN];
+} POSTPACK WMI_PEER_NODE_EVENT;
+
+#define IEEE80211_FRAME_TYPE_MGT 0x00
+#define IEEE80211_FRAME_TYPE_CTL 0x04
+
+/*
+ * Transmit complete event data structure(s)
+ */
+
+
+typedef PREPACK struct {
+#define TX_COMPLETE_STATUS_SUCCESS 0
+#define TX_COMPLETE_STATUS_RETRIES 1
+#define TX_COMPLETE_STATUS_NOLINK 2
+#define TX_COMPLETE_STATUS_TIMEOUT 3
+#define TX_COMPLETE_STATUS_OTHER 4
+
+ A_UINT8 status; /* one of TX_COMPLETE_STATUS_... */
+ A_UINT8 pktID; /* packet ID to identify parent packet */
+ A_UINT8 rateIdx; /* rate index on successful transmission */
+ A_UINT8 ackFailures; /* number of ACK failures in tx attempt */
+#if 0 /* optional params currently omitted. */
+ A_UINT32 queueDelay; // usec delay measured Tx Start time - host delivery time
+ A_UINT32 mediaDelay; // usec delay measured ACK rx time - host delivery time
+#endif
+} POSTPACK TX_COMPLETE_MSG_V1; /* version 1 of tx complete msg */
+
+typedef PREPACK struct {
+ A_UINT8 numMessages; /* number of tx comp msgs following this struct */
+ A_UINT8 msgLen; /* length in bytes for each individual msg following this struct */
+ A_UINT8 msgType; /* version of tx complete msg data following this struct */
+ A_UINT8 reserved; /* individual messages follow this header */
+} POSTPACK WMI_TX_COMPLETE_EVENT;
+
+#define WMI_TXCOMPLETE_VERSION_1 (0x01)
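+
+/*
+ * Sketch (illustrative) of walking a tx-complete event on the host. The
+ * per-message array starts right after the event header; stepping by
+ * msgLen keeps the loop correct even if the target reports a larger
+ * message layout than the host was built against.
+ */
+static void wmi_walk_tx_complete(const WMI_TX_COMPLETE_EVENT *ev)
+{
+ const A_UINT8 *p = (const A_UINT8 *)(ev + 1);
+ A_UINT8 i;
+
+ if (ev->msgType != WMI_TXCOMPLETE_VERSION_1)
+ return; /* unknown message layout */
+
+ for (i = 0; i < ev->numMessages; i++, p += ev->msgLen) {
+ const TX_COMPLETE_MSG_V1 *msg = (const TX_COMPLETE_MSG_V1 *)p;
+
+ if (msg->status != TX_COMPLETE_STATUS_SUCCESS) {
+ /* match msg->pktID back to the parent packet and report it */
+ }
+ }
+}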
+
+
+/*
+ * ------- AP Mode definitions --------------
+ */
+
+/*
+ * !!! Warning !!!
+ * - Changing the following values requires recompiling both the driver and the firmware
+ */
+#ifdef AR6002_REV2
+#define AP_MAX_NUM_STA 4
+#else
+#define AP_MAX_NUM_STA 8
+#endif
+#define AP_ACL_SIZE 10
+#define IEEE80211_MAX_IE 256
+#define MCAST_AID 0xFF /* Special AID used to set the DTIM flag in the beacons */
+#define DEF_AP_COUNTRY_CODE "US "
+#define DEF_AP_WMODE_G WMI_11G_MODE
+#define DEF_AP_WMODE_AG WMI_11AG_MODE
+#define DEF_AP_DTIM 5
+#define DEF_BEACON_INTERVAL 100
+
+/* AP mode disconnect reasons */
+#define AP_DISCONNECT_STA_LEFT 101
+#define AP_DISCONNECT_FROM_HOST 102
+#define AP_DISCONNECT_COMM_TIMEOUT 103
+
+/*
+ * Used with WMI_AP_HIDDEN_SSID_CMDID
+ */
+#define HIDDEN_SSID_FALSE 0
+#define HIDDEN_SSID_TRUE 1
+typedef PREPACK struct {
+ A_UINT8 hidden_ssid;
+} POSTPACK WMI_AP_HIDDEN_SSID_CMD;
+
+/*
+ * Used with WMI_AP_ACL_POLICY_CMDID
+ */
+#define AP_ACL_DISABLE 0x00
+#define AP_ACL_ALLOW_MAC 0x01
+#define AP_ACL_DENY_MAC 0x02
+#define AP_ACL_RETAIN_LIST_MASK 0x80
+typedef PREPACK struct {
+ A_UINT8 policy;
+} POSTPACK WMI_AP_ACL_POLICY_CMD;
+
+/*
+ * Used with WMI_AP_ACL_MAC_LIST_CMDID
+ */
+#define ADD_MAC_ADDR 1
+#define DEL_MAC_ADDR 2
+typedef PREPACK struct {
+ A_UINT8 action;
+ A_UINT8 index;
+ A_UINT8 mac[ATH_MAC_LEN];
+ A_UINT8 wildcard;
+} POSTPACK WMI_AP_ACL_MAC_CMD;
+
+typedef PREPACK struct {
+ A_UINT16 index;
+ A_UINT8 acl_mac[AP_ACL_SIZE][ATH_MAC_LEN];
+ A_UINT8 wildcard[AP_ACL_SIZE];
+ A_UINT8 policy;
+} POSTPACK WMI_AP_ACL;
+
+/*
+ * Used with WMI_AP_SET_NUM_STA_CMDID
+ */
+typedef PREPACK struct {
+ A_UINT8 num_sta;
+} POSTPACK WMI_AP_SET_NUM_STA_CMD;
+
+/*
+ * Used with WMI_AP_SET_MLME_CMDID
+ */
+typedef PREPACK struct {
+ A_UINT8 mac[ATH_MAC_LEN];
+ A_UINT16 reason; /* 802.11 reason code */
+ A_UINT8 cmd; /* operation to perform */
+#define WMI_AP_MLME_ASSOC 1 /* associate station */
+#define WMI_AP_DISASSOC 2 /* disassociate station */
+#define WMI_AP_DEAUTH 3 /* deauthenticate station */
+#define WMI_AP_MLME_AUTHORIZE 4 /* authorize station */
+#define WMI_AP_MLME_UNAUTHORIZE 5 /* unauthorize station */
+} POSTPACK WMI_AP_SET_MLME_CMD;
+
+typedef PREPACK struct {
+ A_UINT32 period;
+} POSTPACK WMI_AP_CONN_INACT_CMD;
+
+typedef PREPACK struct {
+ A_UINT32 period_min;
+ A_UINT32 dwell_ms;
+} POSTPACK WMI_AP_PROT_SCAN_TIME_CMD;
+
+typedef PREPACK struct {
+ A_BOOL flag;
+ A_UINT16 aid;
+} POSTPACK WMI_AP_SET_PVB_CMD;
+
+#define WMI_DISABLE_REGULATORY_CODE "FF"
+
+typedef PREPACK struct {
+ A_UCHAR countryCode[3];
+} POSTPACK WMI_AP_SET_COUNTRY_CMD;
+
+typedef PREPACK struct {
+ A_UINT8 dtim;
+} POSTPACK WMI_AP_SET_DTIM_CMD;
+
+typedef PREPACK struct {
+ A_UINT8 band; /* specifies which band to apply these values */
+ A_UINT8 enable; /* allows 11n to be disabled on a per band basis */
+ A_UINT8 chan_width_40M_supported;
+ A_UINT8 short_GI_20MHz;
+ A_UINT8 short_GI_40MHz;
+ A_UINT8 intolerance_40MHz;
+ A_UINT8 max_ampdu_len_exp;
+} POSTPACK WMI_SET_HT_CAP_CMD;
+
+typedef PREPACK struct {
+ A_UINT8 sta_chan_width;
+} POSTPACK WMI_SET_HT_OP_CMD;
+
+typedef PREPACK struct {
+ A_UINT32 rateMasks[8];
+} POSTPACK WMI_SET_TX_SELECT_RATES_CMD;
+
+typedef PREPACK struct {
+ A_UINT32 sgiMask;
+ A_UINT8 sgiPERThreshold;
+} POSTPACK WMI_SET_TX_SGI_PARAM_CMD;
+
+#define DEFAULT_SGI_MASK 0x08080000
+#define DEFAULT_SGI_PER 10
+
+typedef PREPACK struct {
+ A_UINT32 rateField; /* 1 bit per rate corresponding to index */
+ A_UINT8 id;
+ A_UINT8 shortTrys;
+ A_UINT8 longTrys;
+ A_UINT8 reserved; /* padding */
+} POSTPACK WMI_SET_RATE_POLICY_CMD;
+
+typedef PREPACK struct {
+ A_UINT8 metaVersion; /* version of meta data for rx packets <0 = default> (0-7 = valid) */
+ A_UINT8 dot11Hdr; /* 1 == leave .11 header intact , 0 == replace .11 header with .3 <default> */
+ A_UINT8 defragOnHost; /* 1 == defragmentation is performed by host, 0 == performed by target <default> */
+ A_UINT8 reserved[1]; /* alignment */
+} POSTPACK WMI_RX_FRAME_FORMAT_CMD;
+
+
+typedef PREPACK struct {
+ A_UINT8 enable; /* 1 == device operates in thin mode , 0 == normal mode <default> */
+ A_UINT8 reserved[3];
+} POSTPACK WMI_SET_THIN_MODE_CMD;
+
+/* AP mode events */
+/* WMI_PS_POLL_EVENT */
+typedef PREPACK struct {
+ A_UINT16 aid;
+} POSTPACK WMI_PSPOLL_EVENT;
+
+typedef PREPACK struct {
+ A_UINT32 tx_bytes;
+ A_UINT32 tx_pkts;
+ A_UINT32 tx_error;
+ A_UINT32 tx_discard;
+ A_UINT32 rx_bytes;
+ A_UINT32 rx_pkts;
+ A_UINT32 rx_error;
+ A_UINT32 rx_discard;
+ A_UINT32 aid;
+} POSTPACK WMI_PER_STA_STAT;
+
+#define AP_GET_STATS 0
+#define AP_CLEAR_STATS 1
+
+typedef PREPACK struct {
+ A_UINT32 action;
+ WMI_PER_STA_STAT sta[AP_MAX_NUM_STA+1];
+} POSTPACK WMI_AP_MODE_STAT;
+#define WMI_AP_MODE_STAT_SIZE(numSta) (sizeof(A_UINT32) + ((numSta + 1) * sizeof(WMI_PER_STA_STAT)))
+
+#define AP_11BG_RATESET1 1
+#define AP_11BG_RATESET2 2
+#define DEF_AP_11BG_RATESET AP_11BG_RATESET1
+typedef PREPACK struct {
+ A_UINT8 rateset;
+} POSTPACK WMI_AP_SET_11BG_RATESET_CMD;
+/*
+ * End of AP mode definitions
+ */
+
+#ifndef ATH_TARGET
+#include "athendpack.h"
+#endif
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* _WMI_H_ */
diff --git a/drivers/staging/ath6kl/include/common/wmi_thin.h b/drivers/staging/ath6kl/include/common/wmi_thin.h
new file mode 100644
index 000000000000..35391edd20ac
--- /dev/null
+++ b/drivers/staging/ath6kl/include/common/wmi_thin.h
@@ -0,0 +1,347 @@
+//------------------------------------------------------------------------------
+// <copyright file="wmi_thin.h" company="Atheros">
+// Copyright (c) 2004-2010 Atheros Corporation. All rights reserved.
+//
+//
+// Permission to use, copy, modify, and/or distribute this software for any
+// purpose with or without fee is hereby granted, provided that the above
+// copyright notice and this permission notice appear in all copies.
+//
+// THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+// WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+// MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+// ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+// WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+// ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+// OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+//
+//
+//------------------------------------------------------------------------------
+//==============================================================================
+// Author(s): ="Atheros"
+//==============================================================================
+
+/*
+ * This file contains the definitions of the WMI protocol specified in the
+ * Wireless Module Interface (WMI). It includes definitions of all the
+ * commands and events. Commands are messages from the host to the WM.
+ * Events and Replies are messages from the WM to the host.
+ *
+ * Ownership of correctness with regard to WMI commands
+ * belongs to the host driver; the WM is not required to validate
+ * parameters for value or proper range, or to perform any other checking.
+ *
+ */
+
+#ifndef _WMI_THIN_H_
+#define _WMI_THIN_H_
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+
+typedef enum {
+ WMI_THIN_CONFIG_CMDID = 0x8000, // WMI_THIN_RESERVED_START
+ WMI_THIN_SET_MIB_CMDID,
+ WMI_THIN_GET_MIB_CMDID,
+ WMI_THIN_JOIN_CMDID,
+ /* add new CMDID's here */
+ WMI_THIN_RESERVED_END_CMDID = 0x8fff // WMI_THIN_RESERVED_END
+} WMI_THIN_COMMAND_ID;
+
+typedef enum{
+ TEMPLATE_FRM_FIRST = 0,
+ TEMPLATE_FRM_PROBE_REQ =TEMPLATE_FRM_FIRST,
+ TEMPLATE_FRM_BEACON,
+ TEMPLATE_FRM_PROBE_RESP,
+ TEMPLATE_FRM_NULL,
+ TEMPLATE_FRM_QOS_NULL,
+ TEMPLATE_FRM_PSPOLL,
+ TEMPLATE_FRM_MAX
+}WMI_TEMPLATE_FRM_TYPE;
+
+/* TEMPLATE_FRM_LEN... represent the maximum allowable
+ * data lengths (bytes) for each frame type */
+#define TEMPLATE_FRM_LEN_PROBE_REQ (256) /* Symbian dictates a minimum of 256 for these 3 frame types */
+#define TEMPLATE_FRM_LEN_BEACON (256)
+#define TEMPLATE_FRM_LEN_PROBE_RESP (256)
+#define TEMPLATE_FRM_LEN_NULL (32)
+#define TEMPLATE_FRM_LEN_QOS_NULL (32)
+#define TEMPLATE_FRM_LEN_PSPOLL (32)
+#define TEMPLATE_FRM_LEN_SUM (TEMPLATE_FRM_LEN_PROBE_REQ + TEMPLATE_FRM_LEN_BEACON + TEMPLATE_FRM_LEN_PROBE_RESP + \
+ TEMPLATE_FRM_LEN_NULL + TEMPLATE_FRM_LEN_QOS_NULL + TEMPLATE_FRM_LEN_PSPOLL)
+
+
+/* MAC Header Build Rules */
+/* These values allow the host to configure the
+ * target code that is responsible for constructing
+ * the MAC header. In cases where the MAC header
+ * is provided by the host framework, the target
+ * has a diminished responsibility over what fields
+ * it must write. This will vary from framework to framework.
+ * Symbian requires different behavior from MAC80211 which
+ * requires different behavior from MS Native Wifi. */
+#define WMI_WRT_VER_TYPE 0x00000001
+#define WMI_WRT_DURATION 0x00000002
+#define WMI_WRT_DIRECTION 0x00000004
+#define WMI_WRT_POWER 0x00000008
+#define WMI_WRT_WEP 0x00000010
+#define WMI_WRT_MORE 0x00000020
+#define WMI_WRT_BSSID 0x00000040
+#define WMI_WRT_QOS 0x00000080
+#define WMI_WRT_SEQNO 0x00000100
+#define WMI_GUARD_TX 0x00000200 /* prevents TX ops that are not allowed for a current state */
+#define WMI_WRT_DEFAULT_CONFIG (WMI_WRT_VER_TYPE | WMI_WRT_DURATION | WMI_WRT_DIRECTION | \
+ WMI_WRT_POWER | WMI_WRT_MORE | WMI_WRT_WEP | WMI_WRT_BSSID | \
+ WMI_WRT_QOS | WMI_WRT_SEQNO | WMI_GUARD_TX)
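+
+/*
+ * Example (illustrative): a host framework that fills in its own sequence
+ * numbers and QoS control field would clear those write-rules and keep
+ * the rest of the default set.
+ */
+#define WMI_WRT_EXAMPLE_HOST_SEQ_CONFIG \
+ (WMI_WRT_DEFAULT_CONFIG & ~(WMI_WRT_SEQNO | WMI_WRT_QOS))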
+
+/* WMI_THIN_CONFIG_TXCOMPLETE -- Used to configure the params and content for
+ * TX Complete messages that will come from the Target. These messages are
+ * disabled by default but can be enabled using this structure and the
+ * WMI_THIN_CONFIG_CMDID. */
+typedef PREPACK struct {
+ A_UINT8 version; /* the versioned type of messages to use or 0 to disable */
+ A_UINT8 countThreshold; /* msg count threshold triggering a tx complete message */
+ A_UINT16 timeThreshold; /* timeout interval in MSEC triggering a tx complete message */
+} POSTPACK WMI_THIN_CONFIG_TXCOMPLETE;
+
+/* WMI_THIN_CONFIG_DECRYPT_ERR -- Used to configure behavior for received frames
+ * that have decryption errors. The default behavior is to discard the frame
+ * without notification. Alternately, the MAC Header is forwarded to the host
+ * with the failed status. */
+typedef PREPACK struct {
+ A_UINT8 enable; /* 1 == send decrypt errors to the host, 0 == don't */
+ A_UINT8 reserved[3]; /* align padding */
+} POSTPACK WMI_THIN_CONFIG_DECRYPT_ERR;
+
+/* WMI_THIN_CONFIG_TX_MAC_RULES -- Used to configure behavior for transmitted
+ * frames that require partial MAC header construction. These rules
+ * are used by the target to indicate which fields need to be written. */
+typedef PREPACK struct {
+ A_UINT32 rules; /* combination of WMI_WRT_... values */
+} POSTPACK WMI_THIN_CONFIG_TX_MAC_RULES;
+
+/* WMI_THIN_CONFIG_RX_FILTER_RULES -- Used to configure behavior for received
+ * frames as to which frames should get forwarded to the host and which
+ * should get processed internally. */
+typedef PREPACK struct {
+ A_UINT32 rules; /* combination of WMI_FILT_... values */
+} POSTPACK WMI_THIN_CONFIG_RX_FILTER_RULES;
+
+/* WMI_THIN_CONFIG_CMD -- Used to contain some combination of the above
+ * WMI_THIN_CONFIG_... structures. The actual combination is indicated
+ * by the value of cfgField. Each bit in this field corresponds to
+ * one of the above structures. */
+typedef PREPACK struct {
+#define WMI_THIN_CFG_TXCOMP 0x00000001
+#define WMI_THIN_CFG_DECRYPT 0x00000002
+#define WMI_THIN_CFG_MAC_RULES 0x00000004
+#define WMI_THIN_CFG_FILTER_RULES 0x00000008
+ A_UINT32 cfgField; /* combination of WMI_THIN_CFG_... describes contents of config command */
+ A_UINT16 length; /* length in bytes of appended sub-commands */
+ A_UINT8 reserved[2]; /* align padding */
+} POSTPACK WMI_THIN_CONFIG_CMD;
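+
+/*
+ * Sketch (illustrative) of assembling a WMI_THIN_CONFIG_CMDID payload that
+ * carries a tx-complete config and MAC-header rules. The buffer handling
+ * and helper name are assumptions; A_MEMCPY is the copy macro from the
+ * OS abstraction headers.
+ */
+static A_UINT16 wmi_thin_build_config(A_UINT8 *buf,
+ const WMI_THIN_CONFIG_TXCOMPLETE *txc,
+ const WMI_THIN_CONFIG_TX_MAC_RULES *rules)
+{
+ WMI_THIN_CONFIG_CMD *cmd = (WMI_THIN_CONFIG_CMD *)buf;
+ A_UINT8 *p = buf + sizeof(*cmd);
+
+ cmd->cfgField = WMI_THIN_CFG_TXCOMP | WMI_THIN_CFG_MAC_RULES;
+ cmd->reserved[0] = cmd->reserved[1] = 0;
+
+ A_MEMCPY(p, txc, sizeof(*txc));
+ p += sizeof(*txc);
+ A_MEMCPY(p, rules, sizeof(*rules));
+ p += sizeof(*rules);
+
+ cmd->length = (A_UINT16)(p - (buf + sizeof(*cmd)));
+ return (A_UINT16)(p - buf); /* total bytes to hand to the WMI layer */
+}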
+
+/* MIB Access Identifiers tailored for Symbian. */
+enum {
+ MIB_ID_STA_MAC = 1, // [READONLY]
+ MIB_ID_RX_LIFE_TIME, // [NOT IMPLEMENTED]
+ MIB_ID_SLOT_TIME, // [READ/WRITE]
+ MIB_ID_RTS_THRESHOLD, // [READ/WRITE]
+ MIB_ID_CTS_TO_SELF, // [READ/WRITE]
+ MIB_ID_TEMPLATE_FRAME, // [WRITE ONLY]
+ MIB_ID_RXFRAME_FILTER, // [READ/WRITE]
+ MIB_ID_BEACON_FILTER_TABLE, // [WRITE ONLY]
+ MIB_ID_BEACON_FILTER, // [READ/WRITE]
+ MIB_ID_BEACON_LOST_COUNT, // [WRITE ONLY]
+ MIB_ID_RSSI_THRESHOLD, // [WRITE ONLY]
+ MIB_ID_HT_CAP, // [NOT IMPLEMENTED]
+ MIB_ID_HT_OP, // [NOT IMPLEMENTED]
+ MIB_ID_HT_2ND_BEACON, // [NOT IMPLEMENTED]
+ MIB_ID_HT_BLOCK_ACK, // [NOT IMPLEMENTED]
+ MIB_ID_PREAMBLE, // [READ/WRITE]
+ /*MIB_ID_GROUP_ADDR_TABLE,*/
+ /*MIB_ID_WEP_DEFAULT_KEY_ID */
+ /*MIB_ID_TX_POWER */
+ /*MIB_ID_ARP_IP_TABLE */
+ /*MIB_ID_SLEEP_MODE */
+ /*MIB_ID_WAKE_INTERVAL*/
+ /*MIB_ID_STAT_TABLE*/
+ /*MIB_ID_IBSS_PWR_SAVE*/
+ /*MIB_ID_COUNTERS_TABLE*/
+ /*MIB_ID_ETHERTYPE_FILTER*/
+ /*MIB_ID_BC_UDP_FILTER*/
+
+};
+
+typedef PREPACK struct {
+ A_UINT8 addr[ATH_MAC_LEN];
+} POSTPACK WMI_THIN_MIB_STA_MAC;
+
+typedef PREPACK struct {
+ A_UINT32 time; // units == msec
+} POSTPACK WMI_THIN_MIB_RX_LIFE_TIME;
+
+typedef PREPACK struct {
+ A_UINT8 enable; //1 = on, 0 = off
+} POSTPACK WMI_THIN_MIB_CTS_TO_SELF;
+
+typedef PREPACK struct {
+ A_UINT32 time; // units == usec
+} POSTPACK WMI_THIN_MIB_SLOT_TIME;
+
+typedef PREPACK struct {
+ A_UINT16 length; //units == bytes
+} POSTPACK WMI_THIN_MIB_RTS_THRESHOLD;
+
+typedef PREPACK struct {
+ A_UINT8 type; // type of frame
+ A_UINT8 rate; // tx rate to be used (one of WMI_BIT_RATE)
+ A_UINT16 length; // num bytes following this structure as the template data
+} POSTPACK WMI_THIN_MIB_TEMPLATE_FRAME;
+
+typedef PREPACK struct {
+#define FRAME_FILTER_PROMISCUOUS 0x00000001
+#define FRAME_FILTER_BSSID 0x00000002
+ A_UINT32 filterMask;
+} POSTPACK WMI_THIN_MIB_RXFRAME_FILTER;
+
+
+#define IE_FILTER_TREATMENT_CHANGE 1
+#define IE_FILTER_TREATMENT_APPEAR 2
+
+typedef PREPACK struct {
+ A_UINT8 ie;
+ A_UINT8 treatment;
+} POSTPACK WMI_THIN_MIB_BEACON_FILTER_TABLE;
+
+typedef PREPACK struct {
+ A_UINT8 ie;
+ A_UINT8 treatment;
+ A_UINT8 oui[3];
+ A_UINT8 type;
+ A_UINT16 version;
+} POSTPACK WMI_THIN_MIB_BEACON_FILTER_TABLE_OUI;
+
+typedef PREPACK struct {
+ A_UINT16 numElements;
+ A_UINT8 entrySize; // sizeof(WMI_THIN_MIB_BEACON_FILTER_TABLE) on the host CPU; may be 2 or 4
+ A_UINT8 reserved;
+} POSTPACK WMI_THIN_MIB_BEACON_FILTER_TABLE_HEADER;
+
+typedef PREPACK struct {
+ A_UINT32 count; /* num beacons between deliveries */
+ A_UINT8 enable;
+ A_UINT8 reserved[3];
+} POSTPACK WMI_THIN_MIB_BEACON_FILTER;
+
+typedef PREPACK struct {
+ A_UINT32 count; /* num consec lost beacons after which send event */
+} POSTPACK WMI_THIN_MIB_BEACON_LOST_COUNT;
+
+typedef PREPACK struct {
+ A_UINT8 rssi; /* the low threshold which can trigger an event warning */
+ A_UINT8 tolerance; /* the range above and below the threshold to prevent event flooding to the host. */
+ A_UINT8 count; /* the sample count of consecutive frames necessary to trigger an event. */
+ A_UINT8 reserved[1]; /* padding */
+} POSTPACK WMI_THIN_MIB_RSSI_THRESHOLD;
+
+
+typedef PREPACK struct {
+ A_UINT32 cap;
+ A_UINT32 rxRateField;
+ A_UINT32 beamForming;
+ A_UINT8 addr[ATH_MAC_LEN];
+ A_UINT8 enable;
+ A_UINT8 stbc;
+ A_UINT8 maxAMPDU;
+ A_UINT8 msduSpacing;
+ A_UINT8 mcsFeedback;
+ A_UINT8 antennaSelCap;
+} POSTPACK WMI_THIN_MIB_HT_CAP;
+
+typedef PREPACK struct {
+ A_UINT32 infoField;
+ A_UINT32 basicRateField;
+ A_UINT8 protection;
+ A_UINT8 secondChanneloffset;
+ A_UINT8 channelWidth;
+ A_UINT8 reserved;
+} POSTPACK WMI_THIN_MIB_HT_OP;
+
+typedef PREPACK struct {
+#define SECOND_BEACON_PRIMARY 1
+#define SECOND_BEACON_EITHER 2
+#define SECOND_BEACON_SECONDARY 3
+ A_UINT8 cfg;
+ A_UINT8 reserved[3]; /* padding */
+} POSTPACK WMI_THIN_MIB_HT_2ND_BEACON;
+
+typedef PREPACK struct {
+ A_UINT8 txTIDField;
+ A_UINT8 rxTIDField;
+ A_UINT8 reserved[2]; /* padding */
+} POSTPACK WMI_THIN_MIB_HT_BLOCK_ACK;
+
+typedef PREPACK struct {
+ A_UINT8 enableLong; // 1 == long preamble, 0 == short preamble
+ A_UINT8 reserved[3];
+} POSTPACK WMI_THIN_MIB_PREAMBLE;
+
+typedef PREPACK struct {
+ A_UINT16 length; /* the length in bytes of the appended MIB data */
+ A_UINT8 mibID; /* the ID of the MIB element being set */
+ A_UINT8 reserved; /* align padding */
+} POSTPACK WMI_THIN_SET_MIB_CMD;
+
+typedef PREPACK struct {
+ A_UINT8 mibID; /* the ID of the MIB element being set */
+ A_UINT8 reserved[3]; /* align padding */
+} POSTPACK WMI_THIN_GET_MIB_CMD;
+
+typedef PREPACK struct {
+ A_UINT32 basicRateMask; /* bit mask of basic rates */
+ A_UINT32 beaconIntval; /* TUs */
+ A_UINT16 atimWindow; /* TUs */
+ A_UINT16 channel; /* frequency in Mhz */
+ A_UINT8 networkType; /* INFRA_NETWORK | ADHOC_NETWORK */
+ A_UINT8 ssidLength; /* 0 - 32 */
+ A_UINT8 probe; /* != 0 : issue probe req at start */
+ A_UINT8 reserved; /* alignment */
+ A_UCHAR ssid[WMI_MAX_SSID_LEN];
+ A_UINT8 bssid[ATH_MAC_LEN];
+} POSTPACK WMI_THIN_JOIN_CMD;
+
+typedef PREPACK struct {
+ A_UINT16 dtim; /* dtim interval in num beacons */
+ A_UINT16 aid; /* 80211 AID from Assoc resp */
+} POSTPACK WMI_THIN_POST_ASSOC_CMD;
+
+typedef enum {
+ WMI_THIN_EVENTID_RESERVED_START = 0x8000,
+ WMI_THIN_GET_MIB_EVENTID,
+ WMI_THIN_JOIN_EVENTID,
+
+ /* Add new THIN EVENTID's here */
+ WMI_THIN_EVENTID_RESERVED_END = 0x8fff
+} WMI_THIN_EVENT_ID;
+
+/* Possible values for WMI_THIN_JOIN_EVENT.result */
+typedef enum {
+ WMI_THIN_JOIN_RES_SUCCESS = 0, // device has joined the network
+ WMI_THIN_JOIN_RES_FAIL, // device failed for unspecified reason
+ WMI_THIN_JOIN_RES_TIMEOUT, // device failed due to no beacon rx in time limit
+ WMI_THIN_JOIN_RES_BAD_PARAM, // device failed due to bad cmd param.
+}WMI_THIN_JOIN_RESULT;
+
+typedef PREPACK struct {
+ A_UINT8 result; /* the result of the join cmd. one of WMI_THIN_JOIN_RESULT */
+ A_UINT8 reserved[3]; /* alignment */
+} POSTPACK WMI_THIN_JOIN_EVENT;
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* _WMI_THIN_H_ */
diff --git a/drivers/staging/ath6kl/include/common/wmix.h b/drivers/staging/ath6kl/include/common/wmix.h
new file mode 100644
index 000000000000..87046e364bae
--- /dev/null
+++ b/drivers/staging/ath6kl/include/common/wmix.h
@@ -0,0 +1,279 @@
+//------------------------------------------------------------------------------
+// <copyright file="wmix.h" company="Atheros">
+// Copyright (c) 2004-2010 Atheros Corporation. All rights reserved.
+//
+//
+// Permission to use, copy, modify, and/or distribute this software for any
+// purpose with or without fee is hereby granted, provided that the above
+// copyright notice and this permission notice appear in all copies.
+//
+// THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+// WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+// MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+// ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+// WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+// ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+// OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+//
+//
+//------------------------------------------------------------------------------
+//==============================================================================
+// Author(s): ="Atheros"
+//==============================================================================
+
+/*
+ * This file contains extensions of the WMI protocol specified in the
+ * Wireless Module Interface (WMI). It includes definitions of all
+ * extended commands and events. Extensions include useful commands
+ * that are not directly related to wireless activities. They may
+ * be hardware-specific, and they might not be supported on all
+ * implementations.
+ *
+ * Extended WMIX commands are encapsulated in a WMI message with
+ * cmd=WMI_EXTENSION_CMD.
+ */
+
+#ifndef _WMIX_H_
+#define _WMIX_H_
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#ifndef ATH_TARGET
+#include "athstartpack.h"
+#endif
+
+#include "dbglog.h"
+
+/*
+ * Extended WMI commands are those that are needed during wireless
+ * operation, but which are not really wireless commands. This allows,
+ * for instance, platform-specific commands. Extended WMI commands are
+ * embedded in a WMI command message with WMI_COMMAND_ID=WMI_EXTENSION_CMDID.
+ * Extended WMI events are similarly embedded in a WMI event message with
+ * WMI_EVENT_ID=WMI_EXTENSION_EVENTID.
+ */
+typedef PREPACK struct {
+ A_UINT32 commandId;
+} POSTPACK WMIX_CMD_HDR;
+
+typedef enum {
+ WMIX_DSETOPEN_REPLY_CMDID = 0x2001,
+ WMIX_DSETDATA_REPLY_CMDID,
+ WMIX_GPIO_OUTPUT_SET_CMDID,
+ WMIX_GPIO_INPUT_GET_CMDID,
+ WMIX_GPIO_REGISTER_SET_CMDID,
+ WMIX_GPIO_REGISTER_GET_CMDID,
+ WMIX_GPIO_INTR_ACK_CMDID,
+ WMIX_HB_CHALLENGE_RESP_CMDID,
+ WMIX_DBGLOG_CFG_MODULE_CMDID,
+ WMIX_PROF_CFG_CMDID, /* 0x200a */
+ WMIX_PROF_ADDR_SET_CMDID,
+ WMIX_PROF_START_CMDID,
+ WMIX_PROF_STOP_CMDID,
+ WMIX_PROF_COUNT_GET_CMDID,
+} WMIX_COMMAND_ID;
+
+typedef enum {
+ WMIX_DSETOPENREQ_EVENTID = 0x3001,
+ WMIX_DSETCLOSE_EVENTID,
+ WMIX_DSETDATAREQ_EVENTID,
+ WMIX_GPIO_INTR_EVENTID,
+ WMIX_GPIO_DATA_EVENTID,
+ WMIX_GPIO_ACK_EVENTID,
+ WMIX_HB_CHALLENGE_RESP_EVENTID,
+ WMIX_DBGLOG_EVENTID,
+ WMIX_PROF_COUNT_EVENTID,
+} WMIX_EVENT_ID;
+
+/*
+ * =============DataSet support=================
+ */
+
+/*
+ * WMIX_DSETOPENREQ_EVENTID
+ * DataSet Open Request Event
+ */
+typedef PREPACK struct {
+ A_UINT32 dset_id;
+ A_UINT32 targ_dset_handle; /* echo'ed, not used by Host, */
+ A_UINT32 targ_reply_fn; /* echo'ed, not used by Host, */
+ A_UINT32 targ_reply_arg; /* echo'ed, not used by Host, */
+} POSTPACK WMIX_DSETOPENREQ_EVENT;
+
+/*
+ * WMIX_DSETCLOSE_EVENTID
+ * DataSet Close Event
+ */
+typedef PREPACK struct {
+ A_UINT32 access_cookie;
+} POSTPACK WMIX_DSETCLOSE_EVENT;
+
+/*
+ * WMIX_DSETDATAREQ_EVENTID
+ * DataSet Data Request Event
+ */
+typedef PREPACK struct {
+ A_UINT32 access_cookie;
+ A_UINT32 offset;
+ A_UINT32 length;
+ A_UINT32 targ_buf; /* echo'ed, not used by Host, */
+ A_UINT32 targ_reply_fn; /* echo'ed, not used by Host, */
+ A_UINT32 targ_reply_arg; /* echo'ed, not used by Host, */
+} POSTPACK WMIX_DSETDATAREQ_EVENT;
+
+typedef PREPACK struct {
+ A_UINT32 status;
+ A_UINT32 targ_dset_handle;
+ A_UINT32 targ_reply_fn;
+ A_UINT32 targ_reply_arg;
+ A_UINT32 access_cookie;
+ A_UINT32 size;
+ A_UINT32 version;
+} POSTPACK WMIX_DSETOPEN_REPLY_CMD;
+
+typedef PREPACK struct {
+ A_UINT32 status;
+ A_UINT32 targ_buf;
+ A_UINT32 targ_reply_fn;
+ A_UINT32 targ_reply_arg;
+ A_UINT32 length;
+ A_UINT8 buf[1];
+} POSTPACK WMIX_DSETDATA_REPLY_CMD;
+
+
+/*
+ * =============GPIO support=================
+ * All masks are 18-bit masks with bit N operating on GPIO pin N.
+ */
+
+#include "gpio.h"
+
+/*
+ * Set GPIO pin output state.
+ * In order for output to be driven, a pin must be enabled for output.
+ * This can be done during initialization through the GPIO Configuration
+ * DataSet, or during operation with the enable_mask.
+ *
+ * If a request is made to simultaneously set/clear or set/disable or
+ * clear/disable or disable/enable, results are undefined.
+ */
+typedef PREPACK struct {
+ A_UINT32 set_mask; /* pins to set */
+ A_UINT32 clear_mask; /* pins to clear */
+ A_UINT32 enable_mask; /* pins to enable for output */
+ A_UINT32 disable_mask; /* pins to disable/tristate */
+} POSTPACK WMIX_GPIO_OUTPUT_SET_CMD;
+
+/*
+ * Set a GPIO register. For debug/exceptional cases.
+ * Values for gpioreg_id are GPIO_REGISTER_IDs, defined in a
+ * platform-dependent header.
+ */
+typedef PREPACK struct {
+ A_UINT32 gpioreg_id; /* GPIO register ID */
+ A_UINT32 value; /* value to write */
+} POSTPACK WMIX_GPIO_REGISTER_SET_CMD;
+
+/* Get a GPIO register. For debug/exceptional cases. */
+typedef PREPACK struct {
+ A_UINT32 gpioreg_id; /* GPIO register to read */
+} POSTPACK WMIX_GPIO_REGISTER_GET_CMD;
+
+/*
+ * Host acknowledges and re-arms GPIO interrupts. A single
+ * message should be used to acknowledge all interrupts that
+ * were delivered in an earlier WMIX_GPIO_INTR_EVENT message.
+ */
+typedef PREPACK struct {
+ A_UINT32 ack_mask; /* interrupts to acknowledge */
+} POSTPACK WMIX_GPIO_INTR_ACK_CMD;
+
+/*
+ * Target informs Host of GPIO interrupts that have occurred since the
+ * last WMIX_GPIO_INTR_ACK_CMD was received. Additional information --
+ * the current GPIO input values -- is provided in order to support
+ * use of a GPIO interrupt as a Data Valid signal for other GPIO pins.
+ */
+typedef PREPACK struct {
+ A_UINT32 intr_mask; /* pending GPIO interrupts */
+ A_UINT32 input_values; /* recent GPIO input values */
+} POSTPACK WMIX_GPIO_INTR_EVENT;
+
+/*
+ * Target responds to Host's earlier WMIX_GPIO_INPUT_GET_CMDID request
+ * using a GPIO_DATA_EVENT with
+ * value set to the mask of GPIO pin inputs and
+ * reg_id set to GPIO_ID_NONE
+ *
+ *
+ * Target responds to Host's earlier WMIX_GPIO_REGISTER_GET_CMDID request
+ * using a GPIO_DATA_EVENT with
+ * value set to the value of the requested register and
+ * reg_id identifying the register (reflects the original request)
+ * NB: reg_id supports the future possibility of unsolicited
+ * WMIX_GPIO_DATA_EVENTs (for polling GPIO input), and it may
+ * simplify Host GPIO support.
+ */
+typedef PREPACK struct {
+ A_UINT32 value;
+ A_UINT32 reg_id;
+} POSTPACK WMIX_GPIO_DATA_EVENT;
+
+/*
+ * =============Error Detection support=================
+ */
+
+/*
+ * WMIX_HB_CHALLENGE_RESP_CMDID
+ * Heartbeat Challenge Response command
+ */
+typedef PREPACK struct {
+ A_UINT32 cookie;
+ A_UINT32 source;
+} POSTPACK WMIX_HB_CHALLENGE_RESP_CMD;
+
+/*
+ * WMIX_HB_CHALLENGE_RESP_EVENTID
+ * Heartbeat Challenge Response Event
+ */
+#define WMIX_HB_CHALLENGE_RESP_EVENT WMIX_HB_CHALLENGE_RESP_CMD
+
+typedef PREPACK struct {
+ struct dbglog_config_s config;
+} POSTPACK WMIX_DBGLOG_CFG_MODULE_CMD;
+
+/*
+ * =============Target Profiling support=================
+ */
+
+typedef PREPACK struct {
+ A_UINT32 period; /* Time (in 30.5us ticks) between samples */
+ A_UINT32 nbins;
+} POSTPACK WMIX_PROF_CFG_CMD;
+
+typedef PREPACK struct {
+ A_UINT32 addr;
+} POSTPACK WMIX_PROF_ADDR_SET_CMD;
+
+/*
+ * Target responds to Host's earlier WMIX_PROF_COUNT_GET_CMDID request
+ * using a WMIX_PROF_COUNT_EVENT with
+ * addr set to the next address
+ * count set to the corresponding count
+ */
+typedef PREPACK struct {
+ A_UINT32 addr;
+ A_UINT32 count;
+} POSTPACK WMIX_PROF_COUNT_EVENT;
+
+#ifndef ATH_TARGET
+#include "athendpack.h"
+#endif
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* _WMIX_H_ */
diff --git a/drivers/staging/ath6kl/include/common_drv.h b/drivers/staging/ath6kl/include/common_drv.h
new file mode 100644
index 000000000000..8ebb93d5f3c2
--- /dev/null
+++ b/drivers/staging/ath6kl/include/common_drv.h
@@ -0,0 +1,108 @@
+//------------------------------------------------------------------------------
+// Copyright (c) 2010 Atheros Corporation. All rights reserved.
+//
+//
+// Permission to use, copy, modify, and/or distribute this software for any
+// purpose with or without fee is hereby granted, provided that the above
+// copyright notice and this permission notice appear in all copies.
+//
+// THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+// WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+// MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+// ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+// WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+// ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+// OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+//
+//
+//------------------------------------------------------------------------------
+//==============================================================================
+// Author(s): ="Atheros"
+//==============================================================================
+#ifndef COMMON_DRV_H_
+#define COMMON_DRV_H_
+
+#include "hif.h"
+#include "htc_packet.h"
+#include "htc_api.h"
+
+/* Structure that holds the state information for the default credit distribution callback.
+ * Drivers should instantiate (and zero-initialize) this structure in their driver instance
+ * and pass it as a context to the HTC credit distribution functions. */
+typedef struct _COMMON_CREDIT_STATE_INFO {
+ int TotalAvailableCredits; /* total credits in the system at startup */
+ int CurrentFreeCredits; /* credits available in the pool that have not been
+ given out to endpoints */
+ HTC_ENDPOINT_CREDIT_DIST *pLowestPriEpDist; /* pointer to the lowest priority endpoint dist struct */
+} COMMON_CREDIT_STATE_INFO;
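+
+/*
+ * Usage sketch for the pattern described above (names are illustrative):
+ *
+ * static COMMON_CREDIT_STATE_INFO creditStateInfo; // zero-initialized
+ * ...
+ * ar6000_setup_credit_dist(htcHandle, &creditStateInfo);
+ */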
+
+typedef struct {
+ A_INT32 (*setupTransport)(void *ar);
+ void (*cleanupTransport)(void *ar);
+} HCI_TRANSPORT_CALLBACKS;
+
+typedef struct {
+ void *netDevice;
+ void *hifDevice;
+ void *htcHandle;
+} HCI_TRANSPORT_MISC_HANDLES;
+
+/* HTC TX packet tagging definitions */
+#define AR6K_CONTROL_PKT_TAG HTC_TX_PACKET_TAG_USER_DEFINED
+#define AR6K_DATA_PKT_TAG (AR6K_CONTROL_PKT_TAG + 1)
+
+#define AR6002_VERSION_REV1 0x20000086
+#define AR6002_VERSION_REV2 0x20000188
+#define AR6003_VERSION_REV1 0x300002ba
+#define AR6003_VERSION_REV2 0x30000384
+
+#define AR6002_CUST_DATA_SIZE 112
+#define AR6003_CUST_DATA_SIZE 16
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+/* OS-independent APIs */
+A_STATUS ar6000_setup_credit_dist(HTC_HANDLE HTCHandle, COMMON_CREDIT_STATE_INFO *pCredInfo);
+
+A_STATUS ar6000_ReadRegDiag(HIF_DEVICE *hifDevice, A_UINT32 *address, A_UINT32 *data);
+
+A_STATUS ar6000_WriteRegDiag(HIF_DEVICE *hifDevice, A_UINT32 *address, A_UINT32 *data);
+
+A_STATUS ar6000_ReadDataDiag(HIF_DEVICE *hifDevice, A_UINT32 address, A_UCHAR *data, A_UINT32 length);
+
+A_STATUS ar6000_reset_device(HIF_DEVICE *hifDevice, A_UINT32 TargetType, A_BOOL waitForCompletion, A_BOOL coldReset);
+
+void ar6000_dump_target_assert_info(HIF_DEVICE *hifDevice, A_UINT32 TargetType);
+
+A_STATUS ar6000_set_htc_params(HIF_DEVICE *hifDevice,
+ A_UINT32 TargetType,
+ A_UINT32 MboxIsrYieldValue,
+ A_UINT8 HtcControlBuffers);
+
+A_STATUS ar6000_prepare_target(HIF_DEVICE *hifDevice,
+ A_UINT32 TargetType,
+ A_UINT32 TargetVersion);
+
+A_STATUS ar6000_set_hci_bridge_flags(HIF_DEVICE *hifDevice,
+ A_UINT32 TargetType,
+ A_UINT32 Flags);
+
+void ar6000_copy_cust_data_from_target(HIF_DEVICE *hifDevice, A_UINT32 TargetType);
+
+A_UINT8 *ar6000_get_cust_data_buffer(A_UINT32 TargetType);
+
+A_STATUS ar6000_setBTState(void *context, A_UINT8 *pInBuf, A_UINT32 InBufSize);
+
+A_STATUS ar6000_setDevicePowerState(void *context, A_UINT8 *pInBuf, A_UINT32 InBufSize);
+
+A_STATUS ar6000_setWowMode(void *context, A_UINT8 *pInBuf, A_UINT32 InBufSize);
+
+A_STATUS ar6000_setHostMode(void *context, A_UINT8 *pInBuf, A_UINT32 InBufSize);
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /*COMMON_DRV_H_*/
diff --git a/drivers/staging/ath6kl/include/dbglog_api.h b/drivers/staging/ath6kl/include/dbglog_api.h
new file mode 100644
index 000000000000..a53aed316e3b
--- /dev/null
+++ b/drivers/staging/ath6kl/include/dbglog_api.h
@@ -0,0 +1,52 @@
+//------------------------------------------------------------------------------
+// <copyright file="dbglog_api.h" company="Atheros">
+// Copyright (c) 2004-2010 Atheros Corporation. All rights reserved.
+//
+//
+// Permission to use, copy, modify, and/or distribute this software for any
+// purpose with or without fee is hereby granted, provided that the above
+// copyright notice and this permission notice appear in all copies.
+//
+// THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+// WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+// MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+// ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+// WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+// ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+// OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+//
+//
+//------------------------------------------------------------------------------
+//==============================================================================
+// This file contains host side debug primitives.
+//
+// Author(s): ="Atheros"
+//==============================================================================
+#ifndef _DBGLOG_API_H_
+#define _DBGLOG_API_H_
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#include "dbglog.h"
+
+#define DBGLOG_HOST_LOG_BUFFER_SIZE DBGLOG_LOG_BUFFER_SIZE
+
+#define DBGLOG_GET_DBGID(arg) \
+ ((arg & DBGLOG_DBGID_MASK) >> DBGLOG_DBGID_OFFSET)
+
+#define DBGLOG_GET_MODULEID(arg) \
+ ((arg & DBGLOG_MODULEID_MASK) >> DBGLOG_MODULEID_OFFSET)
+
+#define DBGLOG_GET_NUMARGS(arg) \
+ ((arg & DBGLOG_NUM_ARGS_MASK) >> DBGLOG_NUM_ARGS_OFFSET)
+
+#define DBGLOG_GET_TIMESTAMP(arg) \
+ ((arg & DBGLOG_TIMESTAMP_MASK) >> DBGLOG_TIMESTAMP_OFFSET)
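+
+/*
+ * Sketch (illustrative) of unpacking one debug-log header word with the
+ * accessors above; A_PRINTF is the print macro from the OS abstraction
+ * layer, and the helper name is an assumption.
+ */
+static void dbglog_print_hdr(A_UINT32 hdr)
+{
+ A_PRINTF("dbglog: module %u dbgid %u nargs %u ts %u\n",
+ DBGLOG_GET_MODULEID(hdr),
+ DBGLOG_GET_DBGID(hdr),
+ DBGLOG_GET_NUMARGS(hdr),
+ DBGLOG_GET_TIMESTAMP(hdr));
+}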
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* _DBGLOG_API_H_ */
diff --git a/drivers/staging/ath6kl/include/dl_list.h b/drivers/staging/ath6kl/include/dl_list.h
new file mode 100644
index 000000000000..110e1d8b047d
--- /dev/null
+++ b/drivers/staging/ath6kl/include/dl_list.h
@@ -0,0 +1,153 @@
+//------------------------------------------------------------------------------
+// <copyright file="dl_list.h" company="Atheros">
+// Copyright (c) 2004-2010 Atheros Corporation. All rights reserved.
+//
+//
+// Permission to use, copy, modify, and/or distribute this software for any
+// purpose with or without fee is hereby granted, provided that the above
+// copyright notice and this permission notice appear in all copies.
+//
+// THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+// WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+// MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+// ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+// WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+// ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+// OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+//
+//
+//------------------------------------------------------------------------------
+//==============================================================================
+// Double-link list definitions (adapted from Atheros SDIO stack)
+//
+// Author(s): ="Atheros"
+//==============================================================================
+#ifndef __DL_LIST_H___
+#define __DL_LIST_H___
+
+#include "a_osapi.h"
+
+#define A_CONTAINING_STRUCT(address, struct_type, field_name)\
+ ((struct_type *)((unsigned long)(address) - (unsigned long)(&((struct_type *)0)->field_name)))
+
+/* list functions */
+/* pointers for the list */
+typedef struct _DL_LIST {
+ struct _DL_LIST *pPrev;
+ struct _DL_LIST *pNext;
+}DL_LIST, *PDL_LIST;
+/*
+ * DL_LIST_INIT - initialize a doubly linked list
+*/
+#define DL_LIST_INIT(pList)\
+ {(pList)->pPrev = pList; (pList)->pNext = pList;}
+
+/* faster macro to init list and add a single item */
+#define DL_LIST_INIT_AND_ADD(pList,pItem) \
+{ (pList)->pPrev = (pItem); \
+ (pList)->pNext = (pItem); \
+ (pItem)->pNext = (pList); \
+ (pItem)->pPrev = (pList); \
+}
+
+#define DL_LIST_IS_EMPTY(pList) (((pList)->pPrev == (pList)) && ((pList)->pNext == (pList)))
+#define DL_LIST_GET_ITEM_AT_HEAD(pList) (pList)->pNext
+#define DL_LIST_GET_ITEM_AT_TAIL(pList) (pList)->pPrev
+/*
+ * ITERATE_OVER_LIST - pStart is the list, pTemp is a temporary list member
+ * NOTE: do not use this macro if items in the list are deleted inside the
+ * iteration loop
+*/
+#define ITERATE_OVER_LIST(pStart, pTemp) \
+ for((pTemp) =(pStart)->pNext; pTemp != (pStart); (pTemp) = (pTemp)->pNext)
+
+
+/* safe iterate macro that allows the item to be removed from the list
+ * the iteration continues to the next item in the list
+ */
+#define ITERATE_OVER_LIST_ALLOW_REMOVE(pStart,pItem,st,offset) \
+{ \
+ PDL_LIST pTemp; \
+ pTemp = (pStart)->pNext; \
+ while (pTemp != (pStart)) { \
+ (pItem) = A_CONTAINING_STRUCT(pTemp,st,offset); \
+ pTemp = pTemp->pNext; \
+
+#define ITERATE_END }}
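+/*
+ * Illustrative sketch (hypothetical usage, not part of this API): removing
+ * items while walking a list with the safe-iterate pair above.
+ * "struct my_item", "my_list" and "pCurrent" are hypothetical names used only
+ * for this example.
+ *
+ *   struct my_item {
+ *       DL_LIST ListEntry;
+ *       int     Value;
+ *   };
+ *
+ *   DL_LIST         my_list;
+ *   struct my_item *pCurrent;
+ *
+ *   DL_LIST_INIT(&my_list);
+ *   ITERATE_OVER_LIST_ALLOW_REMOVE(&my_list, pCurrent, struct my_item, ListEntry)
+ *       if (pCurrent->Value == 0) {
+ *           DL_ListRemove(&pCurrent->ListEntry);  // safe: the iterator already advanced
+ *       }
+ *   ITERATE_END
+ */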
+
+/*
+ * DL_ListInsertTail - insert pAdd to the end of the list
+*/
+static INLINE PDL_LIST DL_ListInsertTail(PDL_LIST pList, PDL_LIST pAdd) {
+ /* insert at tail */
+ pAdd->pPrev = pList->pPrev;
+ pAdd->pNext = pList;
+ pList->pPrev->pNext = pAdd;
+ pList->pPrev = pAdd;
+ return pAdd;
+}
+
+/*
+ * DL_ListInsertHead - insert pAdd into the head of the list
+*/
+static INLINE PDL_LIST DL_ListInsertHead(PDL_LIST pList, PDL_LIST pAdd) {
+ /* insert at head */
+ pAdd->pPrev = pList;
+ pAdd->pNext = pList->pNext;
+ pList->pNext->pPrev = pAdd;
+ pList->pNext = pAdd;
+ return pAdd;
+}
+
+#define DL_ListAdd(pList,pItem) DL_ListInsertHead((pList),(pItem))
+/*
+ * DL_ListRemove - remove pDel from list
+*/
+static INLINE PDL_LIST DL_ListRemove(PDL_LIST pDel) {
+ pDel->pNext->pPrev = pDel->pPrev;
+ pDel->pPrev->pNext = pDel->pNext;
+ /* point back to itself just to be safe, in case remove is called again */
+ pDel->pNext = pDel;
+ pDel->pPrev = pDel;
+ return pDel;
+}
+
+/*
+ * DL_ListRemoveItemFromHead - get a list item from the head
+*/
+static INLINE PDL_LIST DL_ListRemoveItemFromHead(PDL_LIST pList) {
+ PDL_LIST pItem = NULL;
+ if (pList->pNext != pList) {
+ pItem = pList->pNext;
+ /* remove the first item from head */
+ DL_ListRemove(pItem);
+ }
+ return pItem;
+}
+
+static INLINE PDL_LIST DL_ListRemoveItemFromTail(PDL_LIST pList) {
+ PDL_LIST pItem = NULL;
+ if (pList->pPrev != pList) {
+ pItem = pList->pPrev;
+ /* remove the item from tail */
+ DL_ListRemove(pItem);
+ }
+ return pItem;
+}
+
+/* transfer src list items to the tail of the destination list */
+static INLINE void DL_ListTransferItemsToTail(PDL_LIST pDest, PDL_LIST pSrc) {
+ /* only concatenate if src is not empty */
+ if (!DL_LIST_IS_EMPTY(pSrc)) {
+ /* cut out circular list in src and re-attach to end of dest */
+ pSrc->pPrev->pNext = pDest;
+ pSrc->pNext->pPrev = pDest->pPrev;
+ pDest->pPrev->pNext = pSrc->pNext;
+ pDest->pPrev = pSrc->pPrev;
+ /* terminate src list, it is now empty */
+ pSrc->pPrev = pSrc;
+ pSrc->pNext = pSrc;
+ }
+}
+
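+/*
+ * Illustrative sketch (hypothetical usage, not part of this API): the insert
+ * and remove helpers above used as a simple FIFO queue. "struct my_item",
+ * "itemA"/"itemB" and "pending_q" are hypothetical names for this example only.
+ *
+ *   DL_LIST  pending_q;
+ *   PDL_LIST pEntry;
+ *
+ *   DL_LIST_INIT(&pending_q);
+ *   DL_ListInsertTail(&pending_q, &itemA.ListEntry);   // enqueue at the tail
+ *   DL_ListInsertTail(&pending_q, &itemB.ListEntry);
+ *   pEntry = DL_ListRemoveItemFromHead(&pending_q);    // dequeues itemA first
+ *   if (pEntry != NULL) {
+ *       struct my_item *pItem =
+ *           A_CONTAINING_STRUCT(pEntry, struct my_item, ListEntry);
+ *   }
+ */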
+#endif /* __DL_LIST_H___ */
diff --git a/drivers/staging/ath6kl/include/dset_api.h b/drivers/staging/ath6kl/include/dset_api.h
new file mode 100644
index 000000000000..0cc121fd25a0
--- /dev/null
+++ b/drivers/staging/ath6kl/include/dset_api.h
@@ -0,0 +1,65 @@
+//------------------------------------------------------------------------------
+// <copyright file="dset_api.h" company="Atheros">
+// Copyright (c) 2004-2010 Atheros Corporation. All rights reserved.
+//
+//
+// Permission to use, copy, modify, and/or distribute this software for any
+// purpose with or without fee is hereby granted, provided that the above
+// copyright notice and this permission notice appear in all copies.
+//
+// THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+// WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+// MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+// ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+// WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+// ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+// OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+//
+//
+//------------------------------------------------------------------------------
+//==============================================================================
+// Host-side DataSet API.
+//
+// Author(s): ="Atheros"
+//==============================================================================
+#ifndef _DSET_API_H_
+#define _DSET_API_H_
+
+#ifdef __cplusplus
+extern "C" {
+#endif /* __cplusplus */
+
+/*
+ * Host-side DataSet support is optional, and is not
+ * currently required for correct operation. To disable
+ * Host-side DataSet support, set this to 0.
+ */
+#ifndef CONFIG_HOST_DSET_SUPPORT
+#define CONFIG_HOST_DSET_SUPPORT 1
+#endif
+
+/* Called to send a DataSet Open Reply back to the Target. */
+A_STATUS wmi_dset_open_reply(struct wmi_t *wmip,
+ A_UINT32 status,
+ A_UINT32 access_cookie,
+ A_UINT32 size,
+ A_UINT32 version,
+ A_UINT32 targ_handle,
+ A_UINT32 targ_reply_fn,
+ A_UINT32 targ_reply_arg);
+
+/* Called to send a DataSet Data Reply back to the Target. */
+A_STATUS wmi_dset_data_reply(struct wmi_t *wmip,
+ A_UINT32 status,
+ A_UINT8 *host_buf,
+ A_UINT32 length,
+ A_UINT32 targ_buf,
+ A_UINT32 targ_reply_fn,
+ A_UINT32 targ_reply_arg);
+
+#ifdef __cplusplus
+}
+#endif /* __cplusplus */
+
+
+#endif /* _DSET_API_H_ */
diff --git a/drivers/staging/ath6kl/include/gpio_api.h b/drivers/staging/ath6kl/include/gpio_api.h
new file mode 100644
index 000000000000..96a150383358
--- /dev/null
+++ b/drivers/staging/ath6kl/include/gpio_api.h
@@ -0,0 +1,59 @@
+//------------------------------------------------------------------------------
+// <copyright file="gpio_api.h" company="Atheros">
+// Copyright (c) 2004-2010 Atheros Corporation. All rights reserved.
+//
+//
+// Permission to use, copy, modify, and/or distribute this software for any
+// purpose with or without fee is hereby granted, provided that the above
+// copyright notice and this permission notice appear in all copies.
+//
+// THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+// WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+// MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+// ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+// WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+// ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+// OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+//
+//
+//------------------------------------------------------------------------------
+//==============================================================================
+// Host-side General Purpose I/O API.
+//
+// Author(s): ="Atheros"
+//==============================================================================
+#ifndef _GPIO_API_H_
+#define _GPIO_API_H_
+
+/*
+ * Send a command to the Target in order to change output on GPIO pins.
+ */
+A_STATUS wmi_gpio_output_set(struct wmi_t *wmip,
+ A_UINT32 set_mask,
+ A_UINT32 clear_mask,
+ A_UINT32 enable_mask,
+ A_UINT32 disable_mask);
+
+/*
+ * Send a command to the Target requesting input state of GPIO pins.
+ */
+A_STATUS wmi_gpio_input_get(struct wmi_t *wmip);
+
+/*
+ * Send a command to the Target to change the value of a GPIO register.
+ */
+A_STATUS wmi_gpio_register_set(struct wmi_t *wmip,
+ A_UINT32 gpioreg_id,
+ A_UINT32 value);
+
+/*
+ * Send a command to the Target to fetch the value of a GPIO register.
+ */
+A_STATUS wmi_gpio_register_get(struct wmi_t *wmip, A_UINT32 gpioreg_id);
+
+/*
+ * Send a command to the Target, acknowledging some GPIO interrupts.
+ */
+A_STATUS wmi_gpio_intr_ack(struct wmi_t *wmip, A_UINT32 ack_mask);
+
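+/*
+ * Illustrative sketch (hypothetical usage, not part of this API): driving pin 0
+ * high and then requesting the current input state. "wmip" is assumed to be an
+ * already-initialized WMI handle; the input state is presumably reported back
+ * asynchronously by the Target.
+ *
+ *   A_STATUS status;
+ *
+ *   status = wmi_gpio_output_set(wmip, 0x1, 0x0, 0x1, 0x0);  // set + enable pin 0
+ *   if (status == A_OK)
+ *       status = wmi_gpio_input_get(wmip);
+ */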
+#endif /* _GPIO_API_H_ */
diff --git a/drivers/staging/ath6kl/include/hci_transport_api.h b/drivers/staging/ath6kl/include/hci_transport_api.h
new file mode 100644
index 000000000000..b5157ea5d9e9
--- /dev/null
+++ b/drivers/staging/ath6kl/include/hci_transport_api.h
@@ -0,0 +1,259 @@
+//------------------------------------------------------------------------------
+// Copyright (c) 2009-2010 Atheros Corporation. All rights reserved.
+//
+//
+// Permission to use, copy, modify, and/or distribute this software for any
+// purpose with or without fee is hereby granted, provided that the above
+// copyright notice and this permission notice appear in all copies.
+//
+// THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+// WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+// MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+// ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+// WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+// ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+// OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+//
+//
+//------------------------------------------------------------------------------
+//==============================================================================
+// Author(s): ="Atheros"
+//==============================================================================
+#ifndef _HCI_TRANSPORT_API_H_
+#define _HCI_TRANSPORT_API_H_
+
+ /* Bluetooth HCI packets are stored in HTC packet containers */
+#include "htc_packet.h"
+
+#ifdef __cplusplus
+extern "C" {
+#endif /* __cplusplus */
+
+typedef void *HCI_TRANSPORT_HANDLE;
+
+typedef HTC_ENDPOINT_ID HCI_TRANSPORT_PACKET_TYPE;
+
+ /* we map each HCI packet class to a static Endpoint ID */
+#define HCI_COMMAND_TYPE ENDPOINT_1
+#define HCI_EVENT_TYPE ENDPOINT_2
+#define HCI_ACL_TYPE ENDPOINT_3
+#define HCI_PACKET_INVALID ENDPOINT_MAX
+
+#define HCI_GET_PACKET_TYPE(pP) (pP)->Endpoint
+#define HCI_SET_PACKET_TYPE(pP,s) (pP)->Endpoint = (s)
+
+/* callback when an HCI packet was completely sent */
+typedef void (*HCI_TRANSPORT_SEND_PKT_COMPLETE)(void *, HTC_PACKET *);
+/* callback when an HCI packet is received */
+typedef void (*HCI_TRANSPORT_RECV_PKT)(void *, HTC_PACKET *);
+/* Optional receive buffer re-fill callback,
+ * On some OSes (like Linux) packets are allocated from a global pool and indicated up
+ * to the network stack. The driver never gets the packets back from the OS. For these OSes
+ * a refill callback can be used to allocate and re-queue buffers into HTC.
+ * A refill callback is used for the reception of ACL and EVENT packets. The caller must
+ * set the watermark trigger point to cause a refill.
+ */
+typedef void (*HCI_TRANSPORT_RECV_REFILL)(void *, HCI_TRANSPORT_PACKET_TYPE Type, int BuffersAvailable);
+/* Optional receive packet refill
+ * On some systems packet buffers are an extremely limited resource. Rather than
+ * queue largest-possible-sized buffers to the HCI bridge, some systems would rather
+ * allocate a specific size as the packet is received. The trade off is
+ * slightly more processing (callback invoked for each RX packet)
+ * for the benefit of committing fewer buffer resources into the bridge.
+ *
+ * The callback is provided the length of the pending packet to fetch. This includes the
+ * full transport header, HCI header, plus the length of payload. The callback can return a pointer to
+ * the allocated HTC packet for immediate use.
+ *
+ * NOTE*** This callback is mutually exclusive with the refill callback above.
+ */
+typedef HTC_PACKET *(*HCI_TRANSPORT_RECV_ALLOC)(void *, HCI_TRANSPORT_PACKET_TYPE Type, int Length);
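+/*
+ * Illustrative sketch (hypothetical, not part of this API): a minimal recv-alloc
+ * callback of the type above. The helpers my_alloc_buffer() and
+ * my_build_htc_packet() are hypothetical placeholders for whatever buffer and
+ * HTC packet container management the caller uses.
+ *
+ *   static HTC_PACKET *my_hci_recv_alloc(void *pContext,
+ *                                        HCI_TRANSPORT_PACKET_TYPE Type,
+ *                                        int Length)
+ *   {
+ *       // allocate a buffer sized exactly for the pending packet and wrap it
+ *       // in an HTC packet container for immediate use by the bridge
+ *       return my_build_htc_packet(my_alloc_buffer(Length), Length, Type);
+ *   }
+ */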
+
+typedef enum _HCI_SEND_FULL_ACTION {
+ HCI_SEND_FULL_KEEP = 0, /* packet that overflowed should be kept in the queue */
+ HCI_SEND_FULL_DROP = 1, /* packet that overflowed should be dropped */
+} HCI_SEND_FULL_ACTION;
+
+/* callback when an HCI send queue exceeds the caller's MaxSendQueueDepth threshold,
+ * the callback must return the send full action to take (either DROP or KEEP) */
+typedef HCI_SEND_FULL_ACTION (*HCI_TRANSPORT_SEND_FULL)(void *, HTC_PACKET *);
+
+typedef struct {
+ int HeadRoom; /* number of bytes in front of HCI packet for header space */
+ int TailRoom; /* number of bytes at the end of the HCI packet for tail space */
+ int IOBlockPad; /* I/O block padding required (always a power of 2) */
+} HCI_TRANSPORT_PROPERTIES;
+
+typedef struct _HCI_TRANSPORT_CONFIG_INFO {
+ int ACLRecvBufferWaterMark; /* low watermark to trigger recv refill */
+ int EventRecvBufferWaterMark; /* low watermark to trigger recv refill */
+ int MaxSendQueueDepth; /* max number of packets in the single send queue */
+ void *pContext; /* context for all callbacks */
+ void (*TransportFailure)(void *pContext, A_STATUS Status); /* transport failure callback */
+ A_STATUS (*TransportReady)(HCI_TRANSPORT_HANDLE, HCI_TRANSPORT_PROPERTIES *,void *pContext); /* transport is ready */
+ void (*TransportRemoved)(void *pContext); /* transport was removed */
+ /* packet processing callbacks */
+ HCI_TRANSPORT_SEND_PKT_COMPLETE pHCISendComplete;
+ HCI_TRANSPORT_RECV_PKT pHCIPktRecv;
+ HCI_TRANSPORT_RECV_REFILL pHCIPktRecvRefill;
+ HCI_TRANSPORT_RECV_ALLOC pHCIPktRecvAlloc;
+ HCI_TRANSPORT_SEND_FULL pHCISendFull;
+} HCI_TRANSPORT_CONFIG_INFO;
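+/*
+ * Illustrative sketch (hypothetical, not part of this API): filling in a few
+ * representative fields before attaching. The callbacks, context and
+ * "htcHandle" are hypothetical; the remaining required callbacks are omitted
+ * for brevity.
+ *
+ *   HCI_TRANSPORT_CONFIG_INFO config = { 0 };
+ *   HCI_TRANSPORT_HANDLE hciHandle;
+ *
+ *   config.pContext          = pMyContext;
+ *   config.MaxSendQueueDepth = 32;                 // arbitrary example depth
+ *   config.pHCISendComplete  = my_send_complete;
+ *   config.pHCIPktRecv       = my_pkt_recv;
+ *   config.pHCIPktRecvRefill = my_recv_refill;     // exclusive with pHCIPktRecvAlloc
+ *   hciHandle = HCI_TransportAttach(htcHandle, &config);
+ */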
+
+/* ------ Function Prototypes ------ */
+/*+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
+ @desc: Attach to the HCI transport module
+ @function name: HCI_TransportAttach
+ @input: HTCHandle - HTC handle (see HTC apis)
+ pInfo - initialization information
+ @output:
+ @return: HCI_TRANSPORT_HANDLE on success, NULL on failure
+ @notes: The HTC module provides HCI transport services.
+ @example:
+ @see also: HCI_TransportDetach
++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++*/
+HCI_TRANSPORT_HANDLE HCI_TransportAttach(void *HTCHandle, HCI_TRANSPORT_CONFIG_INFO *pInfo);
+
+/*+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
+ @desc: Detach from the HCI transport module
+ @function name: HCI_TransportDetach
+ @input: HciTrans - HCI transport handle
+ pInfo - initialization information
+ @output:
+ @return:
+ @notes:
+ @example:
+ @see also:
++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++*/
+void HCI_TransportDetach(HCI_TRANSPORT_HANDLE HciTrans);
+
+/*+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
+ @desc: Add receive packets to the HCI transport
+ @function name: HCI_TransportAddReceivePkts
+ @input: HciTrans - HCI transport handle
+ pQueue - a queue holding one or more packets
+ @output:
+ @return: A_OK on success
+ @notes: user must supply HTC packets for capturing incoming HCI packets. The caller
+ must initialize each HTC packet using the SET_HTC_PACKET_INFO_RX_REFILL()
+ macro. Each packet in the queue must be of the same type and length
+ @example:
+ @see also:
++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++*/
+A_STATUS HCI_TransportAddReceivePkts(HCI_TRANSPORT_HANDLE HciTrans, HTC_PACKET_QUEUE *pQueue);
+
+/*+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
+ @desc: Send an HCI packet
+ @function name: HCI_TransportSendPkt
+ @input: HciTrans - HCI transport handle
+ pPacket - packet to send
+ Synchronous - send the packet synchronously (blocking)
+ @output:
+ @return: A_OK
+ @notes: Caller must initialize packet using SET_HTC_PACKET_INFO_TX() and
+ HCI_SET_PACKET_TYPE() macros to prepare the packet.
+ If Synchronous is set to FALSE the call is fully asynchronous. On error or completion,
+ the registered send complete callback will be called.
+ If Synchronous is set to TRUE, the call will block until the packet is sent; if the
+ interface cannot send the packet within a 2 second timeout, the function will return
+ the failure code A_EBUSY.
+
+ Synchronous Mode should only be used at start-up to initialize the HCI device using
+ custom HCI commands. It should NOT be mixed with Asynchronous operations. Mixed synchronous
+ and asynchronous operation behavior is undefined.
+
+ @example:
+ @see also:
++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++*/
+A_STATUS HCI_TransportSendPkt(HCI_TRANSPORT_HANDLE HciTrans, HTC_PACKET *pPacket, A_BOOL Synchronous);
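+/*
+ * Illustrative sketch (hypothetical, not part of this API): a blocking send of
+ * a vendor HCI command during start-up, per the notes above. "hciHandle",
+ * "pPacket" and the packet contents are hypothetical and assumed to be already
+ * prepared with SET_HTC_PACKET_INFO_TX().
+ *
+ *   A_STATUS status;
+ *
+ *   HCI_SET_PACKET_TYPE(pPacket, HCI_COMMAND_TYPE);
+ *   status = HCI_TransportSendPkt(hciHandle, pPacket, TRUE);  // blocks, ~2 second timeout
+ *   if (status != A_OK) {
+ *       // A_EBUSY indicates the synchronous timeout expired
+ *   }
+ */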
+
+
+/*+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
+ @desc: Stop HCI transport
+ @function name: HCI_TransportStop
+ @input: HciTrans - hci transport handle
+ @output:
+ @return:
+ @notes: HCI transport communication will be halted. All receive and pending TX packets will
+ be flushed.
+ @example:
+ @see also:
++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++*/
+void HCI_TransportStop(HCI_TRANSPORT_HANDLE HciTrans);
+
+/*+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
+ @desc: Start the HCI transport
+ @function name: HCI_TransportStart
+ @input: HciTrans - hci transport handle
+ @output:
+ @return: A_OK on success
+ @notes: HCI transport communication will begin, the caller can expect the arrival
+ of HCI recv packets as soon as this call returns.
+ @example:
+ @see also:
++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++*/
+A_STATUS HCI_TransportStart(HCI_TRANSPORT_HANDLE HciTrans);
+
+/*+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
+ @desc: Enable or Disable Asynchronous Recv
+ @function name: HCI_TransportEnableDisableAsyncRecv
+ @input: HciTrans - hci transport handle
+ Enable - enable or disable asynchronous recv
+ @output:
+ @return: A_OK on success
+ @notes: This API must be called when HCI recv is handled synchronously
+ @example:
+ @see also:
++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++*/
+A_STATUS HCI_TransportEnableDisableAsyncRecv(HCI_TRANSPORT_HANDLE HciTrans, A_BOOL Enable);
+
+/*+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
+ @desc: Receive an event packet from the HCI transport synchronously using polling
+ @function name: HCI_TransportRecvHCIEventSync
+ @input: HciTrans - hci transport handle
+ pPacket - HTC packet to hold the recv data
+ MaxPollMS - maximum polling duration in milliseconds
+ @output:
+ @return: A_OK on success
+ @notes: This API should be used only during HCI device initialization, the caller must call
+ HCI_TransportEnableDisableAsyncRecv with Enable=FALSE prior to using this API.
+ This API will only capture HCI Event packets.
+ @example:
+ @see also: HCI_TransportEnableDisableAsyncRecv
++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++*/
+A_STATUS HCI_TransportRecvHCIEventSync(HCI_TRANSPORT_HANDLE HciTrans,
+ HTC_PACKET *pPacket,
+ int MaxPollMS);
+
+/*+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
+ @desc: Set the desired baud rate for the underlying transport layer
+ @function name: HCI_TransportSetBaudRate
+ @input: HciTrans - hci transport handle
+ Baud - baud rate in bps
+ @output:
+ @return: A_OK on success
+ @notes: This API should be used only after HCI device initialization
+ @example:
+ @see also:
++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++*/
+A_STATUS HCI_TransportSetBaudRate(HCI_TRANSPORT_HANDLE HciTrans, A_UINT32 Baud);
+
+/*+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
+ @desc: Enable/Disable HCI Transport Power Management
+ @function name: HCI_TransportEnablePowerMgmt
+ @input: HciTrans - hci transport handle
+ Enable - 1 = Enable, 0 = Disable
+ @output:
+ @return: A_OK on success
+ @notes:
+ @example:
+ @see also:
++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++*/
+A_STATUS HCI_TransportEnablePowerMgmt(HCI_TRANSPORT_HANDLE HciTrans, A_BOOL Enable);
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* _HCI_TRANSPORT_API_H_ */
diff --git a/drivers/staging/ath6kl/include/hif.h b/drivers/staging/ath6kl/include/hif.h
new file mode 100644
index 000000000000..2a082678512c
--- /dev/null
+++ b/drivers/staging/ath6kl/include/hif.h
@@ -0,0 +1,458 @@
+//------------------------------------------------------------------------------
+// <copyright file="hif.h" company="Atheros">
+// Copyright (c) 2004-2010 Atheros Corporation. All rights reserved.
+//
+//
+// Permission to use, copy, modify, and/or distribute this software for any
+// purpose with or without fee is hereby granted, provided that the above
+// copyright notice and this permission notice appear in all copies.
+//
+// THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+// WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+// MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+// ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+// WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+// ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+// OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+//
+//
+//------------------------------------------------------------------------------
+//==============================================================================
+// HIF specific declarations and prototypes
+//
+// Author(s): ="Atheros"
+//==============================================================================
+#ifndef _HIF_H_
+#define _HIF_H_
+
+#ifdef __cplusplus
+extern "C" {
+#endif /* __cplusplus */
+
+/* Header files */
+#include "a_config.h"
+#include "athdefs.h"
+#include "a_types.h"
+#include "a_osapi.h"
+#include "dl_list.h"
+
+
+typedef struct htc_callbacks HTC_CALLBACKS;
+typedef struct hif_device HIF_DEVICE;
+
+/*
+ * direction - Direction of transfer (HIF_READ/HIF_WRITE).
+ */
+#define HIF_READ 0x00000001
+#define HIF_WRITE 0x00000002
+#define HIF_DIR_MASK (HIF_READ | HIF_WRITE)
+
+/*
+ * type - An interface may support different kind of read/write commands.
+ * For example: SDIO supports CMD52/CMD53s. In case of MSIO it
+ * translates to using different kinds of TPCs. The command type
+ * is thus divided into a basic and an extended command and can
+ * be specified using HIF_BASIC_IO/HIF_EXTENDED_IO.
+ */
+#define HIF_BASIC_IO 0x00000004
+#define HIF_EXTENDED_IO 0x00000008
+#define HIF_TYPE_MASK (HIF_BASIC_IO | HIF_EXTENDED_IO)
+
+/*
+ * emode - This indicates whether the command is to be executed in a
+ * blocking or non-blocking fashion (HIF_SYNCHRONOUS/
+ * HIF_ASYNCHRONOUS). The read/write data paths in HTC have been
+ * implemented using the asynchronous mode allowing the bus
+ * driver to indicate the completion of operation through the
+ * registered callback routine. The requirement primarily comes
+ * from the contexts these operations get called from (a driver's
+ * transmit context or the ISR context in case of receive).
+ * Support for both of these modes is essential.
+ */
+#define HIF_SYNCHRONOUS 0x00000010
+#define HIF_ASYNCHRONOUS 0x00000020
+#define HIF_EMODE_MASK (HIF_SYNCHRONOUS | HIF_ASYNCHRONOUS)
+
+/*
+ * dmode - An interface may support different kinds of commands based on
+ * the tradeoff between the amount of data it can carry and the
+ * setup time. Byte and Block modes are supported (HIF_BYTE_BASIS/
+ * HIF_BLOCK_BASIS). In case of latter, the data is rounded off
+ * to the nearest block size by padding. The size of the block is
+ * configurable at compile time using the HIF_BLOCK_SIZE and is
+ * negotiated with the target during initialization after the
+ * AR6000 interrupts are enabled.
+ */
+#define HIF_BYTE_BASIS 0x00000040
+#define HIF_BLOCK_BASIS 0x00000080
+#define HIF_DMODE_MASK (HIF_BYTE_BASIS | HIF_BLOCK_BASIS)
+
+/*
+ * amode - This indicates if the address has to be incremented on AR6000
+ * after every read/write operation (HIF_FIXED_ADDRESS/
+ * HIF_INCREMENTAL_ADDRESS).
+ */
+#define HIF_FIXED_ADDRESS 0x00000100
+#define HIF_INCREMENTAL_ADDRESS 0x00000200
+#define HIF_AMODE_MASK (HIF_FIXED_ADDRESS | HIF_INCREMENTAL_ADDRESS)
+
+#define HIF_WR_ASYNC_BYTE_FIX \
+ (HIF_WRITE | HIF_ASYNCHRONOUS | HIF_EXTENDED_IO | HIF_BYTE_BASIS | HIF_FIXED_ADDRESS)
+#define HIF_WR_ASYNC_BYTE_INC \
+ (HIF_WRITE | HIF_ASYNCHRONOUS | HIF_EXTENDED_IO | HIF_BYTE_BASIS | HIF_INCREMENTAL_ADDRESS)
+#define HIF_WR_ASYNC_BLOCK_INC \
+ (HIF_WRITE | HIF_ASYNCHRONOUS | HIF_EXTENDED_IO | HIF_BLOCK_BASIS | HIF_INCREMENTAL_ADDRESS)
+#define HIF_WR_SYNC_BYTE_FIX \
+ (HIF_WRITE | HIF_SYNCHRONOUS | HIF_EXTENDED_IO | HIF_BYTE_BASIS | HIF_FIXED_ADDRESS)
+#define HIF_WR_SYNC_BYTE_INC \
+ (HIF_WRITE | HIF_SYNCHRONOUS | HIF_EXTENDED_IO | HIF_BYTE_BASIS | HIF_INCREMENTAL_ADDRESS)
+#define HIF_WR_SYNC_BLOCK_INC \
+ (HIF_WRITE | HIF_SYNCHRONOUS | HIF_EXTENDED_IO | HIF_BLOCK_BASIS | HIF_INCREMENTAL_ADDRESS)
+#define HIF_WR_ASYNC_BLOCK_FIX \
+ (HIF_WRITE | HIF_ASYNCHRONOUS | HIF_EXTENDED_IO | HIF_BLOCK_BASIS | HIF_FIXED_ADDRESS)
+#define HIF_WR_SYNC_BLOCK_FIX \
+ (HIF_WRITE | HIF_SYNCHRONOUS | HIF_EXTENDED_IO | HIF_BLOCK_BASIS | HIF_FIXED_ADDRESS)
+#define HIF_RD_SYNC_BYTE_INC \
+ (HIF_READ | HIF_SYNCHRONOUS | HIF_EXTENDED_IO | HIF_BYTE_BASIS | HIF_INCREMENTAL_ADDRESS)
+#define HIF_RD_SYNC_BYTE_FIX \
+ (HIF_READ | HIF_SYNCHRONOUS | HIF_EXTENDED_IO | HIF_BYTE_BASIS | HIF_FIXED_ADDRESS)
+#define HIF_RD_ASYNC_BYTE_FIX \
+ (HIF_READ | HIF_ASYNCHRONOUS | HIF_EXTENDED_IO | HIF_BYTE_BASIS | HIF_FIXED_ADDRESS)
+#define HIF_RD_ASYNC_BLOCK_FIX \
+ (HIF_READ | HIF_ASYNCHRONOUS | HIF_EXTENDED_IO | HIF_BLOCK_BASIS | HIF_FIXED_ADDRESS)
+#define HIF_RD_ASYNC_BYTE_INC \
+ (HIF_READ | HIF_ASYNCHRONOUS | HIF_EXTENDED_IO | HIF_BYTE_BASIS | HIF_INCREMENTAL_ADDRESS)
+#define HIF_RD_ASYNC_BLOCK_INC \
+ (HIF_READ | HIF_ASYNCHRONOUS | HIF_EXTENDED_IO | HIF_BLOCK_BASIS | HIF_INCREMENTAL_ADDRESS)
+#define HIF_RD_SYNC_BLOCK_INC \
+ (HIF_READ | HIF_SYNCHRONOUS | HIF_EXTENDED_IO | HIF_BLOCK_BASIS | HIF_INCREMENTAL_ADDRESS)
+#define HIF_RD_SYNC_BLOCK_FIX \
+ (HIF_READ | HIF_SYNCHRONOUS | HIF_EXTENDED_IO | HIF_BLOCK_BASIS | HIF_FIXED_ADDRESS)
+
+typedef enum {
+ HIF_DEVICE_POWER_STATE = 0,
+ HIF_DEVICE_GET_MBOX_BLOCK_SIZE,
+ HIF_DEVICE_GET_MBOX_ADDR,
+ HIF_DEVICE_GET_PENDING_EVENTS_FUNC,
+ HIF_DEVICE_GET_IRQ_PROC_MODE,
+ HIF_DEVICE_GET_RECV_EVENT_MASK_UNMASK_FUNC,
+ HIF_DEVICE_POWER_STATE_CHANGE,
+ HIF_DEVICE_GET_IRQ_YIELD_PARAMS,
+ HIF_CONFIGURE_QUERY_SCATTER_REQUEST_SUPPORT,
+ HIF_DEVICE_GET_OS_DEVICE,
+ HIF_DEVICE_DEBUG_BUS_STATE,
+} HIF_DEVICE_CONFIG_OPCODE;
+
+/*
+ * HIF CONFIGURE definitions:
+ *
+ * HIF_DEVICE_GET_MBOX_BLOCK_SIZE
+ * input : none
+ * output : array of 4 A_UINT32s
+ * notes: block size is returned for each mailbox (4)
+ *
+ * HIF_DEVICE_GET_MBOX_ADDR
+ * input : none
+ * output : HIF_DEVICE_MBOX_INFO
+ * notes:
+ *
+ * HIF_DEVICE_GET_PENDING_EVENTS_FUNC
+ * input : none
+ * output: HIF_PENDING_EVENTS_FUNC function pointer
+ * notes: this is optional for the HIF layer, if the request is
+ * not handled then it indicates that the upper layer can use
+ * the standard device methods to get pending events (IRQs, mailbox messages, etc.);
+ * otherwise it can call the function pointer to check pending events.
+ *
+ * HIF_DEVICE_GET_IRQ_PROC_MODE
+ * input : none
+ * output : HIF_DEVICE_IRQ_PROCESSING_MODE (interrupt processing mode)
+ * note: the hif layer interfaces with the underlying OS-specific bus driver. The HIF
+ * layer can report whether IRQ processing requires synchronous behavior or
+ * can be processed using asynchronous bus requests (typically faster).
+ *
+ * HIF_DEVICE_GET_RECV_EVENT_MASK_UNMASK_FUNC
+ * input :
+ * output : HIF_MASK_UNMASK_RECV_EVENT function pointer
+ * notes: this is optional for the HIF layer. The HIF layer may require a special mechanism
+ * to mask receive message events. The upper layer can call this pointer when it needs
+ * to mask/unmask receive events (in case it runs out of buffers).
+ *
+ * HIF_DEVICE_POWER_STATE_CHANGE
+ *
+ * input : HIF_DEVICE_POWER_CHANGE_TYPE
+ * output : none
+ * note: this is optional for the HIF layer. The HIF layer can handle power on/off state change
+ * requests in an interconnect specific way. This is highly OS and bus driver dependent.
+ * The caller must guarantee that no HIF read/write requests will be made after the device
+ * is powered down.
+ *
+ * HIF_DEVICE_GET_IRQ_YIELD_PARAMS
+ *
+ * input : none
+ * output : HIF_DEVICE_IRQ_YIELD_PARAMS
+ * note: This query checks if the HIF layer wishes to impose a processing yield count for the DSR handler.
+ * The DSR callback handler will exit after a fixed number of RX packets or events are processed.
+ * This query is only made if the device reports an IRQ processing mode of HIF_DEVICE_IRQ_SYNC_ONLY.
+ * The HIF implementation can ignore this command if it does not desire the DSR callback to yield.
+ * The HIF layer can indicate the maximum number of IRQ processing units (RX packets) before the
+ * DSR handler callback must yield and return control back to the HIF layer. When a yield limit is
+ * used the DSR callback will not call HIFAckInterrupts() as it would normally do before returning.
+ * The HIF implementation that requires a yield count must call HIFAckInterrupt() when it is prepared
+ * to process interrupts again.
+ *
+ * HIF_CONFIGURE_QUERY_SCATTER_REQUEST_SUPPORT
+ * input : none
+ * output : HIF_DEVICE_SCATTER_SUPPORT_INFO
+ * note: This query checks if the HIF layer implements the SCATTER request interface. Scatter requests
+ * allow upper layers to submit mailbox I/O operations using a list of buffers. This is useful for
+ * multi-message transfers that can better utilize the bus interconnect.
+ *
+ *
+ * HIF_DEVICE_GET_OS_DEVICE
+ * input : none
+ * output : HIF_DEVICE_OS_DEVICE_INFO
+ * note: On some operating systems, the HIF layer has a parent device object for the bus. This object
+ * may be required to register certain types of logical devices.
+ *
+ * HIF_DEVICE_DEBUG_BUS_STATE
+ * input : none
+ * output : none
+ * note: This configure option triggers the HIF interface to dump as much bus interface state as
+ * possible. This configuration request is optional (a no-op on some HIF implementations).
+ *
+ */
+
+typedef struct {
+ A_UINT32 ExtendedAddress; /* extended address for larger writes */
+ A_UINT32 ExtendedSize;
+} HIF_MBOX_PROPERTIES;
+
+#define HIF_MBOX_FLAG_NO_BUNDLING (1 << 0) /* do not allow bundling over the mailbox */
+
+typedef enum _MBOX_BUF_IF_TYPE {
+ MBOX_BUS_IF_SDIO = 0,
+ MBOX_BUS_IF_SPI = 1,
+} MBOX_BUF_IF_TYPE;
+
+typedef struct {
+ A_UINT32 MboxAddresses[4]; /* must be first element for legacy HIFs that return the address in
+ an ARRAY of 32-bit words */
+
+ /* the following describe extended mailbox properties */
+ HIF_MBOX_PROPERTIES MboxProp[4];
+ /* if the HIF supports the GMbox extended address region it can report it
+ * here; some interfaces cannot support the GMBOX address range and will not set this */
+ A_UINT32 GMboxAddress;
+ A_UINT32 GMboxSize;
+ A_UINT32 Flags; /* flags to describe mbox behavior or usage */
+ MBOX_BUF_IF_TYPE MboxBusIFType; /* mailbox bus interface type */
+} HIF_DEVICE_MBOX_INFO;
+
+typedef enum {
+ HIF_DEVICE_IRQ_SYNC_ONLY, /* for HIF implementations that require the DSR to process all
+ interrupts before returning */
+ HIF_DEVICE_IRQ_ASYNC_SYNC, /* for HIF implementations that allow DSR to process interrupts
+ using ASYNC I/O (that is HIFAckInterrupt can be called at a
+ later time) */
+} HIF_DEVICE_IRQ_PROCESSING_MODE;
+
+typedef enum {
+ HIF_DEVICE_POWER_UP, /* HIF layer should power up interface and/or module */
+ HIF_DEVICE_POWER_DOWN, /* HIF layer should initiate bus-specific measures to minimize power */
+ HIF_DEVICE_POWER_CUT /* HIF layer should initiate bus-specific AND/OR platform-specific measures
+ to completely power-off the module and associated hardware (i.e. cut power supplies)
+ */
+} HIF_DEVICE_POWER_CHANGE_TYPE;
+
+typedef struct {
+ int RecvPacketYieldCount; /* max number of packets to force DSR to return */
+} HIF_DEVICE_IRQ_YIELD_PARAMS;
+
+
+typedef struct _HIF_SCATTER_ITEM {
+ A_UINT8 *pBuffer; /* CPU accessible address of buffer */
+ int Length; /* length of transfer to/from this buffer */
+ void *pCallerContexts[2]; /* space for caller to insert a context associated with this item */
+} HIF_SCATTER_ITEM;
+
+struct _HIF_SCATTER_REQ;
+
+typedef void ( *HIF_SCATTER_COMP_CB)(struct _HIF_SCATTER_REQ *);
+
+typedef enum _HIF_SCATTER_METHOD {
+ HIF_SCATTER_NONE = 0,
+ HIF_SCATTER_DMA_REAL, /* Real SG support no restrictions */
+ HIF_SCATTER_DMA_BOUNCE, /* Uses SG DMA but HIF layer uses an internal bounce buffer */
+} HIF_SCATTER_METHOD;
+
+typedef struct _HIF_SCATTER_REQ {
+ DL_LIST ListLink; /* link management */
+ A_UINT32 Address; /* address for the read/write operation */
+ A_UINT32 Request; /* request flags */
+ A_UINT32 TotalLength; /* total length of entire transfer */
+ A_UINT32 CallerFlags; /* caller specific flags can be stored here */
+ HIF_SCATTER_COMP_CB CompletionRoutine; /* completion routine set by caller */
+ A_STATUS CompletionStatus; /* status of completion */
+ void *Context; /* caller context for this request */
+ int ValidScatterEntries; /* number of valid entries set by caller */
+ HIF_SCATTER_METHOD ScatterMethod; /* scatter method handled by HIF */
+ void *HIFPrivate[4]; /* HIF private area */
+ A_UINT8 *pScatterBounceBuffer; /* bounce buffer for upper layers to copy to/from */
+ HIF_SCATTER_ITEM ScatterList[1]; /* start of scatter list */
+} HIF_SCATTER_REQ;
+
+typedef HIF_SCATTER_REQ * ( *HIF_ALLOCATE_SCATTER_REQUEST)(HIF_DEVICE *device);
+typedef void ( *HIF_FREE_SCATTER_REQUEST)(HIF_DEVICE *device, HIF_SCATTER_REQ *request);
+typedef A_STATUS ( *HIF_READWRITE_SCATTER)(HIF_DEVICE *device, HIF_SCATTER_REQ *request);
+
+typedef struct _HIF_DEVICE_SCATTER_SUPPORT_INFO {
+ /* information returned from HIF layer */
+ HIF_ALLOCATE_SCATTER_REQUEST pAllocateReqFunc;
+ HIF_FREE_SCATTER_REQUEST pFreeReqFunc;
+ HIF_READWRITE_SCATTER pReadWriteScatterFunc;
+ int MaxScatterEntries;
+ int MaxTransferSizePerScatterReq;
+} HIF_DEVICE_SCATTER_SUPPORT_INFO;
+
+typedef struct {
+ void *pOSDevice;
+} HIF_DEVICE_OS_DEVICE_INFO;
+
+#define HIF_MAX_DEVICES 1
+
+struct htc_callbacks {
+ void *context; /* context to pass to the dsrhandler
+ note : rwCompletionHandler is provided the context passed to HIFReadWrite */
+ A_STATUS (* rwCompletionHandler)(void *rwContext, A_STATUS status);
+ A_STATUS (* dsrHandler)(void *context);
+};
+
+typedef struct osdrv_callbacks {
+ void *context; /* context to pass for all callbacks except deviceRemovedHandler
+ the deviceRemovedHandler is only called if the device is claimed */
+ A_STATUS (* deviceInsertedHandler)(void *context, void *hif_handle);
+ A_STATUS (* deviceRemovedHandler)(void *claimedContext, void *hif_handle);
+ A_STATUS (* deviceSuspendHandler)(void *context);
+ A_STATUS (* deviceResumeHandler)(void *context);
+ A_STATUS (* deviceWakeupHandler)(void *context);
+ A_STATUS (* devicePowerChangeHandler)(void *context, HIF_DEVICE_POWER_CHANGE_TYPE config);
+} OSDRV_CALLBACKS;
+
+#define HIF_OTHER_EVENTS (1 << 0) /* other interrupts (non-Recv) are pending, host
+ needs to read the register table to figure out what */
+#define HIF_RECV_MSG_AVAIL (1 << 1) /* pending recv packet */
+
+typedef struct _HIF_PENDING_EVENTS_INFO {
+ A_UINT32 Events;
+ A_UINT32 LookAhead;
+ A_UINT32 AvailableRecvBytes;
+#ifdef THREAD_X
+ A_UINT32 Polling;
+ A_UINT32 INT_CAUSE_REG;
+#endif
+} HIF_PENDING_EVENTS_INFO;
+
+ /* function to get pending events; some HIF modules use special mechanisms
+ * to detect packet available and other interrupts */
+typedef A_STATUS ( *HIF_PENDING_EVENTS_FUNC)(HIF_DEVICE *device,
+ HIF_PENDING_EVENTS_INFO *pEvents,
+ void *AsyncContext);
+
+#define HIF_MASK_RECV TRUE
+#define HIF_UNMASK_RECV FALSE
+ /* function to mask recv events */
+typedef A_STATUS ( *HIF_MASK_UNMASK_RECV_EVENT)(HIF_DEVICE *device,
+ A_BOOL Mask,
+ void *AsyncContext);
+
+
+/*
+ * This API is used to perform any global initialization of the HIF layer
+ * and to set OS driver callbacks (i.e. insertion/removal) to the HIF layer
+ *
+ */
+A_STATUS HIFInit(OSDRV_CALLBACKS *callbacks);
+
+/* This API claims the HIF device and provides a context for handling removal.
+ * The device removal callback is only called when the OSDRV layer claims
+ * a device. The claimed context must be non-NULL */
+void HIFClaimDevice(HIF_DEVICE *device, void *claimedContext);
+/* release the claimed device */
+void HIFReleaseDevice(HIF_DEVICE *device);
+
+/* This API allows the HTC layer to attach to the HIF device */
+A_STATUS HIFAttachHTC(HIF_DEVICE *device, HTC_CALLBACKS *callbacks);
+/* This API detaches the HTC layer from the HIF device */
+void HIFDetachHTC(HIF_DEVICE *device);
+
+/*
+ * This API is used to provide the read/write interface over the specific bus
+ * interface.
+ * address - Starting address in the AR6000's address space. For mailbox
+ * writes, it refers to the start of the mbox boundary. It should
+ * be ensured that the last byte falls on the mailbox's EOM. For
+ * mailbox reads, it refers to the end of the mbox boundary.
+ * buffer - Pointer to the buffer containing the data to be transmitted or
+ * received.
+ * length - Amount of data to be transmitted or received.
+ * request - Characterizes the attributes of the command.
+ */
+A_STATUS
+HIFReadWrite(HIF_DEVICE *device,
+ A_UINT32 address,
+ A_UCHAR *buffer,
+ A_UINT32 length,
+ A_UINT32 request,
+ void *context);
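+/*
+ * Illustrative sketch (hypothetical, not part of this API): a blocking
+ * block-mode mailbox write using the request flags defined above. "device",
+ * "mboxAddress", "txBuffer" and "txLength" are hypothetical caller-owned
+ * values; block-basis transfers are padded to the negotiated block size as
+ * described earlier in this header.
+ *
+ *   A_STATUS status;
+ *
+ *   status = HIFReadWrite(device,
+ *                         mboxAddress,            // see EOM note above for mailbox writes
+ *                         txBuffer,
+ *                         txLength,
+ *                         HIF_WR_SYNC_BLOCK_INC,
+ *                         NULL);                  // no async context for a synchronous request
+ */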
+
+/*
+ * This can be initiated from the unload driver context when the OSDRV layer has no more use for
+ * the device.
+ */
+void HIFShutDownDevice(HIF_DEVICE *device);
+
+/*
+ * This should translate to an acknowledgment to the bus driver indicating that
+ * the previous interrupt request has been serviced and all the relevant
+ * sources have been cleared. HTC is ready to process more interrupts.
+ * This should prevent the bus driver from raising an interrupt unless the
+ * previous one has been serviced and acknowledged using the previous API.
+ */
+void HIFAckInterrupt(HIF_DEVICE *device);
+
+void HIFMaskInterrupt(HIF_DEVICE *device);
+
+void HIFUnMaskInterrupt(HIF_DEVICE *device);
+
+#ifdef THREAD_X
+/*
+ * This set of functions are to be used by the bus driver to notify
+ * the HIF module about various events.
+ * These are not implemented if the bus driver provides an alternative
+ * way for this notification though callbacks for instance.
+ */
+int HIFInsertEventNotify(void);
+
+int HIFRemoveEventNotify(void);
+
+int HIFIRQEventNotify(void);
+
+int HIFRWCompleteEventNotify(void);
+#endif
+
+A_STATUS
+HIFConfigureDevice(HIF_DEVICE *device, HIF_DEVICE_CONFIG_OPCODE opcode,
+ void *config, A_UINT32 configLen);
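+/*
+ * Illustrative sketch (hypothetical, not part of this API): querying the
+ * per-mailbox block sizes, which the HIF CONFIGURE notes above describe as an
+ * array of 4 A_UINT32 values.
+ *
+ *   A_UINT32 blockSizes[4];
+ *   A_STATUS status;
+ *
+ *   status = HIFConfigureDevice(device,
+ *                               HIF_DEVICE_GET_MBOX_BLOCK_SIZE,
+ *                               blockSizes,
+ *                               sizeof(blockSizes));
+ */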
+
+/*
+ * This API waits for the remaining MBOX messages to be drained.
+ * This should be moved to the HTC AR6K layer.
+ */
+A_STATUS hifWaitForPendingRecv(HIF_DEVICE *device);
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* _HIF_H_ */
diff --git a/drivers/staging/ath6kl/include/host_version.h b/drivers/staging/ath6kl/include/host_version.h
new file mode 100644
index 000000000000..74f1982c681b
--- /dev/null
+++ b/drivers/staging/ath6kl/include/host_version.h
@@ -0,0 +1,52 @@
+//------------------------------------------------------------------------------
+// <copyright file="host_version.h" company="Atheros">
+// Copyright (c) 2004-2010 Atheros Corporation. All rights reserved.
+//
+//
+// Permission to use, copy, modify, and/or distribute this software for any
+// purpose with or without fee is hereby granted, provided that the above
+// copyright notice and this permission notice appear in all copies.
+//
+// THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+// WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+// MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+// ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+// WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+// ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+// OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+//
+//
+//------------------------------------------------------------------------------
+//==============================================================================
+// This file contains version information for the sample host driver for the
+// AR6000 chip
+//
+// Author(s): ="Atheros"
+//==============================================================================
+#ifndef _HOST_VERSION_H_
+#define _HOST_VERSION_H_
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#include <AR6002/AR6K_version.h>
+
+/*
+ * The version number is made up of major, minor, patch and build
+ * numbers. These are 16 bit numbers. The build and release script will
+ * set the build number using a Perforce counter. Here the build number is
+ * set to 9999 so that builds done without the build-release script are easily
+ * identifiable.
+ */
+
+#define ATH_SW_VER_MAJOR __VER_MAJOR_
+#define ATH_SW_VER_MINOR __VER_MINOR_
+#define ATH_SW_VER_PATCH __VER_PATCH_
+#define ATH_SW_VER_BUILD __BUILD_NUMBER_
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* _HOST_VERSION_H_ */
diff --git a/drivers/staging/ath6kl/include/htc_api.h b/drivers/staging/ath6kl/include/htc_api.h
new file mode 100644
index 000000000000..b007051e0551
--- /dev/null
+++ b/drivers/staging/ath6kl/include/htc_api.h
@@ -0,0 +1,575 @@
+//------------------------------------------------------------------------------
+// <copyright file="htc_api.h" company="Atheros">
+// Copyright (c) 2007-2010 Atheros Corporation. All rights reserved.
+//
+//
+// Permission to use, copy, modify, and/or distribute this software for any
+// purpose with or without fee is hereby granted, provided that the above
+// copyright notice and this permission notice appear in all copies.
+//
+// THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+// WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+// MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+// ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+// WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+// ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+// OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+//
+//
+//------------------------------------------------------------------------------
+//==============================================================================
+// Author(s): ="Atheros"
+//==============================================================================
+#ifndef _HTC_API_H_
+#define _HTC_API_H_
+
+#include "htc_packet.h"
+#include <htc.h>
+#include <htc_services.h>
+
+#ifdef __cplusplus
+extern "C" {
+#endif /* __cplusplus */
+
+/* TODO.. for BMI */
+#define ENDPOINT1 0
+// TODO -remove me, but we have to fix BMI first
+#define HTC_MAILBOX_NUM_MAX 4
+
+/* this is the amount of header room required by users of HTC */
+#define HTC_HEADER_LEN HTC_HDR_LENGTH
+
+typedef void *HTC_HANDLE;
+
+typedef A_UINT16 HTC_SERVICE_ID;
+
+typedef struct _HTC_INIT_INFO {
+ void *pContext; /* context for target failure notification */
+ void (*TargetFailure)(void *Instance, A_STATUS Status);
+} HTC_INIT_INFO;
+
+/* per service connection send completion */
+typedef void (*HTC_EP_SEND_PKT_COMPLETE)(void *,HTC_PACKET *);
+/* per service connection callback when a plurality of packets have been sent
+ * The HTC_PACKET_QUEUE is a temporary queue object (e.g. freed on return from the callback)
+ * to hold a list of completed send packets.
+ * If the handler cannot fully traverse the packet queue before returning, it should
+ * transfer the items of the queue into the caller's private queue using:
+ * HTC_PACKET_ENQUEUE() */
+typedef void (*HTC_EP_SEND_PKT_COMP_MULTIPLE)(void *,HTC_PACKET_QUEUE *);
+/* per service connection pkt received */
+typedef void (*HTC_EP_RECV_PKT)(void *,HTC_PACKET *);
+/* per service connection callback when a plurality of packets are received
+ * The HTC_PACKET_QUEUE is a temporary queue object (e.g. freed on return from the callback)
+ * to hold a list of recv packets.
+ * If the handler cannot fully traverse the packet queue before returning, it should
+ * transfer the items of the queue into the caller's private queue using:
+ * HTC_PACKET_ENQUEUE() */
+typedef void (*HTC_EP_RECV_PKT_MULTIPLE)(void *,HTC_PACKET_QUEUE *);
+
+/* Optional per service connection receive buffer re-fill callback,
+ * On some OSes (like Linux) packets are allocated from a global pool and indicated up
+ * to the network stack. The driver never gets the packets back from the OS. For these OSes
+ * a refill callback can be used to allocate and re-queue buffers into HTC.
+ *
+ * On other OSes, the network stack can call into the driver's OS-specific "return_packet" handler and
+ * the driver can re-queue these buffers into HTC. In this regard a refill callback is
+ * unnecessary */
+typedef void (*HTC_EP_RECV_REFILL)(void *, HTC_ENDPOINT_ID Endpoint);
+
+/* Optional per service connection receive buffer allocation callback.
+ * On some systems packet buffers are an extremely limited resource. Rather than
+ * queue largest-possible-sized buffers to HTC, some systems would rather
+ * allocate a specific size as the packet is received. The trade off is
+ * slightly more processing (callback invoked for each RX packet)
+ * for the benefit of committing fewer buffer resources into HTC.
+ *
+ * The callback is provided the length of the pending packet to fetch. This includes the
+ * HTC header length plus the length of payload. The callback can return a pointer to
+ * the allocated HTC packet for immediate use.
+ *
+ * Alternatively a variant of this handler can be used to allocate large receive packets as needed.
+ * For example an application can use the refill mechanism for normal packets and the recv-alloc mechanism to
+ * handle the case where a large packet buffer is required. This can significantly reduce the
+ * amount of "committed" memory used to receive packets.
+ *
+ * */
+typedef HTC_PACKET *(*HTC_EP_RECV_ALLOC)(void *, HTC_ENDPOINT_ID Endpoint, int Length);
+
+typedef enum _HTC_SEND_FULL_ACTION {
+ HTC_SEND_FULL_KEEP = 0, /* packet that overflowed should be kept in the queue */
+ HTC_SEND_FULL_DROP = 1, /* packet that overflowed should be dropped */
+} HTC_SEND_FULL_ACTION;
+
+/* Optional per service connection callback when a send queue is full. This can occur if the
+ * host continues queueing up TX packets faster than credits can arrive
+ * To prevent the host (on some OSes like Linux) from continuously queueing packets
+ * and consuming resources, this callback is provided so that the host
+ * can disable TX in the subsystem (i.e. network stack).
+ * This callback is invoked for each packet that "overflows" the HTC queue. The callback can
+ * determine whether the new packet that overflowed the queue can be kept (HTC_SEND_FULL_KEEP) or
+ * dropped (HTC_SEND_FULL_DROP). If a packet is dropped, the EpTxComplete handler will be called
+ * and the packet's status field will be set to A_NO_RESOURCE.
+ * Other OSes require a "per-packet" indication for each completed TX packet; this
+ * closed loop mechanism will prevent the network stack from overrunning the NIC.
+ * The packet to keep or drop is passed for inspection to the registered handler; the handler
+ * must ONLY inspect the packet, it may not free or reclaim the packet. */
+typedef HTC_SEND_FULL_ACTION (*HTC_EP_SEND_QUEUE_FULL)(void *, HTC_PACKET *pPacket);
+
+typedef struct _HTC_EP_CALLBACKS {
+ void *pContext; /* context for each callback */
+ HTC_EP_SEND_PKT_COMPLETE EpTxComplete; /* tx completion callback for connected endpoint */
+ HTC_EP_RECV_PKT EpRecv; /* receive callback for connected endpoint */
+ HTC_EP_RECV_REFILL EpRecvRefill; /* OPTIONAL receive re-fill callback for connected endpoint */
+ HTC_EP_SEND_QUEUE_FULL EpSendFull; /* OPTIONAL send full callback */
+ HTC_EP_RECV_ALLOC EpRecvAlloc; /* OPTIONAL recv allocation callback */
+ HTC_EP_RECV_ALLOC EpRecvAllocThresh; /* OPTIONAL recv allocation callback based on a threshold */
+ HTC_EP_SEND_PKT_COMP_MULTIPLE EpTxCompleteMultiple; /* OPTIONAL completion handler for multiple complete
+ indications (EpTxComplete must be NULL) */
+ HTC_EP_RECV_PKT_MULTIPLE EpRecvPktMultiple; /* OPTIONAL completion handler for multiple
+ recv packet indications (EpRecv must be NULL) */
+ int RecvAllocThreshold; /* if EpRecvAllocThresh is non-NULL, HTC will compare the
+ threshold value to the current recv packet length and invoke
+ the EpRecvAllocThresh callback to acquire a packet buffer */
+ int RecvRefillWaterMark; /* if a EpRecvRefill handler is provided, this value
+ can be used to set a trigger refill callback
+ when the recv queue drops below this value
+ if set to 0, the refill is only called when packets
+ are empty */
+} HTC_EP_CALLBACKS;
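+/*
+ * Illustrative sketch (hypothetical, not part of this API): minimal endpoint
+ * callbacks for a service connection. The handler functions and context are
+ * hypothetical; optional callbacks that are not needed stay NULL as described
+ * above.
+ *
+ *   HTC_EP_CALLBACKS epCallbacks = { 0 };
+ *
+ *   epCallbacks.pContext            = pMyContext;
+ *   epCallbacks.EpTxComplete        = my_tx_complete;  // or use EpTxCompleteMultiple instead
+ *   epCallbacks.EpRecv              = my_rx_handler;   // or use EpRecvPktMultiple instead
+ *   epCallbacks.EpRecvRefill        = my_recv_refill;  // optional
+ *   epCallbacks.RecvRefillWaterMark = 8;               // arbitrary example trigger point
+ */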
+
+/* service connection information */
+typedef struct _HTC_SERVICE_CONNECT_REQ {
+ HTC_SERVICE_ID ServiceID; /* service ID to connect to */
+ A_UINT16 ConnectionFlags; /* connection flags, see htc protocol definition */
+ A_UINT8 *pMetaData; /* ptr to optional service-specific meta-data */
+ A_UINT8 MetaDataLength; /* optional meta data length */
+ HTC_EP_CALLBACKS EpCallbacks; /* endpoint callbacks */
+ int MaxSendQueueDepth; /* maximum depth of any send queue */
+ A_UINT32 LocalConnectionFlags; /* HTC flags for the host-side (local) connection */
+ unsigned int MaxSendMsgSize; /* override max message size in send direction */
+} HTC_SERVICE_CONNECT_REQ;
+
+#define HTC_LOCAL_CONN_FLAGS_ENABLE_SEND_BUNDLE_PADDING (1 << 0) /* enable send bundle padding for this endpoint */
+
+/* service connection response information */
+typedef struct _HTC_SERVICE_CONNECT_RESP {
+ A_UINT8 *pMetaData; /* caller supplied buffer to optional meta-data */
+ A_UINT8 BufferLength; /* length of caller supplied buffer */
+ A_UINT8 ActualLength; /* actual length of meta data */
+ HTC_ENDPOINT_ID Endpoint; /* endpoint to communicate over */
+ unsigned int MaxMsgLength; /* max length of all messages over this endpoint */
+ A_UINT8 ConnectRespCode; /* connect response code from target */
+} HTC_SERVICE_CONNECT_RESP;
+
+/* endpoint distribution structure */
+typedef struct _HTC_ENDPOINT_CREDIT_DIST {
+ struct _HTC_ENDPOINT_CREDIT_DIST *pNext;
+ struct _HTC_ENDPOINT_CREDIT_DIST *pPrev;
+ HTC_SERVICE_ID ServiceID; /* Service ID (set by HTC) */
+ HTC_ENDPOINT_ID Endpoint; /* endpoint for this distribution struct (set by HTC) */
+ A_UINT32 DistFlags; /* distribution flags, distribution function can
+ set default activity using SET_EP_ACTIVE() macro */
+ int TxCreditsNorm; /* credits for normal operation, anything above this
+ indicates the endpoint is over-subscribed, this field
+ is only relevant to the credit distribution function */
+ int TxCreditsMin; /* floor for credit distribution, this field is
+ only relevant to the credit distribution function */
+ int TxCreditsAssigned; /* number of credits assigned to this EP, this field
+ is only relevant to the credit dist function */
+ int TxCredits; /* current credits available, this field is used by
+ HTC to determine whether a message can be sent or
+ must be queued */
+ int TxCreditsToDist; /* pending credits to distribute on this endpoint, this
+ is set by HTC when credit reports arrive.
+ The credit distribution functions sets this to zero
+ when it distributes the credits */
+ int TxCreditsSeek; /* this is the number of credits that the current pending TX
+ packet needs to transmit. This is set by HTC when
+ an endpoint needs credits in order to transmit */
+ int TxCreditSize; /* size in bytes of each credit (set by HTC) */
+ int TxCreditsPerMaxMsg; /* credits required for a maximum sized messages (set by HTC) */
+ void *pHTCReserved; /* reserved for HTC use */
+ int TxQueueDepth; /* current depth of TX queue, i.e. messages waiting for credits
+ This field is valid only when HTC_CREDIT_DIST_ACTIVITY_CHANGE
+ or HTC_CREDIT_DIST_SEND_COMPLETE is indicated on an endpoint
+ that has non-zero credits to recover
+ */
+} HTC_ENDPOINT_CREDIT_DIST;
+
+#define HTC_EP_ACTIVE ((A_UINT32) (1u << 31))
+
+/* macro to check if an endpoint has gone active, useful for credit
+ * distributions */
+#define IS_EP_ACTIVE(epDist) ((epDist)->DistFlags & HTC_EP_ACTIVE)
+#define SET_EP_ACTIVE(epDist) (epDist)->DistFlags |= HTC_EP_ACTIVE
+
+ /* credit distribution code that is passed into the distribution function,
+ * there are mandatory and optional codes that must be handled */
+typedef enum _HTC_CREDIT_DIST_REASON {
+ HTC_CREDIT_DIST_SEND_COMPLETE = 0, /* credits available as a result of completed
+ send operations (MANDATORY) resulting in credit reports */
+ HTC_CREDIT_DIST_ACTIVITY_CHANGE = 1, /* a change in endpoint activity occurred (OPTIONAL) */
+ HTC_CREDIT_DIST_SEEK_CREDITS, /* an endpoint needs to "seek" credits (OPTIONAL) */
+ HTC_DUMP_CREDIT_STATE /* for debugging, dump any state information that is kept by
+ the distribution function */
+} HTC_CREDIT_DIST_REASON;
+
+typedef void (*HTC_CREDIT_DIST_CALLBACK)(void *Context,
+ HTC_ENDPOINT_CREDIT_DIST *pEPList,
+ HTC_CREDIT_DIST_REASON Reason);
+
+typedef void (*HTC_CREDIT_INIT_CALLBACK)(void *Context,
+ HTC_ENDPOINT_CREDIT_DIST *pEPList,
+ int TotalCredits);
+
+ /* endpoint statistics action */
+typedef enum _HTC_ENDPOINT_STAT_ACTION {
+ HTC_EP_STAT_SAMPLE = 0, /* only read statistics */
+ HTC_EP_STAT_SAMPLE_AND_CLEAR = 1, /* sample and immediately clear statistics */
+ HTC_EP_STAT_CLEAR /* clear only */
+} HTC_ENDPOINT_STAT_ACTION;
+
+ /* endpoint statistics */
+typedef struct _HTC_ENDPOINT_STATS {
+ A_UINT32 TxCreditLowIndications; /* number of times the host set the credit-low flag in a send message on
+ this endpoint */
+ A_UINT32 TxIssued; /* running count of total TX packets issued */
+ A_UINT32 TxPacketsBundled; /* running count of TX packets that were issued in bundles */
+ A_UINT32 TxBundles; /* running count of TX bundles that were issued */
+ A_UINT32 TxDropped; /* tx packets that were dropped */
+ A_UINT32 TxCreditRpts; /* running count of total credit reports received for this endpoint */
+ A_UINT32 TxCreditRptsFromRx; /* credit reports received from this endpoint's RX packets */
+ A_UINT32 TxCreditRptsFromOther; /* credit reports received from RX packets of other endpoints */
+ A_UINT32 TxCreditRptsFromEp0; /* credit reports received from endpoint 0 RX packets */
+ A_UINT32 TxCreditsFromRx; /* count of credits received via Rx packets on this endpoint */
+ A_UINT32 TxCreditsFromOther; /* count of credits received via another endpoint */
+ A_UINT32 TxCreditsFromEp0; /* count of credits received via endpoint 0 RX packets */
+ A_UINT32 TxCreditsConsummed; /* count of consumed credits */
+ A_UINT32 TxCreditsReturned; /* count of credits returned */
+ A_UINT32 RxReceived; /* count of RX packets received */
+ A_UINT32 RxLookAheads; /* count of lookahead records
+ found in messages received on this endpoint */
+ A_UINT32 RxPacketsBundled; /* count of recv packets received in a bundle */
+ A_UINT32 RxBundleLookAheads; /* count of number of bundled lookaheads */
+ A_UINT32 RxBundleIndFromHdr; /* count of the number of bundle indications from the HTC header */
+ A_UINT32 RxAllocThreshHit; /* count of the number of times the recv allocation threshold was hit */
+ A_UINT32 RxAllocThreshBytes; /* total number of bytes */
+} HTC_ENDPOINT_STATS;
+
+/* ------ Function Prototypes ------ */
+/*+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
+ @desc: Create an instance of HTC over the underlying HIF device
+ @function name: HTCCreate
+ @input: HifDevice - hif device handle,
+ pInfo - initialization information
+ @output:
+ @return: HTC_HANDLE on success, NULL on failure
+ @notes:
+ @example:
+ @see also: HTCDestroy
++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++*/
+HTC_HANDLE HTCCreate(void *HifDevice, HTC_INIT_INFO *pInfo);
+/*+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
+ @desc: Get the underlying HIF device handle
+ @function name: HTCGetHifDevice
+ @input: HTCHandle - handle passed into the AddInstance callback
+ @output:
+ @return: opaque HIF device handle usable in HIF API calls.
+ @notes:
+ @example:
+ @see also:
++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++*/
+void *HTCGetHifDevice(HTC_HANDLE HTCHandle);
+/*+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
+ @desc: Set credit distribution parameters
+ @function name: HTCSetCreditDistribution
+ @input: HTCHandle - HTC handle
+ pCreditDistContext - caller supplied context to pass into distribution functions
+ CreditDistFunc - Distribution function callback
+ CreditInitFunc - Credit Distribution initialization callback
+ ServicePriorityOrder - Array containing list of service IDs, lowest index is highest
+ priority
+ ListLength - number of elements in ServicePriorityOrder
+ @output:
+ @return:
+ @notes: The user can set a custom credit distribution function to handle special requirements
+ for each endpoint. A default credit distribution routine can be used by setting
+ CreditInitFunc to NULL. The default credit distribution is only provided for simple
+ "fair" credit distribution without regard to any prioritization.
+
+ @example:
+ @see also:
++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++*/
+void HTCSetCreditDistribution(HTC_HANDLE HTCHandle,
+ void *pCreditDistContext,
+ HTC_CREDIT_DIST_CALLBACK CreditDistFunc,
+ HTC_CREDIT_INIT_CALLBACK CreditInitFunc,
+ HTC_SERVICE_ID ServicePriorityOrder[],
+ int ListLength);
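As a minimal sketch of wiring these hooks up (the service ID value and the context pointer below are placeholders for illustration, not definitions from this header), registration could look like:

    static void my_credit_dist(void *Context, HTC_ENDPOINT_CREDIT_DIST *pEPList,
                               HTC_CREDIT_DIST_REASON Reason)
    {
        /* redistribute credits across the endpoint list based on Reason */
    }

    static void my_credit_init(void *Context, HTC_ENDPOINT_CREDIT_DIST *pEPList,
                               int TotalCredits)
    {
        /* hand out the initial TotalCredits across the endpoints in pEPList */
    }

    static void register_credit_dist(HTC_HANDLE htc, void *myContext)
    {
        HTC_SERVICE_ID prio[1] = { 0 /* placeholder: highest-priority service ID */ };

        HTCSetCreditDistribution(htc, myContext, my_credit_dist,
                                 my_credit_init, prio, 1);
    }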
+/*+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
+ @desc: Wait for the target to indicate the HTC layer is ready
+ @function name: HTCWaitTarget
+ @input: HTCHandle - HTC handle
+ @output:
+ @return:
+ @notes: This API blocks until the target responds with an HTC ready message.
+ The caller should not connect services until the target has indicated it is
+ ready.
+ @example:
+ @see also: HTCConnectService
++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++*/
+A_STATUS HTCWaitTarget(HTC_HANDLE HTCHandle);
+/*+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
+ @desc: Start target service communications
+ @function name: HTCStart
+ @input: HTCHandle - HTC handle
+ @output:
+ @return:
+ @notes: This API indicates to the target that the service connection phase is complete
+ and the target can freely start all connected services. This API should only be
+ called AFTER all service connections have been made. HTCStart will issue a
+ SETUP_COMPLETE message to the target to indicate that all service connections
+ have been made and the target can start communicating over the endpoints.
+ @example:
+ @see also: HTCConnectService
++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++*/
+A_STATUS HTCStart(HTC_HANDLE HTCHandle);
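Read together, HTCCreate/HTCWaitTarget/HTCConnectService/HTCStart imply a fixed bring-up order. A hedged sketch follows; the members of HTC_INIT_INFO and HTC_SERVICE_CONNECT_REQ are defined elsewhere, so they are only zeroed here, and A_ERROR stands in for a generic failure code:

    static A_STATUS bring_up_htc(void *hifDevice, HTC_HANDLE *pHTC)
    {
        HTC_INIT_INFO initInfo;
        HTC_SERVICE_CONNECT_REQ req;
        HTC_SERVICE_CONNECT_RESP resp;
        A_STATUS status;

        A_MEMZERO(&initInfo, sizeof(initInfo));  /* real callers fill in the callbacks */
        *pHTC = HTCCreate(hifDevice, &initInfo);
        if (NULL == *pHTC) {
            return A_ERROR;                      /* assumed generic failure code */
        }

        status = HTCWaitTarget(*pHTC);           /* blocks for the HTC ready message */
        if (A_FAILED(status)) {
            HTCDestroy(*pHTC);
            return status;
        }

        A_MEMZERO(&req, sizeof(req));
        A_MEMZERO(&resp, sizeof(resp));
        /* service ID and endpoint callbacks would be filled into req here */
        status = HTCConnectService(*pHTC, &req, &resp);
        if (A_FAILED(status)) {
            HTCDestroy(*pHTC);
            return status;
        }

        return HTCStart(*pHTC);                  /* issues SETUP_COMPLETE to the target */
    }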
+/*+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
+ @desc: Add receive packet to HTC
+ @function name: HTCAddReceivePkt
+ @input: HTCHandle - HTC handle
+ pPacket - HTC receive packet to add
+ @output:
+ @return: A_OK on success
+ @notes: The user must supply HTC packets for capturing incoming HTC frames. The caller
+ must initialize each HTC packet using the SET_HTC_PACKET_INFO_RX_REFILL()
+ macro.
+ @example:
+ @see also:
++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++*/
+A_STATUS HTCAddReceivePkt(HTC_HANDLE HTCHandle, HTC_PACKET *pPacket);
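A small sketch of handing one receive buffer to HTC, using the SET_HTC_PACKET_INFO_RX_REFILL() macro defined in htc_packet.h later in this patch (buffer ownership and sizing are the caller's choice):

    static A_STATUS give_rx_buffer(HTC_HANDLE htc, HTC_PACKET *pPacket,
                                   A_UINT8 *buf, A_UINT32 len, HTC_ENDPOINT_ID ep)
    {
        /* the context argument (NULL here) is returned with the packet in RX callbacks */
        SET_HTC_PACKET_INFO_RX_REFILL(pPacket, NULL, buf, len, ep);
        return HTCAddReceivePkt(htc, pPacket);
    }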
+/*+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
+ @desc: Connect to an HTC service
+ @function name: HTCConnectService
+ @input: HTCHandle - HTC handle
+ pReq - connection details
+ @output: pResp - connection response
+ @return:
+ @notes: Service connections must be performed before HTCStart. User provides callback handlers
+ for various endpoint events.
+ @example:
+ @see also: HTCStart
++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++*/
+A_STATUS HTCConnectService(HTC_HANDLE HTCHandle,
+ HTC_SERVICE_CONNECT_REQ *pReq,
+ HTC_SERVICE_CONNECT_RESP *pResp);
+/*+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
+ @desc: Send an HTC packet
+ @function name: HTCSendPkt
+ @input: HTCHandle - HTC handle
+ pPacket - packet to send
+ @output:
+ @return: A_OK
+ @notes: Caller must initialize packet using SET_HTC_PACKET_INFO_TX() macro.
+ This interface is fully asynchronous. On error, HTCSendPkt will
+ call the registered Endpoint callback to clean up the packet.
+ @example:
+ @see also: HTCFlushEndpoint
++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++*/
+A_STATUS HTCSendPkt(HTC_HANDLE HTCHandle, HTC_PACKET *pPacket);
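A sketch of queuing one frame for transmit; the endpoint's TX-complete handler (registered at HTCConnectService() time and not named in this header) is assumed to reclaim the packet. HTC_TX_PACKET_TAG_USER_DEFINED comes from htc_packet.h later in this patch:

    static A_STATUS send_one(HTC_HANDLE htc, HTC_PACKET *pPacket,
                             A_UINT8 *payload, A_UINT32 len, HTC_ENDPOINT_ID ep)
    {
        /* tag 0 is the reserved flush-all value, so use a user-defined tag instead */
        SET_HTC_PACKET_INFO_TX(pPacket, NULL, payload, len, ep,
                               HTC_TX_PACKET_TAG_USER_DEFINED);
        return HTCSendPkt(htc, pPacket);  /* fully asynchronous; errors complete via the callback */
    }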
+/*+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
+ @desc: Stop HTC service communications
+ @function name: HTCStop
+ @input: HTCHandle - HTC handle
+ @output:
+ @return:
+ @notes: HTC communications are halted. All receive and pending TX packets will
+ be flushed.
+ @example:
+ @see also:
++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++*/
+void HTCStop(HTC_HANDLE HTCHandle);
+/*+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
+ @desc: Destroy the HTC service
+ @function name: HTCDestroy
+ @input: HTCHandle
+ @output:
+ @return:
+ @notes: This cleans up all resources allocated by HTCCreate().
+ @example:
+ @see also: HTCCreate
++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++*/
+void HTCDestroy(HTC_HANDLE HTCHandle);
+/*+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
+ @desc: Flush pending TX packets
+ @function name: HTCFlushEndpoint
+ @input: HTCHandle - HTC handle
+ Endpoint - Endpoint to flush
+ Tag - flush tag
+ @output:
+ @return:
+ @notes: The Tag parameter is used to selectively flush packets with matching tags.
+ A value of 0 forces all packets to be flushed regardless of tag.
+ @example:
+ @see also: HTCSendPkt
++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++*/
+void HTCFlushEndpoint(HTC_HANDLE HTCHandle, HTC_ENDPOINT_ID Endpoint, HTC_TX_TAG Tag);
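For example, with an HTC handle htc (illustrative), flushing everything pending on ENDPOINT_2 uses the reserved tag value 0, named HTC_TX_PACKET_TAG_ALL in htc_packet.h later in this patch:

    HTCFlushEndpoint(htc, ENDPOINT_2, HTC_TX_PACKET_TAG_ALL);  /* flush regardless of tag */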
+/*+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
+ @desc: Dump credit distribution state
+ @function name: HTCDumpCreditStates
+ @input: HTCHandle - HTC handle
+ @output:
+ @return:
+ @notes: This dumps all credit distribution information to the debugger
+ @example:
+ @see also:
++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++*/
+void HTCDumpCreditStates(HTC_HANDLE HTCHandle);
+/*+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
+ @desc: Indicate a traffic activity change on an endpoint
+ @function name: HTCIndicateActivityChange
+ @input: HTCHandle - HTC handle
+ Endpoint - endpoint in which activity has changed
+ Active - TRUE if active, FALSE if it has become inactive
+ @output:
+ @return:
+ @notes: This triggers the registered credit distribution function to
+ re-adjust credits for active/inactive endpoints.
+ @example:
+ @see also:
++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++*/
+void HTCIndicateActivityChange(HTC_HANDLE HTCHandle,
+ HTC_ENDPOINT_ID Endpoint,
+ A_BOOL Active);
+
+/*+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
+ @desc: Get endpoint statistics
+ @function name: HTCGetEndpointStatistics
+ @input: HTCHandle - HTC handle
+ Endpoint - Endpoint identifier
+ Action - action to take with statistics
+ @output:
+ pStats - statistics that were sampled (can be NULL if Action is HTC_EP_STAT_CLEAR)
+
+ @return: TRUE if statistics profiling is enabled, otherwise FALSE.
+
+ @notes: Statistics collection is a compile-time option and this function may return FALSE
+ if HTC is not compiled with profiling.
+
+ The caller can specify the statistic "action" to take when sampling
+ the statistics. This includes:
+
+ HTC_EP_STAT_SAMPLE: The pStats structure is filled with the current values.
+ HTC_EP_STAT_SAMPLE_AND_CLEAR: The structure is filled and the current statistics
+ are cleared.
+ HTC_EP_STAT_CLEAR: the statistics are cleared; the caller can pass a NULL value for
+ pStats
+
+ @example:
+ @see also:
++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++*/
+A_BOOL HTCGetEndpointStatistics(HTC_HANDLE HTCHandle,
+ HTC_ENDPOINT_ID Endpoint,
+ HTC_ENDPOINT_STAT_ACTION Action,
+ HTC_ENDPOINT_STATS *pStats);
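A sketch of sampling and clearing a single endpoint's counters; a FALSE return only means profiling support was compiled out:

    static void sample_ep_stats(HTC_HANDLE htc, HTC_ENDPOINT_ID ep)
    {
        HTC_ENDPOINT_STATS stats;

        if (HTCGetEndpointStatistics(htc, ep, HTC_EP_STAT_SAMPLE_AND_CLEAR, &stats)) {
            /* profiling is enabled; stats.TxIssued, stats.RxReceived, etc. are now valid */
        }
    }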
+
+/*+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
+ @desc: Unblock HTC message reception
+ @function name: HTCUnblockRecv
+ @input: HTCHandle - HTC handle
+ @output:
+ @return:
+ @notes:
+ HTC will block the receiver if the EpRecvAlloc callback fails to provide a packet.
+ The caller can use this API to indicate to HTC when resources (buffers) are available
+ such that the receiver can be unblocked and HTC may re-attempt fetching the pending message.
+
+ This API is not required if the user uses the EpRecvRefill callback or uses the HTCAddReceivePkt()
+ API to recycle or provide receive packets to HTC.
+
+ @example:
+ @see also:
++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++*/
+void HTCUnblockRecv(HTC_HANDLE HTCHandle);
+
+/*+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
+ @desc: send a series of HTC packets
+ @function name: HTCSendPktsMultiple
+ @input: HTCHandle - HTC handle
+ pPktQueue - local queue holding packets to send
+ @output:
+ @return: A_OK
+ @notes: Caller must initialize each packet using SET_HTC_PACKET_INFO_TX() macro.
+ The queue must only contain packets directed at the same endpoint.
+ Caller supplies a pointer to an HTC_PACKET_QUEUE structure holding the TX packets in FIFO order.
+ This API will remove the packets from the pkt queue and place them into the HTC Tx Queue
+ and bundle messages where possible.
+ The caller may allocate the pkt queue on the stack to hold the packets.
+ This interface is fully asynchronous. On error, HTCSendPktsMultiple will
+ call the registered Endpoint callback to clean up the packets.
+ @example:
+ @see also: HTCFlushEndpoint
++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++*/
+A_STATUS HTCSendPktsMultiple(HTC_HANDLE HTCHandle, HTC_PACKET_QUEUE *pPktQueue);
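A sketch of bundling several already-initialized TX packets for one endpoint through a stack-allocated queue, using the queueing macros defined in htc_packet.h later in this patch:

    static A_STATUS send_burst(HTC_HANDLE htc, HTC_PACKET *pkts[], int count)
    {
        HTC_PACKET_QUEUE q;                    /* may live on the stack, per the notes above */
        int i;

        INIT_HTC_PACKET_QUEUE(&q);
        for (i = 0; i < count; i++) {
            HTC_PACKET_ENQUEUE(&q, pkts[i]);   /* FIFO order */
        }
        return HTCSendPktsMultiple(htc, &q);   /* HTC drains the local queue */
    }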
+
+/*+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
+ @desc: Add multiple receive packets to HTC
+ @function name: HTCAddReceivePktMultiple
+ @input: HTCHandle - HTC handle
+ pPktQueue - HTC receive packet queue holding packets to add
+ @output:
+ @return: A_OK on success
+ @notes: The user must supply HTC packets for capturing incoming HTC frames. The caller
+ must initialize each HTC packet using the SET_HTC_PACKET_INFO_RX_REFILL()
+ macro. The queue must only contain recv packets for the same endpoint.
+ Caller supplies a pointer to an HTC_PACKET_QUEUE structure holding the recv packets.
+ This API will remove the packets from the pkt queue and place them into internal
+ recv packet list.
+ The caller may allocate the pkt queue on the stack to hold the packets.
+ @example:
+ @see also:
++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++*/
+A_STATUS HTCAddReceivePktMultiple(HTC_HANDLE HTCHandle, HTC_PACKET_QUEUE *pPktQueue);
+
+/*+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
+ @desc: Check if an endpoint is marked active
+ @function name: HTCIsEndpointActive
+ @input: HTCHandle - HTC handle
+ Endpoint - endpoint to check for active state
+ @output:
+ @return: returns TRUE if Endpoint is Active
+ @notes:
+ @example:
+ @see also:
++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++*/
+A_BOOL HTCIsEndpointActive(HTC_HANDLE HTCHandle,
+ HTC_ENDPOINT_ID Endpoint);
+
+
+/*+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
+ @desc: Get the number of recv buffers currently queued into an HTC endpoint
+ @function name: HTCGetNumRecvBuffers
+ @input: HTCHandle - HTC handle
+ Endpoint - endpoint to check
+ @output:
+ @return: returns number of buffers in queue
+ @notes:
+ @example:
+ @see also:
++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++*/
+int HTCGetNumRecvBuffers(HTC_HANDLE HTCHandle,
+ HTC_ENDPOINT_ID Endpoint);
+
+/* internally used functions for testing... */
+void HTCEnableRecv(HTC_HANDLE HTCHandle);
+void HTCDisableRecv(HTC_HANDLE HTCHandle);
+A_STATUS HTCWaitForPendingRecv(HTC_HANDLE HTCHandle,
+ A_UINT32 TimeoutInMs,
+ A_BOOL *pbIsRecvPending);
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* _HTC_API_H_ */
diff --git a/drivers/staging/ath6kl/include/htc_packet.h b/drivers/staging/ath6kl/include/htc_packet.h
new file mode 100644
index 000000000000..15175cff2f28
--- /dev/null
+++ b/drivers/staging/ath6kl/include/htc_packet.h
@@ -0,0 +1,227 @@
+//------------------------------------------------------------------------------
+// <copyright file="htc_packet.h" company="Atheros">
+// Copyright (c) 2007-2010 Atheros Corporation. All rights reserved.
+//
+//
+// Permission to use, copy, modify, and/or distribute this software for any
+// purpose with or without fee is hereby granted, provided that the above
+// copyright notice and this permission notice appear in all copies.
+//
+// THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+// WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+// MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+// ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+// WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+// ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+// OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+//
+//
+//------------------------------------------------------------------------------
+//==============================================================================
+// Author(s): ="Atheros"
+//==============================================================================
+#ifndef HTC_PACKET_H_
+#define HTC_PACKET_H_
+
+
+#include "dl_list.h"
+
+/* ------ Endpoint IDS ------ */
+typedef enum
+{
+ ENDPOINT_UNUSED = -1,
+ ENDPOINT_0 = 0,
+ ENDPOINT_1 = 1,
+ ENDPOINT_2 = 2,
+ ENDPOINT_3,
+ ENDPOINT_4,
+ ENDPOINT_5,
+ ENDPOINT_6,
+ ENDPOINT_7,
+ ENDPOINT_8,
+ ENDPOINT_MAX,
+} HTC_ENDPOINT_ID;
+
+struct _HTC_PACKET;
+
+typedef void (* HTC_PACKET_COMPLETION)(void *,struct _HTC_PACKET *);
+
+typedef A_UINT16 HTC_TX_TAG;
+
+typedef struct _HTC_TX_PACKET_INFO {
+ HTC_TX_TAG Tag; /* tag used to selectively flush packets */
+ int CreditsUsed; /* number of credits used for this TX packet (HTC internal) */
+ A_UINT8 SendFlags; /* send flags (HTC internal) */
+ int SeqNo; /* internal seq no for debugging (HTC internal) */
+} HTC_TX_PACKET_INFO;
+
+#define HTC_TX_PACKET_TAG_ALL 0 /* a tag of zero is reserved and used to flush ALL packets */
+#define HTC_TX_PACKET_TAG_INTERNAL 1 /* internal tags start here */
+#define HTC_TX_PACKET_TAG_USER_DEFINED (HTC_TX_PACKET_TAG_INTERNAL + 9) /* user-defined tags start here */
+
+typedef struct _HTC_RX_PACKET_INFO {
+ A_UINT32 ExpectedHdr; /* HTC internal use */
+ A_UINT32 HTCRxFlags; /* HTC internal use */
+ A_UINT32 IndicationFlags; /* indication flags set on each RX packet indication */
+} HTC_RX_PACKET_INFO;
+
+#define HTC_RX_FLAGS_INDICATE_MORE_PKTS (1 << 0) /* more packets on this endpoint are being fetched */
+
+/* wrapper around endpoint-specific packets */
+typedef struct _HTC_PACKET {
+ DL_LIST ListLink; /* double link */
+ void *pPktContext; /* caller's per packet specific context */
+
+ A_UINT8 *pBufferStart; /* the true buffer start; the caller can
+ store the real buffer start here. In
+ receive callbacks, the HTC layer sets pBuffer
+ to the start of the payload past the header. This
+ field allows the caller to reset pBuffer when it
+ recycles receive packets back to HTC */
+ /*
+ * Pointer to the start of the buffer. In the transmit
+ * direction this points to the start of the payload. In the
+ * receive direction, however, the buffer when queued up
+ * points to the start of the HTC header but when returned
+ * to the caller points to the start of the payload
+ */
+ A_UINT8 *pBuffer; /* payload start (RX/TX) */
+ A_UINT32 BufferLength; /* length of buffer */
+ A_UINT32 ActualLength; /* actual length of payload */
+ HTC_ENDPOINT_ID Endpoint; /* endpoint that this packet was sent/recv'd from */
+ A_STATUS Status; /* completion status */
+ union {
+ HTC_TX_PACKET_INFO AsTx; /* Tx Packet specific info */
+ HTC_RX_PACKET_INFO AsRx; /* Rx Packet specific info */
+ } PktInfo;
+
+ /* the following fields are for internal HTC use */
+ HTC_PACKET_COMPLETION Completion; /* completion */
+ void *pContext; /* HTC private completion context */
+} HTC_PACKET;
+
+
+
+#define COMPLETE_HTC_PACKET(p,status) \
+{ \
+ (p)->Status = (status); \
+ (p)->Completion((p)->pContext,(p)); \
+}
+
+#define INIT_HTC_PACKET_INFO(p,b,len) \
+{ \
+ (p)->pBufferStart = (b); \
+ (p)->BufferLength = (len); \
+}
+
+/* macro to set an initial RX packet for refilling HTC */
+#define SET_HTC_PACKET_INFO_RX_REFILL(p,c,b,len,ep) \
+{ \
+ (p)->pPktContext = (c); \
+ (p)->pBuffer = (b); \
+ (p)->pBufferStart = (b); \
+ (p)->BufferLength = (len); \
+ (p)->Endpoint = (ep); \
+}
+
+/* fast macro to recycle an RX packet that will be re-queued to HTC */
+#define HTC_PACKET_RESET_RX(p) \
+ { (p)->pBuffer = (p)->pBufferStart; (p)->ActualLength = 0; }
+
+/* macro to set packet parameters for TX */
+#define SET_HTC_PACKET_INFO_TX(p,c,b,len,ep,tag) \
+{ \
+ (p)->pPktContext = (c); \
+ (p)->pBuffer = (b); \
+ (p)->ActualLength = (len); \
+ (p)->Endpoint = (ep); \
+ (p)->PktInfo.AsTx.Tag = (tag); \
+}
+
+/* HTC Packet Queueing Macros */
+typedef struct _HTC_PACKET_QUEUE {
+ DL_LIST QueueHead;
+ int Depth;
+} HTC_PACKET_QUEUE;
+
+/* initialize queue */
+#define INIT_HTC_PACKET_QUEUE(pQ) \
+{ \
+ DL_LIST_INIT(&(pQ)->QueueHead); \
+ (pQ)->Depth = 0; \
+}
+
+/* enqueue HTC packet to the tail of the queue */
+#define HTC_PACKET_ENQUEUE(pQ,p) \
+{ DL_ListInsertTail(&(pQ)->QueueHead,&(p)->ListLink); \
+ (pQ)->Depth++; \
+}
+
+/* enqueue HTC packet to the head of the queue */
+#define HTC_PACKET_ENQUEUE_TO_HEAD(pQ,p) \
+{ DL_ListInsertHead(&(pQ)->QueueHead,&(p)->ListLink); \
+ (pQ)->Depth++; \
+}
+/* test if a queue is empty */
+#define HTC_QUEUE_EMPTY(pQ) ((pQ)->Depth == 0)
+/* get packet at head without removing it */
+static INLINE HTC_PACKET *HTC_GET_PKT_AT_HEAD(HTC_PACKET_QUEUE *queue) {
+ if (queue->Depth == 0) {
+ return NULL;
+ }
+ return A_CONTAINING_STRUCT((DL_LIST_GET_ITEM_AT_HEAD(&queue->QueueHead)),HTC_PACKET,ListLink);
+}
+/* remove a packet from a queue, wherever it is in the queue */
+#define HTC_PACKET_REMOVE(pQ,p) \
+{ \
+ DL_ListRemove(&(p)->ListLink); \
+ (pQ)->Depth--; \
+}
+
+/* dequeue an HTC packet from the head of the queue */
+static INLINE HTC_PACKET *HTC_PACKET_DEQUEUE(HTC_PACKET_QUEUE *queue) {
+ DL_LIST *pItem = DL_ListRemoveItemFromHead(&queue->QueueHead);
+ if (pItem != NULL) {
+ queue->Depth--;
+ return A_CONTAINING_STRUCT(pItem, HTC_PACKET, ListLink);
+ }
+ return NULL;
+}
+
+/* dequeue an HTC packet from the tail of the queue */
+static INLINE HTC_PACKET *HTC_PACKET_DEQUEUE_TAIL(HTC_PACKET_QUEUE *queue) {
+ DL_LIST *pItem = DL_ListRemoveItemFromTail(&queue->QueueHead);
+ if (pItem != NULL) {
+ queue->Depth--;
+ return A_CONTAINING_STRUCT(pItem, HTC_PACKET, ListLink);
+ }
+ return NULL;
+}
+
+#define HTC_PACKET_QUEUE_DEPTH(pQ) (pQ)->Depth
+
+
+#define HTC_GET_ENDPOINT_FROM_PKT(p) (p)->Endpoint
+#define HTC_GET_TAG_FROM_PKT(p) (p)->PktInfo.AsTx.Tag
+
+ /* transfer the packets from one queue to the tail of another queue */
+#define HTC_PACKET_QUEUE_TRANSFER_TO_TAIL(pQDest,pQSrc) \
+{ \
+ DL_ListTransferItemsToTail(&(pQDest)->QueueHead,&(pQSrc)->QueueHead); \
+ (pQDest)->Depth += (pQSrc)->Depth; \
+ (pQSrc)->Depth = 0; \
+}
+
+ /* fast version to init and add a single packet to a queue */
+#define INIT_HTC_PACKET_QUEUE_AND_ADD(pQ,pP) \
+{ \
+ DL_LIST_INIT_AND_ADD(&(pQ)->QueueHead,&(pP)->ListLink) \
+ (pQ)->Depth = 1; \
+}
+
+#define HTC_PACKET_QUEUE_ITERATE_ALLOW_REMOVE(pQ, pPTemp) \
+ ITERATE_OVER_LIST_ALLOW_REMOVE(&(pQ)->QueueHead,(pPTemp), HTC_PACKET, ListLink)
+
+#define HTC_PACKET_QUEUE_ITERATE_END ITERATE_END
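ITERATE_OVER_LIST_ALLOW_REMOVE comes from dl_list.h and is not shown in this patch; assuming, as the macro arguments suggest, that the caller declares the HTC_PACKET pointer and the iterator assigns each element to it, a selective-removal sketch looks like:

    static void pull_endpoint_pkts(HTC_PACKET_QUEUE *pSrc, HTC_PACKET_QUEUE *pDest,
                                   HTC_ENDPOINT_ID ep)
    {
        HTC_PACKET *pPacket;

        HTC_PACKET_QUEUE_ITERATE_ALLOW_REMOVE(pSrc, pPacket) {
            if (HTC_GET_ENDPOINT_FROM_PKT(pPacket) == ep) {
                HTC_PACKET_REMOVE(pSrc, pPacket);   /* removal is legal inside this iterator */
                HTC_PACKET_ENQUEUE(pDest, pPacket);
            }
        } HTC_PACKET_QUEUE_ITERATE_END;
    }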
+
+#endif /*HTC_PACKET_H_*/
diff --git a/drivers/staging/ath6kl/include/target_reg_table.h b/drivers/staging/ath6kl/include/target_reg_table.h
new file mode 100644
index 000000000000..901f923bee34
--- /dev/null
+++ b/drivers/staging/ath6kl/include/target_reg_table.h
@@ -0,0 +1,244 @@
+//------------------------------------------------------------------------------
+// <copyright file="target_reg_table.h" company="Atheros">
+// Copyright (c) 2004-2010 Atheros Corporation. All rights reserved.
+//
+//
+// Permission to use, copy, modify, and/or distribute this software for any
+// purpose with or without fee is hereby granted, provided that the above
+// copyright notice and this permission notice appear in all copies.
+//
+// THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+// WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+// MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+// ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+// WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+// ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+// OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+//
+//
+//------------------------------------------------------------------------------
+//==============================================================================
+// Target register table macros and structure definitions
+//
+// Author(s): ="Atheros"
+//==============================================================================
+
+#ifndef TARGET_REG_TABLE_H_
+#define TARGET_REG_TABLE_H_
+
+#include "targaddrs.h"
+
+/*** WARNING: Add to the end of the TABLE! Do not change the order ****/
+typedef struct targetdef_s {
+ A_UINT32 d_RTC_BASE_ADDRESS;
+ A_UINT32 d_SYSTEM_SLEEP_OFFSET;
+ A_UINT32 d_SYSTEM_SLEEP_DISABLE_LSB;
+ A_UINT32 d_SYSTEM_SLEEP_DISABLE_MASK;
+ A_UINT32 d_CLOCK_CONTROL_OFFSET;
+ A_UINT32 d_CLOCK_CONTROL_SI0_CLK_MASK;
+ A_UINT32 d_RESET_CONTROL_OFFSET;
+ A_UINT32 d_RESET_CONTROL_SI0_RST_MASK;
+ A_UINT32 d_GPIO_BASE_ADDRESS;
+ A_UINT32 d_GPIO_PIN0_OFFSET;
+ A_UINT32 d_GPIO_PIN1_OFFSET;
+ A_UINT32 d_GPIO_PIN0_CONFIG_MASK;
+ A_UINT32 d_GPIO_PIN1_CONFIG_MASK;
+ A_UINT32 d_SI_CONFIG_BIDIR_OD_DATA_LSB;
+ A_UINT32 d_SI_CONFIG_BIDIR_OD_DATA_MASK;
+ A_UINT32 d_SI_CONFIG_I2C_LSB;
+ A_UINT32 d_SI_CONFIG_I2C_MASK;
+ A_UINT32 d_SI_CONFIG_POS_SAMPLE_LSB;
+ A_UINT32 d_SI_CONFIG_POS_SAMPLE_MASK;
+ A_UINT32 d_SI_CONFIG_INACTIVE_CLK_LSB;
+ A_UINT32 d_SI_CONFIG_INACTIVE_CLK_MASK;
+ A_UINT32 d_SI_CONFIG_INACTIVE_DATA_LSB;
+ A_UINT32 d_SI_CONFIG_INACTIVE_DATA_MASK;
+ A_UINT32 d_SI_CONFIG_DIVIDER_LSB;
+ A_UINT32 d_SI_CONFIG_DIVIDER_MASK;
+ A_UINT32 d_SI_BASE_ADDRESS;
+ A_UINT32 d_SI_CONFIG_OFFSET;
+ A_UINT32 d_SI_TX_DATA0_OFFSET;
+ A_UINT32 d_SI_TX_DATA1_OFFSET;
+ A_UINT32 d_SI_RX_DATA0_OFFSET;
+ A_UINT32 d_SI_RX_DATA1_OFFSET;
+ A_UINT32 d_SI_CS_OFFSET;
+ A_UINT32 d_SI_CS_DONE_ERR_MASK;
+ A_UINT32 d_SI_CS_DONE_INT_MASK;
+ A_UINT32 d_SI_CS_START_LSB;
+ A_UINT32 d_SI_CS_START_MASK;
+ A_UINT32 d_SI_CS_RX_CNT_LSB;
+ A_UINT32 d_SI_CS_RX_CNT_MASK;
+ A_UINT32 d_SI_CS_TX_CNT_LSB;
+ A_UINT32 d_SI_CS_TX_CNT_MASK;
+ A_UINT32 d_BOARD_DATA_SZ;
+ A_UINT32 d_BOARD_EXT_DATA_SZ;
+} TARGET_REGISTER_TABLE;
+
+#define BOARD_DATA_SZ_MAX 2048
+
+#if defined(MY_TARGET_DEF) /* { */
+
+#ifdef ATH_REG_TABLE_DIRECT_ASSIGN
+
+static struct targetdef_s my_target_def = {
+ RTC_BASE_ADDRESS,
+ SYSTEM_SLEEP_OFFSET,
+ SYSTEM_SLEEP_DISABLE_LSB,
+ SYSTEM_SLEEP_DISABLE_MASK,
+ CLOCK_CONTROL_OFFSET,
+ CLOCK_CONTROL_SI0_CLK_MASK,
+ RESET_CONTROL_OFFSET,
+ RESET_CONTROL_SI0_RST_MASK,
+ GPIO_BASE_ADDRESS,
+ GPIO_PIN0_OFFSET,
+ GPIO_PIN0_CONFIG_MASK,
+ GPIO_PIN1_OFFSET,
+ GPIO_PIN1_CONFIG_MASK,
+ SI_CONFIG_BIDIR_OD_DATA_LSB,
+ SI_CONFIG_BIDIR_OD_DATA_MASK,
+ SI_CONFIG_I2C_LSB,
+ SI_CONFIG_I2C_MASK,
+ SI_CONFIG_POS_SAMPLE_LSB,
+ SI_CONFIG_POS_SAMPLE_MASK,
+ SI_CONFIG_INACTIVE_CLK_LSB,
+ SI_CONFIG_INACTIVE_CLK_MASK,
+ SI_CONFIG_INACTIVE_DATA_LSB,
+ SI_CONFIG_INACTIVE_DATA_MASK,
+ SI_CONFIG_DIVIDER_LSB,
+ SI_CONFIG_DIVIDER_MASK,
+ SI_BASE_ADDRESS,
+ SI_CONFIG_OFFSET,
+ SI_TX_DATA0_OFFSET,
+ SI_TX_DATA1_OFFSET,
+ SI_RX_DATA0_OFFSET,
+ SI_RX_DATA1_OFFSET,
+ SI_CS_OFFSET,
+ SI_CS_DONE_ERR_MASK,
+ SI_CS_DONE_INT_MASK,
+ SI_CS_START_LSB,
+ SI_CS_START_MASK,
+ SI_CS_RX_CNT_LSB,
+ SI_CS_RX_CNT_MASK,
+ SI_CS_TX_CNT_LSB,
+ SI_CS_TX_CNT_MASK,
+ MY_TARGET_BOARD_DATA_SZ,
+ MY_TARGET_BOARD_EXT_DATA_SZ,
+};
+
+#else
+
+static struct targetdef_s my_target_def = {
+ .d_RTC_BASE_ADDRESS = RTC_BASE_ADDRESS,
+ .d_SYSTEM_SLEEP_OFFSET = SYSTEM_SLEEP_OFFSET,
+ .d_SYSTEM_SLEEP_DISABLE_LSB = SYSTEM_SLEEP_DISABLE_LSB,
+ .d_SYSTEM_SLEEP_DISABLE_MASK = SYSTEM_SLEEP_DISABLE_MASK,
+ .d_CLOCK_CONTROL_OFFSET = CLOCK_CONTROL_OFFSET,
+ .d_CLOCK_CONTROL_SI0_CLK_MASK = CLOCK_CONTROL_SI0_CLK_MASK,
+ .d_RESET_CONTROL_OFFSET = RESET_CONTROL_OFFSET,
+ .d_RESET_CONTROL_SI0_RST_MASK = RESET_CONTROL_SI0_RST_MASK,
+ .d_GPIO_BASE_ADDRESS = GPIO_BASE_ADDRESS,
+ .d_GPIO_PIN0_OFFSET = GPIO_PIN0_OFFSET,
+ .d_GPIO_PIN0_CONFIG_MASK = GPIO_PIN0_CONFIG_MASK,
+ .d_GPIO_PIN1_OFFSET = GPIO_PIN1_OFFSET,
+ .d_GPIO_PIN1_CONFIG_MASK = GPIO_PIN1_CONFIG_MASK,
+ .d_SI_CONFIG_BIDIR_OD_DATA_LSB = SI_CONFIG_BIDIR_OD_DATA_LSB,
+ .d_SI_CONFIG_BIDIR_OD_DATA_MASK = SI_CONFIG_BIDIR_OD_DATA_MASK,
+ .d_SI_CONFIG_I2C_LSB = SI_CONFIG_I2C_LSB,
+ .d_SI_CONFIG_I2C_MASK = SI_CONFIG_I2C_MASK,
+ .d_SI_CONFIG_POS_SAMPLE_LSB = SI_CONFIG_POS_SAMPLE_LSB,
+ .d_SI_CONFIG_POS_SAMPLE_MASK = SI_CONFIG_POS_SAMPLE_MASK,
+ .d_SI_CONFIG_INACTIVE_CLK_LSB = SI_CONFIG_INACTIVE_CLK_LSB,
+ .d_SI_CONFIG_INACTIVE_CLK_MASK = SI_CONFIG_INACTIVE_CLK_MASK,
+ .d_SI_CONFIG_INACTIVE_DATA_LSB = SI_CONFIG_INACTIVE_DATA_LSB,
+ .d_SI_CONFIG_INACTIVE_DATA_MASK = SI_CONFIG_INACTIVE_DATA_MASK,
+ .d_SI_CONFIG_DIVIDER_LSB = SI_CONFIG_DIVIDER_LSB,
+ .d_SI_CONFIG_DIVIDER_MASK = SI_CONFIG_DIVIDER_MASK,
+ .d_SI_BASE_ADDRESS = SI_BASE_ADDRESS,
+ .d_SI_CONFIG_OFFSET = SI_CONFIG_OFFSET,
+ .d_SI_TX_DATA0_OFFSET = SI_TX_DATA0_OFFSET,
+ .d_SI_TX_DATA1_OFFSET = SI_TX_DATA1_OFFSET,
+ .d_SI_RX_DATA0_OFFSET = SI_RX_DATA0_OFFSET,
+ .d_SI_RX_DATA1_OFFSET = SI_RX_DATA1_OFFSET,
+ .d_SI_CS_OFFSET = SI_CS_OFFSET,
+ .d_SI_CS_DONE_ERR_MASK = SI_CS_DONE_ERR_MASK,
+ .d_SI_CS_DONE_INT_MASK = SI_CS_DONE_INT_MASK,
+ .d_SI_CS_START_LSB = SI_CS_START_LSB,
+ .d_SI_CS_START_MASK = SI_CS_START_MASK,
+ .d_SI_CS_RX_CNT_LSB = SI_CS_RX_CNT_LSB,
+ .d_SI_CS_RX_CNT_MASK = SI_CS_RX_CNT_MASK,
+ .d_SI_CS_TX_CNT_LSB = SI_CS_TX_CNT_LSB,
+ .d_SI_CS_TX_CNT_MASK = SI_CS_TX_CNT_MASK,
+ .d_BOARD_DATA_SZ = MY_TARGET_BOARD_DATA_SZ,
+ .d_BOARD_EXT_DATA_SZ = MY_TARGET_BOARD_EXT_DATA_SZ,
+};
+
+#endif
+
+#if MY_TARGET_BOARD_DATA_SZ > BOARD_DATA_SZ_MAX
+#error "BOARD_DATA_SZ_MAX is too small"
+#endif
+
+struct targetdef_s *MY_TARGET_DEF = &my_target_def;
+
+#else /* } { */
+
+#define RTC_BASE_ADDRESS (targetdef->d_RTC_BASE_ADDRESS)
+#define SYSTEM_SLEEP_OFFSET (targetdef->d_SYSTEM_SLEEP_OFFSET)
+#define SYSTEM_SLEEP_DISABLE_LSB (targetdef->d_SYSTEM_SLEEP_DISABLE_LSB)
+#define SYSTEM_SLEEP_DISABLE_MASK (targetdef->d_SYSTEM_SLEEP_DISABLE_MASK)
+#define CLOCK_CONTROL_OFFSET (targetdef->d_CLOCK_CONTROL_OFFSET)
+#define CLOCK_CONTROL_SI0_CLK_MASK (targetdef->d_CLOCK_CONTROL_SI0_CLK_MASK)
+#define RESET_CONTROL_OFFSET (targetdef->d_RESET_CONTROL_OFFSET)
+#define RESET_CONTROL_SI0_RST_MASK (targetdef->d_RESET_CONTROL_SI0_RST_MASK)
+#define GPIO_BASE_ADDRESS (targetdef->d_GPIO_BASE_ADDRESS)
+#define GPIO_PIN0_OFFSET (targetdef->d_GPIO_PIN0_OFFSET)
+#define GPIO_PIN0_CONFIG_MASK (targetdef->d_GPIO_PIN0_CONFIG_MASK)
+#define GPIO_PIN1_OFFSET (targetdef->d_GPIO_PIN1_OFFSET)
+#define GPIO_PIN1_CONFIG_MASK (targetdef->d_GPIO_PIN1_CONFIG_MASK)
+#define SI_CONFIG_BIDIR_OD_DATA_LSB (targetdef->d_SI_CONFIG_BIDIR_OD_DATA_LSB)
+#define SI_CONFIG_BIDIR_OD_DATA_MASK (targetdef->d_SI_CONFIG_BIDIR_OD_DATA_MASK)
+#define SI_CONFIG_I2C_LSB (targetdef->d_SI_CONFIG_I2C_LSB)
+#define SI_CONFIG_I2C_MASK (targetdef->d_SI_CONFIG_I2C_MASK)
+#define SI_CONFIG_POS_SAMPLE_LSB (targetdef->d_SI_CONFIG_POS_SAMPLE_LSB)
+#define SI_CONFIG_POS_SAMPLE_MASK (targetdef->d_SI_CONFIG_POS_SAMPLE_MASK)
+#define SI_CONFIG_INACTIVE_CLK_LSB (targetdef->d_SI_CONFIG_INACTIVE_CLK_LSB)
+#define SI_CONFIG_INACTIVE_CLK_MASK (targetdef->d_SI_CONFIG_INACTIVE_CLK_MASK)
+#define SI_CONFIG_INACTIVE_DATA_LSB (targetdef->d_SI_CONFIG_INACTIVE_DATA_LSB)
+#define SI_CONFIG_INACTIVE_DATA_MASK (targetdef->d_SI_CONFIG_INACTIVE_DATA_MASK)
+#define SI_CONFIG_DIVIDER_LSB (targetdef->d_SI_CONFIG_DIVIDER_LSB)
+#define SI_CONFIG_DIVIDER_MASK (targetdef->d_SI_CONFIG_DIVIDER_MASK)
+#define SI_BASE_ADDRESS (targetdef->d_SI_BASE_ADDRESS)
+#define SI_CONFIG_OFFSET (targetdef->d_SI_CONFIG_OFFSET)
+#define SI_TX_DATA0_OFFSET (targetdef->d_SI_TX_DATA0_OFFSET)
+#define SI_TX_DATA1_OFFSET (targetdef->d_SI_TX_DATA1_OFFSET)
+#define SI_RX_DATA0_OFFSET (targetdef->d_SI_RX_DATA0_OFFSET)
+#define SI_RX_DATA1_OFFSET (targetdef->d_SI_RX_DATA1_OFFSET)
+#define SI_CS_OFFSET (targetdef->d_SI_CS_OFFSET)
+#define SI_CS_DONE_ERR_MASK (targetdef->d_SI_CS_DONE_ERR_MASK)
+#define SI_CS_DONE_INT_MASK (targetdef->d_SI_CS_DONE_INT_MASK)
+#define SI_CS_START_LSB (targetdef->d_SI_CS_START_LSB)
+#define SI_CS_START_MASK (targetdef->d_SI_CS_START_MASK)
+#define SI_CS_RX_CNT_LSB (targetdef->d_SI_CS_RX_CNT_LSB)
+#define SI_CS_RX_CNT_MASK (targetdef->d_SI_CS_RX_CNT_MASK)
+#define SI_CS_TX_CNT_LSB (targetdef->d_SI_CS_TX_CNT_LSB)
+#define SI_CS_TX_CNT_MASK (targetdef->d_SI_CS_TX_CNT_MASK)
+#define EEPROM_SZ (targetdef->d_BOARD_DATA_SZ)
+#define EEPROM_EXT_SZ (targetdef->d_BOARD_EXT_DATA_SZ)
+
+/* SET macros */
+#define SYSTEM_SLEEP_DISABLE_SET(x) (((x) << SYSTEM_SLEEP_DISABLE_LSB) & SYSTEM_SLEEP_DISABLE_MASK)
+#define SI_CONFIG_BIDIR_OD_DATA_SET(x) (((x) << SI_CONFIG_BIDIR_OD_DATA_LSB) & SI_CONFIG_BIDIR_OD_DATA_MASK)
+#define SI_CONFIG_I2C_SET(x) (((x) << SI_CONFIG_I2C_LSB) & SI_CONFIG_I2C_MASK)
+#define SI_CONFIG_POS_SAMPLE_SET(x) (((x) << SI_CONFIG_POS_SAMPLE_LSB) & SI_CONFIG_POS_SAMPLE_MASK)
+#define SI_CONFIG_INACTIVE_CLK_SET(x) (((x) << SI_CONFIG_INACTIVE_CLK_LSB) & SI_CONFIG_INACTIVE_CLK_MASK)
+#define SI_CONFIG_INACTIVE_DATA_SET(x) (((x) << SI_CONFIG_INACTIVE_DATA_LSB) & SI_CONFIG_INACTIVE_DATA_MASK)
+#define SI_CONFIG_DIVIDER_SET(x) (((x) << SI_CONFIG_DIVIDER_LSB) & SI_CONFIG_DIVIDER_MASK)
+#define SI_CS_START_SET(x) (((x) << SI_CS_START_LSB) & SI_CS_START_MASK)
+#define SI_CS_RX_CNT_SET(x) (((x) << SI_CS_RX_CNT_LSB) & SI_CS_RX_CNT_MASK)
+#define SI_CS_TX_CNT_SET(x) (((x) << SI_CS_TX_CNT_LSB) & SI_CS_TX_CNT_MASK)
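Each _SET macro shifts a field value into position and masks it, so a complete register value is composed by OR-ing several of them together. The write itself goes through whatever register-access routine the HIF layer provides, which is not part of this header:

    /* illustrative only: build an SI_CONFIG value, then write it with the HIF
     * register-write routine (not shown here) at SI_BASE_ADDRESS + SI_CONFIG_OFFSET */
    A_UINT32 si_config = SI_CONFIG_I2C_SET(1) |
                         SI_CONFIG_POS_SAMPLE_SET(1) |
                         SI_CONFIG_INACTIVE_CLK_SET(1) |
                         SI_CONFIG_INACTIVE_DATA_SET(1) |
                         SI_CONFIG_DIVIDER_SET(8);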
+
+#endif /* } */
+
+#endif /*TARGET_REG_TABLE_H_*/
+
+
diff --git a/drivers/staging/ath6kl/include/wlan_api.h b/drivers/staging/ath6kl/include/wlan_api.h
new file mode 100644
index 000000000000..f55a6454a6b4
--- /dev/null
+++ b/drivers/staging/ath6kl/include/wlan_api.h
@@ -0,0 +1,128 @@
+//------------------------------------------------------------------------------
+// Copyright (c) 2004-2010 Atheros Corporation. All rights reserved.
+//
+//
+// Permission to use, copy, modify, and/or distribute this software for any
+// purpose with or without fee is hereby granted, provided that the above
+// copyright notice and this permission notice appear in all copies.
+//
+// THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+// WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+// MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+// ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+// WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+// ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+// OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+//
+//
+//------------------------------------------------------------------------------
+//==============================================================================
+// This file contains the API for the host wlan module
+//
+// Author(s): ="Atheros"
+//==============================================================================
+#ifndef _HOST_WLAN_API_H_
+#define _HOST_WLAN_API_H_
+
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#include <a_osapi.h>
+
+struct ieee80211_node_table;
+struct ieee80211_frame;
+
+struct ieee80211_common_ie {
+ A_UINT16 ie_chan;
+ A_UINT8 *ie_tstamp;
+ A_UINT8 *ie_ssid;
+ A_UINT8 *ie_rates;
+ A_UINT8 *ie_xrates;
+ A_UINT8 *ie_country;
+ A_UINT8 *ie_wpa;
+ A_UINT8 *ie_rsn;
+ A_UINT8 *ie_wmm;
+ A_UINT8 *ie_ath;
+ A_UINT16 ie_capInfo;
+ A_UINT16 ie_beaconInt;
+ A_UINT8 *ie_tim;
+ A_UINT8 *ie_chswitch;
+ A_UINT8 ie_erp;
+ A_UINT8 *ie_wsc;
+ A_UINT8 *ie_htcap;
+ A_UINT8 *ie_htop;
+#ifdef WAPI_ENABLE
+ A_UINT8 *ie_wapi;
+#endif
+};
+
+typedef struct bss {
+ A_UINT8 ni_macaddr[6];
+ A_UINT8 ni_snr;
+ A_INT16 ni_rssi;
+ struct bss *ni_list_next;
+ struct bss *ni_list_prev;
+ struct bss *ni_hash_next;
+ struct bss *ni_hash_prev;
+ struct ieee80211_common_ie ni_cie;
+ A_UINT8 *ni_buf;
+ A_UINT16 ni_framelen;
+ struct ieee80211_node_table *ni_table;
+ A_UINT32 ni_refcnt;
+ int ni_scangen;
+
+ A_UINT32 ni_tstamp;
+ A_UINT32 ni_actcnt;
+#ifdef OS_ROAM_MANAGEMENT
+ A_UINT32 ni_si_gen;
+#endif
+} bss_t;
+
+typedef void wlan_node_iter_func(void *arg, bss_t *);
+
+bss_t *wlan_node_alloc(struct ieee80211_node_table *nt, int wh_size);
+void wlan_node_free(bss_t *ni);
+void wlan_setup_node(struct ieee80211_node_table *nt, bss_t *ni,
+ const A_UINT8 *macaddr);
+bss_t *wlan_find_node(struct ieee80211_node_table *nt, const A_UINT8 *macaddr);
+void wlan_node_reclaim(struct ieee80211_node_table *nt, bss_t *ni);
+void wlan_free_allnodes(struct ieee80211_node_table *nt);
+void wlan_iterate_nodes(struct ieee80211_node_table *nt, wlan_node_iter_func *f,
+ void *arg);
+
+void wlan_node_table_init(void *wmip, struct ieee80211_node_table *nt);
+void wlan_node_table_reset(struct ieee80211_node_table *nt);
+void wlan_node_table_cleanup(struct ieee80211_node_table *nt);
+
+A_STATUS wlan_parse_beacon(A_UINT8 *buf, int framelen,
+ struct ieee80211_common_ie *cie);
+
+A_UINT16 wlan_ieee2freq(int chan);
+A_UINT32 wlan_freq2ieee(A_UINT16 freq);
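These two helpers convert between 802.11 channel numbers and center frequencies in MHz; assuming the standard 2.4 GHz channel mapping, for example:

    A_UINT16 freq = wlan_ieee2freq(6);     /* 2437 MHz for channel 6 */
    A_UINT32 chan = wlan_freq2ieee(2412);  /* channel 1 */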
+
+void wlan_set_nodeage(struct ieee80211_node_table *nt, A_UINT32 nodeAge);
+
+void
+wlan_refresh_inactive_nodes (struct ieee80211_node_table *nt);
+
+bss_t *
+wlan_find_Ssidnode (struct ieee80211_node_table *nt, A_UCHAR *pSsid,
+ A_UINT32 ssidLength, A_BOOL bIsWPA2, A_BOOL bMatchSSID);
+
+void
+wlan_node_return (struct ieee80211_node_table *nt, bss_t *ni);
+
+bss_t *wlan_node_remove(struct ieee80211_node_table *nt, A_UINT8 *bssid);
+
+bss_t *
+wlan_find_matching_Ssidnode (struct ieee80211_node_table *nt, A_UCHAR *pSsid,
+ A_UINT32 ssidLength, A_UINT32 dot11AuthMode, A_UINT32 authMode,
+ A_UINT32 pairwiseCryptoType, A_UINT32 grpwiseCryptoTyp);
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* _HOST_WLAN_API_H_ */
diff --git a/drivers/staging/ath6kl/include/wmi_api.h b/drivers/staging/ath6kl/include/wmi_api.h
new file mode 100644
index 000000000000..4a9154316a35
--- /dev/null
+++ b/drivers/staging/ath6kl/include/wmi_api.h
@@ -0,0 +1,441 @@
+//------------------------------------------------------------------------------
+// <copyright file="wmi_api.h" company="Atheros">
+// Copyright (c) 2004-2010 Atheros Corporation. All rights reserved.
+//
+//
+// Permission to use, copy, modify, and/or distribute this software for any
+// purpose with or without fee is hereby granted, provided that the above
+// copyright notice and this permission notice appear in all copies.
+//
+// THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+// WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+// MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+// ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+// WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+// ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+// OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+//
+//
+//------------------------------------------------------------------------------
+//==============================================================================
+// This file contains the definitions for the Wireless Module Interface (WMI).
+//
+// Author(s): ="Atheros"
+//==============================================================================
+#ifndef _WMI_API_H_
+#define _WMI_API_H_
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+ /* WMI converts a DIX frame with an Ethernet payload (up to 1500 bytes)
+ * to an 802.3 frame (adds SNAP header) and adds on a WMI data header */
+#define WMI_MAX_TX_DATA_FRAME_LENGTH (1500 + sizeof(WMI_DATA_HDR) + sizeof(ATH_MAC_HDR) + sizeof(ATH_LLC_SNAP_HDR))
+
+ /* A normal WMI data frame */
+#define WMI_MAX_NORMAL_RX_DATA_FRAME_LENGTH (1500 + sizeof(WMI_DATA_HDR) + sizeof(ATH_MAC_HDR) + sizeof(ATH_LLC_SNAP_HDR))
+
+ /* An AMSDU frame; the maximum AMSDU length for AR6003 is 3839 bytes */
+#define WMI_MAX_AMSDU_RX_DATA_FRAME_LENGTH (3840 + sizeof(WMI_DATA_HDR) + sizeof(ATH_MAC_HDR) + sizeof(ATH_LLC_SNAP_HDR))
+
+/*
+ * IP QoS Field definitions according to 802.1p
+ */
+#define BEST_EFFORT_PRI 0
+#define BACKGROUND_PRI 1
+#define EXCELLENT_EFFORT_PRI 3
+#define CONTROLLED_LOAD_PRI 4
+#define VIDEO_PRI 5
+#define VOICE_PRI 6
+#define NETWORK_CONTROL_PRI 7
+#define MAX_NUM_PRI 8
+
+#define UNDEFINED_PRI (0xff)
+
+#define WMI_IMPLICIT_PSTREAM_INACTIVITY_INT 5000 /* 5 seconds */
+
+#define A_ROUND_UP(x, y) ((((x) + ((y) - 1)) / (y)) * (y))
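A_ROUND_UP() rounds x up to the next multiple of y using integer arithmetic, e.g.:

    /* A_ROUND_UP(1501, 128) = ((1501 + 127) / 128) * 128 = 12 * 128 = 1536 */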
+
+typedef enum {
+ ATHEROS_COMPLIANCE = 0x1,
+}TSPEC_PARAM_COMPLIANCE;
+
+struct wmi_t;
+
+void *wmi_init(void *devt);
+
+void wmi_qos_state_init(struct wmi_t *wmip);
+void wmi_shutdown(struct wmi_t *wmip);
+HTC_ENDPOINT_ID wmi_get_control_ep(struct wmi_t * wmip);
+void wmi_set_control_ep(struct wmi_t * wmip, HTC_ENDPOINT_ID eid);
+A_UINT16 wmi_get_mapped_qos_queue(struct wmi_t *, A_UINT8);
+A_STATUS wmi_dix_2_dot3(struct wmi_t *wmip, void *osbuf);
+A_STATUS wmi_data_hdr_add(struct wmi_t *wmip, void *osbuf, A_UINT8 msgType, A_BOOL bMoreData, WMI_DATA_HDR_DATA_TYPE data_type,A_UINT8 metaVersion, void *pTxMetaS);
+A_STATUS wmi_dot3_2_dix(void *osbuf);
+
+A_STATUS wmi_dot11_hdr_remove (struct wmi_t *wmip, void *osbuf);
+A_STATUS wmi_dot11_hdr_add(struct wmi_t *wmip, void *osbuf, NETWORK_TYPE mode);
+
+A_STATUS wmi_data_hdr_remove(struct wmi_t *wmip, void *osbuf);
+A_STATUS wmi_syncpoint(struct wmi_t *wmip);
+A_STATUS wmi_syncpoint_reset(struct wmi_t *wmip);
+A_UINT8 wmi_implicit_create_pstream(struct wmi_t *wmip, void *osbuf, A_UINT32 layer2Priority, A_BOOL wmmEnabled);
+
+A_UINT8 wmi_determine_userPriority (A_UINT8 *pkt, A_UINT32 layer2Pri);
+
+A_STATUS wmi_control_rx(struct wmi_t *wmip, void *osbuf);
+void wmi_iterate_nodes(struct wmi_t *wmip, wlan_node_iter_func *f, void *arg);
+void wmi_free_allnodes(struct wmi_t *wmip);
+bss_t *wmi_find_node(struct wmi_t *wmip, const A_UINT8 *macaddr);
+void wmi_free_node(struct wmi_t *wmip, const A_UINT8 *macaddr);
+
+
+typedef enum {
+ NO_SYNC_WMIFLAG = 0,
+ SYNC_BEFORE_WMIFLAG, /* transmit all queued data before cmd */
+ SYNC_AFTER_WMIFLAG, /* any new data waits until cmd execs */
+ SYNC_BOTH_WMIFLAG,
+ END_WMIFLAG /* end marker */
+} WMI_SYNC_FLAG;
+
+A_STATUS wmi_cmd_send(struct wmi_t *wmip, void *osbuf, WMI_COMMAND_ID cmdId,
+ WMI_SYNC_FLAG flag);
+
+A_STATUS wmi_connect_cmd(struct wmi_t *wmip,
+ NETWORK_TYPE netType,
+ DOT11_AUTH_MODE dot11AuthMode,
+ AUTH_MODE authMode,
+ CRYPTO_TYPE pairwiseCrypto,
+ A_UINT8 pairwiseCryptoLen,
+ CRYPTO_TYPE groupCrypto,
+ A_UINT8 groupCryptoLen,
+ int ssidLength,
+ A_UCHAR *ssid,
+ A_UINT8 *bssid,
+ A_UINT16 channel,
+ A_UINT32 ctrl_flags);
+
+A_STATUS wmi_reconnect_cmd(struct wmi_t *wmip,
+ A_UINT8 *bssid,
+ A_UINT16 channel);
+A_STATUS wmi_disconnect_cmd(struct wmi_t *wmip);
+A_STATUS wmi_getrev_cmd(struct wmi_t *wmip);
+A_STATUS wmi_startscan_cmd(struct wmi_t *wmip, WMI_SCAN_TYPE scanType,
+ A_BOOL forceFgScan, A_BOOL isLegacy,
+ A_UINT32 homeDwellTime, A_UINT32 forceScanInterval,
+ A_INT8 numChan, A_UINT16 *channelList);
+A_STATUS wmi_scanparams_cmd(struct wmi_t *wmip, A_UINT16 fg_start_sec,
+ A_UINT16 fg_end_sec, A_UINT16 bg_sec,
+ A_UINT16 minact_chdw_msec,
+ A_UINT16 maxact_chdw_msec, A_UINT16 pas_chdw_msec,
+ A_UINT8 shScanRatio, A_UINT8 scanCtrlFlags,
+ A_UINT32 max_dfsch_act_time,
+ A_UINT16 maxact_scan_per_ssid);
+A_STATUS wmi_bssfilter_cmd(struct wmi_t *wmip, A_UINT8 filter, A_UINT32 ieMask);
+A_STATUS wmi_probedSsid_cmd(struct wmi_t *wmip, A_UINT8 index, A_UINT8 flag,
+ A_UINT8 ssidLength, A_UCHAR *ssid);
+A_STATUS wmi_listeninterval_cmd(struct wmi_t *wmip, A_UINT16 listenInterval, A_UINT16 listenBeacons);
+A_STATUS wmi_bmisstime_cmd(struct wmi_t *wmip, A_UINT16 bmisstime, A_UINT16 bmissbeacons);
+A_STATUS wmi_associnfo_cmd(struct wmi_t *wmip, A_UINT8 ieType,
+ A_UINT8 ieLen, A_UINT8 *ieInfo);
+A_STATUS wmi_powermode_cmd(struct wmi_t *wmip, A_UINT8 powerMode);
+A_STATUS wmi_ibsspmcaps_cmd(struct wmi_t *wmip, A_UINT8 pmEnable, A_UINT8 ttl,
+ A_UINT16 atim_windows, A_UINT16 timeout_value);
+A_STATUS wmi_apps_cmd(struct wmi_t *wmip, A_UINT8 psType, A_UINT32 idle_time,
+ A_UINT32 ps_period, A_UINT8 sleep_period);
+A_STATUS wmi_pmparams_cmd(struct wmi_t *wmip, A_UINT16 idlePeriod,
+ A_UINT16 psPollNum, A_UINT16 dtimPolicy,
+ A_UINT16 wakup_tx_policy, A_UINT16 num_tx_to_wakeup,
+ A_UINT16 ps_fail_event_policy);
+A_STATUS wmi_disctimeout_cmd(struct wmi_t *wmip, A_UINT8 timeout);
+A_STATUS wmi_sync_cmd(struct wmi_t *wmip, A_UINT8 syncNumber);
+A_STATUS wmi_create_pstream_cmd(struct wmi_t *wmip, WMI_CREATE_PSTREAM_CMD *pstream);
+A_STATUS wmi_delete_pstream_cmd(struct wmi_t *wmip, A_UINT8 trafficClass, A_UINT8 streamID);
+A_STATUS wmi_set_framerate_cmd(struct wmi_t *wmip, A_UINT8 bEnable, A_UINT8 type, A_UINT8 subType, A_UINT16 rateMask);
+A_STATUS wmi_set_bitrate_cmd(struct wmi_t *wmip, A_INT32 dataRate, A_INT32 mgmtRate, A_INT32 ctlRate);
+A_STATUS wmi_get_bitrate_cmd(struct wmi_t *wmip);
+A_INT8 wmi_validate_bitrate(struct wmi_t *wmip, A_INT32 rate, A_INT8 *rate_idx);
+A_STATUS wmi_get_regDomain_cmd(struct wmi_t *wmip);
+A_STATUS wmi_get_channelList_cmd(struct wmi_t *wmip);
+A_STATUS wmi_set_channelParams_cmd(struct wmi_t *wmip, A_UINT8 scanParam,
+ WMI_PHY_MODE mode, A_INT8 numChan,
+ A_UINT16 *channelList);
+
+A_STATUS wmi_set_snr_threshold_params(struct wmi_t *wmip,
+ WMI_SNR_THRESHOLD_PARAMS_CMD *snrCmd);
+A_STATUS wmi_set_rssi_threshold_params(struct wmi_t *wmip,
+ WMI_RSSI_THRESHOLD_PARAMS_CMD *rssiCmd);
+A_STATUS wmi_clr_rssi_snr(struct wmi_t *wmip);
+A_STATUS wmi_set_lq_threshold_params(struct wmi_t *wmip,
+ WMI_LQ_THRESHOLD_PARAMS_CMD *lqCmd);
+A_STATUS wmi_set_rts_cmd(struct wmi_t *wmip, A_UINT16 threshold);
+A_STATUS wmi_set_lpreamble_cmd(struct wmi_t *wmip, A_UINT8 status, A_UINT8 preamblePolicy);
+
+A_STATUS wmi_set_error_report_bitmask(struct wmi_t *wmip, A_UINT32 bitmask);
+
+A_STATUS wmi_get_challenge_resp_cmd(struct wmi_t *wmip, A_UINT32 cookie,
+ A_UINT32 source);
+
+A_STATUS wmi_config_debug_module_cmd(struct wmi_t *wmip, A_UINT16 mmask,
+ A_UINT16 tsr, A_BOOL rep, A_UINT16 size,
+ A_UINT32 valid);
+
+A_STATUS wmi_get_stats_cmd(struct wmi_t *wmip);
+
+A_STATUS wmi_addKey_cmd(struct wmi_t *wmip, A_UINT8 keyIndex,
+ CRYPTO_TYPE keyType, A_UINT8 keyUsage,
+ A_UINT8 keyLength,A_UINT8 *keyRSC,
+ A_UINT8 *keyMaterial, A_UINT8 key_op_ctrl, A_UINT8 *mac,
+ WMI_SYNC_FLAG sync_flag);
+A_STATUS wmi_add_krk_cmd(struct wmi_t *wmip, A_UINT8 *krk);
+A_STATUS wmi_delete_krk_cmd(struct wmi_t *wmip);
+A_STATUS wmi_deleteKey_cmd(struct wmi_t *wmip, A_UINT8 keyIndex);
+A_STATUS wmi_set_akmp_params_cmd(struct wmi_t *wmip,
+ WMI_SET_AKMP_PARAMS_CMD *akmpParams);
+A_STATUS wmi_get_pmkid_list_cmd(struct wmi_t *wmip);
+A_STATUS wmi_set_pmkid_list_cmd(struct wmi_t *wmip,
+ WMI_SET_PMKID_LIST_CMD *pmkInfo);
+A_STATUS wmi_abort_scan_cmd(struct wmi_t *wmip);
+A_STATUS wmi_set_txPwr_cmd(struct wmi_t *wmip, A_UINT8 dbM);
+A_STATUS wmi_get_txPwr_cmd(struct wmi_t *wmip);
+A_STATUS wmi_addBadAp_cmd(struct wmi_t *wmip, A_UINT8 apIndex, A_UINT8 *bssid);
+A_STATUS wmi_deleteBadAp_cmd(struct wmi_t *wmip, A_UINT8 apIndex);
+A_STATUS wmi_set_tkip_countermeasures_cmd(struct wmi_t *wmip, A_BOOL en);
+A_STATUS wmi_setPmkid_cmd(struct wmi_t *wmip, A_UINT8 *bssid, A_UINT8 *pmkId,
+ A_BOOL set);
+A_STATUS wmi_set_access_params_cmd(struct wmi_t *wmip, A_UINT8 ac, A_UINT16 txop,
+ A_UINT8 eCWmin, A_UINT8 eCWmax,
+ A_UINT8 aifsn);
+A_STATUS wmi_set_retry_limits_cmd(struct wmi_t *wmip, A_UINT8 frameType,
+ A_UINT8 trafficClass, A_UINT8 maxRetries,
+ A_UINT8 enableNotify);
+
+void wmi_get_current_bssid(struct wmi_t *wmip, A_UINT8 *bssid);
+
+A_STATUS wmi_get_roam_tbl_cmd(struct wmi_t *wmip);
+A_STATUS wmi_get_roam_data_cmd(struct wmi_t *wmip, A_UINT8 roamDataType);
+A_STATUS wmi_set_roam_ctrl_cmd(struct wmi_t *wmip, WMI_SET_ROAM_CTRL_CMD *p,
+ A_UINT8 size);
+A_STATUS wmi_set_powersave_timers_cmd(struct wmi_t *wmip,
+ WMI_POWERSAVE_TIMERS_POLICY_CMD *pCmd,
+ A_UINT8 size);
+
+A_STATUS wmi_set_opt_mode_cmd(struct wmi_t *wmip, A_UINT8 optMode);
+A_STATUS wmi_opt_tx_frame_cmd(struct wmi_t *wmip,
+ A_UINT8 frmType,
+ A_UINT8 *dstMacAddr,
+ A_UINT8 *bssid,
+ A_UINT16 optIEDataLen,
+ A_UINT8 *optIEData);
+
+A_STATUS wmi_set_adhoc_bconIntvl_cmd(struct wmi_t *wmip, A_UINT16 intvl);
+A_STATUS wmi_set_voice_pkt_size_cmd(struct wmi_t *wmip, A_UINT16 voicePktSize);
+A_STATUS wmi_set_max_sp_len_cmd(struct wmi_t *wmip, A_UINT8 maxSpLen);
+A_UINT8 convert_userPriority_to_trafficClass(A_UINT8 userPriority);
+A_UINT8 wmi_get_power_mode_cmd(struct wmi_t *wmip);
+A_STATUS wmi_verify_tspec_params(WMI_CREATE_PSTREAM_CMD *pCmd, A_BOOL tspecCompliance);
+
+#ifdef CONFIG_HOST_TCMD_SUPPORT
+A_STATUS wmi_test_cmd(struct wmi_t *wmip, A_UINT8 *buf, A_UINT32 len);
+#endif
+
+A_STATUS wmi_set_bt_status_cmd(struct wmi_t *wmip, A_UINT8 streamType, A_UINT8 status);
+A_STATUS wmi_set_bt_params_cmd(struct wmi_t *wmip, WMI_SET_BT_PARAMS_CMD* cmd);
+
+A_STATUS wmi_set_btcoex_fe_ant_cmd(struct wmi_t *wmip, WMI_SET_BTCOEX_FE_ANT_CMD * cmd);
+
+A_STATUS wmi_set_btcoex_colocated_bt_dev_cmd(struct wmi_t *wmip,
+ WMI_SET_BTCOEX_COLOCATED_BT_DEV_CMD * cmd);
+
+A_STATUS wmi_set_btcoex_btinquiry_page_config_cmd(struct wmi_t *wmip,
+ WMI_SET_BTCOEX_BTINQUIRY_PAGE_CONFIG_CMD *cmd);
+
+A_STATUS wmi_set_btcoex_sco_config_cmd(struct wmi_t *wmip,
+ WMI_SET_BTCOEX_SCO_CONFIG_CMD * cmd);
+
+A_STATUS wmi_set_btcoex_a2dp_config_cmd(struct wmi_t *wmip,
+ WMI_SET_BTCOEX_A2DP_CONFIG_CMD* cmd);
+
+
+A_STATUS wmi_set_btcoex_aclcoex_config_cmd(struct wmi_t *wmip, WMI_SET_BTCOEX_ACLCOEX_CONFIG_CMD* cmd);
+
+A_STATUS wmi_set_btcoex_debug_cmd(struct wmi_t *wmip, WMI_SET_BTCOEX_DEBUG_CMD * cmd);
+
+A_STATUS wmi_set_btcoex_bt_operating_status_cmd(struct wmi_t * wmip,
+ WMI_SET_BTCOEX_BT_OPERATING_STATUS_CMD * cmd);
+
+A_STATUS wmi_get_btcoex_config_cmd(struct wmi_t * wmip, WMI_GET_BTCOEX_CONFIG_CMD * cmd);
+
+A_STATUS wmi_get_btcoex_stats_cmd(struct wmi_t * wmip);
+
+A_STATUS wmi_SGI_cmd(struct wmi_t *wmip, A_UINT32 sgiMask, A_UINT8 sgiPERThreshold);
+
+/*
+ * This function is used to configure the fixed-rates mask on the target.
+ */
+A_STATUS wmi_set_fixrates_cmd(struct wmi_t *wmip, A_UINT32 fixRatesMask);
+A_STATUS wmi_get_ratemask_cmd(struct wmi_t *wmip);
+
+A_STATUS wmi_set_authmode_cmd(struct wmi_t *wmip, A_UINT8 mode);
+
+A_STATUS wmi_set_reassocmode_cmd(struct wmi_t *wmip, A_UINT8 mode);
+
+A_STATUS wmi_set_qos_supp_cmd(struct wmi_t *wmip,A_UINT8 status);
+A_STATUS wmi_set_wmm_cmd(struct wmi_t *wmip, WMI_WMM_STATUS status);
+A_STATUS wmi_set_wmm_txop(struct wmi_t *wmip, WMI_TXOP_CFG txEnable);
+A_STATUS wmi_set_country(struct wmi_t *wmip, A_UCHAR *countryCode);
+
+A_STATUS wmi_get_keepalive_configured(struct wmi_t *wmip);
+A_UINT8 wmi_get_keepalive_cmd(struct wmi_t *wmip);
+A_STATUS wmi_set_keepalive_cmd(struct wmi_t *wmip, A_UINT8 keepaliveInterval);
+
+A_STATUS wmi_set_appie_cmd(struct wmi_t *wmip, A_UINT8 mgmtFrmType,
+ A_UINT8 ieLen,A_UINT8 *ieInfo);
+
+A_STATUS wmi_set_halparam_cmd(struct wmi_t *wmip, A_UINT8 *cmd, A_UINT16 dataLen);
+
+A_INT32 wmi_get_rate(A_INT8 rateindex);
+
+A_STATUS wmi_set_ip_cmd(struct wmi_t *wmip, WMI_SET_IP_CMD *cmd);
+
+/*Wake on Wireless WMI commands*/
+A_STATUS wmi_set_host_sleep_mode_cmd(struct wmi_t *wmip, WMI_SET_HOST_SLEEP_MODE_CMD *cmd);
+A_STATUS wmi_set_wow_mode_cmd(struct wmi_t *wmip, WMI_SET_WOW_MODE_CMD *cmd);
+A_STATUS wmi_get_wow_list_cmd(struct wmi_t *wmip, WMI_GET_WOW_LIST_CMD *cmd);
+A_STATUS wmi_add_wow_pattern_cmd(struct wmi_t *wmip,
+ WMI_ADD_WOW_PATTERN_CMD *cmd, A_UINT8* pattern, A_UINT8* mask, A_UINT8 pattern_size);
+A_STATUS wmi_del_wow_pattern_cmd(struct wmi_t *wmip,
+ WMI_DEL_WOW_PATTERN_CMD *cmd);
+A_STATUS wmi_set_wsc_status_cmd(struct wmi_t *wmip, A_UINT32 status);
+
+A_STATUS
+wmi_set_params_cmd(struct wmi_t *wmip, A_UINT32 opcode, A_UINT32 length, A_CHAR* buffer);
+
+A_STATUS
+wmi_set_mcast_filter_cmd(struct wmi_t *wmip, A_UINT8 dot1, A_UINT8 dot2, A_UINT8 dot3, A_UINT8 dot4);
+
+A_STATUS
+wmi_del_mcast_filter_cmd(struct wmi_t *wmip, A_UINT8 dot1, A_UINT8 dot2, A_UINT8 dot3, A_UINT8 dot4);
+
+A_STATUS
+wmi_mcast_filter_cmd(struct wmi_t *wmip, A_UINT8 enable);
+
+bss_t *
+wmi_find_Ssidnode (struct wmi_t *wmip, A_UCHAR *pSsid,
+ A_UINT32 ssidLength, A_BOOL bIsWPA2, A_BOOL bMatchSSID);
+
+
+void
+wmi_node_return (struct wmi_t *wmip, bss_t *bss);
+
+void
+wmi_set_nodeage(struct wmi_t *wmip, A_UINT32 nodeAge);
+
+#if defined(CONFIG_TARGET_PROFILE_SUPPORT)
+A_STATUS wmi_prof_cfg_cmd(struct wmi_t *wmip, A_UINT32 period, A_UINT32 nbins);
+A_STATUS wmi_prof_addr_set_cmd(struct wmi_t *wmip, A_UINT32 addr);
+A_STATUS wmi_prof_start_cmd(struct wmi_t *wmip);
+A_STATUS wmi_prof_stop_cmd(struct wmi_t *wmip);
+A_STATUS wmi_prof_count_get_cmd(struct wmi_t *wmip);
+#endif /* CONFIG_TARGET_PROFILE_SUPPORT */
+#ifdef OS_ROAM_MANAGEMENT
+void wmi_scan_indication (struct wmi_t *wmip);
+#endif
+
+A_STATUS
+wmi_set_target_event_report_cmd(struct wmi_t *wmip, WMI_SET_TARGET_EVENT_REPORT_CMD* cmd);
+
+bss_t *wmi_rm_current_bss (struct wmi_t *wmip, A_UINT8 *id);
+A_STATUS wmi_add_current_bss (struct wmi_t *wmip, A_UINT8 *id, bss_t *bss);
+
+
+/*
+ * AP mode
+ */
+A_STATUS
+wmi_ap_profile_commit(struct wmi_t *wmip, WMI_CONNECT_CMD *p);
+
+A_STATUS
+wmi_ap_set_hidden_ssid(struct wmi_t *wmip, A_UINT8 hidden_ssid);
+
+A_STATUS
+wmi_ap_set_num_sta(struct wmi_t *wmip, A_UINT8 num_sta);
+
+A_STATUS
+wmi_ap_set_acl_policy(struct wmi_t *wmip, A_UINT8 policy);
+
+A_STATUS
+wmi_ap_acl_mac_list(struct wmi_t *wmip, WMI_AP_ACL_MAC_CMD *a);
+
+A_UINT8
+acl_add_del_mac(WMI_AP_ACL *a, WMI_AP_ACL_MAC_CMD *acl);
+
+A_STATUS
+wmi_ap_set_mlme(struct wmi_t *wmip, A_UINT8 cmd, A_UINT8 *mac, A_UINT16 reason);
+
+A_STATUS
+wmi_set_pvb_cmd(struct wmi_t *wmip, A_UINT16 aid, A_BOOL flag);
+
+A_STATUS
+wmi_ap_conn_inact_time(struct wmi_t *wmip, A_UINT32 period);
+
+A_STATUS
+wmi_ap_bgscan_time(struct wmi_t *wmip, A_UINT32 period, A_UINT32 dwell);
+
+A_STATUS
+wmi_ap_set_dtim(struct wmi_t *wmip, A_UINT8 dtim);
+
+A_STATUS
+wmi_ap_set_rateset(struct wmi_t *wmip, A_UINT8 rateset);
+
+A_STATUS
+wmi_set_ht_cap_cmd(struct wmi_t *wmip, WMI_SET_HT_CAP_CMD *cmd);
+
+A_STATUS
+wmi_set_ht_op_cmd(struct wmi_t *wmip, A_UINT8 sta_chan_width);
+
+A_STATUS
+wmi_send_hci_cmd(struct wmi_t *wmip, A_UINT8 *buf, A_UINT16 sz);
+
+A_STATUS
+wmi_set_tx_select_rates_cmd(struct wmi_t *wmip, A_UINT32 *pMaskArray);
+
+A_STATUS
+wmi_setup_aggr_cmd(struct wmi_t *wmip, A_UINT8 tid);
+
+A_STATUS
+wmi_delete_aggr_cmd(struct wmi_t *wmip, A_UINT8 tid, A_BOOL uplink);
+
+A_STATUS
+wmi_allow_aggr_cmd(struct wmi_t *wmip, A_UINT16 tx_tidmask, A_UINT16 rx_tidmask);
+
+A_STATUS
+wmi_set_rx_frame_format_cmd(struct wmi_t *wmip, A_UINT8 rxMetaVersion, A_BOOL rxDot11Hdr, A_BOOL defragOnHost);
+
+A_STATUS
+wmi_set_thin_mode_cmd(struct wmi_t *wmip, A_BOOL bThinMode);
+
+A_STATUS
+wmi_set_wlan_conn_precedence_cmd(struct wmi_t *wmip, BT_WLAN_CONN_PRECEDENCE precedence);
+
+A_STATUS
+wmi_set_pmk_cmd(struct wmi_t *wmip, A_UINT8 *pmk);
+
+A_UINT16
+wmi_ieee2freq (int chan);
+
+A_UINT32
+wmi_freq2ieee (A_UINT16 freq);
+
+bss_t *
+wmi_find_matching_Ssidnode (struct wmi_t *wmip, A_UCHAR *pSsid,
+ A_UINT32 ssidLength,
+ A_UINT32 dot11AuthMode, A_UINT32 authMode,
+ A_UINT32 pairwiseCryptoType, A_UINT32 grpwiseCryptoTyp);
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* _WMI_API_H_ */
diff --git a/drivers/staging/ath6kl/miscdrv/ar3kconfig.c b/drivers/staging/ath6kl/miscdrv/ar3kconfig.c
new file mode 100644
index 000000000000..83bc5be3ef1b
--- /dev/null
+++ b/drivers/staging/ath6kl/miscdrv/ar3kconfig.c
@@ -0,0 +1,566 @@
+//------------------------------------------------------------------------------
+// Copyright (c) 2009-2010 Atheros Corporation. All rights reserved.
+//
+//
+// Permission to use, copy, modify, and/or distribute this software for any
+// purpose with or without fee is hereby granted, provided that the above
+// copyright notice and this permission notice appear in all copies.
+//
+// THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+// WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+// MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+// ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+// WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+// ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+// OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+//
+//
+//------------------------------------------------------------------------------
+//==============================================================================
+// AR3K configuration implementation
+//
+// Author(s): ="Atheros"
+//==============================================================================
+
+#include "a_config.h"
+#include "athdefs.h"
+#include "a_types.h"
+#include "a_osapi.h"
+#define ATH_MODULE_NAME misc
+#include "a_debug.h"
+#include "common_drv.h"
+#ifdef EXPORT_HCI_BRIDGE_INTERFACE
+#include "export_hci_transport.h"
+#else
+#include "hci_transport_api.h"
+#endif
+#include "ar3kconfig.h"
+#include "tlpm.h"
+
+#define BAUD_CHANGE_COMMAND_STATUS_OFFSET 5
+#define HCI_EVENT_RESP_TIMEOUTMS 3000
+#define HCI_CMD_OPCODE_BYTE_LOW_OFFSET 0
+#define HCI_CMD_OPCODE_BYTE_HI_OFFSET 1
+#define HCI_EVENT_OPCODE_BYTE_LOW 3
+#define HCI_EVENT_OPCODE_BYTE_HI 4
+#define HCI_CMD_COMPLETE_EVENT_CODE 0xE
+#define HCI_MAX_EVT_RECV_LENGTH 257
+#define EXIT_MIN_BOOT_COMMAND_STATUS_OFFSET 5
+
+A_STATUS AthPSInitialize(AR3K_CONFIG_INFO *hdev);
+
+static A_STATUS SendHCICommand(AR3K_CONFIG_INFO *pConfig,
+ A_UINT8 *pBuffer,
+ int Length)
+{
+ HTC_PACKET *pPacket = NULL;
+ A_STATUS status = A_OK;
+
+ do {
+
+ pPacket = (HTC_PACKET *)A_MALLOC(sizeof(HTC_PACKET));
+ if (NULL == pPacket) {
+ status = A_NO_MEMORY;
+ break;
+ }
+
+ A_MEMZERO(pPacket,sizeof(HTC_PACKET));
+ SET_HTC_PACKET_INFO_TX(pPacket,
+ NULL,
+ pBuffer,
+ Length,
+ HCI_COMMAND_TYPE,
+ AR6K_CONTROL_PKT_TAG);
+
+ /* issue synchronously */
+ status = HCI_TransportSendPkt(pConfig->pHCIDev,pPacket,TRUE);
+
+ } while (FALSE);
+
+ if (pPacket != NULL) {
+ A_FREE(pPacket);
+ }
+
+ return status;
+}
+
+static A_STATUS RecvHCIEvent(AR3K_CONFIG_INFO *pConfig,
+ A_UINT8 *pBuffer,
+ int *pLength)
+{
+ A_STATUS status = A_OK;
+ HTC_PACKET *pRecvPacket = NULL;
+
+ do {
+
+ pRecvPacket = (HTC_PACKET *)A_MALLOC(sizeof(HTC_PACKET));
+ if (NULL == pRecvPacket) {
+ status = A_NO_MEMORY;
+ AR_DEBUG_PRINTF(ATH_DEBUG_ERR,("Failed to alloc HTC struct \n"));
+ break;
+ }
+
+ A_MEMZERO(pRecvPacket,sizeof(HTC_PACKET));
+
+ SET_HTC_PACKET_INFO_RX_REFILL(pRecvPacket,NULL,pBuffer,*pLength,HCI_EVENT_TYPE);
+
+ status = HCI_TransportRecvHCIEventSync(pConfig->pHCIDev,
+ pRecvPacket,
+ HCI_EVENT_RESP_TIMEOUTMS);
+ if (A_FAILED(status)) {
+ break;
+ }
+
+ *pLength = pRecvPacket->ActualLength;
+
+ } while (FALSE);
+
+ if (pRecvPacket != NULL) {
+ A_FREE(pRecvPacket);
+ }
+
+ return status;
+}
+
+A_STATUS SendHCICommandWaitCommandComplete(AR3K_CONFIG_INFO *pConfig,
+ A_UINT8 *pHCICommand,
+ int CmdLength,
+ A_UINT8 **ppEventBuffer,
+ A_UINT8 **ppBufferToFree)
+{
+ A_STATUS status = A_OK;
+ A_UINT8 *pBuffer = NULL;
+ A_UINT8 *pTemp;
+ int length;
+ A_BOOL commandComplete = FALSE;
+ A_UINT8 opCodeBytes[2];
+
+ do {
+
+ length = max(HCI_MAX_EVT_RECV_LENGTH,CmdLength);
+ length += pConfig->pHCIProps->HeadRoom + pConfig->pHCIProps->TailRoom;
+ length += pConfig->pHCIProps->IOBlockPad;
+
+ pBuffer = (A_UINT8 *)A_MALLOC(length);
+ if (NULL == pBuffer) {
+ AR_DEBUG_PRINTF(ATH_DEBUG_ERR,("AR3K Config: Failed to allocate bt buffer \n"));
+ status = A_NO_MEMORY;
+ break;
+ }
+
+ /* get the opcodes to check the command complete event */
+ opCodeBytes[0] = pHCICommand[HCI_CMD_OPCODE_BYTE_LOW_OFFSET];
+ opCodeBytes[1] = pHCICommand[HCI_CMD_OPCODE_BYTE_HI_OFFSET];
+
+ /* copy HCI command */
+ A_MEMCPY(pBuffer + pConfig->pHCIProps->HeadRoom,pHCICommand,CmdLength);
+ /* send command */
+ status = SendHCICommand(pConfig,
+ pBuffer + pConfig->pHCIProps->HeadRoom,
+ CmdLength);
+ if (A_FAILED(status)) {
+ AR_DEBUG_PRINTF(ATH_DEBUG_ERR,("AR3K Config: Failed to send HCI Command (%d) \n", status));
+ AR_DEBUG_PRINTBUF(pHCICommand,CmdLength,"HCI Bridge Failed HCI Command");
+ break;
+ }
+
+ /* reuse buffer to capture command complete event */
+ A_MEMZERO(pBuffer,length);
+ status = RecvHCIEvent(pConfig,pBuffer,&length);
+ if (A_FAILED(status)) {
+ AR_DEBUG_PRINTF(ATH_DEBUG_ERR,("AR3K Config: HCI event recv failed \n"));
+ AR_DEBUG_PRINTBUF(pHCICommand,CmdLength,"HCI Bridge Failed HCI Command");
+ break;
+ }
+
+ pTemp = pBuffer + pConfig->pHCIProps->HeadRoom;
+ if (pTemp[0] == HCI_CMD_COMPLETE_EVENT_CODE) {
+ if ((pTemp[HCI_EVENT_OPCODE_BYTE_LOW] == opCodeBytes[0]) &&
+ (pTemp[HCI_EVENT_OPCODE_BYTE_HI] == opCodeBytes[1])) {
+ commandComplete = TRUE;
+ }
+ }
+
+ if (!commandComplete) {
+ AR_DEBUG_PRINTF(ATH_DEBUG_ERR,("AR3K Config: Unexpected HCI event : %d \n",pTemp[0]));
+ AR_DEBUG_PRINTBUF(pTemp,pTemp[1],"Unexpected HCI event");
+ status = A_ECOMM;
+ break;
+ }
+
+ if (ppEventBuffer != NULL) {
+ /* caller wants to look at the event */
+ *ppEventBuffer = pTemp;
+ if (ppBufferToFree == NULL) {
+ status = A_EINVAL;
+ break;
+ }
+ /* caller must free the buffer */
+ *ppBufferToFree = pBuffer;
+ pBuffer = NULL;
+ }
+
+ } while (FALSE);
+
+ if (pBuffer != NULL) {
+ A_FREE(pBuffer);
+ }
+
+ return status;
+}
+
+static A_STATUS AR3KConfigureHCIBaud(AR3K_CONFIG_INFO *pConfig)
+{
+ A_STATUS status = A_OK;
+ A_UINT8 hciBaudChangeCommand[] = {0x0c,0xfc,0x2,0,0};
+ A_UINT16 baudVal;
+ A_UINT8 *pEvent = NULL;
+ A_UINT8 *pBufferToFree = NULL;
+
+ do {
+
+ if (pConfig->Flags & AR3K_CONFIG_FLAG_SET_AR3K_BAUD) {
+ baudVal = (A_UINT16)(pConfig->AR3KBaudRate / 100);
+ hciBaudChangeCommand[3] = (A_UINT8)baudVal;
+ hciBaudChangeCommand[4] = (A_UINT8)(baudVal >> 8);
+
+ status = SendHCICommandWaitCommandComplete(pConfig,
+ hciBaudChangeCommand,
+ sizeof(hciBaudChangeCommand),
+ &pEvent,
+ &pBufferToFree);
+ if (A_FAILED(status)) {
+ AR_DEBUG_PRINTF(ATH_DEBUG_ERR,("AR3K Config: Baud rate change failed! \n"));
+ break;
+ }
+
+ if (pEvent[BAUD_CHANGE_COMMAND_STATUS_OFFSET] != 0) {
+ AR_DEBUG_PRINTF(ATH_DEBUG_ERR,
+ ("AR3K Config: Baud change command event status failed: %d \n",
+ pEvent[BAUD_CHANGE_COMMAND_STATUS_OFFSET]));
+ status = A_ECOMM;
+ break;
+ }
+
+ AR_DEBUG_PRINTF(ATH_DEBUG_ANY,
+ ("AR3K Config: Baud Changed to %d \n",pConfig->AR3KBaudRate));
+ }
+
+ if (pConfig->Flags & AR3K_CONFIG_FLAG_AR3K_BAUD_CHANGE_DELAY) {
+ /* some versions of AR3K do not switch baud rate immediately; allow up to 300 ms */
+ A_MDELAY(325);
+ }
+
+ if (pConfig->Flags & AR3K_CONFIG_FLAG_SET_AR6K_SCALE_STEP) {
+ /* Tell target to change UART baud rate for AR6K */
+ status = HCI_TransportSetBaudRate(pConfig->pHCIDev, pConfig->AR3KBaudRate);
+
+ if (A_FAILED(status)) {
+ AR_DEBUG_PRINTF(ATH_DEBUG_ERR,
+ ("AR3K Config: failed to set scale and step values: %d \n", status));
+ break;
+ }
+
+ AR_DEBUG_PRINTF(ATH_DEBUG_ANY,
+ ("AR3K Config: Baud changed to %d for AR6K\n", pConfig->AR3KBaudRate));
+ }
+
+ } while (FALSE);
+
+ if (pBufferToFree != NULL) {
+ A_FREE(pBufferToFree);
+ }
+
+ return status;
+}
+
+static A_STATUS AR3KExitMinBoot(AR3K_CONFIG_INFO *pConfig)
+{
+ A_STATUS status;
+ A_CHAR exitMinBootCmd[] = {0x25,0xFC,0x0c,0x03,0x00,0x00,0x00,0x00,0x00,0x00,
+ 0x00,0x00,0x00,0x00,0x00};
+ A_UINT8 *pEvent = NULL;
+ A_UINT8 *pBufferToFree = NULL;
+
+ status = SendHCICommandWaitCommandComplete(pConfig,
+ exitMinBootCmd,
+ sizeof(exitMinBootCmd),
+ &pEvent,
+ &pBufferToFree);
+
+ if (A_SUCCESS(status)) {
+ if (pEvent[EXIT_MIN_BOOT_COMMAND_STATUS_OFFSET] != 0) {
+ AR_DEBUG_PRINTF(ATH_DEBUG_ERR,
+ ("AR3K Config: MinBoot exit command event status failed: %d \n",
+ pEvent[EXIT_MIN_BOOT_COMMAND_STATUS_OFFSET]));
+ status = A_ECOMM;
+ } else {
+ AR_DEBUG_PRINTF(ATH_DEBUG_INFO,
+ ("AR3K Config: MinBoot Exit Command Complete (Success) \n"));
+ A_MDELAY(1);
+ }
+ } else {
+ AR_DEBUG_PRINTF(ATH_DEBUG_ERR,("AR3K Config: MinBoot Exit Failed! \n"));
+ }
+
+ if (pBufferToFree != NULL) {
+ A_FREE(pBufferToFree);
+ }
+
+ return status;
+}
+
+static A_STATUS AR3KConfigureSendHCIReset(AR3K_CONFIG_INFO *pConfig)
+{
+ A_STATUS status = A_OK;
+ A_UINT8 hciResetCommand[] = {0x03,0x0c,0x0};
+ A_UINT8 *pEvent = NULL;
+ A_UINT8 *pBufferToFree = NULL;
+
+ status = SendHCICommandWaitCommandComplete( pConfig,
+ hciResetCommand,
+ sizeof(hciResetCommand),
+ &pEvent,
+ &pBufferToFree );
+
+ if (A_FAILED(status)) {
+ AR_DEBUG_PRINTF(ATH_DEBUG_ERR,("AR3K Config: HCI reset failed! \n"));
+ }
+
+ if (pBufferToFree != NULL) {
+ A_FREE(pBufferToFree);
+ }
+
+ return status;
+}
+
+static A_STATUS AR3KEnableTLPM(AR3K_CONFIG_INFO *pConfig)
+{
+ A_STATUS status;
+ /* AR3K vendor specific command for Host Wakeup Config */
+ A_CHAR hostWakeupConfig[] = {0x31,0xFC,0x18,
+ 0x02,0x00,0x00,0x00,
+ 0x01,0x00,0x00,0x00,
+ TLPM_DEFAULT_IDLE_TIMEOUT_LSB,TLPM_DEFAULT_IDLE_TIMEOUT_MSB,0x00,0x00, //idle timeout in ms
+ 0x00,0x00,0x00,0x00,
+ TLPM_DEFAULT_WAKEUP_TIMEOUT_MS,0x00,0x00,0x00, //wakeup timeout in ms
+ 0x00,0x00,0x00,0x00};
+ /* AR3K vendor specific command for Target Wakeup Config */
+ A_CHAR targetWakeupConfig[] = {0x31,0xFC,0x18,
+ 0x04,0x00,0x00,0x00,
+ 0x01,0x00,0x00,0x00,
+ TLPM_DEFAULT_IDLE_TIMEOUT_LSB,TLPM_DEFAULT_IDLE_TIMEOUT_MSB,0x00,0x00, //idle timeout in ms
+ 0x00,0x00,0x00,0x00,
+ TLPM_DEFAULT_WAKEUP_TIMEOUT_MS,0x00,0x00,0x00, //wakeup timeout in ms
+ 0x00,0x00,0x00,0x00};
+ /* AR3K vendor specific command for Host Wakeup Enable */
+ A_CHAR hostWakeupEnable[] = {0x31,0xFC,0x4,
+ 0x01,0x00,0x00,0x00};
+ /* AR3K vendor specific command for Target Wakeup Enable */
+ A_CHAR targetWakeupEnable[] = {0x31,0xFC,0x4,
+ 0x06,0x00,0x00,0x00};
+ /* AR3K vendor specific command for Sleep Enable */
+ A_CHAR sleepEnable[] = {0x4,0xFC,0x1,
+ 0x1};
+ A_UINT8 *pEvent = NULL;
+ A_UINT8 *pBufferToFree = NULL;
+
+ if (0 != pConfig->IdleTimeout) {
+ A_UINT8 idle_lsb = pConfig->IdleTimeout & 0xFF;
+ A_UINT8 idle_msb = (pConfig->IdleTimeout & 0xFF00) >> 8;
+ hostWakeupConfig[11] = targetWakeupConfig[11] = idle_lsb;
+ hostWakeupConfig[12] = targetWakeupConfig[12] = idle_msb;
+ }
+
+ if (0 != pConfig->WakeupTimeout) {
+ hostWakeupConfig[19] = targetWakeupConfig[19] = (pConfig->WakeupTimeout & 0xFF);
+ }
+
+ status = SendHCICommandWaitCommandComplete(pConfig,
+ hostWakeupConfig,
+ sizeof(hostWakeupConfig),
+ &pEvent,
+ &pBufferToFree);
+ if (pBufferToFree != NULL) {
+ A_FREE(pBufferToFree);
+ }
+ if (A_FAILED(status)) {
+ AR_DEBUG_PRINTF(ATH_DEBUG_ERR,("HostWakeup Config Failed! \n"));
+ return status;
+ }
+
+ pEvent = NULL;
+ pBufferToFree = NULL;
+ status = SendHCICommandWaitCommandComplete(pConfig,
+ targetWakeupConfig,
+ sizeof(targetWakeupConfig),
+ &pEvent,
+ &pBufferToFree);
+ if (pBufferToFree != NULL) {
+ A_FREE(pBufferToFree);
+ }
+ if (A_FAILED(status)) {
+ AR_DEBUG_PRINTF(ATH_DEBUG_ERR,("Target Wakeup Config Failed! \n"));
+ return status;
+ }
+
+ pEvent = NULL;
+ pBufferToFree = NULL;
+ status = SendHCICommandWaitCommandComplete(pConfig,
+ hostWakeupEnable,
+ sizeof(hostWakeupEnable),
+ &pEvent,
+ &pBufferToFree);
+ if (pBufferToFree != NULL) {
+ A_FREE(pBufferToFree);
+ }
+ if (A_FAILED(status)) {
+ AR_DEBUG_PRINTF(ATH_DEBUG_ERR,("HostWakeup Enable Failed! \n"));
+ return status;
+ }
+
+ pEvent = NULL;
+ pBufferToFree = NULL;
+ status = SendHCICommandWaitCommandComplete(pConfig,
+ targetWakeupEnable,
+ sizeof(targetWakeupEnable),
+ &pEvent,
+ &pBufferToFree);
+ if (pBufferToFree != NULL) {
+ A_FREE(pBufferToFree);
+ }
+ if (A_FAILED(status)) {
+ AR_DEBUG_PRINTF(ATH_DEBUG_ERR,("Target Wakeup Enable Failed! \n"));
+ return status;
+ }
+
+ pEvent = NULL;
+ pBufferToFree = NULL;
+ status = SendHCICommandWaitCommandComplete(pConfig,
+ sleepEnable,
+ sizeof(sleepEnable),
+ &pEvent,
+ &pBufferToFree);
+ if (pBufferToFree != NULL) {
+ A_FREE(pBufferToFree);
+ }
+ if (A_FAILED(status)) {
+ AR_DEBUG_PRINTF(ATH_DEBUG_ERR,("Sleep Enable Failed! \n"));
+ }
+
+ AR_DEBUG_PRINTF(ATH_DEBUG_ERR,("AR3K Config: Enable TLPM Completed (status = %d) \n",status));
+
+ return status;
+}
+
+A_STATUS AR3KConfigure(AR3K_CONFIG_INFO *pConfig)
+{
+ A_STATUS status = A_OK;
+
+ AR_DEBUG_PRINTF(ATH_DEBUG_INFO,("AR3K Config: Configuring AR3K ...\n"));
+
+ do {
+
+ if ((pConfig->pHCIDev == NULL) || (pConfig->pHCIProps == NULL) || (pConfig->pHIFDevice == NULL)) {
+ status = A_EINVAL;
+ break;
+ }
+
+ /* disable asynchronous recv while we issue commands and receive events synchronously */
+ status = HCI_TransportEnableDisableAsyncRecv(pConfig->pHCIDev,FALSE);
+ if (A_FAILED(status)) {
+ break;
+ }
+
+ if (pConfig->Flags & AR3K_CONFIG_FLAG_FORCE_MINBOOT_EXIT) {
+ status = AR3KExitMinBoot(pConfig);
+ if (A_FAILED(status)) {
+ break;
+ }
+ }
+
+
+ /* Load the patch and PS configuration files if available */
+ if (A_OK != AthPSInitialize(pConfig)) {
+ AR_DEBUG_PRINTF(ATH_DEBUG_ERR,("Patch Download Failed!\n"));
+ }
+
+ /* Send an HCI reset so the PS tags take effect */
+ AR3KConfigureSendHCIReset(pConfig);
+
+ if (pConfig->Flags &
+ (AR3K_CONFIG_FLAG_SET_AR3K_BAUD | AR3K_CONFIG_FLAG_SET_AR6K_SCALE_STEP)) {
+ status = AR3KConfigureHCIBaud(pConfig);
+ if (A_FAILED(status)) {
+ break;
+ }
+ }
+
+
+
+ if (pConfig->PwrMgmtEnabled) {
+ /* the delay is required after the previous HCI reset before further
+ * HCI commands can be issued
+ */
+ A_MDELAY(200);
+ AR3KEnableTLPM(pConfig);
+ }
+
+ /* re-enable asynchronous recv */
+ status = HCI_TransportEnableDisableAsyncRecv(pConfig->pHCIDev,TRUE);
+ if (A_FAILED(status)) {
+ break;
+ }
+
+
+ } while (FALSE);
+
+
+ AR_DEBUG_PRINTF(ATH_DEBUG_INFO,("AR3K Config: Configuration Complete (status = %d) \n",status));
+
+ return status;
+}
+
+A_STATUS AR3KConfigureExit(void *config)
+{
+ A_STATUS status = A_OK;
+ AR3K_CONFIG_INFO *pConfig = (AR3K_CONFIG_INFO *)config;
+
+ AR_DEBUG_PRINTF(ATH_DEBUG_INFO,("AR3K Config: Cleaning up AR3K ...\n"));
+
+ do {
+
+ if ((pConfig->pHCIDev == NULL) || (pConfig->pHCIProps == NULL) || (pConfig->pHIFDevice == NULL)) {
+ status = A_EINVAL;
+ break;
+ }
+
+ /* disable asynchronous recv while we issue commands and receive events synchronously */
+ status = HCI_TransportEnableDisableAsyncRecv(pConfig->pHCIDev,FALSE);
+ if (A_FAILED(status)) {
+ break;
+ }
+
+ if (pConfig->Flags &
+ (AR3K_CONFIG_FLAG_SET_AR3K_BAUD | AR3K_CONFIG_FLAG_SET_AR6K_SCALE_STEP)) {
+ status = AR3KConfigureHCIBaud(pConfig);
+ if (A_FAILED(status)) {
+ break;
+ }
+ }
+
+ /* re-enable asynchronous recv */
+ status = HCI_TransportEnableDisableAsyncRecv(pConfig->pHCIDev,TRUE);
+ if (A_FAILED(status)) {
+ break;
+ }
+
+
+ } while (FALSE);
+
+
+ AR_DEBUG_PRINTF(ATH_DEBUG_INFO,("AR3K Config: Cleanup Complete (status = %d) \n",status));
+
+ return status;
+}
+
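
A minimal usage sketch of the entry points above (the caller name, handle types and baud value are illustrative assumptions; only the AR3K_CONFIG_INFO fields and flags consumed by AR3KConfigure() are taken from the code):

/* Hypothetical bridge setup path; handle types and values are assumptions. */
static A_STATUS setup_ar3k_example(void *hciDev, void *hciProps, void *hifDevice)
{
    AR3K_CONFIG_INFO config;

    A_MEMZERO(&config, sizeof(config));
    config.pHCIDev    = hciDev;      /* synchronous HCI send/recv handle       */
    config.pHCIProps  = hciProps;    /* supplies HeadRoom/TailRoom/IOBlockPad   */
    config.pHIFDevice = hifDevice;

    /* Optionally raise the AR3K UART baud rate and wait for it to settle. */
    config.Flags |= AR3K_CONFIG_FLAG_SET_AR3K_BAUD |
                    AR3K_CONFIG_FLAG_AR3K_BAUD_CHANGE_DELAY;
    config.AR3KBaudRate = 3000000;

    /* Optionally let AR3KEnableTLPM() program the sleep/wakeup parameters. */
    config.PwrMgmtEnabled = TRUE;

    /* Downloads PS/patch data, changes baud and enables TLPM as requested;
     * AR3KConfigureExit(&config) is the matching teardown hook. */
    return AR3KConfigure(&config);
}
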
diff --git a/drivers/staging/ath6kl/miscdrv/ar3kps/ar3kpsconfig.c b/drivers/staging/ath6kl/miscdrv/ar3kps/ar3kpsconfig.c
new file mode 100644
index 000000000000..0e298dba9fc8
--- /dev/null
+++ b/drivers/staging/ath6kl/miscdrv/ar3kps/ar3kpsconfig.c
@@ -0,0 +1,572 @@
+/*
+ * Copyright (c) 2004-2010 Atheros Communications Inc.
+ * All rights reserved.
+ *
+ * This file implements the Atheros PS and patch download for the HCI UART transport driver.
+ * This file can be used for HCI SDIO transport implementation for AR6002 with HCI_TRANSPORT_SDIO
+ * defined.
+ *
+ *
+ * ar3kpsconfig.c
+ *
+ *
+ *
+ * The software source and binaries included in this development package are
+ * licensed, not sold. You, or your company, received the package under one
+ * or more license agreements. The rights granted to you are specifically
+ * listed in these license agreement(s). All other rights remain with Atheros
+ * Communications, Inc., its subsidiaries, or the respective owner including
+ * those listed on the included copyright notices. Distribution of any
+ * portion of this package must be in strict compliance with the license
+ * agreement(s) terms.
+ *
+ *
+ *
+ */
+
+
+
+#include "ar3kpsconfig.h"
+#ifndef HCI_TRANSPORT_SDIO
+#include "hci_ath.h"
+#include "hci_uart.h"
+#endif /* #ifndef HCI_TRANSPORT_SDIO */
+
+#define MAX_FW_PATH_LEN 50
+#define MAX_BDADDR_FORMAT_LENGTH 30
+
+/*
+ * Structure used to pass the HCI packet, packet length and device info
+ * together as a parameter to the PS thread.
+ */
+typedef struct {
+
+ PSCmdPacket *HciCmdList;
+ A_UINT32 num_packets;
+ AR3K_CONFIG_INFO *dev;
+}HciCommandListParam;
+
+A_STATUS SendHCICommandWaitCommandComplete(AR3K_CONFIG_INFO *pConfig,
+ A_UINT8 *pHCICommand,
+ int CmdLength,
+ A_UINT8 **ppEventBuffer,
+ A_UINT8 **ppBufferToFree);
+
+A_UINT32 Rom_Version;
+A_UINT32 Build_Version;
+extern A_BOOL BDADDR;
+
+A_STATUS getDeviceType(AR3K_CONFIG_INFO *pConfig, A_UINT32 * code);
+A_STATUS ReadVersionInfo(AR3K_CONFIG_INFO *pConfig);
+#ifndef HCI_TRANSPORT_SDIO
+
+DECLARE_WAIT_QUEUE_HEAD(PsCompleteEvent);
+DECLARE_WAIT_QUEUE_HEAD(HciEvent);
+A_UCHAR *HciEventpacket;
+rwlock_t syncLock;
+wait_queue_t Eventwait;
+
+int PSHciWritepacket(struct hci_dev*,A_UCHAR* Data, A_UINT32 len);
+extern char *bdaddr;
+#endif /* HCI_TRANSPORT_SDIO */
+
+A_STATUS write_bdaddr(AR3K_CONFIG_INFO *pConfig,A_UCHAR *bdaddr,int type);
+
+int PSSendOps(void *arg);
+
+#ifdef BT_PS_DEBUG
+void Hci_log(A_UCHAR * log_string,A_UCHAR *data,A_UINT32 len)
+{
+ int i;
+ AR_DEBUG_PRINTF(ATH_DEBUG_ERR,("%s : ",log_string));
+ for (i = 0; i < len; i++) {
+ AR_DEBUG_PRINTF(ATH_DEBUG_ERR,("0x%02x ", data[i]));
+ }
+ AR_DEBUG_PRINTF(ATH_DEBUG_ERR,("\n...................................\n"));
+}
+#else
+#define Hci_log(string,data,len)
+#endif /* BT_PS_DEBUG */
+
+
+
+
+A_STATUS AthPSInitialize(AR3K_CONFIG_INFO *hdev)
+{
+ A_STATUS status = A_OK;
+ if(hdev == NULL) {
+ AR_DEBUG_PRINTF(ATH_DEBUG_ERR,("Invalid Device handle received\n"));
+ return A_ERROR;
+ }
+
+#ifndef HCI_TRANSPORT_SDIO
+ DECLARE_WAITQUEUE(wait, current);
+#endif /* HCI_TRANSPORT_SDIO */
+
+
+#ifdef HCI_TRANSPORT_SDIO
+ status = PSSendOps((void*)hdev);
+#else
+ if(InitPSState(hdev) == -1) {
+ return A_ERROR;
+ }
+ allow_signal(SIGKILL);
+ add_wait_queue(&PsCompleteEvent,&wait);
+ set_current_state(TASK_INTERRUPTIBLE);
+ if(!kernel_thread(PSSendOps,(void*)hdev,CLONE_FS|CLONE_FILES|CLONE_SIGHAND|SIGCHLD)) {
+ AR_DEBUG_PRINTF(ATH_DEBUG_ERR,("Kthread Failed\n"));
+ remove_wait_queue(&PsCompleteEvent,&wait);
+ return A_ERROR;
+ }
+ wait_event_interruptible(PsCompleteEvent,(PSTagMode == FALSE));
+ set_current_state(TASK_RUNNING);
+ remove_wait_queue(&PsCompleteEvent,&wait);
+
+#endif /* HCI_TRANSPORT_SDIO */
+
+
+ return status;
+
+}
+
+int PSSendOps(void *arg)
+{
+ int i;
+ int status = 0;
+ PSCmdPacket *HciCmdList; /* List storing the commands */
+ const struct firmware* firmware;
+ A_UINT32 numCmds;
+ A_UINT8 *event;
+ A_UINT8 *bufferToFree;
+ struct hci_dev *device;
+ A_UCHAR *buffer;
+ A_UINT32 len;
+ A_UINT32 DevType;
+ A_UCHAR *PsFileName;
+ A_UCHAR *patchFileName;
+ A_UCHAR *path = NULL;
+ A_UCHAR *config_path = NULL;
+ A_UCHAR config_bdaddr[MAX_BDADDR_FORMAT_LENGTH];
+ AR3K_CONFIG_INFO *hdev = (AR3K_CONFIG_INFO*)arg;
+ struct device *firmwareDev = NULL;
+ status = 0;
+ HciCmdList = NULL;
+#ifdef HCI_TRANSPORT_SDIO
+ device = hdev->pBtStackHCIDev;
+ firmwareDev = device->parent;
+#else
+ device = hdev;
+ firmwareDev = &device->dev;
+ AthEnableSyncCommandOp(TRUE);
+#endif /* HCI_TRANSPORT_SDIO */
+ /* First determine whether the controller is an FPGA or an ASIC; the PS file to be
+ * downloaded differs by device type.
+ */
+
+ path =(A_UCHAR *)A_MALLOC(MAX_FW_PATH_LEN);
+ if(path == NULL) {
+ AR_DEBUG_PRINTF(ATH_DEBUG_ERR, ("Malloc failed to allocate %d bytes for path\n", MAX_FW_PATH_LEN));
+ goto complete;
+ }
+ config_path = (A_UCHAR *) A_MALLOC(MAX_FW_PATH_LEN);
+ if(config_path == NULL) {
+ AR_DEBUG_PRINTF(ATH_DEBUG_ERR, ("Malloc failed to allocate %d bytes for config_path\n", MAX_FW_PATH_LEN));
+ goto complete;
+ }
+
+ if(A_ERROR == getDeviceType(hdev,&DevType)) {
+ status = 1;
+ goto complete;
+ }
+ if(A_ERROR == ReadVersionInfo(hdev)) {
+ status = 1;
+ goto complete;
+ }
+
+ patchFileName = PATCH_FILE;
+ snprintf(path, MAX_FW_PATH_LEN, "%s/%xcoex/",CONFIG_PATH,Rom_Version);
+ if(DevType){
+ if(DevType == 0xdeadc0de){
+ PsFileName = PS_ASIC_FILE;
+ } else{
+ AR_DEBUG_PRINTF(ATH_DEBUG_ERR,(" FPGA Test Image : %x %x \n",Rom_Version,Build_Version));
+ if((Rom_Version == 0x99999999) && (Build_Version == 1)){
+
+ AR_DEBUG_PRINTF(ATH_DEBUG_ERR,("FPGA Test Image : Skipping Patch File load\n"));
+ patchFileName = NULL;
+ }
+ PsFileName = PS_FPGA_FILE;
+ }
+ }
+ else{
+ PsFileName = PS_ASIC_FILE;
+ }
+
+ snprintf(config_path, MAX_FW_PATH_LEN, "%s%s",path,PsFileName);
+ AR_DEBUG_PRINTF(ATH_DEBUG_ERR,("%x: FPGA/ASIC PS File Name %s\n", DevType,config_path));
+ /* Read the PS file to a dynamically allocated buffer */
+ if(A_REQUEST_FIRMWARE(&firmware,config_path,firmwareDev) < 0) {
+ AR_DEBUG_PRINTF(ATH_DEBUG_ERR,("%s: firmware file open error\n", __FUNCTION__ ));
+ status = 1;
+ goto complete;
+
+ }
+ if(NULL == firmware || firmware->size == 0) {
+ status = 1;
+ goto complete;
+ }
+ buffer = (A_UCHAR *)A_MALLOC(firmware->size);
+ if(buffer != NULL) {
+ /* Copy the read file to a local Dynamic buffer */
+ memcpy(buffer,firmware->data,firmware->size);
+ len = firmware->size;
+ A_RELEASE_FIRMWARE(firmware);
+ /* Parse the PS buffer to a global variable */
+ status = AthDoParsePS(buffer,len);
+ A_FREE(buffer);
+ } else {
+ A_RELEASE_FIRMWARE(firmware);
+ }
+
+
+ /* Read the patch file to a dynamically allocated buffer */
+ if(patchFileName != NULL)
+ snprintf(config_path,
+ MAX_FW_PATH_LEN, "%s%s",path,patchFileName);
+ else {
+ status = 0;
+ }
+ AR_DEBUG_PRINTF(ATH_DEBUG_ERR,("Patch File Name %s\n", config_path));
+ if((patchFileName == NULL) || (A_REQUEST_FIRMWARE(&firmware,config_path,firmwareDev) < 0)) {
+ AR_DEBUG_PRINTF(ATH_DEBUG_ERR,("%s: firmware file open error\n", __FUNCTION__ ));
+ /*
+ * The patch file is optional; continue with the PS operations even if it
+ * could not be loaded.
+ */
+ status = 0;
+
+ } else {
+ if(NULL == firmware || firmware->size == 0) {
+ status = 0;
+ } else {
+ buffer = (A_UCHAR *)A_MALLOC(firmware->size);
+ if(buffer != NULL) {
+ /* Copy the read file to a local Dynamic buffer */
+ memcpy(buffer,firmware->data,firmware->size);
+ len = firmware->size;
+ A_RELEASE_FIRMWARE(firmware);
+ /* parse the patch file contents and store them in global variables */
+ status = AthDoParsePatch(buffer,len);
+ A_FREE(buffer);
+ } else {
+ A_RELEASE_FIRMWARE(firmware);
+ }
+ }
+ }
+
+ /* Create an HCI command list from the parsed PS and patch information */
+ AthCreateCommandList(&HciCmdList,&numCmds);
+
+ /* Form the parameter for PSSendOps() API */
+
+
+ /*
+ * First send the CRC packet. We continue with the PS operations only if the
+ * CRC packet is answered with a Command Complete event whose status indicates
+ * an error.
+ */
+
+ if(SendHCICommandWaitCommandComplete
+ (hdev,
+ HciCmdList[0].Hcipacket,
+ HciCmdList[0].packetLen,
+ &event,
+ &bufferToFree) == A_OK) {
+ if(ReadPSEvent(event) == A_OK) { /* Exit if the status is success */
+ if(bufferToFree != NULL) {
+ A_FREE(bufferToFree);
+ }
+
+#ifndef HCI_TRANSPORT_SDIO
+ if(bdaddr && bdaddr[0] !='\0') {
+ write_bdaddr(hdev,bdaddr,BDADDR_TYPE_STRING);
+ }
+#endif
+ status = 1;
+ goto complete;
+ }
+ if(bufferToFree != NULL) {
+ A_FREE(bufferToFree);
+ }
+ } else {
+ status = 0;
+ goto complete;
+ }
+
+ for(i = 1; i <numCmds; i++) {
+
+ if(SendHCICommandWaitCommandComplete
+ (hdev,
+ HciCmdList[i].Hcipacket,
+ HciCmdList[i].packetLen,
+ &event,
+ &bufferToFree) == A_OK) {
+ if(ReadPSEvent(event) != A_OK) { /* Exit if the command failed */
+ if(bufferToFree != NULL) {
+ A_FREE(bufferToFree);
+ }
+ status = 1;
+ goto complete;
+ }
+ if(bufferToFree != NULL) {
+ A_FREE(bufferToFree);
+ }
+ } else {
+ status = 0;
+ goto complete;
+ }
+ }
+#ifdef HCI_TRANSPORT_SDIO
+ if(BDADDR == FALSE)
+ if(hdev->bdaddr[0] !=0x00 ||
+ hdev->bdaddr[1] !=0x00 ||
+ hdev->bdaddr[2] !=0x00 ||
+ hdev->bdaddr[3] !=0x00 ||
+ hdev->bdaddr[4] !=0x00 ||
+ hdev->bdaddr[5] !=0x00)
+ write_bdaddr(hdev,hdev->bdaddr,BDADDR_TYPE_HEX);
+
+#ifndef HCI_TRANSPORT_SDIO
+
+ if(bdaddr && bdaddr[0] != '\0') {
+ write_bdaddr(hdev,bdaddr,BDADDR_TYPE_STRING);
+ } else
+#endif /* HCI_TRANSPORT_SDIO */
+ /* Write BDADDR Read from OTP here */
+
+
+
+#endif
+
+ {
+ /* Read Contents of BDADDR file if user has not provided any option */
+ snprintf(config_path,MAX_FW_PATH_LEN, "%s%s",path,BDADDR_FILE);
+ AR_DEBUG_PRINTF(ATH_DEBUG_ERR,("Patch File Name %s\n", config_path));
+ if(A_REQUEST_FIRMWARE(&firmware,config_path,firmwareDev) < 0) {
+ AR_DEBUG_PRINTF(ATH_DEBUG_ERR,("%s: firmware file open error\n", __FUNCTION__ ));
+ status = 1;
+ goto complete;
+ }
+ if(NULL == firmware || firmware->size == 0) {
+ status = 1;
+ goto complete;
+ }
+ len = (firmware->size > MAX_BDADDR_FORMAT_LENGTH)? MAX_BDADDR_FORMAT_LENGTH: firmware->size;
+ memcpy(config_bdaddr, firmware->data,len);
+ config_bdaddr[len] = '\0';
+ write_bdaddr(hdev,config_bdaddr,BDADDR_TYPE_STRING);
+ A_RELEASE_FIRMWARE(firmware);
+ }
+complete:
+#ifndef HCI_TRANSPORT_SDIO
+ AthEnableSyncCommandOp(FALSE);
+ PSTagMode = FALSE;
+ wake_up_interruptible(&PsCompleteEvent);
+#endif /* HCI_TRANSPORT_SDIO */
+ if(NULL != HciCmdList) {
+ AthFreeCommandList(&HciCmdList,numCmds);
+ }
+ if(path) {
+ A_FREE(path);
+ }
+ if(config_path) {
+ A_FREE(config_path);
+ }
+ return status;
+}
+#ifndef HCI_TRANSPORT_SDIO
+/*
+ * This API sends an HCI command to the controller and returns only after the
+ * corresponding HCI Command Complete event has been received.
+ * For the HCI SDIO transport this helper is provided internally (by ar3kconfig.c).
+ */
+A_STATUS SendHCICommandWaitCommandComplete(AR3K_CONFIG_INFO *pConfig,
+ A_UINT8 *pHCICommand,
+ int CmdLength,
+ A_UINT8 **ppEventBuffer,
+ A_UINT8 **ppBufferToFree)
+{
+ if(CmdLength == 0) {
+ return A_ERROR;
+ }
+ Hci_log("COM Write -->",pHCICommand,CmdLength);
+ PSAcked = FALSE;
+ if(PSHciWritepacket(pConfig,pHCICommand,CmdLength) == 0) {
+ /* If the controller is not available, return Error */
+ return A_ERROR;
+ }
+ //add_timer(&psCmdTimer);
+ wait_event_interruptible(HciEvent,(PSAcked == TRUE));
+ if(NULL != HciEventpacket) {
+ *ppEventBuffer = HciEventpacket;
+ *ppBufferToFree = HciEventpacket;
+ } else {
+ /* Did not get an event from controller. return error */
+ *ppBufferToFree = NULL;
+ return A_ERROR;
+ }
+
+ return A_OK;
+}
+#endif /* HCI_TRANSPORT_SDIO */
+
+A_STATUS ReadPSEvent(A_UCHAR* Data){
+ AR_DEBUG_PRINTF(ATH_DEBUG_ERR,(" PS Event %x %x %x\n",Data[4],Data[5],Data[3]));
+
+ if(Data[4] == 0xFC && Data[5] == 0x00)
+ {
+ switch(Data[3]){
+ case 0x0B:
+ return A_OK;
+ break;
+ case 0x0C:
+ /* Change Baudrate */
+ return A_OK;
+ break;
+ case 0x04:
+ return A_OK;
+ break;
+ case 0x1E:
+ Rom_Version = Data[9];
+ Rom_Version = ((Rom_Version << 8) |Data[8]);
+ Rom_Version = ((Rom_Version << 8) |Data[7]);
+ Rom_Version = ((Rom_Version << 8) |Data[6]);
+
+ Build_Version = Data[13];
+ Build_Version = ((Build_Version << 8) |Data[12]);
+ Build_Version = ((Build_Version << 8) |Data[11]);
+ Build_Version = ((Build_Version << 8) |Data[10]);
+ return A_OK;
+ break;
+
+
+ }
+ }
+
+ return A_ERROR;
+}
+int str2ba(unsigned char *str_bdaddr,unsigned char *bdaddr)
+{
+ unsigned char bdbyte[3];
+ unsigned char *str_byte = str_bdaddr;
+ int i,j;
+ unsigned char colon_present = 0;
+
+ if(NULL != strstr(str_bdaddr,":")) {
+ colon_present = 1;
+ }
+
+
+ bdbyte[2] = '\0';
+
+ for( i = 0,j = 5; i < 6; i++, j--) {
+ bdbyte[0] = str_byte[0];
+ bdbyte[1] = str_byte[1];
+ bdaddr[j] = A_STRTOL(bdbyte,NULL,16);
+ if(colon_present == 1) {
+ str_byte+=3;
+ } else {
+ str_byte+=2;
+ }
+ }
+ return 0;
+}
+
+A_STATUS write_bdaddr(AR3K_CONFIG_INFO *pConfig,A_UCHAR *bdaddr,int type)
+{
+ A_UCHAR bdaddr_cmd[] = { 0x0B, 0xFC, 0x0A, 0x01, 0x01,
+ 0x00, 0x06, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 };
+
+ A_UINT8 *event;
+ A_UINT8 *bufferToFree = NULL;
+ A_STATUS result = A_ERROR;
+ int inc,outc;
+
+ if (type == BDADDR_TYPE_STRING)
+ str2ba(bdaddr,&bdaddr_cmd[7]);
+ else {
+ /* The BD address has to be sent LAP (least-significant byte) first */
+ for(inc = 5 ,outc = 7; inc >=0; inc--, outc++)
+ bdaddr_cmd[outc] = bdaddr[inc];
+ }
+
+ if(A_OK == SendHCICommandWaitCommandComplete(pConfig,bdaddr_cmd,
+ sizeof(bdaddr_cmd),
+ &event,&bufferToFree)) {
+
+ if(event[4] == 0xFC && event[5] == 0x00){
+ if(event[3] == 0x0B){
+ result = A_OK;
+ }
+ }
+
+ }
+ if(bufferToFree != NULL) {
+ A_FREE(bufferToFree);
+ }
+ return result;
+
+}
+A_STATUS ReadVersionInfo(AR3K_CONFIG_INFO *pConfig)
+{
+ A_UINT8 hciCommand[] = {0x1E,0xfc,0x00};
+ A_UINT8 *event;
+ A_UINT8 *bufferToFree = NULL;
+ A_STATUS result = A_ERROR;
+ if(A_OK == SendHCICommandWaitCommandComplete(pConfig,hciCommand,sizeof(hciCommand),&event,&bufferToFree)) {
+ result = ReadPSEvent(event);
+
+ }
+ if(bufferToFree != NULL) {
+ A_FREE(bufferToFree);
+ }
+ return result;
+}
+A_STATUS getDeviceType(AR3K_CONFIG_INFO *pConfig, A_UINT32 * code)
+{
+ A_UINT8 hciCommand[] = {0x05,0xfc,0x05,0x00,0x00,0x00,0x00,0x04};
+ A_UINT8 *event;
+ A_UINT8 *bufferToFree = NULL;
+ A_UINT32 reg;
+ A_STATUS result = A_ERROR;
+ *code = 0;
+ hciCommand[3] = (A_UINT8)(FPGA_REGISTER & 0xFF);
+ hciCommand[4] = (A_UINT8)((FPGA_REGISTER >> 8) & 0xFF);
+ hciCommand[5] = (A_UINT8)((FPGA_REGISTER >> 16) & 0xFF);
+ hciCommand[6] = (A_UINT8)((FPGA_REGISTER >> 24) & 0xFF);
+ if(A_OK == SendHCICommandWaitCommandComplete(pConfig,hciCommand,sizeof(hciCommand),&event,&bufferToFree)) {
+
+ if(event[4] == 0xFC && event[5] == 0x00){
+ switch(event[3]){
+ case 0x05:
+ reg = event[9];
+ reg = ((reg << 8) |event[8]);
+ reg = ((reg << 8) |event[7]);
+ reg = ((reg << 8) |event[6]);
+ *code = reg;
+ result = A_OK;
+
+ break;
+ case 0x06:
+ //Sleep(500);
+ break;
+ }
+ }
+
+ }
+ if(bufferToFree != NULL) {
+ A_FREE(bufferToFree);
+ }
+ return result;
+}
+
+
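
A small worked example of the byte ordering handled by str2ba() and write_bdaddr() above: the vendor command carries the BD address least-significant byte (LAP) first, so a textual address ends up reversed in the command payload. The address value below is made up for illustration.

static void bdaddr_order_example(void)
{
    unsigned char lap_first[6];

    /* Arbitrary example address, illustration only. */
    str2ba((unsigned char *)"00:03:7F:11:22:33", lap_first);

    /* lap_first now holds { 0x33, 0x22, 0x11, 0x7F, 0x03, 0x00 }: the string
     * reversed, which is the order write_bdaddr() places right after the
     * 7-byte vendor command header (bdaddr_cmd[7..12]). */
}
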
diff --git a/drivers/staging/ath6kl/miscdrv/ar3kps/ar3kpsconfig.h b/drivers/staging/ath6kl/miscdrv/ar3kps/ar3kpsconfig.h
new file mode 100644
index 000000000000..4e5b7bfc0ea9
--- /dev/null
+++ b/drivers/staging/ath6kl/miscdrv/ar3kps/ar3kpsconfig.h
@@ -0,0 +1,75 @@
+/*
+ * Copyright (c) 2004-2010 Atheros Communications Inc.
+ * All rights reserved.
+ *
+ * This file defines the symbols exported by the Atheros PS and patch download module.
+ * Define the constant HCI_TRANSPORT_SDIO if the module is being used for the HCI SDIO transport.
+ *
+ *
+ * ar3kpsconfig.h
+ *
+ *
+ *
+ * The software source and binaries included in this development package are
+ * licensed, not sold. You, or your company, received the package under one
+ * or more license agreements. The rights granted to you are specifically
+ * listed in these license agreement(s). All other rights remain with Atheros
+ * Communications, Inc., its subsidiaries, or the respective owner including
+ * those listed on the included copyright notices. Distribution of any
+ * portion of this package must be in strict compliance with the license
+ * agreement(s) terms.
+ *
+ *
+ *
+ */
+
+
+
+#ifndef __AR3KPSCONFIG_H
+#define __AR3KPSCONFIG_H
+
+/*
+ * Define the flag HCI_TRANSPORT_SDIO and undefine HCI_TRANSPORT_UART if the transport being used is SDIO.
+ */
+#undef HCI_TRANSPORT_UART
+
+#include <linux/fs.h>
+#include <linux/errno.h>
+#include <linux/signal.h>
+
+
+#include <linux/ioctl.h>
+#include <linux/firmware.h>
+
+
+#include <net/bluetooth/bluetooth.h>
+#include <net/bluetooth/hci_core.h>
+
+#include "ar3kpsparser.h"
+
+#define FPGA_REGISTER 0x4FFC
+#define BDADDR_TYPE_STRING 0
+#define BDADDR_TYPE_HEX 1
+#define CONFIG_PATH "ar3k"
+
+#define PS_ASIC_FILE "PS_ASIC.pst"
+#define PS_FPGA_FILE "PS_FPGA.pst"
+
+#define PATCH_FILE "RamPatch.txt"
+#define BDADDR_FILE "ar3kbdaddr.pst"
+
+#define ROM_VER_AR3001_3_1_0 30000
+#define ROM_VER_AR3001_3_1_1 30101
+
+
+#ifndef HCI_TRANSPORT_SDIO
+#define AR3K_CONFIG_INFO struct hci_dev
+extern wait_queue_head_t HciEvent;
+extern wait_queue_t Eventwait;
+extern A_UCHAR *HciEventpacket;
+#endif /* #ifndef HCI_TRANSPORT_SDIO */
+
+A_STATUS AthPSInitialize(AR3K_CONFIG_INFO *hdev);
+A_STATUS ReadPSEvent(A_UCHAR* Data);
+#endif /* __AR3KPSCONFIG_H */
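
For reference, a short sketch of the firmware paths that PSSendOps() builds from these constants; the ROM version value is an illustrative assumption (it is read from the controller at runtime and formatted with %x):

static void ar3k_firmware_path_example(void)
{
    char path[50];   /* PSSendOps() uses MAX_FW_PATH_LEN (50) for this */

    snprintf(path, sizeof(path), "%s/%xcoex/", CONFIG_PATH, 0x30101);
    /* path is now "ar3k/30101coex/"; the PS, patch and BD address files are
     * then requested as path + PS_ASIC_FILE (or PS_FPGA_FILE), path +
     * PATCH_FILE and path + BDADDR_FILE respectively via A_REQUEST_FIRMWARE(). */
}
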
diff --git a/drivers/staging/ath6kl/miscdrv/ar3kps/ar3kpsparser.c b/drivers/staging/ath6kl/miscdrv/ar3kps/ar3kpsparser.c
new file mode 100644
index 000000000000..8dce0542282b
--- /dev/null
+++ b/drivers/staging/ath6kl/miscdrv/ar3kps/ar3kpsparser.c
@@ -0,0 +1,969 @@
+/*
+ * Copyright (c) 2004-2010 Atheros Communications Inc.
+ * All rights reserved.
+ *
+ * This file implements the Atheros PS and patch parser.
+ * It implements APIs to parse a data buffer containing patch and PS information and convert it to HCI commands.
+ *
+ *
+ *
+ * ar3kpsparser.c
+ *
+ *
+ *
+ * The software source and binaries included in this development package are
+ * licensed, not sold. You, or your company, received the package under one
+ * or more license agreements. The rights granted to you are specifically
+ * listed in these license agreement(s). All other rights remain with Atheros
+ * Communications, Inc., its subsidiaries, or the respective owner including
+ * those listed on the included copyright notices. Distribution of any
+ * portion of this package must be in strict compliance with the license
+ * agreement(s) terms.
+ *
+ *
+ *
+ */
+
+
+#include "ar3kpsparser.h"
+
+#include <linux/ctype.h>
+#include <linux/kernel.h>
+
+#define BD_ADDR_SIZE 6
+#define WRITE_PATCH 8
+#define ENABLE_PATCH 11
+#define PS_RESET 2
+#define PS_WRITE 1
+#define PS_VERIFY_CRC 9
+#define CHANGE_BDADDR 15
+
+#define HCI_COMMAND_HEADER 7
+
+#define HCI_EVENT_SIZE 7
+
+#define WRITE_PATCH_COMMAND_STATUS_OFFSET 5
+
+#define PS_RAM_SIZE 2048
+
+#define RAM_PS_REGION (1<<0)
+#define RAM_PATCH_REGION (1<<1)
+#define RAMPS_MAX_PS_DATA_PER_TAG 20000
+#define MAX_RADIO_CFG_TABLE_SIZE 244
+#define RAMPS_MAX_PS_TAGS_PER_FILE 50
+
+#define PS_MAX_LEN 500
+#define LINE_SIZE_MAX (PS_MAX_LEN *2)
+
+/* Constant values used by parser */
+#define BYTES_OF_PS_DATA_PER_LINE 16
+#define RAMPS_MAX_PS_DATA_PER_TAG 20000
+
+
+/* Number of PS/patch entries in an HCI packet */
+#define MAX_BYTE_LENGTH 244
+
+#define SKIP_BLANKS(str) while (*str == ' ') str++
+
+enum MinBootFileFormatE
+{
+ MB_FILEFORMAT_RADIOTBL,
+ MB_FILEFORMAT_PATCH,
+ MB_FILEFORMAT_COEXCONFIG
+};
+
+enum RamPsSection
+{
+ RAM_PS_SECTION,
+ RAM_PATCH_SECTION,
+ RAM_DYN_MEM_SECTION
+};
+
+enum eType {
+ eHex,
+ edecimal
+};
+
+
+typedef struct tPsTagEntry
+{
+ A_UINT32 TagId;
+ A_UINT32 TagLen;
+ A_UINT8 *TagData;
+} tPsTagEntry, *tpPsTagEntry;
+
+typedef struct tRamPatch
+{
+ A_UINT16 Len;
+ A_UINT8 * Data;
+} tRamPatch, *ptRamPatch;
+
+
+
+typedef struct ST_PS_DATA_FORMAT {
+ enum eType eDataType;
+ A_BOOL bIsArray;
+}ST_PS_DATA_FORMAT;
+
+typedef struct ST_READ_STATUS {
+ unsigned uTagID;
+ unsigned uSection;
+ unsigned uLineCount;
+ unsigned uCharCount;
+ unsigned uByteCount;
+}ST_READ_STATUS;
+
+
+/* Stores the number of PS Tags */
+static A_UINT32 Tag_Count = 0;
+
+/* Stores the number of patch commands */
+static A_UINT32 Patch_Count = 0;
+static A_UINT32 Total_tag_lenght = 0;
+A_BOOL BDADDR = FALSE;
+A_UINT32 StartTagId;
+
+tPsTagEntry PsTagEntry[RAMPS_MAX_PS_TAGS_PER_FILE];
+tRamPatch RamPatch[MAX_NUM_PATCH_ENTRY];
+
+
+A_STATUS AthParseFilesUnified(A_UCHAR *srcbuffer,A_UINT32 srclen, int FileFormat);
+char AthReadChar(A_UCHAR *buffer, A_UINT32 len,A_UINT32 *pos);
+char * AthGetLine(char * buffer, int maxlen, A_UCHAR *srcbuffer,A_UINT32 len,A_UINT32 *pos);
+static A_STATUS AthPSCreateHCICommand(A_UCHAR Opcode, A_UINT32 Param1,PSCmdPacket *PSPatchPacket,A_UINT32 *index);
+
+/* Reads the next character from the input buffer */
+char AthReadChar(A_UCHAR *buffer, A_UINT32 len,A_UINT32 *pos)
+{
+ char Ch;
+ if(buffer == NULL || *pos >=len )
+ {
+ return '\0';
+ } else {
+ Ch = buffer[*pos];
+ (*pos)++;
+ return Ch;
+ }
+}
+/* PS parser helper function */
+unsigned int uGetInputDataFormat(char* pCharLine, ST_PS_DATA_FORMAT *pstFormat)
+{
+ if(pCharLine[0] != '[') {
+ pstFormat->eDataType = eHex;
+ pstFormat->bIsArray = true;
+ return 0;
+ }
+ switch(pCharLine[1]) {
+ case 'H':
+ case 'h':
+ if(pCharLine[2]==':') {
+ if((pCharLine[3]== 'a') || (pCharLine[3]== 'A')) {
+ if(pCharLine[4] == ']') {
+ pstFormat->eDataType = eHex;
+ pstFormat->bIsArray = true;
+ pCharLine += 5;
+ return 0;
+ }
+ else {
+ AR_DEBUG_PRINTF(ATH_DEBUG_ERR,("Illegal Data format\n")); //[H:A
+ return 1;
+ }
+ }
+ if((pCharLine[3]== 'S') || (pCharLine[3]== 's')) {
+ if(pCharLine[4] == ']') {
+ pstFormat->eDataType = eHex;
+ pstFormat->bIsArray = false;
+ pCharLine += 5;
+ return 0;
+ }
+ else {
+ AR_DEBUG_PRINTF(ATH_DEBUG_ERR,("Illegal Data format\n")); //[H:A
+ return 1;
+ }
+ }
+ else if(pCharLine[3] == ']') { //[H:]
+ pstFormat->eDataType = eHex;
+ pstFormat->bIsArray = true;
+ pCharLine += 4;
+ return 0;
+ }
+ else { //[H:
+ AR_DEBUG_PRINTF(ATH_DEBUG_ERR,("Illegal Data format\n"));
+ return 1;
+ }
+ }
+ else if(pCharLine[2]==']') { //[H]
+ pstFormat->eDataType = eHex;
+ pstFormat->bIsArray = true;
+ pCharLine += 3;
+ return 0;
+ }
+ else { //[H
+ AR_DEBUG_PRINTF(ATH_DEBUG_ERR,("Illegal Data format\n"));
+ return 1;
+ }
+ break;
+
+ case 'A':
+ case 'a':
+ if(pCharLine[2]==':') {
+ if((pCharLine[3]== 'h') || (pCharLine[3]== 'H')) {
+ if(pCharLine[4] == ']') {
+ pstFormat->eDataType = eHex;
+ pstFormat->bIsArray = true;
+ pCharLine += 5;
+ return 0;
+ }
+ else {
+ AR_DEBUG_PRINTF(ATH_DEBUG_ERR,("Illegal Data format 1\n")); //[A:H
+ return 1;
+ }
+ }
+ else if(pCharLine[3]== ']') { //[A:]
+ pstFormat->eDataType = eHex;
+ pstFormat->bIsArray = true;
+ pCharLine += 4;
+ return 0;
+ }
+ else { //[A:
+ AR_DEBUG_PRINTF(ATH_DEBUG_ERR,("Illegal Data format 2\n"));
+ return 1;
+ }
+ }
+ else if(pCharLine[2]==']') { //[H]
+ pstFormat->eDataType = eHex;
+ pstFormat->bIsArray = true;
+ pCharLine += 3;
+ return 0;
+ }
+ else { //[H
+ AR_DEBUG_PRINTF(ATH_DEBUG_ERR,("Illegal Data format 3\n"));
+ return 1;
+ }
+ break;
+
+ case 'S':
+ case 's':
+ if(pCharLine[2]==':') {
+ if((pCharLine[3]== 'h') || (pCharLine[3]== 'H')) {
+ if(pCharLine[4] == ']') {
+ pstFormat->eDataType = eHex;
+ pstFormat->bIsArray = true;
+ pCharLine += 5;
+ return 0;
+ }
+ else {
+ AR_DEBUG_PRINTF(ATH_DEBUG_ERR,("Illegal Data format 5\n")); //[A:H
+ return 1;
+ }
+ }
+ else if(pCharLine[3]== ']') { //[A:]
+ pstFormat->eDataType = eHex;
+ pstFormat->bIsArray = true;
+ pCharLine += 4;
+ return 0;
+ }
+ else { //[A:
+ AR_DEBUG_PRINTF(ATH_DEBUG_ERR,("Illegal Data format 6\n"));
+ return 1;
+ }
+ }
+ else if(pCharLine[2]==']') { //[H]
+ pstFormat->eDataType = eHex;
+ pstFormat->bIsArray = true;
+ pCharLine += 3;
+ return 0;
+ }
+ else { //[H
+ AR_DEBUG_PRINTF(ATH_DEBUG_ERR,("Illegal Data format 7\n"));
+ return 1;
+ }
+ break;
+
+ default:
+ AR_DEBUG_PRINTF(ATH_DEBUG_ERR,("Illegal Data format 8\n"));
+ return 1;
+ }
+}
+
+unsigned int uReadDataInSection(char *pCharLine, ST_PS_DATA_FORMAT stPS_DataFormat)
+{
+ char *pTokenPtr = pCharLine;
+
+ if(pTokenPtr[0] == '[') {
+ while(pTokenPtr[0] != ']' && pTokenPtr[0] != '\0') {
+ pTokenPtr++;
+ }
+ if(pTokenPtr[0] == '\0') {
+ return (0x0FFF);
+ }
+ pTokenPtr++;
+
+
+ }
+ if(stPS_DataFormat.eDataType == eHex) {
+ if(stPS_DataFormat.bIsArray == true) {
+ //Not implemented
+ return (0x0FFF);
+ }
+ else {
+ return (A_STRTOL(pTokenPtr, NULL, 16));
+ }
+ }
+ else {
+ //Not implemented
+ return (0x0FFF);
+ }
+}
+A_STATUS AthParseFilesUnified(A_UCHAR *srcbuffer,A_UINT32 srclen, int FileFormat)
+{
+ char *Buffer;
+ char *pCharLine;
+ A_UINT8 TagCount;
+ A_UINT16 ByteCount;
+ A_UINT8 ParseSection=RAM_PS_SECTION;
+ A_UINT32 pos;
+
+
+
+ int uReadCount;
+ ST_PS_DATA_FORMAT stPS_DataFormat;
+ ST_READ_STATUS stReadStatus = {0, 0, 0,0};
+ pos = 0;
+ Buffer = NULL;
+
+ if (srcbuffer == NULL || srclen == 0)
+ {
+ AR_DEBUG_PRINTF(ATH_DEBUG_ERR, ("Could not open .\n"));
+ return A_ERROR;
+ }
+ TagCount = 0;
+ ByteCount = 0;
+ Buffer = A_MALLOC(LINE_SIZE_MAX + 1);
+ if(NULL == Buffer) {
+ return A_ERROR;
+ }
+ if (FileFormat == MB_FILEFORMAT_PATCH)
+ {
+ int LineRead = 0;
+ while((pCharLine = AthGetLine(Buffer, LINE_SIZE_MAX, srcbuffer,srclen,&pos)) != NULL)
+ {
+
+ SKIP_BLANKS(pCharLine);
+
+ // Comment line or empty line
+ if ((pCharLine[0] == '/') && (pCharLine[1] == '/'))
+ {
+ continue;
+ }
+
+ if ((pCharLine[0] == '#')) {
+ if (stReadStatus.uSection != 0)
+ {
+ AR_DEBUG_PRINTF(ATH_DEBUG_ERR,("error\n"));
+ if(Buffer != NULL) {
+ A_FREE(Buffer);
+ }
+ return A_ERROR;
+ }
+ else {
+ stReadStatus.uSection = 1;
+ continue;
+ }
+ }
+ if ((pCharLine[0] == '/') && (pCharLine[1] == '*'))
+ {
+ pCharLine+=2;
+ SKIP_BLANKS(pCharLine);
+
+ if(!strncmp(pCharLine,"PA",2)||!strncmp(pCharLine,"Pa",2)||!strncmp(pCharLine,"pa",2))
+ ParseSection=RAM_PATCH_SECTION;
+
+ if(!strncmp(pCharLine,"DY",2)||!strncmp(pCharLine,"Dy",2)||!strncmp(pCharLine,"dy",2))
+ ParseSection=RAM_DYN_MEM_SECTION;
+
+ if(!strncmp(pCharLine,"PS",2)||!strncmp(pCharLine,"Ps",2)||!strncmp(pCharLine,"ps",2))
+ ParseSection=RAM_PS_SECTION;
+
+ LineRead = 0;
+ stReadStatus.uSection = 0;
+
+ continue;
+ }
+
+ switch(ParseSection)
+ {
+ case RAM_PS_SECTION:
+ {
+ if (stReadStatus.uSection == 1) //TagID
+ {
+ SKIP_BLANKS(pCharLine);
+ if(uGetInputDataFormat(pCharLine, &stPS_DataFormat)) {
+ AR_DEBUG_PRINTF(ATH_DEBUG_ERR,("uGetInputDataFormat fail\n"));
+ if(Buffer != NULL) {
+ A_FREE(Buffer);
+ }
+ return A_ERROR;
+ }
+ //pCharLine +=5;
+ PsTagEntry[TagCount].TagId = uReadDataInSection(pCharLine, stPS_DataFormat);
+ AR_DEBUG_PRINTF(ATH_DEBUG_ERR,(" TAG ID %d \n",PsTagEntry[TagCount].TagId));
+
+ //AR_DEBUG_PRINTF(ATH_DEBUG_ERR,("tag # %x\n", PsTagEntry[TagCount].TagId);
+ if (TagCount == 0)
+ {
+ StartTagId = PsTagEntry[TagCount].TagId;
+ }
+ stReadStatus.uSection = 2;
+ }
+ else if (stReadStatus.uSection == 2) //TagLength
+ {
+
+ if(uGetInputDataFormat(pCharLine, &stPS_DataFormat)) {
+ AR_DEBUG_PRINTF(ATH_DEBUG_ERR,("uGetInputDataFormat fail \n"));
+ if(Buffer != NULL) {
+ A_FREE(Buffer);
+ }
+ return A_ERROR;
+ }
+ //pCharLine +=5;
+ ByteCount = uReadDataInSection(pCharLine, stPS_DataFormat);
+
+ //AR_DEBUG_PRINTF(ATH_DEBUG_ERR,("tag length %x\n", ByteCount));
+ if (ByteCount > LINE_SIZE_MAX/2)
+ {
+ if(Buffer != NULL) {
+ A_FREE(Buffer);
+ }
+ return A_ERROR;
+ }
+ PsTagEntry[TagCount].TagLen = ByteCount;
+ PsTagEntry[TagCount].TagData = (A_UINT8*)A_MALLOC(ByteCount);
+ AR_DEBUG_PRINTF(ATH_DEBUG_ERR,(" TAG Length %d Tag Index %d \n",PsTagEntry[TagCount].TagLen,TagCount));
+ stReadStatus.uSection = 3;
+ stReadStatus.uLineCount = 0;
+ }
+ else if( stReadStatus.uSection == 3) { //Data
+
+ if(stReadStatus.uLineCount == 0) {
+ if(uGetInputDataFormat(pCharLine,&stPS_DataFormat)) {
+ AR_DEBUG_PRINTF(ATH_DEBUG_ERR,("uGetInputDataFormat Fail\n"));
+ if(Buffer != NULL) {
+ A_FREE(Buffer);
+ }
+ return A_ERROR;
+ }
+ //pCharLine +=5;
+ }
+ SKIP_BLANKS(pCharLine);
+ stReadStatus.uCharCount = 0;
+ if(pCharLine[stReadStatus.uCharCount] == '[') {
+ while(pCharLine[stReadStatus.uCharCount] != ']' && pCharLine[stReadStatus.uCharCount] != '\0' ) {
+ stReadStatus.uCharCount++;
+ }
+ if(pCharLine[stReadStatus.uCharCount] == ']' ) {
+ stReadStatus.uCharCount++;
+ } else {
+ stReadStatus.uCharCount = 0;
+ }
+ }
+ uReadCount = (ByteCount > BYTES_OF_PS_DATA_PER_LINE)? BYTES_OF_PS_DATA_PER_LINE: ByteCount;
+ //AR_DEBUG_PRINTF(ATH_DEBUG_ERR,(" "));
+ if((stPS_DataFormat.eDataType == eHex) && stPS_DataFormat.bIsArray == true) {
+ while(uReadCount > 0) {
+ PsTagEntry[TagCount].TagData[stReadStatus.uByteCount] =
+ (A_UINT8)(hex_to_bin(pCharLine[stReadStatus.uCharCount]) << 4)
+ | (A_UINT8)(hex_to_bin(pCharLine[stReadStatus.uCharCount + 1]));
+
+ PsTagEntry[TagCount].TagData[stReadStatus.uByteCount+1] =
+ (A_UINT8)(hex_to_bin(pCharLine[stReadStatus.uCharCount + 3]) << 4)
+ | (A_UINT8)(hex_to_bin(pCharLine[stReadStatus.uCharCount + 4]));
+
+ stReadStatus.uCharCount += 6; // advance past two hex bytes (4 chars) and their separators
+ stReadStatus.uByteCount += 2;
+ uReadCount -= 2;
+ }
+ if(ByteCount > BYTES_OF_PS_DATA_PER_LINE) {
+ ByteCount -= BYTES_OF_PS_DATA_PER_LINE;
+ }
+ else {
+ ByteCount = 0;
+ }
+ }
+ else {
+ //to be implemented
+ }
+
+ stReadStatus.uLineCount++;
+
+ if(ByteCount == 0) {
+ stReadStatus.uSection = 0;
+ stReadStatus.uCharCount = 0;
+ stReadStatus.uLineCount = 0;
+ stReadStatus.uByteCount = 0;
+ }
+ else {
+ stReadStatus.uCharCount = 0;
+ }
+
+ if((stReadStatus.uSection == 0)&&(++TagCount == RAMPS_MAX_PS_TAGS_PER_FILE))
+ {
+ AR_DEBUG_PRINTF(ATH_DEBUG_ERR,("\n Buffer over flow PS File too big!!!"));
+ if(Buffer != NULL) {
+ A_FREE(Buffer);
+ }
+ return A_ERROR;
+ //Sleep (3000);
+ //exit(1);
+ }
+
+ }
+ }
+
+ break;
+ default:
+ {
+ if(Buffer != NULL) {
+ A_FREE(Buffer);
+ }
+ return A_ERROR;
+ }
+ break;
+ }
+ LineRead++;
+ }
+ Tag_Count = TagCount;
+ AR_DEBUG_PRINTF(ATH_DEBUG_ERR,("Number of Tags %d\n", Tag_Count));
+ }
+
+
+ if (TagCount > RAMPS_MAX_PS_TAGS_PER_FILE)
+ {
+
+ if(Buffer != NULL) {
+ A_FREE(Buffer);
+ }
+ return A_ERROR;
+ }
+
+ if(Buffer != NULL) {
+ A_FREE(Buffer);
+ }
+ return A_OK;
+
+}
+
+
+
+/********************/
+
+
+A_STATUS GetNextTwoChar(A_UCHAR *srcbuffer,A_UINT32 len, A_UINT32 *pos, char * buffer)
+{
+ unsigned char ch;
+
+ ch = AthReadChar(srcbuffer,len,pos);
+ if(ch != '\0' && isxdigit(ch)) {
+ buffer[0] = ch;
+ } else
+ {
+ return A_ERROR;
+ }
+ ch = AthReadChar(srcbuffer,len,pos);
+ if(ch != '\0' && isxdigit(ch)) {
+ buffer[1] = ch;
+ } else
+ {
+ return A_ERROR;
+ }
+ return A_OK;
+}
+
+A_STATUS AthDoParsePatch(A_UCHAR *patchbuffer, A_UINT32 patchlen)
+{
+
+ char Byte[3];
+ char Line[MAX_BYTE_LENGTH + 1];
+ int ByteCount,ByteCount_Org;
+ int count;
+ int i,j,k;
+ int data;
+ A_UINT32 filepos;
+ Byte[2] = '\0';
+ j = 0;
+ filepos = 0;
+ Patch_Count = 0;
+
+ while(NULL != AthGetLine(Line,MAX_BYTE_LENGTH,patchbuffer,patchlen,&filepos)) {
+ if(strlen(Line) <= 1 || !isxdigit(Line[0])) {
+ continue;
+ } else {
+ break;
+ }
+ }
+ ByteCount = A_STRTOL(Line, NULL, 16);
+ ByteCount_Org = ByteCount;
+
+ while(ByteCount > MAX_BYTE_LENGTH){
+
+ /* Handle the case where the patch data exceeds the 20K maximum (MAX_NUM_PATCH_ENTRY buffers) */
+ if(MAX_NUM_PATCH_ENTRY == Patch_Count) {
+ for(i = 0; i < Patch_Count; i++) {
+ A_FREE(RamPatch[i].Data);
+ }
+ return A_ERROR;
+ }
+ RamPatch[Patch_Count].Len= MAX_BYTE_LENGTH;
+ RamPatch[Patch_Count].Data = (A_UINT8*)A_MALLOC(MAX_BYTE_LENGTH);
+ Patch_Count ++;
+
+
+ ByteCount= ByteCount - MAX_BYTE_LENGTH;
+ }
+
+ RamPatch[Patch_Count].Len= (ByteCount & 0xFF);
+ if(ByteCount != 0) {
+ RamPatch[Patch_Count].Data = (A_UINT8*)A_MALLOC(ByteCount);
+ Patch_Count ++;
+ }
+ count = 0;
+ while(ByteCount_Org > MAX_BYTE_LENGTH){
+ AR_DEBUG_PRINTF(ATH_DEBUG_ERR,(" Index [%d]\n",j));
+ for (i = 0,k=0; i < MAX_BYTE_LENGTH*2; i += 2,k++,count +=2) {
+ if(GetNextTwoChar(patchbuffer,patchlen,&filepos,Byte) == A_ERROR) {
+ return A_ERROR;
+ }
+ data = A_STRTOUL(&Byte[0], NULL, 16);
+ RamPatch[j].Data[k] = (data & 0xFF);
+
+
+ }
+ j++;
+ ByteCount_Org = ByteCount_Org - MAX_BYTE_LENGTH;
+ }
+ if(j == 0){
+ j++;
+ }
+ AR_DEBUG_PRINTF(ATH_DEBUG_ERR,(" Index [%d]\n",j));
+ for (k=0; k < ByteCount_Org; i += 2,k++,count+=2) {
+ if(GetNextTwoChar(patchbuffer,patchlen,&filepos,Byte) == A_ERROR) {
+ return A_ERROR;
+ }
+ data = A_STRTOUL(Byte, NULL, 16);
+ RamPatch[j].Data[k] = (data & 0xFF);
+
+
+ }
+ return A_OK;
+}
+
+
+/********************/
+A_STATUS AthDoParsePS(A_UCHAR *srcbuffer, A_UINT32 srclen)
+{
+ A_STATUS status;
+ int i;
+ A_BOOL BDADDR_Present = A_ERROR;
+
+ Tag_Count = 0;
+
+ Total_tag_lenght = 0;
+ BDADDR = FALSE;
+
+
+ status = A_ERROR;
+
+ if(NULL != srcbuffer && srclen != 0)
+ {
+ AR_DEBUG_PRINTF(ATH_DEBUG_ERR,("File Open Operation Successful\n"));
+
+ status = AthParseFilesUnified(srcbuffer,srclen,MB_FILEFORMAT_PATCH);
+ }
+
+
+
+ if(Tag_Count == 0){
+ Total_tag_lenght = 10;
+
+ }
+ else{
+ for(i=0; i<Tag_Count; i++){
+ if(PsTagEntry[i].TagId == 1){
+ BDADDR_Present = A_OK;
+ AR_DEBUG_PRINTF(ATH_DEBUG_ERR,("BD ADDR is present in Patch File \r\n"));
+
+ }
+ if(PsTagEntry[i].TagLen % 2 == 1){
+ Total_tag_lenght = Total_tag_lenght + PsTagEntry[i].TagLen + 1;
+ }
+ else{
+ Total_tag_lenght = Total_tag_lenght + PsTagEntry[i].TagLen;
+ }
+
+ }
+ }
+
+ if(Tag_Count > 0 && !BDADDR_Present){
+ AR_DEBUG_PRINTF(ATH_DEBUG_ERR,("BD ADDR is not present adding 10 extra bytes \r\n"));
+ Total_tag_lenght=Total_tag_lenght + 10;
+ }
+ Total_tag_lenght = Total_tag_lenght+ 10 + (Tag_Count*4);
+ AR_DEBUG_PRINTF(ATH_DEBUG_ERR,("** Total Length %d\n",Total_tag_lenght));
+
+
+ return status;
+}
+char * AthGetLine(char * buffer, int maxlen, A_UCHAR *srcbuffer,A_UINT32 len,A_UINT32 *pos)
+{
+
+ int count;
+ static short flag;
+ char CharRead;
+ count = 0;
+ flag = A_ERROR;
+
+ do
+ {
+ CharRead = AthReadChar(srcbuffer,len,pos);
+ if( CharRead == '\0' ) {
+ buffer[count+1] = '\0';
+ if(count == 0) {
+ return NULL;
+ }
+ else {
+ return buffer;
+ }
+ }
+
+ if(CharRead == 13) {
+ } else if(CharRead == 10) {
+ buffer[count] ='\0';
+ flag = A_ERROR;
+ return buffer;
+ }else {
+ buffer[count++] = CharRead;
+ }
+
+ }
+ while(count < maxlen-1 && CharRead != '\0');
+ buffer[count] = '\0';
+
+ return buffer;
+}
+
+static void LoadHeader(A_UCHAR *HCI_PS_Command,A_UCHAR opcode,int length,int index){
+
+ HCI_PS_Command[0]= 0x0B;
+ HCI_PS_Command[1]= 0xFC;
+ HCI_PS_Command[2]= length + 4;
+ HCI_PS_Command[3]= opcode;
+ HCI_PS_Command[4]= (index & 0xFF);
+ HCI_PS_Command[5]= ((index>>8) & 0xFF);
+ HCI_PS_Command[6]= length;
+}
+
+/////////////////////////
+//
+int AthCreateCommandList(PSCmdPacket **HciPacketList, A_UINT32 *numPackets)
+{
+
+ A_UINT8 count;
+ A_UINT32 NumcmdEntry = 0;
+
+ A_UINT32 Crc = 0;
+ *numPackets = 0;
+
+
+ if(Patch_Count > 0)
+ Crc |= RAM_PATCH_REGION;
+ if(Tag_Count > 0)
+ Crc |= RAM_PS_REGION;
+ AR_DEBUG_PRINTF(ATH_DEBUG_ERR,("PS Thread Started CRC %x Patch Count %d Tag Count %d \n",Crc,Patch_Count,Tag_Count));
+
+ if(Patch_Count || Tag_Count ){
+ NumcmdEntry+=(2 + Patch_Count + Tag_Count); /* CRC Packet + PS Reset Packet + Patch List + PS List*/
+ if(Patch_Count > 0) {
+ NumcmdEntry++; /* Patch Enable Command */
+ }
+ AR_DEBUG_PRINTF(ATH_DEBUG_ERR,("Num Cmd Entries %d Size %d \r\n",NumcmdEntry,(A_UINT32)sizeof(PSCmdPacket) * NumcmdEntry));
+ (*HciPacketList) = A_MALLOC(sizeof(PSCmdPacket) * NumcmdEntry);
+ if(NULL == *HciPacketList) {
+ AR_DEBUG_PRINTF(ATH_DEBUG_ERR,("memory allocation failed \r\n"));
+ }
+ AthPSCreateHCICommand(PS_VERIFY_CRC,Crc,*HciPacketList,numPackets);
+ if(Patch_Count > 0){
+ AR_DEBUG_PRINTF(ATH_DEBUG_ERR,("*** Write Patch**** \r\n"));
+ AthPSCreateHCICommand(WRITE_PATCH,Patch_Count,*HciPacketList,numPackets);
+ AR_DEBUG_PRINTF(ATH_DEBUG_ERR,("*** Enable Patch**** \r\n"));
+ AthPSCreateHCICommand(ENABLE_PATCH,0,*HciPacketList,numPackets);
+ }
+
+ AR_DEBUG_PRINTF(ATH_DEBUG_ERR,("*** PS Reset**** %d[0x%x] \r\n",PS_RAM_SIZE,PS_RAM_SIZE));
+ AthPSCreateHCICommand(PS_RESET,PS_RAM_SIZE,*HciPacketList,numPackets);
+ if(Tag_Count > 0){
+ AR_DEBUG_PRINTF(ATH_DEBUG_ERR,("*** PS Write**** \r\n"));
+ AthPSCreateHCICommand(PS_WRITE,Tag_Count,*HciPacketList,numPackets);
+ }
+ }
+ if(!BDADDR){
+ AR_DEBUG_PRINTF(ATH_DEBUG_ERR,("BD ADDR not present \r\n"));
+
+ }
+ for(count = 0; count < Patch_Count; count++) {
+
+ AR_DEBUG_PRINTF(ATH_DEBUG_ERR,("Freeing Patch Buffer %d \r\n",count));
+ A_FREE(RamPatch[count].Data);
+ }
+
+ for(count = 0; count < Tag_Count; count++) {
+
+ AR_DEBUG_PRINTF(ATH_DEBUG_ERR,("Freeing PS Buffer %d \r\n",count));
+ A_FREE(PsTagEntry[count].TagData);
+ }
+
+/*
+ * SDIO Transport uses synchronous mode of data transfer
+ * So, AthPSOperations() call returns only after receiving the
+ * command complete event.
+ */
+ return *numPackets;
+}
+
+
+////////////////////////
+
+/////////////
+static A_STATUS AthPSCreateHCICommand(A_UCHAR Opcode, A_UINT32 Param1,PSCmdPacket *PSPatchPacket,A_UINT32 *index)
+{
+ A_UCHAR *HCI_PS_Command;
+ A_UINT32 Length;
+ int i,j;
+
+ switch(Opcode)
+ {
+ case WRITE_PATCH:
+
+
+ for(i=0;i< Param1;i++){
+
+ HCI_PS_Command = (A_UCHAR *) A_MALLOC(RamPatch[i].Len+HCI_COMMAND_HEADER);
+ AR_DEBUG_PRINTF(ATH_DEBUG_ERR,("Allocated Buffer Size %d\n",RamPatch[i].Len+HCI_COMMAND_HEADER));
+ if(HCI_PS_Command == NULL){
+ AR_DEBUG_PRINTF(ATH_DEBUG_ERR,("MALLOC Failed\r\n"));
+ return A_ERROR;
+ }
+ memset (HCI_PS_Command, 0, RamPatch[i].Len+HCI_COMMAND_HEADER);
+ LoadHeader(HCI_PS_Command,Opcode,RamPatch[i].Len,i);
+ for(j=0;j<RamPatch[i].Len;j++){
+ HCI_PS_Command[HCI_COMMAND_HEADER+j]=RamPatch[i].Data[j];
+ }
+ PSPatchPacket[*index].Hcipacket = HCI_PS_Command;
+ PSPatchPacket[*index].packetLen = RamPatch[i].Len+HCI_COMMAND_HEADER;
+ (*index)++;
+
+
+ }
+
+ break;
+
+ case ENABLE_PATCH:
+
+
+ Length = 0;
+ i= 0;
+ HCI_PS_Command = (A_UCHAR *) A_MALLOC(Length+HCI_COMMAND_HEADER);
+ if(HCI_PS_Command == NULL){
+ AR_DEBUG_PRINTF(ATH_DEBUG_ERR,("MALLOC Failed\r\n"));
+ return A_ERROR;
+ }
+
+ memset (HCI_PS_Command, 0, Length+HCI_COMMAND_HEADER);
+ LoadHeader(HCI_PS_Command,Opcode,Length,i);
+ PSPatchPacket[*index].Hcipacket = HCI_PS_Command;
+ PSPatchPacket[*index].packetLen = Length+HCI_COMMAND_HEADER;
+ (*index)++;
+
+ break;
+
+ case PS_RESET:
+ Length = 0x06;
+ i=0;
+ HCI_PS_Command = (A_UCHAR *) A_MALLOC(Length+HCI_COMMAND_HEADER);
+ if(HCI_PS_Command == NULL){
+ AR_DEBUG_PRINTF(ATH_DEBUG_ERR,("MALLOC Failed\r\n"));
+ return A_ERROR;
+ }
+ memset (HCI_PS_Command, 0, Length+HCI_COMMAND_HEADER);
+ LoadHeader(HCI_PS_Command,Opcode,Length,i);
+ HCI_PS_Command[7]= 0x00;
+ HCI_PS_Command[Length+HCI_COMMAND_HEADER -2]= (Param1 & 0xFF);
+ HCI_PS_Command[Length+HCI_COMMAND_HEADER -1]= ((Param1 >> 8) & 0xFF);
+ PSPatchPacket[*index].Hcipacket = HCI_PS_Command;
+ PSPatchPacket[*index].packetLen = Length+HCI_COMMAND_HEADER;
+ (*index)++;
+
+ break;
+
+ case PS_WRITE:
+ for(i=0;i< Param1;i++){
+ if(PsTagEntry[i].TagId ==1)
+ BDADDR = TRUE;
+
+ HCI_PS_Command = (A_UCHAR *) A_MALLOC(PsTagEntry[i].TagLen+HCI_COMMAND_HEADER);
+ if(HCI_PS_Command == NULL){
+ AR_DEBUG_PRINTF(ATH_DEBUG_ERR,("MALLOC Failed\r\n"));
+ return A_ERROR;
+ }
+
+ memset (HCI_PS_Command, 0, PsTagEntry[i].TagLen+HCI_COMMAND_HEADER);
+ LoadHeader(HCI_PS_Command,Opcode,PsTagEntry[i].TagLen,PsTagEntry[i].TagId);
+
+ for(j=0;j<PsTagEntry[i].TagLen;j++){
+ HCI_PS_Command[HCI_COMMAND_HEADER+j]=PsTagEntry[i].TagData[j];
+ }
+
+ PSPatchPacket[*index].Hcipacket = HCI_PS_Command;
+ PSPatchPacket[*index].packetLen = PsTagEntry[i].TagLen+HCI_COMMAND_HEADER;
+ (*index)++;
+
+ }
+
+ break;
+
+
+ case PS_VERIFY_CRC:
+ Length = 0x0;
+
+ AR_DEBUG_PRINTF(ATH_DEBUG_ERR,("VALUE of CRC:%d At index %d\r\n",Param1,*index));
+
+ HCI_PS_Command = (A_UCHAR *) A_MALLOC(Length+HCI_COMMAND_HEADER);
+ if(HCI_PS_Command == NULL){
+ AR_DEBUG_PRINTF(ATH_DEBUG_ERR,("MALLOC Failed\r\n"));
+ return A_ERROR;
+ }
+ memset (HCI_PS_Command, 0, Length+HCI_COMMAND_HEADER);
+ LoadHeader(HCI_PS_Command,Opcode,Length,Param1);
+
+ PSPatchPacket[*index].Hcipacket = HCI_PS_Command;
+ PSPatchPacket[*index].packetLen = Length+HCI_COMMAND_HEADER;
+ (*index)++;
+
+ break;
+
+ case CHANGE_BDADDR:
+ break;
+ }
+ return A_OK;
+}
+A_STATUS AthFreeCommandList(PSCmdPacket **HciPacketList, A_UINT32 numPackets)
+{
+ int i;
+ if(*HciPacketList == NULL) {
+ return A_ERROR;
+ }
+ for(i = 0; i < numPackets;i++) {
+ A_FREE((*HciPacketList)[i].Hcipacket);
+ }
+ A_FREE(*HciPacketList);
+ return A_OK;
+}
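
A worked example of the packet layout produced by LoadHeader() and AthPSCreateHCICommand() above, for a hypothetical PS tag (tag id 0x0002, four data bytes); the tag contents are made up for illustration:

/* PS_WRITE command for TagId 0x0002, TagLen 4, data AA BB CC DD. */
static const unsigned char ps_write_example[] = {
    0x0B, 0xFC,             /* vendor-specific HCI command (opcode 0xFC0B)  */
    0x08,                   /* HCI parameter length = TagLen + 4            */
    0x01,                   /* PS_WRITE sub-opcode                          */
    0x02, 0x00,             /* tag id, little-endian                        */
    0x04,                   /* TagLen                                       */
    0xAA, 0xBB, 0xCC, 0xDD, /* tag data                                     */
};
/* The matching packetLen is TagLen + HCI_COMMAND_HEADER = 11 bytes. */
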
diff --git a/drivers/staging/ath6kl/miscdrv/ar3kps/ar3kpsparser.h b/drivers/staging/ath6kl/miscdrv/ar3kps/ar3kpsparser.h
new file mode 100644
index 000000000000..007b0eb950d2
--- /dev/null
+++ b/drivers/staging/ath6kl/miscdrv/ar3kps/ar3kpsparser.h
@@ -0,0 +1,127 @@
+//------------------------------------------------------------------------------
+//
+// Copyright (c) 2004-2010 Atheros Corporation. All rights reserved.
+//
+//
+// Permission to use, copy, modify, and/or distribute this software for any
+// purpose with or without fee is hereby granted, provided that the above
+// copyright notice and this permission notice appear in all copies.
+//
+// THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+// WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+// MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+// ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+// WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+// ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+// OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+//
+//
+//------------------------------------------------------------------------------
+//
+// This file is the include file for the Atheros PS and patch parser.
+// It declares APIs to parse a data buffer containing patch and PS information and convert it to HCI commands.
+//
+
+#ifndef __AR3KPSPARSER_H
+#define __AR3KPSPARSER_H
+
+
+
+
+#include <linux/fs.h>
+#include <linux/slab.h>
+#include "athdefs.h"
+#ifdef HCI_TRANSPORT_SDIO
+#include "a_config.h"
+#include "a_types.h"
+#include "a_osapi.h"
+#define ATH_MODULE_NAME misc
+#include "a_debug.h"
+#include "common_drv.h"
+#include "hci_transport_api.h"
+#include "ar3kconfig.h"
+#else
+#ifndef A_PRINTF
+#define A_PRINTF(args...) printk(KERN_ALERT args)
+#endif /* A_PRINTF */
+#include "debug_linux.h"
+
+/* Helper data type declaration */
+
+#ifndef A_UINT32
+#define A_UCHAR unsigned char
+#define A_UINT32 unsigned long
+#define A_UINT16 unsigned short
+#define A_UINT8 unsigned char
+#define A_BOOL unsigned char
+#endif /* A_UINT32 */
+
+#define ATH_DEBUG_ERR (1 << 0)
+#define ATH_DEBUG_WARN (1 << 1)
+#define ATH_DEBUG_INFO (1 << 2)
+
+
+
+#define FALSE 0
+#define TRUE 1
+
+#ifndef A_MALLOC
+#define A_MALLOC(size) kmalloc((size),GFP_KERNEL)
+#endif /* A_MALLOC */
+
+
+#ifndef A_FREE
+#define A_FREE(addr) kfree((addr))
+#endif /* A_FREE */
+#endif /* HCI_TRANSPORT_SDIO */
+
+/* String manipulation APIs */
+#ifndef A_STRTOUL
+#define A_STRTOUL simple_strtoul
+#endif /* A_STRTOUL */
+
+#ifndef A_STRTOL
+#define A_STRTOL simple_strtol
+#endif /* A_STRTOL */
+
+
+/* The maximum number of bytes possible in a patch entry */
+#define MAX_PATCH_SIZE 20000
+
+/* Maximum HCI packets that will be formed from the Patch file */
+#define MAX_NUM_PATCH_ENTRY ((MAX_PATCH_SIZE/MAX_BYTE_LENGTH) + 1)
+
+
+
+
+
+
+
+typedef struct PSCmdPacket
+{
+ A_UCHAR *Hcipacket;
+ int packetLen;
+} PSCmdPacket;
+
+/* Parses a Patch information buffer and store it in global structure */
+A_STATUS AthDoParsePatch(A_UCHAR *, A_UINT32);
+
+/* parses a PS information buffer and stores it in a global structure */
+A_STATUS AthDoParsePS(A_UCHAR *, A_UINT32);
+
+/*
+ * Uses the output of Both AthDoParsePS and AthDoParsePatch APIs to form HCI command array with
+ * all the PS and patch commands.
+ * The list will have the below mentioned commands in order.
+ * CRC command packet
+ * Download patch command(s)
+ * Enable patch Command
+ * PS Reset Command
+ * PS Tag Command(s)
+ *
+ */
+int AthCreateCommandList(PSCmdPacket **, A_UINT32 *);
+
+/* Clean up the dynamically allocated HCI command list */
+A_STATUS AthFreeCommandList(PSCmdPacket **HciPacketList, A_UINT32 numPackets);
+#endif /* __AR3KPSPARSER_H */
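
A condensed sketch of how these APIs fit together (mirroring PSSendOps() in ar3kpsconfig.c). Error handling is trimmed, the real code additionally interprets the reply to the first (CRC) command to skip a device that is already provisioned, and SendHCICommandWaitCommandComplete() is the synchronous helper from the config files, not declared in this header.

static A_STATUS ar3k_ps_download_sketch(AR3K_CONFIG_INFO *dev,
                                        A_UCHAR *ps_buf, A_UINT32 ps_len,
                                        A_UCHAR *patch_buf, A_UINT32 patch_len)
{
    PSCmdPacket *cmds = NULL;
    A_UINT32 num = 0, i;
    A_UINT8 *event, *to_free;

    AthDoParsePS(ps_buf, ps_len);           /* fill the global PS tag table  */
    AthDoParsePatch(patch_buf, patch_len);  /* fill the global patch table   */

    AthCreateCommandList(&cmds, &num);      /* CRC, patch, enable, reset, PS */

    for (i = 0; i < num; i++) {
        if (SendHCICommandWaitCommandComplete(dev, cmds[i].Hcipacket,
                                              cmds[i].packetLen,
                                              &event, &to_free) != A_OK)
            break;
        A_FREE(to_free);                    /* event buffer owned by caller  */
    }

    return AthFreeCommandList(&cmds, num);
}
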
diff --git a/drivers/staging/ath6kl/miscdrv/common_drv.c b/drivers/staging/ath6kl/miscdrv/common_drv.c
new file mode 100644
index 000000000000..6754fde467de
--- /dev/null
+++ b/drivers/staging/ath6kl/miscdrv/common_drv.c
@@ -0,0 +1,1027 @@
+//------------------------------------------------------------------------------
+// <copyright file="common_drv.c" company="Atheros">
+// Copyright (c) 2004-2010 Atheros Corporation. All rights reserved.
+//
+//
+// Permission to use, copy, modify, and/or distribute this software for any
+// purpose with or without fee is hereby granted, provided that the above
+// copyright notice and this permission notice appear in all copies.
+//
+// THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+// WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+// MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+// ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+// WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+// ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+// OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+//
+//
+//------------------------------------------------------------------------------
+//==============================================================================
+// Author(s): ="Atheros"
+//==============================================================================
+
+#include "a_config.h"
+#include "athdefs.h"
+#include "a_types.h"
+
+#include "AR6002/hw2.0/hw/mbox_host_reg.h"
+#include "AR6002/hw2.0/hw/apb_map.h"
+#include "AR6002/hw2.0/hw/si_reg.h"
+#include "AR6002/hw2.0/hw/gpio_reg.h"
+#include "AR6002/hw2.0/hw/rtc_reg.h"
+#include "AR6002/hw2.0/hw/vmc_reg.h"
+#include "AR6002/hw2.0/hw/mbox_reg.h"
+
+#include "a_osapi.h"
+#include "targaddrs.h"
+#include "hif.h"
+#include "htc_api.h"
+#include "wmi.h"
+#include "bmi.h"
+#include "bmi_msg.h"
+#include "common_drv.h"
+#define ATH_MODULE_NAME misc
+#include "a_debug.h"
+#include "ar6000_diag.h"
+
+static ATH_DEBUG_MODULE_DBG_INFO *g_pModuleInfoHead = NULL;
+static A_MUTEX_T g_ModuleListLock;
+static A_BOOL g_ModuleDebugInit = FALSE;
+
+#ifdef ATH_DEBUG_MODULE
+
+ATH_DEBUG_INSTANTIATE_MODULE_VAR(misc,
+ "misc",
+ "Common and misc APIs",
+ ATH_DEBUG_MASK_DEFAULTS,
+ 0,
+ NULL);
+
+#endif
+
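+/* Resolve a host interest item to its address on the given target type; unknown targets yield 0 */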
+#define HOST_INTEREST_ITEM_ADDRESS(target, item) \
+ ((((target) == TARGET_TYPE_AR6002) ? AR6002_HOST_INTEREST_ITEM_ADDRESS(item) : \
+ (((target) == TARGET_TYPE_AR6003) ? AR6003_HOST_INTEREST_ITEM_ADDRESS(item) : 0)))
+
+
+#define AR6001_LOCAL_COUNT_ADDRESS 0x0c014080
+#define AR6002_LOCAL_COUNT_ADDRESS 0x00018080
+#define AR6003_LOCAL_COUNT_ADDRESS 0x00018080
+#define CPU_DBG_SEL_ADDRESS 0x00000483
+#define CPU_DBG_ADDRESS 0x00000484
+
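+/* Shadow copies of the per-board customer data, filled from the target during the BMI phase */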
+static A_UINT8 custDataAR6002[AR6002_CUST_DATA_SIZE];
+static A_UINT8 custDataAR6003[AR6003_CUST_DATA_SIZE];
+
+/* Compile the 4-byte version of the window register setup routine.
+ * This mitigates host interconnect issues with non-4-byte-aligned bus requests; some
+ * interconnects use bus adapters that impose strict alignment limitations.
+ * Since diag window access is not intended for performance-critical operations, the 4-byte mode
+ * should be satisfactory even though it generates 4X the bus activity. */
+
+#ifdef USE_4BYTE_REGISTER_ACCESS
+
+ /* set the window address register (using 4-byte register access ). */
+A_STATUS ar6000_SetAddressWindowRegister(HIF_DEVICE *hifDevice, A_UINT32 RegisterAddr, A_UINT32 Address)
+{
+ A_STATUS status;
+ A_UINT8 addrValue[4];
+ A_INT32 i;
+
+ /* write bytes 1,2,3 of the register to set the upper address bytes, the LSB is written
+ * last to initiate the access cycle */
+
+ for (i = 1; i <= 3; i++) {
+ /* fill the buffer with the address byte value we want to hit 4 times*/
+ addrValue[0] = ((A_UINT8 *)&Address)[i];
+ addrValue[1] = addrValue[0];
+ addrValue[2] = addrValue[0];
+ addrValue[3] = addrValue[0];
+
+ /* hit each byte of the register address with a 4-byte write operation to the same address,
+ * this is a harmless operation */
+ status = HIFReadWrite(hifDevice,
+ RegisterAddr+i,
+ addrValue,
+ 4,
+ HIF_WR_SYNC_BYTE_FIX,
+ NULL);
+ if (status != A_OK) {
+ break;
+ }
+ }
+
+ if (status != A_OK) {
+ AR_DEBUG_PRINTF(ATH_LOG_ERR, ("Cannot write initial bytes of 0x%x to window reg: 0x%X \n",
+ Address, RegisterAddr));
+ return status;
+ }
+
+ /* write the address register again, this time write the whole 4-byte value.
+ * The effect here is that the LSB write causes the cycle to start, the extra
+ * 3 byte write to bytes 1,2,3 has no effect since we are writing the same values again */
+ status = HIFReadWrite(hifDevice,
+ RegisterAddr,
+ (A_UCHAR *)(&Address),
+ 4,
+ HIF_WR_SYNC_BYTE_INC,
+ NULL);
+
+ if (status != A_OK) {
+ AR_DEBUG_PRINTF(ATH_LOG_ERR, ("Cannot write 0x%x to window reg: 0x%X \n",
+ Address, RegisterAddr));
+ return status;
+ }
+
+ return A_OK;
+}
+
+
+#else
+
+ /* set the window address register */
+A_STATUS ar6000_SetAddressWindowRegister(HIF_DEVICE *hifDevice, A_UINT32 RegisterAddr, A_UINT32 Address)
+{
+ A_STATUS status;
+
+ /* write bytes 1,2,3 of the register to set the upper address bytes, the LSB is written
+ * last to initiate the access cycle */
+ status = HIFReadWrite(hifDevice,
+ RegisterAddr+1, /* write upper 3 bytes */
+ ((A_UCHAR *)(&Address))+1,
+ sizeof(A_UINT32)-1,
+ HIF_WR_SYNC_BYTE_INC,
+ NULL);
+
+ if (status != A_OK) {
+ AR_DEBUG_PRINTF(ATH_LOG_ERR, ("Cannot write initial bytes of 0x%x to window reg: 0x%X \n",
+ RegisterAddr, Address));
+ return status;
+ }
+
+ /* write the LSB of the register, this initiates the operation */
+ status = HIFReadWrite(hifDevice,
+ RegisterAddr,
+ (A_UCHAR *)(&Address),
+ sizeof(A_UINT8),
+ HIF_WR_SYNC_BYTE_INC,
+ NULL);
+
+ if (status != A_OK) {
+ AR_DEBUG_PRINTF(ATH_LOG_ERR, ("Cannot write 0x%x to window reg: 0x%X \n",
+ RegisterAddr, Address));
+ return status;
+ }
+
+ return A_OK;
+}
+
+#endif
+
+/*
+ * Read from the AR6000 through its diagnostic window.
+ * No cooperation from the Target is required for this.
+ */
+A_STATUS
+ar6000_ReadRegDiag(HIF_DEVICE *hifDevice, A_UINT32 *address, A_UINT32 *data)
+{
+ A_STATUS status;
+
+ /* set window register to start read cycle */
+ status = ar6000_SetAddressWindowRegister(hifDevice,
+ WINDOW_READ_ADDR_ADDRESS,
+ *address);
+
+ if (status != A_OK) {
+ return status;
+ }
+
+ /* read the data */
+ status = HIFReadWrite(hifDevice,
+ WINDOW_DATA_ADDRESS,
+ (A_UCHAR *)data,
+ sizeof(A_UINT32),
+ HIF_RD_SYNC_BYTE_INC,
+ NULL);
+ if (status != A_OK) {
+ AR_DEBUG_PRINTF(ATH_LOG_ERR, ("Cannot read from WINDOW_DATA_ADDRESS\n"));
+ return status;
+ }
+
+ return status;
+}
+
+
+/*
+ * Write to the AR6000 through its diagnostic window.
+ * No cooperation from the Target is required for this.
+ */
+A_STATUS
+ar6000_WriteRegDiag(HIF_DEVICE *hifDevice, A_UINT32 *address, A_UINT32 *data)
+{
+ A_STATUS status;
+
+ /* set write data */
+ status = HIFReadWrite(hifDevice,
+ WINDOW_DATA_ADDRESS,
+ (A_UCHAR *)data,
+ sizeof(A_UINT32),
+ HIF_WR_SYNC_BYTE_INC,
+ NULL);
+ if (status != A_OK) {
+ AR_DEBUG_PRINTF(ATH_LOG_ERR, ("Cannot write 0x%x to WINDOW_DATA_ADDRESS\n", *data));
+ return status;
+ }
+
+ /* set window register, which starts the write cycle */
+ return ar6000_SetAddressWindowRegister(hifDevice,
+ WINDOW_WRITE_ADDR_ADDRESS,
+ *address);
+}
+
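+/* Read a buffer from target memory through the diag window, one 32-bit word at a time */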
+A_STATUS
+ar6000_ReadDataDiag(HIF_DEVICE *hifDevice, A_UINT32 address,
+ A_UCHAR *data, A_UINT32 length)
+{
+ A_UINT32 count;
+ A_STATUS status = A_OK;
+
+ for (count = 0; count < length; count += 4, address += 4) {
+ if ((status = ar6000_ReadRegDiag(hifDevice, &address,
+ (A_UINT32 *)&data[count])) != A_OK)
+ {
+ break;
+ }
+ }
+
+ return status;
+}
+
+A_STATUS
+ar6000_WriteDataDiag(HIF_DEVICE *hifDevice, A_UINT32 address,
+ A_UCHAR *data, A_UINT32 length)
+{
+ A_UINT32 count;
+ A_STATUS status = A_OK;
+
+ for (count = 0; count < length; count += 4, address += 4) {
+ if ((status = ar6000_WriteRegDiag(hifDevice, &address,
+ (A_UINT32 *)&data[count])) != A_OK)
+ {
+ break;
+ }
+ }
+
+ return status;
+}
+
+A_STATUS
+ar6k_ReadTargetRegister(HIF_DEVICE *hifDevice, int regsel, A_UINT32 *regval)
+{
+ A_STATUS status;
+ A_UCHAR vals[4];
+ A_UCHAR register_selection[4];
+
+ register_selection[0] = register_selection[1] = register_selection[2] = register_selection[3] = (regsel & 0xff);
+ status = HIFReadWrite(hifDevice,
+ CPU_DBG_SEL_ADDRESS,
+ register_selection,
+ 4,
+ HIF_WR_SYNC_BYTE_FIX,
+ NULL);
+
+ if (status != A_OK) {
+ AR_DEBUG_PRINTF(ATH_LOG_ERR, ("Cannot write CPU_DBG_SEL (%d)\n", regsel));
+ return status;
+ }
+
+ status = HIFReadWrite(hifDevice,
+ CPU_DBG_ADDRESS,
+ (A_UCHAR *)vals,
+ sizeof(vals),
+ HIF_RD_SYNC_BYTE_INC,
+ NULL);
+ if (status != A_OK) {
+ AR_DEBUG_PRINTF(ATH_LOG_ERR, ("Cannot read from CPU_DBG_ADDRESS\n"));
+ return status;
+ }
+
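+ /* assemble the 32-bit register value from the four bytes read (least significant byte first) */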
+ *regval = vals[0]<<0 | vals[1]<<8 | vals[2]<<16 | vals[3]<<24;
+
+ return status;
+}
+
+void
+ar6k_FetchTargetRegs(HIF_DEVICE *hifDevice, A_UINT32 *targregs)
+{
+ int i;
+ A_UINT32 val;
+
+ for (i=0; i<AR6003_FETCH_TARG_REGS_COUNT; i++) {
+ val=0xffffffff;
+ (void)ar6k_ReadTargetRegister(hifDevice, i, &val);
+ targregs[i] = val;
+ }
+}
+
+#if 0
+static A_STATUS
+_do_write_diag(HIF_DEVICE *hifDevice, A_UINT32 addr, A_UINT32 value)
+{
+ A_STATUS status;
+
+ status = ar6000_WriteRegDiag(hifDevice, &addr, &value);
+ if (status != A_OK)
+ {
+ AR_DEBUG_PRINTF(ATH_LOG_ERR, ("Cannot force Target to execute ROM!\n"));
+ }
+
+ return status;
+}
+#endif
+
+
+/*
+ * Delay up to wait_msecs millisecs to allow Target to enter BMI phase,
+ * which is a good sign that it's alive and well. This is used after
+ * explicitly forcing the Target to reset.
+ *
+ * The wait_msecs time should be sufficiently long to cover any reasonable
+ * boot-time delay. For instance, AR6001 firmware allows one second for a
+ * low frequency crystal to settle before it calibrates the refclk frequency.
+ *
+ * TBD: Might want to add special handling for AR6K_OPTION_BMI_DISABLE.
+ */
+#if 0
+static A_STATUS
+_delay_until_target_alive(HIF_DEVICE *hifDevice, A_INT32 wait_msecs, A_UINT32 TargetType)
+{
+ A_INT32 actual_wait;
+ A_INT32 i;
+ A_UINT32 address;
+
+ actual_wait = 0;
+
+ /* Hardcode the address of LOCAL_COUNT_ADDRESS based on the target type */
+ if (TargetType == TARGET_TYPE_AR6002) {
+ address = AR6002_LOCAL_COUNT_ADDRESS;
+ } else if (TargetType == TARGET_TYPE_AR6003) {
+ address = AR6003_LOCAL_COUNT_ADDRESS;
+ } else {
+ A_ASSERT(0);
+ }
+ address += 0x10;
+ for (i=0; actual_wait < wait_msecs; i++) {
+ A_UINT32 data;
+
+ A_MDELAY(100);
+ actual_wait += 100;
+
+ data = 0;
+ if (ar6000_ReadRegDiag(hifDevice, &address, &data) != A_OK) {
+ return A_ERROR;
+ }
+
+ if (data != 0) {
+ /* No need to wait longer -- we have a BMI credit */
+ return A_OK;
+ }
+ }
+ return A_ERROR; /* timed out */
+}
+#endif
+
+#define AR6001_RESET_CONTROL_ADDRESS 0x0C000000
+#define AR6002_RESET_CONTROL_ADDRESS 0x00004000
+#define AR6003_RESET_CONTROL_ADDRESS 0x00004000
+/* reset device */
+A_STATUS ar6000_reset_device(HIF_DEVICE *hifDevice, A_UINT32 TargetType, A_BOOL waitForCompletion, A_BOOL coldReset)
+{
+ A_STATUS status = A_OK;
+ A_UINT32 address;
+ A_UINT32 data;
+
+ do {
+// Workaround BEGIN
+ // address = RESET_CONTROL_ADDRESS;
+
+ if (coldReset) {
+ data = RESET_CONTROL_COLD_RST_MASK;
+ }
+ else {
+ data = RESET_CONTROL_MBOX_RST_MASK;
+ }
+
+ /* Hardcode the address of RESET_CONTROL_ADDRESS based on the target type */
+ if (TargetType == TARGET_TYPE_AR6002) {
+ address = AR6002_RESET_CONTROL_ADDRESS;
+ } else if (TargetType == TARGET_TYPE_AR6003) {
+ address = AR6003_RESET_CONTROL_ADDRESS;
+ } else {
+ A_ASSERT(0);
+ }
+
+
+ status = ar6000_WriteRegDiag(hifDevice, &address, &data);
+
+ if (A_FAILED(status)) {
+ break;
+ }
+
+ if (!waitForCompletion) {
+ break;
+ }
+
+#if 0
+ /* Up to 2 second delay to allow things to settle down */
+ (void)_delay_until_target_alive(hifDevice, 2000, TargetType);
+
+ /*
+ * Read back the RESET CAUSE register to ensure that the cold reset
+ * went through.
+ */
+
+ // address = RESET_CAUSE_ADDRESS;
+ /* Hardcode the address of RESET_CAUSE_ADDRESS based on the target type */
+ if (TargetType == TARGET_TYPE_AR6002) {
+ address = 0x000040C0;
+ } else if (TargetType == TARGET_TYPE_AR6003) {
+ address = 0x000040C0;
+ } else {
+ A_ASSERT(0);
+ }
+
+ data = 0;
+ status = ar6000_ReadRegDiag(hifDevice, &address, &data);
+
+ if (A_FAILED(status)) {
+ break;
+ }
+
+ AR_DEBUG_PRINTF(ATH_LOG_ERR, ("Reset Cause readback: 0x%X \n",data));
+ data &= RESET_CAUSE_LAST_MASK;
+ if (data != 2) {
+ AR_DEBUG_PRINTF(ATH_LOG_ERR, ("Unable to cold reset the target \n"));
+ }
+#endif
+// Workaround END
+
+ } while (FALSE);
+
+ if (A_FAILED(status)) {
+ AR_DEBUG_PRINTF(ATH_LOG_ERR, ("Failed to reset target \n"));
+ }
+
+ return A_OK;
+}
+
+/* This should be called in BMI phase after firmware is downloaded */
+void
+ar6000_copy_cust_data_from_target(HIF_DEVICE *hifDevice, A_UINT32 TargetType)
+{
+ A_UINT32 eepHeaderAddr;
+ A_UINT8 AR6003CustDataShadow[AR6003_CUST_DATA_SIZE+4];
+ A_INT32 i;
+
+ if (BMIReadMemory(hifDevice,
+ HOST_INTEREST_ITEM_ADDRESS(TargetType, hi_board_data),
+ (A_UCHAR *)&eepHeaderAddr,
+ 4)!= A_OK)
+ {
+ AR_DEBUG_PRINTF(ATH_DEBUG_ERR, ("BMIReadMemory for reading board data address failed \n"));
+ return;
+ }
+
+ if (TargetType == TARGET_TYPE_AR6003) {
+ eepHeaderAddr += 36; /* AR6003 customer data section is at offset 37; start reads at 36 so they stay 4-byte aligned (shadow+1 is copied out below) */
+
+ for (i=0; i<AR6003_CUST_DATA_SIZE+4; i+=4){
+ if (BMIReadSOCRegister(hifDevice, eepHeaderAddr, (A_UINT32 *)&AR6003CustDataShadow[i])!= A_OK) {
+ AR_DEBUG_PRINTF(ATH_DEBUG_ERR, ("BMIReadSOCRegister () failed \n"));
+ return ;
+ }
+ eepHeaderAddr +=4;
+ }
+
+ memcpy(custDataAR6003, AR6003CustDataShadow+1, AR6003_CUST_DATA_SIZE);
+ }
+
+ if (TargetType == TARGET_TYPE_AR6002) {
+ eepHeaderAddr += 64; /* AR6002 customer data section offset is 64 */
+
+ for (i=0; i<AR6002_CUST_DATA_SIZE; i+=4){
+ if (BMIReadSOCRegister(hifDevice, eepHeaderAddr, (A_UINT32 *)&custDataAR6002[i])!= A_OK) {
+ AR_DEBUG_PRINTF(ATH_DEBUG_ERR, ("BMIReadSOCRegister () failed \n"));
+ return ;
+ }
+ eepHeaderAddr +=4;
+ }
+ }
+
+ return;
+}
+
+/* This is the function to call when the customer data is needed */
+A_UINT8 *
+ar6000_get_cust_data_buffer(A_UINT32 TargetType)
+{
+ if (TargetType == TARGET_TYPE_AR6003)
+ return custDataAR6003;
+
+ if (TargetType == TARGET_TYPE_AR6002)
+ return custDataAR6002;
+
+ return NULL;
+}
+
+#define REG_DUMP_COUNT_AR6001 38 /* WORDs, derived from AR600x_regdump.h */
+#define REG_DUMP_COUNT_AR6002 60
+#define REG_DUMP_COUNT_AR6003 60
+#define REGISTER_DUMP_LEN_MAX 60
+#if REG_DUMP_COUNT_AR6001 > REGISTER_DUMP_LEN_MAX
+#error "REG_DUMP_COUNT_AR6001 too large"
+#endif
+#if REG_DUMP_COUNT_AR6002 > REGISTER_DUMP_LEN_MAX
+#error "REG_DUMP_COUNT_AR6002 too large"
+#endif
+#if REG_DUMP_COUNT_AR6003 > REGISTER_DUMP_LEN_MAX
+#error "REG_DUMP_COUNT_AR6003 too large"
+#endif
+
+
+void ar6000_dump_target_assert_info(HIF_DEVICE *hifDevice, A_UINT32 TargetType)
+{
+ A_UINT32 address;
+ A_UINT32 regDumpArea = 0;
+ A_STATUS status;
+ A_UINT32 regDumpValues[REGISTER_DUMP_LEN_MAX];
+ A_UINT32 regDumpCount = 0;
+ A_UINT32 i;
+
+ do {
+
+ /* the reg dump pointer is copied to the host interest area */
+ address = HOST_INTEREST_ITEM_ADDRESS(TargetType, hi_failure_state);
+ address = TARG_VTOP(TargetType, address);
+
+ if (TargetType == TARGET_TYPE_AR6002) {
+ regDumpCount = REG_DUMP_COUNT_AR6002;
+ } else if (TargetType == TARGET_TYPE_AR6003) {
+ regDumpCount = REG_DUMP_COUNT_AR6003;
+ } else {
+ A_ASSERT(0);
+ }
+
+ /* read RAM location through diagnostic window */
+ status = ar6000_ReadRegDiag(hifDevice, &address, &regDumpArea);
+
+ if (A_FAILED(status)) {
+ AR_DEBUG_PRINTF(ATH_DEBUG_ERR,("AR6K: Failed to get ptr to register dump area \n"));
+ break;
+ }
+
+ AR_DEBUG_PRINTF(ATH_DEBUG_ERR,("AR6K: Location of register dump data: 0x%X \n",regDumpArea));
+
+ if (regDumpArea == 0) {
+ /* no reg dump */
+ break;
+ }
+
+ regDumpArea = TARG_VTOP(TargetType, regDumpArea);
+
+ /* fetch register dump data */
+ status = ar6000_ReadDataDiag(hifDevice,
+ regDumpArea,
+ (A_UCHAR *)&regDumpValues[0],
+ regDumpCount * (sizeof(A_UINT32)));
+
+ if (A_FAILED(status)) {
+ AR_DEBUG_PRINTF(ATH_DEBUG_ERR,("AR6K: Failed to get register dump \n"));
+ break;
+ }
+ AR_DEBUG_PRINTF(ATH_DEBUG_ERR,("AR6K: Register Dump: \n"));
+
+ for (i = 0; i < regDumpCount; i++) {
+ //ATHR_DISPLAY_MSG (_T(" %d : 0x%8.8X \n"), i, regDumpValues[i]);
+ AR_DEBUG_PRINTF(ATH_DEBUG_ERR,(" %d : 0x%8.8X \n",i, regDumpValues[i]));
+
+#ifdef UNDER_CE
+ /*
+ * For Every logPrintf() Open the File so that in case of Crashes
+ * We will have until the Last Message Flushed on to the File
+ * So use logPrintf Sparingly..!!
+ */
+ tgtassertPrintf (ATH_DEBUG_TRC," %d: 0x%8.8X \n",i, regDumpValues[i]);
+#endif
+ }
+
+ } while (FALSE);
+
+}
+
+/* set HTC/Mbox operational parameters; this can only be called when the target is in the
+ * BMI phase */
+A_STATUS ar6000_set_htc_params(HIF_DEVICE *hifDevice,
+ A_UINT32 TargetType,
+ A_UINT32 MboxIsrYieldValue,
+ A_UINT8 HtcControlBuffers)
+{
+ A_STATUS status;
+ A_UINT32 blocksizes[HTC_MAILBOX_NUM_MAX];
+
+ do {
+ /* get the block sizes */
+ status = HIFConfigureDevice(hifDevice, HIF_DEVICE_GET_MBOX_BLOCK_SIZE,
+ blocksizes, sizeof(blocksizes));
+
+ if (A_FAILED(status)) {
+ AR_DEBUG_PRINTF(ATH_LOG_ERR,("Failed to get block size info from HIF layer...\n"));
+ break;
+ }
+ /* note: we actually get the block size for mailbox 1, for SDIO the block
+ * size on mailbox 0 is artificially set to 1 */
+ /* must be a power of 2 */
+ A_ASSERT((blocksizes[1] & (blocksizes[1] - 1)) == 0);
+
+ if (HtcControlBuffers != 0) {
+ /* set override for number of control buffers to use */
+ blocksizes[1] |= ((A_UINT32)HtcControlBuffers) << 16;
+ }
+
+ /* set the host interest area for the block size */
+ status = BMIWriteMemory(hifDevice,
+ HOST_INTEREST_ITEM_ADDRESS(TargetType, hi_mbox_io_block_sz),
+ (A_UCHAR *)&blocksizes[1],
+ 4);
+
+ if (A_FAILED(status)) {
+ AR_DEBUG_PRINTF(ATH_LOG_ERR,("BMIWriteMemory for IO block size failed \n"));
+ break;
+ }
+
+ AR_DEBUG_PRINTF(ATH_LOG_INF,("Block Size Set: %d (target address:0x%X)\n",
+ blocksizes[1], HOST_INTEREST_ITEM_ADDRESS(TargetType, hi_mbox_io_block_sz)));
+
+ if (MboxIsrYieldValue != 0) {
+ /* set the host interest area for the mbox ISR yield limit */
+ status = BMIWriteMemory(hifDevice,
+ HOST_INTEREST_ITEM_ADDRESS(TargetType, hi_mbox_isr_yield_limit),
+ (A_UCHAR *)&MboxIsrYieldValue,
+ 4);
+
+ if (A_FAILED(status)) {
+ AR_DEBUG_PRINTF(ATH_LOG_ERR,("BMIWriteMemory for yield limit failed \n"));
+ break;
+ }
+ }
+
+ } while (FALSE);
+
+ return status;
+}
+
+
+static A_STATUS prepare_ar6002(HIF_DEVICE *hifDevice, A_UINT32 TargetVersion)
+{
+ A_STATUS status = A_OK;
+
+ /* placeholder */
+
+ return status;
+}
+
+static A_STATUS prepare_ar6003(HIF_DEVICE *hifDevice, A_UINT32 TargetVersion)
+{
+ A_STATUS status = A_OK;
+
+ /* placeholder */
+
+ return status;
+}
+
+/* this function assumes the caller has already initialized the BMI APIs */
+A_STATUS ar6000_prepare_target(HIF_DEVICE *hifDevice,
+ A_UINT32 TargetType,
+ A_UINT32 TargetVersion)
+{
+ if (TargetType == TARGET_TYPE_AR6002) {
+ /* do any preparations for AR6002 devices */
+ return prepare_ar6002(hifDevice,TargetVersion);
+ } else if (TargetType == TARGET_TYPE_AR6003) {
+ return prepare_ar6003(hifDevice,TargetVersion);
+ }
+
+ return A_OK;
+}
+
+#if defined(CONFIG_AR6002_REV1_FORCE_HOST)
+/*
+ * Call this function just before the call to BMIInit
+ * in order to force AR6002 rev 1.x firmware to detect a Host.
+ * THIS IS FOR USE ONLY WITH AR6002 REV 1.x.
+ * TBDXXX: Remove this function when REV 1.x is desupported.
+ */
+A_STATUS
+ar6002_REV1_reset_force_host (HIF_DEVICE *hifDevice)
+{
+ A_INT32 i;
+ struct forceROM_s {
+ A_UINT32 addr;
+ A_UINT32 data;
+ };
+ struct forceROM_s *ForceROM;
+ A_INT32 szForceROM;
+ A_STATUS status = A_OK;
+ A_UINT32 address;
+ A_UINT32 data;
+
+ /* Force AR6002 REV1.x to recognize Host presence.
+ *
+ * Note: Use RAM at 0x52df80..0x52dfa0 with ROM Remap entry 0
+ * so that this workaround functions with AR6002.war1.sh. We
+ * could fold that entire workaround into this one, but it's not
+ * worth the effort at this point. This workaround cannot be
+ * merged into the other workaround because this must be done
+ * before BMI.
+ */
+
+ static struct forceROM_s ForceROM_NEW[] = {
+ {0x52df80, 0x20f31c07},
+ {0x52df84, 0x92374420},
+ {0x52df88, 0x1d120c03},
+ {0x52df8c, 0xff8216f0},
+ {0x52df90, 0xf01d120c},
+ {0x52df94, 0x81004136},
+ {0x52df98, 0xbc9100bd},
+ {0x52df9c, 0x00bba100},
+
+ {0x00008000|MC_TCAM_TARGET_ADDRESS, 0x0012dfe0}, /* Use remap entry 0 */
+ {0x00008000|MC_TCAM_COMPARE_ADDRESS, 0x000e2380},
+ {0x00008000|MC_TCAM_MASK_ADDRESS, 0x00000000},
+ {0x00008000|MC_TCAM_VALID_ADDRESS, 0x00000001},
+
+ {0x00018000|(LOCAL_COUNT_ADDRESS+0x10), 0}, /* clear BMI credit counter */
+
+ {0x00004000|AR6002_RESET_CONTROL_ADDRESS, RESET_CONTROL_WARM_RST_MASK},
+ };
+
+ address = 0x004ed4b0; /* REV1 target software ID is stored here */
+ status = ar6000_ReadRegDiag(hifDevice, &address, &data);
+ if (A_FAILED(status) || (data != AR6002_VERSION_REV1)) {
+ return A_ERROR; /* Not AR6002 REV1 */
+ }
+
+ ForceROM = ForceROM_NEW;
+ szForceROM = sizeof(ForceROM_NEW)/sizeof(*ForceROM);
+
+ ATH_DEBUG_PRINTF (DBG_MISC_DRV, ATH_DEBUG_TRC, ("Force Target to recognize Host....\n"));
+ for (i = 0; i < szForceROM; i++)
+ {
+ if (ar6000_WriteRegDiag(hifDevice,
+ &ForceROM[i].addr,
+ &ForceROM[i].data) != A_OK)
+ {
+ ATH_DEBUG_PRINTF (DBG_MISC_DRV, ATH_DEBUG_TRC, ("Cannot force Target to recognize Host!\n"));
+ return A_ERROR;
+ }
+ }
+
+ A_MDELAY(1000);
+
+ return A_OK;
+}
+
+#endif /* CONFIG_AR6002_REV1_FORCE_HOST */
+
+void DebugDumpBytes(A_UCHAR *buffer, A_UINT16 length, char *pDescription)
+{
+ A_CHAR stream[60];
+ A_CHAR byteOffsetStr[10];
+ A_UINT32 i;
+ A_UINT16 offset, count, byteOffset;
+
+ A_PRINTF("<---------Dumping %d Bytes : %s ------>\n", length, pDescription);
+
+ count = 0;
+ offset = 0;
+ byteOffset = 0;
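+ /* print 16 bytes per line, each line prefixed with its 4-digit hex byte offset */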
+ for(i = 0; i < length; i++) {
+ A_SPRINTF(stream + offset, "%2.2X ", buffer[i]);
+ count ++;
+ offset += 3;
+
+ if(count == 16) {
+ count = 0;
+ offset = 0;
+ A_SPRINTF(byteOffsetStr,"%4.4X",byteOffset);
+ A_PRINTF("[%s]: %s\n", byteOffsetStr, stream);
+ A_MEMZERO(stream, 60);
+ byteOffset += 16;
+ }
+ }
+
+ if(offset != 0) {
+ A_SPRINTF(byteOffsetStr,"%4.4X",byteOffset);
+ A_PRINTF("[%s]: %s\n", byteOffsetStr, stream);
+ }
+
+ A_PRINTF("<------------------------------------------------->\n");
+}
+
+void a_dump_module_debug_info(ATH_DEBUG_MODULE_DBG_INFO *pInfo)
+{
+ int i;
+ ATH_DEBUG_MASK_DESCRIPTION *pDesc;
+
+ if (pInfo == NULL) {
+ return;
+ }
+
+ pDesc = pInfo->pMaskDescriptions;
+
+ A_PRINTF("========================================================\n\n");
+ A_PRINTF("Module Debug Info => Name : %s \n", pInfo->ModuleName);
+ A_PRINTF(" => Descr. : %s \n", pInfo->ModuleDescription);
+ A_PRINTF("\n Current mask => 0x%8.8X \n", pInfo->CurrentMask);
+ A_PRINTF("\n Avail. Debug Masks :\n\n");
+
+ for (i = 0; i < pInfo->MaxDescriptions; i++,pDesc++) {
+ A_PRINTF(" => 0x%8.8X -- %s \n", pDesc->Mask, pDesc->Description);
+ }
+
+ if (0 == i) {
+ A_PRINTF(" => * none defined * \n");
+ }
+
+ A_PRINTF("\n Standard Debug Masks :\n\n");
+ /* print standard masks */
+ A_PRINTF(" => 0x%8.8X -- Errors \n", ATH_DEBUG_ERR);
+ A_PRINTF(" => 0x%8.8X -- Warnings \n", ATH_DEBUG_WARN);
+ A_PRINTF(" => 0x%8.8X -- Informational \n", ATH_DEBUG_INFO);
+ A_PRINTF(" => 0x%8.8X -- Tracing \n", ATH_DEBUG_TRC);
+ A_PRINTF("\n========================================================\n");
+
+}
+
+
+static ATH_DEBUG_MODULE_DBG_INFO *FindModule(A_CHAR *module_name)
+{
+ ATH_DEBUG_MODULE_DBG_INFO *pInfo = g_pModuleInfoHead;
+
+ if (!g_ModuleDebugInit) {
+ return NULL;
+ }
+
+ while (pInfo != NULL) {
+ /* TODO: need to use something other than strlen */
+ if (A_MEMCMP(pInfo->ModuleName,module_name,strlen(module_name)) == 0) {
+ break;
+ }
+ pInfo = pInfo->pNext;
+ }
+
+ return pInfo;
+}
+
+
+void a_register_module_debug_info(ATH_DEBUG_MODULE_DBG_INFO *pInfo)
+{
+ if (!g_ModuleDebugInit) {
+ return;
+ }
+
+ A_MUTEX_LOCK(&g_ModuleListLock);
+
+ if (!(pInfo->Flags & ATH_DEBUG_INFO_FLAGS_REGISTERED)) {
+ if (g_pModuleInfoHead == NULL) {
+ g_pModuleInfoHead = pInfo;
+ } else {
+ pInfo->pNext = g_pModuleInfoHead;
+ g_pModuleInfoHead = pInfo;
+ }
+ pInfo->Flags |= ATH_DEBUG_INFO_FLAGS_REGISTERED;
+ }
+
+ A_MUTEX_UNLOCK(&g_ModuleListLock);
+}
+
+void a_dump_module_debug_info_by_name(A_CHAR *module_name)
+{
+ ATH_DEBUG_MODULE_DBG_INFO *pInfo = g_pModuleInfoHead;
+
+ if (!g_ModuleDebugInit) {
+ return;
+ }
+
+ if (A_MEMCMP(module_name,"all",3) == 0) {
+ /* dump all */
+ while (pInfo != NULL) {
+ a_dump_module_debug_info(pInfo);
+ pInfo = pInfo->pNext;
+ }
+ return;
+ }
+
+ pInfo = FindModule(module_name);
+
+ if (pInfo != NULL) {
+ a_dump_module_debug_info(pInfo);
+ }
+
+}
+
+A_STATUS a_get_module_mask(A_CHAR *module_name, A_UINT32 *pMask)
+{
+ ATH_DEBUG_MODULE_DBG_INFO *pInfo = FindModule(module_name);
+
+ if (NULL == pInfo) {
+ return A_ERROR;
+ }
+
+ *pMask = pInfo->CurrentMask;
+ return A_OK;
+}
+
+A_STATUS a_set_module_mask(A_CHAR *module_name, A_UINT32 Mask)
+{
+ ATH_DEBUG_MODULE_DBG_INFO *pInfo = FindModule(module_name);
+
+ if (NULL == pInfo) {
+ return A_ERROR;
+ }
+
+ pInfo->CurrentMask = Mask;
+ A_PRINTF("Module %s, new mask: 0x%8.8X \n",module_name,pInfo->CurrentMask);
+ return A_OK;
+}
+
+
+void a_module_debug_support_init(void)
+{
+ if (g_ModuleDebugInit) {
+ return;
+ }
+ A_MUTEX_INIT(&g_ModuleListLock);
+ g_pModuleInfoHead = NULL;
+ g_ModuleDebugInit = TRUE;
+ A_REGISTER_MODULE_DEBUG_INFO(misc);
+}
+
+void a_module_debug_support_cleanup(void)
+{
+ ATH_DEBUG_MODULE_DBG_INFO *pInfo = g_pModuleInfoHead;
+ ATH_DEBUG_MODULE_DBG_INFO *pCur;
+
+ if (!g_ModuleDebugInit) {
+ return;
+ }
+
+ g_ModuleDebugInit = FALSE;
+
+ A_MUTEX_LOCK(&g_ModuleListLock);
+
+ while (pInfo != NULL) {
+ pCur = pInfo;
+ pInfo = pInfo->pNext;
+ pCur->pNext = NULL;
+ /* clear registered flag */
+ pCur->Flags &= ~ATH_DEBUG_INFO_FLAGS_REGISTERED;
+ }
+
+ A_MUTEX_UNLOCK(&g_ModuleListLock);
+
+ A_MUTEX_DELETE(&g_ModuleListLock);
+ g_pModuleInfoHead = NULL;
+}
+
+ /* can only be called during bmi init stage */
+A_STATUS ar6000_set_hci_bridge_flags(HIF_DEVICE *hifDevice,
+ A_UINT32 TargetType,
+ A_UINT32 Flags)
+{
+ A_STATUS status = A_OK;
+
+ do {
+
+ if (TargetType != TARGET_TYPE_AR6003) {
+ AR_DEBUG_PRINTF(ATH_DEBUG_WARN, ("Target Type:%d, does not support HCI bridging! \n",
+ TargetType));
+ break;
+ }
+
+ /* set hci bridge flags */
+ status = BMIWriteMemory(hifDevice,
+ HOST_INTEREST_ITEM_ADDRESS(TargetType, hi_hci_bridge_flags),
+ (A_UCHAR *)&Flags,
+ 4);
+
+
+ } while (FALSE);
+
+ return status;
+}
+
diff --git a/drivers/staging/ath6kl/miscdrv/credit_dist.c b/drivers/staging/ath6kl/miscdrv/credit_dist.c
new file mode 100644
index 000000000000..91316e0b109e
--- /dev/null
+++ b/drivers/staging/ath6kl/miscdrv/credit_dist.c
@@ -0,0 +1,418 @@
+//------------------------------------------------------------------------------
+// <copyright file="credit_dist.c" company="Atheros">
+// Copyright (c) 2004-2010 Atheros Corporation. All rights reserved.
+//
+//
+// Permission to use, copy, modify, and/or distribute this software for any
+// purpose with or without fee is hereby granted, provided that the above
+// copyright notice and this permission notice appear in all copies.
+//
+// THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+// WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+// MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+// ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+// WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+// ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+// OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+//
+//
+//------------------------------------------------------------------------------
+//==============================================================================
+// Author(s): ="Atheros"
+//==============================================================================
+
+#include "a_config.h"
+#include "athdefs.h"
+#include "a_types.h"
+#include "a_osapi.h"
+#define ATH_MODULE_NAME misc
+#include "a_debug.h"
+#include "htc_api.h"
+#include "common_drv.h"
+
+/********* CREDIT DISTRIBUTION FUNCTIONS ******************************************/
+
+#define NO_VO_SERVICE 1 /* currently WMI only uses 3 data streams, so we leave VO service inactive */
+#define CONFIG_GIVE_LOW_PRIORITY_STREAMS_MIN_CREDITS 1
+
+#ifdef NO_VO_SERVICE
+#define DATA_SVCS_USED 3
+#else
+#define DATA_SVCS_USED 4
+#endif
+
+static void RedistributeCredits(COMMON_CREDIT_STATE_INFO *pCredInfo,
+ HTC_ENDPOINT_CREDIT_DIST *pEPDistList);
+
+static void SeekCredits(COMMON_CREDIT_STATE_INFO *pCredInfo,
+ HTC_ENDPOINT_CREDIT_DIST *pEPDistList);
+
+/* reduce an ep's credits back to a set limit */
+static INLINE void ReduceCredits(COMMON_CREDIT_STATE_INFO *pCredInfo,
+ HTC_ENDPOINT_CREDIT_DIST *pEpDist,
+ int Limit)
+{
+ int credits;
+
+ /* set the new limit */
+ pEpDist->TxCreditsAssigned = Limit;
+
+ if (pEpDist->TxCredits <= Limit) {
+ return;
+ }
+
+ /* figure out how much to take away */
+ credits = pEpDist->TxCredits - Limit;
+ /* take them away */
+ pEpDist->TxCredits -= credits;
+ pCredInfo->CurrentFreeCredits += credits;
+}
+
+/* give an endpoint some credits from the free credit pool */
+#define GiveCredits(pCredInfo,pEpDist,credits) \
+{ \
+ (pEpDist)->TxCredits += (credits); \
+ (pEpDist)->TxCreditsAssigned += (credits); \
+ (pCredInfo)->CurrentFreeCredits -= (credits); \
+}
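+/* Note: callers must not give more credits than CurrentFreeCredits; GiveCredits performs no underflow check */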
+
+
+/* default credit init callback.
+ * This function is called in the context of HTCStart() to set up initial (application-specific)
+ * credit distributions */
+static void ar6000_credit_init(void *Context,
+ HTC_ENDPOINT_CREDIT_DIST *pEPList,
+ int TotalCredits)
+{
+ HTC_ENDPOINT_CREDIT_DIST *pCurEpDist;
+ int count;
+ COMMON_CREDIT_STATE_INFO *pCredInfo = (COMMON_CREDIT_STATE_INFO *)Context;
+
+ pCredInfo->CurrentFreeCredits = TotalCredits;
+ pCredInfo->TotalAvailableCredits = TotalCredits;
+
+ pCurEpDist = pEPList;
+
+ /* run through the list and initialize */
+ while (pCurEpDist != NULL) {
+
+ /* set minimums for each endpoint */
+ pCurEpDist->TxCreditsMin = pCurEpDist->TxCreditsPerMaxMsg;
+
+#ifdef CONFIG_GIVE_LOW_PRIORITY_STREAMS_MIN_CREDITS
+
+ if (TotalCredits > 4)
+ {
+ if ((pCurEpDist->ServiceID == WMI_DATA_BK_SVC) || (pCurEpDist->ServiceID == WMI_DATA_BE_SVC)){
+ /* assign at least min credits to lower than VO priority services */
+ GiveCredits(pCredInfo,pCurEpDist,pCurEpDist->TxCreditsMin);
+ /* force active */
+ SET_EP_ACTIVE(pCurEpDist);
+ }
+ }
+
+#endif
+
+ if (pCurEpDist->ServiceID == WMI_CONTROL_SVC) {
+ /* give control service some credits */
+ GiveCredits(pCredInfo,pCurEpDist,pCurEpDist->TxCreditsMin);
+ /* control service is always marked active, it never goes inactive EVER */
+ SET_EP_ACTIVE(pCurEpDist);
+ } else if (pCurEpDist->ServiceID == WMI_DATA_BK_SVC) {
+ /* this is the lowest priority data endpoint, save this off for easy access */
+ pCredInfo->pLowestPriEpDist = pCurEpDist;
+ }
+
+ /* Streams have to be created (explicitly or implicitly) for all kinds
+ * of traffic. BE endpoints are also inactive in the beginning.
+ * When BE traffic starts, it creates implicit streams that
+ * redistribute credits.
+ */
+
+ /* note, all other endpoints have minimums set but are initially given NO credits.
+ * Credits will be distributed as traffic activity demands */
+ pCurEpDist = pCurEpDist->pNext;
+ }
+
+ if (pCredInfo->CurrentFreeCredits <= 0) {
+ AR_DEBUG_PRINTF(ATH_LOG_INF, ("Not enough credits (%d) to do credit distributions \n", TotalCredits));
+ A_ASSERT(FALSE);
+ return;
+ }
+
+ /* reset list */
+ pCurEpDist = pEPList;
+ /* now run through the list and set max operating credit limits for everyone */
+ while (pCurEpDist != NULL) {
+ if (pCurEpDist->ServiceID == WMI_CONTROL_SVC) {
+ /* control service max is just 1 max message */
+ pCurEpDist->TxCreditsNorm = pCurEpDist->TxCreditsPerMaxMsg;
+ } else {
+ /* for the remaining data endpoints, we assume that TxCreditsPerMaxMsg is
+ * the same for each.
+ * We use a simple calculation here: take the remaining credits,
+ * determine how many max-sized messages they can cover, and set each endpoint's
+ * normal value equal to 3/4 of this amount.
+ * */
+ count = (pCredInfo->CurrentFreeCredits/pCurEpDist->TxCreditsPerMaxMsg) * pCurEpDist->TxCreditsPerMaxMsg;
+ count = (count * 3) >> 2;
+ count = max(count,pCurEpDist->TxCreditsPerMaxMsg);
+ /* set normal */
+ pCurEpDist->TxCreditsNorm = count;
+
+ }
+ pCurEpDist = pCurEpDist->pNext;
+ }
+
+}
+
+
+/* default credit distribution callback
+ * This callback is invoked whenever endpoints require credit distributions.
+ * A lock is held while this function is invoked, this function shall NOT block.
+ * The pEPDistList is a list of distribution structures in prioritized order as
+ * defined by the call to the HTCSetCreditDistribution() api.
+ *
+ */
+static void ar6000_credit_distribute(void *Context,
+ HTC_ENDPOINT_CREDIT_DIST *pEPDistList,
+ HTC_CREDIT_DIST_REASON Reason)
+{
+ HTC_ENDPOINT_CREDIT_DIST *pCurEpDist;
+ COMMON_CREDIT_STATE_INFO *pCredInfo = (COMMON_CREDIT_STATE_INFO *)Context;
+
+ switch (Reason) {
+ case HTC_CREDIT_DIST_SEND_COMPLETE :
+ pCurEpDist = pEPDistList;
+ /* we are given the start of the endpoint distribution list.
+ * There may be one or more endpoints to service.
+ * Run through the list and distribute credits */
+ while (pCurEpDist != NULL) {
+
+ if (pCurEpDist->TxCreditsToDist > 0) {
+ /* return the credits back to the endpoint */
+ pCurEpDist->TxCredits += pCurEpDist->TxCreditsToDist;
+ /* always zero out when we are done */
+ pCurEpDist->TxCreditsToDist = 0;
+
+ if (pCurEpDist->TxCredits > pCurEpDist->TxCreditsAssigned) {
+ /* reduce to the assigned limit, previous credit reductions
+ * could have caused the limit to change */
+ ReduceCredits(pCredInfo, pCurEpDist, pCurEpDist->TxCreditsAssigned);
+ }
+
+ if (pCurEpDist->TxCredits > pCurEpDist->TxCreditsNorm) {
+ /* oversubscribed endpoints need to reduce back to normal */
+ ReduceCredits(pCredInfo, pCurEpDist, pCurEpDist->TxCreditsNorm);
+ }
+
+ if (!IS_EP_ACTIVE(pCurEpDist)) {
+ /* endpoint is inactive, now check for messages waiting for credits */
+ if (pCurEpDist->TxQueueDepth == 0) {
+ /* EP is inactive and there are no pending messages,
+ * reduce credits back to zero to recover credits */
+ ReduceCredits(pCredInfo, pCurEpDist, 0);
+ }
+ }
+ }
+
+ pCurEpDist = pCurEpDist->pNext;
+ }
+
+ break;
+
+ case HTC_CREDIT_DIST_ACTIVITY_CHANGE :
+ RedistributeCredits(pCredInfo,pEPDistList);
+ break;
+ case HTC_CREDIT_DIST_SEEK_CREDITS :
+ SeekCredits(pCredInfo,pEPDistList);
+ break;
+ case HTC_DUMP_CREDIT_STATE :
+ AR_DEBUG_PRINTF(ATH_DEBUG_ERR, ("Credit Distribution, total : %d, free : %d\n",
+ pCredInfo->TotalAvailableCredits, pCredInfo->CurrentFreeCredits));
+ break;
+ default:
+ break;
+
+ }
+
+ /* sanity checks done after each distribution action */
+ A_ASSERT(pCredInfo->CurrentFreeCredits <= pCredInfo->TotalAvailableCredits);
+ A_ASSERT(pCredInfo->CurrentFreeCredits >= 0);
+
+}
+
+/* redistribute credits based on activity change */
+static void RedistributeCredits(COMMON_CREDIT_STATE_INFO *pCredInfo,
+ HTC_ENDPOINT_CREDIT_DIST *pEPDistList)
+{
+ HTC_ENDPOINT_CREDIT_DIST *pCurEpDist = pEPDistList;
+
+ /* walk through the list and remove credits from inactive endpoints */
+ while (pCurEpDist != NULL) {
+
+#ifdef CONFIG_GIVE_LOW_PRIORITY_STREAMS_MIN_CREDITS
+
+ if ((pCurEpDist->ServiceID == WMI_DATA_BK_SVC) || (pCurEpDist->ServiceID == WMI_DATA_BE_SVC)) {
+ /* force low priority streams to always be active to retain their minimum credit distribution */
+ SET_EP_ACTIVE(pCurEpDist);
+ }
+#endif
+
+ if (pCurEpDist->ServiceID != WMI_CONTROL_SVC) {
+ if (!IS_EP_ACTIVE(pCurEpDist)) {
+ if (pCurEpDist->TxQueueDepth == 0) {
+ /* EP is inactive and there are no pending messages, reduce credits back to zero */
+ ReduceCredits(pCredInfo, pCurEpDist, 0);
+ } else {
+ /* we cannot zero the credits assigned to this EP, but to keep
+ * the credits available for these leftover packets, reduce to
+ * a minimum */
+ ReduceCredits(pCredInfo, pCurEpDist, pCurEpDist->TxCreditsMin);
+ }
+ }
+ }
+
+ /* NOTE in the active case, we do not need to do anything further,
+ * when an EP goes active and needs credits, HTC will call into
+ * our distribution function using a reason code of HTC_CREDIT_DIST_SEEK_CREDITS */
+
+ pCurEpDist = pCurEpDist->pNext;
+ }
+
+}
+
+/* HTC has an endpoint that needs credits, pEPDist is the endpoint in question */
+static void SeekCredits(COMMON_CREDIT_STATE_INFO *pCredInfo,
+ HTC_ENDPOINT_CREDIT_DIST *pEPDist)
+{
+ HTC_ENDPOINT_CREDIT_DIST *pCurEpDist;
+ int credits = 0;
+ int need;
+
+ do {
+
+ if (pEPDist->ServiceID == WMI_CONTROL_SVC) {
+ /* we never oversubscribe on the control service, this is not
+ * a high performance path and the target never holds onto control
+ * credits for too long */
+ break;
+ }
+
+#ifdef CONFIG_GIVE_LOW_PRIORITY_STREAMS_MIN_CREDITS
+ if (pEPDist->ServiceID == WMI_DATA_VI_SVC) {
+ if ((pEPDist->TxCreditsAssigned >= pEPDist->TxCreditsNorm)) {
+ /* limit VI service from oversubscribing */
+ break;
+ }
+ }
+
+ if (pEPDist->ServiceID == WMI_DATA_VO_SVC) {
+ if ((pEPDist->TxCreditsAssigned >= pEPDist->TxCreditsNorm)) {
+ /* limit VO service from oversubscribing */
+ break;
+ }
+ }
+#else
+ if (pEPDist->ServiceID == WMI_DATA_VI_SVC) {
+ if ((pEPDist->TxCreditsAssigned >= pEPDist->TxCreditsNorm) ||
+ (pCredInfo->CurrentFreeCredits <= pEPDist->TxCreditsPerMaxMsg)) {
+ /* limit VI service from oversubscribing */
+ /* at least one free credit will not be used by VI */
+ break;
+ }
+ }
+
+ if (pEPDist->ServiceID == WMI_DATA_VO_SVC) {
+ if ((pEPDist->TxCreditsAssigned >= pEPDist->TxCreditsNorm) ||
+ (pCredInfo->CurrentFreeCredits <= pEPDist->TxCreditsPerMaxMsg)) {
+ /* limit VO service from oversubscribing */
+ /* at least one free credit will not be used by VO */
+ break;
+ }
+ }
+#endif
+
+ /* for all other services, we follow a simple algorithm of
+ * 1. checking the free pool for credits
+ * 2. checking lower priority endpoints for credits to take */
+
+ /* give what we can */
+ credits = min(pCredInfo->CurrentFreeCredits,pEPDist->TxCreditsSeek);
+
+ if (credits >= pEPDist->TxCreditsSeek) {
+ /* we found some to fulfill the seek request */
+ break;
+ }
+
+ /* we don't have enough in the free pool, try taking away from lower priority services
+ *
+ * The rule for taking away credits:
+ * 1. Only take from lower priority endpoints
+ * 2. Only take what is allocated above the minimum (never starve an endpoint completely)
+ * 3. Only take what you need.
+ *
+ * */
+
+ /* starting at the lowest priority */
+ pCurEpDist = pCredInfo->pLowestPriEpDist;
+
+ /* work backwards until we hit the endpoint again */
+ while (pCurEpDist != pEPDist) {
+ /* calculate how many we need so far */
+ need = pEPDist->TxCreditsSeek - pCredInfo->CurrentFreeCredits;
+
+ if ((pCurEpDist->TxCreditsAssigned - need) >= pCurEpDist->TxCreditsMin) {
+ /* the current one has been allocated more than its minimum and it
+ * has enough credits assigned above its minimum to fulfill our need;
+ * try to take away just enough to fulfill our need */
+ ReduceCredits(pCredInfo,
+ pCurEpDist,
+ pCurEpDist->TxCreditsAssigned - need);
+
+ if (pCredInfo->CurrentFreeCredits >= pEPDist->TxCreditsSeek) {
+ /* we have enough */
+ break;
+ }
+ }
+
+ pCurEpDist = pCurEpDist->pPrev;
+ }
+
+ /* return what we can get */
+ credits = min(pCredInfo->CurrentFreeCredits,pEPDist->TxCreditsSeek);
+
+ } while (FALSE);
+
+ /* did we find some credits? */
+ if (credits) {
+ /* give what we can */
+ GiveCredits(pCredInfo, pEPDist, credits);
+ }
+
+}
+
+/* initialize and setup credit distribution */
+A_STATUS ar6000_setup_credit_dist(HTC_HANDLE HTCHandle, COMMON_CREDIT_STATE_INFO *pCredInfo)
+{
+ HTC_SERVICE_ID servicepriority[5];
+
+ A_MEMZERO(pCredInfo,sizeof(COMMON_CREDIT_STATE_INFO));
+
+ servicepriority[0] = WMI_CONTROL_SVC; /* highest */
+ servicepriority[1] = WMI_DATA_VO_SVC;
+ servicepriority[2] = WMI_DATA_VI_SVC;
+ servicepriority[3] = WMI_DATA_BE_SVC;
+ servicepriority[4] = WMI_DATA_BK_SVC; /* lowest */
+
+ /* set callbacks and priority list */
+ HTCSetCreditDistribution(HTCHandle,
+ pCredInfo,
+ ar6000_credit_distribute,
+ ar6000_credit_init,
+ servicepriority,
+ 5);
+
+ return A_OK;
+}
+
diff --git a/drivers/staging/ath6kl/miscdrv/miscdrv.h b/drivers/staging/ath6kl/miscdrv/miscdrv.h
new file mode 100644
index 000000000000..ae24b728c4ad
--- /dev/null
+++ b/drivers/staging/ath6kl/miscdrv/miscdrv.h
@@ -0,0 +1,42 @@
+//------------------------------------------------------------------------------
+// <copyright file="miscdrv.h" company="Atheros">
+// Copyright (c) 2004-2010 Atheros Corporation. All rights reserved.
+//
+//
+// Permission to use, copy, modify, and/or distribute this software for any
+// purpose with or without fee is hereby granted, provided that the above
+// copyright notice and this permission notice appear in all copies.
+//
+// THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+// WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+// MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+// ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+// WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+// ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+// OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+//
+//
+//------------------------------------------------------------------------------
+//==============================================================================
+// Author(s): ="Atheros"
+//==============================================================================
+#ifndef _MISCDRV_H
+#define _MISCDRV_H
+
+
+#define HOST_INTEREST_ITEM_ADDRESS(target, item) \
+ AR6002_HOST_INTEREST_ITEM_ADDRESS(item)
+
+A_UINT32 ar6kRev2Array[][128] = {
+ {0xFFFF, 0xFFFF}, // No Patches
+ };
+
+#define CFG_REV2_ITEMS 0 // no patches so far
+#define AR6K_RESET_ADDR 0x4000
+#define AR6K_RESET_VAL 0x100
+
+#define EEPROM_SZ 768
+#define EEPROM_WAIT_LIMIT 4
+
+#endif
+
diff --git a/drivers/staging/ath6kl/os/linux/ar6000_android.c b/drivers/staging/ath6kl/os/linux/ar6000_android.c
new file mode 100644
index 000000000000..a588825b9dab
--- /dev/null
+++ b/drivers/staging/ath6kl/os/linux/ar6000_android.c
@@ -0,0 +1,413 @@
+//------------------------------------------------------------------------------
+// Copyright (c) 2004-2010 Atheros Communications Inc.
+// All rights reserved.
+//
+//
+//
+// Permission to use, copy, modify, and/or distribute this software for any
+// purpose with or without fee is hereby granted, provided that the above
+// copyright notice and this permission notice appear in all copies.
+//
+// THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+// WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+// MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+// ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+// WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+// ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+// OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+//
+//
+//
+// Author(s): ="Atheros"
+//------------------------------------------------------------------------------
+#include "ar6000_drv.h"
+#include "htc.h"
+#include <linux/vmalloc.h>
+#include <linux/fs.h>
+
+#ifdef CONFIG_HAS_WAKELOCK
+#include <linux/wakelock.h>
+#endif
+#ifdef CONFIG_HAS_EARLYSUSPEND
+#include <linux/earlysuspend.h>
+#endif
+
+A_BOOL enable_mmc_host_detect_change = 0;
+static void ar6000_enable_mmchost_detect_change(int enable);
+
+
+char fwpath[256] = "/system/wifi";
+int wowledon;
+unsigned int enablelogcat;
+
+extern int bmienable;
+extern struct net_device *ar6000_devices[];
+extern char ifname[];
+
+#ifdef CONFIG_HAS_WAKELOCK
+extern struct wake_lock ar6k_wow_wake_lock;
+struct wake_lock ar6k_init_wake_lock;
+#endif
+
+const char def_ifname[] = "wlan0";
+module_param_string(fwpath, fwpath, sizeof(fwpath), 0644);
+module_param(enablelogcat, uint, 0644);
+module_param(wowledon, int, 0644);
+
+#ifdef CONFIG_HAS_EARLYSUSPEND
+static int screen_is_off;
+static struct early_suspend ar6k_early_suspend;
+#endif
+
+static A_STATUS (*ar6000_avail_ev_p)(void *, void *);
+
+#if defined(CONFIG_ANDROID_LOGGER) && (!defined(CONFIG_MMC_MSM))
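+/* Write one Android logcat record (priority, tag, message) directly to /dev/log/main */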
+int logger_write(const enum logidx index,
+ const unsigned char prio,
+ const char __kernel * const tag,
+ const char __kernel * const fmt,
+ ...)
+{
+ int ret = 0;
+ va_list vargs;
+ struct file *filp = (struct file *)-ENOENT;
+ mm_segment_t oldfs;
+ struct iovec vec[3];
+ int tag_bytes = strlen(tag) + 1, msg_bytes;
+ char *msg;
+ va_start(vargs, fmt);
+ msg = kvasprintf(GFP_ATOMIC, fmt, vargs);
+ va_end(vargs);
+ if (!msg)
+ return -ENOMEM;
+ if (in_interrupt()) {
+ /* we have no choice since aio_write may block */
+ printk(KERN_ALERT "%s", msg);
+ goto out_free_message;
+ }
+ msg_bytes = strlen(msg) + 1;
+ if (msg_bytes <= 1) /* empty message? */
+ goto out_free_message; /* don't bother, then */
+ if ((msg_bytes + tag_bytes + 1) > 2048) {
+ ret = -E2BIG;
+ goto out_free_message;
+ }
+
+ vec[0].iov_base = (unsigned char *) &prio;
+ vec[0].iov_len = 1;
+ vec[1].iov_base = (void *) tag;
+ vec[1].iov_len = strlen(tag) + 1;
+ vec[2].iov_base = (void *) msg;
+ vec[2].iov_len = strlen(msg) + 1;
+
+ oldfs = get_fs();
+ set_fs(KERNEL_DS);
+ do {
+ filp = filp_open("/dev/log/main", O_WRONLY, S_IRUSR);
+ if (IS_ERR(filp) || !filp->f_op) {
+ AR_DEBUG_PRINTF(ATH_DEBUG_ERR, ("%s: filp_open /dev/log/main error\n", __FUNCTION__));
+ ret = -ENOENT;
+ break;
+ }
+
+ if (filp->f_op->aio_write) {
+ int nr_segs = sizeof(vec) / sizeof(vec[0]);
+ int len = vec[0].iov_len + vec[1].iov_len + vec[2].iov_len;
+ struct kiocb kiocb;
+ init_sync_kiocb(&kiocb, filp);
+ kiocb.ki_pos = 0;
+ kiocb.ki_left = len;
+ kiocb.ki_nbytes = len;
+ ret = filp->f_op->aio_write(&kiocb, vec, nr_segs, kiocb.ki_pos);
+ }
+
+ } while (0);
+
+ if (!IS_ERR(filp)) {
+ filp_close(filp, NULL);
+ }
+ set_fs(oldfs);
+out_free_message:
+ if (msg) {
+ kfree(msg);
+ }
+ return ret;
+}
+#endif
+
+int android_logger_lv(void *module, int mask)
+{
+ switch (mask) {
+ case ATH_DEBUG_ERR:
+ return 6;
+ case ATH_DEBUG_INFO:
+ return 4;
+ case ATH_DEBUG_WARN:
+ return 5;
+ case ATH_DEBUG_TRC:
+ return 3;
+ default:
+#ifdef DEBUG
+ if (!module) {
+ return 3;
+ } else if (module == &GET_ATH_MODULE_DEBUG_VAR_NAME(driver)) {
+ return (mask <=ATH_DEBUG_MAKE_MODULE_MASK(3)) ? 3 : 2;
+ } else if (module == &GET_ATH_MODULE_DEBUG_VAR_NAME(htc)) {
+ return 2;
+ } else {
+ return 3;
+ }
+#else
+ return 3; /* DEBUG */
+#endif
+ }
+}
+
+static int android_readwrite_file(const A_CHAR *filename, A_CHAR *rbuf, const A_CHAR *wbuf, size_t length)
+{
+ int ret = 0;
+ struct file *filp = (struct file *)-ENOENT;
+ mm_segment_t oldfs;
+ oldfs = get_fs();
+ set_fs(KERNEL_DS);
+ do {
+ int mode = (wbuf) ? O_RDWR : O_RDONLY;
+ filp = filp_open(filename, mode, S_IRUSR);
+ if (IS_ERR(filp) || !filp->f_op) {
+ AR_DEBUG_PRINTF(ATH_DEBUG_ERR, ("%s: file %s filp_open error\n", __FUNCTION__, filename));
+ ret = -ENOENT;
+ break;
+ }
+
+ if (length==0) {
+ /* Read the length of the file only */
+ struct inode *inode;
+
+ inode = GET_INODE_FROM_FILEP(filp);
+ if (!inode) {
+ AR_DEBUG_PRINTF(ATH_DEBUG_ERR, ("%s: Get inode from %s failed\n", __FUNCTION__, filename));
+ ret = -ENOENT;
+ break;
+ }
+ ret = i_size_read(inode->i_mapping->host);
+ break;
+ }
+
+ if (wbuf) {
+ if ( (ret=filp->f_op->write(filp, wbuf, length, &filp->f_pos)) < 0) {
+ AR_DEBUG_PRINTF(ATH_DEBUG_ERR, ("%s: Write %u bytes to file %s error %d\n", __FUNCTION__,
+ length, filename, ret));
+ break;
+ }
+ } else {
+ if ( (ret=filp->f_op->read(filp, rbuf, length, &filp->f_pos)) < 0) {
+ AR_DEBUG_PRINTF(ATH_DEBUG_ERR, ("%s: Read %u bytes from file %s error %d\n", __FUNCTION__,
+ length, filename, ret));
+ break;
+ }
+ }
+ } while (0);
+
+ if (!IS_ERR(filp)) {
+ filp_close(filp, NULL);
+ }
+ set_fs(oldfs);
+
+ return ret;
+}
+
+int android_request_firmware(const struct firmware **firmware_p, const char *name,
+ struct device *device)
+{
+ int ret = 0;
+ struct firmware *firmware;
+ char filename[256];
+ const char *raw_filename = name;
+ *firmware_p = firmware = kzalloc(sizeof(*firmware), GFP_KERNEL);
+ if (!firmware)
+ return -ENOMEM;
+ snprintf(filename, sizeof(filename), "%s/%s", fwpath, raw_filename);
+ do {
+ size_t length, bufsize, bmisize;
+
+ if ( (ret=android_readwrite_file(filename, NULL, NULL, 0)) < 0) {
+ break;
+ } else {
+ length = ret;
+ }
+
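+ /* round the buffer up to a whole page and to a 4-byte multiple so BMI transfers stay aligned */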
+ bufsize = ALIGN(length, PAGE_SIZE);
+ bmisize = A_ROUND_UP(length, 4);
+ bufsize = max(bmisize, bufsize);
+ firmware->data = vmalloc(bufsize);
+ firmware->size = length;
+ if (!firmware->data) {
+ AR_DEBUG_PRINTF(ATH_DEBUG_ERR,("%s: Cannot allocate buffer for firmware\n", __FUNCTION__));
+ ret = -ENOMEM;
+ break;
+ }
+
+ if ( (ret=android_readwrite_file(filename, (char*)firmware->data, NULL, length)) != length) {
+ AR_DEBUG_PRINTF(ATH_DEBUG_ERR,("%s: file read error, ret %d request %d\n", __FUNCTION__, ret, length));
+ ret = -1;
+ break;
+ }
+
+ } while (0);
+
+ if (ret<0) {
+ if (firmware) {
+ if (firmware->data)
+ vfree(firmware->data);
+ kfree(firmware);
+ }
+ *firmware_p = NULL;
+ } else {
+ ret = 0;
+ }
+ return ret;
+}
+
+void android_release_firmware(const struct firmware *firmware)
+{
+ if (firmware) {
+ if (firmware->data)
+ vfree(firmware->data);
+ kfree(firmware);
+ }
+}
+
+static A_STATUS ar6000_android_avail_ev(void *context, void *hif_handle)
+{
+ A_STATUS ret;
+#ifdef CONFIG_HAS_WAKELOCK
+ wake_lock(&ar6k_init_wake_lock);
+#endif
+ ar6000_enable_mmchost_detect_change(0);
+ ret = ar6000_avail_ev_p(context, hif_handle);
+#ifdef CONFIG_HAS_WAKELOCK
+ wake_unlock(&ar6k_init_wake_lock);
+#endif
+ return ret;
+}
+
+/* Useful on Qualcomm platforms: make the MMC stack detect our WLAN card */
+static void ar6000_enable_mmchost_detect_change(int enable)
+{
+#ifdef CONFIG_MMC_MSM
+#define MMC_MSM_DEV "msm_sdcc.1"
+ char buf[3];
+ int length;
+
+ if (!enable_mmc_host_detect_change) {
+ return;
+ }
+ length = snprintf(buf, sizeof(buf), "%d\n", enable ? 1 : 0);
+ if (android_readwrite_file("/sys/devices/platform/" MMC_MSM_DEV "/detect_change",
+ NULL, buf, length) < 0) {
+ /* fall back to polling */
+ android_readwrite_file("/sys/devices/platform/" MMC_MSM_DEV "/polling", NULL, buf, length);
+ }
+#endif
+}
+
+#ifdef CONFIG_HAS_EARLYSUSPEND
+static void android_early_suspend(struct early_suspend *h)
+{
+ screen_is_off = 1;
+}
+
+static void android_late_resume(struct early_suspend *h)
+{
+ screen_is_off = 0;
+}
+#endif
+
+void android_module_init(OSDRV_CALLBACKS *osdrvCallbacks)
+{
+ bmienable = 1;
+ if (ifname[0] == '\0')
+ strcpy(ifname, def_ifname);
+#ifdef CONFIG_HAS_WAKELOCK
+ wake_lock_init(&ar6k_init_wake_lock, WAKE_LOCK_SUSPEND, "ar6k_init");
+#endif
+#ifdef CONFIG_HAS_EARLYSUSPEND
+ ar6k_early_suspend.suspend = android_early_suspend;
+ ar6k_early_suspend.resume = android_late_resume;
+ ar6k_early_suspend.level = EARLY_SUSPEND_LEVEL_BLANK_SCREEN;
+ register_early_suspend(&ar6k_early_suspend);
+#endif
+
+ ar6000_avail_ev_p = osdrvCallbacks->deviceInsertedHandler;
+ osdrvCallbacks->deviceInsertedHandler = ar6000_android_avail_ev;
+
+ ar6000_enable_mmchost_detect_change(1);
+}
+
+void android_module_exit(void)
+{
+#ifdef CONFIG_HAS_EARLYSUSPEND
+ unregister_early_suspend(&ar6k_early_suspend);
+#endif
+#ifdef CONFIG_HAS_WAKELOCK
+ wake_lock_destroy(&ar6k_init_wake_lock);
+#endif
+ ar6000_enable_mmchost_detect_change(1);
+}
+
+#ifdef CONFIG_PM
+void android_ar6k_check_wow_status(AR_SOFTC_T *ar, struct sk_buff *skb, A_BOOL isEvent)
+{
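+ /* While the screen is off, decide whether this WMI event or data frame should hold a wake lock */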
+ if (
+#ifdef CONFIG_HAS_EARLYSUSPEND
+ screen_is_off &&
+#endif
+ skb && ar->arConnected) {
+ A_BOOL needWake = FALSE;
+ if (isEvent) {
+ if (A_NETBUF_LEN(skb) >= sizeof(A_UINT16)) {
+ A_UINT16 cmd = *(const A_UINT16 *)A_NETBUF_DATA(skb);
+ switch (cmd) {
+ case WMI_CONNECT_EVENTID:
+ case WMI_DISCONNECT_EVENTID:
+ needWake = TRUE;
+ break;
+ default:
+ /* don't wake-lock the system for other events */
+ break;
+ }
+ }
+ } else if (A_NETBUF_LEN(skb) >= sizeof(ATH_MAC_HDR)) {
+ ATH_MAC_HDR *datap = (ATH_MAC_HDR *)A_NETBUF_DATA(skb);
+ if (!IEEE80211_IS_MULTICAST(datap->dstMac)) {
+ switch (A_BE2CPU16(datap->typeOrLen)) {
+ case 0x0800: /* IP */
+ case 0x888e: /* EAPOL */
+ case 0x88c7: /* RSN_PREAUTH */
+ case 0x88b4: /* WAPI */
+ needWake = TRUE;
+ break;
+ case 0x0806: /* ARP is not important enough to hold a wake lock */
+ default:
+ break;
+ }
+ }
+ }
+ if (needWake) {
+ /* keep the host awake when any such event or packet comes in */
+#ifdef CONFIG_HAS_WAKELOCK
+ wake_lock_timeout(&ar6k_wow_wake_lock, 3*HZ);
+#endif
+ if (wowledon) {
+ char buf[32];
+ int len = sprintf(buf, "on");
+ android_readwrite_file("/sys/power/state", NULL, buf, len);
+
+ len = sprintf(buf, "%d", 127);
+ android_readwrite_file("/sys/class/leds/lcd-backlight/brightness",
+ NULL, buf,len);
+ }
+ }
+ }
+}
+#endif /* CONFIG_PM */
diff --git a/drivers/staging/ath6kl/os/linux/ar6000_drv.c b/drivers/staging/ath6kl/os/linux/ar6000_drv.c
new file mode 100644
index 000000000000..a659f7047373
--- /dev/null
+++ b/drivers/staging/ath6kl/os/linux/ar6000_drv.c
@@ -0,0 +1,6444 @@
+//------------------------------------------------------------------------------
+// Copyright (c) 2004-2010 Atheros Communications Inc.
+// All rights reserved.
+//
+//
+//
+// Permission to use, copy, modify, and/or distribute this software for any
+// purpose with or without fee is hereby granted, provided that the above
+// copyright notice and this permission notice appear in all copies.
+//
+// THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+// WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+// MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+// ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+// WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+// ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+// OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+//
+//
+//
+// Author(s): ="Atheros"
+//------------------------------------------------------------------------------
+
+/*
+ * This driver is a pseudo ethernet driver to access the Atheros AR6000
+ * WLAN Device
+ */
+
+#include "ar6000_drv.h"
+#ifdef ATH6K_CONFIG_CFG80211
+#include "cfg80211.h"
+#endif /* ATH6K_CONFIG_CFG80211 */
+#include "htc.h"
+#include "wmi_filter_linux.h"
+#include "epping_test.h"
+#include "wlan_config.h"
+#include "ar3kconfig.h"
+#include "ar6k_pal.h"
+#include "AR6002/addrs.h"
+
+
+/* LINUX_HACK_FUDGE_FACTOR -- this is used to work around Linux behavior. When
+ * the meta data was added to the header it was found that Linux did not provide
+ * enough headroom. However, when more headroom was requested than was truly needed,
+ * Linux granted the requested headroom. Therefore, to get the necessary headroom from Linux,
+ * the driver requests more than is needed by the amount LINUX_HACK_FUDGE_FACTOR */
+#define LINUX_HACK_FUDGE_FACTOR 16
+#define BDATA_BDADDR_OFFSET 28
+
+A_UINT8 bcast_mac[] = {0xff, 0xff, 0xff, 0xff, 0xff, 0xff};
+A_UINT8 null_mac[] = {0x0, 0x0, 0x0, 0x0, 0x0, 0x0};
+
+#ifdef DEBUG
+
+#define ATH_DEBUG_DBG_LOG ATH_DEBUG_MAKE_MODULE_MASK(0)
+#define ATH_DEBUG_WLAN_CONNECT ATH_DEBUG_MAKE_MODULE_MASK(1)
+#define ATH_DEBUG_WLAN_SCAN ATH_DEBUG_MAKE_MODULE_MASK(2)
+#define ATH_DEBUG_WLAN_TX ATH_DEBUG_MAKE_MODULE_MASK(3)
+#define ATH_DEBUG_WLAN_RX ATH_DEBUG_MAKE_MODULE_MASK(4)
+#define ATH_DEBUG_HTC_RAW ATH_DEBUG_MAKE_MODULE_MASK(5)
+#define ATH_DEBUG_HCI_BRIDGE ATH_DEBUG_MAKE_MODULE_MASK(6)
+
+static ATH_DEBUG_MASK_DESCRIPTION driver_debug_desc[] = {
+ { ATH_DEBUG_DBG_LOG , "Target Debug Logs"},
+ { ATH_DEBUG_WLAN_CONNECT , "WLAN connect"},
+ { ATH_DEBUG_WLAN_SCAN , "WLAN scan"},
+ { ATH_DEBUG_WLAN_TX , "WLAN Tx"},
+ { ATH_DEBUG_WLAN_RX , "WLAN Rx"},
+ { ATH_DEBUG_HTC_RAW , "HTC Raw IF tracing"},
+ { ATH_DEBUG_HCI_BRIDGE , "HCI Bridge Setup"},
+ { ATH_DEBUG_HCI_RECV , "HCI Recv tracing"},
+ { ATH_DEBUG_HCI_DUMP , "HCI Packet dumps"},
+};
+
+ATH_DEBUG_INSTANTIATE_MODULE_VAR(driver,
+ "driver",
+ "Linux Driver Interface",
+ ATH_DEBUG_MASK_DEFAULTS | ATH_DEBUG_WLAN_SCAN |
+ ATH_DEBUG_HCI_BRIDGE,
+ ATH_DEBUG_DESCRIPTION_COUNT(driver_debug_desc),
+ driver_debug_desc);
+
+#endif
+
+
+#define IS_MAC_NULL(mac) (mac[0]==0 && mac[1]==0 && mac[2]==0 && mac[3]==0 && mac[4]==0 && mac[5]==0)
+#define IS_MAC_BCAST(mac) (*mac==0xff)
+
+#define DESCRIPTION "Driver to access the Atheros AR600x Device, version " __stringify(__VER_MAJOR_) "." __stringify(__VER_MINOR_) "." __stringify(__VER_PATCH_) "." __stringify(__BUILD_NUMBER_)
+
+MODULE_AUTHOR("Atheros Communications, Inc.");
+MODULE_DESCRIPTION(DESCRIPTION);
+MODULE_LICENSE("Dual BSD/GPL");
+
+#ifndef REORG_APTC_HEURISTICS
+#undef ADAPTIVE_POWER_THROUGHPUT_CONTROL
+#endif /* REORG_APTC_HEURISTICS */
+
+#ifdef ADAPTIVE_POWER_THROUGHPUT_CONTROL
+#define APTC_TRAFFIC_SAMPLING_INTERVAL 100 /* msec */
+#define APTC_UPPER_THROUGHPUT_THRESHOLD 3000 /* Kbps */
+#define APTC_LOWER_THROUGHPUT_THRESHOLD 2000 /* Kbps */
+
+typedef struct aptc_traffic_record {
+ A_BOOL timerScheduled;
+ struct timeval samplingTS;
+ unsigned long bytesReceived;
+ unsigned long bytesTransmitted;
+} APTC_TRAFFIC_RECORD;
+
+A_TIMER aptcTimer;
+APTC_TRAFFIC_RECORD aptcTR;
+#endif /* ADAPTIVE_POWER_THROUGHPUT_CONTROL */
+
+#ifdef EXPORT_HCI_BRIDGE_INTERFACE
+// callbacks registered by HCI transport driver
+HCI_TRANSPORT_CALLBACKS ar6kHciTransCallbacks = { NULL };
+#endif
+
+unsigned int processDot11Hdr = 0;
+int bmienable = BMIENABLE_DEFAULT;
+
+char ifname[IFNAMSIZ] = {0,};
+
+int wlaninitmode = WLAN_INIT_MODE_DEFAULT;
+unsigned int bypasswmi = 0;
+unsigned int debuglevel = 0;
+int tspecCompliance = ATHEROS_COMPLIANCE;
+unsigned int busspeedlow = 0;
+unsigned int onebitmode = 0;
+unsigned int skipflash = 0;
+unsigned int wmitimeout = 2;
+unsigned int wlanNodeCaching = 1;
+unsigned int enableuartprint = ENABLEUARTPRINT_DEFAULT;
+unsigned int logWmiRawMsgs = 0;
+unsigned int enabletimerwar = 0;
+unsigned int fwmode = 1;
+unsigned int mbox_yield_limit = 99;
+unsigned int enablerssicompensation = 0;
+int reduce_credit_dribble = 1 + HTC_CONNECT_FLAGS_THRESHOLD_LEVEL_ONE_HALF;
+int allow_trace_signal = 0;
+#ifdef CONFIG_HOST_TCMD_SUPPORT
+unsigned int testmode = 0;
+#endif
+
+unsigned int irqprocmode = HIF_DEVICE_IRQ_SYNC_ONLY;//HIF_DEVICE_IRQ_ASYNC_SYNC;
+unsigned int panic_on_assert = 1;
+unsigned int nohifscattersupport = NOHIFSCATTERSUPPORT_DEFAULT;
+
+unsigned int setuphci = SETUPHCI_DEFAULT;
+unsigned int setuphcipal = SETUPHCIPAL_DEFAULT;
+unsigned int loghci = 0;
+unsigned int setupbtdev = SETUPBTDEV_DEFAULT;
+#ifndef EXPORT_HCI_BRIDGE_INTERFACE
+unsigned int ar3khcibaud = AR3KHCIBAUD_DEFAULT;
+unsigned int hciuartscale = HCIUARTSCALE_DEFAULT;
+unsigned int hciuartstep = HCIUARTSTEP_DEFAULT;
+#endif
+#ifdef CONFIG_CHECKSUM_OFFLOAD
+unsigned int csumOffload = 0;
+unsigned int csumOffloadTest = 0;
+#endif
+unsigned int eppingtest = 0;
+
+module_param_string(ifname, ifname, sizeof(ifname), 0644);
+module_param(wlaninitmode, int, 0644);
+module_param(bmienable, int, 0644);
+module_param(bypasswmi, uint, 0644);
+module_param(debuglevel, uint, 0644);
+module_param(tspecCompliance, int, 0644);
+module_param(onebitmode, uint, 0644);
+module_param(busspeedlow, uint, 0644);
+module_param(skipflash, uint, 0644);
+module_param(wmitimeout, uint, 0644);
+module_param(wlanNodeCaching, uint, 0644);
+module_param(logWmiRawMsgs, uint, 0644);
+module_param(enableuartprint, uint, 0644);
+module_param(enabletimerwar, uint, 0644);
+module_param(fwmode, uint, 0644);
+module_param(mbox_yield_limit, uint, 0644);
+module_param(reduce_credit_dribble, int, 0644);
+module_param(allow_trace_signal, int, 0644);
+module_param(enablerssicompensation, uint, 0644);
+module_param(processDot11Hdr, uint, 0644);
+#ifdef CONFIG_CHECKSUM_OFFLOAD
+module_param(csumOffload, uint, 0644);
+#endif
+#ifdef CONFIG_HOST_TCMD_SUPPORT
+module_param(testmode, uint, 0644);
+#endif
+module_param(irqprocmode, uint, 0644);
+module_param(nohifscattersupport, uint, 0644);
+module_param(panic_on_assert, uint, 0644);
+module_param(setuphci, uint, 0644);
+module_param(setuphcipal, uint, 0644);
+module_param(loghci, uint, 0644);
+module_param(setupbtdev, uint, 0644);
+#ifndef EXPORT_HCI_BRIDGE_INTERFACE
+module_param(ar3khcibaud, uint, 0644);
+module_param(hciuartscale, uint, 0644);
+module_param(hciuartstep, uint, 0644);
+#endif
+module_param(eppingtest, uint, 0644);
+
+/* since kernel 2.6.10 the count argument of module_param_array() must be a pointer to a uint */
+unsigned int _mboxnum = HTC_MAILBOX_NUM_MAX;
+#define mboxnum &_mboxnum
+
+#ifdef DEBUG
+A_UINT32 g_dbg_flags = DBG_DEFAULTS;
+unsigned int debugflags = 0;
+int debugdriver = 0;
+unsigned int debughtc = 0;
+unsigned int debugbmi = 0;
+unsigned int debughif = 0;
+unsigned int txcreditsavailable[HTC_MAILBOX_NUM_MAX] = {0};
+unsigned int txcreditsconsumed[HTC_MAILBOX_NUM_MAX] = {0};
+unsigned int txcreditintrenable[HTC_MAILBOX_NUM_MAX] = {0};
+unsigned int txcreditintrenableaggregate[HTC_MAILBOX_NUM_MAX] = {0};
+module_param(debugflags, uint, 0644);
+module_param(debugdriver, int, 0644);
+module_param(debughtc, uint, 0644);
+module_param(debugbmi, uint, 0644);
+module_param(debughif, uint, 0644);
+module_param_array(txcreditsavailable, uint, mboxnum, 0644);
+module_param_array(txcreditsconsumed, uint, mboxnum, 0644);
+module_param_array(txcreditintrenable, uint, mboxnum, 0644);
+module_param_array(txcreditintrenableaggregate, uint, mboxnum, 0644);
+
+#endif /* DEBUG */
+
+unsigned int resetok = 1;
+unsigned int tx_attempt[HTC_MAILBOX_NUM_MAX] = {0};
+unsigned int tx_post[HTC_MAILBOX_NUM_MAX] = {0};
+unsigned int tx_complete[HTC_MAILBOX_NUM_MAX] = {0};
+unsigned int hifBusRequestNumMax = 40;
+unsigned int war23838_disabled = 0;
+#ifdef ADAPTIVE_POWER_THROUGHPUT_CONTROL
+unsigned int enableAPTCHeuristics = 1;
+#endif /* ADAPTIVE_POWER_THROUGHPUT_CONTROL */
+module_param_array(tx_attempt, uint, mboxnum, 0644);
+module_param_array(tx_post, uint, mboxnum, 0644);
+module_param_array(tx_complete, uint, mboxnum, 0644);
+module_param(hifBusRequestNumMax, uint, 0644);
+module_param(war23838_disabled, uint, 0644);
+module_param(resetok, uint, 0644);
+#ifdef ADAPTIVE_POWER_THROUGHPUT_CONTROL
+module_param(enableAPTCHeuristics, uint, 0644);
+#endif /* ADAPTIVE_POWER_THROUGHPUT_CONTROL */
+
+#ifdef BLOCK_TX_PATH_FLAG
+int blocktx = 0;
+module_param(blocktx, int, 0644);
+#endif /* BLOCK_TX_PATH_FLAG */
+
+typedef struct user_rssi_compensation_t {
+ A_UINT16 customerID;
+ union {
+ A_UINT16 a_enable;
+ A_UINT16 bg_enable;
+ A_UINT16 enable;
+ };
+ A_INT16 bg_param_a;
+ A_INT16 bg_param_b;
+ A_INT16 a_param_a;
+ A_INT16 a_param_b;
+ A_UINT32 reserved;
+} USER_RSSI_CPENSATION;
+
+static USER_RSSI_CPENSATION rssi_compensation_param;
+
+static A_INT16 rssi_compensation_table[96];
+
+int reconnect_flag = 0;
+static ar6k_pal_config_t ar6k_pal_config_g;
+
+/* Function declarations */
+static int ar6000_init_module(void);
+static void ar6000_cleanup_module(void);
+
+int ar6000_init(struct net_device *dev);
+static int ar6000_open(struct net_device *dev);
+static int ar6000_close(struct net_device *dev);
+static void ar6000_init_control_info(AR_SOFTC_T *ar);
+static int ar6000_data_tx(struct sk_buff *skb, struct net_device *dev);
+
+void ar6000_destroy(struct net_device *dev, unsigned int unregister);
+static void ar6000_detect_error(unsigned long ptr);
+static void ar6000_set_multicast_list(struct net_device *dev);
+static struct net_device_stats *ar6000_get_stats(struct net_device *dev);
+static struct iw_statistics *ar6000_get_iwstats(struct net_device * dev);
+
+static void disconnect_timer_handler(unsigned long ptr);
+
+void read_rssi_compensation_param(AR_SOFTC_T *ar);
+
+/* For Android builds we call external APIs that handle firmware download and configuration */
+#ifdef ANDROID_ENV
+/* !!!! Interim Android support to make it easier to patch the default driver for
+ * Android use. You must provide an external source file, ar6000_android.c, that implements
+ * the following APIs. */
+extern void android_module_init(OSDRV_CALLBACKS *osdrvCallbacks);
+extern void android_module_exit(void);
+#endif
+/*
+ * HTC service connection handlers
+ */
+static A_STATUS ar6000_avail_ev(void *context, void *hif_handle);
+
+static A_STATUS ar6000_unavail_ev(void *context, void *hif_handle);
+
+A_STATUS ar6000_configure_target(AR_SOFTC_T *ar);
+
+static void ar6000_target_failure(void *Instance, A_STATUS Status);
+
+static void ar6000_rx(void *Context, HTC_PACKET *pPacket);
+
+static void ar6000_rx_refill(void *Context,HTC_ENDPOINT_ID Endpoint);
+
+static void ar6000_tx_complete(void *Context, HTC_PACKET_QUEUE *pPackets);
+
+static HTC_SEND_FULL_ACTION ar6000_tx_queue_full(void *Context, HTC_PACKET *pPacket);
+
+#ifdef ATH_AR6K_11N_SUPPORT
+static void ar6000_alloc_netbufs(A_NETBUF_QUEUE_T *q, A_UINT16 num);
+#endif
+static void ar6000_deliver_frames_to_nw_stack(void * dev, void *osbuf);
+//static void ar6000_deliver_frames_to_bt_stack(void * dev, void *osbuf);
+
+static HTC_PACKET *ar6000_alloc_amsdu_rxbuf(void *Context, HTC_ENDPOINT_ID Endpoint, int Length);
+
+static void ar6000_refill_amsdu_rxbufs(AR_SOFTC_T *ar, int Count);
+
+static void ar6000_cleanup_amsdu_rxbufs(AR_SOFTC_T *ar);
+
+static ssize_t
+ar6000_sysfs_bmi_read(struct file *fp, struct kobject *kobj,
+ struct bin_attribute *bin_attr,
+ char *buf, loff_t pos, size_t count);
+
+static ssize_t
+ar6000_sysfs_bmi_write(struct file *fp, struct kobject *kobj,
+ struct bin_attribute *bin_attr,
+ char *buf, loff_t pos, size_t count);
+
+static A_STATUS
+ar6000_sysfs_bmi_init(AR_SOFTC_T *ar);
+
+/* HCI PAL callback function declarations */
+A_STATUS ar6k_setup_hci_pal(AR_SOFTC_T *ar);
+void ar6k_cleanup_hci_pal(AR_SOFTC_T *ar);
+
+static void
+ar6000_sysfs_bmi_deinit(AR_SOFTC_T *ar);
+
+A_STATUS
+ar6000_sysfs_bmi_get_config(AR_SOFTC_T *ar, A_UINT32 mode);
+
+/*
+ * Static variables
+ */
+
+struct net_device *ar6000_devices[MAX_AR6000];
+static int is_netdev_registered;
+extern struct iw_handler_def ath_iw_handler_def;
+DECLARE_WAIT_QUEUE_HEAD(arEvent);
+static void ar6000_cookie_init(AR_SOFTC_T *ar);
+static void ar6000_cookie_cleanup(AR_SOFTC_T *ar);
+static void ar6000_free_cookie(AR_SOFTC_T *ar, struct ar_cookie * cookie);
+static struct ar_cookie *ar6000_alloc_cookie(AR_SOFTC_T *ar);
+
+#ifdef USER_KEYS
+static A_STATUS ar6000_reinstall_keys(AR_SOFTC_T *ar,A_UINT8 key_op_ctrl);
+#endif
+
+#ifdef CONFIG_AP_VIRTUAL_ADAPTER_SUPPORT
+struct net_device *arApNetDev;
+#endif /* CONFIG_AP_VIRTUAL_ADAPTER_SUPPORT */
+
+static struct ar_cookie s_ar_cookie_mem[MAX_COOKIE_NUM];
+
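+/* Resolve a host interest item to its address on the current target;
+ * only AR6002 and AR6003 are handled, anything else evaluates to 0. */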
+#define HOST_INTEREST_ITEM_ADDRESS(ar, item) \
+ (((ar)->arTargetType == TARGET_TYPE_AR6002) ? AR6002_HOST_INTEREST_ITEM_ADDRESS(item) : \
+ (((ar)->arTargetType == TARGET_TYPE_AR6003) ? AR6003_HOST_INTEREST_ITEM_ADDRESS(item) : 0))
+
+
+static struct net_device_ops ar6000_netdev_ops = {
+ .ndo_init = NULL,
+ .ndo_open = ar6000_open,
+ .ndo_stop = ar6000_close,
+ .ndo_get_stats = ar6000_get_stats,
+ .ndo_do_ioctl = ar6000_ioctl,
+ .ndo_start_xmit = ar6000_data_tx,
+ .ndo_set_multicast_list = ar6000_set_multicast_list,
+};
+
+/* Debug log support */
+
+/*
+ * Flag to govern whether the debug logs should be parsed in the kernel
+ * or reported to the application.
+ */
+#define REPORT_DEBUG_LOGS_TO_APP
+
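+/* Write the host's WMI protocol version into the target's host_app_area_s,
+ * located through the hi_app_host_interest pointer via the diagnostic window. */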
+A_STATUS
+ar6000_set_host_app_area(AR_SOFTC_T *ar)
+{
+ A_UINT32 address, data;
+ struct host_app_area_s host_app_area;
+
+ /* Fetch the address of the host_app_area_s instance in the host interest area */
+ address = TARG_VTOP(ar->arTargetType, HOST_INTEREST_ITEM_ADDRESS(ar, hi_app_host_interest));
+ if (ar6000_ReadRegDiag(ar->arHifDevice, &address, &data) != A_OK) {
+ return A_ERROR;
+ }
+ address = TARG_VTOP(ar->arTargetType, data);
+ host_app_area.wmi_protocol_ver = WMI_PROTOCOL_VERSION;
+ if (ar6000_WriteDataDiag(ar->arHifDevice, address,
+ (A_UCHAR *)&host_app_area,
+ sizeof(struct host_app_area_s)) != A_OK)
+ {
+ return A_ERROR;
+ }
+
+ return A_OK;
+}
+
+A_UINT32
+dbglog_get_debug_hdr_ptr(AR_SOFTC_T *ar)
+{
+ A_UINT32 param;
+ A_UINT32 address;
+ A_STATUS status;
+
+ address = TARG_VTOP(ar->arTargetType, HOST_INTEREST_ITEM_ADDRESS(ar, hi_dbglog_hdr));
+ if ((status = ar6000_ReadDataDiag(ar->arHifDevice, address,
+ (A_UCHAR *)&param, 4)) != A_OK)
+ {
+ param = 0;
+ }
+
+ return param;
+}
+
+/*
+ * The dbglog module has been initialized. It is now safe to access the
+ * relevant data structures over the diagnostic window.
+ */
+void
+ar6000_dbglog_init_done(AR_SOFTC_T *ar)
+{
+ ar->dbglog_init_done = TRUE;
+}
+
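+/* Return a record-aligned fragment length that stays below 'limit', or 'len'
+ * itself when the whole buffer already fits; used to split the debug log
+ * into chunks small enough to be sent to the application. */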
+A_UINT32
+dbglog_get_debug_fragment(A_INT8 *datap, A_UINT32 len, A_UINT32 limit)
+{
+ A_INT32 *buffer;
+ A_UINT32 count;
+ A_UINT32 numargs;
+ A_UINT32 length;
+ A_UINT32 fraglen;
+
+ count = fraglen = 0;
+ buffer = (A_INT32 *)datap;
+ length = (limit >> 2);
+
+ if (len <= limit) {
+ fraglen = len;
+ } else {
+ while (count < length) {
+ numargs = DBGLOG_GET_NUMARGS(buffer[count]);
+ fraglen = (count << 2);
+ count += numargs + 1;
+ }
+ }
+
+ return fraglen;
+}
+
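+/* Walk the packed debug-log records, printing module id, debug id, timestamp
+ * and up to two argument words per record; records claiming more arguments
+ * are reported as invalid. */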
+void
+dbglog_parse_debug_logs(A_INT8 *datap, A_UINT32 len)
+{
+ A_INT32 *buffer;
+ A_UINT32 count;
+ A_UINT32 timestamp;
+ A_UINT32 debugid;
+ A_UINT32 moduleid;
+ A_UINT32 numargs;
+ A_UINT32 length;
+
+ count = 0;
+ buffer = (A_INT32 *)datap;
+ length = (len >> 2);
+ while (count < length) {
+ debugid = DBGLOG_GET_DBGID(buffer[count]);
+ moduleid = DBGLOG_GET_MODULEID(buffer[count]);
+ numargs = DBGLOG_GET_NUMARGS(buffer[count]);
+ timestamp = DBGLOG_GET_TIMESTAMP(buffer[count]);
+ switch (numargs) {
+ case 0:
+ AR_DEBUG_PRINTF(ATH_DEBUG_DBG_LOG,("%d %d (%d)\n", moduleid, debugid, timestamp));
+ break;
+
+ case 1:
+ AR_DEBUG_PRINTF(ATH_DEBUG_DBG_LOG,("%d %d (%d): 0x%x\n", moduleid, debugid,
+ timestamp, buffer[count+1]));
+ break;
+
+ case 2:
+ AR_DEBUG_PRINTF(ATH_DEBUG_DBG_LOG,("%d %d (%d): 0x%x, 0x%x\n", moduleid, debugid,
+ timestamp, buffer[count+1], buffer[count+2]));
+ break;
+
+ default:
+ AR_DEBUG_PRINTF(ATH_DEBUG_ERR,("Invalid args: %d\n", numargs));
+ }
+ count += numargs + 1;
+ }
+}
+
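+/* Fetch the target's debug logs over the diagnostic window: read the dbglog
+ * header, then walk the circular list of log buffers, copying each one into
+ * ar->log_buffer and handing it to ar6000_dbglog_event(). Only one fetch may
+ * be in progress at a time. */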
+int
+ar6000_dbglog_get_debug_logs(AR_SOFTC_T *ar)
+{
+ A_UINT32 data[8]; /* Should be able to accommodate struct dbglog_buf_s */
+ A_UINT32 address;
+ A_UINT32 length;
+ A_UINT32 dropped;
+ A_UINT32 firstbuf;
+ A_UINT32 debug_hdr_ptr;
+
+ if (!ar->dbglog_init_done) return A_ERROR;
+
+
+ AR6000_SPIN_LOCK(&ar->arLock, 0);
+
+ if (ar->dbgLogFetchInProgress) {
+ AR6000_SPIN_UNLOCK(&ar->arLock, 0);
+ return A_EBUSY;
+ }
+
+ /* block out others */
+ ar->dbgLogFetchInProgress = TRUE;
+
+ AR6000_SPIN_UNLOCK(&ar->arLock, 0);
+
+ debug_hdr_ptr = dbglog_get_debug_hdr_ptr(ar);
+ printk("debug_hdr_ptr: 0x%x\n", debug_hdr_ptr);
+
+ /* Get the contents of the ring buffer */
+ if (debug_hdr_ptr) {
+ address = TARG_VTOP(ar->arTargetType, debug_hdr_ptr);
+ length = 4 /* sizeof(dbuf) */ + 4 /* sizeof(dropped) */;
+ A_MEMZERO(data, sizeof(data));
+ ar6000_ReadDataDiag(ar->arHifDevice, address, (A_UCHAR *)data, length);
+ address = TARG_VTOP(ar->arTargetType, data[0] /* dbuf */);
+ firstbuf = address;
+ dropped = data[1]; /* dropped */
+ length = 4 /* sizeof(next) */ + 4 /* sizeof(buffer) */ + 4 /* sizeof(bufsize) */ + 4 /* sizeof(length) */ + 4 /* sizeof(count) */ + 4 /* sizeof(free) */;
+ A_MEMZERO(data, sizeof(data));
+ ar6000_ReadDataDiag(ar->arHifDevice, address, (A_UCHAR *)&data, length);
+
+ do {
+ address = TARG_VTOP(ar->arTargetType, data[1] /* buffer*/);
+ length = data[3]; /* length */
+ if ((length) && (length <= data[2] /* bufsize*/)) {
+ /* Rewind the index if it is about to overrun the buffer */
+ if (ar->log_cnt > (DBGLOG_HOST_LOG_BUFFER_SIZE - length)) {
+ ar->log_cnt = 0;
+ }
+ if(A_OK != ar6000_ReadDataDiag(ar->arHifDevice, address,
+ (A_UCHAR *)&ar->log_buffer[ar->log_cnt], length))
+ {
+ break;
+ }
+ ar6000_dbglog_event(ar, dropped, (A_INT8*)&ar->log_buffer[ar->log_cnt], length);
+ ar->log_cnt += length;
+ } else {
+ AR_DEBUG_PRINTF(ATH_DEBUG_DBG_LOG,("Length: %d (Total size: %d)\n",
+ data[3], data[2]));
+ }
+
+ address = TARG_VTOP(ar->arTargetType, data[0] /* next */);
+ length = 4 /* sizeof(next) */ + 4 /* sizeof(buffer) */ + 4 /* sizeof(bufsize) */ + 4 /* sizeof(length) */ + 4 /* sizeof(count) */ + 4 /* sizeof(free) */;
+ A_MEMZERO(data, sizeof(data));
+ if(A_OK != ar6000_ReadDataDiag(ar->arHifDevice, address,
+ (A_UCHAR *)&data, length))
+ {
+ break;
+ }
+
+ } while (address != firstbuf);
+ }
+
+ ar->dbgLogFetchInProgress = FALSE;
+
+ return A_OK;
+}
+
+void
+ar6000_dbglog_event(AR_SOFTC_T *ar, A_UINT32 dropped,
+ A_INT8 *buffer, A_UINT32 length)
+{
+#ifdef REPORT_DEBUG_LOGS_TO_APP
+ #define MAX_WIRELESS_EVENT_SIZE 252
+ /*
+ * Break the buffer up into chunks of at most MAX_WIRELESS_EVENT_SIZE bytes.
+ * There seems to be a limit on the length of message that can be
+ * transmitted to the user application via this mechanism.
+ */
+ A_UINT32 send, sent;
+
+ sent = 0;
+ send = dbglog_get_debug_fragment(&buffer[sent], length - sent,
+ MAX_WIRELESS_EVENT_SIZE);
+ while (send) {
+ ar6000_send_event_to_app(ar, WMIX_DBGLOG_EVENTID, (A_UINT8*)&buffer[sent], send);
+ sent += send;
+ send = dbglog_get_debug_fragment(&buffer[sent], length - sent,
+ MAX_WIRELESS_EVENT_SIZE);
+ }
+#else
+ AR_DEBUG_PRINTF(ATH_DEBUG_ERR,("Dropped logs: 0x%x\nDebug info length: %d\n",
+ dropped, length));
+
+ /* Interpret the debug logs */
+ dbglog_parse_debug_logs((A_INT8*)buffer, length);
+#endif /* REPORT_DEBUG_LOGS_TO_APP */
+}
+
+
+static int __init
+ar6000_init_module(void)
+{
+ static int probed = 0;
+ A_STATUS status;
+ OSDRV_CALLBACKS osdrvCallbacks;
+
+ a_module_debug_support_init();
+
+#ifdef DEBUG
+ /* check for debug mask overrides */
+ if (debughtc != 0) {
+ ATH_DEBUG_SET_DEBUG_MASK(htc,debughtc);
+ }
+ if (debugbmi != 0) {
+ ATH_DEBUG_SET_DEBUG_MASK(bmi,debugbmi);
+ }
+ if (debughif != 0) {
+ ATH_DEBUG_SET_DEBUG_MASK(hif,debughif);
+ }
+ if (debugdriver != 0) {
+ ATH_DEBUG_SET_DEBUG_MASK(driver,debugdriver);
+ }
+
+#endif
+
+ A_REGISTER_MODULE_DEBUG_INFO(driver);
+
+ A_MEMZERO(&osdrvCallbacks,sizeof(osdrvCallbacks));
+ osdrvCallbacks.deviceInsertedHandler = ar6000_avail_ev;
+ osdrvCallbacks.deviceRemovedHandler = ar6000_unavail_ev;
+#ifdef CONFIG_PM
+ osdrvCallbacks.deviceSuspendHandler = ar6000_suspend_ev;
+ osdrvCallbacks.deviceResumeHandler = ar6000_resume_ev;
+ osdrvCallbacks.devicePowerChangeHandler = ar6000_power_change_ev;
+#endif
+
+ ar6000_pm_init();
+
+#ifdef ANDROID_ENV
+ android_module_init(&osdrvCallbacks);
+#endif
+
+#ifdef DEBUG
+ /* Set the debug flags if specified at load time */
+ if(debugflags != 0)
+ {
+ g_dbg_flags = debugflags;
+ }
+#endif
+
+ if (probed) {
+ return -ENODEV;
+ }
+ probed++;
+
+#ifdef ADAPTIVE_POWER_THROUGHPUT_CONTROL
+ memset(&aptcTR, 0, sizeof(APTC_TRAFFIC_RECORD));
+#endif /* ADAPTIVE_POWER_THROUGHPUT_CONTROL */
+
+#ifdef CONFIG_HOST_GPIO_SUPPORT
+ ar6000_gpio_init();
+#endif /* CONFIG_HOST_GPIO_SUPPORT */
+
+ status = HIFInit(&osdrvCallbacks);
+ if(status != A_OK)
+ return -ENODEV;
+
+ return 0;
+}
+
+static void __exit
+ar6000_cleanup_module(void)
+{
+ int i = 0;
+ struct net_device *ar6000_netdev;
+
+#ifdef ADAPTIVE_POWER_THROUGHPUT_CONTROL
+ /* Delete the Adaptive Power Control timer */
+ if (timer_pending(&aptcTimer)) {
+ del_timer_sync(&aptcTimer);
+ }
+#endif /* ADAPTIVE_POWER_THROUGHPUT_CONTROL */
+
+ for (i=0; i < MAX_AR6000; i++) {
+ if (ar6000_devices[i] != NULL) {
+ ar6000_netdev = ar6000_devices[i];
+ ar6000_devices[i] = NULL;
+ ar6000_destroy(ar6000_netdev, 1);
+ }
+ }
+
+ HIFShutDownDevice(NULL);
+
+ a_module_debug_support_cleanup();
+
+ ar6000_pm_exit();
+
+#ifdef ANDROID_ENV
+ android_module_exit();
+#endif
+
+ AR_DEBUG_PRINTF(ATH_DEBUG_INFO,("ar6000_cleanup: success\n"));
+}
+
+#ifdef ADAPTIVE_POWER_THROUGHPUT_CONTROL
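+/* Adaptive power/throughput control: sample the bytes moved during the last
+ * interval; if throughput drops below the lower threshold, re-enable power
+ * save (REC_POWER) and let the timer expire, otherwise re-arm it. */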
+void
+aptcTimerHandler(unsigned long arg)
+{
+ A_UINT32 numbytes;
+ A_UINT32 throughput;
+ AR_SOFTC_T *ar;
+ A_STATUS status;
+
+ ar = (AR_SOFTC_T *)arg;
+ A_ASSERT(ar != NULL);
+ A_ASSERT(!timer_pending(&aptcTimer));
+
+ AR6000_SPIN_LOCK(&ar->arLock, 0);
+
+ /* Get the number of bytes transferred */
+ numbytes = aptcTR.bytesTransmitted + aptcTR.bytesReceived;
+ aptcTR.bytesTransmitted = aptcTR.bytesReceived = 0;
+
+ /* Calculate and decide based on throughput thresholds */
+ throughput = ((numbytes * 8)/APTC_TRAFFIC_SAMPLING_INTERVAL); /* Kbps */
+ if (throughput < APTC_LOWER_THROUGHPUT_THRESHOLD) {
+ /* Enable Sleep and delete the timer */
+ A_ASSERT(ar->arWmiReady == TRUE);
+ AR6000_SPIN_UNLOCK(&ar->arLock, 0);
+ status = wmi_powermode_cmd(ar->arWmi, REC_POWER);
+ AR6000_SPIN_LOCK(&ar->arLock, 0);
+ A_ASSERT(status == A_OK);
+ aptcTR.timerScheduled = FALSE;
+ } else {
+ A_TIMEOUT_MS(&aptcTimer, APTC_TRAFFIC_SAMPLING_INTERVAL, 0);
+ }
+
+ AR6000_SPIN_UNLOCK(&ar->arLock, 0);
+}
+#endif /* ADAPTIVE_POWER_THROUGHPUT_CONTROL */
+
+#ifdef ATH_AR6K_11N_SUPPORT
+static void
+ar6000_alloc_netbufs(A_NETBUF_QUEUE_T *q, A_UINT16 num)
+{
+ void * osbuf;
+
+ while(num) {
+ if((osbuf = A_NETBUF_ALLOC(AR6000_BUFFER_SIZE))) {
+ A_NETBUF_ENQUEUE(q, osbuf);
+ } else {
+ break;
+ }
+ num--;
+ }
+
+ if(num) {
+ A_PRINTF("%s(), allocation of netbuf failed\n", __func__);
+ }
+}
+#endif
+
+static struct bin_attribute bmi_attr = {
+ .attr = {.name = "bmi", .mode = 0600},
+ .read = ar6000_sysfs_bmi_read,
+ .write = ar6000_sysfs_bmi_write,
+};
+
+static ssize_t
+ar6000_sysfs_bmi_read(struct file *fp, struct kobject *kobj,
+ struct bin_attribute *bin_attr,
+ char *buf, loff_t pos, size_t count)
+{
+ int index;
+ AR_SOFTC_T *ar;
+ HIF_DEVICE_OS_DEVICE_INFO *osDevInfo;
+
+ AR_DEBUG_PRINTF(ATH_DEBUG_INFO,("BMI: Read %d bytes\n", (A_UINT32)count));
+ for (index=0; index < MAX_AR6000; index++) {
+ ar = (AR_SOFTC_T *)ar6k_priv(ar6000_devices[index]);
+ osDevInfo = &ar->osDevInfo;
+ if (kobj == (&(((struct device *)osDevInfo->pOSDevice)->kobj))) {
+ break;
+ }
+ }
+
+ if (index == MAX_AR6000) return 0;
+
+ if ((BMIRawRead(ar->arHifDevice, (A_UCHAR*)buf, count, TRUE)) != A_OK) {
+ return 0;
+ }
+
+ return count;
+}
+
+static ssize_t
+ar6000_sysfs_bmi_write(struct file *fp, struct kobject *kobj,
+ struct bin_attribute *bin_attr,
+ char *buf, loff_t pos, size_t count)
+{
+ int index;
+ AR_SOFTC_T *ar;
+ HIF_DEVICE_OS_DEVICE_INFO *osDevInfo;
+
+ AR_DEBUG_PRINTF(ATH_DEBUG_INFO,("BMI: Write %d bytes\n", (A_UINT32)count));
+ for (index=0; index < MAX_AR6000; index++) {
+ ar = (AR_SOFTC_T *)ar6k_priv(ar6000_devices[index]);
+ osDevInfo = &ar->osDevInfo;
+ if (kobj == (&(((struct device *)osDevInfo->pOSDevice)->kobj))) {
+ break;
+ }
+ }
+
+ if (index == MAX_AR6000) return 0;
+
+ if ((BMIRawWrite(ar->arHifDevice, (A_UCHAR*)buf, count)) != A_OK) {
+ return 0;
+ }
+
+ return count;
+}
+
+static A_STATUS
+ar6000_sysfs_bmi_init(AR_SOFTC_T *ar)
+{
+ A_STATUS status;
+
+ AR_DEBUG_PRINTF(ATH_DEBUG_INFO,("BMI: Creating sysfs entry\n"));
+ A_MEMZERO(&ar->osDevInfo, sizeof(HIF_DEVICE_OS_DEVICE_INFO));
+
+ /* Get the underlying OS device */
+ status = HIFConfigureDevice(ar->arHifDevice,
+ HIF_DEVICE_GET_OS_DEVICE,
+ &ar->osDevInfo,
+ sizeof(HIF_DEVICE_OS_DEVICE_INFO));
+
+ if (A_FAILED(status)) {
+ AR_DEBUG_PRINTF(ATH_DEBUG_ERR, ("BMI: Failed to get OS device info from HIF\n"));
+ return A_ERROR;
+ }
+
+ /* Create a bmi entry in the sysfs filesystem */
+ if ((sysfs_create_bin_file(&(((struct device *)ar->osDevInfo.pOSDevice)->kobj), &bmi_attr)) < 0)
+ {
+ AR_DEBUG_PRINTF(ATH_DEBUG_ERR,("BMI: Failed to create entry for bmi in sysfs filesystem\n"));
+ return A_ERROR;
+ }
+
+ return A_OK;
+}
+
+static void
+ar6000_sysfs_bmi_deinit(AR_SOFTC_T *ar)
+{
+ AR_DEBUG_PRINTF(ATH_DEBUG_INFO,("BMI: Deleting sysfs entry\n"));
+
+ sysfs_remove_bin_file(&(((struct device *)ar->osDevInfo.pOSDevice)->kobj), &bmi_attr);
+}
+
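+/* Run a BMI call and bail out of the enclosing function with A_ERROR if it fails. */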
+#define bmifn(fn) do { \
+ if ((fn) < A_OK) { \
+ AR_DEBUG_PRINTF(ATH_DEBUG_ERR, ("BMI operation failed: %d\n", __LINE__)); \
+ return A_ERROR; \
+ } \
+} while(0)
+
+#ifdef INIT_MODE_DRV_ENABLED
+
+#ifdef SOFTMAC_FILE_USED
+#define AR6002_MAC_ADDRESS_OFFSET 0x0A
+#define AR6003_MAC_ADDRESS_OFFSET 0x16
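+/* Recompute the board data checksum: zero the stored CRC word, XOR all 16-bit
+ * words of the image and store the inverted result. Image size and CRC offset
+ * depend on the target type. */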
+static
+void calculate_crc(A_UINT32 TargetType, A_UCHAR *eeprom_data)
+{
+ A_UINT16 *ptr_crc;
+ A_UINT16 *ptr16_eeprom;
+ A_UINT16 checksum;
+ A_UINT32 i;
+ A_UINT32 eeprom_size;
+
+ if (TargetType == TARGET_TYPE_AR6001)
+ {
+ eeprom_size = 512;
+ ptr_crc = (A_UINT16 *)eeprom_data;
+ }
+ else if (TargetType == TARGET_TYPE_AR6003)
+ {
+ eeprom_size = 1024;
+ ptr_crc = (A_UINT16 *)((A_UCHAR *)eeprom_data + 0x04);
+ }
+ else
+ {
+ eeprom_size = 768;
+ ptr_crc = (A_UINT16 *)((A_UCHAR *)eeprom_data + 0x04);
+ }
+
+
+ // Clear the crc
+ *ptr_crc = 0;
+
+ // Recalculate new CRC
+ checksum = 0;
+ ptr16_eeprom = (A_UINT16 *)eeprom_data;
+ for (i = 0;i < eeprom_size; i += 2)
+ {
+ checksum = checksum ^ (*ptr16_eeprom);
+ ptr16_eeprom++;
+ }
+ checksum = 0xFFFF ^ checksum;
+ *ptr_crc = checksum;
+}
+
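+/* Replace the MAC address embedded in the board data with one read from an
+ * optional "softmac" firmware file, falling back to a locally generated
+ * pseudo-random address, then recompute the board data checksum. */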
+static void
+ar6000_softmac_update(AR_SOFTC_T *ar, A_UCHAR *eeprom_data, size_t size)
+{
+ const char *source = "random generated";
+ const struct firmware *softmac_entry;
+ A_UCHAR *ptr_mac;
+ switch (ar->arTargetType) {
+ case TARGET_TYPE_AR6002:
+ ptr_mac = (A_UINT8 *)((A_UCHAR *)eeprom_data + AR6002_MAC_ADDRESS_OFFSET);
+ break;
+ case TARGET_TYPE_AR6003:
+ ptr_mac = (A_UINT8 *)((A_UCHAR *)eeprom_data + AR6003_MAC_ADDRESS_OFFSET);
+ break;
+ default:
+ AR_DEBUG_PRINTF(ATH_DEBUG_ERR, ("Invalid Target Type\n"));
+ return;
+ }
+ printk(KERN_DEBUG "MAC from EEPROM %pM\n", ptr_mac);
+
+ /* create a random MAC in case we cannot read the softmac file from the filesystem */
+ ptr_mac[0] = 0;
+ ptr_mac[1] = 0x03;
+ ptr_mac[2] = 0x7F;
+ ptr_mac[3] = random32() & 0xff;
+ ptr_mac[4] = random32() & 0xff;
+ ptr_mac[5] = random32() & 0xff;
+ if ((A_REQUEST_FIRMWARE(&softmac_entry, "softmac", ((struct device *)ar->osDevInfo.pOSDevice))) == 0)
+ {
+ A_CHAR *macbuf = A_MALLOC_NOWAIT(softmac_entry->size+1);
+ if (macbuf) {
+ unsigned int softmac[6];
+ memcpy(macbuf, softmac_entry->data, softmac_entry->size);
+ macbuf[softmac_entry->size] = '\0';
+ if (sscanf(macbuf, "%02x:%02x:%02x:%02x:%02x:%02x",
+ &softmac[0], &softmac[1], &softmac[2],
+ &softmac[3], &softmac[4], &softmac[5])==6) {
+ int i;
+ for (i=0; i<6; ++i) {
+ ptr_mac[i] = softmac[i] & 0xff;
+ }
+ source = "softmac file";
+ }
+ A_FREE(macbuf);
+ }
+ A_RELEASE_FIRMWARE(softmac_entry);
+ }
+ printk(KERN_DEBUG "MAC from %s %pM\n", source, ptr_mac);
+ calculate_crc(ar->arTargetType, eeprom_data);
+}
+#endif /* SOFTMAC_FILE_USED */
+
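+/* Select the firmware image name for the requested file type based on the
+ * target firmware revision (and epping/tcmd/ART overrides), request it from
+ * the firmware loader and download it to the target over BMI, using
+ * BMIFastDownload() for compressed images. Board data gets special handling
+ * for softmac patching and extended board data. */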
+static A_STATUS
+ar6000_transfer_bin_file(AR_SOFTC_T *ar, AR6K_BIN_FILE file, A_UINT32 address, A_BOOL compressed)
+{
+ A_STATUS status;
+ const char *filename;
+ const struct firmware *fw_entry;
+ A_UINT32 fw_entry_size;
+
+ switch (file) {
+ case AR6K_OTP_FILE:
+ if (ar->arVersion.target_ver == AR6003_REV1_VERSION) {
+ filename = AR6003_REV1_OTP_FILE;
+ } else if (ar->arVersion.target_ver == AR6003_REV2_VERSION) {
+ filename = AR6003_REV2_OTP_FILE;
+ } else {
+ AR_DEBUG_PRINTF(ATH_DEBUG_ERR, ("Unknown firmware revision: %d\n", ar->arVersion.target_ver));
+ return A_ERROR;
+ }
+ break;
+
+ case AR6K_FIRMWARE_FILE:
+ if (ar->arVersion.target_ver == AR6003_REV1_VERSION) {
+ filename = AR6003_REV1_FIRMWARE_FILE;
+ } else if (ar->arVersion.target_ver == AR6003_REV2_VERSION) {
+ filename = AR6003_REV2_FIRMWARE_FILE;
+ } else {
+ AR_DEBUG_PRINTF(ATH_DEBUG_ERR, ("Unknown firmware revision: %d\n", ar->arVersion.target_ver));
+ return A_ERROR;
+ }
+
+ if (eppingtest) {
+ bypasswmi = TRUE;
+ if (ar->arVersion.target_ver == AR6003_REV1_VERSION) {
+ filename = AR6003_REV1_EPPING_FIRMWARE_FILE;
+ } else if (ar->arVersion.target_ver == AR6003_REV2_VERSION) {
+ filename = AR6003_REV2_EPPING_FIRMWARE_FILE;
+ } else {
+ AR_DEBUG_PRINTF(ATH_DEBUG_ERR, ("eppingtest : unsupported firmware revision: %d\n",
+ ar->arVersion.target_ver));
+ return A_ERROR;
+ }
+ compressed = 0;
+ }
+
+#ifdef CONFIG_HOST_TCMD_SUPPORT
+ if(testmode) {
+ if (ar->arVersion.target_ver == AR6003_REV1_VERSION) {
+ filename = AR6003_REV1_TCMD_FIRMWARE_FILE;
+ } else if (ar->arVersion.target_ver == AR6003_REV2_VERSION) {
+ filename = AR6003_REV2_TCMD_FIRMWARE_FILE;
+ } else {
+ AR_DEBUG_PRINTF(ATH_DEBUG_ERR, ("Unknown firmware revision: %d\n", ar->arVersion.target_ver));
+ return A_ERROR;
+ }
+ compressed = 0;
+ }
+#endif
+#ifdef HTC_RAW_INTERFACE
+ if (!eppingtest && bypasswmi) {
+ if (ar->arVersion.target_ver == AR6003_REV1_VERSION) {
+ filename = AR6003_REV1_ART_FIRMWARE_FILE;
+ } else if (ar->arVersion.target_ver == AR6003_REV2_VERSION) {
+ filename = AR6003_REV2_ART_FIRMWARE_FILE;
+ } else {
+ AR_DEBUG_PRINTF(ATH_DEBUG_ERR, ("Unknown firmware revision: %d\n", ar->arVersion.target_ver));
+ return A_ERROR;
+ }
+ compressed = 0;
+ }
+#endif
+ break;
+
+ case AR6K_PATCH_FILE:
+ if (ar->arVersion.target_ver == AR6003_REV1_VERSION) {
+ filename = AR6003_REV1_PATCH_FILE;
+ } else if (ar->arVersion.target_ver == AR6003_REV2_VERSION) {
+ filename = AR6003_REV2_PATCH_FILE;
+ } else {
+ AR_DEBUG_PRINTF(ATH_DEBUG_ERR, ("Unknown firmware revision: %d\n", ar->arVersion.target_ver));
+ return A_ERROR;
+ }
+ break;
+
+ case AR6K_BOARD_DATA_FILE:
+ if (ar->arVersion.target_ver == AR6003_REV1_VERSION) {
+ filename = AR6003_REV1_BOARD_DATA_FILE;
+ } else if (ar->arVersion.target_ver == AR6003_REV2_VERSION) {
+ filename = AR6003_REV2_BOARD_DATA_FILE;
+ } else {
+ AR_DEBUG_PRINTF(ATH_DEBUG_ERR, ("Unknown firmware revision: %d\n", ar->arVersion.target_ver));
+ return A_ERROR;
+ }
+ break;
+
+ default:
+ AR_DEBUG_PRINTF(ATH_DEBUG_ERR, ("Unknown file type: %d\n", file));
+ return A_ERROR;
+ }
+ if ((A_REQUEST_FIRMWARE(&fw_entry, filename, ((struct device *)ar->osDevInfo.pOSDevice))) != 0)
+ {
+ AR_DEBUG_PRINTF(ATH_DEBUG_ERR, ("Failed to get %s\n", filename));
+ return A_ENOENT;
+ }
+
+#ifdef SOFTMAC_FILE_USED
+ if (file==AR6K_BOARD_DATA_FILE && fw_entry->data) {
+ ar6000_softmac_update(ar, (A_UCHAR *)fw_entry->data, fw_entry->size);
+ }
+#endif
+
+
+ fw_entry_size = fw_entry->size;
+
+ /* Load extended board data for AR6003 */
+ if ((file==AR6K_BOARD_DATA_FILE) && (fw_entry->data)) {
+ A_UINT32 board_ext_address;
+ A_UINT32 board_ext_data_size;
+ A_UINT32 board_data_size;
+
+ board_ext_data_size = (((ar)->arTargetType == TARGET_TYPE_AR6002) ? AR6002_BOARD_EXT_DATA_SZ : \
+ (((ar)->arTargetType == TARGET_TYPE_AR6003) ? AR6003_BOARD_EXT_DATA_SZ : 0));
+
+ board_data_size = (((ar)->arTargetType == TARGET_TYPE_AR6002) ? AR6002_BOARD_DATA_SZ : \
+ (((ar)->arTargetType == TARGET_TYPE_AR6003) ? AR6003_BOARD_DATA_SZ : 0));
+
+ /* Determine where in Target RAM to write Board Data */
+ bmifn(BMIReadMemory(ar->arHifDevice, HOST_INTEREST_ITEM_ADDRESS(ar, hi_board_ext_data), (A_UCHAR *)&board_ext_address, 4));
+ AR_DEBUG_PRINTF(ATH_DEBUG_INFO, ("Board extended Data download address: 0x%x\n", board_ext_address));
+
+ /* check whether the target has allocated memory for extended board data and the file contains extended board data */
+ if ((board_ext_address) && (fw_entry->size == (board_data_size + board_ext_data_size))) {
+ A_UINT32 param;
+
+ status = BMIWriteMemory(ar->arHifDevice, board_ext_address, (A_UCHAR *)(fw_entry->data + board_data_size), board_ext_data_size);
+
+ if (status != A_OK) {
+ AR_DEBUG_PRINTF(ATH_DEBUG_ERR, ("BMI operation failed: %d\n", __LINE__));
+ A_RELEASE_FIRMWARE(fw_entry);
+ return A_ERROR;
+ }
+
+ /* Record the fact that extended board Data IS initialized */
+ param = 1;
+ bmifn(BMIWriteMemory(ar->arHifDevice, HOST_INTEREST_ITEM_ADDRESS(ar, hi_board_ext_data_initialized), (A_UCHAR *)&param, 4));
+ }
+ fw_entry_size = board_data_size;
+ }
+
+ if (compressed) {
+ status = BMIFastDownload(ar->arHifDevice, address, (A_UCHAR *)fw_entry->data, fw_entry_size);
+ } else {
+ status = BMIWriteMemory(ar->arHifDevice, address, (A_UCHAR *)fw_entry->data, fw_entry_size);
+ }
+
+ if (status != A_OK) {
+ AR_DEBUG_PRINTF(ATH_DEBUG_ERR, ("BMI operation failed: %d\n", __LINE__));
+ A_RELEASE_FIRMWARE(fw_entry);
+ return A_ERROR;
+ }
+ A_RELEASE_FIRMWARE(fw_entry);
+ return A_OK;
+}
+#endif /* INIT_MODE_DRV_ENABLED */
+
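+/* When an HCI bridge device is configured, read the Bluetooth device address
+ * out of the target's board data into ar->bdaddr. */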
+A_STATUS
+ar6000_update_bdaddr(AR_SOFTC_T *ar)
+{
+
+ if (setupbtdev != 0) {
+ A_UINT32 address;
+
+ if (BMIReadMemory(ar->arHifDevice,
+ HOST_INTEREST_ITEM_ADDRESS(ar, hi_board_data), (A_UCHAR *)&address, 4) != A_OK)
+ {
+ AR_DEBUG_PRINTF(ATH_DEBUG_ERR,("BMIReadMemory for hi_board_data failed\n"));
+ return A_ERROR;
+ }
+
+ if (BMIReadMemory(ar->arHifDevice, address + BDATA_BDADDR_OFFSET, (A_UCHAR *)ar->bdaddr, 6) != A_OK)
+ {
+ AR_DEBUG_PRINTF(ATH_DEBUG_ERR,("BMIReadMemory for BD address failed\n"));
+ return A_ERROR;
+ }
+ AR_DEBUG_PRINTF(ATH_DEBUG_ERR,("BDADDR 0x%x:0x%x:0x%x:0x%x:0x%x:0x%x\n", ar->bdaddr[0],
+ ar->bdaddr[1], ar->bdaddr[2], ar->bdaddr[3],
+ ar->bdaddr[4], ar->bdaddr[5]));
+ }
+
+ return A_OK;
+}
+
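+/* Bring the target to a configured state before boot. In UDEV mode the
+ * configuration is fetched through the firmware loader (a user-space script);
+ * in driver mode the host itself programs clocks and sleep registers and
+ * downloads the board data, OTP, firmware and patch images over BMI. */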
+A_STATUS
+ar6000_sysfs_bmi_get_config(AR_SOFTC_T *ar, A_UINT32 mode)
+{
+ AR_DEBUG_PRINTF(ATH_DEBUG_INFO,("BMI: Requesting device specific configuration\n"));
+
+ if (mode == WLAN_INIT_MODE_UDEV) {
+ A_CHAR version[16];
+ const struct firmware *fw_entry;
+
+ /* Get config using udev through a script in user space */
+ sprintf(version, "%2.2x", ar->arVersion.target_ver);
+ if ((A_REQUEST_FIRMWARE(&fw_entry, version, ((struct device *)ar->osDevInfo.pOSDevice))) != 0)
+ {
+ AR_DEBUG_PRINTF(ATH_DEBUG_ERR, ("BMI: Failure to get configuration for target version: %s\n", version));
+ return A_ERROR;
+ }
+
+ A_RELEASE_FIRMWARE(fw_entry);
+#ifdef INIT_MODE_DRV_ENABLED
+ } else {
+ /* The config is contained within the driver itself */
+ A_STATUS status;
+ A_UINT32 param, options, sleep, address;
+
+ /* Temporarily disable system sleep */
+ address = MBOX_BASE_ADDRESS + LOCAL_SCRATCH_ADDRESS;
+ bmifn(BMIReadSOCRegister(ar->arHifDevice, address, &param));
+ options = param;
+ param |= AR6K_OPTION_SLEEP_DISABLE;
+ bmifn(BMIWriteSOCRegister(ar->arHifDevice, address, param));
+
+ address = RTC_BASE_ADDRESS + SYSTEM_SLEEP_ADDRESS;
+ bmifn(BMIReadSOCRegister(ar->arHifDevice, address, &param));
+ sleep = param;
+ param |= WLAN_SYSTEM_SLEEP_DISABLE_SET(1);
+ bmifn(BMIWriteSOCRegister(ar->arHifDevice, address, param));
+ AR_DEBUG_PRINTF(ATH_DEBUG_INFO, ("old options: %d, old sleep: %d\n", options, sleep));
+
+ if (ar->arTargetType == TARGET_TYPE_AR6003) {
+ /* Program analog PLL register */
+ bmifn(BMIWriteSOCRegister(ar->arHifDevice, ANALOG_INTF_BASE_ADDRESS + 0x284, 0xF9104001));
+ /* Run at 80/88MHz by default */
+ param = CPU_CLOCK_STANDARD_SET(1);
+ } else {
+ /* Run at 40/44MHz by default */
+ param = CPU_CLOCK_STANDARD_SET(0);
+ }
+ address = RTC_BASE_ADDRESS + CPU_CLOCK_ADDRESS;
+ bmifn(BMIWriteSOCRegister(ar->arHifDevice, address, param));
+
+ param = 0;
+ if (ar->arTargetType == TARGET_TYPE_AR6002) {
+ bmifn(BMIReadMemory(ar->arHifDevice, HOST_INTEREST_ITEM_ADDRESS(ar, hi_ext_clk_detected), (A_UCHAR *)&param, 4));
+ }
+
+ /* LPO_CAL.ENABLE = 1 if no external clk is detected */
+ if (param != 1) {
+ address = RTC_BASE_ADDRESS + LPO_CAL_ADDRESS;
+ param = LPO_CAL_ENABLE_SET(1);
+ bmifn(BMIWriteSOCRegister(ar->arHifDevice, address, param));
+ }
+
+ /* Venus2.0: Lower SDIO pad drive strength,
+ * temporary WAR to avoid SDIO CRC error */
+ if (ar->arVersion.target_ver == AR6003_REV2_VERSION) {
+ AR_DEBUG_PRINTF(ATH_DEBUG_ERR, ("AR6K: Temporary WAR to avoid SDIO CRC error\n"));
+ param = 0x20;
+ address = GPIO_BASE_ADDRESS + GPIO_PIN10_ADDRESS;
+ bmifn(BMIWriteSOCRegister(ar->arHifDevice, address, param));
+
+ address = GPIO_BASE_ADDRESS + GPIO_PIN11_ADDRESS;
+ bmifn(BMIWriteSOCRegister(ar->arHifDevice, address, param));
+
+ address = GPIO_BASE_ADDRESS + GPIO_PIN12_ADDRESS;
+ bmifn(BMIWriteSOCRegister(ar->arHifDevice, address, param));
+
+ address = GPIO_BASE_ADDRESS + GPIO_PIN13_ADDRESS;
+ bmifn(BMIWriteSOCRegister(ar->arHifDevice, address, param));
+ }
+
+#ifdef FORCE_INTERNAL_CLOCK
+ /* Ignore external clock, if any, and force use of internal clock */
+ if (ar->arTargetType == TARGET_TYPE_AR6003) {
+ /* hi_ext_clk_detected = 0 */
+ param = 0;
+ bmifn(BMIWriteMemory(ar->arHifDevice, HOST_INTEREST_ITEM_ADDRESS(ar, hi_ext_clk_detected), (A_UCHAR *)&param, 4));
+
+ /* CLOCK_CONTROL &= ~LF_CLK32 */
+ address = RTC_BASE_ADDRESS + CLOCK_CONTROL_ADDRESS;
+ bmifn(BMIReadSOCRegister(ar->arHifDevice, address, &param));
+ param &= (~CLOCK_CONTROL_LF_CLK32_SET(1));
+ bmifn(BMIWriteSOCRegister(ar->arHifDevice, address, param));
+ }
+#endif /* FORCE_INTERNAL_CLOCK */
+
+ /* Transfer Board Data from Target EEPROM to Target RAM */
+ if (ar->arTargetType == TARGET_TYPE_AR6003) {
+ /* Determine where in Target RAM to write Board Data */
+ bmifn(BMIReadMemory(ar->arHifDevice, HOST_INTEREST_ITEM_ADDRESS(ar, hi_board_data), (A_UCHAR *)&address, 4));
+ AR_DEBUG_PRINTF(ATH_DEBUG_INFO, ("Board Data download address: 0x%x\n", address));
+
+ /* Write EEPROM data to Target RAM */
+ if ((ar6000_transfer_bin_file(ar, AR6K_BOARD_DATA_FILE, address, FALSE)) != A_OK) {
+ return A_ERROR;
+ }
+
+ /* Record the fact that Board Data IS initialized */
+ param = 1;
+ bmifn(BMIWriteMemory(ar->arHifDevice, HOST_INTEREST_ITEM_ADDRESS(ar, hi_board_data_initialized), (A_UCHAR *)&param, 4));
+
+ /* Transfer One time Programmable data */
+ AR6K_DATA_DOWNLOAD_ADDRESS(address, ar->arVersion.target_ver);
+ status = ar6000_transfer_bin_file(ar, AR6K_OTP_FILE, address, TRUE);
+ if (status == A_OK) {
+ /* Execute the OTP code */
+ param = 0;
+ AR6K_APP_START_OVERRIDE_ADDRESS(address, ar->arVersion.target_ver);
+ bmifn(BMIExecute(ar->arHifDevice, address, &param));
+ } else if (status != A_ENOENT) {
+ return A_ERROR;
+ }
+ } else {
+ AR_DEBUG_PRINTF(ATH_DEBUG_ERR, ("Programming of board data for chip %d not supported\n", ar->arTargetType));
+ return A_ERROR;
+ }
+
+ /* Download Target firmware */
+ AR6K_DATA_DOWNLOAD_ADDRESS(address, ar->arVersion.target_ver);
+ if ((ar6000_transfer_bin_file(ar, AR6K_FIRMWARE_FILE, address, TRUE)) != A_OK) {
+ return A_ERROR;
+ }
+
+ /* Set starting address for firmware */
+ AR6K_APP_START_OVERRIDE_ADDRESS(address, ar->arVersion.target_ver);
+ bmifn(BMISetAppStart(ar->arHifDevice, address));
+
+ /* Apply the patches */
+ AR6K_PATCH_DOWNLOAD_ADDRESS(address, ar->arVersion.target_ver);
+ if ((ar6000_transfer_bin_file(ar, AR6K_PATCH_FILE, address, FALSE)) != A_OK) {
+ return A_ERROR;
+ }
+
+ param = address;
+ bmifn(BMIWriteMemory(ar->arHifDevice, HOST_INTEREST_ITEM_ADDRESS(ar, hi_dset_list_head), (A_UCHAR *)&param, 4));
+
+ if (ar->arTargetType == TARGET_TYPE_AR6003) {
+ if (ar->arVersion.target_ver == AR6003_REV1_VERSION) {
+ /* Reserve 5.5K of RAM */
+ param = 5632;
+ } else { /* AR6003_REV2_VERSION */
+ /* Reserve 6.5K of RAM */
+ param = 6656;
+ }
+ bmifn(BMIWriteMemory(ar->arHifDevice, HOST_INTEREST_ITEM_ADDRESS(ar, hi_end_RAM_reserve_sz), (A_UCHAR *)&param, 4));
+ }
+
+ /* Restore system sleep */
+ address = RTC_BASE_ADDRESS + SYSTEM_SLEEP_ADDRESS;
+ bmifn(BMIWriteSOCRegister(ar->arHifDevice, address, sleep));
+
+ address = MBOX_BASE_ADDRESS + LOCAL_SCRATCH_ADDRESS;
+ param = options | 0x20;
+ bmifn(BMIWriteSOCRegister(ar->arHifDevice, address, param));
+
+ if (ar->arTargetType == TARGET_TYPE_AR6003) {
+ /* Configure GPIO AR6003 UART */
+#ifndef CONFIG_AR600x_DEBUG_UART_TX_PIN
+#define CONFIG_AR600x_DEBUG_UART_TX_PIN 8
+#endif
+ param = CONFIG_AR600x_DEBUG_UART_TX_PIN;
+ bmifn(BMIWriteMemory(ar->arHifDevice, HOST_INTEREST_ITEM_ADDRESS(ar, hi_dbg_uart_txpin), (A_UCHAR *)&param, 4));
+
+#if (CONFIG_AR600x_DEBUG_UART_TX_PIN == 23)
+ {
+ address = GPIO_BASE_ADDRESS + CLOCK_GPIO_ADDRESS;
+ bmifn(BMIReadSOCRegister(ar->arHifDevice, address, &param));
+ param |= CLOCK_GPIO_BT_CLK_OUT_EN_SET(1);
+ bmifn(BMIWriteSOCRegister(ar->arHifDevice, address, param));
+ }
+#endif
+
+ /* Configure GPIO for BT Reset */
+#ifdef ATH6KL_CONFIG_GPIO_BT_RESET
+#define CONFIG_AR600x_BT_RESET_PIN 0x16
+ param = CONFIG_AR600x_BT_RESET_PIN;
+ bmifn(BMIWriteMemory(ar->arHifDevice, HOST_INTEREST_ITEM_ADDRESS(ar, hi_hci_uart_support_pins), (A_UCHAR *)&param, 4));
+#endif /* ATH6KL_CONFIG_GPIO_BT_RESET */
+
+ /* Configure UART flow control polarity */
+#ifndef CONFIG_ATH6KL_BT_UART_FC_POLARITY
+#define CONFIG_ATH6KL_BT_UART_FC_POLARITY 0
+#endif
+
+#if (CONFIG_ATH6KL_BT_UART_FC_POLARITY == 1)
+ if (ar->arVersion.target_ver == AR6003_REV2_VERSION) {
+ param = ((CONFIG_ATH6KL_BT_UART_FC_POLARITY << 1) & 0x2);
+ bmifn(BMIWriteMemory(ar->arHifDevice, HOST_INTEREST_ITEM_ADDRESS(ar, hi_hci_uart_pwr_mgmt_params), (A_UCHAR *)&param, 4));
+ }
+#endif /* CONFIG_ATH6KL_BT_UART_FC_POLARITY */
+ }
+
+#ifdef HTC_RAW_INTERFACE
+ if (!eppingtest && bypasswmi) {
+ /* Don't run BMIDone for ART mode and force resetok=0 */
+ resetok = 0;
+ msleep(1000);
+ }
+#endif /* HTC_RAW_INTERFACE */
+
+#endif /* INIT_MODE_DRV_ENABLED */
+ }
+
+ return A_OK;
+}
+
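+/* Pre-boot target configuration over BMI: UART prints, HTC protocol version,
+ * optional timer WAR, firmware mode, extended board data address, HTC block
+ * size and HCI bridge flags are written to the target's host interest area. */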
+A_STATUS
+ar6000_configure_target(AR_SOFTC_T *ar)
+{
+ A_UINT32 param;
+ if (enableuartprint) {
+ param = 1;
+ if (BMIWriteMemory(ar->arHifDevice,
+ HOST_INTEREST_ITEM_ADDRESS(ar, hi_serial_enable),
+ (A_UCHAR *)&param,
+ 4)!= A_OK)
+ {
+ AR_DEBUG_PRINTF(ATH_DEBUG_ERR,("BMIWriteMemory for enableuartprint failed \n"));
+ return A_ERROR;
+ }
+ AR_DEBUG_PRINTF(ATH_DEBUG_INFO,("Serial console prints enabled\n"));
+ }
+
+ /* Tell the target which HTC protocol version is being used */
+ param = HTC_PROTOCOL_VERSION;
+ if (BMIWriteMemory(ar->arHifDevice,
+ HOST_INTEREST_ITEM_ADDRESS(ar, hi_app_host_interest),
+ (A_UCHAR *)&param,
+ 4)!= A_OK)
+ {
+ AR_DEBUG_PRINTF(ATH_DEBUG_ERR,("BMIWriteMemory for htc version failed \n"));
+ return A_ERROR;
+ }
+
+#ifdef CONFIG_HOST_TCMD_SUPPORT
+ if(testmode) {
+ ar->arTargetMode = AR6000_TCMD_MODE;
+ }else {
+ ar->arTargetMode = AR6000_WLAN_MODE;
+ }
+#endif
+ if (enabletimerwar) {
+ A_UINT32 param;
+
+ if (BMIReadMemory(ar->arHifDevice,
+ HOST_INTEREST_ITEM_ADDRESS(ar, hi_option_flag),
+ (A_UCHAR *)&param,
+ 4)!= A_OK)
+ {
+ AR_DEBUG_PRINTF(ATH_DEBUG_ERR,("BMIReadMemory for enabletimerwar failed \n"));
+ return A_ERROR;
+ }
+
+ param |= HI_OPTION_TIMER_WAR;
+
+ if (BMIWriteMemory(ar->arHifDevice,
+ HOST_INTEREST_ITEM_ADDRESS(ar, hi_option_flag),
+ (A_UCHAR *)&param,
+ 4) != A_OK)
+ {
+ AR_DEBUG_PRINTF(ATH_DEBUG_ERR,("BMIWriteMemory for enabletimerwar failed \n"));
+ return A_ERROR;
+ }
+ AR_DEBUG_PRINTF(ATH_DEBUG_INFO,("Timer WAR enabled\n"));
+ }
+
+ /* set the firmware mode to STA/IBSS/AP */
+ {
+ A_UINT32 param;
+
+ if (BMIReadMemory(ar->arHifDevice,
+ HOST_INTEREST_ITEM_ADDRESS(ar, hi_option_flag),
+ (A_UCHAR *)&param,
+ 4)!= A_OK)
+ {
+ AR_DEBUG_PRINTF(ATH_DEBUG_ERR,("BMIReadMemory for setting fwmode failed \n"));
+ return A_ERROR;
+ }
+
+ param |= (fwmode << HI_OPTION_FW_MODE_SHIFT);
+
+ if (BMIWriteMemory(ar->arHifDevice,
+ HOST_INTEREST_ITEM_ADDRESS(ar, hi_option_flag),
+ (A_UCHAR *)&param,
+ 4) != A_OK)
+ {
+ AR_DEBUG_PRINTF(ATH_DEBUG_ERR,("BMIWriteMemory for setting fwmode failed \n"));
+ return A_ERROR;
+ }
+ AR_DEBUG_PRINTF(ATH_DEBUG_INFO,("Firmware mode set\n"));
+ }
+
+#ifdef ATH6KL_DISABLE_TARGET_DBGLOGS
+ {
+ A_UINT32 param;
+
+ if (BMIReadMemory(ar->arHifDevice,
+ HOST_INTEREST_ITEM_ADDRESS(ar, hi_option_flag),
+ (A_UCHAR *)&param,
+ 4)!= A_OK)
+ {
+ AR_DEBUG_PRINTF(ATH_DEBUG_ERR,("BMIReadMemory for disabling debug logs failed\n"));
+ return A_ERROR;
+ }
+
+ param |= HI_OPTION_DISABLE_DBGLOG;
+
+ if (BMIWriteMemory(ar->arHifDevice,
+ HOST_INTEREST_ITEM_ADDRESS(ar, hi_option_flag),
+ (A_UCHAR *)&param,
+ 4) != A_OK)
+ {
+ AR_DEBUG_PRINTF(ATH_DEBUG_ERR,("BMIWriteMemory for HI_OPTION_DISABLE_DBGLOG\n"));
+ return A_ERROR;
+ }
+ AR_DEBUG_PRINTF(ATH_DEBUG_INFO,("Firmware mode set\n"));
+ }
+#endif /* ATH6KL_DISABLE_TARGET_DBGLOGS */
+
+ /*
+ * Hardcode the address used for the extended board data.
+ * Ideally this would be pre-allocated by the OS at boot time,
+ * but since it is a new feature and board data is loaded
+ * at init time, we have to work around it from the host.
+ * Patching the firmware boot code would be difficult,
+ * though possible in theory.
+ */
+ if (ar->arTargetType == TARGET_TYPE_AR6003) {
+ param = AR6003_BOARD_EXT_DATA_ADDRESS;
+ if (BMIWriteMemory(ar->arHifDevice,
+ HOST_INTEREST_ITEM_ADDRESS(ar, hi_board_ext_data),
+ (A_UCHAR *)&param,
+ 4) != A_OK)
+ {
+ AR_DEBUG_PRINTF(ATH_DEBUG_ERR,("BMIWriteMemory for hi_board_ext_data failed \n"));
+ return A_ERROR;
+ }
+ }
+
+
+ /* since BMIInit is called in the driver layer, we have to set the block
+ * size here for the target */
+
+ if (A_FAILED(ar6000_set_htc_params(ar->arHifDevice,
+ ar->arTargetType,
+ mbox_yield_limit,
+ 0 /* use default number of control buffers */
+ ))) {
+ return A_ERROR;
+ }
+
+ if (setupbtdev != 0) {
+ if (A_FAILED(ar6000_set_hci_bridge_flags(ar->arHifDevice,
+ ar->arTargetType,
+ setupbtdev))) {
+ return A_ERROR;
+ }
+ }
+ return A_OK;
+}
+
+static void
+init_netdev(struct net_device *dev, char *name)
+{
+ dev->netdev_ops = &ar6000_netdev_ops;
+ dev->watchdog_timeo = AR6000_TX_TIMEOUT;
+ dev->wireless_handlers = &ath_iw_handler_def;
+
+ ath_iw_handler_def.get_wireless_stats = ar6000_get_iwstats; /*Displayed via proc fs */
+
+ /*
+ * We need the OS to provide us with more headroom in order to
+ * perform dix to 802.3, WMI header encap, and the HTC header
+ */
+ if (processDot11Hdr) {
+ dev->hard_header_len = sizeof(struct ieee80211_qosframe) + sizeof(ATH_LLC_SNAP_HDR) + sizeof(WMI_DATA_HDR) + HTC_HEADER_LEN + WMI_MAX_TX_META_SZ + LINUX_HACK_FUDGE_FACTOR;
+ } else {
+ dev->hard_header_len = ETH_HLEN + sizeof(ATH_LLC_SNAP_HDR) +
+ sizeof(WMI_DATA_HDR) + HTC_HEADER_LEN + WMI_MAX_TX_META_SZ + LINUX_HACK_FUDGE_FACTOR;
+ }
+
+ if (name[0])
+ {
+ strcpy(dev->name, name);
+ }
+
+#ifdef SET_MODULE_OWNER
+ SET_MODULE_OWNER(dev);
+#endif
+
+#ifdef CONFIG_CHECKSUM_OFFLOAD
+ if(csumOffload){
+ dev->features |= NETIF_F_IP_CSUM; /*advertise kernel capability to do TCP/UDP CSUM offload for IPV4*/
+ }
+#endif
+
+ return;
+}
+
+/*
+ * HTC Event handlers
+ */
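+/* Device-inserted handler: allocate the network device (cfg80211 or plain
+ * ethernet), set up timers and state, initialize BMI and HTC, optionally
+ * download firmware according to wlaninitmode, and register the netdev. */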
+static A_STATUS
+ar6000_avail_ev(void *context, void *hif_handle)
+{
+ int i;
+ struct net_device *dev;
+ void *ar_netif;
+ AR_SOFTC_T *ar;
+ int device_index = 0;
+ HTC_INIT_INFO htcInfo;
+#ifdef ATH6K_CONFIG_CFG80211
+ struct wireless_dev *wdev;
+#endif /* ATH6K_CONFIG_CFG80211 */
+ A_STATUS init_status = A_OK;
+
+ AR_DEBUG_PRINTF(ATH_DEBUG_INFO,("ar6000_available\n"));
+
+ for (i=0; i < MAX_AR6000; i++) {
+ if (ar6000_devices[i] == NULL) {
+ break;
+ }
+ }
+
+ if (i == MAX_AR6000) {
+ AR_DEBUG_PRINTF(ATH_DEBUG_ERR,("ar6000_available: max devices reached\n"));
+ return A_ERROR;
+ }
+
+ /* Save this. It gives a bit better readability especially since */
+ /* we use another local "i" variable below. */
+ device_index = i;
+
+#ifdef ATH6K_CONFIG_CFG80211
+ wdev = ar6k_cfg80211_init(NULL);
+ if (IS_ERR(wdev)) {
+ AR_DEBUG_PRINTF(ATH_DEBUG_ERR, ("%s: ar6k_cfg80211_init failed\n", __func__));
+ return A_ERROR;
+ }
+ ar_netif = wdev_priv(wdev);
+#else
+ dev = alloc_etherdev(sizeof(AR_SOFTC_T));
+ if (dev == NULL) {
+ AR_DEBUG_PRINTF(ATH_DEBUG_ERR,("ar6000_available: can't alloc etherdev\n"));
+ return A_ERROR;
+ }
+ ether_setup(dev);
+ ar_netif = ar6k_priv(dev);
+#endif /* ATH6K_CONFIG_CFG80211 */
+
+ if (ar_netif == NULL) {
+ AR_DEBUG_PRINTF(ATH_DEBUG_ERR, ("%s: Can't allocate ar6k priv memory\n", __func__));
+ return A_ERROR;
+ }
+
+ A_MEMZERO(ar_netif, sizeof(AR_SOFTC_T));
+ ar = (AR_SOFTC_T *)ar_netif;
+
+#ifdef ATH6K_CONFIG_CFG80211
+ ar->wdev = wdev;
+ wdev->iftype = NL80211_IFTYPE_STATION;
+
+ dev = alloc_netdev_mq(0, "wlan%d", ether_setup, 1);
+ if (!dev) {
+ printk(KERN_CRIT "AR6K: no memory for network device instance\n");
+ ar6k_cfg80211_deinit(ar);
+ return A_ERROR;
+ }
+
+ dev->ieee80211_ptr = wdev;
+ SET_NETDEV_DEV(dev, wiphy_dev(wdev->wiphy));
+ wdev->netdev = dev;
+ ar->arNetworkType = INFRA_NETWORK;
+#endif /* ATH6K_CONFIG_CFG80211 */
+
+ init_netdev(dev, ifname);
+
+#ifdef SET_NETDEV_DEV
+ if (ar_netif) {
+ HIF_DEVICE_OS_DEVICE_INFO osDevInfo;
+ A_MEMZERO(&osDevInfo, sizeof(osDevInfo));
+ if ( A_SUCCESS( HIFConfigureDevice(hif_handle, HIF_DEVICE_GET_OS_DEVICE,
+ &osDevInfo, sizeof(osDevInfo))) ) {
+ SET_NETDEV_DEV(dev, osDevInfo.pOSDevice);
+ }
+ }
+#endif
+
+ ar->arNetDev = dev;
+ ar->arHifDevice = hif_handle;
+ ar->arWlanState = WLAN_ENABLED;
+ ar->arDeviceIndex = device_index;
+
+ ar->arWlanPowerState = WLAN_POWER_STATE_ON;
+ ar->arWlanOff = FALSE; /* We are in ON state */
+#ifdef CONFIG_PM
+ ar->arWowState = WLAN_WOW_STATE_NONE;
+ ar->arBTOff = TRUE; /* BT chip assumed to be OFF */
+ ar->arBTSharing = WLAN_CONFIG_BT_SHARING;
+ ar->arWlanOffConfig = WLAN_CONFIG_WLAN_OFF;
+ ar->arSuspendConfig = WLAN_CONFIG_PM_SUSPEND;
+ ar->arWow2Config = WLAN_CONFIG_PM_WOW2;
+#endif /* CONFIG_PM */
+
+ A_INIT_TIMER(&ar->arHBChallengeResp.timer, ar6000_detect_error, dev);
+ ar->arHBChallengeResp.seqNum = 0;
+ ar->arHBChallengeResp.outstanding = FALSE;
+ ar->arHBChallengeResp.missCnt = 0;
+ ar->arHBChallengeResp.frequency = AR6000_HB_CHALLENGE_RESP_FREQ_DEFAULT;
+ ar->arHBChallengeResp.missThres = AR6000_HB_CHALLENGE_RESP_MISS_THRES_DEFAULT;
+
+ ar6000_init_control_info(ar);
+ init_waitqueue_head(&arEvent);
+ sema_init(&ar->arSem, 1);
+ ar->bIsDestroyProgress = FALSE;
+
+ INIT_HTC_PACKET_QUEUE(&ar->amsdu_rx_buffer_queue);
+
+#ifdef ADAPTIVE_POWER_THROUGHPUT_CONTROL
+ A_INIT_TIMER(&aptcTimer, aptcTimerHandler, ar);
+#endif /* ADAPTIVE_POWER_THROUGHPUT_CONTROL */
+
+ A_INIT_TIMER(&ar->disconnect_timer, disconnect_timer_handler, dev);
+
+ BMIInit();
+
+ if (bmienable) {
+ ar6000_sysfs_bmi_init(ar);
+ }
+
+ {
+ struct bmi_target_info targ_info;
+
+ if (BMIGetTargetInfo(ar->arHifDevice, &targ_info) != A_OK) {
+ init_status = A_ERROR;
+ goto avail_ev_failed;
+ }
+
+ ar->arVersion.target_ver = targ_info.target_ver;
+ ar->arTargetType = targ_info.target_type;
+
+ /* do any target-specific preparation that can be done through BMI */
+ if (ar6000_prepare_target(ar->arHifDevice,
+ targ_info.target_type,
+ targ_info.target_ver) != A_OK) {
+ init_status = A_ERROR;
+ goto avail_ev_failed;
+ }
+
+ }
+
+ if (ar6000_configure_target(ar) != A_OK) {
+ init_status = A_ERROR;
+ goto avail_ev_failed;
+ }
+
+ A_MEMZERO(&htcInfo,sizeof(htcInfo));
+ htcInfo.pContext = ar;
+ htcInfo.TargetFailure = ar6000_target_failure;
+
+ ar->arHtcTarget = HTCCreate(ar->arHifDevice,&htcInfo);
+
+ if (ar->arHtcTarget == NULL) {
+ init_status = A_ERROR;
+ goto avail_ev_failed;
+ }
+
+ spin_lock_init(&ar->arLock);
+
+#ifdef WAPI_ENABLE
+ ar->arWapiEnable = 0;
+#endif
+
+
+#ifdef CONFIG_CHECKSUM_OFFLOAD
+ if(csumOffload){
+ /* If an external framework is also needed, change this to use an extended rxMetaVersion */
+ ar->rxMetaVersion=WMI_META_VERSION_2;
+ }
+#endif
+
+#ifdef ATH_AR6K_11N_SUPPORT
+ if((ar->aggr_cntxt = aggr_init(ar6000_alloc_netbufs)) == NULL) {
+ AR_DEBUG_PRINTF(ATH_DEBUG_ERR,("%s() Failed to initialize aggr.\n", __func__));
+ init_status = A_ERROR;
+ goto avail_ev_failed;
+ }
+
+ aggr_register_rx_dispatcher(ar->aggr_cntxt, (void *)dev, ar6000_deliver_frames_to_nw_stack);
+#endif
+
+ HIFClaimDevice(ar->arHifDevice, ar);
+
+ /* We only register the device in the global list if we succeed. */
+ /* If the device is in the global list, it will be destroyed */
+ /* when the module is unloaded. */
+ ar6000_devices[device_index] = dev;
+
+ /* Don't install the init function if BMI is requested */
+ if (!bmienable) {
+ ar6000_netdev_ops.ndo_init = ar6000_init;
+ } else {
+ AR_DEBUG_PRINTF(ATH_DEBUG_INFO, ("BMI enabled: %d\n", wlaninitmode));
+ if ((wlaninitmode == WLAN_INIT_MODE_UDEV) ||
+ (wlaninitmode == WLAN_INIT_MODE_DRV))
+ {
+ A_STATUS status = A_OK;
+ do {
+ if ((status = ar6000_sysfs_bmi_get_config(ar, wlaninitmode)) != A_OK)
+ {
+ AR_DEBUG_PRINTF(ATH_DEBUG_ERR,("ar6000_avail: ar6000_sysfs_bmi_get_config failed\n"));
+ break;
+ }
+#ifdef HTC_RAW_INTERFACE
+ break; /* Don't call ar6000_init for ART */
+#endif
+ rtnl_lock();
+ status = (ar6000_init(dev)==0) ? A_OK : A_ERROR;
+ rtnl_unlock();
+ if (status != A_OK) {
+ AR_DEBUG_PRINTF(ATH_DEBUG_ERR,("ar6000_avail: ar6000_init\n"));
+ }
+ } while (FALSE);
+
+ if (status != A_OK) {
+ init_status = status;
+ goto avail_ev_failed;
+ }
+ }
+ }
+
+ /* This runs the init function if registered */
+ if (register_netdev(dev)) {
+ AR_DEBUG_PRINTF(ATH_DEBUG_ERR,("ar6000_avail: register_netdev failed\n"));
+ ar6000_destroy(dev, 0);
+ return A_ERROR;
+ }
+
+ is_netdev_registered = 1;
+
+#ifdef CONFIG_AP_VIRTUAL_ADAPTER_SUPPORT
+ arApNetDev = NULL;
+#endif /* CONFIG_AP_VIRTUAL_ADAPTER_SUPPORT */
+ AR_DEBUG_PRINTF(ATH_DEBUG_INFO,("ar6000_avail: name=%s hifdevice=0x%lx, dev=0x%lx (%d), ar=0x%lx\n",
+ dev->name, (unsigned long)ar->arHifDevice, (unsigned long)dev, device_index,
+ (unsigned long)ar));
+
+avail_ev_failed :
+ if (A_FAILED(init_status)) {
+ if (bmienable) {
+ ar6000_sysfs_bmi_deinit(ar);
+ }
+ }
+
+ return init_status;
+}
+
+static void ar6000_target_failure(void *Instance, A_STATUS Status)
+{
+ AR_SOFTC_T *ar = (AR_SOFTC_T *)Instance;
+ WMI_TARGET_ERROR_REPORT_EVENT errEvent;
+ static A_BOOL sip = FALSE;
+
+ if (Status != A_OK) {
+
+ printk(KERN_ERR "ar6000_target_failure: target asserted \n");
+
+ if (timer_pending(&ar->arHBChallengeResp.timer)) {
+ A_UNTIMEOUT(&ar->arHBChallengeResp.timer);
+ }
+
+ /* try dumping target assertion information (if any) */
+ ar6000_dump_target_assert_info(ar->arHifDevice,ar->arTargetType);
+
+ /*
+ * Fetch the logs from the target via the diagnostic
+ * window.
+ */
+ ar6000_dbglog_get_debug_logs(ar);
+
+ /* Report the error only once */
+ if (!sip) {
+ sip = TRUE;
+ errEvent.errorVal = WMI_TARGET_COM_ERR |
+ WMI_TARGET_FATAL_ERR;
+ ar6000_send_event_to_app(ar, WMI_ERROR_REPORT_EVENTID,
+ (A_UINT8 *)&errEvent,
+ sizeof(WMI_TARGET_ERROR_REPORT_EVENT));
+ }
+ }
+}
+
+static A_STATUS
+ar6000_unavail_ev(void *context, void *hif_handle)
+{
+ AR_SOFTC_T *ar = (AR_SOFTC_T *)context;
+ /* NULL out it's entry in the global list */
+ ar6000_devices[ar->arDeviceIndex] = NULL;
+ ar6000_destroy(ar->arNetDev, 1);
+
+ return A_OK;
+}
+
+void
+ar6000_restart_endpoint(struct net_device *dev)
+{
+ A_STATUS status = A_OK;
+ AR_SOFTC_T *ar = (AR_SOFTC_T *)ar6k_priv(dev);
+
+ BMIInit();
+ do {
+ if ( (status=ar6000_configure_target(ar))!=A_OK)
+ break;
+ if ( (status=ar6000_sysfs_bmi_get_config(ar, wlaninitmode)) != A_OK)
+ {
+ AR_DEBUG_PRINTF(ATH_DEBUG_ERR,("ar6000_avail: ar6000_sysfs_bmi_get_config failed\n"));
+ break;
+ }
+ rtnl_lock();
+ status = (ar6000_init(dev)==0) ? A_OK : A_ERROR;
+ rtnl_unlock();
+
+ if (status!=A_OK) {
+ break;
+ }
+ if (ar->arSsidLen && ar->arWlanState == WLAN_ENABLED) {
+ ar6000_connect_to_ap(ar);
+ }
+ } while (0);
+
+ if (status==A_OK) {
+ return;
+ }
+
+ ar6000_devices[ar->arDeviceIndex] = NULL;
+ ar6000_destroy(ar->arNetDev, 1);
+}
+
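+/* Quiesce the interface: stop the TX queue, disconnect and shut down WMI
+ * (optionally pulling the debug logs first), clean up the HCI/PAL transports,
+ * stop HTC and, if allowed, reset the target. */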
+void
+ar6000_stop_endpoint(struct net_device *dev, A_BOOL keepprofile, A_BOOL getdbglogs)
+{
+ AR_SOFTC_T *ar = (AR_SOFTC_T *)ar6k_priv(dev);
+
+ /* Stop the transmit queues */
+ netif_stop_queue(dev);
+
+ /* Disable the target and the interrupts associated with it */
+ if (ar->arWmiReady == TRUE)
+ {
+ if (!bypasswmi)
+ {
+ if (ar->arConnected == TRUE || ar->arConnectPending == TRUE)
+ {
+ AR_DEBUG_PRINTF(ATH_DEBUG_INFO,("%s(): Disconnect\n", __func__));
+ if (!keepprofile) {
+ AR6000_SPIN_LOCK(&ar->arLock, 0);
+ ar6000_init_profile_info(ar);
+ AR6000_SPIN_UNLOCK(&ar->arLock, 0);
+ }
+ wmi_disconnect_cmd(ar->arWmi);
+ }
+
+ A_UNTIMEOUT(&ar->disconnect_timer);
+
+ if (getdbglogs) {
+ ar6000_dbglog_get_debug_logs(ar);
+ }
+
+ ar->arWmiReady = FALSE;
+ wmi_shutdown(ar->arWmi);
+ ar->arWmiEnabled = FALSE;
+ ar->arWmi = NULL;
+ /*
+ * After wmi_shutdown all WMI events will be dropped.
+ * We need to clean up the buffers allocated in AP mode
+ * and give a disconnect notification to the stack, which usually
+ * happens in the disconnect_event handler.
+ * Simulate the disconnect_event by calling the function directly.
+ * Sometimes a disconnect_event is received while the debug logs
+ * are being collected.
+ */
+ if (ar->arConnected == TRUE || ar->arConnectPending == TRUE) {
+ if(ar->arNetworkType & AP_NETWORK) {
+ ar6000_disconnect_event(ar, DISCONNECT_CMD, bcast_mac, 0, NULL, 0);
+ } else {
+ ar6000_disconnect_event(ar, DISCONNECT_CMD, ar->arBssid, 0, NULL, 0);
+ }
+ ar->arConnected = FALSE;
+ ar->arConnectPending = FALSE;
+ }
+#ifdef USER_KEYS
+ ar->user_savedkeys_stat = USER_SAVEDKEYS_STAT_INIT;
+ ar->user_key_ctrl = 0;
+#endif
+ }
+
+ AR_DEBUG_PRINTF(ATH_DEBUG_INFO,("%s(): WMI stopped\n", __func__));
+ }
+ else
+ {
+ AR_DEBUG_PRINTF(ATH_DEBUG_INFO,("%s(): WMI not ready 0x%lx 0x%lx\n",
+ __func__, (unsigned long) ar, (unsigned long) ar->arWmi));
+
+ /* Shut down WMI if we have started it */
+ if(ar->arWmiEnabled == TRUE)
+ {
+ AR_DEBUG_PRINTF(ATH_DEBUG_INFO,("%s(): Shut down WMI\n", __func__));
+ wmi_shutdown(ar->arWmi);
+ ar->arWmiEnabled = FALSE;
+ ar->arWmi = NULL;
+ }
+ }
+
+ if (ar->arHtcTarget != NULL) {
+#ifdef EXPORT_HCI_BRIDGE_INTERFACE
+ if (NULL != ar6kHciTransCallbacks.cleanupTransport) {
+ ar6kHciTransCallbacks.cleanupTransport(NULL);
+ }
+#else
+ // FIXME: workaround to reset BT's UART baud rate to default
+ if (NULL != ar->exitCallback) {
+ AR3K_CONFIG_INFO ar3kconfig;
+ A_STATUS status;
+
+ A_MEMZERO(&ar3kconfig,sizeof(ar3kconfig));
+ ar6000_set_default_ar3kconfig(ar, (void *)&ar3kconfig);
+ status = ar->exitCallback(&ar3kconfig);
+ if (A_OK != status) {
+ AR_DEBUG_PRINTF(ATH_DEBUG_ERR,("Failed to reset AR3K baud rate! \n"));
+ }
+ }
+ // END workaround
+ if (setuphci)
+ ar6000_cleanup_hci(ar);
+#endif
+#ifdef EXPORT_HCI_PAL_INTERFACE
+ if (setuphcipal && (NULL != ar6kHciPalCallbacks_g.cleanupTransport)) {
+ ar6kHciPalCallbacks_g.cleanupTransport(ar);
+ }
+#else
+ /* cleanup hci pal driver data structures */
+ if(setuphcipal)
+ ar6k_cleanup_hci_pal(ar);
+#endif
+ AR_DEBUG_PRINTF(ATH_DEBUG_INFO,(" Shutting down HTC .... \n"));
+ /* stop HTC */
+ HTCStop(ar->arHtcTarget);
+ }
+
+ if (resetok) {
+ /* Try to reset the device if we can.
+ * The driver may have been configured NOT to reset the target during
+ * a debug session. */
+ AR_DEBUG_PRINTF(ATH_DEBUG_INFO,(" Attempting to reset target on instance destroy.... \n"));
+ if (ar->arHifDevice != NULL) {
+ A_BOOL coldReset = (ar->arTargetType == TARGET_TYPE_AR6003) ? TRUE: FALSE;
+ ar6000_reset_device(ar->arHifDevice, ar->arTargetType, TRUE, coldReset);
+ }
+ } else {
+ AR_DEBUG_PRINTF(ATH_DEBUG_INFO,(" Host does not want target reset. \n"));
+ }
+ /* Done with cookies */
+ ar6000_cookie_cleanup(ar);
+}
+/*
+ * We need to differentiate between surprise and planned removal of the
+ * device for the following reasons:
+ * - In case of surprise removal, the hcd already frees up the pending
+ * requests for the device, so there is no need to unregister the function
+ * driver in order to get these requests back. For planned removal, the function
+ * driver has to explicitly unregister itself to have the hcd return all the
+ * pending requests before the data structures for the devices are freed up.
+ * Note that, as per the current implementation, the function driver will
+ * end up releasing all the devices since there is no API to selectively
+ * release a particular device.
+ * - Certain commands issued to the target can be skipped for surprise
+ * removal since they will not go through anyway.
+ */
+void
+ar6000_destroy(struct net_device *dev, unsigned int unregister)
+{
+ AR_SOFTC_T *ar;
+
+ AR_DEBUG_PRINTF(ATH_DEBUG_INFO,("+ar6000_destroy \n"));
+
+ if((dev == NULL) || ((ar = ar6k_priv(dev)) == NULL))
+ {
+ AR_DEBUG_PRINTF(ATH_DEBUG_ERR,("%s(): Failed to get device structure.\n", __func__));
+ return;
+ }
+
+ ar->bIsDestroyProgress = TRUE;
+
+ if (down_interruptible(&ar->arSem)) {
+ AR_DEBUG_PRINTF(ATH_DEBUG_ERR,("%s(): down_interruptible failed \n", __func__));
+ return;
+ }
+
+ if (ar->arWlanPowerState != WLAN_POWER_STATE_CUT_PWR) {
+ /* only stop the endpoint if we did not already stop it in suspend_ev */
+ ar6000_stop_endpoint(dev, FALSE, TRUE);
+ } else {
+ /* clear up the platform power state before rmmod */
+ plat_setup_power(1,0);
+ }
+
+ ar->arWlanState = WLAN_DISABLED;
+ if (ar->arHtcTarget != NULL) {
+ /* destroy HTC */
+ HTCDestroy(ar->arHtcTarget);
+ }
+ if (ar->arHifDevice != NULL) {
+ /* release the device so we do not get called back on remove in case
+ * we're explicitly destroyed by module unload */
+ HIFReleaseDevice(ar->arHifDevice);
+ HIFShutDownDevice(ar->arHifDevice);
+ }
+#ifdef ATH_AR6K_11N_SUPPORT
+ aggr_module_destroy(ar->aggr_cntxt);
+#endif
+
+ /* Done with cookies */
+ ar6000_cookie_cleanup(ar);
+
+ /* cleanup any allocated AMSDU buffers */
+ ar6000_cleanup_amsdu_rxbufs(ar);
+
+ if (bmienable) {
+ ar6000_sysfs_bmi_deinit(ar);
+ }
+
+ /* Cleanup BMI */
+ BMICleanup();
+
+ /* Clear the tx counters */
+ memset(tx_attempt, 0, sizeof(tx_attempt));
+ memset(tx_post, 0, sizeof(tx_post));
+ memset(tx_complete, 0, sizeof(tx_complete));
+
+#ifdef HTC_RAW_INTERFACE
+ if (ar->arRawHtc) {
+ A_FREE(ar->arRawHtc);
+ ar->arRawHtc = NULL;
+ }
+#endif
+ /* Free up the device data structure */
+ if (unregister && is_netdev_registered) {
+ unregister_netdev(dev);
+ is_netdev_registered = 0;
+ }
+ free_netdev(dev);
+
+#ifdef ATH6K_CONFIG_CFG80211
+ ar6k_cfg80211_deinit(ar);
+#endif /* ATH6K_CONFIG_CFG80211 */
+
+#ifdef CONFIG_AP_VIRTUL_ADAPTER_SUPPORT
+ ar6000_remove_ap_interface();
+#endif /* CONFIG_AP_VIRTUL_ADAPTER_SUPPORT */
+
+ AR_DEBUG_PRINTF(ATH_DEBUG_INFO,("-ar6000_destroy \n"));
+}
+
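+/* Disconnect timer expiry handler: cancel the timer, reset the connection
+ * profile and issue a WMI disconnect command to the target. */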
+static void disconnect_timer_handler(unsigned long ptr)
+{
+ struct net_device *dev = (struct net_device *)ptr;
+ AR_SOFTC_T *ar = (AR_SOFTC_T *)ar6k_priv(dev);
+
+ A_UNTIMEOUT(&ar->disconnect_timer);
+
+ ar6000_init_profile_info(ar);
+ wmi_disconnect_cmd(ar->arWmi);
+}
+
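+/* Heartbeat error-detection timer: count missed challenge responses; once the
+ * miss threshold is crossed, report a fatal target error to the application
+ * layer, otherwise send the next challenge and re-arm the timer. */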
+static void ar6000_detect_error(unsigned long ptr)
+{
+ struct net_device *dev = (struct net_device *)ptr;
+ AR_SOFTC_T *ar = (AR_SOFTC_T *)ar6k_priv(dev);
+ WMI_TARGET_ERROR_REPORT_EVENT errEvent;
+
+ AR6000_SPIN_LOCK(&ar->arLock, 0);
+
+ if (ar->arHBChallengeResp.outstanding) {
+ ar->arHBChallengeResp.missCnt++;
+ } else {
+ ar->arHBChallengeResp.missCnt = 0;
+ }
+
+ if (ar->arHBChallengeResp.missCnt > ar->arHBChallengeResp.missThres) {
+ /* Send Error Detect event to the application layer and do not reschedule the error detection module timer */
+ ar->arHBChallengeResp.missCnt = 0;
+ ar->arHBChallengeResp.seqNum = 0;
+ errEvent.errorVal = WMI_TARGET_COM_ERR | WMI_TARGET_FATAL_ERR;
+ AR6000_SPIN_UNLOCK(&ar->arLock, 0);
+ ar6000_send_event_to_app(ar, WMI_ERROR_REPORT_EVENTID,
+ (A_UINT8 *)&errEvent,
+ sizeof(WMI_TARGET_ERROR_REPORT_EVENT));
+ return;
+ }
+
+ /* Generate the sequence number for the next challenge */
+ ar->arHBChallengeResp.seqNum++;
+ ar->arHBChallengeResp.outstanding = TRUE;
+
+ AR6000_SPIN_UNLOCK(&ar->arLock, 0);
+
+ /* Send the challenge on the control channel */
+ if (wmi_get_challenge_resp_cmd(ar->arWmi, ar->arHBChallengeResp.seqNum, DRV_HB_CHALLENGE) != A_OK) {
+ AR_DEBUG_PRINTF(ATH_DEBUG_ERR,("Unable to send heart beat challenge\n"));
+ }
+
+
+ /* Reschedule the timer for the next challenge */
+ A_TIMEOUT_MS(&ar->arHBChallengeResp.timer, ar->arHBChallengeResp.frequency * 1000, 0);
+}
+
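+/* Reset the connection profile (SSID, BSSID, auth and crypto settings) to its
+ * defaults; the network type is derived from the global 'fwmode' setting. */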
+void ar6000_init_profile_info(AR_SOFTC_T *ar)
+{
+ ar->arSsidLen = 0;
+ A_MEMZERO(ar->arSsid, sizeof(ar->arSsid));
+
+ switch(fwmode) {
+ case HI_OPTION_FW_MODE_IBSS:
+ ar->arNetworkType = ar->arNextMode = ADHOC_NETWORK;
+ break;
+ case HI_OPTION_FW_MODE_BSS_STA:
+ ar->arNetworkType = ar->arNextMode = INFRA_NETWORK;
+ break;
+ case HI_OPTION_FW_MODE_AP:
+ ar->arNetworkType = ar->arNextMode = AP_NETWORK;
+ break;
+ }
+
+ ar->arDot11AuthMode = OPEN_AUTH;
+ ar->arAuthMode = NONE_AUTH;
+ ar->arPairwiseCrypto = NONE_CRYPT;
+ ar->arPairwiseCryptoLen = 0;
+ ar->arGroupCrypto = NONE_CRYPT;
+ ar->arGroupCryptoLen = 0;
+ A_MEMZERO(ar->arWepKeyList, sizeof(ar->arWepKeyList));
+ A_MEMZERO(ar->arReqBssid, sizeof(ar->arReqBssid));
+ A_MEMZERO(ar->arBssid, sizeof(ar->arBssid));
+ ar->arBssChannel = 0;
+ ar->arConnected = FALSE;
+}
+
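+/* One-time initialization of the driver control state: WMI/scan defaults plus
+ * the AP-mode station list and power-save queues. */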
+static void
+ar6000_init_control_info(AR_SOFTC_T *ar)
+{
+ ar->arWmiEnabled = FALSE;
+ ar6000_init_profile_info(ar);
+ ar->arDefTxKeyIndex = 0;
+ A_MEMZERO(ar->arWepKeyList, sizeof(ar->arWepKeyList));
+ ar->arChannelHint = 0;
+ ar->arListenIntervalT = A_DEFAULT_LISTEN_INTERVAL;
+ ar->arListenIntervalB = 0;
+ ar->arVersion.host_ver = AR6K_SW_VERSION;
+ ar->arRssi = 0;
+ ar->arTxPwr = 0;
+ ar->arTxPwrSet = FALSE;
+ ar->arSkipScan = 0;
+ ar->arBeaconInterval = 0;
+ ar->arBitRate = 0;
+ ar->arMaxRetries = 0;
+ ar->arWmmEnabled = TRUE;
+ ar->intra_bss = 1;
+ ar->scan_triggered = 0;
+ A_MEMZERO(&ar->scParams, sizeof(ar->scParams));
+ ar->scParams.shortScanRatio = WMI_SHORTSCANRATIO_DEFAULT;
+ ar->scParams.scanCtrlFlags = DEFAULT_SCAN_CTRL_FLAGS;
+
+ /* Initialize the AP mode state info */
+ {
+ A_UINT8 ctr;
+ A_MEMZERO((A_UINT8 *)ar->sta_list, AP_MAX_NUM_STA * sizeof(sta_t));
+
+ /* init the Mutexes */
+ A_MUTEX_INIT(&ar->mcastpsqLock);
+
+ /* Init the PS queues */
+ for (ctr=0; ctr < AP_MAX_NUM_STA ; ctr++) {
+ A_MUTEX_INIT(&ar->sta_list[ctr].psqLock);
+ A_NETBUF_QUEUE_INIT(&ar->sta_list[ctr].psq);
+ }
+
+ ar->ap_profile_flag = 0;
+ A_NETBUF_QUEUE_INIT(&ar->mcastpsq);
+
+ A_MEMCPY(ar->ap_country_code, DEF_AP_COUNTRY_CODE, 3);
+ ar->ap_wmode = DEF_AP_WMODE_G;
+ ar->ap_dtim_period = DEF_AP_DTIM;
+ ar->ap_beacon_interval = DEF_BEACON_INTERVAL;
+ }
+}
+
+static int
+ar6000_open(struct net_device *dev)
+{
+ unsigned long flags;
+ AR_SOFTC_T *ar = (AR_SOFTC_T *)ar6k_priv(dev);
+
+ spin_lock_irqsave(&ar->arLock, flags);
+
+#ifdef ATH6K_CONFIG_CFG80211
+ if(ar->arWlanState == WLAN_DISABLED) {
+ ar->arWlanState = WLAN_ENABLED;
+ }
+#endif /* ATH6K_CONFIG_CFG80211 */
+
+ if( ar->arConnected || bypasswmi) {
+ netif_carrier_on(dev);
+ /* Wake up the queues */
+ netif_wake_queue(dev);
+ }
+ else
+ netif_carrier_off(dev);
+
+ spin_unlock_irqrestore(&ar->arLock, flags);
+ return 0;
+}
+
+static int
+ar6000_close(struct net_device *dev)
+{
+#ifdef ATH6K_CONFIG_CFG80211
+ AR_SOFTC_T *ar = (AR_SOFTC_T *)ar6k_priv(dev);
+#endif /* ATH6K_CONFIG_CFG80211 */
+ netif_stop_queue(dev);
+
+#ifdef ATH6K_CONFIG_CFG80211
+ AR6000_SPIN_LOCK(&ar->arLock, 0);
+ if (ar->arConnected == TRUE || ar->arConnectPending == TRUE) {
+ AR6000_SPIN_UNLOCK(&ar->arLock, 0);
+ wmi_disconnect_cmd(ar->arWmi);
+ } else {
+ AR6000_SPIN_UNLOCK(&ar->arLock, 0);
+ }
+
+ if(ar->arWmiReady == TRUE) {
+ if (wmi_scanparams_cmd(ar->arWmi, 0xFFFF, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0) != A_OK) {
+ return -EIO;
+ }
+ ar->arWlanState = WLAN_DISABLED;
+ }
+#endif /* ATH6K_CONFIG_CFG80211 */
+
+ return 0;
+}
+
+/* connect to a service */
+static A_STATUS ar6000_connectservice(AR_SOFTC_T *ar,
+ HTC_SERVICE_CONNECT_REQ *pConnect,
+ char *pDesc)
+{
+ A_STATUS status;
+ HTC_SERVICE_CONNECT_RESP response;
+
+ do {
+
+ A_MEMZERO(&response,sizeof(response));
+
+ status = HTCConnectService(ar->arHtcTarget,
+ pConnect,
+ &response);
+
+ if (A_FAILED(status)) {
+ AR_DEBUG_PRINTF(ATH_DEBUG_ERR,(" Failed to connect to %s service status:%d \n",
+ pDesc, status));
+ break;
+ }
+ switch (pConnect->ServiceID) {
+ case WMI_CONTROL_SVC :
+ if (ar->arWmiEnabled) {
+ /* set control endpoint for WMI use */
+ wmi_set_control_ep(ar->arWmi, response.Endpoint);
+ }
+ /* save EP for fast lookup */
+ ar->arControlEp = response.Endpoint;
+ break;
+ case WMI_DATA_BE_SVC :
+ arSetAc2EndpointIDMap(ar, WMM_AC_BE, response.Endpoint);
+ break;
+ case WMI_DATA_BK_SVC :
+ arSetAc2EndpointIDMap(ar, WMM_AC_BK, response.Endpoint);
+ break;
+ case WMI_DATA_VI_SVC :
+ arSetAc2EndpointIDMap(ar, WMM_AC_VI, response.Endpoint);
+ break;
+ case WMI_DATA_VO_SVC :
+ arSetAc2EndpointIDMap(ar, WMM_AC_VO, response.Endpoint);
+ break;
+ default:
+ AR_DEBUG_PRINTF(ATH_DEBUG_ERR,("ServiceID not mapped %d\n", pConnect->ServiceID));
+ status = A_EINVAL;
+ break;
+ }
+
+ } while (FALSE);
+
+ return status;
+}
+
+void ar6000_TxDataCleanup(AR_SOFTC_T *ar)
+{
+ /* flush all the data (non-control) streams;
+ * we only flush packets that are tagged as data and leave any control packets
+ * that were in the TX queues alone */
+ HTCFlushEndpoint(ar->arHtcTarget,
+ arAc2EndpointID(ar, WMM_AC_BE),
+ AR6K_DATA_PKT_TAG);
+ HTCFlushEndpoint(ar->arHtcTarget,
+ arAc2EndpointID(ar, WMM_AC_BK),
+ AR6K_DATA_PKT_TAG);
+ HTCFlushEndpoint(ar->arHtcTarget,
+ arAc2EndpointID(ar, WMM_AC_VI),
+ AR6K_DATA_PKT_TAG);
+ HTCFlushEndpoint(ar->arHtcTarget,
+ arAc2EndpointID(ar, WMM_AC_VO),
+ AR6K_DATA_PKT_TAG);
+}
+
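+/* Thin wrappers translating between WMM access classes and HTC endpoint IDs
+ * for callers that only hold an opaque device pointer. */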
+HTC_ENDPOINT_ID
+ar6000_ac2_endpoint_id ( void * devt, A_UINT8 ac)
+{
+ AR_SOFTC_T *ar = (AR_SOFTC_T *) devt;
+ return(arAc2EndpointID(ar, ac));
+}
+
+A_UINT8
+ar6000_endpoint_id2_ac(void * devt, HTC_ENDPOINT_ID ep )
+{
+ AR_SOFTC_T *ar = (AR_SOFTC_T *) devt;
+ return(arEndpoint2Ac(ar, ep ));
+}
+
+/* This function does one time initialization for the lifetime of the device */
+int ar6000_init(struct net_device *dev)
+{
+ AR_SOFTC_T *ar;
+ A_STATUS status;
+ A_INT32 timeleft;
+ A_INT16 i;
+ int ret = 0;
+#if defined(INIT_MODE_DRV_ENABLED) && defined(ENABLE_COEXISTENCE)
+ WMI_SET_BTCOEX_COLOCATED_BT_DEV_CMD sbcb_cmd;
+ WMI_SET_BTCOEX_FE_ANT_CMD sbfa_cmd;
+#endif /* INIT_MODE_DRV_ENABLED && ENABLE_COEXISTENCE */
+
+ if((ar = ar6k_priv(dev)) == NULL)
+ {
+ return -EIO;
+ }
+
+ if (wlaninitmode == WLAN_INIT_MODE_USR || wlaninitmode == WLAN_INIT_MODE_DRV) {
+
+ ar6000_update_bdaddr(ar);
+
+ if (enablerssicompensation) {
+ ar6000_copy_cust_data_from_target(ar->arHifDevice, ar->arTargetType);
+ read_rssi_compensation_param(ar);
+ for (i=-95; i<=0; i++) {
+ rssi_compensation_table[0-i] = rssi_compensation_calc(ar,i);
+ }
+ }
+ }
+
+ dev_hold(dev);
+ rtnl_unlock();
+
+ /* Do we need to finish the BMI phase */
+ if ((wlaninitmode == WLAN_INIT_MODE_USR || wlaninitmode == WLAN_INIT_MODE_DRV) &&
+ (BMIDone(ar->arHifDevice) != A_OK))
+ {
+ ret = -EIO;
+ goto ar6000_init_done;
+ }
+
+ if (!bypasswmi)
+ {
+#if 0 /* TBDXXX */
+ if (ar->arVersion.host_ver != ar->arVersion.target_ver) {
+ A_PRINTF("WARNING: Host version 0x%x does not match Target "
+ " version 0x%x!\n",
+ ar->arVersion.host_ver, ar->arVersion.target_ver);
+ }
+#endif
+
+ /* Indicate that WMI is enabled (although not ready yet) */
+ ar->arWmiEnabled = TRUE;
+ if ((ar->arWmi = wmi_init((void *) ar)) == NULL)
+ {
+ AR_DEBUG_PRINTF(ATH_DEBUG_ERR,("%s() Failed to initialize WMI.\n", __func__));
+ ret = -EIO;
+ goto ar6000_init_done;
+ }
+
+ AR_DEBUG_PRINTF(ATH_DEBUG_ERR,("%s() Got WMI @ 0x%lx.\n", __func__,
+ (unsigned long) ar->arWmi));
+ }
+
+ do {
+ HTC_SERVICE_CONNECT_REQ connect;
+
+ /* the reason we have to wait for the target here is that the driver layer
+ * has to init BMI in order to set the host block size.
+ */
+ status = HTCWaitTarget(ar->arHtcTarget);
+
+ if (A_FAILED(status)) {
+ break;
+ }
+
+ A_MEMZERO(&connect,sizeof(connect));
+ /* meta data is unused for now */
+ connect.pMetaData = NULL;
+ connect.MetaDataLength = 0;
+ /* these fields are the same for all service endpoints */
+ connect.EpCallbacks.pContext = ar;
+ connect.EpCallbacks.EpTxCompleteMultiple = ar6000_tx_complete;
+ connect.EpCallbacks.EpRecv = ar6000_rx;
+ connect.EpCallbacks.EpRecvRefill = ar6000_rx_refill;
+ connect.EpCallbacks.EpSendFull = ar6000_tx_queue_full;
+ /* set the max queue depth so that our ar6000_tx_queue_full handler gets called.
+ * Linux has the peculiarity of not providing flow control between the
+ * NIC and the network stack. There is no API to indicate that a TX packet
+ * was sent which could provide some back pressure to the network stack.
+ * Under linux you would have to wait till the network stack consumed all sk_buffs
+ * before any back-flow kicked in, which isn't very friendly.
+ * So we have to manage this ourselves */
+ connect.MaxSendQueueDepth = MAX_DEFAULT_SEND_QUEUE_DEPTH;
+ connect.EpCallbacks.RecvRefillWaterMark = AR6000_MAX_RX_BUFFERS / 4; /* set to 25 % */
+ if (0 == connect.EpCallbacks.RecvRefillWaterMark) {
+ connect.EpCallbacks.RecvRefillWaterMark++;
+ }
+ /* connect to control service */
+ connect.ServiceID = WMI_CONTROL_SVC;
+ status = ar6000_connectservice(ar,
+ &connect,
+ "WMI CONTROL");
+ if (A_FAILED(status)) {
+ break;
+ }
+
+ connect.LocalConnectionFlags |= HTC_LOCAL_CONN_FLAGS_ENABLE_SEND_BUNDLE_PADDING;
+ /* limit the HTC message size on the send path; although we can receive A-MSDU frames of
+ * 4K, we will only send ethernet-sized (802.3) frames. */
+ connect.MaxSendMsgSize = WMI_MAX_TX_DATA_FRAME_LENGTH;
+
+ /* to reduce the amount of committed memory for larger A_MSDU frames, use the recv-alloc threshold
+ * mechanism for larger packets */
+ connect.EpCallbacks.RecvAllocThreshold = AR6000_BUFFER_SIZE;
+ connect.EpCallbacks.EpRecvAllocThresh = ar6000_alloc_amsdu_rxbuf;
+
+ /* for the remaining data services set the connection flag to reduce dribbling,
+ * if configured to do so */
+ if (reduce_credit_dribble) {
+ connect.ConnectionFlags |= HTC_CONNECT_FLAGS_REDUCE_CREDIT_DRIBBLE;
+ /* the credit dribble trigger threshold is (reduce_credit_dribble - 1) for a value
+ * of 0-3 */
+ connect.ConnectionFlags &= ~HTC_CONNECT_FLAGS_THRESHOLD_LEVEL_MASK;
+ connect.ConnectionFlags |=
+ ((A_UINT16)reduce_credit_dribble - 1) & HTC_CONNECT_FLAGS_THRESHOLD_LEVEL_MASK;
+ }
+ /* connect to best-effort service */
+ connect.ServiceID = WMI_DATA_BE_SVC;
+
+ status = ar6000_connectservice(ar,
+ &connect,
+ "WMI DATA BE");
+ if (A_FAILED(status)) {
+ break;
+ }
+
+ /* connect to background service,
+ * map this to WMI LOW_PRI */
+ connect.ServiceID = WMI_DATA_BK_SVC;
+ status = ar6000_connectservice(ar,
+ &connect,
+ "WMI DATA BK");
+ if (A_FAILED(status)) {
+ break;
+ }
+
+ /* connect to Video service, map this
+ * to HI PRI */
+ connect.ServiceID = WMI_DATA_VI_SVC;
+ status = ar6000_connectservice(ar,
+ &connect,
+ "WMI DATA VI");
+ if (A_FAILED(status)) {
+ break;
+ }
+
+ /* connect to VO service, this is currently not
+ * mapped to a WMI priority stream due to historical reasons.
+ * WMI originally defined 3 priorities over 3 mailboxes.
+ * We can change this when WMI is reworked so that priorities are not
+ * dependent on mailboxes */
+ connect.ServiceID = WMI_DATA_VO_SVC;
+ status = ar6000_connectservice(ar,
+ &connect,
+ "WMI DATA VO");
+ if (A_FAILED(status)) {
+ break;
+ }
+
+ A_ASSERT(arAc2EndpointID(ar,WMM_AC_BE) != 0);
+ A_ASSERT(arAc2EndpointID(ar,WMM_AC_BK) != 0);
+ A_ASSERT(arAc2EndpointID(ar,WMM_AC_VI) != 0);
+ A_ASSERT(arAc2EndpointID(ar,WMM_AC_VO) != 0);
+
+ /* setup access class priority mappings */
+ ar->arAcStreamPriMap[WMM_AC_BK] = 0; /* lowest */
+ ar->arAcStreamPriMap[WMM_AC_BE] = 1; /* */
+ ar->arAcStreamPriMap[WMM_AC_VI] = 2; /* */
+ ar->arAcStreamPriMap[WMM_AC_VO] = 3; /* highest */
+
+#ifdef EXPORT_HCI_BRIDGE_INTERFACE
+ if (setuphci && (NULL != ar6kHciTransCallbacks.setupTransport)) {
+ HCI_TRANSPORT_MISC_HANDLES hciHandles;
+
+ hciHandles.netDevice = ar->arNetDev;
+ hciHandles.hifDevice = ar->arHifDevice;
+ hciHandles.htcHandle = ar->arHtcTarget;
+ status = (A_STATUS)(ar6kHciTransCallbacks.setupTransport(&hciHandles));
+ }
+#else
+ if (setuphci) {
+ /* setup HCI */
+ status = ar6000_setup_hci(ar);
+ }
+#endif
+#ifdef EXPORT_HCI_PAL_INTERFACE
+ if (setuphcipal && (NULL != ar6kHciPalCallbacks_g.setupTransport))
+ status = ar6kHciPalCallbacks_g.setupTransport(ar);
+#else
+ if(setuphcipal)
+ status = ar6k_setup_hci_pal(ar);
+#endif
+
+ } while (FALSE);
+
+ if (A_FAILED(status)) {
+ ret = -EIO;
+ goto ar6000_init_done;
+ }
+
+ /*
+ * give our connected endpoints some buffers
+ */
+
+ ar6000_rx_refill(ar, ar->arControlEp);
+ ar6000_rx_refill(ar, arAc2EndpointID(ar,WMM_AC_BE));
+
+ /*
+ * We will post the receive buffers only for SPE or endpoint ping testing so we are
+ * making it conditional on the 'bypasswmi' flag.
+ */
+ if (bypasswmi) {
+ ar6000_rx_refill(ar,arAc2EndpointID(ar,WMM_AC_BK));
+ ar6000_rx_refill(ar,arAc2EndpointID(ar,WMM_AC_VI));
+ ar6000_rx_refill(ar,arAc2EndpointID(ar,WMM_AC_VO));
+ }
+
+ /* allocate some buffers that handle larger AMSDU frames */
+ ar6000_refill_amsdu_rxbufs(ar,AR6000_MAX_AMSDU_RX_BUFFERS);
+
+ /* setup credit distribution */
+ ar6000_setup_credit_dist(ar->arHtcTarget, &ar->arCreditStateInfo);
+
+ /* Since cookies are used for HTC transports, they should be */
+ /* initialized prior to enabling HTC. */
+ ar6000_cookie_init(ar);
+
+ /* start HTC */
+ status = HTCStart(ar->arHtcTarget);
+
+ if (status != A_OK) {
+ if (ar->arWmiEnabled == TRUE) {
+ wmi_shutdown(ar->arWmi);
+ ar->arWmiEnabled = FALSE;
+ ar->arWmi = NULL;
+ }
+ ar6000_cookie_cleanup(ar);
+ ret = -EIO;
+ goto ar6000_init_done;
+ }
+
+ if (!bypasswmi) {
+ /* Wait for Wmi event to be ready */
+ timeleft = wait_event_interruptible_timeout(arEvent,
+ (ar->arWmiReady == TRUE), wmitimeout * HZ);
+
+ if (ar->arVersion.abi_ver != AR6K_ABI_VERSION) {
+ AR_DEBUG_PRINTF(ATH_DEBUG_ERR,("ABI Version mismatch: Host(0x%x), Target(0x%x)\n", AR6K_ABI_VERSION, ar->arVersion.abi_ver));
+#ifndef ATH6K_SKIP_ABI_VERSION_CHECK
+ ret = -EIO;
+ goto ar6000_init_done;
+#endif /* ATH6K_SKIP_ABI_VERSION_CHECK */
+ }
+
+ if(!timeleft || signal_pending(current))
+ {
+ AR_DEBUG_PRINTF(ATH_DEBUG_ERR,("WMI is not ready or wait was interrupted\n"));
+ ret = -EIO;
+ goto ar6000_init_done;
+ }
+
+ AR_DEBUG_PRINTF(ATH_DEBUG_ERR,("%s() WMI is ready\n", __func__));
+
+ /* Communicate the wmi protocol version to the target */
+ if ((ar6000_set_host_app_area(ar)) != A_OK) {
+ AR_DEBUG_PRINTF(ATH_DEBUG_ERR,("Unable to set the host app area\n"));
+ }
+
+ /* configure the device for rx dot11 header rules. 0,0 are the default values
+ * and therefore this command can be skipped if the inputs are 0,FALSE,FALSE. It is
+ * required if checksum offload is needed; set RxMetaVersion to 2. */
+ if ((wmi_set_rx_frame_format_cmd(ar->arWmi,ar->rxMetaVersion, processDot11Hdr, processDot11Hdr)) != A_OK) {
+ AR_DEBUG_PRINTF(ATH_DEBUG_ERR,("Unable to set the rx frame format.\n"));
+ }
+
+#if defined(INIT_MODE_DRV_ENABLED) && defined(ENABLE_COEXISTENCE)
+ /* Configure the type of BT collocated with WLAN */
+ A_MEMZERO(&sbcb_cmd, sizeof(WMI_SET_BTCOEX_COLOCATED_BT_DEV_CMD));
+#ifdef CONFIG_AR600x_BT_QCOM
+ sbcb_cmd.btcoexCoLocatedBTdev = 1;
+#elif defined(CONFIG_AR600x_BT_CSR)
+ sbcb_cmd.btcoexCoLocatedBTdev = 2;
+#elif defined(CONFIG_AR600x_BT_AR3001)
+ sbcb_cmd.btcoexCoLocatedBTdev = 3;
+#else
+#error Unsupported Bluetooth Type
+#endif /* Collocated Bluetooth Type */
+
+ if ((wmi_set_btcoex_colocated_bt_dev_cmd(ar->arWmi, &sbcb_cmd)) != A_OK)
+ {
+ AR_DEBUG_PRINTF(ATH_DEBUG_ERR,("Unable to set collocated BT type\n"));
+ }
+
+ /* Configure the front-end antenna used with the collocated BT */
+ A_MEMZERO(&sbfa_cmd, sizeof(WMI_SET_BTCOEX_FE_ANT_CMD));
+#ifdef CONFIG_AR600x_DUAL_ANTENNA
+ sbfa_cmd.btcoexFeAntType = 2;
+#elif defined(CONFIG_AR600x_SINGLE_ANTENNA)
+ sbfa_cmd.btcoexFeAntType = 1;
+#else
+#error Unsupported Front-End Antenna Configuration
+#endif /* AR600x Front-End Antenna Configuration */
+
+ if ((wmi_set_btcoex_fe_ant_cmd(ar->arWmi, &sbfa_cmd)) != A_OK) {
+ AR_DEBUG_PRINTF(ATH_DEBUG_ERR,("Unable to set front end antenna configuration\n"));
+ }
+#endif /* INIT_MODE_DRV_ENABLED && ENABLE_COEXISTENCE */
+ }
+
+ ar->arNumDataEndPts = 1;
+
+ if (bypasswmi) {
+ /* for tests like endpoint ping, the MAC address needs to be non-zero otherwise
+ * the data path through a raw socket is disabled */
+ dev->dev_addr[0] = 0x00;
+ dev->dev_addr[1] = 0x01;
+ dev->dev_addr[2] = 0x02;
+ dev->dev_addr[3] = 0xAA;
+ dev->dev_addr[4] = 0xBB;
+ dev->dev_addr[5] = 0xCC;
+ }
+
+ar6000_init_done:
+ rtnl_lock();
+ dev_put(dev);
+
+ return ret;
+}
+
+
+void
+ar6000_bitrate_rx(void *devt, A_INT32 rateKbps)
+{
+ AR_SOFTC_T *ar = (AR_SOFTC_T *)devt;
+
+ ar->arBitRate = rateKbps;
+ wake_up(&arEvent);
+}
+
+void
+ar6000_ratemask_rx(void *devt, A_UINT32 ratemask)
+{
+ AR_SOFTC_T *ar = (AR_SOFTC_T *)devt;
+
+ ar->arRateMask = ratemask;
+ wake_up(&arEvent);
+}
+
+void
+ar6000_txPwr_rx(void *devt, A_UINT8 txPwr)
+{
+ AR_SOFTC_T *ar = (AR_SOFTC_T *)devt;
+
+ ar->arTxPwr = txPwr;
+ wake_up(&arEvent);
+}
+
+
+void
+ar6000_channelList_rx(void *devt, A_INT8 numChan, A_UINT16 *chanList)
+{
+ AR_SOFTC_T *ar = (AR_SOFTC_T *)devt;
+
+ A_MEMCPY(ar->arChannelList, chanList, numChan * sizeof (A_UINT16));
+ ar->arNumChannels = numChan;
+
+ wake_up(&arEvent);
+}
+
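+/* IBSS power-save mapping: map the destination MAC of a frame to one of the
+ * data endpoints (multicast always uses ENDPOINT_2) and track the per-node
+ * pending-tx count. */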
+A_UINT8
+ar6000_ibss_map_epid(struct sk_buff *skb, struct net_device *dev, A_UINT32 * mapNo)
+{
+ AR_SOFTC_T *ar = (AR_SOFTC_T *)ar6k_priv(dev);
+ A_UINT8 *datap;
+ ATH_MAC_HDR *macHdr;
+ A_UINT32 i, eptMap;
+
+ (*mapNo) = 0;
+ datap = A_NETBUF_DATA(skb);
+ macHdr = (ATH_MAC_HDR *)(datap + sizeof(WMI_DATA_HDR));
+ if (IEEE80211_IS_MULTICAST(macHdr->dstMac)) {
+ return ENDPOINT_2;
+ }
+
+ eptMap = -1;
+ for (i = 0; i < ar->arNodeNum; i ++) {
+ if (IEEE80211_ADDR_EQ(macHdr->dstMac, ar->arNodeMap[i].macAddress)) {
+ (*mapNo) = i + 1;
+ ar->arNodeMap[i].txPending ++;
+ return ar->arNodeMap[i].epId;
+ }
+
+ if ((eptMap == -1) && !ar->arNodeMap[i].txPending) {
+ eptMap = i;
+ }
+ }
+
+ if (eptMap == -1) {
+ eptMap = ar->arNodeNum;
+ ar->arNodeNum ++;
+ A_ASSERT(ar->arNodeNum <= MAX_NODE_NUM);
+ }
+
+ A_MEMCPY(ar->arNodeMap[eptMap].macAddress, macHdr->dstMac, IEEE80211_ADDR_LEN);
+
+ for (i = ENDPOINT_2; i <= ENDPOINT_5; i ++) {
+ if (!ar->arTxPending[i]) {
+ ar->arNodeMap[eptMap].epId = i;
+ break;
+ }
+ // No free endpoint is available, start redistribution on the in-use endpoints.
+ if (i == ENDPOINT_5) {
+ ar->arNodeMap[eptMap].epId = ar->arNexEpId;
+ ar->arNexEpId ++;
+ if (ar->arNexEpId > ENDPOINT_5) {
+ ar->arNexEpId = ENDPOINT_2;
+ }
+ }
+ }
+
+ (*mapNo) = eptMap + 1;
+ ar->arNodeMap[eptMap].txPending ++;
+
+ return ar->arNodeMap[eptMap].epId;
+}
+
+#ifdef DEBUG
+static void ar6000_dump_skb(struct sk_buff *skb)
+{
+ u_char *ch;
+ for (ch = A_NETBUF_DATA(skb);
+ (unsigned long)ch < ((unsigned long)A_NETBUF_DATA(skb) +
+ A_NETBUF_LEN(skb)); ch++)
+ {
+ AR_DEBUG_PRINTF(ATH_DEBUG_WARN,("%2.2x ", *ch));
+ }
+ AR_DEBUG_PRINTF(ATH_DEBUG_WARN,("\n"));
+}
+#endif
+
+#ifdef HTC_TEST_SEND_PKTS
+static void DoHTCSendPktsTest(AR_SOFTC_T *ar, int MapNo, HTC_ENDPOINT_ID eid, struct sk_buff *skb);
+#endif
+
+static int
+ar6000_data_tx(struct sk_buff *skb, struct net_device *dev)
+{
+#define AC_NOT_MAPPED 99
+ AR_SOFTC_T *ar = (AR_SOFTC_T *)ar6k_priv(dev);
+ A_UINT8 ac = AC_NOT_MAPPED;
+ HTC_ENDPOINT_ID eid = ENDPOINT_UNUSED;
+ A_UINT32 mapNo = 0;
+ int len;
+ struct ar_cookie *cookie;
+ A_BOOL checkAdHocPsMapping = FALSE,bMoreData = FALSE;
+ HTC_TX_TAG htc_tag = AR6K_DATA_PKT_TAG;
+ A_UINT8 dot11Hdr = processDot11Hdr;
+#ifdef CONFIG_PM
+ if (ar->arWowState != WLAN_WOW_STATE_NONE) {
+ A_NETBUF_FREE(skb);
+ return 0;
+ }
+#endif /* CONFIG_PM */
+
+ AR_DEBUG_PRINTF(ATH_DEBUG_WLAN_TX,("ar6000_data_tx start - skb=0x%lx, data=0x%lx, len=0x%x\n",
+ (unsigned long)skb, (unsigned long)A_NETBUF_DATA(skb),
+ A_NETBUF_LEN(skb)));
+
+ /* If target is not associated */
+ if( (!ar->arConnected && !bypasswmi)
+#ifdef CONFIG_HOST_TCMD_SUPPORT
+ /* TCMD doesn't support any data, free the buf and return */
+ || (ar->arTargetMode == AR6000_TCMD_MODE)
+#endif
+ ) {
+ A_NETBUF_FREE(skb);
+ return 0;
+ }
+
+ do {
+
+ if (ar->arWmiReady == FALSE && bypasswmi == 0) {
+ break;
+ }
+
+#ifdef BLOCK_TX_PATH_FLAG
+ if (blocktx) {
+ break;
+ }
+#endif /* BLOCK_TX_PATH_FLAG */
+
+ /* AP mode Power save processing */
+ /* If the dst STA is in sleep state, queue the pkt in its PS queue */
+
+ if (ar->arNetworkType == AP_NETWORK) {
+ ATH_MAC_HDR *datap = (ATH_MAC_HDR *)A_NETBUF_DATA(skb);
+ sta_t *conn = NULL;
+
+ /* If the dstMac is a Multicast address & at least one of the
+ * associated STAs is in PS mode, then queue the pkt to the
+ * mcastq
+ */
+ if (IEEE80211_IS_MULTICAST(datap->dstMac)) {
+ A_UINT8 ctr=0;
+ A_BOOL qMcast=FALSE;
+
+
+ for (ctr=0; ctr<AP_MAX_NUM_STA; ctr++) {
+ if (STA_IS_PWR_SLEEP((&ar->sta_list[ctr]))) {
+ qMcast = TRUE;
+ }
+ }
+ if(qMcast) {
+
+ /* If this transmit is not because of a DTIM expiry, queue it */
+ if (ar->DTIMExpired == FALSE) {
+ A_BOOL isMcastqEmpty = FALSE;
+
+ A_MUTEX_LOCK(&ar->mcastpsqLock);
+ isMcastqEmpty = A_NETBUF_QUEUE_EMPTY(&ar->mcastpsq);
+ A_NETBUF_ENQUEUE(&ar->mcastpsq, skb);
+ A_MUTEX_UNLOCK(&ar->mcastpsqLock);
+
+ /* If this is the first Mcast pkt getting queued
+ * indicate to the target to set the BitmapControl LSB
+ * of the TIM IE.
+ */
+ if (isMcastqEmpty) {
+ wmi_set_pvb_cmd(ar->arWmi, MCAST_AID, 1);
+ }
+ return 0;
+ } else {
+ /* This transmit is because of Dtim expiry. Determine if
+ * MoreData bit has to be set.
+ */
+ A_MUTEX_LOCK(&ar->mcastpsqLock);
+ if(!A_NETBUF_QUEUE_EMPTY(&ar->mcastpsq)) {
+ bMoreData = TRUE;
+ }
+ A_MUTEX_UNLOCK(&ar->mcastpsqLock);
+ }
+ }
+ } else {
+ conn = ieee80211_find_conn(ar, datap->dstMac);
+ if (conn) {
+ if (STA_IS_PWR_SLEEP(conn)) {
+ /* If this transmit is not because of a PsPoll, queue it */
+ if (!STA_IS_PS_POLLED(conn)) {
+ A_BOOL isPsqEmpty = FALSE;
+ /* Queue the frames if the STA is sleeping */
+ A_MUTEX_LOCK(&conn->psqLock);
+ isPsqEmpty = A_NETBUF_QUEUE_EMPTY(&conn->psq);
+ A_NETBUF_ENQUEUE(&conn->psq, skb);
+ A_MUTEX_UNLOCK(&conn->psqLock);
+
+ /* If this is the first pkt getting queued
+ * for this STA, update the PVB for this STA
+ */
+ if (isPsqEmpty) {
+ wmi_set_pvb_cmd(ar->arWmi, conn->aid, 1);
+ }
+
+ return 0;
+ } else {
+ /* This tx is because of a PsPoll. Determine if
+ * MoreData bit has to be set
+ */
+ A_MUTEX_LOCK(&conn->psqLock);
+ if (!A_NETBUF_QUEUE_EMPTY(&conn->psq)) {
+ bMoreData = TRUE;
+ }
+ A_MUTEX_UNLOCK(&conn->psqLock);
+ }
+ }
+ } else {
+
+ /* non-existent STA, drop the frame */
+ A_NETBUF_FREE(skb);
+ return 0;
+ }
+ }
+ }
+
+ if (ar->arWmiEnabled) {
+#ifdef CONFIG_CHECKSUM_OFFLOAD
+ A_UINT8 csumStart=0;
+ A_UINT8 csumDest=0;
+ A_UINT8 csum=skb->ip_summed;
+ if(csumOffload && (csum==CHECKSUM_PARTIAL)){
+ csumStart = (skb->head + skb->csum_start - skb_network_header(skb) +
+ sizeof(ATH_LLC_SNAP_HDR));
+ csumDest=skb->csum_offset+csumStart;
+ }
+#endif
+ if (A_NETBUF_HEADROOM(skb) < dev->hard_header_len - LINUX_HACK_FUDGE_FACTOR) {
+ struct sk_buff *newbuf;
+
+ /*
+ * We really should have gotten enough headroom but sometimes
+ * we still get packets with not enough headroom. Copy the packet.
+ */
+ len = A_NETBUF_LEN(skb);
+ newbuf = A_NETBUF_ALLOC(len);
+ if (newbuf == NULL) {
+ break;
+ }
+ A_NETBUF_PUT(newbuf, len);
+ A_MEMCPY(A_NETBUF_DATA(newbuf), A_NETBUF_DATA(skb), len);
+ A_NETBUF_FREE(skb);
+ skb = newbuf;
+ /* fall through and assemble header */
+ }
+
+ if (dot11Hdr) {
+ if (wmi_dot11_hdr_add(ar->arWmi,skb,ar->arNetworkType) != A_OK) {
+ AR_DEBUG_PRINTF(ATH_DEBUG_ERR,("ar6000_data_tx-wmi_dot11_hdr_add failed\n"));
+ break;
+ }
+ } else {
+ if (wmi_dix_2_dot3(ar->arWmi, skb) != A_OK) {
+ AR_DEBUG_PRINTF(ATH_DEBUG_ERR,("ar6000_data_tx - wmi_dix_2_dot3 failed\n"));
+ break;
+ }
+ }
+#ifdef CONFIG_CHECKSUM_OFFLOAD
+ if(csumOffload && (csum ==CHECKSUM_PARTIAL)){
+ WMI_TX_META_V2 metaV2;
+ metaV2.csumStart =csumStart;
+ metaV2.csumDest = csumDest;
+ metaV2.csumFlags = 0x1;/*instruct target to calculate checksum*/
+ if (wmi_data_hdr_add(ar->arWmi, skb, DATA_MSGTYPE, bMoreData, dot11Hdr,
+ WMI_META_VERSION_2,&metaV2) != A_OK) {
+ AR_DEBUG_PRINTF(ATH_DEBUG_ERR,("ar6000_data_tx - wmi_data_hdr_add failed\n"));
+ break;
+ }
+
+ }
+ else
+#endif
+ {
+ if (wmi_data_hdr_add(ar->arWmi, skb, DATA_MSGTYPE, bMoreData, dot11Hdr,0,NULL) != A_OK) {
+ AR_DEBUG_PRINTF(ATH_DEBUG_ERR,("ar6000_data_tx - wmi_data_hdr_add failed\n"));
+ break;
+ }
+ }
+
+
+ if ((ar->arNetworkType == ADHOC_NETWORK) &&
+ ar->arIbssPsEnable && ar->arConnected) {
+ /* flag to check adhoc mapping once we take the lock below: */
+ checkAdHocPsMapping = TRUE;
+
+ } else {
+ /* get the stream mapping */
+ ac = wmi_implicit_create_pstream(ar->arWmi, skb, 0, ar->arWmmEnabled);
+ }
+
+ } else {
+ EPPING_HEADER *eppingHdr;
+
+ eppingHdr = A_NETBUF_DATA(skb);
+
+ if (IS_EPPING_PACKET(eppingHdr)) {
+ /* the stream ID is mapped to an access class */
+ ac = eppingHdr->StreamNo_h;
+ /* some EPPING packets cannot be dropped no matter what access class they were
+ * sent on. We can change the packet tag to guarantee it will not get dropped */
+ if (IS_EPING_PACKET_NO_DROP(eppingHdr)) {
+ htc_tag = AR6K_CONTROL_PKT_TAG;
+ }
+
+ if (ac == HCI_TRANSPORT_STREAM_NUM) {
+ /* pass this to HCI */
+#ifndef EXPORT_HCI_BRIDGE_INTERFACE
+ if (A_SUCCESS(hci_test_send(ar,skb))) {
+ return 0;
+ }
+#endif
+ /* set AC to discard this skb */
+ ac = AC_NOT_MAPPED;
+ } else {
+ /* a quirk of linux, the payload of the frame is 32-bit aligned and thus the addition
+ * of the HTC header will mis-align the start of the HTC frame, so we add some
+ * padding which will be stripped off in the target */
+ if (EPPING_ALIGNMENT_PAD > 0) {
+ A_NETBUF_PUSH(skb, EPPING_ALIGNMENT_PAD);
+ }
+ }
+
+ } else {
+ /* not a ping packet, drop it */
+ ac = AC_NOT_MAPPED;
+ }
+ }
+
+ } while (FALSE);
+
+ /* did we succeed ? */
+ if ((ac == AC_NOT_MAPPED) && !checkAdHocPsMapping) {
+ /* cleanup and exit */
+ A_NETBUF_FREE(skb);
+ AR6000_STAT_INC(ar, tx_dropped);
+ AR6000_STAT_INC(ar, tx_aborted_errors);
+ return 0;
+ }
+
+ cookie = NULL;
+
+ /* take the lock to protect driver data */
+ AR6000_SPIN_LOCK(&ar->arLock, 0);
+
+ do {
+
+ if (checkAdHocPsMapping) {
+ eid = ar6000_ibss_map_epid(skb, dev, &mapNo);
+ }else {
+ eid = arAc2EndpointID (ar, ac);
+ }
+ /* validate that the endpoint is connected */
+ if (eid == 0 || eid == ENDPOINT_UNUSED ) {
+ AR_DEBUG_PRINTF(ATH_DEBUG_ERR,(" eid %d is NOT mapped!\n", eid));
+ break;
+ }
+ /* allocate resource for this packet */
+ cookie = ar6000_alloc_cookie(ar);
+
+ if (cookie != NULL) {
+ /* update counts while the lock is held */
+ ar->arTxPending[eid]++;
+ ar->arTotalTxDataPending++;
+ }
+
+ } while (FALSE);
+
+ AR6000_SPIN_UNLOCK(&ar->arLock, 0);
+
+ if (cookie != NULL) {
+ cookie->arc_bp[0] = (unsigned long)skb;
+ cookie->arc_bp[1] = mapNo;
+ SET_HTC_PACKET_INFO_TX(&cookie->HtcPkt,
+ cookie,
+ A_NETBUF_DATA(skb),
+ A_NETBUF_LEN(skb),
+ eid,
+ htc_tag);
+
+#ifdef DEBUG
+ if (debugdriver >= 3) {
+ ar6000_dump_skb(skb);
+ }
+#endif
+#ifdef HTC_TEST_SEND_PKTS
+ DoHTCSendPktsTest(ar,mapNo,eid,skb);
+#endif
+ /* HTC interface is asynchronous, if this fails, cleanup will happen in
+ * the ar6000_tx_complete callback */
+ HTCSendPkt(ar->arHtcTarget, &cookie->HtcPkt);
+ } else {
+ /* no packet to send, cleanup */
+ A_NETBUF_FREE(skb);
+ AR6000_STAT_INC(ar, tx_dropped);
+ AR6000_STAT_INC(ar, tx_aborted_errors);
+ }
+
+ return 0;
+}
+
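+/* Send an ACL data frame to the target; for now it simply goes out on the
+ * BE data endpoint. */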
+int
+ar6000_acl_data_tx(struct sk_buff *skb, struct net_device *dev)
+{
+ AR_SOFTC_T *ar = (AR_SOFTC_T *)ar6k_priv(dev);
+ struct ar_cookie *cookie;
+ HTC_ENDPOINT_ID eid = ENDPOINT_UNUSED;
+
+ cookie = NULL;
+ AR6000_SPIN_LOCK(&ar->arLock, 0);
+
+ /* For now we send ACL on BE endpoint: We can also have a dedicated EP */
+ eid = arAc2EndpointID (ar, 0);
+ /* allocate resource for this packet */
+ cookie = ar6000_alloc_cookie(ar);
+
+ if (cookie != NULL) {
+ /* update counts while the lock is held */
+ ar->arTxPending[eid]++;
+ ar->arTotalTxDataPending++;
+ }
+
+
+ AR6000_SPIN_UNLOCK(&ar->arLock, 0);
+
+ if (cookie != NULL) {
+ cookie->arc_bp[0] = (unsigned long)skb;
+ cookie->arc_bp[1] = 0;
+ SET_HTC_PACKET_INFO_TX(&cookie->HtcPkt,
+ cookie,
+ A_NETBUF_DATA(skb),
+ A_NETBUF_LEN(skb),
+ eid,
+ AR6K_DATA_PKT_TAG);
+
+ /* HTC interface is asynchronous, if this fails, cleanup will happen in
+ * the ar6000_tx_complete callback */
+ HTCSendPkt(ar->arHtcTarget, &cookie->HtcPkt);
+ } else {
+ /* no packet to send, cleanup */
+ A_NETBUF_FREE(skb);
+ AR6000_STAT_INC(ar, tx_dropped);
+ AR6000_STAT_INC(ar, tx_aborted_errors);
+ }
+ return 0;
+}
+
+
+#ifdef ADAPTIVE_POWER_THROUGHPUT_CONTROL
+static void
+tvsub(register struct timeval *out, register struct timeval *in)
+{
+ if((out->tv_usec -= in->tv_usec) < 0) {
+ out->tv_sec--;
+ out->tv_usec += 1000000;
+ }
+ out->tv_sec -= in->tv_sec;
+}
+
+void
+applyAPTCHeuristics(AR_SOFTC_T *ar)
+{
+ A_UINT32 duration;
+ A_UINT32 numbytes;
+ A_UINT32 throughput;
+ struct timeval ts;
+ A_STATUS status;
+
+ AR6000_SPIN_LOCK(&ar->arLock, 0);
+
+ if ((enableAPTCHeuristics) && (!aptcTR.timerScheduled)) {
+ do_gettimeofday(&ts);
+ tvsub(&ts, &aptcTR.samplingTS);
+ duration = ts.tv_sec * 1000 + ts.tv_usec / 1000; /* ms */
+ numbytes = aptcTR.bytesTransmitted + aptcTR.bytesReceived;
+
+ if (duration > APTC_TRAFFIC_SAMPLING_INTERVAL) {
+ /* Initialize the time stamp and byte count */
+ aptcTR.bytesTransmitted = aptcTR.bytesReceived = 0;
+ do_gettimeofday(&aptcTR.samplingTS);
+
+ /* Calculate and decide based on throughput thresholds */
+ throughput = ((numbytes * 8) / duration);
+ if (throughput > APTC_UPPER_THROUGHPUT_THRESHOLD) {
+ /* Disable Sleep and schedule a timer */
+ A_ASSERT(ar->arWmiReady == TRUE);
+ AR6000_SPIN_UNLOCK(&ar->arLock, 0);
+ status = wmi_powermode_cmd(ar->arWmi, MAX_PERF_POWER);
+ AR6000_SPIN_LOCK(&ar->arLock, 0);
+ A_TIMEOUT_MS(&aptcTimer, APTC_TRAFFIC_SAMPLING_INTERVAL, 0);
+ aptcTR.timerScheduled = TRUE;
+ }
+ }
+ }
+
+ AR6000_SPIN_UNLOCK(&ar->arLock, 0);
+}
+#endif /* ADAPTIVE_POWER_THROUGHPUT_CONTROL */
+
+static HTC_SEND_FULL_ACTION ar6000_tx_queue_full(void *Context, HTC_PACKET *pPacket)
+{
+ AR_SOFTC_T *ar = (AR_SOFTC_T *)Context;
+ HTC_SEND_FULL_ACTION action = HTC_SEND_FULL_KEEP;
+ A_BOOL stopNet = FALSE;
+ HTC_ENDPOINT_ID Endpoint = HTC_GET_ENDPOINT_FROM_PKT(pPacket);
+
+ do {
+
+ if (bypasswmi) {
+ int accessClass;
+
+ if (HTC_GET_TAG_FROM_PKT(pPacket) == AR6K_CONTROL_PKT_TAG) {
+ /* don't drop special control packets */
+ break;
+ }
+
+ accessClass = arEndpoint2Ac(ar,Endpoint);
+ /* for endpoint ping testing drop Best Effort and Background */
+ if ((accessClass == WMM_AC_BE) || (accessClass == WMM_AC_BK)) {
+ action = HTC_SEND_FULL_DROP;
+ stopNet = FALSE;
+ } else {
+ /* keep but stop the netqueues */
+ stopNet = TRUE;
+ }
+ break;
+ }
+
+ if (Endpoint == ar->arControlEp) {
+ /* under normal WMI, if this is getting full then something is running rampant;
+ * the host should not be exhausting the WMI queue with too many commands.
+ * The only exception to this is during testing using endpointping */
+ AR6000_SPIN_LOCK(&ar->arLock, 0);
+ /* set flag to handle subsequent messages */
+ ar->arWMIControlEpFull = TRUE;
+ AR6000_SPIN_UNLOCK(&ar->arLock, 0);
+ AR_DEBUG_PRINTF(ATH_DEBUG_ERR,("WMI Control Endpoint is FULL!!! \n"));
+ /* no need to stop the network */
+ stopNet = FALSE;
+ break;
+ }
+
+ /* if we get here, we are dealing with data endpoints getting full */
+
+ if (HTC_GET_TAG_FROM_PKT(pPacket) == AR6K_CONTROL_PKT_TAG) {
+ /* don't drop control packets issued on ANY data endpoint */
+ break;
+ }
+
+ if (ar->arNetworkType == ADHOC_NETWORK) {
+ /* in adhoc mode, we cannot differentiate traffic priorities so there is no need to
+ * continue, however we should stop the network */
+ stopNet = TRUE;
+ break;
+ }
+ /* the last MAX_HI_COOKIE_NUM "batch" of cookies are reserved for the highest
+ * active stream */
+ if (ar->arAcStreamPriMap[arEndpoint2Ac(ar,Endpoint)] < ar->arHiAcStreamActivePri &&
+ ar->arCookieCount <= MAX_HI_COOKIE_NUM) {
+ /* this stream's priority is less than the highest active priority, we
+ * give preference to the highest priority stream by directing
+ * HTC to drop the packet that overflowed */
+ action = HTC_SEND_FULL_DROP;
+ /* since we are dropping packets, no need to stop the network */
+ stopNet = FALSE;
+ break;
+ }
+
+ } while (FALSE);
+
+ if (stopNet) {
+ AR6000_SPIN_LOCK(&ar->arLock, 0);
+ ar->arNetQueueStopped = TRUE;
+ AR6000_SPIN_UNLOCK(&ar->arLock, 0);
+ /* one of the data endpoint queues is getting full; we need to stop the network
+ * stack. The queue will resume in ar6000_tx_complete() */
+ netif_stop_queue(ar->arNetDev);
+ }
+
+ return action;
+}
+
+
+static void
+ar6000_tx_complete(void *Context, HTC_PACKET_QUEUE *pPacketQueue)
+{
+ AR_SOFTC_T *ar = (AR_SOFTC_T *)Context;
+ A_UINT32 mapNo = 0;
+ A_STATUS status;
+ struct ar_cookie * ar_cookie;
+ HTC_ENDPOINT_ID eid;
+ A_BOOL wakeEvent = FALSE;
+ struct sk_buff_head skb_queue;
+ HTC_PACKET *pPacket;
+ struct sk_buff *pktSkb;
+ A_BOOL flushing = FALSE;
+
+ skb_queue_head_init(&skb_queue);
+
+ /* lock the driver as we update internal state */
+ AR6000_SPIN_LOCK(&ar->arLock, 0);
+
+ /* reap completed packets */
+ while (!HTC_QUEUE_EMPTY(pPacketQueue)) {
+
+ pPacket = HTC_PACKET_DEQUEUE(pPacketQueue);
+
+ ar_cookie = (struct ar_cookie *)pPacket->pPktContext;
+ A_ASSERT(ar_cookie);
+
+ status = pPacket->Status;
+ pktSkb = (struct sk_buff *)ar_cookie->arc_bp[0];
+ eid = pPacket->Endpoint;
+ mapNo = ar_cookie->arc_bp[1];
+
+ A_ASSERT(pktSkb);
+ A_ASSERT(pPacket->pBuffer == A_NETBUF_DATA(pktSkb));
+
+ /* add this to the list, use faster non-lock API */
+ __skb_queue_tail(&skb_queue,pktSkb);
+
+ if (A_SUCCESS(status)) {
+ A_ASSERT(pPacket->ActualLength == A_NETBUF_LEN(pktSkb));
+ }
+
+ AR_DEBUG_PRINTF(ATH_DEBUG_WLAN_TX,("ar6000_tx_complete skb=0x%lx data=0x%lx len=0x%x eid=%d ",
+ (unsigned long)pktSkb, (unsigned long)pPacket->pBuffer,
+ pPacket->ActualLength,
+ eid));
+
+ ar->arTxPending[eid]--;
+
+ if ((eid != ar->arControlEp) || bypasswmi) {
+ ar->arTotalTxDataPending--;
+ }
+
+ if (eid == ar->arControlEp)
+ {
+ if (ar->arWMIControlEpFull) {
+ /* since this packet completed, the WMI EP is no longer full */
+ ar->arWMIControlEpFull = FALSE;
+ }
+
+ if (ar->arTxPending[eid] == 0) {
+ wakeEvent = TRUE;
+ }
+ }
+
+ if (A_FAILED(status)) {
+ if (status == A_ECANCELED) {
+ /* a packet was flushed */
+ flushing = TRUE;
+ }
+ AR6000_STAT_INC(ar, tx_errors);
+ if (status != A_NO_RESOURCE) {
+ AR_DEBUG_PRINTF(ATH_DEBUG_ERR,("%s() -TX ERROR, status: 0x%x\n", __func__,
+ status));
+ }
+ } else {
+ AR_DEBUG_PRINTF(ATH_DEBUG_WLAN_TX,("OK\n"));
+ flushing = FALSE;
+ AR6000_STAT_INC(ar, tx_packets);
+ ar->arNetStats.tx_bytes += A_NETBUF_LEN(pktSkb);
+#ifdef ADAPTIVE_POWER_THROUGHPUT_CONTROL
+ aptcTR.bytesTransmitted += a_netbuf_to_len(pktSkb);
+ applyAPTCHeuristics(ar);
+#endif /* ADAPTIVE_POWER_THROUGHPUT_CONTROL */
+ }
+
+ // TODO this needs to be looked at
+ if ((ar->arNetworkType == ADHOC_NETWORK) && ar->arIbssPsEnable
+ && (eid != ar->arControlEp) && mapNo)
+ {
+ mapNo --;
+ ar->arNodeMap[mapNo].txPending --;
+
+ if (!ar->arNodeMap[mapNo].txPending && (mapNo == (ar->arNodeNum - 1))) {
+ A_UINT32 i;
+ for (i = ar->arNodeNum; i > 0; i --) {
+ if (!ar->arNodeMap[i - 1].txPending) {
+ A_MEMZERO(&ar->arNodeMap[i - 1], sizeof(struct ar_node_mapping));
+ ar->arNodeNum --;
+ } else {
+ break;
+ }
+ }
+ }
+ }
+
+ ar6000_free_cookie(ar, ar_cookie);
+
+ if (ar->arNetQueueStopped) {
+ ar->arNetQueueStopped = FALSE;
+ }
+ }
+
+ AR6000_SPIN_UNLOCK(&ar->arLock, 0);
+
+ /* lock is released, we can freely call other kernel APIs */
+
+ /* free all skbs in our local list */
+ while (!skb_queue_empty(&skb_queue)) {
+ /* use non-lock version */
+ pktSkb = __skb_dequeue(&skb_queue);
+ A_NETBUF_FREE(pktSkb);
+ }
+
+ if ((ar->arConnected == TRUE) || (bypasswmi)) {
+ if (!flushing) {
+ /* don't wake the queue if we are flushing, otherwise it will just
+ * keep queueing packets, which will keep failing */
+ netif_wake_queue(ar->arNetDev);
+ }
+ }
+
+ if (wakeEvent) {
+ wake_up(&arEvent);
+ }
+
+}
+
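+/* Look up the AP-mode station entry whose MAC matches node_addr; returns NULL
+ * if no such station is connected (or we are not in AP mode). */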
+sta_t *
+ieee80211_find_conn(AR_SOFTC_T *ar, A_UINT8 *node_addr)
+{
+ sta_t *conn = NULL;
+ A_UINT8 i, max_conn;
+
+ switch(ar->arNetworkType) {
+ case AP_NETWORK:
+ max_conn = AP_MAX_NUM_STA;
+ break;
+ default:
+ max_conn=0;
+ break;
+ }
+
+ for (i = 0; i < max_conn; i++) {
+ if (IEEE80211_ADDR_EQ(node_addr, ar->sta_list[i].mac)) {
+ conn = &ar->sta_list[i];
+ break;
+ }
+ }
+
+ return conn;
+}
+
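+/* Look up the AP-mode station entry by association ID; returns NULL if none. */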
+sta_t *ieee80211_find_conn_for_aid(AR_SOFTC_T *ar, A_UINT8 aid)
+{
+ sta_t *conn = NULL;
+ A_UINT8 ctr;
+
+ for (ctr = 0; ctr < AP_MAX_NUM_STA; ctr++) {
+ if (ar->sta_list[ctr].aid == aid) {
+ conn = &ar->sta_list[ctr];
+ break;
+ }
+ }
+ return conn;
+}
+
+/*
+ * Receive event handler. This is called by HTC when a packet is received
+ */
+int pktcount;
+static void
+ar6000_rx(void *Context, HTC_PACKET *pPacket)
+{
+ AR_SOFTC_T *ar = (AR_SOFTC_T *)Context;
+ struct sk_buff *skb = (struct sk_buff *)pPacket->pPktContext;
+ int minHdrLen;
+ A_UINT8 containsDot11Hdr = 0;
+ A_STATUS status = pPacket->Status;
+ HTC_ENDPOINT_ID ept = pPacket->Endpoint;
+
+ A_ASSERT((status != A_OK) ||
+ (pPacket->pBuffer == (A_NETBUF_DATA(skb) + HTC_HEADER_LEN)));
+
+ AR_DEBUG_PRINTF(ATH_DEBUG_WLAN_RX,("ar6000_rx ar=0x%lx eid=%d, skb=0x%lx, data=0x%lx, len=0x%x status:%d",
+ (unsigned long)ar, ept, (unsigned long)skb, (unsigned long)pPacket->pBuffer,
+ pPacket->ActualLength, status));
+ if (status != A_OK) {
+ if (status != A_ECANCELED) {
+ AR_DEBUG_PRINTF(ATH_DEBUG_ERR,("RX ERR (%d) \n",status));
+ }
+ }
+
+ /* take lock to protect buffer counts
+ * and adaptive power throughput state */
+ AR6000_SPIN_LOCK(&ar->arLock, 0);
+
+ if (A_SUCCESS(status)) {
+ AR6000_STAT_INC(ar, rx_packets);
+ ar->arNetStats.rx_bytes += pPacket->ActualLength;
+#ifdef ADAPTIVE_POWER_THROUGHPUT_CONTROL
+ aptcTR.bytesReceived += a_netbuf_to_len(skb);
+ applyAPTCHeuristics(ar);
+#endif /* ADAPTIVE_POWER_THROUGHPUT_CONTROL */
+
+ A_NETBUF_PUT(skb, pPacket->ActualLength + HTC_HEADER_LEN);
+ A_NETBUF_PULL(skb, HTC_HEADER_LEN);
+
+#ifdef DEBUG
+ if (debugdriver >= 2) {
+ ar6000_dump_skb(skb);
+ }
+#endif /* DEBUG */
+ }
+
+ AR6000_SPIN_UNLOCK(&ar->arLock, 0);
+
+ skb->dev = ar->arNetDev;
+ if (status != A_OK) {
+ AR6000_STAT_INC(ar, rx_errors);
+ A_NETBUF_FREE(skb);
+ } else if (ar->arWmiEnabled == TRUE) {
+ if (ept == ar->arControlEp) {
+ /*
+ * this is a wmi control msg
+ */
+#ifdef CONFIG_PM
+ ar6000_check_wow_status(ar, skb, TRUE);
+#endif /* CONFIG_PM */
+ wmi_control_rx(ar->arWmi, skb);
+ } else {
+ WMI_DATA_HDR *dhdr = (WMI_DATA_HDR *)A_NETBUF_DATA(skb);
+ A_UINT8 is_amsdu, tid, is_acl_data_frame;
+ is_acl_data_frame = WMI_DATA_HDR_GET_DATA_TYPE(dhdr) == WMI_DATA_HDR_DATA_TYPE_ACL;
+#ifdef CONFIG_PM
+ ar6000_check_wow_status(ar, NULL, FALSE);
+#endif /* CONFIG_PM */
+ /*
+ * this is a wmi data packet
+ */
+ // NWF
+
+ if (processDot11Hdr) {
+ minHdrLen = sizeof(WMI_DATA_HDR) + sizeof(struct ieee80211_frame) + sizeof(ATH_LLC_SNAP_HDR);
+ } else {
+ minHdrLen = sizeof (WMI_DATA_HDR) + sizeof(ATH_MAC_HDR) +
+ sizeof(ATH_LLC_SNAP_HDR);
+ }
+
+ /* In the case of AP mode we may receive NULL data frames
+ * that do not have LLC hdr. They are 16 bytes in size.
+ * Allow these frames in the AP mode.
+ * ACL data frames don't follow ethernet frame bounds for
+ * min length
+ */
+ if (ar->arNetworkType != AP_NETWORK && !is_acl_data_frame &&
+ ((pPacket->ActualLength < minHdrLen) ||
+ (pPacket->ActualLength > AR6000_MAX_RX_MESSAGE_SIZE)))
+ {
+ /*
+ * packet is too short or too long
+ */
+ AR_DEBUG_PRINTF(ATH_DEBUG_INFO,("TOO SHORT or TOO LONG\n"));
+ AR6000_STAT_INC(ar, rx_errors);
+ AR6000_STAT_INC(ar, rx_length_errors);
+ A_NETBUF_FREE(skb);
+ } else {
+ A_UINT16 seq_no;
+ A_UINT8 meta_type;
+
+#if 0
+ /* Access RSSI values here */
+ AR_DEBUG_PRINTF(ATH_DEBUG_INFO,("RSSI %d\n",
+ ((WMI_DATA_HDR *) A_NETBUF_DATA(skb))->rssi));
+#endif
+ /* Get the Power save state of the STA */
+ if (ar->arNetworkType == AP_NETWORK) {
+ sta_t *conn = NULL;
+ A_UINT8 psState=0,prevPsState;
+ ATH_MAC_HDR *datap=NULL;
+ A_UINT16 offset;
+
+ meta_type = WMI_DATA_HDR_GET_META(dhdr);
+
+ psState = (((WMI_DATA_HDR *)A_NETBUF_DATA(skb))->info
+ >> WMI_DATA_HDR_PS_SHIFT) & WMI_DATA_HDR_PS_MASK;
+
+ offset = sizeof(WMI_DATA_HDR);
+
+ switch (meta_type) {
+ case 0:
+ break;
+ case WMI_META_VERSION_1:
+ offset += sizeof(WMI_RX_META_V1);
+ break;
+#ifdef CONFIG_CHECKSUM_OFFLOAD
+ case WMI_META_VERSION_2:
+ offset += sizeof(WMI_RX_META_V2);
+ break;
+#endif
+ default:
+ break;
+ }
+
+ datap = (ATH_MAC_HDR *)(A_NETBUF_DATA(skb)+offset);
+ conn = ieee80211_find_conn(ar, datap->srcMac);
+
+ if (conn) {
+ /* if there is a change in PS state of the STA,
+ * take appropriate steps.
+ * 1. If Sleep-->Awake, flush the psq for the STA
+ * Clear the PVB for the STA.
+ * 2. If Awake-->Sleep, start queueing frames for
+ * the STA.
+ */
+ prevPsState = STA_IS_PWR_SLEEP(conn);
+ if (psState) {
+ STA_SET_PWR_SLEEP(conn);
+ } else {
+ STA_CLR_PWR_SLEEP(conn);
+ }
+
+ if (prevPsState ^ STA_IS_PWR_SLEEP(conn)) {
+
+ if (!STA_IS_PWR_SLEEP(conn)) {
+
+ A_MUTEX_LOCK(&conn->psqLock);
+ while (!A_NETBUF_QUEUE_EMPTY(&conn->psq)) {
+ struct sk_buff *skb=NULL;
+
+ skb = A_NETBUF_DEQUEUE(&conn->psq);
+ A_MUTEX_UNLOCK(&conn->psqLock);
+ ar6000_data_tx(skb,ar->arNetDev);
+ A_MUTEX_LOCK(&conn->psqLock);
+ }
+ A_MUTEX_UNLOCK(&conn->psqLock);
+ /* Clear the PVB for this STA */
+ wmi_set_pvb_cmd(ar->arWmi, conn->aid, 0);
+ }
+ }
+ } else {
+ /* This frame is from a STA that is not associated*/
+ A_ASSERT(FALSE);
+ }
+
+ /* Drop NULL data frames here */
+ if((pPacket->ActualLength < minHdrLen) ||
+ (pPacket->ActualLength > AR6000_MAX_RX_MESSAGE_SIZE)) {
+ A_NETBUF_FREE(skb);
+ goto rx_done;
+ }
+ }
+
+ is_amsdu = WMI_DATA_HDR_IS_AMSDU(dhdr);
+ tid = WMI_DATA_HDR_GET_UP(dhdr);
+ seq_no = WMI_DATA_HDR_GET_SEQNO(dhdr);
+ meta_type = WMI_DATA_HDR_GET_META(dhdr);
+ containsDot11Hdr = WMI_DATA_HDR_GET_DOT11(dhdr);
+
+ wmi_data_hdr_remove(ar->arWmi, skb);
+
+ switch (meta_type) {
+ case WMI_META_VERSION_1:
+ {
+ WMI_RX_META_V1 *pMeta = (WMI_RX_META_V1 *)A_NETBUF_DATA(skb);
+ A_PRINTF("META %d %d %d %d %x\n", pMeta->status, pMeta->rix, pMeta->rssi, pMeta->channel, pMeta->flags);
+ A_NETBUF_PULL((void*)skb, sizeof(WMI_RX_META_V1));
+ break;
+ }
+#ifdef CONFIG_CHECKSUM_OFFLOAD
+ case WMI_META_VERSION_2:
+ {
+ WMI_RX_META_V2 *pMeta = (WMI_RX_META_V2 *)A_NETBUF_DATA(skb);
+ if(pMeta->csumFlags & 0x1){
+ skb->ip_summed=CHECKSUM_COMPLETE;
+ skb->csum=(pMeta->csum);
+ }
+ A_NETBUF_PULL((void*)skb, sizeof(WMI_RX_META_V2));
+ break;
+ }
+#endif
+ default:
+ break;
+ }
+
+ A_ASSERT(status == A_OK);
+
+ /* NWF: strip the 802.11 header if present, else convert 802.3 back to DIX */
+ if(containsDot11Hdr) {
+ status = wmi_dot11_hdr_remove(ar->arWmi,skb);
+ } else if(!is_amsdu && !is_acl_data_frame) {
+ status = wmi_dot3_2_dix(skb);
+ }
+
+ if (status != A_OK) {
+ /* Drop frames that could not be processed (lack of memory, etc.) */
+ A_NETBUF_FREE(skb);
+ goto rx_done;
+ }
+
+ if (is_acl_data_frame) {
+ A_NETBUF_PUSH(skb, sizeof(int));
+ *((short *)A_NETBUF_DATA(skb)) = WMI_ACL_DATA_EVENTID;
+ /* send the data packet to PAL driver */
+ if(ar6k_pal_config_g.fpar6k_pal_recv_pkt) {
+ if((*ar6k_pal_config_g.fpar6k_pal_recv_pkt)(ar->hcipal_info, skb) == TRUE)
+ goto rx_done;
+ }
+ }
+
+ if ((ar->arNetDev->flags & IFF_UP) == IFF_UP) {
+ if (ar->arNetworkType == AP_NETWORK) {
+ struct sk_buff *skb1 = NULL;
+ ATH_MAC_HDR *datap;
+
+ datap = (ATH_MAC_HDR *)A_NETBUF_DATA(skb);
+ if (IEEE80211_IS_MULTICAST(datap->dstMac)) {
+ /* Bcast/Mcast frames should be sent to the OS
+ * stack as well as on the air.
+ */
+ skb1 = skb_copy(skb,GFP_ATOMIC);
+ } else {
+ /* Search for a connected STA with dstMac as
+ * the Mac address. If found send the frame to
+ * it on the air else send the frame up the
+ * stack
+ */
+ sta_t *conn = NULL;
+ conn = ieee80211_find_conn(ar, datap->dstMac);
+
+ if (conn && ar->intra_bss) {
+ skb1 = skb;
+ skb = NULL;
+ } else if(conn && !ar->intra_bss) {
+ A_NETBUF_FREE(skb);
+ skb = NULL;
+ }
+ }
+ if (skb1) {
+ ar6000_data_tx(skb1, ar->arNetDev);
+ }
+ }
+ }
+#ifdef ATH_AR6K_11N_SUPPORT
+ aggr_process_recv_frm(ar->aggr_cntxt, tid, seq_no, is_amsdu, (void **)&skb);
+#endif
+ ar6000_deliver_frames_to_nw_stack((void *) ar->arNetDev, (void *)skb);
+ }
+ }
+ } else {
+ if (EPPING_ALIGNMENT_PAD > 0) {
+ A_NETBUF_PULL(skb, EPPING_ALIGNMENT_PAD);
+ }
+ ar6000_deliver_frames_to_nw_stack((void *)ar->arNetDev, (void *)skb);
+ }
+
+rx_done:
+
+ return;
+}
+
+static void
+ar6000_deliver_frames_to_nw_stack(void *dev, void *osbuf)
+{
+ struct sk_buff *skb = (struct sk_buff *)osbuf;
+
+ if(skb) {
+ skb->dev = dev;
+ if ((skb->dev->flags & IFF_UP) == IFF_UP) {
+#ifdef CONFIG_PM
+ ar6000_check_wow_status((AR_SOFTC_T *)ar6k_priv(dev), skb, FALSE);
+#endif /* CONFIG_PM */
+ skb->protocol = eth_type_trans(skb, skb->dev);
+ /*
+ * If this routine is called from an ISR (hard IRQ), DSR (soft IRQ)
+ * or tasklet, use netif_rx to deliver the packet to the stack.
+ * netif_rx will queue the packet onto the receive queue and mark
+ * that the softirq thread has a pending action to complete. The kernel
+ * will schedule the softirq kernel thread after processing the DSR.
+ *
+ * If this routine is called from process context, use netif_rx_ni,
+ * which will schedule the softirq kernel thread after queuing the packet.
+ */
+ if (in_interrupt()) {
+ netif_rx(skb);
+ } else {
+ netif_rx_ni(skb);
+ }
+ } else {
+ A_NETBUF_FREE(skb);
+ }
+ }
+}
+
+#if 0
+static void
+ar6000_deliver_frames_to_bt_stack(void *dev, void *osbuf)
+{
+ struct sk_buff *skb = (struct sk_buff *)osbuf;
+
+ if(skb) {
+ skb->dev = dev;
+ if ((skb->dev->flags & IFF_UP) == IFF_UP) {
+ skb->protocol = htons(ETH_P_CONTROL);
+ netif_rx(skb);
+ } else {
+ A_NETBUF_FREE(skb);
+ }
+ }
+}
+#endif
+
+static void
+ar6000_rx_refill(void *Context, HTC_ENDPOINT_ID Endpoint)
+{
+ AR_SOFTC_T *ar = (AR_SOFTC_T *)Context;
+ void *osBuf;
+ int RxBuffers;
+ int buffersToRefill;
+ HTC_PACKET *pPacket;
+ HTC_PACKET_QUEUE queue;
+
+ buffersToRefill = (int)AR6000_MAX_RX_BUFFERS -
+ HTCGetNumRecvBuffers(ar->arHtcTarget, Endpoint);
+
+ if (buffersToRefill <= 0) {
+ /* fast return, nothing to fill */
+ return;
+ }
+
+ INIT_HTC_PACKET_QUEUE(&queue);
+
+ AR_DEBUG_PRINTF(ATH_DEBUG_WLAN_RX,("ar6000_rx_refill: providing htc with %d buffers at eid=%d\n",
+ buffersToRefill, Endpoint));
+
+ for (RxBuffers = 0; RxBuffers < buffersToRefill; RxBuffers++) {
+ osBuf = A_NETBUF_ALLOC(AR6000_BUFFER_SIZE);
+ if (NULL == osBuf) {
+ break;
+ }
+ /* the HTC packet wrapper is at the head of the reserved area
+ * in the skb */
+ pPacket = (HTC_PACKET *)(A_NETBUF_HEAD(osBuf));
+ /* set re-fill info */
+ SET_HTC_PACKET_INFO_RX_REFILL(pPacket,osBuf,A_NETBUF_DATA(osBuf),AR6000_BUFFER_SIZE,Endpoint);
+ /* add to queue */
+ HTC_PACKET_ENQUEUE(&queue,pPacket);
+ }
+
+ if (!HTC_QUEUE_EMPTY(&queue)) {
+ /* add packets */
+ HTCAddReceivePktMultiple(ar->arHtcTarget, &queue);
+ }
+
+}
+
+ /* clean up our amsdu buffer list */
+static void ar6000_cleanup_amsdu_rxbufs(AR_SOFTC_T *ar)
+{
+ HTC_PACKET *pPacket;
+ void *osBuf;
+
+ /* empty AMSDU buffer queue and free OS bufs */
+ while (TRUE) {
+
+ AR6000_SPIN_LOCK(&ar->arLock, 0);
+ pPacket = HTC_PACKET_DEQUEUE(&ar->amsdu_rx_buffer_queue);
+ AR6000_SPIN_UNLOCK(&ar->arLock, 0);
+
+ if (NULL == pPacket) {
+ break;
+ }
+
+ osBuf = pPacket->pPktContext;
+ if (NULL == osBuf) {
+ A_ASSERT(FALSE);
+ break;
+ }
+
+ A_NETBUF_FREE(osBuf);
+ }
+
+}
+
+
+ /* refill the amsdu buffer list */
+static void ar6000_refill_amsdu_rxbufs(AR_SOFTC_T *ar, int Count)
+{
+ HTC_PACKET *pPacket;
+ void *osBuf;
+
+ while (Count > 0) {
+ osBuf = A_NETBUF_ALLOC(AR6000_AMSDU_BUFFER_SIZE);
+ if (NULL == osBuf) {
+ break;
+ }
+ /* the HTC packet wrapper is at the head of the reserved area
+ * in the skb */
+ pPacket = (HTC_PACKET *)(A_NETBUF_HEAD(osBuf));
+ /* set re-fill info */
+ SET_HTC_PACKET_INFO_RX_REFILL(pPacket,osBuf,A_NETBUF_DATA(osBuf),AR6000_AMSDU_BUFFER_SIZE,0);
+
+ AR6000_SPIN_LOCK(&ar->arLock, 0);
+ /* put it in the list */
+ HTC_PACKET_ENQUEUE(&ar->amsdu_rx_buffer_queue,pPacket);
+ AR6000_SPIN_UNLOCK(&ar->arLock, 0);
+ Count--;
+ }
+
+}
+
+ /* callback to allocate a large receive buffer for a pending packet. This function is called when
+ * an HTC packet arrives whose length exceeds a threshold value.
+ *
+ * We use a pre-allocated list of buffers of maximum AMSDU size (4K). Under linux, keeping the
+ * allocation size constant makes better use of cached-slab allocations.
+ */
+static HTC_PACKET *ar6000_alloc_amsdu_rxbuf(void *Context, HTC_ENDPOINT_ID Endpoint, int Length)
+{
+ HTC_PACKET *pPacket = NULL;
+ AR_SOFTC_T *ar = (AR_SOFTC_T *)Context;
+ int refillCount = 0;
+
+ AR_DEBUG_PRINTF(ATH_DEBUG_WLAN_RX,("ar6000_alloc_amsdu_rxbuf: eid=%d, Length:%d\n",Endpoint,Length));
+
+ do {
+
+ if (Length <= AR6000_BUFFER_SIZE) {
+ /* shouldn't be getting called on normal sized packets */
+ A_ASSERT(FALSE);
+ break;
+ }
+
+ if (Length > AR6000_AMSDU_BUFFER_SIZE) {
+ A_ASSERT(FALSE);
+ break;
+ }
+
+ AR6000_SPIN_LOCK(&ar->arLock, 0);
+ /* allocate a packet from the list */
+ pPacket = HTC_PACKET_DEQUEUE(&ar->amsdu_rx_buffer_queue);
+ /* see if we need to refill again */
+ refillCount = AR6000_MAX_AMSDU_RX_BUFFERS - HTC_PACKET_QUEUE_DEPTH(&ar->amsdu_rx_buffer_queue);
+ AR6000_SPIN_UNLOCK(&ar->arLock, 0);
+
+ if (NULL == pPacket) {
+ break;
+ }
+ /* set actual endpoint ID */
+ pPacket->Endpoint = Endpoint;
+
+ } while (FALSE);
+
+ if (refillCount >= AR6000_AMSDU_REFILL_THRESHOLD) {
+ ar6000_refill_amsdu_rxbufs(ar,refillCount);
+ }
+
+ return pPacket;
+}
+
+static void
+ar6000_set_multicast_list(struct net_device *dev)
+{
+ AR_DEBUG_PRINTF(ATH_DEBUG_ERR,("ar6000: Multicast filter not supported\n"));
+}
+
+static struct net_device_stats *
+ar6000_get_stats(struct net_device *dev)
+{
+ AR_SOFTC_T *ar = (AR_SOFTC_T *)ar6k_priv(dev);
+ return &ar->arNetStats;
+}
+
+static struct iw_statistics *
+ar6000_get_iwstats(struct net_device * dev)
+{
+ AR_SOFTC_T *ar = (AR_SOFTC_T *)ar6k_priv(dev);
+ TARGET_STATS *pStats = &ar->arTargetStats;
+ struct iw_statistics * pIwStats = &ar->arIwStats;
+ int rtnllocked;
+
+ if (ar->bIsDestroyProgress || ar->arWmiReady == FALSE || ar->arWlanState == WLAN_DISABLED)
+ {
+ pIwStats->status = 0;
+ pIwStats->qual.qual = 0;
+ pIwStats->qual.level =0;
+ pIwStats->qual.noise = 0;
+ pIwStats->discard.code =0;
+ pIwStats->discard.retries=0;
+ pIwStats->miss.beacon =0;
+ return pIwStats;
+ }
+
+ /*
+ * The in_atomic function is used to determine if the scheduling is
+ * allowed in the current context or not. This was introduced in 2.6
+ * From what I have read on the differences between 2.4 and 2.6, the
+ * 2.4 kernel did not support preemption and so this check might not
+ * be required for 2.4 kernels.
+ */
+ if (in_atomic())
+ {
+ wmi_get_stats_cmd(ar->arWmi);
+
+ pIwStats->status = 1 ;
+ pIwStats->qual.qual = pStats->cs_aveBeacon_rssi - 161;
+ pIwStats->qual.level =pStats->cs_aveBeacon_rssi; /* noise is -95 dBm */
+ pIwStats->qual.noise = pStats->noise_floor_calibation;
+ pIwStats->discard.code = pStats->rx_decrypt_err;
+ pIwStats->discard.retries = pStats->tx_retry_cnt;
+ pIwStats->miss.beacon = pStats->cs_bmiss_cnt;
+ return pIwStats;
+ }
+
+ dev_hold(dev);
+ rtnllocked = rtnl_is_locked();
+ if (rtnllocked) {
+ rtnl_unlock();
+ }
+ pIwStats->status = 0;
+
+ if (down_interruptible(&ar->arSem)) {
+ goto err_exit;
+ }
+
+ do {
+
+ if (ar->bIsDestroyProgress || ar->arWlanState == WLAN_DISABLED) {
+ break;
+ }
+
+ ar->statsUpdatePending = TRUE;
+
+ if(wmi_get_stats_cmd(ar->arWmi) != A_OK) {
+ break;
+ }
+
+ wait_event_interruptible_timeout(arEvent, ar->statsUpdatePending == FALSE, wmitimeout * HZ);
+ if (signal_pending(current)) {
+ AR_DEBUG_PRINTF(ATH_DEBUG_ERR,("ar6000 : WMI get stats timeout \n"));
+ break;
+ }
+ pIwStats->status = 1 ;
+ pIwStats->qual.qual = pStats->cs_aveBeacon_rssi - 161;
+ pIwStats->qual.level =pStats->cs_aveBeacon_rssi; /* noise is -95 dBm */
+ pIwStats->qual.noise = pStats->noise_floor_calibation;
+ pIwStats->discard.code = pStats->rx_decrypt_err;
+ pIwStats->discard.retries = pStats->tx_retry_cnt;
+ pIwStats->miss.beacon = pStats->cs_bmiss_cnt;
+ } while (0);
+ up(&ar->arSem);
+
+err_exit:
+ if (rtnllocked) {
+ rtnl_lock();
+ }
+ dev_put(dev);
+ return pIwStats;
+}
+
+void
+ar6000_ready_event(void *devt, A_UINT8 *datap, A_UINT8 phyCap, A_UINT32 sw_ver, A_UINT32 abi_ver)
+{
+ AR_SOFTC_T *ar = (AR_SOFTC_T *)devt;
+ struct net_device *dev = ar->arNetDev;
+
+ A_MEMCPY(dev->dev_addr, datap, AR6000_ETH_ADDR_LEN);
+ AR_DEBUG_PRINTF(ATH_DEBUG_INFO,("mac address = %2.2x:%2.2x:%2.2x:%2.2x:%2.2x:%2.2x\n",
+ dev->dev_addr[0], dev->dev_addr[1],
+ dev->dev_addr[2], dev->dev_addr[3],
+ dev->dev_addr[4], dev->dev_addr[5]));
+
+ ar->arPhyCapability = phyCap;
+ ar->arVersion.wlan_ver = sw_ver;
+ ar->arVersion.abi_ver = abi_ver;
+
+ /* Indicate to the waiting thread that the ready event was received */
+ ar->arWmiReady = TRUE;
+ wake_up(&arEvent);
+
+#if WLAN_CONFIG_IGNORE_POWER_SAVE_FAIL_EVENT_DURING_SCAN
+ wmi_pmparams_cmd(ar->arWmi, 0, 1, 0, 0, 1, IGNORE_POWER_SAVE_FAIL_EVENT_DURING_SCAN);
+#endif
+#if WLAN_CONFIG_DONOT_IGNORE_BARKER_IN_ERP
+ wmi_set_lpreamble_cmd(ar->arWmi, 0, WMI_DONOT_IGNORE_BARKER_IN_ERP);
+#endif
+ wmi_set_keepalive_cmd(ar->arWmi, WLAN_CONFIG_KEEP_ALIVE_INTERVAL);
+#if WLAN_CONFIG_DISABLE_11N
+ {
+ WMI_SET_HT_CAP_CMD htCap;
+
+ A_MEMZERO(&htCap, sizeof(WMI_SET_HT_CAP_CMD));
+ htCap.band = 0;
+ wmi_set_ht_cap_cmd(ar->arWmi, &htCap);
+
+ htCap.band = 1;
+ wmi_set_ht_cap_cmd(ar->arWmi, &htCap);
+ }
+#endif /* WLAN_CONFIG_DISABLE_11N */
+
+#ifdef ATH6K_CONFIG_OTA_MODE
+ wmi_powermode_cmd(ar->arWmi, MAX_PERF_POWER);
+#endif
+ wmi_disctimeout_cmd(ar->arWmi, WLAN_CONFIG_DISCONNECT_TIMEOUT);
+}
+
+void
+add_new_sta(AR_SOFTC_T *ar, A_UINT8 *mac, A_UINT16 aid, A_UINT8 *wpaie,
+ A_UINT8 ielen, A_UINT8 keymgmt, A_UINT8 ucipher, A_UINT8 auth)
+{
+ A_UINT8 free_slot=aid-1;
+
+ A_MEMCPY(ar->sta_list[free_slot].mac, mac, ATH_MAC_LEN);
+ A_MEMCPY(ar->sta_list[free_slot].wpa_ie, wpaie, ielen);
+ ar->sta_list[free_slot].aid = aid;
+ ar->sta_list[free_slot].keymgmt = keymgmt;
+ ar->sta_list[free_slot].ucipher = ucipher;
+ ar->sta_list[free_slot].auth = auth;
+ ar->sta_list_index = ar->sta_list_index | (1 << free_slot);
+ ar->arAPStats.sta[free_slot].aid = aid;
+}
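+
+/* Illustrative note (not part of the driver): ar->sta_list_index is a bitmap
+ * of occupied STA slots, one bit per slot, with slot = aid - 1.  The same
+ * bit operations appear in add_new_sta(), sta_cleanup() and the disconnect
+ * path:
+ *
+ *   ar->sta_list_index |=  (1 << slot);    // slot occupied (add_new_sta)
+ *   ar->sta_list_index &= ~(1 << slot);    // slot freed    (sta_cleanup)
+ *   if (ar->sta_list_index == 0)           // no STAs associated
+ *       ...flush the multicast PS queue...
+ */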
+
+void
+ar6000_connect_event(AR_SOFTC_T *ar, A_UINT16 channel, A_UINT8 *bssid,
+ A_UINT16 listenInterval, A_UINT16 beaconInterval,
+ NETWORK_TYPE networkType, A_UINT8 beaconIeLen,
+ A_UINT8 assocReqLen, A_UINT8 assocRespLen,
+ A_UINT8 *assocInfo)
+{
+ union iwreq_data wrqu;
+ int i, beacon_ie_pos, assoc_resp_ie_pos, assoc_req_ie_pos;
+ static const char *tag1 = "ASSOCINFO(ReqIEs=";
+ static const char *tag2 = "ASSOCRESPIE=";
+ static const char *beaconIetag = "BEACONIE=";
+ char buf[WMI_CONTROL_MSG_MAX_LEN * 2 + strlen(tag1) + 1];
+ char *pos;
+ A_UINT8 key_op_ctrl;
+ unsigned long flags;
+ struct ieee80211req_key *ik;
+ CRYPTO_TYPE keyType = NONE_CRYPT;
+
+ if(ar->arNetworkType & AP_NETWORK) {
+ struct net_device *dev = ar->arNetDev;
+ if(A_MEMCMP(dev->dev_addr, bssid, ATH_MAC_LEN)==0) {
+ ar->arACS = channel;
+ ik = &ar->ap_mode_bkey;
+
+ switch(ar->arAuthMode) {
+ case NONE_AUTH:
+ if(ar->arPairwiseCrypto == WEP_CRYPT) {
+ ar6000_install_static_wep_keys(ar);
+ }
+#ifdef WAPI_ENABLE
+ else if(ar->arPairwiseCrypto == WAPI_CRYPT) {
+ ap_set_wapi_key(ar, ik);
+ }
+#endif
+ break;
+ case WPA_PSK_AUTH:
+ case WPA2_PSK_AUTH:
+ case (WPA_PSK_AUTH|WPA2_PSK_AUTH):
+ switch (ik->ik_type) {
+ case IEEE80211_CIPHER_TKIP:
+ keyType = TKIP_CRYPT;
+ break;
+ case IEEE80211_CIPHER_AES_CCM:
+ keyType = AES_CRYPT;
+ break;
+ default:
+ goto skip_key;
+ }
+ wmi_addKey_cmd(ar->arWmi, ik->ik_keyix, keyType, GROUP_USAGE,
+ ik->ik_keylen, (A_UINT8 *)&ik->ik_keyrsc,
+ ik->ik_keydata, KEY_OP_INIT_VAL, ik->ik_macaddr,
+ SYNC_BOTH_WMIFLAG);
+
+ break;
+ }
+skip_key:
+ ar->arConnected = TRUE;
+ return;
+ }
+
+ A_PRINTF("NEW STA %2.2x:%2.2x:%2.2x:%2.2x:%2.2x:%2.2x \n "
+ " AID=%d \n", bssid[0], bssid[1], bssid[2],
+ bssid[3], bssid[4], bssid[5], channel);
+ switch ((listenInterval>>8)&0xFF) {
+ case OPEN_AUTH:
+ A_PRINTF("AUTH: OPEN\n");
+ break;
+ case SHARED_AUTH:
+ A_PRINTF("AUTH: SHARED\n");
+ break;
+ default:
+ A_PRINTF("AUTH: Unknown\n");
+ break;
+ }
+ switch (listenInterval&0xFF) {
+ case WPA_PSK_AUTH:
+ A_PRINTF("KeyMgmt: WPA-PSK\n");
+ break;
+ case WPA2_PSK_AUTH:
+ A_PRINTF("KeyMgmt: WPA2-PSK\n");
+ break;
+ default:
+ A_PRINTF("KeyMgmt: NONE\n");
+ break;
+ }
+ switch (beaconInterval) {
+ case AES_CRYPT:
+ A_PRINTF("Cipher: AES\n");
+ break;
+ case TKIP_CRYPT:
+ A_PRINTF("Cipher: TKIP\n");
+ break;
+ case WEP_CRYPT:
+ A_PRINTF("Cipher: WEP\n");
+ break;
+#ifdef WAPI_ENABLE
+ case WAPI_CRYPT:
+ A_PRINTF("Cipher: WAPI\n");
+ break;
+#endif
+ default:
+ A_PRINTF("Cipher: NONE\n");
+ break;
+ }
+
+ add_new_sta(ar, bssid, channel /*aid*/,
+ assocInfo /* WPA IE */, assocRespLen /* IE len */,
+ listenInterval&0xFF /* Keymgmt */, beaconInterval /* cipher */,
+ (listenInterval>>8)&0xFF /* auth alg */);
+
+ /* Send event to application */
+ A_MEMZERO(&wrqu, sizeof(wrqu));
+ A_MEMCPY(wrqu.addr.sa_data, bssid, ATH_MAC_LEN);
+ wireless_send_event(ar->arNetDev, IWEVREGISTERED, &wrqu, NULL);
+ /* In case the queue is stopped when we switch modes, this will
+ * wake it up
+ */
+ netif_wake_queue(ar->arNetDev);
+ return;
+ }
+
+#ifdef ATH6K_CONFIG_CFG80211
+ ar6k_cfg80211_connect_event(ar, channel, bssid,
+ listenInterval, beaconInterval,
+ networkType, beaconIeLen,
+ assocReqLen, assocRespLen,
+ assocInfo);
+#endif /* ATH6K_CONFIG_CFG80211 */
+
+ A_MEMCPY(ar->arBssid, bssid, sizeof(ar->arBssid));
+ ar->arBssChannel = channel;
+
+ A_PRINTF("AR6000 connected event on freq %d ", channel);
+ A_PRINTF("with bssid %2.2x:%2.2x:%2.2x:%2.2x:%2.2x:%2.2x "
+ " listenInterval=%d, beaconInterval = %d, beaconIeLen = %d assocReqLen=%d"
+ " assocRespLen =%d\n",
+ bssid[0], bssid[1], bssid[2],
+ bssid[3], bssid[4], bssid[5],
+ listenInterval, beaconInterval,
+ beaconIeLen, assocReqLen, assocRespLen);
+ if (networkType & ADHOC_NETWORK) {
+ if (networkType & ADHOC_CREATOR) {
+ A_PRINTF("Network: Adhoc (Creator)\n");
+ } else {
+ A_PRINTF("Network: Adhoc (Joiner)\n");
+ }
+ } else {
+ A_PRINTF("Network: Infrastructure\n");
+ }
+
+ if ((ar->arNetworkType == INFRA_NETWORK)) {
+ wmi_listeninterval_cmd(ar->arWmi, ar->arListenIntervalT, ar->arListenIntervalB);
+ }
+
+ if (beaconIeLen && (sizeof(buf) > (9 + beaconIeLen * 2))) {
+ AR_DEBUG_PRINTF(ATH_DEBUG_WLAN_CONNECT,("\nBeaconIEs= "));
+
+ beacon_ie_pos = 0;
+ A_MEMZERO(buf, sizeof(buf));
+ sprintf(buf, "%s", beaconIetag);
+ pos = buf + 9;
+ for (i = beacon_ie_pos; i < beacon_ie_pos + beaconIeLen; i++) {
+ AR_DEBUG_PRINTF(ATH_DEBUG_WLAN_CONNECT,("%2.2x ", assocInfo[i]));
+ sprintf(pos, "%2.2x", assocInfo[i]);
+ pos += 2;
+ }
+ AR_DEBUG_PRINTF(ATH_DEBUG_WLAN_CONNECT,("\n"));
+
+ A_MEMZERO(&wrqu, sizeof(wrqu));
+ wrqu.data.length = strlen(buf);
+ wireless_send_event(ar->arNetDev, IWEVCUSTOM, &wrqu, buf);
+ }
+
+ if (assocRespLen && (sizeof(buf) > (12 + (assocRespLen * 2))))
+ {
+ assoc_resp_ie_pos = beaconIeLen + assocReqLen +
+ sizeof(A_UINT16) + /* capinfo*/
+ sizeof(A_UINT16) + /* status Code */
+ sizeof(A_UINT16) ; /* associd */
+ A_MEMZERO(buf, sizeof(buf));
+ sprintf(buf, "%s", tag2);
+ pos = buf + 12;
+ AR_DEBUG_PRINTF(ATH_DEBUG_WLAN_CONNECT,("\nAssocRespIEs= "));
+ /*
+ * The Association Response Frame without the WLAN header is delivered
+ * to the host, so skip over to the IEs.
+ */
+ for (i = assoc_resp_ie_pos; i < assoc_resp_ie_pos + assocRespLen - 6; i++)
+ {
+ AR_DEBUG_PRINTF(ATH_DEBUG_WLAN_CONNECT,("%2.2x ", assocInfo[i]));
+ sprintf(pos, "%2.2x", assocInfo[i]);
+ pos += 2;
+ }
+ AR_DEBUG_PRINTF(ATH_DEBUG_WLAN_CONNECT,("\n"));
+
+ A_MEMZERO(&wrqu, sizeof(wrqu));
+ wrqu.data.length = strlen(buf);
+ wireless_send_event(ar->arNetDev, IWEVCUSTOM, &wrqu, buf);
+ }
+
+ if (assocReqLen && (sizeof(buf) > (17 + (assocReqLen * 2)))) {
+ /*
+ * assoc Request includes capability and listen interval. Skip these.
+ */
+ assoc_req_ie_pos = beaconIeLen +
+ sizeof(A_UINT16) + /* capinfo*/
+ sizeof(A_UINT16); /* listen interval */
+
+ A_MEMZERO(buf, sizeof(buf));
+ sprintf(buf, "%s", tag1);
+ pos = buf + 17;
+ AR_DEBUG_PRINTF(ATH_DEBUG_WLAN_CONNECT,("AssocReqIEs= "));
+ for (i = assoc_req_ie_pos; i < assoc_req_ie_pos + assocReqLen - 4; i++) {
+ AR_DEBUG_PRINTF(ATH_DEBUG_WLAN_CONNECT,("%2.2x ", assocInfo[i]));
+ sprintf(pos, "%2.2x", assocInfo[i]);
+ pos += 2;
+ }
+ AR_DEBUG_PRINTF(ATH_DEBUG_WLAN_CONNECT,("\n"));
+
+ A_MEMZERO(&wrqu, sizeof(wrqu));
+ wrqu.data.length = strlen(buf);
+ wireless_send_event(ar->arNetDev, IWEVCUSTOM, &wrqu, buf);
+ }
+
+#ifdef USER_KEYS
+ if (ar->user_savedkeys_stat == USER_SAVEDKEYS_STAT_RUN &&
+ ar->user_saved_keys.keyOk == TRUE)
+ {
+ key_op_ctrl = KEY_OP_VALID_MASK & ~KEY_OP_INIT_TSC;
+
+ if (ar->user_key_ctrl & AR6000_USER_SETKEYS_RSC_UNCHANGED) {
+ key_op_ctrl &= ~KEY_OP_INIT_RSC;
+ } else {
+ key_op_ctrl |= KEY_OP_INIT_RSC;
+ }
+ ar6000_reinstall_keys(ar, key_op_ctrl);
+ }
+#endif /* USER_KEYS */
+
+ netif_wake_queue(ar->arNetDev);
+
+ /* For CFG80211 the key configuration and the default key come in after connect, so there is no point in plumbing invalid keys */
+#ifndef ATH6K_CONFIG_CFG80211
+ if ((networkType & ADHOC_NETWORK) &&
+ (OPEN_AUTH == ar->arDot11AuthMode) &&
+ (NONE_AUTH == ar->arAuthMode) &&
+ (WEP_CRYPT == ar->arPairwiseCrypto))
+ {
+ if (!ar->arConnected) {
+ wmi_addKey_cmd(ar->arWmi,
+ ar->arDefTxKeyIndex,
+ WEP_CRYPT,
+ GROUP_USAGE | TX_USAGE,
+ ar->arWepKeyList[ar->arDefTxKeyIndex].arKeyLen,
+ NULL,
+ ar->arWepKeyList[ar->arDefTxKeyIndex].arKey, KEY_OP_INIT_VAL, NULL,
+ NO_SYNC_WMIFLAG);
+ }
+ }
+#endif /* ATH6K_CONFIG_CFG80211 */
+
+ /* Update connect & link status atomically */
+ spin_lock_irqsave(&ar->arLock, flags);
+ ar->arConnected = TRUE;
+ ar->arConnectPending = FALSE;
+ netif_carrier_on(ar->arNetDev);
+ spin_unlock_irqrestore(&ar->arLock, flags);
+ /* reset the rx aggr state */
+ aggr_reset_state(ar->aggr_cntxt);
+ reconnect_flag = 0;
+
+ A_MEMZERO(&wrqu, sizeof(wrqu));
+ A_MEMCPY(wrqu.addr.sa_data, bssid, IEEE80211_ADDR_LEN);
+ wrqu.addr.sa_family = ARPHRD_ETHER;
+ wireless_send_event(ar->arNetDev, SIOCGIWAP, &wrqu, NULL);
+ if ((ar->arNetworkType == ADHOC_NETWORK) && ar->arIbssPsEnable) {
+ A_MEMZERO(ar->arNodeMap, sizeof(ar->arNodeMap));
+ ar->arNodeNum = 0;
+ ar->arNexEpId = ENDPOINT_2;
+ }
+ if (!ar->arUserBssFilter) {
+ wmi_bssfilter_cmd(ar->arWmi, NONE_BSS_FILTER, 0);
+ }
+
+}
+
+void ar6000_set_numdataendpts(AR_SOFTC_T *ar, A_UINT32 num)
+{
+ A_ASSERT(num <= (HTC_MAILBOX_NUM_MAX - 1));
+ ar->arNumDataEndPts = num;
+}
+
+void
+sta_cleanup(AR_SOFTC_T *ar, A_UINT8 i)
+{
+ struct sk_buff *skb;
+
+ /* empty the queued pkts in the PS queue if any */
+ A_MUTEX_LOCK(&ar->sta_list[i].psqLock);
+ while (!A_NETBUF_QUEUE_EMPTY(&ar->sta_list[i].psq)) {
+ skb = A_NETBUF_DEQUEUE(&ar->sta_list[i].psq);
+ A_NETBUF_FREE(skb);
+ }
+ A_MUTEX_UNLOCK(&ar->sta_list[i].psqLock);
+
+ /* Zero out the state fields */
+ A_MEMZERO(&ar->arAPStats.sta[ar->sta_list[i].aid-1], sizeof(WMI_PER_STA_STAT));
+ A_MEMZERO(&ar->sta_list[i].mac, ATH_MAC_LEN);
+ A_MEMZERO(&ar->sta_list[i].wpa_ie, IEEE80211_MAX_IE);
+ ar->sta_list[i].aid = 0;
+ ar->sta_list[i].flags = 0;
+
+ ar->sta_list_index = ar->sta_list_index & ~(1 << i);
+
+}
+
+A_UINT8
+remove_sta(AR_SOFTC_T *ar, A_UINT8 *mac, A_UINT16 reason)
+{
+ A_UINT8 i, removed=0;
+
+ if(IS_MAC_NULL(mac)) {
+ return removed;
+ }
+
+ if(IS_MAC_BCAST(mac)) {
+ A_PRINTF("DEL ALL STA\n");
+ for(i=0; i < AP_MAX_NUM_STA; i++) {
+ if(!IS_MAC_NULL(ar->sta_list[i].mac)) {
+ sta_cleanup(ar, i);
+ removed = 1;
+ }
+ }
+ } else {
+ for(i=0; i < AP_MAX_NUM_STA; i++) {
+ if(A_MEMCMP(ar->sta_list[i].mac, mac, ATH_MAC_LEN)==0) {
+ A_PRINTF("DEL STA %2.2x:%2.2x:%2.2x:%2.2x:%2.2x:%2.2x "
+ " aid=%d REASON=%d\n", mac[0], mac[1], mac[2],
+ mac[3], mac[4], mac[5], ar->sta_list[i].aid, reason);
+
+ sta_cleanup(ar, i);
+ removed = 1;
+ break;
+ }
+ }
+ }
+ return removed;
+}
+
+void
+ar6000_disconnect_event(AR_SOFTC_T *ar, A_UINT8 reason, A_UINT8 *bssid,
+ A_UINT8 assocRespLen, A_UINT8 *assocInfo, A_UINT16 protocolReasonStatus)
+{
+ A_UINT8 i;
+ unsigned long flags;
+ union iwreq_data wrqu;
+
+ if(ar->arNetworkType & AP_NETWORK) {
+ union iwreq_data wrqu;
+ struct sk_buff *skb;
+
+ if(!remove_sta(ar, bssid, protocolReasonStatus)) {
+ return;
+ }
+
+ /* If there are no more associated STAs, empty the mcast PS q */
+ if (ar->sta_list_index == 0) {
+ A_MUTEX_LOCK(&ar->mcastpsqLock);
+ while (!A_NETBUF_QUEUE_EMPTY(&ar->mcastpsq)) {
+ skb = A_NETBUF_DEQUEUE(&ar->mcastpsq);
+ A_NETBUF_FREE(skb);
+ }
+ A_MUTEX_UNLOCK(&ar->mcastpsqLock);
+
+ /* Clear the LSB of the BitMapCtl field of the TIM IE */
+ if (ar->arWmiReady) {
+ wmi_set_pvb_cmd(ar->arWmi, MCAST_AID, 0);
+ }
+ }
+
+ if(!IS_MAC_BCAST(bssid)) {
+ /* Send event to application */
+ A_MEMZERO(&wrqu, sizeof(wrqu));
+ A_MEMCPY(wrqu.addr.sa_data, bssid, ATH_MAC_LEN);
+ wireless_send_event(ar->arNetDev, IWEVEXPIRED, &wrqu, NULL);
+ }
+ return;
+ }
+
+#ifdef ATH6K_CONFIG_CFG80211
+ ar6k_cfg80211_disconnect_event(ar, reason, bssid,
+ assocRespLen, assocInfo,
+ protocolReasonStatus);
+#endif /* ATH6K_CONFIG_CFG80211 */
+
+ /* Send disconnect event to supplicant */
+ A_MEMZERO(&wrqu, sizeof(wrqu));
+ wrqu.addr.sa_family = ARPHRD_ETHER;
+ wireless_send_event(ar->arNetDev, SIOCGIWAP, &wrqu, NULL);
+
+ /* it is necessary to clear the host-side rx aggregation state */
+ aggr_reset_state(ar->aggr_cntxt);
+
+ A_UNTIMEOUT(&ar->disconnect_timer);
+
+ A_PRINTF("AR6000 disconnected");
+ if (bssid[0] || bssid[1] || bssid[2] || bssid[3] || bssid[4] || bssid[5]) {
+ A_PRINTF(" from %2.2x:%2.2x:%2.2x:%2.2x:%2.2x:%2.2x ",
+ bssid[0], bssid[1], bssid[2], bssid[3], bssid[4], bssid[5]);
+ }
+
+ AR_DEBUG_PRINTF(ATH_DEBUG_WLAN_CONNECT,("\nDisconnect Reason is %d", reason));
+ AR_DEBUG_PRINTF(ATH_DEBUG_WLAN_CONNECT,("\nProtocol Reason/Status Code is %d", protocolReasonStatus));
+ AR_DEBUG_PRINTF(ATH_DEBUG_WLAN_CONNECT,("\nAssocResp Frame = %s",
+ assocRespLen ? " " : "NULL"));
+ for (i = 0; i < assocRespLen; i++) {
+ if (!(i % 0x10)) {
+ AR_DEBUG_PRINTF(ATH_DEBUG_WLAN_CONNECT,("\n"));
+ }
+ AR_DEBUG_PRINTF(ATH_DEBUG_WLAN_CONNECT,("%2.2x ", assocInfo[i]));
+ }
+ AR_DEBUG_PRINTF(ATH_DEBUG_WLAN_CONNECT,("\n"));
+ /*
+ * Only if the event is due to a disconnect cmd from the host will the
+ * target stop trying to connect. Under any other condition, the target
+ * keeps trying to connect.
+ */
+ if( reason == DISCONNECT_CMD)
+ {
+ ar->arConnectPending = FALSE;
+ if ((!ar->arUserBssFilter) && (ar->arWmiReady)) {
+ wmi_bssfilter_cmd(ar->arWmi, NONE_BSS_FILTER, 0);
+ }
+ } else {
+ ar->arConnectPending = TRUE;
+ if (((reason == ASSOC_FAILED) && (protocolReasonStatus == 0x11)) ||
+ ((reason == ASSOC_FAILED) && (protocolReasonStatus == 0x0) && (reconnect_flag == 1))) {
+ ar->arConnected = TRUE;
+ return;
+ }
+ }
+
+ if ((reason == NO_NETWORK_AVAIL) && (ar->arWmiReady))
+ {
+ bss_t *pWmiSsidnode = NULL;
+
+ /* remove the current associated bssid node */
+ wmi_free_node (ar->arWmi, bssid);
+
+ /*
+ * In case any other nodes with the same SSID are present,
+ * remove them too, since those nodes are also unavailable now.
+ */
+ do
+ {
+ /*
+ * Find the nodes based on SSID and remove them.
+ * NOTE: this will not work for a hidden SSID.
+ */
+ pWmiSsidnode = wmi_find_Ssidnode (ar->arWmi, ar->arSsid, ar->arSsidLen, FALSE, TRUE);
+
+ if (pWmiSsidnode)
+ {
+ wmi_free_node (ar->arWmi, pWmiSsidnode->ni_macaddr);
+ }
+
+ } while (pWmiSsidnode);
+ }
+
+ /* Update connect & link status atomically */
+ spin_lock_irqsave(&ar->arLock, flags);
+ ar->arConnected = FALSE;
+ netif_carrier_off(ar->arNetDev);
+ spin_unlock_irqrestore(&ar->arLock, flags);
+
+ if( (reason != CSERV_DISCONNECT) || (reconnect_flag != 1) ) {
+ reconnect_flag = 0;
+ }
+
+#ifdef USER_KEYS
+ if (reason != CSERV_DISCONNECT)
+ {
+ ar->user_savedkeys_stat = USER_SAVEDKEYS_STAT_INIT;
+ ar->user_key_ctrl = 0;
+ }
+#endif /* USER_KEYS */
+
+ netif_stop_queue(ar->arNetDev);
+ A_MEMZERO(ar->arBssid, sizeof(ar->arBssid));
+ ar->arBssChannel = 0;
+ ar->arBeaconInterval = 0;
+
+ ar6000_TxDataCleanup(ar);
+}
+
+void
+ar6000_regDomain_event(AR_SOFTC_T *ar, A_UINT32 regCode)
+{
+ A_PRINTF("AR6000 Reg Code = 0x%x\n", regCode);
+ ar->arRegCode = regCode;
+}
+
+#ifdef ATH_AR6K_11N_SUPPORT
+void
+ar6000_aggr_rcv_addba_req_evt(AR_SOFTC_T *ar, WMI_ADDBA_REQ_EVENT *evt)
+{
+ if(evt->status == 0) {
+ aggr_recv_addba_req_evt(ar->aggr_cntxt, evt->tid, evt->st_seq_no, evt->win_sz);
+ }
+}
+
+void
+ar6000_aggr_rcv_addba_resp_evt(AR_SOFTC_T *ar, WMI_ADDBA_RESP_EVENT *evt)
+{
+ A_PRINTF("ADDBA RESP. tid %d status %d, sz %d\n", evt->tid, evt->status, evt->amsdu_sz);
+ if(evt->status == 0) {
+ }
+}
+
+void
+ar6000_aggr_rcv_delba_req_evt(AR_SOFTC_T *ar, WMI_DELBA_EVENT *evt)
+{
+ aggr_recv_delba_req_evt(ar->aggr_cntxt, evt->tid);
+}
+#endif
+
+void register_pal_cb(ar6k_pal_config_t *palConfig_p)
+{
+ ar6k_pal_config_g = *palConfig_p;
+}
+
+void
+ar6000_hci_event_rcv_evt(struct ar6_softc *ar, WMI_HCI_EVENT *cmd)
+{
+ void *osbuf = NULL;
+ A_INT8 i;
+ A_UINT8 size, *buf;
+ A_STATUS ret = A_OK;
+
+ size = cmd->evt_buf_sz + 4;
+ osbuf = A_NETBUF_ALLOC(size);
+ if (osbuf == NULL) {
+ ret = A_NO_MEMORY;
+ A_PRINTF("Error in allocating netbuf \n");
+ return;
+ }
+
+ A_NETBUF_PUT(osbuf, size);
+ buf = (A_UINT8 *)A_NETBUF_DATA(osbuf);
+ /* The first 2 bytes carry the HCI event/ACL data type;
+ * the next 2 are unused.
+ */
+ *((short *)buf) = WMI_HCI_EVENT_EVENTID;
+ buf += sizeof(int);
+ A_MEMCPY(buf, cmd->buf, cmd->evt_buf_sz);
+
+ if(ar6k_pal_config_g.fpar6k_pal_recv_pkt)
+ {
+ /* pass the cmd packet to PAL driver */
+ if((*ar6k_pal_config_g.fpar6k_pal_recv_pkt)(ar->hcipal_info, osbuf) == TRUE)
+ return;
+ }
+ ar6000_deliver_frames_to_nw_stack(ar->arNetDev, osbuf);
+ if(loghci) {
+ A_PRINTF_LOG("HCI Event From PAL <-- \n");
+ for(i = 0; i < cmd->evt_buf_sz; i++) {
+ A_PRINTF_LOG("0x%02x ", cmd->buf[i]);
+ if((i % 10) == 0) {
+ A_PRINTF_LOG("\n");
+ }
+ }
+ A_PRINTF_LOG("\n");
+ A_PRINTF_LOG("==================================\n");
+ }
+}
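+
+/* Resulting netbuf layout built above (sketch; assumes a 4-byte int, which is
+ * why buf is advanced by sizeof(int) after writing the 16-bit event ID):
+ *
+ *   bytes 0-1 : WMI_HCI_EVENT_EVENTID  (written via the short * cast)
+ *   bytes 2-3 : unused padding
+ *   bytes 4.. : cmd->buf[0 .. evt_buf_sz - 1]  (raw HCI event / ACL data)
+ */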
+
+void
+ar6000_neighborReport_event(AR_SOFTC_T *ar, int numAps, WMI_NEIGHBOR_INFO *info)
+{
+#if WIRELESS_EXT >= 18
+ struct iw_pmkid_cand *pmkcand;
+#else /* WIRELESS_EXT >= 18 */
+ static const char *tag = "PRE-AUTH";
+ char buf[128];
+#endif /* WIRELESS_EXT >= 18 */
+
+ union iwreq_data wrqu;
+ int i;
+
+ AR_DEBUG_PRINTF(ATH_DEBUG_WLAN_SCAN,("AR6000 Neighbor Report Event\n"));
+ for (i=0; i < numAps; info++, i++) {
+ AR_DEBUG_PRINTF(ATH_DEBUG_WLAN_SCAN,("bssid %2.2x:%2.2x:%2.2x:%2.2x:%2.2x:%2.2x ",
+ info->bssid[0], info->bssid[1], info->bssid[2],
+ info->bssid[3], info->bssid[4], info->bssid[5]));
+ if (info->bssFlags & WMI_PREAUTH_CAPABLE_BSS) {
+ AR_DEBUG_PRINTF(ATH_DEBUG_WLAN_SCAN,("preauth-cap"));
+ }
+ if (info->bssFlags & WMI_PMKID_VALID_BSS) {
+ AR_DEBUG_PRINTF(ATH_DEBUG_WLAN_SCAN,(" pmkid-valid\n"));
+ continue; /* we skip bss if the pmkid is already valid */
+ }
+ AR_DEBUG_PRINTF(ATH_DEBUG_WLAN_SCAN,("\n"));
+ A_MEMZERO(&wrqu, sizeof(wrqu));
+#if WIRELESS_EXT >= 18
+ pmkcand = A_MALLOC_NOWAIT(sizeof(struct iw_pmkid_cand));
+ A_MEMZERO(pmkcand, sizeof(struct iw_pmkid_cand));
+ pmkcand->index = i;
+ pmkcand->flags = info->bssFlags;
+ A_MEMCPY(pmkcand->bssid.sa_data, info->bssid, ATH_MAC_LEN);
+ wrqu.data.length = sizeof(struct iw_pmkid_cand);
+ wireless_send_event(ar->arNetDev, IWEVPMKIDCAND, &wrqu, (char *)pmkcand);
+ A_FREE(pmkcand);
+#else /* WIRELESS_EXT >= 18 */
+ snprintf(buf, sizeof(buf), "%s%2.2x%2.2x%2.2x%2.2x%2.2x%2.2x%2.2x%2.2x",
+ tag,
+ info->bssid[0], info->bssid[1], info->bssid[2],
+ info->bssid[3], info->bssid[4], info->bssid[5],
+ i, info->bssFlags);
+ wrqu.data.length = strlen(buf);
+ wireless_send_event(ar->arNetDev, IWEVCUSTOM, &wrqu, buf);
+#endif /* WIRELESS_EXT >= 18 */
+ }
+}
+
+void
+ar6000_tkip_micerr_event(AR_SOFTC_T *ar, A_UINT8 keyid, A_BOOL ismcast)
+{
+ static const char *tag = "MLME-MICHAELMICFAILURE.indication";
+ char buf[128];
+ union iwreq_data wrqu;
+
+ /*
+ * For AP case, keyid will have aid of STA which sent pkt with
+ * MIC error. Use this aid to get MAC & send it to hostapd.
+ */
+ if (ar->arNetworkType == AP_NETWORK) {
+ sta_t *s = ieee80211_find_conn_for_aid(ar, (keyid >> 2));
+ if(!s){
+ A_PRINTF("AP TKIP MIC error received from Invalid aid / STA not found =%d\n", keyid);
+ return;
+ }
+ A_PRINTF("AP TKIP MIC error received from aid=%d\n", keyid);
+ snprintf(buf,sizeof(buf), "%s addr=%2.2x:%2.2x:%2.2x:%2.2x:%2.2x:%2.2x",
+ tag, s->mac[0],s->mac[1],s->mac[2],s->mac[3],s->mac[4],s->mac[5]);
+ } else {
+
+#ifdef ATH6K_CONFIG_CFG80211
+ ar6k_cfg80211_tkip_micerr_event(ar, keyid, ismcast);
+#endif /* ATH6K_CONFIG_CFG80211 */
+
+ A_PRINTF("AR6000 TKIP MIC error received for keyid %d %scast\n",
+ keyid & 0x3, ismcast ? "multi": "uni");
+ snprintf(buf, sizeof(buf), "%s(keyid=%d %sicast)", tag, keyid & 0x3,
+ ismcast ? "mult" : "un");
+ }
+
+ memset(&wrqu, 0, sizeof(wrqu));
+ wrqu.data.length = strlen(buf);
+ wireless_send_event(ar->arNetDev, IWEVCUSTOM, &wrqu, buf);
+}
+
+void
+ar6000_scanComplete_event(AR_SOFTC_T *ar, A_STATUS status)
+{
+
+#ifdef ATH6K_CONFIG_CFG80211
+ ar6k_cfg80211_scanComplete_event(ar, status);
+#endif /* ATH6K_CONFIG_CFG80211 */
+
+ if (!ar->arUserBssFilter) {
+ wmi_bssfilter_cmd(ar->arWmi, NONE_BSS_FILTER, 0);
+ }
+ if (ar->scan_triggered) {
+ if (status==A_OK) {
+ union iwreq_data wrqu;
+ A_MEMZERO(&wrqu, sizeof(wrqu));
+ wireless_send_event(ar->arNetDev, SIOCGIWSCAN, &wrqu, NULL);
+ }
+ ar->scan_triggered = 0;
+ }
+
+ AR_DEBUG_PRINTF(ATH_DEBUG_WLAN_SCAN,( "AR6000 scan complete: %d\n", status));
+}
+
+void
+ar6000_targetStats_event(AR_SOFTC_T *ar, A_UINT8 *ptr, A_UINT32 len)
+{
+ A_UINT8 ac;
+
+ if(ar->arNetworkType == AP_NETWORK) {
+ WMI_AP_MODE_STAT *p = (WMI_AP_MODE_STAT *)ptr;
+ WMI_AP_MODE_STAT *ap = &ar->arAPStats;
+
+ if (len < sizeof(*p)) {
+ return;
+ }
+
+ for(ac=0;ac<AP_MAX_NUM_STA;ac++) {
+ ap->sta[ac].tx_bytes += p->sta[ac].tx_bytes;
+ ap->sta[ac].tx_pkts += p->sta[ac].tx_pkts;
+ ap->sta[ac].tx_error += p->sta[ac].tx_error;
+ ap->sta[ac].tx_discard += p->sta[ac].tx_discard;
+ ap->sta[ac].rx_bytes += p->sta[ac].rx_bytes;
+ ap->sta[ac].rx_pkts += p->sta[ac].rx_pkts;
+ ap->sta[ac].rx_error += p->sta[ac].rx_error;
+ ap->sta[ac].rx_discard += p->sta[ac].rx_discard;
+ }
+
+ } else {
+ WMI_TARGET_STATS *pTarget = (WMI_TARGET_STATS *)ptr;
+ TARGET_STATS *pStats = &ar->arTargetStats;
+
+ if (len < sizeof(*pTarget)) {
+ return;
+ }
+
+ // Update the RSSI of the connected bss.
+ if (ar->arConnected) {
+ bss_t *pConnBss = NULL;
+
+ pConnBss = wmi_find_node(ar->arWmi,ar->arBssid);
+ if (pConnBss)
+ {
+ pConnBss->ni_rssi = pTarget->cservStats.cs_aveBeacon_rssi;
+ pConnBss->ni_snr = pTarget->cservStats.cs_aveBeacon_snr;
+ wmi_node_return(ar->arWmi, pConnBss);
+ }
+ }
+
+ AR_DEBUG_PRINTF(ATH_DEBUG_INFO,("AR6000 updating target stats\n"));
+ pStats->tx_packets += pTarget->txrxStats.tx_stats.tx_packets;
+ pStats->tx_bytes += pTarget->txrxStats.tx_stats.tx_bytes;
+ pStats->tx_unicast_pkts += pTarget->txrxStats.tx_stats.tx_unicast_pkts;
+ pStats->tx_unicast_bytes += pTarget->txrxStats.tx_stats.tx_unicast_bytes;
+ pStats->tx_multicast_pkts += pTarget->txrxStats.tx_stats.tx_multicast_pkts;
+ pStats->tx_multicast_bytes += pTarget->txrxStats.tx_stats.tx_multicast_bytes;
+ pStats->tx_broadcast_pkts += pTarget->txrxStats.tx_stats.tx_broadcast_pkts;
+ pStats->tx_broadcast_bytes += pTarget->txrxStats.tx_stats.tx_broadcast_bytes;
+ pStats->tx_rts_success_cnt += pTarget->txrxStats.tx_stats.tx_rts_success_cnt;
+ for(ac = 0; ac < WMM_NUM_AC; ac++)
+ pStats->tx_packet_per_ac[ac] += pTarget->txrxStats.tx_stats.tx_packet_per_ac[ac];
+ pStats->tx_errors += pTarget->txrxStats.tx_stats.tx_errors;
+ pStats->tx_failed_cnt += pTarget->txrxStats.tx_stats.tx_failed_cnt;
+ pStats->tx_retry_cnt += pTarget->txrxStats.tx_stats.tx_retry_cnt;
+ pStats->tx_mult_retry_cnt += pTarget->txrxStats.tx_stats.tx_mult_retry_cnt;
+ pStats->tx_rts_fail_cnt += pTarget->txrxStats.tx_stats.tx_rts_fail_cnt;
+ pStats->tx_unicast_rate = wmi_get_rate(pTarget->txrxStats.tx_stats.tx_unicast_rate);
+
+ pStats->rx_packets += pTarget->txrxStats.rx_stats.rx_packets;
+ pStats->rx_bytes += pTarget->txrxStats.rx_stats.rx_bytes;
+ pStats->rx_unicast_pkts += pTarget->txrxStats.rx_stats.rx_unicast_pkts;
+ pStats->rx_unicast_bytes += pTarget->txrxStats.rx_stats.rx_unicast_bytes;
+ pStats->rx_multicast_pkts += pTarget->txrxStats.rx_stats.rx_multicast_pkts;
+ pStats->rx_multicast_bytes += pTarget->txrxStats.rx_stats.rx_multicast_bytes;
+ pStats->rx_broadcast_pkts += pTarget->txrxStats.rx_stats.rx_broadcast_pkts;
+ pStats->rx_broadcast_bytes += pTarget->txrxStats.rx_stats.rx_broadcast_bytes;
+ pStats->rx_fragment_pkt += pTarget->txrxStats.rx_stats.rx_fragment_pkt;
+ pStats->rx_errors += pTarget->txrxStats.rx_stats.rx_errors;
+ pStats->rx_crcerr += pTarget->txrxStats.rx_stats.rx_crcerr;
+ pStats->rx_key_cache_miss += pTarget->txrxStats.rx_stats.rx_key_cache_miss;
+ pStats->rx_decrypt_err += pTarget->txrxStats.rx_stats.rx_decrypt_err;
+ pStats->rx_duplicate_frames += pTarget->txrxStats.rx_stats.rx_duplicate_frames;
+ pStats->rx_unicast_rate = wmi_get_rate(pTarget->txrxStats.rx_stats.rx_unicast_rate);
+
+
+ pStats->tkip_local_mic_failure
+ += pTarget->txrxStats.tkipCcmpStats.tkip_local_mic_failure;
+ pStats->tkip_counter_measures_invoked
+ += pTarget->txrxStats.tkipCcmpStats.tkip_counter_measures_invoked;
+ pStats->tkip_replays += pTarget->txrxStats.tkipCcmpStats.tkip_replays;
+ pStats->tkip_format_errors += pTarget->txrxStats.tkipCcmpStats.tkip_format_errors;
+ pStats->ccmp_format_errors += pTarget->txrxStats.tkipCcmpStats.ccmp_format_errors;
+ pStats->ccmp_replays += pTarget->txrxStats.tkipCcmpStats.ccmp_replays;
+
+ pStats->power_save_failure_cnt += pTarget->pmStats.power_save_failure_cnt;
+ pStats->noise_floor_calibation = pTarget->noise_floor_calibation;
+
+ pStats->cs_bmiss_cnt += pTarget->cservStats.cs_bmiss_cnt;
+ pStats->cs_lowRssi_cnt += pTarget->cservStats.cs_lowRssi_cnt;
+ pStats->cs_connect_cnt += pTarget->cservStats.cs_connect_cnt;
+ pStats->cs_disconnect_cnt += pTarget->cservStats.cs_disconnect_cnt;
+ pStats->cs_aveBeacon_snr = pTarget->cservStats.cs_aveBeacon_snr;
+ pStats->cs_aveBeacon_rssi = pTarget->cservStats.cs_aveBeacon_rssi;
+
+ if (enablerssicompensation) {
+ pStats->cs_aveBeacon_rssi =
+ rssi_compensation_calc(ar, pStats->cs_aveBeacon_rssi);
+ }
+ pStats->cs_lastRoam_msec = pTarget->cservStats.cs_lastRoam_msec;
+ pStats->cs_snr = pTarget->cservStats.cs_snr;
+ pStats->cs_rssi = pTarget->cservStats.cs_rssi;
+
+ pStats->lq_val = pTarget->lqVal;
+
+ pStats->wow_num_pkts_dropped += pTarget->wowStats.wow_num_pkts_dropped;
+ pStats->wow_num_host_pkt_wakeups += pTarget->wowStats.wow_num_host_pkt_wakeups;
+ pStats->wow_num_host_event_wakeups += pTarget->wowStats.wow_num_host_event_wakeups;
+ pStats->wow_num_events_discarded += pTarget->wowStats.wow_num_events_discarded;
+ pStats->arp_received += pTarget->arpStats.arp_received;
+ pStats->arp_matched += pTarget->arpStats.arp_matched;
+ pStats->arp_replied += pTarget->arpStats.arp_replied;
+
+ if (ar->statsUpdatePending) {
+ ar->statsUpdatePending = FALSE;
+ wake_up(&arEvent);
+ }
+ }
+}
+
+void
+ar6000_rssiThreshold_event(AR_SOFTC_T *ar, WMI_RSSI_THRESHOLD_VAL newThreshold, A_INT16 rssi)
+{
+ USER_RSSI_THOLD userRssiThold;
+
+ rssi = rssi + SIGNAL_QUALITY_NOISE_FLOOR;
+
+ if (enablerssicompensation) {
+ rssi = rssi_compensation_calc(ar, rssi);
+ }
+
+ /* Send an event to the app */
+ userRssiThold.tag = ar->rssi_map[newThreshold].tag;
+ userRssiThold.rssi = rssi;
+ A_PRINTF("rssi Threshold range = %d tag = %d rssi = %d\n", newThreshold,
+ userRssiThold.tag, userRssiThold.rssi);
+
+ ar6000_send_event_to_app(ar, WMI_RSSI_THRESHOLD_EVENTID,(A_UINT8 *)&userRssiThold, sizeof(USER_RSSI_THOLD));
+}
+
+
+void
+ar6000_hbChallengeResp_event(AR_SOFTC_T *ar, A_UINT32 cookie, A_UINT32 source)
+{
+ if (source == APP_HB_CHALLENGE) {
+ /* Report it to the app in case it wants a positive acknowledgement */
+ ar6000_send_event_to_app(ar, WMIX_HB_CHALLENGE_RESP_EVENTID,
+ (A_UINT8 *)&cookie, sizeof(cookie));
+ } else {
+ /* This ignores replies that come in after their due time */
+ if (cookie == ar->arHBChallengeResp.seqNum) {
+ ar->arHBChallengeResp.outstanding = FALSE;
+ }
+ }
+}
+
+
+void
+ar6000_reportError_event(AR_SOFTC_T *ar, WMI_TARGET_ERROR_VAL errorVal)
+{
+ static const char * const errString[] = {
+ [WMI_TARGET_PM_ERR_FAIL] = "WMI_TARGET_PM_ERR_FAIL",
+ [WMI_TARGET_KEY_NOT_FOUND] = "WMI_TARGET_KEY_NOT_FOUND",
+ [WMI_TARGET_DECRYPTION_ERR] = "WMI_TARGET_DECRYPTION_ERR",
+ [WMI_TARGET_BMISS] = "WMI_TARGET_BMISS",
+ [WMI_PSDISABLE_NODE_JOIN] = "WMI_PSDISABLE_NODE_JOIN"
+ };
+
+ A_PRINTF("AR6000 Error on Target. Error = 0x%x\n", errorVal);
+
+ /* One error is reported at a time, and errorval is a bitmask */
+ if(errorVal & (errorVal - 1))
+ return;
+
+ A_PRINTF("AR6000 Error type = ");
+ switch(errorVal)
+ {
+ case WMI_TARGET_PM_ERR_FAIL:
+ case WMI_TARGET_KEY_NOT_FOUND:
+ case WMI_TARGET_DECRYPTION_ERR:
+ case WMI_TARGET_BMISS:
+ case WMI_PSDISABLE_NODE_JOIN:
+ A_PRINTF("%s\n", errString[errorVal]);
+ break;
+ default:
+ A_PRINTF("INVALID\n");
+ break;
+ }
+
+}
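+
+/* Worked example of the single-bit test above (values illustrative): the
+ * expression errorVal & (errorVal - 1) is zero only when exactly one bit of
+ * the bitmask is set.
+ *
+ *   errorVal = 0x04  ->  0x04 & 0x03 == 0      one error, report it
+ *   errorVal = 0x06  ->  0x06 & 0x05 == 0x04   two errors, bail out early
+ */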
+
+
+void
+ar6000_cac_event(AR_SOFTC_T *ar, A_UINT8 ac, A_UINT8 cacIndication,
+ A_UINT8 statusCode, A_UINT8 *tspecSuggestion)
+{
+ WMM_TSPEC_IE *tspecIe;
+
+ /*
+ * This is the TSPEC IE suggestion from the AP.
+ * The AP provides this suggestion in some error cases;
+ * it can be helpful for the host app.
+ * Check the documentation.
+ */
+ tspecIe = (WMM_TSPEC_IE *)tspecSuggestion;
+
+ /*
+ * What do we do if we get a TSPEC rejection? One thought
+ * that comes to mind is to implicitly delete the pstream...
+ */
+ A_PRINTF("AR6000 CAC notification. "
+ "AC = %d, cacIndication = 0x%x, statusCode = 0x%x\n",
+ ac, cacIndication, statusCode);
+}
+
+void
+ar6000_channel_change_event(AR_SOFTC_T *ar, A_UINT16 oldChannel,
+ A_UINT16 newChannel)
+{
+ A_PRINTF("Channel Change notification\nOld Channel: %d, New Channel: %d\n",
+ oldChannel, newChannel);
+}
+
+#define AR6000_PRINT_BSSID(_pBss) do { \
+ A_PRINTF("%2.2x:%2.2x:%2.2x:%2.2x:%2.2x:%2.2x ",\
+ (_pBss)[0],(_pBss)[1],(_pBss)[2],(_pBss)[3],\
+ (_pBss)[4],(_pBss)[5]); \
+} while(0)
+
+void
+ar6000_roam_tbl_event(AR_SOFTC_T *ar, WMI_TARGET_ROAM_TBL *pTbl)
+{
+ A_UINT8 i;
+
+ A_PRINTF("ROAM TABLE NO OF ENTRIES is %d ROAM MODE is %d\n",
+ pTbl->numEntries, pTbl->roamMode);
+ for (i= 0; i < pTbl->numEntries; i++) {
+ A_PRINTF("[%d]bssid %2.2x:%2.2x:%2.2x:%2.2x:%2.2x:%2.2x ", i,
+ pTbl->bssRoamInfo[i].bssid[0], pTbl->bssRoamInfo[i].bssid[1],
+ pTbl->bssRoamInfo[i].bssid[2],
+ pTbl->bssRoamInfo[i].bssid[3],
+ pTbl->bssRoamInfo[i].bssid[4],
+ pTbl->bssRoamInfo[i].bssid[5]);
+ A_PRINTF("RSSI %d RSSIDT %d LAST RSSI %d UTIL %d ROAM_UTIL %d"
+ " BIAS %d\n",
+ pTbl->bssRoamInfo[i].rssi,
+ pTbl->bssRoamInfo[i].rssidt,
+ pTbl->bssRoamInfo[i].last_rssi,
+ pTbl->bssRoamInfo[i].util,
+ pTbl->bssRoamInfo[i].roam_util,
+ pTbl->bssRoamInfo[i].bias);
+ }
+}
+
+void
+ar6000_wow_list_event(struct ar6_softc *ar, A_UINT8 num_filters, WMI_GET_WOW_LIST_REPLY *wow_reply)
+{
+ A_UINT8 i,j;
+
+ /* Each event now contains exactly one filter; see bug 26613 */
+ A_PRINTF("WOW pattern %d of %d patterns\n", wow_reply->this_filter_num, wow_reply->num_filters);
+ A_PRINTF("wow mode = %s host mode = %s\n",
+ (wow_reply->wow_mode == 0? "disabled":"enabled"),
+ (wow_reply->host_mode == 1 ? "awake":"asleep"));
+
+
+ /* If there are no patterns, the reply contains only generic WoW
+ information; pattern information is present only when patterns
+ exist. Bug 26716 */
+
+ /* If this event contains pattern information, display it*/
+ if (wow_reply->this_filter_num) {
+ i=0;
+ A_PRINTF("id=%d size=%d offset=%d\n",
+ wow_reply->wow_filters[i].wow_filter_id,
+ wow_reply->wow_filters[i].wow_filter_size,
+ wow_reply->wow_filters[i].wow_filter_offset);
+ A_PRINTF("wow pattern = ");
+ for (j=0; j< wow_reply->wow_filters[i].wow_filter_size; j++) {
+ A_PRINTF("%2.2x",wow_reply->wow_filters[i].wow_filter_pattern[j]);
+ }
+
+ A_PRINTF("\nwow mask = ");
+ for (j=0; j< wow_reply->wow_filters[i].wow_filter_size; j++) {
+ A_PRINTF("%2.2x",wow_reply->wow_filters[i].wow_filter_mask[j]);
+ }
+ A_PRINTF("\n");
+ }
+}
+
+/*
+ * Report the Roaming related data collected on the target
+ */
+void
+ar6000_display_roam_time(WMI_TARGET_ROAM_TIME *p)
+{
+ A_PRINTF("Disconnect Data : BSSID: ");
+ AR6000_PRINT_BSSID(p->disassoc_bssid);
+ A_PRINTF(" RSSI %d DISASSOC Time %d NO_TXRX_TIME %d\n",
+ p->disassoc_bss_rssi,p->disassoc_time,
+ p->no_txrx_time);
+ A_PRINTF("Connect Data: BSSID: ");
+ AR6000_PRINT_BSSID(p->assoc_bssid);
+ A_PRINTF(" RSSI %d ASSOC Time %d TXRX_TIME %d\n",
+ p->assoc_bss_rssi,p->assoc_time,
+ p->allow_txrx_time);
+}
+
+void
+ar6000_roam_data_event(AR_SOFTC_T *ar, WMI_TARGET_ROAM_DATA *p)
+{
+ switch (p->roamDataType) {
+ case ROAM_DATA_TIME:
+ ar6000_display_roam_time(&p->u.roamTime);
+ break;
+ default:
+ break;
+ }
+}
+
+void
+ar6000_bssInfo_event_rx(AR_SOFTC_T *ar, A_UINT8 *datap, int len)
+{
+ struct sk_buff *skb;
+ WMI_BSS_INFO_HDR *bih = (WMI_BSS_INFO_HDR *)datap;
+
+
+ if (!ar->arMgmtFilter) {
+ return;
+ }
+ if (((ar->arMgmtFilter & IEEE80211_FILTER_TYPE_BEACON) &&
+ (bih->frameType != BEACON_FTYPE)) ||
+ ((ar->arMgmtFilter & IEEE80211_FILTER_TYPE_PROBE_RESP) &&
+ (bih->frameType != PROBERESP_FTYPE)))
+ {
+ return;
+ }
+
+ if ((skb = A_NETBUF_ALLOC_RAW(len)) != NULL) {
+
+ A_NETBUF_PUT(skb, len);
+ A_MEMCPY(A_NETBUF_DATA(skb), datap, len);
+ skb->dev = ar->arNetDev;
+ A_MEMCPY(skb_mac_header(skb), A_NETBUF_DATA(skb), 6);
+ skb->ip_summed = CHECKSUM_NONE;
+ skb->pkt_type = PACKET_OTHERHOST;
+ skb->protocol = __constant_htons(0x0019);
+ netif_rx(skb);
+ }
+}
+
+A_UINT32 wmiSendCmdNum;
+
+A_STATUS
+ar6000_control_tx(void *devt, void *osbuf, HTC_ENDPOINT_ID eid)
+{
+ AR_SOFTC_T *ar = (AR_SOFTC_T *)devt;
+ A_STATUS status = A_OK;
+ struct ar_cookie *cookie = NULL;
+ int i;
+#ifdef CONFIG_PM
+ if (ar->arWowState != WLAN_WOW_STATE_NONE) {
+ A_NETBUF_FREE(osbuf);
+ return A_EACCES;
+ }
+#endif /* CONFIG_PM */
+ /* take lock to protect ar6000_alloc_cookie() */
+ AR6000_SPIN_LOCK(&ar->arLock, 0);
+
+ do {
+
+ AR_DEBUG_PRINTF(ATH_DEBUG_WLAN_TX,("ar6000_control_tx: skb=0x%lx, len=0x%x eid =%d\n",
+ (unsigned long)osbuf, A_NETBUF_LEN(osbuf), eid));
+
+ if (ar->arWMIControlEpFull && (eid == ar->arControlEp)) {
+ /* control endpoint is full, don't allocate resources, we
+ * are just going to drop this packet */
+ cookie = NULL;
+ AR_DEBUG_PRINTF(ATH_DEBUG_ERR,(" WMI Control EP full, dropping packet : 0x%lX, len:%d \n",
+ (unsigned long)osbuf, A_NETBUF_LEN(osbuf)));
+ } else {
+ cookie = ar6000_alloc_cookie(ar);
+ }
+
+ if (cookie == NULL) {
+ status = A_NO_MEMORY;
+ break;
+ }
+
+ if(logWmiRawMsgs) {
+ A_PRINTF("WMI cmd send, msgNo %d :", wmiSendCmdNum);
+ for(i = 0; i < a_netbuf_to_len(osbuf); i++)
+ A_PRINTF("%x ", ((A_UINT8 *)a_netbuf_to_data(osbuf))[i]);
+ A_PRINTF("\n");
+ }
+
+ wmiSendCmdNum++;
+
+ } while (FALSE);
+
+ if (cookie != NULL) {
+ /* got a structure to send it out on */
+ ar->arTxPending[eid]++;
+
+ if (eid != ar->arControlEp) {
+ ar->arTotalTxDataPending++;
+ }
+ }
+
+ AR6000_SPIN_UNLOCK(&ar->arLock, 0);
+
+ if (cookie != NULL) {
+ cookie->arc_bp[0] = (unsigned long)osbuf;
+ cookie->arc_bp[1] = 0;
+ SET_HTC_PACKET_INFO_TX(&cookie->HtcPkt,
+ cookie,
+ A_NETBUF_DATA(osbuf),
+ A_NETBUF_LEN(osbuf),
+ eid,
+ AR6K_CONTROL_PKT_TAG);
+ /* this interface is asynchronous, if there is an error, cleanup will happen in the
+ * TX completion callback */
+ HTCSendPkt(ar->arHtcTarget, &cookie->HtcPkt);
+ status = A_OK;
+ }
+
+ if (status != A_OK) {
+ A_NETBUF_FREE(osbuf);
+ }
+ return status;
+}
+
+/* indicate tx activity or inactivity on a WMI stream */
+void ar6000_indicate_tx_activity(void *devt, A_UINT8 TrafficClass, A_BOOL Active)
+{
+ AR_SOFTC_T *ar = (AR_SOFTC_T *)devt;
+ HTC_ENDPOINT_ID eid ;
+ int i;
+
+ if (ar->arWmiEnabled) {
+ eid = arAc2EndpointID(ar, TrafficClass);
+
+ AR6000_SPIN_LOCK(&ar->arLock, 0);
+
+ ar->arAcStreamActive[TrafficClass] = Active;
+
+ if (Active) {
+ /* when a stream goes active, keep track of the active stream with the highest priority */
+
+ if (ar->arAcStreamPriMap[TrafficClass] > ar->arHiAcStreamActivePri) {
+ /* set the new highest active priority */
+ ar->arHiAcStreamActivePri = ar->arAcStreamPriMap[TrafficClass];
+ }
+
+ } else {
+ /* when a stream goes inactive, we may have to search for the next active stream
+ * that is the highest priority */
+
+ if (ar->arHiAcStreamActivePri == ar->arAcStreamPriMap[TrafficClass]) {
+
+ /* the highest priority stream just went inactive */
+
+ /* reset and search for the "next" highest "active" priority stream */
+ ar->arHiAcStreamActivePri = 0;
+ for (i = 0; i < WMM_NUM_AC; i++) {
+ if (ar->arAcStreamActive[i]) {
+ if (ar->arAcStreamPriMap[i] > ar->arHiAcStreamActivePri) {
+ /* set the new highest active priority */
+ ar->arHiAcStreamActivePri = ar->arAcStreamPriMap[i];
+ }
+ }
+ }
+ }
+ }
+
+ AR6000_SPIN_UNLOCK(&ar->arLock, 0);
+
+ } else {
+ /* for mbox ping testing, the traffic class is mapped directly to a stream ID;
+ * see the handling of AR6000_XIOCTL_TRAFFIC_ACTIVITY_CHANGE in ioctl.c.
+ * Convert the stream ID to an endpoint. */
+ eid = arAc2EndpointID(ar, TrafficClass);
+ }
+
+ /* notify HTC, this may cause credit distribution changes */
+
+ HTCIndicateActivityChange(ar->arHtcTarget,
+ eid,
+ Active);
+
+}
+
+void
+ar6000_btcoex_config_event(struct ar6_softc *ar, A_UINT8 *ptr, A_UINT32 len)
+{
+
+ WMI_BTCOEX_CONFIG_EVENT *pBtcoexConfig = (WMI_BTCOEX_CONFIG_EVENT *)ptr;
+ WMI_BTCOEX_CONFIG_EVENT *pArbtcoexConfig =&ar->arBtcoexConfig;
+
+ AR_DEBUG_PRINTF(ATH_DEBUG_INFO,("AR6000 BTCOEX CONFIG EVENT \n"));
+
+ A_PRINTF("received config event\n");
+ pArbtcoexConfig->btProfileType = pBtcoexConfig->btProfileType;
+ pArbtcoexConfig->linkId = pBtcoexConfig->linkId;
+
+ switch (pBtcoexConfig->btProfileType) {
+ case WMI_BTCOEX_BT_PROFILE_SCO:
+ A_MEMCPY(&pArbtcoexConfig->info.scoConfigCmd, &pBtcoexConfig->info.scoConfigCmd,
+ sizeof(WMI_SET_BTCOEX_SCO_CONFIG_CMD));
+ break;
+ case WMI_BTCOEX_BT_PROFILE_A2DP:
+ A_MEMCPY(&pArbtcoexConfig->info.a2dpConfigCmd, &pBtcoexConfig->info.a2dpConfigCmd,
+ sizeof(WMI_SET_BTCOEX_A2DP_CONFIG_CMD));
+ break;
+ case WMI_BTCOEX_BT_PROFILE_ACLCOEX:
+ A_MEMCPY(&pArbtcoexConfig->info.aclcoexConfig, &pBtcoexConfig->info.aclcoexConfig,
+ sizeof(WMI_SET_BTCOEX_ACLCOEX_CONFIG_CMD));
+ break;
+ case WMI_BTCOEX_BT_PROFILE_INQUIRY_PAGE:
+ A_MEMCPY(&pArbtcoexConfig->info.btinquiryPageConfigCmd, &pBtcoexConfig->info.btinquiryPageConfigCmd,
+ sizeof(WMI_SET_BTCOEX_ACLCOEX_CONFIG_CMD));
+ break;
+ }
+ if (ar->statsUpdatePending) {
+ ar->statsUpdatePending = FALSE;
+ wake_up(&arEvent);
+ }
+}
+
+void
+ar6000_btcoex_stats_event(struct ar6_softc *ar, A_UINT8 *ptr, A_UINT32 len)
+{
+ WMI_BTCOEX_STATS_EVENT *pBtcoexStats = (WMI_BTCOEX_STATS_EVENT *)ptr;
+
+ AR_DEBUG_PRINTF(ATH_DEBUG_INFO,("AR6000 BTCOEX STATS EVENT \n"));
+
+ A_MEMCPY(&ar->arBtcoexStats, pBtcoexStats, sizeof(WMI_BTCOEX_STATS_EVENT));
+
+ if (ar->statsUpdatePending) {
+ ar->statsUpdatePending = FALSE;
+ wake_up(&arEvent);
+ }
+
+}
+module_init(ar6000_init_module);
+module_exit(ar6000_cleanup_module);
+
+/* Init cookie queue */
+static void
+ar6000_cookie_init(AR_SOFTC_T *ar)
+{
+ A_UINT32 i;
+
+ ar->arCookieList = NULL;
+ ar->arCookieCount = 0;
+
+ A_MEMZERO(s_ar_cookie_mem, sizeof(s_ar_cookie_mem));
+
+ for (i = 0; i < MAX_COOKIE_NUM; i++) {
+ ar6000_free_cookie(ar, &s_ar_cookie_mem[i]);
+ }
+}
+
+/* cleanup cookie queue */
+static void
+ar6000_cookie_cleanup(AR_SOFTC_T *ar)
+{
+ /* It is gone .... */
+ ar->arCookieList = NULL;
+ ar->arCookieCount = 0;
+}
+
+/* Return a cookie to the free list */
+static void
+ar6000_free_cookie(AR_SOFTC_T *ar, struct ar_cookie * cookie)
+{
+ /* Insert first */
+ A_ASSERT(ar != NULL);
+ A_ASSERT(cookie != NULL);
+
+ cookie->arc_list_next = ar->arCookieList;
+ ar->arCookieList = cookie;
+ ar->arCookieCount++;
+}
+
+/* Allocate a cookie from the free list */
+static struct ar_cookie *
+ar6000_alloc_cookie(AR_SOFTC_T *ar)
+{
+ struct ar_cookie *cookie;
+
+ cookie = ar->arCookieList;
+ if(cookie != NULL)
+ {
+ ar->arCookieList = cookie->arc_list_next;
+ ar->arCookieCount--;
+ }
+
+ return cookie;
+}
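+
+/* Typical caller pattern (sketch; mirrors ar6000_control_tx() above).  The
+ * free list is a simple singly linked stack, so alloc/free must be serialized
+ * by ar->arLock:
+ *
+ *   AR6000_SPIN_LOCK(&ar->arLock, 0);
+ *   cookie = ar6000_alloc_cookie(ar);      // pop the head of the free list
+ *   AR6000_SPIN_UNLOCK(&ar->arLock, 0);
+ *   ...attach the cookie to an HTC packet and send it...
+ *   AR6000_SPIN_LOCK(&ar->arLock, 0);
+ *   ar6000_free_cookie(ar, cookie);        // push it back on completion
+ *   AR6000_SPIN_UNLOCK(&ar->arLock, 0);
+ */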
+
+#ifdef SEND_EVENT_TO_APP
+/*
+ * This function is used to send events that come from the target to
+ * the application. The buffer sent to the application includes the
+ * event ID and the event content.
+ */
+#define EVENT_ID_LEN 2
+void ar6000_send_event_to_app(AR_SOFTC_T *ar, A_UINT16 eventId,
+ A_UINT8 *datap, int len)
+{
+
+#if (WIRELESS_EXT >= 15)
+
+/* note: IWEVCUSTOM only exists in wireless extensions version 15 and later */
+
+ char *buf;
+ A_UINT16 size;
+ union iwreq_data wrqu;
+
+ size = len + EVENT_ID_LEN;
+
+ if (size > IW_CUSTOM_MAX) {
+ AR_DEBUG_PRINTF(ATH_DEBUG_ERR,("WMI event ID : 0x%4.4X, len = %d too big for IWEVCUSTOM (max=%d) \n",
+ eventId, size, IW_CUSTOM_MAX));
+ return;
+ }
+
+ buf = A_MALLOC_NOWAIT(size);
+ if (NULL == buf){
+ AR_DEBUG_PRINTF(ATH_DEBUG_ERR,("%s: failed to allocate %d bytes\n", __func__, size));
+ return;
+ }
+
+ A_MEMZERO(buf, size);
+ A_MEMCPY(buf, &eventId, EVENT_ID_LEN);
+ A_MEMCPY(buf+EVENT_ID_LEN, datap, len);
+
+ //AR_DEBUG_PRINTF(ATH_DEBUG_INFO,("event ID = %d,len = %d\n",*(A_UINT16*)buf, size));
+ A_MEMZERO(&wrqu, sizeof(wrqu));
+ wrqu.data.length = size;
+ wireless_send_event(ar->arNetDev, IWEVCUSTOM, &wrqu, buf);
+ A_FREE(buf);
+#endif
+
+
+}
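+
+/* Layout of the IWEVCUSTOM payload built above (sketch): an application
+ * listening for wireless events sees EVENT_ID_LEN (2) bytes of WMI event ID
+ * in host byte order, immediately followed by the raw event body.
+ *
+ *   bytes 0-1 : eventId
+ *   bytes 2.. : datap[0 .. len - 1]
+ */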
+
+/*
+ * This function is used to send events larger than 256 bytes
+ * to the application. The buf which is sent to application
+ * includes the event ID and event content.
+ */
+void ar6000_send_generic_event_to_app(AR_SOFTC_T *ar, A_UINT16 eventId,
+ A_UINT8 *datap, int len)
+{
+
+#if (WIRELESS_EXT >= 18)
+
+/* IWEVGENIE exists in wireless extensions version 18 onwards */
+
+ char *buf;
+ A_UINT16 size;
+ union iwreq_data wrqu;
+
+ size = len + EVENT_ID_LEN;
+
+ if (size > IW_GENERIC_IE_MAX) {
+ AR_DEBUG_PRINTF(ATH_DEBUG_ERR,("WMI event ID : 0x%4.4X, len = %d too big for IWEVGENIE (max=%d) \n",
+ eventId, size, IW_GENERIC_IE_MAX));
+ return;
+ }
+
+ buf = A_MALLOC_NOWAIT(size);
+ if (NULL == buf){
+ AR_DEBUG_PRINTF(ATH_DEBUG_ERR,("%s: failed to allocate %d bytes\n", __func__, size));
+ return;
+ }
+
+ A_MEMZERO(buf, size);
+ A_MEMCPY(buf, &eventId, EVENT_ID_LEN);
+ A_MEMCPY(buf+EVENT_ID_LEN, datap, len);
+
+ A_MEMZERO(&wrqu, sizeof(wrqu));
+ wrqu.data.length = size;
+ wireless_send_event(ar->arNetDev, IWEVGENIE, &wrqu, buf);
+
+ A_FREE(buf);
+
+#endif /* (WIRELESS_EXT >= 18) */
+
+}
+#endif /* SEND_EVENT_TO_APP */
+
+
+void
+ar6000_tx_retry_err_event(void *devt)
+{
+ AR_DEBUG_PRINTF(ATH_DEBUG_ERR,("Tx retries reach maximum!\n"));
+}
+
+void
+ar6000_snrThresholdEvent_rx(void *devt, WMI_SNR_THRESHOLD_VAL newThreshold, A_UINT8 snr)
+{
+ WMI_SNR_THRESHOLD_EVENT event;
+ AR_SOFTC_T *ar = (AR_SOFTC_T *)devt;
+
+ event.range = newThreshold;
+ event.snr = snr;
+
+ ar6000_send_event_to_app(ar, WMI_SNR_THRESHOLD_EVENTID, (A_UINT8 *)&event,
+ sizeof(WMI_SNR_THRESHOLD_EVENT));
+}
+
+void
+ar6000_lqThresholdEvent_rx(void *devt, WMI_LQ_THRESHOLD_VAL newThreshold, A_UINT8 lq)
+{
+ AR_DEBUG_PRINTF(ATH_DEBUG_INFO,("lq threshold range %d, lq %d\n", newThreshold, lq));
+}
+
+
+
+A_UINT32
+a_copy_to_user(void *to, const void *from, A_UINT32 n)
+{
+ return(copy_to_user(to, from, n));
+}
+
+A_UINT32
+a_copy_from_user(void *to, const void *from, A_UINT32 n)
+{
+ return(copy_from_user(to, from, n));
+}
+
+
+A_STATUS
+ar6000_get_driver_cfg(struct net_device *dev,
+ A_UINT16 cfgParam,
+ void *result)
+{
+
+ A_STATUS ret = 0;
+
+ switch(cfgParam)
+ {
+ case AR6000_DRIVER_CFG_GET_WLANNODECACHING:
+ *((A_UINT32 *)result) = wlanNodeCaching;
+ break;
+ case AR6000_DRIVER_CFG_LOG_RAW_WMI_MSGS:
+ *((A_UINT32 *)result) = logWmiRawMsgs;
+ break;
+ default:
+ ret = EINVAL;
+ break;
+ }
+
+ return ret;
+}
+
+void
+ar6000_keepalive_rx(void *devt, A_UINT8 configured)
+{
+ AR_SOFTC_T *ar = (AR_SOFTC_T *)devt;
+
+ ar->arKeepaliveConfigured = configured;
+ wake_up(&arEvent);
+}
+
+void
+ar6000_pmkid_list_event(void *devt, A_UINT8 numPMKID, WMI_PMKID *pmkidList,
+ A_UINT8 *bssidList)
+{
+ A_UINT8 i, j;
+
+ A_PRINTF("Number of Cached PMKIDs is %d\n", numPMKID);
+
+ for (i = 0; i < numPMKID; i++) {
+ A_PRINTF("\nBSSID %d ", i);
+ for (j = 0; j < ATH_MAC_LEN; j++) {
+ A_PRINTF("%2.2x", bssidList[j]);
+ }
+ bssidList += (ATH_MAC_LEN + WMI_PMKID_LEN);
+ A_PRINTF("\nPMKID %d ", i);
+ for (j = 0; j < WMI_PMKID_LEN; j++) {
+ A_PRINTF("%2.2x", pmkidList->pmkid[j]);
+ }
+ pmkidList = (WMI_PMKID *)((A_UINT8 *)pmkidList + ATH_MAC_LEN +
+ WMI_PMKID_LEN);
+ }
+}
+
+void ar6000_pspoll_event(AR_SOFTC_T *ar,A_UINT8 aid)
+{
+ sta_t *conn=NULL;
+ A_BOOL isPsqEmpty = FALSE;
+
+ conn = ieee80211_find_conn_for_aid(ar, aid);
+
+ /* If the PS q for this STA is not empty, dequeue and send a pkt from
+ * the head of the q. Also update the More data bit in the WMI_DATA_HDR
+ * if there are more pkts for this STA in the PS q. If there are no more
+ * pkts for this STA, update the PVB for this STA.
+ */
+ A_MUTEX_LOCK(&conn->psqLock);
+ isPsqEmpty = A_NETBUF_QUEUE_EMPTY(&conn->psq);
+ A_MUTEX_UNLOCK(&conn->psqLock);
+
+ if (isPsqEmpty) {
+ /* TODO: No buffered pkts for this STA. Send out a NULL data frame */
+ } else {
+ struct sk_buff *skb = NULL;
+
+ A_MUTEX_LOCK(&conn->psqLock);
+ skb = A_NETBUF_DEQUEUE(&conn->psq);
+ A_MUTEX_UNLOCK(&conn->psqLock);
+ /* Set the STA flag to PSPolled, so that the frame will go out */
+ STA_SET_PS_POLLED(conn);
+ ar6000_data_tx(skb, ar->arNetDev);
+ STA_CLR_PS_POLLED(conn);
+
+ /* Clear the PVB for this STA if the queue has become empty */
+ A_MUTEX_LOCK(&conn->psqLock);
+ isPsqEmpty = A_NETBUF_QUEUE_EMPTY(&conn->psq);
+ A_MUTEX_UNLOCK(&conn->psqLock);
+
+ if (isPsqEmpty) {
+ wmi_set_pvb_cmd(ar->arWmi, conn->aid, 0);
+ }
+ }
+}
+
+void ar6000_dtimexpiry_event(AR_SOFTC_T *ar)
+{
+ A_BOOL isMcastQueued = FALSE;
+ struct sk_buff *skb = NULL;
+
+ /* If there are no associated STAs, ignore the DTIM expiry event.
+ * There is a potential race: the last associated STA may disconnect,
+ * and before the host can clear the 'Indicate DTIM' request to the
+ * firmware, the firmware has already indicated a DTIM expiry event.
+ * The race is between the 'clear DTIM expiry' cmd going from the host
+ * to the firmware and the DTIM expiry event going from the firmware
+ * to the host.
+ */
+ if (ar->sta_list_index == 0) {
+ return;
+ }
+
+ A_MUTEX_LOCK(&ar->mcastpsqLock);
+ isMcastQueued = A_NETBUF_QUEUE_EMPTY(&ar->mcastpsq);
+ A_MUTEX_UNLOCK(&ar->mcastpsqLock);
+
+ A_ASSERT(isMcastQueued == FALSE);
+
+ /* Flush the mcast psq to the target */
+ /* Set the STA flag to DTIMExpired, so that the frame will go out */
+ ar->DTIMExpired = TRUE;
+
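+ /* The queue is drained with mcastpsqLock dropped around each transmit,
+ * so ar6000_data_tx() is never called while the spinlock is held.
+ */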
+ A_MUTEX_LOCK(&ar->mcastpsqLock);
+ while (!A_NETBUF_QUEUE_EMPTY(&ar->mcastpsq)) {
+ skb = A_NETBUF_DEQUEUE(&ar->mcastpsq);
+ A_MUTEX_UNLOCK(&ar->mcastpsqLock);
+
+ ar6000_data_tx(skb, ar->arNetDev);
+
+ A_MUTEX_LOCK(&ar->mcastpsqLock);
+ }
+ A_MUTEX_UNLOCK(&ar->mcastpsqLock);
+
+ /* Reset the DTIMExpired flag back to 0 */
+ ar->DTIMExpired = FALSE;
+
+ /* Clear the LSB of the BitMapCtl field of the TIM IE */
+ wmi_set_pvb_cmd(ar->arWmi, MCAST_AID, 0);
+}
+
+void
+read_rssi_compensation_param(AR_SOFTC_T *ar)
+{
+ A_UINT8 *cust_data_ptr;
+
+//#define RSSICOMPENSATION_PRINT
+
+#ifdef RSSICOMPENSATION_PRINT
+ A_INT16 i;
+ cust_data_ptr = ar6000_get_cust_data_buffer(ar->arTargetType);
+ for (i=0; i<16; i++) {
+ A_PRINTF("cust_data_%d = %x \n", i, *(A_UINT8 *)cust_data_ptr);
+ cust_data_ptr += 1;
+ }
+#endif
+
+ cust_data_ptr = ar6000_get_cust_data_buffer(ar->arTargetType);
+
+ rssi_compensation_param.customerID = *(A_UINT16 *)cust_data_ptr & 0xffff;
+ rssi_compensation_param.enable = *(A_UINT16 *)(cust_data_ptr+2) & 0xffff;
+ rssi_compensation_param.bg_param_a = *(A_UINT16 *)(cust_data_ptr+4) & 0xffff;
+ rssi_compensation_param.bg_param_b = *(A_UINT16 *)(cust_data_ptr+6) & 0xffff;
+ rssi_compensation_param.a_param_a = *(A_UINT16 *)(cust_data_ptr+8) & 0xffff;
+ rssi_compensation_param.a_param_b = *(A_UINT16 *)(cust_data_ptr+10) &0xffff;
+ rssi_compensation_param.reserved = *(A_UINT32 *)(cust_data_ptr+12);
+
+#ifdef RSSICOMPENSATION_PRINT
+ A_PRINTF("customerID = 0x%x \n", rssi_compensation_param.customerID);
+ A_PRINTF("enable = 0x%x \n", rssi_compensation_param.enable);
+ A_PRINTF("bg_param_a = 0x%x and %d \n", rssi_compensation_param.bg_param_a, rssi_compensation_param.bg_param_a);
+ A_PRINTF("bg_param_b = 0x%x and %d \n", rssi_compensation_param.bg_param_b, rssi_compensation_param.bg_param_b);
+ A_PRINTF("a_param_a = 0x%x and %d \n", rssi_compensation_param.a_param_a, rssi_compensation_param.a_param_a);
+ A_PRINTF("a_param_b = 0x%x and %d \n", rssi_compensation_param.a_param_b, rssi_compensation_param.a_param_b);
+ A_PRINTF("Last 4 bytes = 0x%x \n", rssi_compensation_param.reserved);
+#endif
+
+ if (rssi_compensation_param.enable != 0x1) {
+ rssi_compensation_param.enable = 0;
+ }
+
+ return;
+}
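+
+/* Layout of the customer data area parsed above (sketch; byte offsets into
+ * the buffer returned by ar6000_get_cust_data_buffer(), all fields 16-bit
+ * except the last):
+ *
+ *   offset  0 : customerID
+ *   offset  2 : enable           (only the value 0x1 enables compensation)
+ *   offset  4 : bg_param_a
+ *   offset  6 : bg_param_b
+ *   offset  8 : a_param_a
+ *   offset 10 : a_param_b
+ *   offset 12 : reserved         (32-bit)
+ */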
+
+A_INT32
+rssi_compensation_calc_tcmd(A_UINT32 freq, A_INT32 rssi, A_UINT32 totalPkt)
+{
+
+ if (freq > 5000)
+ {
+ if (rssi_compensation_param.enable)
+ {
+ AR_DEBUG_PRINTF(ATH_DEBUG_INFO, (">>> 11a\n"));
+ AR_DEBUG_PRINTF(ATH_DEBUG_INFO, ("rssi before compensation = %d, totalPkt = %d\n", rssi,totalPkt));
+ rssi = rssi * rssi_compensation_param.a_param_a + totalPkt * rssi_compensation_param.a_param_b;
+ rssi = (rssi-50) /100;
+ AR_DEBUG_PRINTF(ATH_DEBUG_INFO, ("rssi after compensation = %d\n", rssi));
+ }
+ }
+ else
+ {
+ if (rssi_compensation_param.enable)
+ {
+ AR_DEBUG_PRINTF(ATH_DEBUG_INFO, (">>> 11bg\n"));
+ AR_DEBUG_PRINTF(ATH_DEBUG_INFO, ("rssi before compensation = %d, totalPkt = %d\n", rssi,totalPkt));
+ rssi = rssi * rssi_compensation_param.bg_param_a + totalPkt * rssi_compensation_param.bg_param_b;
+ rssi = (rssi-50) /100;
+ AR_DEBUG_PRINTF(ATH_DEBUG_INFO, ("rssi after compensation = %d\n", rssi));
+ }
+ }
+
+ return rssi;
+}
+
+A_INT16
+rssi_compensation_calc(AR_SOFTC_T *ar, A_INT16 rssi)
+{
+ if (ar->arBssChannel > 5000)
+ {
+ if (rssi_compensation_param.enable)
+ {
+ AR_DEBUG_PRINTF(ATH_DEBUG_INFO, (">>> 11a\n"));
+ AR_DEBUG_PRINTF(ATH_DEBUG_INFO, ("rssi before compensation = %d\n", rssi));
+ rssi = rssi * rssi_compensation_param.a_param_a + rssi_compensation_param.a_param_b;
+ rssi = (rssi-50) /100;
+ AR_DEBUG_PRINTF(ATH_DEBUG_INFO, ("rssi after compensation = %d\n", rssi));
+ }
+ }
+ else
+ {
+ if (rssi_compensation_param.enable)
+ {
+ AR_DEBUG_PRINTF(ATH_DEBUG_INFO, (">>> 11bg\n"));
+ AR_DEBUG_PRINTF(ATH_DEBUG_INFO, ("rssi before compensation = %d\n", rssi));
+ rssi = rssi * rssi_compensation_param.bg_param_a + rssi_compensation_param.bg_param_b;
+ rssi = (rssi-50) /100;
+ AR_DEBUG_PRINTF(ATH_DEBUG_INFO, ("rssi after compensation = %d\n", rssi));
+ }
+ }
+
+ return rssi;
+}
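+
+/* Worked example of the linear compensation above (parameter values are
+ * hypothetical; the real ones come from read_rssi_compensation_param()):
+ *
+ *   rssi' = (rssi * param_a + param_b - 50) / 100
+ *
+ *   with bg_param_a = 110, bg_param_b = 500 and rssi = -60:
+ *   (-60 * 110 + 500 - 50) / 100  =  -6150 / 100  =  -61   (C truncation)
+ */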
+
+A_INT16
+rssi_compensation_reverse_calc(AR_SOFTC_T *ar, A_INT16 rssi, A_BOOL Above)
+{
+ A_INT16 i;
+
+ if (ar->arBssChannel > 5000)
+ {
+ if (rssi_compensation_param.enable)
+ {
+ AR_DEBUG_PRINTF(ATH_DEBUG_INFO, (">>> 11a\n"));
+ AR_DEBUG_PRINTF(ATH_DEBUG_INFO, ("rssi before rev compensation = %d\n", rssi));
+ rssi = rssi * 100;
+ rssi = (rssi - rssi_compensation_param.a_param_b) / rssi_compensation_param.a_param_a;
+ AR_DEBUG_PRINTF(ATH_DEBUG_INFO, ("rssi after rev compensation = %d\n", rssi));
+ }
+ }
+ else
+ {
+ if (rssi_compensation_param.enable)
+ {
+ AR_DEBUG_PRINTF(ATH_DEBUG_INFO, (">>> 11bg\n"));
+ AR_DEBUG_PRINTF(ATH_DEBUG_INFO, ("rssi before rev compensation = %d\n", rssi));
+
+ if (Above) {
+ for (i=95; i>=0; i--) {
+ if (rssi <= rssi_compensation_table[i]) {
+ rssi = 0 - i;
+ break;
+ }
+ }
+ } else {
+ for (i=0; i<=95; i++) {
+ if (rssi >= rssi_compensation_table[i]) {
+ rssi = 0 - i;
+ break;
+ }
+ }
+ }
+ AR_DEBUG_PRINTF(ATH_DEBUG_INFO, ("rssi after rev compensation = %d\n", rssi));
+ }
+ }
+
+ return rssi;
+}
+
+#ifdef WAPI_ENABLE
+void ap_wapi_rekey_event(AR_SOFTC_T *ar, A_UINT8 type, A_UINT8 *mac)
+{
+ union iwreq_data wrqu;
+ A_CHAR buf[20];
+
+ A_MEMZERO(buf, sizeof(buf));
+
+ strcpy(buf, "WAPI_REKEY");
+ buf[10] = type;
+ A_MEMCPY(&buf[11], mac, ATH_MAC_LEN);
+
+ A_MEMZERO(&wrqu, sizeof(wrqu));
+ wrqu.data.length = 10+1+ATH_MAC_LEN;
+ wireless_send_event(ar->arNetDev, IWEVCUSTOM, &wrqu, buf);
+
+ A_PRINTF("WAPI REKEY - %d - %02x:%02x\n", type, mac[4], mac[5]);
+}
+#endif
+
+#ifdef USER_KEYS
+static A_STATUS
+ar6000_reinstall_keys(AR_SOFTC_T *ar, A_UINT8 key_op_ctrl)
+{
+ A_STATUS status = A_OK;
+ struct ieee80211req_key *uik = &ar->user_saved_keys.ucast_ik;
+ struct ieee80211req_key *bik = &ar->user_saved_keys.bcast_ik;
+ CRYPTO_TYPE keyType = ar->user_saved_keys.keyType;
+
+ if (IEEE80211_CIPHER_CCKM_KRK != uik->ik_type) {
+ if (NONE_CRYPT == keyType) {
+ goto _reinstall_keys_out;
+ }
+
+ if (uik->ik_keylen) {
+ status = wmi_addKey_cmd(ar->arWmi, uik->ik_keyix,
+ ar->user_saved_keys.keyType, PAIRWISE_USAGE,
+ uik->ik_keylen, (A_UINT8 *)&uik->ik_keyrsc,
+ uik->ik_keydata, key_op_ctrl, uik->ik_macaddr, SYNC_BEFORE_WMIFLAG);
+ }
+
+ } else {
+ status = wmi_add_krk_cmd(ar->arWmi, uik->ik_keydata);
+ }
+
+ if (IEEE80211_CIPHER_CCKM_KRK != bik->ik_type) {
+ if (NONE_CRYPT == keyType) {
+ goto _reinstall_keys_out;
+ }
+
+ if (bik->ik_keylen) {
+ status = wmi_addKey_cmd(ar->arWmi, bik->ik_keyix,
+ ar->user_saved_keys.keyType, GROUP_USAGE,
+ bik->ik_keylen, (A_UINT8 *)&bik->ik_keyrsc,
+ bik->ik_keydata, key_op_ctrl, bik->ik_macaddr, NO_SYNC_WMIFLAG);
+ }
+ } else {
+ status = wmi_add_krk_cmd(ar->arWmi, bik->ik_keydata);
+ }
+
+_reinstall_keys_out:
+ ar->user_savedkeys_stat = USER_SAVEDKEYS_STAT_INIT;
+ ar->user_key_ctrl = 0;
+
+ return status;
+}
+#endif /* USER_KEYS */
+
+
+void
+ar6000_dset_open_req(
+ void *context,
+ A_UINT32 id,
+ A_UINT32 targHandle,
+ A_UINT32 targReplyFn,
+ A_UINT32 targReplyArg)
+{
+}
+
+void
+ar6000_dset_close(
+ void *context,
+ A_UINT32 access_cookie)
+{
+ return;
+}
+
+void
+ar6000_dset_data_req(
+ void *context,
+ A_UINT32 accessCookie,
+ A_UINT32 offset,
+ A_UINT32 length,
+ A_UINT32 targBuf,
+ A_UINT32 targReplyFn,
+ A_UINT32 targReplyArg)
+{
+}
+
+int
+ar6000_ap_mode_profile_commit(struct ar6_softc *ar)
+{
+ WMI_CONNECT_CMD p;
+ unsigned long flags;
+
+ /* No change in AP's profile configuration */
+ if(ar->ap_profile_flag==0) {
+ A_PRINTF("COMMIT: No change in profile!!!\n");
+ return -ENODATA;
+ }
+
+ if(!ar->arSsidLen) {
+ A_PRINTF("SSID not set!!!\n");
+ return -ECHRNG;
+ }
+
+ switch(ar->arAuthMode) {
+ case NONE_AUTH:
+ if((ar->arPairwiseCrypto != NONE_CRYPT) &&
+#ifdef WAPI_ENABLE
+ (ar->arPairwiseCrypto != WAPI_CRYPT) &&
+#endif
+ (ar->arPairwiseCrypto != WEP_CRYPT)) {
+ A_PRINTF("Cipher not supported in AP mode Open auth\n");
+ return -EOPNOTSUPP;
+ }
+ break;
+ case WPA_PSK_AUTH:
+ case WPA2_PSK_AUTH:
+ case (WPA_PSK_AUTH|WPA2_PSK_AUTH):
+ break;
+ default:
+ A_PRINTF("This key mgmt type not supported in AP mode\n");
+ return -EOPNOTSUPP;
+ }
+
+ /* Update the arNetworkType */
+ ar->arNetworkType = ar->arNextMode;
+
+ A_MEMZERO(&p,sizeof(p));
+ p.ssidLength = ar->arSsidLen;
+ A_MEMCPY(p.ssid,ar->arSsid,p.ssidLength);
+ p.channel = ar->arChannelHint;
+ p.networkType = ar->arNetworkType;
+
+ p.dot11AuthMode = ar->arDot11AuthMode;
+ p.authMode = ar->arAuthMode;
+ p.pairwiseCryptoType = ar->arPairwiseCrypto;
+ p.pairwiseCryptoLen = ar->arPairwiseCryptoLen;
+ p.groupCryptoType = ar->arGroupCrypto;
+ p.groupCryptoLen = ar->arGroupCryptoLen;
+ p.ctrl_flags = ar->arConnectCtrlFlags;
+
+ ar->arConnected = FALSE;
+
+ wmi_ap_profile_commit(ar->arWmi, &p);
+ spin_lock_irqsave(&ar->arLock, flags);
+ ar->arConnected = TRUE;
+ netif_carrier_on(ar->arNetDev);
+ spin_unlock_irqrestore(&ar->arLock, flags);
+ ar->ap_profile_flag = 0;
+ return 0;
+}
+
+A_STATUS
+ar6000_connect_to_ap(struct ar6_softc *ar)
+{
+ /* The ssid length check prevents a second "essid off" from the user
+ from being treated as a connect cmd. The second "essid off" is ignored.
+ */
+ if((ar->arWmiReady == TRUE) && (ar->arSsidLen > 0) && ar->arNetworkType!=AP_NETWORK)
+ {
+ A_STATUS status;
+ if((ADHOC_NETWORK != ar->arNetworkType) &&
+ (NONE_AUTH==ar->arAuthMode) &&
+ (WEP_CRYPT==ar->arPairwiseCrypto)) {
+ ar6000_install_static_wep_keys(ar);
+ }
+
+ if (!ar->arUserBssFilter) {
+ if (wmi_bssfilter_cmd(ar->arWmi, ALL_BSS_FILTER, 0) != A_OK) {
+ return -EIO;
+ }
+ }
+#ifdef WAPI_ENABLE
+ if (ar->arWapiEnable) {
+ ar->arPairwiseCrypto = WAPI_CRYPT;
+ ar->arPairwiseCryptoLen = 0;
+ ar->arGroupCrypto = WAPI_CRYPT;
+ ar->arGroupCryptoLen = 0;
+ ar->arAuthMode = NONE_AUTH;
+ ar->arConnectCtrlFlags |= CONNECT_IGNORE_WPAx_GROUP_CIPHER;
+ }
+#endif
+ AR_DEBUG_PRINTF(ATH_DEBUG_WLAN_CONNECT,("Connect called with authmode %d dot11 auth %d"\
+ " PW crypto %d PW crypto Len %d GRP crypto %d"\
+ " GRP crypto Len %d\n",
+ ar->arAuthMode, ar->arDot11AuthMode,
+ ar->arPairwiseCrypto, ar->arPairwiseCryptoLen,
+ ar->arGroupCrypto, ar->arGroupCryptoLen));
+ reconnect_flag = 0;
+ /* Set the listen interval to 1000 TUs or more. This value will be indicated to the AP in the connect cmd;
+ later it is set back locally at the STA to 100/1000 TUs depending on the power mode. */
+ if ((ar->arNetworkType == INFRA_NETWORK)) {
+ wmi_listeninterval_cmd(ar->arWmi, max(ar->arListenIntervalT, (A_UINT16)A_MAX_WOW_LISTEN_INTERVAL), 0);
+ }
+ status = wmi_connect_cmd(ar->arWmi, ar->arNetworkType,
+ ar->arDot11AuthMode, ar->arAuthMode,
+ ar->arPairwiseCrypto, ar->arPairwiseCryptoLen,
+ ar->arGroupCrypto,ar->arGroupCryptoLen,
+ ar->arSsidLen, ar->arSsid,
+ ar->arReqBssid, ar->arChannelHint,
+ ar->arConnectCtrlFlags);
+ if (status != A_OK) {
+ wmi_listeninterval_cmd(ar->arWmi, ar->arListenIntervalT, ar->arListenIntervalB);
+ if (!ar->arUserBssFilter) {
+ wmi_bssfilter_cmd(ar->arWmi, NONE_BSS_FILTER, 0);
+ }
+ return status;
+ }
+
+ if ((!(ar->arConnectCtrlFlags & CONNECT_DO_WPA_OFFLOAD)) &&
+ ((WPA_PSK_AUTH == ar->arAuthMode) || (WPA2_PSK_AUTH == ar->arAuthMode)))
+ {
+ A_TIMEOUT_MS(&ar->disconnect_timer, A_DISCONNECT_TIMER_INTERVAL, 0);
+ }
+
+ ar->arConnectCtrlFlags &= ~CONNECT_DO_WPA_OFFLOAD;
+
+ ar->arConnectPending = TRUE;
+ return status;
+ }
+ return A_ERROR;
+}
+
+A_STATUS
+ar6000_ap_mode_get_wpa_ie(struct ar6_softc *ar, struct ieee80211req_wpaie *wpaie)
+{
+ sta_t *conn = NULL;
+ conn = ieee80211_find_conn(ar, wpaie->wpa_macaddr);
+
+ A_MEMZERO(wpaie->wpa_ie, IEEE80211_MAX_IE);
+ A_MEMZERO(wpaie->rsn_ie, IEEE80211_MAX_IE);
+
+ if(conn) {
+ A_MEMCPY(wpaie->wpa_ie, conn->wpa_ie, IEEE80211_MAX_IE);
+ }
+
+ return 0;
+}
+
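+/*
+ * ioctl filtering: sioctl_filter[] (standard wireless ioctls), pioctl_filter[]
+ * (private ioctls) and xioctl_filter[] (extended ioctls) are lookup tables,
+ * defined elsewhere in the driver, that map each command to a bitmask of the
+ * operating modes in which it is permitted. An entry of 0xFF means the command
+ * is allowed in every mode; otherwise the entry is ANDed with the current mode.
+ *
+ * Illustrative (hypothetical) use by an ioctl dispatcher:
+ *
+ *   if (is_iwioctl_allowed(ar->arNextMode, cmd) != A_OK)
+ *       return -EOPNOTSUPP;
+ */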
+A_STATUS
+is_iwioctl_allowed(A_UINT8 mode, A_UINT16 cmd)
+{
+ if(cmd >= SIOCSIWCOMMIT && cmd <= SIOCGIWPOWER) {
+ cmd -= SIOCSIWCOMMIT;
+ if(sioctl_filter[cmd] == 0xFF) return A_OK;
+ if(sioctl_filter[cmd] & mode) return A_OK;
+ } else if(cmd >= SIOCIWFIRSTPRIV && cmd <= (SIOCIWFIRSTPRIV+30)) {
+ cmd -= SIOCIWFIRSTPRIV;
+ if(pioctl_filter[cmd] == 0xFF) return A_OK;
+ if(pioctl_filter[cmd] & mode) return A_OK;
+ } else {
+ return A_ERROR;
+ }
+ return A_ENOTSUP;
+}
+
+A_STATUS
+is_xioctl_allowed(A_UINT8 mode, int cmd)
+{
+ if(sizeof(xioctl_filter)-1 < cmd) {
+ A_PRINTF("Filter for this cmd=%d not defined\n",cmd);
+ return 0;
+ }
+ if(xioctl_filter[cmd] == 0xFF) return A_OK;
+ if(xioctl_filter[cmd] & mode) return A_OK;
+ return A_ERROR;
+}
+
+#ifdef WAPI_ENABLE
+int
+ap_set_wapi_key(struct ar6_softc *ar, void *ikey)
+{
+ struct ieee80211req_key *ik = (struct ieee80211req_key *)ikey;
+ KEY_USAGE keyUsage = 0;
+ A_STATUS status;
+
+ if (A_MEMCMP(ik->ik_macaddr, bcast_mac, IEEE80211_ADDR_LEN) == 0) {
+ keyUsage = GROUP_USAGE;
+ } else {
+ keyUsage = PAIRWISE_USAGE;
+ }
+ A_PRINTF("WAPI_KEY: Type:%d ix:%d mac:%02x:%02x len:%d\n",
+ keyUsage, ik->ik_keyix, ik->ik_macaddr[4], ik->ik_macaddr[5],
+ ik->ik_keylen);
+
+ status = wmi_addKey_cmd(ar->arWmi, ik->ik_keyix, WAPI_CRYPT, keyUsage,
+ ik->ik_keylen, (A_UINT8 *)&ik->ik_keyrsc,
+ ik->ik_keydata, KEY_OP_INIT_VAL, ik->ik_macaddr,
+ SYNC_BOTH_WMIFLAG);
+
+ if (A_OK != status) {
+ return -EIO;
+ }
+ return 0;
+}
+#endif
+
+void ar6000_peer_event(
+ void *context,
+ A_UINT8 eventCode,
+ A_UINT8 *macAddr)
+{
+ A_UINT8 pos;
+
+ for (pos=0;pos<6;pos++)
+ printk("%02x: ",*(macAddr+pos));
+ printk("\n");
+}
+
+#ifdef HTC_TEST_SEND_PKTS
+#define HTC_TEST_DUPLICATE 8
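+/*
+ * DoHTCSendPktsTest - exercise HTCSendPktsMultiple() by cloning an outgoing
+ * epping frame HTC_TEST_DUPLICATE times. Each clone gets its own cookie and
+ * skb, is marked EPPING_CMD_NO_ECHO so the target does not echo it back, and
+ * all clones are queued to HTC in a single HTC_PACKET_QUEUE. Test-only code,
+ * compiled in only when HTC_TEST_SEND_PKTS is defined.
+ */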
+static void DoHTCSendPktsTest(AR_SOFTC_T *ar, int MapNo, HTC_ENDPOINT_ID eid, struct sk_buff *dupskb)
+{
+ struct ar_cookie *cookie;
+ struct ar_cookie *cookieArray[HTC_TEST_DUPLICATE];
+ struct sk_buff *new_skb;
+ int i;
+ int pkts = 0;
+ HTC_PACKET_QUEUE pktQueue;
+ EPPING_HEADER *eppingHdr;
+
+ eppingHdr = A_NETBUF_DATA(dupskb);
+
+ if (eppingHdr->Cmd_h == EPPING_CMD_NO_ECHO) {
+ /* skip test if this is already a tx perf test */
+ return;
+ }
+
+ for (i = 0; i < HTC_TEST_DUPLICATE; i++,pkts++) {
+ AR6000_SPIN_LOCK(&ar->arLock, 0);
+ cookie = ar6000_alloc_cookie(ar);
+ if (cookie != NULL) {
+ ar->arTxPending[eid]++;
+ ar->arTotalTxDataPending++;
+ }
+
+ AR6000_SPIN_UNLOCK(&ar->arLock, 0);
+
+ if (NULL == cookie) {
+ break;
+ }
+
+ new_skb = A_NETBUF_ALLOC(A_NETBUF_LEN(dupskb));
+
+ if (new_skb == NULL) {
+ AR6000_SPIN_LOCK(&ar->arLock, 0);
+ ar6000_free_cookie(ar,cookie);
+ AR6000_SPIN_UNLOCK(&ar->arLock, 0);
+ break;
+ }
+
+ A_NETBUF_PUT_DATA(new_skb, A_NETBUF_DATA(dupskb), A_NETBUF_LEN(dupskb));
+ cookie->arc_bp[0] = (unsigned long)new_skb;
+ cookie->arc_bp[1] = MapNo;
+ SET_HTC_PACKET_INFO_TX(&cookie->HtcPkt,
+ cookie,
+ A_NETBUF_DATA(new_skb),
+ A_NETBUF_LEN(new_skb),
+ eid,
+ AR6K_DATA_PKT_TAG);
+
+ cookieArray[i] = cookie;
+
+ {
+ EPPING_HEADER *pHdr = (EPPING_HEADER *)A_NETBUF_DATA(new_skb);
+ pHdr->Cmd_h = EPPING_CMD_NO_ECHO; /* do not echo the packet */
+ }
+ }
+
+ if (pkts == 0) {
+ return;
+ }
+
+ INIT_HTC_PACKET_QUEUE(&pktQueue);
+
+ for (i = 0; i < pkts; i++) {
+ HTC_PACKET_ENQUEUE(&pktQueue,&cookieArray[i]->HtcPkt);
+ }
+
+ HTCSendPktsMultiple(ar->arHtcTarget, &pktQueue);
+
+}
+#endif
+
+#ifdef CONFIG_AP_VIRTUAL_ADAPTER_SUPPORT
+/*
+ * Add support for adding and removing a virtual adapter for soft AP.
+ * Some OSes require different adapter names for station and soft AP mode.
+ * To support this requirement, create and destroy a netdevice instance
+ * when the AP mode is operational. Full-fledged support for a virtual device
+ * is not implemented. Rather, a virtual interface is created and linked
+ * with the existing physical device instance for the duration of
+ * AP mode operation.
+ */
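+/*
+ * Illustrative (hypothetical) call sequence, assuming "ap0" as the AP
+ * interface name chosen by the caller:
+ *
+ *   ar6000_add_ap_interface(ar, "ap0");   // entering AP mode: create/attach
+ *   ...                                   // AP traffic runs on the new netdev
+ *   ar6000_remove_ap_interface(ar);       // leaving AP mode: detach/free
+ *
+ * add creates the netdev only once and then just switches ar->arNetDev to it;
+ * remove restores ar->arNetDev to the original station netdev.
+ */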
+
+A_STATUS ar6000_start_ap_interface(AR_SOFTC_T *ar)
+{
+ AR_VIRTUAL_INTERFACE_T *arApDev;
+
+ /* Change net_device to point to AP instance */
+ arApDev = (AR_VIRTUAL_INTERFACE_T *)ar->arApDev;
+ ar->arNetDev = arApDev->arNetDev;
+
+ return A_OK;
+}
+
+A_STATUS ar6000_stop_ap_interface(AR_SOFTC_T *ar)
+{
+ AR_VIRTUAL_INTERFACE_T *arApDev;
+
+ /* Change net_device to point to sta instance */
+ arApDev = (AR_VIRTUAL_INTERFACE_T *)ar->arApDev;
+ if (arApDev) {
+ ar->arNetDev = arApDev->arStaNetDev;
+ }
+
+ return A_OK;
+}
+
+
+A_STATUS ar6000_create_ap_interface(AR_SOFTC_T *ar, char *ap_ifname)
+{
+ struct net_device *dev;
+ AR_VIRTUAL_INTERFACE_T *arApDev;
+
+ dev = alloc_etherdev(sizeof(AR_VIRTUAL_INTERFACE_T));
+ if (dev == NULL) {
+ AR_DEBUG_PRINTF(ATH_DEBUG_ERR,("ar6000_create_ap_interface: can't alloc etherdev\n"));
+ return A_ERROR;
+ }
+
+ ether_setup(dev);
+ init_netdev(dev, ap_ifname);
+
+ if (register_netdev(dev)) {
+ AR_DEBUG_PRINTF(ATH_DEBUG_ERR,("ar6000_create_ap_interface: register_netdev failed\n"));
+ free_netdev(dev);
+ return A_ERROR;
+ }
+
+ arApDev = netdev_priv(dev);
+ arApDev->arDev = ar;
+ arApDev->arNetDev = dev;
+ arApDev->arStaNetDev = ar->arNetDev;
+
+ ar->arApDev = arApDev;
+ arApNetDev = dev;
+
+ /* Copy the MAC address */
+ A_MEMCPY(dev->dev_addr, ar->arNetDev->dev_addr, AR6000_ETH_ADDR_LEN);
+
+ return A_OK;
+}
+
+A_STATUS ar6000_add_ap_interface(AR_SOFTC_T *ar, char *ap_ifname)
+{
+ /* Interface already added, need not proceed further */
+ if (ar->arApDev != NULL) {
+ AR_DEBUG_PRINTF(ATH_DEBUG_ERR,("ar6000_add_ap_interface: interface already present \n"));
+ return A_OK;
+ }
+
+ if (ar6000_create_ap_interface(ar, ap_ifname) != A_OK) {
+ return A_ERROR;
+ }
+
+ A_PRINTF("Add AP interface %s \n",ap_ifname);
+
+ return ar6000_start_ap_interface(ar);
+}
+
+A_STATUS ar6000_remove_ap_interface(AR_SOFTC_T *ar)
+{
+ if (arApNetDev) {
+ ar6000_stop_ap_interface(ar);
+
+ unregister_netdev(arApNetDev);
+ free_netdev(arApNetDev);
+
+ A_PRINTF("Remove AP interface\n");
+ }
+ ar->arApDev = NULL;
+ arApNetDev = NULL;
+
+
+ return A_OK;
+}
+#endif /* CONFIG_AP_VIRTUAL_ADAPTER_SUPPORT */
+
+
+#ifdef EXPORT_HCI_BRIDGE_INTERFACE
+EXPORT_SYMBOL(setupbtdev);
+#endif
diff --git a/drivers/staging/ath6kl/os/linux/ar6000_pm.c b/drivers/staging/ath6kl/os/linux/ar6000_pm.c
new file mode 100644
index 000000000000..b937df9c0cb5
--- /dev/null
+++ b/drivers/staging/ath6kl/os/linux/ar6000_pm.c
@@ -0,0 +1,731 @@
+/*
+ *
+ * Copyright (c) 2004-2010 Atheros Communications Inc.
+ * All rights reserved.
+ *
+ *
+//
+// Permission to use, copy, modify, and/or distribute this software for any
+// purpose with or without fee is hereby granted, provided that the above
+// copyright notice and this permission notice appear in all copies.
+//
+// THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+// WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+// MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+// ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+// WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+// ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+// OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+//
+//
+ *
+ */
+
+/*
+ * Implementation of system power management
+ */
+
+#include "ar6000_drv.h"
+#include <linux/inetdevice.h>
+#include <linux/platform_device.h>
+#include "wlan_config.h"
+
+#ifdef CONFIG_HAS_WAKELOCK
+#include <linux/wakelock.h>
+#endif
+
+#define WOW_ENABLE_MAX_INTERVAL 0
+#define WOW_SET_SCAN_PARAMS 0
+
+extern unsigned int wmitimeout;
+extern wait_queue_head_t arEvent;
+
+#ifdef CONFIG_PM
+#ifdef CONFIG_HAS_WAKELOCK
+struct wake_lock ar6k_suspend_wake_lock;
+struct wake_lock ar6k_wow_wake_lock;
+#endif
+#endif /* CONFIG_PM */
+
+#ifdef ANDROID_ENV
+extern void android_ar6k_check_wow_status(AR_SOFTC_T *ar, struct sk_buff *skb, A_BOOL isEvent);
+#endif
+#undef ATH_MODULE_NAME
+#define ATH_MODULE_NAME pm
+#define ATH_DEBUG_PM ATH_DEBUG_MAKE_MODULE_MASK(0)
+
+#ifdef DEBUG
+static ATH_DEBUG_MASK_DESCRIPTION pm_debug_desc[] = {
+ { ATH_DEBUG_PM , "System power management"},
+};
+
+ATH_DEBUG_INSTANTIATE_MODULE_VAR(pm,
+ "pm",
+ "System Power Management",
+ ATH_DEBUG_MASK_DEFAULTS | ATH_DEBUG_PM,
+ ATH_DEBUG_DESCRIPTION_COUNT(pm_debug_desc),
+ pm_debug_desc);
+
+#endif /* DEBUG */
+
+A_STATUS ar6000_exit_cut_power_state(AR_SOFTC_T *ar);
+
+#ifdef CONFIG_PM
+static void ar6k_send_asleep_event_to_app(AR_SOFTC_T *ar, A_BOOL asleep)
+{
+ char buf[128];
+ union iwreq_data wrqu;
+
+ snprintf(buf, sizeof(buf), "HOST_ASLEEP=%s", asleep ? "asleep" : "awake");
+ A_MEMZERO(&wrqu, sizeof(wrqu));
+ wrqu.data.length = strlen(buf);
+ wireless_send_event(ar->arNetDev, IWEVCUSTOM, &wrqu, buf);
+}
+
+static void ar6000_wow_resume(AR_SOFTC_T *ar)
+{
+ if (ar->arWowState!= WLAN_WOW_STATE_NONE) {
+ A_UINT16 fg_start_period = (ar->scParams.fg_start_period==0) ? 1 : ar->scParams.fg_start_period;
+ A_UINT16 bg_period = (ar->scParams.bg_period==0) ? 60 : ar->scParams.bg_period;
+ WMI_SET_HOST_SLEEP_MODE_CMD hostSleepMode = {TRUE, FALSE};
+ ar->arWowState = WLAN_WOW_STATE_NONE;
+#ifdef CONFIG_HAS_WAKELOCK
+ wake_lock_timeout(&ar6k_wow_wake_lock, 3*HZ);
+#endif
+ if (wmi_set_host_sleep_mode_cmd(ar->arWmi, &hostSleepMode)!=A_OK) {
+ AR_DEBUG_PRINTF(ATH_DEBUG_ERR,("Fail to setup restore host awake\n"));
+ }
+#if WOW_SET_SCAN_PARAMS
+ wmi_scanparams_cmd(ar->arWmi, fg_start_period,
+ ar->scParams.fg_end_period,
+ bg_period,
+ ar->scParams.minact_chdwell_time,
+ ar->scParams.maxact_chdwell_time,
+ ar->scParams.pas_chdwell_time,
+ ar->scParams.shortScanRatio,
+ ar->scParams.scanCtrlFlags,
+ ar->scParams.max_dfsch_act_time,
+ ar->scParams.maxact_scan_per_ssid);
+#else
+ (void)fg_start_period;
+ (void)bg_period;
+#endif
+
+
+#if WOW_ENABLE_MAX_INTERVAL /* we don't do it if the power consumption is already good enough. */
+ if (wmi_listeninterval_cmd(ar->arWmi, ar->arListenIntervalT, ar->arListenIntervalB) == A_OK) {
+ }
+#endif
+ ar6k_send_asleep_event_to_app(ar, FALSE);
+ AR_DEBUG_PRINTF(ATH_DEBUG_PM, ("Resume WoW successfully\n"));
+ } else {
+ AR_DEBUG_PRINTF(ATH_DEBUG_PM, ("WoW does not invoked. skip resume"));
+ }
+ ar->arWlanPowerState = WLAN_POWER_STATE_ON;
+}
+
+static void ar6000_wow_suspend(AR_SOFTC_T *ar)
+{
+#define WOW_LIST_ID 1
+ if (ar->arNetworkType != AP_NETWORK) {
+ /* Set up WoW for unicast & ARP requests for our own IP,
+ disable background scan, set the listen interval to 1000 TUs,
+ and enable keepalive for 110 seconds
+ */
+ struct in_ifaddr **ifap = NULL;
+ struct in_ifaddr *ifa = NULL;
+ struct in_device *in_dev;
+ A_UINT8 macMask[] = { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff };
+ A_STATUS status;
+ WMI_ADD_WOW_PATTERN_CMD addWowCmd = { .filter = { 0 } };
+ WMI_DEL_WOW_PATTERN_CMD delWowCmd;
+ WMI_SET_HOST_SLEEP_MODE_CMD hostSleepMode = {FALSE, TRUE};
+ WMI_SET_WOW_MODE_CMD wowMode = { .enable_wow = TRUE,
+ .hostReqDelay = 500 };/*500 ms delay*/
+
+ if (ar->arWowState!= WLAN_WOW_STATE_NONE) {
+ AR_DEBUG_PRINTF(ATH_DEBUG_ERR,("System already go into wow mode!\n"));
+ return;
+ }
+
+ ar6000_TxDataCleanup(ar); /* IMPORTANT, otherwise there will be ~11mA of extra draw after the listen interval is set to 1000 */
+
+#if WOW_ENABLE_MAX_INTERVAL /* we don't do it if the power consumption is already good enough. */
+ if (wmi_listeninterval_cmd(ar->arWmi, A_MAX_WOW_LISTEN_INTERVAL, 0) == A_OK) {
+ }
+#endif
+
+#if WOW_SET_SCAN_PARAMS
+ status = wmi_scanparams_cmd(ar->arWmi, 0xFFFF, 0, 0xFFFF, 0, 0, 0, 0, 0, 0, 0);
+#endif
+ /* clear up our WoW pattern first */
+ delWowCmd.filter_list_id = WOW_LIST_ID;
+ delWowCmd.filter_id = 0;
+ wmi_del_wow_pattern_cmd(ar->arWmi, &delWowCmd);
+
+ /* setup unicast packet pattern for WoW */
+ if (ar->arNetDev->dev_addr[1]) {
+ addWowCmd.filter_list_id = WOW_LIST_ID;
+ addWowCmd.filter_size = 6; /* MAC address */
+ addWowCmd.filter_offset = 0;
+ status = wmi_add_wow_pattern_cmd(ar->arWmi, &addWowCmd, ar->arNetDev->dev_addr, macMask, addWowCmd.filter_size);
+ if (status != A_OK) {
+ AR_DEBUG_PRINTF(ATH_DEBUG_ERR,("Fail to add WoW pattern\n"));
+ }
+ }
+ /* setup ARP request for our own IP */
+ if ((in_dev = __in_dev_get_rtnl(ar->arNetDev)) != NULL) {
+ for (ifap = &in_dev->ifa_list; (ifa = *ifap) != NULL; ifap = &ifa->ifa_next) {
+ if (!strcmp(ar->arNetDev->name, ifa->ifa_label)) {
+ break; /* found */
+ }
+ }
+ }
+ if (ifa && ifa->ifa_local) {
+ WMI_SET_IP_CMD ipCmd;
+ memset(&ipCmd, 0, sizeof(ipCmd));
+ ipCmd.ips[0] = ifa->ifa_local;
+ status = wmi_set_ip_cmd(ar->arWmi, &ipCmd);
+ if (status != A_OK) {
+ AR_DEBUG_PRINTF(ATH_DEBUG_ERR,("Fail to setup IP for ARP agent\n"));
+ }
+ }
+
+#ifndef ATH6K_CONFIG_OTA_MODE
+ wmi_powermode_cmd(ar->arWmi, REC_POWER);
+#endif
+
+ status = wmi_set_wow_mode_cmd(ar->arWmi, &wowMode);
+ if (status != A_OK) {
+ AR_DEBUG_PRINTF(ATH_DEBUG_ERR,("Fail to enable wow mode\n"));
+ }
+ ar6k_send_asleep_event_to_app(ar, TRUE);
+
+ status = wmi_set_host_sleep_mode_cmd(ar->arWmi, &hostSleepMode);
+ if (status != A_OK) {
+ AR_DEBUG_PRINTF(ATH_DEBUG_ERR,("Fail to set host asleep\n"));
+ }
+
+ ar->arWowState = WLAN_WOW_STATE_SUSPENDING;
+ if (ar->arTxPending[ar->arControlEp]) {
+ A_UINT32 timeleft = wait_event_interruptible_timeout(arEvent,
+ ar->arTxPending[ar->arControlEp] == 0, wmitimeout * HZ);
+ if (!timeleft || signal_pending(current)) {
+ /* what can I do? wow resume at once */
+ AR_DEBUG_PRINTF(ATH_DEBUG_ERR,("Fail to setup WoW. Pending wmi control data %d\n", ar->arTxPending[ar->arControlEp]));
+ }
+ }
+
+ status = hifWaitForPendingRecv(ar->arHifDevice);
+
+ ar->arWowState = WLAN_WOW_STATE_SUSPENDED;
+ ar->arWlanPowerState = WLAN_POWER_STATE_WOW;
+ } else {
+ AR_DEBUG_PRINTF(ATH_DEBUG_ERR,("Not allowed to go to WOW at this moment.\n"));
+ }
+}
+
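+/*
+ * ar6000_suspend_ev - system suspend callback. Picks the suspend flavour from
+ * ar->arSuspendConfig: WoW when WMI is ready and a connection exists,
+ * otherwise it falls back to ar->arWow2Config and, for the cut-power and
+ * deep-sleep cases, disables WLAN via ar6000_update_wlan_pwr_state(). The
+ * return value tells the caller whether power to the chip may be cut
+ * (A_OK only when the device ended up in WLAN_POWER_STATE_CUT_PWR).
+ */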
+A_STATUS ar6000_suspend_ev(void *context)
+{
+ A_STATUS status = A_OK;
+ AR_SOFTC_T *ar = (AR_SOFTC_T *)context;
+ A_INT16 pmmode = ar->arSuspendConfig;
+wow_not_connected:
+ switch (pmmode) {
+ case WLAN_SUSPEND_WOW:
+ if (ar->arWmiReady && ar->arWlanState==WLAN_ENABLED && ar->arConnected) {
+ ar6000_wow_suspend(ar);
+ AR_DEBUG_PRINTF(ATH_DEBUG_PM,("%s:Suspend for wow mode %d\n", __func__, ar->arWlanPowerState));
+ } else {
+ pmmode = ar->arWow2Config;
+ goto wow_not_connected;
+ }
+ break;
+ case WLAN_SUSPEND_CUT_PWR:
+ /* fall through */
+ case WLAN_SUSPEND_CUT_PWR_IF_BT_OFF:
+ /* fall through */
+ case WLAN_SUSPEND_DEEP_SLEEP:
+ /* fall through */
+ default:
+ status = ar6000_update_wlan_pwr_state(ar, WLAN_DISABLED, TRUE);
+ if (ar->arWlanPowerState==WLAN_POWER_STATE_ON ||
+ ar->arWlanPowerState==WLAN_POWER_STATE_WOW) {
+ AR_DEBUG_PRINTF(ATH_DEBUG_PM, ("Strange suspend state for not wow mode %d", ar->arWlanPowerState));
+ }
+ AR_DEBUG_PRINTF(ATH_DEBUG_PM,("%s:Suspend for %d mode pwr %d status %d\n", __func__, pmmode, ar->arWlanPowerState, status));
+ status = (ar->arWlanPowerState == WLAN_POWER_STATE_CUT_PWR) ? A_OK : A_EBUSY;
+ break;
+ }
+
+ ar->scan_triggered = 0;
+ return status;
+}
+
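+/*
+ * ar6000_resume_ev - system resume callback, mirror of ar6000_suspend_ev.
+ * Based on the power state recorded at suspend time it either resumes from
+ * WoW or re-enables WLAN from cut-power/deep-sleep. When CONFIG_HAS_WAKELOCK
+ * is set, a wake lock is held around the resume path so the system cannot
+ * immediately suspend again while the chip is being brought back up.
+ */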
+A_STATUS ar6000_resume_ev(void *context)
+{
+ AR_SOFTC_T *ar = (AR_SOFTC_T *)context;
+ A_UINT16 powerState = ar->arWlanPowerState;
+
+#ifdef CONFIG_HAS_WAKELOCK
+ wake_lock(&ar6k_suspend_wake_lock);
+#endif
+ AR_DEBUG_PRINTF(ATH_DEBUG_PM, ("%s: enter previous state %d wowState %d\n", __func__, powerState, ar->arWowState));
+ switch (powerState) {
+ case WLAN_POWER_STATE_WOW:
+ ar6000_wow_resume(ar);
+ break;
+ case WLAN_POWER_STATE_CUT_PWR:
+ /* fall through */
+ case WLAN_POWER_STATE_DEEP_SLEEP:
+ ar6000_update_wlan_pwr_state(ar, WLAN_ENABLED, TRUE);
+ AR_DEBUG_PRINTF(ATH_DEBUG_PM,("%s:Resume for %d mode pwr %d\n", __func__, powerState, ar->arWlanPowerState));
+ break;
+ case WLAN_POWER_STATE_ON:
+ break;
+ default:
+ AR_DEBUG_PRINTF(ATH_DEBUG_ERR, ("Strange SDIO bus power mode!!\n"));
+ break;
+ }
+#ifdef CONFIG_HAS_WAKELOCK
+ wake_unlock(&ar6k_suspend_wake_lock);
+#endif
+ return A_OK;
+}
+
+void ar6000_check_wow_status(AR_SOFTC_T *ar, struct sk_buff *skb, A_BOOL isEvent)
+{
+ if (ar->arWowState!=WLAN_WOW_STATE_NONE) {
+ if (ar->arWowState==WLAN_WOW_STATE_SUSPENDING) {
+ AR_DEBUG_PRINTF(ATH_DEBUG_PM,("\n%s: Received IRQ while we are wow suspending!!!\n\n", __func__));
+ return;
+ }
+ /* Wow resume from irq interrupt */
+ AR_DEBUG_PRINTF(ATH_DEBUG_PM, ("%s: WoW resume from irq thread status %d\n", __func__, ar->arWlanPowerState));
+ ar6000_wow_resume(ar);
+ } else {
+#ifdef ANDROID_ENV
+ android_ar6k_check_wow_status(ar, skb, isEvent);
+#endif
+ }
+}
+
+A_STATUS ar6000_power_change_ev(void *context, A_UINT32 config)
+{
+ AR_SOFTC_T *ar = (AR_SOFTC_T *)context;
+ A_STATUS status = A_OK;
+
+ AR_DEBUG_PRINTF(ATH_DEBUG_PM, ("%s: power change event callback %d \n", __func__, config));
+ switch (config) {
+ case HIF_DEVICE_POWER_UP:
+ ar6000_restart_endpoint(ar->arNetDev);
+ status = A_OK;
+ break;
+ case HIF_DEVICE_POWER_DOWN:
+ case HIF_DEVICE_POWER_CUT:
+ status = A_OK;
+ break;
+ }
+ return status;
+}
+
+static int ar6000_pm_probe(struct platform_device *pdev)
+{
+ plat_setup_power(1,1);
+ return 0;
+}
+
+static int ar6000_pm_remove(struct platform_device *pdev)
+{
+ plat_setup_power(0,1);
+ return 0;
+}
+
+static int ar6000_pm_suspend(struct platform_device *pdev, pm_message_t state)
+{
+ return 0;
+}
+
+static int ar6000_pm_resume(struct platform_device *pdev)
+{
+ return 0;
+}
+
+static struct platform_driver ar6000_pm_device = {
+ .probe = ar6000_pm_probe,
+ .remove = ar6000_pm_remove,
+ .suspend = ar6000_pm_suspend,
+ .resume = ar6000_pm_resume,
+ .driver = {
+ .name = "wlan_ar6000_pm",
+ },
+};
+#endif /* CONFIG_PM */
+
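+/*
+ * ar6000_setup_cut_power_state - enter or leave the "cut power" state.
+ * WLAN_ENABLED: restore board power (plat_setup_power), tell HIF the device
+ * powered up and restart the endpoints (waiting for the WMI ready event on
+ * Android builds). WLAN_DISABLED: stop the endpoints, tell HIF power will be
+ * cut, and remove board power. ar->arWlanPowerState tracks the result.
+ */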
+A_STATUS
+ar6000_setup_cut_power_state(struct ar6_softc *ar, AR6000_WLAN_STATE state)
+{
+ A_STATUS status = A_OK;
+ HIF_DEVICE_POWER_CHANGE_TYPE config;
+
+ AR_DEBUG_PRINTF(ATH_DEBUG_PM, ("%s: Cut power %d %d \n", __func__,state, ar->arWlanPowerState));
+#ifdef CONFIG_PM
+ AR_DEBUG_PRINTF(ATH_DEBUG_PM, ("Wlan OFF %d BT OFf %d \n", ar->arWlanOff, ar->arBTOff));
+#endif
+ do {
+ if (state == WLAN_ENABLED) {
+ /* Not in cut power state.. exit */
+ if (ar->arWlanPowerState != WLAN_POWER_STATE_CUT_PWR) {
+ break;
+ }
+
+ plat_setup_power(1,0);
+
+ /* Change the state to ON */
+ ar->arWlanPowerState = WLAN_POWER_STATE_ON;
+
+
+ /* Indicate POWER_UP to HIF */
+ config = HIF_DEVICE_POWER_UP;
+ status = HIFConfigureDevice(ar->arHifDevice,
+ HIF_DEVICE_POWER_STATE_CHANGE,
+ &config,
+ sizeof(HIF_DEVICE_POWER_CHANGE_TYPE));
+
+ if (status == A_PENDING) {
+#ifdef ANDROID_ENV
+ /* Wait for WMI ready event */
+ A_UINT32 timeleft = wait_event_interruptible_timeout(arEvent,
+ (ar->arWmiReady == TRUE), wmitimeout * HZ);
+ if (!timeleft || signal_pending(current)) {
+ AR_DEBUG_PRINTF(ATH_DEBUG_ERR,("ar6000 : Failed to get wmi ready \n"));
+ status = A_ERROR;
+ break;
+ }
+#endif
+ status = A_OK;
+ } else if (status == A_OK) {
+ ar6000_restart_endpoint(ar->arNetDev);
+ status = A_OK;
+ }
+ } else if (state == WLAN_DISABLED) {
+
+
+ /* Already in cut power state.. exit */
+ if (ar->arWlanPowerState == WLAN_POWER_STATE_CUT_PWR) {
+ break;
+ }
+ ar6000_stop_endpoint(ar->arNetDev, TRUE, FALSE);
+
+ config = HIF_DEVICE_POWER_CUT;
+ status = HIFConfigureDevice(ar->arHifDevice,
+ HIF_DEVICE_POWER_STATE_CHANGE,
+ &config,
+ sizeof(HIF_DEVICE_POWER_CHANGE_TYPE));
+
+ plat_setup_power(0,0);
+
+ ar->arWlanPowerState = WLAN_POWER_STATE_CUT_PWR;
+ }
+ } while (0);
+
+ return status;
+}
+
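+/*
+ * ar6000_setup_deep_sleep_state - enter or leave firmware deep sleep.
+ * Leaving (WLAN_ENABLED): wake the host-sleep mode, restore the scan
+ * parameters and reconnect to the previous AP if an SSID is cached.
+ * Entering (WLAN_DISABLED): disconnect, disable scanning and WoW, flush
+ * pending tx, set the host asleep and wait for outstanding WMI control
+ * traffic and pending receives to drain before recording the new state.
+ */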
+A_STATUS
+ar6000_setup_deep_sleep_state(struct ar6_softc *ar, AR6000_WLAN_STATE state)
+{
+ A_STATUS status = A_OK;
+
+ AR_DEBUG_PRINTF(ATH_DEBUG_PM, ("%s: Deep sleep %d %d \n", __func__,state, ar->arWlanPowerState));
+#ifdef CONFIG_PM
+ AR_DEBUG_PRINTF(ATH_DEBUG_PM, ("Wlan OFF %d BT OFf %d \n", ar->arWlanOff, ar->arBTOff));
+#endif
+ do {
+ WMI_SET_HOST_SLEEP_MODE_CMD hostSleepMode;
+
+ if (state == WLAN_ENABLED) {
+ A_UINT16 fg_start_period;
+
+ /* Not in deep sleep state.. exit */
+ if (ar->arWlanPowerState != WLAN_POWER_STATE_DEEP_SLEEP) {
+ if (ar->arWlanPowerState != WLAN_POWER_STATE_ON) {
+ AR_DEBUG_PRINTF(ATH_DEBUG_ERR, ("Strange state when we resume from deep sleep %d\n", ar->arWlanPowerState));
+ }
+ break;
+ }
+
+ fg_start_period = (ar->scParams.fg_start_period==0) ? 1 : ar->scParams.fg_start_period;
+ hostSleepMode.awake = TRUE;
+ hostSleepMode.asleep = FALSE;
+
+ if ((status=wmi_set_host_sleep_mode_cmd(ar->arWmi, &hostSleepMode)) != A_OK) {
+ break;
+ }
+
+ /* Change the state to ON */
+ ar->arWlanPowerState = WLAN_POWER_STATE_ON;
+
+ /* Enable foreground scanning */
+ if ((status=wmi_scanparams_cmd(ar->arWmi, fg_start_period,
+ ar->scParams.fg_end_period,
+ ar->scParams.bg_period,
+ ar->scParams.minact_chdwell_time,
+ ar->scParams.maxact_chdwell_time,
+ ar->scParams.pas_chdwell_time,
+ ar->scParams.shortScanRatio,
+ ar->scParams.scanCtrlFlags,
+ ar->scParams.max_dfsch_act_time,
+ ar->scParams.maxact_scan_per_ssid)) != A_OK)
+ {
+ break;
+ }
+
+ if (ar->arNetworkType != AP_NETWORK)
+ {
+ if (ar->arSsidLen) {
+ if (ar6000_connect_to_ap(ar) != A_OK) {
+ /* no need to report error if connection failed */
+ break;
+ }
+ }
+ }
+ } else if (state == WLAN_DISABLED){
+ WMI_SET_WOW_MODE_CMD wowMode = { .enable_wow = FALSE };
+
+ /* Already in deep sleep state.. exit */
+ if (ar->arWlanPowerState != WLAN_POWER_STATE_ON) {
+ if (ar->arWlanPowerState != WLAN_POWER_STATE_DEEP_SLEEP) {
+ AR_DEBUG_PRINTF(ATH_DEBUG_ERR, ("Strange state when we suspend for deep sleep %d\n", ar->arWlanPowerState));
+ }
+ break;
+ }
+
+ if (ar->arNetworkType != AP_NETWORK)
+ {
+ /* Disconnect from the AP and disable foreground scanning */
+ AR6000_SPIN_LOCK(&ar->arLock, 0);
+ if (ar->arConnected == TRUE || ar->arConnectPending == TRUE) {
+ AR6000_SPIN_UNLOCK(&ar->arLock, 0);
+ wmi_disconnect_cmd(ar->arWmi);
+ } else {
+ AR6000_SPIN_UNLOCK(&ar->arLock, 0);
+ }
+ }
+
+ ar->scan_triggered = 0;
+
+ if ((status=wmi_scanparams_cmd(ar->arWmi, 0xFFFF, 0, 0, 0, 0, 0, 0, 0, 0, 0)) != A_OK) {
+ break;
+ }
+
+ /* make sure we disable wow for deep sleep */
+ if ((status=wmi_set_wow_mode_cmd(ar->arWmi, &wowMode))!=A_OK)
+ {
+ break;
+ }
+
+ ar6000_TxDataCleanup(ar);
+#ifndef ATH6K_CONFIG_OTA_MODE
+ wmi_powermode_cmd(ar->arWmi, REC_POWER);
+#endif
+
+ hostSleepMode.awake = FALSE;
+ hostSleepMode.asleep = TRUE;
+ if ((status=wmi_set_host_sleep_mode_cmd(ar->arWmi, &hostSleepMode))!=A_OK) {
+ break;
+ }
+ if (ar->arTxPending[ar->arControlEp]) {
+ A_UINT32 timeleft = wait_event_interruptible_timeout(arEvent,
+ ar->arTxPending[ar->arControlEp] == 0, wmitimeout * HZ);
+ if (!timeleft || signal_pending(current)) {
+ status = A_ERROR;
+ break;
+ }
+ }
+ status = hifWaitForPendingRecv(ar->arHifDevice);
+
+ ar->arWlanPowerState = WLAN_POWER_STATE_DEEP_SLEEP;
+ }
+ } while (0);
+
+ if (status!=A_OK) {
+ AR_DEBUG_PRINTF(ATH_DEBUG_ERR,("Fail to enter/exit deep sleep %d\n", state));
+ }
+
+ return status;
+}
+
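+/*
+ * ar6000_update_wlan_pwr_state - central WLAN power state machine. Decides,
+ * under ar->arSem, whether the target should be ON, in deep sleep or with
+ * power cut, based on the requested state, the wlan-off/bt-off flags and the
+ * suspend configuration, then calls the deep-sleep/cut-power helpers to get
+ * there. A WMI_REPORT_SLEEP_STATE event is sent to user space whenever the
+ * device crosses the ON boundary in either direction.
+ */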
+A_STATUS
+ar6000_update_wlan_pwr_state(struct ar6_softc *ar, AR6000_WLAN_STATE state, A_BOOL pmEvent)
+{
+ A_STATUS status = A_OK;
+ A_UINT16 powerState, oldPowerState;
+ AR6000_WLAN_STATE oldstate = ar->arWlanState;
+ A_BOOL wlanOff = ar->arWlanOff;
+#ifdef CONFIG_PM
+ A_BOOL btOff = ar->arBTOff;
+#endif /* CONFIG_PM */
+
+ if ((state!=WLAN_DISABLED && state!=WLAN_ENABLED)) {
+ return A_ERROR;
+ }
+
+ if (ar->bIsDestroyProgress) {
+ return A_EBUSY;
+ }
+
+ if (down_interruptible(&ar->arSem)) {
+ return A_ERROR;
+ }
+
+ if (ar->bIsDestroyProgress) {
+ up(&ar->arSem);
+ return A_EBUSY;
+ }
+
+ ar->arWlanState = wlanOff ? WLAN_DISABLED : state;
+ oldPowerState = ar->arWlanPowerState;
+ if (state == WLAN_ENABLED) {
+ powerState = ar->arWlanPowerState;
+ AR_DEBUG_PRINTF(ATH_DEBUG_PM, ("WLAN PWR set to ENABLE^^\n"));
+ if (!wlanOff) {
+ if (powerState == WLAN_POWER_STATE_DEEP_SLEEP) {
+ status = ar6000_setup_deep_sleep_state(ar, WLAN_ENABLED);
+ } else if (powerState == WLAN_POWER_STATE_CUT_PWR) {
+ status = ar6000_setup_cut_power_state(ar, WLAN_ENABLED);
+ }
+ }
+#ifdef CONFIG_PM
+ else if (pmEvent && wlanOff) {
+ A_BOOL allowCutPwr = ((!ar->arBTSharing) || btOff);
+ if ((powerState==WLAN_POWER_STATE_CUT_PWR) && (!allowCutPwr)) {
+ /* Come out of cut power */
+ ar6000_setup_cut_power_state(ar, WLAN_ENABLED);
+ status = ar6000_setup_deep_sleep_state(ar, WLAN_DISABLED);
+ }
+ }
+#endif /* CONFIG_PM */
+ } else if (state == WLAN_DISABLED) {
+ AR_DEBUG_PRINTF(ATH_DEBUG_PM, ("WLAN PWR set to DISABLED~\n"));
+ powerState = WLAN_POWER_STATE_DEEP_SLEEP;
+#ifdef CONFIG_PM
+ if (pmEvent) { /* disable due to suspend */
+ A_BOOL suspendCutPwr = (ar->arSuspendConfig == WLAN_SUSPEND_CUT_PWR ||
+ (ar->arSuspendConfig == WLAN_SUSPEND_WOW &&
+ ar->arWow2Config==WLAN_SUSPEND_CUT_PWR));
+ A_BOOL suspendCutIfBtOff = ((ar->arSuspendConfig ==
+ WLAN_SUSPEND_CUT_PWR_IF_BT_OFF ||
+ (ar->arSuspendConfig == WLAN_SUSPEND_WOW &&
+ ar->arWow2Config==WLAN_SUSPEND_CUT_PWR_IF_BT_OFF)) &&
+ (!ar->arBTSharing || btOff));
+ if ((suspendCutPwr) ||
+ (suspendCutIfBtOff) ||
+ (ar->arWlanState==WLAN_POWER_STATE_CUT_PWR))
+ {
+ powerState = WLAN_POWER_STATE_CUT_PWR;
+ }
+ } else {
+ if ((wlanOff) &&
+ (ar->arWlanOffConfig == WLAN_OFF_CUT_PWR) &&
+ (!ar->arBTSharing || btOff))
+ {
+ /* For BT clock sharing designs, CUT_POWER depends on the BT state */
+ powerState = WLAN_POWER_STATE_CUT_PWR;
+ }
+ }
+#endif /* CONFIG_PM */
+
+ if (powerState == WLAN_POWER_STATE_DEEP_SLEEP) {
+ if (ar->arWlanPowerState == WLAN_POWER_STATE_CUT_PWR) {
+ AR_DEBUG_PRINTF(ATH_DEBUG_PM, ("Load firmware before set to deep sleep\n"));
+ ar6000_setup_cut_power_state(ar, WLAN_ENABLED);
+ }
+ status = ar6000_setup_deep_sleep_state(ar, WLAN_DISABLED);
+ } else if (powerState == WLAN_POWER_STATE_CUT_PWR) {
+ status = ar6000_setup_cut_power_state(ar, WLAN_DISABLED);
+ }
+
+ }
+
+ if (status!=A_OK) {
+ AR_DEBUG_PRINTF(ATH_DEBUG_ERR,("Fail to setup WLAN state %d\n", ar->arWlanState));
+ ar->arWlanState = oldstate;
+ } else if (status == A_OK) {
+ WMI_REPORT_SLEEP_STATE_EVENT wmiSleepEvent, *pSleepEvent = NULL;
+ if ((ar->arWlanPowerState == WLAN_POWER_STATE_ON) && (oldPowerState != WLAN_POWER_STATE_ON)) {
+ wmiSleepEvent.sleepState = WMI_REPORT_SLEEP_STATUS_IS_AWAKE;
+ pSleepEvent = &wmiSleepEvent;
+ } else if ((ar->arWlanPowerState != WLAN_POWER_STATE_ON) && (oldPowerState == WLAN_POWER_STATE_ON)) {
+ wmiSleepEvent.sleepState = WMI_REPORT_SLEEP_STATUS_IS_DEEP_SLEEP;
+ pSleepEvent = &wmiSleepEvent;
+ }
+ if (pSleepEvent) {
+ AR_DEBUG_PRINTF(ATH_DEBUG_PM, ("SENT WLAN Sleep Event %d\n", wmiSleepEvent.sleepState));
+ ar6000_send_event_to_app(ar, WMI_REPORT_SLEEP_STATE_EVENTID, (A_UINT8*)pSleepEvent,
+ sizeof(wmiSleepEvent));
+ }
+ }
+ up(&ar->arSem);
+ return status;
+}
+
+A_STATUS
+ar6000_set_bt_hw_state(struct ar6_softc *ar, A_UINT32 enable)
+{
+#ifdef CONFIG_PM
+ A_BOOL off = (enable == 0);
+ A_STATUS status;
+ if (ar->arBTOff == off) {
+ return A_OK;
+ }
+ ar->arBTOff = off;
+ status = ar6000_update_wlan_pwr_state(ar, ar->arWlanOff ? WLAN_DISABLED : WLAN_ENABLED, FALSE);
+ return status;
+#else
+ return A_OK;
+#endif
+}
+
+A_STATUS
+ar6000_set_wlan_state(struct ar6_softc *ar, AR6000_WLAN_STATE state)
+{
+ A_STATUS status;
+ A_BOOL off = (state == WLAN_DISABLED);
+ if (ar->arWlanOff == off) {
+ return A_OK;
+ }
+ ar->arWlanOff = off;
+ status = ar6000_update_wlan_pwr_state(ar, state, FALSE);
+ return status;
+}
+
+void ar6000_pm_init(void)
+{
+ A_REGISTER_MODULE_DEBUG_INFO(pm);
+#ifdef CONFIG_PM
+#ifdef CONFIG_HAS_WAKELOCK
+ wake_lock_init(&ar6k_suspend_wake_lock, WAKE_LOCK_SUSPEND, "ar6k_suspend");
+ wake_lock_init(&ar6k_wow_wake_lock, WAKE_LOCK_SUSPEND, "ar6k_wow");
+#endif
+ /*
+ * Register ar6000_pm_device into system.
+ * We should also add platform_device into the first item of array
+ * of devices[] in file arch/xxx/mach-xxx/board-xxxx.c
+ */
+ if (platform_driver_register(&ar6000_pm_device)) {
+ AR_DEBUG_PRINTF(ATH_DEBUG_ERR,("ar6000: fail to register the power control driver.\n"));
+ }
+#endif /* CONFIG_PM */
+}
+
+void ar6000_pm_exit(void)
+{
+#ifdef CONFIG_PM
+ platform_driver_unregister(&ar6000_pm_device);
+#ifdef CONFIG_HAS_WAKELOCK
+ wake_lock_destroy(&ar6k_suspend_wake_lock);
+ wake_lock_destroy(&ar6k_wow_wake_lock);
+#endif
+#endif /* CONFIG_PM */
+}
diff --git a/drivers/staging/ath6kl/os/linux/ar6000_raw_if.c b/drivers/staging/ath6kl/os/linux/ar6000_raw_if.c
new file mode 100644
index 000000000000..6b8eeea475cf
--- /dev/null
+++ b/drivers/staging/ath6kl/os/linux/ar6000_raw_if.c
@@ -0,0 +1,455 @@
+//------------------------------------------------------------------------------
+// Copyright (c) 2004-2010 Atheros Communications Inc.
+// All rights reserved.
+//
+//
+//
+// Permission to use, copy, modify, and/or distribute this software for any
+// purpose with or without fee is hereby granted, provided that the above
+// copyright notice and this permission notice appear in all copies.
+//
+// THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+// WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+// MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+// ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+// WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+// ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+// OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+//
+//
+//
+// Author(s): ="Atheros"
+//------------------------------------------------------------------------------
+
+#include "ar6000_drv.h"
+
+#ifdef HTC_RAW_INTERFACE
+
+static void
+ar6000_htc_raw_read_cb(void *Context, HTC_PACKET *pPacket)
+{
+ AR_SOFTC_T *ar = (AR_SOFTC_T *)Context;
+ raw_htc_buffer *busy;
+ HTC_RAW_STREAM_ID streamID;
+ AR_RAW_HTC_T *arRaw = ar->arRawHtc;
+
+ busy = (raw_htc_buffer *)pPacket->pPktContext;
+ A_ASSERT(busy != NULL);
+
+ if (pPacket->Status == A_ECANCELED) {
+ /*
+ * HTC provides A_ECANCELED status when it doesn't want to be refilled
+ * (probably due to a shutdown)
+ */
+ return;
+ }
+
+ streamID = arEndpoint2RawStreamID(ar,pPacket->Endpoint);
+ A_ASSERT(streamID != HTC_RAW_STREAM_NOT_MAPPED);
+
+#ifdef CF
+ if (down_trylock(&arRaw->raw_htc_read_sem[streamID])) {
+#else
+ if (down_interruptible(&arRaw->raw_htc_read_sem[streamID])) {
+#endif /* CF */
+ AR_DEBUG_PRINTF(ATH_DEBUG_ERR,("Unable to down the semaphore\n"));
+ }
+
+ A_ASSERT((pPacket->Status != A_OK) ||
+ (pPacket->pBuffer == (busy->data + HTC_HEADER_LEN)));
+
+ busy->length = pPacket->ActualLength + HTC_HEADER_LEN;
+ busy->currPtr = HTC_HEADER_LEN;
+ arRaw->read_buffer_available[streamID] = TRUE;
+ //AR_DEBUG_PRINTF(ATH_DEBUG_ERR,("raw read cb: 0x%X 0x%X \n", busy->currPtr,busy->length);
+ up(&arRaw->raw_htc_read_sem[streamID]);
+
+ /* Signal the waiting process */
+ AR_DEBUG_PRINTF(ATH_DEBUG_HTC_RAW,("Waking up the StreamID(%d) read process\n", streamID));
+ wake_up_interruptible(&arRaw->raw_htc_read_queue[streamID]);
+}
+
+static void
+ar6000_htc_raw_write_cb(void *Context, HTC_PACKET *pPacket)
+{
+ AR_SOFTC_T *ar = (AR_SOFTC_T *)Context;
+ raw_htc_buffer *free;
+ HTC_RAW_STREAM_ID streamID;
+ AR_RAW_HTC_T *arRaw = ar->arRawHtc;
+
+ free = (raw_htc_buffer *)pPacket->pPktContext;
+ A_ASSERT(free != NULL);
+
+ if (pPacket->Status == A_ECANCELED) {
+ /*
+ * HTC provides A_ECANCELED status when it doesn't want to be refilled
+ * (probably due to a shutdown)
+ */
+ return;
+ }
+
+ streamID = arEndpoint2RawStreamID(ar,pPacket->Endpoint);
+ A_ASSERT(streamID != HTC_RAW_STREAM_NOT_MAPPED);
+
+#ifdef CF
+ if (down_trylock(&arRaw->raw_htc_write_sem[streamID])) {
+#else
+ if (down_interruptible(&arRaw->raw_htc_write_sem[streamID])) {
+#endif
+ AR_DEBUG_PRINTF(ATH_DEBUG_ERR,("Unable to down the semaphore\n"));
+ }
+
+ A_ASSERT(pPacket->pBuffer == (free->data + HTC_HEADER_LEN));
+
+ free->length = 0;
+ arRaw->write_buffer_available[streamID] = TRUE;
+ up(&arRaw->raw_htc_write_sem[streamID]);
+
+ /* Signal the waiting process */
+ AR_DEBUG_PRINTF(ATH_DEBUG_HTC_RAW,("Waking up the StreamID(%d) write process\n", streamID));
+ wake_up_interruptible(&arRaw->raw_htc_write_queue[streamID]);
+}
+
+/* connect to a service */
+static A_STATUS ar6000_connect_raw_service(AR_SOFTC_T *ar,
+ HTC_RAW_STREAM_ID StreamID)
+{
+ A_STATUS status;
+ HTC_SERVICE_CONNECT_RESP response;
+ A_UINT8 streamNo;
+ HTC_SERVICE_CONNECT_REQ connect;
+
+ do {
+
+ A_MEMZERO(&connect,sizeof(connect));
+ /* pass the stream ID as meta data to the RAW streams service */
+ streamNo = (A_UINT8)StreamID;
+ connect.pMetaData = &streamNo;
+ connect.MetaDataLength = sizeof(A_UINT8);
+ /* these fields are the same for all endpoints */
+ connect.EpCallbacks.pContext = ar;
+ connect.EpCallbacks.EpTxComplete = ar6000_htc_raw_write_cb;
+ connect.EpCallbacks.EpRecv = ar6000_htc_raw_read_cb;
+ /* simple interface, we don't need these optional callbacks */
+ connect.EpCallbacks.EpRecvRefill = NULL;
+ connect.EpCallbacks.EpSendFull = NULL;
+ connect.MaxSendQueueDepth = RAW_HTC_WRITE_BUFFERS_NUM;
+
+ /* connect to the raw streams service, we may be able to get 1 or more
+ * connections, depending on WHAT is running on the target */
+ connect.ServiceID = HTC_RAW_STREAMS_SVC;
+
+ A_MEMZERO(&response,sizeof(response));
+
+ /* try to connect to the raw stream, it is okay if this fails with
+ * status HTC_SERVICE_NO_MORE_EP */
+ status = HTCConnectService(ar->arHtcTarget,
+ &connect,
+ &response);
+
+ if (A_FAILED(status)) {
+ if (response.ConnectRespCode == HTC_SERVICE_NO_MORE_EP) {
+ AR_DEBUG_PRINTF(ATH_DEBUG_ERR,("HTC RAW , No more streams allowed \n"));
+ status = A_OK;
+ }
+ break;
+ }
+
+ /* set endpoint mapping for the RAW HTC streams */
+ arSetRawStream2EndpointIDMap(ar,StreamID,response.Endpoint);
+
+ AR_DEBUG_PRINTF(ATH_DEBUG_HTC_RAW,("HTC RAW : stream ID: %d, endpoint: %d\n",
+ StreamID, arRawStream2EndpointID(ar,StreamID)));
+
+ } while (FALSE);
+
+ return status;
+}
+
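+/*
+ * ar6000_htc_raw_open - bring up the raw HTC interface. Allocates the
+ * AR_RAW_HTC_T bookkeeping structure, waits for the target, then connects to
+ * HTC_RAW_STREAMS_SVC once per stream until the target refuses more
+ * endpoints. For every connected stream the read buffers are queued to HTC
+ * for receive and the write buffers are zeroed, after which HTC is started.
+ */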
+int ar6000_htc_raw_open(AR_SOFTC_T *ar)
+{
+ A_STATUS status;
+ int streamID, endPt, count2;
+ raw_htc_buffer *buffer;
+ HTC_SERVICE_ID servicepriority;
+ AR_RAW_HTC_T *arRaw = ar->arRawHtc;
+ if (!arRaw) {
+ arRaw = ar->arRawHtc = A_MALLOC(sizeof(AR_RAW_HTC_T));
+ if (arRaw) {
+ A_MEMZERO(arRaw, sizeof(AR_RAW_HTC_T));
+ }
+ }
+ A_ASSERT(ar->arHtcTarget != NULL);
+ if (!arRaw) {
+ AR_DEBUG_PRINTF(ATH_DEBUG_ERR,("Faile to allocate memory for HTC RAW interface\n"));
+ return -ENOMEM;
+ }
+ /* wait for target */
+ status = HTCWaitTarget(ar->arHtcTarget);
+
+ if (A_FAILED(status)) {
+ AR_DEBUG_PRINTF(ATH_DEBUG_ERR,("HTCWaitTarget failed (%d)\n", status));
+ return -ENODEV;
+ }
+
+ for (endPt = 0; endPt < ENDPOINT_MAX; endPt++) {
+ arRaw->arEp2RawMapping[endPt] = HTC_RAW_STREAM_NOT_MAPPED;
+ }
+
+ for (streamID = HTC_RAW_STREAM_0; streamID < HTC_RAW_STREAM_NUM_MAX; streamID++) {
+ /* Initialize the data structures */
+ sema_init(&arRaw->raw_htc_read_sem[streamID], 1);
+ sema_init(&arRaw->raw_htc_write_sem[streamID], 1);
+ init_waitqueue_head(&arRaw->raw_htc_read_queue[streamID]);
+ init_waitqueue_head(&arRaw->raw_htc_write_queue[streamID]);
+
+ /* try to connect to the raw service */
+ status = ar6000_connect_raw_service(ar,streamID);
+
+ if (A_FAILED(status)) {
+ break;
+ }
+
+ if (arRawStream2EndpointID(ar,streamID) == 0) {
+ break;
+ }
+
+ for (count2 = 0; count2 < RAW_HTC_READ_BUFFERS_NUM; count2 ++) {
+ /* Initialize the receive buffers */
+ buffer = &arRaw->raw_htc_write_buffer[streamID][count2];
+ memset(buffer, 0, sizeof(raw_htc_buffer));
+ buffer = &arRaw->raw_htc_read_buffer[streamID][count2];
+ memset(buffer, 0, sizeof(raw_htc_buffer));
+
+ SET_HTC_PACKET_INFO_RX_REFILL(&buffer->HTCPacket,
+ buffer,
+ buffer->data,
+ HTC_RAW_BUFFER_SIZE,
+ arRawStream2EndpointID(ar,streamID));
+
+ /* Queue buffers to HTC for receive */
+ if ((status = HTCAddReceivePkt(ar->arHtcTarget, &buffer->HTCPacket)) != A_OK)
+ {
+ BMIInit();
+ return -EIO;
+ }
+ }
+
+ for (count2 = 0; count2 < RAW_HTC_WRITE_BUFFERS_NUM; count2 ++) {
+ /* Initialize the write (send) buffers */
+ buffer = &arRaw->raw_htc_write_buffer[streamID][count2];
+ memset(buffer, 0, sizeof(raw_htc_buffer));
+ }
+
+ arRaw->read_buffer_available[streamID] = FALSE;
+ arRaw->write_buffer_available[streamID] = TRUE;
+ }
+
+ if (A_FAILED(status)) {
+ return -EIO;
+ }
+
+ AR_DEBUG_PRINTF(ATH_DEBUG_INFO,("HTC RAW, number of streams the target supports: %d \n", streamID));
+
+ servicepriority = HTC_RAW_STREAMS_SVC; /* only 1 */
+
+ /* set callbacks and priority list */
+ HTCSetCreditDistribution(ar->arHtcTarget,
+ ar,
+ NULL, /* use default */
+ NULL, /* use default */
+ &servicepriority,
+ 1);
+
+ /* Start the HTC component */
+ if ((status = HTCStart(ar->arHtcTarget)) != A_OK) {
+ BMIInit();
+ return -EIO;
+ }
+
+ (ar)->arRawIfInit = TRUE;
+
+ return 0;
+}
+
+int ar6000_htc_raw_close(AR_SOFTC_T *ar)
+{
+ A_PRINTF("ar6000_htc_raw_close called \n");
+ HTCStop(ar->arHtcTarget);
+
+ /* reset the device */
+ ar6000_reset_device(ar->arHifDevice, ar->arTargetType, TRUE, FALSE);
+ /* Initialize the BMI component */
+ BMIInit();
+
+ return 0;
+}
+
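+/*
+ * get_filled_buffer / get_free_buffer - scan the per-stream ring of
+ * raw_htc_buffer entries. A non-zero length marks a read buffer holding
+ * received data (or a write buffer still in flight); the helpers update
+ * read_buffer_available/write_buffer_available so the read/write paths know
+ * whether to sleep on the corresponding wait queue. Callers hold the
+ * per-stream semaphore while these run.
+ */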
+raw_htc_buffer *
+get_filled_buffer(AR_SOFTC_T *ar, HTC_RAW_STREAM_ID StreamID)
+{
+ int count;
+ raw_htc_buffer *busy;
+ AR_RAW_HTC_T *arRaw = ar->arRawHtc;
+
+ /* Check for data */
+ for (count = 0; count < RAW_HTC_READ_BUFFERS_NUM; count ++) {
+ busy = &arRaw->raw_htc_read_buffer[StreamID][count];
+ if (busy->length) {
+ break;
+ }
+ }
+ if (busy->length) {
+ arRaw->read_buffer_available[StreamID] = TRUE;
+ } else {
+ arRaw->read_buffer_available[StreamID] = FALSE;
+ }
+
+ return busy;
+}
+
+ssize_t ar6000_htc_raw_read(AR_SOFTC_T *ar, HTC_RAW_STREAM_ID StreamID,
+ char __user *buffer, size_t length)
+{
+ int readPtr;
+ raw_htc_buffer *busy;
+ AR_RAW_HTC_T *arRaw = ar->arRawHtc;
+
+ if (arRawStream2EndpointID(ar,StreamID) == 0) {
+ AR_DEBUG_PRINTF(ATH_DEBUG_ERR,("StreamID(%d) not connected! \n", StreamID));
+ return -EFAULT;
+ }
+
+ if (down_interruptible(&arRaw->raw_htc_read_sem[StreamID])) {
+ return -ERESTARTSYS;
+ }
+
+ busy = get_filled_buffer(ar,StreamID);
+ while (!arRaw->read_buffer_available[StreamID]) {
+ up(&arRaw->raw_htc_read_sem[StreamID]);
+
+ /* Wait for the data */
+ AR_DEBUG_PRINTF(ATH_DEBUG_HTC_RAW,("Sleeping StreamID(%d) read process\n", StreamID));
+ if (wait_event_interruptible(arRaw->raw_htc_read_queue[StreamID],
+ arRaw->read_buffer_available[StreamID]))
+ {
+ return -EINTR;
+ }
+ if (down_interruptible(&arRaw->raw_htc_read_sem[StreamID])) {
+ return -ERESTARTSYS;
+ }
+ busy = get_filled_buffer(ar,StreamID);
+ }
+
+ /* Read the data */
+ readPtr = busy->currPtr;
+ if (length > busy->length - HTC_HEADER_LEN) {
+ length = busy->length - HTC_HEADER_LEN;
+ }
+ if (copy_to_user(buffer, &busy->data[readPtr], length)) {
+ up(&arRaw->raw_htc_read_sem[StreamID]);
+ return -EFAULT;
+ }
+
+ busy->currPtr += length;
+
+ if (busy->currPtr == busy->length)
+ {
+ busy->currPtr = 0;
+ busy->length = 0;
+ HTC_PACKET_RESET_RX(&busy->HTCPacket);
+ //AR_DEBUG_PRINTF(ATH_DEBUG_HTC_RAW,("raw read ioctl: ep for packet:%d \n", busy->HTCPacket.Endpoint));
+ HTCAddReceivePkt(ar->arHtcTarget, &busy->HTCPacket);
+ }
+ arRaw->read_buffer_available[StreamID] = FALSE;
+ up(&arRaw->raw_htc_read_sem[StreamID]);
+
+ return length;
+}
+
+static raw_htc_buffer *
+get_free_buffer(AR_SOFTC_T *ar, HTC_ENDPOINT_ID StreamID)
+{
+ int count;
+ raw_htc_buffer *free;
+ AR_RAW_HTC_T *arRaw = ar->arRawHtc;
+
+ free = NULL;
+ for (count = 0; count < RAW_HTC_WRITE_BUFFERS_NUM; count ++) {
+ free = &arRaw->raw_htc_write_buffer[StreamID][count];
+ if (free->length == 0) {
+ break;
+ }
+ }
+ if (!free->length) {
+ arRaw->write_buffer_available[StreamID] = TRUE;
+ } else {
+ arRaw->write_buffer_available[StreamID] = FALSE;
+ }
+
+ return free;
+}
+
+ssize_t ar6000_htc_raw_write(AR_SOFTC_T *ar, HTC_RAW_STREAM_ID StreamID,
+ char __user *buffer, size_t length)
+{
+ int writePtr;
+ raw_htc_buffer *free;
+ AR_RAW_HTC_T *arRaw = ar->arRawHtc;
+ if (arRawStream2EndpointID(ar,StreamID) == 0) {
+ AR_DEBUG_PRINTF(ATH_DEBUG_ERR,("StreamID(%d) not connected! \n", StreamID));
+ return -EFAULT;
+ }
+
+ if (down_interruptible(&arRaw->raw_htc_write_sem[StreamID])) {
+ return -ERESTARTSYS;
+ }
+
+ /* Search for a free buffer */
+ free = get_free_buffer(ar,StreamID);
+
+ /* Check if there is space to write else wait */
+ while (!arRaw->write_buffer_available[StreamID]) {
+ up(&arRaw->raw_htc_write_sem[StreamID]);
+
+ /* Wait for buffer to become free */
+ AR_DEBUG_PRINTF(ATH_DEBUG_HTC_RAW,("Sleeping StreamID(%d) write process\n", StreamID));
+ if (wait_event_interruptible(arRaw->raw_htc_write_queue[StreamID],
+ arRaw->write_buffer_available[StreamID]))
+ {
+ return -EINTR;
+ }
+ if (down_interruptible(&arRaw->raw_htc_write_sem[StreamID])) {
+ return -ERESTARTSYS;
+ }
+ free = get_free_buffer(ar,StreamID);
+ }
+
+ /* Send the data */
+ writePtr = HTC_HEADER_LEN;
+ if (length > (HTC_RAW_BUFFER_SIZE - HTC_HEADER_LEN)) {
+ length = HTC_RAW_BUFFER_SIZE - HTC_HEADER_LEN;
+ }
+
+ if (copy_from_user(&free->data[writePtr], buffer, length)) {
+ up(&arRaw->raw_htc_read_sem[StreamID]);
+ return -EFAULT;
+ }
+
+ free->length = length;
+
+ SET_HTC_PACKET_INFO_TX(&free->HTCPacket,
+ free,
+ &free->data[writePtr],
+ length,
+ arRawStream2EndpointID(ar,StreamID),
+ AR6K_DATA_PKT_TAG);
+
+ HTCSendPkt(ar->arHtcTarget,&free->HTCPacket);
+
+ arRaw->write_buffer_available[StreamID] = FALSE;
+ up(&arRaw->raw_htc_write_sem[StreamID]);
+
+ return length;
+}
+#endif /* HTC_RAW_INTERFACE */
diff --git a/drivers/staging/ath6kl/os/linux/ar6k_pal.c b/drivers/staging/ath6kl/os/linux/ar6k_pal.c
new file mode 100644
index 000000000000..6c98a8817aed
--- /dev/null
+++ b/drivers/staging/ath6kl/os/linux/ar6k_pal.c
@@ -0,0 +1,481 @@
+//------------------------------------------------------------------------------
+// Copyright (c) 2004-2010 Atheros Communications Inc.
+// All rights reserved.
+//
+//
+//
+// Permission to use, copy, modify, and/or distribute this software for any
+// purpose with or without fee is hereby granted, provided that the above
+// copyright notice and this permission notice appear in all copies.
+//
+// THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+// WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+// MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+// ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+// WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+// ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+// OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+//
+//
+//
+// Author(s): ="Atheros"
+//------------------------------------------------------------------------------
+
+#include "ar6000_drv.h"
+#ifdef AR6K_ENABLE_HCI_PAL
+#include <net/bluetooth/bluetooth.h>
+#include <net/bluetooth/hci_core.h>
+#include <ar6k_pal.h>
+
+extern unsigned int setupbtdev;
+#define bt_check_bit(val, bit) (val & bit)
+#define bt_set_bit(val, bit) (val |= bit)
+#define bt_clear_bit(val, bit) (val &= ~bit)
+
+/* export ATH_AR6K_DEBUG_HCI_PAL=yes in host/localmake.linux.inc
+ * to enable debug information */
+#ifdef HCIPAL_DEBUG
+#define PRIN_LOG(format, args...) printk(KERN_ALERT "%s:%d - %s Msg:" format "\n",__FUNCTION__, __LINE__, __FILE__, ## args)
+#else
+#define PRIN_LOG(format, args...)
+#endif
+
+/**********************************
+ * HCI PAL private info structure
+ *********************************/
+typedef struct ar6k_hci_pal_info_s{
+
+ unsigned long ulFlags;
+#define HCI_NORMAL_MODE (1)
+#define HCI_REGISTERED (1<<1)
+ struct hci_dev *hdev; /* BT Stack HCI dev */
+ AR_SOFTC_T *ar;
+
+}ar6k_hci_pal_info_t;
+
+/*** BT Stack Entrypoints *******/
+/***************************************
+ * bt_open - open a handle to the device
+ ***************************************/
+static int bt_open(struct hci_dev *hdev)
+{
+ PRIN_LOG("HCI PAL: bt_open - enter - x\n");
+ set_bit(HCI_RUNNING, &hdev->flags);
+ set_bit(HCI_UP, &hdev->flags);
+ set_bit(HCI_INIT, &hdev->flags);
+ return 0;
+}
+
+/***************************************
+ * bt_close - close handle to the device
+ ***************************************/
+static int bt_close(struct hci_dev *hdev)
+{
+ PRIN_LOG("HCI PAL: bt_close - enter\n");
+ clear_bit(HCI_RUNNING, &hdev->flags);
+ return 0;
+}
+
+/*****************************
+ * bt_ioctl - ioctl processing
+ *****************************/
+static int bt_ioctl(struct hci_dev *hdev, unsigned int cmd, unsigned long arg)
+{
+ PRIN_LOG("HCI PAL: bt_ioctl - enter\n");
+ return -ENOIOCTLCMD;
+}
+
+/**************************************
+ * bt_flush - flush outstanding packets
+ **************************************/
+static int bt_flush(struct hci_dev *hdev)
+{
+ PRIN_LOG("HCI PAL: bt_flush - enter\n");
+ return 0;
+}
+
+/***************
+ * bt_destruct
+ ***************/
+static void bt_destruct(struct hci_dev *hdev)
+{
+ PRIN_LOG("HCI PAL: bt_destruct - enter\n");
+ /* nothing to do here */
+}
+
+/****************************************************
+ * Invoked from bluetooth stack via hdev->send()
+ * to send the packet out via ar6k to PAL firmware.
+ *
+ * For HCI command packet wmi_send_hci_cmd() is invoked.
+ * wmi_send_hci_cmd adds WMI_CMD_HDR and sends the packet
+ * to PAL firmware.
+ *
+ * For HCI ACL data packet wmi_data_hdr_add is invoked
+ * to add WMI_DATA_HDR to the packet. ar6000_acl_data_tx
+ * is then invoked to send the packet to PAL firmware.
+ ******************************************************/
+static int btpal_send_frame(struct sk_buff *skb)
+{
+ struct hci_dev *hdev = (struct hci_dev *)skb->dev;
+ HCI_TRANSPORT_PACKET_TYPE type;
+ ar6k_hci_pal_info_t *pHciPalInfo;
+ A_STATUS status = A_OK;
+ struct sk_buff *txSkb = NULL;
+ AR_SOFTC_T *ar;
+
+ if (!hdev) {
+ PRIN_LOG("HCI PAL: btpal_send_frame - no device\n");
+ return -ENODEV;
+ }
+
+ if (!test_bit(HCI_RUNNING, &hdev->flags)) {
+ PRIN_LOG("HCI PAL: btpal_send_frame - not open\n");
+ return -EBUSY;
+ }
+
+ pHciPalInfo = (ar6k_hci_pal_info_t *)hdev->driver_data;
+ A_ASSERT(pHciPalInfo != NULL);
+ ar = pHciPalInfo->ar;
+
+ PRIN_LOG("+btpal_send_frame type: %d \n",bt_cb(skb)->pkt_type);
+ type = HCI_COMMAND_TYPE;
+
+ switch (bt_cb(skb)->pkt_type) {
+ case HCI_COMMAND_PKT:
+ type = HCI_COMMAND_TYPE;
+ hdev->stat.cmd_tx++;
+ break;
+
+ case HCI_ACLDATA_PKT:
+ type = HCI_ACL_TYPE;
+ hdev->stat.acl_tx++;
+ break;
+
+ case HCI_SCODATA_PKT:
+ /* we don't support SCO over the pal */
+ kfree_skb(skb);
+ return 0;
+ default:
+ A_ASSERT(FALSE);
+ kfree_skb(skb);
+ return 0;
+ }
+
+ if (AR_DEBUG_LVL_CHECK(ATH_DEBUG_HCI_DUMP)) {
+ A_PRINTF(">>> Send HCI %s packet len: %d\n",
+ (type == HCI_COMMAND_TYPE) ? "COMMAND" : "ACL",
+ skb->len);
+ if (type == HCI_COMMAND_TYPE) {
+ PRIN_LOG(" HCI Command: OGF:0x%X OCF:0x%X \r\n",
+ HCI_GET_OP_CODE(skb->data) >> 10, HCI_GET_OP_CODE(skb->data) & 0x3FF);
+ }
+ AR_DEBUG_PRINTBUF(skb->data,skb->len,"BT HCI SEND Packet Dump");
+ }
+
+ do {
+ if(type == HCI_COMMAND_TYPE)
+ {
+ PRIN_LOG("HCI command");
+
+ if (ar->arWmiReady == FALSE)
+ {
+ PRIN_LOG("WMI not ready ");
+ break;
+ }
+
+ if (wmi_send_hci_cmd(ar->arWmi, skb->data, skb->len) != A_OK)
+ {
+ PRIN_LOG("send hci cmd error");
+ break;
+ }
+ }
+ else if(type == HCI_ACL_TYPE)
+ {
+ void *osbuf;
+
+ PRIN_LOG("ACL data");
+ if (ar->arWmiReady == FALSE)
+ {
+ PRIN_LOG("WMI not ready");
+ break;
+ }
+
+ /* need to add WMI header so allocate a skb with more space */
+ txSkb = bt_skb_alloc(TX_PACKET_RSV_OFFSET + WMI_MAX_TX_META_SZ +
+ sizeof(WMI_DATA_HDR) + skb->len,
+ GFP_ATOMIC);
+
+ if (txSkb == NULL) {
+ status = A_NO_MEMORY;
+ PRIN_LOG("No memory");
+ break;
+ }
+
+ bt_cb(txSkb)->pkt_type = bt_cb(skb)->pkt_type;
+ txSkb->dev = (void *)pHciPalInfo->hdev;
+ skb_reserve(txSkb, TX_PACKET_RSV_OFFSET + WMI_MAX_TX_META_SZ + sizeof(WMI_DATA_HDR));
+ A_MEMCPY(txSkb->data, skb->data, skb->len);
+ skb_put(txSkb,skb->len);
+ /* Add WMI packet type */
+ osbuf = (void *)txSkb;
+
+ if (wmi_data_hdr_add(ar->arWmi, osbuf, DATA_MSGTYPE, 0, WMI_DATA_HDR_DATA_TYPE_ACL,0,NULL) != A_OK) {
+ PRIN_LOG("XIOCTL_ACL_DATA - wmi_data_hdr_add failed\n");
+ } else {
+ /* Send data buffer over HTC */
+ PRIN_LOG("acl data tx");
+ ar6000_acl_data_tx(osbuf, ar->arNetDev);
+ }
+ txSkb = NULL;
+ }
+ } while (FALSE);
+
+ if (txSkb != NULL) {
+ PRIN_LOG("Free skb");
+ kfree_skb(txSkb);
+ }
+ kfree_skb(skb);
+ return 0;
+}
+
+
+/***********************************************
+ * Unregister HCI device and free HCI device info
+ ***********************************************/
+static void bt_cleanup_hci_pal(ar6k_hci_pal_info_t *pHciPalInfo)
+{
+ int err;
+
+ if (bt_check_bit(pHciPalInfo->ulFlags, HCI_REGISTERED)) {
+ bt_clear_bit(pHciPalInfo->ulFlags, HCI_REGISTERED);
+ clear_bit(HCI_RUNNING, &pHciPalInfo->hdev->flags);
+ clear_bit(HCI_UP, &pHciPalInfo->hdev->flags);
+ clear_bit(HCI_INIT, &pHciPalInfo->hdev->flags);
+ A_ASSERT(pHciPalInfo->hdev != NULL);
+ /* unregister */
+ PRIN_LOG("Unregister PAL device");
+ if ((err = hci_unregister_dev(pHciPalInfo->hdev)) < 0) {
+ PRIN_LOG("HCI PAL: failed to unregister with bluetooth %d\n",err);
+ }
+ }
+
+ if (pHciPalInfo->hdev != NULL) {
+ kfree(pHciPalInfo->hdev);
+ pHciPalInfo->hdev = NULL;
+ }
+}
+
+/*********************************************************
+ * Allocate HCI device and store in PAL private info structure.
+ *********************************************************/
+static A_STATUS bt_setup_hci_pal(ar6k_hci_pal_info_t *pHciPalInfo)
+{
+ A_STATUS status = A_OK;
+ struct hci_dev *pHciDev = NULL;
+
+ if (!setupbtdev) {
+ return A_OK;
+ }
+
+ do {
+ /* allocate a BT HCI struct for this device */
+ pHciDev = hci_alloc_dev();
+ if (NULL == pHciDev) {
+ PRIN_LOG("HCI PAL driver - failed to allocate BT HCI struct \n");
+ status = A_NO_MEMORY;
+ break;
+ }
+
+ /* save the device, we'll register this later */
+ pHciPalInfo->hdev = pHciDev;
+ SET_HCI_BUS_TYPE(pHciDev, HCI_VIRTUAL, HCI_80211);
+ pHciDev->driver_data = pHciPalInfo;
+ pHciDev->open = bt_open;
+ pHciDev->close = bt_close;
+ pHciDev->send = btpal_send_frame;
+ pHciDev->ioctl = bt_ioctl;
+ pHciDev->flush = bt_flush;
+ pHciDev->destruct = bt_destruct;
+ pHciDev->owner = THIS_MODULE;
+ /* driver is running in normal BT mode */
+ PRIN_LOG("Normal mode enabled");
+ bt_set_bit(pHciPalInfo->ulFlags, HCI_NORMAL_MODE);
+
+ } while (FALSE);
+
+ if (A_FAILED(status)) {
+ bt_cleanup_hci_pal(pHciPalInfo);
+ }
+ return status;
+}
+
+/**********************************************
+ * Cleanup HCI device and free HCI PAL private info
+ *********************************************/
+void ar6k_cleanup_hci_pal(void *ar_p)
+{
+ AR_SOFTC_T *ar = (AR_SOFTC_T *)ar_p;
+ ar6k_hci_pal_info_t *pHciPalInfo = (ar6k_hci_pal_info_t *)ar->hcipal_info;
+
+ if (pHciPalInfo != NULL) {
+ bt_cleanup_hci_pal(pHciPalInfo);
+ A_FREE(pHciPalInfo);
+ ar->hcipal_info = NULL;
+ }
+}
+
+/****************************
+ * Register HCI device
+ ****************************/
+static A_BOOL ar6k_pal_transport_ready(void *pHciPal)
+{
+ ar6k_hci_pal_info_t *pHciPalInfo = (ar6k_hci_pal_info_t *)pHciPal;
+
+ PRIN_LOG("HCI device transport ready");
+ if(pHciPalInfo == NULL)
+ return FALSE;
+
+ if (hci_register_dev(pHciPalInfo->hdev) < 0) {
+ PRIN_LOG("Can't register HCI device");
+ hci_free_dev(pHciPalInfo->hdev);
+ return FALSE;
+ }
+ PRIN_LOG("HCI device registered");
+ pHciPalInfo->ulFlags |= HCI_REGISTERED;
+ return TRUE;
+}
+
+/**************************************************
+ * Called from ar6k driver when command or ACL data
+ * packet is received. Pass the packet to bluetooth
+ * stack via hci_recv_frame.
+ **************************************************/
+A_BOOL ar6k_pal_recv_pkt(void *pHciPal, void *osbuf)
+{
+ struct sk_buff *skb = (struct sk_buff *)osbuf;
+ ar6k_hci_pal_info_t *pHciPalInfo;
+ A_BOOL success = FALSE;
+ A_UINT8 btType = 0;
+ pHciPalInfo = (ar6k_hci_pal_info_t *)pHciPal;
+
+ do {
+
+ /* if normal mode is not enabled pass on to the stack
+ * by returning failure */
+ if(!(pHciPalInfo->ulFlags & HCI_NORMAL_MODE))
+ {
+ PRIN_LOG("Normal mode not enabled");
+ break;
+ }
+
+ if (!test_bit(HCI_RUNNING, &pHciPalInfo->hdev->flags)) {
+ PRIN_LOG("HCI PAL: HCI - not running\n");
+ break;
+ }
+
+ if(*((short *)A_NETBUF_DATA(skb)) == WMI_ACL_DATA_EVENTID)
+ btType = HCI_ACLDATA_PKT;
+ else
+ btType = HCI_EVENT_PKT;
+ /* pull 4 bytes which contains WMI packet type */
+ A_NETBUF_PULL(skb, sizeof(int));
+ bt_cb(skb)->pkt_type = btType;
+ skb->dev = (void *)pHciPalInfo->hdev;
+
+ /* pass the received event packet up the stack */
+ if (hci_recv_frame(skb) != 0) {
+ PRIN_LOG("HCI PAL: hci_recv_frame failed \n");
+ break;
+ } else {
+ PRIN_LOG("HCI PAL: Indicated RCV of type:%d, Length:%d \n",HCI_EVENT_PKT, skb->len);
+ }
+ PRIN_LOG("hci recv success");
+ success = TRUE;
+ }while(FALSE);
+ return success;
+}
+
+/**********************************************************
+ * HCI PAL init function called from ar6k when it is loaded..
+ * Allocates PAL private info, stores the same in ar6k private info.
+ * Registers a HCI device.
+ * Registers packet receive callback function with ar6k
+ **********************************************************/
+A_STATUS ar6k_setup_hci_pal(void *ar_p)
+{
+ A_STATUS status = A_OK;
+ ar6k_hci_pal_info_t *pHciPalInfo;
+ ar6k_pal_config_t ar6k_pal_config;
+ AR_SOFTC_T *ar = (AR_SOFTC_T *)ar_p;
+
+ do {
+
+ pHciPalInfo = (ar6k_hci_pal_info_t *)A_MALLOC(sizeof(ar6k_hci_pal_info_t));
+
+ if (NULL == pHciPalInfo) {
+ status = A_NO_MEMORY;
+ break;
+ }
+
+ A_MEMZERO(pHciPalInfo, sizeof(ar6k_hci_pal_info_t));
+ ar->hcipal_info = pHciPalInfo;
+ pHciPalInfo->ar = ar;
+
+ status = bt_setup_hci_pal(pHciPalInfo);
+ if (A_FAILED(status)) {
+ break;
+ }
+
+ if(bt_check_bit(pHciPalInfo->ulFlags, HCI_NORMAL_MODE))
+ PRIN_LOG("HCI PAL: running in normal mode... \n");
+ else
+ PRIN_LOG("HCI PAL: running in test mode... \n");
+
+ ar6k_pal_config.fpar6k_pal_recv_pkt = ar6k_pal_recv_pkt;
+ register_pal_cb(&ar6k_pal_config);
+ ar6k_pal_transport_ready(ar->hcipal_info);
+ } while (FALSE);
+
+ if (A_FAILED(status)) {
+ ar6k_cleanup_hci_pal(ar);
+ }
+ return status;
+}
+#else /* AR6K_ENABLE_HCI_PAL */
+A_STATUS ar6k_setup_hci_pal(void *ar_p)
+{
+ return A_OK;
+}
+void ar6k_cleanup_hci_pal(void *ar_p)
+{
+}
+#endif /* AR6K_ENABLE_HCI_PAL */
+
+#ifdef EXPORT_HCI_PAL_INTERFACE
+/*****************************************************
+ * Register init and callback function with ar6k
+ * when PAL driver is a separate kernel module.
+ ****************************************************/
+A_STATUS ar6k_register_hci_pal(HCI_TRANSPORT_CALLBACKS *hciTransCallbacks);
+static int __init pal_init_module(void)
+{
+ HCI_TRANSPORT_CALLBACKS hciTransCallbacks;
+
+ hciTransCallbacks.setupTransport = ar6k_setup_hci_pal;
+ hciTransCallbacks.cleanupTransport = ar6k_cleanup_hci_pal;
+
+ if(ar6k_register_hci_pal(&hciTransCallbacks) != A_OK)
+ return -ENODEV;
+
+ return 0;
+}
+
+static void __exit pal_cleanup_module(void)
+{
+}
+
+module_init(pal_init_module);
+module_exit(pal_cleanup_module);
+MODULE_LICENSE("Dual BSD/GPL");
+#endif
diff --git a/drivers/staging/ath6kl/os/linux/cfg80211.c b/drivers/staging/ath6kl/os/linux/cfg80211.c
new file mode 100644
index 000000000000..7269d0a1d618
--- /dev/null
+++ b/drivers/staging/ath6kl/os/linux/cfg80211.c
@@ -0,0 +1,1471 @@
+//------------------------------------------------------------------------------
+// Copyright (c) 2004-2010 Atheros Communications Inc.
+// All rights reserved.
+//
+//
+//
+// Permission to use, copy, modify, and/or distribute this software for any
+// purpose with or without fee is hereby granted, provided that the above
+// copyright notice and this permission notice appear in all copies.
+//
+// THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+// WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+// MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+// ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+// WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+// ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+// OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+//
+//
+//
+// Author(s): ="Atheros"
+//------------------------------------------------------------------------------
+
+#include <linux/wireless.h>
+#include <linux/ieee80211.h>
+#include <net/cfg80211.h>
+
+#include "ar6000_drv.h"
+
+
+extern A_WAITQUEUE_HEAD arEvent;
+extern unsigned int wmitimeout;
+extern int reconnect_flag;
+
+
+#define RATETAB_ENT(_rate, _rateid, _flags) { \
+ .bitrate = (_rate), \
+ .flags = (_flags), \
+ .hw_value = (_rateid), \
+}
+
+#define CHAN2G(_channel, _freq, _flags) { \
+ .band = IEEE80211_BAND_2GHZ, \
+ .hw_value = (_channel), \
+ .center_freq = (_freq), \
+ .flags = (_flags), \
+ .max_antenna_gain = 0, \
+ .max_power = 30, \
+}
+
+#define CHAN5G(_channel, _flags) { \
+ .band = IEEE80211_BAND_5GHZ, \
+ .hw_value = (_channel), \
+ .center_freq = 5000 + (5 * (_channel)), \
+ .flags = (_flags), \
+ .max_antenna_gain = 0, \
+ .max_power = 30, \
+}
+
+static struct
+ieee80211_rate ar6k_rates[] = {
+ RATETAB_ENT(10, 0x1, 0),
+ RATETAB_ENT(20, 0x2, 0),
+ RATETAB_ENT(55, 0x4, 0),
+ RATETAB_ENT(110, 0x8, 0),
+ RATETAB_ENT(60, 0x10, 0),
+ RATETAB_ENT(90, 0x20, 0),
+ RATETAB_ENT(120, 0x40, 0),
+ RATETAB_ENT(180, 0x80, 0),
+ RATETAB_ENT(240, 0x100, 0),
+ RATETAB_ENT(360, 0x200, 0),
+ RATETAB_ENT(480, 0x400, 0),
+ RATETAB_ENT(540, 0x800, 0),
+};
+
+#define ar6k_a_rates (ar6k_rates + 4)
+#define ar6k_a_rates_size 8
+#define ar6k_g_rates (ar6k_rates + 0)
+#define ar6k_g_rates_size 12
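+/* ar6k_rates[] lists the CCK rates first (1, 2, 5.5, 11 Mbps, in 100 kbps
+ * units) followed by the OFDM rates; the 5 GHz (11a) table therefore starts
+ * at index 4 and exposes only the 8 OFDM entries, while 2.4 GHz (11g)
+ * exposes all 12. */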
+
+static struct
+ieee80211_channel ar6k_2ghz_channels[] = {
+ CHAN2G(1, 2412, 0),
+ CHAN2G(2, 2417, 0),
+ CHAN2G(3, 2422, 0),
+ CHAN2G(4, 2427, 0),
+ CHAN2G(5, 2432, 0),
+ CHAN2G(6, 2437, 0),
+ CHAN2G(7, 2442, 0),
+ CHAN2G(8, 2447, 0),
+ CHAN2G(9, 2452, 0),
+ CHAN2G(10, 2457, 0),
+ CHAN2G(11, 2462, 0),
+ CHAN2G(12, 2467, 0),
+ CHAN2G(13, 2472, 0),
+ CHAN2G(14, 2484, 0),
+};
+
+static struct
+ieee80211_channel ar6k_5ghz_a_channels[] = {
+ CHAN5G(34, 0), CHAN5G(36, 0),
+ CHAN5G(38, 0), CHAN5G(40, 0),
+ CHAN5G(42, 0), CHAN5G(44, 0),
+ CHAN5G(46, 0), CHAN5G(48, 0),
+ CHAN5G(52, 0), CHAN5G(56, 0),
+ CHAN5G(60, 0), CHAN5G(64, 0),
+ CHAN5G(100, 0), CHAN5G(104, 0),
+ CHAN5G(108, 0), CHAN5G(112, 0),
+ CHAN5G(116, 0), CHAN5G(120, 0),
+ CHAN5G(124, 0), CHAN5G(128, 0),
+ CHAN5G(132, 0), CHAN5G(136, 0),
+ CHAN5G(140, 0), CHAN5G(149, 0),
+ CHAN5G(153, 0), CHAN5G(157, 0),
+ CHAN5G(161, 0), CHAN5G(165, 0),
+ CHAN5G(184, 0), CHAN5G(188, 0),
+ CHAN5G(192, 0), CHAN5G(196, 0),
+ CHAN5G(200, 0), CHAN5G(204, 0),
+ CHAN5G(208, 0), CHAN5G(212, 0),
+ CHAN5G(216, 0),
+};
+
+static struct
+ieee80211_supported_band ar6k_band_2ghz = {
+ .n_channels = ARRAY_SIZE(ar6k_2ghz_channels),
+ .channels = ar6k_2ghz_channels,
+ .n_bitrates = ar6k_g_rates_size,
+ .bitrates = ar6k_g_rates,
+};
+
+static struct
+ieee80211_supported_band ar6k_band_5ghz = {
+ .n_channels = ARRAY_SIZE(ar6k_5ghz_a_channels),
+ .channels = ar6k_5ghz_a_channels,
+ .n_bitrates = ar6k_a_rates_size,
+ .bitrates = ar6k_a_rates,
+};
+
+static int
+ar6k_set_wpa_version(AR_SOFTC_T *ar, enum nl80211_wpa_versions wpa_version)
+{
+
+ AR_DEBUG_PRINTF(ATH_DEBUG_INFO, ("%s: %u\n", __func__, wpa_version));
+
+ if (!wpa_version) {
+ ar->arAuthMode = NONE_AUTH;
+ } else if (wpa_version & NL80211_WPA_VERSION_1) {
+ ar->arAuthMode = WPA_AUTH;
+ } else if (wpa_version & NL80211_WPA_VERSION_2) {
+ ar->arAuthMode = WPA2_AUTH;
+ } else {
+ AR_DEBUG_PRINTF(ATH_DEBUG_ERR,
+ ("%s: %u not spported\n", __func__, wpa_version));
+ return -ENOTSUPP;
+ }
+
+ return A_OK;
+}
+
+static int
+ar6k_set_auth_type(AR_SOFTC_T *ar, enum nl80211_auth_type auth_type)
+{
+
+ AR_DEBUG_PRINTF(ATH_DEBUG_INFO, ("%s: 0x%x\n", __func__, auth_type));
+
+ switch (auth_type) {
+ case NL80211_AUTHTYPE_OPEN_SYSTEM:
+ ar->arDot11AuthMode = OPEN_AUTH;
+ break;
+ case NL80211_AUTHTYPE_SHARED_KEY:
+ ar->arDot11AuthMode = SHARED_AUTH;
+ break;
+ case NL80211_AUTHTYPE_NETWORK_EAP:
+ ar->arDot11AuthMode = LEAP_AUTH;
+ break;
+ default:
+ ar->arDot11AuthMode = OPEN_AUTH;
+ AR_DEBUG_PRINTF(ATH_DEBUG_INFO,
+ ("%s: 0x%x not spported\n", __func__, auth_type));
+ return -ENOTSUPP;
+ }
+
+ return A_OK;
+}
+
+static int
+ar6k_set_cipher(AR_SOFTC_T *ar, A_UINT32 cipher, A_BOOL ucast)
+{
+ A_UINT8 *ar_cipher = ucast ? &ar->arPairwiseCrypto :
+ &ar->arGroupCrypto;
+ A_UINT8 *ar_cipher_len = ucast ? &ar->arPairwiseCryptoLen :
+ &ar->arGroupCryptoLen;
+
+ AR_DEBUG_PRINTF(ATH_DEBUG_INFO,
+ ("%s: cipher 0x%x, ucast %u\n", __func__, cipher, ucast));
+
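+ /* WEP key lengths are in bytes (5 for WEP-40, 13 for WEP-104); for TKIP
+  * and CCMP the length field is left at 0. */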
+ switch (cipher) {
+ case 0:
+ case IW_AUTH_CIPHER_NONE:
+ *ar_cipher = NONE_CRYPT;
+ *ar_cipher_len = 0;
+ break;
+ case WLAN_CIPHER_SUITE_WEP40:
+ *ar_cipher = WEP_CRYPT;
+ *ar_cipher_len = 5;
+ break;
+ case WLAN_CIPHER_SUITE_WEP104:
+ *ar_cipher = WEP_CRYPT;
+ *ar_cipher_len = 13;
+ break;
+ case WLAN_CIPHER_SUITE_TKIP:
+ *ar_cipher = TKIP_CRYPT;
+ *ar_cipher_len = 0;
+ break;
+ case WLAN_CIPHER_SUITE_CCMP:
+ *ar_cipher = AES_CRYPT;
+ *ar_cipher_len = 0;
+ break;
+ default:
+ AR_DEBUG_PRINTF(ATH_DEBUG_ERR,
+ ("%s: cipher 0x%x not supported\n", __func__, cipher));
+ return -ENOTSUPP;
+ }
+
+ return A_OK;
+}
+
+static void
+ar6k_set_key_mgmt(AR_SOFTC_T *ar, A_UINT32 key_mgmt)
+{
+ AR_DEBUG_PRINTF(ATH_DEBUG_INFO, ("%s: 0x%x\n", __func__, key_mgmt));
+
+ if (WLAN_AKM_SUITE_PSK == key_mgmt) {
+ if (WPA_AUTH == ar->arAuthMode) {
+ ar->arAuthMode = WPA_PSK_AUTH;
+ } else if (WPA2_AUTH == ar->arAuthMode) {
+ ar->arAuthMode = WPA2_PSK_AUTH;
+ }
+ } else if (WLAN_AKM_SUITE_8021X != key_mgmt) {
+ ar->arAuthMode = NONE_AUTH;
+ }
+}
+
+static int
+ar6k_cfg80211_connect(struct wiphy *wiphy, struct net_device *dev,
+ struct cfg80211_connect_params *sme)
+{
+ AR_SOFTC_T *ar = ar6k_priv(dev);
+ A_STATUS status;
+
+ AR_DEBUG_PRINTF(ATH_DEBUG_INFO, ("%s: \n", __func__));
+
+ if(ar->arWmiReady == FALSE) {
+ AR_DEBUG_PRINTF(ATH_DEBUG_ERR, ("%s: Wmi not ready yet\n", __func__));
+ return -EIO;
+ }
+
+ if(ar->arWlanState == WLAN_DISABLED) {
+ AR_DEBUG_PRINTF(ATH_DEBUG_ERR, ("%s: Wlan disabled\n", __func__));
+ return -EIO;
+ }
+
+ if(ar->bIsDestroyProgress) {
+ AR_DEBUG_PRINTF(ATH_DEBUG_ERR, ("%s: destroy in progress\n", __func__));
+ return -EBUSY;
+ }
+
+ if(!sme->ssid_len || IEEE80211_MAX_SSID_LEN < sme->ssid_len) {
+ AR_DEBUG_PRINTF(ATH_DEBUG_ERR, ("%s: ssid invalid\n", __func__));
+ return -EINVAL;
+ }
+
+ if(ar->arSkipScan == TRUE &&
+ ((sme->channel && sme->channel->center_freq == 0) ||
+ (sme->bssid && !sme->bssid[0] && !sme->bssid[1] && !sme->bssid[2] &&
+ !sme->bssid[3] && !sme->bssid[4] && !sme->bssid[5])))
+ {
+ AR_DEBUG_PRINTF(ATH_DEBUG_ERR, ("%s:SkipScan: channel or bssid invalid\n", __func__));
+ return -EINVAL;
+ }
+
+ if(down_interruptible(&ar->arSem)) {
+ AR_DEBUG_PRINTF(ATH_DEBUG_ERR, ("%s: busy, couldn't get access\n", __func__));
+ return -ERESTARTSYS;
+ }
+
+ if(ar->bIsDestroyProgress) {
+ AR_DEBUG_PRINTF(ATH_DEBUG_ERR, ("%s: busy, destroy in progress\n", __func__));
+ up(&ar->arSem);
+ return -EBUSY;
+ }
+
+ if(ar->arTxPending[wmi_get_control_ep(ar->arWmi)]) {
+ /*
+ * sleep until the command queue drains
+ */
+ wait_event_interruptible_timeout(arEvent,
+ ar->arTxPending[wmi_get_control_ep(ar->arWmi)] == 0, wmitimeout * HZ);
+ if (signal_pending(current)) {
+ AR_DEBUG_PRINTF(ATH_DEBUG_ERR, ("%s: cmd queue drain timeout\n", __func__));
+ up(&ar->arSem);
+ return -EINTR;
+ }
+ }
+
+ if(ar->arConnected == TRUE &&
+ ar->arSsidLen == sme->ssid_len &&
+ !A_MEMCMP(ar->arSsid, sme->ssid, ar->arSsidLen)) {
+ reconnect_flag = TRUE;
+ status = wmi_reconnect_cmd(ar->arWmi,
+ ar->arReqBssid,
+ ar->arChannelHint);
+
+ up(&ar->arSem);
+ if (status != A_OK) {
+ AR_DEBUG_PRINTF(ATH_DEBUG_ERR, ("%s: wmi_reconnect_cmd failed\n", __func__));
+ return -EIO;
+ }
+ return 0;
+ } else if(ar->arSsidLen == sme->ssid_len &&
+ !A_MEMCMP(ar->arSsid, sme->ssid, ar->arSsidLen)) {
+ wmi_disconnect_cmd(ar->arWmi);
+ }
+
+ A_MEMZERO(ar->arSsid, sizeof(ar->arSsid));
+ ar->arSsidLen = sme->ssid_len;
+ A_MEMCPY(ar->arSsid, sme->ssid, sme->ssid_len);
+
+ if(sme->channel){
+ ar->arChannelHint = sme->channel->center_freq;
+ }
+
+ A_MEMZERO(ar->arReqBssid, sizeof(ar->arReqBssid));
+ if(sme->bssid){
+ if(A_MEMCMP(&sme->bssid, bcast_mac, AR6000_ETH_ADDR_LEN)) {
+ A_MEMCPY(ar->arReqBssid, sme->bssid, sizeof(ar->arReqBssid));
+ }
+ }
+
+ ar6k_set_wpa_version(ar, sme->crypto.wpa_versions);
+ ar6k_set_auth_type(ar, sme->auth_type);
+
+ if(sme->crypto.n_ciphers_pairwise) {
+ ar6k_set_cipher(ar, sme->crypto.ciphers_pairwise[0], true);
+ } else {
+ ar6k_set_cipher(ar, IW_AUTH_CIPHER_NONE, true);
+ }
+ ar6k_set_cipher(ar, sme->crypto.cipher_group, false);
+
+ if(sme->crypto.n_akm_suites) {
+ ar6k_set_key_mgmt(ar, sme->crypto.akm_suites[0]);
+ }
+
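+ /* Static WEP: when connect() carries a key and no WPA auth mode is
+  * configured, install it as the default TX key before issuing the
+  * connect command. */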
+ if((sme->key_len) &&
+ (NONE_AUTH == ar->arAuthMode) &&
+ (WEP_CRYPT == ar->arPairwiseCrypto)) {
+ struct ar_key *key = NULL;
+
+ if(sme->key_idx < WMI_MIN_KEY_INDEX || sme->key_idx > WMI_MAX_KEY_INDEX) {
+ AR_DEBUG_PRINTF(ATH_DEBUG_ERR,
+ ("%s: key index %d out of bounds\n", __func__, sme->key_idx));
+ up(&ar->arSem);
+ return -ENOENT;
+ }
+
+ key = &ar->keys[sme->key_idx];
+ key->key_len = sme->key_len;
+ A_MEMCPY(key->key, sme->key, key->key_len);
+ key->cipher = ar->arPairwiseCrypto;
+ ar->arDefTxKeyIndex = sme->key_idx;
+
+ wmi_addKey_cmd(ar->arWmi, sme->key_idx,
+ ar->arPairwiseCrypto,
+ GROUP_USAGE | TX_USAGE,
+ key->key_len,
+ NULL,
+ key->key, KEY_OP_INIT_VAL, NULL,
+ NO_SYNC_WMIFLAG);
+ }
+
+ if (!ar->arUserBssFilter) {
+ if (wmi_bssfilter_cmd(ar->arWmi, ALL_BSS_FILTER, 0) != A_OK) {
+ AR_DEBUG_PRINTF(ATH_DEBUG_ERR, ("%s: Couldn't set bss filtering\n", __func__));
+ up(&ar->arSem);
+ return -EIO;
+ }
+ }
+
+ ar->arNetworkType = ar->arNextMode;
+
+ AR_DEBUG_PRINTF(ATH_DEBUG_INFO, ("%s: Connect called with authmode %d dot11 auth %d"\
+ " PW crypto %d PW crypto Len %d GRP crypto %d"\
+ " GRP crypto Len %d channel hint %u\n",
+ __func__, ar->arAuthMode, ar->arDot11AuthMode,
+ ar->arPairwiseCrypto, ar->arPairwiseCryptoLen,
+ ar->arGroupCrypto, ar->arGroupCryptoLen, ar->arChannelHint));
+
+ reconnect_flag = 0;
+ status = wmi_connect_cmd(ar->arWmi, ar->arNetworkType,
+ ar->arDot11AuthMode, ar->arAuthMode,
+ ar->arPairwiseCrypto, ar->arPairwiseCryptoLen,
+ ar->arGroupCrypto,ar->arGroupCryptoLen,
+ ar->arSsidLen, ar->arSsid,
+ ar->arReqBssid, ar->arChannelHint,
+ ar->arConnectCtrlFlags);
+
+ up(&ar->arSem);
+
+ if (A_EINVAL == status) {
+ A_MEMZERO(ar->arSsid, sizeof(ar->arSsid));
+ ar->arSsidLen = 0;
+ AR_DEBUG_PRINTF(ATH_DEBUG_ERR, ("%s: Invalid request\n", __func__));
+ return -ENOENT;
+ } else if (status != A_OK) {
+ AR_DEBUG_PRINTF(ATH_DEBUG_ERR, ("%s: wmi_connect_cmd failed\n", __func__));
+ return -EIO;
+ }
+
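+ /* The disconnect timer armed below is cancelled in add_key() once the
+  * group key of a WPA/WPA2-PSK association is plumbed; presumably it
+  * tears the connection down if the 4-way handshake never completes. */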
+ if ((!(ar->arConnectCtrlFlags & CONNECT_DO_WPA_OFFLOAD)) &&
+ ((WPA_PSK_AUTH == ar->arAuthMode) || (WPA2_PSK_AUTH == ar->arAuthMode)))
+ {
+ A_TIMEOUT_MS(&ar->disconnect_timer, A_DISCONNECT_TIMER_INTERVAL, 0);
+ }
+
+ ar->arConnectCtrlFlags &= ~CONNECT_DO_WPA_OFFLOAD;
+ ar->arConnectPending = TRUE;
+
+ return 0;
+}
+
+void
+ar6k_cfg80211_connect_event(AR_SOFTC_T *ar, A_UINT16 channel,
+ A_UINT8 *bssid, A_UINT16 listenInterval,
+ A_UINT16 beaconInterval,NETWORK_TYPE networkType,
+ A_UINT8 beaconIeLen, A_UINT8 assocReqLen,
+ A_UINT8 assocRespLen, A_UINT8 *assocInfo)
+{
+ A_UINT16 size = 0;
+ A_UINT16 capability = 0;
+ struct cfg80211_bss *bss = NULL;
+ struct ieee80211_mgmt *mgmt = NULL;
+ struct ieee80211_channel *ibss_channel = NULL;
+ s32 signal = 50 * 100;
+ A_UINT8 ie_buf_len = 0;
+ unsigned char ie_buf[256];
+ unsigned char *ptr_ie_buf = ie_buf;
+ unsigned char *ieeemgmtbuf = NULL;
+ A_UINT8 source_mac[ATH_MAC_LEN];
+
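+ /* assocInfo is laid out as beacon IEs, then the association request and
+  * response bodies; the offsets below skip the fixed-length fields
+  * (capability/listen interval and capability/status/AID) so that
+  * assocReqIe and assocRespIe point at the start of the IEs proper. */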
+ A_UINT8 assocReqIeOffset = sizeof(A_UINT16) + /* capinfo*/
+ sizeof(A_UINT16); /* listen interval */
+ A_UINT8 assocRespIeOffset = sizeof(A_UINT16) + /* capinfo*/
+ sizeof(A_UINT16) + /* status Code */
+ sizeof(A_UINT16); /* associd */
+ A_UINT8 *assocReqIe = assocInfo + beaconIeLen + assocReqIeOffset;
+ A_UINT8 *assocRespIe = assocInfo + beaconIeLen + assocReqLen + assocRespIeOffset;
+
+ AR_DEBUG_PRINTF(ATH_DEBUG_INFO, ("%s: \n", __func__));
+
+ assocReqLen -= assocReqIeOffset;
+ assocRespLen -= assocRespIeOffset;
+
+ if((ADHOC_NETWORK & networkType)) {
+ if(NL80211_IFTYPE_ADHOC != ar->wdev->iftype) {
+ AR_DEBUG_PRINTF(ATH_DEBUG_INFO,
+ ("%s: ath6k not in ibss mode\n", __func__));
+ return;
+ }
+ }
+
+ if((INFRA_NETWORK & networkType)) {
+ if(NL80211_IFTYPE_STATION != ar->wdev->iftype) {
+ AR_DEBUG_PRINTF(ATH_DEBUG_INFO,
+ ("%s: ath6k not in station mode\n", __func__));
+ return;
+ }
+ }
+
+ /* Before reporting the join/connect event, make sure a bss
+ * entry is present in the scan list; if it is not, construct
+ * one and insert it. Otherwise cfg80211 drops the event on
+ * the way up, keys are never plumbed in the WEP case, and the
+ * application never learns the join/connect status. */
+ bss = cfg80211_get_bss(ar->wdev->wiphy, NULL, bssid,
+ ar->wdev->ssid, ar->wdev->ssid_len,
+ ((ADHOC_NETWORK & networkType) ? WLAN_CAPABILITY_IBSS : WLAN_CAPABILITY_ESS),
+ ((ADHOC_NETWORK & networkType) ? WLAN_CAPABILITY_IBSS : WLAN_CAPABILITY_ESS));
+
+ if(!bss) {
+ if (ADHOC_NETWORK & networkType) {
+ /* construct 802.11 mgmt beacon */
+ if(ptr_ie_buf) {
+ *ptr_ie_buf++ = WLAN_EID_SSID;
+ *ptr_ie_buf++ = ar->arSsidLen;
+ A_MEMCPY(ptr_ie_buf, ar->arSsid, ar->arSsidLen);
+ ptr_ie_buf +=ar->arSsidLen;
+
+ *ptr_ie_buf++ = WLAN_EID_IBSS_PARAMS;
+ *ptr_ie_buf++ = 2; /* length */
+ *ptr_ie_buf++ = 0; /* ATIM window */
+ *ptr_ie_buf++ = 0; /* ATIM window */
+
+ /* TODO: update ibss params and include supported rates,
+ * DS param set, extended supported rates, wmm. */
+
+ ie_buf_len = ptr_ie_buf - ie_buf;
+ }
+
+ capability |= IEEE80211_CAPINFO_IBSS;
+ if(WEP_CRYPT == ar->arPairwiseCrypto) {
+ capability |= IEEE80211_CAPINFO_PRIVACY;
+ }
+ A_MEMCPY(source_mac, ar->arNetDev->dev_addr, ATH_MAC_LEN);
+ ptr_ie_buf = ie_buf;
+ } else {
+ capability = *(A_UINT16 *)(&assocInfo[beaconIeLen]);
+ A_MEMCPY(source_mac, bssid, ATH_MAC_LEN);
+ ptr_ie_buf = assocReqIe;
+ ie_buf_len = assocReqLen;
+ }
+
+ size = offsetof(struct ieee80211_mgmt, u)
+ + sizeof(mgmt->u.beacon)
+ + ie_buf_len;
+
+ ieeemgmtbuf = A_MALLOC_NOWAIT(size);
+ if(!ieeemgmtbuf) {
+ AR_DEBUG_PRINTF(ATH_DEBUG_ERR,
+ ("%s: ieeeMgmtbuf alloc error\n", __func__));
+ return;
+ }
+
+ A_MEMZERO(ieeemgmtbuf, size);
+ mgmt = (struct ieee80211_mgmt *)ieeemgmtbuf;
+ mgmt->frame_control = (IEEE80211_FTYPE_MGMT | IEEE80211_STYPE_BEACON);
+ A_MEMCPY(mgmt->da, bcast_mac, ATH_MAC_LEN);
+ A_MEMCPY(mgmt->sa, source_mac, ATH_MAC_LEN);
+ A_MEMCPY(mgmt->bssid, bssid, ATH_MAC_LEN);
+ mgmt->u.beacon.beacon_int = beaconInterval;
+ mgmt->u.beacon.capab_info = capability;
+ A_MEMCPY(mgmt->u.beacon.variable, ptr_ie_buf, ie_buf_len);
+
+ ibss_channel = ieee80211_get_channel(ar->wdev->wiphy, (int)channel);
+
+ AR_DEBUG_PRINTF(ATH_DEBUG_INFO,
+ ("%s: inform bss with bssid %pM channel %d beaconInterval %d "
+ "capability 0x%x\n", __func__, mgmt->bssid,
+ ibss_channel->hw_value, beaconInterval, capability));
+
+ bss = cfg80211_inform_bss_frame(ar->wdev->wiphy,
+ ibss_channel, mgmt,
+ le16_to_cpu(size),
+ signal, GFP_KERNEL);
+ A_FREE(ieeemgmtbuf);
+ cfg80211_put_bss(bss);
+ }
+
+ if((ADHOC_NETWORK & networkType)) {
+ cfg80211_ibss_joined(ar->arNetDev, bssid, GFP_KERNEL);
+ return;
+ }
+
+ if (FALSE == ar->arConnected) {
+ /* inform connect result to cfg80211 */
+ cfg80211_connect_result(ar->arNetDev, bssid,
+ assocReqIe, assocReqLen,
+ assocRespIe, assocRespLen,
+ WLAN_STATUS_SUCCESS, GFP_KERNEL);
+ } else {
+ /* inform roam event to cfg80211 */
+ cfg80211_roamed(ar->arNetDev, bssid,
+ assocReqIe, assocReqLen,
+ assocRespIe, assocRespLen,
+ GFP_KERNEL);
+ }
+}
+
+static int
+ar6k_cfg80211_disconnect(struct wiphy *wiphy, struct net_device *dev,
+ A_UINT16 reason_code)
+{
+ AR_SOFTC_T *ar = (AR_SOFTC_T *)ar6k_priv(dev);
+
+ AR_DEBUG_PRINTF(ATH_DEBUG_INFO, ("%s: reason=%u\n", __func__, reason_code));
+
+ if(ar->arWmiReady == FALSE) {
+ AR_DEBUG_PRINTF(ATH_DEBUG_ERR, ("%s: Wmi not ready\n", __func__));
+ return -EIO;
+ }
+
+ if(ar->arWlanState == WLAN_DISABLED) {
+ AR_DEBUG_PRINTF(ATH_DEBUG_ERR, ("%s: Wlan disabled\n", __func__));
+ return -EIO;
+ }
+
+ if(ar->bIsDestroyProgress) {
+ AR_DEBUG_PRINTF(ATH_DEBUG_ERR, ("%s: busy, destroy in progress\n", __func__));
+ return -EBUSY;
+ }
+
+ if(down_interruptible(&ar->arSem)) {
+ AR_DEBUG_PRINTF(ATH_DEBUG_ERR, ("%s: busy, couldn't get access\n", __func__));
+ return -ERESTARTSYS;
+ }
+
+ reconnect_flag = 0;
+ wmi_disconnect_cmd(ar->arWmi);
+ A_MEMZERO(ar->arSsid, sizeof(ar->arSsid));
+ ar->arSsidLen = 0;
+
+ if (ar->arSkipScan == FALSE) {
+ A_MEMZERO(ar->arReqBssid, sizeof(ar->arReqBssid));
+ }
+
+ up(&ar->arSem);
+
+ return 0;
+}
+
+void
+ar6k_cfg80211_disconnect_event(AR_SOFTC_T *ar, A_UINT8 reason,
+ A_UINT8 *bssid, A_UINT8 assocRespLen,
+ A_UINT8 *assocInfo, A_UINT16 protocolReasonStatus)
+{
+
+ AR_DEBUG_PRINTF(ATH_DEBUG_INFO, ("%s: reason=%u\n", __func__, reason));
+
+ if((ADHOC_NETWORK & ar->arNetworkType)) {
+ if(NL80211_IFTYPE_ADHOC != ar->wdev->iftype) {
+ AR_DEBUG_PRINTF(ATH_DEBUG_INFO,
+ ("%s: ath6k not in ibss mode\n", __func__));
+ return;
+ }
+ A_MEMZERO(bssid, ETH_ALEN);
+ cfg80211_ibss_joined(ar->arNetDev, bssid, GFP_KERNEL);
+ return;
+ }
+
+ if((INFRA_NETWORK & ar->arNetworkType)) {
+ if(NL80211_IFTYPE_STATION != ar->wdev->iftype) {
+ AR_DEBUG_PRINTF(ATH_DEBUG_INFO,
+ ("%s: ath6k not in station mode\n", __func__));
+ return;
+ }
+ }
+
+ if(FALSE == ar->arConnected) {
+ if(NO_NETWORK_AVAIL == reason) {
+ /* connect cmd failed */
+ cfg80211_connect_result(ar->arNetDev, bssid,
+ NULL, 0,
+ NULL, 0,
+ WLAN_STATUS_UNSPECIFIED_FAILURE,
+ GFP_KERNEL);
+ }
+ } else {
+ /* connection loss due to disconnect cmd or low rssi */
+ cfg80211_disconnected(ar->arNetDev, reason, NULL, 0, GFP_KERNEL);
+ }
+}
+
+void
+ar6k_cfg80211_scan_node(void *arg, bss_t *ni)
+{
+ struct wiphy *wiphy = (struct wiphy *)arg;
+ A_UINT16 size;
+ unsigned char *ieeemgmtbuf = NULL;
+ struct ieee80211_mgmt *mgmt;
+ struct ieee80211_channel *channel;
+ struct ieee80211_supported_band *band;
+ struct ieee80211_common_ie *cie;
+ s32 signal;
+ int freq;
+
+ cie = &ni->ni_cie;
+
+#define CHAN_IS_11A(x) (!((x >= 2412) && (x <= 2484)))
+ if(CHAN_IS_11A(cie->ie_chan)) {
+ /* 11a */
+ band = wiphy->bands[IEEE80211_BAND_5GHZ];
+ } else if((cie->ie_erp) || (cie->ie_xrates)) {
+ /* 11g */
+ band = wiphy->bands[IEEE80211_BAND_2GHZ];
+ } else {
+ /* 11b */
+ band = wiphy->bands[IEEE80211_BAND_2GHZ];
+ }
+
+ size = ni->ni_framelen + offsetof(struct ieee80211_mgmt, u);
+ ieeemgmtbuf = A_MALLOC_NOWAIT(size);
+ if(!ieeemgmtbuf)
+ {
+ AR_DEBUG_PRINTF(ATH_DEBUG_ERR, ("%s: ieeeMgmtbuf alloc error\n", __func__));
+ return;
+ }
+
+ /* Note:
+ TODO: Update the target to include the 802.11 MAC header when sending bss info.
+ The target strips the 802.11 MAC header before passing the bss info to the host,
+ but cfg80211 needs it; for the time being only the da, sa and bssid fields are filled in.
+ */
+ mgmt = (struct ieee80211_mgmt *)ieeemgmtbuf;
+ A_MEMCPY(mgmt->da, bcast_mac, ATH_MAC_LEN);
+ A_MEMCPY(mgmt->sa, ni->ni_macaddr, ATH_MAC_LEN);
+ A_MEMCPY(mgmt->bssid, ni->ni_macaddr, ATH_MAC_LEN);
+ A_MEMCPY(ieeemgmtbuf + offsetof(struct ieee80211_mgmt, u),
+ ni->ni_buf, ni->ni_framelen);
+
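+ /* ie_chan already holds the center frequency in MHz (see CHAN_IS_11A
+  * above), so it can be passed to ieee80211_get_channel() directly. */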
+ freq = cie->ie_chan;
+ channel = ieee80211_get_channel(wiphy, freq);
+ signal = ni->ni_snr * 100;
+
+ AR_DEBUG_PRINTF(ATH_DEBUG_INFO,
+ ("%s: bssid %pM channel %d freq %d size %d\n", __func__,
+ mgmt->bssid, channel->hw_value, freq, size));
+ cfg80211_inform_bss_frame(wiphy, channel, mgmt,
+ le16_to_cpu(size),
+ signal, GFP_KERNEL);
+
+ A_FREE (ieeemgmtbuf);
+}
+
+static int
+ar6k_cfg80211_scan(struct wiphy *wiphy, struct net_device *ndev,
+ struct cfg80211_scan_request *request)
+{
+ AR_SOFTC_T *ar = (AR_SOFTC_T *)ar6k_priv(ndev);
+ int ret = 0;
+ A_BOOL forceFgScan = FALSE;
+
+ AR_DEBUG_PRINTF(ATH_DEBUG_INFO, ("%s: \n", __func__));
+
+ if(ar->arWmiReady == FALSE) {
+ AR_DEBUG_PRINTF(ATH_DEBUG_ERR, ("%s: Wmi not ready\n", __func__));
+ return -EIO;
+ }
+
+ if(ar->arWlanState == WLAN_DISABLED) {
+ AR_DEBUG_PRINTF(ATH_DEBUG_ERR, ("%s: Wlan disabled\n", __func__));
+ return -EIO;
+ }
+
+ if (!ar->arUserBssFilter) {
+ if (wmi_bssfilter_cmd(ar->arWmi,
+ (ar->arConnected ? ALL_BUT_BSS_FILTER : ALL_BSS_FILTER),
+ 0) != A_OK) {
+ AR_DEBUG_PRINTF(ATH_DEBUG_ERR, ("%s: Couldn't set bss filtering\n", __func__));
+ return -EIO;
+ }
+ }
+
+ if(request->n_ssids &&
+ request->ssids[0].ssid_len) {
+ A_UINT8 i;
+
+ if(request->n_ssids > MAX_PROBED_SSID_INDEX) {
+ request->n_ssids = MAX_PROBED_SSID_INDEX;
+ }
+
+ for (i = 0; i < request->n_ssids; i++) {
+ wmi_probedSsid_cmd(ar->arWmi, i, SPECIFIC_SSID_FLAG,
+ request->ssids[i].ssid_len,
+ request->ssids[i].ssid);
+ }
+ }
+
+ if(ar->arConnected) {
+ forceFgScan = TRUE;
+ }
+
+ if(wmi_startscan_cmd(ar->arWmi, WMI_LONG_SCAN, forceFgScan, FALSE, \
+ 0, 0, 0, NULL) != A_OK) {
+ AR_DEBUG_PRINTF(ATH_DEBUG_ERR, ("%s: wmi_startscan_cmd failed\n", __func__));
+ ret = -EIO;
+ }
+
+ ar->scan_request = request;
+
+ return ret;
+}
+
+void
+ar6k_cfg80211_scanComplete_event(AR_SOFTC_T *ar, A_STATUS status)
+{
+
+ AR_DEBUG_PRINTF(ATH_DEBUG_INFO, ("%s: status %d\n", __func__, status));
+
+ if(ar->scan_request)
+ {
+ /* Translate data to cfg80211 mgmt format */
+ wmi_iterate_nodes(ar->arWmi, ar6k_cfg80211_scan_node, ar->wdev->wiphy);
+
+ cfg80211_scan_done(ar->scan_request,
+ (status & A_ECANCELED) ? true : false);
+
+ if(ar->scan_request->n_ssids &&
+ ar->scan_request->ssids[0].ssid_len) {
+ A_UINT8 i;
+
+ for (i = 0; i < ar->scan_request->n_ssids; i++) {
+ wmi_probedSsid_cmd(ar->arWmi, i, DISABLE_SSID_FLAG,
+ 0, NULL);
+ }
+ }
+ ar->scan_request = NULL;
+ }
+}
+
+static int
+ar6k_cfg80211_add_key(struct wiphy *wiphy, struct net_device *ndev,
+ A_UINT8 key_index, bool pairwise, const A_UINT8 *mac_addr,
+ struct key_params *params)
+{
+ AR_SOFTC_T *ar = (AR_SOFTC_T *)ar6k_priv(ndev);
+ struct ar_key *key = NULL;
+ A_UINT8 key_usage;
+ A_UINT8 key_type;
+ A_STATUS status = 0;
+
+ AR_DEBUG_PRINTF(ATH_DEBUG_INFO, ("%s:\n", __func__));
+
+ if(ar->arWmiReady == FALSE) {
+ AR_DEBUG_PRINTF(ATH_DEBUG_ERR, ("%s: Wmi not ready\n", __func__));
+ return -EIO;
+ }
+
+ if(ar->arWlanState == WLAN_DISABLED) {
+ AR_DEBUG_PRINTF(ATH_DEBUG_ERR, ("%s: Wlan disabled\n", __func__));
+ return -EIO;
+ }
+
+ if(key_index < WMI_MIN_KEY_INDEX || key_index > WMI_MAX_KEY_INDEX) {
+ AR_DEBUG_PRINTF(ATH_DEBUG_INFO,
+ ("%s: key index %d out of bounds\n", __func__, key_index));
+ return -ENOENT;
+ }
+
+ key = &ar->keys[key_index];
+ A_MEMZERO(key, sizeof(struct ar_key));
+
+ if(!mac_addr || is_broadcast_ether_addr(mac_addr)) {
+ key_usage = GROUP_USAGE;
+ } else {
+ key_usage = PAIRWISE_USAGE;
+ }
+
+ if(params) {
+ if(params->key_len > WLAN_MAX_KEY_LEN ||
+ params->seq_len > IW_ENCODE_SEQ_MAX_SIZE)
+ return -EINVAL;
+
+ key->key_len = params->key_len;
+ A_MEMCPY(key->key, params->key, key->key_len);
+ key->seq_len = params->seq_len;
+ A_MEMCPY(key->seq, params->seq, key->seq_len);
+ key->cipher = params->cipher;
+ }
+
+ switch (key->cipher) {
+ case WLAN_CIPHER_SUITE_WEP40:
+ case WLAN_CIPHER_SUITE_WEP104:
+ key_type = WEP_CRYPT;
+ break;
+
+ case WLAN_CIPHER_SUITE_TKIP:
+ key_type = TKIP_CRYPT;
+ break;
+
+ case WLAN_CIPHER_SUITE_CCMP:
+ key_type = AES_CRYPT;
+ break;
+
+ default:
+ return -ENOTSUPP;
+ }
+
+ if (((WPA_PSK_AUTH == ar->arAuthMode) || (WPA2_PSK_AUTH == ar->arAuthMode)) &&
+ (GROUP_USAGE & key_usage))
+ {
+ A_UNTIMEOUT(&ar->disconnect_timer);
+ }
+
+ AR_DEBUG_PRINTF(ATH_DEBUG_INFO,
+ ("%s: index %d, key_len %d, key_type 0x%x,"\
+ " key_usage 0x%x, seq_len %d\n",
+ __func__, key_index, key->key_len, key_type,
+ key_usage, key->seq_len));
+
+ ar->arDefTxKeyIndex = key_index;
+ status = wmi_addKey_cmd(ar->arWmi, ar->arDefTxKeyIndex, key_type, key_usage,
+ key->key_len, key->seq, key->key, KEY_OP_INIT_VAL,
+ (A_UINT8*)mac_addr, SYNC_BOTH_WMIFLAG);
+
+
+ if(status != A_OK) {
+ return -EIO;
+ }
+
+ return 0;
+}
+
+static int
+ar6k_cfg80211_del_key(struct wiphy *wiphy, struct net_device *ndev,
+ A_UINT8 key_index, bool pairwise, const A_UINT8 *mac_addr)
+{
+ AR_SOFTC_T *ar = (AR_SOFTC_T *)ar6k_priv(ndev);
+
+ AR_DEBUG_PRINTF(ATH_DEBUG_INFO, ("%s: index %d\n", __func__, key_index));
+
+ if(ar->arWmiReady == FALSE) {
+ AR_DEBUG_PRINTF(ATH_DEBUG_ERR, ("%s: Wmi not ready\n", __func__));
+ return -EIO;
+ }
+
+ if(ar->arWlanState == WLAN_DISABLED) {
+ AR_DEBUG_PRINTF(ATH_DEBUG_ERR, ("%s: Wlan disabled\n", __func__));
+ return -EIO;
+ }
+
+ if(key_index < WMI_MIN_KEY_INDEX || key_index > WMI_MAX_KEY_INDEX) {
+ AR_DEBUG_PRINTF(ATH_DEBUG_INFO,
+ ("%s: key index %d out of bounds\n", __func__, key_index));
+ return -ENOENT;
+ }
+
+ if(!ar->keys[key_index].key_len) {
+ AR_DEBUG_PRINTF(ATH_DEBUG_INFO, ("%s: index %d is empty\n", __func__, key_index));
+ return 0;
+ }
+
+ ar->keys[key_index].key_len = 0;
+
+ return wmi_deleteKey_cmd(ar->arWmi, key_index);
+}
+
+
+static int
+ar6k_cfg80211_get_key(struct wiphy *wiphy, struct net_device *ndev,
+ A_UINT8 key_index, bool pairwise, const A_UINT8 *mac_addr,
+ void *cookie,
+ void (*callback)(void *cookie, struct key_params*))
+{
+ AR_SOFTC_T *ar = (AR_SOFTC_T *)ar6k_priv(ndev);
+ struct ar_key *key = NULL;
+ struct key_params params;
+
+ AR_DEBUG_PRINTF(ATH_DEBUG_INFO, ("%s: index %d\n", __func__, key_index));
+
+ if(ar->arWmiReady == FALSE) {
+ AR_DEBUG_PRINTF(ATH_DEBUG_ERR, ("%s: Wmi not ready\n", __func__));
+ return -EIO;
+ }
+
+ if(ar->arWlanState == WLAN_DISABLED) {
+ AR_DEBUG_PRINTF(ATH_DEBUG_ERR, ("%s: Wlan disabled\n", __func__));
+ return -EIO;
+ }
+
+ if(key_index < WMI_MIN_KEY_INDEX || key_index > WMI_MAX_KEY_INDEX) {
+ AR_DEBUG_PRINTF(ATH_DEBUG_INFO,
+ ("%s: key index %d out of bounds\n", __func__, key_index));
+ return -ENOENT;
+ }
+
+ key = &ar->keys[key_index];
+ A_MEMZERO(&params, sizeof(params));
+ params.cipher = key->cipher;
+ params.key_len = key->key_len;
+ params.seq_len = key->seq_len;
+ params.seq = key->seq;
+ params.key = key->key;
+
+ callback(cookie, &params);
+
+ return key->key_len ? 0 : -ENOENT;
+}
+
+
+static int
+ar6k_cfg80211_set_default_key(struct wiphy *wiphy, struct net_device *ndev,
+ A_UINT8 key_index)
+{
+ AR_SOFTC_T *ar = (AR_SOFTC_T *)ar6k_priv(ndev);
+ struct ar_key *key = NULL;
+ A_STATUS status = A_OK;
+
+ AR_DEBUG_PRINTF(ATH_DEBUG_INFO, ("%s: index %d\n", __func__, key_index));
+
+ if(ar->arWmiReady == FALSE) {
+ AR_DEBUG_PRINTF(ATH_DEBUG_ERR, ("%s: Wmi not ready\n", __func__));
+ return -EIO;
+ }
+
+ if(ar->arWlanState == WLAN_DISABLED) {
+ AR_DEBUG_PRINTF(ATH_DEBUG_ERR, ("%s: Wlan disabled\n", __func__));
+ return -EIO;
+ }
+
+ if(key_index < WMI_MIN_KEY_INDEX || key_index > WMI_MAX_KEY_INDEX) {
+ AR_DEBUG_PRINTF(ATH_DEBUG_INFO,
+ ("%s: key index %d out of bounds\n",
+ __func__, key_index));
+ return -ENOENT;
+ }
+
+ if(!ar->keys[key_index].key_len) {
+ AR_DEBUG_PRINTF(ATH_DEBUG_INFO, ("%s: invalid key index %d\n",
+ __func__, key_index));
+ return -EINVAL;
+ }
+
+ ar->arDefTxKeyIndex = key_index;
+ key = &ar->keys[ar->arDefTxKeyIndex];
+ status = wmi_addKey_cmd(ar->arWmi, ar->arDefTxKeyIndex,
+ ar->arPairwiseCrypto, GROUP_USAGE | TX_USAGE,
+ key->key_len, key->seq, key->key, KEY_OP_INIT_VAL,
+ NULL, SYNC_BOTH_WMIFLAG);
+ if (status != A_OK) {
+ return -EIO;
+ }
+
+ return 0;
+}
+
+static int
+ar6k_cfg80211_set_default_mgmt_key(struct wiphy *wiphy, struct net_device *ndev,
+ A_UINT8 key_index)
+{
+ AR_SOFTC_T *ar = (AR_SOFTC_T *)ar6k_priv(ndev);
+
+ AR_DEBUG_PRINTF(ATH_DEBUG_INFO, ("%s: index %d\n", __func__, key_index));
+
+ if(ar->arWmiReady == FALSE) {
+ AR_DEBUG_PRINTF(ATH_DEBUG_ERR, ("%s: Wmi not ready\n", __func__));
+ return -EIO;
+ }
+
+ if(ar->arWlanState == WLAN_DISABLED) {
+ AR_DEBUG_PRINTF(ATH_DEBUG_ERR, ("%s: Wlan disabled\n", __func__));
+ return -EIO;
+ }
+
+ AR_DEBUG_PRINTF(ATH_DEBUG_INFO, ("%s: not supported\n", __func__));
+ return -ENOTSUPP;
+}
+
+void
+ar6k_cfg80211_tkip_micerr_event(AR_SOFTC_T *ar, A_UINT8 keyid, A_BOOL ismcast)
+{
+ AR_DEBUG_PRINTF(ATH_DEBUG_INFO,
+ ("%s: keyid %d, ismcast %d\n", __func__, keyid, ismcast));
+
+ cfg80211_michael_mic_failure(ar->arNetDev, ar->arBssid,
+ (ismcast ? NL80211_KEYTYPE_GROUP : NL80211_KEYTYPE_PAIRWISE),
+ keyid, NULL, GFP_KERNEL);
+}
+
+static int
+ar6k_cfg80211_set_wiphy_params(struct wiphy *wiphy, A_UINT32 changed)
+{
+ AR_SOFTC_T *ar = (AR_SOFTC_T *)wiphy_priv(wiphy);
+
+ AR_DEBUG_PRINTF(ATH_DEBUG_INFO, ("%s: changed 0x%x\n", __func__, changed));
+
+ if(ar->arWmiReady == FALSE) {
+ AR_DEBUG_PRINTF(ATH_DEBUG_ERR, ("%s: Wmi not ready\n", __func__));
+ return -EIO;
+ }
+
+ if(ar->arWlanState == WLAN_DISABLED) {
+ AR_DEBUG_PRINTF(ATH_DEBUG_ERR, ("%s: Wlan disabled\n", __func__));
+ return -EIO;
+ }
+
+ if (changed & WIPHY_PARAM_RTS_THRESHOLD) {
+ if (wmi_set_rts_cmd(ar->arWmi,wiphy->rts_threshold) != A_OK){
+ AR_DEBUG_PRINTF(ATH_DEBUG_ERR, ("%s: wmi_set_rts_cmd failed\n", __func__));
+ return -EIO;
+ }
+ }
+
+ return 0;
+}
+
+static int
+ar6k_cfg80211_set_bitrate_mask(struct wiphy *wiphy, struct net_device *dev,
+ const A_UINT8 *peer,
+ const struct cfg80211_bitrate_mask *mask)
+{
+ AR_DEBUG_PRINTF(ATH_DEBUG_ERR, ("Setting rates: Not supported\n"));
+ return -EIO;
+}
+
+/* The enum nl80211_tx_power_setting replaces the earlier tx_power_setting type from 2.6.36 onwards */
+static int
+ar6k_cfg80211_set_txpower(struct wiphy *wiphy, enum nl80211_tx_power_setting type, int dbm)
+{
+ AR_SOFTC_T *ar = (AR_SOFTC_T *)wiphy_priv(wiphy);
+ A_UINT8 ar_dbm;
+
+ AR_DEBUG_PRINTF(ATH_DEBUG_INFO, ("%s: type 0x%x, dbm %d\n", __func__, type, dbm));
+
+ if(ar->arWmiReady == FALSE) {
+ AR_DEBUG_PRINTF(ATH_DEBUG_ERR, ("%s: Wmi not ready\n", __func__));
+ return -EIO;
+ }
+
+ if(ar->arWlanState == WLAN_DISABLED) {
+ AR_DEBUG_PRINTF(ATH_DEBUG_ERR, ("%s: Wlan disabled\n", __func__));
+ return -EIO;
+ }
+
+ ar->arTxPwrSet = FALSE;
+ switch(type) {
+ case NL80211_TX_POWER_AUTOMATIC:
+ return 0;
+ case NL80211_TX_POWER_LIMITED:
+ ar->arTxPwr = ar_dbm = dbm;
+ ar->arTxPwrSet = TRUE;
+ break;
+ default:
+ AR_DEBUG_PRINTF(ATH_DEBUG_INFO, ("%s: type 0x%x not supported\n", __func__, type));
+ return -EOPNOTSUPP;
+ }
+
+ wmi_set_txPwr_cmd(ar->arWmi, ar_dbm);
+
+ return 0;
+}
+
+static int
+ar6k_cfg80211_get_txpower(struct wiphy *wiphy, int *dbm)
+{
+ AR_SOFTC_T *ar = (AR_SOFTC_T *)wiphy_priv(wiphy);
+
+ AR_DEBUG_PRINTF(ATH_DEBUG_INFO, ("%s: \n", __func__));
+
+ if(ar->arWmiReady == FALSE) {
+ AR_DEBUG_PRINTF(ATH_DEBUG_ERR, ("%s: Wmi not ready\n", __func__));
+ return -EIO;
+ }
+
+ if(ar->arWlanState == WLAN_DISABLED) {
+ AR_DEBUG_PRINTF(ATH_DEBUG_ERR, ("%s: Wlan disabled\n", __func__));
+ return -EIO;
+ }
+
+ if((ar->arConnected == TRUE)) {
+ ar->arTxPwr = 0;
+
+ if(wmi_get_txPwr_cmd(ar->arWmi) != A_OK) {
+ AR_DEBUG_PRINTF(ATH_DEBUG_ERR, ("%s: wmi_get_txPwr_cmd failed\n", __func__));
+ return -EIO;
+ }
+
+ wait_event_interruptible_timeout(arEvent, ar->arTxPwr != 0, 5 * HZ);
+
+ if(signal_pending(current)) {
+ AR_DEBUG_PRINTF(ATH_DEBUG_ERR, ("%s: Target did not respond\n", __func__));
+ return -EINTR;
+ }
+ }
+
+ *dbm = ar->arTxPwr;
+ return 0;
+}
+
+static int
+ar6k_cfg80211_set_power_mgmt(struct wiphy *wiphy,
+ struct net_device *dev,
+ bool pmgmt, int timeout)
+{
+ AR_SOFTC_T *ar = ar6k_priv(dev);
+ WMI_POWER_MODE_CMD pwrMode;
+
+ AR_DEBUG_PRINTF(ATH_DEBUG_INFO, ("%s: pmgmt %d, timeout %d\n", __func__, pmgmt, timeout));
+
+ if(ar->arWmiReady == FALSE) {
+ AR_DEBUG_PRINTF(ATH_DEBUG_ERR, ("%s: Wmi not ready\n", __func__));
+ return -EIO;
+ }
+
+ if(ar->arWlanState == WLAN_DISABLED) {
+ AR_DEBUG_PRINTF(ATH_DEBUG_ERR, ("%s: Wlan disabled\n", __func__));
+ return -EIO;
+ }
+
+ if(pmgmt) {
+ AR_DEBUG_PRINTF(ATH_DEBUG_INFO, ("%s: Max Perf\n", __func__));
+ pwrMode.powerMode = MAX_PERF_POWER;
+ } else {
+ AR_DEBUG_PRINTF(ATH_DEBUG_INFO, ("%s: Rec Power\n", __func__));
+ pwrMode.powerMode = REC_POWER;
+ }
+
+ if(wmi_powermode_cmd(ar->arWmi, pwrMode.powerMode) != A_OK) {
+ AR_DEBUG_PRINTF(ATH_DEBUG_ERR, ("%s: wmi_powermode_cmd failed\n", __func__));
+ return -EIO;
+ }
+
+ return 0;
+}
+
+static int
+ar6k_cfg80211_add_virtual_intf(struct wiphy *wiphy, char *name,
+ enum nl80211_iftype type, u32 *flags,
+ struct vif_params *params)
+{
+
+ AR_DEBUG_PRINTF(ATH_DEBUG_INFO, ("%s: not supported\n", __func__));
+
+ /* Multiple virtual interfaces are not supported.
+ * The default interface supports the STA and IBSS types.
+ */
+ return -EOPNOTSUPP;
+}
+
+static int
+ar6k_cfg80211_del_virtual_intf(struct wiphy *wiphy, struct net_device *dev)
+{
+
+ AR_DEBUG_PRINTF(ATH_DEBUG_INFO, ("%s: not supported\n", __func__));
+
+ /* Multiple virtual interfaces are not supported.
+ * The default interface supports the STA and IBSS types.
+ */
+ return -EOPNOTSUPP;
+}
+
+static int
+ar6k_cfg80211_change_iface(struct wiphy *wiphy, struct net_device *ndev,
+ enum nl80211_iftype type, u32 *flags,
+ struct vif_params *params)
+{
+ AR_SOFTC_T *ar = ar6k_priv(ndev);
+ struct wireless_dev *wdev = ar->wdev;
+
+ AR_DEBUG_PRINTF(ATH_DEBUG_INFO, ("%s: type %u\n", __func__, type));
+
+ if(ar->arWmiReady == FALSE) {
+ AR_DEBUG_PRINTF(ATH_DEBUG_ERR, ("%s: Wmi not ready\n", __func__));
+ return -EIO;
+ }
+
+ if(ar->arWlanState == WLAN_DISABLED) {
+ AR_DEBUG_PRINTF(ATH_DEBUG_ERR, ("%s: Wlan disabled\n", __func__));
+ return -EIO;
+ }
+
+ switch (type) {
+ case NL80211_IFTYPE_STATION:
+ ar->arNextMode = INFRA_NETWORK;
+ break;
+ case NL80211_IFTYPE_ADHOC:
+ ar->arNextMode = ADHOC_NETWORK;
+ break;
+ default:
+ AR_DEBUG_PRINTF(ATH_DEBUG_ERR, ("%s: type %u\n", __func__, type));
+ return -EOPNOTSUPP;
+ }
+
+ wdev->iftype = type;
+
+ return 0;
+}
+
+static int
+ar6k_cfg80211_join_ibss(struct wiphy *wiphy, struct net_device *dev,
+ struct cfg80211_ibss_params *ibss_param)
+{
+ AR_SOFTC_T *ar = ar6k_priv(dev);
+ A_STATUS status;
+
+ AR_DEBUG_PRINTF(ATH_DEBUG_INFO, ("%s: \n", __func__));
+
+ if(ar->arWmiReady == FALSE) {
+ AR_DEBUG_PRINTF(ATH_DEBUG_ERR, ("%s: Wmi not ready\n", __func__));
+ return -EIO;
+ }
+
+ if(ar->arWlanState == WLAN_DISABLED) {
+ AR_DEBUG_PRINTF(ATH_DEBUG_ERR, ("%s: Wlan disabled\n", __func__));
+ return -EIO;
+ }
+
+ if(!ibss_param->ssid_len || IEEE80211_MAX_SSID_LEN < ibss_param->ssid_len) {
+ AR_DEBUG_PRINTF(ATH_DEBUG_ERR, ("%s: ssid invalid\n", __func__));
+ return -EINVAL;
+ }
+
+ ar->arSsidLen = ibss_param->ssid_len;
+ A_MEMCPY(ar->arSsid, ibss_param->ssid, ar->arSsidLen);
+
+ if(ibss_param->channel) {
+ ar->arChannelHint = ibss_param->channel->center_freq;
+ }
+
+ if(ibss_param->channel_fixed) {
+ /* TODO: channel_fixed: the channel should be fixed and no other channels
+ * searched for IBSSs to join. The target firmware does not support this
+ * feature yet and needs to be updated. */
+ }
+
+ A_MEMZERO(ar->arReqBssid, sizeof(ar->arReqBssid));
+ if(ibss_param->bssid) {
+ if(A_MEMCMP(&ibss_param->bssid, bcast_mac, AR6000_ETH_ADDR_LEN)) {
+ A_MEMCPY(ar->arReqBssid, ibss_param->bssid, sizeof(ar->arReqBssid));
+ }
+ }
+
+ ar6k_set_wpa_version(ar, 0);
+ ar6k_set_auth_type(ar, NL80211_AUTHTYPE_OPEN_SYSTEM);
+
+ if(ibss_param->privacy) {
+ ar6k_set_cipher(ar, WLAN_CIPHER_SUITE_WEP40, true);
+ ar6k_set_cipher(ar, WLAN_CIPHER_SUITE_WEP40, false);
+ } else {
+ ar6k_set_cipher(ar, IW_AUTH_CIPHER_NONE, true);
+ ar6k_set_cipher(ar, IW_AUTH_CIPHER_NONE, false);
+ }
+
+ ar->arNetworkType = ar->arNextMode;
+
+ AR_DEBUG_PRINTF(ATH_DEBUG_INFO, ("%s: Connect called with authmode %d dot11 auth %d"\
+ " PW crypto %d PW crypto Len %d GRP crypto %d"\
+ " GRP crypto Len %d channel hint %u\n",
+ __func__, ar->arAuthMode, ar->arDot11AuthMode,
+ ar->arPairwiseCrypto, ar->arPairwiseCryptoLen,
+ ar->arGroupCrypto, ar->arGroupCryptoLen, ar->arChannelHint));
+
+ status = wmi_connect_cmd(ar->arWmi, ar->arNetworkType,
+ ar->arDot11AuthMode, ar->arAuthMode,
+ ar->arPairwiseCrypto, ar->arPairwiseCryptoLen,
+ ar->arGroupCrypto,ar->arGroupCryptoLen,
+ ar->arSsidLen, ar->arSsid,
+ ar->arReqBssid, ar->arChannelHint,
+ ar->arConnectCtrlFlags);
+
+ return 0;
+}
+
+static int
+ar6k_cfg80211_leave_ibss(struct wiphy *wiphy, struct net_device *dev)
+{
+ AR_SOFTC_T *ar = (AR_SOFTC_T *)ar6k_priv(dev);
+
+ AR_DEBUG_PRINTF(ATH_DEBUG_INFO, ("%s: \n", __func__));
+
+ if(ar->arWmiReady == FALSE) {
+ AR_DEBUG_PRINTF(ATH_DEBUG_ERR, ("%s: Wmi not ready\n", __func__));
+ return -EIO;
+ }
+
+ if(ar->arWlanState == WLAN_DISABLED) {
+ AR_DEBUG_PRINTF(ATH_DEBUG_ERR, ("%s: Wlan disabled\n", __func__));
+ return -EIO;
+ }
+
+ wmi_disconnect_cmd(ar->arWmi);
+ A_MEMZERO(ar->arSsid, sizeof(ar->arSsid));
+ ar->arSsidLen = 0;
+
+ return 0;
+}
+
+
+static const
+A_UINT32 cipher_suites[] = {
+ WLAN_CIPHER_SUITE_WEP40,
+ WLAN_CIPHER_SUITE_WEP104,
+ WLAN_CIPHER_SUITE_TKIP,
+ WLAN_CIPHER_SUITE_CCMP,
+};
+
+static struct
+cfg80211_ops ar6k_cfg80211_ops = {
+ .change_virtual_intf = ar6k_cfg80211_change_iface,
+ .add_virtual_intf = ar6k_cfg80211_add_virtual_intf,
+ .del_virtual_intf = ar6k_cfg80211_del_virtual_intf,
+ .scan = ar6k_cfg80211_scan,
+ .connect = ar6k_cfg80211_connect,
+ .disconnect = ar6k_cfg80211_disconnect,
+ .add_key = ar6k_cfg80211_add_key,
+ .get_key = ar6k_cfg80211_get_key,
+ .del_key = ar6k_cfg80211_del_key,
+ .set_default_key = ar6k_cfg80211_set_default_key,
+ .set_default_mgmt_key = ar6k_cfg80211_set_default_mgmt_key,
+ .set_wiphy_params = ar6k_cfg80211_set_wiphy_params,
+ .set_bitrate_mask = ar6k_cfg80211_set_bitrate_mask,
+ .set_tx_power = ar6k_cfg80211_set_txpower,
+ .get_tx_power = ar6k_cfg80211_get_txpower,
+ .set_power_mgmt = ar6k_cfg80211_set_power_mgmt,
+ .join_ibss = ar6k_cfg80211_join_ibss,
+ .leave_ibss = ar6k_cfg80211_leave_ibss,
+};
+
+struct wireless_dev *
+ar6k_cfg80211_init(struct device *dev)
+{
+ int ret = 0;
+ struct wireless_dev *wdev;
+
+ AR_DEBUG_PRINTF(ATH_DEBUG_INFO, ("%s: \n", __func__));
+
+ wdev = kzalloc(sizeof(struct wireless_dev), GFP_KERNEL);
+ if(!wdev) {
+ AR_DEBUG_PRINTF(ATH_DEBUG_ERR,
+ ("%s: Couldn't allocate wireless device\n", __func__));
+ return ERR_PTR(-ENOMEM);
+ }
+
+ /* create a new wiphy for use with cfg80211 */
+ wdev->wiphy = wiphy_new(&ar6k_cfg80211_ops, sizeof(AR_SOFTC_T));
+ if(!wdev->wiphy) {
+ AR_DEBUG_PRINTF(ATH_DEBUG_ERR,
+ ("%s: Couldn't allocate wiphy device\n", __func__));
+ kfree(wdev);
+ return ERR_PTR(-ENOMEM);
+ }
+
+ /* set device pointer for wiphy */
+ set_wiphy_dev(wdev->wiphy, dev);
+
+ wdev->wiphy->interface_modes = BIT(NL80211_IFTYPE_STATION) |
+ BIT(NL80211_IFTYPE_ADHOC);
+ /* max num of ssids that can be probed during scanning */
+ wdev->wiphy->max_scan_ssids = MAX_PROBED_SSID_INDEX;
+ wdev->wiphy->bands[IEEE80211_BAND_2GHZ] = &ar6k_band_2ghz;
+ wdev->wiphy->bands[IEEE80211_BAND_5GHZ] = &ar6k_band_5ghz;
+ wdev->wiphy->signal_type = CFG80211_SIGNAL_TYPE_MBM;
+
+ wdev->wiphy->cipher_suites = cipher_suites;
+ wdev->wiphy->n_cipher_suites = ARRAY_SIZE(cipher_suites);
+
+ ret = wiphy_register(wdev->wiphy);
+ if(ret < 0) {
+ AR_DEBUG_PRINTF(ATH_DEBUG_ERR,
+ ("%s: Couldn't register wiphy device\n", __func__));
+ wiphy_free(wdev->wiphy);
+ return ERR_PTR(ret);
+ }
+
+ return wdev;
+}
+
+void
+ar6k_cfg80211_deinit(AR_SOFTC_T *ar)
+{
+ struct wireless_dev *wdev = ar->wdev;
+
+ AR_DEBUG_PRINTF(ATH_DEBUG_INFO, ("%s: \n", __func__));
+
+ if(ar->scan_request) {
+ cfg80211_scan_done(ar->scan_request, true);
+ ar->scan_request = NULL;
+ }
+
+ if(!wdev)
+ return;
+
+ wiphy_unregister(wdev->wiphy);
+ wiphy_free(wdev->wiphy);
+ kfree(wdev);
+}
+
+
+
+
+
+
+
diff --git a/drivers/staging/ath6kl/os/linux/eeprom.c b/drivers/staging/ath6kl/os/linux/eeprom.c
new file mode 100644
index 000000000000..be77fb87ebf5
--- /dev/null
+++ b/drivers/staging/ath6kl/os/linux/eeprom.c
@@ -0,0 +1,574 @@
+//------------------------------------------------------------------------------
+// Copyright (c) 2004-2010 Atheros Communications Inc.
+// All rights reserved.
+//
+//
+//
+// Permission to use, copy, modify, and/or distribute this software for any
+// purpose with or without fee is hereby granted, provided that the above
+// copyright notice and this permission notice appear in all copies.
+//
+// THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+// WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+// MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+// ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+// WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+// ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+// OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+//
+//
+//
+// Author(s): ="Atheros"
+//------------------------------------------------------------------------------
+
+
+#include "ar6000_drv.h"
+#include "htc.h"
+#include <linux/fs.h>
+
+#include "AR6002/hw2.0/hw/gpio_reg.h"
+#include "AR6002/hw2.0/hw/si_reg.h"
+
+//
+// defines
+//
+
+#define MAX_FILENAME 1023
+#define EEPROM_WAIT_LIMIT 16
+
+#define HOST_INTEREST_ITEM_ADDRESS(item) \
+ (AR6002_HOST_INTEREST_ITEM_ADDRESS(item))
+
+#define EEPROM_SZ 768
+
+/* soft mac */
+#define ATH_MAC_LEN 6
+#define ATH_SOFT_MAC_TMP_BUF_LEN 64
+unsigned char mac_addr[ATH_MAC_LEN];
+unsigned char soft_mac_tmp_buf[ATH_SOFT_MAC_TMP_BUF_LEN];
+char *p_mac = NULL;
+/* soft mac */
+
+//
+// static variables
+//
+
+static A_UCHAR eeprom_data[EEPROM_SZ];
+static A_UINT32 sys_sleep_reg;
+static HIF_DEVICE *p_bmi_device;
+
+//
+// Functions
+//
+
+/* soft mac */
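+/* Parse a colon-separated MAC string ("aa:bb:cc:dd:ee:ff") into 6 bytes;
+ * returns 1 on success and 0 on malformed input or trailing junk. */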
+static int
+wmic_ether_aton(const char *orig, A_UINT8 *eth)
+{
+ const char *bufp;
+ int i;
+
+ i = 0;
+ for(bufp = orig; *bufp != '\0'; ++bufp) {
+ unsigned int val;
+ int h, l;
+
+ h = hex_to_bin(*bufp++);
+
+ if (h < 0) {
+ printk("%s: MAC value is invalid\n", __FUNCTION__);
+ break;
+ }
+
+ l = hex_to_bin(*bufp++);
+ if (l < 0) {
+ printk("%s: MAC value is invalid\n", __FUNCTION__);
+ break;
+ }
+
+ val = (h << 4) | l;
+
+ eth[i] = (unsigned char) (val & 0377);
+ if(++i == ATH_MAC_LEN) {
+ /* That's it. Any trailing junk? */
+ if (*bufp != '\0') {
+ return 0;
+ }
+ return 1;
+ }
+ if (*bufp != ':')
+ break;
+ }
+ return 0;
+}
+
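+/* Patch the soft MAC into the board data (offset 10) and recompute the
+ * 16-bit checksum at offset 4: with the checksum field zeroed, the stored
+ * value is the complement of the XOR of all 16-bit words, so the XOR over
+ * the whole image comes out to 0xffff. */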
+static void
+update_mac(unsigned char* eeprom, int size, unsigned char* macaddr)
+{
+ int i;
+ A_UINT16* ptr = (A_UINT16*)(eeprom+4);
+ A_UINT16 checksum = 0;
+
+ memcpy(eeprom+10,macaddr,6);
+
+ *ptr = 0;
+ ptr = (A_UINT16*)eeprom;
+
+ for (i=0; i<size; i+=2) {
+ checksum ^= *ptr++;
+ }
+ checksum = ~checksum;
+
+ ptr = (A_UINT16*)(eeprom+4);
+ *ptr = checksum;
+ return;
+}
+/* soft mac */
+
+/* Read a Target register and return its value. */
+inline void
+BMI_read_reg(A_UINT32 address, A_UINT32 *pvalue)
+{
+ BMIReadSOCRegister(p_bmi_device, address, pvalue);
+}
+
+/* Write a value to a Target register. */
+inline void
+BMI_write_reg(A_UINT32 address, A_UINT32 value)
+{
+ BMIWriteSOCRegister(p_bmi_device, address, value);
+}
+
+/* Read Target memory word and return its value. */
+inline void
+BMI_read_mem(A_UINT32 address, A_UINT32 *pvalue)
+{
+ BMIReadMemory(p_bmi_device, address, (A_UCHAR*)(pvalue), 4);
+}
+
+/* Write a word to a Target memory. */
+inline void
+BMI_write_mem(A_UINT32 address, A_UINT8 *p_data, A_UINT32 sz)
+{
+ BMIWriteMemory(p_bmi_device, address, (A_UCHAR*)(p_data), sz);
+}
+
+/*
+ * Enable and configure the Target's Serial Interface
+ * so we can access the EEPROM.
+ */
+static void
+enable_SI(HIF_DEVICE *p_device)
+{
+ A_UINT32 regval;
+
+ printk("%s\n", __FUNCTION__);
+
+ p_bmi_device = p_device;
+
+ BMI_read_reg(RTC_BASE_ADDRESS+SYSTEM_SLEEP_OFFSET, &sys_sleep_reg);
+ BMI_write_reg(RTC_BASE_ADDRESS+SYSTEM_SLEEP_OFFSET, SYSTEM_SLEEP_DISABLE_SET(1)); //disable system sleep temporarily
+
+ BMI_read_reg(RTC_BASE_ADDRESS+CLOCK_CONTROL_OFFSET, &regval);
+ regval &= ~CLOCK_CONTROL_SI0_CLK_MASK;
+ BMI_write_reg(RTC_BASE_ADDRESS+CLOCK_CONTROL_OFFSET, regval);
+
+ BMI_read_reg(RTC_BASE_ADDRESS+RESET_CONTROL_OFFSET, &regval);
+ regval &= ~RESET_CONTROL_SI0_RST_MASK;
+ BMI_write_reg(RTC_BASE_ADDRESS+RESET_CONTROL_OFFSET, regval);
+
+
+ BMI_read_reg(GPIO_BASE_ADDRESS+GPIO_PIN0_OFFSET, &regval);
+ regval &= ~GPIO_PIN0_CONFIG_MASK;
+ BMI_write_reg(GPIO_BASE_ADDRESS+GPIO_PIN0_OFFSET, regval);
+
+ BMI_read_reg(GPIO_BASE_ADDRESS+GPIO_PIN1_OFFSET, &regval);
+ regval &= ~GPIO_PIN1_CONFIG_MASK;
+ BMI_write_reg(GPIO_BASE_ADDRESS+GPIO_PIN1_OFFSET, regval);
+
+ /* SI_CONFIG = 0x500a6; */
+ regval = SI_CONFIG_BIDIR_OD_DATA_SET(1) |
+ SI_CONFIG_I2C_SET(1) |
+ SI_CONFIG_POS_SAMPLE_SET(1) |
+ SI_CONFIG_INACTIVE_CLK_SET(1) |
+ SI_CONFIG_INACTIVE_DATA_SET(1) |
+ SI_CONFIG_DIVIDER_SET(6);
+ BMI_write_reg(SI_BASE_ADDRESS+SI_CONFIG_OFFSET, regval);
+
+}
+
+static void
+disable_SI(void)
+{
+ A_UINT32 regval;
+
+ printk("%s\n", __FUNCTION__);
+
+ BMI_write_reg(RTC_BASE_ADDRESS+RESET_CONTROL_OFFSET, RESET_CONTROL_SI0_RST_MASK);
+ BMI_read_reg(RTC_BASE_ADDRESS+CLOCK_CONTROL_OFFSET, &regval);
+ regval |= CLOCK_CONTROL_SI0_CLK_MASK;
+ BMI_write_reg(RTC_BASE_ADDRESS+CLOCK_CONTROL_OFFSET, regval);//Gate SI0 clock
+ BMI_write_reg(RTC_BASE_ADDRESS+SYSTEM_SLEEP_OFFSET, sys_sleep_reg); //restore system sleep setting
+}
+
+/*
+ * Tell the Target to start an 8-byte read from EEPROM,
+ * putting the results in Target RX_DATA registers.
+ */
+static void
+request_8byte_read(int offset)
+{
+ A_UINT32 regval;
+
+// printk("%s: request_8byte_read from offset 0x%x\n", __FUNCTION__, offset);
+
+
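+ /* This appears to follow 24Cxx-style addressing: the low byte is the 0xa0
+  * device-select with the upper offset bits folded in, the next byte is the
+  * low offset, and 0xa1 switches the device to read mode; SI_CS then clocks
+  * out 3 TX bytes and clocks in 8 RX bytes. */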
+ /* SI_TX_DATA0 = read from offset */
+ regval =(0xa1<<16)|
+ ((offset & 0xff)<<8) |
+ (0xa0 | ((offset & 0xff00)>>7));
+
+ BMI_write_reg(SI_BASE_ADDRESS+SI_TX_DATA0_OFFSET, regval);
+
+ regval = SI_CS_START_SET(1) |
+ SI_CS_RX_CNT_SET(8) |
+ SI_CS_TX_CNT_SET(3);
+ BMI_write_reg(SI_BASE_ADDRESS+SI_CS_OFFSET, regval);
+}
+
+/*
+ * Tell the Target to start a 4-byte write to EEPROM,
+ * writing values from Target TX_DATA registers.
+ */
+static void
+request_4byte_write(int offset, A_UINT32 data)
+{
+ A_UINT32 regval;
+
+ printk("%s: request_4byte_write (0x%x) to offset 0x%x\n", __FUNCTION__, data, offset);
+
+ /* SI_TX_DATA0 = write data to offset */
+ regval = ((data & 0xffff) <<16) |
+ ((offset & 0xff)<<8) |
+ (0xa0 | ((offset & 0xff00)>>7));
+ BMI_write_reg(SI_BASE_ADDRESS+SI_TX_DATA0_OFFSET, regval);
+
+ regval = data >> 16;
+ BMI_write_reg(SI_BASE_ADDRESS+SI_TX_DATA1_OFFSET, regval);
+
+ regval = SI_CS_START_SET(1) |
+ SI_CS_RX_CNT_SET(0) |
+ SI_CS_TX_CNT_SET(6);
+ BMI_write_reg(SI_BASE_ADDRESS+SI_CS_OFFSET, regval);
+}
+
+/*
+ * Check whether or not an EEPROM request that was started
+ * earlier has completed yet.
+ */
+static A_BOOL
+request_in_progress(void)
+{
+ A_UINT32 regval;
+
+ /* Wait for DONE_INT in SI_CS */
+ BMI_read_reg(SI_BASE_ADDRESS+SI_CS_OFFSET, &regval);
+
+// printk("%s: request in progress SI_CS=0x%x\n", __FUNCTION__, regval);
+ if (regval & SI_CS_DONE_ERR_MASK) {
+ printk("%s: EEPROM signaled ERROR (0x%x)\n", __FUNCTION__, regval);
+ }
+
+ return (!(regval & SI_CS_DONE_INT_MASK));
+}
+
+/*
+ * Try to detect the type of EEPROM: 16-bit address or 8-bit address.
+ */
+
+static void eeprom_type_detect(void)
+{
+ A_UINT32 regval;
+ A_UINT8 i = 0;
+
+ request_8byte_read(0x100);
+ /* Wait for DONE_INT in SI_CS */
+ do{
+ BMI_read_reg(SI_BASE_ADDRESS+SI_CS_OFFSET, &regval);
+ if (regval & SI_CS_DONE_ERR_MASK) {
+ printk("%s: ERROR : address type was wrongly set\n", __FUNCTION__);
+ break;
+ }
+ if (i++ == EEPROM_WAIT_LIMIT) {
+ printk("%s: EEPROM not responding\n", __FUNCTION__);
+ }
+ } while(!(regval & SI_CS_DONE_INT_MASK));
+}
+
+/*
+ * Extract the results of a completed EEPROM Read request
+ * and return them to the caller.
+ */
+inline void
+read_8byte_results(A_UINT32 *data)
+{
+ /* Read SI_RX_DATA0 and SI_RX_DATA1 */
+ BMI_read_reg(SI_BASE_ADDRESS+SI_RX_DATA0_OFFSET, &data[0]);
+ BMI_read_reg(SI_BASE_ADDRESS+SI_RX_DATA1_OFFSET, &data[1]);
+}
+
+
+/*
+ * Wait for a previously started command to complete.
+ * Time out if the command takes "too long".
+ */
+static void
+wait_for_eeprom_completion(void)
+{
+ int i=0;
+
+ while (request_in_progress()) {
+ if (i++ == EEPROM_WAIT_LIMIT) {
+ printk("%s: EEPROM not responding\n", __FUNCTION__);
+ }
+ }
+}
+
+/*
+ * High-level function which starts an 8-byte read,
+ * waits for it to complete, and returns the result.
+ */
+static void
+fetch_8bytes(int offset, A_UINT32 *data)
+{
+ request_8byte_read(offset);
+ wait_for_eeprom_completion();
+ read_8byte_results(data);
+
+ /* Clear any pending intr */
+ BMI_write_reg(SI_BASE_ADDRESS+SI_CS_OFFSET, SI_CS_DONE_INT_MASK);
+}
+
+/*
+ * High-level function which starts a 4-byte write,
+ * and waits for it to complete.
+ */
+inline void
+commit_4bytes(int offset, A_UINT32 data)
+{
+ request_4byte_write(offset, data);
+ wait_for_eeprom_completion();
+}
+/* ATHENV */
+#ifdef ANDROID_ENV
+void eeprom_ar6000_transfer(HIF_DEVICE *device, char *fake_file, char *p_mac)
+{
+ A_UINT32 first_word;
+ A_UINT32 board_data_addr;
+ int i;
+
+ printk("%s: Enter\n", __FUNCTION__);
+
+ enable_SI(device);
+ eeprom_type_detect();
+
+ if (fake_file) {
+ /*
+ * Transfer from file to Target RAM.
+ * Fetch source data from file.
+ */
+ mm_segment_t oldfs;
+ struct file *filp;
+ struct inode *inode = NULL;
+ int length;
+
+ /* open file */
+ oldfs = get_fs();
+ set_fs(KERNEL_DS);
+ filp = filp_open(fake_file, O_RDONLY, S_IRUSR);
+
+ if (IS_ERR(filp)) {
+ printk("%s: file %s filp_open error\n", __FUNCTION__, fake_file);
+ set_fs(oldfs);
+ return;
+ }
+
+ if (!filp->f_op) {
+ printk("%s: File Operation Method Error\n", __FUNCTION__);
+ filp_close(filp, NULL);
+ set_fs(oldfs);
+ return;
+ }
+
+ inode = GET_INODE_FROM_FILEP(filp);
+ if (!inode) {
+ printk("%s: Get inode from filp failed\n", __FUNCTION__);
+ filp_close(filp, NULL);
+ set_fs(oldfs);
+ return;
+ }
+
+ printk("%s file offset opsition: %xh\n", __FUNCTION__, (unsigned)filp->f_pos);
+
+ /* file's size */
+ length = i_size_read(inode->i_mapping->host);
+ printk("%s: length=%d\n", __FUNCTION__, length);
+ if (length != EEPROM_SZ) {
+ printk("%s: The file's size is not as expected\n", __FUNCTION__);
+ filp_close(filp, NULL);
+ set_fs(oldfs);
+ return;
+ }
+
+ /* read data */
+ if (filp->f_op->read(filp, eeprom_data, length, &filp->f_pos) != length) {
+ printk("%s: file read error\n", __FUNCTION__);
+ filp_close(filp, NULL);
+ set_fs(oldfs);
+ return;
+ }
+
+ /* read data out successfully */
+ filp_close(filp, NULL);
+ set_fs(oldfs);
+ } else {
+ /*
+ * Read from EEPROM to file OR transfer from EEPROM to Target RAM.
+ * Fetch EEPROM_SZ Bytes of Board Data, 8 bytes at a time.
+ */
+
+ fetch_8bytes(0, (A_UINT32 *)(&eeprom_data[0]));
+
+ /* Check the first word of EEPROM for validity */
+ first_word = *((A_UINT32 *)eeprom_data);
+
+ if ((first_word == 0) || (first_word == 0xffffffff)) {
+ printk("Did not find EEPROM with valid Board Data.\n");
+ }
+
+ for (i=8; i<EEPROM_SZ; i+=8) {
+ fetch_8bytes(i, (A_UINT32 *)(&eeprom_data[i]));
+ }
+ }
+
+ /* soft mac */
+ if (p_mac) {
+
+ mm_segment_t oldfs;
+ struct file *filp;
+ struct inode *inode = NULL;
+ int length;
+
+ /* open file */
+ oldfs = get_fs();
+ set_fs(KERNEL_DS);
+ filp = filp_open(p_mac, O_RDONLY, S_IRUSR);
+
+ printk("%s try to open file %s\n", __FUNCTION__, p_mac);
+
+ if (IS_ERR(filp)) {
+ printk("%s: file %s filp_open error\n", __FUNCTION__, p_mac);
+ set_fs(oldfs);
+ return;
+ }
+
+ if (!filp->f_op) {
+ printk("%s: File Operation Method Error\n", __FUNCTION__);
+ filp_close(filp, NULL);
+ set_fs(oldfs);
+ return;
+ }
+
+ inode = GET_INODE_FROM_FILEP(filp);
+ if (!inode) {
+ printk("%s: Get inode from filp failed\n", __FUNCTION__);
+ filp_close(filp, NULL);
+ set_fs(oldfs);
+ return;
+ }
+
+ printk("%s file offset opsition: %xh\n", __FUNCTION__, (unsigned)filp->f_pos);
+
+ /* file's size */
+ length = i_size_read(inode->i_mapping->host);
+ printk("%s: length=%d\n", __FUNCTION__, length);
+ if (length > ATH_SOFT_MAC_TMP_BUF_LEN) {
+ printk("%s: MAC file's size is not as expected\n", __FUNCTION__);
+ filp_close(filp, NULL);
+ set_fs(oldfs);
+ return;
+ }
+
+ /* read data */
+ if (filp->f_op->read(filp, soft_mac_tmp_buf, length, &filp->f_pos) != length) {
+ printk("%s: file read error\n", __FUNCTION__);
+ filp_close(filp, NULL);
+ set_fs(oldfs);
+ return;
+ }
+
+#if 0
+ /* the data we just read */
+ printk("%s: mac address from the file:\n", __FUNCTION__);
+ for (i = 0; i < length; i++)
+ printk("[%c(0x%x)],", soft_mac_tmp_buf[i], soft_mac_tmp_buf[i]);
+ printk("\n");
+#endif
+
+ /* read data out successfully */
+ filp_close(filp, NULL);
+ set_fs(oldfs);
+
+ /* convert mac address */
+ if (!wmic_ether_aton(soft_mac_tmp_buf, mac_addr)) {
+ printk("%s: convert mac value fail\n", __FUNCTION__);
+ return;
+ }
+
+#if 0
+ /* the converted mac address */
+ printk("%s: the converted mac value\n", __FUNCTION__);
+ for (i = 0; i < ATH_MAC_LEN; i++)
+ printk("[0x%x],", mac_addr[i]);
+ printk("\n");
+#endif
+ }
+ /* soft mac */
+
+ /* Determine where in Target RAM to write Board Data */
+ BMI_read_mem( HOST_INTEREST_ITEM_ADDRESS(hi_board_data), &board_data_addr);
+ if (board_data_addr == 0) {
+ printk("hi_board_data is zero\n");
+ }
+
+ /* soft mac */
+#if 1
+ /* Update MAC address in RAM */
+ if (p_mac) {
+ update_mac(eeprom_data, EEPROM_SZ, mac_addr);
+ }
+#endif
+#if 0
+ /* mac address in eeprom array */
+ printk("%s: mac values in eeprom array\n", __FUNCTION__);
+ for (i = 10; i < 10 + 6; i++)
+ printk("[0x%x],", eeprom_data[i]);
+ printk("\n");
+#endif
+ /* soft mac */
+
+ /* Write EEPROM data to Target RAM */
+ BMI_write_mem(board_data_addr, ((A_UINT8 *)eeprom_data), EEPROM_SZ);
+
+ /* Record the fact that Board Data IS initialized */
+ {
+ A_UINT32 one = 1;
+ BMI_write_mem(HOST_INTEREST_ITEM_ADDRESS(hi_board_data_initialized),
+ (A_UINT8 *)&one, sizeof(A_UINT32));
+ }
+
+ disable_SI();
+}
+#endif
+/* ATHENV */
+
diff --git a/drivers/staging/ath6kl/os/linux/export_hci_transport.c b/drivers/staging/ath6kl/os/linux/export_hci_transport.c
new file mode 100644
index 000000000000..ffbf3d229a5e
--- /dev/null
+++ b/drivers/staging/ath6kl/os/linux/export_hci_transport.c
@@ -0,0 +1,125 @@
+//------------------------------------------------------------------------------
+// Copyright (c) 2009-2010 Atheros Corporation. All rights reserved.
+//
+//
+// Permission to use, copy, modify, and/or distribute this software for any
+// purpose with or without fee is hereby granted, provided that the above
+// copyright notice and this permission notice appear in all copies.
+//
+// THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+// WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+// MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+// ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+// WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+// ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+// OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+//
+//
+//------------------------------------------------------------------------------
+//==============================================================================
+// HCI bridge implementation
+//
+// Author(s): ="Atheros"
+//==============================================================================
+#include <a_config.h>
+#include <athdefs.h>
+#include "a_types.h"
+#include "a_osapi.h"
+#include "htc_api.h"
+#include "a_drv.h"
+#include "hif.h"
+#include "common_drv.h"
+#include "a_debug.h"
+#include "hci_transport_api.h"
+
+#include "AR6002/hw4.0/hw/apb_athr_wlan_map.h"
+#include "AR6002/hw4.0/hw/uart_reg.h"
+#include "AR6002/hw4.0/hw/rtc_wlan_reg.h"
+
+HCI_TRANSPORT_HANDLE (*_HCI_TransportAttach)(void *HTCHandle, HCI_TRANSPORT_CONFIG_INFO *pInfo);
+void (*_HCI_TransportDetach)(HCI_TRANSPORT_HANDLE HciTrans);
+A_STATUS (*_HCI_TransportAddReceivePkts)(HCI_TRANSPORT_HANDLE HciTrans, HTC_PACKET_QUEUE *pQueue);
+A_STATUS (*_HCI_TransportSendPkt)(HCI_TRANSPORT_HANDLE HciTrans, HTC_PACKET *pPacket, A_BOOL Synchronous);
+void (*_HCI_TransportStop)(HCI_TRANSPORT_HANDLE HciTrans);
+A_STATUS (*_HCI_TransportStart)(HCI_TRANSPORT_HANDLE HciTrans);
+A_STATUS (*_HCI_TransportEnableDisableAsyncRecv)(HCI_TRANSPORT_HANDLE HciTrans, A_BOOL Enable);
+A_STATUS (*_HCI_TransportRecvHCIEventSync)(HCI_TRANSPORT_HANDLE HciTrans,
+ HTC_PACKET *pPacket,
+ int MaxPollMS);
+A_STATUS (*_HCI_TransportSetBaudRate)(HCI_TRANSPORT_HANDLE HciTrans, A_UINT32 Baud);
+A_STATUS (*_HCI_TransportEnablePowerMgmt)(HCI_TRANSPORT_HANDLE HciTrans, A_BOOL Enable);
+
+extern HCI_TRANSPORT_CALLBACKS ar6kHciTransCallbacks;
+
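+/* Record the PAL module's callbacks and publish the HCI transport entry
+ * points through the exported function pointers above, so an HCI PAL/bridge
+ * module built as a separate kernel module can reach the transport without
+ * linking against it directly. */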
+A_STATUS ar6000_register_hci_transport(HCI_TRANSPORT_CALLBACKS *hciTransCallbacks)
+{
+ ar6kHciTransCallbacks = *hciTransCallbacks;
+
+ _HCI_TransportAttach = HCI_TransportAttach;
+ _HCI_TransportDetach = HCI_TransportDetach;
+ _HCI_TransportAddReceivePkts = HCI_TransportAddReceivePkts;
+ _HCI_TransportSendPkt = HCI_TransportSendPkt;
+ _HCI_TransportStop = HCI_TransportStop;
+ _HCI_TransportStart = HCI_TransportStart;
+ _HCI_TransportEnableDisableAsyncRecv = HCI_TransportEnableDisableAsyncRecv;
+ _HCI_TransportRecvHCIEventSync = HCI_TransportRecvHCIEventSync;
+ _HCI_TransportSetBaudRate = HCI_TransportSetBaudRate;
+ _HCI_TransportEnablePowerMgmt = HCI_TransportEnablePowerMgmt;
+
+ return A_OK;
+}
+
+A_STATUS
+ar6000_get_hif_dev(HIF_DEVICE *device, void *config)
+{
+ A_STATUS status;
+
+ status = HIFConfigureDevice(device,
+ HIF_DEVICE_GET_OS_DEVICE,
+ (HIF_DEVICE_OS_DEVICE_INFO *)config,
+ sizeof(HIF_DEVICE_OS_DEVICE_INFO));
+ return status;
+}
+
+A_STATUS ar6000_set_uart_config(HIF_DEVICE *hifDevice,
+ A_UINT32 scale,
+ A_UINT32 step)
+{
+ A_UINT32 regAddress;
+ A_UINT32 regVal;
+ A_STATUS status;
+
+ regAddress = WLAN_UART_BASE_ADDRESS | UART_CLKDIV_ADDRESS;
+ regVal = ((A_UINT32)scale << 16) | step;
+ /* change the HCI UART scale/step values through the diagnostic window */
+ status = ar6000_WriteRegDiag(hifDevice, &regAddress, &regVal);
+
+ return status;
+}
+
+A_STATUS ar6000_get_core_clock_config(HIF_DEVICE *hifDevice, A_UINT32 *data)
+{
+ A_UINT32 regAddress;
+ A_STATUS status;
+
+ regAddress = WLAN_RTC_BASE_ADDRESS | WLAN_CPU_CLOCK_ADDRESS;
+ /* read CPU clock settings*/
+ status = ar6000_ReadRegDiag(hifDevice, &regAddress, data);
+
+ return status;
+}
+
+EXPORT_SYMBOL(ar6000_register_hci_transport);
+EXPORT_SYMBOL(ar6000_get_hif_dev);
+EXPORT_SYMBOL(ar6000_set_uart_config);
+EXPORT_SYMBOL(ar6000_get_core_clock_config);
+EXPORT_SYMBOL(_HCI_TransportAttach);
+EXPORT_SYMBOL(_HCI_TransportDetach);
+EXPORT_SYMBOL(_HCI_TransportAddReceivePkts);
+EXPORT_SYMBOL(_HCI_TransportSendPkt);
+EXPORT_SYMBOL(_HCI_TransportStop);
+EXPORT_SYMBOL(_HCI_TransportStart);
+EXPORT_SYMBOL(_HCI_TransportEnableDisableAsyncRecv);
+EXPORT_SYMBOL(_HCI_TransportRecvHCIEventSync);
+EXPORT_SYMBOL(_HCI_TransportSetBaudRate);
+EXPORT_SYMBOL(_HCI_TransportEnablePowerMgmt);
diff --git a/drivers/staging/ath6kl/os/linux/hci_bridge.c b/drivers/staging/ath6kl/os/linux/hci_bridge.c
new file mode 100644
index 000000000000..5cdc3b85a6f6
--- /dev/null
+++ b/drivers/staging/ath6kl/os/linux/hci_bridge.c
@@ -0,0 +1,1144 @@
+//------------------------------------------------------------------------------
+// Copyright (c) 2009-2010 Atheros Corporation. All rights reserved.
+//
+//
+// Permission to use, copy, modify, and/or distribute this software for any
+// purpose with or without fee is hereby granted, provided that the above
+// copyright notice and this permission notice appear in all copies.
+//
+// THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+// WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+// MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+// ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+// WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+// ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+// OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+//
+//
+//------------------------------------------------------------------------------
+//==============================================================================
+// HCI bridge implementation
+//
+// Author(s): ="Atheros"
+//==============================================================================
+
+#ifdef EXPORT_HCI_BRIDGE_INTERFACE
+#include <linux/etherdevice.h>
+#include <a_config.h>
+#include <athdefs.h>
+#include "a_types.h"
+#include "a_osapi.h"
+#include "htc_api.h"
+#include "wmi.h"
+#include "a_drv.h"
+#include "hif.h"
+#include "common_drv.h"
+#include "a_debug.h"
+#define ATH_DEBUG_HCI_BRIDGE ATH_DEBUG_MAKE_MODULE_MASK(6)
+#define ATH_DEBUG_HCI_RECV ATH_DEBUG_MAKE_MODULE_MASK(7)
+#define ATH_DEBUG_HCI_SEND ATH_DEBUG_MAKE_MODULE_MASK(8)
+#define ATH_DEBUG_HCI_DUMP ATH_DEBUG_MAKE_MODULE_MASK(9)
+#else
+#include "ar6000_drv.h"
+#endif /* EXPORT_HCI_BRIDGE_INTERFACE */
+
+#ifdef ATH_AR6K_ENABLE_GMBOX
+#ifdef EXPORT_HCI_BRIDGE_INTERFACE
+#include "export_hci_transport.h"
+#else
+#include "hci_transport_api.h"
+#endif
+#include "epping_test.h"
+#include "gmboxif.h"
+#include "ar3kconfig.h"
+#include <net/bluetooth/bluetooth.h>
+#include <net/bluetooth/hci_core.h>
+
+ /* only build on newer kernels which have BT configured */
+#if defined(CONFIG_BT_MODULE) || defined(CONFIG_BT)
+#define CONFIG_BLUEZ_HCI_BRIDGE
+#endif
+
+#ifdef EXPORT_HCI_BRIDGE_INTERFACE
+unsigned int ar3khcibaud = 0;
+unsigned int hciuartscale = 0;
+unsigned int hciuartstep = 0;
+
+module_param(ar3khcibaud, int, 0644);
+module_param(hciuartscale, int, 0644);
+module_param(hciuartstep, int, 0644);
+#else
+extern unsigned int ar3khcibaud;
+extern unsigned int hciuartscale;
+extern unsigned int hciuartstep;
+#endif /* EXPORT_HCI_BRIDGE_INTERFACE */
+
+typedef struct {
+ void *pHCIDev; /* HCI bridge device */
+ HCI_TRANSPORT_PROPERTIES HCIProps; /* HCI bridge props */
+ struct hci_dev *pBtStackHCIDev; /* BT Stack HCI dev */
+ A_BOOL HciNormalMode; /* Actual HCI mode enabled (non-TEST)*/
+ A_BOOL HciRegistered; /* HCI device registered with stack */
+ HTC_PACKET_QUEUE HTCPacketStructHead;
+ A_UINT8 *pHTCStructAlloc;
+ spinlock_t BridgeLock;
+#ifdef EXPORT_HCI_BRIDGE_INTERFACE
+ HCI_TRANSPORT_MISC_HANDLES HCITransHdl;
+#else
+ AR_SOFTC_T *ar;
+#endif /* EXPORT_HCI_BRIDGE_INTERFACE */
+} AR6K_HCI_BRIDGE_INFO;
+
+#define MAX_ACL_RECV_BUFS 16
+#define MAX_EVT_RECV_BUFS 8
+#define MAX_HCI_WRITE_QUEUE_DEPTH 32
+#define MAX_ACL_RECV_LENGTH 1200
+#define MAX_EVT_RECV_LENGTH 257
+#define TX_PACKET_RSV_OFFSET 32
+#define NUM_HTC_PACKET_STRUCTS ((MAX_ACL_RECV_BUFS + MAX_EVT_RECV_BUFS + MAX_HCI_WRITE_QUEUE_DEPTH) * 2)
+
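+/* HCI command opcodes are little-endian on the wire; the macro below
+ * reassembles the 16-bit opcode (OGF in the upper 6 bits, OCF in the lower
+ * 10 bits) from the first two bytes of the packet. */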
+#define HCI_GET_OP_CODE(p) (((A_UINT16)((p)[1])) << 8) | ((A_UINT16)((p)[0]))
+
+extern unsigned int setupbtdev;
+AR3K_CONFIG_INFO ar3kconfig;
+
+#ifdef EXPORT_HCI_BRIDGE_INTERFACE
+AR6K_HCI_BRIDGE_INFO *g_pHcidevInfo;
+#endif
+
+static A_STATUS bt_setup_hci(AR6K_HCI_BRIDGE_INFO *pHcidevInfo);
+static void bt_cleanup_hci(AR6K_HCI_BRIDGE_INFO *pHcidevInfo);
+static A_STATUS bt_register_hci(AR6K_HCI_BRIDGE_INFO *pHcidevInfo);
+static A_BOOL bt_indicate_recv(AR6K_HCI_BRIDGE_INFO *pHcidevInfo,
+ HCI_TRANSPORT_PACKET_TYPE Type,
+ struct sk_buff *skb);
+static struct sk_buff *bt_alloc_buffer(AR6K_HCI_BRIDGE_INFO *pHcidevInfo, int Length);
+static void bt_free_buffer(AR6K_HCI_BRIDGE_INFO *pHcidevInfo, struct sk_buff *skb);
+
+#ifdef EXPORT_HCI_BRIDGE_INTERFACE
+A_STATUS ar6000_setup_hci(void *ar);
+void ar6000_cleanup_hci(void *ar);
+A_STATUS hci_test_send(void *ar, struct sk_buff *skb);
+#else
+A_STATUS ar6000_setup_hci(AR_SOFTC_T *ar);
+void ar6000_cleanup_hci(AR_SOFTC_T *ar);
+/* HCI bridge testing */
+A_STATUS hci_test_send(AR_SOFTC_T *ar, struct sk_buff *skb);
+#endif /* EXPORT_HCI_BRIDGE_INTERFACE */
+
+#define LOCK_BRIDGE(dev) spin_lock_bh(&(dev)->BridgeLock)
+#define UNLOCK_BRIDGE(dev) spin_unlock_bh(&(dev)->BridgeLock)
+
+static inline void FreeBtOsBuf(AR6K_HCI_BRIDGE_INFO *pHcidevInfo, void *osbuf)
+{
+ if (pHcidevInfo->HciNormalMode) {
+ bt_free_buffer(pHcidevInfo, (struct sk_buff *)osbuf);
+ } else {
+ /* in test mode, these are just ordinary netbuf allocations */
+ A_NETBUF_FREE(osbuf);
+ }
+}
+
+static void FreeHTCStruct(AR6K_HCI_BRIDGE_INFO *pHcidevInfo, HTC_PACKET *pPacket)
+{
+ LOCK_BRIDGE(pHcidevInfo);
+ HTC_PACKET_ENQUEUE(&pHcidevInfo->HTCPacketStructHead,pPacket);
+ UNLOCK_BRIDGE(pHcidevInfo);
+}
+
+static HTC_PACKET * AllocHTCStruct(AR6K_HCI_BRIDGE_INFO *pHcidevInfo)
+{
+ HTC_PACKET *pPacket = NULL;
+ LOCK_BRIDGE(pHcidevInfo);
+ pPacket = HTC_PACKET_DEQUEUE(&pHcidevInfo->HTCPacketStructHead);
+ UNLOCK_BRIDGE(pHcidevInfo);
+ return pPacket;
+}
+
+#define BLOCK_ROUND_UP_PWR2(x, align) (((int) (x) + ((align)-1)) & ~((align)-1))
+
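+/* Allocate NumBuffers receive buffers of the given packet type, sized for the
+ * maximum ACL/event payload plus the transport head/tail room, and hand them
+ * to the HCI transport layer as a single queue. */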
+static void RefillRecvBuffers(AR6K_HCI_BRIDGE_INFO *pHcidevInfo,
+ HCI_TRANSPORT_PACKET_TYPE Type,
+ int NumBuffers)
+{
+ int length, i;
+ void *osBuf = NULL;
+ HTC_PACKET_QUEUE queue;
+ HTC_PACKET *pPacket;
+
+ INIT_HTC_PACKET_QUEUE(&queue);
+
+ if (Type == HCI_ACL_TYPE) {
+ if (pHcidevInfo->HciNormalMode) {
+ length = HCI_MAX_FRAME_SIZE;
+ } else {
+ length = MAX_ACL_RECV_LENGTH;
+ }
+ } else {
+ length = MAX_EVT_RECV_LENGTH;
+ }
+
+ /* add on transport head and tail room */
+ length += pHcidevInfo->HCIProps.HeadRoom + pHcidevInfo->HCIProps.TailRoom;
+ /* round up to the required I/O padding */
+ length = BLOCK_ROUND_UP_PWR2(length,pHcidevInfo->HCIProps.IOBlockPad);
+
+ for (i = 0; i < NumBuffers; i++) {
+
+ if (pHcidevInfo->HciNormalMode) {
+ osBuf = bt_alloc_buffer(pHcidevInfo,length);
+ } else {
+ osBuf = A_NETBUF_ALLOC(length);
+ }
+
+ if (NULL == osBuf) {
+ break;
+ }
+
+ pPacket = AllocHTCStruct(pHcidevInfo);
+ if (NULL == pPacket) {
+ FreeBtOsBuf(pHcidevInfo,osBuf);
+ AR_DEBUG_PRINTF(ATH_DEBUG_ERR,("Failed to alloc HTC struct \n"));
+ break;
+ }
+
+ SET_HTC_PACKET_INFO_RX_REFILL(pPacket,osBuf,A_NETBUF_DATA(osBuf),length,Type);
+ /* add to queue */
+ HTC_PACKET_ENQUEUE(&queue,pPacket);
+ }
+
+ if (i > 0) {
+ HCI_TransportAddReceivePkts(pHcidevInfo->pHCIDev, &queue);
+ }
+}
+
+#define HOST_INTEREST_ITEM_ADDRESS(ar, item) \
+ (((ar)->arTargetType == TARGET_TYPE_AR6002) ? AR6002_HOST_INTEREST_ITEM_ADDRESS(item) : \
+ (((ar)->arTargetType == TARGET_TYPE_AR6003) ? AR6003_HOST_INTEREST_ITEM_ADDRESS(item) : 0))
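+
+/* TransportReady callback, invoked by the HCI transport once the GMBOX
+ * transport is up: save the transport properties, post receive buffers,
+ * start the transport, apply the AR3K configuration (baud rate, UART
+ * scale/step, power management) and, in normal mode, register the HCI
+ * device with the BT stack. */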
+static A_STATUS ar6000_hci_transport_ready(HCI_TRANSPORT_HANDLE HCIHandle,
+ HCI_TRANSPORT_PROPERTIES *pProps,
+ void *pContext)
+{
+ AR6K_HCI_BRIDGE_INFO *pHcidevInfo = (AR6K_HCI_BRIDGE_INFO *)pContext;
+ A_STATUS status;
+ A_UINT32 address, hci_uart_pwr_mgmt_params;
+// AR3K_CONFIG_INFO ar3kconfig;
+
+ pHcidevInfo->pHCIDev = HCIHandle;
+
+ A_MEMCPY(&pHcidevInfo->HCIProps,pProps,sizeof(*pProps));
+
+ AR_DEBUG_PRINTF(ATH_DEBUG_HCI_BRIDGE,("HCI ready (hci:0x%lX, headroom:%d, tailroom:%d blockpad:%d) \n",
+ (unsigned long)HCIHandle,
+ pHcidevInfo->HCIProps.HeadRoom,
+ pHcidevInfo->HCIProps.TailRoom,
+ pHcidevInfo->HCIProps.IOBlockPad));
+
+#ifdef EXPORT_HCI_BRIDGE_INTERFACE
+ A_ASSERT((pProps->HeadRoom + pProps->TailRoom) <= ((struct net_device *)(pHcidevInfo->HCITransHdl.netDevice))->hard_header_len);
+#else
+ A_ASSERT((pProps->HeadRoom + pProps->TailRoom) <= pHcidevInfo->ar->arNetDev->hard_header_len);
+#endif
+
+ /* provide buffers */
+ RefillRecvBuffers(pHcidevInfo, HCI_ACL_TYPE, MAX_ACL_RECV_BUFS);
+ RefillRecvBuffers(pHcidevInfo, HCI_EVENT_TYPE, MAX_EVT_RECV_BUFS);
+
+ do {
+ /* start transport */
+ status = HCI_TransportStart(pHcidevInfo->pHCIDev);
+
+ if (A_FAILED(status)) {
+ break;
+ }
+
+ if (!pHcidevInfo->HciNormalMode) {
+ /* in test mode, no need to go any further */
+ break;
+ }
+
+ // The delay is required when AR6K is driving the BT reset line,
+ // where time is needed after the BT chip comes out of reset (HCI_TransportStart)
+ // and before the first HCI command is issued (AR3KConfigure).
+ // FIXME
+ // The delay should be configurable and only be applied when AR6K is driving the
+ // BT reset line. This could be done via a module parameter or based on some HW
+ // config info. For now, apply a 100ms delay unconditionally.
+ A_MDELAY(100);
+
+ A_MEMZERO(&ar3kconfig,sizeof(ar3kconfig));
+ ar3kconfig.pHCIDev = pHcidevInfo->pHCIDev;
+ ar3kconfig.pHCIProps = &pHcidevInfo->HCIProps;
+#ifdef EXPORT_HCI_BRIDGE_INTERFACE
+ ar3kconfig.pHIFDevice = (HIF_DEVICE *)(pHcidevInfo->HCITransHdl.hifDevice);
+#else
+ ar3kconfig.pHIFDevice = pHcidevInfo->ar->arHifDevice;
+#endif
+ ar3kconfig.pBtStackHCIDev = pHcidevInfo->pBtStackHCIDev;
+
+ if (ar3khcibaud != 0) {
+ /* user wants ar3k baud rate change */
+ ar3kconfig.Flags |= AR3K_CONFIG_FLAG_SET_AR3K_BAUD;
+ ar3kconfig.Flags |= AR3K_CONFIG_FLAG_AR3K_BAUD_CHANGE_DELAY;
+ ar3kconfig.AR3KBaudRate = ar3khcibaud;
+ }
+
+ if ((hciuartscale != 0) || (hciuartstep != 0)) {
+ /* user wants to tune HCI bridge UART scale/step values */
+ ar3kconfig.AR6KScale = (A_UINT16)hciuartscale;
+ ar3kconfig.AR6KStep = (A_UINT16)hciuartstep;
+ ar3kconfig.Flags |= AR3K_CONFIG_FLAG_SET_AR6K_SCALE_STEP;
+ }
+
+ /* Fetch the address of the hi_hci_uart_pwr_mgmt_params instance in the host interest area */
+ address = TARG_VTOP(pHcidevInfo->ar->arTargetType,
+ HOST_INTEREST_ITEM_ADDRESS(pHcidevInfo->ar, hi_hci_uart_pwr_mgmt_params));
+ status = ar6000_ReadRegDiag(pHcidevInfo->ar->arHifDevice, &address, &hci_uart_pwr_mgmt_params);
+ if (A_OK == status) {
+ ar3kconfig.PwrMgmtEnabled = (hci_uart_pwr_mgmt_params & 0x1);
+ ar3kconfig.IdleTimeout = (hci_uart_pwr_mgmt_params & 0xFFFF0000) >> 16;
+ ar3kconfig.WakeupTimeout = (hci_uart_pwr_mgmt_params & 0xFF00) >> 8;
+ } else {
+ AR_DEBUG_PRINTF(ATH_DEBUG_ERR, ("HCI Bridge: failed to read hci_uart_pwr_mgmt_params! \n"));
+ }
+ /* configure the AR3K device */
+ memcpy(ar3kconfig.bdaddr,pHcidevInfo->ar->bdaddr,6);
+ status = AR3KConfigure(&ar3kconfig);
+ if (A_FAILED(status)) {
+ break;
+ }
+
+ /* Make sure both AR6K and AR3K have power management enabled */
+ if (ar3kconfig.PwrMgmtEnabled) {
+ status = HCI_TransportEnablePowerMgmt(pHcidevInfo->pHCIDev, TRUE);
+ if (A_FAILED(status)) {
+ AR_DEBUG_PRINTF(ATH_DEBUG_ERR, ("HCI Bridge: failed to enable TLPM for AR6K! \n"));
+ }
+ }
+
+ status = bt_register_hci(pHcidevInfo);
+
+ } while (FALSE);
+
+ return status;
+}
+
+static void ar6000_hci_transport_failure(void *pContext, A_STATUS Status)
+{
+ AR6K_HCI_BRIDGE_INFO *pHcidevInfo = (AR6K_HCI_BRIDGE_INFO *)pContext;
+
+ AR_DEBUG_PRINTF(ATH_DEBUG_ERR, ("HCI Bridge: transport failure! \n"));
+
+ if (pHcidevInfo->HciNormalMode) {
+ /* TODO .. */
+ }
+}
+
+static void ar6000_hci_transport_removed(void *pContext)
+{
+ AR6K_HCI_BRIDGE_INFO *pHcidevInfo = (AR6K_HCI_BRIDGE_INFO *)pContext;
+
+ AR_DEBUG_PRINTF(ATH_DEBUG_HCI_BRIDGE, ("HCI Bridge: transport removed. \n"));
+
+ A_ASSERT(pHcidevInfo->pHCIDev != NULL);
+
+ HCI_TransportDetach(pHcidevInfo->pHCIDev);
+ bt_cleanup_hci(pHcidevInfo);
+ pHcidevInfo->pHCIDev = NULL;
+}
+
+static void ar6000_hci_send_complete(void *pContext, HTC_PACKET *pPacket)
+{
+ AR6K_HCI_BRIDGE_INFO *pHcidevInfo = (AR6K_HCI_BRIDGE_INFO *)pContext;
+ void *osbuf = pPacket->pPktContext;
+ A_ASSERT(osbuf != NULL);
+ A_ASSERT(pHcidevInfo != NULL);
+
+ if (A_FAILED(pPacket->Status)) {
+ if ((pPacket->Status != A_ECANCELED) && (pPacket->Status != A_NO_RESOURCE)) {
+ AR_DEBUG_PRINTF(ATH_DEBUG_ERR, ("HCI Bridge: Send Packet Failed: %d \n",pPacket->Status));
+ }
+ }
+
+ FreeHTCStruct(pHcidevInfo,pPacket);
+ FreeBtOsBuf(pHcidevInfo,osbuf);
+
+}
+
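+/* Receive completion: strip the transport headroom from the skb, then hand
+ * the packet to the BT stack in normal mode or to the network stack (via
+ * netif_rx) in test mode. Buffers not accepted by either stack are freed. */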
+static void ar6000_hci_pkt_recv(void *pContext, HTC_PACKET *pPacket)
+{
+ AR6K_HCI_BRIDGE_INFO *pHcidevInfo = (AR6K_HCI_BRIDGE_INFO *)pContext;
+ struct sk_buff *skb;
+
+ A_ASSERT(pHcidevInfo != NULL);
+ skb = (struct sk_buff *)pPacket->pPktContext;
+ A_ASSERT(skb != NULL);
+
+ do {
+
+ if (A_FAILED(pPacket->Status)) {
+ break;
+ }
+
+ AR_DEBUG_PRINTF(ATH_DEBUG_HCI_RECV,
+ ("HCI Bridge, packet received type : %d len:%d \n",
+ HCI_GET_PACKET_TYPE(pPacket),pPacket->ActualLength));
+
+ /* set the actual buffer position in the os buffer, HTC recv buffers posted to HCI are set
+ * to fill the front of the buffer */
+ A_NETBUF_PUT(skb,pPacket->ActualLength + pHcidevInfo->HCIProps.HeadRoom);
+ A_NETBUF_PULL(skb,pHcidevInfo->HCIProps.HeadRoom);
+
+ if (AR_DEBUG_LVL_CHECK(ATH_DEBUG_HCI_DUMP)) {
+ AR_DEBUG_PRINTF(ATH_DEBUG_ANY,("<<< Recv HCI %s packet len:%d \n",
+ (HCI_GET_PACKET_TYPE(pPacket) == HCI_EVENT_TYPE) ? "EVENT" : "ACL",
+ skb->len));
+ AR_DEBUG_PRINTBUF(skb->data, skb->len,"BT HCI RECV Packet Dump");
+ }
+
+ if (pHcidevInfo->HciNormalMode) {
+ /* indicate the packet */
+ if (bt_indicate_recv(pHcidevInfo,HCI_GET_PACKET_TYPE(pPacket),skb)) {
+ /* bt stack accepted the packet */
+ skb = NULL;
+ }
+ break;
+ }
+
+ /* for testing, indicate packet to the network stack */
+#ifdef EXPORT_HCI_BRIDGE_INTERFACE
+ skb->dev = (struct net_device *)(pHcidevInfo->HCITransHdl.netDevice);
+ if ((((struct net_device *)pHcidevInfo->HCITransHdl.netDevice)->flags & IFF_UP) == IFF_UP) {
+ skb->protocol = eth_type_trans(skb, (struct net_device *)(pHcidevInfo->HCITransHdl.netDevice));
+#else
+ skb->dev = pHcidevInfo->ar->arNetDev;
+ if ((pHcidevInfo->ar->arNetDev->flags & IFF_UP) == IFF_UP) {
+ skb->protocol = eth_type_trans(skb, pHcidevInfo->ar->arNetDev);
+#endif
+ netif_rx(skb);
+ skb = NULL;
+ }
+
+ } while (FALSE);
+
+ FreeHTCStruct(pHcidevInfo,pPacket);
+
+ if (skb != NULL) {
+ /* packet was not accepted, free it */
+ FreeBtOsBuf(pHcidevInfo,skb);
+ }
+
+}
+
+static void ar6000_hci_pkt_refill(void *pContext, HCI_TRANSPORT_PACKET_TYPE Type, int BuffersAvailable)
+{
+ AR6K_HCI_BRIDGE_INFO *pHcidevInfo = (AR6K_HCI_BRIDGE_INFO *)pContext;
+ int refillCount;
+
+ if (Type == HCI_ACL_TYPE) {
+ refillCount = MAX_ACL_RECV_BUFS - BuffersAvailable;
+ } else {
+ refillCount = MAX_EVT_RECV_BUFS - BuffersAvailable;
+ }
+
+ if (refillCount > 0) {
+ RefillRecvBuffers(pHcidevInfo,Type,refillCount);
+ }
+
+}
+
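+/* Invoked when the transport send queue reaches MaxSendQueueDepth; in test
+ * mode ordinary epping data packets may be dropped, everything else is kept. */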
+static HCI_SEND_FULL_ACTION ar6000_hci_pkt_send_full(void *pContext, HTC_PACKET *pPacket)
+{
+ AR6K_HCI_BRIDGE_INFO *pHcidevInfo = (AR6K_HCI_BRIDGE_INFO *)pContext;
+ HCI_SEND_FULL_ACTION action = HCI_SEND_FULL_KEEP;
+
+ if (!pHcidevInfo->HciNormalMode) {
+ /* for epping testing, check packet tag, some epping packets are
+ * special and cannot be dropped */
+ if (HTC_GET_TAG_FROM_PKT(pPacket) == AR6K_DATA_PKT_TAG) {
+ action = HCI_SEND_FULL_DROP;
+ }
+ }
+
+ return action;
+}
+
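+/* Set up the HCI bridge: allocate the bridge state, pre-allocate the pool of
+ * HTC packet wrappers, set up the BT stack device (normal mode) and attach to
+ * the HCI transport over HTC with the callbacks configured below. */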
+#ifdef EXPORT_HCI_BRIDGE_INTERFACE
+A_STATUS ar6000_setup_hci(void *ar)
+#else
+A_STATUS ar6000_setup_hci(AR_SOFTC_T *ar)
+#endif
+{
+ HCI_TRANSPORT_CONFIG_INFO config;
+ A_STATUS status = A_OK;
+ int i;
+ HTC_PACKET *pPacket;
+ AR6K_HCI_BRIDGE_INFO *pHcidevInfo;
+
+
+ do {
+
+ pHcidevInfo = (AR6K_HCI_BRIDGE_INFO *)A_MALLOC(sizeof(AR6K_HCI_BRIDGE_INFO));
+
+ if (NULL == pHcidevInfo) {
+ status = A_NO_MEMORY;
+ break;
+ }
+
+ A_MEMZERO(pHcidevInfo, sizeof(AR6K_HCI_BRIDGE_INFO));
+#ifdef EXPORT_HCI_BRIDGE_INTERFACE
+ g_pHcidevInfo = pHcidevInfo;
+ pHcidevInfo->HCITransHdl = *(HCI_TRANSPORT_MISC_HANDLES *)ar;
+#else
+ ar->hcidev_info = pHcidevInfo;
+ pHcidevInfo->ar = ar;
+#endif
+ spin_lock_init(&pHcidevInfo->BridgeLock);
+ INIT_HTC_PACKET_QUEUE(&pHcidevInfo->HTCPacketStructHead);
+
+ ar->exitCallback = AR3KConfigureExit;
+
+ status = bt_setup_hci(pHcidevInfo);
+ if (A_FAILED(status)) {
+ break;
+ }
+
+ if (pHcidevInfo->HciNormalMode) {
+ AR_DEBUG_PRINTF(ATH_DEBUG_HCI_BRIDGE, ("HCI Bridge: running in normal mode... \n"));
+ } else {
+ AR_DEBUG_PRINTF(ATH_DEBUG_HCI_BRIDGE, ("HCI Bridge: running in test mode... \n"));
+ }
+
+ pHcidevInfo->pHTCStructAlloc = (A_UINT8 *)A_MALLOC((sizeof(HTC_PACKET)) * NUM_HTC_PACKET_STRUCTS);
+
+ if (NULL == pHcidevInfo->pHTCStructAlloc) {
+ status = A_NO_MEMORY;
+ break;
+ }
+
+ pPacket = (HTC_PACKET *)pHcidevInfo->pHTCStructAlloc;
+ for (i = 0; i < NUM_HTC_PACKET_STRUCTS; i++,pPacket++) {
+ FreeHTCStruct(pHcidevInfo,pPacket);
+ }
+
+ A_MEMZERO(&config,sizeof(HCI_TRANSPORT_CONFIG_INFO));
+ config.ACLRecvBufferWaterMark = MAX_ACL_RECV_BUFS / 2;
+ config.EventRecvBufferWaterMark = MAX_EVT_RECV_BUFS / 2;
+ config.MaxSendQueueDepth = MAX_HCI_WRITE_QUEUE_DEPTH;
+ config.pContext = pHcidevInfo;
+ config.TransportFailure = ar6000_hci_transport_failure;
+ config.TransportReady = ar6000_hci_transport_ready;
+ config.TransportRemoved = ar6000_hci_transport_removed;
+ config.pHCISendComplete = ar6000_hci_send_complete;
+ config.pHCIPktRecv = ar6000_hci_pkt_recv;
+ config.pHCIPktRecvRefill = ar6000_hci_pkt_refill;
+ config.pHCISendFull = ar6000_hci_pkt_send_full;
+
+#ifdef EXPORT_HCI_BRIDGE_INTERFACE
+ pHcidevInfo->pHCIDev = HCI_TransportAttach(pHcidevInfo->HCITransHdl.htcHandle, &config);
+#else
+ pHcidevInfo->pHCIDev = HCI_TransportAttach(ar->arHtcTarget, &config);
+#endif
+
+ if (NULL == pHcidevInfo->pHCIDev) {
+ status = A_ERROR;
+ }
+
+ } while (FALSE);
+
+ if (A_FAILED(status)) {
+ if (pHcidevInfo != NULL) {
+ if (NULL == pHcidevInfo->pHCIDev) {
+ /* GMBOX may not be present in older chips */
+ /* just return success */
+ status = A_OK;
+ }
+ }
+ ar6000_cleanup_hci(ar);
+ }
+
+ return status;
+}
+
+#ifdef EXPORT_HCI_BRIDGE_INTERFACE
+void ar6000_cleanup_hci(void *ar)
+#else
+void ar6000_cleanup_hci(AR_SOFTC_T *ar)
+#endif
+{
+#ifdef EXPORT_HCI_BRIDGE_INTERFACE
+ AR6K_HCI_BRIDGE_INFO *pHcidevInfo = g_pHcidevInfo;
+#else
+ AR6K_HCI_BRIDGE_INFO *pHcidevInfo = (AR6K_HCI_BRIDGE_INFO *)ar->hcidev_info;
+#endif
+
+ if (pHcidevInfo != NULL) {
+ bt_cleanup_hci(pHcidevInfo);
+
+ if (pHcidevInfo->pHCIDev != NULL) {
+ HCI_TransportStop(pHcidevInfo->pHCIDev);
+ HCI_TransportDetach(pHcidevInfo->pHCIDev);
+ pHcidevInfo->pHCIDev = NULL;
+ }
+
+ if (pHcidevInfo->pHTCStructAlloc != NULL) {
+ A_FREE(pHcidevInfo->pHTCStructAlloc);
+ pHcidevInfo->pHTCStructAlloc = NULL;
+ }
+
+ A_FREE(pHcidevInfo);
+#ifndef EXPORT_HCI_BRIDGE_INTERFACE
+ ar->hcidev_info = NULL;
+#endif
+ }
+
+
+}
+
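+/* Test-mode transmit path: wraps an epping packet in an HTC packet and sends
+ * it out as ACL data. Not available while the bridge is in normal HCI mode. */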
+#ifdef EXPORT_HCI_BRIDGE_INTERFACE
+A_STATUS hci_test_send(void *ar, struct sk_buff *skb)
+#else
+A_STATUS hci_test_send(AR_SOFTC_T *ar, struct sk_buff *skb)
+#endif
+{
+ int status = A_OK;
+ int length;
+ EPPING_HEADER *pHeader;
+ HTC_PACKET *pPacket;
+ HTC_TX_TAG htc_tag = AR6K_DATA_PKT_TAG;
+#ifdef EXPORT_HCI_BRIDGE_INTERFACE
+ AR6K_HCI_BRIDGE_INFO *pHcidevInfo = g_pHcidevInfo;
+#else
+ AR6K_HCI_BRIDGE_INFO *pHcidevInfo = (AR6K_HCI_BRIDGE_INFO *)ar->hcidev_info;
+#endif
+
+ do {
+
+ if (NULL == pHcidevInfo) {
+ status = A_ERROR;
+ break;
+ }
+
+ if (NULL == pHcidevInfo->pHCIDev) {
+ status = A_ERROR;
+ break;
+ }
+
+ if (pHcidevInfo->HciNormalMode) {
+ /* this interface cannot run when normal WMI is running */
+ status = A_ERROR;
+ break;
+ }
+
+ pHeader = (EPPING_HEADER *)A_NETBUF_DATA(skb);
+
+ if (!IS_EPPING_PACKET(pHeader)) {
+ status = A_EINVAL;
+ break;
+ }
+
+ if (IS_EPING_PACKET_NO_DROP(pHeader)) {
+ htc_tag = AR6K_CONTROL_PKT_TAG;
+ }
+
+ length = sizeof(EPPING_HEADER) + pHeader->DataLength;
+
+ pPacket = AllocHTCStruct(pHcidevInfo);
+ if (NULL == pPacket) {
+ status = A_NO_MEMORY;
+ break;
+ }
+
+ SET_HTC_PACKET_INFO_TX(pPacket,
+ skb,
+ A_NETBUF_DATA(skb),
+ length,
+ HCI_ACL_TYPE, /* send everything out as ACL */
+ htc_tag);
+
+ HCI_TransportSendPkt(pHcidevInfo->pHCIDev,pPacket,FALSE);
+ pPacket = NULL;
+
+ } while (FALSE);
+
+ return status;
+}
+
+void ar6000_set_default_ar3kconfig(AR_SOFTC_T *ar, void *ar3kconfig)
+{
+ AR6K_HCI_BRIDGE_INFO *pHcidevInfo = (AR6K_HCI_BRIDGE_INFO *)ar->hcidev_info;
+ AR3K_CONFIG_INFO *config = (AR3K_CONFIG_INFO *)ar3kconfig;
+
+ config->pHCIDev = pHcidevInfo->pHCIDev;
+ config->pHCIProps = &pHcidevInfo->HCIProps;
+ config->pHIFDevice = ar->arHifDevice;
+ config->pBtStackHCIDev = pHcidevInfo->pBtStackHCIDev;
+ config->Flags |= AR3K_CONFIG_FLAG_SET_AR3K_BAUD;
+ config->AR3KBaudRate = 115200;
+}
+
+#ifdef CONFIG_BLUEZ_HCI_BRIDGE
+/*** BT Stack Entrypoints *******/
+
+/*
+ * bt_open - open a handle to the device
+*/
+static int bt_open(struct hci_dev *hdev)
+{
+
+ AR_DEBUG_PRINTF(ATH_DEBUG_TRC, ("HCI Bridge: bt_open - enter\n"));
+ set_bit(HCI_RUNNING, &hdev->flags);
+ set_bit(HCI_UP, &hdev->flags);
+ set_bit(HCI_INIT, &hdev->flags);
+ return 0;
+}
+
+/*
+ * bt_close - close handle to the device
+*/
+static int bt_close(struct hci_dev *hdev)
+{
+ AR_DEBUG_PRINTF(ATH_DEBUG_TRC, ("HCI Bridge: bt_close - enter\n"));
+ clear_bit(HCI_RUNNING, &hdev->flags);
+ return 0;
+}
+
+/*
+ * bt_send_frame - send data frames
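+ * The frame is copied into a freshly allocated skb with the transport
+ * head/tail room reserved, since the HCI transport prepends its own header.
+ * SCO packets are not supported and are silently dropped.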
+*/
+static int bt_send_frame(struct sk_buff *skb)
+{
+ struct hci_dev *hdev = (struct hci_dev *)skb->dev;
+ HCI_TRANSPORT_PACKET_TYPE type;
+ AR6K_HCI_BRIDGE_INFO *pHcidevInfo;
+ HTC_PACKET *pPacket;
+ A_STATUS status = A_OK;
+ struct sk_buff *txSkb = NULL;
+
+ if (!hdev) {
+ AR_DEBUG_PRINTF(ATH_DEBUG_WARN, ("HCI Bridge: bt_send_frame - no device\n"));
+ return -ENODEV;
+ }
+
+ if (!test_bit(HCI_RUNNING, &hdev->flags)) {
+ AR_DEBUG_PRINTF(ATH_DEBUG_TRC, ("HCI Bridge: bt_send_frame - not open\n"));
+ return -EBUSY;
+ }
+
+ pHcidevInfo = (AR6K_HCI_BRIDGE_INFO *)hdev->driver_data;
+ A_ASSERT(pHcidevInfo != NULL);
+
+ AR_DEBUG_PRINTF(ATH_DEBUG_HCI_SEND, ("+bt_send_frame type: %d \n",bt_cb(skb)->pkt_type));
+ type = HCI_COMMAND_TYPE;
+
+ switch (bt_cb(skb)->pkt_type) {
+ case HCI_COMMAND_PKT:
+ type = HCI_COMMAND_TYPE;
+ hdev->stat.cmd_tx++;
+ break;
+
+ case HCI_ACLDATA_PKT:
+ type = HCI_ACL_TYPE;
+ hdev->stat.acl_tx++;
+ break;
+
+ case HCI_SCODATA_PKT:
+ /* we don't support SCO over the bridge */
+ kfree_skb(skb);
+ return 0;
+ default:
+ A_ASSERT(FALSE);
+ kfree_skb(skb);
+ return 0;
+ }
+
+ if (AR_DEBUG_LVL_CHECK(ATH_DEBUG_HCI_DUMP)) {
+ AR_DEBUG_PRINTF(ATH_DEBUG_ANY,(">>> Send HCI %s packet len: %d\n",
+ (type == HCI_COMMAND_TYPE) ? "COMMAND" : "ACL",
+ skb->len));
+ if (type == HCI_COMMAND_TYPE) {
+ A_UINT16 opcode = HCI_GET_OP_CODE(skb->data);
+ AR_DEBUG_PRINTF(ATH_DEBUG_ANY,(" HCI Command: OGF:0x%X OCF:0x%X \r\n",
+ opcode >> 10, opcode & 0x3FF));
+ }
+ AR_DEBUG_PRINTBUF(skb->data,skb->len,"BT HCI SEND Packet Dump");
+ }
+
+ do {
+
+ txSkb = bt_skb_alloc(TX_PACKET_RSV_OFFSET + pHcidevInfo->HCIProps.HeadRoom +
+ pHcidevInfo->HCIProps.TailRoom + skb->len,
+ GFP_ATOMIC);
+
+ if (txSkb == NULL) {
+ status = A_NO_MEMORY;
+ break;
+ }
+
+ bt_cb(txSkb)->pkt_type = bt_cb(skb)->pkt_type;
+ txSkb->dev = (void *)pHcidevInfo->pBtStackHCIDev;
+ skb_reserve(txSkb, TX_PACKET_RSV_OFFSET + pHcidevInfo->HCIProps.HeadRoom);
+ A_MEMCPY(txSkb->data, skb->data, skb->len);
+ skb_put(txSkb,skb->len);
+
+ pPacket = AllocHTCStruct(pHcidevInfo);
+ if (NULL == pPacket) {
+ status = A_NO_MEMORY;
+ break;
+ }
+
+ /* HCI packet length here doesn't include the 1-byte transport header which
+ * will be handled by the HCI transport layer. Enough headroom has already
+ * been reserved above for the transport header
+ */
+ SET_HTC_PACKET_INFO_TX(pPacket,
+ txSkb,
+ txSkb->data,
+ txSkb->len,
+ type,
+ AR6K_CONTROL_PKT_TAG); /* HCI packets cannot be dropped */
+
+ AR_DEBUG_PRINTF(ATH_DEBUG_HCI_SEND, ("HCI Bridge: bt_send_frame skb:0x%lX \n",(unsigned long)txSkb));
+ AR_DEBUG_PRINTF(ATH_DEBUG_HCI_SEND, ("HCI Bridge: type:%d, Total Length:%d Bytes \n",
+ type, txSkb->len));
+
+ status = HCI_TransportSendPkt(pHcidevInfo->pHCIDev,pPacket,FALSE);
+ pPacket = NULL;
+ txSkb = NULL;
+
+ } while (FALSE);
+
+ if (txSkb != NULL) {
+ kfree_skb(txSkb);
+ }
+
+ kfree_skb(skb);
+
+ AR_DEBUG_PRINTF(ATH_DEBUG_HCI_SEND, ("-bt_send_frame \n"));
+ return 0;
+}
+
+/*
+ * bt_ioctl - ioctl processing
+*/
+static int bt_ioctl(struct hci_dev *hdev, unsigned int cmd, unsigned long arg)
+{
+ AR_DEBUG_PRINTF(ATH_DEBUG_TRC, ("HCI Bridge: bt_ioctl - enter\n"));
+ return -ENOIOCTLCMD;
+}
+
+/*
+ * bt_flush - flush outstanding packets
+*/
+static int bt_flush(struct hci_dev *hdev)
+{
+ AR6K_HCI_BRIDGE_INFO *pHcidevInfo;
+
+ AR_DEBUG_PRINTF(ATH_DEBUG_TRC, ("HCI Bridge: bt_flush - enter\n"));
+
+ pHcidevInfo = (AR6K_HCI_BRIDGE_INFO *)hdev->driver_data;
+
+ /* TODO??? */
+
+ return 0;
+}
+
+
+/*
+ * bt_destruct - device destructor
+*/
+static void bt_destruct(struct hci_dev *hdev)
+{
+ AR_DEBUG_PRINTF(ATH_DEBUG_TRC, ("HCI Bridge: bt_destruct - enter\n"));
+ /* nothing to do here */
+}
+
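+/* Allocate and initialize a BlueZ hci_dev bound to the underlying HIF OS
+ * device. Registration with the BT stack is deferred until the transport
+ * reports ready (see bt_register_hci). */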
+static A_STATUS bt_setup_hci(AR6K_HCI_BRIDGE_INFO *pHcidevInfo)
+{
+ A_STATUS status = A_OK;
+ struct hci_dev *pHciDev = NULL;
+ HIF_DEVICE_OS_DEVICE_INFO osDevInfo;
+
+ if (!setupbtdev) {
+ return A_OK;
+ }
+
+ do {
+
+ A_MEMZERO(&osDevInfo,sizeof(osDevInfo));
+ /* get the underlying OS device */
+#ifdef EXPORT_HCI_BRIDGE_INTERFACE
+ status = ar6000_get_hif_dev((HIF_DEVICE *)(pHcidevInfo->HCITransHdl.hifDevice),
+ &osDevInfo);
+#else
+ status = HIFConfigureDevice(pHcidevInfo->ar->arHifDevice,
+ HIF_DEVICE_GET_OS_DEVICE,
+ &osDevInfo,
+ sizeof(osDevInfo));
+#endif
+
+ if (A_FAILED(status)) {
+ AR_DEBUG_PRINTF(ATH_DEBUG_ERR,("Failed to get OS device info from HIF\n"));
+ break;
+ }
+
+ /* allocate a BT HCI struct for this device */
+ pHciDev = hci_alloc_dev();
+ if (NULL == pHciDev) {
+ AR_DEBUG_PRINTF(ATH_DEBUG_ERR, ("HCI Bridge - failed to allocate bt struct \n"));
+ status = A_NO_MEMORY;
+ break;
+ }
+ /* save the device, we'll register this later */
+ pHcidevInfo->pBtStackHCIDev = pHciDev;
+ SET_HCIDEV_DEV(pHciDev,osDevInfo.pOSDevice);
+ SET_HCI_BUS_TYPE(pHciDev, HCI_VIRTUAL, HCI_BREDR);
+ pHciDev->driver_data = pHcidevInfo;
+ pHciDev->open = bt_open;
+ pHciDev->close = bt_close;
+ pHciDev->send = bt_send_frame;
+ pHciDev->ioctl = bt_ioctl;
+ pHciDev->flush = bt_flush;
+ pHciDev->destruct = bt_destruct;
+ pHciDev->owner = THIS_MODULE;
+ /* driver is running in normal BT mode */
+ pHcidevInfo->HciNormalMode = TRUE;
+
+ } while (FALSE);
+
+ if (A_FAILED(status)) {
+ bt_cleanup_hci(pHcidevInfo);
+ }
+
+ return status;
+}
+
+static void bt_cleanup_hci(AR6K_HCI_BRIDGE_INFO *pHcidevInfo)
+{
+ int err;
+
+ if (pHcidevInfo->HciRegistered) {
+ pHcidevInfo->HciRegistered = FALSE;
+ clear_bit(HCI_RUNNING, &pHcidevInfo->pBtStackHCIDev->flags);
+ clear_bit(HCI_UP, &pHcidevInfo->pBtStackHCIDev->flags);
+ clear_bit(HCI_INIT, &pHcidevInfo->pBtStackHCIDev->flags);
+ A_ASSERT(pHcidevInfo->pBtStackHCIDev != NULL);
+ /* unregister */
+ if ((err = hci_unregister_dev(pHcidevInfo->pBtStackHCIDev)) < 0) {
+ AR_DEBUG_PRINTF(ATH_DEBUG_ERR, ("HCI Bridge: failed to unregister with bluetooth %d\n",err));
+ }
+ }
+
+ if (pHcidevInfo->pBtStackHCIDev != NULL) {
+ kfree(pHcidevInfo->pBtStackHCIDev);
+ pHcidevInfo->pBtStackHCIDev = NULL;
+ }
+}
+
+static A_STATUS bt_register_hci(AR6K_HCI_BRIDGE_INFO *pHcidevInfo)
+{
+ int err;
+ A_STATUS status = A_OK;
+
+ do {
+ AR_DEBUG_PRINTF(ATH_DEBUG_HCI_BRIDGE, ("HCI Bridge: registering HCI... \n"));
+ A_ASSERT(pHcidevInfo->pBtStackHCIDev != NULL);
+ /* mark that we are registered */
+ pHcidevInfo->HciRegistered = TRUE;
+ if ((err = hci_register_dev(pHcidevInfo->pBtStackHCIDev)) < 0) {
+ AR_DEBUG_PRINTF(ATH_DEBUG_ERR, ("HCI Bridge: failed to register with bluetooth %d\n",err));
+ pHcidevInfo->HciRegistered = FALSE;
+ status = A_ERROR;
+ break;
+ }
+
+ AR_DEBUG_PRINTF(ATH_DEBUG_HCI_BRIDGE, ("HCI Bridge: HCI registered \n"));
+
+ } while (FALSE);
+
+ return status;
+}
+
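+/* Map the HCI transport packet type to the corresponding BlueZ packet type
+ * and push the skb up the stack with hci_recv_frame(). Returns TRUE if the
+ * BT stack took ownership of the buffer. */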
+static A_BOOL bt_indicate_recv(AR6K_HCI_BRIDGE_INFO *pHcidevInfo,
+ HCI_TRANSPORT_PACKET_TYPE Type,
+ struct sk_buff *skb)
+{
+ A_UINT8 btType;
+ int len;
+ A_BOOL success = FALSE;
+ BT_HCI_EVENT_HEADER *pEvent;
+
+ do {
+
+ if (!test_bit(HCI_RUNNING, &pHcidevInfo->pBtStackHCIDev->flags)) {
+ AR_DEBUG_PRINTF(ATH_DEBUG_WARN, ("HCI Bridge: bt_indicate_recv - not running\n"));
+ break;
+ }
+
+ switch (Type) {
+ case HCI_ACL_TYPE:
+ btType = HCI_ACLDATA_PKT;
+ break;
+ case HCI_EVENT_TYPE:
+ btType = HCI_EVENT_PKT;
+ break;
+ default:
+ btType = 0;
+ A_ASSERT(FALSE);
+ break;
+ }
+
+ if (0 == btType) {
+ break;
+ }
+
+ /* set the final type */
+ bt_cb(skb)->pkt_type = btType;
+ /* set dev */
+ skb->dev = (void *)pHcidevInfo->pBtStackHCIDev;
+ len = skb->len;
+
+ if (AR_DEBUG_LVL_CHECK(ATH_DEBUG_HCI_RECV)) {
+ if (bt_cb(skb)->pkt_type == HCI_EVENT_PKT) {
+ pEvent = (BT_HCI_EVENT_HEADER *)skb->data;
+ AR_DEBUG_PRINTF(ATH_DEBUG_HCI_RECV, ("BT HCI EventCode: %d, len:%d \n",
+ pEvent->EventCode, pEvent->ParamLength));
+ }
+ }
+
+ /* pass receive packet up the stack */
+ if (hci_recv_frame(skb) != 0) {
+ AR_DEBUG_PRINTF(ATH_DEBUG_ERR, ("HCI Bridge: hci_recv_frame failed \n"));
+ break;
+ } else {
+ AR_DEBUG_PRINTF(ATH_DEBUG_HCI_RECV,
+ ("HCI Bridge: Indicated RCV of type:%d, Length:%d \n",btType,len));
+ }
+
+ success = TRUE;
+
+ } while (FALSE);
+
+ return success;
+}
+
+static struct sk_buff* bt_alloc_buffer(AR6K_HCI_BRIDGE_INFO *pHcidevInfo, int Length)
+{
+ struct sk_buff *skb;
+ /* in normal HCI mode we need to alloc from the bt core APIs */
+ skb = bt_skb_alloc(Length, GFP_ATOMIC);
+ if (NULL == skb) {
+ AR_DEBUG_PRINTF(ATH_DEBUG_ERR,("Failed to alloc bt sk_buff \n"));
+ }
+ return skb;
+}
+
+static void bt_free_buffer(AR6K_HCI_BRIDGE_INFO *pHcidevInfo, struct sk_buff *skb)
+{
+ kfree_skb(skb);
+}
+
+#else // { CONFIG_BLUEZ_HCI_BRIDGE
+
+ /* stubs when we only want to test the HCI bridging interface without the BT stack */
+static A_STATUS bt_setup_hci(AR6K_HCI_BRIDGE_INFO *pHcidevInfo)
+{
+ return A_OK;
+}
+static void bt_cleanup_hci(AR6K_HCI_BRIDGE_INFO *pHcidevInfo)
+{
+
+}
+static A_STATUS bt_register_hci(AR6K_HCI_BRIDGE_INFO *pHcidevInfo)
+{
+ A_ASSERT(FALSE);
+ return A_ERROR;
+}
+
+static A_BOOL bt_indicate_recv(AR6K_HCI_BRIDGE_INFO *pHcidevInfo,
+ HCI_TRANSPORT_PACKET_TYPE Type,
+ struct sk_buff *skb)
+{
+ A_ASSERT(FALSE);
+ return FALSE;
+}
+
+static struct sk_buff* bt_alloc_buffer(AR6K_HCI_BRIDGE_INFO *pHcidevInfo, int Length)
+{
+ A_ASSERT(FALSE);
+ return NULL;
+}
+static void bt_free_buffer(AR6K_HCI_BRIDGE_INFO *pHcidevInfo, struct sk_buff *skb)
+{
+ A_ASSERT(FALSE);
+}
+
+#endif // } CONFIG_BLUEZ_HCI_BRIDGE
+
+#else // { ATH_AR6K_ENABLE_GMBOX
+
+ /* stubs when GMBOX support is not needed */
+
+#ifdef EXPORT_HCI_BRIDGE_INTERFACE
+A_STATUS ar6000_setup_hci(void *ar)
+#else
+A_STATUS ar6000_setup_hci(AR_SOFTC_T *ar)
+#endif
+{
+ return A_OK;
+}
+
+#ifdef EXPORT_HCI_BRIDGE_INTERFACE
+void ar6000_cleanup_hci(void *ar)
+#else
+void ar6000_cleanup_hci(AR_SOFTC_T *ar)
+#endif
+{
+ return;
+}
+
+#ifndef EXPORT_HCI_BRIDGE_INTERFACE
+void ar6000_set_default_ar3kconfig(AR_SOFTC_T *ar, void *ar3kconfig)
+{
+ return;
+}
+#endif
+
+#ifdef EXPORT_HCI_BRIDGE_INTERFACE
+int hci_test_send(void *ar, struct sk_buff *skb)
+#else
+int hci_test_send(AR_SOFTC_T *ar, struct sk_buff *skb)
+#endif
+{
+ return -EOPNOTSUPP;
+}
+
+#endif // } ATH_AR6K_ENABLE_GMBOX
+
+
+#ifdef EXPORT_HCI_BRIDGE_INTERFACE
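+/* When built as a stand-alone module, register the bridge's setup/cleanup
+ * callbacks with the core ar6000 driver at load time. */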
+static int __init
+hcibridge_init_module(void)
+{
+ A_STATUS status;
+ HCI_TRANSPORT_CALLBACKS hciTransCallbacks;
+
+ hciTransCallbacks.setupTransport = ar6000_setup_hci;
+ hciTransCallbacks.cleanupTransport = ar6000_cleanup_hci;
+
+ status = ar6000_register_hci_transport(&hciTransCallbacks);
+ if(status != A_OK)
+ return -ENODEV;
+
+ return 0;
+}
+
+static void __exit
+hcibridge_cleanup_module(void)
+{
+}
+
+module_init(hcibridge_init_module);
+module_exit(hcibridge_cleanup_module);
+MODULE_LICENSE("Dual BSD/GPL");
+#endif
diff --git a/drivers/staging/ath6kl/os/linux/include/ar6000_drv.h b/drivers/staging/ath6kl/os/linux/include/ar6000_drv.h
new file mode 100644
index 000000000000..e6248830b7ef
--- /dev/null
+++ b/drivers/staging/ath6kl/os/linux/include/ar6000_drv.h
@@ -0,0 +1,762 @@
+//------------------------------------------------------------------------------
+// Copyright (c) 2004-2010 Atheros Communications Inc.
+// All rights reserved.
+//
+//
+//
+// Permission to use, copy, modify, and/or distribute this software for any
+// purpose with or without fee is hereby granted, provided that the above
+// copyright notice and this permission notice appear in all copies.
+//
+// THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+// WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+// MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+// ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+// WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+// ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+// OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+//
+//
+//
+// Author(s): ="Atheros"
+//------------------------------------------------------------------------------
+
+#ifndef _AR6000_H_
+#define _AR6000_H_
+
+#include <linux/init.h>
+#include <linux/sched.h>
+#include <linux/spinlock.h>
+#include <linux/if_ether.h>
+#include <linux/etherdevice.h>
+#include <net/iw_handler.h>
+#include <linux/if_arp.h>
+#include <linux/ip.h>
+#include <linux/wireless.h>
+#ifdef ATH6K_CONFIG_CFG80211
+#include <net/cfg80211.h>
+#endif /* ATH6K_CONFIG_CFG80211 */
+#include <linux/module.h>
+#include <asm/io.h>
+
+#include <a_config.h>
+#include <athdefs.h>
+#include "a_types.h"
+#include "a_osapi.h"
+#include "htc_api.h"
+#include "wmi.h"
+#include "a_drv.h"
+#include "bmi.h"
+#include <ieee80211.h>
+#include <ieee80211_ioctl.h>
+#include <wlan_api.h>
+#include <wmi_api.h>
+#include "gpio_api.h"
+#include "gpio.h"
+#include "pkt_log.h"
+#include "aggr_recv_api.h"
+#include <host_version.h>
+#include <linux/rtnetlink.h>
+#include <linux/init.h>
+#include <linux/moduleparam.h>
+#include "ar6000_api.h"
+#ifdef CONFIG_HOST_TCMD_SUPPORT
+#include <testcmd.h>
+#endif
+#include <linux/firmware.h>
+
+#include "targaddrs.h"
+#include "dbglog_api.h"
+#include "ar6000_diag.h"
+#include "common_drv.h"
+#include "roaming.h"
+#include "hci_transport_api.h"
+#define ATH_MODULE_NAME driver
+#include "a_debug.h"
+#include "hw/apb_map.h"
+#include "hw/rtc_reg.h"
+#include "hw/mbox_reg.h"
+#include "hw/gpio_reg.h"
+
+#define ATH_DEBUG_DBG_LOG ATH_DEBUG_MAKE_MODULE_MASK(0)
+#define ATH_DEBUG_WLAN_CONNECT ATH_DEBUG_MAKE_MODULE_MASK(1)
+#define ATH_DEBUG_WLAN_SCAN ATH_DEBUG_MAKE_MODULE_MASK(2)
+#define ATH_DEBUG_WLAN_TX ATH_DEBUG_MAKE_MODULE_MASK(3)
+#define ATH_DEBUG_WLAN_RX ATH_DEBUG_MAKE_MODULE_MASK(4)
+#define ATH_DEBUG_HTC_RAW ATH_DEBUG_MAKE_MODULE_MASK(5)
+#define ATH_DEBUG_HCI_BRIDGE ATH_DEBUG_MAKE_MODULE_MASK(6)
+#define ATH_DEBUG_HCI_RECV ATH_DEBUG_MAKE_MODULE_MASK(7)
+#define ATH_DEBUG_HCI_SEND ATH_DEBUG_MAKE_MODULE_MASK(8)
+#define ATH_DEBUG_HCI_DUMP ATH_DEBUG_MAKE_MODULE_MASK(9)
+
+#ifndef __dev_put
+#define __dev_put(dev) dev_put(dev)
+#endif
+
+
+#ifdef USER_KEYS
+
+#define USER_SAVEDKEYS_STAT_INIT 0
+#define USER_SAVEDKEYS_STAT_RUN 1
+
+// TODO this needs to move into the AR_SOFTC struct
+struct USER_SAVEDKEYS {
+ struct ieee80211req_key ucast_ik;
+ struct ieee80211req_key bcast_ik;
+ CRYPTO_TYPE keyType;
+ A_BOOL keyOk;
+};
+#endif
+
+#define DBG_INFO 0x00000001
+#define DBG_ERROR 0x00000002
+#define DBG_WARNING 0x00000004
+#define DBG_SDIO 0x00000008
+#define DBG_HIF 0x00000010
+#define DBG_HTC 0x00000020
+#define DBG_WMI 0x00000040
+#define DBG_WMI2 0x00000080
+#define DBG_DRIVER 0x00000100
+
+#define DBG_DEFAULTS (DBG_ERROR|DBG_WARNING)
+
+
+A_STATUS ar6000_ReadRegDiag(HIF_DEVICE *hifDevice, A_UINT32 *address, A_UINT32 *data);
+A_STATUS ar6000_WriteRegDiag(HIF_DEVICE *hifDevice, A_UINT32 *address, A_UINT32 *data);
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#define MAX_AR6000 1
+#define AR6000_MAX_RX_BUFFERS 16
+#define AR6000_BUFFER_SIZE 1664
+#define AR6000_MAX_AMSDU_RX_BUFFERS 4
+#define AR6000_AMSDU_REFILL_THRESHOLD 3
+#define AR6000_AMSDU_BUFFER_SIZE (WMI_MAX_AMSDU_RX_DATA_FRAME_LENGTH + 128)
+#define AR6000_MAX_RX_MESSAGE_SIZE (max(WMI_MAX_NORMAL_RX_DATA_FRAME_LENGTH,WMI_MAX_AMSDU_RX_DATA_FRAME_LENGTH))
+
+#define AR6000_TX_TIMEOUT 10
+#define AR6000_ETH_ADDR_LEN 6
+#define AR6000_MAX_ENDPOINTS 4
+#define MAX_NODE_NUM 15
+/* MAX_HI_COOKIE_NUM are reserved for high priority traffic */
+#define MAX_DEF_COOKIE_NUM 180
+#define MAX_HI_COOKIE_NUM 18 /* 10% of MAX_COOKIE_NUM */
+#define MAX_COOKIE_NUM (MAX_DEF_COOKIE_NUM + MAX_HI_COOKIE_NUM)
+
+/* MAX_DEFAULT_SEND_QUEUE_DEPTH is used to set the default queue depth for the
+ * WMM send queues. If a queue exceeds this depth htc will query back to the
+ * OS specific layer by calling EpSendFull(). This gives the OS layer the
+ * opportunity to drop the packet if desired. Therefore changing
+ * MAX_DEFAULT_SEND_QUEUE_DEPTH does not affect resource utilization but
+ * does impact the threshold used to identify if a packet should be
+ * dropped. */
+#define MAX_DEFAULT_SEND_QUEUE_DEPTH (MAX_DEF_COOKIE_NUM / WMM_NUM_AC)
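+/* With the defaults above this works out to 198 cookies in total and, assuming
+ * WMM_NUM_AC is 4, a per-AC send queue depth threshold of 180 / 4 = 45. */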
+
+#define AR6000_HB_CHALLENGE_RESP_FREQ_DEFAULT 1
+#define AR6000_HB_CHALLENGE_RESP_MISS_THRES_DEFAULT 1
+#define A_DISCONNECT_TIMER_INTERVAL 10 * 1000
+#define A_DEFAULT_LISTEN_INTERVAL 100
+#define A_MAX_WOW_LISTEN_INTERVAL 1000
+
+enum {
+ DRV_HB_CHALLENGE = 0,
+ APP_HB_CHALLENGE
+};
+
+enum {
+ WLAN_INIT_MODE_NONE = 0,
+ WLAN_INIT_MODE_USR,
+ WLAN_INIT_MODE_UDEV,
+ WLAN_INIT_MODE_DRV
+};
+
+/* Suspend - configuration */
+enum {
+ WLAN_SUSPEND_CUT_PWR = 0,
+ WLAN_SUSPEND_DEEP_SLEEP,
+ WLAN_SUSPEND_WOW,
+ WLAN_SUSPEND_CUT_PWR_IF_BT_OFF
+};
+
+/* WiFi OFF - configuration */
+enum {
+ WLAN_OFF_CUT_PWR = 0,
+ WLAN_OFF_DEEP_SLEEP,
+};
+
+/* WLAN low power state */
+enum {
+ WLAN_POWER_STATE_ON = 0,
+ WLAN_POWER_STATE_CUT_PWR = 1,
+ WLAN_POWER_STATE_DEEP_SLEEP,
+ WLAN_POWER_STATE_WOW
+};
+
+/* WLAN WoW State */
+enum {
+ WLAN_WOW_STATE_NONE = 0,
+ WLAN_WOW_STATE_SUSPENDED,
+ WLAN_WOW_STATE_SUSPENDING
+};
+
+
+typedef enum _AR6K_BIN_FILE {
+ AR6K_OTP_FILE,
+ AR6K_FIRMWARE_FILE,
+ AR6K_PATCH_FILE,
+ AR6K_BOARD_DATA_FILE,
+} AR6K_BIN_FILE;
+
+#ifdef SETUPHCI_ENABLED
+#define SETUPHCI_DEFAULT 1
+#else
+#define SETUPHCI_DEFAULT 0
+#endif /* SETUPHCI_ENABLED */
+
+#ifdef SETUPHCIPAL_ENABLED
+#define SETUPHCIPAL_DEFAULT 1
+#else
+#define SETUPHCIPAL_DEFAULT 0
+#endif /* SETUPHCIPAL_ENABLED */
+
+#ifdef SETUPBTDEV_ENABLED
+#define SETUPBTDEV_DEFAULT 1
+#else
+#define SETUPBTDEV_DEFAULT 0
+#endif /* SETUPBTDEV_ENABLED */
+
+#ifdef BMIENABLE_SET
+#define BMIENABLE_DEFAULT 1
+#else
+#define BMIENABLE_DEFAULT 0
+#endif /* BMIENABLE_SET */
+
+#ifdef ENABLEUARTPRINT_SET
+#define ENABLEUARTPRINT_DEFAULT 1
+#else
+#define ENABLEUARTPRINT_DEFAULT 0
+#endif /* ENABLEUARTPRINT_SET */
+
+#ifdef ATH6K_CONFIG_HIF_VIRTUAL_SCATTER
+#define NOHIFSCATTERSUPPORT_DEFAULT 1
+#else /* ATH6K_CONFIG_HIF_VIRTUAL_SCATTER */
+#define NOHIFSCATTERSUPPORT_DEFAULT 0
+#endif /* ATH6K_CONFIG_HIF_VIRTUAL_SCATTER */
+
+#ifdef AR600x_BT_AR3001
+#define AR3KHCIBAUD_DEFAULT 3000000
+#define HCIUARTSCALE_DEFAULT 1
+#define HCIUARTSTEP_DEFAULT 8937
+#else
+#define AR3KHCIBAUD_DEFAULT 0
+#define HCIUARTSCALE_DEFAULT 0
+#define HCIUARTSTEP_DEFAULT 0
+#endif /* AR600x_BT_AR3001 */
+
+#ifdef INIT_MODE_DRV_ENABLED
+#define WLAN_INIT_MODE_DEFAULT WLAN_INIT_MODE_DRV
+#else
+#define WLAN_INIT_MODE_DEFAULT WLAN_INIT_MODE_USR
+#endif /* INIT_MODE_DRV_ENABLED */
+
+#define AR6K_PATCH_DOWNLOAD_ADDRESS(_param, _ver) do { \
+ if ((_ver) == AR6003_REV1_VERSION) { \
+ (_param) = AR6003_REV1_PATCH_DOWNLOAD_ADDRESS; \
+ } else if ((_ver) == AR6003_REV2_VERSION) { \
+ (_param) = AR6003_REV2_PATCH_DOWNLOAD_ADDRESS; \
+ } else { \
+ AR_DEBUG_PRINTF(ATH_DEBUG_ERR, ("Unknown Version: %d\n", _ver)); \
+ A_ASSERT(0); \
+ } \
+} while (0)
+
+#define AR6K_DATA_DOWNLOAD_ADDRESS(_param, _ver) do { \
+ if ((_ver) == AR6003_REV1_VERSION) { \
+ (_param) = AR6003_REV1_DATA_DOWNLOAD_ADDRESS; \
+ } else if ((_ver) == AR6003_REV2_VERSION) { \
+ (_param) = AR6003_REV2_DATA_DOWNLOAD_ADDRESS; \
+ } else { \
+ AR_DEBUG_PRINTF(ATH_DEBUG_ERR, ("Unknown Version: %d\n", _ver)); \
+ A_ASSERT(0); \
+ } \
+} while (0)
+
+#define AR6K_APP_START_OVERRIDE_ADDRESS(_param, _ver) do { \
+ if ((_ver) == AR6003_REV1_VERSION) { \
+ (_param) = AR6003_REV1_APP_START_OVERRIDE; \
+ } else if ((_ver) == AR6003_REV2_VERSION) { \
+ (_param) = AR6003_REV2_APP_START_OVERRIDE; \
+ } else { \
+ AR_DEBUG_PRINTF(ATH_DEBUG_ERR, ("Unknown Version: %d\n", _ver)); \
+ A_ASSERT(0); \
+ } \
+} while (0)
+
+/* AR6003 1.0 definitions */
+#define AR6003_REV1_VERSION 0x300002ba
+#define AR6003_REV1_DATA_DOWNLOAD_ADDRESS AR6003_REV1_OTP_DATA_ADDRESS
+#define AR6003_REV1_PATCH_DOWNLOAD_ADDRESS 0x57ea6c
+#define AR6003_REV1_OTP_FILE "ath6k/AR6003/hw1.0/otp.bin.z77"
+#define AR6003_REV1_FIRMWARE_FILE "ath6k/AR6003/hw1.0/athwlan.bin.z77"
+#define AR6003_REV1_TCMD_FIRMWARE_FILE "ath6k/AR6003/hw1.0/athtcmd_ram.bin"
+#define AR6003_REV1_ART_FIRMWARE_FILE "ath6k/AR6003/hw1.0/device.bin"
+#define AR6003_REV1_PATCH_FILE "ath6k/AR6003/hw1.0/data.patch.bin"
+#define AR6003_REV1_EPPING_FIRMWARE_FILE "ath6k/AR6003/hw1.0/endpointping.bin"
+#ifdef AR600x_SD31_XXX
+#define AR6003_REV1_BOARD_DATA_FILE "ath6k/AR6003/hw1.0/bdata.SD31.bin"
+#elif defined(AR600x_SD32_XXX)
+#define AR6003_REV1_BOARD_DATA_FILE "ath6k/AR6003/hw1.0/bdata.SD32.bin"
+#elif defined(AR600x_WB31_XXX)
+#define AR6003_REV1_BOARD_DATA_FILE "ath6k/AR6003/hw1.0/bdata.WB31.bin"
+#else
+#define AR6003_REV1_BOARD_DATA_FILE "ath6k/AR6003/hw1.0/bdata.CUSTOM.bin"
+#endif /* Board Data File */
+
+/* AR6003 2.0 definitions */
+#define AR6003_REV2_VERSION 0x30000384
+#define AR6003_REV2_DATA_DOWNLOAD_ADDRESS AR6003_REV2_OTP_DATA_ADDRESS
+#define AR6003_REV2_PATCH_DOWNLOAD_ADDRESS 0x57e910
+#define AR6003_REV2_OTP_FILE "ath6k/AR6003/hw2.0/otp.bin.z77"
+#define AR6003_REV2_FIRMWARE_FILE "ath6k/AR6003/hw2.0/athwlan.bin.z77"
+#define AR6003_REV2_TCMD_FIRMWARE_FILE "ath6k/AR6003/hw2.0/athtcmd_ram.bin"
+#define AR6003_REV2_ART_FIRMWARE_FILE "ath6k/AR6003/hw2.0/device.bin"
+#define AR6003_REV2_PATCH_FILE "ath6k/AR6003/hw2.0/data.patch.bin"
+#define AR6003_REV2_EPPING_FIRMWARE_FILE "ath6k/AR6003/hw2.0/endpointping.bin"
+#ifdef AR600x_SD31_XXX
+#define AR6003_REV2_BOARD_DATA_FILE "ath6k/AR6003/hw2.0/bdata.SD31.bin"
+#elif defined(AR600x_SD32_XXX)
+#define AR6003_REV2_BOARD_DATA_FILE "ath6k/AR6003/hw2.0/bdata.SD32.bin"
+#elif defined(AR600x_WB31_XXX)
+#define AR6003_REV2_BOARD_DATA_FILE "ath6k/AR6003/hw2.0/bdata.WB31.bin"
+#else
+#define AR6003_REV2_BOARD_DATA_FILE "ath6k/AR6003/hw2.0/bdata.CUSTOM.bin"
+#endif /* Board Data File */
+
+/* Power states */
+enum {
+ WLAN_PWR_CTRL_UP = 0,
+ WLAN_PWR_CTRL_CUT_PWR,
+ WLAN_PWR_CTRL_DEEP_SLEEP,
+ WLAN_PWR_CTRL_WOW,
+ WLAN_PWR_CTRL_DEEP_SLEEP_DISABLED
+};
+
+/* HTC RAW streams */
+typedef enum _HTC_RAW_STREAM_ID {
+ HTC_RAW_STREAM_NOT_MAPPED = -1,
+ HTC_RAW_STREAM_0 = 0,
+ HTC_RAW_STREAM_1 = 1,
+ HTC_RAW_STREAM_2 = 2,
+ HTC_RAW_STREAM_3 = 3,
+ HTC_RAW_STREAM_NUM_MAX
+} HTC_RAW_STREAM_ID;
+
+#define RAW_HTC_READ_BUFFERS_NUM 4
+#define RAW_HTC_WRITE_BUFFERS_NUM 4
+
+#define HTC_RAW_BUFFER_SIZE 1664
+
+typedef struct {
+ int currPtr;
+ int length;
+ unsigned char data[HTC_RAW_BUFFER_SIZE];
+ HTC_PACKET HTCPacket;
+} raw_htc_buffer;
+
+#ifdef CONFIG_HOST_TCMD_SUPPORT
+/*
+ * add TCMD_MODE besides wmi and bypasswmi
+ * in TCMD_MODE, only a few TCMD-related wmi commands
+ * can be handled
+ */
+enum {
+ AR6000_WMI_MODE = 0,
+ AR6000_BYPASS_MODE,
+ AR6000_TCMD_MODE,
+ AR6000_WLAN_MODE
+};
+#endif /* CONFIG_HOST_TCMD_SUPPORT */
+
+struct ar_wep_key {
+ A_UINT8 arKeyIndex;
+ A_UINT8 arKeyLen;
+ A_UINT8 arKey[64];
+} ;
+
+#ifdef ATH6K_CONFIG_CFG80211
+struct ar_key {
+ A_UINT8 key[WLAN_MAX_KEY_LEN];
+ A_UINT8 key_len;
+ A_UINT8 seq[IW_ENCODE_SEQ_MAX_SIZE];
+ A_UINT8 seq_len;
+ A_UINT32 cipher;
+};
+#endif /* ATH6K_CONFIG_CFG80211 */
+
+
+struct ar_node_mapping {
+ A_UINT8 macAddress[6];
+ A_UINT8 epId;
+ A_UINT8 txPending;
+};
+
+struct ar_cookie {
+ unsigned long arc_bp[2]; /* Must be first field */
+ HTC_PACKET HtcPkt; /* HTC packet wrapper */
+ struct ar_cookie *arc_list_next;
+};
+
+struct ar_hb_chlng_resp {
+ A_TIMER timer;
+ A_UINT32 frequency;
+ A_UINT32 seqNum;
+ A_BOOL outstanding;
+ A_UINT8 missCnt;
+ A_UINT8 missThres;
+};
+
+/* Per STA data, used in AP mode */
+/*TODO: All this should move to OS independent dir */
+
+#define STA_PWR_MGMT_MASK 0x1
+#define STA_PWR_MGMT_SHIFT 0x0
+#define STA_PWR_MGMT_AWAKE 0x0
+#define STA_PWR_MGMT_SLEEP 0x1
+
+#define STA_SET_PWR_SLEEP(sta) (sta->flags |= (STA_PWR_MGMT_MASK << STA_PWR_MGMT_SHIFT))
+#define STA_CLR_PWR_SLEEP(sta) (sta->flags &= ~(STA_PWR_MGMT_MASK << STA_PWR_MGMT_SHIFT))
+#define STA_IS_PWR_SLEEP(sta) ((sta->flags >> STA_PWR_MGMT_SHIFT) & STA_PWR_MGMT_MASK)
+
+#define STA_PS_POLLED_MASK 0x1
+#define STA_PS_POLLED_SHIFT 0x1
+#define STA_SET_PS_POLLED(sta) (sta->flags |= (STA_PS_POLLED_MASK << STA_PS_POLLED_SHIFT))
+#define STA_CLR_PS_POLLED(sta) (sta->flags &= ~(STA_PS_POLLED_MASK << STA_PS_POLLED_SHIFT))
+#define STA_IS_PS_POLLED(sta) (sta->flags & (STA_PS_POLLED_MASK << STA_PS_POLLED_SHIFT))
+
+typedef struct {
+ A_UINT16 flags;
+ A_UINT8 mac[ATH_MAC_LEN];
+ A_UINT8 aid;
+ A_UINT8 keymgmt;
+ A_UINT8 ucipher;
+ A_UINT8 auth;
+ A_UINT8 wpa_ie[IEEE80211_MAX_IE];
+ A_NETBUF_QUEUE_T psq; /* power save q */
+ A_MUTEX_T psqLock;
+} sta_t;
+
+typedef struct ar6_raw_htc {
+ HTC_ENDPOINT_ID arRaw2EpMapping[HTC_RAW_STREAM_NUM_MAX];
+ HTC_RAW_STREAM_ID arEp2RawMapping[ENDPOINT_MAX];
+ struct semaphore raw_htc_read_sem[HTC_RAW_STREAM_NUM_MAX];
+ struct semaphore raw_htc_write_sem[HTC_RAW_STREAM_NUM_MAX];
+ wait_queue_head_t raw_htc_read_queue[HTC_RAW_STREAM_NUM_MAX];
+ wait_queue_head_t raw_htc_write_queue[HTC_RAW_STREAM_NUM_MAX];
+ raw_htc_buffer raw_htc_read_buffer[HTC_RAW_STREAM_NUM_MAX][RAW_HTC_READ_BUFFERS_NUM];
+ raw_htc_buffer raw_htc_write_buffer[HTC_RAW_STREAM_NUM_MAX][RAW_HTC_WRITE_BUFFERS_NUM];
+ A_BOOL write_buffer_available[HTC_RAW_STREAM_NUM_MAX];
+ A_BOOL read_buffer_available[HTC_RAW_STREAM_NUM_MAX];
+} AR_RAW_HTC_T;
+
+typedef struct ar6_softc {
+ struct net_device *arNetDev; /* net_device pointer */
+ void *arWmi;
+ int arTxPending[ENDPOINT_MAX];
+ int arTotalTxDataPending;
+ A_UINT8 arNumDataEndPts;
+ A_BOOL arWmiEnabled;
+ A_BOOL arWmiReady;
+ A_BOOL arConnected;
+ HTC_HANDLE arHtcTarget;
+ void *arHifDevice;
+ spinlock_t arLock;
+ struct semaphore arSem;
+ int arSsidLen;
+ u_char arSsid[32];
+ A_UINT8 arNextMode;
+ A_UINT8 arNetworkType;
+ A_UINT8 arDot11AuthMode;
+ A_UINT8 arAuthMode;
+ A_UINT8 arPairwiseCrypto;
+ A_UINT8 arPairwiseCryptoLen;
+ A_UINT8 arGroupCrypto;
+ A_UINT8 arGroupCryptoLen;
+ A_UINT8 arDefTxKeyIndex;
+ struct ar_wep_key arWepKeyList[WMI_MAX_KEY_INDEX + 1];
+ A_UINT8 arBssid[6];
+ A_UINT8 arReqBssid[6];
+ A_UINT16 arChannelHint;
+ A_UINT16 arBssChannel;
+ A_UINT16 arListenIntervalB;
+ A_UINT16 arListenIntervalT;
+ struct ar6000_version arVersion;
+ A_UINT32 arTargetType;
+ A_INT8 arRssi;
+ A_UINT8 arTxPwr;
+ A_BOOL arTxPwrSet;
+ A_INT32 arBitRate;
+ struct net_device_stats arNetStats;
+ struct iw_statistics arIwStats;
+ A_INT8 arNumChannels;
+ A_UINT16 arChannelList[32];
+ A_UINT32 arRegCode;
+ A_BOOL statsUpdatePending;
+ TARGET_STATS arTargetStats;
+ A_INT8 arMaxRetries;
+ A_UINT8 arPhyCapability;
+#ifdef CONFIG_HOST_TCMD_SUPPORT
+ A_UINT8 tcmdRxReport;
+ A_UINT32 tcmdRxTotalPkt;
+ A_INT32 tcmdRxRssi;
+ A_UINT32 tcmdPm;
+ A_UINT32 arTargetMode;
+ A_UINT32 tcmdRxcrcErrPkt;
+ A_UINT32 tcmdRxsecErrPkt;
+ A_UINT16 tcmdRateCnt[TCMD_MAX_RATES];
+ A_UINT16 tcmdRateCntShortGuard[TCMD_MAX_RATES];
+#endif
+ AR6000_WLAN_STATE arWlanState;
+ struct ar_node_mapping arNodeMap[MAX_NODE_NUM];
+ A_UINT8 arIbssPsEnable;
+ A_UINT8 arNodeNum;
+ A_UINT8 arNexEpId;
+ struct ar_cookie *arCookieList;
+ A_UINT32 arCookieCount;
+ A_UINT32 arRateMask;
+ A_UINT8 arSkipScan;
+ A_UINT16 arBeaconInterval;
+ A_BOOL arConnectPending;
+ A_BOOL arWmmEnabled;
+ struct ar_hb_chlng_resp arHBChallengeResp;
+ A_UINT8 arKeepaliveConfigured;
+ A_UINT32 arMgmtFilter;
+ HTC_ENDPOINT_ID arAc2EpMapping[WMM_NUM_AC];
+ A_BOOL arAcStreamActive[WMM_NUM_AC];
+ A_UINT8 arAcStreamPriMap[WMM_NUM_AC];
+ A_UINT8 arHiAcStreamActivePri;
+ A_UINT8 arEp2AcMapping[ENDPOINT_MAX];
+ HTC_ENDPOINT_ID arControlEp;
+#ifdef HTC_RAW_INTERFACE
+ AR_RAW_HTC_T *arRawHtc;
+#endif
+ A_BOOL arNetQueueStopped;
+ A_BOOL arRawIfInit;
+ int arDeviceIndex;
+ COMMON_CREDIT_STATE_INFO arCreditStateInfo;
+ A_BOOL arWMIControlEpFull;
+ A_BOOL dbgLogFetchInProgress;
+ A_UCHAR log_buffer[DBGLOG_HOST_LOG_BUFFER_SIZE];
+ A_UINT32 log_cnt;
+ A_UINT32 dbglog_init_done;
+ A_UINT32 arConnectCtrlFlags;
+#ifdef USER_KEYS
+ A_INT32 user_savedkeys_stat;
+ A_UINT32 user_key_ctrl;
+ struct USER_SAVEDKEYS user_saved_keys;
+#endif
+ USER_RSSI_THOLD rssi_map[12];
+ A_UINT8 arUserBssFilter;
+ A_UINT16 ap_profile_flag; /* AP mode */
+ WMI_AP_ACL g_acl; /* AP mode */
+ sta_t sta_list[AP_MAX_NUM_STA]; /* AP mode */
+ A_UINT8 sta_list_index; /* AP mode */
+ struct ieee80211req_key ap_mode_bkey; /* AP mode */
+ A_NETBUF_QUEUE_T mcastpsq; /* power save q for Mcast frames */
+ A_MUTEX_T mcastpsqLock;
+ A_BOOL DTIMExpired; /* flag to indicate DTIM expired */
+ A_UINT8 intra_bss; /* enable/disable intra bss data forward */
+ void *aggr_cntxt;
+#ifndef EXPORT_HCI_BRIDGE_INTERFACE
+ void *hcidev_info;
+#endif
+ void *hcipal_info;
+ WMI_AP_MODE_STAT arAPStats;
+ A_UINT8 ap_hidden_ssid;
+ A_UINT8 ap_country_code[3];
+ A_UINT8 ap_wmode;
+ A_UINT8 ap_dtim_period;
+ A_UINT16 ap_beacon_interval;
+ A_UINT16 arRTS;
+ A_UINT16 arACS; /* AP mode - Auto Channel Selection */
+ HTC_PACKET_QUEUE amsdu_rx_buffer_queue;
+ A_BOOL bIsDestroyProgress; /* flag to indicate ar6k destroy is in progress */
+ A_TIMER disconnect_timer;
+ A_UINT8 rxMetaVersion;
+#ifdef WAPI_ENABLE
+ A_UINT8 arWapiEnable;
+#endif
+ WMI_BTCOEX_CONFIG_EVENT arBtcoexConfig;
+ WMI_BTCOEX_STATS_EVENT arBtcoexStats;
+ A_INT32 (*exitCallback)(void *config); /* generic callback at AR6K exit */
+ HIF_DEVICE_OS_DEVICE_INFO osDevInfo;
+#ifdef ATH6K_CONFIG_CFG80211
+ struct wireless_dev *wdev;
+ struct cfg80211_scan_request *scan_request;
+ struct ar_key keys[WMI_MAX_KEY_INDEX + 1];
+#endif /* ATH6K_CONFIG_CFG80211 */
+ A_UINT16 arWlanPowerState;
+ A_BOOL arWlanOff;
+#ifdef CONFIG_PM
+ A_UINT16 arWowState;
+ A_BOOL arBTOff;
+ A_BOOL arBTSharing;
+ A_UINT16 arSuspendConfig;
+ A_UINT16 arWlanOffConfig;
+ A_UINT16 arWow2Config;
+#endif
+ A_UINT8 scan_triggered;
+ WMI_SCAN_PARAMS_CMD scParams;
+#define AR_MCAST_FILTER_MAC_ADDR_SIZE 4
+ A_UINT8 mcast_filters[MAC_MAX_FILTERS_PER_LIST][AR_MCAST_FILTER_MAC_ADDR_SIZE];
+ A_UINT8 bdaddr[6];
+ A_BOOL scanSpecificSsid;
+#ifdef CONFIG_AP_VIRTUAL_ADAPTER_SUPPORT
+ void *arApDev;
+#endif
+} AR_SOFTC_T;
+
+#ifdef CONFIG_AP_VIRTUAL_ADAPTER_SUPPORT
+typedef struct {
+ struct net_device *arNetDev; /* net_device pointer */
+ AR_SOFTC_T *arDev; /* ar device pointer */
+ struct net_device *arStaNetDev; /* net_device pointer */
+} AR_VIRTUAL_INTERFACE_T;
+#endif /* CONFIG_AP_VIRTUAL_ADAPTER_SUPPORT */
+
+#ifdef ATH6K_CONFIG_CFG80211
+static inline void *ar6k_priv(struct net_device *dev)
+{
+ return (wdev_priv(dev->ieee80211_ptr));
+}
+#else
+#ifdef CONFIG_AP_VIRTUAL_ADAPTER_SUPPORT
+static inline void *ar6k_priv(struct net_device *dev)
+{
+ extern struct net_device *arApNetDev;
+
+ if (arApNetDev == dev) {
+ /* return arDev saved in virtual interface context */
+ AR_VIRTUAL_INTERFACE_T *arVirDev;
+ arVirDev = netdev_priv(dev);
+ return arVirDev->arDev;
+ } else {
+ return netdev_priv(dev);
+ }
+}
+#else
+#define ar6k_priv netdev_priv
+#endif /* CONFIG_AP_VIRTUAL_ADAPTER_SUPPORT */
+#endif /* ATH6K_CONFIG_CFG80211 */
+
+#define SET_HCI_BUS_TYPE(pHciDev, __bus, __type) do { \
+ (pHciDev)->bus = (__bus); \
+ (pHciDev)->dev_type = (__type); \
+} while(0)
+
+#define GET_INODE_FROM_FILEP(filp) \
+ (filp)->f_path.dentry->d_inode
+
+#define arAc2EndpointID(ar,ac) (ar)->arAc2EpMapping[(ac)]
+#define arSetAc2EndpointIDMap(ar,ac,ep) \
+{ (ar)->arAc2EpMapping[(ac)] = (ep); \
+ (ar)->arEp2AcMapping[(ep)] = (ac); }
+#define arEndpoint2Ac(ar,ep) (ar)->arEp2AcMapping[(ep)]
+
+#define arRawIfEnabled(ar) (ar)->arRawIfInit
+#define arRawStream2EndpointID(ar,raw) (ar)->arRawHtc->arRaw2EpMapping[(raw)]
+#define arSetRawStream2EndpointIDMap(ar,raw,ep) \
+{ (ar)->arRawHtc->arRaw2EpMapping[(raw)] = (ep); \
+ (ar)->arRawHtc->arEp2RawMapping[(ep)] = (raw); }
+#define arEndpoint2RawStreamID(ar,ep) (ar)->arRawHtc->arEp2RawMapping[(ep)]
+
+struct ar_giwscan_param {
+ char *current_ev;
+ char *end_buf;
+ A_UINT32 bytes_needed;
+ struct iw_request_info *info;
+};
+
+#define AR6000_STAT_INC(ar, stat) (ar->arNetStats.stat++)
+
+#define AR6000_SPIN_LOCK(lock, param) do { \
+ if (irqs_disabled()) { \
+ AR_DEBUG_PRINTF(ATH_DEBUG_TRC,("IRQs disabled:AR6000_LOCK\n")); \
+ } \
+ spin_lock_bh(lock); \
+} while (0)
+
+#define AR6000_SPIN_UNLOCK(lock, param) do { \
+ if (irqs_disabled()) { \
+ AR_DEBUG_PRINTF(ATH_DEBUG_TRC,("IRQs disabled: AR6000_UNLOCK\n")); \
+ } \
+ spin_unlock_bh(lock); \
+} while (0)
+
+int ar6000_ioctl(struct net_device *dev, struct ifreq *rq, int cmd);
+int ar6000_ioctl_dispatcher(struct net_device *dev, struct ifreq *rq, int cmd);
+void ar6000_gpio_init(void);
+void ar6000_init_profile_info(AR_SOFTC_T *ar);
+void ar6000_install_static_wep_keys(AR_SOFTC_T *ar);
+int ar6000_init(struct net_device *dev);
+int ar6000_dbglog_get_debug_logs(AR_SOFTC_T *ar);
+void ar6000_TxDataCleanup(AR_SOFTC_T *ar);
+int ar6000_acl_data_tx(struct sk_buff *skb, struct net_device *dev);
+void ar6000_restart_endpoint(struct net_device *dev);
+void ar6000_stop_endpoint(struct net_device *dev, A_BOOL keepprofile, A_BOOL getdbglogs);
+
+#ifdef HTC_RAW_INTERFACE
+
+#ifndef __user
+#define __user
+#endif
+
+int ar6000_htc_raw_open(AR_SOFTC_T *ar);
+int ar6000_htc_raw_close(AR_SOFTC_T *ar);
+ssize_t ar6000_htc_raw_read(AR_SOFTC_T *ar,
+ HTC_RAW_STREAM_ID StreamID,
+ char __user *buffer, size_t count);
+ssize_t ar6000_htc_raw_write(AR_SOFTC_T *ar,
+ HTC_RAW_STREAM_ID StreamID,
+ char __user *buffer, size_t count);
+
+#endif /* HTC_RAW_INTERFACE */
+
+/* AP mode */
+/*TODO: These routines should be moved to a file that is common across OS */
+sta_t *
+ieee80211_find_conn(AR_SOFTC_T *ar, A_UINT8 *node_addr);
+
+sta_t *
+ieee80211_find_conn_for_aid(AR_SOFTC_T *ar, A_UINT8 aid);
+
+A_UINT8
+remove_sta(AR_SOFTC_T *ar, A_UINT8 *mac, A_UINT16 reason);
+
+/* HCI support */
+
+#ifndef EXPORT_HCI_BRIDGE_INTERFACE
+A_STATUS ar6000_setup_hci(AR_SOFTC_T *ar);
+void ar6000_cleanup_hci(AR_SOFTC_T *ar);
+void ar6000_set_default_ar3kconfig(AR_SOFTC_T *ar, void *ar3kconfig);
+
+/* HCI bridge testing */
+A_STATUS hci_test_send(AR_SOFTC_T *ar, struct sk_buff *skb);
+#endif
+
+ATH_DEBUG_DECLARE_EXTERN(htc);
+ATH_DEBUG_DECLARE_EXTERN(wmi);
+ATH_DEBUG_DECLARE_EXTERN(bmi);
+ATH_DEBUG_DECLARE_EXTERN(hif);
+ATH_DEBUG_DECLARE_EXTERN(wlan);
+ATH_DEBUG_DECLARE_EXTERN(misc);
+
+extern A_UINT8 bcast_mac[];
+extern A_UINT8 null_mac[];
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* _AR6000_H_ */
diff --git a/drivers/staging/ath6kl/os/linux/include/ar6k_pal.h b/drivers/staging/ath6kl/os/linux/include/ar6k_pal.h
new file mode 100644
index 000000000000..a9a29a624a10
--- /dev/null
+++ b/drivers/staging/ath6kl/os/linux/include/ar6k_pal.h
@@ -0,0 +1,36 @@
+//------------------------------------------------------------------------------
+// Copyright (c) 2009-2010 Atheros Corporation. All rights reserved.
+//
+// The software source and binaries included in this development package are
+// licensed, not sold. You, or your company, received the package under one
+// or more license agreements. The rights granted to you are specifically
+// listed in these license agreement(s). All other rights remain with Atheros
+// Communications, Inc., its subsidiaries, or the respective owner including
+// those listed on the included copyright notices. Distribution of any
+// portion of this package must be in strict compliance with the license
+// agreement(s) terms.
+// </copyright>
+//
+// <summary>
+// PAL driver for AR6003
+// </summary>
+//
+//------------------------------------------------------------------------------
+//==============================================================================
+// Author(s): ="Atheros"
+//==============================================================================
+#ifndef _AR6K_PAL_H_
+#define _AR6K_PAL_H_
+#define HCI_GET_OP_CODE(p) ((((A_UINT16)((p)[1])) << 8) | ((A_UINT16)((p)[0])))
+
+/* transmit packet reserve offset */
+#define TX_PACKET_RSV_OFFSET 32
+/* pal specific config structure */
+typedef A_BOOL (*ar6k_pal_recv_pkt_t)(void *pHciPalInfo, void *skb);
+typedef struct ar6k_pal_config_s
+{
+ ar6k_pal_recv_pkt_t fpar6k_pal_recv_pkt;
+}ar6k_pal_config_t;
+
+void register_pal_cb(ar6k_pal_config_t *palConfig_p);
+#endif /* _AR6K_PAL_H_ */
diff --git a/drivers/staging/ath6kl/os/linux/include/ar6xapi_linux.h b/drivers/staging/ath6kl/os/linux/include/ar6xapi_linux.h
new file mode 100644
index 000000000000..ea2d181dcfe2
--- /dev/null
+++ b/drivers/staging/ath6kl/os/linux/include/ar6xapi_linux.h
@@ -0,0 +1,197 @@
+//------------------------------------------------------------------------------
+// Copyright (c) 2004-2010 Atheros Communications Inc.
+// All rights reserved.
+//
+//
+//
+// Permission to use, copy, modify, and/or distribute this software for any
+// purpose with or without fee is hereby granted, provided that the above
+// copyright notice and this permission notice appear in all copies.
+//
+// THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+// WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+// MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+// ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+// WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+// ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+// OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+//
+//
+//
+// Author(s): ="Atheros"
+//------------------------------------------------------------------------------
+
+#ifndef _AR6XAPI_LINUX_H
+#define _AR6XAPI_LINUX_H
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+struct ar6_softc;
+
+void ar6000_ready_event(void *devt, A_UINT8 *datap, A_UINT8 phyCap,
+ A_UINT32 sw_ver, A_UINT32 abi_ver);
+A_STATUS ar6000_control_tx(void *devt, void *osbuf, HTC_ENDPOINT_ID eid);
+void ar6000_connect_event(struct ar6_softc *ar, A_UINT16 channel,
+ A_UINT8 *bssid, A_UINT16 listenInterval,
+ A_UINT16 beaconInterval, NETWORK_TYPE networkType,
+ A_UINT8 beaconIeLen, A_UINT8 assocReqLen,
+ A_UINT8 assocRespLen,A_UINT8 *assocInfo);
+void ar6000_disconnect_event(struct ar6_softc *ar, A_UINT8 reason,
+ A_UINT8 *bssid, A_UINT8 assocRespLen,
+ A_UINT8 *assocInfo, A_UINT16 protocolReasonStatus);
+void ar6000_tkip_micerr_event(struct ar6_softc *ar, A_UINT8 keyid,
+ A_BOOL ismcast);
+void ar6000_bitrate_rx(void *devt, A_INT32 rateKbps);
+void ar6000_channelList_rx(void *devt, A_INT8 numChan, A_UINT16 *chanList);
+void ar6000_regDomain_event(struct ar6_softc *ar, A_UINT32 regCode);
+void ar6000_txPwr_rx(void *devt, A_UINT8 txPwr);
+void ar6000_keepalive_rx(void *devt, A_UINT8 configured);
+void ar6000_neighborReport_event(struct ar6_softc *ar, int numAps,
+ WMI_NEIGHBOR_INFO *info);
+void ar6000_set_numdataendpts(struct ar6_softc *ar, A_UINT32 num);
+void ar6000_scanComplete_event(struct ar6_softc *ar, A_STATUS status);
+void ar6000_targetStats_event(struct ar6_softc *ar, A_UINT8 *ptr, A_UINT32 len);
+void ar6000_rssiThreshold_event(struct ar6_softc *ar,
+ WMI_RSSI_THRESHOLD_VAL newThreshold,
+ A_INT16 rssi);
+void ar6000_reportError_event(struct ar6_softc *, WMI_TARGET_ERROR_VAL errorVal);
+void ar6000_cac_event(struct ar6_softc *ar, A_UINT8 ac, A_UINT8 cac_indication,
+ A_UINT8 statusCode, A_UINT8 *tspecSuggestion);
+void ar6000_channel_change_event(struct ar6_softc *ar, A_UINT16 oldChannel, A_UINT16 newChannel);
+void ar6000_hbChallengeResp_event(struct ar6_softc *, A_UINT32 cookie, A_UINT32 source);
+void
+ar6000_roam_tbl_event(struct ar6_softc *ar, WMI_TARGET_ROAM_TBL *pTbl);
+
+void
+ar6000_roam_data_event(struct ar6_softc *ar, WMI_TARGET_ROAM_DATA *p);
+
+void
+ar6000_wow_list_event(struct ar6_softc *ar, A_UINT8 num_filters,
+ WMI_GET_WOW_LIST_REPLY *wow_reply);
+
+void ar6000_pmkid_list_event(void *devt, A_UINT8 numPMKID,
+ WMI_PMKID *pmkidList, A_UINT8 *bssidList);
+
+void ar6000_gpio_intr_rx(A_UINT32 intr_mask, A_UINT32 input_values);
+void ar6000_gpio_data_rx(A_UINT32 reg_id, A_UINT32 value);
+void ar6000_gpio_ack_rx(void);
+
+A_INT32 rssi_compensation_calc_tcmd(A_UINT32 freq, A_INT32 rssi, A_UINT32 totalPkt);
+A_INT16 rssi_compensation_calc(struct ar6_softc *ar, A_INT16 rssi);
+A_INT16 rssi_compensation_reverse_calc(struct ar6_softc *ar, A_INT16 rssi, A_BOOL Above);
+
+void ar6000_dbglog_init_done(struct ar6_softc *ar);
+
+#ifdef SEND_EVENT_TO_APP
+void ar6000_send_event_to_app(struct ar6_softc *ar, A_UINT16 eventId, A_UINT8 *datap, int len);
+void ar6000_send_generic_event_to_app(struct ar6_softc *ar, A_UINT16 eventId, A_UINT8 *datap, int len);
+#endif
+
+#ifdef CONFIG_HOST_TCMD_SUPPORT
+void ar6000_tcmd_rx_report_event(void *devt, A_UINT8 * results, int len);
+#endif
+
+void ar6000_tx_retry_err_event(void *devt);
+
+void ar6000_snrThresholdEvent_rx(void *devt,
+ WMI_SNR_THRESHOLD_VAL newThreshold,
+ A_UINT8 snr);
+
+void ar6000_lqThresholdEvent_rx(void *devt, WMI_LQ_THRESHOLD_VAL range, A_UINT8 lqVal);
+
+
+void ar6000_ratemask_rx(void *devt, A_UINT32 ratemask);
+
+A_STATUS ar6000_get_driver_cfg(struct net_device *dev,
+ A_UINT16 cfgParam,
+ void *result);
+void ar6000_bssInfo_event_rx(struct ar6_softc *ar, A_UINT8 *data, int len);
+
+void ar6000_dbglog_event(struct ar6_softc *ar, A_UINT32 dropped,
+ A_INT8 *buffer, A_UINT32 length);
+
+int ar6000_dbglog_get_debug_logs(struct ar6_softc *ar);
+
+void ar6000_peer_event(void *devt, A_UINT8 eventCode, A_UINT8 *bssid);
+
+void ar6000_indicate_tx_activity(void *devt, A_UINT8 trafficClass, A_BOOL Active);
+HTC_ENDPOINT_ID ar6000_ac2_endpoint_id(void *devt, A_UINT8 ac);
+A_UINT8 ar6000_endpoint_id2_ac(void *devt, HTC_ENDPOINT_ID ep);
+
+void ar6000_btcoex_config_event(struct ar6_softc *ar, A_UINT8 *ptr, A_UINT32 len);
+
+void ar6000_btcoex_stats_event(struct ar6_softc *ar, A_UINT8 *ptr, A_UINT32 len);
+
+void ar6000_dset_open_req(void *devt,
+ A_UINT32 id,
+ A_UINT32 targ_handle,
+ A_UINT32 targ_reply_fn,
+ A_UINT32 targ_reply_arg);
+void ar6000_dset_close(void *devt, A_UINT32 access_cookie);
+void ar6000_dset_data_req(void *devt,
+ A_UINT32 access_cookie,
+ A_UINT32 offset,
+ A_UINT32 length,
+ A_UINT32 targ_buf,
+ A_UINT32 targ_reply_fn,
+ A_UINT32 targ_reply_arg);
+
+
+#if defined(CONFIG_TARGET_PROFILE_SUPPORT)
+void prof_count_rx(unsigned int addr, unsigned int count);
+#endif
+
+A_UINT32 ar6000_getnodeAge (void);
+
+A_UINT32 ar6000_getclkfreq (void);
+
+int ar6000_ap_mode_profile_commit(struct ar6_softc *ar);
+
+struct ieee80211req_wpaie;
+A_STATUS
+ar6000_ap_mode_get_wpa_ie(struct ar6_softc *ar, struct ieee80211req_wpaie *wpaie);
+
+A_STATUS is_iwioctl_allowed(A_UINT8 mode, A_UINT16 cmd);
+
+A_STATUS is_xioctl_allowed(A_UINT8 mode, int cmd);
+
+void ar6000_pspoll_event(struct ar6_softc *ar,A_UINT8 aid);
+
+void ar6000_dtimexpiry_event(struct ar6_softc *ar);
+
+void ar6000_aggr_rcv_addba_req_evt(struct ar6_softc *ar, WMI_ADDBA_REQ_EVENT *cmd);
+void ar6000_aggr_rcv_addba_resp_evt(struct ar6_softc *ar, WMI_ADDBA_RESP_EVENT *cmd);
+void ar6000_aggr_rcv_delba_req_evt(struct ar6_softc *ar, WMI_DELBA_EVENT *cmd);
+void ar6000_hci_event_rcv_evt(struct ar6_softc *ar, WMI_HCI_EVENT *cmd);
+
+#ifdef WAPI_ENABLE
+int ap_set_wapi_key(struct ar6_softc *ar, void *ik);
+void ap_wapi_rekey_event(struct ar6_softc *ar, A_UINT8 type, A_UINT8 *mac);
+#endif
+
+A_STATUS ar6000_connect_to_ap(struct ar6_softc *ar);
+A_STATUS ar6000_update_wlan_pwr_state(struct ar6_softc *ar, AR6000_WLAN_STATE state, A_BOOL suspending);
+A_STATUS ar6000_set_wlan_state(struct ar6_softc *ar, AR6000_WLAN_STATE state);
+A_STATUS ar6000_set_bt_hw_state(struct ar6_softc *ar, A_UINT32 state);
+
+#ifdef CONFIG_PM
+A_STATUS ar6000_suspend_ev(void *context);
+A_STATUS ar6000_resume_ev(void *context);
+A_STATUS ar6000_power_change_ev(void *context, A_UINT32 config);
+void ar6000_check_wow_status(struct ar6_softc *ar, struct sk_buff *skb, A_BOOL isEvent);
+#endif
+
+void ar6000_pm_init(void);
+void ar6000_pm_exit(void);
+
+#ifdef CONFIG_AP_VIRTUAL_ADAPTER_SUPPORT
+A_STATUS ar6000_add_ap_interface(struct ar6_softc *ar, char *ifname);
+A_STATUS ar6000_remove_ap_interface(struct ar6_softc *ar);
+#endif /* CONFIG_AP_VIRTUAL_ADAPTER_SUPPORT */
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif
diff --git a/drivers/staging/ath6kl/os/linux/include/athdrv_linux.h b/drivers/staging/ath6kl/os/linux/include/athdrv_linux.h
new file mode 100644
index 000000000000..53bbb4837d30
--- /dev/null
+++ b/drivers/staging/ath6kl/os/linux/include/athdrv_linux.h
@@ -0,0 +1,1219 @@
+//------------------------------------------------------------------------------
+// Copyright (c) 2004-2010 Atheros Communications Inc.
+// All rights reserved.
+//
+//
+//
+// Permission to use, copy, modify, and/or distribute this software for any
+// purpose with or without fee is hereby granted, provided that the above
+// copyright notice and this permission notice appear in all copies.
+//
+// THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+// WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+// MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+// ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+// WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+// ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+// OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+//
+//
+//
+// Author(s): ="Atheros"
+//------------------------------------------------------------------------------
+
+#ifndef _ATHDRV_LINUX_H
+#define _ATHDRV_LINUX_H
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+
+/*
+ * There are two types of ioctl's here: Standard ioctls and
+ * eXtended ioctls. All extended ioctls (XIOCTL) are multiplexed
+ * off of the single ioctl command, AR6000_IOCTL_EXTENDED. The
+ * arguments for every XIOCTL start with a 32-bit command word
+ * that is used to select which extended ioctl is in use. After
+ * the command word are command-specific arguments.
+ */
+
+/* Linux standard Wireless Extensions, private ioctl interfaces */
+#define IEEE80211_IOCTL_SETPARAM (SIOCIWFIRSTPRIV+0)
+#define IEEE80211_IOCTL_SETKEY (SIOCIWFIRSTPRIV+1)
+#define IEEE80211_IOCTL_DELKEY (SIOCIWFIRSTPRIV+2)
+#define IEEE80211_IOCTL_SETMLME (SIOCIWFIRSTPRIV+3)
+#define IEEE80211_IOCTL_ADDPMKID (SIOCIWFIRSTPRIV+4)
+#define IEEE80211_IOCTL_SETOPTIE (SIOCIWFIRSTPRIV+5)
+//#define IEEE80211_IOCTL_GETPARAM (SIOCIWFIRSTPRIV+6)
+//#define IEEE80211_IOCTL_SETWMMPARAMS (SIOCIWFIRSTPRIV+7)
+//#define IEEE80211_IOCTL_GETWMMPARAMS (SIOCIWFIRSTPRIV+8)
+//#define IEEE80211_IOCTL_GETOPTIE (SIOCIWFIRSTPRIV+9)
+//#define IEEE80211_IOCTL_SETAUTHALG (SIOCIWFIRSTPRIV+10)
+#define IEEE80211_IOCTL_LASTONE (SIOCIWFIRSTPRIV+10)
+
+
+
+/* ====WMI Ioctls==== */
+/*
+ *
+ * Many ioctls simply provide WMI services to application code:
+ * an application makes such an ioctl call with a set of arguments
+ * that are packaged into the corresponding WMI message, and sent
+ * to the Target.
+ */
+
+#define AR6000_IOCTL_WMI_GETREV (SIOCIWFIRSTPRIV+11)
+/*
+ * arguments:
+ * ar6000_version *revision
+ */
+
+#define AR6000_IOCTL_WMI_SETPWR (SIOCIWFIRSTPRIV+12)
+/*
+ * arguments:
+ * WMI_POWER_MODE_CMD pwrModeCmd (see include/wmi.h)
+ * uses: WMI_SET_POWER_MODE_CMDID
+ */
+
+#define AR6000_IOCTL_WMI_SETSCAN (SIOCIWFIRSTPRIV+13)
+/*
+ * arguments:
+ * WMI_SCAN_PARAMS_CMD scanParams (see include/wmi.h)
+ * uses: WMI_SET_SCAN_PARAMS_CMDID
+ */
+
+#define AR6000_IOCTL_WMI_SETLISTENINT (SIOCIWFIRSTPRIV+14)
+/*
+ * arguments:
+ * UINT32 listenInterval
+ * uses: WMI_SET_LISTEN_INT_CMDID
+ */
+
+#define AR6000_IOCTL_WMI_SETBSSFILTER (SIOCIWFIRSTPRIV+15)
+/*
+ * arguments:
+ * WMI_BSS_FILTER filter (see include/wmi.h)
+ * uses: WMI_SET_BSS_FILTER_CMDID
+ */
+
+#define AR6000_IOCTL_WMI_SET_CHANNELPARAMS (SIOCIWFIRSTPRIV+16)
+/*
+ * arguments:
+ * WMI_CHANNEL_PARAMS_CMD chParams
+ * uses: WMI_SET_CHANNEL_PARAMS_CMDID
+ */
+
+#define AR6000_IOCTL_WMI_SET_PROBEDSSID (SIOCIWFIRSTPRIV+17)
+/*
+ * arguments:
+ * WMI_PROBED_SSID_CMD probedSsids (see include/wmi.h)
+ * uses: WMI_SETPROBED_SSID_CMDID
+ */
+
+#define AR6000_IOCTL_WMI_SET_PMPARAMS (SIOCIWFIRSTPRIV+18)
+/*
+ * arguments:
+ * WMI_POWER_PARAMS_CMD powerParams (see include/wmi.h)
+ * uses: WMI_SET_POWER_PARAMS_CMDID
+ */
+
+#define AR6000_IOCTL_WMI_SET_BADAP (SIOCIWFIRSTPRIV+19)
+/*
+ * arguments:
+ * WMI_ADD_BAD_AP_CMD badAPs (see include/wmi.h)
+ * uses: WMI_ADD_BAD_AP_CMDID
+ */
+
+#define AR6000_IOCTL_WMI_GET_QOS_QUEUE (SIOCIWFIRSTPRIV+20)
+/*
+ * arguments:
+ * ar6000_queuereq queueRequest (see below)
+ */
+
+#define AR6000_IOCTL_WMI_CREATE_QOS (SIOCIWFIRSTPRIV+21)
+/*
+ * arguments:
+ * WMI_CREATE_PSTREAM createPstreamCmd (see include/wmi.h)
+ * uses: WMI_CREATE_PSTREAM_CMDID
+ */
+
+#define AR6000_IOCTL_WMI_DELETE_QOS (SIOCIWFIRSTPRIV+22)
+/*
+ * arguments:
+ * WMI_DELETE_PSTREAM_CMD deletePstreamCmd (see include/wmi.h)
+ * uses: WMI_DELETE_PSTREAM_CMDID
+ */
+
+#define AR6000_IOCTL_WMI_SET_SNRTHRESHOLD (SIOCIWFIRSTPRIV+23)
+/*
+ * arguments:
+ * WMI_SNR_THRESHOLD_PARAMS_CMD thresholdParams (see include/wmi.h)
+ * uses: WMI_SNR_THRESHOLD_PARAMS_CMDID
+ */
+
+#define AR6000_IOCTL_WMI_SET_ERROR_REPORT_BITMASK (SIOCIWFIRSTPRIV+24)
+/*
+ * arguments:
+ * WMI_TARGET_ERROR_REPORT_BITMASK errorReportBitMask (see include/wmi.h)
+ * uses: WMI_TARGET_ERROR_REPORT_BITMASK_CMDID
+ */
+
+#define AR6000_IOCTL_WMI_GET_TARGET_STATS (SIOCIWFIRSTPRIV+25)
+/*
+ * arguments:
+ * TARGET_STATS *targetStats (see below)
+ * uses: WMI_GET_STATISTICS_CMDID
+ */
+
+#define AR6000_IOCTL_WMI_SET_ASSOC_INFO (SIOCIWFIRSTPRIV+26)
+/*
+ * arguments:
+ * WMI_SET_ASSOC_INFO_CMD setAssocInfoCmd
+ * uses: WMI_SET_ASSOC_INFO_CMDID
+ */
+
+#define AR6000_IOCTL_WMI_SET_ACCESS_PARAMS (SIOCIWFIRSTPRIV+27)
+/*
+ * arguments:
+ * WMI_SET_ACCESS_PARAMS_CMD setAccessParams (see include/wmi.h)
+ * uses: WMI_SET_ACCESS_PARAMS_CMDID
+ */
+
+#define AR6000_IOCTL_WMI_SET_BMISS_TIME (SIOCIWFIRSTPRIV+28)
+/*
+ * arguments:
+ * UINT32 beaconMissTime
+ * uses: WMI_SET_BMISS_TIME_CMDID
+ */
+
+#define AR6000_IOCTL_WMI_SET_DISC_TIMEOUT (SIOCIWFIRSTPRIV+29)
+/*
+ * arguments:
+ * WMI_DISC_TIMEOUT_CMD disconnectTimeoutCmd (see include/wmi.h)
+ * uses: WMI_SET_DISC_TIMEOUT_CMDID
+ */
+
+#define AR6000_IOCTL_WMI_SET_IBSS_PM_CAPS (SIOCIWFIRSTPRIV+30)
+/*
+ * arguments:
+ * WMI_IBSS_PM_CAPS_CMD ibssPowerMgmtCapsCmd
+ * uses: WMI_SET_IBSS_PM_CAPS_CMDID
+ */
+
+/*
+ * There is a very small space available for driver-private
+ * wireless ioctls. In order to circumvent this limitation,
+ * we multiplex a bunch of ioctls (XIOCTLs) on top of a
+ * single AR6000_IOCTL_EXTENDED ioctl.
+ */
+#define AR6000_IOCTL_EXTENDED (SIOCIWFIRSTPRIV+31)
+
+
+/* ====BMI Extended Ioctls==== */
+
+#define AR6000_XIOCTL_BMI_DONE 1
+/*
+ * arguments:
+ * UINT32 cmd (AR6000_XIOCTL_BMI_DONE)
+ * uses: BMI_DONE
+ */
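A minimal user-space sketch of how these XIOCTLs are typically issued, assuming the driver reads the 32-bit command word from the start of ifr_data as described above; the socket, interface-name handling and helper below are illustrative only and are not defined by this header:

    #include <string.h>
    #include <sys/ioctl.h>
    #include <net/if.h>

    /* Issue AR6000_XIOCTL_BMI_DONE: the command word is the whole payload. */
    static int ar6000_xioctl_bmi_done(int sock, const char *ifname)
    {
        struct ifreq ifr;
        unsigned int cmd = AR6000_XIOCTL_BMI_DONE;   /* 32-bit XIOCTL command word */

        memset(&ifr, 0, sizeof(ifr));
        strncpy(ifr.ifr_name, ifname, IFNAMSIZ - 1);
        ifr.ifr_data = (char *)&cmd;                 /* driver parses the cmd word from here */

        return ioctl(sock, AR6000_IOCTL_EXTENDED, &ifr);
    }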
+
+#define AR6000_XIOCTL_BMI_READ_MEMORY 2
+/*
+ * arguments:
+ * union {
+ * struct {
+ * UINT32 cmd (AR6000_XIOCTL_BMI_READ_MEMORY)
+ * UINT32 address
+ * UINT32 length
+ * }
+ * char results[length]
+ * }
+ * uses: BMI_READ_MEMORY
+ */
+
+#define AR6000_XIOCTL_BMI_WRITE_MEMORY 3
+/*
+ * arguments:
+ * UINT32 cmd (AR6000_XIOCTL_BMI_WRITE_MEMORY)
+ * UINT32 address
+ * UINT32 length
+ * char data[length]
+ * uses: BMI_WRITE_MEMORY
+ */
+
+#define AR6000_XIOCTL_BMI_EXECUTE 4
+/*
+ * arguments:
+ * UINT32 cmd (AR6000_XIOCTL_BMI_EXECUTE)
+ * UINT32 TargetAddress
+ * UINT32 parameter
+ * uses: BMI_EXECUTE
+ */
+
+#define AR6000_XIOCTL_BMI_SET_APP_START 5
+/*
+ * arguments:
+ * UINT32 cmd (AR6000_XIOCTL_BMI_SET_APP_START)
+ * UINT32 TargetAddress
+ * uses: BMI_SET_APP_START
+ */
+
+#define AR6000_XIOCTL_BMI_READ_SOC_REGISTER 6
+/*
+ * arguments:
+ * union {
+ * struct {
+ * UINT32 cmd (AR6000_XIOCTL_BMI_READ_SOC_REGISTER)
+ * UINT32 TargetAddress, 32-bit aligned
+ * }
+ * UINT32 result
+ * }
+ * uses: BMI_READ_SOC_REGISTER
+ */
+
+#define AR6000_XIOCTL_BMI_WRITE_SOC_REGISTER 7
+/*
+ * arguments:
+ * struct {
+ * UINT32 cmd (AR6000_XIOCTL_BMI_WRITE_SOC_REGISTER)
+ * UINT32 TargetAddress, 32-bit aligned
+ * UINT32 newValue
+ * }
+ * uses: BMI_WRITE_SOC_REGISTER
+ */
+
+#define AR6000_XIOCTL_BMI_TEST 8
+/*
+ * arguments:
+ * UINT32 cmd (AR6000_XIOCTL_BMI_TEST)
+ * UINT32 address
+ * UINT32 length
+ * UINT32 count
+ */
+
+
+
+/* Historical Host-side DataSet support */
+#define AR6000_XIOCTL_UNUSED9 9
+#define AR6000_XIOCTL_UNUSED10 10
+#define AR6000_XIOCTL_UNUSED11 11
+
+/* ====Misc Extended Ioctls==== */
+
+#define AR6000_XIOCTL_FORCE_TARGET_RESET 12
+/*
+ * arguments:
+ * UINT32 cmd (AR6000_XIOCTL_FORCE_TARGET_RESET)
+ */
+
+
+#ifdef HTC_RAW_INTERFACE
+/* HTC Raw Interface Ioctls */
+#define AR6000_XIOCTL_HTC_RAW_OPEN 13
+/*
+ * arguments:
+ * UINT32 cmd (AR6000_XIOCTL_HTC_RAW_OPEN)
+ */
+
+#define AR6000_XIOCTL_HTC_RAW_CLOSE 14
+/*
+ * arguments:
+ * UINT32 cmd (AR6000_XIOCTL_HTC_RAW_CLOSE)
+ */
+
+#define AR6000_XIOCTL_HTC_RAW_READ 15
+/*
+ * arguments:
+ * union {
+ * struct {
+ * UINT32 cmd (AR6000_XIOCTL_HTC_RAW_READ)
+ * UINT32 mailboxID
+ * UINT32 length
+ * }
+ * results[length]
+ * }
+ */
+
+#define AR6000_XIOCTL_HTC_RAW_WRITE 16
+/*
+ * arguments:
+ * UINT32 cmd (AR6000_XIOCTL_HTC_RAW_WRITE)
+ * UINT32 mailboxID
+ * UINT32 length
+ * char buffer[length]
+ */
+#endif /* HTC_RAW_INTERFACE */
+
+#define AR6000_XIOCTL_CHECK_TARGET_READY 17
+/*
+ * arguments:
+ * UINT32 cmd (AR6000_XIOCTL_CHECK_TARGET_READY)
+ */
+
+
+
+/* ====GPIO (General Purpose I/O) Extended Ioctls==== */
+
+#define AR6000_XIOCTL_GPIO_OUTPUT_SET 18
+/*
+ * arguments:
+ * UINT32 cmd (AR6000_XIOCTL_GPIO_OUTPUT_SET)
+ * ar6000_gpio_output_set_cmd_s (see below)
+ * uses: WMIX_GPIO_OUTPUT_SET_CMDID
+ */
+
+#define AR6000_XIOCTL_GPIO_INPUT_GET 19
+/*
+ * arguments:
+ * UINT32 cmd (AR6000_XIOCTL_GPIO_INPUT_GET)
+ * uses: WMIX_GPIO_INPUT_GET_CMDID
+ */
+
+#define AR6000_XIOCTL_GPIO_REGISTER_SET 20
+/*
+ * arguments:
+ * UINT32 cmd (AR6000_XIOCTL_GPIO_REGISTER_SET)
+ * ar6000_gpio_register_cmd_s (see below)
+ * uses: WMIX_GPIO_REGISTER_SET_CMDID
+ */
+
+#define AR6000_XIOCTL_GPIO_REGISTER_GET 21
+/*
+ * arguments:
+ * UINT32 cmd (AR6000_XIOCTL_GPIO_REGISTER_GET)
+ * ar6000_gpio_register_cmd_s (see below)
+ * uses: WMIX_GPIO_REGISTER_GET_CMDID
+ */
+
+#define AR6000_XIOCTL_GPIO_INTR_ACK 22
+/*
+ * arguments:
+ * UINT32 cmd (AR6000_XIOCTL_GPIO_INTR_ACK)
+ * ar6000_gpio_intr_ack_cmd_s (see below)
+ * uses: WMIX_GPIO_INTR_ACK_CMDID
+ */
+
+#define AR6000_XIOCTL_GPIO_INTR_WAIT 23
+/*
+ * arguments:
+ * UINT32 cmd (AR6000_XIOCTL_GPIO_INTR_WAIT)
+ */
+
+
+
+/* ====more wireless commands==== */
+
+#define AR6000_XIOCTL_SET_ADHOC_BSSID 24
+/*
+ * arguments:
+ * UINT32 cmd (AR6000_XIOCTL_SET_ADHOC_BSSID)
+ * WMI_SET_ADHOC_BSSID_CMD setAdHocBssidCmd (see include/wmi.h)
+ */
+
+#define AR6000_XIOCTL_SET_OPT_MODE 25
+/*
+ * arguments:
+ * UINT32 cmd (AR6000_XIOCTL_SET_OPT_MODE)
+ * WMI_SET_OPT_MODE_CMD setOptModeCmd (see include/wmi.h)
+ * uses: WMI_SET_OPT_MODE_CMDID
+ */
+
+#define AR6000_XIOCTL_OPT_SEND_FRAME 26
+/*
+ * arguments:
+ * UINT32 cmd (AR6000_XIOCTL_OPT_SEND_FRAME)
+ * WMI_OPT_TX_FRAME_CMD optTxFrameCmd (see include/wmi.h)
+ * uses: WMI_OPT_TX_FRAME_CMDID
+ */
+
+#define AR6000_XIOCTL_SET_BEACON_INTVAL 27
+/*
+ * arguments:
+ * UINT32 cmd (AR6000_XIOCTL_SET_BEACON_INTVAL)
+ * WMI_BEACON_INT_CMD beaconIntCmd (see include/wmi.h)
+ * uses: WMI_SET_BEACON_INT_CMDID
+ */
+
+
+#define IEEE80211_IOCTL_SETAUTHALG 28
+
+
+#define AR6000_XIOCTL_SET_VOICE_PKT_SIZE 29
+/*
+ * arguments:
+ * UINT32 cmd (AR6000_XIOCTL_SET_VOICE_PKT_SIZE)
+ * WMI_SET_VOICE_PKT_SIZE_CMD setVoicePktSizeCmd (see include/wmi.h)
+ * uses: WMI_SET_VOICE_PKT_SIZE_CMDID
+ */
+
+
+#define AR6000_XIOCTL_SET_MAX_SP 30
+/*
+ * arguments:
+ * UINT32 cmd (AR6000_XIOCTL_SET_MAX_SP)
+ * WMI_SET_MAX_SP_LEN_CMD maxSPLen(see include/wmi.h)
+ * uses: WMI_SET_MAX_SP_LEN_CMDID
+ */
+
+#define AR6000_XIOCTL_WMI_GET_ROAM_TBL 31
+
+#define AR6000_XIOCTL_WMI_SET_ROAM_CTRL 32
+
+#define AR6000_XIOCTRL_WMI_SET_POWERSAVE_TIMERS 33
+
+
+/*
+ * arguments:
+ * UINT32 cmd (AR6000_XIOCTRL_WMI_SET_POWERSAVE_TIMERS)
+ * WMI_SET_POWERSAVE_TIMERS_CMD powerSaveTimers(see include/wmi.h)
+ * WMI_SET_POWERSAVE_TIMERS_CMDID
+ */
+
+#define AR6000_XIOCTRL_WMI_GET_POWER_MODE 34
+/*
+ * arguments:
+ * UINT32 cmd (AR6000_XIOCTRL_WMI_GET_POWER_MODE)
+ */
+
+#define AR6000_XIOCTRL_WMI_SET_WLAN_STATE 35
+typedef enum {
+ WLAN_DISABLED,
+ WLAN_ENABLED
+} AR6000_WLAN_STATE;
+/*
+ * arguments:
+ * enable/disable
+ */
+
+#define AR6000_XIOCTL_WMI_GET_ROAM_DATA 36
+
+#define AR6000_XIOCTL_WMI_SETRETRYLIMITS 37
+/*
+ * arguments:
+ * WMI_SET_RETRY_LIMITS_CMD ibssSetRetryLimitsCmd
+ * uses: WMI_SET_RETRY_LIMITS_CMDID
+ */
+
+#ifdef CONFIG_HOST_TCMD_SUPPORT
+/* ====extended commands for radio test ==== */
+
+#define AR6000_XIOCTL_TCMD_CONT_TX 38
+/*
+ * arguments:
+ * UINT32 cmd (AR6000_XIOCTL_TCMD_CONT_TX)
+ * WMI_TCMD_CONT_TX_CMD contTxCmd (see include/wmi.h)
+ * uses: WMI_TCMD_CONT_TX_CMDID
+ */
+
+#define AR6000_XIOCTL_TCMD_CONT_RX 39
+/*
+ * arguments:
+ * UINT32 cmd (AR6000_XIOCTL_TCMD_CONT_RX)
+ * WMI_TCMD_CONT_RX_CMD rxCmd (see include/wmi.h)
+ * uses: WMI_TCMD_CONT_RX_CMDID
+ */
+
+#define AR6000_XIOCTL_TCMD_PM 40
+/*
+ * arguments:
+ * UINT32 cmd (AR6000_XIOCTL_TCMD_PM)
+ * WMI_TCMD_PM_CMD pmCmd (see include/wmi.h)
+ * uses: WMI_TCMD_PM_CMDID
+ */
+
+#endif /* CONFIG_HOST_TCMD_SUPPORT */
+
+#define AR6000_XIOCTL_WMI_STARTSCAN 41
+/*
+ * arguments:
+ * UINT32 cmd (AR6000_XIOCTL_WMI_STARTSCAN)
+ * UINT8 scanType
+ * UINT8 scanConnected
+ * A_BOOL forceFgScan
+ * uses: WMI_START_SCAN_CMDID
+ */
+
+#define AR6000_XIOCTL_WMI_SETFIXRATES 42
+
+#define AR6000_XIOCTL_WMI_GETFIXRATES 43
+
+
+#define AR6000_XIOCTL_WMI_SET_RSSITHRESHOLD 44
+/*
+ * arguments:
+ * WMI_RSSI_THRESHOLD_PARAMS_CMD thresholdParams (see include/wmi.h)
+ * uses: WMI_RSSI_THRESHOLD_PARAMS_CMDID
+ */
+
+#define AR6000_XIOCTL_WMI_CLR_RSSISNR 45
+/*
+ * arguments:
+ * WMI_CLR_RSSISNR_CMD thresholdParams (see include/wmi.h)
+ * uses: WMI_CLR_RSSISNR_CMDID
+ */
+
+#define AR6000_XIOCTL_WMI_SET_LQTHRESHOLD 46
+/*
+ * arguments:
+ * WMI_LQ_THRESHOLD_PARAMS_CMD thresholdParams (see include/wmi.h)
+ * uses: WMI_LQ_THRESHOLD_PARAMS_CMDID
+ */
+
+#define AR6000_XIOCTL_WMI_SET_RTS 47
+/*
+ * arguments:
+ * WMI_SET_RTS_MODE_CMD (see include/wmi.h)
+ * uses: WMI_SET_RTS_MODE_CMDID
+ */
+
+#define AR6000_XIOCTL_WMI_SET_LPREAMBLE 48
+
+#define AR6000_XIOCTL_WMI_SET_AUTHMODE 49
+/*
+ * arguments:
+ * UINT32 cmd (AR6000_XIOCTL_WMI_SET_AUTHMODE)
+ * UINT8 mode
+ * uses: WMI_SET_RECONNECT_AUTH_MODE_CMDID
+ */
+
+#define AR6000_XIOCTL_WMI_SET_REASSOCMODE 50
+
+/*
+ * arguments:
+ * UINT32 cmd (AR6000_XIOCTL_WMI_SET_WMM)
+ * UINT8 mode
+ * uses: WMI_SET_WMM_CMDID
+ */
+#define AR6000_XIOCTL_WMI_SET_WMM 51
+
+/*
+ * arguments:
+ * UINT32 cmd (AR6000_XIOCTL_WMI_SET_HB_CHALLENGE_RESP_PARAMS)
+ * UINT32 frequency
+ * UINT8 threshold
+ */
+#define AR6000_XIOCTL_WMI_SET_HB_CHALLENGE_RESP_PARAMS 52
+
+/*
+ * arguments:
+ * UINT32 cmd (AR6000_XIOCTL_WMI_GET_HB_CHALLENGE_RESP)
+ * UINT32 cookie
+ */
+#define AR6000_XIOCTL_WMI_GET_HB_CHALLENGE_RESP 53
+
+/*
+ * arguments:
+ * UINT32 cmd (AR6000_XIOCTL_WMI_GET_RD)
+ * UINT32 regDomain
+ */
+#define AR6000_XIOCTL_WMI_GET_RD 54
+
+#define AR6000_XIOCTL_DIAG_READ 55
+
+#define AR6000_XIOCTL_DIAG_WRITE 56
+
+/*
+ * arguments:
+ * UINT32 cmd (AR6000_XIOCTL_WMI_SET_TXOP)
+ * WMI_TXOP_CFG txopEnable
+ */
+#define AR6000_XIOCTL_WMI_SET_TXOP 57
+
+#ifdef USER_KEYS
+/*
+ * arguments:
+ * UINT32 cmd (AR6000_XIOCTL_USER_SETKEYS)
+ * UINT32 keyOpCtrl
+ * uses AR6000_USER_SETKEYS_INFO
+ */
+#define AR6000_XIOCTL_USER_SETKEYS 58
+#endif /* USER_KEYS */
+
+#define AR6000_XIOCTL_WMI_SET_KEEPALIVE 59
+/*
+ * arguments:
+ * UINT8 cmd (AR6000_XIOCTL_WMI_SET_KEEPALIVE)
+ * UINT8 keepaliveInterval
+ * uses: WMI_SET_KEEPALIVE_CMDID
+ */
+
+#define AR6000_XIOCTL_WMI_GET_KEEPALIVE 60
+/*
+ * arguments:
+ * UINT8 cmd (AR6000_XIOCTL_WMI_GET_KEEPALIVE)
+ * UINT8 keepaliveInterval
+ * A_BOOL configured
+ * uses: WMI_GET_KEEPALIVE_CMDID
+ */
+
+/* ====ROM Patching Extended Ioctls==== */
+
+#define AR6000_XIOCTL_BMI_ROMPATCH_INSTALL 61
+/*
+ * arguments:
+ * union {
+ * struct {
+ * UINT32 cmd (AR6000_XIOCTL_BMI_ROMPATCH_INSTALL)
+ * UINT32 ROM Address
+ * UINT32 RAM Address
+ * UINT32 number of bytes
+ * UINT32 activate? (0 or 1)
+ * }
+ * A_UINT32 resulting rompatch ID
+ * }
+ * uses: BMI_ROMPATCH_INSTALL
+ */
+
+#define AR6000_XIOCTL_BMI_ROMPATCH_UNINSTALL 62
+/*
+ * arguments:
+ * struct {
+ * UINT32 cmd (AR6000_XIOCTL_BMI_ROMPATCH_UNINSTALL)
+ * UINT32 rompatch ID
+ * }
+ * uses: BMI_ROMPATCH_UNINSTALL
+ */
+
+#define AR6000_XIOCTL_BMI_ROMPATCH_ACTIVATE 63
+/*
+ * arguments:
+ * struct {
+ * UINT32 cmd (AR6000_XIOCTL_BMI_ROMPATCH_ACTIVATE)
+ * UINT32 rompatch count
+ * UINT32 rompatch IDs[rompatch count]
+ * }
+ * uses: BMI_ROMPATCH_ACTIVATE
+ */
+
+#define AR6000_XIOCTL_BMI_ROMPATCH_DEACTIVATE 64
+/*
+ * arguments:
+ * struct {
+ * UINT32 cmd (AR6000_XIOCTL_BMI_ROMPATCH_DEACTIVATE)
+ * UINT32 rompatch count
+ * UINT32 rompatch IDs[rompatch count]
+ * }
+ * uses: BMI_ROMPATCH_DEACTIVATE
+ */
+
+#define AR6000_XIOCTL_WMI_SET_APPIE 65
+/*
+ * arguments:
+ * struct {
+ * UINT32 cmd (AR6000_XIOCTL_WMI_SET_APPIE)
+ * UINT32 app_frmtype;
+ * UINT32 app_buflen;
+ * UINT8 app_buf[];
+ * }
+ */
+#define AR6000_XIOCTL_WMI_SET_MGMT_FRM_RX_FILTER 66
+/*
+ * arguments:
+ * A_UINT32 filter_type;
+ */
+
+#define AR6000_XIOCTL_DBGLOG_CFG_MODULE 67
+
+#define AR6000_XIOCTL_DBGLOG_GET_DEBUG_LOGS 68
+
+#define AR6000_XIOCTL_WMI_SET_WSC_STATUS 70
+/*
+ * arguments:
+ * A_UINT32 wsc_status;
+ * (WSC_REG_INACTIVE or WSC_REG_ACTIVE)
+ */
+
+/*
+ * arguments:
+ * struct {
+ * A_UINT8 streamType;
+ * A_UINT8 status;
+ * }
+ * uses: WMI_SET_BT_STATUS_CMDID
+ */
+#define AR6000_XIOCTL_WMI_SET_BT_STATUS 71
+
+/*
+ * arguments:
+ * struct {
+ * A_UINT8 paramType;
+ * union {
+ * A_UINT8 noSCOPkts;
+ * BT_PARAMS_A2DP a2dpParams;
+ * BT_COEX_REGS regs;
+ * };
+ * }
+ * uses: WMI_SET_BT_PARAM_CMDID
+ */
+#define AR6000_XIOCTL_WMI_SET_BT_PARAMS 72
+
+#define AR6000_XIOCTL_WMI_SET_HOST_SLEEP_MODE 73
+#define AR6000_XIOCTL_WMI_SET_WOW_MODE 74
+#define AR6000_XIOCTL_WMI_GET_WOW_LIST 75
+#define AR6000_XIOCTL_WMI_ADD_WOW_PATTERN 76
+#define AR6000_XIOCTL_WMI_DEL_WOW_PATTERN 77
+
+
+
+#define AR6000_XIOCTL_TARGET_INFO 78
+/*
+ * arguments:
+ * UINT32 cmd (AR6000_XIOCTL_TARGET_INFO)
+ * A_UINT32 TargetVersion (returned)
+ * A_UINT32 TargetType (returned)
+ * (See also bmi_msg.h target_ver and target_type)
+ */
+
+#define AR6000_XIOCTL_DUMP_HTC_CREDIT_STATE 79
+/*
+ * arguments:
+ * none
+ */
+
+#define AR6000_XIOCTL_TRAFFIC_ACTIVITY_CHANGE 80
+/*
+ * This ioctl is used to emulate traffic activity
+ * timeouts. Activity/inactivity will trigger the driver
+ * to re-balance credits.
+ *
+ * arguments:
+ * ar6000_traffic_activity_change
+ */
+
+#define AR6000_XIOCTL_WMI_SET_CONNECT_CTRL_FLAGS 81
+/*
+ * This ioctl is used to set the connect control flags
+ *
+ * arguments:
+ * A_UINT32 connectCtrlFlags
+ */
+
+#define AR6000_XIOCTL_WMI_SET_AKMP_PARAMS 82
+/*
+ * This IOCTL sets any Authentication, Key Management and Protection
+ * related parameters. It is used along with the information set in
+ * the Connect command.
+ * Currently this enables the use of multiple PMKIDs with an AP.
+ *
+ * arguments:
+ * struct {
+ * A_UINT32 akmpInfo;
+ * }
+ * uses: WMI_SET_AKMP_PARAMS_CMD
+ */
+
+#define AR6000_XIOCTL_WMI_GET_PMKID_LIST 83
+
+#define AR6000_XIOCTL_WMI_SET_PMKID_LIST 84
+/*
+ * This IOCTL is used to set a list of PMKIDs. This list of
+ * PMKIDs is used in the [Re]AssocReq Frame. This list is used
+ * only if the MultiPMKID option is enabled via the
+ * AR6000_XIOCTL_WMI_SET_AKMP_PARAMS IOCTL.
+ *
+ * arguments:
+ * struct {
+ * A_UINT32 numPMKID;
+ * WMI_PMKID pmkidList[WMI_MAX_PMKID_CACHE];
+ * }
+ * uses: WMI_SET_PMKIDLIST_CMD
+ */
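Purely to make the layout above concrete, a hypothetical wrapper mirroring the XIOCTL convention of a 32-bit command word followed by the payload (WMI_PMKID and WMI_MAX_PMKID_CACHE come from wmi.h; the struct name here is made up for illustration and is not defined by the driver):

    struct ar6000_set_pmkid_list_buf {
        A_UINT32  cmd;                             /* AR6000_XIOCTL_WMI_SET_PMKID_LIST */
        A_UINT32  numPMKID;                        /* number of valid entries below */
        WMI_PMKID pmkidList[WMI_MAX_PMKID_CACHE];  /* PMKIDs used in the [Re]AssocReq */
    };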
+
+#define AR6000_XIOCTL_WMI_SET_PARAMS 85
+#define AR6000_XIOCTL_WMI_SET_MCAST_FILTER 86
+#define AR6000_XIOCTL_WMI_DEL_MCAST_FILTER 87
+
+
+/* Historical DSETPATCH support for INI patches */
+#define AR6000_XIOCTL_UNUSED90 90
+
+
+/* Support LZ-compressed firmware download */
+#define AR6000_XIOCTL_BMI_LZ_STREAM_START 91
+/*
+ * arguments:
+ * UINT32 cmd (AR6000_XIOCTL_BMI_LZ_STREAM_START)
+ * UINT32 address
+ * uses: BMI_LZ_STREAM_START
+ */
+
+#define AR6000_XIOCTL_BMI_LZ_DATA 92
+/*
+ * arguments:
+ * UINT32 cmd (AR6000_XIOCTL_BMI_LZ_DATA)
+ * UINT32 length
+ * char data[length]
+ * uses: BMI_LZ_DATA
+ */
+
+#define AR6000_XIOCTL_PROF_CFG 93
+/*
+ * arguments:
+ * A_UINT32 period
+ * A_UINT32 nbins
+ */
+
+#define AR6000_XIOCTL_PROF_ADDR_SET 94
+/*
+ * arguments:
+ * A_UINT32 Target address
+ */
+
+#define AR6000_XIOCTL_PROF_START 95
+
+#define AR6000_XIOCTL_PROF_STOP 96
+
+#define AR6000_XIOCTL_PROF_COUNT_GET 97
+
+#define AR6000_XIOCTL_WMI_ABORT_SCAN 98
+
+/*
+ * AP mode
+ */
+#define AR6000_XIOCTL_AP_GET_STA_LIST 99
+
+#define AR6000_XIOCTL_AP_HIDDEN_SSID 100
+
+#define AR6000_XIOCTL_AP_SET_NUM_STA 101
+
+#define AR6000_XIOCTL_AP_SET_ACL_MAC 102
+
+#define AR6000_XIOCTL_AP_GET_ACL_LIST 103
+
+#define AR6000_XIOCTL_AP_COMMIT_CONFIG 104
+
+#define IEEE80211_IOCTL_GETWPAIE 105
+
+#define AR6000_XIOCTL_AP_CONN_INACT_TIME 106
+
+#define AR6000_XIOCTL_AP_PROT_SCAN_TIME 107
+
+#define AR6000_XIOCTL_AP_SET_COUNTRY 108
+
+#define AR6000_XIOCTL_AP_SET_DTIM 109
+
+
+
+
+#define AR6000_XIOCTL_WMI_TARGET_EVENT_REPORT 110
+
+#define AR6000_XIOCTL_SET_IP 111
+
+#define AR6000_XIOCTL_AP_SET_ACL_POLICY 112
+
+#define AR6000_XIOCTL_AP_INTRA_BSS_COMM 113
+
+#define AR6000_XIOCTL_DUMP_MODULE_DEBUG_INFO 114
+
+#define AR6000_XIOCTL_MODULE_DEBUG_SET_MASK 115
+
+#define AR6000_XIOCTL_MODULE_DEBUG_GET_MASK 116
+
+#define AR6000_XIOCTL_DUMP_RCV_AGGR_STATS 117
+
+#define AR6000_XIOCTL_SET_HT_CAP 118
+
+#define AR6000_XIOCTL_SET_HT_OP 119
+
+#define AR6000_XIOCTL_AP_GET_STAT 120
+
+#define AR6000_XIOCTL_SET_TX_SELECT_RATES 121
+
+#define AR6000_XIOCTL_SETUP_AGGR 122
+
+#define AR6000_XIOCTL_ALLOW_AGGR 123
+
+#define AR6000_XIOCTL_AP_GET_HIDDEN_SSID 124
+
+#define AR6000_XIOCTL_AP_GET_COUNTRY 125
+
+#define AR6000_XIOCTL_AP_GET_WMODE 126
+
+#define AR6000_XIOCTL_AP_GET_DTIM 127
+
+#define AR6000_XIOCTL_AP_GET_BINTVL 128
+
+#define AR6000_XIOCTL_AP_GET_RTS 129
+
+#define AR6000_XIOCTL_DELE_AGGR 130
+
+#define AR6000_XIOCTL_FETCH_TARGET_REGS 131
+
+#define AR6000_XIOCTL_HCI_CMD 132
+
+#define AR6000_XIOCTL_ACL_DATA 133
+
+#define AR6000_XIOCTL_WLAN_CONN_PRECEDENCE 134
+
+#define AR6000_XIOCTL_AP_SET_11BG_RATESET 135
+
+/*
+ * arguments:
+ * WMI_AP_PS_CMD apPsCmd
+ * uses: WMI_AP_PS_CMDID
+ */
+
+#define AR6000_XIOCTL_WMI_SET_AP_PS 136
+
+#define AR6000_XIOCTL_WMI_MCAST_FILTER 137
+
+#define AR6000_XIOCTL_WMI_SET_BTCOEX_FE_ANT 138
+
+#define AR6000_XIOCTL_WMI_SET_BTCOEX_COLOCATED_BT_DEV 139
+
+#define AR6000_XIOCTL_WMI_SET_BTCOEX_BTINQUIRY_PAGE_CONFIG 140
+
+#define AR6000_XIOCTL_WMI_SET_BTCOEX_SCO_CONFIG 141
+
+#define AR6000_XIOCTL_WMI_SET_BTCOEX_A2DP_CONFIG 142
+
+#define AR6000_XIOCTL_WMI_SET_BTCOEX_ACLCOEX_CONFIG 143
+
+#define AR6000_XIOCTL_WMI_SET_BTCOEX_DEBUG 144
+
+#define AR6000_XIOCTL_WMI_SET_BT_OPERATING_STATUS 145
+
+#define AR6000_XIOCTL_WMI_GET_BTCOEX_CONFIG 146
+
+#define AR6000_XIOCTL_WMI_GET_BTCOEX_STATS 147
+/*
+ * arguments:
+ * UINT32 cmd (AR6000_XIOCTL_WMI_SET_QOS_SUPP)
+ * UINT8 mode
+ * uses: WMI_SET_QOS_SUPP_CMDID
+ */
+#define AR6000_XIOCTL_WMI_SET_QOS_SUPP 148
+
+#define AR6000_XIOCTL_GET_WLAN_SLEEP_STATE 149
+
+#define AR6000_XIOCTL_SET_BT_HW_POWER_STATE 150
+
+#define AR6000_XIOCTL_GET_BT_HW_POWER_STATE 151
+
+#define AR6000_XIOCTL_ADD_AP_INTERFACE 152
+
+#define AR6000_XIOCTL_REMOVE_AP_INTERFACE 153
+
+#define AR6000_XIOCTL_WMI_SET_TX_SGI_PARAM 154
+
+
+/* used by AR6000_IOCTL_WMI_GETREV */
+struct ar6000_version {
+ A_UINT32 host_ver;
+ A_UINT32 target_ver;
+ A_UINT32 wlan_ver;
+ A_UINT32 abi_ver;
+};
+
+/* used by AR6000_IOCTL_WMI_GET_QOS_QUEUE */
+struct ar6000_queuereq {
+ A_UINT8 trafficClass;
+ A_UINT16 activeTsids;
+};
+
+/* used by AR6000_IOCTL_WMI_GET_TARGET_STATS */
+typedef struct targetStats_t {
+ A_UINT64 tx_packets;
+ A_UINT64 tx_bytes;
+ A_UINT64 tx_unicast_pkts;
+ A_UINT64 tx_unicast_bytes;
+ A_UINT64 tx_multicast_pkts;
+ A_UINT64 tx_multicast_bytes;
+ A_UINT64 tx_broadcast_pkts;
+ A_UINT64 tx_broadcast_bytes;
+ A_UINT64 tx_rts_success_cnt;
+ A_UINT64 tx_packet_per_ac[4];
+
+ A_UINT64 tx_errors;
+ A_UINT64 tx_failed_cnt;
+ A_UINT64 tx_retry_cnt;
+ A_UINT64 tx_mult_retry_cnt;
+ A_UINT64 tx_rts_fail_cnt;
+
+ A_UINT64 rx_packets;
+ A_UINT64 rx_bytes;
+ A_UINT64 rx_unicast_pkts;
+ A_UINT64 rx_unicast_bytes;
+ A_UINT64 rx_multicast_pkts;
+ A_UINT64 rx_multicast_bytes;
+ A_UINT64 rx_broadcast_pkts;
+ A_UINT64 rx_broadcast_bytes;
+ A_UINT64 rx_fragment_pkt;
+
+ A_UINT64 rx_errors;
+ A_UINT64 rx_crcerr;
+ A_UINT64 rx_key_cache_miss;
+ A_UINT64 rx_decrypt_err;
+ A_UINT64 rx_duplicate_frames;
+
+ A_UINT64 tkip_local_mic_failure;
+ A_UINT64 tkip_counter_measures_invoked;
+ A_UINT64 tkip_replays;
+ A_UINT64 tkip_format_errors;
+ A_UINT64 ccmp_format_errors;
+ A_UINT64 ccmp_replays;
+
+ A_UINT64 power_save_failure_cnt;
+
+ A_UINT64 cs_bmiss_cnt;
+ A_UINT64 cs_lowRssi_cnt;
+ A_UINT64 cs_connect_cnt;
+ A_UINT64 cs_disconnect_cnt;
+
+ A_INT32 tx_unicast_rate;
+ A_INT32 rx_unicast_rate;
+
+ A_UINT32 lq_val;
+
+ A_UINT32 wow_num_pkts_dropped;
+ A_UINT16 wow_num_events_discarded;
+
+ A_INT16 noise_floor_calibation;
+ A_INT16 cs_rssi;
+ A_INT16 cs_aveBeacon_rssi;
+ A_UINT8 cs_aveBeacon_snr;
+ A_UINT8 cs_lastRoam_msec;
+ A_UINT8 cs_snr;
+
+ A_UINT8 wow_num_host_pkt_wakeups;
+ A_UINT8 wow_num_host_event_wakeups;
+
+ A_UINT32 arp_received;
+ A_UINT32 arp_matched;
+ A_UINT32 arp_replied;
+}TARGET_STATS;
+
+typedef struct targetStats_cmd_t {
+ TARGET_STATS targetStats;
+ int clearStats;
+} TARGET_STATS_CMD;
+
+/* used by AR6000_XIOCTL_USER_SETKEYS */
+
+/*
+ * Setting this bit to 1 does not initialize the RSC on the firmware
+ */
+#define AR6000_XIOCTL_USER_SETKEYS_RSC_CTRL 1
+#define AR6000_USER_SETKEYS_RSC_UNCHANGED 0x00000002
+
+typedef struct {
+ A_UINT32 keyOpCtrl; /* Bit Map of Key Mgmt Ctrl Flags */
+} AR6000_USER_SETKEYS_INFO;
+
+
+/* used by AR6000_XIOCTL_GPIO_OUTPUT_SET */
+struct ar6000_gpio_output_set_cmd_s {
+ A_UINT32 set_mask;
+ A_UINT32 clear_mask;
+ A_UINT32 enable_mask;
+ A_UINT32 disable_mask;
+};
+
+/*
+ * used by AR6000_XIOCTL_GPIO_REGISTER_GET and AR6000_XIOCTL_GPIO_REGISTER_SET
+ */
+struct ar6000_gpio_register_cmd_s {
+ A_UINT32 gpioreg_id;
+ A_UINT32 value;
+};
+
+/* used by AR6000_XIOCTL_GPIO_INTR_ACK */
+struct ar6000_gpio_intr_ack_cmd_s {
+ A_UINT32 ack_mask;
+};
+
+/* used by AR6000_XIOCTL_GPIO_INTR_WAIT */
+struct ar6000_gpio_intr_wait_cmd_s {
+ A_UINT32 intr_mask;
+ A_UINT32 input_values;
+};
+
+/* used by the AR6000_XIOCTL_DBGLOG_CFG_MODULE */
+typedef struct ar6000_dbglog_module_config_s {
+ A_UINT32 valid;
+ A_UINT16 mmask;
+ A_UINT16 tsr;
+ A_BOOL rep;
+ A_UINT16 size;
+} DBGLOG_MODULE_CONFIG;
+
+typedef struct user_rssi_thold_t {
+ A_INT16 tag;
+ A_INT16 rssi;
+} USER_RSSI_THOLD;
+
+typedef struct user_rssi_params_t {
+ A_UINT8 weight;
+ A_UINT32 pollTime;
+ USER_RSSI_THOLD tholds[12];
+} USER_RSSI_PARAMS;
+
+typedef struct ar6000_get_btcoex_config_cmd_t{
+ A_UINT32 btProfileType;
+ A_UINT32 linkId;
+ }AR6000_GET_BTCOEX_CONFIG_CMD;
+
+typedef struct ar6000_btcoex_config_t {
+ AR6000_GET_BTCOEX_CONFIG_CMD configCmd;
+ A_UINT32 * configEvent;
+} AR6000_BTCOEX_CONFIG;
+
+typedef struct ar6000_btcoex_stats_t {
+ A_UINT32 * statsEvent;
+ }AR6000_BTCOEX_STATS;
+/*
+ * The host driver may carry some config parameters. Typically these are
+ * one-time settings and may correspond to any of the underlying modules.
+ * The host driver exposes an API for those modules to query this config.
+ */
+#define AR6000_DRIVER_CFG_BASE 0x8000
+
+/* Should driver perform wlan node caching? */
+#define AR6000_DRIVER_CFG_GET_WLANNODECACHING 0x8001
+/* Should we log raw WMI msgs? */
+#define AR6000_DRIVER_CFG_LOG_RAW_WMI_MSGS 0x8002
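A short sketch of how an underlying module might query one of these IDs through ar6000_get_driver_cfg() (declared in ar6xapi_linux.h); treating the result as an A_UINT32 flag is an assumption made for this example, as is the helper itself:

    /* Ask the host driver whether WLAN node caching should be performed. */
    static A_BOOL wants_node_caching(struct net_device *dev)
    {
        A_UINT32 nodeCaching = 0;    /* result width assumed for this sketch */

        if (ar6000_get_driver_cfg(dev, AR6000_DRIVER_CFG_GET_WLANNODECACHING,
                                  &nodeCaching) != A_OK)
            return FALSE;            /* no answer: treat caching as disabled */

        return (A_BOOL)(nodeCaching != 0);
    }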
+
+/* used by AR6000_XIOCTL_DIAG_READ & AR6000_XIOCTL_DIAG_WRITE */
+struct ar6000_diag_window_cmd_s {
+ unsigned int addr;
+ unsigned int value;
+};
+
+
+struct ar6000_traffic_activity_change {
+ A_UINT32 StreamID; /* stream ID to indicate activity change */
+ A_UINT32 Active; /* active (1) or inactive (0) */
+};
+
+/* Used with AR6000_XIOCTL_PROF_COUNT_GET */
+struct prof_count_s {
+ A_UINT32 addr; /* bin start address */
+ A_UINT32 count; /* hit count */
+};
+
+
+/* used by AR6000_XIOCTL_MODULE_DEBUG_SET_MASK */
+/* AR6000_XIOCTL_MODULE_DEBUG_GET_MASK */
+/* AR6000_XIOCTL_DUMP_MODULE_DEBUG_INFO */
+struct drv_debug_module_s {
+ A_CHAR modulename[128]; /* name of module */
+ A_UINT32 mask; /* new mask to set .. or .. current mask */
+};
+
+
+/* All HCI-related rx events are sent up to the host app
+ * via a WMI event id. Each event can carry either ACL data or an HCI
+ * event, and is de-multiplexed accordingly.
+ */
+typedef enum {
+ PAL_HCI_EVENT = 0,
+ PAL_HCI_RX_DATA,
+} WMI_PAL_EVENT_INFO;
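A hedged sketch of the de-multiplexing this enum drives; how the type and payload are extracted from the WMI event buffer is driver-specific, and the handler comments below are placeholders:

    static void pal_demux(WMI_PAL_EVENT_INFO type, void *buf, int len)
    {
        (void)buf;   /* placeholders: real code hands buf/len to the paths below */
        (void)len;

        switch (type) {
        case PAL_HCI_EVENT:
            /* route the buffer to the host HCI event path */
            break;
        case PAL_HCI_RX_DATA:
            /* route the buffer to the ACL data path */
            break;
        }
    }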
+
+
+#ifdef __cplusplus
+}
+#endif
+#endif
diff --git a/drivers/staging/ath6kl/os/linux/include/athtypes_linux.h b/drivers/staging/ath6kl/os/linux/include/athtypes_linux.h
new file mode 100644
index 000000000000..9d9ecbb2a4d7
--- /dev/null
+++ b/drivers/staging/ath6kl/os/linux/include/athtypes_linux.h
@@ -0,0 +1,53 @@
+//------------------------------------------------------------------------------
+//
+// This file contains the definitions of the basic atheros data types.
+// It is used to map the data types in atheros files to a platform specific
+// type.
+// Copyright (c) 2004-2010 Atheros Communications Inc.
+// All rights reserved.
+//
+//
+//
+// Permission to use, copy, modify, and/or distribute this software for any
+// purpose with or without fee is hereby granted, provided that the above
+// copyright notice and this permission notice appear in all copies.
+//
+// THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+// WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+// MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+// ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+// WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+// ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+// OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+//
+//
+//
+// Author(s): ="Atheros"
+//------------------------------------------------------------------------------
+
+#ifndef _ATHTYPES_LINUX_H_
+#define _ATHTYPES_LINUX_H_
+
+#ifdef __KERNEL__
+#include <linux/types.h>
+#else
+#include <sys/types.h>
+#endif
+
+typedef int8_t A_INT8;
+typedef int16_t A_INT16;
+typedef int32_t A_INT32;
+typedef int64_t A_INT64;
+
+typedef u_int8_t A_UINT8;
+typedef u_int16_t A_UINT16;
+typedef u_int32_t A_UINT32;
+typedef u_int64_t A_UINT64;
+
+typedef int A_BOOL;
+typedef char A_CHAR;
+typedef unsigned char A_UCHAR;
+typedef unsigned long A_ATH_TIMER;
+
+
+#endif /* _ATHTYPES_LINUX_H_ */
diff --git a/drivers/staging/ath6kl/os/linux/include/cfg80211.h b/drivers/staging/ath6kl/os/linux/include/cfg80211.h
new file mode 100644
index 000000000000..b60e8acf4931
--- /dev/null
+++ b/drivers/staging/ath6kl/os/linux/include/cfg80211.h
@@ -0,0 +1,50 @@
+//------------------------------------------------------------------------------
+// Copyright (c) 2004-2010 Atheros Communications Inc.
+// All rights reserved.
+//
+//
+//
+// Permission to use, copy, modify, and/or distribute this software for any
+// purpose with or without fee is hereby granted, provided that the above
+// copyright notice and this permission notice appear in all copies.
+//
+// THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+// WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+// MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+// ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+// WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+// ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+// OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+//
+//
+//
+// Author(s): ="Atheros"
+//------------------------------------------------------------------------------
+
+#ifndef _AR6K_CFG80211_H_
+#define _AR6K_CFG80211_H_
+
+struct wireless_dev *ar6k_cfg80211_init(struct device *dev);
+void ar6k_cfg80211_deinit(AR_SOFTC_T *ar);
+
+void ar6k_cfg80211_scanComplete_event(AR_SOFTC_T *ar, A_STATUS status);
+
+void ar6k_cfg80211_connect_event(AR_SOFTC_T *ar, A_UINT16 channel,
+ A_UINT8 *bssid, A_UINT16 listenInterval,
+ A_UINT16 beaconInterval,NETWORK_TYPE networkType,
+ A_UINT8 beaconIeLen, A_UINT8 assocReqLen,
+ A_UINT8 assocRespLen, A_UINT8 *assocInfo);
+
+void ar6k_cfg80211_disconnect_event(AR_SOFTC_T *ar, A_UINT8 reason,
+ A_UINT8 *bssid, A_UINT8 assocRespLen,
+ A_UINT8 *assocInfo, A_UINT16 protocolReasonStatus);
+
+void ar6k_cfg80211_tkip_micerr_event(AR_SOFTC_T *ar, A_UINT8 keyid, A_BOOL ismcast);
+
+#endif /* _AR6K_CFG80211_H_ */
+
+
+
+
+
+
diff --git a/drivers/staging/ath6kl/os/linux/include/config_linux.h b/drivers/staging/ath6kl/os/linux/include/config_linux.h
new file mode 100644
index 000000000000..50f53d361049
--- /dev/null
+++ b/drivers/staging/ath6kl/os/linux/include/config_linux.h
@@ -0,0 +1,60 @@
+//------------------------------------------------------------------------------
+// Copyright (c) 2004-2010 Atheros Communications Inc.
+// All rights reserved.
+//
+//
+//
+// Permission to use, copy, modify, and/or distribute this software for any
+// purpose with or without fee is hereby granted, provided that the above
+// copyright notice and this permission notice appear in all copies.
+//
+// THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+// WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+// MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+// ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+// WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+// ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+// OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+//
+//
+//
+// Author(s): ="Atheros"
+//------------------------------------------------------------------------------
+
+#ifndef _CONFIG_LINUX_H_
+#define _CONFIG_LINUX_H_
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#include <linux/version.h>
+
+/*
+ * Host-side GPIO support is optional.
+ * If run-time access to GPIO pins is not required, then
+ * this should be changed to #undef.
+ */
+#define CONFIG_HOST_GPIO_SUPPORT
+
+/*
+ * Host side Test Command support
+ */
+#define CONFIG_HOST_TCMD_SUPPORT
+
+#define USE_4BYTE_REGISTER_ACCESS
+
+/* Host-side support for Target-side profiling */
+#undef CONFIG_TARGET_PROFILE_SUPPORT
+
+/* IP/TCP checksum offload */
+/* Checksum offload is currently not supported for 64 bit platforms */
+#ifndef __LP64__
+#define CONFIG_CHECKSUM_OFFLOAD
+#endif /* __LP64__ */
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif
diff --git a/drivers/staging/ath6kl/os/linux/include/debug_linux.h b/drivers/staging/ath6kl/os/linux/include/debug_linux.h
new file mode 100644
index 000000000000..b8dba52badce
--- /dev/null
+++ b/drivers/staging/ath6kl/os/linux/include/debug_linux.h
@@ -0,0 +1,50 @@
+//------------------------------------------------------------------------------
+// Copyright (c) 2004-2010 Atheros Communications Inc.
+// All rights reserved.
+//
+//
+//
+// Permission to use, copy, modify, and/or distribute this software for any
+// purpose with or without fee is hereby granted, provided that the above
+// copyright notice and this permission notice appear in all copies.
+//
+// THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+// WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+// MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+// ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+// WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+// ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+// OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+//
+//
+//
+// Author(s): ="Atheros"
+//------------------------------------------------------------------------------
+
+#ifndef _DEBUG_LINUX_H_
+#define _DEBUG_LINUX_H_
+
+ /* macro to remove parens */
+#define ATH_PRINTX_ARG(arg...) arg
+
+#ifdef DEBUG
+ /* NOTE: the AR_DEBUG_PRINTF macro is defined here because the handling of
+ * variable-argument macros may be compiler dependent. */
+#define AR_DEBUG_PRINTF(mask, args) do { \
+ if (GET_ATH_MODULE_DEBUG_VAR_MASK(ATH_MODULE_NAME) & (mask)) { \
+ A_LOGGER(mask, ATH_MODULE_NAME, ATH_PRINTX_ARG args); \
+ } \
+} while (0)
+#else
+ /* on non-debug builds, keep error and warning messages in the driver; all other
+ * message tracing gets compiled out */
+#define AR_DEBUG_PRINTF(mask, args) do { \
+ if ((mask) & (ATH_DEBUG_ERR | ATH_DEBUG_WARN)) { A_PRINTF(ATH_PRINTX_ARG args); } \
+} while (0)
+
+#endif
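Because the variadic arguments pass through ATH_PRINTX_ARG, call sites wrap them in an extra pair of parentheses; a typical call looks like the following (the mask and the endpoint variable are placeholders):

    /* The printf-style arguments get their own parentheses so that
     * ATH_PRINTX_ARG can strip them when the macro expands. */
    AR_DEBUG_PRINTF(ATH_DEBUG_TRC,
                    ("ar6000: endpoint %d ready\n", endpoint));

On non-DEBUG builds only messages carrying ATH_DEBUG_ERR or ATH_DEBUG_WARN survive.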
+
+ /* compile specific macro to get the function name string */
+#define _A_FUNCNAME_ __func__
+
+
+#endif /* _DEBUG_LINUX_H_ */
diff --git a/drivers/staging/ath6kl/os/linux/include/export_hci_transport.h b/drivers/staging/ath6kl/os/linux/include/export_hci_transport.h
new file mode 100644
index 000000000000..c1506805a4d5
--- /dev/null
+++ b/drivers/staging/ath6kl/os/linux/include/export_hci_transport.h
@@ -0,0 +1,76 @@
+//------------------------------------------------------------------------------
+// Copyright (c) 2009-2010 Atheros Corporation. All rights reserved.
+//
+//
+// Permission to use, copy, modify, and/or distribute this software for any
+// purpose with or without fee is hereby granted, provided that the above
+// copyright notice and this permission notice appear in all copies.
+//
+// THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+// WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+// MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+// ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+// WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+// ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+// OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+//
+//
+//------------------------------------------------------------------------------
+//==============================================================================
+// HCI bridge implementation
+//
+// Author(s): ="Atheros"
+//==============================================================================
+
+#include "hci_transport_api.h"
+#include "common_drv.h"
+
+extern HCI_TRANSPORT_HANDLE (*_HCI_TransportAttach)(void *HTCHandle, HCI_TRANSPORT_CONFIG_INFO *pInfo);
+extern void (*_HCI_TransportDetach)(HCI_TRANSPORT_HANDLE HciTrans);
+extern A_STATUS (*_HCI_TransportAddReceivePkts)(HCI_TRANSPORT_HANDLE HciTrans, HTC_PACKET_QUEUE *pQueue);
+extern A_STATUS (*_HCI_TransportSendPkt)(HCI_TRANSPORT_HANDLE HciTrans, HTC_PACKET *pPacket, A_BOOL Synchronous);
+extern void (*_HCI_TransportStop)(HCI_TRANSPORT_HANDLE HciTrans);
+extern A_STATUS (*_HCI_TransportStart)(HCI_TRANSPORT_HANDLE HciTrans);
+extern A_STATUS (*_HCI_TransportEnableDisableAsyncRecv)(HCI_TRANSPORT_HANDLE HciTrans, A_BOOL Enable);
+extern A_STATUS (*_HCI_TransportRecvHCIEventSync)(HCI_TRANSPORT_HANDLE HciTrans,
+ HTC_PACKET *pPacket,
+ int MaxPollMS);
+extern A_STATUS (*_HCI_TransportSetBaudRate)(HCI_TRANSPORT_HANDLE HciTrans, A_UINT32 Baud);
+extern A_STATUS (*_HCI_TransportEnablePowerMgmt)(HCI_TRANSPORT_HANDLE HciTrans, A_BOOL Enable);
+
+
+#define HCI_TransportAttach(HTCHandle, pInfo) \
+ _HCI_TransportAttach((HTCHandle), (pInfo))
+#define HCI_TransportDetach(HciTrans) \
+ _HCI_TransportDetach(HciTrans)
+#define HCI_TransportAddReceivePkts(HciTrans, pQueue) \
+ _HCI_TransportAddReceivePkts((HciTrans), (pQueue))
+#define HCI_TransportSendPkt(HciTrans, pPacket, Synchronous) \
+ _HCI_TransportSendPkt((HciTrans), (pPacket), (Synchronous))
+#define HCI_TransportStop(HciTrans) \
+ _HCI_TransportStop((HciTrans))
+#define HCI_TransportStart(HciTrans) \
+ _HCI_TransportStart((HciTrans))
+#define HCI_TransportEnableDisableAsyncRecv(HciTrans, Enable) \
+ _HCI_TransportEnableDisableAsyncRecv((HciTrans), (Enable))
+#define HCI_TransportRecvHCIEventSync(HciTrans, pPacket, MaxPollMS) \
+ _HCI_TransportRecvHCIEventSync((HciTrans), (pPacket), (MaxPollMS))
+#define HCI_TransportSetBaudRate(HciTrans, Baud) \
+ _HCI_TransportSetBaudRate((HciTrans), (Baud))
+#define HCI_TransportEnablePowerMgmt(HciTrans, Enable) \
+ _HCI_TransportEnablePowerMgmt((HciTrans), (Enable))
+
+
+extern A_STATUS ar6000_register_hci_transport(HCI_TRANSPORT_CALLBACKS *hciTransCallbacks);
+
+extern A_STATUS ar6000_get_hif_dev(HIF_DEVICE *device, void *config);
+
+extern A_STATUS ar6000_set_uart_config(HIF_DEVICE *hifDevice, A_UINT32 scale, A_UINT32 step);
+
+/* get core clock register settings
+ * data: 0 - 40/44MHz
+ * 1 - 80/88MHz
+ * where (5G band/2.4G band)
+ * assume 2.4G band for now
+ */
+extern A_STATUS ar6000_get_core_clock_config(HIF_DEVICE *hifDevice, A_UINT32 *data);
diff --git a/drivers/staging/ath6kl/os/linux/include/ieee80211_ioctl.h b/drivers/staging/ath6kl/os/linux/include/ieee80211_ioctl.h
new file mode 100644
index 000000000000..769a48014313
--- /dev/null
+++ b/drivers/staging/ath6kl/os/linux/include/ieee80211_ioctl.h
@@ -0,0 +1,179 @@
+//------------------------------------------------------------------------------
+// Copyright (c) 2004-2010 Atheros Communications Inc.
+// All rights reserved.
+//
+//
+//
+// Permission to use, copy, modify, and/or distribute this software for any
+// purpose with or without fee is hereby granted, provided that the above
+// copyright notice and this permission notice appear in all copies.
+//
+// THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+// WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+// MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+// ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+// WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+// ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+// OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+//
+//
+//
+// Author(s): ="Atheros"
+//------------------------------------------------------------------------------
+
+#ifndef _IEEE80211_IOCTL_H_
+#define _IEEE80211_IOCTL_H_
+
+#include <linux/version.h>
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+/*
+ * Extracted from the MADWIFI net80211/ieee80211_ioctl.h
+ */
+
+/*
+ * WPA/RSN get/set key request. Specify the key/cipher
+ * type and whether the key is to be used for sending and/or
+ * receiving. The key index should be set only when working
+ * with global keys (use IEEE80211_KEYIX_NONE for ``no index'').
+ * Otherwise a unicast/pairwise key is specified by the bssid
+ * (on a station) or mac address (on an ap). The key length
+ * must include any MIC key data; otherwise it should be no
+ * more than IEEE80211_KEYBUF_SIZE.
+ */
+struct ieee80211req_key {
+ u_int8_t ik_type; /* key/cipher type */
+ u_int8_t ik_pad;
+ u_int16_t ik_keyix; /* key index */
+ u_int8_t ik_keylen; /* key length in bytes */
+ u_int8_t ik_flags;
+#define IEEE80211_KEY_XMIT 0x01
+#define IEEE80211_KEY_RECV 0x02
+#define IEEE80211_KEY_DEFAULT 0x80 /* default xmit key */
+ u_int8_t ik_macaddr[IEEE80211_ADDR_LEN];
+ u_int64_t ik_keyrsc; /* key receive sequence counter */
+ u_int64_t ik_keytsc; /* key transmit sequence counter */
+ u_int8_t ik_keydata[IEEE80211_KEYBUF_SIZE+IEEE80211_MICBUF_SIZE];
+};
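As a sketch of the semantics spelled out in the comment above, a pairwise (unicast) key request could be filled like this; IEEE80211_KEYIX_NONE and IEEE80211_CIPHER_AES_CCM come from the companion net80211 headers and, like the helper itself, are assumptions for illustration:

    #include <string.h>

    /* Pairwise key: no global key index, peer address set, usable for
     * both transmit and receive. */
    static void fill_pairwise_key(struct ieee80211req_key *ik,
                                  const u_int8_t peer[IEEE80211_ADDR_LEN],
                                  const u_int8_t *key, u_int8_t keylen)
    {
        memset(ik, 0, sizeof(*ik));
        ik->ik_type   = IEEE80211_CIPHER_AES_CCM;  /* assumed cipher constant */
        ik->ik_keyix  = IEEE80211_KEYIX_NONE;      /* pairwise keys carry no index */
        ik->ik_keylen = keylen;                    /* includes MIC key data, if any */
        ik->ik_flags  = IEEE80211_KEY_XMIT | IEEE80211_KEY_RECV;
        memcpy(ik->ik_macaddr, peer, IEEE80211_ADDR_LEN);
        memcpy(ik->ik_keydata, key, keylen);
    }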
+/*
+ * Delete a key either by index or address. Set the index
+ * to IEEE80211_KEYIX_NONE when deleting a unicast key.
+ */
+struct ieee80211req_del_key {
+ u_int8_t idk_keyix; /* key index */
+ u_int8_t idk_macaddr[IEEE80211_ADDR_LEN];
+};
+/*
+ * MLME state manipulation request. IEEE80211_MLME_ASSOC
+ * only makes sense when operating as a station. The other
+ * requests can be used when operating as a station or an
+ * ap (to effect a station).
+ */
+struct ieee80211req_mlme {
+ u_int8_t im_op; /* operation to perform */
+#define IEEE80211_MLME_ASSOC 1 /* associate station */
+#define IEEE80211_MLME_DISASSOC 2 /* disassociate station */
+#define IEEE80211_MLME_DEAUTH 3 /* deauthenticate station */
+#define IEEE80211_MLME_AUTHORIZE 4 /* authorize station */
+#define IEEE80211_MLME_UNAUTHORIZE 5 /* unauthorize station */
+ u_int16_t im_reason; /* 802.11 reason code */
+ u_int8_t im_macaddr[IEEE80211_ADDR_LEN];
+};
+
+struct ieee80211req_addpmkid {
+ u_int8_t pi_bssid[IEEE80211_ADDR_LEN];
+ u_int8_t pi_enable;
+ u_int8_t pi_pmkid[16];
+};
+
+#define AUTH_ALG_OPEN_SYSTEM 0x01
+#define AUTH_ALG_SHARED_KEY 0x02
+#define AUTH_ALG_LEAP 0x04
+
+struct ieee80211req_authalg {
+ u_int8_t auth_alg;
+};
+
+/*
+ * Request to add an IE to a Management Frame
+ */
+enum{
+ IEEE80211_APPIE_FRAME_BEACON = 0,
+ IEEE80211_APPIE_FRAME_PROBE_REQ = 1,
+ IEEE80211_APPIE_FRAME_PROBE_RESP = 2,
+ IEEE80211_APPIE_FRAME_ASSOC_REQ = 3,
+ IEEE80211_APPIE_FRAME_ASSOC_RESP = 4,
+ IEEE80211_APPIE_NUM_OF_FRAME = 5
+};
+
+/*
+ * The Maximum length of the IE that can be added to a Management frame
+ */
+#define IEEE80211_APPIE_FRAME_MAX_LEN 200
+
+struct ieee80211req_getset_appiebuf {
+ u_int32_t app_frmtype; /* management frame type for which buffer is added */
+ u_int32_t app_buflen; /* application-supplied buffer length */
+ u_int8_t app_buf[];
+};
+
+/*
+ * The following definitions are used by an application to set filter
+ * for receiving management frames
+ */
+enum {
+ IEEE80211_FILTER_TYPE_BEACON = 0x1,
+ IEEE80211_FILTER_TYPE_PROBE_REQ = 0x2,
+ IEEE80211_FILTER_TYPE_PROBE_RESP = 0x4,
+ IEEE80211_FILTER_TYPE_ASSOC_REQ = 0x8,
+ IEEE80211_FILTER_TYPE_ASSOC_RESP = 0x10,
+ IEEE80211_FILTER_TYPE_AUTH = 0x20,
+ IEEE80211_FILTER_TYPE_DEAUTH = 0x40,
+ IEEE80211_FILTER_TYPE_DISASSOC = 0x80,
+ IEEE80211_FILTER_TYPE_ALL = 0xFF /* used to check the valid filter bits */
+};
+
+struct ieee80211req_set_filter {
+ u_int32_t app_filterype; /* management frame filter type */
+};
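For example, an application that only wants beacons and probe responses would set the filter as below; the structure is then handed to the driver via AR6000_XIOCTL_WMI_SET_MGMT_FRM_RX_FILTER (athdrv_linux.h), plumbing that is not repeated here:

    struct ieee80211req_set_filter filt;

    filt.app_filterype = IEEE80211_FILTER_TYPE_BEACON |
                         IEEE80211_FILTER_TYPE_PROBE_RESP;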
+
+enum {
+ IEEE80211_PARAM_AUTHMODE = 3, /* Authentication Mode */
+ IEEE80211_PARAM_MCASTCIPHER = 5,
+ IEEE80211_PARAM_MCASTKEYLEN = 6, /* multicast key length */
+ IEEE80211_PARAM_UCASTCIPHER = 8,
+ IEEE80211_PARAM_UCASTKEYLEN = 9, /* unicast key length */
+ IEEE80211_PARAM_WPA = 10, /* WPA mode (0,1,2) */
+ IEEE80211_PARAM_ROAMING = 12, /* roaming mode */
+ IEEE80211_PARAM_PRIVACY = 13, /* privacy invoked */
+ IEEE80211_PARAM_COUNTERMEASURES = 14, /* WPA/TKIP countermeasures */
+ IEEE80211_PARAM_DROPUNENCRYPTED = 15, /* discard unencrypted frames */
+ IEEE80211_PARAM_WAPI = 16, /* WAPI policy from wapid */
+};
+
+/*
+ * Values for IEEE80211_PARAM_WPA
+ */
+#define WPA_MODE_WPA1 1
+#define WPA_MODE_WPA2 2
+#define WPA_MODE_AUTO 3
+#define WPA_MODE_NONE 4
+
+struct ieee80211req_wpaie {
+ u_int8_t wpa_macaddr[IEEE80211_ADDR_LEN];
+ u_int8_t wpa_ie[IEEE80211_MAX_IE];
+ u_int8_t rsn_ie[IEEE80211_MAX_IE];
+};
+
+#ifndef IW_ENCODE_ALG_PMK
+#define IW_ENCODE_ALG_PMK 4
+#endif
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* _IEEE80211_IOCTL_H_ */
diff --git a/drivers/staging/ath6kl/os/linux/include/osapi_linux.h b/drivers/staging/ath6kl/os/linux/include/osapi_linux.h
new file mode 100644
index 000000000000..fce6ceb73fa4
--- /dev/null
+++ b/drivers/staging/ath6kl/os/linux/include/osapi_linux.h
@@ -0,0 +1,387 @@
+//------------------------------------------------------------------------------
+// This file contains the definitions of the basic atheros data types.
+// It is used to map the data types in atheros files to a platform specific
+// type.
+// Copyright (c) 2004-2010 Atheros Communications Inc.
+// All rights reserved.
+//
+//
+//
+// Permission to use, copy, modify, and/or distribute this software for any
+// purpose with or without fee is hereby granted, provided that the above
+// copyright notice and this permission notice appear in all copies.
+//
+// THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+// WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+// MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+// ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+// WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+// ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+// OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+//
+//
+//
+// Author(s): ="Atheros"
+//------------------------------------------------------------------------------
+
+#ifndef _OSAPI_LINUX_H_
+#define _OSAPI_LINUX_H_
+
+#ifdef __KERNEL__
+
+#include <linux/version.h>
+#include <linux/types.h>
+#include <linux/kernel.h>
+#include <linux/string.h>
+#include <linux/skbuff.h>
+#include <linux/netdevice.h>
+#include <linux/jiffies.h>
+#include <linux/timer.h>
+#include <linux/delay.h>
+#include <linux/wait.h>
+#include <linux/semaphore.h>
+#include <linux/cache.h>
+
+#ifdef __GNUC__
+#define __ATTRIB_PACK __attribute__ ((packed))
+#define __ATTRIB_PRINTF __attribute__ ((format (printf, 1, 2)))
+#define __ATTRIB_NORETURN __attribute__ ((noreturn))
+#ifndef INLINE
+#define INLINE __inline__
+#endif
+#else /* Not GCC */
+#define __ATTRIB_PACK
+#define __ATTRIB_PRINTF
+#define __ATTRIB_NORETURN
+#ifndef INLINE
+#define INLINE __inline
+#endif
+#endif /* End __GNUC__ */
+
+#define PREPACK
+#define POSTPACK __ATTRIB_PACK
+
+/*
+ * Endianness macros
+ */
+#define A_BE2CPU8(x) ntohb(x)
+#define A_BE2CPU16(x) ntohs(x)
+#define A_BE2CPU32(x) ntohl(x)
+
+#define A_LE2CPU8(x) (x)
+#define A_LE2CPU16(x) (x)
+#define A_LE2CPU32(x) (x)
+
+#define A_CPU2BE8(x) htonb(x)
+#define A_CPU2BE16(x) htons(x)
+#define A_CPU2BE32(x) htonl(x)
+
+#define A_MEMCPY(dst, src, len) memcpy((A_UINT8 *)(dst), (src), (len))
+#define A_MEMZERO(addr, len) memset(addr, 0, len)
+#define A_MEMCMP(addr1, addr2, len) memcmp((addr1), (addr2), (len))
+#define A_MALLOC(size) kmalloc((size), GFP_KERNEL)
+#define A_MALLOC_NOWAIT(size) kmalloc((size), GFP_ATOMIC)
+#define A_FREE(addr) kfree(addr)
+
+#if defined(ANDROID_ENV) && defined(CONFIG_ANDROID_LOGGER)
+extern unsigned int enablelogcat;
+extern int android_logger_lv(void* module, int mask);
+enum logidx { LOG_MAIN_IDX = 0 };
+extern int logger_write(const enum logidx idx,
+ const unsigned char prio,
+ const char __kernel * const tag,
+ const char __kernel * const fmt,
+ ...);
+#define A_ANDROID_PRINTF(mask, module, tags, args...) do { \
+ if (enablelogcat) \
+ logger_write(LOG_MAIN_IDX, android_logger_lv(module, mask), tags, args); \
+ else \
+ printk(KERN_ALERT args); \
+} while (0)
+#ifdef DEBUG
+#define A_LOGGER_MODULE_NAME(x) #x
+#define A_LOGGER(mask, mod, args...) \
+ A_ANDROID_PRINTF(mask, &GET_ATH_MODULE_DEBUG_VAR_NAME(mod), "ar6k_" A_LOGGER_MODULE_NAME(mod), args);
+#endif
+#define A_PRINTF(args...) A_ANDROID_PRINTF(ATH_DEBUG_INFO, NULL, "ar6k_driver", args)
+#else
+#define A_LOGGER(mask, mod, args...) printk(KERN_ALERT args)
+#define A_PRINTF(args...) printk(KERN_ALERT args)
+#endif /* ANDROID */
+#define A_PRINTF_LOG(args...) printk(args)
+#define A_SPRINTF(buf, args...) sprintf (buf, args)
+
+/* Mutual Exclusion */
+typedef spinlock_t A_MUTEX_T;
+#define A_MUTEX_INIT(mutex) spin_lock_init(mutex)
+#define A_MUTEX_LOCK(mutex) spin_lock_bh(mutex)
+#define A_MUTEX_UNLOCK(mutex) spin_unlock_bh(mutex)
+#define A_IS_MUTEX_VALID(mutex) TRUE /* okay to return true, since A_MUTEX_DELETE does nothing */
+#define A_MUTEX_DELETE(mutex) /* spin locks are not kernel resources so nothing to free.. */
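The mutex wrappers above map onto bottom-half spinlocks, and A_MUTEX_DELETE() is deliberately empty. A hedged usage sketch, with a hypothetical structure and counter, showing the intended init/lock/unlock pattern:

    struct example_state {
        A_MUTEX_T lock;
        int count;
    };

    static void example_init(struct example_state *s)
    {
        A_MUTEX_INIT(&s->lock);       /* spin_lock_init() underneath */
    }

    static void example_bump(struct example_state *s)
    {
        A_MUTEX_LOCK(&s->lock);       /* spin_lock_bh() */
        s->count++;
        A_MUTEX_UNLOCK(&s->lock);     /* spin_unlock_bh() */
        /* nothing to tear down: A_MUTEX_DELETE() is a no-op */
    }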
+
+/* Get the current time in jiffies, advanced by a constant offset given in ms */
+#define A_GET_MS(offset) \
+ (jiffies + ((offset) / 1000) * HZ)
+
+/*
+ * Timer Functions
+ */
+#define A_MDELAY(msecs) mdelay(msecs)
+typedef struct timer_list A_TIMER;
+
+#define A_INIT_TIMER(pTimer, pFunction, pArg) do { \
+ init_timer(pTimer); \
+ (pTimer)->function = (pFunction); \
+ (pTimer)->data = (unsigned long)(pArg); \
+} while (0)
+
+/*
+ * Start a timer that fires after 'periodMSec' milliseconds.
+ * Only one-shot timers are supported; a non-zero 'repeatFlag'
+ * is treated as a fatal error by the macro below.
+ */
+#define A_TIMEOUT_MS(pTimer, periodMSec, repeatFlag) do { \
+ if (repeatFlag) { \
+ printk("\n" __FILE__ ":%d: Timer Repeat requested\n",__LINE__); \
+ panic("Timer Repeat"); \
+ } \
+ mod_timer((pTimer), jiffies + HZ * (periodMSec) / 1000); \
+} while (0)
+
+/*
+ * Cancel the Timer.
+ */
+#define A_UNTIMEOUT(pTimer) do { \
+ del_timer((pTimer)); \
+} while (0)
+
+#define A_DELETE_TIMER(pTimer) do { \
+} while (0)
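Taken together, these macros wrap the classic timer_list API (init_timer() plus a callback taking an unsigned long). A hedged sketch of the one-shot arm/cancel sequence, with a hypothetical callback and context:

    static void example_timeout(unsigned long arg)
    {
        /* runs in softirq context when the timer fires */
    }

    static void example_arm(A_TIMER *t, void *ctx)
    {
        A_INIT_TIMER(t, example_timeout, ctx);
        A_TIMEOUT_MS(t, 500, 0);      /* one-shot, ~500 ms from now */
    }

    static void example_disarm(A_TIMER *t)
    {
        A_UNTIMEOUT(t);               /* del_timer() */
        A_DELETE_TIMER(t);            /* intentionally empty */
    }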
+
+/*
+ * Wait Queue related functions
+ */
+typedef wait_queue_head_t A_WAITQUEUE_HEAD;
+#define A_INIT_WAITQUEUE_HEAD(head) init_waitqueue_head(head)
+#ifndef wait_event_interruptible_timeout
+#define __wait_event_interruptible_timeout(wq, condition, ret) \
+do { \
+ wait_queue_t __wait; \
+ init_waitqueue_entry(&__wait, current); \
+ \
+ add_wait_queue(&wq, &__wait); \
+ for (;;) { \
+ set_current_state(TASK_INTERRUPTIBLE); \
+ if (condition) \
+ break; \
+ if (!signal_pending(current)) { \
+ ret = schedule_timeout(ret); \
+ if (!ret) \
+ break; \
+ continue; \
+ } \
+ ret = -ERESTARTSYS; \
+ break; \
+ } \
+ current->state = TASK_RUNNING; \
+ remove_wait_queue(&wq, &__wait); \
+} while (0)
+
+#define wait_event_interruptible_timeout(wq, condition, timeout) \
+({ \
+ long __ret = timeout; \
+ if (!(condition)) \
+ __wait_event_interruptible_timeout(wq, condition, __ret); \
+ __ret; \
+})
+#endif /* wait_event_interruptible_timeout */
+
+#define A_WAIT_EVENT_INTERRUPTIBLE_TIMEOUT(head, condition, timeout) do { \
+ wait_event_interruptible_timeout(head, condition, timeout); \
+} while (0)
+
+#define A_WAKE_UP(head) wake_up(head)
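The wait/wake pair above is how the driver blocks an ioctl until an asynchronous WMI event arrives (see the handlers in ioctl.c later in this patch). A hedged sketch with a hypothetical completion flag:

    static A_WAITQUEUE_HEAD example_wq;
    static int example_done;

    static void example_waiter(void)
    {
        A_INIT_WAITQUEUE_HEAD(&example_wq);
        /* sleep for up to one second waiting for example_done */
        A_WAIT_EVENT_INTERRUPTIBLE_TIMEOUT(example_wq, example_done != 0, HZ);
    }

    static void example_completer(void)
    {
        example_done = 1;
        A_WAKE_UP(&example_wq);
    }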
+
+#ifdef DEBUG
+extern unsigned int panic_on_assert;
+#define A_ASSERT(expr) \
+ if (!(expr)) { \
+ printk(KERN_ALERT"Debug Assert Caught, File %s, Line: %d, Test:%s \n",__FILE__, __LINE__,#expr); \
+ if (panic_on_assert) panic(#expr); \
+ }
+#else
+#define A_ASSERT(expr)
+#endif /* DEBUG */
+
+#ifdef ANDROID_ENV
+struct firmware;
+int android_request_firmware(const struct firmware **firmware_p, const char *filename,
+ struct device *device);
+void android_release_firmware(const struct firmware *firmware);
+#define A_REQUEST_FIRMWARE(_ppf, _pfile, _dev) android_request_firmware(_ppf, _pfile, _dev)
+#define A_RELEASE_FIRMWARE(_pf) android_release_firmware(_pf)
+#else
+#define A_REQUEST_FIRMWARE(_ppf, _pfile, _dev) request_firmware(_ppf, _pfile, _dev)
+#define A_RELEASE_FIRMWARE(_pf) release_firmware(_pf)
+#endif
+
+/*
+ * Initialization of the network buffer subsystem
+ */
+#define A_NETBUF_INIT()
+
+/*
+ * Network buffer queue support
+ */
+typedef struct sk_buff_head A_NETBUF_QUEUE_T;
+
+#define A_NETBUF_QUEUE_INIT(q) \
+ a_netbuf_queue_init(q)
+
+#define A_NETBUF_ENQUEUE(q, pkt) \
+ a_netbuf_enqueue((q), (pkt))
+#define A_NETBUF_PREQUEUE(q, pkt) \
+ a_netbuf_prequeue((q), (pkt))
+#define A_NETBUF_DEQUEUE(q) \
+ (a_netbuf_dequeue(q))
+#define A_NETBUF_QUEUE_SIZE(q) \
+ a_netbuf_queue_size(q)
+#define A_NETBUF_QUEUE_EMPTY(q) \
+ a_netbuf_queue_empty(q)
+
+/*
+ * Network buffer support
+ */
+#define A_NETBUF_ALLOC(size) \
+ a_netbuf_alloc(size)
+#define A_NETBUF_ALLOC_RAW(size) \
+ a_netbuf_alloc_raw(size)
+#define A_NETBUF_FREE(bufPtr) \
+ a_netbuf_free(bufPtr)
+#define A_NETBUF_DATA(bufPtr) \
+ a_netbuf_to_data(bufPtr)
+#define A_NETBUF_LEN(bufPtr) \
+ a_netbuf_to_len(bufPtr)
+#define A_NETBUF_PUSH(bufPtr, len) \
+ a_netbuf_push(bufPtr, len)
+#define A_NETBUF_PUT(bufPtr, len) \
+ a_netbuf_put(bufPtr, len)
+#define A_NETBUF_TRIM(bufPtr,len) \
+ a_netbuf_trim(bufPtr, len)
+#define A_NETBUF_PULL(bufPtr, len) \
+ a_netbuf_pull(bufPtr, len)
+#define A_NETBUF_HEADROOM(bufPtr)\
+ a_netbuf_headroom(bufPtr)
+#define A_NETBUF_SETLEN(bufPtr,len) \
+ a_netbuf_setlen(bufPtr, len)
+
+/* Add data to end of a buffer */
+#define A_NETBUF_PUT_DATA(bufPtr, srcPtr, len) \
+ a_netbuf_put_data(bufPtr, srcPtr, len)
+
+/* Add data to start of the buffer */
+#define A_NETBUF_PUSH_DATA(bufPtr, srcPtr, len) \
+ a_netbuf_push_data(bufPtr, srcPtr, len)
+
+/* Remove data at start of the buffer */
+#define A_NETBUF_PULL_DATA(bufPtr, dstPtr, len) \
+ a_netbuf_pull_data(bufPtr, dstPtr, len)
+
+/* Remove data from the end of the buffer */
+#define A_NETBUF_TRIM_DATA(bufPtr, dstPtr, len) \
+ a_netbuf_trim_data(bufPtr, dstPtr, len)
+
+/* View data as "size" contiguous bytes of type "t" */
+#define A_NETBUF_VIEW_DATA(bufPtr, t, size) \
+ (t )( ((struct sk_buff *)(bufPtr))->data)
+
+/* return the beginning of the headroom for the buffer */
+#define A_NETBUF_HEAD(bufPtr) \
+ ((((struct sk_buff *)(bufPtr))->head))
+
+/*
+ * OS specific network buffer access routines
+ */
+void *a_netbuf_alloc(int size);
+void *a_netbuf_alloc_raw(int size);
+void a_netbuf_free(void *bufPtr);
+void *a_netbuf_to_data(void *bufPtr);
+A_UINT32 a_netbuf_to_len(void *bufPtr);
+A_STATUS a_netbuf_push(void *bufPtr, A_INT32 len);
+A_STATUS a_netbuf_push_data(void *bufPtr, char *srcPtr, A_INT32 len);
+A_STATUS a_netbuf_put(void *bufPtr, A_INT32 len);
+A_STATUS a_netbuf_put_data(void *bufPtr, char *srcPtr, A_INT32 len);
+A_STATUS a_netbuf_pull(void *bufPtr, A_INT32 len);
+A_STATUS a_netbuf_pull_data(void *bufPtr, char *dstPtr, A_INT32 len);
+A_STATUS a_netbuf_trim(void *bufPtr, A_INT32 len);
+A_STATUS a_netbuf_trim_data(void *bufPtr, char *dstPtr, A_INT32 len);
+A_STATUS a_netbuf_setlen(void *bufPtr, A_INT32 len);
+A_INT32 a_netbuf_headroom(void *bufPtr);
+void a_netbuf_enqueue(A_NETBUF_QUEUE_T *q, void *pkt);
+void a_netbuf_prequeue(A_NETBUF_QUEUE_T *q, void *pkt);
+void *a_netbuf_dequeue(A_NETBUF_QUEUE_T *q);
+int a_netbuf_queue_size(A_NETBUF_QUEUE_T *q);
+int a_netbuf_queue_empty(A_NETBUF_QUEUE_T *q);
+void a_netbuf_queue_init(A_NETBUF_QUEUE_T *q);
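The prototypes above are thin wrappers around sk_buff operations. A hedged sketch of the typical alloc/fill/free sequence through the A_NETBUF_* macros, with a hypothetical payload:

    static void *example_build_frame(const char *payload, A_INT32 len)
    {
        void *buf = A_NETBUF_ALLOC(len);           /* sk_buff underneath */

        if (buf == NULL)
            return NULL;

        if (A_NETBUF_PUT_DATA(buf, (char *)payload, len) != A_OK) {
            A_NETBUF_FREE(buf);
            return NULL;
        }

        /* A_NETBUF_LEN(buf) now reports 'len' bytes of data */
        return buf;
    }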
+
+/*
+ * Kernel vs. user-space functions
+ */
+A_UINT32 a_copy_to_user(void *to, const void *from, A_UINT32 n);
+A_UINT32 a_copy_from_user(void *to, const void *from, A_UINT32 n);
+
+/* In Linux, WLAN Rx and Tx run in different contexts, so there is no need to
+ * check for any commands/data queued for WLAN */
+#define A_CHECK_DRV_TX()
+
+#define A_GET_CACHE_LINE_BYTES() L1_CACHE_BYTES
+
+#define A_CACHE_LINE_PAD 128
+
+static inline void *A_ALIGN_TO_CACHE_LINE(void *ptr) {
+ return (void *)L1_CACHE_ALIGN((unsigned long)ptr);
+}
+
+#else /* __KERNEL__ */
+
+#ifdef __GNUC__
+#define __ATTRIB_PACK __attribute__ ((packed))
+#define __ATTRIB_PRINTF __attribute__ ((format (printf, 1, 2)))
+#define __ATTRIB_NORETURN __attribute__ ((noreturn))
+#ifndef INLINE
+#define INLINE __inline__
+#endif
+#else /* Not GCC */
+#define __ATTRIB_PACK
+#define __ATTRIB_PRINTF
+#define __ATTRIB_NORETURN
+#ifndef INLINE
+#define INLINE __inline
+#endif
+#endif /* End __GNUC__ */
+
+#define PREPACK
+#define POSTPACK __ATTRIB_PACK
+
+#define A_MEMCPY(dst, src, len) memcpy((dst), (src), (len))
+#define A_MEMZERO(addr, len) memset((addr), 0, (len))
+#define A_MEMCMP(addr1, addr2, len) memcmp((addr1), (addr2), (len))
+#define A_MALLOC(size) malloc(size)
+#define A_FREE(addr) free(addr)
+
+#ifdef ANDROID
+#ifndef err
+#include <errno.h>
+#define err(_s, args...) do { \
+ fprintf(stderr, "%s: line %d ", __FILE__, __LINE__); \
+ fprintf(stderr, args); fprintf(stderr, ": %d\n", errno); \
+ exit(_s); } while (0)
+#endif
+#else
+#include <err.h>
+#endif
+
+#endif /* __KERNEL__ */
+
+#endif /* _OSAPI_LINUX_H_ */
diff --git a/drivers/staging/ath6kl/os/linux/include/wlan_config.h b/drivers/staging/ath6kl/os/linux/include/wlan_config.h
new file mode 100644
index 000000000000..f7d048722226
--- /dev/null
+++ b/drivers/staging/ath6kl/os/linux/include/wlan_config.h
@@ -0,0 +1,111 @@
+//------------------------------------------------------------------------------
+// Copyright (c) 2004-2010 Atheros Corporation. All rights reserved.
+//
+//
+// Permission to use, copy, modify, and/or distribute this software for any
+// purpose with or without fee is hereby granted, provided that the above
+// copyright notice and this permission notice appear in all copies.
+//
+// THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+// WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+// MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+// ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+// WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+// ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+// OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+//
+//
+//------------------------------------------------------------------------------
+//==============================================================================
+// This file contains the tunable configuration items for the WLAN module
+//
+// Author(s): ="Atheros"
+//==============================================================================
+#ifndef _HOST_WLAN_CONFIG_H_
+#define _HOST_WLAN_CONFIG_H_
+
+/* Include definitions here that can be used to tune the WLAN module behavior.
+ * Different customers can tune the behavior here to suit their needs.
+ */
+
+/* When defined, this configuration item takes the Barker preamble bit
+ * advertised in the ERP IE of the AP's beacons into account when deciding the
+ * short-preamble support sent in the (Re)Assoc request frames.
+ */
+#define WLAN_CONFIG_DONOT_IGNORE_BARKER_IN_ERP 0
+
+/* When defined, this config item suppresses the power-module state-transition
+ * failure events that occur during a scan, so they are not sent to the host.
+ */
+#define WLAN_CONFIG_IGNORE_POWER_SAVE_FAIL_EVENT_DURING_SCAN 0
+
+/*
+ * This configuration item enables/disables keepalive support.
+ * Keepalive support: in the absence of any data traffic to the AP, null
+ * frames are sent to the AP at a periodic interval to keep the association
+ * active. This configuration item defines that periodic interval.
+ * Use a value of zero to disable keepalive support.
+ * Default: 60 seconds
+ */
+#define WLAN_CONFIG_KEEP_ALIVE_INTERVAL 60
+
+/*
+ * This configuration item sets the value of the disconnect timeout.
+ * The firmware delays sending the disconnect event to the host for this
+ * timeout after it gets disconnected from the current AP.
+ * If the firmware successfully roams within the disconnect timeout,
+ * it sends a new connect event instead.
+ */
+#ifdef ANDROID_ENV
+#define WLAN_CONFIG_DISCONNECT_TIMEOUT 3
+#else
+#define WLAN_CONFIG_DISCONNECT_TIMEOUT 10
+#endif /* ANDROID_ENV */
+
+/*
+ * This configuration item disables 11n support.
+ * 0 - Enable
+ * 1 - Disable
+ */
+#define WLAN_CONFIG_DISABLE_11N 0
+
+/*
+ * This configuration item enables BT clock sharing support.
+ * 1 - Enable
+ * 0 - Disable (Default)
+ */
+#define WLAN_CONFIG_BT_SHARING 0
+
+/*
+ * This configuration item sets WIFI OFF policy
+ * 0 - CUT_POWER
+ * 1 - DEEP_SLEEP (Default)
+ */
+#define WLAN_CONFIG_WLAN_OFF 1
+
+/*
+ * This configuration item sets suspend policy
+ * 0 - CUT_POWER (Default)
+ * 1 - DEEP_SLEEP
+ * 2 - WoW
+ * 3 - CUT_POWER if BT OFF (clock sharing designs only)
+ */
+#define WLAN_CONFIG_PM_SUSPEND 0
+
+/*
+ * This configuration item sets suspend policy to use if PM_SUSPEND is
+ * set to WoW and device is not connected at the time of suspend
+ * 0 - CUT_POWER (Default)
+ * 1 - DEEP_SLEEP
+ * 2 - WoW
+ * 3 - CUT_POWER if BT OFF (clock sharing designs only)
+ */
+#define WLAN_CONFIG_PM_WOW2 0
+
+/*
+ * Platform specific function to power ON/OFF AR6000
+ * and enable/disable SDIO card detection
+ */
+#define plat_setup_power(on, detect)
+
+#endif /* _HOST_WLAN_CONFIG_H_ */
diff --git a/drivers/staging/ath6kl/os/linux/include/wmi_filter_linux.h b/drivers/staging/ath6kl/os/linux/include/wmi_filter_linux.h
new file mode 100644
index 000000000000..77e4ec6fea3a
--- /dev/null
+++ b/drivers/staging/ath6kl/os/linux/include/wmi_filter_linux.h
@@ -0,0 +1,293 @@
+//------------------------------------------------------------------------------
+// Copyright (c) 2004-2010 Atheros Communications Inc.
+// All rights reserved.
+//
+//
+//
+// Permission to use, copy, modify, and/or distribute this software for any
+// purpose with or without fee is hereby granted, provided that the above
+// copyright notice and this permission notice appear in all copies.
+//
+// THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+// WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+// MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+// ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+// WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+// ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+// OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+//
+//
+//
+// Author(s): ="Atheros"
+//------------------------------------------------------------------------------
+
+#ifndef _WMI_FILTER_LINUX_H_
+#define _WMI_FILTER_LINUX_H_
+
+/*
+ * sioctl_filter - Standard ioctl
+ * pioctl_filter - Priv ioctl
+ * xioctl_filter - eXtended ioctl
+ *
+ * ---- Possible values for the WMI filter ---------------
+ * (0) - Block this cmd always (or) not implemented
+ * (INFRA_NETWORK) - Allow this cmd only in STA mode
+ * (ADHOC_NETWORK) - Allow this cmd only in IBSS mode
+ * (AP_NETWORK) - Allow this cmd only in AP mode
+ * (INFRA_NETWORK | ADHOC_NETWORK) - Block this cmd in AP mode
+ * (ADHOC_NETWORK | AP_NETWORK) - Block this cmd in STA mode
+ * (INFRA_NETWORK | AP_NETWORK) - Block this cmd in IBSS mode
+ * (INFRA_NETWORK | ADHOC_NETWORK | AP_NETWORK) - allow only when a mode is set
+ * (0xFF) - Allow this cmd always irrespective of mode
+ */
+
+A_UINT8 sioctl_filter[] = {
+(AP_NETWORK), /* SIOCSIWCOMMIT 0x8B00 */
+(0xFF), /* SIOCGIWNAME 0x8B01 */
+(0), /* SIOCSIWNWID 0x8B02 */
+(0), /* SIOCGIWNWID 0x8B03 */
+(INFRA_NETWORK | ADHOC_NETWORK | AP_NETWORK), /* SIOCSIWFREQ 0x8B04 */
+(INFRA_NETWORK | ADHOC_NETWORK | AP_NETWORK), /* SIOCGIWFREQ 0x8B05 */
+(0xFF), /* SIOCSIWMODE 0x8B06 */
+(0xFF), /* SIOCGIWMODE 0x8B07 */
+(0), /* SIOCSIWSENS 0x8B08 */
+(0), /* SIOCGIWSENS 0x8B09 */
+(0), /* SIOCSIWRANGE 0x8B0A */
+(0xFF), /* SIOCGIWRANGE 0x8B0B */
+(0), /* SIOCSIWPRIV 0x8B0C */
+(0), /* SIOCGIWPRIV 0x8B0D */
+(0), /* SIOCSIWSTATS 0x8B0E */
+(0), /* SIOCGIWSTATS 0x8B0F */
+(0), /* SIOCSIWSPY 0x8B10 */
+(0), /* SIOCGIWSPY 0x8B11 */
+(0), /* SIOCSIWTHRSPY 0x8B12 */
+(0), /* SIOCGIWTHRSPY 0x8B13 */
+(INFRA_NETWORK | ADHOC_NETWORK | AP_NETWORK), /* SIOCSIWAP 0x8B14 */
+(INFRA_NETWORK | ADHOC_NETWORK | AP_NETWORK), /* SIOCGIWAP 0x8B15 */
+#if (WIRELESS_EXT >= 18)
+(INFRA_NETWORK | ADHOC_NETWORK), /* SIOCSIWMLME 0X8B16 */
+#else
+(0), /* Dummy 0 */
+#endif /* WIRELESS_EXT */
+(0), /* SIOCGIWAPLIST 0x8B17 */
+(INFRA_NETWORK | ADHOC_NETWORK), /* SIOCSIWSCAN 0x8B18 */
+(INFRA_NETWORK | ADHOC_NETWORK), /* SIOCGIWSCAN 0x8B19 */
+(INFRA_NETWORK | ADHOC_NETWORK | AP_NETWORK), /* SIOCSIWESSID 0x8B1A */
+(INFRA_NETWORK | ADHOC_NETWORK | AP_NETWORK), /* SIOCGIWESSID 0x8B1B */
+(0), /* SIOCSIWNICKN 0x8B1C */
+(0), /* SIOCGIWNICKN 0x8B1D */
+(0), /* Dummy 0 */
+(0), /* Dummy 0 */
+(INFRA_NETWORK | ADHOC_NETWORK | AP_NETWORK), /* SIOCSIWRATE 0x8B20 */
+(INFRA_NETWORK | ADHOC_NETWORK | AP_NETWORK), /* SIOCGIWRATE 0x8B21 */
+(0), /* SIOCSIWRTS 0x8B22 */
+(0), /* SIOCGIWRTS 0x8B23 */
+(0), /* SIOCSIWFRAG 0x8B24 */
+(0), /* SIOCGIWFRAG 0x8B25 */
+(INFRA_NETWORK | ADHOC_NETWORK | AP_NETWORK), /* SIOCSIWTXPOW 0x8B26 */
+(INFRA_NETWORK | ADHOC_NETWORK | AP_NETWORK), /* SIOCGIWTXPOW 0x8B27 */
+(INFRA_NETWORK | ADHOC_NETWORK), /* SIOCSIWRETRY 0x8B28 */
+(INFRA_NETWORK | ADHOC_NETWORK), /* SIOCGIWRETRY 0x8B29 */
+(INFRA_NETWORK | ADHOC_NETWORK | AP_NETWORK), /* SIOCSIWENCODE 0x8B2A */
+(INFRA_NETWORK | ADHOC_NETWORK | AP_NETWORK), /* SIOCGIWENCODE 0x8B2B */
+(INFRA_NETWORK | ADHOC_NETWORK | AP_NETWORK), /* SIOCSIWPOWER 0x8B2C */
+(INFRA_NETWORK | ADHOC_NETWORK | AP_NETWORK), /* SIOCGIWPOWER 0x8B2D */
+};
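Each entry above is indexed by the standard wireless-extensions ioctl number, starting at SIOCSIWCOMMIT (0x8B00, from linux/wireless.h). A hedged sketch of how a dispatcher might consult the table; the helper itself is hypothetical, and it assumes INFRA_NETWORK/ADHOC_NETWORK/AP_NETWORK are single-bit mode flags, as the OR combinations above imply:

    static int example_sioctl_allowed(int cmd, A_UINT8 networkType)
    {
        int idx = cmd - SIOCSIWCOMMIT;            /* table starts at 0x8B00 */

        if (idx < 0 || idx >= (int)sizeof(sioctl_filter))
            return 0;                             /* unknown command: block */
        if (sioctl_filter[idx] == 0xFF)
            return 1;                             /* allowed in every mode */
        return (sioctl_filter[idx] & networkType) != 0;
    }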
+
+
+
+A_UINT8 pioctl_filter[] = {
+(INFRA_NETWORK | ADHOC_NETWORK | AP_NETWORK), /* IEEE80211_IOCTL_SETPARAM (SIOCIWFIRSTPRIV+0) */
+(INFRA_NETWORK | ADHOC_NETWORK | AP_NETWORK), /* IEEE80211_IOCTL_SETKEY (SIOCIWFIRSTPRIV+1) */
+(INFRA_NETWORK | ADHOC_NETWORK | AP_NETWORK), /* IEEE80211_IOCTL_DELKEY (SIOCIWFIRSTPRIV+2) */
+(AP_NETWORK), /* IEEE80211_IOCTL_SETMLME (SIOCIWFIRSTPRIV+3) */
+(INFRA_NETWORK), /* IEEE80211_IOCTL_ADDPMKID (SIOCIWFIRSTPRIV+4) */
+(0), /* IEEE80211_IOCTL_SETOPTIE (SIOCIWFIRSTPRIV+5) */
+(0), /* (SIOCIWFIRSTPRIV+6) */
+(0), /* (SIOCIWFIRSTPRIV+7) */
+(0), /* (SIOCIWFIRSTPRIV+8) */
+(0), /* (SIOCIWFIRSTPRIV+9) */
+(0), /* IEEE80211_IOCTL_LASTONE (SIOCIWFIRSTPRIV+10) */
+(0xFF), /* AR6000_IOCTL_WMI_GETREV (SIOCIWFIRSTPRIV+11) */
+(INFRA_NETWORK | ADHOC_NETWORK | AP_NETWORK), /* AR6000_IOCTL_WMI_SETPWR (SIOCIWFIRSTPRIV+12) */
+(INFRA_NETWORK | ADHOC_NETWORK), /* AR6000_IOCTL_WMI_SETSCAN (SIOCIWFIRSTPRIV+13) */
+(INFRA_NETWORK | ADHOC_NETWORK), /* AR6000_IOCTL_WMI_SETLISTENINT (SIOCIWFIRSTPRIV+14) */
+(INFRA_NETWORK | ADHOC_NETWORK), /* AR6000_IOCTL_WMI_SETBSSFILTER (SIOCIWFIRSTPRIV+15) */
+(INFRA_NETWORK | ADHOC_NETWORK | AP_NETWORK), /* AR6000_IOCTL_WMI_SET_CHANNELPARAMS (SIOCIWFIRSTPRIV+16) */
+(INFRA_NETWORK | ADHOC_NETWORK), /* AR6000_IOCTL_WMI_SET_PROBEDSSID (SIOCIWFIRSTPRIV+17) */
+(INFRA_NETWORK | ADHOC_NETWORK), /* AR6000_IOCTL_WMI_SET_PMPARAMS (SIOCIWFIRSTPRIV+18) */
+(INFRA_NETWORK), /* AR6000_IOCTL_WMI_SET_BADAP (SIOCIWFIRSTPRIV+19) */
+(INFRA_NETWORK | ADHOC_NETWORK), /* AR6000_IOCTL_WMI_GET_QOS_QUEUE (SIOCIWFIRSTPRIV+20) */
+(INFRA_NETWORK | ADHOC_NETWORK), /* AR6000_IOCTL_WMI_CREATE_QOS (SIOCIWFIRSTPRIV+21) */
+(INFRA_NETWORK | ADHOC_NETWORK), /* AR6000_IOCTL_WMI_DELETE_QOS (SIOCIWFIRSTPRIV+22) */
+(INFRA_NETWORK | ADHOC_NETWORK), /* AR6000_IOCTL_WMI_SET_SNRTHRESHOLD (SIOCIWFIRSTPRIV+23) */
+(INFRA_NETWORK | ADHOC_NETWORK), /* AR6000_IOCTL_WMI_SET_ERROR_REPORT_BITMASK (SIOCIWFIRSTPRIV+24)*/
+(0xFF), /* AR6000_IOCTL_WMI_GET_TARGET_STATS (SIOCIWFIRSTPRIV+25) */
+(INFRA_NETWORK | ADHOC_NETWORK), /* AR6000_IOCTL_WMI_SET_ASSOC_INFO (SIOCIWFIRSTPRIV+26) */
+(INFRA_NETWORK | ADHOC_NETWORK), /* AR6000_IOCTL_WMI_SET_ACCESS_PARAMS (SIOCIWFIRSTPRIV+27) */
+(INFRA_NETWORK | ADHOC_NETWORK), /* AR6000_IOCTL_WMI_SET_BMISS_TIME (SIOCIWFIRSTPRIV+28) */
+(INFRA_NETWORK | ADHOC_NETWORK), /* AR6000_IOCTL_WMI_SET_DISC_TIMEOUT (SIOCIWFIRSTPRIV+29) */
+(ADHOC_NETWORK), /* AR6000_IOCTL_WMI_SET_IBSS_PM_CAPS (SIOCIWFIRSTPRIV+30) */
+};
+
+
+
+A_UINT8 xioctl_filter[] = {
+(0xFF), /* Dummy 0 */
+(0xFF), /* AR6000_XIOCTL_BMI_DONE 1 */
+(0xFF), /* AR6000_XIOCTL_BMI_READ_MEMORY 2 */
+(0xFF), /* AR6000_XIOCTL_BMI_WRITE_MEMORY 3 */
+(0xFF), /* AR6000_XIOCTL_BMI_EXECUTE 4 */
+(0xFF), /* AR6000_XIOCTL_BMI_SET_APP_START 5 */
+(0xFF), /* AR6000_XIOCTL_BMI_READ_SOC_REGISTER 6 */
+(0xFF), /* AR6000_XIOCTL_BMI_WRITE_SOC_REGISTER 7 */
+(0xFF), /* AR6000_XIOCTL_BMI_TEST 8 */
+(0xFF), /* AR6000_XIOCTL_UNUSED9 9 */
+(0xFF), /* AR6000_XIOCTL_UNUSED10 10 */
+(0xFF), /* AR6000_XIOCTL_UNUSED11 11 */
+(0xFF), /* AR6000_XIOCTL_FORCE_TARGET_RESET 12 */
+(0xFF), /* AR6000_XIOCTL_HTC_RAW_OPEN 13 */
+(0xFF), /* AR6000_XIOCTL_HTC_RAW_CLOSE 14 */
+(0xFF), /* AR6000_XIOCTL_HTC_RAW_READ 15 */
+(0xFF), /* AR6000_XIOCTL_HTC_RAW_WRITE 16 */
+(0xFF), /* AR6000_XIOCTL_CHECK_TARGET_READY 17 */
+(0xFF), /* AR6000_XIOCTL_GPIO_OUTPUT_SET 18 */
+(0xFF), /* AR6000_XIOCTL_GPIO_INPUT_GET 19 */
+(0xFF), /* AR6000_XIOCTL_GPIO_REGISTER_SET 20 */
+(0xFF), /* AR6000_XIOCTL_GPIO_REGISTER_GET 21 */
+(0xFF), /* AR6000_XIOCTL_GPIO_INTR_ACK 22 */
+(0xFF), /* AR6000_XIOCTL_GPIO_INTR_WAIT 23 */
+(INFRA_NETWORK | ADHOC_NETWORK), /* AR6000_XIOCTL_SET_ADHOC_BSSID 24 */
+(INFRA_NETWORK | ADHOC_NETWORK), /* AR6000_XIOCTL_SET_OPT_MODE 25 */
+(INFRA_NETWORK | ADHOC_NETWORK), /* AR6000_XIOCTL_OPT_SEND_FRAME 26 */
+(ADHOC_NETWORK | AP_NETWORK), /* AR6000_XIOCTL_SET_BEACON_INTVAL 27 */
+(INFRA_NETWORK | ADHOC_NETWORK | AP_NETWORK), /* IEEE80211_IOCTL_SETAUTHALG 28 */
+(INFRA_NETWORK | ADHOC_NETWORK), /* AR6000_XIOCTL_SET_VOICE_PKT_SIZE 29 */
+(INFRA_NETWORK | ADHOC_NETWORK), /* AR6000_XIOCTL_SET_MAX_SP 30 */
+(INFRA_NETWORK | ADHOC_NETWORK), /* AR6000_XIOCTL_WMI_GET_ROAM_TBL 31 */
+(INFRA_NETWORK | ADHOC_NETWORK), /* AR6000_XIOCTL_WMI_SET_ROAM_CTRL 32 */
+(INFRA_NETWORK | ADHOC_NETWORK), /* AR6000_XIOCTRL_WMI_SET_POWERSAVE_TIMERS 33 */
+(INFRA_NETWORK | ADHOC_NETWORK | AP_NETWORK), /* AR6000_XIOCTRL_WMI_GET_POWER_MODE 34 */
+(INFRA_NETWORK | ADHOC_NETWORK | AP_NETWORK), /* AR6000_XIOCTRL_WMI_SET_WLAN_STATE 35 */
+(INFRA_NETWORK | ADHOC_NETWORK), /* AR6000_XIOCTL_WMI_GET_ROAM_DATA 36 */
+(0xFF), /* AR6000_XIOCTL_WMI_SETRETRYLIMITS 37 */
+(0xFF), /* AR6000_XIOCTL_TCMD_CONT_TX 38 */
+(0xFF), /* AR6000_XIOCTL_TCMD_CONT_RX 39 */
+(0xFF), /* AR6000_XIOCTL_TCMD_PM 40 */
+(INFRA_NETWORK | ADHOC_NETWORK), /* AR6000_XIOCTL_WMI_STARTSCAN 41 */
+(INFRA_NETWORK | ADHOC_NETWORK | AP_NETWORK), /* AR6000_XIOCTL_WMI_SETFIXRATES 42 */
+(INFRA_NETWORK | ADHOC_NETWORK | AP_NETWORK), /* AR6000_XIOCTL_WMI_GETFIXRATES 43 */
+(INFRA_NETWORK | ADHOC_NETWORK), /* AR6000_XIOCTL_WMI_SET_RSSITHRESHOLD 44 */
+(INFRA_NETWORK | ADHOC_NETWORK), /* AR6000_XIOCTL_WMI_CLR_RSSISNR 45 */
+(INFRA_NETWORK | ADHOC_NETWORK), /* AR6000_XIOCTL_WMI_SET_LQTHRESHOLD 46 */
+(INFRA_NETWORK | ADHOC_NETWORK | AP_NETWORK), /* AR6000_XIOCTL_WMI_SET_RTS 47 */
+(INFRA_NETWORK | ADHOC_NETWORK | AP_NETWORK), /* AR6000_XIOCTL_WMI_SET_LPREAMBLE 48 */
+(INFRA_NETWORK | ADHOC_NETWORK | AP_NETWORK), /* AR6000_XIOCTL_WMI_SET_AUTHMODE 49 */
+(INFRA_NETWORK | ADHOC_NETWORK), /* AR6000_XIOCTL_WMI_SET_REASSOCMODE 50 */
+(INFRA_NETWORK | ADHOC_NETWORK), /* AR6000_XIOCTL_WMI_SET_WMM 51 */
+(INFRA_NETWORK | ADHOC_NETWORK), /* AR6000_XIOCTL_WMI_SET_HB_CHALLENGE_RESP_PARAMS 52 */
+(INFRA_NETWORK | ADHOC_NETWORK), /* AR6000_XIOCTL_WMI_GET_HB_CHALLENGE_RESP 53 */
+(INFRA_NETWORK | ADHOC_NETWORK | AP_NETWORK), /* AR6000_XIOCTL_WMI_GET_RD 54 */
+(0xFF), /* AR6000_XIOCTL_DIAG_READ 55 */
+(0xFF), /* AR6000_XIOCTL_DIAG_WRITE 56 */
+(INFRA_NETWORK | ADHOC_NETWORK), /* AR6000_XIOCTL_WMI_SET_TXOP 57 */
+(INFRA_NETWORK), /* AR6000_XIOCTL_USER_SETKEYS 58 */
+(INFRA_NETWORK), /* AR6000_XIOCTL_WMI_SET_KEEPALIVE 59 */
+(INFRA_NETWORK), /* AR6000_XIOCTL_WMI_GET_KEEPALIVE 60 */
+(0xFF), /* AR6000_XIOCTL_BMI_ROMPATCH_INSTALL 61 */
+(0xFF), /* AR6000_XIOCTL_BMI_ROMPATCH_UNINSTALL 62 */
+(0xFF), /* AR6000_XIOCTL_BMI_ROMPATCH_ACTIVATE 63 */
+(0xFF), /* AR6000_XIOCTL_BMI_ROMPATCH_DEACTIVATE 64 */
+(0xFF), /* AR6000_XIOCTL_WMI_SET_APPIE 65 */
+(0xFF), /* AR6000_XIOCTL_WMI_SET_MGMT_FRM_RX_FILTER 66 */
+(0xFF), /* AR6000_XIOCTL_DBGLOG_CFG_MODULE 67 */
+(0xFF), /* AR6000_XIOCTL_DBGLOG_GET_DEBUG_LOGS 68 */
+(0xFF), /* Dummy 69 */
+(0xFF), /* AR6000_XIOCTL_WMI_SET_WSC_STATUS 70 */
+(INFRA_NETWORK | ADHOC_NETWORK), /* AR6000_XIOCTL_WMI_SET_BT_STATUS 71 */
+(INFRA_NETWORK | ADHOC_NETWORK), /* AR6000_XIOCTL_WMI_SET_BT_PARAMS 72 */
+(INFRA_NETWORK | ADHOC_NETWORK), /* AR6000_XIOCTL_WMI_SET_HOST_SLEEP_MODE 73 */
+(INFRA_NETWORK | ADHOC_NETWORK), /* AR6000_XIOCTL_WMI_SET_WOW_MODE 74 */
+(INFRA_NETWORK | ADHOC_NETWORK), /* AR6000_XIOCTL_WMI_GET_WOW_LIST 75 */
+(INFRA_NETWORK | ADHOC_NETWORK), /* AR6000_XIOCTL_WMI_ADD_WOW_PATTERN 76 */
+(INFRA_NETWORK | ADHOC_NETWORK), /* AR6000_XIOCTL_WMI_DEL_WOW_PATTERN 77 */
+(0xFF), /* AR6000_XIOCTL_TARGET_INFO 78 */
+(0xFF), /* AR6000_XIOCTL_DUMP_HTC_CREDIT_STATE 79 */
+(0xFF), /* AR6000_XIOCTL_TRAFFIC_ACTIVITY_CHANGE 80 */
+(INFRA_NETWORK | ADHOC_NETWORK), /* AR6000_XIOCTL_WMI_SET_CONNECT_CTRL_FLAGS 81 */
+(INFRA_NETWORK | ADHOC_NETWORK), /* AR6000_XIOCTL_WMI_SET_AKMP_PARAMS 82 */
+(INFRA_NETWORK | ADHOC_NETWORK), /* AR6000_XIOCTL_WMI_GET_PMKID_LIST 83 */
+(INFRA_NETWORK | ADHOC_NETWORK), /* AR6000_XIOCTL_WMI_SET_PMKID_LIST 84 */
+(0xFF), /* Dummy 85 */
+(0xFF), /* Dummy 86 */
+(0xFF), /* Dummy 87 */
+(0xFF), /* Dummy 88 */
+(0xFF), /* Dummy 89 */
+(0xFF), /* AR6000_XIOCTL_UNUSED90 90 */
+(0xFF), /* AR6000_XIOCTL_BMI_LZ_STREAM_START 91 */
+(0xFF), /* AR6000_XIOCTL_BMI_LZ_DATA 92 */
+(INFRA_NETWORK | ADHOC_NETWORK), /* AR6000_XIOCTL_PROF_CFG 93 */
+(INFRA_NETWORK | ADHOC_NETWORK), /* AR6000_XIOCTL_PROF_ADDR_SET 94 */
+(INFRA_NETWORK | ADHOC_NETWORK), /* AR6000_XIOCTL_PROF_START 95 */
+(INFRA_NETWORK | ADHOC_NETWORK), /* AR6000_XIOCTL_PROF_STOP 96 */
+(INFRA_NETWORK | ADHOC_NETWORK), /* AR6000_XIOCTL_PROF_COUNT_GET 97 */
+(INFRA_NETWORK | ADHOC_NETWORK), /* AR6000_XIOCTL_WMI_ABORT_SCAN 98 */
+(AP_NETWORK), /* AR6000_XIOCTL_AP_GET_STA_LIST 99 */
+(AP_NETWORK), /* AR6000_XIOCTL_AP_HIDDEN_SSID 100 */
+(AP_NETWORK), /* AR6000_XIOCTL_AP_SET_NUM_STA 101 */
+(AP_NETWORK), /* AR6000_XIOCTL_AP_SET_ACL_MAC 102 */
+(AP_NETWORK), /* AR6000_XIOCTL_AP_GET_ACL_LIST 103 */
+(AP_NETWORK), /* AR6000_XIOCTL_AP_COMMIT_CONFIG 104 */
+(AP_NETWORK), /* IEEE80211_IOCTL_GETWPAIE 105 */
+(AP_NETWORK), /* AR6000_XIOCTL_AP_CONN_INACT_TIME 106 */
+(AP_NETWORK), /* AR6000_XIOCTL_AP_PROT_SCAN_TIME 107 */
+(AP_NETWORK), /* AR6000_XIOCTL_WMI_SET_COUNTRY 108 */
+(AP_NETWORK), /* AR6000_XIOCTL_AP_SET_DTIM 109 */
+(0xFF), /* AR6000_XIOCTL_WMI_TARGET_EVENT_REPORT 110 */
+(INFRA_NETWORK | ADHOC_NETWORK), /* AR6000_XIOCTL_SET_IP 111 */
+(AP_NETWORK), /* AR6000_XIOCTL_AP_SET_ACL_POLICY 112 */
+(AP_NETWORK), /* AR6000_XIOCTL_AP_INTRA_BSS_COMM 113 */
+(0xFF), /* AR6000_XIOCTL_DUMP_MODULE_DEBUG_INFO 114 */
+(0xFF), /* AR6000_XIOCTL_MODULE_DEBUG_SET_MASK 115 */
+(0xFF), /* AR6000_XIOCTL_MODULE_DEBUG_GET_MASK 116 */
+(0xFF), /* AR6000_XIOCTL_DUMP_RCV_AGGR_STATS 117 */
+(0xFF), /* AR6000_XIOCTL_SET_HT_CAP 118 */
+(0xFF), /* AR6000_XIOCTL_SET_HT_OP 119 */
+(AP_NETWORK), /* AR6000_XIOCTL_AP_GET_STAT 120 */
+(0xFF), /* AR6000_XIOCTL_SET_TX_SELECT_RATES 121 */
+(0xFF), /* AR6000_XIOCTL_SETUP_AGGR 122 */
+(0xFF), /* AR6000_XIOCTL_ALLOW_AGGR 123 */
+(AP_NETWORK), /* AR6000_XIOCTL_AP_GET_HIDDEN_SSID 124 */
+(AP_NETWORK), /* AR6000_XIOCTL_AP_GET_COUNTRY 125 */
+(AP_NETWORK), /* AR6000_XIOCTL_AP_GET_WMODE 126 */
+(AP_NETWORK), /* AR6000_XIOCTL_AP_GET_DTIM 127 */
+(AP_NETWORK | ADHOC_NETWORK), /* AR6000_XIOCTL_AP_GET_BINTVL 128 */
+(0xFF), /* AR6000_XIOCTL_AP_GET_RTS 129 */
+(0xFF), /* AR6000_XIOCTL_DELE_AGGR 130 */
+(0xFF), /* AR6000_XIOCTL_FETCH_TARGET_REGS 131 */
+(0xFF), /* AR6000_XIOCTL_HCI_CMD 132 */
+(0xFF), /* AR6000_XIOCTL_ACL_DATA 133 */
+(0xFF), /* AR6000_XIOCTL_WLAN_CONN_PRECEDENCE 134 */
+(AP_NETWORK), /* AR6000_XIOCTL_AP_SET_11BG_RATESET 135 */
+(0xFF),
+(0xFF),
+(INFRA_NETWORK | ADHOC_NETWORK), /* AR6000_XIOCTL_WMI_SET_BTCOEX_FE_ANT 138 */
+(INFRA_NETWORK | ADHOC_NETWORK), /* AR6000_XIOCTL_WMI_SET_BTCOEX_COLOCATED_BT_DEV 139 */
+(INFRA_NETWORK | ADHOC_NETWORK), /* AR6000_XIOCTL_WMI_SET_BTCOEX_BTINQUIRY_PAGE_CONFIG 140 */
+(INFRA_NETWORK | ADHOC_NETWORK), /* AR6000_XIOCTL_WMI_SET_BTCOEX_SCO_CONFIG 141 */
+(INFRA_NETWORK | ADHOC_NETWORK), /* AR6000_XIOCTL_WMI_SET_BTCOEX_A2DP_CONFIG 142 */
+(INFRA_NETWORK | ADHOC_NETWORK), /* AR6000_XIOCTL_WMI_SET_BTCOEX_ACLCOEX_CONFIG 143 */
+(INFRA_NETWORK | ADHOC_NETWORK), /* AR6000_XIOCTL_WMI_SET_BTCOEX_DEBUG 144 */
+(INFRA_NETWORK | ADHOC_NETWORK), /* AR6000_XIOCTL_WMI_SET_BT_OPERATING_STATUS 145 */
+(INFRA_NETWORK | ADHOC_NETWORK), /* AR6000_XIOCTL_WMI_GET_BTCOEX_CONFIG 146 */
+(INFRA_NETWORK | ADHOC_NETWORK), /* AR6000_XIOCTL_WMI_GET_BTCOEX_GET_STATS 147 */
+(0xFF), /* AR6000_XIOCTL_WMI_SET_QOS_SUPP 148 */
+(0xFF), /* AR6000_XIOCTL_GET_WLAN_SLEEP_STATE 149 */
+(0xFF), /* AR6000_XIOCTL_SET_BT_HW_POWER_STATE 150 */
+(0xFF), /* AR6000_XIOCTL_GET_BT_HW_POWER_STATE 151 */
+(0xFF), /* AR6000_XIOCTL_ADD_AP_INTERFACE 152 */
+(0xFF), /* AR6000_XIOCTL_REMOVE_AP_INTERFACE 153 */
+(0xFF), /* AR6000_XIOCTL_WMI_SET_TX_SGI_PARAM 154 */
+};
+
+#endif /*_WMI_FILTER_LINUX_H_*/
diff --git a/drivers/staging/ath6kl/os/linux/ioctl.c b/drivers/staging/ath6kl/os/linux/ioctl.c
new file mode 100644
index 000000000000..d5f7ac08ab96
--- /dev/null
+++ b/drivers/staging/ath6kl/os/linux/ioctl.c
@@ -0,0 +1,4733 @@
+//------------------------------------------------------------------------------
+// Copyright (c) 2004-2010 Atheros Communications Inc.
+// All rights reserved.
+//
+//
+//
+// Permission to use, copy, modify, and/or distribute this software for any
+// purpose with or without fee is hereby granted, provided that the above
+// copyright notice and this permission notice appear in all copies.
+//
+// THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+// WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+// MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+// ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+// WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+// ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+// OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+//
+//
+//
+// Author(s): ="Atheros"
+//------------------------------------------------------------------------------
+
+#include "ar6000_drv.h"
+#include "ieee80211_ioctl.h"
+#include "ar6kap_common.h"
+#include "targaddrs.h"
+#include "a_hci.h"
+#include "wlan_config.h"
+
+extern int enablerssicompensation;
+A_UINT32 tcmdRxFreq;
+extern unsigned int wmitimeout;
+extern A_WAITQUEUE_HEAD arEvent;
+extern int tspecCompliance;
+extern int bmienable;
+extern int bypasswmi;
+extern int loghci;
+
+static int
+ar6000_ioctl_get_roam_tbl(struct net_device *dev, struct ifreq *rq)
+{
+ AR_SOFTC_T *ar = (AR_SOFTC_T *)ar6k_priv(dev);
+
+ if (ar->arWmiReady == FALSE) {
+ return -EIO;
+ }
+
+ if(wmi_get_roam_tbl_cmd(ar->arWmi) != A_OK) {
+ return -EIO;
+ }
+
+ return 0;
+}
+
+static int
+ar6000_ioctl_get_roam_data(struct net_device *dev, struct ifreq *rq)
+{
+ AR_SOFTC_T *ar = (AR_SOFTC_T *)ar6k_priv(dev);
+
+ if (ar->arWmiReady == FALSE) {
+ return -EIO;
+ }
+
+
+ /* currently assume only roam times are required */
+ if(wmi_get_roam_data_cmd(ar->arWmi, ROAM_DATA_TIME) != A_OK) {
+ return -EIO;
+ }
+
+
+ return 0;
+}
+
+static int
+ar6000_ioctl_set_roam_ctrl(struct net_device *dev, char *userdata)
+{
+ AR_SOFTC_T *ar = (AR_SOFTC_T *)ar6k_priv(dev);
+ WMI_SET_ROAM_CTRL_CMD cmd;
+ A_UINT8 size = sizeof(cmd);
+
+ if (ar->arWmiReady == FALSE) {
+ return -EIO;
+ }
+
+
+ if (copy_from_user(&cmd, userdata, size)) {
+ return -EFAULT;
+ }
+
+ if (cmd.roamCtrlType == WMI_SET_HOST_BIAS) {
+ if (cmd.info.bssBiasInfo.numBss > 1) {
+ size += (cmd.info.bssBiasInfo.numBss - 1) * sizeof(WMI_BSS_BIAS);
+ }
+ }
+
+ if (copy_from_user(&cmd, userdata, size)) {
+ return -EFAULT;
+ }
+
+ if(wmi_set_roam_ctrl_cmd(ar->arWmi, &cmd, size) != A_OK) {
+ return -EIO;
+ }
+
+ return 0;
+}
+
+static int
+ar6000_ioctl_set_powersave_timers(struct net_device *dev, char *userdata)
+{
+ AR_SOFTC_T *ar = (AR_SOFTC_T *)ar6k_priv(dev);
+ WMI_POWERSAVE_TIMERS_POLICY_CMD cmd;
+ A_UINT8 size = sizeof(cmd);
+
+ if (ar->arWmiReady == FALSE) {
+ return -EIO;
+ }
+
+ if (copy_from_user(&cmd, userdata, size)) {
+ return -EFAULT;
+ }
+
+ if (copy_from_user(&cmd, userdata, size)) {
+ return -EFAULT;
+ }
+
+ if(wmi_set_powersave_timers_cmd(ar->arWmi, &cmd, size) != A_OK) {
+ return -EIO;
+ }
+
+ return 0;
+}
+
+static int
+ar6000_ioctl_set_qos_supp(struct net_device *dev, struct ifreq *rq)
+{
+ AR_SOFTC_T *ar = (AR_SOFTC_T *)ar6k_priv(dev);
+ WMI_SET_QOS_SUPP_CMD cmd;
+ A_STATUS ret;
+
+ if ((dev->flags & IFF_UP) != IFF_UP) {
+ return -EIO;
+ }
+ if (ar->arWmiReady == FALSE) {
+ return -EIO;
+ }
+
+ if (copy_from_user(&cmd, (char *)((unsigned int*)rq->ifr_data + 1),
+ sizeof(cmd)))
+ {
+ return -EFAULT;
+ }
+
+ ret = wmi_set_qos_supp_cmd(ar->arWmi, cmd.status);
+
+ switch (ret) {
+ case A_OK:
+ return 0;
+ case A_EBUSY :
+ return -EBUSY;
+ case A_NO_MEMORY:
+ return -ENOMEM;
+ case A_EINVAL:
+ default:
+ return -EFAULT;
+ }
+}
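The same A_STATUS-to-errno switch recurs in most handlers in this file. A hedged sketch that folds the mapping into one hypothetical helper (the driver itself open-codes the switch each time):

    static int example_a_status_to_errno(A_STATUS ret)
    {
        switch (ret) {
        case A_OK:
            return 0;
        case A_EBUSY:
            return -EBUSY;
        case A_NO_MEMORY:
            return -ENOMEM;
        case A_EINVAL:
        default:
            return -EFAULT;
        }
    }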
+
+static int
+ar6000_ioctl_set_wmm(struct net_device *dev, struct ifreq *rq)
+{
+ AR_SOFTC_T *ar = (AR_SOFTC_T *)ar6k_priv(dev);
+ WMI_SET_WMM_CMD cmd;
+ A_STATUS ret;
+
+ if ((dev->flags & IFF_UP) != IFF_UP) {
+ return -EIO;
+ }
+ if (ar->arWmiReady == FALSE) {
+ return -EIO;
+ }
+
+ if (copy_from_user(&cmd, (char *)((unsigned int*)rq->ifr_data + 1),
+ sizeof(cmd)))
+ {
+ return -EFAULT;
+ }
+
+ if (cmd.status == WMI_WMM_ENABLED) {
+ ar->arWmmEnabled = TRUE;
+ } else {
+ ar->arWmmEnabled = FALSE;
+ }
+
+ ret = wmi_set_wmm_cmd(ar->arWmi, cmd.status);
+
+ switch (ret) {
+ case A_OK:
+ return 0;
+ case A_EBUSY :
+ return -EBUSY;
+ case A_NO_MEMORY:
+ return -ENOMEM;
+ case A_EINVAL:
+ default:
+ return -EFAULT;
+ }
+}
+
+static int
+ar6000_ioctl_set_txop(struct net_device *dev, struct ifreq *rq)
+{
+ AR_SOFTC_T *ar = (AR_SOFTC_T *)ar6k_priv(dev);
+ WMI_SET_WMM_TXOP_CMD cmd;
+ A_STATUS ret;
+
+ if ((dev->flags & IFF_UP) != IFF_UP) {
+ return -EIO;
+ }
+ if (ar->arWmiReady == FALSE) {
+ return -EIO;
+ }
+
+ if (copy_from_user(&cmd, (char *)((unsigned int*)rq->ifr_data + 1),
+ sizeof(cmd)))
+ {
+ return -EFAULT;
+ }
+
+ ret = wmi_set_wmm_txop(ar->arWmi, cmd.txopEnable);
+
+ switch (ret) {
+ case A_OK:
+ return 0;
+ case A_EBUSY :
+ return -EBUSY;
+ case A_NO_MEMORY:
+ return -ENOMEM;
+ case A_EINVAL:
+ default:
+ return -EFAULT;
+ }
+}
+
+static int
+ar6000_ioctl_get_rd(struct net_device *dev, struct ifreq *rq)
+{
+ AR_SOFTC_T *ar = (AR_SOFTC_T *)ar6k_priv(dev);
+ A_STATUS ret = 0;
+
+ if ((dev->flags & IFF_UP) != IFF_UP || ar->arWmiReady == FALSE) {
+ return -EIO;
+ }
+
+ if(copy_to_user((char *)((unsigned int*)rq->ifr_data + 1),
+ &ar->arRegCode, sizeof(ar->arRegCode)))
+ ret = -EFAULT;
+
+ return ret;
+}
+
+static int
+ar6000_ioctl_set_country(struct net_device *dev, struct ifreq *rq)
+{
+ AR_SOFTC_T *ar = (AR_SOFTC_T *)ar6k_priv(dev);
+ WMI_AP_SET_COUNTRY_CMD cmd;
+ A_STATUS ret;
+
+ if ((dev->flags & IFF_UP) != IFF_UP) {
+ return -EIO;
+ }
+ if (ar->arWmiReady == FALSE) {
+ return -EIO;
+ }
+
+ if (copy_from_user(&cmd, (char *)((unsigned int*)rq->ifr_data + 1),
+ sizeof(cmd)))
+ {
+ return -EFAULT;
+ }
+
+ ar->ap_profile_flag = 1; /* There is a change in profile */
+
+ ret = wmi_set_country(ar->arWmi, cmd.countryCode);
+ A_MEMCPY(ar->ap_country_code, cmd.countryCode, 3);
+
+ switch (ret) {
+ case A_OK:
+ return 0;
+ case A_EBUSY :
+ return -EBUSY;
+ case A_NO_MEMORY:
+ return -ENOMEM;
+ case A_EINVAL:
+ default:
+ return -EFAULT;
+ }
+}
+
+
+/* Get power mode command */
+static int
+ar6000_ioctl_get_power_mode(struct net_device *dev, struct ifreq *rq)
+{
+ AR_SOFTC_T *ar = (AR_SOFTC_T *)ar6k_priv(dev);
+ WMI_POWER_MODE_CMD power_mode;
+ int ret = 0;
+
+ if (ar->arWmiReady == FALSE) {
+ return -EIO;
+ }
+
+ power_mode.powerMode = wmi_get_power_mode_cmd(ar->arWmi);
+ if (copy_to_user(rq->ifr_data, &power_mode, sizeof(WMI_POWER_MODE_CMD))) {
+ ret = -EFAULT;
+ }
+
+ return ret;
+}
+
+
+static int
+ar6000_ioctl_set_channelParams(struct net_device *dev, struct ifreq *rq)
+{
+ AR_SOFTC_T *ar = (AR_SOFTC_T *)ar6k_priv(dev);
+ WMI_CHANNEL_PARAMS_CMD cmd, *cmdp;
+ int ret = 0;
+
+ if (ar->arWmiReady == FALSE) {
+ return -EIO;
+ }
+
+
+ if (copy_from_user(&cmd, rq->ifr_data, sizeof(cmd))) {
+ return -EFAULT;
+ }
+
+ if( (ar->arNextMode == AP_NETWORK) && (cmd.numChannels || cmd.scanParam) ) {
+ A_PRINTF("ERROR: Only wmode is allowed in AP mode\n");
+ return -EIO;
+ }
+
+ if (cmd.numChannels > 1) {
+ cmdp = A_MALLOC(130);
+ if (cmdp == NULL) {
+ return -ENOMEM;
+ }
+ if (copy_from_user(cmdp, rq->ifr_data,
+ sizeof (*cmdp) +
+ ((cmd.numChannels - 1) * sizeof(A_UINT16))))
+ {
+ kfree(cmdp);
+ return -EFAULT;
+ }
+ } else {
+ cmdp = &cmd;
+ }
+
+ if ((ar->arPhyCapability == WMI_11G_CAPABILITY) &&
+ ((cmdp->phyMode == WMI_11A_MODE) || (cmdp->phyMode == WMI_11AG_MODE)))
+ {
+ ret = -EINVAL;
+ }
+
+ if (!ret &&
+ (wmi_set_channelParams_cmd(ar->arWmi, cmdp->scanParam, cmdp->phyMode,
+ cmdp->numChannels, cmdp->channelList)
+ != A_OK))
+ {
+ ret = -EIO;
+ }
+
+ ar->ap_wmode = cmdp->phyMode;
+
+ if (cmd.numChannels > 1) {
+ kfree(cmdp);
+ }
+
+ /* Set the profile change flag to allow a commit cmd */
+ ar->ap_profile_flag = 1;
+
+ return ret;
+}
+
+
+static int
+ar6000_ioctl_set_snr_threshold(struct net_device *dev, struct ifreq *rq)
+{
+
+ AR_SOFTC_T *ar = (AR_SOFTC_T *)ar6k_priv(dev);
+ WMI_SNR_THRESHOLD_PARAMS_CMD cmd;
+ int ret = 0;
+
+ if (ar->arWmiReady == FALSE) {
+ return -EIO;
+ }
+
+ if (copy_from_user(&cmd, rq->ifr_data, sizeof(cmd))) {
+ return -EFAULT;
+ }
+
+ if( wmi_set_snr_threshold_params(ar->arWmi, &cmd) != A_OK ) {
+ ret = -EIO;
+ }
+
+ return ret;
+}
+
+static int
+ar6000_ioctl_set_rssi_threshold(struct net_device *dev, struct ifreq *rq)
+{
+#define SWAP_THOLD(thold1, thold2) do { \
+ USER_RSSI_THOLD tmpThold; \
+ tmpThold.tag = thold1.tag; \
+ tmpThold.rssi = thold1.rssi; \
+ thold1.tag = thold2.tag; \
+ thold1.rssi = thold2.rssi; \
+ thold2.tag = tmpThold.tag; \
+ thold2.rssi = tmpThold.rssi; \
+} while (0)
+
+ AR_SOFTC_T *ar = (AR_SOFTC_T *)ar6k_priv(dev);
+ WMI_RSSI_THRESHOLD_PARAMS_CMD cmd;
+ USER_RSSI_PARAMS rssiParams;
+ A_INT32 i, j;
+ int ret = 0;
+
+ if (ar->arWmiReady == FALSE) {
+ return -EIO;
+ }
+
+ if (copy_from_user((char *)&rssiParams, (char *)((unsigned int *)rq->ifr_data + 1), sizeof(USER_RSSI_PARAMS))) {
+ return -EFAULT;
+ }
+ cmd.weight = rssiParams.weight;
+ cmd.pollTime = rssiParams.pollTime;
+
+ A_MEMCPY(ar->rssi_map, &rssiParams.tholds, sizeof(ar->rssi_map));
+ /*
+ * only 6 elements, so use bubble sorting, in ascending order
+ */
+ for (i = 5; i > 0; i--) {
+ for (j = 0; j < i; j++) { /* above tholds */
+ if (ar->rssi_map[j+1].rssi < ar->rssi_map[j].rssi) {
+ SWAP_THOLD(ar->rssi_map[j+1], ar->rssi_map[j]);
+ } else if (ar->rssi_map[j+1].rssi == ar->rssi_map[j].rssi) {
+ return -EFAULT;
+ }
+ }
+ }
+ for (i = 11; i > 6; i--) {
+ for (j = 6; j < i; j++) { /* below tholds */
+ if (ar->rssi_map[j+1].rssi < ar->rssi_map[j].rssi) {
+ SWAP_THOLD(ar->rssi_map[j+1], ar->rssi_map[j]);
+ } else if (ar->rssi_map[j+1].rssi == ar->rssi_map[j].rssi) {
+ return -EFAULT;
+ }
+ }
+ }
+
+#ifdef DEBUG
+ for (i = 0; i < 12; i++) {
+ AR_DEBUG_PRINTF(ATH_DEBUG_INFO,("thold[%d].tag: %d, thold[%d].rssi: %d \n",
+ i, ar->rssi_map[i].tag, i, ar->rssi_map[i].rssi));
+ }
+#endif
+
+ if (enablerssicompensation) {
+ for (i = 0; i < 6; i++)
+ ar->rssi_map[i].rssi = rssi_compensation_reverse_calc(ar, ar->rssi_map[i].rssi, TRUE);
+ for (i = 6; i < 12; i++)
+ ar->rssi_map[i].rssi = rssi_compensation_reverse_calc(ar, ar->rssi_map[i].rssi, FALSE);
+ }
+
+ cmd.thresholdAbove1_Val = ar->rssi_map[0].rssi;
+ cmd.thresholdAbove2_Val = ar->rssi_map[1].rssi;
+ cmd.thresholdAbove3_Val = ar->rssi_map[2].rssi;
+ cmd.thresholdAbove4_Val = ar->rssi_map[3].rssi;
+ cmd.thresholdAbove5_Val = ar->rssi_map[4].rssi;
+ cmd.thresholdAbove6_Val = ar->rssi_map[5].rssi;
+ cmd.thresholdBelow1_Val = ar->rssi_map[6].rssi;
+ cmd.thresholdBelow2_Val = ar->rssi_map[7].rssi;
+ cmd.thresholdBelow3_Val = ar->rssi_map[8].rssi;
+ cmd.thresholdBelow4_Val = ar->rssi_map[9].rssi;
+ cmd.thresholdBelow5_Val = ar->rssi_map[10].rssi;
+ cmd.thresholdBelow6_Val = ar->rssi_map[11].rssi;
+
+ if( wmi_set_rssi_threshold_params(ar->arWmi, &cmd) != A_OK ) {
+ ret = -EIO;
+ }
+
+ return ret;
+}
+
+static int
+ar6000_ioctl_set_lq_threshold(struct net_device *dev, struct ifreq *rq)
+{
+
+ AR_SOFTC_T *ar = (AR_SOFTC_T *)ar6k_priv(dev);
+ WMI_LQ_THRESHOLD_PARAMS_CMD cmd;
+ int ret = 0;
+
+ if (ar->arWmiReady == FALSE) {
+ return -EIO;
+ }
+
+ if (copy_from_user(&cmd, (char *)((unsigned int *)rq->ifr_data + 1), sizeof(cmd))) {
+ return -EFAULT;
+ }
+
+ if( wmi_set_lq_threshold_params(ar->arWmi, &cmd) != A_OK ) {
+ ret = -EIO;
+ }
+
+ return ret;
+}
+
+
+static int
+ar6000_ioctl_set_probedSsid(struct net_device *dev, struct ifreq *rq)
+{
+ AR_SOFTC_T *ar = (AR_SOFTC_T *)ar6k_priv(dev);
+ WMI_PROBED_SSID_CMD cmd;
+ int ret = 0;
+
+ if (ar->arWmiReady == FALSE) {
+ return -EIO;
+ }
+
+ if (copy_from_user(&cmd, rq->ifr_data, sizeof(cmd))) {
+ return -EFAULT;
+ }
+
+ if (wmi_probedSsid_cmd(ar->arWmi, cmd.entryIndex, cmd.flag, cmd.ssidLength,
+ cmd.ssid) != A_OK)
+ {
+ ret = -EIO;
+ }
+
+ return ret;
+}
+
+static int
+ar6000_ioctl_set_badAp(struct net_device *dev, struct ifreq *rq)
+{
+ AR_SOFTC_T *ar = (AR_SOFTC_T *)ar6k_priv(dev);
+ WMI_ADD_BAD_AP_CMD cmd;
+ int ret = 0;
+
+ if (ar->arWmiReady == FALSE) {
+ return -EIO;
+ }
+
+
+ if (copy_from_user(&cmd, rq->ifr_data, sizeof(cmd))) {
+ return -EFAULT;
+ }
+
+ if (cmd.badApIndex > WMI_MAX_BAD_AP_INDEX) {
+ return -EIO;
+ }
+
+ if (A_MEMCMP(cmd.bssid, null_mac, AR6000_ETH_ADDR_LEN) == 0) {
+ /*
+ * This is a delete badAP.
+ */
+ if (wmi_deleteBadAp_cmd(ar->arWmi, cmd.badApIndex) != A_OK) {
+ ret = -EIO;
+ }
+ } else {
+ if (wmi_addBadAp_cmd(ar->arWmi, cmd.badApIndex, cmd.bssid) != A_OK) {
+ ret = -EIO;
+ }
+ }
+
+ return ret;
+}
+
+static int
+ar6000_ioctl_create_qos(struct net_device *dev, struct ifreq *rq)
+{
+ AR_SOFTC_T *ar = (AR_SOFTC_T *)ar6k_priv(dev);
+ WMI_CREATE_PSTREAM_CMD cmd;
+ A_STATUS ret;
+
+ if (ar->arWmiReady == FALSE) {
+ return -EIO;
+ }
+
+
+ if (copy_from_user(&cmd, rq->ifr_data, sizeof(cmd))) {
+ return -EFAULT;
+ }
+
+ ret = wmi_verify_tspec_params(&cmd, tspecCompliance);
+ if (ret == A_OK)
+ ret = wmi_create_pstream_cmd(ar->arWmi, &cmd);
+
+ switch (ret) {
+ case A_OK:
+ return 0;
+ case A_EBUSY :
+ return -EBUSY;
+ case A_NO_MEMORY:
+ return -ENOMEM;
+ case A_EINVAL:
+ default:
+ return -EFAULT;
+ }
+}
+
+static int
+ar6000_ioctl_delete_qos(struct net_device *dev, struct ifreq *rq)
+{
+ AR_SOFTC_T *ar = (AR_SOFTC_T *)ar6k_priv(dev);
+ WMI_DELETE_PSTREAM_CMD cmd;
+ int ret = 0;
+
+ if (ar->arWmiReady == FALSE) {
+ return -EIO;
+ }
+
+ if (copy_from_user(&cmd, rq->ifr_data, sizeof(cmd))) {
+ return -EFAULT;
+ }
+
+ ret = wmi_delete_pstream_cmd(ar->arWmi, cmd.trafficClass, cmd.tsid);
+
+ switch (ret) {
+ case A_OK:
+ return 0;
+ case A_EBUSY :
+ return -EBUSY;
+ case A_NO_MEMORY:
+ return -ENOMEM;
+ case A_EINVAL:
+ default:
+ return -EFAULT;
+ }
+}
+
+static int
+ar6000_ioctl_get_qos_queue(struct net_device *dev, struct ifreq *rq)
+{
+ AR_SOFTC_T *ar = (AR_SOFTC_T *)ar6k_priv(dev);
+ struct ar6000_queuereq qreq;
+ int ret = 0;
+
+ if (ar->arWmiReady == FALSE) {
+ return -EIO;
+ }
+
+ if( copy_from_user(&qreq, rq->ifr_data,
+ sizeof(struct ar6000_queuereq)))
+ return -EFAULT;
+
+ qreq.activeTsids = wmi_get_mapped_qos_queue(ar->arWmi, qreq.trafficClass);
+
+ if (copy_to_user(rq->ifr_data, &qreq,
+ sizeof(struct ar6000_queuereq)))
+ {
+ ret = -EFAULT;
+ }
+
+ return ret;
+}
+
+#ifdef CONFIG_HOST_TCMD_SUPPORT
+static A_STATUS
+ar6000_ioctl_tcmd_get_rx_report(struct net_device *dev,
+ struct ifreq *rq, A_UINT8 *data, A_UINT32 len)
+{
+ AR_SOFTC_T *ar = (AR_SOFTC_T *)ar6k_priv(dev);
+ A_UINT32 buf[4+TCMD_MAX_RATES];
+ int ret = 0;
+
+ if (ar->bIsDestroyProgress) {
+ return -EBUSY;
+ }
+
+ if (ar->arWmiReady == FALSE) {
+ return -EIO;
+ }
+
+ if (down_interruptible(&ar->arSem)) {
+ return -ERESTARTSYS;
+ }
+
+ if (ar->bIsDestroyProgress) {
+ up(&ar->arSem);
+ return -EBUSY;
+ }
+
+ ar->tcmdRxReport = 0;
+ if (wmi_test_cmd(ar->arWmi, data, len) != A_OK) {
+ up(&ar->arSem);
+ return -EIO;
+ }
+
+ wait_event_interruptible_timeout(arEvent, ar->tcmdRxReport != 0, wmitimeout * HZ);
+
+ if (signal_pending(current)) {
+ ret = -EINTR;
+ }
+
+ buf[0] = ar->tcmdRxTotalPkt;
+ buf[1] = ar->tcmdRxRssi;
+ buf[2] = ar->tcmdRxcrcErrPkt;
+ buf[3] = ar->tcmdRxsecErrPkt;
+ A_MEMCPY(((A_UCHAR *)buf)+(4*sizeof(A_UINT32)), ar->tcmdRateCnt, sizeof(ar->tcmdRateCnt));
+ A_MEMCPY(((A_UCHAR *)buf)+(4*sizeof(A_UINT32))+(TCMD_MAX_RATES *sizeof(A_UINT16)), ar->tcmdRateCntShortGuard, sizeof(ar->tcmdRateCntShortGuard));
+
+ if (!ret && copy_to_user(rq->ifr_data, buf, sizeof(buf))) {
+ ret = -EFAULT;
+ }
+
+ up(&ar->arSem);
+
+ return ret;
+}
+
+void
+ar6000_tcmd_rx_report_event(void *devt, A_UINT8 * results, int len)
+{
+ AR_SOFTC_T *ar = (AR_SOFTC_T *)devt;
+ TCMD_CONT_RX * rx_rep = (TCMD_CONT_RX *)results;
+
+ if (enablerssicompensation) {
+ rx_rep->u.report.rssiInDBm = rssi_compensation_calc_tcmd(tcmdRxFreq, rx_rep->u.report.rssiInDBm,rx_rep->u.report.totalPkt);
+ }
+
+
+ ar->tcmdRxTotalPkt = rx_rep->u.report.totalPkt;
+ ar->tcmdRxRssi = rx_rep->u.report.rssiInDBm;
+ ar->tcmdRxcrcErrPkt = rx_rep->u.report.crcErrPkt;
+ ar->tcmdRxsecErrPkt = rx_rep->u.report.secErrPkt;
+ ar->tcmdRxReport = 1;
+ A_MEMZERO(ar->tcmdRateCnt, sizeof(ar->tcmdRateCnt));
+ A_MEMZERO(ar->tcmdRateCntShortGuard, sizeof(ar->tcmdRateCntShortGuard));
+ A_MEMCPY(ar->tcmdRateCnt, rx_rep->u.report.rateCnt, sizeof(ar->tcmdRateCnt));
+ A_MEMCPY(ar->tcmdRateCntShortGuard, rx_rep->u.report.rateCntShortGuard, sizeof(ar->tcmdRateCntShortGuard));
+
+ wake_up(&arEvent);
+}
+#endif /* CONFIG_HOST_TCMD_SUPPORT*/
+
+static int
+ar6000_ioctl_set_error_report_bitmask(struct net_device *dev, struct ifreq *rq)
+{
+ AR_SOFTC_T *ar = (AR_SOFTC_T *)ar6k_priv(dev);
+ WMI_TARGET_ERROR_REPORT_BITMASK cmd;
+ int ret = 0;
+
+ if (ar->arWmiReady == FALSE) {
+ return -EIO;
+ }
+
+ if (copy_from_user(&cmd, rq->ifr_data, sizeof(cmd))) {
+ return -EFAULT;
+ }
+
+ ret = wmi_set_error_report_bitmask(ar->arWmi, cmd.bitmask);
+
+ return (ret==0 ? ret : -EINVAL);
+}
+
+static int
+ar6000_clear_target_stats(struct net_device *dev)
+{
+ AR_SOFTC_T *ar = (AR_SOFTC_T *)ar6k_priv(dev);
+ TARGET_STATS *pStats = &ar->arTargetStats;
+ int ret = 0;
+
+ if (ar->arWmiReady == FALSE) {
+ return -EIO;
+ }
+ AR6000_SPIN_LOCK(&ar->arLock, 0);
+ A_MEMZERO(pStats, sizeof(TARGET_STATS));
+ AR6000_SPIN_UNLOCK(&ar->arLock, 0);
+ return ret;
+}
+
+static int
+ar6000_ioctl_get_target_stats(struct net_device *dev, struct ifreq *rq)
+{
+ AR_SOFTC_T *ar = (AR_SOFTC_T *)ar6k_priv(dev);
+ TARGET_STATS_CMD cmd;
+ TARGET_STATS *pStats = &ar->arTargetStats;
+ int ret = 0;
+
+ if (ar->bIsDestroyProgress) {
+ return -EBUSY;
+ }
+ if (ar->arWmiReady == FALSE) {
+ return -EIO;
+ }
+ if (copy_from_user(&cmd, rq->ifr_data, sizeof(cmd))) {
+ return -EFAULT;
+ }
+ if (down_interruptible(&ar->arSem)) {
+ return -ERESTARTSYS;
+ }
+ if (ar->bIsDestroyProgress) {
+ up(&ar->arSem);
+ return -EBUSY;
+ }
+
+ ar->statsUpdatePending = TRUE;
+
+ if(wmi_get_stats_cmd(ar->arWmi) != A_OK) {
+ up(&ar->arSem);
+ return -EIO;
+ }
+
+ wait_event_interruptible_timeout(arEvent, ar->statsUpdatePending == FALSE, wmitimeout * HZ);
+
+ if (signal_pending(current)) {
+ ret = -EINTR;
+ }
+
+ if (!ret && copy_to_user(rq->ifr_data, pStats, sizeof(*pStats))) {
+ ret = -EFAULT;
+ }
+
+ if (cmd.clearStats == 1) {
+ ret = ar6000_clear_target_stats(dev);
+ }
+
+ up(&ar->arSem);
+
+ return ret;
+}
+
+static int
+ar6000_ioctl_get_ap_stats(struct net_device *dev, struct ifreq *rq)
+{
+ AR_SOFTC_T *ar = (AR_SOFTC_T *)ar6k_priv(dev);
+ A_UINT32 action; /* Allocate only the desired space on the stack frame. Declaring this as a WMI_AP_MODE_STAT variable would exceed the compiler-imposed limit on the maximum frame size */
+ WMI_AP_MODE_STAT *pStats = &ar->arAPStats;
+ int ret = 0;
+
+ if (ar->arWmiReady == FALSE) {
+ return -EIO;
+ }
+ if (copy_from_user(&action, (char *)((unsigned int*)rq->ifr_data + 1),
+ sizeof(A_UINT32)))
+ {
+ return -EFAULT;
+ }
+ if (action == AP_CLEAR_STATS) {
+ A_UINT8 i;
+ AR6000_SPIN_LOCK(&ar->arLock, 0);
+ for(i = 0; i < AP_MAX_NUM_STA; i++) {
+ pStats->sta[i].tx_bytes = 0;
+ pStats->sta[i].tx_pkts = 0;
+ pStats->sta[i].tx_error = 0;
+ pStats->sta[i].tx_discard = 0;
+ pStats->sta[i].rx_bytes = 0;
+ pStats->sta[i].rx_pkts = 0;
+ pStats->sta[i].rx_error = 0;
+ pStats->sta[i].rx_discard = 0;
+ }
+ AR6000_SPIN_UNLOCK(&ar->arLock, 0);
+ return ret;
+ }
+
+ if (down_interruptible(&ar->arSem)) {
+ return -ERESTARTSYS;
+ }
+
+ ar->statsUpdatePending = TRUE;
+
+ if(wmi_get_stats_cmd(ar->arWmi) != A_OK) {
+ up(&ar->arSem);
+ return -EIO;
+ }
+
+ wait_event_interruptible_timeout(arEvent, ar->statsUpdatePending == FALSE, wmitimeout * HZ);
+
+ if (signal_pending(current)) {
+ ret = -EINTR;
+ }
+
+ if (!ret && copy_to_user(rq->ifr_data, pStats, sizeof(*pStats))) {
+ ret = -EFAULT;
+ }
+
+ up(&ar->arSem);
+
+ return ret;
+}
+
+static int
+ar6000_ioctl_set_access_params(struct net_device *dev, struct ifreq *rq)
+{
+ AR_SOFTC_T *ar = (AR_SOFTC_T *)ar6k_priv(dev);
+ WMI_SET_ACCESS_PARAMS_CMD cmd;
+ int ret = 0;
+
+ if (ar->arWmiReady == FALSE) {
+ return -EIO;
+ }
+
+ if (copy_from_user(&cmd, rq->ifr_data, sizeof(cmd))) {
+ return -EFAULT;
+ }
+
+ if (wmi_set_access_params_cmd(ar->arWmi, cmd.ac, cmd.txop, cmd.eCWmin, cmd.eCWmax,
+ cmd.aifsn) == A_OK)
+ {
+ ret = 0;
+ } else {
+ ret = -EINVAL;
+ }
+
+ return (ret);
+}
+
+static int
+ar6000_ioctl_set_disconnect_timeout(struct net_device *dev, struct ifreq *rq)
+{
+ AR_SOFTC_T *ar = (AR_SOFTC_T *)ar6k_priv(dev);
+ WMI_DISC_TIMEOUT_CMD cmd;
+ int ret = 0;
+
+ if (ar->arWmiReady == FALSE) {
+ return -EIO;
+ }
+
+ if (copy_from_user(&cmd, rq->ifr_data, sizeof(cmd))) {
+ return -EFAULT;
+ }
+
+ if (wmi_disctimeout_cmd(ar->arWmi, cmd.disconnectTimeout) == A_OK)
+ {
+ ret = 0;
+ } else {
+ ret = -EINVAL;
+ }
+
+ return (ret);
+}
+
+static int
+ar6000_xioctl_set_voice_pkt_size(struct net_device *dev, char * userdata)
+{
+ AR_SOFTC_T *ar = (AR_SOFTC_T *)ar6k_priv(dev);
+ WMI_SET_VOICE_PKT_SIZE_CMD cmd;
+ int ret = 0;
+
+ if (ar->arWmiReady == FALSE) {
+ return -EIO;
+ }
+
+ if (copy_from_user(&cmd, userdata, sizeof(cmd))) {
+ return -EFAULT;
+ }
+
+ if (wmi_set_voice_pkt_size_cmd(ar->arWmi, cmd.voicePktSize) == A_OK)
+ {
+ ret = 0;
+ } else {
+ ret = -EINVAL;
+ }
+
+
+ return (ret);
+}
+
+static int
+ar6000_xioctl_set_max_sp_len(struct net_device *dev, char * userdata)
+{
+ AR_SOFTC_T *ar = (AR_SOFTC_T *)ar6k_priv(dev);
+ WMI_SET_MAX_SP_LEN_CMD cmd;
+ int ret = 0;
+
+ if (ar->arWmiReady == FALSE) {
+ return -EIO;
+ }
+
+ if (copy_from_user(&cmd, userdata, sizeof(cmd))) {
+ return -EFAULT;
+ }
+
+ if (wmi_set_max_sp_len_cmd(ar->arWmi, cmd.maxSPLen) == A_OK)
+ {
+ ret = 0;
+ } else {
+ ret = -EINVAL;
+ }
+
+ return (ret);
+}
+
+
+static int
+ar6000_xioctl_set_bt_status_cmd(struct net_device *dev, char * userdata)
+{
+ AR_SOFTC_T *ar = (AR_SOFTC_T *)ar6k_priv(dev);
+ WMI_SET_BT_STATUS_CMD cmd;
+ int ret = 0;
+
+ if (ar->arWmiReady == FALSE) {
+ return -EIO;
+ }
+
+ if (copy_from_user(&cmd, userdata, sizeof(cmd))) {
+ return -EFAULT;
+ }
+
+ if (wmi_set_bt_status_cmd(ar->arWmi, cmd.streamType, cmd.status) == A_OK)
+ {
+ ret = 0;
+ } else {
+ ret = -EINVAL;
+ }
+
+ return (ret);
+}
+
+static int
+ar6000_xioctl_set_bt_params_cmd(struct net_device *dev, char * userdata)
+{
+ AR_SOFTC_T *ar = (AR_SOFTC_T *)ar6k_priv(dev);
+ WMI_SET_BT_PARAMS_CMD cmd;
+ int ret = 0;
+
+ if (ar->arWmiReady == FALSE) {
+ return -EIO;
+ }
+
+ if (copy_from_user(&cmd, userdata, sizeof(cmd))) {
+ return -EFAULT;
+ }
+
+ if (wmi_set_bt_params_cmd(ar->arWmi, &cmd) == A_OK)
+ {
+ ret = 0;
+ } else {
+ ret = -EINVAL;
+ }
+
+ return (ret);
+}
+
+static int
+ar6000_xioctl_set_btcoex_fe_ant_cmd(struct net_device * dev, char * userdata)
+{
+ AR_SOFTC_T *ar = (AR_SOFTC_T *)ar6k_priv(dev);
+ WMI_SET_BTCOEX_FE_ANT_CMD cmd;
+ int ret = 0;
+
+ if (ar->arWmiReady == FALSE) {
+ return -EIO;
+ }
+ if (copy_from_user(&cmd, userdata, sizeof(cmd))) {
+ return -EFAULT;
+ }
+
+ if (wmi_set_btcoex_fe_ant_cmd(ar->arWmi, &cmd) == A_OK)
+ {
+ ret = 0;
+ } else {
+ ret = -EINVAL;
+ }
+
+ return(ret);
+}
+
+static int
+ar6000_xioctl_set_btcoex_colocated_bt_dev_cmd(struct net_device * dev, char * userdata)
+{
+ AR_SOFTC_T *ar = (AR_SOFTC_T *)ar6k_priv(dev);
+ WMI_SET_BTCOEX_COLOCATED_BT_DEV_CMD cmd;
+ int ret = 0;
+
+ if (ar->arWmiReady == FALSE) {
+ return -EIO;
+ }
+
+ if (copy_from_user(&cmd, userdata, sizeof(cmd))) {
+ return -EFAULT;
+ }
+
+ if (wmi_set_btcoex_colocated_bt_dev_cmd(ar->arWmi, &cmd) == A_OK)
+ {
+ ret = 0;
+ } else {
+ ret = -EINVAL;
+ }
+
+ return(ret);
+}
+
+static int
+ar6000_xioctl_set_btcoex_btinquiry_page_config_cmd(struct net_device * dev, char * userdata)
+{
+ AR_SOFTC_T *ar = (AR_SOFTC_T *)ar6k_priv(dev);
+ WMI_SET_BTCOEX_BTINQUIRY_PAGE_CONFIG_CMD cmd;
+ int ret = 0;
+
+ if (ar->arWmiReady == FALSE) {
+ return -EIO;
+ }
+
+ if (copy_from_user(&cmd, userdata, sizeof(cmd))) {
+ return -EFAULT;
+ }
+
+ if (wmi_set_btcoex_btinquiry_page_config_cmd(ar->arWmi, &cmd) == A_OK)
+ {
+ ret = 0;
+ } else {
+ ret = -EINVAL;
+ }
+
+ return(ret);
+}
+
+static int
+ar6000_xioctl_set_btcoex_sco_config_cmd(struct net_device * dev, char * userdata)
+{
+ AR_SOFTC_T *ar = (AR_SOFTC_T *)ar6k_priv(dev);
+ WMI_SET_BTCOEX_SCO_CONFIG_CMD cmd;
+ int ret = 0;
+
+ if (ar->arWmiReady == FALSE) {
+ return -EIO;
+ }
+
+ if (copy_from_user(&cmd, userdata, sizeof(cmd))) {
+ return -EFAULT;
+ }
+
+ if (wmi_set_btcoex_sco_config_cmd(ar->arWmi, &cmd) == A_OK)
+ {
+ ret = 0;
+ } else {
+ ret = -EINVAL;
+ }
+
+ return(ret);
+}
+
+static int
+ar6000_xioctl_set_btcoex_a2dp_config_cmd(struct net_device * dev,
+ char * userdata)
+{
+ AR_SOFTC_T *ar = (AR_SOFTC_T *)ar6k_priv(dev);
+ WMI_SET_BTCOEX_A2DP_CONFIG_CMD cmd;
+ int ret = 0;
+
+ if (ar->arWmiReady == FALSE) {
+ return -EIO;
+ }
+
+ if (copy_from_user(&cmd, userdata, sizeof(cmd))) {
+ return -EFAULT;
+ }
+
+ if (wmi_set_btcoex_a2dp_config_cmd(ar->arWmi, &cmd) == A_OK)
+ {
+ ret = 0;
+ } else {
+ ret = -EINVAL;
+ }
+
+ return(ret);
+}
+
+static int
+ar6000_xioctl_set_btcoex_aclcoex_config_cmd(struct net_device * dev, char * userdata)
+{
+ AR_SOFTC_T *ar = (AR_SOFTC_T *)ar6k_priv(dev);
+ WMI_SET_BTCOEX_ACLCOEX_CONFIG_CMD cmd;
+ int ret = 0;
+
+ if (ar->arWmiReady == FALSE) {
+ return -EIO;
+ }
+
+ if (copy_from_user(&cmd, userdata, sizeof(cmd))) {
+ return -EFAULT;
+ }
+
+ if (wmi_set_btcoex_aclcoex_config_cmd(ar->arWmi, &cmd) == A_OK)
+ {
+ ret = 0;
+ } else {
+ ret = -EINVAL;
+ }
+
+ return(ret);
+}
+
+static int
+ar6000_xioctl_set_btcoex_debug_cmd(struct net_device * dev, char * userdata)
+{
+ AR_SOFTC_T *ar = (AR_SOFTC_T *)ar6k_priv(dev);
+ WMI_SET_BTCOEX_DEBUG_CMD cmd;
+ int ret = 0;
+
+ if (ar->arWmiReady == FALSE) {
+ return -EIO;
+ }
+
+ if (copy_from_user(&cmd, userdata, sizeof(cmd))) {
+ return -EFAULT;
+ }
+
+ if (wmi_set_btcoex_debug_cmd(ar->arWmi, &cmd) == A_OK)
+ {
+ ret = 0;
+ } else {
+ ret = -EINVAL;
+ }
+
+ return(ret);
+}
+
+static int
+ar6000_xioctl_set_btcoex_bt_operating_status_cmd(struct net_device * dev, char * userdata)
+{
+ AR_SOFTC_T *ar = (AR_SOFTC_T *)ar6k_priv(dev);
+ WMI_SET_BTCOEX_BT_OPERATING_STATUS_CMD cmd;
+ int ret = 0;
+
+ if (ar->arWmiReady == FALSE) {
+ return -EIO;
+ }
+
+ if (copy_from_user(&cmd, userdata, sizeof(cmd))) {
+ return -EFAULT;
+ }
+
+ if (wmi_set_btcoex_bt_operating_status_cmd(ar->arWmi, &cmd) == A_OK)
+ {
+ ret = 0;
+ } else {
+ ret = -EINVAL;
+ }
+ return(ret);
+}
+
+static int
+ar6000_xioctl_get_btcoex_config_cmd(struct net_device * dev, char * userdata,
+ struct ifreq *rq)
+{
+
+ AR_SOFTC_T *ar = (AR_SOFTC_T *)ar6k_priv(dev);
+ AR6000_BTCOEX_CONFIG btcoexConfig;
+ WMI_BTCOEX_CONFIG_EVENT *pbtcoexConfigEv = &ar->arBtcoexConfig;
+
+ int ret = 0;
+
+ if (ar->bIsDestroyProgress) {
+ return -EBUSY;
+ }
+ if (ar->arWmiReady == FALSE) {
+ return -EIO;
+ }
+ if (copy_from_user(&btcoexConfig.configCmd, userdata, sizeof(AR6000_BTCOEX_CONFIG))) {
+ return -EFAULT;
+ }
+ if (down_interruptible(&ar->arSem)) {
+ return -ERESTARTSYS;
+ }
+
+ /* mark the request pending before issuing the command so the
+ * completion event from the Target cannot race with this flag */
+ ar->statsUpdatePending = TRUE;
+
+ if (wmi_get_btcoex_config_cmd(ar->arWmi, (WMI_GET_BTCOEX_CONFIG_CMD *)&btcoexConfig.configCmd) != A_OK)
+ {
+ ar->statsUpdatePending = FALSE;
+ up(&ar->arSem);
+ return -EIO;
+ }
+
+ wait_event_interruptible_timeout(arEvent, ar->statsUpdatePending == FALSE, wmitimeout * HZ);
+
+ if (signal_pending(current)) {
+ ret = -EINTR;
+ }
+
+ if (!ret && copy_to_user(btcoexConfig.configEvent, pbtcoexConfigEv, sizeof(WMI_BTCOEX_CONFIG_EVENT))) {
+ ret = -EFAULT;
+ }
+ up(&ar->arSem);
+ return ret;
+}
+
+static int
+ar6000_xioctl_get_btcoex_stats_cmd(struct net_device * dev, char * userdata, struct ifreq *rq)
+{
+ AR_SOFTC_T *ar = (AR_SOFTC_T *)ar6k_priv(dev);
+ AR6000_BTCOEX_STATS btcoexStats;
+ WMI_BTCOEX_STATS_EVENT *pbtcoexStats = &ar->arBtcoexStats;
+ int ret = 0;
+
+ if (ar->bIsDestroyProgress) {
+ return -EBUSY;
+ }
+ if (ar->arWmiReady == FALSE) {
+ return -EIO;
+ }
+
+ /* copy only the user's statsEvent pointer; the config structure
+ * size used previously did not match this destination */
+ if (copy_from_user(&btcoexStats.statsEvent, userdata, sizeof(btcoexStats.statsEvent))) {
+ return -EFAULT;
+ }
+
+ if (down_interruptible(&ar->arSem)) {
+ return -ERESTARTSYS;
+ }
+
+ /* mark the request pending before issuing the command so the
+ * completion event from the Target cannot race with this flag */
+ ar->statsUpdatePending = TRUE;
+
+ if (wmi_get_btcoex_stats_cmd(ar->arWmi) != A_OK)
+ {
+ ar->statsUpdatePending = FALSE;
+ up(&ar->arSem);
+ return -EIO;
+ }
+
+ wait_event_interruptible_timeout(arEvent, ar->statsUpdatePending == FALSE, wmitimeout * HZ);
+
+ if (signal_pending(current)) {
+ ret = -EINTR;
+ }
+
+ if (!ret && copy_to_user(btcoexStats.statsEvent, pbtcoexStats, sizeof(WMI_BTCOEX_STATS_EVENT))) {
+ ret = -EFAULT;
+ }
+
+
+ up(&ar->arSem);
+
+ return(ret);
+}
+
+#ifdef CONFIG_HOST_GPIO_SUPPORT
+struct ar6000_gpio_intr_wait_cmd_s gpio_intr_results;
+/* gpio_reg_results and gpio_data_available are protected by arSem */
+static struct ar6000_gpio_register_cmd_s gpio_reg_results;
+static A_BOOL gpio_data_available; /* Requested GPIO data available */
+static A_BOOL gpio_intr_available; /* GPIO interrupt info available */
+static A_BOOL gpio_ack_received; /* GPIO ack was received */
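+
+/*
+ * These flags implement a simple request/response handshake with the
+ * Target: a flag is cleared before the corresponding WMI command is
+ * sent, the matching *_rx() handler below sets it and wakes arEvent
+ * when the response arrives, and the ioctl path sleeps on
+ * wait_event_interruptible(arEvent, <flag>) until then.
+ */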
+
+/* Host-side initialization for General Purpose I/O support */
+void ar6000_gpio_init(void)
+{
+ gpio_intr_available = FALSE;
+ gpio_data_available = FALSE;
+ gpio_ack_received = FALSE;
+}
+
+/*
+ * Called when a GPIO interrupt is received from the Target.
+ * intr_values shows which GPIO pins have interrupted.
+ * input_values shows a recent value of GPIO pins.
+ */
+void
+ar6000_gpio_intr_rx(A_UINT32 intr_mask, A_UINT32 input_values)
+{
+ gpio_intr_results.intr_mask = intr_mask;
+ gpio_intr_results.input_values = input_values;
+ *((volatile A_BOOL *)&gpio_intr_available) = TRUE;
+ wake_up(&arEvent);
+}
+
+/*
+ * This is called when a response is received from the Target
+ * for a previous ar6000_gpio_input_get or ar6000_gpio_register_get
+ * call.
+ */
+void
+ar6000_gpio_data_rx(A_UINT32 reg_id, A_UINT32 value)
+{
+ gpio_reg_results.gpioreg_id = reg_id;
+ gpio_reg_results.value = value;
+ *((volatile A_BOOL *)&gpio_data_available) = TRUE;
+ wake_up(&arEvent);
+}
+
+/*
+ * This is called when an acknowledgement is received from the Target
+ * for a previous ar6000_gpio_output_set or ar6000_gpio_register_set
+ * call.
+ */
+void
+ar6000_gpio_ack_rx(void)
+{
+ gpio_ack_received = TRUE;
+ wake_up(&arEvent);
+}
+
+A_STATUS
+ar6000_gpio_output_set(struct net_device *dev,
+ A_UINT32 set_mask,
+ A_UINT32 clear_mask,
+ A_UINT32 enable_mask,
+ A_UINT32 disable_mask)
+{
+ AR_SOFTC_T *ar = (AR_SOFTC_T *)ar6k_priv(dev);
+
+ gpio_ack_received = FALSE;
+ return wmi_gpio_output_set(ar->arWmi,
+ set_mask, clear_mask, enable_mask, disable_mask);
+}
+
+static A_STATUS
+ar6000_gpio_input_get(struct net_device *dev)
+{
+ AR_SOFTC_T *ar = (AR_SOFTC_T *)ar6k_priv(dev);
+
+ *((volatile A_BOOL *)&gpio_data_available) = FALSE;
+ return wmi_gpio_input_get(ar->arWmi);
+}
+
+static A_STATUS
+ar6000_gpio_register_set(struct net_device *dev,
+ A_UINT32 gpioreg_id,
+ A_UINT32 value)
+{
+ AR_SOFTC_T *ar = (AR_SOFTC_T *)ar6k_priv(dev);
+
+ gpio_ack_received = FALSE;
+ return wmi_gpio_register_set(ar->arWmi, gpioreg_id, value);
+}
+
+static A_STATUS
+ar6000_gpio_register_get(struct net_device *dev,
+ A_UINT32 gpioreg_id)
+{
+ AR_SOFTC_T *ar = (AR_SOFTC_T *)ar6k_priv(dev);
+
+ *((volatile A_BOOL *)&gpio_data_available) = FALSE;
+ return wmi_gpio_register_get(ar->arWmi, gpioreg_id);
+}
+
+static A_STATUS
+ar6000_gpio_intr_ack(struct net_device *dev,
+ A_UINT32 ack_mask)
+{
+ AR_SOFTC_T *ar = (AR_SOFTC_T *)ar6k_priv(dev);
+
+ gpio_intr_available = FALSE;
+ return wmi_gpio_intr_ack(ar->arWmi, ack_mask);
+}
+#endif /* CONFIG_HOST_GPIO_SUPPORT */
+
+#if defined(CONFIG_TARGET_PROFILE_SUPPORT)
+static struct prof_count_s prof_count_results;
+static A_BOOL prof_count_available; /* Requested profile count available */
+
+static A_STATUS
+prof_count_get(struct net_device *dev)
+{
+ AR_SOFTC_T *ar = (AR_SOFTC_T *)ar6k_priv(dev);
+
+ *((volatile A_BOOL *)&prof_count_available) = FALSE;
+ return wmi_prof_count_get_cmd(ar->arWmi);
+}
+
+/*
+ * This is called when a response is received from the Target
+ * for a previous prof_count_get call.
+ */
+void
+prof_count_rx(A_UINT32 addr, A_UINT32 count)
+{
+ prof_count_results.addr = addr;
+ prof_count_results.count = count;
+ *((volatile A_BOOL *)&prof_count_available) = TRUE;
+ wake_up(&arEvent);
+}
+#endif /* CONFIG_TARGET_PROFILE_SUPPORT */
+
+
+static A_STATUS
+ar6000_create_acl_data_osbuf(struct net_device *dev, A_UINT8 *userdata, void **p_osbuf)
+{
+ void *osbuf = NULL;
+ A_UINT8 tmp_space[8];
+ HCI_ACL_DATA_PKT *acl;
+ A_UINT8 hdr_size, *datap=NULL;
+ A_STATUS ret = A_OK;
+
+ /* ACL is in the data path. A pool mechanism for allocating and
+ * freeing NETBUFs is still needed - ToDo later.
+ */
+
+ *p_osbuf = NULL;
+ acl = (HCI_ACL_DATA_PKT *)tmp_space;
+ hdr_size = sizeof(acl->hdl_and_flags) + sizeof(acl->data_len);
+
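+ /*
+ * Read just the fixed ACL header first to learn data_len, then size
+ * the netbuf for header + payload and copy the full packet from
+ * user space.
+ */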
+ do {
+ if (a_copy_from_user(acl, userdata, hdr_size)) {
+ ret = A_EFAULT;
+ break;
+ }
+
+ osbuf = A_NETBUF_ALLOC(hdr_size + acl->data_len);
+ if (osbuf == NULL) {
+ ret = A_NO_MEMORY;
+ break;
+ }
+ A_NETBUF_PUT(osbuf, hdr_size + acl->data_len);
+ datap = (A_UINT8 *)A_NETBUF_DATA(osbuf);
+
+ /* Real copy to osbuf */
+ acl = (HCI_ACL_DATA_PKT *)(datap);
+ A_MEMCPY(acl, tmp_space, hdr_size);
+ if (a_copy_from_user(acl->data, userdata + hdr_size, acl->data_len)) {
+ ret = A_EFAULT;
+ break;
+ }
+ } while(FALSE);
+
+ if (ret == A_OK) {
+ *p_osbuf = osbuf;
+ } else if (osbuf != NULL) {
+ A_NETBUF_FREE(osbuf);
+ }
+ return ret;
+}
+
+
+
+int
+ar6000_ioctl_ap_setparam(AR_SOFTC_T *ar, int param, int value)
+{
+ int ret=0;
+
+ switch(param) {
+ case IEEE80211_PARAM_WPA:
+ switch (value) {
+ case WPA_MODE_WPA1:
+ ar->arAuthMode = WPA_AUTH;
+ break;
+ case WPA_MODE_WPA2:
+ ar->arAuthMode = WPA2_AUTH;
+ break;
+ case WPA_MODE_AUTO:
+ ar->arAuthMode = WPA_AUTH | WPA2_AUTH;
+ break;
+ case WPA_MODE_NONE:
+ ar->arAuthMode = NONE_AUTH;
+ break;
+ }
+ break;
+ case IEEE80211_PARAM_AUTHMODE:
+ if(value == IEEE80211_AUTH_WPA_PSK) {
+ if (WPA_AUTH == ar->arAuthMode) {
+ ar->arAuthMode = WPA_PSK_AUTH;
+ } else if (WPA2_AUTH == ar->arAuthMode) {
+ ar->arAuthMode = WPA2_PSK_AUTH;
+ } else if ((WPA_AUTH | WPA2_AUTH) == ar->arAuthMode) {
+ ar->arAuthMode = WPA_PSK_AUTH | WPA2_PSK_AUTH;
+ } else {
+ AR_DEBUG_PRINTF(ATH_DEBUG_ERR,("Error - Setting PSK "\
+ "mode when WPA param was set to %d\n",
+ ar->arAuthMode));
+ ret = -EIO;
+ }
+ }
+ break;
+ case IEEE80211_PARAM_UCASTCIPHER:
+ ar->arPairwiseCrypto = 0;
+ if(value & (1<<IEEE80211_CIPHER_AES_CCM)) {
+ ar->arPairwiseCrypto |= AES_CRYPT;
+ }
+ if(value & (1<<IEEE80211_CIPHER_TKIP)) {
+ ar->arPairwiseCrypto |= TKIP_CRYPT;
+ }
+ if(!ar->arPairwiseCrypto) {
+ AR_DEBUG_PRINTF(ATH_DEBUG_ERR,
+ ("Error - Invalid cipher in WPA \n"));
+ ret = -EIO;
+ }
+ break;
+ case IEEE80211_PARAM_PRIVACY:
+ if(value == 0) {
+ ar->arDot11AuthMode = OPEN_AUTH;
+ ar->arAuthMode = NONE_AUTH;
+ ar->arPairwiseCrypto = NONE_CRYPT;
+ ar->arPairwiseCryptoLen = 0;
+ ar->arGroupCrypto = NONE_CRYPT;
+ ar->arGroupCryptoLen = 0;
+ }
+ break;
+#ifdef WAPI_ENABLE
+ case IEEE80211_PARAM_WAPI:
+ A_PRINTF("WAPI Policy: %d\n", value);
+ ar->arDot11AuthMode = OPEN_AUTH;
+ ar->arAuthMode = NONE_AUTH;
+ if(value & 0x1) {
+ ar->arPairwiseCrypto = WAPI_CRYPT;
+ ar->arGroupCrypto = WAPI_CRYPT;
+ } else {
+ ar->arPairwiseCrypto = NONE_CRYPT;
+ ar->arGroupCrypto = NONE_CRYPT;
+ }
+ break;
+#endif
+ }
+ return ret;
+}
+
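+/*
+ * Called from the IEEE80211_IOCTL_SETPARAM handler below with the
+ * (param, value) pair that user space packs into ifr_ifru.ifru_newname.
+ * In AP mode the security-related parameters are routed to
+ * ar6000_ioctl_ap_setparam() above.
+ */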
+int
+ar6000_ioctl_setparam(AR_SOFTC_T *ar, int param, int value)
+{
+ A_BOOL profChanged = FALSE;
+ int ret=0;
+
+ if(ar->arNextMode == AP_NETWORK) {
+ ar->ap_profile_flag = 1; /* There is a change in profile */
+ switch (param) {
+ case IEEE80211_PARAM_WPA:
+ case IEEE80211_PARAM_AUTHMODE:
+ case IEEE80211_PARAM_UCASTCIPHER:
+ case IEEE80211_PARAM_PRIVACY:
+ case IEEE80211_PARAM_WAPI:
+ ret = ar6000_ioctl_ap_setparam(ar, param, value);
+ return ret;
+ }
+ }
+
+ switch (param) {
+ case IEEE80211_PARAM_WPA:
+ switch (value) {
+ case WPA_MODE_WPA1:
+ ar->arAuthMode = WPA_AUTH;
+ profChanged = TRUE;
+ break;
+ case WPA_MODE_WPA2:
+ ar->arAuthMode = WPA2_AUTH;
+ profChanged = TRUE;
+ break;
+ case WPA_MODE_NONE:
+ ar->arAuthMode = NONE_AUTH;
+ profChanged = TRUE;
+ break;
+ }
+ break;
+ case IEEE80211_PARAM_AUTHMODE:
+ switch(value) {
+ case IEEE80211_AUTH_WPA_PSK:
+ if (WPA_AUTH == ar->arAuthMode) {
+ ar->arAuthMode = WPA_PSK_AUTH;
+ profChanged = TRUE;
+ } else if (WPA2_AUTH == ar->arAuthMode) {
+ ar->arAuthMode = WPA2_PSK_AUTH;
+ profChanged = TRUE;
+ } else {
+ AR_DEBUG_PRINTF(ATH_DEBUG_ERR,("Error - Setting PSK "\
+ "mode when WPA param was set to %d\n",
+ ar->arAuthMode));
+ ret = -EIO;
+ }
+ break;
+ case IEEE80211_AUTH_WPA_CCKM:
+ if (WPA2_AUTH == ar->arAuthMode) {
+ ar->arAuthMode = WPA2_AUTH_CCKM;
+ } else {
+ ar->arAuthMode = WPA_AUTH_CCKM;
+ }
+ break;
+ default:
+ break;
+ }
+ break;
+ case IEEE80211_PARAM_UCASTCIPHER:
+ switch (value) {
+ case IEEE80211_CIPHER_AES_CCM:
+ ar->arPairwiseCrypto = AES_CRYPT;
+ profChanged = TRUE;
+ break;
+ case IEEE80211_CIPHER_TKIP:
+ ar->arPairwiseCrypto = TKIP_CRYPT;
+ profChanged = TRUE;
+ break;
+ case IEEE80211_CIPHER_WEP:
+ ar->arPairwiseCrypto = WEP_CRYPT;
+ profChanged = TRUE;
+ break;
+ case IEEE80211_CIPHER_NONE:
+ ar->arPairwiseCrypto = NONE_CRYPT;
+ profChanged = TRUE;
+ break;
+ }
+ break;
+ case IEEE80211_PARAM_UCASTKEYLEN:
+ if (!IEEE80211_IS_VALID_WEP_CIPHER_LEN(value)) {
+ ret = -EIO;
+ } else {
+ ar->arPairwiseCryptoLen = value;
+ }
+ break;
+ case IEEE80211_PARAM_MCASTCIPHER:
+ switch (value) {
+ case IEEE80211_CIPHER_AES_CCM:
+ ar->arGroupCrypto = AES_CRYPT;
+ profChanged = TRUE;
+ break;
+ case IEEE80211_CIPHER_TKIP:
+ ar->arGroupCrypto = TKIP_CRYPT;
+ profChanged = TRUE;
+ break;
+ case IEEE80211_CIPHER_WEP:
+ ar->arGroupCrypto = WEP_CRYPT;
+ profChanged = TRUE;
+ break;
+ case IEEE80211_CIPHER_NONE:
+ ar->arGroupCrypto = NONE_CRYPT;
+ profChanged = TRUE;
+ break;
+ }
+ break;
+ case IEEE80211_PARAM_MCASTKEYLEN:
+ if (!IEEE80211_IS_VALID_WEP_CIPHER_LEN(value)) {
+ ret = -EIO;
+ } else {
+ ar->arGroupCryptoLen = value;
+ }
+ break;
+ case IEEE80211_PARAM_COUNTERMEASURES:
+ if (ar->arWmiReady == FALSE) {
+ return -EIO;
+ }
+ wmi_set_tkip_countermeasures_cmd(ar->arWmi, value);
+ break;
+ default:
+ break;
+ }
+ if ((ar->arNextMode != AP_NETWORK) && (profChanged == TRUE)) {
+ /*
+ * profile has changed. Erase ssid to signal change
+ */
+ A_MEMZERO(ar->arSsid, sizeof(ar->arSsid));
+ }
+
+ return ret;
+}
+
+int
+ar6000_ioctl_setkey(AR_SOFTC_T *ar, struct ieee80211req_key *ik)
+{
+ KEY_USAGE keyUsage;
+ A_STATUS status;
+ CRYPTO_TYPE keyType = NONE_CRYPT;
+
+#ifdef USER_KEYS
+ ar->user_saved_keys.keyOk = FALSE;
+#endif
+ if ( (0 == memcmp(ik->ik_macaddr, null_mac, IEEE80211_ADDR_LEN)) ||
+ (0 == memcmp(ik->ik_macaddr, bcast_mac, IEEE80211_ADDR_LEN)) ) {
+ keyUsage = GROUP_USAGE;
+ if(ar->arNextMode == AP_NETWORK) {
+ A_MEMCPY(&ar->ap_mode_bkey, ik,
+ sizeof(struct ieee80211req_key));
+#ifdef WAPI_ENABLE
+ if(ar->arPairwiseCrypto == WAPI_CRYPT) {
+ return ap_set_wapi_key(ar, ik);
+ }
+#endif
+ }
+#ifdef USER_KEYS
+ A_MEMCPY(&ar->user_saved_keys.bcast_ik, ik,
+ sizeof(struct ieee80211req_key));
+#endif
+ } else {
+ keyUsage = PAIRWISE_USAGE;
+#ifdef USER_KEYS
+ A_MEMCPY(&ar->user_saved_keys.ucast_ik, ik,
+ sizeof(struct ieee80211req_key));
+#endif
+#ifdef WAPI_ENABLE
+ if(ar->arNextMode == AP_NETWORK) {
+ if(ar->arPairwiseCrypto == WAPI_CRYPT) {
+ return ap_set_wapi_key(ar, ik);
+ }
+ }
+#endif
+ }
+
+ switch (ik->ik_type) {
+ case IEEE80211_CIPHER_WEP:
+ keyType = WEP_CRYPT;
+ break;
+ case IEEE80211_CIPHER_TKIP:
+ keyType = TKIP_CRYPT;
+ break;
+ case IEEE80211_CIPHER_AES_CCM:
+ keyType = AES_CRYPT;
+ break;
+ default:
+ break;
+ }
+#ifdef USER_KEYS
+ ar->user_saved_keys.keyType = keyType;
+#endif
+ if (IEEE80211_CIPHER_CCKM_KRK != ik->ik_type) {
+ if (NONE_CRYPT == keyType) {
+ return -EIO;
+ }
+
+ if ((WEP_CRYPT == keyType)&&(!ar->arConnected)) {
+ int index = ik->ik_keyix;
+
+ if (!IEEE80211_IS_VALID_WEP_CIPHER_LEN(ik->ik_keylen)) {
+ return -EIO;
+ }
+
+ A_MEMZERO(ar->arWepKeyList[index].arKey,
+ sizeof(ar->arWepKeyList[index].arKey));
+ A_MEMCPY(ar->arWepKeyList[index].arKey, ik->ik_keydata, ik->ik_keylen);
+ ar->arWepKeyList[index].arKeyLen = ik->ik_keylen;
+
+ if(ik->ik_flags & IEEE80211_KEY_DEFAULT){
+ ar->arDefTxKeyIndex = index;
+ }
+
+ return 0;
+ }
+
+ if (((WPA_PSK_AUTH == ar->arAuthMode) || (WPA2_PSK_AUTH == ar->arAuthMode)) &&
+ (GROUP_USAGE & keyUsage))
+ {
+ A_UNTIMEOUT(&ar->disconnect_timer);
+ }
+
+ status = wmi_addKey_cmd(ar->arWmi, ik->ik_keyix, keyType, keyUsage,
+ ik->ik_keylen, (A_UINT8 *)&ik->ik_keyrsc,
+ ik->ik_keydata, KEY_OP_INIT_VAL, ik->ik_macaddr,
+ SYNC_BOTH_WMIFLAG);
+
+ if (status != A_OK) {
+ return -EIO;
+ }
+ } else {
+ status = wmi_add_krk_cmd(ar->arWmi, ik->ik_keydata);
+ if (status != A_OK) {
+ return -EIO;
+ }
+ }
+
+#ifdef USER_KEYS
+ ar->user_saved_keys.keyOk = TRUE;
+#endif
+
+ return 0;
+}
+
+int ar6000_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
+{
+ AR_SOFTC_T *ar = (AR_SOFTC_T *)ar6k_priv(dev);
+ HIF_DEVICE *hifDevice = ar->arHifDevice;
+ int ret = 0, param;
+ unsigned int address = 0;
+ unsigned int length = 0;
+ unsigned char *buffer;
+ char *userdata;
+ A_UINT32 connectCtrlFlags;
+
+
+ WMI_SET_AKMP_PARAMS_CMD akmpParams;
+ WMI_SET_PMKID_LIST_CMD pmkidInfo;
+
+ WMI_SET_HT_CAP_CMD htCap;
+ WMI_SET_HT_OP_CMD htOp;
+
+ /*
+ * ioctl operations may have to wait for the Target, so we cannot hold rtnl.
+ * Prevent the device from disappearing under us and release the lock during
+ * the ioctl operation.
+ */
+ dev_hold(dev);
+ rtnl_unlock();
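+ /*
+ * All error paths below bail out via ioctl_done (or fall out of the
+ * switch) instead of returning directly, so that the rtnl lock can
+ * be re-taken and the device reference dropped at the common exit.
+ */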
+
+ if (cmd == AR6000_IOCTL_EXTENDED) {
+ /*
+ * This allows for many more wireless ioctls than would otherwise
+ * be available. Applications embed the actual ioctl command in
+ * the first word of the parameter block, and use the command
+ * AR6000_IOCTL_EXTENDED_CMD on the ioctl call.
+ */
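+ /*
+ * Illustrative layout only (hypothetical user-space caller), shown
+ * here for a sub-command that carries a WMI_SET_BT_STATUS_CMD:
+ *
+ *   char buf[sizeof(int) + sizeof(WMI_SET_BT_STATUS_CMD)];
+ *   *(int *)buf = AR6000_XIOCTL_WMI_SET_BT_STATUS;  // embedded command
+ *   memcpy(buf + sizeof(int), &btStatusCmd, sizeof(btStatusCmd));
+ *   ifr.ifr_data = buf;
+ *   ioctl(sock_fd, AR6000_IOCTL_EXTENDED, &ifr);
+ *
+ * The embedded command word is read back with get_user() below and
+ * userdata is then pointed at the payload that follows it.
+ */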
+ if (get_user(cmd, (int *)rq->ifr_data)) {
+ ret = -EFAULT;
+ goto ioctl_done;
+ }
+ userdata = (char *)(((unsigned int *)rq->ifr_data)+1);
+ if(is_xioctl_allowed(ar->arNextMode, cmd) != A_OK) {
+ A_PRINTF("xioctl: cmd=%d not allowed in this mode\n",cmd);
+ ret = -EOPNOTSUPP;
+ goto ioctl_done;
+ }
+ } else {
+ /* use a separate status variable so the outer ret is not shadowed */
+ A_STATUS status = is_iwioctl_allowed(ar->arNextMode, cmd);
+ if (status == A_ENOTSUP) {
+ A_PRINTF("iwioctl: cmd=0x%x not allowed in this mode\n", cmd);
+ ret = -EOPNOTSUPP;
+ goto ioctl_done;
+ } else if (status == A_ERROR) {
+ /* It is not our ioctl (out of range ioctl) */
+ ret = -EOPNOTSUPP;
+ goto ioctl_done;
+ }
+ userdata = (char *)rq->ifr_data;
+ }
+
+ if ((ar->arWlanState == WLAN_DISABLED) &&
+ ((cmd != AR6000_XIOCTRL_WMI_SET_WLAN_STATE) &&
+ (cmd != AR6000_XIOCTL_GET_WLAN_SLEEP_STATE) &&
+ (cmd != AR6000_XIOCTL_DIAG_READ) &&
+ (cmd != AR6000_XIOCTL_DIAG_WRITE) &&
+ (cmd != AR6000_XIOCTL_SET_BT_HW_POWER_STATE) &&
+ (cmd != AR6000_XIOCTL_GET_BT_HW_POWER_STATE) &&
+ (cmd != AR6000_XIOCTL_ADD_AP_INTERFACE) &&
+ (cmd != AR6000_XIOCTL_REMOVE_AP_INTERFACE) &&
+ (cmd != AR6000_IOCTL_WMI_GETREV)))
+ {
+ ret = -EIO;
+ goto ioctl_done;
+ }
+
+ ret = 0;
+ switch(cmd)
+ {
+ case IEEE80211_IOCTL_SETPARAM:
+ {
+ int param, value;
+ int *ptr = (int *)rq->ifr_ifru.ifru_newname;
+ if (ar->arWmiReady == FALSE) {
+ ret = -EIO;
+ } else {
+ param = *ptr++;
+ value = *ptr;
+ ret = ar6000_ioctl_setparam(ar,param,value);
+ }
+ break;
+ }
+ case IEEE80211_IOCTL_SETKEY:
+ {
+ struct ieee80211req_key keydata;
+ if (ar->arWmiReady == FALSE) {
+ ret = -EIO;
+ } else if (copy_from_user(&keydata, userdata,
+ sizeof(struct ieee80211req_key))) {
+ ret = -EFAULT;
+ } else {
+ ret = ar6000_ioctl_setkey(ar, &keydata);
+ }
+ break;
+ }
+ case IEEE80211_IOCTL_DELKEY:
+ case IEEE80211_IOCTL_SETOPTIE:
+ {
+ //ret = -EIO;
+ break;
+ }
+ case IEEE80211_IOCTL_SETMLME:
+ {
+ struct ieee80211req_mlme mlme;
+ if (ar->arWmiReady == FALSE) {
+ ret = -EIO;
+ } else if (copy_from_user(&mlme, userdata,
+ sizeof(struct ieee80211req_mlme))) {
+ ret = -EFAULT;
+ } else {
+ switch (mlme.im_op) {
+ case IEEE80211_MLME_AUTHORIZE:
+ A_PRINTF("setmlme AUTHORIZE %02X:%02X\n",
+ mlme.im_macaddr[4], mlme.im_macaddr[5]);
+ break;
+ case IEEE80211_MLME_UNAUTHORIZE:
+ A_PRINTF("setmlme UNAUTHORIZE %02X:%02X\n",
+ mlme.im_macaddr[4], mlme.im_macaddr[5]);
+ break;
+ case IEEE80211_MLME_DEAUTH:
+ A_PRINTF("setmlme DEAUTH %02X:%02X\n",
+ mlme.im_macaddr[4], mlme.im_macaddr[5]);
+ //remove_sta(ar, mlme.im_macaddr);
+ break;
+ case IEEE80211_MLME_DISASSOC:
+ A_PRINTF("setmlme DISASSOC %02X:%02X\n",
+ mlme.im_macaddr[4], mlme.im_macaddr[5]);
+ //remove_sta(ar, mlme.im_macaddr);
+ break;
+ default:
+ ret = 0;
+ goto ioctl_done;
+ }
+
+ wmi_ap_set_mlme(ar->arWmi, mlme.im_op, mlme.im_macaddr,
+ mlme.im_reason);
+ }
+ break;
+ }
+ case IEEE80211_IOCTL_ADDPMKID:
+ {
+ struct ieee80211req_addpmkid req;
+ if (ar->arWmiReady == FALSE) {
+ ret = -EIO;
+ } else if (copy_from_user(&req, userdata, sizeof(struct ieee80211req_addpmkid))) {
+ ret = -EFAULT;
+ } else {
+ A_STATUS status;
+
+ AR_DEBUG_PRINTF(ATH_DEBUG_WLAN_CONNECT,("Add pmkid for %2.2x:%2.2x:%2.2x:%2.2x:%2.2x:%2.2x en=%d\n",
+ req.pi_bssid[0], req.pi_bssid[1], req.pi_bssid[2],
+ req.pi_bssid[3], req.pi_bssid[4], req.pi_bssid[5],
+ req.pi_enable));
+
+ status = wmi_setPmkid_cmd(ar->arWmi, req.pi_bssid, req.pi_pmkid,
+ req.pi_enable);
+
+ if (status != A_OK) {
+ ret = -EIO;
+ goto ioctl_done;
+ }
+ }
+ break;
+ }
+#ifdef CONFIG_HOST_TCMD_SUPPORT
+ case AR6000_XIOCTL_TCMD_CONT_TX:
+ {
+ TCMD_CONT_TX txCmd;
+
+ if ((ar->tcmdPm == TCMD_PM_SLEEP) ||
+ (ar->tcmdPm == TCMD_PM_DEEPSLEEP))
+ {
+ A_PRINTF("Can NOT send tx tcmd when target is asleep! \n");
+ ret = -EFAULT;
+ goto ioctl_done;
+ }
+
+ if(copy_from_user(&txCmd, userdata, sizeof(TCMD_CONT_TX))) {
+ ret = -EFAULT;
+ goto ioctl_done;
+ } else {
+ wmi_test_cmd(ar->arWmi,(A_UINT8 *)&txCmd, sizeof(TCMD_CONT_TX));
+ }
+ }
+ break;
+ case AR6000_XIOCTL_TCMD_CONT_RX:
+ {
+ TCMD_CONT_RX rxCmd;
+
+ if ((ar->tcmdPm == TCMD_PM_SLEEP) ||
+ (ar->tcmdPm == TCMD_PM_DEEPSLEEP))
+ {
+ A_PRINTF("Can NOT send rx tcmd when target is asleep! \n");
+ ret = -EFAULT;
+ goto ioctl_done;
+ }
+ if(copy_from_user(&rxCmd, userdata, sizeof(TCMD_CONT_RX))) {
+ ret = -EFAULT;
+ goto ioctl_done;
+ }
+
+ switch(rxCmd.act)
+ {
+ case TCMD_CONT_RX_PROMIS:
+ case TCMD_CONT_RX_FILTER:
+ case TCMD_CONT_RX_SETMAC:
+ case TCMD_CONT_RX_SET_ANT_SWITCH_TABLE:
+ wmi_test_cmd(ar->arWmi,(A_UINT8 *)&rxCmd,
+ sizeof(TCMD_CONT_RX));
+ tcmdRxFreq = rxCmd.u.para.freq;
+ break;
+ case TCMD_CONT_RX_REPORT:
+ ar6000_ioctl_tcmd_get_rx_report(dev, rq,
+ (A_UINT8 *)&rxCmd, sizeof(TCMD_CONT_RX));
+ break;
+ default:
+ A_PRINTF("Unknown Cont Rx mode: %d\n",rxCmd.act);
+ ret = -EINVAL;
+ goto ioctl_done;
+ }
+ }
+ break;
+ case AR6000_XIOCTL_TCMD_PM:
+ {
+ TCMD_PM pmCmd;
+
+ if(copy_from_user(&pmCmd, userdata, sizeof(TCMD_PM))) {
+ ret = -EFAULT;
+ goto ioctl_done;
+ }
+ ar->tcmdPm = pmCmd.mode;
+ wmi_test_cmd(ar->arWmi, (A_UINT8*)&pmCmd, sizeof(TCMD_PM));
+ }
+ break;
+#endif /* CONFIG_HOST_TCMD_SUPPORT */
+
+ case AR6000_XIOCTL_BMI_DONE:
+ if(bmienable)
+ {
+ rtnl_lock(); /* ar6000_init expects to be called holding rtnl lock */
+ ret = ar6000_init(dev);
+ rtnl_unlock();
+ }
+ else
+ {
+ ret = BMIDone(hifDevice);
+ }
+ break;
+
+ case AR6000_XIOCTL_BMI_READ_MEMORY:
+ if (get_user(address, (unsigned int *)userdata) ||
+ get_user(length, (unsigned int *)userdata + 1)) {
+ ret = -EFAULT;
+ break;
+ }
+
+ AR_DEBUG_PRINTF(ATH_DEBUG_INFO,("Read Memory (address: 0x%x, length: %d)\n",
+ address, length));
+ if ((buffer = (unsigned char *)A_MALLOC(length)) != NULL) {
+ A_MEMZERO(buffer, length);
+ ret = BMIReadMemory(hifDevice, address, buffer, length);
+ if (copy_to_user(rq->ifr_data, buffer, length)) {
+ ret = -EFAULT;
+ }
+ A_FREE(buffer);
+ } else {
+ ret = -ENOMEM;
+ }
+ break;
+
+ case AR6000_XIOCTL_BMI_WRITE_MEMORY:
+ if (get_user(address, (unsigned int *)userdata) ||
+ get_user(length, (unsigned int *)userdata + 1)) {
+ ret = -EFAULT;
+ break;
+ }
+ AR_DEBUG_PRINTF(ATH_DEBUG_INFO,("Write Memory (address: 0x%x, length: %d)\n",
+ address, length));
+ if ((buffer = (unsigned char *)A_MALLOC(length)) != NULL) {
+ A_MEMZERO(buffer, length);
+ if (copy_from_user(buffer, &userdata[sizeof(address) +
+ sizeof(length)], length))
+ {
+ ret = -EFAULT;
+ } else {
+ ret = BMIWriteMemory(hifDevice, address, buffer, length);
+ }
+ A_FREE(buffer);
+ } else {
+ ret = -ENOMEM;
+ }
+ break;
+
+ case AR6000_XIOCTL_BMI_TEST:
+ AR_DEBUG_PRINTF(ATH_DEBUG_ERR,("No longer supported\n"));
+ ret = -EOPNOTSUPP;
+ break;
+
+ case AR6000_XIOCTL_BMI_EXECUTE:
+ if (get_user(address, (unsigned int *)userdata) ||
+ get_user(param, (unsigned int *)userdata + 1)) {
+ ret = -EFAULT;
+ break;
+ }
+ AR_DEBUG_PRINTF(ATH_DEBUG_INFO,("Execute (address: 0x%x, param: %d)\n",
+ address, param));
+ ret = BMIExecute(hifDevice, address, (A_UINT32*)&param);
+ /* return value */
+ if (put_user(param, (unsigned int *)rq->ifr_data)) {
+ ret = -EFAULT;
+ break;
+ }
+ break;
+
+ case AR6000_XIOCTL_BMI_SET_APP_START:
+ if (get_user(address, (unsigned int *)userdata)) {
+ ret = -EFAULT;
+ break;
+ }
+ AR_DEBUG_PRINTF(ATH_DEBUG_INFO,("Set App Start (address: 0x%x)\n", address));
+ ret = BMISetAppStart(hifDevice, address);
+ break;
+
+ case AR6000_XIOCTL_BMI_READ_SOC_REGISTER:
+ if (get_user(address, (unsigned int *)userdata)) {
+ ret = -EFAULT;
+ break;
+ }
+ ret = BMIReadSOCRegister(hifDevice, address, (A_UINT32*)&param);
+ /* return value */
+ if (put_user(param, (unsigned int *)rq->ifr_data)) {
+ ret = -EFAULT;
+ break;
+ }
+ break;
+
+ case AR6000_XIOCTL_BMI_WRITE_SOC_REGISTER:
+ if (get_user(address, (unsigned int *)userdata) ||
+ get_user(param, (unsigned int *)userdata + 1)) {
+ ret = -EFAULT;
+ break;
+ }
+ ret = BMIWriteSOCRegister(hifDevice, address, param);
+ break;
+
+#ifdef HTC_RAW_INTERFACE
+ case AR6000_XIOCTL_HTC_RAW_OPEN:
+ ret = A_OK;
+ if (!arRawIfEnabled(ar)) {
+ /* make sure block size is set in case the target was reset since last
+ * BMI phase (i.e. flashup downloads) */
+ ret = ar6000_set_htc_params(ar->arHifDevice,
+ ar->arTargetType,
+ 0, /* use default yield */
+ 0 /* use default number of HTC ctrl buffers */
+ );
+ if (A_FAILED(ret)) {
+ break;
+ }
+ /* Terminate the BMI phase */
+ ret = BMIDone(hifDevice);
+ if (ret == A_OK) {
+ ret = ar6000_htc_raw_open(ar);
+ }
+ }
+ break;
+
+ case AR6000_XIOCTL_HTC_RAW_CLOSE:
+ if (arRawIfEnabled(ar)) {
+ ret = ar6000_htc_raw_close(ar);
+ arRawIfEnabled(ar) = FALSE;
+ } else {
+ ret = A_ERROR;
+ }
+ break;
+
+ case AR6000_XIOCTL_HTC_RAW_READ:
+ if (arRawIfEnabled(ar)) {
+ unsigned int streamID;
+ if (get_user(streamID, (unsigned int *)userdata) ||
+ get_user(length, (unsigned int *)userdata + 1)) {
+ ret = -EFAULT;
+ break;
+ }
+ buffer = (unsigned char*)rq->ifr_data + sizeof(length);
+ ret = ar6000_htc_raw_read(ar, (HTC_RAW_STREAM_ID)streamID,
+ (char*)buffer, length);
+ if (put_user(ret, (unsigned int *)rq->ifr_data)) {
+ ret = -EFAULT;
+ break;
+ }
+ } else {
+ ret = A_ERROR;
+ }
+ break;
+
+ case AR6000_XIOCTL_HTC_RAW_WRITE:
+ if (arRawIfEnabled(ar)) {
+ unsigned int streamID;
+ if (get_user(streamID, (unsigned int *)userdata) ||
+ get_user(length, (unsigned int *)userdata + 1)) {
+ ret = -EFAULT;
+ break;
+ }
+ buffer = (unsigned char*)userdata + sizeof(streamID) + sizeof(length);
+ ret = ar6000_htc_raw_write(ar, (HTC_RAW_STREAM_ID)streamID,
+ (char*)buffer, length);
+ if (put_user(ret, (unsigned int *)rq->ifr_data)) {
+ ret = -EFAULT;
+ break;
+ }
+ } else {
+ ret = A_ERROR;
+ }
+ break;
+#endif /* HTC_RAW_INTERFACE */
+
+ case AR6000_XIOCTL_BMI_LZ_STREAM_START:
+ if (get_user(address, (unsigned int *)userdata)) {
+ ret = -EFAULT;
+ break;
+ }
+ AR_DEBUG_PRINTF(ATH_DEBUG_INFO,("Start Compressed Stream (address: 0x%x)\n", address));
+ ret = BMILZStreamStart(hifDevice, address);
+ break;
+
+ case AR6000_XIOCTL_BMI_LZ_DATA:
+ if (get_user(length, (unsigned int *)userdata)) {
+ ret = -EFAULT;
+ break;
+ }
+ AR_DEBUG_PRINTF(ATH_DEBUG_INFO,("Send Compressed Data (length: %d)\n", length));
+ if ((buffer = (unsigned char *)A_MALLOC(length)) != NULL) {
+ A_MEMZERO(buffer, length);
+ if (copy_from_user(buffer, &userdata[sizeof(length)], length))
+ {
+ ret = -EFAULT;
+ } else {
+ ret = BMILZData(hifDevice, buffer, length);
+ }
+ A_FREE(buffer);
+ } else {
+ ret = -ENOMEM;
+ }
+ break;
+
+#if defined(CONFIG_TARGET_PROFILE_SUPPORT)
+ /*
+ * Optional support for Target-side profiling.
+ * Not needed in production.
+ */
+
+ /* Configure Target-side profiling */
+ case AR6000_XIOCTL_PROF_CFG:
+ {
+ A_UINT32 period;
+ A_UINT32 nbins;
+ if (get_user(period, (unsigned int *)userdata) ||
+ get_user(nbins, (unsigned int *)userdata + 1)) {
+ ret = -EFAULT;
+ break;
+ }
+
+ if (wmi_prof_cfg_cmd(ar->arWmi, period, nbins) != A_OK) {
+ ret = -EIO;
+ }
+
+ break;
+ }
+
+ /* Start a profiling bucket/bin at the specified address */
+ case AR6000_XIOCTL_PROF_ADDR_SET:
+ {
+ A_UINT32 addr;
+ if (get_user(addr, (unsigned int *)userdata)) {
+ ret = -EFAULT;
+ break;
+ }
+
+ if (wmi_prof_addr_set_cmd(ar->arWmi, addr) != A_OK) {
+ ret = -EIO;
+ }
+
+ break;
+ }
+
+ /* START Target-side profiling */
+ case AR6000_XIOCTL_PROF_START:
+ wmi_prof_start_cmd(ar->arWmi);
+ break;
+
+ /* STOP Target-side profiling */
+ case AR6000_XIOCTL_PROF_STOP:
+ wmi_prof_stop_cmd(ar->arWmi);
+ break;
+ case AR6000_XIOCTL_PROF_COUNT_GET:
+ {
+ if (ar->bIsDestroyProgress) {
+ ret = -EBUSY;
+ goto ioctl_done;
+ }
+ if (ar->arWmiReady == FALSE) {
+ ret = -EIO;
+ goto ioctl_done;
+ }
+ if (down_interruptible(&ar->arSem)) {
+ ret = -ERESTARTSYS;
+ goto ioctl_done;
+ }
+ if (ar->bIsDestroyProgress) {
+ up(&ar->arSem);
+ ret = -EBUSY;
+ goto ioctl_done;
+ }
+
+ prof_count_available = FALSE;
+ ret = prof_count_get(dev);
+ if (ret != A_OK) {
+ up(&ar->arSem);
+ ret = -EIO;
+ goto ioctl_done;
+ }
+
+ /* Wait for Target to respond. */
+ wait_event_interruptible(arEvent, prof_count_available);
+ if (signal_pending(current)) {
+ ret = -EINTR;
+ } else {
+ if (copy_to_user(userdata, &prof_count_results,
+ sizeof(prof_count_results)))
+ {
+ ret = -EFAULT;
+ }
+ }
+ up(&ar->arSem);
+ break;
+ }
+#endif /* CONFIG_TARGET_PROFILE_SUPPORT */
+
+ case AR6000_IOCTL_WMI_GETREV:
+ {
+ if (copy_to_user(rq->ifr_data, &ar->arVersion,
+ sizeof(ar->arVersion)))
+ {
+ ret = -EFAULT;
+ }
+ break;
+ }
+ case AR6000_IOCTL_WMI_SETPWR:
+ {
+ WMI_POWER_MODE_CMD pwrModeCmd;
+
+ if (ar->arWmiReady == FALSE) {
+ ret = -EIO;
+ } else if (copy_from_user(&pwrModeCmd, userdata,
+ sizeof(pwrModeCmd)))
+ {
+ ret = -EFAULT;
+ } else {
+ if (wmi_powermode_cmd(ar->arWmi, pwrModeCmd.powerMode)
+ != A_OK)
+ {
+ ret = -EIO;
+ }
+ }
+ break;
+ }
+ case AR6000_IOCTL_WMI_SET_IBSS_PM_CAPS:
+ {
+ WMI_IBSS_PM_CAPS_CMD ibssPmCaps;
+
+ if (ar->arWmiReady == FALSE) {
+ ret = -EIO;
+ } else if (copy_from_user(&ibssPmCaps, userdata,
+ sizeof(ibssPmCaps)))
+ {
+ ret = -EFAULT;
+ } else {
+ if (wmi_ibsspmcaps_cmd(ar->arWmi, ibssPmCaps.power_saving, ibssPmCaps.ttl,
+ ibssPmCaps.atim_windows, ibssPmCaps.timeout_value) != A_OK)
+ {
+ ret = -EIO;
+ }
+ AR6000_SPIN_LOCK(&ar->arLock, 0);
+ ar->arIbssPsEnable = ibssPmCaps.power_saving;
+ AR6000_SPIN_UNLOCK(&ar->arLock, 0);
+ }
+ break;
+ }
+ case AR6000_XIOCTL_WMI_SET_AP_PS:
+ {
+ WMI_AP_PS_CMD apPsCmd;
+
+ if (ar->arWmiReady == FALSE) {
+ ret = -EIO;
+ } else if (copy_from_user(&apPsCmd, userdata,
+ sizeof(apPsCmd)))
+ {
+ ret = -EFAULT;
+ } else {
+ if (wmi_apps_cmd(ar->arWmi, apPsCmd.psType, apPsCmd.idle_time,
+ apPsCmd.ps_period, apPsCmd.sleep_period) != A_OK)
+ {
+ ret = -EIO;
+ }
+ }
+ break;
+ }
+ case AR6000_IOCTL_WMI_SET_PMPARAMS:
+ {
+ WMI_POWER_PARAMS_CMD pmParams;
+
+ if (ar->arWmiReady == FALSE) {
+ ret = -EIO;
+ } else if (copy_from_user(&pmParams, userdata,
+ sizeof(pmParams)))
+ {
+ ret = -EFAULT;
+ } else {
+ if (wmi_pmparams_cmd(ar->arWmi, pmParams.idle_period,
+ pmParams.pspoll_number,
+ pmParams.dtim_policy,
+ pmParams.tx_wakeup_policy,
+ pmParams.num_tx_to_wakeup,
+#if WLAN_CONFIG_IGNORE_POWER_SAVE_FAIL_EVENT_DURING_SCAN
+ IGNORE_POWER_SAVE_FAIL_EVENT_DURING_SCAN
+#else
+ SEND_POWER_SAVE_FAIL_EVENT_ALWAYS
+#endif
+ ) != A_OK)
+ {
+ ret = -EIO;
+ }
+ }
+ break;
+ }
+ case AR6000_IOCTL_WMI_SETSCAN:
+ {
+ if (ar->arWmiReady == FALSE) {
+ ret = -EIO;
+ } else if (copy_from_user(&ar->scParams, userdata,
+ sizeof(ar->scParams)))
+ {
+ ret = -EFAULT;
+ } else {
+ if (CAN_SCAN_IN_CONNECT(ar->scParams.scanCtrlFlags)) {
+ ar->arSkipScan = FALSE;
+ } else {
+ ar->arSkipScan = TRUE;
+ }
+
+ if (wmi_scanparams_cmd(ar->arWmi, ar->scParams.fg_start_period,
+ ar->scParams.fg_end_period,
+ ar->scParams.bg_period,
+ ar->scParams.minact_chdwell_time,
+ ar->scParams.maxact_chdwell_time,
+ ar->scParams.pas_chdwell_time,
+ ar->scParams.shortScanRatio,
+ ar->scParams.scanCtrlFlags,
+ ar->scParams.max_dfsch_act_time,
+ ar->scParams.maxact_scan_per_ssid) != A_OK)
+ {
+ ret = -EIO;
+ }
+ }
+ break;
+ }
+ case AR6000_IOCTL_WMI_SETLISTENINT:
+ {
+ WMI_LISTEN_INT_CMD listenCmd;
+
+ if (ar->arWmiReady == FALSE) {
+ ret = -EIO;
+ } else if (copy_from_user(&listenCmd, userdata,
+ sizeof(listenCmd)))
+ {
+ ret = -EFAULT;
+ } else {
+ if (wmi_listeninterval_cmd(ar->arWmi, listenCmd.listenInterval, listenCmd.numBeacons) != A_OK) {
+ ret = -EIO;
+ } else {
+ AR6000_SPIN_LOCK(&ar->arLock, 0);
+ ar->arListenIntervalT = listenCmd.listenInterval;
+ ar->arListenIntervalB = listenCmd.numBeacons;
+ AR6000_SPIN_UNLOCK(&ar->arLock, 0);
+ }
+
+ }
+ break;
+ }
+ case AR6000_IOCTL_WMI_SET_BMISS_TIME:
+ {
+ WMI_BMISS_TIME_CMD bmissCmd;
+
+ if (ar->arWmiReady == FALSE) {
+ ret = -EIO;
+ } else if (copy_from_user(&bmissCmd, userdata,
+ sizeof(bmissCmd)))
+ {
+ ret = -EFAULT;
+ } else {
+ if (wmi_bmisstime_cmd(ar->arWmi, bmissCmd.bmissTime, bmissCmd.numBeacons) != A_OK) {
+ ret = -EIO;
+ }
+ }
+ break;
+ }
+ case AR6000_IOCTL_WMI_SETBSSFILTER:
+ {
+ WMI_BSS_FILTER_CMD filt;
+
+ if (ar->arWmiReady == FALSE) {
+ ret = -EIO;
+ } else if (copy_from_user(&filt, userdata,
+ sizeof(filt)))
+ {
+ ret = -EFAULT;
+ } else {
+ if (wmi_bssfilter_cmd(ar->arWmi, filt.bssFilter, filt.ieMask)
+ != A_OK) {
+ ret = -EIO;
+ } else {
+ ar->arUserBssFilter = filt.bssFilter;
+ }
+ }
+ break;
+ }
+
+ case AR6000_IOCTL_WMI_SET_SNRTHRESHOLD:
+ {
+ ret = ar6000_ioctl_set_snr_threshold(dev, rq);
+ break;
+ }
+ case AR6000_XIOCTL_WMI_SET_RSSITHRESHOLD:
+ {
+ ret = ar6000_ioctl_set_rssi_threshold(dev, rq);
+ break;
+ }
+ case AR6000_XIOCTL_WMI_CLR_RSSISNR:
+ {
+ if (ar->arWmiReady == FALSE) {
+ ret = -EIO;
+ } else {
+ ret = wmi_clr_rssi_snr(ar->arWmi);
+ }
+ break;
+ }
+ case AR6000_XIOCTL_WMI_SET_LQTHRESHOLD:
+ {
+ ret = ar6000_ioctl_set_lq_threshold(dev, rq);
+ break;
+ }
+ case AR6000_XIOCTL_WMI_SET_LPREAMBLE:
+ {
+ WMI_SET_LPREAMBLE_CMD setLpreambleCmd;
+
+ if (ar->arWmiReady == FALSE) {
+ ret = -EIO;
+ } else if (copy_from_user(&setLpreambleCmd, userdata,
+ sizeof(setLpreambleCmd)))
+ {
+ ret = -EFAULT;
+ } else {
+ if (wmi_set_lpreamble_cmd(ar->arWmi, setLpreambleCmd.status,
+#if WLAN_CONFIG_DONOT_IGNORE_BARKER_IN_ERP
+ WMI_DONOT_IGNORE_BARKER_IN_ERP
+#else
+ WMI_IGNORE_BARKER_IN_ERP
+#endif
+ ) != A_OK)
+ {
+ ret = -EIO;
+ }
+ }
+
+ break;
+ }
+ case AR6000_XIOCTL_WMI_SET_RTS:
+ {
+ WMI_SET_RTS_CMD rtsCmd;
+ if (ar->arWmiReady == FALSE) {
+ ret = -EIO;
+ } else if (copy_from_user(&rtsCmd, userdata,
+ sizeof(rtsCmd)))
+ {
+ ret = -EFAULT;
+ } else {
+ ar->arRTS = rtsCmd.threshold;
+ if (wmi_set_rts_cmd(ar->arWmi, rtsCmd.threshold)
+ != A_OK)
+ {
+ ret = -EIO;
+ }
+ }
+
+ break;
+ }
+ case AR6000_XIOCTL_WMI_SET_WMM:
+ {
+ ret = ar6000_ioctl_set_wmm(dev, rq);
+ break;
+ }
+ case AR6000_XIOCTL_WMI_SET_QOS_SUPP:
+ {
+ ret = ar6000_ioctl_set_qos_supp(dev, rq);
+ break;
+ }
+ case AR6000_XIOCTL_WMI_SET_TXOP:
+ {
+ ret = ar6000_ioctl_set_txop(dev, rq);
+ break;
+ }
+ case AR6000_XIOCTL_WMI_GET_RD:
+ {
+ ret = ar6000_ioctl_get_rd(dev, rq);
+ break;
+ }
+ case AR6000_IOCTL_WMI_SET_CHANNELPARAMS:
+ {
+ ret = ar6000_ioctl_set_channelParams(dev, rq);
+ break;
+ }
+ case AR6000_IOCTL_WMI_SET_PROBEDSSID:
+ {
+ ret = ar6000_ioctl_set_probedSsid(dev, rq);
+ break;
+ }
+ case AR6000_IOCTL_WMI_SET_BADAP:
+ {
+ ret = ar6000_ioctl_set_badAp(dev, rq);
+ break;
+ }
+ case AR6000_IOCTL_WMI_CREATE_QOS:
+ {
+ ret = ar6000_ioctl_create_qos(dev, rq);
+ break;
+ }
+ case AR6000_IOCTL_WMI_DELETE_QOS:
+ {
+ ret = ar6000_ioctl_delete_qos(dev, rq);
+ break;
+ }
+ case AR6000_IOCTL_WMI_GET_QOS_QUEUE:
+ {
+ ret = ar6000_ioctl_get_qos_queue(dev, rq);
+ break;
+ }
+ case AR6000_IOCTL_WMI_GET_TARGET_STATS:
+ {
+ ret = ar6000_ioctl_get_target_stats(dev, rq);
+ break;
+ }
+ case AR6000_IOCTL_WMI_SET_ERROR_REPORT_BITMASK:
+ {
+ ret = ar6000_ioctl_set_error_report_bitmask(dev, rq);
+ break;
+ }
+ case AR6000_IOCTL_WMI_SET_ASSOC_INFO:
+ {
+ WMI_SET_ASSOC_INFO_CMD cmd;
+ A_UINT8 assocInfo[WMI_MAX_ASSOC_INFO_LEN];
+
+ if (ar->arWmiReady == FALSE) {
+ ret = -EIO;
+ break;
+ }
+
+ if (get_user(cmd.ieType, userdata)) {
+ ret = -EFAULT;
+ break;
+ }
+ if (cmd.ieType >= WMI_MAX_ASSOC_INFO_TYPE) {
+ ret = -EIO;
+ break;
+ }
+
+ if (get_user(cmd.bufferSize, userdata + 1) ||
+ (cmd.bufferSize > WMI_MAX_ASSOC_INFO_LEN) ||
+ copy_from_user(assocInfo, userdata + 2, cmd.bufferSize)) {
+ ret = -EFAULT;
+ break;
+ }
+ if (wmi_associnfo_cmd(ar->arWmi, cmd.ieType,
+ cmd.bufferSize, assocInfo) != A_OK) {
+ ret = -EIO;
+ break;
+ }
+ break;
+ }
+ case AR6000_IOCTL_WMI_SET_ACCESS_PARAMS:
+ {
+ ret = ar6000_ioctl_set_access_params(dev, rq);
+ break;
+ }
+ case AR6000_IOCTL_WMI_SET_DISC_TIMEOUT:
+ {
+ ret = ar6000_ioctl_set_disconnect_timeout(dev, rq);
+ break;
+ }
+ case AR6000_XIOCTL_FORCE_TARGET_RESET:
+ {
+ if (ar->arHtcTarget)
+ {
+// HTCForceReset(htcTarget);
+ }
+ else
+ {
+ AR_DEBUG_PRINTF(ATH_DEBUG_WARN,("ar6000_ioctl cannot attempt reset.\n"));
+ }
+ break;
+ }
+ case AR6000_XIOCTL_TARGET_INFO:
+ case AR6000_XIOCTL_CHECK_TARGET_READY: /* backwards compatibility */
+ {
+ /* If we made it to here, then the Target exists and is ready. */
+
+ if (cmd == AR6000_XIOCTL_TARGET_INFO) {
+ if (copy_to_user((A_UINT32 *)rq->ifr_data, &ar->arVersion.target_ver,
+ sizeof(ar->arVersion.target_ver)))
+ {
+ ret = -EFAULT;
+ }
+ if (copy_to_user(((A_UINT32 *)rq->ifr_data)+1, &ar->arTargetType,
+ sizeof(ar->arTargetType)))
+ {
+ ret = -EFAULT;
+ }
+ }
+ break;
+ }
+ case AR6000_XIOCTL_WMI_SET_HB_CHALLENGE_RESP_PARAMS:
+ {
+ WMI_SET_HB_CHALLENGE_RESP_PARAMS_CMD hbparam;
+
+ if (copy_from_user(&hbparam, userdata, sizeof(hbparam)))
+ {
+ ret = -EFAULT;
+ } else {
+ AR6000_SPIN_LOCK(&ar->arLock, 0);
+ /* Start a cyclic timer with the parameters provided. */
+ if (hbparam.frequency) {
+ ar->arHBChallengeResp.frequency = hbparam.frequency;
+ }
+ if (hbparam.threshold) {
+ ar->arHBChallengeResp.missThres = hbparam.threshold;
+ }
+
+ /* Delete the pending timer and start a new one */
+ if (timer_pending(&ar->arHBChallengeResp.timer)) {
+ A_UNTIMEOUT(&ar->arHBChallengeResp.timer);
+ }
+ A_TIMEOUT_MS(&ar->arHBChallengeResp.timer, ar->arHBChallengeResp.frequency * 1000, 0);
+ AR6000_SPIN_UNLOCK(&ar->arLock, 0);
+ }
+ break;
+ }
+ case AR6000_XIOCTL_WMI_GET_HB_CHALLENGE_RESP:
+ {
+ A_UINT32 cookie;
+
+ if (copy_from_user(&cookie, userdata, sizeof(cookie))) {
+ ret = -EFAULT;
+ goto ioctl_done;
+ }
+
+ /* Send the challenge on the control channel */
+ if (wmi_get_challenge_resp_cmd(ar->arWmi, cookie, APP_HB_CHALLENGE) != A_OK) {
+ ret = -EIO;
+ goto ioctl_done;
+ }
+ break;
+ }
+#ifdef USER_KEYS
+ case AR6000_XIOCTL_USER_SETKEYS:
+ {
+
+ ar->user_savedkeys_stat = USER_SAVEDKEYS_STAT_RUN;
+
+ if (copy_from_user(&ar->user_key_ctrl, userdata,
+ sizeof(ar->user_key_ctrl)))
+ {
+ ret = -EFAULT;
+ goto ioctl_done;
+ }
+
+ A_PRINTF("ar6000 USER set key %x\n", ar->user_key_ctrl);
+ break;
+ }
+#endif /* USER_KEYS */
+
+#ifdef CONFIG_HOST_GPIO_SUPPORT
+ case AR6000_XIOCTL_GPIO_OUTPUT_SET:
+ {
+ struct ar6000_gpio_output_set_cmd_s gpio_output_set_cmd;
+
+ if (ar->bIsDestroyProgress) {
+ ret = -EBUSY;
+ goto ioctl_done;
+ }
+ if (ar->arWmiReady == FALSE) {
+ ret = -EIO;
+ goto ioctl_done;
+ }
+ if (down_interruptible(&ar->arSem)) {
+ ret = -ERESTARTSYS;
+ goto ioctl_done;
+ }
+ if (ar->bIsDestroyProgress) {
+ up(&ar->arSem);
+ ret = -EBUSY;
+ goto ioctl_done;
+ }
+
+ if (copy_from_user(&gpio_output_set_cmd, userdata,
+ sizeof(gpio_output_set_cmd)))
+ {
+ ret = -EFAULT;
+ } else {
+ ret = ar6000_gpio_output_set(dev,
+ gpio_output_set_cmd.set_mask,
+ gpio_output_set_cmd.clear_mask,
+ gpio_output_set_cmd.enable_mask,
+ gpio_output_set_cmd.disable_mask);
+ if (ret != A_OK) {
+ ret = -EIO;
+ }
+ }
+ up(&ar->arSem);
+ break;
+ }
+ case AR6000_XIOCTL_GPIO_INPUT_GET:
+ {
+ if (ar->bIsDestroyProgress) {
+ ret = -EBUSY;
+ goto ioctl_done;
+ }
+ if (ar->arWmiReady == FALSE) {
+ ret = -EIO;
+ goto ioctl_done;
+ }
+ if (down_interruptible(&ar->arSem)) {
+ ret = -ERESTARTSYS;
+ goto ioctl_done;
+ }
+ if (ar->bIsDestroyProgress) {
+ up(&ar->arSem);
+ ret = -EBUSY;
+ goto ioctl_done;
+ }
+
+ ret = ar6000_gpio_input_get(dev);
+ if (ret != A_OK) {
+ up(&ar->arSem);
+ ret = -EIO;
+ goto ioctl_done;
+ }
+
+ /* Wait for Target to respond. */
+ wait_event_interruptible(arEvent, gpio_data_available);
+ if (signal_pending(current)) {
+ ret = -EINTR;
+ } else {
+ A_ASSERT(gpio_reg_results.gpioreg_id == GPIO_ID_NONE);
+
+ if (copy_to_user(userdata, &gpio_reg_results.value,
+ sizeof(gpio_reg_results.value)))
+ {
+ ret = -EFAULT;
+ }
+ }
+ up(&ar->arSem);
+ break;
+ }
+ case AR6000_XIOCTL_GPIO_REGISTER_SET:
+ {
+ struct ar6000_gpio_register_cmd_s gpio_register_cmd;
+
+ if (ar->bIsDestroyProgress) {
+ ret = -EBUSY;
+ goto ioctl_done;
+ }
+ if (ar->arWmiReady == FALSE) {
+ ret = -EIO;
+ goto ioctl_done;
+ }
+ if (down_interruptible(&ar->arSem)) {
+ ret = -ERESTARTSYS;
+ goto ioctl_done;
+ }
+ if (ar->bIsDestroyProgress) {
+ up(&ar->arSem);
+ ret = -EBUSY;
+ goto ioctl_done;
+ }
+
+ if (copy_from_user(&gpio_register_cmd, userdata,
+ sizeof(gpio_register_cmd)))
+ {
+ ret = -EFAULT;
+ } else {
+ ret = ar6000_gpio_register_set(dev,
+ gpio_register_cmd.gpioreg_id,
+ gpio_register_cmd.value);
+ if (ret != A_OK) {
+ ret = -EIO;
+ }
+
+ /* Wait for acknowledgement from Target */
+ wait_event_interruptible(arEvent, gpio_ack_received);
+ if (signal_pending(current)) {
+ ret = -EINTR;
+ }
+ }
+ up(&ar->arSem);
+ break;
+ }
+ case AR6000_XIOCTL_GPIO_REGISTER_GET:
+ {
+ struct ar6000_gpio_register_cmd_s gpio_register_cmd;
+
+ if (ar->bIsDestroyProgress) {
+ ret = -EBUSY;
+ goto ioctl_done;
+ }
+ if (ar->arWmiReady == FALSE) {
+ ret = -EIO;
+ goto ioctl_done;
+ }
+ if (down_interruptible(&ar->arSem)) {
+ ret = -ERESTARTSYS;
+ goto ioctl_done;
+ }
+ if (ar->bIsDestroyProgress) {
+ up(&ar->arSem);
+ ret = -EBUSY;
+ goto ioctl_done;
+ }
+
+ if (copy_from_user(&gpio_register_cmd, userdata,
+ sizeof(gpio_register_cmd)))
+ {
+ ret = -EFAULT;
+ } else {
+ ret = ar6000_gpio_register_get(dev, gpio_register_cmd.gpioreg_id);
+ if (ret != A_OK) {
+ up(&ar->arSem);
+ ret = -EIO;
+ goto ioctl_done;
+ }
+
+ /* Wait for Target to respond. */
+ wait_event_interruptible(arEvent, gpio_data_available);
+ if (signal_pending(current)) {
+ ret = -EINTR;
+ } else {
+ A_ASSERT(gpio_register_cmd.gpioreg_id == gpio_reg_results.gpioreg_id);
+ if (copy_to_user(userdata, &gpio_reg_results,
+ sizeof(gpio_reg_results)))
+ {
+ ret = -EFAULT;
+ }
+ }
+ }
+ up(&ar->arSem);
+ break;
+ }
+ case AR6000_XIOCTL_GPIO_INTR_ACK:
+ {
+ struct ar6000_gpio_intr_ack_cmd_s gpio_intr_ack_cmd;
+
+ if (ar->bIsDestroyProgress) {
+ ret = -EBUSY;
+ goto ioctl_done;
+ }
+ if (ar->arWmiReady == FALSE) {
+ ret = -EIO;
+ goto ioctl_done;
+ }
+ if (down_interruptible(&ar->arSem)) {
+ ret = -ERESTARTSYS;
+ goto ioctl_done;
+ }
+ if (ar->bIsDestroyProgress) {
+ up(&ar->arSem);
+ ret = -EBUSY;
+ goto ioctl_done;
+ }
+
+ if (copy_from_user(&gpio_intr_ack_cmd, userdata,
+ sizeof(gpio_intr_ack_cmd)))
+ {
+ ret = -EFAULT;
+ } else {
+ ret = ar6000_gpio_intr_ack(dev, gpio_intr_ack_cmd.ack_mask);
+ if (ret != A_OK) {
+ ret = -EIO;
+ }
+ }
+ up(&ar->arSem);
+ break;
+ }
+ case AR6000_XIOCTL_GPIO_INTR_WAIT:
+ {
+ /* Wait for Target to report an interrupt. */
+ wait_event_interruptible(arEvent, gpio_intr_available);
+
+ if (signal_pending(current)) {
+ ret = -EINTR;
+ } else {
+ if (copy_to_user(userdata, &gpio_intr_results,
+ sizeof(gpio_intr_results)))
+ {
+ ret = -EFAULT;
+ }
+ }
+ break;
+ }
+#endif /* CONFIG_HOST_GPIO_SUPPORT */
+
+ case AR6000_XIOCTL_DBGLOG_CFG_MODULE:
+ {
+ struct ar6000_dbglog_module_config_s config;
+
+ if (copy_from_user(&config, userdata, sizeof(config))) {
+ ret = -EFAULT;
+ goto ioctl_done;
+ }
+
+ /* Configure the Target-side debug-log module */
+ if (wmi_config_debug_module_cmd(ar->arWmi, config.mmask,
+ config.tsr, config.rep,
+ config.size, config.valid) != A_OK)
+ {
+ ret = -EIO;
+ goto ioctl_done;
+ }
+ break;
+ }
+
+ case AR6000_XIOCTL_DBGLOG_GET_DEBUG_LOGS:
+ {
+ /* Fetch the accumulated debug logs from the Target */
+ if (ar6000_dbglog_get_debug_logs(ar) != A_OK)
+ {
+ ret = -EIO;
+ goto ioctl_done;
+ }
+ break;
+ }
+
+ case AR6000_XIOCTL_SET_ADHOC_BSSID:
+ {
+ WMI_SET_ADHOC_BSSID_CMD adhocBssid;
+
+ if (ar->arWmiReady == FALSE) {
+ ret = -EIO;
+ } else if (copy_from_user(&adhocBssid, userdata,
+ sizeof(adhocBssid)))
+ {
+ ret = -EFAULT;
+ } else if (A_MEMCMP(adhocBssid.bssid, bcast_mac,
+ AR6000_ETH_ADDR_LEN) == 0)
+ {
+ ret = -EFAULT;
+ } else {
+
+ A_MEMCPY(ar->arReqBssid, adhocBssid.bssid, sizeof(ar->arReqBssid));
+ }
+ break;
+ }
+
+ case AR6000_XIOCTL_SET_OPT_MODE:
+ {
+ WMI_SET_OPT_MODE_CMD optModeCmd;
+ AR_SOFTC_T *ar = (AR_SOFTC_T *)ar6k_priv(dev);
+
+ if (ar->arWmiReady == FALSE) {
+ ret = -EIO;
+ } else if (copy_from_user(&optModeCmd, userdata,
+ sizeof(optModeCmd)))
+ {
+ ret = -EFAULT;
+ } else if (ar->arConnected && optModeCmd.optMode == SPECIAL_ON) {
+ ret = -EFAULT;
+
+ } else if (wmi_set_opt_mode_cmd(ar->arWmi, optModeCmd.optMode)
+ != A_OK)
+ {
+ ret = -EIO;
+ }
+ break;
+ }
+
+ case AR6000_XIOCTL_OPT_SEND_FRAME:
+ {
+ WMI_OPT_TX_FRAME_CMD optTxFrmCmd;
+ A_UINT8 data[MAX_OPT_DATA_LEN];
+
+ if (ar->arWmiReady == FALSE) {
+ ret = -EIO;
+ } else if (copy_from_user(&optTxFrmCmd, userdata,
+ sizeof(optTxFrmCmd)))
+ {
+ ret = -EFAULT;
+ } else if (optTxFrmCmd.optIEDataLen > MAX_OPT_DATA_LEN) {
+ /* bound the user-supplied IE length to the on-stack buffer */
+ ret = -EINVAL;
+ } else if (copy_from_user(data,
+ userdata+sizeof(WMI_OPT_TX_FRAME_CMD)-1,
+ optTxFrmCmd.optIEDataLen))
+ {
+ ret = -EFAULT;
+ } else {
+ ret = wmi_opt_tx_frame_cmd(ar->arWmi,
+ optTxFrmCmd.frmType,
+ optTxFrmCmd.dstAddr,
+ optTxFrmCmd.bssid,
+ optTxFrmCmd.optIEDataLen,
+ data);
+ }
+
+ break;
+ }
+ case AR6000_XIOCTL_WMI_SETRETRYLIMITS:
+ {
+ WMI_SET_RETRY_LIMITS_CMD setRetryParams;
+
+ if (ar->arWmiReady == FALSE) {
+ ret = -EIO;
+ } else if (copy_from_user(&setRetryParams, userdata,
+ sizeof(setRetryParams)))
+ {
+ ret = -EFAULT;
+ } else {
+ if (wmi_set_retry_limits_cmd(ar->arWmi, setRetryParams.frameType,
+ setRetryParams.trafficClass,
+ setRetryParams.maxRetries,
+ setRetryParams.enableNotify) != A_OK)
+ {
+ ret = -EIO;
+ }
+ AR6000_SPIN_LOCK(&ar->arLock, 0);
+ ar->arMaxRetries = setRetryParams.maxRetries;
+ AR6000_SPIN_UNLOCK(&ar->arLock, 0);
+ }
+ break;
+ }
+
+ case AR6000_XIOCTL_SET_BEACON_INTVAL:
+ {
+ WMI_BEACON_INT_CMD bIntvlCmd;
+
+ if (ar->arWmiReady == FALSE) {
+ ret = -EIO;
+ } else if (copy_from_user(&bIntvlCmd, userdata,
+ sizeof(bIntvlCmd)))
+ {
+ ret = -EFAULT;
+ } else if (wmi_set_adhoc_bconIntvl_cmd(ar->arWmi, bIntvlCmd.beaconInterval)
+ != A_OK)
+ {
+ ret = -EIO;
+ }
+ if(ret == 0) {
+ ar->ap_beacon_interval = bIntvlCmd.beaconInterval;
+ ar->ap_profile_flag = 1; /* There is a change in profile */
+ }
+ break;
+ }
+ case IEEE80211_IOCTL_SETAUTHALG:
+ {
+ AR_SOFTC_T *ar = (AR_SOFTC_T *)ar6k_priv(dev);
+ struct ieee80211req_authalg req;
+
+ if (ar->arWmiReady == FALSE) {
+ ret = -EIO;
+ } else if (copy_from_user(&req, userdata,
+ sizeof(struct ieee80211req_authalg)))
+ {
+ ret = -EFAULT;
+ } else {
+ if (req.auth_alg & AUTH_ALG_OPEN_SYSTEM) {
+ ar->arDot11AuthMode |= OPEN_AUTH;
+ ar->arPairwiseCrypto = NONE_CRYPT;
+ ar->arGroupCrypto = NONE_CRYPT;
+ }
+ if (req.auth_alg & AUTH_ALG_SHARED_KEY) {
+ ar->arDot11AuthMode |= SHARED_AUTH;
+ ar->arPairwiseCrypto = WEP_CRYPT;
+ ar->arGroupCrypto = WEP_CRYPT;
+ ar->arAuthMode = NONE_AUTH;
+ }
+ if (req.auth_alg == AUTH_ALG_LEAP) {
+ ar->arDot11AuthMode = LEAP_AUTH;
+ }
+ }
+ break;
+ }
+
+ case AR6000_XIOCTL_SET_VOICE_PKT_SIZE:
+ ret = ar6000_xioctl_set_voice_pkt_size(dev, userdata);
+ break;
+
+ case AR6000_XIOCTL_SET_MAX_SP:
+ ret = ar6000_xioctl_set_max_sp_len(dev, userdata);
+ break;
+
+ case AR6000_XIOCTL_WMI_GET_ROAM_TBL:
+ ret = ar6000_ioctl_get_roam_tbl(dev, rq);
+ break;
+ case AR6000_XIOCTL_WMI_SET_ROAM_CTRL:
+ ret = ar6000_ioctl_set_roam_ctrl(dev, userdata);
+ break;
+ case AR6000_XIOCTRL_WMI_SET_POWERSAVE_TIMERS:
+ ret = ar6000_ioctl_set_powersave_timers(dev, userdata);
+ break;
+ case AR6000_XIOCTRL_WMI_GET_POWER_MODE:
+ ret = ar6000_ioctl_get_power_mode(dev, rq);
+ break;
+ case AR6000_XIOCTRL_WMI_SET_WLAN_STATE:
+ {
+ AR6000_WLAN_STATE state;
+ if (get_user(state, (unsigned int *)userdata))
+ ret = -EFAULT;
+ else if (ar6000_set_wlan_state(ar, state) != A_OK)
+ ret = -EIO;
+ break;
+ }
+ case AR6000_XIOCTL_WMI_GET_ROAM_DATA:
+ ret = ar6000_ioctl_get_roam_data(dev, rq);
+ break;
+
+ case AR6000_XIOCTL_WMI_SET_BT_STATUS:
+ ret = ar6000_xioctl_set_bt_status_cmd(dev, userdata);
+ break;
+
+ case AR6000_XIOCTL_WMI_SET_BT_PARAMS:
+ ret = ar6000_xioctl_set_bt_params_cmd(dev, userdata);
+ break;
+
+ case AR6000_XIOCTL_WMI_SET_BTCOEX_FE_ANT:
+ ret = ar6000_xioctl_set_btcoex_fe_ant_cmd(dev, userdata);
+ break;
+
+ case AR6000_XIOCTL_WMI_SET_BTCOEX_COLOCATED_BT_DEV:
+ ret = ar6000_xioctl_set_btcoex_colocated_bt_dev_cmd(dev, userdata);
+ break;
+
+ case AR6000_XIOCTL_WMI_SET_BTCOEX_BTINQUIRY_PAGE_CONFIG:
+ ret = ar6000_xioctl_set_btcoex_btinquiry_page_config_cmd(dev, userdata);
+ break;
+
+ case AR6000_XIOCTL_WMI_SET_BTCOEX_SCO_CONFIG:
+ ret = ar6000_xioctl_set_btcoex_sco_config_cmd( dev, userdata);
+ break;
+
+ case AR6000_XIOCTL_WMI_SET_BTCOEX_A2DP_CONFIG:
+ ret = ar6000_xioctl_set_btcoex_a2dp_config_cmd(dev, userdata);
+ break;
+
+ case AR6000_XIOCTL_WMI_SET_BTCOEX_ACLCOEX_CONFIG:
+ ret = ar6000_xioctl_set_btcoex_aclcoex_config_cmd(dev, userdata);
+ break;
+
+ case AR6000_XIOCTL_WMI_SET_BTCOEX_DEBUG:
+ ret = ar6000_xioctl_set_btcoex_debug_cmd(dev, userdata);
+ break;
+
+ case AR6000_XIOCTL_WMI_SET_BT_OPERATING_STATUS:
+ ret = ar6000_xioctl_set_btcoex_bt_operating_status_cmd(dev, userdata);
+ break;
+
+ case AR6000_XIOCTL_WMI_GET_BTCOEX_CONFIG:
+ ret = ar6000_xioctl_get_btcoex_config_cmd(dev, userdata, rq);
+ break;
+
+ case AR6000_XIOCTL_WMI_GET_BTCOEX_STATS:
+ ret = ar6000_xioctl_get_btcoex_stats_cmd(dev, userdata, rq);
+ break;
+
+ case AR6000_XIOCTL_WMI_STARTSCAN:
+ {
+ WMI_START_SCAN_CMD setStartScanCmd, *cmdp;
+
+ if (ar->arWmiReady == FALSE) {
+ ret = -EIO;
+ } else if (copy_from_user(&setStartScanCmd, userdata,
+ sizeof(setStartScanCmd)))
+ {
+ ret = -EFAULT;
+ } else {
+ if (setStartScanCmd.numChannels > 1) {
+ size_t size = sizeof(*cmdp) +
+ ((setStartScanCmd.numChannels - 1) *
+ sizeof(A_UINT16));
+ /* keep the original 130-byte cap so a bogus channel
+ * count from user space cannot force an oversized copy */
+ if (size > 130) {
+ ret = -EINVAL;
+ goto ioctl_done;
+ }
+ cmdp = A_MALLOC(size);
+ if (cmdp == NULL) {
+ ret = -ENOMEM;
+ goto ioctl_done;
+ }
+ if (copy_from_user(cmdp, userdata, size)) {
+ A_FREE(cmdp);
+ ret = -EFAULT;
+ goto ioctl_done;
+ }
+ } else {
+ cmdp = &setStartScanCmd;
+ }
+
+ if (wmi_startscan_cmd(ar->arWmi, cmdp->scanType,
+ cmdp->forceFgScan,
+ cmdp->isLegacy,
+ cmdp->homeDwellTime,
+ cmdp->forceScanInterval,
+ cmdp->numChannels,
+ cmdp->channelList) != A_OK)
+ {
+ ret = -EIO;
+ }
+
+ if (cmdp != &setStartScanCmd) {
+ A_FREE(cmdp);
+ }
+ }
+ break;
+ }
+ case AR6000_XIOCTL_WMI_SETFIXRATES:
+ {
+ WMI_FIX_RATES_CMD setFixRatesCmd;
+ A_STATUS returnStatus;
+
+ if (ar->arWmiReady == FALSE) {
+ ret = -EIO;
+ } else if (copy_from_user(&setFixRatesCmd, userdata,
+ sizeof(setFixRatesCmd)))
+ {
+ ret = -EFAULT;
+ } else {
+ returnStatus = wmi_set_fixrates_cmd(ar->arWmi, setFixRatesCmd.fixRateMask);
+ if (returnStatus == A_EINVAL) {
+ ret = -EINVAL;
+ } else if(returnStatus != A_OK) {
+ ret = -EIO;
+ } else {
+ ar->ap_profile_flag = 1; /* There is a change in profile */
+ }
+ }
+ break;
+ }
+
+ case AR6000_XIOCTL_WMI_GETFIXRATES:
+ {
+ WMI_FIX_RATES_CMD getFixRatesCmd;
+ AR_SOFTC_T *ar = (AR_SOFTC_T *)ar6k_priv(dev);
+
+ if (ar->bIsDestroyProgress) {
+ ret = -EBUSY;
+ goto ioctl_done;
+ }
+ if (ar->arWmiReady == FALSE) {
+ ret = -EIO;
+ goto ioctl_done;
+ }
+
+ if (down_interruptible(&ar->arSem)) {
+ ret = -ERESTARTSYS;
+ goto ioctl_done;
+ }
+ if (ar->bIsDestroyProgress) {
+ up(&ar->arSem);
+ ret = -EBUSY;
+ goto ioctl_done;
+ }
+ /* Use copy_from_user/copy_to_user to access user-space data */
+ if (copy_from_user(&getFixRatesCmd, userdata, sizeof(getFixRatesCmd))) {
+ up(&ar->arSem);
+ ret = -EFAULT;
+ } else {
+ ar->arRateMask = 0xFFFFFFFF;
+
+ if (wmi_get_ratemask_cmd(ar->arWmi) != A_OK) {
+ up(&ar->arSem);
+ ret = -EIO;
+ goto ioctl_done;
+ }
+
+ wait_event_interruptible_timeout(arEvent, ar->arRateMask != 0xFFFFFFFF, wmitimeout * HZ);
+
+ if (signal_pending(current)) {
+ ret = -EINTR;
+ }
+
+ if (!ret) {
+ getFixRatesCmd.fixRateMask = ar->arRateMask;
+ }
+
+ if(copy_to_user(userdata, &getFixRatesCmd, sizeof(getFixRatesCmd))) {
+ ret = -EFAULT;
+ }
+
+ up(&ar->arSem);
+ }
+ break;
+ }
+ case AR6000_XIOCTL_WMI_SET_AUTHMODE:
+ {
+ WMI_SET_AUTH_MODE_CMD setAuthMode;
+
+ if (ar->arWmiReady == FALSE) {
+ ret = -EIO;
+ } else if (copy_from_user(&setAuthMode, userdata,
+ sizeof(setAuthMode)))
+ {
+ ret = -EFAULT;
+ } else {
+ if (wmi_set_authmode_cmd(ar->arWmi, setAuthMode.mode) != A_OK)
+ {
+ ret = -EIO;
+ }
+ }
+ break;
+ }
+ case AR6000_XIOCTL_WMI_SET_REASSOCMODE:
+ {
+ WMI_SET_REASSOC_MODE_CMD setReassocMode;
+
+ if (ar->arWmiReady == FALSE) {
+ ret = -EIO;
+ } else if (copy_from_user(&setReassocMode, userdata,
+ sizeof(setReassocMode)))
+ {
+ ret = -EFAULT;
+ } else {
+ if (wmi_set_reassocmode_cmd(ar->arWmi, setReassocMode.mode) != A_OK)
+ {
+ ret = -EIO;
+ }
+ }
+ break;
+ }
+ case AR6000_XIOCTL_DIAG_READ:
+ {
+ A_UINT32 addr, data;
+ if (get_user(addr, (unsigned int *)userdata)) {
+ ret = -EFAULT;
+ break;
+ }
+ addr = TARG_VTOP(ar->arTargetType, addr);
+ if (ar6000_ReadRegDiag(ar->arHifDevice, &addr, &data) != A_OK) {
+ ret = -EIO;
+ break;
+ }
+ if (put_user(data, (unsigned int *)userdata + 1)) {
+ ret = -EFAULT;
+ break;
+ }
+ break;
+ }
+ case AR6000_XIOCTL_DIAG_WRITE:
+ {
+ A_UINT32 addr, data;
+ if (get_user(addr, (unsigned int *)userdata) ||
+ get_user(data, (unsigned int *)userdata + 1)) {
+ ret = -EFAULT;
+ break;
+ }
+ addr = TARG_VTOP(ar->arTargetType, addr);
+ if (ar6000_WriteRegDiag(ar->arHifDevice, &addr, &data) != A_OK) {
+ ret = -EIO;
+ }
+ break;
+ }
+ case AR6000_XIOCTL_WMI_SET_KEEPALIVE:
+ {
+ WMI_SET_KEEPALIVE_CMD setKeepAlive;
+ if (ar->arWmiReady == FALSE) {
+ ret = -EIO;
+ goto ioctl_done;
+ } else if (copy_from_user(&setKeepAlive, userdata,
+ sizeof(setKeepAlive))){
+ ret = -EFAULT;
+ } else {
+ if (wmi_set_keepalive_cmd(ar->arWmi, setKeepAlive.keepaliveInterval) != A_OK) {
+ ret = -EIO;
+ }
+ }
+ break;
+ }
+ case AR6000_XIOCTL_WMI_SET_PARAMS:
+ {
+ /* the variable-length payload follows the fixed header; copy the
+ * whole command into a heap buffer rather than overrunning the
+ * on-stack structure */
+ WMI_SET_PARAMS_CMD cmd, *cmdp = NULL;
+ if (ar->arWmiReady == FALSE) {
+ ret = -EIO;
+ goto ioctl_done;
+ } else if (copy_from_user(&cmd, userdata,
+ sizeof(cmd))){
+ ret = -EFAULT;
+ } else if ((cmdp = A_MALLOC(sizeof(cmd) + cmd.length)) == NULL) {
+ ret = -ENOMEM;
+ } else if (copy_from_user(cmdp, userdata,
+ sizeof(cmd) + cmd.length))
+ {
+ ret = -EFAULT;
+ } else {
+ if (wmi_set_params_cmd(ar->arWmi, cmdp->opcode, cmdp->length, cmdp->buffer) != A_OK) {
+ ret = -EIO;
+ }
+ }
+ if (cmdp != NULL) {
+ A_FREE(cmdp);
+ }
+ break;
+ }
+ case AR6000_XIOCTL_WMI_SET_MCAST_FILTER:
+ {
+ WMI_SET_MCAST_FILTER_CMD cmd;
+ if (ar->arWmiReady == FALSE) {
+ ret = -EIO;
+ goto ioctl_done;
+ } else if (copy_from_user(&cmd, userdata,
+ sizeof(cmd))){
+ ret = -EFAULT;
+ } else {
+ if (wmi_set_mcast_filter_cmd(ar->arWmi, cmd.multicast_mac[0],
+ cmd.multicast_mac[1],
+ cmd.multicast_mac[2],
+ cmd.multicast_mac[3]) != A_OK) {
+ ret = -EIO;
+ }
+ }
+ break;
+ }
+ case AR6000_XIOCTL_WMI_DEL_MCAST_FILTER:
+ {
+ WMI_SET_MCAST_FILTER_CMD cmd;
+ if (ar->arWmiReady == FALSE) {
+ ret = -EIO;
+ goto ioctl_done;
+ } else if (copy_from_user(&cmd, userdata,
+ sizeof(cmd))){
+ ret = -EFAULT;
+ } else {
+ if (wmi_del_mcast_filter_cmd(ar->arWmi, cmd.multicast_mac[0],
+ cmd.multicast_mac[1],
+ cmd.multicast_mac[2],
+ cmd.multicast_mac[3]) != A_OK) {
+ ret = -EIO;
+ }
+ }
+ break;
+ }
+ case AR6000_XIOCTL_WMI_MCAST_FILTER:
+ {
+ WMI_MCAST_FILTER_CMD cmd;
+ if (ar->arWmiReady == FALSE) {
+ ret = -EIO;
+ goto ioctl_done;
+ } else if (copy_from_user(&cmd, userdata,
+ sizeof(cmd))){
+ ret = -EFAULT;
+ } else {
+ if (wmi_mcast_filter_cmd(ar->arWmi, cmd.enable) != A_OK) {
+ ret = -EIO;
+ }
+ }
+ break;
+ }
+ case AR6000_XIOCTL_WMI_GET_KEEPALIVE:
+ {
+ AR_SOFTC_T *ar = (AR_SOFTC_T *)ar6k_priv(dev);
+ WMI_GET_KEEPALIVE_CMD getKeepAlive;
+ if (ar->bIsDestroyProgress) {
+ ret = -EBUSY;
+ goto ioctl_done;
+ }
+ if (ar->arWmiReady == FALSE) {
+ ret = -EIO;
+ goto ioctl_done;
+ }
+ if (down_interruptible(&ar->arSem)) {
+ ret = -ERESTARTSYS;
+ goto ioctl_done;
+ }
+ if (ar->bIsDestroyProgress) {
+ up(&ar->arSem);
+ ret = -EBUSY;
+ goto ioctl_done;
+ }
+ if (copy_from_user(&getKeepAlive, userdata, sizeof(getKeepAlive))) {
+ up(&ar->arSem);
+ ret = -EFAULT;
+ } else {
+ getKeepAlive.keepaliveInterval = wmi_get_keepalive_cmd(ar->arWmi);
+ ar->arKeepaliveConfigured = 0xFF;
+ if (wmi_get_keepalive_configured(ar->arWmi) != A_OK){
+ up(&ar->arSem);
+ ret = -EIO;
+ goto ioctl_done;
+ }
+ wait_event_interruptible_timeout(arEvent, ar->arKeepaliveConfigured != 0xFF, wmitimeout * HZ);
+ if (signal_pending(current)) {
+ ret = -EINTR;
+ }
+
+ if (!ret) {
+ getKeepAlive.configured = ar->arKeepaliveConfigured;
+ }
+ if (copy_to_user(userdata, &getKeepAlive, sizeof(getKeepAlive))) {
+ ret = -EFAULT;
+ }
+ up(&ar->arSem);
+ }
+ break;
+ }
+ case AR6000_XIOCTL_WMI_SET_APPIE:
+ {
+ WMI_SET_APPIE_CMD appIEcmd;
+ A_UINT8 appIeInfo[IEEE80211_APPIE_FRAME_MAX_LEN];
+ A_UINT32 fType,ieLen;
+
+ if (ar->arWmiReady == FALSE) {
+ ret = -EIO;
+ goto ioctl_done;
+ }
+ if (get_user(fType, (A_UINT32 *)userdata)) {
+ ret = -EFAULT;
+ break;
+ }
+ appIEcmd.mgmtFrmType = fType;
+ if (appIEcmd.mgmtFrmType >= IEEE80211_APPIE_NUM_OF_FRAME) {
+ ret = -EIO;
+ } else {
+ if (get_user(ieLen, (A_UINT32 *)(userdata + 4))) {
+ ret = -EFAULT;
+ break;
+ }
+ appIEcmd.ieLen = ieLen;
+ A_PRINTF("WPSIE: Type-%d, Len-%d\n",appIEcmd.mgmtFrmType, appIEcmd.ieLen);
+ if (appIEcmd.ieLen > IEEE80211_APPIE_FRAME_MAX_LEN) {
+ ret = -EIO;
+ break;
+ }
+ if (copy_from_user(appIeInfo, userdata + 8, appIEcmd.ieLen)) {
+ ret = -EFAULT;
+ } else {
+ if (wmi_set_appie_cmd(ar->arWmi, appIEcmd.mgmtFrmType,
+ appIEcmd.ieLen, appIeInfo) != A_OK)
+ {
+ ret = -EIO;
+ }
+ }
+ }
+ break;
+ }
+ case AR6000_XIOCTL_WMI_SET_MGMT_FRM_RX_FILTER:
+ {
+ WMI_BSS_FILTER_CMD cmd;
+ A_UINT32 filterType;
+
+ if (copy_from_user(&filterType, userdata, sizeof(A_UINT32)))
+ {
+ ret = -EFAULT;
+ goto ioctl_done;
+ }
+ if (filterType & (IEEE80211_FILTER_TYPE_BEACON |
+ IEEE80211_FILTER_TYPE_PROBE_RESP))
+ {
+ cmd.bssFilter = ALL_BSS_FILTER;
+ } else {
+ cmd.bssFilter = NONE_BSS_FILTER;
+ }
+ if (wmi_bssfilter_cmd(ar->arWmi, cmd.bssFilter, 0) != A_OK) {
+ ret = -EIO;
+ } else {
+ ar->arUserBssFilter = cmd.bssFilter;
+ }
+
+ AR6000_SPIN_LOCK(&ar->arLock, 0);
+ ar->arMgmtFilter = filterType;
+ AR6000_SPIN_UNLOCK(&ar->arLock, 0);
+ break;
+ }
+ case AR6000_XIOCTL_WMI_SET_WSC_STATUS:
+ {
+ A_UINT32 wsc_status;
+
+ if (ar->arWmiReady == FALSE) {
+ ret = -EIO;
+ goto ioctl_done;
+ } else if (copy_from_user(&wsc_status, userdata, sizeof(A_UINT32)))
+ {
+ ret = -EFAULT;
+ goto ioctl_done;
+ }
+ if (wmi_set_wsc_status_cmd(ar->arWmi, wsc_status) != A_OK) {
+ ret = -EIO;
+ }
+ break;
+ }
+ case AR6000_XIOCTL_BMI_ROMPATCH_INSTALL:
+ {
+ A_UINT32 ROM_addr;
+ A_UINT32 RAM_addr;
+ A_UINT32 nbytes;
+ A_UINT32 do_activate;
+ A_UINT32 rompatch_id;
+
+ if (get_user(ROM_addr, (A_UINT32 *)userdata) ||
+ get_user(RAM_addr, (A_UINT32 *)userdata + 1) ||
+ get_user(nbytes, (A_UINT32 *)userdata + 2) ||
+ get_user(do_activate, (A_UINT32 *)userdata + 3)) {
+ ret = -EFAULT;
+ break;
+ }
+ AR_DEBUG_PRINTF(ATH_DEBUG_INFO,("Install rompatch from ROM: 0x%x to RAM: 0x%x length: %d\n",
+ ROM_addr, RAM_addr, nbytes));
+ ret = BMIrompatchInstall(hifDevice, ROM_addr, RAM_addr,
+ nbytes, do_activate, &rompatch_id);
+ if (ret == A_OK) {
+ /* return value */
+ if (put_user(rompatch_id, (unsigned int *)rq->ifr_data)) {
+ ret = -EFAULT;
+ break;
+ }
+ }
+ break;
+ }
+
+ case AR6000_XIOCTL_BMI_ROMPATCH_UNINSTALL:
+ {
+ A_UINT32 rompatch_id;
+
+ if (get_user(rompatch_id, (A_UINT32 *)userdata)) {
+ ret = -EFAULT;
+ break;
+ }
+ AR_DEBUG_PRINTF(ATH_DEBUG_INFO,("UNinstall rompatch_id %d\n", rompatch_id));
+ ret = BMIrompatchUninstall(hifDevice, rompatch_id);
+ break;
+ }
+
+ case AR6000_XIOCTL_BMI_ROMPATCH_ACTIVATE:
+ case AR6000_XIOCTL_BMI_ROMPATCH_DEACTIVATE:
+ {
+ A_UINT32 rompatch_count;
+
+ if (get_user(rompatch_count, (A_UINT32 *)userdata)) {
+ ret = -EFAULT;
+ break;
+ }
+ AR_DEBUG_PRINTF(ATH_DEBUG_INFO,("Change rompatch activation count=%d\n", rompatch_count));
+ length = sizeof(A_UINT32) * rompatch_count;
+ if ((buffer = (unsigned char *)A_MALLOC(length)) != NULL) {
+ A_MEMZERO(buffer, length);
+ if (copy_from_user(buffer, &userdata[sizeof(rompatch_count)], length))
+ {
+ ret = -EFAULT;
+ } else {
+ if (cmd == AR6000_XIOCTL_BMI_ROMPATCH_ACTIVATE) {
+ ret = BMIrompatchActivate(hifDevice, rompatch_count, (A_UINT32 *)buffer);
+ } else {
+ ret = BMIrompatchDeactivate(hifDevice, rompatch_count, (A_UINT32 *)buffer);
+ }
+ }
+ A_FREE(buffer);
+ } else {
+ ret = -ENOMEM;
+ }
+
+ break;
+ }
+ case AR6000_XIOCTL_SET_IP:
+ {
+ WMI_SET_IP_CMD setIP;
+
+ if (ar->arWmiReady == FALSE) {
+ ret = -EIO;
+ } else if (copy_from_user(&setIP, userdata,
+ sizeof(setIP)))
+ {
+ ret = -EFAULT;
+ } else {
+ if (wmi_set_ip_cmd(ar->arWmi,
+ &setIP) != A_OK)
+ {
+ ret = -EIO;
+ }
+ }
+ break;
+ }
+
+ case AR6000_XIOCTL_WMI_SET_HOST_SLEEP_MODE:
+ {
+ WMI_SET_HOST_SLEEP_MODE_CMD setHostSleepMode;
+
+ if (ar->arWmiReady == FALSE) {
+ ret = -EIO;
+ } else if (copy_from_user(&setHostSleepMode, userdata,
+ sizeof(setHostSleepMode)))
+ {
+ ret = -EFAULT;
+ } else {
+ if (wmi_set_host_sleep_mode_cmd(ar->arWmi,
+ &setHostSleepMode) != A_OK)
+ {
+ ret = -EIO;
+ }
+ }
+ break;
+ }
+ case AR6000_XIOCTL_WMI_SET_WOW_MODE:
+ {
+ WMI_SET_WOW_MODE_CMD setWowMode;
+
+ if (ar->arWmiReady == FALSE) {
+ ret = -EIO;
+ } else if (copy_from_user(&setWowMode, userdata,
+ sizeof(setWowMode)))
+ {
+ ret = -EFAULT;
+ } else {
+ if (wmi_set_wow_mode_cmd(ar->arWmi,
+ &setWowMode) != A_OK)
+ {
+ ret = -EIO;
+ }
+ }
+ break;
+ }
+ case AR6000_XIOCTL_WMI_GET_WOW_LIST:
+ {
+ WMI_GET_WOW_LIST_CMD getWowList;
+
+ if (ar->arWmiReady == FALSE) {
+ ret = -EIO;
+ } else if (copy_from_user(&getWowList, userdata,
+ sizeof(getWowList)))
+ {
+ ret = -EFAULT;
+ } else {
+ if (wmi_get_wow_list_cmd(ar->arWmi,
+ &getWowList) != A_OK)
+ {
+ ret = -EIO;
+ }
+ }
+ break;
+ }
+ case AR6000_XIOCTL_WMI_ADD_WOW_PATTERN:
+ {
+#define WOW_PATTERN_SIZE 64
+#define WOW_MASK_SIZE 64
+
+ WMI_ADD_WOW_PATTERN_CMD cmd;
+ A_UINT8 mask_data[WOW_PATTERN_SIZE]={0};
+ A_UINT8 pattern_data[WOW_PATTERN_SIZE]={0};
+
+ do {
+ if (ar->arWmiReady == FALSE) {
+ ret = -EIO;
+ break;
+ }
+ if (copy_from_user(&cmd, userdata,
+ sizeof(WMI_ADD_WOW_PATTERN_CMD)))
+ {
+ ret = -EFAULT;
+ break;
+ }
+ /* filter_size bounds the copies into the fixed-size pattern/mask buffers */
+ if (cmd.filter_size > WOW_PATTERN_SIZE) {
+ ret = -EINVAL;
+ break;
+ }
+ if (copy_from_user(pattern_data,
+ userdata + 3,
+ cmd.filter_size))
+ {
+ ret = -EFAULT;
+ break;
+ }
+ if (copy_from_user(mask_data,
+ (userdata + 3 + cmd.filter_size),
+ cmd.filter_size))
+ {
+ ret = -EFAULT;
+ break;
+ }
+ if (wmi_add_wow_pattern_cmd(ar->arWmi,
+ &cmd, pattern_data, mask_data, cmd.filter_size) != A_OK)
+ {
+ ret = -EIO;
+ }
+ } while(FALSE);
+#undef WOW_PATTERN_SIZE
+#undef WOW_MASK_SIZE
+ break;
+ }
+ case AR6000_XIOCTL_WMI_DEL_WOW_PATTERN:
+ {
+ WMI_DEL_WOW_PATTERN_CMD delWowPattern;
+
+ if (ar->arWmiReady == FALSE) {
+ ret = -EIO;
+ } else if (copy_from_user(&delWowPattern, userdata,
+ sizeof(delWowPattern)))
+ {
+ ret = -EFAULT;
+ } else {
+ if (wmi_del_wow_pattern_cmd(ar->arWmi,
+ &delWowPattern) != A_OK)
+ {
+ ret = -EIO;
+ }
+ }
+ break;
+ }
+ case AR6000_XIOCTL_DUMP_HTC_CREDIT_STATE:
+ if (ar->arHtcTarget != NULL) {
+#ifdef ATH_DEBUG_MODULE
+ HTCDumpCreditStates(ar->arHtcTarget);
+#endif /* ATH_DEBUG_MODULE */
+#ifdef HTC_EP_STAT_PROFILING
+ {
+ HTC_ENDPOINT_STATS stats;
+ int i;
+
+ for (i = 0; i < 5; i++) {
+ if (HTCGetEndpointStatistics(ar->arHtcTarget,
+ i,
+ HTC_EP_STAT_SAMPLE_AND_CLEAR,
+ &stats)) {
+ A_PRINTF(KERN_ALERT"------- Profiling Endpoint : %d \n", i);
+ A_PRINTF(KERN_ALERT"TxCreditLowIndications : %d \n", stats.TxCreditLowIndications);
+ A_PRINTF(KERN_ALERT"TxIssued : %d \n", stats.TxIssued);
+ A_PRINTF(KERN_ALERT"TxDropped: %d \n", stats.TxDropped);
+ A_PRINTF(KERN_ALERT"TxPacketsBundled : %d \n", stats.TxPacketsBundled);
+ A_PRINTF(KERN_ALERT"TxBundles : %d \n", stats.TxBundles);
+ A_PRINTF(KERN_ALERT"TxCreditRpts : %d \n", stats.TxCreditRpts);
+ A_PRINTF(KERN_ALERT"TxCreditsRptsFromRx : %d \n", stats.TxCreditRptsFromRx);
+ A_PRINTF(KERN_ALERT"TxCreditsRptsFromOther : %d \n", stats.TxCreditRptsFromOther);
+ A_PRINTF(KERN_ALERT"TxCreditsRptsFromEp0 : %d \n", stats.TxCreditRptsFromEp0);
+ A_PRINTF(KERN_ALERT"TxCreditsFromRx : %d \n", stats.TxCreditsFromRx);
+ A_PRINTF(KERN_ALERT"TxCreditsFromOther : %d \n", stats.TxCreditsFromOther);
+ A_PRINTF(KERN_ALERT"TxCreditsFromEp0 : %d \n", stats.TxCreditsFromEp0);
+ A_PRINTF(KERN_ALERT"TxCreditsConsummed : %d \n", stats.TxCreditsConsummed);
+ A_PRINTF(KERN_ALERT"TxCreditsReturned : %d \n", stats.TxCreditsReturned);
+ A_PRINTF(KERN_ALERT"RxReceived : %d \n", stats.RxReceived);
+ A_PRINTF(KERN_ALERT"RxPacketsBundled : %d \n", stats.RxPacketsBundled);
+ A_PRINTF(KERN_ALERT"RxLookAheads : %d \n", stats.RxLookAheads);
+ A_PRINTF(KERN_ALERT"RxBundleLookAheads : %d \n", stats.RxBundleLookAheads);
+ A_PRINTF(KERN_ALERT"RxBundleIndFromHdr : %d \n", stats.RxBundleIndFromHdr);
+ A_PRINTF(KERN_ALERT"RxAllocThreshHit : %d \n", stats.RxAllocThreshHit);
+ A_PRINTF(KERN_ALERT"RxAllocThreshBytes : %d \n", stats.RxAllocThreshBytes);
+ A_PRINTF(KERN_ALERT"---- \n");
+
+ }
+ }
+ }
+#endif
+ }
+ break;
+ case AR6000_XIOCTL_TRAFFIC_ACTIVITY_CHANGE:
+ if (ar->arHtcTarget != NULL) {
+ struct ar6000_traffic_activity_change data;
+
+ if (copy_from_user(&data, userdata, sizeof(data)))
+ {
+ ret = -EFAULT;
+ goto ioctl_done;
+ }
+ /* Note: this is used for testing (mbox ping testing); indicate the activity
+ * change using the stream ID as the traffic class. */
+ ar6000_indicate_tx_activity(ar,
+ (A_UINT8)data.StreamID,
+ data.Active ? TRUE : FALSE);
+ }
+ break;
+ case AR6000_XIOCTL_WMI_SET_CONNECT_CTRL_FLAGS:
+ if (ar->arWmiReady == FALSE) {
+ ret = -EIO;
+ } else if (copy_from_user(&connectCtrlFlags, userdata,
+ sizeof(connectCtrlFlags)))
+ {
+ ret = -EFAULT;
+ } else {
+ ar->arConnectCtrlFlags = connectCtrlFlags;
+ }
+ break;
+ case AR6000_XIOCTL_WMI_SET_AKMP_PARAMS:
+ if (ar->arWmiReady == FALSE) {
+ ret = -EIO;
+ } else if (copy_from_user(&akmpParams, userdata,
+ sizeof(WMI_SET_AKMP_PARAMS_CMD)))
+ {
+ ret = -EFAULT;
+ } else {
+ if (wmi_set_akmp_params_cmd(ar->arWmi, &akmpParams) != A_OK) {
+ ret = -EIO;
+ }
+ }
+ break;
+ case AR6000_XIOCTL_WMI_SET_PMKID_LIST:
+ if (ar->arWmiReady == FALSE) {
+ ret = -EIO;
+ } else {
+ if (copy_from_user(&pmkidInfo.numPMKID, userdata,
+ sizeof(pmkidInfo.numPMKID)))
+ {
+ ret = -EFAULT;
+ break;
+ }
+ if (copy_from_user(&pmkidInfo.pmkidList,
+ userdata + sizeof(pmkidInfo.numPMKID),
+ pmkidInfo.numPMKID * sizeof(WMI_PMKID)))
+ {
+ ret = -EFAULT;
+ break;
+ }
+ if (wmi_set_pmkid_list_cmd(ar->arWmi, &pmkidInfo) != A_OK) {
+ ret = -EIO;
+ }
+ }
+ break;
+ case AR6000_XIOCTL_WMI_GET_PMKID_LIST:
+ if (ar->arWmiReady == FALSE) {
+ ret = -EIO;
+ } else {
+ if (wmi_get_pmkid_list_cmd(ar->arWmi) != A_OK) {
+ ret = -EIO;
+ }
+ }
+ break;
+ case AR6000_XIOCTL_WMI_ABORT_SCAN:
+ if (ar->arWmiReady == FALSE) {
+ ret = -EIO;
+ break;
+ }
+ ret = wmi_abort_scan_cmd(ar->arWmi);
+ break;
+ case AR6000_XIOCTL_AP_HIDDEN_SSID:
+ {
+ A_UINT8 hidden_ssid;
+ if (ar->arWmiReady == FALSE) {
+ ret = -EIO;
+ } else if (copy_from_user(&hidden_ssid, userdata, sizeof(hidden_ssid))) {
+ ret = -EFAULT;
+ } else {
+ wmi_ap_set_hidden_ssid(ar->arWmi, hidden_ssid);
+ ar->ap_hidden_ssid = hidden_ssid;
+ ar->ap_profile_flag = 1; /* There is a change in profile */
+ }
+ break;
+ }
+ case AR6000_XIOCTL_AP_GET_STA_LIST:
+ {
+ if (ar->arWmiReady == FALSE) {
+ ret = -EIO;
+ } else {
+ A_UINT8 i;
+ ap_get_sta_t temp;
+ A_MEMZERO(&temp, sizeof(temp));
+ for(i=0;i<AP_MAX_NUM_STA;i++) {
+ A_MEMCPY(temp.sta[i].mac, ar->sta_list[i].mac, ATH_MAC_LEN);
+ temp.sta[i].aid = ar->sta_list[i].aid;
+ temp.sta[i].keymgmt = ar->sta_list[i].keymgmt;
+ temp.sta[i].ucipher = ar->sta_list[i].ucipher;
+ temp.sta[i].auth = ar->sta_list[i].auth;
+ }
+ if(copy_to_user((ap_get_sta_t *)rq->ifr_data, &temp,
+ sizeof(ar->sta_list))) {
+ ret = -EFAULT;
+ }
+ }
+ break;
+ }
+ case AR6000_XIOCTL_AP_SET_NUM_STA:
+ {
+ A_UINT8 num_sta;
+ if (ar->arWmiReady == FALSE) {
+ ret = -EIO;
+ } else if (copy_from_user(&num_sta, userdata, sizeof(num_sta))) {
+ ret = -EFAULT;
+ } else if(num_sta > AP_MAX_NUM_STA) {
+ /* value out of range */
+ ret = -EINVAL;
+ } else {
+ wmi_ap_set_num_sta(ar->arWmi, num_sta);
+ }
+ break;
+ }
+ case AR6000_XIOCTL_AP_SET_ACL_POLICY:
+ {
+ A_UINT8 policy;
+ if (ar->arWmiReady == FALSE) {
+ ret = -EIO;
+ } else if (copy_from_user(&policy, userdata, sizeof(policy))) {
+ ret = -EFAULT;
+ } else if(policy == ar->g_acl.policy) {
+ /* No change in policy */
+ } else {
+ if(!(policy & AP_ACL_RETAIN_LIST_MASK)) {
+ /* clear ACL list */
+ memset(&ar->g_acl,0,sizeof(WMI_AP_ACL));
+ }
+ ar->g_acl.policy = policy;
+ wmi_ap_set_acl_policy(ar->arWmi, policy);
+ }
+ break;
+ }
+ case AR6000_XIOCTL_AP_SET_ACL_MAC:
+ {
+ WMI_AP_ACL_MAC_CMD acl;
+ if (ar->arWmiReady == FALSE) {
+ ret = -EIO;
+ } else if (copy_from_user(&acl, userdata, sizeof(acl))) {
+ ret = -EFAULT;
+ } else {
+ if(acl_add_del_mac(&ar->g_acl, &acl)) {
+ wmi_ap_acl_mac_list(ar->arWmi, &acl);
+ } else {
+ A_PRINTF("ACL list error\n");
+ ret = -EIO;
+ }
+ }
+ break;
+ }
+ case AR6000_XIOCTL_AP_GET_ACL_LIST:
+ {
+ if (ar->arWmiReady == FALSE) {
+ ret = -EIO;
+ } else if(copy_to_user((WMI_AP_ACL *)rq->ifr_data, &ar->g_acl,
+ sizeof(WMI_AP_ACL))) {
+ ret = -EFAULT;
+ }
+ break;
+ }
+ case AR6000_XIOCTL_AP_COMMIT_CONFIG:
+ {
+ ret = ar6000_ap_mode_profile_commit(ar);
+ break;
+ }
+ case IEEE80211_IOCTL_GETWPAIE:
+ {
+ struct ieee80211req_wpaie wpaie;
+ if (ar->arWmiReady == FALSE) {
+ ret = -EIO;
+ } else if (copy_from_user(&wpaie, userdata, sizeof(wpaie))) {
+ ret = -EFAULT;
+ } else if (ar6000_ap_mode_get_wpa_ie(ar, &wpaie)) {
+ ret = -EFAULT;
+ } else if(copy_to_user(userdata, &wpaie, sizeof(wpaie))) {
+ ret = -EFAULT;
+ }
+ break;
+ }
+ case AR6000_XIOCTL_AP_CONN_INACT_TIME:
+ {
+ A_UINT32 period;
+ if (ar->arWmiReady == FALSE) {
+ ret = -EIO;
+ } else if (copy_from_user(&period, userdata, sizeof(period))) {
+ ret = -EFAULT;
+ } else {
+ wmi_ap_conn_inact_time(ar->arWmi, period);
+ }
+ break;
+ }
+ case AR6000_XIOCTL_AP_PROT_SCAN_TIME:
+ {
+ WMI_AP_PROT_SCAN_TIME_CMD bgscan;
+ if (ar->arWmiReady == FALSE) {
+ ret = -EIO;
+ } else if (copy_from_user(&bgscan, userdata, sizeof(bgscan))) {
+ ret = -EFAULT;
+ } else {
+ wmi_ap_bgscan_time(ar->arWmi, bgscan.period_min, bgscan.dwell_ms);
+ }
+ break;
+ }
+ case AR6000_XIOCTL_AP_SET_COUNTRY:
+ {
+ ret = ar6000_ioctl_set_country(dev, rq);
+ break;
+ }
+ case AR6000_XIOCTL_AP_SET_DTIM:
+ {
+ WMI_AP_SET_DTIM_CMD d;
+ if (ar->arWmiReady == FALSE) {
+ ret = -EIO;
+ } else if (copy_from_user(&d, userdata, sizeof(d))) {
+ ret = -EFAULT;
+ } else {
+ if(d.dtim > 0 && d.dtim < 11) {
+ ar->ap_dtim_period = d.dtim;
+ wmi_ap_set_dtim(ar->arWmi, d.dtim);
+ ar->ap_profile_flag = 1; /* There is a change in profile */
+ } else {
+ A_PRINTF("DTIM out of range. Valid range is [1-10]\n");
+ ret = -EIO;
+ }
+ }
+ break;
+ }
+ case AR6000_XIOCTL_WMI_TARGET_EVENT_REPORT:
+ {
+ WMI_SET_TARGET_EVENT_REPORT_CMD evtCfgCmd;
+
+ if (ar->arWmiReady == FALSE) {
+ ret = -EIO;
+ break;
+ }
+ if (copy_from_user(&evtCfgCmd, userdata,
+ sizeof(evtCfgCmd))) {
+ ret = -EFAULT;
+ break;
+ }
+ ret = wmi_set_target_event_report_cmd(ar->arWmi, &evtCfgCmd);
+ break;
+ }
+ case AR6000_XIOCTL_AP_INTRA_BSS_COMM:
+ {
+ A_UINT8 intra=0;
+ if (ar->arWmiReady == FALSE) {
+ ret = -EIO;
+ } else if (copy_from_user(&intra, userdata, sizeof(intra))) {
+ ret = -EFAULT;
+ } else {
+ ar->intra_bss = (intra?1:0);
+ }
+ break;
+ }
+ case AR6000_XIOCTL_DUMP_MODULE_DEBUG_INFO:
+ {
+ struct drv_debug_module_s moduleinfo;
+
+ if (copy_from_user(&moduleinfo, userdata, sizeof(moduleinfo))) {
+ ret = -EFAULT;
+ break;
+ }
+
+ a_dump_module_debug_info_by_name(moduleinfo.modulename);
+ ret = 0;
+ break;
+ }
+ case AR6000_XIOCTL_MODULE_DEBUG_SET_MASK:
+ {
+ struct drv_debug_module_s moduleinfo;
+
+ if (copy_from_user(&moduleinfo, userdata, sizeof(moduleinfo))) {
+ ret = -EFAULT;
+ break;
+ }
+
+ if (A_FAILED(a_set_module_mask(moduleinfo.modulename, moduleinfo.mask))) {
+ ret = -EFAULT;
+ }
+
+ break;
+ }
+ case AR6000_XIOCTL_MODULE_DEBUG_GET_MASK:
+ {
+ struct drv_debug_module_s moduleinfo;
+
+ if (copy_from_user(&moduleinfo, userdata, sizeof(moduleinfo))) {
+ ret = -EFAULT;
+ break;
+ }
+
+ if (A_FAILED(a_get_module_mask(moduleinfo.modulename, &moduleinfo.mask))) {
+ ret = -EFAULT;
+ break;
+ }
+
+ if (copy_to_user(userdata, &moduleinfo, sizeof(moduleinfo))) {
+ ret = -EFAULT;
+ break;
+ }
+
+ break;
+ }
+#ifdef ATH_AR6K_11N_SUPPORT
+ case AR6000_XIOCTL_DUMP_RCV_AGGR_STATS:
+ {
+ PACKET_LOG *copy_of_pkt_log;
+
+ aggr_dump_stats(ar->aggr_cntxt, &copy_of_pkt_log);
+ if (copy_to_user(rq->ifr_data, copy_of_pkt_log, sizeof(PACKET_LOG))) {
+ ret = -EFAULT;
+ }
+ break;
+ }
+ case AR6000_XIOCTL_SETUP_AGGR:
+ {
+ WMI_ADDBA_REQ_CMD cmd;
+
+ if (ar->arWmiReady == FALSE) {
+ ret = -EIO;
+ } else if (copy_from_user(&cmd, userdata, sizeof(cmd))) {
+ ret = -EFAULT;
+ } else {
+ wmi_setup_aggr_cmd(ar->arWmi, cmd.tid);
+ }
+ }
+ break;
+
+ case AR6000_XIOCTL_DELE_AGGR:
+ {
+ WMI_DELBA_REQ_CMD cmd;
+
+ if (ar->arWmiReady == FALSE) {
+ ret = -EIO;
+ } else if (copy_from_user(&cmd, userdata, sizeof(cmd))) {
+ ret = -EFAULT;
+ } else {
+ wmi_delete_aggr_cmd(ar->arWmi, cmd.tid, cmd.is_sender_initiator);
+ }
+ }
+ break;
+
+ case AR6000_XIOCTL_ALLOW_AGGR:
+ {
+ WMI_ALLOW_AGGR_CMD cmd;
+
+ if (ar->arWmiReady == FALSE) {
+ ret = -EIO;
+ } else if (copy_from_user(&cmd, userdata, sizeof(cmd))) {
+ ret = -EFAULT;
+ } else {
+ wmi_allow_aggr_cmd(ar->arWmi, cmd.tx_allow_aggr, cmd.rx_allow_aggr);
+ }
+ }
+ break;
+
+ case AR6000_XIOCTL_SET_HT_CAP:
+ {
+ if (ar->arWmiReady == FALSE) {
+ ret = -EIO;
+ } else if (copy_from_user(&htCap, userdata,
+ sizeof(htCap)))
+ {
+ ret = -EFAULT;
+ } else {
+
+ if (wmi_set_ht_cap_cmd(ar->arWmi, &htCap) != A_OK)
+ {
+ ret = -EIO;
+ }
+ }
+ break;
+ }
+ case AR6000_XIOCTL_SET_HT_OP:
+ {
+ if (ar->arWmiReady == FALSE) {
+ ret = -EIO;
+ } else if (copy_from_user(&htOp, userdata,
+ sizeof(htOp)))
+ {
+ ret = -EFAULT;
+ } else {
+
+ if (wmi_set_ht_op_cmd(ar->arWmi, htOp.sta_chan_width) != A_OK)
+ {
+ ret = -EIO;
+ }
+ }
+ break;
+ }
+#endif
+ case AR6000_XIOCTL_ACL_DATA:
+ {
+ void *osbuf = NULL;
+ if (ar->arWmiReady == FALSE) {
+ ret = -EIO;
+ } else if (ar6000_create_acl_data_osbuf(dev, (A_UINT8*)userdata, &osbuf) != A_OK) {
+ ret = -EIO;
+ } else {
+ if (wmi_data_hdr_add(ar->arWmi, osbuf, DATA_MSGTYPE, 0, WMI_DATA_HDR_DATA_TYPE_ACL,0,NULL) != A_OK) {
+ AR_DEBUG_PRINTF(ATH_DEBUG_ERR,("XIOCTL_ACL_DATA - wmi_data_hdr_add failed\n"));
+ } else {
+ /* Send data buffer over HTC */
+ ar6000_acl_data_tx(osbuf, ar->arNetDev);
+ }
+ }
+ break;
+ }
+ case AR6000_XIOCTL_HCI_CMD:
+ {
+ char tmp_buf[512];
+ A_INT8 i;
+ WMI_HCI_CMD *cmd = (WMI_HCI_CMD *)tmp_buf;
+ A_UINT8 size;
+
+ size = sizeof(cmd->cmd_buf_sz);
+ if (ar->arWmiReady == FALSE) {
+ ret = -EIO;
+ } else if (copy_from_user(cmd, userdata, size)) {
+ ret = -EFAULT;
+ } else if(copy_from_user(cmd->buf, userdata + size, cmd->cmd_buf_sz)) {
+ ret = -EFAULT;
+ } else {
+ if (wmi_send_hci_cmd(ar->arWmi, cmd->buf, cmd->cmd_buf_sz) != A_OK) {
+ ret = -EIO;
+ }else if(loghci) {
+ A_PRINTF_LOG("HCI Command To PAL --> \n");
+ for(i = 0; i < cmd->cmd_buf_sz; i++) {
+ A_PRINTF_LOG("0x%02x ",cmd->buf[i]);
+ if((i % 10) == 0) {
+ A_PRINTF_LOG("\n");
+ }
+ }
+ A_PRINTF_LOG("\n");
+ A_PRINTF_LOG("==================================\n");
+ }
+ }
+ break;
+ }
+ case AR6000_XIOCTL_WLAN_CONN_PRECEDENCE:
+ {
+ WMI_SET_BT_WLAN_CONN_PRECEDENCE cmd;
+ if (ar->arWmiReady == FALSE) {
+ ret = -EIO;
+ } else if (copy_from_user(&cmd, userdata, sizeof(cmd))) {
+ ret = -EFAULT;
+ } else {
+ if (cmd.precedence == BT_WLAN_CONN_PRECDENCE_WLAN ||
+ cmd.precedence == BT_WLAN_CONN_PRECDENCE_PAL) {
+ if ( wmi_set_wlan_conn_precedence_cmd(ar->arWmi, cmd.precedence) != A_OK) {
+ ret = -EIO;
+ }
+ } else {
+ ret = -EINVAL;
+ }
+ }
+ break;
+ }
+ case AR6000_XIOCTL_AP_GET_STAT:
+ {
+ ret = ar6000_ioctl_get_ap_stats(dev, rq);
+ break;
+ }
+ case AR6000_XIOCTL_SET_TX_SELECT_RATES:
+ {
+ WMI_SET_TX_SELECT_RATES_CMD masks;
+
+ if (ar->arWmiReady == FALSE) {
+ ret = -EIO;
+ } else if (copy_from_user(&masks, userdata,
+ sizeof(masks)))
+ {
+ ret = -EFAULT;
+ } else {
+
+ if (wmi_set_tx_select_rates_cmd(ar->arWmi, masks.rateMasks) != A_OK)
+ {
+ ret = -EIO;
+ }
+ }
+ break;
+ }
+ case AR6000_XIOCTL_AP_GET_HIDDEN_SSID:
+ {
+ WMI_AP_HIDDEN_SSID_CMD ssid;
+ ssid.hidden_ssid = ar->ap_hidden_ssid;
+
+ if (ar->arWmiReady == FALSE) {
+ ret = -EIO;
+ } else if(copy_to_user((WMI_AP_HIDDEN_SSID_CMD *)rq->ifr_data,
+ &ssid, sizeof(WMI_AP_HIDDEN_SSID_CMD))) {
+ ret = -EFAULT;
+ }
+ break;
+ }
+ case AR6000_XIOCTL_AP_GET_COUNTRY:
+ {
+ WMI_AP_SET_COUNTRY_CMD cty;
+ A_MEMCPY(cty.countryCode, ar->ap_country_code, 3);
+
+ if (ar->arWmiReady == FALSE) {
+ ret = -EIO;
+ } else if(copy_to_user((WMI_AP_SET_COUNTRY_CMD *)rq->ifr_data,
+ &cty, sizeof(WMI_AP_SET_COUNTRY_CMD))) {
+ ret = -EFAULT;
+ }
+ break;
+ }
+ case AR6000_XIOCTL_AP_GET_WMODE:
+ {
+ if (ar->arWmiReady == FALSE) {
+ ret = -EIO;
+ } else if(copy_to_user((A_UINT8 *)rq->ifr_data,
+ &ar->ap_wmode, sizeof(A_UINT8))) {
+ ret = -EFAULT;
+ }
+ break;
+ }
+ case AR6000_XIOCTL_AP_GET_DTIM:
+ {
+ WMI_AP_SET_DTIM_CMD dtim;
+ dtim.dtim = ar->ap_dtim_period;
+
+ if (ar->arWmiReady == FALSE) {
+ ret = -EIO;
+ } else if(copy_to_user((WMI_AP_SET_DTIM_CMD *)rq->ifr_data,
+ &dtim, sizeof(WMI_AP_SET_DTIM_CMD))) {
+ ret = -EFAULT;
+ }
+ break;
+ }
+ case AR6000_XIOCTL_AP_GET_BINTVL:
+ {
+ WMI_BEACON_INT_CMD bi;
+ bi.beaconInterval = ar->ap_beacon_interval;
+
+ if (ar->arWmiReady == FALSE) {
+ ret = -EIO;
+ } else if(copy_to_user((WMI_BEACON_INT_CMD *)rq->ifr_data,
+ &bi, sizeof(WMI_BEACON_INT_CMD))) {
+ ret = -EFAULT;
+ }
+ break;
+ }
+ case AR6000_XIOCTL_AP_GET_RTS:
+ {
+ WMI_SET_RTS_CMD rts;
+ rts.threshold = ar->arRTS;
+
+ if (ar->arWmiReady == FALSE) {
+ ret = -EIO;
+ } else if(copy_to_user((WMI_SET_RTS_CMD *)rq->ifr_data,
+ &rts, sizeof(WMI_SET_RTS_CMD))) {
+ ret = -EFAULT;
+ }
+ break;
+ }
+ case AR6000_XIOCTL_FETCH_TARGET_REGS:
+ {
+ A_UINT32 targregs[AR6003_FETCH_TARG_REGS_COUNT];
+
+ if (ar->arTargetType == TARGET_TYPE_AR6003) {
+ ar6k_FetchTargetRegs(hifDevice, targregs);
+ if (copy_to_user((A_UINT32 *)rq->ifr_data, &targregs, sizeof(targregs)))
+ {
+ ret = -EFAULT;
+ }
+ } else {
+ ret = -EOPNOTSUPP;
+ }
+ break;
+ }
+ case AR6000_XIOCTL_AP_SET_11BG_RATESET:
+ {
+ WMI_AP_SET_11BG_RATESET_CMD rate;
+ if (ar->arWmiReady == FALSE) {
+ ret = -EIO;
+ } else if (copy_from_user(&rate, userdata, sizeof(rate))) {
+ ret = -EFAULT;
+ } else {
+ wmi_ap_set_rateset(ar->arWmi, rate.rateset);
+ }
+ break;
+ }
+ case AR6000_XIOCTL_GET_WLAN_SLEEP_STATE:
+ {
+ WMI_REPORT_SLEEP_STATE_EVENT wmiSleepEvent ;
+
+ if (ar->arWlanState == WLAN_ENABLED) {
+ wmiSleepEvent.sleepState = WMI_REPORT_SLEEP_STATUS_IS_AWAKE;
+ } else {
+ wmiSleepEvent.sleepState = WMI_REPORT_SLEEP_STATUS_IS_DEEP_SLEEP;
+ }
+ rq->ifr_ifru.ifru_ivalue = ar->arWlanState; /* return value */
+
+ ar6000_send_event_to_app(ar, WMI_REPORT_SLEEP_STATE_EVENTID, (A_UINT8*)&wmiSleepEvent,
+ sizeof(wmiSleepEvent));
+ break;
+ }
+#ifdef CONFIG_PM
+ case AR6000_XIOCTL_SET_BT_HW_POWER_STATE:
+ {
+ unsigned int state;
+ if (get_user(state, (unsigned int *)userdata)) {
+ ret = -EFAULT;
+ break;
+ }
+ if (ar6000_set_bt_hw_state(ar, state)!=A_OK) {
+ ret = -EIO;
+ }
+ }
+ break;
+ case AR6000_XIOCTL_GET_BT_HW_POWER_STATE:
+ rq->ifr_ifru.ifru_ivalue = !ar->arBTOff; /* return value */
+ break;
+#endif
+
+ case AR6000_XIOCTL_WMI_SET_TX_SGI_PARAM:
+ {
+ WMI_SET_TX_SGI_PARAM_CMD SGICmd;
+
+ if (ar->arWmiReady == FALSE) {
+ ret = -EIO;
+ } else if (copy_from_user(&SGICmd, userdata,
+ sizeof(SGICmd))){
+ ret = -EFAULT;
+ } else{
+ if (wmi_SGI_cmd(ar->arWmi, SGICmd.sgiMask, SGICmd.sgiPERThreshold) != A_OK) {
+ ret = -EIO;
+ }
+
+ }
+ break;
+ }
+
+ case AR6000_XIOCTL_ADD_AP_INTERFACE:
+#ifdef CONFIG_AP_VIRTUAL_ADAPTER_SUPPORT
+ {
+ char ap_ifname[IFNAMSIZ] = {0,};
+ if (copy_from_user(ap_ifname, userdata, IFNAMSIZ)) {
+ ret = -EFAULT;
+ } else {
+ if (ar6000_add_ap_interface(ar, ap_ifname) != A_OK) {
+ ret = -EIO;
+ }
+ }
+ }
+#else
+ ret = -EOPNOTSUPP;
+#endif
+ break;
+ case AR6000_XIOCTL_REMOVE_AP_INTERFACE:
+#ifdef CONFIG_AP_VIRTUAL_ADAPTER_SUPPORT
+ if (ar6000_remove_ap_interface(ar) != A_OK) {
+ ret = -EIO;
+ }
+#else
+ ret = -EOPNOTSUPP;
+#endif
+ break;
+
+ default:
+ ret = -EOPNOTSUPP;
+ }
+
+ioctl_done:
+ rtnl_lock(); /* restore rtnl state */
+ dev_put(dev);
+
+ return ret;
+}
+
+A_UINT8 mac_cmp_wild(A_UINT8 *mac, A_UINT8 *new_mac, A_UINT8 wild, A_UINT8 new_wild)
+{
+ A_UINT8 i;
+
+ for(i=0;i<ATH_MAC_LEN;i++) {
+ if((wild & 1<<i) && (new_wild & 1<<i)) continue;
+ if(mac[i] != new_mac[i]) return 1;
+ }
+ if((A_MEMCMP(new_mac, null_mac, 6)==0) && new_wild &&
+ (wild != new_wild)) {
+ return 1;
+ }
+
+ return 0;
+}
+
+A_UINT8 acl_add_del_mac(WMI_AP_ACL *a, WMI_AP_ACL_MAC_CMD *acl)
+{
+ A_INT8 already_avail=-1, free_slot=-1, i;
+
+ /* To check whether this mac is already there in our list */
+ for(i=AP_ACL_SIZE-1;i>=0;i--)
+ {
+ if(mac_cmp_wild(a->acl_mac[i], acl->mac, a->wildcard[i],
+ acl->wildcard)==0)
+ already_avail = i;
+
+ if(!((1 << i) & a->index))
+ free_slot = i;
+ }
+
+ if(acl->action == ADD_MAC_ADDR)
+ {
+ /* Don't add the MAC if it is already in the list */
+ if((already_avail >= 0) || (free_slot == -1))
+ return 0;
+
+ A_MEMCPY(a->acl_mac[free_slot], acl->mac, ATH_MAC_LEN);
+ a->index = a->index | (1 << free_slot);
+ acl->index = free_slot;
+ a->wildcard[free_slot] = acl->wildcard;
+ return 1;
+ }
+ else if(acl->action == DEL_MAC_ADDR)
+ {
+ if(acl->index > AP_ACL_SIZE)
+ return 0;
+
+ if(!(a->index & (1 << acl->index)))
+ return 0;
+
+ A_MEMZERO(a->acl_mac[acl->index],ATH_MAC_LEN);
+ a->index = a->index & ~(1 << acl->index);
+ a->wildcard[acl->index] = 0;
+ return 1;
+ }
+
+ return 0;
+}
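
A minimal illustration (not part of the patch) of how the ACL helpers above are meant to be driven: an add picks a free slot, records it in cmd.index and in the bitmask a->index, and a delete reuses that slot number. The struct fields and the ADD_MAC_ADDR/DEL_MAC_ADDR constants are taken from the usage above; the function name and example MAC are hypothetical.

/* hypothetical usage sketch, not part of the driver */
static void acl_demo(void)
{
    WMI_AP_ACL list;
    WMI_AP_ACL_MAC_CMD cmd;
    A_UINT8 mac[ATH_MAC_LEN] = { 0x00, 0x03, 0x7f, 0x11, 0x22, 0x33 };

    A_MEMZERO(&list, sizeof(list));
    A_MEMZERO(&cmd, sizeof(cmd));

    /* add: wildcard == 0 means all six bytes must match on lookup */
    cmd.action = ADD_MAC_ADDR;
    cmd.wildcard = 0;
    A_MEMCPY(cmd.mac, mac, ATH_MAC_LEN);
    if (acl_add_del_mac(&list, &cmd)) {
        /* chosen slot is returned in cmd.index; its bit is set in list.index */
    }

    /* delete: cmd.index still identifies the slot populated above */
    cmd.action = DEL_MAC_ADDR;
    acl_add_del_mac(&list, &cmd);
}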
diff --git a/drivers/staging/ath6kl/os/linux/netbuf.c b/drivers/staging/ath6kl/os/linux/netbuf.c
new file mode 100644
index 000000000000..15e5d0475202
--- /dev/null
+++ b/drivers/staging/ath6kl/os/linux/netbuf.c
@@ -0,0 +1,234 @@
+//------------------------------------------------------------------------------
+// Copyright (c) 2004-2010 Atheros Communications Inc.
+// All rights reserved.
+//
+//
+//
+// Permission to use, copy, modify, and/or distribute this software for any
+// purpose with or without fee is hereby granted, provided that the above
+// copyright notice and this permission notice appear in all copies.
+//
+// THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+// WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+// MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+// ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+// WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+// ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+// OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+//
+//
+//
+// Author(s): ="Atheros"
+//------------------------------------------------------------------------------
+#include <a_config.h>
+#include "athdefs.h"
+#include "a_types.h"
+#include "a_osapi.h"
+#include "htc_packet.h"
+
+#define AR6000_DATA_OFFSET 64
+
+void a_netbuf_enqueue(A_NETBUF_QUEUE_T *q, void *pkt)
+{
+ skb_queue_tail((struct sk_buff_head *) q, (struct sk_buff *) pkt);
+}
+
+void a_netbuf_prequeue(A_NETBUF_QUEUE_T *q, void *pkt)
+{
+ skb_queue_head((struct sk_buff_head *) q, (struct sk_buff *) pkt);
+}
+
+void *a_netbuf_dequeue(A_NETBUF_QUEUE_T *q)
+{
+ return((void *) skb_dequeue((struct sk_buff_head *) q));
+}
+
+int a_netbuf_queue_size(A_NETBUF_QUEUE_T *q)
+{
+ return(skb_queue_len((struct sk_buff_head *) q));
+}
+
+int a_netbuf_queue_empty(A_NETBUF_QUEUE_T *q)
+{
+ return(skb_queue_empty((struct sk_buff_head *) q));
+}
+
+void a_netbuf_queue_init(A_NETBUF_QUEUE_T *q)
+{
+ skb_queue_head_init((struct sk_buff_head *) q);
+}
+
+void *
+a_netbuf_alloc(int size)
+{
+ struct sk_buff *skb;
+ size += 2 * (A_GET_CACHE_LINE_BYTES()); /* add some cacheline space at front and back of buffer */
+ skb = dev_alloc_skb(AR6000_DATA_OFFSET + sizeof(HTC_PACKET) + size);
+ if (skb == NULL) {
+ return NULL;
+ }
+ skb_reserve(skb, AR6000_DATA_OFFSET + sizeof(HTC_PACKET) + A_GET_CACHE_LINE_BYTES());
+ return ((void *)skb);
+}
+
+/*
+ * Allocate an SKB without any encapsulation requirement.
+ */
+void *
+a_netbuf_alloc_raw(int size)
+{
+ struct sk_buff *skb;
+
+ skb = dev_alloc_skb(size);
+
+ return ((void *)skb);
+}
+
+void
+a_netbuf_free(void *bufPtr)
+{
+ struct sk_buff *skb = (struct sk_buff *)bufPtr;
+
+ dev_kfree_skb(skb);
+}
+
+A_UINT32
+a_netbuf_to_len(void *bufPtr)
+{
+ return (((struct sk_buff *)bufPtr)->len);
+}
+
+void *
+a_netbuf_to_data(void *bufPtr)
+{
+ return (((struct sk_buff *)bufPtr)->data);
+}
+
+/*
+ * Add len # of bytes to the beginning of the network buffer
+ * pointed to by bufPtr
+ */
+A_STATUS
+a_netbuf_push(void *bufPtr, A_INT32 len)
+{
+ skb_push((struct sk_buff *)bufPtr, len);
+
+ return A_OK;
+}
+
+/*
+ * Add len # of bytes to the beginning of the network buffer
+ * pointed to by bufPtr and also fill with data
+ */
+A_STATUS
+a_netbuf_push_data(void *bufPtr, char *srcPtr, A_INT32 len)
+{
+ skb_push((struct sk_buff *) bufPtr, len);
+ A_MEMCPY(((struct sk_buff *)bufPtr)->data, srcPtr, len);
+
+ return A_OK;
+}
+
+/*
+ * Add len # of bytes to the end of the network buffer
+ * pointed to by bufPtr
+ */
+A_STATUS
+a_netbuf_put(void *bufPtr, A_INT32 len)
+{
+ skb_put((struct sk_buff *)bufPtr, len);
+
+ return A_OK;
+}
+
+/*
+ * Add len # of bytes to the end of the network buffer
+ * pointed to by bufPtr and also fill with data
+ */
+A_STATUS
+a_netbuf_put_data(void *bufPtr, char *srcPtr, A_INT32 len)
+{
+ char *start = (char*)(((struct sk_buff *)bufPtr)->data +
+ ((struct sk_buff *)bufPtr)->len);
+ skb_put((struct sk_buff *)bufPtr, len);
+ A_MEMCPY(start, srcPtr, len);
+
+ return A_OK;
+}
+
+
+/*
+ * Trim the network buffer pointed to by bufPtr to len # of bytes
+ */
+A_STATUS
+a_netbuf_setlen(void *bufPtr, A_INT32 len)
+{
+ skb_trim((struct sk_buff *)bufPtr, len);
+
+ return A_OK;
+}
+
+/*
+ * Chop off len bytes from the end of the buffer.
+ */
+A_STATUS
+a_netbuf_trim(void *bufPtr, A_INT32 len)
+{
+ skb_trim((struct sk_buff *)bufPtr, ((struct sk_buff *)bufPtr)->len - len);
+
+ return A_OK;
+}
+
+/*
+ * Chop off len bytes from the end of the buffer and return the data.
+ */
+A_STATUS
+a_netbuf_trim_data(void *bufPtr, char *dstPtr, A_INT32 len)
+{
+ char *start = (char*)(((struct sk_buff *)bufPtr)->data +
+ (((struct sk_buff *)bufPtr)->len - len));
+
+ A_MEMCPY(dstPtr, start, len);
+ skb_trim((struct sk_buff *)bufPtr, ((struct sk_buff *)bufPtr)->len - len);
+
+ return A_OK;
+}
+
+
+/*
+ * Returns the number of bytes available for a_netbuf_push()
+ */
+A_INT32
+a_netbuf_headroom(void *bufPtr)
+{
+ return (skb_headroom((struct sk_buff *)bufPtr));
+}
+
+/*
+ * Removes specified number of bytes from the beginning of the buffer
+ */
+A_STATUS
+a_netbuf_pull(void *bufPtr, A_INT32 len)
+{
+ skb_pull((struct sk_buff *)bufPtr, len);
+
+ return A_OK;
+}
+
+/*
+ * Removes specified number of bytes from the beginning of the buffer
+ * and return the data
+ */
+A_STATUS
+a_netbuf_pull_data(void *bufPtr, char *dstPtr, A_INT32 len)
+{
+ A_MEMCPY(dstPtr, ((struct sk_buff *)bufPtr)->data, len);
+ skb_pull((struct sk_buff *)bufPtr, len);
+
+ return A_OK;
+}
+
+#ifdef EXPORT_HCI_BRIDGE_INTERFACE
+EXPORT_SYMBOL(a_netbuf_to_data);
+EXPORT_SYMBOL(a_netbuf_put);
+EXPORT_SYMBOL(a_netbuf_pull);
+EXPORT_SYMBOL(a_netbuf_alloc);
+EXPORT_SYMBOL(a_netbuf_free);
+#endif
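
A short, hypothetical usage sketch of the wrappers defined above (not part of the patch): allocate a buffer with the reserved headroom, append a payload, prepend a header, then unwind. Only functions declared in this file are used; the function name, buffer sizes and contents are illustrative.

/* hypothetical usage sketch, not part of the driver */
static void netbuf_demo(void)
{
    char header[4] = { 0x01, 0x02, 0x03, 0x04 };
    char payload[] = "payload";
    void *buf;

    buf = a_netbuf_alloc(64);                 /* headroom for HTC_PACKET etc. is reserved */
    if (buf == NULL)
        return;

    a_netbuf_put_data(buf, payload, sizeof(payload));  /* append at the tail */
    a_netbuf_push_data(buf, header, sizeof(header));   /* prepend at the head */

    /* ... hand the buffer off for transmit, or unwind: */
    a_netbuf_pull(buf, sizeof(header));       /* strip the header again */
    a_netbuf_free(buf);
}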
diff --git a/drivers/staging/ath6kl/os/linux/wireless_ext.c b/drivers/staging/ath6kl/os/linux/wireless_ext.c
new file mode 100644
index 000000000000..bb6de0f404fe
--- /dev/null
+++ b/drivers/staging/ath6kl/os/linux/wireless_ext.c
@@ -0,0 +1,2725 @@
+//------------------------------------------------------------------------------
+// Copyright (c) 2004-2010 Atheros Communications Inc.
+// All rights reserved.
+//
+//
+//
+// Permission to use, copy, modify, and/or distribute this software for any
+// purpose with or without fee is hereby granted, provided that the above
+// copyright notice and this permission notice appear in all copies.
+//
+// THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+// WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+// MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+// ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+// WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+// ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+// OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+//
+//
+//
+// Author(s): ="Atheros"
+//------------------------------------------------------------------------------
+
+#include "ar6000_drv.h"
+
+#define IWE_STREAM_ADD_EVENT(p1, p2, p3, p4, p5) \
+ iwe_stream_add_event((p1), (p2), (p3), (p4), (p5))
+
+#define IWE_STREAM_ADD_POINT(p1, p2, p3, p4, p5) \
+ iwe_stream_add_point((p1), (p2), (p3), (p4), (p5))
+
+#define IWE_STREAM_ADD_VALUE(p1, p2, p3, p4, p5, p6) \
+ iwe_stream_add_value((p1), (p2), (p3), (p4), (p5), (p6))
+
+static void ar6000_set_quality(struct iw_quality *iq, A_INT8 rssi);
+extern unsigned int wmitimeout;
+extern A_WAITQUEUE_HEAD arEvent;
+
+#if WIRELESS_EXT > 14
+/*
+ * Encode a WPA or RSN information element as a custom
+ * element using the hostap format.
+ */
+static u_int
+encode_ie(void *buf, size_t bufsize,
+ const u_int8_t *ie, size_t ielen,
+ const char *leader, size_t leader_len)
+{
+ u_int8_t *p;
+ int i;
+
+ if (bufsize < leader_len)
+ return 0;
+ p = buf;
+ memcpy(p, leader, leader_len);
+ bufsize -= leader_len;
+ p += leader_len;
+ for (i = 0; i < ielen && bufsize > 2; i++)
+ {
+ p += sprintf((char*)p, "%02x", ie[i]);
+ bufsize -= 2;
+ }
+ return (i == ielen ? p - (u_int8_t *)buf : 0);
+}
+#endif /* WIRELESS_EXT > 14 */
+
+static A_UINT8
+get_bss_phy_capability(bss_t *bss)
+{
+ A_UINT8 capability = 0;
+ struct ieee80211_common_ie *cie = &bss->ni_cie;
+#define CHAN_IS_11A(x) (!((x >= 2412) && (x <= 2484)))
+ if (CHAN_IS_11A(cie->ie_chan)) {
+ if (cie->ie_htcap) {
+ capability = WMI_11NA_CAPABILITY;
+ } else {
+ capability = WMI_11A_CAPABILITY;
+ }
+ } else if ((cie->ie_erp) || (cie->ie_xrates)) {
+ if (cie->ie_htcap) {
+ capability = WMI_11NG_CAPABILITY;
+ } else {
+ capability = WMI_11G_CAPABILITY;
+ }
+ }
+ return capability;
+}
+
+void
+ar6000_scan_node(void *arg, bss_t *ni)
+{
+ struct iw_event iwe;
+#if WIRELESS_EXT > 14
+ char buf[256];
+#endif
+ struct ar_giwscan_param *param;
+ A_CHAR *current_ev;
+ A_CHAR *end_buf;
+ struct ieee80211_common_ie *cie;
+ A_CHAR *current_val;
+ A_INT32 j;
+ A_UINT32 rate_len, data_len = 0;
+
+ param = (struct ar_giwscan_param *)arg;
+
+ current_ev = param->current_ev;
+ end_buf = param->end_buf;
+
+ cie = &ni->ni_cie;
+
+ if ((end_buf - current_ev) > IW_EV_ADDR_LEN)
+ {
+ A_MEMZERO(&iwe, sizeof(iwe));
+ iwe.cmd = SIOCGIWAP;
+ iwe.u.ap_addr.sa_family = ARPHRD_ETHER;
+ A_MEMCPY(iwe.u.ap_addr.sa_data, ni->ni_macaddr, 6);
+ current_ev = IWE_STREAM_ADD_EVENT(param->info, current_ev, end_buf,
+ &iwe, IW_EV_ADDR_LEN);
+ }
+ param->bytes_needed += IW_EV_ADDR_LEN;
+
+ data_len = cie->ie_ssid[1] + IW_EV_POINT_LEN;
+ if ((end_buf - current_ev) > data_len)
+ {
+ A_MEMZERO(&iwe, sizeof(iwe));
+ iwe.cmd = SIOCGIWESSID;
+ iwe.u.data.flags = 1;
+ iwe.u.data.length = cie->ie_ssid[1];
+ current_ev = IWE_STREAM_ADD_POINT(param->info, current_ev, end_buf,
+ &iwe, (char*)&cie->ie_ssid[2]);
+ }
+ param->bytes_needed += data_len;
+
+ if (cie->ie_capInfo & (IEEE80211_CAPINFO_ESS|IEEE80211_CAPINFO_IBSS)) {
+ if ((end_buf - current_ev) > IW_EV_UINT_LEN)
+ {
+ A_MEMZERO(&iwe, sizeof(iwe));
+ iwe.cmd = SIOCGIWMODE;
+ iwe.u.mode = cie->ie_capInfo & IEEE80211_CAPINFO_ESS ?
+ IW_MODE_MASTER : IW_MODE_ADHOC;
+ current_ev = IWE_STREAM_ADD_EVENT(param->info, current_ev, end_buf,
+ &iwe, IW_EV_UINT_LEN);
+ }
+ param->bytes_needed += IW_EV_UINT_LEN;
+ }
+
+ if ((end_buf - current_ev) > IW_EV_FREQ_LEN)
+ {
+ A_MEMZERO(&iwe, sizeof(iwe));
+ iwe.cmd = SIOCGIWFREQ;
+ iwe.u.freq.m = cie->ie_chan * 100000;
+ iwe.u.freq.e = 1;
+ current_ev = IWE_STREAM_ADD_EVENT(param->info, current_ev, end_buf,
+ &iwe, IW_EV_FREQ_LEN);
+ }
+ param->bytes_needed += IW_EV_FREQ_LEN;
+
+ if ((end_buf - current_ev) > IW_EV_QUAL_LEN)
+ {
+ A_MEMZERO(&iwe, sizeof(iwe));
+ iwe.cmd = IWEVQUAL;
+ ar6000_set_quality(&iwe.u.qual, ni->ni_snr);
+ current_ev = IWE_STREAM_ADD_EVENT(param->info, current_ev, end_buf,
+ &iwe, IW_EV_QUAL_LEN);
+ }
+ param->bytes_needed += IW_EV_QUAL_LEN;
+
+ if ((end_buf - current_ev) > IW_EV_POINT_LEN)
+ {
+ A_MEMZERO(&iwe, sizeof(iwe));
+ iwe.cmd = SIOCGIWENCODE;
+ if (cie->ie_capInfo & IEEE80211_CAPINFO_PRIVACY) {
+ iwe.u.data.flags = IW_ENCODE_ENABLED | IW_ENCODE_NOKEY;
+ } else {
+ iwe.u.data.flags = IW_ENCODE_DISABLED;
+ }
+ iwe.u.data.length = 0;
+ current_ev = IWE_STREAM_ADD_POINT(param->info, current_ev, end_buf,
+ &iwe, "");
+ }
+ param->bytes_needed += IW_EV_POINT_LEN;
+
+ /* supported bit rate */
+ A_MEMZERO(&iwe, sizeof(iwe));
+ iwe.cmd = SIOCGIWRATE;
+ iwe.u.bitrate.fixed = 0;
+ iwe.u.bitrate.disabled = 0;
+ iwe.u.bitrate.value = 0;
+ current_val = current_ev + IW_EV_LCP_LEN;
+ param->bytes_needed += IW_EV_LCP_LEN;
+
+ if (cie->ie_rates != NULL) {
+ rate_len = cie->ie_rates[1];
+ data_len = (rate_len * (IW_EV_PARAM_LEN - IW_EV_LCP_LEN));
+ if ((end_buf - current_ev) > data_len)
+ {
+ for (j = 0; j < rate_len; j++) {
+ unsigned char val;
+ val = cie->ie_rates[2 + j];
+ iwe.u.bitrate.value =
+ (val >= 0x80)? ((val - 0x80) * 500000): (val * 500000);
+ current_val = IWE_STREAM_ADD_VALUE(param->info, current_ev,
+ current_val, end_buf,
+ &iwe, IW_EV_PARAM_LEN);
+ }
+ }
+ param->bytes_needed += data_len;
+ }
+
+ if (cie->ie_xrates != NULL) {
+ rate_len = cie->ie_xrates[1];
+ data_len = (rate_len * (IW_EV_PARAM_LEN - IW_EV_LCP_LEN));
+ if ((end_buf - current_ev) > data_len)
+ {
+ for (j = 0; j < rate_len; j++) {
+ unsigned char val;
+ val = cie->ie_xrates[2 + j];
+ iwe.u.bitrate.value =
+ (val >= 0x80)? ((val - 0x80) * 500000): (val * 500000);
+ current_val = IWE_STREAM_ADD_VALUE(param->info, current_ev,
+ current_val, end_buf,
+ &iwe, IW_EV_PARAM_LEN);
+ }
+ }
+ param->bytes_needed += data_len;
+ }
+ /* remove fixed header if no rates were added */
+ if ((current_val - current_ev) > IW_EV_LCP_LEN)
+ current_ev = current_val;
+
+#if WIRELESS_EXT >= 18
+ /* IE */
+ if (cie->ie_wpa != NULL) {
+ data_len = cie->ie_wpa[1] + 2 + IW_EV_POINT_LEN;
+ if ((end_buf - current_ev) > data_len)
+ {
+ A_MEMZERO(&iwe, sizeof(iwe));
+ iwe.cmd = IWEVGENIE;
+ iwe.u.data.length = cie->ie_wpa[1] + 2;
+ current_ev = IWE_STREAM_ADD_POINT(param->info, current_ev, end_buf,
+ &iwe, (char*)cie->ie_wpa);
+ }
+ param->bytes_needed += data_len;
+ }
+
+ if (cie->ie_rsn != NULL && cie->ie_rsn[0] == IEEE80211_ELEMID_RSN) {
+ data_len = cie->ie_rsn[1] + 2 + IW_EV_POINT_LEN;
+ if ((end_buf - current_ev) > data_len)
+ {
+ A_MEMZERO(&iwe, sizeof(iwe));
+ iwe.cmd = IWEVGENIE;
+ iwe.u.data.length = cie->ie_rsn[1] + 2;
+ current_ev = IWE_STREAM_ADD_POINT(param->info, current_ev, end_buf,
+ &iwe, (char*)cie->ie_rsn);
+ }
+ param->bytes_needed += data_len;
+ }
+
+#endif /* WIRELESS_EXT >= 18 */
+
+ if ((end_buf - current_ev) > IW_EV_CHAR_LEN)
+ {
+ /* protocol */
+ A_MEMZERO(&iwe, sizeof(iwe));
+ iwe.cmd = SIOCGIWNAME;
+ switch (get_bss_phy_capability(ni)) {
+ case WMI_11A_CAPABILITY:
+ snprintf(iwe.u.name, IFNAMSIZ, "IEEE 802.11a");
+ break;
+ case WMI_11G_CAPABILITY:
+ snprintf(iwe.u.name, IFNAMSIZ, "IEEE 802.11g");
+ break;
+ case WMI_11NA_CAPABILITY:
+ snprintf(iwe.u.name, IFNAMSIZ, "IEEE 802.11na");
+ break;
+ case WMI_11NG_CAPABILITY:
+ snprintf(iwe.u.name, IFNAMSIZ, "IEEE 802.11ng");
+ break;
+ default:
+ snprintf(iwe.u.name, IFNAMSIZ, "IEEE 802.11b");
+ break;
+ }
+ current_ev = IWE_STREAM_ADD_EVENT(param->info, current_ev, end_buf,
+ &iwe, IW_EV_CHAR_LEN);
+ }
+ param->bytes_needed += IW_EV_CHAR_LEN;
+
+#if WIRELESS_EXT > 14
+ A_MEMZERO(&iwe, sizeof(iwe));
+ iwe.cmd = IWEVCUSTOM;
+ iwe.u.data.length = snprintf(buf, sizeof(buf), "bcn_int=%d", cie->ie_beaconInt);
+ data_len = iwe.u.data.length + IW_EV_POINT_LEN;
+ if ((end_buf - current_ev) > data_len)
+ {
+ current_ev = IWE_STREAM_ADD_POINT(param->info, current_ev, end_buf,
+ &iwe, buf);
+ }
+ param->bytes_needed += data_len;
+
+#if WIRELESS_EXT < 18
+ if (cie->ie_wpa != NULL) {
+ static const char wpa_leader[] = "wpa_ie=";
+ data_len = (sizeof(wpa_leader) - 1) + ((cie->ie_wpa[1]+2) * 2) + IW_EV_POINT_LEN;
+ if ((end_buf - current_ev) > data_len)
+ {
+ A_MEMZERO(&iwe, sizeof(iwe));
+ iwe.cmd = IWEVCUSTOM;
+ iwe.u.data.length = encode_ie(buf, sizeof(buf), cie->ie_wpa,
+ cie->ie_wpa[1]+2,
+ wpa_leader, sizeof(wpa_leader)-1);
+
+ if (iwe.u.data.length != 0) {
+ current_ev = IWE_STREAM_ADD_POINT(param->info, current_ev,
+ end_buf, &iwe, buf);
+ }
+ }
+ param->bytes_needed += data_len;
+ }
+
+ if (cie->ie_rsn != NULL && cie->ie_rsn[0] == IEEE80211_ELEMID_RSN) {
+ static const char rsn_leader[] = "rsn_ie=";
+ data_len = (sizeof(rsn_leader) - 1) + ((cie->ie_rsn[1]+2) * 2) + IW_EV_POINT_LEN;
+ if ((end_buf - current_ev) > data_len)
+ {
+ A_MEMZERO(&iwe, sizeof(iwe));
+ iwe.cmd = IWEVCUSTOM;
+ iwe.u.data.length = encode_ie(buf, sizeof(buf), cie->ie_rsn,
+ cie->ie_rsn[1]+2,
+ rsn_leader, sizeof(rsn_leader)-1);
+
+ if (iwe.u.data.length != 0) {
+ current_ev = IWE_STREAM_ADD_POINT(param->info, current_ev,
+ end_buf, &iwe, buf);
+ }
+ }
+ param->bytes_needed += data_len;
+ }
+#endif /* WIRELESS_EXT < 18 */
+
+ if (cie->ie_wmm != NULL) {
+ static const char wmm_leader[] = "wmm_ie=";
+ data_len = (sizeof(wmm_leader) - 1) + ((cie->ie_wmm[1]+2) * 2) + IW_EV_POINT_LEN;
+ if ((end_buf - current_ev) > data_len)
+ {
+ A_MEMZERO(&iwe, sizeof(iwe));
+ iwe.cmd = IWEVCUSTOM;
+ iwe.u.data.length = encode_ie(buf, sizeof(buf), cie->ie_wmm,
+ cie->ie_wmm[1]+2,
+ wmm_leader, sizeof(wmm_leader)-1);
+ if (iwe.u.data.length != 0) {
+ current_ev = IWE_STREAM_ADD_POINT(param->info, current_ev,
+ end_buf, &iwe, buf);
+ }
+ }
+ param->bytes_needed += data_len;
+ }
+
+ if (cie->ie_ath != NULL) {
+ static const char ath_leader[] = "ath_ie=";
+ data_len = (sizeof(ath_leader) - 1) + ((cie->ie_ath[1]+2) * 2) + IW_EV_POINT_LEN;
+ if ((end_buf - current_ev) > data_len)
+ {
+ A_MEMZERO(&iwe, sizeof(iwe));
+ iwe.cmd = IWEVCUSTOM;
+ iwe.u.data.length = encode_ie(buf, sizeof(buf), cie->ie_ath,
+ cie->ie_ath[1]+2,
+ ath_leader, sizeof(ath_leader)-1);
+ if (iwe.u.data.length != 0) {
+ current_ev = IWE_STREAM_ADD_POINT(param->info, current_ev,
+ end_buf, &iwe, buf);
+ }
+ }
+ param->bytes_needed += data_len;
+ }
+
+#ifdef WAPI_ENABLE
+ if (cie->ie_wapi != NULL) {
+ static const char wapi_leader[] = "wapi_ie=";
+ data_len = (sizeof(wapi_leader) - 1) + ((cie->ie_wapi[1] + 2) * 2) + IW_EV_POINT_LEN;
+ if ((end_buf - current_ev) > data_len) {
+ A_MEMZERO(&iwe, sizeof(iwe));
+ iwe.cmd = IWEVCUSTOM;
+ iwe.u.data.length = encode_ie(buf, sizeof(buf), cie->ie_wapi,
+ cie->ie_wapi[1] + 2,
+ wapi_leader, sizeof(wapi_leader) - 1);
+ if (iwe.u.data.length != 0) {
+ current_ev = IWE_STREAM_ADD_POINT(param->info, current_ev,
+ end_buf, &iwe, buf);
+ }
+ }
+ param->bytes_needed += data_len;
+ }
+#endif /* WAPI_ENABLE */
+
+#endif /* WIRELESS_EXT > 14 */
+
+#if WIRELESS_EXT >= 18
+ if (cie->ie_wsc != NULL) {
+ data_len = (cie->ie_wsc[1] + 2) + IW_EV_POINT_LEN;
+ if ((end_buf - current_ev) > data_len)
+ {
+ A_MEMZERO(&iwe, sizeof(iwe));
+ iwe.cmd = IWEVGENIE;
+ iwe.u.data.length = cie->ie_wsc[1] + 2;
+ current_ev = IWE_STREAM_ADD_POINT(param->info, current_ev, end_buf,
+ &iwe, (char*)cie->ie_wsc);
+ }
+ param->bytes_needed += data_len;
+ }
+#endif /* WIRELESS_EXT >= 18 */
+
+ param->current_ev = current_ev;
+}
+
+int
+ar6000_ioctl_giwscan(struct net_device *dev,
+ struct iw_request_info *info,
+ struct iw_point *data, char *extra)
+{
+ AR_SOFTC_T *ar = (AR_SOFTC_T *)ar6k_priv(dev);
+ struct ar_giwscan_param param;
+
+ if (is_iwioctl_allowed(ar->arNextMode, info->cmd) != A_OK) {
+ A_PRINTF("wext_ioctl: cmd=0x%x not allowed in this mode\n", info->cmd);
+ return -EOPNOTSUPP;
+ }
+
+ if (ar->arWlanState == WLAN_DISABLED) {
+ return -EIO;
+ }
+
+ if (ar->arWmiReady == FALSE) {
+ return -EIO;
+ }
+
+ param.current_ev = extra;
+ param.end_buf = extra + data->length;
+ param.bytes_needed = 0;
+ param.info = info;
+
+ /* Translate data to WE format */
+ wmi_iterate_nodes(ar->arWmi, ar6000_scan_node, &param);
+
+ /* check if bytes needed is greater than bytes consumed */
+ if (param.bytes_needed > (param.current_ev - extra))
+ {
+ /* Request one byte more than needed, because when "data->length" equals bytes_needed,
+ it is not possible to add the last event data as all iwe_stream_add_xxxxx() functions
+ checks whether (cur_ptr + ev_len) < end_ptr, due to this one more retry would happen*/
+ data->length = param.bytes_needed + 1;
+
+ return -E2BIG;
+ }
+
+ return 0;
+}
+
+extern int reconnect_flag;
+/* SIOCSIWESSID */
+static int
+ar6000_ioctl_siwessid(struct net_device *dev,
+ struct iw_request_info *info,
+ struct iw_point *data, char *ssid)
+{
+ AR_SOFTC_T *ar = (AR_SOFTC_T *)ar6k_priv(dev);
+ A_STATUS status;
+ A_UINT8 arNetworkType;
+ A_UINT8 prevMode = ar->arNetworkType;
+
+ if (is_iwioctl_allowed(ar->arNextMode, info->cmd) != A_OK) {
+ A_PRINTF("wext_ioctl: cmd=0x%x not allowed in this mode\n", info->cmd);
+ return -EOPNOTSUPP;
+ }
+
+ if (ar->bIsDestroyProgress) {
+ return -EBUSY;
+ }
+
+ if (ar->arWlanState == WLAN_DISABLED) {
+ return -EIO;
+ }
+
+ if (ar->arWmiReady == FALSE) {
+ return -EIO;
+ }
+
+#if defined(WIRELESS_EXT)
+ if (WIRELESS_EXT >= 20) {
+ data->length += 1;
+ }
+#endif
+
+ /*
+ * iwconfig passes a null-terminated string whose length includes the
+ * terminator, so we need to account for that here.
+ */
+ if (data->flags && (!data->length || (data->length == 1) ||
+ ((data->length - 1) > sizeof(ar->arSsid))))
+ {
+ /*
+ * ssid is invalid
+ */
+ return -EINVAL;
+ }
+
+ if (ar->arNextMode == AP_NETWORK) {
+ /* SSID change for AP network - Will take effect on commit */
+ if(A_MEMCMP(ar->arSsid,ssid,32) != 0) {
+ ar->arSsidLen = data->length - 1;
+ A_MEMCPY(ar->arSsid, ssid, ar->arSsidLen);
+ ar->ap_profile_flag = 1; /* There is a change in profile */
+ }
+ return 0;
+ } else if(ar->arNetworkType == AP_NETWORK) {
+ A_UINT8 ctr;
+ struct sk_buff *skb;
+
+ /* We are switching from AP to STA | IBSS mode, cleanup the AP state */
+ for (ctr=0; ctr < AP_MAX_NUM_STA; ctr++) {
+ remove_sta(ar, ar->sta_list[ctr].mac, 0);
+ }
+ A_MUTEX_LOCK(&ar->mcastpsqLock);
+ while (!A_NETBUF_QUEUE_EMPTY(&ar->mcastpsq)) {
+ skb = A_NETBUF_DEQUEUE(&ar->mcastpsq);
+ A_NETBUF_FREE(skb);
+ }
+ A_MUTEX_UNLOCK(&ar->mcastpsqLock);
+ }
+
+ /* Added for bug 25178: return an IOCTL error, instead of the target returning
+ an Illegal Parameter error, when either the BSSID or the channel is missing
+ and we cannot scan during connect.
+ */
+ if (data->flags) {
+ if (ar->arSkipScan == TRUE &&
+ (ar->arChannelHint == 0 ||
+ (!ar->arReqBssid[0] && !ar->arReqBssid[1] && !ar->arReqBssid[2] &&
+ !ar->arReqBssid[3] && !ar->arReqBssid[4] && !ar->arReqBssid[5])))
+ {
+ return -EINVAL;
+ }
+ }
+
+ if (down_interruptible(&ar->arSem)) {
+ return -ERESTARTSYS;
+ }
+
+ if (ar->bIsDestroyProgress || ar->arWlanState == WLAN_DISABLED) {
+ up(&ar->arSem);
+ return -EBUSY;
+ }
+
+ if (ar->arTxPending[wmi_get_control_ep(ar->arWmi)]) {
+ /*
+ * sleep until the command queue drains
+ */
+ wait_event_interruptible_timeout(arEvent,
+ ar->arTxPending[wmi_get_control_ep(ar->arWmi)] == 0, wmitimeout * HZ);
+ if (signal_pending(current)) {
+ up(&ar->arSem);
+ return -EINTR;
+ }
+ }
+
+ if (!data->flags) {
+ arNetworkType = ar->arNetworkType;
+#ifdef ATH6K_CONFIG_CFG80211
+ if (ar->arConnected) {
+#endif /* ATH6K_CONFIG_CFG80211 */
+ ar6000_init_profile_info(ar);
+#ifdef ATH6K_CONFIG_CFG80211
+ }
+#endif /* ATH6K_CONFIG_CFG80211 */
+ ar->arNetworkType = arNetworkType;
+ }
+
+ /* Update the arNetworkType */
+ ar->arNetworkType = ar->arNextMode;
+
+
+ if ((prevMode != AP_NETWORK) &&
+ ((ar->arSsidLen) || ((ar->arSsidLen == 0) && ar->arConnected) || (!data->flags)))
+ {
+ if ((!data->flags) ||
+ (A_MEMCMP(ar->arSsid, ssid, ar->arSsidLen) != 0) ||
+ (ar->arSsidLen != (data->length - 1)))
+ {
+ /*
+ * SSID set previously or essid off has been issued.
+ *
+ * Disconnect Command is issued in two cases after wmi is ready
+ * (1) ssid is different from the previous setting
+ * (2) essid off has been issued
+ *
+ */
+ if (ar->arWmiReady == TRUE) {
+ reconnect_flag = 0;
+ status = wmi_setPmkid_cmd(ar->arWmi, ar->arBssid, NULL, 0);
+ status = wmi_disconnect_cmd(ar->arWmi);
+ A_MEMZERO(ar->arSsid, sizeof(ar->arSsid));
+ ar->arSsidLen = 0;
+ if (ar->arSkipScan == FALSE) {
+ A_MEMZERO(ar->arReqBssid, sizeof(ar->arReqBssid));
+ }
+ if (!data->flags) {
+ up(&ar->arSem);
+ return 0;
+ }
+ } else {
+ up(&ar->arSem);
+ }
+ }
+ else
+ {
+ /*
+ * SSID is same, so we assume profile hasn't changed.
+ * If the interface is up and wmi is ready, we issue
+ * a reconnect cmd. Issue a reconnect only if we are already
+ * connected.
+ */
+ if((ar->arConnected == TRUE) && (ar->arWmiReady == TRUE))
+ {
+ reconnect_flag = TRUE;
+ status = wmi_reconnect_cmd(ar->arWmi,ar->arReqBssid,
+ ar->arChannelHint);
+ up(&ar->arSem);
+ if (status != A_OK) {
+ return -EIO;
+ }
+ return 0;
+ }
+ else{
+ /*
+ * Don't return if a connect is pending.
+ */
+ if(!(ar->arConnectPending)) {
+ up(&ar->arSem);
+ return 0;
+ }
+ }
+ }
+ }
+
+ ar->arSsidLen = data->length - 1;
+ A_MEMCPY(ar->arSsid, ssid, ar->arSsidLen);
+
+ if (ar6000_connect_to_ap(ar)!= A_OK) {
+ up(&ar->arSem);
+ return -EIO;
+ }else{
+ up(&ar->arSem);
+ }
+ return 0;
+}
+
+/* SIOCGIWESSID */
+static int
+ar6000_ioctl_giwessid(struct net_device *dev,
+ struct iw_request_info *info,
+ struct iw_point *data, char *essid)
+{
+ AR_SOFTC_T *ar = (AR_SOFTC_T *)ar6k_priv(dev);
+
+ if (is_iwioctl_allowed(ar->arNextMode, info->cmd) != A_OK) {
+ A_PRINTF("wext_ioctl: cmd=0x%x not allowed in this mode\n", info->cmd);
+ return -EOPNOTSUPP;
+ }
+
+ if (ar->arWlanState == WLAN_DISABLED) {
+ return -EIO;
+ }
+
+ if (!ar->arSsidLen) {
+ return -EINVAL;
+ }
+
+ data->flags = 1;
+ data->length = ar->arSsidLen;
+ A_MEMCPY(essid, ar->arSsid, ar->arSsidLen);
+
+ return 0;
+}
+
+
+void ar6000_install_static_wep_keys(AR_SOFTC_T *ar)
+{
+ A_UINT8 index;
+ A_UINT8 keyUsage;
+
+ for (index = WMI_MIN_KEY_INDEX; index <= WMI_MAX_KEY_INDEX; index++) {
+ if (ar->arWepKeyList[index].arKeyLen) {
+ keyUsage = GROUP_USAGE;
+ if (index == ar->arDefTxKeyIndex) {
+ keyUsage |= TX_USAGE;
+ }
+ wmi_addKey_cmd(ar->arWmi,
+ index,
+ WEP_CRYPT,
+ keyUsage,
+ ar->arWepKeyList[index].arKeyLen,
+ NULL,
+ ar->arWepKeyList[index].arKey, KEY_OP_INIT_VAL, NULL,
+ NO_SYNC_WMIFLAG);
+ }
+ }
+}
+
+/*
+ * SIOCSIWRATE
+ */
+int
+ar6000_ioctl_siwrate(struct net_device *dev,
+ struct iw_request_info *info,
+ struct iw_param *rrq, char *extra)
+{
+ AR_SOFTC_T *ar = (AR_SOFTC_T *)ar6k_priv(dev);
+ A_UINT32 kbps;
+ A_INT8 rate_idx;
+
+ if (is_iwioctl_allowed(ar->arNextMode, info->cmd) != A_OK) {
+ A_PRINTF("wext_ioctl: cmd=0x%x not allowed in this mode\n", info->cmd);
+ return -EOPNOTSUPP;
+ }
+
+ if (rrq->fixed) {
+ kbps = rrq->value / 1000; /* rrq->value is in bps */
+ } else {
+ kbps = -1; /* -1 indicates auto rate */
+ }
+ if(kbps != -1 && wmi_validate_bitrate(ar->arWmi, kbps, &rate_idx) != A_OK)
+ {
+ AR_DEBUG_PRINTF(ATH_DEBUG_ERR,("BitRate is not Valid %d\n", kbps));
+ return -EINVAL;
+ }
+ ar->arBitRate = kbps;
+ if(ar->arWmiReady == TRUE)
+ {
+ if (wmi_set_bitrate_cmd(ar->arWmi, kbps, -1, -1) != A_OK) {
+ return -EINVAL;
+ }
+ }
+ return 0;
+}
+
+/*
+ * SIOCGIWRATE
+ */
+int
+ar6000_ioctl_giwrate(struct net_device *dev,
+ struct iw_request_info *info,
+ struct iw_param *rrq, char *extra)
+{
+ AR_SOFTC_T *ar = (AR_SOFTC_T *)ar6k_priv(dev);
+ int ret = 0;
+
+ if (is_iwioctl_allowed(ar->arNextMode, info->cmd) != A_OK) {
+ A_PRINTF("wext_ioctl: cmd=0x%x not allowed in this mode\n", info->cmd);
+ return -EOPNOTSUPP;
+ }
+
+ if (ar->bIsDestroyProgress) {
+ return -EBUSY;
+ }
+
+ if (ar->arWlanState == WLAN_DISABLED) {
+ return -EIO;
+ }
+
+ if ((ar->arNextMode != AP_NETWORK && !ar->arConnected) || ar->arWmiReady == FALSE) {
+ rrq->value = 1000 * 1000;
+ return 0;
+ }
+
+ if (down_interruptible(&ar->arSem)) {
+ return -ERESTARTSYS;
+ }
+
+ if (ar->bIsDestroyProgress || ar->arWlanState == WLAN_DISABLED) {
+ up(&ar->arSem);
+ return -EBUSY;
+ }
+
+ ar->arBitRate = 0xFFFF;
+ if (wmi_get_bitrate_cmd(ar->arWmi) != A_OK) {
+ up(&ar->arSem);
+ return -EIO;
+ }
+ wait_event_interruptible_timeout(arEvent, ar->arBitRate != 0xFFFF, wmitimeout * HZ);
+ if (signal_pending(current)) {
+ ret = -EINTR;
+ }
+ /* If the interface is down or wmi is not ready or the target is not
+ connected - return the value stored in the device structure */
+ if (!ret) {
+ if (ar->arBitRate == -1) {
+ rrq->fixed = TRUE;
+ rrq->value = 0;
+ } else {
+ rrq->value = ar->arBitRate * 1000;
+ }
+ }
+
+ up(&ar->arSem);
+
+ return ret;
+}
+
+/*
+ * SIOCSIWTXPOW
+ */
+static int
+ar6000_ioctl_siwtxpow(struct net_device *dev,
+ struct iw_request_info *info,
+ struct iw_param *rrq, char *extra)
+{
+ AR_SOFTC_T *ar = (AR_SOFTC_T *)ar6k_priv(dev);
+ A_UINT8 dbM;
+
+ if (is_iwioctl_allowed(ar->arNextMode, info->cmd) != A_OK) {
+ A_PRINTF("wext_ioctl: cmd=0x%x not allowed in this mode\n", info->cmd);
+ return -EOPNOTSUPP;
+ }
+
+ if (ar->arWlanState == WLAN_DISABLED) {
+ return -EIO;
+ }
+
+ if (rrq->disabled) {
+ return -EOPNOTSUPP;
+ }
+
+ if (rrq->fixed) {
+ if (rrq->flags != IW_TXPOW_DBM) {
+ return -EOPNOTSUPP;
+ }
+ ar->arTxPwr= dbM = rrq->value;
+ ar->arTxPwrSet = TRUE;
+ } else {
+ ar->arTxPwr = dbM = 0;
+ ar->arTxPwrSet = FALSE;
+ }
+ if(ar->arWmiReady == TRUE)
+ {
+ AR_DEBUG_PRINTF(ATH_DEBUG_WLAN_TX,("Set tx pwr cmd %d dbM\n", dbM));
+ wmi_set_txPwr_cmd(ar->arWmi, dbM);
+ }
+ return 0;
+}
+
+/*
+ * SIOCGIWTXPOW
+ */
+int
+ar6000_ioctl_giwtxpow(struct net_device *dev,
+ struct iw_request_info *info,
+ struct iw_param *rrq, char *extra)
+{
+ AR_SOFTC_T *ar = (AR_SOFTC_T *)ar6k_priv(dev);
+ int ret = 0;
+
+ if (is_iwioctl_allowed(ar->arNextMode, info->cmd) != A_OK) {
+ A_PRINTF("wext_ioctl: cmd=0x%x not allowed in this mode\n", info->cmd);
+ return -EOPNOTSUPP;
+ }
+
+ if (ar->bIsDestroyProgress) {
+ return -EBUSY;
+ }
+
+ if (ar->arWlanState == WLAN_DISABLED) {
+ return -EIO;
+ }
+
+ if (down_interruptible(&ar->arSem)) {
+ return -ERESTARTSYS;
+ }
+
+ if (ar->bIsDestroyProgress) {
+ up(&ar->arSem);
+ return -EBUSY;
+ }
+
+ if((ar->arWmiReady == TRUE) && (ar->arConnected == TRUE))
+ {
+ ar->arTxPwr = 0;
+
+ if (wmi_get_txPwr_cmd(ar->arWmi) != A_OK) {
+ up(&ar->arSem);
+ return -EIO;
+ }
+
+ wait_event_interruptible_timeout(arEvent, ar->arTxPwr != 0, wmitimeout * HZ);
+
+ if (signal_pending(current)) {
+ ret = -EINTR;
+ }
+ }
+ /* If the interface is down, wmi is not ready, or the target is not connected,
+ then return the value stored in the device structure */
+
+ if (!ret) {
+ if (ar->arTxPwrSet == TRUE) {
+ rrq->fixed = TRUE;
+ }
+ rrq->value = ar->arTxPwr;
+ rrq->flags = IW_TXPOW_DBM;
+ //
+ // iwlist needs this flag to get TxPower
+ //
+ rrq->disabled = 0;
+ }
+
+ up(&ar->arSem);
+
+ return ret;
+}
+
+/*
+ * SIOCSIWRETRY
+ * since iwconfig only provides us with one max retry value, we use it
+ * to apply to data frames of the BE traffic class.
+ */
+static int
+ar6000_ioctl_siwretry(struct net_device *dev,
+ struct iw_request_info *info,
+ struct iw_param *rrq, char *extra)
+{
+ AR_SOFTC_T *ar = (AR_SOFTC_T *)ar6k_priv(dev);
+
+ if (is_iwioctl_allowed(ar->arNextMode, info->cmd) != A_OK) {
+ A_PRINTF("wext_ioctl: cmd=0x%x not allowed in this mode\n", info->cmd);
+ return -EOPNOTSUPP;
+ }
+
+ if (ar->arWlanState == WLAN_DISABLED) {
+ return -EIO;
+ }
+
+ if (rrq->disabled) {
+ return -EOPNOTSUPP;
+ }
+
+ if ((rrq->flags & IW_RETRY_TYPE) != IW_RETRY_LIMIT) {
+ return -EOPNOTSUPP;
+ }
+
+ if ( !(rrq->value >= WMI_MIN_RETRIES) || !(rrq->value <= WMI_MAX_RETRIES)) {
+ return - EINVAL;
+ }
+ if(ar->arWmiReady == TRUE)
+ {
+ if (wmi_set_retry_limits_cmd(ar->arWmi, DATA_FRAMETYPE, WMM_AC_BE,
+ rrq->value, 0) != A_OK){
+ return -EINVAL;
+ }
+ }
+ ar->arMaxRetries = rrq->value;
+ return 0;
+}
+
+/*
+ * SIOCGIWRETRY
+ */
+static int
+ar6000_ioctl_giwretry(struct net_device *dev,
+ struct iw_request_info *info,
+ struct iw_param *rrq, char *extra)
+{
+ AR_SOFTC_T *ar = (AR_SOFTC_T *)ar6k_priv(dev);
+
+ if (is_iwioctl_allowed(ar->arNextMode, info->cmd) != A_OK) {
+ A_PRINTF("wext_ioctl: cmd=0x%x not allowed in this mode\n", info->cmd);
+ return -EOPNOTSUPP;
+ }
+
+ if (ar->arWlanState == WLAN_DISABLED) {
+ return -EIO;
+ }
+
+ rrq->disabled = 0;
+ switch (rrq->flags & IW_RETRY_TYPE) {
+ case IW_RETRY_LIFETIME:
+ return -EOPNOTSUPP;
+ break;
+ case IW_RETRY_LIMIT:
+ rrq->flags = IW_RETRY_LIMIT;
+ switch (rrq->flags & IW_RETRY_MODIFIER) {
+ case IW_RETRY_MIN:
+ rrq->flags |= IW_RETRY_MIN;
+ rrq->value = WMI_MIN_RETRIES;
+ break;
+ case IW_RETRY_MAX:
+ rrq->flags |= IW_RETRY_MAX;
+ rrq->value = ar->arMaxRetries;
+ break;
+ }
+ break;
+ }
+ return 0;
+}
+
+/*
+ * SIOCSIWENCODE
+ */
+static int
+ar6000_ioctl_siwencode(struct net_device *dev,
+ struct iw_request_info *info,
+ struct iw_point *erq, char *keybuf)
+{
+ AR_SOFTC_T *ar = (AR_SOFTC_T *)ar6k_priv(dev);
+ int index;
+ A_INT32 auth = 0;
+
+ if (is_iwioctl_allowed(ar->arNextMode, info->cmd) != A_OK) {
+ A_PRINTF("wext_ioctl: cmd=0x%x not allowed in this mode\n", info->cmd);
+ return -EOPNOTSUPP;
+ }
+
+ if(ar->arNextMode != AP_NETWORK) {
+ /*
+ * Static WEP Keys should be configured before setting the SSID
+ */
+ if (ar->arSsid[0] && erq->length) {
+ return -EIO;
+ }
+ }
+
+ if (ar->arWlanState == WLAN_DISABLED) {
+ return -EIO;
+ }
+
+ index = erq->flags & IW_ENCODE_INDEX;
+
+ if (index && (((index - 1) < WMI_MIN_KEY_INDEX) ||
+ ((index - 1) > WMI_MAX_KEY_INDEX)))
+ {
+ return -EIO;
+ }
+
+ if (erq->flags & IW_ENCODE_DISABLED) {
+ /*
+ * Encryption disabled
+ */
+ if (index) {
+ /*
+ * If key index was specified then clear the specified key
+ */
+ index--;
+ A_MEMZERO(ar->arWepKeyList[index].arKey,
+ sizeof(ar->arWepKeyList[index].arKey));
+ ar->arWepKeyList[index].arKeyLen = 0;
+ }
+ ar->arDot11AuthMode = OPEN_AUTH;
+ ar->arPairwiseCrypto = NONE_CRYPT;
+ ar->arGroupCrypto = NONE_CRYPT;
+ ar->arAuthMode = NONE_AUTH;
+ } else {
+ /*
+ * Enabling WEP encryption
+ */
+ if (index) {
+ index--; /* key index is 1-based in iwconfig */
+ }
+
+ if (erq->flags & IW_ENCODE_OPEN) {
+ auth |= OPEN_AUTH;
+ ar->arDefTxKeyIndex = index;
+ }
+ if (erq->flags & IW_ENCODE_RESTRICTED) {
+ auth |= SHARED_AUTH;
+ }
+
+ if (!auth) {
+ auth = OPEN_AUTH;
+ }
+
+ if (erq->length) {
+ if (!IEEE80211_IS_VALID_WEP_CIPHER_LEN(erq->length)) {
+ return -EIO;
+ }
+
+ A_MEMZERO(ar->arWepKeyList[index].arKey,
+ sizeof(ar->arWepKeyList[index].arKey));
+ A_MEMCPY(ar->arWepKeyList[index].arKey, keybuf, erq->length);
+ ar->arWepKeyList[index].arKeyLen = erq->length;
+ ar->arDot11AuthMode = auth;
+ } else {
+ if (ar->arWepKeyList[index].arKeyLen == 0) {
+ return -EIO;
+ }
+ ar->arDefTxKeyIndex = index;
+
+ if(ar->arSsidLen && ar->arWepKeyList[index].arKeyLen) {
+ wmi_addKey_cmd(ar->arWmi,
+ index,
+ WEP_CRYPT,
+ GROUP_USAGE | TX_USAGE,
+ ar->arWepKeyList[index].arKeyLen,
+ NULL,
+ ar->arWepKeyList[index].arKey, KEY_OP_INIT_VAL, NULL,
+ NO_SYNC_WMIFLAG);
+ }
+ }
+
+ ar->arPairwiseCrypto = WEP_CRYPT;
+ ar->arGroupCrypto = WEP_CRYPT;
+ ar->arAuthMode = NONE_AUTH;
+ }
+
+ if(ar->arNextMode != AP_NETWORK) {
+ /*
+ * profile has changed. Erase ssid to signal change
+ */
+ A_MEMZERO(ar->arSsid, sizeof(ar->arSsid));
+ ar->arSsidLen = 0;
+ }
+ ar->ap_profile_flag = 1; /* There is a change in profile */
+ return 0;
+}
+
+static int
+ar6000_ioctl_giwencode(struct net_device *dev,
+ struct iw_request_info *info,
+ struct iw_point *erq, char *key)
+{
+ AR_SOFTC_T *ar = (AR_SOFTC_T *)ar6k_priv(dev);
+ A_UINT8 keyIndex;
+ struct ar_wep_key *wk;
+
+ if (is_iwioctl_allowed(ar->arNextMode, info->cmd) != A_OK) {
+ A_PRINTF("wext_ioctl: cmd=0x%x not allowed in this mode\n", info->cmd);
+ return -EOPNOTSUPP;
+ }
+
+ if (ar->arWlanState == WLAN_DISABLED) {
+ return -EIO;
+ }
+
+ if (ar->arPairwiseCrypto == NONE_CRYPT) {
+ erq->length = 0;
+ erq->flags = IW_ENCODE_DISABLED;
+ } else {
+ if (ar->arPairwiseCrypto == WEP_CRYPT) {
+ /* get the keyIndex */
+ keyIndex = erq->flags & IW_ENCODE_INDEX;
+ if (0 == keyIndex) {
+ keyIndex = ar->arDefTxKeyIndex;
+ } else if ((keyIndex - 1 < WMI_MIN_KEY_INDEX) ||
+ (keyIndex - 1 > WMI_MAX_KEY_INDEX))
+ {
+ keyIndex = WMI_MIN_KEY_INDEX;
+ } else {
+ keyIndex--;
+ }
+ erq->flags = keyIndex + 1;
+ erq->flags &= ~IW_ENCODE_DISABLED;
+ wk = &ar->arWepKeyList[keyIndex];
+ if (erq->length > wk->arKeyLen) {
+ erq->length = wk->arKeyLen;
+ }
+ if (wk->arKeyLen) {
+ A_MEMCPY(key, wk->arKey, erq->length);
+ }
+ } else {
+ erq->flags &= ~IW_ENCODE_DISABLED;
+ if (ar->user_saved_keys.keyOk) {
+ erq->length = ar->user_saved_keys.ucast_ik.ik_keylen;
+ if (erq->length) {
+ A_MEMCPY(key, ar->user_saved_keys.ucast_ik.ik_keydata, erq->length);
+ }
+ } else {
+ erq->length = 1; // not returning an actual key, but let iwconfig know encryption is on
+ }
+ }
+
+ if (ar->arDot11AuthMode & OPEN_AUTH) {
+ erq->flags |= IW_ENCODE_OPEN;
+ }
+ if (ar->arDot11AuthMode & SHARED_AUTH) {
+ erq->flags |= IW_ENCODE_RESTRICTED;
+ }
+ }
+
+ return 0;
+}
+
+#if WIRELESS_EXT >= 18
+/*
+ * SIOCSIWGENIE
+ */
+static int
+ar6000_ioctl_siwgenie(struct net_device *dev,
+ struct iw_request_info *info,
+ struct iw_point *erq, char *extra)
+{
+ AR_SOFTC_T *ar = (AR_SOFTC_T *)ar6k_priv(dev);
+
+#ifdef WAPI_ENABLE
+ A_UINT8 *ie = erq->pointer;
+ A_UINT8 ie_type = ie[0];
+ A_UINT16 ie_length = erq->length;
+ A_UINT8 wapi_ie[128];
+#endif
+
+ if (ar->arWmiReady == FALSE) {
+ return -EIO;
+ }
+#ifdef WAPI_ENABLE
+ if (ie_type == IEEE80211_ELEMID_WAPI) {
+ if (ie_length > 0) {
+ if (copy_from_user(wapi_ie, ie, ie_length)) {
+ return -EIO;
+ }
+ }
+ wmi_set_appie_cmd(ar->arWmi, WMI_FRAME_ASSOC_REQ, ie_length, wapi_ie);
+ } else if (ie_length == 0) {
+ wmi_set_appie_cmd(ar->arWmi, WMI_FRAME_ASSOC_REQ, ie_length, wapi_ie);
+ }
+#endif
+ return 0;
+}
+
+
+/*
+ * SIOCGIWGENIE
+ */
+static int
+ar6000_ioctl_giwgenie(struct net_device *dev,
+ struct iw_request_info *info,
+ struct iw_point *erq, char *extra)
+{
+ AR_SOFTC_T *ar = (AR_SOFTC_T *)ar6k_priv(dev);
+
+ if (ar->arWmiReady == FALSE) {
+ return -EIO;
+ }
+ erq->length = 0;
+ erq->flags = 0;
+
+ return 0;
+}
+
+/*
+ * SIOCSIWAUTH
+ */
+static int
+ar6000_ioctl_siwauth(struct net_device *dev,
+ struct iw_request_info *info,
+ struct iw_param *data, char *extra)
+{
+ AR_SOFTC_T *ar = (AR_SOFTC_T *)ar6k_priv(dev);
+
+ A_BOOL profChanged;
+ A_UINT16 param;
+ A_INT32 ret;
+ A_INT32 value;
+
+ if (ar->arWmiReady == FALSE) {
+ return -EIO;
+ }
+
+ if (ar->arWlanState == WLAN_DISABLED) {
+ return -EIO;
+ }
+
+ param = data->flags & IW_AUTH_INDEX;
+ value = data->value;
+ profChanged = TRUE;
+ ret = 0;
+
+ switch (param) {
+ case IW_AUTH_WPA_VERSION:
+ if (value & IW_AUTH_WPA_VERSION_DISABLED) {
+ ar->arAuthMode = NONE_AUTH;
+ } else if (value & IW_AUTH_WPA_VERSION_WPA) {
+ ar->arAuthMode = WPA_AUTH;
+ } else if (value & IW_AUTH_WPA_VERSION_WPA2) {
+ ar->arAuthMode = WPA2_AUTH;
+ } else {
+ ret = -1;
+ profChanged = FALSE;
+ }
+ break;
+ case IW_AUTH_CIPHER_PAIRWISE:
+ if (value & IW_AUTH_CIPHER_NONE) {
+ ar->arPairwiseCrypto = NONE_CRYPT;
+ ar->arPairwiseCryptoLen = 0;
+ } else if (value & IW_AUTH_CIPHER_WEP40) {
+ ar->arPairwiseCrypto = WEP_CRYPT;
+ ar->arPairwiseCryptoLen = 5;
+ } else if (value & IW_AUTH_CIPHER_TKIP) {
+ ar->arPairwiseCrypto = TKIP_CRYPT;
+ ar->arPairwiseCryptoLen = 0;
+ } else if (value & IW_AUTH_CIPHER_CCMP) {
+ ar->arPairwiseCrypto = AES_CRYPT;
+ ar->arPairwiseCryptoLen = 0;
+ } else if (value & IW_AUTH_CIPHER_WEP104) {
+ ar->arPairwiseCrypto = WEP_CRYPT;
+ ar->arPairwiseCryptoLen = 13;
+ } else {
+ ret = -1;
+ profChanged = FALSE;
+ }
+ break;
+ case IW_AUTH_CIPHER_GROUP:
+ if (value & IW_AUTH_CIPHER_NONE) {
+ ar->arGroupCrypto = NONE_CRYPT;
+ ar->arGroupCryptoLen = 0;
+ } else if (value & IW_AUTH_CIPHER_WEP40) {
+ ar->arGroupCrypto = WEP_CRYPT;
+ ar->arGroupCryptoLen = 5;
+ } else if (value & IW_AUTH_CIPHER_TKIP) {
+ ar->arGroupCrypto = TKIP_CRYPT;
+ ar->arGroupCryptoLen = 0;
+ } else if (value & IW_AUTH_CIPHER_CCMP) {
+ ar->arGroupCrypto = AES_CRYPT;
+ ar->arGroupCryptoLen = 0;
+ } else if (value & IW_AUTH_CIPHER_WEP104) {
+ ar->arGroupCrypto = WEP_CRYPT;
+ ar->arGroupCryptoLen = 13;
+ } else {
+ ret = -1;
+ profChanged = FALSE;
+ }
+ break;
+ case IW_AUTH_KEY_MGMT:
+ if (value & IW_AUTH_KEY_MGMT_PSK) {
+ if (WPA_AUTH == ar->arAuthMode) {
+ ar->arAuthMode = WPA_PSK_AUTH;
+ } else if (WPA2_AUTH == ar->arAuthMode) {
+ ar->arAuthMode = WPA2_PSK_AUTH;
+ } else {
+ ret = -1;
+ }
+ } else if (!(value & IW_AUTH_KEY_MGMT_802_1X)) {
+ ar->arAuthMode = NONE_AUTH;
+ }
+ break;
+ case IW_AUTH_TKIP_COUNTERMEASURES:
+ wmi_set_tkip_countermeasures_cmd(ar->arWmi, value);
+ profChanged = FALSE;
+ break;
+ case IW_AUTH_DROP_UNENCRYPTED:
+ profChanged = FALSE;
+ break;
+ case IW_AUTH_80211_AUTH_ALG:
+ ar->arDot11AuthMode = 0;
+ if (value & IW_AUTH_ALG_OPEN_SYSTEM) {
+ ar->arDot11AuthMode |= OPEN_AUTH;
+ }
+ if (value & IW_AUTH_ALG_SHARED_KEY) {
+ ar->arDot11AuthMode |= SHARED_AUTH;
+ }
+ if (value & IW_AUTH_ALG_LEAP) {
+ ar->arDot11AuthMode = LEAP_AUTH;
+ }
+ if(ar->arDot11AuthMode == 0) {
+ ret = -1;
+ profChanged = FALSE;
+ }
+ break;
+ case IW_AUTH_WPA_ENABLED:
+ if (!value) {
+ ar->arAuthMode = NONE_AUTH;
+ /* When the supplicant is stopped, it calls this
+ * handler with value=0. The following need to be
+ * reset so the STA can connect again
+ * without security.
+ */
+ ar->arDot11AuthMode = OPEN_AUTH;
+ ar->arPairwiseCrypto = NONE_CRYPT;
+ ar->arPairwiseCryptoLen = 0;
+ ar->arGroupCrypto = NONE_CRYPT;
+ ar->arGroupCryptoLen = 0;
+ }
+ break;
+ case IW_AUTH_RX_UNENCRYPTED_EAPOL:
+ profChanged = FALSE;
+ break;
+ case IW_AUTH_ROAMING_CONTROL:
+ profChanged = FALSE;
+ break;
+ case IW_AUTH_PRIVACY_INVOKED:
+ if (!value) {
+ ar->arPairwiseCrypto = NONE_CRYPT;
+ ar->arPairwiseCryptoLen = 0;
+ ar->arGroupCrypto = NONE_CRYPT;
+ ar->arGroupCryptoLen = 0;
+ }
+ break;
+#ifdef WAPI_ENABLE
+ case IW_AUTH_WAPI_ENABLED:
+ ar->arWapiEnable = value;
+ break;
+#endif
+ default:
+ ret = -1;
+ profChanged = FALSE;
+ break;
+ }
+
+ if (profChanged == TRUE) {
+ /*
+ * profile has changed. Erase ssid to signal change
+ */
+ A_MEMZERO(ar->arSsid, sizeof(ar->arSsid));
+ ar->arSsidLen = 0;
+ }
+
+ return ret;
+}
+
+
+/*
+ * SIOCGIWAUTH
+ */
+static int
+ar6000_ioctl_giwauth(struct net_device *dev,
+ struct iw_request_info *info,
+ struct iw_param *data, char *extra)
+{
+ AR_SOFTC_T *ar = (AR_SOFTC_T *)ar6k_priv(dev);
+ A_UINT16 param;
+ A_INT32 ret;
+
+ if (ar->arWmiReady == FALSE) {
+ return -EIO;
+ }
+
+ if (ar->arWlanState == WLAN_DISABLED) {
+ return -EIO;
+ }
+
+ param = data->flags & IW_AUTH_INDEX;
+ ret = 0;
+ data->value = 0;
+
+
+ switch (param) {
+ case IW_AUTH_WPA_VERSION:
+ if (ar->arAuthMode == NONE_AUTH) {
+ data->value |= IW_AUTH_WPA_VERSION_DISABLED;
+ } else if (ar->arAuthMode == WPA_AUTH) {
+ data->value |= IW_AUTH_WPA_VERSION_WPA;
+ } else if (ar->arAuthMode == WPA2_AUTH) {
+ data->value |= IW_AUTH_WPA_VERSION_WPA2;
+ } else {
+ ret = -1;
+ }
+ break;
+ case IW_AUTH_CIPHER_PAIRWISE:
+ if (ar->arPairwiseCrypto == NONE_CRYPT) {
+ data->value |= IW_AUTH_CIPHER_NONE;
+ } else if (ar->arPairwiseCrypto == WEP_CRYPT) {
+ if (ar->arPairwiseCryptoLen == 13) {
+ data->value |= IW_AUTH_CIPHER_WEP104;
+ } else {
+ data->value |= IW_AUTH_CIPHER_WEP40;
+ }
+ } else if (ar->arPairwiseCrypto == TKIP_CRYPT) {
+ data->value |= IW_AUTH_CIPHER_TKIP;
+ } else if (ar->arPairwiseCrypto == AES_CRYPT) {
+ data->value |= IW_AUTH_CIPHER_CCMP;
+ } else {
+ ret = -1;
+ }
+ break;
+ case IW_AUTH_CIPHER_GROUP:
+ if (ar->arGroupCrypto == NONE_CRYPT) {
+ data->value |= IW_AUTH_CIPHER_NONE;
+ } else if (ar->arGroupCrypto == WEP_CRYPT) {
+ if (ar->arGroupCryptoLen == 13) {
+ data->value |= IW_AUTH_CIPHER_WEP104;
+ } else {
+ data->value |= IW_AUTH_CIPHER_WEP40;
+ }
+ } else if (ar->arGroupCrypto == TKIP_CRYPT) {
+ data->value |= IW_AUTH_CIPHER_TKIP;
+ } else if (ar->arGroupCrypto == AES_CRYPT) {
+ data->value |= IW_AUTH_CIPHER_CCMP;
+ } else {
+ ret = -1;
+ }
+ break;
+ case IW_AUTH_KEY_MGMT:
+ if ((ar->arAuthMode == WPA_PSK_AUTH) ||
+ (ar->arAuthMode == WPA2_PSK_AUTH)) {
+ data->value |= IW_AUTH_KEY_MGMT_PSK;
+ } else if ((ar->arAuthMode == WPA_AUTH) ||
+ (ar->arAuthMode == WPA2_AUTH)) {
+ data->value |= IW_AUTH_KEY_MGMT_802_1X;
+ }
+ break;
+ case IW_AUTH_TKIP_COUNTERMEASURES:
+ // TODO: save countermeasure enable/disable state
+ data->value = 0;
+ break;
+ case IW_AUTH_DROP_UNENCRYPTED:
+ break;
+ case IW_AUTH_80211_AUTH_ALG:
+ if (ar->arDot11AuthMode == OPEN_AUTH) {
+ data->value |= IW_AUTH_ALG_OPEN_SYSTEM;
+ } else if (ar->arDot11AuthMode == SHARED_AUTH) {
+ data->value |= IW_AUTH_ALG_SHARED_KEY;
+ } else if (ar->arDot11AuthMode == LEAP_AUTH) {
+ data->value |= IW_AUTH_ALG_LEAP;
+ } else {
+ ret = -1;
+ }
+ break;
+ case IW_AUTH_WPA_ENABLED:
+ if (ar->arAuthMode == NONE_AUTH) {
+ data->value = 0;
+ } else {
+ data->value = 1;
+ }
+ break;
+ case IW_AUTH_RX_UNENCRYPTED_EAPOL:
+ break;
+ case IW_AUTH_ROAMING_CONTROL:
+ break;
+ case IW_AUTH_PRIVACY_INVOKED:
+ if (ar->arPairwiseCrypto == NONE_CRYPT) {
+ data->value = 0;
+ } else {
+ data->value = 1;
+ }
+ break;
+#ifdef WAPI_ENABLE
+ case IW_AUTH_WAPI_ENABLED:
+ data->value = ar->arWapiEnable;
+ break;
+#endif
+ default:
+ ret = -1;
+ break;
+ }
+
+ return ret;
+}
+
+/*
+ * SIOCSIWPMKSA
+ */
+static int
+ar6000_ioctl_siwpmksa(struct net_device *dev,
+ struct iw_request_info *info,
+ struct iw_point *data, char *extra)
+{
+ AR_SOFTC_T *ar = (AR_SOFTC_T *)ar6k_priv(dev);
+ A_INT32 ret;
+ A_STATUS status;
+ struct iw_pmksa *pmksa;
+
+ pmksa = (struct iw_pmksa *)extra;
+
+ if (ar->arWmiReady == FALSE) {
+ return -EIO;
+ }
+
+ ret = 0;
+ status = A_OK;
+
+ switch (pmksa->cmd) {
+ case IW_PMKSA_ADD:
+ status = wmi_setPmkid_cmd(ar->arWmi, (A_UINT8*)pmksa->bssid.sa_data, pmksa->pmkid, TRUE);
+ break;
+ case IW_PMKSA_REMOVE:
+ status = wmi_setPmkid_cmd(ar->arWmi, (A_UINT8*)pmksa->bssid.sa_data, pmksa->pmkid, FALSE);
+ break;
+ case IW_PMKSA_FLUSH:
+ if (ar->arConnected == TRUE) {
+ status = wmi_setPmkid_cmd(ar->arWmi, ar->arBssid, NULL, 0);
+ }
+ break;
+ default:
+ ret=-1;
+ break;
+ }
+ if (status != A_OK) {
+ ret = -1;
+ }
+
+ return ret;
+}
+
+#ifdef WAPI_ENABLE
+
+#define PN_INIT 0x5c365c36
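+/*
+ * PN_INIT is the initial WAPI packet-number pattern; for a group key the
+ * 16-byte wapiKeyRsc below is filled with four copies of this 32-bit value
+ * before the key is handed to the firmware via wmi_addKey_cmd().
+ */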
+
+static int ar6000_set_wapi_key(struct net_device *dev,
+ struct iw_request_info *info,
+ struct iw_point *erq, char *extra)
+{
+ AR_SOFTC_T *ar = (AR_SOFTC_T *)ar6k_priv(dev);
+ struct iw_encode_ext *ext = (struct iw_encode_ext *)extra;
+ KEY_USAGE keyUsage = 0;
+ A_INT32 keyLen;
+ A_UINT8 *keyData;
+ A_INT32 index;
+ A_UINT32 *PN;
+ A_INT32 i;
+ A_STATUS status;
+ A_UINT8 wapiKeyRsc[16];
+ CRYPTO_TYPE keyType = WAPI_CRYPT;
+ const A_UINT8 broadcastMac[] = {0xff, 0xff, 0xff, 0xff, 0xff, 0xff};
+
+ index = erq->flags & IW_ENCODE_INDEX;
+ if (index && (((index - 1) < WMI_MIN_KEY_INDEX) ||
+ ((index - 1) > WMI_MAX_KEY_INDEX))) {
+ return -EIO;
+ }
+
+ index--;
+ if (index < 0 || index > 4) {
+ return -EIO;
+ }
+ keyData = (A_UINT8 *)(ext + 1);
+ keyLen = erq->length - sizeof(struct iw_encode_ext);
+ A_MEMCPY(wapiKeyRsc, ext->tx_seq, sizeof(wapiKeyRsc));
+
+ if (A_MEMCMP(ext->addr.sa_data, broadcastMac, sizeof(broadcastMac)) == 0) {
+ keyUsage |= GROUP_USAGE;
+ PN = (A_UINT32 *)wapiKeyRsc;
+ for (i = 0; i < 4; i++) {
+ PN[i] = PN_INIT;
+ }
+ } else {
+ keyUsage |= PAIRWISE_USAGE;
+ }
+ status = wmi_addKey_cmd(ar->arWmi,
+ index,
+ keyType,
+ keyUsage,
+ keyLen,
+ wapiKeyRsc,
+ keyData,
+ KEY_OP_INIT_WAPIPN,
+ NULL,
+ SYNC_BEFORE_WMIFLAG);
+ if (A_OK != status) {
+ return -EIO;
+ }
+ return 0;
+}
+
+#endif
+
+/*
+ * SIOCSIWENCODEEXT
+ */
+static int
+ar6000_ioctl_siwencodeext(struct net_device *dev,
+ struct iw_request_info *info,
+ struct iw_point *erq, char *extra)
+{
+ AR_SOFTC_T *ar = (AR_SOFTC_T *)ar6k_priv(dev);
+ A_INT32 index;
+ struct iw_encode_ext *ext;
+ KEY_USAGE keyUsage;
+ A_INT32 keyLen;
+ A_UINT8 *keyData;
+ A_UINT8 keyRsc[8];
+ A_STATUS status;
+ CRYPTO_TYPE keyType;
+#ifdef USER_KEYS
+ struct ieee80211req_key ik;
+#endif /* USER_KEYS */
+
+ if (ar->arWlanState == WLAN_DISABLED) {
+ return -EIO;
+ }
+
+#ifdef USER_KEYS
+ ar->user_saved_keys.keyOk = FALSE;
+#endif /* USER_KEYS */
+
+ index = erq->flags & IW_ENCODE_INDEX;
+
+ if (index && (((index - 1) < WMI_MIN_KEY_INDEX) ||
+ ((index - 1) > WMI_MAX_KEY_INDEX)))
+ {
+ return -EIO;
+ }
+
+ ext = (struct iw_encode_ext *)extra;
+ if (erq->flags & IW_ENCODE_DISABLED) {
+ /*
+ * Encryption disabled
+ */
+ if (index) {
+ /*
+ * If key index was specified then clear the specified key
+ */
+ index--;
+ A_MEMZERO(ar->arWepKeyList[index].arKey,
+ sizeof(ar->arWepKeyList[index].arKey));
+ ar->arWepKeyList[index].arKeyLen = 0;
+ }
+ } else {
+ /*
+ * Enabling WEP encryption
+ */
+ if (index) {
+ index--; /* key index is 1-based in iwconfig */
+ }
+
+ keyUsage = 0;
+ keyLen = erq->length - sizeof(struct iw_encode_ext);
+
+ if (ext->ext_flags & IW_ENCODE_EXT_SET_TX_KEY) {
+ keyUsage = TX_USAGE;
+ ar->arDefTxKeyIndex = index;
+ // Just setting the key index
+ if (keyLen == 0) {
+ return 0;
+ }
+ }
+
+ if (keyLen <= 0) {
+ return -EIO;
+ }
+
+ /* key follows iw_encode_ext */
+ keyData = (A_UINT8 *)(ext + 1);
+
+ switch (ext->alg) {
+ case IW_ENCODE_ALG_WEP:
+ keyType = WEP_CRYPT;
+#ifdef USER_KEYS
+ ik.ik_type = IEEE80211_CIPHER_WEP;
+#endif /* USER_KEYS */
+ if (!IEEE80211_IS_VALID_WEP_CIPHER_LEN(keyLen)) {
+ return -EIO;
+ }
+
+ /* Check whether it is static wep. */
+ if (!ar->arConnected) {
+ A_MEMZERO(ar->arWepKeyList[index].arKey,
+ sizeof(ar->arWepKeyList[index].arKey));
+ A_MEMCPY(ar->arWepKeyList[index].arKey, keyData, keyLen);
+ ar->arWepKeyList[index].arKeyLen = keyLen;
+
+ return 0;
+ }
+ break;
+ case IW_ENCODE_ALG_TKIP:
+ keyType = TKIP_CRYPT;
+#ifdef USER_KEYS
+ ik.ik_type = IEEE80211_CIPHER_TKIP;
+#endif /* USER_KEYS */
+ break;
+ case IW_ENCODE_ALG_CCMP:
+ keyType = AES_CRYPT;
+#ifdef USER_KEYS
+ ik.ik_type = IEEE80211_CIPHER_AES_CCM;
+#endif /* USER_KEYS */
+ break;
+#ifdef WAPI_ENABLE
+ case IW_ENCODE_ALG_SM4:
+ if (ar->arWapiEnable) {
+ return ar6000_set_wapi_key(dev, info, erq, extra);
+ } else {
+ return -EIO;
+ }
+#endif
+ case IW_ENCODE_ALG_PMK:
+ ar->arConnectCtrlFlags |= CONNECT_DO_WPA_OFFLOAD;
+ return wmi_set_pmk_cmd(ar->arWmi, keyData);
+ default:
+ return -EIO;
+ }
+
+
+ if (ext->ext_flags & IW_ENCODE_EXT_GROUP_KEY) {
+ keyUsage |= GROUP_USAGE;
+ } else {
+ keyUsage |= PAIRWISE_USAGE;
+ }
+
+ if (ext->ext_flags & IW_ENCODE_EXT_RX_SEQ_VALID) {
+ A_MEMCPY(keyRsc, ext->rx_seq, sizeof(keyRsc));
+ } else {
+ A_MEMZERO(keyRsc, sizeof(keyRsc));
+ }
+
+ if (((WPA_PSK_AUTH == ar->arAuthMode) || (WPA2_PSK_AUTH == ar->arAuthMode)) &&
+ (GROUP_USAGE & keyUsage))
+ {
+ A_UNTIMEOUT(&ar->disconnect_timer);
+ }
+
+ status = wmi_addKey_cmd(ar->arWmi, index, keyType, keyUsage,
+ keyLen, keyRsc,
+ keyData, KEY_OP_INIT_VAL,
+ (A_UINT8*)ext->addr.sa_data,
+ SYNC_BOTH_WMIFLAG);
+ if (status != A_OK) {
+ return -EIO;
+ }
+
+#ifdef USER_KEYS
+ ik.ik_keyix = index;
+ ik.ik_keylen = keyLen;
+ memcpy(ik.ik_keydata, keyData, keyLen);
+ memcpy(&ik.ik_keyrsc, keyRsc, sizeof(keyRsc));
+ memcpy(ik.ik_macaddr, ext->addr.sa_data, ETH_ALEN);
+ if (ext->ext_flags & IW_ENCODE_EXT_GROUP_KEY) {
+ memcpy(&ar->user_saved_keys.bcast_ik, &ik,
+ sizeof(struct ieee80211req_key));
+ } else {
+ memcpy(&ar->user_saved_keys.ucast_ik, &ik,
+ sizeof(struct ieee80211req_key));
+ }
+ ar->user_saved_keys.keyOk = TRUE;
+#endif /* USER_KEYS */
+ }
+
+
+ return 0;
+}
+
+/*
+ * SIOCGIWENCODEEXT
+ */
+static int
+ar6000_ioctl_giwencodeext(struct net_device *dev,
+ struct iw_request_info *info,
+ struct iw_point *erq, char *extra)
+{
+ AR_SOFTC_T *ar = (AR_SOFTC_T *)ar6k_priv(dev);
+
+ if (ar->arWlanState == WLAN_DISABLED) {
+ return -EIO;
+ }
+
+ if (ar->arPairwiseCrypto == NONE_CRYPT) {
+ erq->length = 0;
+ erq->flags = IW_ENCODE_DISABLED;
+ } else {
+ erq->length = 0;
+ }
+
+ return 0;
+}
+#endif // WIRELESS_EXT >= 18
+
+#if WIRELESS_EXT > 20
+static int ar6000_ioctl_siwpower(struct net_device *dev,
+ struct iw_request_info *info,
+ union iwreq_data *wrqu, char *extra)
+{
+#ifndef ATH6K_CONFIG_OTA_MODE
+ AR_SOFTC_T *ar = (AR_SOFTC_T *)ar6k_priv(dev);
+ WMI_POWER_MODE power_mode;
+
+ if (ar->arWmiReady == FALSE) {
+ return -EIO;
+ }
+
+ if (ar->arWlanState == WLAN_DISABLED) {
+ return -EIO;
+ }
+
+ if (wrqu->power.disabled)
+ power_mode = MAX_PERF_POWER;
+ else
+ power_mode = REC_POWER;
+
+ if (wmi_powermode_cmd(ar->arWmi, power_mode) < 0)
+ return -EIO;
+#endif
+ return 0;
+}
+
+static int ar6000_ioctl_giwpower(struct net_device *dev,
+ struct iw_request_info *info,
+ union iwreq_data *wrqu, char *extra)
+{
+ AR_SOFTC_T *ar = (AR_SOFTC_T *)ar6k_priv(dev);
+ WMI_POWER_MODE power_mode;
+
+ if (ar->arWmiReady == FALSE) {
+ return -EIO;
+ }
+
+ if (ar->arWlanState == WLAN_DISABLED) {
+ return -EIO;
+ }
+
+ power_mode = wmi_get_power_mode_cmd(ar->arWmi);
+
+ if (power_mode == MAX_PERF_POWER)
+ wrqu->power.disabled = 1;
+ else
+ wrqu->power.disabled = 0;
+
+ return 0;
+}
+#endif // WIRELESS_EXT > 20
+
+/*
+ * SIOCGIWNAME
+ */
+int
+ar6000_ioctl_giwname(struct net_device *dev,
+ struct iw_request_info *info,
+ char *name, char *extra)
+{
+ A_UINT8 capability;
+ AR_SOFTC_T *ar = (AR_SOFTC_T *)ar6k_priv(dev);
+
+ if (is_iwioctl_allowed(ar->arNextMode, info->cmd) != A_OK) {
+ A_PRINTF("wext_ioctl: cmd=0x%x not allowed in this mode\n", info->cmd);
+ return -EOPNOTSUPP;
+ }
+
+ if (ar->arWlanState == WLAN_DISABLED) {
+ return -EIO;
+ }
+
+ capability = ar->arPhyCapability;
+ if(ar->arNetworkType == INFRA_NETWORK && ar->arConnected) {
+ bss_t *bss = wmi_find_node(ar->arWmi, ar->arBssid);
+ if (bss) {
+ capability = get_bss_phy_capability(bss);
+ wmi_node_return(ar->arWmi, bss);
+ }
+ }
+ switch (capability) {
+ case (WMI_11A_CAPABILITY):
+ strncpy(name, "AR6000 802.11a", IFNAMSIZ);
+ break;
+ case (WMI_11G_CAPABILITY):
+ strncpy(name, "AR6000 802.11g", IFNAMSIZ);
+ break;
+ case (WMI_11AG_CAPABILITY):
+ strncpy(name, "AR6000 802.11ag", IFNAMSIZ);
+ break;
+ case (WMI_11NA_CAPABILITY):
+ strncpy(name, "AR6000 802.11na", IFNAMSIZ);
+ break;
+ case (WMI_11NG_CAPABILITY):
+ strncpy(name, "AR6000 802.11ng", IFNAMSIZ);
+ break;
+ case (WMI_11NAG_CAPABILITY):
+ strncpy(name, "AR6000 802.11nag", IFNAMSIZ);
+ break;
+ default:
+ strncpy(name, "AR6000 802.11b", IFNAMSIZ);
+ break;
+ }
+
+ return 0;
+}
+
+/*
+ * SIOCSIWFREQ
+ */
+int
+ar6000_ioctl_siwfreq(struct net_device *dev,
+ struct iw_request_info *info,
+ struct iw_freq *freq, char *extra)
+{
+ AR_SOFTC_T *ar = (AR_SOFTC_T *)ar6k_priv(dev);
+
+ if (is_iwioctl_allowed(ar->arNextMode, info->cmd) != A_OK) {
+ A_PRINTF("wext_ioctl: cmd=0x%x not allowed in this mode\n", info->cmd);
+ return -EOPNOTSUPP;
+ }
+
+ if (ar->arWlanState == WLAN_DISABLED) {
+ return -EIO;
+ }
+
+ /*
+ * We support limiting the channels via wmiconfig.
+ *
+ * We use this command to configure the channel hint for the connect cmd
+ * so it is possible the target will end up connecting to a different
+ * channel.
+ */
+ if (freq->e > 1) {
+ return -EINVAL;
+ } else if (freq->e == 1) {
+ ar->arChannelHint = freq->m / 100000;
+ } else {
+ if(freq->m) {
+ ar->arChannelHint = wlan_ieee2freq(freq->m);
+ } else {
+ /* Auto Channel Selection */
+ ar->arChannelHint = 0;
+ }
+ }
+
+ ar->ap_profile_flag = 1; /* There is a change in profile */
+
+ A_PRINTF("channel hint set to %d\n", ar->arChannelHint);
+ return 0;
+}
+
+/*
+ * SIOCGIWFREQ
+ */
+int
+ar6000_ioctl_giwfreq(struct net_device *dev,
+ struct iw_request_info *info,
+ struct iw_freq *freq, char *extra)
+{
+ AR_SOFTC_T *ar = (AR_SOFTC_T *)ar6k_priv(dev);
+
+ if (is_iwioctl_allowed(ar->arNextMode, info->cmd) != A_OK) {
+ A_PRINTF("wext_ioctl: cmd=0x%x not allowed in this mode\n", info->cmd);
+ return -EOPNOTSUPP;
+ }
+
+ if (ar->arWlanState == WLAN_DISABLED) {
+ return -EIO;
+ }
+
+ if (ar->arNetworkType == AP_NETWORK) {
+ if(ar->arChannelHint) {
+ freq->m = ar->arChannelHint * 100000;
+ } else if(ar->arACS) {
+ freq->m = ar->arACS * 100000;
+ } else {
+ return -EINVAL;
+ }
+ } else {
+ if (ar->arConnected != TRUE) {
+ return -EINVAL;
+ } else {
+ freq->m = ar->arBssChannel * 100000;
+ }
+ }
+
+ freq->e = 1;
+
+ return 0;
+}
+
+/*
+ * SIOCSIWMODE
+ */
+int
+ar6000_ioctl_siwmode(struct net_device *dev,
+ struct iw_request_info *info,
+ __u32 *mode, char *extra)
+{
+ AR_SOFTC_T *ar = (AR_SOFTC_T *)ar6k_priv(dev);
+
+ if (is_iwioctl_allowed(ar->arNextMode, info->cmd) != A_OK) {
+ A_PRINTF("wext_ioctl: cmd=0x%x not allowed in this mode\n", info->cmd);
+ return -EOPNOTSUPP;
+ }
+
+ if (ar->arWlanState == WLAN_DISABLED) {
+ return -EIO;
+ }
+
+ /*
+ * clear SSID during mode switch in connected state
+ */
+ if(!(ar->arNetworkType == (((*mode) == IW_MODE_INFRA) ? INFRA_NETWORK : ADHOC_NETWORK)) && (ar->arConnected == TRUE) ){
+ A_MEMZERO(ar->arSsid, sizeof(ar->arSsid));
+ ar->arSsidLen = 0;
+ }
+
+ switch (*mode) {
+ case IW_MODE_INFRA:
+ ar->arNextMode = INFRA_NETWORK;
+ break;
+ case IW_MODE_ADHOC:
+ ar->arNextMode = ADHOC_NETWORK;
+ break;
+ case IW_MODE_MASTER:
+ ar->arNextMode = AP_NETWORK;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ /* Clear all shared parameters between AP and STA|IBSS modes when we
+ * switch between them. Switching between STA & IBSS modes doesn't clear
+ * the shared profile. This is as per the original design for switching
+ * between STA & IBSS.
+ */
+ if (ar->arNetworkType == AP_NETWORK || ar->arNextMode == AP_NETWORK) {
+ ar->arDot11AuthMode = OPEN_AUTH;
+ ar->arAuthMode = NONE_AUTH;
+ ar->arPairwiseCrypto = NONE_CRYPT;
+ ar->arPairwiseCryptoLen = 0;
+ ar->arGroupCrypto = NONE_CRYPT;
+ ar->arGroupCryptoLen = 0;
+ ar->arChannelHint = 0;
+ ar->arBssChannel = 0;
+ A_MEMZERO(ar->arBssid, sizeof(ar->arBssid));
+ A_MEMZERO(ar->arSsid, sizeof(ar->arSsid));
+ ar->arSsidLen = 0;
+ }
+
+ /* SSID has to be cleared to trigger a profile change while switching
+ * between STA & IBSS modes having the same SSID
+ */
+ if (ar->arNetworkType != ar->arNextMode) {
+ A_MEMZERO(ar->arSsid, sizeof(ar->arSsid));
+ ar->arSsidLen = 0;
+ }
+
+ return 0;
+}
+
+/*
+ * SIOCGIWMODE
+ */
+int
+ar6000_ioctl_giwmode(struct net_device *dev,
+ struct iw_request_info *info,
+ __u32 *mode, char *extra)
+{
+ AR_SOFTC_T *ar = (AR_SOFTC_T *)ar6k_priv(dev);
+
+ if (is_iwioctl_allowed(ar->arNextMode, info->cmd) != A_OK) {
+ A_PRINTF("wext_ioctl: cmd=0x%x not allowed in this mode\n", info->cmd);
+ return -EOPNOTSUPP;
+ }
+
+ if (ar->arWlanState == WLAN_DISABLED) {
+ return -EIO;
+ }
+
+ switch (ar->arNetworkType) {
+ case INFRA_NETWORK:
+ *mode = IW_MODE_INFRA;
+ break;
+ case ADHOC_NETWORK:
+ *mode = IW_MODE_ADHOC;
+ break;
+ case AP_NETWORK:
+ *mode = IW_MODE_MASTER;
+ break;
+ default:
+ return -EIO;
+ }
+ return 0;
+}
+
+/*
+ * SIOCSIWSENS
+ */
+int
+ar6000_ioctl_siwsens(struct net_device *dev,
+ struct iw_request_info *info,
+ struct iw_param *sens, char *extra)
+{
+ return 0;
+}
+
+/*
+ * SIOCGIWSENS
+ */
+int
+ar6000_ioctl_giwsens(struct net_device *dev,
+ struct iw_request_info *info,
+ struct iw_param *sens, char *extra)
+{
+ sens->value = 0;
+ sens->fixed = 1;
+
+ return 0;
+}
+
+/*
+ * SIOCGIWRANGE
+ */
+int
+ar6000_ioctl_giwrange(struct net_device *dev,
+ struct iw_request_info *info,
+ struct iw_point *data, char *extra)
+{
+ AR_SOFTC_T *ar = (AR_SOFTC_T *)ar6k_priv(dev);
+ struct iw_range *range = (struct iw_range *) extra;
+ int i, ret = 0;
+
+ if (is_iwioctl_allowed(ar->arNextMode, info->cmd) != A_OK) {
+ A_PRINTF("wext_ioctl: cmd=0x%x not allowed in this mode\n", info->cmd);
+ return -EOPNOTSUPP;
+ }
+
+ if (ar->bIsDestroyProgress) {
+ return -EBUSY;
+ }
+
+ if (ar->arWmiReady == FALSE) {
+ return -EIO;
+ }
+
+ if (down_interruptible(&ar->arSem)) {
+ return -ERESTARTSYS;
+ }
+
+ if (ar->bIsDestroyProgress) {
+ up(&ar->arSem);
+ return -EBUSY;
+ }
+
+ ar->arNumChannels = -1;
+ A_MEMZERO(ar->arChannelList, sizeof (ar->arChannelList));
+
+ if (wmi_get_channelList_cmd(ar->arWmi) != A_OK) {
+ up(&ar->arSem);
+ return -EIO;
+ }
+
+ wait_event_interruptible_timeout(arEvent, ar->arNumChannels != -1, wmitimeout * HZ);
+
+ if (signal_pending(current)) {
+ up(&ar->arSem);
+ return -EINTR;
+ }
+
+ data->length = sizeof(struct iw_range);
+ A_MEMZERO(range, sizeof(struct iw_range));
+
+ range->txpower_capa = 0;
+
+ range->min_pmp = 1 * 1024;
+ range->max_pmp = 65535 * 1024;
+ range->min_pmt = 1 * 1024;
+ range->max_pmt = 1000 * 1024;
+ range->pmp_flags = IW_POWER_PERIOD;
+ range->pmt_flags = IW_POWER_TIMEOUT;
+ range->pm_capa = 0;
+
+ range->we_version_compiled = WIRELESS_EXT;
+ range->we_version_source = 13;
+
+ range->retry_capa = IW_RETRY_LIMIT;
+ range->retry_flags = IW_RETRY_LIMIT;
+ range->min_retry = 0;
+ range->max_retry = 255;
+
+ range->num_frequency = range->num_channels = ar->arNumChannels;
+ for (i = 0; i < ar->arNumChannels; i++) {
+ range->freq[i].i = wlan_freq2ieee(ar->arChannelList[i]);
+ range->freq[i].m = ar->arChannelList[i] * 100000;
+ range->freq[i].e = 1;
+ /*
+ * Linux supports at most IW_MAX_FREQUENCIES (32) entries;
+ * bail out before overrunning the freq array.
+ */
+ if (i == IW_MAX_FREQUENCIES - 1) {
+ break;
+ }
+ }
+
+ /* Max quality is max field value minus noise floor */
+ range->max_qual.qual = 0xff - 161;
+
+ /*
+ * In order to use dBm measurements, 'level' must be lower
+ * than any possible measurement (see iw_print_stats() in
+ * wireless tools). It's unclear how this is meant to be
+ * done, but setting zero in these values forces dBm and
+ * the actual numbers are not used.
+ */
+ range->max_qual.level = 0;
+ range->max_qual.noise = 0;
+
+ range->sensitivity = 3;
+
+ range->max_encoding_tokens = 4;
+ /* XXX query driver to find out supported key sizes */
+ range->num_encoding_sizes = 3;
+ range->encoding_size[0] = 5; /* 40-bit */
+ range->encoding_size[1] = 13; /* 104-bit */
+ range->encoding_size[2] = 16; /* 128-bit */
+
+ range->num_bitrates = 0;
+
+ /* estimated maximum TCP throughput values (bps) */
+ range->throughput = 22000000;
+
+ range->min_rts = 0;
+ range->max_rts = 2347;
+ range->min_frag = 256;
+ range->max_frag = 2346;
+
+ up(&ar->arSem);
+
+ return ret;
+}
+
+
+/*
+ * SIOCSIWAP
+ * This ioctl is used to set the desired bssid for the connect command.
+ */
+int
+ar6000_ioctl_siwap(struct net_device *dev,
+ struct iw_request_info *info,
+ struct sockaddr *ap_addr, char *extra)
+{
+ AR_SOFTC_T *ar = (AR_SOFTC_T *)ar6k_priv(dev);
+
+ if (is_iwioctl_allowed(ar->arNextMode, info->cmd) != A_OK) {
+ A_PRINTF("wext_ioctl: cmd=0x%x not allowed in this mode\n", info->cmd);
+ return -EOPNOTSUPP;
+ }
+
+ if (ar->arWlanState == WLAN_DISABLED) {
+ return -EIO;
+ }
+
+ if (ap_addr->sa_family != ARPHRD_ETHER) {
+ return -EIO;
+ }
+
+ if (A_MEMCMP(&ap_addr->sa_data, bcast_mac, AR6000_ETH_ADDR_LEN) == 0) {
+ A_MEMZERO(ar->arReqBssid, sizeof(ar->arReqBssid));
+ } else {
+ A_MEMCPY(ar->arReqBssid, &ap_addr->sa_data, sizeof(ar->arReqBssid));
+ }
+
+ return 0;
+}
+
+/*
+ * SIOCGIWAP
+ */
+int
+ar6000_ioctl_giwap(struct net_device *dev,
+ struct iw_request_info *info,
+ struct sockaddr *ap_addr, char *extra)
+{
+ AR_SOFTC_T *ar = (AR_SOFTC_T *)ar6k_priv(dev);
+
+ if (is_iwioctl_allowed(ar->arNextMode, info->cmd) != A_OK) {
+ A_PRINTF("wext_ioctl: cmd=0x%x not allowed in this mode\n", info->cmd);
+ return -EOPNOTSUPP;
+ }
+
+ if (ar->arWlanState == WLAN_DISABLED) {
+ return -EIO;
+ }
+
+ if (ar->arNetworkType == AP_NETWORK) {
+ A_MEMCPY(&ap_addr->sa_data, dev->dev_addr, ATH_MAC_LEN);
+ ap_addr->sa_family = ARPHRD_ETHER;
+ return 0;
+ }
+
+ if (ar->arConnected != TRUE) {
+ return -EINVAL;
+ }
+
+ A_MEMCPY(&ap_addr->sa_data, ar->arBssid, sizeof(ar->arBssid));
+ ap_addr->sa_family = ARPHRD_ETHER;
+
+ return 0;
+}
+
+#if (WIRELESS_EXT >= 18)
+/*
+ * SIOCSIWMLME
+ */
+int
+ar6000_ioctl_siwmlme(struct net_device *dev,
+ struct iw_request_info *info,
+ struct iw_point *data, char *extra)
+{
+ AR_SOFTC_T *ar = (AR_SOFTC_T *)ar6k_priv(dev);
+
+ if (is_iwioctl_allowed(ar->arNextMode, info->cmd) != A_OK) {
+ A_PRINTF("wext_ioctl: cmd=0x%x not allowed in this mode\n", info->cmd);
+ return -EOPNOTSUPP;
+ }
+
+ if (ar->bIsDestroyProgress) {
+ return -EBUSY;
+ }
+
+ if (ar->arWlanState == WLAN_DISABLED) {
+ return -EIO;
+ }
+
+ if (ar->arWmiReady == FALSE) {
+ return -EIO;
+ }
+
+ if (down_interruptible(&ar->arSem)) {
+ return -ERESTARTSYS;
+ }
+
+ if (data->pointer && data->length == sizeof(struct iw_mlme)) {
+
+ A_UINT8 arNetworkType;
+ struct iw_mlme mlme;
+
+ if (copy_from_user(&mlme, data->pointer, sizeof(struct iw_mlme))) {
+ up(&ar->arSem);
+ return -EIO;
+ }
+
+ switch (mlme.cmd) {
+
+ case IW_MLME_DEAUTH:
+ /* fall through */
+ case IW_MLME_DISASSOC:
+ if ((ar->arConnected != TRUE) ||
+ (memcmp(ar->arBssid, mlme.addr.sa_data, 6) != 0)) {
+
+ up(&ar->arSem);
+ return -EINVAL;
+ }
+ wmi_setPmkid_cmd(ar->arWmi, ar->arBssid, NULL, 0);
+ arNetworkType = ar->arNetworkType;
+ ar6000_init_profile_info(ar);
+ ar->arNetworkType = arNetworkType;
+ reconnect_flag = 0;
+ wmi_disconnect_cmd(ar->arWmi);
+ A_MEMZERO(ar->arSsid, sizeof(ar->arSsid));
+ ar->arSsidLen = 0;
+ if (ar->arSkipScan == FALSE) {
+ A_MEMZERO(ar->arReqBssid, sizeof(ar->arReqBssid));
+ }
+ break;
+
+ case IW_MLME_AUTH:
+ /* fall through */
+ case IW_MLME_ASSOC:
+ /* fall through */
+ default:
+ up(&ar->arSem);
+ return -EOPNOTSUPP;
+ }
+ }
+
+ up(&ar->arSem);
+ return 0;
+}
+#endif /* WIRELESS_EXT >= 18 */
+
+/*
+ * SIOCGIWAPLIST
+ */
+int
+ar6000_ioctl_iwaplist(struct net_device *dev,
+ struct iw_request_info *info,
+ struct iw_point *data, char *extra)
+{
+ return -EIO; /* for now */
+}
+
+/*
+ * SIOCSIWSCAN
+ */
+int
+ar6000_ioctl_siwscan(struct net_device *dev,
+ struct iw_request_info *info,
+ struct iw_point *data, char *extra)
+{
+#define ACT_DWELLTIME_DEFAULT 105
+#define HOME_TXDRAIN_TIME 100
+#define SCAN_INT (HOME_TXDRAIN_TIME + ACT_DWELLTIME_DEFAULT)
+ AR_SOFTC_T *ar = (AR_SOFTC_T *)ar6k_priv(dev);
+ int ret = 0;
+
+ if (is_iwioctl_allowed(ar->arNextMode, info->cmd) != A_OK) {
+ A_PRINTF("wext_ioctl: cmd=0x%x not allowed in this mode\n", info->cmd);
+ return -EOPNOTSUPP;
+ }
+
+ if (ar->arWmiReady == FALSE) {
+ return -EIO;
+ }
+
+ if (ar->arWlanState == WLAN_DISABLED) {
+ return -EIO;
+ }
+
+ /* If a scan is issued in the middle of an ongoing scan or connect,
+ don't issue another one */
+ if ( ar->scan_triggered > 0 ) {
+ ++ar->scan_triggered;
+ if (ar->scan_triggered < 5) {
+ return 0;
+ } else {
+ AR_DEBUG_PRINTF(ATH_DEBUG_WLAN_SCAN,("Scan request triggered over 5 times with no scan complete event\n"));
+ }
+ }
+
+ if (!ar->arUserBssFilter) {
+ if (wmi_bssfilter_cmd(ar->arWmi, ALL_BSS_FILTER, 0) != A_OK) {
+ return -EIO;
+ }
+ }
+
+ if (ar->arConnected) {
+ if (wmi_get_stats_cmd(ar->arWmi) != A_OK) {
+ return -EIO;
+ }
+ }
+
+#ifdef ANDROID_ENV
+#if WIRELESS_EXT >= 18
+ if (data->pointer && (data->length == sizeof(struct iw_scan_req)))
+ {
+ if ((data->flags & IW_SCAN_THIS_ESSID) == IW_SCAN_THIS_ESSID)
+ {
+ struct iw_scan_req req;
+ if (copy_from_user(&req, data->pointer, sizeof(struct iw_scan_req)))
+ return -EIO;
+ if (wmi_probedSsid_cmd(ar->arWmi, 1, SPECIFIC_SSID_FLAG, req.essid_len, req.essid) != A_OK)
+ return -EIO;
+ ar->scanSpecificSsid = 1;
+ }
+ else
+ {
+ if (ar->scanSpecificSsid) {
+ if (wmi_probedSsid_cmd(ar->arWmi, 1, DISABLE_SSID_FLAG, 0, NULL) != A_OK)
+ return -EIO;
+ ar->scanSpecificSsid = 0;
+ }
+ }
+ }
+ else
+ {
+ if (ar->scanSpecificSsid) {
+ if (wmi_probedSsid_cmd(ar->arWmi, 1, DISABLE_SSID_FLAG, 0, NULL) != A_OK)
+ return -EIO;
+ ar->scanSpecificSsid = 0;
+ }
+ }
+#endif
+#endif /* ANDROID_ENV */
+
+ if (wmi_startscan_cmd(ar->arWmi, WMI_LONG_SCAN, FALSE, FALSE,
+ 0, 0, 0, NULL) != A_OK) {
+ ret = -EIO;
+ }
+
+ if (ret == 0) {
+ ar->scan_triggered = 1;
+ }
+
+ return ret;
+#undef ACT_DWELLTIME_DEFAULT
+#undef HOME_TXDRAIN_TIME
+#undef SCAN_INT
+}
+
+
+/*
+ * Units are in dB above the noise floor. That means the
+ * rssi values reported in the tx/rx descriptors in the
+ * driver are the SNR expressed in dB.
+ *
+ * If you assume that the noise floor is -95 dBm, which is an
+ * excellent assumption 99.5% of the time, then you can
+ * derive the absolute signal level (i.e. -95 + rssi).
+ * There are some other slight factors to take into account
+ * depending on whether the rssi measurement is from 11b,
+ * 11g, or 11a. These differences are at most 2 dB and
+ * can be documented.
+ *
+ * NB: various calculations are based on the orinoco/wavelan
+ * drivers for compatibility
+ */
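+/*
+ * Worked example (illustrative): an rssi of 35 is an SNR of 35 dB;
+ * with the assumed -95 dBm noise floor the absolute level is roughly
+ * -95 + 35 = -60 dBm. In ar6000_set_quality() below this becomes
+ * qual = 35, noise = 161 (-95 dBm) and level = 161 + 35 = 196.
+ */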
+static void
+ar6000_set_quality(struct iw_quality *iq, A_INT8 rssi)
+{
+ if (rssi < 0) {
+ iq->qual = 0;
+ } else {
+ iq->qual = rssi;
+ }
+
+ /* NB: max is 94 because noise is hardcoded to 161 */
+ if (iq->qual > 94)
+ iq->qual = 94;
+
+ iq->noise = 161; /* -95dBm */
+ iq->level = iq->noise + iq->qual;
+ iq->updated = 7;
+}
+
+
+int
+ar6000_ioctl_siwcommit(struct net_device *dev,
+ struct iw_request_info *info,
+ struct iw_point *data, char *extra)
+{
+ AR_SOFTC_T *ar = (AR_SOFTC_T *)ar6k_priv(dev);
+
+ if (is_iwioctl_allowed(ar->arNextMode, info->cmd) != A_OK) {
+ A_PRINTF("wext_ioctl: cmd=0x%x not allowed in this mode\n", info->cmd);
+ return -EOPNOTSUPP;
+ }
+
+ if (ar->arWmiReady == FALSE) {
+ return -EIO;
+ }
+
+ if (ar->arWlanState == WLAN_DISABLED) {
+ return -EIO;
+ }
+
+ AR_DEBUG_PRINTF(ATH_DEBUG_WLAN_CONNECT,("AP: SSID %s freq %d authmode %d dot11 auth %d"\
+ " PW crypto %d GRP crypto %d\n",
+ ar->arSsid, ar->arChannelHint,
+ ar->arAuthMode, ar->arDot11AuthMode,
+ ar->arPairwiseCrypto, ar->arGroupCrypto));
+
+ ar6000_ap_mode_profile_commit(ar);
+
+ /* if there is a profile switch from STA|IBSS mode to AP mode,
+ * update the host driver association state for the STA|IBSS mode.
+ */
+ if (ar->arNetworkType != AP_NETWORK && ar->arNextMode == AP_NETWORK) {
+ ar->arConnectPending = FALSE;
+ ar->arConnected = FALSE;
+ /* Stop getting pkts from upper stack */
+ netif_stop_queue(ar->arNetDev);
+ A_MEMZERO(ar->arBssid, sizeof(ar->arBssid));
+ ar->arBssChannel = 0;
+ ar->arBeaconInterval = 0;
+
+ /* Flush the Tx queues */
+ ar6000_TxDataCleanup(ar);
+
+ /* Start getting pkts from upper stack */
+ netif_wake_queue(ar->arNetDev);
+ }
+
+ return 0;
+}
+
+#define W_PROTO(_x) wait_ ## _x
+#define WAIT_HANDLER_IMPL(_x, type) \
+int wait_ ## _x (struct net_device *dev, struct iw_request_info *info, type wrqu, char *extra) {\
+ int ret; \
+ dev_hold(dev); \
+ rtnl_unlock(); \
+ ret = _x(dev, info, wrqu, extra); \
+ rtnl_lock(); \
+ dev_put(dev); \
+ return ret;\
+}
+
+WAIT_HANDLER_IMPL(ar6000_ioctl_siwessid, struct iw_point *)
+WAIT_HANDLER_IMPL(ar6000_ioctl_giwrate, struct iw_param *)
+WAIT_HANDLER_IMPL(ar6000_ioctl_giwtxpow, struct iw_param *)
+WAIT_HANDLER_IMPL(ar6000_ioctl_giwrange, struct iw_point*)
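+
+/*
+ * Each WAIT_HANDLER_IMPL() above expands to a wrapper named wait_<handler>()
+ * (also reachable through W_PROTO()) that takes a reference on the
+ * net_device and drops the RTNL lock around the real handler, since these
+ * handlers can sleep waiting for a WMI event. For instance, the ath_handlers
+ * table below registers wait_ar6000_ioctl_giwrange() for SIOCGIWRANGE.
+ */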
+
+/* Structures to export the Wireless Handlers */
+static const iw_handler ath_handlers[] = {
+ (iw_handler) ar6000_ioctl_siwcommit, /* SIOCSIWCOMMIT */
+ (iw_handler) ar6000_ioctl_giwname, /* SIOCGIWNAME */
+ (iw_handler) NULL, /* SIOCSIWNWID */
+ (iw_handler) NULL, /* SIOCGIWNWID */
+ (iw_handler) ar6000_ioctl_siwfreq, /* SIOCSIWFREQ */
+ (iw_handler) ar6000_ioctl_giwfreq, /* SIOCGIWFREQ */
+ (iw_handler) ar6000_ioctl_siwmode, /* SIOCSIWMODE */
+ (iw_handler) ar6000_ioctl_giwmode, /* SIOCGIWMODE */
+ (iw_handler) ar6000_ioctl_siwsens, /* SIOCSIWSENS */
+ (iw_handler) ar6000_ioctl_giwsens, /* SIOCGIWSENS */
+ (iw_handler) NULL /* not used */, /* SIOCSIWRANGE */
+ (iw_handler) W_PROTO(ar6000_ioctl_giwrange),/* SIOCGIWRANGE */
+ (iw_handler) NULL /* not used */, /* SIOCSIWPRIV */
+ (iw_handler) NULL /* kernel code */, /* SIOCGIWPRIV */
+ (iw_handler) NULL /* not used */, /* SIOCSIWSTATS */
+ (iw_handler) NULL /* kernel code */, /* SIOCGIWSTATS */
+ (iw_handler) NULL, /* SIOCSIWSPY */
+ (iw_handler) NULL, /* SIOCGIWSPY */
+ (iw_handler) NULL, /* SIOCSIWTHRSPY */
+ (iw_handler) NULL, /* SIOCGIWTHRSPY */
+ (iw_handler) ar6000_ioctl_siwap, /* SIOCSIWAP */
+ (iw_handler) ar6000_ioctl_giwap, /* SIOCGIWAP */
+#if (WIRELESS_EXT >= 18)
+ (iw_handler) ar6000_ioctl_siwmlme, /* SIOCSIWMLME */
+#else
+ (iw_handler) NULL, /* -- hole -- */
+#endif /* WIRELESS_EXT >= 18 */
+ (iw_handler) ar6000_ioctl_iwaplist, /* SIOCGIWAPLIST */
+ (iw_handler) ar6000_ioctl_siwscan, /* SIOCSIWSCAN */
+ (iw_handler) ar6000_ioctl_giwscan, /* SIOCGIWSCAN */
+ (iw_handler) W_PROTO(ar6000_ioctl_siwessid),/* SIOCSIWESSID */
+ (iw_handler) ar6000_ioctl_giwessid, /* SIOCGIWESSID */
+ (iw_handler) NULL, /* SIOCSIWNICKN */
+ (iw_handler) NULL, /* SIOCGIWNICKN */
+ (iw_handler) NULL, /* -- hole -- */
+ (iw_handler) NULL, /* -- hole -- */
+ (iw_handler) ar6000_ioctl_siwrate, /* SIOCSIWRATE */
+ (iw_handler) W_PROTO(ar6000_ioctl_giwrate), /* SIOCGIWRATE */
+ (iw_handler) NULL, /* SIOCSIWRTS */
+ (iw_handler) NULL, /* SIOCGIWRTS */
+ (iw_handler) NULL, /* SIOCSIWFRAG */
+ (iw_handler) NULL, /* SIOCGIWFRAG */
+ (iw_handler) ar6000_ioctl_siwtxpow, /* SIOCSIWTXPOW */
+ (iw_handler) W_PROTO(ar6000_ioctl_giwtxpow),/* SIOCGIWTXPOW */
+ (iw_handler) ar6000_ioctl_siwretry, /* SIOCSIWRETRY */
+ (iw_handler) ar6000_ioctl_giwretry, /* SIOCGIWRETRY */
+ (iw_handler) ar6000_ioctl_siwencode, /* SIOCSIWENCODE */
+ (iw_handler) ar6000_ioctl_giwencode, /* SIOCGIWENCODE */
+#if WIRELESS_EXT > 20
+ (iw_handler) ar6000_ioctl_siwpower, /* SIOCSIWPOWER */
+ (iw_handler) ar6000_ioctl_giwpower, /* SIOCGIWPOWER */
+#endif // WIRELESS_EXT > 20
+#if WIRELESS_EXT >= 18
+ (iw_handler) NULL, /* -- hole -- */
+ (iw_handler) NULL, /* -- hole -- */
+ (iw_handler) ar6000_ioctl_siwgenie, /* SIOCSIWGENIE */
+ (iw_handler) ar6000_ioctl_giwgenie, /* SIOCGIWGENIE */
+ (iw_handler) ar6000_ioctl_siwauth, /* SIOCSIWAUTH */
+ (iw_handler) ar6000_ioctl_giwauth, /* SIOCGIWAUTH */
+ (iw_handler) ar6000_ioctl_siwencodeext, /* SIOCSIWENCODEEXT */
+ (iw_handler) ar6000_ioctl_giwencodeext, /* SIOCGIWENCODEEXT */
+ (iw_handler) ar6000_ioctl_siwpmksa, /* SIOCSIWPMKSA */
+#endif // WIRELESS_EXT >= 18
+};
+
+struct iw_handler_def ath_iw_handler_def = {
+ .standard = (iw_handler *)ath_handlers,
+ .num_standard = ARRAY_SIZE(ath_handlers),
+ .private = NULL,
+ .num_private = 0,
+};
diff --git a/drivers/staging/ath6kl/reorder/aggr_rx_internal.h b/drivers/staging/ath6kl/reorder/aggr_rx_internal.h
new file mode 100644
index 000000000000..5dbf8f86f713
--- /dev/null
+++ b/drivers/staging/ath6kl/reorder/aggr_rx_internal.h
@@ -0,0 +1,116 @@
+/*
+ *
+ * Copyright (c) 2004-2010 Atheros Communications Inc.
+ * All rights reserved.
+ *
+ *
+//
+// Permission to use, copy, modify, and/or distribute this software for any
+// purpose with or without fee is hereby granted, provided that the above
+// copyright notice and this permission notice appear in all copies.
+//
+// THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+// WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+// MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+// ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+// WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+// ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+// OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+//
+//
+ *
+ */
+
+#ifndef __AGGR_RX_INTERNAL_H__
+#define __AGGR_RX_INTERNAL_H__
+
+#include "a_osapi.h"
+#include "aggr_recv_api.h"
+
+#define AGGR_WIN_IDX(x, y) ((x) % (y))
+#define AGGR_INCR_IDX(x, y) AGGR_WIN_IDX(((x)+1), (y))
+#define AGGR_DCRM_IDX(x, y) AGGR_WIN_IDX(((x)-1), (y))
+#define IEEE80211_MAX_SEQ_NO 0xFFF
+#define IEEE80211_NEXT_SEQ_NO(x) (((x) + 1) & IEEE80211_MAX_SEQ_NO)
+
+
+#define NUM_OF_TIDS 8
+#define AGGR_SZ_DEFAULT 8
+
+#define AGGR_WIN_SZ_MIN 2
+#define AGGR_WIN_SZ_MAX 8
+/* The TID window size is double what is negotiated. Derive TID_WINDOW_SZ from win_sz, per tid */
+#define TID_WINDOW_SZ(_x) ((_x) << 1)
+
+#define AGGR_NUM_OF_FREE_NETBUFS 16
+
+#define AGGR_GET_RXTID_STATS(_p, _x) (&(_p->stat[(_x)]))
+#define AGGR_GET_RXTID(_p, _x) (&(_p->RxTid[(_x)]))
+
+/* Hold q is a function of win_sz, which is negotiated per tid */
+#define HOLD_Q_SZ(_x) (TID_WINDOW_SZ((_x))*sizeof(OSBUF_HOLD_Q))
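+/*
+ * Illustrative sizing example: for a negotiated win_sz of 8,
+ * TID_WINDOW_SZ(8) = 16 hold-q slots, HOLD_Q_SZ(8) is
+ * 16 * sizeof(OSBUF_HOLD_Q) bytes, and an incoming sequence number
+ * maps to slot AGGR_WIN_IDX(seq_no, 16) = seq_no % 16.
+ */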
+/* AGGR_RX_TIMEOUT value is important as a (too) small value can cause frames to be
+ * delivered out of order and a (too) large value can cause undesirable latency in
+ * certain situations. */
+#define AGGR_RX_TIMEOUT 400 /* Timeout(in ms) for delivery of frames, if they are stuck */
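+/*
+ * With the 400 ms default above, a frame held back behind a missing
+ * sequence number is flushed by aggr_timeout() in rcv_aggr.c on the
+ * order of one to two timeout periods, depending on whether the TID
+ * saw progress when the timer fired.
+ */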
+
+typedef enum {
+ ALL_SEQNO = 0,
+ CONTIGUOUS_SEQNO = 1,
+}DELIVERY_ORDER;
+
+typedef struct {
+ void *osbuf;
+ A_BOOL is_amsdu;
+ A_UINT16 seq_no;
+}OSBUF_HOLD_Q;
+
+
+#if 0
+typedef struct {
+ A_UINT16 seqno_st;
+ A_UINT16 seqno_end;
+}WINDOW_SNAPSHOT;
+#endif
+
+typedef struct {
+ A_BOOL aggr; /* is it ON or OFF */
+ A_BOOL progress; /* TRUE when frames have arrived after a timer start */
+ A_BOOL timerMon; /* TRUE if the timer started for the sake of this TID */
+ A_UINT16 win_sz; /* negotiated window size */
+ A_UINT16 seq_next; /* Next seq no, in current window */
+ A_UINT32 hold_q_sz; /* Num of frames that can be held in hold q */
+ OSBUF_HOLD_Q *hold_q; /* Hold q for re-order */
+#if 0
+ WINDOW_SNAPSHOT old_win; /* Sliding window snapshot - for timeout */
+#endif
+ A_NETBUF_QUEUE_T q; /* q head for enqueuing frames for dispatch */
+ A_MUTEX_T lock;
+}RXTID;
+
+typedef struct {
+ A_UINT32 num_into_aggr; /* hitting at the input of this module */
+ A_UINT32 num_dups; /* duplicate */
+ A_UINT32 num_oow; /* out of window */
+ A_UINT32 num_mpdu; /* single payload 802.3/802.11 frame */
+ A_UINT32 num_amsdu; /* AMSDU */
+ A_UINT32 num_delivered; /* frames delivered to IP stack */
+ A_UINT32 num_timeouts; /* num of timeouts, during which frames delivered */
+ A_UINT32 num_hole; /* frame not present, when window moved over */
+ A_UINT32 num_bar; /* num of resets of seq_num, via BAR */
+}RXTID_STATS;
+
+typedef struct {
+ A_UINT8 aggr_sz; /* config value of aggregation size */
+ A_UINT8 timerScheduled;
+ A_TIMER timer; /* timer for returning held up pkts in re-order que */
+ void *dev; /* dev handle */
+ RX_CALLBACK rx_fn; /* callback function to return frames; to upper layer */
+ RXTID RxTid[NUM_OF_TIDS]; /* Per tid window */
+ ALLOC_NETBUFS netbuf_allocator; /* OS netbuf alloc fn */
+ A_NETBUF_QUEUE_T freeQ; /* pre-allocated buffers - for A_MSDU slicing */
+ RXTID_STATS stat[NUM_OF_TIDS]; /* Tid based statistics */
+ PACKET_LOG pkt_log; /* Log info of the packets */
+}AGGR_INFO;
+
+#endif /* __AGGR_RX_INTERNAL_H__ */
diff --git a/drivers/staging/ath6kl/reorder/rcv_aggr.c b/drivers/staging/ath6kl/reorder/rcv_aggr.c
new file mode 100644
index 000000000000..092bb3007c5d
--- /dev/null
+++ b/drivers/staging/ath6kl/reorder/rcv_aggr.c
@@ -0,0 +1,666 @@
+/*
+ *
+ * Copyright (c) 2010 Atheros Communications Inc.
+ * All rights reserved.
+ *
+ *
+//
+// Permission to use, copy, modify, and/or distribute this software for any
+// purpose with or without fee is hereby granted, provided that the above
+// copyright notice and this permission notice appear in all copies.
+//
+// THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+// WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+// MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+// ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+// WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+// ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+// OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+//
+//
+ *
+ */
+
+#ifdef ATH_AR6K_11N_SUPPORT
+
+#include <a_config.h>
+#include <athdefs.h>
+#include <a_types.h>
+#include <a_osapi.h>
+#include <a_debug.h>
+#include "pkt_log.h"
+#include "aggr_recv_api.h"
+#include "aggr_rx_internal.h"
+#include "wmi.h"
+
+extern A_STATUS
+wmi_dot3_2_dix(void *osbuf);
+
+static void
+aggr_slice_amsdu(AGGR_INFO *p_aggr, RXTID *rxtid, void **osbuf);
+
+static void
+aggr_timeout(A_ATH_TIMER arg);
+
+static void
+aggr_deque_frms(AGGR_INFO *p_aggr, A_UINT8 tid, A_UINT16 seq_no, A_UINT8 order);
+
+static void
+aggr_dispatch_frames(AGGR_INFO *p_aggr, A_NETBUF_QUEUE_T *q);
+
+static void *
+aggr_get_osbuf(AGGR_INFO *p_aggr);
+
+void *
+aggr_init(ALLOC_NETBUFS netbuf_allocator)
+{
+ AGGR_INFO *p_aggr = NULL;
+ RXTID *rxtid;
+ A_UINT8 i;
+ A_STATUS status = A_OK;
+
+ A_PRINTF("In aggr_init..\n");
+
+ do {
+ p_aggr = A_MALLOC(sizeof(AGGR_INFO));
+ if(!p_aggr) {
+ A_PRINTF("Failed to allocate memory for aggr_node\n");
+ status = A_ERROR;
+ break;
+ }
+
+ /* Init timer and data structures */
+ A_MEMZERO(p_aggr, sizeof(AGGR_INFO));
+ p_aggr->aggr_sz = AGGR_SZ_DEFAULT;
+ A_INIT_TIMER(&p_aggr->timer, aggr_timeout, p_aggr);
+ p_aggr->timerScheduled = FALSE;
+ A_NETBUF_QUEUE_INIT(&p_aggr->freeQ);
+
+ p_aggr->netbuf_allocator = netbuf_allocator;
+ p_aggr->netbuf_allocator(&p_aggr->freeQ, AGGR_NUM_OF_FREE_NETBUFS);
+
+ for(i = 0; i < NUM_OF_TIDS; i++) {
+ rxtid = AGGR_GET_RXTID(p_aggr, i);
+ rxtid->aggr = FALSE;
+ rxtid->progress = FALSE;
+ rxtid->timerMon = FALSE;
+ A_NETBUF_QUEUE_INIT(&rxtid->q);
+ A_MUTEX_INIT(&rxtid->lock);
+ }
+ }while(FALSE);
+
+ A_PRINTF("going out of aggr_init..status %s\n",
+ (status == A_OK) ? "OK":"Error");
+
+ if(status != A_OK) {
+ /* Cleanup */
+ aggr_module_destroy(p_aggr);
+ }
+ return ((status == A_OK) ? p_aggr : NULL);
+}
+
+/* utility function to clear rx hold_q for a tid */
+static void
+aggr_delete_tid_state(AGGR_INFO *p_aggr, A_UINT8 tid)
+{
+ RXTID *rxtid;
+ RXTID_STATS *stats;
+
+ A_ASSERT(tid < NUM_OF_TIDS && p_aggr);
+
+ rxtid = AGGR_GET_RXTID(p_aggr, tid);
+ stats = AGGR_GET_RXTID_STATS(p_aggr, tid);
+
+ if(rxtid->aggr) {
+ aggr_deque_frms(p_aggr, tid, 0, ALL_SEQNO);
+ }
+
+ rxtid->aggr = FALSE;
+ rxtid->progress = FALSE;
+ rxtid->timerMon = FALSE;
+ rxtid->win_sz = 0;
+ rxtid->seq_next = 0;
+ rxtid->hold_q_sz = 0;
+
+ if(rxtid->hold_q) {
+ A_FREE(rxtid->hold_q);
+ rxtid->hold_q = NULL;
+ }
+
+ A_MEMZERO(stats, sizeof(RXTID_STATS));
+}
+
+void
+aggr_module_destroy(void *cntxt)
+{
+ AGGR_INFO *p_aggr = (AGGR_INFO *)cntxt;
+ RXTID *rxtid;
+ A_UINT8 i, k;
+ A_PRINTF("%s(): aggr = %p\n",_A_FUNCNAME_, p_aggr);
+ A_ASSERT(p_aggr);
+
+ if(p_aggr) {
+ if(p_aggr->timerScheduled) {
+ A_UNTIMEOUT(&p_aggr->timer);
+ p_aggr->timerScheduled = FALSE;
+ }
+
+ for(i = 0; i < NUM_OF_TIDS; i++) {
+ rxtid = AGGR_GET_RXTID(p_aggr, i);
+ /* Free the hold q contents and hold_q*/
+ if(rxtid->hold_q) {
+ for(k = 0; k< rxtid->hold_q_sz; k++) {
+ if(rxtid->hold_q[k].osbuf) {
+ A_NETBUF_FREE(rxtid->hold_q[k].osbuf);
+ }
+ }
+ A_FREE(rxtid->hold_q);
+ }
+ /* Free the dispatch q contents*/
+ while(A_NETBUF_QUEUE_SIZE(&rxtid->q)) {
+ A_NETBUF_FREE(A_NETBUF_DEQUEUE(&rxtid->q));
+ }
+ if (A_IS_MUTEX_VALID(&rxtid->lock)) {
+ A_MUTEX_DELETE(&rxtid->lock);
+ }
+ }
+ /* free the freeQ and its contents*/
+ while(A_NETBUF_QUEUE_SIZE(&p_aggr->freeQ)) {
+ A_NETBUF_FREE(A_NETBUF_DEQUEUE(&p_aggr->freeQ));
+ }
+ A_FREE(p_aggr);
+ }
+ A_PRINTF("out aggr_module_destroy\n");
+}
+
+
+void
+aggr_register_rx_dispatcher(void *cntxt, void * dev, RX_CALLBACK fn)
+{
+ AGGR_INFO *p_aggr = (AGGR_INFO *)cntxt;
+
+ A_ASSERT(p_aggr && fn && dev);
+
+ p_aggr->rx_fn = fn;
+ p_aggr->dev = dev;
+}
+
+
+void
+aggr_process_bar(void *cntxt, A_UINT8 tid, A_UINT16 seq_no)
+{
+ AGGR_INFO *p_aggr = (AGGR_INFO *)cntxt;
+ RXTID_STATS *stats;
+
+ A_ASSERT(p_aggr);
+ stats = AGGR_GET_RXTID_STATS(p_aggr, tid);
+ stats->num_bar++;
+
+ aggr_deque_frms(p_aggr, tid, seq_no, ALL_SEQNO);
+}
+
+
+void
+aggr_recv_addba_req_evt(void *cntxt, A_UINT8 tid, A_UINT16 seq_no, A_UINT8 win_sz)
+{
+ AGGR_INFO *p_aggr = (AGGR_INFO *)cntxt;
+ RXTID *rxtid;
+ RXTID_STATS *stats;
+
+ A_ASSERT(p_aggr);
+ rxtid = AGGR_GET_RXTID(p_aggr, tid);
+ stats = AGGR_GET_RXTID_STATS(p_aggr, tid);
+
+ A_PRINTF("%s(): win_sz = %d aggr %d\n", _A_FUNCNAME_, win_sz, rxtid->aggr);
+ if(win_sz < AGGR_WIN_SZ_MIN || win_sz > AGGR_WIN_SZ_MAX) {
+ A_PRINTF("win_sz %d, tid %d\n", win_sz, tid);
+ }
+
+ if(rxtid->aggr) {
+ /* Just go and deliver all the frames up from this
+ * queue, as if we got DELBA and re-initialize the queue
+ */
+ aggr_delete_tid_state(p_aggr, tid);
+ }
+
+ rxtid->seq_next = seq_no;
+ /* Create these queues only upon receiving an ADDBA for a
+ * tid, to reduce the memory requirement
+ */
+ rxtid->hold_q = A_MALLOC(HOLD_Q_SZ(win_sz));
+ if(rxtid->hold_q == NULL) {
+ A_PRINTF("Failed to allocate memory, tid = %d\n", tid);
+ A_ASSERT(0);
+ }
+ A_MEMZERO(rxtid->hold_q, HOLD_Q_SZ(win_sz));
+
+ /* Update rxtid for the window sz */
+ rxtid->win_sz = win_sz;
+ /* hold_q_sz indicates the depth of the holding q - which is
+ * a factor of win_sz. Compute once, as it will be used often
+ */
+ rxtid->hold_q_sz = TID_WINDOW_SZ(win_sz);
+ /* There should be no frames on q - even when second ADDBA comes in.
+ * If aggr was previously ON on this tid, we would have cleaned up
+ * the q
+ */
+ if(A_NETBUF_QUEUE_SIZE(&rxtid->q) != 0) {
+ A_PRINTF("ERROR: Frames still on queue ?\n");
+ A_ASSERT(0);
+ }
+
+ rxtid->aggr = TRUE;
+}
+
+void
+aggr_recv_delba_req_evt(void *cntxt, A_UINT8 tid)
+{
+ AGGR_INFO *p_aggr = (AGGR_INFO *)cntxt;
+ RXTID *rxtid;
+
+ A_ASSERT(p_aggr);
+ A_PRINTF("%s(): tid %d\n", _A_FUNCNAME_, tid);
+
+ rxtid = AGGR_GET_RXTID(p_aggr, tid);
+
+ if(rxtid->aggr) {
+ aggr_delete_tid_state(p_aggr, tid);
+ }
+}
+
+static void
+aggr_deque_frms(AGGR_INFO *p_aggr, A_UINT8 tid, A_UINT16 seq_no, A_UINT8 order)
+{
+ RXTID *rxtid;
+ OSBUF_HOLD_Q *node;
+ A_UINT16 idx, idx_end, seq_end;
+ RXTID_STATS *stats;
+
+ A_ASSERT(p_aggr);
+ rxtid = AGGR_GET_RXTID(p_aggr, tid);
+ stats = AGGR_GET_RXTID_STATS(p_aggr, tid);
+
+ /* idx is absolute location for first frame */
+ idx = AGGR_WIN_IDX(rxtid->seq_next, rxtid->hold_q_sz);
+
+ /* idx_end is typically the last possible frame in the window,
+ * but changes to the given seq_no when a BAR comes. If seq_no
+ * is non-zero, we will go up to that and stop.
+ * Note: the last seq no in the current window occupies the same
+ * index position as the index just previous to the start.
+ * An important point: if win_sz is 7, for a seq_no space of 0-4095,
+ * there would be holes when the sequence number wraps around.
+ * The target should choose win_sz judiciously with this in mind
+ * (since TID_WINDOW_SZ = 2 x win_sz, a win_sz of 2, 4, 8 or 16
+ * works fine for the 4096-value seq_no space).
+ * We must dequeue from "idx" to "idx_end", inclusive.
+ */
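+ /*
+ * Numeric illustration (assuming hold_q_sz == 16): with
+ * seq_next == 4094, idx = 4094 % 16 = 14; as the window advances
+ * past 4095 the sequence number wraps to 0, so idx moves
+ * 14 -> 15 -> 0 while the hold q slots are drained in order.
+ */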
+ seq_end = (seq_no) ? seq_no : rxtid->seq_next;
+ idx_end = AGGR_WIN_IDX(seq_end, rxtid->hold_q_sz);
+
+ /* Critical section begins */
+ A_MUTEX_LOCK(&rxtid->lock);
+ do {
+
+ node = &rxtid->hold_q[idx];
+
+ if((order == CONTIGUOUS_SEQNO) && (!node->osbuf))
+ break;
+
+ /* chain and deliver frames because:
+ * 1. either the frames are in order and the window is contiguous, OR
+ * 2. we need to dequeue frames, irrespective of holes
+ */
+ if(node->osbuf) {
+ if(node->is_amsdu) {
+ aggr_slice_amsdu(p_aggr, rxtid, &node->osbuf);
+ } else {
+ A_NETBUF_ENQUEUE(&rxtid->q, node->osbuf);
+ }
+ node->osbuf = NULL;
+ } else {
+ stats->num_hole++;
+ }
+
+ /* window is moving */
+ rxtid->seq_next = IEEE80211_NEXT_SEQ_NO(rxtid->seq_next);
+ idx = AGGR_WIN_IDX(rxtid->seq_next, rxtid->hold_q_sz);
+ } while(idx != idx_end);
+ /* Critical section ends */
+ A_MUTEX_UNLOCK(&rxtid->lock);
+
+ stats->num_delivered += A_NETBUF_QUEUE_SIZE(&rxtid->q);
+ aggr_dispatch_frames(p_aggr, &rxtid->q);
+}
+
+static void *
+aggr_get_osbuf(AGGR_INFO *p_aggr)
+{
+ void *buf = NULL;
+
+ /* Starving for buffers? Get more from the OS:
+ * if the number of free netbufs is low (< 1/4 of
+ * AGGR_NUM_OF_FREE_NETBUFS), re-allocate more,
+ * then allocate a free buf from freeQ
+ */
+ if (A_NETBUF_QUEUE_SIZE(&p_aggr->freeQ) < (AGGR_NUM_OF_FREE_NETBUFS >> 2)) {
+ p_aggr->netbuf_allocator(&p_aggr->freeQ, AGGR_NUM_OF_FREE_NETBUFS);
+ }
+
+ if (A_NETBUF_QUEUE_SIZE(&p_aggr->freeQ)) {
+ buf = A_NETBUF_DEQUEUE(&p_aggr->freeQ);
+ }
+
+ return buf;
+}
+
+
+static void
+aggr_slice_amsdu(AGGR_INFO *p_aggr, RXTID *rxtid, void **osbuf)
+{
+ void *new_buf;
+ A_UINT16 frame_8023_len, payload_8023_len, mac_hdr_len, amsdu_len;
+ A_UINT8 *framep;
+
+ /* Frame format at this point:
+ * [DIX hdr | 802.3 | 802.3 | ... | 802.3]
+ *
+ * Strip the DIX header.
+ * Iterate through the osbuf and do:
+ * grab a free netbuf from freeQ
+ * find the start and end of a frame
+ * copy it to the netbuf (Vista can do better here)
+ * convert the 802.3 msdu frame to upper layer format - os routine
+ * - for now let's convert from 802.3 to DIX
+ * enqueue this to the dispatch q of the tid
+ * repeat
+ * free the osbuf back to the OS. It's been sliced.
+ */
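+ /*
+ * Subframe length illustration (hypothetical numbers, assuming a
+ * 14-byte ATH_MAC_HDR): a subframe with payload_8023_len = 47 gives
+ * frame_8023_len = 47 + 14 = 61 bytes copied out; the round-up
+ * ((61 + 3) & ~3) = 64 then advances framep to the next 4-byte
+ * aligned subframe boundary.
+ */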
+
+ mac_hdr_len = sizeof(ATH_MAC_HDR);
+ framep = A_NETBUF_DATA(*osbuf) + mac_hdr_len;
+ amsdu_len = A_NETBUF_LEN(*osbuf) - mac_hdr_len;
+
+ while(amsdu_len > mac_hdr_len) {
+ /* Begin of a 802.3 frame */
+ payload_8023_len = A_BE2CPU16(((ATH_MAC_HDR *)framep)->typeOrLen);
+#define MAX_MSDU_SUBFRAME_PAYLOAD_LEN 1508
+#define MIN_MSDU_SUBFRAME_PAYLOAD_LEN 46
+ if(payload_8023_len < MIN_MSDU_SUBFRAME_PAYLOAD_LEN || payload_8023_len > MAX_MSDU_SUBFRAME_PAYLOAD_LEN) {
+ A_PRINTF("802.3 AMSDU frame bound check failed. len %d\n", payload_8023_len);
+ break;
+ }
+ frame_8023_len = payload_8023_len + mac_hdr_len;
+ new_buf = aggr_get_osbuf(p_aggr);
+ if(new_buf == NULL) {
+ A_PRINTF("No buffer available \n");
+ break;
+ }
+
+ A_MEMCPY(A_NETBUF_DATA(new_buf), framep, frame_8023_len);
+ A_NETBUF_PUT(new_buf, frame_8023_len);
+ if (wmi_dot3_2_dix(new_buf) != A_OK) {
+ A_PRINTF("dot3_2_dix err..\n");
+ A_NETBUF_FREE(new_buf);
+ break;
+ }
+
+ A_NETBUF_ENQUEUE(&rxtid->q, new_buf);
+
+ /* Is this the last subframe within this aggregate ? */
+ if ((amsdu_len - frame_8023_len) == 0) {
+ break;
+ }
+
+ /* Add the length of A-MSDU subframe padding bytes -
+ * Round to nearest word.
+ */
+ frame_8023_len = ((frame_8023_len + 3) & ~3);
+
+ framep += frame_8023_len;
+ amsdu_len -= frame_8023_len;
+ }
+
+ A_NETBUF_FREE(*osbuf);
+ *osbuf = NULL;
+}
+
+void
+aggr_process_recv_frm(void *cntxt, A_UINT8 tid, A_UINT16 seq_no, A_BOOL is_amsdu, void **osbuf)
+{
+ AGGR_INFO *p_aggr = (AGGR_INFO *)cntxt;
+ RXTID *rxtid;
+ RXTID_STATS *stats;
+ A_UINT16 idx, st, cur, end;
+ A_UINT16 *log_idx;
+ OSBUF_HOLD_Q *node;
+ PACKET_LOG *log;
+
+ A_ASSERT(p_aggr);
+ A_ASSERT(tid < NUM_OF_TIDS);
+
+ rxtid = AGGR_GET_RXTID(p_aggr, tid);
+ stats = AGGR_GET_RXTID_STATS(p_aggr, tid);
+
+ stats->num_into_aggr++;
+
+ if(!rxtid->aggr) {
+ if(is_amsdu) {
+ aggr_slice_amsdu(p_aggr, rxtid, osbuf);
+ stats->num_amsdu++;
+ aggr_dispatch_frames(p_aggr, &rxtid->q);
+ }
+ return;
+ }
+
+ /* Check the incoming sequence no, if it's in the window */
+ st = rxtid->seq_next;
+ cur = seq_no;
+ end = (st + rxtid->hold_q_sz-1) & IEEE80211_MAX_SEQ_NO;
+ /* Log the pkt info for future analysis */
+ log = &p_aggr->pkt_log;
+ log_idx = &log->last_idx;
+ log->info[*log_idx].cur = cur;
+ log->info[*log_idx].st = st;
+ log->info[*log_idx].end = end;
+ *log_idx = IEEE80211_NEXT_SEQ_NO(*log_idx);
+
+ if(((st < end) && (cur < st || cur > end)) ||
+ ((st > end) && (cur > end) && (cur < st))) {
+ /* The cur frame is outside the window. Since we know
+ * the target would not do this without reason, assume the
+ * window has moved for a valid reason, dequeue all frames
+ * and start fresh.
+ */
+ A_UINT16 extended_end;
+
+ extended_end = (end + rxtid->hold_q_sz-1) & IEEE80211_MAX_SEQ_NO;
+
+ if(((end < extended_end) && (cur < end || cur > extended_end)) ||
+ ((end > extended_end) && (cur > extended_end) && (cur < end))) {
+ // dequeue all queued frames and shift the window to the new frame
+ aggr_deque_frms(p_aggr, tid, 0, ALL_SEQNO);
+ // set the window start so that the new frame is the last frame in the window
+ if(cur >= rxtid->hold_q_sz-1) {
+ rxtid->seq_next = cur - (rxtid->hold_q_sz-1);
+ }else{
+ rxtid->seq_next = IEEE80211_MAX_SEQ_NO - (rxtid->hold_q_sz-2 - cur);
+ }
+ } else {
+ // dequeue only those frames that are outside the new shifted window
+ if(cur >= rxtid->hold_q_sz-1) {
+ st = cur - (rxtid->hold_q_sz-1);
+ }else{
+ st = IEEE80211_MAX_SEQ_NO - (rxtid->hold_q_sz-2 - cur);
+ }
+
+ aggr_deque_frms(p_aggr, tid, st, ALL_SEQNO);
+ }
+
+ stats->num_oow++;
+ }
+
+ idx = AGGR_WIN_IDX(seq_no, rxtid->hold_q_sz);
+
+ /* enqueue the frame in hold_q */
+ node = &rxtid->hold_q[idx];
+
+ A_MUTEX_LOCK(&rxtid->lock);
+ if(node->osbuf) {
+ /* Is the current frame a duplicate, or something beyond our
+ * window (hold_q, which is already 2x the window)?
+ * 1. A duplicate is easy - drop the incoming frame.
+ * 2. Not falling in the current sliding window:
+ * 2a. Does frame_seq_no precede the current tid_seq_no?
+ * -> drop the frame; perhaps the sender did not get our ACK.
+ * This is taken care of above.
+ * 2b. Is frame_seq_no beyond window(st, TID_WINDOW_SZ)?
+ * -> Also taken care of above, by moving the window forward.
+ *
+ */
+ A_NETBUF_FREE(node->osbuf);
+ stats->num_dups++;
+ }
+
+ node->osbuf = *osbuf;
+ node->is_amsdu = is_amsdu;
+ node->seq_no = seq_no;
+ if(node->is_amsdu) {
+ stats->num_amsdu++;
+ } else {
+ stats->num_mpdu++;
+ }
+ A_MUTEX_UNLOCK(&rxtid->lock);
+
+ *osbuf = NULL;
+ aggr_deque_frms(p_aggr, tid, 0, CONTIGUOUS_SEQNO);
+
+ if(p_aggr->timerScheduled) {
+ rxtid->progress = TRUE;
+ }else{
+ for(idx=0 ; idx<rxtid->hold_q_sz ; idx++) {
+ if(rxtid->hold_q[idx].osbuf) {
+ /* there is a frame in the queue and no timer so
+ * start a timer to ensure that the frame doesn't remain
+ * stuck forever. */
+ p_aggr->timerScheduled = TRUE;
+ A_TIMEOUT_MS(&p_aggr->timer, AGGR_RX_TIMEOUT, 0);
+ rxtid->progress = FALSE;
+ rxtid->timerMon = TRUE;
+ break;
+ }
+ }
+ }
+}
+
+/*
+ * aggr_reset_state -- Called when it is deemed necessary to clear the aggregate
+ * hold Q state. Examples include receipt of a connect or a disconnect
+ * event.
+ */
+void
+aggr_reset_state(void *cntxt)
+{
+ A_UINT8 tid;
+ AGGR_INFO *p_aggr = (AGGR_INFO *)cntxt;
+
+ A_ASSERT(p_aggr);
+
+ for(tid=0 ; tid<NUM_OF_TIDS ; tid++) {
+ aggr_delete_tid_state(p_aggr, tid);
+ }
+}
+
+
+static void
+aggr_timeout(A_ATH_TIMER arg)
+{
+ A_UINT8 i,j;
+ AGGR_INFO *p_aggr = (AGGR_INFO *)arg;
+ RXTID *rxtid;
+ RXTID_STATS *stats;
+ /*
+ * If the q for which the timer was originally started has
+ * not progressed then it is necessary to dequeue all the
+ * contained frames so that they are not held forever.
+ */
+ for(i = 0; i < NUM_OF_TIDS; i++) {
+ rxtid = AGGR_GET_RXTID(p_aggr, i);
+ stats = AGGR_GET_RXTID_STATS(p_aggr, i);
+
+ if(rxtid->aggr == FALSE ||
+ rxtid->timerMon == FALSE ||
+ rxtid->progress == TRUE) {
+ continue;
+ }
+ // dequeue all frames held for this tid
+ stats->num_timeouts++;
+ A_PRINTF("TO: st %d end %d\n", rxtid->seq_next, ((rxtid->seq_next + rxtid->hold_q_sz-1) & IEEE80211_MAX_SEQ_NO));
+ aggr_deque_frms(p_aggr, i, 0, ALL_SEQNO);
+ }
+
+ p_aggr->timerScheduled = FALSE;
+ // determine whether a new timer should be started.
+ for(i = 0; i < NUM_OF_TIDS; i++) {
+ rxtid = AGGR_GET_RXTID(p_aggr, i);
+
+ if(rxtid->aggr == TRUE && rxtid->hold_q) {
+ for(j = 0 ; j < rxtid->hold_q_sz ; j++)
+ {
+ if(rxtid->hold_q[j].osbuf)
+ {
+ p_aggr->timerScheduled = TRUE;
+ rxtid->timerMon = TRUE;
+ rxtid->progress = FALSE;
+ break;
+ }
+ }
+
+ if(j >= rxtid->hold_q_sz) {
+ rxtid->timerMon = FALSE;
+ }
+ }
+ }
+
+ if(p_aggr->timerScheduled) {
+ /* Rearm the timer*/
+ A_TIMEOUT_MS(&p_aggr->timer, AGGR_RX_TIMEOUT, 0);
+ }
+
+}
+
+static void
+aggr_dispatch_frames(AGGR_INFO *p_aggr, A_NETBUF_QUEUE_T *q)
+{
+ void *osbuf;
+
+ while((osbuf = A_NETBUF_DEQUEUE(q))) {
+ p_aggr->rx_fn(p_aggr->dev, osbuf);
+ }
+}
+
+void
+aggr_dump_stats(void *cntxt, PACKET_LOG **log_buf)
+{
+ AGGR_INFO *p_aggr = (AGGR_INFO *)cntxt;
+ RXTID *rxtid;
+ RXTID_STATS *stats;
+ A_UINT8 i;
+
+ *log_buf = &p_aggr->pkt_log;
+ A_PRINTF("\n\n================================================\n");
+ A_PRINTF("tid: num_into_aggr, dups, oow, mpdu, amsdu, delivered, timeouts, holes, bar, seq_next\n");
+ for(i = 0; i < NUM_OF_TIDS; i++) {
+ stats = AGGR_GET_RXTID_STATS(p_aggr, i);
+ rxtid = AGGR_GET_RXTID(p_aggr, i);
+ A_PRINTF("%d: %d %d %d %d %d %d %d %d %d : %d\n", i, stats->num_into_aggr, stats->num_dups,
+ stats->num_oow, stats->num_mpdu,
+ stats->num_amsdu, stats->num_delivered, stats->num_timeouts,
+ stats->num_hole, stats->num_bar,
+ rxtid->seq_next);
+ }
+ A_PRINTF("================================================\n\n");
+
+}
+
+#endif /* ATH_AR6K_11N_SUPPORT */
diff --git a/drivers/staging/ath6kl/wlan/include/ieee80211.h b/drivers/staging/ath6kl/wlan/include/ieee80211.h
new file mode 100644
index 000000000000..c4fd13fe0a91
--- /dev/null
+++ b/drivers/staging/ath6kl/wlan/include/ieee80211.h
@@ -0,0 +1,401 @@
+//------------------------------------------------------------------------------
+// <copyright file="ieee80211.h" company="Atheros">
+// Copyright (c) 2004-2010 Atheros Corporation. All rights reserved.
+//
+//
+// Permission to use, copy, modify, and/or distribute this software for any
+// purpose with or without fee is hereby granted, provided that the above
+// copyright notice and this permission notice appear in all copies.
+//
+// THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+// WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+// MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+// ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+// WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+// ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+// OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+//
+//
+//------------------------------------------------------------------------------
+//==============================================================================
+// Author(s): ="Atheros"
+//==============================================================================
+#ifndef _NET80211_IEEE80211_H_
+#define _NET80211_IEEE80211_H_
+
+#include "athstartpack.h"
+
+/*
+ * 802.11 protocol definitions.
+ */
+#define IEEE80211_WEP_KEYLEN 5 /* 40bit */
+#define IEEE80211_WEP_IVLEN 3 /* 24bit */
+#define IEEE80211_WEP_KIDLEN 1 /* 1 octet */
+#define IEEE80211_WEP_CRCLEN 4 /* CRC-32 */
+#define IEEE80211_WEP_NKID 4 /* number of key ids */
+
+/*
+ * 802.11i defines an extended IV for use with non-WEP ciphers.
+ * When the EXTIV bit is set in the key id byte an additional
+ * 4 bytes immediately follow the IV for TKIP. For CCMP the
+ * EXTIV bit is likewise set but the 8 bytes represent the
+ * CCMP header rather than IV+extended-IV.
+ */
+#define IEEE80211_WEP_EXTIV 0x20
+#define IEEE80211_WEP_EXTIVLEN 4 /* extended IV length */
+#define IEEE80211_WEP_MICLEN 8 /* trailing MIC */
+
+#define IEEE80211_CRC_LEN 4
+
+#ifdef WAPI_ENABLE
+#define IEEE80211_WAPI_EXTIVLEN 10 /* extended IV length */
+#endif /* WAPI ENABLE */
+
+
+#define IEEE80211_ADDR_LEN 6 /* size of 802.11 address */
+/* is 802.11 address multicast/broadcast? */
+#define IEEE80211_IS_MULTICAST(_a) (*(_a) & 0x01)
+#define IEEE80211_IS_BROADCAST(_a) (*(_a) == 0xFF)
+#define WEP_HEADER (IEEE80211_WEP_IVLEN + IEEE80211_WEP_KIDLEN)
+#define WEP_TRAILER IEEE80211_WEP_CRCLEN
+#define CCMP_HEADER (IEEE80211_WEP_IVLEN + IEEE80211_WEP_KIDLEN + \
+ IEEE80211_WEP_EXTIVLEN)
+#define CCMP_TRAILER IEEE80211_WEP_MICLEN
+#define TKIP_HEADER (IEEE80211_WEP_IVLEN + IEEE80211_WEP_KIDLEN + \
+ IEEE80211_WEP_EXTIVLEN)
+#define TKIP_TRAILER IEEE80211_WEP_CRCLEN
+#define TKIP_MICLEN IEEE80211_WEP_MICLEN
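+
+/* With the sizes above these work out to: WEP_HEADER == 4 and WEP_TRAILER == 4
+ * bytes; CCMP_HEADER, CCMP_TRAILER, TKIP_HEADER and TKIP_MICLEN are 8 bytes
+ * each; TKIP_TRAILER is 4 bytes.
+ */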
+
+
+#define IEEE80211_ADDR_EQ(addr1, addr2) \
+ (A_MEMCMP(addr1, addr2, IEEE80211_ADDR_LEN) == 0)
+
+#define IEEE80211_ADDR_COPY(dst,src) A_MEMCPY(dst,src,IEEE80211_ADDR_LEN)
+
+#define IEEE80211_KEYBUF_SIZE 16
+#define IEEE80211_MICBUF_SIZE (8+8) /* space for both tx and rx */
+
+/*
+ * NB: these values are ordered carefully; there are lots of
+ * implications in any reordering. In particular, beware
+ * that 4 is not used to avoid conflicting with IEEE80211_F_PRIVACY.
+ */
+#define IEEE80211_CIPHER_WEP 0
+#define IEEE80211_CIPHER_TKIP 1
+#define IEEE80211_CIPHER_AES_OCB 2
+#define IEEE80211_CIPHER_AES_CCM 3
+#define IEEE80211_CIPHER_CKIP 5
+#define IEEE80211_CIPHER_CCKM_KRK 6
+#define IEEE80211_CIPHER_NONE 7 /* pseudo value */
+
+#define IEEE80211_CIPHER_MAX (IEEE80211_CIPHER_NONE+1)
+
+#define IEEE80211_IS_VALID_WEP_CIPHER_LEN(len) \
+ (((len) == 5) || ((len) == 13) || ((len) == 16))
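+/* i.e. 5-, 13- and 16-byte keys - WEP-40, WEP-104 and WEP-128 respectively. */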
+
+
+
+/*
+ * generic definitions for IEEE 802.11 frames
+ */
+PREPACK struct ieee80211_frame {
+ A_UINT8 i_fc[2];
+ A_UINT8 i_dur[2];
+ A_UINT8 i_addr1[IEEE80211_ADDR_LEN];
+ A_UINT8 i_addr2[IEEE80211_ADDR_LEN];
+ A_UINT8 i_addr3[IEEE80211_ADDR_LEN];
+ A_UINT8 i_seq[2];
+ /* possibly followed by addr4[IEEE80211_ADDR_LEN]; */
+ /* see below */
+} POSTPACK;
+
+PREPACK struct ieee80211_qosframe {
+ A_UINT8 i_fc[2];
+ A_UINT8 i_dur[2];
+ A_UINT8 i_addr1[IEEE80211_ADDR_LEN];
+ A_UINT8 i_addr2[IEEE80211_ADDR_LEN];
+ A_UINT8 i_addr3[IEEE80211_ADDR_LEN];
+ A_UINT8 i_seq[2];
+ A_UINT8 i_qos[2];
+} POSTPACK;
+
+#define IEEE80211_FC0_VERSION_MASK 0x03
+#define IEEE80211_FC0_VERSION_SHIFT 0
+#define IEEE80211_FC0_VERSION_0 0x00
+#define IEEE80211_FC0_TYPE_MASK 0x0c
+#define IEEE80211_FC0_TYPE_SHIFT 2
+#define IEEE80211_FC0_TYPE_MGT 0x00
+#define IEEE80211_FC0_TYPE_CTL 0x04
+#define IEEE80211_FC0_TYPE_DATA 0x08
+
+#define IEEE80211_FC0_SUBTYPE_MASK 0xf0
+#define IEEE80211_FC0_SUBTYPE_SHIFT 4
+/* for TYPE_MGT */
+#define IEEE80211_FC0_SUBTYPE_ASSOC_REQ 0x00
+#define IEEE80211_FC0_SUBTYPE_ASSOC_RESP 0x10
+#define IEEE80211_FC0_SUBTYPE_REASSOC_REQ 0x20
+#define IEEE80211_FC0_SUBTYPE_REASSOC_RESP 0x30
+#define IEEE80211_FC0_SUBTYPE_PROBE_REQ 0x40
+#define IEEE80211_FC0_SUBTYPE_PROBE_RESP 0x50
+#define IEEE80211_FC0_SUBTYPE_BEACON 0x80
+#define IEEE80211_FC0_SUBTYPE_ATIM 0x90
+#define IEEE80211_FC0_SUBTYPE_DISASSOC 0xa0
+#define IEEE80211_FC0_SUBTYPE_AUTH 0xb0
+#define IEEE80211_FC0_SUBTYPE_DEAUTH 0xc0
+/* for TYPE_CTL */
+#define IEEE80211_FC0_SUBTYPE_PS_POLL 0xa0
+#define IEEE80211_FC0_SUBTYPE_RTS 0xb0
+#define IEEE80211_FC0_SUBTYPE_CTS 0xc0
+#define IEEE80211_FC0_SUBTYPE_ACK 0xd0
+#define IEEE80211_FC0_SUBTYPE_CF_END 0xe0
+#define IEEE80211_FC0_SUBTYPE_CF_END_ACK 0xf0
+/* for TYPE_DATA (bit combination) */
+#define IEEE80211_FC0_SUBTYPE_DATA 0x00
+#define IEEE80211_FC0_SUBTYPE_CF_ACK 0x10
+#define IEEE80211_FC0_SUBTYPE_CF_POLL 0x20
+#define IEEE80211_FC0_SUBTYPE_CF_ACPL 0x30
+#define IEEE80211_FC0_SUBTYPE_NODATA 0x40
+#define IEEE80211_FC0_SUBTYPE_CFACK 0x50
+#define IEEE80211_FC0_SUBTYPE_CFPOLL 0x60
+#define IEEE80211_FC0_SUBTYPE_CF_ACK_CF_ACK 0x70
+#define IEEE80211_FC0_SUBTYPE_QOS 0x80
+#define IEEE80211_FC0_SUBTYPE_QOS_NULL 0xc0
+
+#define IEEE80211_FC1_DIR_MASK 0x03
+#define IEEE80211_FC1_DIR_NODS 0x00 /* STA->STA */
+#define IEEE80211_FC1_DIR_TODS 0x01 /* STA->AP */
+#define IEEE80211_FC1_DIR_FROMDS 0x02 /* AP ->STA */
+#define IEEE80211_FC1_DIR_DSTODS 0x03 /* AP ->AP */
+
+#define IEEE80211_FC1_MORE_FRAG 0x04
+#define IEEE80211_FC1_RETRY 0x08
+#define IEEE80211_FC1_PWR_MGT 0x10
+#define IEEE80211_FC1_MORE_DATA 0x20
+#define IEEE80211_FC1_WEP 0x40
+#define IEEE80211_FC1_ORDER 0x80
+
+#define IEEE80211_SEQ_FRAG_MASK 0x000f
+#define IEEE80211_SEQ_FRAG_SHIFT 0
+#define IEEE80211_SEQ_SEQ_MASK 0xfff0
+#define IEEE80211_SEQ_SEQ_SHIFT 4
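+
+/* The 16-bit sequence control field is typically decoded as, e.g.:
+ * seqno = (seqctl & IEEE80211_SEQ_SEQ_MASK) >> IEEE80211_SEQ_SEQ_SHIFT;
+ * frag = (seqctl & IEEE80211_SEQ_FRAG_MASK) >> IEEE80211_SEQ_FRAG_SHIFT;
+ */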
+
+#define IEEE80211_NWID_LEN 32
+
+/*
+ * 802.11 rate set.
+ */
+#define IEEE80211_RATE_SIZE 8 /* 802.11 standard */
+#define IEEE80211_RATE_MAXSIZE 15 /* max rates we'll handle */
+
+#define WMM_NUM_AC 4 /* 4 AC categories */
+
+#define WMM_PARAM_ACI_M 0x60 /* Mask for ACI field */
+#define WMM_PARAM_ACI_S 5 /* Shift for ACI field */
+#define WMM_PARAM_ACM_M 0x10 /* Mask for ACM bit */
+#define WMM_PARAM_ACM_S 4 /* Shift for ACM bit */
+#define WMM_PARAM_AIFSN_M 0x0f /* Mask for aifsn field */
+#define WMM_PARAM_LOGCWMIN_M 0x0f /* Mask for CwMin field (in log) */
+#define WMM_PARAM_LOGCWMAX_M 0xf0 /* Mask for CwMax field (in log) */
+#define WMM_PARAM_LOGCWMAX_S 4 /* Shift for CwMax field */
+
+#define WMM_AC_TO_TID(_ac) ( \
+ ((_ac) == WMM_AC_VO) ? 6 : \
+ ((_ac) == WMM_AC_VI) ? 5 : \
+ ((_ac) == WMM_AC_BK) ? 1 : \
+ 0)
+
+#define TID_TO_WMM_AC(_tid) ( \
+ ((_tid) < 1) ? WMM_AC_BE : \
+ ((_tid) < 3) ? WMM_AC_BK : \
+ ((_tid) < 6) ? WMM_AC_VI : \
+ WMM_AC_VO)
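+
+/* e.g. TID_TO_WMM_AC(5) is WMM_AC_VI (TIDs 3-5 map to video) and
+ * WMM_AC_TO_TID(WMM_AC_VI) picks 5 as the representative TID; TIDs 6-7
+ * map to WMM_AC_VO, for which WMM_AC_TO_TID() returns 6.
+ */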
+/*
+ * Management information element payloads.
+ */
+
+enum {
+ IEEE80211_ELEMID_SSID = 0,
+ IEEE80211_ELEMID_RATES = 1,
+ IEEE80211_ELEMID_FHPARMS = 2,
+ IEEE80211_ELEMID_DSPARMS = 3,
+ IEEE80211_ELEMID_CFPARMS = 4,
+ IEEE80211_ELEMID_TIM = 5,
+ IEEE80211_ELEMID_IBSSPARMS = 6,
+ IEEE80211_ELEMID_COUNTRY = 7,
+ IEEE80211_ELEMID_CHALLENGE = 16,
+ /* 17-31 reserved for challenge text extension */
+ IEEE80211_ELEMID_PWRCNSTR = 32,
+ IEEE80211_ELEMID_PWRCAP = 33,
+ IEEE80211_ELEMID_TPCREQ = 34,
+ IEEE80211_ELEMID_TPCREP = 35,
+ IEEE80211_ELEMID_SUPPCHAN = 36,
+ IEEE80211_ELEMID_CHANSWITCH = 37,
+ IEEE80211_ELEMID_MEASREQ = 38,
+ IEEE80211_ELEMID_MEASREP = 39,
+ IEEE80211_ELEMID_QUIET = 40,
+ IEEE80211_ELEMID_IBSSDFS = 41,
+ IEEE80211_ELEMID_ERP = 42,
+ IEEE80211_ELEMID_HTCAP_ANA = 45, /* Address ANA, and non-ANA story, for interop. CL#171733 */
+ IEEE80211_ELEMID_RSN = 48,
+ IEEE80211_ELEMID_XRATES = 50,
+ IEEE80211_ELEMID_HTINFO_ANA = 61,
+#ifdef WAPI_ENABLE
+ IEEE80211_ELEMID_WAPI = 68,
+#endif
+ IEEE80211_ELEMID_TPC = 150,
+ IEEE80211_ELEMID_CCKM = 156,
+ IEEE80211_ELEMID_VENDOR = 221, /* vendor private */
+};
+
+#define ATH_OUI 0x7f0300 /* Atheros OUI */
+#define ATH_OUI_TYPE 0x01
+#define ATH_OUI_SUBTYPE 0x01
+#define ATH_OUI_VERSION 0x00
+
+#define WPA_OUI 0xf25000
+#define WPA_OUI_TYPE 0x01
+#define WPA_VERSION 1 /* current supported version */
+
+#define WPA_CSE_NULL 0x00
+#define WPA_CSE_WEP40 0x01
+#define WPA_CSE_TKIP 0x02
+#define WPA_CSE_CCMP 0x04
+#define WPA_CSE_WEP104 0x05
+
+#define WPA_ASE_NONE 0x00
+#define WPA_ASE_8021X_UNSPEC 0x01
+#define WPA_ASE_8021X_PSK 0x02
+
+#define RSN_OUI 0xac0f00
+#define RSN_VERSION 1 /* current supported version */
+
+#define RSN_CSE_NULL 0x00
+#define RSN_CSE_WEP40 0x01
+#define RSN_CSE_TKIP 0x02
+#define RSN_CSE_WRAP 0x03
+#define RSN_CSE_CCMP 0x04
+#define RSN_CSE_WEP104 0x05
+
+#define RSN_ASE_NONE 0x00
+#define RSN_ASE_8021X_UNSPEC 0x01
+#define RSN_ASE_8021X_PSK 0x02
+
+#define RSN_CAP_PREAUTH 0x01
+
+#define WMM_OUI 0xf25000
+#define WMM_OUI_TYPE 0x02
+#define WMM_INFO_OUI_SUBTYPE 0x00
+#define WMM_PARAM_OUI_SUBTYPE 0x01
+#define WMM_VERSION 1
+
+/* WMM stream classes */
+#define WMM_NUM_AC 4
+#define WMM_AC_BE 0 /* best effort */
+#define WMM_AC_BK 1 /* background */
+#define WMM_AC_VI 2 /* video */
+#define WMM_AC_VO 3 /* voice */
+
+/* TSPEC related */
+#define ACTION_CATEGORY_CODE_TSPEC 17
+#define ACTION_CODE_TSPEC_ADDTS 0
+#define ACTION_CODE_TSPEC_ADDTS_RESP 1
+#define ACTION_CODE_TSPEC_DELTS 2
+
+typedef enum {
+ TSPEC_STATUS_CODE_ADMISSION_ACCEPTED = 0,
+ TSPEC_STATUS_CODE_ADDTS_INVALID_PARAMS = 0x1,
+ TSPEC_STATUS_CODE_ADDTS_REQUEST_REFUSED = 0x3,
+ TSPEC_STATUS_CODE_UNSPECIFIED_QOS_RELATED_FAILURE = 0xC8,
+ TSPEC_STATUS_CODE_REQUESTED_REFUSED_POLICY_CONFIGURATION = 0xC9,
+ TSPEC_STATUS_CODE_INSUFFCIENT_BANDWIDTH = 0xCA,
+ TSPEC_STATUS_CODE_INVALID_PARAMS = 0xCB,
+ TSPEC_STATUS_CODE_DELTS_SENT = 0x30,
+ TSPEC_STATUS_CODE_DELTS_RECV = 0x31,
+} TSPEC_STATUS_CODE;
+
+#define TSPEC_TSID_MASK 0xF
+#define TSPEC_TSID_S 1
+
+/*
+ * WMM/802.11e Tspec Element
+ */
+typedef PREPACK struct wmm_tspec_ie_t {
+ A_UINT8 elementId;
+ A_UINT8 len;
+ A_UINT8 oui[3];
+ A_UINT8 ouiType;
+ A_UINT8 ouiSubType;
+ A_UINT8 version;
+ A_UINT16 tsInfo_info;
+ A_UINT8 tsInfo_reserved;
+ A_UINT16 nominalMSDU;
+ A_UINT16 maxMSDU;
+ A_UINT32 minServiceInt;
+ A_UINT32 maxServiceInt;
+ A_UINT32 inactivityInt;
+ A_UINT32 suspensionInt;
+ A_UINT32 serviceStartTime;
+ A_UINT32 minDataRate;
+ A_UINT32 meanDataRate;
+ A_UINT32 peakDataRate;
+ A_UINT32 maxBurstSize;
+ A_UINT32 delayBound;
+ A_UINT32 minPhyRate;
+ A_UINT16 sba;
+ A_UINT16 mediumTime;
+} POSTPACK WMM_TSPEC_IE;
+
+
+/*
+ * BEACON management packets
+ *
+ * octet timestamp[8]
+ * octet beacon interval[2]
+ * octet capability information[2]
+ * information element
+ * octet elemid
+ * octet length
+ * octet information[length]
+ */
+
+#define IEEE80211_BEACON_INTERVAL(beacon) \
+ ((beacon)[8] | ((beacon)[9] << 8))
+#define IEEE80211_BEACON_CAPABILITY(beacon) \
+ ((beacon)[10] | ((beacon)[11] << 8))
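+
+/* e.g. a beacon with beacon[8] == 0x64 and beacon[9] == 0x00 carries a
+ * little-endian interval of 0x0064, so IEEE80211_BEACON_INTERVAL() yields
+ * 100 time units.
+ */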
+
+#define IEEE80211_CAPINFO_ESS 0x0001
+#define IEEE80211_CAPINFO_IBSS 0x0002
+#define IEEE80211_CAPINFO_CF_POLLABLE 0x0004
+#define IEEE80211_CAPINFO_CF_POLLREQ 0x0008
+#define IEEE80211_CAPINFO_PRIVACY 0x0010
+#define IEEE80211_CAPINFO_SHORT_PREAMBLE 0x0020
+#define IEEE80211_CAPINFO_PBCC 0x0040
+#define IEEE80211_CAPINFO_CHNL_AGILITY 0x0080
+/* bits 8-9 are reserved */
+#define IEEE80211_CAPINFO_SHORT_SLOTTIME 0x0400
+#define IEEE80211_CAPINFO_APSD 0x0800
+/* bit 12 is reserved */
+#define IEEE80211_CAPINFO_DSSSOFDM 0x2000
+/* bits 14-15 are reserved */
+
+/*
+ * Authentication Modes
+ */
+
+enum ieee80211_authmode {
+ IEEE80211_AUTH_NONE = 0,
+ IEEE80211_AUTH_OPEN = 1,
+ IEEE80211_AUTH_SHARED = 2,
+ IEEE80211_AUTH_8021X = 3,
+ IEEE80211_AUTH_AUTO = 4, /* auto-select/accept */
+ /* NB: these are used only for ioctls */
+ IEEE80211_AUTH_WPA = 5, /* WPA/RSN w/ 802.1x */
+ IEEE80211_AUTH_WPA_PSK = 6, /* WPA/RSN w/ PSK */
+ IEEE80211_AUTH_WPA_CCKM = 7, /* WPA/RSN IE w/ CCKM */
+};
+
+#define IEEE80211_PS_MAX_QUEUE 50 /* Maximum number of buffers that can be queued for PS */
+
+#include "athendpack.h"
+
+#endif /* _NET80211_IEEE80211_H_ */
diff --git a/drivers/staging/ath6kl/wlan/include/ieee80211_node.h b/drivers/staging/ath6kl/wlan/include/ieee80211_node.h
new file mode 100644
index 000000000000..683deec87b2d
--- /dev/null
+++ b/drivers/staging/ath6kl/wlan/include/ieee80211_node.h
@@ -0,0 +1,93 @@
+//------------------------------------------------------------------------------
+// <copyright file="ieee80211_node.h" company="Atheros">
+// Copyright (c) 2004-2010 Atheros Corporation. All rights reserved.
+//
+//
+// Permission to use, copy, modify, and/or distribute this software for any
+// purpose with or without fee is hereby granted, provided that the above
+// copyright notice and this permission notice appear in all copies.
+//
+// THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+// WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+// MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+// ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+// WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+// ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+// OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+//
+//
+//------------------------------------------------------------------------------
+//==============================================================================
+// Author(s): ="Atheros"
+//==============================================================================
+#ifndef _IEEE80211_NODE_H_
+#define _IEEE80211_NODE_H_
+
+/*
+ * Node locking definitions.
+ */
+#define IEEE80211_NODE_LOCK_INIT(_nt) A_MUTEX_INIT(&(_nt)->nt_nodelock)
+#define IEEE80211_NODE_LOCK_DESTROY(_nt) if (A_IS_MUTEX_VALID(&(_nt)->nt_nodelock)) { \
+ A_MUTEX_DELETE(&(_nt)->nt_nodelock); }
+
+#define IEEE80211_NODE_LOCK(_nt) A_MUTEX_LOCK(&(_nt)->nt_nodelock)
+#define IEEE80211_NODE_UNLOCK(_nt) A_MUTEX_UNLOCK(&(_nt)->nt_nodelock)
+#define IEEE80211_NODE_LOCK_BH(_nt) A_MUTEX_LOCK(&(_nt)->nt_nodelock)
+#define IEEE80211_NODE_UNLOCK_BH(_nt) A_MUTEX_UNLOCK(&(_nt)->nt_nodelock)
+#define IEEE80211_NODE_LOCK_ASSERT(_nt)
+
+/*
+ * Node reference counting definitions.
+ *
+ * ieee80211_node_initref initialize the reference count to 1
+ * ieee80211_node_incref add a reference
+ * ieee80211_node_decref remove a reference
+ * ieee80211_node_dectestref remove a reference and return 1 if this
+ * is the last reference, otherwise 0
+ * ieee80211_node_refcnt reference count for printing (only)
+ */
+#define ieee80211_node_initref(_ni) ((_ni)->ni_refcnt = 1)
+#define ieee80211_node_incref(_ni) ((_ni)->ni_refcnt++)
+#define ieee80211_node_decref(_ni) ((_ni)->ni_refcnt--)
+#define ieee80211_node_dectestref(_ni) (((_ni)->ni_refcnt--) == 1)
+#define ieee80211_node_refcnt(_ni) ((_ni)->ni_refcnt)
+
+#define IEEE80211_NODE_HASHSIZE 32
+/* a simple hash of the last octet is enough given the variation in MAC addresses */
+#define IEEE80211_NODE_HASH(addr) \
+ (((const A_UINT8 *)(addr))[IEEE80211_ADDR_LEN - 1] % \
+ IEEE80211_NODE_HASHSIZE)
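+/* e.g. an address whose last octet is 0x5A hashes to 0x5A % 32 == 26 */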
+
+/*
+ * Table of ieee80211_node instances. Each ieee80211com
+ * has at least one for holding the scan candidates.
+ * When operating as an access point or in ibss mode there
+ * is a second table for associated stations or neighbors.
+ */
+struct ieee80211_node_table {
+ void *nt_wmip; /* back reference */
+ A_MUTEX_T nt_nodelock; /* on node table */
+ struct bss *nt_node_first; /* information of all nodes */
+ struct bss *nt_node_last; /* information of all nodes */
+ struct bss *nt_hash[IEEE80211_NODE_HASHSIZE];
+ const char *nt_name; /* for debugging */
+ A_UINT32 nt_scangen; /* gen# for timeout scan */
+#ifdef THREAD_X
+ A_TIMER nt_inact_timer;
+ A_UINT8 isTimerArmed; /* is the node timer armed */
+#endif
+ A_UINT32 nt_nodeAge; /* node aging time */
+#ifdef OS_ROAM_MANAGEMENT
+ A_UINT32 nt_si_gen; /* gen# for scan indication*/
+#endif
+};
+
+#ifdef THREAD_X
+#define WLAN_NODE_INACT_TIMEOUT_MSEC 20000
+#else
+#define WLAN_NODE_INACT_TIMEOUT_MSEC 120000
+#endif
+
+#define WLAN_NODE_INACT_CNT 4
+
+#endif /* _IEEE80211_NODE_H_ */
diff --git a/drivers/staging/ath6kl/wlan/src/wlan_node.c b/drivers/staging/ath6kl/wlan/src/wlan_node.c
new file mode 100644
index 000000000000..6ec4e48eb2fd
--- /dev/null
+++ b/drivers/staging/ath6kl/wlan/src/wlan_node.c
@@ -0,0 +1,636 @@
+//------------------------------------------------------------------------------
+// <copyright file="wlan_node.c" company="Atheros">
+// Copyright (c) 2004-2010 Atheros Corporation. All rights reserved.
+//
+//
+// Permission to use, copy, modify, and/or distribute this software for any
+// purpose with or without fee is hereby granted, provided that the above
+// copyright notice and this permission notice appear in all copies.
+//
+// THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+// WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+// MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+// ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+// WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+// ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+// OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+//
+//
+//------------------------------------------------------------------------------
+//==============================================================================
+// IEEE 802.11 node handling support.
+//
+// Author(s): ="Atheros"
+//==============================================================================
+#include <a_config.h>
+#include <athdefs.h>
+#include <a_types.h>
+#include <a_osapi.h>
+#define ATH_MODULE_NAME wlan
+#include <a_debug.h>
+#include "htc.h"
+#include "htc_api.h"
+#include <wmi.h>
+#include <ieee80211.h>
+#include <wlan_api.h>
+#include <wmi_api.h>
+#include <ieee80211_node.h>
+
+#define ATH_DEBUG_WLAN ATH_DEBUG_MAKE_MODULE_MASK(0)
+
+#ifdef ATH_DEBUG_MODULE
+
+static ATH_DEBUG_MASK_DESCRIPTION wlan_debug_desc[] = {
+ { ATH_DEBUG_WLAN , "General WLAN Node Tracing"},
+};
+
+ATH_DEBUG_INSTANTIATE_MODULE_VAR(wlan,
+ "wlan",
+ "WLAN Node Management",
+ ATH_DEBUG_MASK_DEFAULTS,
+ ATH_DEBUG_DESCRIPTION_COUNT(wlan_debug_desc),
+ wlan_debug_desc);
+
+#endif
+
+#ifdef THREAD_X
+static void wlan_node_timeout(A_ATH_TIMER arg);
+#endif
+
+static bss_t * _ieee80211_find_node (struct ieee80211_node_table *nt,
+ const A_UINT8 *macaddr);
+
+bss_t *
+wlan_node_alloc(struct ieee80211_node_table *nt, int wh_size)
+{
+ bss_t *ni;
+
+ ni = A_MALLOC_NOWAIT(sizeof(bss_t));
+
+ if (ni != NULL) {
+ if (wh_size)
+ {
+ ni->ni_buf = A_MALLOC_NOWAIT(wh_size);
+ if (ni->ni_buf == NULL) {
+ A_FREE(ni);
+ ni = NULL;
+ return ni;
+ }
+ }
+ } else {
+ return ni;
+ }
+
+ /* Make sure our lists are clean */
+ ni->ni_list_next = NULL;
+ ni->ni_list_prev = NULL;
+ ni->ni_hash_next = NULL;
+ ni->ni_hash_prev = NULL;
+
+ //
+ // ni_scangen was previously never initialized; across suspend/resume on
+ // Windows Mobile it could contain junk, which kept the scan list from
+ // being updated properly.
+ //
+ ni->ni_scangen = 0;
+
+#ifdef OS_ROAM_MANAGEMENT
+ ni->ni_si_gen = 0;
+#endif
+
+ return ni;
+}
+
+void
+wlan_node_free(bss_t *ni)
+{
+ if (ni->ni_buf != NULL) {
+ A_FREE(ni->ni_buf);
+ }
+ A_FREE(ni);
+}
+
+void
+wlan_setup_node(struct ieee80211_node_table *nt, bss_t *ni,
+ const A_UINT8 *macaddr)
+{
+ int hash;
+ A_UINT32 timeoutValue = 0;
+
+ A_MEMCPY(ni->ni_macaddr, macaddr, IEEE80211_ADDR_LEN);
+ hash = IEEE80211_NODE_HASH (macaddr);
+ ieee80211_node_initref (ni); /* mark referenced */
+
+ timeoutValue = nt->nt_nodeAge;
+
+ ni->ni_tstamp = A_GET_MS (timeoutValue);
+ ni->ni_actcnt = WLAN_NODE_INACT_CNT;
+
+ IEEE80211_NODE_LOCK_BH(nt);
+
+ /* Insert at the end of the node list */
+ ni->ni_list_next = NULL;
+ ni->ni_list_prev = nt->nt_node_last;
+ if(nt->nt_node_last != NULL)
+ {
+ nt->nt_node_last->ni_list_next = ni;
+ }
+ nt->nt_node_last = ni;
+ if(nt->nt_node_first == NULL)
+ {
+ nt->nt_node_first = ni;
+ }
+
+ /* Insert into the hash list i.e. the bucket */
+ if((ni->ni_hash_next = nt->nt_hash[hash]) != NULL)
+ {
+ nt->nt_hash[hash]->ni_hash_prev = ni;
+ }
+ ni->ni_hash_prev = NULL;
+ nt->nt_hash[hash] = ni;
+
+#ifdef THREAD_X
+ if (!nt->isTimerArmed) {
+ A_TIMEOUT_MS(&nt->nt_inact_timer, timeoutValue, 0);
+ nt->isTimerArmed = TRUE;
+ }
+#endif
+
+ IEEE80211_NODE_UNLOCK_BH(nt);
+}
+
+static bss_t *
+_ieee80211_find_node(struct ieee80211_node_table *nt,
+ const A_UINT8 *macaddr)
+{
+ bss_t *ni;
+ int hash;
+
+ IEEE80211_NODE_LOCK_ASSERT(nt);
+
+ hash = IEEE80211_NODE_HASH(macaddr);
+ for(ni = nt->nt_hash[hash]; ni; ni = ni->ni_hash_next) {
+ if (IEEE80211_ADDR_EQ(ni->ni_macaddr, macaddr)) {
+ ieee80211_node_incref(ni); /* mark referenced */
+ return ni;
+ }
+ }
+ return NULL;
+}
+
+bss_t *
+wlan_find_node(struct ieee80211_node_table *nt, const A_UINT8 *macaddr)
+{
+ bss_t *ni;
+
+ IEEE80211_NODE_LOCK(nt);
+ ni = _ieee80211_find_node(nt, macaddr);
+ IEEE80211_NODE_UNLOCK(nt);
+ return ni;
+}
+
+/*
+ * Reclaim a node. If this is the last reference count then
+ * do the normal free work. Otherwise remove it from the node
+ * table and mark it gone by clearing the back-reference.
+ */
+void
+wlan_node_reclaim(struct ieee80211_node_table *nt, bss_t *ni)
+{
+ IEEE80211_NODE_LOCK(nt);
+
+ if(ni->ni_list_prev == NULL)
+ {
+ /* First in list so fix the list head */
+ nt->nt_node_first = ni->ni_list_next;
+ }
+ else
+ {
+ ni->ni_list_prev->ni_list_next = ni->ni_list_next;
+ }
+
+ if(ni->ni_list_next == NULL)
+ {
+ /* Last in list so fix list tail */
+ nt->nt_node_last = ni->ni_list_prev;
+ }
+ else
+ {
+ ni->ni_list_next->ni_list_prev = ni->ni_list_prev;
+ }
+
+ if(ni->ni_hash_prev == NULL)
+ {
+ /* First in list so fix the list head */
+ int hash;
+ hash = IEEE80211_NODE_HASH(ni->ni_macaddr);
+ nt->nt_hash[hash] = ni->ni_hash_next;
+ }
+ else
+ {
+ ni->ni_hash_prev->ni_hash_next = ni->ni_hash_next;
+ }
+
+ if(ni->ni_hash_next != NULL)
+ {
+ ni->ni_hash_next->ni_hash_prev = ni->ni_hash_prev;
+ }
+ wlan_node_free(ni);
+
+ IEEE80211_NODE_UNLOCK(nt);
+}
+
+static void
+wlan_node_dec_free(bss_t *ni)
+{
+ if (ieee80211_node_dectestref(ni)) {
+ wlan_node_free(ni);
+ }
+}
+
+void
+wlan_free_allnodes(struct ieee80211_node_table *nt)
+{
+ bss_t *ni;
+
+ while ((ni = nt->nt_node_first) != NULL) {
+ wlan_node_reclaim(nt, ni);
+ }
+}
+
+void
+wlan_iterate_nodes(struct ieee80211_node_table *nt, wlan_node_iter_func *f,
+ void *arg)
+{
+ bss_t *ni;
+ A_UINT32 gen;
+
+ gen = ++nt->nt_scangen;
+
+ IEEE80211_NODE_LOCK(nt);
+ for (ni = nt->nt_node_first; ni; ni = ni->ni_list_next) {
+ if (ni->ni_scangen != gen) {
+ ni->ni_scangen = gen;
+ (void) ieee80211_node_incref(ni);
+ (*f)(arg, ni);
+ wlan_node_dec_free(ni);
+ }
+ }
+ IEEE80211_NODE_UNLOCK(nt);
+}
+
+/*
+ * Node table support.
+ */
+void
+wlan_node_table_init(void *wmip, struct ieee80211_node_table *nt)
+{
+ int i;
+
+ AR_DEBUG_PRINTF(ATH_DEBUG_WLAN, ("node table = 0x%lx\n", (unsigned long)nt));
+ IEEE80211_NODE_LOCK_INIT(nt);
+
+ A_REGISTER_MODULE_DEBUG_INFO(wlan);
+
+ nt->nt_node_first = nt->nt_node_last = NULL;
+ for(i = 0; i < IEEE80211_NODE_HASHSIZE; i++)
+ {
+ nt->nt_hash[i] = NULL;
+ }
+
+#ifdef THREAD_X
+ A_INIT_TIMER(&nt->nt_inact_timer, wlan_node_timeout, nt);
+ nt->isTimerArmed = FALSE;
+#endif
+ nt->nt_wmip = wmip;
+ nt->nt_nodeAge = WLAN_NODE_INACT_TIMEOUT_MSEC;
+
+ //
+ // nt_scangen was previously never initialized; across suspend/resume on
+ // Windows Mobile it could contain junk, which kept the scan list from
+ // being updated properly.
+ //
+ nt->nt_scangen = 0;
+
+#ifdef OS_ROAM_MANAGEMENT
+ nt->nt_si_gen = 0;
+#endif
+}
+
+void
+wlan_set_nodeage(struct ieee80211_node_table *nt, A_UINT32 nodeAge)
+{
+ nt->nt_nodeAge = nodeAge;
+ return;
+}
+void
+wlan_refresh_inactive_nodes (struct ieee80211_node_table *nt)
+{
+#ifdef THREAD_X
+ bss_t *bss, *nextBss;
+ A_UINT8 myBssid[IEEE80211_ADDR_LEN], reArmTimer = FALSE;
+
+ wmi_get_current_bssid(nt->nt_wmip, myBssid);
+
+ bss = nt->nt_node_first;
+ while (bss != NULL)
+ {
+ nextBss = bss->ni_list_next;
+ if (A_MEMCMP(myBssid, bss->ni_macaddr, sizeof(myBssid)) != 0)
+ {
+ /*
+ * free up all but the current bss - if set
+ */
+ wlan_node_reclaim(nt, bss);
+
+ }
+ bss = nextBss;
+ }
+#else
+ bss_t *bss, *nextBss;
+ A_UINT8 myBssid[IEEE80211_ADDR_LEN];
+ A_UINT32 timeoutValue = 0;
+ A_UINT32 now = A_GET_MS(0);
+ timeoutValue = nt->nt_nodeAge;
+
+ wmi_get_current_bssid(nt->nt_wmip, myBssid);
+
+ bss = nt->nt_node_first;
+ while (bss != NULL)
+ {
+ nextBss = bss->ni_list_next;
+ if (A_MEMCMP(myBssid, bss->ni_macaddr, sizeof(myBssid)) != 0)
+ {
+
+ if (bss->ni_tstamp <= now || --bss->ni_actcnt == 0)
+ {
+ /*
+ * free up all but the current bss - if set
+ */
+ wlan_node_reclaim(nt, bss);
+ }
+ }
+ bss = nextBss;
+ }
+#endif
+}
+
+#ifdef THREAD_X
+static void
+wlan_node_timeout (A_ATH_TIMER arg)
+{
+ struct ieee80211_node_table *nt = (struct ieee80211_node_table *)arg;
+ bss_t *bss, *nextBss;
+ A_UINT8 myBssid[IEEE80211_ADDR_LEN], reArmTimer = FALSE;
+ A_UINT32 timeoutValue = 0;
+
+ timeoutValue = nt->nt_nodeAge;
+
+ wmi_get_current_bssid(nt->nt_wmip, myBssid);
+
+ bss = nt->nt_node_first;
+ while (bss != NULL)
+ {
+ nextBss = bss->ni_list_next;
+ if (A_MEMCMP(myBssid, bss->ni_macaddr, sizeof(myBssid)) != 0)
+ {
+
+ if (bss->ni_tstamp <= A_GET_MS(0))
+ {
+ /*
+ * free up all but the current bss - if set
+ */
+ wlan_node_reclaim(nt, bss);
+ }
+ else
+ {
+ /*
+ * Re-arm timer, only when we have a bss other than
+ * current bss AND it is not aged-out.
+ */
+ reArmTimer = TRUE;
+ }
+ }
+ bss = nextBss;
+ }
+
+ if (reArmTimer)
+ A_TIMEOUT_MS (&nt->nt_inact_timer, timeoutValue, 0);
+
+ nt->isTimerArmed = reArmTimer;
+}
+#endif
+
+void
+wlan_node_table_cleanup(struct ieee80211_node_table *nt)
+{
+#ifdef THREAD_X
+ A_UNTIMEOUT(&nt->nt_inact_timer);
+ A_DELETE_TIMER(&nt->nt_inact_timer);
+#endif
+ wlan_free_allnodes(nt);
+ IEEE80211_NODE_LOCK_DESTROY(nt);
+}
+
+bss_t *
+wlan_find_Ssidnode (struct ieee80211_node_table *nt, A_UCHAR *pSsid,
+ A_UINT32 ssidLength, A_BOOL bIsWPA2, A_BOOL bMatchSSID)
+{
+ bss_t *ni = NULL;
+ A_UCHAR *pIESsid = NULL;
+
+ IEEE80211_NODE_LOCK (nt);
+
+ for (ni = nt->nt_node_first; ni; ni = ni->ni_list_next) {
+ pIESsid = ni->ni_cie.ie_ssid;
+ if (pIESsid[1] <= 32) {
+
+ // Step 1 : Check SSID
+ if (0x00 == memcmp (pSsid, &pIESsid[2], ssidLength)) {
+
+ //
+ // Step 2.1 : If bMatchSSID is TRUE, return the matched SSID
+ // profile; otherwise check whether the BSS is WPA2 or WPA.
+ //
+ if (TRUE == bMatchSSID) {
+ ieee80211_node_incref (ni); /* mark referenced */
+ IEEE80211_NODE_UNLOCK (nt);
+ return ni;
+ }
+
+ // Step 2 : if SSID matches, check WPA or WPA2
+ if (TRUE == bIsWPA2 && NULL != ni->ni_cie.ie_rsn) {
+ ieee80211_node_incref (ni); /* mark referenced */
+ IEEE80211_NODE_UNLOCK (nt);
+ return ni;
+ }
+ if (FALSE == bIsWPA2 && NULL != ni->ni_cie.ie_wpa) {
+ ieee80211_node_incref(ni); /* mark referenced */
+ IEEE80211_NODE_UNLOCK (nt);
+ return ni;
+ }
+ }
+ }
+ }
+
+ IEEE80211_NODE_UNLOCK (nt);
+
+ return NULL;
+}
+
+void
+wlan_node_return (struct ieee80211_node_table *nt, bss_t *ni)
+{
+ IEEE80211_NODE_LOCK (nt);
+ wlan_node_dec_free (ni);
+ IEEE80211_NODE_UNLOCK (nt);
+}
+
+void
+wlan_node_remove_core (struct ieee80211_node_table *nt, bss_t *ni)
+{
+ if(ni->ni_list_prev == NULL)
+ {
+ /* First in list so fix the list head */
+ nt->nt_node_first = ni->ni_list_next;
+ }
+ else
+ {
+ ni->ni_list_prev->ni_list_next = ni->ni_list_next;
+ }
+
+ if(ni->ni_list_next == NULL)
+ {
+ /* Last in list so fix list tail */
+ nt->nt_node_last = ni->ni_list_prev;
+ }
+ else
+ {
+ ni->ni_list_next->ni_list_prev = ni->ni_list_prev;
+ }
+
+ if(ni->ni_hash_prev == NULL)
+ {
+ /* First in list so fix the list head */
+ int hash;
+ hash = IEEE80211_NODE_HASH(ni->ni_macaddr);
+ nt->nt_hash[hash] = ni->ni_hash_next;
+ }
+ else
+ {
+ ni->ni_hash_prev->ni_hash_next = ni->ni_hash_next;
+ }
+
+ if(ni->ni_hash_next != NULL)
+ {
+ ni->ni_hash_next->ni_hash_prev = ni->ni_hash_prev;
+ }
+}
+
+bss_t *
+wlan_node_remove(struct ieee80211_node_table *nt, A_UINT8 *bssid)
+{
+ bss_t *bss, *nextBss;
+
+ IEEE80211_NODE_LOCK(nt);
+
+ bss = nt->nt_node_first;
+
+ while (bss != NULL)
+ {
+ nextBss = bss->ni_list_next;
+
+ if (A_MEMCMP(bssid, bss->ni_macaddr, 6) == 0)
+ {
+ wlan_node_remove_core (nt, bss);
+ IEEE80211_NODE_UNLOCK(nt);
+ return bss;
+ }
+
+ bss = nextBss;
+ }
+
+ IEEE80211_NODE_UNLOCK(nt);
+ return NULL;
+}
+
+bss_t *
+wlan_find_matching_Ssidnode (struct ieee80211_node_table *nt, A_UCHAR *pSsid,
+ A_UINT32 ssidLength, A_UINT32 dot11AuthMode, A_UINT32 authMode,
+ A_UINT32 pairwiseCryptoType, A_UINT32 grpwiseCryptoTyp)
+{
+ bss_t *ni = NULL;
+ bss_t *best_ni = NULL;
+ A_UCHAR *pIESsid = NULL;
+
+ IEEE80211_NODE_LOCK (nt);
+
+ for (ni = nt->nt_node_first; ni; ni = ni->ni_list_next) {
+ pIESsid = ni->ni_cie.ie_ssid;
+ if (pIESsid[1] <= 32) {
+
+ // Step 1 : Check SSID
+ if (0x00 == memcmp (pSsid, &pIESsid[2], ssidLength)) {
+
+ if (ni->ni_cie.ie_capInfo & 0x10)
+ {
+
+ if ((NULL != ni->ni_cie.ie_rsn) && (WPA2_PSK_AUTH == authMode))
+ {
+ /* WPA2 */
+ if (NULL == best_ni)
+ {
+ best_ni = ni;
+ }
+ else if (ni->ni_rssi > best_ni->ni_rssi)
+ {
+ best_ni = ni;
+ }
+ }
+ else if ((NULL != ni->ni_cie.ie_wpa) && (WPA_PSK_AUTH == authMode))
+ {
+ /* WPA */
+ if (NULL == best_ni)
+ {
+ best_ni = ni;
+ }
+ else if (ni->ni_rssi > best_ni->ni_rssi)
+ {
+ best_ni = ni;
+ }
+ }
+ else if (WEP_CRYPT == pairwiseCryptoType)
+ {
+ /* WEP */
+ if (NULL == best_ni)
+ {
+ best_ni = ni;
+ }
+ else if (ni->ni_rssi > best_ni->ni_rssi)
+ {
+ best_ni = ni;
+ }
+ }
+ }
+ else
+ {
+ /* open AP */
+ if ((OPEN_AUTH == authMode) && (NONE_CRYPT == pairwiseCryptoType))
+ {
+ if (NULL == best_ni)
+ {
+ best_ni = ni;
+ }
+ else if (ni->ni_rssi > best_ni->ni_rssi)
+ {
+ best_ni = ni;
+ }
+ }
+ }
+ }
+ }
+ }
+
+ IEEE80211_NODE_UNLOCK (nt);
+
+ return best_ni;
+}
+
diff --git a/drivers/staging/ath6kl/wlan/src/wlan_recv_beacon.c b/drivers/staging/ath6kl/wlan/src/wlan_recv_beacon.c
new file mode 100644
index 000000000000..f4926f215bbd
--- /dev/null
+++ b/drivers/staging/ath6kl/wlan/src/wlan_recv_beacon.c
@@ -0,0 +1,200 @@
+//------------------------------------------------------------------------------
+// <copyright file="wlan_recv_beacon.c" company="Atheros">
+// Copyright (c) 2004-2010 Atheros Corporation. All rights reserved.
+//
+//
+// Permission to use, copy, modify, and/or distribute this software for any
+// purpose with or without fee is hereby granted, provided that the above
+// copyright notice and this permission notice appear in all copies.
+//
+// THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+// WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+// MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+// ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+// WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+// ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+// OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+//
+//
+//------------------------------------------------------------------------------
+//==============================================================================
+// IEEE 802.11 input handling.
+//
+// Author(s): ="Atheros"
+//==============================================================================
+
+#include "a_config.h"
+#include "athdefs.h"
+#include "a_types.h"
+#include "a_osapi.h"
+#include <wmi.h>
+#include <ieee80211.h>
+#include <wlan_api.h>
+
+#define IEEE80211_VERIFY_LENGTH(_len, _minlen) do { \
+ if ((_len) < (_minlen)) { \
+ return A_EINVAL; \
+ } \
+} while (0)
+
+#define IEEE80211_VERIFY_ELEMENT(__elem, __maxlen) do { \
+ if ((__elem) == NULL) { \
+ return A_EINVAL; \
+ } \
+ if ((__elem)[1] > (__maxlen)) { \
+ return A_EINVAL; \
+ } \
+} while (0)
+
+
+/* unaligned little endian access */
+#define LE_READ_2(p) \
+ ((A_UINT16) \
+ ((((A_UINT8 *)(p))[0] ) | (((A_UINT8 *)(p))[1] << 8)))
+
+#define LE_READ_4(p) \
+ ((A_UINT32) \
+ ((((A_UINT8 *)(p))[0] ) | (((A_UINT8 *)(p))[1] << 8) | \
+ (((A_UINT8 *)(p))[2] << 16) | (((A_UINT8 *)(p))[3] << 24)))
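+
+/* e.g. a WPA vendor IE carries the bytes 0x00 0x50 0xF2 0x01 after the
+ * element header; LE_READ_4() over them yields 0x01F25000, which is
+ * (WPA_OUI_TYPE << 24) | WPA_OUI - the comparison made in iswpaoui() below.
+ */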
+
+
+static int __inline
+iswpaoui(const A_UINT8 *frm)
+{
+ return frm[1] > 3 && LE_READ_4(frm+2) == ((WPA_OUI_TYPE<<24)|WPA_OUI);
+}
+
+static int __inline
+iswmmoui(const A_UINT8 *frm)
+{
+ return frm[1] > 3 && LE_READ_4(frm+2) == ((WMM_OUI_TYPE<<24)|WMM_OUI);
+}
+
+/* unused functions for now */
+#if 0
+static int __inline
+iswmmparam(const A_UINT8 *frm)
+{
+ return frm[1] > 5 && frm[6] == WMM_PARAM_OUI_SUBTYPE;
+}
+
+static int __inline
+iswmminfo(const A_UINT8 *frm)
+{
+ return frm[1] > 5 && frm[6] == WMM_INFO_OUI_SUBTYPE;
+}
+#endif
+
+static int __inline
+isatherosoui(const A_UINT8 *frm)
+{
+ return frm[1] > 3 && LE_READ_4(frm+2) == ((ATH_OUI_TYPE<<24)|ATH_OUI);
+}
+
+static int __inline
+iswscoui(const A_UINT8 *frm)
+{
+ return frm[1] > 3 && LE_READ_4(frm+2) == ((0x04<<24)|WPA_OUI);
+}
+
+A_STATUS
+wlan_parse_beacon(A_UINT8 *buf, int framelen, struct ieee80211_common_ie *cie)
+{
+ A_UINT8 *frm, *efrm;
+ A_UINT8 elemid_ssid = FALSE;
+
+ frm = buf;
+ efrm = (A_UINT8 *) (frm + framelen);
+
+ /*
+ * beacon/probe response frame format
+ * [8] time stamp
+ * [2] beacon interval
+ * [2] capability information
+ * [tlv] ssid
+ * [tlv] supported rates
+ * [tlv] country information
+ * [tlv] parameter set (FH/DS)
+ * [tlv] erp information
+ * [tlv] extended supported rates
+ * [tlv] WMM
+ * [tlv] WPA or RSN
+ * [tlv] Atheros Advanced Capabilities
+ */
+ IEEE80211_VERIFY_LENGTH(efrm - frm, 12);
+ A_MEMZERO(cie, sizeof(*cie));
+
+ cie->ie_tstamp = frm; frm += 8;
+ cie->ie_beaconInt = A_LE2CPU16(*(A_UINT16 *)frm); frm += 2;
+ cie->ie_capInfo = A_LE2CPU16(*(A_UINT16 *)frm); frm += 2;
+ cie->ie_chan = 0;
+
+ while (frm < efrm) {
+ switch (*frm) {
+ case IEEE80211_ELEMID_SSID:
+ if (!elemid_ssid) {
+ cie->ie_ssid = frm;
+ elemid_ssid = TRUE;
+ }
+ break;
+ case IEEE80211_ELEMID_RATES:
+ cie->ie_rates = frm;
+ break;
+ case IEEE80211_ELEMID_COUNTRY:
+ cie->ie_country = frm;
+ break;
+ case IEEE80211_ELEMID_FHPARMS:
+ break;
+ case IEEE80211_ELEMID_DSPARMS:
+ cie->ie_chan = frm[2];
+ break;
+ case IEEE80211_ELEMID_TIM:
+ cie->ie_tim = frm;
+ break;
+ case IEEE80211_ELEMID_IBSSPARMS:
+ break;
+ case IEEE80211_ELEMID_XRATES:
+ cie->ie_xrates = frm;
+ break;
+ case IEEE80211_ELEMID_ERP:
+ if (frm[1] != 1) {
+ //A_PRINTF("Discarding ERP Element - Bad Len\n");
+ return A_EINVAL;
+ }
+ cie->ie_erp = frm[2];
+ break;
+ case IEEE80211_ELEMID_RSN:
+ cie->ie_rsn = frm;
+ break;
+ case IEEE80211_ELEMID_HTCAP_ANA:
+ cie->ie_htcap = frm;
+ break;
+ case IEEE80211_ELEMID_HTINFO_ANA:
+ cie->ie_htop = frm;
+ break;
+#ifdef WAPI_ENABLE
+ case IEEE80211_ELEMID_WAPI:
+ cie->ie_wapi = frm;
+ break;
+#endif
+ case IEEE80211_ELEMID_VENDOR:
+ if (iswpaoui(frm)) {
+ cie->ie_wpa = frm;
+ } else if (iswmmoui(frm)) {
+ cie->ie_wmm = frm;
+ } else if (isatherosoui(frm)) {
+ cie->ie_ath = frm;
+ } else if(iswscoui(frm)) {
+ cie->ie_wsc = frm;
+ }
+ break;
+ default:
+ break;
+ }
+ frm += frm[1] + 2;
+ }
+ IEEE80211_VERIFY_ELEMENT(cie->ie_rates, IEEE80211_RATE_MAXSIZE);
+ IEEE80211_VERIFY_ELEMENT(cie->ie_ssid, IEEE80211_NWID_LEN);
+
+ return A_OK;
+}
diff --git a/drivers/staging/ath6kl/wlan/src/wlan_utils.c b/drivers/staging/ath6kl/wlan/src/wlan_utils.c
new file mode 100644
index 000000000000..1eee7bab3e50
--- /dev/null
+++ b/drivers/staging/ath6kl/wlan/src/wlan_utils.c
@@ -0,0 +1,61 @@
+//------------------------------------------------------------------------------
+// <copyright file="wlan_utils.c" company="Atheros">
+// Copyright (c) 2004-2010 Atheros Corporation. All rights reserved.
+//
+//
+// Permission to use, copy, modify, and/or distribute this software for any
+// purpose with or without fee is hereby granted, provided that the above
+// copyright notice and this permission notice appear in all copies.
+//
+// THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+// WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+// MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+// ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+// WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+// ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+// OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+//
+//
+//------------------------------------------------------------------------------
+//==============================================================================
+// This module implements frequently used WLAN utilities
+//
+// Author(s): ="Atheros"
+//==============================================================================
+#include <a_config.h>
+#include <athdefs.h>
+#include <a_types.h>
+#include <a_osapi.h>
+
+/*
+ * Converts an IEEE channel number to a frequency in MHz.
+ */
+A_UINT16
+wlan_ieee2freq(int chan)
+{
+ if (chan == 14) {
+ return 2484;
+ }
+ if (chan < 14) { /* 0-13 */
+ return (2407 + (chan*5));
+ }
+ if (chan < 27) { /* 15-26 */
+ return (2512 + ((chan-15)*20));
+ }
+ return (5000 + (chan*5));
+}
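+
+/* e.g. wlan_ieee2freq(6) == 2407 + 6*5 == 2437 MHz (2.4 GHz band) and
+ * wlan_ieee2freq(36) == 5000 + 36*5 == 5180 MHz (5 GHz band).
+ */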
+
+/*
+ * Converts MHz frequency to IEEE channel number.
+ */
+A_UINT32
+wlan_freq2ieee(A_UINT16 freq)
+{
+ if (freq == 2484)
+ return 14;
+ if (freq < 2484)
+ return (freq - 2407) / 5;
+ if (freq < 5000)
+ return 15 + ((freq - 2512) / 20);
+ return (freq - 5000) / 5;
+}
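+
+/* e.g. wlan_freq2ieee(2437) == (2437 - 2407) / 5 == 6 and
+ * wlan_freq2ieee(5180) == (5180 - 5000) / 5 == 36.
+ */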
diff --git a/drivers/staging/ath6kl/wmi/wmi.c b/drivers/staging/ath6kl/wmi/wmi.c
new file mode 100644
index 000000000000..7800778099bd
--- /dev/null
+++ b/drivers/staging/ath6kl/wmi/wmi.c
@@ -0,0 +1,6670 @@
+//------------------------------------------------------------------------------
+// Copyright (c) 2004-2010 Atheros Corporation. All rights reserved.
+//
+//
+// Permission to use, copy, modify, and/or distribute this software for any
+// purpose with or without fee is hereby granted, provided that the above
+// copyright notice and this permission notice appear in all copies.
+//
+// THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+// WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+// MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+// ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+// WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+// ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+// OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+//
+//
+//------------------------------------------------------------------------------
+//==============================================================================
+// This module implements the hardware independent layer of the
+// Wireless Module Interface (WMI) protocol.
+//
+// Author(s): ="Atheros"
+//==============================================================================
+
+#include <a_config.h>
+#include <athdefs.h>
+#include <a_types.h>
+#include <a_osapi.h>
+#include "htc.h"
+#include "htc_api.h"
+#include "wmi.h"
+#include <wlan_api.h>
+#include <wmi_api.h>
+#include <ieee80211.h>
+#include <ieee80211_node.h>
+#include "dset_api.h"
+#include "gpio_api.h"
+#include "wmi_host.h"
+#include "a_drv.h"
+#include "a_drv_api.h"
+#define ATH_MODULE_NAME wmi
+#include "a_debug.h"
+#include "dbglog_api.h"
+#include "roaming.h"
+
+#define ATH_DEBUG_WMI ATH_DEBUG_MAKE_MODULE_MASK(0)
+
+#ifdef ATH_DEBUG_MODULE
+
+static ATH_DEBUG_MASK_DESCRIPTION wmi_debug_desc[] = {
+ { ATH_DEBUG_WMI , "General WMI Tracing"},
+};
+
+ATH_DEBUG_INSTANTIATE_MODULE_VAR(wmi,
+ "wmi",
+ "Wireless Module Interface",
+ ATH_DEBUG_MASK_DEFAULTS,
+ ATH_DEBUG_DESCRIPTION_COUNT(wmi_debug_desc),
+ wmi_debug_desc);
+
+#endif
+
+#ifndef REXOS
+#define DBGARG _A_FUNCNAME_
+#define DBGFMT "%s() : "
+#define DBG_WMI ATH_DEBUG_WMI
+#define DBG_ERROR ATH_DEBUG_ERR
+#define DBG_WMI2 ATH_DEBUG_WMI
+#define A_DPRINTF AR_DEBUG_PRINTF
+#endif
+
+static A_STATUS wmi_ready_event_rx(struct wmi_t *wmip, A_UINT8 *datap, int len);
+
+static A_STATUS wmi_connect_event_rx(struct wmi_t *wmip, A_UINT8 *datap,
+ int len);
+static A_STATUS wmi_disconnect_event_rx(struct wmi_t *wmip, A_UINT8 *datap,
+ int len);
+
+static A_STATUS wmi_tkip_micerr_event_rx(struct wmi_t *wmip, A_UINT8 *datap,
+ int len);
+static A_STATUS wmi_bssInfo_event_rx(struct wmi_t *wmip, A_UINT8 *datap,
+ int len);
+static A_STATUS wmi_opt_frame_event_rx(struct wmi_t *wmip, A_UINT8 *datap,
+ int len);
+static A_STATUS wmi_pstream_timeout_event_rx(struct wmi_t *wmip, A_UINT8 *datap,
+ int len);
+static A_STATUS wmi_sync_point(struct wmi_t *wmip);
+
+static A_STATUS wmi_bitrate_reply_rx(struct wmi_t *wmip, A_UINT8 *datap,
+ int len);
+static A_STATUS wmi_ratemask_reply_rx(struct wmi_t *wmip, A_UINT8 *datap,
+ int len);
+static A_STATUS wmi_channelList_reply_rx(struct wmi_t *wmip, A_UINT8 *datap,
+ int len);
+static A_STATUS wmi_regDomain_event_rx(struct wmi_t *wmip, A_UINT8 *datap,
+ int len);
+static A_STATUS wmi_txPwr_reply_rx(struct wmi_t *wmip, A_UINT8 *datap, int len);
+static A_STATUS wmi_neighborReport_event_rx(struct wmi_t *wmip, A_UINT8 *datap,
+ int len);
+
+static A_STATUS wmi_dset_open_req_rx(struct wmi_t *wmip, A_UINT8 *datap,
+ int len);
+#ifdef CONFIG_HOST_DSET_SUPPORT
+static A_STATUS wmi_dset_close_rx(struct wmi_t *wmip, A_UINT8 *datap, int len);
+static A_STATUS wmi_dset_data_req_rx(struct wmi_t *wmip, A_UINT8 *datap,
+ int len);
+#endif /* CONFIG_HOST_DSET_SUPPORT */
+
+
+static A_STATUS wmi_scanComplete_rx(struct wmi_t *wmip, A_UINT8 *datap,
+ int len);
+static A_STATUS wmi_errorEvent_rx(struct wmi_t *wmip, A_UINT8 *datap, int len);
+static A_STATUS wmi_statsEvent_rx(struct wmi_t *wmip, A_UINT8 *datap, int len);
+static A_STATUS wmi_rssiThresholdEvent_rx(struct wmi_t *wmip, A_UINT8 *datap, int len);
+static A_STATUS wmi_hbChallengeResp_rx(struct wmi_t *wmip, A_UINT8 *datap, int len);
+static A_STATUS wmi_reportErrorEvent_rx(struct wmi_t *wmip, A_UINT8 *datap, int len);
+static A_STATUS wmi_cac_event_rx(struct wmi_t *wmip, A_UINT8 *datap, int len);
+static A_STATUS wmi_channel_change_event_rx(struct wmi_t *wmip, A_UINT8 *datap, int len);
+static A_STATUS wmi_roam_tbl_event_rx(struct wmi_t *wmip, A_UINT8 *datap,
+ int len);
+static A_STATUS wmi_roam_data_event_rx(struct wmi_t *wmip, A_UINT8 *datap,
+ int len);
+static A_STATUS wmi_get_wow_list_event_rx(struct wmi_t *wmip, A_UINT8 *datap,
+ int len);
+static A_STATUS
+wmi_get_pmkid_list_event_rx(struct wmi_t *wmip, A_UINT8 *datap, A_UINT32 len);
+
+static A_STATUS
+wmi_set_params_event_rx(struct wmi_t *wmip, A_UINT8 *datap, A_UINT32 len);
+
+static A_STATUS
+wmi_acm_reject_event_rx(struct wmi_t *wmip, A_UINT8 *datap, A_UINT32 len);
+
+#ifdef CONFIG_HOST_GPIO_SUPPORT
+static A_STATUS wmi_gpio_intr_rx(struct wmi_t *wmip, A_UINT8 *datap, int len);
+static A_STATUS wmi_gpio_data_rx(struct wmi_t *wmip, A_UINT8 *datap, int len);
+static A_STATUS wmi_gpio_ack_rx(struct wmi_t *wmip, A_UINT8 *datap, int len);
+#endif /* CONFIG_HOST_GPIO_SUPPORT */
+
+#ifdef CONFIG_HOST_TCMD_SUPPORT
+static A_STATUS
+wmi_tcmd_test_report_rx(struct wmi_t *wmip, A_UINT8 *datap, int len);
+#endif
+
+static A_STATUS
+wmi_txRetryErrEvent_rx(struct wmi_t *wmip, A_UINT8 *datap, int len);
+
+static A_STATUS
+wmi_snrThresholdEvent_rx(struct wmi_t *wmip, A_UINT8 *datap, int len);
+
+static A_STATUS
+wmi_lqThresholdEvent_rx(struct wmi_t *wmip, A_UINT8 *datap, int len);
+
+static A_BOOL
+wmi_is_bitrate_index_valid(struct wmi_t *wmip, A_INT32 rateIndex);
+
+static A_STATUS
+wmi_aplistEvent_rx(struct wmi_t *wmip, A_UINT8 *datap, int len);
+
+static A_STATUS
+wmi_dbglog_event_rx(struct wmi_t *wmip, A_UINT8 *datap, int len);
+
+static A_STATUS wmi_keepalive_reply_rx(struct wmi_t *wmip, A_UINT8 *datap, int len);
+
+A_STATUS wmi_cmd_send_xtnd(struct wmi_t *wmip, void *osbuf, WMIX_COMMAND_ID cmdId,
+ WMI_SYNC_FLAG syncflag);
+
+A_UINT8 ar6000_get_upper_threshold(A_INT16 rssi, SQ_THRESHOLD_PARAMS *sq_thresh, A_UINT32 size);
+A_UINT8 ar6000_get_lower_threshold(A_INT16 rssi, SQ_THRESHOLD_PARAMS *sq_thresh, A_UINT32 size);
+
+void wmi_cache_configure_rssithreshold(struct wmi_t *wmip, WMI_RSSI_THRESHOLD_PARAMS_CMD *rssiCmd);
+void wmi_cache_configure_snrthreshold(struct wmi_t *wmip, WMI_SNR_THRESHOLD_PARAMS_CMD *snrCmd);
+static A_STATUS wmi_send_rssi_threshold_params(struct wmi_t *wmip,
+ WMI_RSSI_THRESHOLD_PARAMS_CMD *rssiCmd);
+static A_STATUS wmi_send_snr_threshold_params(struct wmi_t *wmip,
+ WMI_SNR_THRESHOLD_PARAMS_CMD *snrCmd);
+#if defined(CONFIG_TARGET_PROFILE_SUPPORT)
+static A_STATUS
+wmi_prof_count_rx(struct wmi_t *wmip, A_UINT8 *datap, int len);
+#endif /* CONFIG_TARGET_PROFILE_SUPPORT */
+
+static A_STATUS wmi_pspoll_event_rx(struct wmi_t *wmip, A_UINT8 *datap,
+ int len);
+static A_STATUS wmi_dtimexpiry_event_rx(struct wmi_t *wmip, A_UINT8 *datap,
+ int len);
+
+static A_STATUS wmi_peer_node_event_rx (struct wmi_t *wmip, A_UINT8 *datap,
+ int len);
+#ifdef ATH_AR6K_11N_SUPPORT
+static A_STATUS wmi_addba_req_event_rx(struct wmi_t *, A_UINT8 *, int);
+static A_STATUS wmi_addba_resp_event_rx(struct wmi_t *, A_UINT8 *, int);
+static A_STATUS wmi_delba_req_event_rx(struct wmi_t *, A_UINT8 *, int);
+static A_STATUS wmi_btcoex_config_event_rx(struct wmi_t *wmip, A_UINT8 *datap, int len);
+static A_STATUS wmi_btcoex_stats_event_rx(struct wmi_t *wmip, A_UINT8 *datap, int len);
+#endif
+static A_STATUS wmi_hci_event_rx(struct wmi_t *, A_UINT8 *, int);
+
+#ifdef WAPI_ENABLE
+static A_STATUS wmi_wapi_rekey_event_rx(struct wmi_t *wmip, A_UINT8 *datap,
+ int len);
+#endif
+
+#if defined(UNDER_CE)
+#if defined(NDIS51_MINIPORT)
+unsigned int processDot11Hdr = 0;
+#else
+unsigned int processDot11Hdr = 1;
+#endif
+#else
+extern unsigned int processDot11Hdr;
+#endif
+
+int wps_enable;
+static const A_INT32 wmi_rateTable[][2] = {
+ //{W/O SGI, with SGI}
+ {1000, 1000},
+ {2000, 2000},
+ {5500, 5500},
+ {11000, 11000},
+ {6000, 6000},
+ {9000, 9000},
+ {12000, 12000},
+ {18000, 18000},
+ {24000, 24000},
+ {36000, 36000},
+ {48000, 48000},
+ {54000, 54000},
+ {6500, 7200},
+ {13000, 14400},
+ {19500, 21700},
+ {26000, 28900},
+ {39000, 43300},
+ {52000, 57800},
+ {58500, 65000},
+ {65000, 72200},
+ {13500, 15000},
+ {27000, 30000},
+ {40500, 45000},
+ {54000, 60000},
+ {81000, 90000},
+ {108000, 120000},
+ {121500, 135000},
+ {135000, 150000},
+ {0, 0}};
+
+#define MODE_A_SUPPORT_RATE_START ((A_INT32) 4)
+#define MODE_A_SUPPORT_RATE_STOP ((A_INT32) 11)
+
+#define MODE_GONLY_SUPPORT_RATE_START MODE_A_SUPPORT_RATE_START
+#define MODE_GONLY_SUPPORT_RATE_STOP MODE_A_SUPPORT_RATE_STOP
+
+#define MODE_B_SUPPORT_RATE_START ((A_INT32) 0)
+#define MODE_B_SUPPORT_RATE_STOP ((A_INT32) 3)
+
+#define MODE_G_SUPPORT_RATE_START ((A_INT32) 0)
+#define MODE_G_SUPPORT_RATE_STOP ((A_INT32) 11)
+
+#define MODE_GHT20_SUPPORT_RATE_START ((A_INT32) 0)
+#define MODE_GHT20_SUPPORT_RATE_STOP ((A_INT32) 19)
+
+#define MAX_NUMBER_OF_SUPPORT_RATES (MODE_GHT20_SUPPORT_RATE_STOP + 1)
+
+/* 802.1d to AC mapping. Refer pg 57 of WMM-test-plan-v1.2 */
+const A_UINT8 up_to_ac[]= {
+ WMM_AC_BE,
+ WMM_AC_BK,
+ WMM_AC_BK,
+ WMM_AC_BE,
+ WMM_AC_VI,
+ WMM_AC_VI,
+ WMM_AC_VO,
+ WMM_AC_VO,
+ };
+
+#include "athstartpack.h"
+
+/* This stuff is used when we want simple layer-3 visibility */
+typedef PREPACK struct _iphdr {
+ A_UINT8 ip_ver_hdrlen; /* version and hdr length */
+ A_UINT8 ip_tos; /* type of service */
+ A_UINT16 ip_len; /* total length */
+ A_UINT16 ip_id; /* identification */
+ A_INT16 ip_off; /* fragment offset field */
+#define IP_DF 0x4000 /* dont fragment flag */
+#define IP_MF 0x2000 /* more fragments flag */
+#define IP_OFFMASK 0x1fff /* mask for fragmenting bits */
+ A_UINT8 ip_ttl; /* time to live */
+ A_UINT8 ip_p; /* protocol */
+ A_UINT16 ip_sum; /* checksum */
+ A_UINT8 ip_src[4]; /* source and dest address */
+ A_UINT8 ip_dst[4];
+} POSTPACK iphdr;
+
+#include "athendpack.h"
+
+static A_INT16 rssi_event_value = 0;
+static A_INT16 snr_event_value = 0;
+
+A_BOOL is_probe_ssid = FALSE;
+
+void *
+wmi_init(void *devt)
+{
+ struct wmi_t *wmip;
+
+ A_REGISTER_MODULE_DEBUG_INFO(wmi);
+
+ wmip = A_MALLOC (sizeof(struct wmi_t));
+ if (wmip == NULL) {
+ return (NULL);
+ }
+ A_MEMZERO(wmip, sizeof(struct wmi_t ));
+#ifdef THREAD_X
+ INIT_WMI_LOCK(wmip);
+#else
+ A_MUTEX_INIT(&wmip->wmi_lock);
+#endif
+ wmip->wmi_devt = devt;
+ wlan_node_table_init(wmip, &wmip->wmi_scan_table);
+ wmi_qos_state_init(wmip);
+
+ wmip->wmi_powerMode = REC_POWER;
+ wmip->wmi_phyMode = WMI_11G_MODE;
+
+ wmip->wmi_pair_crypto_type = NONE_CRYPT;
+ wmip->wmi_grp_crypto_type = NONE_CRYPT;
+
+ wmip->wmi_ht_allowed[A_BAND_24GHZ] = 1;
+ wmip->wmi_ht_allowed[A_BAND_5GHZ] = 1;
+
+ return (wmip);
+}
+
+void
+wmi_qos_state_init(struct wmi_t *wmip)
+{
+ A_UINT8 i;
+
+ if (wmip == NULL) {
+ return;
+ }
+ LOCK_WMI(wmip);
+
+ /* Initialize QoS States */
+ wmip->wmi_numQoSStream = 0;
+
+ wmip->wmi_fatPipeExists = 0;
+
+ for (i=0; i < WMM_NUM_AC; i++) {
+ wmip->wmi_streamExistsForAC[i]=0;
+ }
+
+ UNLOCK_WMI(wmip);
+
+ A_WMI_SET_NUMDATAENDPTS(wmip->wmi_devt, 1);
+}
+
+void
+wmi_set_control_ep(struct wmi_t * wmip, HTC_ENDPOINT_ID eid)
+{
+ A_ASSERT( eid != ENDPOINT_UNUSED);
+ wmip->wmi_endpoint_id = eid;
+}
+
+HTC_ENDPOINT_ID
+wmi_get_control_ep(struct wmi_t * wmip)
+{
+ return(wmip->wmi_endpoint_id);
+}
+
+void
+wmi_shutdown(struct wmi_t *wmip)
+{
+ if (wmip != NULL) {
+ wlan_node_table_cleanup(&wmip->wmi_scan_table);
+ if (A_IS_MUTEX_VALID(&wmip->wmi_lock)) {
+#ifdef THREAD_X
+ DELETE_WMI_LOCK(&wmip);
+#else
+ A_MUTEX_DELETE(&wmip->wmi_lock);
+#endif
+ }
+ A_FREE(wmip);
+ }
+}
+
+/*
+ * Performs DIX to 802.3 encapsulation for transmit packets.
+ * Operates in place on the passed-in buffer and returns A_OK on success
+ * or an error status on failure.
+ * Assumes the entire DIX header is contiguous and that there is
+ * enough room in the buffer for an 802.3 MAC header and LLC+SNAP headers.
+ */
+A_STATUS
+wmi_dix_2_dot3(struct wmi_t *wmip, void *osbuf)
+{
+ A_UINT8 *datap;
+ A_UINT16 typeorlen;
+ ATH_MAC_HDR macHdr;
+ ATH_LLC_SNAP_HDR *llcHdr;
+
+ A_ASSERT(osbuf != NULL);
+
+ if (A_NETBUF_HEADROOM(osbuf) <
+ (sizeof(ATH_LLC_SNAP_HDR) + sizeof(WMI_DATA_HDR)))
+ {
+ return A_NO_MEMORY;
+ }
+
+ datap = A_NETBUF_DATA(osbuf);
+
+ typeorlen = *(A_UINT16 *)(datap + ATH_MAC_LEN + ATH_MAC_LEN);
+
+ if (!IS_ETHERTYPE(A_BE2CPU16(typeorlen))) {
+ /*
+ * packet is already in 802.3 format - return success
+ */
+ A_DPRINTF(DBG_WMI, (DBGFMT "packet already 802.3\n", DBGARG));
+ return (A_OK);
+ }
+
+ /*
+ * Save mac fields and length to be inserted later
+ */
+ A_MEMCPY(macHdr.dstMac, datap, ATH_MAC_LEN);
+ A_MEMCPY(macHdr.srcMac, datap + ATH_MAC_LEN, ATH_MAC_LEN);
+ macHdr.typeOrLen = A_CPU2BE16(A_NETBUF_LEN(osbuf) - sizeof(ATH_MAC_HDR) +
+ sizeof(ATH_LLC_SNAP_HDR));
+
+ /*
+ * Make room for LLC+SNAP headers
+ */
+ if (A_NETBUF_PUSH(osbuf, sizeof(ATH_LLC_SNAP_HDR)) != A_OK) {
+ return A_NO_MEMORY;
+ }
+ datap = A_NETBUF_DATA(osbuf);
+
+ A_MEMCPY(datap, &macHdr, sizeof (ATH_MAC_HDR));
+
+ llcHdr = (ATH_LLC_SNAP_HDR *)(datap + sizeof(ATH_MAC_HDR));
+ llcHdr->dsap = 0xAA;
+ llcHdr->ssap = 0xAA;
+ llcHdr->cntl = 0x03;
+ llcHdr->orgCode[0] = 0x0;
+ llcHdr->orgCode[1] = 0x0;
+ llcHdr->orgCode[2] = 0x0;
+ llcHdr->etherType = typeorlen;
+
+ return (A_OK);
+}
+
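+/*
+ * For reference, the in-place transform performed by wmi_dix_2_dot3()
+ * (sizes in octets; the LLC+SNAP values are the ones written above):
+ *
+ *   DIX:   | dst (6) | src (6) | EtherType (2) | payload |
+ *   802.3: | dst (6) | src (6) | length (2) | AA AA 03 00 00 00 | EtherType (2) | payload |
+ *
+ * where length = payload length + sizeof(ATH_LLC_SNAP_HDR), stored big-endian.
+ */
+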
+A_STATUS wmi_meta_add(struct wmi_t *wmip, void *osbuf, A_UINT8 *pVersion,void *pTxMetaS)
+{
+ switch(*pVersion){
+ case 0:
+ return (A_OK);
+ case WMI_META_VERSION_1:
+ {
+ WMI_TX_META_V1 *pV1= NULL;
+ A_ASSERT(osbuf != NULL);
+ if (A_NETBUF_PUSH(osbuf, WMI_MAX_TX_META_SZ) != A_OK) {
+ return A_NO_MEMORY;
+ }
+
+ pV1 = (WMI_TX_META_V1 *)A_NETBUF_DATA(osbuf);
+ /* the pktID is used in conjunction with txComplete messages
+ * allowing the target to notify which tx requests have been
+ * completed and how. */
+ pV1->pktID = 0;
+ /* the ratePolicyID allows the host to specify which rate policy
+ * to use for transmitting this packet. 0 means use default behavior. */
+ pV1->ratePolicyID = 0;
+ A_ASSERT(pVersion != NULL);
+ /* the version must be used to populate the meta field of the WMI_DATA_HDR */
+ *pVersion = WMI_META_VERSION_1;
+ return (A_OK);
+ }
+#ifdef CONFIG_CHECKSUM_OFFLOAD
+ case WMI_META_VERSION_2:
+ {
+ WMI_TX_META_V2 *pV2 ;
+ A_ASSERT(osbuf != NULL);
+ if (A_NETBUF_PUSH(osbuf, WMI_MAX_TX_META_SZ) != A_OK) {
+ return A_NO_MEMORY;
+ }
+ pV2 = (WMI_TX_META_V2 *)A_NETBUF_DATA(osbuf);
+ A_MEMCPY(pV2,(WMI_TX_META_V2 *)pTxMetaS,sizeof(WMI_TX_META_V2));
+ return (A_OK);
+ }
+#endif
+ default:
+ return (A_OK);
+ }
+}
+
+/* Adds a WMI data header */
+A_STATUS
+wmi_data_hdr_add(struct wmi_t *wmip, void *osbuf, A_UINT8 msgType, A_BOOL bMoreData,
+ WMI_DATA_HDR_DATA_TYPE data_type,A_UINT8 metaVersion, void *pTxMetaS)
+{
+ WMI_DATA_HDR *dtHdr;
+// A_UINT8 metaVersion = 0;
+ A_STATUS status;
+
+ A_ASSERT(osbuf != NULL);
+
+ /* adds the meta data field after the wmi data hdr. If metaVersion
+ * comes back as 0 then no meta field was added. */
+ if ((status = wmi_meta_add(wmip, osbuf, &metaVersion,pTxMetaS)) != A_OK) {
+ return status;
+ }
+
+ if (A_NETBUF_PUSH(osbuf, sizeof(WMI_DATA_HDR)) != A_OK) {
+ return A_NO_MEMORY;
+ }
+
+ dtHdr = (WMI_DATA_HDR *)A_NETBUF_DATA(osbuf);
+ A_MEMZERO(dtHdr, sizeof(WMI_DATA_HDR));
+
+ WMI_DATA_HDR_SET_MSG_TYPE(dtHdr, msgType);
+ WMI_DATA_HDR_SET_DATA_TYPE(dtHdr, data_type);
+
+ if (bMoreData) {
+ WMI_DATA_HDR_SET_MORE_BIT(dtHdr);
+ }
+
+ WMI_DATA_HDR_SET_META(dtHdr, metaVersion);
+ //dtHdr->rssi = 0;
+
+ return (A_OK);
+}
+
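+/*
+ * A minimal sketch (hypothetical helper, not used by the driver) of the
+ * worst-case headroom a tx path has to reserve in front of a DIX frame:
+ * wmi_dix_2_dot3() pushes the LLC+SNAP header, then wmi_data_hdr_add()
+ * pushes the optional tx meta block followed by the WMI data header,
+ * giving an on-the-wire layout of
+ *   | WMI_DATA_HDR | meta (optional) | 802.3 MAC | LLC+SNAP | payload |
+ */
+static A_UINT32
+example_wmi_tx_headroom(void)
+{
+ return (sizeof(WMI_DATA_HDR) + /* pushed last, sits first on the wire */
+ WMI_MAX_TX_META_SZ + /* meta v1/v2, if a meta version is used */
+ sizeof(ATH_LLC_SNAP_HDR)); /* added by wmi_dix_2_dot3() */
+}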
+
+A_UINT8 wmi_implicit_create_pstream(struct wmi_t *wmip, void *osbuf, A_UINT32 layer2Priority, A_BOOL wmmEnabled)
+{
+ A_UINT8 *datap;
+ A_UINT8 trafficClass = WMM_AC_BE;
+ A_UINT16 ipType = IP_ETHERTYPE;
+ WMI_DATA_HDR *dtHdr;
+ A_BOOL streamExists = FALSE;
+ A_UINT8 userPriority;
+ A_UINT32 hdrsize, metasize;
+ ATH_LLC_SNAP_HDR *llcHdr;
+
+ WMI_CREATE_PSTREAM_CMD cmd;
+
+ A_ASSERT(osbuf != NULL);
+
+ //
+ // Initialize header size
+ //
+ hdrsize = 0;
+
+ datap = A_NETBUF_DATA(osbuf);
+ dtHdr = (WMI_DATA_HDR *)datap;
+ metasize = (WMI_DATA_HDR_GET_META(dtHdr))? WMI_MAX_TX_META_SZ : 0;
+
+ if (!wmmEnabled)
+ {
+ /* If WMM is disabled all traffic goes as BE traffic */
+ userPriority = 0;
+ }
+ else
+ {
+ if (processDot11Hdr)
+ {
+ hdrsize = A_ROUND_UP(sizeof(struct ieee80211_qosframe),sizeof(A_UINT32));
+ llcHdr = (ATH_LLC_SNAP_HDR *)(datap + sizeof(WMI_DATA_HDR) + metasize +
+ hdrsize);
+
+
+ }
+ else
+ {
+ llcHdr = (ATH_LLC_SNAP_HDR *)(datap + sizeof(WMI_DATA_HDR) + metasize +
+ sizeof(ATH_MAC_HDR));
+ }
+
+ if (llcHdr->etherType == A_CPU2BE16(ipType))
+ {
+ /* Extract the endpoint info from the TOS field in the IP header */
+
+ userPriority = wmi_determine_userPriority (((A_UINT8 *)llcHdr) + sizeof(ATH_LLC_SNAP_HDR),layer2Priority);
+ }
+ else
+ {
+ userPriority = layer2Priority & 0x7;
+ }
+ }
+
+
+ /* workaround for WMM S5 */
+ if ((WMM_AC_VI == wmip->wmi_traffic_class) && ((5 == userPriority) || (4 == userPriority)))
+ {
+ userPriority = 1;
+ }
+
+ trafficClass = convert_userPriority_to_trafficClass(userPriority);
+
+ WMI_DATA_HDR_SET_UP(dtHdr, userPriority);
+ /* lower 3-bits are 802.1d priority */
+ //dtHdr->info |= (userPriority & WMI_DATA_HDR_UP_MASK) << WMI_DATA_HDR_UP_SHIFT;
+
+ LOCK_WMI(wmip);
+ streamExists = wmip->wmi_fatPipeExists;
+ UNLOCK_WMI(wmip);
+
+ if (!(streamExists & (1 << trafficClass)))
+ {
+
+ A_MEMZERO(&cmd, sizeof(cmd));
+ cmd.trafficClass = trafficClass;
+ cmd.userPriority = userPriority;
+ cmd.inactivityInt = WMI_IMPLICIT_PSTREAM_INACTIVITY_INT;
+ /* Implicit streams are created with TSID 0xFF */
+
+ cmd.tsid = WMI_IMPLICIT_PSTREAM;
+ wmi_create_pstream_cmd(wmip, &cmd);
+ }
+
+ return trafficClass;
+}
+
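+/*
+ * A sketch of the conventional 802.1D user-priority to WMM access-category
+ * mapping that convert_userPriority_to_trafficClass() is expected to apply
+ * (illustrative only; assumes the usual WMM_AC_* constants - the real
+ * lookup lives outside this file):
+ */
+static const A_UINT8 example_up_to_ac[8] = {
+ WMM_AC_BE, /* UP 0 */
+ WMM_AC_BK, /* UP 1 */
+ WMM_AC_BK, /* UP 2 */
+ WMM_AC_BE, /* UP 3 */
+ WMM_AC_VI, /* UP 4 */
+ WMM_AC_VI, /* UP 5 */
+ WMM_AC_VO, /* UP 6 */
+ WMM_AC_VO, /* UP 7 */
+};
+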
+A_STATUS
+wmi_dot11_hdr_add (struct wmi_t *wmip, void *osbuf, NETWORK_TYPE mode)
+{
+ A_UINT8 *datap;
+ A_UINT16 typeorlen;
+ ATH_MAC_HDR macHdr;
+ ATH_LLC_SNAP_HDR *llcHdr;
+ struct ieee80211_frame *wh;
+ A_UINT32 hdrsize;
+
+ A_ASSERT(osbuf != NULL);
+
+ if (A_NETBUF_HEADROOM(osbuf) <
+ (sizeof(struct ieee80211_qosframe) + sizeof(ATH_LLC_SNAP_HDR) + sizeof(WMI_DATA_HDR)))
+ {
+ return A_NO_MEMORY;
+ }
+
+ datap = A_NETBUF_DATA(osbuf);
+
+ typeorlen = *(A_UINT16 *)(datap + ATH_MAC_LEN + ATH_MAC_LEN);
+
+ if (!IS_ETHERTYPE(A_BE2CPU16(typeorlen))) {
+ /*
+ * packet is already in 802.3 format - return success
+ */
+ A_DPRINTF(DBG_WMI, (DBGFMT "packet already 802.3\n", DBGARG));
+ goto AddDot11Hdr;
+ }
+
+ /*
+ * Save mac fields and length to be inserted later
+ */
+ A_MEMCPY(macHdr.dstMac, datap, ATH_MAC_LEN);
+ A_MEMCPY(macHdr.srcMac, datap + ATH_MAC_LEN, ATH_MAC_LEN);
+ macHdr.typeOrLen = A_CPU2BE16(A_NETBUF_LEN(osbuf) - sizeof(ATH_MAC_HDR) +
+ sizeof(ATH_LLC_SNAP_HDR));
+
+ // Remove the Ethernet hdr
+ A_NETBUF_PULL(osbuf, sizeof(ATH_MAC_HDR));
+ /*
+ * Make room for LLC+SNAP headers
+ */
+ if (A_NETBUF_PUSH(osbuf, sizeof(ATH_LLC_SNAP_HDR)) != A_OK) {
+ return A_NO_MEMORY;
+ }
+ datap = A_NETBUF_DATA(osbuf);
+
+ llcHdr = (ATH_LLC_SNAP_HDR *)(datap);
+ llcHdr->dsap = 0xAA;
+ llcHdr->ssap = 0xAA;
+ llcHdr->cntl = 0x03;
+ llcHdr->orgCode[0] = 0x0;
+ llcHdr->orgCode[1] = 0x0;
+ llcHdr->orgCode[2] = 0x0;
+ llcHdr->etherType = typeorlen;
+
+AddDot11Hdr:
+ /* Make room for 802.11 hdr */
+ if (wmip->wmi_is_wmm_enabled)
+ {
+ hdrsize = A_ROUND_UP(sizeof(struct ieee80211_qosframe),sizeof(A_UINT32));
+ if (A_NETBUF_PUSH(osbuf, hdrsize) != A_OK)
+ {
+ return A_NO_MEMORY;
+ }
+ wh = (struct ieee80211_frame *) A_NETBUF_DATA(osbuf);
+ wh->i_fc[0] = IEEE80211_FC0_SUBTYPE_QOS;
+ }
+ else
+ {
+ hdrsize = A_ROUND_UP(sizeof(struct ieee80211_frame),sizeof(A_UINT32));
+ if (A_NETBUF_PUSH(osbuf, hdrsize) != A_OK)
+ {
+ return A_NO_MEMORY;
+ }
+ wh = (struct ieee80211_frame *) A_NETBUF_DATA(osbuf);
+ wh->i_fc[0] = IEEE80211_FC0_SUBTYPE_DATA;
+ }
+ /* Setup the SA & DA */
+ IEEE80211_ADDR_COPY(wh->i_addr2, macHdr.srcMac);
+
+ if (mode == INFRA_NETWORK) {
+ IEEE80211_ADDR_COPY(wh->i_addr3, macHdr.dstMac);
+ }
+ else if (mode == ADHOC_NETWORK) {
+ IEEE80211_ADDR_COPY(wh->i_addr1, macHdr.dstMac);
+ }
+
+ return (A_OK);
+}
+
+A_STATUS
+wmi_dot11_hdr_remove(struct wmi_t *wmip, void *osbuf)
+{
+ A_UINT8 *datap;
+ struct ieee80211_frame *pwh,wh;
+ A_UINT8 type,subtype;
+ ATH_LLC_SNAP_HDR *llcHdr;
+ ATH_MAC_HDR macHdr;
+ A_UINT32 hdrsize;
+
+ A_ASSERT(osbuf != NULL);
+ datap = A_NETBUF_DATA(osbuf);
+
+ pwh = (struct ieee80211_frame *)datap;
+ type = pwh->i_fc[0] & IEEE80211_FC0_TYPE_MASK;
+ subtype = pwh->i_fc[0] & IEEE80211_FC0_SUBTYPE_MASK;
+
+ A_MEMCPY((A_UINT8 *)&wh, datap, sizeof(struct ieee80211_frame));
+
+ /* strip off the 802.11 hdr*/
+ if (subtype == IEEE80211_FC0_SUBTYPE_QOS) {
+ hdrsize = A_ROUND_UP(sizeof(struct ieee80211_qosframe),sizeof(A_UINT32));
+ A_NETBUF_PULL(osbuf, hdrsize);
+ } else if (subtype == IEEE80211_FC0_SUBTYPE_DATA) {
+ A_NETBUF_PULL(osbuf, sizeof(struct ieee80211_frame));
+ }
+
+ datap = A_NETBUF_DATA(osbuf);
+ llcHdr = (ATH_LLC_SNAP_HDR *)(datap);
+
+ macHdr.typeOrLen = llcHdr->etherType;
+ A_MEMZERO(macHdr.dstMac, sizeof(macHdr.dstMac));
+ A_MEMZERO(macHdr.srcMac, sizeof(macHdr.srcMac));
+
+ switch (wh.i_fc[1] & IEEE80211_FC1_DIR_MASK) {
+ case IEEE80211_FC1_DIR_NODS:
+ IEEE80211_ADDR_COPY(macHdr.dstMac, wh.i_addr1);
+ IEEE80211_ADDR_COPY(macHdr.srcMac, wh.i_addr2);
+ break;
+ case IEEE80211_FC1_DIR_TODS:
+ IEEE80211_ADDR_COPY(macHdr.dstMac, wh.i_addr3);
+ IEEE80211_ADDR_COPY(macHdr.srcMac, wh.i_addr2);
+ break;
+ case IEEE80211_FC1_DIR_FROMDS:
+ IEEE80211_ADDR_COPY(macHdr.dstMac, wh.i_addr1);
+ IEEE80211_ADDR_COPY(macHdr.srcMac, wh.i_addr3);
+ break;
+ case IEEE80211_FC1_DIR_DSTODS:
+ break;
+ }
+
+ // Remove the LLC Hdr.
+ A_NETBUF_PULL(osbuf, sizeof(ATH_LLC_SNAP_HDR));
+
+ // Insert the ATH MAC hdr.
+
+ A_NETBUF_PUSH(osbuf, sizeof(ATH_MAC_HDR));
+ datap = A_NETBUF_DATA(osbuf);
+
+ A_MEMCPY (datap, &macHdr, sizeof(ATH_MAC_HDR));
+
+ return A_OK;
+}
+
+/*
+ * performs 802.3 to DIX encapsulation for received packets.
+ * Assumes the entire 802.3 header is contiguous.
+ */
+A_STATUS
+wmi_dot3_2_dix(void *osbuf)
+{
+ A_UINT8 *datap;
+ ATH_MAC_HDR macHdr;
+ ATH_LLC_SNAP_HDR *llcHdr;
+
+ A_ASSERT(osbuf != NULL);
+ datap = A_NETBUF_DATA(osbuf);
+
+ A_MEMCPY(&macHdr, datap, sizeof(ATH_MAC_HDR));
+ llcHdr = (ATH_LLC_SNAP_HDR *)(datap + sizeof(ATH_MAC_HDR));
+ macHdr.typeOrLen = llcHdr->etherType;
+
+ if (A_NETBUF_PULL(osbuf, sizeof(ATH_LLC_SNAP_HDR)) != A_OK) {
+ return A_NO_MEMORY;
+ }
+
+ datap = A_NETBUF_DATA(osbuf);
+
+ A_MEMCPY(datap, &macHdr, sizeof (ATH_MAC_HDR));
+
+ return (A_OK);
+}
+
+/*
+ * Removes a WMI data header
+ */
+A_STATUS
+wmi_data_hdr_remove(struct wmi_t *wmip, void *osbuf)
+{
+ A_ASSERT(osbuf != NULL);
+
+ return (A_NETBUF_PULL(osbuf, sizeof(WMI_DATA_HDR)));
+}
+
+void
+wmi_iterate_nodes(struct wmi_t *wmip, wlan_node_iter_func *f, void *arg)
+{
+ wlan_iterate_nodes(&wmip->wmi_scan_table, f, arg);
+}
+
+/*
+ * WMI Extended Event received from Target.
+ */
+A_STATUS
+wmi_control_rx_xtnd(struct wmi_t *wmip, void *osbuf)
+{
+ WMIX_CMD_HDR *cmd;
+ A_UINT16 id;
+ A_UINT8 *datap;
+ A_UINT32 len;
+ A_STATUS status = A_OK;
+
+ if (A_NETBUF_LEN(osbuf) < sizeof(WMIX_CMD_HDR)) {
+ A_DPRINTF(DBG_WMI, (DBGFMT "bad packet 1\n", DBGARG));
+ wmip->wmi_stats.cmd_len_err++;
+ return A_ERROR;
+ }
+
+ cmd = (WMIX_CMD_HDR *)A_NETBUF_DATA(osbuf);
+ id = cmd->commandId;
+
+ if (A_NETBUF_PULL(osbuf, sizeof(WMIX_CMD_HDR)) != A_OK) {
+ A_DPRINTF(DBG_WMI, (DBGFMT "bad packet 2\n", DBGARG));
+ wmip->wmi_stats.cmd_len_err++;
+ return A_ERROR;
+ }
+
+ datap = A_NETBUF_DATA(osbuf);
+ len = A_NETBUF_LEN(osbuf);
+
+ switch (id) {
+ case (WMIX_DSETOPENREQ_EVENTID):
+ status = wmi_dset_open_req_rx(wmip, datap, len);
+ break;
+#ifdef CONFIG_HOST_DSET_SUPPORT
+ case (WMIX_DSETCLOSE_EVENTID):
+ status = wmi_dset_close_rx(wmip, datap, len);
+ break;
+ case (WMIX_DSETDATAREQ_EVENTID):
+ status = wmi_dset_data_req_rx(wmip, datap, len);
+ break;
+#endif /* CONFIG_HOST_DSET_SUPPORT */
+#ifdef CONFIG_HOST_GPIO_SUPPORT
+ case (WMIX_GPIO_INTR_EVENTID):
+ wmi_gpio_intr_rx(wmip, datap, len);
+ break;
+ case (WMIX_GPIO_DATA_EVENTID):
+ wmi_gpio_data_rx(wmip, datap, len);
+ break;
+ case (WMIX_GPIO_ACK_EVENTID):
+ wmi_gpio_ack_rx(wmip, datap, len);
+ break;
+#endif /* CONFIG_HOST_GPIO_SUPPORT */
+ case (WMIX_HB_CHALLENGE_RESP_EVENTID):
+ wmi_hbChallengeResp_rx(wmip, datap, len);
+ break;
+ case (WMIX_DBGLOG_EVENTID):
+ wmi_dbglog_event_rx(wmip, datap, len);
+ break;
+#if defined(CONFIG_TARGET_PROFILE_SUPPORT)
+ case (WMIX_PROF_COUNT_EVENTID):
+ wmi_prof_count_rx(wmip, datap, len);
+ break;
+#endif /* CONFIG_TARGET_PROFILE_SUPPORT */
+ default:
+ A_DPRINTF(DBG_WMI|DBG_ERROR,
+ (DBGFMT "Unknown id 0x%x\n", DBGARG, id));
+ wmip->wmi_stats.cmd_id_err++;
+ status = A_ERROR;
+ break;
+ }
+
+ return status;
+}
+
+/*
+ * Control Path
+ */
+A_UINT32 cmdRecvNum;
+
+A_STATUS
+wmi_control_rx(struct wmi_t *wmip, void *osbuf)
+{
+ WMI_CMD_HDR *cmd;
+ A_UINT16 id;
+ A_UINT8 *datap;
+ A_UINT32 len, i, loggingReq;
+ A_STATUS status = A_OK;
+
+ A_ASSERT(osbuf != NULL);
+ if (A_NETBUF_LEN(osbuf) < sizeof(WMI_CMD_HDR)) {
+ A_NETBUF_FREE(osbuf);
+ A_DPRINTF(DBG_WMI, (DBGFMT "bad packet 1\n", DBGARG));
+ wmip->wmi_stats.cmd_len_err++;
+ return A_ERROR;
+ }
+
+ cmd = (WMI_CMD_HDR *)A_NETBUF_DATA(osbuf);
+ id = cmd->commandId;
+
+ if (A_NETBUF_PULL(osbuf, sizeof(WMI_CMD_HDR)) != A_OK) {
+ A_NETBUF_FREE(osbuf);
+ A_DPRINTF(DBG_WMI, (DBGFMT "bad packet 2\n", DBGARG));
+ wmip->wmi_stats.cmd_len_err++;
+ return A_ERROR;
+ }
+
+ datap = A_NETBUF_DATA(osbuf);
+ len = A_NETBUF_LEN(osbuf);
+
+ loggingReq = 0;
+
+ ar6000_get_driver_cfg(wmip->wmi_devt,
+ AR6000_DRIVER_CFG_LOG_RAW_WMI_MSGS,
+ &loggingReq);
+
+ if(loggingReq) {
+ AR_DEBUG_PRINTF(ATH_DEBUG_WMI, ("WMI %d \n",id));
+ AR_DEBUG_PRINTF(ATH_DEBUG_WMI, ("WMI recv, MsgNo %d : ", cmdRecvNum));
+ for(i = 0; i < len; i++)
+ AR_DEBUG_PRINTF(ATH_DEBUG_WMI, ("%x ", datap[i]));
+ AR_DEBUG_PRINTF(ATH_DEBUG_WMI, ("\n"));
+ }
+
+ LOCK_WMI(wmip);
+ cmdRecvNum++;
+ UNLOCK_WMI(wmip);
+
+ switch (id) {
+ case (WMI_GET_BITRATE_CMDID):
+ A_DPRINTF(DBG_WMI, (DBGFMT "WMI_GET_BITRATE_CMDID\n", DBGARG));
+ status = wmi_bitrate_reply_rx(wmip, datap, len);
+ break;
+ case (WMI_GET_CHANNEL_LIST_CMDID):
+ A_DPRINTF(DBG_WMI, (DBGFMT "WMI_GET_CHANNEL_LIST_CMDID\n", DBGARG));
+ status = wmi_channelList_reply_rx(wmip, datap, len);
+ break;
+ case (WMI_GET_TX_PWR_CMDID):
+ A_DPRINTF(DBG_WMI, (DBGFMT "WMI_GET_TX_PWR_CMDID\n", DBGARG));
+ status = wmi_txPwr_reply_rx(wmip, datap, len);
+ break;
+ case (WMI_READY_EVENTID):
+ A_DPRINTF(DBG_WMI, (DBGFMT "WMI_READY_EVENTID\n", DBGARG));
+ status = wmi_ready_event_rx(wmip, datap, len);
+ A_WMI_SEND_EVENT_TO_APP(wmip->wmi_devt, id, datap, len);
+ A_WMI_DBGLOG_INIT_DONE(wmip->wmi_devt);
+ break;
+ case (WMI_CONNECT_EVENTID):
+ A_DPRINTF(DBG_WMI, (DBGFMT "WMI_CONNECT_EVENTID\n", DBGARG));
+ status = wmi_connect_event_rx(wmip, datap, len);
+ A_WMI_SEND_GENERIC_EVENT_TO_APP(wmip->wmi_devt, id, datap, len);
+ break;
+ case (WMI_DISCONNECT_EVENTID):
+ A_DPRINTF(DBG_WMI, (DBGFMT "WMI_DISCONNECT_EVENTID\n", DBGARG));
+ status = wmi_disconnect_event_rx(wmip, datap, len);
+ A_WMI_SEND_EVENT_TO_APP(wmip->wmi_devt, id, datap, len);
+ break;
+ case (WMI_PEER_NODE_EVENTID):
+ A_DPRINTF (DBG_WMI, (DBGFMT "WMI_PEER_NODE_EVENTID\n", DBGARG));
+ status = wmi_peer_node_event_rx(wmip, datap, len);
+ A_WMI_SEND_EVENT_TO_APP(wmip->wmi_devt, id, datap, len);
+ break;
+ case (WMI_TKIP_MICERR_EVENTID):
+ A_DPRINTF(DBG_WMI, (DBGFMT "WMI_TKIP_MICERR_EVENTID\n", DBGARG));
+ status = wmi_tkip_micerr_event_rx(wmip, datap, len);
+ break;
+ case (WMI_BSSINFO_EVENTID):
+ A_DPRINTF(DBG_WMI, (DBGFMT "WMI_BSSINFO_EVENTID\n", DBGARG));
+ {
+ /*
+ * convert WMI_BSS_INFO_HDR2 to WMI_BSS_INFO_HDR
+ * Take a local copy of the WMI_BSS_INFO_HDR2 from the wmi buffer
+ * and reconstruct the WMI_BSS_INFO_HDR in its place
+ */
+ WMI_BSS_INFO_HDR2 bih2;
+ WMI_BSS_INFO_HDR *bih;
+ A_MEMCPY(&bih2, datap, sizeof(WMI_BSS_INFO_HDR2));
+
+ A_NETBUF_PUSH(osbuf, 4);
+ datap = A_NETBUF_DATA(osbuf);
+ len = A_NETBUF_LEN(osbuf);
+ bih = (WMI_BSS_INFO_HDR *)datap;
+
+ bih->channel = bih2.channel;
+ bih->frameType = bih2.frameType;
+ bih->snr = bih2.snr;
+ bih->rssi = bih2.snr - 95; /* derive RSSI from SNR, assuming a noise floor of about -95 dBm */
+ bih->ieMask = bih2.ieMask;
+ A_MEMCPY(bih->bssid, bih2.bssid, ATH_MAC_LEN);
+
+ status = wmi_bssInfo_event_rx(wmip, datap, len);
+ A_WMI_SEND_GENERIC_EVENT_TO_APP(wmip->wmi_devt, id, datap, len);
+ }
+ break;
+ case (WMI_REGDOMAIN_EVENTID):
+ A_DPRINTF(DBG_WMI, (DBGFMT "WMI_REGDOMAIN_EVENTID\n", DBGARG));
+ status = wmi_regDomain_event_rx(wmip, datap, len);
+ break;
+ case (WMI_PSTREAM_TIMEOUT_EVENTID):
+ A_DPRINTF(DBG_WMI, (DBGFMT "WMI_PSTREAM_TIMEOUT_EVENTID\n", DBGARG));
+ status = wmi_pstream_timeout_event_rx(wmip, datap, len);
+ /* pstreams are fatpipe abstractions that get created implicitly.
+ * User apps only deal with thinstreams; creation of a thinstream
+ * by the user, or data traffic flow in an AC, triggers implicit
+ * pstream creation. Do we need to send this event to the app?
+ * There is no harm in sending it.
+ */
+ A_WMI_SEND_EVENT_TO_APP(wmip->wmi_devt, id, datap, len);
+ break;
+ case (WMI_NEIGHBOR_REPORT_EVENTID):
+ A_DPRINTF(DBG_WMI, (DBGFMT "WMI_NEIGHBOR_REPORT_EVENTID\n", DBGARG));
+ status = wmi_neighborReport_event_rx(wmip, datap, len);
+ break;
+ case (WMI_SCAN_COMPLETE_EVENTID):
+ A_DPRINTF(DBG_WMI, (DBGFMT "WMI_SCAN_COMPLETE_EVENTID\n", DBGARG));
+ status = wmi_scanComplete_rx(wmip, datap, len);
+ A_WMI_SEND_EVENT_TO_APP(wmip->wmi_devt, id, datap, len);
+ break;
+ case (WMI_CMDERROR_EVENTID):
+ A_DPRINTF(DBG_WMI, (DBGFMT "WMI_CMDERROR_EVENTID\n", DBGARG));
+ status = wmi_errorEvent_rx(wmip, datap, len);
+ break;
+ case (WMI_REPORT_STATISTICS_EVENTID):
+ A_DPRINTF(DBG_WMI, (DBGFMT "WMI_REPORT_STATISTICS_EVENTID\n", DBGARG));
+ status = wmi_statsEvent_rx(wmip, datap, len);
+ break;
+ case (WMI_RSSI_THRESHOLD_EVENTID):
+ A_DPRINTF(DBG_WMI, (DBGFMT "WMI_RSSI_THRESHOLD_EVENTID\n", DBGARG));
+ status = wmi_rssiThresholdEvent_rx(wmip, datap, len);
+ break;
+ case (WMI_ERROR_REPORT_EVENTID):
+ A_DPRINTF(DBG_WMI, (DBGFMT "WMI_ERROR_REPORT_EVENTID\n", DBGARG));
+ status = wmi_reportErrorEvent_rx(wmip, datap, len);
+ A_WMI_SEND_EVENT_TO_APP(wmip->wmi_devt, id, datap, len);
+ break;
+ case (WMI_OPT_RX_FRAME_EVENTID):
+ A_DPRINTF(DBG_WMI, (DBGFMT "WMI_OPT_RX_FRAME_EVENTID\n", DBGARG));
+ status = wmi_opt_frame_event_rx(wmip, datap, len);
+ break;
+ case (WMI_REPORT_ROAM_TBL_EVENTID):
+ A_DPRINTF(DBG_WMI, (DBGFMT "WMI_REPORT_ROAM_TBL_EVENTID\n", DBGARG));
+ status = wmi_roam_tbl_event_rx(wmip, datap, len);
+ break;
+ case (WMI_EXTENSION_EVENTID):
+ A_DPRINTF(DBG_WMI, (DBGFMT "WMI_EXTENSION_EVENTID\n", DBGARG));
+ status = wmi_control_rx_xtnd(wmip, osbuf);
+ break;
+ case (WMI_CAC_EVENTID):
+ A_DPRINTF(DBG_WMI, (DBGFMT "WMI_CAC_EVENTID\n", DBGARG));
+ status = wmi_cac_event_rx(wmip, datap, len);
+ break;
+ case (WMI_CHANNEL_CHANGE_EVENTID):
+ A_DPRINTF(DBG_WMI, (DBGFMT "WMI_CHANNEL_CHANGE_EVENTID\n", DBGARG));
+ status = wmi_channel_change_event_rx(wmip, datap, len);
+ break;
+ case (WMI_REPORT_ROAM_DATA_EVENTID):
+ A_DPRINTF(DBG_WMI, (DBGFMT "WMI_REPORT_ROAM_DATA_EVENTID\n", DBGARG));
+ status = wmi_roam_data_event_rx(wmip, datap, len);
+ break;
+#ifdef CONFIG_HOST_TCMD_SUPPORT
+ case (WMI_TEST_EVENTID):
+ A_DPRINTF(DBG_WMI, (DBGFMT "WMI_TEST_EVENTID\n", DBGARG));
+ status = wmi_tcmd_test_report_rx(wmip, datap, len);
+ break;
+#endif
+ case (WMI_GET_FIXRATES_CMDID):
+ A_DPRINTF(DBG_WMI, (DBGFMT "WMI_GET_FIXRATES_CMDID\n", DBGARG));
+ status = wmi_ratemask_reply_rx(wmip, datap, len);
+ break;
+ case (WMI_TX_RETRY_ERR_EVENTID):
+ A_DPRINTF(DBG_WMI, (DBGFMT "WMI_TX_RETRY_ERR_EVENTID\n", DBGARG));
+ status = wmi_txRetryErrEvent_rx(wmip, datap, len);
+ A_WMI_SEND_EVENT_TO_APP(wmip->wmi_devt, id, datap, len);
+ break;
+ case (WMI_SNR_THRESHOLD_EVENTID):
+ A_DPRINTF(DBG_WMI, (DBGFMT "WMI_SNR_THRESHOLD_EVENTID\n", DBGARG));
+ status = wmi_snrThresholdEvent_rx(wmip, datap, len);
+ break;
+ case (WMI_LQ_THRESHOLD_EVENTID):
+ A_DPRINTF(DBG_WMI, (DBGFMT "WMI_LQ_THRESHOLD_EVENTID\n", DBGARG));
+ status = wmi_lqThresholdEvent_rx(wmip, datap, len);
+ A_WMI_SEND_EVENT_TO_APP(wmip->wmi_devt, id, datap, len);
+ break;
+ case (WMI_APLIST_EVENTID):
+ AR_DEBUG_PRINTF(ATH_DEBUG_WMI, ("Received APLIST Event\n"));
+ status = wmi_aplistEvent_rx(wmip, datap, len);
+ break;
+ case (WMI_GET_KEEPALIVE_CMDID):
+ A_DPRINTF(DBG_WMI, (DBGFMT "WMI_GET_KEEPALIVE_CMDID\n", DBGARG));
+ status = wmi_keepalive_reply_rx(wmip, datap, len);
+ break;
+ case (WMI_GET_WOW_LIST_EVENTID):
+ status = wmi_get_wow_list_event_rx(wmip, datap, len);
+ break;
+ case (WMI_GET_PMKID_LIST_EVENTID):
+ A_DPRINTF(DBG_WMI, (DBGFMT "WMI_GET_PMKID_LIST Event\n", DBGARG));
+ status = wmi_get_pmkid_list_event_rx(wmip, datap, len);
+ break;
+ case (WMI_PSPOLL_EVENTID):
+ A_DPRINTF(DBG_WMI, (DBGFMT "WMI_PSPOLL_EVENT\n", DBGARG));
+ status = wmi_pspoll_event_rx(wmip, datap, len);
+ break;
+ case (WMI_DTIMEXPIRY_EVENTID):
+ A_DPRINTF(DBG_WMI, (DBGFMT "WMI_DTIMEXPIRY_EVENT\n", DBGARG));
+ status = wmi_dtimexpiry_event_rx(wmip, datap, len);
+ break;
+ case (WMI_SET_PARAMS_REPLY_EVENTID):
+ A_DPRINTF(DBG_WMI, (DBGFMT "WMI_SET_PARAMS_REPLY Event\n", DBGARG));
+ status = wmi_set_params_event_rx(wmip, datap, len);
+ break;
+ case (WMI_ACM_REJECT_EVENTID):
+ A_DPRINTF(DBG_WMI, (DBGFMT "WMI_SET_PARAMS_REPLY Event\n", DBGARG));
+ status = wmi_acm_reject_event_rx(wmip, datap, len);
+ break;
+#ifdef ATH_AR6K_11N_SUPPORT
+ case (WMI_ADDBA_REQ_EVENTID):
+ status = wmi_addba_req_event_rx(wmip, datap, len);
+ break;
+ case (WMI_ADDBA_RESP_EVENTID):
+ status = wmi_addba_resp_event_rx(wmip, datap, len);
+ break;
+ case (WMI_DELBA_REQ_EVENTID):
+ status = wmi_delba_req_event_rx(wmip, datap, len);
+ break;
+ case (WMI_REPORT_BTCOEX_CONFIG_EVENTID):
+ A_DPRINTF(DBG_WMI, (DBGFMT "WMI_BTCOEX_CONFIG_EVENTID", DBGARG));
+ status = wmi_btcoex_config_event_rx(wmip, datap, len);
+ break;
+ case (WMI_REPORT_BTCOEX_STATS_EVENTID):
+ A_DPRINTF(DBG_WMI, (DBGFMT "WMI_BTCOEX_STATS_EVENTID", DBGARG));
+ status = wmi_btcoex_stats_event_rx(wmip, datap, len);
+ break;
+#endif
+ case (WMI_TX_COMPLETE_EVENTID):
+ {
+ int index;
+ TX_COMPLETE_MSG_V1 *pV1;
+ WMI_TX_COMPLETE_EVENT *pEv = (WMI_TX_COMPLETE_EVENT *)datap;
+ A_PRINTF("comp: %d %d %d\n", pEv->numMessages, pEv->msgLen, pEv->msgType);
+
+ for(index = 0 ; index < pEv->numMessages ; index++) {
+ pV1 = (TX_COMPLETE_MSG_V1 *)(datap + sizeof(WMI_TX_COMPLETE_EVENT) + index*sizeof(TX_COMPLETE_MSG_V1));
+ A_PRINTF("msg: %d %d %d %d\n", pV1->status, pV1->pktID, pV1->rateIdx, pV1->ackFailures);
+ }
+ }
+ break;
+ case (WMI_HCI_EVENT_EVENTID):
+ status = wmi_hci_event_rx(wmip, datap, len);
+ break;
+#ifdef WAPI_ENABLE
+ case (WMI_WAPI_REKEY_EVENTID):
+ A_DPRINTF(DBG_WMI, (DBGFMT "WMI_WAPI_REKEY_EVENTID", DBGARG));
+ status = wmi_wapi_rekey_event_rx(wmip, datap, len);
+ break;
+#endif
+ default:
+ A_DPRINTF(DBG_WMI|DBG_ERROR,
+ (DBGFMT "Unknown id 0x%x\n", DBGARG, id));
+ wmip->wmi_stats.cmd_id_err++;
+ status = A_ERROR;
+ break;
+ }
+
+ A_NETBUF_FREE(osbuf);
+
+ return status;
+}
+
+/* Send a "simple" wmi command -- one with no arguments */
+static A_STATUS
+wmi_simple_cmd(struct wmi_t *wmip, WMI_COMMAND_ID cmdid)
+{
+ void *osbuf;
+
+ osbuf = A_NETBUF_ALLOC(0);
+ if (osbuf == NULL) {
+ return A_NO_MEMORY;
+ }
+
+ return (wmi_cmd_send(wmip, osbuf, cmdid, NO_SYNC_WMIFLAG));
+}
+
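+/*
+ * Usage sketch (hypothetical wrapper): any parameter-less command can be
+ * issued through wmi_simple_cmd(), e.g. asking the target for its channel
+ * list; the reply is dispatched above in wmi_control_rx() under
+ * WMI_GET_CHANNEL_LIST_CMDID.
+ */
+static A_STATUS
+example_request_channel_list(struct wmi_t *wmip)
+{
+ return wmi_simple_cmd(wmip, WMI_GET_CHANNEL_LIST_CMDID);
+}
+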
+/* Send a "simple" extended wmi command -- one with no arguments.
+ Enabling this command only if GPIO or profiling support is enabled.
+ This is to suppress warnings on some platforms */
+#if defined(CONFIG_HOST_GPIO_SUPPORT) || defined(CONFIG_TARGET_PROFILE_SUPPORT)
+static A_STATUS
+wmi_simple_cmd_xtnd(struct wmi_t *wmip, WMIX_COMMAND_ID cmdid)
+{
+ void *osbuf;
+
+ osbuf = A_NETBUF_ALLOC(0);
+ if (osbuf == NULL) {
+ return A_NO_MEMORY;
+ }
+
+ return (wmi_cmd_send_xtnd(wmip, osbuf, cmdid, NO_SYNC_WMIFLAG));
+}
+#endif
+
+static A_STATUS
+wmi_ready_event_rx(struct wmi_t *wmip, A_UINT8 *datap, int len)
+{
+ WMI_READY_EVENT *ev = (WMI_READY_EVENT *)datap;
+
+ if (len < sizeof(WMI_READY_EVENT)) {
+ return A_EINVAL;
+ }
+ A_DPRINTF(DBG_WMI, (DBGFMT "Enter\n", DBGARG));
+ wmip->wmi_ready = TRUE;
+ A_WMI_READY_EVENT(wmip->wmi_devt, ev->macaddr, ev->phyCapability,
+ ev->sw_version, ev->abi_version);
+
+ return A_OK;
+}
+
+#define LE_READ_4(p) \
+ ((A_UINT32) \
+ ((((A_UINT8 *)(p))[0] ) | (((A_UINT8 *)(p))[1] << 8) | \
+ (((A_UINT8 *)(p))[2] << 16) | (((A_UINT8 *)(p))[3] << 24)))
+
+static int __inline
+iswmmoui(const A_UINT8 *frm)
+{
+ return frm[1] > 3 && LE_READ_4(frm+2) == ((WMM_OUI_TYPE<<24)|WMM_OUI);
+}
+
+static int __inline
+iswmmparam(const A_UINT8 *frm)
+{
+ return frm[1] > 5 && frm[6] == WMM_PARAM_OUI_SUBTYPE;
+}
+
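+/*
+ * LE_READ_4() assembles four bytes least-significant-first; a minimal
+ * self-check (illustrative only, not used by the driver):
+ */
+static int __inline
+example_le_read_4_selfcheck(void)
+{
+ A_UINT8 bytes[4] = { 0x01, 0x02, 0x03, 0x04 };
+
+ return LE_READ_4(bytes) == 0x04030201;
+}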
+
+static A_STATUS
+wmi_connect_event_rx(struct wmi_t *wmip, A_UINT8 *datap, int len)
+{
+ WMI_CONNECT_EVENT *ev;
+ A_UINT8 *pie,*peie;
+
+ if (len < sizeof(WMI_CONNECT_EVENT))
+ {
+ return A_EINVAL;
+ }
+ ev = (WMI_CONNECT_EVENT *)datap;
+
+ A_DPRINTF(DBG_WMI,
+ (DBGFMT "freq %d bssid %2.2x:%2.2x:%2.2x:%2.2x:%2.2x:%2.2x\n",
+ DBGARG, ev->channel,
+ ev->bssid[0], ev->bssid[1], ev->bssid[2],
+ ev->bssid[3], ev->bssid[4], ev->bssid[5]));
+
+ A_MEMCPY(wmip->wmi_bssid, ev->bssid, ATH_MAC_LEN);
+
+ /* initialize pointer to start of assoc rsp IEs */
+ pie = ev->assocInfo + ev->beaconIeLen + ev->assocReqLen +
+ sizeof(A_UINT16) + /* capinfo*/
+ sizeof(A_UINT16) + /* status Code */
+ sizeof(A_UINT16) ; /* associd */
+
+ /* initialize pointer to end of assoc rsp IEs */
+ peie = ev->assocInfo + ev->beaconIeLen + ev->assocReqLen + ev->assocRespLen;
+
+ while (pie < peie)
+ {
+ switch (*pie)
+ {
+ case IEEE80211_ELEMID_VENDOR:
+ if (iswmmoui(pie))
+ {
+ if(iswmmparam (pie))
+ {
+ wmip->wmi_is_wmm_enabled = TRUE;
+ }
+ }
+ break;
+ }
+
+ if (wmip->wmi_is_wmm_enabled)
+ {
+ break;
+ }
+ pie += pie[1] + 2;
+ }
+
+ A_WMI_CONNECT_EVENT(wmip->wmi_devt, ev->channel, ev->bssid,
+ ev->listenInterval, ev->beaconInterval,
+ (NETWORK_TYPE) ev->networkType, ev->beaconIeLen,
+ ev->assocReqLen, ev->assocRespLen,
+ ev->assocInfo);
+
+ return A_OK;
+}
+
+static A_STATUS
+wmi_regDomain_event_rx(struct wmi_t *wmip, A_UINT8 *datap, int len)
+{
+ WMI_REG_DOMAIN_EVENT *ev;
+
+ if (len < sizeof(*ev)) {
+ return A_EINVAL;
+ }
+ ev = (WMI_REG_DOMAIN_EVENT *)datap;
+
+ A_WMI_REGDOMAIN_EVENT(wmip->wmi_devt, ev->regDomain);
+
+ return A_OK;
+}
+
+static A_STATUS
+wmi_neighborReport_event_rx(struct wmi_t *wmip, A_UINT8 *datap, int len)
+{
+ WMI_NEIGHBOR_REPORT_EVENT *ev;
+ int numAps;
+
+ if (len < sizeof(*ev)) {
+ return A_EINVAL;
+ }
+ ev = (WMI_NEIGHBOR_REPORT_EVENT *)datap;
+ numAps = ev->numberOfAps;
+
+ if (len < (int)(sizeof(*ev) + ((numAps - 1) * sizeof(WMI_NEIGHBOR_INFO)))) {
+ return A_EINVAL;
+ }
+
+ A_WMI_NEIGHBORREPORT_EVENT(wmip->wmi_devt, numAps, ev->neighbor);
+
+ return A_OK;
+}
+
+static A_STATUS
+wmi_disconnect_event_rx(struct wmi_t *wmip, A_UINT8 *datap, int len)
+{
+ WMI_DISCONNECT_EVENT *ev;
+ wmip->wmi_traffic_class = 100;
+
+ if (len < sizeof(WMI_DISCONNECT_EVENT)) {
+ return A_EINVAL;
+ }
+ A_DPRINTF(DBG_WMI, (DBGFMT "Enter\n", DBGARG));
+
+ ev = (WMI_DISCONNECT_EVENT *)datap;
+
+ A_MEMZERO(wmip->wmi_bssid, sizeof(wmip->wmi_bssid));
+
+ wmip->wmi_is_wmm_enabled = FALSE;
+ wmip->wmi_pair_crypto_type = NONE_CRYPT;
+ wmip->wmi_grp_crypto_type = NONE_CRYPT;
+
+ A_WMI_DISCONNECT_EVENT(wmip->wmi_devt, ev->disconnectReason, ev->bssid,
+ ev->assocRespLen, ev->assocInfo, ev->protocolReasonStatus);
+
+ return A_OK;
+}
+
+static A_STATUS
+wmi_peer_node_event_rx(struct wmi_t *wmip, A_UINT8 *datap, int len)
+{
+ WMI_PEER_NODE_EVENT *ev;
+
+ if (len < sizeof(WMI_PEER_NODE_EVENT)) {
+ return A_EINVAL;
+ }
+ ev = (WMI_PEER_NODE_EVENT *)datap;
+ if (ev->eventCode == PEER_NODE_JOIN_EVENT) {
+ A_DPRINTF (DBG_WMI, (DBGFMT "Joined node with Macaddr: ", DBGARG));
+ } else if(ev->eventCode == PEER_NODE_LEAVE_EVENT) {
+ A_DPRINTF (DBG_WMI, (DBGFMT "left node with Macaddr: ", DBGARG));
+ }
+
+ A_WMI_PEER_EVENT (wmip->wmi_devt, ev->eventCode, ev->peerMacAddr);
+
+ return A_OK;
+}
+
+static A_STATUS
+wmi_tkip_micerr_event_rx(struct wmi_t *wmip, A_UINT8 *datap, int len)
+{
+ WMI_TKIP_MICERR_EVENT *ev;
+
+ if (len < sizeof(*ev)) {
+ return A_EINVAL;
+ }
+ A_DPRINTF(DBG_WMI, (DBGFMT "Enter\n", DBGARG));
+
+ ev = (WMI_TKIP_MICERR_EVENT *)datap;
+ A_WMI_TKIP_MICERR_EVENT(wmip->wmi_devt, ev->keyid, ev->ismcast);
+
+ return A_OK;
+}
+
+static A_STATUS
+wmi_bssInfo_event_rx(struct wmi_t *wmip, A_UINT8 *datap, int len)
+{
+ bss_t *bss = NULL;
+ WMI_BSS_INFO_HDR *bih;
+ A_UINT8 *buf;
+ A_UINT32 nodeCachingAllowed = 1;
+ A_UCHAR cached_ssid_len = 0;
+ A_UCHAR cached_ssid_buf[IEEE80211_NWID_LEN] = {0};
+ A_UINT8 beacon_ssid_len = 0;
+
+ if (len <= sizeof(WMI_BSS_INFO_HDR)) {
+ return A_EINVAL;
+ }
+
+ bih = (WMI_BSS_INFO_HDR *)datap;
+ bss = wlan_find_node(&wmip->wmi_scan_table, bih->bssid);
+
+ if (bih->rssi > 0) {
+ if (NULL == bss)
+ return A_OK; //no node found in the table, just drop the node with incorrect RSSI
+ else
+ bih->rssi = bss->ni_rssi; //Adjust RSSI in datap in case it is used in A_WMI_BSSINFO_EVENT_RX
+ }
+
+ A_WMI_BSSINFO_EVENT_RX(wmip->wmi_devt, datap, len);
+ /* What is driver config for wlan node caching? */
+ if(ar6000_get_driver_cfg(wmip->wmi_devt,
+ AR6000_DRIVER_CFG_GET_WLANNODECACHING,
+ &nodeCachingAllowed) != A_OK) {
+ wmi_node_return(wmip, bss);
+ return A_EINVAL;
+ }
+
+ if(!nodeCachingAllowed) {
+ wmi_node_return(wmip, bss);
+ return A_OK;
+ }
+
+ buf = datap + sizeof(WMI_BSS_INFO_HDR);
+ len -= sizeof(WMI_BSS_INFO_HDR);
+
+ A_DPRINTF(DBG_WMI2, (DBGFMT "bssInfo event - ch %u, rssi %02x, "
+ "bssid \"%pM\"\n", DBGARG, bih->channel,
+ (unsigned char) bih->rssi, bih->bssid));
+
+ if(wps_enable && (bih->frameType == PROBERESP_FTYPE) ) {
+ wmi_node_return(wmip, bss);
+ return A_OK;
+ }
+
+ if (bss != NULL) {
+ /*
+ * Free up the node. Not the most efficient process given
+ * we are about to allocate a new node but it is simple and should be
+ * adequate.
+ */
+
+ /* In case of hidden AP, beacon will not have ssid,
+ * but a directed probe response will have it,
+ * so cache the probe-resp-ssid if already present. */
+ if ((TRUE == is_probe_ssid) && (BEACON_FTYPE == bih->frameType))
+ {
+ A_UCHAR *ie_ssid;
+
+ ie_ssid = bss->ni_cie.ie_ssid;
+ if(ie_ssid && (ie_ssid[1] <= IEEE80211_NWID_LEN) && (ie_ssid[2] != 0))
+ {
+ cached_ssid_len = ie_ssid[1];
+ memcpy(cached_ssid_buf, ie_ssid + 2, cached_ssid_len);
+ }
+ }
+
+ /*
+ * Use the current average RSSI of the associated AP, based on the assumptions that
+ * 1. most OSes with a GUI refresh RSSI periodically via wmi_get_stats_cmd(), and
+ * 2. wmi_get_stats_cmd(..) is also issued whenever wmi_startscan_cmd(...) is called.
+ * The averaged value gives the end user a better figure than an instantaneous scan-result sample,
+ * and it keeps the RSSI shown for the scan result consistent with the GUI's signal icon.
+ */
+ if (IEEE80211_ADDR_EQ(wmip->wmi_bssid, bih->bssid)) {
+ bih->rssi = bss->ni_rssi;
+ bih->snr = bss->ni_snr;
+ }
+
+ wlan_node_reclaim(&wmip->wmi_scan_table, bss);
+ }
+
+ /* beacon/probe response frame format
+ * [8] time stamp
+ * [2] beacon interval
+ * [2] capability information
+ * [tlv] ssid */
+ beacon_ssid_len = buf[SSID_IE_LEN_INDEX];
+
+ /* If ssid is cached for this hidden AP, then change buffer len accordingly. */
+ if ((TRUE == is_probe_ssid) && (BEACON_FTYPE == bih->frameType) &&
+ (0 != cached_ssid_len) &&
+ (0 == beacon_ssid_len || (cached_ssid_len > beacon_ssid_len && 0 == buf[SSID_IE_LEN_INDEX + 1])))
+ {
+ len += (cached_ssid_len - beacon_ssid_len);
+ }
+
+ bss = wlan_node_alloc(&wmip->wmi_scan_table, len);
+ if (bss == NULL) {
+ return A_NO_MEMORY;
+ }
+
+ bss->ni_snr = bih->snr;
+ bss->ni_rssi = bih->rssi;
+ A_ASSERT(bss->ni_buf != NULL);
+
+ /* In case of hidden AP, beacon will not have ssid,
+ * but a directed probe response will have it,
+ * so place the cached-ssid(probe-resp) in the bssinfo. */
+ if ((TRUE == is_probe_ssid) && (BEACON_FTYPE == bih->frameType) &&
+ (0 != cached_ssid_len) &&
+ (0 == beacon_ssid_len || (beacon_ssid_len && 0 == buf[SSID_IE_LEN_INDEX + 1])))
+ {
+ A_UINT8 *ni_buf = bss->ni_buf;
+ int buf_len = len;
+
+ /* copy the first 14 bytes, namely
+ * time-stamp(8), beacon-interval(2), cap-info(2), ssid-id(1), ssid-len(1). */
+ A_MEMCPY(ni_buf, buf, SSID_IE_LEN_INDEX + 1);
+
+ ni_buf[SSID_IE_LEN_INDEX] = cached_ssid_len;
+ ni_buf += (SSID_IE_LEN_INDEX + 1);
+
+ buf += (SSID_IE_LEN_INDEX + 1);
+ buf_len -= (SSID_IE_LEN_INDEX + 1);
+
+ /* copy the cached ssid */
+ A_MEMCPY(ni_buf, cached_ssid_buf, cached_ssid_len);
+ ni_buf += cached_ssid_len;
+
+ buf += beacon_ssid_len;
+ buf_len -= beacon_ssid_len;
+
+ if (cached_ssid_len > beacon_ssid_len)
+ buf_len -= (cached_ssid_len - beacon_ssid_len);
+
+ /* now copy the rest of bytes */
+ A_MEMCPY(ni_buf, buf, buf_len);
+ }
+ else
+ A_MEMCPY(bss->ni_buf, buf, len);
+
+ bss->ni_framelen = len;
+ if (wlan_parse_beacon(bss->ni_buf, len, &bss->ni_cie) != A_OK) {
+ wlan_node_free(bss);
+ return A_EINVAL;
+ }
+
+ /*
+ * Update the frequency in ie_chan, overwriting the channel number
+ * that was filled in by wlan_parse_beacon
+ */
+ bss->ni_cie.ie_chan = bih->channel;
+ wlan_setup_node(&wmip->wmi_scan_table, bss, bih->bssid);
+
+ return A_OK;
+}
+
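+/*
+ * Offset sketch for the beacon/probe-response buffer handled above
+ * (illustrative; assumes SSID_IE_LEN_INDEX indexes the SSID length octet,
+ * which matches the 14-byte copy in wmi_bssInfo_event_rx):
+ *
+ *   buf[0..7]    time stamp
+ *   buf[8..9]    beacon interval
+ *   buf[10..11]  capability information
+ *   buf[12]      SSID element id
+ *   buf[13]      SSID length (SSID_IE_LEN_INDEX)
+ *   buf[14..]    SSID octets, followed by the remaining IEs
+ */
+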
+static A_STATUS
+wmi_opt_frame_event_rx(struct wmi_t *wmip, A_UINT8 *datap, int len)
+{
+ bss_t *bss;
+ WMI_OPT_RX_INFO_HDR *bih;
+ A_UINT8 *buf;
+
+ if (len <= sizeof(WMI_OPT_RX_INFO_HDR)) {
+ return A_EINVAL;
+ }
+
+ bih = (WMI_OPT_RX_INFO_HDR *)datap;
+ buf = datap + sizeof(WMI_OPT_RX_INFO_HDR);
+ len -= sizeof(WMI_OPT_RX_INFO_HDR);
+
+ A_DPRINTF(DBG_WMI2, (DBGFMT "opt frame event %2.2x:%2.2x\n", DBGARG,
+ bih->bssid[4], bih->bssid[5]));
+
+ bss = wlan_find_node(&wmip->wmi_scan_table, bih->bssid);
+ if (bss != NULL) {
+ /*
+ * Free up the node. Not the most efficient process given
+ * we are about to allocate a new node but it is simple and should be
+ * adequate.
+ */
+ wlan_node_reclaim(&wmip->wmi_scan_table, bss);
+ }
+
+ bss = wlan_node_alloc(&wmip->wmi_scan_table, len);
+ if (bss == NULL) {
+ return A_NO_MEMORY;
+ }
+
+ bss->ni_snr = bih->snr;
+ bss->ni_cie.ie_chan = bih->channel;
+ A_ASSERT(bss->ni_buf != NULL);
+ A_MEMCPY(bss->ni_buf, buf, len);
+ wlan_setup_node(&wmip->wmi_scan_table, bss, bih->bssid);
+
+ return A_OK;
+}
+
+ /* This event indicates inactivity timeout of a fatpipe(pstream)
+ * at the target
+ */
+static A_STATUS
+wmi_pstream_timeout_event_rx(struct wmi_t *wmip, A_UINT8 *datap, int len)
+{
+ WMI_PSTREAM_TIMEOUT_EVENT *ev;
+
+ if (len < sizeof(WMI_PSTREAM_TIMEOUT_EVENT)) {
+ return A_EINVAL;
+ }
+
+ A_DPRINTF(DBG_WMI, (DBGFMT "wmi_pstream_timeout_event_rx\n", DBGARG));
+
+ ev = (WMI_PSTREAM_TIMEOUT_EVENT *)datap;
+
+ /* When the pstream (fat pipe == AC) times out, it means there were no
+ * thinStreams within this pstream and it had been created implicitly due
+ * to data flow on this AC. We start the inactivity timer only for
+ * implicitly created pstreams. Just reset the host state.
+ */
+ /* Set the activeTsids for this AC to 0 */
+ LOCK_WMI(wmip);
+ wmip->wmi_streamExistsForAC[ev->trafficClass]=0;
+ wmip->wmi_fatPipeExists &= ~(1 << ev->trafficClass);
+ UNLOCK_WMI(wmip);
+
+ /*Indicate inactivity to driver layer for this fatpipe (pstream)*/
+ A_WMI_STREAM_TX_INACTIVE(wmip->wmi_devt, ev->trafficClass);
+
+ return A_OK;
+}
+
+static A_STATUS
+wmi_bitrate_reply_rx(struct wmi_t *wmip, A_UINT8 *datap, int len)
+{
+ WMI_BIT_RATE_REPLY *reply;
+ A_INT32 rate;
+ A_UINT32 sgi,index;
+ /* 54149:
+ * The WMI_BIT_RATE_CMD structure was changed to WMI_BIT_RATE_REPLY here,
+ * since the two differ in length and using the wrong one would make this
+ * length check return an error.
+ */
+ if (len < sizeof(WMI_BIT_RATE_REPLY)) {
+ return A_EINVAL;
+ }
+ reply = (WMI_BIT_RATE_REPLY *)datap;
+ A_DPRINTF(DBG_WMI,
+ (DBGFMT "Enter - rateindex %d\n", DBGARG, reply->rateIndex));
+
+ if (reply->rateIndex == (A_INT8) RATE_AUTO) {
+ rate = RATE_AUTO;
+ } else {
+ // the SGI state is stored as the MSb of the rateIndex
+ index = reply->rateIndex & 0x7f;
+ sgi = (reply->rateIndex & 0x80)? 1:0;
+ rate = wmi_rateTable[index][sgi];
+ }
+
+ A_WMI_BITRATE_RX(wmip->wmi_devt, rate);
+ return A_OK;
+}
+
+static A_STATUS
+wmi_ratemask_reply_rx(struct wmi_t *wmip, A_UINT8 *datap, int len)
+{
+ WMI_FIX_RATES_REPLY *reply;
+
+ if (len < sizeof(WMI_FIX_RATES_REPLY)) {
+ return A_EINVAL;
+ }
+ reply = (WMI_FIX_RATES_REPLY *)datap;
+ A_DPRINTF(DBG_WMI,
+ (DBGFMT "Enter - fixed rate mask %x\n", DBGARG, reply->fixRateMask));
+
+ A_WMI_RATEMASK_RX(wmip->wmi_devt, reply->fixRateMask);
+
+ return A_OK;
+}
+
+static A_STATUS
+wmi_channelList_reply_rx(struct wmi_t *wmip, A_UINT8 *datap, int len)
+{
+ WMI_CHANNEL_LIST_REPLY *reply;
+
+ if (len < sizeof(WMI_CHANNEL_LIST_REPLY)) {
+ return A_EINVAL;
+ }
+ reply = (WMI_CHANNEL_LIST_REPLY *)datap;
+ A_DPRINTF(DBG_WMI, (DBGFMT "Enter\n", DBGARG));
+
+ A_WMI_CHANNELLIST_RX(wmip->wmi_devt, reply->numChannels,
+ reply->channelList);
+
+ return A_OK;
+}
+
+static A_STATUS
+wmi_txPwr_reply_rx(struct wmi_t *wmip, A_UINT8 *datap, int len)
+{
+ WMI_TX_PWR_REPLY *reply;
+
+ if (len < sizeof(*reply)) {
+ return A_EINVAL;
+ }
+ reply = (WMI_TX_PWR_REPLY *)datap;
+ A_DPRINTF(DBG_WMI, (DBGFMT "Enter\n", DBGARG));
+
+ A_WMI_TXPWR_RX(wmip->wmi_devt, reply->dbM);
+
+ return A_OK;
+}
+static A_STATUS
+wmi_keepalive_reply_rx(struct wmi_t *wmip, A_UINT8 *datap, int len)
+{
+ WMI_GET_KEEPALIVE_CMD *reply;
+
+ if (len < sizeof(*reply)) {
+ return A_EINVAL;
+ }
+ reply = (WMI_GET_KEEPALIVE_CMD *)datap;
+ A_DPRINTF(DBG_WMI, (DBGFMT "Enter\n", DBGARG));
+
+ A_WMI_KEEPALIVE_RX(wmip->wmi_devt, reply->configured);
+
+ return A_OK;
+}
+
+
+static A_STATUS
+wmi_dset_open_req_rx(struct wmi_t *wmip, A_UINT8 *datap, int len)
+{
+ WMIX_DSETOPENREQ_EVENT *dsetopenreq;
+
+ if (len < sizeof(WMIX_DSETOPENREQ_EVENT)) {
+ return A_EINVAL;
+ }
+ dsetopenreq = (WMIX_DSETOPENREQ_EVENT *)datap;
+ A_DPRINTF(DBG_WMI,
+ (DBGFMT "Enter - dset_id=0x%x\n", DBGARG, dsetopenreq->dset_id));
+ A_WMI_DSET_OPEN_REQ(wmip->wmi_devt,
+ dsetopenreq->dset_id,
+ dsetopenreq->targ_dset_handle,
+ dsetopenreq->targ_reply_fn,
+ dsetopenreq->targ_reply_arg);
+
+ return A_OK;
+}
+
+#ifdef CONFIG_HOST_DSET_SUPPORT
+static A_STATUS
+wmi_dset_close_rx(struct wmi_t *wmip, A_UINT8 *datap, int len)
+{
+ WMIX_DSETCLOSE_EVENT *dsetclose;
+
+ if (len < sizeof(WMIX_DSETCLOSE_EVENT)) {
+ return A_EINVAL;
+ }
+ A_DPRINTF(DBG_WMI, (DBGFMT "Enter\n", DBGARG));
+
+ dsetclose = (WMIX_DSETCLOSE_EVENT *)datap;
+ A_WMI_DSET_CLOSE(wmip->wmi_devt, dsetclose->access_cookie);
+
+ return A_OK;
+}
+
+static A_STATUS
+wmi_dset_data_req_rx(struct wmi_t *wmip, A_UINT8 *datap, int len)
+{
+ WMIX_DSETDATAREQ_EVENT *dsetdatareq;
+
+ if (len < sizeof(WMIX_DSETDATAREQ_EVENT)) {
+ return A_EINVAL;
+ }
+ A_DPRINTF(DBG_WMI, (DBGFMT "Enter\n", DBGARG));
+
+ dsetdatareq = (WMIX_DSETDATAREQ_EVENT *)datap;
+ A_WMI_DSET_DATA_REQ(wmip->wmi_devt,
+ dsetdatareq->access_cookie,
+ dsetdatareq->offset,
+ dsetdatareq->length,
+ dsetdatareq->targ_buf,
+ dsetdatareq->targ_reply_fn,
+ dsetdatareq->targ_reply_arg);
+
+ return A_OK;
+}
+#endif /* CONFIG_HOST_DSET_SUPPORT */
+
+static A_STATUS
+wmi_scanComplete_rx(struct wmi_t *wmip, A_UINT8 *datap, int len)
+{
+ WMI_SCAN_COMPLETE_EVENT *ev;
+
+ ev = (WMI_SCAN_COMPLETE_EVENT *)datap;
+ if ((A_STATUS)ev->status == A_OK) {
+ wlan_refresh_inactive_nodes(&wmip->wmi_scan_table);
+ }
+ A_WMI_SCANCOMPLETE_EVENT(wmip->wmi_devt, (A_STATUS) ev->status);
+ is_probe_ssid = FALSE;
+
+ return A_OK;
+}
+
+/*
+ * Target is reporting a programming error. This is for
+ * developer aid only. Target only checks a few common violations
+ * and it is the responsibility of the host to do all error checking.
+ * Behavior of target after wmi error event is undefined.
+ * A reset is recommended.
+ */
+static A_STATUS
+wmi_errorEvent_rx(struct wmi_t *wmip, A_UINT8 *datap, int len)
+{
+ WMI_CMD_ERROR_EVENT *ev;
+
+ ev = (WMI_CMD_ERROR_EVENT *)datap;
+ AR_DEBUG_PRINTF(ATH_DEBUG_WMI, ("Programming Error: cmd=%d ", ev->commandId));
+ switch (ev->errorCode) {
+ case (INVALID_PARAM):
+ AR_DEBUG_PRINTF(ATH_DEBUG_WMI, ("Illegal Parameter\n"));
+ break;
+ case (ILLEGAL_STATE):
+ AR_DEBUG_PRINTF(ATH_DEBUG_WMI, ("Illegal State\n"));
+ break;
+ case (INTERNAL_ERROR):
+ AR_DEBUG_PRINTF(ATH_DEBUG_WMI, ("Internal Error\n"));
+ break;
+ }
+
+ return A_OK;
+}
+
+
+static A_STATUS
+wmi_statsEvent_rx(struct wmi_t *wmip, A_UINT8 *datap, int len)
+{
+ A_DPRINTF(DBG_WMI, (DBGFMT "Enter\n", DBGARG));
+
+ A_WMI_TARGETSTATS_EVENT(wmip->wmi_devt, datap, len);
+
+ return A_OK;
+}
+
+static A_STATUS
+wmi_rssiThresholdEvent_rx(struct wmi_t *wmip, A_UINT8 *datap, int len)
+{
+ WMI_RSSI_THRESHOLD_EVENT *reply;
+ WMI_RSSI_THRESHOLD_VAL newThreshold;
+ WMI_RSSI_THRESHOLD_PARAMS_CMD cmd;
+ SQ_THRESHOLD_PARAMS *sq_thresh =
+ &wmip->wmi_SqThresholdParams[SIGNAL_QUALITY_METRICS_RSSI];
+ A_UINT8 upper_rssi_threshold, lower_rssi_threshold;
+ A_INT16 rssi;
+
+ if (len < sizeof(*reply)) {
+ return A_EINVAL;
+ }
+ reply = (WMI_RSSI_THRESHOLD_EVENT *)datap;
+ A_DPRINTF(DBG_WMI, (DBGFMT "Enter\n", DBGARG));
+ newThreshold = (WMI_RSSI_THRESHOLD_VAL) reply->range;
+ rssi = reply->rssi;
+
+ /*
+ * Identify the threshold breached and communicate that to the app. After
+ * that install a new set of thresholds based on the signal quality
+ * reported by the target
+ */
+ if (newThreshold) {
+ /* Upper threshold breached */
+ if (rssi < sq_thresh->upper_threshold[0]) {
+ A_DPRINTF(DBG_WMI, (DBGFMT "Spurious upper RSSI threshold event: "
+ " %d\n", DBGARG, rssi));
+ } else if ((rssi < sq_thresh->upper_threshold[1]) &&
+ (rssi >= sq_thresh->upper_threshold[0]))
+ {
+ newThreshold = WMI_RSSI_THRESHOLD1_ABOVE;
+ } else if ((rssi < sq_thresh->upper_threshold[2]) &&
+ (rssi >= sq_thresh->upper_threshold[1]))
+ {
+ newThreshold = WMI_RSSI_THRESHOLD2_ABOVE;
+ } else if ((rssi < sq_thresh->upper_threshold[3]) &&
+ (rssi >= sq_thresh->upper_threshold[2]))
+ {
+ newThreshold = WMI_RSSI_THRESHOLD3_ABOVE;
+ } else if ((rssi < sq_thresh->upper_threshold[4]) &&
+ (rssi >= sq_thresh->upper_threshold[3]))
+ {
+ newThreshold = WMI_RSSI_THRESHOLD4_ABOVE;
+ } else if ((rssi < sq_thresh->upper_threshold[5]) &&
+ (rssi >= sq_thresh->upper_threshold[4]))
+ {
+ newThreshold = WMI_RSSI_THRESHOLD5_ABOVE;
+ } else if (rssi >= sq_thresh->upper_threshold[5]) {
+ newThreshold = WMI_RSSI_THRESHOLD6_ABOVE;
+ }
+ } else {
+ /* Lower threshold breached */
+ if (rssi > sq_thresh->lower_threshold[0]) {
+ A_DPRINTF(DBG_WMI, (DBGFMT "Spurious lower RSSI threshold event: "
+ "%d %d\n", DBGARG, rssi, sq_thresh->lower_threshold[0]));
+ } else if ((rssi > sq_thresh->lower_threshold[1]) &&
+ (rssi <= sq_thresh->lower_threshold[0]))
+ {
+ newThreshold = WMI_RSSI_THRESHOLD6_BELOW;
+ } else if ((rssi > sq_thresh->lower_threshold[2]) &&
+ (rssi <= sq_thresh->lower_threshold[1]))
+ {
+ newThreshold = WMI_RSSI_THRESHOLD5_BELOW;
+ } else if ((rssi > sq_thresh->lower_threshold[3]) &&
+ (rssi <= sq_thresh->lower_threshold[2]))
+ {
+ newThreshold = WMI_RSSI_THRESHOLD4_BELOW;
+ } else if ((rssi > sq_thresh->lower_threshold[4]) &&
+ (rssi <= sq_thresh->lower_threshold[3]))
+ {
+ newThreshold = WMI_RSSI_THRESHOLD3_BELOW;
+ } else if ((rssi > sq_thresh->lower_threshold[5]) &&
+ (rssi <= sq_thresh->lower_threshold[4]))
+ {
+ newThreshold = WMI_RSSI_THRESHOLD2_BELOW;
+ } else if (rssi <= sq_thresh->lower_threshold[5]) {
+ newThreshold = WMI_RSSI_THRESHOLD1_BELOW;
+ }
+ }
+ /* Calculate and install the next set of thresholds */
+ lower_rssi_threshold = ar6000_get_lower_threshold(rssi, sq_thresh,
+ sq_thresh->lower_threshold_valid_count);
+ upper_rssi_threshold = ar6000_get_upper_threshold(rssi, sq_thresh,
+ sq_thresh->upper_threshold_valid_count);
+ /* Issue a wmi command to install the thresholds */
+ cmd.thresholdAbove1_Val = upper_rssi_threshold;
+ cmd.thresholdBelow1_Val = lower_rssi_threshold;
+ cmd.weight = sq_thresh->weight;
+ cmd.pollTime = sq_thresh->polling_interval;
+
+ rssi_event_value = rssi;
+
+ if (wmi_send_rssi_threshold_params(wmip, &cmd) != A_OK) {
+ A_DPRINTF(DBG_WMI, (DBGFMT "Unable to configure the RSSI thresholds\n",
+ DBGARG));
+ }
+
+ A_WMI_RSSI_THRESHOLD_EVENT(wmip->wmi_devt, newThreshold, reply->rssi);
+
+ return A_OK;
+}
+
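+/*
+ * The ladder above simply brackets the reported RSSI between consecutive
+ * entries of the sorted threshold tables before re-arming the target with
+ * the next pair. A generic sketch of that lookup (hypothetical helper,
+ * not used by the driver):
+ */
+static A_UINT8
+example_find_band(A_INT16 value, A_INT16 *sorted, A_UINT8 count)
+{
+ A_UINT8 band = 0;
+
+ while ((band < count) && (value >= sorted[band])) {
+ band++; /* band n means sorted[n-1] <= value < sorted[n] */
+ }
+ return band;
+}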
+
+static A_STATUS
+wmi_reportErrorEvent_rx(struct wmi_t *wmip, A_UINT8 *datap, int len)
+{
+ WMI_TARGET_ERROR_REPORT_EVENT *reply;
+
+ if (len < sizeof(*reply)) {
+ return A_EINVAL;
+ }
+ reply = (WMI_TARGET_ERROR_REPORT_EVENT *)datap;
+ A_DPRINTF(DBG_WMI, (DBGFMT "Enter\n", DBGARG));
+
+ A_WMI_REPORT_ERROR_EVENT(wmip->wmi_devt, (WMI_TARGET_ERROR_VAL) reply->errorVal);
+
+ return A_OK;
+}
+
+static A_STATUS
+wmi_cac_event_rx(struct wmi_t *wmip, A_UINT8 *datap, int len)
+{
+ WMI_CAC_EVENT *reply;
+ WMM_TSPEC_IE *tspec_ie;
+ A_UINT16 activeTsids;
+
+ if (len < sizeof(*reply)) {
+ return A_EINVAL;
+ }
+ reply = (WMI_CAC_EVENT *)datap;
+
+ A_DPRINTF(DBG_WMI, (DBGFMT "Enter\n", DBGARG));
+
+ if ((reply->cac_indication == CAC_INDICATION_ADMISSION_RESP) &&
+ (reply->statusCode != TSPEC_STATUS_CODE_ADMISSION_ACCEPTED)) {
+ tspec_ie = (WMM_TSPEC_IE *) &(reply->tspecSuggestion);
+
+ wmi_delete_pstream_cmd(wmip, reply->ac,
+ (tspec_ie->tsInfo_info >> TSPEC_TSID_S) & TSPEC_TSID_MASK);
+ }
+ else if (reply->cac_indication == CAC_INDICATION_NO_RESP) {
+ A_UINT8 i;
+
+ /* the following assumes that there is only one outstanding ADDTS request
+ * when this event is received */
+ LOCK_WMI(wmip);
+ activeTsids = wmip->wmi_streamExistsForAC[reply->ac];
+ UNLOCK_WMI(wmip);
+
+ for (i = 0; i < sizeof(activeTsids) * 8; i++) {
+ if ((activeTsids >> i) & 1) {
+ break;
+ }
+ }
+ if (i < (sizeof(activeTsids) * 8)) {
+ wmi_delete_pstream_cmd(wmip, reply->ac, i);
+ }
+ }
+ /*
+ * Ev#72990: Clear the active tsids and add the missing handling
+ * for a QoS stream deleted by the AP
+ */
+ else if (reply->cac_indication == CAC_INDICATION_DELETE) {
+ A_UINT8 tsid = 0;
+
+ tspec_ie = (WMM_TSPEC_IE *) &(reply->tspecSuggestion);
+ tsid= ((tspec_ie->tsInfo_info >> TSPEC_TSID_S) & TSPEC_TSID_MASK);
+ LOCK_WMI(wmip);
+ wmip->wmi_streamExistsForAC[reply->ac] &= ~(1<<tsid);
+ activeTsids = wmip->wmi_streamExistsForAC[reply->ac];
+ UNLOCK_WMI(wmip);
+
+
+ /* Indicate stream inactivity to driver layer only if all tsids
+ * within this AC are deleted.
+ */
+ if (!activeTsids) {
+ A_WMI_STREAM_TX_INACTIVE(wmip->wmi_devt, reply->ac);
+ wmip->wmi_fatPipeExists &= ~(1 << reply->ac);
+ }